neutron-16.0.0.0b2.dev214/

neutron-16.0.0.0b2.dev214/.coveragerc:

[run]
branch = True
source = neutron
omit = neutron/tests/*

[report]
ignore_errors = True

neutron-16.0.0.0b2.dev214/.mailmap:

# Format is:
# <preferred e-mail> <other e-mail 1>
# <other e-mail 2>
lawrancejing Jiajun Liu
Zhongyue Luo
Kun Huang
Zhenguo Niu
Isaku Yamahata
Isaku Yamahata
Morgan Fainberg

neutron-16.0.0.0b2.dev214/.pylintrc:

# The format of this file isn't really documented; just use --generate-rcfile
[MASTER]
# Add <file or directory> to the black list. It should be a base name, not a
# path. You may set this option multiple times.
ignore=.git,tests

[MESSAGES CONTROL]
# NOTE(gus): This is a long list. A number of these are important and
# should be re-enabled once the offending code is fixed (or marked
# with a local disable)
disable=
# "F" Fatal errors that prevent further processing
 import-error,
# "I" Informational noise
 c-extension-no-member,
 locally-disabled,
# "E" Error for important programming issues (likely bugs)
 access-member-before-definition,
 no-member,
 no-method-argument,
 no-self-argument,
 not-an-iterable,
# "W" Warnings for stylistic problems or minor programming issues
 abstract-method,
 arguments-differ,
 attribute-defined-outside-init,
 bad-builtin,
 bad-indentation,
 broad-except,
 dangerous-default-value,
 deprecated-lambda,
 expression-not-assigned,
 fixme,
 global-statement,
 keyword-arg-before-vararg,
 literal-comparison,
 no-init,
 non-parent-init-called,
 not-callable,
 protected-access,
 redefined-builtin,
 redefined-outer-name,
 signature-differs,
 star-args,
 super-init-not-called,
 super-on-old-class,
 unpacking-non-sequence,
 unused-argument,
 unused-import,
 unused-variable,
 useless-super-delegation,
# TODO(dougwig) - disable nonstandard-exception while we have neutron_lib shims
 nonstandard-exception,
# "C" Coding convention violations
 bad-continuation,
 consider-iterating-dictionary,
 consider-using-enumerate,
 invalid-name,
 len-as-condition,
 misplaced-comparison-constant,
 missing-docstring,
 singleton-comparison,
 superfluous-parens,
 ungrouped-imports,
 wrong-import-order,
# "R" Refactor recommendations
 abstract-class-little-used,
 abstract-class-not-used,
 consider-merging-isinstance,
 consider-using-ternary,
 duplicate-code,
 inconsistent-return-statements,
 interface-not-implemented,
 no-else-return,
 no-self-use,
 redefined-argument-from-local,
 simplifiable-if-statement,
 too-few-public-methods,
 too-many-ancestors,
 too-many-arguments,
 too-many-branches,
 too-many-instance-attributes,
 too-many-lines,
 too-many-locals,
 too-many-nested-blocks,
 too-many-public-methods,
 too-many-return-statements,
 too-many-statements,
# new for python3 version of pylint
 consider-using-set-comprehension,
 unnecessary-pass,
 useless-object-inheritance

[BASIC]
# Variable names can be 1 to 31 characters long, with lowercase and underscores
variable-rgx=[a-z_][a-z0-9_]{0,30}$

# Argument names can be 2 to 31 characters long, with lowercase and underscores
argument-rgx=[a-z_][a-z0-9_]{1,30}$

# Method names should be at least 3 characters long
# and be lowercased with underscores
method-rgx=([a-z_][a-z0-9_]{2,}|setUp|tearDown)$

# Module names matching neutron-* are ok (files in bin/)
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(neutron-[a-z0-9_-]+))$

# Don't require docstrings on tests.
no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$

[FORMAT]
# Maximum number of characters on a single line.
max-line-length=79

[VARIABLES]
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
# _ is used by our localization
additional-builtins=_

[CLASSES]
# List of interface methods to ignore, separated by a comma.
ignore-iface-methods=

[IMPORTS]
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=
# should use oslo_serialization.jsonutils
 json

[TYPECHECK]
# List of module names for which member attributes should not be checked
ignored-modules=six.moves,_MovedItems

[REPORTS]
# Tells whether to display a full report or only the messages
reports=no
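.coveragerc and .pylintrc above configure the project's coverage and lint
tooling, and .stestr.conf below configures the unit-test runner. As an
illustration only (in the real tree these calls are wrapped by tox
environments; the exact invocations here are assumptions, not taken from this
archive), they are typically exercised from the repository root like this::

    # Lint the neutron package with the message filters from .pylintrc
    pylint --rcfile=.pylintrc neutron

    # Run the unit tests under coverage; stestr's default command template
    # honours the PYTHON override, and coverage picks up .coveragerc
    PYTHON="coverage run --parallel-mode" stestr run
    coverage combine
    coverage report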
neutron-16.0.0.0b2.dev214/.stestr.conf:

[DEFAULT]
test_path=${OS_TEST_PATH:-./neutron/tests/unit}
top_dir=./

neutron-16.0.0.0b2.dev214/AUTHORS:

AKamyshnikova Aaron Rosen Aaron Rosen Aaron-Zhang231 Abhishek Chanda Abhishek Raut Abhishek Raut Abhishek Talwar Abishek Subramanian Adam Gandelman Adam Harwell Adam Spiers Adelina Tuvenie Adin Scannell Adit Sarfaty Aditya Reddy Nagaram Adolfo Duarte Adolfo Duarte Adrian Chiris Adrien Cunin Adrien Vergé Ahmed Zaid Ailing Zhang Akash Gangil Akihiro MOTOKI Akihiro Motoki Akihiro Motoki Aleks Chirko Alessandro Pilotti Alessandro Pilotti Alessio Ababilov Alessio Ababilov Alex Holden Alex Oughton Alex Stafeyev AlexSTafeyev Alexander Ignatov Alexander Ignatyev Alexander Maretskiy Alexandra Settle Alexandros Soumplis Alexei Kornienko Alexey I. Froloff Ali Sanhaji Aliaksandr Dziarkach Alin Balutoiu Alin Serdean Allain Legacy Allain Legacy Aman Kumar Amir Sadoughi Amit Saha Anand Shanmugam Andre Pech Andreas Jaeger Andreas Jaeger Andreas Karis Andreas Scheuring Andrew Austin Andrew Boik Andrew Boik Andrew Karpow Andrey Epifanov Andrey Kurilin Andrey Kurilin Andrey Shestakov Andrii Ostapenko Andy Hill Angela Smith Angus Lees Anh Tran Anindita Das Ankur Gupta Ann Ann Kamyshnikova Ann Taraday Anna Khmelnitsky Ante Karamatic Anthony Chow Anthony Veiga Anton Frolov Aparupa Aqsa Aradhana Singh Arata Notsu Arie Bregman Arjun Baindur Armando Migliaccio Armando Migliaccio Armando Migliaccio Arnaud Morin Artur Korzeniewski Arun Sriraman Arvind Somy Arvind Somya Assaf Muller Attila Czira Attila Fazekas Aviram Bar-Haim Avishay Balderman Babu Shanmugam Baodong (Robert) Li Baodong Li Baohua Yang Ben Nemec Ben Nemec Bence Romsics Bence Romsics Benedikt Trefzer Bernard Cafarelli Bernhard M.
Wiedemann Bertrand Lallau Bertrand Lallau Bhagyashri Shewale Bhuvan Arumugam Billy Olsen Bin Yu Bin Zhou Bo Chi Bo Chi Bo Wang Bob Kukura Bob Melander Boden R Bogdan Tabor Boris Pavlovic Brad Hall Brad Hall Bradley Jones Brandon Logan Brandon Palm Brant Knudson Brent Eagles Brian Bowen Brian Haley Brian Haley Brian Haley Brian Haley Brian Waldon Britt Houser Bruce Tan Béla Vancsics Cady_Chen Cao Xuan Hoang Carl Baldwin Carl Baldwin Carl Baldwin Carlos Goncalves Carol Bouchard Cedric Brandily Chandan Dutta Chowdhury Chandan Kumar (raukadah) Chandan Kumar Chang Bo Guo ChangBo Guo(gcb) Chengli XU Chengqian Liu Chirag Shahani Choi-Sung-Hoon Christian Berendt Christoph Arnold Christoph Thiel Chuck Chuck Carlino Chuck Short ChuckC Clark Boylan Claudiu Belu Clayton O'Neill Clint Byrum Cong Phuoc Hoang Corey Bryant Corey Wright Cuong Nguyen Cyril Roelandt Cyril Roelandt Cédric Ollivier Dan Florea Dan Prince Dan Wendlandt Dane LeBlanc Daniel Alvarez Daniel Bengtsson Daniel Gollub Daniel Gonzalez Daniel Mellado Daniel Russell Dao Cong Tien Darek Smigiel (dasm) Darek Smigiel Dariusz Smigiel (dasm) Dariusz Smigiel (dasm) Dariusz Smigiel Darragh O'Reilly Darragh O'Reilly Darragh O'Reilly Darragh O'Reilly Darren Birkett Davanum Srinivas Davanum Srinivas Dave Cahill Dave Hill Dave Lapsley Dave Tucker David Edery David Rabel David Ripton David Shaughnessy David Wahlstrom David-gb Dazhao Debo Deepak N Denis Buliga Derek Higgins Dermot Tynan Dhanashree Gosavi Dina Belova Dipa Thakkar Dirk Mueller Divya ChanneGowda Dmitrii Shcherbakov Dmitriy Ukhlov Dmitry Ratushnyy Dmitry Sutyagin Dong Jun Dongcan Ye Doug Hellmann Doug Hellmann Doug Wiegley Doug Wiegley Doug Wiegley Doug Wiegley Drew Thorstensen DuYaHong Duan Jiong Duarte Nunes Dustin Lundquist Ed Bak Edan David Edgar Magana Edgar Magana Edward Hope-Morley Einst Crazy Elena Ezhova Elod Illes Emilien Macchi Emilien Macchi EmilienM Emma Foley Enyinna Ochulor Eoghan Glynn Eran Gampel Eric Brown Eric Larese Eric Windisch Erik Colnick Erik Colnick Erik Olof Gunnar Andersson Ester Niclos Ferreras Eugene Nikanorov Evgeny Fedoruk Eyal Fang Zhen Farhad Sunavala Fawad Khaliq Federico Ressi Fei Long Wang Felipe Monteiro Felipe Reyes Flavio Fernandes Flavio Percoco Francisco Souza Franck Yelles Francois Deppierraz Francois Eleouet Frank Wang Frode Nordahl Frode Nordahl Gabriel Wainer Gabriele Cerami Gal Sagie Gandharva Gary Kotton Gary Kotton Gauvain Pocentek Genadi Chereshnya Gerard Braad Ghanshyam Mann Ghanshyan Mann Ghe Rivero Gong Zhang Gordon Chung Gordon Chung Goutham Pratapa Graham Hayes Guilherme Salgado Guoqiang Ding Guoshuai Li Gyorgy Szombathelyi Gábor Antal Ha Van Tu Haim Daniel Haiwei Xu Hamdy Khader Han Zhou Hang Yang Harald Jensas Harald Jensas Harald Jensås Hareesh Puthalath Harsh Prasad Harshada Mangesh Kakad He Jie Xu He Qing He Yongli Hemanth Ravi Henry Gessau Henry Gessau Henry Gessau HenryGessau HenryVIII Herman Ge Hidekazu Nakamura Hieu LE Hiroaki KAWAI Hirofumi Ichihara Hironori Shiina Hisaharu Ishii Hong Hui Xiao Hong Hui Xiao Hongbin Lu Hongbin Lu Hongbin Lu Hu Yupeng Huan Xie Huang Cheng Hui HX Xiang Hui Xiang Huifeng Le Hunt Xu Hynek Mlnarik IWAMOTO Toshihiro Ian Wienand Ignacio Scopetta Igor Malinovskiy Ihar Hrachyshka Ihar Hrachyshka Ilya Chukhnakov Ilya Pekelny Ilya Shakhat Ilya Sokolov Inessa Vasilevskaya Ionuț Arțăriși Irena Berezovsky Irena Berezovsky Iryoung Jeong Isaku Yamahata Isaku Yamahata Itsuro Oda Itzik Brown Itzik Brown Iury Gregory Melo Ferreira Ivan Kolodyazhny Ivar Lazzaro Ivar Lazzaro JJ Asghar JUN JIE NAN Jacek Swiderski 
Jack McCann Jacky Hu Jakob Englisch Jakub Libosvar James Anziano James Arendt James E. Blair James E. Blair James E. Blair James Page Jamie Lennox Jamie Lennox Jas Jason Dillaman Jason Kölker Jason Zhang Jaume Devesa Jay Pipes Jay S. Bryant Jean-Philippe Evrard Jens Harbott Jens Rosenboom Jeremy Hanmer Jeremy McDermond Jeremy Stanley Jerry Zhao Jesse Jesse Andrews Jesse Pretorius (odyssey4me) Jesse Pretorius Jiajun Liu Jian Wen Jian Wen Jianghua Wang Jianing Yang JieLee Jiri Kotlin Joe Gordon Joe Harrison Joe Heck Joe Mills Joe Talerico John Belamaric John Davidge John Davidge John Davidge John Dewey John Dunning John Jason Brzozowski John Kasperski John Nielsen John Perkins John Perkins John Schwarz John-Paul Robinson Jon Grimm Jonathan LaCour Jonathan Rosser Jordan Pittier Jordan Tardif Jorge Miramontes JuPing Juan Antonio Osorio Robles Juergen Brendel Juha Kosonen Julia Kreger Julia Varlamova Juliano Martinez Juliano Martinez Julie Pichon Julien Danjou Jun Park Junjie Wang Justin Hammond Justin Lund KAWAI Hiroaki KIYOHIRO ADACHI Kahou Lei Kailun Qin Kailun Qin Kaiwei Fan Kanzhe Jiang Kawaguchi Ken'ichi Ohmichi Kenji Yasui Keshava Bharadwaj Kevin Benton Kevin Benton Kevin Benton Kevin Benton Kevin Fox Kevin L. Mitchell Kiall Mac Innes Kim Bao Long Kiseok Kim Kobi Samoray Koert van der Veer Koteswara Rao Kelam Koteswara Rao Kelam Kris Lindgren Kui Shi Kumar Acharya Kun Huang Kyle Mestery Kyle Mestery LIU Yulong LIU Yulong LIU Yulong LIU Yulong Lajos Katona Lars Kellogg-Stedman Lenny Verkhovsky Leon Cui Li Ma Li Ma Li Xipeng Li Zhixin Li-zhigang Liang Bo Lianghwa Jou Lina He Liping Mao (limao) Liping Mao LipingMao LiuNanke LiuYong Liuyuanfeng Livnat Peer Lizhixin Lorin Hochstein Louis Taylor Lu lei Lubosz Kosnik Lucas Alvares Gomes Lucian Petrut Luis A. Garcia Luiz H Ozaki Lujin Lujin Luo Luke Gorrie Luong Anh Tuan Ly Loi Maciej Józefczyk Maciej Józefczyk Madhav Puri Madhu Mohan Nelemane Maho Koshiya Major Hayden Mandeep Dhami Manish Godara Manjeet Singh Bhatia Manjunath Patil Marc Koderer Marga Millet Marga Millet Margaret Frances Mark Doffman Mark Goddard Mark McClain Mark McClain Mark McClain Mark McLoughlin Mark T. Voelker Martin Hickey Martin Kletzander Martin Matyáš Martin Roy Martin Roy Martins Jakubovics Maru Newby Maru Newby Maruti Mate Lakat Mathieu Gagné Mathieu Rohon Matt Dietz Matt Odden Matt Riedemann Matt Riedemann Matt Thompson Matt Welch Matthew Booth Matthew Edmonds Matthew Thode Matthew Treinish Matthew Treinish Matthew Weeks Max Max Pavlukhin Maxime Guyot Meenakshi Kaushik Mehdi Abaakouk Michael J Fork Michael Johnson Michael Krotscheck Michael Smith Michael Still Michal Arbet Miguel Angel Ajo Miguel Angel Ajo Miguel Lavalle Miguel Lavalle Miguel Lavalle Miguel Lavalle Miguel Ángel Ajo Mike Bayer Mike Dorman Mike King Mike Kolesnik Mike Perez Ming Shuang Xian Mithil Arun Mitsuhiro SHIGEMATSU Mohammad Banikazemi Mohit Malik Monty Taylor Morgan Fainberg Moshe Levi Motohiro OTSUKA Mr. Bojangles Mr. 
Bojangles Mukul Murali Birru Mykola Yakovliev NGUYEN TUONG THANH Na Nachi Ueno Nachi Ueno Nader Lahouti Nakul Dahiwade Nam Nguyen Hoai Natal Ngétal Nate Johnston Nate Johnston Nate Johnston Nate Johnston Neil Jerram Neil Jerram Ngo Quoc Cuong Nguyen Hai Nguyen Hai Truong Nguyen Hung Phuong Nguyen Phuong An Nguyen Thanh Cong Nguyen Tuong Thanh Nguyen Van Trung Nick Nick Bartos Nikita Gerasimov Nikola Dipanov Nikola Dipanov Nikolay Fedotov Nikolay Sobolevskiy Nir Magnezi Numan Siddique Numan Siddique Oleg Bondarev OmarM Omer Anson Ondřej Nový OpenStack Release Bot Pallavi.s Paul Belanger Paul Carver Paul Michali Paul Michali Paul Ward Pavel Bondar Pawel Suder Pedro Henrique Peng Xiao Peng Yong Pepijn Oomen Perry Zou Peter Feiner Petronio Carlos Bezerra Petrut Lucian Pierre RAMBAUD Pierre Riteau Pierre Rognant Piotr Siwczak Piotr Siwczak Pradeep Kilambi Praneet Bachheti Prashant Shetty Prasoon Telang Prateek Arora Praveen Kumar SM Praveen Yalagandula Preeti Mirji Prince Nana Pritesh Kothari Przemyslaw Czesnowicz Qiaowei Ren Qin Zhao Quan Tian QunyingRan Rabi Mishra Radosław Piliszek Rahul Priyadarshi Raildo Mascena Rajaram Mallya Rajeev Grover Rajesh Mohan Rajesh Mohan Ralf Haferkamp Ramanjaneya Ramu Ramamurthy Ravi Kota Ravi Shekhar Jethani Rawlin Peters Rawlin Peters Ray Chen Reedip Reedip Reedip Reedip Reedip Riccardo Pittau Rich Curran Richard Theis Rick Clark Rico Lin Ritesh Anand Ritesh Anand Robert Collins Robert Collins Robert Kukura Robert Li Robert Mizielski Robert Pothier Robin Cernin Robin Naundorf RobinWang Rodolfo Alonso Hernandez Rodolfo Alonso Hernandez Roee Agiman Roey Chen Roey Chen Rohit Agarwalla Rohit Agarwalla Roman Bogorodskiy Roman Dobosz Roman Podoliaka Roman Podolyaka Roman Prykhodchenko Roman Sokolkov Romil Gupta Ronald Bradford RongzeZhu Rosario Di Somma Rossella Sblendido Rossella Sblendido RoyKing Rudrajit Tapadar Rui Zang Russell Bryant Ryan Bridges Ryan Moats Ryan Moe Ryan O'Hara Ryan Petrello Ryan Rossiter Ryan Tidwell Ryan Tidwell Ryan Tidwell Ryota MIBU Ryu Ishimoto Sachi King Sachi King Saggi Mizrahi Sahid Orentino Ferdjaoui Sahid Orentino Ferdjaoui Sai Sindhur Malleni Saisrikiran Mudigonda Saju Madhavan Saksham Varma Salvatore Salvatore Orlando Salvatore Orlando Salvatore Orlando Salvatore Orlando Sam Betts Sam Hague Sam Morrison Samer Deeb Sandhya Dasu Sanjeev Rampal Santhosh Santhosh Kumar SapanaJadhav Sara Nierodzik Sascha Peilicke Sascha Peilicke Sascha Peilicke SauloAislan Saurabh Chordiya Saverio Proto Sayaji Sean Dague Sean Dague Sean M. Collins Sean M. 
Collins Sean McCully Sean McGinnis Sean Mooney Sean Mooney Sean Redmond Sebastian Lohff Senhua Huang Serge Maskalik Sergey Belous Sergey Kolekonov Sergey Lukjanov Sergey Nechaev Sergey Skripnick Sergey Vilgelm Sergey Vilgelm Sergio Cazzolato Shane Wang Shang Yong Shaohe Feng Shashank Hegde Shashank Hegde Shashank Kumar Shankar Shih-Hao Li Shiv Haris Shivakumar M Shivakumar M Shuangtai Tian Shweta P Shweta P Shweta Patil Siming Yin Simon Pasquier Sindhu Devale Sitaram Dontu Slawek Kaplonski Slawek Kaplonski Soheil Hassas Yeganeh Somik Behera Somik Behera SongmingYan Sourabh Patwardhan Sphoorti Joglekar Sreekumar S Sridar Kandaswamy Sridhar Ramaswamy Sridhar S Sridhar Venkat Sripriya Stanislav Kudriashev Stefan Nica Stefan Nica Stephen Eilert Stephen Finucane Stephen Finucane Stephen Gordon Stephen Gran Stephen Ma Stephen Ma Steve Kipp Steve Kowalik Steven Gonzales Steven Hillman Steven Ren Sudhakar Sudhakar Babu Gariganti Sudheendra Murthy Sudipta Biswas Sukhdev Sukhdev Sukhdev Kapur Sumit Naiksatam Sumit Naiksatam Sushil Kumar Sven Anderson Swaminathan Vasudevan Swaminathan Vasudevan Swaminathan Vasudevan Swapnil Kulkarni (coolsvap) Sylvain Afchain Sławek Kapłoński Sławek Kapłoński Takaaki Suzuki Takashi NATSUME Takuma Watanabe Tan Lin Tang Chen Tatyana Leontovich Terry Wilson Thierry Carrez Thomas Bechtold Thomas Goirand Thomas Herve Thomas Morin Tim Miller Tim Rozet Tim Swanson Tobias Urdin Tom Cammann Tom Fifield Tom Holtzen Tom Stappaerts Tomasz Paszkowski Tomoaki Sato Tomoe Sugihara Tomoko Inoue Tong Li Tong Liu Toni Ylenius Tony Breeds Tony Xu Tovin Seven Tracy Jones Trevor McCasland Trinath Somanchi TrinathSomanchi Trygve Vea Tu Hong Jun Tuan Do Anh Tyler Smith Vadim ponomarev Vadivel Poonathan Van Hung Pham Vasiliy Khomenko Vasyl Saienko Victor Laza Victor Morales Victor Stinner Viktor Varga Vincent Legoll Vincent Untz Vishal Agarwal Vishvananda Ishaya Vivekanandan Narasimhan Vlad Gridin Vladimir Eremin Vladislav Belogrudov Volodymyr Litovka Vu Cong Tuan Waldemar Znoinski Wanlong Gao Wei Hu Wei Wang WeiHu Weidong Shao Wenran Xiao Wenxin Wang Wim De Clercq Wlodzimierz Borkowski Wu Wenxiang XiaojueGuan Xiaolin Zhang XieYingYun Xu Chen Xu Han Peng Xuhan Peng YAMAMOTO Takashi YAMAMOTO Takashi Yaguang Tang Yaguo Zhou Yalei Wang YanXingan Yang JianFeng Yang Li Yang Youseok Yang Yu Yang Yu YangLei Yannick Thomas Yaohua Yan Yasuhiro Kimura Yatin Kumbhare Yi Zhao Ying Liu Yong Sheng Gong Yong Sheng Gong Yoni Shafrir Yoshihiro Kaneko Youcef Laribi Yu Fukuyama Yuanchao Sun Yuji Yuki Nishiwaki Yunxiang Tao Yuriy Taraday Yushiro FURUKAWA Yusuke Ide Yusuke Muraoka Yuuichi Fujioka Yves-Gwenael Bourhis ZHU ZHU Zachary Zainub Wahid Zang MingJie Zhao Lei ZhaoBo Zhengguang Zhenguo Niu Zhenguo Niu Zhengwei Gao Zhenmei Zhesen ZhiQiang Fan ZhiQiang Fan ZhijunWei ZhongShengping Zhongyue Luo Zuo ZongMing aaronorosen aaronzhang231 abhishek.talwar abhishek60014726 abhishekkekane adolfo duarte adreznec ajmiller alexpilotti ankitagrawal aojeagarcia armando-migliaccio armando-migliaccio berlin bno1 boden caoyuan caoyue cedric.brandily changzhi changzhi1990 chen, hao chen-li chen-li chenghuiyu chenke chenxing chnm-kulkarni da52700 david shaughnessy daz deepakmourya dekehn dineshbhor donnydavis dql dukhlov durga.malleswari e0ne elajkat enriquetaso eperdomo eperdomo@cisco.com <> ericxiett fellypefca fpxie fujioka yuuichi fumihiko kakuma gaofei gaozhengwei garyduan garyk gengchc2 gessau gh159m ghanshyam gong yong sheng gongysh gongysh gordon chung guiyanxing guotao.bj hgangwx hobo.kengo houming-wang huangpengtao 
hujin huzhiling hyunsun imran malik ivan-zhu jasonrad jeckxie jeremy.zhang jingliuqing joe@midokura.com john_a_joyce johndavidge jufeng jun xie jun xie junbo justin Lund karimb kedar kulkarni lawrancejing leegy leejian0612 lei zhang lianghao lidong lijianlj lilintan linb lingyongxu lioplhp liu-sheng liuchengqian90 liudong liuqing liusheng liyingjun lizheming lizheng lizhixin3016 llg8212 longqianzhao lujin luqitao lzklibj malos mamtap maoshuai marios mark mcclain mat mathieu-rohon melissaml miaoyuliang mid_one mmidolesov mohankumar_n mohit.mohit2atcognizant.com mohit.mohit2atcognizant.com mouad benchchaoui nanaboat ncode nfedotov nick.zhuyj nicky niusmallnan nmagnezi openstack openstack pandatt pawnesh.kumar pengyuesheng qinchunhua rajat29 rajeev rajeev reedip ricolin rohitagarwalla rohitagarwalla roagarwa@cisco.com <> ronak root root root rossella rtmdk sadasu salvatore <> salvatore sanuptpm sapana45 sayalilunkad shaofeng_cheng shashi.kant shenjiatong shihanzhang shihanzhang shmcfarl shu,xinxin sindhu devale sindhudevale singhannie siyingchun snaiksat sonu sonu.kumar sridhargaddam sridhargaddam stanzgy stephen-ma sukhdev sushma_korati sysnet tianhui tianquan ting.wang tonytan4ever tonytan4ever trinaths ushen venkata anil venkata anil venkatamahesh venkatamahesh vijaychundury vikas vikram.choudhary vinkesh banka waleed mousa wangbo wangfaxin wanghongtaozz wangqi watanabe isao watanabe.isao whitekid wlfightup xchenum xiaoli xiexs yan.haifeng yangjianfeng yangxurong yanhongchang5 yanpuqing yanyaohua yaowei ycx ymadhavi@in.ibm.com yong sheng gong yuhui_inspur yujie yuyangbj zengfagao zhangboye zhangdebo1987 zhanghao zhanghao2 zhangyanxian zhangyanxian zhangyuhe <1073258077@qq.com> zhangzs zhengyong zhhuabj zhiyuan_cai zhouhenglc zhsun zhufl zoukeke@cmss.chinamobile.com Édouard Thuleau Édouard Thuleau 翟小君

neutron-16.0.0.0b2.dev214/CONTRIBUTING.rst:

The source repository for this project can be found at:

   https://opendev.org/openstack/neutron

Pull requests submitted through GitHub are not monitored.

To start contributing to OpenStack, follow the steps in the contribution guide
to set up and use Gerrit:

   https://docs.openstack.org/contributors/code-and-documentation/quick-start.html

Bugs should be filed on Launchpad:

   https://bugs.launchpad.net/neutron

For more specific information about contributing to this repository, see the
Neutron contributor guide:

   https://docs.openstack.org/neutron/latest/contributor/contributing.html
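Since pull requests are not monitored, changes reach this repository through
Gerrit. A minimal sketch of that workflow, assuming git-review is installed
and Gerrit credentials are already configured per the quick-start guide
linked above (the branch name and commit step below are illustrative, not
taken from this archive)::

    git clone https://opendev.org/openstack/neutron
    cd neutron
    git checkout -b my-fix        # any topic branch name
    # ...edit, then commit with a descriptive message...
    git commit -a
    git review                    # uploads the change to Gerrit for review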
neutron-16.0.0.0b2.dev214/ChangeLog:

CHANGES
=======

* OVN: Add note re IGMP groups upon ovn-controller service restart
* Allow sharing of subnet pools via RBAC mechanism
* Promote tempest and fullstack uwsgi jobs to be voting
* [OVN] Bump up transaction timeout for functional tests
* [OVN] Add IGMP snooping configuration guide
* Add fullstack tests for stateless security groups
* Enable back mac spoofing and disabled port security tests
* Don't reschedule hosts unless we need to
* "keepalived\_state\_change" needs to use threading to send arping
* Replace ctype.CDLL by ctypes.PyDLL in linux.ip\_lib
* Add tunnel\_ip\_version to tunnel\_sync() error message
* Revert "Subcribe trunk & subport events to set subport id"
* Add trunk subports to be one of dvr serviced device owners
* [DVR] Don't populate unbound ports in router's ARP cache
* Migrate the OVN migration scripts
* Fix neutron debug probe to use integration\_bridge
* Revert "[OVN] Set 'unknown' address properly when port sec is disabled"
* Fix name of "subnet\_dns\_publish\_fixed\_ip" extension in docs
* [OVN] HA Chassis Group: Ignore UPDATES when external\_ids hasn't changed
* Fix return correct cache when reusing port
* mech\_driver.OVNMechanismDriver "\_ovn\_client" is a read-only property
* Wait before deleting trunk bridges for DPDK vhu
* Filter subnet by segment ID or None
* Prioritize port create and update ready messages
* Mark security group fullstack tests as stable
* Do not link up HA router gateway in backup node
* Use dict .get() to avoid a KeyError in the segment plugin
* Enable neutron-ovn jobs in check queue
* Switch tripleo based job to be run on Centos 8
* Revert "Switch to use cast method in dhcp\_ready\_on\_ports method"
* [OVN] Fix: DevStack guide
* [ovn] Documentation: Fix broken links in the OVN Doc
* Add usage note in utils.Timer class
* [OVN] Fix check\_for\_ha\_chassis\_group\_address maintenance task
* Finish migration to new engine facade in test\_db\_base\_plugin\_v2
* Revert "Remove native openflow driver 'deferred' code"
* Switch to new engine facade in extensions unit tests
* Switch to new engine facade in test\_allowedaddresspairs\_db module
* Reno only - Make stateless allocation segment aware
* Add known gaps between ML2/OVS and OVN
* [ovn] Stricter matching on metadata port binding event
* Allow sharing of address scopes via RBAC mechanism
* Clear lsp.addresses always if port is OVN LB VIP port
* Mark TestVirtualPorts tests as unstable
* Use "datetime.datetime.isoformat" instead of "timeutils.isotime"
* Neutron ovs agent: Removing SmartNIC OVS representor port on instance tear down and resync
* [OVN] Call OVNClient methods with context when possible
* Change OVS tunnel tests to support unittest.mock
* Change TestOVNL3RouterPlugin to support unittest.mock
* Deny delete last slaac subnet with allocation on segment
* subnet create - segment aware auto-addr allocation
* Subcribe trunk & subport events to set subport id
* [fullstack] Add option for custom apipaste.ini
* Filter subnets on fixed\_ips segment
* [OVN] Switch release jobs to OVN 20.03 (v20.03.0 tag)
* Add fip\_pf\_description extension to be supported by service plugin
* Switch to new enginefacade in l3\_hascheduler\_db module
* Unblock security group update with same stateful data
* Use OVN mech driver OVSDB connections for l3 plugin
* [OVN] Updates to tools/migrate\_names.txt
* Bump neutron-lib to 2.2.0
* [OVN] OVN DB schema update aware maintenance tasks
* [OVN] Add support for external ports
* [OVN] Default to TLS for OVN-enabled tests
* Fix bulk port tag test\_type\_args\_passed\_to\_extension()
* Register DNSMASQ\_OPTS in functional sanity tests
* Provide meaningful defaults for OVN/OVS\_REPO\_NAME
* Check tc\_lib.add\_tc\_policy\_class input parameters
* Remove native openflow driver 'deferred' code
* Support for stateless security groups
* Run fullstack security group test always serially
* Limit ovn sanity checks to q-svc
* Rename devstack service neutron-ovn-metadata-agent
* [OVN] Add IGMP snooping support
* DHCPv6 - Use addr6\_list in dnsmasq
* [OVN] Add missing OVN functional tests
* [Community goal] Add contributor and PTL guide
* Fix \_notify\_l3\_agent\_port\_update when there is no binding host
* Clean dvr fip gateway entry when fip agent gw port is deleted
* Increase log information in "TestMonitorDaemon"
* Fix assertEqual() calls in metadata agent unit tests
* Remove extra header fields in proxied metadata requests
* [OVN] DevStack: Split the OVN\_BRANCH and OVS\_BRANCH variables
* Allow usage of assert\_called\_once method in unit tests
* [OVN] Enable live-migration tests for devstack multinode
* Make neutron-ovn-tempest-slow job green
* Change way of retrieving LSP in virtual\_port functional tests
* [OVN] Always update router static route
* Add tag-ports-during-bulk-creation to devstack
* Ensure that default SG exists during list of SG rules API call
* [OVN] Remove SG dependency on original port dict
* Revert "Blacklist test\_multiple\_ports\_portrange\_remote for OVN"
* Fix logging call in the segment plugin
* [OVN] Update zuul jobs to use local OVN driver
* Fix queries to retrieve allocations with network\_segment\_range
* Ensure netlink.nla\_slot tuple key is a string
* Catch PortBindingChassisEvent in test\_agent\_resync\_on\_non\_existing\_bridge
* Remove leftovers of get\_external\_network\_id for router
* Add functional test for metadata agent monitoring
* Remove neutron-tempest-dvr job from CI
* [OvS] Handle re\_added multi ports
* Improve log message on exception for invalid tunnel type
* Blacklist test\_multiple\_ports\_portrange\_remote for OVN
* Move rejection of min-bw rule on non-physnet port to the ovs qos driver
* Drop invalid rootwrap filters

16.0.0.0b1
----------

* Filter by owner SGs when retrieving the SG rules
* Remove method get\_security\_group\_rules\_count
* [OVN] Merge networking-ovn vagrant into neutron
* [OVN] Add OVN functional tests - part 1
* Check SG members instead of ports to skip flow update
* [Fullstack] Wait until min QoS and Queue registers are set in DB
* Add "project\_id" filter when changing the network segmentation ID
* Set a default IP route metric in ip\_lib.list\_ip\_routes
* Workaround in NetworkSegmentRange OVO until "project\_id" migration
* Deprecate config option "ovs\_integration\_bridge"
* Mark NetcatTesterTestCase tests as unstable
* Centralize traffic when LB and member has FIP
* Revert "Add "ncat" rootwrap filter for debug"
* Set an absolute path in rootwrap filters\_path
* Bump neutron-lib to 2.1.0
* "ping"/"ping6" command support in rootwrap filters
* Add "ncat" rootwrap filter for debug
* Reduce the VLAN/tunneled ranges in fullstack tests
* Improve VLAN allocations synchronization
* Add 'fip-port-details' to OVN ML2 supported extensions
* Enhance agent's resources\_synced check
* Add NEUTRON\_PATH to configure\_for\_func\_testing.sh
* Enforce "qos\_max\_rate" and "qos\_burst" in OVN QoS options
* [OVN] Reduce the number of watched tables in MetadataProxyHandler
* Add SR-IOV agent resource provider information
* dhcp-agent: equalize port create\_low/update/delete priority
* Add 'is\_available' function
* [OVN] Devstack: Fix adding another compute node
* Add ovn-octavia-provider tag as official tag
* Revert "Disable neutron-ovn-tempest-ovs-release temporarily"
* Use sys.executable instead of 'python'
* Re-use existing ProcessLauncher from wsgi in RPC workers
* Remove plugins.ml2.db.get\_binding\_levels
* Ensure there is always at most 1 dvr fip gw port per agent and network
* Revert "[DVR] Add lock during creation of FIP agent gateway port"
* Use Python 3 as interpreter in test
* Remove duplicate QoS backend support table
* [OVN] Update QoS related code from networking-ovn
* Disable neutron-ovn-tempest-ovs-release temporarily
* Fix duplicate ID warning during doc build
* OVN Metadata agent gets OVSDB updates for only its Chassis
* Permit OVS build can be set
* Increate tox-py38 timeout to 3600 seconds
* Increase log information when a RootHelperProcess fails
* Remove sleep command when retrieving OVS dp
* Increase tox-cover timeout to 4800 seconds
* Enable ovsdb debug messages in functional and fullstack
* DVR: Remove control plane arp updates for DVR
* [OVN] Remove VLAN check when setting external\_mac
* Implement tagging during bulk port creation
* Fix OVN agent devstack script to support IPv6
* Add "ovn" as official tag used on Neutron's Launchpad
* [OVN] Add an interval between agents health checks
* Check "security\_group\_rule" quota during security group creation
* Stop configuring install\_command in tox
* [OVN] Provide local.conf sample file for DevStack
* [OVN] scripts for networking-ovn code migration
* Pass result dict to extensions by create port bulk
* [OVN] Bump ovsdbapp requirements to 1.0.0
* Collect exception information in ARPSpoofTestCase only if initialized
* Define new ipv6-only job for OVN
* Use distutils.version to check dnsmasq supported version
* Ensure we don't pass empty addresses to lsp\_set\_addresses
* Ensure driver error preventing trunk port deletion is logged
* Add description field to portforwarding NAT rules
* [OVN] Migrate the OVN tools
* [OVN] Double the tempest\_concurrency
* Increase tox-py3{6,7} and lower-constraints timeout to 3600 seconds
* Remove networking-midonet job from Neutron's check queue
* [ovn] Agent liveness - allow time to propagate checks
* Assign local variable before being referred
* Update release-checklist doc page
* Follow up to change of exception raised
* Remove python 3.5 from L3 rootwrap filters
* [OVN] Delete NAT entry first on any FIP update
* Allow to kill keepalived state change monitor process
* [OVN] Pass context while removing objects in OVN client
* [OVN] Use STACK\_USER's actual primary group in starting services
* [OVN] Re-enable test\_port\_security\_macspoofing\_port
* "\_create\_security\_group\_precommit" additional arguments are kwargs
* Remove "update\_device\_up" parameter from port notification
* Remove TripleO job from Neutron's check queue
Add "qos\_network\_policy\_id" to port definition * Install OVN for functional tests * Replace support matrix ext with common library * Fix invalid assertFalse statements * [OVN] Enable neutron-ovn-tempest-ovs-release job * Set OpenFlow 1.0, 1.3 and 1.4 by default on bridges * Add support for direct ports with QoS in OVS * [OVN] Add missing pyOpenSSL requirement * Add MariaDB 10.3 repository for Ubuntu Bionic * [OVN] Change DevStack to use the local OVN driver * [OVN] Set 'unknown' address properly when port sec is disabled * Complete dropping py27 support goal * [OVN] Add missing OvnSbIdl and OvnNbIdl unit tests * [OVN] TrivialFix: use\_new\_ovn\_repository must check major version * Modify exception string to have more information * Fix bug number in release note * Remove references to unittest2 library * IPv6 accepts first address only for routers * Bump neutron-lib to 2.0.0 * Add accepted egress direct flow * Add Dongcan Ye as neutron-vpnaas lieutenant and bug contact person * Set binding profile directly from OVNTrunkDriver (redo cont.) * ovs agent: signal to plugin if tunnel refresh needed * [OVN] Add OVN metadata entry points * Remove neutron-rootwrap-xen-dom0 script * [OVN] Migrate the OVN mech driver * DhcpLocalProcess.\_enable should not call DhcpLocalProcess.restart * Switch to reader context * Use effective MAC address for macvtap assigned VFs * Add OVS QoS driver cache for minimum bandwidth rules * Remove the condition that the mtu is nullable * Move arp device check out of loop * Add missing parameter option in log message * Pass correct port data to extension manager * Subnet gateway should be a valid IP * Disable not needed services in grenade jobs * Imported Translations from Zanata * Improve ovs cleanup utility help * OVSInterfaceDriver.plug\_new should catch correct exceptions * Work around potential double row.delete() call * [L3] Switch order of processing added and removed router ports * Move tripleo standalone job to ostempest * Unnecessary routers should not be created * Update TOX\_ENV\_SRC\_MODULES example * Check "keepalived" is stopped in KeepalivedManagerTestCase * Remove the neutron-grenade job definition * Support L3 agent cleanup on shutdown * Check mtu on network update * Add Neutron Priorities to dashboards list * [OVN] Import OVN Client, L3 and QoS related code * Fix TestHashRingManager test cases * Allow to select subnets to publish DNS records * Fix pep8 errors found by hacking 2.0.0 * don't clear skb mark when ovs is hw-offload enabled * Remove unused arguments in test methods * Remove locks from privileged ip\_lib module * Bump neutron-lib to 1.30.0 * Bump min pyroute2 version to 0.5.7 * Add etc/neutron/\*.sample files to be ignored by git * Add retries to update trunk port * [OVN] Move trunk driver * Use "via" in gateway dictionary in Linux Bridge agent * Use constraints for docs tox target * Add a tox requirements target * Move zuul jobs definitions to zuul.d directory * Locate RP-tree parent by hypervisor name * Load the glibc library only once for Pyroute2 * [OVN] Move OVN metadata agent * [OVN] Import ovsdb related code (part 2) * Replace "ip monitor" command with Pyroute2 implementation * [OVN] Move OVN ACL commands to Neutron tree * doc: fixed word 'neutron-serer' spelling error * Avoid raising NetworkInterfaceNotFound exception in DHCP agent logs * [OVS] Handle added/removed ports in the same polling iteration * Migrate from ofctl to native QoS DSCP * Move DevStack OVN module to Neutron * [OVN] Move OVN common.acl to Neutron tree * [OVN] 
* [OVN] Import ovsdb related code
* [OVN] Move OVN commons to neutron tree
* [OVN] Add revision number maintenance methods
* [OVN] Add revision number methods
* Update comments messages in abandon\_old\_reviews script
* Log exact error during getting value from file
* [OVN] Add hash ring methods
* Serialize subnet creating depending on the network ID
* [OVS FW] Clean port rules if port not found in ovsdb
* Add upgrade check for NIC Switch agent
* Do not inherit from built-in "dict"
* Remove grenade-py3 job from Neutron CI
* Revert "Revise log when create port failed"
* [OVN] Add DB tables for OVN backend
* Add more condition to check sg member exist
* [OVN] Disable ovn jobs temporary
* Set ignore\_basepython\_conflict to true in tox.ini
* Add upgrade check for networking-ovn db version
* List SG rules which belongs to tenant's SG
* [OVN] Sync requirements with OVN
* Rehome networking-ovn CI jobs to neutron repository
* use objects common\_types from neutron-lib
* Dont schedule Network, respecting network\_auto\_schedule config
* Force arp\_responder to True when DVR and tunneling enabled
* Update networking-bgpvpn and networking-bagpipe liuetenants
* OpenFlow monitor
* NetcatTester.stop\_processes skip "No such process" exception
* Explicit mention py2 for neutron-grenade job
* Start README.rst with a better title
* Remove neutron-grenade job from Neutron CI queues
* Parameter 'fileds' value is not used in \_get\_subnets
* Switch neutron-tempest-with-os-ken-master job to zuul v3
* HA race condition test for DHCP scheduling
* Make network support read and write separation
* [Fullstack] Fix waiting for packet with DSCP mark in test
* Remove one of iptables\_hybrid jobs
* Update liaison for networking-sfc project
* doc: Adjust heading levels
* [fullstack] find ip based on allocation\_pool
* Trivial: fix firewall\_refresh\_needed return value type
* Revise log when create port failed
* SR-IOV: macvtap assigned vf check using sysfs
* Remove old, legacy experimental CI jobs
* Set DB retry for quota\_enforcement pecan\_wsgi hook
* Do not initialize snat-ns twice
* Enable bulk updates for the dnsmasq
* Increase waiting time for network rescheduling
* Catch FileNotFoundError when listing namespace PIDs
* Move migration to proper dir and fix objects tests
* TestMinBwQoSOvs must call the correct methods
* Add "igmp\_snooping\_enable" config option for OVS agent
* Remove deprecated agent\_type option
* Fix Geneve max\_header\_size config option text
* Add profiler decorator to Neutron
* Update security group rule if port range is all ports
* Set rootwrap daemon timeout for fullstack and functional tests
* PSExtDriverTestCase: initialize variable in case of exception
* Increase timeout when waiting for dnsmasq enablement
* Remove info about "ovsdb\_interface" from config description
* Adding check for IPv6 address in setup\_controllers
* "round" with ndigits=None returns an integer
* Add neutron-tempest-mariadb-full periodic job
* Remove Floating IP DNS record upon associated port deletion
* Add Source links to readme and update Documentation
* Make the MTU attribute not nullable
* Router synch shouldn't return unrelated routers
* Clean-up ssl packages from bindep.txt
* Stop testing python 2
* Check dnsmasq process is active when spawned
* Do not use exceptions in get\_link\_id() to control the code flow
* Reset timeout exception in DietTestCase when retrying
* Switch to use cast method in dhcp\_ready\_on\_ports method
* Update bug contacts
* Use threads insted of greethreads in IP monitor
* Temporary disable CI job neutron-functional-python27
* Fix mismatch of tags in dnsmasq options
* Handle OVSFWPortNotFound and OVSFWTagNotFound in ovs firewall
* Add more info when parsing config fails
* Log OVS firewall conjunction creation
* [nova][train] Docs: Remove deprecated RetryFilter
* Update list of Neutron lieuetenants
* Start using oslo\_utils.netutils.is\_ipv6\_enabled()
* [doc][IPv6] make kernel config addr\_gen\_mode clear
* Don't call super() in Timer class \_\_init\_\_ method
* Convert Pyroute2 byte results to strings
* Mock openstacksdk raise\_from\_response in ironic unit test
* Fix misspell word
* Add radvd\_user config option
* Switch to openstack-python3-ussuri-jobs
* Add policy config for get\_flavor\_service\_profile operation
* Adding LOG statements to debug 1838449
* ovsdb monitor: handle modified ports
* Use admin context when updating router gateway
* doc: Update QoS minimum bandwidth limitations for Train
* Switch neutron-tempest-postgres-full job definition to Zuul v3
* Code review policy - add note about approving mechanical changes
* Imported Translations from Zanata
* Fix designate driver not being fully region aware
* Add irrelevant files to neutron-centos-7-tripleo-standalone
* Remove QoS related objects unneeded compatibility conversions
* Update master for stable/train

15.0.0.0rc1
-----------

* Randomize VNI in FdbInterfaceTestCase
* Always make sure openvswitch kernel module is loaded
* Randomize BaseFullStackTestCase.\_find\_available\_ips
* Make port list by mac case insesitive
* train release notes: Do not use prelude
* Release note for API extension: extraroute-atomic
* Configure keepalived interfaces according to config file
* Prevent providing privsep-helper paths outside /etc
* Log the IPTables rules if "debug\_iptables\_rules"
* Revert "Temporary switch broken CI jobs to non-voting"
* Handle ports assigned to routers without routerports
* Remove get\_external\_network\_id for router
* Change ip\_lib decorators order
* Remove Neutron LBaaS
* Avoid calling util.kill\_process to kill "sleep"
* Avoid unnecessary operation of ovsdb and flows
* Use openstacksdk for ironic notifiers
* Fix race condition when getting cmdline
* switch to the newly created opensuse-15 nodeset
* fixed\_configured=True when Add/Remove port IPs
* doc: Remove stale references to external\_network\_bridge

15.0.0.0b1
----------

* Initialize nova\_notifier for only port data changes
* Add l3 conntrack helper to devstack plugin
* Fix unit tests for ironic notifier
* L3 Conntrack Helper - Release Note
* Implement Floating IP association logic only once
* Only notify nova of port status changes if configured
* Windows: Fix local adapter ipv6 check
* Filter only IPv4 addresses when looking for gateway
* Add correct UPPER\_CONSTRAINTS\_FILE to neutron-functional job
* Re execute a test case if fixtures.TimeoutException is thrown
* Add note about apparmor issue with fullstack tests
* Document the incompatibility of trunk ports with iptables\_hybrid fw
* Implement extension: extraroute-atomic
* Update fwaas v2 scenario doc
* bump neutron-lib to 1.29.1
* Log when FIP is associated/disassociated
* Implement "list\_ns\_pids" inside Neutron
* Update DHCP port information during setup
* Check mech driver connectivity during port binding
* Fix bulk port functioning with requested security groups
* Temporary switch broken CI jobs to non-voting
* Fix py3 compatibility
* Create \_mech\_context before delete to avoid race
* Remove unused global\_refresh\_firewall
* Change ovs-agent iteration log level to INFO
Add "Port" to "RouterPort" foreign\_keys * Make Neutron gate great again * Add info log about ready DHCP config for ports * Remove references to FWaaS v1 * Update doc about L2 firewalling * Fix OVS build issue on Fedora * Change PDF file name * Fix some double spaces * Fix creation of vlan network with segmentation\_id set to 0 * Add binding\_index to NetworkDhcpAgentBinding * Agent - Conntrack Helper * Conntrack Helper - Plugin * Increase number of retries in \_process\_trunk\_subport\_bindings * Delay HA router transition from "backup" to "master" * Handle already created objects in "test\_update\_timpestamp" * Add VLAN type conntrack direct flow * OVS flows for custom ethertypes must be on EGRESS * PDF documentation build * Optimize DVR related port DB query * Force deletion of interfaces to create in "FdbInterfaceTestCase" * Avoid "utcnow" call clash in "test\_update\_timpestamp" * bump neutron-lib to 1.29.0 * Remove eventlet import from docs conf.py * doc: remove deprecated [neutron]/url from compute install guide * Fixing URLS * Adopt timers for new tenacity release * Kill all processes running in a namespace before deletion * DVR: Ignore DHCP port during DVR host query * Force "network\_id" in "subnet" DB registers * Remove L3 IP QoS cache when router is down * Initialize phys bridges before setup\_rpc * Open Ussuri DB branch * [Trivial]Remove unused helper \_remove\_fip\_rate\_limit\_cache * [Trivial]Remove unused helper get\_port\_external\_ids/mac * Fix objname error in QosPolicy * Remove dead link * Move factory assignment in \_ipam\_allocate\_ips() * Removing invalid double asterics entry from the doc file doc/source/admin/fwaas.rst * New extension: extraroute-atomic * Fix sort issue in test\_dhcp\_agent\_scheduler.test\_filter\_bindings * fix update neutron resource with incorrect body key server return 500 * remove try\_again from \_commit\_port\_binding * test\_l3: Be a bit more selective where to inject a failiure * Clear skb mark on encapsulating packets * fix update port bug * Remove default getattr values in RouterFipRateLimitMaps * Add 3 retry attempts to initialize ovs agent * use payloads for ROUTER\_GATEWAY events * Fix tools/configure\_for\_func\_testing.sh * fix test nit * Fix log directory permissions for functional job * [DOC] Add info about new CI job to jobs list * Fix default RPC worker count * Refactor the L3 agent batch notifier * [DVR] Add lock during creation of FIP agent gateway port * DVR: Cleanup ml2 dvr portbindings on migration * use callback payloads for PROVISIONING\_COMPLETE * Relax subnet pool network affinity constraints * Pass get\_networks() callback to interface driver * Fix init of RequestConstructor in test\_rpc.py * Initialize modifiable list of resources in CacheBackedPluginApi * Remove 'gateway\_external\_network\_id' config option * Update api-ref location * fix indentation of WarningsFixture use * Don't crash ovs agent during reconfigure of phys bridges * Run 'tempest-ipv6-only' job in gate * [FUP] Add functional test for IPDevice link attributes * Add interface kind property * shim standard\_attr db for neutron-lib * Add RPC method to get networks for L3 and DHCP agents * Start enforcing E125 flake8 directive * Switch test cases to sudo dependent * Trace receipt of update\_device\_\* rpc messages in neutron-server * Add process name to log in external\_process disable method * Rename tripleo-ci-centos-7-scenario007-standalone job * Fix missing arg in test\_\_cast\_message() * Do not use privsep context when listing the namespaces in 
* Do not use privsep context when listing the namespaces in fullstack
* Check project\_id when creating and updating NetworkSegmentRange
* Fix assertEqual argument ordering in test\_securitygroup.py
* Functional testing script password update
* Switch neutron tempest jobs to run only networking related tests
* Remove tempest-full jobs from Neutron CI
* Normalize protocol number 1 to 58 for IPv6
* Add a common timecost wrapper
* Fix race in test\_keepalived\_state\_change\_notification
* Add TripleO container based job to Neutron's check queue
* Remove deprecated "split\_branches" config option
* Retry creating iptables managers and adding metering rules
* Improve "OVSFirewallDriver.process\_trusted\_ports"
* Add fullstack tests for update network's segmentation\_id
* Register SG opts when testing test\_firewall
* ovs-agent: Trace remote methods only
* Yield control to other greenthreads while processing trusted ports
* Remove the l2pop agent\_boot\_time config
* [DHCP] Implement an aging method for deleted\_ports
* segments: fix rp inventory update
* Improve "QoSPlugin.\_validate\_create\_port\_callback"
* use callback payloads for ROUTER\_CONTROLLER events
* consume WarningsFixture from neutron-lib
* Propagate profiler info into BatchNotifier threads
* Replace "integrated-gate" template with new "integrated-gate-networking"
* use SqlFixture and StaticSqlFixture from neutron-lib
* Rally task definition for port binding scenario
* Fix list security groups performance with RBAC
* Fix update of network's segmentation id for network with ports
* Add base support for update\_port\_bulk
* Add Python 3 Train unit tests
* Fix bulk port binding
* Add "connectivity" parameter in vif\_details
* Use Pyroute2 "add\_tc\_qdisc" function in l3\_tc\_lib
* Use Pyroute2 "list\_tc\_qdiscs" function in l3\_tc\_lib
* Refactor qos\_plugin.\_extend\_port\_resource\_request
* Switch to the new canonical constraints URL on master
* Remove rootwrap filters for TC commands in Linux Bridge
* Add ipam.utils.check\_gateway\_invalid\_in\_subnet unit tests
* bump neutron-lib to 1.28.0
* Do not use privsep context when listing the namespaces in functional
* Switch to new engine facade for TrunkPlugin and TrunkSkeleton
* TC filter functions should pass "prio" parameter
* Ignore first local port update notification
* change from fixup\_uca to fixup\_ubuntu
* Mark fullstack test\_ha\_router\_restart\_agents\_no\_packet\_lost as unstable
* Adjust some HA router log
* Minimizing L3 agent QoS extensions lock granularity
* Enforce router admin state before distributed
* Implement subnetpool prefix operations
* Switch to use configure\_keystone\_authtoken\_middleware in devstack plugin
* Add qos\_network\_policy\_id to Port OVO
* Implement "ip route delete" command using Pyroute2
* Don't match input interface in POSTROUTING table
* Blacklist sphinx 2.1.0 (autodoc bug)
* Add custom ethertype processing
* Limit max ports per rpc for dhcp\_ready\_on\_ports()
* Switch to new engine facade for ExtraRoute\_dbonly\_mixin
* BGP Documentation Updates
* Implement "ip route" commands using Pyroute2
* Treat networks shared by RBAC in same way as shared with all tenants
* Fix of\_\* config options help messages
* use AuthenticIPNetwork and AuthenticEUI from neutron-lib
* Add default value to agent create\_or\_update\_agent new param
* Import "Manage Networking service quotas" admin guide
* Disable "of\_inactivity\_probe" in fullstack tests
* [Follow Up] Add Smart NIC representor port to integration bridge
* Assert HA router has one active hosting agent
* Switch to new engine facade for BaseResourceFilter
* [OVS] Network segmentation ID change not allowed if multisegments
* Remove some synchronization decorators from ip\_lib
* Release notes for dns\_domain behavioural changes
* Add more debug information TestNeutronServer
* Update DPDK docs with note about using veth pairs
* use object event\_types from neutron-lib
* use subnet\_service\_types extension from neutron-lib
* bump neutron-lib to 1.27.0
* Read IP monitor changes in a parallel thread
* Remove mock of not existing method in L3 agent UT
* Add agent timestamp to "\_log\_heartbeat" method
* Increase TestDhcpAgentHA.agent\_down\_time to 30 seconds
* [Doc] Drop some experimental warnings from admin guide
* Update some docs to use openstack client
* Allow VM booting rally scenarios to time out
* Add fwaas\_v2 log optional configuration in l3\_agent.ini
* Switch to new engine facade for DriverController
* Retry trunk status updates failing with StaleDataError
* Fix mis-use of \_dvr\_to\_src\_mac\_table\_id
* Fix update of network's segmentation id
* Fix :param: in docstring
* use object standardattributes from neutron-lib
* Switch to new engine facade for Schedulers
* Add missing ws separator between words
* segments: Fix resource provider inventories update
* DVR: on new port only send router update on port's host
* Minimizing lock for port forwarding
* Add log information in agent schedulers
* Force segments to use placement 1.1
* Add guidelines for removal of OVO version downgrades
* Log message when legacy IPv6 ICMP protocol name used in SG rule
* rbac: Catch correct exception for duplicated entry
* Updating bgp driver to match what neutron-dynamic-routing supports
* Turn CIDR in query filter into proper subnet
* fix NetworkSegmentRange OVO entry point
* Reset MAC on unbinding direct-physical port
* Run nova's VM boot rally scenario in the neutron gate
* Add a generic coordination lock mechanism
* Bump openstackdocstheme to 1.30.0
* Add ip\_monitor command implemented using Pyroute2
* Add Timer context manager class
* SRIOV agent: wait VFs initialization on embedded switch create
* use test tools from neutron-lib
* Switch to new engine facade for L3\_HA\_NAT\_db\_mixin
* improve dvr port update under large scale deployment
* [DVR] Block ARP to dvr router's port instead of subnet's gateway
* Exclude broken ironicclient versions 2.7.1
* Add kill hooks for external processes
* Fix mock of execute module in fdb\_population UT
* Revert "Pass network's dns\_domain to dnsmasq conf"
* Removing existing dirs from .gitignore
* Use list instead of six.viewkeys to avoid py2 to py3 problems
* update sriov config docs for live migration support
* Switch to new engine facade for Route objects
* Add debug information to AutoScheduler and BaseScheduler
* Add short info about tempest API/scenario tests to reviewers guide
* DVR: Modify DVR flows to allow ARP requests to hit ARP Responder table
* Switch to new engine facade for StandardAttribute objects
* Switch to new engine facade for Subnet object
* Stop OVS agent before starting it again
* Fix bug: AttributeError arises while sorting with standard attributes
* Switch DB \_utils to new engine facade
* Fix resource schemas and releated \`get\_sorts\` test cases
* Add TC filtering for VXLAN traffic
* Switch to new engine facade for QoS
* Support multiple external networks in L3 agent
* Notify ironic on port status changes
* OVS DPDK port representors support
* Canonicalize IPv6 ICMP protocol name in security groups
* Switch to new engine facade for L3RpcCallback
* Trivalfix: set a new segment ID for routed network
* Remove "\_make\_security\_group\_rule\_filter\_dict"
* Add devstack plugin for placement service plugin
* Drop of\_interface option
* [Functional tests] Test keepalived in namespaces
* Modify comments on some methods
* Add Smart NIC representor port to integration bridge
* Set neutron-keepalived-state-change proctitle
* use logapi constants from neutron-lib
* Add "project\_name" variable to configure\_functional\_tests role
* Wait to ipv6 accept\_ra be really changed by L3 agent
* Log when the placement service plugin is used wrongly
* Optimize the code that fixes the race condition of DHCP agent
* Add documentation about OVO "obj\_make\_compatible"
* Mark fullstack test\_ha\_router\_restart\_agents\_no\_packet\_lost as unstable
* [Docs] Add summary of Tempest and Grenade jobs running in CI
* Allow first address in an IPv6 subnet as valid unicast
* Ignore config-samples and policy sample file in git
* Change provider network segmentation ID in OVS agent
* Show all SG rules belong to SG in group's details
* Conntrack Helper - OVO and db script
* Integrate rally with osprofiler
* Use openstack-python3-train-jobs template
* Correct fwaas\_v2 scenario in neutron
* Fix pep8 E1111 warning in test\_securitygroup.py
* Make OVS controller inactivity\_probe configurable
* Fullstack test for placement sync
* Add RHEL8 platform-python to the L3 rootwrap filters
* Retrieve statistics from TC policy classes
* Define orm relationships after db classes
* Use six.viewkeys instead of dict.keys to avoid py2 to py3 problems
* Define dhcp\_agents\_per\_network with min=1
* Remove rootwrap configuration from neutron-keepalived-state-change
* Revert "Bump Pyroute2 version to 0.5.5"
* ML2 plugin: extract and postpone limit in port query
* Fix use of port context in tests
* Update sphinx requirements
* Use created subnet in port generator in "test\_port\_ip\_update\_revises"
* Use --bind-dynamic with dnsmasq instead of --bind-interfaces
* Change curl to osc for listing resource provider inventories
* Async notify neutron-server for HA states
* Add info about nf\_conntrack\_proto\_gre when ovs fw is used
* Add skip\_if\_timeout to additional MySQL migration test
* Fixes agent gw port creation
* [DHCP] Don't resync network if same port is alredy in cache
* Make sure the port still in port map when prepare\_port\_filter
* Fix bandit warning
* Limit monotonic to python < 3.3
* FdbInterfaceTestCase interface names should be randomly generated
* Error in "tc\_lib.\_handle\_from\_hex\_to\_string" formatting
* Bump Pyroute2 version to 0.5.5
* Blacklist bandit 1.6.0 due to directory exclusion bug
* use payloads for RBAC\_POLICY events
* Fix handling of network:shared field in policy module
* Ensure dvr ha router gateway port binding host
* use publish for SEGMENT\_HOST\_MAPPING callback events
* [Trivial fix]Remove unnecessary slash
* Removing unused "configure\_ml2" function leftover
* Keep HA ports info for HA router during entire lifecycle
* Fix creating policy rules from subattributes
* Update guest IPv6 information in docs
* Remove deprecated SR-IOV devstack file
* Use segment range exceptions from neutron-lib
* Switch neutron-tempest-dvr-ha-multinode-full job to Python 3
* Switch neutron-tempest-with-uwsgi job to python 3
* Populate binding levels when concurrent ops fail
* "add\_tc\_policy\_class" must always assign a default value to "min\_kbps"
* bump neutron-lib to 1.26.0
* Add namespace support for "bridge" commands
* List ha router count should not include dvr local router
* Trivial fix: set the right function to fip qos test
* Install doc: append provider network creation link
* Don't process port forwarding if no snat functionality
* Only TC class "htb" is supported
* Add router_factory to l3-agent and L3 extension API
* Get ports query: extract limit and use it only at the end
* Don't add arp responder for non tunnel network port
* Add update_id for ResourceUpdate
* Do not initialize the iptables nat table in the dhcp-agent
* Revert iptables TCP checksum-fill code
* Parametrize ipset locking with network namespace
* Fix some formatting issues in TESTING.rst
* Move wsgi jobs from experimental to check queue
* Rehome provider_network_attribute_updates_supported to API definition
* [Doc] Add note for port forwarding floating IP QoS
* Check in "_update_segmentation_id" that the mech_driver has an agent
* use trunk constants from neutron-lib
* use publish for SUBNETPOOL_ADDRESS_SCOPE callback events
* Move subnet postcommit out of transaction for bulk create
* Replace git.openstack.org URLs with opendev.org URLs
* Reorder classless static route (RFC3442) records
* OpenDev Migration Patch
* Don't count ports with inactive bindings as serviceable dvr ports
* Add log file for neutron-keepalived-state-change
* delete common_db_mixin
* use rpc agent topics from neutron-lib
* Dropping the py35 testing
* Packets getting lost during SNAT with too many connections
* Use '-p ip' instead of '-p 0' with conntrack
* Avoid fullstack QoS tests handling other test ports
* Choose random value for HA routers' vr_id
* Mock check if ipv6 is enabled in L3 agent unit tests
* Handle DBConnectionError in skip_if_timeout decorator
* DVR: Correctly change MTU in fip namespace
* Revert "Switch all grenade jobs to be non voting and non gating temporary"
* use api def ALIAS in supported_extension_aliases
* Fix a KeyError
* Decouple local VLAN internal management and OVS enforcement
* Open Train DB branch
* Give some HA router case specific resources
* Check master/backup router status in DVR functional tests
* Fix error 500 during segment range creation
* remove neutron.common.constants
* move neutron only common constants to private module
* Join on explicit relationship paths
* Don't pass None arg to neutron-keepalived-state-change
* Ensure that bulk port dicts have unix-style MAC addresses
* Format generated MAC addresses using eui48.mac_unix_expanded
* Make networking-ovn-tempest-dsvm-ovs-release job voting
* Add a midonet non-voting job
* Mock OVSBridge.clear_minimum_bandwidth_qos in fullstack tests
* Mark neutron-tempest-iptables_hybrid-fedora voting
* Set HA failover bridges veth pair devices UP
* Remove oslosphinx from lower-constraints
* Revert "Disable fullstack job from gate and make it non-voting"
* Allow provider net attr in update if no change
* Switch all grenade jobs to be non voting and non gating temporary
* Do not call update_device_list in large sets
* postgresql compatibility for get_l3_agent routines
* Change provider network segmentation ID
* Divide-and-conquer security group beasts
* Convert int to bytes for py3
* use callback payloads for AGENT OVS_RESTARTED events
* consume get_random_cidr from neutron-lib
* Remove conntrack rule when FIP is deleted
* Add extra unit test for get_cmdline_from_pid function
* Do not use subnet broadcast address in unit test
* Check the namespace is ready in test_mtu_update tests
* Remove outdated instructions about horizon integration
* use EXT_PARENT_RESOURCE_MAPPING from neutron-lib
* Remove local port regenerate mac address apidef
* Divide-and-conquer local bridge flows beasts
* Avoid iterating over all of the segment data just for counting
* Check if process' cmdline is "space separated"
* Specify physical network in QoS fullstack tests
* use neutron-lib trunk resource names
* Adds revision description for network_segment_range
* consume sqlalchemytypes from neutron-lib
* FUP: Unit tests for https://review.openstack.org/636970
* Replace openstack.org git:// URLs with https://
* Change default local ovs connection timeout
* Update master for stable/stein

14.0.0
------

* Reject min-bw rule operations on non-physnet networks/ports
* Try to enable dnsmasq process several times
* Disable fullstack job from gate and make it non-voting
* Add check for external_network_bridge in neutron-status
* Increase timeouts for OVSDB in functional tests
* Dynamically increase DHCP process queue green pool size
* Migrate legacy jobs to Ubuntu Bionic
* Add test case to duplicated service plugin
* Random IP for concurrent create pf and update_port
* More accurate agent restart state transfer
* Define qos-rules-alias extension
* Switch neutron-functional-with-wsgi job to zuul v3 syntax
* Fix neutron-status upgrade check CLI tool
* Utilize bulk port creation ops in ml2 plugin
* FUP Networking guide: Guaranteed Minimum Bandwidth
* Remove "msg" from tasks in ansible roles
* Networking guide: Guaranteed Minimum Bandwidth
* Remove local segment-peer-subnet host-route apidef
* Explicitly set neutron service ports for fullstack
* Migrate fullstack job definition to zuulv3 syntax
* Better handle ports in security groups
* Enable ipv6_forwarding in HA router's namespace
* Fix pylint R1717 (consider-using-dict-comprehension) refactor messages
* Use dynamic lazy mode for fetching security group rules
* Add documentation for subnet onboard
* Fix misuse of assertTrue/assertFalse
* Specify tenant_id in TestRevisionPlugin objects
* Add rootwrap filters to kill state change monitor
* Migrate neutron-functional job to zuul v3 syntax
* [Fullstack] Don't compile ovs kernel module on Ubuntu Bionic
* Migrate neutron-tempest-dvr-ha-multinode-full job to zuulv3
* Fix pep8 E128 warnings in non-test code
* Use allocated trunk_id in TestOVSDBHandler
* Fix fwaas_v2 driver string in docs
* Fix pylint E1128 (assignment-from-no-return) in l3-agent
* Set initial ha router state in neutron-keepalived-state-change
* Do not raise exception if OVS Queue is not present when being deleted
* Fail placement sync if _get_rp_by_name() fails
* [doc] Add network segment ranges into admin guide
* consume is_bsd from neutron-lib
* Agent configuration bandwidth report change
* consume UnorderedList from neutron-lib
* Fix handling of no qos_queues while removing min bw limit rule
* Fix QoS rule update

14.0.0.0b3
----------

* Fix KeyError in OVS firewall
* Join on explicit relationship paths
* Fix a couple of bw burst calc unit tests for macs
* Devstack plugin for network-segment-range api
* Allow sharing of security groups via RBAC mechanism
* Mark dvr_ha_router_failover tests as unstable
* Remove deprecated 'external_network_bridge' option
* Fix fullstack test_dscp_marking_packets test
* When converting sg rules to iptables, do not emit dport if not supported
* Use network segment ranges for segment allocation
* Drive binding by placement allocation
* Add TC filter functions implemented with pyroute2
* Add a more robust method to check OVSDB values in BaseOVSTestCase
* Mock router info functions for a functional case
* Catch OVSFWTagNotFound in update_port_filter
* DVR-HA: Unbinding a HA router from agent does not clear HA interface
* [Fullstack] Use newer openvswitch
* Support Network Segment Range CRUD as extensions
* Remove quota_db backward compat file
* Store journal log in functional tests results
* remove neutron.db.api references
* Make "phys_brs" argument in OVSAgentExtensionAPI optional
* Spawn metadata proxy on dvr ha standby routers
* Revert "Restart all L3 agents in test that check no packet loss in HA routers"
* Delete port binding level for deleted bindings
* Add method to tox to use local neutron-lib with unit tests
* [Functional tests] Change the way conntrack entries are checked
* Change process name of neutron-server to match worker role
* Don't disable radvd if it wasn't initialized
* Add oslo.privsep to config-generator list
* [Functional] Don't assert that HA router doesn't have IPs configured
* Placement reporting service plugin
* Restart all L3 agents in test that check no packet loss in HA routers
* Bump neutron-lib requirement to 1.25.0
* Do not print router resize messages when not resizing
* add python 3.7 unit test job
* Cleanup DVR SNAT port list creation code
* Fix regression with SG read API with empty ruleset
* Enable adoption of subnets into a subnet pool
* Update appdirs to 1.4.3
* Add QoS minimum egress bandwidth rule into ovs-agent
* DVR edge router: avoid accidental centralized floating IP removal
* Use constant from neutron-lib
* Bump pbr version to 4.0.0

14.0.0.0b2
----------

* Switch neutron-tempest-dvr-ha-multinode-full job to python3
* Fix slow SG api calls when limiting fields
* Fix pylint R1716 (chained-comparison) refactor messages
* Fix pylint R1714 (consider-using-in) refactor messages
* Reject QoS minimum bandwidth rule updates for bound ports as NotImplemented
* Add "rpc_response_max_timeout" config variable in OVS agent
* Bump pylint version to one that supports python3.7
* Class to represent Placement state and sync
* New agent attribute: resources_synced
* Add job from openstacksdk to avoid regressing
* Avoid loading same service plugin more than once
* Add support for binding activate and deactivate
* Modify api and rpc default number of workers
* Use subnet without dhcp in portforwarding functional tests
* Switch multinode grenade jobs to python3
* Use pyroute2 to check vlan/vxlan in use
* Call _safe_router_removed during pool resize testing
* Run revision bump operations en masse
* Consume constant from neutron-lib: DHCPV6_CLIENT_PORT
* Retrieve devices with link not present
* Bump pyroute2 version to 0.5.3
* use api def ALIAS in _supported_extension_aliases
* Add delete_tc_policy_class using pyroute2
* Add add_tc_policy_class and list_tc_policy_classes using pyroute2
* Implement delete_tc_qdisc using pyroute2
* Add dependency for service plugin
* [OVS] Exception message when retrieving bridge-id and it is not present
* Add common neutron config options to status CLI tool
* Remove conntrackd comment
* Fix _find_related_obj() RuntimeError output
* Fix notification about arp entries for dvr routers
* Add new test decorator skip_if_timeout
* Don't set the HA port down at regular l3-agent restart
* Dynamically increase l3 router process queue green pool size
* Remove trunk rally scenario from plugins
* bump neutron-lib to 1.24.0
* rally-jobs: Add FloatingIP workloads: assoc/dissoc
* Change common agent code to log message after failure
* Enhance rally-job for sec group rules to create multiple rules
* Change SR-IOV agent to log message after failure
* Improve invalid port ranges error message
* doc: Use DocumentedRuleDefault
* Change OVS agent to log message after failure
* Remove redundant event listening logic from neutron/db/api
* remove string use of neutron.common.rpc
* Clear old rules that have been applied before applying new rules
* Remove redundant if condition check
* Fix the way upgrade checks are loaded
* Fix update of ports cache in router_info class
* Implement add_tc_qdisc and list_tc_qdiscs using pyroute2
* remove neutron.common.rpc
* Update the processing of assigned addresses when assigning addresses
* DHCP: fix default dns search name
* Change agents to use get_devices_with_ip()
* Change Metering agent to log message after failure
* doc: replace nova security_group_api option with use_neutron
* OVS agent: always send start flag during initial sync
* Change log level for l3 agent router updates
* Don't specify exact IP in portforwarding functional test
* Add some create subnet negative tests
* remove neutron.common.exceptions
* Add Security Group scenarios to rally-jobs
* Fix port forwarding functional tests logging
* Ensure dnsmasq is down before enabling it in restart method
* Retrieve the device info in trunk_plumber using pyroute2
* use payloads for SECURITY_GROUP BEFORE_CREATE events
* [Configuration] Clean up .gitignore references to personal tools
* Bump neutron-lib to 1.23.0
* use payloads for SECURITY_GROUP BEFORE_DELETE events
* Do not release DHCP lease when no client ID is set on port
* use payloads for all SUBNETPOOL_ADDRESS_SCOPE events
* use payloads for all PORT_DEVICE events
* Add VLAN and VXLAN link information in get_devices_info
* use payloads for PORT BEFORE_DELETE callbacks
* remove unused methods in common_db_mixin
* Switch tempest-slow to be run on python 3
* Switch tempest-multinode-full to be run on python 3
* Restore tenant_id check on security group rule adds to previous semantic
* Switch isolated metadata proxy to bind to 169.254.169.254
* Add port forwarding floating IP QoS
* Switch default functional tests to python3, add job for python2
* Fix port update deferred IP allocation with host_id + new MAC
* Update neutron files for new over-indentation hacking rule (E117)
* Improve port dhcp Provisioning
* Remove extra spaces in admin config files
* Add IPWrapper.get_devices_info using PyRoute2
* remove the neutron.db._model_query module
* shim remaining common exceptions
* Delete virtual interface "ovs-system" before loading openvswitch module
* Use renamed template 'integrated-gate-py3'
* Add test cases: invalidate CIDR
* Change DHCP agent to log message after failure
* Change L3 agent to log message after failure
* Remove IPv6 addresses in dnsmasq leases file
* Change duplicate OVS bridge datapath-ids
* Don't set fip to ERROR if it exists on device
* Change DVR to use ip_lib.add_ip_address() directly
* bump neutron-lib to 1.22.0
* Set lower addr to avoid IP out of range
* Correct arguments to nfct_open
* Always fill UDP checksums in DHCPv6 replies
* Fix performance regression adding rules to security groups
* Add lock_path in installation guide
* Support iproute2 4.15 in l3_tc_lib
* Revert "Skip ovsdb_monitor events for ports with ofport=-1"
* Update team and bug ownership info
* Add a new configuration parameter rpc_response_max_timeout
* Use status_code instead of status in requests
* Fix indent level of netlink_lib.py
* Clear residual qos rules after l2-agent restarts
* Skip ovsdb_monitor events for ports with ofport=-1
* protect DHCP agent cache from going out of sync
* Fix l3-agent usage of L3AgentExtension class
* Gracefully handle network fetch failure in qos extend port
* Remove hooks for dsvm-scenario-linuxbridge
* Add neutron-tempest-iptables_hybrid job on Fedora
* Migrate neutron-tempest-linuxbridge job to zuulv3 and python 3
* Lock privileged.agent.linux.ip_lib functions
* Guideline on defining in-code policies
* Cleanup pecan required version
* Refactor some l3_dvr_db code
* Define types for C calls in netlink_lib
* Rename ironic job
* use payloads for SEGMENT AFTER_DELETE events
* Fix a minor typo in SR-IOV with ConnectX-3 section
* Rename README.policy.json.txt
* Define missing policies for attributes with enforce_policy
* Revert "Remove -u root as mysql is executed with root user"
* Update SR-IOV configuration admin docs
* Migrate neutron-tempest-dvr job to zuulv3 and python 3
* port-resource-request policy rule to constant
* neutron-tempest-iptables_hybrid job inherits from tempest-full-py3
* Prevent binding fip to a port that has port forwarding
* Make test service_plugins override simple
* remove the neutron.db._resource_extend module
* Use ADMIN_ONLY constant in policy rules
* consume get_updatable_fields from neutron-lib
* cleanup SQLAlchemy required version
* Migrate neutron-tempest-iptables_hybrid job to zuulv3 and python 3
* Don't modify global variables in unit tests
* Use oslo_log instead of alembic.util in functional tests
* fullstack: retry on updating port's IP address

14.0.0.0b1
----------

* Convert Port to OVO in db/ipam_pluggable_backend.py
* Convert Subnet to OVO in ipam_pluggable_backend.py
* Revert "Log alembic migration script's output with oslo_log"
* doc: Add policy reference
* Remove polling from test_create_bridges
* Use publish for AGENT AFTER_CREATE/AFTER_UPDATE
* Define popular policy rules by constants (part 2)
* Add ipxe6 dnsmasq tag to enable ipv6 dhcp client matching
* Define popular policy rules by constants
* remove context_manager from neutron
* doc: Add NICs known to support ovs offload
* Log alembic migration script's output with oslo_log
* Upgrade check command - add support for 3rd party checks
* Fill port-resource-request
* tox: Hide deprecation warnings from distgit
* Disable displaying warnings in functional tests
* use payloads for ROUTER_INTERFACE BEFORE_DELETE events
* rally-jobs: Set floating network as a parameter
* Implement IpAddrCommand.get_devices_with_ip using pyroute2
* Add logging for functional tests
* Fix mysql functional migration tests
* Revert "Make neutron-tempest-iptable_hybrid job non-voting"
* Remove unused get_ rules
* Drop 3rd-party plugin specific policies
* Implement IpRuleCommand.delete() using pyroute2
* Add debug information of the dhclient process
* Convert policy.json into policy-in-code
* shim _resource_extend for neutron-lib
* Pass elevated admin context when fetching snat router for FIP to unbound port
* Prevent some L3 ports from changing IP address
* Remove unnecessary 'set -x' at EOL in genconfig script
* Add irrelevant-files for grenade-py3 jobs
* Mark mysql related functional tests as unstable
* Replace ryu with os_ken
* get_standard_device_mappings for mechdriver
* [DVR] Allow multiple subnets per external network
* Table name in "add_ip_rule" can be a string
* Do state report after setting start_flag on OVS restart
* Do not delete trunk bridges if service port attached
* Update mailinglist from dev to discuss
* Make neutron-tempest-iptable_hybrid job non-voting
* Support fetching specific db column in OVO
* Switch neutron-rally-task job to python 3
* tox: Reuse envdirs
* Check port VNIC type when associating a floating IP
* Fix dnsmasq option6 tagging logic
* Enable deleting bound trunk for linux bridge agent
* Remove sqlalchemy osprofiler
* Add debug information in BridgeCreateEvent
* bump neutron-lib to 1.21.0
* Implement IpRuleCommand.add() using pyroute2
* Add tempest-slow to gate queue
* Fix the bug about DHCP port whose network has multiple subnets
* Check if agent can reach neutron server
* [Doc] L3 router gateway IP rate limit
* Add native OVSDB implementation for bridge monitor
* Reinitialize ovs firewall after ovs-vswitchd restart
* Change "cmd" folder in fullstack tests
* [L3][QoS] Agent side router gateway IP rate limit
* [L3][QoS] Neutron server side router gateway IP QoS
* Send global_request_id for tracing calls from neutron to nova
* Add propagate_uplink_status to port
* rally-jobs: Add FloatingIP workloads
* Add bulk port creation of DB objects
* Force all fdb entries update after ovs-vswitchd restart
* Secure dnsmasq process against external abuse
* Sync up model_query module to neutron-lib
* Only store segmenthostmapping when segment plugin is enabled
* [Fullstack] Configure policy.json file only if it is set
* Convert instance method to staticmethod in linuxbridge_neutron_agent
* Implement IpRuleCommand.list_rules() using pyroute2
* Store journal log from host in fullstack's job results
* Get centralized FIP only on router's snat host
* Cleanup old jobs from experimental queue
* Trivial: Fix ovs cleanup script on Windows
* All functional tests log results now
* DVR Floating IP create doesn't raise exception if agent not found
* Add get_availability_zone rule to policy.json
* tox: Hide deprecation warnings
* remove unused methods from CommonDbMixin
* segments use neutron_lib placement client
* filter "updated_at" and "revision_number" in _gateway_ports_equal
* Add bulk IP address assignment to ipam driver
* Duplicate words were deleted in openvswitch_agent.rst
* Add missing ws separator between words
* Add test cases for port number 0 for port_forwarding
* use payloads for SEGMENT BEFORE_DELETE callbacks
* Add kill_timeout to AsyncProcess
* Enable 'all' IPv6 forwarding knob correctly
* Change to use iproute2 instead of brctl
* Add networking-ovn job to Neutron's check pipeline
* Implement ip_lib get_devices using pyroute2
* raise priority of dead vlan drop
* DVR: Centralized FloatingIPs are not cleared after migration
* Make neutron-fullstack job run py3
* Notify router_update after direct gateway IP change
* Removed duplicated word "and"
* Import neutron.db.api module in common.utils
* Add port_forwarding to devstack plugin
* Prevent creating port forwarding to a port which has a bound fip
* Refactor _cache_related_dvr_routers_info_before_interface_removal()
* Add python 3.6 unit test job
* Incorporate capability for multiple mac generation
* Use RBACBaseObject OVO in neutron/db/rbac_db_mixin.py
* ml2/rpc: provide a small helper to avoid redundancy
* Switch to new engine facade in neutron/db/agents_db.py
* DevStack: OVS: Only install kernel-* packages when needed
* Bump neutron-lib to version 1.20.0
* Add missing step for ovs deploy guides
* Add protocol to port_forwarding uniq constraint
* Wait for ipv6 forwarding to be really changed by L3 agent
* use MAX_RETRIES from neutron-lib
* Update lower-constraints to meet neutron-lib
* Use NetworkRBAC OVO in tests/unit/db/test_db_base_plugin_v2.py
* Set router_id if floating IP has port_forwardings
* supported_vnic_type configurable for sriov
* Increase timeout of gate jobs
* Add test cases to verify port number 0 for port_forwarding
* Scan for MAC through all devices in macvtap agent
* Fix connection between 2 dvr routers
* Update docs for disabling DNS server announcement
* iptables-restore wait period cannot be zero
* Block port update from unbound DHCP agent
* Increase tempest-full jobs timeout
* Verify both port_forwarding subnet and IP address
* Fix incorrectly passing ext_ips as gateway ips
* Fix incorrect usage of '# flake8: noqa'
* Add capabilities for privsep
* sriov-agent: fullstack test resource info report
* Event driven periodic resync task for DHCP agents
* ovs/sriov mech drivers: resource_provider_uuid5_namespace
* notification: Add 'status' to agent after_create/update
* Add neutron-status upgrade check command framework
* remove unused db api functions
* OVSNeutronAgent.rpc_loop is always called with "polling_manager"
* use context manager from neutron-lib
* Revert "Add openstack/placement as a required project for neutron-grenade*"
* Increase tempest jobs timeout
* Use NetworkRBAC OVO in neutron/db/external_net_db.py
* Use NetworkRBAC OVO in neutron/db/db_base_plugin_v2.py
* [Functional] Increase test_timeout for db migration tests
* Make port binding attempt after agent is revived
* use payloads for SUBNET BEFORE_DELETE events
* neutron.conf needs lock_path set for router to operate. This change adds the required configuration in neutron.conf to set the lock_path parameter, which was missing in compute-install-ubuntu.rst
* Fix flake8 N534 untranslated exception message
* Fix dvr ha router gateway going to wrong host
* Add Network RBAC functional tests for External_net_db_mixin
* Add a request validator for SNAT logging
* Introduce Port resource request extension
* Enable ingress direction for min_bw rule
* Fix enable_distributed_routing option section in docs
* Introduce L3 agent extension for SNAT logging
* use payloads for NETWORK BEFORE_DELETE events
* supported_vnic_type configurable for ovs
* Use l3plugin property in neutron/db/l3_dvr_db.py module
* Allow update request with current segment_id
* [Functional] Increase test_timeout for db migration tests
* use retry_if_session_inactive from neutron-lib
* Increment versioning with pbr instruction
* DVR-HA: Configure extra routes on router namespace in dvr_snat node
* Introduce functional testing for Network RBAC policies
* Use tempest-pg-full
* use ovo for L3HARouterAgentPortBinding in l3_hamode
* Remove deprecated vsctl ovsdb_interface api
* Delete tempest config module
* Add permanent ARP entries for DVR fip/qrouter veth pair
* ovs-agent: fullstack test resource info report
* sriov-agent: Report resource info in heartbeat
* ovs-agent: Report resource info in heartbeat
* Allow IPv6 addresses for nova_metadata_host
* Modify the QoS doc in Neutron
* Replace openSUSE experimental check with newer version
* Make grenade-multinode-dvr job voting again
* Add both directions for minimum bw rule in doc
* Exclude fallback tunnel devices from netns cleanup
* netns_cleanup: Remove unused parameter from unplug_devices()
* Ingress direction for min bandwidth rule
* Fix ADMIN output examples
* Fix flake8 H404 errors
* Install centralized floating IP nat rules to all ha nodes
* Trigger router update only when gateway port IP changed
* fix tox python3 overrides
* Upgrade pylint to a version that works with python3
* Introduce floating IP pool resource
* remove trailing comma for linter
* Update tests neutron.conf to use transport_url
* Add argument to get ports by SG to allow excluding owners
* Optimize OVS DVR CSNAT port code a little
* Fix corner case in failure assigning MAC to SR-IOV NIC
* Fix best_match() deprecation warning
* Update code review policies for sub-projects
* bump neutron-lib version to 1.19.0
* Add openstack/placement as a required project for neutron-grenade*
* Create temp file in setUp() to avoid warning
* Always set ovs bridge name in vif:binding-details
* use payloads for AGENT BEFORE_DELETE callbacks
* Disable some pylint checks
* Remove a bare raise noticed by the linter
* tell pylint to ignore python2 version of gettext
* Remove population of is_filter keyword
* Do not fail deleting namespace if it does not exist
* Make binding statement singular
* Fix doc output examples
* Do not install centralized FIP if HA port is down
* Refactor l3hamode for ha migration
* Fix pep8 F402 import shadowing
* Add PortForwarding to neutron.objects entrypoint
* Use extension 'empty-string-filtering' from neutron-lib
* Bump pecan version to 1.3.2
* Remove async_process, moved to "common"
* use is_retriable from neutron-lib
* Make neutron-fullstack-python36 job voting
* doc: OpenVSwitch firewall internal design, improvements (+ newline fix)
* Use templates for cover and lower-constraints
* Fix broken link to Open vSwitch installation instructions
* Use mktemp instead of tempfile in gate_hook.sh
* Use Tempest slow job to run all slow tests
* use TenantIdProjectIdFilterConflict from neutron-lib
* Fix IPv6 prefix delegation issue on agent restart
* remove _setUpExtension
* Integration of Port Binding Level OVO
* Don't uninstall flow tables in setup_dvr_flows_on_integ_br
* Use tempest-multinode-full job from Tempest
* Update FIP QoS Docs
* [Docs] Update note about default burst value in QoS bw limit
* Revert "Use nested virt in scenario jobs"
* Fix pylint warning with eventlet > 0.22.0
* Follow-up: add fullstack test for logging
* doc: Small clearances for fullstack testing
* Delete ovs port if namespace is corrupted
* Add fullstack test for logging
* ovs fw: apply the NORMAL action on egress traffic in a single table
* doc: Address nits from I45fdc8e40121698bf144d0e0a65537edda2c805d
* Fix devstack/lib/ovs to run with Fedora 28
* doc: Additional fixes to the SR-IOV doc
* l2 pop: check for more than 1 first active port on a node
* Consolidate irrelevant files added for py3 project
* add python 3.6 unit test job
* use security_groups_port_filtering from neutron-lib
* Update abandon_old_reviews script to check Zuul votes
* switch documentation job to new PTI
* import zuul job settings from project-config
* Remove deprecated RamFilter from admin guide
* ovsdb monitor: do not die on ovsdb-client stderr output
* Use Tag OVO in neutron/services/tag/tag_plugin.py
* [Fullstack] Mark securitygroups tests as unstable
* fix spelling error
* Fix incorrect log resources querying
* Skip L3 ha functional IPv6 test if IPv6 is disabled
* Disallow networks with first ip 0.0.0.0 with dhcp enabled
* Use constant IP_VERSION_4/6 in functional tests
* Use constant IP_VERSION_4/6 in unit tests
* Modify abandon_old_reviews script
* DVR: Add IPv6 csnat port address correctly
* Fix the RFC and private internet network address
* Parse dhcp leases file in a more robust way
* Remove Future work from OVS-fw docs
* Revert "DVR: Inter Tenant Traffic between networks not possible with shared net"
* Revert "DVR: Add error handling for get_network_info_for_id rpc call"
* shim neutron rpc with neutron-lib
* Fetch specific columns rather than full ORM entities
* metadata: use requests for comms with nova api
* doc: add known limitation about attaching SR-IOV ports
* Use nested virt in scenario jobs
* [Fullstack] Add debug_iptables=True in security groups tests
* Enable nonvoting python 3.6 unit test job template
* Update list of skipped bandit plugins
* Doc: update the qos devref about SR-IOV support
* Reduce qos rpc times on the ovs-agent side
* Docs: Update API & Scenario Tests
* add pycodestyle to test-requirements
* Fix lost connection when creating security group log
* shim model query hooks to use neutron-lib
* Revert "Update network revision only when it needs"
* Consolidate irrelevant-files in zuul config
* Windows OVS: minimize polling
* Trivial: Move platform independent modules to common dir
* Pass context in l3 flavor notifications
* Update network revision only when it needs
* Update metering driver to load interface driver
* Update vpnaas-scenario docs
* Create veth pair devices using pyroute2
* Remove fdb entries for ha router interfaces when going DOWN
* get_subnet_for_dvr returns SNAT mac instead of distributed gateway in subnet_info
* Update reno for stable/rocky

13.0.0.0rc1
-----------

* Config privsep in the metering agent
* Remove -u root as mysql is executed with root user
* Use system protocol assignments for iptables protocol map
* Fix import style
* Correctly call is_distributed_router()
* Add FIP port forwarding to Networking Guide
* Open Stein DB branch
* Enable filter validation for portforwarding plugin
* Add release note for port forwardings
* Remove _migrate_python_ns_metadata_proxy_if_needed method
* Add delete rule for auto_allocated_topology
* Add note to RBAC chapter of Networking Guide
* Move MAX_CHAIN_LEN constants to constants file
* Don't run ironic-tempest job for irrelevant changes
* Fix no packet log data when debug is set to False in configuration
* ipam_backend_mixin: Segments model to OVO
* Fix no ACCEPT event being received for security group logging
* Adding resources callback handler
* Generic validate_request method for logging
* [Docs] Add note about Floating IP QoS
* Add a guide to deploy Neutron API under WSGI server
* Add ext_parent policy check
* [server side] Fix race issue for port forwarding plugin
* use get_port_binding_by_status_and_host from lib
* WSGI: Add experimental jobs with uwsgi
* [log] Generic RPC stuff for logging in server side
* [agent side] L3 agent side Floating IP port forwarding
* [server side] Expose port forwardings in FIP API
* Bug deputy routines for dummies
* Move neutron-fullstack-python35 out of experimental queue
* Use OVO for tests/unit/plugins/ml2/drivers/l2pop/test_db.py
* Make dvr router port creation retriable
* use autonested_transaction from neutron-lib
* [doc] Follow-up logging service for firewall_group

13.0.0.0b3
----------

* Trivial: avoid KeyError while processing ports
* [server side] Floating IP port forwarding plugin
* use sqla functions from neutron-lib
* Update the doc url about SUSE basic network configuration
* Allow neutron-api to load config from WSGI process
* Don't use dict.get() to know if a certain key is in a dict
* use callback payloads for ROUTER/ROUTER_GATEWAY BEFORE_DELETE events
* [Fullstack] Use string for global_physnet_mtu config option
* Rename router processing queue code to be more generic
* [QoS] Clear rate limits when default null values are used
* Fix ml2 hierarchical port binding driver check error
* Add binding activation to the Linuxbridge agent
* bump neutron-lib requirements to 1.18.0
* Implement filter validation
* Add binding activation to OVS agent
* Reduce IP address collision during port creation
* Fix neutron-openvswitch-agent Windows support
* Add missing policy actions to policy.json file
* Add osprofiler to api-paste pipeline
* Docs: Fix wrong stestr argument
* Change neutron CLI to openstack CLI in neutron doc
* Update _check_router_retain_needed()
* Add list of all working DSCP marks
* use retry_db_errors from neutron-lib
* Move l3_notifier check in remove_router_from_l3_agent()
* [server side] Floating IP port forwarding OVO and db script
* Add multiple IPv6 EUI-64 addresses test
* Fix fwaas v1 configuration doc
* Add binding de-activation to OVS agent
* Support binding_deactivate in CacheBackedPluginApi
* Add binding_deactivate method to Linux bridge agent
* Update get_port_binding_host for multiple bindings
* Pluralize binding relationship in Port
* Multiple port binding for ML2
* use plugin utils from neutron-lib
* Update pypi url to new url
* Send update instead of remove for DVR reschedule
* Fix attribute mismatch in router provider update
* Invalid link of doc reference
* Fix UnixDomainHttpProtocol class to support all eventlet versions
* Add release notes link in README
* Add fullstack test to restart agent with active l3-ha router
* use get reader/writer session from neutron-lib
* Adopt Port OVO for tests/unit/db/test_provisioning_blocks.py
* Use OVO in plugins/ml2/test_plugin.py
* Ensure request's object type is dict
* Adopt hacking 1.1.0
* Allow sub-resources to have standard attributes
* Modify logic of l3-agent to be notified
* Disallow router interface out of subnet IP range
* Bump neutron-lib version to 1.17.0
* use db utils from lib
* ipam_backend_mixin: Service Type model to OVO
* Adjust filters on listing availability zones
* [Fullstack] Remove central_external_bridge
* Add geneve in neutron tunnel protocols doc
* python3: fix netlink_lib delete_entries
* [Unit tests] Skip TestWSGIServer with IPv6 if no IPv6 enabled
* [Fullstack] HA L3 agent restart only standby agents
* Routed Networks - peer-subnet/segment host-routes (2/2)
* Update the Bugs link for triage
* Remove an extra backslash in a docstring
* Add support to regenerate port mac_address
* Routed Networks - peer-subnet/segment host-routes (1/2)
* Pass network's dns_domain to dnsmasq conf
* DVR: FIP gateway port is tagged as DEAD port by OVS with external-bridge
* objects: don't refetch a non-list object field if it's None
* Introduce l3_logging extension base
* [Fullstack] Use string for api_worker config option
* Fix bug to doc:auth
* Add missing transaction in driver_controller
* Skip MTU check during deletion of Networks
* Dropping radvd process privileges
* Fix local doc builds
* [Fullstack] Ensure connectivity to ext gw before agents restart
* Fix docstrings to work with Sphinx 1.7.4
* Mark unit test test_get_objects_queries_constant as unstable
* Make L3 IP tc filter rate limit more accurate
* Refactor duplicated implementation of _get_policy_obj
* DVR: FloatingIP create throws an error if no l3 agent
* Fix the tests for filtering with qos_policy_id
* DVR: Self recover from the loss of 'fg' ports in FIP Namespace
* Update doc on stable release frequency
* Remove the unit test 'test_ports_vnic_type_list'
* Improve patching of sys.argv
* Mark test_ha_router_restart_agents_no_packet_lost as unstable
* fix tox python3 overrides
* Fix exception in neutron-sanity-check
* Allow DHCP_OPT_CLIENT_ID_NUM to be string
* Fix DVR scheduling checking wrong profile for host
* Convert missing exception on device.link.delete()
* remove model_query_scope from common db mixin
* Configure privsep helper in neutron-netns-cleanup
* Update links in README
* Add BEFORE notification event for subnet in ml2 plugin
* Fix url in alembic_migrations.rst
* Use OVO in ml2/test_db
* Annotate filter parameters for tag attributes
* use object utils from neutron-lib
* use make_weak_ref and resolve_ref from neutron-lib
* Fix unscheduling failure on concurrent port delete
* [OVS] Add mac-table-size to be set on each ovs bridge
* remove safe_reference from common db mixin
* Fix url in thirdparty-ci.rst
* Improve error message in fullstack test
* Fix bug to doc:testing
* Update tunnel_types config option help msg
* Revert "DVR: Fix allowed_address_pair IP, ARP table update by neutron agent"
* Generate correct url in api pagination

13.0.0.0b2
----------

* [Fullstack] Wait for SG to be applied by L2 agent
* [Fullstack] Change time waiting for async ping results
* Fix UT BridgeLibTest when IPv6 is disabled
* Switch to oslo_messaging.ConfFixture.transport_url
* Don't skip DVR port while neutron-openvswitch-agent is restarted
* Add flavor_id argument to router tests
* fix a typo: s/paramater/parameter
* Replace ovsfw tempest job with iptables-hybrid tempest job
* Stop mocking keystoneauth internals
* Switch to stestr
* Remove local DEVICE_OWNER_ROUTER_GW value
* use std attr description for the segment extension
* doc: update doc to refer new trusted tag for SR-IOV
* Track neutron-lib migration of agent extensions
* Mention neutron-rootwrap-daemon in root_helper_daemon option help
* Disable IPv6 forwarding by default on HA routers
* Fix build-openstack-sphinx-docs test job
* Fix an attribute error on listing AZs with filters
* Update neutron configuration documentation URL
* docs: Fix formatting block in ovs firewall docs
* use dhcpagentscheduler api def from neutron-lib
* expose objects as entry points in setup.cfg
* Update docs about openvswitch compilation
* Allow setting network-segment on subnet update
* Update ubuntu image used for testing
* Use OVO in test_ipam_pluggable_backend
* Change ovs release used to compile kernel module for tests node
* Fix wrong release names in docs
* use exc_to_retry from lib
* Fix lack of routes for neighbour IPv4 subnets
* l3 flavor: more events/notifications and callback priority
* [rally] Port custom plugins to use proper code
* Use Param DHCP_OPT_CLIENT_ID_NUM
* Fix W605 warnings
* Refactor for L3 router QoS extensions
* Fixed PortBindingLevelDbObjectTestCase
* [ci][rally] make the job inherit the right parent
* objects: avoid deepcopying models in test_db_obj
* py3: Fix list_entries for netlink_lib
* py3: Fix ingress_bw_limit_for_dpdk_port() value conversion
* Fullstack: Add using multiple security groups
* python3: Add experimental jobs for functional and fullstack
* Update auth_url in install docs
* Trivial: Update pypi url to new url
* Fix ip_conntrack_proto_sctp modprobe error
* Change defaults in tools/configure_for_func_testing.sh
* Update fullstack section in TESTING.rst
* Populate project info before using it
* trivial: Fix file permissions
* dhcp: serializing port delete and network rpc calls
* Pass context in disassociate_floatingip notification
* Fix triger typo
* Avoid agents adding ports as trunk by default
* Annotate filter parameters for standard attributes
* Make neutron-tempest-ovsfw job gating
* Monitor phys_bridges to reconfigure it if created again
* Fix _setUpExtension shim
* Use extension fip-port-details from neutron-lib
* Update install doc for ubuntu
* Change metadata agent to log message after failure
* bump neutron-lib version to 1.14.0
* [L3][QoS] Cover mixed dvr_snat and compute node dvr router
* remove description API attr from securitygroup
* doc: Clarify network types compatible with physnets
* doc: Fix typo
* Fix all pep8 E129 errors
* Stop using legacy nodesets
* [Docs] Add guide on how to do advanced gate job debugging
* Support filter attribute with empty string
* Change minimum pecan version to 1.1.1
* Read ha_state file only once
* remove common db aliases to db utils
* shim _setUpExtension in prep to changing args
* Fix all pep8 E265 errors
* Fix hw_vif_multiqueue_enabled typo
* Select service_provider on the basis of service_module
* Add a debug env for unit tests
* objects: automatically detect whether engine facade is used
* Retry dhcp_release on failures
* remove rpc create_connection
* Convert Segment db model to OVO
* Revert "Revert "Automatically expire obsolete relationships""
* Remove deprecated IVS interface driver
* Document mixed scenario with LB and OVS over VXLAN
* fullstack: Migration from iptables_hybrid to openvswitch
* Increase lower-constraints job timeout to 60 minutes
* Switch rally trunk scenario to use new rally validators
* Remove N536 ignore in tox.ini
* Re-factor the iptables_manager tests
* Add port_details to Floating IP
* No need for www_authenticate_uri in [designate]
* Fix the supported_extension_aliases in unit test
* Adds egress and ingress forward rules for trusted ports
* Move standard-attr-segment to service plugin
* Do not remove conntrack jump rules if no zone
* Eliminate possible insecure usage of temp file/directory
* [Linuxbridge] Handle properly too big VXLAN MTU values
* ovs-fw: Apply openflow rules immediately during update
* Revert "Automatically expire obsolete relationships"
* Automatically expire obsolete relationships
* Fix E402 pep8 errors
* fullstack: Simplify ConfigFixture
* eventlet v0.22 changed HttpProtocol.__init__

13.0.0.0b1
----------

* Make OVSDB debug logging optional
* Add a shim extension standard-attr-segment
* Fix pylint no-value-for-parameter error
* pecan.jsonify v1.3 adjustment
* Update auth_uri option to www_authenticate_uri
* of_interface: Implement bundled() method
* Fix handling of dstport option for VXLANs
* Override ovsdb_timeout default value in ovs_cleanup tool
* ovs: split OVS_RESTARTED handler into a separate method
* ovs: raise RuntimeError in _get_dp if id is None
* use plugin common utils from neutron-lib
* Re-order items in policy.json
* Change ha_state property to always return a value
* Fix W503 pep8 warnings
* [RBAC] Fix setting network as not shared
* [Scenario] Enable "qos-fip" network extension in tempest config
* Update RYU to be 4.24 to sort out issues with pip 10
* Enable sorting and pagination for segments
* use sub-resource API extension support
* Fix potential race condition in privileged ip_lib module
* tests: don't configure plugin for tests that don't need it
* Renamed subnet_service_type_db_models module
* ovs: survive errors from check_ovs_status
* Fix some pep8 warnings
* fix tox release note generation
* ovs_lib: add support for OpenFlow 1.5
* tests/tools: add prefix "_" to CALLBACK_PRIORITY_SUPPORTED
* test_servicetype: Service Type model to OVO
* uncap eventlet
* Add standard attributes to segment resource
* zuul: Make ovsfw tempest job voting
* unit test: unbreak test cases for callbacks.subscribe
* Fix pep8 errors
* Replace assert with raising AssertionError
* remove unused common exceptions
* Fix incompatible requirement in lower-constraints
* tests: fixed NoSuchGroupError in TestNovaSegmentNotifier
* DVR: Restarting l3 agent loses centralized fip ip on qg-interface
* Add more info to vpnaas admin guide
* DVR: Avoid address scope rules for dvr_no_external agents
* Refresh router objects after port binding
* Handle AgentNotFoundByTypeHost exception properly
* Router should flip to standby if all L3 nodes down
* DVR: Check for item_allocator key before releasing
* Replace usage of 'user' by 'user_id'
* [doc][vpnaas] Fix reference url after recent change
* Use cidr during tenant network rule deletion
* Removing deprecated function 'IPWrapper.get_namespaces()'
* Fix DHCP isolated subnets with routed networks
* DVR: Add error handling for get_network_info_for_id rpc call
* use multiprovidernet api definition from neutron-lib
* Add fullstack job to gate queue
* Removing deprecated module neutron.api.versions
* use neutron-lib servicetype api def
* Don't define polling_interval option in two places
* Catch Errors When Nova API Microversions Are Not Supported
* [Functional] Add test for ip_lib.IPRule lifecycle
* [Docs] Update path to QoS API tempest tests
* Removing remaining Tempest bits
* [Fullstack] Add block test until IPs are really configured
* Handle adding ip address when it already exists
* [Functional tests] Add gateway add/delete/flush tests
* Switch ip link command to pyroute2
* Follow the new PTI for document build
* Fix l3-agent crash on routers without ha_state
* add lower-constraints job
* Revert "use segment api def from neutron-lib"
* Don't set administratively disabled ports as ACTIVE
* Remove usage of TestCase.skip() method
* Enable mutable config in Neutron
* Revert "[Fullstack] Mark security group test as unstable"
* Don't raise error when removing non-existing IP address
* use segment api def from neutron-lib
* Switch IPDevice.exists() method to use pyroute2
* remove tag and tag_ext extensions
* DVR: Fix mac format for backward compatibility with vsctl api
* Add docstrings for ip_lib IP address methods
* ovs-fw: Fix firewall blink
* Have LB agent use ip_lib.ensure_device_is_ready()
* Add a test class to validate UTs fixes of new_facade flag
* tests: port test_port_presence_prevents_network_rbac_policy_deletion
* Remove race and simplify conntrack state management
* ovs-fw: Clear conntrack information before egress pipeline
* DVR: Fix allowed_address_pair IP, ARP table update by neutron agent
* Switch fullstack job to be voting
* use callback payloads for PRECOMMIT_UPDATE events
* use common agent topics from neutron-lib
* Set trusted port only once in iptables firewall driver
* Add setting server_default in alter_enum(_add_value)
* Switch create/delete interfaces to pyroute2
* Updated from global requirements
* Fix UTs to accommodate new_facade flag in OVO objects
* Add l3_agent_mode configuration in tempest
* use is_extension_supported from neutron-lib
* Add broadcast to IpAddrCommand.get_devices_with_ip() return value
* Updated from global requirements
* Switch ip addr add/del/flush commands to use pyroute2
* consume neutron-lib resources attr map
* remove unused common rpc apis
* Revert "Do not start conntrack worker thread from __init__"
* [L3] Expunge context session during floating IP updating
* [Scenario tests] Try longer SSH timeout for ubuntu image
* DVR: Inter Tenant Traffic between networks not possible with shared net
* remove SHARED constant that now lives in neutron-lib
* Add search capability to the docs
* Update upper-constraints consumption
* Additional functional tests for ip_lib.IpAddrCommand
* Make log extension more generic
* async_process: Log stdout and stderr on process error
* Do not start conntrack worker thread from __init__
* functional: Fix RABBIT_PASSWORD env var
* use plugin names from neutron_lib.plugins.constants
* Spawn/stop metadata proxies upon adding/deleting router interfaces
* Iptables firewall driver adds forward rules for trusted ports
* ovsfw: Use bundle when adding security group rules
* Enable more extensions in pep8
* Imported Translations from Zanata
* Only allow SG port ranges for whitelisted protocols
* use revision if match api def from neutron-lib
* Add ip link functional tests
* Update mysql connection in config-dhcp-ha.rst
* Try to enable L3 agent extension `fip_qos`
* Add functional tests for link create methods
* use EGRESS_DIRECTION and INGRESS_DIRECTION from neutron-lib
* use DVR constants from neutron-lib
* [Fullstack] Limit number of test workers to 4
* use qos rule type details api def from neutron-lib
* use router service type api def from neutron-lib
* Fix creation of port when network has admin's QoS policy set
* reno: Remove remote names from branch specifiers
* Fix a log formatting error on linuxbridge
* use sorting api def from neutron-lib
* [Linuxbridge] Check if vxlan network MTU can be set
* [Fullstack] Limit number of Neutron's api workers
* use vlantransparent api def from neutron-lib
* DNS doc: remove whitespace
* Use oslo_db.sqlalchemy.test_fixtures
* [Fullstack] Respawn dhclient process in case of error
* Ensure _get_changed_synthetic_fields() returns updatable fields
* Pull in ubuntu cloud archive openvswitch pkg
* Updated from global requirements
* Set mtu for VXLAN interface in linuxbridge
* l3: a unit test for set_extra_attr_value
* Fixing the filter in get MTU by network list query
* Revert "remove tag and tag_ext extensions"
* Use ip_substring_port_filtering from neutron-lib
* Fix context deprecation warnings
* Fix arguments to oslo_context.RequestContext()
* Remove registry.subscribe()
* Add unit test cases for floating IP QoS extension
* [Fullstack] Unmark test_dscp_marking_packets as unstable
* Fix error message when duplicate QoS rule is created
* Compile ovs for ovsfw to avoid ovs-vswitchd core dumps
* Always pass device_owner to _ipam_get_subnets()
* use DVRMacAddressNotFound exception from neutron-lib
* use qos default api def from neutron-lib
* Move periodic jobs to Neutron repo
* remove tag and tag_ext extensions
* ovsfw: Update SG rules even if OVSFW Port is not found
* Docs: Update tempest test directory
* Fix unnecessary security_groups_member_updated notification
* Imported Translations from Zanata
* Add test cases for external network default
* Update reno for stable/queens

12.0.0.0rc1
-----------

* Use Meter Label OVO in neutron/db/metering/metering_db.py
* Open Rocky DB branch
* Tag the alembic migration revisions for Queens
* Remove redundant get_object call when creating fip qos binding
* Fixes using SSL OVSDB connection
* Allow objects to opt in new engine facade
* DVR: Fix dvr_no_external agent restart with fips
* Use Router OVO in external_net_db
* [doc] Support networking guide for VPNaaS
* l3: don't begin db transaction in set_extra_attr_value
* Update documentation for DNS integration
* Fix iptables rule removal warnings from QoS extension
* Process conntrack updates in worker threads
* [Fullstack] Mark security group test as unstable
* ovs-fw: Don't modify passed rules to update
* Filter port-list based on security_groups
* Mock ipv6_utils.is_enabled_and_bind_by_default method
* Add/rm centralized fips for DVR+HA from standby node
* Use RBAC_POLICY callback constant from neutron-lib
* Add notification for floatingip update/delete
* Windows: fix exec calls
* Revert "[Fullstack] Mark test_bw_limit_qos_port_removed as unstable"
* fix bug where the same mechanism driver is called twice
* Updated from global requirements
* Zuul: Remove project name
* Mark neutron-lbaas as deprecated
* Move Linuxbridge ARP spoofing to nat table PREROUTING chain
* Change the CT zone allocation range
* Kill _connection_to_manager_uri dummy
* Drop strict-order flag from dnsmasq invocation
* Fix delete vlan allocation error for ML2
* Revert "Integration of (Distributed) Port Binding OVO"
* ovs: removed sinkhole RPC target for old topics
* Fix eventlet imports issue
* Minor fix to remove legacy OVSDB IDL connection

12.0.0.0b3
----------

* Remove deprecated classes from SG agent code
* Fix race condition with enabling SG on many ports at once
* [Fullstack] SG test: don't use too many fake hosts
* More efficiently clean up OVS ports
* Fix Port OVO filtering based on security groups
* Add log-tag to haproxy config file
* Modify link address from http to https
* Log warnings when test fails because of them
* [Fullstack] Clean DSCP mark rule when port removed
* log-api: Don't enable logging for linuxbridge
* Add retry decorator to update_segment_host_mapping()
* Fixes minor typo in neutron
* Remove deprecated nova_metadata_ip
* Adding DSCP mark and inheritance in OVS and LB tunnels outer header
* Update the gerrit dashboard
* [Doc][L3][QoS] Enable floating IP qos
* Fix _port_arg for security rules with icmp/ipv6-icmp aliases
* l3_ha: only pass host into update_port when updating router port bindings
* remove neutron.callbacks package
* Updated from global requirements
* Use Router OVO in metering_db
* Configure log extension for ovs scenario job only
* Fix ingress bw limit for OVS DPDK ports
* Switch to using the ovsdbapp.api module directly
* [OVS] Fix for cleaning after skipped_devices
* Support filtering port with IP address substring
* ovs-lib: Pass string as udp port to ovsdb
* Fix mocking of ovsdb connection in UT
* Treat device with no MAC address as not ready
* Remove usage of deprecated idlutils module
* Remove deprecated method OVSBridge.set_protocols()
* [log] [doc] Include logging for security groups
* Speed up trunk MTU enforcement check
* Removed neutron-tempest-full
* Fix fwaas v2 configuration doc
* Add missing iptable rule in snat ns for centralized fips
* Revert "Use writer for trunk database operations"
* Switch rally job from q-* to neutron-* service names
* Fix broken HybridIptablesHelper function override
* [Fullstack] Mark test_bw_limit_qos_port_removed as unstable
* [log]: functional test for logging api
* [log] ovs fw logging implementation
* Use constant 'IS_DEFAULT' from neutron-lib
* [Fullstack] Change how DSCP marking packets are tested
* L3: prevent associating a FIP to a DHCP port
* Cleaned up get_session
* Log radvd config contents on reload when debug is on
* ovsfw: Don't create rules if updated port doesn't exist
* Use same instance of iptables_manager in L2 agent and extensions
* [OVS] Shut down the port on changing the admin-state to false
* Switch to Subnet OVO in ipam_backend_mixin _save_subnet method
* [doc] Added update regarding URL difference based on deployment
* DVR: verify subnet has gateway_ip before installing IPv4 flow
* [trivial fix] fix typos in neutron
* Moving legacy check and gate jobs
* test_provisioning_blocks: Network model to OVO
* [Fullstack] Shutdown tcpdump process gracefully
* Updated from global requirements
* Integration of (Distributed) Port Binding OVO
* use callback payloads for BEFORE_READ events
* use callback payloads for REQUEST/RESPONSE events
* use callback payloads for _SPAWN events
* ipam: don't commit IPAllocation while IpamAllocation is rolled back
* Update gerrit dashboards
* Fix NeutronPrimaryKeyMissing instantiation
* Improve Qos Policy Rule test
* ovsfw: Create tables for further consumption
* Introduce rfe-confirmed and rfe-triaged tags
* [L3][QoS] L3 agent side Floating IP rate limit
* Add iptables metadata marking rule on router init
* Fix 1 doc typo
* [log]: Devstack plugin for logging api
* [log]: Change entry point name of logging plugin
* Updated from global requirements
* Switch to use _get_subnet_object() in ipam_backend_mixin module
* Ensure floating IP create does not break
* FIP: update_floatingip_status() to return the updated object
* use l3 api def from neutron-lib
* l3_agentschedulers_db: convert from Agent model to OVO
* Allow __new__ method to accept extra arguments
* Update some l3-agent log messages
* Remove _get_subnets_by_cidr from neutron/db/extraroute_db.py
* Fix URL in SR-IOV internals doc
* Update the documentation links
* [Fullstack] Additional log of tcpdump stderr output
* Honor both floating_ip_address and subnet_id when creating FIP
* [Qos] ingress bandwidth limit by ovs is not accurate
* Use hexadecimal when adding DSCP rules
* Switch to use _get_subnet_object in neutrondb_ipam driver
* Tags: harden validations
* test_dhcp_rpc_agent_api: convert from Agent model to OVO
* bugs.rst: Add rfe-postponed to the list of tags
* Allow port create/update by shared nw owners
* Update network external attribute for RBAC change
* Switch to use subnet OVO object in get_subnet
* test_metering_plugin: convert from Agent model to OVO
* Updated from global requirements
* Fix error when using protocol number in security groups
* doc: Clarify RFE Triaged state a bit
* Prevent LBaaS VRRP ports from populating DVR router ARP table

12.0.0.0b2
----------

* test_timestamp: switch to Network OVO
* test_l3_dvr_db: switch to OVO for Agent
* test_dhcp_agent_scheduler: Network model to OVO
* test_l3_agent_scheduler: convert from RouterL3AgentBinding model to OVO
* test_l3_agent_scheduler: convert from Agent model to OVO
* Move segment deletion back to PRECOMMIT_DELETE
* [Qos] Fix residues of ovs in ingress bw limit
* Add CREATE_PRECOMMIT notification for Floating IPs
* burst should be set near max-rate
* Build metadata_agent.ini reproducibly
* Remove router_ids argument to auto_schedule_routers()
* FakeNotifier class 'topic' argument change to 'topics'
* [Fullstack] Log tcpdump output lines in DSCP mark tests
* Integration of Floating IP OVO
* ovs-fw: catch exception from ovsdb
* Fix meter-label-rule creation
* Fix missing backslashes in QoS docs
* [Fullstack] Fix destroy of VM fixture
* Revert "Revert "objects: get, update and delete converted to Subnet OVO usage""
* Fix HA router initialization exception
* use log api plugin const from neutron-lib
* Correctly configure the IPv6 LLA address
* Fix DNS connectivity issues with DVR+HA routers and DHCP-HA
* [log]: Add driver api and rpc stuff for logging
* [L3][QoS] Neutron server side Floating IP QoS
* [QoS] Add info about rate limiting on router's ports
* docs: Correct various mistakes in QoS doc
* fullstack: disable all test_connectivity test cases
* Disable test_dscp_marking_packets fullstack test case
* clarify nova prereq for controller install docs
* Call update_all_ha_network_port_statuses on agent start
* fullstack: Wait at least 3 minutes for agents to report
* Router to OVO
* Remove the ensure_dir
* Remove the bundled intree neutron tempest plugin
* Remove unused variables 'LOG'
* [OVO] Switch to use own registry
* Add dns-integration setup to devstack plugin
* use agent api def from neutron-lib
* Added zuulv3 jobs for testing neutron tempest plugin
* Remove neutron tempest plugin jobs from neutron project
* Skip tempest plugin tests on releasenotes changes
* use flavors api def from neutron-lib
* Remove Rodolfo Alonso as QoS bug contact
* use l3 ext gw mode api def from neutron-lib
* Remove setting of version/release from releasenotes
* Updated from global requirements
* use dns domain ports api def from neutron-lib
* Updated from global requirements
* ovsfw: Use multiple priorities in RULES_*_TABLE
* ovsfw: Merge multiple conjunction flows
* Add unit test to validate non DB base core plugin can be loaded
* use logging api def from neutron-lib
* Raise exception when synthetic field is invalid
* [L3][QoS] Adding L3 rate limit TC lib
* Support that an extension extends a sub-resource
* use net mtu api writable def from neutron-lib
* use net mtu api def from neutron-lib
* use PROVISIONAL_IPV6_PD_PREFIX from neutron-lib
* Do not run neutron-tempest-plugin-api job in gate pipeline
* Tempest: Add availability-zone for agent
* use network az api def from neutron-lib
* use addr pairs api def from lib
* use l3 flavors api def from neutron-lib
* use dvr api def from neutron-lib
* use qos api def from neutron-lib
* use router az api def from neutron-lib
* Added zuulv3 jobs for testing neutron tempest plugin
* Revert "objects: get, update and delete converted to Subnet OVO usage"
* Renamed ovs_vsctl_timeout into ovsdb_timeout
* Support protocol numbers in security group API
* Remove DocImpact info from contributor docs
* use ml2 driver api from neutron-lib
* Remove deprecated cache_url
* use l3 ext ha mode api def from neutron-lib
* Skip IPv6 sysctl calls when IPv6 is disabled
* Do not load default service plugins if core plugin is not DB based
* use ip allocation api def from neutron-lib
* tests: delete in-use security group
* Change iptables-restore lock interval to 5 per second
* Fix typo "extention" -> "extension"
* Fix typo: allow_address_pair -> allowed_address_pair
* use project id api def from neutron-lib
* Remove neutron.common.ipv6_utils.is_enabled()
* use core resource api defs from lib
* Enable bridge command for openvswitch agent
* Move check_ha_state_for_router() into notification code
* test_security_groups: Randomize SG names
* use pagination api def from neutron-lib
* use net ip availability api def from neutron-lib
* Update section with links to backport/RC potential bugs
* Use Agent OVO in l3_agentschedulers_db
* RPC callbacks: add hook to register additional resources
* Update link to stable branch policy page
* objects: get, update and delete converted to Subnet OVO usage
* use metering api def from neutron-lib
* use l2 adjacency api def from neutron-lib
* Fullstack: init trunk agent's driver only when necessary
* Use Agent OVO in agents_db and test_agents_db
* Add initialization in StringMatchingFilterObj class
* Add some missing mocks in l3-agent tests
* use extra route api def from lib
* use FAULT_MAP from neutron-lib
* Updated from global requirements
* [rally] Port input task to the new format
* [Tempest] Testing remote_ip_prefix for security groups
* Add RBAC access_as_external unit tests
* Replace http with https for doc links
* Adding OVS Offload documentation
* Integration of L3HARouterAgentPortBinding in ml2/drivers/l2pop/db.py
* Reorder checks in apply_filters in db/_model_query
* Fix callers of get_devices_with_ip() to pass addresses
* tests: Add decorator to mark unstable tests
* docs: Update supported QoS rule types
* use external net api def from lib
* Fix the wrong usage of new style class in metering
* Modify the wrong command in config qos doc
* Correct link in config-ml2.rst
* Change QoS configuration manual
* revert base extension unit test param removal
* Remove the unused code
* use availability zone api def from lib
* use qos constants from neutron-lib
* tempest: Sprinkle extension checks
* shim l3 exceptions with neutron-lib
* Revisit the process on how RFEs are handled
* cleanup unit test usage of api extension maps
* Always call iptables-restore with -w if done once
* Security Groups: Test all protocols names and nums
* Updated from global requirements
* consume load_class_by_alias_or_classname from neutron-lib

12.0.0.0b1
----------

* Do not create fip agent port for dvr_no_external node
* use dns api def from neutron-lib
* use default subnetpool api def from lib
* Do not try and iterate [None] in l3-agent network_update()
* Redundant alias in import statement
* shim FAULT_MAP from neutron-lib
* Add a new method ha_state_change to L3 agent extension
* create_security_group: Expunge an object selectively
* _test_security_group_precommit_create_event: Check the result of create
* Add NULL check before passing to in_() column operator
* shim ml2 driver_api with neutron-lib's api
* trunk: Fix init_handler() agent parameter
* Only create one IPWrapper class instance in _arping()
* Stop arping when IP address gets deleted
* Add some debug logs to metadata agent
* use plugin constants from neutron-lib
* Notify port_update to agent for status change
* Don't pass trailing dash (-) to rand_name
* Add additional tests for subnet filtering and count
* consume common constants from lib
* Remove release notes from reverted patch
* Remove argument "watch_log = "
* Fullstack: Add l3_agent_mode for testing different agent types
* Fullstack: add ability to specify router scheduler
* iptables: don't log lock error if we haven't passed -w
* Wrong path of rpc_api.rst in class docstring
* Checksum-fill proxied metadata replies
* [log]: implement logging agent extension
* Add a test csnat port removing in DVR migration
* Fix wrong OS ENV type
* ovs-fw: Remove iptables rules on hybrid ports
* tempest: check router interface exists before ssh
* Change OVS agent to update skipped port status to DOWN
* clarify deferred fixed IP assignment for ports on routed networks
* clarify agent file name in config qos doc
* use new payload objects for *_INIT callbacks
* Update get_l3_agents() scheduler tests
* Remove ip_lib SubProcessBase._execute() as class method
* Switch test_mtu.py to tempest.common.utils.requires_ext
* Allow to configure DHCP T1 and T2 timers in dnsmasq
* Change ip_lib network namespace code to use pyroute2
* DVR: Fix unbound fip port migration to bound port
* DVR: Fix centralized floatingip with DVR and HA
* Fullstack: add availability zone to host descriptor
* use synchronized lock decorator from neutron-lib
* Refactoring db config options
* Update the QoS bugs contact
* Refactor DVR HA migrations DB operations
* Use port object in notifiers/test_nova.py
* br_int: Make removal of DVR flows more strict
* Remove dead code in L3 HA scheduler
* Remove unnecessary IPWrapper() creation
* Updated from global requirements
* Update team and bug ownership info
* Change metering code to iterate through all L3 agents
* of_native: Use int for comparing datapath ID
* fullstack: Remove ovsdb_interface config opt
* Fix the link to the rally docs in README.rst
* ml2: fix update_device_up to send lm events with linux bridge
* Replace default subnetpool API tests with UT
* Linux Bridge, remove unnecessary logic to retrieve bridge name
* [Tempest] Scenarios for several sec groups on VM
* Revert "Fix for race condition during netns creation"
* Fix _verify_gateway_port() in multiple subnets
* DVR: handle unbound allowed_address_pairs added
* ML2: remove method get_locked_port_and_binding
* Update correct reference for tags
* Remove translation of help messages from tests
* Remove get_ip_version from ip_lib
* Remove deprecated get_random_mac()
* Remove deprecated IpNeighCommand.show()
* Updated links to dashboards
* Remove security_groups_provider_updated rpc code
* rally: switch to new format for context name
* ovs: log config options when all of them are registered
* Include all rootwrap filters when building wheels
* Ensure default security group before port update
* gate: don't configure linuxbridge jobs for geneve
* [Tempest] Check connectivity between VM with different MTU size net
* add doc link validation to release checklist and tox
* DVR: Always initialize floating IP host
* make net_helpers functions work on OpenSUSE
* use neutron-lib address scope apidef
* Deprecate ivs interface driver
* Fix device_owner during DVR and HA migrations
* Remove dead versions code
* Allow fullstack to operate on dhclient-script for OpenSUSE
* Fix for race condition during netns creation
* Don't log about skipping notification in normal case
* Tweak configure_for_func_testing to work on OpenSUSE
* fix broken link in config-agents doc
* Fix missing content of neutron database creation
* Change join relationship between routerport and router
* Add API tests for Tag resource with standard attribute
* Don't assume RPC push object has an ID
* Update config-sfc documentation
* Stop using subscribe in l3_db
* Updated from global requirements
* DVR: Fix bad arping call in centralized floating IP code
* tests: generate unique network ids for L3HARouterVRIdAllocation
* Fix documentation for DNS resolver config
* Update link for API extensions
* Stop using is_agent_down in agents_db
* Switch to tempest.common.utils.is_extension_enabled
* Fix missing super's skip_checks()
* gate_hook: configure range of GRE ids
* Switch to tempest.common.utils.requires_ext
* Delete dead API v2 router code
* Switch to Pecan for unit tests
* Drop the web_framework option
* Remove run_tests.sh
* gate_hook: Switched to $NEUTRON_CORE_PLUGIN_CONF
* Remove duplicated ICMPv6 RA rule from iptables firewall
* Updated from global requirements
* Fixing hyperlink issue
* Fixing external hyperlink
* Deprecate ovsdb_interface option
* Don't trigger DVR port update if status the same
* Remove csnat port when DVR migrated to non-DVR
* Fix post gate hook to accommodate for new os-testr
* Allow OS_TEST_TIMEOUT to be configurable from env
* ovs mech: bind only if user request switchdev
* Treat lack of segment info in port object as unbound
* fix missing l2pop config option docs
* doc inherit segmentation type for trunking
* doc for quota details extension
* Cleanup unused params
* Remove gw_port expire call
* Pecan: fix logic of hiding authZ failures as 404s
* Pecan: add plugin pagination/sorting validation
* Refactoring config options for ml2 config opts
* Pecan: Add missing body validations
* CountableResource: try count/get functions for all plugins
* DVR: Multiple csnat ports created when RouterPort table update fails
* DVR: get_router_cidrs not returning the centralized_floating_ip cidrs
* OVO for NetworkDhcpAgentBinding
* doc br_netfilter prereq for linux bridge
* Update config-dns-res to use openstack CLI
* OVO for L3HARouter
* Add a new method get_router_info to L3 agent extension API
* Allow to disable DNS server announcement per subnet
* [Tempest] Creating sec group rule with integer
* [Tempest] Testing default security group scenarios
* Validate security group rules for port ranges
* Avoid redundant HA port creation during migration
* ovsfw: Fix up port_range and ICMP type/code handling
* ovsfw: Fix port_ranges handling
* use qos DriverBase from neutron-lib
* linuxbridge-agent: add missing sysctl rootwrap entry
* Fix the incorrect doc for class SecurityGroupAgentRpcCallbackMixin
* Fix cleaning QoS rules for not existing port
* Fix generation of thousands of DHCP tap interfaces
* Match load_rc_for_rally logic to load_rc_hook
* OVSBridge: use ovs-ofctl with at least OF protocol version x
* Document dns_domain for ports attribute
* Fix port deletion when dns_integration is enabled
* Tempest: Fix cleaning of subnets
* Pecan: add missing body to delete notify payload
* DHCP provisioning block only on port addr update
* Remove vestigial HUDSON_PUBLISH_DOCS reference
* update docs to use nova_metadata_host
* Pecan: strip duplicate and empty user fields
* Pecan: Add missing emulated bulk create method
* test_ha_router: wait until two agents are scheduled
* update static urls for pike
* Fix to use . to source script files
* Pecan: set tenant_id field when project_id set
* Pecan: add calls to resync/dirty quota usages
* DVR: Fix agent to process only floatingips that have a host match
* Pecan: process filters at end of hook pipeline
* Make use of -w argument for iptables calls
* l3 agent: stop expecting old server when fetching service plugins
* fullstack: skip test_mtu_update when DHCP agent is not in rootns
* complete docs for revision number
* Update link for contribution
* dvr: Don't raise KeyError in _get_floatingips_bound_to_host
* Revert "DVR: _get_floatingips_bound_to_host throws KeyError"
* Prioritize tox environment executables for fullstack/functional tests
* update docs for stdattr tag support
* Integration of Port OVO in db_base_plugin_common.py
* use neutron-lib's OVO exceptions
* API test refactoring about create_project
* dhcp agent start md-proxy with vrouter id only when has metadata subnet
* Refactoring agent linux&ovsdb config
* Tempest: change way how QoS policies are cleaned
* DVR: _get_floatingips_bound_to_host throws KeyError
* update contributor internals index
* Tempest: Fix cleaning of networks after API tests
* fix formatting in ubuntu controller install guide
* Open Queens DB branch
* functional: Remove ovsdb tests
* Updated from global requirements
* functional: Fix reference to ovsdb connection
* Fix default qos policy when creating network
* Fix test_keepalived_ipv6_support for Keepalived v1.2.20
* Add network ip availability filtered by project_id
* add doc section for ml2 extension drivers
* Treat Filter as Dict in get_policies
* Fixes input for netlink-lib functional tests
* Stop using v2 identity API
* Add stubs for new ovsdbapp API functions
* tests: Log spawned processes by RootHelperProcess
* Revert "functional: disable netlink tests"
* DB migration checklist task for Pike
* Fix DefaultSubnetPool API test
* Make code follow log translation guideline
* Stop logging full object in RPC push code
* releasenotes: Move Pike ignore-notes to a proper file
* Apply network MTU changes to dhcp ports
* Apply network MTU changes to l3 ports
* Log policy filters in one line
* Stop logging versions on every agent update
* Update reno for stable/pike
* Add a target to Sinkhole so it's compatible

11.0.0.0rc1
-----------

* Allow unprivileged users to get their quota usage
* Deprecate web_framework option
* Allow to set/modify network mtu
* DVR: Provide options for DVR North/South routing centralized
* Reduce rally sub-port count
* Fixing test_convert_default_subnetpool_to_non_default
* Remove 'persisted dirty' log message
* fullstack: Actually run ovsfw tests
* Hook bandit security linter to pep8 target
* Add API test for port dns_domain
* Add unit tests for dns_domain for ports
* functional: disable netlink tests
* Add documentation for Linux Bridge and OVS ingress QoS
* Add port dns_domain processing logic
* Allow extension driver to provide multiple aliases
* Fill device_info with port_security_enabled data
* DVR: Configure centralized floatingips to snat_namespace
* Don't check full subnet body in test_filtering_shared_subnets
* [log]: Add validator to logging api
* [log]: add driver manager to LoggingPlugin
* tests: apply latest release milestone alembic scripts first
* doc: Fix non-existing URLs
* Do not use prelude section for individual release notes
* Exclude relnote from past releases from Pike relnotes
* tests: don't set description in test_blank_update_clears_association
* ovs-fw: Handle only known trusted ports
* Remove configuration options max_fixed_ips_per_port
* Sinkhole workaround for old topics
* Correct tag link in README
* Updated from global requirements
* Remove code to debug auto address allocation error
* Degrade log message for missing fanout exchange to debug
* Bump network rev on RBAC change
* devstack: fix ovs install on fedora like OS
* docs: clarify wording about how to use advanced glance image
* Enable QoS scenario tests
* ovsfw: fix allowed_address_pairs MAC issue
* Change ovsdbapp vlog to INFO
* Drop port_delete and port_update debug msgs
* Don't log 'Exit code: 0'
* Check if record is stale after bulk pull
* Removed unnecessary setUp calls in tests
* Fix some pep8 errors under l3 unit tests
* Add auto-generated config reference
* Remove compat checks for psutil 1.x
* docs: Fix in TESTING.rst
* Add bandit target to tox
* Use push notification for security groups
* Log revision number in resource updated message

11.0.0.0b3
----------

* Add segments service plug-in devref
* use neutron-lib auto allocated topology apidef
* Error in docs for configuring dvr router
* DVR: Fix router_update failure when agent restarts
* Updated from global requirements
* Catch exceptions for all rpc casts
* RouterPort OVO integration
* DVR: Fix binding info for DVR port not found error
* Add netlink-lib to manage conntrack entries
* use neutron-lib.callback.events.AFTER_SPAWN
* Fixed FlushError on subnet creation retry
* Add project links to feature classification matrix
* [Tempest] Running Trunk test with advanced image only
* Enforce ethertype with IPv6 integer protocols
* Add datapath_type to vif_details in OVS driver
* FloatingIP to OVO
* Add specific values to specific fields in get_random_object_fields()
* ml2 plugin: add (PORT, BEFORE_UPDATE) callback
* Tag mechanism supports resources with standard attribute
* ovs-fw: Update internal docs with TRANSIENT table
* Update the documentation link for doc migration
* Only ensure default security group exists if necessary
* Ignore duplicate record_resource_delete calls
* Remove network_id from network_map on net delete
* Always try to delete bridge for ID on network_delete
* Updated from global requirements
* Remove deprecated prevent_arp_spoofing option
* use APIExtensionDescriptor for plugins with defs in lib
* hardware offload support for openvswitch
* DistributedVirtualRouter mac address to OVO
* neutron-teams.rst: Remove an orphan footnote
* ovsfw: Fix overlapping MAC addresses on integration bridge
* DVR: Server side patch to schedule an unbound port with Floating IP
* Ignore cast exceptions in AgentNotifierApi
* hacking: Remove dead code
* OVS firewall: do strip_vlan in TRANSIENT_TABLE
* neutron-teams.rst: Update the list of networking-midonet lieutenants
* Log reserved cookies in cleanup_flows method
* Use context interface for constraint
* import the admin guide content from openstack-manuals
* Add callback BEFORE_DELETE for delete_router
* Imported Translations from Zanata
* Stop using non-ascii characters
* Updated from global requirements
* Extend Quota API to report usage statistics
* Remove neutron-fwaas specific policies
* Tempest: Fix DeprecationWarning for Read-only property
* Adding option to check VM connectivity with packet of given size
* Updated from global requirements
* Tempest: Adopt keystone api v3 for tests
* Fix typo
* Add port dns_domain to DB and object models
* Add dns_domain attribute to ports in the API
* Ensure that fault map translations work correctly
* Replace test.attr() with decorators.attr()
* SR-IOV: remove ml2_conf_sriov.ini from manual
* Fixed AttributeError in l2pop.delete_port_postcommit
* tests: kill a bunch of unneeded mocks in l2pop unit tests
* Introduce trusted ports to firewall driver API
* [log]: implement logging plugin
* ovs-fw: Use TRANSIENT table for traffic classification
* TrivialFix: Remove only_contrib argument
* stop ovs that installed from git on unstack
* New API call to get details of supported QoS rule type
* Remove unused exceptions
* import content from cli-reference in openstack-manuals
* ovo_rpc: Avoid flooding logs with semantic violation warning
* RouterPort to OVO
* Rearrange existing documentation to fit the new standard layout
* Updated from global requirements
* Do not defer allocation if fixed-ips is in the port create request
* Support object string field filtering on "LIKE" statement
* Add QoS policy network binding OVO
* Allow to set UDP ports for VXLAN in Linuxbridge agent
* Enable an update test in UT of logging
* releasenote: Specify openstackdocs as html_theme
* Do not respond to ARP on IPv6-only interfaces
* Tempest: Add default-subnetpools tests
* Use _is_dns_integration_supported for _delete_floatingip
* SR-IOV agent should specify host when requesting devices info
* devstack ovs: get correct kernel rpm version on custom kernel
* Switch from oslosphinx to openstackdocstheme
* import installation guide pages from openstack-manuals
* Fix alter_enum_add_value
* New RPC to set HA network port status to DOWN
* Updated from global requirements
* Use flake8-import-order plugin
* Modify the execution of alter_enum_add_value SQL
* objects: support tenant_id filter for get_* if project_id is present
* use core resource attribute constants from neutron-lib
* Add "default" behaviour to QoS policies documentation
* Use subqueryload in l2pop DB for binding ports
* of_interface: allow install_instructions to accept string actions
* API compare-and-swap updates based on revision_number
* Change all config options in fullstack tests to be strings
* DVR: Fix neutron metering agent to notify hosts hosting DVR
* Update after python-neutronclient doc rearrangement
* Replace the usage of 'admin_manager' with 'os_admin'
* Add fullstack-python35 testenv in tox.ini
* import the networking guide content from openstack-manuals
* use service type constants from neutron_lib plugins
* Remove 'description' attribute
* Common Agent loop: Catch delete_port extension failures
* Fix list QoS rule_types tempest API test
* Add missing description of Linuxbridge egress bw limit
* test_floatingip: Add a case for SRC without FIP
* Enable some off-by-default checks
* Add new ovo field type class
* Replaced assertTrue(False) with fail()
* Fix tempest router creation
* DHCP Agent: Set dhcp-range netmask property
* tempest-api: Skip test if deployment has not enough agents
* Ingress bandwidth limit rule in Linuxbridge agent
* Correct the config group in check_trunk_dependencies
* use attribute functions/operations from neutron-lib
* Fix some tempest deprecation warnings
* functional: Replace unicode() with six.u()
* Integration of IPAllocation
* security group: pass update value to precommit_update
* Using constants in local
* Fix bug when checking duplicated subnets for router interface
* Add missing info about supported ingress bandwidth limit rule
* functional-tests: Make addresses for tunneling unique
* Add provider info to network for update
* functional: Don't write strings to pipe
* python3: use binary mode to open file in test
* Make HA deletion attempt on RouterNotFound race
* dvr: Move normal/output br-int flows to table TRANSIENT
* Fix html_last_updated_fmt for Python3
* Updated from global requirements
* Add support for list querying in resource cache
* Add revises_on_change to Binding DB models
* Use objects instead of SQLA deep copies in PortContext
* OVO: Allow port queries based on security_group_ids
* Clean up test cases in test_iptables_firewall.py
* tempest: Make _create_router_with_client obey enable_snat=False
* Fix SG callbacks notification
* Fix race between create subnet and port requests
* use six.u rather than unicode for py3 compat
* DHCP Agent: Separate local from non-local subnets
* DHCP RPC: Separate local from non-local subnets
* Store segmentation_id during segment create
* Stop binding attempts when network has no segments
* Pass the complete info in sg/rules db into PRECOMMIT_XXX callback
* OVO: ensure decomposed plugin do not break with OVO
* Linuxbridge agent: detect existing IP on bridge
* docs: Fix indent level
* docs: reorganize developer reference for new theme
* docs: switch to openstackdocstheme
* Updated from global requirements
* Lazy load of resources in resource cache
* DVR: Add forwarding routes based on address_scopes
* Add a missing _LW()
* functional: Add support for python35 flavor
* net_helpers: Set process streams to text mode
* Trigger port status DOWN on VIF replug
* Remove unreachable code in OVS mech driver
* Add "default" behaviour to QoS policies documentation
* Use super to make _build_routers_list safer
* Fix test_dvr_gateway_host_binding_is_set
* Manually increment revision numbers in revision plugin
* Decompose SG RPC API DB methods
* Integration of Allocation/Endpoints OVO
* python3: use a list of IPDevice objects in tests
* Fix linuxbridge ebtables locking
* TC doesn't raise exception if device doesn't exist
* Fix usage of registry.receives in Nova notifier
* replace WorkerSupportServiceMixin with neutron-lib's WorkerBase
* use neutron-lib's callback fixture
* Add support for ingress bandwidth limit rules in ovs agent
* Don't log ipam driver on every IP allocation
* Remove unnecessary debug statement from OVO push
* Reduce extension logging on init
* python3: Do not pass MagicMock as ConfigOpts
* Retry ebtables lock acquisition failures
* Move info retrieval methods below notifier
* Move db methods to bottom of SG RPC class
* Add a dashboard for Infra reviews
* Add myself to the list of our Infra liaison
* Move retry decorator to DB methods
* Integrate Security Groups OVO
* Add libffi-dev to bindep.txt
* DVR: Fix DVR Router snat ports and gateway ports host binding issue
* Stop using nested transactions in OVO get/delete
* Switch to start_all_workers in RPC server
* Don't log about no notification on GET requests
* [log]: db models and migration rules
* Split allowed ICMPv6 types into two constants
* Mask password when logging request body
* Remove unused class 'QoSPolicyDefaultNotFound'
* Removed Mitaka times compatibility code from RPC callbacks
* Use e.exc_type instead of calling str on exception
* Remove redundant code in QosServiceDriverManager
* Do not defer allocation if fixed-ips is in the port create request
* Set HA network port to DOWN when l3 agent starts
* Warn the admin of a potential OVS firewall_driver misconfiguration
* Add QoS policy port binding OVO
* Integration of IPAllocationPool
* Stop arping when interface gets deleted
* Honor the common session options for the placement API
* Drop IPv6 Router Advertisements in OVS firewall
* Checks if net_dns is None and returns so that we don't attempt to load None objects
* Don't iterate updated_rule_sg_ids or updated_sg_members
* tests: use devstack-gate to deploy dstat for functional/fullstack
* Use rootwrap for fullstack test runner
* objects: exclude revision_number from updatable fields
* Revert "Use vif_type='tap' for LinuxBridge for consistency"
* Change allowed directions for QoS min-bw rule in SR-IOV
* Update pylint disable list to pass pylint 1.7.1 checks
* Updated from global requirements
* Fix incorrect comments in ip availability test
* Add alter_enum_add_value function
* neutron-rpc-server fails with no plugins loaded
* Add libssl packages to bindep
* remove unused reraise_as_retryrequest
* Fix html_last_updated_fmt for Python3

11.0.0.0b2
----------

* use MechanismDriver from neutron-lib + shim
* Removed ovsdbapp deprecation warnings
* Switch to oslo.messaging:get_rpc_transport
* Extend QoS L2 drivers interface to handle ingress rule types
* Update minimum tox version to 2.3.2
* Configure root_helper and root_helper_daemon in fullstack tests
* l3_ha_mode: call bulk _populate_mtu_and_subnets_for_ports
* Fix updating Qos policy to be default/not default
* api: work around Routes cutting off suffix from resource id
* Add relationship between QosPolicyDefault and QosPolicy
* ovs: bubble up failures into main thread in native ofctl mode
* Fix file permissions
* python3: fix log index for test case messages
* Switched to pyroute2.config.asyncio.asyncio_config
* Change supported vif type in Linux Bridge
* use extra_dhcp_opt api-def from neutron-lib
* Provide fallback for disabled port security extension
* Fixed docs job failure
* api-tests: Common way to define required extensions
* Fixes import_modules_recursively for Windows
* Revert "Change list of available qos rules"
* Switch to constant for 'tap' VIF_TYPE
* Update the host_id for network:router_gateway interfaces
* VXLAN multicast groups in linuxbridge
* Add "default" behaviour to QoS policies
* [OVO] Integration of RouterL3AgentBinding
* Updated from global requirements
* Fix security group rules created for dhcpv6
* devstack: Adapt to lib/neutron
* Use push-notifications for OVSPluginAPI
* Revert "DVR: Add forwarding routes based on address_scopes"
* python3: return str from read_stdout
* python3: convert range object to list before comparing with a list
* Drop 'notifies_port_ready' check for DHCP agents
* Fixed python3 failure in functional tests using net_helpers
* objects: added update_objects to OVO framework
* objects: update fields_no_update to reflect models
* Renamed tox targets for functional with python3
* use is_port_trusted from neutron-lib
* Add precommit calls to the QoSDriver class
* objects: don't allow to update create_at and update_at
* Updated from global requirements
* Allow port security updates even without security-groups enabled
* Fix functional test for ovsdbapp 0.4.0
* Update team members according to latest events
* Changing create_server to be "non class method"
* DVR: Do not check HA state on DVR-only routers
* Send both gratuitous ARP REQUESTs and REPLYs
* Fix nullable data_plane_status reference in port OVO
* Use vif_type='tap' for LinuxBridge for consistency
* Move get_vif_type hook point into mech_agent
* Send port ID in network-changed event to Nova
* Allow fip associate to different tenant port if admin
* Add IPv6 default route to DHCP namespace
* Update to support the ovsdbapp 0.4.0 API
* Add "direction" parameter to QosBandwidthLimitRule
* Bulk up port status updating in ML2 RPC
* Separate port status update from getting details
* Updated from global requirements
* Add check for Bandwidth Limit Rules
* Handle PortNotFound when deleting a network
* Wait 2 seconds between gratuitous ARP updates instead of 1 second
* Enable segments plugin in gate
* Fix tempest test failing with segments extension
* use worker from neutron-lib
* Notify L2pop driver from update_device_(up|down)
* Eliminate SUBNET_GATEWAY resource
* Use SUBNET instead of SUBNET_GATEWAY event in L3
* Add BEFORE_UPDATE subnet event
* Get orig subnet in precommit method
* Move subnet event to db_base_plugin_v2
* Add missing unit test for segment db
* Change PATH for "ip addr list" command so it could work with cloud-user
* Add tempest test for l3-ha extension
* Add QoS backend/rule support table to documentation
* Adds an option to skip ports on ovs_cleanup
* ovsfw: followup cleanups for the conjunction patch
* Updated from global requirements
* service: add callback AFTER_SPAWN
* Disable QoS scenario tests differently
* test_dhcp: Use a safer host name
* devstack: Add neutron-sriov-agent alias for lib/neutron
* Fix: set IPv6 forwarding when there's an IPv6 gw
* Trivial fix typos while reading doc
* Fix tempest router migration test when HA enabled, v2
* Revert "Fix tempest router migration test when HA enabled"
* Remove deprecated eventlet TimeoutError exception
* OVSBridge: add --strict to allow priority in delete_flows
* Add the parameter sub-resource-ID to the test show/update resource function
* Updated from global requirements
* Fix error getting segment_id in linux DHCP driver
* Change list of available qos rules
* TrivialFix: Remove dead code in iptables_firewall
* tempest: Obey ssh_timeout config option
* Clean MissingAuthPlugin from unit tests
* Move _get_marker_obj() out of CommonDbMixin
* Fix errors in PrefixDelegation.remove_stale_ri_ifname
* Fix tempest router migration test when HA enabled
* tests: removed 'retargetable' framework
* tempest: Obey identity_feature_enabled.api_v2_admin in a few tests
* Utils: make delete_port_on_error more informative
* use neutron-lib port security api-def
* DVR: Add forwarding routes based on address_scopes
* Add QoS bandwidth limit for instance ingress traffic
* l3_db: Fix a regression in the recent CommonDbMixin change
* Metering to OVO
* Make api_all_extensions hook readable
* use neutron-lib callbacks
* Stop using CommonDbMixin
* Update auto-addresses on MAC change
* Only add "on-link" routes for L2 adjacent subnets
* Refactor CommonDbMixin for removal
* Use unused argument in dns extension
* remove and shim callbacks
* Remove unused parameter in test_extension_driver_port_security.py
* Add Linuxbridge agent to no dhcp fullstack connectivity test
* Creating subnet for tagged network without GW
* Tempest: Edited bash commands to work with multiple OSes
* Prevent regression of IP loss on MAC update
* ProcessManager: honor run_as_root when stopping process
* Add deprecated reference to impl_idl.Transaction
* [Pecan] Fix custom tenant_id project_id matching
* Eliminate lookup of "resource extend" funcs by name
* Register ovs config options before using them
* Fullstack: enable DHCP agent
* Monkey patch the os and thread modules on Windows
* Replace subprocess.Popen with CreateProcess on Windows
* Add network_id in segment check log
* Remove unused functions from devstack/lib/ovs
* Use conjunction for security group rules with remote_group_id
* Use the ovsdbapp library
* fullstack: Compile openvswitch module for fullstack test
* fullstack: Don't let dhcp agents failover
* Use dirname in object recursive import
* Use HostAddressOpt for opts that accept IP and hostnames
* Stop extension warnings in UTs
* Remove EXTERNAL_NETWORK callbacks
* Bulk up port context retrieval
* Stop DHCP agent scheduler warnings in UTs
* Fixup event transaction semantics for ML2 bulk ops
* Set MTU on tap devices in Linux Bridge agent
* Updated from global requirements
* Revert "Update auto-addresses on MAC change"
* docs: Update TESTING.rst about openvswitch requirements
* Make QoS policy object compatible with versions 1.2 and higher
* Correct param type description of supported_rules of QoS driver
* Eliminate mixin references in DVRResourceOperationHandler
* Split out DVR DB into multiple classes
* DVR: move snat and csnat functions
* DVR: move delete_floatingip_agent_gateway_port
* DVR: move _get_device_owner and floatingip CUD
* DVR: Move _get_floatingip_by_port to l3_db
* Use NETWORK callbacks in get-me-a-network code
* sanity check: deprecate all version based checks
* Disable new N537 hacking check from next neutron-lib
* Ensure behavior of None for device_id
* Remove deprecated send_arp_for_ha option
* Add a skip check to make sure that vlan is actually in available_type_drivers
* Add a new configuration variable for api links
* Add devref for supporting use floatingip cross scopes
* Replace six.iteritems with dict.items(Part-2)
* Update auto-addresses on MAC change
* Stop loading OVOServerRpcInterface in ML2 testcase
* Agent-side receiver cache for ML2 OVO
* Delete segments using OVO code
* Don't load rels on detached objects
* Add api test to create vxlan network
* segments: make sure we pass lists into get_objects
* tests: allow database layer to generate id for standard attributes
* Refactoring _create_test methods for OVO UTs
* Revert "Rally: decrease SLA for avg list of ports and nets"
* qos: removed silly tests validating that oslo.versionedobjects works
* Remove deprecated support for QoS notification_drivers
* Rename method to better reflect what it does
* ml2: Remove no longer necessary subtransaction workaround
* Update operation dns notes url
* Update release page url
* devref: Remove resolved fullstack TODO items
* Floating IP association without subnet gateway IP
* Replace six.iteritems with dict.items(Part-1)
* Expose neutron api application as a wsgi script
* Allow to disable DVR api extension loading
* Add IPAllocations to the port fixed_ips
* Integration of Router Extra Attributes OVO
* Eliminate lookup of model query hooks by name
* move make_port_dict back out of txn
* Integrate NetworkSegment OVO
* Add launchpad bug update support to abandon script
* Implement '-F' option for 'net-ip-availability-*' command respond
* Exhaust VLAN allocations in physnet order
* Check permutations of router migrations
* Don't set use_stderr to False for tests
* Reduce rpc calls in SR-IOV agent
* Don't try to apply iptables rules in an endless loop
* Handle CIDR IP address in allowed address pairs
* Allow self-sharing RBAC rules to be deleted without usage check
* Remove unused variable
* Use registry.receives decorator in neutron.db.l3_db
* Use 'segment' instead of 'network' in log
* fix overaggressive 403->404 conversion
* Updated from global requirements
* ip_lib: ignore gre and lo devices in get_devices by default
* Ignore gre devices in namespaces when cleaning up devices
* Record queries for helpful failure msg in bounds test
* Add string validation on security group's name
* Pass MTU and AZ into network_create_precommit
* Don't override default values for oslo.db options
* tests: removed support for OS_CHECK_PLUGIN_DEALLOCATION
* Stop direct access to CONF.debug
* Switch to neutron-lib hacking factory
* Add some bulk lookup methods to ML2 for RPC handling
* Allow offloading lookups in driver contexts
* Don't mock registry in revision_plugin test
* Call expire_all in revision tests before re-use
* Pecan: /v2.0/ views response with resources
* Deprecate watch_log= argument for Daemon
* Refactor the usage of save_and_reraise_exception
* docs: Update TESTING.rst with gate information about ovs
* policies: Add policy for rechecking failed jobs on Gerrit
* Adding missing neutron policies to policy.json
* fullstack: Test vms are pingable before testing data plane
* DVR: Create router to fip namespace connection based on gateway state
* Port data plane status extension implementation
* DVR: Don't clean snat-ns of DVR HA router when fullsync
* Exit on failure to load mechanism drivers

11.0.0.0b1
----------

* Add sanity check for conntrack
* Fix some reST field lists in docstrings
* New enginefacade for ports and sg groups
* extraroute_db: Remove _get_extra_routes_dict_by_router_id
* extraroute_db: Clean up update_router
* Add Apache License Content in index.rst
* Correct the mistake in ../conf.py
* Fix some grammatical errors in TESTING.rst
* DVR: properly track SNAT traffic
* Ignore gre devices when fetching devices in test_cleanup_stale_devices
* DocFix: sriov_nic_agent supports qos
* Optimize the link address
* Assert contents of returned devices in test
* Tempest: Fixing L3 agent hosting router for DVR setup
* Flush objects by ourselves before processing before_commit event
* Don't check for enable_security_group vs. firewall_driver compatibility
* execute: don't call greenthread.sleep directly
* Fixed validation of create_and_list_trunk_subports rally scenario
* Fix SQL fixture to preserve engine facade settings
* Quota list API returns project_id
* Log messages for keepalived-state-change in syslog
* Enable keepalived debug logs when debug=True
* Add sem-ver flag so pbr generates correct version
* ovs-agent: Clear in_port=ofport flow earlier
* Print useful error on rootwrap daemon failure
* Fix tempest router timestamp test when HA enabled
* Fix TypeError in native of_interface _get_dpid
* hacking: disable log translations check
* Disable dvr tempest tests using DISABLE_NETWORK_API_EXTENSIONS
* Add net precommit create callback and req to update
* Inherit segmentation details for trunk subports if requested
* Removing workaround for bug 1656947
* Updated from global requirements
* Update QoS devref
* Improve validation of supported QoS rules
* deepcopy binding and binding levels avoid expiration
* Move notify_security_groups_member_updated to callback
* Handle auto-address subnets on port update
* use neutron_lib's portbindings api-def
* Deal with port commonly when hypervisor is XenServer
* Egress sg_rules should get 'prefix' from 'dest_ip_prefix'
* Move conntrack zones to IPTablesFirewall
* Use os-xenapi for neutron when XenServer as hypervisor
* TestTrackedResource: register core plugin in directory
* Retrieve fresh network DB data before getting it in ml2
* Remove stale floating IP addresses from rfp devices
* Use is_loaded in manager.init
* Revert "Skip DHCP agent query in provisioning block setup"
* New enginefacade for networks, subnets
* Fix relationship event handler for flushes and nested
* Make RBAC entry removal enginefacade friendly
* Fix call a method of extension class
* Handle empty body in add_router_interface
* Remove unnecessary setUp function in testcase
* LOG.exception for mech dict extend failure
* Add an example for update in object_usage
* Use new enginefacade for quota and provisioning blocks
* Port idlutils.get_schema_helper from ovsdbapp to neutron
* raise Exception instead of LOG.error on cookie/mask inconsistency
* Verify metering label exists before applying rule
* Remove a release note for reverted patch
* Avoid router ri.process if initialize() fails
* Throttle SIGHUPs to keepalived
* ExtensionTestCase: register correct core plugin
* Use router tenant for interface attach
* Load all eager relationships on 'before_commit' event
* Update metering agent to use stevedore alias for driver
* Fix Quota error while running tests
* Clear QoS rules from ports without a qos policy
* Correct file mode
* Feature Classification Cleanup
* Delete segments on BEFORE_DELETE instead of PRECOMMIT
* OVO for Quotas and Reservation
* Remove minimum_bandwidth_rule from rules supported by Linuxbridge agent
* Refactor OVSCookieBridge: always use bridge cookie
* delete_flows shall only touch flows with the bridge cookie
* Log instance interface addrs in subport test
* Skip DHCP agent query in provisioning block setup
* Optimize pid property in AsyncProcess class
* Replaces uuid.uuid4 with uuidutils.generate_uuid()
* Configure tempest.conf via test-config phase
* Stabilizing process monitor function test case
* Use oslo.context class method to construct context object
* Fix some reST field lists in docstrings
* Updated from global requirements
* Fix Python 3 compatibility in idlutils
* Bump default quotas for ports, subnets, and networks
* Write vrrp_script before (re)starting keepalived
* Use BEFORE_CREATE events for _ensure_default_security_group
* Apply QoS policy on network:router_gateway
* Move NEUTRON_* definitions from plugin.sh into settings file
* Simplify RootHelperProcess._read_stream()
* Switch RootHelperProcess from select.poll to select.select
* Reconcile quitting_rpc_timeout with backoff RPC client
* Use unique binding_index for RouterL3AgentBinding
* Add precommit notifications for create/update port/network in ML2
* Rally: decrease SLA for avg list of ports and nets
* Revert "Stop skipping compute owner in Linux Bridge loop"
* Fix copy-paste error in hacking checks
* Update stadium stable dashboard
* teach logger mech driver vlan transparency
* Fix linuxbridge agent startup issue with IPv6
* Consume ServicePluginBase from neutron-lib
* OVO creation for RouterL3AgentBinding
* Disable RA and IPv6 forwarding on backup HA routers
* Updated from global requirements
* Pass parameters when create eventlet.wsgi server
* Remove network port special-case in provisioning block
* Downgrade callback abortable event log to debug
* Remove redundant/stale sections from neutron-teams doc page
* pecan: Make admin context if no context was created
* TrivialFix: Remove extra space from log
* Do not use -1 as an OpenFlow flow cookie
* Stop spilling tracebacks during normal operation
* Neutron Feature Classification
* Don't IPAddressGenerationFailure for DHCP ports
* Get rid of custom wrap_db_retry call in sync_allocations
* Use correct retry mechanism in tags
* Fix link rendering for grafana dashboards
* Agent common config
* On update_tags, clean up tags from the requested resource only
* Use warn_on_missing_entrypoint from stevedore 1.20.0
* use neutron_lib's provider_net api-def
* Bump os-log-merger version in post_test_hook
* Don't add duplicate metadata rules after router update
* Render service plugins configurable
* Removed Netmtu_db_mixin from the tree
* Expose register_objects function to register all known objects
* Use @registry.receives in neutron.services.segments.plugin
* Add PD support in HA router
* Remove dependency on strings for kill_process
* Trim rally scenario sizes
* Let setup.py compile_catalog process all language files
* Untangle WaitTimeout class from eventlet.TimeoutError
* qos: Pass correctly table_id to openflow driver
* Switched gate to new neutron-* service names
* Check for None in _get_agent_fdb for agent
* Fix two spelling errors
* Generate index file containing oslo.log messages for all tests
* Remove deprecated method get_interface_mac
* Decouple hook and func registration from CommonDbMixin
* TrivialFix: Do not use synchronous_reader argument
* Change the way to distinguish the port type
* Replaces yaml.load() with yaml.safe_load() in neutron
* Switch to Pecan by default
* Init policy in pecan after hook as well
* Set OVS inactivity_probe to vsctl_timeout when adding manager
* Network OVO in neutron/tests/unit/plugins/ml2
* Change in-tree code to use moved get_ip_version()
* devref: docs about how to use NeutronDbObject
* Switch ns-metadata-proxy to haproxy
* Don't return null-byte separated string from ExternalProcess.cmdline()
* Guard against os-log-merger failure
* Update is_default field only when specified in the request
* OVO for Tag
* devstack: use neutron_service_plugin_class_add for service plugins
* devstack: added new neutron-* aliases for services
* devstack: switch to new NEUTRON_* variables
* Fixes crash when starting Neutron OVS Agent on Windows
* Make query in quota api lockless
* Lockless segmentation synchronization in ML2 type drivers
* OVO External Networks
* Decouple tests from default quota size option values
* Fix bashate warnings
* Added docstring for Pager for OVO
* use neutron_lib's get_random_mac
* add 4094 to fix the range of available local vlans
* devstack: configure ml2 extension drivers using lib/neutron
* Get rid of delete_subnet method in ML2
* Ensure ovsdb_connection enabled before calling monitor
* Remove unnecessary overrides in Subnetpool OVO
* Switched rpc access policy to DefaultRPCAccessPolicy
* Stop skipping compute owner in Linux Bridge loop
* Spin off context module
* Clean up dsvm-scenario handler for gate_hook
* Cleaned up the list of lieutenants and stadium points-of-contact
* Updated from global requirements
* Get rid of ML2 inheritance of delete_network
* Use Sphinx 1.5 warning-is-error
* Bump to Nova V2.1
* Prepare trunk rules module to deal with the Ironic use case
* Stop killing conntrack state without CT Zone
* Fix trunk subport scenario test
* Deprecate nova_metadata_ip in favor of nova_metadata_host option
* Include metadata content for debugging
* [Fix gate] Update test requirement
* Use @registry.receives in neutron.services.trunk
* Don't use save_and_reraise_exception when we never reraise
* Fix typo in test_rules.py
* Use @registry.receives in neutron.services.qos.drivers.base
* Use registry.receives decorator in driver_controller
* Use registry.receives decorator in neutron.db.dvr_mac_db
* Fix has_registry_receivers when super.__new__ is object.__new__
* Provide more useful repr for IPDevice
* Scan for mac through all devices
* Use registry.receives decorator in neutron.notifiers.nova
* Stop making IP-specific provider rules in SG code
* LB Trunk: Stop matching MAC of subport to port model
* Switch external network create event to precommit
* Use registry.receives decorator in neutron.db.db_base_plugin_v2
* Updated from global requirements
* Convert gate_hook to devstack-tools
* Remove unused enable_snat attribute
* Fix has_registry_receivers for classes w/o __new__
* Use registry.receives decorator in neutron.db.availability_zone.router
* Use registry.receives decorator in neutron.db.l3_hamode_db
* Use registry.receives decorator in neutron.db.l3_dvr_db
* Fetch trunk port MTU only on trunk validation
* iptables: stop 'fixing' kernel sysctl bridge firewalling knobs
* Use registry decorator in ML2 plugin
* Prevent double-subscribes with registry decorator
* Use registry decorator in external_net_db
* Switch to use idempotent_id decorator from tempest.lib
* Adding a code coverage threshold
* Remove a NOTE that doesn't make sense anymore
* Add bashate support
* Cleanup _find_related_obj
* tiny refine: capitalize mod_name with string capitalize function
* Mark of_interface option deprecated
* functional: Stop compiling OVS from source
* Clean up deprecated sqla model import paths
* gate_hook: accept dsvm-scenario-[ovs|linuxbridge] venv name
* OpenFlowSwitchMixin: do not override delete_flows
* Fix OVSBridge.delete_flows when called with no args
* Remove obsolete comment
* Configure HA as False when creating a router for DVR tests
* Deprecate gateway_external_network_id option
* gate-hook: Accommodate devstack-gate local.conf changes
* tempest: Skip QoS test until fixed
* Fix string case issue in rpc_api documentation
* Avoid applying noop lambdas per object in db_api.get_objects
* Stop handling interface drivers that don't support MTU
* Provide hook for networking-sfc OVS agent code
* Registry decorator to bring subscribes to methods
* Add some debug statements to root-cause 1666493
* Allow no network to be passed into PortContext
* Avoid segment DB lookup in _expand_segment on port
* Make ML2 OVO push notification asynchronous
* Avoid loading network and all rels for subnet query
* Don't log about retrying failed devices if empty
* Allow no network to be passed into subnet context
* Enforce port QoS policies for all ports
* Pecan: Get loaded by paste deploy
* Macvtap: Check for no original port in is_live_migration
* Skip native DHCP notifications on status change
* Move dhcp_release6_supported to runtime checks file
* Disable process monitor for keepalived test
* Flavors: move dict extend out of txn
* Remove deprecated methods supported by neutron_lib
* Fix typo in blueprints.rst
* Remove unused logging import
* Add a tempest scenario for floating-ip
* Invert device_owner to avoid filtering too much
* Clean up ovsdb-native's use of verify()
* Simple logging de-dup logic to RPC version_manager
* fullstack: Fix race between updating port security and test
* Adding cleanup of floating ips
* Fix typo in sqlalchemytypes
* Updated from global requirements
* Only log IDs of callbacks in debug message
* Add helper for driver to check if it owns router
* Enable dstat for in-gate functional and fullstack test runs
* [TrivialFix] Remove the file encoding which is unnecessary
* Don't configure neutron.conf as part of functional/fullstack gate
* Remove dependency on memory heavy python-requests
* Pass --concurrent flag to ebtables calls
* Revert "Linux Bridge: driver support for QoS egress minimum bandwidth"
* Switch to use test_utils.call_until_true
* Fix typo in .pylintrc file
* Update devref for enhanced tag mechanism
* Use ipam AllocationOnAutoAddressSubnet class
* Simplify the QoS bandwidth test to increase reliability
* Raise InvalidInput exception on provider-net creation without seg_id
* Fix error in openvswitch firewall doc
* Fix duplicated sg rules check for remote_ip_prefix
* Typo fix: underlaying => underlying
* Skip segment checks during network delete operations
* Open Pike DB migration branch
* Tag the alembic migration revisions for Ocata
* Deprecate get_locked_port_and_binding
* DVR: Look at all SNAT ports for a subnet match
* Don't delete_port on binding deadlock
* Simplify notify transaction tests
* Remove ORM relationship between ports and networks
* Switch to 'subquery' for 1-M relationships
* Add DBError to _is_nested_instance
* Enhance tag mechanism
* Update reno for stable/ocata

10.0.0.0rc1
-----------

* Use same session object for ml2 dict extend functions
* Skip reading config files from neutron.conf.d for the namespace proxy
* Turn nova notifier into a proper rate limiter
* Add Loki service plugin for optional DB havoc
* Use bytes for python3 friendly os.write
* Updated from global requirements
* Use addCleanup instead of tearDown
* Remove --omit argument in run_tests.sh
* Integration of Provisioning Block OVO
* Pecan: Fix tags handling for pecan
* Move ovsdb_nested transaction to ovs_lib
* Add support for Keepalived VRRP health check
* Ensure that subnet update does not create RPC client
* Revert "Don't disable Nagle algorithm in HttpProtocol"
* Increase coverage in agent/l3/dvr_local_router.py
* Use socket.AF_INET6 in ip_lib tests
* tempest: Pin Ubuntu image to one version
* Update bug triaging policy for documentation
* Remove duplicated and unused code in iptables_firewall
* Terminate macvtap agent when physical_interface_mapping config not present
* Add IPv6 Prefix Delegation support for DVR
* Fix port update for service subnets
* Update in-tree code to use new neighbour functions
* tempest: Log server console output of failed SSH
* functional: Check for processes only if there are any
* Fix link in QoS devref
* Clear conntrack entries without zones if CT zones are not used
* Fix iptables rules for Prefix Delegated subnets
* policies: Replace Joe's graphite graphs with Grafana
* Init privsep on l3 agent start
* Display deprecation warning in test only once
* Kill the metadata proxy process unconditionally
* Always acquire network.id lock in dhcp port update
* Fix WaitTimeout string representations
* Remove baremetal notification from nova notifier
* Addressing L3 HA keepalived failures in functional tests
* Stop passing datapath as 'may_exist' argument
* DVR: Add static routes to FIP namespace
* Refactor L3 scheduler (unify code paths)
* Retry on routerport delete race
* Do not raise an error deleting neighbour entry
* Clean up inconsistent syntax
* [Live Migration] Extend ml2_port_binding table
* Fix some pylint errors in IPAM tests
* Use weakrefs for common_db_mixin callbacks
* Add bulk pull OVO interface
* Handle attempt to enable br_netfilter in namespace

10.0.0.0b3
----------

* Check arg type for SegmentTypeDriver functions
* Add missing port UPDATE event to ML2
* Fixes to allow OVO deserialization of ports/networks
* Add missing module-level SUBNET ref
* ovsfw: Refresh OFPort when necessary
* Fix a bad docstring in provisioning blocks module
* Transition qos notification driver into qos driver
* OVS: merge the required OpenFlow version rather than replace
* Fix typo in test_l2_lb_agent.py
* Adjust psutil usage for psutil > 2
* Connectivity tests for OVS agent failures/restarts
* Allow all migration of routers
* Set access_policy for messaging's dispatcher
* Eliminate join for network owner filter
* Always add unique sort keys when sorting
* Allow the other nic to allocate VMs post PCI-PT VM creation
* trunk: Add tempest test validating subport connectivity
* Change neighbour commands to use pyroute2
* Routed networks IPv4 inventory in Nova GRP
* Correctly set project_name
* Break circular import with use of common_types
* ovsfw: Support protocol numbers instead of just tcp and udp
* Add KillFilter for python 3.5
* Register sqlalchemy events through hook for UT cleanup
* Fix empty string check for python 3
* Update docstring in validate_provider_segment
* Update docstring for base OVO
* Modify docref commands to reflect OSC transition
* Server-side push notifications for ML2
* Allow 0 in port range for securitygrouprule object
* Updated from global requirements
* OVO for Ipam(Allocation,Subnet,Pool)
* Remove get_router_cidrs method of dvr_edge_ha router
* Use plugin directory fixture
* openvswitch agent: add OVS_RESTARTED event
* Stop using legacy facade
* XenAPI: Support daemon mode for rootwrap
* Fix importing old path for exceptions
* Pecan: Fix policy checks for lists
* Use ubuntu image for tempest scenarios from Neutron
* Catch invalid subnet service_types on input
* Fullstack SG test for LinuxBridge agent
* Get rid of L3 HA override of _update_router_db
* Neutron server was not compatible with member actions
* Get rid of create_router override in l3_ha mixin
* Add a ReST client for placement API
* Add OVO for AutoAllocatedTopology
* Fix typos
* Fix broken Windows compatibility in ovs_lib
* Add IPLink class to Windows ip_lib implementation
* Support ovsdb-client monitor with remote connection
* Protect against non-deterministic sort
* Remove deprecated min_l3_agents_per_router
* Include port_security check in fullstack tests
* Revert "Setup firewall filters only for required ports"
* test_l3: Enable native pagination and sort
* Revert "Protect against non-deterministic sort"
* Multiple tweaks for Ocata release notes
* L3: Add in missing translation
* Fix broken links in devref
* Rename a test method
* DVR: delete stale devices after router update
* Linux Bridge: driver support for QoS egress minimum bandwidth
* Use project_id instead of tenant_id in objects
* Remove references to defunct Stadium docs
* Use new enginefacade for dvr mac db
* DHCP: enhance DHCPAgent startup procedure
* Use writer for trunk database operations
* Add check for ha state
* DVR: fix csnat port missing after router update
* Manually add pk for alembic_version table
* Only migrate ports on DVR migration
* Protect against non-deterministic sort
* Decompose router extra attr processing
* Use unique subnetpools in SubnetPoolPrefixDbObjectTestCase
* Fix netns_cleanup interrupted on rwd I/O
* Don't emit SG rule AFTER events until TX closed
* Updated from global requirements
* neutron-lib: use L3 constant
* Use the session loader in keystoneauth1 for designate
* Fix delete_network to delete ports one by one
* Cleanup unused helper function from test code
* Added UT for floating ips with dns
* Centralize creation of security group test obj
* Suppress annoying "Could not load" stevedore warnings
* adds support for vhost user reconnect
* Clean up _get_ports_query
* Get rid of additional fixed_ip filter join
* Remove advertise_mtu config option
* Get rid of _network_model_hook for external_net
* Protect against '.delete()' for quota and revisions
* Don't use .delete() on a port in unit test
* Stop using .delete() in external net handling code
* Revert "Add ALLOCATING state to routers"
* Get rid of ml2 port model hook join
* Add retry to _create_ha_port_binding
* Correct the msg ipv6 enable in system
* Change default exception in wait_until_true
* Checking functionality of DVR
* l3scheduler: create ha_vr_id more robustly
* Remove python 3.4 support
* Get rid of l3 ha delete_router inheritance
* Allow to pass suite flavor to gate_hook to disable dvr tests
* Don't create default SG in transaction
* Kill neutron-keepalived-state-change gracefully
* gate_hook: Add support for neutron-full with the ovs firewall
* Add filter check for querying
* Fix dir doc typo error
* Flavor and Service Profile to OVO
* DHCP: "reserved_dhcp_port" not well managed during startup
* Raise AssertionError instead of eventlet.timeout.Timeout when failing
* Adding debug trace to IPWrapper.get_devices()
* Use gate_hook to enable q-trunk for rally
* Update contacts and team list
* Replaces uuid.uuid4 with uuidutils.generate_uuid()
* Reduce IP link show calls for SR-IOV scan loop
* Add DictOfMiscValuesField in OVO for dict usage
* SR-IOV: Remove physical_device_mappings deprecation warning
* Fix a typo in iptables_manager.py
* DB: remove deprecated oslo_db warnings
* Fullstack tests for DHCP agent HA
* Remove greenlet useless requirement
* Fix pylint warning in test_l3_hamode_db.py
* Fix typo
* Add more protocols to the iptables module map
* Replace nc command with ncat
* Refactors QosAgentDriver
* Change the order of installing flows for br-int
* Updated from global requirements
* [TrivialFix] Fix comment typo error
error * Don't create HA resources until needed * Do not try and remove non-existent iptables chains * Fix ext alias in revision API test * Modify error word "procedence" to "precedence" * Updated from global requirements * [TrivialFix] Fix comment typo error * neutron-lib: use CORE from neutron lib constants * Use Port OVO in ml2/db and plugins/ml2/test\_db * gate\_hook: Add a no-op rally case * Remove Duplicate line in privsep.filters * Use DB field sizes instead of \_MAX\_LEN constants * Account for unwire failures during OVS trunk rewiring operations * Change passing session to context in segments db functions * Fix a bug in process\_spawn binding on ports * team update: john-davidge doc liaison * OVO for FlatAllocation * Fix python3 issues with devstack * iptables: don't enable arptables firewall * Pecan: Fix subresource policy check * Adopt privsep and read routing table with pyroute2 * Change passing session to context for TypeDriver * of\_interface: Revert patching ryu * Remove iptables nat and mangle rules for security group * SR-IOV: remove ml2\_conf\_sriov.ini from oslo-config-generator * Sort and Remove duplicate field types * Remove test cases that moved to tempest * Kill processes when cleaning up namespaces * Don't return content when we set HTTP 204 * Set standard\_attr\_id as property in DeclarativeObject * Add agent object in router info * Bump revision of resource on tag add/remove * Use subqueries for rbac\_entries and subnets<->network * Use assertGreater(len(x), y) instead of assertTrue(len(x) > y) * ovsfw: Raise exception if tag cannot be found in other\_config * Proposing tidwellr and njohnston as service layer go-to contacts * Propose mlavalle as neutron core and L3 second in command * Use get\_random\_string from neutron-lib * Moving pyroute and oslo.privsep deps into requirements.txt * devref: suggest bug deputies to send brief reports to ML * Ensure random object unique constraints aren't violated * Update comment about foreign keys * Document how to proceed with new platform features * Update MTU on existing devices * Added log messages while creating neutron objects * Add subprojects database migration * Fullstack test for DHCP agent * get\_random\_object\_fields() for setting object attr * Updated from global requirements * Remove external\_fixed\_ips from Plural\_mappings in l3.py * Clean-up L3 constant TODOs * Unittests improvement 10.0.0.0b2 ---------- * Restore extraroute dict after OVO change * tests: change order of assertion in \_validate/compare\_resource * Refactor ml2\_db to pass context * Propose abhiraut as neutronclient core * OVO for Allocation and Endpoint * Correctly print --limit value passed via API * Fix flake8 error in DHCPOptsTestCase class * objects: add delete\_objects public method * DVR: Fix race condition in creation of fip gateway * Bulk creation of SecurityGroups * Remove model\_base deprecations * DSCP packet marking support in Linuxbridge agent * Remove duplicated revises\_on\_change in qos db model * Fix DHCP Port Creation on Service Subnets * tempest: Fix qos extension check * Unplug external device when deleting snat namespace * Revert "Squash revert to breaking changes" * Optimize trunk details extension hook * rally trunk port list * Get rid of DVR override of remove\_router\_interface * Check for unbound ports in L3 RPC handler * devref: don't suggest to manually request stable branches * Revert "lb-agent: ensure tap mtu is the same as physical device" * Fix some slow unit tests * DVR: Fix IPtables driver for metering with DVR
routers * Get rid of DVR override of add\_router\_interface * Remove redundant "dvr" in test name * Pass context and resource\_type in RPC callback * Tempest tests use only supported QoS rule types * Get rid of DVR override of \_port\_has\_ipv6\_address * Add filters support to constant queries test * Remove deprecated dhcp\_domain from dhcp\_agent.ini * SRIOV: don't block report\_state with device count * OVO for VlanAllocation * Correctly configure IPv6 addresses on upgrades * Add janitor to cleanup orphaned fip ports * Make UUIDField actually validate UUIDs * Delete related conntrack when deleting vm * Add validation for security\_groups parameter in Port * Pecan: Change Quotas to use original controllers * Use Query.column\_descriptions instead of private property * Remove fallback functions in agent/rpc * Convert filters to empty dict if None in DB's get\_subnetpools() * stadium guidelines: Document stable branch creation * doc: Fix a warning * Allow keystone v3 in the designate driver * QoS: update the database before notifying the backend on delete * Expose [agent] extensions option into l3\_agent.ini * Include ovsdb\_connection option into all interface\_driver agents * Eliminate DVR inheritance of router create/update\_db * Use ExtensionDescriptor from neutron-lib * Support alembic 0.8.9 in test\_autogen\_process\_directives * Use Port OVO object in test\_l3\_ext\_gw\_mode * Refactoring config opts for ml2 plugin openvswitch * Don't compile OVS kernel modules for functional job * Update bug tag contacts due to absconsions * configure\_for\_func\_testing.sh: Remove neutron-legacy inclusion * configure\_for\_func\_testing.sh: Source lib/neutron as well * Functional tests: change assert to wait\_until\_true * revise doc: support\_extension\_aliases to supported\_extension\_aliases * Pecan: Remove get\_resources() from get\_pecan\_resources() * Fix docs headers * Update DB lieutenant to Ann Taraday * Compare port\_rule\_masking() results with a different approach * Use to\_policy\_values for policy enforcement * Skip larger than /64 subnets in DHCP agent * Refactoring config options for mech\_sriov opts * Delete conntrack when remote ipset member removed * Migrate device\_owner for router's interface * Remove allow\_pagination and allow\_sorting config options * Integration of RouterRoute OVO * tests: introduce update\_obj\_fields method in base objects test class * Add notify for tag operations * gate\_hook: Remove a stale comment * Renamed all [AGENT] config sections into [agent] * Reuse plugin's update method for changing trunk status * Calculate IPv4 DHCP subnets once for metadata * Lock in DHCP agent based on network\_id * Fix OSprofiler support * Introduce objects\_exist method in neutron/objects/base.py * OVO: add SubnetServiceType object and code integration * Revert "Deprecate SR-IOV 'physical\_device\_mappings' config option" * Removed deprecated method * ml2: Add original port to context on \_bind\_port * Integrate Address Scope OVO into DB * Add missing revises\_on\_change attribute * Redirect ExtensionDescriptor to neutron-lib * Remove PLURALS * Show team and repo badges on README * ovs-agent: Close ryu app on all exceptions * Get rid of DVR inheritance of \_delete\_current\_gw\_port * Solve unexpected NoneType returned by \_get\_routers\_can\_schedule * fullstack: add some more security group tests * Update network dict with segment and mtu info in \_create\_subnet\_db * Capture NotFound Exceptions in subnet postcommit * Revert "DHCP agent: advertise SLAAC prefixes" * Get
rid of floating IP bgp next\_hop query * L3: Only send notifications if l3 plugin exists * Pecan: Minor Fix in startup * Squash revert to breaking changes * Remove REVERSED\_PLURALS and get\_resource\_info() * Remove legacy oslo.messaging.notify.drivers * Fix InvalidInput exception output * Revert "Fix file permissions" * Adopt neutron-lib plugin directory * DB: use get\_unique\_keys from oslo\_db * ovsdb: don't erase existing ovsdb managers * OVO for Provisioning Block DB Model * ovs-agent: Catch exceptions in agent\_main\_wrapper * functional: Remove unused l3 config * Updated from global requirements * Improve performance of \_modify\_rules * Refactoring config options for plugin l2pop opts * Refactoring config options for plugin macvtap opts * Refactoring config options for l2 agent ext opts * Refactoring agent metadata config * Metering: sync only active routers hosted on the same host * Fix security\_groups\_provider\_updated for linuxbridge * Using a new security group in scenario tests * Fix file permissions * DB: remove deprecation warnings for BASEV2 * cors: update default configuration * Add unit test in test\_allowedaddresspairs\_db * Adjust method execution order * functional: Pass string tag to other\_config * functional: Use VLAN tags from range <1; 4094> * Replace create\_test\_segment with NetworkSegment object * Remove unused LOG * Get post-subnet actions out of transaction * ProviderResourceAssociation to OVO * sanity check: Check that ip\_nonlocal\_bind works with namespaces * callbacks: Make the value of FLOATING\_IP match with api resource * Move SR-IOV VIF type constants to the portbindings extension * Use new enginefacade metering db * Move DVR fip agent gw port create out of transaction * Separate floating IP port creation from transaction * Use callbacks to create DVR floating GW port * Disallow specifying too long name and description for qos * Move AgentStatusCheckWorker to PeriodicWorker * Speed-up iptables\_manager remove\_chain() code * Typo on side\_effect rendering the test useless * Fix reset/start methods on AgentStatusCheckWorker * Pecan: Add functional test for QuotasController * Eliminate duplication of lots of DVR tests * Have qos object increment port/network revision * Update devref examples about alembic migrations * Trivial Fix - Update code to use Pike as the code name * Stop using deprecated CORS.set\_latent() * L3 scheduler: add retry indicators for bind\_router * Install OVS from git if current version < 2.5.1 * Fix typos in standard\_attr.py & attributes.py * Clean up agent status check debt * Collect OVS Debt * Disable 'accept\_ra' in DHCP agent namespace * Replace assertEqual(None, \*) with assertIsNone in tests * Fix ML2, base db plugin update\_subnet for transactions * DVR: Fix func docstring and comments * Move sysctl out of IPDevice class 10.0.0.0b1 ---------- * Parse the output of ip route more robustly * Add L3 HA test with linux bridge * Fix "failed unplugging ha interface" error when deleting router * Add a trunk rally test * Updated from global requirements * Fix typo in release note filename * Use new enginefacade for servicetype\_db * Remove floatingip address ignores ha\_state * ovsfw: small cleanups to improve readability * Use new enginefacade for tag * Change import statement to not rename the module * Add unit tests for ip\_lib.get\_routing\_table * policy: cache extracted parent fields for OwnerCheck * Changing arping command execute to accept 1 as extra OK code * l3-ha: Send gratuitous ARP when new floating IP is
added * Make OVO exception NeutronDbObjectDuplicateEntry retriable * Move OVO exceptions to neutron/objects/exceptions * neutron-lib: complete usage of helpers * Add check to address\_pair that items in list are dict * Add rally hook to neutron devstack plugin * Agent to OVO * Retire deprecations for constants, exceptions, attributes * Remove second -m protocol from iptables rules * Truncate IPDevice's name to interface max size * Change cfg.set\_defaults into cors.set\_defaults * DHCP: Add missing path for marking ports as ready * Stop using osprofiler options as if they are part of public API * Register osprofiler options before setting engine hook * Don't pass config object to send\_ip\_addr\_adv\_notif() * Deprecate SR-IOV 'physical\_device\_mappings' config option * Devref to explain nova-neutron interactions during live-migration * Remove timeutils.clear\_time\_override in neutron * Refactor OVSDB native lib to be more customizable * Deprecate run\_tests.sh * Removed deprecation warning for waiting\_until\_true * Removed deprecated tests function * Removed deprecated function import\_modules\_recursively * Switch to new hacking 0.12 * Avoid UnboundLocalErrors in \_create\_bulk\_ml2 * Introduce context in methods for Router Extra Attributes OVO usage * Pecan: Find subresource controllers by parent * Removed dispose\_pool() from db/api * Removed deprecation warning for converters and validators * Removed get\_engine() from db/api * Removed paginate\_query and sqlalchemyutils module * Removed deprecated methods for AgentSchedulers * Removed deprecated checks * Removed update\_network from plugin.common.utils * Removed network\_segment functions from ml2.db * Removed deprecated class LocalVLANMapping * Deprecate send\_arp\_for\_ha option * Change IPAM DB API to use context instead of session object * Add fullstack test to check DSCP marks outbound * L3-HA: remove unused deprecated code * neutron-lib: use replace\_file from neutron lib * Remove unused configuration variable * Fix typo in comment * Migrate to neutron-lib released API definition for trunk APIs * Updated from global requirements * Reduce rally executions of create\_and\_list\_ports * Neutron lib adoption * Use ensure\_tree from oslo\_utils.fileutils * Remove last vestiges of oslo-incubator * Updated from global requirements * Updated from global requirements * Pecan: No fields to plugin on GET member actions * IP Conntrack Manager changes for FWaaS v2 * Only mark ports ready on synced networks * Fix modify\_fields\_from\_db for vif\_details empty str * Forbid contract migration scripts for Ocata * Check if namespace exists before getting devices * DHCP agent: advertise SLAAC prefixes * Use new enginefacade for address\_scope\_db * Replace a test with use of network object * Add 'to\_primitive' for MACAddress and IPNetwork * Update Lieutenant table with new entries * Usage of new enginefacade for flavor db * Don't depend on translated strings for error check * Simplify resources module in RPC callbacks * SR-IOV: Remove deprecated supported\_pci\_vendor\_devs option * ipv6\_utils: delete get\_ipv6\_addr\_by\_EUI64 * Fix specs broken link * Make README less verbose and to the point * Make crystal clear how to contribute/approach the neutron community * Refactor/prepare db common utils for neutron-lib * Add OVO for dns Objects * Fix test\_unwatch\_log() to cleanup after itself * Updated from global requirements * TrivialFix: Modify the spelling mistake * Rename ipv6\_utils.is\_enabled() * Handle db\_add in transaction for new
objects * [TrivialFix] Replace 'assertTrue(a in b)' with 'assertIn(a, b)' * Correcting a spelling in README * Refactoring config options for cache\_utils opts * Updated from global requirements * OVO for SegmentHostMapping * Updated from global requirements * Pecan: Bulk create with one item returns plural * Log OVS IDL library errors to neutron logs * Replace retrying with tenacity * Removes the superfluous 'that' * Add functional tests for OVSDB Connection * Removed unnecessary file (openstack/common) in run\_stack.sh * objects: add validate\_filters option for count() method * ovsfw: Add a dl\_type match for action=ct flows * Update metadata proxy when subnet add/delete * OVO for Router Extra Attributes * Validate type of allowed\_address\_pairs * Pecan: Fix internal server error on QoS rule PUT * Pecan: Don't prefetch resource fields on DELETE * Generate OVSDB schema helper in a separate method * objects: Remove tenant\_id from to\_dict() when project\_id is not set * Updated from global requirements * Cleanup coverage configuration * Fix spelling errors * Make ovs functional tests mutually isolated * Pecan: rework notifier hook for registry callbacks * set\_db\_attribute differs between vsctl and native * Only send string values to OVSDB other\_config column * test\_routers\_flavors: Skip if requirements are not met * Add http\_proxy\_to\_wsgi to api-paste * Only emit deprecation warning when deprecated value read * Swap the order of arguments to \_check\_equal * Cleanup of SecurityGroup classes * Remove deprecation warnings for agents\_db * Have RouterRoute object increment Router revision * Allow to override Idl class in OVSDB Connection * Drop MANIFEST.in - it's not needed by pbr * Updated from global requirements * fullstack: Add security group tests * neutron-lib: use dict methods from helpers * neutron-lib: use get\_hostname * neutron-lib: use cpu\_count * Make the HA router state change notification faster * Pecan: add http\_proxy\_to\_wsgi middleware * Use BaseDbObjectTestCase in AddressScope UT * Devref: fix repeated 'testing documentation' text * Removes remaining Hyper-V plugin * Added trailing slash in link to Networking API v2.0 * Add db\_add to OVSDB API * Extend project\_id in object dicts (Newton only) * Integrate SubnetRoute OVO * Fix callers of \_make\_port\_dict() * objects: Removed project\_id/tenant\_id field translation * Remove deprecation warnings for l3\_hamode\_db * Fix IPv6 PD with pluggable IPAM * OVO for Router Route * Improved readability and fixed indentation * neutron-lib: start using neutron-lib helpers * Cleanup Newton Release Notes * Fix periodic jobs: Add databases to bindep.txt * Relocate DNS db models * Updated from global requirements * ovsfw: Fix warning message when fetching port's other\_config * Relocate Agent DB model * Add api test for create update network and port with dns * Refactor code to deprecate get\_interface\_mac * Objects: Add README for neutron/objects directory tree * Add sample\_default for state change server config * Ignore gre0 and gretap0 devices in netns cleanup script * Get rid of double-join to rbac\_entries without filter * Deterministic ordering of fixed\_ips * Account for Py2/Py3 differences in fcntl.ioctl return value * Enable release notes translation * Handle label\_id's DBReferenceError when creating label-rule * Add required extension to trunk\_details descriptor * Relocate Segment DB Models * Make Jakub Libosvar contact for ovs-fw tag * Relocate Flavor and ServiceProfile DB models * Fix typo in comment * L3
DVR: always notify on ROUTER\_INTERFACE AFTER\_CREATE * Fix \_list\_availability\_zones for PostgreSQL * Relocate dvr model * Fix the 'port\_id' attribute does not exist error * Relocate L3HARouter DB model * Replace create\_test\_network with network object * Relocate Tag DB model * DVR: remove misleading error log * Enable OVSDB Connection unit tests for py3 * Handle uuid references within an ovsdb transaction * Remove deprecation warning for l3\_agent\_scheduler * Expose OVS bridge related options into metering agent config file * Refactoring config options for ml2 plugin drivers * Remove recursion from import\_modules\_recursively * Refactoring config options for l3 ha agent opts * Relocate SegmentHostMapping DB model * Relocate Router Extra Attributes DB Model * Remove old oslo.messaging transport aliases * Relocate router route DB model * Disallow specifying too long name for meter-label * Fix misleading error trace in trunk fullstack life cycle test * Relocate Geneve DB models * TestSanityCheck: drop test tables during cleanup * Removed is\_deadlock from db/api * Relocate external network db models * fullstack: VLAN aware VMs test * Introduce ovo objects for networks * Schedulers: use consistent plugin, context args * Fixed functional iptables firewall tests for newer kernels * Relocate ProviderResourceAssociation DB models * Introduce ovo objects for ports * Relocate VlanAllocation DB model * Ignore NULL fixed\_ips for duplicate floating\_ip check * Relocate Metering DB models * Relocate RouterL3AgentBinding DB model * Updated from global requirements * Relocate L3 DB Models * Relocate Provisioning Block DB Model * Process OVS trunk bridges associated to VM deletes * Fix typo overriden => overridden * iptables: fail to start ovs/linuxbridge agents on missing sysctl knobs * Refetch subnet/network from DB in ML2 update ops * Send a callback before attaching a subnet to a router * Don't try to delete non-existent namespace * Added bindep.txt to the project * Don't swallow ImportError from load\_class\_by\_alias\_or\_classname * Remove requests-mock from test-requirements * Correct fix for IPv6 auto address interfaces * Darek (@dasm) is our new release liaison for Ocata * Make HenryG the Magnificent responsible for 'oslo' LP tag * Add missing revision number alterations * Fixes for missing IPs on subnets in DHCP agent * Retry port update on IpAddressAllocationNotFound * New option for num\_threads for state change server * Handle add/remove subports events loss due to agent failures * Fullstack: Add helper FakeFullstackMachinesList * trunk: Log RPC communication * tests: Move testtools to 3rd party libraries section * Fix pep8 E501 line too long * Fix dhcp\_release6 error when not supported * XenAPI: add support for conntrack with XenServer * Fix ML2 revision\_number handling in port updates * Fix ML2 test extension driver API test cases * Relocate PortBindingPort DB Model * Relocate VxlanAllocation/VxlanEndpoints DB model * Support new osprofiler API * Include entity in provisioning block logs * API tests: Check MTU sanity of trunk/subport * Use assertItemsEqual to compare dicts * Fix wrong use of six.moves.queue.get() * Allow more time for DB migration tests * Update trunk metadata during wire/unwire operations * Ensure trunk status is acknowledged during OVS subport operations * Clean-up Tempest test networks with project\_id * Add MAC address to subports in trunk\_details * Updated from global requirements * Install dibbler.filters rootwrap file * tests: catch eventlet.Timeout
exception * Add basic status transition trunk scenario test * Remove deprecated class NeutronController * Fix typos in test\_ipam\_pluggable\_backend.py & base.py * Do not retry default security group creation * Fix a release note typo for implicit provider loading deprecation * Revert "Do not retry default security group creation" * Try to reuse existing IPs when a port update specifies subnet * Make DHCP agent use 'revision\_number' * Stop oslo\_messaging from error logging CallbackNotFound * Don't immediately restart in DHCP agent on port change * Always cleanup stale devices on DHCP setup failure * Reduce log level for extensions not being supported * Add admin api tests for project-id * Do not retry default security group creation * Increase subnet count for rally test * Lower concurrency on rally port job * Add to rally quotas to handle worst case quota race * Fix linuxbridge trunk subport RPC event handler * OVS agent: configure both OF10 and OF13 * Improve tunnel\_sync * Disable DHCP on agent port removal * Garbage collect HasStandardAttributes subclasses in StandardAttrTestCase * fullstack: execute qos tests for all ovsdb/of interface permutations * Change the prefix for trunk subports device\_owner * Fix devref typo * Keepalived global\_defs configuration entries required to avoid DNS lookup * DHCP: enhance DHCP release log * Make neutron-db-manage ready for ocata development * Update reno for stable/newton * Fix useless use of \_nxm variant in an OVS field name 9.0.0.0rc1 ---------- * Added missing translation marker for an error * Add metadata proxy router\_update callback handler * Fix events misnomer in callback registry debug trace * Fix metering-agent iptables restore failure * Add fullstack test with OVS arp\_responder * Don't raise RetryRequest for port in delete\_subnet * Retry setting mac address on new OVS port 10 times * Forbid importing neutron.tests.\* from outside tests subtree * Raise level of message to info * ovs agent, native ARP response: set Eth src/dst * Remove unused phys\_net parameter from EmbSwitch class * Expire DB objects in ML2 infinite loops * Stop oslo\_messaging from error logging CallbackNotFound * Add API test to ensure IPs can be added by subnet * Revert "Don't allocate IP on port update when existing subnet specified" * Add common way to extend standard attribute models * Fix migration of legacy router to DVR * Catch DBReferenceError in IPAM and convert to SubnetNotFound * Don't warn in VLAN Plumber on tagless children * Skip warnings during DHCP port actions if no error is raised * Include timezone in timestamp fields * LinuxBridge: Use ifindex for logical 'timestamp' * Correct floating IP extra attributes updating issues * Refactor for floating IP updating checks * Prevent use filter(lambda obj: test(obj), data) * Catch RPC errors in trunk skeleton * Make DHCP notifier use core resource events * Disable DHCP on test\_create\_port\_when\_quotas\_is\_full * Capture SubnetNotFound from update\_port call * Add retry decorator to provisioning blocks module * Tag the alembic migration revisions for Newton * Utilize retry\_if\_session\_inactive in dvr\_mac\_db * Handle racey teardowns in DHCP agent * Demote error trace to debug level for auto allocation operations * Fix TypeError in sanity check logging format * Don't allocate IP on port update when existing subnet specified * Prevent duplicate LLA iptables rules * Remove erroneous newton milestone tag * Prevent iptables rule from being thrashed * tests: Don't raise TimeoutError when waiting for ping
* Mark quota operations as retriable * Pass not IPDevice but port\_name into OVSBridge's add\_port() * devstack: the last fix of is\_kernel\_supported\_for\_ovs25 condition * Ensure there are fdb\_entries before iterating * Fix Rollback port's device\_owner * Protect security group operations with the retry decorator * Don't work with native DB port objects in DVR code * of\_interface: Use vlan\_tci instead of vlan\_vid * Set device\_owner for trunk subports * Only do deferred ip allocation on deferred ports * Accept and return project\_id for API calls * trunk: Remove ovs constants from trunk utils module * Skip can\_be\_trunked validation for subports * ovsdb-handler: Follow up patch * Make l2/l3 operations retriable at plugin level * Reduce log level for ryu in OVS agent log * Use ConfigParser instead of SafeConfigParser * Replace assertEqual(None, \*) with assertIsNone * Emit router payload only during failures * tests: don't override base object test class attributes * objects: expose database model for NeutronDbObject instances * Defer setting 'ha'/'distributed' flags in L3 code * Updated from global requirements * Add sg\_id in the AFTER\_DELETE event of sg\_rule delete * Fix sudo errors in test\_\_set\_trunk\_metadata\_with\_None\_params * Add release note for blueprint vlan-aware-vms * Make optional the validation of filters * Mark unaddressed ports with none in ip\_allocation field * Fix misnomer in \_set\_trunk\_metadata * stadium: remove neutron-release from release job for stadium projects * LinuxBridge: Pass host into get\_devices\_details\_list * Downgrade binding skip in mech\_agent * Use singleton for Nova notifier * trunk: Handle subports notification in ovs agent side * objects: don't call DeclarativeObject's \_\_init\_\_ twice for rbac objects * Move retry decorator in auto allocate * Add retry decorator to RBAC module * Mark agents db mixin operations retriable * Add sanity pings to restart tests to ensure connection * Implementation of vlan-aware-vms for Linux Bridge * Add Trunk Plumber module for Linux Bridge * Move 'get\_tap\_device\_name' to utils module * devstack: finally fix the is\_kernel\_supported\_for\_ovs25 condition * tests: remove self.db\_obj from objects tests * devref: cleaning up release guidelines for stadium subprojects * Add missing index on inserted\_primary\_key in migration * l2pop fdb flows for HA router ports * OVSDBHandler for trunk ports * Prepare retry decorator to move to plugin level * devstack: fix kernel version matching to detect ovs compilation support * Neutron versions api now has its own middleware pipeline * Use correct exception in L3 driver controller * pylint: remove 'openstack' from ignore list * Fix SR-IOV qos extension calls to clear\_rate functions * Don't use nested transaction in provisioning blocks * Roll back port after failing to add it to router * Quotas: add in missing translation * Ensure UnknownProvisioningError can be printed * DHCP: remove deprecated config dhcp\_lease\_time * Standardize release note page ordering * DHCP: device manager: two new methods - plug and unplug * Detect DBReferenceError when deleting flavor * Add API test for L3 Flavors * Reduce the number of debug traces emitted by registry callbacks * Add a composite unique key to floatingip table in Neutron database * Don't return ext net DB object in auto allocate * Be more clear about DOWN\_STATUS for trunk resources * Include timestamps in trunk API test comparisons * Except if tracked resource registered as countable * Convert OVO fields to
primitives for to\_dict() * Add link to review dashboard in the effective guide * Fix wrong HA router state * Config: change deprecation for max\_fixed\_ips\_per\_port to Ocata * Only schedule routers from drivers that need it * Remove FWaaS policy.json entries * Update L3 agent extensions devref * Remove duplicated tool * Remove availability range code and model * tests: refactor objects test cases to use db models instead of dicts * gate\_hook: Compile ovs only with supported kernels * objects: use correct object class name in NeutronPrimaryKeyMissing * l3 ha: don't send routers without '\_ha\_interface' * ovsfw: fix troublesome port\_rule\_masking * Check MTU sanity of trunk port subports * QOS-DB: remove deprecation warning * Skip setup\_port\_filters for skipped\_devices * Disallow specifying too long description for meter-label * Use bridge\_lib's FdbInterface calls instead of execute util * Expose revision\_number to trunk API * Change 'revision' to 'revision\_number' * macvtap: Mech driver detects invalid migration * Remove workaround for bug/1543094 * Deal with unknown exceptions during auto allocation * Revisit support for trunk segmentation types * Set trunk status to DOWN when parent port is unbound * db migration: Alter column before setting a FK on column * Switch order for cleanup in NamespaceFixture * Add binding\_index to RouterL3AgentBinding * Add timestamp fields for neutron ext resources * Make L3 agent use different request-id for each request * Refactor \_ipam\_get\_subnets * Fix AddrFormatError in DHCPv6 lease release code 9.0.0.0b3 --------- * Add name and desc to networksegments table * Fix QoS standardattr migration foreign key step * Ignore extra subnet IPs in DHCP agent * Truncate microseconds before DB insertion * Updated from global requirements * Make milestone-review-dash script pull all Stadium projects * Adding support for releasing DHCPv6 leases * qos: require min\_kbps parameter for minimum bandwidth rule * Mock call to sysctl for test\_get\_free\_namespace\_port * split out l3-ha specific test from TestMl2PortsV2 * Add L3 agent extension API object * Switch to neutron-lib for model\_base * Do not warn under normal network circumstances * Narrow down DBError to DBReferenceError in DHCP action handler * Expose timestamp attributes on trunk API * Remove non-pluggable IPAM implementation * Expose description attribute on trunk API * DVR: SNAT redirect rules should be removed only on Gateway clear * Implement state management for trunks * debtcollector for globals * Retain port info from DSCP rule creation * Use method get\_ipv6\_addr\_by\_EUI64 from oslo.utils * Use plugin obj from neutron manager in the TestMl2PortsV2 unit test * Add new configuration test in sanity check: vf\_extended\_management * IPAM: fix the mode of ipam\_backend\_mixin.py * Pecan: Handle member actions * Get rid of flush() in create\_segment() * Fixes DBDeadlock race condition during driver initialization * Remove FWaaS entries from neutron setup.cfg * fix port address allocation for auto-addr subnet * DVR: Cleanup the stale snat redirect rules in router namespace * objects: remove support for multiple db models in from\_db\_object * Check for ha port to become ACTIVE * Fix ipam\_driver config help * Auto allocation: ensure that networks and subnets are cleaned up * Add the l3 agent 'host' when logging routers * Handle fullstack oslo deprecations * fullstack: Use ovs-2.5 for tests * Relocate Flat Allocation DB model * Relocate subnet\_service\_types db models * Remove unnecessary
setUp and resource\_setup * Delete unused pom.xml file from doc * Remove unused config.CONF * Remove unused logging import * Add test cases for Invalid exception type * Allow SR-IOV agent to start when number of vf is 0 * Add standard attributes to qospolicy * Enable create and delete segments in ML2 * Use MultipleExceptions from neutron-lib * Move standard attr out of model\_base * SR-IOV: add agent QoS driver to support egress minimum bandwidth * Add QoS minimum bandwidth rule for instance egress traffic * Remove vpn service plugin stevedore aliases * Remove lbaas v1 service plugin stevedore aliases * Generate new context for each DHCP RPC call * Fix unbalanced-tuple-unpacking pylint error in conn\_testers * Don't catch DBError in DHCP action handler * Catch InvalidInput in DHCP port actions * Wait for ovsdb\_monitor to be active before using it * Fix deprecation warnings * spelling error * Updated from global requirements * Make addbr safe to bridge add races * Don't check subnet with dhcp disabled in get\_isolated\_subnets * Add Unit Test for Foreign Keys Not Found Error * Deprecate allow\_sorting and allow\_pagination options * functional: Make trunk tests more robust * Fix passing error physical network for get\_mtu * Fix indent * Remove useless line for tenant\_id * Implement check\_vlan\_transparency to return True in L2population mech driver * Security group callback needs to cascade delete the related rules * Clean import in code * Set L3 agent standby if admin\_state\_up=False * Fix internal server error during updating QoS rule * Make OVS and LinuxBridge trunk drivers' is\_loaded() property more robust * IP allocation with Service Subnets * Include [agent] extensions option into ovs/linuxbridge agent files * Using revision\_number to ensure no overlap in \*one\* network * Fix the QoSPluginBase methods signature * Implement check\_vlan\_transparency to return True in SR-IOV mech driver * TrivialFix: Remove logging import unused * TrunkManager for the OVS agent * SR-IOV: remove unused supported\_pci\_vendor\_info variable * Catch SubnetAllocationError during auto-allocated-topology provisioning * Revisit the Stadium section of the developer guide * objects: add support for per parent type foreign keys * Fix bug in L3 agent extension manager * Added the appropriate links in developer guide * L3 DVR: use fanout when sending dvr arp table update * Fix the attribute name: \_flavor\_plugin\_ref * Use more specific asserts in tests * Implement the DELETE method for get-me-a-network * Update README to reflect ML2 Exception in Dir Tree * Revert "Fix NoSuchOptError on identity config option lookup" * Use row.uuid as getattr works for inserted row * fullstack: test for IPv6 north-south traffic * Add mechanism driver error details to MechanismDriverError * Make auto allocate cleanup retry * Updated from global requirements * Increase default packet count to 3 in assert\_ping * L2 Agent Extensions handle unimplemented methods * Relocate GRE Db models * docs: Small Open vSwitch devref tweaks * Filter out external networks in NetworksSearchCriteriaTest * Remove useless deprecation warning for tenant\_id * Fix init method for HasStandardAttributes * Imported Translations from Zanata * TrunkStub.trunk\_deleted is called with NULL trunk object * Fix NoSuchOptError on identity config option lookup * Fix bridge assertion error when times are equal * Avoid KeyError when accessing "dns\_name" as it may not exist * Add tool to list moved globals * Introduce ovo objects for network segments * Add
agent-side driver scaffolding for trunk functionality * Revert "Publish segment id in port responses" * Increase rally network/port count and add quotas * Extensions: fix file mode permissions * Update the homepage with developer documentation page * ML2: Lower log level of "Host filtering is disabled" message * Don't create another plugin instance in ML2 tests * Relocate AddressScope DB model * Enable ra on gateway when adding gateway to HA router * Remove override of \_compare\_server\_default in \_TestModelsMigrations * Make callback manager Object Oriented friendly * Switch to pluggable IPAM implementation * Update "devref/quality\_of\_service" with QoS DSCP rule reference * Removing unused file neutron/agent/l3/config * Use only\_contrib option for nova\_client calls * Adjust spacing in METADATA\_PROXY\_HANDLER\_OPTS * Refactoring config options for plugin agent opts * Reduce DB hit when checking for trunk-able ports * Add API test for tag * Added tests for checking expand/contract branch upgrade * Don't pass argument sqlite\_db in method set\_defaults * SR-IOV: deprecate supported\_pci\_vendor\_devs * Add error information for users when value is invalid in database * Implement L3 Agent Extension Manager * Correct floating IP updating with same port\_id issue * Fixed neutron-db-manage without neutron/tests installed * Clean imports in code * Prevent duplicate SG rules in 'concurrent requests' case * Relax bound constraint for trunk parent ports * Fix potential problem in test\_router\_add\_interface\_port * Fix test\_router\_add\_interface\_delete\_port\_after\_failure * Remove stale configuration l3\_agent\_manager * Add RPC layer for Trunk Plugin and driver plumbing * Make auto-allocate plugin handle sneaky DB errors * Broken extensions should not show up in the extension list * Introduce ovo objects for security groups * Add debug option to verify iptables rules * Avoid IPAM driver reusing a session that has been rolled back * Fix neutron\_lib.constants DeprecationWarning from db.models\_v2 * Use dispose\_pool() from oslo.db * Get rid of get\_engine() in db/api.py * models: move AllowedAddressPair model under neutron/db/models * Refactor setting OSprofiler for db calls * Raise 501 instead of 500 when updating meter-label and rule * Updated from global requirements * Constrain remaining tox targets * Check content type by complete match instead of partial match * Allow bound ports to be trunked if the driver can support it * Publish segment id in port responses * Fix some spelling errors in net\_helpers.py * Refactoring config options of l3 agent keepalived * Fix check\_asserttruefalse syntax validator * Relocate Security Group DB models * Add floating IP test to ensure backwards compat * Always start transactions in quota cleanup methods * Refactoring config options for services opts * Fix a spelling error * isolate test\_db\_find\_column\_type\_list * Include db\_models document to avoid errors * Adds a default reload callback to ProcessManager * tests: added missing space in a skip test message * Set secure fail mode for physical bridges * Avoid allocating ports from ip\_local\_port\_range * lb-agent: handle exception when bridge slave already removed * Ensure ML2's create/update\_port methods not in transaction * Add flush command to iproute in ip\_lib * Better utilize the L3 Namespace class * Fix typo in l3-agent namespace code * ovs-agent: Separate VLAN mapping outside of the agent * Updated from global requirements * Check the router gateway IPs prefixlen existence * pep8:
fixed F821 violation in a unit test * Add devref for Relocating DB models * Handle deleted ports when creating a list of fdb entries * Remove unused params named "context" in filter\_attributes * Set bridge\_name in OVS trunk port's vif\_details * ml2: allow retry on retriable db error by precommit * Roll back router intf port update if csnat update fails * Enable DeprecationWarning in test environments * Check target\_tenant when creating rbac policy * Delete HA network if last HA router is migrated * Add linux bridge trunk server side driver * Enable CRUD for Subnet Service Types * Make revision bump robust to concurrent removals * Fix duplicate routerport handling * Cleanup DB retry logic in ML2 * Refactoring config options for extension opts * Refactoring security group config options * Don't use versions in \_DeprecateSubset * Add RouterPort bindings for all HA ports * Log full exception before retry in decorator * L3 agent: check router namespace existence before delete * Consider baremetal device\_owner as compute for nova notify * Delete conntrack entry with remote\_ip on the other direction * Do not remove the HA/legacy router gateway secondary IPs * DHCP Auto Scheduling for routed provider networks * Restore old assert\_ping behavior * DVR: Clean stale snat-ns by checking its existence when agent restarts * Remove neutron-lib warnings * Ensure most of ML2's core methods not in transaction * Add scaffolding for trunk plugin/server-side driver integration * Remove neutron lib warnings from ipv6\_utils * Check for l3 agents count before router update * Updated from global requirements * Introduce state management for trunk resources * Refactoring config options for wsgi opts * Add a space after openvswitch error message * Remove local subports validator * objects: introduce count() API to count matching objects * Rename DB columns: tenant -> project * Fix for creation of network environment twice * Use neutron-lib add\_validator for registration * objects: introduce a util function to handle tenant\_id filter * tests: check that trunk sub\_ports field is properly populated * Fix IndexError in delete\_csnat\_port * Add a unique key to port\_id in routerports table * Refactoring cfg opts for ml2 plugin linuxbridge * Port device events for common agent * Fix module import for ovs\_vsctl\_timeout option * Change external\_network\_bridge default to '' * Fix link reference in OVS agent devref * Support callbacks for L3 plugins without an agent * Remove deprecated default subnetpools * Fixes the midonet test\_l3 unit test failures * fixed a typo in src code * Suppresses a warning when no agents are configured * Introduce bulk push to rpc callback mechanism * Enable sorting and pagination by default * Added API extensions to detect sorting/pagination features * stadium: adopt openstack/releases in subproject release process * L2-Adjacency support * corrected the link in README.rst * Neutron-lib: use the L3\_AGENT\* definitions from neutron-lib * Fix a typo in neutron/services/trunk/rules.py * Refactoring config options of agent/common/ovs\_lib * Add a callback registry event for the init process - before spawning * Refactoring config options for cmd * Don't use file() to write object hashes * Fix L3 NAT DB signature mismatch * Add in missing translations for exceptions * Fix that api\_workers=0 doesn't spawn any api workers * Use revision to discard stale DHCP updates * modify the home-page info with the developer documentation * Filter HA router without HA port bindings after race
conditions * Fix updating allocation\_pools on subnet update * trunk: avoid redundant refetch of subports on create * tests: enable test\_get\_objects\_queries\_constant for trunk ports * Don't use exponential back-off for report\_state * bug tag: Add 'api-ref' for API reference * Add link in README.rst * Set prefix on floating\_ip\_mangle rules * Remove 'released-neutronclient' tag from official bug tags * Increment revision numbers on object changes * Print out specific filter that failed in object filtering unit test * objects: loading synthetic fields from defined ORM relationships * objects: forbid updates for project\_id field for subnets * tests: stop using ml2 plugin full import paths in tests * Add API tests for router and DHCP port status * Skip DHCP provisioning block for network ports * Wait for vswitchd to add interfaces in native ovsdb * Add flavor/service provider support to routers * Add some negative policy router interface tests * Add notifications for trunk CRUD and standardize payload * Refactoring config options for common config opts * Prevent port update from binding a host where IPs won't work * policies: Add tempest tag for launchpad bugs * Fix for check\_vlan\_transparency on mech drivers not called * Refactor DNS integration out of DB core plugin * Fix typo in method description * Filter out subnets on different segments for dhcp * Add information about using file based sqlite for unit tests * Deprecate implicit loading of service\_providers from neutron\_\*.conf * Remove deprecated network\_device\_mtu option * objects: Add update\_fields method in base class * Remove unused code in neutron/agent/linux/utils.py * Pass timeout in milliseconds to timer\_wait * Prohibit deletion of ports currently in use by a trunk * Mark DBConnectionError as retriable * Add subresources support for PECAN * Refactoring config options for l3 agent config * Improve the segmentation ID validation logic * Revisit (add|remove)\_subports request body * objects: Adjust Subnet fields, add tenant\_id and segment\_id * Use is\_valid\_port from oslo.utils * Validate device to mac instead of port id to mac * Updated from global requirements * Don't interrupt device loop for missing device * Enable passive deletes on trunk deletion * Removed smoke tags from \*SearchCriteriaTest test cases * Calculate MTU on every network fetch instead of on create * Fix wait\_until\_true condition in dhcp test * Add callbacks for networks and subnets in ML2 * Check compatibility when auto-scheduling ha routers * Remove execute permission which was added by mistake in midonet * Ensure test\_cleanup\_stale\_devices fails gracefully * Add new attributes to trunk model * Generalize agent extension mechanism * fullstack: Add hybrid plug support * Use db\_api.retry\_db\_errors in quota engine * Update ovsdb release notes re: new OVS ports * objects: better apply filters for objects/db/api/get\_object query * Use DEVICE\_OWNER\_COMPUTE\_PREFIX from neutron-lib * Imported Translations from Zanata * Fix misuse of assertTrue in L3 DVR test case * Pecan: Define plugin crud methods in base class * Fix broken URLs in bugs.rst (core-reviewers.html -> neutron-teams.html) * objects: Convert filters to string for list values * fullstack: Log testrunner again * QoSTest: skip if qos extension is not available * Add support for Python 3.5 * Only ports on routed networks are deferred * Fill in trunk\_details on port resource * Fix a pylint error in an L3 agent unit test * DVR: Fix ItemAllocator class to handle exceptions * Add
RouterPort binding to ha interface creation * objects: Add RBAC to Subnet OVO * Improve cleanup logic for trunk tests * Updated from global requirements * Add retry decorator to dhcp\_ready\_on\_ports * delete event payload * Add function to return all hosts with mapped segments * Handle non-existing network in segment creation 9.0.0.0b2 --------- * Hacking: add unit test for LOG.warn validations * Allow unique keys to be used with get\_object * Add object versioning to QoS DSCP * Replace device owners hard-coded strings with neutron\_lib constants * Add function to remove constraints from database * Add dhcp to Fdb extension's permitted device owners * Use context from\_environ to load contexts * Use from\_dict to load context params * Add a hacking rule for string interpolation at logging * Add check that factory started in dispose * Delete gw port on exceptions * Avoid duplicate ipset processing for security groups * DVR: handle floating IP reassociation on the same host * Refactor usage of dict.values()[0] * qos basic scenario * Check for provisioning blocks before updating port up * Rename dvr portbinding functions * Emit registry events on subport addition/removal * Ensure deferred IP fails when host is provided but no IP allocated * Extension to tell when deferred binding is in effect * Fix typo in message string causing server exception * Deprecate option min\_l3\_agents\_per\_router * Address outstanding TODO for callback event * Allow tox to be run with python 3 * Incorporate tweaks to subport validator * Allow auto-addressed ips deletion on port update * Delete default route if no gateway in external net * Add information about contract creation exceptions in devref * ML2: don't use IntegrityError for duplicate detection * Grammar error fixed * Fixed Typo in contribute.rst * Refactoring config options for dhcp agent * Revert "Support unique labels for alembic branches" * DVR: Ensure that only one fg device can exist at a time in fip ns * New engine facade from oslo\_db: Step 2 * When deleting floating IP catch PortNotFound * Notify nova with network-vif-plugged in case of live migration * Skip TrunksSearchCriteriaTest if the extension is not available * Don't catch DBDuplicate in default SG creation * Catch missing binding record in provision handler * Pull stadium projects from governance.o.o in utility script * exclusive\_resources: use set remove instead of discard * Add an independent function to map segment to hosts * List only admin\_state\_up auto-allocated resources * Change tunnel MTU calculation to support IPv6 * Fix broken link * ML2 remove extra checks in ovs\_dvr\_neutron\_agent * Updated from global requirements * Fixed typos * Fixes a link error * next() is incompatible in test\_network\_ip\_availability.py * Run 'ip netns list' according to 'AGENT/use\_helper\_for\_ns\_read' * Remove unused LOG * Fix order of arguments in assertEqual * Reuse common code in securitygroups\_rpc module * Release note: fix a typo in add-time-stamp-fields * Imported Translations from Zanata * Update the template for model sync test docs * Add sorting and pagination tests for trunk resources * Enable CRUD for trunk ports * OVS-agent: Switch the default to "native" of\_interface * Use tempest.lib tenants\_client * Stable Branch URL Fixed * Support unique labels for alembic branches * create\_router: Report the original exception * ml2: postpone exception logs to when retry mechanism fails to recover * Fix OVSBridge.set\_protocols arg * Create segment\_host mapping after new network * Fix
spelling mistakes in the docs * Adding the appropriate log hints where needed * Lower ML2 message severity * spelling error: modify assocations -> associations in files as follows: neutron/agent/l3/link\_local\_allocator.py:38 * Make create\_object\_with\_dependency cleanup * Restore MySQL and Postgresql functional testing * functional: Use assertItemsEqual for db\_find outputs * Adding FDB population agent extension * pep8: Register checks with their code * sriov: Fix macvtap vf interface regex pattern * Mock threading.Thread to prevent daemon creation by unit tests * Fix some typos * Register the dict extend function when service plugin starts * Remove notification for process event * Add two more callbacks registry events * Do not depend on Python error strings in parse\_network\_vlan\_range() * Fix code that's trying to read from a stale DB object * Remove 'origin/' in OVS\_BRANCH * Only update SegmentHostMapping for the given host * Move Nova notification logic out of API controller * Create segment\_host mapping after new segment * Skip INVALID and UNASSIGNED ofport in vlan restore * objects: introduce NetworkPortSecurity object * objects: Introduce the DNSNameServer OVO in the code * Implementation details to support trunk ports * Move wait\_until\_true to neutron.common.utils * Imported Translations from Zanata * Agent extension: fix comment * enable OVSDB native interface by default * Pecan: Implement pagination * Don't auto schedule router when syncing routers from agent * Updated from global requirements * Remove the deprecated config "quota\_items" * Fix simple typos * Create auto allocated networks in disabled state * Move DHCP notification logic out of API controller * Pecan: move fields and filters logic to hooks * DHCP Agent scheduling with segments * Fixes port device\_id/device\_owner change in failed operation * Remove the deprecated config 'router\_id' * Separate exception class for retriables in callbacks * Revert "OVS: don't throw KeyError when duplicate VLAN tags exist" * Updated from global requirements * Add revision\_number to standard attr for OVO * Check for RetryRequest in MultipleException types * Remove IP availability range recalculation logic * Rename ml2\_dvr\_port\_bindings to make it generic * Override 'create' for Trunk and SubPort * Retry DBDuplicate errors in retry decorator * Complete removing support for unsplit alembic branches * Revert "move import to top and rename to make more readable" * Make pep8 job great again * Disable warnerrors=True in setup.cfg * Move state reporting initialization to after worker forking * Do not rewrite original exception for IPAM part 2 * Change addCleanup create\_tenant to delete\_tenant, fix gate * Obsolete mac\_generation\_retries and deprecate the option * Remove unnecessary flush for duplicate address detection * Fix minor spelling error in debug log * tests: clean up designate client session mock on test exit * Remove unnecessary import from segment plugin * OVS: UnboundLocalError on switch timeout fixed * ovsfw: Fix variable names in UT * ovs: set device MTU after it's moved into a namespace * cache\_utils: fixed cache misses for the new (oslo.cache) configuration * Syntax fix * ml2 lb: do not program arp responder when unused * Remove deprecated TODO notes in L2 agent extension manager * Fix pep8 violations in fullstack qos test * Don't return marker item when paginating backwards * Fix release name in deprecating message for agent status checks * Typo fix * Fix bug with mac generation * Mark port as ready after
enabling dhcp at agent * Add exponential gap to retry decorator * Fix designate dns driver for SSL based endpoints * Update Neutron documentation with \`project\` * Provide DB isolation for WalkMigrations tests * Refactor NetworkDhcpAgentBinding * Tempest: fix a typo * qos: support native sorting/pagination for policies and rules * qos: added api sorting/pagination tests for policies * Check for alembic Add/DropColumn exceptions in migrations * objects: switch base plugin class to using subnetpool object * l3: support native sorting/pagination for routers * Added sorting/pagination tests for routers * Added sorting/pagination tests for subnets * DHCP: delete config option dnsmasq\_dns\_server * Refactor update\_segment\_host\_mapping\_for\_agent for agentless topology * OVS agent: remove unused variables * Updated from global requirements * Remove the plugin import from auto\_alloc extension * Use neutron-lib constants * qos: fix shared filter for policies * objects: Add filter query hook to NeutronDbObject * Extend utils create methods to accept check\_allow\_post * tests: fetch unshared subnetpools only * Cleanup Liberty to Mitaka upgrade corner cases that are no longer necessary * Fixes variable name confusion * Use StringField for tenant\_id fields * Remove useless str() * Add router precommit events * Allow self-unsubscribing callbacks * Add data model for standard attr revision numbers * DVR: Fix allowed\_address\_pair port binding with delayed fip * Update networking-ovn project functionality * Revert "Add index on trunk\_id in the subports model" * Update Neutron server to use only keystoneauth * Make segment aware IPAM compatible with ML2 * Fix of ping usage in net\_helpers.async\_ping() * Remove MAC duplicate detection for generated macs * Pecan: handle single fields query parameter * Compute IPAvailabilityRanges in memory during IP allocation * Clean up subports model * After a migration clean up the floating ip on the source host * Check for mysql SAVEPOINT error in retry decorator * Check for StaleData errors in retry decorator * Use session delete for IPs to trigger events * Fix update of shared QoS policy * Fixed variable shadowing in ipam\_backend\_mixin.py * Add type information to ObjectNotFound message * Add README for api-ref * Use next available address for dhcp ports * objects: in get\_object(s), filter by fields, not model attributes * Refactor the rpc callback version discovery mechanism * Add index on trunk\_id in the subports model * Support for MultipleExceptions in db\_api decorators * Convert multiple exception types in the API * Add sanity check for idempotent ids in tempest tests * Remove some options from neutron-db-manage * tests: added sorting/pagination api tests for subnetpools * Drop neutron/i18n.py in favor of neutron/\_i18n.py * QoS DSCP fullstack tests * Hacking: make use of neutron lib * Return fixed\_ips from port update * Deprecate the functions map after segment moving out of ml2 * Use callback to update SegmentHostMapping * Match filter criteria when constructing URI for href based iteration * Make service plugins' get\_plugin\_type classmethods * OVS: don't throw KeyError when duplicate VLAN tags exist * Revert "DVR: Clear SNAT namespace when agent restarts after router move" * objects: Use common plugin \_model\_query in get\_object * Tox: Remove neutron/openstack/common from excludes list * Fix missing availability\_zone in dhcp and l3 conf * qos: Add API test for shared policy * Imported Translations from Zanata * l3\_db: Make gw port in-use check
overridable * Fix server\_default comparison for BigInteger * Update ml2 delete\_subnet to deallocate via ipam * Make IPAM segment aware on port update * Make sure AZAwareWeightScheduler schedules based on weight of agent * Provide router-id for ROUTER\_INTERFACE/BEFORE\_DELETE event * L3 DB: only read dns 'dns-integration' once * Force "out-of-band" controller connection mode * Devref: Add inline codeblock to upgrade command * oslo.cache: Remove memory alias for cache * devref: remove references to oslo incubator * Revert "Revert "Remove threading before process forking"" * Use IPv[46]\_BITS instead of IPV[46]\_MAX\_PREFIXLEN * Adding basic connectivity scenario to Neutron * Switch to oslo.cache * [TrivialFix] Deprecate unused NeutronController class * remove unused rpc method get\_active\_networks * Added API tests for page\_reverse and href next/previous links * Correct hyperlinks to sub\_projects documentation * DB base plugin: correct typo id to subnet\_id * Devstack support for macvtap agent * Segments: remove log message indicating segments are not supported * Updated from global requirements * Move dragonflow to affiliated project list * Fix getting dhcp agents for multiple networks * Pass ha\_router\_port flag for \_snat\_router\_interfaces ports * objects: stop using internal \_context attribute * Fix get\_free\_namespace\_port to actually avoid used ports 9.0.0.0b1 --------- * DVRHA interfaces should be created in all cases * Make IPAM segment aware on port create * Updated from global requirements * Guard against config\_dirs not defined on ConfigOpts * Macvtap: Allow noop alias as FW driver * Generalise the logic of resource auto rescheduling * objects: support advanced criteria for get\_objects * Fix wrong description in the class DhcpAgentNotifyAPI docstring * Follow effective Neutron guidelines * DB: remove deprecated warnings * Don't use query if network\_ids is empty * Fix two spelling mistakes * ML2: use neutron-lib for PORT\_STATUS\_ACTIVE * Use IPv6\_LLA\_PREFIX from neutron-lib * RPC unit tests: remove UUID future warnings * Use ICMPV6\_TYPE\_\* constants from neutron-lib * Updated from global requirements * fullstack: test for IPv6 east-west traffic * Allow the service plugin to import the extension * functional: fix OVSFW failure with native OVSDB api * ovsdb: Don't skip ovsdb test cases in py34 suite * ovsdb: Don't let block() wait indefinitely * [qos] section is missing from neutron.conf * oslo: remove usage of oslo.log verbose option * Adapt to config\_dir option being a list and not a string * OVS: compare names when checking devices both added and deleted * Do not inherit test case classes from regular Neutron classes * Fix validation of floating-ip association * tests: guarantee unique networks in get\_list\_of\_random\_networks * Pecan: bind attribute map to controllers * Refactor QoS plugin to avoid code duplication * Avoid shadowing the method's port argument * OVO for VLAN aware VMs * tests: cover port with existing sorting/pagination api tests * Allow min\_l3\_agents\_per\_router to equal one * How to support trunk ports with Open vSwitch Agent * Introduce official lib tag for neutron-lib issues * Pecan: tell the plugin about field selection * Add segments to hosts mappings * Remove using PROTO\_NAME\_IPV6\_ICMP\_LEGACY from neutron.common.constants * Prevent adding duplicated sg rules with diff description * Updated from global requirements * Remove remaining BGP code from neutron * DVR: Pings to floatingip return with fixed-ip on same network * Migration for
* Remove the inaccurate help message for min_l3_agents_per_router
* OSProfiler imports break decomposed plugins
* tests: validate sorting and pagination for networks
* Fix unit test for new list of icmpv6 allowed_types
* Use unittest2 uniformly across Neutron
* Call ext_manager.delete_port on port removal
* devref: remove l2 agent extensions steps literal
* Fixed help messages for path_mtu and global_physnet_mtus options
* Trivial fix - NETWORK_TYPE validate
* Change log level when L3 agent is disabled
* Add quota tests in api tests
* Introducing the Subnet OVO
* unit: fix ValueError on TransactionQueue init with py34
* Add OSprofiler support
* fullstack: test for snat and floatingip
* Integrate the port allowed address pairs VersionedObject in Neutron
* Deprecate advertise_mtu option
* Pecan: Backwards compatible/cleaner init
* fix wrong default value of qos.notification_drivers
* Rename _find_router_port_by_network_and_device_owner
* DVR: Fix check multiprefix when delete ipv4 router interface
* fullstack: test for east west traffic
* Update unit tests to use correct method being used
* Deprecate neutron-debug
* functional: Fix ovsdb-monitor waiting for first events
* Fix broken URL in Mitaka Neutron release note
* Remove nested transaction from ipam driver
* Allow for excepted operations in migrations
* functional: Run OVSFW tests with ovsdb native interface
* Mock out poller in ovsdb unittests
* DVR: Moving router from dvr_snat node removes the qrouters
* Pecan: remove deprecation warning
* Update for removing ofagent
* Ensure unit tests don't assume an IP address allocation strategy
* Updated from global requirements
* Segment: remove deprecation warning for converters
* Add negative API tests that try to remove the resources in use
* Respond negatively to tenant detachment of enforced QoS policies
* Removed invalid test due to invalid mocking
* Check if pool update is needed in reference driver
* Remove cliff requirement in test-requirements.txt
* sriov_nic config options were declared under wrong group
* Make deepcopy of update body in API layer
* Refactoring config options for service
* Pecan: Handle hyphenated collection with shims
* Enforce UUID of port/subnet ID for router interfaces
* Make exception translation common and add to pecan
* Remove unhelpful test of oslo.service ServiceLauncher
* Remove a SGs API test following sync to Tempest
* Avoid testing oslo.service library internals
* Restart dsnmasq on any network subnet change
* Add service-type extension to flavors plugin
* Add method to get service provider names by resources
* Enable flavor plugin as a default service plugin
* Add setting default max_burst value if not given by user
* Remove the file i18n.py and other related cleanups
* Fix for 'ofport' query retries during neutron agent start
* Segment extension: remove deprecated warnings
* Add provisioning blocks to status ACTIVE transition
* devref: Cleanup OVS vHost User Guide
* Refactoring config options for quota
* Trivial Fixes for 'sub_project.rst' devref document
* Add retry support to pecan
* Remove openstack-common.conf
* Enable IPAM drivers to allocate from more than one subnet
* Associate subnets to segments through subnet API
* Insert validation with request params for HTTP DELETE
* OVS Mech: Set hybrid plug based on agent config
* Remove deprecated references to attributes
* SR-IOV driver and SimpleAgentMechanismDriverBase
* Delete a magic number of security_group_rule quotas test
* Pecan: skip all notifications on failures
* Remove tools/pecan_server.sh
* DVR: Ensure fpr and rfp devices are configured correctly
* Changing VPNaaS bug contact name
* Check for existence of snat port before deleting it
* Move address scope specific code out of iptables_manager
* Use correct session in update_allocation_pools
* Revise ICMPV6_ALLOWED_TYPES
* Revert "Remove threading before process forking"
* Basic Extension and CRUD for Segments
* Classes lack metaclass decoration
* DVR: Use existing IPDevice to add address on FIP VETH
* Fix SR-IOV binding when two NICs mapped to one physnet
* Add exponential back-off RPC client
* Fix prefix delegation deleting addresses for ports
* Fix the sriov agent config group name
* DVR: Clear SNAT namespace when agent restarts after router move
* Delete post_test_hook.generate_test_logs
* Add logging for some functional tests
* Fix functional tests logging
* Word about notification_driver
* New engine facade from oslo_db: Step 1
* OVS: Add support for IPv6 addresses as tunnel endpoints
* DVR: Fix TypeError in arp update with allowed_address_pairs
* Remove threading before process forking
* Add API to retrieve default quotas
* fullstack: Use noop firewall
* Don't load DNS integration in l3_router_plugin
* Annotate alembic migration for VLAN trunking
* Fix update target tenant RBAC external path
* Preserve backward compatibility with OVS hybrid plugging
* Avoid eventlet monkey patching for the tempest plugin
* Policies: additional go-to contacts for IPv6 bugs
* Added PecanResourceExtension
* Resigning as "go to contact" for DVR bugs
* Pecan: Fix Shim status codes
* Do not rewrite original exception for IPAM
* update networking-odl lieutenant
* Mock mechanism manager instead of the test driver
* add PROTO_NUM_IPV6_ICMP for _validate_port_range
* Resignation from FwaaS duties
* Remove old fwaas remnants from neutron
* agentnotifiers: retried notifications refactor
* Avoid L3 agent termination without server
* Use paginate_query from oslo_db

8.1.0
-----

* Revert "Improve performance of ensure_namespace"
* IPtables firewall prevent ICMPv6 spoofing
* Replaced this with "get-me-a-network"
* Remove BGP code from neutron
* Add device_id index to Port
* Revert "Improve performance of ensure_namespace"
* Updated devref documents for neutron-dynamic-routing
* Data models for vlan-aware-vms
* Pass through setattr to deprecated things
* Use converters and validators from neutron-lib
* Revert "Switch to inheriting hacking checks from neutron-lib"
* DVR: Use IPDevice class consistently
* Use constants from neutron-lib
* Decorate AvailabilityZonePluginBase properly
* Remove functional migration script about external tables
* add networking-fortinet in affiliated projects
* Allow OVSDB Connection to register certain tables
* Switch to inheriting hacking checks from neutron-lib
* Add test that ensures subnet is usable after update
* Use exceptions from neutron-lib
* Revert "ML2: Configure path_mtu to default to 1500 bytes"
* Imported Translations from Zanata
* Object tests: couple of pep8 and list comparison improvements
* Add a missing address-scope extension check
* policies: changed owner for release-subproject tag to @ihrachyshka
* Address Scope added to OVO
* Tests: correcting typo in get_random_cidr
* Updated from global requirements
* Mark remaining negative subnetpool tests with 'negative' tag
* Removed smoke tags from all -api job tests
* Remove two exceptions already in neutron-lib
* Correct the interval between two reports
* Don't log warning for missing resource_versions
* Imported Translations from Zanata
* tempest: enable hacking rule checks
* Return a 404 on quota delete if entry not found
* Move class properties to instances for dhcp/test_agent.py
* Updated from global requirements
* LinuxBridge agent's QoS driver bw limit for egress traffic
* Add check that external gw port exist when metering-agent adds a rule
* Port allowed address pairs to OVO
* Neutron objects test: correct the db_obj to obj_fields where needed
* Fetch router port subnets in bulk
* QUOTAS: add in missing space to help text
* Revert "Deprecate 'ovs_use_veth' and 'veth_mtu' options"
* DHCP: remove update_lease_expiration
* Imported Translations from Zanata
* release note to deprecate prevent_arp_spoofing option
* Modify an option for delete-port as recovery
* Don't update DHCP ports in a transaction
* Use switch-case instead of if-then-elif
* Match job name for scenario tests
* Clear DVR MAC on last agent deletion from host
* Move class properties to instances for dhcp tests
* Optimize get_ports query by filtering on subnet
* Avoid calling _get_subnet(s) multiple times in ipam driver
* Be explicit about the APIs tested in the gate
* Move away from invoking api tests explicitly
* Updated from global requirements
* Add an option for WSGI pool size
* Make gate-hook structure more readable and extensible
* Don't disconnect br-int from phys br if connected
* Refactor test_ipam functional testing module
* Avoid referencing code from master branch
* Support interface drivers that don't support mtu parameter for plug_new
* Use tempest plugin interface
* Add 169.254.169.254 when enable force_metadata
* Fix deprecation warning for external_network_bridge
* Add ALLOCATING state to routers
* Change wrong word "propogated" to "propagated"
* Delete conntrack entry on the other direction
* Skip l2_ovs_agent functional tests
* Added initial release check list
* Skip firewall blink test for ovs-fw
* Fix doc build if git is absent
* Cleanup stale OVS flows for physical bridges
* Updated from global requirements
* Revert "Add 169.254.169.254 when enable force_metadata"
* Fix broken Tempest conf options in API tests
* Add functional test for device_exists() with invalid namespace
* Add an option for WSGI pool size
* Add support for building ovs from git sources on stacking
* Make tunnel_sync logging less verbose
* Notify resource_versions from agents only when needed
* ADDRESS_SCOPE_MARK_IDS should not be global for L3 agent
* Wrap all update/delete l3_rpc handlers with retries
* Cleanup override of compare_type from oslo_db
* Notify resource_versions from agents only when needed
* Values for [ml2]/physical_network_mtus should not be unique
* Use new DB context when checking if agent is online during rescheduling
* Remove Openflow Agent(OFAgent) mechanism driver
* Add RECLONE support for ovs
* ovsfw: Load vlan tag from other_config
* ovsfw: Load vlan tag from other_config
* Reuse to_utf8() and jsonutils.dump_as_bytes()
* Postgresql: add do_drop arg in alter_enum function
* Imported Translations from Zanata
* firewall: don't warn about a driver that does not accept bridge
* Add uselist=True to subnet rbac_entries relationship
* Add 169.254.169.254 when enable force_metadata
* RPC: remove unused parameter
* Fullstack connectivity: test interface scenario added
* Only load timestamp service plugin in timestamp tests
* Deprecate 'ovs_use_veth' and 'veth_mtu' options
* Get rid of unnecessary _ha_routers_present check
* Clear DVR MAC on last agent deletion from host
* Iptables firewall prevent IP spoofed DHCP requests
* ovo: Introduce standard attributes to objects
* Tempest 11.0.0 is released
* Fix race conditions in IP availability API tests
* Switched from fixtures to mock to mock out starting RPC consumers
* Imported Translations from Zanata
* Imported Translations from Zanata
* Use new DB context when checking if agent is online during rescheduling
* Add quota tests in unit tests
* Refactor IPAM ip allocation method
* Fix zuul_cloner errors during tox job setup
* When checking if there is a duplicate rule ignore the id field
* Revert "Add instrumentation devref, Part I"
* Return oslo_config Opts to config generator
* Refactor and fix dummy process fixture
* Switches metering agent to stateless iptables
* Remove obsolete keepalived PID files before start
* Add IPAllocation object to session info to stop GC
* Ensure metadata agent doesn't use SSL for UNIX socket
* Fix Windows IPDevice.device_has_ip racefulness
* Switched from fixtures to mock to mock out starting RPC consumers
* Values for [ml2]/physical_network_mtus should not be unique
* Fix regexp for ss output
* Integrate the Extra Dhcp Opt VersionedObject in Neutron
* ADDRESS_SCOPE_MARK_IDS should not be global for L3 agent
* Move all link-local cidr constants to a central location
* DVR: Increase the link-local address pair range
* Instantiate Enum() rather than subclassing
* Fix keepalived functional tests
* Always call ipam driver on subnet update
* objects: avoid validation in stringify for custom field types
* Remove unused PolicyFileNotFound exception
* Add custom OVO field type for MAC address
* ovo: Update object with data from db on update()
* Add ALLOCATING state to routers
* Fix race conditions in IP availability API tests
* DVR: Increase the link-local address pair range
* Make auto allocate plugin safe for unit/func tests
* Define context.roles with base class
* Ignore timestamp fields in model update
* standard attributes: expose created_at/updated_at on models
* Updated from global requirements
* SG protocol validation to allow numbers or names
* Define localstatedir for ovs compilation
* L3 agent: match format used by iptables
* Partial revert "DVR: Fix issue of SNAT rule for DVR with floating ip"
* Add semaphore to ML2 create_port db operation
* OVS: Add mac spoofing filtering to flows
* Use right class method in IP availability tests
* Don't drop 'protocol' from client supplied security_group_rule dict
* L3 agent: match format used by iptables
* Make L3 HA interface creation concurrency safe
* Updated from global requirements
* ovo: Make return values from objects db api consistent
* ovsfw: Remove vlan tag before injecting packets to port
* Fix invalid mock name in test_ovs_neutron_agent
* lb: interface name hashing for too long vlan interface names
* Imported Translations from Zanata
* QoS DSCP use mod_flow instead of mod_flows
* Change get_root_helper_child_pid to stop when it finds cmd
* API tests: Check extensions with proper functions
* Simplify chained comparison
* Improve handle port_update and port_delete events in ovs qos agent
* Cleaned up tox_install.sh
* devref: Update ovs-firewall
* ovsfw: Remove vlan tag before injecting packets to port
* Updated from global requirements
* Use right class method in IP availability tests
* Fix zuul_cloner errors during tox job setup
* Increase ports per network and add SLA for rally
* test_network_ip_availability: Skip IPv6 tests when configured so
* Iptables firewall prevent IP spoofed DHCP requests
* objects: SubnetPool, SubnetPoolPrefix
* Don't disconnect br-int from phys br if connected
* De-dup user-defined SG rules before iptables call
* Ensure bridge names are shorter than max device name len
* Retry updating agents table in case of deadlock
* Improve error when exclusive resource allocation fails
* Cleanup stale OVS flows for physical bridges
* Avoids logging error on ML2 OVS agent start
* Allow to use several nics for physnet with SR-IOV
* Log fullstack allocation and release of exclusive resources
* Hacking rule to check i18n usage

8.0.0
-----

* Support Routes==2.3
* Rename first argument to extend_dict hook correctly
* port security: gracefully handle resources with no bindings
* Switches metering agent to stateless iptables
* Wrap all update/delete l3_rpc handlers with retries
* Allow to use several nics for physnet with SR-IOV
* Fix error format message
* Ignore exception when deleting linux bridge if doesn't exist
* Refactor and fix dummy process fixture
* Don't delete br-int to br-tun patch on startup
* Constraint requirements using mitaka upper-constraints.txt file
* functional: Update ref used from ovs branch-2.5
* Imported Translations from Zanata
* Don't delete br-int to br-tun patch on startup
* Delete Devref Enable Service for Network IP Availability
* DVR: rebind port if ofport changes
* Support Routes==2.3
* ovs-fw: Mark conntrack entries invalid if no rule is matched
* l3: Send notify on router_create when ext gw is specified
* l3: Send notify on router_create when ext gw is specified
* Remove obsolete keepalived PID files before start
* Imported Translations from Zanata
* SG protocol validation to allow numbers or names
* Linux Bridge: Add mac spoofing filtering to ebtables
* Remove redundant FLOATINGIPS declaration
* Retry updating agents table in case of deadlock
* Add fullstack cross-process port/ip address fixtures
* Make L3 HA interface creation concurrency safe
* Imported Translations from Zanata
* Effective Neutron: some notes about deprecation
* test_network_ip_availability: Skip IPv6 tests when configured so
* api tests: Check correct extensions
* DVR: rebind port if ofport changes
* Remove deprecated methods from migration CLI file
* Fix typos in Neutron documentation
* Make network segment table available for standalone plugin

8.0.0.0rc2
----------

* api tests: Check correct extensions
* devref: Remove stale description about network_ip_availability
* Imported Translations from Zanata
* Add db migrations test framework with data
* Remove unnecessary executable permissions
* Removes redundant "and", "this" and "the"
* Tweak process to improve handling of RFEs
* port security: gracefully handle resources with no bindings
* Fix setting peer to bridge interfaces
* Add fullstack connectivity test with ovsdb native interface
* firewall: don't warn about a driver that does not accept bridge
* Fix fullstack LB agent not connecting to rabbit
* Ignore exception when deleting linux bridge if doesn't exist
* Fix setting peer to bridge interfaces
* Note fullstack depends on released neutronclient
* Updated from global requirements
* Skip fullstack L3 HA test
* Imported Translations from Zanata
* Clean imports in code
* Add OVS flow tests
* Adopt oslo.log default_log_levels
* Common utils: remove deprecated methods
* Fix test failure against latest oslo.* from master
* Fix reference to uninitialized iptables manager
* AGENTS: remove deprecated methods
* Fix reference to uninitialized iptables manager
* DVR: Fix issue of SNAT rule for DVR with floating ip
* Remove deprecated method from agent utils file
* ovs-fw: Mark conntrack entries invalid if no rule is matched
* Add fullstack logging
* Fix alembic 'No handlers could be found for logger...'
* Move db query to fetch down bindings under try/except
* Close XenAPI sessions in neutron-rootwrap-xen-dom0
* Watch for 'new' events in ovsdb monitor for ofport
* conn_testers: Bump timeout for ICMPv6 echo tests
* Port Security added to OVO
* Adds Enum fields for different types
* Removes host file contents from DHCP agent logs
* Removing 'force_gateway_on_subnet' option
* Imported Translations from Zanata
* Fix a couple of problems with random hex string generation
* Fix latest doc error
* milestone-review-dash detect current milestone
* Remove deprecated methods from common file
* DB: remove method _get_tenant_id_for_create
* use separate device owner for HA router interface
* QOS: Provide get methods for policy & network/port binding
* Fix spelling mistake
* Fixes typo
* Imported Translations from Zanata
* functional: Update ref used from ovs branch-2.5
* neutron-db-manage: revision: fail for --autogenerate and branch
* Enforce alembic branch when creating scripts manually
* Ovs agent can't start on Windows because of validate_local_ip
* Fix wrong use of list of dict in _check_driver_to_bind
* Fixed typos in wsgi.py
* Imported Translations from Zanata
* objects: introduce object for extra dhcp options
* Imported Translations from Zanata
* Ovs agent can't start on Windows because of validate_local_ip
* Update reno for stable/mitaka
* Skip fullstack L3 HA test
* objects: minor cleanup in base class
* Close XenAPI sessions in neutron-rootwrap-xen-dom0
* Remove local variable named 'meh'
* Move qos_dscp migration to the Newton alembic branch
* Add Newton branch to neutron-db-manage
* Accept icmpv6 as protocol of SG rule for backward compatibility
* conn_testers: Bump timeout for ICMPv6 echo tests
* DSCP QoS rule implementation
* objects: fixed base to_dict implementation
* Updated from global requirements
* Watch for 'new' events in ovsdb monitor for ofport
* Update devstack plugin for dependent packages
* Move db query to fetch down bindings under try/except
* Accept icmpv6 as protocol of SG rule for backward compatibility
* Outerjoin to networks for port ownership filter
* Imported Translations from Zanata
* Update devstack plugin for dependent packages
* Remove test_external_network_visibility
* Outerjoin to networks for port ownership filter
* Update .gitreview for stable/mitaka

8.0.0.0rc1
----------

* tests: register all objects before validating their hash versions
* Handle synthetic fields in NeutronDbObject
* Prevent all primary keys in Neutron OVOs from being updated
* Add uselist=True to subnet rbac_entries relationship
* De-dup conntrack deletions before running them
* Imported Translations from Zanata
* Fix auto_allocated_topology migration with PostgreSQL
* Removes host file contents from DHCP agent logs
* Fix add_is_default_to_subnetpool migration
* Add custom SQLAlchemy type for MACAddress
* DVR: Handle unbound allowed_address_pair port with FIP
* Improve release notes for dvr fixes
* Use context manager in bind NetworkDhcpAgent
* Add custom SQLAlchemy type for CIDR
* Add custom SQLAlchemy type for IP addresses
* Fixes force to set ondelete=CASCADE in create_foreign_keys()
* Fixes "OVS Agent doesn't start on Windows"
* RBAC: Fix port query and deletion for network owner
* DVR: Agent side change for live migration with floatingip
* DVR:Pro-active router creation with live migration
* Return oslo_config Opts to config generator
* Update testing coverage document
* devref doc config option separation
* Added test cases for DVR L3 schedulers
* Update Neutron with temporary registry pattern from VersionedObjectRegistry
* Reset RNG seed with current time and pid for each test started
* Create a hook in base object to modify the fields before DB operations
* Add API test ensure tenant can't delete other ports
* Add IPAllocation object to session info to stop GC
* SG PRECOMMIT_CREATE should be triggered only once
* fullstack: use SIGTERM when stopping ovs agents
* Extend dicts with original model in create/update
* Imported Translations from Zanata
* Fix dict.keys() incompatibility in py34 in dhcp test
* Update network object in DHCP agent with router interface changes
* Block delete_(network|subnet) transactioned calls
* Imported Translations from Zanata
* ADD API tests for network ip availability
* Pecan: Allow unauthenticated version listing
* L3HA: Do not wrap create/delete in transaction
* Add metrics notifier to Pecan
* Fix latest doc errors that crept in
* Add remote vs local FAQ for callbacks
* Revise deputy instructions to include deprecation warnings
* Add deprecation tag
* Remove test_external_network_visibility
* register the config generator default hook with the right name
* Stops update_network handling updates it shouldn't
* Fix PUT tag failure
* Remove unused pngmath Sphinx extension
* fullstack: increase test timeout
* DHCP: Downgrade 'network has been deleted' logs
* Fix the context passed to get_subnets in _validate_routes
* Add reno for deprecation of max_fixed_ips_per_port
* ML2: Downgrade 'no bound segment' warning
* Delete 118~ API tests from Neutron
* Using LOG.warning replace LOG.warn
* policies: add an official 'l3-bgp' bug tag
* linuxbridge: added missing space in option help text
* Check tap bridge timestamps to detect local changes
* Remove unused Tempest AdminManager
* Construct exceptions before passing to retryrequest
* Copy tempest.common.tempest_fixtures in to Neutron
* Queries for DVR-aware floating IP next-hop lookups
* Adds unit tests for external DNS integration
* Fixes external DNS driver failure with Python 3.4
* Updates external DNS service if IP address changes
* Add logging statements to help debug L3 sync
* Only clear dns_name when user specifies parameter
* Catch DB reference errors in binding DVR ports
* Add BGP Callback and agent RPC notifcation implementations
* Set DEFAULT_NETWORK_MTU to 1500 and use it
* Downgrade network not found log in DHCP RPC
* Downgrade "device not found" log message
* Add global_physnet_mtu and deprecate segment_mtu
* Ensures DNS_DRIVER is loaded before it is checked
* Add Nova notifier hook calls to pecan
* Add fip nat rules even if router disables shared snat
* Add timestamp changed-since for core resources
* Security groups: ensure correct ID is passed to exception
* Pecan routing for agent schedulers
* Use testscenarios for OVS flow tests
* Tag the alembic migration revisions for Mitaka
* Remove unused -constraints tox targets
* constraints: fixed typo in tox_install.sh
* security-groups: Add ipv6 support to ovs firewall
* Fix tempest lib import in API tests
* Delay description association proxy construction
* Release notes: fix broken release notes
* Fix API test for external subnet visibility
* Release notes: prelude items should not have a - (aka bullet)
* Use floating IP to connect different address scopes
* Add a description field to all standard resources
* Add timestamp for neutron core resources
* Skip racey BGP tests
* Continue the fwaas decoupling and cleanup
* Remove obsolete todo
* Nit: Occurances of OpenStack
* Make all tox targets constrained
* reno: Fix bad yaml in reno that ruins html output
* Mock out database access for QoS policy object interface tests
* Fix branch order when upgrading to alembic milestone
* Fix pecan collection->plugin map for extensions
* Autogenerate macvtap agent config file
* Updates to Bug Deputy section of Bugs DevRef
* hacking: remove oslo.* import check
* devref: added details on the new l2 agent API mechanism
* Revert "Revert "Functional test for address scope""
* Correct Pecan extensions test

8.0.0.0b3
---------

* Add Queries For BGP Route Lookups
* Fix docs tox target for local runs
* Improve logging for port binding
* Allow auto-allocate's dry-run API call to work for regular users
* Make OVS agent tunnel configuration logging less scary
* make/update_subnetpool_dict call _dict_extend
* Check if plugin supports starting rpc listeners
* Make run_ofctl check for socket error
* unbreak unit test caused by c5fa665de3173f3ad82cc3e7624b5968bc52c08d
* Add filter for resource tag
* Add tag mechanism for network resources
* Make API framework more flexible for various extensions
* Moved CORS middleware configuration into oslo-config-generator
* Objects DB api: added composite key to handle multiple primary key
* IP Availability: remove unused imports
* BGP: remove unnecessary configuration setting
* Add support for QoS for LinuxBridge agent
* RPC Callback rolling upgrades reporting, and integration
* Set veth_mtu default to 9000
* Provide dry-run flag to validate deployment requirements
* Use network RBAC feature for external access
* Deprecate network_device_mtu
* Catch DBDuplicateEntry errors in RBAC code
* DVR:Remove unwanted check in _get_dvr_service_port_hostid
* Make agent interface plugging utilize network MTU
* Removed static reference to LinuxBridge in logging
* Add API extension for reporting IP availability usage statistics
* Updated from global requirements
* Filter HA routers without HA interface and state
* Translations: use neutron._18n instead of neutron.18n
* Collect details on ARP spoof functional failures
* Revert "Functional test for address scope"
* Remove effectively empty directories
* Added agent specific API support to L2 extensions
* Qos policy RBAC DB setup and migration
* macvtap: Macvtap L2 Agent
* ML2: Increase segment_mtu from 0 to 1500 bytes
* Switch to using in-tree tempest lib
* Catch DBReferenceError in HA router race conditions
* Catch PortNotFound after HA router race condition
* Change the exception type from ValueError to IpamValueInvalid
* Fix test_get_device_id() failure on OSX
* Make __table_args__ declarative in RBACColumns
* Fix tox -e docs
* Override addOnException to catch exceptions
* BGP Dynamic Routing: introduce BgpDriver
* Update default gateway in the fip namespace after subnet-update
* Update docstring in test/tools.py
* Pecan: filter items need type conversion
* Pecan: use reservations in quota enforcement hook
* Add use_default_subnetpool to subnet create requests
* Ensure DVR unit tests use '/tmp' directory
* API test for get-me-network
* ovs-fw: Enhance port ranges with masks
* Fix sanity check --no* BoolOpts
* Correlate address scope with network
* Fix generate_records_for_existing in migrations
* Revert "tests: Collect info on failure of conn_tester"
* Updated from global requirements
* Revert the unused code for address scope
* Deprecate 'force_gateway_on_subnet' configuration option
* Fix 'TypeError: format requires a mapping' in OVS agent
* Allow non-admins to define "external" extra-routes
* Don't assume simplejson in pecan exception catch
* IPAM: add missing translation
* Functional test for address scope
* deprecated: Raise message
* Allow address pairs to be cleared with None
* Document the ability to load service plugins at startup
* .testr.conf: revert workaround of testtools bug
* Add fullstack resources for linuxbridge agent
* Pecan: get rid of member action hook
* Pecan: replace dashes with underscores on controller lookup
* Fix for adding gateway with IP outside subnet
* Allow other extensions to extend Securitygroup resources
* Adopt Grafana to plot Neutron Failure Rates
* BGP Dynamic Routing: introduce BgpDrAgent
* Add missing character
* stadium: revise the introduction to the document
* stadium: Add a guideline about contributor overlap
* Security group: use correct logging format
* Update devstack hooks to work with pecan jobs
* Fix typo error for wrong msg format when CallbackFailure
* Stop using non-existent method of Mock
* Fix GROUP BY usage for PostgreSQL in migrations
* Add bug tag for auto allocated topology
* macvtap: ML2 mech driver for macvtap network attachments
* Don't disable Nagle algorithm in HttpProtocol
* Preserve subnet_create behavior in presence of subnet pools
* Open vSwitch conntrack based firewall driver
* Add VLAN tag info to port before applying SG initial setup
* QOS: get rid of warnings for unit tests
* Remove NEC plugin tables
* DHCP: release DHCP port if not enough memory
* Cleanup unused conf variables
* Make DHCP agent scheduler physical_network aware
* Deprecate ARP spoofing protection option
* tests: Use constants for icmp and arp in conn_testers
* Add to the neutron bug deputy directions
* L3 agent: log traceback on floating ip setup failure
* Add the rebinding chance in _bind_port_if_needed
* Pecan: implement DHCP notifications in NotifierHook
* Pecan: Always associate plugins with resource
* Remove deprecation warnings
* Get rid of UnionModel for RBAC
* Add necessary executable permission
* Updated from global requirements
* Add precommit_XXX event for security group and rules
* Give the qos extension a friendly name
* tests: Collect info on failure of conn_tester
* Address masking issue during auto-allocation failure
* Fix typo 'indepedent' in alembic_migration.rst
* BGP Dynamic Routing: introduce BgpDrScheduler model
* macvtap: Common functions and constants
* Fix typo of dnsmasq
* add arp_responder flag to linuxbridge agent
* Switch "dsvm-functional:" into same pattern as constraints
* Add BGP Dynamic Routing DB Model and Basic CRUD
* fullstack: Gracefully stop neutron-server process
* Remove VPN installation plumbing
* Remove vpnaas tests from the Neutron API tree
* Make netns_cleanup to purge resources of selected agent only
* Add extension requirement in port-security api test
* ML2: delete_port on deadlock during binding
* Start using neutron-lib for shared constants and exceptions
* Remove fwaas tests from the Neutron API tree
* Remove office-hours from Polcies docs index
* Add the ability to load a set of service plugins on startup
* ML2: Configure path_mtu to default to 1500 bytes
* Support MTU advertisement using IPv6 RAs
* Pecan: wrap PUT response with resource name
* Pecan: Controller and test refactor
* stadium: Add a guideline related to project scope
* stadium: Propose kuryr as an independent project
* stadium: Separate proprietary interface projects
* stadium: Add python-neutron-pd-driver
* stadium: Group lbaas repos together
* Remove PTL office hours
* Bring back dvr routers autoscheduling
* Fix getting agent id in linuxbridge agent
* RPC Callback rolling upgrades logic
* OVO common enum class for IPv6 modes
* Move check_dvr_serviceable_ports_on_host() to dvr scheduler
* L3: enable plugin to decide if subnet is mandatory
* Implement 'get-me-a-network' API building block
* Test helpers to facilitate testing BGP dynamic routing
* Fix logging error for Guru Meditation Report
* HA for DVR - Neutron Server side code changes
* IP_LIB: fix indentations
* idlutils: add in missing translations
* sub_project_guidelines: Document the procedure to make a branch EOL
* sub_project_guidelines: Remove "add tarball to launchpad" step
* sub_project_guidelines: Update after direct-release default
* Only restrict gateway_ip change for router ports
* Make add_tap_interface resillient to removal
* Updated from global requirements
* Remove flavors from the list of extensions provided by core plugin
* Log warning message if get_subnet_for_dvr fails
* devstack: Fix check for blank prefix arg
* ML2: Call _dict_extend in create_(net|port) ops
* Pecan: add tenant_id to quota resource
* Prevent binding IPv6 addresses to Neutron interfaces
* Moving Common Agent into separate module
* Add hacking check for assertEqual HTTP code
* Pecan: Fix association of plugins with resources
* Add missing periods
* Postpone heavy policy check for ports to later
* LB agent: Downgrade network not available message
* Imported Translations from Zanata
* Call Ryu's clean up function when ovs_neutron_agent.main terminates
* Protect 'show' and 'index' with Retry decorator
* Update related router when subnetpool change scope
* Only ensure admin state on ports that exist
* stadium: Update list of Neutron sub-projects
* ML2: Update help text for path_mtu
* Correct dev documentation for has_offline_migrations command
* Reno note regarding OVS DVR agent failure on startup
* Fix regression in routers auto scheduling logic
* Compile OVS for functional tests
* Trigger dhcp port_update for new auto_address subnets
* Correction of spelling
* Get tempest via zuul-cloner if needed and it is available
* Fix typo in SecurityGroup HTTP error message
* DHCP: fix regression with DNS nameservers
* Add address scopes support to the L3 agent
* Get rid of marshall_fdb_entries
* Correct insufficient name for external process in manager log
* Fix port relationship for DVRPortBinding
* Fix params order in assertEqual
* Address i18n related deprecation warnings
* nova-notifier: Change warning to debug
* Warn about a gotcha in the sub-project process
* ML2: update port's status to DOWN if its binding info has changed
* Fix remove_router_from_l3_agent for 'dvr' mode agent
* DHCP: add in missing space at the end of the line
* Fix bug when enable configuration named dnsmasq_base_log_dir
* DVR: avoid race on dvr serviceable port deletion
* Remove bindings of DVR routers to L3 agents on compute nodes
* Only prevent l3 port deletion if router exists
* Unmarshall portinfo on update_fdb_entries calls
* Remove dead method delete_dvr_port_binding
* SR-IOV: Agent remove listen to network delete event
* Use a thinner try/except in _build_cmdline_callback
* Fail if required extensions are missing
* Add UniqueConstraint in L3HARouterAgentPortBinding
* Delete Tempest fork, import from tempest and tempest_lib
* Add relationship between port and floating ip
* Update translation setup for neutron subprojects
* Fix required extensions mix-up
* Uniquely identify tunnel interfaces for fullstack tests
* DVR: Remove unwanted call to _get_routers while arp update
* lb: ml2-agt: Separate AgentLoop from LinuxBridge specific impl
* item allocator should return same value for same key
* Set default value for dnsmasq_local_resolv to False
* Improve autonested_transaction
* Rename confusing dvr_deletens_if_no_port
* Bump AgentExtRpcCallback version to 1.1
* Raise RetryRequest on policy parent not found
* create_object should not add an ID if not present in the DB model
* Add generated port id to port dict
* Updated from global requirements
* Support api_workers option when using pecan
* Elevate context for router lookups during floating IP association
* Update alembic migration documentation
* Add separate transport for notifications
* Neutron review tool use message instead of topics for bugs
* Increase default IPv6 router advertisement interval
* Remove stale ofport drop-rule upon port-delete
* Updated from global requirements
* l3_db: Check dns-integration extension
* Add dns_db to models/head.py
* devref: Fix a typo in i18n.rst
* Replace exit() by sys.exit()
* Add missing index entry for external dns integration
* l2pop rpc: Add a unit test for dualstacked port
* Pecan: remove deprecated warning
* RPC worker support for pecan server
* Don't decide web_framework before config parse
* Remove unwanted NOTE from dvr_local_router
* DVR: Fix Duplicate IPtables rule detected warning message in l3agent
* Make advertisement intervals for radvd configurable
* Fix module's import order
* neutron-db-manage: add has_offline_migrations command
* Add popular IP protocols for security group
* Decorate methods in ExtensionDescriptor with abstractmethod
* Updated from global requirements
* Remove obsolete plugin stuff
* External DNS driver reference implementation
* Move helper methods to create resorces to test_base
* db_api: handle db objects that don't have 'id' as primary key
* Introduce new queries to return DVR routers for a host
* Refactor remove_router_interface() for DVR
* sriov-mech: Introduce a new VIF type for PF vnic type
* Ensure that tunnels are fully reset on ovs restart
* OVS agent should fail if it can't get DVR mac address
* Python3: Fix using dictionary keys() as list
* Add network_update RPC into SR-IOV agent
* Add L3 Notifications To Enable BGP Dynamic Routing
* Fix check in _validate_ip_address() to fail on OSX
* Remove floatingip address only when the address has been configured
* Use tools_path/venv environment variables in install_venv
* fix _validate_shared_update for dvr router ports
* Rename new_network to new_network_id
* DVR: Add action specific functions for arp_entry_update
* Fixed qos devstack service name (should be q-qos) for -plus gate hook
* bump the min tox version to 2.3.1
* Updated from global requirements

8.0.0.0b2
---------

* Add more log when dhcp agent sync_state
* Imported Translations from Zanata
* Fix docstring for check_dvr_serviceable_ports_on_host
* Fixes typos Openstack -> OpenStack
* Add tests for RPC methods/classes
* OVS agent set max number of attempts to sync failed devices
* Don't sync all devices when some fail
* Make Neutron attempt to advertise MTUs by default
* Optimize get_ports_on_host_by_subnet() dvr rpc handler
* Do not remove router from dvr_snat agents on dvr port deletion
* Make object creation methods in l3_hamode_db atomic
* Remove dead method _get_router_ids
* DVR: Optimize getting arp entry info
* Add support for neutron-full and api jobs using pecan to gate_hook
* Updated from global requirements
* Move L2populationDbMixin to module-level functions
* Fix L3 HA with IPv6
* Fix the duplicated references
* Add opnfv tag to the list of auto-complete tags
* Refactor router delete processing
* Revert "Change function call order in ovs_neutron_agent."
* Remove LinuxBridge manager get_local_ip_device arg
* devref: added more details on rolling upgrade for notifications
* Filter by device_owner instead of iterating by all subnet ports
* Make security_groups_provider_updated work with Kilo agents
* Introduce new query to return all hosts for DVR router
* fix get_ha_sync_data_for_host for non-dvr agent
* dhcp: handle advertise_mtu=True when plugin does not set mtu values
* Retry port create/update on duplicate db records
* doc: Update Ryu Ishimoto's IRC nick
* Make neutron pecan server an option instead of binary
* DVR: when updating port's fixed_ips, update arp
* Fix Linux bridge test_report_state_revived failure on OSX
* Prevent PD subnets with incorrect IPv6 modes
* Added Keystone and RequestID headers to CORS middleware
* Unify exceptions for assign router to dvr agent
* Unify using assertIsInstance
* HACKING: update HACKING.rst file to include latest changes
* devstack: use stevedore entry point for flavor service plugin
* Do not prohibit VXLAN over IPv6
* Updated from global requirements
* tests: stop validating neutronclient in neutron-debug tests
* Remove 'validate' key in 'type:dict_or_nodata' type
* ML2: verify if required extension drivers are loaded
* Add --dry-run mode to code review abandon tool
* Fix typo in test path in Testing.rst
* Fix floatingip status for an HA router
* Fix URLs for pep8, and unit tests jobs
* Static routes not added to qrouter namespace for DVR
* Pass environment variables of proxy to tox
* Pecan: fix quota management
* Pecan: Fixes and tests for the policy enforcement hook
* gate_hook: add support for dsvm-plus job type
* Scope get_tenant_quotas by tenant_id
* Add 'ovs' to requirements.txt
* Fix params order in assertEqual
* Use admin context when removing DVR router on vm port deletion
* eliminate retries inside of _ensure_default_security_group
* Register RA and PD config options in l3-agent
* Provide kwargs for callback abort
* Pecan controller loads service plugins
* Make sure datapath_type is updated on bridges changed
* Log INFO message when setting admin state up flag to False for OVS port
* Fix regression with unbound ports and l2pop
* L3 agent: paginate sync routers task
* Remove duplicate for check_ports_exist_on_l3agent
* ML2: Simplified boolean variable check
* Pecan: Streamline request body processing
* make floating IP specification test robust to races
* Fix get_subnet_for_dvr() to return correct gateway mac
* Updated from global requirements
* Ensure agent binding modules are loaded
* portbindings: use constants for extension keys
* Add README with links on how to create release notes
* Ensure that decomposed plugins do not break
* LBaaS tests code removal
* Make neutron-debug command follow cliff command convention
* Rename _get_vm_port_hostid in dvr to reflect the right functionality
* DVR: Rename dvr_vmarp_table_update
* Remove unnecessary argument in limit manage
* remove openstack-common.conf
* Move notifications before DB retry decorator
* Create a routing table manager
* Fix uuid passing in disable_isolated_metadata_proxy
* Fix incorrect classmethod declaration
* Add unit test cases for linuxbridge agent when prevent_arp_spoofing is True
* Adopt oslotest BaseTestCase as a base class for DietTestCase
* Use oslo.utils.reflection extract the class name
* Utils: Add missing translation to exception
* Unify assertEqual for empty usages
* SR-IOV: Fix macvtap assigned vf check when kernel < 3.13
* Delete metadata_proxy for network if it is not needed
* Remove references to model_base through models_v2
* Allow get_unused_ip method to skip v6 and fix iter
* Revert "Revert "Revert "Remove TEMPEST_CONFIG_DIR in the api tox env"""
* Fix meter label rule creation
* Remove l2pop _get_port_infos method
* Remove L2populationDbMixin parent
* devstack: don't enable qos service with the plugin
* Add test for Neutron object versions
* SR-IOV agent: display loaded extensions
* Imported Translations from Zanata
* Allow tox to pass more arguments to ostestr command
* Add systemd notification after reporting initial state
* Avoid duplicating tenant check when creating resources
* Add extension_manager and support for extensions in linuxbridge agent
* Fix API tests
* Rule, member updates are missed with enhanced rpc
* radvd prefix configuration for DHCPV6_Stateful RA
* DVR: Rename dvr_update_router_addvm function
* Support rootwrap sysctl and conntrack commands for non-l3 nodes
* Remove openstack.common._i18n from Neutron
* Kilo initial migration
* Check missed ip6tables utility
* Keep py3.X compatibility for urllib
* Updated from global requirements
* Misspelling in message
* Mitigate restriction for fixed ips per dhcp port
* dhcp: Default to using local DNS resolution
* Fixing the deprecated library function
* Remove unused variable use_call in ovs-agent
* Wrong usage of "an"
* Wrong usage of "a"
* Trival: Remove unused logging import
* Allow to control to use constraint env for functional jobs
* DVR: optimize check_ports_exist_on_l3_agent()
* Don't call add_ha_port inside a transaction
* Call _allocate_vr_id outside of transaction
* Change log level from error to warning
* Fix Security-rule's port should not set to 0 when Protocol is TCP/UDP
* Add constant to L3 extension for floating ips
* dibbler: fix import order
* Add address scope to floating IPs in RPC response to L3 agent
* Add firewall blink + remote SG functional tests
* Add test cases to testing firewall drivers
* Ignore non rules related qos_policy changes
* Remove check on dhcp enabled subnets while scheduling dvr
* Run functional gate jobs in a constrained environment
* update docstring for get_ports_on_host_by_subnet
* Correct state_path option's help string
* Updated from global requirements
* Restore _validate_subnet/uuid_list not to break subproject gates
* Delete test_restart_l3_agent_on_sighup
* DVR: Remove get_port call from dvr_update_router_addvm
* DVR:Fix _notify_l3_agent_new_port for proper arp update
* Add tests that constrain db query count
* Don't raise if polling manager is running when stopped
* Add abstractmethod to FirewallDriver abstract class
* Add a link of availability zone document into releasenote
* Corrected wrong ethertype exception message
* Misspelling in message
* Use the constant HOST_ID instead of 'binding:host_id'
* Force L3 agent to resync router it could not configure
* Provide pointer for ML2 extension manager to effective guide
* Add notes on loading strategies for ORM relationships
* Enable Guru Meditation Reports for other refarch agents
* Updated from global requirements
* Catch known exceptions during deleting last HA router
* Add new troubleshooting bugs tag
* Add to deprecate OFAgent in release note
* Refactor the subnetpools API tests
* Clean up code for bug1511311
* Kill the vrrp orphan process when (re)spawn keepalived
* reject leading '0's in IPv4 addr to avoid ambiguity
* Remove duplicated code in attribute.py
* QOS: add in missing translation
* Separate the command for replace_port to delete and add
* Fix comparison of Variant and other type in test_model_sync
* Add check that list of agents is not empty in _get_enabled_agents
* Remove unused parameter from _update_router_db method
* Use a joined relationship for AZ info on routers
* Cleanup all the release notes
* Improve tox to show coverage report on same window
* Tune _get_candidates for faster scheduling in dvr
* Updating devref for networking-onos project
* Use a joined relationship for AZ info on networks
* Correct return values for bridge sysctl calls
* Batch db segment retrieval
* Separate rbac calculation from _make_network_dict
* Add explicit address family to AddressScope
* DVR: handle dvr serviceable port's host change
* Adding a VNIC type for physical functions
* Add functional test for availability_zone support
* OVS: Reorder table-id constants
* Deprecated tox -downloadcache option removed
* API: _validate_ip_address should not raise an exception
* Removing adv svcs dependencies on neutron
* Return availability_zone_hints as list when net-create
* Decompose OFAgent mechanism driver from neutron tree completely
* Ignore possible suffix in iproute commands
* Add option for nova endpoint type
* Force service provider relationships to load
* Add linuxbridge job to the dashboard
* ML2: Add tests to validate quota usage tracking
* Updated from global requirements
* Add explanations and examples to TESTING.rst
* Added CORS support to Neutron
* L3 DB: set get_assoc_data to be an internal method
* ovs_vhostuser: fix vhostuser_socket_dir typo
* fix call which is only specific to enhanced_rpc
* select router with subnet's gateway_ip for floatingip
* Refactor OVS-agent tunnel config validate
* Make keepalived add_vip idempotent
* Fix timestamp in RBAC extension
* Document relationship between ways of documenting new stuff
* lb: Correct String formatting to get rid of logged ValueError
* Skip keepalived_respawns test
* Add release note covering keystoneauth and v3
* Pull project out of request in addition to tenant
* Don't emit confusing error in netns-cleanup
* Add address scope to ports in RPC response to L3 agent
* Updated from global requirements
* Avoid full_sync in l3_agent for router updates
* move usage_audit to cmd/eventlet package
* Use keystoneauth instead of keystoneclient
* Deprecate _ builtin translation function
* Use _ from neutron._i18n
* Using substitution for Python String
* Tox: Remove fullstack env, keep only dsvm-fullstack
* Fix some inconsistency in docstrings
* Set timetable for removal of oslo.messaging.notify.drivers
* Delete stale neutron-server manual
* Final decomposition of the nuage plugin
* Final decomposition of Brocade vendor code
* Trivial typo fix in LinuxBridge dashboard
* Add a script to create review dashboard for a milestone
* Remove Neutron core static example configuration files - addition
* test_migrations: Avoid returning a filter object for python3
* Cleanup veth-pairs in default netns for functional tests
* Reuse constants defined in attributes
* Add availability_zone support for router
* Fix default RBAC policy quota
* Moved fullstack test doc content to TESTING.rst
* Allow multiple imports for both ways of doing i18n
* [policy] Clarify bug deputy does not require core bit
* Run NOT NULL alterations before foreign key adds
* Do not autoreschedule routers if l3 agent is back online
* Add instrumentation devref, Part I
* Updated from global requirements
* Hyper-V: remove driver from the neutron tree
* Fix typo in Docstring
* Remove lbaas cruft from neutron gate_hook
* Make port binding message on dead agents clear
* Notify about port create/update unconditionally
* HACKING: fix edge case with log hints
* I18n related guideline for subprojects
* Optimize "open" method with context manager
* L3: add missing space to log message
* Revert "Revert "OVS agent reacts to events instead of polling""
* XenAPI: Fix netwrap to support security group
* Move i18n to _i18n, as per oslo_i18n guidelines
* Clean up FIP namespace in DVR functional tests
* devref: Rolling upgrade mechanism for rpc-callbacks
* Remove version from setup.cfg
* DVR:don't reschedule the l3 agent running on compute node

8.0.0.0b1
---------

* Add native of_interface fullstack tests
* Disallow updating SG rule direction in RESOURCE_ATTRIBUTE_MAP
* l3_db: it updates port attribute without L2 plugin
* In port_dead, handle case when port already deleted
* Change check_ports_exist_on_l3agent to pass the subnet_ids
* lb: avoid doing nova VIF work plumbing tap to qbr
* Remove Neutron core static example configuration files
* Update 'Contributing Extensions' devref for Mitaka
* HACKING: align the underline text and header
* Imported Translations from Zanata
* Remove transparent VLAN support from base plugin
* Automatically generate neutron core configuration files
* Support Unicode request_id on Python 3
* Stop using deprecated timeutils.total_seconds()
* Correct unwatch_log to support python <= 2.7.5
* Move a note to bridge_lib
* Add Guru Meditation Reports support to Neutron services
* Fix alignment in message and remove unused module
* Update toctree of neutron document
* Don't drop ARP table jump during OVS rewiring
* Remove useless lb-agent remove_empty_bridges
* Delete HA network when last HA router is deleted
* Change instances of Openstack to OpenStack
* force releasenotes warnings to be treated as errors
* Add availability_zone support for network
* fix some misspellings
* Freescale ML2 driver code complete decomposition
* Add Incomplete state to list of acceptable states for RFE bugs
* Fix typo for OVSDB
* Clarify how we milestones are assigned
* Support for IPv6 RDNSS Option in Router Advts
* tox: pass TEMPEST_CONFIG_DIR envvar into api target environment
* Wait for the watch process in test case
* Add UnionModel support to filter query generator
* Minor doc fix in alembic_migrations.rst
* Some minor misspellings in comment block
* Optimize router delete execution
* Deprecate l3-agent router_id option
* Make Neutron resources reference standard attr table
* devref: add upgrade strategy page
* Remove duplicate deprecation messages for quota_items option
* Log error instead of exception trace
* Refactor OVS-agent init-method
* neutron-db-manage: mark several options as deprecated
* ovs: Make interface name hashing algorithm common and extend it
* Check gateway ip when update subnet
* Use diffs for iptables restore instead of all rules
* IPAM: add in missing exception translations
* Remove BigSwitch plugin and driver
* Add Access Control bug tag
* Add index entry to vhost documentation
* Make fullstack test_connectivity tests more forgiving
* Fix get_subnet_ids_on_router in dvr scheduler
* Remove misplaced copyright attribution
* Fix misspelled word in docstring
* neutron-db-manage: expose alembic 'heads' command
* Reorganize and improve l3_agent functional tests
* Make sure we return unicode strings for process output
* Use compare-and-swap for IpamAvailabilityRange
* Replace neutron-specific LengthStrOpt with StrOpt
* Fix use of fields argument in get_rbac_policies
* Updated from global requirements
* Fix dashboard graphite URLs
* Fix Neutron flavor framework
* Keep reading stdout/stderr until after kill
* Updated from global requirements
* Fix the end point test for client
* IPAM: fix 'enable-dhcp' with internal driver
* Update HA router state if agent is not active
* Send 50% less debug information when executing cmd
* Fix alignment in message
* Datapath on L2pop only for agents with tunneling-ip
* Add hosted agents list to dhcp agent scheduler
* Add vhost-user support via ovs capabilities/datapath_type
* Remove deprecated use_namespaces option
* Resync L3, DHCP and OVS/LB agents upon revival
* Ensure metadata agent doesn't use SSL for UNIX socket
* Updated from global requirements
* Add networking-infoblox sub-project
* Firewall: fix typo
* Fix the indentation issue
* Elaborate how priorities are assigned to blueprints
* Don't add default route to HA router if there is no gateway ip
* Add a better description for notification_driver
* Use DEVICE_OWNER_* for 'network:*' constants
* Add the missing arg of RetryRequest exception in _lock_subnetpool
* Update networking-powervm sub-project docs
* Remove unused delete_dvr_mac_address method
* Add fullstack testing doc content
* Fix releasenotes/../unreleased.rst
* Avoid race condition for reserved DHCP ports
* Revert "Move dhcp_lease_duration into DHCP agent config options list"
* sub_projects.rst: Update midonet functionalities
* Switch to using neutron.common.utils:replace_file()
* Trivial fix in ml2 conf
* Remove the useless l3plugin check in l3_rpc.py
* Fix some reST field lists in docstrings
* Use DEVICE_OWNER_COMPUTE constant everywhere
* Fix broken references in doc
* Skip bindings with agent_id=None
* Updated from global requirements
* Use admin context when requesting floating ip's router info
* Cleanup dhcp namespace upon dhcp setup
* Use SIGUSR1 to notify l3 agent of changing prefix file
* Last sync from oslo-incubator
* Remove SysV init script for neutron-server
* Refactor test_server functional tests
* Undeprecate force_gateway_on_subnet option
* Move dhcp_lease_duration into DHCP agent config options list
* Add transaction for setting agent_id in L3HARouterAgentPortBinding
* Check missed IPSet utility using neutron-sanity-check
* Change the repos from stackforge to OpenStack
* Revert "Revert "Remove TEMPEST_CONFIG_DIR in the api tox env""
* Require tox >= 2.0
* Use assertFalse(observed) instead of assertEqual(False, observed)
* Fix heading markers for better docment toc view
* Clarify that RFE bug reports should not have an importance set
* Remove TEMPEST_CONFIG_DIR in the api tox env
* Revert "Remove TEMPEST_CONFIG_DIR in the api tox env"
* Lower l2pop "isn't bound to any segement" log to debug
* DVR: remove redundant check
* Disable IPv6 on bridge devices in LinuxBridgeManager
* More graceful ovs-agent restart
* sriov: add extensions option to configuration file
* Fix dvr_local_router.floating_ip_added_dist failure after agent restart
* Don't use duplicate filter names for functional testing
* Replace get_all_neutron_bridges by get_deletable_bridges
* Revert "OVS agent reacts to events instead of polling"
* configure_for_func_testing.sh: Fix arguments for get_packages
* Add call to pluggable IPAM from ml2 delete_subnet
* Add "unreleased" release notes page
* Final decomposition of opendaylight driver
* Adding security-groups unittests
* Don't snat traffic between fixed IPs behind same router
* Remove MidonetInterfaceDriver
* Update internal snat port prefix for multiple IPv6 subnets
* Use get_interface_bridge instead of get_bridge_for_tap_device
* Move LinuxBridge related features to bridge_lib
* Reduce duplicated code in test_linuxbridge_neutron_agent
* Document the neutron-release team
* Updated from global requirements
* Trivial fix in l3 agent
* IPAM: make max fixed IP validations DRY
* Fix misuse of log marker functions in neutron
* More instructions for neutron-db-manage revision --autogenerate
* Add in missing spaces at end of line
* Do not use log hints for exceptions
* Fix notification driver package
* Adding a function prefix before parenthesis
* Make command log in neutron utils.execute() a single line
* move import to top and rename to make more readable
* Move update_fip_statuses to Router class
* Replace subnetpool config options with admin-only API
* Add new config option for IPv6 Prefix Delegation
* Correction and clarification to subproject stable guidelines
* Make '*' the default ml2 flat_networks configuration
* Add PyPI link for networking-calico
* Deprecate new= argument from create_connection
* OVS agent reacts to events instead of polling
* Remove default=None for configuration bindings
* Log hints should only be used for log messages
* Add reno for release notes management
* Add a note about the Neutron Bugs team in Launchpad
* Update deprecated messages
* Switch to using neutron.common.utils:replace_file()
* Change function call order in ovs_neutron_agent
* Ensure to decode bytes or fail
* Optimize delete_csnat_router_interface_ports db query
* Make string representation of DictModel generic
* Add IRC part for effective neutron
* PortOpt cleanups
* Fix QoS VALID_RULE_TYPES location in devref
* Docs: clarify that AnySubnetRequest is optional
* Update neutron-debug to use stevedore aliases
* Fix incorrect passing port dict in pluggable IPAM
* Per-branch HEAD files for conflict management
* Replace internal oslo_policy mock with public fixture
* sub_project_guidelines.rst: Clarify stable branch creation for subprojects
* Use a more pythonic string construction
* Add ops tag to bugs policy
* IPSet Manager: make code more pythonic
* Imported Translations from Zanata
* Remove deprecated nova_* options
* Fixed a bunch of typos throughout Neutron
* Decompose ML2 mechanism driver for Mellanox
* Add text for deprecated parameter
* Clarify with example mentioning gratuitous whitespace changes
* Removes the use of mutables as default args
* Decompose ML2 mechanism driver for OVSvApp
* Fix usage of mutable object as default value
* Make the Neutron Stadium documentation toplevel
* Add notes about stable merge requirements for sub-projects
* Fix incorrect capitilization of PyPI
* Updated from global requirements
* Log end of router updates for PD and delete branches
* Don't update metadata_proxy if metadata is not enabled
* Imported Translations from Zanata
* DHCP agent: log when reloading allocations for a new VM port
* Update specs backlog directory
* Log the exception in linuxbridge_neutron_agent as exception
* Replace utils.exec for IpNeighComm LinuxBridge drv
* Formatting exception messages
* Optimize get_bridge_for_tap_device
* Optimize interface_exists_on_bridge
* Correct indentation in linuxbridge_neutron_agent
* Use oslo_config new type PortOpt for port options
* Updated from global requirements
* DVR: only notify needed agents on new VM port creation
* Ensure l3 agent receives notification about added router
* Imported Translations from Zanata
* Support migrating of legacy routers to HA and back
* Use string formatting instead of string replace
* Delete fipnamespace when external net removed on DVR
* Better tolerate deleted OVS ports in OVS agent
* Remove GBP as a Neutron sub-project
* get_device_by_ip: don't fail if device was deleted
* Allow to specify branch for creating new migration
* Mark for removal deadcode in neutron.common.utils
* Adds base in-tree functional testing of the dhcp agent (OVS)
* Fix _restore_local_vlan_map race
* DVR: notify specific agent when deleting floating ip
* Move test_extend_port_dict_no_port_security to where it belongs to
* Fix the latest glitches that broke docs generation
* Add effective note on DB exception to be aware of
* Minor improvement in port_bound operation
* Introduce an API test for specified floating ip address
* Clarify what gerrit repositories can target neutron-specs
* Fix error code when L3 HA + DVR router is created or updated
* Spawn dedicated rpc workers for state reports queue
* Fix l2pop regression
* Remove deprecated sriov agent_required option
* Remove deprecated namespace deletion options
* Deepcopy port dict in dhcp rpc handler
* Don't remove ip addresses if not master
* Include alembic versions directory to the package
* Fix formatting of hyperlinks provided in the office-hours doc
* Remove IBM SDN-VE left-overs
* Remove the port-forwarding sub-project from the list
* Set security group provider rule for icmpv6 RA in DVR
* Properly handle segmentation_id in OVS agent
* ovs: remove several unneeded object attributes from setup_rpc()
* Set ip_nonlocal_bind in namespace if it exists
* Remove SUPPORTED_AGENT_TYPES for l2pop
* DVR: Notify specific agent when update floatingip
* Move some projects url from cgit/stackforge to cgit/openstack
* Remove non-existent enable_tunneling conf from fullstack
* Update notes about the Neutron teams
* Validate ethertype for icmp protocols
* Refactor _populate_ports_for_subnets for testability
* Split the FIP Namespace delete in L3 agent for DVR
* Add stevedore aliases for interface_driver configuration
* Register oslo_service.wsgi options correctly
* ovs_neutron_agent: display loaded extensions
* Improvements to the blueprint management process
* Add a note to ban agents from connecting to the DB
* Revert "DVR: Notify specific agent when update floatingip"
* Imported Translations from Zanata
* Fix DVR downgrade exception / error code
* Fix AttributeError on port_bound for missing ports
* The exception type is wrong and makes the except block not work
* Fix rendering
* DVR: Notify specific agent when update floatingip
* Do not try to delete a veth from a nonexistent namespace
* Do not accept abbreviated CIDRs
* Spelling and grammar corrections
* Cross link sub-project release processes
* Lower the log level for the message about concurrent port delete
* Updated from global requirements
* Update RFE documentation to clarify when the tag is not appropriate
* Cache the ARP entries in L3 Agent for DVR
* Revert "Make OVS interface name hashing algorithm common and extend it"
* Enable specific extra_dhcp_opt to be left blank
* Python 3: skip test_json_with_utf8 on Py3
* test_create_network_segment_allocation_fails: Assert the status
* The first word of the error message should be capitalized
* Create ipset set_name_exists() method
* Add section for code review in effective neutron
* Add -constraints sections for base CI jobs
* Python 3: make post_test_hook work with more tox targets
* Remove useless code in L3 HA unit tests
* Move retries out of ML2 plugin
* Include external bridge deprecation warning in string
* Tweak RFE guidelines
* Fix link in devref guide
* Add ml2 extension drivers examples
* Improve performance of ensure_namespace
* Kill conntrackd state on HA routers FIP disassociation

7.0.0
-----

* Mock oslo policy HTTPCheck instead of urllib
* Avoid DuplicateOptError in functional tests
* Make test_server work with older versions of oslo.service
* Always send status update for processed floating ips
* Fix inconsistency in DHCPv6 hosts and options generation
* L3 agent: use run_immediately parameter to sync after start
* test_db_base_plugin_v2: Skip a few tests on some platforms
* Fix error returned when an HA router is updated to DVR
* Remove disable_service from DBs configuration
* Replaced deprecated isotime() function
* DVR: notify specific agent when creating floating ip
* Fix the bug of "Spelling error of a word"
* Fix iptables modules references in rule generation
* Remove the embrane plugin
* Fix functional test_server tests
* Add deadlock warning to 'effective neutron'
* Quick optimization to avoid a query if no ports have fixed ips
* Add OpenFixture and get rid of 'open' mocks
* Use assertTrue(observed) instead of assertEqual(True, observed)
* Imported Translations from Zanata
* QoS SR-IOV: allow to reset vf rate when VF is assigned to VM
* Add track_quota_usage conf into neutron.conf
* Only lock in set_members on mutating operations
* Add pointers to access Neutron test coverage details
* Consume ConfigurableMiddleware from oslo_middleware
* Remove excessive fallback iptables ACCEPT rules
* Consume sslutils and wsgi modules from oslo.service
* test_create_router_gateway_fails fixes
* Code refactor for generating integer in testcase
* Effective: avoid mocking open() if you can
* Cleaned up remaining incorrect usage for LOG.exception
* Remove usage of WritableLogger from oslo_log
* Fixed multiple py34 gate issues
* Removed release_port_fixed_ip dead code
* Validate local_ip for linuxbridge-agent
* Removed neutronclient option from metadata agent
* Adding headers to the devref docs

7.0.0.0rc2
----------

* DHCP: protect against case when device name is None
* Add testresources used by oslo.db fixture
* Add the functional-py34 and dsvm-functional-py34 targets to tox.ini
* Improvements to the RFE management process
* Mock oslo policy HTTPCheck instead of urllib
* Add py34 tags to the list of official tags
* Updated from global requirements
* Fix rule generation for single and all host rules
* Fix iptables comments for bare jump rules
* Add another patch scoping bullet point to effective_neutron
* Removed a pile of debtcollector removals from neutron.context
* L3 Agent support for routers with HA and DVR
* Python 3: add classifiers
* Adding Effective tips for plugin development
* Add networking-bgpvpn lieutenants
* Update gate dashboard URLs
* Add some test guidelines to 'effective neutron'
* Fix capitalization nit in patch 230218
* DHCP: protect against case when device name is None
* Execute ipset command using check_exit_code
* Add note in database section of 'effective neutron'
* Correct MAC representation to match iptables output
* Add note about negative feedback to 'effective neutron'
* Add a note about agent/server compat to 'effective neutron'
* Add a patch scope section to 'effective neutron'
* Add a logging guideline to 'effective neutron'
* Fix missing parent start() call in RpcWorker
* Remove OneConvergence plugin from the source tree
* Use assertIsNone(observed) instead of assertEqual(None, observed)
* Document self.assertEqual(expected, observed) pattern
* Move gateway processing out of init_router_port
* Use assertIn and assertNotIn
* Deprecate max_fixed_ips_per_port
* Don't register agents for QoS l2pop fullstack test
* The option force_metadata=True breaks the dhcp agent
* Updated from global requirements
* Do not log an error when deleting a linuxbridge does not exist
* The option force_metadata=True breaks the dhcp agent
* Decomposition phase2 for MidoNet plugin
* Updated from global requirements
* Changes in Neutron defect management
* Tag the alembic migration revisions for Liberty
* /common/utils.py py34 incompatibility
* Just call set-manager if connecting fails
* Fixes 'ovs-agent cannot start on Windows because root_helper opt is not found'
* Use format to convert ints to strings
* Fixes 'ovs-agent fails to start on Windows because of SIGHUP'
* usage_audit: Fix usage_audit to work with ML2
* Pecan: Fix quota enforcement
* metadata: don't crash proxy on non-unicode user data
* Do not log an error when deleting a linuxbridge does not exist
* /common/utils.py py34 incompatibility
* Remove debtcollector.removals tagged ensure_dir
* Consume service plugins queues in RPC workers
* Imported Translations from Zanata
* Add more commit msg tips to 'effective neutron'
* Remove local variables from IPDevice.exists
* Add availability_zone support base
* Pecan: Fix quota enforcement
* metadata: don't crash proxy on non-unicode user data
* Add neutron-linuxbridge-cleanup util
* Effective Neutron: add link to low-hanging-fruit bugs
* Effective Neutron: add link to logging guidelines
* Add IPDevice.exists() method
* Simplify L3 HA scheduler tests
* Python 3: fix invalid operation on dict_items objects
* Use format to convert ints to strings
* Add periodic agents health check
* Imported Translations from Zanata
* Fix db error when running python34 Unit tests
* Remove OpenContrail plugin from the source tree
* Correct cisco_ml2_apic_contracts.router_id length
* Remove is_ha property from the router
* Remove log decorator deprecated in Liberty
* Deprecate branchless migration chains from neutron-db-manage
* Support new mitaka directory with revisions
* Fix the bug of "Error spelling of 'accomodate'"
* Just call set-manager if connecting fails
* Check idl.run() return value before blocking
* Use separate queue for agent state reports
* Remove remaining uses of load_admin_roles flag in tests
* Make OVS interface name hashing algorithm common and extend it
* Simplify extension processing
* Fix URL target problem
* Add devref for alembic milestone tagging
* Add compatibility with iproute2 >= 4.0
* Tag the alembic migration revisions for Liberty
* api test: Skip address-scope tests when the extension is not enabled
* Check idl.run() return value before blocking
* Check supported subnet CIDR
* Remove zombie pecan hook
* Adding trailing underscores to devref links
* Python 3: use "open" instead of "file"
* Imported Translations from Zanata
* Handle empty bridge case in OVSBridge.get_ports_attributes
* Devref for authorization policies enforcement
* Fixing traces of "Replace prt variable by port"
* Kill HEADS file

7.0.0.0rc1
----------

* Don't write DHCP opts for SLAAC entries
* Cleanup of Translations
* Cleanup of Translations
* Move ConfigDict and ConfigFileFixture to neutron.tests.common
* Turn device not found errors in to exceptions
* Fix quota usage tracker for security group rules
* Update default branch in .gitreview to stable/liberty
* SimpleInterfaceMonitor: get rid of self.data_received flag
* Fixes 'ovs-agent fails to start on Windows because of SIGHUP'
* Forbid more than one branch point in alembic dependency chains
* Fix quota usage tracker for security group rules
* Fixes 'ovs-agent cannot start on Windows because root_helper opt is not found'
* Imported Translations from Zanata
* Fix a few nits with the dashboard pages
* Open Mitaka development
* Fix the broken link in devref docs
* Eliminate autoaddress check for DNS integration
* Only get host data for floating ips on DVR routers
* Add neutron subproject & stable branch gerrit review links
* Link dashboards into generated documentation
* Add neutron/master review link to dashboard/index.html
* Create dashboard page with gate jobs statistics
* ml2: don't consider drivers with no bind_port for qos supported rule types
* Adds configurable agent type
* Imported Translations from Zanata
* Updated from global requirements
* Relax service module check on service providers
* Get rid of ConfigParser code in ProviderConfiguration
* Rename check pipeline dashboards
* tests: don't validate respawn as part of ovsdb monitor functional test
* ovsdb monitor: get rid of custom _read_stdout/_read_stderr methods
* Change ignore-errors to ignore_errors
* Change router unbinding logic to be consistent with data model
* delete_port: ensure quota usage is marked as dirty
* Fix hostname roaming for ml2 tunnel endpoints
* Execute ipset command using check_exit_code
* Refactoring devstack script
* Fix adding tap failure if bridge mapping is not provided
* SubnetPoolsTest: Skip IPv6 tests appropriately
* Remove an invalid comment
* Fixes SNAT port not found for internal port
* Don't write DHCP opts for SLAAC entries
* Simplify join to rbac_entries for subnets
* Update _TestModelMigration
* Add --verbose to subset of cmds in neutron-db-manage
* Use pecan controllers for routing
* test_networks: Stop assuming net-mtu extension
* Imported Translations from Zanata
* Add skeleton to 'Effective Neutron' devref
* Introduce kill_signal parameter to AsyncProcess.stop()
* Remove early yields in _iter_hosts in dhcp agent
* Optimize if statement in dvr_local_router.py
* Re-adds VIF_TYPE_VHOST_USER to portbindings extension
* Introduce a separate RPC server
* Fix log statement to log correct variable first_ip
* Remove pecan branch reference from .gitreview file
* ipam: Prevent none from being passed to delete
* Remove restriction of adding constraints to expand
* Delete unused file tests/unit/database_stubs.py
* No network devices on network attached qos policies
* Revert "Revert "Pecan WSGI: prevent plugins from opening AMQP connections""
* Use tempest-lib's token_client
* Revert "Pecan WSGI: prevent plugins from opening AMQP connections"
* Add constraint target to tox.ini
* Fix establishing UDP connection
* ovsdb: Fix a few docstring
* Remove requirements.txt for the ofagent mechanism driver
* Always return iterables in L3 get_candidates
* Remove plural param to QUOTAS.count
* Return version info on version controller
* Log exception.msg before exception.message
* Fix pecan policy enforcement for GET requests
* Add missing resource discriminator in update resp
* Fix missing check for admin/adv_service
* Clarify and add a TODO in the controller
* Set expected HTTP codes for create and delete
* Add basic bulk support to collection controller
* Prevent full sync in dhcp_agent when possible
* Remove duplicated API server
* Add QoS fullstack test
* QoS agent extension and driver refactoring
* Add IPv6 Address Resolution protection
* Revert "AsyncProcess: try to kill tender"
* Remove out-of-tree vendor AGENT_TYPE_* constant
* func: Don't use private method of AsyncProcess
* Remove unused ovs_lib method reset_bridge
* Fix TypeError caused by delete_agent_gateway_port()
* sub_project_guidelines: Add richer documentation
* Fix typo: Large Ops, not Large Opts
* Fix query in get_l3_agent_with_min_routers
* Do not specify host for l2population topics
* Add utility function for checking trusted port
* Fix typo in error message in NetcatTester
* docstring fix
* AsyncProcess: try to kill tender
* Enable servicing lbaasV2 vip by DVR
* Switch scheduler drivers to load based schedulers
* Fix BadRequest error on add_router_interface for DVR
* Fix missing value types for log message
* Tweak test_keepalived_respawns test logic
* Reservations: Don't count usage if resource is unlimited
* Restore reservations in API controller
* ovs: don't use ARP responder for IPv6 addresses
* Install sriov-agent.ini on 'setup.py install'
* Configure gw_iface for RAs only in Master HA Router
* Remove useless log from periodic_sync_routers_task
* Replace is_this_snat_host validation with internal function
* Revert "Remove address scopes from supported extensions"
* Add l2pop support to full stack tests
* Add tunneling support to full stack tests
* Remove an unused DVR function
* Handle ObjectDeletedError when deleting network ports/subnets
* OVSAgentTestFramework: Remove _bind_ports
* Descheduling DVR routers when ports are unbound from VM
* Updated from global requirements
* Reduce the chance of random check/gate test failures
* Allow passing arbitrary ip route parameters to add/delete_route
* Make ip address optional to add_route and delete_route
* Add list routes
* Fix dvr update for subnet attach multi subnets
* Make ip rule comparison more robust
* Remove hack for discovery novaclients extension
* Check ICMP codes in range [0,255]
* Remove address scopes from supported extensions
* Add test to check that correct functions is used in expand/contract
* SR-IOV: devstack support for SR-IOV agent
* Fix test_external_tables_not_changed
* Delete gateway conntrack state when remove external gateway
* Updated from global requirements
* Add non-model index names to autogen exclude filters
* Implement expand/contract autogenerate extension
* Cleanup the fip agent gateway port delete routines
* Add RPC command and delete if last FIP on Agent
* Delete FIP agent gateway port with external gw port
* Remove ebtables_driver/manager dead code
* Stop device_owner from being set to 'network:*'
* Add oslo rootwrap daemon logging during functional tests
* ovs agent resync may miss port remove event
* tests: disable process monitor before managers
* Retry metadata request on connection refused error
* Add ability to use custom config in DHCP-agent
* Improve DB operations for quota reservation
* Qos SR-IOV: Refactor extension delete to get mac and pci slot
* Adds support to provide the csum option for the OVS tunnels
* Delete the useless variable agent_host
* Handle process disappearing before we ask for its PPID
* Allow only GET on Root controller
* OVS agent: handle deleted ports on each rpc_loop iteration
* Final decomposition of Cisco plugin
* Remove Cisco Meta and N1KV monolithic plugins
* Workaround test stream corruption issue
* Fix RBAC filter query for negative case
* Updated from global requirements
* Remove _extract_roles method from neutron.policy
* Fixed functional test that validates graceful ovs agent restart
* _bind_devices query only existing ports
* Stop logging deadlock tracebacks
* Don't log exceptions in GW update on router create
* Remove an unnecessary extension check for rbac
* OVS agent: flush firewall rules for all deleted ports at once
* Enable most unit tests for py34 job
* Changed filter field to router_id
* Fix a wrong condition for the _purge_metering_info function
* Don't log deadlock or retry exceptions in L3 DB
* Make sure service providers can be loaded correctly
* sriov: update port state even if ip link fails
* Retain logs for functional test cases

7.0.0.0b3
---------

* Don't setup ARP protection on OVS for network ports
* Don't setup ARP protection on LB for network ports
* Add support for PluginWorker and Process creation notification
* Implement external physical bridge mapping in linuxbridge
* Avoid DB errors when deleting network's ports and subnets
* Better message on allowed address pairs error
* Add info to debug test_keepalived_respawns gate failure
* Enable to update external network subnet's gateway-ip
* Make Neutron service flavor save service_type
* Add tenant_id to flavor service profiles attributes
* Remove implicit registration of *-aas service providers
* Rename 'newapi' to 'pecan_wsgi'
* Catch errors on 'port not found' while deleting subnet
* Process user iptables rules before INVALID
* OVS-agent: Introduce Ryu based OpenFlow implementation
* Deprecate external_network_bridge option in L3 agent
* Do not track active reservations
* Deprecate --service option for neutron-db-manage
* Add constraint target to tox.ini
* DHCP agent: allow using gateway IPs instead of uniquely allocated
* Resolve issue where router can't be removed from L3-agent in dvr mode
* OVS agent add functional tests of OVS status
* check_changed_vlans doesn't need registered_ports as param
* [rpc] pull: removed a hack to avoid object backport triggered
* Enable py34 tests for pluggable ipam backend
* test_migrations: Remove unnecessary midonetclient mocks
* Updated from global requirements
* Fix import path in neutron-sanity-check for ml2_sriov opts
* Decentralize the management of service providers
* Remove requirements.txt for decomposed plugins/drivers
* Linuxbridge-agent: fix bridge deletion
* Correct neutron-ns-metadata-proxy command when watch_log is False
* Split SR-IOV configuration file into driver and agent pieces
* Python 3: use a hash to sort dictionaries
* Implement TODO for version listing
* Fix hooks for dealing with member actions
* Fixed filters for functional tests
* Fix usage of netaddr '.broadcast'
* Add lieutenants contact for networking-calico
* Adding networking-calico to sub_projects document
* Fix locale problem in execute()
* Remove duplicated codes in two test cases
* Fixes wrong neutron Hyper-V Agent name in constants
* Updated from global requirements
* Improve python code for missing suggestion
* Fix misnomer on network attribute
* Refactor IpRouteCommand to allow using it without a device
* Revert "Add support for unaddressed port"
* Improve logging upon failure in iptables functional tests
* handle gw_info outside of the db transaction on router creation
* Remove ml2 resource extension success logging
* Replace "prt" variable by "port"
* Add optional file permission argument to replace_file()
* Fixed the typo in the doc string of the class SubnetPoolReader
* Add flows to tunnel bridge with proper cookie
* Add lieutenants contact for networking-onos
* Adding networking-onos to sub_projects document
* Add policy and policy rule belongs check
* Base on SqlTestCase to init db tables correctly
* Stops patching an object method which could be gone at cleanup
* Add enable_new_agents to neutron server
* Document prefix delegation testing issues
* Fix Prefix delegation router deletion key error
* Add Geneve type driver support to ML2
* Fix DVR log strings in agent
* devref: Add sub-project release notes
* Process update_network in the openvswitch agent
* Removing the SDN-VE monolithic plugin
* [neutron-db-manage] Introduce contract and expand commands
* Fix DBDuplicateEntry when creating port with fixed_ips on PD subnet
* Update template for ModelMigrationSync test
* Fix py34 No sql_connection parameter is established error
* Switch to using os-testr's copy of subunit2html
* Add a functional test to validate dvr snat namespace
* Add snat ports cache to dvr router
* DHCP agent: add 'bridged' property to interface driver
* SR-IOV: deprecate agent_required option
* SimpleInterfaceMonitor handle case when ofport is an empty set
* Make delete-vlan-bridge and delete-vlan functions clear
* Run py34 tests with testr
* Use directly neutron.common.constants constants in l3_dvr_db
* Make a couple of methods private
* Add IPv6 Prefix Delegation compatibility to ipam_pluggable_backend
* Validate router admin_state_up on upgrade to distributed
* Fix AttributeError in _clean_updated_sg_member_conntrack_entries()
* PLUMgrid plugin decomposition part II
* Quota enforcement: remove locks on _dirty_tenants
* L3 agent changes and reference implementation for IPv6 PD
* Decomposition phase2 of NEC plugin
* Allow py34 to run tests individually
* Add dns_label processing for Ports
* Remove out-of-tree vendor VIF_TYPE_* constants
* Move in-tree vendor AGENT_TYPE_* constants
* devref: added guidelines on how to maintain sub-projects
* Stop logging STDOUT and STDERR on every shell out
* Defer freeing of conntrack zone ids until allocation fails
* Update the URLs to the Cloud Admin Guide
* Remove redundant logging statements from RootWrapDaemonHelper
* Rationalize neutron logs to help in troubleshooting router issues
* Move db agent schedulers test to a more appropriate place
* OVS agent don't hard code tunnel bridge name
* Make models_v2 explicitly import rbac_db_models
* Make NeutronDbObjectDuplicateEntry exception more verbose
* Add empty policy rule to get_rule_type action
* test_ovs_neutron_agent: Fix test_cleanup_stale_flows_iter_0
* Support dhcp metadata service for all networks
* Move docstring to FakeMachineBase
* Update rootwrap.conf to add /usr/local/sbin
* Remove the ML2 Nuage driver code
* Template for ModelMigrationTest for external repos
* Only mark metadata packets on internal interfaces
* Python 3: do not do "assertFalse(filter(...))"
* ip_lib: support creating Linux dummy interface
* Graceful OVS restart for DVR
* DHCP agent: clarify logic of setup_dhcp_port
* Add config option to specify ovs datapath
* Python 3: fix test_ovs_tunnel
* Python 3: use __code__ instead of func_code
* IPv6 display suitable message when MTU is invalid on iface
* Update oslo messaging configuration section for fullstack
* Imported Translations from Transifex
* QoS: fix get bandwidth limit rules to filter them per policy
* Neutron RBAC API and network support
* Fixed broken link in neutron-server's documents
* Used namedtuple for ReservationInfo
* Move in-tree vendor VIF_TYPE_* constants
* Remove VIF_TYPES constant
* Added initial devstack plugin
* Fix qos api-tests after policy changes
* fullstack: use migration scripts to create db schema
* Only validate local_ip if using tunneling
* qos: Delete bw limit rule when policy is deleted
* Do not query reservations table when counting resources
* Add support for unaddressed port
* Sync FK constraints in db models with migration scripts
* Add EnvironmentDescription, pass it down
* Dropped release name from migration branch labels
* Split DRIVER_TABLES in external.py
* DVR: make sure snat portion is always scheduled when needed
* neutron-db-manage: sync HEADS file with 'current' output
* Fix _ensure_default_security_group logic
* Add missing tenant_id validation in RESOURCE_ATTRIBUTE_MAP
* Graceful ovs-agent restart
* l2pop: check port mac in pre-commit to stop change
* Adding Ale Omniswitch to sub_projects document
* Add high-level functional/integration DVR tests
* Add a fullstack fake VM, basic connectivity test
* Final decomposition of ML2 Cisco UCSM driver
* Fix query in get_reservations_for_resources
* Move tests for non pluggable ipam backend
* fullstack: Skip NotFound in safe_client cleanup
* Fix tenant access to qos policies
* Rename args for alembic 0.8.0
* Update sub projects git urls
* Stop using quota reservations on base controller
* Final decomposition of ML2 Nexus Driver
* manual add/remove router for dvr_snat agent
* DVR: fix router rescheduling on agent side
* Python 3: fix test_utils
* lb: stop handling Havana device updates
* quota: synchronize resync and count with other dirty_tenants code
* Add logging to debug oslo.messaging failure
* Setup firewall filters only for required ports
* Updated from global requirements
* Quota enforcement: python3 compatibility
* Devref for quotas
* Reservations support
* Fix .gitreview to not point at a branch
* Don't fatal error during initialization for missing service providers
* NSX: Move DB models as part of core vendor decomposition
* doc: Improve table rendering using multi-row cells
* Rename function '_update_port_down'
* Redundant tests removed from ovs-lib unit tests:
* Add network to SubnetContext
* Unskip firewall test
* NSX plugin: Moving away plugin extensions
* Get rid of exception converter in db/api.py
* Python 3: encode or decode i/o data of Popen.communicate()
* Updated from global requirements
* Use a conntrack zone per port in OVS
* Fix some issues around tempest in fullstack testing doc
* Add lieutenants contact for kuryr
* Add dashboard folder and graphite dashboard to doc
* lieutenants: Add Neutron infra lieutenants
* DVR: do not reschedule router for down agents on compute nodes
* Replace internal calls of create_{network, subnet, port}
* ml2: Remove a redundant assignment in _bind_port_level
* ml2: _commit_port_binding: Don't use None to mean False
* Minor typo fix
* l3: not use L2 plugin _get_subnet unnecessarily
* l3_db: not use L2 plugin _get_port unnecessarily
* Break down _bind_port_if_needed in ML2
* Pecan WSGI: prevent plugins from opening AMQP connections
* Remove 'action' argument from _handle_fip_nat_rules()
* Remove vmware plugin from neutron (etc part)
* Setup reference service providers for API test runs
* [neutron-db-manage] check_migration: validate labels
* Python 3: fix neutron.tests.unit.api.test_extensions
* Add configurable options for HA networks
* Add test that checks external tables are not changed
* [neutron-db-manage] remove old HEAD file when updating for branches
* Remove unneeded shebangs
* Python 3: hmac requires bytes key/msg
* Python 3: encode unicode response bodies
* Support for independent alembic branches in sub-projects
* Remove bigswitch mech_driver entry point definition
* Updated from global requirements
* Python 3: specify a bytes to an argument for a format type 's' of struct.pack()
* Preserve DVR FIP rule priority over Agent restarts
* Treat sphinx warnings as errors
* Distributed router can not add routes
* Update fullstack multinode simulation image
* Fix docs job
* Improve callback registry devref documentation and usability
* Final decomposition of the ML2 NCS driver
* Fix update_subnet for prefix delegation
* The unnecessary value "sgids" was deleted
* Fix DVR interface delete by port when gateway is set
* Skip FwaaS test that is failing due to race condition
* Destroy ipset when the corresponding rule is removed
* Python 3: compare response.body to bytes in namespace_proxy test
* Forbid attaching rules if policy isn't accessible
* DVR: fix router rescheduling on server side
* Fix the low level OVS driver to really do egress
* SR-IOV: Add Agent QoS driver to support bandwidth limit
* Pass the extension driver exception to plugin
* Update documentation according to last QoS/OvS changes
* OVS agent functional test for policy rule delete
* Add Kuryr to sub_projects.rst
* Clean up test_dvr_router_rem_fips_on_restarted_agent
* Fix _update_subnet_allocation_pools returning empty list
* devref: update quality_of_service
* Replace 'import json' with oslo_serialization
* SR-IOV: Convert max rate from kbps to Mbps
* Add testing coverage .rst, missing test infrastructure to-dos
* Python 3: encode unicode response bodies
* Update port functional tests for qos agent
* Neutron-Ironic integration patch
* DVR: fix router scheduling
* TESTING.rst love
* Removed configuration option for qos agent driver selection
* Add delete_port api to agent extension manager
* Functional test for QoS policy bandwidth rule update
* Support delegation of bind_port to networking-odl backend driver
* Use oslo.log library instead of system logging module
* resources_rpc: fixed singleton behavior for ResourcesPullRpcApi
* Add thread locks on port routines for qos ext
* Avoid dhcp_release for ipv6 addresses
* SR-IOV: fixed singleton behavior for ESwitchManager
* Validate local_ip for OVS tunnel
* Imported Translations from Transifex
* db_base_plugin_v2: Avoid creating another session
* Consistent layout and headings for devref
* Use DeferredOVSBridge in setup_default_table
* Fix get_objects to allow filtering
* QoS core extension: fixed dict extension when QoS policy is unset
* OVS agent QoS extension functional test for bandwidth limit rules
* Propagate notifications to agent consumers callbacks
* Add rpc agent api and callbacks to resources_rpc
* neutron.api.rpc.callbacks interface rework
* Moved l2/agent_extensions_manager into l2/extensions/manager.py
* Moved extensions/qos_agent.py into extensions/qos.py
* Introduce base interface for core resource extensions
* Do not delete fip namespace during l3 dvr agent resync
* Introduce ItemAllocator class
* Validate updated allocation pool before using it
* Remove quotes from subshell call in tools/split.sh
* Don't claim Linux Bridge ml2 driver supports bandwidth limit QoS rules
* Clean up QoS rules first, then QoS policies
* Pass the extension driver exception to plugin
* Remove a few obsolete options from midonet.ini example
* Rename a test method in test_policy.py
* Revert "Add extension callbacks support for networks"
* Updated quality_of_service devref doc to reflect reality
* Broadcast service port's arp in DVR
* usage_audit: Fix usage_audit to work with ML2
* Revert "Remove VPN from API tests"
* Enable VPN plugin for API test
* Validate interface_mappings on Linux bridge init
* Initialize ancillary_port_info dict as blank in OVS agent
* Enable fullstack multinode tests, add L3 HA test exemplar
* SR-IOV: Update eswitch manager to support rate
* Follow up with some cleanup for agent qos_driver
* Gracefully handle duplicate rule creation
* Fix: Skip rescheduling networks if no DHCP agents available
* DB, IPAM & RPC changes for IPv6 Prefix Delegation
* Python 3: convert dict_keys object to list
* Python 3: do not compare int and NoneType
* Remove VPN from API tests
* Fix typos in neutron code
* "FakeV4Subnet" class be inherited by following class
* Update OVS driver to work with objects
* Python 3: fix test_ovs_tunnel
* _get_dvr_sync_data: Return a list, rather than dict_values for python3
* Fixing ICMP type and code validation
* Support subnetpool association to an address scope
* Add API tests for non-accessible policies
* Gracefully handle fetching nonexistent rule
* use single transaction to update qos policy association
* Replaces reduce with six.moves.reduce for py 2/3 compatibility
* Add oslo db retry decorator to the RPC handlers
* Python 3: Fix test_security_groups_db
* Replace to_dict() calls with a function decorator
* Add DNS and DHCP log into dhcp agent
* Install arp spoofing protection flow after setting port tag
* Move 1c844d1677f7 expand migration to appropriate branch
* Fix ipset can't be destroyed when last rule is deleted
* Guarantee there is only one bandwidth limit rule per policy
* Cleaned up some TODO comments for feature/qos that do not apply anymore
* L2 agent extension manager: read extensions list from config file
* objects.qos.policy: forbid deletion when attached to a port or a network
* Remove handle_network/handle_subnet from l2 agent extensions
* Move away nested transaction from _ensure_default_security_group
* Moved QOS_POLICY_ID into qos_consts.py
* Introduce get_ports_attributes in OVSBridge
* Added missing [qos] section into neutron.conf
* Enable rule delete test
* objects: consolidate single transaction checks into test_base
* objects.qos.policy: provide rules field, not type specific
* Unite qos_rules and qos_*_rules tables
* Switch controller to actually call the plugins
* Add extensions listing to the controller
* Add placeholder for notifier hook
* Add hook for policy enforcement
* Add quota enforcement hook
* Add ownership validation hook
* Add attribute population hook
* Add resource/plugin identification hook
* Add hook to create a context from the headers
* Add hook to translate exceptions into HTTP codes
* Add startup hook after pecan init for plugins
* Add keystone middleware wrapper to pecan app
* Fix accessing shared policies, add assoc tests
* qos: forbid creating rules when there is no access to policy
* Initial pecan structure
* Remove unnecessary executable permission
* NSX: Rename default_interface_name option
* Arista Drivers decomposition part II
* Python 3: pass bytes to base64.encode{string,bytes}
* Python3: pass bytes to binascii.crc32
* Fix order of calls in update_port
* Check that VXLAN is not in use in LB VXLAN check
* Initialize port_info dict as blank in OVS agent
* Ensure non-overlapping cidrs in subnetpools with galera
* SR-IOV: update pci lib to support rate limit
* SR-IOV: Fix SR-IOV agent to run ip link commands as root
* QosPolicy: made shared field required and with default value = False
* Python 3: Use '//' instead of '/'
* Prevent update alloc pool over existing gateway ip
* Moved out cisco n1kv mech driver and db models
* Updated from global requirements

7.0.0.0b2
---------

* sriov: implement spoofchecking configuration
* [qos] ovs: removed TODO for getting integration bridge from arguments
* Fixes a typo phys_brs in place of phys_br
* Update dhcp agent cache for network:dhcp ports
* Keep dns nameserver order consistency
* Extend vxlan_group option to allow a range of group addresses
* Load the QoS notification driver from the configuration file
* Add pluggable backend driver for QoS Service notification
* Enable resource usage tracking for reference plugins
* Add plural names for quota resources
* Introduce usage data tracking for Neutron
* Create packages for quota modules
* Python 3: fix test_attributes
* Add FUJITSU vendor plugin in sub_projects
* Python 3: fix test_dhcp
* test_db_base_plugin_v2: Improve DBReferenceError generation
* Fix a microsecond format of isoformat()
* Add update tests for policies and rules
* Updated from global requirements
* Python 3: fix test_context
* Fix KeyError: 'L3_ROUTER_NAT' in l3 scheduler functional test
* Introduce mechanism to determine supported qos rule types for a plugin
* Cleanup IPAM tests
* get_info: request object backport only if desired version is different
* rpc.callbacks.registry: validate that callback provider is registered
* rpc.callbacks.registry: validate type of callback result
* Add UT for agent_extensions_manager
* Don't set tenant_id for rule objects
* Fix dhcp autoschedule test assertion logic
* Fix inconsistency of if/return logic in attributes.py
* Imported Translations from Transifex
* [neutron-db-manage] revision: properly bootstrap a new branch
* Add DB support for resource usage tracking
* QoS: Remove type attribute from QoS rules
* Don't enforce qos ml2 extension driver
* ml2: added qos_profile_id to get_device_details payload
* Add versioned object serialize/deserialize for resources RPC
* policy: made attach_* and detach_* methods more robust
* Decompose Apic ML2 mechanism driver
* Remove duplicate DHCP agent registration in unit test
* Python 3: do not index dict_values objects
* L2 agent RPC add new RPC calls
* Add Cathy Zhang as networking-sfc Lieutenant
* Add error message when migrate from distributed router to centralized
* Avoid printing log options multiple times
* Support qos rules and fields parameters in GET requests
* Pass context when deleting bandwidth limit rule
* Add Pluggable IPAM Backend Part 2
* Create fip on subnet id
* Python 3: fix neutron.tests.unit.agent.dhcp.test_agent
* Updated from global requirements
* Update port bindings for master router
* [qos] cleanup _find_object from neutron.db.api
* Revert "Mute neutron.callbacks notification logs."
logs." * qos: kill get\_namespace() from service plugin * Base infrastructure for QoS API tests * Metaplugin removal * Remove line number of link and useless link * Disable port creation when invalid MAC address is provided * Fix handling of port-range-min 0 in secgroup RPC and agent * Fix a property comment in metadata\_agent files * Add address scope API tests * Python 3: enable more tests * Add new ovs DB API to inquire interfaces name list in a bridge * Tweak wording for project inclusion process * Define fullstack router/network/subnet management fixture * Fix race condition by using lock on enable\_radvd * Fix note in devref/contribute.rst * ensure\_dir: move under neutron.common.utils * Add conntrack-tool to manage security groups * Adding a cleanup for 'qlbaas-' namespaces in netns\_cleanup * Bug-Fix for unexpected DHCP agent redundant * Remove deprecated OVS and LB plugin DB tables * ovs\_lib: Fix native implementation of db\_list * Stop use of oslo\_utils.timeutils.strtime() * Fix gateway port could not retrieve for subnet * Port help text for dvr\_base\_mac from neutron.conf * Add documentation for SRIOV NIC agent (previously missing) * Python 3: fix neutron.tests.unit.agent.linux.test\_async\_process * Adds garp\_master\_repeat and garp\_master\_refresh to keepalived.conf * Added functional tests for L3 schedulers * Always use BridgeDevice to manage linuxbridges * Update OVS Agent to work with Agent Extension Mgr * Instantiate qos agent driver * objects.rule: enable database tests for QosRule * Handle qos\_policy on network/port create/update * Updated from global requirements * Validate that context exists * neutron-db-manage: fix check\_migration for branch-less migration directories * Use only the lower 16 bits of iptables mark for marking * Python 3: fix test\_provider\_configuration * Add address\_scope\_db to neutron/models/head.py * OVS agent factor our port stats processing * Python3: Do not compare NoneType and integers * Use oslo\_log.helpers.log\_method\_call * Unplug the VIF if dhcp port is deleted * Python 3: Wrap map() in a list call * Devref documentation for client command extension support * Alter unit test to match bug and cleanup ext logic * Allow overriding of the neutron endpoint URL in metadata agent * Allow passing table argument to construct IpRouteCommand * Make external\_gateway\_nat\_rules easier to understand * Remove perform\_snat\_action indirection * Flavor Framework implementation * Add breakages in public API of devref * objects.qos.policy: support per type rule lists as synthetic fields * Network RBAC DB setup and legacy migration * [devref] db\_layer: expand on how new migration scripts look like * Add oslo db retry decorator to non-CRUD actions * QoS Service devref * Implement QoS plugin * Add oslo db retry decorator to non-CRUD actions * Change prefix for namespace fixture * Imported Translations from Transifex * OVS-agent: Fix a docstring typo * Python 3: do not use types.ClassType * Create dvr base class and stop passing around snat\_ports * Add qos section to ovs agent config * Mute neutron.callbacks notification logs * Small fixes in test\_qos\_agent UT * Add unit tests and fixes for OVS Agent QoS Extension Driver * Correct two spelling mistakes in Neutron devrefs * Improve check\_migration command error message * Avoid using logging in signal handler * Galera multi-writers compliant sync\_allocations * Fix SR-IOV mechanism driver tests directory * Switch to the oslo\_utils.fileutils * Fix a regression in a recent IPAM change * Fix 
* Tighten exception handler for import_object
* Updated from global requirements
* bugs: Update info about current bug czar
* Add another Lieutenant contact for Dragonflow
* [neutron-db-manage] support separate migration branches
* Add OVS QoS extension agent driver
* Disable python3 tests failing due to Routes < 2.0
* Fix typo of 'receive' in test_dhcp_ipv6.py
* Fix typo 'adress'
* Add sub-project lieutenant for networking-midonet
* Lower log level for extending network/subnet/port
* Cleanup unused method get_plugin_version
* Remove db-access semaphore in ML2
* Moving out cisco n1kv extensions
* Remove self.snat_ports, a dvr thing, from router base class
* Include comment in DHCP ip6tables rules
* Qos Agent Extension
* Fixed L3 agent manual scheduling for HA routers
* Ensure floating IPs only use IPv4 addresses
* Implement QoS policy detach from port and network
* Add API stub for QoS support rule_type resource
* Lower log level of errors caused by user requests to INFO
* Reject router-interface-add with a port which doesn't have any addresses
* Fix bug that resources in attr_map may point to same object
* Updated sub_projects.rst for networking-vsphere
* Imported Translations from Transifex
* Enforce specific order for firewall.(un)filtered_ports and devices
* objects.base: fixed object.delete()
* objects.qos.policy: fixed get_*_policy and attach_* methods
* objects.base: reset changes after getting objects from database
* BaseObjectTestCase: rename test_class into _test_class
* Cleanup rule models and objects
* objects.qos: fixed create and update for QosBandwidthLimitRule
* Use _is_this_snat_host and remove _get_gw_port_host
* Move more snat code to dvr class that does snat
* Add constants for vhost-user vif
* get_vif_ports: ignore non-Interface ports
* Add Pluggable IPAM Backend Part 1
* Fix duplicate entry catch for allowed address pairs
* Fix failures introduced by the new version of mock
* Arista ML2 driver should ignore non-vlan networks
* Ensure that update_fip_statuses gets called
* Make IPAM more pythonic
* Move DVR related method to proper class
* Introduce connection testers module
* Allow IPAM backend switch
* Correct fcntl.flock use in Pidfile.unlock
* Move update_security_group_on_port to SecurityGroupDbMixin
* Python 3: Fix a TypeError in policy.py
* In Arista ML2 driver Reconfigure VLAN on VM migration
* Add sub-project lieutenant for networking-plumgrid
* Fix issues with allocation pool generation for ::/64 cidr
* Add extra subnet route to ha router
* Remove lingering traces of q_
* Make sure path_prefix is set during unit tests
* Add IP_ANY dict to ease choosing between IPv4 and IPv6 "any" address
* Python3: cast the result of zip() to list
* Track allocation_pools in SubnetRequest
* Add ARP spoofing protection for LinuxBridge agent
* COMMON_PREFIXES cleanup - patch 5/5
* List up necessary files for thirdparty-ci.rst
* Refactor init_l3 to separate router port use case
* Devref for out-of-tree plugin/driver contribution
* Python3: do not add dict_values objects
* portsecurity_db_common: Access db columns in a consistent way
* Python 3: do not index dict_keys objects
* Remove unneeded OS_TEST_DBAPI_ADMIN_CONNECTION
* Update DVR agent to use get_vifs_by_id
* DVR: cleanup stale floating ip namespaces
* COMMON_PREFIXES cleanup - patch 1/5
* Fall back on empty path if prefix is missing
* Refactor IpRuleCommand to take more arguments
* objects.qos: added unit tests for QosPolicy neutron object
* objects.base: avoid db access if object does not have changes
* Start documenting potential API breakages in devref:neutron_api
* QoS extension fixes
* Install more-specific ICMPv6 rule in DVR routers
* devref: document API status for neutron.openstack.common.*
* Python3: do not use urllib.urlencode
* AgentExtensionsManager and AgentCoreResourceExtension
* Generic Resources RPC
* DVR: remove unused method
* Generic rpc callback mechanism which could be reused
* Update dhcp host portbinding on failover
* OVS native DBListcommand if_exists support
* Introduce the AFTER_READ callback for ports and networks
* Collapse create_subnet into single method
* Downgrade log level for gone port on status update
* Add extension callbacks support for networks
* [qos] policy: add methods to interact with policy bindings
* Support Basic Address Scope CRUD as extensions
* First QoS versioned objects, ever
* Add bandwidth_limit rule type constant
* Use EXT_TO_SERVICE_MAPPING instead of ALLOWED_SERVICES
* Change the half of the bridge name used for ports
* Fix log traces induced by retry decorator
* Remove unused linux bridge agent configuration options
* Add bandwidth_limit rules as sub-collection of qos policy
* QoS: db models and migration rules
* Add Create/Destroy API to OVS QoS BW Limiting
* Fixing indentation and typo in comments
* docs: link quality of service doc stub to devref index
* Update PLUMgrid plugin information
* Improve fixture usage
* Move pylint dep from tox.ini to test-requirements
* Disable pylint job
* Remove bridge cleanup call
* Move windows requirements to requirements.txt
* Adds base in-tree functional testing of the ovs_neutron_agent
* fix spelling mistakes
* Register extraroute extension
* Increase ping count on ARP spoof test
* Read vif port information in bulk
* Do not mock arping in L3AgentTestFramework functional tests
* Fix Consolidate sriov agent and driver code
* Remove failing SafeFixture tests
* QoS service plugin stub
* Create the QoS API extension stub
* Switch to oslo.service
* Revert "Removed test_lib module"
* Don't access mock's attribute directly especially when it's not needed
* Fix subnet updating failure on valid allocation pools
* Add documentation for Linux Bridge (previously missing)
* Add parent_id to _item calling from _handle_action
* Add logging of agent heartbeats
* populate port security default into network
* Revert "Fix 'router_gateway' port status can't be updated"
* RootHelperProcess: kill can consume signal number
* Move NetcatTester to common/net_helpers
* Make '_create_router' function handle Boolean kwargs correctly
* ip_lib: Add flush() command to IpNeigh to clean arp cache
* Refactor NetcatTester class
* Use REST rather than ReST
* lb-agent: handle security group updates in main loop
* Add a double-mock guard to the base test case
* Remove duplicated mock patch of ip_lib
* Consolidate sriov agent and driver code
* Restructure agent code in preparation for decomp
* Fix ip_lib get_gateway for default gateway on an iface
* fixing typo in gerrit query link in third party policies doc
* Use last address in v6 allocation pool generation
* Extend SubnetRequestFactory to access subnet dict
* Remove duplicated call to setup_coreplugin
* Remove double mock of dhcp agent periodic check
* Remove double fanout mock
* Remove double callback manager mocks
* Remove ensure_dirs double-patch
* Decompose _save_subnet
* Fix tenant-id in Arista ML2 driver to support HA router
* Log OVS agent configuration mismatch
* Avoid env variable duplication in tox.ini
* Skip ARP protection if 0.0.0.0/0 in addr pairs
* linuxbridge: clean up README file
* Fix tox errors in thirdparty-ci docs
* Removed test_lib module
* Updated from global requirements
* Define SafeFixture base fixture
* Remove quantum untracked files from .gitignore
* Context class should initialise its own data
* Abstract sync_allocations
* ovsdb: attempt to enable connection_uri for native impl on startup
* Just use {0,1,2} rather than sys.std*.fileno()
* Make Daemon pidfile arg optional
* Different approach to indicate failure on SystemExit
* Move third-party CI policy under docs/policies
* Remove lbaas API tests, which are now in the lbaas repo

7.0.0.0b1
---------

* Only create one netaddr.IPNetwork object
* Provide work around for 0.0.0.0/0 ::/0 for ipset
* Fix >80 char lines that pep8 failed to detect
* Deprecate "router_delete_namespaces" and "dhcp_delete_namespaces"
* Make DHCPv6 out of bounds API test deterministic
* Don't process network_delete events on OVS agent
* dhcp fails if extra_dhcp_opts for stateless subnet enabled
* Revert "Fix subnet creation failure on IPv6 valid gateway"
* Support oslo_db 1.12
* Python 3: do not use itertools.izip
* Override opportunistic database tests to PyMySQL
* Extend default setenv instead of replacing it in tox.ini
* Fix FloatingIP Namespace creation in DVR for Late Binding
* Cleanup get_plugin_name() from the tree
* Bulk move methods to ipam_backend_mixin.py
* NSXv: update ini file to support dhcp_lease_time
* Use sets to calculate added/original/removed ips
* Add IPset cleanup script
* Optimize ipset usage in IptablesFirewallDriver
* Python3: do not set Request.body to a text string
* Prepare for full stack CI job
* Fix callback registry notification for security group rule
* Python3: do not use __builtin__
* Ease debugging alembic by passing proper scripts path in alembic.ini
* Use string exception casting everywhere
* l3 agent: do router cleanup for unknown routers
* Switch to oslo_utils.uuidutils
* Fix subnet creation failure on IPv6 valid gateway
* Decompose _create_subnet_from_pool
* Move _delete_port
* Decompose create_port and save_subnet
* Retry port status update on StaleDataError
* Allow setting Agents description to None
* Fix RPC version to be a string
* Decompose DVR CSNAT L3 Agent from Compute Node L3 Agent
* cleanup openstack-common.conf and sync updated files
* Fix l3 agent to not create already deleted router
* Python3: do not use '+' on dict_items objects
* Disable keepalived process in keepalived func test
* Python3: do not use im_self/im_func/func_closure
* Add request factory for pluggable IPAM
* Python3: use dict.keys() instead of dict.iterkeys()
* NSX QoS ext: RXTX factor can be decimal
* Move _add_auto_addrs_on_network_ports
* DHCP agent: Set an "ipxe" tag to work with Ironic
* Add sanity_check for keepalived ipv6 support
* Remove _check_ip_in_allocation_pool
* Precision networking-bagpipe-l2 subproject
* Don't delete DVR namespace if there are still ports on this node
* Updated from global requirements
* Fixed the only sphinx warning in docs
* Fix SR-IOV mech driver to set port status to down when agent is required
* read_hosts_file_leases shouldn't parse stateless IPv6
* Fix 'router_gateway' port status can't be updated
* Update version for Liberty

7.0.0a0
-------

* Add networking-sfc to the list of affiliated Neutron projects
* Minor improvements to sub_projects document
* Python 3: do not use cmp(), nor sorted(..., cmp=...)
* Move get_inteface_by_ip from LinuxBridge class to ip_lib
* Add policy files specific to NSX plugins
* Fix cisco_csr_identifier_map.ipsec_site_conn_id
* fix rootwrap debug filter for ping all
* Refactor rpc_loop() in ovs neutron agent
* Add deadlock retry to API and ML2 RPC port update
* ovsdb: session.rpc never initialized
* Remove duplicated debug logging around locking
* Refactor scan_ports() and update_ancillary_ports() in OVS Neutron Agent
* Python3: do not change the size of a dict while iterating over it
* Refactor TestRpcWorker and TestWorkerService
* Juno_initial migration
* docs: added job to well known tox envlist
* API Extensions: inherit from the ExtensionDescriptor
* Remove fossilized remains
* Refactor update_port in db_base_plugin_v2
* Refactor _update_ips_for_port
* Python 3: use dict.values instead of dict.itervalues
* Put output of docs job into doc/build/html
* Remove get_namespace from API extensions
* Ensure no "db" related functional/fullstack tests are skipped in the gate
* Use PyMySQL in MySQL related functional/fullstack tests
* Skip rescheduling networks if no DHCP agents available
* Reflect project moves from stackforge to openstack
* VMWare NSXv: Add distributed URL locking to ini
* Revert "Revert "Add VIF_DELETED notification event to Nova""
* Decompose db_base_plugin_v2.py with changes
* Remove duplicate tunnel id check in sync_allocations
* Remove meaningless no_delete from L3 test
* Revert "Revert "Set default of api_workers to number of CPUs""
* OVSNeutronAgent pass the config as parameter
* Refactor _update_subnet_allocation_pools
* Stop sending gratuitous arp when ip version is 6
* Set .gitreview defaultbranch to feature/pecan
* Fix Enum usage in 589f9237ca0e_cisco_n1kv_ml2_driver_tables
* Imported Translations from Transifex
* power grab
* Change ensure_dir to not check directory exists first
* Document existence of br-tun and br-int in the OVS agent
* Correct indentation in neutron.api.v2.attributes
* Python3: replace 'unicode' with 'six.text_type'
* Fullstack testing devref follow up
* Moving out the cisco n1kv section to stackforge
* Ensure no "agent" functional tests are skipped in the gate
* Remove useless pass from methods in type_tunnel.py
* Make Vlantransparent extension inherit from ExtensionDescriptor
* Actually allow to pass TRACE_FAILONLY to ostestr
* Switch to os-testr to control testr
* Introduce functions using arping executable
* Revert "Defer segment lookup in NetworkContext object"
* Added networking-plumgrid in plugin requirements
* Switch from MySQL-python to PyMySQL
* Context: Remove logic for read_deleted and deprecate it
* Python 3: use next() instead of iterator.next()
* Consume oslo.policy
* policy: cleanup deprecation code to handle old extension:xxx rules
* Fix a regression in "Separate ovs-ofctl using code as a driver" change
* Break Pinger class to functions
* Handle SIGHUP: neutron-server (multiprocess) and metadata agent
* Allow update_port_status to take network param
* Make pep8 job succeed when /etc/neutron/neutron.conf is not installed
* Add a comment on _check_update_has_security_groups
* Change defaultbranch in .gitreview
* Enable all deprecation warnings for test runs
* Remove get_admin_roles and associated logic
* Add documentation for VXLAN Tunnels
* Defer segment lookup in NetworkContext object
* Fix typos in docs
* Fixes bulk insertion of data to ml2_port_binding
* Add Neutron PTL Office Hours
* Python3: Enable all working tests in tox.ini
* Add get_events to OVSDB monitor
* Update ipset members when corresponding sg member is empty
* Send 'security_groups_member_updated' when port changes
* Remove full stack log noise
* ML2: Remove TYPE_MULTI_SEGMENT
* L3 agent should do report state before full sync at start
* Clean only floating-ip related connection states
* Refactor awkward logic in setup_dhcp_port
* Add a "light" base test class for DB tests
* Make _val_to_py and _py_to_val not private
* Decompose db_base_plugin_v2.py part 2
* Fix typo in test class name
* Start linuxbridge neutron agent using a launcher
* Handle SIGHUP in ovs neutron agent
* test_ovs_neutron_agent: Remove useless ofport=10 arguments
* test_l3: Don't assume the order of subnets
* Python 3: do not index a dict_values object
* versionutils: switch from incubator version to oslo.log
* Run RootHelperProcess always as root
* Changes in rally-jobs/README.rst
* Add more API tests for port-security extension:
* Decompose the NCS ML2 Mechanism Driver
* test_db_base_plugin_v2: Don't assume the order of fixed_ips
* pylint: enable `duplicate-key` check
* Remove reference to non-existent fullstack fixture
* Enhance utils.ensure_dir to be resilient to concurrent workers
* Use a single method to remove an address with its conntrack state
* Decompose db_base_plugin_v2.py
* Add sub-project lieutenants
* Fix confusing parameters names
* Extra indent in test_ovs_neutron_agent
* Make MockFixedIntervalLoopingCall class as a helper class
* Revert "Add VIF_DELETED notification event to Nova"
* Wrap ML2 delete_port with db retry decorator
* Remove extra indent in testcases
* Check for 'removed' in port_info before reference
* Catch broad exception in methods used in FixedIntervalLoopingCall
* Add devref that explains fullstack testing and its direction
* Remove get_dhcp_port RPC method
* Refactor type_tunnel/gre/vxlan to reduce duplicate code
* Imported Translations from Transifex
* Update rootwrap.conf to add /usr/local/bin
* Add route to metadata IP by default
* Python3: use six.iteritems() instead of dict.iteritems()
* Modify ipset functional tests to pass on older machines
* Add a non-mixin function for model queries
* Implement IPAM Driver loader
* Remove comment about hash seed in tox.ini
* Refactor mlnx mechanism driver to support infiniband only
* Remove unused _uuid function alias from test_iptables.py
* test_ovs_neutron_agent: Remove unnecessary mocking
* Refactor type_gre.vxlan tests to reduce duplicate code
* Removed duplicate keys in dicts in test
* Don't update floating IP status if no change
* Don't delete port from bridge on delete_port event
* Enable random hash seeds
* Fix formatting of core-reviewers doc
* Get completely rid of contextlib.nested
* Fix indentation errors in tests
* Improve test_set_members_deleting_less_than_5
* Rename test_periodoc_resync_helper to test_periodic_resync_helper
* Sort _get_new/deleted_set_ips responses in unittests
* Ensure netfilter is enabled for bridges
* Cleanup stale metadata processes on l3 agent sync
* Imported Translations from Transifex
* Fix ovs agent restore local_vlan_map failed
* Use correct time delta function
* Do not assume order of security group rules
* ML2: Incorrect commented cisco mechanism driver name
* py34: don't run any tests except unit tests
* Move full-stack logs post-tests
* Fix PYTHONHASHSEED bugs in test_security_groups_rpc
* Addressing follow up comments for OVS_LIB fail_mode setting API
* Move pool dispose() before os.fork
* Add RFE submission guidelines
RFE submission guidelines * Switch to dictionary for iptables find * Process port IP requests before subnet requests * Remove time formatting in agent clock error * Persist DHCP leases to a local database * Flesh out the new RFE process and set deadlines for it's use * Do not assume order of dictionary elements in init\_l3 * Introduce the Lieutenant system into Neutron * Isolate use of fixed\_ips[0] to avoid confusion * Use the correct name for the "Repository Creator's Guide" * Do not assume order of convert\_kvp\_list\_to\_dict method responses * Do not assume order of iptables\_firewall method responses * Do not assume order of get\_sync\_data\_metering response elements * OVS-agent: Remove optional flags from br\_tun.deferred() method * OVS\_LIB support API for setting fail mode 'standalone' * Remove hack for sending gratuitous arp from fip ns * Force order of dhcp.needs\_resync\_reasons dictionary elements * Remove use of contextlib.nested (api-tests) * Use os.\_exit after forking * test\_fork\_error: Fix incorrect test mock * Skip external tables for neutron-db-manage --autogenerate * Fix a typo in \_schedule\_network method * Ensure non-overlapping cidrs in subnetpools without galera * Add callback prior to deleting a subnet * OVS-agent: Separate ovs-ofctl using code as a driver * Imported Translations from Transifex * Remove unnecessary brackets * Ensure mac address added to iptables is always in unix format * Remove use of contextlib.nested * Adding loadbalanacerv2 device owner constant to neutron constants * Python 3: use six.string\_types instead of basestring * Fix minor errors in the Vyatta L3 Plugin: * Remove middleware oslo-incubator module * Match order of iptables arguments to iptables-save * fix DHCP port changed when dhcp-agent restart * VMware NSXV: update configuration file * IPAM reference driver * Python 3: Use six.moves.range * ovs-agent: prevent ARP requests with faked IP addresses * Use convenience method from db api to create nested transaction * Remove a unused Context class * Use namespace names in NetcatTester * Optimize IptablesManager.\_find\_last\_entry * Take Daemon stdin/stdout/stderr args as file objects * Support for concurrent full-stack tests * OVS-DVR: Suppress a confusing error log about csnat port * OVS-DVR: Improve an error log about csnat port * Replace ci.o.o links with docs.o.o/infra * Refactor initialize() of sriov mech driver * Centralized register\_OVS\_agent in tests * Don't pass namespace name in disable\_isolated\_metadata\_proxy * Add client id option support to dhcp agent * Remove use of contextlib.nested * Allow updating port 'binding:host\_id' be None * Block subnet create when a network hosts subnets allocated from different pools * Fix neutron tests * Allow unit tests to be run independently * SystemExit is ok for child processes * When disabling dhcp, delete fixed ip properly * Update build hooks * Append @randtoken to L3 agent namespaces in full stack tests * Add VIF\_DELETED notification event to Nova * setup port filters when sg rules change * tests: don't allow oslo.config to autodiscover config files * mlnx MD: mlnx\_direct removal * l2pop UT: Reduce code duplication in migration tests * Add unit tests for ML2 DVR port binding and fix PortContext inconsistencies * Make it clear the rfe tag is lower-case * Remove H305 from tox.ini pep8 ignore list * Allow users to run 'tox -epy34' * Deprecate quota\_items, register resources upon REST initialization * Support BP:ipv6-router in Neutron HA Router * Catch ObjectDeletedError and 
skip port or subnet removal * Randomize tunnel id query to avoid contention * Remove skip of service-type management API test * Imported Translations from Transifex * Add capability to wait for IPv6 address in ip\_lib * Remove from BridgeDevice homemade execute in namespace * remove router interface on Arista L3 plugin fails * Extenuate register\_dhcp\_agent code duplication in tests * Fix typos related to IPv6 use-cases * Refactor checks for device existence * Updated from global requirements * Check for missing network in \_bind\_devices * Add missed actions into policy.json * Reuse caller's session in ML2 DB methods * ARP spoofing patch: Data structures for rules * Limit router gw ports' stateful fixed IPs to one per address family * VMWare NSXv: Metadata for distributed router * VMware: update supported plugins * Allow to define enable\_snat default value * Update the specs process for Liberty * changes log level to debug for help calls * Remove use of contextlib.nested * Fix fetching prevent\_arp\_spoofing from cfg in neutron-sanity-check * VMware: add in router types for NSXv * Reduce prefix and suffix length in ipsets * Add port-security extension API test cases * Add test for security groups * Use iptables zone to separate different ip\_conntrack * Fix dhcp \_test\_sync\_state\_helper asserting calls wrong * Updated from global requirements * Enhance configure\_for\_func\_testing.sh for \*aaS use * Add IP version support to all ip\_lib code * Imported Translations from Transifex * Get all interfaces for get\_snat\_sync\_interfaces * OVS-agent: Ignore IPv6 addresses for ARP spoofing prevention * Remove un-used keys in keepalived tests * Deprecate config-based Quota Driver * Clarify stackforge/vmware-nsx is for VMware NSX suite * Updated from global requirements * l3 agent: fix grammar in router info not found warning * Finally let L3 and DHCP agents cleanup namespaces by default * Context: is\_admin==True implies is\_advsvc=True * Fix port creation verification of the port-security extension * Add some tests for floating ips * Add notes about official sub-projects * Updated ovsvapp\_agent.ini in neutron * Don't use iterator in search for tunnel type * Remove is\_active property from SimpleInterfaceMonitor * Updated from global requirements * Disembowel register\_l3\_agent code duplication in tests * Ensure mocks for lla allocator \_write in test\_agent * Fix \_device\_to\_port\_id for non-tap devices * Imported Translations from Transifex * Rename delete\_gateway method name * Drop use of 'oslo' namespace package * Remove 'IP' from device exception message * Add icmpv6 to sg\_supported\_protocols * Suppress exception when trying to remove non existing device in SNAT redirect 2015.1.0 -------- * Run radvd as root * Add devices to update in RPC call security\_groups\_provider\_updated * Run radvd as root * Support multiple IPv6 prefixes on internal router ports for an HA Router * Not creating HA router when not enough l3 agents * Eliminate extra queries used to retrieve gw\_ports * Don't update port with host id of None * fix l3-agent restart with last runtime fip for dvr * Refactoring to adhere to coding convention * Replace unnecessary call to get\_sync\_routers * Move test\_get\_user\_allocation\*returns\_none test to a proper class * Replace BaseLinuxTestCase by BaseSudoTestCase * Remove RecursivePermDirFixture useless cleanup * Utilities for building/parsing netns names to facilitate testing * Fix MismatchError to nondeterministic order for list of controllers * Add missing 
interface to populate subnets method * Don't resync on DHCP agent setup failure * Refactor socket ssl wrapping * Don't resync on DHCP agent setup failure * Replace BaseIPVethTestCase by FakeMachine * Return exception when attempting to add duplicate VIP * Imported Translations from Transifex * Allow plugin to specify router\_id * Neutron to Drop Router Advts from VM ports * Fix L3 agent functional tests random failures * Mock report\_state during L3 agent functional tests * Remove backward compatibility for check\_is\_admin 2015.1.0rc2 ----------- * Add weak reference test for callback manager * Spawn RADVD only in the master HA router * tests: confirm that \_output\_hosts\_file does not log too often * Double functional testing timeout to 180s * Restrict subnet create/update to avoid DHCP resync * Only update MTU in update code for MTU * Restrict subnet create/update to avoid DHCP resync * Make sure OVS restarts when Exception occurred * Updated from global requirements * Remove dependency on weak reference for registry callbacks * Ensure metadata network works with DVR * Change callbacks logging from INFO to DEBUG * Fix DVR functional tests resources leak * Create bridges in ovsdb monitor functional tests * Refactor RESOURCE\_ATTRIBUTE\_MAP cleanup * remove metadata\_proxy\_local filters for rootwrap * Add use\_slave DB api support * Fix incorrect query for user ip allocations * Fix typo acomplished => accomplished * OOP naming cleanup in l3\_dvr\_db * ARP spoofing patch: Low level ebtables integration * Fix test discovery for api and functional paths * Block allowed address pairs on other tenants' net * tests: confirm that \_output\_hosts\_file does not log too often * Fix super cleanUp for fullstack ProcessFixture * Add security groups events * Block subnet create with mismatched IP versions * Remove neutron.tests.common.agents package * L3 DB: Defer port DB subnet lookups * lb-agent: ensure tap mtu is the same as physical device * Only update MTU in update code for MTU * Revive BaseLinuxTestCase.\_create\_namespace * Defer creation of router JSON in get\_routers RPC * ovs\_lib: Fix a race between get\_port\_tag\_dict and port removal * Correct inconsistent enable\_snat management * \_create\_subnet\_from\_implicit\_pool assumes external network extension * Log caught exceptions while deleting a router * Define FakeMachine helper for functional/fullstack tests * Replace custom method call logger with oslo.log helper * ML2: Change port status only when it's bound to the host * Release Import of Translations from Transifex * Simplify keepalived.virtual\_routes * l2pop UT: Simplify migration tests * l2pop UT: Expire cached db objects before reusing a session * Correct typo for matching non-dict ovsdb rows * Fixes race condition and boosts the scheduling performance * Register ibm-db-alembic import for DB2 migrations * Fixes race condition and boosts the scheduling performance * ML2: Change port status only when it's bound to the host * Remove double queries in l3 DB get methods * Strip unnecessary overrides in extraroute\_db mixin * Set loading strategy to joined for Routerport/Port * Avoid double-hopping deletes for security group rules * Set IPset hash type to 'net' instead of 'ip' * Revert "Add ipset element and hashsize tunables" * Set IPset hash type to 'net' instead of 'ip' * Update .gitreview to point to stable/kilo * Add Kilo release milestone * Quota model: use HasTenantId mixin * Clarify the init logic for the ML2 plugin * Deal with TODO related to Security Groups RPC API's 
* Add Kilo release milestone
* Add some more comments to models/frozen.py
* IPv6 SLAAC subnet create should update ports on net
* Two api tests for 'firewall insertion mode' feature
* OVS_LIB API addition - change bridge controller connection-mode
* Imported Translations from Transifex
* Drop the ovs_lib compat layer as per TODO note
* Removed ml2_conf_odl.ini config file
* IPv6 SLAAC subnet create should update ports on net
* Use 'port' instead of 'ports' to reference port from IPAllocation
* Enhance OVSDB Transaction timeout configuration
* Added config variable for External Network type in ML2
* Update decomp progress chart
* Provide details for configuring multiple DHCP agents
* Stop running L3 functional tests with both OVSDB interfaces
* Fix formatting errors in TESTING.rst
* Pass correct port ID back to RPC caller
* Fix intermittent ipset_manager test failure
* Fix mock return settings in test_full_uuids_skip_port_id_lookup
* Add full-stack test
* create_resource should return maximum length str
* Add clock sync error detection on agent registration
* Log RPC initialization in L3 service plugin and ML2
* Add block name to switch config options for MLX plug-ins
* Fix the ImportErrors in l3 and dhcp scheduler functional tests
* Removed jsonrpclib dependency
* Additions to TESTING.rst
* Handle race condition on subnet-delete
* Move values for network_type to plugins.common.constants.py
* allow OVSDB connection schema to be configurable
* Add OVSDB connection as a parameter to the transaction
* l3_rpc: Fix a comment typo
* Fix native OVSDB db_get handling for UUID columns
* Move iptables and ipset config registration into modules
* Kill hostname validation for subnet:dns_nameservers
* Adds DVR functional test for multi-external networks
* context: reuse base oslo.context class for to_dict()
* Fix routerid constraint migration
* Synced versionutils from oslo-incubator
* Removed ml2_conf_odl.ini config file
* Router is not unscheduled when the last port is deleted
* Remove L3 report_state logging
* Double functional testing timeout to 180s
* Non-json body on POST 500's
* OVSDB python binding should use row.delete() to remove rows
* Revert connection option post full-stack tests
* Handle SIGHUP in dhcp and l3 agents
* Sync service from oslo-incubator
* Imported Translations from Transifex

2015.1.0rc1
-----------

* Add logging to dangling port to ml2 delete_subnet
* Avoid synchronizing session when deleting networkdhcpagentbinding
* Update L3 Agent Scheduler API tests
* Revert "IPv6 SLAAC subnet create should update ports on net"
* Add missing config parameters in neutron.conf
* Moving VLAN Transparency support from core to extension
* Re-use context session in ML2 DB get_port_binding_host
* Consider all address scopes in init_l3
* Improves the description string for the config parameter metadata_workers
* Fix intermittent UT failures in test_utils
* OOP cleanup: start protected method names with underscore
* Enhance TESTING.rst
* Remove check for bash usage
* Return from check_ports_exist_on_l3agent if no subnet found
* Open Liberty development
* Remove duplicated l3 router scheduler test cases
* Remove tests from HA routers test framework
* linuxbridge UT: Fix a regression of the recent ip_lib change
* Fix dynamic arp populate error for dvr routers
* Reorganize plugin test modules
* Reorganize unit test tree
* Add ipset element and hashsize tunables
* Allow metadata proxy running with nobody user/group
* Skip example retargetable functional test
* Prepare for unit test reorg
* Remove orphaned nuage unit test module
* Add API tests for subnet-create with subnetpool
* Refactoring cleanup for L3 agent callbacks
* Imported Translations from Transifex
* Support multiple IPv6 prefixes on internal router ports
* Fix functional test using local timeout value
* Add index for port
* Always run dnsmasq as root
* Move network MTU from core REST API to extension API
* Refactoring of L3 agent notifications for router
* Fix docstring for l3_dvr_db.dvr_vmarp_table_update
* Treat all negative quota values as -1
* Router test enhancements
* ovs_neutron_agent: Remove a redundant assignment of ovs_status
* Move orphaned api test - deux
* IPv6 SLAAC subnet create should update ports on net
* Add API tests for Neutron DVR extension
* Add missing neutron/tests/unit/agent/common/__init__.py
* Allow metadata proxy to log with nobody user/group
* Move orphaned api test
* Implement default subnet pool configuration settings
* Define bridge/port fixtures for OVS/LinuxBridge/Veth backends
* Update core reviewer responsibilities
* Remove "Arguments dropped when creating context" logging
* Some cleanup in L3 HA code
* Fix reference to non-existent setup_dvr_flows_on_integ_tun_br
* Modify a different agent in test_update_agent_description
* Move API tests to neutron.test.api
* Simple subnetpool allocation quotas
* Subnet allocation from a subnet pool
* Simplify retargetable test framework
* Increase max attempts to 2 for pings on ARP tests
* Revert "Add ipset element and hashsize tunables"
* Add API tests for subnetpool allocation
* Handle no ofport in get_vif_port_to_ofport_map
* Update .coveragerc after the removal of Cisco Nexus monolithic plugin
* Make floatingip reachable from the same network
* Fix functional configure script
* Enable ARP spoofing prevention by default
* Support IPv6 Router
* Move final remnants of router processing to router classes
* Only call get_engine().pool.dispose if _FACADE
* Stop using deprecated DEFAULT group for lock_path
* tests: don't rely on configuration files outside tests directory
* Set floating IP port status to "N/A"
* Add simple ARP spoofing protection
* Imported Translations from Transifex
* Add tests for the l3 agent namespaces manager
* Make L3 agent honor periodic_interval setting
* Handle non-index lookups in native OVSDB backend
* Fix error raising in security groups method
* Update NEC plugin decomposition status
* Auto-update gateway port after subnet-create
* Allow update of ext gateway IP's w/out port delete
* Support Dual-Stack Gateway Ports on Neutron Routers
* Remove auto deletion of routers in unit tests
* No allocation needed for specific IPv6 SLAAC addr assignment
* Remove neutron.tests.sub_base
* Fix test case for DHCP agent interface restart
* Store and log correct exception info
* Test to verify shared attribute of network
* Enable Process Monitor by default
* Reload DHCP interface when its port is updated
* Don't eagerly load ranges from IPAllocationPool
* Revert "Fix validation of physical network name for flat nets"
* Enable services on agents with admin_state_up False
* Simplify base test cases
* Send only one rule in queue on rule create/delete
* Add full-stack tests framework
* Stop any spawned ProcessMonitor at test cleanup
* Add missing DeferredOVSBridge export
* Use router state in get_ha_device_name and ha_network_added
* Added note about removing bridge from mappings
* Add language around re-proposing specs for new releases
* Follow up patch for Validate when DVR enabled, l2_pop is also enabled
* Fix displaying of devref for TestModelsMigrations
* Use 1/0 as booleans for DB2
* Remove allow_overlap from subnetpools API
* If configured, set the MTU for fpr/rfp interfaces
* Add L3 router plugin shim for Brocade MLX
* Moves ovs_lib to agent/common
* OVS agent support on Hyper-V
* No IPv6 SLAAC addrs for create router intf without fixed_ips
* Cisco UCS Manager ML2 Mechanism Driver
* Cisco Nexus1000V ML2 Mechanism Driver
* Rename/move/remove HaRouter methods
* lb-agent: use 'replace' instead of 'add' with 'bridge fdb'
* Add some useful notes in devref/db_layer.rst
* Fix a usage error of joinedload + filter in l3 scheduler
* Move process_ha_router_added/removed from HA agent to router
* Ml2 Mechanism Driver for OVSvApp Solution
* Add eventlet monkey_patch helper
* Move create_dvr_fip_interfaces into DVR
* Deprecate use_namespaces option
* Add the default_ipv6_subnet_pool config option
* Fix common misspellings
* Fix port status not being updated properly
* Fix handling of before/after notifications in linuxbridge agent
* Move external port processing to router classes
* Expose ha_state per router to agent binding via API
* Decouple L3 and service plugins during DVR router migration
* Transform BaseLinuxTestCase methods in helpers
* Remove downgrade from existing migrations
* Fix minor nits in _notify_l3_agent_new_port()
* Drop support for SQL Schema Downgrades
* VMWare NSXv: Metadata default gateway param
* Imported Translations from Transifex
* Move README.odl into opendaylight directory
* Fix missing spaces in strings split across lines
* Fix typos in neutron/db/migration
* Remove unnecessary 'IN vs ==' sql query branches
* Fix intermittent failure in TestNetworksFailover UT
* Fixes floating IP regression with multiple routers
* Add no_delete flag to UT router context manager
* Updated from global requirements
* Send notification to controller about HA router state change
* Fix usage of 'default' parameter in 1955efc66455 migration
* Move metadata proxy shared options to neutron.conf
* Reuse nova batch notifier
* Allow plugin to specify security-group rules ids upon creation
* Add native OVSDB implementation of OVSDB API
* Break coupling between ML2 and L3 during delete operation
* Fix validation of physical network name for flat nets
* Validate when DVR enabled, l2_pop is also enabled
* Fix create_security_group_rule_bulk_native to return all created rules

2015.1.0b3
----------

* Prepare Base(OVS)LinuxTestCase transformation in helpers
* Improve DVR scale performance
* Remove redundant unit tests from OVS DVR Agent
* Hyper-V Agent decomposition
* Enable to apply policies to resources with special plural
* Add a missing mock in DHCPAgentWeightSchedulerTestCase
* Basic subnetpool CRUD
* Enable to specify context on POST requests during unittests
* Fix a usage error of joinedload + filter in dhcp scheduler
* Allow to request metadata proxy only from internal interfaces
* Remove unused L3 HA RPC method
* Replace keepalived notifier bash script with Python ip monitor
* Add sanity check for OVSDB native support
* Fix metering agent failure when chain missing
* Fix minor decomp progress chart issues
* Adding VLAN Transparency support for ML2 along with REST API changes
* DHCP Service LoadBalancing Scheduler
* Make DHCP tests cleanup neutron manager reference
* Include IPv6 SLAAC addresses implicitly for port update
* Api tests to cover network mtu attribute
* Run more Rally benchmark on every patch
* Fix DBDuplicateError handling in _ensure_default_security_group
* Add ML2 VLAN mechanism driver for Brocade MLX and ICX switches
* Include IPv6 SLAAC addresses implicitly for port create
* Don't delete HA router primary VIP on agent restarts
* Introduce External IPAM Interface
* Expose Rest Api access to mtu attributes
* Advertise mtu over dhcp
* Add MTU selection to ML2
* IBM SDN-VE Plugin decomposition
* Brocade Vyatta vrouter shim plugin for vendor decomposition
* Fix spelling error in neutron.conf
* OVS DVR UT: Remove an inappropriate str() conversion
* Handle DBDuplicateError exception properly when creating default sg
* Imported Translations from Transifex
* Schedule net to a DHCP agt on subnet create
* Revert "Set default of api_workers to number of CPUs"
* Add portsecurity extension support
* Revert "fix check_ports_exist_on_l3agent in no subnet case"
* Move Unix domain socket helpers to a common place
* Move mlnx agent to be under ml2/drivers/mlnx
* iptables firewall: add framework for iptables firewall functional test
* Adding a cleanup for 'fip-' and 'snat-' namespaces in netns_cleanup
* replaces enumeration method used to get a list of interfaces
* Remove unneeded DVRAgentRpcApiMixin from OVSDVRNeutronAgent
* Prevent updating mac address of bound port
* Update api tests from tempest
* Set TEMPEST_CONFIG_DIR in the api tox env
* Remove vendor entry point
* Add a netns-cleanup functional test
* Reduce db calls count in get_devices_details_list
* Move internal port processing to router classes
* Brocade vendor code decomposition from neutron repo
* Refactor _remove_unused_security_group_info
* Add MTU selection & advertisement settings to Neutron config
* ML2 cisco_nexus MD: sync config and models with vendor repo
* fix check_ports_exist_on_l3agent in no subnet case
* Fix netns-cleanup broken by ProcessMonitor refactor
* Improve validation of remove_router_interface
* Set default of api_workers to number of CPUs
* Refactor retry mechanism used in some DB operations
* Revert "Revert "Remove port from ovsdb after its deletion""
* Add rootwrap daemon mode support
* Break coupling between ML2 and L3 during create/update operations
* Fix incorrect comments
* Start metadata agent without trying to connect db
* Remove router binding with router-interface-delete
* Remove dead code
* Update contribute.rst with Big Switch decomp
* Migrate to oslo.log
* Fix l3_agentschedulers_db for consistency of code
* Return 404 when executing net-list-on-dhcp-agent with invalid agent_id
* ofagent: Update after networking-ofagent release
* Use common agent.linux.utils.ensure_dir method
* Stop using RPC namespace to unbreak rolling upgrades
* Add Mellanox decomposition progress to chart
* Arista L3 Service Plugin decomposition
* Fix pylint issue with type VS isinstance in event_observers
* Raise QuotaResourceUnknown in the quota engine
* utils.execute: Add a debug-level log to record stdin
* Imported Translations from Transifex
* contribute.rst: Use consistent tags
* Add README and requirements.txt for VMware plugins
* Fix non-existent self.local_subnets in DvrRouter class
* Added oslo.log dependency
* Don't notify dead DHCP agent of removed networks
* Prevent calling waitall() inside a GreenPool's greenthread
* Added check for emptiness where in_ is being used
* Improve performance of _get_security_group_member_ips
* NEC plugin code split
* Imported Translations from Transifex
* Change linux/ip_lib code to better handle address families
* portsecurity_db: Fix a usage of is_attr_set
* ofagent: Have a thin driver module
* Don't start transaction during floating IP delete
* linuxbridge UT: Mock get_interface_by_ip
* linuxbridge UT: Do not create the same instance in each case
* In Arista ML2 delete tenant without any resources
* Initial copy of api tests from tempest
* Fix tempest api testing
* Use an existing function in process monitor tests
* Fix dhcp config dir removed too soon
* FIP debug messages
* Add process monitor to keepalived
* Fix wrong log output in neutron/neutron/agent/linux/dhcp.py
* [contribute.rst] Current status of Freescale Codebase
* portsecurity_db: Use is_attr_set instead of a home-grown equivalent
* Imported Translations from Transifex
* Imported Translations from Transifex
* Updated from global requirements
* Add script to copy neutron api tests from tempest
* ofagent: kill the left over after decomposition
* Use accessors instead of private attributes for Ml2 plugin
* Remove 'free' exclusions from pylint
* Refactor the ProcessMonitor API
* Networking OVS-DPDK plugin decomposition
* Fix DB2 upgrade problem for Remove Hyper-V plugin
* Big Switch Networks code split
* Reduce code duplication and fix argument order in test_wsgi
* Replace IPv4 and IPv6 default addresses with constants
* VMware NSX: Update decomposition progress table
* Updated from global requirements
* Vendor decomposition to move CSR1000v support to the networking-cisco repo
* Move Neutron Policy pages into the tree
* Adding DB model changes for Nuage Plugin post decomposition
* Add ability to run pylint check on modified files only
* Fix test tautology for DVR
* Decompose the VMware plugin
* Remove references to 0.0.0.0/0 in iptable rules
* Updated from global requirements
* Change metadata driver unit tests to use monitored spawn
* Decouple L3 and VPN service plugins during router operations
* Move _set_subnet_arp_info to dvr_router
* Refactor DVR _arp_entry methods
* Refactor management of namespaces in the L3 Agent
* Raise error upon deleting subnet with router ports
* Imported Translations from Transifex
* OVS UT: Remove useless return_value for setup_integration_br
* Introduce ip address monitor
* Add cisco decomposition progress to chart
* oslo: sync all modules that depend on incubator log module
* test_metadata_agent: don't check implementation details
* Progress chart for MidoNet
* Extend test coverage for iptables_firewall.py
* Default the Linuxbridge agent to enabling VXLAN
* Remove HyperVNeutronPlugin
* ml2 plugin: use attributes.{NETWORK, SUBNET, PORT} consistently
* ml2 extension driver: more tests, fix data argument inconsistency
* Use oslo_config choices support
* Metaplugin decomposition
* ofagent: Vendor code decomposition
* contribute.rst: Fill in the current status of ofagent
* Missing entry points for cisco apic topology agents
* Prevent direct port-delete of FIP Agent GW and CSNAT
* PLUMgrid plugin decomposition
* Improve structure of Vendor Decomposition progress chart
* Removing a router twice from the same agent shouldn't cause an error
* Simplify prepare_veth_pairs in functional tests
* Add a functional test for iptables_manager.binary_name
* Add InvalidIpForNetwork and InvalidIpForSubnet exception
* ovs_neutron_agent should exit gracefully
* Ensure tests run under python2.7
* Validate string length at API level
* Capture progress chart for vendor decomposition
* Fixes formatting errors in devref documentation
* Imported Translations from Transifex
* Fix retrieval of shared firewall_policies
* Password config options should be marked secret
* Check whether sudo is enabled in BaseSudoTestCase.setUpClass
* Revert "Remove port from ovsdb after its deletion"
* Add filter for provider network attributes in ML2
* tests: initialize policy in BaseTestCase
* policy: don't hack around oslo.config path search algorithm
* Make listing security groups faster
* Allow AsyncProcess to block on process start and stop
* Don't mock plugin in L3SchedulerTestBaseMixin
* Adds migration script for Hyper-V Plugin tables
* Make del_fdb_flow() idempotent
* Update default tox envlist to match voting gate checks
* Added a policy for retrieving the agent hosting a load balancer
* Avoid ObjectDeletedError while accessing deleted binding
* Correct db functional tests to support oslo.db 1.50
* Avoid DetachedInstanceError after session rollback
* Always fill UDP checksums in DHCP replies
* remove unused code in metadata agent code
* Move pylint checks to pep8 testenv
* Change L3 agent AdvancedService class to be non-singleton
* Passes the plugin context variable in the ML2 Extension Driver API
* devref: added guidelines to maintain service entry points
* VMware NSXv: Added router-type to database model
* Remove discover from test requirements
* Add callbacks-based system to Neutron
* Refactor Pinger class
* Create/Delete FIP Agent gateway port only if DVR Routers
* Move the assignment of existing_floating_ips before try block
* Fix misspelled words in neutron
* Ensure arping always exits
* Updated from global requirements
* wsgi: remove explicit monkey_patch() call
* If providers exist in neutron.conf, don't look in services conf
* test_ovs_dvr_neutron_agent: Use consistent variable names
* Nuage core plugin decomposition
* devref: consider sphinx warnings as failures
* devref: don't link to nonlocal images
* devref: fixed class name for test_migrations autodocumentation
* devref: updated documentation for oslo-incubator
* devref: updated documentation for advanced services
* Avoid fetching network in _commit_port_binding
* VMware: Router Type Extension Support
* OVS UT: Change misleading constants in veth tests
* test_l2population: Use a fake mech driver instead of ofagent
* l2population_rpc: Make fdb_add_tun/fdb_remove_tun more flexible
* Make nova notifier work with sessions
* Fix parameters in exception handling
* adopt namespace-less oslo imports
* Do not run neutron-ns-metadata-proxy as root on dhcp agent
* Move Floating IP processing to Router classes
* Updated from global requirements
* Improve exception handling in _process_router_update()
* Cisco Nexus ML2 Vendor decomposition
* Remove versioning import of novaclient
* Remove remaining uses of passing root_helper
* Remove root_helper arg from sanity checks
* Enable pylint unnecessary-pass
* Enable pylint no-value-for-parameter
* Enable pylint bad-super-call
* Enable 'free' pylint checks
* Remove reference to self.services_sync
* Fix type of exception in ml2 l2pop
* VMware NSXv: Add configuration options to nsx.ini
* Mock link local allocator write so UT doesn't write a file
* VMWare NSXv: Add configuration params to nsx.ini
* Remove error logs for a common situation (non created ipsets)
* Default route missing for IPv6 subnets in HA Router
* Unify logic that determines liveliness of DHCP agent
* fix for _get_external_device_interface_name trace
* ML2: remove underscore from public method
* Fix static strings with labels for DVR
* Get rid of rpc to fetch fip agent port on agent
* Combining sec groups member update RPC calls
* VMWare NSXv: id fields should be nullable
* Check if routing rule exists before adding
* Remove root_helper arg from DHCP agent
* Remove root_helper arg from AsyncProcess
* Remove root_helper arg from linuxbridge
* Remove root_helper arg from SecurityGroupAgentRpc
* Moved several services into neutron.cmd.eventlet
* Monkey patch all the code inside neutron/cmd/eventlet/..
* tests: monkey patch stdlib before importing other modules
* Don't monkey patch netns_cleanup
* Remove root_helper arg from IpsetManager
* Revert "Add the rebinding chance in _bind_port_if_needed"
* Remove root_helper arg from IptablesManager
* Remove root_helper arg from external_process
* Add a functional test that checks HA router is configured on a restarted agent
* Update midonet plugin requirements
* Stop using passed root_helper in ip_lib
* OVS UT: Fix some confusions between local vlan id and segmentation id
* Un-break tox for unit tests
* Fix FIP agent gw port delete based on external net
* Skip DBDuplicateEntry exception in security group creation
* Hyper-V: Fixes security groups issue
* Fix the api job
* Setup br-tun in secure fail mode to avoid broadcast storms
* Delete qg device during DVR-SNAT router deletion
* Automate host configuration for functional testing
* ML2: Hierarchical port binding
* ML2: DB changes for hierarchical port binding
* Remove RPC dependency to create FIP agent gw port
* Fix typo in bash tool
* Remove remaining root_helper args from plugins
* Fix usage of drop_constraint in 2a1ee2fb59e0 migration
* Fix index name in downgrade 26b54cf9024d migration
* Remove root_helper arg from linux interface
* Remove root_helper arg from L3 Agent
* OVS DVR: Remove dead code
* Updated from global requirements
* Fix AttributeError exception for API's test_network_lifecycle
* Remove root_helper arg for ovs_lib
* Raise timeout for test_conntrack_disassociate_fip
* Cleanup in keepalived tests
* Add run_as_root option to utils.execute
* Revert "monkey patch stdlib before importing other modules"
* Remove unused RPC methods from l3_rpc
* Tweak mocking logic for L3 plugin tests
* Move NCS mech driver to its new home
* Added policy for lbaas v2 agent extension resource
* keepalived: use sh instead of bash for notifier scripts
* Refactor to facilitate DVR scale performance
* hacking: also catch 'import oslo.*' imports
* Moved hacking unit test into proper location
* Stale VXLAN and GRE tunnel port/flow deletion
* Use ovsdb-api neutral column/value mappings
* Prepare to functionally test OVSDB interfaces
* NEC: Merge NEC plugin models into single module
* Remove remaining do_delete from unit tests
* Typos fixed
* Scope state reporting rpc api using a messaging namespace
* Remove use of keepalived 'vrrp_sync_group' as it is unused
* Scope dvr rpc api using a messaging namespace
* Updated from global requirements
* Remove port from ovsdb after its deletion
* Add index on tenant_id
* Remove deprecated DEFAULT.root_helper
* Provide routes for neighbor IPv4 subnets
* OVS DVR: Use a right mac address value to compose a flow
* Refactor radvd control in the l3-agent
* monkey patch stdlib before importing other modules
* Don't crash when adding duplicate gre allocation
* Fix lack of device ownership enforcement for DVR routers
* Search in /sys/class/net for network devices
* Adopt rpc_api devref to new oslo_messaging namespace
* Fix minor nits with the devref's contribute section
* Remove VPN specific exception
* Correctly mock-out 'ip route...' calls in IPv6 test
* Cleanup dead code for dnsmasq
* Add mtu attributes to network model
* Add the rebinding chance in _bind_port_if_needed
* Add vlan_transparent attribute to network model
* Check conntrack rule cleanup on floating IP disassociate
* l2-pop shouldn't notify agent about inactive ports
* Drop devstack-gate files from Neutron repo
* Use weak ref to avoid deleting fip namespace through agent
* Move DVR floating ip methods to dvr_router
* Provide more details about testing strategies
* Add section for DevStack Integration Strategies to the DevRef
* VMware: consolidate NSX models
* Restore and fix vmware unit tests
* Move extra routes processing to router classes
* oslo: migrate to namespace-less import paths

2015.1.0b2
----------

* Fix breakage in all service repo unit tests, due to duplicate imports of exts
* Log entry when no Floating IP interface present
* Refactor logging in loop to only log debug messages once
* Nuke a useless lambda wrapper and call to iterkeys (review feedback)
* Nuke remaining service config and extensions from main repo
* Pass '--dhcp-authoritative' option to dnsmasq
* Imported Translations from Transifex
* ml2: Simplify _process_provider_create
* Fix extra-dhcp-opt on stateless dhcpv6 subnet
* Updated from global requirements
* ML2: Use same port binding logic for DVR ports as non-DVR ports
* Improve robustness of network failover
* Decrease rpc timeout after agent receives SIGTERM
* Configures RADVD to send managed flag in RA for DHCP_STATEFUL
* Make prevent_l3_port_deletion handle missing port
* Backout 152195, which doesn't check the same token that it saves
* NSX DB models split, part 3 (and final)
* NSX DB models split, part 2
* Discriminate loaded extensions using their alias
* Refactor ml2 manager
* Extension moving tweaks, exceptions and extension path fix
* Log tenant ports if subnet could not be deleted
* Fixing several misspellings in neutron
* NSX DB models split, part 1
* Imported Translations from Transifex
* Enable super-on-old-class pylint check
* fixes error logging to use the right exception parent class
* Drop bw compat module for OpenDayLight
* Don't pass the port down to the floating ip processing
* Move agent.linux.utils tests to proper location
* Drop deprecated namespace for oslo.rootwrap
* Encapsulate DVR Fip namespace
* Move ha router functionality from the agent to ha_router
* Remove duplicate logging of attribute validation errors
* Add requirements.txt file for OpenDaylight Mech Driver
* Mechanisms to move extensions and config into service repos
* Fix flake exclude matching of .*
* Hardening unittest, make resilient to address assignment order
* Allow to request metadata proxy only with redirection
* Remove unused mocks
* Thinning Arista ML2 driver from neutron tree
* Allow port mac_address to be modified
* Removed redundant statement from l3agentscheduler
* Implements the ProcessMonitor in the l3_agent
* Add option to remove networks from dead DHCP agents
* Thin MLNX ML2 mechanism driver and agent
* Fixing a log message in Arista L3 Service Plugin
* Not assign dynamic IPv6 address on dhcp interface
* Default security group table
* Support Extra DHCP Options for IPv4 and IPv6
* Refactor _convert_sgr_to_iptables_rules in iptables_firewall
* Do not check twice IP allocations for auto-address subnets
* Make the interface driver available to the router classes
* Make agent config available to the router classes
* Updated from global requirements
* Drop bin/neutron-rootwrap
* Refactor iptables rule expansion for the non ipset case
* Set locale before check dnsmasq version
* Freescale FWaaS Plugin: Update to setup.cfg
* Allow 'max_l3_agents_per_router' to be set to '0'
* test_agent_scheduler: Fix a misleading variable name
* Fix AttributeError when using DVRServerRpcApi
* Add abandon script from nova
* Add missing Connection.close() method
* Deleting HA router with attached port causes DB inconsistencies
* Refactor the ProcessMonitor _exit_handler to ProcessMonitor
* TestL2PopulationRpcTestCase: Stop loading linuxbridge mech driver
* Return 404 when executing router-list-on-l3-agent with invalid agent_id
* VLAN support for DVR
* Fixes Hyper-V agent root_helper issue
* Ensure ofports are converted to string before trying to use join()
* Add coverage for extra routes extension
* Add address family to 'ip rule' calls
* Add OVSDB abstract API
* Add functional tests for IptablesManager using tcp/udp
* dhcp: move dnsmasq version check to sanity_check
* Use DVRServerRpcApi instead of a mixin
* Scope secgroup rpc api using a messaging namespace
* Add and use SecurityGroupAgentRpc
* hyperv: drop useless messaging.Target on HyperVSecurityAgent
* tests: don't spread fixtures.TempDir throughout test cases
* Extract l2pop/DVR controller logic to common method
* Imported Translations from Transifex
* attributes: Additional IP address validation
* Mention networking_odl in README.odl
* Updated from global requirements
* Overload correctly BASEV2.__table_args__
* Add notes on how to deal with stable branches
* Configure IPv6 LLADDR only on master L3 HA instance
* Do not duplicate message consumers
* Add index on db "allocated" columns
* pep8: cleaned up excludes
* Remove check_i18n tox target
* Implements ProcessMonitor in the dhcp_agent
* Functional test IPAM DB operation
* If router is HA, get current_cidrs from keepalived object
* Move process monitor settings to neutron.conf AGENT section
* Drop SecurityGroupServerRpcApiMixin
* sriovnicagent: drop usage of SecurityGroupServerRpcApiMixin
* sriovnicagent: untangle SecurityGroupAgentRpcMixin
* mlnx: drop usage of SecurityGroupServerRpcApiMixin
* mlnx: untangle SecurityGroupAgentRpcMixin
* linuxbridge: drop usage of SecurityGroupServerRpcApiMixin
* linuxbridge: untangle SecurityGroupAgentRpcMixin
* Use db constraint to ensure mac address uniqueness
* Ignore 404 error and lower a warning log to info
* Reorganize OVSDB API
* Use proper capitalization for OVS table names
* Move shared metadata driver related config options
* Remove useless constant from l3 agent module
* Added test_dvr_router_lifecycle to cover dvr
* Imported Translations from Transifex
* Use constants from networking_odl project
* Initialize dist_fip_count after agent restart
* Fixes Multiple External Networks issue with DVR
* Replace FLOATING_IP_CIDR_SUFFIX constant with utils
* tests: drop usage of SecurityGroupServerRpcApiMixin
* ovs: drop usage of SecurityGroupServerRpcApiMixin
* oneconvergence: drop usage of SecurityGroupServerRpcApiMixin
* ofagent: drop usage of SecurityGroupServerRpcApiMixin
* nec: drop usage of SecurityGroupServerRpcApiMixin
* hyperv: drop usage of SecurityGroupServerRpcApiMixin
* bigswitch: drop usage of SecurityGroupServerRpcApiMixin
* Create SecurityGroupServerRpcApi and add some docs
* Improve agent-based flat/vlan ml2 port binding failure logging
* ml2: remove stale _filter_nets_l3 in get_networks
* drop unused test rootwrap filter file
* Updated from global requirements
* SIGHUP keepalived if L3 agent restarts
* Update _cur names to _current in iptables_firewall.py
* Added comments, and refactored _add_rule_by_security_group
* Improve test coverage of dhcp agent scheduling
* Imported Translations from Transifex
* tools/split.sh: Tweak commit message
* Switch to using abc in the retargetable client
* common_db_mixin.py: simplify CommonDbMixin
* Fixes blocking of VRF config in Arista L3 Plugin
* Drop _test_rootwrap_exec test
* Fix pylint unbalanced-tuple-unpacking warning
* Corrected singulars/plurals in iptables_firewall.py
* Create DvrRouter and HaRouter as a sub-class of Router
* Remove unused self.sync_progress attribute
* DHCP agent restructuring
* Move Monkey patch back to being as early as possible
* Fix outstanding failures with Neutron API job
* Disable unbalanced-tuple-unpacking
* Revert "Change transaction isolation so retry logic could work properly"
* Change transaction isolation so retry logic could work properly
* Updated from global requirements
* Refactor the _get_external_device_interface_name method
* Refactor of floating ip processing in L3 Agent
* ML2: Driver API changes for hierarchical port binding
* Fix some assertEqual argument orders
* Don't log a warning if an iptables chain doesn't exist
* Migrate to oslo.concurrency
* Replace missing space in error message
* Clarify misleading iptables comment
* Fix missing spaces in error messages
* make delete_router send delete_port to core_plugin
* VMWare-NSXv: VMWare NSXv extensions
* Dropped fixture module
* base.py: Improve exception handling
* Correct _test_delete_ports_by_device_id_second_call_failure
* Add ovsdb-related functional tests
* VMWare-NSXv: VMWare NSXv configuration file
* Imported Translations from Transifex
* Create arping helper in ip_lib
* Initial thin ML2 mechanism driver
* Enable adding new tag with options
* Call on dhcp-agent DhcpLocalProcess.restart() breaks dhcp
* Fixes shared networks in Arista ML2 driver
* Move agent cleanup scripts to cmd module
* Fix IP allocation for multiple slaac subnets
* tests: don't restore stopped mock that is set in setUp()
* misc-sanity-checks.sh: Some cleanups
* Log iptables rules in a readable format
* Remove main alias for bw compat with vpn agent
* Midonet plugin decomposition
* Fix topic for provider security group update
* Specify prefix length for IPv6 subnets
* Service split: cleaned up setup.cfg
* VMWare NSXv DB model bugfix
* Speed up initial L3 full sync time
* hacking: enable H238 (old style class declaration, use new style)
* hacking: enable W292 (no newline at end of file)
* Update hacking to 0.10
* Use "if dict.get(key):" instead of "if key in dict and dict[key]:"
* Rename qexception->nexception
* Fix AttributeError on check_foreign_keys in functional job
* Catch StaleDataError in update_device_down
* Code improvement in type_vxlan.py and type_gre.py files
* Ensure config directory created before updating leases
* Allow IptablesManager to manage mangle table
* Fix IPv6 Subnet Slaac Check
* Imported Translations from Transifex
* Move non-bridge-related OVSBridge methods to BaseOVS
* Move metadata agent entry to its own file
* Run only one instance of Nuage sync cycle at a time
* Updated from global requirements
* Scope metadata rpc api using a messaging namespace
* Provide doc string pointers for the dhcp agent rpc api
* Remove DBDuplicateEntry columns check
* Limit permission change
* Break out config and entry point out of l3/agent file
* Validate legacy router services before migration
* Clarify dnsmasq version check failure message
* Update comment about metadata_proxy_shared_secret config
* Remove redundant tunnel ids from ovs agent
* Add index generation for IPv6 rules for DVR
* Correct l3-agent iptables rule for metadata proxy
* Fix UT for L2pop test_get_agent_ports_no_data()
* Move postcommit ops out of transaction for bulk
* Reset policies after RESOURCE_ATTRIBUTE_MAP is populated
* Remove SELECT FOR UPDATE from delete_network and delete_subnet
* Bump minimal dnsmasq version to 2.67
* Make L3 HA VIPs ordering consistent in keepalived.conf
* Add Process class helper to manage processes with namespace
* Make lb mechanism driver use enable_security_group flag
* Catch PortNotFound and SubnetNotFound during network_delete
* HA for DVR - schema migration and change
* Revert "Revert "Add metadata proxy L3 agent driver""
* moving vxlan module check to sanity checks and making practical
* Drop functional/contrib directory
* refactor l3-agent to include dvr.py
* Validate IPv6 subnet while associating to Router
* VMWare-NSXv: VMWare NSXv database models
* Deal with PEP-0476 certificate chaining checking
* Reduce duplicate code in test_iptables_manager
* Add support for retargetable functional api testing
* print error when no match mapping found in check_segment_for_agent
* Tweak gate hooks scripts to handle both functional and api jobs
* Replace mention of nose with nose2 in devref
* Skip adding ips from non dhcp enabled subnets to hosts file
* Add developer documentation for plugins/drivers contributions
* Deletes floating agent gw port on disassociate
* Add help text for 'host' parameter in neutron.conf file
* Updated keystone_admin conf section to reflect changes in middleware
* Removed spurious check for ip version
* Ensure test_metaplugin handles random hashseeds
* Ignore non-existent ports during OVS intf list
* [apic ml2] Bind ports regardless of the owner
* Improve unit test coverage for Ml2 db.py
* Delete the console scripts for lbaas and vpnaas
* Confusing message deleting default security group
* Enable the "not-callable" pylint check
* ovs_dvr: Use lazy logging interpolation
* Add a constant for router interface device owners
* Stale VXLAN & GRE tunnel endpoint deletion from DB
* Add support for flat networks in SRIOV Mechanism Driver
* Retry on unassigned ofport instead of treating it as a failure
* VMware: fix security group check on port create
* Eventlet green threads not released back to pool
* Don't unnecessarily loop through all ports/interfaces
* Set type=internal as part of port creation
* Fix DVR flow problems for IPv6 subnet
* Allow to specify IP address of floating ip
* Do not count dvr agents while creating HA ports
* csr1kv_hd_driver: Improve exception handling
* Remove _delete_port_security_group_bindings from delete_port
* Remove useless parameter from l3_dvr_db.py
* Clean-up sanity checks done via shell scripts
* Do not run neutron-ns-metadata-proxy as root on L3 agent
* Correct invalid indentation in is_dvr_serviced
* Add validation for the dvr router l3agent binding
* Fixes spelling error
* get_binary_name should return strings without spaces
* validate L3 HA min/max _l3_agents_per_router
* Enable pylint checks for "anomalous" string escapes
* Tighten dnsmasq version regex
* Remove unnecessary regex grouping
* Combine author_tag and log_translation_hint regexes
* ML2 UT: Fix incorrect mock return value
* ipv6: set OtherConfig flag for DHCPv6 stateless subnets
* PLUMgrid plugin: Fix for delete subnet with admin context
* brocade: Use lazy logging interpolation
* linuxbridge: Use lazy logging interpolation
* embrane: Use lazy logging interpolation
* bigswitch: Use lazy logging interpolation
* Use lazy logging interpolation
* Cisco: logging incorrectly called with (fmt, arg) tuple
* ml2: remove superfluous %s in LOG.debug() format
* Fix typo'd format parameter in midonet_lib.py
* Update L3 agent drivers singletons to look at new agent
* Prevent symlinks from being added to the tree
* Copy the contrib directory instead of moving it
* Revert "Add metadata proxy L3 agent driver"
* Scope dhcp rpc api using a messaging namespace
* Validate local_ip for Linuxbridge agent
* Allow setting a tenant router's external IP
* Remove NSX 'service' plugin
* Imported Translations from Transifex
* Move DB TestModelsMigrations from unit to functional
* tests: drop unit tests that only check default configuration values
* Backward compatibility for advanced services
* Update heal_script for alembic 0.7.1

2015.1.0b1
----------

* Add metadata proxy L3 agent driver
* Updated from global requirements
* Move contrib directory to base test directory
* Add OVS status and fix OVS crash
* Option for root_helper when checking namespace
* Cleanup req_format in test_api_v2_resource
* Imported Translations from Transifex
* Cisco: unsupported format character in log format
* Correct arguments to logging function
* Support 'alive' filter for get_agents() in agents_db
* Minor lbaasv2 things from the feature branch, needed in neutron
* Advanced services support in neutron-db-manage
* Remove locking from network and subnet delete op
* Removed unused iso8601 dependency
* Avoid unnecessary explicit str() conversion around exceptions
* Add functional test for l3-agent metadata proxy
* Remove mlnx plugin
* Set timeout for functional job
* Enable test_migration
* Fix neutron hang for IPv6 allocation pool update
* tests: initialize admin context after super().setUp call
* Improve performance of get_active_networks_info
* Fixed test test_update_port_security_off_address_pairs
* openvswitch/ofagent: Remove OVS.enable_tunneling option
* Imported Translations from Transifex
* Remove unused dependencies
* Generate testr_results.html for neutron functional job
* L3 Agent restructure - observer hierarchy
* Replace non-ovs_lib calls of run_vsctl with library functions
* Don't restore stopped mock that is initialized in setUp()
* Separate wait_until to standalone function
* Imported Translations from Transifex
* Mock up time.sleep to avoid unnecessary wait in test_ovs_tunnel
* Catch duplicate errors scheduling SNAT service
* Fix for KeyError: 'gw_port_host' on l3_agent
* Migrate to oslo.context
* Have L3 agent catch the correct exception
* Not nova but neutron
* Remove broad exception catch from periodic_sync_routers_task
* Fix race condition in ProcessMonitor
* Updated from global requirements
* Refactor process_router method in L3 agent
* Switch to using subunit-trace from tempest-lib
* Move classes out of l3_agent.py
* Prettify tox output for functional tests
* Services split, pass 2
* Fix IPv6 RA security group rule for DVR
* Imported Translations from Transifex
* ofa_test_base: Fix NoSuchOptError in UT
* Add lbaasv2 extension to Neutron for REST refactor
* Remove TODO for H404
* Update rpc_api docs with example version update
* Auto allocate gateway_ip even for SLAAC subnets
* Updated from global requirements
* Split services code out of Neutron, pass 1
* Use comments rather than no-op string statements
* Fix AttributeError during startup of ovs agent in DVR mode
agent in DVR mode * Enforce log hints * Disallow log hints in LOG.debug * Reduce code duplication in test\_linux\_dhcp * Print version info at start * Enforce log hints in ofagent and oneconvergence * Make sudo check in ip\_lib.IpNetnsCommand.execute optional * Move set\_override('root\_helper', ...) to base functional class * Imported Translations from Transifex * IpsetManager refactoring * Update i18n translation for NEC plugin log msg's * return the dict of port when no sec-group involved * Imported Translations from Transifex * Update i18n translation for IBM plugin log msg's * Workflow documentation is now in infra-manual * tox.ini: Prevent casual addition of bash dependency * Updated from global requirements * Remove RpcCallback class * Convert several uses of RpcCallback * Fix up an old RpcProxy assumption * Remove RpcProxy class * Cleanup recent generalization in post mortem debugger * radvd: pass -m syslog to avoid thread lock for radvd 2.0+ * Get rid of py26 references: OrderedDict, httplib, xml testing * Imported Translations from Transifex * Fix enable\_metadata\_network flag * Fix program name in --version output * Enforce log hints in opencontrail * Update i18n translation for Metaplugin plugin * Update i18n translation for Brocade plugin log msg's * Update i18n translation for Nuage plugin * Update i18n translation for Embrane plugin * Enforce log hints in neutron.plugins.plumgrid * Remove ovs-vsctl call from OVSInterfaceDriver * Update i18n translation for Midonet plugin * Enforce log hints in neutron.plugins.sriovnicagent * Enforce log hints in neutron.plugins.hyperv * Imported Translations from Transifex * Drop RpcProxy usage from DhcpAgentNotifyAPI * Updated the README.rst * Fix base test class for functional api testing * Use oslo function for parsing bool from env var * Don't block on rpc calls in unit tests * Refactor test\_migration * Strip square brackets from IPv6 addresses * Update i18n translation for BigSwitch plugin log msg's * Imported Translations from Transifex * pretty\_tox.sh: Portablity improvement * iptables\_manager: Fix get\_binary\_name for eventlet * test\_dhcp\_agent: Fix no-op tests * Drop old code from SecurityGroupAgentRpcApiMixin * Drop RpcProxy usage from ml2 AgentNotifierApi * Update i18n translation for Mellanox plugin and agent log msg's * Drop RpcProxy usage from L3AgentNotifyAPI * Simplify L3 HA unit test structure * Update i18n translation for VMware NSX plugin log msg's * Alter execute\_alembic\_command() to not assume all commands * hacking: Check if correct log markers are used * Fix hostname validation for nameservers * Removed python2.6 rootwrap filters * Imported Translations from Transifex * MeteringPluginRpc: Fix crash in periodic\_task * Enable undefined-loop-variable pylint check * Remove unused variables from get\_devices\_details\_list * Change description of default security group * Fix incorrect exception order in \_execute\_request * Migrate to oslo.i18n * Migrate to oslo.middleware * Remove unused xml constants * Check metadata iptables chains during functional test * Drop RpcProxy usage from MeteringAgentNotifyAPI * Drop RpcProxy usage from l2population code * Drop RpcProxy usage from cisco apic ml2 plugin * Drop RpcProxy usage from oneconvergence plugin * Synced processutils and periodic\_task modules * Migrate to oslo.utils * Fix floating-ips in error state in dvr mode * Reject trailing whitespaces in IP address * Imported Translations from Transifex * CSCO:Tenants not to access unshared n/w profiles * Drop sudo 
requirement from a unit test * Remove Python 2.6 classifier * Update i18n translation for Cisco plugins and cfg agent log msg's * Remove ryu plugin * Imported Translations from Transifex * Drop RpcProxy usage from nec plugin * Drop RpcProxy usage from mlnx plugin * Drop RpcProxy usage from ibm plugin * Drop RpcProxy usage from hyperv plugin * Drop RpcProxy usage from cisco.l3 * Drop RpcProxy usage from cisco.cfg\_agent * Drop RpcProxy usage from brocade plugin * Update rally-jobs files * Test HA router failover * Imported Translations from Transifex * Update i18n translation for linuxbridge log msg's * Update i18n translation for openvswitch log msg's * Update i18n translation for ML2 plugin log msg's * Updated from global requirements * Imported Translations from Transifex * Enforce log hints in neutron.services * Enforce log hints in neutron.services.metering * Fix metadata proxy start problem for v6-v4 network * Fix AttributeError in RPC code for DVR * Drop RpcProxy usage from bigswitch plugin * Drop RpcProxy usage from VPNaaS code * Drop RpcProxy usage from metering\_agent * Fix context.elevated * Tighten up try/except block around rpc call * Implement migration of legacy routers to distributed * run\_tests.sh OS X script fixes * Eliminate unnecessary indirection in L3 agent * Show progress output while running unit tests * Drop RpcProxy usage from LBaaS code * Enforce log hints in neutron.services.loadbalancer * Enforce log hints in neutron.services.firewall * Enforce log hints in neutron.services.l3\_router * enable H401 hacking check * enable H237 check * Updated from global requirements * Check for default sec-group made case insensitive * Update i18n translation for neutron.server/scheduler log msg's * Update i18n translation for neutron.notifiers log msg's * Update i18n translation for neutron.common/debug log msg's * Imported Translations from Transifex * ofagent: Remove obsolete bridge\_mappings (plugin side) * Delete FIP namespace when last VM is deleted * Fix a race condition adding a security group rule * Drop RpcProxy usage from FWaaS code * Drop RpcProxy usage from neutron.agent.rpc.PluginApi * Fix a copy/pasted test mistake * Drop test code copied from nova * Drop several uses of RpcCallback * Add some basic rpc api docs * Drop RpcCallback usage from DhcpRpcCallback * Drop RpcProxy usage from PluginReportStateAPI * Fix hostname regex pattern * Catch NoResultFound in \_get\_policy\_profile\_by\_name * Validate loadbalancing method when updating a pool * Update i18n translation for neutron.api log msg's * Catch DBReferenceError exception during binding a router * Enable default SNAT from networks connected to a router indirectly * Imported Translations from Transifex * BSN: Optimistic locking strategy for consistency * BSN: include missing data in floating IP call * ofagent: Remove obsolete bridge\_mappings (agent side) * NSX: Validate gateway device list against DB * Drop RpcProxy usage from MetadataPluginApi * Drop usage of RpcProxy from L3PluginApi * Prevent an iteration through ports on IPv6 slaac * Use a string multiplier instead of 59 repetitions * Convert all incoming protocol numbers to string * Updated from global requirements * Correct raw table regex in test\_security\_groups\_rpc * BSN: Add network to ext\_gw\_info sent to backend * BSN: Set inconsistency record on delete failure * Fix PYTHONHASHSEED bugs in test\_security\_groups\_rpc * Subnet delete for IPv6 SLAAC should not require prior port disassoc * Fix client side versions in dhcp rpc API * Drop usage of 
* linuxbridge-agent: make vxlan unicast check more efficient
* Moved out common testcases from test_type_vxlan.py
* Update i18n translation for neutron.extension log msg's
* Update i18n translation for neutron.db log msg's
* Update i18n translation for neutron.cmd log msg's
* Update i18n translation for neutron.agents log msg's
* enable F812 check for flake8
* enable F811 check for flake8
* Decrease policy logging verbosity
* Support pudb as a different post mortem debugger
* Cleanup and refactor methods in unit/test_security_groups_rpc
* switch to oslo.serialization
* Add rootwrap filters for ofagent
* Updated policy module from oslo-incubator
* Resolving some spelling mistakes
* Fix for FIPs duplicated across hosts for DVR
* Drop neutron.common.rpc.MessagingTimeout
* Remove neutron.common.rpc.RemoteError
* Remove neutron.common.rpc.RPCException
* Remove useless return
* Cisco VPNaaS and L3 router plugin integration
* Fix missing allowed command in openvswitch xenapi agent
* fix event_send for re-assign floating ip
* Remove openvswitch core plugin entry point
* rootwrap config files reference deleted quantum binaries
* Fix L3 HA network creation to allow user to create router
* Update default value for agent_required attribute
* SRIOV: Fix Wrong Product ID for Intel NIC example
* Imported Translations from Transifex
* Updated from global requirements
* Purge use of "PRED and A or B" poor-mans-ternary
* Include call to delete_subnet from delete_network at DB level
* Use correct base class for unit tests for ML2 drivers
* Replace "nova" entries in iptables_manager with "neutron"
* Drop and recreate FK if adding new PK to routerl3bindings
* Imported Translations from Transifex
* Remove duplicate ensure_remove_chain method in iptables_manager
* ML2: fix file permissions
* Fix sneaky copypaste typo in ovs agent scheduler test
* Make L2 DVR Agent start successfully without an active neutron server
* Detect if iproute2 supports SR-IOV commands
* Use stop() method on MessageHandlingServer
* Rename constant to a more appropriate name
* Big Switch: Fix SSL version on get_server_cert
* Check for concurrent port binding deletion before binding the port
* Imported Translations from Transifex
* Batch ports from security groups RPC handler
* Fix incorrect int/tuple comparison during binary search
* Big Switch: Send notification after port update
* Allow to add router interface to IPv6 SLAAC network
* ML2 Cisco Nexus MD - not overwriting existing config
* Reorder operations in (l3_dvr) update floating ip
* Use RPC instead of neutron client in metadata agent
* Add assertion to test_page_reverse method
* Adds an option to enable broadcast replies to Dnsmasq
* Add advsvc role to neutron policy file
* NSX: allow multiple networks with same vlan on different phy_net
* NSX: Fix foreign key constraint delete provider network
* Imported Translations from Transifex
* Fix 'Length too long' error in neutron-dsvm-functional tests
* Remove use_namespaces from RouterInfo Property
* Fix handling of CIDR in allowed address pairs
* Updated from global requirements
* Remove XML support
* enable F402 check for flake8
* enable E713 in pep8 tests
* NEC plugin: Allow to apply Packet filter on OFC router interface
* _update_router_db: don't hold open transactions
* Big Switch: Switch to TLSv1 in server manager
* Only resync DHCP for a particular network when there is a failure
* Validate network config (vlan)
* Validate local_ip for OVS agent is actual ip address
* Imported Translations from Transifex
* Hyper-V: Remove useless use of "else" clause on for loop
* Enable no-name-in-module pylint check
* Move disabling of metadata and ipv6_ra to _destroy_router_namespace
* Updated from global requirements
* Adds macvtap support
* Remove duplicate import of constants module
* Switch run-time import to using importutils.import_module
* Enable assignment-from-no-return pylint check
* tox.ini: Avoid using bash where unnecessary
* l2population_rpc: docstring improvements
* Fix race condition on processing DVR floating IPs
* neutron-db-manage finds config file automatically
* Ensure test_agent_manager handles random hashseeds
* Ensure ofagent unit tests handle random hashseeds
* Moves the HA resource creations outside of transaction
* Modify docstring on send_delete_port_request in N1kv plugin
* Empty files should not contain copyright or license
* Remove superfluous except/re-raise
* Remove single occurrence of lost-exception warning
* Schema enhancement to support MultiSegment Network
* Remove redundant initialization and check from DVR RPC mixin
* Improve performance of security group DB query
* Optimize query in _select_dhcp_ips_for_network_ids
* Updated cache module and its dependencies
* Updated service.py and its dependencies
* Updated fileutils and its dependencies
* Cisco N1kv: Fix update network profile for add tenants
* DB: Only ask for MAC instead of entire port
* Only fetch port_id from SG binding table
* NSX: Make conn_idle_timeout configurable
* nsx plugin: keep old priority when reconnecting bad connection
* l3_agent: avoid name conflict with context
* Guard against concurrent port removal in DVR
* Refactor l2_pop code to pass mac/ip info more readably
* Fix KeyError in dhcp_rpc when plugin.port_update raises exception
* Refactor _make_subnet_dict to avoid issuing unnecessary queries
* openvswitch: Remove no longer used options
* VPNaaS Cisco unit test clean-up
* Call DVR VMARP notify outside of transaction

2014.2
------

* remove E251 exemption from pep8 check
* Race for l2pop when ports go up/down on same host
* Catch exceptions in router rescheduler
* Minor: remove unnecessary intermediate variable
* Handle unused set_context in L3NatTestCaseMixin.floatingip_with_assoc
* Use EUI64 for IPv6 SLAAC when subnet is specified
* Arista L3 Ops is a success if it is successful on one peer
* Add unique constraints in IPAvailabilityRange
* Remove two sets that are not referenced
* Update VPN logging to use new i18n functions
* mock.assert_called_once() is not a valid method
* Check for VPN Objects when deleting interfaces
* Compare subnet length as well when deleting DHCP entry
* Add pylint tox environment and disable all existing warnings
* Updated from global requirements
* update the relative path of api_extensions_path
* Reduce security group db calls to neutron server
* Ignore top-level hidden dirs/files by default
* Remove some duplicate unit tests
* NSX: drop support for deprecated dist-router extension
* Execute udevadm on other linux installs
* Avoid constructing a RouterInfo object to get namespace name
* Drop sslutils and versionutils modules
* Imported Translations from Transifex
* Remove an argument that is never used
* Refactor _process_routers to handle a single router
* Add Juno release milestone
* Add database relationship between router and ports
* Fix L2 agent does not remove unused ipset set

2014.2.rc2
----------

* Add Juno release milestone
* Add database relationship between router and ports
* Disable PUT for IPv6 subnet attributes
* Skip IPv6 Tests in the OpenContrail plugin
* Remove all_routers argument from _process_routers
* update ml2_migration to reflect optional methods
* Disable PUT for IPv6 subnet attributes
* Do not assume order of lvm.tun_ofports set elements
* Skip IPv6 Tests in the OpenContrail plugin
* Removed kombu from requirements
* Updated from global requirements
* Imported Translations from Transifex
* Imported Translations from Transifex
* Remove two sets that are not referenced
* Forbid update of HA property of routers
* Forbid update of HA property of routers
* Teach DHCP Agent about DVR router interfaces
* Updated from global requirements
* Allow reading a tenant router's external IP
* Raise exception if ipv6 prefix is inappropriate for address mode
* Retry getting the list of service plugins
* Add missing methods to NoopFirewallDriver
* Don't fail when trying to unbind a router
* Modify the ProcessMonitor class to have one less config parameter
* Big Switch: Don't clear hash before sync
* Remove sslutils from openstack.common
* Divide _cleanup_namespaces for easy extensibility
* L3 Agent should generate ns_name in a single place
* Add comments to iptables rules to help debugging
* nit: missing a "%s" in a log message
* L3 agent should always use a unique CONF object
* Iterate over same port_id if more than one exists
* Fix setup of Neutron core plugin in VPNaaS UT

2014.2.rc1
----------

* remove openvswitch plugin
* Fix pid file location to avoid I->J changes that break metadata
* Don't fail when trying to unbind a router
* remove linuxbridge plugin
* Allow reading a tenant router's external IP
* Fix sleep function call
* Add admin tenant name to nova notifier
* ML2: move L3 cleanup out of network transaction
* Open Kilo development
* ML2 Cisco Nexus MD: Fix UT to send one create vlan message
* Implement ModelsMigrationsSync test from oslo.db
* Imported Translations from Transifex
* Update migration scripts to support DB2
* Do not assume order of report list elements
* Disallow unsharing used firewall policy
* Imported Translations from Transifex
* Add missing methods to NoopFirewallDriver
* Raise exception if ipv6 prefix is inappropriate for address mode
* Fix broken port query in Extraroute test case
* Revert "Cleanup floatingips also on router delete"
* fix dvr snat bindings for external-gw-clear
* Fix quota limit range validator
* Remove default dictionary from function def
* Fix KeyError when getting secgroup info for ports
* Create DHCP port for IPv6 subnet
* Deletes floating ip related connection states
* Do not lookup l3-agent for floating IP if host=None, dvr issue
* Remove RPC notification from transaction in create/update port
* Do not assume order of body and tags elements
* Remove the translation tag for debug level logs in vmware plugin
* Retry getting the list of service plugins
* Fix entrypoint of OneConvergencePlugin plugin
* Forbid regular users to reset admin-only attrs to default values
* Finish small unit test refactor of API v2 tests
* Security groups: prevent race for default security group creation
* Stop admin using other tenants unshared rules
* Eliminate OrderedDict from test_api_v2.py
* Mock out all RPC calls with a fixture
* Add logging for enforced policy rules
* Imported Translations from Transifex
* Remove unnecessary _make_port function in BSN UTs
* ofagent: Drop log level of tenant-triggerable events
* Set vif_details to reflect enable_security_group
* Use dict_extend_functions to populate provider network attributes
* Fix foreign key constraint error on ml2_dvr_port_bindings
* Some clean up of code I'm preparing to modify
* Indicate the begin and end of the sync process to EOS
* DVR to delete router namespaces for service ports
* Do not assume order of device_ids set elements
* Fix 500 error on retrieving metadata by invalid URI
* Only setup dhcp interface if dhcp is not active on network
* HA routers master state now distributed amongst agents
* Rework and enable VPNaaS UT for Cisco CSR REST
* Update URL of Ryu official site in ofagent README files
* Set dsvm-functional job to use system packages
* Delete a broken subnet delete unit test
* Fix to delete user and group association in Nuage Plugin
* Deletes FIP agent gw port when last VM is deleted
* Delete DB records instead of tables to speed up UT
* Stop exception log in Big Switch unit tests
* Separate Configuration from Freescale SDN ML2 mechanism Driver
* NSX plugin: set VNIC_TYPE port binding attribute
* Access correct key for template name
* ofagent: Ignore unknown l2pop entry removals
* Neutron metering does not check overlap ip range
* Rename workers to api_workers and simplify code
* Fix DVR to service DHCP Ports
* Tunnel ID range validation for VXLAN/GRE networks
* Remove @author(s) from copyright statements
* BSN: Add context to backend request for debugging
* Don't create unused ipset chain
* Imported Translations from Transifex
* Avoid an extra database query in schedule_snat_router
* Add HA support to the l3 agent
* Stop ignoring 400 errors returned by ODL
* Fix a test_db_plugin unit test side_effect usage
* Imported Translations from Transifex
* Fix KeyError on missing gw_port_host for L3 agent in DVR mode
* Stop using intersphinx
* Updated from global requirements
* Cisco N1kv: Remove vmnetwork delete REST call on last port delete
* Remove the Cisco Nexus monolithic plugin
* L3 Metering label as shared
* Check for ports in subnet before deleting it from Nuage VSD
* ofagent: Fix a possible crash in arp responder
* Add a new scheduler for the l3 HA
* Add functional testing to ipset_manager
* Properly handle empty before/after notifications in l2pop code
* Remove logic for conditional migrations
* Make Juno migrations config independent
* Introduce havana initial state
* Adds ipset support for Security Groups
* Refactor l3_agent.process_router_floating_ip_addresses
* Cleanup floatingips also on router delete
* use TRUE in SQL for boolean var
* Remove faulty .assert_has_calls([])
* Fail on None before iteration attempt
* Imported Translations from Transifex
* ofagent: Remove broken XenAPI support
* Passing admin tenant name to EOS
* Fix for floating ip association and deletion
* BSN: Allow concurrent reads to consistency DB
* Remove useless check in _rpc_update_firewall
* Use renamed _fail_second_call() in cisco nexus tests
* Add L3 VRRP HA base classes
* Allow DHCPv6 reply from server to client
* Don't allow user to set firewall rule with port and no protocol
* Added TAP_DEVICE_PREFIX info to common/constants
* Fix comments in api.rpc.handlers
* ofagent: Clean up logging
* UTs: Disable auto deletion of ports/subnets/nets
* Remove second call to get_subnets in delete_subnet
* Changes to support FWaaS in a DVR based environment
* Imported Translations from Transifex
* Remove hints from schedule_router
* Call unbind_snat_servicenode from schedule router
* NSX: Correct allowed_address_pair return value on create_port
* Add the unit tests for ml2.rpc module
* Neutron should not use the neutronclient utils module for import_class
* Add unit-test assert to check dict is superset of dict
* Pythonified sanity_check.all_tests_passed
* Removed direct access to MessagingServer
* Remove subnet_id from check_ports_exist_on_l3agent
* Add requests_mock to test-requirements.txt
* Removed kombu from requirements
* Fix metadata agent's auth info caching
* Throw exception instances instead of classes
* Add scheduler unit tests to enable bug fixes and refactoring
* Fix AttributeError when setting external gateway on DVR router
* Stop tracking connections in DVR FIP Namespace
* Fixes formatting for debug output in neutron/agent/l3_agent.py
* Avoid testing code duplication which introduced testing bugs
* Supply missing cisco_cfg_agent.ini file
* Reset IPv6 detection flag after IPv6 tests
* Remove unused arg to config.setup_logging()
* Updated from global requirements
* Revert "Skip functional l3 agent test"

2014.2.b3
---------

* Fix leftover Timeout affecting most eventlet calls
* shared policy shouldn't have unshared rules
* ofagent: Remove @author tags and update copyright notices
* Work toward Python 3.4 support and testing
* Cleanup rename of get_compute_ports_on_host_by_subnet
* Revert "Cisco DFA ML2 Mechanism Driver"
* Refactor security group rpc call
* Avoid auto-scheduling for distributed routers
* Fix interface IP address for DVR with gateway
* BSN: Bind external ports in ML2 driver
* Remove SELECT FOR UPDATE use in delete_firewall
* Big Switch: Retry on 503 errors from backend
* Remove absolute path in KillFilter for metadata-proxy
* Implements sync mechanism between Neutron and Nuage VSD
* ofagent: Implement physical_interface_mappings
* ofagent: Enable local arp responder for TYPE_LOCAL
* ofagent: Enable local arp responder for TYPE_FLAT
* Implements ProcessMonitor to watch over external processes
* Skip functional l3 agent test
* ofagent: Local arp responder for VLAN
* Prevent SystemExits when running tests
* Big Switch: Separate L3 functions into L3 service
* Apic drivers enhancements (second approach): Topology
* Big Switch: Bind IVS ports in ML2 driver
* Add functional test for IptablesManager
* Clarify message when no probes are cleared
* Remove reference to cisco_cfg_agent.ini from setup.cfg again
* Fix a bug in Mellanox plugin RPC caused by secgroup RPC refactoring
* Don't spawn metadata-proxy for non-isolated nets
* l2pop: Allow network types overridable
* ML2: Fix release of network segments to allocation pools
* Fix a recent ipv6 UT regression
* Imported Translations from Transifex
* Add endpoint_type parameter to MetaInterfaceDriver
* Remove chain for correct router during update_routers()
* ofagent: Enable local arp responder for local VMs
* ofagent: merge br-tun into br-int
* Apic drivers enhancements (second approach): Sync
* Apic drivers enhancements (second approach): L3 refactor
* ML2 Type Driver refactor part 2
* Adds router service plugin for CSR1kv
* Introduces a keepalived manager for HA
* Support for extensions in ML2
* Cisco DFA ML2 Mechanism Driver
* Improve some plugins help strings
* Provide a quick way to run flake8
* Apic drivers enhancements (second approach): L2 refactor
* Make SecurityGroupsRpcCallback a separate callback class
* Subnets with prefix length 0 are invalid
* Adding mechanism driver in ML2 plugin for Nuage Networks
* Fix state_path in tests
* Add functional test for l3_agent
* remove explicit include of the ovs plugin
* NSX: log request body to NSX as debug
* Datacenter moid should not be tuple
* Remove ovs dependency in embrane plugin
* Layer 3 service plugin to support hardware based routing
* Remove binding:profile update from Mellanox ML2 MD
* Remove old policies from policy.json
* Apic drivers enhancements (second approach): Backend
* Make DvrServerRpcCallback a separate callback class
* Make DhcpRpcCallback a separate callback class
* Adding support of DNS nameserver and Host routes for the Nuage Plugin
* Block downgrade from icehouse to havana
* Use lockutils module for tox functional env
* Do not use auto_schedule_routers to add router to agent
* Fix func job hook script permission problems
* Check for IPv6 file before reading
* Remove SELECT FOR UPDATE use in update_firewall
* Fix l3 agent scheduling logic to avoid unwanted failures
* Fix InvalidRequestError in auto_schedule_routers
* Fix incorrect number of args to string format
* Add support for provider-network extension in nuage Plugin
* Make L3RpcCallback a separate callback class
* Cisco VPN with in-band CSR (interim solution)
* Inline "for val in [ref]" statements
* Minor refactoring for add_router_to_l3_agent
* Predictable iptables chains output order
* Prefer "val !=/== ref" over "val (not) in [ref]" in conditions
* Heal script: Drop fks before operating on columns
* Fixed template of IPsecSiteConnectionNotFound message
* Fix DVR to service LBaaS VIP Ports
* Refactor test_type_gre/vxlan to reduce duplicate code
* Fix heal_script for MySQL specifics
* Make log level in linux.utils.execute configurable
* Imported Translations from Transifex
* Networks are not scheduled to DHCP agents for Cisco N1KV plugin
* ext-gw update on dvr router improperly handled by l3-agent
* metering driver default value is different in code and config file
* Fix for floatingip-delete not removing fip_gw port
* Increase the default poll duration for Cisco n1kv
* Fix IpNetnsCommand to execute without root_wrapper when no netns
* Increase ovsdb_monitor.SimpleInterfaceMonitor start timeout
* Change autogenerate to be unconditional
* Remove status initialization from plugin's create_firewall
* Set firewall state to CREATED when dealing with DVR
* Add template attr. for subnet, router create in Nuage plugin
* Implement ip_lib.device_exists_with_ip_mac
* Add _store_ip_allocation method
* Updated from global requirements
* Refactor plugin setup helpers out of test.base
* Raise proper exception in case duplicate ipv6 address is allocated
* Do not explicitly set mysql_engine
* Fixes Hyper-V agent issue on Hyper-V 2008 R2
* Removing sorted() function from assertEqual()
* Add hook scripts for the functional infra job
* ML2 Type driver refactor part 1
* Minor refactoring of auto_schedule_routers
* Add ipv6 forwarding for router namespaces
* Refresh rpc_backend values in unit tests to those from oslo.messaging
* Add unit tests covering single operations to ODL
* One Convergence: Skip all tests with 'v6' in name
* VPNaaS: Enable UT cases with newer oslo.messaging
* Do not log WARN messages about lack of L3 agents for DVR routers
* Add specific docs build option to tox
* Fix policy rules for adding and removing router interfaces
* Refactor type_tunnel/gre/vxlan to reduce duplicate code
* Join tables in query for down L3 agents
* Rename range to avoid shadowing the builtin
* Fixes Hyper-V issue due to ML2 RPC versioning
* A10 Networks LBaaS v1 Driver
* Assign Cisco nw profile to multi-tenants in single request
* Remove unused network parameter from _allocate_ips_for_port
* corrects the typos in l3_router_plugin's comments
* Support Stateful and Stateless DHCPv6 by dnsmasq
* Implements securitygroup extension for nuage plugin
* Fix bigswitch setup.cfg lines
* Arista Layer 3 Service Plugin
* Add config for visibility of cisco-policy-profile
* Ensure ip6tables are used only if ipv6 is enabled in kernel
* Remove invalid or useless initialization in test_type_vxlan
* Fix migration set_length_of_description_field_metering
* Set InnoDB engine for all existing tables
* Use oslo.db create_engine instead of SQLAlchemy
* Big Switch: Check for 'id' in port before lookup
* Reorder operations in create_vip
* Send HTTP exceptions in the format expected by neutronclient
* Change nexus_dict to accept port lists
* Update DVR Binding when router_id changes
* Imported Translations from Transifex
* Remove auto-generation of db schema from models at startup
* Cisco N1kv plugin to send subtype on network profile creation
* Implement namespace cleanup for new DVR namespaces
* Fix config option names in ml2_conf_sriov.ini
* NSX: Avoid floating IP status reset
* correct getLoggers to use __name__ in code
* Skip FWaaS config mismatch check if RPC method is unsupported
* NSX: lift restriction on DVR update
* Updated from global requirements
* Use jsonutils instead of stdlib json
* Remove INACTIVE status from FWaaS
* Ignore http_proxy while connecting to test WSGI server
* Fix interface add for dvr with gateway
* l2pop: get_agent_ports: Don't yield (None, {})
* ML2: Make get_device_details report mac address as well
* Delete DVR namespaces on node after removing last VM
* Fix PortNotFound error during update_device_up for DVR
* Option to remove routers from dead l3 agents
* Remove SELECT FOR UPDATE use in ML2 tunnel driver add_endpoint
* Fix KeyError during sync_routers
* Fix PortNotFound exception during sync_routers
* VPNaaS: Cisco fix validation for GW IP
* Raise NotImplementedError instead of NotImplemented
* Imported Translations from Transifex
* Fix duplicate function: test_getattr_unallowed_attr
* Preserve link local IP allocations for DVR fip ns across restart
* Fix 404 error fetching metadata when using DVR
* Raise exception for network delete with subnets present
* SecurityGroupRuleExists should point out rule id instead of group id
* Opencontrail plug-in implementation for core resources
* Do not assume order of new_peers list elements
* Make plugin and l3plugin available as mixin's properties
* Use call to report state when ovs_agent starts up
* add auth token to context
* Fixes an issue with FIP re-association
* NSX: unify the two distributed routing extensions
* NSX: fix wording for configuration option
* MLNX Agent: ensure removed ports get treated on resyncs
* Add delete operations for the ODL MechanismDriver
* Predictable field and filter ordering
* Fixing neutron-db-manage with some options other than upgrade/downgrade
* Removes extra indents from TestSubresourcePlugin
* ofagent: Upgrade note about firewall_driver
* Return port context from _bind_port_if_needed
* MLNX Agent: Process port_update notifications in the main agent loop
* Fix session's InvalidRequestError because of nested rollback
* Remove unneeded device_owner field from l2pop tuple
* ofagent: Remove network_delete method
* Do not assume order of parameters in OVSBridge.add_flow call
* Fix to throw correct error code for bad attribute
* Improve external gateway update handling
* Do not assume order of pci slot list
* DeferredBridge to allow add_tunnel_port passthru
* Enabled Cisco ML2 driver to use new upstream ncclient
* Fix to enable L2pop to serve DVR
* Remove duplicated check for router connect to external net
* ofagent: Add a missing normalized_port_name
* Return 403 instead of 404 on attr policy failures
* Proper validation for inserting firewall rule
* Imported Translations from Transifex
* Ensure assertion matches dict iter order in test
* Fix 500 error during router-update for dvr routers
* Simple refactor to stop passing around an unused parameter
* Make _build_uri_path output predictable
* Radware: When a pip is needed, reuse the Port
* Remove redundant topic from rpc calls
* l3_db: refactor L3_NAT_DB_mixin
* OVS flows apply concurrently using a deferred OVSBridge
* Do not assume order of network_uuid's
* Big Switch: Only update hash header on success
* ofagent: Stop monitoring ovsdb for port changes
* ofagent: Desupport ancillary bridges
* Add a tox test environment for random hashseed testing
* OFAgent: Implement arp responder
* Updated from global requirements
* Do not assume order of quotas dictionary elements
* Move Cisco VPN RESTapi URI strings to constants
* Remove ignored do_request timeout argument
* Move from Python logging to Openstack logging
* Imported Translations from Transifex
* NSX: remove duplicate call to set_auth_cookie()
* NSX: Correct default timeout params
* Remove reference to cisco_cfg_agent.ini from setup.cfg
* Exit Firewall Agent if config is invalid
* Fix spelling mistakes
* Fix DB Duplicate error when scheduling distributed routers
* Imported Translations from Transifex
* Make ML2 ensure_dvr_port_binding more robust
* centralized router is incorrectly scheduled
* Fix-DVR Gateway clear doesn't delete csnat port
* Fix spelling in get_plugin_interface docstring
* Use storage engine when creating tables in migrations
* Removed configobj from test requirements
* Implement Midonet Juno Network Api calls
* Add missing ml2 plugin to migration 1fcfc149aca4
* Replace nullable from primary keys in tz_network_bindings with default
* Use correct section for log message if interface_driver import fails
* Make sure that gateway is in CIDR range by default
* test_l3_plugin: L3AgentDbInteTestCase L3AgentDbSepTestCase fails
* Add L3 Scheduler Changes for Distributed Routers
* Pass filters in arrays in get_agent_gw_ports_exist_for_network
* Do not schedule network when creating reserved DHCP port
* Check that router info is set before calling _update_arp_entry
* Move ARP responder test to sanity command
* neutron.conf does not have the definition of firewall quotas
* Fix wrong order of tables in downgrade
* Fix deprecated opt in haproxy driver
* Race condition of L3-agent to add/remove routers
* Replaced the strings with respective constants
* Make dvr_vmarp_table_update call conditional to dvr extension
* ofagent: Update a comment in port_bound
* Updated from global requirements
* Set promote_secondaries when creating namespaces
* Functional tests work fine with random PYTHONHASHSEED
* Call config_parse in base test setup
* ML2 additions to support DVR
* Make test_l3_agent._prepare_router_data a module function
* Remove redundant code in tests/unit/test_l3_agent
* Fix ML2 Plugin binding:profile update
* Set python hash seed to 0 in tox.ini
* Add definition for new VIF type
* Configuration agent for Cisco devices
* Handle bool correctly during _extend_extra_router_dict
* Encapsulate some port properties in the PortContext
* Changes to remove the use of mapping tables from Nuage plugin
* Updated from global requirements
* Log exceptions inside spawned functions
* Correct misspelled variable name
* Avoid RequestURITooLong exception in metadata agent
* Move loadbalancer vip port creation outside of transaction
* Define some abstract methods in VpnDriver class
* ML2 mechanism driver for SR-IOV capable NIC based switching, Part 2
* Modify L3 Agent for Distributed Routers
* Audited attribute for policy update not changing
* OFAgent: Share codes of l2-population in OVS agent

2014.2.b2
---------

* This patch changes the name of directory from mech_arista to arista
* ML2 mechanism driver for SR-IOV capable NIC based switching, Part 1
* Add rule for updating network's router:external attribute
* L2 Agent-side additions to support DVR
* Imported Translations from Transifex
* NSX: fix router ports port_security_enabled=False
* Add partial specs support in ML2 for multiprovider extension
* Add partial specs support in ML2 for gre/vxlan provider networks
* Set nullable=False on tenant_id in apic_contracts table
* call security_groups_member_updated in port_update
* The default value of quota_firewall_rule should not be -1
* Correct LOG.debug use
* Fix incorrect downgrade
* Fix spelling mistake in the log message
* Imported Translations from Transifex
* Support Router Advertisement Daemon (radvd) for IPv6
* Move plugin.delete_port call out of transaction
* Add partial specs support in ML2 for vlan provider networks
* ML2: Update a comment after the recent bind_port change
* NSX: fix validation logic on network gateway connect
* Initialize RpcProxy objects correctly
* Fix DVR regression for ofagent
* RPC additions to support DVR
* no quota for allowed address pair
* Allow to import _LC, _LE, _LI and _LW functions directly
* L2 Model additions to support DVR
* Fixed audit notifications for dhcp-agent-network
* Make readme reference git.openstack.org not github
* Fix enums usage for postgres in migrations
* Return a tuple of None's instead of one None
* Fix a log typo in ML2 manager.bind_port()
* Big Switch: Remove consistency hash on full sync
* VPNaaS: Separate validation for Cisco impl
* VPNaaS: separate out validation logic for ref impl
* VMWare: don't notify on disassociate_floatingips()
* Add L3 Extension for Distributed Routers
* VPNaaS Cisco REST client enhance CSR create
* Bump hacking to version 0.9.2
* Log methods using rpc communication
* Fixes port update failure when device ID is not updated
* Support Quota extension in MidoNet plugin
* NSX: Remove unneeded call to _ensure_default_security_group
* Use auth_token from keystonemiddleware
* update vsm credential correctly
* Shamelessly removing commented print line
* L3 agent prefers RPC messages over full sync
* Dnsmasq config files syntax issue when dhcp_domain is empty
* Database healing migration
* Fix incorrect default parameter in migration
* Use method's logger in log decorator
* Fixed audit notifications for l3-agent-router ops
* Expand arp_responder help text
* Send network name and uuid to subnet create
* Cisco: Fix test cases which make incorrect create requests
* ML2: Bind ports outside transactions
* Freeze models for healing migration
* NSX: Optionally not enforce nat rule match length check
* ofagent: Handle device name prefixes other than "tap"
* Add -s option for neutron metering rules
* Security groups extension for PLUMgrid plugin
* Missing max_routes in neutron.conf
* Clear entries in Cisco N1KV specific tables on rollback
* Allow unsharing a network used as gateway/floatingip
* Change all occurrences of no_delete to do_delete
* Split up metering test case into plugin + test case
* Use integer server_default value for multicast_ip_index
* Validate expected parameters in add/remove router interfaces
* Revert "VPNaaS REST Client UT Broken"
* Mock out tunnel_sync in test to avoid sleeping
* Add 'server_default' parameter
* Add BSN plugin to agent migration script
* Move _convert_to_nsx_transport_zones into nsx_utils
* Extract CommonDBMixin to a separate file
* Remove dead helper function from test_l3_plugin
* Added support for NOS version 4.1.0, 5.0.0 and greater
* Remove reference to setuptools_git
* NSX: neutron router-interface-add should clear security-groups
* Refactor 'if false do nothing' logic in l3 scheduler db
* Imported Translations from Transifex
* Add a gate-specific tox env for functional tests
* NSX: remove unnecessary checks on network delete
* Bump min required version for dnsmasq to 2.63
* Add CONTRIBUTING.rst
* Do not mark device as processed if it wasn't
* Fix 'server_default' parameter usage in models
* Fix missing migration default value
* Add a link to a blog post by RedHat that discusses GRE tunnels in OVS
* Updated from global requirements
* VPNaaS REST Client UT Broken
* Avoid notifying while inside transaction opened in delete_port()
* sync periodic_task fix from incubator
* Omit mode keyword when spawning dnsmasq with some ipv6 subnets
* Fixed spelling mistake in securitygroups_rpc
* OVS agent: fix a comment on CANARY_TABLE
* ofagent: Fix an argument mismatch bug in commit 9d13ea88
* Fix UnboundLocalError raised during L3 router sync task
* Updated from global requirements
* Fix isinstance assertions
* Imported Translations from Transifex
* Allow setting a rootwrap cmd for functional tests
* Fix OVSBridge.get_port_ofport to handle empty output
* Ignore variable column widths in ovsdb functional tests
* Add configurable http_timeout parameter for Cisco N1K
* NSX: fix indentations
* BSN: Remove db lock and add missing contexts
* NSX: properly handle floating ip status
* Updated from global requirements
* Fix example for running individual tests
* Stop the dhcp-agent process when dnsmasq version is not determined
* Switch to using oslo.db
* Replace occurrences of 'test_tenant' with 'test-tenant' in tests
* lb-agent: ensure removed devices get treated on resyncs
* Imported Translations from Transifex
* Add sanity check for nova notification support
* changes ovs agent to get bridges via ovs_lib
* Use correct MAX_LEN constant in agent functional tests
* remove unsupported middleware
* Fix re-creation of the pool directory
* Add config for performance gate job
* Use patch ports to interconnect integration/physical bridges
* Exit rpc_loop when SIGTERM is received in ovs-agent
* LBaaS new object model logging no-op driver
* ofagent: Use port desc to monitor ports on br-int
* Fixed dhcp & gateway ip conflict in PLUMgrid plugin
* Introduce bulk calls for get device details
* validate flat networks physical name
* Remove __init__ method from TunnelCallback mixin
* OVS agent: Correct bridge setup ordering
* Revert "Revert "ovs-agent: Ensure integration bridge is created""
* Imported Translations from Transifex
* Synced log module and its dependencies from oslo-incubator
* Pass newly created router to _update_router_gw_info
* don't ignore rules that are already enforced
* Updated neutron.conf to reflect new RPC options
* Moved rpc_compat.py code back into rpc.py
* Updated from global requirements
* Updated from global requirements
* ofagent: move main module from ryu repository
* Don't convert numeric protocol values to int
* Imported Translations from Transifex
* Revert "Check NVP router's status before deploying a service"
* Remove the useless vim modelines
* Imported Translations from Transifex
* Changing the poll_duration parameter type to int
* Add test cases for plugins/ml2/plugin.py
* Removed local modification in incubator code
* Removed 'rpc' and 'notifier' incubator modules
* Removed create_rpc_dispatcher methods
* Use openstack.common.lockutils module for locks in tox functional tests
* Pass serializer to oslo.messaging Notifier
* Fix auto_schedule_networks to resist DBDuplicateEntry
* Imported Translations from Transifex
* Control active number of REST calls from Cisco N1kv plugin to VSM
* Revert "ovs-agent: Ensure integration bridge is created"
* ValueError should use '%' instead of ','
* NSX: return 400 if dscp set for trusted queue
* NSX sync cache: add a flag to skip item deletion
* NSX: propagate network name updates to backend
* Renamed argument for create_consumer[s]
* Renamed consume_in_thread -> consume_in_threads
* Renamed start_rpc_listener -> start_rpc_listeners
* Port to oslo.messaging
* Imported Translations from Transifex
* Pass 'top' to remove_rule so that rule matching succeeds
* Big Switch: Stop watchdog on interval of 0
* Remove old quantum scripts
* Move _filter_non_model_columns method to CommonDbMixin
* Updated from global requirements
* Ignore emacs checkpoint files
* Big Switch: Lock consistency table for REST calls
* Check port value when creating firewall rule with icmp protocol
* Improve docstring for OVSNeutronAgent constructor
* Big Switch ML2: sync detection in port-update
* Imported Translations from Transifex
* Remove SELECT FOR UPDATE use in ML2 type driver release_segment
* Add vlan type driver unittests
* Make sure we call BaseTestCase.setUp() first
* Don't explicitly call .stop() on mock.patch objects
* Don't instantiate RPC clients on import
* Configure agents using neutron.common.config.init (formerly .parse)
* linuxbridge-agent: process port updates in the main loop
* Notify systemd when starting Neutron server
* Ensure entries in dnsmasq belong to a subnet using DHCP
* Added missing core_plugins symbolic names
* Trigger provider security group update for RA
* NSX: revert queue extension name change
* Fix pool statistics for LBaaS Haproxy driver
* Don't use root_helper when it's not needed
* Introduced rpc_compat.create_connection()
* Copy-paste RPC Service class for backwards compatibility
* Introduce RpcCallback class
* Fix opt helpstring for dhcp_lease_duration
* Consistently use jsonutils instead of specific implementation
* Imported Translations from Transifex
* Adding static routes data for members
* remove pep8 E122 exemption and correct style
* Change default netpartition behavior in nuage plugin
* Add 'ip rule ...' support to ip_lib
* Add missing keyword raise to get_profile_binding function
* Add logging for NSX status sync cache

2014.2.b1
---------

* Big Switch: Remove unnecessary initialization code
* Big Switch: Import DB module in unit test
* When l2-pop ON, clean stale ports in table0 br-tun
* remove E112 hacking exemption and fix errors
* Updated from global requirements
* Allowed address pair: Removing check for overlap with fixed ips
* NeutronManager: Remove explicit check of the existence of an attribute
* Fix invalid IPv6 address used in FakeV6 variables
* Improve vxlan type driver initialization performance
* Floatingip extension support for nuage plugin
* ovs-agent: Ensure integration bridge is created
* Brocade mechanism driver depends on the brocade plugin templates
* Brocade mechanism driver should be derived from ML2 plugin base class
* changes ovs agent_id init to use hostname instead of mac
* multiprovidernet: fix a comment
* Imported Translations from Transifex
* Fix race condition with firewall deletion
* extensions: remove 'check_env' method
* Check the validation of 'delay' and 'timeout'
* Control update, delete for cisco-network-profile
* Ensure routing key is specified in the address for a direct producer
* Support Subnets that are configured by external RAs
* Refactor code in update_subnet, splitting into individual methods
* Make allocation_pools attribute of subnet updateable by PUT
* Monkey patch threading module as early as possible
* Introduced transition RPC exception types
* Added RpcProxy class
* ofagent: Fix VLAN usage for TYPE_FLAT and TYPE_VLAN
* Big Switch: Catch exceptions in watchdog thread
* Use import from six.moves to import the queue module
* Start an unstarted patch in the hyperv unit tests
* Imported Translations from Transifex
* Fix NVP FWaaS occurs error when deleting a shared rule
* Check NVP router's status before deploying a service
* Add an option to turn off DF for GRE and VXLAN tunnels
* Increase default metadata_workers, backlog to 4096
* Big Switch: Add missing data to topology sync
* Replace XML with JSON for N1kv REST calls
* Big Switch: Call correct method in watchdog
* Freescale SDN Mechanism Driver for ML2 Plugin
* OVS Agent: limit veth names to 15 chars
* Added note to neutron.conf
* Return no active network if the agent has not been learnt yet
* Sync service module from oslo-incubator
* ovs, ofagent: Remove dead code
* Default to setting secure mode on the integration bridge
* Cisco APIC Layer 3 Service plugin
* Allow neutron-sanity-check to check OVS patch port support
* Remove run-time version checking for openvswitch features
* Add flat type driver unittests
* Changed DictModel to dict with attribute access
* Pass object to policy when finding fields to strip
* Allow L3 base to handle extensions on router creation
* Refactor some router-related methods
* Add local type driver unittests
* add engine parameter for offline migrations
* Check DB schema prior to migration to Ml2
* Removes unnecessary Embrane module-level mocks
* Improve module-level mocks in midonet tests
* Big Switch: fix capabilities retrieval code
* Improve iptables_manager _modify_rules() method
* NSX: bump http_timeout to 30 seconds
* Log firewall status on delete in case of status inconsistency
* BSN: Set hash header to empty instead of False
* Neutron does not follow the RFC 3442 spec for DHCP
* LBaaS add missing rootwrap filter for route
* Radware LBaaS driver is able to flip to a secondary backend node
* NSX: fix invalid docstring
* NSX: fix tenant_id passed as security_profile_id
* NSX: Fix request_id in api_client to increment
* Improve usage of MagicMocks in ML2 and L3 tests
* Improve readability of MagicMock use in RYU test
* Remove function replacement with mock patch
* Remove unnecessary MagicMocks in cisco unit tests
* Handle errors from run_ofctl() when dumping flows
* Sync periodic_task from oslo-incubator
* Added missing plugin .ini files to setup.cfg
* Imported Translations from Transifex
* Make linux.utils.execute log error on return codes
* FWaaS plugin doesn't need to handle firewall rule del ops
* Reprogram flows when ovs-vswitchd restarts
* Revert "fix openvswitch requirement check"
* Updated from global requirements
* Fix KeyError exception while updating dhcp port
* NSX: fix bug for flat provider network
* Disallow regular user to update firewall's shared attribute
* Support 'infinite' dhcp_lease_duration
* l2-pop: removing a TODO for the delete port use case
* NEC plugin: Bump L3RPC callback version to 1.1
* Synced jsonutils from oslo-incubator
* Imported Translations from Transifex
* fix openvswitch requirement check
* NSX: replace strong references to the plugin with weakref ones
* Fixes bugs for requests sent to SDN-VE controller
* Install SNAT rules for ipv4 only
* Imported Translations from Transifex
* Add NVP advanced service check before deleting a router
* Disallow 'timeout' in health_monitor to be negative
* Remove redundant default=None for config options
* Fix for multiple misspelled words
* Use list copy for events in nova notifier
* Extraroute extension support for nuage plugin
* OFAgent: Fixing lost vlan ids on interfaces
* Set onlink routes for all subnets on an external network
* Cisco APIC ML2 mechanism driver, part 2
* Remove all mostly untranslated PO files
* remove token from notifier middleware
* NSX: get rid of the last Nicira/NVP bits
* Metadata agent caches networks for routers
* Common decorator for caching methods
* Make pid file locking non-blocking
* Allowed Addresspairs: Removing check for overlap with fixed ips
* Do not defer IPTables apply in firewall path
* Metaclass Python 3.x Compatibility
* Fix non-existent 'assert' calls to mocks
* Log iptables rules when they fail to apply
* Remove hard dependency on novaclient
* Provide way to reserve dhcp port during failovers
* Imported Translations from Transifex
* Implement local ARP responder onto OVS agent
* Fix typos in ovs_neutron_agent.py
* Allow vlan type usage for OpenDaylight ml2
* NSX: do not raise on missing router during migration step
* NSX: fix error when creating VM ports on subnets without dhcp
* NSX: allow net-migration only in combined mode
* OFAgent: Avoid processing ports which are not yet ready
* Add missing translation support
* Reorg table ml2_port_bindings during db migration
* Remove unused parameter
* NSX: Do a single query for all gateway devices
* Add mailmap entry
* Add 'secret' property for 'connection' option
* NSX: Do not extend fault map for network gateway ext
* Ensure tenant owns devices when creating a gateway
* Corrected the syntax of port_update call to NVSD agent
* Fix some typos in neutron/db and IBM SDN-VE plugin
* Fix issubclass() hook behavior in PluginInterface
* Imported Translations from Transifex
* LBaaS VIP doesn't work after delete and re-add
* OVS lib defer apply doesn't handle concurrency
* Big Switch: Don't use MagicMocks unnecessarily
* Make plugin deallocation check optional
* Restore GARP by default for floating IPs
* Ensure core plugin deallocation after every test
* Updated from global requirements
* Big Switch: Check source_address attribute exists
* Revert "Big Switch: Check source_address attribute exists"
* ML2 VxlanTypeDriver: Synchronize VxlanAllocation table
* Start ping listener also for postgresql
* ofagent: Add a missing push_vlan action
* NSX: ensure that no LSN is created on external networks
* Make VPNaaS 'InUse' exception more clear
* Remove explicit dependency on amqplib
* Revert "Disable debug messages when running unit tests"
* eswitch_neutron_agent: Whitespace fixes in comments
* Upgrade failure for DB2 at ml2_binding_vif_details
* Remove duplicate module-rgx line in .pylintrc
* Disable debug messages when running unit tests
* Perform policy checks only once on list responses
* Allow DHCPv6 solicit from VM
* Fix importing module in test_netscaler_driver
* Record and log reason for dhcp agent resync
* Big Switch: Check source_address attribute exists
* L3 RPC loop could delete a router on concurrent update
* Adding tenant-id while creating Radware ADC service
* Fix H302 violations
* Fix H302 violations in plugins package
* Fix H302 violations in unit tests
* Imported Translations from Transifex
* lbaas on a network without gateway
* Optimize querying for security groups
* NSX: pass the right argument during metadata setup
* Improve help strings for radware LbaaS driver
* Fix network profile subtype validation in N1kv plugin
* Performance improvement of router routes operations
* Add support to dynamically upload drivers in PLUMgrid plugin
* Imported Translations from Transifex
* Reference new get_engine() method from wsgi.py
* Allow test_l3_agent unit test to run individually
* tests/unit: refactor reading neutron.conf.test
* Don't print duplicate messages on SystemExit
* Unit test cases for quota_db.py
* Cisco VPN device driver - support IPSec connection updates
* OVS and OF Agents: Create updated_ports attribute before setup_rpc
* Imported Translations from Transifex
* Updated from global requirements
* Synced jsonutils from oslo-incubator
* Imported Translations from Transifex
* NSX: fix migration for networks without a subnet
* Allow ML2 plugin test cases to be run independently
* Removed signing_dir from neutron.conf
* Add physical_network to binding:vif_details dictionary
* Database exception causes UnboundLocalError in linuxbridge-agent
* Wrong key router.interface reported by ceilometer
* Imported Translations from Transifex
* NSX: fix API payloads for dhcp/metadata setup
* Improve ODL ML2 Exception Handling
* NSX: change api mapping for Service Cluster to Edge Cluster
* Fix protocol value for SG IPV6 RA rule
* Cisco APIC ML2 mechanism driver, part 1
* LBaaS: remove orphan haproxy instances on agent start
* Fixed floating IP logic in PLUMgrid plugin
* Segregate the VSM calls from database calls in N1kv plugin
* NSX: add nsx switch lookup to dhcp and metadata operations
* Use set_gateway from ip_lib
* Fix incorrect usage of sa.String() type
* Re-submit "ML2 plugin should not delete ports on subnet deletion"
* LBaaS: Set correct nullable parameter for agent_id
* Vmware: Set correct nullable for lsn_id, nsx_port_id
* IBM: set secret=True on passwd config field
* Restore ability to run functional tests with run_tests.sh
* Fix H302 violations in extensions package
* Sync db code from oslo-incubator
* Imported Translations from Transifex
* Remove List events API from Cisco N1kv Neutron
* NSX: Fix fake_api_client to raise NotFound
* Replace loopingcall in notifier with a delayed send
* ip-lib: use "ip neigh replace" instead of "ip neigh add"
* Add 2-leg configuration to Radware LBaaS Driver
* Fix H302 violations in db package and services
* Cisco: Set correct nullable for switch_ip, instance_id, vlan_id
* Ml2: Set correct nullable for admin_state_up
* Drop service* tables only if they exist
* Updated from global requirements
* Make help texts more descriptive in Metaplugin
* ML2 Cisco Nexus MD: Improve Unit Test Coverage
* Fix migration that breaks Grenade jobs
* Fix incorrect change of Enum type
* allow delete_port to work when there are multiple floating ips
* Add nova_ca_certificates_file option to neutron
* gw_port should be set as lazy='join'
* netaddr<=0.7.10 raises ValueError instead of AddrFormatError
* Imported Translations from Transifex
* netaddr<=0.7.10 raises ValueError instead of AddrFormatError
* Validate IPv6 modes in API when IP version is 4
* Add 'ip neigh' to ip_lib
* OFAgent: Improve handling of security group updates
* OFAgent: Process port_update notifications in the main agent loop
* NSX: sync thread catches wrong exceptions on not found
* Notifier: Catch NotFound error from nova
* Switch over to FixedIntervalLoopingCall
* Check if bridge exists and make sure it's UP in ensure_bridge
* Validate CIDR given as ip-prefix in security-group-rule-create
* Support enhancements to Cisco CSR VPN REST APIs
* Fix uninitialized variable reference
* Nuage Plugin: Delete router requires precommit checks
* Delete DHCP port without DHCP server on a net node
* Improved quota error message
* Remove device_exists in LinuxBridgeManager
* Add support for multiple RPC workers under Metaplugin
* Security Group rule validation for ICMP rules
* Fix Metering doesn't respect the l3 agent binding
* DHCP agent should check interface is UP before adding route
* Remove workaround for bug #1219530
* Fix LBaaS Haproxy occurs error if no member is added
* Add functional tests to verify ovs_lib VXLAN detection
* Add nova_api_insecure flag to neutron
* Allow combined certificate/key files for SSL
* Verify ML2 type driver exists before calling del
* Fix dangling patches in Cisco and Midonet tests
* Make default nova_url use a version
* ML2 Cisco Nexus MD: Remove unnecessary Cisco nexus DB
* NSX plugin: fix get_gateway_devices
* Exclude .ropeproject from flake8 checks
* Register LBaaS resources to quotas engine
* Remove mock.patch.stop from tests that inherit from BaseTestCase
* Reschedule router if new external gateway is on other network
* Update ensure()/reconnect() to catch MessagingError
* Properly apply column default in migration pool_monitor_status
* Remove "reuse_existing" from setup method in dhcp.py
* Enable flake8 E711 and E712 checking
* Fixes Hyper-V agent security groups disabling
* Fixes Hyper-V agent security group ICMP rules
* Fix typo in ml2 configuration file
* Edge firewall: improve exception handling
* Edge driver: Improve exception handling
* Fix typo in comment
* NSX: Fix KeyError in sync if nsx_router_id not found
* VMware: log backend port creation in the right place
* Revert "Hide ipv6 subnet API attributes"
* BigSwitch: Create router ports synchronously
* NSX: ensure dhcp port is setup on metadata network
* Hide ipv6 subnet API attributes
* Set correct columns' length
* Enforce required config params for ODL driver
* Add L2 Agent side handling for non consistent security_group settings
* BSN: Remove module-level ref to httplib method
* BigSwitch: Stop HTTP patch before overriding
* Typographical correction of Arista ML2 help
* Fix wrong section name "security_group" in sample config files
* Set the log level to debug for loading extensions
* Updated from global requirements
* set api.extensions logging to ERROR in unit tests
* Add common base class for agent functional tests
* Remove RPC to plugin when dhcp sets default route
* Imported Translations from Transifex
* Add missing comma in nsx router mappings migration
* OFAgent: Avoid re-wiring ports unnecessarily
* BigSwitch: Improves server manager UT coverage
* BigSwitch: Don't import portbindings_db until use
* lb-agent: fix get_interfaces_on_bridge returning None
* Clean out namespaces even if we don't delete namespaces
* Call policy.init() once per API request
* ofa_neutron_agent: Fix _phys_br_block_untranslated_traffic
* Don't emit log for missing attribute check policy
* Sync service and systemd modules from oslo-incubator
* Imported Translations from Transifex
* Move bash whitelisting to pep8 testenv
* Fix test MAC addresses to be valid
* ML2: ODL driver sets port status
* Add a note that rpc_workers option is experimental
* Fix Jenkins translation jobs
* Redundant SG rule create calls in unit tests
* Set ns_name in RouterInfo as attribute
* Replace HTTPSConnection in NEC plugin
* ignore build directory for pep8
* Imported Translations from Transifex
* Delete routers that are requested but not reported as active
* Explicitly import state_path opt in tests.base
* fixes tests using called_once_ without assert
* Remove invalid copyright headers under API module
* update doc string - correct typo
* Revert changes removing OVSBridge return
* fixes broken neutron-netns-cleanup
* Remove duplicated tests for check_ovs_vxlan_version
* Permit ICMPv6 RAs only from known routers
* Return 409 for second firewall creation
* OFA agent: use hexadecimal IP address in tunnel port name
* Fixing Arista CLI command
* use floatingip's ID as key instead of itself
* Use a temp dir for CONF.state_path
* Use os.uname() instead of calling uname in subprocess
* Enable hacking H301 check
* Stop using portbindings_db in BSN ML2 driver
* NSX: Fix pagination support
* Removing vim header lines
* Fix function parsing the kernel version
* Updated from global requirements

2014.1.rc1
----------

* Restore NOT NULL constraint lost by earlier migrations
* BigSwitch: Semaphore on port status update
* Remove last parts of Quantum compatibility shim
* Imported Translations from Transifex
* Fix quota_health_monitor opt name in neutron.conf
* Add missing DB migrations for BSN ML2 plugin
* Only send notifications on uuid device_id's
* Add Icehouse no-op migration
* Add support for https requests on nova metadata
* Delete disassociated floating ips on external network deletion
* Imported Translations from Transifex
* Invoke _process_l3_create within plugin session
* Invalid ovs-agent test case - test_fdb_add_flows
* Add missing parameters for port creation
* Move test_ovs_lib to tests/unit/agent/linux
* Update BigSwitch Name to its correct name
* Cancelling thread start while unit tests running
* Delete duplicate external devices in router namespace
* Deals with fails in update_*_postcommit ops
* ML2 Cisco Nexus MD: Support portchannel interfaces
* Changed the message line of RouterInUse class
* UT: do not hide an original error in test resource ctxtmgr
* BigSwitch: Move attr ref after error check
* Fix namespace exist() method
* Make dnsmasq aware of all names
* Open Juno development
* Prevent cross plugging router ports from other tenants
* Adds OVS_HYBRID_PLUG flag to portbindings
* Disable XML tests on Py26
* Subnets should be set as lazy='join'
* nec plugin: allow to delete resource with ERROR status
* Synced rpc and gettextutils modules from oslo-incubator
* Import request_id middleware bug fix from oslo
* Add unit test for add_vxlan in test_linux_ip_lib
* Start using oslosphinx theme for docs
* Migrate data from cap_port_filter to vif_details
* Imported Translations from Transifex
* Include cisco plugin in migration plugins with ovs
* ML2 Cisco Nexus MD: Remove workaround for bug 1276395
* Fixed TypeError when creating MlnxException
* Replace a usage of the deprecated root_helper option
* Cisco VPN driver correct reporting for admin state chg
* Add script to migrate ovs or lb db to ml2 db
* Correct OVS VXLAN version check
* LBaaS: make device driver decide whether to deploy instance
* NSX plugin: return 400 for invalid gw certificate
* Imported Translations from Transifex
* Remove extra space in help string
* Add enable_security_group to BigSwitch and OneConvergence ini files
* Add nec plugin to allowed address pairs migration
* Imported Translations from Transifex
* Fix segment allocation tables in Cisco N1kv plugin
* Updated from global requirements
* NEC plugin: Rename quantum_id column to neutron_id
* Log received pool.status
* NEC plugin: Allow to add prefix to OFC REST URL
* NEC plugin: Remove a colon from binding:profile key due to XML problem
* rename ACTIVE_PENDING to ACTIVE_PENDING_STATUSES
* VPNaaS support for VPN service admin state change and reporting
* Use save_and_reraise_exception when reraise exception
* Return meaningful error message on pool creation error
* Don't set priority when calling mod_flow
* Avoid creating FixedIntervalLoopingCall in agent UT
* Imported Translations from Transifex
* Big Switch Plugin: No REST port delete on net del
* Add enable_security_group option
* Get rid of additional db contention on fetching VIP
* Fix typo in lbaas agent exception message
* De-duplicate unit tests for ports in Big Switch
* ML2: Remove validate_port_binding() and unbind_port()
* Imported Translations from Transifex
* Fix duplicate name of NVP LBaaS objs not allowed on vShield Edge
* tests/unit: clean up notification driver
* Use different name for the same constraint
* Add a semaphore to some ML2 operations
* Log dnsmasq host file generation
* add HEAD sentinel file that contains migration revision
* Added config value help text in ns metadata proxy
* Fix usage of save_and_reraise_exception
* Cisco VPN device driver post-merge cleanup
* Fixes the Hyper-V agent individual ports metrics
* Sync excutils from oslo
* BigSwitch ML2: Include bound_segment in port
* NEC plugin: Honor Retry-After response from OFC
* Add update binding:profile with physical_network
* return false or true according to binding result
* Enable to select an RPC handling plugin under Metaplugin
* Ensure to count firewalls in target tenant
* Mock agent RPC for FWaaS tests to delete DB objs
* Allow CIDRs with non-zero masked portions
* Cisco plugin fails with ParseError no elem found
* Cisco Nexus: maximum recursion error in ConnectionContext.__del__
* Don't use root to list namespaces
* Fixes Hyper-V agent security groups enable issue
* ML2 BigSwitch: Don't modify parent context
* Advanced Services documentation
* LBaaS: small cleanup in agent device driver interface
* Change report_interval from 4 to 30, agent_down_time from 9 to 75
* Stop removing ip allocations on port delete
* Imported Translations from Transifex
* Ignore PortNotFound exceptions on lockless delete
* Show neutron API request body with debug enabled
* Add session persistence support for NVP advanced LBaaS
* Fix misleading error message about failed dhcp notifications
* NSX: Fix router-interface-delete returns 404 when router not in nsx
* Fix _validate_mac_address method
* BigSwitch: Watchdog thread start after servers
* Calculate stateless IPv6 address
* Create new IPv6 attributes for Subnets
* Remove individual cfg.CONF.resets from tests
* BigSwitch: Sync workaround for port del deadlock
* NSX: Ensure gateway devices are usable after upgrade
* Correctly inherit __table_args__ from parent class
* Process ICMP type for iptables firewall
* Imported Translations from Transifex
* Added missing l3_update call in update_network
* ML2 plugin involves in agent_scheduler migration
* Imported Translations from Transifex
* Avoid long transaction in plugin.delete_ports()
* cisco: Do not change supported_extension_aliases directly
* Fix KeyError except on router_info in FW Agent
* NSX: remove last of unneed quantum references
* NSX: fix intermetting UT failure on vshield test_router_create
* Bugfix and refactoring for ovs_lib flow methods
* Send fdb remove message when a port is migrated
* Imported Translations from Transifex
* Send network-changed notifications to nova
* Notify nova when ports are ready
* Skip radware failing test for now
* NSX: Propagate name updates for security profiles
* Fix in admin_state_up check function
* NSX: lower the severity of messages about VIF's on external networks
* Kill 'Skipping unknown group key: firewall_driver' log trace
* Imported Translations from Transifex
* API layer documentation
* BigSwitch: Use eventlet.sleep in watchdog
* Embrane LBaaS Driver
* BigSwitch: Widen range of HTTPExceptions caught
* Fix ml2 & nec plugins for allowedaddresspairs tests
* Fix unittest failure in radware lbaas driver
* Removes calls to mock.patch.stopall in unit tests
* Stop mock patches by default in base test class
* Query for port before calling l3plugin.disassociate_floatingips()
* Optimize floating IP status update
* NSX: Allow multiple references to same gw device
* VPNaaS Device Driver for Cisco CSR
* Updated from global requirements
* BigSwitch: Fix certificate file helper functions
* Create agents table when ML2 core_plugin is used
* Fix usage of sqlalchemy type Integer
* Fixing lost vlan ids on interfaces
* Fix bug:range() is not same in py3.x and py2.x
* Call target plugin out of DB transaction in the Metaplugin
* NSX: Sync do not pass around model object
* NSX: Make replication mode configurable
* Updated from global requirements
* Fix ml2 db migration of subnetroutes table
* Imported Translations from Transifex
* After bulk create send DHCP notification
* Fix lack of extended port's attributes in Metaplugin
* Add missing ondelete option to Cisco N1kv tables
* Migration support for Mellanox Neutron plugin
* Imported Translations from Transifex
* Imported Translations from Transifex
* Updated from global requirements
* Add support for tenant-provided NSX gateways devices
* NSX: fix nonsensical log trace on update port
* BigSwitch: Fix rest call in consistency watchdog
* BigSwitch: Fix cfg.Error format in exception
* BigSwitch: Fix error for server config check
* Fixed Spelling error in Readme
* Adds state reporting to SDN-VE agent
* Fix unittest failure in radware lbaas driver
* Log configuration values for OFA agent
* NSX: Add ability to retry on 503's returned by the controller
* Cisco Neutron plugin fails DB migration
* Floatingip_status migration not including Embrane's plugin
* One Convergence Neutron Plugin l3 ext support
* Nuage plugin was missed in floatingip_status db migration script
* ML2 Cisco Nexus MD: VM migration support
* Drop old nvp extension file
* Makes the Extension loader behavior predictable
* One Convergence Neutron Plugin Implementation
* NEC plugin: delete old OFC ID mapping tables
* Imported Translations from Transifex
* Fix typo in migration script
* Enhance GET networks performance of metaplugin
* Adds the missing migration for gw_ext_mode
* BigSwitch: Add SSL Certificate Validation
* BigSwitch: Auto re-sync on backend inconsistencies
* VPNaaS Service Driver for Cisco CSR

2014.1.b3
---------

* Updated from global requirements
* Add OpenDaylight ML2 MechanismDriver
* Replaces network:* strings by constants
* Check vxlan enablement via modinfo
* Do fip_status migration only for l3-capable plugins
* Fix race condition in update_floatingip_statuses
* Implementaion of Mechanism driver for Brocade VDX cluster of switches
* NSX: passing wrong security_group id mapping to nsx backend
* Avoid unnecessarily checking the existence of a device
* Refactor netns.execute so that it is not necessary to check namespace
* Minor refactoring for Hyper-V utils and tests
* Adds Hyper-V Security Groups implementation
* Rename migration lb_stats_needs_bigint to match revision number
* Imported Translations from Transifex
* NVP LBaaS: check for association before deleting health monitor
* Different class names for VPNaaS migrations
* ML2: database needs to be initalized after drivers loaded
* replace rest of q_exc to n_exc in code base
* Adds multiple RPC worker processes to neutron server
* NEC plugin: PFC packet fitler support
* Fix NVP/Nicira nits
* Remove unused method update_fixed_ip_lease_expiration
* NSX: nicira_models should import model_base directly
* NSX: make sync backend run more often
* Embrane Plugin fails alembic migrations
* Implement Mellanox ML2 MechanismDriver
* Use database session from the context in N1kv plugin
* Delete subnet fails if assoc port has IPs from another subnet
* Remove nvplib and move utility methods into nsxlib
* BigSwitch: Add address pair support to plugin
* Remove unused 'as e' in exception blocks
* Remove vim line from db migartion template
* Imported Translations from Transifex
* Support advanced NVP IPsec VPN Service
* Improves Arista's ML2 driver's sync performance
* Fix NVP FWaaS errors when creating firewall without policy
* Remove call to addCleanup(cfg.CONF.reset)
* nec plugin: Avoid long transaction in delete_ports
* Avoid using "raise" to reraise with modified exception
* Imported Translations from Transifex
* Implement OpenFlow Agent mechanism driver
* Finish off rebranding of the Nicira NVP plugin
* Log configuration values for OVS agent
* BigSwitch: Asynchronous rest calls for port create
* Introduce status for floating IPs
* BigSwitch: Add agent to support neutron sec groups
* N1kv: Fixes fields argument not None
* Adds the new IBM SDN-VE plugin
* Imported Translations from Transifex
* Nuage Networks Plugin
* Fixes spelling error Closes-Bug: #1284257
* Openvswitch update_port should return updated port info
* Updated from global requirements
* Remove unused variable
* Change firewall to DOWN when admin state down
* ovs-agent: use hexadecimal IP address in tunnel port name
* NSX: add missing space 'routeron'
* Imported Translations from Transifex
* Fix DetachedInstanceError for Agent instance
* Update License Headers to replace Nicira with VMware
* Renaming plugin-specific exceptions to match NSX
* Imported Translations from Transifex
* DB Mappings for NSX security groups
* NSX: port status must reflect fabric, not link status
* Typo/grammar fixes for the example neutron config file
* NSX: Pass NSX uuid when plugging l2 gw attachment
* stats table needs columns to be bigint
* Remove import extension dep from db migration
* Fix get_vif_port_by_id to only return relevant ports
* Developer documentation
* Fix NSX migration path
* ML2 mechanism driver access to binding details
* Add user-supplied arguments in log_handler
* Imported Translations from Transifex
* NSX: Fix newly created port's status should be DOWN
* BigSwitch: Stop using external locks
* Rename/refactoring of NVP api client to NSX
* Remove pyudev dependency
* Rename DB models and related resources for VMware NSX plugin
* Lower log level of errors due to user requests to INFO
* Include proper Content-Type in the HTTP response headers
* LBaaS: check for associations before deleting health monitor
* l2-population/lb/vxlan : ip neigh add command failed
* l2-population : send flooding entries when the last port goes down
* tests/service: consolidate setUp/tearDown logic
* Ensure ovsdb-client is stopped when OVS agent dies
* NSX: Fix status sync with correct mappings
* Support Port Binding Extension in Cisco N1kv plugin
* change Openstack to OpenStack in neutron
* ML2 binding:profile port attribute
* Rename/remove Nicira NVP references from VMware NSX unit tests
* Fix webob.exc.HTTPForbidden parameter miss
* Sync oslo cache with oslo-incubator
* Change tenant network type usage for IB Fabric
* options: consolidate options definitions
* Replace binding:capabilities with binding:vif_details
* Make sure dnsmasq can distinguish IPv6 address from MAC address
* Rename Neutron core/service plugins for VMware NSX
* Make metaplugin be used with a router service plugin
* Fix wrap target in iptables_manager
* BigSwitch: Fix tenant_id for shared net requests
* BigSwitch: Use backend floating IP endpoint
* Updated from global requirements
* Imported Translations from Transifex
* Raise max header size to accommodate large tokens
* NSX: get_port_status passed wrong id for network
* Imported Translations from Transifex
* Reset API naming scheme for VMware NSX plugin
* remove pointless test TestN1kvNonDbTest
* Rename Security Groups related methods for VMware NSX plugin
* Rename L2 Switch/Gateway related methods for VMware NSX plugin
* Rename Router related methods for VMware NSX plugin
* Plugins should call __init__ of db_base_plugin for db.configure
* Fixes Tempest XML test failures for Cisco N1kv plugin
* Fixes broken documentation hyperlinks
* Use "!=" instead of "is not" when comparing two values
* ML2/vxlan/test: remove unnecessary self.addCleanup(cfg.CONF.reset)
* Fix test_db_plugin.test_delete_port
* Handle racing condition in OFC port deletion
* Imported Translations from Transifex
* Adds https support for metadata agent
* Fix VPN agent does not handle multiple connections per vpn service
* Don't require passing in port_security=False if security_groups present
* wsgi.run_server no longer used
* Use different context for each API request in unit tests
* Sync minimum requirements
* Implements an LBaaS driver for NetScaler devices
* vshield task manager: abort tasks in stop() on termination
* Copy cache package from oslo-incubator
* BigSwitch: Move config and REST to diff modules
* Implements provider network support in PLUMgrid plugin
* Should specify expect_errors=False for success response
* Fix unshortened IPv6 address caused DHCP crash
* Add support to request vnic type on port
* tests/unit: Initialize core plugin in TestL3GwModeMixin
* Revert "Skip a test for nicira service plugin"
* Improve unit test coverage for Cisco plugin model code
* Imported Translations from Transifex
* Fix class name typo in test_db_rpc_base
* Embrane Tempest Compliance
* ipt_mgr.ipv6 written in the wrong ipt_mgr.ipv4
* Update help message of flag 'enable_isolated_metadata'
* Imported Translations from Transifex
* Fix invalid facilities documented in rootwrap.conf
* Reset the policy after loading extensions
* Fix typo in service_drivers.ipsec
* Validate rule uuids provided for update_policy
* Add update from agent to plugin on device up
* Remove dependent module py3kcompat
* Delete duplicate internal devices in router namespace
* Use six.StringIO/BytesIO instead of StringIO.StringIO
* Parse JSON in ovs_lib.get_vif_port_by_id
* Imported Translations from Transifex
* Skip a test for nicira service plugin
* Remove DEBUG:....nsx_cluster:Attribute is empty or null
* Fix request timeout errors during calls to NSX controller
* remove unused imports
* L3 agent fetches the external network id once
* Avoid processing ports which are not yet ready
* Ensure that session is rolled back on bulk creates
* Add DB mappings with NSX logical routers
* Use save_and_reraise_exception when reraise exception
* nec plugin: Compare OFS datapath_id as hex int
* Use six.moves.urllib.parse instead of urlparse
* Rename Queue related methods for VMware NSX plugin
* Lowercase OVS sample config section headers
* Add DB mappings with NSX logical switches
* NSX: Fix possible deadlock in sync code
* Raise an error from ovs_lib list operations
* Add additional unit tests for the ML2 plugin
* Fix ValueError in ip_lib.IpRouteCommand.get_gateway()
* Imported Translations from Transifex
* Fix log-related tracebacks in nsx plugin
* add router_id to response for CRU on fw/vip objs
* Move db migration of ml2 security groups to havana
* Sync latest oslo.db code into neutron
* Add support for router scheduling in Cisco N1kv Plugin
* Imported Translations from Transifex
* Add migration support from agent to NSX dhcp/metadata services
* Validate multicast ip range in Cisco N1kv Plugin
* NSX plugin: fix floatingip re-association
* Re-enable lazy translation
* Do not append to messages with +
* Remove psutil dependency
* Remove legacy quantum config path
* LBaaS: move agent based driver files into a separate dir
* mailmap: update .mailmap
* Fix binding:host_id is set to None when port update
* Return request-id in API response
* Skip extra logging when devices is empty
* Add extraroute_db support for Cisco N1kv Plugin
* Improve handling of security group updates
* ML2 plugin cannot raise NoResultFound exception
* Fix typo in rootwrap files: neuton -> neutron
* Imported Translations from Transifex
* Prepare for multiple cisco ML2 mech drivers
* ML2 Cisco Nexus MD: Create pre/post DB event handlers
* Support building wheels (PEP-427)
* NVP plugin:fix delete sec group when backend is out of sync
* Use oslo.rootwrap library instead of local copy
* Fix misspellings in neutron
* Remove unnecessary call to get_dhcp_port from DeviceManager
* Refactor to remove _recycle_ip
* Allow multiple DNS forwarders for dnsmasq
* Fix passing keystone token to neutronclient instance
* Don't document non-existing flag '--hide-elapsed'
* Fix race condition in network scheduling to dhcp agent
* add quota support for ryu plugin
* Imported Translations from Transifex
* Enables BigSwitch/Restproxy ML2 VLAN driver
* Add and update subnet properties in Cisco N1kv plugin
* Fix error message typo
* Configure floating IPs addresses after NAT rules
* Add an explicit tox job for functional tests
* improve UT coverage for nicira_db operations
* Avoid re-wiring ports unnecessarily
* Process port_update notifications in the main agent loop
* Base ML2 bulk support on the loaded drivers
* Imported Translations from Transifex
* Removes an incorrect and unnecessary return
* Reassign IP to vlan interface when deleting a VLAN bridge
* Imported Translations from Transifex
* Change metadata-agent to have a configurable backlog
* Sync with commit-id: 9d529dd324d234d7aeaa3e6b4d3ab961f177e2ed
* Remove unused RPC calls from n1kv plugin code
* Change metadata-agent to spawn multiple workers
* Extending quota support for neutron LBaaS entities
* Tweak version nvp/nsx version validation logic for router operations
* Simplify ip allocation/recycling to relieve db pressure
* Remove unused code
* Reduce severity of log messages in validation methods
* Disallow non-admin users update net's shared attribute
* Fix error while connecting to busy NSX L2 Gateway
* Remove extra network scheduling from vmware nsx plugin
* L3 Agent restart causes network outage
* Remove garbage in vim header
* Enable hacking H233 rule
* Rename nvp_cluster for VMware NSX plugin
* Minimize the cost of checking for api worker exit
* Remove and recreate interface if already exists

2014.1.b2
---------

* Use an independent iptables lock per namespace
* Report proper error message in PLUMgrid Plugin
* Fix interprocess locks for run_tests.sh
* Clean up ML2 Manager
* Expunge session contents between plugin requests
* Remove release_lease from the DHCP driver interface
* VMware NSX: add sanity checks for NSX cluster backend
* Update RPC code from oslo
* Fix the migration adding a UC to agents table
* Configure plugins by name
* Fix negative unit test for sec group rules
* NVP: Add LOG.exception to see why router was not created
* Add binding:host_id when creating port for probe
* Fix race condition in delete_port method. Fix update_port method
* Use information from the dnsmasq hosts file to call dhcp_release
* Fix pip install failure due to missing nvp.ini file
* Imported Translations from Transifex
* Imported Translations from Transifex
* Make timeout for ovs-vsctl configurable
* Remove extra whitespace
* Fix extension description and remove unused exception
* Fix mistake in usage drop_constraint parameters
* Fix race condition on ml2 delete and update port methods
* Fix Migration 50e86cb2637a and 38335592a0dc
* L3 Agent can handle many external networks
* Update lockutils and fixture in openstack.common
* Add test to port_security to test with security_groups
* LBaaS: handle NotFound exceptions in update_status callback
* VMware NSX: Fix db integrity error on dhcp port operations
* Use base.BaseTestCase in NVP config test
* Remove plugin_name_v2 and extension_manager in test_config
* Enables quota extension on BigSwitch plugin
* Add security groups tables for ML2 plugin via migration
* Rename nicira configuration elements to match new naming structure
* Fix race in get_network(s) in OVS plugin
* Imported Translations from Transifex
* Fix empty network deletion in db_base_plugin for postgresql
* Remove unused imports
* nicira: fix db integrity error during port deletion
* Rename check_nvp_config utility tool
* Remove redundant codes
* Remove dupl. for get_resources in adv. services
* Start of new developer documentation
* Fix NoSuchOptError in lbaas agent test
* Corrects broken format strings in check_i18n.py
* [ML2] l2-pop MD handle multi create/delete ports
* Dnsmasq uses all agent IPs as nameservers
* Imported Translations from Transifex
* BigSwitch: Fixes floating IP backend updates
* neutron-rootwrap-xen-dom0 handles data from stdin
* Remove FWaaS Noop driver as default and move to unit tests dir
* Send DHCP notifications regardless of agent status
* Mock looping_call in metadata agent tests
* Imported Translations from Transifex
* Change default eswitchd port to avoid conflict
* Midonet plugin: Fix source NAT
* Add support for NSX/NVP Metadata services
* Update the descriptions for the log cfg opts
* Add VXLAN example to ovs_neutron_plugin.ini
* Imported Translations from Transifex
* ml2/type_gre: Adds missing clear_db to test_type_gre.py
* ml2: gre, vxlan type driver can leak segment_id
* NVP: propagate net-gw update to backend
* Imported Translations from Transifex
* Nicira: Fix core_plugin path and update default values in README
* Include lswitch id in NSX plugin port mappings
* Imported Translations from Transifex
* Revert "move rpc_setup to the last step of __init__"
* extra_dhcp_opt add checks for empty strings
* LBaaS: synchronize haproxy deploy/undeploy_instance methods
* NVP plugin: Do backend router delete out from db transaction
* NVP plugin: Avoid timeouts if creating routers in parallel
* Updates tox.ini to use new features
* LBaaS: fix handling pending create/update members and health monitors
* Add X-Tenant-ID to metadata request
* Do not trigger agent notification if bindings do not change
* fix --excluded of meter-label-rule-create is not working
* move rpc_setup to the last step of __init__
* Updated from global requirements
* Sync global requirements to pin sphinx to sphinx>=1.1.2,<1.2
* Update common network type consts to same origin
* Remove start index 0 in range()
* LBaaS: unify haproxy-on-host plugin driver and agent
* change variable name from plugin into agent
* Imported Translations from Transifex
* Add post-mortem debug option for tests
* validate if the router has external gateway interface set
* Remove root_helper config from plugin ini
* Fix a race condition in agents status update code
* Add LeastRouters Scheduler to Neutron L3 Agent
* Imported Translations from Transifex
* Imported Translations from Transifex
* Remove dead code _arp_spoofing_rule()
* Add fwaas_driver.ini to setup.cfg
* Switch to using spawn to properly treat errors during sync_state
* Fix a typo in log exception in the metering agent
* Sync rpc fix from oslo-incubator
* Do not concatenate localized strings
* Imported Translations from Transifex
* Removed erronus config file comment
* Fix str2dict and dict2str's incorrect behavior
* Improve unit test coverage for Cisco plugin common code
* Change to improve dhcp-agent sync_state
* Fix downgrade in migration
* Sync dhcp_agent.ini with the codes
* Imported Translations from Transifex
* Handle failures on update_dhcp_port
* Handle exceptions on create_dhcp_port

2014.1.b1
---------

* Imported Translations from Transifex
* Add vpnaas and debug filters to setup.cfg
* Fix misspells
* Fix bad call in port_update in linuxbridge agent
* atomically setup ovs ports
* Adds id in update_floatingip API in PLUMgrid plugin driver
* Sync Log Levels from OSLO
* update error msg for invalid state to update vpn resources
* Add missing quota flags in the config file sample
* Imported Translations from Transifex
* Fix unable to add allow all IPv4/6 security group rule
* Add request timeout handling for Mellanox Neutron Agent
* Revert "ML2 plugin should not delete ports on subnet deletion"
* Improve OVS agent logging for profiling
* l3_agent: make process_router more robust
* Fixes missing method in Hyper-V Utils (Metering)
* Fix metering iptables driver doesn't read root_helper param
* Updates .gitignore
* Stop logging unnecessary warning on context create
* Avoid loading policy when processing rpc requests
* Improve unit test coverage for Cisco plugin base code
* Pass in certain ICMPv6 types by default
* Ensure NVP API connection port is always an integer
* Mocking ryu plugin notifier in ryu plugin test
* Rebind security groups only when they're updated
* Fix format errors seen in rpc logging
* Add test_handle_router_snat_rules_add_rules
* Rebind allowed address pairs only if they changed
* Enforce unique constraint on neutron pool members
* Send only one agent notification on port update
* Fix showing nonexistent NetworkGateway throws 500 instead of 404
* Imported Translations from Transifex
* Update Zhenguo Niu's mailmap
* Improve unit test coverage for Cisco plugin nexus code
* Preserve floating ips when initializing l3 gateway interface
* Fwaas can't run in operating system without namespace feature
* Imported Translations from Transifex
* metaplugin: use correct parameter to call neutron client
* Replace stubout with fixtures
* Imported Translations from Transifex
* Imported Translations from Transifex
* Mock the udevadm in the TunnelTestWithMTU test
* Avoid dhcp agent race condition on subnet and network delete
* Sync openstack.common.local from oslo
* Imported Translations from Transifex
* ML2 plugin should not delete ports on subnet deletion
* Add state reporting to the metadata agent
* Move MidonetInterfaceDriver and use mm-ctl
* Do not add DHCP info to subnet if DHCP is disabled
* Handle IPAddressGenerationFailure during get_dhcp_port
* Add request-id to log messages
* Imported Translations from Transifex
* Enable polling minimization
* Add configurable ovsdb monitor respawn interval
* Ensure get_pid_to_kill works with rootwrap script
* Adds tests, fixes Radware LBaaS driver as a result
* Optionally delete namespaces when they are no longer needed
* Call _destroy_metadata_proxy from _destroy_router_namespaces
* Added check on plugin.supported_extension_aliases
* Cisco nexus plugin fails to untrunk vlan if other hosts using vlan
* Catch PortNotFound exception during get_dhcp_port
* Reduce the severity of dhcp related log traces
* MidoNet: Added support for the admin_state_up flag
* Fix OVS agent reclaims local VLAN
* Replace mox in unit tests with mock
* LBaaS: fix reported binary name of a loadbalancer agent
* Apply six for metaclass
* NVP plugin:fix connectivity to fip from internal nw
* Imported Translations from Transifex
* Add support for NSX/NVP DHCP services
* Fix downgrade in migration
* Imported Translations from Transifex
* Add log statements for policy check failures
* Lower severity of log trace for DB integrity error
* Adds delete of a extra_dhcp_opt on a port
* Round-robin SVI switch selection fails on Cisco Nexus plugin
* Tune up report and downtime intervals for l2 agent
* Fix DB integrity issues when using postgres
* Move Loadbalancer Noop driver to the unit tests
* Removes unused nvp plugin config param
* Midonet to support port association at floating IP creation
* Arista ML2 mech driver cleanup and integration with portbindings
* Fix MeteringLabel model to not clear router's tenant id on deletion
* Fix downgrade in migration
* Fix sqlalchemy DateTime type usage
* Linux device name can have '@' or ':' characters
* Remove the warning for Scheduling Network
* Do not run "ovs-ofctl add-flow" with an invalid in_port
* Replace a non-existing exception
* Fix random unit-test failure for NVP advanced plugin
* Updated from global requirements
* Cleanup HACKING.rst
* Remove confusing comment and code for LBaaS
* Don't shadow str
* ExtraRoute: fix _get_extra_routes_by_router_id()
* remove repeated network type definition in cisco plugin
* Refactor configuring of floating ips on a router
* Remove database section from plugin.ini
* Fix import log_handler error with publish_errors set
* DHCP agent scheduler support for BigSwitch plugin
* Fix segment range in N1KV test to remove overlap
* Fix query error on dhcp release port for postgresql
* sync log from oslo
* Imported Translations from Transifex
* Use correct device_manager member in dhcp driver
* LBaaS UT: use constants vs magic numbers for http error codes
* Modified configuration group name to lowercase
* Avoid dhcp agent race condition on subnet and network delete
* Ensure OVS plugin is loaded in OVS plugin test
* Remove deprecated fields in keystone auth middleware
* Fix error while creating l2 gateway services in nvp
* Fix update_device_up method of linuxbridge plugin
* LBaaS: Fix incorrect pool status change
* Imported Translations from Transifex
* NVP: Correct NVP router port mac to match neutron
* Updated from global requirements
* Removing workflows from the Radware driver code
* LBaaS: when returning VIP include session_persistence even if None
* Imported Translations from Transifex
* change assertEquals to assertEqual
* Fix TypeError: kill doesn't make sense
* Update latest OSLO
* Revert back to 'call' for agent reports
* Imported Translations from Transifex
* Imported Translations from Transifex
* Fixing the syntax error in the XML Serializer
* Raise VipExists exception in case Vip is created or updated for a pool that already has a Vip
* Imported Translations from Transifex
* NVP metadata access - create elevated context once
* Fix race condition in dhcp agent
* adding parameter to configure QueuePool in SQLAlchemy
* Fix issues with db pooling
* use the fact that empty sequences are false
* Ensure that lockfile are defined in a common place
* Imported Translations from Transifex
* Fix typo in policy.json and checks in nicira plugin
* Fix DB query returning ready devices in LoadBalancerCallbacks
* Imported Translations from Transifex
* Load all the necessary database tables when running cisco plugin
* Fix haproxy cfg unit test
* fix mis-placed paren in log statement for l3-scheduler
* Imported Translations from Transifex
* Add bulking support for Cisco plugin
* Validate protocol when creating VIP
* Allow tests in TestDhcpAgentEventHandler run independently
* Add scheduling support for the Brocade plugin
* Imported Translations from Transifex
* Synchronize QuantumManager.get_instance() method
* Imported Translations from Transifex
* Imported Translations from Transifex
* Pin SQLAlchemy to 0.7.x
* Improve test coverage for quantum wsgi module
* Adds delete-orphan to database deletion
* Imported Translations from Transifex
* Do not disable propagate on root logger
* NVP metadata access - create elevated context once
* Registers root_helper option for test_iptables_firewall
* Resolves ryu plugin unittest errors
* Set fake rpc implementation in test_lb_quantum_agent
* Ensure DB pooling code works with newer eventlet versions
* Imported Translations from Transifex
* Sync latest Oslo components for updated copyright
* drop rfc.sh
* Replace "OpenStack LLC" with "OpenStack Foundation"
* sync Oslo Grizzly stable branch with Quantum
* First havana commit
* Ensure port get works when NVP mapping not stored in Quantum DB
* remove references to netstack in setup.py
* Imported Translations from Transifex
* port_security migration does not migrate data
* Adds Grizzly migration revision
* Switch to final 1.1.0 oslo.config release
* Fix detection of deleted networks in DHCP agent
* Add l3 db migration for plugins which did not support in folsom
* Updates latest OSLO changes
* Set fake rpc backend impl for TestLinuxBridgeAgent
* Imported Translations from Transifex
* Update oslo rpc libraries
* Sets default MySql engine to InnoDB
* Solve branch in migration path
* Fixes Hyper-V agent issue with mixed network types
* Imported Translations from Transifex
* missing - in --config-file
* Fix typo
* Log the configuration options for metadata-proxy and agent
* Imported Translations from Transifex
* NVP plugin: return 409 if wrong router interface info on remove
* Imported Translations from Transifex
* Ensure metadata access network does not prevent router deletion
* Filter out router ports without IPs when gathering router sync data
* Do not delete subnets with IPs on router interfaces
* Update to Quantum Client 2.2.0
* Add explicit egress rules to nvp security profile
* Update tox.ini to support RHEL 6.x
* Fix exception typo
* Disable secgroup extension when Noop Firewall driver is used
* Wrap quota controller with resource.Resource
* Allow probe-create to specify device_owner
* Enable handling the report_state RPC call in Brocade Plugin
* Imported Translations from Transifex
* Create quantum client for each api request in metadata agent
* Lock tables for update on allocation/deletion
* NVP plugin: configure metadata network only if overlapping IPs are enabled
* Show default configuration Quotas
* add ns-metadata-proxy rootwrap filters to dhcp.filters
* isolated network metadata does not work with nvp plugin
* Imported Translations from Transifex
* Load quota resources dynamically
* Notify creation or deletion of dhcp port for security group
* fix mis-matched kwargs for a few calls to NvpPluginException
* Populate default explicit allow rules for egress
* Switch to oslo.config
* Moved the configuration variables
* Make run_tests.sh pep8 conf match tox
* Fix syntax error in credential.py and missing __init__.py
* Imported Translations from Transifex
* Add common test base class to hold common things
* fix incorrect pathname
* Prevent DoS through XML entity expansion
* Delete DATABASE option checkup testcases
* Fixes linuxbridge agent downs with tap device deletion timing issue
* Rename source_(group_id/ip_prefix) to remote_(group_id/ip_prefix)
* Imported Translations from Transifex
* Setup device alias by device flavor information
* L3 port delete prevention: do not raise if no IP on port
* Pin pep8 to 1.3.3
* Avoid sending names longer than 40 character to NVP
* move cisco-specific extensions to Cisco extensions directory
* Add UT for LBaaS HAProxy driver
* Include health monitors expected codes upper bound into HAProxy config
* Allow DHCP and L3 agents to choose if they should report state
* Imported Translations from Transifex
* Enable HA proxy to work with fedora
* Prevent exception with VIP deletion
* Change the default l3_agent_manager to L3NATAgent
* Imported Translations from Transifex
* NEC plugin support for dhcp network and router scheduling
* enable linuxbridge for agent scheduler
* Move network schedule to first port creation
* Imported Translations from Transifex
* Host route to metadata server with Bigswitch/Floodlight Plugin
* Incorrect argument in calling post_json
* fix update_port to get tenant_id from db rather than request
* Ensure max length of iptables chain name w/o prefix is up to 11 chars
* Cisco plugin support for creating ports without instances
* mock quantum.agent.common.config.setup_logging
* Imported Translations from Transifex
* Add initial testr support
* Replace direct tempfile usage with a fixture
* Set fake rpc implementation in metaplugin test configuration
* Enabled add gateway to refrain from checking exit code
* Add stats reporting to HAProxy namespace driver
* Add session persistence support to LBaaS HAProxy driver
* Remove deprecated assertEquals alias
* LBaaS Agent Reference Implementation
* Imported Translations from Transifex
* create a Quantum port to reserve VIP address
* NVP plugin support for dhcp network scheduling
* Bump python-quantumclient version to 2.1.2
* Add scheduling feature basing on agent management extension
* Remove compat cfg wrapper
* NVP Router: Do no perfom SNAT on E-W traffic
* Enable multiple L3 GW services on NVP plugin
* Fix retrieval of shared networks
* Imported Translations from Transifex
* Remove network type validation from provider networks extension
* Fix NVP plugin not notifying metadata access network to DHCP agent
* Limit amount of fixed ips per port
* Fetch all pages when listing NVP Nat Rules
* Unpin PasteDeploy dependency version
* Make sure all db accesses use subtransaction
* Use testtools instead of unittest or unittest2
* Port update with existing ip_address only causes exception
* Enables packetfilter ext in NEC plugin based on its driver config
* Set default api_extensions_path for NEC plugin
* Fixes import reorder nits
* Imported Translations from Transifex
* Latest common updates
* Limit chain name to 28 characters
* Add midonet to setup.py
* Add password secret to brocade plugin
* Use db model hook to filter external network
* Add default state_path to quantum.conf
* Imported Translations from Transifex
* Imported Translations from Transifex
* refactor LoadBalancerPluginDbTestCase setUp()
* Imported Translations from Transifex
* Remove external_id and security group proxy code
* Add pagination parameters for extension extraroute
* Imported Translations from Transifex
* Provide a default api_extensions_path for nvp_plugin
* AttributeError: No such RPC function 'report_state'
* Add pagination support for xml
* Sync latest install_venv_common.py with olso
* Imported Translations from Transifex
* Add check-nvp-config utility
* Close file descriptors when executing sub-processes
* Add support Quantum Security Groups for Ryu plugin
* Resolve branches in db migration scripts to G-3 release
* Add Quantum support for NVP Layer-2 gateways
* Implement MidoNet Quantum Plugin
* Routing table configuration support on L3
* Correct permissions on quantum-hyperv-agent
* Raising error if invalid attribute passed in
* Support Port Binding Extension in BigSwitch plugin
* Exit if DHCP agent interface_driver is not defined
* Supporting pagination in api v2.0
* Update latest OSLO files
* Modify dhcp agent for agent management extension
* Imported Translations from Transifex
* Metadata support for NVP plugin
* Add routed-service-insertion
* plugin/nec: Make sure resources on OFC is globally unique
* Fix SG interface to reflect the reality
* Add unit test for ryu-agent
* Agent management extension
* Need to pass port['port'] to _get_tenant_id_for_create()
* Improve error handling when nvp and quantum are out of sync
* Decouple helper functions from L3NatDBTestCase
* Imported Translations from Transifex
* Add Migration for nvp-qos extension
* Use oslo-config-2013.1b3
* Shorten the DHCP default resync_interval
* Add nvp qos extension
* Imported Translations from Transifex
* Unable to update port as non-admin nvp plugin
* Update nvplib to use HTTP constants
* Rename admin_status_up to admin_state_up
* Fixed the typo of loadbalancer test case
* Allow nicira plugin to handle multiple NVP API versions
* Imported Translations from Transifex
* L3 API support for BigSwitch-FloodLight Plugin
* Add an update option to run_tests.sh
* Avoid extra query when overlapping IPs are disabled
* Allow tests from test_dhcp_agent run independently
* Imported Translations from Transifex
* Mark password config options with secret
* Adds Brocade Plugin implementation
* Add support for extended attributes for extension resources
* Imported Translations from Transifex
* Support iptables-based security group in NEC plugin
* Persist updated expiration time
* Support advanced validation of dictionaries in the API
* Synchronize code from oslo
* Add check for subnet update with conflict gateway and allocation_pools
* Alembic migration script for Loadbalancing service
* Fix NVP L3 gateway ports admin_state_down on creation
* Remove cfg option default value and check if missing
* Remove duplicated option state_path from netns cleanup
* only destroy single namespace if router_id is set
* Use AssertEqual instead of AssertTrue
* Imported Translations from Transifex
* Move auth_token configurations to quantum.conf
* L3 API support for nicira plugin
* Unused methods in quantum.wsgi clean up
* Add firewall_driver option to linuxbridge_conf.ini
* Adds API parameters to quantum.api.extension.ResourceExtension
* fix grammar in NetworkInUse exception
* Imported Translations from Transifex
* PLUMgrid quantum plugin
* Implements quantum security groups support on OVS plugin
* Sync latest cfg from oslo-incubator
* Improvements to API validation logic
* Imported Translations from Transifex
* add non-routed subnet metadata support
* Imported Translations from Transifex
* Enable OVS and NETNS utilities to perform logging
* Add unit tests for Open vSwitch Quantum plugin
* Add NVP Security group support
* Fix import error in ryu-agent
* Imported Translations from Transifex
* Bad translation from network types to nvp transport types
* Update .coveragerc
* Register root_helper in test_debug_commands and test_dhcp_agent
* Adds xml support for quantum v2 API
* Allow tools/install_venv_common.py to be run from within the source directory
* Cisco plugin cleanup follow up commit
* Be smarter when figuring out broadcast address
* Use policy_file parameter in quantum.policy
* Imported Translations from Transifex
* Define root_helper variable under the [AGENT] section
* Fixes rest of "not in" usage
* Updated to latest oslo-version code
* Imported Translations from Transifex
* Imported Translations from Transifex
* Imported Translations from Transifex
* Resetting session persisnence for a VIP
* Improve data access method of ryu-agent
* Fixes 'not in' operator usage
* Imported Translations from Transifex
* Adds support of TCP protocol for LBaaS VIPs
* Sync latest cfg from oslo-incubator
* Remove redunant key list generation in Cisco plugin
* Fixes if statement inefficiency in quantum.agent.linux.interface
* Imported Translations from Transifex
* Postgresql ENUM type requires a name exceptions NVP Plugin
* correct spelling of Notify in classname
* Disable dhcp_domain distribution when dhcp_domain is empty
* Make protocol and ethertype case insensitive for security groups
* Fix branch in db migration scripts
* Finish adding help strings to all config options in Quantum code
* Add NVP port security implementation
* Imported Translations from Transifex
* Set default lock_path in state_path
* Use install_venv_common.py from oslo
* Make get_security_groups() return security group rules
* Fix OVSQuantumAgent.port_update if not admin_state_up
* Clean up test_extensions.py imports
* Fixes import order errors
* OVS cleanup utility removes veth pairs
* Revert "Reqd. core_plugin for plugin agents & show cfg opts loaded."
* Reqd. core_plugin for plugin agents & show cfg opts loaded
* Ensure that correct root helper is used
* Fix InvalidContentType can't be raised because of error in constructor
* OVS: update status according to admin_state_up
* Cisco plugin cleanup
* Improving code reuse with loadbalancer entity deletion
* Fix database reconnection
* Fixes per tenant quota doesn't work
* Adds port security api extension and base class
* LinuxBridge: set port status as 'DOWN' on creation
* LinuxBridge: update status according to admin_state_up
* Use babel to generate translation file
* LBaaS plugin returns unnecessary information for PING and TCP health monitors
* Fix all extension contract classes inherit from extensions.ExtensionDescriptor
* get_security_group() now returns rules
* set allocation_pool_id nullable=False
* make IPv6 unit test work on systems with eth0
* Support Port Binding Extension in NEC plugin
* Enable NEC OpenFlow plugin to use per-tenant quota
* Enhance wsgi to listen on ipv6 address
* Fix i18n messages
* Update Oslo rpc
* Enforces generic sqlalchemy types in migrations
* Remove redudant code
* Removes redundant code in quantum.api.api_common
* Fix i18n messages in quantum.api.api_common
* Completes unittest coverage of quantum.api.api_common
* Enable test_agent_ovs_cleanup to be run alone
* Fix i18n messages for cisco plugin
* Provide atomic database access for ports in linuxbridge plugin
* Add help strings to config file options in Quantum code
* Document that code is on github now in README
* Config lockutils to use a temp path for tests
* Fix downgrade revision to make db migration linear
* Send notification on router interface create/delete
* More unittests for quantum.api.v2.base
* Fixes inefficiency in quantum.api.v2.base._filters
* Refactor hyperv plugin and agent
* Update Oslo rpc module
* Provide atomic database access nvp plugin
* _validate_security_groups_on_port was not validating external_ids
* Update WebOb version to >=1.2
* Ensure that agents also set control_exchange
* Add a common test case for Port Binding Extension
* Fix line endings from CRLF to LF
* Fixes import order nits
* Fix ATTR_NOT_SPECIFIED comparison errors
* Add migration for network bindings in NVP plugin
* NEC OpenFlow plugin supports L3 agent RPC
* Update latest OSLO
* Catch up RPC context fixes on NEC OpenFlow plugin
* ensure all enums in loadbalancer models have names
* Adding multi switch support to the Cisco Nexus plugin
* Name the securitygrouprules.direction enum
* Adds support for deploying Quantum on Windows
* Adds a Hyper-V Quantum plugin
* Add exception validation for subnet used
* Remove accessing cfg.CONF.DATABASE in nec-agent
* Inform a client if Quantum provides port filtering feature
* Remove unsused imports in the plugins package
* DHCP agent unable to access port when restarting
* Remove unused imports in unit tests
* Use default_notification_level when notification
* Latest OSLO updates
* NvpPluginException mixes err_msg and err_desc
* Fixes i18n messages in nvp plugin
* Optimize if/else logic in quantum.api.v2.base.prepare_request_body()
* Fixes quantum.api.v2.base._filters to be more intuitive
* Fix for loadbalancer vips list
* rename port attribute variable to SECURITYGROUPS from SECURITYGROUP
* Remove relative imports from NVP plugin
* Port to argparse based cfg
* Fix database configuration of ryu-agent
* Pass X-Forwarded-For header to Nova
* The change implemented Lbaas CRUD Sqlalchemy operations
* Iptables security group implementation for LinuxBridge
* Update the migration template's default kwargs
* add migration support for lb security groups
* Fix import for quantum-db-manage
* Allow nvp_api to load balance requests
* API extension and DB support for service types
* Add migration support to Quantum
* Remove some unused imports
* Undo change to require WebOb 1.2.3, instead, require only >=1.0.8
* Add common support for database configuration
* Fixup import syntax error in unit test
* Enable the user to enforce validity of the gateway IP
* Add comment to indicate bridge names' length
* refactor QuotaV2 import to match to other exts
* change xxx_metadata_agent() into xxx_metadata_proxy()
* Fix the replacement placeholder in string
* Ensure that exception prints UUID and not pointer
* .gitignore cleanup
* Fixes i18n message for nec plugin
* Fixes i18n message for ryu plugin
* Remove unused imports in debug package
* sql_dbpool_enabled not passed to configured_db nvp_plugin
* Enable tenants to set non-owned ext network as router gateway
* Upgrade WebOb to 1.2.3
* Logging module cleanup
* Remove unused imports in common package
* Remove unused imports in rootwrap package
* Remove unused imports in db package
* Remove unused imports in api package
* Provider network implementation for NVP plugin
* Remove unused imports in agent package
* Set default core_plugin to None
* Ensure that exception prints correct text
* Cleans up bulk_body generation in quantum.api.v2.base.prepare_request_body()
* Exceptions cleanup
* Readjust try/catch block in quantum.api.v2.base.create()
* Ensures that the dnsmasq configuration file flag is always set
* Ensure allocation pools are deleted from database
* Raise InvalidInput directly instead of catch it
* Ensure bulk creations have quota validations
* Correct exception output for subnet deletion when port is used
* Update the configuration help for the OVS cleanup utility
* Implementing string representation for model classes
* Provide "atomic" database access for networks
* Add OVS cleanup utility
* Removes redundant code in quantum.api.v2.base.create()
* Add eventlet db_pool use for mysql
* Clean up executable modules
* Fixes import order nits
* Fix log message for unreferenced variable
* The patch introduces an API extension for LBaaS service
* Fix pep8 issues
* Add tox artifacts to .gitignore
* Correct i18n messages for bigswitch plugin
* dhcp_agent.ini, l3_agent.ini: update dhcp/l3_agent.ini
* Make patch-tun and patch-int configurable
* Update test_router_list to validate the router returned
* Fixed the security group port binding should be automatically deleted when delete_port
* Add restproxy.ini to config_path in setup.py
* Replaces assertEquals to assertEqual
* Completes coverage of quantum.api.v2.resource
* Fixed the unit tests using SQLite do not check foreign keys
* dhcp.filters needs ovs_vsctl permission
* Correct i18n message for nicira plugin
* Correct i18n message for metaplugin
* add parent/sub-resource support into Quantum API framework
* plugins/ryu: l3 agent rpc for Ryu plugin is broken
* pluins/ryu: Fixes context exception in Ryu plugin
* DRY for network() and subnet() in test_db_plugin.py
* Adds validity checks for ethertype and protocol
* Add script for checking i18n message
* Update evenlet monkey patch flags
* Remove unnecessary port deletion
* Support to reset dnsname_servers and host_routes to empty
* Prevent unnecessary database read by l3 agent
* Correct i18n message for linuxbridge plugin
* Add router testcases that missing in L3NatDBTestCase
* Releasing resources of context manager functions if exceptions occur
* Drop duplicated port_id check in remove_router_interface()
* Returns more appropriate error when address pool is exhausted
* Add VIF binding extensions
* Sort router testcases as group for L3NatDBTestCase
* Refactor resources listing testcase for test_db_plugin.py
* l3 agent rpc
* Fix rootwrap cfg for src installed metadata proxy
* Add metadata_agent.ini to config_path in setup.py
* add state_path sample back to l3_agent.ini file
* plugin/ryu: make live-migration work with Ryu plugin
* Remove __init__.py from bin/ and tools/
* Removes unused code in quantum.common
* Fixes import order nits
* update state_path default to be the same value
* Use /usr/bin/ for the metadata proxy in l3.filters
* prevent deletion of router interface if it is needed by a floating ip
* Completes coverage of quantum.agent.linux.utils
* Fixes Rpc related exception in NVP plugin
* make the DHCP agent use a unique queue name
* Fixes Context exception in BigSwitch/FloodLight Plugin
* fix remap of floating-ip within l3-agent polling interval
* Completes coverage of quantum.agent.rpc.py
* Completes coverage of quantum.agent.netns_cleanup.py
* add metadata proxy support for Quantum Networks
* Make signing dir a subdir in /var/lib/quantum
* Use openstack.common.logging in NEC OpenFlow plugin
* Correct i18n message for api and db module
* Fixes update router gateway successful with existed floatingip association
* Fixes order of route entries
* fix so cisco plugin db model to not override count methods
* Use auth_token middleware in keystoneclient
* Fixes pep8 nit
* Make sure we can update when there is no gateway port linked to it
* Fix syntax error in nvplib
* Removes quantum.tests.test_api_v2._uuid()
* Add filters for quantum-debug
* Removing unnecessary setUp()/tearDown() in SecurityGroupsTestCase
* Fix exception when security group rule already exists
* Don't force run_tests.sh pep8 only to use -N
* Correct i18n message for ovs plugin
* Replaces uuid.uuid4 with uuidutils.generate_uuid()
* Correct i18n message
* Removes _validate_boolean()
* Removes quantum.common.utils.str_uuid()
* Refactors quantum.api.v2.attributes.py
* Updates tearDown() to release instance objects
* pass static to argv to quantum-debug config parser
* Improve openvswitch and linuxbridge agents' parsing of mappings
* Move extension.py into quantum/api
* Ensure that the expiration time for leased IP is updated correctly
* Fix context problem
* bug 1057844: improve floating-ip association checks
* fix broken logic of only using hasattr to check for get_x_counts
* Prevent router being deleted if it is used by a floating IP
* Updates clear_db() to unregister models and close session
* The change allows loading several service plugins along with core plugin
* fix incorrect kwarg param name for region with l3-agent
* All egress traffic allowed by default should be implied
* Fix unitest test_router_list with wrong fake return value
* Delete floating port and floatingip in the same transaction
* Completes unittest coverage of quantum.api.v2.attributes.py
* Use DB count to get resource counts
* plugin/ryu, linux/interface: remove ryu specific interface driver
* Allow NVP plugin to use per-tenant quota extension
* Revert "Put gw_port into router dict result."
* Ensure that deleted gateway IP address is recycled correctly
* Ensure that fixed port IP address is in valid allocation range
* RESTProxy Plugin for Floodlight and BigSwitch
* Ensure that mac address is set to namespace side veth end
* plugin/ryu: update for ryu update
* plugin/ryu: add tunnel support
* Adds tests for attribute._validate_uuid
* Adds tests to attribute.convert_to_int
* Adds tests for attributes.is_attr_set
* Adds test scripts for _validate_string
* Adds test scripts for _validate_range
* Part of the patch set that enables VM's to use libvirts bridge type
* Remove qpid configuration variables no longer supported
* Removing unsed code for Cisco Quantum Plugin V1
* Add QUANTUM_ prefix for env used by quantum-debug
* Make tox.ini run pep8 checks on bin
* Explicitly include versioninfo in tarball
* Adds test scripts for _validate_values
* Clean up quantum.api.v2.validators
* Add indication when quantum server started
* Import lockutils and fileutils from openstack-common
* Update latest openstack-common code
* Clean up executable modules
* Remove nova code from Quantum Cisco Plugin
* Use isinstance for _validate_boolean
* Fixes convert_to_boolean logic
* Updated openstack-common setup and version code
* Validate L3 inputs
* Treat case when pid is None
* Fix openssl zombies
* Ensure that the anyjson version is correct
* Add eventlet_backdoor and threadgroup from openstack-common
* Add loopingcall from openstack-common
* Added service from openstack-common
* Sync latest notifier changes from openstack-common
* Update KillFilter to handle 'deleted' exe's
* Pep8 fixes for quantum master
* Use _validate_uuid in quantum.plugins.nec.extensions.packetfilter.py
* Use is_uuid_like in quantum.extensions.securitygroup.py
* Removes regex validation of UUIDs in dhcp_agent
* Use uuidutils.is_uuid_like in quantum.extentions.l3
* Implements _validate_uuid
* Use uuidutils for uuid validation
* Drop lxml dependency
* Testcase of listing collection shouldn't depend on default order of db query
* Add uuidutils module
* Log loaded extension messages as INFO not WARNING
* db_base_plugin_v2.QuantumDbPluginV2.create_port clean-up
* Clean-up comments in quantum/db/l3_db.py
* Import order clean-up
* let metaplugin work with plugin which has not l3 extension support
* Ensure that HTTP 400 codes are returned for invalid input
* Use openstack common log to do logging
* Put gw_port into router dict result
* Add check for cidr overrapping for adding external gateway
* Fix unnecessary logging messages during tests
* support 'send_arp_for_ha' option in l3_agent
* pin sqlalchemy to 0.7
* Remove unused metaplugin agents
* Get subnets of router interfaces with an elevated context
* Support external network in probe-create
* remove unused modules for linuxbridge/ovs plugin agent
* Chmod agent/linux/iptables_manager.py
* Quantum Security Groups API
* Make create_floatingip support transaction
* Update policies
* Notify about router and floating IP usages
* Fix exception when port status is updated with linux bridge plugin
* Call iptables without absolute path
* Delete the child object via setting the parent's attribute to None
* Add unit tests for the ovs quantum agent
* Add MTU support to Linux bridge
* Correct Intended Audience
* Add OpenStack trove classifier for PyPI
* use object directly instead of the foreigh key to update master db object
* Remove database access from agents
* Fix database clear when table does not exist
* IP subnet validation fixes
* Update default base database to be V2
* Update common
* add test for create subnet with default gateway and conflict allocation pool
* Logging indicates when service starts and terminates
* Ensures port is not created when database exception occurs
* Improve unit test times
* Add control_exchange option to common/config.py
* Treat invalid namespace call
* get_network in nvp plugin didn't return subnet information
* tests/unit/ryu/test_ryu_db: db failure
* correct nvplib to update device_id
* Update rpc and notifier libs from openstack.common
* Add quantum-usage-audit
* Fix filters default value in get_networks
* l3_nat_agent was renamed to l3_agent and this was missed
* Update vif driver of Ryu plugin
* Support for several HA RabbitMQ servers
* Correct the error message in the Class NoNetworkAvailable
* Fix flag name for l3 agent external network id
* clean notification options in quantum.conf
* Add log setting options into quantum.conf
* Warn about use of overlapping ips in config file
* Do global CIDR check if overlapping IPs disabled
* Fix rootwrap filter for dnsmasq when no namespace is used
* Add common popen support to the cisco plugin
* Use sqlite db on file for unit tests
* Uses a common subprocess popen function
* remove default value of local_ip in OVS agent
* Remove a function that is not used
* all rootwrap filter for 'route', used by l3-agent
* l3-agent: move check if ext-net bridge exists within daemon loop
* Add catch-call try/catch within rpc_loop in ovs plugin agent
* Fix OVS and LB plugins' VLAN allocation table synchronization
* ZMQ fixes for Quantum from openstack-common
* Restore SIGPIPE default action for subprocesses
* Fix for flat network creation in Cisco plugin
* Removes test desription that is no longer valid
* Modified code Pyflakes warning
* Fix deadlock of Metaplugin
* remove unittest section for nec plugin README file
* remove unittest section for ryu plugin README file
* Fix for DB error in the Cisco plugin
* modify the wrong phy_brs into phys_brs
* NVP plugin missing dhcp rpc callbacks
* make README point to real v2 API spec
* README file changes for Cisco plugin
* fix for nested rootwrap checks with 'ip netns exec'
* always push down metadata rules for router, not just if gateway exists
* Removed eval of unchecked strings
* Update NVP plugin to Quantum v2
* ovs-lib: make db_get_map return empty dict on error
* Update l3-agent.ini with missing configuration flags
* Sync a change to rpc from openstack-common
* Fix for failing network operations in Cisco plugin
* add missing files from setup.py
* Add quantum-nec-agent to bin directory
* remove not need shebang line in quantum debug
* make rootwrap filters path consistent with other openstack project
* Bump version to 2013.1, open Grizzly
* Fix lack of L3 support of NEC OpenFlow plugin
* Add a new interface driver OVSVethInterfaceDriver
* Ensure that l3 agent does not crash on restart
* make subnets attribute of a network read-only
* Exclude openstack-common from pep8 test
* Ensures that the Linux Bridge Plugin runs with L3 agent
* Remove an external port when an error occurs during FIP creation
* Remove the exeception handler since it makes no sense
* Add enable_tunneling openvswitch configuration variable
* Create .mailmap file
* Update default policy for add/remove router interface to admin_or_owner
* Add periodic check resync check to DHCP agent
* Update metaplugin with l3 extension update
* Add DHCP RPC API support to NEC OpenFlow plugin
* Remove an external interface when router-gateway is removed
openvswitch plugin does not remove inbound unicast flow in br-tun * Remove default name for DHCP port * Added policy checks for add interface and remove interface * allow multiple l3-agents to run, each with one external gateway net * Prevent floating-ip and ex-gateway ports should prevent net deletion * fix generation of exception for mismatched floating ip tenant-ids * Give better error to client on server 500 error * Change 422 error to 400 error * Add IP version check for IP address fields * Policies for external networks * Add IP commands to rootwrap fileter for OVS agent * Modified code Pyflakes warning * Modified code Pyflakes warning * Modified code Pyflakes warning * Modified code Pyflakes warning * Modified code Pyflakes warning * Modified code Pyflakes warning * Modified code Pyflakes warning * Modified code Pyflakes warning * Modified code Pyflakes warning * Fix broken L3 support of Ryu plugin * check subnet overlapping when adding interface to router * add local network type and use by default for tenant networks * Fix data passed to policy engine on update * remove incorrect mock assert\_called in unit tests * Fix dhcp agent rpc exception handling * Add missing include for logging when log\_config is used * Modified code Pyflakes warning * Modified code pyflakes warning * Improve error message when flat network already exists * Lower webob dep from v1.2.0 to v1.0.8 * Allocation pool creation should check if gateway is in subnet * Make sure floating IPs + gateways must be on external nets * restart dnsmasq when subnet cidr set changes * supress dhcp router opt for subnets with null gw * add rootwrap filters to wrap ip netns exec * Implements agent for Quantum Networking testing * Quantum dhcp crashes if no networks exist * Update with latest code from openstack-common (stable/folsom) * Fixes undefined variable 'network\_type' in OVS agent * Create utility to clean-up netns * Fix lack of L3 support of Ryu plugin * Ensure that port update set correct tag in OVS * ovs\_lib unable to parse return when port == -1 * L3: make use of namespaces by agent configurable * Fix error in rule for metadata server dnat * Fix programming error of ryu-plugin * Ensure network delete is handled by OVS agent * Implement L3 support in Metaplugin * Fixes agent problem with RPC * netns commands should always run in the root ns * Add lease expiration management to ip recycling * misc L3 fixes * expose openvswitch GRE tunnel\_id via provider API * Do not transfer ips if there isn't any * prevent invalid deletion of ports using by L3 devices * Modified code PEP8 warning * Implementation of 2nd phase of provider extension for openswitch * Mangle network namespace name used by dhcp\_agent * Update rootwrap; track changes in nova/cinder * remove policy check for host\_routes in update\_port * Ensure proper validation for l3 API attributes * Cisco nexus sub-plugin update\_network fix * Fix dhcp option distribution by dnsmasq * fix bug where network owned resources block delete * Plugin aware extensions should also be reset at each test setup * Ensure network connectivity for linuxbridge flat network * Execute unit tests for Cisco plugin with Quantum tests * prevent OVS + LB plugins from clearing device\_id and device\_owner * updated outdated comments in base v2 plugin class * clear db.\_ENGINE for each plugin init in Metaplugin * Enable tox to run OVS plugin unit tests * Allow tox to run plugin specific unit tests * fixes cisco nexus plugin delete network issue * Fix Metainterface driver with namespace * 
Add lease expiration script support for dnsmasq * Remove 'verbose' API capability * PEP8 issues fixed * removed some unused global variable * Update TESTING file * Typo fix in quantum: existant => existent * Add DHCP RPC API support to Ryu plugin * Run core unit tests for each plugin * OVS plugin tunnel bridges never learn * Add nosehtmloutput as a test dependency * fix typo in OVS plugin from recent bugfix * enable router deletion logic in l3-agent * Enable users to list subnets on shared networks * Fix IP allocation on shared networks ports * Move metaplugin test for common test directory * Enable DHCP agent to work with plugin when L2 agents use DB polling * fix associating a floating IP during floating IP creation * Ensure that LB agent does not terminate if interface already exists in bridge * Treat exceptions when invoking ovs-vsctl * Remove v1.0 and v1.1 API from version info * Get OVS port details from port ID * Fix undefined variables * Fixing unit test failures in Cisco plugin * fix netns delete so that it works when a ns is set * Linuxbridge support for L3 agent * Fix exception message for bulk create failure * quantum l3 + floating IP support * Add missing conversion specifiers in exception messages * Use a common constant for the port/network 'status' value * Remove unused variable * Log message missing parameter causes exception * Update README for v2 API * Fix flavor extension based on new attribute extension spec * Update the Nicira NVP plugin to support the v2 Quantum API * Enhancements to Cisco v2 meta-plugin * Add model support for DHCP lease expiration * Trivial openvswitch plugin cleanup * Convert DHCP from polling to RPC * Add quota per-tenant * Reset device owner when port on agent is down * Allow extra config files in unit tests * Fix visual indentation for PEP8 conformance * Updates pip requirements * NEC OpenFlow plugin support * Enables Cisco NXOS to configure multiple ports Implements blueprint cisco-nxos-enables-multiple-ports * Implementation of second phase of provider extension * deal with parent\_id not in target * remove old gflags config code * convert query string according to attr map * Add device\_owner attribute to port * implementation for bug 1008180 * Fix bulk create operations and make them atomic * Make sure that there's a way of creating a subnet without a gateway * Update latest openstack files * improve test\_db\_plugin so it can be leveraged by extension tests * Adds the 'public network' concept to Quantum * RPC support for OVS Plugin and Agent * Initial implemention of MetaPlugin * Make dhcp agent configurable for namespace * Linux Agent improvements for L3 * In some cases device check causes an exception * normalize the json output of show a given extension * move the correct veth into the netns for the LB * linux bridge fixes following v1 code removal * fixes typo in ensure\_namespace * Remove v1 code from quantum-server * Add netns to support overlapping address ranges * dhcp-agent: Ryu plugin support for dhcp agent * fix missing deallocation of gateway ip * RPC support for Linux Bridge Plugin and Agent * Implementation of bp per-net-dhcp-enable * Enhance Base MAC validation * Use function registration for policy checks * Exempt openstack-common from pep8 check * Make 4th octet of mac\_range configurable * Replace openvswitch plugin's VlanMap with vlan\_ids DB table * Remove unused properties * Notification for network/subnet/port create/delete/update. 
blueprint quantum-notifications * Make the plugin for test\_db\_plugin configurable * update DHCP agent to work with linuxbridge plug-in * ryu/plugin, agent: unbreak 610017c460b85e1b7d11327d050972bb03fcc0c3 * Add classmethod decorator to class methods of providervlan ext * Only delete VLAN information after Quantum network is deleted * Make quantum pipeline configurable from quantum.conf * ovs\_quantum\_plugin should use reconnect\_interval in common conf * add name into port and subnet * Update openvswitch tunnel unittest * Enable agents and plugins to use the same configuration file * Fix linuxbridge agent tests * Update openstack-common files * Initial V2 implementation of provider extension * Implements data-driven views and extended attributes * Add v2 API support for the Cisco plugin Blueprint cisco-plugin-v2-api-support * Enhance V2 validations to work better for integers and booleans * Refactor the test cases so that all the test cases are under one test class * Add quota features into quantum. Blueprint quantum-api-quotas * Assume that subclass validates value of UUID * fix bug lp:1025526,update iniparser.py to accept empty value * Ensures policy file is reloaded only if updated * Provide way to specify id in models\_v2 * Add validity checks to Quantum v2 resources * Avoid removal of attributes used by policy engine * Raise proper exception if policy file do not exist * Introduce files from openstack common * Ensures API v2 router does not load plugin twice * ovs-agent exception non-existent ports * Ryu plugin support for v2 Quantum API * Add option sql\_max\_retries for database connection * Enable quantum agents to work with global cfg.CONF * Create DHCP agent tap device from port ID * Fix some syntax errors * fix bug lp:1019230,update rpc from openstack-common * Fix v2 API policy checks when keystone is in use * implement dhcp agent for quantum * Corrects imported modules in Cisco and Ryu according to latest nova packages * Validate that network\_id in port/subnet POST belong to the same tenant * Verify CIDR overlaps among networks' subnets * Address problems with foreign keys with subnet and network deletion * Add 'allocation\_pools' to Quantum v2 API subnets * Delete IP allocation range for subnet when deleting subnet * Fix linux bridge plugin to be consistent with naming rules * v2 support for the linux bridge plugin * OVS plugin support for v2 Quantum API * Check if interface exists in bridge prior to adding * Ensure that subnet\_id is on correct network * Use setuptools git plugin for file inclusion * Cisco's unplug\_iface refers to non existing exception * Implement IP address allocation * Enable user to configure base mac address * Bug #1012418 - quantum agent for OVS does not install properly on Xen XCP * Add simple file loggin to ovs\_quantum\_agent * Fixing pep8 warning messages Bug #1017805 * Network deletion and subnet creation bug fixes bug 1017395 * Remove paste configuration details to a seperate file. blueprint use-common-cfg * Bug 1015953 - linuxbridge\_quantum\_agent device\_exists() is buggy * Reorder imports by full module path * Added iptables\_manager ( based on openstack/linux\_net.py ) This module will be the base library to implement security groups and generic firewall. It is an independent iptables module, made to be easy to package if used by agents and also inside quantum * Unit test and Readme changes related to cisco plugin * Implements the blueprint use-common-cfg for the quantum service. 
More specifically uses global CONF for the quantum.conf file * Ensure unique mac address allocation. This is the first part of bug 1008029 * Add authZ through incorporation of policy checks * Fix additional pep8 issues on Jenkins bug 1014644 * removed "runthis" and other unused functions from utils.py * Linux bridge agents did not work with common linus utils bug 1014286 * Added vlan range management for OVS plugin * Bug #1013967 - Quantum is breaking on tests with pep 1.3 * Remove wrong base class for l2network\_models after v2.0 API * Cisco cli cannot find argument action\_prefix * Use openstack.common.exception * Remove unused functions in common/utils.py * API v2: mprove validation of post/put, rename few attributes * Bug #1000406 - Return value of shell commands is not checked by plugins * Fix python2.4 incompatibility * Add API v2 support * Binaries should report versions * Fix up test running to match jenkins expectation * Add build\_sphinx options * Remove unused imports * Quantum should use openstack.common.jsonutils * Remove hardcoded version for pep8 from tools/test-requires * AuthN support for Quantum * fix bug lp:1007557,remove unused functions in utils.py * Add common dir for shared agent code, add OVS lib * Bug #1007153 * Register enable\_tunneling as bool opt * Quantum should use openstack.common.importutils * PEP8 fixes * Bug #1002605 * Automatically determine Quantum version from source * Fix linux bridge section name Bug #1006684 * Remove the reference to non existing exception by linuxbridgeplugin * bug #1006281 * Parse linuxbridge plugins using openstack.common.cfg * Bug #1004584 * fix some pylint warnings * fix errors in database test cases * Log the exception so app loading issues can be debuged * remove unneeded import from OVS agent that break 2.4 compat * blueprint man-support and fix documentation build bug 995283 * Fix print error for linux bridge bindings bug 1001941 * Add HACKING.rst to tarball generation bug 1001220 * fall back to \`ip link\` when \`ip tuntap\` unavailable bug 989868 * Cisco plugin CLI call to quantumclient CLI * Calling Super method from QuantumPortAwareScheduler.\_\_init\_\_ * OVS plugin: add tunnel ips to central database * Include AUTHORS in release package * blueprint database-common bug 995438 * bug 996163 * Bug #994758 * Change Resource.\_\_call\_\_() to not leak internal errors * Let OVSQuantumTunnelAgent sync with database * Cleaned up log usage * blueprint agent-db-ha bug 985470 bug 985646 * Update codebase for HACKING compliance * Make sample quantum.conf compliant with docs * Make ovs Interface option set properly * Removed simplejson from pip-requires * Remove dependency on python-quantumclient * Add sphinx to the test build deps * Add HACKING.rst coding style doc * return 404 for invalid api version request * fix issue with OVS plugin VLAN allocation after a quantum-server restart * bug 963152: add a few missing files to sdist tarball * API docs: fix typo for network delete * Open Folsom * Bug #956559 VIF driver and scheduler for UCS plugin are broken since the flag configuration mechanism in nova is changed. 
Fixing that and also fixing some property names, along changes to how the quantum client code is invoked * plugin/ryu/agent: unbreak a06b316cb47369ef4a2c522f5240fa3f7f529135 * Fix path to python-quantumclient * Split out pip requires and aligned tox file * ryu/nova: catch up d1888a3359345acffd8d0845c137eefd88072112 * Add root\_helper to quantum agents * Fix missing files in sdist package [bug 954906] * Fix for bug 921743 Response codes for create ops in API v1.0 not compliant with spec * bug 954538 Fix for the cisco unit tests * check connection in Listener. refer to Bug #943031 * fixed incorrect duplicate title * Fixed incorrect title for example 3.10 * Downgraded required version of WebOb to 1.0.8 * Bug #949261 Removing nova drivers for Linux Bridge Plugin * Remove outdated content from OVS plugin README, point to website instead * add git commit date / sha1 to sphinx html docs * more files missing in sdist tarball * make sure pip-requires is included in setup.py sdist * Introducing the tenant owenrship checks in the Cisco plugin, changes are almost identical to those in Bug#942713 * Fix some plugins that don't check that nets + ports are owned by tenant * remove pep8 and strict lxml version from setup.py * plugin: introduce ryu plugin * bug 934459: pip no longer supports -E * Fix bug 940732 stack.sh can't match sql\_connection string * Return appropriate error for invalid-port state in create port API * blueprint quantum-ovs-tunnel-agent * Initial commit: nvp plugin * unittests: setup FLAGS.state\_path properly: bug 938637 * Cleanup the source distribution * Fix ovs config file location * blueprint quantum-linux-bridge-plugin * Remove quantum CLI console script * Bug 925372: remove deprecated webob attributes (and also specify stable webob version in pip-requires) * bug 923510: avoid querying all ports for non-detail GET Network call * Make tox config work * Pin versions to standard versions * bp/api-filters This changeset implements filters for core Quantum API and provides unit tests * Split out quantum.client and quantum.common * Quantum was missing depend on lxml * bp/api-error-codes Restructured API error codes for Quantum API v1.1 This changeset provides the following changes: - Only standard HTTP errors for Quantum API v1.1 - Customized fault response body formatting according to API version - Changes to unit tests to deal with version specific status codes * blueprint ovs-portstats * Add support for dealing with 501 errors (notimplemented) * Improved VlanMap * moving batch config out of quantum-server repo * bug 920299: remove duplicate + outdate README * Getting ready for the client split * Removed erroneous print from setup.py * Fixes setup scripts for quantum plugins * Base version.py on glance * fix mysql port in sql\_connection example.. 
* Make the quantum top-level a namespace package * Add \_\_init\_\_.py from plugin to be copied on setup scripts * Fix lp bug 897882 * PEP8 quantum cleanup * Install a good version of pip in the venv * Rename .quantum-venv to .venv * Updating Cisco README with instructions on installing the patched ncclient library * Remove plugin pip-requires * blueprint refactor-readme-to-manual * Bug #890028 * Implementation of the BP services-insertion-wrapper inside the Cisco Plugin * blueprint operational-status-ovs-plugin * bug 903580: remove invalid extensions path from quantum.conf * Fix for bug 902175 * Readme Fix * blueprint api-framework-essex * Fix for bug 900277 * Fix for bug 900316 * Modified the Readme for Unit Test Execution Instructions * Bug 900093 Remove unused function in db/api.py * bug #891246: Fix paths in agent Makefile * Second round of packaging changes * Bug 891705 Fix to change reference to the Quantum CLI from within the Cisco extensions' CLI module * Correcting the plugins classpath in the Quantum README * The relative path for the "ucs\_inventory.ini" file has been fixed * bug #891267 : for XS, grab iface-id from XAPI directly if needed * Changes to make pip-based tests work with jenkins * Fix for bug 890498 * Fix for bug 888811 * Fixing find\_config\_file after packaging changes * Added timeout flag to ovs-vsctl to avoid infinte waiting * Add quantum.exceptions path to configed ext paths * Fix for Bug #888820 - pip-requires file support for plugins * Fixing Cisco plugin after update\_\* change * Fix for bug 888207 * Fix for bug 877525 * Bug #875995: Quantum README fixes * Change version numbers to be compatible with debian packaging * Make the openvswitch plugin tests work again * Swich over to update\_{net,port} instead of rename\_net and set\_port\_state * Added try import to quantum-server and quantum-cli * Bug 887706 * Blueprint authentication-for-quantum * blueprint quantum-packaging * Moved the initialization of the blade state so that the interfaces which are configured outside of Quantum are also initialized in the blade state * fix minor double-serialization bug in client.py * bug #863635: remove vestigial cheetah import from bin/cli * Change the ovs plugin create\_\*() calls to take the kwargs param * Changing the log messages in order to be always identified by their sub-packages of origin, and they can even be filtered on that basis * Add .gitreview config file for gerrit * New tests are being adding to the Diablo code (Cisco L2-Network plugin), and some fixes in the case where the tests were failing * Add the ability to specify multiple extension directories * Add code-coverage support to run\_tests.sh (lp860160) * Change port/net create calls to take an additional kwargs param * ovs plugin: Remove reference to set\_external\_ids.sh * fix pep8 issues in Cisco plugin * Remove hack for figuring out the vif interface identifier (lp859864) 2011.3 ------ * Update openvswitch plugin README * Update openvswitch plugin README * Get output from run\_tests * Add rfc.sh to help with gerrit workflow * merge tyler's unit tests for cisco plugin changes lp845140 * merge salv's no-cheetah CLI branch lp 842190 * Addressing Dan's comment on output generator * merge sumit's branch for lp837752 * merge salv's branch for bug834013 * merge salv's branch for keystone token on client bug838006 * merge rohit's db test branch: lp838318 * merge salv fix for bug 841982, fix minor pep8 violation * merge salv fix for bug834008 * Changes to address Salvatore's review comments, removed 
unnecessary imports, and changed a debug message * changing key names to confirm to api specs * Merging latest from lp:quantum * Merging lo:~salvatore-orlando/quantum/quantum-api-auth * Implementing Dan's suggestion concerning fixing the bug in db api rather than FakePlugin * Fixing bad indent * syncing diverged branches * merging from lp:quantum * merging from lp:quantum * Updating CLI for not using Cheetah anymore. Now using a mechanism based on Python built-in templates * Fixing the bug in FakePlugin * made general exception handling messages consistent removed LOG pylint errors cleanup in tests * Create operation now generate response with status code 202 * restoring correct default pipeline * Mergin from lp:quantum * Add information about quantum dependency for nova * merge salv's branch to remove dummy plugin * Changing communication between UCSM driver to UCSM to HTTPS * Adding CLI usage examlpes to the README * Adding client-side support for Keystone integration * Keystone-integrated pipeline should not be default in quantum.conf * Removing class DUmmyDataPlugin * Removed redundant configuration, and added more comments in the configuration files * Updating the README file * Merging Shweta's test cases for mutliport resource * Adding Multinic tests * Typo fix in README * Merging Sumit's changes including fixes for multinic support, and CLI module for working with extensions * More fixes for multi-nic support * Fixed a bug with plug\_interface * Merging from Cisco branch * Changes to incorporate earlier review comments, also for multiport resource * adding quantum database unit test cases * Merging changes from Ying's branch (new mutliport resource) * add multiport and exception handling * add multiport resource * Merging from lp:quantum * Avoiding deserializing body multiple times with several parameters * merge cisco consolidated plugin changes * Test on param\_value changes as follows: * Merging lp:~salvatore-orlando/quantum/bug834449 * Merging Ying's changes (minor) * fix print statements in novatenant and portprofile * merge trunk * Minor refactoring * Changes to l2network\_plugin for create\_ports and pylint fixes to cli.py * Modified CLI to handle both core and extensions CLI * merge trunk * lp835216 client lib was not passing in kwargs when creating exceptions * lp834694 fix integrity error when deleting network with unattached ports. 
Add unit test * Minor fix in delete\_port * merging changes from cisco consolidated branch * Fixes to support multinic * Merging fixes from Sumit's branch for extension API version number and to UCS inventory to associated VIF-ID with ports * Merging from the Cisco branch * adding new api methods using just port\_id * Fixing the extensions URL to 1.0 and pep8 error * bug fixes to handle multinic * Merging Shweta's fix for extensions' test cases (clean up was not happening completely) * Adding Network and Port clean up functions for portprofile unit tests * Merging from lp:quantum * Merging Shweta's fixes in the tests for key names changes in the Core API * make CLI show\_port command display interface-id, add additional test case * merge salvatore's new cli code * Dictionary key values changes in test\_extension * Merging lp:quantum, resolving conflict * merge two pep8 branch * Merging Ying's pep8 fixes * fix pep8 issues * Merging quantum trunk * fix pep8 warnings * Updating common/extensions.py in order not to instantiate a QuantumManager when retrieving plugin * Cleaning pep8 * Merging lp:~danwent/quantum/lp834491 Fixing Bug #834491: api alignment merge broke ovs plugin (Critical) * Addressing comments from Dan * Merging from quantum * merge cisco extensions branch * lp834491: change plugin to work with API code after the API alignment merge * Merging Shweta's fixes to the test cases for the extensions * Added Extension & ucs driver test changes and fixes * Merging from Sumit's branch, changes to VIF-driver and Scheduler; extension action names have been changed in response to Salvatore's review comments in the extensions branch review * Syncing with Cisco extensions branch * Merging changes from Sumit's branch * Changes qos description to string; changes extension API names for get\_host and get\_instance\_port * Mergin Ying's branch * change get\_host and get\_instance\_port function name * Cleaning (removing) unused code..hooray ! fixes for extension tests * Sorting correctly all imports for the Nexus Driver and Unit Test * Fixed the Unit Test for Nexus Driver * add cisco\_faults under l2network package * move faults/exceptions to l2network package, remove unecessary faults definitions change the portprofile action api's method fix imports order and other comments issues * Merging from Sumit's branch, import ordering related changes * Changing the order of imports (to satisfy convention) * Merging the Cisco branch * Updating README according to Somik's comment * Finishing cli work Fixing bug with XML deserialization * Completing Unit Tests * Merging lp:~salvatore-orlando/quantum/quantum-api-alignment * Configuration of multiple VLANs on the same Nexus Switch Interfaces * Adding unit test for rename\_network * Added logging to syslog or file specified at command line removed plugin direct mode fixed unit tests to reflect changes in cli code fixex pep8 errors * Merging from Sumit's branch * Fixed some bugs with credential and qos resources; also fixed l2network\_single\_blade * Merging Rohit's changes * helper function to get creds based on name * integration with l2network\_plugin.py * fixing relative import in nexus\_db.py * putting in db support for creds and qos * merge latest quantum branch and resolve conflicts * Merging lp:~asomya/quantum/lp833163 Fix for Bug #833163: Pep8 violations in recent packaging changes that were merged into trunk (Critical) * Addressing Somik's comment * Templated output for CLI completed! 
* PEP8 fixes for setup.py * delete quantum/common/test\_lib.py to prepare for quantum merge * Made changes according to reviewer's comments. Add addtional information on extension test in README * Merging changes from Sumit's branch * Merging lp:~cisco-openstack/quantum/802dot1qbh-vifdriver-scheduler * Merging lp:~cisco-openstack/quantum/l2network-plugin-persistence * Fixed a bug in the initialization of the UCS inventory; fixed another bug in deleting a port * Noticed some pep8 errors, fixed them * Merging lp:quantum * Changes to incorporate reviwer's comments. Also changed client.py to handle extension URLs * Review Changes * remove unnecessary code and sync faults and exception handling * Code changed base on Reviews pep8 passed pylint 9.10 * merging with lp:quantum * merging from lp:quantum * Fixes based on review comments * Addressing comments from Ziad and Somik * merge lp:~bgh/quantum/lp837174 * Fix unit test printing (lp837174) * Fixing issue in view builders concerning attachment identifiers * Code clean up as per reviewr's request; documentation strings, unused code, etc * Rewording of the README file to clarify the use of the SSh port * clean up code and fix some comments * clean code and fix some comments * Merging from Sumit's latest branch - Fixed loading of Nexus DB tables; moved imports to l2nework\_db.py; Refactoring of code to generalize inventory handling (enhancement) * Fixed loading of Nexus DB tables; moved imports to l2nework\_db.py, changes discussed & approved by Rohit * Making Keystone version configurable * Accidentally took quantum.conf out of branch. Now back in * Merging lp:~raxnetworking/quantum/bug827272 * Merging branch: lp:~danwent/quantum/test-refactor * Removing "excess" file * Missed adding a file earlier, fixed a small issue * Refactoring of code to generalize inventory handling (enhancement) * Merging UCS inventory state initialization fix from Sumit's branch * Fixes an issue with loading the UCS inventory when a dynamic nic has been used outside of Quantum * Removed obsolete instructions from README * Changes to reflect the new features (mutli-blade, multi-chassis support) * Changes to support calls from VIF Driver and Scheduler * Pep8, pylint fixes * fixing pep8 error * adding helper function for port binding model * UCS inventore persistence and pep8/pylint fixes * UCS persistence fixes * added new columns to models for ucs plugin multi blade support updated methods in ucs\_db for newly added columns changed column dynamic\_vnic\_id in port binding table to blade\_intf\_dn updated tests to handle new column name * Merging rohit's UCS persistence support * UCS plugin persistence * Persistence support for UCS plugin network * adding utility functions to create dictionaries * Merging changes from Rohit's branch * Merging changes from cisco extensions * added ucs plugin related execptions in cisco\_exceptions.py added ucs plugin persistence related modules - ucs\_models.py and ucs\_db.py added ucs db related unit tests in test\_database.py fixed formatting in l2network\_models.py and test\_database.py * Adding some error checks * Reduced excessive logging * Several fixes to initial version * fixing the the test\_database.py tests * pylint and pep8 fixes * Change profile-id * merged Shweta's branch for ext test. 
Minor fix for review comments * Review Changes * merged Shweta's ext test branch * Initial commit with lots of changes * Moved the conf file uncer the cisco directory * Moved the conf file uncer the cisco directory * Updated conf file * Adding Entension API unt tests * Syncing with lp:quantum * Code refactored, made changes are per reviwer's suggestions * sync up with l2network exception handling for extension * merged Cisco branch's latest changes * Adding changes from Sumit's latest merge * merge with lp:~cisco-openstack/quantum/l2network-plugin-extensions * replace exception handler by using cisco\_exceptions * Raising exceptions in extension resources handling (where missing). Changing exception name to QosNotFound * Changing exception name to QosNotFound * Mergin from Cisco branch * Raising exceptions in extension resources handling (where missing) * Merging fixes to client side exception handling. Thanks lp:tylesmit ! * Merging fixes and changes batch-config script. Thanks lp:danwent ! * Adding the Nexus support to the Persistence Framwork Modification of the Nexus Unit Case to be running with Persistence Framework pep8 passed pylint 8.81/10 * added nexus exception in cisco\_exceptions.py added log to methods in l2network\_db.py added nexus\_db.py and nexus\_models.py - persistence modules for nexus plugin * add plugins.ini back * add all conf/\*.ini back * merge with ying's branch * merging with Ying's extension branch * remove ying's test ciscoplugin * remove all configuration files * remove cisco\_demo and test\_scripts directory, which were used by our local tests * Removed concatenation per review comments * change the configuration files to the default values * pylint and pep8 fix * merging with ~cisco-openstack/quantum/l2network-plugin-extensions * fix pylint issuses * Making keystone integration optional in quantum configuration * Merging bug fix for Bug 821733. Thanks lp:salvatore-orlando ! * Fixing typo * Making the client raise the appropriate exception if needed. Also increasing the pylint score to above 8 * pep8 error fixed for l2network\_db.py * Mering Sumit's branch with plugin support for Credentials, QoS, NovaTenant resources. Also merging latest from lp:~cisco-openstack/quantum/l2network-plugin-persistence * Merging from Sumit's branch, VIF-driver and Quantum-aware scheduler * Removed extra spaces to satisfy pep8 * VIF driver for 802.1qbh and Quantum aware scheduler * fix some pylint issues * Pylint and pep8 fixes * Changes to support credentials, qos, and novatenant extensions * Removing unused error response codes * Merging lp:~asomya/quantum/lp824145 Fix for Bug#824145 : Adding a setup script for quantum * merge trunk pep8 fixes adapting CLI to API v1.0 Fixing wsgi to avoid failure with extensions * Fixed indentation and changed file comments * add extension change to ying's branch * merge trunk * Pulling in changes from lp:quantum * Merging Cisco's contribution to Quantum. 
Thanks to various folks at Cisco Systems, Quantum will have plugins to integrate with Cisco UCS blade servers using 802.1Qbh, Cisco Nexus family of switches and the ability for Quantum plugin to have multiple switches/devices within a single Quantum plugin * Merging Shweta's change to fix a function call in the test code * Adding the changed UCS Driver function names in test\_ucs\_driver * Santhosh/Deepak | Fixed an issue where collection actions for PUT and DELETE methods in resource extension were routing to update and delete action of the resource * Merging from Sumit's branch pylint fixes and incorporating review comments * Changes to README file and merging Shweta's changes * Mergin Shweta's test changes, also README file * Changes to test structure. Adding pylint correctons * Fixes to the README file per earlier review comments. Also removed main from one of the modules * Mergin from cisco brach * Merging from lp:quantum * Pulling changes from Cisco branch * Pylint fixes * exit unit tests if tests are invoked specifying a particular test * Merging Nexus pylint changes and other enhancements from Edgar * pep8 passed pylint 8.83 * Merging Rohit's changes * Partial commit * Moved test\_database.py to plugins/cisco/tests/unit/ Edited test\_database.py to be able to run like other tests pylint for cisco/db folder - 8.85/10 pylint for cisco/tests/unit/test\_database.py - 8.42/10 pep8 done * Adding a new file with all the XML snippets to make code easier to read Moving the Nexus SSH server port to the configuration file Removing main functions Making some changes based on Dan and Salvatore reviews * Changes in the README file to incorporate Somik's comments * pylint changes - pylint score for cisco/db folder - 8.27/10 pep8 checks done * Removing extra testing function on Nexus Driver * Merging plugin and tests' changes * Fixes to the tests which were breaking, including fixes to the test cases * Pulling in changes from Rohit's branch * Pulling in changes from Shweta's branch * Removed main from modules as per review comments * updated README file to include persistence framework setup instructions updated db api.py unset\_attachment method to return port moved db\_conn.ini into cisco/conf/ with other configuration files updated l2network\_plugin\_configuration.py to get db config cleaned up l2network\_db.py - removed config parser code as using cisco config parser updated l2network\_db.py to raise specific exceptions in error cases updated create\_vlanid method in l2network\_db.py to not raise exception if vlan rows exist updated portprofile and portprofile\_binding methods to include tenant\_id as an argument added cisco/db/test\_database.py containing unit tests for quantum and l2network\_plugin tables edited get\_pp\_binding method in l2network\_db.py to return empty list when no results found pep8 checks done * Adding Persistence unit test * Fixed bugs while testing * pep8 errors fixed * Merging rohit's changes * Changes to support persistence framework * Merging: lp:~danwent/quantum/client-lib * Merging: lp:~tylesmit/quantum/api-client-fix-serialization Adding automattic serialization to all requests by moving it to do\_request * First, trivial, implementation of authN+authZ * fixes from rohit's branch * from rohit's branch * Adding more templates More tests * - Added new tables VlanID to generate ids and maintain usage of vlans - Added wrapper functions to get next unused vlan, populate vlans, release vlans, getall vlans, isused van and delete van - Added ported instead of networked 
for portprofile binding table - Changed wrapper methods and test cases for portprofile binding to use portid * Adding missing files to branch * Simplifying condition * FIxing missing 'output' variable @ line 243 (syntax error) * Adding automattic serialization to all requests by moving it to do\_request * added network and port models similar to quantum with following changes - - InnoDB as storage engine to allow foreign key constraints - joinedLoad operation on the queries to make use of relation between Network and Port Moved out the network and port code to make l2network contain vlanbinding, portprofile and portprofile bindings * Authentication with Keystone. auth\_token Middleware tweaked and imported in Quantum tree Developing Authorization middleware * Introducting cheetah Updating list\_nets in CLI Writing unit tests for list\_nets Stubbing out with FakeConnection now * I'm too tired * Stubout work in progress * Merging quantum extenions framework into trunk. Thanks rajaram vinkesh, deepak & santhosh for the great work! * - added network and port models into the l2network plugin instead of using quantum models - added api methods for network and ports - restructured code to use the l2network network and port - added l2network base class for other tables to inherit - added support for l2network plugin model objects to behave like dictionary (gets rid of code to convert objects into dictionaries) - added foreign key constraints to l2network plugin model attributes representing columns - added attributes to represent relation between models in l2network plugin - added joinedload only to network and port (need to to for others) - added InnoDB as the storage medium in base table for imposing foreign keys - updated l2network test cases to handle foreign key constraints * lp Bug#824145 : Adding a setup script for quantum * skeleton for cli unit tests * merge trunk * Removing exceptions as well (previously only API faults were removed) * Merged quantum trunk * adding renamed client-lib tests * Tiny change to the README file, instructions on how to get ncclient * - Adding setup script * Adding db connection and l2network plugin database modules * update CLI to use show instead of list for calls that do not return a list * rename client\_lib unit tests so it is run by ./run\_tests.sh, update tests to handle name changes * force batch\_config.py to use json, as XML has issues (see bug: 798262) * update batch\_config.py to use new client lib, hooray for deleting code * Changed to default plugin class name * Rajaram/Vinkesh | Added examples of scoping extension alias in request and action extension * Added tests directory to list of modules in the README file * Added "tests" directory to the list modules in the README file * Adding the required build for Nexus support * Merging changes addressing Bug # 802772. Thanks lp:danwent ! * Merging bugfix for Bug 822890 - Added License file for Quantum code distribution * Fixed typo in README * README file updates (pointer to Nova Cactus branch), and numerous other edits based on Mark's template * L2 Network Plugin Framework merge * Incorporated changes in response to review comments from Ram * Adding Apache Version 2.0 license file. This is the official license agreement under which Quantum code is available to the Open Source community * Making a check for the presence of UCS/Nexus plugin (earlier it was not in certain cases). 
With this change, if the UCS/Nexus plugins are not enabled, the core API tests can be run even on Ubuntu (and RHEL without the requirement of any specific network hardware) * Merging test cases from Shwetas' branch, and further modified README file * Merging the test framework from Shweta's branch * decluttering \_parse\_request\_params method for QuantumController * Fixing detail action for port collection Adding PortIsDown exception Adding unit tests for detail actions and PortIsDown PEP8 FIXES * Adding Unit Test Cases Now * Adding Cisco Unit Tests * minor enhancements to quantum client-lib * RHEL limitation updated * Adding support for expressing format through Content-Type header Adding action detail for port resource (Member & Collection) * Changes to enhance L2 network plugin framework * undo unintentional formatting change in run\_tests.sh * remove unneeded \_\_init\_\_ * refactoring testing code to support plugin tests * Added QuantunPluginBase as the base class for the l2network\_plugin * Generalized and put placeholders * another merge * pep8 cleanup, restore defaults * Added info about ssh conf required for nexus switch * merge * remove unneeded tests from ovs\_quantum\_plugin * Nexus plugin classpath was incorrect, fixed it * Edits to reflect conf changes, made it easier to follow * merge heckj's pip-requires fixes * Fixed issue with creating new port profiles (one configuration parameter got left out during the migration to the new configuration scheme). Also fixed a bug in the calculation of the profile id * Fixes the broken call to second level of plugins. Renaming will work now * updates to pip-requires for CI * Loading of device-specific plugins and drivers is done dynamically by setting configuration. All configuration is driven through configuration files place in the conf directory. Each .ini conf file contains info on the configuration. README file updated to reflect all the changes. Fixed issue with delete\_network deleting the network even when attachments were present. Fixed issue with port id generation * Deepak/Vinkesh | Fixed show action in extension controller to return 404, added example to include namespace in a request extension * Merged quantum trunk * Santhosh/Vinkesh | Added extension\_stubs file * Removing extra file in Nexus Driver * Removing extra file in Nexus Driver * Relabelling API version to 1.0! * Cosmetic changes to unit tests for client library. Pep8 fixes * Removed quantum/plugins/cisco/db/ and quantum/cisco\_extensions since these will be merged separately * Adding conf directory for configuration files * Fixed pep8 error * Merging changes * Merging changes from lp:quantum * Fixed an issue selecting the right port interface and also properly switching off the Nexus Interface * Completing API spec alignment Unit tests aligned with changes in the API spec * Applying fix for bug #814518 Merging from lp:~salvatore-orlando/quantum/bug814518 * Adding controller and view builder for attachment resource * Merging the port profile client name fix * Earlier fix resulted in a different issue (profile client name, was also being used as profile name, hence breaking) * Truncated the port profile client name length to 16 characters (ucsm excepts max 17 chars) * Mergin fix for Bug 818321 * Merging approved OVS plugin configuration change branch. Thanks lp:danwent ! * Merging the brand new Quantum-client-library feature * Requests now send the Content-Type in the HTTP request * fix broken flush in db.network\_destroy, pep8 fixes * req/res alignment complete. 
Status code alignment ALMOST complete (need to sort out 200 vs 202 for create ops) * Vinkesh | Changed import orders according to pep8 recommendations * Including a flag to activate the NX-OS driver Updating the README documentation * merging branch for bug802772, which this branch is stacked on top of * WIP. Still need to align APIs for interface plug/unplug * Fixing pep8 errors * Adding the Nexus OS driver based on the new PlugIn structure * fix incorrect handling of duplicate network name, add exception for duplicate network name, and add unit test to confirm detection * WIP * Merging lp:quantum updates * Fixing syntax issue. I had a 2.7+ style dict comprehension, so I made it 2.6 friendly * Removing a debugging line * pep8 fix * Fixing API behaviour for throwing 400 error on invalid body. Adding unit test for creating a port without request body * make ovs plugin pay attention to port state * persistence of l2network & ucs plugins using mysql - db\_conn.ini - configuration details of making a connection to the database - db\_test\_plugin.py - contains abstraction methods for storing database values in a dict and unit test cases for DB testing - l2network\_db.py - db methods for l2network models - l2network\_models.py - class definitions for the l2 network tables - ucs\_db.py - db methods for ucs models - ucs\_models.py - class definition for the ucs tables dynamic loading of the 2nd layer plugin db's based on passed arguments Create, Delete, Get, Getall, Update database methods at - Quantum, L2Network and Ucs Unit test cases for create, delete, getall and update operations for L2Network and Ucs plugins pep8 checks done branch based off revision 34 plugin-framework * Vinkesh/Santhosh | Moved the stub classes in test\_extensions to a separate file extension\_stubs * Merged from trunk * bug802772 update exception handling in OVS plugin to use API exceptions * merged the latest changes from plugin-framework branch - revision 39 conforming to the new cisco plugin directory structure and moving all db related modules into cisco/db folder updated db\_test\_plugin.py - added import of cisco constants module - added LOG.getLogger for logging component name - updated import module paths for l2network\_models/db and ucs\_models/db to use the new directory structure - updated (rearranged) imports section to obey openstack alphabetical placement convention updated db\_conn.ini - updated database name from cisco\_naas to quantum\_l2network unit test cases ran successfully and pep8 checks done again * removing a few additional lines that aren't needed once we don't calculate port count * Adding a tests directory, this can be used for plugin-specific test cases * also remove line that computes portcount, as it is unneeded now that we don't return it * Including copyright info * merge branch for to fix bug817826 * For the modules to get added, missed in the earlier checkin * remove PortCount attribute of network object, as it is not in the spec and was causing us to hit bug 818321 (note: this commit does not fix the underlyingproblem with xml deserialization, it just makes sure we don't hit it with the existing API code) * Changed the directory structure to a more organized one. 
Fixed the imports to reflect the new structure * Merging the latest changes from lp:quantum * change default integration bridge from br100 to br-int to reflect new default for OVS vif-plugging in nova Diablo-3 release * fix bug 817826 and similar error in batch\_config.py * persistence of l2network & ucs plugins using mysql - db\_conn.ini - configuration details of making a connection to the database - db\_test\_plugin.py - contains abstraction methods for storing database values in a dict and unit test cases for DB testing - l2network\_db.py - db methods for l2network models - l2network\_models.py - class definitions for the l2 network tables - ucs\_db.py - db methods for ucs models - ucs\_models.py - class definition for the ucs tables dynamic loading of the 2nd layer plugin db's based on passed arguments Create, Delete, Get, Getall, Update database methods at - Quantum, L2Network and Ucs Unit test cases for create, delete, getall and update operations for L2Network and Ucs plugins pep8 checks done branch based off revision 34 plugin-framework * merge Salvatore's api branch with fixes for tests. Tweaking branch to remove unwanted bin/quantum.py as part of merge * Merging in main repo updates * Updating to fix some SSL issues * Removing extra quantum.py file from source control removing unused import from quantum/api/\_\_init\_\_.py * Apply fix for bug #817813 Merging lp:~danwent/quantum/bug817813 * Apply fix for bug #814012 Merging lp:~danwent/quantum/bug814012 * Apply fix for bug #814517 merging lp:~tylesmit/quantum/quantum-bug-814517 * bug 817813: default provider in plugins.ini accidentally changed. Changing it back to FakePlugin * Changed the param name "network-name" to "net-name" since the Quantum service expects the later * Removing some legacy code from the unit tests * Adding unit tests to cover the client library * Changing the CLI to use the new client library * Adding refactored API Client * pep8 fixes * fix bug 814012, add unit tests for it * Resolving Bug 814517 which caused XML to have extra whitespace * Vinkesh/Santhosh | Removed loading extensions from 'contrib' and fixed an indentation bug while loading extensions * Santhosh/Rajaram|modified extensions section in README * Rajaram/Santhosh | Added logging to the PluginAwareExtensionManager failures * Rajaram/Santhosh|Added plugin interface in foxinsox and Updated README * Rajaram/Santhosh|quantum manager loads plugin only once, even though both extension middleware and APIRouter calls it * Santhosh/Rajaram|latest merge from quantum and made extensions use options to load plugin * Apply fix for bug #797419 merging lp:~salvatore-orlando/quantum/bug797419 * Re-fixing issues with XML deserialization (changes got lost in merges with trunk) Adapting assertions in unit tests merged from trunk to reflect changes in the API due to RFE requested by Erik Carlin * Rajaram/Vinkesh | Plugins advertise which extensions it supports * Merging branch lp:~salvatore-orlando/quantum/bug802892 Fixing bug #802892 * Merging branch lp:~netstack/quantum/quantum-unit-tests * Fixing silly pep8 error * doh * Restoring quantum\_plugin\_base to previous state. 
Will discuss in the future whether allow API layer to pass options to plugins upon initialization * Vinkesh/Santhosh | Added tests to check the member and collection custom actions of ResourceExtensions * Vinkesh/Deepak | Moved plugin related checks in ExtensionManager code to PluginAwareExtensionManager * Deepak/Vinkesh | Added an base abstract class which can be inherited by PluginInterface class which defines the contract expected by extension * Vinkesh/Deepak| Added doc and small refactoring * Unit tests for API completed fixed pep8 errors * Add TESTING document: description and polices for quantum tests * Adding more unit tests * Deepak/Santhosh | ExtensionManager verifies that plugin implements the interface expected by the extension * Santhosh/Deepak | Made supports\_extension method optional for plugin, plugin will be loaded only once * Merged from quantum trunk * Santhosh/deepak| Load extensions supported by plugin * add extension code in.(last push does not include this directory.) * add api extensions (including portprofiles resources and associate/disassociate actions.) * Changes to support port-profile extension. Fixed an error in the README file * Very initial version of the nxos driver .... lets call it ver 0.0.1! * Removing code related to functional tests * Porting shell script get-vif.sh to python module get-vif.py for cisco ucsm module * Required for recognizing the "cisco" package. Missed in the initial checkin * Applying fix for bug #804237 from branch lp:~salvatore-orlando/quantum/bug804237 * minor pep8 fix * Changed some credentials (does not affect functionality) * This file is not required * Initial checkin for the L2-Network Plugin with all the associated modules and artifacts * Rajaram/Santosh|misc readablity improvements to extension tests * Santosh/Rajaram| added extenstion test to show header extensibility * Rajaram/Vinkesh | Added tests to confirm extensions can edit previously uneditable field * removing pep8 errors * Added more unit tests for API Starting work on functional tests, importing code from Glance * Now REALLY using in-memory db * Adapated plugin infrastructure to allow API to pass options to plugins Now using in-memory sqlite db for tests on FakePlugin teardown() now 'resets' the in-memory db Adding unit tests for APIs * Fixing error introduced in find\_config * Removing excess debug line * Fixing syntax errors in db/models.py * Temporary commit * Now loading plugin before setting up routes. Passing same plugin instance to API controllers * Adding unit test Applying pep8 fixes * Starting implementation of unit tests Fixing minor bugs with FakePlugin * Removing static data for FakePlugin * - Unit tests will use FakePlugin - FakePlugin adapted to db API with sqlite - db Models updated to inherit from generic Quantum Base model (provides utility functions and capabilities for treating db objects as dicts - see nova.db.models.NovaBase) - functional tests commented out temporarily. Will un-comment when code for starting actual service is in place * Adding Routes>=1.12.3 to tools/pip-requires * Work in progress - just starting * ...and again! * I hope I get the commit right now * removing "quantum" folder as well from etc * removing api-paste.ini * Addressing comments from Somik * Merging dan wendlandt's bugfixes for Bug #800466 and improvements that enable Quantum to seamlessly run on KVM! 
* fix pep8 introduced by trunk merge * A small start on unit tests: mostly a proof of concept that contains a test for api/ports.py * Added some more plugin agnostic tests (attachment and negative tests) and some pep8 fixes * merge * more pep8 goodness * Fixing bug #798262 * refactor batch\_config, allow multiple attaches with the empty string * Merge: bzr merge lp:~bgh/quantum/bugfixes * Fix cut and paste error in api\_unplug\_iface * Fixing bug #798261 * no-commit * Santhosh/Vinkesh | Added extensions framework * merge and pep8 cleanup * Merging latest changes from parent repo - lp:network-service , Parent repo had approved merge proposal for merging lp:~santhom/network-service/quantum\_testing\_framework , which has now been merged into lp:network-service * Merging pep8 and functional test related changes lp:~santhom/network-service/quantum\_testing\_framework branch * add example to usage string for batch\_config.py * Bug fixes and clean-up, including supporting libvirt * Fix typo in mysql package check * Fix typo in mysql package check * Adding support for 'detail' action on networks objects * README fixes * Santhosh/Deepak | Fixed the import issue and config.load\_paste\_app issue * Santhosh/Vinkesh | Fixed all the pep8 violations. Modified the 'req' to 'request' across all the services and wsgi so that it's consistent with other projects * Santhosh/Vinkesh | Added the testing framework. Moved the smoketest to tests/functional * merged remote README changes * Fix cli.py from last merge when it got overwritten * Fixing pep8 errors removing excess debug lines * Add dependencies to README and fix whitespace * Fix merge indentation errors * Merged Brad's ovsplugin code * pep8 changes for quantum-framework code pieces * Update Quantum README file with instructions to launch the service and get going * Updated quantum\_plugin\_base with with return type dataformats as well as exceptions * Added a basic README file and updated Quantum plugin base class with appropriate exceptions * Initial commit of exceptions that are raised by a quantum plugin * Make the wording a little clearer * Remove -a option from examples (it no longer exists) * Make the API the default * Address Dan's review comments * Make the manager a little smarter about finding its config file * Fix another TODO: remove main function from manager * Fix detail\_net and list\_ports commands * Remove get\_all\_interfaces and fix detail\_network commands * Initial version of openvswitch plugin * \* Merged changes from Salvatore's branch - quantum-api-workinprogress \* Removed spurious methods from quantum\_base\_plugin class. \* Updated the sample plugins to be compliant with the new QuantumBase class * Update readme with quantum specific instructions * Address some of the remaining TODOs and general cleanup * Add headers * Initial cut of openvswitch plugin * Add database models/functions for ports and networks * Print the command list in the help * Whitespace fixes * Added api functions for the interface commands * Initial rework of cli to use the WS api * Copy over miniclient from testscripts and port tests.py to use unittest * Adding ports.py to source control * pep8 fixes (1st batch) * First working version of Quantum API * Adding views/networks.py to bzr * Adding serialization/deserilization for network resources. Adding fake plugin * networks api with final URL structure. 
No serialization yet * Implementing interface with plugin * adpating wsgi files * Work in progress on network API * Adding first files for quantum API * Minor fixes: indentation in bin/quantum and fix import in config.py * Adding api paste configuration file * Removing .pydevproject from version control * Branching from quantum-framework * Adding flags.py to infrastructure code * Move plugin configuration to plugins.ini - a config file * 1) Created a DummDataPlugin in SamplePlugin module * merged salvatore's changes to local branch * 1) Added a bare-bones framework for quantum plugins. 2) Created demo quantum plugin that conforms to QuantumPluginBase Abstract class specification. 3) Demonstrated plugin registration and invocation using the demo plugin called "QuantumEchoPlugin" 4) Created the initial file structure for a quantum CLI 5) Seeded the utils module that will contain frequently used Quantum utilities. 6) Modified the manager module to initialize and register the quantum plugin defined in a configuration file. I have hard-coded the path to plugin for now but this will move to a quantum.conf file * Fixing pep8 errors * adding /bzrignore to precent checking in pyc files and that sort of stuff.. * Pushing initial started code based on Glance project and infrstructure work done by the melange team * Merging in Shweta's fixes from the review by Sumit * Minor Fix in ucs tests * Fixing issues discussed in merge prop. The UCS Inventory clears the DB on teardown. The multiblade tests now check to see if a port exists in the db before deleting it. It checks to make sure the UCSInventory is set in the config * Adding UCS inventory tests * Merging in latest changes from lp:quantum * Merging in Shweta's test changes * Ading Ucs db tests * Removing excess imports * Fixing pep8 errors and pushing pylint score up to 8.57 * Fix for bug/893663 Making Cisco CLI usable from installed packages * Bug 903684: functions defined twice in utils.py * blueprint api-operational-status * Adds sqlalchemy support for ovs\_quantum\_plugin * bug 903581: remove etc/quantum.conf.sample as it is invalid * Fixing bug/903829 Making setup\_server.py not try to install quantum.conf.sample * Removing a couple extra lines * Adding some tests, fixing some bugs, and making the tearDown correctly remove PortProfiles * Adding author information * Removing a negative test until I can figure out how to implement it * Removing some negative tests until I can figure out how to implement them * Updating tests * Fixing port-related calls * Adding tests * Tweaking other multiblade tests * Updating multiblade create\_network test * Starting making multi\_blade model return data * Adding initial multi blade test file from Shubhangi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/HACKING.rst0000644000175000017500000000436300000000000016747 0ustar00coreycorey00000000000000Neutron Style Commandments ========================== - Step 1: Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ - Step 2: Read on Neutron Specific Commandments ----------------------------- Some rules are enforced by `neutron-lib hacking factory `_ while other rules are specific to Neutron repository. Below you can find a list of checks specific to this repository. 
- [N322] Detect common errors with assert_called_once_with - [N328] Detect wrong usage with assertEqual - [N330] Use assertEqual(*empty*, observed) instead of assertEqual(observed, *empty*) - [N331] Detect wrong usage with assertTrue(isinstance()). - [N332] Use assertEqual(expected_http_code, observed_http_code) instead of assertEqual(observed_http_code, expected_http_code). - [N340] Check usage of .i18n (and neutron.i18n) - [N341] Check usage of _ from python builtins - [N343] Production code must not import from neutron.tests.* - [N344] Python 3: Do not use filter(lambda obj: test(obj), data). Replace it with [obj for obj in data if test(obj)]. .. note:: When adding a new hacking check to this repository or ``neutron-lib``, make sure its number (Nxxx) doesn't clash with any other check. .. note:: As you may have noticed, the numbering for Neutron checks has gaps. This is because some checks were removed or moved to ``neutron-lib``. Creating Unit Tests ------------------- For every new feature, unit tests should be created that both test and (implicitly) document the usage of said feature. If submitting a patch for a bug that had no unit test, a new passing unit test should be added. If a submitted bug fix does have a unit test, be sure to add a new one that fails without the patch and passes with the patch. All unittest classes must ultimately inherit from testtools.TestCase. In the Neutron test suite, this should be done by inheriting from neutron.tests.base.BaseTestCase. All setUp and tearDown methods must upcall using the super() method. tearDown methods should be avoided and addCleanup calls should be preferred. Never manually create tempfiles. Always use the tempfile fixtures from the fixture library to ensure that they are cleaned up. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/LICENSE0000644000175000017500000002363700000000000016163 0ustar00coreycorey00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5550468 neutron-16.0.0.0b2.dev214/PKG-INFO0000644000175000017500000000347600000000000016252 0ustar00coreycorey00000000000000Metadata-Version: 1.2 Name: neutron Version: 16.0.0.0b2.dev214 Summary: OpenStack Networking Home-page: https://docs.openstack.org/neutron/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: ================= OpenStack Neutron ================= .. image:: https://governance.openstack.org/tc/badges/neutron.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on Neutron is an OpenStack project to provide "network connectivity as a service" between interface devices (e.g., vNICs) managed by other OpenStack services (e.g., Nova). To learn more about neutron: * Documentation: https://docs.openstack.org/neutron/latest/ * Features: https://specs.openstack.org/openstack/neutron-specs * Defects: https://launchpad.net/neutron * Release notes: https://docs.openstack.org/releasenotes/neutron/index.html * Source: https://opendev.org/openstack/neutron Get in touch via `email `_. Use [Neutron] in your subject. To learn how to contribute, please read the CONTRIBUTING.rst file. Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Requires-Python: >=3.6 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/README.rst0000644000175000017500000000161300000000000016633 0ustar00coreycorey00000000000000================= OpenStack Neutron ================= .. image:: https://governance.openstack.org/tc/badges/neutron.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. 
Change things from this point on

Neutron is an OpenStack project to provide "network connectivity as a service" between interface devices (e.g., vNICs) managed by other OpenStack services (e.g., Nova).

To learn more about neutron:

* Documentation: https://docs.openstack.org/neutron/latest/
* Features: https://specs.openstack.org/openstack/neutron-specs
* Defects: https://launchpad.net/neutron
* Release notes: https://docs.openstack.org/releasenotes/neutron/index.html
* Source: https://opendev.org/openstack/neutron

Get in touch via `email `_. Use [Neutron] in your subject.

To learn how to contribute, please read the CONTRIBUTING.rst file.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/TESTING.rst0000644000175000017500000007352700000000000017024 0ustar00coreycorey00000000000000..
    Licensed under the Apache License, Version 2.0 (the "License"); you may
    not use this file except in compliance with the License. You may obtain
    a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    implied. See the License for the specific language governing
    permissions and limitations under the License.

    Convention for heading levels in Neutron devref:
    =======  Heading 0 (reserved for the title in a document)
    -------  Heading 1
    ~~~~~~~  Heading 2
    +++++++  Heading 3
    '''''''  Heading 4
    (Avoid deeper levels because they do not render well.)

.. _testing_neutron:

Testing Neutron
===============

Why Should You Care
-------------------

There are two ways to approach testing:

1) Write unit tests because they're required to get your patch merged. This typically involves mock-heavy tests that assert that your code is as written.
2) Put as much thought into your testing strategy as you do into the rest of your code. Use different layers of testing as appropriate to provide high *quality* coverage. Are you touching an agent? Test it against an actual system! Are you adding a new API? Test it for race conditions against a real database! Are you adding a new cross-cutting feature? Test that it does what it's supposed to do when run on a real cloud!

Do you feel the need to verify your change manually? If so, the next few sections attempt to guide you through Neutron's different test infrastructures to help you make intelligent decisions and best exploit Neutron's test offerings.

Definitions
-----------

We will talk about three classes of tests: unit, functional and integration. Each respective category typically targets a larger scope of code. Other than that broad categorization, here are a few more characteristics:

* Unit tests - Should be able to run on your laptop, directly following a 'git clone' of the project. The underlying system must not be mutated; mocks can be used to achieve this. A unit test typically targets a function or class.
* Functional tests - Run against a pre-configured environment (tools/configure_for_func_testing.sh). Typically test a component such as an agent using no mocks.
* Integration tests - Run against a running cloud, often target the API level, but also 'scenarios' or 'user stories'. You may find such tests under tests/fullstack, and in the Tempest, Rally and neutron-tempest-plugin (neutron_tempest_plugin/api|scenario) projects.
Tests in the Neutron tree are typically organized by the testing infrastructure used, and not by the scope of the test. For example, many tests under the 'unit' directory invoke an API call and assert that the expected output was received. The scope of such a test is the entire Neutron server stack, and clearly not a specific function such as in a typical unit test.

Testing Frameworks
------------------

The different frameworks are listed below. The intent is to list the capabilities of each testing framework so as to help the reader understand when each tool should be used. Remember that when adding code that touches many areas of Neutron, each area should be tested with the appropriate framework. Overlap between different test layers is often desirable and encouraged.

Unit Tests
~~~~~~~~~~

Unit tests (neutron/tests/unit/) are meant to cover as much code as possible. They are designed to test the various pieces of the Neutron tree to make sure any new changes don't break existing functionality. Unit tests have no requirements, nor do they make changes to the system they are running on. They use an in-memory sqlite database to test DB interaction.

At the start of each test run:

* RPC listeners are mocked away.
* The fake Oslo messaging driver is used.

At the end of each test run:

* Mocks are automatically reverted.
* The in-memory database is cleared of content, but its schema is maintained.
* The global Oslo configuration object is reset.

The unit testing framework can be used to effectively test database interaction. For example, distributed routers allocate a MAC address for every host running an OVS agent. One of DVR's DB mixins implements a method that lists all host MAC addresses. Its test looks like this:

.. code-block:: python

    def test_get_dvr_mac_address_list(self):
        self._create_dvr_mac_entry('host_1', 'mac_1')
        self._create_dvr_mac_entry('host_2', 'mac_2')
        mac_list = self.mixin.get_dvr_mac_address_list(self.ctx)
        self.assertEqual(2, len(mac_list))

It inserts two new host MAC addresses, invokes the method under test and asserts its output. The test has many things going for it:

* It targets the method under test correctly, not taking on a larger scope than is necessary.
* It does not use mocks to assert that methods were called; it simply invokes the method and asserts its output (in this case, that the list method returns two records).

This is allowed by the fact that the method was built to be testable - the method has clear input and output with no side effects.

You can get oslo.db to generate a file-based sqlite database by setting OS_TEST_DBAPI_ADMIN_CONNECTION to a file-based URL as described in `this mailing list post`__. This file will be created but (confusingly) won't be the actual file used for the database. To find the actual file, set a breakpoint in your test method and inspect self.engine.url.

__ file-based-sqlite_

.. code-block:: shell

    $ OS_TEST_DBAPI_ADMIN_CONNECTION=sqlite:///sqlite.db .tox/py37/bin/python -m \
        testtools.run neutron.tests.unit...
    ...
    (Pdb) self.engine.url
    sqlite:////tmp/iwbgvhbshp.db

Now, you can inspect this file using sqlite3.

.. code-block:: shell

    $ sqlite3 /tmp/iwbgvhbshp.db

Functional Tests
~~~~~~~~~~~~~~~~

Functional tests (neutron/tests/functional/) are intended to validate actual system interaction. Mocks should be used sparingly, if at all. Care should be taken to ensure that existing system resources are not modified and that resources created in tests are properly cleaned up both on test success and failure.
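In practice, that cleanup discipline means registering an ``addCleanup`` callback in the same breath as the resource is created, so the resource is removed whether the assertions that follow pass or fail. Below is a minimal, self-contained sketch of the pattern; ``FakeDevice`` and ``create_test_device`` are illustrative stand-ins rather than real Neutron helpers, and plain ``unittest.TestCase`` is used for brevity where a real functional test would inherit from a base class under neutron.tests.functional:

.. code-block:: python

    import unittest


    class FakeDevice(object):
        """Stands in for a real system resource, e.g. a veth device."""

        def __init__(self, name):
            self.name = name
            self.exists = True

        def delete(self):
            self.exists = False


    def create_test_device(name):
        # A real helper would create the device on the running system.
        return FakeDevice(name)


    class DeviceLifecycleTestCase(unittest.TestCase):

        def test_device_lifecycle(self):
            device = create_test_device('test-veth0')
            # Register the cleanup immediately after creation so the
            # resource is removed on success and failure alike.
            self.addCleanup(device.delete)
            self.assertTrue(device.exists)
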
Let's examine the benefits of the functional testing framework. Neutron offers a library called 'ip_lib' that wraps around the 'ip' binary. One of its methods is called 'device_exists' which accepts a device name and a namespace and returns True if the device exists in the given namespace. It's easy to build a test that targets the method directly, and such a test would be considered a 'unit' test. However, what framework should such a test use? A test using the unit test framework could not mutate state on the system, and so could not actually create a device and assert that it now exists. Such a test would look roughly like this:

* It would mock 'execute', a method that executes shell commands against the system, to return an IP device named 'foo'.
* It would then assert that when 'device_exists' is called with 'foo', it returns True, but when called with a different device name it returns False.
* It would most likely assert that 'execute' was called using something like: 'ip link show foo'.

The value of such a test is arguable. Remember that new tests are not free; they need to be maintained. Code is often refactored, reimplemented and optimized.

* There are other ways to find out if a device exists (such as by looking at '/sys/class/net'), and in such a case the test would have to be updated.
* Methods are mocked using their name. When methods are renamed, moved or removed, their mocks must be updated. This slows down development for avoidable reasons.
* Most importantly, the test does not assert the behavior of the method. It merely asserts that the code is as written.

When the functional test for 'device_exists' was added, several framework-level methods were added along with it. These methods may now be used by other tests as well. One such method creates a virtual device in a namespace, and ensures that both the namespace and the device are cleaned up at the end of the test run, regardless of success or failure, using the 'addCleanup' method. The test generates details for a temporary device, asserts that a device by that name does not exist, creates that device, asserts that it now exists, deletes it, and asserts that it no longer exists. Such a test avoids all three issues that would have affected it had it been written using the unit testing framework.

Functional tests are also used to target a larger scope, such as agents. Many good examples exist: see the OVS, L3 and DHCP agents' functional tests. Such tests target a top-level agent method and assert that the system interaction that was supposed to be performed was indeed performed. For example, to test the DHCP agent's top-level method that accepts network attributes and configures dnsmasq for that network, the test:

* Instantiates an instance of the DHCP agent class (but does not start its process).
* Calls its top-level function with prepared data.
* Creates a temporary namespace and device, and calls 'dhclient' from that namespace.
* Asserts that the device successfully obtained the expected IP address.

Test exceptions
+++++++++++++++

Test neutron.tests.functional.agent.test_ovs_flows.OVSFlowTestCase.\ test_install_flood_to_tun is currently skipped if the openvswitch version is less than 2.5.1. Older versions contain a bug where the appctl command prints the wrong output for the final flow. It's been fixed in openvswitch 2.5.1 in `this commit `_. If the openvswitch version meets the test requirement, the test is triggered normally.

Fullstack Tests
~~~~~~~~~~~~~~~

Why?
++++

The idea behind "fullstack" testing is to fill a gap between unit + functional tests and Tempest.
Tempest tests are expensive to run, and target black box API tests exclusively. Tempest requires an OpenStack deployment to be run against, which can be difficult to configure and set up. Full stack testing addresses these issues by taking care of the deployment itself, according to the topology that the test requires. Developers further benefit from full stack testing as it can sufficiently simulate a real environment and provide a rapidly reproducible way to verify code while you're still writing it.

How?
++++

Full stack tests set up their own Neutron processes (server & agents). They assume a working Rabbit and MySQL server before the run starts. Instructions on how to run fullstack tests on a VM are available below. Each test defines its own topology (what and how many servers and agents should be running).

Since the test runs on the machine itself, full stack testing enables "white box" testing. This means that you can, for example, create a router through the API and then assert that a namespace was created for it.

Full stack tests run in the Neutron tree with Neutron resources alone. You may use the Neutron API (the Neutron server is set to NOAUTH so that Keystone is out of the picture). VMs may be simulated with a container-like class: neutron.tests.fullstack.resources.machine.FakeFullstackMachine. An example of its usage may be found at: neutron/tests/fullstack/test_connectivity.py.

Full stack testing can simulate multi-node testing by starting an agent multiple times. Specifically, each node would have its own copy of the OVS/LinuxBridge/DHCP/L3 agents, all configured with the same "host" value. Each OVS agent is connected to its own pair of br-int/br-ex, and those bridges are then interconnected. For the LinuxBridge agent, each agent is started in its own namespace, called "host-". Such namespaces are connected to each other through a "central" OVS bridge.

.. image:: images/fullstack_multinode_simulation.png

Segmentation at the database layer is guaranteed by creating a database per test. The messaging layer achieves segmentation by utilizing a RabbitMQ feature called 'vhosts'. In short, just as a MySQL server can serve multiple databases, a RabbitMQ server can serve multiple messaging domains. Exchanges and queues in one 'vhost' are segmented from those in another 'vhost'.

Please note that if the change you would like to test using fullstack tests involves a change to python-neutronclient as well as neutron, then you should make sure your fullstack tests are in a separate third change that depends on the python-neutronclient change using the 'Depends-On' tag in the commit message. You will need to wait for the next release of python-neutronclient, and a minimum version bump for python-neutronclient in the global requirements, before your fullstack tests will work in the gate. This is because tox uses the version of python-neutronclient listed in the upper-constraints.txt file in the openstack/requirements repository.

When?
+++++

1) You'd like to test the interaction between Neutron components (server and agents) and have already tested each component in isolation via unit or functional tests. You should have many unit tests, fewer tests to test a component and even fewer to test their interaction. Edge cases should not be tested with full stack testing.
2) You'd like to increase coverage by testing features that require multi-node testing such as l2pop, L3 HA and DVR.
3) You'd like to test agent restarts.
We've found bugs in the OVS, DHCP and L3 agents and haven't found an effective way to test for these scenarios. Full stack testing can help here, as the full stack infrastructure can restart an agent during the test.

Example
+++++++

Neutron offers a Quality of Service API, initially offering bandwidth capping at the port level. In the reference implementation, it does this by utilizing an OVS feature. neutron.tests.fullstack.test_qos.TestBwLimitQoSOvs.test_bw_limit_qos_policy_rule_lifecycle is a positive example of how the fullstack testing infrastructure should be used. It creates a network, subnet, QoS policy & rule and a port utilizing that policy. It then asserts that the expected bandwidth limitation is present on the OVS bridge connected to that port. The test is a true integration test, in the sense that it invokes the API and then asserts that Neutron interacted with the hypervisor appropriately.

Gate exceptions
+++++++++++++++

Currently we compile the openvswitch kernel module from source for the fullstack job on the gate. The reason is to fix a bug related to local VXLAN tunneling which is present in the current Ubuntu Xenial 16.04 kernel. The kernel was fixed with this `commit `_ and backported with this `openvswitch commit `_.

API Tests
~~~~~~~~~

API tests (neutron-tempest-plugin/neutron_tempest_plugin/api/) are intended to ensure the function and stability of the Neutron API. As much as possible, changes to this path should not be made at the same time as changes to the code to limit the potential for introducing backwards-incompatible changes, although the same patch that introduces a new API should include an API test.

Since API tests target a deployed Neutron daemon that is not test-managed, they should not depend on controlling the runtime configuration of the target daemon. API tests should be black-box - no assumptions should be made about implementation. Only the contract defined by Neutron's REST API should be validated, and all interaction with the daemon should be via a REST client.

The neutron-tempest-plugin/neutron_tempest_plugin directory was copied from the Tempest project around the Kilo timeframe. At the time, there was an overlap of tests between the Tempest and Neutron repositories. This overlap was then eliminated by carving out a subset of resources that belong to Tempest, with the rest in Neutron.

API tests that belong to Tempest deal with a subset of Neutron's resources:

* Port
* Network
* Subnet
* Security Group
* Router
* Floating IP

These resources were chosen for their ubiquity. They are found in most Neutron deployments regardless of plugin, and are directly involved in the networking and security of an instance. Together, they form the bare minimum needed by Neutron. This excludes extensions to these resources (for example: extra DHCP options for subnets, or snat_gateway mode for routers) that are not mandatory in the majority of cases.

Tests for other resources should be contributed to the Neutron repository. Scenario tests should be similarly split up between Tempest and Neutron according to the API they're targeting.

To create an API test, the testing class must at least inherit from the neutron_tempest_plugin.api.base.BaseNetworkTest base class. As some tests may require certain extensions to be enabled, the base class provides a ``required_extensions`` class attribute which can be used by subclasses to define a list of required extensions for a particular test class.
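As a rough sketch of what such a class looks like (the class name, extension name and test body here are illustrative, not taken from the plugin's actual test suite):

.. code-block:: python

    from neutron_tempest_plugin.api import base


    class QosPolicyApiTest(base.BaseNetworkTest):

        # The whole class is skipped unless the target deployment
        # advertises the 'qos' extension.
        required_extensions = ['qos']

        def test_policy_lifecycle(self):
            # Interact with the deployed daemon strictly through the
            # REST client, per the black-box guidance above.
            pass
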
Scenario Tests
~~~~~~~~~~~~~~

Scenario tests (neutron-tempest-plugin/neutron_tempest_plugin/scenario), like API tests, use the Tempest test infrastructure and have the same requirements. Guidelines for writing a good scenario test may be found at the Tempest developer guide: https://docs.openstack.org/tempest/latest/field_guide/scenario.html

Scenario tests, like API tests, are split between the Tempest and Neutron repositories according to the Neutron API the test is targeting.

Some scenario tests require advanced ``Glance`` images (for example, ``Ubuntu`` or ``CentOS``) in order to pass. Those tests are skipped by default. To enable them, include the following in ``tempest.conf``:

.. code-block:: ini

    [compute]
    image_ref = 

    [neutron_plugin_options]
    image_is_advanced = True

Specific test requirements for advanced images are:

#. ``test_trunk`` requires the ``802.1q`` kernel module to be loaded.

Rally Tests
~~~~~~~~~~~

Rally tests (rally-jobs/plugins) use the `rally `_ infrastructure to exercise a neutron deployment. Guidelines for writing a good rally test can be found in the `rally plugin documentation `_. There are also some examples in tree; the process for adding rally plugins to neutron requires three steps:

1) write a plugin and place it under rally-jobs/plugins/. This is your rally scenario;
2) (optional) add a setup file under rally-jobs/extra/. This is any devstack configuration required to make sure your environment can successfully process your scenario requests;
3) edit neutron-neutron.yaml. This is your scenario 'contract' or SLA.

Development Process
-------------------

It is expected that any new changes that are proposed for merge come with tests for that feature or code area. Any bug fixes that are submitted must also have tests to prove that they stay fixed! In addition, before proposing for merge, all of the current tests should be passing.

Structure of the Unit Test Tree
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The structure of the unit test tree should match the structure of the code tree, e.g. ::

    - target module: neutron.agent.utils
    - test module: neutron.tests.unit.agent.test_utils

Unit test modules should have the same path under neutron/tests/unit/ as the module they target has under neutron/, and their name should be the name of the target module prefixed by `test_`. This requirement is intended to make it easier for developers to find the unit tests for a given module.

Similarly, when a test module targets a package, that module's name should be the name of the package prefixed by `test_` with the same path as when a test targets a module, e.g. ::

    - target package: neutron.ipam
    - test module: neutron.tests.unit.test_ipam

The following command can be used to validate whether the unit test tree is structured according to the above requirements: ::

    ./tools/check_unit_test_structure.sh

Where appropriate, exceptions can be added to the above script. If code is not part of the Neutron namespace, for example, it's probably reasonable to exclude its unit tests from the check.

.. note ::

    At no time should production code import anything from the testing subtree (neutron.tests). There are distributions that split out neutron.tests modules into a separate package that is not installed by default, making any code that relies on the presence of these modules fail. For example, RDO is one of those distributions.
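Putting the layout rules and the HACKING.rst guidance together, a new unit test module targeting neutron.agent.utils would live at neutron/tests/unit/agent/test_utils.py. The skeleton below is an illustrative sketch (the test body is a placeholder, not real Neutron code):

.. code-block:: python

    from neutron.agent import utils
    from neutron.tests import base


    class AgentUtilsTestCase(base.BaseTestCase):

        def setUp(self):
            # Per HACKING.rst: always upcall in setUp, and prefer
            # addCleanup over defining a tearDown method.
            super(AgentUtilsTestCase, self).setUp()

        def test_placeholder(self):
            # A real test would exercise a function from the target
            # module and assert on its output.
            self.assertIsNotNone(utils)
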
Running Tests
-------------

Before submitting a patch for review you should always ensure all tests pass; a tox run is triggered by the Jenkins gate executed on Gerrit for each patch pushed for review.

Neutron, like other OpenStack projects, uses `tox`_ for managing the virtual environments for running test cases. It uses `Testr`_ for managing the running of the test cases.

Tox handles the creation of a series of `virtualenvs`_ that target specific versions of Python. Testr handles the parallel execution of a series of test cases as well as the tracking of long-running tests and other things.

For more information on the standard Tox-based test infrastructure used by OpenStack and how to do some common test/debugging procedures with Testr, see this wiki page: https://wiki.openstack.org/wiki/Testr

.. _Testr: https://wiki.openstack.org/wiki/Testr
.. _tox: http://tox.readthedocs.org/en/latest/
.. _virtualenvs: https://pypi.org/project/virtualenv

PEP8 and Unit Tests
~~~~~~~~~~~~~~~~~~~

Running pep8 and unit tests is as easy as executing this in the root directory of the Neutron source code::

    tox

To run only pep8::

    tox -e pep8

Since pep8 includes running pylint on all files, it can take quite some time to run. To restrict the pylint check to only the files altered by the latest patch changes::

    tox -e pep8 HEAD~1

To run only the unit tests::

    tox -e py37

Many changes span both the neutron and neutron-lib repos, and tox will always build the test environment using the published module versions specified in requirements.txt and lower-constraints.txt. To run tox tests against a different version of neutron-lib, use the TOX_ENV_SRC_MODULES environment variable to point at a local package repo.

For example, to run against the 'master' branch of neutron-lib::

    cd $SRC
    git clone https://opendev.org/openstack/neutron-lib
    cd $NEUTRON_DIR
    env TOX_ENV_SRC_MODULES=$SRC/neutron-lib tox -r -e py37

To run against a change of your own, repeat the same steps, but use the directory with your changes, not a fresh clone.

To run against a particular gerrit change of the lib (substituting the desired gerrit refs for this example)::

    cd $SRC
    git clone https://opendev.org/openstack/neutron-lib
    cd neutron-lib
    git fetch https://opendev.org/openstack/neutron-lib refs/changes/13/635313/6 && git checkout FETCH_HEAD
    cd $NEUTRON_DIR
    env TOX_ENV_SRC_MODULES=$SRC/neutron-lib tox -r -e py37

Note that the '-r' is needed to re-create the tox virtual envs, and will also be needed to restore them to standard when not using this method.

Any pip-installable package can be overridden with this environment variable, not just neutron-lib. To specify multiple packages to override, specify them as a space-separated list to TOX_ENV_SRC_MODULES. Example::

    env TOX_ENV_SRC_MODULES="$SRC/neutron-lib $SRC/oslo.db" tox -r -e py37

Functional Tests
~~~~~~~~~~~~~~~~

To run functional tests that do not require sudo privileges or system-specific dependencies::

    tox -e functional

To run all the functional tests, including those requiring sudo privileges and system-specific dependencies, the procedure defined by tools/configure_for_func_testing.sh should be followed.

IMPORTANT: configure_for_func_testing.sh relies on DevStack to perform extensive modification to the underlying host. Execution of the script requires sudo privileges and it is recommended that the following commands be invoked only on a clean and disposable VM. A VM that has had DevStack previously installed on it is also fine.
::

    git clone https://opendev.org/openstack/devstack ../devstack
    ./tools/configure_for_func_testing.sh ../devstack -i
    tox -e dsvm-functional

The '-i' option is optional and instructs the script to use DevStack to install and configure all of Neutron's package dependencies. It is not necessary to provide this option if DevStack has already been used to deploy Neutron to the target host.

Fullstack Tests
~~~~~~~~~~~~~~~

To run all the fullstack tests, you may use::

    tox -e dsvm-fullstack

Since fullstack tests often require the same resources and dependencies as the functional tests, using the configuration script tools/configure_for_func_testing.sh is advised (as described above). Before running the script, you must first set the following environment variable so things are set up correctly::

    export VENV=dsvm-fullstack

When running fullstack tests on a clean VM for the first time, it is important to make sure all of Neutron's package dependencies have been met. As mentioned in the functional test section above, this can be done by running the configure script with the '-i' argument::

    ./tools/configure_for_func_testing.sh ../devstack -i

You can also run './stack.sh', and if successful, it will have also verified that the package dependencies have been met.

When running on a new VM it is suggested to set the following environment variable as well, to make sure that all requirements (including database and message bus) are installed and set::

    export IS_GATE=False

Fullstack-based Neutron daemons produce logs to a sub-folder in the $OS_LOG_PATH directory (default: /opt/stack/logs; note: if running fullstack tests on a newly created VM, make sure that $OS_LOG_PATH exists with the correct permissions) called 'dsvm-fullstack-logs'. For example, a test named "test_example" will produce logs in $OS_LOG_PATH/dsvm-fullstack-logs/test_example/, as well as create $OS_LOG_PATH/dsvm-fullstack-logs/test_example.txt, so that is a good place to look if your test is failing.

The fullstack test suite assumes that the 240.0.0.0/4 (Class E) range in the root namespace of the test machine is available for its usage.

Fullstack tests execute a custom dhclient-script. From kernel version 4.14 onward, apparmor on certain distros could deny the execution of this script. To be sure, check journalctl::

    sudo journalctl | grep DENIED | grep fullstack-dhclient-script

To execute these tests, the easiest workaround is to disable apparmor::

    sudo systemctl stop apparmor
    sudo systemctl disable apparmor

A more granular solution could be to disable apparmor only for dhclient::

    sudo ln -s /etc/apparmor.d/sbin.dhclient /etc/apparmor.d/disable/

API & Scenario Tests
~~~~~~~~~~~~~~~~~~~~

To run the API or scenario tests, deploy Tempest, neutron-tempest-plugin and Neutron with DevStack and then run the following command, from the tempest directory::

    $ export DEVSTACK_GATE_TEMPEST_REGEX="neutron"
    $ tox -e all-plugin $DEVSTACK_GATE_TEMPEST_REGEX

If you want to limit the number of tests, or run an individual test, you can do, for instance::

    $ tox -e all-plugin neutron_tempest_plugin.api.admin.test_routers_ha
    $ tox -e all-plugin neutron_tempest_plugin.api.test_qos.QosTestJSON.test_create_policy

If you want to use a special config for Neutron, such as using advanced images (Ubuntu or CentOS) to test advanced features, you may need to add configuration to tempest/etc/tempest.conf:

.. code-block:: ini

    [neutron_plugin_options]
    image_is_advanced = True

The Neutron tempest plugin configs are under the ``neutron_plugin_options`` scope of ``tempest.conf``.
Running Individual Tests
~~~~~~~~~~~~~~~~~~~~~~~~

To run individual test modules, test cases or tests, just pass the dot-separated path you want as an argument. For example, the following would run only a single test or test case::

    $ tox -e py37 neutron.tests.unit.test_manager
    $ tox -e py37 neutron.tests.unit.test_manager.NeutronManagerTestCase
    $ tox -e py37 neutron.tests.unit.test_manager.NeutronManagerTestCase.test_service_plugin_is_loaded

If you want to pass other arguments to stestr, you can do the following::

    $ tox -e py37 -- neutron.tests.unit.test_manager --serial

Coverage
--------

Neutron has a fast-growing code base and there are plenty of areas that need better coverage. To get a grasp of the areas where tests are needed, you can check current unit test coverage by running::

    $ tox -e cover

Since the coverage command can only show unit test coverage, a coverage document is maintained that shows test coverage per area of code in: doc/source/devref/testing_coverage.rst. You could also rely on Zuul logs, which are generated post-merge (not every project builds coverage results). To access them, do the following:

* Check out the latest `merge commit `_
* Go to: http://logs.openstack.org///post/neutron-coverage/.
* `Spec `_ is a work in progress to provide a better landing page.

Debugging
---------

By default, calls to pdb.set_trace() will be ignored when tests are run. For pdb statements to work, invoke tox as follows::

    $ tox -e venv -- python -m testtools.run [test module path]

Tox-created virtual environments (venv's) can also be activated after a tox run and reused for debugging::

    $ tox -e venv
    $ . .tox/venv/bin/activate
    $ python -m testtools.run [test module path]

Tox packages and installs the Neutron source tree in a given venv on every invocation, but if modifications need to be made between invocations (e.g. adding more pdb statements), it is recommended that the source tree be installed in the venv in editable mode::

    # run this only after activating the venv
    $ pip install --editable .

Editable mode ensures that changes made to the source tree are automatically reflected in the venv, and that such changes are not overwritten during the next tox run.

Post-mortem Debugging
~~~~~~~~~~~~~~~~~~~~~

TBD: how to do this with tox.

References
~~~~~~~~~~

.. _file-based-sqlite: http://lists.openstack.org/pipermail/openstack-dev/2016-July/099861.html

././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.0390422 neutron-16.0.0.0b2.dev214/api-ref/0000755000175000017500000000000000000000000016466 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/api-ref/README.rst0000644000175000017500000000015300000000000020154 0ustar00coreycorey00000000000000Networking API reference is maintained in the neutron-lib repo. See api-ref in the neutron-lib repository.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/babel.cfg0000644000175000017500000000002100000000000016662 0ustar00coreycorey00000000000000[python: **.py] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/bindep.txt0000644000175000017500000000136700000000000017154 0ustar00coreycorey00000000000000# This file contains runtime (non-python) dependencies # More info at: http://docs.openstack.org/infra/bindep/readme.html # tools/misc-sanity-checks.sh validates .po[t] files gettext [test] # cffi (required by oslo.privsep) and PyNaCL (required by paramiko) libffi-dev [platform:dpkg] libffi-devel [platform:rpm] # MySQL and PostgreSQL databases since some jobs are set up in # OpenStack infra that need these like # periodic-neutron-py35-with-neutron-lib-master. haproxy libmysqlclient-dev [platform:dpkg test] mysql [platform:rpm test] mysql-client [platform:dpkg test] mysql-devel [platform:rpm test] mysql-server [test] postgresql [test] postgresql-client [platform:dpkg test] postgresql-devel [platform:rpm test] postgresql-server [platform:rpm test] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.0390422 neutron-16.0.0.0b2.dev214/devstack/0000755000175000017500000000000000000000000016747 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.0430424 neutron-16.0.0.0b2.dev214/devstack/lib/0000755000175000017500000000000000000000000017515 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/devstack/lib/dns0000644000175000017500000000135000000000000020223 0ustar00coreycorey00000000000000function configure_dns_extension { neutron_ml2_extension_driver_add "subnet_dns_publish_fixed_ip" } function configure_dns_integration { iniset $NEUTRON_CONF DEFAULT external_dns_driver designate # Use new /dns based uri by default if no port is set. 
if [[ -z "$DESIGNATE_SERVICE_PORT" ]]; then iniset $NEUTRON_CONF designate url "$DESIGNATE_SERVICE_PROTOCOL://$DESIGNATE_SERVICE_HOST/dns/v2" else iniset $NEUTRON_CONF designate url "$DESIGNATE_SERVICE_PROTOCOL://$DESIGNATE_SERVICE_HOST:$DESIGNATE_SERVICE_PORT/v2" fi configure_keystone_authtoken_middleware $NEUTRON_CONF designate designate } function post_config_dns_extension { iniset $NEUTRON_CONF DEFAULT dns_domain openstackgate.local } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/devstack/lib/fip_port_forwarding0000644000175000017500000000023000000000000023477 0ustar00coreycorey00000000000000function configure_port_forwarding { neutron_service_plugin_class_add "port_forwarding" plugin_agent_add_l3_agent_extension "port_forwarding" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/devstack/lib/flavors0000644000175000017500000000023700000000000021116 0ustar00coreycorey00000000000000# Neutron flavors plugin # ---------------------- FLAVORS_PLUGIN=flavors function configure_flavors { neutron_service_plugin_class_add $FLAVORS_PLUGIN } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/devstack/lib/l2_agent0000644000175000017500000000064400000000000021137 0ustar00coreycorey00000000000000function plugin_agent_add_l2_agent_extension { local l2_agent_extension=$1 if [[ -z "$L2_AGENT_EXTENSIONS" ]]; then L2_AGENT_EXTENSIONS=$l2_agent_extension elif [[ ! ,${L2_AGENT_EXTENSIONS}, =~ ,${l2_agent_extension}, ]]; then L2_AGENT_EXTENSIONS+=",$l2_agent_extension" fi } function configure_l2_agent { iniset /$NEUTRON_CORE_PLUGIN_CONF agent extensions "$L2_AGENT_EXTENSIONS" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/devstack/lib/l2_agent_sriovnicswitch0000644000175000017500000000223100000000000024267 0ustar00coreycorey00000000000000SRIOV_AGENT_CONF="${NEUTRON_CORE_PLUGIN_CONF_PATH}/sriov_agent.ini" SRIOV_AGENT_BINARY="${NEUTRON_BIN_DIR}/neutron-sriov-nic-agent" function configure_l2_agent_sriovnicswitch { if [[ -n "$PHYSICAL_NETWORK" ]] && [[ -n "$PHYSICAL_INTERFACE" ]]; then PHYSICAL_DEVICE_MAPPINGS=$PHYSICAL_NETWORK:$PHYSICAL_INTERFACE fi if [[ -n "$PHYSICAL_DEVICE_MAPPINGS" ]]; then iniset /$SRIOV_AGENT_CONF sriov_nic physical_device_mappings $PHYSICAL_DEVICE_MAPPINGS fi iniset /$SRIOV_AGENT_CONF securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver iniset /$SRIOV_AGENT_CONF agent extensions "$L2_AGENT_EXTENSIONS" } function start_l2_agent_sriov { local SERVICE_NAME if is_neutron_legacy_enabled; then SERVICE_NAME=q-sriov-agt else SERVICE_NAME=neutron-sriov-agent fi run_process $SERVICE_NAME "$SRIOV_AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$SRIOV_AGENT_CONF" } function stop_l2_agent_sriov { local SERVICE_NAME if is_neutron_legacy_enabled; then SERVICE_NAME=q-sriov-agt else SERVICE_NAME=neutron-sriov-agent fi stop_process $SERVICE_NAME } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/devstack/lib/l3_agent0000644000175000017500000000063200000000000021135 0ustar00coreycorey00000000000000function plugin_agent_add_l3_agent_extension { local l3_agent_extension=$1 if [[ -z "$L3_AGENT_EXTENSIONS" ]]; then L3_AGENT_EXTENSIONS=$l3_agent_extension elif 
[[ ! ,${L3_AGENT_EXTENSIONS}, =~ ,${l3_agent_extension}, ]]; then
        L3_AGENT_EXTENSIONS+=",$l3_agent_extension"
    fi
}

function configure_l3_agent {
    iniset $NEUTRON_L3_CONF agent extensions "$L3_AGENT_EXTENSIONS"
}
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/devstack/lib/l3_conntrack_helper0000644000175000017500000000023600000000000023360 0ustar00coreycorey00000000000000function configure_l3_conntrack_helper {
    neutron_service_plugin_class_add "conntrack_helper"
    plugin_agent_add_l3_agent_extension "conntrack_helper"
}
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/devstack/lib/log0000644000175000017500000000040200000000000020215 0ustar00coreycorey00000000000000function configure_log_service_plugin {
    neutron_service_plugin_class_add "log"
}

function configure_log_extension {
    plugin_agent_add_l2_agent_extension "log"
}

function configure_log {
    configure_log_service_plugin
    configure_log_extension
}
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/devstack/lib/macvtap_agent0000644000175000017500000000165500000000000022260 0ustar00coreycorey00000000000000# Neutron Macvtap L2 agent
# ------------------------

# Save trace setting
_XTRACE_NEUTRON_MACVTAP=$(set +o | grep xtrace)
set +o xtrace

function is_neutron_ovs_base_plugin {
    # macvtap doesn't use OVS
    return 1
}

function neutron_plugin_create_nova_conf {
    :
}

function neutron_plugin_install_agent_packages {
    :
}

function neutron_plugin_configure_debug_command {
    :
}

function neutron_plugin_configure_dhcp_agent {
    :
}

function neutron_plugin_configure_l3_agent {
    :
}

function neutron_plugin_configure_plugin_agent {
    # Only the NoopFirewallDriver is supported. If not set, the agent will
    # terminate.
    iniset /$NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver noop
    AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-macvtap-agent"
}

function neutron_plugin_setup_interface_driver {
    :
}

function neutron_plugin_check_adv_test_requirements {
    :
}

# Restore xtrace
$_XTRACE_NEUTRON_MACVTAP
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/devstack/lib/ml20000644000175000017500000000125600000000000020136 0ustar00coreycorey00000000000000function configure_qos_ml2 {
    neutron_ml2_extension_driver_add "qos"
}

function configure_ml2_extension_drivers {
    if is_neutron_legacy_enabled; then
        # NOTE(yamamoto): This overwrites what neutron-legacy set,
        # with the latest set of drivers.
        # While we modify Q_ML2_PLUGIN_EXT_DRIVERS (via
        # neutron_ml2_extension_driver_add calls) in the post-config phase,
        # lib/neutron-legacy populates this in "configure_neutron", which is
        # before post-config.
# REVISIT(yamamoto): Probably this ought to be in lib/neutron-legacy
        iniset /$NEUTRON_CORE_PLUGIN_CONF ml2 extension_drivers ${Q_ML2_PLUGIN_EXT_DRIVERS}
    fi
}
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/devstack/lib/network_segment_range0000644000175000017500000000015200000000000024025 0ustar00coreycorey00000000000000function configure_network_segment_range {
    neutron_service_plugin_class_add "network_segment_range"
}
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/devstack/lib/ovn_agent0000644000175000017500000007412000000000000021424 0ustar00coreycorey00000000000000#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

# Global Sources
# --------------

# There are some ovs functions OVN depends on that must be sourced from
# the ovs neutron plugins. After doing this, the OVN overrides must be
# re-sourced.
source $TOP_DIR/lib/neutron_plugins/ovs_base
source $TOP_DIR/lib/neutron_plugins/openvswitch_agent

# Defaults
# --------

# Set variables for building OVN from source
OVN_REPO=${OVN_REPO:-https://github.com/ovn-org/ovn.git}
OVN_REPO_NAME=$(basename ${OVN_REPO} | cut -f1 -d'.')
OVN_REPO_NAME=${OVN_REPO_NAME:-ovn}
OVN_BRANCH=${OVN_BRANCH:-master}

# Set variables for building OVS from source
OVS_REPO=${OVS_REPO:-https://github.com/openvswitch/ovs.git}
OVS_REPO_NAME=$(basename ${OVS_REPO} | cut -f1 -d'.')
OVS_REPO_NAME=${OVS_REPO_NAME:-ovs}
OVS_BRANCH=${OVS_BRANCH:-master}

if is_service_enabled tls-proxy; then
    OVN_PROTO=ssl
else
    OVN_PROTO=tcp
fi

# How to connect to ovsdb-server hosting the OVN SB database.
OVN_SB_REMOTE=${OVN_SB_REMOTE:-$OVN_PROTO:$SERVICE_HOST:6642}

# How to connect to ovsdb-server hosting the OVN NB database
OVN_NB_REMOTE=${OVN_NB_REMOTE:-$OVN_PROTO:$SERVICE_HOST:6641}

# ml2/config for neutron_sync_mode
OVN_NEUTRON_SYNC_MODE=${OVN_NEUTRON_SYNC_MODE:-log}

# Configured DNS servers to be used with internal_dns extension, only
# if the subnet DNS is not configured.
OVN_DNS_SERVERS=${OVN_DNS_SERVERS:-8.8.8.8}

# The type of OVN L3 Scheduler to use. The OVN L3 Scheduler determines the
# hypervisor/chassis where a router's gateway should be hosted in OVN. The
# default OVN L3 scheduler is leastloaded.
OVN_L3_SCHEDULER=${OVN_L3_SCHEDULER:-leastloaded}

# A UUID to uniquely identify this system. If one is not specified, a random
# one will be generated. A randomly generated UUID will be saved in a file
# 'ovn-uuid' so that the same one will be re-used if you re-run DevStack.
OVN_UUID=${OVN_UUID:-}

# Whether or not to build the openvswitch kernel module from ovs. This is
# required unless the distro kernel includes ovs+conntrack support.
OVN_BUILD_MODULES=$(trueorfalse False OVN_BUILD_MODULES)

# Whether or not to install the ovs python module from ovs source. This can be
# used to test and validate new ovs python features.
This should only be used # for development purposes since the ovs python version is controlled by OpenStack # requirements. OVN_INSTALL_OVS_PYTHON_MODULE=$(trueorfalse False OVN_INSTALL_OVS_PYTHON_MODULE) # GENEVE overlay protocol overhead. Defaults to 38 bytes plus the IP version # overhead (20 bytes for IPv4 (default) or 40 bytes for IPv6) which is determined # based on the ML2 overlay_ip_version option. The ML2 framework will use this to # configure the MTU DHCP option. OVN_GENEVE_OVERHEAD=${OVN_GENEVE_OVERHEAD:-38} # The log level of the OVN databases (north and south) OVN_DBS_LOG_LEVEL=${OVN_DBS_LOG_LEVEL:-info} OVN_META_CONF=$NEUTRON_CONF_DIR/neutron_ovn_metadata_agent.ini OVN_META_DATA_HOST=${OVN_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)} # ovsdb-server wants an IPv6 address in the quoted form, [::1] # Initialize un-quoted to handle IPv4, but add them back if version is IPv6 OVSDB_SERVER_LOCAL_HOST=$(ipv6_unquote $SERVICE_LOCAL_HOST) if [[ "$SERVICE_IP_VERSION" == 6 ]]; then OVSDB_SERVER_LOCAL_HOST=[$OVSDB_SERVER_LOCAL_HOST] fi OVS_PREFIX=/usr/local OVS_SBINDIR=$OVS_PREFIX/sbin OVS_BINDIR=$OVS_PREFIX/bin OVS_RUNDIR=$OVS_PREFIX/var/run/openvswitch OVS_SHAREDIR=$OVS_PREFIX/share/openvswitch OVS_SCRIPTDIR=$OVS_SHAREDIR/scripts OVS_DATADIR=$DATA_DIR/ovs OVN_DATADIR=$DATA_DIR/ovn OVN_SHAREDIR=$OVS_PREFIX/share/ovn OVN_SCRIPTDIR=$OVN_SHAREDIR/scripts OVN_RUNDIR=$OVS_PREFIX/var/run/ovn NEUTRON_OVN_BIN_DIR=$(get_python_exec_prefix) NEUTRON_OVN_METADATA_BINARY="neutron-ovn-metadata-agent" STACK_GROUP="$( id --group --name "$STACK_USER" )" # Libs from source # ---------------- # ovsdbapp used by neutron GITREPO["ovsdbapp"]=${OVSDBAPP_REPO:-${GIT_BASE}/openstack/ovsdbapp.git} GITBRANCH["ovsdbapp"]=${OVSDBAPP_BRANCH:-$TARGET_BRANCH} GITDIR["ovsdbapp"]=$DEST/ovsdbapp # Defaults Overwrite # ------------------ Q_PLUGIN=${Q_PLUGIN:-"ml2"} Q_AGENT=${Q_AGENT:-""} Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn,logger} Q_ML2_PLUGIN_TYPE_DRIVERS=${Q_ML2_PLUGIN_TYPE_DRIVERS:-local,flat,vlan,geneve} Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"geneve"} Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS:-"vni_ranges=1:65536"} Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS:-port_security,dns} ML2_L3_PLUGIN="ovn-router,trunk" # Utility Functions # ----------------- function is_kernel_module_loaded { if lsmod | grep $1 >& /dev/null; then return 0 else return 1 fi } function use_new_ovn_repository { # IF OVN_BRANCH is "master" or branch-2.13 (or higher), use the new # OVN repository [ "$OVN_BRANCH" == "master" ] && return 0 return $(! printf "%s\n%s" ${OVN_BRANCH//[!0-9.]/} 2.12 | sort -C -V) } # NOTE(rtheis): Function copied from DevStack _neutron_ovs_base_setup_bridge # and _neutron_ovs_base_add_bridge with the call to neutron-ovs-cleanup # removed. The call is not relevant for OVN, as it is specific to the use # of Neutron's OVS agent and hangs when running stack.sh because # neutron-ovs-cleanup uses the OVSDB native interface. 
function ovn_base_setup_bridge { local bridge=$1 local addbr_cmd="ovs-vsctl --no-wait -- --may-exist add-br $bridge" if [ "$OVS_DATAPATH_TYPE" != "system" ] ; then addbr_cmd="$addbr_cmd -- set Bridge $bridge datapath_type=${OVS_DATAPATH_TYPE}" fi $addbr_cmd ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge } function _start_process { $SYSTEMCTL daemon-reload $SYSTEMCTL enable $1 $SYSTEMCTL restart $1 } function _run_process { local service=$1 local cmd="$2" local stop_cmd="$3" local group=$4 local user=${5:-$STACK_USER} local systemd_service="devstack@$service.service" local unit_file="$SYSTEMD_DIR/$systemd_service" local environment="OVN_RUNDIR=$OVS_RUNDIR OVN_DBDIR=$OVN_DATADIR OVN_LOGDIR=$LOGDIR OVS_RUNDIR=$OVS_RUNDIR OVS_DBDIR=$OVS_DATADIR OVS_LOGDIR=$LOGDIR" echo "Starting $service executed command": $cmd write_user_unit_file $systemd_service "$cmd" "$group" "$user" iniset -sudo $unit_file "Service" "Type" "forking" iniset -sudo $unit_file "Service" "RemainAfterExit" "yes" iniset -sudo $unit_file "Service" "KillMode" "mixed" iniset -sudo $unit_file "Service" "LimitNOFILE" "65536" iniset -sudo $unit_file "Service" "Environment" "$environment" if [ -n "$stop_cmd" ]; then iniset -sudo $unit_file "Service" "ExecStop" "$stop_cmd" fi _start_process $systemd_service local testcmd="test -e $OVS_RUNDIR/$service.pid" test_with_retry "$testcmd" "$service did not start" $SERVICE_TIMEOUT 1 sudo ovs-appctl -t $service vlog/set console:off syslog:info file:info } function clone_repository { local repo=$1 local dir=$2 local branch=$3 if [ ! -d $dir ] ; then git_timed clone $repo $dir pushd $dir git checkout $branch popd else # Even though the directory already exists, call git_clone to update it # if needed based on the RECLONE option git_clone $repo $dir $branch fi } function get_ext_gw_interface { # Get ext_gw_interface depending on value of Q_USE_PUBLIC_VETH # This function is copied directly from the devstack neutron-legacy script if [[ "$Q_USE_PUBLIC_VETH" == "True" ]]; then echo $Q_PUBLIC_VETH_EX else # Disable in-band as we are going to use local port # to communicate with VMs sudo ovs-vsctl set Bridge $PUBLIC_BRIDGE \ other_config:disable-in-band=true echo $PUBLIC_BRIDGE fi } function create_public_bridge { # Create the public bridge that OVN will use # This logic is based on the devstack neutron-legacy _neutron_configure_router_v4 and _v6 local ext_gw_ifc ext_gw_ifc=$(get_ext_gw_interface) ovs-vsctl --may-exist add-br $ext_gw_ifc -- set bridge $ext_gw_ifc protocols=OpenFlow13 ovs-vsctl set open . external-ids:ovn-bridge-mappings=$PHYSICAL_NETWORK:$ext_gw_ifc if [ -n "$FLOATING_RANGE" ]; then local cidr_len=${FLOATING_RANGE#*/} sudo ip addr add $PUBLIC_NETWORK_GATEWAY/$cidr_len dev $ext_gw_ifc fi # Ensure IPv6 RAs are accepted on the interface with the default route. # This is needed for neutron-based devstack clouds to work in # IPv6-only clouds in the gate. Please do not remove this without # talking to folks in Infra. This fix is based on a devstack fix for # neutron L3 agent: https://review.openstack.org/#/c/359490/. default_route_dev=$(ip route | grep ^default | awk '{print $5}') sudo sysctl -w net.ipv6.conf.$default_route_dev.accept_ra=2 sudo sysctl -w net.ipv6.conf.all.forwarding=1 if [ -n "$IPV6_PUBLIC_RANGE" ]; then local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/} sudo ip -6 addr add $IPV6_PUBLIC_NETWORK_GATEWAY/$ipv6_cidr_len dev $ext_gw_ifc # NOTE(numans): Commenting the below code for now as this is breaking # the CI after xenial upgrade. 
# https://bugs.launchpad.net/networking-ovn/+bug/1648670 # sudo ip -6 route replace $FIXED_RANGE_V6 via $IPV6_PUBLIC_NETWORK_GATEWAY dev $ext_gw_ifc fi sudo ip link set $ext_gw_ifc up } function _disable_libvirt_apparmor { if ! sudo aa-status --enabled ; then return 0 fi # NOTE(arosen): This is used as a work around to allow newer versions # of libvirt to work with ovs configured ports. See LP#1466631. # requires the apparmor-utils package install_package apparmor-utils # disables apparmor for libvirtd sudo aa-complain /etc/apparmor.d/usr.sbin.libvirtd } # OVN compilation functions # ------------------------- # Fetch the ovs git repository and install packages needed for # the compilation. function _prepare_for_ovs_compilation { local build_modules=$1 clone_repository $OVS_REPO $DEST/$OVS_REPO_NAME $OVS_BRANCH if [[ "$build_modules" == "False" ]]; then return fi KERNEL_VERSION=`uname -r` if is_fedora ; then # is_fedora covers Fedora, RHEL, CentOS, etc... if [[ "$os_VENDOR" == "Fedora" ]]; then install_package elfutils-libelf-devel KERNEL_VERSION=`echo $KERNEL_VERSION | cut --delimiter='-' --field 1` elif [[ ${KERNEL_VERSION:0:2} != "3." ]]; then # dash is an illegal character in an rpm version, so replace # it with an underscore like it is done in the kernel # https://github.com/torvalds/linux/blob/master/scripts/package/mkspec#L25 # but only for latest series of the kernel, not 3.x KERNEL_VERSION=`echo $KERNEL_VERSION | tr - _` fi echo NOTE: if kernel-devel-$KERNEL_VERSION or kernel-headers-$KERNEL_VERSION installation echo failed, please, provide a repository with the package, or yum update / reboot echo your machine to get the latest kernel. install_package kernel-devel-$KERNEL_VERSION install_package kernel-headers-$KERNEL_VERSION elif is_ubuntu ; then install_package linux-headers-$KERNEL_VERSION fi } # Reload the ovs kernel modules function _reload_ovs_kernel_modules { ovs_system=$(sudo ovs-dpctl dump-dps | grep ovs-system) if [ -n "$ovs_system" ]; then sudo ovs-dpctl del-dp ovs-system fi sudo modprobe -r vport_geneve sudo modprobe -r openvswitch sudo modprobe openvswitch || (dmesg && die $LINENO "FAILED TO LOAD openvswitch") sudo modprobe vport-geneve || (dmesg && echo "FAILED TO LOAD vport-geneve") } # Compile openvswitch and its kernel module function _compile_ovs { local build_modules=$1 # Install the dependencies install_package autoconf automake libtool gcc patch make # TODO(flaviof): Would prefer to use pip_install wrapper, but that is not # usable right now because REQUIREMENTS_DIR variable is hard coded in # stackrc sudo pip3 install six _prepare_for_ovs_compilation $build_modules pushd $DEST/$OVS_REPO_NAME [ -f configure ] || ./boot.sh if [ ! -f config.status ] || [ configure -nt config.status ] ; then if [[ "$build_modules" == "True" ]]; then ./configure --with-linux=/lib/modules/$(uname -r)/build else ./configure fi fi make -j$(($(nproc) + 1)) sudo make install if [[ "$build_modules" == "True" ]]; then sudo make INSTALL_MOD_DIR=kernel/net/openvswitch modules_install if [ $? -eq 0 ]; then _reload_ovs_kernel_modules else echo "Compiling OVS kernel modules failed" fi fi popd } # compile_ovn() - Compile OVN from source and load needed modules # Accepts three parameters: # - first optional parameter is False by default and controls whether # kernel modules are built and installed.
# - second optional parameter defines prefix for # ovn compilation # - third optional parameter defines localstatedir for # ovn single machine runtime function compile_ovn { local build_modules=${1:-False} local prefix=$2 local localstatedir=$3 # First, compile OVS _compile_ovs $build_modules if [ -n "$prefix" ]; then prefix="--prefix=$prefix" fi if [ -n "$localstatedir" ]; then localstatedir="--localstatedir=$localstatedir" fi clone_repository $OVN_REPO $DEST/$OVN_REPO_NAME $OVN_BRANCH pushd $DEST/$OVN_REPO_NAME if [ ! -f configure ] ; then ./boot.sh fi if [ ! -f config.status ] || [ configure -nt config.status ] ; then ./configure --with-ovs-source=$DEST/$OVS_REPO_NAME $prefix $localstatedir fi make -j$(($(nproc) + 1)) sudo make install popd } # OVN Neutron driver functions # ---------------------------- # OVN service sanity check function ovn_sanity_check { if is_service_enabled q-agt neutron-agt; then die $LINENO "The q-agt/neutron-agt service must be disabled with OVN." elif is_service_enabled q-l3 neutron-l3; then die $LINENO "The q-l3/neutron-l3 service must be disabled with OVN." elif is_service_enabled q-svc neutron-api && [[ ! $Q_ML2_PLUGIN_MECHANISM_DRIVERS =~ "ovn" ]]; then die $LINENO "OVN needs to be enabled in \$Q_ML2_PLUGIN_MECHANISM_DRIVERS" elif is_service_enabled q-svc neutron-api && [[ ! $Q_ML2_PLUGIN_TYPE_DRIVERS =~ "geneve" ]]; then die $LINENO "Geneve needs to be enabled in \$Q_ML2_PLUGIN_TYPE_DRIVERS to be used with OVN" fi } # install_ovn() - Collect source and prepare function install_ovn { echo "Installing OVN and dependent packages" # Check the OVN configuration ovn_sanity_check # If OVS is already installed, remove it, because we're about to re-install # it from source. for package in openvswitch openvswitch-switch openvswitch-common; do if is_package_installed $package ; then uninstall_package $package fi done # Install tox, used to generate the config (see devstack/override-defaults) pip_install tox source $NEUTRON_DIR/devstack/lib/ovs remove_ovs_packages sudo rm -f $OVS_RUNDIR/* if use_new_ovn_repository; then compile_ovn $OVN_BUILD_MODULES else compile_ovs $OVN_BUILD_MODULES fi sudo mkdir -p $OVS_RUNDIR sudo chown $(whoami) $OVS_RUNDIR sudo mkdir -p $OVS_PREFIX/var/log/openvswitch sudo chown $(whoami) $OVS_PREFIX/var/log/openvswitch sudo mkdir -p $OVS_PREFIX/var/log/ovn sudo chown $(whoami) $OVS_PREFIX/var/log/ovn # Archive log files and create new local log_archive_dir=$LOGDIR/archive mkdir -p $log_archive_dir for logfile in ovs-vswitchd.log ovn-northd.log ovn-controller.log ovn-controller-vtep.log ovs-vtep.log ovsdb-server.log ovsdb-server-nb.log ovsdb-server-sb.log; do if [ -f "$LOGDIR/$logfile" ] ; then mv "$LOGDIR/$logfile" "$log_archive_dir/$logfile.${CURRENT_LOG_TIME}" fi done # Install ovsdbapp from source if requested if use_library_from_git "ovsdbapp"; then git_clone_by_name "ovsdbapp" setup_dev_lib "ovsdbapp" fi # Install ovs python module from ovs source. 
if [[ "$OVN_INSTALL_OVS_PYTHON_MODULE" == "True" ]]; then sudo pip uninstall -y ovs # Clone the OVS repository if it's not yet present clone_repository $OVS_REPO $DEST/$OVS_REPO_NAME $OVS_BRANCH sudo pip install -e $DEST/$OVS_REPO_NAME/python fi } function configure_ovn_plugin { echo "Configuring Neutron for OVN" if is_service_enabled q-svc ; then # NOTE(arosen) needed for tempest export NETWORK_API_EXTENSIONS=$($PYTHON -c \ 'from neutron.common.ovn import extensions ;\ print(",".join(extensions.ML2_SUPPORTED_API_EXTENSIONS))') export NETWORK_API_EXTENSIONS=$NETWORK_API_EXTENSIONS,$($PYTHON -c \ 'from neutron.common.ovn import extensions ;\ print(",".join(extensions.ML2_SUPPORTED_API_EXTENSIONS_OVN_L3))') populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_geneve max_header_size=$OVN_GENEVE_OVERHEAD populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_connection="$OVN_NB_REMOTE" populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_connection="$OVN_SB_REMOTE" if is_service_enabled tls-proxy; then populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_ca_cert="$INT_CA_DIR/ca-chain.pem" populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_certificate="$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt" populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_private_key="$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key" populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_ca_cert="$INT_CA_DIR/ca-chain.pem" populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_certificate="$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt" populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_private_key="$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key" fi populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn neutron_sync_mode="$OVN_NEUTRON_SYNC_MODE" populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_l3_scheduler="$OVN_L3_SCHEDULER" populate_ml2_config /$Q_PLUGIN_CONF_FILE securitygroup enable_security_group="$Q_USE_SECGROUP" inicomment /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver if is_service_enabled q-ovn-metadata-agent; then populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True else populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=False fi if is_service_enabled q-dns neutron-dns ; then iniset $NEUTRON_CONF DEFAULT dns_domain openstackgate.local populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn dns_servers="$OVN_DNS_SERVERS" fi fi if is_service_enabled q-dhcp neutron-dhcp ; then iniset $NEUTRON_CONF DEFAULT dhcp_agent_notification True else iniset $NEUTRON_CONF DEFAULT dhcp_agent_notification False fi if is_service_enabled n-api-meta ; then if is_service_enabled q-ovn-metadata-agent ; then iniset $NOVA_CONF neutron service_metadata_proxy True fi fi } function configure_ovn { echo "Configuring OVN" if [ -z "$OVN_UUID" ] ; then if [ -f ./ovn-uuid ] ; then OVN_UUID=$(cat ovn-uuid) else OVN_UUID=$(uuidgen) echo $OVN_UUID > ovn-uuid fi fi # Metadata if is_service_enabled q-ovn-metadata-agent && is_service_enabled ovn-controller; then sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR mkdir -p $NEUTRON_DIR/etc/neutron/plugins/ml2 (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) cp $NEUTRON_DIR/etc/neutron_ovn_metadata_agent.ini.sample $OVN_META_CONF configure_root_helper_options $OVN_META_CONF iniset $OVN_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $OVN_META_CONF DEFAULT nova_metadata_host $OVN_META_DATA_HOST iniset $OVN_META_CONF DEFAULT metadata_workers $API_WORKERS iniset $OVN_META_CONF DEFAULT state_path $NEUTRON_STATE_PATH iniset $OVN_META_CONF ovs ovsdb_connection unix:$OVS_RUNDIR/db.sock iniset $OVN_META_CONF 
ovn ovn_sb_connection $OVN_SB_REMOTE if is_service_enabled tls-proxy; then iniset $OVN_META_CONF ovn \ ovn_sb_ca_cert $INT_CA_DIR/ca-chain.pem iniset $OVN_META_CONF ovn \ ovn_sb_certificate $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt iniset $OVN_META_CONF ovn \ ovn_sb_private_key $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key fi fi } function init_ovn { # clean up from previous (possibly aborted) runs # create required data files # Assumption: this is a dedicated test system and there is nothing important # in the ovn, ovn-nb, or ovs databases. We're going to trash them and # create new ones on each devstack run. _disable_libvirt_apparmor mkdir -p $OVN_DATADIR mkdir -p $OVS_DATADIR rm -f $OVS_DATADIR/*.db rm -f $OVS_DATADIR/.*.db.~lock~ rm -f $OVN_DATADIR/*.db rm -f $OVN_DATADIR/.*.db.~lock~ } function _start_ovs { echo "Starting OVS" if is_service_enabled ovn-controller || is_service_enabled ovn-controller-vtep ; then # ovsdb-server and ovs-vswitchd are used privately in OVN as openvswitch service names. enable_service ovsdb-server enable_service ovs-vswitchd if [ ! -f $OVS_DATADIR/conf.db ]; then ovsdb-tool create $OVS_DATADIR/conf.db $OVS_SHAREDIR/vswitch.ovsschema fi if is_service_enabled ovn-controller-vtep; then if [ ! -f $OVS_DATADIR/vtep.db ]; then ovsdb-tool create $OVS_DATADIR/vtep.db $OVS_SHAREDIR/vtep.ovsschema fi fi local dbcmd="$OVS_SBINDIR/ovsdb-server --remote=punix:$OVS_RUNDIR/db.sock --remote=ptcp:6640:$OVSDB_SERVER_LOCAL_HOST --pidfile --detach --log-file" dbcmd+=" --remote=db:Open_vSwitch,Open_vSwitch,manager_options" if is_service_enabled ovn-controller-vtep; then dbcmd+=" --remote=db:hardware_vtep,Global,managers $OVS_DATADIR/vtep.db" fi dbcmd+=" $OVS_DATADIR/conf.db" _run_process ovsdb-server "$dbcmd" echo "Configuring OVSDB" if is_service_enabled tls-proxy; then ovs-vsctl --no-wait set-ssl \ $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key \ $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt \ $INT_CA_DIR/ca-chain.pem fi ovs-vsctl --no-wait set open_vswitch . system-type="devstack" ovs-vsctl --no-wait set open_vswitch . external-ids:system-id="$OVN_UUID" ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-remote="$OVN_SB_REMOTE" ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-bridge="br-int" ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-type="geneve" ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$HOST_IP" # Select this chassis to host gateway routers if [[ "$ENABLE_CHASSIS_AS_GW" == "True" ]]; then ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-cms-options="enable-chassis-as-gw" fi ovn_base_setup_bridge br-int ovs-vsctl --no-wait set bridge br-int fail-mode=secure other-config:disable-in-band=true local ovscmd="$OVS_SBINDIR/ovs-vswitchd --log-file --pidfile --detach" _run_process ovs-vswitchd "$ovscmd" "" "$STACK_GROUP" "root" if is_provider_network || [[ $Q_USE_PROVIDERNET_FOR_PUBLIC == "True" ]]; then ovn_base_setup_bridge $OVS_PHYSICAL_BRIDGE ovs-vsctl set open . 
external-ids:ovn-bridge-mappings=${PHYSICAL_NETWORK}:${OVS_PHYSICAL_BRIDGE} fi if is_service_enabled ovn-controller-vtep ; then ovn_base_setup_bridge br-v vtep-ctl add-ps br-v vtep-ctl set Physical_Switch br-v tunnel_ips=$HOST_IP enable_service ovs-vtep local vtepcmd="$OVS_SCRIPTDIR/ovs-vtep --log-file --pidfile --detach br-v" _run_process ovs-vtep "$vtepcmd" "" "$STACK_GROUP" "root" vtep-ctl set-manager tcp:$HOST_IP:6640 fi fi } function _start_ovn_services { _start_process "devstack@ovsdb-server.service" _start_process "devstack@ovs-vswitchd.service" if is_service_enabled ovs-vtep ; then _start_process "devstack@ovs-vtep.service" fi if is_service_enabled ovn-northd ; then _start_process "devstack@ovn-northd.service" fi if is_service_enabled ovn-controller ; then _start_process "devstack@ovn-controller.service" fi if is_service_enabled ovn-controller-vtep ; then _start_process "devstack@ovn-controller-vtep.service" fi if is_service_enabled q-ovn-metadata-agent; then _start_process "devstack@q-ovn-metadata-agent.service" fi } # start_ovn() - Start running OVN processes function start_ovn { echo "Starting OVN" _start_ovs local SCRIPTDIR=$OVN_SCRIPTDIR if ! use_new_ovn_repository; then SCRIPTDIR=$OVS_SCRIPTDIR fi if is_service_enabled ovn-northd ; then if is_service_enabled tls-proxy; then local tls_args="\ --ovn-nb-db-ssl-ca-cert=$INT_CA_DIR/ca-chain.pem \ --ovn-nb-db-ssl-cert=$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt \ --ovn-nb-db-ssl-key=$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key \ --ovn-sb-db-ssl-ca-cert=$INT_CA_DIR/ca-chain.pem \ --ovn-sb-db-ssl-cert=$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt \ --ovn-sb-db-ssl-key=$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key \ " else local tls_args="" fi local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor $tls_args start_northd" local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_northd" _run_process ovn-northd "$cmd" "$stop_cmd" ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 ovn-sbctl --db=unix:$OVS_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000 sudo ovs-appctl -t $OVS_RUNDIR/ovnnb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL sudo ovs-appctl -t $OVS_RUNDIR/ovnsb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL fi
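    # At this point the NB and SB ovsdb-servers should be up and listening
    # on ports 6641/6642. As an illustrative sanity check (not executed by
    # this script; the --db remotes below assume the default tcp transport,
    # ssl deployments additionally need the certificate options):
    #
    #   sudo ss -ltn | grep -E ':(6641|6642)'
    #   ovn-nbctl --db=$OVN_NB_REMOTE show
    #   ovn-sbctl --db=$OVN_SB_REMOTE show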
if is_service_enabled ovn-controller ; then local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_controller" local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_controller" _run_process ovn-controller "$cmd" "$stop_cmd" "$STACK_GROUP" "root" fi if is_service_enabled ovn-controller-vtep ; then local cmd="$OVS_BINDIR/ovn-controller-vtep --log-file --pidfile --detach --ovnsb-db=$OVN_SB_REMOTE" _run_process ovn-controller-vtep "$cmd" "" "$STACK_GROUP" "root" fi if is_service_enabled q-ovn-metadata-agent; then run_process q-ovn-metadata-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_METADATA_BINARY --config-file $OVN_META_CONF" # Format logging setup_logging $OVN_META_CONF fi if is_service_enabled br-ex-tcpdump ; then # tcpdump monitor on br-ex for ARP, reverse ARP and ICMP v4 / v6 packets sudo ip link set dev $PUBLIC_BRIDGE up run_process br-ex-tcpdump "/usr/sbin/tcpdump -i $PUBLIC_BRIDGE arp or rarp or icmp or icmp6 -enlX" "$STACK_GROUP" root fi if is_service_enabled br-int-flows ; then run_process br-int-flows "/bin/sh -c \"set +e; while true; do echo ovs-ofctl dump-flows br-int; ovs-ofctl dump-flows br-int ; sleep 30; done; \"" "$STACK_GROUP" root fi # NOTE(lucasagomes): To keep things simpler, let's reuse the same # RUNDIR for both OVS and OVN. This way we avoid having to specify the # --db option in the ovn-{n,s}bctl commands while playing with DevStack if use_new_ovn_repository; then sudo ln -s $OVS_RUNDIR $OVN_RUNDIR fi _start_ovn_services } function _stop_ovs_dp { sudo ovs-dpctl dump-dps | sudo xargs -n1 ovs-dpctl del-dp is_kernel_module_loaded vport_geneve && sudo rmmod vport_geneve is_kernel_module_loaded vport_vxlan && sudo rmmod vport_vxlan is_kernel_module_loaded openvswitch && sudo rmmod openvswitch } function stop_ovn { if is_service_enabled q-ovn-metadata-agent; then sudo pkill -9 -f haproxy || : stop_process q-ovn-metadata-agent fi if is_service_enabled ovn-controller-vtep ; then stop_process ovn-controller-vtep fi if is_service_enabled ovn-controller ; then stop_process ovn-controller fi if is_service_enabled ovn-northd ; then stop_process ovn-northd fi if is_service_enabled ovs-vtep ; then stop_process ovs-vtep fi stop_process ovs-vswitchd stop_process ovsdb-server _stop_ovs_dp } function _cleanup { local path=${1:-$DEST/$OVN_REPO_NAME} pushd $path sudo make uninstall sudo make distclean popd } # cleanup_ovn() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_ovn { local ovn_path=$DEST/$OVN_REPO_NAME local ovs_path=$DEST/$OVS_REPO_NAME if [ -d $ovn_path ]; then _cleanup $ovn_path fi if [ -d $ovs_path ]; then _cleanup $ovs_path fi sudo rm -f $OVN_RUNDIR } function neutron_plugin_create_nova_conf { : }
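# For reference, DevStack's plugin.sh drives the functions in this file
# in roughly the following order (an illustrative sketch, assuming the
# ovn agent is enabled):
#
#   install_ovn            # stack.sh "install" phase
#   configure_ovn
#   init_ovn
#   configure_ovn_plugin   # stack.sh "post-config" phase
#   start_ovn
#   stop_ovn               # unstack.sh
#   cleanup_ovn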
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/devstack/lib/ovs0000644000175000017500000001631300000000000020253 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. OVS_REPO=${OVS_REPO:-https://github.com/openvswitch/ovs.git} OVS_REPO_NAME=$(basename ${OVS_REPO} | cut -f1 -d'.') OVS_BRANCH=${OVS_BRANCH:-master} # Functions # load_module() - Load a kernel module via modprobe, given by the first # argument, and die on failure # - the optional fatal argument says whether the function should # exit if the module can't be loaded function load_module { local module=$1 local fatal=$2 if [ "$(trueorfalse True fatal)" == "True" ]; then sudo modprobe $module || (dmesg && die $LINENO "FAILED TO LOAD $module") else sudo modprobe $module || (echo "FAILED TO LOAD $module" && dmesg) fi } # prepare_for_compilation() - Fetch ovs git repository and install packages needed for # compilation. function prepare_for_compilation { local build_modules=${1:-False} OVS_DIR=$DEST/$OVS_REPO_NAME if [ ! -d $OVS_DIR ] ; then # We can't use git_clone here because we want to ignore ERROR_ON_CLONE git_timed clone $OVS_REPO $OVS_DIR cd $OVS_DIR git checkout $OVS_BRANCH else # Even though the directory already exists, call git_clone to update it # if needed based on the RECLONE option git_clone $OVS_REPO $OVS_DIR $OVS_BRANCH cd $OVS_DIR fi # TODO: Can you create package list files like you can inside devstack? install_package autoconf automake libtool gcc patch make # If build_modules is False, we don't need to install the kernel-* # packages. Just return. if [[ "$build_modules" == "False" ]]; then return fi KERNEL_VERSION=`uname -r` if is_fedora ; then # is_fedora covers Fedora, RHEL, CentOS, etc... if [[ "$os_VENDOR" == "Fedora" ]]; then install_package elfutils-libelf-devel KERNEL_VERSION=`echo $KERNEL_VERSION | cut --delimiter='-' --field 1` elif [[ ${KERNEL_VERSION:0:2} != "3." ]]; then # dash is an illegal character in an rpm version, so replace # it with an underscore like it is done in the kernel # https://github.com/torvalds/linux/blob/master/scripts/package/mkspec#L25 # but only for latest series of the kernel, not 3.x KERNEL_VERSION=`echo $KERNEL_VERSION | tr - _` fi echo NOTE: if kernel-devel-$KERNEL_VERSION or kernel-headers-$KERNEL_VERSION installation echo failed, please, provide a repository with the package, or yum update / reboot echo your machine to get the latest kernel. install_package kernel-devel-$KERNEL_VERSION install_package kernel-headers-$KERNEL_VERSION elif is_ubuntu ; then install_package linux-headers-$KERNEL_VERSION fi } # load_kernel_modules() - load openvswitch kernel module function load_kernel_modules { load_module openvswitch load_module vport-geneve False dmesg | tail } # reload_kernel_modules() - reload openvswitch kernel module function reload_kernel_modules { local ovs_system=$(sudo ovs-dpctl dump-dps | grep ovs-system) if [ -n "$ovs_system" ]; then sudo ovs-dpctl del-dp ovs-system fi sudo modprobe -r vport_geneve sudo modprobe -r openvswitch load_kernel_modules }
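# The two helpers above leave the openvswitch and geneve datapath modules
# loaded. An illustrative way (not executed by this library) to verify
# that by hand:
#
#   lsmod | grep -E 'openvswitch|vport_geneve'
#   sudo ovs-dpctl dump-dps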
# compile_ovs() - Compile OVS from source and load needed modules. # Accepts three parameters: # - first one is False by default and means that modules are not built and installed. # - second optional parameter defines prefix for ovs compilation # - third optional parameter defines localstatedir for ovs single machine runtime # Env variables OVS_REPO_NAME, OVS_REPO and OVS_BRANCH must be set function compile_ovs { local _pwd=$PWD local build_modules=${1:-False} local prefix=$2 local localstatedir=$3 if [ -n "$prefix" ]; then prefix="--prefix=$prefix" fi if [ -n "$localstatedir" ]; then localstatedir="--localstatedir=$localstatedir" fi prepare_for_compilation $build_modules if [ ! -f configure ] ; then ./boot.sh fi if [ ! -f config.status ] || [ configure -nt config.status ] ; then if [[ "$build_modules" == "True" ]]; then ./configure $prefix $localstatedir --with-linux=/lib/modules/$(uname -r)/build else ./configure $prefix $localstatedir fi fi make -j$(($(nproc) + 1)) sudo make install if [[ "$build_modules" == "True" ]]; then sudo make INSTALL_MOD_DIR=kernel/net/openvswitch modules_install reload_kernel_modules else load_kernel_modules fi cd $_pwd } # action_openvswitch() - call an action on the openvswitch service # Accepts one parameter: 'start', 'restart' or 'stop'. function action_openvswitch { local action=$1 if is_ubuntu; then ${action}_service openvswitch-switch elif is_fedora; then ${action}_service openvswitch elif is_suse; then if [[ $DISTRO == "sle12" ]] && [[ $os_RELEASE -lt 12.2 ]]; then ${action}_service openvswitch-switch else ${action}_service openvswitch fi fi } # compile_ovs_kernel_module() - Compile openvswitch kernel module and load it function compile_ovs_kernel_module { local _pwd=$PWD prepare_for_compilation [ -f configure ] || ./boot.sh if [ ! -f config.status ] || [ configure -nt config.status ] ; then ./configure --with-linux=/lib/modules/$(uname -r)/build fi action_openvswitch stop make -j$(($(nproc) + 1)) sudo make INSTALL_MOD_DIR=kernel/net/openvswitch modules_install if [ $?
-eq 0 ]; then reload_kernel_modules else echo "Compiling OVS kernel module failed" fi action_openvswitch start cd $_pwd } # start_new_ovs() - removes old ovs database, creates a new one and starts ovs function start_new_ovs () { sudo rm -f /etc/openvswitch/conf.db /etc/openvswitch/.conf.db~lock~ sudo /usr/share/openvswitch/scripts/ovs-ctl start } # stop_new_ovs() - stops ovs function stop_new_ovs () { local ovs_ctl='/usr/share/openvswitch/scripts/ovs-ctl' if [ -x $ovs_ctl ] ; then sudo $ovs_ctl stop fi } # remove_ovs_packages() - removes old ovs packages from the system function remove_ovs_packages() { for package in openvswitch openvswitch-switch openvswitch-common; do if is_package_installed $package; then uninstall_package $package fi done } # load_conntrack_gre_module() - loads nf_conntrack_proto_gre kernel module function load_conntrack_gre_module() { sudo modprobe nf_conntrack_proto_gre } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/devstack/lib/placement0000644000175000017500000000146600000000000021417 0ustar00coreycorey00000000000000function configure_placement_service_plugin { neutron_service_plugin_class_add "placement" } function configure_placement_neutron { iniset $NEUTRON_CONF placement auth_type "$NEUTRON_PLACEMENT_AUTH_TYPE" iniset $NEUTRON_CONF placement auth_url "$KEYSTONE_SERVICE_URI" iniset $NEUTRON_CONF placement username "$NEUTRON_PLACEMENT_USERNAME" iniset $NEUTRON_CONF placement password "$SERVICE_PASSWORD" iniset $NEUTRON_CONF placement user_domain_name "$SERVICE_DOMAIN_NAME" iniset $NEUTRON_CONF placement project_name "$SERVICE_TENANT_NAME" iniset $NEUTRON_CONF placement project_domain_name "$SERVICE_DOMAIN_NAME" iniset $NEUTRON_CONF placement region_name "$REGION_NAME" } function configure_placement_extension { configure_placement_service_plugin configure_placement_neutron } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/devstack/lib/qos0000644000175000017500000000111000000000000020233 0ustar00coreycorey00000000000000function configure_qos_service_plugin { neutron_service_plugin_class_add "qos" } function configure_qos_core_plugin { configure_qos_$NEUTRON_CORE_PLUGIN } function configure_qos_l2_agent { plugin_agent_add_l2_agent_extension "qos" } function configure_qos { configure_qos_service_plugin configure_qos_core_plugin configure_qos_l2_agent } function configure_l3_agent_extension_fip_qos { plugin_agent_add_l3_agent_extension "fip_qos" } function configure_l3_agent_extension_gateway_ip_qos { plugin_agent_add_l3_agent_extension "gateway_ip_qos" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/devstack/lib/segments0000644000175000017500000000025600000000000021270 0ustar00coreycorey00000000000000function configure_segments_service_plugin { neutron_service_plugin_class_add segments } function configure_segments_extension { configure_segments_service_plugin } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/devstack/lib/tag_ports_during_bulk_creation0000644000175000017500000000020600000000000025711 0ustar00coreycorey00000000000000function configure_tag_ports_during_bulk_creation_extension { neutron_ml2_extension_driver_add "tag_ports_during_bulk_creation" } ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/devstack/lib/trunk0000644000175000017500000000024400000000000020603 0ustar00coreycorey00000000000000function configure_trunk_service_plugin { neutron_service_plugin_class_add "trunk" } function configure_trunk_extension { configure_trunk_service_plugin } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/devstack/lib/uplink_status_propagation0000644000175000017500000000017400000000000024752 0ustar00coreycorey00000000000000function configure_uplink_status_propagation_extension { neutron_ml2_extension_driver_add "uplink_status_propagation" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/devstack/ovn-compute-local.conf.sample0000644000175000017500000000606100000000000024445 0ustar00coreycorey00000000000000# # Sample DevStack local.conf. # # This sample file is intended to be used when adding an additional compute node # to your test environment. It runs a very minimal set of services. # # For this configuration to work, you *must* set the SERVICE_HOST option to the # IP address of the main DevStack host. You must also set HOST_IP to the IP # address of this host. # [[local|localrc]] DATABASE_PASSWORD=password RABBIT_PASSWORD=password SERVICE_PASSWORD=password SERVICE_TOKEN=password ADMIN_PASSWORD=password Q_AGENT=ovn NEUTRON_AGENT=$Q_AGENT Q_ML2_PLUGIN_MECHANISM_DRIVERS=ovn,logger Q_ML2_PLUGIN_TYPE_DRIVERS=local,flat,vlan,geneve Q_ML2_TENANT_NETWORK_TYPE="geneve" # Enable devstack spawn logging LOGFILE=$DEST/logs/stack.sh.log # The DevStack plugin defaults to using the ovn branch from the official ovs # repo. You can optionally use a different one. For example, you may want to # use the latest patches in blp's ovn branch: #OVN_REPO=https://github.com/blp/ovs-reviews.git #OVN_BRANCH=ovn enable_plugin neutron https://opendev.org/openstack/neutron disable_all_services enable_service n-cpu enable_service placement-client enable_service ovn-controller enable_service q-ovn-metadata-agent # Set this to the address of the main DevStack host running the rest of the # OpenStack services. SERVICE_HOST= RABBIT_HOST=$SERVICE_HOST Q_HOST=$SERVICE_HOST # How to connect to ovsdb-server hosting the OVN SB database OVN_SB_REMOTE=tcp:$SERVICE_HOST:6642 # A UUID to uniquely identify this system. If one is not specified, a random # one will be generated and saved in the file 'ovn-uuid' for re-use in future # DevStack runs. #OVN_UUID= # Whether or not to build custom openvswitch kernel modules from the ovs git # tree. This is enabled by default. This is required unless your distro kernel # includes ovs+conntrack support. This support was first released in Linux 4.3, # and will likely be backported by some distros. #OVN_BUILD_MODULES=False HOST_IP= NOVA_VNC_ENABLED=True NOVNCPROXY_URL=http://$SERVICE_HOST:6080/vnc_lite.html VNCSERVER_LISTEN=$HOST_IP VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN # Skydive #enable_plugin skydive https://github.com/redhat-cip/skydive.git #enable_service skydive-agent # Provider Network # If you want to enable a provider network instead of the default private # network after your DevStack environment installation, you *must* set the # Q_USE_PROVIDER_NETWORKING to True, and give value to both PHYSICAL_NETWORK # and OVS_PHYSICAL_BRIDGE. 
#Q_USE_PROVIDER_NETWORKING=True #PHYSICAL_NETWORK=providernet #OVS_PHYSICAL_BRIDGE=br-provider #PUBLIC_INTERFACE= # If the admin wants to enable this chassis to host gateway routers for # external connectivity, then set ENABLE_CHASSIS_AS_GW to True. # Then devstack will set ovn-cms-options with enable-chassis-as-gw # in Open_vSwitch table's external_ids column. # If this option is not set on any chassis, all of them with bridge # mappings configured will be eligible to host a gateway. #ENABLE_CHASSIS_AS_GW=False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/devstack/ovn-db-local.conf.sample0000644000175000017500000000240700000000000023356 0ustar00coreycorey00000000000000# # Sample DevStack local.conf. # # This sample file is intended to be used for running ovn-northd and the # OVN DBs on a separate node. # # For this configuration to work, you *must* set the SERVICE_HOST option to the # IP address of the main DevStack host. # [[local|localrc]] DATABASE_PASSWORD=password RABBIT_PASSWORD=password SERVICE_PASSWORD=password SERVICE_TOKEN=password ADMIN_PASSWORD=password # The DevStack plugin defaults to using the ovn branch from the official ovs # repo. You can optionally use a different one. For example, you may want to # use the latest patches in blp's ovn branch: #OVN_REPO=https://github.com/blp/ovs-reviews.git #OVN_BRANCH=ovn enable_plugin neutron https://git.openstack.org/openstack/neutron disable_all_services enable_service ovn-northd # A UUID to uniquely identify this system. If one is not specified, a random # one will be generated and saved in the file 'ovn-uuid' for re-use in future # DevStack runs. #OVN_UUID= # Whether or not to build custom openvswitch kernel modules from the ovs git # tree. This is enabled by default. This is required unless your distro kernel # includes ovs+conntrack support. This support was first released in Linux 4.3, # and will likely be backported by some distros. #OVN_BUILD_MODULES=False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/devstack/ovn-local.conf.sample0000644000175000017500000001466700000000000022776 0ustar00coreycorey00000000000000# # Sample DevStack local.conf. # # This sample file is intended to be used for your typical DevStack environment # that's running all of OpenStack on a single host. This can also be used as # the first host of a multi-host test environment. # # No changes to this sample configuration are required for this to work. # [[local|localrc]] DATABASE_PASSWORD=password RABBIT_PASSWORD=password SERVICE_PASSWORD=password SERVICE_TOKEN=password ADMIN_PASSWORD=password Q_AGENT=ovn Q_ML2_PLUGIN_MECHANISM_DRIVERS=ovn,logger Q_ML2_PLUGIN_TYPE_DRIVERS=local,flat,vlan,geneve Q_ML2_TENANT_NETWORK_TYPE="geneve" # Enable devstack spawn logging LOGFILE=$DEST/logs/stack.sh.log # The DevStack plugin defaults to using the ovn branch from the official ovs # repo. You can optionally use a different one. For example, you may want to # use the latest patches in blp's ovn branch: #OVN_REPO=https://github.com/blp/ovs-reviews.git #OVN_BRANCH=ovn enable_service ovn-northd enable_service ovn-controller enable_service q-ovn-metadata-agent # Use Neutron instead of nova-network disable_service n-net enable_service q-svc # Disable Neutron agents not used with OVN.
disable_service q-agt disable_service q-l3 disable_service q-dhcp disable_service q-meta # Enable services, these services depend on neutron plugin. enable_plugin neutron https://opendev.org/openstack/neutron enable_service q-trunk enable_service q-dns #enable_service q-qos # Enable neutron tempest plugin tests enable_plugin neutron-tempest-plugin https://opendev.org/openstack/neutron-tempest-plugin # To enable the advanced images tempest tests, uncomment the lines below: #DOWNLOAD_DEFAULT_IMAGES=False #IMAGE_URLS="http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img,https://cloud-images.ubuntu.com/releases/xenial/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img" #DEFAULT_IMAGE_NAME=cirros-0.4.0-x86_64-disk #DEFAULT_IMAGE_FILE_NAME=cirros-0.4.0-x86_64-disk.img #ADVANCED_IMAGE_NAME=ubuntu-16.04-server-cloudimg-amd64-disk1 #ADVANCED_INSTANCE_TYPE=ds512M #ADVANCED_INSTANCE_USER=ubuntu #BUILD_TIMEOUT=784 # Horizon (the web UI) is enabled by default. You may want to disable # it here to speed up DevStack a bit. enable_service horizon #disable_service horizon # Cinder (OpenStack Block Storage) is disabled by default to speed up # DevStack a bit. You may enable it here if you would like to use it. disable_service cinder c-sch c-api c-vol #enable_service cinder c-sch c-api c-vol # To enable Rally, uncomment the line below #enable_plugin rally https://github.com/openstack/rally master # How to connect to ovsdb-server hosting the OVN NB database. #OVN_NB_REMOTE=tcp:$SERVICE_HOST:6641 # How to connect to ovsdb-server hosting the OVN SB database. #OVN_SB_REMOTE=tcp:$SERVICE_HOST:6642 # A UUID to uniquely identify this system. If one is not specified, a random # one will be generated and saved in the file 'ovn-uuid' for re-use in future # DevStack runs. #OVN_UUID= # If using the OVN native layer-3 service, choose a router scheduler to # manage the distribution of router gateways on hypervisors/chassis. # Default value is leastloaded. #OVN_L3_SCHEDULER=leastloaded # Whether or not to build custom openvswitch kernel modules from the ovs git # tree. This is enabled by default. This is required unless your distro kernel # includes ovs+conntrack support. This support was first released in Linux 4.3, # and will likely be backported by some distros. #OVN_BUILD_MODULES=False # Skydive #enable_plugin skydive https://github.com/redhat-cip/skydive.git #enable_service skydive-analyzer #enable_service skydive-agent # Octavia with OVN provider driver. The OVN provider driver uses the OVN # native load balancer; it only supports L4 TCP load balancing and doesn't # support health check monitors or other advanced features supported by the # Octavia amphora driver. If you want to enable the Octavia amphora driver, # please see # http://opendev.org/openstack/networking-ovn/tree/devstack/ovn-octavia-provider.conf.sample # If you want to enable a provider network instead of the default private # network after your DevStack environment installation, you *must* set # the Q_USE_PROVIDER_NETWORKING to True, and also give the FIXED_RANGE, # NETWORK_GATEWAY and ALLOCATION_POOL options values that can # be used in your environment. Specifying Q_AGENT is needed to allow devstack # to run various "ip link set" and "ovs-vsctl" commands for the provider # network setup.
#Q_AGENT=openvswitch #Q_USE_PROVIDER_NETWORKING=True #PHYSICAL_NETWORK=providernet #PROVIDER_NETWORK_TYPE=flat #PUBLIC_INTERFACE= #OVS_PHYSICAL_BRIDGE=br-provider #PROVIDER_SUBNET_NAME=provider-subnet # If the admin wants to enable this chassis to host gateway routers for # external connectivity, then set ENABLE_CHASSIS_AS_GW to True. # Then devstack will set ovn-cms-options with enable-chassis-as-gw # in Open_vSwitch table's external_ids column. # If this option is not set on any chassis, all of them with bridge # mappings configured will be eligible to host a gateway. ENABLE_CHASSIS_AS_GW=True # use the following for IPv4 #IP_VERSION=4 #FIXED_RANGE= #NETWORK_GATEWAY= #ALLOCATION_POOL= # use the following for IPv4+IPv6 #IP_VERSION=4+6 #FIXED_RANGE= #NETWORK_GATEWAY= #ALLOCATION_POOL= # IPV6_PROVIDER_FIXED_RANGE= # IPV6_PROVIDER_NETWORK_GATEWAY= # If you wish to use the provider network for public access to the cloud, # set the following #Q_USE_PROVIDERNET_FOR_PUBLIC=True #PUBLIC_NETWORK_NAME= #PUBLIC_NETWORK_GATEWAY= #PUBLIC_PHYSICAL_NETWORK= #IP_VERSION=4 #PUBLIC_SUBNET_NAME= #Q_FLOATING_ALLOCATION_POOL= #FLOATING_RANGE= # NOTE: DO NOT MOVE THESE SECTIONS FROM THE END OF THIS FILE # IF YOU DO, THEY WON'T WORK!!!!! # # Enable Nova automatic host discovery for cell every 2 seconds # Only needed in case of multinode devstack, as otherwise there will be issues # when the 2nd compute node goes online. # The next line is used to insert extra configuration here from the vagrant # script, please don't modify or remove, keep it before any post-config items #EXTRA_CONFIG [[post-config|$NOVA_CONF]] [scheduler] discover_hosts_in_cells_interval = 2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/devstack/ovn-vtep-local.conf.sample0000644000175000017500000000237700000000000023753 0ustar00coreycorey00000000000000# # Sample DevStack local.conf. # # This sample file is intended for running the HW VTEP emulator on a # separate node. # # For this configuration to work, you *must* set the SERVICE_HOST option to the # IP address of the main DevStack host. # [[local|localrc]] DATABASE_PASSWORD=password RABBIT_PASSWORD=password SERVICE_PASSWORD=password SERVICE_TOKEN=password ADMIN_PASSWORD=password # The DevStack plugin defaults to using the ovn branch from the official ovs # repo. You can optionally use a different one. For example, you may want to # use the latest patches in blp's ovn branch: #OVN_REPO=https://github.com/blp/ovs-reviews.git #OVN_BRANCH=ovn enable_plugin neutron https://git.openstack.org/openstack/neutron disable_all_services enable_service ovn-controller-vtep # A UUID to uniquely identify this system. If one is not specified, a random # one will be generated and saved in the file 'ovn-uuid' for re-use in future # DevStack runs. #OVN_UUID= # Whether or not to build custom openvswitch kernel modules from the ovs git # tree. This is enabled by default. This is required unless your distro kernel # includes ovs+conntrack support. This support was first released in Linux 4.3, # and will likely be backported by some distros.
#OVN_BUILD_MODULES=False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/devstack/plugin.sh0000644000175000017500000001210200000000000020575 0ustar00coreycorey00000000000000LIBDIR=$DEST/neutron/devstack/lib source $LIBDIR/dns source $LIBDIR/flavors source $LIBDIR/l2_agent source $LIBDIR/l2_agent_sriovnicswitch source $LIBDIR/l3_agent source $LIBDIR/l3_conntrack_helper source $LIBDIR/ml2 source $LIBDIR/network_segment_range source $LIBDIR/qos source $LIBDIR/ovs source $LIBDIR/segments source $LIBDIR/trunk source $LIBDIR/placement source $LIBDIR/log source $LIBDIR/fip_port_forwarding source $LIBDIR/uplink_status_propagation source $LIBDIR/tag_ports_during_bulk_creation Q_BUILD_OVS_FROM_GIT=$(trueorfalse False Q_BUILD_OVS_FROM_GIT) function is_ovn_enabled { [[ $NEUTRON_AGENT == "ovn" ]] && return 0 return 1 } if [ -f $LIBDIR/${NEUTRON_AGENT}_agent ]; then source $LIBDIR/${NEUTRON_AGENT}_agent fi if [[ "$1" == "stack" ]]; then case "$2" in install) if [[ "$NEUTRON_AGENT" == "openvswitch" ]] && \ [[ "$Q_BUILD_OVS_FROM_GIT" == "True" ]]; then remove_ovs_packages compile_ovs False /usr /var load_conntrack_gre_module start_new_ovs fi if is_ovn_enabled; then install_ovn configure_ovn init_ovn fi ;; post-config) if is_service_enabled neutron-tag-ports-during-bulk-creation; then configure_tag_ports_during_bulk_creation_extension fi if is_service_enabled neutron-uplink-status-propagation; then configure_uplink_status_propagation_extension fi if is_service_enabled q-flavors neutron-flavors; then configure_flavors fi if is_service_enabled q-qos neutron-qos; then configure_qos fi if is_service_enabled q-trunk neutron-trunk; then configure_trunk_extension fi if is_service_enabled q-placement neutron-placement; then configure_placement_extension fi if is_service_enabled q-log neutron-log; then configure_log fi if is_service_enabled q-dns neutron-dns; then configure_dns_extension post_config_dns_extension if is_service_enabled designate; then configure_dns_integration fi fi if is_service_enabled neutron-segments; then configure_segments_extension fi if is_service_enabled neutron-network-segment-range; then configure_network_segment_range fi if is_service_enabled q-agt neutron-agent; then configure_l2_agent fi #Note: the sriov agent should run with the OVS or linux bridge agent #because those are the mechanisms that bind the DHCP and router ports. #Currently devstack lacks the option to run two agents on the same node. #Therefore we create a new service, q-sriov-agt, and the # q-agt/neutron-agent should be OVS or linux bridge.
if is_service_enabled q-sriov-agt neutron-sriov-agent; then configure_l2_agent configure_l2_agent_sriovnicswitch fi if is_service_enabled q-l3 neutron-l3; then if is_service_enabled q-qos neutron-qos; then configure_l3_agent_extension_fip_qos configure_l3_agent_extension_gateway_ip_qos fi if is_service_enabled q-port-forwarding neutron-port-forwarding; then configure_port_forwarding fi if is_service_enabled q-conntrack-helper neutron-conntrack-helper; then configure_l3_conntrack_helper fi configure_l3_agent fi if [ $NEUTRON_CORE_PLUGIN = ml2 ]; then configure_ml2_extension_drivers fi if is_ovn_enabled; then configure_ovn_plugin start_ovn fi ;; extra) if is_service_enabled q-sriov-agt neutron-sriov-agent; then start_l2_agent_sriov fi if is_ovn_enabled; then if [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]]; then if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" != "True" ]]; then echo "OVN_L3_CREATE_PUBLIC_NETWORK=True is being ignored because" echo "NEUTRON_CREATE_INITIAL_NETWORKS is set to False" else create_public_bridge fi fi fi ;; esac elif [[ "$1" == "unstack" ]]; then if is_service_enabled q-sriov-agt neutron-sriov-agent; then stop_l2_agent_sriov fi if [[ "$NEUTRON_AGENT" == "openvswitch" ]] && \ [[ "$Q_BUILD_OVS_FROM_GIT" == "True" ]]; then stop_new_ovs fi if is_ovn_enabled; then stop_ovn cleanup_ovn fi fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/devstack/settings0000644000175000017500000000110600000000000020530 0ustar00coreycorey00000000000000L2_AGENT_EXTENSIONS=${L2_AGENT_EXTENSIONS:-} L3_AGENT_EXTENSIONS=${L3_AGENT_EXTENSIONS:-} NEUTRON_PLACEMENT_AUTH_TYPE=${NEUTRON_PLACEMENT_AUTH_TYPE:-password} NEUTRON_PLACEMENT_USERNAME=${NEUTRON_PLACEMENT_USERNAME:-placement} if is_neutron_legacy_enabled; then NEUTRON_CORE_PLUGIN=$Q_PLUGIN NEUTRON_AGENT=$Q_AGENT # NOTE(ihrachys) those values are defined same way as in # lib/neutron_plugins/ml2:neutron_plugin_configure_common NEUTRON_CORE_PLUGIN_CONF_PATH=etc/neutron/plugins/ml2 NEUTRON_CORE_PLUGIN_CONF=$NEUTRON_CORE_PLUGIN_CONF_PATH/ml2_conf.ini fi ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.0430424 neutron-16.0.0.0b2.dev214/doc/0000755000175000017500000000000000000000000015710 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/Makefile0000644000175000017500000000633700000000000017361 0ustar00coreycorey00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SPHINXSOURCE = source PAPER = BUILDDIR = build # Internal variables. 
PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SPHINXSOURCE) .PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest .DEFAULT_GOAL = html help: @echo "Please use \`make <target>' where <target> is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* if [ -f .autogenerated ] ; then \ cat .autogenerated | xargs rm ; \ rm .autogenerated ; \ fi html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/nova.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/nova.qhc" latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ "run these through (pdf)latex." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/requirements.txt0000644000175000017500000000064400000000000021200 0ustar00coreycorey00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later.
sphinx!=1.6.6,!=1.6.7,!=2.1.0,>=1.6.2;python_version>='3.4' # BSD sphinx_feature_classification>=1.0.0 # Apache-2.0 openstackdocstheme>=1.30.0 # Apache-2.0 oslotest>=3.2.0 # Apache-2.0 reno>=2.5.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.0430424 neutron-16.0.0.0b2.dev214/doc/source/0000755000175000017500000000000000000000000017210 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/_intro.rst0000644000175000017500000000266700000000000021247 0ustar00coreycorey00000000000000.. Copyright 2011- OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Neutron is an OpenStack project to provide "network connectivity as a service" between interface devices (e.g., vNICs) managed by other OpenStack services (e.g., nova). It implements the `OpenStack Networking API`_. .. _`OpenStack Networking API`: https://docs.openstack.org/api-ref/network/ This documentation is generated by the Sphinx toolkit and lives in the source tree. Additional documentation on Neutron and other components of OpenStack can be found on the `OpenStack wiki`_ and the `Neutron section of the wiki`_. The `Neutron Development wiki`_ is also a good resource for new contributors. .. _`OpenStack wiki`: https://wiki.openstack.org .. _`Neutron section of the wiki`: https://wiki.openstack.org/Neutron .. _`Neutron Development wiki`: https://wiki.openstack.org/NeutronDevelopment Enjoy!
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.0430424 neutron-16.0.0.0b2.dev214/doc/source/_static/0000755000175000017500000000000000000000000020636 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/_static/support_matrix.css0000644000175000017500000000072200000000000024451 0ustar00coreycorey00000000000000 .sp_feature_required { font-weight: bold; } .sp_impl_complete { color: rgb(0, 120, 0); font-weight: normal; } .sp_impl_missing { color: rgb(120, 0, 0); font-weight: normal; } .sp_impl_partial { color: rgb(170, 170, 0); font-weight: normal; } .sp_impl_unknown { color: rgb(170, 170, 170); font-weight: normal; } .sp_impl_summary { font-size: 2em; } .sp_cli { font-family: monospace; background-color: #F5F5F5; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.0590425 neutron-16.0.0.0b2.dev214/doc/source/admin/0000755000175000017500000000000000000000000020300 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.0630424 neutron-16.0.0.0b2.dev214/doc/source/admin/archives/0000755000175000017500000000000000000000000022104 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/archives/adv-config.rst0000644000175000017500000000440500000000000024656 0ustar00coreycorey00000000000000============================== Advanced configuration options ============================== This section describes advanced configuration options for various system components. For example, configuration options where the default works but that the user wants to customize options. After installing from packages, ``$NEUTRON_CONF_DIR`` is ``/etc/neutron``. L3 metering agent ~~~~~~~~~~~~~~~~~ You can run an L3 metering agent that enables layer-3 traffic metering. In general, you should launch the metering agent on all nodes that run the L3 agent: .. code-block:: console $ neutron-metering-agent --config-file NEUTRON_CONFIG_FILE \ --config-file L3_METERING_CONFIG_FILE You must configure a driver that matches the plug-in that runs on the service. The driver adds metering to the routing interface. +------------------------------------------+---------------------------------+ | Option | Value | +==========================================+=================================+ | **Open vSwitch** | | +------------------------------------------+---------------------------------+ | interface\_driver | | | ($NEUTRON\_CONF\_DIR/metering\_agent.ini)| openvswitch | +------------------------------------------+---------------------------------+ | **Linux Bridge** | | +------------------------------------------+---------------------------------+ | interface\_driver | | | ($NEUTRON\_CONF\_DIR/metering\_agent.ini)| linuxbridge | +------------------------------------------+---------------------------------+ L3 metering driver ------------------ You must configure any driver that implements the metering abstraction. Currently the only available implementation uses iptables for metering. .. 
code-block:: ini driver = iptables L3 metering service driver -------------------------- To enable L3 metering, you must set the following option in the ``neutron.conf`` file on the host that runs ``neutron-server``: .. code-block:: ini service_plugins = metering ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/archives/adv-features.rst0000644000175000017500000007741700000000000025244 0ustar00coreycorey00000000000000.. _adv-features: ======================================== Advanced features through API extensions ======================================== Several plug-ins implement API extensions that provide capabilities similar to what was available in ``nova-network``. These plug-ins are likely to be of interest to the OpenStack community. Provider networks ~~~~~~~~~~~~~~~~~ Networks can be categorized as either project networks or provider networks. Project networks are created by normal users and details about how they are physically realized are hidden from those users. Provider networks are created with administrative credentials, specifying the details of how the network is physically realized, usually to match some existing network in the data center. Provider networks enable administrators to create networks that map directly to the physical networks in the data center. This is commonly used to give projects direct access to a public network that can be used to reach the Internet. It might also be used to integrate with VLANs in the network that already have a defined meaning (for example, enable a VM from the marketing department to be placed on the same VLAN as bare-metal marketing hosts in the same data center). The provider extension allows administrators to explicitly manage the relationship between Networking virtual networks and underlying physical mechanisms such as VLANs and tunnels. When this extension is supported, Networking client users with administrative privileges see additional provider attributes on all virtual networks and are able to specify these attributes in order to create provider networks. The provider extension is supported by the Open vSwitch and Linux Bridge plug-ins. Configuration of these plug-ins requires familiarity with this extension. Terminology ----------- A number of terms are used in the provider extension and in the configuration of plug-ins supporting the provider extension: .. list-table:: **Provider extension terminology** :widths: 30 70 :header-rows: 1 * - Term - Description * - virtual network - A Networking L2 network (identified by a UUID and optional name) whose ports can be attached as vNICs to Compute instances and to various Networking agents. The Open vSwitch and Linux Bridge plug-ins each support several different mechanisms to realize virtual networks. * - physical network - A network connecting virtualization hosts (such as compute nodes) with each other and with other network resources. Each physical network might support multiple virtual networks. The provider extension and the plug-in configurations identify physical networks using simple string names. * - project network - A virtual network that a project or an administrator creates. The physical details of the network are not exposed to the project. * - provider network - A virtual network administratively created to map to a specific network in the data center, typically to enable direct access to non-OpenStack resources on that network. 
Projects can be given access to provider networks. * - VLAN network - A virtual network implemented as packets on a specific physical network containing IEEE 802.1Q headers with a specific VID field value. VLAN networks sharing the same physical network are isolated from each other at L2 and can even have overlapping IP address spaces. Each distinct physical network supporting VLAN networks is treated as a separate VLAN trunk, with a distinct space of VID values. Valid VID values are 1 through 4094. * - flat network - A virtual network implemented as packets on a specific physical network containing no IEEE 802.1Q header. Each physical network can realize at most one flat network. * - local network - A virtual network that allows communication within each host, but not across a network. Local networks are intended mainly for single-node test scenarios, but can have other uses. * - GRE network - A virtual network implemented as network packets encapsulated using GRE. GRE networks are also referred to as *tunnels*. GRE tunnel packets are routed by the IP routing table for the host, so GRE networks are not associated by Networking with specific physical networks. * - Virtual Extensible LAN (VXLAN) network - VXLAN is a proposed encapsulation protocol for running an overlay network on existing Layer 3 infrastructure. An overlay network is a virtual network that is built on top of existing network Layer 2 and Layer 3 technologies to support elastic compute architectures. The ML2, Open vSwitch, and Linux Bridge plug-ins support VLAN networks, flat networks, and local networks. Only the ML2 and Open vSwitch plug-ins currently support GRE and VXLAN networks, provided that the required features exist in the host's Linux kernel, Open vSwitch, and iproute2 packages. Provider attributes ------------------- The provider extension extends the Networking network resource with these attributes: .. list-table:: **Provider network attributes** :widths: 10 10 10 49 :header-rows: 1 * - Attribute name - Type - Default Value - Description * - provider:network\_type - String - N/A - The physical mechanism by which the virtual network is implemented. Possible values are ``flat``, ``vlan``, ``local``, ``gre``, and ``vxlan``, corresponding to flat networks, VLAN networks, local networks, GRE networks, and VXLAN networks as defined above. All types of provider networks can be created by administrators, while project networks can be implemented as ``vlan``, ``gre``, ``vxlan``, or ``local`` network types depending on plug-in configuration. * - provider:physical_network - String - If a physical network named "default" has been configured and if provider:network_type is ``flat`` or ``vlan``, then "default" is used. - The name of the physical network over which the virtual network is implemented for flat and VLAN networks. Not applicable to the ``local``, ``vxlan``, or ``gre`` network types. * - provider:segmentation_id - Integer - N/A - For VLAN networks, the VLAN VID on the physical network that realizes the virtual network. Valid VLAN VIDs are 1 through 4094. For GRE networks, the tunnel ID. Valid tunnel IDs are any 32-bit unsigned integer. Not applicable to the ``flat`` or ``local`` network types. To view or set provider extended attributes, a client must be authorized for the ``extension:provider_network:view`` and ``extension:provider_network:set`` actions in the Networking policy configuration. The default Networking configuration authorizes both actions for users with the admin role. 
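For example, an administrator authorized for these actions could create a VLAN provider network with a command along the following lines; the network name, physical network name, and VLAN ID below are illustrative placeholders, not values from this guide: .. code-block:: console $ openstack network create --provider-network-type vlan \ --provider-physical-network physnet1 --provider-segment 1000 provider-vlan-net 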
An authorized client or an administrative user can view and set the provider extended attributes through Networking API calls. See the section called :ref:`Authentication and authorization` for details on policy configuration. .. _L3-routing-and-NAT: L3 routing and NAT ~~~~~~~~~~~~~~~~~~ The Networking API provides abstract L2 network segments that are decoupled from the technology used to implement the L2 network. Networking includes an API extension that provides abstract L3 routers that API users can dynamically provision and configure. These Networking routers can connect multiple L2 Networking networks and can also provide a gateway that connects one or more private L2 networks to a shared external network. For example, a public network for access to the Internet. See the `OpenStack Configuration Reference `_ for details on common models of deploying Networking L3 routers. The L3 router provides basic NAT capabilities on gateway ports that uplink the router to external networks. This router SNATs all traffic by default and supports floating IPs, which creates a static one-to-one mapping from a public IP on the external network to a private IP on one of the other subnets attached to the router. This allows a project to selectively expose VMs on private networks to other hosts on the external network (and often to all hosts on the Internet). You can allocate and map floating IPs from one port to another, as needed. Basic L3 operations ------------------- External networks are visible to all users. However, the default policy settings enable only administrative users to create, update, and delete external networks. This table shows example :command:`openstack` commands that enable you to complete basic L3 operations: .. list-table:: **Basic L3 Operations** :widths: 30 50 :header-rows: 1 * - Operation - Command * - Creates external networks. - .. code-block:: console $ openstack network create public --external $ openstack subnet create --network public --subnet-range 172.16.1.0/24 subnetname * - Lists external networks. - .. code-block:: console $ openstack network list --external * - Creates an internal-only router that connects to multiple L2 networks privately. - .. code-block:: console $ openstack network create net1 $ openstack subnet create --network net1 --subnet-range 10.0.0.0/24 subnetname1 $ openstack network create net2 $ openstack subnet create --network net2 --subnet-range 10.0.1.0/24 subnetname2 $ openstack router create router1 $ openstack router add subnet router1 subnetname1 $ openstack router add subnet router1 subnetname2 An internal router port can have only one IPv4 subnet and multiple IPv6 subnets that belong to the same network ID. When you call ``router-interface-add`` with an IPv6 subnet, this operation adds the interface to an existing internal port with the same network ID. If a port with the same network ID does not exist, a new port is created. * - Connects a router to an external network, which enables that router to act as a NAT gateway for external connectivity. - .. code-block:: console $ openstack router set --external-gateway EXT_NET_ID router1 $ openstack router set --route destination=172.24.4.0/24,gateway=172.24.4.1 router1 The router obtains an interface with the gateway_ip address of the subnet and this interface is attached to a port on the L2 Networking network associated with the subnet. The router also gets a gateway interface to the specified external network. 
This provides SNAT connectivity to the external network as well as support for floating IPs allocated on that external network. Commonly, an external network maps to a network in the provider. * - Lists routers. - .. code-block:: console $ openstack router list * - Shows information for a specified router. - .. code-block:: console $ openstack router show ROUTER_ID * - Shows all internal interfaces for a router. - .. code-block:: console $ openstack port list --router ROUTER_ID $ openstack port list --router ROUTER_NAME * - Identifies the PORT_ID that represents the VM NIC to which the floating IP should map. - .. code-block:: console $ openstack port list -c ID -c "Fixed IP Addresses" --server INSTANCE_ID This port must be on a Networking subnet that is attached to a router uplinked to the external network used to create the floating IP. Conceptually, this is because the router must be able to perform the Destination NAT (DNAT) rewriting of packets from the floating IP address (chosen from a subnet on the external network) to the internal fixed IP (chosen from a private subnet that is behind the router). * - Creates a floating IP address and associates it with a port. - .. code-block:: console $ openstack floating ip create EXT_NET_ID $ openstack floating ip add port FLOATING_IP_ID --port-id INTERNAL_VM_PORT_ID * - Creates a floating IP on a specific subnet in the external network. - .. code-block:: console $ openstack floating ip create EXT_NET_ID --subnet SUBNET_ID If there are multiple subnets in the external network, you can choose a specific subnet based on quality and costs. * - Creates a floating IP address and associates it with a port, in a single step. - .. code-block:: console $ openstack floating ip create --port INTERNAL_VM_PORT_ID EXT_NET_ID * - Lists floating IPs. - .. code-block:: console $ openstack floating ip list * - Finds the floating IP for a specified VM port. - .. code-block:: console $ openstack floating ip list --port INTERNAL_VM_PORT_ID * - Disassociates a floating IP address. - .. code-block:: console $ openstack floating ip remove port FLOATING_IP_ID * - Deletes the floating IP address. - .. code-block:: console $ openstack floating ip delete FLOATING_IP_ID * - Clears the gateway. - .. code-block:: console $ openstack router unset --external-gateway router1 * - Removes the interfaces from the router. - .. code-block:: console $ openstack router remove subnet router1 SUBNET_ID If this subnet ID is the last subnet on the port, this operation deletes the port itself. * - Deletes the router. - .. code-block:: console $ openstack router delete router1 Security groups ~~~~~~~~~~~~~~~ Security groups and security group rules allow administrators and projects to specify the type of traffic and direction (ingress/egress) that is allowed to pass through a port. A security group is a container for security group rules. When a port is created in Networking, it is associated with a security group. If a security group is not specified, the port is associated with a 'default' security group. By default, this group drops all ingress traffic and allows all egress. Rules can be added to this group in order to change the behavior. To use the Compute security group APIs or use Compute to orchestrate the creation of ports for instances on specific security groups, you must complete additional configuration. You must configure the ``/etc/nova/nova.conf`` file and set the ``use_neutron=True`` option on every node that runs nova-compute, nova-conductor, and nova-api. 
After you make this change, restart those nova services to pick up this change. Then, you can use both the Compute and OpenStack Networking security group APIs at the same time. .. note:: - To use the Compute security group API with Networking, the Networking plug-in must implement the security group API. The following plug-ins currently implement this: ML2, Open vSwitch, Linux Bridge, NEC, and VMware NSX. - You must configure the correct firewall driver in the ``securitygroup`` section of the plug-in/agent configuration file. Some plug-ins and agents, such as Linux Bridge Agent and Open vSwitch Agent, use the no-operation driver as the default, which results in non-working security groups. - When using the security group API through Compute, security groups are applied to all ports on an instance. The reason for this is that the Compute security group APIs are instance-based rather than port-based, as they are in Networking. Basic security group operations ------------------------------- This table shows example neutron commands that enable you to complete basic security group operations: .. list-table:: **Basic security group operations** :widths: 30 50 :header-rows: 1 * - Operation - Command * - Creates a security group for our web servers. - .. code-block:: console $ openstack security group create webservers \ --description "security group for webservers" * - Lists security groups. - .. code-block:: console $ openstack security group list * - Creates a security group rule to allow port 80 ingress. - .. code-block:: console $ openstack security group rule create --ingress \ --protocol tcp SECURITY_GROUP_UUID * - Lists security group rules. - .. code-block:: console $ openstack security group rule list * - Deletes a security group rule. - .. code-block:: console $ openstack security group rule delete SECURITY_GROUP_RULE_UUID * - Deletes a security group. - .. code-block:: console $ openstack security group delete SECURITY_GROUP_UUID * - Creates a port and associates two security groups. - .. code-block:: console $ openstack port create port1 --security-group SECURITY_GROUP_ID1 \ --security-group SECURITY_GROUP_ID2 --network NETWORK_ID * - Removes security groups from a port. - .. code-block:: console $ openstack port set --no-security-group PORT_ID Plug-in specific extensions ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Each vendor can choose to implement additional API extensions to the core API. This section describes the extensions for each plug-in. VMware NSX extensions --------------------- These sections explain NSX plug-in extensions. VMware NSX QoS extension ^^^^^^^^^^^^^^^^^^^^^^^^ The VMware NSX QoS extension rate-limits network ports to guarantee a specific amount of bandwidth for each port. This extension, by default, is only accessible by a project with an admin role but is configurable through the ``policy.json`` file. To use this extension, create a queue and specify the min/max bandwidth rates (kbps) and optionally set the QoS Marking and DSCP value (if your network fabric uses these values to make forwarding decisions). Once created, you can associate a queue with a network. Then, when ports are created on that network, they are automatically associated with the specific queue size that was associated with the network. Because one queue size for every port on a network might not be optimal, a scaling factor from the nova flavor ``rxtx_factor`` is passed in from Compute when creating the port to scale the queue. 
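As a sketch of how this scaling is driven from the Compute side, a flavor whose instances should receive twice the default queue size could be created with the ``--rxtx-factor`` option; the flavor name and sizing values below are illustrative assumptions, not values from this guide: .. code-block:: console $ openstack flavor create --ram 2048 --disk 20 --vcpus 1 \ --rxtx-factor 2.0 m1.rxtx2 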
Lastly, if you want to set a specific baseline QoS policy for the amount of bandwidth a single port can use (unless a network queue is specified with the network a port is created on), a default queue can be created in Networking, which then causes ports created to be associated with a queue of that size times the rxtx scaling factor. Note that after a network or default queue is specified, queues are added to ports that are subsequently created but are not added to existing ports. Basic VMware NSX QoS operations ''''''''''''''''''''''''''''''' This table shows example neutron commands that enable you to complete basic queue operations: .. list-table:: **Basic VMware NSX QoS operations** :widths: 30 50 :header-rows: 1 * - Operation - Command * - Creates QoS queue (admin-only). - .. code-block:: console $ neutron queue-create --min 10 --max 1000 myqueue * - Associates a queue with a network. - .. code-block:: console $ neutron net-create network --queue_id QUEUE_ID * - Creates a default system queue. - .. code-block:: console $ neutron queue-create --default True --min 10 --max 2000 default * - Lists QoS queues. - .. code-block:: console $ neutron queue-list * - Deletes a QoS queue. - .. code-block:: console $ neutron queue-delete QUEUE_ID_OR_NAME VMware NSX provider networks extension ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Provider networks can be implemented in different ways by the underlying NSX platform. The *FLAT* and *VLAN* network types use bridged transport connectors. These network types enable the attachment of a large number of ports. To handle the increased scale, the NSX plug-in can back a single OpenStack Network with a chain of NSX logical switches. You can specify the maximum number of ports on each logical switch in this chain with the ``max_lp_per_bridged_ls`` parameter, which has a default value of 5,000. The recommended value for this parameter varies with the NSX version running in the back-end, as shown in the following table. **Recommended values for max_lp_per_bridged_ls** +---------------+---------------------+ | NSX version | Recommended Value | +===============+=====================+ | 2.x | 64 | +---------------+---------------------+ | 3.0.x | 5,000 | +---------------+---------------------+ | 3.1.x | 5,000 | +---------------+---------------------+ | 3.2.x | 10,000 | +---------------+---------------------+ In addition to these network types, the NSX plug-in also supports a special *l3_ext* network type, which maps external networks to specific NSX gateway services as discussed in the next section. VMware NSX L3 extension ^^^^^^^^^^^^^^^^^^^^^^^ NSX exposes its L3 capabilities through gateway services, which are usually configured out of band from OpenStack. To use NSX with L3 capabilities, first create an L3 gateway service in the NSX Manager. Next, in ``/etc/neutron/plugins/vmware/nsx.ini`` set ``default_l3_gw_service_uuid`` to this value. By default, routers are mapped to this gateway service. VMware NSX L3 extension operations '''''''''''''''''''''''''''''''''' Create an external network and map it to a specific NSX gateway service: .. code-block:: console $ openstack network create public --external --provider-network-type l3_ext \ --provider-physical-network L3_GATEWAY_SERVICE_UUID Terminate traffic on a specific VLAN from an NSX gateway service: .. 
code-block:: console $ openstack network create public --external --provider-network-type l3_ext \ --provider-physical-network L3_GATEWAY_SERVICE_UUID --provider-segment VLAN_ID Operational status synchronization in the VMware NSX plug-in ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Starting with the Havana release, the VMware NSX plug-in provides an asynchronous mechanism for retrieving the operational status for neutron resources from the NSX back-end; this applies to *network*, *port*, and *router* resources. The back-end is polled periodically and the status for every resource is retrieved; then the status in the Networking database is updated only for the resources for which a status change occurred. As operational status is now retrieved asynchronously, performance for ``GET`` operations is consistently improved. Data to retrieve from the back-end are divided into chunks in order to avoid expensive API requests; this is achieved by leveraging the NSX API's response paging capabilities. The minimum chunk size can be specified using a configuration option; the actual chunk size is then determined dynamically according to the total number of resources to retrieve, the interval between two synchronization task runs, and the minimum delay between two subsequent requests to the NSX back-end. The operational status synchronization can be tuned or disabled using the configuration options reported in this table; it is, however, worth noting that the default values work fine in most cases. .. list-table:: **Configuration options for tuning operational status synchronization in the NSX plug-in** :widths: 10 10 10 10 35 :header-rows: 1 * - Option name - Group - Default value - Type and constraints - Notes * - ``state_sync_interval`` - ``nsx_sync`` - 10 seconds - Integer; no constraint. - Interval in seconds between two runs of the synchronization task. If the synchronization task takes more than ``state_sync_interval`` seconds to execute, a new instance of the task is started as soon as the other is completed. Setting the value for this option to 0 will disable the synchronization task. * - ``max_random_sync_delay`` - ``nsx_sync`` - 0 seconds - Integer. Must not exceed ``min_sync_req_delay`` - When different from zero, a random delay between 0 and ``max_random_sync_delay`` will be added before processing the next chunk. * - ``min_sync_req_delay`` - ``nsx_sync`` - 1 second - Integer. Must not exceed ``state_sync_interval``. - The value of this option can be tuned according to the observed load on the NSX controllers. Lower values will result in faster synchronization, but might increase the load on the controller cluster. * - ``min_chunk_size`` - ``nsx_sync`` - 500 resources - Integer; no constraint. - Minimum number of resources to retrieve from the back-end for each synchronization chunk. The expected number of synchronization chunks is given by the ratio between ``state_sync_interval`` and ``min_sync_req_delay``. The size of a chunk might increase if the total number of resources is such that more than ``min_chunk_size`` resources must be fetched in one chunk with the current number of chunks. * - ``always_read_status`` - ``nsx_sync`` - False - Boolean; no constraint. - When this option is enabled, the operational status will always be retrieved from the NSX back-end at every ``GET`` request. In this case it is advisable to disable the synchronization task. 
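As a minimal sketch, a deployment tuning this mechanism would set these options in the ``nsx_sync`` group of the NSX plug-in configuration file; the values below simply restate the defaults from the table above: .. code-block:: ini [nsx_sync] state_sync_interval = 10 max_random_sync_delay = 0 min_sync_req_delay = 1 min_chunk_size = 500 always_read_status = False 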
When running multiple OpenStack Networking server instances, the status synchronization task should not run on every node; doing so sends unnecessary traffic to the NSX back-end and performs unnecessary DB operations. Set the ``state_sync_interval`` configuration option to a non-zero value exclusively on a node designated for back-end status synchronization. The ``fields=status`` parameter in Networking API requests always triggers an explicit query to the NSX back end, even when you enable asynchronous state synchronization. For example, ``GET /v2.0/networks/NET_ID?fields=status&fields=name``. Big Switch plug-in extensions ----------------------------- This section explains the Big Switch neutron plug-in-specific extension. Big Switch router rules ^^^^^^^^^^^^^^^^^^^^^^^ Big Switch allows router rules to be added to each project router. These rules can be used to enforce routing policies such as denying traffic between subnets or traffic to external networks. By enforcing these at the router level, network segmentation policies can be enforced across many VMs that have differing security groups. Router rule attributes '''''''''''''''''''''' Each project router has a set of router rules associated with it. Each router rule has the attributes in this table. Router rules and their attributes can be set using the :command:`neutron router-update` command, through the horizon interface or the Networking API. .. list-table:: **Big Switch Router rule attributes** :widths: 10 10 10 35 :header-rows: 1 * - Attribute name - Required - Input type - Description * - source - Yes - A valid CIDR or one of the keywords 'any' or 'external' - The network that a packet's source IP must match for the rule to be applied. * - destination - Yes - A valid CIDR or one of the keywords 'any' or 'external' - The network that a packet's destination IP must match for the rule to be applied. * - action - Yes - 'permit' or 'deny' - Determines whether or not the matched packets will be allowed to cross the router. * - nexthop - No - A plus-separated (+) list of next-hop IP addresses. For example, ``1.1.1.1+1.1.1.2``. - Overrides the default virtual router used to handle traffic for packets that match the rule. Order of rule processing '''''''''''''''''''''''' The order of router rules has no effect. Overlapping rules are evaluated using longest prefix matching on the source and destination fields. The source field is matched first, so it always takes higher precedence over the destination field. In other words, longest prefix matching is used on the destination field only if there are multiple matching rules with the same source. Big Switch router rules operations '''''''''''''''''''''''''''''''''' Router rules are configured with a router update operation in OpenStack Networking. The update overrides any previous rules, so all rules must be provided at the same time. Update a router with rules to permit traffic by default but block traffic from external networks to the 10.10.10.0/24 subnet: .. code-block:: console $ neutron router-update ROUTER_UUID --router_rules type=dict list=true \ source=any,destination=any,action=permit \ source=external,destination=10.10.10.0/24,action=deny Specify alternate next-hop addresses for a specific subnet: .. 
code-block:: console $ neutron router-update ROUTER_UUID --router_rules type=dict list=true \ source=any,destination=any,action=permit \ source=10.10.10.0/24,destination=any,action=permit,nexthops=10.10.10.254+10.10.10.253 Block traffic between two subnets while allowing everything else: .. code-block:: console $ neutron router-update ROUTER_UUID --router_rules type=dict list=true \ source=any,destination=any,action=permit \ source=10.10.10.0/24,destination=10.20.20.20/24,action=deny L3 metering ~~~~~~~~~~~ The L3 metering API extension enables administrators to configure IP ranges and assign a specified label to them to be able to measure traffic that goes through a virtual router. The L3 metering extension is decoupled from the technology that implements the measurement. Two abstractions have been added: the metering label and the metering rule. A metering label can contain metering rules, and because a metering label is associated with a project, all virtual routers in this project are associated with this label. Basic L3 metering operations ---------------------------- Only administrators can manage the L3 metering labels and rules. This table shows example :command:`neutron` commands that enable you to complete basic L3 metering operations: .. list-table:: **Basic L3 operations** :widths: 20 50 :header-rows: 1 * - Operation - Command * - Creates a metering label. - .. code-block:: console $ openstack network meter label create LABEL1 \ --description "DESCRIPTION_LABEL1" * - Lists metering labels. - .. code-block:: console $ openstack network meter label list * - Shows information for a specified label. - .. code-block:: console $ openstack network meter label show LABEL_UUID $ openstack network meter label show LABEL1 * - Deletes a metering label. - .. code-block:: console $ openstack network meter label delete LABEL_UUID $ openstack network meter label delete LABEL1 * - Creates a metering rule. - .. code-block:: console $ openstack network meter label rule create LABEL_UUID \ --remote-ip-prefix CIDR \ --direction DIRECTION --exclude For example: .. code-block:: console $ openstack network meter label rule create label1 \ --remote-ip-prefix 10.0.0.0/24 --direction ingress $ openstack network meter label rule create label1 \ --remote-ip-prefix 20.0.0.0/24 --exclude * - Lists all metering label rules. - .. code-block:: console $ openstack network meter label rule list * - Shows information for a specified label rule. - .. code-block:: console $ openstack network meter label rule show RULE_UUID * - Deletes a metering label rule. - .. code-block:: console $ openstack network meter label rule delete RULE_UUID * - Lists the values of created metering label rules. - .. code-block:: console $ ceilometer sample-list -m SNMP_MEASUREMENT For example: .. code-block:: console $ ceilometer sample-list -m hardware.network.bandwidth.bytes $ ceilometer sample-list -m hardware.network.incoming.bytes $ ceilometer sample-list -m hardware.network.outgoing.bytes $ ceilometer sample-list -m hardware.network.outgoing.errors ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/archives/adv-operational-features.rst0000644000175000017500000000727100000000000027546 0ustar00coreycorey00000000000000============================= Advanced operational features ============================= Logging settings ~~~~~~~~~~~~~~~~ Networking components use the Python logging module to do logging. 
Logging configuration can be provided in ``neutron.conf`` or as command-line options. Command options override ones in ``neutron.conf``. To configure logging for Networking components, use one of these methods: - Provide logging settings in a logging configuration file. See `Python logging how-to `__ to learn more about logging. - Provide logging setting in ``neutron.conf``. .. code-block:: ini [DEFAULT] # Default log level is WARNING # Show debugging output in logs (sets DEBUG log level output) # debug = False # log_date_format = %Y-%m-%d %H:%M:%S # use_syslog = False # syslog_log_facility = LOG_USER # if use_syslog is False, we can set log_file and log_dir. # if use_syslog is False and we do not set log_file, # the log will be printed to stdout. # log_file = # log_dir = Notifications ~~~~~~~~~~~~~ Notifications can be sent when Networking resources such as network, subnet and port are created, updated or deleted. Notification options -------------------- To support DHCP agent, ``rpc_notifier`` driver must be set. To set up the notification, edit notification options in ``neutron.conf``: .. code-block:: ini # Driver or drivers to handle sending notifications. (multi # valued) # notification_driver=messagingv2 # AMQP topic used for OpenStack notifications. (list value) # Deprecated group/name - [rpc_notifier2]/topics notification_topics = notifications Setting cases ------------- Logging and RPC ^^^^^^^^^^^^^^^ These options configure the Networking server to send notifications through logging and RPC. The logging options are described in OpenStack Configuration Reference . RPC notifications go to ``notifications.info`` queue bound to a topic exchange defined by ``control_exchange`` in ``neutron.conf``. **Notification System Options** A notification can be sent when a network, subnet, or port is created, updated or deleted. The notification system options are: * ``notification_driver`` Defines the driver or drivers to handle the sending of a notification. The six available options are: * ``messaging`` Send notifications using the 1.0 message format. * ``messagingv2`` Send notifications using the 2.0 message format (with a message envelope). * ``routing`` Configurable routing notifier (by priority or event_type). * ``log`` Publish notifications using Python logging infrastructure. * ``test`` Store notifications in memory for test verification. * ``noop`` Disable sending notifications entirely. * ``default_notification_level`` Is used to form topic names or to set a logging level. * ``default_publisher_id`` Is a part of the notification payload. * ``notification_topics`` AMQP topic used for OpenStack notifications. They can be comma-separated values. The actual topic names will be the values of ``default_notification_level``. * ``control_exchange`` This is an option defined in oslo.messaging. It is the default exchange under which topics are scoped. May be overridden by an exchange name specified in the ``transport_url`` option. It is a string value. Below is a sample ``neutron.conf`` configuration file: .. 
code-block:: ini notification_driver = messagingv2 default_notification_level = INFO host = myhost.com default_publisher_id = $host notification_topics = notifications control_exchange = openstack ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/archives/arch.rst0000644000175000017500000001053700000000000023561 0ustar00coreycorey00000000000000======================= Networking architecture ======================= Before you deploy Networking, it is useful to understand the Networking services and how they interact with the OpenStack components. Overview ~~~~~~~~ Networking is a standalone component in the OpenStack modular architecture. It is positioned alongside OpenStack components such as Compute, Image service, Identity, or Dashboard. Like those components, a deployment of Networking often involves deploying several services to a variety of hosts. The Networking server uses the neutron-server daemon to expose the Networking API and enable administration of the configured Networking plug-in. Typically, the plug-in requires access to a database for persistent storage (also similar to other OpenStack services). If your deployment uses a controller host to run centralized Compute components, you can deploy the Networking server to that same host. However, Networking is entirely standalone and can be deployed to a dedicated host. Depending on your configuration, Networking can also include the following agents: +----------------------------+---------------------------------------------+ | Agent | Description | +============================+=============================================+ |**plug-in agent** | | |(``neutron-*-agent``) | Runs on each hypervisor to perform | | | local vSwitch configuration. The agent that | | | runs, depends on the plug-in that you use. | | | Certain plug-ins do not require an agent. | +----------------------------+---------------------------------------------+ |**dhcp agent** | | |(``neutron-dhcp-agent``) | Provides DHCP services to project networks. | | | Required by certain plug-ins. | +----------------------------+---------------------------------------------+ |**l3 agent** | | |(``neutron-l3-agent``) | Provides L3/NAT forwarding to provide | | | external network access for VMs on project | | | networks. Required by certain plug-ins. | +----------------------------+---------------------------------------------+ |**metering agent** | | |(``neutron-metering-agent``)| Provides L3 traffic metering for project | | | networks. | +----------------------------+---------------------------------------------+ These agents interact with the main neutron process through RPC (for example, RabbitMQ or Qpid) or through the standard Networking API. In addition, Networking integrates with OpenStack components in a number of ways: - Networking relies on the Identity service (keystone) for the authentication and authorization of all API requests. - Compute (nova) interacts with Networking through calls to its standard API. As part of creating a VM, the ``nova-compute`` service communicates with the Networking API to plug each virtual NIC on the VM into a particular network. - The dashboard (horizon) integrates with the Networking API, enabling administrators and project users to create and manage network services through a web-based GUI. 
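As an illustration of the Compute integration described above, booting an instance on a specific network causes ``nova-compute`` to ask the Networking API for a port on that network; a hypothetical invocation (the image, flavor, network, and server names are placeholders) looks like: .. code-block:: console $ openstack server create --image IMAGE --flavor FLAVOR \ --network net1 vm1 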
VMware NSX integration ~~~~~~~~~~~~~~~~~~~~~~ OpenStack Networking uses the NSX plug-in to integrate with an existing VMware vCenter deployment. When installed on the network nodes, the NSX plug-in enables an NSX controller to centrally manage configuration settings and push them to managed network nodes. Network nodes are considered managed when they are added as hypervisors to the NSX controller. The diagrams below depict some VMware NSX deployment examples. The first diagram illustrates the traffic flow between VMs on separate Compute nodes, and the second diagram between two VMs on a single compute node. Note the placement of the VMware NSX plug-in and the neutron-server service on the network node. The green arrow indicates the management relationship between the NSX controller and the network node. .. figure:: figures/vmware_nsx_ex1.png .. figure:: figures/vmware_nsx_ex2.png ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/archives/auth.rst0000644000175000017500000001662100000000000023605 0ustar00coreycorey00000000000000.. _Authentication and authorization: ================================ Authentication and authorization ================================ Networking uses the Identity service as the default authentication service. When the Identity service is enabled, users who submit requests to the Networking service must provide an authentication token in the ``X-Auth-Token`` request header. Users obtain this token by authenticating with the Identity service endpoint. For more information about authentication with the Identity service, see `OpenStack Identity service API v3 Reference `__. When the Identity service is enabled, it is not mandatory to specify the project ID for resources in create requests because the project ID is derived from the authentication token. The default authorization settings only allow administrative users to create resources on behalf of a different project. Networking uses information received from Identity to authorize user requests. Networking handles two kinds of authorization policies: - **Operation-based** policies specify access criteria for specific operations, possibly with fine-grained control over specific attributes. - **Resource-based** policies specify whether access to a specific resource is granted or not according to the permissions configured for the resource (currently available only for the network resource). The actual authorization policies enforced in Networking might vary from deployment to deployment. The policy engine reads entries from the ``policy.json`` file. The actual location of this file might vary from distribution to distribution. Entries can be updated while the system is running, and no service restart is required. Every time the policy file is updated, the policies are automatically reloaded. Currently the only way of updating such policies is to edit the policy file. In this section, the terms *policy* and *rule* refer to objects that are specified in the same way in the policy file. There are no syntax differences between a rule and a policy. A policy is something that is matched directly from the Networking policy engine. A rule is an element in a policy, which is evaluated. For instance in ``"create_subnet": "rule:admin_or_network_owner"``, *create_subnet* is a policy, and *admin_or_network_owner* is a rule. 
Policies are triggered by the Networking policy engine whenever one of them matches a Networking API operation or a specific attribute being used in a given operation. For instance, the ``create_subnet`` policy is triggered every time a ``POST /v2.0/subnets`` request is sent to the Networking server; on the other hand, ``create_network:shared`` is triggered every time the *shared* attribute is explicitly specified (and set to a value different from its default) in a ``POST /v2.0/networks`` request. It is also worth mentioning that policies can also be related to specific API extensions; for instance ``extension:provider_network:set`` is triggered if the attributes defined by the Provider Network extensions are specified in an API request. An authorization policy can be composed of one or more rules. If multiple rules are specified, the policy succeeds if any of the rules evaluates successfully; if an API operation matches multiple policies, then all the policies must evaluate successfully. Also, authorization rules are recursive. Once a rule is matched, the rule(s) can be resolved to another rule, until a terminal rule is reached. The Networking policy engine currently defines the following kinds of terminal rules: - **Role-based rules** evaluate successfully if the user who submits the request has the specified role. For instance ``"role:admin"`` is successful if the user who submits the request is an administrator. - **Field-based rules** evaluate successfully if a field of the resource specified in the current request matches a specific value. For instance ``"field:networks:shared=True"`` is successful if the ``shared`` attribute of the ``network`` resource is set to true. - **Generic rules** compare an attribute in the resource with an attribute extracted from the user's security credentials and evaluate successfully if the comparison is successful. For instance ``"tenant_id:%(tenant_id)s"`` is successful if the project identifier in the resource is equal to the project identifier of the user submitting the request. This extract is from the default ``policy.json`` file: - A rule that evaluates successfully if the current user is an administrator or the owner of the resource specified in the request (project identifier is equal). .. code-block:: none { "admin_or_owner": "role:admin or tenant_id:%(tenant_id)s", "admin_or_network_owner": "role:admin or tenant_id:%(network_tenant_id)s", "admin_only": "role:admin", "regular_user": "", "shared": "field:networks:shared=True", "default": - The default policy that is always evaluated if an API operation does not match any of the policies in ``policy.json``. .. code-block:: none "rule:admin_or_owner", "create_subnet": "rule:admin_or_network_owner", "get_subnet": "rule:admin_or_owner or rule:shared", "update_subnet": "rule:admin_or_network_owner", "delete_subnet": "rule:admin_or_network_owner", "create_network": "", "get_network": "rule:admin_or_owner or - This policy evaluates successfully if either *admin_or_owner*, or *shared* evaluates successfully. .. code-block:: none rule:shared", "create_network:shared": "rule:admin_only", - This policy restricts the ability to manipulate the *shared* attribute for a network to administrators only. .. 
code-block:: none "update_network": "rule:admin_or_owner", "delete_network": "rule:admin_or_owner", "create_port": "", "create_port:mac_address": "rule:admin_or_network_owner", "create_port:fixed_ips": - This policy restricts the ability to manipulate the *mac_address* attribute for a port only to administrators and the owner of the network where the port is attached. .. code-block:: none "rule:admin_or_network_owner", "get_port": "rule:admin_or_owner", "update_port": "rule:admin_or_owner", "delete_port": "rule:admin_or_owner" } In some cases, some operations are restricted to administrators only. This example shows you how to modify a policy file to permit projects to define networks, see their resources, and permit administrative users to perform all other operations: .. code-block:: none { "admin_or_owner": "role:admin or tenant_id:%(tenant_id)s", "admin_only": "role:admin", "regular_user": "", "default": "rule:admin_only", "create_subnet": "rule:admin_only", "get_subnet": "rule:admin_or_owner", "update_subnet": "rule:admin_only", "delete_subnet": "rule:admin_only", "create_network": "", "get_network": "rule:admin_or_owner", "create_network:shared": "rule:admin_only", "update_network": "rule:admin_or_owner", "delete_network": "rule:admin_or_owner", "create_port": "rule:admin_only", "get_port": "rule:admin_or_owner", "update_port": "rule:admin_only", "delete_port": "rule:admin_only" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/archives/config-agents.rst0000644000175000017500000003463100000000000025371 0ustar00coreycorey00000000000000======================== Configure neutron agents ======================== Plug-ins typically have requirements for particular software that must be run on each node that handles data packets. This includes any node that runs nova-compute and nodes that run dedicated OpenStack Networking service agents such as ``neutron-dhcp-agent``, ``neutron-l3-agent``, or ``neutron-metering-agent``. A data-forwarding node typically has a network interface with an IP address on the management network and another interface on the data network. This section shows you how to install and configure a subset of the available plug-ins, which might include the installation of switching software (for example, ``Open vSwitch``) and agents used to communicate with the ``neutron-server`` process running elsewhere in the data center. Configure data-forwarding nodes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Node set up: NSX plug-in ------------------------ If you use the NSX plug-in, you must also install Open vSwitch on each data-forwarding node. However, you do not need to install an additional agent on each node. .. warning:: It is critical that you run an Open vSwitch version that is compatible with the current version of the NSX Controller software. Do not use the Open vSwitch version that is installed by default on Ubuntu. Instead, use the Open vSwitch version that is provided on the VMware support portal for your NSX Controller version. **To set up each node for the NSX plug-in** #. Ensure that each data-forwarding node has an IP address on the management network, and an IP address on the data network that is used for tunneling data traffic. For full details on configuring your forwarding node, see the `NSX Administration Guide `__. #. Use the NSX Administrator Guide to add the node as a Hypervisor by using the NSX Manager GUI. 
Even if your forwarding node has no VMs and is only used for service agents like ``neutron-dhcp-agent``, it should still be added to NSX as a Hypervisor. #. After following the NSX Administrator Guide, use the page for this Hypervisor in the NSX Manager GUI to confirm that the node is properly connected to the NSX Controller Cluster and that the NSX Controller Cluster can see the ``br-int`` integration bridge. Configure DHCP agent ~~~~~~~~~~~~~~~~~~~~ The DHCP service agent is compatible with all existing plug-ins and is required for all deployments where VMs should automatically receive IP addresses through DHCP. **To install and configure the DHCP agent** #. You must configure the host running the neutron-dhcp-agent as a data forwarding node according to the requirements for your plug-in. #. Install the DHCP agent: .. code-block:: console # apt-get install neutron-dhcp-agent #. Update any options in the ``/etc/neutron/dhcp_agent.ini`` file that depend on the plug-in in use. See the sub-sections. .. important:: If you reboot a node that runs the DHCP agent, you must run the :command:`neutron-ovs-cleanup` command before the ``neutron-dhcp-agent`` service starts. On Red Hat, SUSE, and Ubuntu based systems, the ``neutron-ovs-cleanup`` service runs the :command:`neutron-ovs-cleanup` command automatically. However, on Debian-based systems, you must manually run this command or write your own system script that runs on boot before the ``neutron-dhcp-agent`` service starts. The Networking DHCP agent can use the `dnsmasq `__ driver, which supports stateful and stateless DHCPv6 for subnets created with ``--ipv6_address_mode`` set to ``dhcpv6-stateful`` or ``dhcpv6-stateless``. For example: .. code-block:: console $ openstack subnet create --ip-version 6 --ipv6-ra-mode dhcpv6-stateful \ --ipv6-address-mode dhcpv6-stateful --network NETWORK --subnet-range \ CIDR SUBNET_NAME .. code-block:: console $ openstack subnet create --ip-version 6 --ipv6-ra-mode dhcpv6-stateless \ --ipv6-address-mode dhcpv6-stateless --network NETWORK --subnet-range \ CIDR SUBNET_NAME If no dnsmasq process for the subnet's network is running, Networking launches a new one on the subnet's DHCP port in the ``qdhcp-XXX`` namespace. If a dnsmasq process is already running for the network, Networking restarts it with the updated configuration; the same happens whenever the subnet is updated. .. note:: For the dhcp-agent to operate in IPv6 mode, use at least dnsmasq v2.63. After a configured timeframe, networks uncouple from DHCP agents that are no longer in use. You can configure the DHCP agent to automatically detach from a network when the agent is out of service, or no longer needed. This feature applies to all plug-ins that support DHCP scaling. For more information, see the `DHCP agent configuration options `__ listed in the OpenStack Configuration Reference. DHCP agent setup: OVS plug-in ----------------------------- These DHCP agent options are required in the ``/etc/neutron/dhcp_agent.ini`` file for the OVS plug-in: .. code-block:: bash [DEFAULT] enable_isolated_metadata = True interface_driver = openvswitch DHCP agent setup: NSX plug-in ----------------------------- These DHCP agent options are required in the ``/etc/neutron/dhcp_agent.ini`` file for the NSX plug-in: .. 
code-block:: bash [DEFAULT] enable_metadata_network = True enable_isolated_metadata = True interface_driver = openvswitch DHCP agent setup: Linux-bridge plug-in -------------------------------------- These DHCP agent options are required in the ``/etc/neutron/dhcp_agent.ini`` file for the Linux-bridge plug-in: .. code-block:: bash [DEFAULT] enable_isolated_metadata = True interface_driver = linuxbridge Configure L3 agent ~~~~~~~~~~~~~~~~~~ The OpenStack Networking service has a widely used API extension to allow administrators and projects to create routers to interconnect L2 networks, and floating IPs to make ports on private networks publicly accessible. Many plug-ins rely on the L3 service agent to implement the L3 functionality. However, the following plug-ins already have built-in L3 capabilities: - Big Switch/Floodlight plug-in, which supports both the open source `Floodlight `__ controller and the proprietary Big Switch controller. .. note:: Only the proprietary BigSwitch controller implements L3 functionality. When using Floodlight as your OpenFlow controller, L3 functionality is not available. - IBM SDN-VE plug-in - MidoNet plug-in - NSX plug-in - PLUMgrid plug-in .. warning:: Do not configure or use ``neutron-l3-agent`` if you use one of these plug-ins. **To install the L3 agent for all other plug-ins** #. Install the ``neutron-l3-agent`` binary on the network node: .. code-block:: console # apt-get install neutron-l3-agent #. To uplink the node that runs ``neutron-l3-agent`` to the external network, create a bridge named ``br-ex`` and attach the NIC for the external network to this bridge. For example, with Open vSwitch and NIC eth1 connected to the external network, run: .. code-block:: console # ovs-vsctl add-br br-ex # ovs-vsctl add-port br-ex eth1 When the ``eth1`` port is added to the ``br-ex`` bridge, external communication is interrupted. To avoid this, edit the ``/etc/network/interfaces`` file to contain the following information: .. code-block:: shell ## External bridge auto br-ex iface br-ex inet static address 192.27.117.101 netmask 255.255.240.0 gateway 192.27.127.254 dns-nameservers 8.8.8.8 ## External network interface auto eth1 iface eth1 inet manual up ifconfig $IFACE 0.0.0.0 up up ip link set $IFACE promisc on down ip link set $IFACE promisc off down ifconfig $IFACE down .. note:: The external bridge configuration address is the external IP address. This address and gateway should be configured in ``/etc/network/interfaces``. After editing the configuration, restart ``br-ex``: .. code-block:: console # ifdown br-ex && ifup br-ex Do not manually configure an IP address on the NIC connected to the external network for the node running ``neutron-l3-agent``. Rather, you must have a range of IP addresses from the external network that can be used by OpenStack Networking for routers that uplink to the external network. This range must be large enough to have an IP address for each router in the deployment, as well as each floating IP. #. The ``neutron-l3-agent`` uses the Linux IP stack and iptables to perform L3 forwarding and NAT. In order to support multiple routers with potentially overlapping IP addresses, ``neutron-l3-agent`` defaults to using Linux network namespaces to provide isolated forwarding contexts. As a result, the IP addresses of routers are not visible simply by running the :command:`ip addr list` or :command:`ifconfig` command on the node. Similarly, you cannot directly :command:`ping` fixed IPs. 
To do either of these things, you must run the command within a particular network namespace for the router. The namespace has the name ``qrouter-ROUTER_UUID``. These example commands run in the router namespace with UUID 47af3868-0fa8-4447-85f6-1304de32153b: .. code-block:: console # ip netns exec qrouter-47af3868-0fa8-4447-85f6-1304de32153b ip addr list .. code-block:: console # ip netns exec qrouter-47af3868-0fa8-4447-85f6-1304de32153b ping FIXED_IP .. important:: If you reboot a node that runs the L3 agent, you must run the :command:`neutron-ovs-cleanup` command before the ``neutron-l3-agent`` service starts. On Red Hat, SUSE and Ubuntu based systems, the neutron-ovs-cleanup service runs the :command:`neutron-ovs-cleanup` command automatically. However, on Debian-based systems, you must manually run this command or write your own system script that runs on boot before the neutron-l3-agent service starts. **How routers are assigned to L3 agents** By default, a router is assigned to the L3 agent with the least number of routers (LeastRoutersScheduler). This can be changed by altering the ``router_scheduler_driver`` setting in the configuration file. Configure metering agent ~~~~~~~~~~~~~~~~~~~~~~~~ The Neutron Metering agent resides beside neutron-l3-agent. **To install the metering agent and configure the node** #. Install the agent by running: .. code-block:: console # apt-get install neutron-metering-agent #. If you use one of the following plug-ins, you need to configure the metering agent with these lines as well: - An OVS-based plug-in such as OVS, NSX, NEC, BigSwitch/Floodlight: .. code-block:: ini interface_driver = openvswitch - A plug-in that uses LinuxBridge: .. code-block:: ini interface_driver = linuxbridge #. To use the reference implementation, you must set: .. code-block:: ini driver = iptables #. Set the ``service_plugins`` option in the ``/etc/neutron/neutron.conf`` file on the host that runs ``neutron-server``: .. code-block:: ini service_plugins = metering If this option is already defined, add ``metering`` to the list, using a comma as separator. For example: .. code-block:: ini service_plugins = router,metering Configure Hyper-V L2 agent ~~~~~~~~~~~~~~~~~~~~~~~~~~ Before you install the OpenStack Networking Hyper-V L2 agent on a Hyper-V compute node, ensure the compute node has been configured correctly using these `instructions `__. **To install the OpenStack Networking Hyper-V agent and configure the node** #. Download the OpenStack Networking code from the repository: .. code-block:: console > cd C:\OpenStack\ > git clone https://opendev.org/openstack/neutron #. Install the OpenStack Networking Hyper-V Agent: .. code-block:: console > cd C:\OpenStack\neutron\ > python setup.py install #. Copy the ``policy.json`` file: .. code-block:: console > xcopy C:\OpenStack\neutron\etc\policy.json C:\etc\ #. Create the ``C:\etc\neutron-hyperv-agent.conf`` file and add the proper configuration options and the `Hyper-V related options `__. Here is a sample config file: .. 
   .. code-block:: ini

      [DEFAULT]
      control_exchange = neutron
      policy_file = C:\etc\policy.json
      rpc_backend = neutron.openstack.common.rpc.impl_kombu
      rabbit_host = IP_ADDRESS
      rabbit_port = 5672
      rabbit_userid = guest
      rabbit_password =
      logdir = C:\OpenStack\Log
      logfile = neutron-hyperv-agent.log

      [AGENT]
      polling_interval = 2
      physical_network_vswitch_mappings = *:YOUR_BRIDGE_NAME
      enable_metrics_collection = true

      [SECURITYGROUP]
      firewall_driver = hyperv.neutron.security_groups_driver.HyperVSecurityGroupsDriver
      enable_security_group = true

#. Start the OpenStack Networking Hyper-V agent:

   .. code-block:: console

      > C:\Python27\Scripts\neutron-hyperv-agent.exe --config-file C:\etc\neutron-hyperv-agent.conf

Basic operations on agents
~~~~~~~~~~~~~~~~~~~~~~~~~~

This table shows examples of Networking commands that enable you to complete
basic operations on agents.

**Basic operations on Networking agents**

.. list-table::
   :widths: 50 50
   :header-rows: 1

   * - Operation
     - Command
   * - List all available agents.
     - ``$ openstack network agent list``
   * - Show information for a given agent.
     - ``$ openstack network agent show AGENT_ID``
   * - Update the admin status and description for a specified agent. The
       command can be used to enable and disable agents by setting the
       ``--admin-state-up`` parameter to ``False`` or ``True``.
     - ``$ neutron agent-update --admin-state-up False AGENT_ID``
   * - Delete a given agent. Consider disabling the agent before deletion.
     - ``$ openstack network agent delete AGENT_ID``

See the `OpenStack Command-Line Interface Reference `__ for more information
on Networking commands.

=========================================
Configure Identity service for Networking
=========================================

**To configure the Identity service for use with Networking**

#. Create the ``get_id()`` function

   The ``get_id()`` function stores the ID of created objects, and removes
   the need to copy and paste object IDs in later steps:

   a. Add the following function to your ``.bashrc`` file:

      .. code-block:: bash

         function get_id () {
             echo `"$@" | awk '/ id / { print $4 }'`
         }

   b. Source the ``.bashrc`` file:

      .. code-block:: console

         $ source .bashrc

#. Create the Networking service entry

   Networking must be available in the Compute service catalog. Create the
   service:

   .. code-block:: console

      $ NEUTRON_SERVICE_ID=$(get_id openstack service create network \
        --name neutron --description 'OpenStack Networking Service')

#. Create the Networking service endpoint entry

   The way that you create a Networking endpoint entry depends on whether
   you are using the SQL or the template catalog driver:

   - If you are using the ``SQL driver``, run the following command with the
     specified region (``$REGION``), IP address of the Networking server
     (``$IP``), and service ID (``$NEUTRON_SERVICE_ID``, obtained in the
     previous step).

     .. code-block:: console

        $ openstack endpoint create $NEUTRON_SERVICE_ID --region $REGION \
          --publicurl 'http://$IP:9696/' --adminurl 'http://$IP:9696/' \
          --internalurl 'http://$IP:9696/'

     For example:
     .. code-block:: console

        $ openstack endpoint create $NEUTRON_SERVICE_ID --region myregion \
          --publicurl "http://10.211.55.17:9696/" \
          --adminurl "http://10.211.55.17:9696/" \
          --internalurl "http://10.211.55.17:9696/"

   - If you are using the ``template driver``, specify the following
     parameters in your Compute catalog template file
     (``default_catalog.templates``), along with the region (``$REGION``)
     and IP address of the Networking server (``$IP``).

     .. code-block:: bash

        catalog.$REGION.network.publicURL = http://$IP:9696
        catalog.$REGION.network.adminURL = http://$IP:9696
        catalog.$REGION.network.internalURL = http://$IP:9696
        catalog.$REGION.network.name = Network Service

     For example:

     .. code-block:: bash

        catalog.$Region.network.publicURL = http://10.211.55.17:9696
        catalog.$Region.network.adminURL = http://10.211.55.17:9696
        catalog.$Region.network.internalURL = http://10.211.55.17:9696
        catalog.$Region.network.name = Network Service

#. Create the Networking service user

   You must provide admin user credentials that Compute and some internal
   Networking components can use to access the Networking API. Create a
   special ``service`` project and a ``neutron`` user within this project,
   and assign an ``admin`` role to this user.

   a. Create the ``admin`` role:

      .. code-block:: console

         $ ADMIN_ROLE=$(get_id openstack role create admin)

   b. Create the ``service`` project:

      .. code-block:: console

         $ SERVICE_TENANT=$(get_id openstack project create service \
           --description "Services project" --domain default)

   c. Create the ``neutron`` user within the ``service`` project:

      .. code-block:: console

         $ NEUTRON_USER=$(get_id openstack user create neutron \
           --password "$NEUTRON_PASSWORD" --email demo@example.com \
           --project service)

   d. Establish the relationship among the project, user, and role:

      .. code-block:: console

         $ openstack role add $ADMIN_ROLE --user $NEUTRON_USER \
           --project $SERVICE_TENANT

For information about how to create service entries and users, see the
`Ocata Installation Tutorials and Guides `_ for your distribution.

Compute
~~~~~~~

If you use Networking, do not run the Compute ``nova-network`` service (like
you do in traditional Compute deployments). Instead, Compute delegates most
network-related decisions to Networking.

.. note::

   Uninstall ``nova-network`` and reboot any physical nodes that have been
   running ``nova-network`` before using them to run Networking.
   Inadvertently running the ``nova-network`` process while using Networking
   can cause problems, as can stale iptables rules pushed down by previously
   running ``nova-network``.

Compute proxies project-facing API calls to manage security groups and
floating IPs to Networking APIs. However, operator-facing tools such as
``nova-manage`` are not proxied and should not be used.

.. warning::

   When you configure networking, you must use this guide. Do not rely on
   Compute networking documentation or past experience with Compute. If a
   :command:`nova` command or configuration option related to networking is
   not mentioned in this guide, the command is probably not supported for
   use with Networking. In particular, you cannot use CLI tools like
   ``nova-manage`` and ``nova`` to manage networks or IP addressing,
   including both fixed and floating IPs, with Networking.

To ensure that Compute works properly with Networking (rather than the
legacy ``nova-network`` mechanism), you must adjust settings in the
``nova.conf`` configuration file.
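Before wiring the service credentials into ``nova.conf``, it can be useful
to confirm that the ``neutron`` service user created above can actually
authenticate. The following is a minimal sanity check, assuming the Identity
service runs on a host named ``controller`` and that ``$NEUTRON_PASSWORD``
still holds the password set earlier; adjust both to match your deployment:

.. code-block:: console

   $ openstack --os-username neutron --os-password "$NEUTRON_PASSWORD" \
     --os-project-name service --os-auth-url http://controller:5000/v2.0 \
     token issue

If a token is returned, the same credentials can be used for the
``[neutron]`` settings described in the next section.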
Networking API and credential configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Each time you provision or de-provision a VM in Compute, ``nova-*`` services
communicate with Networking using the standard API. For this to happen, you
must configure the following items in the ``nova.conf`` file (used by each
``nova-compute`` and ``nova-api`` instance).

.. list-table:: **nova.conf API and credential settings prior to Mitaka**
   :widths: 20 50
   :header-rows: 1

   * - Attribute name
     - Description
   * - ``[DEFAULT] use_neutron``
     - Modify from the default to ``True`` to indicate that Networking
       should be used rather than the traditional nova-network networking
       model.
   * - ``[neutron] url``
     - Update to the host name/IP and port of the neutron-server instance
       for this deployment.
   * - ``[neutron] auth_strategy``
     - Keep the default ``keystone`` value for all production deployments.
   * - ``[neutron] admin_project_name``
     - Update to the name of the service tenant created in the above section
       on Identity configuration.
   * - ``[neutron] admin_username``
     - Update to the name of the user created in the above section on
       Identity configuration.
   * - ``[neutron] admin_password``
     - Update to the password of the user created in the above section on
       Identity configuration.
   * - ``[neutron] admin_auth_url``
     - Update to the Identity server IP and port. This is the Identity
       (keystone) admin API server IP and port value, and not the Identity
       service API IP and port.

.. list-table:: **nova.conf API and credential settings in Newton**
   :widths: 20 50
   :header-rows: 1

   * - Attribute name
     - Description
   * - ``[DEFAULT] use_neutron``
     - Modify from the default to ``True`` to indicate that Networking
       should be used rather than the traditional nova-network networking
       model.
   * - ``[neutron] url``
     - Update to the host name/IP and port of the neutron-server instance
       for this deployment.
   * - ``[neutron] auth_strategy``
     - Keep the default ``keystone`` value for all production deployments.
   * - ``[neutron] project_name``
     - Update to the name of the service tenant created in the above section
       on Identity configuration.
   * - ``[neutron] username``
     - Update to the name of the user created in the above section on
       Identity configuration.
   * - ``[neutron] password``
     - Update to the password of the user created in the above section on
       Identity configuration.
   * - ``[neutron] auth_url``
     - Update to the Identity server IP and port. This is the Identity
       (keystone) admin API server IP and port value, and not the Identity
       service API IP and port.

Configure security groups
~~~~~~~~~~~~~~~~~~~~~~~~~

The Networking service provides security group functionality using a
mechanism that is more flexible and powerful than the security group
capabilities built into Compute. Therefore, if you use Networking, you
should always disable built-in security groups and proxy all security group
calls to the Networking API. If you do not, security policies will conflict
by being simultaneously applied by both services.

To proxy security groups to Networking, use the following configuration
values in the ``nova.conf`` file:

**nova.conf security group settings**

+---------------------+-------------------------------------------------------+
| Item                | Configuration                                         |
+=====================+=======================================================+
| ``firewall_driver`` | Update to ``nova.virt.firewall.NoopFirewallDriver``,  |
|                     | so that nova-compute does not perform                 |
|                     | iptables-based filtering itself.                      |
+---------------------+-------------------------------------------------------+
Configure metadata
~~~~~~~~~~~~~~~~~~

The Compute service allows VMs to query metadata associated with a VM by
making a web request to a special 169.254.169.254 address. Networking
supports proxying those requests to nova-api, even when the requests are
made from isolated networks, or from multiple networks that use overlapping
IP addresses.

To enable proxying the requests, you must update the following fields in the
``[neutron]`` section of the ``nova.conf`` file.

**nova.conf metadata settings**

+----------------------------------+--------------------------------------------+
| Item                             | Configuration                              |
+==================================+============================================+
| ``service_metadata_proxy``       | Update to ``true``, otherwise nova-api     |
|                                  | will not properly respond to requests      |
|                                  | from the neutron-metadata-agent.           |
+----------------------------------+--------------------------------------------+
| ``metadata_proxy_shared_secret`` | Update to a string "password" value.       |
|                                  | You must also configure the same value in  |
|                                  | the ``metadata_agent.ini`` file, to        |
|                                  | authenticate requests made for metadata.   |
|                                  |                                            |
|                                  | The default value of an empty string in    |
|                                  | both files will allow metadata to          |
|                                  | function, but will not be secure if any    |
|                                  | non-trusted entities have access to the    |
|                                  | metadata APIs exposed by nova-api.         |
+----------------------------------+--------------------------------------------+

.. note::

   As a precaution, even when using ``metadata_proxy_shared_secret``, we
   recommend that you do not expose metadata using the same nova-api
   instances that are used for projects. Instead, you should run a dedicated
   set of nova-api instances for metadata that are available only on your
   management network. Whether a given nova-api instance exposes metadata
   APIs is determined by the value of ``enabled_apis`` in its ``nova.conf``.

Example nova.conf (for nova-compute and nova-api)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Example values for the above settings, assuming a cloud controller node
running Compute and Networking with an IP address of 192.168.1.2:

.. code-block:: ini

   [DEFAULT]
   use_neutron = True
   firewall_driver=nova.virt.firewall.NoopFirewallDriver

   [neutron]
   url=http://192.168.1.2:9696
   auth_strategy=keystone
   admin_tenant_name=service
   admin_username=neutron
   admin_password=password
   admin_auth_url=http://192.168.1.2:5000/v2.0
   service_metadata_proxy=true
   metadata_proxy_shared_secret=foo

======================
Plug-in configurations
======================

For configuration options, see `Networking configuration options `__ in
Configuration Reference. These sections explain how to configure specific
plug-ins.

Configure Big Switch (Floodlight REST Proxy) plug-in
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

#. Edit the ``/etc/neutron/neutron.conf`` file and add this line:

   .. code-block:: ini

      core_plugin = bigswitch

#. In the ``/etc/neutron/neutron.conf`` file, set the ``service_plugins``
   option:

   .. code-block:: ini

      service_plugins = neutron.plugins.bigswitch.l3_router_plugin.L3RestProxy

#. Edit the ``/etc/neutron/plugins/bigswitch/restproxy.ini`` file for the
   plug-in and specify a comma-separated list of controller\_ip:port pairs:
   .. code-block:: ini

      server = CONTROLLER_IP:PORT

   For database configuration, see `Install Networking Services `__ in the
   Installation Tutorials and Guides. (The link defaults to the Ubuntu
   version.)

#. Restart the ``neutron-server`` service to apply the settings:

   .. code-block:: console

      # service neutron-server restart

Configure Brocade plug-in
~~~~~~~~~~~~~~~~~~~~~~~~~

#. Install the Brocade-modified Python netconf client (ncclient) library,
   which is available at https://github.com/brocade/ncclient:

   .. code-block:: console

      $ git clone https://github.com/brocade/ncclient

#. As root, run this command:

   .. code-block:: console

      # cd ncclient; python setup.py install

#. Edit the ``/etc/neutron/neutron.conf`` file and set the following option:

   .. code-block:: ini

      core_plugin = brocade

#. Edit the ``/etc/neutron/plugins/brocade/brocade.ini`` file for the
   Brocade plug-in and specify the admin user name, password, and IP address
   of the Brocade switch:

   .. code-block:: ini

      [SWITCH]
      username = ADMIN
      password = PASSWORD
      address = SWITCH_MGMT_IP_ADDRESS
      ostype = NOS

   For database configuration, see `Install Networking Services `__ in any
   of the Installation Tutorials and Guides in the `OpenStack Documentation
   index `__. (The link defaults to the Ubuntu version.)

#. Restart the ``neutron-server`` service to apply the settings:

   .. code-block:: console

      # service neutron-server restart

Configure NSX-mh plug-in
~~~~~~~~~~~~~~~~~~~~~~~~

The instructions in this section refer to the VMware NSX-mh platform,
formerly known as Nicira NVP.

#. Install the NSX plug-in:

   .. code-block:: console

      # apt-get install python-vmware-nsx

#. Edit the ``/etc/neutron/neutron.conf`` file and set this line:

   .. code-block:: ini

      core_plugin = vmware

   Example ``neutron.conf`` file for NSX-mh integration:

   .. code-block:: ini

      core_plugin = vmware
      rabbit_host = 192.168.203.10
      allow_overlapping_ips = True

#. To configure the NSX-mh controller cluster for OpenStack Networking,
   locate the ``[DEFAULT]`` section in the
   ``/etc/neutron/plugins/vmware/nsx.ini`` file and add the following
   entries:

   - To establish and configure the connection with the controller cluster
     you must set some parameters, including NSX-mh API endpoints, access
     credentials, and optionally specify settings for HTTP timeouts,
     redirects and retries in case of connection failures:

     .. code-block:: ini

        nsx_user = ADMIN_USER_NAME
        nsx_password = NSX_USER_PASSWORD
        http_timeout = HTTP_REQUEST_TIMEOUT # (seconds) default 75 seconds
        retries = HTTP_REQUEST_RETRIES # default 2
        redirects = HTTP_REQUEST_MAX_REDIRECTS # default 2
        nsx_controllers = API_ENDPOINT_LIST # comma-separated list

     To ensure correct operations, the ``nsx_user`` user must have
     administrator credentials on the NSX-mh platform.

     A controller API endpoint consists of the IP address and port for the
     controller; if you omit the port, port 443 is used. If multiple API
     endpoints are specified, it is up to the user to ensure that all these
     endpoints belong to the same controller cluster. The OpenStack
     Networking VMware NSX-mh plug-in does not perform this check, and
     results might be unpredictable.

     When you specify multiple API endpoints, the plug-in takes care of load
     balancing requests on the various API endpoints.

   - The UUID of the NSX-mh transport zone that should be used by default
     when a project creates a network. You can get this value from the
     Transport Zones page for the NSX-mh manager. Alternatively, the
     transport zone identifier can be retrieved by querying the NSX-mh API:
     ``/ws.v1/transport-zone``
     .. code-block:: ini

        default_tz_uuid = TRANSPORT_ZONE_UUID

   - The UUID of the NSX-mh L3 gateway service that should be used by
     default when a project creates a router:

     .. code-block:: ini

        default_l3_gw_service_uuid = GATEWAY_SERVICE_UUID

   .. warning::

      Ubuntu packaging currently does not update the neutron init script to
      point to the NSX-mh configuration file. Instead, you must manually
      update ``/etc/default/neutron-server`` to add this line:

      .. code-block:: ini

         NEUTRON_PLUGIN_CONFIG = /etc/neutron/plugins/vmware/nsx.ini

   For database configuration, see `Install Networking Services `__ in the
   Installation Tutorials and Guides.

#. Restart ``neutron-server`` to apply settings:

   .. code-block:: console

      # service neutron-server restart

   .. warning::

      The neutron NSX-mh plug-in does not implement initial
      re-synchronization of Neutron resources. Therefore, resources that
      might already exist in the database when Neutron is switched to the
      NSX-mh plug-in will not be created on the NSX-mh backend upon restart.

Example ``nsx.ini`` file:

.. code-block:: ini

   [DEFAULT]
   default_tz_uuid = d3afb164-b263-4aaa-a3e4-48e0e09bb33c
   default_l3_gw_service_uuid=5c8622cc-240a-40a1-9693-e6a5fca4e3cf
   nsx_user=admin
   nsx_password=changeme
   nsx_controllers=10.127.0.100,10.127.0.200:8888

.. note::

   To debug :file:`nsx.ini` configuration issues, run this command from the
   host that runs neutron-server:

   .. code-block:: console

      # neutron-check-nsx-config PATH_TO_NSX.INI

   This command tests whether ``neutron-server`` can log into all of the
   NSX-mh controllers and the SQL server, and whether all UUID values are
   correct.

Configure PLUMgrid plug-in
~~~~~~~~~~~~~~~~~~~~~~~~~~

#. Edit the ``/etc/neutron/neutron.conf`` file and set this line:

   .. code-block:: ini

      core_plugin = plumgrid

#. Edit the [PLUMgridDirector] section in the
   ``/etc/neutron/plugins/plumgrid/plumgrid.ini`` file and specify the IP
   address, port, admin user name, and password of the PLUMgrid Director:

   .. code-block:: ini

      [PLUMgridDirector]
      director_server = "PLUMgrid-director-ip-address"
      director_server_port = "PLUMgrid-director-port"
      username = "PLUMgrid-director-admin-username"
      password = "PLUMgrid-director-admin-password"

   For database configuration, see `Install Networking Services `__ in the
   Installation Tutorials and Guides.

#. Restart the ``neutron-server`` service to apply the settings:
   .. code-block:: console

      # service neutron-server restart
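After restarting ``neutron-server`` with any of these plug-ins, one quick
way to confirm that the new core plug-in actually loaded is to list the API
extensions it advertises. This is only a sketch, assuming the usual admin
credentials are already set in the environment; the exact extension names
vary by plug-in:

.. code-block:: console

   $ openstack extension list --network -c Name -c Alias

If ``neutron-server`` fails to start instead, the stack trace in
``/var/log/neutron/server.log`` usually points at the offending
configuration option.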
[Figures ``vmware_nsx_ex1`` and ``vmware_nsx_ex2`` (binary image data
removed): example VMware NSX deployment diagrams showing a physical router
and physical switch, an NSX controller, a network node running the VMware
NSX plug-in, the DHCP agent, and the Neutron server, plus compute nodes
hosting project network VMs, with traffic flows and physical connections
marked.]
BL{, 1b'@'kDcC y(N*b|%"H;$ILcWd~8EOkjDc93evz |`x~V" 5+o ?m,<2hli@و!e@Gfg$ԝum ,wgZ/^W0#0 0>B*O Z9:p~oг$Ȓ.(]Lێˮr!DDIz UxpMI!IMrX=JacSI>k b V1k JJ  $ E9m“-pN]:灙>*Q#J9(I_l#0@ FC Y z9NK"Fd'D%Zh,h.DIYj$1J,˥׫xĚD2௡G_G½7,BvaXHw߽|G=>FL_Ö|]UHQRa&K+`FH0E[+I^ xAWYDa}`T1d[@%Hc@DhMv4. I{'{֕qF 5'qS% 뺅AZݍܰ{Є!yU5pبth'/CឍQJH=X \ #0@!x4'^…gH슼HDb;ԒZ.0 ƅIR׈Y˥򲩈=t9[ۇ~9j3OHO2PW Rk89÷5U?Q+bF`5itWB}/,%]%&GIM<[Y]U'|O'L8}r)utWǕ}pOsW[VH/yGT<cF` |Ǔkcc%ʘ)+c ׉rϸ"K),$LmjTda؝ɉ| Y"ϒ~ \##0#t?5[+Կ[O=2lב$i973 &΃Ǡr{z.Q>iq.0#01@_c27kd^_[P$vfN( E`ք^3j0{u fF`FHN,%l:[J/ja zb#Dg%Ul4 %M .F`F`!Kj%hMLŹ3!@dNG͎]nVG8'%hN8^ kDMԠnila)n`F`F !`6zx>X]4Ҋe46a c:V#xʏ*ަT+,mCoR i$;TM!1#0# U,b$̚Pm#zZ U8('8רjw2{5p@AsFdh=H#o٨\hn\8Dj]XKle#0#0@ d)yI;ˏvzW>Ȣ\Xy(CawǍ$neEy@s:[.9#ԿVor,V;YbcF`E;!Ľ9̓gL[LUiVhlVBhҢ' ɓ\F]yP:IԂfWdI^kF`F`z"ZP).Tm)ݗCd6j1 S,&J) ɓ\w_Y` c?{B)|N'I쉥ru^V*ޙ0l#dޒ cٝ^'F`F)DHq {1B8kbS=E06 qZV 2tjb c9kY⤊q}i\OB(mEUȭĩ t:V \`F`"# Iy>Z 8O0t^㵁Ƈ }:BװE bO [m0.:c e Dĉ-&Kx,t֣YWwbM\ iZ0tLF/F`uF Li;c6ysAWhBcb.-b&yuV _$"K$Y,|px;w>\]8[%g5%C#70RI"H=I8q"pd}fc#+~y$D#~pEpW/G0Q=|E(9 :fi@1sݳLgJI( ^8,;wl# U5<4NFcR f-=o^u~"Evg-l@"ɶObfob?&i O 4b>.|&}I\HЁT(G$f%Mqυj֓ hm@nfi$)lƞdq_F:?ȏ(aLkh:O [zC\!#Ke~)L;B}ϳ䎇j8#p \%J< `zrPR$;|ѠۯZb_:u`[ڇ: Y¹JGk-k=,5^c8d<'c(D=0 0k|?s'Y3h>=#QBnV:68%2<GA^+"6 q,-(Wx$kZ&K<$|`8>}zNOϙ]q> *VTMyIؽzɃ!n߱G< dӀI7FޗqP1֝1;S#D}!H.O(`^UA""#u*N]R{)KTd'>A9SU(,AFs(QarxPvT} AL0c-2@#N`&8~@@?>0>ƽ`Aھ㔐:RtPJ BPHa4#*PuA(T##TA~?"UIa33POڑz0qW#0#0 " C ]6rLc !LDT}nŨ<7mS5k JP>,T&REzD;"TovB~F`W:"uov&;uu_cE9 س 0#0@sX\ukƐUE(*;z|0 5j] +}^"ߎDyRS16G`F`@@R7mZz ,̛,5@FJbY0ཏڡ ^G6I-f{RΜF6Õnhl6ƔS"WyN!]mC m*I+ZvDcCudi,#0#01@bT<*>mPوXArw93 +}pB:;8G|\^rH0/H{VȒj#(p]i4%ǸDW[ SPi DI SH7CDW8x'WF=KQx655@QC-Yrû+FF+^(챤ZuU1@x bROZvt>&z{ X-&3C;'N`~N:\0oa{o-"I!pI?Bo aˣx)d ^jEA .k,ɂ—qwr! ;d"Q?&GW_g.B`R~>۷^{?ﴉ@٢zX㠨ۣܽ0}BDh^QzN6^L.[U' 5aMw;iFd毈9K~`;.x.V68 x{$h˄\IUݨO'S<J,Buf,HOJ4vY\Pp? ?ٷWLܓU 0٤p4򲇋,su A;_K]Ie0w((2tE jQT`u8rY~i.? (J <BEXFdbX^Z]hȓq\£*66Jsw>]i' &>(A779!Kd*g)XFP<ﮂۯ^ړ*,Ъ& B*%w+7R#{n;;c|PWj-zdQ> Ɣ䣈0 5lB3yW)nF"QI}s0{MazB n?g(^X5é.^AhޠIEP-xkB& >I5N R{f/ KABDRUJJ 6U$׫D1Y <6HU[Y։#;/1?ʎGq53T#%ŧ_HԸϙ5^$kD*)1q|6!c{đpӻҹ,m_ $mʘ^Nx8KGV N>*ТH@daH M. z@`[$QlV r0kkD\3ū&M;#qE!9*ɱ`$Қ.ع ޛzSfskfw87Vu- dk>ɗ1yvd/«(%~lE߽|XH[YU쫀q9dӪ/8YB#jV+Uc5T}i02E2B$&JDt(m(6QUM߁圐].81gYn7n'Kfuˤ\|a@Q".B'ыdE%g:DvKJgF#J4< K_73(7J-$G55%(QK#~abZ( H]2!n q(x7@Ϝ1V,-mJ 9^< j< }|0]7raFD)DhJBn|W0 Jo$ZrW@'\iEE;NI ,IE9,87IS"҄ۂ(HUʅfF#0Oc6sC\0ӯQl>kl7>$BFvvjmiJ3ɂ%])n',;׭v^)P("@a^m|]m蝁| "0<Ln0gN5(O^ |hiwDc5IsO)rW)EgLj2ScXhv~C "u&}c^~~U@$96՛b0k<)k)i~_ɳD%7FHQ0)dISp'SȒY"Jc@,!Q"bD^$Zdsr{z"! vniGqS";v>!Urw_bu&3|(UvhPtIe$۝w''MuH!`63_)!qdUuLP JSBz1^]B$?T@ Y]S+,Y^8[u!7cN/E[nSGi<',E"KL>HjD=",=KyhKu2DԤ:W2YX75W֯& vE#ޖ Hka7.n9-@&T ccF`b^dҝوS+q7vD~qJɠΙd}u? i, $Žf2J|}~ANNvkLXj i0\$Z_QB›D$>WB(NxTv:% IvܧCD~d{$ɑC&IY_2,xm0|X֦8.r _V &tdz1AH_y~No6|0ނEyK{x1 GWL9`F+2z)n;3@i?$XH4_& 0#0#C0<:oVu/b#0YJA.3#0@l@-2Eo}w`[l{1@0X#1#0@ i+z q(#D&KQ+aF`xQ{4TFL_0#$4Z;Z5GÙnʌ5SwV3@ d)Io`FP#`Yq? {~Xl_\ x5mu# >$9LgF` $H5xfl&K}/aF`Bk@ d)^F0@?hln f@ANtCȷ "L|n`"fW>ʏA#Pf<ܙ`1x`#W> @;Lչp]\O|}C@? 
lCR Z7ҟmȱ.wn.$I`P%_s+@/ٙsf Z"nw ^A$;zz=|~eӃ~K]cҳz%)Uujvs# SVH@NF; \'m j,E޲ޖf_XWCa$Qc׿ ޓ4ogIM W&ޚ]ĥFʏV3o8 Qcv=s',{nX$^1Pl,73ǎE(&M]4Yw\r~fc@472#%Z\ЍD)^$C/}&#@Qi]zuJ;k৷Ge`;>ìf FRu} ,Y6ݠA zt)(QggF,.74'vep%L '@8 :S1o])ad)?dɹj-_z#ruEQ TPa_[fU>~~LhRs6bz= A['`5B} RHX&Vê_m.nu@vs)_7,>RliUS&BXA}w oK2ho6b۹z6Wmfrt| ِyC[@!v;o7,iֻEg  ?=☷Z޵ A/~8G2#ZZo7/IH#tmC\~Ό$O3ﮖx핰@شJ;>r%ŗb*DK ?%]Z$ALU|e1]p~&q1ΙJq3ņڼϿ %v3e%9gk{P@MZ~IORp('79ǧmPw凵Y]AR;F`_׌#eQqR-ސ,Wl!J4);Z)Cߌ<߉BtάqP7`2 F,]y,hE]튊q#`j5KkRz,& Yt2"rDYFi0Cj_|X>tLxfM(^aoQ=|j~-BPU$:(D2ܺɴx%QOs7E")LoۈLpw];7.e%ԙ NaK9Jd/+6٭VH]bߴS'yEX!O4uk~p<^ X8;׌/8GLJoeյ"Q1"(-~ ߮ x ;ᙷVoZ i;Lw1p=c4pv[OVDAlFk4uQcjo?7;^~[4zEU xdM- @6# <y9r.Po[&ySrcќI, J%ym HDȋDd/vz"GAiQB%ẙ^y(0u~w8EΞx+ FͿ"۲nI/;'YW2k+>lKH(΍kmB5&!ܰ(R ^ V Dv2!y(4I= >ER@" dwMJhy GDʝ|Ǟvy:7ochί3#0P \_$&T[$83*ejzU rf`mSy`~Di!obȈܗ0ռx}%K'1OmD\}H$8AsH)0"SxtmG%"KDhQ@5KA4Hf1C"o5"Y8i"]~ ÌզP9uۥJ>yHA5Jz0Rf3b&IsN79P"p$ۆP+Nˢ2^2J3Yğ ϡÁp8ü` p&+,D zmuF q#4^#Jf?(&A }l#00̞X#*'ђUV$AF.^Ч!ve:EdV '-?y|0֔7H}:Aǒ{SuHY$Ϙ 0_^d}R`#b@yܨJ% .vEkii#ng#z:t4$,I{+Q :6T;ˌ۠N= OH%yy}J%T۴B(tY8[ny}9&%;|~[G HY9&!JYmz <&KCc)ǓBIgD? )[m~[.5cb~$̚!lGNWD v&_k[sΕki;& (U}djÓ@K$ky%?)w:ǡsqSW/L-5Ђ/Ҧe_(^54S0?nT FQw^~rڄܨw:a?T x[>zeG\~ۂI)yPe[b'1#X0 җn\L^Cd3B:IRX%|TD"%m߹<#H0ޯϥ!܃ǟ>ƑQSO&cL0#0@?e5ahD_r8KeD"3{7 0t'SIYo- ;L\v?۾`L$=_)-BW(wU[ 37;K+"Y7uܯ.qۄ$EWFU93~"AkunWm:^%yDiO"BQl~QEfŻ$nBъB.(ʃ)i#%˄}oB΄ lc{S3yYpϸF C9HX 8T Sˆo)YڏD{c&?l;XvF'kCG # Jx7FL}r_c$q L؀0#0#0H'x jFBd|; %K4ɡK9J(²KEV\^K:ݧ@2;z:cHDȎ`DT|$GW.['cѫG*Gdw3'  {YsZqy&#Y{bvvT6Q_73&B-<kBkyUc-ZZ8m=ZR`F`AB^%Z.Fzv,OUؑ67=5dU"^/`XcNwR##N$#3đݳa;loϟ 7@$:(Ą߸C}L4Jmwc"U580'äTyd1l߲[]\0IgN"gf_Vnϝ'H_՜%وce-&ϚR5)#0#0F@4\WG^af9F" cF؄;XLvM!mFau/`Hքt|ȃ3>=u.iVT# R#{8*)JO8B!19*PVHވĩMwEJjwDԊ,赊+>cxӸ'Ed`F`F@ wT$Jзj:ptx8Y:rVKxA@ׅp0Mm4Ɏt[u~yK|pd`^ܾ =>B02@W: I#Ft,Oh8(tHljTa.hG![kBӷ~ުP)F2HD熭  늏ܿqu#~!ӵf#t"b<{&R̈#·X]6F`F\66^+s3;lBLs+I#2ss(T)OBvCyF?onFE>$y$Q}HԂ[y}M~{@sq8Um7u b]ׅ"My>Sb]KQBCN<>J[r`0Ly ?Y}/bF`!0!X!9,[&:yu}DŽ׈HZfQB(>LRKD쓰`/qdnGaubM#캒"BZV,[1yGGKbsrq7;I sE,EQ92;D6|`Dw&`I]QXɦyH'+[cKq.S.䡪~R#?([kFO%jp!@mP=KQR׆af C},M-2 ~ݐ'4fsF+ː0g.<ǁZ(.nʅ0VuE\clMb_T]q4 )>[hpn&Sߌ 2U=|`F` CvFy~CrϕW϶` #"JH+R)׈>c>&"=`[R{j$)Ұ@%rk DD1Q-Ӎ^:|ifV@A x)wwe 47W7>D=KA,孮"Q%`Z|6t|zwhU_0528>Z}hۀ!?OJ3#ZZ xkkm"D/= mM߼<ǫ9 i3Uhͣ"ӢC<7}_~/L&r_m{gUrˏ/&_dz$=$oS㄄L7zƃsUh)0*@+ IpEZjuK(<#D mf0 Hԋ<:gÏ?;Pr^20HLJK3DN(!N@.bM .$]lCR:|٠-nu0m> C`hhWTT*$>. 9hBoN:hsY7 ¦nƍ%ݸQ ۚ4HGa Cnrՠ+.ǟGk"+EgPD؆6?R@9?9QkD->? Te"1#0#0=ADhH#UWa}¤$A!{PŎ_}'XȾ%;Zd؝(m8Pֽ<o xF`F`"@yDhofLg̀ ! Lc$!n+nDdmFsquW1%O[<7eԪDtHT3`#0܎қɹ>ʤYU @I4׆-bSO}sd>u8$m;dIW2\}PMM'$ t4H;;FPև*hݑl<0#0mƩ%MP搬ݤ3nT.xS0]g k 7mNv%"Յ#ҡNJh~MjoAz([L^=*NIv9p8Ф6'+-7<ݩQZQ_{6->l uios.>j7)ބ;6F`F`@ّܽ<-~ToU};z rn>W\yxOkA\ )m$(!I|}bF)y<܃lҴ"6)t^8#0#0@"Wd`-e7SJg%ȯlWϡ@bdy|B[7l?.=X~r i0G47"_[ؕ!}ľ ԗ}3w:|,ݷa7 pTzkZ\JcKS>&Mt[keNo Y(1^L>9)Y3/}~0#0# Ó/Z E*x2,pF}dZ& 8e4HOAsVD(zQFy2a{jx 7ܡp; ţ9R<"sS8\[vAsD&J_Rs ʗRoN(뼏?=sONr@8}. 
*"";J*w`E@P\ʊ( & BrL&suT&LL?U]U]ǧ:=穧Øs]W &/U;]'7թ^rpg]6&M~n7&ù?lkxRTcFOhVD`KcKw&iH6J@"Q5K]h&ׇ u!Y-=>3wQ:;|4ߟκ[Ƌlwf;g ;(TXܥv\*qJpu?s%9-44}{?<dz bavQ9|}e3 /Տg4fMҽY:>n|ԟeo,Kojlڍ[_1@ /L_pJ9tRuX91kV{ =JٝKg[6}t`}9] 4qԘj~D55Ko11^hYm׳R^ JֆaNDy7V=B?wE%g!֑\n$w=sj!Ie3KrH8SLw.YkD`6mmLSVVN4t̬FJl9QPV!l U:DQVY}vcT,%q銧CGGذyZv=wsE!ڍB :KVpiQ lWKnT 5Օaޔ8;]jjWKߣWddReww/]]*?D/W[64 ߶T9Bw&nZ0[5nι>4V*yQ1&Uu)cTөxNWuuuև\sJQz5|Pv6]wQsW5ᢿ}{)栬 0H+]~|뽾EdVjJ\ jiQSYZtq=#l,qc[\ӌwZ&~qڷ#ɒKuRyLjC^ckgڮFt.K[ko4uu02V9w&ZlTӪ4MU*Oͷny 8=I \u~*= xD <|u[O]se/^xWvjtDԨ1uluXq{8dit=Ħi㪳7oKd)e=$OLYIMmZ=}MjMiݩЦ J8+c|cRTt \N\T[kDɵLWɝ8QgӉ>t/][ +gfNPZ_`{ՔmFz4Ԉ:SsgXZM5c_sFq^d.סٓDɿyLW5#vuØ|=7ۋM0NKW1c2|Q|Wu Ŧw瀥Ӯ᤻$pw^Õ ݷi58oIA`8YlE9ݰ%Y}}ͽ}o&Ut;^ejqQ)lk ^jL/.:ps:L˄:51~->T,|#:Y$&_2\^2D-k^G6akKF  3_Or2^؅N,/]y\yUJ/?K_zEhVM\Wf^uac fO&d±+-}*^RnéIMD?x႟-]aKE.࿙{sԅW7[Zú5EPF@UGW]L,ʅ}}Kd'#0ŋZŬH6()P7bɒM]*uߎ5c)*җ~:,҉Quu.Irs;/շ-T1 ,䒭:λl2̕흯V/&fi7嬭u]̟>N}[Æ-LmSkoT}㪆 3E`tCMZÖ0oMcxz{i]Xw?L=Bc(Ӆ8ݯZ ]c~vo$K}/JY#;QR)^L= o֥DLrr_Ϙ?g;j 9IH~zQ*OJu]o82gfR/#!+A@ dԻů*jʮ>E@ɺj|âi[0wmc =|gC][gH+=jz}|G@y *LIRًZ]h|h\]Kqu S TLŐ@DC=˟8ϟ= ,ewi'uOӼBL&5YvNP7{Cd~z&}6eՏkNVw&,%T73^/c+)5a==[S10χkod?H&W^ۚ|\'VN} [3]Q,ţEjM0 &LjЭa.r;$0DK?cQ61g- gR诀i{(kji+#q>yqרzkM[iu6u*aj ǭR Ъ(#K;fkFW6NHԡھ kw^4J^qiT&KxaaYY&4uw ,咤S8Y\@Lr_gnA(y)5–m%eJev͒;>vg0}sal^_L ฿?,~MaƖ%&)Jϫϯ[sR\jpR<&9(Xya7\kXڇn5K/߹Lk[՞V=IVU9e86VfjrkT45KJ:j#tW/܊adIWMz*QhUIA)nGN^}ww!$uUv~e6Q*Nl=SLw?bOYvKjjS2mLFPJ7*5Q?v&>Zz3Ɠ>#H_xdųֆyc$&0;%]B܉CKJ:=v?;blgRr8yp:qҴk=Qr8Q[sYz-#p%@hLƓ.;˽/k3r^5*Ws "Iv&ZdNY]~|&\Ҕ%NCg](D~we%5V|#{:^S \qH{&B׊ϝsz8I{Z@`t7/-uJ}k^> _>y|{O J'=2Ś#'EP8]^.YD421Yk. rυpá($).;545pWXz'_a=qɒp=mn?o:#@nk~z @`λKdžLt?:w!u!3{7FbҔKv&F$*]N#faGskZ㞷3xt ׽Q&lVw.ԩCr=O\Tf>u_G$B7秷g0~-F+ @ |jѢ͙U*sgw0q%gvsU iٗu  /WvSMp  @O~;Umٟ zPx9N8pYꮹ;Ks2w'GXuoҕk+ <:{$e\WnK_]*~WWV3IG;^_qYpknZ%3GC]֏ wxE㖚DeL8>LSHʒ"Т.77n53];TT: =[8iJ ,v5T^AUg8#;'g  0 K\٦wj|`}lh0lM  |;Sa(&!隆m-dhhȔZeX>pʄp^hg +ijz UzZx3ҧd)nH[@&9%|p܃b @`\υ*8YrQԔ e ~oX  ,Y ܵR݄w8e0ql][WUgo"@3aШֿԸsЮ]]Y[7  ` ;YVwv.1HtOS:c+ZNiLIDATbe:{<7[`m  =9YJȵMM'tʦin6eG fmHe2i}.՗^k`I@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@REv<>e.̔7dRd8p@@D%r{:̊ U{Zy$\'_3  PeyX   00n@@(R"=(zl j@A@Mdi(Y.pnIJ!E,M[3NliETlR<_5v.οf <oB`Dtj/ؓ >\ѥxǼ_Rx_Rhz.1xyVubv3)z)  Pb$K%v9ܢxJGsT#(\s& jf׊%\hZ=O%_W. te@H /8)0Q'Nl(4݋ޠr1 sMlP,U]}N/t Ќ)+/)V }+N=O^3ɺUV)@("": }p ʹ;N:Z.W*pPq Ź;.+ʎV% '{ي#)ˁGn*V8;AQl osS~4}~4M|X(IQpw EL~NkyߠeFަXخ  @ ,P[ɊcN\>XxJ";N^+NQ\<{U8*4]v+N|UU|RQ@Q9X~ixok,;[u(2ik Fxٞ5Qq>C@$K P]kIZ,eq!E{eRn0)9|E\w)0>AKqup{slYȯ ,nL{<_ɩ_:itqp:ܝ8MѦpm@P "ɒFqSq"&  @1 ,XooDCwVEk4 d(7ʵYg+W\ؠb0JVm72=Y\Ln&xI1@(2Y* 0?KX\_O~7w+ާpuN4ZMqۍŽ֝p`۴3Vb>>ʼnG+'f.ѸO  -rgRJځ@*\#S(2[v9'4ac0Cx:y܃ኧK4@v/@ )J/Gz8iNc[, -@B:uV$ޥOa!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@F@j0ع_ƐnMQ*NKqIu݊Xi޳a^흥8EbtxřCZp"P@F@zpW{J&dNm*]? 
[Binary PNG image data omitted. The recoverable labels describe an OmniGraffle 6.0.5 diagram (Canvas 1, Layer 1) of a VMware NSX deployment: a Physical Router and an NSX Controller attach through a Physical Switch to a Network Node (VMware NSX plugin, DHCP agent, Neutron server) and a Compute Node hosting VM1 instances on Tenant 1 Network 1 and Tenant 1 Network 2. The legend distinguishes traffic flow from physical connections.]

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/archives/index.rst0000644000175000017500000000071000000000000023743 0ustar00coreycorey00000000000000
=================
Archived Contents
=================

.. note:: Contents here have been moved from the unified version of
   Administration Guide. They will be merged into the Networking Guide
   gradually.

.. toctree::
   :maxdepth: 2

   introduction.rst
   arch.rst
   config-plugins.rst
   config-agents.rst
   config-identity.rst
   adv-config.rst
   multi-dhcp-agents.rst
   use.rst
   adv-features.rst
   adv-operational-features.rst
   auth.rst

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/archives/introduction.rst0000644000175000017500000001601600000000000025363 0ustar00coreycorey00000000000000
==========================
Introduction to Networking
==========================

The Networking service, code-named neutron, provides an API that lets you
define network connectivity and addressing in the cloud. The Networking
service enables operators to leverage different networking technologies to
power their cloud networking. The Networking service also provides an API
to configure and manage a variety of network services ranging from L3
forwarding and NAT to edge firewalls, and IPsec VPN.

For a detailed description of the Networking API abstractions and their
attributes, see the `OpenStack Networking API v2.0 Reference `__.

.. note:: If you use the Networking service, do not run the Compute
   ``nova-network`` service (like you do in traditional Compute
   deployments). When you configure networking, see the Compute-related
   topics in this Networking section.

Networking API
~~~~~~~~~~~~~~

Networking is a virtual network service that provides a powerful API to
define the network connectivity and IP addressing that devices from other
services, such as Compute, use. The Compute API has a virtual server
abstraction to describe computing resources. Similarly, the Networking API
has virtual network, subnet, and port abstractions to describe networking
resources.
+---------------+-------------------------------------------------------------+ | Resource | Description | +===============+=============================================================+ | **Network** | An isolated L2 segment, analogous to VLAN in the physical | | | networking world. | +---------------+-------------------------------------------------------------+ | **Subnet** | A block of v4 or v6 IP addresses and associated | | | configuration state. | +---------------+-------------------------------------------------------------+ | **Port** | A connection point for attaching a single device, such as | | | the NIC of a virtual server, to a virtual network. Also | | | describes the associated network configuration, such as | | | the MAC and IP addresses to be used on that port. | +---------------+-------------------------------------------------------------+ **Networking resources** To configure rich network topologies, you can create and configure networks and subnets and instruct other OpenStack services like Compute to attach virtual devices to ports on these networks. In particular, Networking supports each project having multiple private networks and enables projects to choose their own IP addressing scheme, even if those IP addresses overlap with those that other projects use. The Networking service: - Enables advanced cloud networking use cases, such as building multi-tiered web applications and enabling migration of applications to the cloud without changing IP addresses. - Offers flexibility for administrators to customize network offerings. - Enables developers to extend the Networking API. Over time, the extended functionality becomes part of the core Networking API. Configure SSL support for networking API ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ OpenStack Networking supports SSL for the Networking API server. By default, SSL is disabled but you can enable it in the ``neutron.conf`` file. Set these options to configure SSL: ``use_ssl = True`` Enables SSL on the networking API server. ``ssl_cert_file = PATH_TO_CERTFILE`` Certificate file that is used when you securely start the Networking API server. ``ssl_key_file = PATH_TO_KEYFILE`` Private key file that is used when you securely start the Networking API server. ``ssl_ca_file = PATH_TO_CAFILE`` Optional. CA certificate file that is used when you securely start the Networking API server. This file verifies connecting clients. Set this option when API clients must authenticate to the API server by using SSL certificates that are signed by a trusted CA. ``tcp_keepidle = 600`` The value of TCP\_KEEPIDLE, in seconds, for each server socket when starting the API server. Not supported on OS X. ``retry_until_window = 30`` Number of seconds to keep retrying to listen. ``backlog = 4096`` Number of backlog requests with which to configure the socket. Firewall-as-a-Service (FWaaS) overview ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For information on Firewall-as-a-Service (FWaaS), please consult the :doc:`Networking Guide <../fwaas>`. Allowed-address-pairs ~~~~~~~~~~~~~~~~~~~~~ ``Allowed-address-pairs`` enables you to specify mac_address and ip_address(cidr) pairs that pass through a port regardless of subnet. This enables the use of protocols such as VRRP, which floats an IP address between two instances to enable fast data plane failover. .. note:: Currently, only the ML2, Open vSwitch, and VMware NSX plug-ins support the allowed-address-pairs extension. 
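Before using these operations, you can verify that the extension is
actually loaded in your deployment. This is a usage sketch: the ``grep``
filter is just one convenient way to trim the output, and the alias only
appears when the extension is enabled:

.. code-block:: console

   $ openstack extension list --network -c Alias -f value | grep allowed-address-pairs
   allowed-address-pairs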
**Basic allowed-address-pairs operations.**

- Create a port with a specified allowed address pair:

  .. code-block:: console

     $ openstack port create port1 --allowed-address \
       ip-address=<IP_CIDR>[,mac_address=<MAC_ADDRESS>]

- Update a port by adding allowed address pairs:

  .. code-block:: console

     $ openstack port set PORT_UUID --allowed-address \
       ip-address=<IP_CIDR>[,mac_address=<MAC_ADDRESS>]

For more information, see the Networking Guide.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/archives/use.rst0000644000175000017500000004217300000000000023441 0ustar00coreycorey00000000000000
==============
Use Networking
==============

You can manage OpenStack Networking services by using the service command.
For example:

.. code-block:: console

   # service neutron-server stop
   # service neutron-server status
   # service neutron-server start
   # service neutron-server restart

Log files are in the ``/var/log/neutron`` directory.

Configuration files are in the ``/etc/neutron`` directory.

Administrators and projects can use OpenStack Networking to build rich
network topologies. Administrators can create network connectivity on
behalf of projects.

Core Networking API features
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

After installing and configuring Networking (neutron), projects and
administrators can perform create-read-update-delete (CRUD) API networking
operations. This is performed using the Networking API directly with either
the :command:`neutron` command-line interface (CLI) or the
:command:`openstack` CLI. The :command:`neutron` CLI is a wrapper around the
Networking API. Every Networking API call has a corresponding
:command:`neutron` command. The :command:`openstack` CLI is a common
interface for all OpenStack projects; however, not every API operation has
been implemented. For the list of available commands, see
`Command List `__.

The :command:`neutron` CLI includes a number of options. For details, see
`Create and manage networks `__.

Basic Networking operations
---------------------------

To learn about advanced capabilities available through the
:command:`neutron` command-line interface (CLI), read the networking
section `Create and manage networks `__ in the OpenStack End User Guide.

This table shows example :command:`openstack` commands that enable you to
complete basic network operations:

+-------------------------+-------------------------------------------------+
| Operation               | Command                                         |
+=========================+=================================================+
| Creates a network.      | ``$ openstack network create net1``             |
+-------------------------+-------------------------------------------------+
| Creates a subnet that   | ``$ openstack subnet create subnet1``           |
| is associated with      | ``--subnet-range 10.0.0.0/24``                  |
| net1.                   | ``--network net1``                              |
+-------------------------+-------------------------------------------------+
| Lists ports for a       | ``$ openstack port list``                       |
| specified project.      |                                                 |
+-------------------------+-------------------------------------------------+
| Lists ports for a       | ``$ openstack port list -c ID``                 |
| specified project and   | ``-c "Fixed IP Addresses"``                     |
| displays the ``ID``,    |                                                 |
| ``Fixed IP Addresses``. |                                                 |
+-------------------------+-------------------------------------------------+
| Shows information for a | ``$ openstack port show PORT_ID``               |
| specified port.         |                                                 |
+-------------------------+-------------------------------------------------+

**Basic Networking operations**

.. note::

   The ``device_owner`` field describes who owns the port.
A port whose ``device_owner`` begins with: - ``network`` is created by Networking. - ``compute`` is created by Compute. Administrative operations ------------------------- The administrator can run any :command:`openstack` command on behalf of projects by specifying an Identity ``project`` in the command, as follows: .. code-block:: console $ openstack network create --project PROJECT_ID NETWORK_NAME For example: .. code-block:: console $ openstack network create --project 5e4bbe24b67a4410bc4d9fae29ec394e net1 .. note:: To view all project IDs in Identity, run the following command as an Identity service admin user: .. code-block:: console $ openstack project list Advanced Networking operations ------------------------------ This table shows example CLI commands that enable you to complete advanced network operations: +-------------------------------+--------------------------------------------+ | Operation | Command | +===============================+============================================+ |Creates a network that | | |all projects can use. | | | | | | | ``$ openstack network create`` | | | ``--share public-net`` | +-------------------------------+--------------------------------------------+ |Creates a subnet with a | | |specified gateway IP address. | | | | | | | ``$ openstack subnet create subnet1`` | | | ``--gateway 10.0.0.254 --network net1`` | +-------------------------------+--------------------------------------------+ |Creates a subnet that has | | |no gateway IP address. | | | | | | | ``$ openstack subnet create subnet1`` | | | ``--no-gateway --network net1`` | +-------------------------------+--------------------------------------------+ |Creates a subnet with DHCP | | |disabled. | | | | | | | ``$ openstack subnet create subnet1`` | | | ``--network net1 --no-dhcp`` | +-------------------------------+--------------------------------------------+ |Specifies a set of host routes | | | | | | | ``$ openstack subnet create subnet1`` | | | ``--network net1 --host-route`` | | | ``destination=40.0.1.0/24,`` | | | ``gateway=40.0.0.2`` | +-------------------------------+--------------------------------------------+ |Creates a subnet with a | | |specified set of dns name | | |servers. | | | | | | | ``$ openstack subnet create subnet1`` | | | ``--network net1 --dns-nameserver`` | | | ``8.8.4.4`` | +-------------------------------+--------------------------------------------+ |Displays all ports and | | |IPs allocated on a network. | | | | | | | ``$ openstack port list --network NET_ID`` | +-------------------------------+--------------------------------------------+ **Advanced Networking operations** .. note:: During port creation and update, specific extra-dhcp-options can be left blank. For example, ``router`` and ``classless-static-route``. This causes dnsmasq to have an empty option in the ``opts`` file related to the network. For example: .. code-block:: console tag:tag0,option:classless-static-route, tag:tag0,option:router, Use Compute with Networking ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Basic Compute and Networking operations --------------------------------------- This table shows example :command:`openstack` commands that enable you to complete basic VM networking operations: +----------------------------------+-----------------------------------------+ | Action | Command | +==================================+=========================================+ |Checks available networks. 
| | | | | | | ``$ openstack network list`` | +----------------------------------+-----------------------------------------+ |Boots a VM with a single NIC on | | |a selected Networking network. | | | | | | | ``$ openstack server create --image`` | | | ``IMAGE --flavor FLAVOR --nic`` | | | ``net-id=NET_ID VM_NAME`` | +----------------------------------+-----------------------------------------+ |Searches for ports with a | | |``device_id`` that matches the | | |Compute instance UUID. See :ref: | | |`Create and delete VMs` | | | | | | |``$ openstack port list --server VM_ID`` | +----------------------------------+-----------------------------------------+ |Searches for ports, but shows | | |only the ``mac_address`` of | | |the port. | | | | | | | ``$ openstack port list -c`` | | | ``"MAC Address" --server VM_ID`` | +----------------------------------+-----------------------------------------+ |Temporarily disables a port from | | |sending traffic. | | | | | | | ``$ openstack port set PORT_ID`` | | | ``--disable`` | +----------------------------------+-----------------------------------------+ **Basic Compute and Networking operations** .. note:: The ``device_id`` can also be a logical router ID. .. note:: - When you boot a Compute VM, a port on the network that corresponds to the VM NIC is automatically created and associated with the default security group. You can configure `security group rules <#enable-ping-and-ssh-on-vms-security-groups>`__ to enable users to access the VM. .. _Create and delete VMs: - When you delete a Compute VM, the underlying Networking port is automatically deleted. Advanced VM creation operations ------------------------------- This table shows example :command:`openstack` commands that enable you to complete advanced VM creation operations: +-------------------------------------+--------------------------------------+ | Operation | Command | +=====================================+======================================+ |Boots a VM with multiple | | |NICs. | | | | ``$ openstack server create --image``| | | ``IMAGE --flavor FLAVOR --nic`` | | | ``net-id=NET_ID VM_NAME`` | | | ``net-id=NET2-ID VM_NAME`` | +-------------------------------------+--------------------------------------+ |Boots a VM with a specific IP | | |address. Note that you cannot | | |use the ``--max`` or ``--min`` | | |parameters in this case. | | | | | | | ``$ openstack server create --image``| | | ``IMAGE --flavor FLAVOR --nic`` | | | ``net-id=NET_ID VM_NAME`` | | | ``v4-fixed-ip=IP-ADDR VM_NAME`` | +-------------------------------------+--------------------------------------+ |Boots a VM that connects to all | | |networks that are accessible to the | | |project who submits the request | | |(without the ``--nic`` option). | | | | | | | ``$ openstack server create --image``| | | ``IMAGE --flavor FLAVOR`` | +-------------------------------------+--------------------------------------+ **Advanced VM creation operations** .. note:: Cloud images that distribution vendors offer usually have only one active NIC configured. When you boot with multiple NICs, you must configure additional interfaces on the image or the NICs are not reachable. The following Debian/Ubuntu-based example shows how to set up the interfaces within the instance in the ``/etc/network/interfaces`` file. You must apply this configuration to the image. .. 
code-block:: bash # The loopback network interface auto lo iface lo inet loopback auto eth0 iface eth0 inet dhcp auto eth1 iface eth1 inet dhcp Enable ping and SSH on VMs (security groups) -------------------------------------------- You must configure security group rules depending on the type of plug-in you are using. If you are using a plug-in that: - Implements Networking security groups, you can configure security group rules directly by using the :command:`openstack security group rule create` command. This example enables ``ping`` and ``ssh`` access to your VMs. .. code-block:: console $ openstack security group rule create --protocol icmp \ --ingress SECURITY_GROUP .. code-block:: console $ openstack security group rule create --protocol tcp \ --egress --description "Sample Security Group" SECURITY_GROUP - Does not implement Networking security groups, you can configure security group rules by using the :command:`openstack security group rule create` or :command:`euca-authorize` command. These :command:`openstack` commands enable ``ping`` and ``ssh`` access to your VMs. .. code-block:: console $ openstack security group rule create --protocol icmp default $ openstack security group rule create --protocol tcp --dst-port 22:22 default .. note:: If your plug-in implements Networking security groups, you can also leverage Compute security groups by setting ``use_neutron = True`` in the ``nova.conf`` file. After you set this option, all Compute security group commands are proxied to Networking. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/config-address-scopes.rst0000644000175000017500000007124600000000000025226 0ustar00coreycorey00000000000000.. _config-address-scopes: ============== Address scopes ============== Address scopes build from subnet pools. While subnet pools provide a mechanism for controlling the allocation of addresses to subnets, address scopes show where addresses can be routed between networks, preventing the use of overlapping addresses in any two subnets. Because all addresses allocated in the address scope do not overlap, neutron routers do not NAT between your projects' network and your external network. As long as the addresses within an address scope match, the Networking service performs simple routing between networks. Accessing address scopes ~~~~~~~~~~~~~~~~~~~~~~~~ Anyone with access to the Networking service can create their own address scopes. However, network administrators can create shared address scopes, allowing other projects to create networks within that address scope. Access to addresses in a scope are managed through subnet pools. Subnet pools can either be created in an address scope, or updated to belong to an address scope. With subnet pools, all addresses in use within the address scope are unique from the point of view of the address scope owner. Therefore, add more than one subnet pool to an address scope if the pools have different owners, allowing for delegation of parts of the address scope. Delegation prevents address overlap across the whole scope. Otherwise, you receive an error if two pools have the same address ranges. Each router interface is associated with an address scope by looking at subnets connected to the network. When a router connects to an external network with matching address scopes, network traffic routes between without Network address translation (NAT). 
The router marks all traffic connections originating from each interface with its corresponding address scope. If traffic leaves an interface in the wrong scope, the router blocks the traffic. Backwards compatibility ~~~~~~~~~~~~~~~~~~~~~~~ Networks created before the Mitaka release do not contain explicitly named address scopes, unless the network contains subnets from a subnet pool that belongs to a created or updated address scope. The Networking service preserves backwards compatibility with pre-Mitaka networks through special address scope properties so that these networks can perform advanced routing: #. Unlimited address overlap is allowed. #. Neutron routers, by default, will NAT traffic from internal networks to external networks. #. Pre-Mitaka address scopes are not visible through the API. You cannot list address scopes or show details. Scopes exist implicitly as a catch-all for addresses that are not explicitly scoped. Create shared address scopes as an administrative user ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section shows how to set up shared address scopes to allow simple routing for project networks with the same subnet pools. .. note:: Irrelevant fields have been trimmed from the output of these commands for brevity. #. Create IPv6 and IPv4 address scopes: .. code-block:: console $ openstack address scope create --share --ip-version 6 address-scope-ip6 +------------+--------------------------------------+ | Field | Value | +------------+--------------------------------------+ | headers | | | id | 28424dfc-9abd-481b-afa3-1da97a8fead7 | | ip_version | 6 | | name | address-scope-ip6 | | project_id | 098429d072d34d3596c88b7dbf7e91b6 | | shared | True | +------------+--------------------------------------+ .. code-block:: console $ openstack address scope create --share --ip-version 4 address-scope-ip4 +------------+--------------------------------------+ | Field | Value | +------------+--------------------------------------+ | headers | | | id | 3193bd62-11b5-44dc-acf8-53180f21e9f2 | | ip_version | 4 | | name | address-scope-ip4 | | project_id | 098429d072d34d3596c88b7dbf7e91b6 | | shared | True | +------------+--------------------------------------+ #. Create subnet pools specifying the name (or UUID) of the address scope that the subnet pool belongs to. If you have existing subnet pools, use the :command:`openstack subnet pool set` command to put them in a new address scope: .. code-block:: console $ openstack subnet pool create --address-scope address-scope-ip6 \ --share --pool-prefix 2001:db8:a583::/48 --default-prefix-length 64 \ subnet-pool-ip6 +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | address_scope_id | 28424dfc-9abd-481b-afa3-1da97a8fead7 | | created_at | 2016-12-13T22:53:30Z | | default_prefixlen | 64 | | default_quota | None | | description | | | id | a59ff52b-0367-41ff-9781-6318b927dd0e | | ip_version | 6 | | is_default | False | | max_prefixlen | 128 | | min_prefixlen | 64 | | name | subnet-pool-ip6 | | prefixes | 2001:db8:a583::/48 | | project_id | 098429d072d34d3596c88b7dbf7e91b6 | | revision_number | 1 | | shared | True | | tags | [] | | updated_at | 2016-12-13T22:53:30Z | +-------------------+--------------------------------------+ .. 
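code-block:: console

   $ openstack subnet pool set --address-scope address-scope-ip4 \
     existing-pool-ip4

The ``subnet pool set`` call above is only a sketch of that path: the pool
name ``existing-pool-ip4`` is hypothetical and stands for whatever pool you
already have. The IPv4 pool used in the rest of this example is instead
created directly in the scope:

..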
code-block:: console $ openstack subnet pool create --address-scope address-scope-ip4 \ --share --pool-prefix 203.0.113.0/24 --default-prefix-length 26 \ subnet-pool-ip4 +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | address_scope_id | 3193bd62-11b5-44dc-acf8-53180f21e9f2 | | created_at | 2016-12-13T22:55:09Z | | default_prefixlen | 26 | | default_quota | None | | description | | | id | d02af70b-d622-426f-8e60-ed9df2a8301f | | ip_version | 4 | | is_default | False | | max_prefixlen | 32 | | min_prefixlen | 8 | | name | subnet-pool-ip4 | | prefixes | 203.0.113.0/24 | | project_id | 098429d072d34d3596c88b7dbf7e91b6 | | revision_number | 1 | | shared | True | | tags | [] | | updated_at | 2016-12-13T22:55:09Z | +-------------------+--------------------------------------+ #. Make sure that subnets on an external network are created from the subnet pools created above: .. code-block:: console $ openstack subnet show ipv6-public-subnet +-------------------+------------------------------------------+ | Field | Value | +-------------------+------------------------------------------+ | allocation_pools | 2001:db8:a583::2-2001:db8:a583:0:ffff:ff | | | ff:ffff:ffff | | cidr | 2001:db8:a583::/64 | | created_at | 2016-12-10T21:36:04Z | | description | | | dns_nameservers | | | enable_dhcp | False | | gateway_ip | 2001:db8:a583::1 | | host_routes | | | id | b333bf5a-758c-4b3f-97ec-5f12d9bfceb7 | | ip_version | 6 | | ipv6_address_mode | None | | ipv6_ra_mode | None | | name | ipv6-public-subnet | | network_id | 05a8d31e-330b-4d96-a3fa-884b04abfa4c | | project_id | 098429d072d34d3596c88b7dbf7e91b6 | | revision_number | 2 | | segment_id | None | | service_types | | | subnetpool_id | a59ff52b-0367-41ff-9781-6318b927dd0e | | tags | [] | | updated_at | 2016-12-10T21:36:04Z | +-------------------+------------------------------------------+ .. code-block:: console $ openstack subnet show public-subnet +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | allocation_pools | 203.0.113.2-203.0.113.62 | | cidr | 203.0.113.0/26 | | created_at | 2016-12-10T21:35:52Z | | description | | | dns_nameservers | | | enable_dhcp | False | | gateway_ip | 203.0.113.1 | | host_routes | | | id | 7fd48240-3acc-4724-bc82-16c62857edec | | ip_version | 4 | | ipv6_address_mode | None | | ipv6_ra_mode | None | | name | public-subnet | | network_id | 05a8d31e-330b-4d96-a3fa-884b04abfa4c | | project_id | 098429d072d34d3596c88b7dbf7e91b6 | | revision_number | 2 | | segment_id | None | | service_types | | | subnetpool_id | d02af70b-d622-426f-8e60-ed9df2a8301f | | tags | [] | | updated_at | 2016-12-10T21:35:52Z | +-------------------+--------------------------------------+ Routing with address scopes for non-privileged users ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section shows how non-privileged users can use address scopes to route straight to an external network without NAT. #. Create a couple of networks to host subnets: .. 
code-block:: console $ openstack network create network1 +---------------------------+--------------------------------------+ | Field | Value | +---------------------------+--------------------------------------+ | admin_state_up | UP | | availability_zone_hints | | | availability_zones | | | created_at | 2016-12-13T23:21:01Z | | description | | | headers | | | id | 1bcf3fe9-a0cb-4d88-a067-a4d7f8e635f0 | | ipv4_address_scope | None | | ipv6_address_scope | None | | mtu | 1450 | | name | network1 | | port_security_enabled | True | | project_id | 098429d072d34d3596c88b7dbf7e91b6 | | provider:network_type | vxlan | | provider:physical_network | None | | provider:segmentation_id | 94 | | revision_number | 3 | | router:external | Internal | | shared | False | | status | ACTIVE | | subnets | | | tags | [] | | updated_at | 2016-12-13T23:21:01Z | +---------------------------+--------------------------------------+ .. code-block:: console $ openstack network create network2 +---------------------------+--------------------------------------+ | Field | Value | +---------------------------+--------------------------------------+ | admin_state_up | UP | | availability_zone_hints | | | availability_zones | | | created_at | 2016-12-13T23:21:45Z | | description | | | headers | | | id | 6c583603-c097-4141-9c5c-288b0e49c59f | | ipv4_address_scope | None | | ipv6_address_scope | None | | mtu | 1450 | | name | network2 | | port_security_enabled | True | | project_id | 098429d072d34d3596c88b7dbf7e91b6 | | provider:network_type | vxlan | | provider:physical_network | None | | provider:segmentation_id | 81 | | revision_number | 3 | | router:external | Internal | | shared | False | | status | ACTIVE | | subnets | | | tags | [] | | updated_at | 2016-12-13T23:21:45Z | +---------------------------+--------------------------------------+ #. Create a subnet not associated with a subnet pool or an address scope: .. code-block:: console $ openstack subnet create --network network1 --subnet-range \ 198.51.100.0/26 subnet-ip4-1 +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | allocation_pools | 198.51.100.2-198.51.100.62 | | cidr | 198.51.100.0/26 | | created_at | 2016-12-13T23:24:16Z | | description | | | dns_nameservers | | | enable_dhcp | True | | gateway_ip | 198.51.100.1 | | headers | | | host_routes | | | id | 66874039-d31b-4a27-85d7-14c89341bbb7 | | ip_version | 4 | | ipv6_address_mode | None | | ipv6_ra_mode | None | | name | subnet-ip4-1 | | network_id | 1bcf3fe9-a0cb-4d88-a067-a4d7f8e635f0 | | project_id | 098429d072d34d3596c88b7dbf7e91b6 | | revision_number | 2 | | service_types | | | subnetpool_id | None | | tags | [] | | updated_at | 2016-12-13T23:24:16Z | +-------------------+--------------------------------------+ .. 
code-block:: console $ openstack subnet create --network network1 --ipv6-ra-mode slaac \ --ipv6-address-mode slaac --ip-version 6 --subnet-range \ 2001:db8:80d2:c4d3::/64 subnet-ip6-1 +-------------------+-----------------------------------------+ | Field | Value | +-------------------+-----------------------------------------+ | allocation_pools | 2001:db8:80d2:c4d3::2-2001:db8:80d2:c4d | | | 3:ffff:ffff:ffff:ffff | | cidr | 2001:db8:80d2:c4d3::/64 | | created_at | 2016-12-13T23:28:28Z | | description | | | dns_nameservers | | | enable_dhcp | True | | gateway_ip | 2001:db8:80d2:c4d3::1 | | headers | | | host_routes | | | id | a7551b23-2271-4a88-9c41-c84b048e0722 | | ip_version | 6 | | ipv6_address_mode | slaac | | ipv6_ra_mode | slaac | | name | subnet-ip6-1 | | network_id | 1bcf3fe9-a0cb-4d88-a067-a4d7f8e635f0 | | project_id | 098429d072d34d3596c88b7dbf7e91b6 | | revision_number | 2 | | service_types | | | subnetpool_id | None | | tags | [] | | updated_at | 2016-12-13T23:28:28Z | +-------------------+-----------------------------------------+ #. Create a subnet using a subnet pool associated with an address scope from an external network: .. code-block:: console $ openstack subnet create --subnet-pool subnet-pool-ip4 \ --network network2 subnet-ip4-2 +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | allocation_pools | 203.0.113.2-203.0.113.62 | | cidr | 203.0.113.0/26 | | created_at | 2016-12-13T23:32:12Z | | description | | | dns_nameservers | | | enable_dhcp | True | | gateway_ip | 203.0.113.1 | | headers | | | host_routes | | | id | 12be8e8f-5871-4091-9e9e-4e0651b9677e | | ip_version | 4 | | ipv6_address_mode | None | | ipv6_ra_mode | None | | name | subnet-ip4-2 | | network_id | 6c583603-c097-4141-9c5c-288b0e49c59f | | project_id | 098429d072d34d3596c88b7dbf7e91b6 | | revision_number | 2 | | service_types | | | subnetpool_id | d02af70b-d622-426f-8e60-ed9df2a8301f | | tags | [] | | updated_at | 2016-12-13T23:32:12Z | +-------------------+--------------------------------------+ .. code-block:: console $ openstack subnet create --ip-version 6 --ipv6-ra-mode slaac \ --ipv6-address-mode slaac --subnet-pool subnet-pool-ip6 \ --network network2 subnet-ip6-2 +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | allocation_pools | 2001:db8:a583::2-2001:db8:a583:0:fff | | | f:ffff:ffff:ffff | | cidr | 2001:db8:a583::/64 | | created_at | 2016-12-13T23:31:17Z | | description | | | dns_nameservers | | | enable_dhcp | True | | gateway_ip | 2001:db8:a583::1 | | headers | | | host_routes | | | id | b599c2be-e3cd-449c-ba39-3cfcc744c4be | | ip_version | 6 | | ipv6_address_mode | slaac | | ipv6_ra_mode | slaac | | name | subnet-ip6-2 | | network_id | 6c583603-c097-4141-9c5c-288b0e49c59f | | project_id | 098429d072d34d3596c88b7dbf7e91b6 | | revision_number | 2 | | service_types | | | subnetpool_id | a59ff52b-0367-41ff-9781-6318b927dd0e | | tags | [] | | updated_at | 2016-12-13T23:31:17Z | +-------------------+--------------------------------------+ By creating subnets from scoped subnet pools, the network is associated with the address scope. .. 
code-block:: console $ openstack network show network2 +---------------------------+------------------------------+ | Field | Value | +---------------------------+------------------------------+ | admin_state_up | UP | | availability_zone_hints | | | availability_zones | nova | | created_at | 2016-12-13T23:21:45Z | | description | | | id | 6c583603-c097-4141-9c5c- | | | 288b0e49c59f | | ipv4_address_scope | 3193bd62-11b5-44dc- | | | acf8-53180f21e9f2 | | ipv6_address_scope | 28424dfc-9abd-481b- | | | afa3-1da97a8fead7 | | mtu | 1450 | | name | network2 | | port_security_enabled | True | | project_id | 098429d072d34d3596c88b7dbf7e | | | 91b6 | | provider:network_type | vxlan | | provider:physical_network | None | | provider:segmentation_id | 81 | | revision_number | 10 | | router:external | Internal | | shared | False | | status | ACTIVE | | subnets | 12be8e8f-5871-4091-9e9e- | | | 4e0651b9677e, b599c2be-e3cd- | | | 449c-ba39-3cfcc744c4be | | tags | [] | | updated_at | 2016-12-13T23:32:12Z | +---------------------------+------------------------------+ #. Connect a router to each of the project subnets that have been created, for example, using a router called ``router1``: .. code-block:: console $ openstack router add subnet router1 subnet-ip4-1 $ openstack router add subnet router1 subnet-ip4-2 $ openstack router add subnet router1 subnet-ip6-1 $ openstack router add subnet router1 subnet-ip6-2 Checking connectivity --------------------- This example shows how to check the connectivity between networks with address scopes. #. Launch two instances, ``instance1`` on ``network1`` and ``instance2`` on ``network2``. Associate a floating IP address to both instances. #. Adjust security groups to allow pings and SSH (both IPv4 and IPv6): .. code-block:: console $ openstack server list +--------------+-----------+---------------------------------------------------------------------------+--------+----------+ | ID | Name | Networks | Image | Flavor | +--------------+-----------+---------------------------------------------------------------------------+--------+----------+ | 97e49c8e-... | instance1 | network1=2001:db8:80d2:c4d3:f816:3eff:fe52:b69f, 198.51.100.3, 203.0.113.3| cirros | m1.tiny | | ceba9638-... | instance2 | network2=203.0.113.3, 2001:db8:a583:0:f816:3eff:fe42:1eeb, 203.0.113.4 | centos | m1.small | +--------------+-----------+---------------------------------------------------------------------------+--------+----------+ Regardless of address scopes, the floating IPs can be pinged from the external network: .. code-block:: console $ ping -c 1 203.0.113.3 1 packets transmitted, 1 received, 0% packet loss, time 0ms $ ping -c 1 203.0.113.4 1 packets transmitted, 1 received, 0% packet loss, time 0ms You can now ping ``instance2`` directly because ``instance2`` shares the same address scope as the external network: .. note:: BGP routing can be used to automatically set up a static route for your instances. .. code-block:: console # ip route add 203.0.113.0/26 via 203.0.113.2 $ ping -c 1 203.0.113.3 1 packets transmitted, 1 received, 0% packet loss, time 0ms .. code-block:: console # ip route add 2001:db8:a583::/64 via 2001:db8::1 $ ping6 -c 1 2001:db8:a583:0:f816:3eff:fe42:1eeb 1 packets transmitted, 1 received, 0% packet loss, time 0ms You cannot ping ``instance1`` directly because the address scopes do not match: .. code-block:: console # ip route add 198.51.100.0/26 via 203.0.113.2 $ ping -c 1 198.51.100.3 1 packets transmitted, 0 received, 100% packet loss, time 0ms .. 
code-block:: console # ip route add 2001:db8:80d2:c4d3::/64 via 2001:db8::1 $ ping6 -c 1 2001:db8:80d2:c4d3:f816:3eff:fe52:b69f 1 packets transmitted, 0 received, 100% packet loss, time 0ms If the address scopes match between networks then pings and other traffic route directly through. If the scopes do not match between networks, the router either drops the traffic or applies NAT to cross scope boundaries. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/config-auto-allocation.rst0000644000175000017500000002445700000000000025404 0ustar00coreycorey00000000000000.. _config-auto-allocation: ========================================== Automatic allocation of network topologies ========================================== The auto-allocation feature introduced in Mitaka simplifies the procedure of setting up an external connectivity for end-users, and is also known as **Get Me A Network**. Previously, a user had to configure a range of networking resources to boot a server and get access to the Internet. For example, the following steps are required: * Create a network * Create a subnet * Create a router * Uplink the router on an external network * Downlink the router on the previously created subnet These steps need to be performed on each logical segment that a VM needs to be connected to, and may require networking knowledge the user might not have. This feature is designed to automate the basic networking provisioning for projects. The steps to provision a basic network are run during instance boot, making the networking setup hands-free. To make this possible, provide a default external network and default subnetpools (one for IPv4, or one for IPv6, or one of each) so that the Networking service can choose what to do in lieu of input. Once these are in place, users can boot their VMs without specifying any networking details. The Compute service will then use this feature automatically to wire user VMs. Enabling the deployment for auto-allocation ------------------------------------------- To use this feature, the neutron service must have the following extensions enabled: * ``auto-allocated-topology`` * ``subnet_allocation`` * ``external-net`` * ``router`` Before the end-user can use the auto-allocation feature, the operator must create the resources that will be used for the auto-allocated network topology creation. To perform this task, proceed with the following steps: #. Set up a default external network Setting up an external network is described in `OpenStack Networking Guide <./archives/adv-features.html>`_. Assuming the external network to be used for the auto-allocation feature is named ``public``, make it the ``default`` external network with the following command: .. code-block:: console $ openstack network set public --default .. note:: The flag ``--default`` (and ``--no-default`` flag) is only effective with external networks and has no effects on regular (or internal) networks. #. Create default subnetpools The auto-allocation feature requires at least one default subnetpool. One for IPv4, or one for IPv6, or one of each. .. 
code-block:: console $ openstack subnet pool create --share --default \ --pool-prefix 192.0.2.0/24 --default-prefix-length 26 \ shared-default +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | address_scope_id | None | | created_at | 2017-01-12T15:10:34Z | | default_prefixlen | 26 | | default_quota | None | | description | | | headers | | | id | b41b7b9c-de57-4c19-b1c5-731985bceb7f | | ip_version | 4 | | is_default | True | | max_prefixlen | 32 | | min_prefixlen | 8 | | name | shared-default | | prefixes | 192.0.2.0/24 | | project_id | 86acdbd1d72745fd8e8320edd7543400 | | revision_number | 1 | | shared | True | | tags | [] | | updated_at | 2017-01-12T15:10:34Z | +-------------------+--------------------------------------+ $ openstack subnet pool create --share --default \ --pool-prefix 2001:db8:8000::/48 --default-prefix-length 64 \ default-v6 +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | address_scope_id | None | | created_at | 2017-01-12T15:14:35Z | | default_prefixlen | 64 | | default_quota | None | | description | | | headers | | | id | 6f387016-17f0-4564-96ad-e34775b6ea14 | | ip_version | 6 | | is_default | True | | max_prefixlen | 128 | | min_prefixlen | 64 | | name | default-v6 | | prefixes | 2001:db8:8000::/48 | | project_id | 86acdbd1d72745fd8e8320edd7543400 | | revision_number | 1 | | shared | True | | tags | [] | | updated_at | 2017-01-12T15:14:35Z | +-------------------+--------------------------------------+ Get Me A Network ---------------- In a deployment where the operator has set up the resources as described above, they can get their auto-allocated network topology as follows: .. code-block:: console $ openstack network auto allocated topology create --or-show +------------+--------------------------------------+ | Field | Value | +------------+--------------------------------------+ | id | a380c780-d6cd-4510-a4c0-1a6ec9b85a29 | | name | None | | project_id | cfd1889ac7d64ad891d4f20aef9f8d7c | +------------+--------------------------------------+ .. note:: When the ``--or-show`` option is used the command returns the topology information if it already exists. Operators (and users with admin role) can get the auto-allocated topology for a project by specifying the project ID: .. code-block:: console $ openstack network auto allocated topology create --project \ cfd1889ac7d64ad891d4f20aef9f8d7c --or-show +------------+--------------------------------------+ | Field | Value | +------------+--------------------------------------+ | id | a380c780-d6cd-4510-a4c0-1a6ec9b85a29 | | name | None | | project_id | cfd1889ac7d64ad891d4f20aef9f8d7c | +------------+--------------------------------------+ The ID returned by this command is a network which can be used for booting a VM. .. code-block:: console $ openstack server create --flavor m1.small --image \ cirros-0.3.5-x86_64-uec --nic \ net-id=8b835bfb-cae2-4acc-b53f-c16bb5f9a7d0 vm1 The auto-allocated topology for a user never changes. In practice, when a user boots a server omitting the ``--nic`` option, and there is more than one network available, the Compute service will invoke the API behind ``auto allocated topology create``, fetch the network UUID, and pass it on during the boot process. 
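To inspect what was provisioned for a project, you can look the resources
up by their well-known names. This is a usage sketch: the names are the
fixed ones listed in the resources table below, and the ``-c`` options
merely trim the output:

.. code-block:: console

   $ openstack network show auto_allocated_network -c name -c subnets
   $ openstack router show auto_allocated_router -c name -c status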
Validating the requirements for auto-allocation ----------------------------------------------- To validate that the required resources are correctly set up for auto-allocation, without actually provisioning anything, use the ``--check-resources`` option: .. code-block:: console $ openstack network auto allocated topology create --check-resources Deployment error: No default router:external network. $ openstack network set public --default $ openstack network auto allocated topology create --check-resources Deployment error: No default subnetpools defined. $ openstack subnet pool set shared-default --default $ openstack network auto allocated topology create --check-resources +---------+-------+ | Field | Value | +---------+-------+ | dry-run | pass | +---------+-------+ The validation option behaves identically for all users. However, it is considered primarily an admin or service utility since it is the operator who must set up the requirements. Project resources created by auto-allocation -------------------------------------------- The auto-allocation feature creates one network topology in every project where it is used. The auto-allocated network topology for a project contains the following resources: +--------------------+------------------------------+ |Resource |Name | +====================+==============================+ |network |``auto_allocated_network`` | +--------------------+------------------------------+ |subnet (IPv4) |``auto_allocated_subnet_v4`` | +--------------------+------------------------------+ |subnet (IPv6) |``auto_allocated_subnet_v6`` | +--------------------+------------------------------+ |router |``auto_allocated_router`` | +--------------------+------------------------------+ Compatibility notes ------------------- Nova uses the ``auto allocated topology`` feature with API microversion 2.37 or later. This is because, unlike the neutron feature, which was implemented in the Mitaka release, the integration for nova was completed during the Newton release cycle. Note that the CLI option ``--nic`` can be omitted regardless of the microversion used, as long as there is no more than one network available to the project. Otherwise, nova fails with a 400 error because it does not know which network to use. Furthermore, nova does not start using the feature, regardless of whether or not a user requests microversion 2.37 or later, unless all of the ``nova-compute`` services are running Newton-level code. .. _config-az: ================== Availability zones ================== An availability zone groups network nodes that run services like DHCP, L3, FW, and others. It is defined as an agent's attribute on the network node. This allows users to associate an availability zone with their resources so that the resources get high availability. Use case -------- An availability zone is used to make network resources highly available. Operators group the nodes that are attached to different power sources under separate availability zones and configure scheduling for resources with high availability so that they are scheduled on different availability zones. Required extensions ------------------- The core plug-in must support the ``availability_zone`` extension.
The core plug-in also must support the ``network_availability_zone`` extension to schedule a network according to availability zones. The ``Ml2Plugin`` supports it. The router service plug-in must support the ``router_availability_zone`` extension to schedule a router according to the availability zones. The ``L3RouterPlugin`` supports it. .. code-block:: console $ openstack extension list --network -c Name -c Alias +---------------------------+---------------------------+ | Name | Alias | +---------------------------+---------------------------+ ... | Network Availability Zone | network_availability_zone | ... | Availability Zone | availability_zone | ... | Router Availability Zone | router_availability_zone | ... +---------------------------+---------------------------+ Availability zone of agents ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The ``availability_zone`` attribute can be defined in ``dhcp-agent`` and ``l3-agent``. To define an availability zone for each agent, set the value in the ``[AGENT]`` section of ``/etc/neutron/dhcp_agent.ini`` or ``/etc/neutron/l3_agent.ini``: .. code-block:: ini [AGENT] availability_zone = zone-1 To confirm the agent's availability zone: .. code-block:: console $ openstack network agent show 116cc128-4398-49af-a4ed-3e95494cd5fc +---------------------+---------------------------------------------------+ | Field | Value | +---------------------+---------------------------------------------------+ | admin_state_up | UP | | agent_type | DHCP agent | | alive | True | | availability_zone | zone-1 | | binary | neutron-dhcp-agent | | configurations | dhcp_driver='neutron.agent.linux.dhcp.Dnsmasq', | | | dhcp_lease_duration='86400', | | | log_agent_heartbeats='False', networks='2', | | | notifies_port_ready='True', ports='6', subnets='4' | | created_at | 2016-12-14 00:25:54 | | description | None | | heartbeat_timestamp | 2016-12-14 06:20:24 | | host | ankur-desktop | | id | 116cc128-4398-49af-a4ed-3e95494cd5fc | | started_at | 2016-12-14 00:25:54 | | topic | dhcp_agent | +---------------------+---------------------------------------------------+ $ openstack network agent show 9632309a-2aa4-4304-8603-c4de02c4a55f +---------------------+-------------------------------------------------+ | Field | Value | +---------------------+-------------------------------------------------+ | admin_state_up | UP | | agent_type | L3 agent | | alive | True | | availability_zone | zone-1 | | binary | neutron-l3-agent | | configurations | agent_mode='legacy', ex_gw_ports='2', | | | floating_ips='0', | | | handle_internal_only_routers='True', | | | interface_driver='openvswitch', interfaces='4', | | | log_agent_heartbeats='False', routers='2' | | created_at | 2016-12-14 00:25:58 | | description | None | | heartbeat_timestamp | 2016-12-14 06:20:28 | | host | ankur-desktop | | id | 9632309a-2aa4-4304-8603-c4de02c4a55f | | started_at | 2016-12-14 00:25:58 | | topic | l3_agent | +---------------------+-------------------------------------------------+ Availability zone related attributes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The following attributes are added to networks and routers: .. list-table:: :header-rows: 1 :widths: 25 10 10 10 50 * - Attribute name - Access - Required - Input type - Description * - availability_zone_hints - RW(POST only) - No - list of string - availability zone candidates for the resource * - availability_zones - RO - N/A - list of string - availability zones for the resource Use ``availability_zone_hints`` to specify the zone in which the resource is hosted: ..
code-block:: console $ openstack network create --availability-zone-hint zone-1 \ --availability-zone-hint zone-2 net1 +---------------------------+--------------------------------------+ | Field | Value | +---------------------------+--------------------------------------+ | admin_state_up | UP | | availability_zone_hints | zone-1 | | | zone-2 | | availability_zones | | | created_at | 2016-12-14T06:23:36Z | | description | | | headers | | | id | ad88e059-e7fa-4cf7-8857-6731a2a3a554 | | ipv4_address_scope | None | | ipv6_address_scope | None | | mtu | 1450 | | name | net1 | | port_security_enabled | True | | project_id | cfd1889ac7d64ad891d4f20aef9f8d7c | | provider:network_type | vxlan | | provider:physical_network | None | | provider:segmentation_id | 77 | | revision_number | 3 | | router:external | Internal | | shared | False | | status | ACTIVE | | subnets | | | tags | [] | | updated_at | 2016-12-14T06:23:37Z | +---------------------------+--------------------------------------+ .. code-block:: console $ openstack router create --ha --availability-zone-hint zone-1 \ --availability-zone-hint zone-2 router1 +-------------------------+--------------------------------------+ | Field | Value | +-------------------------+--------------------------------------+ | admin_state_up | UP | | availability_zone_hints | zone-1 | | | zone-2 | | availability_zones | | | created_at | 2016-12-14T06:25:40Z | | description | | | distributed | False | | external_gateway_info | null | | flavor_id | None | | ha | False | | headers | | | id | ced10262-6cfe-47c1-8847-cd64276a868c | | name | router1 | | project_id | cfd1889ac7d64ad891d4f20aef9f8d7c | | revision_number | 3 | | routes | | | status | ACTIVE | | tags | [] | | updated_at | 2016-12-14T06:25:40Z | +-------------------------+--------------------------------------+ Availability zone is selected from ``default_availability_zones`` in ``/etc/neutron/neutron.conf`` if a resource is created without ``availability_zone_hints``: .. code-block:: ini default_availability_zones = zone-1,zone-2 To confirm the availability zone defined by the system: .. code-block:: console $ openstack availability zone list +-----------+-------------+ | Zone Name | Zone Status | +-----------+-------------+ | zone-1 | available | | zone-2 | available | | zone-1 | available | | zone-2 | available | +-----------+-------------+ Look at the ``availability_zones`` attribute of each resource to confirm in which zone the resource is hosted: .. code-block:: console $ openstack network show net1 +---------------------------+--------------------------------------+ | Field | Value | +---------------------------+--------------------------------------+ | admin_state_up | UP | | availability_zone_hints | zone-1 | | | zone-2 | | availability_zones | zone-1 | | | zone-2 | | created_at | 2016-12-14T06:23:36Z | | description | | | headers | | | id | ad88e059-e7fa-4cf7-8857-6731a2a3a554 | | ipv4_address_scope | None | | ipv6_address_scope | None | | mtu | 1450 | | name | net1 | | port_security_enabled | True | | project_id | cfd1889ac7d64ad891d4f20aef9f8d7c | | provider:network_type | vxlan | | provider:physical_network | None | | provider:segmentation_id | 77 | | revision_number | 3 | | router:external | Internal | | shared | False | | status | ACTIVE | | subnets | | | tags | [] | | updated_at | 2016-12-14T06:23:37Z | +---------------------------+--------------------------------------+ .. 
code-block:: console $ openstack router show router1 +-------------------------+--------------------------------------+ | Field | Value | +-------------------------+--------------------------------------+ | admin_state_up | UP | | availability_zone_hints | zone-1 | | | zone-2 | | availability_zones | zone-1 | | | zone-2 | | created_at | 2016-12-14T06:25:40Z | | description | | | distributed | False | | external_gateway_info | null | | flavor_id | None | | ha | False | | headers | | | id | ced10262-6cfe-47c1-8847-cd64276a868c | | name | router1 | | project_id | cfd1889ac7d64ad891d4f20aef9f8d7c | | revision_number | 3 | | routes | | | status | ACTIVE | | tags | [] | | updated_at | 2016-12-14T06:25:40Z | +-------------------------+--------------------------------------+ .. note:: The ``availability_zones`` attribute does not have a value until the resource is scheduled. Once the Networking service schedules the resource to zones according to ``availability_zone_hints``, ``availability_zones`` shows the zones in which the resource is actually hosted. ``availability_zones`` may not match ``availability_zone_hints``. For example, even if you specify a zone with ``availability_zone_hints``, all agents of that zone may be dead before the resource is scheduled. In general, the two should match, unless there are failures or the requested zone has no capacity left. Availability zone aware scheduler ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Network scheduler ----------------- Set ``network_scheduler_driver`` to ``AZAwareWeightScheduler`` in ``/etc/neutron/neutron.conf`` so that the Networking service schedules a network according to the availability zone: .. code-block:: ini network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.AZAwareWeightScheduler dhcp_load_type = networks The Networking service schedules a network to one of the agents within the selected zone, as with ``WeightScheduler``. In this case, the scheduler refers to ``dhcp_load_type`` as well. Router scheduler ---------------- Set ``router_scheduler_driver`` to ``AZLeastRoutersScheduler`` in ``/etc/neutron/neutron.conf`` so that the Networking service schedules a router according to the availability zone: .. code-block:: ini router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.AZLeastRoutersScheduler The Networking service schedules a router to one of the agents within the selected zone, as with ``LeastRoutersScheduler``. Achieving high availability with availability zone ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Although the Networking service provides high availability for routers and high availability and fault tolerance for networks' DHCP services, availability zones provide an extra layer of protection by segmenting a Networking service deployment into isolated failure domains. By deploying HA nodes across different availability zones, it is guaranteed that network services remain available in the face of zone-wide failures that affect the deployment. This section explains how to achieve high availability with availability zones for L3 and DHCP. Note that you should also set the availability zone configuration options described above. L3 high availability -------------------- Set the following configuration options in the file ``/etc/neutron/neutron.conf`` to get L3 high availability. .. code-block:: ini l3_ha = True max_l3_agents_per_router = 3 HA routers are created in the availability zones you selected when creating the router.
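To verify in which zones an HA router was actually scheduled, you can list the L3 agents hosting it. This is a minimal sketch, assuming the example ``router1`` created above; the output depends on your deployment: .. code-block:: console $ openstack network agent list --router router1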
DHCP high availability ---------------------- Set the following configuration options in the file ``/etc/neutron/neutron.conf`` to get DHCP high availability. .. code-block:: ini dhcp_agents_per_network = 2 DHCP services are created in the availability zones you selected when creating the network. .. _config-bgp-dynamic-routing: =================== BGP dynamic routing =================== BGP dynamic routing enables advertisement of self-service (private) network prefixes to physical network devices that support BGP, such as routers, thus removing the conventional dependency on static routes. The feature relies on :ref:`address scopes <config-address-scopes>` and requires knowledge of their operation for proper deployment. BGP dynamic routing consists of a service plug-in and an agent. The service plug-in implements the Networking service extension and the agent manages BGP peering sessions. A cloud administrator creates and configures a BGP speaker using the CLI or API and manually schedules it to one or more hosts running the agent. Agents can reside on hosts with or without other Networking service agents. Prefix advertisement depends on the binding of external networks to a BGP speaker and the address scope of external and internal IP address ranges or subnets. .. image:: figures/bgp-dynamic-routing-overview.png :alt: BGP dynamic routing overview .. note:: Although self-service networks generally use private IP address ranges (RFC 1918) for IPv4 subnets, BGP dynamic routing can advertise any IPv4 address ranges. Example configuration ~~~~~~~~~~~~~~~~~~~~~ The example configuration involves the following components: * One BGP agent. * One address scope containing IP address range 203.0.113.0/24 for provider networks, and IP address ranges 192.0.2.0/25 and 192.0.2.128/25 for self-service networks. * One provider network using IP address range 203.0.113.0/24. * Three self-service networks. * Self-service networks 1 and 2 use IP address ranges inside of the address scope. * Self-service network 3 uses a unique IP address range 198.51.100.0/24 to demonstrate that the BGP speaker does not advertise prefixes outside of address scopes. * Three routers. Each router connects one self-service network to the provider network. * Router 1 contains IP addresses 203.0.113.11 and 192.0.2.1 * Router 2 contains IP addresses 203.0.113.12 and 192.0.2.129 * Router 3 contains IP addresses 203.0.113.13 and 198.51.100.1 .. note:: The example configuration assumes sufficient knowledge about the Networking service, routing, and BGP. For basic deployment of the Networking service, consult one of the :ref:`deploy`. For more information on BGP, see `RFC 4271 <https://tools.ietf.org/html/rfc4271>`_. Controller node --------------- * In the ``neutron.conf`` file, enable the conventional layer-3 and BGP dynamic routing service plug-ins: .. code-block:: ini [DEFAULT] service_plugins = neutron_dynamic_routing.services.bgp.bgp_plugin.BgpPlugin,neutron.services.l3_router.l3_router_plugin.L3RouterPlugin Agent nodes ----------- * In the ``bgp_dragent.ini`` file: * Configure the driver. .. code-block:: ini [BGP] bgp_speaker_driver = neutron_dynamic_routing.services.bgp.agent.driver.os_ken.driver.OsKenBgpDriver .. note:: The agent currently only supports the os-ken BGP driver. * Configure the router ID. ..
code-block:: ini [BGP] bgp_router_id = ROUTER_ID Replace ``ROUTER_ID`` with a suitable unique 32-bit number, typically an IPv4 address on the host running the agent. For example, 192.0.2.2. Verify service operation ------------------------ #. Source the administrative project credentials. #. Verify presence and operation of each BGP dynamic routing agent. .. code-block:: console $ openstack network agent list --agent-type bgp +--------------------------------------+---------------------------+------------+-------------------+-------+-------+---------------------+ | ID | Agent Type | Host | Availability Zone | Alive | State | Binary | +--------------------------------------+---------------------------+------------+-------------------+-------+-------+---------------------+ | 37729181-2224-48d8-89ef-16eca8e2f77e | BGP dynamic routing agent | controller | None | :-) | UP | neutron-bgp-dragent | +--------------------------------------+---------------------------+------------+-------------------+-------+-------+---------------------+ Create the address scope and subnet pools ----------------------------------------- #. Create an address scope. The provider (external) and self-service networks must belong to the same address scope for the agent to advertise those self-service network prefixes. .. code-block:: console $ openstack address scope create --share --ip-version 4 bgp +------------+--------------------------------------+ | Field | Value | +------------+--------------------------------------+ | headers | | | id | f71c958f-dbe8-49a2-8fb9-19c5f52a37f1 | | ip_version | 4 | | name | bgp | | project_id | 86acdbd1d72745fd8e8320edd7543400 | | shared | True | +------------+--------------------------------------+ #. Create subnet pools. The provider and self-service networks use different pools. * Create the provider network pool. .. code-block:: console $ openstack subnet pool create --pool-prefix 203.0.113.0/24 \ --address-scope bgp provider +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | address_scope_id | f71c958f-dbe8-49a2-8fb9-19c5f52a37f1 | | created_at | 2017-01-12T14:58:57Z | | default_prefixlen | 8 | | default_quota | None | | description | | | headers | | | id | 63532225-b9a0-445a-9935-20a15f9f68d1 | | ip_version | 4 | | is_default | False | | max_prefixlen | 32 | | min_prefixlen | 8 | | name | provider | | prefixes | 203.0.113.0/24 | | project_id | 86acdbd1d72745fd8e8320edd7543400 | | revision_number | 1 | | shared | False | | tags | [] | | updated_at | 2017-01-12T14:58:57Z | +-------------------+--------------------------------------+ * Create the self-service network pool. .. 
code-block:: console $ openstack subnet pool create --pool-prefix 192.0.2.0/25 \ --pool-prefix 192.0.2.128/25 --address-scope bgp \ --share selfservice +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | address_scope_id | f71c958f-dbe8-49a2-8fb9-19c5f52a37f1 | | created_at | 2017-01-12T15:02:31Z | | default_prefixlen | 8 | | default_quota | None | | description | | | headers | | | id | 8d8270b1-b194-4b7e-914c-9c741dcbd49b | | ip_version | 4 | | is_default | False | | max_prefixlen | 32 | | min_prefixlen | 8 | | name | selfservice | | prefixes | 192.0.2.0/25, 192.0.2.128/25 | | project_id | 86acdbd1d72745fd8e8320edd7543400 | | revision_number | 1 | | shared | True | | tags | [] | | updated_at | 2017-01-12T15:02:31Z | +-------------------+--------------------------------------+ Create the provider and self-service networks --------------------------------------------- #. Create the provider network. .. code-block:: console $ openstack network create provider --external --provider-physical-network \ provider --provider-network-type flat Created a new network: +---------------------------+--------------------------------------+ | Field | Value | +---------------------------+--------------------------------------+ | admin_state_up | UP | | availability_zone_hints | | | availability_zones | | | created_at | 2016-12-21T08:47:41Z | | description | | | headers | | | id | 190ca651-2ee3-4a4b-891f-dedda47974fe | | ipv4_address_scope | None | | ipv6_address_scope | None | | is_default | False | | mtu | 1450 | | name | provider | | port_security_enabled | True | | project_id | c961a8f6d3654657885226378ade8220 | | provider:network_type | flat | | provider:physical_network | provider | | provider:segmentation_id | 66 | | revision_number | 3 | | router:external | External | | shared | False | | status | ACTIVE | | subnets | | | tags | [] | | updated_at | 2016-12-21T08:47:41Z | +---------------------------+--------------------------------------+ #. Create a subnet on the provider network using an IP address range from the provider subnet pool. .. code-block:: console $ openstack subnet create --subnet-pool provider \ --prefix-length 24 --gateway 203.0.113.1 --network provider \ --allocation-pool start=203.0.113.11,end=203.0.113.254 provider +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | allocation_pools | 203.0.113.11-203.0.113.254 | | cidr | 203.0.113.0/24 | | created_at | 2016-03-17T23:17:16 | | description | | | dns_nameservers | | | enable_dhcp | True | | gateway_ip | 203.0.113.1 | | host_routes | | | id | 8ed65d41-2b2a-4f3a-9f92-45adb266e01a | | ip_version | 4 | | ipv6_address_mode | None | | ipv6_ra_mode | None | | name | provider | | network_id | 68ec148c-181f-4656-8334-8f4eb148689d | | project_id | b3ac05ef10bf441fbf4aa17f16ae1e6d | | segment_id | None | | service_types | | | subnetpool_id | 3771c0e7-7096-46d3-a3bd-699c58e70259 | | tags | | | updated_at | 2016-03-17T23:17:16 | +-------------------+--------------------------------------+ .. note:: The IP address allocation pool starting at ``.11`` improves clarity of the diagrams. You can safely omit it. #. Create the self-service networks. .. 
code-block:: console $ openstack network create selfservice1 Created a new network: +---------------------------+--------------------------------------+ | Field | Value | +---------------------------+--------------------------------------+ | admin_state_up | UP | | availability_zone_hints | | | availability_zones | | | created_at | 2016-12-21T08:49:38Z | | description | | | headers | | | id | 9d842606-ef3d-4160-9ed9-e03fa63aed96 | | ipv4_address_scope | None | | ipv6_address_scope | None | | mtu | 1450 | | name | selfservice1 | | port_security_enabled | True | | project_id | c961a8f6d3654657885226378ade8220 | | provider:network_type | vxlan | | provider:physical_network | None | | provider:segmentation_id | 106 | | revision_number | 3 | | router:external | Internal | | shared | False | | status | ACTIVE | | subnets | | | tags | [] | | updated_at | 2016-12-21T08:49:38Z | +---------------------------+--------------------------------------+ $ openstack network create selfservice2 Created a new network: +---------------------------+--------------------------------------+ | Field | Value | +---------------------------+--------------------------------------+ | admin_state_up | UP | | availability_zone_hints | | | availability_zones | | | created_at | 2016-12-21T08:50:05Z | | description | | | headers | | | id | f85639e1-d23f-438e-b2b1-f40570d86b1c | | ipv4_address_scope | None | | ipv6_address_scope | None | | mtu | 1450 | | name | selfservice2 | | port_security_enabled | True | | project_id | c961a8f6d3654657885226378ade8220 | | provider:network_type | vxlan | | provider:physical_network | None | | provider:segmentation_id | 21 | | revision_number | 3 | | router:external | Internal | | shared | False | | status | ACTIVE | | subnets | | | tags | [] | | updated_at | 2016-12-21T08:50:05Z | +---------------------------+--------------------------------------+ $ openstack network create selfservice3 Created a new network: +---------------------------+--------------------------------------+ | Field | Value | +---------------------------+--------------------------------------+ | admin_state_up | UP | | availability_zone_hints | | | availability_zones | | | created_at | 2016-12-21T08:50:35Z | | description | | | headers | | | id | eeccdb82-5cf4-4999-8ab3-e7dc99e7d43b | | ipv4_address_scope | None | | ipv6_address_scope | None | | mtu | 1450 | | name | selfservice3 | | port_security_enabled | True | | project_id | c961a8f6d3654657885226378ade8220 | | provider:network_type | vxlan | | provider:physical_network | None | | provider:segmentation_id | 86 | | revision_number | 3 | | router:external | Internal | | shared | False | | status | ACTIVE | | subnets | | | tags | [] | | updated_at | 2016-12-21T08:50:35Z | +---------------------------+--------------------------------------+ #. Create a subnet on the first two self-service networks using an IP address range from the self-service subnet pool. .. 
code-block:: console $ openstack subnet create --network selfservice1 --subnet-pool selfservice \ --prefix-length 25 selfservice1 +-------------------+----------------------------------------------------+ | Field | Value | +-------------------+----------------------------------------------------+ | allocation_pools | 192.0.2.2-192.0.2.127 | | cidr | 192.0.2.0/25 | | created_at | 2016-03-17T23:20:20 | | description | | | dns_nameservers | | | enable_dhcp | True | | gateway_ip | 192.0.2.1 | | host_routes | | | id | 8edd3dc2-df40-4d71-816e-a4586d61c809 | | ip_version | 4 | | ipv6_address_mode | | | ipv6_ra_mode | | | name | selfservice1 | | network_id | be79de1e-5f56-11e6-9dfb-233e41cec48c | | project_id | b3ac05ef10bf441fbf4aa17f16ae1e6d | | revision_number | 1 | | subnetpool_id | c7e9737a-cfd3-45b5-a861-d1cee1135a92 | | tags | [] | | tenant_id | b3ac05ef10bf441fbf4aa17f16ae1e6d | | updated_at | 2016-03-17T23:20:20 | +-------------------+----------------------------------------------------+ $ openstack subnet create --network selfservice2 --subnet-pool selfservice \ --prefix-length 25 selfservice2 +-------------------+------------------------------------------------+ | Field | Value | +-------------------+------------------------------------------------+ | allocation_pools | 192.0.2.130-192.0.2.254 | | cidr | 192.0.2.128/25 | | created_at | 2016-03-17T23:20:20 | | description | | | dns_nameservers | | | enable_dhcp | True | | gateway_ip | 192.0.2.129 | | host_routes | | | id | 8edd3dc2-df40-4d71-816e-a4586d61c809 | | ip_version | 4 | | ipv6_address_mode | | | ipv6_ra_mode | | | name | selfservice2 | | network_id | c1fd9846-5f56-11e6-a8ac-0f998d9cc0a2 | | project_id | b3ac05ef10bf441fbf4aa17f16ae1e6d | | revision_number | 1 | | subnetpool_id | c7e9737a-cfd3-45b5-a861-d1cee1135a92 | | tags | [] | | tenant_id | b3ac05ef10bf441fbf4aa17f16ae1e6d | | updated_at | 2016-03-17T23:20:20 | +-------------------+------------------------------------------------+ #. Create a subnet on the last self-service network using an IP address range outside of the address scope. .. code-block:: console $ openstack subnet create --network selfservice3 --subnet-range 198.51.100.0/24 selfservice3 +-------------------+----------------------------------------------------+ | Field | Value | +-------------------+----------------------------------------------------+ | allocation_pools | 198.51.100.2-198.51.100.254 | | cidr | 198.51.100.0/24 | | created_at | 2016-03-17T23:20:20 | | description | | | dns_nameservers | | | enable_dhcp | True | | gateway_ip | 198.51.100.1 | | host_routes | | | id | cd9f9156-5f59-11e6-aeec-172ec7ee939a | | ip_version | 4 | | ipv6_address_mode | | | ipv6_ra_mode | | | name | selfservice3 | | network_id | c283dc1c-5f56-11e6-bfb6-efc30e1eb73b | | project_id | b3ac05ef10bf441fbf4aa17f16ae1e6d | | revision_number | 1 | | subnetpool_id | | | tags | [] | | tenant_id | b3ac05ef10bf441fbf4aa17f16ae1e6d | | updated_at | 2016-03-17T23:20:20 | +-------------------+----------------------------------------------------+ Create and configure the routers -------------------------------- #. Create the routers. ..
code-block:: console $ openstack router create router1 +-------------------------+--------------------------------------+ | Field | Value | +-------------------------+--------------------------------------+ | admin_state_up | UP | | availability_zone_hints | | | availability_zones | | | created_at | 2017-01-10T13:15:19Z | | description | | | distributed | False | | external_gateway_info | null | | flavor_id | None | | ha | False | | headers | | | id | 3f6f4ef8-63be-11e6-bbb3-2fbcef363ab8 | | name | router1 | | project_id | b3ac05ef10bf441fbf4aa17f16ae1e6d | | revision_number | 1 | | routes | | | status | ACTIVE | | tags | [] | | updated_at | 2017-01-10T13:15:19Z | +-------------------------+--------------------------------------+ $ openstack router create router2 +-------------------------+--------------------------------------+ | Field | Value | +-------------------------+--------------------------------------+ | admin_state_up | UP | | availability_zone_hints | | | availability_zones | | | created_at | 2017-01-10T13:15:19Z | | description | | | distributed | False | | external_gateway_info | null | | flavor_id | None | | ha | False | | headers | | | id | 3fd21a60-63be-11e6-9c95-5714c208c499 | | name | router2 | | project_id | b3ac05ef10bf441fbf4aa17f16ae1e6d | | revision_number | 1 | | routes | | | status | ACTIVE | | tags | [] | | updated_at | 2017-01-10T13:15:19Z | +-------------------------+--------------------------------------+ $ openstack router create router3 +-------------------------+--------------------------------------+ | Field | Value | +-------------------------+--------------------------------------+ | admin_state_up | UP | | availability_zone_hints | | | availability_zones | | | created_at | 2017-01-10T13:15:19Z | | description | | | distributed | False | | external_gateway_info | null | | flavor_id | None | | ha | False | | headers | | | id | 40069a4c-63be-11e6-9ecc-e37c1eaa7e84 | | name | router3 | | project_id | b3ac05ef10bf441fbf4aa17f16ae1e6d | | revision_number | 1 | | routes | | | status | ACTIVE | | tags | [] | | updated_at | 2017-01-10T13:15:19Z | +-------------------------+--------------------------------------+ #. For each router, add one self-service subnet as an interface on the router. .. code-block:: console $ openstack router add subnet router1 selfservice1 $ openstack router add subnet router2 selfservice2 $ openstack router add subnet router3 selfservice3 #. Add the provider network as a gateway on each router. .. code-block:: console $ openstack router set --external-gateway provider router1 $ openstack router set --external-gateway provider router2 $ openstack router set --external-gateway provider router3 Create and configure the BGP speaker ------------------------------------ The BGP speaker advertises the next-hop IP address for eligible self-service networks and floating IP addresses for instances using those networks. #. Create the BGP speaker. .. 
code-block:: console $ openstack bgp speaker create --ip-version 4 \ --local-as LOCAL_AS bgpspeaker Created a new bgp_speaker: +-----------------------------------+--------------------------------------+ | Field | Value | +-----------------------------------+--------------------------------------+ | advertise_floating_ip_host_routes | True | | advertise_tenant_networks | True | | id | 5f227f14-4f46-4eca-9524-fc5a1eabc358 | | ip_version | 4 | | local_as | 1234 | | name | bgpspeaker | | networks | | | peers | | | tenant_id | b3ac05ef10bf441fbf4aa17f16ae1e6d | +-----------------------------------+--------------------------------------+ Replace ``LOCAL_AS`` with an appropriate local autonomous system number. The example configuration uses AS 1234. #. A BGP speaker requires association with a provider network to determine eligible prefixes. The association builds a list of all virtual routers with gateways on provider and self-service networks in the same address scope so the BGP speaker can advertise self-service network prefixes with the corresponding router as the next-hop IP address. Associate the BGP speaker with the provider network. .. code-block:: console $ openstack bgp speaker add network bgpspeaker provider Added network provider to BGP speaker bgpspeaker. #. Verify association of the provider network with the BGP speaker. .. code-block:: console $ openstack bgp speaker show bgpspeaker +-----------------------------------+--------------------------------------+ | Field | Value | +-----------------------------------+--------------------------------------+ | advertise_floating_ip_host_routes | True | | advertise_tenant_networks | True | | id | 5f227f14-4f46-4eca-9524-fc5a1eabc358 | | ip_version | 4 | | local_as | 1234 | | name | bgpspeaker | | networks | 68ec148c-181f-4656-8334-8f4eb148689d | | peers | | | tenant_id | b3ac05ef10bf441fbf4aa17f16ae1e6d | +-----------------------------------+--------------------------------------+ #. Verify the prefixes and next-hop IP addresses that the BGP speaker advertises. .. code-block:: console $ openstack bgp speaker list advertised routes bgpspeaker +-----------------+--------------+ | Destination | Nexthop | +-----------------+--------------+ | 192.0.2.0/25 | 203.0.113.11 | | 192.0.2.128/25 | 203.0.113.12 | +-----------------+--------------+ #. Create a BGP peer. .. code-block:: console $ openstack bgp peer create --peer-ip 192.0.2.1 \ --remote-as REMOTE_AS bgppeer Created a new bgp_peer: +-----------+--------------------------------------+ | Field | Value | +-----------+--------------------------------------+ | auth_type | none | | id | 35c89ca0-ac5a-4298-a815-0b073c2362e9 | | name | bgppeer | | peer_ip | 192.0.2.1 | | remote_as | 4321 | | tenant_id | b3ac05ef10bf441fbf4aa17f16ae1e6d | +-----------+--------------------------------------+ Replace ``REMOTE_AS`` with an appropriate remote autonomous system number. The example configuration uses AS 4321 which triggers EBGP peering. .. note:: The host containing the BGP agent must have layer-3 connectivity to the provider router. #. Add a BGP peer to the BGP speaker. .. code-block:: console $ openstack bgp speaker add peer bgpspeaker bgppeer Added BGP peer bgppeer to BGP speaker bgpspeaker. #. Verify addition of the BGP peer to the BGP speaker. .. 
code-block:: console $ openstack bgp speaker show bgpspeaker +-----------------------------------+--------------------------------------+ | Field | Value | +-----------------------------------+--------------------------------------+ | advertise_floating_ip_host_routes | True | | advertise_tenant_networks | True | | id | 5f227f14-4f46-4eca-9524-fc5a1eabc358 | | ip_version | 4 | | local_as | 1234 | | name | bgpspeaker | | networks | 68ec148c-181f-4656-8334-8f4eb148689d | | peers | 35c89ca0-ac5a-4298-a815-0b073c2362e9 | | tenant_id | b3ac05ef10bf441fbf4aa17f16ae1e6d | +-----------------------------------+--------------------------------------+ .. note:: After creating a peering session, you cannot change the local or remote autonomous system numbers. Schedule the BGP speaker to an agent ------------------------------------ #. Unlike most agents, BGP speakers require manual scheduling to an agent. BGP speakers only form peering sessions and begin prefix advertisement after scheduling to an agent. Schedule the BGP speaker to agent ``37729181-2224-48d8-89ef-16eca8e2f77e``. .. code-block:: console $ openstack bgp dragent add speaker 37729181-2224-48d8-89ef-16eca8e2f77e bgpspeaker Associated BGP speaker bgpspeaker to the Dynamic Routing agent. #. Verify scheduling of the BGP speaker to the agent. .. code-block:: console $ openstack bgp speaker show dragents bgpspeaker +--------------------------------------+------------+-------+-------+ | ID | Host | State | Alive | +--------------------------------------+------------+-------+-------+ | 37729181-2224-48d8-89ef-16eca8e2f77e | controller | True | :-) | +--------------------------------------+------------+-------+-------+ Prefix advertisement ~~~~~~~~~~~~~~~~~~~~ BGP dynamic routing advertises prefixes for self-service networks and host routes for floating IP addresses. Advertisement of a self-service network requires satisfying the following conditions: * The external and self-service network reside in the same address scope. * The router contains an interface on the self-service subnet and a gateway on the external network. * The BGP speaker associates with the external network that provides a gateway on the router. * The BGP speaker has the ``advertise_tenant_networks`` attribute set to ``True``. .. image:: figures/bgp-dynamic-routing-example1.png :alt: Example of prefix advertisements with self-service networks Advertisement of a floating IP address requires satisfying the following conditions: * The router with the floating IP address binding contains a gateway on an external network with the BGP speaker association. * The BGP speaker has the ``advertise_floating_ip_host_routes`` attribute set to ``True``. .. image:: figures/bgp-dynamic-routing-example2.png :alt: Example of prefix advertisements with floating IP addresses Operation with Distributed Virtual Routers (DVR) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For both floating IP and IPv4 fixed IP addresses, the BGP speaker advertises the floating IP agent gateway on the corresponding compute node as the next-hop IP address. When using IPv6 fixed IP addresses, the BGP speaker advertises the DVR SNAT node as the next-hop IP address. For example, consider the following components: #. A provider network using IP address range 203.0.113.0/24, and supporting floating IP addresses 203.0.113.101, 203.0.113.102, and 203.0.113.103. #. A self-service network using IP address range 198.51.100.0/24. #. Instances with fixed IPs 198.51.100.11, 198.51.100.12, and 198.51.100.13. #.
The SNAT gateway resides on 203.0.113.11. #. The floating IP agent gateways (one per compute node) reside on 203.0.113.12, 203.0.113.13, and 203.0.113.14. #. Three instances, one per compute node, each with a floating IP address. #. ``advertise_tenant_networks`` is set to ``False`` on the BGP speaker. .. code-block:: console $ openstack bgp speaker list advertised routes bgpspeaker +------------------+--------------+ | Destination | Nexthop | +------------------+--------------+ | 198.51.100.0/24 | 203.0.113.11 | | 203.0.113.101/32 | 203.0.113.12 | | 203.0.113.102/32 | 203.0.113.13 | | 203.0.113.103/32 | 203.0.113.14 | +------------------+--------------+ When floating IPs are disassociated and ``advertise_tenant_networks`` is set to ``True``, the following routes will be advertised: .. code-block:: console $ openstack bgp speaker list advertised routes bgpspeaker +------------------+--------------+ | Destination | Nexthop | +------------------+--------------+ | 198.51.100.0/24 | 203.0.113.11 | | 198.51.100.11/32 | 203.0.113.12 | | 198.51.100.12/32 | 203.0.113.13 | | 198.51.100.13/32 | 203.0.113.14 | +------------------+--------------+ You can also identify floating IP agent gateways in your environment to assist with verifying operation of the BGP speaker. .. code-block:: console $ openstack port list --device-owner network:floatingip_agent_gateway +--------------------------------------+------+-------------------+--------------------------------------------------------------------------------------------------------+ | ID | Name | MAC Address | Fixed IP Addresses | +--------------------------------------+------+-------------------+--------------------------------------------------------------------------------------------------------+ | 87cf2970-4970-462e-939e-00e808295dfa | | fa:16:3e:7c:68:e3 | ip_address='203.0.113.12', subnet_id='8ed65d41-2b2a-4f3a-9f92-45adb266e01a' | | 8d218440-0d2e-49d0-8a7b-3266a6146dc1 | | fa:16:3e:9d:78:cf | ip_address='203.0.113.13', subnet_id='8ed65d41-2b2a-4f3a-9f92-45adb266e01a' | | 87cf2970-4970-462e-939e-00e802281dfa | | fa:16:3e:6b:18:e0 | ip_address='203.0.113.14', subnet_id='8ed65d41-2b2a-4f3a-9f92-45adb266e01a' | +--------------------------------------+------+-------------------+--------------------------------------------------------------------------------------------------------+ IPv6 ~~~~ BGP dynamic routing supports peering via IPv6 and advertising IPv6 prefixes. * To enable peering via IPv6, create a BGP peer and use an IPv6 address for ``peer_ip``. * To enable advertising IPv6 prefixes, create an address scope with ``ip_version=6`` and a BGP speaker with ``ip_version=6``. .. note:: DVR lacks support for routing directly to a fixed IPv6 address via the floating IP agent gateway port and thus prevents the BGP speaker from advertising /128 host routes. High availability ~~~~~~~~~~~~~~~~~ BGP dynamic routing supports scheduling a BGP speaker to multiple agents, which effectively multiplies prefix advertisements to the same peer. If an agent fails, the peer continues to receive advertisements from one or more operational agents. #. Show available dynamic routing agents. ..
code-block:: console $ openstack network agent list --agent-type bgp +--------------------------------------+---------------------------+----------+-------------------+-------+-------+---------------------------+ | ID | Agent Type | Host | Availability Zone | Alive | State | Binary | +--------------------------------------+---------------------------+----------+-------------------+-------+-------+---------------------------+ | 37729181-2224-48d8-89ef-16eca8e2f77e | BGP dynamic routing agent | bgp-ha1 | None | :-) | UP | neutron-bgp-dragent | | 1a2d33bb-9321-30a2-76ab-22eff3d2f56a | BGP dynamic routing agent | bgp-ha2 | None | :-) | UP | neutron-bgp-dragent | +--------------------------------------+---------------------------+----------+-------------------+-------+-------+---------------------------+ #. Schedule BGP speaker to multiple agents. .. code-block:: console $ openstack bgp dragent add speaker 37729181-2224-48d8-89ef-16eca8e2f77e bgpspeaker Associated BGP speaker bgpspeaker to the Dynamic Routing agent. $ openstack bgp dragent add speaker 1a2d33bb-9321-30a2-76ab-22eff3d2f56a bgpspeaker Associated BGP speaker bgpspeaker to the Dynamic Routing agent. $ openstack bgp speaker show dragents bgpspeaker +--------------------------------------+---------+-------+-------+ | ID | Host | State | Alive | +--------------------------------------+---------+-------+-------+ | 37729181-2224-48d8-89ef-16eca8e2f77e | bgp-ha1 | True | :-) | | 1a2d33bb-9321-30a2-76ab-22eff3d2f56a | bgp-ha2 | True | :-) | +--------------------------------------+---------+-------+-------+ .. _config-dhcp-ha: ========================== High-availability for DHCP ========================== This section describes how to use the agent management (alias agent) and scheduler (alias agent_scheduler) extensions for DHCP agent scalability and HA. .. note:: Use the :command:`openstack extension list` command to check if these extensions are enabled. Check that ``agent`` and ``agent_scheduler`` are included in the output. ..
code-block:: console $ openstack extension list --network -c Name -c Alias +-------------------------------------------------------------+---------------------------+ | Name | Alias | +-------------------------------------------------------------+---------------------------+ | Default Subnetpools | default-subnetpools | | Network IP Availability | network-ip-availability | | Network Availability Zone | network_availability_zone | | Auto Allocated Topology Services | auto-allocated-topology | | Neutron L3 Configurable external gateway mode | ext-gw-mode | | Port Binding | binding | | Neutron Metering | metering | | agent | agent | | Subnet Allocation | subnet_allocation | | L3 Agent Scheduler | l3_agent_scheduler | | Neutron external network | external-net | | Neutron Service Flavors | flavors | | Network MTU | net-mtu | | Availability Zone | availability_zone | | Quota management support | quotas | | HA Router extension | l3-ha | | Provider Network | provider | | Multi Provider Network | multi-provider | | Address scope | address-scope | | Neutron Extra Route | extraroute | | Subnet service types | subnet-service-types | | Resource timestamps | standard-attr-timestamp | | Neutron Service Type Management | service-type | | Router Flavor Extension | l3-flavors | | Neutron Extra DHCP opts | extra_dhcp_opt | | Resource revision numbers | standard-attr-revisions | | Pagination support | pagination | | Sorting support | sorting | | security-group | security-group | | DHCP Agent Scheduler | dhcp_agent_scheduler | | Router Availability Zone | router_availability_zone | | RBAC Policies | rbac-policies | | standard-attr-description | standard-attr-description | | Neutron L3 Router | router | | Allowed Address Pairs | allowed-address-pairs | | project_id field enabled | project-id | | Distributed Virtual Router | dvr | +-------------------------------------------------------------+---------------------------+ Demo setup ~~~~~~~~~~ .. figure:: figures/demo_multiple_dhcp_agents.png There will be three hosts in the setup. .. list-table:: :widths: 25 50 :header-rows: 1 * - Host - Description * - OpenStack controller host - controlnode - Runs the Networking, Identity, and Compute services that are required to deploy VMs. The node must have at least one network interface that is connected to the Management Network. Note that ``nova-network`` should not be running because it is replaced by Neutron. * - HostA - Runs ``nova-compute``, the Neutron L2 agent and DHCP agent * - HostB - Same as HostA Configuration ~~~~~~~~~~~~~ **controlnode: neutron server** #. Neutron configuration file ``/etc/neutron/neutron.conf``: .. code-block:: ini [DEFAULT] core_plugin = linuxbridge rabbit_host = controlnode allow_overlapping_ips = True host = controlnode agent_down_time = 5 dhcp_agents_per_network = 1 .. note:: In the above configuration, we use ``dhcp_agents_per_network = 1`` for this demonstration. In usual deployments, we suggest setting ``dhcp_agents_per_network`` to more than one to match the number of DHCP agents in your deployment. See :ref:`conf-dhcp-agents-per-network`. #. Update the plug-in configuration file ``/etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini``: .. code-block:: ini [vlans] tenant_network_type = vlan network_vlan_ranges = physnet1:1000:2999 [database] connection = mysql+pymysql://root:root@127.0.0.1:3306/neutron_linux_bridge retry_interval = 2 [linux_bridge] physical_interface_mappings = physnet1:eth0 **HostA and HostB: L2 agent** #. 
Neutron configuration file ``/etc/neutron/neutron.conf``: .. code-block:: ini [DEFAULT] rabbit_host = controlnode rabbit_password = openstack # host = HostB on hostb host = HostA #. Update the plug-in configuration file ``/etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini``: .. code-block:: ini [vlans] tenant_network_type = vlan network_vlan_ranges = physnet1:1000:2999 [database] connection = mysql://root:root@127.0.0.1:3306/neutron_linux_bridge retry_interval = 2 [linux_bridge] physical_interface_mappings = physnet1:eth0 #. Update the nova configuration file ``/etc/nova/nova.conf``: .. code-block:: ini [DEFAULT] use_neutron=True firewall_driver=nova.virt.firewall.NoopFirewallDriver [neutron] admin_username=neutron admin_password=servicepassword admin_auth_url=http://controlnode:35357/v2.0/ auth_strategy=keystone admin_tenant_name=servicetenant url=http://203.0.113.10:9696/ **HostA and HostB: DHCP agent** - Update the DHCP configuration file ``/etc/neutron/dhcp_agent.ini``: .. code-block:: ini [DEFAULT] interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver Prerequisites for demonstration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Admin role is required to use the agent management and scheduler extensions. Ensure you run the following commands under a project with an admin role. To experiment, you need VMs and a neutron network: .. code-block:: console $ openstack server list +--------------------------------------+-----------+--------+----------------+--------+----------+ | ID | Name | Status | Networks | Image | Flavor | +--------------------------------------+-----------+--------+----------------+--------+----------+ | c394fcd0-0baa-43ae-a793-201815c3e8ce | myserver1 | ACTIVE | net1=192.0.2.3 | cirros | m1.tiny | | 2d604e05-9a6c-4ddb-9082-8a1fbdcc797d | myserver2 | ACTIVE | net1=192.0.2.4 | ubuntu | m1.small | | c7c0481c-3db8-4d7a-a948-60ce8211d585 | myserver3 | ACTIVE | net1=192.0.2.5 | centos | m1.small | +--------------------------------------+-----------+--------+----------------+--------+----------+ $ openstack network list +--------------------------------------+------+--------------------------------------+ | ID | Name | Subnets | +--------------------------------------+------+--------------------------------------+ | ad88e059-e7fa-4cf7-8857-6731a2a3a554 | net1 | 8086db87-3a7a-4cad-88c9-7bab9bc69258 | +--------------------------------------+------+--------------------------------------+ Managing agents in neutron deployment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. List all agents: .. 
code-block:: console $ openstack network agent list +--------------------------------------+--------------------+-------+-------------------+-------+-------+---------------------------+ | ID | Agent Type | Host | Availability Zone | Alive | State | Binary | +--------------------------------------+--------------------+-------+-------------------+-------+-------+---------------------------+ | 22467163-01ea-4231-ba45-3bd316f425e6 | Linux bridge agent | HostA | None | True | UP | neutron-linuxbridge-agent | | 2444c54d-0d28-460c-ab0f-cd1e6b5d3c7b | DHCP agent | HostA | None | True | UP | neutron-dhcp-agent | | 3066d20c-9f8f-440c-ae7c-a40ffb4256b6 | Linux bridge agent | HostB | nova | True | UP | neutron-linuxbridge-agent | | 55569f4e-6f31-41a6-be9d-526efce1f7fe | DHCP agent | HostB | nova | True | UP | neutron-dhcp-agent | +--------------------------------------+--------------------+-------+-------------------+-------+-------+---------------------------+ Every agent that supports these extensions will register itself with the neutron server when it starts up. The output shows information for four agents. The ``alive`` field shows ``True`` if the agent reported its state within the period defined by the ``agent_down_time`` option in the ``neutron.conf`` file. Otherwise, ``alive`` is ``False``. #. List DHCP agents that host a specified network: .. code-block:: console $ openstack network agent list --network net1 +--------------------------------------+---------------+----------------+-------+ | ID | Host | Admin State Up | Alive | +--------------------------------------+---------------+----------------+-------+ | 2444c54d-0d28-460c-ab0f-cd1e6b5d3c7b | HostA | UP | True | +--------------------------------------+---------------+----------------+-------+ #. List the networks hosted by a given DHCP agent: This command shows which networks a given DHCP agent manages. .. code-block:: console $ openstack network list --agent 2444c54d-0d28-460c-ab0f-cd1e6b5d3c7b +--------------------------------+------------------------+---------------------------------+ | ID | Name | Subnets | +--------------------------------+------------------------+---------------------------------+ | ad88e059-e7fa- | net1 | 8086db87-3a7a-4cad- | | 4cf7-8857-6731a2a3a554 | | 88c9-7bab9bc69258 | +--------------------------------+------------------------+---------------------------------+ #. Show agent details. The :command:`openstack network agent show` command shows details for a specified agent: .. code-block:: console $ openstack network agent show 2444c54d-0d28-460c-ab0f-cd1e6b5d3c7b +---------------------+--------------------------------------------------+ | Field | Value | +---------------------+--------------------------------------------------+ | admin_state_up | UP | | agent_type | DHCP agent | | alive | True | | availability_zone | nova | | binary | neutron-dhcp-agent | | configurations | dhcp_driver='neutron.agent.linux.dhcp.Dnsmasq', | | | dhcp_lease_duration='86400', | | | log_agent_heartbeats='False', networks='1', | | | notifies_port_ready='True', ports='3', | | | subnets='1' | | created_at | 2016-12-14 00:25:54 | | description | None | | last_heartbeat_at | 2016-12-14 06:53:24 | | host | HostA | | id | 2444c54d-0d28-460c-ab0f-cd1e6b5d3c7b | | started_at | 2016-12-14 00:25:54 | | topic | dhcp_agent | +---------------------+--------------------------------------------------+ In this output, ``last_heartbeat_at`` is the time on the neutron server.
You do not need to synchronize all agents to this time for this extension to run correctly. ``configurations`` describes the static configuration for the agent or run-time data. This agent is a DHCP agent and it hosts one network, one subnet, and three ports. Different types of agents show different details. The following output shows information for a Linux bridge agent: .. code-block:: console $ openstack network agent show 22467163-01ea-4231-ba45-3bd316f425e6 +---------------------+--------------------------------------+ | Field | Value | +---------------------+--------------------------------------+ | admin_state_up | UP | | agent_type | Linux bridge agent | | alive | True | | availability_zone | nova | | binary | neutron-linuxbridge-agent | | configurations | { | | | "physnet1": "eth0", | | | "devices": "4" | | | } | | created_at | 2016-12-14 00:26:54 | | description | None | | last_heartbeat_at | 2016-12-14 06:53:24 | | host | HostA | | id | 22467163-01ea-4231-ba45-3bd316f425e6 | | started_at | 2016-12-14T06:48:39.000000 | | topic | N/A | +---------------------+--------------------------------------+ The output shows the bridge mapping and the number of virtual network devices on this L2 agent. Managing assignment of networks to DHCP agent ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A single network can be assigned to more than one DHCP agent, and one DHCP agent can host more than one network. You can add a network to a DHCP agent and remove one from it. #. Default scheduling. When you create a network with one port, the network is scheduled to an active DHCP agent. If several active DHCP agents are running, one is selected randomly. You can design more sophisticated scheduling algorithms later on, in the same way as nova-scheduler. .. code-block:: console $ openstack network create net2 $ openstack subnet create --network net2 --subnet-range 198.51.100.0/24 subnet2 $ openstack port create port2 --network net2 $ openstack network agent list --network net2 +--------------------------------------+---------------+----------------+-------+ | ID | Host | Admin State Up | Alive | +--------------------------------------+---------------+----------------+-------+ | 2444c54d-0d28-460c-ab0f-cd1e6b5d3c7b | HostA | UP | True | +--------------------------------------+---------------+----------------+-------+ It is allocated to the DHCP agent on HostA. If you want to validate the behavior through the :command:`dnsmasq` command, you must create a subnet for the network because the DHCP agent starts the dnsmasq service only if there is a subnet with DHCP enabled. #. Assign a network to a given DHCP agent. To add another DHCP agent to host the network, run this command: .. code-block:: console $ openstack network agent add network --dhcp \ 55569f4e-6f31-41a6-be9d-526efce1f7fe net2 $ openstack network agent list --network net2 +--------------------------------------+-------+----------------+--------+ | ID | Host | Admin State Up | Alive | +--------------------------------------+-------+----------------+--------+ | 2444c54d-0d28-460c-ab0f-cd1e6b5d3c7b | HostA | UP | True | | 55569f4e-6f31-41a6-be9d-526efce1f7fe | HostB | UP | True | +--------------------------------------+-------+----------------+--------+ Both DHCP agents host the ``net2`` network. #. Remove a network from a specified DHCP agent. This command is the sibling command for the previous one. Remove ``net2`` from the DHCP agent for HostA: ..
#. Remove a network from a specified DHCP agent.

   This command is the sibling of the previous one. Remove ``net2`` from the DHCP agent for HostA:

   .. code-block:: console

      $ openstack network agent remove network --dhcp \
        2444c54d-0d28-460c-ab0f-cd1e6b5d3c7b net2
      $ openstack network agent list --network net2
      +--------------------------------------+-------+----------------+-------+
      | ID | Host | Admin State Up | Alive |
      +--------------------------------------+-------+----------------+-------+
      | 55569f4e-6f31-41a6-be9d-526efce1f7fe | HostB | UP | True |
      +--------------------------------------+-------+----------------+-------+

   You can see that only the DHCP agent for HostB is hosting the ``net2`` network.

HA of DHCP agents
~~~~~~~~~~~~~~~~~

Boot a VM on ``net2``. Let both DHCP agents host ``net2``. Fail the agents in turn to see if the VM can still get the desired IP.

#. Boot a VM on ``net2``:

   .. code-block:: console

      $ openstack network list
      +--------------------------------------+------+--------------------------------------+
      | ID | Name | Subnets |
      +--------------------------------------+------+--------------------------------------+
      | ad88e059-e7fa-4cf7-8857-6731a2a3a554 | net1 | 8086db87-3a7a-4cad-88c9-7bab9bc69258 |
      | 9b96b14f-71b8-4918-90aa-c5d705606b1a | net2 | 6979b71a-0ae8-448c-aa87-65f68eedcaaa |
      +--------------------------------------+------+--------------------------------------+
      $ openstack server create --image tty --flavor 1 myserver4 \
        --nic net-id=9b96b14f-71b8-4918-90aa-c5d705606b1a
      ...
      $ openstack server list
      +--------------------------------------+-----------+--------+-------------------+---------+----------+
      | ID | Name | Status | Networks | Image | Flavor |
      +--------------------------------------+-----------+--------+-------------------+---------+----------+
      | c394fcd0-0baa-43ae-a793-201815c3e8ce | myserver1 | ACTIVE | net1=192.0.2.3 | cirros | m1.tiny |
      | 2d604e05-9a6c-4ddb-9082-8a1fbdcc797d | myserver2 | ACTIVE | net1=192.0.2.4 | ubuntu | m1.small |
      | c7c0481c-3db8-4d7a-a948-60ce8211d585 | myserver3 | ACTIVE | net1=192.0.2.5 | centos | m1.small |
      | f62f4731-5591-46b1-9d74-f0c901de567f | myserver4 | ACTIVE | net2=198.51.100.2 | cirros1 | m1.tiny |
      +--------------------------------------+-----------+--------+-------------------+---------+----------+

#. Make sure both DHCP agents are hosting ``net2``:

   Use the previous commands to assign the network to agents.

   .. code-block:: console

      $ openstack network agent list --network net2
      +--------------------------------------+-------+----------------+-------+
      | ID | Host | Admin State Up | Alive |
      +--------------------------------------+-------+----------------+-------+
      | 2444c54d-0d28-460c-ab0f-cd1e6b5d3c7b | HostA | UP | True |
      | 55569f4e-6f31-41a6-be9d-526efce1f7fe | HostB | UP | True |
      +--------------------------------------+-------+----------------+-------+

To test the HA of the DHCP agents (a successful lease acquisition from inside the VM is sketched after this list):

#. Log in to the ``myserver4`` VM, and run ``udhcpc``, ``dhclient``, or another DHCP client.
#. Stop the DHCP agent on HostA. Besides stopping the ``neutron-dhcp-agent`` binary, you must stop the ``dnsmasq`` processes.
#. Run a DHCP client in the VM to see if it can get the desired IP.
#. Stop the DHCP agent on HostB too.
#. Run ``udhcpc`` in the VM; it cannot get the desired IP.
#. Start the DHCP agent on HostB. The VM gets the desired IP again.
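When a lease is obtained successfully, the exchange inside the VM looks roughly like the following sketch. The exact wording varies with the BusyBox version shipped in the image; the address shown is ``myserver4``'s fixed IP from the listing above:

.. code-block:: console

   $ sudo udhcpc
   udhcpc: sending discover
   udhcpc: sending select for 198.51.100.2
   udhcpc: lease of 198.51.100.2 obtained, lease time 86400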
Disabling and removing an agent
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

An administrator might want to disable an agent if a system hardware or software upgrade is planned. Some agents that support scheduling, such as the L3 and DHCP agents, also support disabling and enabling. Once an agent is disabled, the scheduler does not schedule new resources to it, and you can then safely remove the agent. Note that resources on the agent are kept assigned even after it is disabled, so ensure that you remove the resources from the agent before you delete the agent itself.

Disable the DHCP agent on HostA before you stop it:

.. code-block:: console

   $ openstack network agent set 2444c54d-0d28-460c-ab0f-cd1e6b5d3c7b --disable
   $ openstack network agent list
   +--------------------------------------+--------------------+-------+-------------------+-------+-------+---------------------------+
   | ID | Agent Type | Host | Availability Zone | Alive | State | Binary |
   +--------------------------------------+--------------------+-------+-------------------+-------+-------+---------------------------+
   | 22467163-01ea-4231-ba45-3bd316f425e6 | Linux bridge agent | HostA | None | True | UP | neutron-linuxbridge-agent |
   | 2444c54d-0d28-460c-ab0f-cd1e6b5d3c7b | DHCP agent | HostA | None | True | DOWN | neutron-dhcp-agent |
   | 3066d20c-9f8f-440c-ae7c-a40ffb4256b6 | Linux bridge agent | HostB | nova | True | UP | neutron-linuxbridge-agent |
   | 55569f4e-6f31-41a6-be9d-526efce1f7fe | DHCP agent | HostB | nova | True | UP | neutron-dhcp-agent |
   +--------------------------------------+--------------------+-------+-------------------+-------+-------+---------------------------+

After you stop the DHCP agent on HostA, you can delete it with the following command:

.. code-block:: console

   $ openstack network agent delete 2444c54d-0d28-460c-ab0f-cd1e6b5d3c7b
   $ openstack network agent list
   +--------------------------------------+--------------------+-------+-------------------+-------+-------+---------------------------+
   | ID | Agent Type | Host | Availability Zone | Alive | State | Binary |
   +--------------------------------------+--------------------+-------+-------------------+-------+-------+---------------------------+
   | 22467163-01ea-4231-ba45-3bd316f425e6 | Linux bridge agent | HostA | None | True | UP | neutron-linuxbridge-agent |
   | 3066d20c-9f8f-440c-ae7c-a40ffb4256b6 | Linux bridge agent | HostB | nova | True | UP | neutron-linuxbridge-agent |
   | 55569f4e-6f31-41a6-be9d-526efce1f7fe | DHCP agent | HostB | nova | True | UP | neutron-dhcp-agent |
   +--------------------------------------+--------------------+-------+-------------------+-------+-------+---------------------------+

After deletion, if you restart the DHCP agent, it appears on the agent list again.

.. _conf-dhcp-agents-per-network:

Enabling DHCP high availability by default
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You can control the default number of DHCP agents assigned to a network by setting the following configuration option in the file ``/etc/neutron/neutron.conf``.

.. code-block:: ini

   dhcp_agents_per_network = 3

.. _config-dns-int-ext-serv:

========================================
DNS integration with an external service
========================================

This page serves as a guide for how to use the DNS integration functionality of the Networking service with an external DNSaaS (DNS-as-a-Service). As a prerequisite, the internal DNS functionality offered by the Networking service must be enabled; see :ref:`config-dns-int`.
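Before configuring the driver, you can confirm which DNS-related API extensions are loaded in your deployment. The following is a sketch; the set of aliases that appears depends on the extensions configured in Neutron:

.. code-block:: console

   $ openstack extension list --network -c Alias | grep dns
   | dns-integration             |
   | dns-domain-ports            |
   | subnet-dns-publish-fixed-ip |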
Configuring OpenStack Networking for integration with an external DNS service
------------------------------------------------------------------------------

The first step to configure the integration with an external DNS service is to enable the functionality described in :ref:`config-dns-int-dns-resolution`. Once this is done, the user has to take the following steps and restart ``neutron-server``.

#. Edit the ``[DEFAULT]`` section of ``/etc/neutron/neutron.conf`` and specify the external DNS service driver to be used in parameter ``external_dns_driver``. The valid options are defined in namespace ``neutron.services.external_dns_drivers``. The following example shows how to set up the driver for the OpenStack DNS service:

   .. code-block:: ini

      external_dns_driver = designate

#. If the OpenStack DNS service is the target external DNS, the ``[designate]`` section of ``/etc/neutron/neutron.conf`` must define the following parameters:

   * ``url``: the OpenStack DNS service public endpoint URL. Note that currently this must always be the versioned endpoint.
   * ``auth_type``: the authentication plugin to use. Usually this should be ``password``; see https://docs.openstack.org/keystoneauth/latest/authentication-plugins.html for other options.
   * ``auth_url``: the Identity service authorization endpoint URL. This endpoint will be used by the Networking service to authenticate as a user to create and update reverse lookup (PTR) zones.
   * ``username``: the username to be used by the Networking service to create and update reverse lookup (PTR) zones.
   * ``password``: the password of the user to be used by the Networking service to create and update reverse lookup (PTR) zones.
   * ``project_name``: the name of the project to be used by the Networking service to create and update reverse lookup (PTR) zones.
   * ``project_domain_name``: the name of the domain for the project to be used by the Networking service to create and update reverse lookup (PTR) zones.
   * ``user_domain_name``: the name of the domain for the user to be used by the Networking service to create and update reverse lookup (PTR) zones.
   * ``region_name``: the name of the region to be used by the Networking service to create and update reverse lookup (PTR) zones.
   * ``allow_reverse_dns_lookup``: a boolean value specifying whether or not to enable the creation of reverse lookup (PTR) records.
   * ``ipv4_ptr_zone_prefix_size``: the size in bits of the prefix for the IPv4 reverse lookup (PTR) zones.
   * ``ipv6_ptr_zone_prefix_size``: the size in bits of the prefix for the IPv6 reverse lookup (PTR) zones.
   * ``ptr_zone_email``: the email address to use when creating new reverse lookup (PTR) zones. The default is ``admin@<dns_domain>``, where ``<dns_domain>`` is the domain for the first record being created in that zone.
   * ``insecure``: whether to disable SSL certificate validation. By default, certificates are validated.
   * ``cafile``: path to a valid Certificate Authority (CA) certificate. Optional; the system CAs are used by default.
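The two prefix-size options decide how fixed and floating IPs map to reverse zones, which matters when inspecting or pre-creating those zones. The following annotated snippet is a worked example consistent with the command outputs shown later in this guide:

.. code-block:: ini

   [designate]
   # With a /24 IPv4 PTR zone, the address 198.51.100.4 is published as
   # 4.100.51.198.in-addr.arpa. inside the zone 100.51.198.in-addr.arpa.
   ipv4_ptr_zone_prefix_size = 24
   # With a /116 IPv6 PTR zone, 2001:db8:10::9 is published inside the zone
   # 0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
   ipv6_ptr_zone_prefix_size = 116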
The following is an example:

.. code-block:: ini

   [designate]
   url = http://192.0.2.240:9001/v2
   auth_type = password
   auth_url = http://192.0.2.240:5000
   username = neutron
   password = PASSWORD
   project_name = service
   project_domain_name = Default
   user_domain_name = Default
   allow_reverse_dns_lookup = True
   ipv4_ptr_zone_prefix_size = 24
   ipv6_ptr_zone_prefix_size = 116
   ptr_zone_email = admin@example.org
   cafile = /etc/ssl/certs/my_ca_cert

Once ``neutron-server`` has been configured and restarted, users will have functionality that covers three use cases, described in the following sections. In each of the use cases described below:

* The examples assume the OpenStack DNS service as the external DNS.
* A, AAAA and PTR records will be created in the DNS service.
* Before executing any of the use cases, the user must create, under their project in the DNS service, a DNS zone where the A and AAAA records will be created. For the description of the use cases below, it is assumed that the zone ``example.org.`` was created previously.
* The PTR records will be created in zones owned by the project specified for ``project_name`` above.

Use case 1: Floating IPs are published with associated port DNS attributes
--------------------------------------------------------------------------

In this use case, the address of a floating IP is published in the external DNS service in conjunction with the ``dns_name`` of its associated port and the ``dns_domain`` of the port's network. The steps to execute in this use case are the following:

#. Assign a valid domain name to the network's ``dns_domain`` attribute. This name must end with a period (``.``).
#. Boot an instance or, alternatively, create a port specifying a valid value for its ``dns_name`` attribute. If the port is going to be used for an instance boot, the value assigned to ``dns_name`` must be equal to the ``hostname`` that the Compute service will assign to the instance. Otherwise, the boot will fail.
#. Create a floating IP and associate it to the port.

The following is an example of these steps:

.. code-block:: console

   $ openstack network set --dns-domain example.org. 38c5e950-b450-4c30-83d4-ee181c28aad3
   $ openstack network show 38c5e950-b450-4c30-83d4-ee181c28aad3
   +---------------------------+--------------------------------------+
   | Field | Value |
   +---------------------------+--------------------------------------+
   | admin_state_up | UP |
   | availability_zone_hints | |
   | availability_zones | nova |
   | created_at | 2016-05-04T19:27:34Z |
   | description | |
   | dns_domain | example.org.
| | id | 38c5e950-b450-4c30-83d4-ee181c28aad3 | | ipv4_address_scope | None | | ipv6_address_scope | None | | is_default | None | | is_vlan_transparent | None | | mtu | 1450 | | name | private | | port_security_enabled | True | | project_id | d5660cb1e6934612a01b4fb2fb630725 | | provider:network_type | vlan | | provider:physical_network | None | | provider:segmentation_id | 24 | | qos_policy_id | None | | revision_number | 1 | | router:external | Internal | | segments | None | | shared | False | | status | ACTIVE | | subnets | 43414c53-62ae-49bc-aa6c-c9dd7705818a | | | 5b9282a1-0be1-4ade-b478-7868ad2a16ff | | tags | | | updated_at | 2016-05-04T19:27:34Z | +---------------------------+--------------------------------------+ $ openstack server create --image cirros --flavor 42 \ --nic net-id=38c5e950-b450-4c30-83d4-ee181c28aad3 my_vm +--------------------------------------+----------------------------------------------------------------+ | Field | Value | +--------------------------------------+----------------------------------------------------------------+ | OS-DCF:diskConfig | MANUAL | | OS-EXT-AZ:availability_zone | | | OS-EXT-STS:power_state | 0 | | OS-EXT-STS:task_state | scheduling | | OS-EXT-STS:vm_state | building | | OS-SRV-USG:launched_at | - | | OS-SRV-USG:terminated_at | - | | accessIPv4 | | | accessIPv6 | | | adminPass | oTLQLR3Kezmt | | config_drive | | | created | 2016-02-15T19:27:34Z | | flavor | m1.nano (42) | | hostId | | | id | 43f328bb-b2d1-4cf1-a36f-3b2593397cb1 | | image | cirros-0.3.5-x86_64-uec (b9d981eb-d21c-4ce2-9dbc-dd38f3d9015f) | | key_name | - | | locked | False | | metadata | {} | | name | my_vm | | os-extended-volumes:volumes_attached | [] | | progress | 0 | | security_groups | default | | status | BUILD | | tenant_id | d5660cb1e6934612a01b4fb2fb630725 | | updated | 2016-02-15T19:27:34Z | | user_id | 8bb6e578cba24e7db9d3810633124525 | +--------------------------------------+----------------------------------------------------------------+ $ openstack server list +--------------------------------------+-------+--------+----------------------------------------------------------+--------+---------+ | ID | Name | Status | Networks | Image | Flavor | +--------------------------------------+-------+--------+----------------------------------------------------------+--------+---------+ | 43f328bb-b2d1-4cf1-a36f-3b2593397cb1 | my_vm | ACTIVE | private=fda4:653e:71b0:0:f816:3eff:fe16:b5f2, 192.0.2.15 | cirros | m1.nano | +--------------------------------------+-------+--------+----------------------------------------------------------+--------+---------+ $ openstack port list --device-id 43f328bb-b2d1-4cf1-a36f-3b2593397cb1 +--------------------------------------+------+-------------------+-------------------------------------------------------------------------------------------------------------+--------+ | ID | Name | MAC Address | Fixed IP Addresses | Status | +--------------------------------------+------+-------------------+-------------------------------------------------------------------------------------------------------------+--------+ | da0b1f75-c895-460f-9fc1-4d6ec84cf85f | | fa:16:3e:16:b5:f2 | ip_address='192.0.2.15', subnet_id='5b9282a1-0be1-4ade-b478-7868ad2a16ff' | ACTIVE | | | | | ip_address='fda4:653e:71b0:0:f816:3eff:fe16:b5f2', subnet_id='43414c53-62ae-49bc-aa6c-c9dd7705818a' | | 
+--------------------------------------+------+-------------------+-------------------------------------------------------------------------------------------------------------+--------+ $ openstack port show da0b1f75-c895-460f-9fc1-4d6ec84cf85f +-----------------------+------------------------------------------------------------------------------------------------------------+ | Field | Value | +-----------------------+------------------------------------------------------------------------------------------------------------+ | admin_state_up | UP | | allowed_address_pairs | | | binding_host_id | vultr.guest | | binding_profile | | | binding_vif_details | datapath_type='system', ovs_hybrid_plug='True', port_filter='True' | | binding_vif_type | ovs | | binding_vnic_type | normal | | created_at | 2016-02-15T19:27:34Z | | data_plane_status | None | | description | | | device_id | 43f328bb-b2d1-4cf1-a36f-3b2593397cb1 | | device_owner | compute:None | | dns_assignment | fqdn='my-vm.example.org.', hostname='my-vm', ip_address='192.0.2.15' | | | fqdn='my-vm.example.org.', hostname='my-vm', ip_address='fda4:653e:71b0:0:f816:3eff:fe16:b5f2' | | dns_domain | example.org. | | dns_name | my-vm | | extra_dhcp_opts | | | fixed_ips | ip_address='192.0.2.15', subnet_id='5b9282a1-0be1-4ade-b478-7868ad2a16ff' | | | ip_address='fda4:653e:71b0:0:f816:3eff:fe16:b5f2', subnet_id='43414c53-62ae-49bc-aa6c-c9dd7705818a' | | id | da0b1f75-c895-460f-9fc1-4d6ec84cf85f | | mac_address | fa:16:3e:16:b5:f2 | | name | | | network_id | 38c5e950-b450-4c30-83d4-ee181c28aad3 | | port_security_enabled | True | | project_id | d5660cb1e6934612a01b4fb2fb630725 | | qos_policy_id | None | | revision_number | 1 | | security_group_ids | 1f0ddd73-7e3c-48bd-a64c-7ded4fe0e635 | | status | ACTIVE | | tags | | | trunk_details | None | | updated_at | 2016-02-15T19:27:34Z | +-----------------------+------------------------------------------------------------------------------------------------------------+ $ openstack recordset list example.org. +--------------------------------------+--------------------+------+-----------------------------------------------------------------------+--------+--------+ | id | name | type | records | status | action | +--------------------------------------+--------------------+------+-----------------------------------------------------------------------+--------+--------+ | a5fe696d-203f-4018-b0d8-590221adb513 | example.org. | NS | ns1.devstack.org. | ACTIVE | NONE | | e7c05a5d-83a0-4fe5-8bd5-ab058a3326aa | example.org. | SOA | ns1.devstack.org. malavall.us.ibm.com. 
1513767794 3532 600 86400 3600 | ACTIVE | NONE | +--------------------------------------+--------------------+------+-----------------------------------------------------------------------+--------+--------+ $ openstack floating ip create 41fa3995-9e4a-4cd9-bb51-3e5424f2ff2a \ --port da0b1f75-c895-460f-9fc1-4d6ec84cf85f +---------------------+--------------------------------------+ | Field | Value | +---------------------+--------------------------------------+ | created_at | 2016-02-15T20:27:34Z | | description | | | dns_domain | | | dns_name | | | fixed_ip_address | 192.0.2.15 | | floating_ip_address | 198.51.100.4 | | floating_network_id | 41fa3995-9e4a-4cd9-bb51-3e5424f2ff2a | | id | e78f6eb1-a35f-4a90-941d-87c888d5fcc7 | | name | 198.51.100.4 | | port_id | da0b1f75-c895-460f-9fc1-4d6ec84cf85f | | project_id | d5660cb1e6934612a01b4fb2fb630725 | | qos_policy_id | None | | revision_number | 1 | | router_id | 970ebe83-c4a3-4642-810e-43ab7b0c2b5f | | status | DOWN | | subnet_id | None | | tags | [] | | updated_at | 2016-02-15T20:27:34Z | +---------------------+--------------------------------------+ $ openstack recordset list example.org. +--------------------------------------+--------------------+------+-----------------------------------------------------------------------+--------+--------+ | id | name | type | records | status | action | +--------------------------------------+--------------------+------+-----------------------------------------------------------------------+--------+--------+ | a5fe696d-203f-4018-b0d8-590221adb513 | example.org. | NS | ns1.devstack.org. | ACTIVE | NONE | | e7c05a5d-83a0-4fe5-8bd5-ab058a3326aa | example.org. | SOA | ns1.devstack.org. malavall.us.ibm.com. 1513768814 3532 600 86400 3600 | ACTIVE | NONE | | 5ff53fd0-3746-48da-b9c9-77ed3004ec67 | my-vm.example.org. | A | 198.51.100.4 | ACTIVE | NONE | +--------------------------------------+--------------------+------+-----------------------------------------------------------------------+--------+--------+ In this example, notice that the data is published in the DNS service when the floating IP is associated to the port. Following are the PTR records created for this example. Note that for IPv4, the value of ``ipv4_ptr_zone_prefix_size`` is 24. Also, since the zone for the PTR records is created in the ``service`` project, you need to use admin credentials in order to be able to view it. .. code-block:: console $ openstack recordset list --all-projects 100.51.198.in-addr.arpa. +--------------------------------------+----------------------------------+----------------------------+------+---------------------------------------------------------------------+--------+--------+ | id | project_id | name | type | data | status | action | +--------------------------------------+----------------------------------+-----------------------------------+---------------------------------------------------------------------+--------+--------+ | 2dd0b894-25fa-4563-9d32-9f13bd67f329 | 07224d17d76d42499a38f00ba4339710 | 100.51.198.in-addr.arpa. | NS | ns1.devstack.org. | ACTIVE | NONE | | 47b920f1-5eff-4dfa-9616-7cb5b7cb7ca6 | 07224d17d76d42499a38f00ba4339710 | 100.51.198.in-addr.arpa. | SOA | ns1.devstack.org. admin.example.org. 1455564862 3600 600 86400 3600 | ACTIVE | NONE | | fb1edf42-abba-410c-8397-831f45fd0cd7 | 07224d17d76d42499a38f00ba4339710 | 4.100.51.198.in-addr.arpa. | PTR | my-vm.example.org. 
| ACTIVE | NONE | +--------------------------------------+----------------------------------+----------------------------+------+---------------------------------------------------------------------+--------+--------+ Use case 2: Floating IPs are published in the external DNS service ------------------------------------------------------------------ In this use case, the user assigns ``dns_name`` and ``dns_domain`` attributes to a floating IP when it is created. The floating IP data becomes visible in the external DNS service as soon as it is created. The floating IP can be associated with a port on creation or later on. The following example shows a user booting an instance and then creating a floating IP associated to the port allocated for the instance: .. code-block:: console $ openstack network show 38c5e950-b450-4c30-83d4-ee181c28aad3 +---------------------------+----------------------------------------------------------------------------+ | Field | Value | +---------------------------+----------------------------------------------------------------------------+ | admin_state_up | UP | | availability_zone_hints | | | availability_zones | nova | | created_at | 2016-05-04T19:27:34Z | | description | | | dns_domain | example.org. | | id | 38c5e950-b450-4c30-83d4-ee181c28aad3 | | ipv4_address_scope | None | | ipv6_address_scope | None | | is_default | None | | is_vlan_transparent | None | | mtu | 1450 | | name | private | | port_security_enabled | True | | project_id | d5660cb1e6934612a01b4fb2fb630725 | | provider:network_type | vlan | | provider:physical_network | None | | provider:segmentation_id | 24 | | qos_policy_id | None | | revision_number | 1 | | router:external | Internal | | segments | None | | shared | False | | status | ACTIVE | | subnets | 43414c53-62ae-49bc-aa6c-c9dd7705818a, 5b9282a1-0be1-4ade-b478-7868ad2a16ff | | tags | | | updated_at | 2016-05-04T19:27:34Z | +---------------------------+----------------------------------------------------------------------------+ $ openstack server create --image cirros --flavor 42 \ --nic net-id=38c5e950-b450-4c30-83d4-ee181c28aad3 my_vm +--------------------------------------+----------------------------------------------------------------+ | Field | Value | +--------------------------------------+----------------------------------------------------------------+ | OS-DCF:diskConfig | MANUAL | | OS-EXT-AZ:availability_zone | | | OS-EXT-STS:power_state | 0 | | OS-EXT-STS:task_state | scheduling | | OS-EXT-STS:vm_state | building | | OS-SRV-USG:launched_at | - | | OS-SRV-USG:terminated_at | - | | accessIPv4 | | | accessIPv6 | | | adminPass | HLXGznYqXM4J | | config_drive | | | created | 2016-02-15T19:42:44Z | | flavor | m1.nano (42) | | hostId | | | id | 71fb4ac8-eed8-4644-8113-0641962bb125 | | image | cirros-0.3.5-x86_64-uec (b9d981eb-d21c-4ce2-9dbc-dd38f3d9015f) | | key_name | - | | locked | False | | metadata | {} | | name | my_vm | | os-extended-volumes:volumes_attached | [] | | progress | 0 | | security_groups | default | | status | BUILD | | tenant_id | d5660cb1e6934612a01b4fb2fb630725 | | updated | 2016-02-15T19:42:44Z | | user_id | 8bb6e578cba24e7db9d3810633124525 | +--------------------------------------+----------------------------------------------------------------+ $ openstack server list +--------------------------------------+-------+--------+----------------------------------------------------------+--------+---------+ | ID | Name | Status | Networks | Image | Flavor | 
+--------------------------------------+-------+--------+----------------------------------------------------------+--------+---------+ | 71fb4ac8-eed8-4644-8113-0641962bb125 | my_vm | ACTIVE | private=fda4:653e:71b0:0:f816:3eff:fe24:8614, 192.0.2.16 | cirros | m1.nano | +--------------------------------------+-------+--------+----------------------------------------------------------+--------+---------+ $ openstack port list --device-id 71fb4ac8-eed8-4644-8113-0641962bb125 +--------------------------------------+------+-------------------+-----------------------------------------------------------------------------------------------------+--------+ | ID | Name | MAC Address | Fixed IP Addresses | Status | +--------------------------------------+------+-------------------+-----------------------------------------------------------------------------------------------------+--------+ | 1e7033fb-8e9d-458b-89ed-8312cafcfdcb | | fa:16:3e:24:86:14 | ip_address='192.0.2.16', subnet_id='5b9282a1-0be1-4ade-b478-7868ad2a16ff' | ACTIVE | | | | | ip_address='fda4:653e:71b0:0:f816:3eff:fe24:8614', subnet_id='43414c53-62ae-49bc-aa6c-c9dd7705818a' | | +--------------------------------------+------+-------------------+-----------------------------------------------------------------------------------------------------+--------+ $ openstack port show 1e7033fb-8e9d-458b-89ed-8312cafcfdcb +-----------------------+------------------------------------------------------------------------------------------------------------+ | Field | Value | +-----------------------+------------------------------------------------------------------------------------------------------------+ | admin_state_up | UP | | allowed_address_pairs | | | binding_host_id | vultr.guest | | binding_profile | | | binding_vif_details | datapath_type='system', ovs_hybrid_plug='True', port_filter='True' | | binding_vif_type | ovs | | binding_vnic_type | normal | | created_at | 2016-02-15T19:42:44Z | | data_plane_status | None | | description | | | device_id | 71fb4ac8-eed8-4644-8113-0641962bb125 | | device_owner | compute:None | | dns_assignment | fqdn='my-vm.example.org.', hostname='my-vm', ip_address='192.0.2.16' | | | fqdn='my-vm.example.org.', hostname='my-vm', ip_address='fda4:653e:71b0:0:f816:3eff:fe24:8614' | | dns_domain | example.org. | | dns_name | my-vm | | extra_dhcp_opts | | | fixed_ips | ip_address='192.0.2.16', subnet_id='5b9282a1-0be1-4ade-b478-7868ad2a16ff' | | | ip_address='fda4:653e:71b0:0:f816:3eff:fe24:8614', subnet_id='43414c53-62ae-49bc-aa6c-c9dd7705818a' | | id | 1e7033fb-8e9d-458b-89ed-8312cafcfdcb | | mac_address | fa:16:3e:24:86:14 | | name | | | network_id | 38c5e950-b450-4c30-83d4-ee181c28aad3 | | port_security_enabled | True | | project_id | d5660cb1e6934612a01b4fb2fb630725 | | qos_policy_id | None | | revision_number | 1 | | security_group_ids | 1f0ddd73-7e3c-48bd-a64c-7ded4fe0e635 | | status | ACTIVE | | tags | | | trunk_details | None | | updated_at | 2016-02-15T19:42:44Z | +-----------------------+------------------------------------------------------------------------------------------------------------+ $ openstack recordset list example.org. 
+--------------------------------------+--------------------+------+-----------------------------------------------------------------------+--------+--------+ | id | name | type | records | status | action | +--------------------------------------+--------------------+------+-----------------------------------------------------------------------+--------+--------+ | 56ca0b88-e343-4c98-8faa-19746e169baf | example.org. | NS | ns1.devstack.org. | ACTIVE | NONE | | 10a36008-6ecf-47c3-b321-05652a929b04 | example.org. | SOA | ns1.devstack.org. malavall.us.ibm.com. 1455565110 3532 600 86400 3600 | ACTIVE | NONE | +--------------------------------------+--------------------+------+-----------------------------------------------------------------------+--------+--------+ $ openstack floating ip create --dns-domain example.org. --dns-name my-floatingip 41fa3995-9e4a-4cd9-bb51-3e5424f2ff2a +---------------------+--------------------------------------+ | Field | Value | +---------------------+--------------------------------------+ | created_at | 2019-06-12T15:54:45Z | | description | | | dns_domain | example.org. | | dns_name | my-floatingip | | fixed_ip_address | None | | floating_ip_address | 198.51.100.5 | | floating_network_id | 41fa3995-9e4a-4cd9-bb51-3e5424f2ff2a | | id | 3ae82f53-3349-4aac-810e-ed2a8f6374b8 | | name | 198.51.100.53 | | port_details | None | | port_id | None | | project_id | d5660cb1e6934612a01b4fb2fb630725 | | qos_policy_id | None | | revision_number | 0 | | router_id | None | | status | DOWN | | subnet_id | None | | tags | [] | | updated_at | 2019-06-12T15:54:45Z | +---------------------+--------------------------------------+ $ openstack recordset list example.org. +--------------------------------------+----------------------------+------+-----------------------------------------------------------------------+--------+--------+ | id | name | type | records | status | action | +--------------------------------------+----------------------------+------+-----------------------------------------------------------------------+--------+--------+ | 56ca0b88-e343-4c98-8faa-19746e169baf | example.org. | NS | ns1.devstack.org. | ACTIVE | NONE | | 10a36008-6ecf-47c3-b321-05652a929b04 | example.org. | SOA | ns1.devstack.org. malavall.us.ibm.com. 1455565110 3532 600 86400 3600 | ACTIVE | NONE | | 8884c56f-3ef5-446e-ae4d-8053cc8bc2b4 | my-floatingip.example.org. | A | 198.51.100.53 | ACTIVE | NONE | +--------------------------------------+----------------------------+------+-----------------------------------------------------------------------+--------+--------+ Note that in this use case: * The ``dns_name`` and ``dns_domain`` attributes of a floating IP must be specified together on creation. They cannot be assigned to the floating IP separately and they cannot be changed after the floating IP has been created. * The ``dns_name`` and ``dns_domain`` of a floating IP have precedence, for purposes of being published in the external DNS service, over the ``dns_name`` of its associated port and the ``dns_domain`` of the port's network, whether they are specified or not. Only the ``dns_name`` and the ``dns_domain`` of the floating IP are published in the external DNS service. Following are the PTR records created for this example. Note that for IPv4, the value of ``ipv4_ptr_zone_prefix_size`` is 24. 
Also, since the zone for the PTR records is created in the project specified in the ``[designate]`` section of the configuration above, usually the ``service`` project, you need to use admin credentials in order to be able to view it.

.. code-block:: console

   $ openstack recordset list --all-projects 100.51.198.in-addr.arpa.
   +--------------------------------------+----------------------------------+----------------------------+------+---------------------------------------------------------------------+--------+--------+
   | id | project_id | name | type | data | status | action |
   +--------------------------------------+----------------------------------+----------------------------+------+---------------------------------------------------------------------+--------+--------+
   | 2dd0b894-25fa-4563-9d32-9f13bd67f329 | 07224d17d76d42499a38f00ba4339710 | 100.51.198.in-addr.arpa. | NS | ns1.devstack.org. | ACTIVE | NONE |
   | 47b920f1-5eff-4dfa-9616-7cb5b7cb7ca6 | 07224d17d76d42499a38f00ba4339710 | 100.51.198.in-addr.arpa. | SOA | ns1.devstack.org. admin.example.org. 1455564862 3600 600 86400 3600 | ACTIVE | NONE |
   | 589a0171-e77a-4ab6-ba6e-23114f2b9366 | 07224d17d76d42499a38f00ba4339710 | 5.100.51.198.in-addr.arpa. | PTR | my-floatingip.example.org. | ACTIVE | NONE |
   +--------------------------------------+----------------------------------+----------------------------+------+---------------------------------------------------------------------+--------+--------+

.. _config-dns-use-case-3:

Use case 3: Ports are published directly in the external DNS service
--------------------------------------------------------------------

In this case, the user is creating ports or booting instances on a network that is accessible externally. There are multiple possible scenarios, depending on which of the DNS extensions is enabled in the Neutron configuration. These extensions are described below, in descending order of priority.

Use case 3a: The ``subnet_dns_publish_fixed_ip`` extension
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

When the ``subnet_dns_publish_fixed_ip`` extension is enabled, it is possible to select per subnet whether DNS records should be published for fixed IPs that are assigned to ports from that subnet. This is controlled via the ``dns_publish_fixed_ips`` attribute that the extension adds to the definition of the subnet resource. It is a boolean flag with a default value of ``False``, but it can be set to ``True`` when creating or updating subnets. When the flag is ``True``, all fixed IPs from the subnet are published in the external DNS service, while IPs from subnets that have the flag set to ``False`` are not published, even if they would otherwise meet the criteria of the other use cases below.

A typical scenario for this use case is a dual-stack deployment, where a tenant network is configured with both an IPv4 and an IPv6 subnet. The IPv4 subnet will usually use some RFC 1918 address space and be NATted towards the outside on the attached router; therefore, the fixed IPs from this subnet are not globally routed and should not be published in the DNS service. (One can still bind floating IPs to these fixed IPs, and DNS records for those floating IPs can still be published as described above in use cases 1 and 2.) For the IPv6 subnet, however, no NAT happens; instead, the subnet is configured with a globally routable prefix, so the user will want to publish DNS records for fixed IPs from this subnet. This can be achieved by setting the ``dns_publish_fixed_ips`` attribute of the IPv6 subnet to ``True`` while leaving the flag set to ``False`` for the IPv4 subnet.
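The flag can be set when a subnet is created, as in the example that follows, or toggled later on an existing subnet. The following sketch shows the update variant; it assumes the installed ``python-openstackclient`` is recent enough to expose the flag and reuses the subnet names from the example below:

.. code-block:: console

   $ openstack subnet set --dns-publish-fixed-ip dualstackv6
   $ openstack subnet set --no-dns-publish-fixed-ip dualstackv4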
Example:

.. code-block:: console

   $ openstack network create dualstack
   ... output omitted ...
   $ openstack subnet create --network dualstack dualstackv4 --subnet-range 192.0.2.0/24
   ... output omitted ...
   $ openstack subnet create --network dualstack dualstackv6 --ip-version 6 --subnet-range 2001:db8:42:42::/64 --dns-publish-fixed-ip
   ... output omitted ...
   $ openstack zone create example.org. --email mail@example.org
   ... output omitted ...
   $ openstack recordset list example.org.
   +--------------------------------------+--------------+------+--------------------------------------------------------------------+--------+--------+
   | id | name | type | records | status | action |
   +--------------------------------------+--------------+------+--------------------------------------------------------------------+--------+--------+
   | 404e9846-1482-433b-8bbc-67677e587d28 | example.org. | NS | ns1.devstack.org. | ACTIVE | NONE |
   | de73576a-f9c7-4892-934c-259b77ff02c0 | example.org. | SOA | ns1.devstack.org. mail.example.org. 1575897792 3559 600 86400 3600 | ACTIVE | NONE |
   +--------------------------------------+--------------+------+--------------------------------------------------------------------+--------+--------+
   $ openstack port create port1 --dns-domain example.org. --dns-name port1 --network dualstack
   +-------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
   | Field | Value |
   +-------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
   | admin_state_up | UP |
   | allowed_address_pairs | |
   | binding_host_id | None |
   | binding_profile | None |
   | binding_vif_details | None |
   | binding_vif_type | None |
   | binding_vnic_type | normal |
   | created_at | 2019-12-09T13:23:52Z |
   | data_plane_status | None |
   | description | |
   | device_id | |
   | device_owner | |
   | dns_assignment | fqdn='port1.openstackgate.local.', hostname='port1', ip_address='192.0.2.100' |
   | | fqdn='port1.openstackgate.local.', hostname='port1', ip_address='2001:db8:42:42::2a2' |
   | dns_domain | example.org.
| | dns_name | port1 | | extra_dhcp_opts | | | fixed_ips | ip_address='192.0.2.100', subnet_id='47cc9a39-c88b-4082-a52c-1237c2a1d479' | | | ip_address='2001:db8:42:42::2a2', subnet_id='f9c04195-1000-4575-a203-3c174772617f' | | id | f8bc991b-1f84-435a-a5f8-814bd8b9ae9f | | location | cloud='devstack', project.domain_id='default', project.domain_name=, project.id='86de4dab952d48f79e625b106f7a75f7', project.name='demo', region_name='RegionOne', zone= | | mac_address | fa:16:3e:13:7a:56 | | name | port1 | | network_id | fa8118ed-b7c2-41b8-89bc-97e46f0491ac | | port_security_enabled | True | | project_id | 86de4dab952d48f79e625b106f7a75f7 | | propagate_uplink_status | None | | qos_policy_id | None | | resource_request | None | | revision_number | 1 | | security_group_ids | f0b02df0-a0b9-4ce8-b067-8b61a8679e9d | | status | DOWN | | tags | | | trunk_details | None | | updated_at | 2019-12-09T13:23:53Z | +-------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ $ openstack recordset list example.org. +--------------------------------------+--------------------+------+--------------------------------------------------------------------+--------+--------+ | id | name | type | records | status | action | +--------------------------------------+--------------------+------+--------------------------------------------------------------------+--------+--------+ | 404e9846-1482-433b-8bbc-67677e587d28 | example.org. | NS | ns1.devstack.org. | ACTIVE | NONE | | de73576a-f9c7-4892-934c-259b77ff02c0 | example.org. | SOA | ns1.devstack.org. mail.example.org. 1575897833 3559 600 86400 3600 | ACTIVE | NONE | | 85ce74a5-7dd6-42d3-932c-c9a029dea05e | port1.example.org. | AAAA | 2001:db8:42:42::2a2 | ACTIVE | NONE | +--------------------------------------+--------------------+------+--------------------------------------------------------------------+--------+--------+ Use case 3b: The ``dns_domain_ports`` extension ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If the ``dns_domain for ports`` extension has been configured, the user can create a port specifying a non-blank value in its ``dns_domain`` attribute. If the port is created in an externally accessible network, DNS records will be published for this port: .. code-block:: console $ openstack port create --network 37aaff3a-6047-45ac-bf4f-a825e56fd2b3 --dns-name my-vm --dns-domain port-domain.org. test +-------------------------+-------------------------------------------------------------------------------+ | Field | Value | +-------------------------+-------------------------------------------------------------------------------+ | admin_state_up | UP | | allowed_address_pairs | | | binding_host_id | None | | binding_profile | None | | binding_vif_details | None | | binding_vif_type | None | | binding_vnic_type | normal | | created_at | 2019-06-12T15:43:29Z | | data_plane_status | None | | description | | | device_id | | | device_owner | | | dns_assignment | fqdn='my-vm.example.org.', hostname='my-vm', ip_address='203.0.113.9' | | | fqdn='my-vm.example.org.', hostname='my-vm', ip_address='2001:db8:10::9' | | dns_domain | port-domain.org. 
| | dns_name | my-vm | | extra_dhcp_opts | | | fixed_ips | ip_address='203.0.113.9', subnet_id='277eca5d-9869-474b-960e-6da5951d09f7' | | | ip_address='2001:db8:10::9', subnet_id='eab47748-3f0a-4775-a09f-b0c24bb64bc4' | | id | 57541c27-f8a9-41f1-8dde-eb10155496e6 | | mac_address | fa:16:3e:55:d6:c7 | | name | test | | network_id | 37aaff3a-6047-45ac-bf4f-a825e56fd2b3 | | port_security_enabled | True | | project_id | 07b21ad4-edb6-420b-bd76-9bb4aab0d135 | | propagate_uplink_status | None | | qos_policy_id | None | | resource_request | None | | revision_number | 1 | | security_group_ids | 82227b10-d135-4bca-b41f-63c1f2286b3e | | status | DOWN | | tags | | | trunk_details | None | | updated_at | 2019-06-12T15:43:29Z | +-------------------------+-------------------------------------------------------------------------------+ In this case, the port's ``dns_name`` (``my-vm``) will be published in the ``port-domain.org.`` zone, as shown here: .. code-block:: console $ openstack recordset list port-domain.org. +--------------------------------------+-------------------------+------+-----------------------------------------------------------------------+--------+--------+ | id | name | type | records | status | action | +--------------------------------------+-------------------------+------+-----------------------------------------------------------------------+--------+--------+ | 03e5a35b-d984-4d10-942a-2de8ccb9b941 | port-domain.org. | SOA | ns1.devstack.org. malavall.us.ibm.com. 1503272259 3549 600 86400 3600 | ACTIVE | NONE | | d2dd1dfe-531d-4fea-8c0e-f5b559942ac5 | port-domain.org. | NS | ns1.devstack.org. | ACTIVE | NONE | | 67a8e83d-7e3c-4fb1-9261-0481318bb7b5 | my-vm.port-domain.org. | A | 203.0.113.9 | ACTIVE | NONE | | 5a4f671c-9969-47aa-82e1-e05754021852 | my-vm.port-domain.org. | AAAA | 2001:db8:10::9 | ACTIVE | NONE | +--------------------------------------+-------------------------+------+-----------------------------------------------------------------------+--------+--------+ .. note:: If both the port and its network have a valid non-blank string assigned to their ``dns_domain`` attributes, the port's ``dns_domain`` takes precedence over the network's. .. note:: The name assigned to the port's ``dns_domain`` attribute must end with a period (``.``). .. note:: In the above example, the ``port-domain.org.`` zone must be created before Neutron can publish any port data to it. .. note:: See :ref:`config-dns-int-ext-serv-net` for detailed instructions on how to create the externally accessible network. Use case 3c: The ``dns`` extension ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If the user wants to publish a port in the external DNS service in a zone specified by the ``dns_domain`` attribute of the network, these are the steps to be taken: #. Assign a valid domain name to the network's ``dns_domain`` attribute. This name must end with a period (``.``). #. Boot an instance specifying the externally accessible network. Alternatively, create a port on the externally accessible network specifying a valid value to its ``dns_name`` attribute. If the port is going to be used for an instance boot, the value assigned to ``dns_name`` must be equal to the ``hostname`` that the Compute service will assign to the instance. Otherwise, the boot will fail. Once these steps are executed, the port's DNS data will be published in the external DNS service. This is an example: .. 
code-block:: console $ openstack network list +--------------------------------------+----------+-----------------------------------------------------------------------------+ | ID | Name | Subnets | +--------------------------------------+----------+-----------------------------------------------------------------------------+ | 41fa3995-9e4a-4cd9-bb51-3e5424f2ff2a | public | a67cfdf7-9d5d-406f-8a19-3f38e4fc3e74, cbd8c6dc-ca81-457e-9c5d-f8ece7ef67f8 | | 37aaff3a-6047-45ac-bf4f-a825e56fd2b3 | external | 277eca5d-9869-474b-960e-6da5951d09f7, eab47748-3f0a-4775-a09f-b0c24bb64bc4 | | bf2802a0-99a0-4e8c-91e4-107d03f158ea | my-net | 6141b474-56cd-430f-b731-71660bb79b79 | | 38c5e950-b450-4c30-83d4-ee181c28aad3 | private | 43414c53-62ae-49bc-aa6c-c9dd7705818a, 5b9282a1-0be1-4ade-b478-7868ad2a16ff | +--------------------------------------+----------+-----------------------------------------------------------------------------+ $ openstack network set --dns-domain example.org. 37aaff3a-6047-45ac-bf4f-a825e56fd2b3 $ openstack network show 37aaff3a-6047-45ac-bf4f-a825e56fd2b3 +---------------------------+----------------------------------------------------------------------------+ | Field | Value | +---------------------------+----------------------------------------------------------------------------+ | admin_state_up | UP | | availability_zone_hints | | | availability_zones | nova | | created_at | 2016-02-14T19:42:44Z | | description | | | dns_domain | example.org. | | id | 37aaff3a-6047-45ac-bf4f-a825e56fd2b3 | | ipv4_address_scope | None | | ipv6_address_scope | None | | is_default | None | | is_vlan_transparent | None | | mtu | 1450 | | name | external | | port_security_enabled | True | | project_id | 04fc2f83966245dba907efb783f8eab9 | | provider:network_type | vlan | | provider:physical_network | None | | provider:segmentation_id | 2016 | | qos_policy_id | None | | revision_number | 4 | | router:external | Internal | | segments | None | | shared | True | | status | ACTIVE | | subnets | eab47748-3f0a-4775-a09f-b0c24bb64bc4, 277eca5d-9869-474b-960e-6da5951d09f7 | | tags | | | updated_at | 2016-02-15T13:42:44Z | +---------------------------+----------------------------------------------------------------------------+ $ openstack recordset list example.org. +--------------------------------------+--------------+------+-----------------------------------------------------------------------+--------+--------+ | id | name | type | records | status | action | +--------------------------------------+--------------+------+-----------------------------------------------------------------------+--------+--------+ | a5fe696d-203f-4018-b0d8-590221adb513 | example.org. | NS | ns1.devstack.org. | ACTIVE | NONE | | e7c05a5d-83a0-4fe5-8bd5-ab058a3326aa | example.org. | SOA | ns1.devstack.org. malavall.us.ibm.com. 
1513767619 3532 600 86400 3600 | ACTIVE | NONE | +--------------------------------------+--------------+------+-----------------------------------------------------------------------+--------+--------+ $ openstack port create --network 37aaff3a-6047-45ac-bf4f-a825e56fd2b3 --dns-name my-vm test +-----------------------+-------------------------------------------------------------------------------+ | Field | Value | +-----------------------+-------------------------------------------------------------------------------+ | admin_state_up | UP | | allowed_address_pairs | | | binding_host_id | | | binding_profile | | | binding_vif_details | | | binding_vif_type | unbound | | binding_vnic_type | normal | | created_at | 2016-02-15T16:42:44Z | | data_plane_status | None | | description | | | device_id | | | device_owner | | | dns_assignment | fqdn='my-vm.example.org.', hostname='my-vm', ip_address='203.0.113.9' | | | fqdn='my-vm.example.org.', hostname='my-vm', ip_address='2001:db8:10::9' | | dns_domain | None | | dns_name | my-vm | | extra_dhcp_opts | | | fixed_ips | ip_address='203.0.113.9', subnet_id='277eca5d-9869-474b-960e-6da5951d09f7' | | | ip_address='2001:db8:10::9', subnet_id=‘eab47748-3f0a-4775-a09f-b0c24bb64bc4’ | | id | 04be331b-dc5e-410a-9103-9c8983aeb186 | | mac_address | fa:16:3e:0f:4b:e4 | | name | test | | network_id | 37aaff3a-6047-45ac-bf4f-a825e56fd2b3 | | port_security_enabled | True | | project_id | d5660cb1e6934612a01b4fb2fb630725 | | qos_policy_id | None | | revision_number | 1 | | security_group_ids | 1f0ddd73-7e3c-48bd-a64c-7ded4fe0e635 | | status | DOWN | | tags | | | trunk_details | None | | updated_at | 2016-02-15T16:42:44Z | +-----------------------+-------------------------------------------------------------------------------+ $ openstack recordset list example.org. +--------------------------------------+--------------------+------+-----------------------------------------------------------------------+--------+--------+ | id | name | type | records | status | action | +--------------------------------------+--------------------+------+-----------------------------------------------------------------------+--------+--------+ | a5fe696d-203f-4018-b0d8-590221adb513 | example.org. | NS | ns1.devstack.org. | ACTIVE | NONE | | e7c05a5d-83a0-4fe5-8bd5-ab058a3326aa | example.org. | SOA | ns1.devstack.org. malavall.us.ibm.com. 1513767794 3532 600 86400 3600 | ACTIVE | NONE | | fa753ab8-bffa-400d-9ef8-d4a3b1a7ffbf | my-vm.example.org. | A | 203.0.113.9 | ACTIVE | NONE | | 04abf9f8-c7a3-43f6-9a55-95cee9b144a9 | my-vm.example.org. 
| AAAA | 2001:db8:10::9 | ACTIVE | NONE | +--------------------------------------+--------------------+------+-----------------------------------------------------------------------+--------+--------+ $ openstack server create --image cirros --flavor 42 \ --nic port-id=04be331b-dc5e-410a-9103-9c8983aeb186 my_vm +--------------------------------------+----------------------------------------------------------------+ | Field | Value | +--------------------------------------+----------------------------------------------------------------+ | OS-DCF:diskConfig | MANUAL | | OS-EXT-AZ:availability_zone | | | OS-EXT-STS:power_state | 0 | | OS-EXT-STS:task_state | scheduling | | OS-EXT-STS:vm_state | building | | OS-SRV-USG:launched_at | - | | OS-SRV-USG:terminated_at | - | | accessIPv4 | | | accessIPv6 | | | adminPass | TDc9EpBT3B9W | | config_drive | | | created | 2016-02-15T19:10:43Z | | flavor | m1.nano (42) | | hostId | | | id | 62c19691-d1c7-4d7b-a88e-9cc4d95d4f41 | | image | cirros-0.3.5-x86_64-uec (b9d981eb-d21c-4ce2-9dbc-dd38f3d9015f) | | key_name | - | | locked | False | | metadata | {} | | name | my_vm | | os-extended-volumes:volumes_attached | [] | | progress | 0 | | security_groups | default | | status | BUILD | | tenant_id | d5660cb1e6934612a01b4fb2fb630725 | | updated | 2016-02-15T19:10:43Z | | user_id | 8bb6e578cba24e7db9d3810633124525 | +--------------------------------------+----------------------------------------------------------------+ $ openstack server list +--------------------------------------+-------+--------+--------------------------------------+--------+---------+ | ID | Name | Status | Networks | Image | Flavor | +--------------------------------------+-------+--------+--------------------------------------+--------+---------+ | 62c19691-d1c7-4d7b-a88e-9cc4d95d4f41 | my_vm | ACTIVE | external=203.0.113.9, 2001:db8:10::9 | cirros | m1.nano | +--------------------------------------+-------+--------+--------------------------------------+--------+---------+ In this example the port is created manually by the user and then used to boot an instance. Notice that: * The port's data was visible in the DNS service as soon as it was created. * See :ref:`config-dns-performance-considerations` for an explanation of the potential performance impact associated with this use case. Following are the PTR records created for this example. Note that for IPv4, the value of ``ipv4_ptr_zone_prefix_size`` is 24. In the case of IPv6, the value of ``ipv6_ptr_zone_prefix_size`` is 116. .. code-block:: console $ openstack recordset list --all-projects 113.0.203.in-addr.arpa. +--------------------------------------+----------------------------------+---------------------------+------+---------------------------------------------------------------------+--------+--------+ | id | project_id | name | type | records | status | action | +--------------------------------------+----------------------------------+---------------------------+------+---------------------------------------------------------------------+--------+--------+ | 32f1c05b-7c5d-4230-9088-961a0a462d28 | 07224d17d76d42499a38f00ba4339710 | 113.0.203.in-addr.arpa. | SOA | ns1.devstack.org. admin.example.org. 1455563035 3600 600 86400 3600 | ACTIVE | NONE | | 3d402c43-b215-4a75-a730-51cbb8999cb8 | 07224d17d76d42499a38f00ba4339710 | 113.0.203.in-addr.arpa. | NS | ns1.devstack.org. | ACTIVE | NONE | | 8e4e618c-24b0-43db-ab06-91b741a91c10 | 07224d17d76d42499a38f00ba4339710 | 9.113.0.203.in-addr.arpa. | PTR | my-vm.example.org. 
| ACTIVE | NONE |
+--------------------------------------+----------------------------------+---------------------------+------+---------------------------------------------------------------------+--------+--------+

   $ openstack recordset list --all-projects 0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.8.b.d.0.1.0.0.2.ip6.arpa.
   +--------------------------------------+----------------------------------+---------------------------------------------------------------------------+------+---------------------------------------------------------------------+--------+--------+
   | id | project_id | name | type | records | status | action |
   +--------------------------------------+----------------------------------+---------------------------------------------------------------------------+------+---------------------------------------------------------------------+--------+--------+
   | d8923354-13eb-4bd9-914a-0a2ae5f95989 | 07224d17d76d42499a38f00ba4339710 | 0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.8.b.d.0.1.0.0.2.ip6.arpa. | SOA | ns1.devstack.org. admin.example.org. 1455563036 3600 600 86400 3600 | ACTIVE | NONE |
   | 72e60acd-098d-41ea-9771-5b6546c9c06f | 07224d17d76d42499a38f00ba4339710 | 0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.8.b.d.0.1.0.0.2.ip6.arpa. | NS | ns1.devstack.org. | ACTIVE | NONE |
   | 877e0215-2ddf-4d01-a7da-47f1092dfd56 | 07224d17d76d42499a38f00ba4339710 | 9.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.8.b.d.0.1.0.0.2.ip6.arpa. | PTR | my-vm.example.org. | ACTIVE | NONE |
   +--------------------------------------+----------------------------------+---------------------------------------------------------------------------+------+---------------------------------------------------------------------+--------+--------+

See :ref:`config-dns-int-ext-serv-net` for detailed instructions on how to create the externally accessible network.

.. _config-dns-performance-considerations:

Performance considerations
--------------------------

Only for :ref:`config-dns-use-case-3`, if the port binding extension is enabled in the Networking service, the Compute service will execute one additional port update operation when allocating the port for the instance during the boot process. This may have a noticeable adverse effect on the performance of the boot process that should be evaluated before adopting this use case.

.. _config-dns-int-ext-serv-net:

Configuration of the externally accessible network for use cases 3b and 3c
---------------------------------------------------------------------------

For use cases 3b and 3c, the externally accessible network must meet the following requirements:

* The network must not have the attribute ``router:external`` set to ``True``.
* The network type can be FLAT, VLAN, GRE, VXLAN or GENEVE.
* For network types VLAN, GRE, VXLAN or GENEVE, the segmentation ID must be outside the ranges assigned to project networks. This usually implies that these use cases only work for networks specifically created for this purpose by an admin; they do not work for networks that tenants can create on their own.

.. _config-dns-int:

===============
DNS integration
===============

This page serves as a guide for how to use the DNS integration functionality of the Networking service and its interaction with the Compute service.
The integration of the Networking service with an external DNSaaS (DNS-as-a-Service) is described in :ref:`config-dns-int-ext-serv`. Users can control the behavior of the Networking service with regard to DNS using two attributes associated with ports, networks, and floating IPs. The following table shows the attributes available for each of these resources: .. list-table:: :header-rows: 1 :widths: 30 30 30 * - Resource - dns_name - dns_domain * - Ports - Yes - Yes * - Networks - No - Yes * - Floating IPs - Yes - Yes .. note:: The ``DNS Integration`` extension enables all the attribute and resource combinations shown in the previous table, except for ``dns_domain`` for ports, which requires the ``dns_domain for ports`` extension. .. note:: Since the ``DNS Integration`` extension is a subset of ``dns_domain for ports``, if ``dns_domain`` functionality for ports is required, only the latter extension has to be configured. .. note:: When the ``dns_domain for ports`` extension is configured, ``DNS Integration`` is also included when the Neutron server responds to a request to list the active API extensions. This preserves backwards API compatibility. .. _config-dns-int-dns-resolution: The Networking service internal DNS resolution ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Networking service enables users to control the name assigned to ports by the internal DNS. To enable this functionality, do the following: 1. Edit the ``/etc/neutron/neutron.conf`` file and assign a value different from ``openstacklocal`` (its default value) to the ``dns_domain`` parameter in the ``[DEFAULT]`` section. As an example: .. code-block:: ini dns_domain = example.org. 2. Add ``dns`` (for the ``DNS Integration`` extension) or ``dns_domain_ports`` (for the ``dns_domain for ports`` extension) to ``extension_drivers`` in the ``[ml2]`` section of ``/etc/neutron/plugins/ml2/ml2_conf.ini``. The following is an example: .. code-block:: ini [ml2] extension_drivers = port_security,dns_domain_ports After restarting ``neutron-server``, users will be able to assign a ``dns_name`` attribute to their ports. .. note:: Enabling this functionality is a prerequisite for enabling the Networking service integration with an external DNS service, which is described in detail in :ref:`config-dns-int-ext-serv`. The following illustrates the creation of a port with ``my-port`` in its ``dns_name`` attribute. .. note:: The name assigned to the port by the Networking service internal DNS is now visible in the response in the ``dns_assignment`` attribute. ..
code-block:: console $ openstack port create --network my-net --dns-name my-port test +-----------------------+-------------------------------------------------------------------------------+ | Field | Value | +-----------------------+-------------------------------------------------------------------------------+ | admin_state_up | UP | | allowed_address_pairs | | | binding_host_id | | | binding_profile | | | binding_vif_details | | | binding_vif_type | unbound | | binding_vnic_type | normal | | created_at | 2016-02-05T21:35:04Z | | data_plane_status | None | | description | | | device_id | | | device_owner | | | dns_assignment | fqdn='my-port.example.org.', hostname='my-port', ip_address='192.0.2.67' | | dns_domain | None | | dns_name | my-port | | extra_dhcp_opts | | | fixed_ips | ip_address='192.0.2.67', subnet_id='6141b474-56cd-430f-b731-71660bb79b79' | | id | fb3c10f4-017e-420c-9be1-8f8c557ae21f | | mac_address | fa:16:3e:aa:9b:e1 | | name | test | | network_id | bf2802a0-99a0-4e8c-91e4-107d03f158ea | | port_security_enabled | True | | project_id | d5660cb1e6934612a01b4fb2fb630725 | | qos_policy_id | None | | revision_number | 1 | | security_group_ids | 1f0ddd73-7e3c-48bd-a64c-7ded4fe0e635 | | status | DOWN | | tags | | | trunk_details | None | | updated_at | 2016-02-05T21:35:04Z | +-----------------------+-------------------------------------------------------------------------------+ When this functionality is enabled, it is leveraged by the Compute service when creating instances. When allocating ports for an instance during boot, the Compute service populates the ``dns_name`` attributes of these ports with the ``hostname`` attribute of the instance, which is a DNS sanitized version of its display name. As a consequence, at the end of the boot process, the allocated ports will be known in the dnsmasq associated to their networks by their instance ``hostname``. The following is an example of an instance creation, showing how its ``hostname`` populates the ``dns_name`` attribute of the allocated port: .. 
code-block:: console $ openstack server create --image cirros --flavor 42 \ --nic net-id=37aaff3a-6047-45ac-bf4f-a825e56fd2b3 my_vm +--------------------------------------+----------------------------------------------------------------+ | Field | Value | +--------------------------------------+----------------------------------------------------------------+ | OS-DCF:diskConfig | MANUAL | | OS-EXT-AZ:availability_zone | | | OS-EXT-STS:power_state | 0 | | OS-EXT-STS:task_state | scheduling | | OS-EXT-STS:vm_state | building | | OS-SRV-USG:launched_at | - | | OS-SRV-USG:terminated_at | - | | accessIPv4 | | | accessIPv6 | | | adminPass | dB45Zvo8Jpfe | | config_drive | | | created | 2016-02-05T21:35:04Z | | flavor | m1.nano (42) | | hostId | | | id | 66c13cb4-3002-4ab3-8400-7efc2659c363 | | image | cirros-0.3.5-x86_64-uec(b9d981eb-d21c-4ce2-9dbc-dd38f3d9015f) | | key_name | - | | locked | False | | metadata | {} | | name | my_vm | | os-extended-volumes:volumes_attached | [] | | progress | 0 | | security_groups | default | | status | BUILD | | tenant_id | d5660cb1e6934612a01b4fb2fb630725 | | updated | 2016-02-05T21:35:04Z | | user_id | 8bb6e578cba24e7db9d3810633124525 | +--------------------------------------+----------------------------------------------------------------+ $ openstack port list --device-id 66c13cb4-3002-4ab3-8400-7efc2659c363 +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------------+--------+ | ID | Name | MAC Address | Fixed IP Addresses | Status | +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------------+--------+ | b3ecc464-1263-44a7-8c38-2d8a52751773 | | fa:16:3e:a8:ce:b8 | ip_address='203.0.113.8', subnet_id='277eca5d-9869-474b-960e-6da5951d09f7' | ACTIVE | | | | | ip_address='2001:db8:10::8', subnet_id='eab47748-3f0a-4775-a09f-b0c24bb64bc4' | | +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------------+--------+ $ openstack port show b3ecc464-1263-44a7-8c38-2d8a52751773 +-----------------------+------------------------------------------------------------------------------------------------------------+ | Field | Value | +-----------------------+------------------------------------------------------------------------------------------------------------+ | admin_state_up | UP | | allowed_address_pairs | | | binding_host_id | vultr.guest | | binding_profile | | | binding_vif_details | datapath_type='system', ovs_hybrid_plug='True', port_filter='True' | | binding_vif_type | ovs | | binding_vnic_type | normal | | created_at | 2016-02-05T21:35:04Z | | data_plane_status | None | | description | | | device_id | 66c13cb4-3002-4ab3-8400-7efc2659c363 | | device_owner | compute:None | | dns_assignment | fqdn='my-vm.example.org.', hostname='my-vm', ip_address='203.0.113.8' | | | fqdn='my-vm.example.org.', hostname='my-vm', ip_address='2001:db8:10::8' | | dns_domain | example.org. 
| | dns_name | my-vm | | extra_dhcp_opts | | | fixed_ips | ip_address='203.0.113.8', subnet_id='277eca5d-9869-474b-960e-6da5951d09f7' | | | ip_address='2001:db8:10::8', subnet_id='eab47748-3f0a-4775-a09f-b0c24bb64bc4' | | id | b3ecc464-1263-44a7-8c38-2d8a52751773 | | mac_address | fa:16:3e:a8:ce:b8 | | name | | | network_id | 37aaff3a-6047-45ac-bf4f-a825e56fd2b3 | | port_security_enabled | True | | project_id | d5660cb1e6934612a01b4fb2fb630725 | | qos_policy_id | None | | revision_number | 1 | | security_group_ids | 1f0ddd73-7e3c-48bd-a64c-7ded4fe0e635 | | status | ACTIVE | | tags | | | trunk_details | None | | updated_at | 2016-02-05T21:35:04Z | +-----------------------+------------------------------------------------------------------------------------------------------------+ In the above example notice that: * The name given to the instance by the user, ``my_vm``, is sanitized by the Compute service and becomes ``my-vm`` as the port's ``dns_name``. * The port's ``dns_assignment`` attribute shows that its FQDN is ``my-vm.example.org.`` in the Networking service internal DNS, which is the result of concatenating the port's ``dns_name`` with the value configured in the ``dns_domain`` parameter in ``neutron.conf``, as explained previously. * The ``dns_assignment`` attribute also shows that the port's ``hostname`` in the Networking service internal DNS is ``my-vm``. * Instead of having the Compute service create the port for the instance, the user might have created it and assigned a value to its ``dns_name`` attribute. In this case, the value assigned to the ``dns_name`` attribute must be equal to the value that Compute service will assign to the instance's ``hostname``, in this example ``my-vm``. Otherwise, the instance boot will fail. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/config-dns-res.rst0000644000175000017500000001136300000000000023654 0ustar00coreycorey00000000000000.. _config-dns-res: ============================ DNS resolution for instances ============================ The Networking service offers several methods to configure name resolution (DNS) for instances. Most deployments should implement case 1 or 2a. Case 2b requires security considerations to prevent leaking internal DNS information to instances. .. note:: All of these setups require the configured DNS resolvers to be reachable from the virtual network in question. So unless the resolvers are located inside the virtual network itself, this implies the need for a router to be attached to that network having an external gateway configured. Case 1: Each virtual network uses unique DNS resolver(s) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In this case, the DHCP agent offers one or more unique DNS resolvers to instances via DHCP on each virtual network. You can configure a DNS resolver when creating or updating a subnet. To configure more than one DNS resolver, repeat the option multiple times. * Configure a DNS resolver when creating a subnet. .. code-block:: console $ openstack subnet create --dns-nameserver DNS_RESOLVER Replace ``DNS_RESOLVER`` with the IP address of a DNS resolver reachable from the virtual network. Repeat the option if you want to specify multiple IP addresses. For example: .. code-block:: console $ openstack subnet create --dns-nameserver 203.0.113.8 --dns-nameserver 198.51.100.53 .. note:: This command requires additional options outside the scope of this content. 
* Add a DNS resolver to an existing subnet. .. code-block:: console $ openstack subnet set --dns-nameserver DNS_RESOLVER SUBNET_ID_OR_NAME Replace ``DNS_RESOLVER`` with the IP address of a DNS resolver reachable from the virtual network and ``SUBNET_ID_OR_NAME`` with the UUID or name of the subnet. For example, using the ``selfservice`` subnet: .. code-block:: console $ openstack subnet set --dns-nameserver 203.0.113.9 selfservice * Remove all DNS resolvers from a subnet. .. code-block:: console $ openstack subnet set --no-dns-nameservers SUBNET_ID_OR_NAME Replace ``SUBNET_ID_OR_NAME`` with the UUID or name of the subnet. For example, using the ``selfservice`` subnet: .. code-block:: console $ openstack subnet set --no-dns-nameservers selfservice .. note:: You can use this option in combination with the previous one in order to replace all existing DNS resolver addresses with new ones. You can also set the DNS resolver address to ``0.0.0.0`` for IPv4 subnets, or ``::`` for IPv6 subnets, which are special values that indicate to the DHCP agent that it should not announce any DNS resolver at all on the subnet. .. note:: When DNS resolvers are explicitly specified for a subnet this way, that setting will take precedence over the options presented in case 2. Case 2: DHCP agents forward DNS queries from instances ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In this case, the DHCP agent offers the IP addresses of all DHCP agents on a subnet as DNS resolver(s) to instances via DHCP on that subnet. The DHCP agent then runs a masquerading forwarding DNS resolver with two possible options to determine where the DNS queries are sent. .. note:: The DHCP agent will answer queries for names and addresses of instances running within the virtual network directly instead of forwarding them. Case 2a: Queries are forwarded to an explicitly configured set of DNS resolvers -------------------------------------------------------------------------------- In the ``dhcp_agent.ini`` file, configure one or more DNS resolvers. To configure more than one DNS resolver, use a comma between the values. .. code-block:: ini [DEFAULT] dnsmasq_dns_servers = DNS_RESOLVER Replace ``DNS_RESOLVER`` with a list of IP addresses of DNS resolvers reachable from all virtual networks. For example: .. code-block:: ini [DEFAULT] dnsmasq_dns_servers = 203.0.113.8, 198.51.100.53 .. note:: You must configure this option for all eligible DHCP agents and restart them to activate the values. Case 2b: Queries are forwarded to DNS resolver(s) configured on the host ------------------------------------------------------------------------ In this case, the DHCP agent forwards queries from the instances to the DNS resolver(s) configured in the ``resolv.conf`` file on the host running the DHCP agent. This requires that these resolvers be reachable from all virtual networks. In the ``dhcp_agent.ini`` file, enable using the DNS resolver(s) configured on the host. .. code-block:: ini [DEFAULT] dnsmasq_local_resolv = True .. note:: You must configure this option for all eligible DHCP agents and restart them to activate this setting. ..
_config-dvr-snat-ha-ovs: ===================================== Distributed Virtual Routing with VRRP ===================================== :ref:`deploy-ovs-ha-dvr` supports augmentation using Virtual Router Redundancy Protocol (VRRP). Using this configuration, virtual routers support both the ``--distributed`` and ``--ha`` options. Similar to legacy HA routers, DVR/SNAT HA routers provide a quick failover of the SNAT service to a backup DVR/SNAT router on an l3-agent running on a different node. SNAT high availability is implemented in a manner similar to the :ref:`deploy-lb-ha-vrrp` and :ref:`deploy-ovs-ha-vrrp` examples where ``keepalived`` uses VRRP to provide quick failover of SNAT services. During normal operation, the master router periodically transmits *heartbeat* packets over a hidden project network that connects all HA routers for a particular project. If the DVR/SNAT backup router stops receiving these packets, it assumes failure of the master DVR/SNAT router and promotes itself to master router by configuring IP addresses on the interfaces in the ``snat`` namespace. In environments with more than one backup router, the rules of VRRP are followed to select a new master router. .. warning:: There is a known bug with ``keepalived`` v1.2.15 and earlier which can cause packet loss when ``max_l3_agents_per_router`` is set to 3 or more. Therefore, we recommend that you upgrade to ``keepalived`` v1.2.16 or greater when using this feature. Configuration example ~~~~~~~~~~~~~~~~~~~~~ The basic deployment model consists of one controller node, two or more network nodes, and multiple compute nodes. Controller node configuration ----------------------------- #. Add the following to ``/etc/neutron/neutron.conf``: .. code-block:: ini [DEFAULT] core_plugin = ml2 service_plugins = router allow_overlapping_ips = True router_distributed = True l3_ha = True l3_ha_net_cidr = 169.254.192.0/18 max_l3_agents_per_router = 3 When the ``router_distributed = True`` flag is configured, routers created by all users are distributed. Without it, only privileged users can create distributed routers by using ``--distributed True``. Similarly, when the ``l3_ha = True`` flag is configured, routers created by all users default to HA. It follows that with these two flags set to ``True`` in the configuration file, routers created by all users will default to distributed HA routers (DVR HA). The same can explicitly be accomplished by a user with administrative credentials setting the flags in the :command:`openstack router create` command: .. code-block:: console $ openstack router create name-of-router --distributed --ha .. note:: The *max_l3_agents_per_router* option determines the number of backup DVR/SNAT routers which will be instantiated. #. Add the following to ``/etc/neutron/plugins/ml2/ml2_conf.ini``: .. code-block:: ini [ml2] type_drivers = flat,vxlan tenant_network_types = vxlan mechanism_drivers = openvswitch,l2population extension_drivers = port_security [ml2_type_flat] flat_networks = external [ml2_type_vxlan] vni_ranges = MIN_VXLAN_ID:MAX_VXLAN_ID Replace ``MIN_VXLAN_ID`` and ``MAX_VXLAN_ID`` with VXLAN ID minimum and maximum values suitable for your environment. .. note:: The first value in the ``tenant_network_types`` option becomes the default project network type when a regular user creates a network. Network nodes ------------- #. Configure the Open vSwitch agent. Add the following to ``/etc/neutron/plugins/ml2/openvswitch_agent.ini``: ..
code-block:: ini [ovs] local_ip = TUNNEL_INTERFACE_IP_ADDRESS bridge_mappings = external:br-ex [agent] enable_distributed_routing = True tunnel_types = vxlan l2_population = True Replace ``TUNNEL_INTERFACE_IP_ADDRESS`` with the IP address of the interface that handles VXLAN project networks. #. Configure the L3 agent. Add the following to ``/etc/neutron/l3_agent.ini``: .. code-block:: ini [DEFAULT] ha_vrrp_auth_password = password interface_driver = openvswitch agent_mode = dvr_snat Compute nodes ------------- #. Configure the Open vSwitch agent. Add the following to ``/etc/neutron/plugins/ml2/openvswitch_agent.ini``: .. code-block:: ini [ovs] local_ip = TUNNEL_INTERFACE_IP_ADDRESS bridge_mappings = external:br-ex [agent] enable_distributed_routing = True tunnel_types = vxlan l2_population = True [securitygroup] firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver #. Configure the L3 agent. Add the following to ``/etc/neutron/l3_agent.ini``: .. code-block:: ini [DEFAULT] interface_driver = openvswitch agent_mode = dvr Replace ``TUNNEL_INTERFACE_IP_ADDRESS`` with the IP address of the interface that handles VXLAN project networks. Keepalived VRRP health check ---------------------------- .. include:: shared/keepalived-vrrp-healthcheck.txt Known limitations ~~~~~~~~~~~~~~~~~ * Migrating a router from distributed only, HA only, or legacy to distributed HA is not supported at this time. The router must be created as distributed HA. The reverse direction is also not supported. You cannot reconfigure a distributed HA router to be only distributed, only HA, or legacy. * There are certain scenarios where l2pop and distributed HA routers do not interact in an expected manner. These situations are the same ones that affect HA only routers and l2pop. .. _config-fip-port-forwardings: =========================== Floating IP port forwarding =========================== Floating IP port forwarding enables users to forward traffic from a TCP/UDP/other protocol port of a floating IP to a TCP/UDP/other protocol port associated with one of the fixed IPs of a Neutron port. This is accomplished by associating a ``port_forwarding`` sub-resource with a floating IP. CRUD operations for port forwarding are implemented by a Neutron API extension and a service plug-in. Please refer to the Neutron API Reference documentation for details on the CRUD operations. Configuring floating IP port forwarding ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To configure floating IP port forwarding, take the following steps: * Add the ``port_forwarding`` service to the ``service_plugins`` setting in ``/etc/neutron/neutron.conf``. For example: .. code-block:: console service_plugins = router,segments,port_forwarding * Set the ``extensions`` option in the ``[agent]`` section of ``/etc/neutron/l3_agent.ini`` to include ``port_forwarding``. This has to be done in each network and compute node where the L3 agent is running. For example: .. code-block:: console extensions = port_forwarding .. note:: The ``router`` service plug-in manages floating IPs and routers. As a consequence, it has to be configured along with the ``port_forwarding`` service plug-in. ..
note:: After updating the options in the configuration files, the neutron-server and every neutron-l3-agent need to be restarted for the new values to take effect. After configuring floating IP port forwarding, the ``floating-ip-port-forwarding`` extension alias will be included in the output of the following command: .. code-block:: console $ openstack extension list --network .. _config-ipam: ================== IPAM configuration ================== Starting with the Liberty release, OpenStack Networking includes a pluggable interface for the IP Address Management (IPAM) function. This interface creates a driver framework for the allocation and de-allocation of subnets and IP addresses, enabling the integration of alternate IPAM implementations or third-party IP Address Management systems. The basics ~~~~~~~~~~ In Liberty and Mitaka, the IPAM implementation within OpenStack Networking provided a pluggable and non-pluggable flavor. As of Newton, the non-pluggable flavor is no longer available. Instead, it is completely replaced with a reference driver implementation of the pluggable framework. All data will be automatically migrated during the upgrade process, unless you have previously configured a pluggable IPAM driver. In that case, no migration is necessary. To configure a driver other than the reference driver, specify it in the ``neutron.conf`` file. Do this after the migration is complete. .. code-block:: ini ipam_driver = ipam-driver-name There is no need to specify any value if you wish to use the reference driver, though specifying ``internal`` will explicitly choose it. The documentation for any alternate drivers will include the value to use when specifying that driver. Known limitations ~~~~~~~~~~~~~~~~~ * The driver interface is designed to allow separate drivers for each subnet pool. However, the current implementation allows only a single IPAM driver system-wide. * Third-party drivers must provide their own migration mechanisms to convert existing OpenStack installations to their IPAM. .. _config-ipv6: ==== IPv6 ==== This section describes the following items: * How to enable dual-stack (IPv4 and IPv6 enabled) instances. * How those instances receive an IPv6 address. * How those instances communicate across a router to other subnets or the internet. * How those instances interact with other OpenStack services. Enabling a dual-stack network in OpenStack Networking requires creating a subnet with the ``ip_version`` field set to ``6`` and the IPv6 attributes (``ipv6_ra_mode`` and ``ipv6_address_mode``) set. The ``ipv6_ra_mode`` and ``ipv6_address_mode`` will be described in detail in the next section. Finally, the subnet's ``cidr`` needs to be provided. This section does not include the following items: * Single stack IPv6 project networking * OpenStack control communication between servers and services over an IPv6 network.
* Connection to the OpenStack APIs via an IPv6 transport network * IPv6 multicast * IPv6 support in conjunction with any out of tree routers, switches, services or agents whether in physical or virtual form factors. Neutron subnets and the IPv6 API attributes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As of Juno, the OpenStack Networking service (neutron) provides two new attributes to the subnet object, which allows users of the API to configure IPv6 subnets. There are two IPv6 attributes: * ``ipv6_ra_mode`` * ``ipv6_address_mode`` These attributes can be set to the following values: * ``slaac`` * ``dhcpv6-stateful`` * ``dhcpv6-stateless`` The attributes can also be left unset. IPv6 addressing --------------- The ``ipv6_address_mode`` attribute is used to control how addressing is handled by OpenStack. There are a number of different ways that guest instances can obtain an IPv6 address, and this attribute exposes these choices to users of the Networking API. Router advertisements --------------------- The ``ipv6_ra_mode`` attribute is used to control router advertisements for a subnet. The IPv6 Protocol uses Internet Control Message Protocol packets (ICMPv6) as a way to distribute information about networking. ICMPv6 packets with the type flag set to 134 are called "Router Advertisement" packets, which contain information about the router and the route that can be used by guest instances to send network traffic. The ``ipv6_ra_mode`` is used to specify if the Networking service should generate Router Advertisement packets for a subnet. ipv6_ra_mode and ipv6_address_mode combinations ----------------------------------------------- .. list-table:: :header-rows: 1 :widths: 10 10 10 10 60 * - ipv6 ra mode - ipv6 address mode - radvd A,M,O - External Router A,M,O - Description * - *N/S* - *N/S* - Off - Not Defined - Backwards compatibility with pre-Juno IPv6 behavior. * - *N/S* - slaac - Off - 1,0,0 - Guest instance obtains IPv6 address from non-OpenStack router using SLAAC. * - *N/S* - dhcpv6-stateful - Off - 0,1,1 - Not currently implemented in the reference implementation. * - *N/S* - dhcpv6-stateless - Off - 1,0,1 - Not currently implemented in the reference implementation. * - slaac - *N/S* - 1,0,0 - Off - Not currently implemented in the reference implementation. * - dhcpv6-stateful - *N/S* - 0,1,1 - Off - Not currently implemented in the reference implementation. * - dhcpv6-stateless - *N/S* - 1,0,1 - Off - Not currently implemented in the reference implementation. * - slaac - slaac - 1,0,0 - Off - Guest instance obtains IPv6 address from OpenStack managed radvd using SLAAC. * - dhcpv6-stateful - dhcpv6-stateful - 0,1,1 - Off - Guest instance obtains IPv6 address from dnsmasq using DHCPv6 stateful and optional info from dnsmasq using DHCPv6. * - dhcpv6-stateless - dhcpv6-stateless - 1,0,1 - Off - Guest instance obtains IPv6 address from OpenStack managed radvd using SLAAC and optional info from dnsmasq using DHCPv6. * - slaac - dhcpv6-stateful - - - *Invalid combination.* * - slaac - dhcpv6-stateless - - - *Invalid combination.* * - dhcpv6-stateful - slaac - - - *Invalid combination.* * - dhcpv6-stateful - dhcpv6-stateless - - - *Invalid combination.* * - dhcpv6-stateless - slaac - - - *Invalid combination.* * - dhcpv6-stateless - dhcpv6-stateful - - - *Invalid combination.* Project network considerations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Dataplane --------- Both the Linux bridge and the Open vSwitch dataplane modules support forwarding IPv6 packets amongst the guests and router ports. 
Similar to IPv4, there is no special configuration or setup required to enable the dataplane to properly forward packets from the source to the destination using IPv6. Note that these dataplanes will forward Link-local Address (LLA) packets between hosts on the same network just fine without any participation or setup by OpenStack components after the ports are all connected and MAC addresses learned. Addresses for subnets --------------------- There are three methods currently implemented for a subnet to get its ``cidr`` in OpenStack: #. Direct assignment during subnet creation via command line or Horizon #. Referencing a subnet pool during subnet creation #. Using a Prefix Delegation (PD) client to request a prefix for a subnet from a PD server In the future, additional techniques could be used to allocate subnets to projects, for example, use of an external IPAM module. Address modes for ports ----------------------- .. note:: An external DHCPv6 server in theory could override the full address OpenStack assigns based on the EUI-64 address, but that would not be wise as it would not be consistent throughout the system. IPv6 supports three different addressing schemes for address configuration and for providing optional network information. Stateless Address Auto Configuration (SLAAC) Address configuration using Router Advertisement (RA). DHCPv6-stateless Address configuration using RA and optional information using DHCPv6. DHCPv6-stateful Address configuration and optional information using DHCPv6. OpenStack can be set up such that OpenStack Networking directly provides RA, DHCP relay and DHCPv6 address and optional information for their networks, or this can be delegated to external routers and services based on the drivers that are in use. There are two neutron subnet attributes, ``ipv6_ra_mode`` and ``ipv6_address_mode``, that determine how IPv6 addressing and network information is provided to project instances: * ``ipv6_ra_mode``: Determines who sends RA. * ``ipv6_address_mode``: Determines how instances obtain an IPv6 address, default gateway, or optional information. For the above two attributes to be effective, ``enable_dhcp`` of the subnet object must be set to True. Using SLAAC for addressing -------------------------- When using SLAAC, the currently supported combinations for ``ipv6_ra_mode`` and ``ipv6_address_mode`` are as follows. .. list-table:: :header-rows: 1 :widths: 10 10 50 * - ipv6_ra_mode - ipv6_address_mode - Result * - Not specified. - SLAAC - Addresses are assigned using EUI-64, and an external router will be used for routing. * - SLAAC - SLAAC - Addresses are assigned using EUI-64, and OpenStack Networking provides routing. Setting ``ipv6_ra_mode`` to ``slaac`` will result in OpenStack Networking routers being configured to send RA packets when they are created. This results in the following values set for the address configuration flags in the RA messages: * Auto Configuration Flag = 1 * Managed Configuration Flag = 0 * Other Configuration Flag = 0 New or existing neutron networks that contain a SLAAC-enabled IPv6 subnet will result in all neutron ports attached to the network receiving IPv6 addresses. This is because when RA broadcast messages are sent out on a neutron network, they are received by all IPv6-capable ports on the network, and each port will then configure an IPv6 address based on the information contained in the RA packet.
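As a concrete illustration of the SLAAC/SLAAC combination described above, the following is a minimal sketch of creating such a subnet. The network name ``my-net`` and the prefix ``2001:db8:1::/64`` are illustrative values, not taken from this guide:

.. code-block:: console

   $ openstack subnet create --ip-version 6 \
     --ipv6-ra-mode slaac --ipv6-address-mode slaac \
     --subnet-range 2001:db8:1::/64 \
     --network my-net my-slaac-subnet

Once this subnet is attached to an OpenStack Networking router, radvd sends RA packets on the network with the flag values listed above.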
In some cases, an IPv6 SLAAC address will be added to a port, in addition to other IPv4 and IPv6 addresses that the port already has been assigned. DHCPv6 ------ For DHCPv6, the currently supported combinations are as follows: .. list-table:: :header-rows: 1 :widths: 10 10 50 * - ipv6_ra_mode - ipv6_address_mode - Result * - DHCPv6-stateless - DHCPv6-stateless - Addresses are assigned through RAs (see SLAAC above) and optional information is delivered through DHCPv6. * - DHCPv6-stateful - DHCPv6-stateful - Addresses and optional information are assigned using DHCPv6. Setting DHCPv6-stateless for ``ipv6_ra_mode`` configures the neutron router with radvd agent to send RAs. The list below captures the values set for the address configuration flags in the RA packet in this scenario. Similarly, setting DHCPv6-stateless for ``ipv6_address_mode`` configures neutron DHCP implementation to provide the additional network information. * Auto Configuration Flag = 1 * Managed Configuration Flag = 0 * Other Configuration Flag = 1 Setting DHCPv6-stateful for ``ipv6_ra_mode`` configures the neutron router with radvd agent to send RAs. The list below captures the values set for the address configuration flags in the RA packet in this scenario. Similarly, setting DHCPv6-stateful for ``ipv6_address_mode`` configures neutron DHCP implementation to provide addresses and additional network information through DHCPv6. * Auto Configuration Flag = 0 * Managed Configuration Flag = 1 * Other Configuration Flag = 1 Router support ~~~~~~~~~~~~~~ The behavior of the neutron router for IPv6 is different than for IPv4 in a few ways. Internal router ports, that act as default gateway ports for a network, will share a common port for all IPv6 subnets associated with the network. This implies that there will be an IPv6 internal router interface with multiple IPv6 addresses from each of the IPv6 subnets associated with the network and a separate IPv4 internal router interface for the IPv4 subnet. On the other hand, external router ports are allowed to have a dual-stack configuration with both an IPv4 and an IPv6 address assigned to them. Neutron project networks that are assigned Global Unicast Address (GUA) prefixes and addresses don't require NAT on the neutron router external gateway port to access the outside world. As a consequence of the lack of NAT the external router port doesn't require a GUA to send and receive to the external networks. This implies a GUA IPv6 subnet prefix is not necessarily needed for the neutron external network. By default, a IPv6 LLA associated with the external gateway port can be used for routing purposes. To handle this scenario, the implementation of router-gateway-set API in neutron has been modified so that an IPv6 subnet is not required for the external network that is associated with the neutron router. The LLA address of the upstream router can be learned in two ways. #. In the absence of an upstream RA support, ``ipv6_gateway`` flag can be set with the external router gateway LLA in the neutron L3 agent configuration file. This also requires that no subnet is associated with that port. #. The upstream router can send an RA and the neutron router will automatically learn the next-hop LLA, provided again that no subnet is assigned and the ``ipv6_gateway`` flag is not set. Effectively the ``ipv6_gateway`` flag takes precedence over an RA that is received from the upstream router. 
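For the first option, a minimal sketch of the L3 agent configuration follows; the LLA shown is a placeholder value, not an address from this guide:

.. code-block:: ini

   [DEFAULT]
   # LLA of the upstream router, used as the IPv6 next hop
   # (requires that no subnet is associated with the external gateway port)
   ipv6_gateway = fe80::f816:3eff:fe12:3456

The L3 agent must be restarted for the change to take effect.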
If a GUA next hop is desired, that is accomplished by allocating a subnet to the external router port and assigning the upstream router's GUA address as the gateway for the subnet. .. note:: It should be possible for projects to communicate with each other on an isolated network (a network without a router port) using LLA with little to no participation on the part of OpenStack. The authors of this section have not proven that to be true for all scenarios. .. note:: When using the neutron L3 agent in a configuration where it is auto-configuring an IPv6 address via SLAAC, and the agent is learning its default IPv6 route from the ICMPv6 Router Advertisement, it may be necessary to set the ``net.ipv6.conf.<interface>.accept_ra`` sysctl to the value ``2`` in order for routing to function correctly. For a more detailed description, please see the `bug `__. Neutron's Distributed Router feature and IPv6 --------------------------------------------- IPv6 does work when the Distributed Virtual Router functionality is enabled, but all ingress/egress traffic is via the centralized router (hence, not distributed). More work is required to fully enable this functionality. Advanced services ~~~~~~~~~~~~~~~~~ VPNaaS ------ VPNaaS supports IPv6, but support in Kilo and prior releases has some bugs that may limit how it can be used. More thorough and complete testing and bug fixing is being done as part of the Liberty release. IPv6-based VPN-as-a-Service is configured similarly to the IPv4 configuration. Either or both of the ``peer_address`` and the ``peer_cidr`` can be specified as an IPv6 address. The choice of addressing modes and router modes described above should not impact support. FWaaS ----- FWaaS allows creation of IPv6-based rules. NAT & Floating IPs ------------------ At the current time OpenStack Networking does not provide any facility to support any flavor of NAT with IPv6. Unlike IPv4, there is no current embedded support for floating IPs with IPv6. It is assumed that the IPv6 addressing amongst the projects is using GUAs with no overlap across the projects. Security considerations ~~~~~~~~~~~~~~~~~~~~~~~ For more information about security considerations, see the ``Security groups`` section in :doc:`intro-os-networking`. Configuring interfaces of the guest ----------------------------------- OpenStack currently doesn't support the Privacy Extensions defined by RFC 4941, or the Opaque Identifier generation methods defined in RFC 7217. The interface identifier and DUID used must be directly derived from the MAC address as described in RFC 2373. The compute instances must not be set up to utilize either of these methods when generating their interface identifier, or they might not be able to communicate properly on the network. For example, in Linux guests, these are controlled via these two ``sysctl`` variables: - ``net.ipv6.conf.*.use_tempaddr`` (Privacy Extensions) This allows the use of non-changing interface identifiers for IPv6 addresses according to RFC 3041 semantics. It should be disabled (zero) so that stateless addresses are constructed using a stable, EUI-64-based value. - ``net.ipv6.conf.*.addr_gen_mode`` This defines how link-local and auto-configured IPv6 addresses are generated. It should be set to zero (default) so that IPv6 addresses are generated using an EUI-64-based value. .. note:: Support for ``addr_gen_mode`` was added in kernel version 4.11. Other types of guests might have similar configuration options; please consult your distribution documentation for more information.
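For example, on a Linux guest the two settings above could be applied as follows; ``eth0`` is an assumed interface name:

.. code-block:: console

   # sysctl -w net.ipv6.conf.eth0.use_tempaddr=0
   # sysctl -w net.ipv6.conf.eth0.addr_gen_mode=0

To make the values persist across reboots, they can also be placed in ``/etc/sysctl.conf`` or a file under ``/etc/sysctl.d/``.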
There are no provisions for an IPv6-based metadata service similar to what is provided for IPv4. In the case of dual-stacked guests, though, it is always possible to use the IPv4 metadata service instead. IPv6-only guests will have to use another method for metadata injection such as using a configuration drive, which is described in the Nova documentation on `config-drive `__. Unlike IPv4, the MTU of a given network can be conveyed in both the Router Advertisement messages sent by the router, as well as in DHCP messages. OpenStack control & management network considerations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As of the Kilo release, considerable effort has gone into ensuring the project network can handle dual-stack IPv6 and IPv4 transport across the variety of configurations described above. The OpenStack control network can be run in a dual-stack configuration and OpenStack API endpoints can be accessed via an IPv6 network. At this time, the Open vSwitch (OVS) tunnel types (STT, VXLAN, and GRE) support both IPv4 and IPv6 endpoints. Prefix delegation ~~~~~~~~~~~~~~~~~ From the Liberty release onwards, OpenStack Networking supports IPv6 prefix delegation. This section describes the configuration and workflow steps necessary to use IPv6 prefix delegation to provide automatic allocation of subnet CIDRs. This allows you as the OpenStack administrator to rely on an external (to the OpenStack Networking service) DHCPv6 server to manage your project network prefixes. .. note:: Prefix delegation became available in the Liberty release; it is not available in the Kilo release. HA and DVR routers are not currently supported by this feature. Configuring OpenStack Networking for prefix delegation ------------------------------------------------------ To enable prefix delegation, edit the ``/etc/neutron/neutron.conf`` file. .. code-block:: console ipv6_pd_enabled = True .. note:: If you are not using the default dibbler-based driver for prefix delegation, then you also need to set the driver in ``/etc/neutron/neutron.conf``: .. code-block:: console pd_dhcp_driver = Drivers other than the default one may require extra configuration; please refer to :ref:`extra-driver-conf` This tells OpenStack Networking to use the prefix delegation mechanism for subnet allocation when the user does not provide a CIDR or subnet pool id when creating a subnet. Requirements ------------ To use this feature, you need a prefix delegation capable DHCPv6 server that is reachable from your OpenStack Networking node(s). This could be software running on the OpenStack Networking node(s) or elsewhere, or a physical router. For the purposes of this guide we are using the open-source DHCPv6 server, Dibbler. Dibbler is available in many Linux package managers, or from source at `tomaszmrugalski/dibbler `_. When using the reference implementation of the OpenStack Networking prefix delegation driver, Dibbler must also be installed on your OpenStack Networking node(s) to serve as a DHCPv6 client. Version 1.0.1 or higher is required. This guide assumes that you are running a Dibbler server on the network node where the external network bridge exists. If you already have a prefix delegation capable DHCPv6 server in place, then you can skip the following section. Configuring the Dibbler server ------------------------------ After installing Dibbler, edit the ``/etc/dibbler/server.conf`` file: ..
code-block:: none script "/var/lib/dibbler/pd-server.sh" iface "br-ex" { pd-class { pd-pool 2001:db8:2222::/48 pd-length 64 } } The options used in the configuration file above are: - ``script`` Points to a script to be run when a prefix is delegated or released. This is only needed if you want instances on your subnets to have external network access. More on this below. - ``iface`` The name of the network interface on which to listen for prefix delegation messages. - ``pd-pool`` The larger prefix from which you want your delegated prefixes to come. The example given is sufficient if you do not need external network access, otherwise a unique globally routable prefix is necessary. - ``pd-length`` The length that delegated prefixes will be. This must be 64 to work with the current OpenStack Networking reference implementation. To provide external network access to your instances, your Dibbler server also needs to create new routes for each delegated prefix. This is done using the script file named in the config file above. Edit the ``/var/lib/dibbler/pd-server.sh`` file: .. code-block:: bash if [ "$PREFIX1" != "" ]; then if [ "$1" == "add" ]; then sudo ip -6 route add ${PREFIX1}/64 via $REMOTE_ADDR dev $IFACE fi if [ "$1" == "delete" ]; then sudo ip -6 route del ${PREFIX1}/64 via $REMOTE_ADDR dev $IFACE fi fi The variables used in the script file above are: - ``$PREFIX1`` The prefix being added/deleted by the Dibbler server. - ``$1`` The operation being performed. - ``$REMOTE_ADDR`` The IP address of the requesting Dibbler client. - ``$IFACE`` The network interface upon which the request was received. The above is all you need in this scenario, but more information on installing, configuring, and running Dibbler is available in the Dibbler user guide, at `Dibbler – a portable DHCPv6 `_. To start your Dibbler server, run: .. code-block:: console # dibbler-server run Or to run in headless mode: .. code-block:: console # dibbler-server start When using DevStack, it is important to start your server after the ``stack.sh`` script has finished to ensure that the required network interfaces have been created. User workflow ------------- First, create a network and IPv6 subnet: .. 
code-block:: console $ openstack network create ipv6-pd +---------------------------+--------------------------------------+ | Field | Value | +---------------------------+--------------------------------------+ | admin_state_up | UP | | availability_zone_hints | | | availability_zones | | | created_at | 2017-01-25T19:26:01Z | | description | | | headers | | | id | 4b782725-6abe-4a2d-b061-763def1bb029 | | ipv4_address_scope | None | | ipv6_address_scope | None | | mtu | 1450 | | name | ipv6-pd | | port_security_enabled | True | | project_id | 61b7eba037fd41f29cfba757c010faff | | provider:network_type | vxlan | | provider:physical_network | None | | provider:segmentation_id | 46 | | revision_number | 3 | | router:external | Internal | | shared | False | | status | ACTIVE | | subnets | | | tags | [] | | updated_at | 2017-01-25T19:26:01Z | +---------------------------+--------------------------------------+ $ openstack subnet create --ip-version 6 --ipv6-ra-mode slaac \ --ipv6-address-mode slaac --use-default-subnet-pool \ --network ipv6-pd ipv6-pd-1 +------------------------+--------------------------------------+ | Field | Value | +------------------------+--------------------------------------+ | allocation_pools | ::2-::ffff:ffff:ffff:ffff | | cidr | ::/64 | | created_at | 2017-01-25T19:31:53Z | | description | | | dns_nameservers | | | enable_dhcp | True | | gateway_ip | ::1 | | headers | | | host_routes | | | id | 1319510d-c92c-4532-bf5d-8bcf3da761a1 | | ip_version | 6 | | ipv6_address_mode | slaac | | ipv6_ra_mode | slaac | | name | ipv6-pd-1 | | network_id | 4b782725-6abe-4a2d-b061-763def1bb029 | | project_id | 61b7eba037fd41f29cfba757c010faff | | revision_number | 2 | | service_types | | | subnetpool_id | prefix_delegation | | tags | [] | | updated_at | 2017-01-25T19:31:53Z | | use_default_subnetpool | True | +------------------------+--------------------------------------+ The subnet is initially created with a temporary CIDR before one can be assigned by prefix delegation. Any number of subnets with this temporary CIDR can exist without raising an overlap error. The subnetpool_id is automatically set to ``prefix_delegation``. To trigger the prefix delegation process, create a router interface between this subnet and a router with an active interface on the external network: .. code-block:: console $ openstack router add subnet router1 ipv6-pd-1 The prefix delegation mechanism then sends a request via the external network to your prefix delegation server, which replies with the delegated prefix. The subnet is then updated with the new prefix, including issuing new IP addresses to all ports: .. 
code-block:: console $ openstack subnet show ipv6-pd-1 +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | allocation_pools | 2001:db8:2222:6977::2-2001:db8:2222: | | | 6977:ffff:ffff:ffff:ffff | | cidr | 2001:db8:2222:6977::/64 | | created_at | 2017-01-25T19:31:53Z | | description | | | dns_nameservers | | | enable_dhcp | True | | gateway_ip | 2001:db8:2222:6977::1 | | host_routes | | | id | 1319510d-c92c-4532-bf5d-8bcf3da761a1 | | ip_version | 6 | | ipv6_address_mode | slaac | | ipv6_ra_mode | slaac | | name | ipv6-pd-1 | | network_id | 4b782725-6abe-4a2d-b061-763def1bb029 | | project_id | 61b7eba037fd41f29cfba757c010faff | | revision_number | 4 | | service_types | | | subnetpool_id | prefix_delegation | | tags | [] | | updated_at | 2017-01-25T19:35:26Z | +-------------------+--------------------------------------+ If the prefix delegation server is configured to delegate globally routable prefixes and set up routes, then any instance with a port on this subnet should now have external network access. Deleting the router interface causes the subnet to be reverted to the temporary CIDR, and all ports have their IPs updated. Prefix leases are released and renewed automatically as necessary. References ---------- The following presentation from the Barcelona Summit provides a great guide for setting up IPv6 with OpenStack: `Deploying IPv6 in OpenStack Environments `_. .. _extra-driver-conf: Extra configuration ------------------- Neutron dhcpv6_pd_agent ^^^^^^^^^^^^^^^^^^^^^^^ To enable the driver for the dhcpv6_pd_agent, set pd_dhcp_driver to this in ``/etc/neutron/neutron.conf``: .. code-block:: console pd_dhcp_driver = neutron_pd_agent To allow the neutron-pd-agent to communicate with prefix delegation servers, you must set which network interface to use for external communication. In DevStack the default for this is ``br-ex``: .. code-block:: console pd_interface = br-ex Once you have stacked, run the command below to start the neutron-pd-agent: .. code-block:: console neutron-pd-agent --config-file /etc/neutron/neutron.conf .. _config-logging: ================================ Neutron Packet Logging Framework ================================ The packet logging service is designed as a Neutron plug-in that captures network packets for relevant resources (e.g. security group or firewall group) when the registered events occur. .. image:: figures/logging-framework.png :width: 100% :alt: Packet Logging Framework Supported loggable resource types ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As of the Rocky release, both ``security_group`` and ``firewall_group`` are supported as resource types in the Neutron packet logging framework. Service Configuration ~~~~~~~~~~~~~~~~~~~~~ To enable the logging service, follow the steps below. #. On the Neutron controller node, add ``log`` to the ``service_plugins`` setting in the ``/etc/neutron/neutron.conf`` file. For example: .. code-block:: none service_plugins = router,metering,log #. To enable the logging service for ``security_group`` in Layer 2, add ``log`` to the ``extensions`` option in the ``[agent]`` section of ``/etc/neutron/plugins/ml2/ml2_conf.ini`` for the controller node and of ``/etc/neutron/plugins/ml2/openvswitch_agent.ini`` for compute/network nodes.
For example: .. code-block:: ini [agent] extensions = log .. note:: FWaaS v2 logging is currently supported by ``openvswitch`` only; the firewall logging driver for ``linuxbridge`` is not implemented. #. To enable the logging service for ``firewall_group`` in Layer 3, add ``fwaas_v2_log`` to the ``extensions`` option in the ``[AGENT]`` section of ``/etc/neutron/l3_agent.ini`` for network nodes. For example: .. code-block:: ini [AGENT] extensions = fwaas_v2,fwaas_v2_log #. On compute/network nodes, add the configuration for the logging service to ``[network_log]`` in ``/etc/neutron/plugins/ml2/openvswitch_agent.ini`` and in ``/etc/neutron/l3_agent.ini`` as shown below: .. code-block:: ini [network_log] rate_limit = 100 burst_limit = 25 #local_output_log_base = Here, ``rate_limit`` configures the maximum number of packets to be logged per second. When a high rate triggers ``rate_limit``, logging queues packets to be logged. ``burst_limit`` configures the maximum number of queued packets. Logged packets can be stored in a custom location by setting ``local_output_log_base``. .. note:: - ``rate_limit`` requires a value of at least ``100`` and ``burst_limit`` a value of at least ``25``. - If ``rate_limit`` is unset, packets are logged without limit. - If ``local_output_log_base`` is not specified, logged packets are stored in the system journal, such as ``/var/log/syslog``, by default. Trusted projects policy.json configuration ------------------------------------------ With the default ``/etc/neutron/policy.json``, administrators must set up resource logging on behalf of the cloud projects. If projects are trusted to administer their own loggable resources in their cloud, neutron's policy file ``policy.json`` can be modified to allow this. Modify ``/etc/neutron/policy.json`` entries as follows: .. code-block:: none "get_loggable_resources": "rule:regular_user", "create_log": "rule:regular_user", "get_log": "rule:regular_user", "get_logs": "rule:regular_user", "update_log": "rule:regular_user", "delete_log": "rule:regular_user", Service workflow for Operator ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. To check the loggable resources that are supported by the framework: .. code-block:: console $ openstack network loggable resources list +-----------------+ | Supported types | +-----------------+ | security_group | | firewall_group | +-----------------+ .. note:: - For VM ports, logging for ``security_group`` currently works with the ``openvswitch`` firewall driver only. ``linuxbridge`` is under development. - Logging for ``firewall_group`` works on internal router ports only. VM ports will be supported in the future. #. Log creation: * Create a logging resource with an appropriate resource type .. code-block:: console $ openstack network log create --resource-type security_group \ --description "Collecting all security events" \ --event ALL Log_Created +-----------------+------------------------------------------------+ | Field | Value | +-----------------+------------------------------------------------+ | Description | Collecting all security events | | Enabled | True | | Event | ALL | | ID | 8085c3e6-0fa2-4954-b5ce-ff6207931b6d | | Name | Log_Created | | Project | 02568bd62b414221956f15dbe9527d16 | | Resource | None | | Target | None | | Type | security_group | | created_at | 2017-07-05T02:56:43Z | | revision_number | 0 | | tenant_id | 02568bd62b414221956f15dbe9527d16 | | updated_at | 2017-07-05T02:56:43Z | +-----------------+------------------------------------------------+ ..
warning:: If ``--resource`` and ``--target`` are not specified in the request, these arguments are assigned ``ALL`` by default. Hence, an enormous number of log events may be created. * Create a logging resource with a given resource (sg1 or fwg1) .. code-block:: console $ openstack network log create my-log --resource-type security_group --resource sg1 $ openstack network log create my-log --resource-type firewall_group --resource fwg1 * Create a logging resource with a given target (portA) .. code-block:: console $ openstack network log create my-log --resource-type security_group --target portA * Create a logging resource for only the given target (portB) and the given resource (sg1 or fwg1) .. code-block:: console $ openstack network log create my-log --resource-type security_group --target portB --resource sg1 $ openstack network log create my-log --resource-type firewall_group --target portB --resource fwg1 .. note:: - The ``Enabled`` field is set to ``True`` by default. If enabled, logged events are written to the destination configured in ``local_output_log_base``, or to ``/var/log/syslog`` by default. - The ``Event`` field will be set to ``ALL`` if ``--event`` is not specified in the log creation request. #. Enable/Disable log Logging objects can be enabled or disabled at runtime; the change applies immediately to all ports registered with the logging object. For example: .. code-block:: console $ openstack network log set --disable Log_Created $ openstack network log show Log_Created +-----------------+------------------------------------------------+ | Field | Value | +-----------------+------------------------------------------------+ | Description | Collecting all security events | | Enabled | False | | Event | ALL | | ID | 8085c3e6-0fa2-4954-b5ce-ff6207931b6d | | Name | Log_Created | | Project | 02568bd62b414221956f15dbe9527d16 | | Resource | None | | Target | None | | Type | security_group | | created_at | 2017-07-05T02:56:43Z | | revision_number | 1 | | tenant_id | 02568bd62b414221956f15dbe9527d16 | | updated_at | 2017-07-05T03:12:01Z | +-----------------+------------------------------------------------+ Logged events description ~~~~~~~~~~~~~~~~~~~~~~~~~ Currently, the packet logging framework supports collecting ``ACCEPT`` or ``DROP`` or both events related to registered resources. As mentioned above, the Neutron packet logging framework offers two loggable resources through the ``log`` service plug-in: ``security_group`` and ``firewall_group``. The general characteristics of each event are as follows: * Log every ``DROP`` event: A ``DROP`` security event will be generated whenever an incoming or outgoing session is blocked by the security groups or firewall groups * Log an ``ACCEPT`` event: The ``ACCEPT`` security event will be generated only for each ``NEW`` incoming or outgoing session that is allowed by security groups or firewall groups. More details for the ``ACCEPT`` events are shown below: * North/South ``ACCEPT``: For a North/South session there would be a single ``ACCEPT`` event irrespective of direction. * East/West ``ACCEPT``/``ACCEPT``: In an intra-project East/West session where the originating port allows the session and the destination port allows the session, i.e. the traffic is allowed, there would be two ``ACCEPT`` security events generated, one from the perspective of the originating port and one from the perspective of the destination port.
#. The security events that are collected by security group should include: * A timestamp of the flow. * A status of the flow ``ACCEPT``/``DROP``. * An indication of the originator of the flow, e.g. which project or log resource generated the events. * An identifier of the associated instance interface (neutron port id). * Layer 2, 3 and 4 information (MAC, address, port, protocol, etc.). * Security event record format: * Logged data of an ``ACCEPT`` event would look like: .. code-block:: console May 5 09:05:07 action=ACCEPT project_id=736672c700cd43e1bd321aeaf940365c log_resource_ids=['4522efdf-8d44-4e19-b237-64cafc49469b', '42332d89-df42-4588-a2bb-3ce50829ac51'] vm_port=e0259ade-86de-482e-a717-f58258f7173f ethernet(dst='fa:16:3e:ec:36:32',ethertype=2048,src='fa:16:3e:50:aa:b5'), ipv4(csum=62071,dst='10.0.0.4',flags=2,header_length=5,identification=36638,offset=0, option=None,proto=6,src='172.24.4.10',tos=0,total_length=60,ttl=63,version=4), tcp(ack=0,bits=2,csum=15097,dst_port=80,offset=10,option=[TCPOptionMaximumSegmentSize(kind=2,length=4,max_seg_size=1460), TCPOptionSACKPermitted(kind=4,length=2), TCPOptionTimestamps(kind=8,length=10,ts_ecr=0,ts_val=196418896), TCPOptionNoOperation(kind=1,length=1), TCPOptionWindowScale(kind=3,length=3,shift_cnt=3)], seq=3284890090,src_port=47825,urgent=0,window_size=14600) * Logged data of a ``DROP`` event: .. code-block:: console May 5 09:05:07 action=DROP project_id=736672c700cd43e1bd321aeaf940365c log_resource_ids=['4522efdf-8d44-4e19-b237-64cafc49469b'] vm_port=e0259ade-86de-482e-a717-f58258f7173f ethernet(dst='fa:16:3e:ec:36:32',ethertype=2048,src='fa:16:3e:50:aa:b5'), ipv4(csum=62071,dst='10.0.0.4',flags=2,header_length=5,identification=36638,offset=0, option=None,proto=6,src='172.24.4.10',tos=0,total_length=60,ttl=63,version=4), tcp(ack=0,bits=2,csum=15097,dst_port=80,offset=10,option=[TCPOptionMaximumSegmentSize(kind=2,length=4,max_seg_size=1460), TCPOptionSACKPermitted(kind=4,length=2), TCPOptionTimestamps(kind=8,length=10,ts_ecr=0,ts_val=196418896), TCPOptionNoOperation(kind=1,length=1), TCPOptionWindowScale(kind=3,length=3,shift_cnt=3)], seq=3284890090,src_port=47825,urgent=0,window_size=14600) #. The events that are collected by firewall group should include: * A timestamp of the flow. * A status of the flow ``ACCEPT``/``DROP``. * The identifier of the log objects that are collecting this event. * An identifier of the associated instance interface (neutron port id). * Layer 2, 3 and 4 information (MAC, address, port, protocol, etc.). * Security event record format: * Logged data of an ``ACCEPT`` event would look like:
.. code-block:: console Jul 26 14:46:20: action=ACCEPT, log_resource_ids=[u'2e030f3a-e93d-4a76-bc60-1d11c0f6561b'], port=9882c485-b808-4a34-a3fb-b537642c66b2 pkt=ethernet(dst='fa:16:3e:8f:47:c5',ethertype=2048,src='fa:16:3e:1b:3e:67') ipv4(csum=47423,dst='10.10.1.16',flags=2,header_length=5,identification=27969,offset=0,option=None,proto=1,src='10.10.0.5',tos=0,total_length=84,ttl=63,version=4) icmp(code=0,csum=41376,data=echo(data='\xe5\xf2\xfej\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x00',id=29185,seq=0),type=8) * Logged data of a ``DROP`` event: .. code-block:: console Jul 26 14:51:20: action=DROP, log_resource_ids=[u'2e030f3a-e93d-4a76-bc60-1d11c0f6561b'], port=9882c485-b808-4a34-a3fb-b537642c66b2 pkt=ethernet(dst='fa:16:3e:32:7d:ff',ethertype=2048,src='fa:16:3e:28:83:51') ipv4(csum=17518,dst='10.10.0.5',flags=2,header_length=5,identification=57874,offset=0,option=None,proto=1,src='10.10.1.16',tos=0,total_length=84,ttl=63,version=4) icmp(code=0,csum=23772,data=echo(data='\x8a\xa0\xac|\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x00',id=25601,seq=5),type=8) .. note:: No other extraneous events are generated within the security event logs, e.g. no debugging data, etc. .. _config-macvtap: ======================== Macvtap mechanism driver ======================== The Macvtap mechanism driver for the ML2 plug-in generally increases the network performance of instances. Consider the following attributes of this mechanism driver to determine practicality in your environment: * Supports only instance ports. Ports for DHCP and layer-3 (routing) services must use another mechanism driver such as Linux bridge or Open vSwitch (OVS). * Supports only untagged (flat) and tagged (VLAN) networks. * Lacks support for security groups including basic (sanity) and anti-spoofing rules. * Lacks support for layer-3 high-availability mechanisms such as Virtual Router Redundancy Protocol (VRRP) and Distributed Virtual Routing (DVR). * Only compute resources can be attached via macvtap. Attaching other resources like DHCP, routers and others is not supported. Therefore run either OVS or Linux bridge in VLAN or flat mode on the controller node. * Instance migration requires the same values for the ``physical_interface_mappings`` configuration option on each compute node. Prerequisites ~~~~~~~~~~~~~ You can add this mechanism driver to an existing environment using either the Linux bridge or OVS mechanism drivers with only provider networks or provider and self-service networks. You can change the configuration of existing compute nodes or add compute nodes with the Macvtap mechanism driver. The example configuration assumes addition of compute nodes with the Macvtap mechanism driver to the :ref:`deploy-lb-selfservice` or :ref:`deploy-ovs-selfservice` deployment examples. Add one or more compute nodes with the following components: * Three network interfaces: management, provider, and overlay.
* OpenStack Networking Macvtap layer-2 agent and any dependencies. .. note:: To support integration with the deployment examples, this content configures the Macvtap mechanism driver to use the overlay network for untagged (flat) or tagged (VLAN) networks in addition to overlay networks such as VXLAN. Your physical network infrastructure must support VLAN (802.1q) tagging on the overlay network. Architecture ~~~~~~~~~~~~ The Macvtap mechanism driver only applies to compute nodes. Otherwise, the environment resembles the prerequisite deployment example. .. image:: figures/config-macvtap-compute1.png :alt: Macvtap mechanism driver - compute node components .. image:: figures/config-macvtap-compute2.png :alt: Macvtap mechanism driver - compute node connectivity Example configuration ~~~~~~~~~~~~~~~~~~~~~ Use the following example configuration as a template to add support for the Macvtap mechanism driver to an existing operational environment. Controller node --------------- #. In the ``ml2_conf.ini`` file: * Add ``macvtap`` to mechanism drivers. .. code-block:: ini [ml2] mechanism_drivers = macvtap * Configure network mappings. .. code-block:: ini [ml2_type_flat] flat_networks = provider,macvtap [ml2_type_vlan] network_vlan_ranges = provider,macvtap:VLAN_ID_START:VLAN_ID_END .. note:: Use of ``macvtap`` is arbitrary. Only the self-service deployment examples require VLAN ID ranges. Replace ``VLAN_ID_START`` and ``VLAN_ID_END`` with appropriate numerical values. #. Restart the following services: * Server Network nodes ------------- No changes. Compute nodes ------------- #. Install the Networking service Macvtap layer-2 agent. #. In the ``neutron.conf`` file, configure common options: .. include:: shared/deploy-config-neutron-common.txt #. In the ``macvtap_agent.ini`` file, configure the layer-2 agent. .. code-block:: ini [macvtap] physical_interface_mappings = macvtap:MACVTAP_INTERFACE [securitygroup] firewall_driver = noop Replace ``MACVTAP_INTERFACE`` with the name of the underlying interface that handles Macvtap mechanism driver interfaces. If using a prerequisite deployment example, replace ``MACVTAP_INTERFACE`` with the name of the underlying interface that handles overlay networks. For example, ``eth1``. #. Start the following services: * Macvtap agent Verify service operation ------------------------ #. Source the administrative project credentials. #. Verify presence and operation of the agents: .. 
code-block:: console $ openstack network agent list +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+ | ID | Agent Type | Host | Availability Zone | Alive | State | Binary | +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+ | 31e1bc1b-c872-4429-8fc3-2c8eba52634e | Metadata agent | compute1 | None | True | UP | neutron-metadata-agent | | 378f5550-feee-42aa-a1cb-e548b7c2601f | Open vSwitch agent | compute1 | None | True | UP | neutron-openvswitch-agent | | 7d2577d0-e640-42a3-b303-cb1eb077f2b6 | L3 agent | compute1 | nova | True | UP | neutron-l3-agent | | d5d7522c-ad14-4c63-ab45-f6420d6a81dd | Metering agent | compute1 | None | True | UP | neutron-metering-agent | | e838ef5c-75b1-4b12-84da-7bdbd62f1040 | DHCP agent | compute1 | nova | True | UP | neutron-dhcp-agent | +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+ Create initial networks ----------------------- This mechanism driver simply changes the virtual network interface driver for instances. Thus, you can reference the ``Create initial networks`` content for the prerequisite deployment example. Verify network operation ------------------------ This mechanism driver simply changes the virtual network interface driver for instances. Thus, you can reference the ``Verify network operation`` content for the prerequisite deployment example. Network traffic flow ~~~~~~~~~~~~~~~~~~~~ This mechanism driver simply removes the Linux bridge handling security groups on the compute nodes. Thus, you can reference the network traffic flow scenarios for the prerequisite deployment example. .. _config-plugin-ml2: =========== ML2 plug-in =========== Architecture ~~~~~~~~~~~~ The Modular Layer 2 (ML2) neutron plug-in is a framework allowing OpenStack Networking to simultaneously use the variety of layer 2 networking technologies found in complex real-world data centers. The ML2 framework distinguishes between the two kinds of drivers that can be configured: * Type drivers Define how an OpenStack network is technically realized. Example: VXLAN Each available network type is managed by an ML2 type driver. Type drivers maintain any needed type-specific network state. They validate the type specific information for provider networks and are responsible for the allocation of a free segment in project networks. * Mechanism drivers Define the mechanism to access an OpenStack network of a certain type. Example: Open vSwitch mechanism driver. The mechanism driver is responsible for taking the information established by the type driver and ensuring that it is properly applied given the specific networking mechanisms that have been enabled. Mechanism drivers can utilize L2 agents (via RPC) and/or interact directly with external devices or controllers. Multiple mechanism and type drivers can be used simultaneously to access different ports of the same virtual network. .. todo:: Picture showing relationships ML2 driver support matrix -------------------------
.. list-table:: Mechanism drivers and L2 agents :header-rows: 1 * - type driver / mech driver - Flat - VLAN - VXLAN - GRE * - Open vSwitch - yes - yes - yes - yes * - Linux bridge - yes - yes - yes - no * - SRIOV - yes - yes - no - no * - MacVTap - yes - yes - no - no * - L2 population - no - no - yes - yes .. note:: L2 population is a special mechanism driver that optimizes BUM (Broadcast, unknown destination address, multicast) traffic in the overlay networks VXLAN and GRE. It needs to be used in conjunction with either the Linux bridge or the Open vSwitch mechanism driver and cannot be used as a standalone mechanism driver. For more information, see the *Mechanism drivers* section below. Configuration ~~~~~~~~~~~~~ Network type drivers -------------------- To enable type drivers in the ML2 plug-in, edit the ``/etc/neutron/plugins/ml2/ml2_conf.ini`` file: .. code-block:: ini [ml2] type_drivers = flat,vlan,vxlan,gre .. note:: For more details, see `Bug 1567792 `__. For more details, see the `Networking configuration options <../configuration/ml2-conf.html>`__ of the Configuration Reference. The following type drivers are available: * Flat * VLAN * GRE * VXLAN Provider network types ^^^^^^^^^^^^^^^^^^^^^^ Provider networks provide connectivity like project networks, but only administrative (privileged) users can manage those networks because they interface with the physical network infrastructure. For more information about provider networks, see :doc:`intro-os-networking`. * Flat The administrator needs to configure a list of physical network names that can be used for provider networks. For more details, see the related section in the `Configuration Reference <../configuration/ml2-conf.html#ml2-type-flat>`__. * VLAN The administrator needs to configure a list of physical network names that can be used for provider networks. For more details, see the related section in the `Configuration Reference <../configuration/ml2-conf.html#ml2-type-vlan>`__. * GRE No additional configuration required. * VXLAN The administrator can configure the VXLAN multicast group that should be used. .. note:: VXLAN multicast group configuration is not applicable for the Open vSwitch agent. As of today it is not used in the Linux bridge agent either. The Linux bridge agent has its own agent-specific configuration option. For more details, see `Bug 1523614 `__. Project network types ^^^^^^^^^^^^^^^^^^^^^ Project networks provide connectivity to instances for a particular project. Regular (non-privileged) users can manage project networks within the allocation that an administrator or operator defines for them. For more information about project and provider networks, see :doc:`intro-os-networking`. Project network configurations are made in the ``/etc/neutron/plugins/ml2/ml2_conf.ini`` configuration file on the neutron server: * VLAN The administrator needs to configure the range of VLAN IDs that can be used for project network allocation. For more details, see the related section in the `Configuration Reference <../configuration/ml2-conf.html#ml2-type-vlan>`__. * GRE The administrator needs to configure the range of tunnel IDs that can be used for project network allocation. For more details, see the related section in the `Configuration Reference <../configuration/ml2-conf.html#ml2-type-gre>`__. * VXLAN The administrator needs to configure the range of VXLAN IDs that can be used for project network allocation. For more details, see the related section in the `Configuration Reference <../configuration/ml2-conf.html#ml2-type-vxlan>`__. .. note:: Flat networks for project allocation are not supported. They can only exist as a provider network.
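Tying the type-driver pieces together, a minimal server-side sketch that enables VXLAN project networks could look like the following; the VNI range shown is illustrative, not a recommendation:

.. code-block:: ini

   [ml2]
   type_drivers = flat,vlan,vxlan
   tenant_network_types = vxlan

   [ml2_type_vxlan]
   # Illustrative range; choose VNIs that match your fabric planning.
   vni_ranges = 1:1000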
Mechanism drivers ----------------- To enable mechanism drivers in the ML2 plug-in, edit the ``/etc/neutron/plugins/ml2/ml2_conf.ini`` file on the neutron server: .. code-block:: ini [ml2] mechanism_drivers = ovs,l2pop .. note:: For more details, see `Bug 1567792 `__. For more details, see the `Configuration Reference <../configuration/ml2-conf.html#ml2>`__. * Linux bridge No additional configuration is required for the mechanism driver. Additional agent configuration is required. For details, see the related *L2 agent* section below. * Open vSwitch No additional configuration is required for the mechanism driver. Additional agent configuration is required. For details, see the related *L2 agent* section below. * SRIOV The SRIOV driver accepts all PCI vendor devices. * MacVTap No additional configuration is required for the mechanism driver. Additional agent configuration is required. Please see the related section. * L2 population The administrator can configure some optional configuration options. For more details, see the related section in the `Configuration Reference <../configuration/ml2-conf.html#l2pop>`__. * Specialized * Open source External open source mechanism drivers exist in addition to the neutron integrated reference implementations. Configuration of those drivers is not part of this document. For example: * OpenDaylight * OpenContrail * Proprietary (vendor) External mechanism drivers from various vendors exist in addition to the neutron integrated reference implementations. Configuration of those drivers is not part of this document. Supported VNIC types ^^^^^^^^^^^^^^^^^^^^ The ``vnic_type_blacklist`` option is used to remove values from the mechanism driver's ``supported_vnic_types`` list. .. list-table:: Mechanism drivers and supported VNIC types :header-rows: 1 * - mech driver / supported_vnic_types - supported VNIC types - blacklisting available * - Linux bridge - normal - no * - MacVTap - macvtap - no * - Open vSwitch - normal, direct - yes (ovs_driver vnic_type_blacklist, see: `Configuration Reference <../configuration/ml2-conf.html#ovs_driver>`__) * - SRIOV - direct, macvtap, direct_physical - yes (sriov_driver vnic_type_blacklist, see: `Configuration Reference <../configuration/ml2-conf.html#sriov_driver>`__) Extension Drivers ----------------- The ML2 plug-in also supports extension drivers that allow other pluggable drivers to extend the core resources implemented in the ML2 plug-in (``networks``, ``ports``, etc.). Examples of extension drivers include support for QoS, port security, etc. For more details, see the ``extension_drivers`` configuration option in the `Configuration Reference <../configuration/ml2-conf.html#ml2.extension_drivers>`__. Agents ------ L2 agent ^^^^^^^^ An L2 agent serves layer 2 (Ethernet) network connectivity to OpenStack resources. It typically runs on each Network Node and on each Compute Node. * Open vSwitch agent The Open vSwitch agent configures Open vSwitch to realize L2 networks for OpenStack resources. Configuration for the Open vSwitch agent is typically done in the ``openvswitch_agent.ini`` configuration file. Make sure that on agent start you pass this configuration file as an argument.
For a detailed list of configuration options, see the related section in the `Configuration Reference <../configuration/openvswitch-agent.html>`__. * Linux bridge agent The Linux bridge agent configures Linux bridges to realize L2 networks for OpenStack resources. Configuration for the Linux bridge agent is typically done in the ``linuxbridge_agent.ini`` configuration file. Make sure that on agent start you pass this configuration file as an argument. For a detailed list of configuration options, see the related section in the `Configuration Reference <../configuration/linuxbridge-agent.html>`__. * SRIOV NIC Switch agent The SRIOV NIC switch agent configures PCI virtual functions to realize L2 networks for OpenStack instances. Network attachments for other resources like routers, DHCP, and so on are not supported. Configuration for the SRIOV NIC switch agent is typically done in the ``sriov_agent.ini`` configuration file. Make sure that on agent start you pass this configuration file as an argument. For a detailed list of configuration options, see the related section in the `Configuration Reference <../configuration/sriov-agent.html>`__. * MacVTap agent The MacVTap agent uses kernel MacVTap devices to realize L2 networks for OpenStack instances. Network attachments for other resources like routers, DHCP, and so on are not supported. Configuration for the MacVTap agent is typically done in the ``macvtap_agent.ini`` configuration file. Make sure that on agent start you pass this configuration file as an argument. For a detailed list of configuration options, see the related section in the `Configuration Reference <../configuration/macvtap-agent.html>`__. L3 agent ^^^^^^^^ The L3 agent offers advanced layer 3 services, like virtual routers and floating IPs. It requires an L2 agent running in parallel. Configuration for the L3 agent is typically done in the ``l3_agent.ini`` configuration file. Make sure that on agent start you pass this configuration file as an argument. For a detailed list of configuration options, see the related section in the `Configuration Reference <../configuration/l3-agent.html>`__. DHCP agent ^^^^^^^^^^ The DHCP agent is responsible for DHCP (Dynamic Host Configuration Protocol) and RADVD (Router Advertisement Daemon) services. It requires a running L2 agent on the same node. Configuration for the DHCP agent is typically done in the ``dhcp_agent.ini`` configuration file. Make sure that on agent start you pass this configuration file as an argument. For a detailed list of configuration options, see the related section in the `Configuration Reference <../configuration/dhcp-agent.html>`__. Metadata agent ^^^^^^^^^^^^^^ The Metadata agent allows instances to access cloud-init metadata and user data via the network. It requires a running L2 agent on the same node. Configuration for the Metadata agent is typically done in the ``metadata_agent.ini`` configuration file. Make sure that on agent start you pass this configuration file as an argument. For a detailed list of configuration options, see the related section in the `Configuration Reference <../configuration/metadata-agent.html>`__. L3 metering agent ^^^^^^^^^^^^^^^^^ The L3 metering agent enables layer 3 traffic metering. It requires a running L3 agent on the same node. Configuration for the L3 metering agent is typically done in the ``metering_agent.ini`` configuration file. Make sure that on agent start you pass this configuration file as an argument. For a detailed list of configuration options, see the related section in the `Configuration Reference <../configuration/metering-agent.html>`__.
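As each agent description above notes, the agent's configuration files are passed on the command line at start. Deployment tooling or systemd units normally handle this, but as a sketch, assuming default file locations, starting the Open vSwitch agent by hand could look like:

.. code-block:: console

   $ neutron-openvswitch-agent \
     --config-file /etc/neutron/neutron.conf \
     --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini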
Security -------- L2 agents support some important security configurations. * Security Groups For more details, see the related section in the `Configuration Reference <../configuration/ml2-conf.html#securitygroup>`__. * ARP Spoofing Prevention Configured in the *L2 agent* configuration. Reference implementations ~~~~~~~~~~~~~~~~~~~~~~~~~ Overview -------- In this section, the combination of a mechanism driver and an L2 agent is called 'reference implementation'. The following table lists these implementations: .. list-table:: Mechanism drivers and L2 agents :header-rows: 1 * - Mechanism Driver - L2 agent * - Open vSwitch - Open vSwitch agent * - Linux bridge - Linux bridge agent * - SRIOV - SRIOV nic switch agent * - MacVTap - MacVTap agent * - L2 population - Open vSwitch agent, Linux bridge agent The following table shows which reference implementations support which non-L2 neutron agents: .. list-table:: Reference implementations and other agents :header-rows: 1 * - Reference Implementation - L3 agent - DHCP agent - Metadata agent - L3 Metering agent * - Open vSwitch & Open vSwitch agent - yes - yes - yes - yes * - Linux bridge & Linux bridge agent - yes - yes - yes - yes * - SRIOV & SRIOV nic switch agent - no - no - no - no * - MacVTap & MacVTap agent - no - no - no - no .. note:: L2 population is not listed here, as it is not a standalone mechanism driver. Whether other agents are supported depends on the mechanism driver that is used in conjunction with it for binding a port. For more information about L2 population, see the `OpenStack Manuals `_. Buying guide ------------ This guide characterizes the L2 reference implementations that currently exist. * Open vSwitch mechanism and Open vSwitch agent Can be used for instance network attachments as well as for attachments of other network resources like routers, DHCP, and so on. * Linux bridge mechanism and Linux bridge agent Can be used for instance network attachments as well as for attachments of other network resources like routers, DHCP, and so on. * SRIOV mechanism driver and SRIOV NIC switch agent Can only be used for instance network attachments (device_owner = compute). It is deployed alongside another mechanism driver and L2 agent such as OVS or Linux bridge. It offers instances direct access to the network adapter through a PCI Virtual Function (VF). This gives an instance direct access to hardware capabilities and high performance networking. The cloud consumer can decide via the neutron API's VNIC_TYPE attribute whether an instance gets a normal OVS port or an SRIOV port. Due to the direct connection, some features are not available when using SRIOV. For example, DVR, security groups, migration. For more information, see :ref:`config-sriov`. * MacVTap mechanism driver and MacVTap agent Can only be used for instance network attachments (device_owner = compute) and not for attachment of other resources like routers, DHCP, and so on. It is positioned as an alternative to Open vSwitch or Linux bridge support on the compute node for internal deployments. MacVTap offers a direct connection with very little overhead between instances and down to the adapter. You can use the MacVTap agent on the compute node when you require a network connection that is performance critical. It does not require specific hardware (like with SRIOV). Due to the direct connection, some features are not available when using it on the compute node.
For example, DVR, security groups and ARP spoofing protection. .. _config-mtu: ================== MTU considerations ================== The Networking service uses the MTU of the underlying physical network to calculate the MTU for virtual network components including instance network interfaces. By default, it assumes a standard 1500-byte MTU for the underlying physical network. The Networking service only references the underlying physical network MTU. Changing the underlying physical network device MTU requires configuration of physical network devices such as switches and routers. Jumbo frames ~~~~~~~~~~~~ The Networking service supports underlying physical networks using jumbo frames and also enables instances to use jumbo frames minus any overlay protocol overhead. For example, an underlying physical network with a 9000-byte MTU yields an 8950-byte MTU for instances using a VXLAN network with IPv4 endpoints. Using IPv6 endpoints for overlay networks adds 20 bytes of overhead for any protocol. The Networking service supports the following underlying physical network architectures. Case 1 refers to the most common architecture. In general, architectures should avoid cases 2 and 3. .. note:: After you adjust MTU configuration options in ``neutron.conf`` and ``ml2_conf.ini``, you should update the ``mtu`` attribute for all existing networks that need a new MTU. (Network MTU update is available for all core plugins that implement the ``net-mtu-writable`` API extension.) Case 1 ------ For typical underlying physical network architectures that implement a single MTU value, you can leverage jumbo frames using two options, one in the ``neutron.conf`` file and the other in the ``ml2_conf.ini`` file. Most environments should use this configuration. For example, referencing an underlying physical network with a 9000-byte MTU: #. In the ``neutron.conf`` file: .. code-block:: ini [DEFAULT] global_physnet_mtu = 9000 #. In the ``ml2_conf.ini`` file: .. code-block:: ini [ml2] path_mtu = 9000 Case 2 ------ Some underlying physical network architectures contain multiple layer-2 networks with different MTU values. You can configure each flat or VLAN provider network in the bridge or interface mapping options of the layer-2 agent to reference a unique MTU value. For example, referencing a 4000-byte MTU for ``provider2``, a 1500-byte MTU for ``provider3``, and a 9000-byte MTU for other networks using the Open vSwitch agent: #. In the ``neutron.conf`` file: .. code-block:: ini [DEFAULT] global_physnet_mtu = 9000 #. In the ``openvswitch_agent.ini`` file: .. code-block:: ini [ovs] bridge_mappings = provider1:eth1,provider2:eth2,provider3:eth3 #. In the ``ml2_conf.ini`` file: .. code-block:: ini [ml2] physical_network_mtus = provider2:4000,provider3:1500 path_mtu = 9000 Case 3 ------ Some underlying physical network architectures contain a unique layer-2 network for overlay networks using protocols such as VXLAN and GRE. For example, referencing a 4000-byte MTU for overlay networks and a 9000-byte MTU for other networks: #. In the ``neutron.conf`` file: .. code-block:: ini [DEFAULT] global_physnet_mtu = 9000 #. In the ``ml2_conf.ini`` file: .. code-block:: ini [ml2] path_mtu = 4000 .. note:: Other networks including provider networks and flat or VLAN self-service networks assume the value of the ``global_physnet_mtu`` option.
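As the note near the top of this section says, existing networks do not pick up new MTU settings automatically; with a core plugin that implements ``net-mtu-writable``, the attribute can be updated per network. A sketch, assuming a network named ``provider1`` and the jumbo-frame values from Case 1:

.. code-block:: console

   $ openstack network set --mtu 8950 provider1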
Instance network interfaces (VIFs) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The DHCP agent provides an appropriate MTU value to instances using IPv4, while the L3 agent provides an appropriate MTU value to instances using IPv6. IPv6 uses RA via the L3 agent because the DHCP agent only supports IPv4. Instances using IPv4 and IPv6 should obtain the same MTU value regardless of method. .. _config-network-segment-ranges: ====================== Network segment ranges ====================== The network segment range service exposes segment range management to be administered via the Neutron API. In addition, it introduces the ability for the administrator to control the segment ranges globally or on a per-tenant basis. Why you need it ~~~~~~~~~~~~~~~ Before Stein, network segment ranges were configured as an entry in the ML2 config file ``ml2_conf.ini`` that was statically defined for tenant network allocation and therefore had to be managed as part of the host deployment and management. When a regular tenant user creates a network, Neutron assigns the next free segmentation ID (VLAN ID, VNI etc.) from the configured segment ranges. Only an administrator can assign a specific segment ID via the provider extension. The network segment range management service provides the following capabilities that the administrator may be interested in: #. To check the network segment ranges defined by the operators in the ML2 config file, so that the admin can use this information to make segment range allocations. #. To dynamically create and assign network segment ranges, which can help with the distribution of the underlying network connection mapping for privacy or dedicated business connection needs. This includes: * global shared network segment ranges * tenant-specific network segment ranges #. To dynamically update a network segment range to offer the ability to adapt to connection mapping changes. #. To dynamically manage a network segment range when there are no segment ranges defined within the ML2 config file ``ml2_conf.ini``; no restart of the Neutron server is required in this situation. #. To check the availability and usage statistics of network segment ranges. How it works ~~~~~~~~~~~~ A network segment range manages a set of segments from which self-service networks can be allocated. The network segment range management service is admin-only. As a regular project in an OpenStack cloud, you cannot create a network segment range of your own; you just create networks in the regular way. If you are an admin, you can create a network segment range which can be shared (i.e. used by any regular project) or tenant-specific (i.e. assigned on a per-tenant basis). Your network segment ranges will not be visible to any other regular projects. Other CRUD operations are also supported. When a tenant allocates a segment, it will first be allocated from an available segment range assigned to the tenant, and then from a shared range if no tenant-specific allocation is possible.
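To make the shared-versus-tenant-specific distinction concrete, here is a sketch of an admin creating a shared range. It assumes the client supports the ``--shared`` flag, the counterpart of the ``--private --project`` form used later in this guide, and the VNI values are purely illustrative:

.. code-block:: console

   $ openstack network segment range create --shared \
     --network-type vxlan --minimum 2000 --maximum 2100 shared_range_1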
Default network segment ranges ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A set of ``default`` network segment ranges is created out of the values defined in the ML2 config file: ``network_vlan_ranges`` for ml2_type_vlan, ``vni_ranges`` for ml2_type_vxlan, ``tunnel_id_ranges`` for ml2_type_gre and ``vni_ranges`` for ml2_type_geneve. They will be reloaded when the Neutron server starts or restarts. The ``default`` network segment ranges are ``read-only``, but will be treated like any other ``shared`` ranges on segment allocation. The administrator can use the default network segment range information to make shared and/or per-tenant range creation and assignment. Example configuration ~~~~~~~~~~~~~~~~~~~~~ Controller node --------------- #. Enable the network segment range service plugin by appending ``network_segment_range`` to the list of ``service_plugins`` in the ``neutron.conf`` file on all nodes running the ``neutron-server`` service: .. code-block:: ini [DEFAULT] # ... service_plugins = ...,network_segment_range,... #. Restart the ``neutron-server`` service. Verify service operation ------------------------ #. Source the administrative project credentials and list the enabled extensions. #. Use the command :command:`openstack extension list --network` to verify that the ``Neutron Network Segment Range`` extension with Alias ``network-segment-range`` is enabled. .. code-block:: console $ openstack extension list --network +-------------------------------+-----------------------+-----------------------------------------------------------+ | Name | Alias | Description | +-------------------------------+-----------------------+-----------------------------------------------------------+ | ...... | ...... | ...... | +-------------------------------+-----------------------+-----------------------------------------------------------+ | Neutron Network Segment Range | network-segment-range | Provides support for the network segment range management | +-------------------------------+-----------------------+-----------------------------------------------------------+ | ...... | ...... | ...... | +-------------------------------+-----------------------+-----------------------------------------------------------+ Workflow ~~~~~~~~ At a high level, the basic workflow for a network segment range creation is the following: #. The Cloud administrator: * Lists the existing network segment ranges. * Creates a shared or a tenant-specific network segment range based on the requirement. #. A regular tenant creates a network in the regular way. The network created will automatically allocate a segment from the segment ranges assigned to the tenant, or from a shared range if no tenant-specific range is available. At a high level, the basic workflow for a network segment range update is the following: #. The Cloud administrator: * Lists the existing network segment ranges and identifies the one that needs to be updated. * Updates the network segment range based on the requirement. #. A regular tenant creates a network in the regular way. The network created will automatically allocate a segment from the updated network segment ranges available. List the network segment ranges or show a network segment range --------------------------------------------------------------- As admin, list the existing network segment ranges:
.. code-block:: console $ openstack network segment range list +--------------------------------------+-------------------+---------+--------+----------------------------------+--------------+------------------+------------+------------+ | ID | Name | Default | Shared | Project ID | Network Type | Physical Network | Minimum ID | Maximum ID | +--------------------------------------+-------------------+---------+--------+----------------------------------+--------------+------------------+------------+------------+ | 20ce94e1-4e51-4aa0-a5f1-26bdfb5bd90e | | True | True | None | vxlan | None | 1 | 200 | | 4b7af684-ec97-422d-ba38-8b9c2919ae67 | test_range_3 | False | False | 7011dc7fccac4efda89dc3b7f0d0975a | gre | None | 100 | 120 | | a021e582-6b0f-49f5-90cb-79a670c61973 | | True | True | None | vlan | default | 1 | 100 | | a3373630-969b-4ce9-bae7-dff0f8fa2f92 | test_range_2 | False | True | None | vxlan | None | 501 | 505 | | a5707a8f-76f0-4f90-9aa7-c42bf54e94b5 | | True | True | None | gre | None | 1 | 150 | | aad1b55b-43f1-46f9-8c35-85f270863ed6 | | True | True | None | geneve | None | 1 | 120 | | e3233178-2866-4f40-b794-7c6fecdc8655 | test_range_1 | False | False | 7011dc7fccac4efda89dc3b7f0d0975a | vlan | group0-data0 | 11 | 11 | +--------------------------------------+-------------------+---------+--------+----------------------------------+--------------+------------------+------------+------------+ The network segment ranges with ``Default`` as ``True`` are the ranges specified by the operators in the ML2 config file. In addition, there are shared and tenant-specific network segment ranges created previously by the admin. The admin can also show the detailed information (e.g. availability and usage statistics) of a network segment range: .. code-block:: console $ openstack network segment range show test_range_1 +------------------+-----------------------------------------------+ | Field | Value | +------------------+-----------------------------------------------+ | available | [] | | default | False | | id | e3233178-2866-4f40-b794-7c6fecdc8655 | | location | None | | maximum | 11 | | minimum | 11 | | name | test_range_1 | | network_type | vlan | | physical_network | group0-data0 | | project_id | 7011dc7fccac4efda89dc3b7f0d0975a | | shared | False | | used | {u'7011dc7fccac4efda89dc3b7f0d0975a': ['11']} | +------------------+-----------------------------------------------+ Create or update the network segment range ------------------------------------------ As admin, create a network segment range based on your requirement: .. code-block:: console $ openstack network segment range create --private --project demo \ --network-type vxlan --minimum 120 --maximum 140 test_range_4 +------------------+--------------------------------------+ | Field | Value | +------------------+--------------------------------------+ | available | ['120-140'] | | default | False | | id | c016dcda-5bc3-4e98-b41f-6773e92fcd2d | | location | None | | maximum | 140 | | minimum | 120 | | name | test_range_4 | | network_type | vxlan | | physical_network | None | | project_id | 7011dc7fccac4efda89dc3b7f0d0975a | | shared | False | | used | {} | +------------------+--------------------------------------+ Update a network segment range based on your requirement: .. code-block:: console $ openstack network segment range set --minimum 100 --maximum 150 \ test_range_4
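As noted earlier, the other CRUD operations are supported as well. For completeness, a sketch of removing a range that is no longer needed (assuming no segments from it are still in use):

.. code-block:: console

   $ openstack network segment range delete test_range_4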
Create a tenant network ----------------------- Now, as project ``demo`` (source the client environment script ``demo-openrc`` for the ``demo`` project according to https://docs.openstack.org/keystone/latest/install/keystone-openrc-rdo.html), create a network in the regular way. .. code-block:: console $ source demo-openrc $ openstack network create test_net +---------------------------+--------------------------------------+ | Field | Value | +---------------------------+--------------------------------------+ | admin_state_up | UP | | availability_zone_hints | | | availability_zones | | | created_at | 2019-02-25T23:20:36Z | | description | | | dns_domain | | | id | 39e5b95c-ad7a-40b5-9ec1-a4b4a8a43f14 | | ipv4_address_scope | None | | ipv6_address_scope | None | | is_default | False | | is_vlan_transparent | None | | location | None | | mtu | 1450 | | name | test_net | | port_security_enabled | True | | project_id | 7011dc7fccac4efda89dc3b7f0d0975a | | provider:network_type | vxlan | | provider:physical_network | None | | provider:segmentation_id | None | | qos_policy_id | None | | revision_number | 2 | | router:external | Internal | | segments | None | | shared | False | | status | ACTIVE | | subnets | | | tags | | | updated_at | 2019-02-25T23:20:36Z | +---------------------------+--------------------------------------+ Then, switch back to the admin to check the segmentation ID of the tenant network created. .. code-block:: console $ source admin-openrc $ openstack network show test_net +---------------------------+--------------------------------------+ | Field | Value | +---------------------------+--------------------------------------+ | admin_state_up | UP | | availability_zone_hints | | | availability_zones | | | created_at | 2019-02-25T23:20:36Z | | description | | | dns_domain | | | id | 39e5b95c-ad7a-40b5-9ec1-a4b4a8a43f14 | | ipv4_address_scope | None | | ipv6_address_scope | None | | is_default | False | | is_vlan_transparent | None | | location | None | | mtu | 1450 | | name | test_net | | port_security_enabled | True | | project_id | 7011dc7fccac4efda89dc3b7f0d0975a | | provider:network_type | vxlan | | provider:physical_network | None | | provider:segmentation_id | 137 | | qos_policy_id | None | | revision_number | 2 | | router:external | Internal | | segments | None | | shared | False | | status | ACTIVE | | subnets | | | tags | | | updated_at | 2019-02-25T23:20:36Z | +---------------------------+--------------------------------------+ The tenant network created automatically allocates a segment with segmentation ID ``137`` from the network segment range with segmentation ID range ``120-140`` that is assigned to the tenant. If no more segments are available in the network segment range assigned to this tenant, the segment allocation falls back to the ``shared`` segment ranges to check whether a segment is available there. If there is still no segment available, the allocation will fail as follows: .. code-block:: console $ openstack network create test_net $ Unable to create the network. No tenant network is available for allocation. In this case, the admin is advised to check the availability and usage statistics of the related network segment ranges in order to take further actions (e.g. enlarging a segment range). Known limitations ~~~~~~~~~~~~~~~~~ * This service plugin is only compatible with the ML2 core plugin for now.
However, it is possible for other core plugins to support this feature with a follow-on effort. .. _config-ovs-dpdk: =============================== Open vSwitch with DPDK datapath =============================== This page serves as a guide for how to use the OVS with DPDK datapath functionality available in the Networking service as of the Mitaka release. The basics ~~~~~~~~~~ Open vSwitch (OVS) provides support for a Data Plane Development Kit (DPDK) datapath since OVS 2.2, and a DPDK-backed ``vhost-user`` virtual interface since OVS 2.4. The DPDK datapath provides lower latency and higher performance than the standard kernel OVS datapath, while DPDK-backed ``vhost-user`` interfaces can connect guests to this datapath. For more information on DPDK, refer to the `DPDK `__ website. OVS with DPDK, or OVS-DPDK, can be used to provide high-performance networking between instances on OpenStack compute nodes. Prerequisites ------------- Using DPDK in OVS requires the following minimum software versions: * OVS 2.4 * DPDK 2.0 * QEMU 2.1.0 * libvirt 1.2.13 Support of ``vhost-user`` multiqueue, which enables use of multiqueue with ``virtio-net`` and ``igb_uio``, is available if the following newer versions are used: * OVS 2.5 * DPDK 2.2 * QEMU 2.5 * libvirt 1.2.17 In both cases, install and configure Open vSwitch with DPDK support for each node. For more information, see the `OVS-DPDK `__ installation guide (select an appropriate OVS version in the :guilabel:`Branch` drop-down menu). See :doc:`/contributor/internals/ovs_vhostuser` for configuration of the neutron OVS agent. In case you wish to configure multiqueue, see the `OVS configuration chapter on vhost-user `__ in the QEMU documentation. The technical background of multiqueue is explained in the corresponding `blueprint `__. Additionally, OpenStack supports the ``vhost-user`` reconnect feature starting from the Ocata release, as the implementation of the fix for `bug 1604924 `__. Starting from the OpenStack Ocata release, this feature is used without any configuration necessary, provided the following minimum software versions are used: * OVS 2.6 * DPDK 16.07 * QEMU 2.7 Support for this feature is not yet present in the ML2 OVN and ODL mechanism drivers. Using vhost-user interfaces ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Once OVS and neutron are correctly configured with DPDK support, ``vhost-user`` interfaces are completely transparent to the guest (except in the case of the multiqueue configuration described below). However, guests must request huge pages. This can be done through flavors. For example: .. code-block:: console $ openstack flavor set m1.large --property hw:mem_page_size=large For more information about the syntax for ``hw:mem_page_size``, refer to the `Flavors `__ guide. .. note:: ``vhost-user`` requires file descriptor-backed shared memory. Currently, the only way to request this is by requesting large pages. This is why instances spawned on hosts with OVS-DPDK must request large pages. The aggregate flavor affinity filter can be used to associate flavors with large page support to hosts with OVS-DPDK support. Create and add ``vhost-user`` network interfaces to instances in the same fashion as conventional interfaces. These interfaces can use the kernel ``virtio-net`` driver or a DPDK-compatible driver in the guest. .. code-block:: console $ openstack server create --nic net-id=$net_id ... testserver
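Because DPDK guests fail silently when huge pages are missing (the interface appears but does not function, as the limitations below note), it can be worth confirming on the compute host that a huge page pool is actually configured before booting instances. A quick host-side check; this is plain Linux, not OpenStack-specific, and the values shown are illustrative:

.. code-block:: console

   # grep Huge /proc/meminfo
   HugePages_Total:    2048
   HugePages_Free:     2048
   Hugepagesize:       2048 kB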
Using vhost-user multiqueue ~~~~~~~~~~~~~~~~~~~~~~~~~~~ To use this feature, the following should be set in the flavor extra specs (flavor keys): .. code-block:: console $ openstack flavor set m1.large --property hw:vif_multiqueue_enabled=true This setting can be overridden by the image metadata property if the feature is enabled in the extra specs: .. code-block:: console $ openstack image set --property hw_vif_multiqueue_enabled=true IMAGE_NAME Support of ``virtio-net`` multiqueue needs to be present in the kernel of the guest VM and is available starting from Linux kernel 3.8. Check the pre-set maximum for the number of combined channels in the channel configuration. If OVS and the flavor are configured successfully, the maximum should be more than '1': .. code-block:: console $ ethtool -l INTERFACE_NAME To increase the number of current combined channels, run the following command in the guest VM: .. code-block:: console $ ethtool -L INTERFACE_NAME combined QUEUES_NR The number of queues should typically match the number of vCPUs defined for the instance. In newer kernel versions this is configured automatically. Known limitations ~~~~~~~~~~~~~~~~~ * This feature is only supported when using the libvirt compute driver, and the KVM/QEMU hypervisor. * Huge pages are required for each instance running on hosts with OVS-DPDK. If huge pages are not present in the guest, the interface will appear but will not function. * Expect performance degradation of services using tap devices: these devices do not support DPDK. Example services include DVR and FWaaS. * When the ``ovs_use_veth`` option is set to ``True``, any traffic sent from a DHCP namespace will have an incorrect TCP checksum. This means that if ``enable_isolated_metadata`` is set to ``True`` and the metadata service is reachable through the DHCP namespace, responses from metadata will be dropped due to an invalid checksum. In such cases, ``ovs_use_veth`` should be switched to ``False`` and Open vSwitch (OVS) internal ports should be used instead. .. _config-ovs-offload: ================================ Open vSwitch hardware offloading ================================ The purpose of this page is to describe how to enable the Open vSwitch hardware offloading functionality available in OpenStack (using OpenStack Networking). This functionality was first introduced in the OpenStack Pike release. This page intends to serve as a guide for how to configure OpenStack Networking and OpenStack Compute to enable Open vSwitch hardware offloading. The basics ~~~~~~~~~~ Open vSwitch is a production quality, multilayer virtual switch licensed under the open source Apache 2.0 license. It is designed to enable massive network automation through programmatic extension, while still supporting standard management interfaces and protocols. Open vSwitch (OVS) allows Virtual Machines (VM) to communicate with each other and with the outside world. The OVS software-based solution is CPU intensive, affecting system performance and preventing full utilization of the available bandwidth. .. list-table:: :header-rows: 1 :widths: 30 90 * - Term - Definition * - PF - Physical Function. The physical Ethernet controller that supports SR-IOV. * - VF - Virtual Function.
The virtual PCIe device created from a physical Ethernet controller. * - Representor Port - Virtual network interface similar to an SR-IOV port that represents a Nova instance. * - First Compute Node - OpenStack Compute Node that can host Compute instances (Virtual Machines). * - Second Compute Node - OpenStack Compute Node that can host Compute instances (Virtual Machines). Supported Ethernet controllers ------------------------------ The following manufacturers are known to work: - Mellanox ConnectX-4 NIC (VLAN Offload) - Mellanox ConnectX-4 Lx/ConnectX-5 NICs (VLAN/VXLAN Offload) - Broadcom NetXtreme-S series NICs - Broadcom NetXtreme-E series NICs For information on **Mellanox Ethernet Cards**, see `Mellanox: Ethernet Cards - Overview `_. Prerequisites ------------- - Linux Kernel >= 4.13 - Open vSwitch >= 2.8 - iproute >= 4.12 - Mellanox or Broadcom NIC .. note:: Mellanox NIC FW that supports Open vSwitch hardware offloading: ConnectX-5 >= 16.21.0338 ConnectX-4 >= 12.18.2000 ConnectX-4 Lx >= 14.21.0338 Using Open vSwitch hardware offloading ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In order to enable Open vSwitch hardware offloading, the following steps are required: #. Enable SR-IOV #. Configure the NIC to switchdev mode (relevant nodes) #. Enable Open vSwitch hardware offloading .. note:: Throughout this guide, ``enp3s0f0`` is used as the PF and ``eth3`` is used as the representor port. These ports may vary in different environments. .. note:: Throughout this guide, we use ``systemctl`` to restart OpenStack services. This is correct for a ``systemd``-based OS. Other methods to restart services should be used in other environments. Create Compute virtual functions -------------------------------- Create the VFs for the network interface that will be used for SR-IOV. We use ``enp3s0f0`` as the PF, which is also used as the interface for the VLAN provider network and has access to the private networks of all nodes. .. note:: The following steps detail how to create VFs using Mellanox ConnectX-4 and SR-IOV Ethernet cards on an Intel system. Steps may be different for the hardware of your choice. #. Ensure SR-IOV and VT-d are enabled on the system. Enable IOMMU in Linux by adding ``intel_iommu=on`` to the kernel parameters, for example, using GRUB. #. On each Compute node, create the VFs: .. code-block:: bash # echo '4' > /sys/class/net/enp3s0f0/device/sriov_numvfs .. note:: A network interface can be used both for PCI passthrough, using the PF, and SR-IOV, using the VFs. If the PF is used, the VF number stored in the ``sriov_numvfs`` file is lost. If the PF is attached again to the operating system, the number of VFs assigned to this interface will be zero. To keep the number of VFs always assigned to this interface, update a relevant file according to your OS. See some examples below: In Ubuntu, modifying the ``/etc/network/interfaces`` file: .. code-block:: ini auto enp3s0f0 iface enp3s0f0 inet dhcp pre-up echo '4' > /sys/class/net/enp3s0f0/device/sriov_numvfs In Red Hat, modifying the ``/sbin/ifup-local`` file: .. code-block:: bash #!/bin/sh if [[ "$1" == "enp3s0f0" ]] then echo '4' > /sys/class/net/enp3s0f0/device/sriov_numvfs fi .. warning:: Alternatively, you can create VFs by passing the ``max_vfs`` parameter to the kernel module of your network interface. However, the ``max_vfs`` parameter has been deprecated, so the PCI /sys interface is the preferred method. You can determine the maximum number of VFs a PF can support: .. code-block:: bash # cat /sys/class/net/enp3s0f0/device/sriov_totalvfs 8
#. Verify that the VFs have been created and are in ``up`` state: .. note:: The PCI bus number of the PF (03:00.0) and VFs (03:00.2 .. 03:00.5) will be used later. .. code-block:: bash # lspci | grep Ethernet 03:00.0 Ethernet controller: Mellanox Technologies MT27800 Family [ConnectX-5] 03:00.1 Ethernet controller: Mellanox Technologies MT27800 Family [ConnectX-5] 03:00.2 Ethernet controller: Mellanox Technologies MT27800 Family [ConnectX-5 Virtual Function] 03:00.3 Ethernet controller: Mellanox Technologies MT27800 Family [ConnectX-5 Virtual Function] 03:00.4 Ethernet controller: Mellanox Technologies MT27800 Family [ConnectX-5 Virtual Function] 03:00.5 Ethernet controller: Mellanox Technologies MT27800 Family [ConnectX-5 Virtual Function] .. code-block:: bash # ip link show enp3s0f0 8: enp3s0f0: mtu 1500 qdisc mq state UP mode DEFAULT qlen 1000 link/ether a0:36:9f:8f:3f:b8 brd ff:ff:ff:ff:ff:ff vf 0 MAC 00:00:00:00:00:00, spoof checking on, link-state auto vf 1 MAC 00:00:00:00:00:00, spoof checking on, link-state auto vf 2 MAC 00:00:00:00:00:00, spoof checking on, link-state auto vf 3 MAC 00:00:00:00:00:00, spoof checking on, link-state auto If the interfaces are down, set them to ``up`` before launching a guest, otherwise the instance will fail to spawn: .. code-block:: bash # ip link set enp3s0f0 up Configure Open vSwitch hardware offloading ------------------------------------------ #. Change the e-switch mode from legacy to switchdev on the PF device. This will also create the VF representor network devices in the host OS. .. code-block:: bash # echo 0000:03:00.2 > /sys/bus/pci/drivers/mlx5_core/unbind This tells the driver to unbind VF 03:00.2. .. note:: This should be done for all relevant VFs (in this example 0000:03:00.2 .. 0000:03:00.5) #. Enable Open vSwitch hardware offloading, set the PF to switchdev mode and bind the VFs back. .. code-block:: bash # sudo devlink dev eswitch set pci/0000:03:00.0 mode switchdev # sudo ethtool -K enp3s0f0 hw-tc-offload on # echo 0000:03:00.2 > /sys/bus/pci/drivers/mlx5_core/bind .. note:: This should be done for all relevant VFs (in this example 0000:03:00.2 .. 0000:03:00.5) #. Restart Open vSwitch .. code-block:: bash # sudo systemctl enable openvswitch.service # sudo ovs-vsctl set Open_vSwitch . other_config:hw-offload=true # sudo systemctl restart openvswitch.service .. note:: The flow aging of OVS is given in milliseconds and can be controlled with: .. code-block:: bash # ovs-vsctl set Open_vSwitch . other_config:max-idle=30000 Configure Nodes (VLAN Configuration) ------------------------------------- #. Update ``/etc/neutron/plugins/ml2/ml2_conf.ini`` on Controller nodes .. code-block:: ini [ml2] tenant_network_types = vlan type_drivers = vlan mechanism_drivers = openvswitch .. end #. Update ``/etc/neutron/neutron.conf`` on Controller nodes .. code-block:: ini [DEFAULT] core_plugin = ml2 .. end #. Update ``/etc/nova/nova.conf`` on Controller nodes .. code-block:: ini [filter_scheduler] enabled_filters = PciPassthroughFilter .. end #. Update ``/etc/nova/nova.conf`` on Compute nodes .. code-block:: ini [pci] # VLAN passthrough_whitelist example passthrough_whitelist = {"address": "*:03:00.*", "physical_network": "physnet2"} .. end Configure Nodes (VXLAN Configuration) ------------------------------------- #. Update ``/etc/neutron/plugins/ml2/ml2_conf.ini`` on Controller nodes .. code-block:: ini [ml2] tenant_network_types = vxlan type_drivers = vxlan mechanism_drivers = openvswitch .. end #.
#. Update ``/etc/neutron/neutron.conf`` on Controller nodes:

   .. code-block:: ini

      [DEFAULT]
      core_plugin = ml2

   .. end

#. Update ``/etc/nova/nova.conf`` on Controller nodes:

   .. code-block:: ini

      [filter_scheduler]
      enabled_filters = PciPassthroughFilter

   .. end

#. Update ``/etc/nova/nova.conf`` on Compute nodes:

   .. note::

      VXLAN configuration requires ``physical_network`` to be ``null``.

   .. code-block:: ini

      [pci]
      # VXLAN configuration: passthrough_whitelist example
      passthrough_whitelist = {"address": "*:03:00.*", "physical_network": null}

   .. end

#. Restart nova and neutron services:

   .. code-block:: bash

      # sudo systemctl restart openstack-nova-compute.service
      # sudo systemctl restart openstack-nova-scheduler.service
      # sudo systemctl restart neutron-server.service

Validate Open vSwitch hardware offloading
-----------------------------------------

.. note::

   In this example we bring up two instances on different Compute nodes and
   send ICMP echo packets between them. Then we capture the traffic on the
   representor port and see that only the first packets appear there; all
   the rest are offloaded.

#. Create a ``direct`` port on the ``private`` network:

   .. code-block:: bash

      # openstack port create --network private --vnic-type=direct --binding-profile '{"capabilities": ["switchdev"]}' direct_port1

#. Create an instance using the direct port on 'First Compute Node':

   .. code-block:: bash

      # openstack server create --flavor m1.small --image mellanox_fedora --nic port-id=direct_port1 vm1

   .. note::

      In this example, we used a Mellanox image with NIC drivers that can be
      downloaded from
      http://www.mellanox.com/repository/solutions/openstack/images/mellanox_eth.img

#. Repeat the steps above and create a second instance on 'Second Compute
   Node':

   .. code-block:: bash

      # openstack port create --network private --vnic-type=direct --binding-profile '{"capabilities": ["switchdev"]}' direct_port2
      # openstack server create --flavor m1.small --image mellanox_fedora --nic port-id=direct_port2 vm2

   .. note::

      You can use the ``--availability-zone nova:compute_node_1`` option to
      set the desired Compute node.

#. Connect to instance1 and send ICMP echo request packets to instance2:

   .. code-block:: bash

      # vncviewer localhost:5900
      vm_1# ping vm2

#. Connect to 'Second Compute Node' and find the representor port of the
   instance:

   .. note::

      Find the representor port first; in our case it is ``eth3``.
   .. code-block:: console

      compute_node2# ip link show enp3s0f0
      6: enp3s0f0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq master ovs-system state UP mode DEFAULT group default qlen 1000
         link/ether ec:0d:9a:46:9e:84 brd ff:ff:ff:ff:ff:ff
         vf 0 MAC 00:00:00:00:00:00, spoof checking off, link-state enable, trust off, query_rss off
         vf 1 MAC 00:00:00:00:00:00, spoof checking off, link-state enable, trust off, query_rss off
         vf 2 MAC 00:00:00:00:00:00, spoof checking off, link-state enable, trust off, query_rss off
         vf 3 MAC fa:16:3e:b9:b8:ce, vlan 57, spoof checking on, link-state enable, trust off, query_rss off

      compute_node2# ls -l /sys/class/net/
      lrwxrwxrwx 1 root root 0 Sep 11 10:54 eth0 -> ../../devices/virtual/net/eth0
      lrwxrwxrwx 1 root root 0 Sep 11 10:54 eth1 -> ../../devices/virtual/net/eth1
      lrwxrwxrwx 1 root root 0 Sep 11 10:54 eth2 -> ../../devices/virtual/net/eth2
      lrwxrwxrwx 1 root root 0 Sep 11 10:54 eth3 -> ../../devices/virtual/net/eth3

      compute_node2# sudo ovs-dpctl show
      system@ovs-system:
         lookups: hit:1684 missed:1465 lost:0
         flows: 0
         masks: hit:8420 total:1 hit/pkt:2.67
         port 0: ovs-system (internal)
         port 1: br-enp3s0f0 (internal)
         port 2: br-int (internal)
         port 3: br-ex (internal)
         port 4: enp3s0f0
         port 5: tapfdc744bb-61 (internal)
         port 6: qr-a7b1e843-4f (internal)
         port 7: qg-79a77e6d-8f (internal)
         port 8: qr-f55e4c5f-f3 (internal)
         port 9: eth3

   .. end

#. Check traffic on the representor port. Verify that only the first ICMP
   packet appears:

   .. code-block:: console

      compute_node2# tcpdump -nnn -i eth3
      tcpdump: verbose output suppressed, use -v or -vv for full protocol decode
      listening on eth3, link-type EN10MB (Ethernet), capture size 262144 bytes
      17:12:41.220447 ARP, Request who-has 172.0.0.10 tell 172.0.0.13, length 46
      17:12:41.220684 ARP, Reply 172.0.0.10 is-at fa:16:3e:f2:8b:23, length 42
      17:12:41.260487 IP 172.0.0.13 > 172.0.0.10: ICMP echo request, id 1263, seq 1, length 64
      17:12:41.260778 IP 172.0.0.10 > 172.0.0.13: ICMP echo reply, id 1263, seq 1, length 64
      17:12:46.268951 ARP, Request who-has 172.0.0.13 tell 172.0.0.10, length 42
      17:12:46.271771 ARP, Reply 172.0.0.13 is-at fa:16:3e:1a:10:05, length 46
      17:12:55.354737 IP6 fe80::f816:3eff:fe29:8118 > ff02::1: ICMP6, router advertisement, length 64
      17:12:56.106705 IP 0.0.0.0.68 > 255.255.255.255.67: BOOTP/DHCP, Request from 62:21:f0:89:40:73, length 300

   .. end

.. _config-ovsfwdriver:

===================================
Native Open vSwitch firewall driver
===================================

Historically, Open vSwitch (OVS) could not interact directly with *iptables*
to implement security groups. Thus, the OVS agent and Compute service use a
Linux bridge between each instance (VM) and the OVS integration bridge
``br-int`` to implement security groups. The Linux bridge device contains
the *iptables* rules pertaining to the instance. In general, additional
components between instances and the physical network infrastructure cause
scalability and performance problems. To alleviate such problems, the OVS
agent includes an optional firewall driver that natively implements security
groups as flows in OVS rather than the Linux bridge device and *iptables*.
This increases scalability and performance.
Configuring heterogeneous firewall drivers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

L2 agents can be configured to use differing firewall drivers. There is no
requirement that they all be the same. If an agent lacks a firewall driver
configuration, it will default to what is configured on its server. This
also means there is no requirement that the server has any firewall driver
configured at all, as long as the agents are configured correctly.

Prerequisites
~~~~~~~~~~~~~

The native OVS firewall implementation requires kernel and user space
support for *conntrack*, thus requiring minimum versions of the Linux kernel
and Open vSwitch. All cases require Open vSwitch version 2.5 or newer.

* Kernel version 4.3 or newer includes *conntrack* support.

* Kernel version 3.3 or newer, but less than 4.3, does not include
  *conntrack* support and requires building the OVS modules.

Enable the native OVS firewall driver
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* On nodes running the Open vSwitch agent, edit the
  ``openvswitch_agent.ini`` file and enable the firewall driver:

  .. code-block:: ini

     [securitygroup]
     firewall_driver = openvswitch

For more information, see the
:doc:`/contributor/internals/openvswitch_firewall` and the `video `_.

Using GRE tunnels inside VMs with OVS firewall driver
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

If GRE tunnels from VM to VM are going to be used, the native OVS firewall
implementation requires the ``nf_conntrack_proto_gre`` module to be loaded
in the kernel on nodes running the Open vSwitch agent. It can be loaded with
the command:

.. code-block:: console

   # modprobe nf_conntrack_proto_gre

Some Linux distributions have files that can be used to automatically load
kernel modules at boot time, for example, ``/etc/modules``. Check with your
distribution for further information.

This is not necessary when merely using the ``gre`` tunnel network type in
Neutron.

Quality of Service (QoS): Guaranteed Minimum Bandwidth
======================================================

Most Networking Quality of Service (QoS) features are implemented solely by
OpenStack Neutron and they are already documented in the :doc:`QoS
configuration chapter of the Networking Guide `. Some more complex
QoS features necessarily involve the scheduling of a cloud server, therefore
their implementation is shared between OpenStack Nova, Neutron and
Placement. As of the OpenStack Stein release the Guaranteed Minimum
Bandwidth feature is one such shared feature.

This Networking Guide chapter does not aim to replace Nova or Placement
documentation in any way, but it still hopes to give an overall
OpenStack-level guide to understanding and configuring a deployment to use
the Guaranteed Minimum Bandwidth feature.

A guarantee of minimum available bandwidth can be enforced on two levels:

* Scheduling a server on a compute host where the bandwidth is available.
  To be more precise: scheduling one or more ports of a server on a compute
  host's physical network interfaces where the bandwidth is available.

* Queueing network packets on a physical network interface to provide the
  guaranteed bandwidth.

In short the enforcement has two levels:

* (server) placement and
* data plane.
Since the data plane enforcement is already documented in the :doc:`QoS
chapter `, here we only document the placement-level enforcement.

Limitations
-----------

* A pre-created port with a ``minimum-bandwidth`` rule must be passed when
  booting a server (``openstack server create``). Passing a network with a
  minimum-bandwidth rule at boot is not supported because of technical
  reasons (in this case the port is created too late for Neutron to affect
  scheduling).

* Bandwidth guarantees for ports can only be requested on networks backed
  by a physical network (physnet).

* In Stein there is no support for networks with multiple physnets. However
  some simpler multi-segment networks are still supported:

  * Networks with multiple segments all having the same physnet name.
  * Networks with only one physnet segment (the other segments being
    tunneled segments).

* If you mix ports with and without bandwidth guarantees on the same
  physical interface then the ports without a guarantee may starve.
  Therefore mixing them is not recommended. Instead it is recommended to
  separate them by :nova-doc:`Nova host aggregates ` (see the
  sketch after this list).

* Changing the guarantee of a QoS policy (adding/deleting a
  ``minimum_bandwidth`` rule, or changing the ``min_kbps`` field of a
  ``minimum_bandwidth`` rule) is only possible while the policy is not in
  effect. That is, while ports of the QoS policy are not yet used by Nova.
  Requests to change guarantees of in-use policies are rejected.

* The first data-plane-only Guaranteed Minimum Bandwidth implementation
  (for SR-IOV egress traffic) was released in the Newton release of
  Neutron. Because of the known lack of placement-level enforcement it was
  marked as "`best effort `_" (5th bullet point). Since
  placement-level enforcement was not implemented, bandwidth may have
  become overallocated and the system level resource inventory may have
  become inconsistent. Therefore for users of the data-plane-only
  implementation a migration/healing process is mandatory (see section `On
  Healing of Allocations`_) to bring the system level resource inventory to
  a consistent state. Further operations that would reintroduce
  inconsistency (e.g. migrating a server with a ``minimum_bandwidth`` QoS
  rule, but no resource allocation in Placement) are rejected now in a
  backward-incompatible way.

* The Guaranteed Minimum Bandwidth feature is not complete in the Stein
  release. Not all Nova server lifecycle operations can be executed on a
  server with bandwidth guarantees. Since Stein (Nova API microversion
  2.72+) you can boot and delete a server with a guarantee and detach a
  port with a guarantee. Since Train you can also migrate and resize a
  server with a guarantee. Support for further server move operations (for
  example evacuate, live-migrate and unshelve after shelve-offload) is to
  be implemented later. For the definitive documentation please refer to
  the `Port with Resource Request chapter `_ of the OpenStack
  Compute API Guide.

* If an SR-IOV physical function is configured for use by the
  neutron-openvswitch-agent, and the same physical function's virtual
  functions are configured for use by the neutron-sriov-agent, then the
  available bandwidth must be statically split between the corresponding
  resource providers by administrative choice. For example a 10 Gbps
  SR-IOV capable physical NIC could be treated as two independent NICs - a
  5 Gbps NIC (technically the physical function of the NIC) added to an
  Open vSwitch bridge, and another 5 Gbps NIC whose virtual functions can
  be handed out to servers by neutron-sriov-agent.
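As a sketch of the host-aggregate separation recommended above (the
aggregate name, property key, host names and scheduler filter here are
illustrative, not mandated by this feature):

.. code-block:: console

   # as admin
   $ openstack aggregate create --property bandwidth-guaranteed=true agg-bw
   $ openstack aggregate add host agg-bw compute-bw-0
   $ openstack aggregate add host agg-bw compute-bw-1

Servers with bandwidth guarantees can then be steered to these hosts with
the usual aggregate-based scheduling mechanisms, for example flavor extra
specs matched by Nova's ``AggregateInstanceExtraSpecsFilter``.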
Placement pre-requisites
------------------------

Placement must support `microversion 1.29 `_. This was first
released in Rocky.

Nova pre-requisites
-------------------

Nova must support `microversion 2.72 `_. This was first released
in Stein.

Not all Nova virt drivers are supported, please refer to the `Virt Driver
Support section of the Nova Admin Guide `_.

Neutron pre-requisites
----------------------

Neutron must support the following API extensions:

* ``agent-resources-synced``
* ``port-resource-request``
* ``qos-bw-minimum-ingress``

These were all first released in Stein.

Supported drivers and agents
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In release Stein the following agent-based ML2 mechanism drivers are
supported:

* Open vSwitch (``openvswitch``) vnic_types: ``normal``, ``direct``
* SR-IOV (``sriovnicswitch``) vnic_types: ``direct``, ``macvtap``

neutron-server config
~~~~~~~~~~~~~~~~~~~~~

The ``placement`` service plugin synchronizes the agents' resource provider
information from neutron-server to Placement. Since neutron-server talks to
Placement you need to configure how neutron-server should find Placement
and authenticate to it.

``/etc/neutron/neutron.conf`` (on controller nodes):

.. code-block:: ini

   [DEFAULT]
   service_plugins = placement,...
   auth_strategy = keystone

   [placement]
   auth_type = password
   auth_url = https://controller/identity
   password = secret
   project_domain_name = Default
   project_name = service
   user_domain_name = Default
   username = placement

If a vnic_type is supported by default by multiple ML2 mechanism drivers
(e.g. ``vnic_type=direct`` by both ``openvswitch`` and ``sriovnicswitch``)
and multiple agents' resources are also meant to be tracked by Placement,
then the admin must decide which driver is to handle ports of that
vnic_type, by blacklisting the vnic_type for the unwanted drivers. Use
:oslo.config:option:`ovs_driver.vnic_type_blacklist` in this case. Valid
values are all the ``supported_vnic_types`` of the `respective mechanism
drivers `_.

``/etc/neutron/plugins/ml2/ml2_conf.ini`` (on controller nodes):

.. code-block:: ini

   [ovs_driver]
   vnic_type_blacklist = direct

   [sriov_driver]
   #vnic_type_blacklist = direct

neutron-openvswitch-agent config
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The agent configuration is the authentic source of the resources available.
Set it on a per-bridge basis by
:oslo.config:option:`ovs.resource_provider_bandwidths`. The format is:
``bridge:egress:ingress,...`` You may set only one direction and omit the
other.

.. note::

   ``egress`` / ``ingress`` is meant from the perspective of a cloud
   server. That is ``egress`` = cloud server upload, ``ingress`` =
   download.

   Egress and ingress available bandwidth values are in ``kilobit/sec
   (kbps)``.

If desired, resource provider inventory fields can be tweaked on a
per-agent basis by setting
:oslo.config:option:`ovs.resource_provider_inventory_defaults`. Valid
values are all the `optional parameters of the update resource provider
inventory call `_.

``/etc/neutron/plugins/ml2/ovs_agent.ini`` (on compute and network nodes):

.. code-block:: ini

   [ovs]
   bridge_mappings = physnet0:br-physnet0,...
   resource_provider_bandwidths = br-physnet0:10000000:10000000,...
   #resource_provider_inventory_defaults = step_size:1000,...

neutron-sriov-agent config
~~~~~~~~~~~~~~~~~~~~~~~~~~

The configuration of neutron-sriov-agent is analogous to that of
neutron-openvswitch-agent. However look out for:

* The different .ini section names as you can see below.
* That neutron-sriov-agent allows a physnet to be backed by multiple
  physical devices.
* Of course refer to SR-IOV physical functions instead of bridges in :oslo.config:option:`sriov_nic.resource_provider_bandwidths`. ``/etc/neutron/plugins/ml2/sriov_agent.ini`` (on compute nodes): .. code-block:: ini [sriov_nic] physical_device_mappings = physnet0:ens5,physnet0:ens6,... resource_provider_bandwidths = ens5:40000000:40000000,ens6:40000000:40000000,... #resource_provider_inventory_defaults = step_size:1000,... Propagation of resource information ----------------------------------- The flow of information is different for available and used resources. The authentic source of available resources is neutron agent configuration - where the resources actually exist, as described in the agent configuration sections above. This information is propagated in the following chain: ``neutron-l2-agent -> neutron-server -> Placement``. From neutron agent to server the information is included in the ``configurations`` field of the agent heartbeat message sent on the message queue periodically. .. code-block:: console # as admin $ openstack network agent list --agent-type open-vswitch --host devstack0 +--------------------------------------+--------------------+-----------+-------------------+-------+-------+---------------------------+ | ID | Agent Type | Host | Availability Zone | Alive | State | Binary | +--------------------------------------+--------------------+-----------+-------------------+-------+-------+---------------------------+ | 5e57b85f-b017-419a-8745-9c406e149f9e | Open vSwitch agent | devstack0 | None | :-) | UP | neutron-openvswitch-agent | +--------------------------------------+--------------------+-----------+-------------------+-------+-------+---------------------------+ # output shortened and pretty printed # note: 'configurations' on the wire, but 'configuration' in the cli $ openstack network agent show -f value -c configuration 5e57b85f-b017-419a-8745-9c406e149f9e {'bridge_mappings': {'physnet0': 'br-physnet0'}, 'resource_provider_bandwidths': {'br-physnet0': {'egress': 10000000, 'ingress': 10000000}}, 'resource_provider_inventory_defaults': {'allocation_ratio': 1.0, 'min_unit': 1, 'reserved': 0, 'step_size': 1}, ... } Re-reading the resource related subset of configuration on ``SIGHUP`` is not implemented. The agent must be restarted to pick up and send changed configuration. Neutron-server propagates the information further to Placement for the resources of each agent via Placement's HTTP REST API. To avoid overloading Placement this synchronization generally does not happen on every received heartbeat message. Instead the re-synchronization of the resources of one agent is triggered by: * The creation of a network agent record (as queried by ``openstack network agent list``). Please note that deleting an agent record and letting the next heartbeat to re-create it can be used to trigger synchronization without restarting an agent. * The restart of that agent (technically ``start_flag`` being present in the heartbeat message). Both of these can be used by an admin to force a re-sync if needed. The success of a synchronization attempt from neutron-server to Placement is persisted into the relevant agent's ``resources_synced`` attribute. For example: .. code-block:: console # as admin $ openstack network agent show -f value -c resources_synced 5e57b85f-b017-419a-8745-9c406e149f9e True ``resources_synced`` may take the value True, False and None: * None: No sync was attempted (normal for agents not reporting Placement-backed resources). 
* True: The last sync attempt was completely successful. * False: The last sync attempt was partially or utterly unsuccessful. In case ``resources_synced`` is not True for an agent, neutron-server does try to re-sync on receiving every heartbeat message from that agent. Therefore it should be able to recover from transient errors of Neutron-Placement communication (e.g. Placement being started later than Neutron). It is important to note that the restart of neutron-server does not trigger any kind of re-sync to Placement (to avoid an update storm). As mentioned before, the information flow for resources requested and (if proper) allocated is different. It involves a conversation between Nova, Neutron and Placement. #. Neutron exposes a port's resource needs in terms of resource classes and traits as the admin-only ``resource_request`` attribute of that port. #. Nova reads this and `incorporates it as a numbered request group `_ into the cloud servers overall allocation candidate request to Placement. #. Nova selects (schedules) and allocates one candidate returned by Placement. #. Nova informs Neutron when binding the port of which physical network interface resource provider had been selected for the port's resource request in the ``binding:profile.allocation`` sub-attribute of that port. For details please see `slides 13-15 `_ of a (pre-release) demo that was presented on the Berlin Summit in November 2018. Sample usage ------------ Physnets and QoS policies (together with their rules) are usually pre-created by a cloud admin: .. code-block:: console # as admin $ openstack network create net0 \ --provider-network-type vlan \ --provider-physical-network physnet0 \ --provider-segment 100 $ openstack subnet create subnet0 \ --network net0 \ --subnet-range 10.0.4.0/24 $ openstack network qos policy create policy0 $ openstack network qos rule create policy0 \ --type minimum-bandwidth \ --min-kbps 1000000 \ --egress $ openstack network qos rule create policy0 \ --type minimum-bandwidth \ --min-kbps 1000000 \ --ingress Then a normal user can use the pre-created policy to create ports and boot servers with those ports: .. code-block:: console # as an unprivileged user # an ordinary soft-switched port: ``--vnic-type normal`` is the default $ openstack port create port-normal-qos \ --network net0 \ --qos-policy policy0 # alternatively an SR-IOV port, unused in this example $ openstack port create port-direct-qos \ --network net0 \ --vnic-type direct \ --qos-policy policy0 $ openstack server create server0 \ --flavor cirros256 \ --image cirros-0.4.0-x86_64-disk \ --port port-normal-qos On Healing of Allocations ------------------------- Since Placement carries a global view of a cloud deployment's resources (what is available, what is used) it may in some conditions get out of sync with reality. One important case is when the data-plane-only Minimum Guaranteed Bandwidth feature was used before Stein (first released in Newton). Since before Stein guarantees were not enforced during server placement the available resources may have become overallocated without notice. In this case Placement's view and the reality of resource usage should be made consistent during/after an upgrade to Stein. Another case stems from OpenStack not having distributed transactions to allocate resources provided by multiple OpenStack components (here Nova and Neutron). There are known race conditions in which Placement's view may get out of sync with reality. 
The design knowingly minimizes the race condition windows, but there are
known problems:

* If a QoS policy is modified after Nova read a port's
  ``resource_request`` but before the port is bound, its state before the
  modification will be applied.

* If a bound port with a resource allocation is deleted, the port's
  allocation is leaked.

.. note::

   Deleting a bound port has no known use case. Please consider detaching
   the interface first by ``openstack server remove port`` instead.

Incorrect allocations may be fixed by:

* Moving the server, which will delete the wrong allocation and create the
  correct allocation as soon as move operations are implemented (not in
  Stein unfortunately). Moving servers fixes local overallocations.

* The need for an upgrade-helper allocation healing tool is being tracked
  in `bug 1819923 `_.

* Manually, by using `openstack resource provider allocation set `_ /
  `delete `_.

Debugging
---------

* Are all components running at least the Stein release?

* Is the ``placement`` service plugin enabled in neutron-server?

* Is ``resource_provider_bandwidths`` configured for the relevant neutron
  agent?

* Is ``resource_provider_bandwidths`` aligned with ``bridge_mappings`` or
  ``physical_device_mappings``?

* Was the agent restarted since changing the configuration file?

* Is ``resource_provider_bandwidths`` reaching neutron-server?

  .. code-block:: console

     # as admin
     $ openstack network agent show ... | grep configurations

  Please find an example in section `Propagation of resource
  information`_.

* Did neutron-server successfully sync to Placement?

  .. code-block:: console

     # as admin
     $ openstack network agent show ... | grep resources_synced

  Please find an example in section `Propagation of resource
  information`_.

* Is the resource provider tree correct? Is the root a compute host? One
  level below the agents? Two levels below the physical network
  interfaces?

  .. code-block:: console

     $ openstack --os-placement-api-version 1.17 resource provider list
     +--------------------------------------+------------------------------------------+------------+--------------------------------------+--------------------------------------+
     | uuid                                 | name                                     | generation | root_provider_uuid                   | parent_provider_uuid                 |
     +--------------------------------------+------------------------------------------+------------+--------------------------------------+--------------------------------------+
     | 3b36d91e-bf60-460f-b1f8-3322dee5cdfd | devstack0                                |          2 | 3b36d91e-bf60-460f-b1f8-3322dee5cdfd | None                                 |
     | 4a8a819d-61f9-5822-8c5c-3e9c7cb942d6 | devstack0:NIC Switch agent               |          0 | 3b36d91e-bf60-460f-b1f8-3322dee5cdfd | 3b36d91e-bf60-460f-b1f8-3322dee5cdfd |
     | 1c7e83f0-108d-5c35-ada7-7ebebbe43aad | devstack0:NIC Switch agent:ens5          |          2 | 3b36d91e-bf60-460f-b1f8-3322dee5cdfd | 4a8a819d-61f9-5822-8c5c-3e9c7cb942d6 |
     | 89ca1421-5117-5348-acab-6d0e2054239c | devstack0:Open vSwitch agent             |          0 | 3b36d91e-bf60-460f-b1f8-3322dee5cdfd | 3b36d91e-bf60-460f-b1f8-3322dee5cdfd |
     | f9c9ce07-679d-5d72-ac5f-31720811629a | devstack0:Open vSwitch agent:br-physnet0 |          2 | 3b36d91e-bf60-460f-b1f8-3322dee5cdfd | 89ca1421-5117-5348-acab-6d0e2054239c |
     +--------------------------------------+------------------------------------------+------------+--------------------------------------+--------------------------------------+
* Does Placement have the expected traits?

  .. code-block:: console

     # as admin
     $ openstack --os-placement-api-version 1.17 trait list | awk '/CUSTOM_/ { print $2 }' | sort
     CUSTOM_PHYSNET_PHYSNET0
     CUSTOM_VNIC_TYPE_DIRECT
     CUSTOM_VNIC_TYPE_DIRECT_PHYSICAL
     CUSTOM_VNIC_TYPE_MACVTAP
     CUSTOM_VNIC_TYPE_NORMAL

* Do the physical network interface resource providers have the proper
  trait associations and inventories?

  .. code-block:: console

     # as admin
     $ openstack --os-placement-api-version 1.17 resource provider trait list RP-UUID
     $ openstack --os-placement-api-version 1.17 resource provider inventory list RP-UUID

* Does the QoS policy have a ``minimum-bandwidth`` rule?

* Does the port have the proper policy?

* Does the port have a ``resource_request``?

  .. code-block:: console

     # as admin
     $ openstack port show port-normal-qos | grep resource_request

* Was the server booted with a port (as opposed to a network)?

* Did nova allocate resources for the server in Placement?

  .. code-block:: console

     # as admin
     $ openstack --os-placement-api-version 1.17 resource provider allocation show SERVER-UUID

* Does the allocation have a part on the expected physical network
  interface resource provider?

  .. code-block:: console

     # as admin
     $ openstack --os-placement-api-version 1.17 resource provider show --allocations RP-UUID

* Did placement manage to produce an allocation candidate list to nova
  during scheduling?

* Did nova manage to schedule the server?

* Did nova tell neutron which physical network interface resource provider
  was allocated to satisfy the bandwidth request?

  .. code-block:: console

     # as admin
     $ openstack port show port-normal-qos | grep binding.profile.*allocation

* Did neutron manage to bind the port?

Links
-----

* Pre-release `feature demo `_ presented on the Berlin Summit in
  November 2018

* Nova documentation on using a port with ``resource_request``

  * `API Guide `_
  * `Admin Guide `_

* Neutron spec: QoS minimum bandwidth allocation in Placement API

  * `on specs.openstack.org `__
  * `on review.opendev.org `__

* Nova spec: Network Bandwidth resource provider

  * `on specs.openstack.org `__
  * `on review.opendev.org `__

* Relevant OpenStack Networking API references

  * https://docs.openstack.org/api-ref/network/v2/#agent-resources-synced-extension
  * https://docs.openstack.org/api-ref/network/v2/#port-resource-request
  * https://docs.openstack.org/api-ref/network/v2/#qos-minimum-bandwidth-rules

* Microversion histories

  * `Compute 2.72 `_
  * `Placement 1.29 `_

* Implementation

  * `on review.opendev.org `_

* Known Bugs

  * `Missing tool to heal allocations `_
  * `Bandwidth resource is leaked `_

.. _config-qos:

========================
Quality of Service (QoS)
========================

QoS is defined as the ability to guarantee certain network requirements
like bandwidth, latency, jitter, and reliability in order to satisfy a
Service Level Agreement (SLA) between an application provider and end
users.

Network devices such as switches and routers can mark traffic so that it is
handled with a higher priority to fulfill the QoS conditions agreed under
the SLA. In other cases, certain network traffic such as Voice over IP
(VoIP) and video streaming needs to be transmitted with minimal bandwidth
constraints.
On a system without network QoS management, all traffic will be transmitted in a "best-effort" manner making it impossible to guarantee service delivery to customers. QoS is an advanced service plug-in. QoS is decoupled from the rest of the OpenStack Networking code on multiple levels and it is available through the ml2 extension driver. Details about the DB models, API extension, and use cases are out of the scope of this guide but can be found in the `Neutron QoS specification `_. Supported QoS rule types ~~~~~~~~~~~~~~~~~~~~~~~~ QoS supported rule types are now available as ``VALID_RULE_TYPES`` in `QoS rule types `_: * bandwidth_limit: Bandwidth limitations on networks, ports or floating IPs. * dscp_marking: Marking network traffic with a DSCP value. * minimum_bandwidth: Minimum bandwidth constraints on certain types of traffic. Any QoS driver can claim support for some QoS rule types by providing a driver property called ``supported_rules``, the QoS driver manager will recalculate rule types dynamically that the QoS driver supports. The following table shows the Networking back ends, QoS supported rules, and traffic directions (from the VM point of view). .. table:: **Networking back ends, supported rules, and traffic direction** ==================== ======================= ======================= =================== Rule \\ back end Open vSwitch SR-IOV Linux bridge ==================== ======================= ======================= =================== Bandwidth limit Egress \\ Ingress Egress (1) Egress \\ Ingress Minimum bandwidth Egress \\ Ingress (2) Egress \\ Ingress (2) - DSCP marking Egress - Egress ==================== ======================= ======================= =================== .. note:: (1) Max burst parameter is skipped because it is not supported by the IP tool. (2) Placement based enforcement works for both egress and ingress directions, but dataplane enforcement depends on the backend. .. table:: **Neutron backends, supported directions and enforcement types for Minimum Bandwidth rule** ============================ ==================== ==================== ============== Enforcement type \ Backend Open vSwitch SR-IOV Linux Bridge ============================ ==================== ==================== ============== Dataplane - Egress (1) - Placement Egress/Ingress (2) Egress/Ingress (2) - ============================ ==================== ==================== ============== .. note:: (1) Since Newton (2) Since Stein In the most simple case, the property can be represented by a simple Python list defined on the class. For an ml2 plug-in, the list of supported QoS rule types and parameters is defined as a common subset of rules supported by all active mechanism drivers. A QoS rule is always attached to a QoS policy. When a rule is created or updated: * The QoS plug-in will check if this rule and parameters are supported by any active mechanism driver if the QoS policy is not attached to any port or network. * The QoS plug-in will check if this rule and parameters are supported by the mechanism drivers managing those ports if the QoS policy is attached to any port or network. Valid DSCP Marks ---------------- Valid DSCP mark values are even numbers between 0 and 56, except 2-6, 42, 44, and 50-54. 
The full list of valid DSCP marks is:

0, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 46,
48, 56

Configuration
~~~~~~~~~~~~~

To enable the service on a cloud with the architecture described in
`Networking architecture `_, follow the steps below:

On the controller nodes:

#. Add the QoS service to the ``service_plugins`` setting in
   ``/etc/neutron/neutron.conf``. For example:

   .. code-block:: none

      service_plugins = \
      neutron.services.l3_router.l3_router_plugin.L3RouterPlugin,
      neutron.services.metering.metering_plugin.MeteringPlugin,
      neutron.services.qos.qos_plugin.QoSPlugin

#. Optionally, set the needed ``notification_drivers`` in the ``[qos]``
   section in ``/etc/neutron/neutron.conf`` (``message_queue`` is the
   default).

#. Optionally, in order to enable the floating IP QoS extension
   ``qos-fip``, set the ``service_plugins`` option in
   ``/etc/neutron/neutron.conf`` to include both ``router`` and ``qos``.
   For example:

   .. code-block:: none

      service_plugins = router,qos

#. In ``/etc/neutron/plugins/ml2/ml2_conf.ini``, add ``qos`` to
   ``extension_drivers`` in the ``[ml2]`` section. For example:

   .. code-block:: ini

      [ml2]
      extension_drivers = port_security,qos

#. Edit the configuration file for the agent you are using and set the
   ``extensions`` to include ``qos`` in the ``[agent]`` section of the
   configuration file. The agent configuration file will reside in
   ``/etc/neutron/plugins/ml2/<agent_name>_agent.ini`` where
   ``agent_name`` is the name of the agent being used (for example
   ``openvswitch``). For example:

   .. code-block:: ini

      [agent]
      extensions = qos

On the network and compute nodes:

#. Edit the configuration file for the agent you are using and set the
   ``extensions`` to include ``qos`` in the ``[agent]`` section of the
   configuration file. The agent configuration file will reside in
   ``/etc/neutron/plugins/ml2/<agent_name>_agent.ini`` where
   ``agent_name`` is the name of the agent being used (for example
   ``openvswitch``). For example:

   .. code-block:: ini

      [agent]
      extensions = qos

#. Optionally, in order to enable QoS for floating IPs, set the
   ``extensions`` option in the ``[agent]`` section of
   ``/etc/neutron/l3_agent.ini`` to include ``fip_qos``. If ``dvr`` is
   enabled, this has to be done for all the L3 agents. For example:

   .. code-block:: ini

      [agent]
      extensions = fip_qos

   .. note::

      Since the Stein release, a floating IP associated with a neutron
      port, or used by port forwarding, can have a bandwidth limit.
      Setting these server-side and agent-side extension options enables
      it for both cases.

#. Optionally, in order to enable QoS for router gateway IPs, set the
   ``extensions`` option in the ``[agent]`` section of
   ``/etc/neutron/l3_agent.ini`` to include ``gateway_ip_qos``. Set this
   on all the ``dvr_snat`` or ``legacy`` L3 agents. For example:

   .. code-block:: ini

      [agent]
      extensions = gateway_ip_qos

   ``gateway_ip_qos`` should work together with ``fip_qos`` in the L3
   agent for centralized routers; then all L3 IPs with a bound QoS policy
   can be limited under the QoS bandwidth limit rules:

   .. code-block:: ini

      [agent]
      extensions = fip_qos, gateway_ip_qos

#. As rate limiting does not work on Open vSwitch's ``internal`` ports,
   optionally, as a workaround to make QoS bandwidth limiting work on a
   router's gateway ports, set ``ovs_use_veth`` to ``True`` in the
   ``DEFAULT`` section in ``/etc/neutron/l3_agent.ini``:

   .. code-block:: ini

      [DEFAULT]
      ovs_use_veth = True

.. note::

   QoS currently works with ml2 only (SR-IOV, Open vSwitch, and
   linuxbridge are the drivers enabled for QoS).
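Once the services have been restarted, you can confirm which QoS rule types
the deployment actually advertises. The command below is a quick sanity
check; the exact output depends on the loaded mechanism drivers, so the
rows shown here are indicative only:

.. code-block:: console

   $ openstack network qos rule type list
   +-------------------+
   | Type              |
   +-------------------+
   | bandwidth_limit   |
   | dscp_marking      |
   | minimum_bandwidth |
   +-------------------+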
DSCP marking on outer header for overlay networks
-------------------------------------------------

When using overlay networks (e.g., VXLAN), the DSCP marking rule only
applies to the inner header, and during encapsulation, the DSCP mark is not
automatically copied to the outer header.

#. In order to set the DSCP value of the outer header, modify the ``dscp``
   configuration option in
   ``/etc/neutron/plugins/ml2/<agent_name>_agent.ini`` where
   ``<agent_name>`` is the name of the agent being used (e.g.,
   ``openvswitch``):

   .. code-block:: ini

      [agent]
      dscp = 8

#. In order to copy the DSCP field of the inner header to the outer header,
   change the ``dscp_inherit`` configuration option to true in
   ``/etc/neutron/plugins/ml2/<agent_name>_agent.ini`` where
   ``<agent_name>`` is the name of the agent being used (e.g.,
   ``openvswitch``):

   .. code-block:: ini

      [agent]
      dscp_inherit = true

   If the ``dscp_inherit`` option is set to true, the previous ``dscp``
   option is overwritten.

Trusted projects policy.json configuration
------------------------------------------

If projects are trusted to administrate their own QoS policies in your
cloud, neutron's file ``policy.json`` can be modified to allow this.

Modify ``/etc/neutron/policy.json`` policy entries as follows:

.. code-block:: none

   "get_policy": "rule:regular_user",
   "create_policy": "rule:regular_user",
   "update_policy": "rule:regular_user",
   "delete_policy": "rule:regular_user",
   "get_rule_type": "rule:regular_user",

To enable bandwidth limit rules:

.. code-block:: none

   "get_policy_bandwidth_limit_rule": "rule:regular_user",
   "create_policy_bandwidth_limit_rule": "rule:regular_user",
   "delete_policy_bandwidth_limit_rule": "rule:regular_user",
   "update_policy_bandwidth_limit_rule": "rule:regular_user",

To enable DSCP marking rules:

.. code-block:: none

   "get_policy_dscp_marking_rule": "rule:regular_user",
   "create_dscp_marking_rule": "rule:regular_user",
   "delete_dscp_marking_rule": "rule:regular_user",
   "update_dscp_marking_rule": "rule:regular_user",

To enable minimum bandwidth rules:

.. code-block:: none

   "get_policy_minimum_bandwidth_rule": "rule:regular_user",
   "create_policy_minimum_bandwidth_rule": "rule:regular_user",
   "delete_policy_minimum_bandwidth_rule": "rule:regular_user",
   "update_policy_minimum_bandwidth_rule": "rule:regular_user",

User workflow
~~~~~~~~~~~~~

QoS policies are only created by admins with the default ``policy.json``.
Therefore, you should have the cloud operator set them up on behalf of the
cloud projects.

If projects are trusted to create their own policies, check the trusted
projects ``policy.json`` configuration section.

First, create a QoS policy and its bandwidth limit rule:

.. code-block:: console

   $ openstack network qos policy create bw-limiter
   +-------------------+--------------------------------------+
   | Field             | Value                                |
   +-------------------+--------------------------------------+
   | description       |                                      |
   | id                | 5df855e9-a833-49a3-9c82-c0839a5f103f |
   | is_default        | False                                |
   | name              | bw-limiter                           |
   | project_id        | 4db7c1ed114a4a7fb0f077148155c500     |
   | rules             | []                                   |
   | shared            | False                                |
   +-------------------+--------------------------------------+

   $ openstack network qos rule create --type bandwidth-limit --max-kbps 3000 \
     --max-burst-kbits 2400 --egress bw-limiter
   +----------------+--------------------------------------+
   | Field          | Value                                |
   +----------------+--------------------------------------+
   | direction      | egress                               |
   | id             | 92ceb52f-170f-49d0-9528-976e2fee2d6f |
   | max_burst_kbps | 2400                                 |
   | max_kbps       | 3000                                 |
   | name           | None                                 |
   | project_id     |                                      |
   +----------------+--------------------------------------+
.. note::

   The QoS implementation requires a burst value to ensure proper behavior
   of bandwidth limit rules in the Open vSwitch and Linux bridge agents.
   Configuring the proper burst value is very important. If the burst
   value is set too low, bandwidth usage will be throttled even with a
   proper bandwidth limit setting. This issue is discussed in various
   documentation sources, for example in `Juniper's documentation `_.

   For TCP traffic it is recommended to set the burst value to 80% of the
   desired bandwidth limit value. For example, if the bandwidth limit is
   set to 1000kbps then a burst value of 800kbit is sufficient. If the
   configured burst value is too low, the achieved bandwidth limit will be
   lower than expected. If the configured burst value is too high, too few
   packets may be limited and the achieved bandwidth limit will be higher
   than expected. If you do not provide a value, it defaults to 80% of the
   bandwidth limit, which works for typical TCP traffic.

Second, associate the created policy with an existing neutron port. To do
this, the user finds the ID of the port to be associated with the
already-created policy. In the next example, we will assign the
``bw-limiter`` policy to the VM with IP address ``192.0.2.3``.

.. code-block:: console

   $ openstack port list
   +--------------------------------------+-----------------------------------+
   | ID                                   | Fixed IP Addresses                |
   +--------------------------------------+-----------------------------------+
   | 0271d1d9-1b16-4410-bd74-82cdf6dcb5b3 | { ... , "ip_address": "192.0.2.1"}|
   | 88101e57-76fa-4d12-b0e0-4fc7634b874a | { ... , "ip_address": "192.0.2.3"}|
   | e04aab6a-5c6c-4bd9-a600-33333551a668 | { ... , "ip_address": "192.0.2.2"}|
   +--------------------------------------+-----------------------------------+

   $ openstack port set --qos-policy bw-limiter \
     88101e57-76fa-4d12-b0e0-4fc7634b874a

In order to detach a port from the QoS policy, simply update the port
configuration again:

.. code-block:: console

   $ openstack port unset --qos-policy 88101e57-76fa-4d12-b0e0-4fc7634b874a

Ports can be created with a policy attached to them too:

.. code-block:: console

   $ openstack port create --qos-policy bw-limiter --network private port1
   +-----------------------+--------------------------------------------------+
   | Field                 | Value                                            |
   +-----------------------+--------------------------------------------------+
   | admin_state_up        | UP                                               |
   | allowed_address_pairs |                                                  |
   | binding_host_id       |                                                  |
   | binding_profile       |                                                  |
   | binding_vif_details   |                                                  |
   | binding_vif_type      | unbound                                          |
   | binding_vnic_type     | normal                                           |
   | created_at            | 2017-05-15T08:43:00Z                             |
   | data_plane_status     | None                                             |
   | description           |                                                  |
   | device_id             |                                                  |
   | device_owner          |                                                  |
   | dns_assignment        | None                                             |
   | dns_name              | None                                             |
   | extra_dhcp_opts       |                                                  |
   | fixed_ips             | ip_address='10.0.10.4', subnet_id='292f8c1e-...' |
   | id                    | f51562ee-da8d-42de-9578-f6f5cb248226             |
   | ip_address            | None                                             |
   | mac_address           | fa:16:3e:d9:f2:ba                                |
   | name                  | port1                                            |
   | network_id            | 55dc2f70-0f92-4002-b343-ca34277b0234             |
   | option_name           | None                                             |
   | option_value          | None                                             |
   | port_security_enabled | False                                            |
   | project_id            | 4db7c1ed114a4a7fb0f077148155c500                 |
   | qos_policy_id         | 5df855e9-a833-49a3-9c82-c0839a5f103f             |
   | revision_number       | 6                                                |
   | security_group_ids    | 0531cc1a-19d1-4cc7-ada5-49f8b08245be             |
   | status                | DOWN                                             |
   | subnet_id             | None                                             |
   | tags                  | []                                               |
   | trunk_details         | None                                             |
   | updated_at            | 2017-05-15T08:43:00Z                             |
   +-----------------------+--------------------------------------------------+
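To confirm that the policy took effect on a port, you can inspect the
port's ``qos_policy_id`` field. This is a quick sanity check; the ID shown
matches the ``bw-limiter`` policy created above:

.. code-block:: console

   $ openstack port show -c qos_policy_id port1
   +---------------+--------------------------------------+
   | Field         | Value                                |
   +---------------+--------------------------------------+
   | qos_policy_id | 5df855e9-a833-49a3-9c82-c0839a5f103f |
   +---------------+--------------------------------------+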
You can attach networks to a QoS policy. This means that any compute port
connected to the network will use the network policy by default, unless
the port has a specific policy attached to it. Internal network owned
ports like DHCP and internal router ports are excluded from network policy
application.

In order to attach a QoS policy to a network, update an existing network,
or initially create the network attached to the policy:

.. code-block:: console

   $ openstack network set --qos-policy bw-limiter private

The created policy can be associated with an existing floating IP. To do
this, the user finds the ID of the floating IP to be associated with the
already-created policy. In the next example, we will assign the
``bw-limiter`` policy to the floating IP address ``172.16.100.18``.

.. code-block:: console

   $ openstack floating ip list
   +--------------------------------------+---------------------+------------------+------+-----+
   | ID                                   | Floating IP Address | Fixed IP Address | Port | ... |
   +--------------------------------------+---------------------+------------------+------+-----+
   | 1163d127-6df3-44bb-b69c-c0e916303eb3 | 172.16.100.9        | None             | None | ... |
   | d0ed7491-3eb7-4c4f-a0f0-df04f10a067c | 172.16.100.18       | None             | None | ... |
   | f5a9ed48-2e9f-411c-8787-2b6ecd640090 | 172.16.100.2        | None             | None | ... |
   +--------------------------------------+---------------------+------------------+------+-----+

.. code-block:: console

   $ openstack floating ip set --qos-policy bw-limiter d0ed7491-3eb7-4c4f-a0f0-df04f10a067c

In order to detach a floating IP from the QoS policy, simply update the
floating IP configuration:

.. code-block:: console

   $ openstack floating ip set --no-qos-policy d0ed7491-3eb7-4c4f-a0f0-df04f10a067c

Or use the ``unset`` action:

.. code-block:: console

   $ openstack floating ip unset --qos-policy d0ed7491-3eb7-4c4f-a0f0-df04f10a067c

Floating IPs can be created with a policy attached to them too:

.. code-block:: console

   $ openstack floating ip create --qos-policy bw-limiter public
   +---------------------+--------------------------------------+
   | Field               | Value                                |
   +---------------------+--------------------------------------+
   | created_at          | 2017-12-06T02:12:09Z                 |
   | description         |                                      |
   | fixed_ip_address    | None                                 |
   | floating_ip_address | 172.16.100.12                        |
   | floating_network_id | 4065eb05-cccb-4048-988c-e8c5480a746f |
   | id                  | 6a0efeef-462b-4312-b4ad-627cde8a20e6 |
   | name                | 172.16.100.12                        |
   | port_id             | None                                 |
   | project_id          | 916e39e8be52433ba040da3a3a6d0847     |
   | qos_policy_id       | 5df855e9-a833-49a3-9c82-c0839a5f103f |
   | revision_number     | 1                                    |
   | router_id           | None                                 |
   | status              | DOWN                                 |
   | updated_at          | 2017-12-06T02:12:09Z                 |
   +---------------------+--------------------------------------+

The QoS bandwidth limit rules attached to a floating IP will become active
when you associate the floating IP with a port. For example, to associate
the previously created floating IP ``172.16.100.12`` (ID
``6a0efeef-462b-4312-b4ad-627cde8a20e6``) to the instance port with uuid
``a7f25e73-4288-4a16-93b9-b71e6fd00862`` and fixed IP ``192.168.222.5``:

.. code-block:: console

   $ openstack floating ip set --port a7f25e73-4288-4a16-93b9-b71e6fd00862 \
     6a0efeef-462b-4312-b4ad-627cde8a20e6

.. note::

   The QoS policy attached to a floating IP is not applied to a port, it
   is applied to the associated floating IP only. Thus the ID of the QoS
   policy attached to a floating IP will not be visible in a port's
   ``qos_policy_id`` field after associating a floating IP to the port. It
   is only visible in the floating IP attributes.

.. note::

   For now, the L3 agent floating IP QoS extension only supports
   ``bandwidth_limit`` rules.
   Other rule types (like DSCP marking) will be silently ignored for
   floating IPs. A QoS policy that does not contain any
   ``bandwidth_limit`` rules will have no effect when attached to a
   floating IP.

   If a floating IP is bound to a port, and both have QoS bandwidth rules
   attached, the L3 agent floating IP QoS extension ignores the port QoS
   rules and installs the rules from the QoS policy associated with the
   floating IP on the appropriate device in the router namespace.

Each project can have at most one default QoS policy, although it is not
mandatory. If a default QoS policy is defined, all new networks created
within this project will have this policy assigned, as long as no other
QoS policy is explicitly attached during the creation process. If the
default QoS policy is unset, no change to existing networks will be made.

In order to set a QoS policy as default, the parameter ``--default`` must
be used. To unset this QoS policy as default, the parameter
``--no-default`` must be used.

.. code-block:: console

   $ openstack network qos policy create --default bw-limiter
   +-------------------+--------------------------------------+
   | Field             | Value                                |
   +-------------------+--------------------------------------+
   | description       |                                      |
   | id                | 5df855e9-a833-49a3-9c82-c0839a5f103f |
   | is_default        | True                                 |
   | name              | bw-limiter                           |
   | project_id        | 4db7c1ed114a4a7fb0f077148155c500     |
   | rules             | []                                   |
   | shared            | False                                |
   +-------------------+--------------------------------------+

   $ openstack network qos policy set --no-default bw-limiter
   +-------------------+--------------------------------------+
   | Field             | Value                                |
   +-------------------+--------------------------------------+
   | description       |                                      |
   | id                | 5df855e9-a833-49a3-9c82-c0839a5f103f |
   | is_default        | False                                |
   | name              | bw-limiter                           |
   | project_id        | 4db7c1ed114a4a7fb0f077148155c500     |
   | rules             | []                                   |
   | shared            | False                                |
   +-------------------+--------------------------------------+

Administrator enforcement
-------------------------

Administrators are able to enforce policies on project ports or networks.
As long as the policy is not shared, the project is not able to detach any
policy attached to a network or port. If the policy is shared, the project
is able to attach or detach such policy from its own ports and networks.

Rule modification
-----------------

You can modify rules at runtime. Rule modifications will be propagated to
any attached port.

.. code-block:: console

   $ openstack network qos rule set --max-kbps 2000 --max-burst-kbits 1600 \
     --ingress bw-limiter 92ceb52f-170f-49d0-9528-976e2fee2d6f

   $ openstack network qos rule show \
     bw-limiter 92ceb52f-170f-49d0-9528-976e2fee2d6f
   +----------------+--------------------------------------+
   | Field          | Value                                |
   +----------------+--------------------------------------+
   | direction      | ingress                              |
   | id             | 92ceb52f-170f-49d0-9528-976e2fee2d6f |
   | max_burst_kbps | 1600                                 |
   | max_kbps       | 2000                                 |
   | name           | None                                 |
   | project_id     |                                      |
   +----------------+--------------------------------------+

Just like with bandwidth limiting, create a policy for a DSCP marking
rule:
.. code-block:: console

   $ openstack network qos policy create dscp-marking
   +-------------------+--------------------------------------+
   | Field             | Value                                |
   +-------------------+--------------------------------------+
   | description       |                                      |
   | id                | d1f90c76-fbe8-4d6f-bb87-a9aea997ed1e |
   | is_default        | False                                |
   | name              | dscp-marking                         |
   | project_id        | 4db7c1ed114a4a7fb0f077148155c500     |
   | rules             | []                                   |
   | shared            | False                                |
   +-------------------+--------------------------------------+

You can create, update, list, delete, and show DSCP marking rules with the
OpenStack client:

.. code-block:: console

   $ openstack network qos rule create --type dscp-marking --dscp-mark 26 \
     dscp-marking
   +----------------+--------------------------------------+
   | Field          | Value                                |
   +----------------+--------------------------------------+
   | dscp_mark      | 26                                   |
   | id             | 115e4f70-8034-4176-8fe9-2c47f8878a7d |
   | name           | None                                 |
   | project_id     |                                      |
   +----------------+--------------------------------------+

.. code-block:: console

   $ openstack network qos rule set --dscp-mark 22 \
     dscp-marking 115e4f70-8034-4176-8fe9-2c47f8878a7d

   $ openstack network qos rule list dscp-marking
   +--------------------------------------+----------------------------------+
   | ID                                   | DSCP Mark                        |
   +--------------------------------------+----------------------------------+
   | 115e4f70-8034-4176-8fe9-2c47f8878a7d | 22                               |
   +--------------------------------------+----------------------------------+

   $ openstack network qos rule show \
     dscp-marking 115e4f70-8034-4176-8fe9-2c47f8878a7d
   +----------------+--------------------------------------+
   | Field          | Value                                |
   +----------------+--------------------------------------+
   | dscp_mark      | 22                                   |
   | id             | 115e4f70-8034-4176-8fe9-2c47f8878a7d |
   | name           | None                                 |
   | project_id     |                                      |
   +----------------+--------------------------------------+

   $ openstack network qos rule delete \
     dscp-marking 115e4f70-8034-4176-8fe9-2c47f8878a7d

You can also include minimum bandwidth rules in your policy:

.. code-block:: console

   $ openstack network qos policy create bandwidth-control
   +-------------------+--------------------------------------+
   | Field             | Value                                |
   +-------------------+--------------------------------------+
   | description       |                                      |
   | id                | 8491547e-add1-4c6c-a50e-42121237256c |
   | is_default        | False                                |
   | name              | bandwidth-control                    |
   | project_id        | 7cc5a84e415d48e69d2b06aa67b317d8     |
   | revision_number   | 1                                    |
   | rules             | []                                   |
   | shared            | False                                |
   +-------------------+--------------------------------------+

   $ openstack network qos rule create \
     --type minimum-bandwidth --min-kbps 1000 --egress bandwidth-control
   +------------+--------------------------------------+
   | Field      | Value                                |
   +------------+--------------------------------------+
   | direction  | egress                               |
   | id         | da858b32-44bc-43c9-b92b-cf6e2fa836ab |
   | min_kbps   | 1000                                 |
   | name       | None                                 |
   | project_id |                                      |
   +------------+--------------------------------------+

A policy with a minimum bandwidth rule ensures best efforts are made to
provide no less than the specified bandwidth to each port on which the
rule is applied. However, as this feature is not yet integrated with the
Compute scheduler, minimum bandwidth cannot be guaranteed.

It is also possible to combine several rules in one policy, as long as the
type or direction of each rule is different. For example, you can specify
two ``bandwidth-limit`` rules, one with ``egress`` and one with
``ingress`` direction.
.. code-block:: console

   $ openstack network qos rule create --type bandwidth-limit \
     --max-kbps 50000 --max-burst-kbits 50000 --egress bandwidth-control
   +----------------+--------------------------------------+
   | Field          | Value                                |
   +----------------+--------------------------------------+
   | direction      | egress                               |
   | id             | 0db48906-a762-4d32-8694-3f65214c34a6 |
   | max_burst_kbps | 50000                                |
   | max_kbps       | 50000                                |
   | name           | None                                 |
   | project_id     |                                      |
   +----------------+--------------------------------------+

   $ openstack network qos rule create --type bandwidth-limit \
     --max-kbps 10000 --max-burst-kbits 10000 --ingress bandwidth-control
   +----------------+--------------------------------------+
   | Field          | Value                                |
   +----------------+--------------------------------------+
   | direction      | ingress                              |
   | id             | faabef24-e23a-4fdf-8e92-f8cb66998834 |
   | max_burst_kbps | 10000                                |
   | max_kbps       | 10000                                |
   | name           | None                                 |
   | project_id     |                                      |
   +----------------+--------------------------------------+

   $ openstack network qos rule create --type minimum-bandwidth \
     --min-kbps 1000 --egress bandwidth-control
   +------------+--------------------------------------+
   | Field      | Value                                |
   +------------+--------------------------------------+
   | direction  | egress                               |
   | id         | da858b32-44bc-43c9-b92b-cf6e2fa836ab |
   | min_kbps   | 1000                                 |
   | name       | None                                 |
   | project_id |                                      |
   +------------+--------------------------------------+

   $ openstack network qos policy show bandwidth-control
   +-------------------+-------------------------------------------------------------------+
   | Field             | Value                                                             |
   +-------------------+-------------------------------------------------------------------+
   | description       |                                                                   |
   | id                | 8491547e-add1-4c6c-a50e-42121237256c                              |
   | is_default        | False                                                             |
   | name              | bandwidth-control                                                 |
   | project_id        | 7cc5a84e415d48e69d2b06aa67b317d8                                  |
   | revision_number   | 4                                                                 |
   | rules             | [{u'max_kbps': 50000, u'direction': u'egress',                    |
   |                   | u'type': u'bandwidth_limit',                                      |
   |                   | u'id': u'0db48906-a762-4d32-8694-3f65214c34a6',                   |
   |                   | u'max_burst_kbps': 50000,                                         |
   |                   | u'qos_policy_id': u'8491547e-add1-4c6c-a50e-42121237256c'},       |
   |                   | {u'max_kbps': 10000, u'direction': u'ingress',                    |
   |                   | u'type': u'bandwidth_limit',                                      |
   |                   | u'id': u'faabef24-e23a-4fdf-8e92-f8cb66998834',                   |
   |                   | u'max_burst_kbps': 10000,                                         |
   |                   | u'qos_policy_id': u'8491547e-add1-4c6c-a50e-42121237256c'},       |
   |                   | {u'direction':                                                    |
   |                   | u'egress', u'min_kbps': 1000, u'type': u'minimum_bandwidth',      |
   |                   | u'id': u'da858b32-44bc-43c9-b92b-cf6e2fa836ab',                   |
   |                   | u'qos_policy_id': u'8491547e-add1-4c6c-a50e-42121237256c'}]       |
   | shared            | False                                                             |
   +-------------------+-------------------------------------------------------------------+

.. _config-rbac:

================================
Role-Based Access Control (RBAC)
================================

The Role-Based Access Control (RBAC) policy framework enables both
operators and users to grant access to resources for specific projects.

Supported objects for sharing with specific projects
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Currently, the access that can be granted using this feature is supported
by:

* Regular port creation permissions on networks (since Liberty).
* Binding QoS policies permissions to networks or ports (since Mitaka).
* Attaching router gateways to networks (since Mitaka).
* Binding security groups to ports (since Stein).
* Assigning address scopes to subnet pools (since Ussuri). * Assigning subnet pools to subnets (since Ussuri). Sharing an object with specific projects ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Sharing an object with a specific project is accomplished by creating a policy entry that permits the target project the ``access_as_shared`` action on that object. Sharing a network with specific projects ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Create a network to share: .. code-block:: console $ openstack network create secret_network +---------------------------+--------------------------------------+ | Field | Value | +---------------------------+--------------------------------------+ | admin_state_up | UP | | availability_zone_hints | | | availability_zones | | | created_at | 2017-01-25T20:16:40Z | | description | | | dns_domain | None | | id | f55961b9-3eb8-42eb-ac96-b97038b568de | | ipv4_address_scope | None | | ipv6_address_scope | None | | is_default | None | | mtu | 1450 | | name | secret_network | | port_security_enabled | True | | project_id | 61b7eba037fd41f29cfba757c010faff | | provider:network_type | vxlan | | provider:physical_network | None | | provider:segmentation_id | 9 | | qos_policy_id | None | | revision_number | 3 | | router:external | Internal | | segments | None | | shared | False | | status | ACTIVE | | subnets | | | tags | [] | | updated_at | 2017-01-25T20:16:40Z | +---------------------------+--------------------------------------+ Create the policy entry using the :command:`openstack network rbac create` command (in this example, the ID of the project we want to share with is ``b87b2fc13e0248a4a031d38e06dc191d``): .. code-block:: console $ openstack network rbac create --target-project \ b87b2fc13e0248a4a031d38e06dc191d --action access_as_shared \ --type network f55961b9-3eb8-42eb-ac96-b97038b568de +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | action | access_as_shared | | id | f93efdbf-f1e0-41d2-b093-8328959d469e | | name | None | | object_id | f55961b9-3eb8-42eb-ac96-b97038b568de | | object_type | network | | project_id | 61b7eba037fd41f29cfba757c010faff | | target_project_id | b87b2fc13e0248a4a031d38e06dc191d | +-------------------+--------------------------------------+ The ``target-project`` parameter specifies the project that requires access to the network. The ``action`` parameter specifies what the project is allowed to do. The ``type`` parameter says that the target object is a network. The final parameter is the ID of the network we are granting access to. Project ``b87b2fc13e0248a4a031d38e06dc191d`` will now be able to see the network when running :command:`openstack network list` and :command:`openstack network show` and will also be able to create ports on that network. No other users (other than admins and the owner) will be able to see the network. .. note:: Subnets inherit the RBAC policy entries of their network. To remove access for that project, delete the policy that allows it using the :command:`openstack network rbac delete` command: .. code-block:: console $ openstack network rbac delete f93efdbf-f1e0-41d2-b093-8328959d469e If that project has ports on the network, the server will prevent the policy from being deleted until the ports have been deleted: .. 
code-block:: console $ openstack network rbac delete f93efdbf-f1e0-41d2-b093-8328959d469e RBAC policy on object f93efdbf-f1e0-41d2-b093-8328959d469e cannot be removed because other objects depend on it. This process can be repeated any number of times to share a network with an arbitrary number of projects. Sharing a QoS policy with specific projects ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Create a QoS policy to share: .. code-block:: console $ openstack network qos policy create secret_policy +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | description | | | id | 1f730d69-1c45-4ade-a8f2-89070ac4f046 | | name | secret_policy | | project_id | 61b7eba037fd41f29cfba757c010faff | | revision_number | 1 | | rules | [] | | shared | False | | tags | [] | +-------------------+--------------------------------------+ Create the RBAC policy entry using the :command:`openstack network rbac create` command (in this example, the ID of the project we want to share with is ``be98b82f8fdf46b696e9e01cebc33fd9``): .. code-block:: console $ openstack network rbac create --target-project \ be98b82f8fdf46b696e9e01cebc33fd9 --action access_as_shared \ --type qos_policy 1f730d69-1c45-4ade-a8f2-89070ac4f046 +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | action | access_as_shared | | id | 8828e38d-a0df-4c78-963b-e5f215d3d550 | | name | None | | object_id | 1f730d69-1c45-4ade-a8f2-89070ac4f046 | | object_type | qos_policy | | project_id | 61b7eba037fd41f29cfba757c010faff | | target_project_id | be98b82f8fdf46b696e9e01cebc33fd9 | +-------------------+--------------------------------------+ The ``target-project`` parameter specifies the project that requires access to the QoS policy. The ``action`` parameter specifies what the project is allowed to do. The ``type`` parameter says that the target object is a QoS policy. The final parameter is the ID of the QoS policy we are granting access to. Project ``be98b82f8fdf46b696e9e01cebc33fd9`` will now be able to see the QoS policy when running :command:`openstack network qos policy list` and :command:`openstack network qos policy show` and will also be able to bind it to its ports or networks. No other users (other than admins and the owner) will be able to see the QoS policy. To remove access for that project, delete the RBAC policy that allows it using the :command:`openstack network rbac delete` command: .. code-block:: console $ openstack network rbac delete 8828e38d-a0df-4c78-963b-e5f215d3d550 If that project has ports or networks with the QoS policy applied to them, the server will not delete the RBAC policy until the QoS policy is no longer in use: .. code-block:: console $ openstack network rbac delete 8828e38d-a0df-4c78-963b-e5f215d3d550 RBAC policy on object 8828e38d-a0df-4c78-963b-e5f215d3d550 cannot be removed because other objects depend on it. This process can be repeated any number of times to share a qos-policy with an arbitrary number of projects. Sharing a security group with specific projects ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Create a security group to share: .. 
code-block:: console $ openstack security group create my_security_group +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | created_at | 2019-02-07T06:09:59Z | | description | my_security_group | | id | 5ba835b7-22b0-4be6-bdbe-e0722d1b5f24 | | location | None | | name | my_security_group | | project_id | 077e8f39d3db4c9e998d842b0503283a | | revision_number | 1 | | rules | ... | | tags | [] | | updated_at | 2019-02-07T06:09:59Z | +-------------------+--------------------------------------+ Create the RBAC policy entry using the :command:`openstack network rbac create` command (in this example, the ID of the project we want to share with is ``32016615de5d43bb88de99e7f2e26a1e``): .. code-block:: console $ openstack network rbac create --target-project \ 32016615de5d43bb88de99e7f2e26a1e --action access_as_shared \ --type security_group 5ba835b7-22b0-4be6-bdbe-e0722d1b5f24 +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | action | access_as_shared | | id | 8828e38d-a0df-4c78-963b-e5f215d3d550 | | name | None | | object_id | 5ba835b7-22b0-4be6-bdbe-e0722d1b5f24 | | object_type | security_group | | project_id | 077e8f39d3db4c9e998d842b0503283a | | target_project_id | 32016615de5d43bb88de99e7f2e26a1e | +-------------------+--------------------------------------+ The ``target-project`` parameter specifies the project that requires access to the security group. The ``action`` parameter specifies what the project is allowed to do. The ``type`` parameter says that the target object is a security group. The final parameter is the ID of the security group we are granting access to. Project ``32016615de5d43bb88de99e7f2e26a1e`` will now be able to see the security group when running :command:`openstack security group list` and :command:`openstack security group show` and will also be able to bind it to its ports. No other users (other than admins and the owner) will be able to see the security group. To remove access for that project, delete the RBAC policy that allows it using the :command:`openstack network rbac delete` command: .. code-block:: console $ openstack network rbac delete 8828e38d-a0df-4c78-963b-e5f215d3d550 If that project has ports with the security group applied to them, the server will not delete the RBAC policy until the security group is no longer in use: .. code-block:: console $ openstack network rbac delete 8828e38d-a0df-4c78-963b-e5f215d3d550 RBAC policy on object 8828e38d-a0df-4c78-963b-e5f215d3d550 cannot be removed because other objects depend on it. This process can be repeated any number of times to share a security-group with an arbitrary number of projects. Sharing an address scope with specific projects ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Create an address scope to share: .. code-block:: console $ openstack address scope create my_address_scope +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | id | c19cb654-3489-4160-9c82-8a3015483643 | | ip_version | 4 | | location | ... 
| | name | my_address_scope | | project_id | 34304bc4f233470fa4a2448d153b6324 | | shared | False | +-------------------+--------------------------------------+ Create the RBAC policy entry using the :command:`openstack network rbac create` command (in this example, the ID of the project we want to share with is ``32016615de5d43bb88de99e7f2e26a1e``): .. code-block:: console $ openstack network rbac create --target-project \ 32016615de5d43bb88de99e7f2e26a1e --action access_as_shared \ --type address_scope c19cb654-3489-4160-9c82-8a3015483643 +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | action | access_as_shared | | id | d54b1482-98c4-44aa-9115-ede80387ffe0 | | location | ... | | name | None | | object_id | c19cb654-3489-4160-9c82-8a3015483643 | | object_type | address_scope | | project_id | 34304bc4f233470fa4a2448d153b6324 | | target_project_id | 32016615de5d43bb88de99e7f2e26a1e | +-------------------+--------------------------------------+ The ``target-project`` parameter specifies the project that requires access to the address scope. The ``action`` parameter specifies what the project is allowed to do. The ``type`` parameter says that the target object is an address scope. The final parameter is the ID of the address scope we are granting access to. Project ``32016615de5d43bb88de99e7f2e26a1e`` will now be able to see the address scope when running :command:`openstack address scope list` and :command:`openstack address scope show` and will also be able to assign it to its subnet pools. No other users (other than admins and the owner) will be able to see the address scope. To remove access for that project, delete the RBAC policy that allows it using the :command:`openstack network rbac delete` command: .. code-block:: console $ openstack network rbac delete d54b1482-98c4-44aa-9115-ede80387ffe0 If that project has subnet pools with the address scope applied to them, the server will not delete the RBAC policy until the address scope is no longer in use: .. code-block:: console $ openstack network rbac delete d54b1482-98c4-44aa-9115-ede80387ffe0 RBAC policy on object c19cb654-3489-4160-9c82-8a3015483643 cannot be removed because other objects depend on it. This process can be repeated any number of times to share an address scope with an arbitrary number of projects. Sharing a subnet pool with specific projects ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Create a subnet pool to share: .. code-block:: console $ openstack subnet pool create my_subnetpool --pool-prefix 203.0.113.0/24 +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | address_scope_id | None | | created_at | 2020-03-16T14:23:01Z | | default_prefixlen | 8 | | default_quota | None | | description | | | id | 11f79287-bc17-46b2-bfd0-2562471eb631 | | ip_version | 4 | | is_default | False | | location | ... | | max_prefixlen | 32 | | min_prefixlen | 8 | | name | my_subnetpool | | project_id | 290ccedbcf594ecc8e76eff06f964f7e | | revision_number | 0 | | shared | False | | tags | | | updated_at | 2020-03-16T14:23:01Z | +-------------------+--------------------------------------+ Create the RBAC policy entry using the :command:`openstack network rbac create` command (in this example, the ID of the project we want to share with is ``32016615de5d43bb88de99e7f2e26a1e``): .. 
code-block:: console $ openstack network rbac create --target-project \ 32016615de5d43bb88de99e7f2e26a1e --action access_as_shared \ --type subnetpool 11f79287-bc17-46b2-bfd0-2562471eb631 +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | action | access_as_shared | | id | d54b1482-98c4-44aa-9115-ede80387ffe0 | | location | ... | | name | None | | object_id | 11f79287-bc17-46b2-bfd0-2562471eb631 | | object_type | subnetpool | | project_id | 290ccedbcf594ecc8e76eff06f964f7e | | target_project_id | 32016615de5d43bb88de99e7f2e26a1e | +-------------------+--------------------------------------+ The ``target-project`` parameter specifies the project that requires access to the subnet pool. The ``action`` parameter specifies what the project is allowed to do. The ``type`` parameter says that the target object is a subnet pool. The final parameter is the ID of the subnet pool we are granting access to. Project ``32016615de5d43bb88de99e7f2e26a1e`` will now be able to see the subnet pool when running :command:`openstack subnet pool list` and :command:`openstack subnet pool show` and will also be able to assign it to its subnets. No other users (other than admins and the owner) will be able to see the subnet pool. To remove access for that project, delete the RBAC policy that allows it using the :command:`openstack network rbac delete` command: .. code-block:: console $ openstack network rbac delete d54b1482-98c4-44aa-9115-ede80387ffe0 If that project has subnets with the subnet pool applied to them, the server will not delete the RBAC policy until the subnet pool is no longer in use: .. code-block:: console $ openstack network rbac delete d54b1482-98c4-44aa-9115-ede80387ffe0 RBAC policy on object 11f79287-bc17-46b2-bfd0-2562471eb631 cannot be removed because other objects depend on it. This process can be repeated any number of times to share a subnet pool with an arbitrary number of projects. How the 'shared' flag relates to these entries ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As introduced in other guide entries, neutron provides a means of making an object (``address-scope``, ``network``, ``qos-policy``, ``security-group``, ``subnetpool``) available to every project. This is accomplished using the ``shared`` flag on the supported object: .. code-block:: console $ openstack network create global_network --share +---------------------------+--------------------------------------+ | Field | Value | +---------------------------+--------------------------------------+ | admin_state_up | UP | | availability_zone_hints | | | availability_zones | | | created_at | 2017-01-25T20:32:06Z | | description | | | dns_domain | None | | id | 84a7e627-573b-49da-af66-c9a65244f3ce | | ipv4_address_scope | None | | ipv6_address_scope | None | | is_default | None | | mtu | 1450 | | name | global_network | | port_security_enabled | True | | project_id | 61b7eba037fd41f29cfba757c010faff | | provider:network_type | vxlan | | provider:physical_network | None | | provider:segmentation_id | 7 | | qos_policy_id | None | | revision_number | 3 | | router:external | Internal | | segments | None | | shared | True | | status | ACTIVE | | subnets | | | tags | [] | | updated_at | 2017-01-25T20:32:07Z | +---------------------------+--------------------------------------+ This is the equivalent of creating a policy on the network that permits every project to perform the action ``access_as_shared`` on that network. 
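You can also toggle the flag on an existing network. The following is a
minimal sketch reusing the ``global_network`` created above; setting or
clearing the flag adds or removes the wildcard RBAC entry for you:

.. code-block:: console

   $ openstack network set --no-share global_network
   $ openstack network set --share global_network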
Neutron treats them as the same thing, so the policy entry for that network
should be visible using the :command:`openstack network rbac list` command:

.. code-block:: console

   $ openstack network rbac list
   +--------------------------------------+-------------+--------------------------------------+
   | ID                                   | Object Type | Object ID                            |
   +--------------------------------------+-------------+--------------------------------------+
   | 58a5ee31-2ad6-467d-8bb8-8c2ae3dd1382 | qos_policy  | 1f730d69-1c45-4ade-a8f2-89070ac4f046 |
   | 27efbd79-f384-4d89-9dfc-6c4a606ceec6 | network     | 84a7e627-573b-49da-af66-c9a65244f3ce |
   +--------------------------------------+-------------+--------------------------------------+

Use the :command:`openstack network rbac show` command to see the details:

.. code-block:: console

   $ openstack network rbac show 27efbd79-f384-4d89-9dfc-6c4a606ceec6
   +-------------------+--------------------------------------+
   | Field             | Value                                |
   +-------------------+--------------------------------------+
   | action            | access_as_shared                     |
   | id                | 27efbd79-f384-4d89-9dfc-6c4a606ceec6 |
   | name              | None                                 |
   | object_id         | 84a7e627-573b-49da-af66-c9a65244f3ce |
   | object_type       | network                              |
   | project_id        | 61b7eba037fd41f29cfba757c010faff     |
   | target_project_id | *                                    |
   +-------------------+--------------------------------------+

The output shows that the entry allows the action ``access_as_shared`` on
object ``84a7e627-573b-49da-af66-c9a65244f3ce`` of type ``network`` to the
target project ``*``, which is a wildcard that represents all projects.

Currently, the ``shared`` flag is just a mapping to the underlying RBAC
policies for a network. Setting the flag to ``True`` on a network creates a
wildcard RBAC entry. Setting it to ``False`` removes the wildcard entry.

When you run :command:`openstack network list` or
:command:`openstack network show`, the ``shared`` flag is calculated by the
server based on the calling project and the RBAC entries for each network.
For QoS objects, use :command:`openstack network qos policy list` or
:command:`openstack network qos policy show` respectively. If there is a
wildcard entry, the ``shared`` flag is always set to ``True``. If there are
only entries that share with specific projects, only the projects the object
is shared with will see the flag as ``True``; the rest will see the flag as
``False``.

Allowing a network to be used as an external network
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

To make a network available as an external network for specific projects
rather than all projects, use the ``access_as_external`` action.

#. Create a network that you want to be available as an external network:
   .. code-block:: console

      $ openstack network create secret_external_network
      +---------------------------+--------------------------------------+
      | Field                     | Value                                |
      +---------------------------+--------------------------------------+
      | admin_state_up            | UP                                   |
      | availability_zone_hints   |                                      |
      | availability_zones        |                                      |
      | created_at                | 2017-01-25T20:36:59Z                 |
      | description               |                                      |
      | dns_domain                | None                                 |
      | id                        | 802d4e9e-4649-43e6-9ee2-8d052a880cfb |
      | ipv4_address_scope        | None                                 |
      | ipv6_address_scope        | None                                 |
      | is_default                | None                                 |
      | mtu                       | 1450                                 |
      | name                      | secret_external_network              |
      | port_security_enabled     | True                                 |
      | project_id                | 61b7eba037fd41f29cfba757c010faff     |
      | provider:network_type     | vxlan                                |
      | provider:physical_network | None                                 |
      | provider:segmentation_id  | 21                                   |
      | qos_policy_id             | None                                 |
      | revision_number           | 3                                    |
      | router:external           | Internal                             |
      | segments                  | None                                 |
      | shared                    | False                                |
      | status                    | ACTIVE                               |
      | subnets                   |                                      |
      | tags                      | []                                   |
      | updated_at                | 2017-01-25T20:36:59Z                 |
      +---------------------------+--------------------------------------+

#. Create a policy entry using the :command:`openstack network rbac create`
   command (in this example, the ID of the project we want to share with is
   ``838030a7bf3c4d04b4b054c0f0b2b17c``):

   .. code-block:: console

      $ openstack network rbac create --target-project \
        838030a7bf3c4d04b4b054c0f0b2b17c --action access_as_external \
        --type network 802d4e9e-4649-43e6-9ee2-8d052a880cfb
      +-------------------+--------------------------------------+
      | Field             | Value                                |
      +-------------------+--------------------------------------+
      | action            | access_as_external                   |
      | id                | afdd5b8d-b6f5-4a15-9817-5231434057be |
      | name              | None                                 |
      | object_id         | 802d4e9e-4649-43e6-9ee2-8d052a880cfb |
      | object_type       | network                              |
      | project_id        | 61b7eba037fd41f29cfba757c010faff     |
      | target_project_id | 838030a7bf3c4d04b4b054c0f0b2b17c     |
      +-------------------+--------------------------------------+

   The ``target-project`` parameter specifies the project that requires
   access to the network. The ``action`` parameter specifies what the
   project is allowed to do. The ``type`` parameter indicates that the
   target object is a network. The final parameter is the ID of the network
   we are granting external access to.

Now project ``838030a7bf3c4d04b4b054c0f0b2b17c`` is able to see the network
when running :command:`openstack network list` and
:command:`openstack network show` and can attach router gateway ports to
that network. No other users (other than admins and the owner) are able to
see the network.

To remove access for that project, delete the policy that allows it using
the :command:`openstack network rbac delete` command:

.. code-block:: console

   $ openstack network rbac delete afdd5b8d-b6f5-4a15-9817-5231434057be

If that project has router gateway ports attached to that network, the
server prevents the policy from being deleted until the ports have been
deleted:

.. code-block:: console

   $ openstack network rbac delete afdd5b8d-b6f5-4a15-9817-5231434057be
   RBAC policy on object afdd5b8d-b6f5-4a15-9817-5231434057be
   cannot be removed because other objects depend on it.

This process can be repeated any number of times to make a network
available as external to an arbitrary number of projects.

If a network is marked as external during creation, it implicitly receives
a wildcard RBAC policy granting everyone access, preserving the behavior
from before this feature was added.

..
code-block:: console $ openstack network create global_external_network --external +---------------------------+--------------------------------------+ | Field | Value | +---------------------------+--------------------------------------+ | admin_state_up | UP | | availability_zone_hints | | | availability_zones | | | created_at | 2017-01-25T20:41:44Z | | description | | | dns_domain | None | | id | 72a257a2-a56e-4ac7-880f-94a4233abec6 | | ipv4_address_scope | None | | ipv6_address_scope | None | | is_default | None | | mtu | 1450 | | name | global_external_network | | port_security_enabled | True | | project_id | 61b7eba037fd41f29cfba757c010faff | | provider:network_type | vxlan | | provider:physical_network | None | | provider:segmentation_id | 69 | | qos_policy_id | None | | revision_number | 4 | | router:external | External | | segments | None | | shared | False | | status | ACTIVE | | subnets | | | tags | [] | | updated_at | 2017-01-25T20:41:44Z | +---------------------------+--------------------------------------+ In the output above the standard ``router:external`` attribute is ``External`` as expected. Now a wildcard policy is visible in the RBAC policy listings: .. code-block:: console $ openstack network rbac list --long -c ID -c Action +--------------------------------------+--------------------+ | ID | Action | +--------------------------------------+--------------------+ | b694e541-bdca-480d-94ec-eda59ab7d71a | access_as_external | +--------------------------------------+--------------------+ You can modify or delete this policy with the same constraints as any other RBAC ``access_as_external`` policy. Preventing regular users from sharing objects with each other ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The default ``policy.json`` file will not allow regular users to share objects with every other project using a wildcard; however, it will allow them to share objects with specific project IDs. If an operator wants to prevent normal users from doing this, the ``"create_rbac_policy":`` entry in ``policy.json`` can be adjusted from ``""`` to ``"rule:admin_only"``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/config-routed-networks.rst0000644000175000017500000006331100000000000025455 0ustar00coreycorey00000000000000.. _config-routed-provider-networks: ======================== Routed provider networks ======================== .. note:: Use of this feature requires the OpenStack client version 3.3 or newer. Before routed provider networks, the Networking service could not present a multi-segment layer-3 network as a single entity. Thus, each operator typically chose one of the following architectures: * Single large layer-2 network * Multiple smaller layer-2 networks Single large layer-2 networks become complex at scale and involve significant failure domains. Multiple smaller layer-2 networks scale better and shrink failure domains, but leave network selection to the user. Without additional information, users cannot easily differentiate these networks. A routed provider network enables a single provider network to represent multiple layer-2 networks (broadcast domains) or segments and enables the operator to present one network to users. However, the particular IP addresses available to an instance depend on the segment of the network available on the particular compute node. 
Similar to conventional networking, layer-2 (switching) handles transit of
traffic between ports on the same segment and layer-3 (routing) handles
transit of traffic between segments.

Each segment requires at least one subnet that explicitly belongs to that
segment. The association between a segment and a subnet distinguishes a
routed provider network from other types of networks. The Networking
service enforces that either zero or all subnets on a particular network
associate with a segment. For example, attempting to create a subnet
without a segment on a network containing subnets with segments generates
an error.

The Networking service does not provide layer-3 services between segments.
Instead, it relies on physical network infrastructure to route subnets.
Thus, both the Networking service and physical network infrastructure must
contain configuration for routed provider networks, similar to conventional
provider networks. In the future, implementation of dynamic routing
protocols may ease configuration of routed networks.

Prerequisites
~~~~~~~~~~~~~

Routed provider networks require additional prerequisites over conventional
provider networks. We recommend using the following procedure:

#. Begin with segments. The Networking service defines a segment using the
   following components:

   * Unique physical network name
   * Segmentation type
   * Segmentation ID

   For example, ``provider1``, ``VLAN``, and ``2016``. See the
   `API reference <https://docs.openstack.org/api-ref/network/v2/>`__ for
   more information.

   Within a network, use a unique physical network name for each segment,
   which enables reuse of the same segmentation details between subnets.
   For example, using the same VLAN ID across all segments of a particular
   provider network. Similar to conventional provider networks, the
   operator must provision the layer-2 physical network infrastructure
   accordingly.

#. Implement routing between segments.

   The Networking service does not provision routing among segments. The
   operator must implement routing among segments of a provider network.
   Each subnet on a segment must contain the gateway address of the router
   interface on that particular subnet. For example:

   ========  =======  ====================  ==================
   Segment   Version  Addresses             Gateway
   ========  =======  ====================  ==================
   segment1  4        203.0.113.0/24        203.0.113.1
   segment1  6        fd00:203:0:113::/64   fd00:203:0:113::1
   segment2  4        198.51.100.0/24       198.51.100.1
   segment2  6        fd00:198:51:100::/64  fd00:198:51:100::1
   ========  =======  ====================  ==================

#. Map segments to compute nodes.

   Routed provider networks imply that compute nodes reside on different
   segments. The operator must ensure that every compute host that is
   supposed to participate in a routed provider network has direct
   connectivity to one of its segments.

   ===========  ======  ================
   Host         Rack    Physical Network
   ===========  ======  ================
   compute0001  rack 1  segment 1
   compute0002  rack 1  segment 1
   ...          ...     ...
   compute0101  rack 2  segment 2
   compute0102  rack 2  segment 2
   ...          ...     ...
   ===========  ======  ================

#. Deploy DHCP agents.

   Unlike conventional provider networks, a DHCP agent cannot support more
   than one segment within a network. The operator must deploy at least one
   DHCP agent per segment. Consider deploying DHCP agents on compute nodes
   containing the segments rather than one or more network nodes to reduce
   node count.
   ===========  ======  ================
   Host         Rack    Physical Network
   ===========  ======  ================
   network0001  rack 1  segment 1
   network0002  rack 2  segment 2
   ...          ...     ...
   ===========  ======  ================

#. Configure communication of the Networking service with the Compute
   scheduler.

   An instance with an IPv4 interface in a routed provider network must be
   placed by the Compute scheduler on a host that has access to a segment
   with available IPv4 addresses. To make this possible, the Networking
   service communicates to the Compute scheduler the inventory of IPv4
   addresses associated with each segment of a routed provider network. The
   operator must configure the authentication credentials that the
   Networking service will use to communicate with the Compute scheduler's
   placement API. See the example configuration below.

   .. note::

      Coordination between the Networking service and the Compute scheduler
      is not necessary for IPv6 subnets as a consequence of their large
      address spaces.

   .. note::

      The coordination between the Networking service and the Compute
      scheduler requires the following minimum API micro-versions.

      * Compute service API: 2.41
      * Placement API: 1.1

Example configuration
~~~~~~~~~~~~~~~~~~~~~

Controller node
---------------

#. Enable the segments service plug-in by appending ``segments`` to the
   list of ``service_plugins`` in the ``neutron.conf`` file on all nodes
   running the ``neutron-server`` service:

   .. code-block:: ini

      [DEFAULT]
      # ...
      service_plugins = ...,segments

#. Add a ``placement`` section to the ``neutron.conf`` file with
   authentication credentials for the Compute service placement API:

   .. code-block:: ini

      [placement]
      www_authenticate_uri = http://192.0.2.72/identity
      project_domain_name = Default
      project_name = service
      user_domain_name = Default
      password = apassword
      username = nova
      auth_url = http://192.0.2.72/identity_admin
      auth_type = password
      region_name = RegionOne

#. Restart the ``neutron-server`` service.

Network or compute nodes
------------------------

* Configure the layer-2 agent on each node to map one or more segments to
  the appropriate physical network bridge or interface and restart the
  agent.

Create a routed provider network
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The following steps create a routed provider network with two segments.
Each segment contains one IPv4 subnet and one IPv6 subnet.

#. Source the administrative project credentials.

#. Create a VLAN provider network which includes a default segment. In this
   example, the network uses the ``provider1`` physical network with VLAN
   ID 2016.

   .. code-block:: console

      $ openstack network create --share --provider-physical-network provider1 \
        --provider-network-type vlan --provider-segment 2016 multisegment1
      +---------------------------+--------------------------------------+
      | Field                     | Value                                |
      +---------------------------+--------------------------------------+
      | admin_state_up            | UP                                   |
      | id                        | 6ab19caa-dda9-4b3d-abc4-5b8f435b98d9 |
      | ipv4_address_scope        | None                                 |
      | ipv6_address_scope        | None                                 |
      | l2_adjacency              | True                                 |
      | mtu                       | 1500                                 |
      | name                      | multisegment1                        |
      | port_security_enabled     | True                                 |
      | provider:network_type     | vlan                                 |
      | provider:physical_network | provider1                            |
      | provider:segmentation_id  | 2016                                 |
      | revision_number           | 1                                    |
      | router:external           | Internal                             |
      | shared                    | True                                 |
      | status                    | ACTIVE                               |
      | subnets                   |                                      |
      | tags                      | []                                   |
      +---------------------------+--------------------------------------+

#. Rename the default segment to ``segment1``.

   ..
code-block:: console $ openstack network segment list --network multisegment1 +--------------------------------------+----------+--------------------------------------+--------------+---------+ | ID | Name | Network | Network Type | Segment | +--------------------------------------+----------+--------------------------------------+--------------+---------+ | 43e16869-ad31-48e4-87ce-acf756709e18 | None | 6ab19caa-dda9-4b3d-abc4-5b8f435b98d9 | vlan | 2016 | +--------------------------------------+----------+--------------------------------------+--------------+---------+ .. code-block:: console $ openstack network segment set --name segment1 43e16869-ad31-48e4-87ce-acf756709e18 .. note:: This command provides no output. #. Create a second segment on the provider network. In this example, the segment uses the ``provider2`` physical network with VLAN ID 2017. .. code-block:: console $ openstack network segment create --physical-network provider2 \ --network-type vlan --segment 2017 --network multisegment1 segment2 +------------------+--------------------------------------+ | Field | Value | +------------------+--------------------------------------+ | description | None | | headers | | | id | 053b7925-9a89-4489-9992-e164c8cc8763 | | name | segment2 | | network_id | 6ab19caa-dda9-4b3d-abc4-5b8f435b98d9 | | network_type | vlan | | physical_network | provider2 | | revision_number | 1 | | segmentation_id | 2017 | | tags | [] | +------------------+--------------------------------------+ #. Verify that the network contains the ``segment1`` and ``segment2`` segments. .. code-block:: console $ openstack network segment list --network multisegment1 +--------------------------------------+----------+--------------------------------------+--------------+---------+ | ID | Name | Network | Network Type | Segment | +--------------------------------------+----------+--------------------------------------+--------------+---------+ | 053b7925-9a89-4489-9992-e164c8cc8763 | segment2 | 6ab19caa-dda9-4b3d-abc4-5b8f435b98d9 | vlan | 2017 | | 43e16869-ad31-48e4-87ce-acf756709e18 | segment1 | 6ab19caa-dda9-4b3d-abc4-5b8f435b98d9 | vlan | 2016 | +--------------------------------------+----------+--------------------------------------+--------------+---------+ #. Create subnets on the ``segment1`` segment. In this example, the IPv4 subnet uses 203.0.113.0/24 and the IPv6 subnet uses fd00:203:0:113::/64. .. 
code-block:: console $ openstack subnet create \ --network multisegment1 --network-segment segment1 \ --ip-version 4 --subnet-range 203.0.113.0/24 \ multisegment1-segment1-v4 +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | allocation_pools | 203.0.113.2-203.0.113.254 | | cidr | 203.0.113.0/24 | | enable_dhcp | True | | gateway_ip | 203.0.113.1 | | id | c428797a-6f8e-4cb1-b394-c404318a2762 | | ip_version | 4 | | name | multisegment1-segment1-v4 | | network_id | 6ab19caa-dda9-4b3d-abc4-5b8f435b98d9 | | revision_number | 1 | | segment_id | 43e16869-ad31-48e4-87ce-acf756709e18 | | tags | [] | +-------------------+--------------------------------------+ $ openstack subnet create \ --network multisegment1 --network-segment segment1 \ --ip-version 6 --subnet-range fd00:203:0:113::/64 \ --ipv6-address-mode slaac multisegment1-segment1-v6 +-------------------+------------------------------------------------------+ | Field | Value | +-------------------+------------------------------------------------------+ | allocation_pools | fd00:203:0:113::2-fd00:203:0:113:ffff:ffff:ffff:ffff | | cidr | fd00:203:0:113::/64 | | enable_dhcp | True | | gateway_ip | fd00:203:0:113::1 | | id | e41cb069-9902-4c01-9e1c-268c8252256a | | ip_version | 6 | | ipv6_address_mode | slaac | | ipv6_ra_mode | None | | name | multisegment1-segment1-v6 | | network_id | 6ab19caa-dda9-4b3d-abc4-5b8f435b98d9 | | revision_number | 1 | | segment_id | 43e16869-ad31-48e4-87ce-acf756709e18 | | tags | [] | +-------------------+------------------------------------------------------+ .. note:: By default, IPv6 subnets on provider networks rely on physical network infrastructure for stateless address autoconfiguration (SLAAC) and router advertisement. #. Create subnets on the ``segment2`` segment. In this example, the IPv4 subnet uses 198.51.100.0/24 and the IPv6 subnet uses fd00:198:51:100::/64. .. 
code-block:: console $ openstack subnet create \ --network multisegment1 --network-segment segment2 \ --ip-version 4 --subnet-range 198.51.100.0/24 \ multisegment1-segment2-v4 +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | allocation_pools | 198.51.100.2-198.51.100.254 | | cidr | 198.51.100.0/24 | | enable_dhcp | True | | gateway_ip | 198.51.100.1 | | id | 242755c2-f5fd-4e7d-bd7a-342ca95e50b2 | | ip_version | 4 | | name | multisegment1-segment2-v4 | | network_id | 6ab19caa-dda9-4b3d-abc4-5b8f435b98d9 | | revision_number | 1 | | segment_id | 053b7925-9a89-4489-9992-e164c8cc8763 | | tags | [] | +-------------------+--------------------------------------+ $ openstack subnet create \ --network multisegment1 --network-segment segment2 \ --ip-version 6 --subnet-range fd00:198:51:100::/64 \ --ipv6-address-mode slaac multisegment1-segment2-v6 +-------------------+--------------------------------------------------------+ | Field | Value | +-------------------+--------------------------------------------------------+ | allocation_pools | fd00:198:51:100::2-fd00:198:51:100:ffff:ffff:ffff:ffff | | cidr | fd00:198:51:100::/64 | | enable_dhcp | True | | gateway_ip | fd00:198:51:100::1 | | id | b884c40e-9cfe-4d1b-a085-0a15488e9441 | | ip_version | 6 | | ipv6_address_mode | slaac | | ipv6_ra_mode | None | | name | multisegment1-segment2-v6 | | network_id | 6ab19caa-dda9-4b3d-abc4-5b8f435b98d9 | | revision_number | 1 | | segment_id | 053b7925-9a89-4489-9992-e164c8cc8763 | | tags | [] | +-------------------+--------------------------------------------------------+ #. Verify that each IPv4 subnet associates with at least one DHCP agent. .. code-block:: console $ openstack network agent list --agent-type dhcp --network multisegment1 +--------------------------------------+------------+-------------+-------------------+-------+-------+--------------------+ | ID | Agent Type | Host | Availability Zone | Alive | State | Binary | +--------------------------------------+------------+-------------+-------------------+-------+-------+--------------------+ | c904ed10-922c-4c1a-84fd-d928abaf8f55 | DHCP agent | compute0001 | nova | :-) | UP | neutron-dhcp-agent | | e0b22cc0-d2a6-4f1c-b17c-27558e20b454 | DHCP agent | compute0101 | nova | :-) | UP | neutron-dhcp-agent | +--------------------------------------+------------+-------------+-------------------+-------+-------+--------------------+ #. Verify that inventories were created for each segment IPv4 subnet in the Compute service placement API (for the sake of brevity, only one of the segments is shown in this example). .. code-block:: console $ SEGMENT_ID=053b7925-9a89-4489-9992-e164c8cc8763 $ openstack resource provider inventory list $SEGMENT_ID +----------------+------------------+----------+----------+-----------+----------+-------+ | resource_class | allocation_ratio | max_unit | reserved | step_size | min_unit | total | +----------------+------------------+----------+----------+-----------+----------+-------+ | IPV4_ADDRESS | 1.0 | 1 | 2 | 1 | 1 | 30 | +----------------+------------------+----------+----------+-----------+----------+-------+ #. Verify that host aggregates were created for each segment in the Compute service (for the sake of brevity, only one of the segments is shown in this example). .. 
code-block:: console $ openstack aggregate list +----+---------------------------------------------------------+-------------------+ | Id | Name | Availability Zone | +----+---------------------------------------------------------+-------------------+ | 10 | Neutron segment id 053b7925-9a89-4489-9992-e164c8cc8763 | None | +----+---------------------------------------------------------+-------------------+ #. Launch one or more instances. Each instance obtains IP addresses according to the segment it uses on the particular compute node. .. note:: If a fixed IP is specified by the user in the port create request, that particular IP is allocated immediately to the port. However, creating a port and passing it to an instance yields a different behavior than conventional networks. If the fixed IP is not specified on the port create request, the Networking service defers assignment of IP addresses to the port until the particular compute node becomes apparent. For example: .. code-block:: console $ openstack port create --network multisegment1 port1 +-----------------------+--------------------------------------+ | Field | Value | +-----------------------+--------------------------------------+ | admin_state_up | UP | | binding_vnic_type | normal | | id | 6181fb47-7a74-4add-9b6b-f9837c1c90c4 | | ip_allocation | deferred | | mac_address | fa:16:3e:34:de:9b | | name | port1 | | network_id | 6ab19caa-dda9-4b3d-abc4-5b8f435b98d9 | | port_security_enabled | True | | revision_number | 1 | | security_groups | e4fcef0d-e2c5-40c3-a385-9c33ac9289c5 | | status | DOWN | | tags | [] | +-----------------------+--------------------------------------+ Migrating non-routed networks to routed ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Migration of existing non-routed networks is only possible if there is only one segment and one subnet on the network. To migrate a candidate network, update the subnet and set ``id`` of the existing network segment as ``segment_id``. .. note:: In the case where there are multiple subnets or segments it is not possible to safely migrate. The reason for this is that in non-routed networks addresses from the subnet's allocation pools are assigned to ports without considering to which network segment the port is bound. Example ------- The following steps migrate an existing non-routed network with one subnet and one segment to a routed one. #. Source the administrative project credentials. #. Get the ``id`` of the current network segment on the network that is being migrated. .. code-block:: console $ openstack network segment list --network my_network +--------------------------------------+------+--------------------------------------+--------------+---------+ | ID | Name | Network | Network Type | Segment | +--------------------------------------+------+--------------------------------------+--------------+---------+ | 81e5453d-4c9f-43a5-8ddf-feaf3937e8c7 | None | 45e84575-2918-471c-95c0-018b961a2984 | flat | None | +--------------------------------------+------+--------------------------------------+--------------+---------+ #. Get the ``id`` or ``name`` of the current subnet on the network. .. 
code-block:: console

   $ openstack subnet list --network my_network
   +--------------------------------------+-----------+--------------------------------------+---------------+
   | ID                                   | Name      | Network                              | Subnet        |
   +--------------------------------------+-----------+--------------------------------------+---------------+
   | 71d931d2-0328-46ae-93bc-126caf794307 | my_subnet | 45e84575-2918-471c-95c0-018b961a2984 | 172.24.4.0/24 |
   +--------------------------------------+-----------+--------------------------------------+---------------+

#. Verify the current ``segment_id`` of the subnet is ``None``.

   .. code-block:: console

      $ openstack subnet show my_subnet -c segment_id
      +------------+-------+
      | Field      | Value |
      +------------+-------+
      | segment_id | None  |
      +------------+-------+

#. Update the ``segment_id`` of the subnet.

   .. code-block:: console

      $ openstack subnet set --network-segment 81e5453d-4c9f-43a5-8ddf-feaf3937e8c7 my_subnet

#. Verify that the subnet is now associated with the desired network
   segment.

   .. code-block:: console

      $ openstack subnet show my_subnet -c segment_id
      +------------+--------------------------------------+
      | Field      | Value                                |
      +------------+--------------------------------------+
      | segment_id | 81e5453d-4c9f-43a5-8ddf-feaf3937e8c7 |
      +------------+--------------------------------------+

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/config-service-subnets.rst0000644000175000017500000004644500000000000025427 0ustar00coreycorey00000000000000

.. _config-service-subnets:

===============
Service subnets
===============

Service subnets enable operators to define valid port types for each subnet
on a network without limiting networks to one subnet or manually creating
ports with a specific subnet ID. Using this feature, operators can ensure
that ports for instances and router interfaces, for example, always use
different subnets.

Operation
~~~~~~~~~

Define one or more service types for one or more subnets on a particular
network. Each service type must correspond to a valid device owner within
the port model in order for it to be used.

During IP allocation, the :ref:`IPAM ` driver returns an address from a
subnet with a service type matching the port device owner. If no subnets
match, or all matching subnets lack available IP addresses, the IPAM driver
attempts to use a subnet without any service types to preserve
compatibility. If all subnets on a network have a service type, the IPAM
driver cannot preserve compatibility. However, this feature enables strict
IP allocation from subnets with a matching device owner. If multiple
subnets contain the same service type, or a subnet without a service type
exists, the IPAM driver selects the first subnet with a matching service
type. For example, a floating IP agent gateway port uses the following
selection process:

* ``network:floatingip_agent_gateway``
* ``None``

.. note::

   Ports with the device owner ``network:dhcp`` are exempt from the above
   IPAM logic for subnets with ``dhcp_enabled`` set to ``True``. This
   preserves the existing automatic DHCP port creation behaviour for
   DHCP-enabled subnets.

Creating or updating a port with a specific subnet skips this selection
process and explicitly uses the given subnet.

Usage
~~~~~

.. note::

   Creating a subnet with a service type requires administrative
   privileges.

Example 1 - Proof-of-concept
----------------------------

The following example is not typical of an actual deployment.
It is shown to allow users to experiment with configuring service subnets. #. Create a network. .. code-block:: console $ openstack network create demo-net1 +---------------------------+--------------------------------------+ | Field | Value | +---------------------------+--------------------------------------+ | admin_state_up | UP | | availability_zone_hints | | | availability_zones | | | description | | | headers | | | id | b5b729d8-31cc-4d2c-8284-72b3291fec02 | | ipv4_address_scope | None | | ipv6_address_scope | None | | mtu | 1450 | | name | demo-net1 | | port_security_enabled | True | | project_id | a3db43cd0f224242a847ab84d091217d | | provider:network_type | vxlan | | provider:physical_network | None | | provider:segmentation_id | 110 | | revision_number | 1 | | router:external | Internal | | shared | False | | status | ACTIVE | | subnets | | | tags | [] | +---------------------------+--------------------------------------+ #. Create a subnet on the network with one or more service types. For example, the ``compute:nova`` service type enables instances to use this subnet. .. code-block:: console $ openstack subnet create demo-subnet1 --subnet-range 192.0.2.0/24 \ --service-type 'compute:nova' --network demo-net1 +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | id | 6e38b23f-0b27-4e3c-8e69-fd23a3df1935 | | ip_version | 4 | | cidr | 192.0.2.0/24 | | name | demo-subnet1 | | network_id | b5b729d8-31cc-4d2c-8284-72b3291fec02 | | revision_number | 1 | | service_types | ['compute:nova'] | | tags | [] | | tenant_id | a8b3054cc1214f18b1186b291525650f | +-------------------+--------------------------------------+ #. Optionally, create another subnet on the network with a different service type. For example, the ``compute:foo`` arbitrary service type. .. code-block:: console $ openstack subnet create demo-subnet2 --subnet-range 198.51.100.0/24 \ --service-type 'compute:foo' --network demo-net1 +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | id | ea139dcd-17a3-4f0a-8cca-dff8b4e03f8a | | ip_version | 4 | | cidr | 198.51.100.0/24 | | name | demo-subnet2 | | network_id | b5b729d8-31cc-4d2c-8284-72b3291fec02 | | revision_number | 1 | | service_types | ['compute:foo'] | | tags | [] | | tenant_id | a8b3054cc1214f18b1186b291525650f | +-------------------+--------------------------------------+ #. Launch an instance using the network. For example, using the ``cirros`` image and ``m1.tiny`` flavor. .. 
code-block:: console

   $ openstack server create demo-instance1 --flavor m1.tiny \
     --image cirros --nic net-id=b5b729d8-31cc-4d2c-8284-72b3291fec02
   +--------------------------------------+-----------------------------------------------+
   | Field                                | Value                                         |
   +--------------------------------------+-----------------------------------------------+
   | OS-DCF:diskConfig                    | MANUAL                                        |
   | OS-EXT-AZ:availability_zone          |                                               |
   | OS-EXT-SRV-ATTR:host                 | None                                          |
   | OS-EXT-SRV-ATTR:hypervisor_hostname  | None                                          |
   | OS-EXT-SRV-ATTR:instance_name        | instance-00000009                             |
   | OS-EXT-STS:power_state               | 0                                             |
   | OS-EXT-STS:task_state                | scheduling                                    |
   | OS-EXT-STS:vm_state                  | building                                      |
   | OS-SRV-USG:launched_at               | None                                          |
   | OS-SRV-USG:terminated_at             | None                                          |
   | accessIPv4                           |                                               |
   | accessIPv6                           |                                               |
   | addresses                            |                                               |
   | adminPass                            | Fn85skabdxBL                                  |
   | config_drive                         |                                               |
   | created                              | 2016-09-19T15:07:42Z                          |
   | flavor                               | m1.tiny (1)                                   |
   | hostId                               |                                               |
   | id                                   | 04222b73-1a6e-4c2a-9af4-ef3d17d521ff          |
   | image                                | cirros (4aaec87d-c655-4856-8618-b2dada3a2b11) |
   | key_name                             | None                                          |
   | name                                 | demo-instance1                                |
   | os-extended-volumes:volumes_attached | []                                            |
   | progress                             | 0                                             |
   | project_id                           | d44c19e056674381b86430575184b167              |
   | properties                           |                                               |
   | security_groups                      | [{u'name': u'default'}]                       |
   | status                               | BUILD                                         |
   | updated                              | 2016-09-19T15:07:42Z                          |
   | user_id                              | 331afbeb322d4c559a181e19051ae362              |
   +--------------------------------------+-----------------------------------------------+

#. Check the instance status. The ``Networks`` field contains an IP address
   from the subnet having the ``compute:nova`` service type.

   .. code-block:: console

      $ openstack server list
      +--------------------------------------+-----------------+---------+---------------------+--------+---------+
      | ID                                   | Name            | Status  | Networks            | Image  | Flavor  |
      +--------------------------------------+-----------------+---------+---------------------+--------+---------+
      | 20181f46-5cd2-4af8-9af0-f4cf5c983008 | demo-instance1  | ACTIVE  | demo-net1=192.0.2.3 | cirros | m1.tiny |
      +--------------------------------------+-----------------+---------+---------------------+--------+---------+

Example 2 - DVR configuration
-----------------------------

The following example outlines how you can configure service subnets in a
DVR-enabled deployment, with the goal of minimizing public IP address
consumption. This example uses three subnets on the same external network:

* 192.0.2.0/24 for instance floating IP addresses
* 198.51.100.0/24 for floating IP agent gateway IPs configured on compute
  nodes
* 203.0.113.0/25 for all other IP allocations on the external network

This example again uses the private network, ``demo-net1``
(b5b729d8-31cc-4d2c-8284-72b3291fec02), which was created in
`Example 1 - Proof-of-concept`_.

.. note::

   The output of the commands is not always shown since it is very similar
   to the above.

#. Create an external network:

   .. code-block:: console

      $ openstack network create --external demo-ext-net

#. Create a subnet on the external network for the instance floating IP
   addresses. This uses the ``network:floatingip`` service type.

   .. code-block:: console

      $ openstack subnet create demo-floating-ip-subnet \
        --subnet-range 192.0.2.0/24 --no-dhcp \
        --service-type 'network:floatingip' --network demo-ext-net

#. Create a subnet on the external network for the floating IP agent
   gateway IP addresses, which are configured by DVR on compute nodes. This
   will use the ``network:floatingip_agent_gateway`` service type.

   ..
code-block:: console $ openstack subnet create demo-floating-ip-agent-gateway-subnet \ --subnet-range 198.51.100.0/24 --no-dhcp \ --service-type 'network:floatingip_agent_gateway' \ --network demo-ext-net #. Create a subnet on the external network for all other IP addresses allocated on the external network. This will not use any service type. It acts as a fall back for allocations that do not match either of the above two service subnets. .. code-block:: console $ openstack subnet create demo-other-subnet \ --subnet-range 203.0.113.0/25 --no-dhcp \ --network demo-ext-net #. Create a router: .. code-block:: console $ openstack router create demo-router #. Add an interface to the router on demo-subnet1: .. code-block:: console $ openstack router add subnet demo-router demo-subnet1 #. Set the external gateway for the router, which will create an interface and allocate an IP address on demo-ext-net: .. code-block:: console $ openstack router set --external-gateway demo-ext-net demo-router #. Launch an instance on a private network and retrieve the neutron port ID that was allocated. As above, use the ``cirros`` image and ``m1.tiny`` flavor: .. code-block:: console $ openstack server create demo-instance1 --flavor m1.tiny \ --image cirros --nic net-id=b5b729d8-31cc-4d2c-8284-72b3291fec02 $ openstack port list --server demo-instance1 +--------------------------------------+------+-------------------+--------------------------------------------------+--------+ | ID | Name | MAC Address | Fixed IP Addresses | Status | +--------------------------------------+------+-------------------+--------------------------------------------------+--------+ | a752bb24-9bf2-4d37-b9d6-07da69c86f19 | | fa:16:3e:99:54:32 | ip_address='203.0.113.130', | ACTIVE | | | | | subnet_id='6e38b23f-0b27-4e3c-8e69-fd23a3df1935' | | +--------------------------------------+------+-------------------+--------------------------------------------------+--------+ #. Associate a floating IP with the instance port and verify it was allocated an IP address from the correct subnet: .. code-block:: console $ openstack floating ip create --port \ a752bb24-9bf2-4d37-b9d6-07da69c86f19 demo-ext-net +---------------------+--------------------------------------+ | Field | Value | +---------------------+--------------------------------------+ | fixed_ip_address | 203.0.113.130 | | floating_ip_address | 192.0.2.12 | | floating_network_id | 02d236d5-dad9-4082-bb6b-5245f9f84d13 | | id | f15cae7f-5e05-4b19-bd25-4bb71edcf3de | | port_id | a752bb24-9bf2-4d37-b9d6-07da69c86f19 | | project_id | d44c19e056674381b86430575184b167 | | revision_number | 1 | | router_id | 5a8ca19f-3703-4f81-bc29-db6bc2f528d6 | | status | ACTIVE | | tags | [] | +---------------------+--------------------------------------+ #. As the `admin` user, verify the neutron routers are allocated IP addresses from their correct subnets. Use ``openstack port list`` to find ports associated with the routers. First, the router gateway external port: .. 
code-block:: console $ openstack port show f148ffeb-3c26-4067-bc5f-5c3dfddae2f5 +-----------------------+--------------------------------------------------------------------------+ | Field | Value | +-----------------------+--------------------------------------------------------------------------+ | admin_state_up | UP | | device_id | 5a8ca19f-3703-4f81-bc29-db6bc2f528d6 | | device_owner | network:router_gateway | | extra_dhcp_opts | | | fixed_ips | ip_address='203.0.113.11', | | | subnet_id='67c251d9-2b7a-4200-99f6-e13785b0334d' | | id | f148ffeb-3c26-4067-bc5f-5c3dfddae2f5 | | mac_address | fa:16:3e:2c:0f:69 | | network_id | 02d236d5-dad9-4082-bb6b-5245f9f84d13 | | revision_number | 1 | | project_id | | | status | ACTIVE | | tags | [] | +-----------------------+--------------------------------------------------------------------------+ Second, the router floating IP agent gateway external port: .. code-block:: console $ openstack port show a2d1e756-8ae1-4f96-9aa1-e7ea16a6a68a +-----------------------+--------------------------------------------------------------------------+ | Field | Value | +-----------------------+--------------------------------------------------------------------------+ | admin_state_up | UP | | device_id | 3d0c98eb-bca3-45cc-8aa4-90ae3deb0844 | | device_owner | network:floatingip_agent_gateway | | extra_dhcp_opts | | | fixed_ips | ip_address='198.51.100.10', | | | subnet_id='67c251d9-2b7a-4200-99f6-e13785b0334d' | | id | a2d1e756-8ae1-4f96-9aa1-e7ea16a6a68a | | mac_address | fa:16:3e:f4:5d:fa | | network_id | 02d236d5-dad9-4082-bb6b-5245f9f84d13 | | project_id | | | revision_number | 1 | | status | ACTIVE | | tags | [] | +-----------------------+--------------------------------------------------------------------------+ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/config-services-agent.rst0000644000175000017500000000746100000000000025224 0ustar00coreycorey00000000000000.. _config-services-agent: =================== Services and agents =================== A usual neutron setup consists of multiple services and agents running on one or multiple nodes (though some setups may not need any agents). Each of these services provide some of the networking or API services. Among those of special interest are: #. The neutron-server that provides API endpoints and serves as a single point of access to the database. It usually runs on the controller nodes. #. Layer2 agent that can utilize Open vSwitch, Linux Bridge or other vendor-specific technology to provide network segmentation and isolation for project networks. The L2 agent should run on every node where it is deemed responsible for wiring and securing virtual interfaces (usually both compute and network nodes). #. Layer3 agent that runs on network node and provides east-west and north-south routing plus some advanced services such as FWaaS or VPNaaS. Configuration options ~~~~~~~~~~~~~~~~~~~~~ The neutron configuration options are segregated between neutron-server and agents. Both services and agents may load the main ``neutron.conf`` since this file should contain the oslo.messaging configuration for internal neutron RPCs and may contain host specific configuration, such as file paths. The ``neutron.conf`` contains the database, keystone, nova credentials, and endpoints strictly for neutron-server to use. In addition, neutron-server may load a plugin-specific configuration file, yet the agents should not. 
Because the plugin configuration consists primarily of site-wide options and
the plugin provides the persistence layer for neutron, agents should be
instructed to act upon these values through RPC.

Each individual agent may have its own configuration file. This file should
be loaded after the main ``neutron.conf`` file, so the agent configuration
takes precedence. The agent-specific configuration may contain options that
vary between hosts in a neutron deployment, such as the ``local_ip`` for an
L2 agent. If any agent requires access to additional external services
beyond the neutron RPC, those endpoints should be defined in the
agent-specific configuration file (for example, nova metadata for the
metadata agent).

External processes run by agents
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Some neutron agents, like DHCP, Metadata or L3, often run external processes
to provide some of their functionalities. It may be keepalived, dnsmasq,
haproxy or some other process. Neutron agents are responsible for spawning
and killing such processes when necessary. By default, to kill such
processes, agents use a simple ``kill`` command, but in some cases, for
example when those additional services run inside containers, it may not be
a good solution. To address this problem, operators should use the ``AGENT``
config group option ``kill_scripts_path`` to configure a path to where
``kill scripts`` for such processes live. By default, it is set to
``/etc/neutron/kill_scripts/``. If the ``kill_scripts_path`` option is
changed in the config to a different location, ``exec_dirs`` in
``/etc/rootwrap.conf`` should be changed accordingly.

If ``kill_scripts_path`` is set, every time neutron has to kill a process,
for example ``dnsmasq``, it will look in this directory for a file with the
name ``<process_name>-kill``. So for the ``dnsmasq`` process it will look
for a ``dnsmasq-kill`` script. If such a file exists there, it will be
called instead of using the ``kill`` command.

Kill scripts are called with two parameters:

.. code-block::

   <process_name>-kill <sig> <pid>

where ``<sig>`` is the signal, same as with the ``kill`` command, for
example ``9`` or ``SIGKILL``, and ``<pid>`` is the PID of the process to
kill. This external script should then handle killing of the given process,
as neutron will not call the ``kill`` command for it anymore.

.. _adv-config-sfc:

=========================
Service function chaining
=========================

Service function chain (SFC) essentially refers to the software-defined
networking (SDN) version of policy-based routing (PBR). In many cases, SFC
involves security, although it can include a variety of other features.

Fundamentally, SFC routes packets through one or more service functions
instead of conventional routing that forwards packets based on the
destination IP address. Service functions essentially emulate a series of
physical network devices with cables linking them together.

A basic example of SFC involves routing packets from one location to another
through a firewall that lacks a "next hop" IP address from a conventional
routing perspective. A more complex example involves an ordered series of
service functions, each implemented using multiple instances (VMs). Packets
must flow through one instance and a hashing algorithm distributes flows
across multiple instances at each hop.
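Before moving on to the architecture, it can be useful to confirm that the
SFC API is actually available in your cloud. A minimal check, assuming the
``networking-sfc`` plug-in is installed and registers the ``sfc`` and
``flow_classifier`` extension aliases (verify the alias names for your
release):

.. code-block:: console

   $ openstack extension list --network -c Alias -f value | grep -E 'sfc|flow_classifier'

If neither alias appears, install and configure ``networking-sfc`` before
attempting the operations described in this chapter.
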
Architecture ~~~~~~~~~~~~ All OpenStack Networking services and OpenStack Compute instances connect to a virtual network via ports making it possible to create a traffic steering model for service chaining using only ports. Including these ports in a port chain enables steering of traffic through one or more instances providing service functions. A port chain, or service function path, consists of the following: * A set of ports that define the sequence of service functions. * A set of flow classifiers that specify the classified traffic flows entering the chain. If a service function involves a pair of ports, the first port acts as the ingress port of the service function and the second port acts as the egress port. If both ports use the same value, they function as a single virtual bidirectional port. A port chain is a unidirectional service chain. The first port acts as the head of the service function chain and the second port acts as the tail of the service function chain. A bidirectional service function chain consists of two unidirectional port chains. A flow classifier can only belong to one port chain to prevent ambiguity as to which chain should handle packets in the flow. A check prevents such ambiguity. However, you can associate multiple flow classifiers with a port chain because multiple flows can request the same service function path. Currently, SFC lacks support for multi-project service functions. The port chain plug-in supports backing service providers including the OVS driver and a variety of SDN controller drivers. The common driver API enables different drivers to provide different implementations for the service chain path rendering. .. image:: figures/port-chain-architecture-diagram.png :alt: Port chain architecture .. image:: figures/port-chain-diagram.png :alt: Port chain model See the `networking-sfc documentation `_ for more information. Resources ~~~~~~~~~ Port chain ---------- * ``id`` - Port chain ID * ``project_id`` - Project ID * ``name`` - Readable name * ``description`` - Readable description * ``port_pair_groups`` - List of port pair group IDs * ``flow_classifiers`` - List of flow classifier IDs * ``chain_parameters`` - Dictionary of chain parameters A port chain consists of a sequence of port pair groups. Each port pair group is a hop in the port chain. A group of port pairs represents service functions providing equivalent functionality. For example, a group of firewall service functions. A flow classifier identifies a flow. A port chain can contain multiple flow classifiers. Omitting the flow classifier effectively prevents steering of traffic through the port chain. The ``chain_parameters`` attribute contains one or more parameters for the port chain. Currently, it only supports a correlation parameter that defaults to ``mpls`` for consistency with Open vSwitch (OVS) capabilities. Future values for the correlation parameter may include the network service header (NSH). Port pair group --------------- * ``id`` - Port pair group ID * ``project_id`` - Project ID * ``name`` - Readable name * ``description`` - Readable description * ``port_pairs`` - List of service function port pairs A port pair group may contain one or more port pairs. Multiple port pairs enable load balancing/distribution over a set of functionally equivalent service functions. 
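As an illustration of such a group, the following sketch creates a
two-member port pair group; the names are hypothetical, and the complete
workflow, including creating the underlying port pairs, appears under
Operations below:

.. code-block:: console

   $ openstack sfc port pair group create \
     --port-pair firewall-pp1 --port-pair firewall-pp2 \
     firewall-ppg

Traffic reaching this hop of a chain is then distributed across the two
functionally equivalent firewall instances.
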
Port pair
---------

* ``id`` - Port pair ID
* ``project_id`` - Project ID
* ``name`` - Readable name
* ``description`` - Readable description
* ``ingress`` - Ingress port
* ``egress`` - Egress port
* ``service_function_parameters`` - Dictionary of service function parameters

A port pair represents a service function instance that includes an ingress
and egress port. A service function containing a bidirectional port uses the
same ingress and egress port.

The ``service_function_parameters`` attribute includes one or more parameters
for the service function. Currently, it only supports a correlation parameter
that determines association of a packet with a chain. This parameter defaults
to ``none`` for legacy service functions that lack support for correlation
mechanisms such as the NSH. If set to ``none``, the data plane implementation
must provide service function proxy functionality.

Flow classifier
---------------

* ``id`` - Flow classifier ID
* ``project_id`` - Project ID
* ``name`` - Readable name
* ``description`` - Readable description
* ``ethertype`` - Ethertype (IPv4/IPv6)
* ``protocol`` - IP protocol
* ``source_port_range_min`` - Minimum source protocol port
* ``source_port_range_max`` - Maximum source protocol port
* ``destination_port_range_min`` - Minimum destination protocol port
* ``destination_port_range_max`` - Maximum destination protocol port
* ``source_ip_prefix`` - Source IP address or prefix
* ``destination_ip_prefix`` - Destination IP address or prefix
* ``logical_source_port`` - Source port
* ``logical_destination_port`` - Destination port
* ``l7_parameters`` - Dictionary of L7 parameters

A combination of the source attributes defines the source of the flow. A
combination of the destination attributes defines the destination of the
flow. The ``l7_parameters`` attribute is a placeholder that may be used to
support flow classification using layer 7 fields, such as a URL. If
unspecified, the ``logical_source_port`` and ``logical_destination_port``
attributes default to ``none``, the ``ethertype`` attribute defaults to
``IPv4``, and all other attributes default to a wildcard value.

Operations
~~~~~~~~~~

Create a port chain
-------------------

The following example uses the ``openstack`` command-line interface (CLI) to
create a port chain consisting of three service function instances to handle
HTTP (TCP) traffic flows from 192.0.2.11:1000 to 198.51.100.11:80.

* Instance 1

  * Name: vm1
  * Function: Firewall
  * Port pair: [p1, p2]

* Instance 2

  * Name: vm2
  * Function: Firewall
  * Port pair: [p3, p4]

* Instance 3

  * Name: vm3
  * Function: Intrusion detection system (IDS)
  * Port pair: [p5, p6]

.. note::

   The example network ``net1`` must exist before creating ports on it.

#. Source the credentials of the project that owns the ``net1`` network.

#. Create ports on network ``net1`` and record the UUID values.

   .. code-block:: console

      $ openstack port create p1 --network net1
      $ openstack port create p2 --network net1
      $ openstack port create p3 --network net1
      $ openstack port create p4 --network net1
      $ openstack port create p5 --network net1
      $ openstack port create p6 --network net1

#. Launch service function instance ``vm1`` using ports ``p1`` and ``p2``,
   ``vm2`` using ports ``p3`` and ``p4``, and ``vm3`` using ports ``p5`` and
   ``p6``.

   ..
code-block:: console $ openstack server create --nic port-id=P1_ID --nic port-id=P2_ID vm1 $ openstack server create --nic port-id=P3_ID --nic port-id=P4_ID vm2 $ openstack server create --nic port-id=P5_ID --nic port-id=P6_ID vm3 Replace ``P1_ID``, ``P2_ID``, ``P3_ID``, ``P4_ID``, ``P5_ID``, and ``P6_ID`` with the UUIDs of the respective ports. .. note:: This command requires additional options to successfully launch an instance. See the `CLI reference `_ for more information. Alternatively, you can launch each instance with one network interface and attach additional ports later. #. Create flow classifier ``FC1`` that matches the appropriate packet headers. .. code-block:: console $ openstack sfc flow classifier create \ --description "HTTP traffic from 192.0.2.11 to 198.51.100.11" \ --ethertype IPv4 \ --source-ip-prefix 192.0.2.11/32 \ --destination-ip-prefix 198.51.100.11/32 \ --protocol tcp \ --source-port 1000:1000 \ --destination-port 80:80 FC1 .. note:: When using the (default) OVS driver, the ``--logical-source-port`` parameter is also required #. Create port pair ``PP1`` with ports ``p1`` and ``p2``, ``PP2`` with ports ``p3`` and ``p4``, and ``PP3`` with ports ``p5`` and ``p6``. .. code-block:: console $ openstack sfc port pair create \ --description "Firewall SF instance 1" \ --ingress p1 \ --egress p2 PP1 $ openstack sfc port pair create \ --description "Firewall SF instance 2" \ --ingress p3 \ --egress p4 PP2 $ openstack sfc port pair create \ --description "IDS SF instance" \ --ingress p5 \ --egress p6 PP3 #. Create port pair group ``PPG1`` with port pair ``PP1`` and ``PP2`` and ``PPG2`` with port pair ``PP3``. .. code-block:: console $ openstack sfc port pair group create \ --port-pair PP1 --port-pair PP2 PPG1 $ openstack sfc port pair group create \ --port-pair PP3 PPG2 .. note:: You can repeat the ``--port-pair`` option for multiple port pairs of functionally equivalent service functions. #. Create port chain ``PC1`` with port pair groups ``PPG1`` and ``PPG2`` and flow classifier ``FC1``. .. code-block:: console $ openstack sfc port chain create \ --port-pair-group PPG1 --port-pair-group PPG2 \ --flow-classifier FC1 PC1 .. note:: You can repeat the ``--port-pair-group`` option to specify additional port pair groups in the port chain. A port chain must contain at least one port pair group. You can repeat the ``--flow-classifier`` option to specify multiple flow classifiers for a port chain. Each flow classifier identifies a flow. Update a port chain or port pair group -------------------------------------- * Use the :command:`openstack sfc port chain set` command to dynamically add or remove port pair groups or flow classifiers on a port chain. * For example, add port pair group ``PPG3`` to port chain ``PC1``: .. code-block:: console $ openstack sfc port chain set \ --port-pair-group PPG1 --port-pair-group PPG2 --port-pair-group PPG3 \ --flow-classifier FC1 PC1 * For example, add flow classifier ``FC2`` to port chain ``PC1``: .. code-block:: console $ openstack sfc port chain set \ --port-pair-group PPG1 --port-pair-group PPG2 \ --flow-classifier FC1 --flow-classifier FC2 PC1 SFC steers traffic matching the additional flow classifier to the port pair groups in the port chain. * Use the :command:`openstack sfc port pair group set` command to perform dynamic scale-out or scale-in operations by adding or removing port pairs on a port pair group. .. 
code-block:: console

   $ openstack sfc port pair group set \
     --port-pair PP1 --port-pair PP2 --port-pair PP4 PPG1

SFC performs load balancing/distribution over the additional service
functions in the port pair group.

.. _config-sriov:

======
SR-IOV
======

This page describes how to enable the SR-IOV functionality available in
OpenStack (using OpenStack Networking) and serves as a guide for configuring
OpenStack Networking and OpenStack Compute to create SR-IOV ports. This
functionality was first introduced in the OpenStack Juno release.

The basics
~~~~~~~~~~

PCI-SIG Single Root I/O Virtualization and Sharing (SR-IOV) functionality is
available in OpenStack since the Juno release. The SR-IOV specification
defines a standardized mechanism to virtualize PCIe devices. This mechanism
can virtualize a single PCIe Ethernet controller to appear as multiple PCIe
devices. Each device can be directly assigned to an instance, bypassing the
hypervisor and virtual switch layer. As a result, users are able to achieve
low latency and near line-rate speed.

The following terms are used throughout this document:

.. list-table::
   :header-rows: 1
   :widths: 10 90

   * - Term
     - Definition
   * - PF
     - Physical Function. The physical Ethernet controller that supports
       SR-IOV.
   * - VF
     - Virtual Function. The virtual PCIe device created from a physical
       Ethernet controller.

SR-IOV agent
------------

The SR-IOV agent allows you to set the admin state of ports, configure port
security (enable and disable spoof checking), and configure QoS rate limiting
and minimum bandwidth. You must include the SR-IOV agent on each compute node
using SR-IOV ports.

.. note::

   The SR-IOV agent was optional before Mitaka, and was not enabled by
   default before Liberty.

.. note::

   The ability to control port security and QoS rate limit settings was
   added in Liberty.

Supported Ethernet controllers
------------------------------

The following manufacturers are known to work:

- Intel
- Mellanox
- QLogic
- Broadcom

For information on **Mellanox SR-IOV Ethernet ConnectX cards**, see:

- `Mellanox: How To Configure SR-IOV VFs on ConnectX-4 or newer `_.
- `Mellanox: How To Configure SR-IOV VFs on ConnectX-3/ConnectX-3 Pro `_.

For information on **QLogic SR-IOV Ethernet cards**, see:

- `User's Guide OpenStack Deployment with SR-IOV Configuration `_.

For information on **Broadcom NetXtreme-E Series Ethernet cards**, see the
`Broadcom NetXtreme-C/NetXtreme-E User Guide `_.

For information on **Broadcom NetXtreme-S Series Ethernet cards**, see the
`Broadcom NetXtreme-S Product Page `_.

Using SR-IOV interfaces
~~~~~~~~~~~~~~~~~~~~~~~

In order to enable SR-IOV, the following steps are required:

#. Create Virtual Functions (Compute)
#. Whitelist PCI devices in nova-compute (Compute)
#. Configure neutron-server (Controller)
#. Configure nova-scheduler (Controller)
#. Enable neutron-sriov-nic-agent (Compute)

We recommend using VLAN provider networks for segregation. This way you can
combine instances without SR-IOV ports and instances with SR-IOV ports on a
single network.

.. note::

   Throughout this guide, ``eth3`` is used as the PF and ``physnet2`` is
   used as the provider network configured as a VLAN range. These ports may
   vary in different environments.
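Before creating any VFs, you may want to confirm that the controller
actually advertises the SR-IOV PCIe capability. A minimal sketch, assuming
the PF is the device at PCI address ``82:00.0`` as in the ``lspci`` listing
shown later (the address will differ in your environment):

.. code-block:: console

   # lspci -vvv -s 82:00.0 | grep -i 'Single Root I/O Virtualization'

If no capability line is printed, check the NIC firmware and BIOS settings
before proceeding.
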
Create Virtual Functions (Compute)
----------------------------------

Create the VFs for the network interface that will be used for SR-IOV. We
use ``eth3`` as the PF, which is also used as the interface for the VLAN
provider network and has access to the private networks of all machines.

.. note::

   The steps detail how to create VFs using Mellanox ConnectX-4 and
   newer/Intel SR-IOV Ethernet cards on an Intel system. Steps may differ
   for different hardware configurations.

#. Ensure SR-IOV and VT-d are enabled in the BIOS.

#. Enable IOMMU in Linux by adding ``intel_iommu=on`` to the kernel
   parameters, for example, using GRUB.

#. On each compute node, create the VFs via the PCI SYS interface:

   .. code-block:: console

      # echo '8' > /sys/class/net/eth3/device/sriov_numvfs

   .. note::

      On some PCI devices, you may receive the error ``Device or resource
      busy`` when changing the number of VFs. In this case, you must first
      set ``sriov_numvfs`` to ``0``, then set it to your new value.

   .. note::

      A network interface can be used both for PCI passthrough, using the
      PF, and SR-IOV, using the VFs. If the PF is used, the VF number stored
      in the ``sriov_numvfs`` file is lost. If the PF is attached again to
      the operating system, the number of VFs assigned to this interface
      will be zero. To keep the number of VFs always assigned to this
      interface, modify the interface's configuration file by adding an
      ``ifup`` script command.

      On Ubuntu, modify the ``/etc/network/interfaces`` file:

      .. code-block:: ini

         auto eth3
         iface eth3 inet dhcp
         pre-up echo '8' > /sys/class/net/eth3/device/sriov_numvfs

      On RHEL and derivatives, modify the ``/sbin/ifup-local`` file:

      .. code-block:: bash

         #!/bin/sh
         if [ "$1" = "eth3" ]
         then
             echo '8' > /sys/class/net/eth3/device/sriov_numvfs
         fi

   .. warning::

      Alternatively, you can create VFs by passing ``max_vfs`` to the kernel
      module of your network interface. However, the ``max_vfs`` parameter
      has been deprecated, so the PCI SYS interface is the preferred method.

   You can determine the maximum number of VFs a PF can support:

   .. code-block:: console

      # cat /sys/class/net/eth3/device/sriov_totalvfs
      63

#. Verify that the VFs have been created and are in ``up`` state. For
   example:

   .. code-block:: console

      # lspci | grep Ethernet
      82:00.0 Ethernet controller: Intel Corporation 82599ES 10-Gigabit SFI/SFP+ Network Connection (rev 01)
      82:00.1 Ethernet controller: Intel Corporation 82599ES 10-Gigabit SFI/SFP+ Network Connection (rev 01)
      82:10.0 Ethernet controller: Intel Corporation 82599 Ethernet Controller Virtual Function (rev 01)
      82:10.2 Ethernet controller: Intel Corporation 82599 Ethernet Controller Virtual Function (rev 01)
      82:10.4 Ethernet controller: Intel Corporation 82599 Ethernet Controller Virtual Function (rev 01)
      82:10.6 Ethernet controller: Intel Corporation 82599 Ethernet Controller Virtual Function (rev 01)
      82:11.0 Ethernet controller: Intel Corporation 82599 Ethernet Controller Virtual Function (rev 01)
      82:11.2 Ethernet controller: Intel Corporation 82599 Ethernet Controller Virtual Function (rev 01)
      82:11.4 Ethernet controller: Intel Corporation 82599 Ethernet Controller Virtual Function (rev 01)
      82:11.6 Ethernet controller: Intel Corporation 82599 Ethernet Controller Virtual Function (rev 01)

   .. code-block:: console
      # ip link show eth3
      8: eth3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT qlen 1000
          link/ether a0:36:9f:8f:3f:b8 brd ff:ff:ff:ff:ff:ff
          vf 0 MAC 00:00:00:00:00:00, spoof checking on, link-state auto
          vf 1 MAC 00:00:00:00:00:00, spoof checking on, link-state auto
          vf 2 MAC 00:00:00:00:00:00, spoof checking on, link-state auto
          vf 3 MAC 00:00:00:00:00:00, spoof checking on, link-state auto
          vf 4 MAC 00:00:00:00:00:00, spoof checking on, link-state auto
          vf 5 MAC 00:00:00:00:00:00, spoof checking on, link-state auto
          vf 6 MAC 00:00:00:00:00:00, spoof checking on, link-state auto
          vf 7 MAC 00:00:00:00:00:00, spoof checking on, link-state auto

   If the interfaces are down, set them to ``up`` before launching a guest,
   otherwise the instance will fail to spawn:

   .. code-block:: console

      # ip link set eth3 up

#. Persist created VFs on reboot:

   .. code-block:: console

      # echo "echo '8' > /sys/class/net/eth3/device/sriov_numvfs" >> /etc/rc.local

   .. note::

      The suggested way of making PCI SYS settings persistent is through the
      ``sysfsutils`` tool. However, this is not available by default on many
      major distributions.

Whitelist PCI devices in nova-compute (Compute)
-----------------------------------------------

#. Configure which PCI devices the ``nova-compute`` service may use. Edit
   the ``nova.conf`` file:

   .. code-block:: ini

      [pci]
      passthrough_whitelist = { "devname": "eth3", "physical_network": "physnet2"}

   This tells the Compute service that all VFs belonging to ``eth3`` are
   allowed to be passed through to instances and belong to the provider
   network ``physnet2``.

   Alternatively the ``[pci] passthrough_whitelist`` parameter also supports
   whitelisting by:

   - PCI address: The address uses the same syntax as in ``lspci`` and an
     asterisk (``*``) can be used to match anything.

     .. code-block:: ini

        [pci]
        passthrough_whitelist = { "address": "[[[[<domain>]:]<bus>]:][<slot>][.[<function>]]", "physical_network": "physnet2" }

     For example, to match any domain, bus ``0a``, slot ``00``, and all
     functions:

     .. code-block:: ini

        [pci]
        passthrough_whitelist = { "address": "*:0a:00.*", "physical_network": "physnet2" }

   - PCI ``vendor_id`` and ``product_id`` as displayed by the Linux utility
     ``lspci``.

     .. code-block:: ini

        [pci]
        passthrough_whitelist = { "vendor_id": "<vendor_id>", "product_id": "<product_id>", "physical_network": "physnet2" }

   If the device defined by the PCI address or ``devname`` corresponds to an
   SR-IOV PF, all VFs under the PF will match the entry. Multiple ``[pci]
   passthrough_whitelist`` entries per host are supported.

   In order to enable SR-IOV to request "trusted mode", the ``[pci]
   passthrough_whitelist`` parameter also supports a ``trusted`` tag.

   .. note::

      This capability is only supported starting with version 18.0.0 (Rocky)
      release of the compute service configured to use the libvirt driver.

   .. important::

      There are security implications of enabling trusted ports. Trusted VFs
      can be set into VF promiscuous mode, which enables them to receive
      unmatched and multicast traffic sent to the physical function.

   For example, to allow users to request SR-IOV devices with trusted
   capabilities on device ``eth3``:

   .. code-block:: ini

      [pci]
      passthrough_whitelist = { "devname": "eth3", "physical_network": "physnet2", "trusted":"true" }

   The ports will have to be created with a binding profile to match the
   ``trusted`` tag, see `Launching instances with SR-IOV ports`_.

#. Restart the ``nova-compute`` service for the changes to go into effect.

.. _configure_sriov_neutron_server:

Configure neutron-server (Controller)
-------------------------------------
#. Add ``sriovnicswitch`` as a mechanism driver. Edit the ``ml2_conf.ini``
   file on each controller:

   .. code-block:: ini

      [ml2]
      mechanism_drivers = openvswitch,sriovnicswitch

#. Ensure your physnet is configured for the chosen network type. Edit the
   ``ml2_conf.ini`` file on each controller:

   .. code-block:: ini

      [ml2_type_vlan]
      network_vlan_ranges = physnet2

#. Add the ``plugin.ini`` file as a parameter to the ``neutron-server``
   service. Edit the appropriate initialization script to configure the
   ``neutron-server`` service to load the plugin configuration file:

   .. code-block:: bash

      --config-file /etc/neutron/neutron.conf
      --config-file /etc/neutron/plugin.ini

#. Restart the ``neutron-server`` service.

Configure nova-scheduler (Controller)
-------------------------------------

#. On every controller node running the ``nova-scheduler`` service, add
   ``PciPassthroughFilter`` to ``[filter_scheduler] enabled_filters`` to
   enable this filter. Ensure ``[filter_scheduler] available_filters`` is
   set to the default of ``nova.scheduler.filters.all_filters``:

   .. code-block:: ini

      [filter_scheduler]
      enabled_filters = AvailabilityZoneFilter, ComputeFilter, ComputeCapabilitiesFilter, ImagePropertiesFilter, ServerGroupAntiAffinityFilter, ServerGroupAffinityFilter, PciPassthroughFilter
      available_filters = nova.scheduler.filters.all_filters

#. Restart the ``nova-scheduler`` service.

Enable neutron-sriov-nic-agent (Compute)
----------------------------------------

#. Install the SR-IOV agent, if necessary.

#. Edit the ``sriov_agent.ini`` file on each compute node. For example:

   .. code-block:: ini

      [securitygroup]
      firewall_driver = neutron.agent.firewall.NoopFirewallDriver

      [sriov_nic]
      physical_device_mappings = physnet2:eth3
      exclude_devices =

   .. note::

      The ``physical_device_mappings`` parameter is not limited to be a 1-1
      mapping between physical networks and NICs. This enables you to map
      the same physical network to more than one NIC. For example, if
      ``physnet2`` is connected to ``eth3`` and ``eth4``, then
      ``physnet2:eth3,physnet2:eth4`` is a valid option.

      The ``exclude_devices`` parameter is empty, therefore, all the VFs
      associated with ``eth3`` may be configured by the agent. To exclude
      specific VFs, add them to the ``exclude_devices`` parameter as
      follows:

      .. code-block:: ini

         exclude_devices = eth1:0000:07:00.2;0000:07:00.3,eth2:0000:05:00.1;0000:05:00.2

#. Ensure the SR-IOV agent runs successfully:

   .. code-block:: console

      # neutron-sriov-nic-agent \
        --config-file /etc/neutron/neutron.conf \
        --config-file /etc/neutron/plugins/ml2/sriov_agent.ini

#. Enable the neutron SR-IOV agent service. If installing from source, you
   must configure a daemon file for the init system manually.

(Optional) FDB L2 agent extension
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Forwarding DataBase (FDB) population is an L2 agent extension to the OVS or
Linux bridge agent. Its objective is to update the FDB table for existing
instances that use normal ports, which enables communication between SR-IOV
instances and normal instances. The use cases of the FDB population
extension are:

* Direct port and normal port instances reside on the same compute node.
* A direct port instance that uses a floating IP address and the network
  node are located on the same host.

For additional information describing the problem, refer to:
`Virtual switching technologies and Linux bridge. `_

#. Edit the ``ovs_agent.ini`` or ``linuxbridge_agent.ini`` file on each
   compute node. For example:

   .. code-block:: ini

      [agent]
      extensions = fdb
#. Add the ``FDB`` section and the ``shared_physical_device_mappings``
   parameter. This parameter maps each physical port to its physical network
   name. Each physical network can be mapped to several ports:

   .. code-block:: ini

      [FDB]
      shared_physical_device_mappings = physnet1:p1p1, physnet1:p1p2

Launching instances with SR-IOV ports
-------------------------------------

Once configuration is complete, you can launch instances with SR-IOV ports.

#. If it does not already exist, create a network and subnet for the chosen
   physnet. This is the network to which SR-IOV ports will be attached. For
   example:

   .. code-block:: console

      $ openstack network create --provider-physical-network physnet2 \
        --provider-network-type vlan --provider-segment 1000 \
        sriov-net
      $ openstack subnet create --network sriov-net \
        --subnet-pool shared-default-subnetpool-v4 \
        sriov-subnet

#. Get the ``id`` of the network where you want the SR-IOV port to be
   created:

   .. code-block:: console

      $ net_id=$(openstack network show sriov-net -c id -f value)

#. Create the SR-IOV port. ``vnic-type=direct`` is used here, but other
   options include ``normal``, ``direct-physical``, and ``macvtap``:

   .. code-block:: console

      $ openstack port create --network $net_id --vnic-type direct \
        sriov-port

   Alternatively, to request that the SR-IOV port accept trusted
   capabilities, the binding profile should be enhanced with the ``trusted``
   tag.

   .. code-block:: console

      $ openstack port create --network $net_id --vnic-type direct \
        --binding-profile trusted=true \
        sriov-port

#. Get the ``id`` of the created port:

   .. code-block:: console

      $ port_id=$(openstack port show sriov-port -c id -f value)

#. Create the instance. Specify the SR-IOV port created in step three for
   the NIC:

   .. code-block:: console

      $ openstack server create --flavor m1.large --image ubuntu_18.04 \
        --nic port-id=$port_id \
        test-sriov

   .. note::

      There are two ways to attach VFs to an instance. You can create an
      SR-IOV port or use the ``pci_alias`` in the Compute service. For more
      information about using ``pci_alias``, refer to `nova-api
      configuration`__.

      __ https://docs.openstack.org/nova/latest/admin/pci-passthrough.html#configure-nova-api-controller

SR-IOV with ConnectX-3/ConnectX-3 Pro Dual Port Ethernet
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In contrast to newer generation Mellanox NICs, ConnectX-3 family network
adapters expose a single PCI device (PF) in the system regardless of the
number of physical ports. When the device is **dual port** and SR-IOV is
enabled and configured, some inconsistencies can be observed in the Linux
networking subsystem.

.. note::

   In the example below, ``enp4s0`` represents the PF net device associated
   with physical port 1 and ``enp4s0d1`` represents the PF net device
   associated with physical port 2.

**Example:** A system with a ConnectX-3 dual port device and a total of four
VFs configured, two VFs assigned to port one and two VFs assigned to port
two.

.. code-block:: console
   $ lspci | grep Mellanox
   04:00.0 Network controller: Mellanox Technologies MT27520 Family [ConnectX-3 Pro]
   04:00.1 Network controller: Mellanox Technologies MT27500/MT27520 Family [ConnectX-3/ConnectX-3 Pro Virtual Function]
   04:00.2 Network controller: Mellanox Technologies MT27500/MT27520 Family [ConnectX-3/ConnectX-3 Pro Virtual Function]
   04:00.3 Network controller: Mellanox Technologies MT27500/MT27520 Family [ConnectX-3/ConnectX-3 Pro Virtual Function]
   04:00.4 Network controller: Mellanox Technologies MT27500/MT27520 Family [ConnectX-3/ConnectX-3 Pro Virtual Function]

Four VFs are available in the system. However, as the following output
shows, the ``ip`` command identifies each PF-associated net device as having
four VFs *each*:

.. code-block:: console

   $ ip link show
   31: enp4s0: <BROADCAST,MULTICAST> mtu 1500 qdisc noop master ovs-system state DOWN mode DEFAULT group default qlen 1000
       link/ether f4:52:14:01:d9:e1 brd ff:ff:ff:ff:ff:ff
       vf 0 MAC 00:00:00:00:00:00, vlan 4095, spoof checking off, link-state auto
       vf 1 MAC 00:00:00:00:00:00, vlan 4095, spoof checking off, link-state auto
       vf 2 MAC 00:00:00:00:00:00, vlan 4095, spoof checking off, link-state auto
       vf 3 MAC 00:00:00:00:00:00, vlan 4095, spoof checking off, link-state auto
   32: enp4s0d1: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
       link/ether f4:52:14:01:d9:e2 brd ff:ff:ff:ff:ff:ff
       vf 0 MAC 00:00:00:00:00:00, vlan 4095, spoof checking off, link-state auto
       vf 1 MAC 00:00:00:00:00:00, vlan 4095, spoof checking off, link-state auto
       vf 2 MAC 00:00:00:00:00:00, vlan 4095, spoof checking off, link-state auto
       vf 3 MAC 00:00:00:00:00:00, vlan 4095, spoof checking off, link-state auto

.. note::

   The Mellanox ``mlx4`` driver allows ``ip`` commands to configure *all*
   VFs from either PF-associated network device.

To allow the neutron SR-IOV agent to properly identify the VFs that belong
to the correct PF network device (and thus to the correct network port), the
administrator must provide the ``exclude_devices`` configuration option in
``sriov_agent.ini``.

**Step 1**: Derive the VF-to-port mapping from the mlx4 driver configuration
file, ``/etc/modprobe.d/mlnx.conf`` or ``/etc/modprobe.d/mlx4.conf``:

.. code-block:: console

   $ cat /etc/modprobe.d/mlnx.conf | grep "options mlx4_core"
   options mlx4_core port_type_array=2,2 num_vfs=2,2,0 probe_vf=2,2,0 log_num_mgm_entry_size=-1

Where:

``num_vfs=n1,n2,n3`` - The driver will enable ``n1`` VFs on physical port 1,
``n2`` VFs on physical port 2 and ``n3`` dual port VFs (applies only to a
dual port HCA when all ports are Ethernet ports).

``probe_vf=m1,m2,m3`` - The driver probes ``m1`` single port VFs on physical
port 1, ``m2`` single port VFs on physical port 2 (applies only if such a
port exists) and ``m3`` dual port VFs (applies only if all ports are
configured as Ethernet). Those VFs are attached to the hypervisor.

The VFs will be enumerated in the following order:

1. port 1 VFs
2. port 2 VFs
3. dual port VFs

In our example:

| 04:00.0 : PF associated to **both** ports.
| 04:00.1 : VF associated to port **1**
| 04:00.2 : VF associated to port **1**
| 04:00.3 : VF associated to port **2**
| 04:00.4 : VF associated to port **2**

**Step 2**: Update the ``exclude_devices`` configuration option in
``sriov_agent.ini`` with the correct mapping. Each PF-associated net device
must exclude the **other** port's VFs:

.. code-block:: ini
   [sriov_nic]
   physical_device_mappings = physnet1:enp4s0,physnet2:enp4s0d1
   exclude_devices = enp4s0:0000:04:00.3;0000:04:00.4,enp4s0d1:0000:04:00.1;0000:04:00.2

SR-IOV with InfiniBand
~~~~~~~~~~~~~~~~~~~~~~

The support for SR-IOV with InfiniBand allows a Virtual PCI device (VF) to
be directly mapped to the guest, allowing higher performance and advanced
features such as RDMA (remote direct memory access). To use this feature,
you must:

#. Use InfiniBand enabled network adapters.

#. Run InfiniBand subnet managers to enable the InfiniBand fabric.

   All InfiniBand networks must have a subnet manager running for the
   network to function. This is true even for a simple network of two
   machines with no switch and the cards plugged in back-to-back. A subnet
   manager is required for the link on the cards to come up. It is possible
   to have more than one subnet manager. In this case, one of them will act
   as the master, and any other will act as a slave that will take over when
   the master subnet manager fails.

#. Install the ``ebrctl`` utility on the compute nodes.

   Check that ``ebrctl`` is listed somewhere in ``/etc/nova/rootwrap.d/*``:

   .. code-block:: console

      $ grep 'ebrctl' /etc/nova/rootwrap.d/*

   If ``ebrctl`` does not appear in any of the rootwrap files, add this to
   the ``/etc/nova/rootwrap.d/compute.filters`` file in the ``[Filters]``
   section.

   .. code-block:: ini

      [Filters]
      ebrctl: CommandFilter, ebrctl, root

Known limitations
~~~~~~~~~~~~~~~~~

* When using Quality of Service (QoS), ``max_burst_kbps`` (burst over
  ``max_kbps``) is not supported. In addition, ``max_kbps`` is rounded to
  Mbps.
* Security groups are not supported when using SR-IOV; thus, the firewall
  driver must be disabled. This can be done in the ``neutron.conf`` file.

  .. code-block:: ini

     [securitygroup]
     firewall_driver = neutron.agent.firewall.NoopFirewallDriver

* SR-IOV is not integrated into the OpenStack Dashboard (horizon). Users
  must use the CLI or API to configure SR-IOV interfaces.
* Live migration support has been added to the libvirt Nova virt-driver in
  the Train release for instances with neutron SR-IOV ports. Indirect mode
  SR-IOV interfaces (vnic-type: macvtap or virtio-forwarder) can now be
  migrated transparently to the guest. Direct mode SR-IOV interfaces
  (vnic-type: direct or direct-physical) are detached before the migration
  and reattached after the migration, so this is not transparent to the
  guest. To avoid loss of network connectivity when live migrating with
  direct mode SR-IOV, the user should create a failover bond in the guest
  with a port type that is migrated transparently, for example vnic-type
  normal or indirect mode SR-IOV.

  .. note::

     SR-IOV features may require a specific NIC driver version, depending on
     the vendor. Intel NICs, for example, require ixgbe version 4.4.6 or
     greater, and ixgbevf version 3.2.2 or greater.

* Attaching SR-IOV ports to existing servers is not currently supported, see
  `bug 1708433 `_ for details.

.. _config-subnet-onboard:

==============
Subnet onboard
==============

The subnet onboard feature allows you to take existing subnets that have
been created outside of a subnet pool and move them into an existing subnet
pool.
This enables you to begin using subnet pools and address scopes if you haven't allocated existing subnets from subnet pools. It also allows you to move individual subnets between subnet pools, and by extension, move them between address scopes. How it works ~~~~~~~~~~~~ One of the fundamental constraints of subnet pools is that all subnets of the same address family (IPv4, IPv6) on a network must be allocated from the same subnet pool. Because of this constraint, subnets must be moved, or "onboarded", into a subnet pool as a group at the network level rather than being handled individually. As such, the onboarding of subnets requires users to supply the UUID of the network the subnet(s) to onboard are associated with, and the UUID of the target subnet pool to perform the operation. Does my environment support subnet onboard? ------------------------------------------- To test that subnet onboard is supported in your environment, execute the following command: .. code-block:: console $ openstack extension list --network -c Alias -c Description | grep subnet_onboard | subnet_onboard | Provides support for onboarding subnets into subnet pools Support for subnet onboard exists in the ML2 plugin as of the Stein release. If you require subnet onboard but your current environment does not support it, consider upgrading to a release that supports subnet onboard. When using third-party plugins with neutron, check with the supplier of the plugin regarding support for subnet onboard. Demo ---- Suppose an administrator has an existing provider network in their environment that was created without allocating its subnets from a subnet pool. .. code-block:: console $ openstack network list +--------------------------------------+----------------+--------------------------------------+ | ID | Name | Subnets | +--------------------------------------+----------------+--------------------------------------+ | f643a4f5-f8d3-4325-b1fe-6061a9af0f07 | provider-net-1 | 5153cab7-7ab6-4956-8466-39aa85dccc9a | +--------------------------------------+----------------+--------------------------------------+ $ openstack subnet show 5153cab7-7ab6-4956-8466-39aa85dccc9a +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | allocation_pools | 192.168.0.2-192.168.7.254 | | cidr | 192.168.0.0/21 | | description | | | dns_nameservers | | | enable_dhcp | True | | gateway_ip | 192.168.0.1 | | host_routes | | | id | 5153cab7-7ab6-4956-8466-39aa85dccc9a | | ip_version | 4 | | ipv6_address_mode | None | | ipv6_ra_mode | None | | network_id | f643a4f5-f8d3-4325-b1fe-6061a9af0f07 | | prefix_length | None | | project_id | 7b80998e5e044cee91c1cdb2e9c63afd | | revision_number | 0 | | segment_id | None | | service_types | | | subnetpool_id | None | | tags | | | updated_at | 2019-03-13T18:24:37Z | +-------------------+--------------------------------------+ The administrator has created a subnet pool named ``routable-prefixes`` and wants to onboard the subnets associated with network ``provider-net-1``. The administrator now wants to manage the address space for provider networks using a subnet pool, but doesn't have the prefixes used by these provider networks under the management of a subnet pool or address scope. .. 
code-block:: console

   $ openstack subnet pool list
   +--------------------------------------+-------------------+--------------+
   | ID                                   | Name              | Prefixes     |
   +--------------------------------------+-------------------+--------------+
   | d3aefb76-2527-43d4-bc21-0ec253908545 | routable-prefixes | 10.10.0.0/16 |
   +--------------------------------------+-------------------+--------------+

.. code-block:: console

   $ openstack subnet pool show routable-prefixes
   +-------------------+--------------------------------------+
   | Field             | Value                                |
   +-------------------+--------------------------------------+
   | address_scope_id  | None                                 |
   | created_at        | 2019-03-10T05:45:01Z                 |
   | default_prefixlen | 26                                   |
   | default_quota     | None                                 |
   | description       | Routable prefixes for projects       |
   | headers           |                                      |
   | id                | d3aefb76-2527-43d4-bc21-0ec253908545 |
   | ip_version        | 4                                    |
   | is_default        | False                                |
   | max_prefixlen     | 32                                   |
   | min_prefixlen     | 8                                    |
   | name              | routable-prefixes                    |
   | prefixes          | 10.10.0.0/16                         |
   | project_id        | cfd1889ac7d64ad891d4f20aef9f8d7c     |
   | revision_number   | 1                                    |
   | shared            | True                                 |
   | tags              | []                                   |
   | updated_at        | 2019-03-10T05:45:01Z                 |
   +-------------------+--------------------------------------+

The administrator can use the following command to bring these subnets under
the management of a subnet pool:

.. code-block:: console

   $ openstack network onboard subnets provider-net-1 routable-prefixes

The subnets on ``provider-net-1`` should now all have their
``subnetpool_id`` updated to match the UUID of the ``routable-prefixes``
subnet pool:

.. code-block:: console

   $ openstack subnet show 5153cab7-7ab6-4956-8466-39aa85dccc9a
   +-------------------+--------------------------------------+
   | Field             | Value                                |
   +-------------------+--------------------------------------+
   | allocation_pools  | 192.168.0.2-192.168.7.254            |
   | cidr              | 192.168.0.0/21                       |
   | description       |                                      |
   | dns_nameservers   |                                      |
   | enable_dhcp       | True                                 |
   | gateway_ip        | 192.168.0.1                          |
   | host_routes       |                                      |
   | id                | 5153cab7-7ab6-4956-8466-39aa85dccc9a |
   | ip_version        | 4                                    |
   | ipv6_address_mode | None                                 |
   | ipv6_ra_mode      | None                                 |
   | network_id        | f643a4f5-f8d3-4325-b1fe-6061a9af0f07 |
   | prefix_length     | None                                 |
   | project_id        | 7b80998e5e044cee91c1cdb2e9c63afd     |
   | revision_number   | 0                                    |
   | segment_id        | None                                 |
   | service_types     |                                      |
   | subnetpool_id     | d3aefb76-2527-43d4-bc21-0ec253908545 |
   | updated_at        | 2019-03-13T18:24:37Z                 |
   +-------------------+--------------------------------------+

The subnet pool will also now show the onboarded prefix(es) in its prefix
list:

.. code-block:: console

   $ openstack subnet pool show routable-prefixes
   +-------------------+--------------------------------------+
   | Field             | Value                                |
   +-------------------+--------------------------------------+
   | address_scope_id  | None                                 |
   | created_at        | 2019-03-10T05:45:01Z                 |
   | default_prefixlen | 26                                   |
   | default_quota     | None                                 |
   | description       | Routable prefixes for projects       |
   | headers           |                                      |
   | id                | d3aefb76-2527-43d4-bc21-0ec253908545 |
   | ip_version        | 4                                    |
   | is_default        | False                                |
   | max_prefixlen     | 32                                   |
   | min_prefixlen     | 8                                    |
   | name              | routable-prefixes                    |
   | prefixes          | 10.10.0.0/16, 192.168.0.0/21         |
   | project_id        | cfd1889ac7d64ad891d4f20aef9f8d7c     |
   | revision_number   | 1                                    |
   | shared            | True                                 |
   | tags              | []                                   |
   | updated_at        | 2019-03-12T13:11:03Z                 |
   +-------------------+--------------------------------------+
.. _config-subnet-pools:

============
Subnet pools
============

Subnet pools have been available since the Kilo release. It is a simple
feature that has the potential to improve your workflow considerably. It
also provides a building block from which other new features will be built
into OpenStack Networking.

To see if your cloud has this feature available, you can check that it is
listed in the supported aliases. You can do this with the OpenStack client.

.. code-block:: console

   $ openstack extension list | grep subnet_allocation
   | Subnet Allocation | subnet_allocation | Enables allocation of subnets from a subnet pool |

Why you need them
~~~~~~~~~~~~~~~~~

Before Kilo, Networking had no automation around the addresses used to
create a subnet. To create one, you had to come up with the addresses on
your own without any help from the system. There are valid use cases for
this, but if you are interested in the following capabilities, then subnet
pools might be for you.

First, wouldn't it be nice if you could turn your pool of addresses over to
Neutron to take care of? When you need to create a subnet, you just ask for
addresses to be allocated from the pool. You do not have to worry about what
you have already used and what addresses are in your pool. Subnet pools can
do this.

Second, subnet pools can manage addresses across projects. The addresses are
guaranteed not to overlap. If the addresses come from an externally routable
pool, then you know that all of the projects have addresses which are
*routable* and unique. This can be useful in the following scenarios.

#. IPv6, since OpenStack Networking has no IPv6 floating IPs.
#. Routing directly to a project network from an external network.

How they work
~~~~~~~~~~~~~

A subnet pool manages a pool of addresses from which subnets can be
allocated. It ensures that there is no overlap between any two subnets
allocated from the same pool.

As a regular project in an OpenStack cloud, you can create a subnet pool of
your own and use it to manage your own pool of addresses. This does not
require any admin privileges. Your pool will not be visible to any other
project.

If you are an admin, you can create a pool which can be accessed by any
regular project. Because such a pool is a shared resource, a quota mechanism
arbitrates access.

Quotas
~~~~~~

Subnet pools have a quota system which is a little different from other
quotas in Neutron. Other quotas in Neutron count discrete instances of an
object against a quota. Each time you create something like a router,
network, or a port, it uses one from your total quota.

With subnets, the resource is the IP address space. Some subnets take more
of it than others. For example, 203.0.113.0/24 uses 256 addresses in one
subnet but 198.51.100.224/28 uses only 16. If address space is limited, the
quota system can encourage efficient use of the space.

With IPv4, the ``default_quota`` can be set to the number of absolute
addresses any given project is allowed to consume from the pool. For
example, with a quota of 128, I might get 203.0.113.128/26,
203.0.113.224/28, and still have room to allocate 48 more addresses in the
future.

With IPv6 it is a little different. It is not practical to count individual
addresses. To avoid ridiculously large numbers, the quota is expressed in
the number of /64 subnets which can be allocated. For example, with a
``default_quota`` of 3, I might get 2001:db8:c18e:c05a::/64,
2001:db8:221c:8ef3::/64, and still have room to allocate one more prefix in
the future.
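As a minimal sketch of the quota mechanism described above (the pool name
and values are illustrative), an administrator could cap each project at 128
IPv4 addresses from a shared pool at creation time:

.. code-block:: console

   $ openstack subnet pool create --share --default-quota 128 \
     --pool-prefix 203.0.113.0/24 demo-quota-pool

With this quota, a project could allocate a /26 and a /28 from the pool, but
a further request for a /26 would be rejected.
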
Default subnet pools ~~~~~~~~~~~~~~~~~~~~ Beginning with Mitaka, a subnet pool can be marked as the default. This is handled with a new extension. .. code-block:: console $ openstack extension list | grep default-subnetpools | Default Subnetpools | default-subnetpools | Provides ability to mark and use a subnetpool as the default | An administrator can mark a pool as default. Only one pool from each address family can be marked default. .. code-block:: console $ openstack subnet pool set --default 74348864-f8bf-4fc0-ab03-81229d189467 If there is a default, it can be requested by passing ``--use-default-subnetpool`` instead of ``--subnet-pool SUBNETPOOL``. Demo ---- If you have access to an OpenStack Kilo or later based neutron, you can play with this feature now. Give it a try. All of the following commands work equally as well with IPv6 addresses. First, as admin, create a shared subnet pool: .. code-block:: console $ openstack subnet pool create --share --pool-prefix 203.0.113.0/24 \ --default-prefix-length 26 demo-subnetpool4 +-------------------+--------------------------------+ | Field | Value | +-------------------+--------------------------------+ | address_scope_id | None | | created_at | 2016-12-14T07:21:26Z | | default_prefixlen | 26 | | default_quota | None | | description | | | headers | | | id | d3aefb76-2527-43d4-bc21-0ec253 | | | 908545 | | ip_version | 4 | | is_default | False | | max_prefixlen | 32 | | min_prefixlen | 8 | | name | demo-subnetpool4 | | prefixes | 203.0.113.0/24 | | project_id | cfd1889ac7d64ad891d4f20aef9f8d | | | 7c | | revision_number | 1 | | shared | True | | tags | [] | | updated_at | 2016-12-14T07:21:26Z | +-------------------+--------------------------------+ The ``default_prefix_length`` defines the subnet size you will get if you do not specify ``--prefix-length`` when creating a subnet. Do essentially the same thing for IPv6 and there are now two subnet pools. Regular projects can see them. (the output is trimmed a bit for display) .. code-block:: console $ openstack subnet pool list +------------------+------------------+--------------------+ | ID | Name | Prefixes | +------------------+------------------+--------------------+ | 2b7cc19f-0114-4e | demo-subnetpool | 2001:db8:a583::/48 | | f4-ad86-c1bb91fc | | | | d1f9 | | | | d3aefb76-2527-43 | demo-subnetpool4 | 203.0.113.0/24 | | d4-bc21-0ec25390 | | | | 8545 | | | +------------------+------------------+--------------------+ Now, use them. It is easy to create a subnet from a pool: .. code-block:: console $ openstack subnet create --ip-version 4 --subnet-pool \ demo-subnetpool4 --network demo-network1 demo-subnet1 +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | allocation_pools | 203.0.113.194-203.0.113.254 | | cidr | 203.0.113.192/26 | | created_at | 2016-12-14T07:33:13Z | | description | | | dns_nameservers | | | enable_dhcp | True | | gateway_ip | 203.0.113.193 | | headers | | | host_routes | | | id | 8d4fbae3-076c-4c08-b2dd-2d6175115a5e | | ip_version | 4 | | ipv6_address_mode | None | | ipv6_ra_mode | None | | name | demo-subnet1 | | network_id | 6b377f77-ce00-4ff6-8676-82343817470d | | project_id | cfd1889ac7d64ad891d4f20aef9f8d7c | | revision_number | 2 | | service_types | | | subnetpool_id | d3aefb76-2527-43d4-bc21-0ec253908545 | | tags | [] | | updated_at | 2016-12-14T07:33:13Z | +-------------------+--------------------------------------+ You can request a specific subnet from the pool. 
You need to specify a subnet that falls within the pool's prefixes. If the
subnet is not already allocated, the request succeeds. You can leave off the
IP version because it is deduced from the subnet pool.

.. code-block:: console

   $ openstack subnet create --subnet-pool demo-subnetpool4 \
     --network demo-network1 --subnet-range 203.0.113.128/26 subnet2
   +-------------------+--------------------------------------+
   | Field             | Value                                |
   +-------------------+--------------------------------------+
   | allocation_pools  | 203.0.113.130-203.0.113.190          |
   | cidr              | 203.0.113.128/26                     |
   | created_at        | 2016-12-14T07:27:40Z                 |
   | description       |                                      |
   | dns_nameservers   |                                      |
   | enable_dhcp       | True                                 |
   | gateway_ip        | 203.0.113.129                        |
   | headers           |                                      |
   | host_routes       |                                      |
   | id                | d32814e3-cf46-4371-80dd-498a80badfba |
   | ip_version        | 4                                    |
   | ipv6_address_mode | None                                 |
   | ipv6_ra_mode      | None                                 |
   | name              | subnet2                              |
   | network_id        | 6b377f77-ce00-4ff6-8676-82343817470d |
   | project_id        | cfd1889ac7d64ad891d4f20aef9f8d7c     |
   | revision_number   | 2                                    |
   | service_types     |                                      |
   | subnetpool_id     | d3aefb76-2527-43d4-bc21-0ec253908545 |
   | tags              | []                                   |
   | updated_at        | 2016-12-14T07:27:40Z                 |
   +-------------------+--------------------------------------+

If the pool becomes exhausted, load some more prefixes:

.. code-block:: console

   $ openstack subnet pool set --pool-prefix \
     198.51.100.0/24 demo-subnetpool4
   $ openstack subnet pool show demo-subnetpool4
   +-------------------+--------------------------------------+
   | Field             | Value                                |
   +-------------------+--------------------------------------+
   | address_scope_id  | None                                 |
   | created_at        | 2016-12-14T07:21:26Z                 |
   | default_prefixlen | 26                                   |
   | default_quota     | None                                 |
   | description       |                                      |
   | id                | d3aefb76-2527-43d4-bc21-0ec253908545 |
   | ip_version        | 4                                    |
   | is_default        | False                                |
   | max_prefixlen     | 32                                   |
   | min_prefixlen     | 8                                    |
   | name              | demo-subnetpool4                     |
   | prefixes          | 198.51.100.0/24, 203.0.113.0/24      |
   | project_id        | cfd1889ac7d64ad891d4f20aef9f8d7c     |
   | revision_number   | 2                                    |
   | shared            | True                                 |
   | tags              | []                                   |
   | updated_at        | 2016-12-14T07:30:32Z                 |
   +-------------------+--------------------------------------+

.. _config-trunking:

========
Trunking
========

The network trunk service allows multiple networks to be connected to an
instance using a single virtual NIC (vNIC). Multiple networks can be
presented to an instance by connecting it to a single port.

Operation
~~~~~~~~~

Network trunking consists of a service plug-in and a set of drivers that
manage trunks on different layer-2 mechanism drivers. Users can create a
port, associate it with a trunk, and launch an instance on that port. Users
can dynamically attach and detach additional networks without disrupting
operation of the instance.

Every trunk has a parent port and can have any number of subports. The
parent port is the port that the trunk is associated with. Users create
instances and specify the parent port of the trunk when launching instances
attached to a trunk.

The network presented by the subport is the network of the associated port.
When creating a subport, a ``segmentation-id`` may be required by the
driver. ``segmentation-id`` defines the segmentation ID on which the subport
network is presented to the instance. ``segmentation-type`` may be required
by certain drivers like OVS.
At this time the following ``segmentation-type`` values are supported: * ``vlan`` uses VLAN for segmentation. * ``inherit`` uses the ``segmentation-type`` from the network the subport is connected to if no ``segmentation-type`` is specified for the subport. Note that using the ``inherit`` type requires the ``provider`` extension to be enabled and only works when the connected network's ``segmentation-type`` is ``vlan``. .. note:: The ``segmentation-type`` and ``segmentation-id`` parameters are optional in the Networking API. However, all drivers as of the Newton release require both to be provided when adding a subport to a trunk. Future drivers may be implemented without this requirement. The ``segmentation-type`` and ``segmentation-id`` specified by the user on the subports is intentionally decoupled from the ``segmentation-type`` and ID of the networks. For example, it is possible to configure the Networking service with ``tenant_network_types = vxlan`` and still create subports with ``segmentation_type = vlan``. The Networking service performs remapping as necessary. Example configuration ~~~~~~~~~~~~~~~~~~~~~ The ML2 plug-in supports trunking with the following mechanism drivers: * Open vSwitch (OVS) * Linux bridge * Open Virtual Network (OVN) When using a ``segmentation-type`` of ``vlan``, the OVS and Linux bridge drivers present the network of the parent port as the untagged VLAN and all subports as tagged VLANs. Controller node --------------- * In the ``neutron.conf`` file, enable the trunk service plug-in: .. code-block:: ini [DEFAULT] service_plugins = trunk Verify service operation ------------------------ #. Source the administrative project credentials and list the enabled extensions. #. Use the command :command:`openstack extension list --network` to verify that the ``Trunk Extension`` and ``Trunk port details`` extensions are enabled. Workflow -------- At a high level, the basic steps to launching an instance on a trunk are the following: #. Create networks and subnets for the trunk and subports #. Create the trunk #. Add subports to the trunk #. Launch an instance on the trunk Create networks and subnets for the trunk and subports ------------------------------------------------------ Create the appropriate networks for the trunk and subports that will be added to the trunk. Create subnets on these networks to ensure the desired layer-3 connectivity over the trunk. Create the trunk ---------------- * Create a parent port for the trunk. .. code-block:: console $ openstack port create --network project-net-A trunk-parent +-------------------+-------------------------------------------------------------------------+ | Field | Value | +-------------------+-------------------------------------------------------------------------+ | admin_state_up | UP | | binding_vif_type | unbound | | binding_vnic_type | normal | | fixed_ips | ip_address='192.0.2.7',subnet_id='8b957198-d3cf-4953-8449-ad4e4dd712cc' | | id | 73fb9d54-43a7-4bb1-a8dc-569e0e0a0a38 | | mac_address | fa:16:3e:dd:c4:d1 | | name | trunk-parent | | network_id | 1b47d3e7-cda5-48e4-b0c8-d20bd7e35f55 | | revision_number | 1 | | tags | [] | +-------------------+-------------------------------------------------------------------------+ * Create the trunk using ``--parent-port`` to reference the port from the previous step: .. 
code-block:: console $ openstack network trunk create --parent-port trunk-parent trunk1 +-----------------+--------------------------------------+ | Field | Value | +-----------------+--------------------------------------+ | admin_state_up | UP | | id | fdf02fcb-1844-45f1-9d9b-e4c2f522c164 | | name | trunk1 | | port_id | 73fb9d54-43a7-4bb1-a8dc-569e0e0a0a38 | | revision_number | 1 | | sub_ports | | +-----------------+--------------------------------------+ Add subports to the trunk ------------------------- Subports can be added to a trunk in two ways: creating the trunk with subports or adding subports to an existing trunk. * Create trunk with subports: This method entails creating the trunk with subports specified at trunk creation. .. code-block:: console $ openstack port create --network project-net-A trunk-parent +-------------------+-------------------------------------------------------------------------+ | Field | Value | +-------------------+-------------------------------------------------------------------------+ | admin_state_up | UP | | binding_vif_type | unbound | | binding_vnic_type | normal | | fixed_ips | ip_address='192.0.2.7',subnet_id='8b957198-d3cf-4953-8449-ad4e4dd712cc' | | id | 73fb9d54-43a7-4bb1-a8dc-569e0e0a0a38 | | mac_address | fa:16:3e:dd:c4:d1 | | name | trunk-parent | | network_id | 1b47d3e7-cda5-48e4-b0c8-d20bd7e35f55 | | revision_number | 1 | | tags | [] | +-------------------+-------------------------------------------------------------------------+ $ openstack port create --network trunked-net subport1 +-------------------+----------------------------------------------------------------------------+ | Field | Value | +-------------------+----------------------------------------------------------------------------+ | admin_state_up | UP | | binding_vif_type | unbound | | binding_vnic_type | normal | | fixed_ips | ip_address='198.51.100.8',subnet_id='2a860e2c-922b-437b-a149-b269a8c9b120' | | id | 91f9dde8-80a4-4506-b5da-c287feb8f5d8 | | mac_address | fa:16:3e:ba:f0:4d | | name | subport1 | | network_id | aef78ec5-16e3-4445-b82d-b2b98c6a86d9 | | revision_number | 1 | | tags | [] | +-------------------+----------------------------------------------------------------------------+ $ openstack network trunk create \ --parent-port trunk-parent \ --subport port=subport1,segmentation-type=vlan,segmentation-id=100 \ trunk1 +----------------+-------------------------------------------------------------------------------------------------+ | Field | Value | +----------------+-------------------------------------------------------------------------------------------------+ | admin_state_up | UP | | id | 61d8e620-fe3a-4d8f-b9e6-e1b0dea6d9e3 | | name | trunk1 | | port_id | 73fb9d54-43a7-4bb1-a8dc-569e0e0a0a38 | | revision_number| 1 | | sub_ports | port_id='73fb9d54-43a7-4bb1-a8dc-569e0e0a0a38', segmentation_id='100', segmentation_type='vlan' | | tags | [] | +----------------+-------------------------------------------------------------------------------------------------+ * Add subports to an existing trunk: This method entails creating a trunk, then adding subports to the trunk after it has already been created. .. code-block:: console $ openstack network trunk set --subport \ port=subport1,segmentation-type=vlan,segmentation-id=100 \ trunk1 .. note:: The command provides no output. .. 
code-block:: console $ openstack network trunk show trunk1 +----------------+-------------------------------------------------------------------------------------------------+ | Field | Value | +----------------+-------------------------------------------------------------------------------------------------+ | admin_state_up | UP | | id | 61d8e620-fe3a-4d8f-b9e6-e1b0dea6d9e3 | | name | trunk1 | | port_id | 73fb9d54-43a7-4bb1-a8dc-569e0e0a0a38 | | revision_number| 1 | | sub_ports | port_id='73fb9d54-43a7-4bb1-a8dc-569e0e0a0a38', segmentation_id='100', segmentation_type='vlan' | | tags | [] | +----------------+-------------------------------------------------------------------------------------------------+ * When using the OVN driver, additional logical switch port information is available using the following commands: .. code-block:: console $ ovn-nbctl lsp-get-parent 61d8e620-fe3a-4d8f-b9e6-e1b0dea6d9e3 73fb9d54-43a7-4bb1-a8dc-569e0e0a0a38 $ ovn-nbctl lsp-get-tag 61d8e620-fe3a-4d8f-b9e6-e1b0dea6d9e3 Launch an instance on the trunk ------------------------------- * Show trunk details to get the ``port_id`` of the trunk. .. code-block:: console $ openstack network trunk show trunk1 +----------------+--------------------------------------+ | Field | Value | +----------------+--------------------------------------+ | admin_state_up | UP | | id | 61d8e620-fe3a-4d8f-b9e6-e1b0dea6d9e3 | | name | trunk | | port_id | 73fb9d54-43a7-4bb1-a8dc-569e0e0a0a38 | | revision_number| 1 | | sub_ports | | | tags | [] | +----------------+--------------------------------------+ * Launch the instance by specifying ``port-id`` using the value of ``port_id`` from the trunk details. Launching an instance on a subport is not supported. Using trunks and subports inside an instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When configuring instances to use a subport, ensure that the interface on the instance is set to use the MAC address assigned to the port by the Networking service. Instances are not made aware of changes made to the trunk after they are active. For example, when a subport with a ``segmentation-type`` of ``vlan`` is added to a trunk, any operations specific to the instance operating system that allow the instance to send and receive traffic on the new VLAN must be handled outside of the Networking service. When creating subports, the MAC address of the trunk parent port can be set on the subport. This will allow VLAN subinterfaces inside an instance launched on a trunk to be configured without explicitly setting a MAC address. Although unique MAC addresses can be used for subports, this can present issues with ARP spoof protections and the native OVS firewall driver. If the native OVS firewall driver is to be used, we recommend that the MAC address of the parent port be re-used on all subports. Trunk states ~~~~~~~~~~~~ * ``ACTIVE`` The trunk is ``ACTIVE`` when both the logical and physical resources have been created. This means that all operations within the Networking and Compute services have completed and the trunk is ready for use. * ``DOWN`` A trunk is ``DOWN`` when it is first created without an instance launched on it, or when the instance associated with the trunk has been deleted. * ``DEGRADED`` A trunk can be in a ``DEGRADED`` state when a temporary failure during the provisioning process is encountered. This includes situations where a subport add or remove operation fails. When in a degraded state, the trunk is still usable and some subports may be usable as well. 
Operations that cause the trunk to go into a ``DEGRADED`` state can be retried to fix temporary failures and move the trunk into an ``ACTIVE`` state. * ``ERROR`` A trunk is in ``ERROR`` state if the request leads to a conflict or an error that cannot be fixed by retrying the request. The ``ERROR`` status can be encountered if the network is not compatible with the trunk configuration or the binding process leads to a persistent failure. When a trunk is in ``ERROR`` state, it must be brought to a sane state (``ACTIVE``), or else requests to add subports will be rejected. * ``BUILD`` A trunk is in ``BUILD`` state while the resources associated with the trunk are in the process of being provisioned. Once the trunk and all of the subports have been provisioned successfully, the trunk transitions to ``ACTIVE``. If there was a partial failure, the trunk transitions to ``DEGRADED``. When ``admin_state`` is set to ``DOWN``, the user is blocked from performing operations on the trunk. ``admin_state`` is set by the user and should not be used to monitor the health of the trunk. Limitations and issues ~~~~~~~~~~~~~~~~~~~~~~ * In ``neutron-ovs-agent``, the ``iptables_hybrid`` firewall driver and trunk ports are not compatible with each other: the ``iptables_hybrid`` firewall does not filter the traffic of subports. Use another firewall driver, such as ``openvswitch``, instead. * See `bugs `__ for more information. .. _config-wsgi: Installing Neutron API via WSGI =============================== This document is a guide to deploying neutron using WSGI. There are two ways to deploy using WSGI: ``uwsgi`` and Apache ``mod_wsgi``. Note that if you intend to run uwsgi behind Apache, you should install the ``mod_proxy_uwsgi`` module. For example, on Debian-based systems: .. code-block:: console # sudo apt-get install libapache2-mod-proxy-uwsgi # sudo a2enmod proxy # sudo a2enmod proxy_uwsgi .. end WSGI Application ---------------- The function ``neutron.server.get_application`` sets up a WSGI application to run behind uwsgi and mod_wsgi. Neutron API behind uwsgi ------------------------ Create a ``/etc/neutron/neutron-api-uwsgi.ini`` file with the content below: .. code-block:: ini [uwsgi] chmod-socket = 666 socket = /var/run/uwsgi/neutron-api.socket lazy-apps = true add-header = Connection: close buffer-size = 65535 hook-master-start = unix_signal:15 gracefully_kill_them_all thunder-lock = true plugins = python enable-threads = true worker-reload-mercy = 90 exit-on-reload = false die-on-term = true master = true processes = 2 wsgi-file = <path-to-neutron-bin-dir>/neutron-api .. end Start neutron-api: .. code-block:: console # uwsgi --procname-prefix neutron-api --ini /etc/neutron/neutron-api-uwsgi.ini .. end
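If Apache fronts the uwsgi service, requests can be proxied to the socket defined above; this is where the ``mod_proxy_uwsgi`` module mentioned earlier comes in. The following vhost is a minimal sketch, not part of the upstream example: it assumes Apache should listen on neutron's usual port ``9696`` and that the socket path matches the ini file above. Adjust both for your deployment.

.. code-block:: ini

    Listen 9696

    <VirtualHost *:9696>
        # Proxy all API requests to the uwsgi socket defined in
        # /etc/neutron/neutron-api-uwsgi.ini; the hostname after
        # uwsgi:// is an arbitrary label for the backend.
        ProxyPass "/" "unix:/var/run/uwsgi/neutron-api.socket|uwsgi://uwsgi-uds-neutron-api/" retry=0
    </VirtualHost>

.. end

Neutron API behind mod_wsgi --------------------------- Create ``/etc/apache2/neutron.conf`` with the content below: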
.. code-block:: ini

    Listen 9696
    LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" neutron_combined

    <Directory <path-to-neutron-bin-dir>>
        Require all granted
    </Directory>

    <VirtualHost *:9696>
        WSGIDaemonProcess neutron-server processes=1 threads=1 user=stack display-name=%{GROUP}
        WSGIProcessGroup neutron-server
        WSGIScriptAlias / <path-to-neutron-bin-dir>/neutron-api
        WSGIApplicationGroup %{GLOBAL}
        WSGIPassAuthorization On
        ErrorLogFormat "%M"
        ErrorLog /var/log/neutron/neutron.log
        CustomLog /var/log/neutron/neutron_access.log neutron_combined
    </VirtualHost>

    Alias /networking <path-to-neutron-bin-dir>/neutron-api
    <Location /networking>
        SetHandler wsgi-script
        Options +ExecCGI
        WSGIProcessGroup neutron-server
        WSGIApplicationGroup %{GLOBAL}
        WSGIPassAuthorization On
    </Location>

    WSGISocketPrefix /var/run/apache2

.. end

For Debian-based systems, copy or symlink the file to ``/etc/apache2/sites-available``. Then enable the neutron site: .. code-block:: console # a2ensite neutron # systemctl reload apache2.service .. end For RPM-based systems, copy the file to ``/etc/httpd/conf.d``. Then enable the neutron site: .. code-block:: console # systemctl reload httpd.service .. end Start Neutron RPC server ------------------------ When the Neutron API is served by a web server (such as Apache), it is difficult to start an RPC listener thread in the same process, so run the Neutron RPC server as a separate process to handle this job: .. code-block:: console # /usr/bin/neutron-rpc-server --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini .. end Neutron Worker Processes ------------------------ Neutron attempts to spawn a number of child processes for handling API and RPC requests. By default, the number of API workers is set to the number of CPU cores, further limited by available memory, and the number of RPC workers is set to half that number. It is strongly recommended that all deployers set these values themselves, via the ``api_workers`` and ``rpc_workers`` configuration parameters. For a cloud with a high load on a relatively small number of objects, a smaller value for ``api_workers`` (somewhere around 4-8) will provide better performance than many workers; for a cloud with a high load on many different objects, the more workers the better. Budget for ``neutron-server`` using about 2 GB of RAM in steady state. For ``rpc_workers``, there need to be enough workers to keep up with incoming events from the various neutron agents. Signs that there are too few include agent heartbeats arriving late, nova vif bindings timing out on the hypervisors, or RPC message timeout exceptions in agent logs.
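As a concrete illustration, both values can be pinned in ``neutron.conf``. This is only a sketch; the numbers below suit a modest controller and are not a general recommendation:

.. code-block:: ini

    [DEFAULT]
    # Four API workers, roughly the 4-8 sweet spot mentioned above.
    api_workers = 4
    # Half the API worker count for RPC, mirroring the default ratio.
    rpc_workers = 2

.. _config: ============= Configuration ============= .. toctree:: :maxdepth: 2 config-services-agent config-ml2 config-address-scopes config-auto-allocation config-az config-bgp-dynamic-routing config-dhcp-ha config-dns-int config-dns-int-ext-serv config-dns-res config-dvr-ha-snat config-fip-port-forwardings config-ipam config-ipv6 config-logging config-macvtap config-mtu config-network-segment-ranges config-ovs-dpdk config-ovs-offload config-ovsfwdriver config-qos config-qos-min-bw config-rbac config-routed-networks config-sfc config-sriov config-subnet-pools config-subnet-onboard config-service-subnets config-trunking config-wsgi .. note:: For general configuration, see the `Configuration Reference <../configuration/>`_.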
.. _deploy-lb-ha-vrrp: ========================================== Linux bridge: High availability using VRRP ========================================== .. include:: shared/deploy-ha-vrrp.txt .. warning:: This high-availability mechanism is not compatible with the layer-2 population mechanism. You must disable layer-2 population in the ``linuxbridge_agent.ini`` file and restart the Linux bridge agent on all existing network and compute nodes prior to deploying the example configuration. Prerequisites ~~~~~~~~~~~~~ Add one network node with the following components: * Three network interfaces: management, provider, and overlay. * OpenStack Networking layer-2 agent, layer-3 agent, and any dependencies. .. note:: You can keep the DHCP and metadata agents on each compute node or move them to the network nodes. Architecture ~~~~~~~~~~~~ .. image:: figures/deploy-lb-ha-vrrp-overview.png :alt: High-availability using Linux bridge with VRRP - overview The following figure shows components and connectivity for one self-service network and one untagged (flat) network. The master router resides on network node 1. In this particular case, the instance resides on the same compute node as the DHCP agent for the network. If the DHCP agent resides on another compute node, the latter only contains a DHCP namespace and Linux bridge with a port on the overlay physical network interface. .. image:: figures/deploy-lb-ha-vrrp-compconn1.png :alt: High-availability using Linux bridge with VRRP - components and connectivity - one network Example configuration ~~~~~~~~~~~~~~~~~~~~~ Use the following example configuration as a template to add support for high-availability using VRRP to an existing operational environment that supports self-service networks. Controller node --------------- #. In the ``neutron.conf`` file: * Enable VRRP. .. code-block:: ini [DEFAULT] l3_ha = True
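The number of layer-3 agents that schedule each HA router can be tuned in the same file. The following is a sketch rather than part of the reference configuration; ``max_l3_agents_per_router`` is the relevant option, and the value shown is purely illustrative:

.. code-block:: ini

    [DEFAULT]
    # Schedule each HA router on up to three layer-3 agents.
    max_l3_agents_per_router = 3

#. Restart the following services: * Server Network node 1 -------------- No changes. Network node 2 -------------- #. Install the Networking service Linux bridge layer-2 agent and layer-3 agent. #. In the ``neutron.conf`` file, configure common options: .. include:: shared/deploy-config-neutron-common.txt #. In the ``linuxbridge_agent.ini`` file, configure the layer-2 agent. .. code-block:: ini [linux_bridge] physical_interface_mappings = provider:PROVIDER_INTERFACE [vxlan] enable_vxlan = True local_ip = OVERLAY_INTERFACE_IP_ADDRESS [securitygroup] firewall_driver = iptables .. warning:: By default, Linux uses UDP port ``8472`` for VXLAN tunnel traffic. This default value doesn't follow the IANA standard, which assigned UDP port ``4789`` for VXLAN communication. As a consequence, if this node is part of a mixed deployment, where nodes with both OVS and Linux bridge must communicate over VXLAN tunnels, it is recommended that a line containing ``udp_dstport = 4789`` be added to the [vxlan] section of all the Linux bridge agents. OVS follows the IANA standard. Replace ``PROVIDER_INTERFACE`` with the name of the underlying interface that handles provider networks. For example, ``eth1``. Replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with the IP address of the interface that handles VXLAN overlays for self-service networks. #. In the ``l3_agent.ini`` file, configure the layer-3 agent. ..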
code-block:: ini [DEFAULT] interface_driver = linuxbridge #. Start the following services: * Linux bridge agent * Layer-3 agent Compute nodes ------------- No changes. Verify service operation ------------------------ #. Source the administrative project credentials. #. Verify presence and operation of the agents. .. code-block:: console $ openstack network agent list +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+ | ID | Agent Type | Host | Availability Zone | Alive | State | Binary | +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+ | 09de6af6-c5f1-4548-8b09-18801f068c57 | Linux bridge agent | compute2 | None | True | UP | neutron-linuxbridge-agent | | 188945d1-9e70-4803-a276-df924e0788a4 | Linux bridge agent | compute1 | None | True | UP | neutron-linuxbridge-agent | | e76c440d-d5f6-4316-a674-d689630b629e | DHCP agent | compute1 | nova | True | UP | neutron-dhcp-agent | | e67367de-6657-11e6-86a4-931cd04404bb | DHCP agent | compute2 | nova | True | UP | neutron-dhcp-agent | | e8174cae-6657-11e6-89f0-534ac6d0cb5c | Metadata agent | compute1 | None | True | UP | neutron-metadata-agent | | ece49ec6-6657-11e6-bafb-c7560f19197d | Metadata agent | compute2 | None | True | UP | neutron-metadata-agent | | 598f6357-4331-4da5-a420-0f5be000bec9 | L3 agent | network1 | nova | True | UP | neutron-l3-agent | | f4734e0f-bcd5-4922-a19d-e31d56b0a7ae | Linux bridge agent | network1 | None | True | UP | neutron-linuxbridge-agent | | 670e5805-340b-4182-9825-fa8319c99f23 | Linux bridge agent | network2 | None | True | UP | neutron-linuxbridge-agent | | 96224e89-7c15-42e9-89c4-8caac7abdd54 | L3 agent | network2 | nova | True | UP | neutron-l3-agent | +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+ Create initial networks ----------------------- .. include:: shared/deploy-ha-vrrp-initialnetworks.txt Verify network operation ------------------------ .. include:: shared/deploy-ha-vrrp-verifynetworkoperation.txt Verify failover operation ------------------------- .. include:: shared/deploy-ha-vrrp-verifyfailoveroperation.txt Keepalived VRRP health check ---------------------------- .. include:: shared/keepalived-vrrp-healthcheck.txt Network traffic flow ~~~~~~~~~~~~~~~~~~~~ This high-availability mechanism simply augments :ref:`deploy-lb-selfservice` with failover of layer-3 services to another router if the master router fails. Thus, you can reference :ref:`Self-service network traffic flow ` for normal operation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/deploy-lb-provider.rst0000644000175000017500000003174000000000000024556 0ustar00coreycorey00000000000000.. _deploy-lb-provider: =============================== Linux bridge: Provider networks =============================== The provider networks architecture example provides layer-2 connectivity between instances and the physical network infrastructure using VLAN (802.1q) tagging. It supports one untagged (flat) network and up to 4095 tagged (VLAN) networks. The actual quantity of VLAN networks depends on the physical network infrastructure. For more information on provider networks, see :ref:`intro-os-networking-provider`. 
Prerequisites ~~~~~~~~~~~~~ One controller node with the following components: * Two network interfaces: management and provider. * OpenStack Networking server service and ML2 plug-in. Two compute nodes with the following components: * Two network interfaces: management and provider. * OpenStack Networking Linux bridge layer-2 agent, DHCP agent, metadata agent, and any dependencies. .. note:: Larger deployments typically deploy the DHCP and metadata agents on a subset of compute nodes to increase performance and redundancy. However, too many agents can overwhelm the message bus. Also, to further simplify any deployment, you can omit the metadata agent and use a configuration drive to provide metadata to instances. Architecture ~~~~~~~~~~~~ .. image:: figures/deploy-lb-provider-overview.png :alt: Provider networks using Linux bridge - overview The following figure shows components and connectivity for one untagged (flat) network. In this particular case, the instance resides on the same compute node as the DHCP agent for the network. If the DHCP agent resides on another compute node, the latter only contains a DHCP namespace and Linux bridge with a port on the provider physical network interface. .. image:: figures/deploy-lb-provider-compconn1.png :alt: Provider networks using Linux bridge - components and connectivity - one network The following figure describes virtual connectivity among components for two tagged (VLAN) networks. Essentially, each network uses a separate bridge that contains a port on the VLAN sub-interface on the provider physical network interface. Similar to the single untagged network case, the DHCP agent may reside on a different compute node. .. image:: figures/deploy-lb-provider-compconn2.png :alt: Provider networks using Linux bridge - components and connectivity - multiple networks .. note:: These figures omit the controller node because it does not handle instance network traffic. Example configuration ~~~~~~~~~~~~~~~~~~~~~ Use the following example configuration as a template to deploy provider networks in your environment. Controller node --------------- #. Install the Networking service components that provide the ``neutron-server`` service and ML2 plug-in. #. In the ``neutron.conf`` file: * Configure common options: .. include:: shared/deploy-config-neutron-common.txt * Disable service plug-ins because provider networks do not require any. However, this breaks portions of the dashboard that manage the Networking service. See the latest `Install Tutorials and Guides <../install/>`__ for more information. .. code-block:: ini [DEFAULT] service_plugins = * Enable two DHCP agents per network so both compute nodes can provide DHCP service for provider networks. .. code-block:: ini [DEFAULT] dhcp_agents_per_network = 2 * If necessary, :ref:`configure MTU <config-mtu>`. #. In the ``ml2_conf.ini`` file: * Configure drivers and network types: .. code-block:: ini [ml2] type_drivers = flat,vlan tenant_network_types = mechanism_drivers = linuxbridge extension_drivers = port_security * Configure network mappings: .. code-block:: ini [ml2_type_flat] flat_networks = provider [ml2_type_vlan] network_vlan_ranges = provider .. note:: The ``tenant_network_types`` option contains no value because the architecture does not support self-service networks. .. note:: The ``provider`` value in the ``network_vlan_ranges`` option lacks VLAN ID ranges to support use of arbitrary VLAN IDs; a sketch of a constrained mapping is shown below.
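If the deployment should instead restrict which VLAN IDs may be used on the provider physical network, a range can be appended to the mapping. This is a sketch, not part of the reference configuration; the range values are purely illustrative:

.. code-block:: ini

    [ml2_type_vlan]
    # Only allow VLAN IDs 101-200 on the provider physical network.
    network_vlan_ranges = provider:101:200

#. Populate the database. ..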
code-block:: console # su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \ --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron #. Start the following services: * Server Compute nodes ------------- #. Install the Networking service Linux bridge layer-2 agent. #. In the ``neutron.conf`` file, configure common options: .. include:: shared/deploy-config-neutron-common.txt #. In the ``linuxbridge_agent.ini`` file, configure the Linux bridge agent: .. code-block:: ini [linux_bridge] physical_interface_mappings = provider:PROVIDER_INTERFACE [vxlan] enable_vxlan = False [securitygroup] firewall_driver = iptables Replace ``PROVIDER_INTERFACE`` with the name of the underlying interface that handles provider networks. For example, ``eth1``. #. In the ``dhcp_agent.ini`` file, configure the DHCP agent: .. code-block:: ini [DEFAULT] interface_driver = linuxbridge enable_isolated_metadata = True force_metadata = True .. note:: The ``force_metadata`` option forces the DHCP agent to provide a host route to the metadata service on ``169.254.169.254`` regardless of whether the subnet contains an interface on a router, thus maintaining similar and predictable metadata behavior among subnets. #. In the ``metadata_agent.ini`` file, configure the metadata agent: .. code-block:: ini [DEFAULT] nova_metadata_host = controller metadata_proxy_shared_secret = METADATA_SECRET The value of ``METADATA_SECRET`` must match the value of the same option in the ``[neutron]`` section of the ``nova.conf`` file. #. Start the following services: * Linux bridge agent * DHCP agent * Metadata agent Verify service operation ------------------------ #. Source the administrative project credentials. #. Verify presence and operation of the agents: .. code-block:: console $ openstack network agent list +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+ | ID | Agent Type | Host | Availability Zone | Alive | State | Binary | +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+ | 09de6af6-c5f1-4548-8b09-18801f068c57 | Linux bridge agent | compute2 | None | True | UP | neutron-linuxbridge-agent | | 188945d1-9e70-4803-a276-df924e0788a4 | Linux bridge agent | compute1 | None | True | UP | neutron-linuxbridge-agent | | e76c440d-d5f6-4316-a674-d689630b629e | DHCP agent | compute1 | nova | True | UP | neutron-dhcp-agent | | e67367de-6657-11e6-86a4-931cd04404bb | DHCP agent | compute2 | nova | True | UP | neutron-dhcp-agent | | e8174cae-6657-11e6-89f0-534ac6d0cb5c | Metadata agent | compute1 | None | True | UP | neutron-metadata-agent | | ece49ec6-6657-11e6-bafb-c7560f19197d | Metadata agent | compute2 | None | True | UP | neutron-metadata-agent | +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+ Create initial networks ----------------------- .. include:: shared/deploy-provider-initialnetworks.txt Verify network operation ------------------------ .. include:: shared/deploy-provider-verifynetworkoperation.txt Network traffic flow ~~~~~~~~~~~~~~~~~~~~ .. include:: shared/deploy-provider-networktrafficflow.txt North-south scenario: Instance with a fixed IP address ------------------------------------------------------ * The instance resides on compute node 1 and uses provider network 1. * The instance sends a packet to a host on the Internet. 
The following steps involve compute node 1. #. The instance interface (1) forwards the packet to the provider bridge instance port (2) via ``veth`` pair. #. Security group rules (3) on the provider bridge handle firewalling and connection tracking for the packet. #. The VLAN sub-interface port (4) on the provider bridge forwards the packet to the physical network interface (5). #. The physical network interface (5) adds VLAN tag 101 to the packet and forwards it to the physical network infrastructure switch (6). The following steps involve the physical network infrastructure: #. The switch removes VLAN tag 101 from the packet and forwards it to the router (7). #. The router routes the packet from the provider network (8) to the external network (9) and forwards the packet to the switch (10). #. The switch forwards the packet to the external network (11). #. The external network (12) receives the packet. .. image:: figures/deploy-lb-provider-flowns1.png :alt: Provider networks using Linux bridge - network traffic flow - north/south .. note:: Return traffic follows similar steps in reverse. East-west scenario 1: Instances on the same network --------------------------------------------------- Instances on the same network communicate directly between compute nodes containing those instances. * Instance 1 resides on compute node 1 and uses provider network 1. * Instance 2 resides on compute node 2 and uses provider network 1. * Instance 1 sends a packet to instance 2. The following steps involve compute node 1: #. The instance 1 interface (1) forwards the packet to the provider bridge instance port (2) via ``veth`` pair. #. Security group rules (3) on the provider bridge handle firewalling and connection tracking for the packet. #. The VLAN sub-interface port (4) on the provider bridge forwards the packet to the physical network interface (5). #. The physical network interface (5) adds VLAN tag 101 to the packet and forwards it to the physical network infrastructure switch (6). The following steps involve the physical network infrastructure: #. The switch forwards the packet from compute node 1 to compute node 2 (7). The following steps involve compute node 2: #. The physical network interface (8) removes VLAN tag 101 from the packet and forwards it to the VLAN sub-interface port (9) on the provider bridge. #. Security group rules (10) on the provider bridge handle firewalling and connection tracking for the packet. #. The provider bridge instance port (11) forwards the packet to the instance 2 interface (12) via ``veth`` pair. .. image:: figures/deploy-lb-provider-flowew1.png :alt: Provider networks using Linux bridge - network traffic flow - east/west scenario 1 .. note:: Return traffic follows similar steps in reverse. East-west scenario 2: Instances on different networks ----------------------------------------------------- Instances communicate via router on the physical network infrastructure. * Instance 1 resides on compute node 1 and uses provider network 1. * Instance 2 resides on compute node 1 and uses provider network 2. * Instance 1 sends a packet to instance 2. .. note:: Both instances reside on the same compute node to illustrate how VLAN tagging enables multiple logical layer-2 networks to use the same physical layer-2 network. The following steps involve the compute node: #. The instance 1 interface (1) forwards the packet to the provider bridge instance port (2) via ``veth`` pair. #. 
Security group rules (3) on the provider bridge handle firewalling and connection tracking for the packet. #. The VLAN sub-interface port (4) on the provider bridge forwards the packet to the physical network interface (5). #. The physical network interface (5) adds VLAN tag 101 to the packet and forwards it to the physical network infrastructure switch (6). The following steps involve the physical network infrastructure: #. The switch removes VLAN tag 101 from the packet and forwards it to the router (7). #. The router routes the packet from provider network 1 (8) to provider network 2 (9). #. The router forwards the packet to the switch (10). #. The switch adds VLAN tag 102 to the packet and forwards it to compute node 1 (11). The following steps involve the compute node: #. The physical network interface (12) removes VLAN tag 102 from the packet and forwards it to the VLAN sub-interface port (13) on the provider bridge. #. Security group rules (14) on the provider bridge handle firewalling and connection tracking for the packet. #. The provider bridge instance port (15) forwards the packet to the instance 2 interface (16) via ``veth`` pair. .. image:: figures/deploy-lb-provider-flowew2.png :alt: Provider networks using Linux bridge - network traffic flow - east/west scenario 2 .. note:: Return traffic follows similar steps in reverse. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/deploy-lb-selfservice.rst0000644000175000017500000004261700000000000025243 0ustar00coreycorey00000000000000.. _deploy-lb-selfservice: =================================== Linux bridge: Self-service networks =================================== This architecture example augments :ref:`deploy-lb-provider` to support a nearly limitless quantity of entirely virtual networks. Although the Networking service supports VLAN self-service networks, this example focuses on VXLAN self-service networks. For more information on self-service networks, see :ref:`intro-os-networking-selfservice`. .. note:: The Linux bridge agent lacks support for other overlay protocols such as GRE and Geneve. Prerequisites ~~~~~~~~~~~~~ Add one network node with the following components: * Three network interfaces: management, provider, and overlay. * OpenStack Networking Linux bridge layer-2 agent, layer-3 agent, and any dependencies. Modify the compute nodes with the following components: * Add one network interface: overlay. .. note:: You can keep the DHCP and metadata agents on each compute node or move them to the network node. Architecture ~~~~~~~~~~~~ .. image:: figures/deploy-lb-selfservice-overview.png :alt: Self-service networks using Linux bridge - overview The following figure shows components and connectivity for one self-service network and one untagged (flat) provider network. In this particular case, the instance resides on the same compute node as the DHCP agent for the network. If the DHCP agent resides on another compute node, the latter only contains a DHCP namespace and Linux bridge with a port on the overlay physical network interface. .. image:: figures/deploy-lb-selfservice-compconn1.png :alt: Self-service networks using Linux bridge - components and connectivity - one network Example configuration ~~~~~~~~~~~~~~~~~~~~~ Use the following example configuration as a template to add support for self-service networks to an existing operational environment that supports provider networks. Controller node --------------- #. 
In the ``neutron.conf`` file: * Enable routing and allow overlapping IP address ranges. .. code-block:: ini [DEFAULT] service_plugins = router allow_overlapping_ips = True #. In the ``ml2_conf.ini`` file: * Add ``vxlan`` to type drivers and project network types. .. code-block:: ini [ml2] type_drivers = flat,vlan,vxlan tenant_network_types = vxlan * Enable the layer-2 population mechanism driver. .. code-block:: ini [ml2] mechanism_drivers = linuxbridge,l2population * Configure the VXLAN network ID (VNI) range. .. code-block:: ini [ml2_type_vxlan] vni_ranges = VNI_START:VNI_END Replace ``VNI_START`` and ``VNI_END`` with appropriate numerical values. #. Restart the following services: * Server Network node ------------ #. Install the Networking service layer-3 agent. #. In the ``neutron.conf`` file, configure common options: .. include:: shared/deploy-config-neutron-common.txt #. In the ``linuxbridge_agent.ini`` file, configure the layer-2 agent. .. code-block:: ini [linux_bridge] physical_interface_mappings = provider:PROVIDER_INTERFACE [vxlan] enable_vxlan = True l2_population = True local_ip = OVERLAY_INTERFACE_IP_ADDRESS [securitygroup] firewall_driver = iptables .. warning:: By default, Linux uses UDP port ``8472`` for VXLAN tunnel traffic. This default value doesn't follow the IANA standard, which assigned UDP port ``4789`` for VXLAN communication. As a consequence, if this node is part of a mixed deployment, where nodes with both OVS and Linux bridge must communicate over VXLAN tunnels, it is recommended that a line containing ``udp_dstport = 4789`` be added to the [vxlan] section of all the Linux bridge agents. OVS follows the IANA standard. Replace ``PROVIDER_INTERFACE`` with the name of the underlying interface that handles provider networks. For example, ``eth1``. Replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with the IP address of the interface that handles VXLAN overlays for self-service networks. #. In the ``l3_agent.ini`` file, configure the layer-3 agent. .. code-block:: ini [DEFAULT] interface_driver = linuxbridge #. Start the following services: * Linux bridge agent * Layer-3 agent Compute nodes ------------- #. In the ``linuxbridge_agent.ini`` file, enable VXLAN support including layer-2 population. .. code-block:: ini [vxlan] enable_vxlan = True l2_population = True local_ip = OVERLAY_INTERFACE_IP_ADDRESS .. warning:: By default, Linux uses UDP port ``8472`` for VXLAN tunnel traffic. This default value doesn't follow the IANA standard, which assigned UDP port ``4789`` for VXLAN communication. As a consequence, if this node is part of a mixed deployment, where nodes with both OVS and Linux bridge must communicate over VXLAN tunnels, it is recommended that a line containing ``udp_dstport = 4789`` be added to the [vxlan] section of all the Linux bridge agents. OVS follows the IANA standard. Replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with the IP address of the interface that handles VXLAN overlays for self-service networks. #. Restart the following services: * Linux bridge agent Verify service operation ------------------------ #. Source the administrative project credentials. #. Verify presence and operation of the agents. .. 
code-block:: console $ openstack network agent list +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+ | ID | Agent Type | Host | Availability Zone | Alive | State | Binary | +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+ | 09de6af6-c5f1-4548-8b09-18801f068c57 | Linux bridge agent | compute2 | None | True | UP | neutron-linuxbridge-agent | | 188945d1-9e70-4803-a276-df924e0788a4 | Linux bridge agent | compute1 | None | True | UP | neutron-linuxbridge-agent | | e76c440d-d5f6-4316-a674-d689630b629e | DHCP agent | compute1 | nova | True | UP | neutron-dhcp-agent | | e67367de-6657-11e6-86a4-931cd04404bb | DHCP agent | compute2 | nova | True | UP | neutron-dhcp-agent | | e8174cae-6657-11e6-89f0-534ac6d0cb5c | Metadata agent | compute1 | None | True | UP | neutron-metadata-agent | | ece49ec6-6657-11e6-bafb-c7560f19197d | Metadata agent | compute2 | None | True | UP | neutron-metadata-agent | | 598f6357-4331-4da5-a420-0f5be000bec9 | L3 agent | network1 | nova | True | UP | neutron-l3-agent | | f4734e0f-bcd5-4922-a19d-e31d56b0a7ae | Linux bridge agent | network1 | None | True | UP | neutron-linuxbridge-agent | +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+ Create initial networks ----------------------- .. include:: shared/deploy-selfservice-initialnetworks.txt Verify network operation ------------------------ .. include:: shared/deploy-selfservice-verifynetworkoperation.txt .. _deploy-lb-selfservice-networktrafficflow: Network traffic flow ~~~~~~~~~~~~~~~~~~~~ .. include:: shared/deploy-selfservice-networktrafficflow.txt North-south scenario 1: Instance with a fixed IP address -------------------------------------------------------- For instances with a fixed IPv4 address, the network node performs SNAT on north-south traffic passing from self-service to external networks such as the Internet. For instances with a fixed IPv6 address, the network node performs conventional routing of traffic between self-service and external networks. * The instance resides on compute node 1 and uses self-service network 1. * The instance sends a packet to a host on the Internet. The following steps involve compute node 1: #. The instance interface (1) forwards the packet to the self-service bridge instance port (2) via ``veth`` pair. #. Security group rules (3) on the self-service bridge handle firewalling and connection tracking for the packet. #. The self-service bridge forwards the packet to the VXLAN interface (4) which wraps the packet using VNI 101. #. The underlying physical interface (5) for the VXLAN interface forwards the packet to the network node via the overlay network (6). The following steps involve the network node: #. The underlying physical interface (7) for the VXLAN interface forwards the packet to the VXLAN interface (8) which unwraps the packet. #. The self-service bridge router port (9) forwards the packet to the self-service network interface (10) in the router namespace. * For IPv4, the router performs SNAT on the packet which changes the source IP address to the router IP address on the provider network and sends it to the gateway IP address on the provider network via the gateway interface on the provider network (11). 
* For IPv6, the router sends the packet to the next-hop IP address, typically the gateway IP address on the provider network, via the provider gateway interface (11). #. The router forwards the packet to the provider bridge router port (12). #. The VLAN sub-interface port (13) on the provider bridge forwards the packet to the provider physical network interface (14). #. The provider physical network interface (14) adds VLAN tag 101 to the packet and forwards it to the Internet via physical network infrastructure (15). .. note:: Return traffic follows similar steps in reverse. However, without a floating IPv4 address, hosts on the provider or external networks cannot originate connections to instances on the self-service network. .. image:: figures/deploy-lb-selfservice-flowns1.png :alt: Self-service networks using Linux bridge - network traffic flow - north/south scenario 1 North-south scenario 2: Instance with a floating IPv4 address ------------------------------------------------------------- For instances with a floating IPv4 address, the network node performs SNAT on north-south traffic passing from the instance to external networks such as the Internet and DNAT on north-south traffic passing from external networks to the instance. Floating IP addresses and NAT do not apply to IPv6. Thus, the network node routes IPv6 traffic in this scenario. * The instance resides on compute node 1 and uses self-service network 1. * A host on the Internet sends a packet to the instance. The following steps involve the network node: #. The physical network infrastructure (1) forwards the packet to the provider physical network interface (2). #. The provider physical network interface removes VLAN tag 101 and forwards the packet to the VLAN sub-interface on the provider bridge. #. The provider bridge forwards the packet to the self-service router gateway port on the provider network (5). * For IPv4, the router performs DNAT on the packet which changes the destination IP address to the instance IP address on the self-service network and sends it to the gateway IP address on the self-service network via the self-service interface (6). * For IPv6, the router sends the packet to the next-hop IP address, typically the gateway IP address on the self-service network, via the self-service interface (6). #. The router forwards the packet to the self-service bridge router port (7). #. The self-service bridge forwards the packet to the VXLAN interface (8) which wraps the packet using VNI 101. #. The underlying physical interface (9) for the VXLAN interface forwards the packet to the network node via the overlay network (10). The following steps involve the compute node: #. The underlying physical interface (11) for the VXLAN interface forwards the packet to the VXLAN interface (12) which unwraps the packet. #. Security group rules (13) on the self-service bridge handle firewalling and connection tracking for the packet. #. The self-service bridge instance port (14) forwards the packet to the instance interface (15) via ``veth`` pair. .. note:: Egress instance traffic flows similar to north-south scenario 1, except SNAT changes the source IP address of the packet to the floating IPv4 address rather than the router IP address on the provider network. .. 
image:: figures/deploy-lb-selfservice-flowns2.png :alt: Self-service networks using Linux bridge - network traffic flow - north/south scenario 2 East-west scenario 1: Instances on the same network --------------------------------------------------- Instances with a fixed IPv4/IPv6 or floating IPv4 address on the same network communicate directly between compute nodes containing those instances. By default, the VXLAN protocol lacks knowledge of target location and uses multicast to discover it. After discovery, it stores the location in the local forwarding database. In large deployments, the discovery process can generate a significant amount of network traffic that all nodes must process. To eliminate the latter and generally increase efficiency, the Networking service includes the layer-2 population mechanism driver that automatically populates the forwarding database for VXLAN interfaces. The example configuration enables this driver. For more information, see :ref:`config-plugin-ml2`. * Instance 1 resides on compute node 1 and uses self-service network 1. * Instance 2 resides on compute node 2 and uses self-service network 1. * Instance 1 sends a packet to instance 2. The following steps involve compute node 1: #. The instance 1 interface (1) forwards the packet to the self-service bridge instance port (2) via ``veth`` pair. #. Security group rules (3) on the self-service bridge handle firewalling and connection tracking for the packet. #. The self-service bridge forwards the packet to the VXLAN interface (4) which wraps the packet using VNI 101. #. The underlying physical interface (5) for the VXLAN interface forwards the packet to compute node 2 via the overlay network (6). The following steps involve compute node 2: #. The underlying physical interface (7) for the VXLAN interface forwards the packet to the VXLAN interface (8) which unwraps the packet. #. Security group rules (9) on the self-service bridge handle firewalling and connection tracking for the packet. #. The self-service bridge instance port (10) forwards the packet to the instance 2 interface (11) via ``veth`` pair. .. note:: Return traffic follows similar steps in reverse. .. image:: figures/deploy-lb-selfservice-flowew1.png :alt: Self-service networks using Linux bridge - network traffic flow - east/west scenario 1 East-west scenario 2: Instances on different networks ----------------------------------------------------- Instances using a fixed IPv4/IPv6 address or floating IPv4 address communicate via a router on the network node. The self-service networks must reside on the same router. * Instance 1 resides on compute node 1 and uses self-service network 1. * Instance 2 resides on compute node 1 and uses self-service network 2. * Instance 1 sends a packet to instance 2. .. note:: Both instances reside on the same compute node to illustrate how VXLAN enables multiple overlays to use the same layer-3 network. The following steps involve the compute node: #. The instance 1 interface (1) forwards the packet to the self-service bridge instance port (2) via ``veth`` pair. #. Security group rules (3) on the self-service bridge handle firewalling and connection tracking for the packet. #. The self-service bridge forwards the packet to the VXLAN interface (4) which wraps the packet using VNI 101. #. The underlying physical interface (5) for the VXLAN interface forwards the packet to the network node via the overlay network (6). The following steps involve the network node: #. 
The underlying physical interface (7) for the VXLAN interface forwards the packet to the VXLAN interface (8) which unwraps the packet. #. The self-service bridge router port (9) forwards the packet to the self-service network 1 interface (10) in the router namespace. #. The router sends the packet to the next-hop IP address, typically the gateway IP address on self-service network 2, via the self-service network 2 interface (11). #. The router forwards the packet to the self-service network 2 bridge router port (12). #. The self-service network 2 bridge forwards the packet to the VXLAN interface (13) which wraps the packet using VNI 102. #. The physical network interface (14) for the VXLAN interface sends the packet to the compute node via the overlay network (15). The following steps involve the compute node: #. The underlying physical interface (16) for the VXLAN interface sends the packet to the VXLAN interface (17) which unwraps the packet. #. Security group rules (18) on the self-service bridge handle firewalling and connection tracking for the packet. #. The self-service bridge instance port (19) forwards the packet to the instance 2 interface (20) via ``veth`` pair. .. note:: Return traffic follows similar steps in reverse. .. image:: figures/deploy-lb-selfservice-flowew2.png :alt: Self-service networks using Linux bridge - network traffic flow - east/west scenario 2 .. _deploy-lb: ============================= Linux bridge mechanism driver ============================= The Linux bridge mechanism driver uses only Linux bridges and ``veth`` pairs as interconnection devices. A layer-2 agent manages Linux bridges on each compute node and any other node that provides layer-3 (routing), DHCP, metadata, or other network services. .. toctree:: :maxdepth: 2 deploy-lb-provider deploy-lb-selfservice deploy-lb-ha-vrrp .. _deploy-ovs-ha-dvr: ========================================= Open vSwitch: High availability using DVR ========================================= This architecture example augments the self-service deployment example with the Distributed Virtual Router (DVR) high-availability mechanism that provides connectivity between self-service and provider networks on compute nodes rather than network nodes for specific scenarios. For instances with a floating IPv4 address, routing between self-service and provider networks resides completely on the compute nodes to eliminate the single point of failure and performance issues with network nodes. Routing also resides completely on the compute nodes for instances with a fixed or floating IPv4 address using self-service networks on the same distributed virtual router. However, instances with a fixed IP address still rely on the network node for routing and SNAT services between self-service and provider networks.
Consider the following attributes of this high-availability mechanism to determine practicality in your environment: * Only provides connectivity to an instance via the compute node on which the instance resides if the instance resides on a self-service network with a floating IPv4 address. Instances on self-service networks with only an IPv6 address or both IPv4 and IPv6 addresses rely on the network node for IPv6 connectivity. * The instance of a router on each compute node consumes an IPv4 address on the provider network on which it contains a gateway. Prerequisites ~~~~~~~~~~~~~ Modify the compute nodes with the following components: * Install the OpenStack Networking layer-3 agent. .. note:: Consider adding at least one additional network node to provide high-availability for instances with a fixed IP address. See :ref:`config-dvr-snat-ha-ovs` for more information. Architecture ~~~~~~~~~~~~ .. image:: figures/deploy-ovs-ha-dvr-overview.png :alt: High-availability using Open vSwitch with DVR - overview The following figure shows components and connectivity for one self-service network and one untagged (flat) network. In this particular case, the instance resides on the same compute node as the DHCP agent for the network. If the DHCP agent resides on another compute node, the latter only contains a DHCP namespace with a port on the OVS integration bridge. .. image:: figures/deploy-ovs-ha-dvr-compconn1.png :alt: High-availability using Open vSwitch with DVR - components and connectivity - one network Example configuration ~~~~~~~~~~~~~~~~~~~~~ Use the following example configuration as a template to add support for high-availability using DVR to an existing operational environment that supports self-service networks. Controller node --------------- #. In the ``neutron.conf`` file: * Enable distributed routing by default for all routers. .. code-block:: ini [DEFAULT] router_distributed = True .. note:: For a large scale cloud, if your deployment is running DVR with DHCP, we recommend you set ``host_dvr_for_dhcp=False`` to achieve higher L3 agent router processing performance. When this is set to ``False``, DNS functionality will not be available via the DHCP namespace (dnsmasq); a different nameserver will have to be configured, for example, by specifying a value in ``dns_nameservers`` for subnets. #. Restart the following services: * Server Network node ------------ #. In the ``openvswitch_agent.ini`` file, enable distributed routing. .. code-block:: ini [agent] enable_distributed_routing = True #. In the ``l3_agent.ini`` file, configure the layer-3 agent to provide SNAT services. .. code-block:: ini [DEFAULT] agent_mode = dvr_snat #. Restart the following services: * Open vSwitch agent * Layer-3 agent Compute nodes ------------- #. Install the Networking service layer-3 agent. #. In the ``openvswitch_agent.ini`` file, enable distributed routing. .. code-block:: ini [agent] enable_distributed_routing = True #. In the ``l3_agent.ini`` file, configure the layer-3 agent. .. code-block:: ini [DEFAULT] interface_driver = openvswitch agent_mode = dvr #. Restart the following services: * Open vSwitch agent * Layer-3 agent Verify service operation ------------------------ #. Source the administrative project credentials. #. Verify presence and operation of the agents. ..
code-block:: console $ openstack network agent list +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+ | ID | Agent Type | Host | Availability Zone | Alive | State | Binary | +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+ | 05d980f2-a4fc-4815-91e7-a7f7e118c0db | L3 agent | compute1 | nova | True | UP | neutron-l3-agent | | 1236bbcb-e0ba-48a9-80fc-81202ca4fa51 | Metadata agent | compute2 | None | True | UP | neutron-metadata-agent | | 2a2e9a90-51b8-4163-a7d6-3e199ba2374b | L3 agent | compute2 | nova | True | UP | neutron-l3-agent | | 457d6898-b373-4bb3-b41f-59345dcfb5c5 | Open vSwitch agent | compute2 | None | True | UP | neutron-openvswitch-agent | | 513caa68-0391-4e53-a530-082e2c23e819 | Linux bridge agent | compute1 | None | True | UP | neutron-linuxbridge-agent | | 71f15e84-bc47-4c2a-b9fb-317840b2d753 | DHCP agent | compute2 | nova | True | UP | neutron-dhcp-agent | | 8805b962-de95-4e40-bdc2-7a0add7521e8 | L3 agent | network1 | nova | True | UP | neutron-l3-agent | | a33cac5a-0266-48f6-9cac-4cef4f8b0358 | Open vSwitch agent | network1 | None | True | UP | neutron-openvswitch-agent | | a6c69690-e7f7-4e56-9831-1282753e5007 | Metadata agent | compute1 | None | True | UP | neutron-metadata-agent | | af11f22f-a9f4-404f-9fd8-cd7ad55c0f68 | DHCP agent | compute1 | nova | True | UP | neutron-dhcp-agent | | bcfc977b-ec0e-4ba9-be62-9489b4b0e6f1 | Open vSwitch agent | compute1 | None | True | UP | neutron-openvswitch-agent | +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+ Create initial networks ----------------------- Similar to the self-service deployment example, this configuration supports multiple VXLAN self-service networks. After enabling high-availability, all additional routers use distributed routing. The following procedure creates an additional self-service network and router. The Networking service also supports adding distributed routing to existing routers. #. Source regular (non-administrative) project credentials. #. Create a self-service network. .. code-block:: console $ openstack network create selfservice2 +-------------------------+--------------+ | Field | Value | +-------------------------+--------------+ | admin_state_up | UP | | mtu | 1450 | | name | selfservice2 | | port_security_enabled | True | | revision_number | 1 | | router:external | Internal | | shared | False | | status | ACTIVE | | tags | [] | +-------------------------+--------------+ #. Create an IPv4 subnet on the self-service network. .. code-block:: console $ openstack subnet create --subnet-range 192.0.2.0/24 \ --network selfservice2 --dns-nameserver 8.8.4.4 selfservice2-v4 +-------------------+---------------------------+ | Field | Value | +-------------------+---------------------------+ | allocation_pools | 192.0.2.2-192.0.2.254 | | cidr | 192.0.2.0/24 | | dns_nameservers | 8.8.4.4 | | enable_dhcp | True | | gateway_ip | 192.0.2.1 | | ip_version | 4 | | name | selfservice2-v4 | | revision_number | 1 | | tags | [] | +-------------------+---------------------------+ #. Create an IPv6 subnet on the self-service network. ..
   .. code-block:: console

      $ openstack subnet create --subnet-range fd00:192:0:2::/64 --ip-version 6 \
        --ipv6-ra-mode slaac --ipv6-address-mode slaac --network selfservice2 \
        --dns-nameserver 2001:4860:4860::8844 selfservice2-v6
      +-------------------+------------------------------------------------------+
      | Field             | Value                                                |
      +-------------------+------------------------------------------------------+
      | allocation_pools  | fd00:192:0:2::2-fd00:192:0:2:ffff:ffff:ffff:ffff     |
      | cidr              | fd00:192:0:2::/64                                    |
      | dns_nameservers   | 2001:4860:4860::8844                                 |
      | enable_dhcp       | True                                                 |
      | gateway_ip        | fd00:192:0:2::1                                      |
      | ip_version        | 6                                                    |
      | ipv6_address_mode | slaac                                                |
      | ipv6_ra_mode      | slaac                                                |
      | name              | selfservice2-v6                                      |
      | revision_number   | 1                                                    |
      | tags              | []                                                   |
      +-------------------+------------------------------------------------------+

#. Create a router.

   .. code-block:: console

      $ openstack router create router2
      +-----------------------+---------+
      | Field                 | Value   |
      +-----------------------+---------+
      | admin_state_up        | UP      |
      | name                  | router2 |
      | revision_number       | 1       |
      | status                | ACTIVE  |
      | tags                  | []      |
      +-----------------------+---------+

#. Add the IPv4 and IPv6 subnets as interfaces on the router.

   .. code-block:: console

      $ openstack router add subnet router2 selfservice2-v4
      $ openstack router add subnet router2 selfservice2-v6

   .. note::

      These commands provide no output.

#. Add the provider network as a gateway on the router.

   .. code-block:: console

      $ openstack router set router2 --external-gateway provider1

Verify network operation
------------------------

#. Source the administrative project credentials.

#. Verify distributed routing on the router.

   .. code-block:: console

      $ openstack router show router2
      +-------------------------+---------+
      | Field                   | Value   |
      +-------------------------+---------+
      | admin_state_up          | UP      |
      | distributed             | True    |
      | ha                      | False   |
      | name                    | router2 |
      | revision_number         | 1       |
      | status                  | ACTIVE  |
      +-------------------------+---------+

#. On each compute node, verify creation of a ``qrouter`` namespace with the same ID.

   Compute node 1:

   .. code-block:: console

      # ip netns
      qrouter-78d2f628-137c-4f26-a257-25fc20f203c1

   Compute node 2:

   .. code-block:: console

      # ip netns
      qrouter-78d2f628-137c-4f26-a257-25fc20f203c1

#. On the network node, verify creation of the ``snat`` and ``qrouter`` namespaces with the same ID.

   .. code-block:: console

      # ip netns
      snat-78d2f628-137c-4f26-a257-25fc20f203c1
      qrouter-78d2f628-137c-4f26-a257-25fc20f203c1

   .. note::

      The namespace for router 1 from :ref:`deploy-ovs-selfservice` should also appear on network node 1 because of creation prior to enabling distributed routing.

#. Launch an instance with an interface on the additional self-service network. For example, a CirrOS image using flavor ID 1.

   .. code-block:: console

      $ openstack server create --flavor 1 --image cirros --nic net-id=NETWORK_ID selfservice-instance2

   Replace ``NETWORK_ID`` with the ID of the additional self-service network.
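If you do not already have the network ID at hand, one way to retrieve it uses the client's standard output-formatting options (``-f value -c id``):

.. code-block:: console

   $ openstack network show selfservice2 -f value -c id

#. Determine the IPv4 and IPv6 addresses of the instance.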
   .. code-block:: console

      $ openstack server list
      +--------------------------------------+-----------------------+--------+----------------------------------------------------------+--------+---------+
      | ID                                   | Name                  | Status | Networks                                                 | Image  | Flavor  |
      +--------------------------------------+-----------------------+--------+----------------------------------------------------------+--------+---------+
      | bde64b00-77ae-41b9-b19a-cd8e378d9f8b | selfservice-instance2 | ACTIVE | selfservice2=fd00:192:0:2:f816:3eff:fe71:e93e, 192.0.2.4 | cirros | m1.tiny |
      +--------------------------------------+-----------------------+--------+----------------------------------------------------------+--------+---------+

#. Create a floating IPv4 address on the provider network.

   .. code-block:: console

      $ openstack floating ip create provider1
      +-------------------+--------------------------------------+
      | Field             | Value                                |
      +-------------------+--------------------------------------+
      | fixed_ip          | None                                 |
      | id                | 0174056a-fa56-4403-b1ea-b5151a31191f |
      | instance_id       | None                                 |
      | ip                | 203.0.113.17                         |
      | pool              | provider1                            |
      | revision_number   | 1                                    |
      | tags              | []                                   |
      +-------------------+--------------------------------------+

#. Associate the floating IPv4 address with the instance.

   .. code-block:: console

      $ openstack server add floating ip selfservice-instance2 203.0.113.17

   .. note::

      This command provides no output.

#. On the compute node containing the instance, verify creation of the ``fip`` namespace with the same ID as the provider network.

   .. code-block:: console

      # ip netns
      fip-4bfa3075-b4b2-4f7d-b88e-df1113942d43

Network traffic flow
~~~~~~~~~~~~~~~~~~~~

.. include:: shared/deploy-selfservice-networktrafficflow.txt

This section only contains flow scenarios that benefit from distributed virtual routing or that differ from conventional operation. For other flow scenarios, see :ref:`deploy-ovs-selfservice-networktrafficflow`.

North-south scenario 1: Instance with a fixed IP address
--------------------------------------------------------

Similar to :ref:`deploy-ovs-selfservice-networktrafficflow-ns1`, except the router namespace on the network node becomes the SNAT namespace. The network node still contains the router namespace, but it serves no purpose in this case.

.. image:: figures/deploy-ovs-ha-dvr-flowns1.png
   :alt: High-availability using Open vSwitch with DVR - network traffic flow - north/south scenario 1

North-south scenario 2: Instance with a floating IPv4 address
-------------------------------------------------------------

For instances with a floating IPv4 address using a self-service network on a distributed router, the compute node containing the instance performs SNAT on north-south traffic passing from the instance to external networks such as the Internet and DNAT on north-south traffic passing from external networks to the instance.

Floating IP addresses and NAT do not apply to IPv6. Thus, the network node routes IPv6 north-south traffic passing between the instance and external networks such as the Internet in this scenario.

* Instance 1 resides on compute node 1 and uses self-service network 1.
* A host on the Internet sends a packet to the instance.
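If you want to observe this scenario on a live system, one option is to inspect the floating IP namespace on the compute node and capture traffic inside it. This is a sketch only: the namespace ID reuses the earlier example, and the ``fg-`` (floating gateway) interface name must be taken from the actual ``ip addr`` output.

.. code-block:: console

   # ip netns exec fip-4bfa3075-b4b2-4f7d-b88e-df1113942d43 ip addr
   # ip netns exec fip-4bfa3075-b4b2-4f7d-b88e-df1113942d43 tcpdump -lni FG_INTERFACE icmp

Replace ``FG_INTERFACE`` with the ``fg-`` interface shown by the first command.

The following steps involve the compute node:

#. The physical network infrastructure (1) forwards the packet to the provider physical network interface (2).

#. The provider physical network interface forwards the packet to the OVS provider bridge provider network port (3).

#. The OVS provider bridge swaps actual VLAN tag 101 with the internal VLAN tag.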
#. The OVS provider bridge ``phy-br-provider`` port (4) forwards the packet to the OVS integration bridge ``int-br-provider`` port (5).

#. The OVS integration bridge port for the provider network (6) removes the internal VLAN tag and forwards the packet to the provider network interface (7) in the floating IP namespace. This interface responds to any ARP requests for the instance floating IPv4 address.

#. The floating IP namespace routes the packet (8) to the distributed router namespace (9) using a pair of IP addresses on the DVR internal network. This namespace contains the instance floating IPv4 address.

#. The router performs DNAT on the packet which changes the destination IP address to the instance IP address on the self-service network via the self-service network interface (10).

#. The router forwards the packet to the OVS integration bridge port for the self-service network (11).

#. The OVS integration bridge adds an internal VLAN tag to the packet.

#. The OVS integration bridge removes the internal VLAN tag from the packet.

#. The OVS integration bridge security group port (12) forwards the packet to the security group bridge OVS port (13) via ``veth`` pair.

#. Security group rules (14) on the security group bridge handle firewalling and connection tracking for the packet.

#. The security group bridge instance port (15) forwards the packet to the instance interface (16) via ``veth`` pair.

.. image:: figures/deploy-ovs-ha-dvr-flowns2.png
   :alt: High-availability using Open vSwitch with DVR - network traffic flow - north/south scenario 2

.. note::

   Egress traffic follows similar steps in reverse, except SNAT changes the source IPv4 address of the packet to the floating IPv4 address.

East-west scenario 1: Instances on different networks on the same router
-------------------------------------------------------------------------

Instances with a fixed IPv4/IPv6 address or floating IPv4 address on the same compute node communicate via the router on the compute node. Instances on different compute nodes communicate via an instance of the router on each compute node.

.. note::

   This scenario places the instances on different compute nodes to show the most complex situation.

The following steps involve compute node 1:

#. The instance interface (1) forwards the packet to the security group bridge instance port (2) via ``veth`` pair.

#. Security group rules (3) on the security group bridge handle firewalling and connection tracking for the packet.

#. The security group bridge OVS port (4) forwards the packet to the OVS integration bridge security group port (5) via ``veth`` pair.

#. The OVS integration bridge adds an internal VLAN tag to the packet.

#. The OVS integration bridge port for self-service network 1 (6) removes the internal VLAN tag and forwards the packet to the self-service network 1 interface in the distributed router namespace (7).

#. The distributed router namespace routes the packet to self-service network 2.

#. The self-service network 2 interface in the distributed router namespace (8) forwards the packet to the OVS integration bridge port for self-service network 2 (9).

#. The OVS integration bridge adds an internal VLAN tag to the packet.

#. The OVS integration bridge exchanges the internal VLAN tag for an internal tunnel ID.

#. The OVS integration bridge ``patch-tun`` port (10) forwards the packet to the OVS tunnel bridge ``patch-int`` port (11).

#. The OVS tunnel bridge (12) wraps the packet using VNI 101.
#. The underlying physical interface (13) for overlay networks forwards the packet to compute node 2 via the overlay network (14).

The following steps involve compute node 2:

#. The underlying physical interface (15) for overlay networks forwards the packet to the OVS tunnel bridge (16).

#. The OVS tunnel bridge unwraps the packet and adds an internal tunnel ID to it.

#. The OVS tunnel bridge exchanges the internal tunnel ID for an internal VLAN tag.

#. The OVS tunnel bridge ``patch-int`` patch port (17) forwards the packet to the OVS integration bridge ``patch-tun`` patch port (18).

#. The OVS integration bridge removes the internal VLAN tag from the packet.

#. The OVS integration bridge security group port (19) forwards the packet to the security group bridge OVS port (20) via ``veth`` pair.

#. Security group rules (21) on the security group bridge handle firewalling and connection tracking for the packet.

#. The security group bridge instance port (22) forwards the packet to the instance 2 interface (23) via ``veth`` pair.

.. note::

   Routing between self-service networks occurs on the compute node containing the instance sending the packet. In this scenario, routing occurs on compute node 1 for packets from instance 1 to instance 2 and on compute node 2 for packets from instance 2 to instance 1.

.. image:: figures/deploy-ovs-ha-dvr-flowew1.png
   :alt: High-availability using Open vSwitch with DVR - network traffic flow - east/west scenario 1

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/deploy-ovs-ha-vrrp.rst0000644000175000017500000001405100000000000024511 0ustar00coreycorey00000000000000

.. _deploy-ovs-ha-vrrp:

==========================================
Open vSwitch: High availability using VRRP
==========================================

.. include:: shared/deploy-ha-vrrp.txt

Prerequisites
~~~~~~~~~~~~~

Add one network node with the following components:

* Three network interfaces: management, provider, and overlay.
* OpenStack Networking layer-2 agent, layer-3 agent, and any dependencies.

.. note::

   You can keep the DHCP and metadata agents on each compute node or move them to the network nodes.

Architecture
~~~~~~~~~~~~

.. image:: figures/deploy-ovs-ha-vrrp-overview.png
   :alt: High-availability using VRRP with Open vSwitch - overview

The following figure shows components and connectivity for one self-service network and one untagged (flat) network. The master router resides on network node 1. In this particular case, the instance resides on the same compute node as the DHCP agent for the network. If the DHCP agent resides on another compute node, the latter only contains a DHCP namespace with a port on the OVS integration bridge.

.. image:: figures/deploy-ovs-ha-vrrp-compconn1.png
   :alt: High-availability using VRRP with Open vSwitch - components and connectivity - one network

Example configuration
~~~~~~~~~~~~~~~~~~~~~

Use the following example configuration as a template to add support for high-availability using VRRP to an existing operational environment that supports self-service networks.

Controller node
---------------

#. In the ``neutron.conf`` file:

   * Enable VRRP.

     .. code-block:: ini

        [DEFAULT]
        l3_ha = True

#. Restart the following services:

   * Server

Network node 1
--------------

No changes.

Network node 2
--------------

#. Install the Networking service OVS layer-2 agent and layer-3 agent.

#. Install OVS (a packaging sketch follows this step).
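Package and service names for OVS differ between distributions, so the following is only a rough sketch; ``openvswitch-switch`` (Debian/Ubuntu) and ``openvswitch`` (RHEL/CentOS with RDO repositories) are assumptions to verify against your packaging:

.. code-block:: console

   # apt install openvswitch-switch      # Debian/Ubuntu
   # dnf install openvswitch             # RHEL/CentOS (RDO)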
#. In the ``neutron.conf`` file, configure common options:

   .. include:: shared/deploy-config-neutron-common.txt

#. Start the following services:

   * OVS

#. Create the OVS provider bridge ``br-provider``:

   .. code-block:: console

      $ ovs-vsctl add-br br-provider

#. Add the provider network interface as a port on the OVS provider bridge ``br-provider``:

   .. code-block:: console

      $ ovs-vsctl add-port br-provider PROVIDER_INTERFACE

   Replace ``PROVIDER_INTERFACE`` with the name of the underlying interface that handles provider networks. For example, ``eth1``.

#. In the ``openvswitch_agent.ini`` file, configure the layer-2 agent.

   .. code-block:: ini

      [ovs]
      bridge_mappings = provider:br-provider
      local_ip = OVERLAY_INTERFACE_IP_ADDRESS

      [agent]
      tunnel_types = vxlan
      l2_population = True

      [securitygroup]
      firewall_driver = iptables_hybrid

   Replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with the IP address of the interface that handles VXLAN overlays for self-service networks.

#. In the ``l3_agent.ini`` file, configure the layer-3 agent.

   .. code-block:: ini

      [DEFAULT]
      interface_driver = openvswitch

#. Start the following services:

   * Open vSwitch agent
   * Layer-3 agent

Compute nodes
-------------

No changes.

Verify service operation
------------------------

#. Source the administrative project credentials.

#. Verify presence and operation of the agents.

   .. code-block:: console

      $ openstack network agent list
      +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
      | ID                                   | Agent Type         | Host     | Availability Zone | Alive | State | Binary                    |
      +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+
      | 1236bbcb-e0ba-48a9-80fc-81202ca4fa51 | Metadata agent     | compute2 | None              | True  | UP    | neutron-metadata-agent    |
      | 457d6898-b373-4bb3-b41f-59345dcfb5c5 | Open vSwitch agent | compute2 | None              | True  | UP    | neutron-openvswitch-agent |
      | 71f15e84-bc47-4c2a-b9fb-317840b2d753 | DHCP agent         | compute2 | nova              | True  | UP    | neutron-dhcp-agent        |
      | 8805b962-de95-4e40-bdc2-7a0add7521e8 | L3 agent           | network1 | nova              | True  | UP    | neutron-l3-agent          |
      | a33cac5a-0266-48f6-9cac-4cef4f8b0358 | Open vSwitch agent | network1 | None              | True  | UP    | neutron-openvswitch-agent |
      | a6c69690-e7f7-4e56-9831-1282753e5007 | Metadata agent     | compute1 | None              | True  | UP    | neutron-metadata-agent    |
      | af11f22f-a9f4-404f-9fd8-cd7ad55c0f68 | DHCP agent         | compute1 | nova              | True  | UP    | neutron-dhcp-agent        |
      | bcfc977b-ec0e-4ba9-be62-9489b4b0e6f1 | Open vSwitch agent | compute1 | None              | True  | UP    | neutron-openvswitch-agent |
      | 7f00d759-f2c9-494a-9fbf-fd9118104d03 | Open vSwitch agent | network2 | None              | True  | UP    | neutron-openvswitch-agent |
      | b28d8818-9e32-4888-930b-29addbdd2ef9 | L3 agent           | network2 | nova              | True  | UP    | neutron-l3-agent          |
      +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+

Create initial networks
-----------------------

.. include:: shared/deploy-ha-vrrp-initialnetworks.txt

Verify network operation
------------------------

.. include:: shared/deploy-ha-vrrp-verifynetworkoperation.txt

Verify failover operation
-------------------------

.. include:: shared/deploy-ha-vrrp-verifyfailoveroperation.txt
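As a quick complement to the shared failover procedure above, recent ``openstack`` client versions can list the L3 agents hosting a specific router; the ``--long`` output includes an HA state column showing which agent currently holds the master role (``router1`` is an assumed router name from the shared example):

.. code-block:: console

   $ openstack network agent list --router router1 --long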
Keepalived VRRP health check
----------------------------

.. include:: shared/keepalived-vrrp-healthcheck.txt

Network traffic flow
~~~~~~~~~~~~~~~~~~~~

This high-availability mechanism simply augments :ref:`deploy-ovs-selfservice` with failover of layer-3 services to another router if the master router fails. Thus, you can reference :ref:`Self-service network traffic flow <deploy-ovs-selfservice-networktrafficflow>` for normal operation.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/deploy-ovs-provider.rst0000644000175000017500000003717200000000000024775 0ustar00coreycorey00000000000000

.. _deploy-ovs-provider:

===============================
Open vSwitch: Provider networks
===============================

This architecture example provides layer-2 connectivity between instances and the physical network infrastructure using VLAN (802.1q) tagging. It supports one untagged (flat) network and up to 4095 tagged (VLAN) networks. The actual quantity of VLAN networks depends on the physical network infrastructure. For more information on provider networks, see :ref:`intro-os-networking-provider`.

.. warning::

   Linux distributions often package older releases of Open vSwitch that can introduce issues during operation with the Networking service. We recommend using at least the latest long-term stable (LTS) release of Open vSwitch for the best experience and support from Open vSwitch. See `<https://www.openvswitch.org>`__ for available releases and the Open vSwitch installation instructions for more details.

Prerequisites
~~~~~~~~~~~~~

One controller node with the following components:

* Two network interfaces: management and provider.
* OpenStack Networking server service and ML2 plug-in.

Two compute nodes with the following components:

* Two network interfaces: management and provider.
* OpenStack Networking Open vSwitch (OVS) layer-2 agent, DHCP agent, metadata agent, and any dependencies including OVS.

.. note::

   Larger deployments typically deploy the DHCP and metadata agents on a subset of compute nodes to increase performance and redundancy. However, too many agents can overwhelm the message bus. Also, to further simplify any deployment, you can omit the metadata agent and use a configuration drive to provide metadata to instances.

Architecture
~~~~~~~~~~~~

.. image:: figures/deploy-ovs-provider-overview.png
   :alt: Provider networks using OVS - overview

The following figure shows components and connectivity for one untagged (flat) network. In this particular case, the instance resides on the same compute node as the DHCP agent for the network. If the DHCP agent resides on another compute node, the latter only contains a DHCP namespace with a port on the OVS integration bridge.

.. image:: figures/deploy-ovs-provider-compconn1.png
   :alt: Provider networks using OVS - components and connectivity - one network

The following figure describes virtual connectivity among components for two tagged (VLAN) networks. Essentially, all networks use a single OVS integration bridge with different internal VLAN tags. The internal VLAN tags almost always differ from the network VLAN assignment in the Networking service. Similar to the untagged network case, the DHCP agent may reside on a different compute node.

.. image:: figures/deploy-ovs-provider-compconn2.png
   :alt: Provider networks using OVS - components and connectivity - multiple networks

.. note::

   These figures omit the controller node because it does not handle instance network traffic.
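Given the version caveat in the warning above, it can be worth confirming which Open vSwitch release is actually installed on each node before proceeding; the version line below is illustrative:

.. code-block:: console

   $ ovs-vsctl --version
   ovs-vsctl (Open vSwitch) 2.13.0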
Example configuration
~~~~~~~~~~~~~~~~~~~~~

Use the following example configuration as a template to deploy provider networks in your environment.

Controller node
---------------

#. Install the Networking service components that provide the ``neutron-server`` service and ML2 plug-in.

#. In the ``neutron.conf`` file:

   * Configure common options:

     .. include:: shared/deploy-config-neutron-common.txt

   * Disable service plug-ins because provider networks do not require any. However, this breaks portions of the dashboard that manage the Networking service. See the latest `Install Tutorials and Guides <../install/>`__ for more information.

     .. code-block:: ini

        [DEFAULT]
        service_plugins =

   * Enable two DHCP agents per network so both compute nodes can provide DHCP service for provider networks.

     .. code-block:: ini

        [DEFAULT]
        dhcp_agents_per_network = 2

   * If necessary, :ref:`configure MTU `.

#. In the ``ml2_conf.ini`` file:

   * Configure drivers and network types:

     .. code-block:: ini

        [ml2]
        type_drivers = flat,vlan
        tenant_network_types =
        mechanism_drivers = openvswitch
        extension_drivers = port_security

   * Configure network mappings:

     .. code-block:: ini

        [ml2_type_flat]
        flat_networks = provider

        [ml2_type_vlan]
        network_vlan_ranges = provider

   .. note::

      The ``tenant_network_types`` option contains no value because the architecture does not support self-service networks.

   .. note::

      The ``provider`` value in the ``network_vlan_ranges`` option lacks VLAN ID ranges to support use of arbitrary VLAN IDs.

#. Populate the database.

   .. code-block:: console

      # su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
        --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

#. Start the following services:

   * Server

Compute nodes
-------------

#. Install the Networking service OVS layer-2 agent, DHCP agent, and metadata agent.

#. Install OVS.

#. In the ``neutron.conf`` file, configure common options:

   .. include:: shared/deploy-config-neutron-common.txt

#. In the ``openvswitch_agent.ini`` file, configure the OVS agent:

   .. code-block:: ini

      [ovs]
      bridge_mappings = provider:br-provider

      [securitygroup]
      firewall_driver = iptables_hybrid

#. In the ``dhcp_agent.ini`` file, configure the DHCP agent:

   .. code-block:: ini

      [DEFAULT]
      interface_driver = openvswitch
      enable_isolated_metadata = True
      force_metadata = True

   .. note::

      The ``force_metadata`` option forces the DHCP agent to provide a host route to the metadata service on ``169.254.169.254`` regardless of whether the subnet contains an interface on a router, thus maintaining similar and predictable metadata behavior among subnets.

#. In the ``metadata_agent.ini`` file, configure the metadata agent:

   .. code-block:: ini

      [DEFAULT]
      nova_metadata_host = controller
      metadata_proxy_shared_secret = METADATA_SECRET

   The value of ``METADATA_SECRET`` must match the value of the same option in the ``[neutron]`` section of the ``nova.conf`` file.

#. Start the following services:

   * OVS

#. Create the OVS provider bridge ``br-provider`` (a verification sketch follows this procedure):

   .. code-block:: console

      $ ovs-vsctl add-br br-provider

#. Add the provider network interface as a port on the OVS provider bridge ``br-provider``:

   .. code-block:: console

      $ ovs-vsctl add-port br-provider PROVIDER_INTERFACE

   Replace ``PROVIDER_INTERFACE`` with the name of the underlying interface that handles provider networks. For example, ``eth1``.

#. Start the following services:

   * OVS agent
   * DHCP agent
   * Metadata agent
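Before checking the agents, you can confirm that the provider bridge contains the physical interface on each compute node; ``br-provider`` and ``eth1`` follow the earlier example, and the output is illustrative:

.. code-block:: console

   $ ovs-vsctl list-ports br-provider
   eth1

Verify service operation
------------------------

#. Source the administrative project credentials.

#.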
Verify presence and operation of the agents: .. code-block:: console $ openstack network agent list +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+ | ID | Agent Type | Host | Availability Zone | Alive | State | Binary | +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+ | 1236bbcb-e0ba-48a9-80fc-81202ca4fa51 | Metadata agent | compute2 | None | True | UP | neutron-metadata-agent | | 457d6898-b373-4bb3-b41f-59345dcfb5c5 | Open vSwitch agent | compute2 | None | True | UP | neutron-openvswitch-agent | | 71f15e84-bc47-4c2a-b9fb-317840b2d753 | DHCP agent | compute2 | nova | True | UP | neutron-dhcp-agent | | a6c69690-e7f7-4e56-9831-1282753e5007 | Metadata agent | compute1 | None | True | UP | neutron-metadata-agent | | af11f22f-a9f4-404f-9fd8-cd7ad55c0f68 | DHCP agent | compute1 | nova | True | UP | neutron-dhcp-agent | | bcfc977b-ec0e-4ba9-be62-9489b4b0e6f1 | Open vSwitch agent | compute1 | None | True | UP | neutron-openvswitch-agent | +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+ Create initial networks ----------------------- .. include:: shared/deploy-provider-initialnetworks.txt Verify network operation ------------------------ .. include:: shared/deploy-provider-verifynetworkoperation.txt Network traffic flow ~~~~~~~~~~~~~~~~~~~~ .. include:: shared/deploy-provider-networktrafficflow.txt North-south ----------- * The instance resides on compute node 1 and uses provider network 1. * The instance sends a packet to a host on the Internet. The following steps involve compute node 1. #. The instance interface (1) forwards the packet to the security group bridge instance port (2) via ``veth`` pair. #. Security group rules (3) on the security group bridge handle firewalling and connection tracking for the packet. #. The security group bridge OVS port (4) forwards the packet to the OVS integration bridge security group port (5) via ``veth`` pair. #. The OVS integration bridge adds an internal VLAN tag to the packet. #. The OVS integration bridge ``int-br-provider`` patch port (6) forwards the packet to the OVS provider bridge ``phy-br-provider`` patch port (7). #. The OVS provider bridge swaps the internal VLAN tag with actual VLAN tag 101. #. The OVS provider bridge provider network port (8) forwards the packet to the physical network interface (9). #. The physical network interface forwards the packet to the physical network infrastructure switch (10). The following steps involve the physical network infrastructure: #. The switch removes VLAN tag 101 from the packet and forwards it to the router (11). #. The router routes the packet from the provider network (12) to the external network (13) and forwards the packet to the switch (14). #. The switch forwards the packet to the external network (15). #. The external network (16) receives the packet. .. image:: figures/deploy-ovs-provider-flowns1.png :alt: Provider networks using Open vSwitch - network traffic flow - north/south .. note:: Return traffic follows similar steps in reverse. East-west scenario 1: Instances on the same network --------------------------------------------------- Instances on the same network communicate directly between compute nodes containing those instances. * Instance 1 resides on compute node 1 and uses provider network 1. 
* Instance 2 resides on compute node 2 and uses provider network 1. * Instance 1 sends a packet to instance 2. The following steps involve compute node 1: #. The instance 1 interface (1) forwards the packet to the security group bridge instance port (2) via ``veth`` pair. #. Security group rules (3) on the security group bridge handle firewalling and connection tracking for the packet. #. The security group bridge OVS port (4) forwards the packet to the OVS integration bridge security group port (5) via ``veth`` pair. #. The OVS integration bridge adds an internal VLAN tag to the packet. #. The OVS integration bridge ``int-br-provider`` patch port (6) forwards the packet to the OVS provider bridge ``phy-br-provider`` patch port (7). #. The OVS provider bridge swaps the internal VLAN tag with actual VLAN tag 101. #. The OVS provider bridge provider network port (8) forwards the packet to the physical network interface (9). #. The physical network interface forwards the packet to the physical network infrastructure switch (10). The following steps involve the physical network infrastructure: #. The switch forwards the packet from compute node 1 to compute node 2 (11). The following steps involve compute node 2: #. The physical network interface (12) forwards the packet to the OVS provider bridge provider network port (13). #. The OVS provider bridge ``phy-br-provider`` patch port (14) forwards the packet to the OVS integration bridge ``int-br-provider`` patch port (15). #. The OVS integration bridge swaps the actual VLAN tag 101 with the internal VLAN tag. #. The OVS integration bridge security group port (16) forwards the packet to the security group bridge OVS port (17). #. Security group rules (18) on the security group bridge handle firewalling and connection tracking for the packet. #. The security group bridge instance port (19) forwards the packet to the instance 2 interface (20) via ``veth`` pair. .. image:: figures/deploy-ovs-provider-flowew1.png :alt: Provider networks using Open vSwitch - network traffic flow - east/west scenario 1 .. note:: Return traffic follows similar steps in reverse. East-west scenario 2: Instances on different networks ----------------------------------------------------- Instances communicate via router on the physical network infrastructure. * Instance 1 resides on compute node 1 and uses provider network 1. * Instance 2 resides on compute node 1 and uses provider network 2. * Instance 1 sends a packet to instance 2. .. note:: Both instances reside on the same compute node to illustrate how VLAN tagging enables multiple logical layer-2 networks to use the same physical layer-2 network. The following steps involve the compute node: #. The instance 1 interface (1) forwards the packet to the security group bridge instance port (2) via ``veth`` pair. #. Security group rules (3) on the security group bridge handle firewalling and connection tracking for the packet. #. The security group bridge OVS port (4) forwards the packet to the OVS integration bridge security group port (5) via ``veth`` pair. #. The OVS integration bridge adds an internal VLAN tag to the packet. #. The OVS integration bridge ``int-br-provider`` patch port (6) forwards the packet to the OVS provider bridge ``phy-br-provider`` patch port (7). #. The OVS provider bridge swaps the internal VLAN tag with actual VLAN tag 101. #. The OVS provider bridge provider network port (8) forwards the packet to the physical network interface (9). #. 
The physical network interface forwards the packet to the physical network infrastructure switch (10).

The following steps involve the physical network infrastructure:

#. The switch removes VLAN tag 101 from the packet and forwards it to the router (11).

#. The router routes the packet from provider network 1 (12) to provider network 2 (13).

#. The router forwards the packet to the switch (14).

#. The switch adds VLAN tag 102 to the packet and forwards it to compute node 1 (15).

The following steps involve the compute node:

#. The physical network interface (16) forwards the packet to the OVS provider bridge provider network port (17).

#. The OVS provider bridge ``phy-br-provider`` patch port (18) forwards the packet to the OVS integration bridge ``int-br-provider`` patch port (19).

#. The OVS integration bridge swaps the actual VLAN tag 102 with the internal VLAN tag.

#. The OVS integration bridge security group port (20) removes the internal VLAN tag and forwards the packet to the security group bridge OVS port (21).

#. Security group rules (22) on the security group bridge handle firewalling and connection tracking for the packet.

#. The security group bridge instance port (23) forwards the packet to the instance 2 interface (24) via ``veth`` pair.

.. image:: figures/deploy-ovs-provider-flowew2.png
   :alt: Provider networks using Open vSwitch - network traffic flow - east/west scenario 2

.. note::

   Return traffic follows similar steps in reverse.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/deploy-ovs-selfservice.rst0000644000175000017500000005167000000000000025452 0ustar00coreycorey00000000000000

.. _deploy-ovs-selfservice:

===================================
Open vSwitch: Self-service networks
===================================

This architecture example augments :ref:`deploy-ovs-provider` to support a nearly limitless quantity of entirely virtual networks. Although the Networking service supports VLAN self-service networks, this example focuses on VXLAN self-service networks. For more information on self-service networks, see :ref:`intro-os-networking-selfservice`.

Prerequisites
~~~~~~~~~~~~~

Add one network node with the following components:

* Three network interfaces: management, provider, and overlay.
* OpenStack Networking Open vSwitch (OVS) layer-2 agent, layer-3 agent, and any dependencies including OVS.

Modify the compute nodes with the following components:

* Add one network interface: overlay.

.. note::

   You can keep the DHCP and metadata agents on each compute node or move them to the network node.

Architecture
~~~~~~~~~~~~

.. image:: figures/deploy-ovs-selfservice-overview.png
   :alt: Self-service networks using OVS - overview

The following figure shows components and connectivity for one self-service network and one untagged (flat) provider network. In this particular case, the instance resides on the same compute node as the DHCP agent for the network. If the DHCP agent resides on another compute node, the latter only contains a DHCP namespace with a port on the OVS integration bridge.

.. image:: figures/deploy-ovs-selfservice-compconn1.png
   :alt: Self-service networks using OVS - components and connectivity - one network

Example configuration
~~~~~~~~~~~~~~~~~~~~~

Use the following example configuration as a template to add support for self-service networks to an existing operational environment that supports provider networks.

Controller node
---------------
#. In the ``neutron.conf`` file:

   * Enable routing and allow overlapping IP address ranges.

     .. code-block:: ini

        [DEFAULT]
        service_plugins = router
        allow_overlapping_ips = True

#. In the ``ml2_conf.ini`` file:

   * Add ``vxlan`` to type drivers and project network types.

     .. code-block:: ini

        [ml2]
        type_drivers = flat,vlan,vxlan
        tenant_network_types = vxlan

   * Enable the layer-2 population mechanism driver.

     .. code-block:: ini

        [ml2]
        mechanism_drivers = openvswitch,l2population

   * Configure the VXLAN network ID (VNI) range.

     .. code-block:: ini

        [ml2_type_vxlan]
        vni_ranges = VNI_START:VNI_END

     Replace ``VNI_START`` and ``VNI_END`` with appropriate numerical values.

#. Restart the following services:

   * Neutron Server
   * Open vSwitch agent

Network node
------------

#. Install the Networking service OVS layer-2 agent and layer-3 agent.

#. Install OVS.

#. In the ``neutron.conf`` file, configure common options:

   .. include:: shared/deploy-config-neutron-common.txt

#. Start the following services:

   * OVS

#. Create the OVS provider bridge ``br-provider``:

   .. code-block:: console

      $ ovs-vsctl add-br br-provider

#. Add the provider network interface as a port on the OVS provider bridge ``br-provider``:

   .. code-block:: console

      $ ovs-vsctl add-port br-provider PROVIDER_INTERFACE

   Replace ``PROVIDER_INTERFACE`` with the name of the underlying interface that handles provider networks. For example, ``eth1``.

#. In the ``openvswitch_agent.ini`` file, configure the layer-2 agent.

   .. code-block:: ini

      [ovs]
      bridge_mappings = provider:br-provider
      local_ip = OVERLAY_INTERFACE_IP_ADDRESS

      [agent]
      tunnel_types = vxlan
      l2_population = True

      [securitygroup]
      firewall_driver = iptables_hybrid

   Replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with the IP address of the interface that handles VXLAN overlays for self-service networks.

#. In the ``l3_agent.ini`` file, configure the layer-3 agent.

   .. code-block:: ini

      [DEFAULT]
      interface_driver = openvswitch

#. Start the following services:

   * Open vSwitch agent
   * Layer-3 agent

Compute nodes
-------------

#. In the ``openvswitch_agent.ini`` file, enable VXLAN support including layer-2 population.

   .. code-block:: ini

      [ovs]
      local_ip = OVERLAY_INTERFACE_IP_ADDRESS

      [agent]
      tunnel_types = vxlan
      l2_population = True

   Replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with the IP address of the interface that handles VXLAN overlays for self-service networks.

#. Restart the following services (a tunnel verification sketch follows this list):

   * Open vSwitch agent
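Once the agents are running with VXLAN enabled, the tunnel bridge on each node should eventually contain a patch port toward the integration bridge and VXLAN ports toward its peers. A quick, illustrative check (``br-tun`` is the default tunnel bridge name; the VXLAN port names encode peer addresses and will differ in your environment):

.. code-block:: console

   $ ovs-vsctl list-ports br-tun
   patch-int
   vxlan-c0a81332

Verify service operation
------------------------

#. Source the administrative project credentials.

#. Verify presence and operation of the agents.

..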
code-block:: console $ openstack network agent list +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+ | ID | Agent Type | Host | Availability Zone | Alive | State | Binary | +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+ | 1236bbcb-e0ba-48a9-80fc-81202ca4fa51 | Metadata agent | compute2 | None | True | UP | neutron-metadata-agent | | 457d6898-b373-4bb3-b41f-59345dcfb5c5 | Open vSwitch agent | compute2 | None | True | UP | neutron-openvswitch-agent | | 71f15e84-bc47-4c2a-b9fb-317840b2d753 | DHCP agent | compute2 | nova | True | UP | neutron-dhcp-agent | | 8805b962-de95-4e40-bdc2-7a0add7521e8 | L3 agent | network1 | nova | True | UP | neutron-l3-agent | | a33cac5a-0266-48f6-9cac-4cef4f8b0358 | Open vSwitch agent | network1 | None | True | UP | neutron-openvswitch-agent | | a6c69690-e7f7-4e56-9831-1282753e5007 | Metadata agent | compute1 | None | True | UP | neutron-metadata-agent | | af11f22f-a9f4-404f-9fd8-cd7ad55c0f68 | DHCP agent | compute1 | nova | True | UP | neutron-dhcp-agent | | bcfc977b-ec0e-4ba9-be62-9489b4b0e6f1 | Open vSwitch agent | compute1 | None | True | UP | neutron-openvswitch-agent | +--------------------------------------+--------------------+----------+-------------------+-------+-------+---------------------------+ Create initial networks ----------------------- .. include:: shared/deploy-selfservice-initialnetworks.txt Verify network operation ------------------------ .. include:: shared/deploy-selfservice-verifynetworkoperation.txt .. _deploy-ovs-selfservice-networktrafficflow: Network traffic flow ~~~~~~~~~~~~~~~~~~~~ .. include:: shared/deploy-selfservice-networktrafficflow.txt .. _deploy-ovs-selfservice-networktrafficflow-ns1: North-south scenario 1: Instance with a fixed IP address -------------------------------------------------------- For instances with a fixed IPv4 address, the network node performs SNAT on north-south traffic passing from self-service to external networks such as the Internet. For instances with a fixed IPv6 address, the network node performs conventional routing of traffic between self-service and external networks. * The instance resides on compute node 1 and uses self-service network 1. * The instance sends a packet to a host on the Internet. The following steps involve compute node 1: #. The instance interface (1) forwards the packet to the security group bridge instance port (2) via ``veth`` pair. #. Security group rules (3) on the security group bridge handle firewalling and connection tracking for the packet. #. The security group bridge OVS port (4) forwards the packet to the OVS integration bridge security group port (5) via ``veth`` pair. #. The OVS integration bridge adds an internal VLAN tag to the packet. #. The OVS integration bridge exchanges the internal VLAN tag for an internal tunnel ID. #. The OVS integration bridge patch port (6) forwards the packet to the OVS tunnel bridge patch port (7). #. The OVS tunnel bridge (8) wraps the packet using VNI 101. #. The underlying physical interface (9) for overlay networks forwards the packet to the network node via the overlay network (10). The following steps involve the network node: #. The underlying physical interface (11) for overlay networks forwards the packet to the OVS tunnel bridge (12). #. The OVS tunnel bridge unwraps the packet and adds an internal tunnel ID to it. #. 
The OVS tunnel bridge exchanges the internal tunnel ID for an internal VLAN tag.

#. The OVS tunnel bridge patch port (13) forwards the packet to the OVS integration bridge patch port (14).

#. The OVS integration bridge port for the self-service network (15) removes the internal VLAN tag and forwards the packet to the self-service network interface (16) in the router namespace.

   * For IPv4, the router performs SNAT on the packet which changes the source IP address to the router IP address on the provider network and sends it to the gateway IP address on the provider network via the gateway interface on the provider network (17).
   * For IPv6, the router sends the packet to the next-hop IP address, typically the gateway IP address on the provider network, via the provider gateway interface (17).

#. The router forwards the packet to the OVS integration bridge port for the provider network (18).

#. The OVS integration bridge adds the internal VLAN tag to the packet.

#. The OVS integration bridge ``int-br-provider`` patch port (19) forwards the packet to the OVS provider bridge ``phy-br-provider`` patch port (20).

#. The OVS provider bridge swaps the internal VLAN tag with actual VLAN tag 101.

#. The OVS provider bridge provider network port (21) forwards the packet to the physical network interface (22).

#. The physical network interface forwards the packet to the Internet via physical network infrastructure (23).

.. note::

   Return traffic follows similar steps in reverse. However, without a floating IPv4 address, hosts on the provider or external networks cannot originate connections to instances on the self-service network.

.. image:: figures/deploy-ovs-selfservice-flowns1.png
   :alt: Self-service networks using Open vSwitch - network traffic flow - north/south scenario 1

North-south scenario 2: Instance with a floating IPv4 address
-------------------------------------------------------------

For instances with a floating IPv4 address, the network node performs SNAT on north-south traffic passing from the instance to external networks such as the Internet and DNAT on north-south traffic passing from external networks to the instance.

Floating IP addresses and NAT do not apply to IPv6. Thus, the network node routes IPv6 traffic in this scenario.

* The instance resides on compute node 1 and uses self-service network 1.
* A host on the Internet sends a packet to the instance.

The following steps involve the network node:

#. The physical network infrastructure (1) forwards the packet to the provider physical network interface (2).

#. The provider physical network interface forwards the packet to the OVS provider bridge provider network port (3).

#. The OVS provider bridge swaps actual VLAN tag 101 with the internal VLAN tag.

#. The OVS provider bridge ``phy-br-provider`` port (4) forwards the packet to the OVS integration bridge ``int-br-provider`` port (5).

#. The OVS integration bridge port for the provider network (6) removes the internal VLAN tag and forwards the packet to the provider network interface (7) in the router namespace.

   * For IPv4, the router performs DNAT on the packet which changes the destination IP address to the instance IP address on the self-service network and sends it to the gateway IP address on the self-service network via the self-service interface (8).
   * For IPv6, the router sends the packet to the next-hop IP address, typically the gateway IP address on the self-service network, via the self-service interface (8).
#. The router forwards the packet to the OVS integration bridge port for the self-service network (9).

#. The OVS integration bridge adds an internal VLAN tag to the packet.

#. The OVS integration bridge exchanges the internal VLAN tag for an internal tunnel ID.

#. The OVS integration bridge ``patch-tun`` patch port (10) forwards the packet to the OVS tunnel bridge ``patch-int`` patch port (11).

#. The OVS tunnel bridge (12) wraps the packet using VNI 101.

#. The underlying physical interface (13) for overlay networks forwards the packet to the compute node via the overlay network (14).

The following steps involve the compute node:

#. The underlying physical interface (15) for overlay networks forwards the packet to the OVS tunnel bridge (16).

#. The OVS tunnel bridge unwraps the packet and adds an internal tunnel ID to it.

#. The OVS tunnel bridge exchanges the internal tunnel ID for an internal VLAN tag.

#. The OVS tunnel bridge ``patch-int`` patch port (17) forwards the packet to the OVS integration bridge ``patch-tun`` patch port (18).

#. The OVS integration bridge removes the internal VLAN tag from the packet.

#. The OVS integration bridge security group port (19) forwards the packet to the security group bridge OVS port (20) via ``veth`` pair.

#. Security group rules (21) on the security group bridge handle firewalling and connection tracking for the packet.

#. The security group bridge instance port (22) forwards the packet to the instance interface (23) via ``veth`` pair.

.. image:: figures/deploy-ovs-selfservice-flowns2.png
   :alt: Self-service networks using Open vSwitch - network traffic flow - north/south scenario 2

.. note::

   Egress instance traffic flows similarly to north-south scenario 1, except SNAT changes the source IP address of the packet to the floating IPv4 address rather than the router IP address on the provider network.

East-west scenario 1: Instances on the same network
---------------------------------------------------

Instances with a fixed IPv4/IPv6 address or floating IPv4 address on the same network communicate directly between compute nodes containing those instances.

By default, the VXLAN protocol lacks knowledge of target location and uses multicast to discover it. After discovery, it stores the location in the local forwarding database. In large deployments, the discovery process can generate a significant amount of network traffic that all nodes must process. To eliminate the latter and generally increase efficiency, the Networking service includes the layer-2 population mechanism driver that automatically populates the forwarding database for VXLAN interfaces. The example configuration enables this driver. For more information, see :ref:`config-plugin-ml2`.

* Instance 1 resides on compute node 1 and uses self-service network 1.
* Instance 2 resides on compute node 2 and uses self-service network 1.
* Instance 1 sends a packet to instance 2.

The following steps involve compute node 1:

#. The instance 1 interface (1) forwards the packet to the security group bridge instance port (2) via ``veth`` pair.

#. Security group rules (3) on the security group bridge handle firewalling and connection tracking for the packet.

#. The security group bridge OVS port (4) forwards the packet to the OVS integration bridge security group port (5) via ``veth`` pair.

#. The OVS integration bridge adds an internal VLAN tag to the packet.

#. The OVS integration bridge exchanges the internal VLAN tag for an internal tunnel ID.

#.
The OVS integration bridge patch port (6) forwards the packet to the OVS tunnel bridge patch port (7). #. The OVS tunnel bridge (8) wraps the packet using VNI 101. #. The underlying physical interface (9) for overlay networks forwards the packet to compute node 2 via the overlay network (10). The following steps involve compute node 2: #. The underlying physical interface (11) for overlay networks forwards the packet to the OVS tunnel bridge (12). #. The OVS tunnel bridge unwraps the packet and adds an internal tunnel ID to it. #. The OVS tunnel bridge exchanges the internal tunnel ID for an internal VLAN tag. #. The OVS tunnel bridge ``patch-int`` patch port (13) forwards the packet to the OVS integration bridge ``patch-tun`` patch port (14). #. The OVS integration bridge removes the internal VLAN tag from the packet. #. The OVS integration bridge security group port (15) forwards the packet to the security group bridge OVS port (16) via ``veth`` pair. #. Security group rules (17) on the security group bridge handle firewalling and connection tracking for the packet. #. The security group bridge instance port (18) forwards the packet to the instance 2 interface (19) via ``veth`` pair. .. image:: figures/deploy-ovs-selfservice-flowew1.png :alt: Self-service networks using Open vSwitch - network traffic flow - east/west scenario 1 .. note:: Return traffic follows similar steps in reverse. East-west scenario 2: Instances on different networks ----------------------------------------------------- Instances using a fixed IPv4/IPv6 address or floating IPv4 address communicate via router on the network node. The self-service networks must reside on the same router. * Instance 1 resides on compute node 1 and uses self-service network 1. * Instance 2 resides on compute node 1 and uses self-service network 2. * Instance 1 sends a packet to instance 2. .. note:: Both instances reside on the same compute node to illustrate how VXLAN enables multiple overlays to use the same layer-3 network. The following steps involve the compute node: #. The instance interface (1) forwards the packet to the security group bridge instance port (2) via ``veth`` pair. #. Security group rules (3) on the security group bridge handle firewalling and connection tracking for the packet. #. The security group bridge OVS port (4) forwards the packet to the OVS integration bridge security group port (5) via ``veth`` pair. #. The OVS integration bridge adds an internal VLAN tag to the packet. #. The OVS integration bridge exchanges the internal VLAN tag for an internal tunnel ID. #. The OVS integration bridge ``patch-tun`` patch port (6) forwards the packet to the OVS tunnel bridge ``patch-int`` patch port (7). #. The OVS tunnel bridge (8) wraps the packet using VNI 101. #. The underlying physical interface (9) for overlay networks forwards the packet to the network node via the overlay network (10). The following steps involve the network node: #. The underlying physical interface (11) for overlay networks forwards the packet to the OVS tunnel bridge (12). #. The OVS tunnel bridge unwraps the packet and adds an internal tunnel ID to it. #. The OVS tunnel bridge exchanges the internal tunnel ID for an internal VLAN tag. #. The OVS tunnel bridge ``patch-int`` patch port (13) forwards the packet to the OVS integration bridge ``patch-tun`` patch port (14). #. 
The OVS integration bridge port for self-service network 1 (15) removes the internal VLAN tag and forwards the packet to the self-service network 1 interface (16) in the router namespace. #. The router sends the packet to the next-hop IP address, typically the gateway IP address on self-service network 2, via the self-service network 2 interface (17). #. The router forwards the packet to the OVS integration bridge port for self-service network 2 (18). #. The OVS integration bridge adds the internal VLAN tag to the packet. #. The OVS integration bridge exchanges the internal VLAN tag for an internal tunnel ID. #. The OVS integration bridge ``patch-tun`` patch port (19) forwards the packet to the OVS tunnel bridge ``patch-int`` patch port (20). #. The OVS tunnel bridge (21) wraps the packet using VNI 102. #. The underlying physical interface (22) for overlay networks forwards the packet to the compute node via the overlay network (23). The following steps involve the compute node: #. The underlying physical interface (24) for overlay networks forwards the packet to the OVS tunnel bridge (25). #. The OVS tunnel bridge unwraps the packet and adds an internal tunnel ID to it. #. The OVS tunnel bridge exchanges the internal tunnel ID for an internal VLAN tag. #. The OVS tunnel bridge ``patch-int`` patch port (26) forwards the packet to the OVS integration bridge ``patch-tun`` patch port (27). #. The OVS integration bridge removes the internal VLAN tag from the packet. #. The OVS integration bridge security group port (28) forwards the packet to the security group bridge OVS port (29) via ``veth`` pair. #. Security group rules (30) on the security group bridge handle firewalling and connection tracking for the packet. #. The security group bridge instance port (31) forwards the packet to the instance interface (32) via ``veth`` pair. .. note:: Return traffic follows similar steps in reverse. .. image:: figures/deploy-ovs-selfservice-flowew2.png :alt: Self-service networks using Open vSwitch - network traffic flow - east/west scenario 2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/deploy-ovs.rst0000644000175000017500000000107600000000000023137 0ustar00coreycorey00000000000000.. _deploy-ovs: ============================= Open vSwitch mechanism driver ============================= The Open vSwitch (OVS) mechanism driver uses a combination of OVS and Linux bridges as interconnection devices. However, optionally enabling the OVS native implementation of security groups removes the dependency on Linux bridges. We recommend using Open vSwitch version 2.4 or higher. Optional features may require a higher minimum version. .. toctree:: :maxdepth: 2 deploy-ovs-provider deploy-ovs-selfservice deploy-ovs-ha-vrrp deploy-ovs-ha-dvr ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/deploy.rst0000644000175000017500000001137600000000000022336 0ustar00coreycorey00000000000000.. _deploy: =================== Deployment examples =================== The following deployment examples provide building blocks of increasing architectural complexity using the Networking service reference architecture which implements the Modular Layer 2 (ML2) plug-in and either the Open vSwitch (OVS) or Linux bridge mechanism drivers. 
Both mechanism drivers support the same basic features such as provider networks, self-service networks, and routers. However, more complex features often require a particular mechanism driver. Thus, you should consider the requirements (or goals) of your cloud before choosing a mechanism driver.

After choosing a :ref:`mechanism driver <deploy-mechanism-drivers>`, the deployment examples generally include the following building blocks:

#. Provider (public/external) networks using IPv4 and IPv6

#. Self-service (project/private/internal) networks including routers using IPv4 and IPv6

#. High-availability features

#. Other features such as BGP dynamic routing

Prerequisites
~~~~~~~~~~~~~

Prerequisites, typically hardware requirements, generally increase with each building block. Each building block depends on proper deployment and operation of prior building blocks. For example, the first building block (provider networks) only requires one controller and two compute nodes, the second building block (self-service networks) adds a network node, and the high-availability building blocks typically add a second network node for a total of five nodes. Each building block could also require additional infrastructure or changes to existing infrastructure such as networks.

For basic configuration of prerequisites, see the latest `Install Tutorials and Guides <../install/>`__.

.. note::

   Example commands using the ``openstack`` client assume version 3.2.0 or higher.

Nodes
-----

The deployment examples refer to one or more of the following nodes:

* Controller: Contains control plane components of OpenStack services and their dependencies.

  * Two network interfaces: management and provider.
  * Operational SQL server with databases necessary for each OpenStack service.
  * Operational message queue service.
  * Operational OpenStack Identity (keystone) service.
  * Operational OpenStack Image Service (glance).
  * Operational management components of the OpenStack Compute (nova) service with appropriate configuration to use the Networking service.
  * OpenStack Networking (neutron) server service and ML2 plug-in.

* Network: Contains the OpenStack Networking service layer-3 (routing) component. High availability options may include additional components.

  * Three network interfaces: management, overlay, and provider.
  * OpenStack Networking layer-2 (switching) agent, layer-3 agent, and any dependencies.

* Compute: Contains the hypervisor component of the OpenStack Compute service and the OpenStack Networking layer-2, DHCP, and metadata components. High-availability options may include additional components.

  * Two network interfaces: management and provider.
  * Operational hypervisor components of the OpenStack Compute (nova) service with appropriate configuration to use the Networking service.
  * OpenStack Networking layer-2 agent, DHCP agent, metadata agent, and any dependencies.

Each building block defines the quantity and types of nodes including the components on each node.

.. note::

   You can virtualize these nodes for demonstration, training, or proof-of-concept purposes. However, you must use physical hosts for evaluation of performance or scaling.

Networks and network interfaces
-------------------------------

The deployment examples refer to one or more of the following networks and network interfaces:

* Management: Handles API requests from clients and control plane traffic for OpenStack services including their dependencies.

* Overlay: Handles self-service networks using an overlay protocol such as VXLAN or GRE.
For illustration purposes, the configuration examples typically reference
the following IP address ranges:

* Provider network 1:

  * IPv4: 203.0.113.0/24
  * IPv6: fd00:203:0:113::/64

* Provider network 2:

  * IPv4: 192.0.2.0/24
  * IPv6: fd00:192:0:2::/64

* Self-service networks:

  * IPv4: 198.51.100.0/24 in /24 segments
  * IPv6: fd00:198:51::/48 in /64 segments

You may change them to work with your particular network infrastructure.

.. _deploy-mechanism-drivers:

Mechanism drivers
~~~~~~~~~~~~~~~~~

.. toctree::
   :maxdepth: 1

   deploy-lb
   deploy-ovs
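As a sketch of how the address ranges above typically map onto client
commands (the network and subnet names here are illustrative, and the
physical network name depends on your ML2 configuration), creating
provider network 1 with its IPv4 and IPv6 subnets might look like:

.. code-block:: console

   $ openstack network create --external --share \
     --provider-physical-network provider \
     --provider-network-type flat provider1
   $ openstack subnet create --network provider1 \
     --subnet-range 203.0.113.0/24 --gateway 203.0.113.1 provider1-v4
   $ openstack subnet create --network provider1 --ip-version 6 \
     --subnet-range fd00:203:0:113::/64 --gateway fd00:203:0:113::1 \
     provider1-v6

See the mechanism driver chapters listed above for the full deployment
walk-throughs.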
j!`uoHA?m#Pn;V`c`@~ Hma {2WvQ`Th+NzI Gv~"<of}|eOL'0?.oo; hW~yz+( Aq}g+Np+?m&q =8_YD^ :p|4C="MhS[h@+)kDxtG2\= ?Bl Z+@;_ +w?iO m|e騹%%pJlX`oicA=Л./M?<*eu^q7 @smAy@7 ]4G_@:%ot{%҆Be9jVP+iE>JEթ W~+-m֘'@_ T] |/\l~&ntZYT |, \}#:8^ɹN7Xj :W՚ W n" yLWR-=]_EXM1 ,.@zugƤ+[FFBja Xx6t~t_sKZ;S\-c3P_Qca M .`_ Lt|)T2tT3R/}t|QWaXCt|)!:2zzv:@ @jDl+X$PίpLA0]CPzl# .Ev8&Z':1(1zg45F^WEqKֹ7}B@uo __!|X471r:vLA@t׵ .{רO:,# 2jzX`{C'R`Cmw蒎]9HE?u:W`! ҚK v'pԟbQρ_@|Q+"K v'`6@o7`qԦzۇ Dӷx03+@¯@)® ,#^ W 'Wz]ׄ`λ31M_]~B: zZ0&W)0_";.o `?Z>|e8PX_:ԧN!Ec C1ڡ^,!GS}AXIڣn\"I9tOb|kB65t|[zk&%tKbPm_ P+0|K#h_8ĥ|  :$H_b8$,%J^/j~W$?AB_1WBSt|%,Wwr]_ViYdҕ!!"S b$)̎PG@V-kz@R AHP*NM#>>Ο{~s>0-ofν3/{įDJJx_7`LW`lWR6}B5~Џߛ06+)^gc5^x@n-ydz哧8ܹ`ٳgncc|#qJhW AcB_玩$| 5D}>= +ayB 1_H_"ǁ7PcK@Dޤ֚W|䩅^y?+pL|%+hrK7%|b=835CJ(ޔ@;W/ ?S_ ǖ( 2S j΅?] rE~uwϺ :fΛӚq~-`&|%+@]KZB_g[{?oݧk_֬T:=}%}\zkZtWS_`8/]˭ǬFPrWZ5k=f+ГKAӞ8k~,1s=+ _薿xd% ˷έssquulВ7MS_EPwɥx+}p׬k뜹+"Myx#|k_䩅w)~=¥]u{{: 1o}[~1; w\3?#}-GVL{F*\[TCcv}5qG8/^2=hE\Uj:|`ٺ*أy8cK@ d˶|%ȵOU zZk ٙ |?R}-;@8r8o.ib+cۯYrњ⸶}_W/AyJq"|PTFr+v+pǖLA哧hb΅? k?`<&0$ +(Nh _Gmbt%Dm JG"[g^c+CLG`=+y# _3G } *!8J|Xl=+#|kLKh %&]xm [}\Gy>:"|`h?Aɩ=b#6ku0+pDȶ& ے!ү%^~\l1q5_Ɣ6EB1ؑڥh]-ݷk޴׸51kxl _>yj!榘Tuk_ە2nŵυǵc82*L{ئd|5zȕ;Þk<:V" 51[u2q90+IV@,#_os{>co}>uc8_ex_O?4 b O ?z]حR>C/"4pmbe='qw-O| yor5^ |@VW# _k߄Lww0d@lWRRi@-W_ |S |+I @ W0M @ W8Ox |e7S#_HA WPw-,&| +@rE @S&K _ ? ~"М`XB <F @+էF _C_E\"|S$9WB#C-8kѯߦ:T)<",4=Ypu`tWΉ] ?|3Q\'~P p"|r9Xį| 5+է@nXN{%1M!ޯug+@rW p x]:+@bWඇ/$%zqLD #zF$|HF :M~ + ~F&|HF J i脯 H iWL}F#|δW 1S__HA _~m~E @ W>C %g|'[`W KV H Ԛڟ'xD@fW@JG׾F Oo~>*r"X +@cK&z'|ŋٳgñ1(j%fN1S mڜ%CcxmppL(s"XWp?v^_hݚEoacc p뵯#2Mϯf}Wri"X +Em` tM:OcÝ@Q[/ֈOy{]﯑Xk}_)<7ēi(ZЕMaW#׵[ |ة)k8#K@oM495S^{^/=oZ/sL|{~`1vkV=E|#hbKfD2׵$X4+ʭ8ZLvwkb:|׻ߕjٖSr=Gy>8Gjϑה@4kL &|d#Kem5%זרϏү{o9fv"XG' ,5jjxXˣ˞isO 4Ŗk^u:5A2(EZ{ѪJ?8"yc'z`k]"X`ǖ5a3_#=k63s[!kJ!zj:?@GtgϏ ֟Y2l]2v[l鏹ZHcz͏zMYFrOcKWk="X+]#Y}kd'j-k959>Gxm}̙_SJދ^!4Q&0 S^c* ` /+ݨq jeRaٖnmnsU<9uu)-'QCi}Wz쟋\Ly͡Dk ,%| "cOa`d?\5}|;LyK 5~=VɅ8ٍ4gZΑz=2c5}x>` G )t"P~ ?{ddwԚ^7qIߺ^S.1>M9f"0P[I'29q Dy+Z{F= L>_ r= Rxϥ5;MyoD+@B!2h:Gh/.i1W= t-GDx |Ę^_'L}2u{(z E`+A:_&Qׇ5~e, t`Q:tLq`kV#b< rLPGz?2ϯzd M_~۵?'Ьwq,^u=(FgM9 %:Αk+lkaM8(\&DDpǖ蝸axozJ).UspϚ{MY6''O(VN)r`z Vg{O>uS'[)='@*BW2bՎ /Va۽,>#aהVF9G 4G/S^ S_n;oA hY )t z&\8p_.Ej{By:>G5% k͔׌fq~q|{@ "t?o~яkڿ?*v;⹇Tlujfsے() 51q`r-В)wW^d]k9^vG9V%is*N羾4 oMK:=Cc&m:G&y8DWS]GS2pk14RK?הa ^}n?'L7 TcL}% Z;j#(ձ$X _b3+"AV8mkLo|,[#G-kݿ)GI\ ^||{Rq$ϓY @F o?p釟}澼էNw#/ډ6[Z@m qّ1p;V9_Sbj}.3&yE-159G?z] /__ńRG?v(oi*,F&w. @Y"י2uc_.nY#8P|z `B "t ]S^׹Z#8l ,Q_`PBיЕQQLy--2~8`H \׋/,Ќ)܏Q5ίɞY ф)&WS^˱>y`N ΄ڵٳgʔWK @tWHH:kK'^LymOHDD$|3+\6W+P' +$tBiW+S фU %*xj͖@ӹnDA ΄P8ė׾VstYG"X |ʄ3+'x%۪}~?|\P ΄yEѫ5#c6ñXt+v\gז"SY*3N}u>l=`\WXI*tQzp5]į֨I!|+D3k;k Ly%kɒu1`\WG:s@QBy#=koS_/o֫ W&t ]S^{v-J4u4zדO~+o05|[w_|VS^N$!嚇+\"|kBW+@Ϟ?nإŔ^YW+k_u&t퓩)xaVǯ? 
[%t ]!~3"Ny ^a;+[ _HC: z`kf\T|٧:v!W+{ _K*rmeTri$W~8G @WD3\]ךqlUu+_$tz7Zgz`OѡeV$JkW2T$t Z@K#GѵЕ(d&|(H*`kێ,Dd'|A*`k `9gdZ$xWD3ᮉ'q^L#}Y y ^W3!;BlWz%|%t: S`'"XZG}?,ȕ9WupwD5Csmfs-z +%L=k*!eZXkGٙ H@3QPJDKב ^HI:@ݽk-'e-)˟S^HA* nE[ב ^I*{aFZ#=S^G_E |6c e`&xׄᄮ3Q O>@"8ǯ{8U o ]gBpc`;į׻ [@qBW dgv@"ضz`3ގ^c' nBW ds@ "vOG%:Uj+ _UD3 qMDf_E#ѫicW ]go?9`qlD*z@ A:@{zIN6WS^My`=+ N*|)l;"؈)#t{`[Ly`+tL:==C9^My +tD:0#ˤVuf+_ 1L6 -"-^My<+$"t}vQS_W@`BW Gu`akU*z.+!r TȞhE{]kMW+@#Bי(/Dz-z!x^8"t Q{#{ l%VLyp<+T"t=#`=5{(IHՔW%|`3LO@6"Xz`3S^ń3a`?ho=K+z51_H:c^`"X2Fי)B 7 ] ՔW#|Upbh` ^K 0Ldvh?`i^^G^'WF&|u&7`925U +@'31@}}t+ͧV+x-^N U<ei)[Vj+<$|HB*ȸߴD`1 z'|H:I⼭Dۖ!S^?`W3a@?{Q{;S`'"F^~H ]gb>y"X&"XS^/?o`4W]Q=DʔWXN PU0~(ILy;\g"@M"Xz#x턯+ ]g{Y{C,ى^`+ Bי>FK&W(C U`k#%2+#|'t,C}CD (D4z:WkBי0^@1Ly@W31}}&u"Xj0 ]Gz&e/S^8W HDe+K %r hF&S^ +u&, ^5, _f31`e+'|#tcm v WC T#t om OWE !rȼ(Gb]gzً#G q _E31=s3E?{uDu^ >+pՇGh"FWhK "t?c+"|\g>`0@\#Xe@>Wu})6S^{_}ja_CBיOūo"XS^ ;+t@{nTS  ̭d֔W}{h&Me@?uCzN6S^?WF:<`<}'O-0z+xe'HИux|_` @EW&t;=}7 ^iPp d6XK؛ S_,)0&+ t0}_a@oįC֔W02|a"D@ ]}`پ߰gB\GG'W%r~a?į_~@E _uq{{{F"~Dp<+K6@#>~^S $k _EQ@4z@ rW ɛ@kWW^XN  ?=^˼9E q `r`=+ 7tON/,yOh&|#k7>]" ʵ)ߴk7^! յrߤ7\wM{Iq+t7=TIo@k u _!9߀@7$g'%`ϵ_!)l@7)g^{)r{> C z{Fs:￰ϞYH@ WH7M {J E_I _"Pzm_ з-=x= +\w]F }%%; ,$bhv:`DWZz"`_ pǖAvR+ A5guWZ-[as x8O \,'|F|p^]O B>{p _`>}8w8_{Kۄp P`7l'|`?C#|%Be> p>!| +T(K *a`k'||3xu_a ` _Q`=&@]W 'w7MdgTVJ63a lx.!d=k\Fd $%sN@ƶZ*^u| ,c P?WȀ$2&b&9-fq&" Y-g Y Y @_`r 4G s@+RP KW { gA"(4@h+Tb @ZKRH?S+@)%R|)$%W@.ya'@&\i PW 4q֡ 8A,ȗ_nl EhUJ"(T` ]vS0+ qYJG ?AȯPzKO76l F(pJa`) S7;_:nR"@~۞ v4(,S|谘[_:) 4~͎;<@_:uW" *д0 +@GP5) @m @U-ca`T"_WƴWFJ) \WWhNJJ- Ty/k)Ev*^:W0O'ٳe Y5jmA oWQ|hإzk> OxJɯȴWX+@b<9~DU~u wt)*va xNj$e#D0ſ7J_~2򿯿W S|(Pᓕ Q !P"K]96$ SNFe+($vaF^ErVgY?%bRv-)t;nɯȮ t[iaANuQ@5\;+9J]ȯ˓_YiȤ0WfQ|\†j?{;:</_=b+P@;@5nɯȬ8fr)9 ӡN;֔b++@bzU0b'zD@l r̯nɯ+\ f+m͹)l㶰]|p6^X((tX+˯{S5+*r)]WW&)d$vi`g{W}U5P"݌ )t+ɯ4[+*M[RM{6Q|@ZcO/xڣIZJ4I^ Q~](+""@<Ж\Js+@bKv*|՜ufYWJaoo޼) Tdͽ8_eWy. nS@ ؅ͭ^TZ[?=}qU$׊uU-1ZU>wW (4,va6- Lƺ(FG |pf nI`g?,A < :N:?%""P*S_zQ|hPiv V8.7)P" @~m:[;]%""r_:^i#W. ܻ?;5B/Nl)\{{Ə^gq>(kL^U~m+W 7)ʯ´WOF {jyKYpQ"MzD4e;7L~y\!dɯ| PԥW^i+WD/ ܽ7Lj`s:2=?>{&sFhE~R8uYkwYżB2*PWbܹڔ烲>*wkvw,#%f)PQ,׈u"zi @ ɶ_ȑ+K ؅?^>h45ux|ع֟x5lwJJ~]#.܊S!Z,:?,˔_Ka+@Ko߉7ج(v;,5%HG_bn͕-RN} _ȅiP?W5. T)`ng5r5.7BJbz5N85pZ+ʯr*@S_\Jv+㶰n}Y&n3KWD(]ϯ.j,f@L~rW @,W@`IK֌D3 JI`;Uˤ %Q矕.9u*ֹZk_Cꩯ+)2(WbnnK ׏8*󥑶а7~(|6V1Q _ԩȺ~5-JɯUSմWBإ7nOb߳u-|.sY`Bk%Z5C^hsȯqj?X@L}+Lz4_47*[U4XSRwƷQ (+['Iʩ @<,u\J(0߾(4u&^essg|>~|9D(NbW 9L}1:2&`N%9;AG`X͢TJ{I%TINH$@6sfpO̸6>C#@ !Ve[7##߈x#ylj_C`Хs>c}%lzȣJ@я/nɯC.j+} S- O3R;l^zٕ}ȣB_vKOq+/]tn=V~_W R~ D+} ^[zOϹ兝" Pm`yuy\v~AjknQ5pҞ* f_(#x(4pyo4'hMBoXB]A/|Q(@k8BfsXڵ/n] @F6j+} R瞛 /7 =Ak|ꪲ6uTGו@~-_'ˬai/n ҕ~ @L_(cJ)09sw\{@m.[Ͷ^z/>D_#ɕ_ _܊ȯ̰Ġ jy+Kgu*UszjViE~jX׳Kv2J ؿi~ qK~˯_Xې_ ]ز N)0\O- Iu%۬D0|ܗDͯ/nȩnqK~_Ka.+(Viaܲe#B͚ވoVZ$1NuEiNW⋷>W_ңQn*@]5pN-WnukfXrP'WhWB|˙ ,+Vmj[SA.wE{wzkVzW.:=(9 A/nͯ]V_,@y1_3K $WSB|d4oUf@ꬆՍSjz=!Et/D;ܕ"AV+ֱnתdrl[ڏ/qT ׌,@j+,S|:nJ,-h .5mjEދ.tt=An*Mڔ_[\z*tym v ]8ӓ7oNƏN_]nUm /I.`ǥ+PԴ/DL-$z:G֖_-R|ѯ ;WBLܴiB# N_ {][/Y*˙Ӗ֪ _'^=LNw 6DWnV_]}ٙvyyؕ+JKni{R|!֓_TdtRxt~M]kBljWQhNWz|@. rʩŇN tްDMػݵ;HN4ʩov!W~SʯXd6{_V]8w^9}^ jG&]Wt%ain]&f?g/bErfm+:o~ MkKCp_Wvk[5 wNks6) ]8xv3XCX @Nʤaࠃr_V"(;5ZT '6ŭc7Jʥ!X]kF \L NV ' vNJA%XxZgՎSiqeN[PS_SD~=q{~]8MKOqP lCu M׼xYv=Uͻ)Z~(4v d(N(gUnY&/>E 2 1Q|qƥPL!6*Ӝ00xig`=3X* Sz)e%Nw@_˯sj®|T"Z(v+wֽ˓`u]8 c%Bc1]ذaC2j賧u6lִיB Cߍb

鵉uj^]5+U/KeKbX _Ju'GyiSN* tNl4 f+ Η_ɌMmϴ3ֵյoUUOi [hVK}7(*BH@eBOJK缽OŵtTLvEq.]tǝwodRB)@7k#_܊ bVԟ_# k)SaZD!h: BcUY8Cy?9c p%/cۗ0;zƵ;<. G&{ _z9뎋<ִyKV'YvYմW .-OM395k=}M󖗆=ОFf(~~K4┳&'Ωn$g.O~϶ʯdϼʤUgۘsLXrl R|5[=?9s9oeV_MjNb[ W$Lʚ_ST)I8ȯ56?6-92@)teiY9sG.2'O4c_rug`qb1H* 6Cgryػ?7nPzd[xŽ WUrvr֙g%[_v|Vuӧꚺڍ9Y eWx,r٦'N^̯iM1 Jé ϯJMgϺޢs.Ͷ+P N&_&-gn]qӴ=𓺦 E6vr,n`D9 T@T8:]Bƻʺ+|Y5,! @y+Ј~]坱Ϳ"hECV3_Ba&8ȯk_ E[ ʑ.P$_,23'+$(2_.fi7鴰 ;ls{EYOSJ l Ɗ׳5K) ̧\8@v Ps~Msr]Z(/w+Q.˯qfϼ(m.NzedJh?_Fe=6m?!wT'puMje;܄1p05ˮdQǖWi?3Kְ]lr=+UhC94[ aQXHy&rɧ$trr'&bZ@멫[A^)YT0e헓O_/+x5ˮk7r<̗ ͍#WN:a9~dㆍY4Zf'vA3|=MLɪG'?9XȯQy95ޕiWLZ6WmK[BmSb,P-S =_Vxdq |cG֓߾/V8qk0['_*'ˮ+3%y+*;cI~O:ǭTHC'sQn5muד^fJV87ŭVkѦ3l˴WEOP|(裏N:#tT^@Zi~t+9HвZ2_ fMkkV#[25I WvwyGgҥ:#H8C?zPVUډSj=k_( v _C8ry>_i/iUPx(\"8Ó÷=ƚ{AӴѐfJV3V;H|@~ma~}G4v[k{iMdնtTnN ,s |o5:V~gggGߞJC,|نf޵SY $NϩI]_SjS~mǶʯ"nX' 3-@P"8Mo]IAEŀ[ 8)ѾBÚ}ndJe+ v/ɩq 6͕Eiܺrin/S(ĕπn.bׅɾoH7$oxwsܬia䬇4QpZظL;Au_Oϯd NӴ i]^{3oҫc 4*[a@uL|Z ^?V_eβ =A+Ե̲f߽Y@*4U^^% @~1]HXSWIm,uV \2L/o VJ?篾|ݨ ϩm40@(J̯_͟ASXΦ yn[$O[| ݴ &W tBJG׿%Cǟϕ( 4c}@~mg~}k~{5pLm~qK~"˘Mn_)og&!Ws~Glx^5 Ÿ]6ujgF֫4kmu;\|4i[J b^:a!<@wI3絷Dʃ]~Xb_,X<l.@Wz( ( c~mK~eʯ1jXk`@Ki=֏GnMtBOt}Vyi3_ _-_V+t>5ulմW`J#^,Lt_y?̳y^K?w]_mW5uM|>;ٵH~kVsJfO(F蔏󑩷~ۇ'~^+X*;ykk_v? 6Qh[a?^ȯ3.uj_s *vSSѐ[W({~oB>cyMDf;?)b$Pz˔"_6@4[V挵) (740ޝ|(<0vyx%L[K?e9kQx@~_ןlϯ1WuʘMNjrJʮS|z瞕偂~w߽}??/&c4ت2h4ж@F>*&^AsWb,*TOh2Ł̞{PM\Kޟ]}Y"cWl4M 8<2%_?|A~͍ak_ܒ_Wh3WPQ"رݟVW?σO)` ˔LJq m 0fO=5K_OkWԾ}?-iYkj_ɯ]֞{ʯ+t+̺@:ӟ԰D翠%EOWTi0,(o׎?k˯+t+jЇrOsOjyV[K/~ &~/j+XkJV1J +v[ ] prx(OY5-+]tDy +g=_^:^/ W zE?r IkڦeA?^6D򗙒UtʯLLͯ:v*ʯ| (A{>2~m+K?&iYs-!2?A.0`B|uVnW~*ʯQ|Z.,`ZVEZc%niT-? *ʯ)4OYӌ>tS1kZԇedy%xO>?zy7%S Pz5$VU~_SdU6R|*Dy`螏=[o/rž&6]x+ye%ZYXseSujXR$5Z. ֛_>a~WdU.S|*3Nh~MWU~KWJS|c s-N>[Ju^d,yڍkuZ[6 V ʯr)2+@{(Q[y".,:5 :jJva)Y ?lО Kvms~ DYU~m[֑IVR|Z%"%Z"75-tiY1ƍf B2%Ki z?ȮkZG_.g 9T~"W)~Ei5?캲V+W90ȯs׎m_W W)?~ӰDib_õ>+( k:*} PXJnclT%L"AZv4ȯ +@ )ܟW"5-EPצk$+ʯ@_jg;w LɺF4ȯ1dnXh)WS"D0oqG_M6* k{C%) ȯ+P5W | +)Ee) ȯ+gʯ@(0!ã)?R_ے_eWh?^P| W,%V)4 ʯ@^6S_P|Ke) ȯ+@)0K\dWhi@r@(XJ቟+ C^]b̯*̦ @0+7[$t ɛ㏉˔_2L}@R$:C|Y+dR|m,* 'T\+KڵD4*scJ+4*@|̉m+ʯ@h۶mKN;+V;x_A&K ʦMef_ "gM @id V~LB Va;+(Vm+* t;6U`We=Te @iӯ'D V~ #u *|o]w:v'>qoo,)@ S,Я|zͷ&Uˤiw_%XS|O?|>pKwaR| EӁnB`4MQY`6WiK; t H8^U ]n[m_ !m/zOo4 ހMv`ӥ$0@d_: t7/3-7?@uݘ ,fOOөHnu%XM+D;߂buP|hХou}i&(^ͦ뮻a-YIu[ù񆛓aA' 2^dS`7B+@K.|~,5\Wzk 0n/}.u[~6Q|hЅӷ.xج㴱irb8|zU,fs0gpM5ty\?`V+] *h]5ҹqӍ7+ȧ/2hid99x%4ԧ>))_M +@B U0b;P_6lzPL~[2,&yW3@_"Tp5%1nKNaJ]ϧ[/|g1peV-o=^`wC@+ ~ *h]}c?-ϚXi?@>]ܥl]QXx9dgiX3w+qkJj똧Wd[¾?MӶn[ە܆k3#Q|@RUW^t V}^uXJ+t~\r٪}1krkdތ50և~PZoZhe(dve [蔷ũyQdW_ݧu!ϵ#E3϶?ffȿum-_TpŕWZw,@E_=}N4MAoi^^3V/^s,MH,l){T&?U059k 9:)t+@uW(OպFԧ>? .‹K|jlk֯kU]ӟNtr9GmU3hk?y5U/] =5 ]*غ Wc**[2{u7^C)4O/ҼBzW-`?%X=v^ilh-ZB UjUf߮|N2۬}RtP:5x +@MB .tt+ L w ]?u^{h=@@_adٍj98sU׎3@+,)L{"eզ/S{c]%Td|~}lW6y-Nx +@ B .dϓeō:z0Z+TMH3uW]y~K|(>k0şiqeB:ʔGuM8g'߁X:sߧ&SMBK_ri t?x%k]<8NS[ |Zrbh=DCa+]u5WنJ0tG%.-n4{rfե6~cڦ9mby] =etC ./]>6p,0hn=Y w\b.:}hs>}{ޛ[I(2֖C#^W^3@(MwBL,򼢅C뉭(XkmQzX>p{х{شAN 皫]ӟ3cU(S2 M{{;Cfᮡ3n;fc/]qrs3  ^]^L!05~ 94}ö7kL;QzVt&^p^48uW_u˵Q|Lj?D=w'yoeCӴZ٪_W\ta`ɒw=Ics}<{zy,ϳϠ7(,Hkk;6+fTnO]zwwΰXӋHMɊYuxSU9J?}\wUϱ.'3 @Xϱ }}`tՀg͢Zc=].g>NAS[@i|WnOT·iVɼd.˯G< ЕU}鶳Y9y7q}>}om+@ K+.6Qhf6d>Pvρ8(=VOw_ n\λ [^5zoyQ|eah"}N 6NJP|CS R@^Y |uxTW,f>kl)V~Ow̕sWju [-G`OxESq~'@_ ]*xwe jM]^ɠ#`)^qr sSʯ@im翽IöY˲mX=[+G;G?' 
@)QtRB'],nPWB +wZ&]q;ߟ$?gׁj(z_.ӷY$6eZz(nk| J#es|BP|QwݣeiWv]I5_~hJ=wyWOc >Co^Uյ+Fx  P0MUlW)B w-~dҦl.QG`Lς@yW2)V5j3.ɨ]z?ዟowI6mRf2g\߉i>o^7_ 0&toRA.U_֭WޜGlyWMsݙ̊c <wb40UK/>W_/qKNL_ /dwDo`DNZ֭Wv#*NQυRs>=묳 Csgଘ_T/dzS%خa5@n DM("ܧʯM..tߖmot vi:aU]rҥߋhw>rYI VqsדTr[Gyh9=y%Pc;@x@o>ַ&/-uyYe^1kb>@gnI*СM a%YDigSiS}2Lȍ<ߗY<Kz]Qɓ>CrO?b9Mu(K/|Qtеl3-J' F3\tr(Mv}|hVo{(WWBO9s;ܓϝڢcuM]Mn^rS_&*POC:㌷C+5: |H#`/ /&7en+@-9)Gͦ0L< Bg}Nr9@ZSk5m`*EK/ٺtd%߱`!T>=}d3ɞca1Dl–iTmfʯK ufC`E E IU8UUeX+y>}?)tYRB.gdg1Li>~2aZ)} y*l6z瓕_/K~WR5l'O=/Do2뛶ަm**\wǵo﫿]Q|:+-[J:묰N ⭣K m.: @&Mi2V`M+N`*V}2EeeML}m}^ tN3lt[+25zBc뚪UqAE[]Q,ˢ4 .SNΧ)ٴi3WKư|{]tI[q46;^QىE^O۝';Hnl{<2^~}G}կ{+M 8S06q_VxC_V7PTfy۷Ug_X:J1Q[v 67o]r\v;l}Ԙ /]{ m؉'tIAb號qӰ+rV`k L{BOܟޫ޵}yרP@k.Ɲv`XgO?zZ^giEKF/6;n:}ͦNm9]'ov[f??L>bmcK[g-sB(P~U8`^UdCOO87ʔ)51wdE[ֳaeYq1y9x,&cs-z JC&|12&6M®g,+ՆҜV\f>^eZbb20W&](8KM||Nռ* [^I*Nv_O5ִ&VxM>ʧGu4'MFϰ] bO8Pe%FAVxm#eFX@B 6n8s]%CdXxZ`cj卺xfMy]){lV~.c6 Oє% [{ސJK c?wkT8ݵb{woV; Qvҿ=!ɨ-< &_B^\4_̄*Zvw @_ʅ.f>y eBkObj:ClбZhjp-;+uebV^2tz=BhUx8%X YUiQ;lc׹։\}U^_J.y䑣Fκ9Ģ'wD?*B؇9EYhu"y\X TV}珮G Ʊ/85u¬ [g3hSls.Y.B^?#ti:y*\tT($ծgP뉭~}}yԫn̦J@{չ眛s9g0ga\ZKXL;n׳u @+9M0@x&zᇏT0K<*t׼D?޵a.]~mtǝwtzgKEG &go9{|3-';Ic]e"94ϺԾ^+_@*dMEj?W𜳅n/*ʤ]D5ӵkVre/Zܬr?Z֬7o2_ 2f٤A _JA[S-[ E)7Qx(@_`-Ov!+f\}}by Ӗ,Ј6o~[ׯP@E M}f9%|ǻ.g-t k;@7?/w y)tI65m۶mv@ @Lz`3W85ɝowײ+Faz G^au޲jxN?m7AtЋ_OBojֵ5*@S/nvЄ븷{~r޹%sLs9")E&U ⳣ^(f> 2C.-#ԤЅЪ e_ɮ~c;ڶkөSO=59S 0 nvN+\Z,uYVxUz˔HQ|Vմs>79s_yKUӰKNg1fzl KjUdN9䔓NIN>Ra/8%/lΌ ),EMcmpGwG5 'l[^ }nJ<_*K*GV Q2D:]J@~6tI0e KW=//Mo6Ŷu}eӀ9nVjr=U ƶ @<2ɶm ʜ&&k] =˙[3rf3ޒbOvkZd=IL_^WiMjJ׾O =Us`Ox|qƩA4 abdqM%^YU;H6/(,N.27o>#|i\R^Y AV@Af׺KNnwp^Yuq n#&b75y;qk*v/wƞKb`5ʯlWSNtzrڦӒM6r_|k:1!zeJZ[vҥn t͢g+qt9&Pޭ,.Zj]ȧR xWmtSNMN9UW^_cǮUe ,Ǜ**ji_˔=')e>5S4㘣N>Bnᷞ4jYȧdϐȤUsM{BEQ|z!+r'&&_#[!vtVĔWn*{-,sǝw$~.]#J<Ïȏfܺ~ ǎQxEMA蛓NX.~[ 8`n_ڠד?m5[Afڵ6^HXx7| G~d "em:ƙ׬٧ʧȚ@[eœm۶@- L|js׶ۦ?>9~dㆍ&'_>J 73`-_?xtgRM.a㮚Z@jw1UkLyy tEualoz_{6Ѓcs] )1j 1g,g*QL] 0q; //)Z jiyXzeҫRAM;(\WeN])o]ov=!0|lJw#@>햢3@h @LE3}ѣsUX'zke]a~S: ([*u}q'P`_/r35\eng3C5%v({ʤME,(R|jsǝw$пZGy_yqU5( Im}XfF^ReP(R@Pԕ^||l9Zo-2uO^(-b*ʙ@UY6mT7@_Ft>s=w ~uc]ϸ6MjT&JJUf:XLyڔۜ_W{ϞOa=5؋wrX Ќiفᥭ=CIy!;nrP﹭|Y\~6"|gM+%sW0 6N~뒃_TfkkR7-r٦X*mW4y=̳K[_~u7im>{Q{5˦i<ٳ66Kc:@_ N&f^7V~_}7sij rkF ~M:5;p[^nH{nù9hIot?{lb^پr'@_VJ 5~{A!*=_lˬE tK^.=wtG!n_%93O?3JuV5k],;[f{K ۴ tuQcb@f]g<;<־WUɫ^T9i:4e}Tz2smȹv6yug۬ 븻?z7O?ެw{% MJ u^lo{LsV>=a+=;_lKyA?\x bk۰{׳Z71I-)^O7'=ӣ{+{-g?ϕq)hd}+n}: O tj=Ŭ3.]'__n5E3{^zm-StBX]M](=ӣK[+=^헗%xZqpjj+!rrG~myakVk׌%Ʈ>|Iz%RnĊ 2``qk/2EQjS)9 ca'c &<  Ȧil#-z8'uN]{9P*N[Z|NfӆunOЪebs|b]x UOsOVz?"[לKIþrz?x.+ov˷K/mV/?T7l p3W2&hec)@HTe}L`?y{7=}?{aG/}9{vi ?Um \uEm_|pöRݿտW=W}]{uWΣXB*zecKIԟXyYv׮u{umo:?>>+Ww}Q^EOS^NMMqOi Էq`<_jꉓ@w۟ԧ W]VwŜuUZNwmM:/+xC <.嗾3uuϛQ;~ul;~3 6}hٳڟμ7-e{ۮ?vY_, ۰Yޓg{ԧ_}'^/˺)0K_<8sڥ^M{wG"۟qfΜε?WMG=WȃSoG/S98߿quLӮfںS^mD#̹n(u ; `Jl[ч}}G7?(i~립g}{]@ p𻏏'E%zMQhJޡ޻&?<\#ӧݸSd<$x?-{i})P뿶-W K/^ָO:>89m쇽"ؔ", ^ 6=l{}n¶)#=7n|Y"x?-g-(ݴ1*> vٺ‹į׮u{`ׇ׊`?.gNSꜢ5K=vc]?Cx0t*9p}"@@/]|˻U[0]nExfyړ`o~PN{̃\)Z^66Fz?/w}ױuA'_ZޜzS*^mM@V'ٳ?xlc}ޗg{vQKk#Dj<4l?]ul<ڱќ`5ߩ,ύO~x!ѫi4vXu{Gb\/"'x}eyN޸a̙,r+yN?u5}oCͣ;ulϏ8ce^O1̈́@fVߟKŮ^ćGFOv)yHڇcu~Y7?ySx{D~W OzeR\{Q3]"z5EKT@:ߢ^=~+7?8Fs`?.ڟֹ?5I5/@݄@FV3T^7/Et|xYӧov3=}6EK @]ҷ޾t+3~w.$7?e].uj&`| ؁:^*vi_(-oo}]s"/ѫUP@#eDuɩOvR۟ڟ̴W+//]~/=jG܎`oow;箭kЃ>Dhю`_xiֽ˘`:\V;mGN}7[njڶija _`W'dUsCo~xBOݟЃ^䶛%( /sǯ^|q[uzۍbXSM ] jSyMO -f$0fl'MwY7?y_xѫ)ZKݻ.a=-)ڛB@ @C $^zq2MZEyntgvD~FڇvO[* ^z奣/`޻k['nշv~\;5??R _(~i/}K#OwξZlb֠KqR"%%(=ncV\|b1{V}@6B؟ڟ@Ʉ@v.+mI`k3͵ozi'Zh eCؗ޾xb{!z۞<j |z&6|D״׍jc{Kݎ`&]?_e{Ƣ앑lA{׹I!j"|jl\^| _YU}۰yun|d5 X1^c:]0SS_^z0͕Dx~񆬦š%*>җ,Tn,r!w8zշz퉧۟ڟ@dRyW^:zv4]BB'oFh BxO~,ï#o>޵{5vڟڟq_Y$3D{qEgѬ??S2EKT_liqQ3MCaOM)_٥|/tDt~w l{kX Ӂr# 
IozW}*7??"Zrq{.qߗ]E}CXSۄB0%Ws6#ݻ߻&kۘ8I $%iW#$}J6+5EKT·]yl{״nutz)0HV'; q?ĉ_Ώ1)ZL}(m/܎`7gٻ2F|=B*OҎ`ygoNJ9T{'zj oqw:>t)%++d"oo`v`NzXQ>L} vs-*?]9qO_^慭5`+x߼x(e>UP Go`ۻ7SnW<W޾܅ɮg]1&5UTNFVw-Q콩}&%|Lz=uГN:qz֏ "J~۫v>5l)_~oHMv?׳}KոO[Hu,셗 `G/=wpO' SWRy׶GL{M!5{ګ>s Hk/o}+w]'=ZNUr%|)=UP@ il^ 6xG%OG>oOշO=4}#I A:l%`Uǟn~>OFP&" SqOm宺w??zm}ڟ+@)%3>3`gן®޴E ÓO7"~{ɓ]km]Ӽ~` 8I 0t^v[5_>^Xӯr#(Hs?܎`z֏۴x#gʬįp`&iFO|_߳:xMj +*c?|굣OFӻ?>?2jjo e26+҉`8z`>/#&]%z,EOFO6e2+1+R`~:D]6~H;]ܿ=kWߺs?D0g+((k?|ꛭ#}jZ>kcWoqO?ԉ{GxlۦvqDe#Od.w?cIWD?ټl{IwvWA2E#kz:_:I tGO=u~G6G]5WApW*8ܓLS R[1NW ի׎nwvǟ~vUT`/]Oi3c nW̥їs;=>ȿ) zw~#w?e_:t$']}[g?>G"*Vl aMKq?+@`ߞsBk^z?a1 !6cpW $^;`<ĖZȉ Aӎ`ֽ)l ,T&'߼# 쇗7?eVbLr]d9W[ _*f_W{\t"?B@i߳pK:'DH @aSdV|;I 0;[;"1^E?r%pg bvSl*x )*+U pL @oO/wZ!7֏0^0>,j%|`TΛ?j!,P +Qd ۟S@Y˅@DKƸ^M u}0#XAS+OX`/90#XA)OĮ@A F "R`Ec"X Wd+8bW B J @R`EN ̱)"X+9g/}.݂d)\]I;v"X$"X`)WvWr%x%7ۢ]'{"&| {VJ#x%G}m'" |Hؕ\ ^w] `AWr>Go曣.W F LA +93^ P~G $| 1C1_f x%wc ؇`BWJck)W^S^ԳWC _"Rm1$]"XMX/+O,0_E ^)EW:G:FXQX&t RWؗ")CnÐXL WH3C ?WJQ>DeP7+D psYA`+%ik}`%|= }A{PHR^o,@^4+I {9A%1RtE]k16̻_B}0j!<' zglk ,26,C  W?`YWJ`k\1"?x-${8+A q?`Mn'~r _D 0-+zI`R_Lw[M ؋XS_&Wr7uTk]cn[`_' AԶN[' g]k+ \ޱS`_w ezI>җ,@$WrĔT\'ڎ`^ 0|'~r_v0ud+9)w(~2m `3'z%W}N=h~`?50(ȁZ};}Θ {_h+_oVN$w.&s[vߴW U^n־sY46 e寜+?y>S3s 2mQgSXS8ncAs. `_*PpB)C:v)q_cq0(+=^IQS^躟wD@Ym뮩C6c:`ܙ Y3tE lql+b@Mytߚ02YKt< `Mrc+M d˴WRak],+T9 *Y(.+%^Y),`0)p0k`l+f+I dǴWb+]D/c#XS`MЃ5oSLw}TOs"X 7Bz@VL{en]k zƾaiN%v~{S;O3tmI,I [Z%}#ؔ>+_`)uI9mn[m}Iq뮯^`W ?5Ly-[1eؾ+uu׬{/弅D_ɼ+[ w!jUa2[?3'TN g(иn˝1oG8uvp|c5W/׾x,ܧ&| :ӡ7[ שn)>1~S^sșZ)~}S_+@C 2+n˞1o^7۾c3Luh8B[E 9\r?Ƕ/Ep%:'t_^]ڹ}-R]kAcƠ'yIqn^E.&BLڽ.ZsK(raJJv}Sn)rS S ^פ/))W!|yԿO5ke Ξcdh.c;@]LystzK- gs_ !|y瘉^y S`϶9/wůa)gqq}^5vU:Wv/JWȈ^󬱉^= @ߋ!}دW ?5(ԄSgjSQ:OR;mѫuB*a\y|~3V;"@Rz?vJ 5-1}& ;1.o?SklefW>n?b܇}n/~|j]Wc]BRW!x1ulW9nK+<7T}R-9'_^ y)֚ΗC_=,&z~,ؒs]~?b=ƺ' ϔ\Wqn=j-z k^C@-uX@rrcWRe4khWc ~ :w=6Ai/3nM+ BҎYJS)+gxn'bOhS[7_?Yd߯;j>fq^gMT޹/]cʯs}L/&9GC"lckm9vMyI q׮˞>M6n^ԧzO`o܇),}?ou}!qzUn 9y|>_w9Dsy:ܦnW'zOe׶4+%fts9{ߍk;RX|XcNSs=eiGA;5dH|4ǩ535=A1)(vSn9uC I'RD8i&z=A)>.waʿε߭1n9Z: 7]nXrcT{agWGM*0ccԯ@=˒?c|iɒ^cMc(@y?wهv~\ט1iVZ'S^2$tl|1|IN)}ny\v%:~sMmĺҐxo=)<Ĝ\)Lk:uKaj9SY %{ͯTtʰoi5#z]ƾc-su6uj92 1ѫ&z-9^e=&x o)9|0ukl;K}TJXqEyeg̨s;g@i*/@ PTjΒ1^}-,>ѫk>oD\bua P!uX!as9P7,'xǮs['VH7ݎ1B6s|ƬT 0'+T>pP9{ bkIؐ2{^9`uc"X|zMtbcՒir 6ߡ!jߏu9Be<ۿ6L^i=KOrYo+14)o/vh8$t\2rzS^׏c8a&7gi1os1ȑv PP"c^W׮/~@sɩcn;&6Mȝ@ `(Uɓ`sckSJq`2}kbh}c56/cn,$+P5|]}!G@5Do-Qd\Y @ TD//jDN˔5#|eoac߶dD/  /O=_ňl@ۭs)}k gWΝTM;-l%W`NW:&z `4D65 ^}_* ]Σ[Hk~X+ ౶֔ײעV_;^0LN2U]bHN.s]k!x6Fi`My-}-ji+E @ֆFmN֔/E`WXΣ[>:D9@D!o[`r}v֛o|)v _E]X p9@5M1^V;1S`"Bϱѫ)u4+W4tk;"CϮuלH5C/Zc!MW)!&|%0 o[`6c]#MKvAbNZZ?E؄~C@;zk-Ԅl/W3܏4vsF@^Dao[ @6;g)FNo֔5W[H"`D]lv~z @-@$լ; XpH`;?sS B$??vm ̢M+z]ZeP}|4656sakkC -_ $g{ϸ«X9bkkGd??vm @PrER`vsؔ N-L{4lȱS4S^:+"+2HUjS`!)4Lyj"|5^H}!'@|"Xrd+P#+ Z+90,9 N??vn^mL; qH`^\_3~ᡅ}+0&q lD| |e&9N}@m{v36H%56O~s{/[ `6WB7#ejΫNWpXb(H?ԲEx]9wI `&"o@/O)y0!og = yi"M+])T[S^^WH:"@\%L{1f lJk Ly]*%|eV9N}6?!/`ŔakS_ WA,3.^T `l\cڦ&Bpxmu꫸{!}'@+ 1)v?])Ak7WX7s9?ϴW uM Fn_Ԧ_`YW@L]+=5v7S^CZ ,OZ- T{ ;a+3l9Ny]?mNy R^M}_YLS_q{oA`jc"lęCؠ֔~dK1~t;y|ut{{4W؃cisiᕿMؠ`K`Myۄ6 K /7}\reqn}̥?v]'m"|-LG ܧ`}[{D9<֔kǒkS_>W8/s\ ݖ$` i}%ĕͯB"DͯŎ^Cc֡Z=_BmA>X1s۽k-=L{}ݧ>D\L|eq%L}m/P1O7Gx/!R;d l$>M75NyݴϚ?3_ 'sO}_R\k+0'k?~^8< 8L{8D]lDS66 )C·mk|?5}¿T]a`Lv^T_IBIS_ۄ "` R `SkkvGJůs'<s7](=|ގ&|`XNi^+}&Ô~0v-r!י%{s;c z0\MD @JR`)i0knsb߾s+I)~QڴWG,̬`ؠSojZ RZp˶l+-(1z l%DͯB8{Bkt]m)8)=K@ P=_?xw;?n];~gw~l;rۿV曽?o92ŔM15SMb~>+ _IRǮm!^0kC;1l6hb!lsd)۔"~ L`ؠ'cAj `"XS^B -W؏U ')A:l-DPWzak ~qD5-m ѫ)' _5@?L{R ]FWY˔>k!z!|LnD@+ 3V:l-D V׆a)A `#XS^E(W@z̴W 'KGcMt;Ŕ ՔW`nWI DD@r`kkS2ZmS_+K cj/w_1l6H=5uZ^_On׹fkH2 ̉A$"L1I!f̈D_t,6<"v/"~z^߲ Xsm l 
Hs^V^? +07+P [WA;1+"ر+k밳Kzz|9nnv_(0+2|w_/ߩk%صQk+ +P!?kpW(ൟ`9WZCזv~#Cгׯ8 +P+__ ~We _9DSӟOV^_^Z_D*|%|% ;k?+D26m`9cH^įW'|X!UB[Kj0+__Ez`yWƕ-B LJ$Wk/į`Nz`W iq*B NJ$ej_Jzrkݻ"oϯOEt [ؾuk =,N +^#zXWb6KX+,W̬),~} `mWvݠoHkv~`:]h,:-kW x+,4Kq;ekW"*|%|-,K#zCZD16.`%|%fs24įI:hk~ק"nznk}WkO DÉ^J$V`b +4DcUK:k?+S#zIJT"ظFV^8FJ4eҚX,`=_9W|Bz \ `!+V^bzz|9nnį_8Fz9+@l_>}p44m `w,O?;}g |!|]<X^/'zAJv"ؘ،!zOJDeښڇXK醇C"ػwϯE JJ `%xXK#]Ҳ NJaW:^%z"h#Xl},ǟ~v P6D95OZWWB:,y^ D pį `rG d$W4 LJcx!Lz9,@y^@D{_q~JJd? ,YM:?+P+v V۩U NJhi`b.C nl lV^b ]==>[7!D.G P/>88alݫZ6+a_՟WRhoR `c`9e^ڵ"z,uXi]!zW [k/ZyGJ_-PsZ:,x^u#!YyI yX|a5>K P0`X| ,,Z|%1˯ p5+xmh94; .+b<į"zWG-+K- ^W^W `uYV`Z ^̣a`e`z_{X|G.,P"kLWrJ[s+@^5'k "X 3klWJ` _!|%#fs2$Mo캺D@tBWN ˶;⫕W\DdfUKZkl"X`Ib׼DK qd`_#z%;fs2d qk\ ^ƞC"ػwϯZ=WuX|a<|Bײ^+5v1G{ 'kLİ@CZ6+B|c#)1!zIJ)^r_#\"z`*Wcvhj /+% _{ _&~-M įϒ᫕WDF\ Q{][cD٩V `=t +@nW(_!~q, 'je +s ͵{l+@~WJeUKZ,s`5^X1eZyOJɄ^:_`~W ~\ӟOWDN*|%|!x`mW(EeR,LG @W(+@Wj!|eD$~:L^rR., 'z 2+eh+xCJm^W_|^%Os`W @+@LWڲ p4TVb~Fh];F{+P& g2}_ab.({^Hh1vs{X27_!x~?w0; d& 2_N +<ad%ܻ 3|cV>@ Į0Wi>O %| `X! `#`e\==>a7X`JbWXWp;w AK^`ym!|.ӳzW0, IX+dsLrX P/+%J\LJcws{HE P6+G d"v}!t_ ~%+,@ĮP,#|}@"X|ĮP6(>D"| t`nd",1ws{(`}bW\X`NbWaLWJ'Xu!,Į9 #gJD ]5`CĮÄ_!,[BW ,M _vPq@@tT_ @~bW(~ׁį0B H \.~׎XJ#A,+F lY|ebK[#y\.k _GB,X A,G Czܵ# `]} 605#Y}8lDfՌIJH Y{=2.4+19`7R,8W @ _S _HA:16Ggx+D0RD tH:  įP/ᴄ.Z` _&&z uAW(^p>W30༄.j` _.$z]\@ I ,  kWG<᫋H70o=WoL:u]==>C<7`8B:DX|#|bzzv!{g՛ AMUqs ;@Wo>.' ^sJ /eN @DVB @mVF @VH @VL @ lrh_9I ZĮt _D Į2]9I`JPWf!`ЕK _Y"W |euX,I @ [G@WR5ͫWR5|WRqIq @ WR«r$D߶Z| +) _'a`iVp0| ZZV | o"ra @ GW,TjNV_9ͪWR8+| \mU-W6Qc[G@W\jL]u`tj.iPkt+Lќn|!it S6ۨ_Mݖn3GSud0Kj^sl_0qِn~2w; %m)Xv~cϿw8oфnkFZ-趶ol@~ vyB (c[@Hg.8juPtEm9C.vYXFҭ C~+2 _[Xd ^[)ז`k+u9xmv`)!v*.|m `Z _D@J]_ aj]Uk_c!IENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/NetworkTypes.svg0000644000175000017500000224450100000000000025153 0ustar00coreycorey00000000000000 image/svg+xml PhysicalNetwork Project Network 1 Project Network 2 Compute Node VM1 VM2 VM3 VM4 Project Network Neutron Router Network Node Provider Network ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/bgp-dynamic-routing-example1.graffle0000644000175000017500000001124300000000000030666 0ustar00coreycorey00000000000000]kSH<zuAdL2CHrՖFHI@|%`4U3vӭVs>t}?Kfagv ӟ_~:e]כog菃]Y|{\v#s;<򠎍/gy}quuVo'V08HI=l nӫ7/~z\l M.7p1ssj7;l9TWP,O@ <O÷_$2F_dD c 'RFD^o 5"O6;l ooqn= CŬD66l5X\Dr.$L2ٚ'~BRa` OӰ&~.y-%<ތx~<^!} PP>5A+ Dv7 1^%ou$;*Hz0Fa; : ^~WKX.[ }qn⼮Î$c=Ő挨Dc8gXkB<¹HI҂$|P0jWzutM;1wI2Y?`0z'pyVJ9H^b]ÿD/nQN>gAycp ߶o u45&_  xV ǘBBX~BBBJ !Aq_Amӳ'A0z>q$kOw`7d^lz6s7BsE1J&G3qOQ&IR#&l֎@kFNr(&8k笝vwvK%RXk'X7@kA4A{(" D?--fZf612=?\?@g{6 8FGJ ɂUǜ1K?Mlv:) )Z<~42}%%ГKʢ4d$ Arq4AywnQ1 >5$lx|YB6*24<ܤq}wM'0SiWjh04WRRi5+Pk%@)vM|$0G]t;MUӹ` !8z6MRR4]`E.nx"dL; J.5U,83 `)!d&SO3<_MZMԜatiTfl$>kM7T'0obD %=N&^;(&@[]`2ѹI7v"yZBKń&4P\c  ܈KyTtJlb ((#q[`W`B -MC<ME!@ϹiOlZX1VKO|H! A1<5 \$ 0s_9on(5LVhT5v,d,9XR4 ;&09G&[. RMcGAj8ӥ 8('MA8Ø3m3~e c`01}:dV<ZE֊$XH- LAZKU\xr1;Xu}1ȁ{~֬J~T\h9(P06MD68$ ځ1`ՔHqzfRG0@a9l\6,M$L$T6 glZRVaZ:I$7Zfvm 0DgM&eiPH)``NH|w ߩ~Mhx H6fmlѽ ( 'a̴3ӏhJgŇAN}Ś3gW,29uYtfyErfA E@ggWί̢3,D}nHptHp`Sza%B20f T(pExi }y22|( vZEp+sVnJ>1s!#sȸ@ds2E"cm.^ςB?Sd\jC9q,1v"XDH0ŹBك멻ǼI݂{dvN'wUyGZa= h0! 
EEĴ@G)y#!.1E4qV+ r؄q ÒdrǙ%~֬ w-^y_0Q.ojϹ&}Z.Ve0bs-K"\ ` ROĜ[k?'$rlj4DGK&q=Bp琉-ey;q.; 0.Ƀ"uߟ3Kq![H0Mx {L"d?b&ėvFbDJwu'w?2Ҟ aܹ@yWVܿzF >RMW1 R92mi!\"EfX€IQ.咹4ӘwD4mq{c мawPZ}>m2 F$ ,ͤ&k|AhERJ`ǰ [RrT6AmJ`RnN&nXN&q|ylzm83FI jʼnFJD%]6iyaf6\zTi:f0M׌ϡIZ 9yoyTS{ 1y;a/d (~PhPfƧa<ޖN52bħ#a i"MY~uéM6I{ƕA>9<3#j<özgM*%u1y+؞WQш/\y9 ?$ywHÐrM[eSZu8&W Dk4guh:Q#v_YJ3/a3zTo0%A63^rznMyZ%a:շ/zq@ww.t{ R ]#h`\ÉA:O/FNj4cv2U7=7(a5V#^gO}ќVk댬y?ZdzyTIN$uZˬY#r4d6O~M\c(S~v#P7/Kba/././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/bgp-dynamic-routing-example1.png0000644000175000017500000033327300000000000030056 0ustar00coreycorey00000000000000PNG  IHDRdCbsRGB pHYs&:4iTXtXML:com.adobe.xmp 5 2 1 2@IDATx|̖{S@QQ{}+ >+=b (E:ٙL2aلM~;~3gmDbB@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! @`;! \ {Ny3p xp}H{'z{C1fW]Jt;|p`:Vv\jr^Ȥ[Qn1Sf[5@zޖ{];zN^#U` ,)q vMhfN!}JЇxyf Yr 50"#[% nC keAgVHYڕ\ Z~u2 wv3zчRwد0, ;L.o6\)3Ҧ$|x+U]p 54H(BP(CQԙ|a[]7jgHW@|l񉩏z=0 OUê<3vqWy½AQ70cU|bʵB8K7H%/sR8g4?W`g>Գ:~ RrP6^In[a@(PHw;usY+P~ꩈCyc:]'=xvi(ڀ2'9G%M=S73!O /&nwXѥ}Uaޓb왓to(eTb:ћA 9;54k9ha (bB F֋@P ̞0/Jc_};rrxR) &"lܙ_}1c8L1$]X~?+yҢv!!!8Cb0*z52r,_eSVcg0,C+i&rytA>kc}ŸJ|4b/9)7^A.6ZOLI!ܑɝCvi;GhNYS? =iݼw)xv32b<3yr_)a=x i>k#4yke(3yoWJŽ}GLLBV:7-9a\O7ղ#fCu?(yi)Iv 5gǣQ?.oUNcb~Ȥ^HC$ 8-r ;ź5K@45[buM5_b|6!x脔x K̚Aчqu%׸=^( ӸmqWjS+p_xbN AWlS3 9Zb1鈶CxUF@ K["P}fƒ~ ?oA8qfh]S`Go> C ~BH!3[HC.OMm.mO΋= dھEĹŐ-7BLUc9nIV:JyǤ ߯p{i?yy6lɺPS)J iFĹA-weʨ2u`v7UR6QCg?Z鐣5O@45\bGFROh=tF +ek-}\RGVtZAy0{ lk2 af:'M{/yGJN`݋OJ9c Wwb=ieM_6gk$nݳV&Bx:={HS[65i?B5` ҹ uӽS6/ѭo 1VxK0g(׌Vmh`Tw%\QdB} 40^d&ɓ *˺WDx8T@ZM uWj?cg?7uHv;5q4ic-1"IB{ϡ]g/X'^,,#cY6ϊ.,_qwˡy\ ;9OaQMG;WNJ CO|H3Nx(?~$B@FR`eu1 C  .tVp||w9^ϣ>V<ĽҺ252BbNm]aRinvE[BvAQiSe䚸9>1{\ۥ@GXѱV)-v*Ƃ@'y$ݮ?";ئnAw';,mF|ǏIw(ۍ]%jSC:;Qƅ’Ia(Cp~LkԝpY m ȈڢltM_<506{ya6ox׭9Fmnc!l)אMPO o keέHTB@C@r-!p~.cGЍYЂ43M1uhxR/&OXI*AX3>XJ'd8t׌4Y骍kLpԱ.9OfK9.#P'4Aɡ)n+YBŻR[%ƅ%}x=he >CrZyo=J^~Δ_r%0I  d((J =Ӧ55wp13Uofj <")%uS!*<5SZ`g;{/ vKDavO}TV-P!8fX q;`qjah‘Ѯ[[{ϧAc3!H O4AL/d 3,,bѫ]J U@8̰Ƶa)עn-HRay ~B$WVq1k`Yx_iẺR ! {0čh _uR,cC`h -Osk_,-v7t#ѽ7~jx+vnJVnLr0w ?jbvoBp:CZLw=pBߋCVط3RAOiӡS;0 n:,HN 2e+oeG<6e}cXL$H` /?rr݋x #3߷}OKid+Xkq}*Ug`t2ZݙX*6E^KhNtǀНt ̂kq.!Ψ;l x>4w<:WOFXk+3"1e'V3>?)t$11Q&]Z A/63ZOJ v2X)t q^. xAޢ" zF?ď⦶5<!jg4fS!:BguckU5WW\u7+N/Z> IߺסƴkcMC^w]`tpPzEIg%R8]+o]z`S&" ŘiN܀aҵ]%:!P HzB9ƂF fl`ZJu=풾APAbM>^T+QM!@v: 펦77aZr!'$gI@ǕaOK[IU_Fu5&{`w[̶a vbBJVr*B@! B@! B@! B@! B@! B@! B@! B@! B@!  YGNr%'PpR["͹ekPWiPU`7Js!חL?8Ѻ_Wn;+yҢ.+=_ H@H"i2 '|kư@8Th;@i<(5er-,+^P].EXk_'{뚏 .,^Cy[UT_W<9k:{„#u5JW:s,?5q늢mj".C46"H6oUhJ Xo'ו#rHJ6,]K~ϻ\5 Z[{Wҗ8'ÓVHO}3 B@Tg&>j9S?(_lsYYlF%M;ūk- kvi)Y["3Soꭩu– :D@:Tb']#sR@vլQ3 OmЭdPoC1ݵAQԴ)KH.UZs C7{" O  wbwz5qs0h{}9Z{6jY,e1kMS?O~aug~ ?$>~fxރGf$}c19ZdUUf%'̵G%&ʳqK)'&j(,PЦ[-ySc;E!}c5OH9jj]Q'L3);ujs-׸[7poDK@4W2_E\I@`,M8;'5B+Lc( tMF UC9udgZj#~9)I3;%d߱\N:۠(Ӓ'Ż"ɳqnҦ*3>юxҞBPCQ-Pd$JI[XUgl㞪KDs]OB!sZcYe)sh1aM&9 !PH:1BpƓx t}C[E40(SUE҇m/I/?B !{/Ci, ø,[ qhc@{F;VkdB 7 >Y0QIkMHwG.-qK@_{ǻ: uc}_ӚϽL{xR E}pҴl`ŽD} )-w]{+y;|_okZ+U153 AV!NK_B6FUm_!ޕuOMø0_iڣ F&ފr1:x=J/ŧ9 ~p _(HMON=ݔizAz'@A|q_brWo0UgZ/G%ܢhgAFF&MlXV9#oaL}(X!@va 2.4(ƅO{Gr^LM}\{,nՆ ]Я9>v4K(X)`_4S Y$^ҷERZJ]BONFnmFwd0br0-%Qnkڗ$7j{,Wsa}3T]]൑ 22qi)X*aXPZZ7~ꩈC9A@xvwvl󾡜 cË8ߜI?HJmK@%[6'&|-tT#Zرav;Cb y:lA }oA;ǬBA7pH <ų$~mE2ɀe3Xk`a#떝5#/@Ky-Ii^;CklwJXQd)H m#soD}d/,GZ!31xʮ3XSXz9vƌ 0xwNJV19]…?6qȆgRu#_i7Ϛ`U0/G! 
J{Qx^¬TC{ڠ þn=dcaטM{_{>Wv螜׆M#^!wG_{>W ۾v@>`wy!x9v6U_{#B+xbaS>P88aOG?3afp{7|z4a94:k{>{HF&N{:J09VGZ%Xi0-reg,q/>f[<|B 5uiF: `}sn;f9V5ˬ>\^M;OAi?^2c1rrU7 y}?6V8rB$H!Wu̝}d9#m55в=5/?Ivq:~4^F1aK. ݝM9L/#k۩*m1.޺0܍u/Mteo!u6;]ה-y} ,25ohPF?j۸˼R&2N-7v\8Fm¸{=}rxwLgU9Ƽk ~BuhڨLMmka5 tQQm5;Ժn"POćHC9qylc]rj?V}w/B@(0B>@EV!4RHݳ 殐y>‹hjW0F f_ڣy8x@ dj_Ф$;^86E-&MLA 2Ҿ mxm y4P c|/e{LѽH.+lcs=5i=?Đ_+v=Ӧ5eUYC cAN @iżz~^qBS#mrv"ur>hɣpo1Uem•SUR w`\Bmka/냬 A,A4B!>['bۊ>;>,MӐK\COŰH8;yjֵn Oz>ws7޳=!}{5"}oWm8d Ku8fo{:Ƶj.mSn/QW᱔b1>3(x8ԋAìP#U܆l ;c!wW:n *r%l:Szz~hө+u}-8r /hx-x L̈́sYxĽh,D#XJZ>ụ~v ku`.lXh4MZʸMC8*i꙼4!DmQ[Tv=4_ ĕoSmW`Ą|O푎y,Vya9DZ.]:D%^c^AP)v=Anvk[mg\.ܚ3ޱV&ЧmX&`ݯ1 O4 rs=Gb(''=iY[aXi1,WMhOD:֊~Py!1\~8R׽8Zޓ[ WBù{#;6Le~-nCmz).|n ]rD5boDՇ#@c w!x|iU1X‹ Ty/ǣ9mOtq_ӴIs5ֿR5()ZpV;u)-ax;"ye w K_u5wXϞcv:g0Kζ[,3q'BBXفgfW(0q,B:\84! B@e"Hґ ! BYup$iBOEZ&]̄Z! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! BqPi9߾y=oH*m5^ܒS!PcM2#}sLLrC4,E2_2ws TT"IU*4⬙ddnv3Ӗ#f'/)%8 V 烡_gk"υ*WT3.H*o3~mxpIlpKHL {Ep^XPT0q.g[СWGvx½IȪV,=4!ٳ]yH<jd$R!P= 9a/{u =:أ=EUFjnVoNk2ܼ^ewf7@(n50H]ۤKa]M w4hBh{ҿRuzM.<ֳSP\u7 Cz7}a%?/gO=R[h'nՇYZH;m̄#HۮEWFa^3#υSR!P&[w sDêj{b`4z0Uzq4_Ou,ڗs~W/Yc'elT=*:TKt mI>&:7^tFPKM(&PISs~?5exM2ಈk'kD+YSF`膗6]Hv*hg6?3$аXrH4yc[!AU&qQaoLIږP %3f./a\UKLhg6?y.jIHB/ EcZf.U7%z@ME"iuazY2>06,\8ԅ8m=}s;ŏ˕` υ\$?aa؆%mE0hr&#^Yl4]pz_:^a7[B!wF1Q G\3tt0# ⢣_eLWlN>X@CO7^|zqOy9 +NC(Ox XBdq\C Jbì3EN6ω{?c1+6'?Nv%ʍv5[v?,dѳ۷_aZa]d0Ua'3pJ;6@Cزiެ7Ү[qjs@AVmI=fo+ūh_鰓[vl칄m iiރ{t`־= uWWp|dSUzߙɎOzYd֓z3G},7VL7ڰ.rb1/_Cvk/>͗ ڴʼ6 q !P !}U`h͋Vl6q9uF7p<hi H!4iSnL yG\sy/QD"hD;~#WUU] aS1at!~zQTkYUC%!P/yѶ#v}RώtBx&h u;oE=:4lۊ.PJC"*@rŝ;{e>OVw3kG75~wőڴf={=h9βω ;C Ȳٰ}c{vjM`N lQLdޯVeZdtş?Nm:h:yfGt1շ#P ;'džLԟc_VN?[Bxtt@R6IL5qqoۢIq\r"*޵]#_<)knb(ؿwgji r\,]s'>C 7ˇі:#YwSTHֆ6-X6,0ٰm?[X$Ѷs_*ZT`Mb-k-)ZΣD_q^$}%ÎozwmW}9<,fޢUpL|$Hl`DރDĉhpJK3/,Gߚ$Yc <^~0 mpr7,O/`yR yh&X~' -Mga^Zm5Bn-r*'Yɑ0z]WxYG~oUJ-(?/~_z+QgU/^M.]f惍[ӽfWq^A-[;EKbn+9:9l%@DW _l]RWsGVK*iV'&մhh\v*cosX@f4 'B@"ރi3Vd#bMfok΄9nbk$i^?* B@AP䱎mEayjBaqX", ; ,ulj4s ]Y?fX J93A{ݫ3{tlM#Job'qdžľ *9Px7d@b)I[s氓7+8o9r ,D;dkx[Xuv~]D;7^΅& ٰJ.;IO^['{/t?ៗJ,pAw\qtFDo$)У:˧ޜG+nZ7◉_~%ۄFOMhs=O޵ٶ-a.aX&29߬K|XyWg\^\L?B Ȭk,yHtSF0x6:چ03 +*DrZ~B7Bkusi#}6*v 'h'D{f?ĺY-/ytƠB@IdH(k.|O [3! TOlC$%yB@! (A@8B! B X"HKJ ! B@ d r!B@!,$%%B@! JAB@! wB@! %:%pȅ o[c!ھ ?@n:KA't!~FZ_FUνnwwjBsR'=2)50}shBޚ4Ǻ棸?JC] C)PUenZrGYe YjG:O-EY-ZM(p־e^ }x+)Q}O!EW.,AG9t~w>I^:ޜG+nZ7◉4,={R+Ҷ32s׿Ù9xޠ>ԭC+j2~>ݼ}٫3RBRP!C O/<# c$$GvNс"h`"D֣ a0 ߧ2/>!HC}SXjf! %]7_2Ҷ\wrS!`PŰۛ #7xmdB& dlB@!Ј7+0O>>ʕ2eRYARēu׫ӺR$-B@S.׹Zg<N]3xx.2 YBXl=7? !Pg L?>PbdyWلց Y A @,ZztlMaU H| ! I]q;HM@֑c ْNPFv. <~.&1<.vkO*,b@$5ޟAw\1J O$mJU^{OKsD8L(q鱊fDC#qaxR1YӽFá*:)S>|'#3So8F ǣh5t$M.•< S_WFaň|#>wHPsE;g&^*(߶KINj89m|u݆]~Fc#)mBUN;z1Z I=H?՞TҙvkkP^Jf@IDAT=7OX@9E(Er̭p FE5V/Nki /;_A#b%ȩ7/?F8Վ'!hI^6vPG+j#YfvD3X}ܯ~L<v *nxg/vD_FCKۯ>vƌLmAz!Q 5',Lwt4δ:JzOam< $uA Ci܎5% ʱ3鈽EcM'/N;@rDm.*r.O#"#6=hoep,[(P6ahfPmqM<撠ksz(O<&3[PotvQHo hSPBx Z9R<f3ةN ;׾_#K5@^͘+߮Lu~gcGXqZ$PGiN]n6<r^kh:)ẑv 5¿ahCSQ {̠ІFb' K)R/77/mߪc&ݩJᎾÊ+' iHx<;賟 S#-QՓ؎k`m;>R`M-:EmVW@X6y@~MǙ]]"LV k%+ϔn5Q++Awnt-̥ 8VtS)̮ƃUJ˙䅑Jaay!+>X`CS``Scc_l=)-)`<:ՔqSR0׈y2vv/~=舣֏Z Alj{8tQ W-o7>6wܖ 5Z nlZXbn|{.7YY7۠{̢MltϩevU4F8;2t08CZh&2,z_hʨ˵[[8 Z? Ͷǩn%V"ȣs:IZQ(kAkq,U:{ ULUսs7̀[=GfTGXEyj! 
G)|]M}³Esmm6IS'@QWgONQ4 /vXHnD~^->6.fVJD~zlHS[٬gwWhPޗai}ԵH:c H6i{*}M;A͏#-?ztDmhy]3|H+/k&O fevIU?C&BgaLd*wg{0ka~ W(Hr*S`P6|N$F{uz|r3x̸0cjTh0\+FC#tprսS.CW45Pi~̥#:Aw׸)ZMNJ5ǿmOi١٘5#N0Hھ9GIikBSv6!Ap,aC@ “N[y=mdi'&'Iku 4?wkĚyy[=(:],ŶG~\]ΧL9{e ;wog(, {6[uA\F{!nY ⸔1̏g/X˜D/s9;;k"0up;gi5hP9y~.أѓEQV- 6< J_4ZWVQ*M;?_S@A٫J7)TD t[FҼ-bS[͡?idCdr~ziUd#S,n6WG q۸K#־ k-a؊1&NϢY >8q/Hj$4S׎QD.~wF96Ѯ۵|sQl>RWiomOi/3Q˞^O>y _с YeXcvRdz 9s)mͥB&$an⟿o}^Ghj7ҒPvz NbU(B&@qVXf=X@ӹ+ˠe4: v ǿ=tAC>}rq=KYVoݮ=ÄW[ O2]K G&,rRK 殊>ejƒSk kX#P>^".\' \_C|( ! ^?Gr gxV2㒭#ͮqԵ|h#tzh<ߤ!7bᬿiaP^zs_;sfL %a +_.,0DdA8|c. (ZCf x2N;3::eJ )q{ͯ8arOiRxrzcd?T(Tq\CִgM"/.iLvN{Ͳ C7g.K` 57^je*&X(F9i uӋXv8sL[= =6,zӤs諢1NJyH彜f\ xQ Hws='u(g&h|}OzNX)~n7eBQ~ Wgv:M:#VRѕe_(>O'_2.g5IV.|6'9sfb@] Ej 3r#gDRlJlFm'Q+h#G65ltZ{;թd_*w ͇?*D}^ o#)m%k|ŵۯl ɩDa ìW)??਀Ο e#:7=kZM噉^YOw?:&o'mߟJSjgn2JX|;^Exr_ WAȂQ}j;]yCI-\Q NG}xRŪ{3yx%-MG}CÈ k/F3~ɥN3rK'B>XV@_iH'rb)HrefIvFfG ᆗ%t\ |E4=OX"+TA\1B3%TԶ\U f l7}5#'`OS FaLkQKA mה"}n v9u"!?i jZ~]iS:۫J/xA,ܹh8SYgAۏӻJ0h} kb9^U.c!8)͢u~R6h ZK,+KЦۏֻ򀃢! ^s kӰnNz B hy1Kju&<p Ď8 AOὶG$M;570FeG^GRvzC*X^s5jL/Z`H(FYkzھ/;4j{8(A{!Oa`1G-7y$@+ @ig LS8Z霾0x=م,/"9.hs~,^AÄ4a0k`gI崲U PܥMb)'{mO  DHP`Ї.! k۪_>ܕ<=U;d{GU,ɖ-1b .o@ob*6 Jp?6I˧1ND5w96򐝖,z xǦ(.u3>/41f[<Ñ>Ob;GjjTUj/,{mA J ou Ȏ1GN<'T8#[`ǚ;y1U ?~EHE #j2),iV+GBzgꑦ}qձc#y oO({8y#GOwƑٞYoN &blBbYR&C:^$+7zh™_--mbΏDX~Tzt[Điq½ m=۩{m$(4|v¤VXu ^:C9̛{ik9E})HB`;vmBA]B@E.'>ҳX!%,Aп ^YK&@3i bf :s^;Rbrc-9liZI7h\nۡ5&7]MxƓ̎kly9Qe?ƨ~OsB ]$k+y3[[/V'RoCs;;ӄE'E1{c' XeV3t/5xv0ݖ:N˛:3N=pᄛv9 ᛌj? ՞)^:xi'ŌԵ]pd^|♯tOIى7_tJV6Z|敳vaNKwgAlL w{hti ʛ1st3 ࠒ]+-%WFa@9)Ug;]Y~p ]x B$' AvbͣI݉?I>+e" T<"7%W-7{D|#^ۍyD >ٱGM J7/!4sx^LtUoѪofр+^>HΈ巧)ˠϚ\ʎXlRs8|TӁ?P ƞɦe`l5x&8Is^g~ȋ$XZ 4SԨH;Lg>f`][#} /|(h7"iaF<905/իu,q0=G ر9<}܉YTˇ (kb=XQXN%Henwb!,<4~7|B(̆a윷ʂ䟘{yHu` ӈZ6׌VOtEIًOwxOcMN1'\9;?1>g'~F~ĔWcGdCb$+.D֍.>4oi!乨u)/sE5 aK&#[53XJw@;50m6,L1v6X7r/]ާݛ>#WQǾ7A(VЂ&P3BS9tԦH>P@b<}k,@sNhtfuY|r癭s yNZ A_X=w_Z&' AՖM?z qIL |O_G1]|e~gf[zBDTEE^@9=YN{+Ip;ۉcG RD&  @ۦw6l~)_g<~oq$5lu_W$ /9v ) N0 bz, .mV?D2Ǝx"y4aܶ?20E6.N}p[H;Kg9`U^>zz\ _B7 Wi5%wP,:c\=}J;7ʷ6,,bi6ʾ bASXQj\P'W9Gđ:x9o)!?x,5iȜ׼TNt9Eyn<]z셞aQy_WO.lcY\ ~=\'LUcnJ}ıٺnHVW䗋bRҧ)mu-"+ ;ċdo͙qۉB!_c yw t%^ذrKxTʰ6,,Lg?xYiEw>8J;hn¡~8fɡ6ꮧ鎡"\DHC2]BFXAr:19Ӑ$=R01hl *pLʾ\RⷹzN*4]DnrHĵnug.IcoXjYEuTD2$jsTҴMom"Gho5U֕è96H㘵- "xe☏GOgw7&~&|]oE6aʿ!p6 pMʕ]~etޒJPO2+I%=&3`@8Nm}[謜TE+Ru J^AV[\,!$b?jEz~ ?bkɴm^JGs˾?|}cPoS_?]z(H3a I9/!Q/$=2Zk[Q$mΔ ˢ;V,"yy[cmN{>+ Y'HL(9~nNxJj8zю9$cm>[^ zl8ryr^gY0 p{F.uA Y]6CYnzd5 HX."\ u~ғ$ N$/:/6ӮuI$K,z%E-ѥws6aW L@NNiqte2 䆚hdFX᜜zWR4Q,HJs֮͌k;:\   #][C5f3Y4~a9z3pG>H6[kI$Wjj*To8S <nP9fY>F k2B }Au F%S1Yyn7;C~ lLm_r}ҋpWX~',H&GW.$_HJB[bQ;~.XۼXVp9YBBBBBBp:@HcbSbi3ga9ypo 揕700<*Hȑڡ=lSDĺH*ɩpO;G%F?"LxVR@>ܗ;9h孼.{ԕQݝNp .*Her&p>"i C{_+YjN5ncJig3TkH* [c>z S)C -􁍴!:,f) \4ˬqf.Q/dIsFD$CN*_vog#ZdwUWL ;=~'$WM ?}AS"И͙B\MϚGR١6Χ FGDsvu}%,+a* ux]O]L^5]Ujý9ڶGk} /j7 [ÆJ#ƟGݜB_ߘJ YBМ{M^Nc* @ \cR/hai_TU u t.UjI̗"/9T}~98xk.rRic8u7PKߺ:t 4W>gu*GxĀCI}C.>SvoW۩m /C {S5[qӪy?86ws'Ucrcf*Xoe!P6lۭӽc|c6G_*3΁vxHtְ{`Ӄl.D ESDXo79v ҝvv=$ħTD\8Tؾ9҈WC#aۭȜ>A /na=0 bYeŞ x1_Y E%+oDoC % :zQc6$3ovRB=}"Bm͹ӖUvZD8]W;E T 7N'\K[~II"vw}6G噴 iA1SV}h˲-+sЉ=KP+fKZSB9R3Cǚcqd[fG9zm'|G I-n38Oo8ʳݙ?vmR\"6(UHI6 72"DF*w(h`RZjcmhXV_& aHqPU81" MpķΥ]ޣ[ MQXcxLjZ(.XM+>F!)i=lt:X$8=NFT# I8? 
Nу^3qڥ&Z:j@ 4ՋvK{>z #^ r%.F#A*?G?,?tO|^_ b6\L_XASxtimÿނ#O>ũd Ss'[~#G6Qo?lvB bə ޻z7ѩ'-"< lqyYn`fY8ֶo;2^-H^$HЏ ,{~[dTj*;}9 bbu\ _ :7rn\BױhPqت$ttpd>݅zcOFǕ20u_V{ A4ld!ի,;\0H0.2AA]Z̶p`^$S|u#v^i"|S(8fMy֔ Z  ?4|ֿ$-x=4_U c.YCCJO/J;Ȥq]Xrք U-Cϑq4K,t) 6:%6@?{/sr&c1N}tzzsvW>]sfљ>n-+fի[6pѓǷn /׾ p$: R1oJ1PͦmȚ]/.PnmC3V8teI5"\E8Յ{o҉ah$NjRE&jFþApz۬{?FGFO>M+oO RtM"ށmBlCwORW8G>`oo7՛`*>V*d|bqxT+*$rl/GvM;3'nMZ,4L$@:,v,  -!oDGֽO%EPKTrm"WB{E"'n UfKzay҈hRyod&Dڔ_I~ x i v; d} Ucq+QP˩8QʶhV밶{sJK/& iqp?o+TI-قثR<:k?I$Oߊ=Z>#lۭ=UL;K&4eߟZ`#+î[(x$ `8*ԧ4$8/%%AJ)vKc~05|[O .ͬ6 }KwRX2MC2X%L%߈{?C8&ڨ[F PAh%*!K*MՀh)x*ǮK } d'us05|K[ySVB0muJZ&a?k>֧PXwyT4㏅،1DsQE~Ҋ641(k-}9~|]F5̘&VXib%o]alV[sۜiD:K"9wTOƕeOmS^@e0ܚ)̲Xbm'JmmaJ-+Op|ɫ%,[xK*)2h7\ϿH>e"խq։duHr׷}\dyډ==zˢCPvU_+Kєd0Tx564&j w ]#ulV<O/"S1^.%S `KpFZv)%)I " O=6S,1+6_}N$Cg18S\LpvN 磀8J9|PnC +Nm}5d]$wCѦ?d?l͆5Xnm)X2L_cjۨkkRt61G8Y*Gg*oI7̋X?Ʌ'9^=>1/UP<)ͱF=61pz99d?;)M. xz47ڎ CsH/͒fOɘi[czW.{3RWbi6R eJ(#I5'yG*(^ϪJZ Opy3g;R$]P=NH%&$DSb (q8VA( 9 _Mb1?F#qŤ u*9I)Ix :+ֱ9 l~+V3+X1BGE%&L. g V8`*qko+6-M+0y)ގ(Kê<$Deb]{Qf72V.lp%m E+M'%_Z [o|:}@Inec֏d Z$RQ ٲK6Z)>@BQ-}@V'RGڔ< ϑxQDDJrI䑉xJJ%L&3+[۴6vl/U++Y4|Mޯ0~D;0h|\NHc.do6[&d_hYd3vYm+Y4ֽ0hcy7gьN${t$e]o"woھzu_ %!^O˲% NL*DRO:%D81d$uK!6LuHLg$RI#k7;7}fYLf% -ֶu q{JXX$v"iH#u=r^_(L8_l$DF8 2䒉%4pa #O Akw8})mg\Ó?UE~YєL[xUkBqtXk]ޯjcuWkڱh)XqG;c{K7q!BU6^(CAqq: qЏbCJ=JCْg+!$9O[3f?,d&|O2<1~Cs__ 2iI$A@]A~0QS[{-+vIH2I.ÅIii/b6z]~`H&aiadbĒH? nHEHLד4dt(\F_`^IƆ#_L2h'D2\Vu?rw+Q}H>Q?"Jޏu,ȭ[9-@$y ꄥƈns"Ku&iN 18AN=Cy=1RƠ(I I :<͒75͕ca$LUPQV8ׯuW{%kBBXD%I]"?s^yswy?^:ggmH麟I~x29 OӲԍk H&4-1^&6011`?⇭ٰF˿KJ+׼mHZDX ײuWaxp2q^uWն,,uZГd]<~!I˾rX.:AvRԒZjJX5Ic>mx'oPEAQm}Ox X`e?|}¤ݼ}Gۼ#0.ۣպуk;R1RI~+NK/~c.?9Szwg@\|bG+f;A&QJQx=ee~߸~5+8O[ԑI#I&+yZЏJCXۍi"PAͪc!bmG[ 4K% rh&~Ze=KtV#FR "xb觲đ #Oc’H&4 X-vfl!`!p!`ڎ$"'Ć%cLrXZŅ$;K.HZ$ 4 2jCD#Ɋʅ|/d!`!`!`!`!`!ЂhiD 8L\Lf0tbq`HZDI)'[HA֙L2i}>nEuqJ-F$?ab zeIDL - _cad ni=3h@$jzu %CM;S' $]5LCbx5e>5p({3wEXϚ0hŪڸX߱d8"־@CBʲB~1lZcι)'dZۓERf-]u`vZLZɦ |hv眬}EW&]AMZ GAxL:RR  ^D/ {S\)g|^~bGC{VoH #_{*p6nU|9V1s#k#Tx'֭+ݷpit攋y\^vVnB1=)V=Hqh0 d=e󥿒#/MK T&قR{߬/ vE&av5ujk/-^J;IT!uWR|,4.@6bvT@#KY- U|8YgMaΔ ˌJc5g_8B;i4`- tvH;wb}' oz@U@IDAT:43'wMk}xhtxyzGִSdU^G /࢟>''ƌ_cj!.۫iI)DW4/ 9Yu8cJ$y9o鰼pn\GS*[?>v1Օ75Gq5f$FI,VLLu>ԾU2 Յ$#m$Mvk~zLΚ,vd0 dJ4AD BV$ bhSh$Տ rtH>@6E ~|H&SF_yTm&#SAX4;qivQ@$cA5ae֥ ^ N4E`9:#<ԏ!$X[E$^Z$:ܼ$ W fxK4ҾŅׅ_4|{f,7*ހs{2y]'TYI$Gz6O>pGc7 s3Q"0׉kI$Og$>~ K$þ|D* ;0Qin?t[md{"\.8wv,qXIM CF;I}5g?H -ITu$(L65j%r Z,RJP1t̺my~fy=nOsvH6J¦9cO>XWZq3م?Q@{_H@ݙ9o6 tWKFgdAG Uӵ7C.㷌Yyٙa_Ghm>dE9xC%+s FlCQ,v{FCCʏm<~ /86|.hstbF_FgN} /{c3 ΃Pn( Ss'g| P?D^&NԵ8Z}eqmE=~ِnC?uv<۝㘌L:(\JCOAdJGbm-Y'•Nd?ZM>XսԾ. .Fs^dp.0 ISi6oHBjMXbAɸI^T,ogb/nԪB@_pWs9 K@\| 9c4D2Yp[\>i u0mZVd|1A@K0 $նSꅽ194:3MaR7*4hmڎ WӞQn?ti$Iwȏ\|灌bfqq+e)dPd)p̩#feߊ*&p h#Al,]KCeN-<k6Lk=8p2n 8O },3m'!r8D|5`T6.SW? 
qσt&yw64s3:ae?$ 4cbX@s~Z]N R>qq'uw&QmheBHLHO@睕D>NOꠝ68D3-׵Pf@xm/j,.A">r :HL2߭iRjb|;9!6§ca!g($oazY<&gA&,QuX #%麶 OfHl,$:{.P!URWqn9ֶo;2z'_|+`S Rß\.9YdɊ /g+|wfOeYc~sW,-CTRqSN{zsކTzn{L~9 rvoB93/;c ƻ('0rO 5oWۨk>ٙN%=qQ׏>z~< @HY3>}'wJF^')1="oJsh |Ŕ܇,/>wfۮ)8nQnvٓ3 D"9z >/a}hCbYfO|7؊{ k:(/[>#%X!2a.:pu0y^cnjGqV{hvt]$$.@iiAbzUT( zBS66dY9}Ա:5HL3F]ٚVO?G"qnxbDz +dpp?W倶 ==S{ LuK7{׀E:wOrzϔ@7mſxz5C-I.LG R3{ =AI @$h@ RLSHhv&SQ#ѮVqOVVU| !_ KIxdbYL06rNk@l-ϸiiVL/0KhuJD]FRLp_KI pby~-,UV8x/ ̯O%dcUS^ k!vͣ T`dЧdAEJKti.Խ~;?+kw u=JS+C Ke* "L,u=tכcǏǟPN918X#1t#) \XD0qgZ&{Vz&;NPIhpV%v風:@k4KG 61+ϝ\wflQ+"I4|֔ ?U(A<ŕV1ǣwKdLE0`IMJ<'-G-׍G)I"=tgPpҶ2Z쿌lMrF+[߼T to覾ɜ2C@ #Vz#] }3)7_3cn*ڟnQXE.&-Q}&gb:A]?/ﱉ XFgM= @ȪkML' cS7OC+> Fla4lKFSp]4ɐbB~~;-Bq[\F4m=;3 0$ >Fs5e{o${GZ[,mL?5:<`Yۤ'Bٺ'Hu8qqT9Oĭ5M(k8v\I$}>?cP!FWiяeq&IaE]6LS6ˉp#uTut9K$'?E#w迵!d}Θ&HΊqR5}j= "y Kpx'\ªMz(ZAmZ״i%i3"CX( ~D?_rg\XQuj/z+#|[)g{R$!' )/BDٚ(v?:#υsr:™L_j.._1ʼnn'V:3N blT W]P,UMn[ܬS]N̏ ̀vr PTaYAD܄ ^ 1V7B +[m;^X" l |+nz~[RJWH+XdbW>睙D))6Z>8Q _ztsQvvbž5p [gnV׿%ϖ1Yǒ%IL=m͇io?թ/1z#/%V*I38?7\htBxTFxWK zU7y^׼${܋_Kң)y61A"(Mԧ-HbfjZ}e>dFPtxPFO(/N}@HJX_H:nVگsqmx꩸Ίe*Kk,z߸gO׫7n݉ ؔQQQ1Ompu< !$%dÚIDJTFHw:vpjǤ/jMwwARE?Ѱ&m;/%0~?а󒉉%N C/n? ቉/I ?L//&s%uϡDrf`&5zṢu_W20jtXHogaV5wu/+ 68zUDPظP a\B۠gR=0?6⿹ \>wh2.SoH$]Rc,tũoeo@bC0F$_|[LEnviI0Ri)i?JjH%­ۀٿ\FJLz-r3` ƞV*U⡱M4^yD6,PAUk_eZOj'R@юgRL_uAdp#E dM2HHr&0vOӟ߭K}gg$~A>]hD웪~4f nN_|W/y7H#УYj} ҬIf8f8 $2v.ҟC7^F' ~(Ēd>Q,j-ZZ4Fb7R8#NM‡L"}CNK+Bn~Y_A+z3t"sSh 6Pc_v\RsNOyKLY%NH_4{vxBhK9Ob!!T?X4|azu K׳U!XcmqO"%D֖ #OPǴ%Թ].0zà$si:b\l(QNF;}Ēu٪*ށt$mVv}Г,e>1k4,"YO X"V80|X[ŧ%]s~uZd-׷?^܊|?c9IcvnPwg?A8vdyi,[aQdZ ;򫾹9'/zj0<4VBLXі`'od*[Xn>c0}W|Q(ۘ pNyW'tRQ)#O[z{qJHnt6K^8$_Q[$OGXSA`0Ēk?庅vg \3ZLAקJ`eCxg#&طbGHw [)&XD2&0ZXX.cm[OXG}@z*vsu$/B۷^0>\XyK);_F9Gm)prɗ6@P(tV01_SW]d}htDvӀۓ lK.K2yqٓ@/h+Z"" co7~zlGC5+?`5SvGao]TdXձ!v8\jp~z8ʚ?)# 567?M4|"!;|J9aFXF6d0V29YUC5Oɐ7x/`In1C;lSogx`;OA29TXoo ЉІ/zg%HtXgl(bgsHp-,fyBҺ;olGԲVlXҲ1A<FvڳBNԍr]@CZIor8fc?xTY) *\p=#ZhKpcE@LB] GP^]ʔkށD .%'ks(&c {pyr&/O}cpIXNM׹^n̤1!禰QWьɚ:TӔE+]A~`[gxئ{{[ Չ5{iʛȚ>՜7!ˆ H8ߑ9f< ^oGL]Dj#swhok |]o|8_q/5d\6"a R7ҝTC4֘##KK2sڎj'HLMzuR_;D.Pe9r&md' "鸹5 !t H$ӽupՂbszo[DP|bIr:IНaY bۍA\' smuJ>wkU PL.A"gP=n5r;6sȿ;\eᶦ+: v]h4/tW}hu6;L/'oدv̜&!<#5>o͔B׎O46Gj.1@K=qa~oz0^?Uh?xaLfx^}u"an>|1}ΎGNmZ0?$[ƶN=Ef9)tܦ|eH=?\n>?mc7Bk6xb_ 6"2'gv2QM7y(<a,t{*uEp01rvL'mh'H8&NNՑTi&\PG$g\ bHnNBҞTYeGK\ R4/;s&"$\z.+'ns{ܞ۳N7Lbg'liQp<2wh)RNDZhwMT <(k^c̳ۆOځhpB-4y;G.wg挾$]'OX *p1iGh7-}(ΞචR6 fA ԗIZEaœ t#b7DZ̩p>}7 >Ʉs|hԎ3lC;mA'i9A6?'y:nYp 2*9hmm نpzFЈVe-=տSIM^O?v|z"=:gsp>$6sJУ{aỉ% zx͒Bodjmޥ nwY[hǺvv߯ DH@]0怈Xl͝i=s&- ZbHR =C%$[Sboy+v$dǏZ"쬭8Q 2ǛOk(H't3Mt-߰]C/S=}0m^&|%IyUD(]-_]<+A+~Dw8MZ·Gڵ^IxmI5t<RVhf|h8鞙E~nK|ιS2x̜ :UIvo혆FFZ0ٮ@ <Ή0rm(}goP YZRB|819cJxKXvg/a ';Oy57Dil9<"OoO@sb{m09@ڜomZ^pO،Ǎ%C,ϙC;c7QM(MP,0cm#w*1-G)|;מqU@6˱_Q5$l O~eܖ(ئ F3 &O=>߱"ƽ#7' 5H_Qnvٓ3߃KEyԩq>gIs@ •@ߝ98]b ϩS M{LH#;7zP;T\bU]x8?8C!9}]=B 2bMjM VV\aI%1N*tvǶ@ D\,5UeGnʔD.jA]C@^7o|c;IY7:k@'H³SOk 8uM+*cj\AHI B ::G'r&Sq#q%6oj{~tf/Yٺ1 q.=VqxcLd qJPv: .'*K/wQ|ƷJ`Dfaň˪$:LXwя!4jd55_9"8>*=}!c\4Bd,p oC,#Inx_ZS\ܤa1YٗP["~a۫ySSy5-p->m~t;$YV:lPֵK^= YDyl5ңnx#/| -ނSJ6^hk7K./ѧR.gG^oK88V]LZ(%Mm 4uyyM7fҴ`^Zi`/qO}]Į40.Σ=>^w ~͵&=)Co/p٫qA?>N$[hgy:&VCɠwr%A\%/&ڮ8κj7 &qɥ)͍C@|x}LSS ooc'\( I|'4Ϭ XU2V ܠ9;~Q3i'ODO ټlxN$9Vv.N: H 3Nh!d{:hXͣ8LO;p[tkzpB*wAj$h6^䖭Ac ]TűIYɌYZCsf "iot ?Q&[y3q 2K&:;PўSs9;ƴmթ6mWϱ,<=UP͓l[v~ՍuP*6< Jȇ1Yp>$}{S<mzT~d?>tR?\EP 7y/lx*CP[tgW'[ հXq,lrS@UdWnl3ڝ\#4&9Mm"Z=~9)L^6XN ?Kl,689P'JQ'XC, gIM,? 
*͜8CV!V(1;$?~5-_G許gr\(+!y*vٺi`uN-pwt{àkU)|)/K[YӐheh޷C>5+=]!w/b]ZSAKZD眑L3ߥG^5=\"7v_nL3y|l\guS麫[Ӷ҇=$z!\CNfizMZ}FU2#&g|kίm| X:Mꭽ#h0j1E##/8 T[[-5^CSIㆎ.;V{{ﭡc>"WЈbA"\2U>O ]}%08nhr6܍'-<,s'~nx@"2cqDY $n.k؃2-ն٠4`򗚂zǖ)[6&XsoC7͕GMSrSǸi҃pIvV2 횎8r|m𴶑:jsZxXL,k@ x9>K͖?(۫nB8gW7]>ܥn}dy.?xV;PQĝ q dV6QbD2 kBBG 0,ubbTMg;Pny}Qo=#~邳+׏l ,5}ǿcIel82i֡U7g-mf#؅$eb"}<=66? ]gL\>k22ɶ_,ˎ ,!s3HdQd1Z8H$oݩ vC_dzx]ڄ xtI7/^K'L vbNgBF }zp'hŴxY_/rةckoKi?ϝeF򻖩~mDz-dy@gWBPUENasr2Ξ2aec±oϚ6[zOv:-{*V\rT6s%Zfλ+kz8ybWAܕ|4|"xф1HmԣjE;vϱl*Po$/6G`iTKjy`; qa>%ٽAS0צuui!),SOm#_o;,_+s iͯElx^,?Ʊ!o+5 <#3/A4|Cqٱ4X_E#C CU+#u]kC{p9>%r2nirzmU!?\vk` XV-Sx]A<-rJ,||\?_La $8 '&jZ: i3?IIq^E."\X(kEx٩C{Jde |3/;Cwnk앺rщ\|&.W[?*D{c~O ^ɦ[ 7jMKC{'X5gSGЎjԔSOݪ?>2wL+׆K Lfri6.YcӺۭ\wyNG"X~ P'8 t``}CRDʞ es9HPOj֣ø-0psL/'Z[GQ1s{ͱo0QSsZ^.>pv\".uP7FxY&T519q-\ASeCD<# rk#`7 &g6&N qs5EwڰY=s9NJWI}@BccsxQ߯D'̗ (<"=qSߐ^ک83Vjsry=-K)Cj/uIR-5lH,]/ǣRhF d%x[(Y-TqKIo=Q U"z~gIaHzpnRqQ}XG'<9QO? vЭmҜ faRIaheRP"ڄ@IGrsXBTYeB$\nN&M,q &Hx]~s!$u)vܞ5u|&aF`w>̦gJG8t120y1φ6|o%&vg̜7@\1Ak KA>Gg:&mmu e8ϛEAgvď}eo6 !^0L* +y\0+E$_ayG4|UkGFge/v5Hq]Q??ㄓ Eq ؄אQ2 Q㘝ɚ jc`mLm3t9s!%QTETP6dr :wh2-F % u{m1d,l#Ӓh'#s[];;ɿLs$$RZޱ+Ikj;vW>;t)%}fK?LN|*J6'i5`%3D0|%wy|B_rM4;RHR$*HTj^nv[:iB=|^,xLrplfѶB!4~<7[㼍6I$$7A$)}7'Q]pseqmHChsE w];<(ƀy~$)2e\$rVE uIQwfw VPGXI(c䟈R{% آ(]PAu7{{q׸{Mygz>V0'xx6]/CϜr.D&YHNHs?=A訪ߊ?0`?NX[V;]1Fi/潪!"~,tp4e<}qk8U:\ > }7)+n06c}w^;mG 旁6" c Adq8F E[j64qj?/Ӑ"R$$Q4= L87pĩ01g?gnxs_ljFMQSdbms\<&NAk,(EeμW*?sP 1뮪C[Mp%7b%`5Z&u0mF#T-m <'΢QDp,x 㥍Pذ &Ȏ + 9KZlnȞj 3StT@||-`'K$ ESZ~ޱEӢ~=Aajf)䙳Üa&' [_wVk$oAwLǭOdƽۜ>aCB:9lY@V; EPy^ÌYǧ4|3(sgON1ہ/=P,1{mf;Z75gk`S:_7c'a3z_`ܭT@wU1wCc69W/h>podߖq7;i߀_ 8:vQp4Zx {DH$Ҏt;DeͅT)ѰհA'4@I7̋Y&_p)FXUĔ䟅@IDATRx>51$n,/Vbaaukx%C?⇝ر&"Q4(3"NQS[|SP~t=++!5y^gUƱ42}&M(1 e]Z">q*˔C oUw6:;HwFsڝ'{TB< sPpu)! AD|3z^#D[7Ιs\V/c%|uP'e?0v?KaV$!C{4ƁX<;R糧f ]W\^M.^MQtʨF[[#YhE)1fpIi9 ^Gzȵ'}kQ; Uzs[u e -6JƹPڮmiAڏoa32_.U|na?9wn S ç5x.]݇A[oۭۜ]Bx+TCx%ò>5oysoꚶvX.LJƵO4װG5~qQ Bʜu Cu3lM"LgS}vNh0}o0y!D0o߿AJy3B v!Zßsuȱ#moSyFߛۍT[™Lo- fhd6|$sm8Rq ᖶCsy3س\-Z#z[澬[GQe-^Bc&L0i7D'zO"h$kY7 ʵ KY]xL&2""XDyQ WLH<ޖ5gD6;Tl"cj'La2i|ϞSնTϬ|}w OdOi6ɁlLW?"<cbgJz"}YguUAO뙶ayTn͠xwm`8MjThGHֹbΐ8x=ڜy9mvC0'%KuwcG1 {'t>4M*2)drȎ6NXfi$H`Gi` Ht $K%a $L"MIfsWѦ{W_*, %A!>ĝzޱCj?r Hn& K k-O&oIh+鱻9Pd%xI%K"Nfi8oh M2!3!4(X"|X*1"YfH)ڝ)"i݄!tqXȋM}j7~ȸMQkteVB%3Ɲ0jR$ ;I@I$•@„Ko!|^lL&9cdl`q])@0"p̹k5W l, lNRxa/R.AS5/ ˌ@a!#VӲԍkLH&&4 }SEL6ؑ7`gAgΙ9\s8?ב# DA@ʪhwĸh@H.L,o8mydXU[O~P7/s%s\O>b06+Sܯ֪j镏i g[W7k~8rG( s ӵOZm&,A|yM{r99">Fte`<2r\ uogq0%o?+sQrk>O܄ >wsco+ȩfonv+@Otۓq A@`] 4/ @" D2XgVKA@.F@d,    ֙A@A"K  +B$ufA@A@bHv1Ҽ  `Y/A@A@!] 4/ @" )ufA<ۼ<kNh#ݣ߾y\ւ ɖPc Q44rKi<*:r8D2xh Q\T%FQ3_F73A r L Q\A ڤdIRhkޯ[EKVlk.: ד~G4qFk4xƨx4_xQ5K0E V].N+Y4w/>ROͮTZK]|3dJHM ?oVnOHI:]I ڲ:8#e㏧sO: " dRS|ѧ85 u\-y*uKb QbBQ!Ǽ[ߛK6#(d=^8B ױ(TYtMeoQ_x:{C!D\6^ZB-8POLQ)= !=lBd8 й$rI_/DA'*RˣzUܓLs涙Q% {9353'ΡU4e$D#UM9Z#_kNCfN5u4iڬJA骞*&LWm/6pR!݇$GVg3;n[}Db]/ݱ(,odv5&Ш!}b#G`n{K|0Vn5KW, B E3 Kq~2^ &I?Ic"g{@1/:FoF*4߷͘=΢i##zkt5PN0-a85܈LFP ci8Cۧϩ4c"? 
E|DCi!hȶ L"Y͒ʪp:J:X[R&kFkQND Ɋ!%J]VXJT<fpA\BWPڂ޺[yBo| FPgd Kta k^[j3 Jyܦn-:=,2jC ܳs&͜H糳Ƿ_$6Hʭ 4kY+3:ñFxȾX,6JTR#mպ(/KOa.VS7Ȝyy*T~칃(hu )c)p~uQ܅?R}w`cF5(rP})!TVJd^ߐs8h sbo^߭ ǛHB**I ,|nMyCkWFf|Ea)F}}GΤ)QTN_:c2$T5Ǫz/,e>wd9[7BHbz5]R, H^pr^(^H8#;؃\'D aA@\D' "ɆX'3-%| Z Lg-6<&lTφ2(Yx69]5TK;EEZ( sf_S Ա[mK>Ϧߞ`HoD'ƑL@HgH>g̺~ w(\ݠEkh?~:ݚ0=r@P" <1#!veMkbh0 5+],8TxT lDBhK .Uk.q\ v|+MzDj1©PieA$oqia'|&5%1|%7)??s>mYϸֿze ; IIIX:sUsR[)눑ɐ81̅ {_%4+uƮ58& %tPS4$[.ye륯7;pʯEۡVݶӑp7pf g̽"t*HbU)Ll 4!5Nqx sրwqNm-qSw @Lvh  A@9Tmnk*.ɝ_E 7SI%DK@L&AHY֯-Q?"O1YMs4T츣FبCx5Z`hM7orAvc@0bI;p ='NMն*ǃuk>Fo)5<) ^T}yH =^'9Tþh8aBtþX.hQm@FTm38`]v;/1l Sn= j'g9m ~JW|"dР.#At7&%W({ unWR'POFtfJ@uIpbM0%~ 3{@#>e}c)w!o,:J"ziU}/MI8Q 'd45/@ 充~K( K;Ǩ]PW2U92"ю@^T=pRdH 1LbzC,-.SҟDoHcm& $w rD?Զ+H$:fTM5~4RʺY,~e-?B=GC^ cuR&)|H4f|&S>. E$0~xYLhYk\=9 )|(,wK,rq쫤@\Xy+bmX6:q}n$򋝆DlʯPd;ZVKXʓmK" DRA@JLs$HdPz|LÐFQ/jvSvMۯ{+(d %%GW:Y"א`! Ca^qq59't#j 7xP8ˁQ)0+>tbԓɨaIn@=҅bJ[.,Idv + w#_qT6Á˚IJb;ߘsRؠzq?svdq!5CAvwJtrv(n[v w]ʞ_atPϟu, ]kAEpUm)!Q 6k\C~f $͝t+mH2tK;Ն+Rl*݇M%O_d׼uPSm%Dz# YNXhu DspVA!qؾ+tx9`Ŏc1=gaX$*y3~cM{Q_2R&ޯaC85+ǰ¼%;f"o[̆cmĹ{s=ܙSwc&Q^sNc"(4XL}i8xD˶oU(w߶;m&a|@6B.gv|Rیr8;_W/jTH& dHX74iƜܼY=er0:cBІ2a*KyӞ48D㆟?"P%V#jb+VZ0PYqTJ!h3,pNgڙ2G55is{Rm2^!zݲ-M8 y~3%*rL&r LS8M LJd ,kFaS^\e:U9HfV6>qCpgNh:L7s8cO'w[/ql@IRq }SjCF ?h* MYp}`n_$^+!^[y63/4m46/j 2(_r[Ynt=SLtFyEi/=2S@vDF[WgIAzǞ$J{@x ݚ}wG50\VW_>"Vmu=hM_u{d( "K{Rǎ=^IAU8pQt>wWGYD3b)uw/\2yc. s ⻮ O.w|D5?Gp;I޿Mԑ݇?O/7glͪn2y-GS5 y?n~1s:~vlH&FI6Aw\аr&͜XYS^S ,>OӤEcЋx;n|ˌ]#mil"P8+P;DY@Twп~I)>N7: \8N.4i&?>v d8=To\nUu_x/ju9s/BnU O?8MZ]SO"Zm<Y:dR3}有:HqP_b$eݚe\V?WfU3G`ӋسHk^jW-!FFEoo NRJv&g6%on0UY*Q 5bRa059Vw6NuI{ڐk'AA@&4MN&m6..t#fٕr,eft"~}Znd Ɉ"_CO%,vd/ahCiE2YqZ(rk>T NP=HTWq M"`@5.pG=L{{~pC t;BtUGN$MT6æfZ'Y`-lYȐ׬̥GBҹǣfǐ┡Kpt#}i4׊Xӊ˅/ۣUêPRԱj! @P!ĦyNoKkT%;A^[u<\k^}7dх\faIcޣK)hۿo\mYRX ujYv(7445ʡx][Bv Sh][ÃdiDXw}EQjDs7Z"M  vX$⿴ʆ*dV&! 2J}bhїG+U= 8U0&\8 p46 AXbC}jF"%7l%;o|74"h2ΌwQќ7WΟ>.ˤɥ]ZCJ1p?M!p*JlnbP uA `@nөf5a!+V(Xu#vX>$}o+6T Qdw/Ve'#҃FC޷#҄,"= 9?'w?A9ڢ.Me <)V"nkxڇۼncE,,}4AmR{5T:yq'Cw6;քbVGѠ$ĄxkJnu zeISo>䃸 [ 8\0"|O xF/vH[YMHbU!mUlYcMb74ۡ']@Ce۾uMAZ#N>z``u+;}$K\.1*hGةC>ZÝ,m)ݙg4cS}3E>/ROqo6tm1 %:qe|۫u])ܟ1o?xhH!O4 egj =x`YJA~y556\:J uNe4 ;{b6h{ Y B$pdȂ@#`H^3_lI\P|I* ^ A$+xI&TVvƱSM5琮L$G .0pydklZq8s{ ;h[c̽7_ N[ #&F\4y"i DvCr@"M M\Xjvgp?$3)Nc5(Y*gR[L2brĒ5VӲԍk/-S^%RE  Y1 I" &K8b7aيm&klJbL"sƋisv*%L"E `EA x`R\LPؾ$;꛿LHŜ&y382kXx<ϧA@`H^INto3aRbxscm8`ݜH (]\x^cIV[L*yv8$E!8k2fAw!d \mʒHiR2} ϑ\I~ `k^H"A " ( |0Y5Kl&&Iͅ煋9G&4 o֖  )zLXLig:ؘRHSi{@GfM"ksah.#4pG\^銞EwmٺNTJnE|ܬV]  @k=~),וUU5w(ztDl"m89˥UTuvΙ樁ʨj6/ Pgw|.}ÎEUs5]e왟wY4@3s:}r?rw3 t>diQGU8IU49,VҴU-^p!'-e9xRժ/~oӗKqdOG$+Dr5z4+5~uɖ!}ӺrȽ9g?yr*HQgx׃v"9){^n!<6[fg )k1YdTRm} D۝%\T3=W,K2'ؓwWCk &AG$Y'GWoy&~ {4h.5w2Q0??HdmsV`91:h V(!pRKb.ys6[%)]Kh#2O ^ 6^'(H>=Zdbj:,*bzP"!'VsyEړŢ?TCwA^DhXtәI{?:: NYg͚]E t6{g46$$=/8+2s5zȍFzK5t1ip4¬Q.Ez#>UM<4-Cy.xNxn>ٳGnfMBD}~aH .z(Y2Ҟ$%ъq9S{heD  58Rz<<' لzƠdAOZ=0%JK}A 72DDm3foJw) @0}q 6sE _$4}4H煅oe?c̷{%ɉoS|0Oׯ @!;v+z!99dE tȫgvTZJMz (]36=O2Eh3QF;Zu#y 6;ûf.{1Gr,ҷ t/AC$ 1wvai!NO]߯hN>ssJs@n~$"Ąʝ8ӏp5h"U?Sܲ#A|,[G|pH=n#> tAmR.V %ESF a|d5BxhgnYu-}x58?EKg=,9" d7oW=`xЗF9*jb7;}!(o6:y]y֋T]W@VЉo'Π>؁to#wG/^u6M9ʪv_j?6yFwS)#WFr>E xsqQ}]M[~$;EFr<^l@# NqJB $@+6U[R.WwZAEeU4Ϋ2if|t]|q4zzh@f2Ѱ*7[.;~ZvG:]74GKз+Pxh;ahѻnZϴz^,tt)G^bbz`0.GX-a4%@-XJw77[ zɳc@&z#% ;^F+6d 3(Uet_Pq F٣g:;!f L.جaDނ,5~W<U*tu_Ҿ D"Ish),$XVгo~E!t97z)B/Pu;|#Ye9A d$SUz25tܰ ]GUi\337Hlq7Xq: A =3g9ţ9œo ٗN3]z O ߥģ H۱w oeqw>:"҉\YЊxT;H$K"+.3S^D%ʆ =" c]PD5$"K']1ߺJ*.a I䚭9<{if"bRjI%2-{ALC麋IM}WgÎ}t̐,(EKZR!!e$. 
A >;#r'L٭ ` |'@ D:jS#xnھ :lV6Tr{pSQKas2 s-{>I+Ȉ>r]q:,zQCl@o@@dgO~IFk|~+c}`Ki>T%g$jH"#)Rjbꓚָm9dtŦ=:md-:G-y5uǦGEjՃng@wM\k?iA"yv Ɨiяj|rA436zWcOv]wBQǣw]Ig]q9,腪Ec3ޟIK3 8\]z dKج&{#^r $\ ˪a `9j-^⢣ Mm'2L$Bg8.?iFBlCւ@#0qDigNQӑbkBh}t#g*GwA݇VG׏ϞyVGے{s)9͂Ip>3z*! +<|+EE4tXzg/TXrJ 9{kA Ў7 i},?aGΤބzSq,$]B>?Sw${F }"%&1`ZA)>Mu z7,%rZg$}A@ ,ڲ$Uqf%]ؒf̊sִ0e3cD u2ܲ j3o7GuKɖf+Arfy9&KA 曳Kw,Odx uE70YľNNǶ5 HidwGJ"W+[c)4)F>8Lj4KֶJ$nj9C WxUUsIs =zHQ]\V+IA@xH {'yȴ(vJL @p59'{:7ЌA]I}SO9OGNur8Ǎ[^c>Y  ^B#5Qt3@:eFo-c;CMơ6:ęGCJ;n%.J##<ҽ L tx9,%"BikAWT^RKȍzRz}-^0];4EK@ Ztl8W6L.ˑʐ OSXh,K9DrX e\wŋhݮhbNN8 z,1%Gvn>݇},T1(TTFܝJ_ ~̥GA `f.rBŬjY`rb ! aCƉEe ߐum>b:yŒ7_?M^^J I*҅QFK zKAx{{7a7!vOذQSRQoV6$>v%e;}͸~F3JYWZq;9}jFg߷;>A! DٲFaHgOWw2eay#cG=;:>0 ]uyTTȸ b7!]Пo!/A9Ds H""v "D] 8wķXPCN [4iY!yZ|gp龂"WѸcb>rL0Ґ˞T[_bl7+[rGoVͥ4E7vaȯp5;C-f/~NA 1OClL< ۭؖ"ANธR~i}Z`*c9oMt1A%E8-DM3UˍN* L}! .AlC4)$U?2j-f[ZXNU_GnW^][@+Ih旌"u=%F7AN8(,$׬qH% JQ !iJMŕhH hO[_]btٸ+.'l~GO탨Wbj =wM^z>B$;y%KIo~#l\$2H&J!y8F`-!&Hfz)=ƗFOYS{2,[duٍT8#+5J+j;Q!Ί#%`SoBx1C+9 f%Jɡ!G)D1E]CÑ <]GTq,yaUgҷzt|m~~:;ΫVVFlOz۫nS_C/~x.&WSdXw. N~_VI68̤+ 58|Bw`,ݼ'>X!~ -g˃A@Ivܴew>%F$]3 a!thӮ2}3[B QT ~g9b9 N &y,"UTZzv3.MBx/oʳ3;ϼui.)lWRVI~5]itR&A] x9CI-]1téS;b~9VlԴYԄQ5uQ ح]ºkKMBvP1+,=T7Q6k^kBU1꽙>uV"ՂSNLb֯Bt zZ]~|Z-–wNdDpe3_$n{?_%{Mi5y&P߯?wX8W\Ux8?ru@bM{p!} @F@d(3ˤ8a"ڇo#G6K"ڴTdYjCȅm$;µ[)F iҧڸ$\tiuUf*F;&DD(qL{1 W~S 5bf6uIjT#JkY Ob=𮔎)ؑv8u k?y$ $Gr^8j^~KΧx0%'+p!} 0O16h Zԫ+L(Ő rrqunU?*C4O! Ċ )nMcըbC:d)O݌Y 9B$ψ3=>P~rYxjֳ bOQY_t(mb3SzsT˥"^A䅃cc<$A Ѭw|A^ x1ܨsM杜tM~KV1]]3똺qŒ<'β< [.li!A|UVi8+^JA QyvԽd₀ H;IiH@~nJntZm4o7>=:җ߯T ;x:ldb\4ΒSMEKOdYXjTzM&{lع!; q/N99";(|!8e { ߕa?|+KG1?? R@tdԯ}hz|OeR-UMT9UWnˏl+cӧxc{ih?Лa}ӯF@xU?D󲳆pDGI@&x0?s A4AASr /9'EgesMH2bsӗ:ʪA.&zCGAȝ=aVj$NHabUBs9ČʃwT^qj# 9!ߗ겹'VxmyeC~ʾ <6I^BcۄEm UX=0ں ^li{#<7bhh5cƌ1L?wA}~[UӤ~=lip L?R0DjgbVmJ 7 [9WcgSͮwU1:A{#0mqxEF2Xhx)ʏqo  !Nx" skiHUz*RJȊ'Zm3 D(xP茁pB0aZmyPa(*f!G4>Qo2O  ',dcX'Pq:ZRn"@DKl_WNtwjo֓V^GϽn LDN" '4'I!$.]M]Ԙ GL)`}eY [ϒ#  К8aD=ڭe^yF[a~cS6Ӿߐ O2ϧMk_{16:K2 EA@N<8 QX$AJDДDs GTk u8.GJh7 ,CFE| Kp"ׂ9  !dlWu5-^3TV&Qg7mvy56wGb5mC[POX~s+tT*)*@$t>}FC#pq4%w=\&vQrRјn_7KK&F}D_ez_~PN>aIN_ Y  P*mvrw붙ّv).5v(1eZ*>OԩIӈk^"gE_b؃Xͅ^ࠨN[&L9RѩWP!WюUsc/E+֛2RA@ABI4R3wRSnDTQtKi?PŁ-}C UYDR-dW[I='oԶYdmg[Oe7J]xZXҊjڑ sČZI[нѐ24tv%G6VwomeNS5t i$B@:ϩlm)vn*G޾~٥ޮnYrs !܁6;0HC1Ųm7uwHp}9ҏ  D'>XV- 6nI x+'wcRuzCW%x*mXNE;3Ǚ @ˡYo#fw~'uD V !G=Y BUIoqYtG O8#X tѝ+HhwCzOS=]_m@}NrY_ ,N#9əH29ݨD +(_oB4C@[ - 5ϣC!j\jrsJGW('-}bz{饭C2S{_Z&}ˇx*0Okzl\ję$ExO $Cl$̓@4 Ѭ=A@Z.H}F҃?DX{yܕ^ mY7Qg8أڀDLu}80O19(oI3Ef]T  вhDaYq$,[oXݦ3|HddS#DF`̙IT>S!TSIqw3G-򾇹H/'銲ls;o !kwO;/殤 hD fi~c (D34Ux0cv~[n0w ~mڢjU6Ӫ(اjJlS4CFp*`kcO5S[Ovk#jT0NZ j11eٙA!Ъ$c'D"cocK Œp)k^ގ@X6pIp|҇q996ciz{`lEs0f)Qyd]SL88$SHҔ! N[{l\zXkԺRʿ8jS灊@4JKu2m`n?LDv?8j*{ 7ݭsgҋ vcq i}{O\Zu'D2hJ)o@"h%ͦ:Νe~{$jxT N18&O.~D.̢h%/9*,fH!bJ%vowXsUs\{gQuC] H"Ք*P+Ł]uHA H? $FMIwg#Y ?9SV Z"y,$dCJ8@SRRuڳęiR)wQBȄ _bauv/mE.ot`̡Ap6F,p9RB4#l˄ۆvHr/V^BiA39Y K?>Y]],pXW^mxQWaPUuZ- u.p(eAp^NwhvGײǦe~hQ9u1Y *-1iqe,jZau2V9J >ӹwRx^nZAζF%҅.C'VeB4h۠ҲAӎvʡAΟvXXepҪVўoSt<Ӕr~oV}oIj\j sQن;)*yuݹO11d Z!δa,AFknvqvM 7+FrKp=C֔:R[MӖ-g  R%lTh?u^E yv~$A{_AU)8jۦ'1,ŌXZv} z׮x,:.7uBȂ y?M[>k߁"#R-"<6O1#Sy@ܪų/n)Ql]c3&mlj&̬WxK2u=r1Kz8'+5H+lR/-I$TLr5G׼Ng.o~Xi 3IsAYI_Wo]%΃WQ^5bQɿ! 
D33 \ Kޤ*LR89:a[#jbNW!2&NpcYI$zHOroNN B^viY9?p R0"C&@23L瘪\*BʇU^h?WE&y?@; $m`J frөfJu9mR}m- ͹q()JEԅ֒%',]X~y~yӼ-l2A@0F; ~gO&{l[wͤꊽR=?ϧn|Z`OKq.:^ vTom_H/qX]Lb7,A7,ɠWjr }L{63SʉM?!ciU UM9 "[˵R^X9˘~̩߷:AqZ͏l(ɅHZ!n-&hv TM8jŚwV61Fe`G/6GZ;58sk x.]%rs ?kӫFӬhQnaDgq.*֣za5nPݸ\:cH$#CI$mH[gib~<$9I.>޼@W뛑D=w) ߼<ފTq.7uNT<عR+DzL_ZB$[ Z4-h~n3dsD3vbsƖJ}$_WLV e6u'trwG=4ZW@̥AItsoKiwEҎz" WB}nՃ *c)|K]܎£ѧhQ^d%[nƭ1n:"YHR'BH/U$47ǚhVY{n7#/~˺w!mۘGoIK aiR֮ uu #%S5bRk/T f6 43iU'1^8 5.ܟ6;LB9m7?=nmgJ+}N }y6CQG [g2#or F) p DeC Dӎl,1՚=QG 6ϩ vMXC<4]ep3hsIy$>ol Rne%3ǚ䒗^A!B& ڷ2SARGGhHrh7k!9E * 1GUIU:|`*Ҝ)E4|87V h ьsiofro 6n6N,1,vwuc^Mk&O1 ) ,E" ޘʼnشHYH] ;Ŝeg|gP2NKWk߿5Ϩ2 Z-dbSD!KA=nuX?+y?߽̟F%QIQ|TVyȦ*p=fڷs1GGÑԮT3m;RWfzD_?{1z"F_9V8Z"Ci1*Lk]o3FB/B!)|A`u.KV=9g Tti} x|׷];tV3k7ښ䵹`|6Jȩ}U =u;b/Adzu~LFm.?%*EwǶʡm+_Aӯ}П) ʄigmsDbqzvJ6UۑCCIGפ61Jp$W>~`CoW%)x5pЉIl@d·۵z3k=N_ Vx?7$-V-HҩP5%" {#cSPަn2ѐ6qohS{EOVNk3=nXtmIŅއLIjqɵ]0`c*0c+!֘w Sa{^ȣ{]yr^^) ünb(F&[Ǧg}6nZP6 :hDO"a*im %ަ@*Y}Zϰ]]Ob(*FNDZ C?q@&& X!dUSEv/B[>7RrURUY&@y8%Loo&\/Tqpi(͓[q+S蔓S 1sdGhf^2K$:Ҫt,ں8 !a,%r@ҞC gdG/rKA D*=  dT@&(9*]LTy3YюuڿkUWw=9s?LI#{juᎅTPc<خ:@%khOR.Qaih5?g*M~ʏn;?{%L"s7%wH2qe|g[ 9e96hhh_UU4ĺI׫)=*ӰR{NߩGݸq9L꾘߬_QDC<]݊_LW9m /\m"Y͒ȓԯKOƕed}mg5C) <A geu՝3G\H D[0_p;nH}裝L/"m2q*9wK(6XW+,^ bZ QN^qie-yZV d׺er[sSO q֞:ZR7hDvqwE($2XM97XsHXöYGJ"KƓqe|}}I~z,D 7xWuRcv6zQG3?=@IDATM&ޥA"i}ϗ6}`F@eRb]-d:2 A@Hds %*AtQ|.W5U#Vxk*AA ByI迢ұ rl.4Nd`'ҪxC$H])Oƕex;w ^ء  06#ϕM,.䫎D`/]rwzZ[OAItst-mm1z%_>PZ^tFlNsM4uj8 D_9܁K?;vS1ln></;3Avхfc k@ y;})ς0}K вh5DҢZ>t-{\:sH/?ʬnegd $҃j/yv;Ɋ\m=.-G9LNぼb+tQ<-䑉c~ L&'֧Vz(=(b[wWym--GF 1 Zk6 43iU'1^8 gKh{i9U#㿶B|Pr d'mAL$zc]&!TgulOu6}Z4w޶ -@܊CNDY牳N;*~+➭5|K墡}_nA@h"b攕o"g~$$xI&TV9]NL4*&HP "7-Ƌq1@m f1$Id$s _4g@_Dnҳ ENE)>$Gc5m}ytkߓ&# k'SR{$vI񴹤>oo q03)ЫӹRs= {**2;EPחQo܃/{*=v,a16^8 zdZ>? F ZƏ3G'[A@8*Z 亭{foU@}{t#Q8/9dZ= !0m'A&=D}ҧfxAkHƆ i#K"Mն_Y .A B93(sV ٺO[6i=y*̓W:7/O@eZjdDI]icq)-/g02s.vCL{%;ױWVH8-S`˷+Fڟnw2շ;sϒFrE5jqN%MS@"]m]+Ͼm*29bԍD̰@rH: DRT҃7M*Å%J#fvll*m>θ-n;>sugA : rKĚ,-4%"0rsN Dn/~WElI xyfRbm{LeAZ^ ,0뙘]eg܇wP{Dś%NQ( lg{~`^l'@%) p" Ъqs\z杹E7\XK#&aԂXHD8a:Qxo1חO#N3ښ?,d&\O>naq''ugA 6(/L\_ڸ-$_Z19|vrDZVqY1tlvyJdǒF8jGDVCFzҙHd+&PٱD,L]׆_2 %F>!~;k@:suCsnVd_hND\!MUtsΥ.j_ v: -y {[:e pj"p8DsB4 $3IUmr3|d7㺟}qqࣀp<;;M%K5+ԝB*a?o(8a=smics0qJ:$Z4#`W^GBB@&d8 }.G]OC1` e?4)㔔hKpV͏8sԩ޴7]z!'--uVt28XOe;φ l7A\AژpWyd16&}\y 0N$n)QE*,FT-6F38˜ U,5Xa=cю!  PB$CEA@kYAyPw ^mX4@ Ί?G™{ؖъ!!UANXjlKs'OK7lA@h"B$."0qJϩ Xv#`'H9 LDTaeiQoPUALO:#vnR;n &=yy                                                                                         g^1>cƩ'eaB]Z]7'}fmuߢ,H qiY1T ݪb|4'+ĸ7RVdj1t5:>6=%"3S!:6^똼+kx&ѥl9XXUil`mN>k1Dr9#<]1 M 2JURaE[>]?n@#[T\xݹx(="EU]|(54@1OOuC06=yYsHbU6h\z>/;K{V}z`zdP)NlW-O۹}: e1e㚹4iY#yy5O7򚺆cyg͏1%UUym!x@Ӎ|ܤ5ŻTy܃_YYoslAϊsJQqqP5x-㡏g?3m]=g&eQrs2^9y ӧ~~GiHd;- X2"sIݮ=o(պ˘|e%;-U[ToP+۹YicWávZ=}̠3"9y>#v֏ǟ ~տj)==xh-X{N4@ 'hF& eu}5렖{Ǐ[o!mI(5FmT뽱5djFxC$gX/'s|}>ETTyǟVŸ'cW$XG͸jsӧ5s3ӟ񷳩"M +gBBڍ/1cxxq= aNؖp=Z77+z,QO8pL}j}fc_`kgZCuR^dJU cmfpϷ,\`_ABNwXtºĪ*J8Wp{?`ϷMM~aSf1jSߘ.%U( a"] sԷb ̄8smc(oຘkmk%{(*r! (=wo+o)~m!9bjl>YA~{woX{9.v-ex2T/L"A1A/?V7a2>W5IASA[A鋉Y=iYA"9\F+BO>[<NA=_BpePvoyC 6H]v,n<Țr=Nw.DzO0MjURMx@q{ydbY5Qi(=q3bcg¶vh>Z%^p,2_V(.CοlY ktXvcz?>Ǡ^}5H\R? rh;=>We$$yL%Mux݃Gr\`xnj4-ӍVv.nxS{@X_?7<5˚~X;x<"ۺd b|$}>l utWoj{OW~.:'Y ~K$6"x~ ۼX,ʋZeG<~πZa> a(h52/3AC9YԴj{q>4.'f{FqVr\Fn

[binary figure payload omitted: image data under neutron-16.0.0.0b2.dev214/doc/source/admin/figures/]

Recovered figure text (OmniGraffle 6.6 export, 2016-08-10 22:15:39 +0000, Canvas 1, Layer 1): "BGP Dynamic Routing: Example with self-service networks". Diagram labels: External networks; BGP Agent and L3 Agent; Peering network 192.0.2.0/30 with peering sessions AS 1234 and AS 4321; Provider Router on provider network 203.0.113.0/24; Self-service networks 1-3 (10.0.1.0/24, 10.0.2.0/24, 10.0.3.0/24) behind Router 1 (203.0.113.1 / 10.0.1.1), Router 2 (203.0.113.2 / 10.0.2.1), and Router 3 (203.0.113.3 / 10.0.3.1); Address scope; Prefix Advertisements: 10.0.1.0/24 next-hop 203.0.113.1, 10.0.2.0/24 next-hop 203.0.113.2.

neutron-16.0.0.0b2.dev214/doc/source/admin/figures/bgp-dynamic-routing-example2.graffle
[binary OmniGraffle payload omitted]
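The recovered labels encode the rule this figure illustrates: a self-service prefix is advertised with the provider-network address of its attached router as the BGP next hop, and only prefixes in the address scope shared with the external network appear. A minimal stdlib-only Python sketch of that rule using the figure's values; the topology dictionary and the address-scope flag are assumptions reconstructed from the labels, not neutron code::

    import ipaddress

    # Hypothetical reconstruction of the figure's topology; every value
    # below is a label recovered from the diagram, not data read from a
    # running neutron deployment.
    routers = {
        "Router 1": {"next_hop": "203.0.113.1", "subnet": "10.0.1.0/24"},
        "Router 2": {"next_hop": "203.0.113.2", "subnet": "10.0.2.0/24"},
        "Router 3": {"next_hop": "203.0.113.3", "subnet": "10.0.3.0/24"},
    }

    # Assumption: 10.0.3.0/24 is missing from the recovered advertisement
    # list because it sits outside the address scope shared with the
    # external network; that membership is modelled as an explicit flag.
    in_shared_address_scope = {"Router 1": True, "Router 2": True,
                               "Router 3": False}

    for name, info in routers.items():
        if not in_shared_address_scope[name]:
            continue
        prefix = ipaddress.ip_network(info["subnet"])
        print(f"{prefix} next-hop {info['next_hop']}")

Running it prints the two advertised routes shown in the figure; 10.0.3.0/24 is skipped, matching its absence from the recovered advertisement list.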
[binary figure payloads omitted: bgp-dynamic-routing-example2 .graffle/.png/.svg data]

Recovered figure text (OmniGraffle 6.6 export, 2016-08-10 22:15:47 +0000, Canvas 1, Layer 1): "BGP Dynamic Routing: Example with floating IP addresses". Diagram labels: same topology as the previous figure (peering network 192.0.2.0/30, peering sessions AS 1234 and AS 4321, provider network 203.0.113.0/24, self-service networks 1-3 behind Routers 1-3), plus Instance 1 (203.0.113.101), Instance 2 (203.0.113.102), and Instance 3 (203.0.113.103); Address scope; Prefix Advertisements: 10.0.1.0/24 next-hop 203.0.113.1, 10.0.2.0/24 next-hop 203.0.113.2, 203.0.113.101/32 next-hop 203.0.113.1, 203.0.113.102/32 next-hop 203.0.113.2.

neutron-16.0.0.0b2.dev214/doc/source/admin/figures/bgp-dynamic-routing-overview.graffle
[binary OmniGraffle payload omitted]

neutron-16.0.0.0b2.dev214/doc/source/admin/figures/bgp-dynamic-routing-overview.png
[binary PNG payload omitted]
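The floating-IP figure recovered above adds one rule to the previous sketch: each advertised floating IP becomes a /32 host route whose next hop is the provider-network address of the router hosting it. A minimal sketch with the figure's values; the route tables are assumptions reconstructed from the labels, and Instance 3's address, which appears in the diagram but not in its advertisement list, is deliberately left out::

    import ipaddress

    # Hypothetical reconstruction of the floating-IP figure: the subnet
    # routes from the previous sketch plus one /32 host route per
    # advertised floating IP, each via the hosting router's
    # provider-network address. All values are labels from the diagram.
    subnet_routes = {
        "10.0.1.0/24": "203.0.113.1",
        "10.0.2.0/24": "203.0.113.2",
    }
    floating_ip_routes = {
        "203.0.113.101": "203.0.113.1",  # Instance 1 behind Router 1
        "203.0.113.102": "203.0.113.2",  # Instance 2 behind Router 2
    }

    for prefix, next_hop in subnet_routes.items():
        print(f"{ipaddress.ip_network(prefix)} next-hop {next_hop}")
    for fip, next_hop in floating_ip_routes.items():
        print(f"{ipaddress.ip_network(fip + '/32')} next-hop {next_hop}")

The output reproduces the four advertisements in the recovered label text: two subnet prefixes and two floating-IP host routes.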
Ś9ODD/v>+!@gD!'p=&@rxqH gۥiה45oF,4z' 1eAg )G'ܾG&&N7Ǹfjdm.s:h|&$&ߟ4U^-~?!`HY ^1Ϧ^t:dRaY_˳̇_CL~6aI]4\R]iebOi/Ny)x{,-M:&y҅Nҽ()IM[F2[&V:wuW$cRoASg@:-5ik}/e]-x ~p4!j[18Hˍ<" Ҩ!VZ.<,"MN)~g*Vvժy[2q¢X>Ht:0yYy.10VU<+ޓwjhSSD|btɹEe:W>!^l] e2W$$X+#V-eҴ}\TIQE~Ƀ@d ƤQ!_ Y2  v(6rTGȯLHtݴ 3$M`~?%L XkD#5E27wchXn 1϶LFaۧ)6&4兤\!.ԩk'd\4Aۢ&Q@ŘcpQqO si/̃[[ OPC֗b l<m;ӊ,Q,Lr\]爩x+ :5ujʄ_τU~u)-mv{*O?vtRj& yZT=9}\A;͜>_ ]纠LF&b Щ2h˲5e-2MnF_/!9uۅﴑq!~ [JXJWNMF`EZǧ&f9۞3 Hg/&!sSph߁@H|\rPrho+z95{#?l8h2$r7tW=߲ ^OGC3hur+KFsn9׭&5*qbH:DZ @-`]TX_iҌ(/~E+䔫2Wj0©70xue;y2Oj,;*+5giwxFq ,ƫdʕK/,Xv>d;}Hs&YYG{DShhniOLQ*VLBB4mX+@ {2N xIQ4Ę\jk`]ϫMDZ{#3(qn/LGdg6g4{J q:5fi2}h9d+W"EDA"5L΄G.,01W"߿gM\|J'ձFTqb+fL\F2hD3aq%cC=-4!CHz ]s$Wk,$%GP?mVraxÕĸNm#OvQԄJdp^~L4|TN;g 7(ܥFxhC+uHRTDޕoa '9ocM3ŚXBUS"0!%A%p VNSM?q>yS4'Sx Bf:6'JLĚ_ ΄=oHeh >u?<W~7ʫj`8,g#(i'fI'[ln2l;[l7T!=P Y[zl'-4V4NjRghb--j>*|V/5\E=ѮAnHS !^M{ݍ1d.d#wl!YHLjLKl_NJB{(vؖkYD$2M9בV ~~~]E}]5ȻspӦ ^HXpo'[~G+M+k&NS&UbzqXUQ6dvCifB[LhЂL I>+u)a*KO!YD :4G7̴xW"{p!tZ*nS*$o)r.NeS8,gBFE gO~gBUa+rHܪLtEɚ8i-Q=#/s4&ۻǀ g߄p\mXhf:oß] n;qa}M#`HUۣMth3nb-L V[ZӡCX8|bLg_j@4lg^{ށUj0o-h| ez:&k50Љ!_bhilʍ, |ڃ0,i0db}EYLV9//9!LLbvBxj5eF񝍅qZ 3ߠk{uQ qj T8a4O9c(qun·[S!3Xݪ:r8G8s&$jD8ArĨJdcT e0_>޳;L$aM,c`yhKъUe0~[HَolK1ʲR$i;ɎZ+)0(նv  G}wi~+RvSkX8|Ϛ8oT=,@X5r 1?=r\C0HT\{sLV1f<riT(k I?U9X!bh/3. B` kD`eCUKUxq&MtM yy_Mݫ*y o5Y`+aLY`nGiyJ6@8ɢ.3j( R&XSi]AT3dY$n\{n6HPs5l}ךF?[[H:BË`֧`%s(9q]l1=/!r5?eLJJsȋ8lM :p:,&M2폆F1<10'Yh_SM 8ƚEDPqqX +M= U)8atqMFh|``B*ٯ31i [\~nt'Q*m }DF [lۍdw|M| jY$7P5H 9-`?xo(>;)?ԓ2`σ:y0|'lM,B~ l֡ M`O/|怤J`R+RM% Z&2K6ƻ=U{6ۙM\2Eg%Ș2BK?t=e`t5/ z){IFhx YAh\q>Kod]el,jѤ?&mW ^w%Gqeёhcɖb PTVB܄ ͖.SVP(?$dB.n|ƃHeNʵ@eU7FLX8|6X/{h iZJ~,|5',QL ף]Bo6zy,ȾBoށ?x@P}B5/ga[bw-Tn#'=z뙵^L-/e&Vf ?sUIGIq@!¡ojS!ڝ5j!W}RN8i>s,^ldOnigKաlsY"=uD̠ Q*nz3VuK+$tz&F".ƽf`~z_B^]5JwHBZ䱄I+(jmA 7rVS$ !< k=b7h$~6[r# Bh uB`5lZL g7` ƟjWm1ɭv?h H2AfGvJZ7)                .ˇզR 5Me  uSZMXU:|2[Ooj5eRF PRB !i 2id3*iO|t'n[;ԋkxjfTz\Q,3A h"7`;ES[;wFQ^ڎZ[PNYTUMp'2|im zp4ցIWa+]ujَ<.Ū)ZkRԤ&NXi.Ŷ8iig~h߀dQbMP"uR' i%NI3]?1]#ܹ4u-qurNxBOzć ?ڏl61EVѩ/$QV1&-qXEa">j}-5S/c~tڵ0uE];pϘ H霛l0bf$CUM?o;t Wxkw܉aAtEMpEkЈ!a녣 l 8 nT5m' P7FhG"pw hԶ(p$ڳn2l7?DvlQ{kM\S?'!%<OzxM&efon4m+w$ƴg9muPފitwǷ۴@xG~m3s0bTK'hԾ6&4):]d혢P VNL qGx ݎ0Kyz+c6]Bd,{S^H,&2[Q]ЍN445$1]+ϬGy+jq >܋$&4 9rK~An юR E@uEĶv8`wN|JRGRShc!# m@<CaǿNV8Ͷ:vz>NL xd(.!KT4o'Rʱ/k6[oBO8 ч&~;%y7aS%a'AN FhF"t&k0yIqFѽx + @S|[ƛ4{V ZrFoNx+ҩɼK+                               {xc,9Սk׾z_ei.{gWr+}j$gվEANa鏱pe}׵2kt Vo P*:z}U2EUIUbKxP&9CSt8f}?Eί_r?aTto\+&BjBLw,#wH|뷬Y58BlkI*@M AvzܼidQvrnZ(k2v*#G3+W*#2[wn v@[s}N3 >qgNcy بI΂)Lt6AdcKM=^F%6rmmmJ\"T'4λ|PMlx#wq{r"MxvqՒ4x "yVT 2A[ r hS4+/.!keKJC&[iԄUX+>-Mm z}Y9ru!PE]Ծ,#ٔR2IBո6.v2&6( z5,Gy h,foosܾθkblv$=A@G&>S5Ã51?_# J%^7 ޷0[,q_jA]qr;9 o,(5Ehj65 mypqp۠&2lsr0N̪\8rYfng939okHͽJKޙū& ΧeW`so u;tVYfkEw_9bhߡ4郟f Ӏmu??6x+ 0K ^};y!ގ@w&*)zUȼ_ܺR>2f`?Ӓ;(/Fw.݁.G)}6O9:5`_x.nWn_c"CNUT70žiX.~"~k5bq}7vOέcTf:x4*iPhp -ZfP &Xl[Wp[B%xi41om# œM|=8#~ 0o5׵5ULw_`oH79Q=}ǽ{.j_s.}P~@uȧgxh'(;f`B`nX_OG/A^fyפ!ܴVo jvR)+8,] k¿сs;ԃ.ՁL.\GY9yԥM,q@ av~k>~_hhNzywH/~[I||oGs7Jskԓ>e1r!h'huiW˜ +-4`ɖ>u1?r6r i}zzWۃIz@MhTKG:|i! 
ݞx1Ѵa$h5^D8J + wZ(w^q,;f6sWnE?jА⏃@s&s^Of@UwEb{5cd:u1A-r84jͫ4KBI ]N!kyk;{K +&4L@3`zakӼ !_GAbpk>w3gA#PF@|1Wn€T([%zAsJm‚l9?n@p: AN[,{+Q}vnw_U[E &RXmp[_\q8WQ \/b!m#OYNP#gPWHC sh}{+`„ҧRa9X\*^N SP$xFotc?ީMYe(W&E V+"f2:#bM^bs5\7 Wl% Xo0ma,o~ˮu6Nd*7rqo֟AxʸP{eg@7 Ui/u:mw(J<91)hAFE1ia@7`E1UIU1UTu.m5QqnjVs/>m)Ȧ-ȁ]SIdڲu?}?g[|@'uJU S'}||jӧf6PQ+HQ/<9C뢑PH`b-ݙ_ |t9x'^6:)e=.̍~f ~ZChp|} YsEԺyjeԚ[h_ks}󺐪HNKoSnvSƨxjl{9?Z"d*2ۤ4T\scnwil=ENfҡc'-U܄of Z(3U9!IG:rܨ{ʀs%0V/j_*ƨp:M5[/^6p@nKH8ؤ:?2ȩ(]@LtӤL{ͩ:9561ɩv\6)J%uUSSVOί c@\}?͘Ndqy_1HǯiB'bï[ͷ>U5|OU ~ 1]:kSd$Ⱞ "ĿS8e4o|5՘訪v-5CxwQS%f٬QҬqҤA8Rhp٤á ;ertFdfN9107 J$MۖD6k&pa}Я d_o0'C0n`1 ςdHҧӆҧR-zi@DmBaP0yM*_-r 0|4ad/UCϕ3zvHs8*Թu,ulՌD؊z㧳h/40w8vK̼NN]FJ\o ^H8%4x@}9+7%^rlT!=iwuZM#3ȀR^Zfۀ'2/T=XK׭{qyK)( v]D?K/Əta};o ҧxҧaA@d$jN-ERέJ`y|ûbjj&7iAeڴ k 6Z}ÎAp<>-5*Vã~IhB4ytŅ-<.Hٺk1 6B):+}7M*};M4#Kp/woB\4vw UGRҦ9R;L_<0OJOssKpxei:{dFb;3̮c\ /soquԧOUEFcМs7k=W.V@dT=aIFjp/s?JN )G~(dD.,mc\-@Vm!D`7ymwO,Piue<W7b!V˒퓚n_}ʷ5ԥOUm is:~7%U~W]ԋ7iLn{E;x~f\Yq_Ȱ`z麡=3RXtoۜ_DQ9Ɠq3ag[\FTFI*$$s@]?kDsj7<K+iɌA|KkpW>~SZyOIyFNtt><\~G>:c/"4`Q !4XYH `=a e6?Wl+oJ7 Be۴ظֆ~:Cemh;/!^d]54?\.1!2zf}&ڰ+rmX 7PagL&4g[\FTFI*ތ4) Gb_> ޙKӲ;_ִqVweѲۧ0vmEAE2ro9g2֛EZޙWpr`c_ֽ8tم?tXӼnԢi1o -۰X;&vH B]['3rtoΌᅴDز1o-c/uiਡ 䗥4&FWƗ9}bS~嶮>UPѡ'aO76Q4G ā(ICE67TO;Y aj/t\;v ˱(m k|_R8g.վ\DhPE{#h_{\[):1&A/$Yj+9CuO8{XSa]y},3zc"4z󺶦Prvl+rMt?&^| bNN|Y]}j^}%>֘A)YgB3#N^m/~I{HNۢuj˞w|~4r5!\ݩl4K?ݡEb!ډz=8MQ8LD*']&3f#ةLڼp~ᴦDL<4aD7]'_b:t,á1F ub^@95+]ܺE)mWRWΤ\A+Ls.ALt05sE<#~7*8+^hx ]JhSU+c`}1^٢abPO-g/spb`.6^1=5v}۸&y \luvuei詿k=i  m@IDAT oɎB>2,&3 hDltԀp ?FG`KUTțdG\ Y#^E~k5JEW%_\⁑ ctT)&ĂOl<1ޞ)ukKsVlӉ=M:T@u9j'}y :?z[A PN|e02KN1gTtq 4=ʍ邞9H sKtbՓ7;n< B~ -n2"㽩I\>U(}GY)wm u.\n+pG_޻ҫ?oڽ*,\fV4'ḓ{O^7m(}{|dV#y-avd1s!`2rԲrʷ’|cNs֗;2aH;z0`֏gCW8~2 =@1 <–ԯsK>^O#qK$3s({G禤$ШLrKBãx90e,~_~N"i彫j1N= +?cN0=Ol@^% mFGՃ{PL*guT rKrL3vs oMpt)[ma8ˎ3BO=m߄n^x's X67OAnhOIYr[_A&pvhaCvJ~;};p i\7й#-$ayPz{u=+1w#t#݊´o9U*('@ cfu\>"4xU5J* ö_Y9PuQ+|+]/H!^wk突 )E7]GfPx#]=E|>uՅO+!ۖ\uN΀Jm.@|$S+`l̦|{/q$h}ת-? 
U aͣ꨾1xQFڋ]oX&⥇Gq]1+.ЉbΓv(o?Úg]t Cu kzHo0FI;Xp&;Ie}mù' ˎ7^u )hR4&#=ĖOy9A e'] iy%|I*LbO"1|ݧx@dN_l凿a >2upgdya3 N/6k|5ԟY[ɻ.ep>hM`ɰ=aÖ~]*޺?v SgPPpYɌ Gsĉ>Mlj7c6>Ń[rronGWbrM/897vy`pVb6umS oÎf63M=7/\zi5@7-5kc˘SԋYEk8]GFnFaZlg]+ tY/qxe-?8$CAyE!q'88sٔ.B۸h+hs1a]`-a߆;B۰!=vl]ھ_Q|斗<[mn w}cmU`Ovikz9G>cmu[Z\.\:ϊK^~lg;]#l@cgdf'ij |B" OA2^m1IVTNd b]Cy{Z\gxÆ{|!U4aԨaӏN0X{ә~ˮu6N9+W..:$y^vX✒\0ȁL,p)w"ΚQ}ﻠ|Bhx '8xiG~>[URB٪t13vlYȵc|ly.8ϫe;oذ);uyS& 09uݘmP?e9ʺ㰺g͸ dri٨WMk{Z znߴiaˣf0r1$̬G|q?PԅJ[*Ye͂ = `p~PT.a]@0>v-^v0P?Z\=¢ч膋 GZ@BR?vKAu`mev="[da*SV}Kjِ8lѬf(gRqwՏ~*Z 8/\Mo;|"p+Xu>tNoK`.3Nޠcr5 bpOh]ZnUN#oU_|&>TH$֊ZYT4'=ĻD/}1S7GJd;";^Ƴ"ew6 ΖA\ DvPy}ٰGξ~YtvשMۅfbuUV ~ "LD{oڰqMWS Gpp A3hԥ`^ /:n .Ycv4OteFGanÎ tn;Nݠ)ǸC{C?-{hF16DOĉC:uöyf-]YW×Eɫf/^ɢm܃I.Zw~CԍG}|Bqҭb}  0Xy l0(A(>FY;҈g6-@ey_l]er"| x0Wm}0l׻sjP/nBe qPSsx '#ЀG q UݹU,d6è\]M`lRP/Zk5}9 `>_#,Ns]UPji䊖}3JU4k ;ܫ]15] $VC譯Wfv>r<ԣRŏfbH+ߵ F9"nbS9l Y*p%aGس3!mMwwr_po[_ڍTK/}s1|y,P] 8 ǜ;B?lgk.~;^=8O{t)z_vC Y}Oo|W`$C=asZt [?SmUy%վkٛ^Ψ2cohF?Wf6bȜ 0BMCXf'Ll 91u+q"ñ1pD{׮L)edQtw|]fI{Oy'%Sմѣm I)_ڼ:mp]GL/8Y&o· X 0B#/ B/ S)g] Zn)sDpAj'0-#V~bfcn?ju,$^NܒKrV+nlBYP?e|ovmcUXhop4v=S~aH,= 11лV>^ގ-цì] +Rqd<W_9Ulҧ*)bo{Ykұ1W* sxd ^mx!=囹kE>OAdp%}Jc@d؆a~qg[xRYHs`YuUt r |ڔ/a anZRW5Xm_=lK]7#˙0uS}٧,ʣ'RSsN!$ZE5&d/Z{Z N cz \ QtWF’)F!9 $4X#q@]߳$A{+͟Ҹ6U8u.D|^e8NqkWqd<W60j?0ڗa_)K%}jOKL<47{姟N\~(h[uiZ.'v&bAۡ^u#?Q'̮ 4`x#ˌɸeP%1NrPDk<(Be)5.߻d38vq50fmOm$KNTI<*#c^fsmwjK`۸&Ξ՞㋷+GEPpЂ1ShCq0L+ζXE\X'GWP̀OE3ӄyVrFg-;a"4W!aI{eьЃ73%o\`./Ȣx2fȚ0 `)ﶩ)ɩ4۠Ox3ٶ@ݶ@z\ID8:lfjfu!|ؠky;ӶBfOӜJDX1἞rlL_ ;-`p*)  cLWY 8Qr_\a69]`l]tjJkf7:z6d\_`MTSPxFנ<+׬|]sdOg"1fف-`Isl`Ա cǓ5 >PTf.elR^g&VEѦ+dZy~/hbˢXZޑxx¢iU5gũ6M1P4S]Bš& # 'l|_Ll^)(87SgnAߖ# g-Ϋ.V tOƕUni['Cὼⶪ@vO>彆l^ WJoY>[tX6 s8}Ofv y)E37lx( B6b[e2)T|FcOg|F:q &,)F6/aOdOZnkC;vMk@=5%%qNQ%"֢&4x*vNg 4bhhSfLO„A.uCpW:5NFH_d<W:qOTFU+v/޿>JN $wƣע63¡hS`)94jQNSpZ5YSOϵn:v2kO*h(ª&`#cYNm3. 
KCXU3 禜G%v8mq*S)',>`ԣ 0Wr@Hv E p4mC `%Fm8(P=MNeҔSǖMlMuvYȬn}Y ,usU[]#nBub]?)z}}|b "P -`0?yO k ER-s'\nx`Z {NaBG9I17t!Pn ;(&HVog'" } >u% PnBc/iϷ>_\̽Wv2x`䉓υj*; 6+Aq9x:M2.!0o{ݱ4=9Muv*ܳr$ϵSn2cj:wNBT7ay㉬ +t% O4P!{FQl"$8X'2tAтhqRQL%Ua|` ~2QhX;>)6eFGZFUSe$o@u.u"lz6_j*=t3}GWR wzC Wc+O(P!7ás4<+4cl ˼bl62 cO-TeUd0rne p[0]kѨC_} P?)B9< r|__d:~RCL!*aahBxƘ/dLp>c .aNM뾟׿"6B 5DŽjBlQ]n~ Sj r, g>MtuL|ι?|h^n=[18,A`HplaDܬ̓{oٺ}}H=\0adbۄۆg6uݱӜi8p*DK fzw2#5 c18xg"7Qpv FSSA' nCN nDSa!̶„ns8p9c/s-`Nl—YF]u-0ߌF*f U4o =Q~+u^|j]"}6[9qpn'jAP:/LL{Mz-fsɼ| 7Z8Tx1ɩS׀YcL47nmt5Eh0Ƅ+gx2 W8O, B 2 f0G9Ld7 DzM.Dam(pT|x-UZzgax_.V[yS^|1 c(;eC\(ULc5I<2k; !=PB tPqA׶uWW]Ktw_w-+kET+JG=tRI#NMnHbRfI{r;S͇< YېS8Q>ur )#59#gLJ֕1& cZ¸Og.6Աn!_Ũq6RIZ^˯Δ3vT4^r AX@QV4x|Wdf-+a_V4X`E&@1|N[1Xش8RLd͟1\~;;ᕷZgPD3FG eCZ7Z[/\dMi5zиHUgP ƞPr`0ٔ,JLJ}-{peWYhh NQqdYh+Ӎ?xr3iR mLM1bp|H{+* i'Q*EZWѰʗvVaLYTlUE"2+qj[YPgRhCBɸ{ry6V^ߓ/ ʆ͑ 健 &3BWԫV3)<`1ERl[pws&(a~m4[L=6wG?~nբeP2bB؃5VM')i bJ.qQf_᪡RE^),镒7|RXpt)8|up_?_nmdV4xjXs1dŃ>dT2X`BV5`\Սql0ά\+o|LZ2BgdU w [24-+H^.T(-BGHh+-5q G Fd$^\6cT n]k=ٵ}+=k֧;ªB{sזLL|鲮ҥ:x:f[5+͋.ߦ7}M 6hU`@xT% d aLYT|UeCU8qN+Lտ Yrw-s'kwUO˛jPCRA@U4UXP7u_#]CvpJ.)-G,s>|"N)xzɘCGq3lThv]x{5t]E,TH/6k-X:y>3gV-/ ŏCUɰ||Vk#xb~]5@ҲfOWmK䌔K}{TBi>4j]Dbep΀jr;o ƛrB:}@DZ֕u▿M]x1?\:G _Us|m5t &hz= 3`OƵՈtG2v=3_ߠHk[kꙜm;DY{MV:T,y@,Վ[PL`d~)Gqd<WRQ*MEqqz}mhE5<[/&)&yb1}e ivh42@"б` /HC2!x2/xK6`\^;-7UVcsv:M jLb!Ns_w>kfcl ;Ws:52e} hNۖ+&H:\ᘣy!4t%]Ψ"͸23-q 4{ӄOn ҚtSh ɦUlPqמl=mg4_5jv7&w7#)l`ͼ yD@"P6Q]'pևSk1:aeIè eO &(ZzZS']m=Gi))/ޣE~3jg Al/\rS[lmB)X,` Cpfp@rʹL?B;&Z+4kb uՔL"O7g0/$¸Gi.e Fa;HD@".ulItAUeNNv"hdTg4,Dpݟ61pchO9%XW_2+gXMFrM/*wP^FF`)"Վ7J)Rz6bfs蝵ۨL(ڊ|,Eڰ(uF9y1]l%&"XPjfRlo~Ik#-~#kt@Rh["P-%2"Jg2؍,gspԉ [CyKXWꍨRNr9d_pBRVO]0mp"<5[PXh̠Ptuǃ2 Sěٮq̵U}4T4^ a(ԅF#un=d\#ގVT -5W@@dc9+60 KnÏX|EӼ>hġ=TāhuKRbeWGKN9wΪce-P}J ʆMSM0,?ê%u᜜4aa֗Ө%V/X|@YV K6*J Z\ʵ$o,lwVv7`k)}_7/#D~(YW|leײr=9;;&f ܿG˘N9Hy?DcfN!ć8ꙭuwIEúx$]wm rjbH7͹.([AN~e[]9A6:|xe jfHi{xl`{\$r[9>#-+S5ѝMՌQ"6?PNN׎h\|t# P(nbs \_tVإR7Ts4b;>߸#(YTMoK^_#ooU_ǣ+/u#_ ̌n+(%(We-mՎIEjPʎ$ldZHqbӾf[[1[ Oks' Fz)r8`nvHk/i,fFKzhtVBc!{5߄_Y ra4U= dK}p7Wm*^' LKv .7EXN:9oe*(:l[dTp P c?:+tqhfxZel=VWSJ}{!, */+%//'ϫ9gƑ3ionv#5pä[t ǘ˨vXB2ct8ĹOnHe}'a@aٸFp;i( @KPfoD ͛:TOEW;)g͙-F L^+ 8xmÞ Xu J,OlIi.Ȉ[]uR(rD@"<8Cd<<|19 /0(nG+xNT8,:fŌ\-s8})ր]1 I4~lه3\E-M.u?C i.E@`nEq?o|a!ϖ t|# ZjVοi-:rkLV~Π&1,;[ 3JEV~%] *avSO`<}zA@( ,C}r&9^d#\PjA$U#6.+"`b$}(X6yx_ҭMCo"3cMgg>,[`z8kCCW͝!qr@}Z+l1q(IƃP+2<ֆ$JDfE h8L5N[];I3N4|Vq|nb=y?;ng|HÝ&@j0LJ{ٯڗT4T$D@",fʫ?.HE}~}!\Jݒh9NɊ*Ѱ|RRh v?|>BsLAޤ▽@ng8E֚)g9s(p65- nFtQŷAF`+HP48<ȯ+>ۑD\(é6%ql(U? rTx-kot5 >X 8ۖEޠXiI}\UlѻS" 0FM%& 4$ܦj>6^oߍ1,mՕ z182j5vpW|rwEKBAݐZA- .6c :~>Ud x{u=pkg~30q+3' !JÊ{o* [erO"RJSh_Z7Ow݈WV~8 K'6etFo&A݋T4Zyds@G@@ysǏBua)CM3].Tkvtp'g5; ~vLNpivq \I_d3O\,Fyx[T[* hWBNB"`ԅMF.xR;Baרrju^GƓqe|guqqi:{i|̓=qHvȁ6v^blJP0aK JX?_ur| #shX٭D^`1=#2]HQem?KbxKi::xĶ޲\)f40?bBx>~>] n!Cwv.OoOwA:0Tb0.R ^{!-۔U ]WS׵18X;aN7jaFGAhW٫D.P-J999Q`oSLOJ$]\lN"?Gqu@bކC㸐ߢS}"pLm4~AZg@(l=jpFFN>,pR2%]0n|n-Zh/aG^s Z6 >OZE!c{կX( IA숑G=M8DZ3u]-Ƴ6I>_*KŪ%g-\l6;#\+Ool@>X=2a6 X +"& dI5Cv[.HE @CH^a|'4 h4 .yDs"[OSxw ȩ\@~& +(9="CFSGEѴQ9]-d41LPQ,=oxAéET}\ȬTCf+g%:b*a)@]" F!aP 9.ZRW!t<ܒ@B۱ +[tL-jЛgCYq^_Qȏ58\P IfV: T Ϩ\/T]I h MvBmYxcYKE$ؼEKMEIWd .6.n{"f ͛6(B4toy wq 94MUY;PjjXìEA͠bLș? \4jR -7tOi}Vtt1Uިq֚Jr0ROó$>>\F ) c)9˭?#*A`|K (o9N֒5;$kٟObwURѨD#*GϤкqѝ­t '_ . 
+{n,{d3+0,>h̬ +4)db&M.|0A77OPz\kVˠ>b7}vkN^xTV`,3a y(=Z2,r^\V82/:^/ zQZq˾'򍓻滷}"r6C-ee}]– )Gq+8F1Y-<;nR<QKz׼\'G# 5ZDB ӐYiO!\Bl|(^n{fWM5M x ]2~xA-v.5șy<@_j4zQ1+o9pvƘhdLѣ]MjʓJes#`Q4̪܂bg[ok̏qd<WRN*MVOA/[mT6pj(븦Ѵ7[MHEِMx%euv>[Ɠqe|g[ʵXi~J۽7MP?r8]?v]7_ T@s,FBUg1ޣŧ~3jg Al`젘~4 VP[AS3gyPHw?:ӓsazP"MlNt.%Φ9 Sf))o?Dn4ah_ I5qMt)c5kih yL" P]'" ..AU뤉FQ m ~["Op{_P©/*2+gXMFrM/*wP^FFt|| oؕHSl.;kࠡ(R qm~k}Pb"(" f??Q(E槛Ĺ-os >OF!Б$բO^"#&X؍,^s9z|(6>XmrA';oCaCO]Zd``P5LV,:-rĹbұՀK 'Vw8) KʼR&Ezjk~NZWy}4T4^ a(ԅF#un$rW}2R/pm\+F gShϑbs KnÏX>ߴ_+*ۋfO*)|G_u\l7V&@㇈'͕슻}ho[yx8|1vc1m^hU٧Dlp:jP,cX}T~U1z睬CQnb\ىiGbVP/X;[){ڨ(1nqMd ["Pyn:y ?r`OJCj(2;͛f)eSӤc1\|t#iaU`)>Oik]*uH>G#i8B뫻2n05b~fYJڿb[7 Sm+nj{vcN^.ABwhiShBفD~}<L iSÞ<Հlwlk;c{ q͝2L,>_l9=`Jknqopr,G {;Ckg+5 qAL'>a%D=?.N~_ VTdJ1Z뇳Q|ޚ"xgw?OZ8ohQ7t켕"m} JYKF1(6GbهbSMK cv)xKRnW_QAeEDdVVS9~\cem;,X!1: abOnHe}'(L>p͝4 B%(3I7"M*~xO'a"s&Tc3gTH#uF䀧^{g7|4MG=8vFcK_ܮAj+RtTEUVέJ-ǁ\LH_^^hҗ߬= my U" Lr|B佯vQ9ۧ zOR UZVY}լ#% Jď3FRx(fSI4h |ljp E:/ UəB9"XnVw$RAns*s8=ޡ͜\zb4[mh YٯD Pp#bprp/(E] aJvin>0mkkJj)}cZa89}i4DJI^a)do}}mli"CiHTn6;,Ib"7ךM2r,".κ> $5GY~{m&XWljc|8x@ޘ@ )JْM9ATh#ȒǥYBj{EZZA$[ۿ鶫M =GCA*ow>Nq`@C4?, J~WZkNW?@GO?k/߃1rb+fK >tL07HRlb>U܊/+ Gk#*t!i,=|p$dHaA' Y8zڰpy|u[xsMf1 2-tFNAC!̕ɬ<"K~Ze6r_"ҿDR&:d8f rtKy:=c^6&nE0 ᗵ'.֘ߺ"ݕ>vߐ-;"E1Y)٥ӽ璦O"7 N6QZaj&}*Mr 0]x cNƙk4<#Y% 'J:|+]KG PX" 3_d#b;\+ZIFCʯhႸhh}X~71GƏ{+Wb~}\Uj8 Wrʲ- :`n=|;EcQܲ.Eo(O\!opddhZn6rj 0ݪ? N6ipg!\J- cI}XXQQ%VJȨME'oť((~|m(q{Z:Wrw`ϰfQ(ny[2A۝3NJ%)lYXt3ҧ{e/, ʅ(6 [\a5DZ'qt On .|#!H?{ K Q-q}PRF+/}AБT29rl\e VYǨRh4vU7EKd6+?x3.0D'Rcɘ2qP7 Ɯ@mSQdܧ6-<VP=ZO kvk07d0F KzlQc-`/) nH;g)F1z?*\R2si@{`Q-'=}X&RpaA hF7V J[樱\V@lBZ^[c7P2Lנuj}# ]Oo2f`/-,h'4~5c '"N(,Ѯ&UM=gCdX&'\-lq 5H@]끘CvCR32T-1&(IPlMz#?tulkڅ}dT/&+ϕ60֌9cEی,Gi+Qj̮81JitoyT5mU3i8{WO1 WYEQ- i\ᕃP K,r܀NS$ q%(X98 N0_o~C-\LJ& EM-%s,ŦN .g/䃊jF[Nӊ~k~jͻa>tߜ/{&R{ Cu=m6BSkŽӃ 4;TtSwmih? ̯,t~"Tl?ɴY:*F?FDVđXK8n˽zqK'Q~z5P~=K8ƈsoly~!4ކˡWKd$#k]X\N^-Oô\:J_// :*J1 ֖X_|nY{xex "S̜0kٗ]+bnˌɽPZ-"5455 uךw9GF(GW?V?ֻkTq&p0RXQ>71JGySkdYԮ#ȦhƕUWߺ\{"GcH%ռzWZEw95[/Zo{l)9h}iW3~<*8}KsdmՅMF.0raT9m]Ƒd\_YżAѢEc s<]®)[AJ#%IWcIY9;͕Cݳtk3nW iHqe|%yƪ #!tr>?Y$μ@C X?M\Ĭ JY&ҳM.z nh&i ,R%~齯I2Aۯ#(h7:j8C4iK+FswF`T*Mtrў[v]] ´`]kE1NQ5sb-hc[ӏl+Bggrqun^_R: ^?cl4pdu82+ :iM<-ݳ8 ok֬n:z~ q '%+7D8ͨ9n}$4G^|:K_>[Pl70yz^9hrN]H_x5,tVF<د;fA۸.hxťy;9{W wLfm;tIŸ(μ#iہSyzwsukn@Utau44(vYpxwچGp^ʿ{8rfJ< WX_O)"|^QU^dD=0w2K|/,X2½?`x2:5,Xkڪ̚˃ɠQ6*f/l P4zҘd25J)7c%x<;;˞ٯӭaJ7%+{c>Hb6$GK;6WA?u{lZM lfBr~p*4[S K`Q),U1]-#b7oQ=sHaר^P’r"oTۻ %{Ys] ;~fO;ݙY-G1O=~_-Aa'a3C ))̆¿hQ6 J_ۦD*(Y|f j\=tTBA(3ѷO"\, X46pw wOw#9k (.kwPFa`1Fϋ>R&9Y` w`Oƕd- fq("_"a6, <YQjO &$VO!1:A8׋>*]Q Nw,2Gշ@Z)dr7cV(vq۴ݗ+O^mhU[=u1]jLi)}Ye?\.{x'>ȲDhF`gDS+:wOz(F(fv) :TO.s 6Ѯ&EaAdR|ʅ'yyacex2tto*ɸUf^ ˤ=Pipܲ(^Cb2+&R`d 2uhT]s1C77O"XQd$ 鋅-^Bk 1jlRHh($\M8RF.j]{ [,XxosSMaCDT[5 %e%CZ:QHE9H2ʻR|)[;О-LZt"N’Jxn:֌pc4*etǛk  7wj5)g9zs3aP}̇FM_n9@Ga!IͼB:W%>ԝWVQs}aшvu/^U#E"` TDfwXg*++GP "R-0i֘  2^Z ǶlX1h%CN2^臭W#@%7/B{ǰiO_tHX=ؕR @9axpfKN^M8iġ4ۓk:x‹%r(pkX JzU\BKa7 d!L8(E" hԏ{'86]Cb9WHTo~꾺SF wuÓƢ Hai1܊ ѯ6?7$>#?K(C* (ZY E1T4Tw5bo}T30*@`'+lOV0,$]Kc)E" .h,oEWS,}':G<୦NrAݼiCDXfM!@ [PZYZwTM,[2Pʚay``l&[4'j0G+d\%U#-R~:5WN}-8hԧd=HV2xQ̡BL,#\%lSF̔h=`c_aeKv!CQk%ǒtTQD  ^8قOKjZ2Qm+xu]H R2Z^Da Y1D@"||d @G@. 2'͠I$D! wK$DFA-H$@CKh$ ip}7oQdmX:[+\ ,kJ^" H:0ҢD?RTll?NkQ ED@" H2]ڢ@odDӾ)t *FE_l:@.!l{Ҽ#EQ};'M4Gb]]+z8 R t^|uzU" H$iѨuH$IB|mx\ ?rA( %a)4PnAxq\TfJ;D:*J? GﺑB| s۷>݌zNB> H$D# -y+?**W@HMGΡH/1&Z|񻎜4v}-jy9XXL(!_D~ aƢy"$D@"nA&1ҶS4| <srP{x7"XeL8**gWg~B5 A" H$.h49h 1nW_;$D@"ЩNu;H$D Wr\*ȲJ(*+ԕ|ME#nV}}l$D+!! 
@[OR~Q))Xm=]]ÅAդsփMNE]vJ${FC֋#Fi@@q$LO]Dn }%8EwZ+aP!E" H$m@Q4L&mwQfEKMe*m!X?\lrhDzk7mM=4cwOL" tN^`++SA 3FBjÙ V5X {xlS}&fʆn4KyD@" ~Un!d=kT2xYcX9cGVW)D@" h ZEVOKؒ!0n\82D@" vhb˶8!2Kn4DZ2Z`C'*)Yv[P\o_\K, /?KlNiPl|"Jt^]wWlv>C]"?[5eGƓqe| u-C|7OS}sku͎â>ГXM)VDf^P'hڭ[ h4CRQo(6jxi zKf}%K_[xvs }vhXM%"(k\?g0`V k*h6I3D nxZNY>WxRU9IEpueVHl< ~<#d6ߎl4y(IxNΟqp.[Dc]jk^O'/ 2.Rbv3uƑ4J+vskD"Q\D u>sfTW^:jZ09:mATTf|'dJ1*Mͬqٍn)3C[`iyuĮ.բhI}`6Ѷ~qa}hΤ0i͛xOa^X4_?d\_'GGrd5Zy+A'L`&v>aIN^DfdZpD7'-Π/\'-N64=-N')T( ,?VP2b@n~!AQQ4md9vA~HJ ]c-ZcqyS3PR }%YcfYa֚Q-j0'ޕr}gu#Z,؊/]VLfM )41{@af1LCZp}UmdV#פ%,ʚRh>3'(G TlSb!"v cMAً Kv,{ARLc`!Y/7#Qkq7Qqsq!Gd=7k0Z b#b4YJp,j7}6[K{vIEln9D'.\r 4 MBv?BݯG"5jʎ$A2d7qp%CJ`ܲagqu"5X?l<n(Piz<I(Ju,c]F!NbaC8јtcK Rd:<:FkBup_ %2i!"LE2rzM /()hZGZ=R@_mK*P$dMPQ.ޚk>ͰtI Ja_yˁ"'̚g)m7l٘;yXCy/c6sk^ޫ7OE(`M UA{/9(U1*]Zb8v@IDATqbl~^dD$OU JƱ54BVuC #gS4Iɣ/玭¿.Q[gO7֟uU0% {yTgyp *ÌΧKP33(ˆع40P>m o?}&s,y#'Bi ą }< DqϐCcR%_%Bxleψ*HE,g0SGvl8o}P9?^ѽE,EHk?z– [Jxv!Z()}/L ƿkǯru@h>96|AʣvF:'oNA}MT9=+' R~aZ()OQPP@A+JuR&iJ@kX3 ȜT'=?voGi(I. \HJ n%!B3qh_i=-SmO$DFԟak8!OcҞ&/~PDafPEвzޛ5ڛҍ%?2-{HEpi/6Bu"3GjR_Zq[_ݕ}p^AjVIloRwViuʒX{㕉hYJKA獰9*?%w8OJ| HFrWFElwxޛ*ߓq%נ'nۭ *ώON#h|$' |C+3ھçLIm1B`T4G2fgDmSqnt_-|s 'N'N#tܹrL^C.HV] ^r T8_F&\*aťxI\oDtЋ;12f#_bTi#j#LǩNm(>oWgS^HzLzk2u)[QWtiSr~$ hkW뭨23m7=r<5SsE7V ըțZbhTaY߰[ڞ_e [)?UYs6_ kk}p!R8N5E3L\:!R6ڡuFoh.]}wuظKnH*H$hkf_8˿w_c}eٺ/)f/a{Vnڭ" QXڢ7æ-ݹ5+̺>]~ &թwiWGy6j;fSvv ;0H:sA/!ڵllf\zIC.RO(a-|dQ*LhMAuIߌ`u{>4TKseXyCBl#DC*Txk+q$ĕr\խ[\,ՉnOko,2G4IһMTb2\ݳc5guWvc%BsoYQXY2s՛.QBd;^e7r\ݡT `XꊮmKX|$`F<6[ifSIF\ 9T@,"!x7߻_Cq:|zo_ǟ_W$zHsxeB(Oja,d_smptI>\ge8A sp5/w d>aŲu/b.L51h+wTk`Pjڵhcǂa Ha36:\Qgi ]KyԦ]L3[q7g^gnx3vQ (i΂$ŦN -ɇ7Fбn*']Q}g$#D !k:0IWEw @܈q$5&¼[t `-|&7G*=GҤyyoFޥ 9iVfy Y?>N  πTnw@L{]j`u#rfnhvW`=^4qęvޮ_b]Nf~8< ɗ7m@P0_QU%ߝE\ u3DL~cSuF5Ii#R]>JQhYckпYf3=ګ3@cx,^C}뺷JK l 0^ {R£zp)ki,PIy;}[}qo'QU{V +[]YdU%v=]Ynudܔ盅Nyt|yKnԝiR*X1F܁[}-\u( *JƶEqyUw0g#WiV$P-T,E\Z0nE| ]yĭzu+9u}#j;޲:zzR_jhԚqiGVmD~H5 X44_5/,P.uQܮb#حQiJ2H2T,i0QMW@شYF-IC^Sp+?4K+|?m4(NfLvȄVCc!}וH ( a\*-z0-[fJv"|i PDs5|Pbad?!>$U-= jqNJ/Գr4Q_,ݨ)XnZ~Ҧ ;MUJ LUd4m`NDLD4=0\)6dhs@. ƺו?v)(ŠgD1)f/wuA&+UOx01732kK ]&Tf/V^_#}. 
@VQ7]s='5|zӥW_/K8gRT0D2FRʖ#'ξ [Hū]>\Ut +/w4"2<%uhH6L"Fbg{8dЅ%4M%"DI{$óH}Y(9ar+)qNQw׽q~((7;5.̨q#USGatrI9UCh,[ N8EWGHsff]f$2s_af7͞*Hf'ayOHۺ$}#[Xyy`t™`sSSS*mFq8>Cxd˞6h ıxZD(2*IEϥ.8KPF<-;dz{}ba$ /tq{G\'#vDrĊPs(+3* ZAĺmnƧ=ύŀ<Ī[!jg3~E_]o7So~D xӨ̨HzAa6&#[qj(= ] I#vU[q?h63͒`gٌob`yD.#dSgMF^|i̴,XSUGtHl`0bzʔ5ެ߳$<8gxUa!\z|y%[4%W)^KK:vE=+ Jdz;yTsF[}VО 7!\{gڲ223ZEf o{χ󴌼9SjB;ěĞٜ2{GNfR9ǔh􁁲gߛ2}~ ߩz=}K ]!W驓Uy_ů+j) - J&b͘Nj tH*  Cl^t\.a~Y/CmRko~aQFTwXmxXQ'z/᭨]64m` n8~vn FW$;̷r-!(T4L/p;w@$ND*8 L a>>Ǵ+%}x-b|g"MHZ_e?lsyDˑ!5FY-N x/zZ矫XRdPJ32hvNR'$"޲tȠ/6WȠS*G׷b2JYQ-ƥ{)mVZHѩ>Zƕy6-{?ԙ_k*Pݖ@`zjbS /sps[45?A@Yǘ6fR}#w0b s ;&obÎ*O]+/ՇkJ(F)JH_'ߪܭ\3#5w"HF'|NOKzϓ%C5%>SY %$4 TtL.GI\/uկ=k2;*Ъ7g/a&W}΃&y f 8iw-Q|=VdGNO҃kQ>6A͌)jUkؕ=P[5x WfcbRuCEOja|)*o.s6xt#ָ:y]QXC]HXHM"qYjof,iFgw>Ah\Xv){{rN-O0bK:$=e`o]D9p#(:7V3oa*4U7qAXOJ|C3R5FŭG<ιvLHjF3F75m4A@bX ňeEz!"3;2A>ws-ek(rR:#4U?^idcmmv2e|Q\TdQq#ZttPIKXml{b[}+h%QI'NxV לqI^:4.Ku…Y}#0Ne GPzH6*PPAA 3q$ĕRr$JYK蒏^_Z>X\9omv^_[75 ;z;rrόTp x@ 볶+q0[W6i36&)m;S7GHSDM4lX@ILt*hXkRᶳMx7QvnDl`(¦  Cdvl$B1*13'q%nM7'RzOObEEf쇍u}GX͈ u1&MxX HU4w鐤EN:BT/RI:`*.B5M4SYLyj]J11ꫜ߇toDjT>̟K}=4ŏ/pbX#rաB1sAf ϸHS~&y@H~wJ54T0ŀ\*.$ĕVKm:x0ܡq<ӼXwv6%@˼gXP+ri;>:$C9[ڰЎ5aӎ!{lvM0הtGR`yS~Ae|4驓?+B%n7v vY'\tRJXuL.IY$Ƣy!B*rB{:ս:70lfpjHSAoڄ^'>fxm3-{WOFR*"UCbt $_SS8lPv´:C  OV~Paʈva\~bp42\yTdRӱ0q2z/a{I!0m2 Ѡ$TH3gZPTl`~i E:hݔzg@ IcR[aq5lz`fZiAj΢SU"l'N.npJ @4 U1 MQH .PPB,oIhiT{X=:̹N]r h lvD3,2C-noIJ9235@fZxZ@ RΓ7p Ɉ4Ii(ZTdf{D#%I#~R-8$j-ɰ$!U'K*u=@ " D#g%@Ĩr:^8k)~z)di׼!@\t\{wh<g߯{*"%\_RQTT|N2%wSmR<@) 6&!#5nJ28O`q~<睗Hj5B4jo^!ƆA8WCSRt=--k[z tf= UVXrt ͽNTƢRWqc_!@K#=eu^1LꙜ_c5^@թX!2 F{8ÊT2MDp8%cZ7kgŇnrEGXyЪiGœ 'T:P]"% Cl,P|?W5+N<{J5])/@E@F;ՔI'ti+ -nOY7;)7pgk .3QO&V[8/ӡ#'8o?#'G{Q)\u$? [cj hθCS&/kg1O75bV-d6׺ PmFB|'Rjg$W'9Y' zm!ٖ63D@B4g.>'NxVD!#3.٫S =y0 ?;'ݮשe=#ED$! KrdzБ#'CA@$B4* \vLsLȲGlݰۈs_>4%&w FhYE} C-SXsx1 C* PEhT82i3|-Gl0~F\g',|"  MOM^l3a63o1T S" ]hxOӒޅƓ%S),oA@!A7= zNlj>hgyA@.h|֊4֑Uk%A@(A@F *~Qp֬Y%EA6# D6^}z58jlYDC PQ; 5J:T(iV/ C76)mY^A@jB4jߜuA6=mk ck}WA@څ5_u-=5X$CycLRP볼  hԞ3=1n\qxH]0A%di= 2PA@!A26i YH5hԾ9WIIZ0L^FbfńJ. x !^R <^M4Fl3T\g'ǬHD@FpΫS ~l)a0ݐ" khaHOKzbRIG qɩϔ|7 >C@Ϡ iIj$Z( Dr>h uV"@! D#&SR1C&O@W<6i =E.῾mS9qFqq(rj`YhP=?8*?8U̴d#_{ȜSy ҟӔlCS&//cCCO+.0l\Q*ƙ?2q7ś}xv@U#Tּ2u5 #ig:TݚݪFo0pgh[[h"CUDX d *vbyBNvʵt$(-BMw3':8: `Cԃ<^c:5kʅfCC8qbW:ĕdMiN]orYs{6MU& T*dXe4~o yaWN^8vZW*;5U6u]zo=^Uso>'29fxP @ ial119qISTHlSo}Ult$^|.lî ;"C`{tWKp T5a!K74HIhNd,)s vU B2`W:oDg\!aBIᤳ( Љٺn 6rUkX7݇k>.'h7lnx#H =œ].q\`@LVsaI"7= ~f͚e_eX$`Kt"าZi6i0@ѾmF[tdxYvʑ>ەWeǺ4mho]uTq*6*BEEOw.3sT4*3;O=yV8vw++ccW \/Dp6@l8z6CgM2t*N~Իe~`VT8ѧ6ּa). ?u6W:F)5<М-b{5ǟ}Ҡ @ sS;sf~4~<Am6@bzQÎ ԥ+UXTy*m({v5QT Ziڼ}˚1թ{a'6>6:dlޛmaB]vci}Sv(m)ՋvՋ ! ͹Ĺ/T/knjI?[fK}ư)1V P{PvTvt9 !H2B-Vr @Oub֨~W6ezuaDjN,޺O}f[p0[6vK 1ʝ$F\$];Ƃ.> UBG${ZM4} UXO<6Ή3Yjɚjن]BMr]̔VD xt{,>կ~eT˦~2|=GO/78mLۭlxSrh߲;VRuN=o׹rizx8ʝ \"c~i3ԛح >nkŇ!W\ Q[:8S0|,=5yڥkDuҖm[4oSfhNIwymPov@5bFj: ^|@1ɩP1CG^kc )C|Zu?Ȇ$sk~(. 
[remaining binary PNG image data omitted]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/bgp-dynamic-routing-overview.svg
[SVG figure, produced by OmniGraffle 6.6, 2016-08-12 (Canvas 1, Layer 1). Title: "BGP Dynamic Routing Overview". Labels: Provider Router; L3 Agent; BGP Agent; Self-service Network 1, 2, 3, X; Router 1, 2, 3, X; External Networks; Provider Networks; Peering Session (appears twice).]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/config-macvtap-compute1.png
[binary PNG image data omitted; the file embeds a draw.io mxGraphModel in a zTXt chunk]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/config-macvtap-compute2.png
[binary PNG image data omitted; the file embeds a draw.io mxGraphModel in a zTXt chunk and Adobe Photoshop CC (Macintosh) XMP metadata dated 2015-10-08, including the label "Neutron Server"]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-ha-vrrp-compconn1.graffle
[binary OmniGraffle document data omitted]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-ha-vrrp-compconn1.png
[binary PNG image data omitted, continues past this section]
$,G*Tkƚp19WcW'knw<?T| }6<:# 792GbڣgG{,jS2ۣ`A6c,XPfF;+ 3hZzb=0D* y+Q;X2Y%roejo(W'kGԬ; zaBC\'wwĪ/Z5* wgG;P[=T_'F 7gk\M F6 y,BgLކps ]p61QYѨjZ0>|Ev~p}w7uL_ڦIP m`.Nr!8!*w#o c>,XPYHM$ɭfF;+ kдAnv1`ZdmHB^|3̽R7m :˄e '։&hk(=:9ckBfzByb3heC ;k<5b=1>+|Nt@O3"OVy#Go&g rAw^'L_:^=qIg^ok jS_NtA8 ŮCPf :یe²`AiTv͌+ ;şް[=WR'L q{#X#r+m }lqUN>G༯/GW̜K0zkaISqn3^Wb[{CX S, >,XPYM$]fFaTg/-BszGg "9](',ikGlX)ݬ`gm[̟dXaθNEowYYG$w3; h%]Ѻ-./ԺYC,06c,XP'F²˂E:D |ֽe1u =˄e JDI@=&f :یe²`Ad!NW:F[`3S퍶8ʹmHv1"Ȃ6c,XPbՓ(^laYk3)Otb,0, dVBZ'Zol\nB[EX, ò˂ERu6 Ū_)tTVءX=74ejwd'Q2ya$C2ifrUÑ/l@\d?Îo`Y4& LaJp3+NZہo9 M( E<p1Dng)(Y~s-|{ OPeMidAs1X,(, !,N@/9Tc8{51*|Q; Xy+ 8wx˿taY`L  J2 8ǐo$E)0!+5Fhe8Efi *NGpKJx F,DYi P+{ªqzɸ}]}; ,?d,WqZ`o|}" &p0˂eA:$ &, 7`#0|v\K~L@,KNöۢGR ;otfvK[Q+<;瓁Q 7 %8^;öʾtaY`L  ˂tH' I&ϣPS7~H8RƷ3WyZYEAq؝.$tzb:.]gY{ra$_PBYچMG|T]HɂQpZ[ߣsok75^ cJ,XPYXH&6FEqx,Qx̧=t‚_;#s:@МTf|6Oƹj\O CEw̌_K0z}3n^P| /'>]  %@gf}y?c;Apn(Ìz ˜, `Yd>6ϨEs1QO8!0H_ =ǡ*abJ A\^X.}azg"ãpL?\bYvTvcY`L  NTLIDId7x>Kץq<`Y,Ht4HK,0˂EH& &#8Ȃ c,XPXCI@QdAɴPraF=~X}`Y(À5H' L EY0s1X,(, !,ě'㣶r#F^֑/1ȳ;zcJ,XPXea'A v͛YXe"jF83d+n31>}(Zzzλ#D C>ߎ31#08VdGGۀq( C^`' 6]-Pq}/R>XX!,0˂eA:4lmTra@Iqkm돏vnō!zqw%%t+q4La 㱨8_<o/%ycmEyf:عsp*}׮qt,PyvCT (Ìz,0`Ada#, SO4NcJv=J=ZI,P`dߎ;OQrNOUާ?ځ0tzBaXy"dYhX?We² 3cR\Ad GG{89ÚNH޳#q[;n0G@ Wx*{Q9]C_o'3/@ZY{e1%X,(,d$@:Yn^NZ#QϠ{lۄ]z[sqM;~醇?d١O8.Wϫx4Z9xN j}# &z ,0˂eA:X,Z;J聹(=8o;V/%M#FgY` J2 8M$)~FEmfEh3j*߈ ǯEW7+8遏#"$?6$ ׽Ad[m(|Y+|j Wڥ'6ibdM%GfGI'`AaYd\L "WwOŦ帥-(ZwSrMzXSe² , q5_h0Bo 7N&P7ƒaY3#Btˢx%R?`7;8zox~didAs1X,(,,g$@I@r+"V 3#Ȃ c,XPXeaY`Z, N cJ,XPYd$@2Y8iH# +y`AaYeiI,XPra N63-Id<`Y,H&$_"& 3ò˂eA:X%`Ada#F",0˂eA:$3 cJ,XPYM$j²вQdAs1X,(, !,gZ(, )`Ada)#:",0˂eA:X%`AaYda-BaY`L  J2 8#B",˜, D6*, -eQFX,(,|HtiH# +y`AaYdaBaY`L  K `Y`C?We² J², Ac$@I@QQW0-eQA?We² L e1%X,(, ,0-  J2 8-#r",˜, D4*,D2-eQFX,(,,f$e1Y\a ˂tH& ~bZ(, )`AdFeL EY\a ˂t,0, LKe""F4 ˜, D0*,,eZ(90?, >,XPXeaY`Z, ep|!#$` c,XPXC:Y3Ɓe1%X,(,,`$@I@QaYh(Ìz ˜, N;q`Y`L   NktʟeAs1X,(, ,0-   LL1+Ɨ<`Y,Hti,0˂eA:4ĹM2̨, epl.#7L EY0s1X,(, !,,bZ(, )`AdkFInNƙqd7r[CIL[au M鏛P?We² Hb݌ʽ|wU\Q?v8 T 3D*uDyV'B'+h=aY`e"F~km ɭ Tfw~#U;VGk/ +y`AaYS3GpjߑiFu-8ЋeiQ,XPYHt/|ëP'ׁifCqx=Р#<˂ c,XPXC:Y}~+{ڒ9ɏﵳ,0˂eA:4p5*B:hd2F<-wY0ò胥Bx(# ;;Ľ=2"v̀1]cEYH& /U{!{Ɨ]nC$L433a]$eM/( cFč?[^ь_&}~iW)sVw]Z5,"n\ת8qqDirAQE NJI`m;8cP Fq99m '=ꤚ[q?Lhr:û,{&J" &p) wnBdىC0ÞiX{.Gqnx%´qoRN[2"vLn]&)"b 7vbDد"ϟޚ#M7vbC,H&.FH(/sQd?keqKYQC+ [랶l+zPj{ 5fEUV{^NP gz 1=.ww'OY^;(Ìz,̈'=??-U" |Ffa/ ܋V̈="n1fOnfs3U,L=olGl-~_-Eb%+̿)f%L>[9xVo?ڬEF$C;_f6X}?es$aXv/Aꚶ-R NV&#>87+v@Zz/ZYa]4 Rb#6Z,MŘ]NeKNRn Nt}j--CZrr7c*&]|}"XC:Y0?EF[89b[ᚶrr[N65Nt?,pE;GlmۛƿQ\lhxuRȲ'e!DF2>V(/g6TĬCzV Wtoj׽D/y}`M6Pt&a0;5/)NsJ̉ӮRN8uo_,Ht`#٨ēVwԔx!G;q5%\'r>h^*_LCmv#uTȲzeafel#xă1O=篙1iWR[L͹ǂwBͷ Kcg)'^}< Oj1l?_>4[*7иLg$@I6gbTlkvAJ$1xBh_\56z.}pr,VҎ@Xx\fzX>Ҭ qD;_W>vnVيD֏b6GB\`E}%ګc)HK^ | &LSlÊDVp:LQM<@v^&c1ucOt1Ge|>. KlI9Bx(Lj1"b~8+qf*'^NXg}'2{< ߥ~~mf]ip90tFza̽^K" &pT3=n,Cgy RկzovlBe,6t<聈? 
ǿDHL^㱩Pahc'bjR_UH]0f*#ZA﯑tL芷6Ŏ!%^k,2uLU h8j4=nlǜd!"~ܐ gď/{ߌ: UV& ʈL=Sc3&{ 0daaw=R#sWȂJ5"H٬ qD(#{H(OI C Gҟ_ 8; 1Alms;etwwFyˠ*Xe3 /OTeհ߽uBDx/kKm'A?Y ݍ"8FDMڰ, akTmS`z9[|P_kg%S.zEjdl6SiqoCUqc>8+S!쓁_,HJswmSM<]97˂!,|7H>&h(Qv>}pG ?A&\2^ݜ ܬX7*M`Aƾ7уĂ L[ fQ1@x _o\d)dzcJ L'NSdlM Z;XׁeH8sc+SlCX Dďh4 ˗h" v4*$ *ov刡mik*o\[Pg ?LS3ra֘7˂^ȀɂfĽsk`'Ϙ8&B,߸06Ȓz$ $;x[E!ȏ_[ ɶ@D{k )z!K賐}2"#&ô#%#" bJ}Zz͔͟#4_ܘ, &th" Pz%mE z Xa؅XO|j NC++1yBSjraF=~T3=n,-gd`zF 1#a|UH>l, z=^C#c{wE?< mC 0珚rUh㧋ce?wuo=jVcF;W&)q e z7eYHP1 qe8 #{{GttżSC` |j +spr;?o@G]BLdAsqcA?&' g卤(4t37ה3B=]^zm0<ڣXNyb!9t{o_[jT1/zU bYdaޘ, A flϾp뎨glA7[|#vO8aJ_kU{^*XYTc5w)#( ig&L8%ib/Al'7 mFbtq »Y|/Z OOaXBua/퉉X/Ǭn&3T9a!P = _35+qB+QF%GNM$DAeQ{L-% 15Y " љ WXEzMH,XyC==|lr;w37=i6ҎV<6"`@7ӧ`P+wc2$lB AVm%%p0“ºGZgкl9$1;Q ˲ &/ qaYЏqc)gyjONe= ꋷ>qʅF.V ? e{".Mtt ;<*))@Uw}WAAu]q]WWH&@03I(Q:R̄ҋ@zDLKh1tJ̤Д* 0i$;y,% };s빜o9^`x~`Oa=oS90xcj9ZuCx$.?C H쎖}mM@Ҳx }n= uޗ W`nBGM_T yaar5G҂uBX)3G?|0ksܱ/stdŘq=듮>p'W<SG/;{ۡ?{W>98>A^ǻvD#8e >5U7 u@_;&c>^;:= S*W5'w\r6@T%?a1҂mm1 b /\߾?yDŽy=*thvkIsI WwHn#%}= !Y:|O6 Ѣ0DYa0/_]!hcX#=CC=0}PGh]: pJXTkln:X!,T霰 ReNfğ9c:‘5޽xci(NƐol=0Byx =#3t9+0ͫ|ӴIH`EnB{h;x1pX[0'0w:wB_#!k<D?0K‰yӐBS^USbQ$lFA[ 3Ex)CӃf[<%:4{wYO g b,xEWL9W%|CЩş-:E%f`Azty1HNij61kmNuFA\N(BV )=a.tA( >#Nxo!NAI֧A?G>XWN銶L"L|нsϦ0_)0nl'K".;>jwgޔk62D׺aPL+FcS>4Jc!4(.sRr %4s< MËấH<7p" Ik`ޱj::OKi {+ffV_e㞅i֘bńYO0k $l~:g-o{hD]: P9֚#ރuBe\SeۡQTԷ:%bUiΞoDko!N0O(\~g ƫ}bev*:aHLLt~3^/o? #d|'|pD|W}^ DB_};1aT{;.1ÂF1s#HZa2S;ӿDaL'~ Y {*DnwŌj3D#ݧN cGܗ\/|!ay:-,O{aj!KpCRla{RoHE)w.r\芭oH`ݵmZCr)LaX,IVDOz%eϘ(TPBطZDGf76lg э|σZ[댰`1y}Q+-k$tvizMtA<):"3ASKк79~]炒Rth}g.8U[2󮍓.2\Jrڗi# 9XW} .TY aAkm6԰@DfP:< K5n}l}ay:#,XMQf]d\K&ɀrmC5Aү ^Q2 9n/❖r O$xD9JÂ} aa'K%)̱rZ)dNp)^ ,DekO6H>AQ u NFc l!Pr1I9>ek [{@z[#j /V^uĥXa!^]Rw ֭D)z=%R-~PF) .R3I0/P<(_'1[HBceo]/qR\4tvD~ Za 9, qb)9 ߕgBS߷Ul./zk ZPޱaႮ.[+K!w I2`[V$`]#Ci֠Z*K_uX٩XZ xla>v:b4G50\cP]-UMۖ/(~53?elz9nnzg{ש2{ g@g:m(Lns+M[;fI~^Wpׄ[qsbn7K\5ŒIAz -e ^ST(JplX\STXGycje5g7h9*%V&JSg=UQۖ@rNa g:ae XRWP?2Az)PZT+xC_c#x8hq03`goht#˵zuFױђ}<ʽlͼkqPy[]k+-8="M(#T$؊] ?EhQù}qAáZSڋ~c`2GSyaƀe{bMV}_}|mkշ8]eDfUƠ*{5TkPjt)n;o 5|c]JD @ wTud0.; 5#ֵ2mK(;6 K ^(I~ҡB+#u6odÒ:pG=NA+̨N7ƀ[F]EaaF7\W эfTOe ,4(aFtAU$c" Ո阻utFT5T%UQr2OcknN:.}*NmW*~Jk z9D8mKC+FQWoUDTWEokQ|*E/MJhJ|xS*8{ gv(PaVV[aV6oӽ2bd_lar&;jc4z-6gFC1WPF,ɚ?Olchy#ĸ:@\p2FV5@-::y^BhWPm<٭S+ڢ5~st}"k VH̚'aaF8TZPũtVTEau腰)XiK"~ +F5.e 86N+8 b[~Mۖ0}Կ&&ůmQ9]kTZuxX~ho&ao&,7 wa1i7s.ꕫ#{H芸~x$ql-<)pbX@e['rS!3X,;1WEn׽-V*ȋ_l!?)tDMߖK"ԠZ1wj F*:%4:UiNeTFNT5z 1y{mT(vAlJ-ʝނ `[PaVV0+QvḼwNo N U|ʋ_l2o@ W/,F$wEXlGԢƦbUwčXvGTr lmpqⴱMb7[Z`5}~fr(f'6kmڎaIUbDƜ3A7l,Z4, 7%YE/l`fS*FdlGx߰5׶#GK)mG{qy3K K@1y{mQUZ/MI[ƇȏvaK)^eu4r|y̩~^*vmj}Z)~ba_ތ /ܢ{|K-&bMLÒ֒SA755ofPɰD .ʐ 7,:%UQik&b2(zamjVZqv( W rIDATvXXތ%GBJ{T\ K r|rLޞbILƚ}:}pدm쏂XơX(9 bFHEBʋ(+M +9zwX9rkE[G$nATpfqWaKT8(0uBC秋{eY2Vy$N[O.`,˲nY郊i,nQ`S- [? 9˲,[wDnO8k'Ō!5`eYe5~;ܒ((0%WhM1nQnf,˲ז5i"k"ɒV캄i"䘼=-&[}nf~XU,˲l8}P%Uq[Q8caIUSA_Tſ8XeEVbKF` oO[oNUI\x/NaYm :_Omfwyn"C`9~ H*nX( Jdc#PvsPiQVeYaVZ0+QvEG?ḴgHݟnYL7&sϠSb Scg4x=aT 6YSMTӖTf6x,>Xl.z_*4f׈{aqaUsLAź$d;@$I vaaL!0yE{=gwaQFaa1xIElþv=gw` saaM_Yƀ]M8Φf,:O 0 #azqp:jQ'aaNY;=i+˃ӃZU[t"ODVџt'3Dt6-&~Dt=q"JDOӏvfaGCk׷܄rޖ/ 5:gt ! <[mZK"LDoS߈BBZEUX(!vզsX`a D \;$`˕KBtjq!cD_m{Dt$!,%!\x80 0 Տ|鮩)Ѩ/ZxWsDt_":KDo=,,$"rGDϫ -S":OBp{xw"R6j}ma}"ʴO0b2^mY_Ѧjz־Oѝ.~;IDwG0 0 N97-w5/{7BFD{(J =fK_vA_mXQ*-#"OXЫ>[˾VBZ " !4'DEٗ¾*Z} a!67@Q}0 ÈKpV_>g9-+uKѫJ}ﱺDt7զ?(,$bKD%a۰@$t\'~a*y UIoÂ.$+ -%Iv "FBB%!P=#}$tw+DZ`aqUכQ]Wa&5 $7M ]IP+ Y"jV0>;,M!/L]XxS$ ;c~%W 1Z=D˜OI({FD1 0hh~5:խأs A]Ik{UIIxtA ap DzؗaF9[F96lNU<"0 0 }s.Ya56s)U~`a@p{N5Kk/\o ~KL֠. 
+W=| 0 .֏|SWUwŲ2s VSD,TfTjw=ڙaa@p&ů֠TV^eʦ5Zz1aaa'E0#IENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/config-macvtap-compute2.png0000644000175000017500000006337400000000000027121 0ustar00coreycorey00000000000000PNG  IHDR [fzTXtmxGraphModelZYs85}G1VeR=e#XYiA $82ICKuzI}cMF<䱔gR8J6AitmT%"LႥ)]CF8gڒ%f UvA[wN=A0R-YY2' )V4!"7w6LqIq_.`Qe]X*^s*W"T*~s/O NęE*QnЃ!srL=)df1YCL&Je @WB"4H ΙjӬ^ԓHm/ŽuZZ+{rcm*j\VVZutpl>,+^o(aoBrm7',,&g_:,Q;2F /%}=Bq>nE4Wsis򜮻W( `BJ Nnu[x7 'ftJEQ1Qdoes K_'}mL*UlWy5`S=-rڧBl#Q&Q;b3NMRTRw t1m?}]6ϴvr8^L@`͠bU1mM939վK 6alQ ީklZ籟|OkP yR4XqϤNko9c/7+C]axw!a~"1ϳ)/H[ #I"a9hVJU7PպJluyg.[ 9ܙwGw3{K{ fM6ZNyV8{тtcX_=/zn  IDATxyXTuG-A5wq5WVVY)i"""; ⮹fZźgs.9g}D\Z29%"wE?"rLD|DrrUDy͛hsq78 #š#b/"*oyً~"w&:aӂ|kZb)Y W_?'Eret[߃&:FަԴXs[L,WY*"rַ#DdMהŚS¦Ef6Erw_7״ܓr"DH+"E?`?EMa."{D yD䜈HϾ 'lY6hZER$᱓g=)a/iu^߆MXji5Ws6l( .Ir7OQoBS\}!*;+⶯HL+"R`my#""WX`/Yg졈4/`'iZE,"r/0X*"cDÜeH_AwyǥUq!VD?iZ-s1y(Y6EߞXu:D;_Dn,-DO.E~s Wɳn: d<.sbE7 e8+u ͓4-ǯ-@ e*] iՐɨUqs4fræekĚ+96T7e2gyqƧ)3k ˆ}v`''Eض "r[r("_܆ț˻,?g_>0Xn8|gVϘM] 쫳mZ"" 9JM8`iOҏek+96Ay0X疳8Sc޴;("ΒHV^uQml:b8t7X<2|률 lS^(CogmZc kZD E\yMqefGMXji5WsK =j(Cea<[DF)_2N+iTH|чkyyڦ.?)\~MK$uo%]Y_qjL6.妚@>ͳMuu ئ]mV{r7&PPMAM_ b⌃D6Q5-c^%\qϹ 8cH9wo3 3fq2XP[N?Z%6s ,7ggu,7K D,_6~C%mZ-XmZƼVKsQJqiZ"im. S*/2DVo9H `yO~BR`݋ۘj?MzjL_a4ϣ"Z-JM],O}cyʴ~}XsDTl@戨8PIcQqƚ#""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""*;E_1&"H  pIX$:DsRGQDl-"7E䝜T5"o"""uE9g;Qi}a.'"UCDDJκ7DTD.`X9)a)"D1yVD>3"JD^["NrBD~}='"+E$*}>zHk9("n"R_D~9ې,ǜɰp}1tw:Ed\^"R^TQrneiȶ B 68Nltr9VbpI Q1I9_?+"cD䬈WD#n7GF:<sR$}TL6"4gyS9""$wC"R;u%woYmp>]DQ7XLκ OqT0ցᘓ"=IN D?hu}*" Eddz"'rnNѿ[}5a9m1'Ezµx}V[wDB"@u}._]Nߩ".DD> 9-|4XcNDDDDDDDDDDDDDdN%4NrR;Av;S{ ,87w*SFܽ}6L1a L/':/"/FTb:[ޘ+^vK\tnV&.tցu11W: d̔q:e,7%Ys?s*[ف;3SF,;%Us>4^1f_7⑝4SRu1/=1糯8YZ6/88SRu1/=1糯8YZ$KSu1/]1׳8MdJI۴LǼtM eiR6RpLǼtM eiR6RpLǼtM w3SJbަegJ8+lZO(KȔigJ8+lZO(KXϔigJ8+lZOH_QL)y):i=,)%1k*ϔ\pKWشpÙR6-_?Sru1/]azBYZzSJb֦U ):i=,p-)%1k*ϔ\pKWشpW3$mZ~c^¦Օ&ρj JNQܠؿ+DYꠤLj"}rU+*+ۃ̘ YZʤ9? SU>Y2pbY$R*͌ބ8w䲢r%;91Ly/f^VhK(670w6+6-ց%AIj|.(6;7;Lyu./5i]8O{.L~ڴXU%1+q,N<8Seb}.1i}u}wWg{eަ::(1Wq$-[QĘ YZE&'*>ξ:+6-ցEAIU)'Lyu.7i}uH=qGqՙ^i,JbUjN8\eb}4ξ:qs=V{v 1S~Gnx\tZ<"vb[o!}njE?Ly/&1o95+|xs'r;odBHt[\犺R-F&`Mj6*Wu2A_3Lg_~*r6TuU;XW3;cpU'DK&~Lo4 ]:;EaXԪnYOVWmZKs"*6b*VxbK EbOt) S=s(EKEF`"Tª;s5+E>}R YZ4W7:k|,VD[U_7j 7 DVqA&0'm~CaWK|}u&WfmZs~"'\.p,fjpA0+G@j}n,ݨÐnv/8g_Y$\F¸"'lZ7Ż_]ɯM/h^"t N uPuPcrU#;Y%?A!RѮu0U ǯ?EZCРΧPX>3<_ *TEN徱XoZ2T[Gg\Lg_4Wg_Iӭp:yP tAIUs6(>*Wu2x^lg8d_*nE(:ξ:ݣe;t߿:()+uPc|rV|\ebˌA.{x 8mbvx6|b~Plfgc^PI|O%Z:()ku`1wFuNYhef˄ O%Zy:e_|?N1Z{aT|zs/#b2'" $T0=y=e982Ω!қZsIjZ8'ob?1å3̘)'t:yPLv;YZ3' }uʸ%z?N&8;$}ωJ,9۰,1ҏ,)'+aX"6-R$NV$:DlZHHu`شH8Yi"q"ց%b"EdE"KĦEɊDXM,)'+aX"6-R$NV$:DlZHHu`شH8Yi"q"ց%b"EdE"KĦEɊDXM,)'+aX"6-R$NV$:DlZHHu`شH8Yi"q"ց%b"EdE"KĦEɊDXM,)'+aX"6-R$NV$:DlZHHu`ش艌[Ҳ~& &W&ݟ<:vnJ],7qs*cuF>0lJkD_++F_3#hk\&/Wq8y/xw^TU=W =v::Xaiз1gn:~Wrz%dn:37[1W<}?'K|םqK{ u`yubRDߠY;yœ:pkgm1/&f콁`+p S>7@~p||Sq+&G)ng_3ξ:{ u`yurUxiQ7eO3J::Pq$-)fشWL1*?˫}3lZfCzoL1*?˫3 8zS(q@#L1æe&ξ:l=_QdցՁU̸CL1æe&ξ:$S(q@Fz~a2g_~CSX _4ʱNztY(q@媆nާζIh&@pV=F.4ʱNbt`#a2g_ٴM pO5X7k\k ac]iۍ"FU~XW*W5Ez$/uArn5kXgBty߭6>6-3qa[OMhjn.> l? V0V}Fn%NVaX^\H^.6m}]:5[f}z^G{frI-4}F?6Ƅߎ|6՟ް\~z<4.hFtl{aB3ָij &c.|bЫzS ׭*?˫hV0{T:<t,DqBV){mE܊MX0EϾ Q&c;Qe?lXfm*{΁hlf6=0}i*60?o>ͦe&ξ:D&v}p :5K#x#]CØ8]4kڿkbq_",SaH4NFbUψHTKs! 
+fU5X0о6^׭*?˫Wn{lU_H8W&lAO +,ƫG6F`;вnC4i^jkJm<^9]и6ݿ*_4b Vlfx޺n+d[6=9 z5O}lZfCxOOHd5U#D>ap͜#$q3zl tw!<'22ocM>WO\36*5Z;5@g"%ބxdl9߄ xzcS_',TjĪR:uFEck]+4<_Ep&X ^‚qQ|x'6_ۊ(qΨS'>]z ]u4 SobuԷ 뚟Us\:/X6DS"|cwc6p8Yu`yurUcS'jhۼ =g)G][١\.U~Crx{Ti䀏C%)_Ǩ14ui+,ù{J0`J::P0 g V k3g2g_Tpa[Ty>M%NVaX^\Tma|V`lZfòke8W3M?Y-08Yu`yurU#x2,sh ӡ2ٴW?UQdցՁU5bML}uX S(q@ƪYQL1æe&Kt\‚L3?8/=0@5I`˧G0ňjZqc^&K?<+{1ó1/XWS!`j8SNYdƼL$xs"aK3gsZch ˫@ר"O R?)*W"7eoxӃN]}s.2]}ӃN`-0 57pR]?P, ALY2!~.PF Nc^f]6/L$JI,]#-qP5Y}OS`&Ef',ʗٹ&%}NTXiن cH;nHu`شHq8YRip"ցb"dE"KŦEɊDXM,)?&+_yIlK`%iqK҂}.%_MiӴ_}J91lZMMi6شHqش kLMy?O)M4-ϭ$PܵIǦEpLOTi)Y6-"c"K¦EplZdIشM, ±i%a"R86-$lZD ǦEMHشȒi)Y6-"c"K¦EplZdIشM, ±i%a"R86-$lZD ǦEMHشȒisvE#31-tߙ=_<xk~NtZѱ)ޕ;vLCw/=v~1r\;]tXg.wKzg\" GbJ )Y0WwҰYpމNKCq0%݋NtZj4w;)Y-]˔`tg֎f4w;)g}3p.r<{- N qľ˙qҰYpG친nDNnJO拆˵5choS @ U`i;wC  oj]B}a{d|B]X3UIKB# Ia|l7>%z'8bF(4j)=^oլcyp[dy|z՚ujNh!3S^5%T0sg~m+wp<Ǘ`eS]vh:ul5>%z'8bm˹P8s##g֨P㹾X~r2LE;;}$`PwBjz!~ݎ̱hiZa9Q8l<{_i_IΘ{`y2 }xIM^Q[]ݿ4DrQ똤Czgyhq)و"ڽ:9cùdd\ڂA5Q{/^JFhg/կ! l:~^, 9-z`~{^[KH jrg,?84~_Nʵ~릾kDu◵5^rkUTj4w<;CZ&5s,Z[L ҳ_Z1ퟄvV1iW|&uZ Ei-O3xyu4.j1-^ؼey $@b|2q=Vxko|:[/j 9=:qڡRv&g_O:hqcI<4]7nΆK ?D! K.uDjqlTHyT\*WD9T-Ƕha >A=`a _X$f?>N,6T1H.gbAݶ_"{UK;cćYpG^bl`qfQ}69Vs?;7jG᫱ao6 AD{Sm#®׿>%GIQhD5flZU:~(1S%A6zF-˟u/vf36Ef|:gcsЧV= n0}C\XwR8#5zU*u-xqhkl:phNm^V/ <{ BuG룱fJS<#i? grCNgtRGMҭ~2zF]&āh`!끡*\zz#6.mrb!j2x|V-`Mzgyhr.93^.O<^mD yg <޶F͡W|PPDs:j n*v#~:yƘNφQY Rۼ+C{.)Wb`{Xqg -;}Sai8*KuxSl{ćYpG$f>%z'8"l$c!.yw;) g1fzgyh9ʘ!|KN qˇYpGl:ִ:._VGj0e_G[U"c!.yw;)VbueX[</)e^MoQ1nd hotZ f R֭{`t%z'8bgpӊ[AglvD>N Q'?ƠS'Q_]-;kwDZi@Ħ73Vh37yaqxN38e,Cd'hRݱ6s&\j;hT)B=Z;Ɠfa]!2=C\XwR8# 9) zgyh~b3"-M}U=_=#СjC|!)Ѧz=t2T1idCش{ GPkgУN8ywz}T1`(|ܩwX5~o[L>%z'Ԍzt2%>%z'0ʃ3tlSYo<\5[;nfԉ96Lew1^Kzg¹\<F_3dJ ~;'#~O:;saީ p3~7aőLłT?=ЎbqTw;&CxԌz0xG_~i~w;Q8869 H%a)b$w"CLNp|ɒމ1Y;!&Kz'R8>dIXD LJ, H%a)b$w"CLNp|ɒމ1Y;!&Kz'R8>dIXD LJ, H%a)b$w"CLNpyb(q6cxď ԌyjFa>-ūӼMi #p&xz{l8 u1j2 ¦8{u~fZGss0nş{GsVܸ?}Qcܹ 3C֘i~53:UAǙc^ÃAt" qþF/$1}b""*"}ӊ1j涨>aa)ZVYhӼ]N[aéaa T?пa+ި kmԏ̮ڠX5BNlAx8܌ ˱u|}ؼ7#qhl]4;7cߌV\!z~\G77>iQmV0G]qh4eNÑu R vv֨ngvVTЪ8u[!xp9u -cP啩}#[?3gs+Xy㯬~&Z -QeiF't >F%\_`Cv8.x7EG '.ƯWոC67caQKu߇?qkm[xe2n}_Fa[>l-AV!2g=֮*+{MM+ܨ7šK幾>= oUkՇCׁZp χf\S8.޶`8놁u["X0{5]/pB8Y]Acqz8~Oy-> pc=0X5ǎ]qnoNWwش(K\1j6>!q?Ck]/%ju|5~>*Av{VCpo[_nx83@`/khg+TF/bѲ~?[+E/K r8~.c=*"'M:dĦEDTDYZZ a""*,pucii-g6-"":W3Me a""*"}Z˜!lZDDEu~XĘ!lZDDEu.g6-"":3MMӄ_G9&<,d~Oy nP 9aœMwypܾ0]`zܷzZOaԧm1HӚcۍZnp~;O@gZ.ꃖx8}_acb[s^; u;`ݲ=2aq .iU8?d<.w3pܵ5M5)VNN{;&~ +y6Gޟƹi ib݀sS!Z oic^w`׍I+{C+G$ݑvtgii0Y./hi8 IDATS7ON z jj+8w!,]rl?3OAVHwEp487⃮ 1[;83Is!~J+LOF'&}٣ѣ=OFʀptgiU87dOp' ֺ9>s$haS Oq$k4d~Vo nG 71v38}[%fpn~O4\7"zk1ݖxS{qîH;nsشdÃg&g$Ǹ$|SjϙMS\83D={":} aL=6eIѱNtOD('{_9r-Oi{{n]vҠ=< 8̔@~=0ى~ag'"Rze' ~-;woS10q2>3;i3]=DDuA7rvYI/LpxucLpxpd_EDda<5=4xOlsQشH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شH1شdu#*{htԌ;'nG3jg>#elZDDdT#zj,ю87~̃I.wuw-QV,\xjF?4wRϸ}u.vMafZ\D_ T_?9tu?RO% `ЃeG;ng>yEDDO3ndSsoԻIYqʞ =`VZks4},6-""*.Q#p0\;h܌w}=Gg""bѿ|/VzI> ~I{:sgS8S3fO0O͘Eh2""*<5zk8CWH\ٍ}s41{n΍w۴݋:Ʀ#!Xp5"<޵鷽~:]NtZ?#+~ٱ8xuO'%;' WBwV[OA_`Aoڱ>3#"2'q\z_WvAw9Y67oث")FzZ;aߕ20?imsc""*w݈ʞq#x9NNtю;u+;GZ;PֲμQ@DD')qn''x?6^% |\N/C*Vi "|xj?:]4أȸ?,6;/mv߼{l(+aIn>?lgrlƏѱljyƏl{eyd^ ۳wRsEs׹Mëq)) ޹EyygL ƨIje$&&ɶvMm?ݴ-I$ W#*:΀7΀ sAA ^A4g2K2 }w9}sPIJ W/aiMtqkJ_5bk}F_ 0!ieׇ0'ں/9W7aiI޷mWGӲa\WrBVfa\T(|b][0 3-QU{=X#+էy1 L[:{Q Svt^T|x!ŹM*ޣET9t:gҫ0T} obH"{s- W{՜'h$0 Qj4ㆾ+i4ow/\VHӳSiv?BO;^q}Û ܹ~|wwhҷærZOY 0.FapS|\z*],u%ѪoƤzlwt+)jӳ#!(暈GַҼnyOo~/]w>OEiD bg&gS'1Z0 @]B1Nݥ"ʍzf/{J;z/}w ]J#ȯ;_-5?GR(R6߳ y uRu4o雔UBМGFL*zW8TtfPƋ$0 cEz.t43:㒦| H߹+rH]L#rzjcXեB? 
lѻ{.&MsJҾK@"y-"*M]Fw=H= *.ej>,x.rba` z}>~ ԔlUAwFI|6=1*x'C|mS#cfS/&66fA3@*PD57h&~a-|}=e=Gs/"ݹ4SzY4c}([=\(E}a0xIZ+JbƋ +By4ۿ?.@4-^GǘEЏ,N-TВL;~B >C-,){Aߚ .KH9|n"ͻTeWM6;VI[$&w\ 0!IWSxr;\6{ѲwҽsF;{7gԤPɱ4ҩL.f{_i[]ZQЍOJ]@?CO>ϦԐGi&@}n'þNbT\B>5ܸmDRM*dq1w7|BK)tL+ik{=o7 =hKs߯i)yBF9~O?CN}޾~K^s9ZzG >>κ RQ%V{εwaf0xI:rZ7ў ī a\R瓔l\,b$R\}a%֫3vd9ZaZOΫR}"djߧ҈R~.iB\}ajzy_ŹTb9CLњU1 L;T:{'b\2I)uj\}a1ZoMug5RZ:zW7aiu3)Mۉ52HFOY 0.DUR'z*>5HERrcEɮRwaIG ư7Sљ-^rxE6ޥ C=O 0!VV3[7PͣR~gܨjwQ~gRR15GRRc%SIʢʋ$Waf$]R}̩GRv[4城sXJgm;ڤo<% ydRhRdK:gVEnWFa@a)TRWIZJySxC&UIҬHZhͪTzyTl9CIyI aid 00xI5ϕ5mjLNfUJ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 % idFmSm@Jc2RcT)7I\22S xq[HjSd/`Yd46] J5&%M@X0eo-}/+|pӝ^`0ÉsgNbwuXɶ]&uH%3̔Yq)kMe:mit'gm.@$g&F\޼/Tu2[K7ɫǩf:n-Uib^W&,s7l>l*|?9NGՇRxaDH6!NB8ށh?zZ7xt]N#3 U7@8fW E>eq,t~f= XO|CqO;g|L$2הe\Wk:F[Vg^$c1^_e/c[r-ʚ3 GJ[r0岑]iqZ-thmoO?0A)U1y{ZtRj2Stޱޠljy f߅ i93(;!BChș K!F?D#\G pfM!!5[4T_A8E7\|wZ;"ʫbkzwV{y*R͌(c[r-E/@tcZߤ6Z-Q5U:-c.sKlLZNO;j>o 9Ǯ%7eS0p LW7uv?(Ho!:v-h .~<{G3\~<|tn#qxYwZkʓw?CΏZ'Bi2%m 0 T(Vc|/ JZ%c. sKո(0P j:3f՝{}lCQmrI_[F #'0A8 18 f]p~ ;8zl7 #i\i,ʌ(9cuǬMeF-ƖpIk쨴{ [,5Wk\b<{5Z6x49!{!F6h``yڸs#di9^p"r@p٧{;c 5p@Dg:/8 :*jr.{wѝjoAˎ ޖho=o|H/KdxO3nPIˢ0* bWT A$# u`cKAիo~ 'T~cܪ9v hmbW1<ͣտaGw@$+(G F!&ɿ>  D#78]H5kY/TGGfs!5<ĵtiRb1VRӞ㖡(Xrw-}[cjK ӎf"iVE*u"K2;9#!b ^[ j2QqVDz&)_qsTug, mjx %όupƒZ'Lśj,%lKSRk5hb\k 9+iN g,gDb{Sa?Vt^M_ғw!^3oEWkD)/%=i-}H7vweDik}u΄B bȝVLU2-gc]:N |a-7 >RDܗ.vY~cWzMV\径dBapSjgӨz*;LV֧0x8 8Ĥ\:yK%pn)=c.Qu)G- U'9Ѹ '6yx`u+1mSyU%uXI_"mi*ʴ(KdjCdē ߳;n봞iC+1*[ jpd^2:XdJg,LULA%08M9ځܶ2a9hvjo8dMVC LNZ P^DRr3WYEƬW6(}o|__hAHx4'bo@X{4D;2\E:RVҧ^[BJ2J+5C ژl9n=[R<ܢ5Xۜz4vv(FTrK8)uW90 \_¡؀FsTY 2 1 743 65535 558 image/png xmp.did:fe6944f6-628e-49d8-9906-268ba47de779 Adobe Photoshop CC (Macintosh) 2015-10-08T15:04:06+08:00 xmp.iid:fe6944f6-628e-49d8-9906-268ba47de779 created xmp.iid:fe6944f6-628e-49d8-9906-268ba47de779 xmp.did:fe6944f6-628e-49d8-9906-268ba47de779 2015-10-08T15:17:03+08:00 2015-10-08T15:04:06+08:00 2015-10-08T15:17:03+08:00 Adobe Photoshop CC (Macintosh) 3 Neutron Server Neutron Server rJ>@IDATx`TU3tJ!$t"Eb[?uw뺫]]"Kw@HH?罹7$@I{sw޽&HB@! B0_HB@! 8/B@! "}DH3B@! s! B@! GN4C! B@8B@! "}DH3B@! E! u4Lï?'A\N(EsoKIrLѕ+B@&&hc!pW '+V, ! Uvصſk|=ȼC$\Ys~eu! B@!5^B@! Bq~eu! B@!5^B@! Bq~eu! B@!5^B@! Bq~eu! B@!5^B@! Bq~eu! B@!5^B@! Bq~eu! B@!5^B@! Bq~eu! B@!5^B@! B\Yb]! @$p|Zp/qj{Mk8;kNHe"ٕ ! U#prC6Zf+ugZRĭ.]B@+L !! @" ~oB@! q'G&B@!P8_[z+B@!Dɑ ! B@/" ! B@0>|riB@! uB@!P' Q ~;֝ =N B@!/gZ)u#y\#HT^B,g ! N#1u%HrBB@! jط3IP0Cqp]lBK>B|cX0bB@\94~RZ! @ETy`k54 ZFۂ㫺bc]1w|-,.냉Rm{T۲!48ZCpwh/FjSeœ OE"?I5wa6F&B@\> Qع-A#Ȉ-3?7]Y8qz;F݆-h}$ [#nd6,!o\8y@|481JĦ5@h=rd!?(!# a(<+_>FbI',ϖO̤Ba[5V Z# /Z]_J/h}0Oh9JF!vSGl8y'oEpiĝ@vb/WuKqc%nj])%䟝$B@+BR!dyTƾaW4T e2bSB[ޖ|B@!  ܸr{p9fa`k-5f|jn*A|)!8+ņ@ },',5s̥uRJ !PRZW! e%@,w $ss+: ɍDA8{9 9j#!IXGP*v}J؃̄(l>?ţq\,Vh,/Q= ν*9e$ 2SB@! KXNk`/=k&,gФn6;09,18i5Ҹ&m"@3=ci2+¢#~$ʨyuϣER! !oH^MQ+/=N?4s h>yEe$NEkWRB@! Ds+=B@! 3"IsB@! .uJτB@! s?;a\! B@K@y=3! B@?# S) V@A۩ULn{ (IB@!V;w_+ڤ7.re9Ij"dQH;>i"Qol&ҹmp^P+rT:As5qnK~̴x*s8m eY:#mi]?̓otp?3T! j@d>xlεSUEh1*. GoŹ.pY5q6hРjMв`РNa]qں%jvt1Xʪ1_8cyNS6?$Zlf-sBϓ-3U⸢ڪmU-`˵vՂvNEbٴy8 ¹!]Y+T8FmIASer4Na]eTfZ\B@! P>>CHA,ߜUm9qJiRrئ;viñ22yUeQaz$%'#..eȰV $_%(ՖͲcW7 E7 d+ @œVWd˘8p&Pi_m519pp!wӑвz4#~l-WVu򎳭fl1Z9ZvRᭊS=Z9ʦmrTӸbx?o^Y͝Pcﺋx]ۯbj8Ǽ7FXN֐n4<ؾ;?`ģƂOsσ [9B@! OpJ@jS%14U׺(+r#$mvJ$Œ9@M-[OEI+"!dhI^H}4#\,0yL4q,wءLKsģ--ͮ>5~#==cn =v#ɓeu <qcG򏪟i%m*+ R]ӹgZ}PHK8rJ@T B@_'#$Xш9 1T 5 6ٴ8 9-<&Sź ڱazkWW񖋱=+Svu1銧F3LYr)B4. 
68GD rNK&X|عs';V1h~`8/΅J筙oXHDFgvCCm :s}zq6umGB@!HcE"7HGFK4j-D#DAЩ"n&imNO<N.**B5נir&&ׯ[xM,y$?ST_QQ1/[ÇmYlI$+?c=rscI,#!!!ʴLu;4]D]έ~)5_zͿҕiw җ/Gsq?Xȫy4|W?HNQ_qa͏yS}oW63mGLu|Ҡ]PUgٶc7{|RG!p% XqhǼ9 iRϭK@t o ^?MD6Pu[m'ᵗ&?`d]51IK*sJ>@ӭM u}T*D[+grvp/wQoD滩-z>JgTGъ)n )_|GMs?8t=,U>-p&P^n3Êy5vd')n̆oPs稺:||TvRC-S$! |g7~_8sv_qsxK$[q$Cn*%0(% 毽}ॷG/4.5 ?qξ,εߴDS ;iX׬Y ?d5̢.Ȯ]j>\ KqW Om{睘@BGI펂y4Ý2&Zs>cM?h^mMUbO>8E֜gsvtEI3ȡ~ -~&t=-q\&&&Fen<l/F-,{STA֒F΍o_VLPrpI6@y|G!p Xcɔ( ?y;Z;Ե},҃X,bDD|nvh6a5|ڊ`njBG %:K/\r& 2âX6.hz9S?x,t ˶,v9(vS4ˣnjFrUADR-˩K&7ؾ7ި{pF+v 0[ϟt\'z96z;qq_wjr#B@_!`Aףg{.|n~}Aϖ 聜Xp3δh/ޟs:½p8@'M_V F\2 3?x'ztߕXdR2zzqrD7q>N75w=ۗƄˑh "=ϴ r'_.Տp rM/:$4gh⫍S:ŝ(VX/Eypv`wv/Qs; F] Ĩ9Qje8נ ^m"([1N!mQA_}Qjy\s([|"dB#O~|#q[i!44DB+smcN׸qbھb7-[V*'-ptD,=knUZǙM݊=5iB; yȽd/h2KoQgǁpgcX i9 M$>5,?NIZ$xMq ]5ni,u%Lt!K;na)yTWUGkv l}M=wQnT}9q} g0ѧ:si;BXRmՏ2}uNכ]b8N%FYPspg/yU2Y Ya"AigPISFd+LҢyDڡ7;r[?gЅ!qN./ʂQ0EjLkң+\G[,UDJTsY]7/UX9t0rpA1BŹu4?X׋|}Pr"ըF9Qb󋙚f>I86Whcg[)*;f_rΫpA6S\TJ[KU$! '8Żs"#L}fˤ?-9DvANL) KYh3W,ÑGqi<^ѱaNX$GL Owq8p8@k\G˩57kr @Ls'JRf8W;Y> Oĵ~$u aq , inO䪑 I E6NBqjzDWA4Yi s KxN \8᪝< @/Z2j,ƹ(>)w7~tۧLUX:n+9Sͳ*w]: 4X|YB@kV}O|#=R*B%IZ(;/ j,?7O)_ep&c~m,jT6$B@\>~(n6[,i\>۲\&7m݃38:pf"luzpQsȢش##ytǾ'ae@ 읡 xcUsypw3<!SkNn x u=!Ǣ"Ƃyq~5h).SyzE,yT]yy˂Wr0ku=xeg{l4R}=83 *,Oʣ5c4Y^_9S W+LگT [ rURT_z[Tn 7 CK(r6mby.;<:>wcCC{ʥJB (ľ 0?$Q.z?fNنҰ6H.ق=cnhbT$7vkүk[l.H.bQLsoZ>MnȎEᵘ>U1rGtIJWZ0o6FZ!z`n#EZb႟p>4o J8פ2+K tsq(:0)Ggaz L],s>WʦCűzxutS j"IUXnK?.1̳Ƹu0,Oy4ίƾcG\JO71_śUӶjJW[>6WTB`ПKך|S[m}q Qc O"qnHĚ`F֕BOӯCvnK{ { }96<`CLS>ZB!qi01 M5Ʃ~Ud*K7c[B@! B]߀~M+ oZ4A lZ6Fm(.iq :mC`O2R\NSQf>zm,ڣۥU>P)ʢ ]Wyeәr9\y4L;gԬJsQQe\+DʷCoJۀ:pz!JB@ L8C*3W]{ YzpPq ,>Ɣa> ! B2{q^) f/RR SqڴiuOҙH.U1%.:!Ep˯QӾKhjT]%ұUVUu,;*_Tnr9$2`7xr{r! |;ΟŦI6rs4_VHBJuBOͲN*7-Ke<7 UTS첍U>Uy[NmxRB@S` B@?$P'9s:th9JsW\*3T'({)#yk)(S*B@!p 8Q89qE%N! J#24!I]ѓ:-}4G! jިZ %0`$_% WόK! jL"Wu6 Μ:ؑ9XA%! B@!+Dʙv! B@{"W@! B@ r&B@! zB@! Bbš/v~\fwqSØ٢vX cyXe- jĴz@qYc/cy8~L_$xr!n! B@z(]Ŷ{ o+g+bCqܦ(۱jLV3Z; CJ\䚚"|HÄB@!p8JAVAl0 [݊0_WV_=6E믞-!.޾ zvI^%ף]] VL~e_3S$ZVKB@! @_qht)?# ώLEpUQfa쟰YXy$Q::!8oGq}ƊÓPl .X='vFc0 ' O儳%'cGX" 92s6ȧ666x$M@ 3~|z+'u~,H_o08q&Z)m{bȨ~ȊQ=M,5qo;7,H]E5"._xn%$튾e\+kՐBB*+n[$|XV_ꊛ~NI[PV=xm%?nݪs-H4z#_/d~aɧb`E8:݈FGhz*> OfemO8#aߘ?c~9--U"Wl4 (%0\5O,P~p ?"Pȏpji[bs=ہaQj<㉁ Uţ&'0Qm2#*{x؄ 6cŘaf{"Ag@9qqtij:]KT#PbBxE zo?܉CqiI<{=mۇ3HS9dϹy k~cԖO{Hx%4AϠfCBrS}+$m Xz5[D0]Ii5 "_xhhS<>]d{^Y'ri ja&XIJcw@Mظ1O]C]G l[1 6WBӢ(wPs)Eij*R=?TwlpէxEPd9F(lA֠M&M7 <_ Qu:fYϖɱBy/8:_(VU#.[ |rqT2Ǟc&8>w*X\(o@HȃѫmˌEE1@nU' gqI!'MGƮ'RZƒ{Q/,:;eYU5CUyix]WT'cvG*4W%Ji6t}u54Gz6Go!HqGrD~K G 6k),Ь*.G .^hbrƪɓ 7w}TQr7哏_6Q9%6-ǾP 8 +:`` MSC},`JGkfe4$ _Vm޴) Ϣah;@̡Hj֠ _! %0'#1׮ aK&zjx豱v#LJG~^B-cKQ G@Ĺ)2a뱗yZ D?^? ĠNܼdף4<;ZZ>Ƃiy( kCu3Yƛ1ks(ҲVԖVaDy&3&Y;ÀmۙCX={%Br902S!!!?pq4 6.L־V[@kL=e~w6ƶwլqMRP\u"Nt  ӏx* f,WgӻD&ы/ m|nѕ$t^ Q8|7s9&}~ШM<0fZ9W`M cfLs 7\\dƝ BNqH|K$F!PSJ.TH/PURg/ű)+=ogneЎ!eGa &c~6-,Lj 5vS(r\OPn+N+Qv^eJrwjM%$N܂¸?Kc8/lA|P<@yW.# CKA_᥾cLK=# bṮ}fr+Pi_2M~8O6<<n{>! '.仈}!cWdD'㝯#I*i`$ Bx.AN8=Ryhm,ArqXOn_&y>Iho"USn. JY!PŹf! OiG"fBOwc[cmNM X495mԻ&ȂM}ȈؚvHۇ\:^ȤČFڡ!@IWN79M^,\@OO!MnlC"JBeG(=_+,=+6]hFh\38{Fjz5Fw xEpmØ_b74(}F߅R @!@dof|Tf3$^WM Z$ aKUB@!+ő`[[ S/\3+qs NǍuĘV©3s*pt6$#gnơ!Ұx!wq("B j?l4Y! B@-"! @IDATB@1~|B@! u׭)B@!Ph [&,9V:&B@!oD ! B@Yu­%00ӦM'I:&B@!P? q>(*gLzYmjB@! 8Sx'u ! B@FnˆC ! @] ` nBWB@3" ! : L8wSWCbBv q>-o xm4\|+ >DNs9tP*ML2g# B@! @UdH0Y! Bq^εT! B 8$B@! ?Dןs-=B@! q"}IB@! :3[K9eS! G9nX6IL@[v^DiF ! @uLdYB@Ĺ)iB@TQ-\&ia 7]Zqh!1Ih=k8TЊC_?aH#R+~b:܍hhTB@! a­;.X`^< kP}n={&|!L"F%Y'NtH܌'o:¼t7x3$! B0qa3ܭ_?s z!C,>y}$P8m9ԑLu7rP;|gZ ǓwwCU!mC(%TVIB@! 
#` j^]]w8s?۟ƪ'pc;EHd$^ު}1>>Ut H0*ч@хx?XbH ܀VNòPZ˩y/?--FH(_o_×|ϽxyA8Fg MAr7e{!4-"/3RkJ|I?2-B=)|[{z/~  s4 Y!z67`hI>9j&`1|kqbP,9yOvqؾ7&OŘ_Dq0ۑ#-l7D .i_#◹ 7e,|>>ǨFf\7MS҂7~U'd+B@)"Ӱa^H^Eӣ>;cMxcdl,,& !5y1zSG#|Pe< 3hTO^ 7#BZz+9m1 AH)6/ٍVYПXq|97i@!0kCVms7a aE6!!)S4>wD/]F.z~]nF*}3 h{-u64@tBd;O|Ǵ&! B0!KpNۀPd2޺B@$7˝+ loйs]C~B؇i8x靅Xֲ:;3m3Zau r"H)[c0 A&NzYX6 G# J슁–i=5Ht s-%Ym1kn:mErS@VFP3(qл'DM! Bᬍ:P10a=v $>ʂq,lRʖ ;"/tlI{hwyҳS /&[!}h7*wd!}3c2-B7 XgAV Ǎw™7Ah- y8;"pkݞ""6RwiW՞8=xF/B@ 8pn2죨ifSއ {#xVhF<6^Ym,)e=LF<-# ?Fc`JZS4rn6^& r>$'GƲ%[q}_AAL'^쇣;6aæؼq1lX٭oSxC&u/Y#! ?PkY#Gc0ӵ:-Zw@^e9Ue*ZB7b=6 mJsƤ@>I ،`=8ZPBIHk}uC˲zTb&reLk"7=c Fvb^{i2 Fƾɾp_b$`4'Rv ^~7a;V.lUvk#x5ET &7̝i""(ۢ45Fϒrp;/O'#o dAK?&M73Fza?cV""8pPx-o=Zq/*GB@?"Vm 8(=#{6caȽʎ*?ue_HʾC(=k=kV--Wc𣏠Jk򖵋뢶܇V`қpgpKЩ,лL?ѻNTS+4' dmZA(9Nn}0 h"Mn;v㈢ 6Pz9_+ KBwu} |~AKd?lZ}h?y<8 +:`` DjWR w3dB=vҥ؎}1eY= #]s(Z 5(.o'asJL+n]%͑a3@t/ކ[O!k‘ҡ;z=7ZOߚE<gvb1>yH&F?z/ox(;0-M*\7X1yfGʚUzK%V/--0t΄q_8t*&пkN܊s`[In^Z`Nh ywzplrrK0jm<YVG/fe*n`Vϵ%"ZS1~b"ֈ4FX _ Fb8/=~l$tDϤ8w9nM/tP Gêc0w[$7vV^tN1y+BӺ I&B0<714n27/cr9>E^"^Ѥ: ]?"`A-c-m2fߊ?|kǽ!Iޖ5"/X_l]khU^QTaҰxG sṮ}fr=3T_xc! _B3^H ?g7s+O©?"zG<&B)ƙk/Il>J6hru38 [֡'47t&?-Yu';wűd/h2 ZtH>;N*9:\#{FEku@@$PQ7鷽r߃3wvtj(oO4\:L`oJ} xX\Eϣ(a-^z  *Sk9BBO2rWI'*v䚅gD]&{',%~FAM!=_L:{+^Iòr{yo׽*'U%!PS5v=-bF5G>hiBy{ ƒY$1p>c@ǞMڠA$mEڪ!F[CwOcǂrh3{aV.C&Z%GH wANL)R=1" bbe8rhK4Gka$0twĹ^(q~u4 ]!P8}nd>wbr6J =nqvmq$6ǣ Ƿpigd䗋<#[Y?wPh4Ll@ϼ.4 MBaya+lo5]|ֵWPMӊ9YǣBB=.uHBrv:+h)s+[I!sm5yȠ6-,LjǠh!-2ѺQ= V,;v4_nx'&2ȖYL 5JH^=*! @E! 9w>t=5sm z3+v>8K`EIxȻKnǂ䆓}:AkzsMokiF{j&rr)kg,V7 V{qN[Ck;x ݁elocX/9KgS"W9XyPr?{h-q݉N15.4 1|yym٩ 0fic]C{Z@z|l 4gz̑h}]&wmhߍ+iM'⣉ ٱ(=s ާLx,VG3b*ǭD݀W>=k=&<>/y? SxyO.^Wt~DWB@zG<31ۯҢ.֌_+,=+6] q~C]2Ě`F huXuقdh9f,- @Tj' 5+$^TV;?\HKg3`/^M.K#ĈB@$p,*lXyua%5Ÿ "/^!k8uTg ͤUn?EA[aLaP}#-ɋu/*\/ W_ZZE/ 4FEX}m8RPgx8PPw !q j 쌰#E_ȼ ::?5ss}F P@T gx=,.{N܈Y.]<<:`܄ ><FxřXu.7_}&+>sr3:Lb/J ho/&{_%Nj.w{pKb Iayi;NѤG0wҾxn\Ongo&Νs08&ʔȠoq8s+LI PX  lů'4 y烆B }x he+w{GK>Ŋxnsԩ]gLtq-Hj[FSxbX/EsOϞo?]2ѵ8 lZD۳?bϱWKՑ뻏K6DƎ\Ǫ9Ɣ/'{AZ"Xqÿ ~ h/yŗο̨Q]T1OQ|xf='tUn Pxp-u7 <}}j6l%yzsUl:B\d\pI~YaB''7 ~w-vV׻J8wHUǰ8y?u %ѯQAѣWu"Q'auFͻQ:{\5\$[l1Q(@ Ԩw{o<ט. xOn $9``=Oo™+:H][0g1?;q"kYuxdvFy354DQ;Υ9Ilwi\NzkX9M:BC=>t]5|/tu #0w (@ ԔCèucbcOмԵ[$V21 n>Aj[dU j?i߾CwkV^J6m-w39u- jM\88֓3N]R RI~U-|Ƕ 08N1\Ӵ-VR5 (@ P%`?/,;141;n")U5nLJDf9jwh% 7'2|o4E|m.GMdNQdd\w<7vT]Y ިWAg7ʱdK4A{;(`"|4qI PM4vķK(uj ŦaKu<ŦKO?Wu<;Zw#8u3$E΁;ee Nmn(-uib) 9sX єU֢pEmg9>`J8؊K;u(d(@ P;4;}]Sp`uG͚p/6 |;AfbZ l}-6oݍ " Dָ'r":X$T~ê 4 oqEV?(|63,ۼ`ix{%H"i6 抟2 DK[z2(@ P0997.c(@ P*`>#!}o3+y$w)$Hi^Sճ^ PlnH*3n4Me=mb>bK(P `ɹ,+_3 ?8+q!MvJ 0w]0;HLvΈ>9Hξx%+_StšyK[JEd`A״d,wkn Sbu0ADJճ(D{x3bxգd͙1ԈySwQ%3lf]>(@{Ñ\|۩FXcoL aÆA;^H[ W\7ު0@Biؔ$C&ʼnyARk{nRHrE){G0yWS"{bA5HFDa=M.E(^$X7]ň ̩S}-ElDc'Ϗ-"7MBD=AsQ:cbC5K戗cJŗ;5##r>Y5}-߷|E wT0VvsBɛX =HkTn_ 3+'PgpCE&祜s32E,Gd'`cc9W&CĺN"ef4n`D^7m uZ&dѼ.N;:8ʠHN@/xm]-^j:rMl66~IQUZ|Ш7g}, 3!lt-8:7N~Zf PԸuoyO?}k^µKyW0sYa~N -]DIƈrhƔ`nw n/u P` *Xn:U`+ _%v1!)aS"ׯ"h=-<@ FR=L!z--n~^xE|u(͂Ӆ.qbTC]q(*-~`2N؁LqK oJJ;Rē jv[^<ׂcPS'1ż$Fnr/lA螘RM9f }0L[b K>giMÑGl^^Ja\.5NC޿k;p-,픘{^ؚQ8q5' trsq;K$ ^R9qh,U$( ]k݉E!kY2j>*^htW@\1 Sr30*(5XѤx-="zcF2عM&M\5r2ұp >.hUW%C& 9GU|..c'b;8O|ذFnN@tuaM-wvûRJ֞9h.MiU!n* gZc9{57aoS_Uؽb9ZVsYb'ۚ 5_ghlN1j5qZ|P%Ω*tFJ1 S'Hptk0 @IfJ7s)H,H@Hhb~yz^VX@՞82Y{oe=EG&BXYO}kuP~\j7ǖ9qUw⯷%qŇ]P[E%%'|#x+>{OrqV"4Z]px\]"mO8+' \{JR!:|ۤ ˓ùn t{9Za5 SN-|h_kX({Q7Wp ;KPmDl3~p G+>1T$ĝpճƙ{> AAoϢ i}W"T$8}+ ?$)[`ށ.v$03L|վ2"* [ގӫ/`Ԓ%?gTj/tvtDeW-^^q[SX+xhB P"PKnsiA Ha1r^-N.vI;6!-^&o] >,H*q"Y]ŞWqWsK%`P?/H̵^TxA%μ' fxsڏ>>.:d^Ez5 !.q? 
j?"&Ц ګ.-Z!ߞ祸y V0뭘WQH\(@3y2yTeC l'ĜlU"؁y+ \KB_ЯJMKh&D]ܺh͊\:bKZRqsR%b L M9ʺh`V^Ņ|0bGF}޸tM nEC n[BX*WoHADn(@`rnv]Q}aײ+Ŋ}Y< m]DvI|Y8ߞʽ "Ni /}DL-)MʕN'6Ea~,OiО &n*:¡D/'젻*Pt7ܻ^zؾX*uo}B P)<@ +nr b;qT5/v(sI#Lz{ gɰDV2W+YTf?W^6瞹ɚ=ډT'C t@@(Dՙ_T>(@KxWKc)`:RW#lU'zl'8«%_MMJ].0A}Wl7<4#W:nYqR|B<iuVČD$fί-kGR1 097^aLr [<]&G9ڸ_<& m/vRg^Ÿ3-9ȸw(tQ5uT(|;`]{xobŢѽq*E1BU^J4:7G Ţ(@ Pz8zYjN&Cz!SUվ$1GlGS=1g}`_/Mv3sU*&l]-=el_|5u>9m|Ըsb3%Af6haz} -scFǺ7$gwʃ17+*<WNk[L9s%\;Ȿ{aoK(@`r^=)Pm ԋ?Ǎ6ɣ/Τn>y&bfƲm?}bVG  h wϙo,5/RN$玭'`05llܥu_<~xr2-pb,߸fJ4FobNLl!kƅ_Er΅KwBM/w5@9Sy`J}TodYj^B\w.rΪ+ C P~99|E|9Fgιzzn:.&7E[&Ss(`;%JTh,PMsD9<z"L$~eA`"O,n96oXբ[xw3^]}nln{wHT1Xu, Rjp b0&M5Z_cLX(@ qvv <@|6q4(P5s^5,(@ P- Y(@ PFy8 P(@ <&d(@ P&UR(@я",7>0(@ P@W࡯"/H WtKzXݽѱ3j$VZ&cȄ-:Mp7wI/%#^qո<:ޚŜ2 $snIMyb֫I`)`^ Z_~5(iNtޱzơVj0V]Z+]d9ӱ뿳U_z^xZz?_1y{k͛aލ4֍=))&9åe 9 0n4a,=obw+"C?Żr&>SL=[ǰFa&/vɹc>t2gul K߉OG19ZL͹yL51h Jy-^CĨUH3I~YcĞ/E P5,Zj8~VO G@-rq@Lz<=w>]?e梙8'X~a.I@Gb9w >jh{{m8oNaJH 4/yJJT>vK~ @ @u_}s.%VD8-5N|[^WSŇq(PM|W&hVCox#"}.cɼoWC WF@#Em S"C]LS[q GCE܂Œ{ n aNDP _(1*Ú4po >qQb=B5o.ypߜ(`<97`$0.<>=fzXUU\؁F(;.Fmx/uin3;q);ZfqVp} C-]D#(}]ZCr D Q&4b^EN x5slƧfPN(^}_TL P5(wgNEKNi)-m5NȐmԦ:t.KEӮ=(@ T@eڪ-(VD H@OOڈƯӗluJ!N;[3u#Qe%"1SOƬSuh%.~~yI\;WQawFd{v@$=8jO($,.'K+g!S?* fcm`}>Ei7QQom & M(L&.x'|뭻XG9ĊE#n.TqxX93mHsҳKhjn͔Pۈ $jgISߵ$pp)x9ROwwQI,[s䉧Y + *yhQDW&/SxOL q+(@ ԈagA-BLZ3Ln)^ԗ0혆ѯ?"R|kXyYN^9 TP"8R`5: mmq碕XsENQ"d{(>#\:}1\k_鯾.<;D;'?+xt*CʶTK P0søT Į)<ҋԡ^]=>x2 ^6bE^c?YQߺ(/`\x(bP,(up4Fks[5v{89S[w6C0듏~ƁX<\K!^3=tS錒(@ PZ$jTk,XL|c (`01M9\Ikx /Naƞ K(@ P 䟝e 7&k״|R}D3~I~ >E`rn\Q54__Λ̟_5e &!ɿL)FϘ)P5L̫ƑPWtVu3NEXj\@3ZxV^Cr X3|BfS^=eKq ~/ws?mL'Y9(@ P+tS(@ s3P6(@ Ptn1r P(@3`rnfP(@ s;FN P(`fLͬC P(@`rn})@ PLɹu(C P(`LM9(@ P 097es(@ PLWɹ#(@ P03&f֡l(@ P 097ݾc(@ Pf&:͡(@ P0]&w(@ PY9(@ P+tS(@ s3P6(_% (@ PdLW1P PR_|{PP(@`r^0e?`(@*`r^Š,0xy(Fà(@ /|#A P(@j`r^-̬(@ P 09/߈{P(@ PZW 3+(@ P@L7(@ P&J(@ P(P(@ PEy0 P(@ /|#A P(@j`r^-̬(@ P 09/߈{P(@ PZW 3+(@ P@L7(@ P&J(@ P(P(@ PEy0 P(@ /|#A P(@jUK-f.BJVҴPB.")O ތKa2iNh hf|%L !cޝH9uȢp&NJ(KyY2NJ |J -t7t%,R~9"$5{cb 6\mYj%YQV?Zo&ImK&ny*+(`ֲ f`hu!͎]Ib[ nNH>yKdRtoףy:BD P0G ")@PjtSvcf|6f(j3HCϛq( ]s!f;XK!HV_ sT H=˽uJ1%^cPAy5 P^u0=PP5RoE#HOB(ΜJ#>x f<_< d]|㋆Zrrrp]̌~~Pqc P 09P9G P@k[k&@JT /XV 7h~,b>eA螘RM9f }0F[$+s_oჸK\>g"C Pr9rRH ^@[;p-,JɷZBAwtGDSom8;TﬖbJj䪡*(@ P`rnWJ X@6Xr47D򜓑SqA+k+x,2T vrr:NgsЭ%*MArx^Y9'nd:U@Ʌ(`pNk181+ " N})Y{oe7 m*o׆r#`f[@[mR>K| ?jCy*8YNkYO)|)@ P`rnTI < +zpO?tvGO51wzxSZ[^Ho{p(`xNk11k(@ P@W;Q(@ PL o(@ P(P!&bN(@ P0s P(@ THy(@ P /Ƭ(@ P`r^!&D P(@ 0971k(@ P@W;Q(@ PL o(@ P(P!YN $n (16a(@S`rn*=8)` LZDb)@ PTFR(@ PL ˢ)@ P(P&(@ P0sh P(@ TFye/(@ P (܀,(@ P+})`L^<ȣdx5)0wjzM P09/OS)p#""L)lZ)"׬X(@ <<(`2RܛL_1P PjmS(@ sc B P(`L-x P(@c`rnLX(@ P,ZɹEw?O P(`LL΍7 (@ PE 09g)@ PI7!2`,\X5ݚ1p*UqI|=[4SA^}FvI/[IYPk#I;tN|i>b(9P9G3ȻKdڢBa@V7uGs,NePJg_Y-™,CKl%Pu#gGVϋم~؋e0֐g"Eb[4 4yHtIy0(@*`r^Š,#v7y|C{LTEVsdVm܏3)ȅvAh} :ۿ`=8"g9”7[#uj~b5#ܢt;,\ٴUJ4:&pphc#JgѠ|&ɋc0L?\|.`Vfi߱-d@xrd7ԓ>0G<^ +A-!7k?փ;]V9(6ZbuMcnc7m(E©X~_KaB&߯4q(@(}d@)@PBV'ü˱[]KNrp )CzbX``&5\6 JB_( +~HfP_qnA>͉-q\Nx+*@sGNBڣkC{[dhHXAwg;mo^cǓoqB$S0a3O7xT\E PF)EP': azKΫ;DH1i(9-=qq];'`tƝ/(B:"V;.F㩞-ѰnxyR\=BP' 6Fŗ1%?_$*+)-%UH|` daJ!hUR˙ne_K;';uC?Z*5(@#+v C@Hl1MaA Z4D#1:!Խ9ѓ,M` :ҰC O8D!rss5NBlS<=FvخVpK1(P[4P_RJ%H< E|ʼnTmzquap^3I_z. 
J@Y.'q6E&0*HC]zb/ddL\+Vl9u P0zNk1.b>CS ˏⲨVS/2t {؉EK.Hk%^ЧvѫnyȲmumN]PWˇ`wjc4s؆e4M /-um a8<\tܽx Qrmg+T0Mn2i lzsnGE-{7^M,>l>uqZH7<+Iu NbCeؾ=aE2 / D߆w_Ky_S0}So [@ <893-MOcVۃ_(Fep-=f z5q*1ZjA2=v%v^+9]QyM\X?-K-cN sV;q;.h⥰qGGg"y-Rrq붸R.m1_.ko QPOܲHs &,5*1nCAX<]Eqb/8(&çLxVm؍K--: P0qZ,&O6mNǡJeKQXD΢[U0ݞ4+"WU ۓ>]X_ Ix aE\ /L P(@ THy(@ P /Ƭ(@ P0jw(PcF S pz(@ P0Y&&u (@ P[=(@ P&+dS(@ ssQ(@ Pdl1p P(@sͭGXB.#22Ңx P 097՞c# C^*=r Hd֖NS0z&FE (l*w(@ Ph8hP(@ XsK `)@ PFɹt(@ Pt9߬V-¬T$ZUF PjW'K@ DDDx 4T'Abb|1" P(JW(@ P(PLk֟S(@ PHyW(@ P(PLk֟S(@ PHyW(@ P(PLk֟S(@ PHyW(@ P(PLk֟S(@ PH7!* (P,[7i֤pCԥ5t_B Bω[broo短CvLv1'k8tn%eA%[;F@vޣ=)T\r$h#+me3?^h8)MG P}4|0%&NK6u.}$l @ؘ08  Gc{%jBx'7 w.C;%SBRf-= ه F+w $I85 uQ5}e>^"XYN Pb[LWx;N%F`x{=KI^.H̕2n4j['=#q.ނEड_ /VA=YVX8Ȃc/rd;.FhU1yHtIyUSK(@(ˤ`)@B OO>CQ6 W6meF>N9R841O T]n#\xIgyi'RdP?gRҡh{n[ջN#&K=OsrO;|H8k1}ArMΟ3>x6nC-CbİNN/buMSoc7m_O(@ PxO_0 ʺ]/"P4,ծ`3rn4=6,k w rds *&3JǐaakMHnEƆhs,< ȉŋ!LkW#įPuBiFbYyf"FtD&vgE1S#ỿbzx7?3y0A{& (@`rnVR/Fq'wT:y.2%/1yq3Y<:3+'oܵ;@hAt)DҝնdYrWOy^j m&m/jĦ \nEj?01/G,TɹvIw|mo޹sO:9/h^fgU}'9*K7lч ̄]uJ£*(#hTAe>6L}ggf,,b6nLSy+Ju)Ѻ 'U>՝ˮm3OuˢBeF փJavn.Sh}hj\Nכ @ МGxIhSٟ_QY E%MҢѴPB2/?׶ҷ"xbRsuE2KM;P7wڵR]Q*q =ۡ*QU_H}eWzvKyT;69̾S?QnטNӜ۾% rF(e*}%g2qpGl#!0?}~NjdɒchP|Zg=w͚5Zz}r%)=G/Y3<#U}U+Kcsc?D׾?]c lTV+Ox4潱@ !"pzS]Z٪ySyD;^G4Y2* Mn'_K=Vjkrip $A@@ \J1ʜ x<ߦ Q>̃ #@s> [{apxY!$@sA$tav @)sk֍@@"P,*)W R@{}|PYoHHt?;@C<<@{fLaXM )$i +dɒ޻|FO*> 9Ϻ  hmXtRF@O Q! P܆E'e@@9Ϻ  hmXtRF@O Q! P܆E'e@@M³.D|:]G8\?f|۳NU6'2.O,s')zVu9 : D#ݿn¥otg6u,}ich_T;Q;*ǾK@#΀' \ 9@@Uijy.N^XwotC֗?T!myFeZV6O nE(@R!X@rEKc.U~yG$K%j2mkjۿYN? ݽ[5`NFiDS stçnTow]I? {ؼaԟZb9Sܼ\.?u.-Z:sEg&_^;?sG闇q?s顫4/rq@ М;Pdͼy;t|=JҌdu~/OMY WJQtjCilT,+85u^e=\}٘nrTJJ&wtYkڰ ml͕۳|gLOLwiϗ5ԣzx@9GFA=4]y׷tT C)ЪwҴ4)rz5BK't/yikhL{ױpL+~̗$MS/gfoe˙; @]561  0\.n^ڬw>b9j*msf*%ꚬ=UxL]w?sl@{6koJੜ.֬4OouLT꯮9*~zw >| ԥ w@9J 0J&Unۨ Ig/q)c,z:bg*}|8'hڜBeywߦ~w|./=\ @Tqn6# *@s>( ;@Uw*`gkc:OL-͟e~ڵR]Q*1\_B7JW_y*j~ Uj;0N=|DZU_}o4͹Z g.;}7JE*,J_}Iz@9L@`ne/G=l;O uЙ]ZSZtS{)ӳg;wDZbbwK)~RA7g|)-8wOV=~G5 sN@r]D׾?]c׽Z)~d,/?{ \8f/=gFpzS]Z٪ySyD;^G4Y2*0#]@D ~Z`V^/ϣl֝/re#0Or d2@! @ <+D|  9MI@@ hýBć  `s۔D@@]<+D|  j-)5Ah͚5vHG#Fs@P МRs!07', oIӘ;b,:A#v9S5Qr$F~d @ Tlʆ М'! |qIjl .( {Ri颜:tϭWkwwT\*$Ϳ/"Y|,W}X~ŤTi%ʌ3h8df[wQM_c]r[TUCF+kJdC@`PxC Йu٧ G5=Uh5~tBW]v4~%ьӴHWvo<^RZ!M8ZC5TK_XU7ܣ={f< orq|3K '@s~5!"pd<}.]7S=iq^wT^[K|NSNhyꕖުS/}iK5mrG4S~ʛT\wڃ ʣ[~pF(|sgc]@F,?^GEKe%HQ)p(>^nL*,NsHy)MH;nt]Scr5?;=(? @L1d K/FMSnhkf'e)={rC;>@ȼ8ZzǺ\f=5>J,||[`Ot8}awTfƼ{9k;c`!KGl%@snrx |9nc!=-(w t)!V!xYS54v)9]qj3Q;cXUnngi=]@Ư@`-5TUY.6nM`HRGTKM/90iX3=-! +x{\H(gf:?Um=X)V]ڴ~՟}g?J˩1W*@ZAׂZ駯s!87 @ %jJFDY2=TxLTvIs(U w3nQIDATM,ئ |W]p]f>u@1@O` 3/ FF@@4vi~yFG'\@4֯! `I'VPmrsɣyr@F$($F@@4;Y# y@@)@snϺ5  @ МaQ @@4;Y# y@@)@snϺ5  @ МaQ @@4;Y# y@@)@snϺ5  @ МaQ @@n{M `7 bs576@ 1-W0@ /ߚ JT͍mhϖ@@kG  22  #YiIENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-ha-vrrp-compconn1.graffle0000644000175000017500000001664000000000000030442 0ustar00coreycorey00000000000000]msȑ8+%H^o+$!d'zR$A"hPު$bf_^pg$)i;i7/>ߟݻŻ> ޽==vw_? 叻777a[7de_͞awcʻO ?&'?<i'kw/wx1YĞ~ug'([~,|)ʲAW G,K._&A3>h|wtrWy }t]Ż/'~wytn o1]vNp@F)mFZ%wcyh33o f{*]޾M8Q[Nyo~_\^E7a~r88ٷ4mK94Xb}%e2b'r#]]zY0{e[ܶ5V2jE*P8 ̏(U͌$ͪ<&ʢjW}=D'E _/g:Gm*\]Ao'paxGqNbON_/L\GM잉lq(IVnSdb9e=*LS@R69:4޾o&x4H?oe/ξiyI T_Tᜱjp]2e cJQϳڞ2Y}O% :&{N-GfLT|ܻB-2~O4e .Y?˂K7wG_.aMofwMmN/tp>)\;i'%d=)H~;>eǧvΣl,?;a?gq+u|qrһgqsG8>qEigme7h"\DʢbEh(hM}WY;yOR*{Ósʃ8GQ/؏4 |gUo~Ҁ zZ;W$IeJHHI>!M$pdB rxy!N ?*fW)` EB8\ON[ ]r{o44 1"%8 $2o`0*{"o 5]^yP"v^+pNQHaq%7qGF!jb!Cdb6&RD܄ M-n!7`A>B&7LHnBcjym =HIPJ5[$qPbZ*:y;W\! 
O|BX!֟Iݚuʁ+ىd eudvQN=\:'WaY{1$JWj礓KUY!*((#J L$NU*!&IQF3ƧDLDLDL7uhyHa8j5!nD%ԬlF9 ȩLUJNRXcrNÅJkk}{D 3`pia*V15>fE( Wm^ZePUR3N$9Nh6JZRGLk[cZݔi`>8wjY DDӶS$7OQ{\eȧ|6ۧcQ[+~"- F2Q' \u5 95nDFwe1"1V;.˹:ޒzs^u7ж.̖|(gcl"fʩ ]ޜrԩVMJ=ԩ"00jNtJk 4OmpU2cL3`E1PP6Umpᒒ88Äߦ ׊/=Ǵ+8nWDk6Js% Ck;C*-#CylWIi~>6VLGT˜"@POW G1Q R k3ۃVJd]P]ƴλ:ۆQX^4$od$nyA(FZjF'DU]JW9U5EW %H5ΨI4VZKTALDLDL<Hq7jjD0'pKkK&uxn۩5O6>%"8"G@p=x~[m%m "J#J#J/J!P tp?C:l6P- mNmN:LI+s 9%tD}D}D}+PzϐBG@X'U  PCdb%ix2Ho B\kf4k$4}G~0?t :Ht=@=h+}a2BZuF3gBU1OKZ%#K1cRfaI LQsJLfbS6wYt:GFFܶ) `Hք8t__AȴSpC%7PPb`mֻ5wZ{CPgx&owQDyg5ʵ]GIl"@jٱ5$7m;=g0Z}O 4.#NDbe.t+ץc:$HImVf.jP Xc9S6e S,y{4.fǼ3@#g}ܧGqu<:~rxЦ0VlD˩T?lz`#w;R걙OG5i[td #kC?t8*>pT7?gy JI@ 16]dfyEq uPsƒե!.du! Y]ݮKT pB;eptm%2_[R w s?P䖈s#_߉3OY)F@82PF*#hEx2%_.JSi(-N :3?]TpIЅ#0I NHˡHU&d.@[+{{/sA˜dIŔ vm{:ЮTA E(-2,wu^Gbŷn~ITI 2D>k o:JzIKM~/TK'U^=:8' 6C;`pJޖ&f{۠r!]*ƃwiS|۱>&ZL<=ۭU\Z6|"FgK4[_~H:(n]yK7k|fQjK/[f˚zI',7I|sm-pmz_{"T1$?'W l_=qO'Ui+6x4n ƻzq]?p=Y<`KR7Ӽ3نNE pH¿?4~5G7V>cg?J#~fUhb/y| q?,A@ѣָdǦ> h"(Äw~4ʧw [x YޤYpIfA>^+/z}ɿۣ{a N~ks~;?y󱷇_V>~}{G͟n^eŧLGE?@U0ۗ?j g\S*T3f}n QJA}WVBhCA1cQ!x_#5!C/s)|H/qi,/^~Qd rG׻01Y `ӊ9:3:#$E—Q:L -e ȋax)2^ a_xlhXįy- &}Ԭ˫,Ù(frVf_ nCM6TE|:-aL(v0ezgWëޘg+:&.C&OG-zɛo惘"Ԑ)(|U]+P1e,:5c f~KӋ*^ +}>Fjɻfs&ڈ)kfh;'E././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-ha-vrrp-compconn1.png0000644000175000017500000064774400000000000027637 0ustar00coreycorey00000000000000PNG  IHDR4EF(sRGB pHYs&:4iTXtXML:com.adobe.xmp 5 2 1 2@IDATx|EgRi"A} {{}%X \$+؀^^ao *ذ#* -=l\\g?g3;3ϴ&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L t Ņ@' 0:CW]/Qn( 1u-o(kU(0o񖯹K4'Ra'#hOzVu_`ªhbRVע`h; J+Elmv4FJhacG7kŚ]q^D{]AAoJvMOq76$cWnkgluۍg4 :Ҥ/+ |! 4&m%T~so+ML=*EEOor_vX2mZ-Vog&;vK;iҟkMu\=1|p- _3Hj,s  2tgS {McCh2_t*ܵPYKV1ŸW%,֤xNS N ^1U>B)# GAsPQfMJ]iutCѫ,qm pv<ıω/c/V-ͷ9'J.LMS#>{6MP!xn.D?rDNp%'ҽMuRj){W1;mh|=22D#pW l)ӳH4"fJl!q R7J<+XC}9섑VGazTr}xNL]-eL=J,cBT3MH+*?9Cɜ:b< lKGe,ľv/C̠t<m'zUA~:(>R۹h(V|hzLS]Wy"F{a@IY8'/Έlz'ܜX4mr}wn( d:73wEy .2%#4 0,V"/IS&:B&N$})5q33PXΝiJ[ 071=Np=kȌ5?#ѤYx6i$sv[Lk(-oqʽ^:V`<Ѥ׌(K\DN3e @B %Z#KTǏ/GUF^J$ 85xl@^愍 Zz>Ch &_cw9Q+]<ZwL(F35͗;;pZشJ=kys=2pKWKYЫ6Fp#*cr;R4y=99͞"WOij Tߟ]!F#c5 2rT e{륧q;\x+7zzL#' A fx9y{O3ALn7q<*0eg0S/wVyk{물I;nμKs: c\ߊ}ם!In%) /0r)3 %|Iqw+MLA^B%Jh OױjVd`{q*494eH=]='qGb+wmQhwaTP#Mw̝3¥{6fnC0aN"os)4̍&>'_ tL1lob$BÑ'{#s&F4ڂ|ޫKPGt1 g*dy2yCbu uW!gq<.x|V j XF&+aPwT'3h\okϢj>+InCI(1Kڡ{ әUR<7{KMC! F1ZDB#GnZ[hTXh-f7,%Iًs c%u8*/a7JK܈knkR܄/(n´~;,%{&)w܂9Pf5Cgӹn~5ҟwc7v 'G#7<|85!oDڡA&) "?i!qJaZia_`>ח4e4n (NO!{mF(0dF !AO_N*yfXyqr5ؚkA,4G1QTZ#X"W#}|w09tzCȗ„5͍gA4JXrQ$d#%Vhv'7͡{&̛B|LPbG4&isZa0N[xST:֋G#^w77nXvd bPQnF|1nͿ1_*!ŧ- V)<7Άěݻ'?{(Ü yjR1;lL=2 3Ϻt+Mp}CM=9Kgz)ak^8bV_C(ϚJ,FzHEz, d9iwCiPv ijI#O/y-+mγhPo^K/ ;I/6z0lӈ,.pFTБj} yq Fzf#-ŹvGDRJ=RG av|92G#qJ:<ͿFYwd{<49gͫ=E(gԹ(c؛, Vs{qT?f`%*wd9n1uDu@֗Q@Ir[ڍ)kK9mSW^nTS۶k^)&1 LX}̧gM=O%z(QE̐?PfrЈwO 2CțPuJԡd."[jhKE <.u+3dq]ݴסGeAcDpC/ۋ$Ep7hrN2Ca6`XЭ̐U{&{k8mV8^c,<1w~CrBF{̭̐Y%8-kt8N%4XVf9@|*ohF)x_/Tƭ~GAQ beaįC=[8mƘU#x~@<սȞ.đ,Wn͹&qsB?& /дbiqtDz8De>1Ǚ֢Ŕ {^([+hKaH偛>?%w@r!QuQ 5h+cG6]k^o^>a93X##+s猊WZitHsJ> S=> Xµj- u'w^B̈]1d[>{F {Sm3KU)i }^p:ᄵDHqq{~ek]A<nZZôK;e]i ) Ҽ¢Pk gt`nOt}) [ |8VtVpô;]D1g:\ʨe]]w{a]&L%Y4dА/zC̾n[=1F|&(1Sp~%?-Թ{HCÝzH`@ HJV2 QI5gR1âTZ ݡ=1 m!/c5%KςRs|/M'iޘ-=+ע&YM\Pw55nPCHѪ=Hq%gAQܟKPy e Se*s4FEF;϶!tF._t9˲'h?4oPRW6yyPH҇:=?y,Y*z?J)̑DcIvQ5|٫?rϱ+b'4B!O鶛ּZ3hp;Lߴ,ڭ{rpgS@eT? 
Ppq8bK༢}ݛ\p[Եy ȦFZ ?5/U5$ J)ѷP&%Yc ;KPʼ}CKgo9^ !'ipӽbz66%n3nOJΏ$)6{=Ϛ4/zMvz# "0r&#Sĝcg&ОBӞRemWӳΉX-Ms[-9)8HMwi}O؜bh1|{C|Q"GZkkj?iiXɁlu{$yu?0gjyb OVilUQfIFZݟhV:ܐ]hPdrNg[6W tCSG<;M" EFݴ$>fM;F6>BC2,Bǔ:LfLGç70CŒ[fPG֟SvVg k݇6&bn x|IgR:MB3y`Ryw받]m dԺnDxDulXI^(RC23&HXI`Y:n*ozh`mnBbL.+ +wJRe} ZG\O5=7mԩw֎c$IFNj렑 ?0z̬ԫfڏagDٶͷ^v*Ԏ.t@HB=QRLPeؿJe{9L~d :}|~FtOJ|(_KVntM]ăpk+4J{ל0i E!<;gf8x'd WBS W;uy)km]ֹy7Se͜Hr=Gux7$0SF]h P{stG]yF:[7vEQ vKiC΋8})`42)sU瑾q1jiMv L5ZѺPF/c+4%XOWjVϴHEP >K4̛8 QhL w.^HXG2 TS4 zko4k؟ |f{4 "fp@Y'ROQ䠔΁x_`ªkC;hr:5&C<(ԌYAC,+kluCrXy17(/PNsFhZyt m]^k/h=qP6lTqxqt6& )vn(H{nv: -c%_4:ۼef;'83ԑq,ԇkL¦52,~BDSn4u:N c-Qs܆ @;# >H xY@?ˤ"FUbDh,Ι$_hlчSk(BK[ۯ c!@ 4h*|#SW>X|6O"p6B4Gg9 |Oe&z)4T!s0la4-L=ƍ׃&5ibmsbhXg xd]<+YNs̠-V{(mlY^\^eciAi}"\EFvnm' ^/O: 1_zL$d3k>╊τWG .Κ Qqu l $(Vh4aXNK^|u4AkVf0\b2ؽ 4O҂:6ӗÔhT=gU=g!EZC]c}b~^! T2sM_/7GדFT7m\;ɯpa34( R$;q_Z*^z5Zpw슑)n48790P{LCzڃVJ6q`ΎEnh61]m$si6fI8lI{J =<~$slU=b*lf}3GЈ?y(zӵw獝Oc4p`E'kF+cIh=ih`{L Q ĥc@!/"|ݎ2v݁uŜ0GSl4.FáaU>y{q4E|΄"tqvqm1vm6^(10~W4:Rq Un(%rH2Seyy3V4aMq2xFmz4['@o9ЊuO HTE>zcbQqA\A\?y6U!kVsdvmû~8<2e&5PqH2JUGv595?sK%30$5Ey *ql4蹿#Ј h 54}?됦GCY|5t$cIAh`·ne8S`Jք ==tCϱW5)l1X\P~f:VEF!HRή1x ([3nA|K7`)kRsY"nu:~QRawB?d啾3J'F>̾S]'%asi>x ?~Iq] k(QO7#7g[񻢍7 SղE6w3d q@nц@"@`Hmuі_fOD![X`кc&g柍FSQ/mmj>&.~ޝ4|xi=jN8hF桠87붜EqZ-_`t ]jryִYb\fnkdh;o,KxMCEM `zeNޤEQ;fL 0V&SZ8@ dgr(5r9Qm_ 5.`/Ad`m0@5Fۉ>hD a׷3c&nLX EczgO.MJkZ#n`+47m9fLS6ر]D:5Y#P ZieXW`Ǽ]XF"Igq`Z6ϸ sˡ.3[j։`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&4<&Q&5|[FnLJsZѻudX!0"'GUޗ0S9~JoG#GYK2s ؓ+?ͲXܱ]&@ hcL ē=3sʬ5UB- W3,K@@zA(&n x2IJM+7~ փƏh'\ךQԩݣ}VX(v 9_3&Z@Z@BeڴF<|j|>MzNJ<||ASlW\cJ(d_,ФkR|u%nYq;~W_MEcঢ P}K{_g?5`AbL 0SJ^fp.=_Ɓ)ѡ̜,Q(/3{rnU3ǁfy H.TY ?~1s3:CqǗH$NZ$L [%C12siY3(3.[u/G--+ }0cTgW$M Q}Jj-KWR.z2R,pBô+úxӮ}(?HX:*ufpY/Hy5SNH˃*}֖GE idv [7@ߕn=<3; GP #&Ronrvޤ:kF 2u%L?`bv`FTY~޻>M#sHeF_(n첇!gǞwYʺ^jD<f.tHZJm p?=_|!e/)ԩ[/49Vhb⟖n.vG 07I Ka/4y9{V>Vawh/B]K+)nҐfX{uڸ,EXsPb V eJXBj`I!Ha3B1ˇGiaѿ(e$][$(YEZ67Q|PZ¾1'ިRiK|M7JϧRk&l_铲 y1ĝ}EiK?6-]wyiӰ.pEs=Y2~ 2v{ExW.ƳSRC=`L m3}; /vlc%èq-ndJjI1*r_`*2GCʵ6(aɌ4һ\a[q~ ) f%c@ #E{F^/Xd*e(J_siXF>@mKt!φjק73UG˱X{{n~,d`doN_8m_x5vE#]U}oH}kF##S^ Is/ n?KR2b-IKm|MuGAy#l.FT&uv^NZ? dAa$;P@ԋ408ХPot+0H|zˆFL 0й`L  HwЬF-[CC5# |ǃF>uz_w2?['}__h,7=6fi'o3W;.e~DSvXn[}focZRr1"4޿r)3#?=eeΥ%hZ珵w4jC]F##`2c{޽)NfpUI;Ge:C8Lߏ8AFuΣH#G:QN#GϚ?x7{[KOw܌ ==&xg&@ @BRQ~Q %/ /5m 0v_]ϋ]%. 5G#n3̺jig4ahR;ל1 H:GtJ|Hrgg(/#y;ftFö'NZIAz}xl]͊g=3wR+^#)0W,4(e *IIRVV/ νT*_T9BEhәFwsZ{%VJ?_W`}VaMSaN;ð0#v{5F:`LzJ=lhhQAsDL6zv=6Csrp]7x1ze(Fch @jU-s[{VבyMev"\X}G&'z4*#E֙~&c2s9[j t%PfC'{+A =p2u]U@ixzk+XjSO ;ƐiRX`zZz :"C~whyfaݐB{=-;6cL DC%hmko}|OLݿjf~cR#Ԏ^*u4FzV>nQ0TѸj=Gk(-y$YKT-7?eal4x (pp(?ϑ)3pW/8ngQW^Pn7F=|{ҕPb+n[(A4vm:#|F?.t3 :BWhS=x{>M7G:e1`'5ݿ.vhrho;<2l|{]?wc;m\ZϞ.EzeإQbiv52p"k͕M|!T,LiuG} ?iG+KQXf4iZ{ט:}i'cD̶=BĻi N Zfp4]ni\bպuF9h T$hCb jk0mXLu]XTȮ3ѱ9դxJJb9ymsWL 0hz0%`LQ{ukHl?%*;G.*m- [cϋccQi]TϚ")ɒ5p.,(BB4` 2[1ߒ!}W7ޖQ9 ?Cx=S{$;ymח̲}-Ә ?~nL.|U y;M,i0@L {2%?m'otA侂)0]0+4ԟ%.޾%Fp-+~GJ#뫇¹lL3t͛*ȟ Z0Sʚ #M#'b+,䕉HF?i ;MZ=;6 Ұ'Z,j"tFzW M74ƾ7?/3BB| yv*v;^NHLЀ"'A0|pZߥl|>הcf`2:qY׭/+blox.BX_7'Ʌz?Pxe|%n6*L?-v~m: ܲ[e#*Wk?)"U[z5γp4og~=ҽ gm)8vjFdC/STI'>3&`&@ p]AAoU`VI1cck֤IuDry?F4`j\FR]n:qK~$Rwum'eEhf ޝa`% Ms {&:P3&dS;aL %SbaC&`L ^FbaL7.;)w@'#t2&h-Y9~]7zbӆ/sIwnZK 0@ΐG&J@ Iin}B\bϘ@;'uօ7СGgL 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L L@ FNߴapo `L bT+,`>,)dLh:@bZ! }wRXB?Д Э5Xn&@[@e M?}b{\U;|OMcL ZDLsPRI-M{u֤I> 0&й2e;:U)u ~'wQ}$ǃ 0F5머+4wIE;[r|`M +'x%}wO3Z[xIL 0!u'Ѫ|t)s·Ey 0&wWpIo1mw? 
o;M>I4b `h:*n#44-@)<|0sa L 0&К,3(A1|J{~Ik 0&zZZ 43}%+3~0&b'`(;Ԕϱ{F.>i#,`LTv37@VY+ `L *Q_G岽d{hI" иKl 0&TSloZ-'McL :xQVh#gߙO:  0&"B$v9V<$q҂%aL 4xQVhLF̴ͤt&T>S9I۱琙`M uTD~'|hfkg 0&0\F [;5l3ar}0I0&Z@<(osEUR%3&@ ?>I$b`q%: ;1رgL 0&(|r:O>X@&@\ īǔF=K\;rӝw',Y 0&2sWͿ?a&:=B:br(mI R/=شD=N-JrLPSҊL-|xG;i7B!?OYF3 '_}ެ{ַY~c&@[p86/XP?;"P1ރ>>}4a6fu~ }BìWc=_0ff3s/2D%`+Ӕ+ʄBZH9 fᑞ- lex૴8tiqsfg25G)q * BLzG@IDATq\^1;¶?M喒܂eڀuQf0M0D)~K1an78<?7FeUh8MCND\`:gej`mM9bd9-y**Qq`pmɅy9G!+s9דhmQߤD[1 ua깧p*ɎysSxE+7:7H!Wא݆ҁEL>@`n }dN 0&w̜DozF>)4M>80.H?(_e{`o,<}#>(k &BaY!o+}BaHIC+W}[?=T!P BjBՆ^ ,,0D?TF7Kieykm6 0&4l:jD{9\̨@>u EYNvCprK {ϓ̼rxy =HSž潩oCcPn̍9Tjq9XpzD]*Gcj@sMsu`}Py$kҳhҺ+D̜b~ %E!^-^vg[~_{R_-)F|C LXE\wTݴA \>]s[N36?74ف׆؏x@vz<^Ɉǯr:!Crwʜ`Y5@Zx=e`kpg)TjQV>*4Ԃb}))q%1^F A{f^AGʤ nG``Ԫu#4Vֵ 0&ІTxͥ؏|^9Zz!v|[# @onOTNZ#\ڬ>d `|=~ǥVg1"QºqJ4{Ze~w8gLK;ej 8vHe.U#(o7(OHa㄁0B aF MaYC.6&(w L*EYjpe_کw|siu8 WO-S{%a)^q57~5DCڑʲbJF%/ +5mb0v_]ϋ]%ɰu?CfwsRj(sNϚzuA;[DNq4/R,#06 S3ZQ9|ιqIRVV[Ah*cPhzfn(%ű^:>`mI(0wgc8?Y̝gtx1gf`@y Sj,f N8(Cأ3d7rŶrFyCR 0̔8uz`ܘ)Q]y| WRH/>3hB hUu  Ɇ^ςG²YPy=FzPt^; YkTl7Jn#][RS^{GErΜFFܚ?Rk-ԥV؅;y)2UdM9"lۜ5#EouB&@ dx`(1gcӗ :drůX7t}ٻ氋C?iUک crᕥP|H0MTcP5SG=<NA(d=J 0-{(of\ܮ#tKl f8DH^yQQU:/)tή5ubC:q7Lx:NJĬݖ'M"z`G,lsWcq]TϚ")ɒ5؊\LӺ,4sGN.[B]FS3 (Ovc5EeF.4LTP/aD}*v;}^Y<,U%˔a ɣ땫Mz|rG>szvujfPIEn&Nܤ \ wo`״n{|Hdv'(CF!tui m2oC4jGZERn_P,RgBG!^<ue,Dl+C6B=h t>4:Cg>:.(g}Dߒ@ઈ1cA5kҤiGFh{:8dvь {y+HY }̮#6kZ(Zjnףkڦ, {:=6d ͊fGsz^ kpYhOet{5qS%snwu,v,hwk';zhhҡ'&)IGDRuDs[rwĸq@< 2=4b`@}rYsǛ 0&`L ttD(0&`L 0JY#n\Uw,Xz&`L 0`}6 eL 0&H(<,aL 0&`L B2&`L 0&PXI`a`L 0&+4bL 0&`L $Vh*9X&`L 0&b! M,.`L 0&` EJ 0&`L 0XB -`L 0&@Bh;e2NJ~R EQBlJ~xfM `I\y4NR ԾBh7ݷxRhSk5M7,#CJX=Ӎ6U躵qsRVG0+7a3UtoHf}>`OVqNٜ:3nOh=>0} p:nLމk̒FՁ{,ޥLKItV**7ˊŢ~Uʻp|l,Xę:+Z,;B}BC8<|f~{܏-e =(. a /ۅBD3cK5ԉڙ~׈OZ':񡩓g)6|0&:,+Z>iq}C AűojF-_8!=[vk(oR3eBMl_mM/Ei\qL W9`W|H+mD,`'uE6c;oޣG!ՋT?r{e pӵYF#3(|5-|yvYoq"^K.OkRj(;]&dLຢ -Q=H!'0p-FRSV |wC\~*Q[=D=fᯣ+ 0&ЦhS'h}c+3TQ=c'j/: w!Lecb(%B=ueWDLN:DϱGdhT{L 0\Wio.?=Vm-^8`p;M{/#J13v]vTfy#]O'_x `L".;owTq{"2 bӴQ.~W/fFw| ǟ}Gi`L p]IP_ojGgjܞVlq P~v4Dl~EU%' f;3qpg8Ϟ;9OGi:cF83CMoܣ3)Tq{"A3 bS$BX6'@&+ho|`mGir]vi6oA\*{Tq{"lra%`hG#zc ҙ,dFnil٣{;G 3b 0#uE뱎96oFLH#@;'`ўF4HD ՄqǗxz^ҰM9ȗbLU p]Ѫc ;o@b璩Du~XmWV#gߋe+l#@0=dϣ2ࡗENn丮ݰE!]hk,qugT[3&$@eb+KZ  .o,Vwc>SClPF/Lw>2vJ<֨ڨ&ojh6Aω$_KaZ::-us?o]R;zxw_@̸bo&v팯#iOwd$AxмV$t~ "MSŰE*!(sw!ȦbwL $4&X?"-yk6 ظ,n0eD qIM>".~N}fMuިPů4.ГyrZGhaL Ԗ]o_\>-orLsٹZI9ToR*}kuKO}'q!{~.?Mk"Qo~fwyq{v) vS߸U|R1P2mW}{ڿ*ax t!Zv9 xD ?vo|<g{k󗈭ev/%'j+yT}R]%yύ*RR׈a]ۃGhT. 
[binary PNG image data omitted]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-ha-vrrp-compconn1.svg0000644000175000017500000015562700000000000027634 0ustar00coreycorey00000000000000
[SVG diagram (OmniGraffle 6.6.1, 2016-09-20): "Linux Bridge - High-availability with VRRP - Components and Connectivity". Compute node: instance (eth0) -> tap -> Linux bridge brq with iptables, DHCP namespace qdhcp with metadata process and veth pair, VXLAN 101 (VNI 101) on interface 3 attached to the overlay network 10.0.1.0/24 (self-service network). Two network nodes host the master and backup router namespaces qrouter; each node runs two Linux bridges brq that connect the self-service network (VXLAN 101, VNI 101, interface 3) to the provider network (VLAN 1, untagged, interface 2), reaching the Internet through the provider network aggregate and the physical network infrastructure.]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-ha-vrrp-overview.graffle0000644000175000017500000001422200000000000030405 0ustar00coreycorey00000000000000
[binary OmniGraffle (gzip) data omitted]
3+,!Dܓ~zut{R"9x_l=HgvrsLGKFRnѮ4@‘ܤ"\<CODd2G8De_x :\&mE,Z+ B!`;\fCF8 K߿yFrX.ɳytY~ ו"Y `,Rlt0,U0iqRW 2p޾ ނe0icg٨ B@!F 4UUVg%0S769ZGN nMJe$8B?q"9l΁|xb7x*1Y8K[;xe >))Y_a1{ʕD!P( &" M/@Z[Byқ7yZMI0J.!)-/VKtf$O4tW+Q$"ʺiH0(:y.'p\|t B@!h;H>@ao?)vcwLl2<O00 tMbKv lgm2,QV6^y$v͚R^9;T;< B@!P\IZ['7{h^/2O]h`R& uB_hHr E93_G7{Q.O!P(֐JrOiGO_ob(ϙt֘`_Ld-XRAQolk1 lg $UcF|<@?u$%%'Nf9<#o?,U"Ж cO*ZvvV;Nmgm;IB+z[$ʢQzߌ L=ɠu1_*ZP6y/aogTVR89p!tGVI<> J>/C8!f#zQ%Vw!ApCgN4+tXxG~z ێ@QU:(m(k?omQ&'Lb8BP 0;ubg|GqY`2y\Ey{'wx?9yq6n\9:={;|jD{,0mI.S,ˆrDtL&`UXRFx2Fqy}y/RҨA4}@AaP6렂zM͞R>mǸյ_Yc!V/jmpBc7N CsPVQI3OӮ.2O2`ʘi Έ?i 0V nFe5=}sbPzi?_.Bn–j`.>;|M\9)H-ٸ6qgY8rSZYvWVmY5;jK!4bTB:QїM35&q _XWRwh,ZF9û|YU; 2ƌ7<4|(K-2" 冔ôr[:Ǡ{nK:T#[b9~"-޸>XNKJCYVf.j6tF@hL;Ʃan4-l|J@ es@=~S&o[{ᡛL[V7W@`+?Oh\jq4;{Ǭ5D$IXTQYIvӊhX8z$EЉxj^k=nGӁ#iUPe0\c[T L Yu&}GoV3wd+ x_cs{fH[`UUO -$I{$q Ǒz{U xɩ?#GݭrC@An`M7h2 f/so¤fϾ?8 ݻD:y=6cnb"so?/$?܌=&P|3d[TEaQ }e?[<.\5;¢r TuPRW+<@}gQ_/.Vn;@/e!mܓ:G ק:v|_@~lMQi7e`q s<Ʃ(zq Ӈz[R.\ ~=lًÖ *␝N4̈́#(>(J @!k8g?^0J4@ܤNʫR(#D{wٓR,6Ze{:Fpn2T}IW)_əçDZ ޢ/PY8juX&`KE'LBpL=OCzKn*JxqF=x(*BkW|"MF'_Ж4S1X=Vs6XESؖk顛kʲzBzȄz!Y)"6z]reSi^e [?K N?sWfGqloѤn;/M/߭Qzԛ=c#il>ߤ47HiGK\8v-f<6H=Cpbns!D 1yfG{ {)x0ÀݠbN&lO2ibѷ{g:zrt'2D4$QTY/O^1ȟ;>z]eT@A )hԀZF˶y|t:=MLh!kxz5i(DVm^]{ozPtx{Φz]+ӕ醉CCGyTW_$pEbw8I2,wOeiz$<ɲ> V ̱ ~vHG~x+=y Opԡ o5sI },߇'Ay$DoN嶬Ly2#Mv;sD)m L>d-!w {%>]G~Ƴrs^'K/&hAaұ~az+`38IWhג-¿sO5Ld x)~ץ<ĶN8w",Bom 4$f~&,i07t7$N,aE *<|8{DYkMSٜQa4j hvF<t֤a¶IҸ}-b^$sLn vwfaCxɀ{ Y@;38!.L⣮`MJt[f \Iv׹ [7wNY||EKh꽨0 \lDC@`~s_7<~G ރ_Ӽ:qg?5DgXWT}bkFPn1Tk^RDQϡX\jl-׎$jp F.v?xcKWG?u1}9wP%J;֓7i=V5>x1&ʙEuan R .g7kgUJQI)"T("sj?_C4c_Nq/Kc2/6gwbR$xo1U\>folN&H--C\F֤,:{Xn&p sLkCoJ7OMdȕmH">|˔ H}1~Lp$h2g 1\nܿk1Isծw"I:$IǰF%򚆎<-I~ p[F~<[;&qʤ󚮃{[wc=?]襌bGF}A>v}|+cFq]/nJ1>Y"ic}2/hh ^oD|`ϰF\)q>?Y?GҬȿ1oQ?'ϫ" "WE(|FF.9NEi[됫&|tB͗52LLW235ެ0@2B "T.#{c4'|S Ac;p8ĿZr 3MP`!09q!l< &RR B]A9==Ha@ku< u |ļ5枧/e͑DEteiC+ŷ;2/^~_惆%Mˀ]9)ыܛ6F]}rT>g qGR!ʆ݇N@%\)yM֎1qpM9q"s 籱e¥OU{RD !R~Ԇ:b:$$`bByIiE`&({0!A"L^^=Oa }dOې"ǫĥpFcxAl^mM?0%<st:åU@H b4kA!V+nG iZu :ns]DumCҵSmu:C1s#F&V9gx0M0:7uM-Tk.R;{beWyłҕ rLkUn"X&\ofٽu60&9\E`qK \/4ʲ.X5S{ٱ=nXWL%/AB?îLx{kW[ nXs=㨣BnT w{_?6WhR ljLo2f2F~_K6s"L.P^d_2CCPTc,*Q 8솅Y5.qx9FZ˽o8a'C{=|Vit>$LJdX0W[~>[1o_Oe</=|~sD])k+z ;Qt4n`I{x27Fґ]\Lisv#ȗf%+eOGx5p"3H?&\岝c_D~]F -楖׶1=RQ_PXzQvF2 J~}cP@'=p&y]~,;\aGy➙R"8Rc <+RPTY܏yc׈7Qy@Pdy!*^,.Rˏ\ A0zҍGJMtxБ GrG%@XB[ʞpլ%d(MVKNf>%Eh7Mԟ%% r!h#!s-Y,VˢbtZ_,h@#ot/akH,;' |E4qHdxS\p']';mE~]l]cD솏Iw6\9GzuÃ|$1zF%l}TţЗ*J]̮nn2 {&cxT3?$:_Mq**,"Q\^$5!M'G0ļYC}<{(! U MjEZK5<>ڞ9ҳ˜(ר5K̄άԙDi *('#,H4#!e'ԡ<7RF'?RcLm/CP3\gg<=!uKz""SDx*^/j.>ǥo;;805dzR2<;qP99ė19l*긄QÑn:\rW O"=t;w}&f~<*EDdI@Txz$՚_ܕJⓄG`p6ht|# C9k+++Yldy-si~]<6M$SG)k?lF1l.uK6zAG64K> :)uwye[gxϑ7.]-])_S^uk|Eg-q s7jkxw^o^ nk&$7Vч.^3}k=Q(izVZn*ٜMEs("(ݘAm6YEOfujmB\pve|ƒ Oa3D!=)m؈4ZeH6wagoգTUv1٫)~}ɡII}vx?7\OTQVDA ̮w3c/ڐH>Q8 )vKVr dǎ۵+GlDr#C{kO:X5KΔjP(A,O@[,2vSL2=3'YvjO9 eAy&atj'e%3c̾B!v'6A֟5,{3Jox4>_$-I9v&4R T7=/*/&cm2,Lt١s&v>2ljk ܩK"* |/re%)o2 W"|̪l B@!h!(OhFo0r_伾 uo1SW1vH6CK? 03.9/gel U.4֏$_vI pϯMkڽA*<&^/:vޫ[ovmX NX5xg^E"tR[ܑEIc;0k5DDIcm)Nc;aNs9_̧)(%F68Q҃8+*-Q4{1ANQfE'bP-nZױ5.M;m8 <5 j]JCAX!q/eB@!h8LNj >O;^?{yyK#奖/5%(pkI\1bdc[=Sd.ֵrj*(]~"2L4Z>bWUV:CB:5>))qܳtrؑ|8-큸6/*O]ՇP(s7u1w>{{]/a2H2z5hp@N8֜ . 
CI29V?~io\ڿHlA1lٞn?]guhF:kGנk_| Ze /eǹ[â/κ&.%AZBZ.QoR=Ү'CdoF6 }].1ҳqE^Rk 9Sm uvIQf+'Sxֱ 3 ˈox$=德lBEtt$BcZ[>m/`JwXMkD 86._oDbgKٶ7P{猱 9i8-MNwqC}6VlK^c)!3%J΋z}Yq4c.CDٹ_I blOqlEuc9"D(뀧JB!hdKa8V2I`BkgdOIIFe*CNQ^qaWg?Xj=ǔ A3hA-/|h[7w?}(z9|_"ʵp4{ WYܛ{ ǿ~l.Y8lAmatŴD;w_ߔE "KXK"' Ȓ$qJwZCޯ-9WR pםgۼ_|0N+4oes{Fsm\|vO0eeKeYp݊$YH[-[W,[(>B^T}h))<̤59OyP*ҢZ.<{M4 31.B@!Б5.VH2i\ᆾ{6D%< uh//`7[QL$ːSڒpm11>[h큵-&"%^Zenb b8%Sj7AZ .L6" hW)?-++U0!ɲ ɃaGDj7A-DK?&w݂u=w{Ot߈l.8KS(@ st3ai7)1%aϞM@fL"HrvUߴmZjcPQ G^Kn"4n6l)UVUOl{S|(k},.ؑ 7:|]wn~m~^~X&aB_9An{b &&p^.x [i_ivCi >?ڗyS|Lg绑ɩ @E“X5&ܵޖciѶԝ$ IO1NJ?4\ Wf_7q-E[_oLf׍9.&_<-VR/ "L6--4YaOH7ld 8a&$RcBP0X\QB"FzeYyqYIQɣGfp*t4k1.^0xW۠¾cvvȫ ` #l$1*ۣleTOPn7*J(f$BOldIh5ǁR0e_³/;ZiC!0F?4$ޯrbǸ)oKiQ%eHrIALoU>4"H2R$E9//C[DŽd/FjyℯkOK =P&,HhK#d9<"MߐaAA=fG?ҰWEs?dBӫ/†5)&D3F ,퓯Mf B `^LL6РCK qh60Fe "~:}K.IG\DH&f&`dp ઴ɞJ []ZR>=ci!DZ~7%>DϙHC8@Kpb,b2P_ fgHrFŒƭue k&uE"J$`CR[W-K ysb4boOb$١$FjQ&$Id僆Rx D#" Ïǃ6v0'rJ e!"ȉ 3#fi6 v&!6lnM0piB=<:lm6;}30z*PN/"= $=L:]i/tuch!uV #8qR&ыg*}Zdܰ!iӒ1RU }AhLAxQs@ZSUUDž?壑A̯Br$#^DDY*7otK2G/f72+d"&Ʊm Lcle2`hZe 40pjB`f@I?R ;zLNf掝2)؈ԝI_ KY54nh\l2vn1m𑬉@"[4Gri" Bj'BCB]\Rt?kxSs\O=U _&ʒܹo$~(Vyu~7GQ&11?p'qq$(Ia۟C6$ˤ΄k%ߦ&>\aoxA2~I:Hsמt=Q$YO' aә۟u$x~@wB M2z$C6  sh_GfDDy~ KL209 5Z׺mq&s\'7lhgVrѷ3]MG:D̳IGW+l/p $/^nDq}YR54 Yd |N}o;?~LdzB/fer&nAb#ɲ$r_ob՗k}H$;h&R*'NYda Ze26'6Nmex)f-k",a4G50!]{Xen7kG2s0k&7zxty56$rSܖ-mHr1t 7#y5D8Jͨ1"Ȓ 5 ˙ E.d_wD3BUKeK\"PdͫDYY]00yO%k;Qvi`#'Ⅹ7*&ɘX»ln4/!Rw|,M&̗b;VXki(bQ1۶)tʋ@Wi+eЩF!#ɲ`~~]MMMM$7c ,H\.9s2-.cKSEw߹3Oěl8A ! ,Xocu ,H\Z{6*YRxql#uxb9:q%wfES6su\SOcNNECf>oYcL[BHSfz`KR49mF͐bv*hI9ݟ5ꈾ߰brM*Uq@TM`v'ᤎ:cBp*8⑓XIO~tܔxgr=pw\Tԓ7*g8d-E[ yƓ9m=(뉇2e8&}o' hk Z+Z'Q$>0@P] JZpDü*FϏ; H^Hz?.BZW<}Ż}uҬkN$rL}, .;/dnj`v 7ea| @]Ou7w_"BesLU']2EF*i+WVek#s }ՆB$Rk5Q/o {ɥJCxVVq0!Ԭ_*@ɬd-EϠi썽ZvE%mym0_,tMQ>a]ZM.('n; LpRTX0FQѿQ4E ]'ekQ^gQ&WqFp+ܧyDfd)+Ӌ&kuyFαCz 4}yh`6ƽ ESa}u+@K7Ab)Iv*+~=FrH3)-~z}e?8vDaAm1J=COݕDgіL*.14}%ֵP; &"3I&r$I&].I$X#\czԔv"WC* YU{69y;6jdf%aǎMN7Ȇ6l:~.RaiR {̢mV] cg ! 
qԳ['E[lV) HJrB9 7*brպ=i(wiq$|^1} 0{{Б(*© GmLnVyM D@GX7oB#"3k j19v/ EhYݞ]QZk, J7"nݶ[r!lypg# t*/aDNJs5^TXR.c&BZuU!=uW7Rxh %ZD+M߿ܐr6:=2Уs\wFwLB#Njs;ӏ>cg(i f u<2LTZQEج#EN-SG{vzU5;eP>$Yd3?oQHXc4u:` 2j GUa 0G||ey{ve.$W{ ܶ[r!lyp9j$sfb(])띂|$[_N}9' b)Ty@Xث !9)H-ٸ6qgYhhd0W'F t & @&R2/_L\}cę<2pSރͽbhT: 7gtG ;y` {ISm)|H2>xTHhdo}G7t];$;qZP?4Jh**i?vJ| IdQ7WqV^]v7pa4X5w*\EnU7H9 !\Q?=)ǫE@="Rg!00Nɷ&B"c9~"-޸>XNKmv&ݽRMƚ6Dl:C3/D8yQ&۽E R}51!Sa5fҒM<]BqT|I2O z9KƳW pC̹yZF9blpv^$'I# YBk~32h&m춐 Z$)ŗ$)k[ODE (~-mny]+3sAi[:<9scO$Lh9Tql=Kh\ŹqWjohVb,{o eTʦ9VM.e{x=[{M7emU͙@IDAT` 634K,lccgAeE`pS\h;RUVvfnoڴdxMQ J}8~*NLꓮI\T4GLQe¥Mf7p" +8(i9B *7SxYqHw!-I|(c4pfp.20Ż`E]=2;nkQf~o?Y~EDS*&)*OSuaNlI|1& ?JZpdh\(7S.D?*٧wrS)E qBr7cn/>]zWJBxV4PMGVd^t?$‡7=5uG]x@(רHr7'"`J'k?זeЀ |aaƠcDyz'+qv-Υ:߃iկCs5uYuFluBgW|UxWYt?wi5:>9w+' EWqzN;q̞.N+*?n:Ix1S7j(9,<(Dp79AuC@.IF'C~NEHOechvnGs+ݼ=r@/>6 Ye[hJb?z|t:ORR4َ2u@J؋}|Tǵ[$z!@;0؀ml\p[\9bK?$N45.`0` D Q$P/|jQj%޽ef7s̙sOş`-MlhRI9]`-\zi)22188YE^X>2 QWܥyIr;kEh 7Wqڳϲ]xi'K/" "mAhY1AdO"L-<gFhmϳr?y"[ID>i󤵮n*"*.w33U\e~F 0tVeZY>FxXkIVQy|^|w }k)3V>惔} 杔CO7\5R̼ 뫨RL4оkFcy'ʈ ی-b KW S83] D4mB peHÀM6WAbhdt'p$ԡN@$97 538:b 1l7Cu^Aso>O7z*gO]qzFjX_*L})c.m(e~Jyuc\qrwڽs"RMݒ=?À$T0^8ipi8<Ђ5H[҂ T'8+.o晱A\\^I4~p\l!òۋf6\s8'P;]P,qTR:l`m1kYC11+3#Wzze&,4IU4<+M<+EJHliq"iP aO Lj\ r+}wOI| f6N+F߶[{둖Dq1QB9-Fr7e)<)3u # 1I8yX QMrƏY Cx=Fͬ9T Oc&L>v*=D2x aQnJxC 6ZƳ(-I,Nd4j`f1zz~?h O8_XJkwov!\XՏ sx}24xfzArّ?]$zx/{[x|%RX8&B9zAn>2*1hڨ[iy_;<6G|͔f50i׊@__Z(ׇJ}dUx4Zv׹mjw|JBx]ph:}H[CKۯi 'W%H5첧3=Bi<\4G,lv[=vU[>nz9GH=Oîf<ѯC1v9r%)73~>x^mY؀Dzf+$0j$`KIr[Iߤ;>7$x&Idy[l%CZUx:]\Dx| F4\tIa) 33M¾OI$'Fk/", F[IfӞ"l=ehSc{- }BM"R'ݘIq $۹ z5@^){[wτW me`2|$CrlZf IREa$ayr:io)vo>8 E p8UmMɽ&wQjMC ӧ07 61 Ͻ f:9k˖@XzI$Y8h4h8 9Qn|p>qn*K RLRߩj_C@@B/AŁy68T,H\oZ(ƻ&\4>7҈ItB Ev*@ pq"ܣc qC3ɭCơE1GeDBpaܡoL^ pvQ>h(B"ɁA7ЎѠ,\*GN\z}*EZ3UzED3BI\ |FMXʫGjK} vµź"y;m"ʒLxJ*39Z#\!/hl تR(ESx=vzrt*/JʫU_8[=>boɥNv(`dWq9Z4{B`UoKf[MA%Mʭ6 PzЭ6$.U(uP$mtϼ)<'£ [n'\}(Lzqto1ټ2'uS ^`ӗ>;LrJ?2kpw>Udb|6W_o~mԻ/JJ\4e!Y(KqZMek O̫Z*i;=#۞JA!P4"M"؋FIN8Mu¿o_Dy7O|e.e% -.V[+7hcDy[g<{M(6`GeE'dѰEmQnx,FA3~ǵBʮ eOFlf,MN1s)5?0U gV$az4U B>I+'ј!YLL{HfFUV 6V[H 8VL5}\R-̘Y4{0fzoù焦x@`<]Ntwhq7L+B=9)Ӌz@(]q'쏆̻j5ʺؖ<$[@>znGMOU6]}*E'OO7feUVar<_yX:*1JqabڲBEs4̳%qDXJ\tT :7t̗H4?Rl PDP*N|̖h({I2* nNDO$yY0. ;z.pIļˀnT!:$b]JCz*`_j~uM=MEw=ݔ4rGU{_^h}K4 i?1&K )Ӌd#n]k}ͼRwe~dz-'RzBc2Gh;\,:=aaYi^_sնB@!h-$Ư+,.l>1oHe8,dAY*x͎4g:]=v0e&phx~Z s |k<Úʱ3M8$nŭ8O{{ez4p{CϮ^M&4Q(K&ALvŢ2ZNZJ _,1oARTB@!P/$ K@vng2CLL;rOw[MWMK$?N?6G}d_U)ڹr;"~4?$@%>z@QL]Žˮ~JWq:25=Ni rddxjh FL.W? 60=z%\/pލHM= Ú+ꪑ!B!Hrp+Fv_ē3f! v6:ڴ8 MLo eG4z'_]]s'ӭ׎}Zо֊ }=d1E&и_!SdžN(#ʺ=\.Wl|^]UVH (&Qfv"(=@)f }/-^P*Put_[Q#Q \/4^XtV׃GڥPtAIJwJW>yN$$i|6 W&r9l 7F((rH+Wqv#W:럝5yb)wTy̰&/9pV|y՚03͂ Dj#F\d&+]fjipV7ћ֪e(O2\K\{`S,c <J*uW D@dDGM. 
l\ZNq1B |5{ gqiN3i²؊(ePv׻ękq60\@PKepp9Gx.A88dpuC}2?tQ9nﺗ Z3P|?(j[uݢgq_;a(r) Ⱥ tv:-χ8o6T+#ư8}uOW R;{,E %^>pQc.8DnZe&tn&xָzT 0lVAD"NɄ v]=n+&B2611IINLH0`\&&.tUǖL x[I -B{[4pe"T]P}f' 0UhC>sZ*<;Su_|5\GSDrչP]6&ɏQur/;uCpV n5q}5cMoPHeU(γ!@S)Hإoдg1=Ze#Eǀ,H# F5k2k\nƙW /ݬgl@M2[@,L/'89q|pX$YF)idM_l?H)Qrpd"DĂIK勵>{͙mu>cTuS`u7{J*:3pXKxTzq&>A&;7Qja*.wY3\UP2г n((Nً ʏ~^77/ۨ?zی:&, B*e4$YL=j옥6Yk &ЮC$QFo nC8M^*$["ɲ(zDdtȣe tIvՇ:/_٬MPh0@AUW %kwSx9W陼SRG[e^̓?kdR߱_?;ZQM-+9fkAwJY2N0hLHuʥ㒒^G,I2tc']ǁ `GeQhIX!:1`#2D44ȱYh=.@ I-оvo6p+Y u" B}Q Ŋjz_xI\q/(.VngPt"<=2u .?-Oѹ&y}UuY=TUzF4~o{:܍)᪬ڲ\θ3g8h-;9[݆4,yywߏ6@bS 6)Gc;eAM(HgH eܫGKGȄm& ia s hsx͒şrr@5uB)C 6RvwNtx;mhͽ=C@Y˪l,;Qgy]u{dL#A4R2!!QDCV[Ӆu#M^[5}eϨQ66QDYh9Oy݉wO߿i=i#d ?R '<_-&C%Ȓ u < B\]6rr{g pn{n|oݲP5_" a➝I2&n=tJx8ͥ#)bľqS앲 (+#^5~/wmϡR@{6n_sѹmg7\4z_[4)+d+y*n]nF9jÉ[<(<~a [Xh//wlێLvSv~A!)ޢE9lߙ~<{EpDAD0Gs姎ZeՊՕXxAnPGA6)ɹ&8~xxg:uJw# h+ERH2D#`[4Gl:^̓w^(:N[UamL'Kdj(4 WD9\k&2h?lDRApWieW Y iee~/2f~o0̹mtSGDs qJ٪K+*N;rݧRU@k R \R iI_||6XYK:4&Gy0,eHj۞rRmeTu8\'ϋInȄ,9&5 5;к]lU^-H{ ̝5?T"8N[iw>Zi%pcqCx.B<=2^+5*N<B;* hۑ={#*Ou&qyMsH Hpt6@4 J 4&I|avQ̑0Jڎp\R!*> (׻LXyBo{iNQ"Z3(Uؿq"@c!z!ʚKq>4:$Κl2ŏ%ꥼE$u% U5 qԅzH5*+sXj%CaK*;> ("穔= 6{8M|ξNEv:8+K7pIҤ+ڔgK=d#/ghZ3~Qt'_-iKnUW"̉~ YzOjΓDYyzZm22*Dg 0oqdy-o*EH5D">%75]ٸ'PPgʆɍ"ʭse{68V`^榶%|u@=" ߆F͔ޮk9[xjQ7O~ɱ \&R'{4[G|n  <\m@uP5I&1ĭ3i@rntQӄ E%[ KQX"ʵhC~+ < ~ֶ߷JvLC69rd]є}Dp1]D8#Wq-D{*疶Yk L/j?HI?}1>(5XKsn؅먭%?LҳQ׍{OЩ=.;p.꯰ JA/oSSYˆUX;|py#F%I vvYk0*OULOSӧ'9;AnB"p^|w}hXb"9jY7qBR.\W_~}u&K}zR ߾KɑxeaPQ?~Hor)%LWo=D#dRt}@Me!( |:8\14/~:>***o&4up2MꑚH8E aZ8s;8"劍],ψ",ϱ52kaCq϶-sYh# )~_.m!4qx!bQYc"-pGAQpkNOD9 &nsw]}4ms_5\o_s:k,ajن}tRc3?uT^c?ϦycvD4gJOc> Βb|s+ݢ**U[}>|aҁ¿)?X鑅y?Aשubjڨ~AzufA`O!MT0_*q9TpnP1Ru{hAsL4HrYc4N&vɵ$tp`_x;qF4TI'3,tל 4m FG6=4g-&wHg'rk2ɣ{剰FfcEBC^':[¤~"48B)Տ#l=º֛^j1xq4Uk@X}I񆰍rjB7wD&eL= v_aQ3u!ϑ`w؏mFL:}& e%b U^֛:u |&\O}od=H|j ѰńcxQ L+#LFQ87ȑSͶ=(`ݟO][E 4.[؊1@} gu@n.b,͝`sDxq]B)CdEzddJfލH t6l|GgD^Du]@u/Z{&$" ũcq0aPdkt4ܢB]Gn.L/{bpg. t2y[1@Iڴ/G+ñL!` Y6ރj#h\Ťgo&>@`th!C6< ߃Mv|G x,l|6I ᛯaUjt-Q 0 Azzuc-rz 70[ii]9y}<’ 6< M x$/\x&g!i(o~jGEܴ:'<f*ÖVُrEUmO|uذaVDVtQYˇVBLg-!3پ &@RfMLUI^3vw<4c? 
og B@1l"K>zFG9,@["t &1sjxr06x(>685wϝxE- lc@.BxhKFg?wwh>[0 iͫ|/cr&|4#ޫ[>{Lٟl'oKEz@:h h> ;FM'.pwbVkn픅0anFJ"g`P / .=~ܸ fY86iX0$me9޸7~Įy4oC!~- $܋) -ϰ7䵾&01P0@КL o)&A.~g*bRmsiPy:w `8W;m),) zh1ĆP`oXkCuk9Shyqd!4ͨyxބywA" \}AEn1Ogj5Az@2)Z:u&"OD)a-[jGnd @>ҢTf|U",}g;DaBi@" W>nM%lp3+ 1};HwP.`rMvg3v<߰:4I( $n Յk>7$ dy,;TX>d-ڷuU܉wKb7H*޸+e]od>g 9>#kG|8[uB@!Z:&}WR<pi:lln)qmPs7/>~9Wsl!+fC^n^'(܉+yWqnK]ű.72_es~q~ȓU##,omO!`v ザhC-,JQ 3r6r5[siЯ8c(91aMwAלwkvL|vGPm]7N^Z 57u^@@.Q $\7gn+jtTnqj9Ld|6E8D1Y*+IqJ%E [Ű3a_ΎV@K79f cfM,s)3lf^uO93u \~tUt8St}&G6V4kLOʴ*>Z+hf*V끨7XQmpʑ@4P(#ʪ>p)7g%8Ϲi19 k 0' oZ[o?g|Tԋ"ҵvUOl8!5NwZKOq 檱=_< Z!ȏ7Se3H~lR8QwxZsN{KO"ـ!#(9=9j:2&11Co;2~ ~ǿ4^qst\ƀ]{fZ{&Wi2ָ3\FvZ#wK_;w"&kp=kw~+oMj`1fh7;W8Cih 2m;/Qr0xu$km!":u>@,اpJBxI))Ԥo4$j,mfc4c0XbZ"#8G"-fuKUB@i 0/FKkg]k:h!01 )mC;" c1}dnF:ʡOS}RhzS^xoy6963rv*sےUmC{p_Pu&D=eVjxs%)IM5b =Qfb_zv~#dy?Wf™>vXRvB@i ̶e\F<ij%vAKi9},\JեJ2fCMz&EQAe%}nGȚ%"VP$#EKCё&v-Y SE뵓%A%qKJ.ٿ .{6GYQf H:x$i(9 Fͯd؁yZM{JXvZa˝Oh)ڧ6CŎ"p UvX/Ee>9$7q8{.(*RJIaR>`NT>YrP\t>gwh[P+k-Xڹ8XCs2q4,d?QECmu oHdlqჴF¬?fiBtoq_ُsL#7Kk5o ٷ D>e=(ixn #'<[x,l.;5tמ_wy%bd%;vvEyl-YF ̤1luBJ_;':蕥ͦ(."eFC8kv,.9҄|}.U7_E:{c.Oi@%7]]|4̅(L7ҸWJSW3W m SƠ[rTStr*Cʚ7ß 0hh>c׭75_ܽ;CrBvhYwٱ GƍɭpQ`2ᝄ`59Q5ŨٹܸN:*5jNoʛ9_m-MK"ΗW۴*&l^$k Ybcb$Y3`L"ʬzՍ="[71ݛx݀\Ld#{RP<+52Qhٞ60<+ы_F''kblxLZ*y`fm@J%䑉&)RRbhqɪ’rfA0TcEFxh"jl1}tz]O&1T~k[[N)hmzN@#1~g'hm.S,/RQ&Rb%Wph!9fKL<iݾ{f͊' Qt).կ]Vv^KLO3?]dv,8 Uu΅ӛSR<#ChdhL]#$qUV*rSnEA1z0gˏL+1Zƚ &:W 7RxJJx^@O |©.nl˹@_n2.N-):]Qol.A\OxzԬ6gڙG?f\|Y4$ɞg@3hwǏv#̟6'''+{b+# ,vvTL}9h!w ,Vd6оHqQo st=n]lt"m/kcxǿO./կ;z_|}Ȁ; 4i,Z^R*>.ݝoo_什<>K-DǸ1U@mD|\;]o5陇"0f\# J ~=kƱ$a\5<+s %6l&8,`f$ ųD= Kxąd6|aEsn xzgv:7X9w9ICGCeۥ=[6~I2xƨO$A?K}5ln1X=% 'IbAK'UDY"~kAѳO2I~ahY} % !z`ʑ=}O m@rCR;Rš&NחmmZKMBd٣1hEd1я1яάT "1?UG.2ʘ8" 4CmU'Qb6Mϣӻ@<bvNqO)k=<{ K({ú٣<ߊ \_ H|J' C5(i҇v8dy4p,([x`|$=[iU"t&-IIfY87'Oͮ;kZr! 2G#-II,M+׵n{ж3g9썶RST5%UΛ^+0;r~)eY(?) ̦''I](3^5BNe10۰G hJERm@gm eϨtxSTU˚ZKyh[iտؙ|khgjN?#m>iF4G~ʇ:Pj\mDcL1)=&YMX+3 QEbŘ>n!6ܢu2gnSn3/*I["v/FAK9j,wk&S86r@,5 0#]2ȱ$rvZ$ k9O G ͧ> ?i$"`]щ}Um D@P/+؋ 9T^Cnܺf6YD*<-GUa9ltS" %t+?@6꯻Jc _t)p ?ځm" ӎ=6V~BMk[Mf8$<)9Djۖ! (Ì#6~${)sorYL8KE +Ow6|G%kL 17Qs/wR t4 ' d5 TfOvc7oIڀD³:FP-FLTO3?E3O bo9"˶d$|)m#v,bcDNL6xs Vێ/9 xA# hgޖ$C^û:LC,M@WTU0ȔHRLIfAKǏ[m+ߺtiuq6cI7D1FHa@gs*s.&{t_ VKwXtt&SzF'qP4S|={weՖ] Itaqw!+fǾ&(WK>_o`˭9WmA>x|rN|}!799Av`C2̥ bQG$%hrРmUȚ4[U)?JH]TXkf4y7=QR". YIFG D/e'a.C^iQj$QTy**6|Ը[S Ho7Uxig"-ǎOL/xMsc(sP,j8wNNھn;?M5oH: 2zfA7\>tE%].O ol9\^xN׎q?c#Cxf^O}<ے$mZty<9B ЎBTTAҖC2 ' urHR4>XMt9Ƕ l|xM6Fb:穤 ? bHo˟(wg[ddd~Geq ܗ:rs Mo./o^4Ia^|)zO[&%&h4ڎ61=f [3ڗ_dkoqsڲfw|݉K0Mg;n/ar/8g^YRj[Ye4+-I刍Ol?jûlOw~ܺ˟z'h #ot++lOճpcpx o!іC2{ҥ|بPW( ?|o"ZJ,IMEQG/eߏ9324ɾ/Wx޳ǃxƁUzޫ͐|5oDTp~ ˲~`7~߼IL i<5#+]>6gzF&Dq-op/Cޤ+,Vm>}Jm,HNθ)9&Be_ ^r9d^GHͯx 9o[X~; =9v߿i?žC7袲ׯٞp+>VYq t5A|\;u>\[S7#NF5]F97;|XGDm \Af ^J!vvdN ML4Dgq&q3LRpȠG3̦gB.ח¹sGp8v쬃~Wzִ;Oo‎f`##L-?~5ڡjmف\߼̑$&F<27#2(M{*wh\v:yÏI=E{$shgwp 3 T 6=AhØj`e:6p(?.ˉ~*,`#'}RŁ[-G]%wa\NP7@B&( fg?F"8'_'jgG@~ڿ=ugÍi&wu۠sJY/`>>lc{7Ͱf呣:+XMorөSo LwyxI37ΟoӹS)=#snM vb;')ْ]_A.#oUrepsGN<5\FB3qNga_c(tUzdM˗-./a [@NiQDi `ՃuLkIB;gQ&-\;lwGZ1iR_@2">9y"OinJyXV-MEivF2(?Hy%Lfؑש i ofyб8x%&6%h2x!sSC#ٜwl9I 4xMyMX+iC٪X"O׬ͣxnɭm~K}.($M*3uԤI\.PITQ]֫*n-J#/`ů;x[_uWs`?" 
hAaV7>r w(f,<[ @$x9WNQw!,=CXJQPt^xx }?TU:>ؑ2Qsa۲_o{5֑s;o!Cr"rNQ9U[YHO]/Z؇^UE=ԖB@!r؇d<1Y7>1cXݑ7~Ieq3ir۫a D f$5轼8ԘyFW".9g7Ln!*U.ٗ,gcXh9_!(K$:-b]""bUr9N[ɾnF Wf ʳ&7߿@1{C wE2$$Pw^\LBoܗ;Ʉu$V|NL1?ud:b }J$j.@bEο-S TɐrB7Z]SjiuD>.# lJ`UݵgIS8g?>ZwGt [ˀWeZv6W`?Is h,C s `RΤĶ[-,S (NھeYň~&EE5ə{ZuyL/̓Ʋx79WMb2qfG~Iz}>tr8MG+u_d9?k^YIϿ$oxbn۴]ع"8O@m(튀F/W/ v^gzl'novԾsO˓"r4pcQ8 c^eﳯsO];ɝ|%yvW]xU>3A *Eb#EH@Obo`W@!$$)^B9fS2M}ٙs{f{s'#W(*'L<7"m--cڈ+ǴaaF*p)^ʉnj++`PNXBY(pޖβc$"Ȥ"xrҙ3 j„B֜}2I^|sӪ|3+Bn X4j271qt:K~iE)S61'w@|7[crsއbq;^v&ͫn%iF;4͍4WjT𻬐7"s r._?!`Wu(qmĽX' -Pw 3:8716 xBK{RQV~M+Sgar8^F]&OUW]6n̷yPI,|>k+J5h#:np{'nE 23Zw7h $$+OԨ;S͙4]ܧɝ}C{(v5sV2 w^"Ke>y0N){t66lAEgw[^sv0׀Z6ŠɃ ĉ#em"mτ6-/LT@E`p54hL_V,9Y=IFպK$8aPNbŧESI~Cr<[u=n'HrMYikehHA4F^l&JͰX;{Ɩ)%.zF"hCP\e3`X};^DkTE&=p/=|c'4ЎvdS\NCR@0.^!ON~(g&{eMTѦɃ]!!@ fyfG=!h=2.~>`1&<⍻[>o@;TKO8ZXyTUC?FoQ+ iUQB"".$ V2EZ,RmRf+"x/VF .i_Bt9!rɏWy*>72ce6-<͠%Gac[\t\.̑A/'X+ 莭ñs7 c/(XU $}Ч]׮=Q)6ͨBD@"[Ҍ.SZ N|ggO~5s^W#Jd_WC$ DRAej;gje&Ab L`/~^wR!5U{@D}?+H2AB"PHcދdJt haK\i/ Ƽivٟk+'ʍ= & 2Tof`l$wu :[wq|I~AT]B"'-*/]‰yuP]8Jx+q}AYYהI!udkΟI%F6qM1Qcj5O=pO- rrY-Nbsl\kzn_iD]22".OJِLhK斿Jk Xq$g-$"1?/oc_R&Hc ˊGX\=a^'~iכFLS%iqů  \xف!E gD<$D!9Q]! -GU!n 0wL0@}a+Jf|E2_ovVXa_y#Ȑ沢J7DQ6ޅD2|w!>4++/L?Z{Ҝ1/*Ĺ9bH#h]Ƶ> lwB1GJz'>giJY/nsЈd nӲAHsmK$u~hZXJn٘=\H˫ V'pżtWw y1վhZAC-vZH=t4H) mD6r j'_7t+%irAmгW+W&عfyEeߓ-;vpL66^E#y`Ϯr )[ĠK*g1nI/1MLv,#5m0Bj#8~mï+ں!4sT@)&&73 bΩ58717>ӯ=[l(p,(@ bS7rh |~v󣦌'ʾ%]%~1Vw|5d<=c}.Y8WmdLf|N(@ v3bI;ٮ#Prx scc.tKp$ od̲J_٣S ]bXD6fe(;i˸Dw$lOެ&u[` ۘ;-~2c`ilFvz\l\N|qnS2 Gãe977ԯ'Z%s2=)"W5+)< {rp(2z곶F8 4M+k(Nz /gR} 9.q[@%}mj6*4+e7G]u~\ܤnZD$ANwtzy~YmFk'ϭYK妐e@dmFW# 7˴jth$p_]\g#G>ˀYImLd LFļ;|jyğH3.{v ҷDBN"qDk/^ֲgw}mJyh$Iپ}/kWM!Ni몓^;j0 !Mhq@Ӻ-*q|}'GNj3ѶB",ulމ#AbܲfP{|/DaƬ-5CH3 mrTk%&݆r6ʯ7Hӗv9G?Pzt|fA_[(xjv[7knUK{ͣ\Õ߳K$}A$n@|:rڻ=OK³%Dːꚹ?5kP M f2"aQ 2!:ս&y^sޫt>n弡}n+[d#3\H$o(2/$gxO'9 .×QQ>IP,"#X*.4x;[>rLޗJt[,0v(ї:m!rEΌ:zI/~&H0x̗cOg> ĝ<3%mށ$ 8nAśX k#2 xLx9^zYz?A3-^T%&̌l3  M ƕ&uH,{W-H99I w0 }$#!="C5w' KMa8-'m4ވe+V+@|d>iSD?*5}eiv?!W[c{aԴ@`34=|fv'>v H ǭnEPPHˁW*:{VC zmYb~hoZX^g˗)//wƓ3nD2z.''KjoInbs8kV kjOO|;KZ8?mZ2&NhZF;3ECꭺ]FJgBB`6JK;G %A4⼀m&ΦkE QӟIBKV$t?~T0|0;Dgdslw,{JXѾ.{O" ۷!loPg,`a ,Q=NXB,R$+gGԏmgFMg&W4X$K(̛1&$p#Li8ڈ8K`1.00$>00Lա&y0,-+*-.,ؿg4DIsLZd"Di%Nf-d핗'}OJ^H(kD9Z 7a%IL+j=Zu 'f(4? ԬJ2!bv́6TV'-zÿ eZ]SSгP@ӊ@fŤ'wU3#HJja͂RS?26$$ ̥RDI>&MT f庠 ;*1rF5e]׹x,i^9ٴ\S/ ֧$&C3b K^K+X@/IaĂmJFXy[$=}5h\C)3=i)gfWy!@7"DIC2i@5JCӦ< 1mD~ɬ@"EQ&L/M"tPzgq]~i_9 OM.76hۗ񸔴؏^MS.5Skl-OIڠ}`(n"U&R$ cQ˿zg㤭8݉:i ȾO,޴vU'`Bw\tŝ-t.`@h 䶼"] Q0H$GaAuJA%,PLa[ 8_5H3qpҗ0w(dzH.V]Z*ꡩiOc?g楧6de"D0qʸ'-2 YfAK!ɤI&Lo%ELĘ2fE'cT!EO3I^ԁdY67<͚5f 㧦 ٔSd#k4JF)={ϽF{M9vYNgO)4}[Oy 퓐/2n|}E^ߴ -_Lzv-5?nY~GӾH*>;&[J|t੶{4+>_5=/%_g-"܂yV]jeprh7͠6 0b7L+"49^eUdeC L=dd"D(+DeJ hK`vyX+MM@IDATA߳Oed}xq:$7QڎZݏUkL: nkTߩGD䝫|sqU=F񙠱Zt{pXH']23V8'[O - ]vA+5*go\"YnDWTKO*d٣ʤ&Y{A+;\eࣝ]~3;d)>~5dOpUj*lUH`K.\?y g1&a&rd:x"Y5jŲ}-()2!o]DiOCeh?$ ZB'I9پ=dk=>%f|G}>VklԈ$ goϝ];9~ ٮ#ܒLܩcjWXfaTfKx.jQ5`xž5v͡?$O *YjC7IMuŤokeYf nUUf5*qҞɯ"еk3\?bVҰFX+$2SI"FeEL6eOt_I w \A_η"tƘFd0G{)րiaztz |VmY/dYF`~W1qS?M\% Fqevw?l|eM6]9';NƵɼR5ISǯ?nW†~n @m\S5E{(SدPQƃJؓD{RFD Sɝ;3;ס}qzH[蘓Z/o6j;[*ֈ]5SɆmK7ij`y#}c:}՘%7E` ^I&Fj+IW v9MD_E/*i.f2~̔;/@2IwʼodOc*FeF_}F#ʕw+wQyp\@M̓CSA#˟1z| c 9_kb@c*I.p}J\* 4<.3],/X 'wr`ndF/+ z NUDx g_|?Q// *^.0J!xBϘ/6UOSHSŁHEyQz85s<GkrGߨ|&+/7')ady^K&ˈ5Qz@ gsRJ{4iҴKH =d{ :rLUHȟeGp|5 5?ؚȠHҒOEKiC|≮ S+ƾ~$D\bR3_eٓ"§k7@̦Mf|oG "k2:[?pED{yeWl8-)췆3x節Mm^ͨJl#ݷ3Sl39^*'ʎcSr|[z.4snbGxw .Ap&k3x)KZ5 δ˗BcOȟMitܸ4j@Zdj@&%s_bb?%]BpGzÍ@"K~|YRܶjMքAKGA[.ݫ]ԉhPS3O5ӝrF- &atIx4OγA A [Ả[9@ͿBzvyB.u=zxg_6 Bw8VN K[O*" }`zRkSZ.=/PЦQLP܅oQ&pC s7֗#/A@L?=4A! 
*> {T槥v<\vj[ߪ`-!Y-: `1:\ w޻ F}DΏ|t3#D^}˷fy ~ȪX1swF# Act8P ů %}kbw BH0NP@".o9 _:;}!xh8W h QwLpjY{x8A[&v[YΗ `˲Iq}sYa?nhy(tym0pY_<#*c+V8Z]Vn5oE7rfd}oe4`onD7  ^ p˵J2(_'.}İh.2BS4]A:|JY]{?ECwؾ-> ?VBŸ xxIiC&}]~P0v-#O.y#@XX6o+*!$)w_v͹jԕw_i `>q ]!<(޳ }oyZejصpW\*h|-kڰPe=? ez p)5}ێ ǡգakPoa` cepp`X 1yTƣyVC|lq=i;w£jϙSu?:GoѤ]޵j:%= (8{@o $f>D.4ō,@rfA `9|m] /5|89NvI9VMH%\^wc N_ x^U+bLXzp$˞=pr7#y7mʋvB*cXFƯrvz0Ϸ`hЃ./eXBЄd;b`پ Sger֠VR E/X[b6L`| ґc`\ * ?fo>|JC/`! `XySI[KAӥ3GtJNB7A2mm#Cuף u[S̕`):}oW&Ϻm M)Net6o+Bh?̪үk'| ?-N_ {^^x>w}a.Bԟ?@|ZZZ {g< es3X/X&Kלv^^#Gb*t$ u`qgrp (.kLOuHVYK'`}tGKWylk'%a}H[9`!uD@^WF".6U(xc9# ,ZD~Ys$PqT !B&Ȧx&e.s6Y32̨Nf(fje]nP4u tT۵iɤ].9|"Dt+%Zl RA_HYI Gd'm=Z5"H/?+EXK t9M{aet(,_otL)\jxc7ނ3k~nB١\֘4tmπ<F\W6I4Aҭ7y,@Vdlm-=2"=/Vh4]'8Pphp=su+xxWD7M(hnD>/mt?{d|N?%BڢS;ud8s!D,B4ӠtUeʋ\30Erne],#?wϚQ|ꢟq8@O$'$y }+rſ*Xaq\5,{D~)y_Gקgy Me`%l#Mʥ:dR< t Z+i 9䗫}f]B/`iU}J ςG"F 3t-ZCW>VCD n1h8V!"G\FfSva{#bH]D9G(}yω/K#!"W v46/)a c<ZyEgpG5YGeq]oV&GKfd}}pğJH/5IY+q|}mZf{9ds m:0*-'AnUuYQR,FM1}g`}y$-"7Pw-q| `܂ܦ#D^ _ڈV@\G@S02Nv}EVT^wl+MDZ a&Ż5n~@Ca Tշݦ#s]Ñ{82yJ 0zha WyP|x ~Bhh Q`,9R,WQmyOXveCh&[.- e< Y*35;j&sNVtun+A|jDUZ"Ζb^)O؋-.J1#lhA ~ refӊ5`e9;D|&?t6o |Gc ⳷A7Ut."pjٷiKZ .{8(n񟭆>~~nZt .|3-5ޞ7=>!@ޜg Szg:W C-V{ĀapAG-WOϏFM)\8%,Ʈӡ6p#y@aCh z`蹢%\ |^2# tYH"XhwݫDA|M&$IBa3q2ϰc4xW;;O$5aF`$Ͽ w_巳3ƥXAOL{Вca­ݏ?3SwnO=Ӆd!–<`h!%\ 8aH,o]D_ B2>FdMߦkziLITҵ9Ud]HT&ϭ o ֒b3>1iJ xẊ)¯]o|O_Ҁf=PM& ^)(r5:|Nd2 :}(btffZUO3jd5)Qpp!. ϵWps[*rOF,@즬V翔>@}eI IX($Z^/IV+HO꘴#{$S<@e@~k@ Ir"tRXP$S<dOd =Idl*Md>,HMOkp's WIrCe5N}A B vÆt(iɩrdhE^b>TG#xN= //#Rlf448K8%1lt!S~r<0iv4otXe+&;p4ZjKҰ}B({^">gשּׂ^*bsKkGlRfhk7y$~B" e, ˢgWj?a*Df#/ɟf}sC .3=\;rw\.1-&?6rdS2T<,yd4,O2k297gq^Dsz.ɒWK5;P2Ϯz|-$ wEK+esD< z! 'm+'808!Jk{&)$5o^SawHyp۽m˫gΜoj{ںgW:wRJrŽ0ܶi{EE>6=[zY[Z(EBv\@n_UXZ){?'N'pF\ȉaCEAĖ@s9wq [\#[7W>K7Y[پs@5ˎL8^LW—Jm=@mFt2q2"` 3n_o )_{ e3YvLEIchՕ?B T(׏ 8@̊屙 - "KJxTO*0nQnLZ#"IdM:-AZ~X[2Yf &qeD@|:r I2;%_~B&'JξuP۩H؏o2J<0h07}I&{_?>tm>ᳲdDxјIc'氍*˿=wIx G#8o=ɨ%AsA|cQB玟K+Vx%캋G^VƟ@J0QMq1qݷwj8ڢSKA+r[@~wVv% +:e7Vlƹʍ^$[4Z}F6 nGb)B$wMM %L,DM5~QO{j?m>3R'@h*t ?G,=_lqPjf'XJ_JuO53KQ ?dk;hމf%Z%՘> , + u4/Ox݋H5׈ex>.IfF#x | k+H{@cEdxϼS?wb3JE֢r }H``p蠫G zP^XBKBUeiCZL9K]BZ`V㢭7{DdC@ppXKF^s]ظ+5:}8`.eAbA/Ix7bH-&cܽmb9$%0`jveiCZLDW[ﲲ"OK[ϴ6ɝ PHMp|xjqhqV"Ǒ(JPy&f!ڢJL>s;::iMĠ2~d&,u8 C)3b秥t>$4w"df"ʤ Mt{'tޣG`HHK}``V[KʣRb:I$R/XFSYS޾+;sV@X$Y";Bz/=FMnؽs z4k:=" G _KA&CLX-Ʋ²ޱ{*2*|I2bJdtN=%/FEh{ XFdG*#)rlj㣭ϴኬN_AbQpS.^WO_/ǧތC(Wj/8FD@AhfWT䌺bX1b]z+aILX+$xlxݸ{K5ΙBldvAa"Ť9Vi9xxW4Axd"}%(}SppfڽP\\0#mNi;{_-eN]V(+4k낚q>k&&3|Tj4w]穽Lh,U^D B:Uܱ݊]-ZnsL) QRr\w._{`Qrk|3|v4{An5MCܪϞxG|YYЭ(}RqW} P<ު2`}j>؝$#b>vɧNRp1_W6WȳԜp~UUtt 라~ӬhAWw7՞7X[7[1VJa`8*pVgݢqhnq~_{kAna~Mk?5f/~ڒ.Ȧ[r|QfjGTuaH[ґGw ##?]gM߇/h{^b$"OR9#ͱ_ ϣp#on{hp` 1]2QtlD٢)M1* =w.PDjD"TBdPEأG2W82Q_XV6QP$dG+8FwV"I+n7.3{qN~ u1eZGo:ffhU~⍶zq ?iG,5Yq.RJ3n>QVKL$vT0 [~=o;Jq|%O8_LKp|yVsnޜ)ꄁ{*2AbҜľoT5*$@inklڼyp~ߊ@/G75n5.ؑ{I90qhpKRGѲiVG}蹧JYOz (2x4;8hP-l ,7>6$reC'8I 12Vjxa#@==j74z곱m_~c+Mҿ^TE vm~ϱ^.1TnR^rRli }!PIFŴ=NMe>)/*EcvCI7amZM#}o4'X ,tƏ=\6g?Gp#'[)Y`mnxg[J-snv$7V|xչ*G=;X[?JGm ?-FooO"wQ r@Z3++//g"BE0U.] 
Jq>\ 'ʪ G&8+OW f%G $ByS:/ i/`~*{"knRUiBıoZn7=s{2DFsILHGqf~5 ןq|sEE#)xa坐2!Yӱl5KSA78M6m9]Ћi^EaS?I2aNnڳ;f^&H4.e3r]ɋu:u`"OY/KOIS6i|?}jکfun^r2aktZTAI_x3 ǕͺyiSgE 6cƼ!Y!薒p"'1M̬7խFGAp~""=f!$+H4=(,cr B$RS4TŸyt|r DyS9H^LFyfMQpmE Q5 wa9]JqqQH;'C%0_H8n)ލ)c(]yG IA(j]OI 3z>LDKq'\c+GDL?TIǣij(iCq( X4,j:p\b32v`2>g0inNR_p<~(_مoɄdߒ[5*#oǛ'=h5{*W ;t!kub»S+}PJ>$گxះfTb졊t/j7&ne>k%gJFb{8wSpDŽaHr۬dC]X`@惴9MM#իQ{m\JC^"eJ}5BXFgQ#%N E~c\SV>'g]b.y yT[ApތgV#jKS`+9PbS1wg:hSƤ C5^BӺ-2RV ޣj%Bnb߶ ULR{b5d~}0)_T"__$EtPƃȘ*ZةSg^T_HCRȶ#rRt$>R<Q;grk-L 6֙q/(=+*2]~Ε:̂彃_z9F=/*hɛ HtGmϘZ/c2fVntX X^'SƲwl$Y.6@#~V|G f+ah}Ԝ4ksx%E@nqkAb~Ny<{T3E J湝Adgj:i9Q,uڈE~YhQk7$&;7Ԕ݈/~z nsX$ɬt#jO}n@r|e('j4lN>4jtGhE'uA9g] CmF,^vލ>GPZ/%(d[ m'a^ "NGg1ه^|tLFM@piݺظ7*m'c7HEgU7p?5s2`Bv48Inw'=<0QoQK#&Q4լN`B0LjMC1pfwSAOkrg׉LQNÄxx$0]{|arVf#i-2x3idb}^la=zOf#Hk qoYk'c >;#4I !0>G|0Ztz 3 oJEl QJ;~Sf滪/4ܾL:v\sX!M4hu̚۳U~MPJChekVPo" 43gLtR f+AN_k ^x$ڲNƗͧUi2!D.AOꁷ00Ơ:w:tӠ6\ٻ߷>$)$O:U~!iT.&g- y '"yeA o==iAOq蓨FW`& {\ю):vf2,3SR-G'# ,δ[Ӽb1[+HEjEFc=Z"~:$ȉߋw#8:Q&T>lI/gb35jWknb“E^My(8#$US$r k}an9I]4f˸Ecp<@:R/Wv뼤 y 7 Ir4K(7w#޴)Og+ЯAnRMj{PE.Ԝ$`ωrӖ/G#t^uĠ |ɲ$x =drA_T {͛v_&_d WIm*%sT$4@ǍO/G;ەJBtߜľssn~Q7.6[ق&PdUlqdE;DՆ 4)vS9Be'7lb{fdjD)H#pN֕ WI0*Djqژ.Ǵf$D7' ïq8M!3=KM n~Q'م'ɼ,U\:{G^[M0oDnbC!_v`!}&(8 n# XaƻEAD"1v_?|JW /<'4@,߷s"ͨ(2a{x>Q_1 EB|:-E`qֲ_M֓~xhȍ\TÈ8o; ;H!}2U@IDATG5S7.q%>t΃p>͵N_ 9$'Ca$v(4 E 4<8@,r ҎY74g[秖aq0;iSKN *Έwã?)nl[KD@+y>:"fEvGGd eD.K$Y5(eA(7؉9IO]]To(3zw t`a[ao^9^ۊ}2}-^<M|,'2{\w &6~2M Zewdv[Q;$Kp-eE 2 <"#"gL}0W, In5Z K”%۵o~}hu6lffO[!]ɇ櫌Ǯ*Xk{*YjCMoمj7ς8InrWל(SL-tpS&Ӓ.ZM_O_!ADYd-dF殛SNf cJN|o+9߻v:vu$.Tx+p j!e|B=xeB[Y kœE+=͟!l)fnIM(DS!J8YF@ PoϽ]%Ix681@x]s-#:ޱm vuC&ӻrFwl F>g†N>@lǫP`p\7+87@/مr:vvs;5 ,7k/{م:XrpBPOk ??FphP&=УcvUvæ1q%]Z<$Wǃ'Y_i QI|m:tLOk'<,H&AWb՜({R5q픁4Z.$s8Vp4LH;LI+I YL%,] &tj]ܵ(NG2!3*xF2kE-7p4Ik\!k9p&EeA@+L}̙| I? #[쇎m[yuQ3bElj&W\f͝,s ownvv$:)V6,%jDyE.k8W\D@L60&F_9@ѓ7.`򍐵{?hEp^r/IbX Y ?Y7{`Ԑp爁TpЃokŽ#F ZԮLu;u>_? -0 ~io~1jHn4o}ܖ׿\Z4d~qs跻}.QT{5ީK\v>8In^h!`PfbdJl e^Yg3!O#'Ӳ``qa~jT3`Ұ mn2=}"V*tf@LHHCti7X`=յ|U5uD|tL/D#FyV΍՝a!OaPxv4tǷsרyí"M 5[/ZC\v:6-$%T6S!BI>(!9"h4FM# ɧ3!pNa 5c$s!=N_Е֣LCB>k}|$ΰYP6'wN^a鼤LVǙLI# =IyIIo.,ok(ۇa[ҭ9ӆ 1_Q1%0Gq޼"7@\~>xqvAeટѼ.I< ߡXR@oerg_*B.aXroKnp:9v9y^#J7Π'9Q&ExP7vo?x`0m@;Q4P#= :Y>䛼Fifkg,̀@3bcu%e3'?OػvH~Q o: 09s|gޔR3\Ȓ.v8InN("WJ$moל™9De^S&Yi\  ˽Ig`Xv+ ,Q#3%i[*u#YM$8|2;VCFxͥF鑎y0{6Apx*;11m"Îd~^z+p2=?Gj[";q|GD&0/9ߠg__C.hV$ '~0[9Kw#/݀{FFI7,E~DI1bPrD$\z5L.Ֆ9O 1;1u?0aoP'>}v`4 "#;cxoe<0~KnOeTz|/x;XZU!e |*8eF=36UUYaB ]o:Ak/}Wjpl׈#qBIPLA\o#b­\~.]W]plw0/x(G#O QEd!Dbc-x.V~!Hd.$ݷ ?D/ /G#5L:vX!,$̵ QBxokD:=.֩ >ҕe$[( p̄@x= ~J*F__Wh(73vqEW]p[iyplyׄ#D9NPCj6ehMteEW]plMw(/k[D-(c8N:-J4f+T(5n*芲 N-~%l?(3~G#0 GR> Ka+1!z]T~d$Kw)5y}8@L>L-}ӆz]MvIE߂pD@fBR8ePZY uu*3|:8(=\_d00Bzl;zgوi1)O1[%M Y_|n-0s$YSm8Iū뤏'hXJIi(f]7'Gpsuw)jbe_O7g8/L ¯&/G}D&}2#v;ؘٙ7ڗ&E-r"ٟ9>3boJJb,5RF^p`r4Nm*]@cWWA`N- zuiw?89[GXʚz8s͂v%TkF< 1b Ekނ9B@:AFy%) 59deadJ ]plۃjplNg#K!pa`oum\E;NZͽmcD%'vӰX&[NՂfCXjB.P-RGG"IOت#8sl N-z**fAqyc-],~jjgK1,ҵ|{{P( *z{+ηuEsŐ~E-VI3Ŝ$[(w?;QǽcqOrQ&q#N< ef<&|P (Bhnѣ,.Q'y7BNMt{!2Ga($%?z{,_bD9,d^[ rsV|ʖ*$"^S#N~$ Ju±2 ɲZzS  8{qx ev $;^[]p?vD®%y; Dqu‘ ([@"nDLi7! TCg>df.78'Z}F2z }xFy2q)Bpqr]8$7-Q|bAsLB1'ɦcD>|NP30T ”▢QY& ,KGf''vՉ̙}&yF ";{ O7u?O79FѮ  d}6Yk0ŷ;'/Ê~ dE}P|01Q&a()kbk~0}V/R'&xvVcY5?޲8ÁKan=U+7r8B *+鱃Gy ;Q#>1/FIj$@w6&cf_5v&ޢJL`sl0(yB&pE8pf3@&^K-V o *$*8wNl$tܷ;!EFJ=\Хy8s! 
Pax_غ\)FOP?/xph$+)"`438=Ȓ<z3wsHɎz04yEƿzy.G֯Wߎ0R!CZn2j,T'd8 cؑe::²6Ay׷;7X.W)m=~>s$2&/d$ sqvpꦍ ]'Zp1aZVŻ#i}D% Z꟱+e5`^o TCHCsW鸖啵jG" h8ر*c@U{""͙#bж|V]/)~T;%.8Iu)p'CDI֠V c[ QI?K猆~pX:cVgB+m<:͉`+pdh9 m,q# c5xi2J5}yL̽Ә.gǓGҗH8ۈWs±996HUG!yPAF\t$8hG}>bzxEG c<#4x6pfId$c9Gk; X+ChkFSfTKVZQ=P\RVefP7BD!0_)gexr.u(O7gpӥw:3yOe2o2v2 :; =wEwNtDy̋򡎇dAwb #ڶ8]$lMYhz++6VeK_X삓.{;N$ϲ+J^<"]Nx=0u5:rLD$϶ªlE#e(soFig}S1-^ya羇g爨vtzUeraC/|asGD&0qxgFG cH#)}fZBOv!|VwM84?uecܦt$'D4{6`56t$t|*VMI!2b Wa},mTn$KhZ!~ޛع1U[6seGu0xFIVf 3:ǒe ;VΝIrgs:'ESΈ? p0;Y&#h$PLad(0tu\0&q8C K,靈Э*gˌAlt[" $ڿ#`L8S0&iS3ԸaIn#BgooA>Qͯ Y*c#n:=`llJ<ɄcWrCϴ=v0$\ _Y¼iIUVMw5GdV`8a鱱a#,!osZf]p+NkDt51ZUST^²Dc_Wq1=L$1$${^d$Is0x5M^W]#}|#epx݈_,T eQ  _ؐf*cn'mR8Dxض*&I/9"sĸN8z9AwW G?n"bP|`n#\rWbG-'JGJTTESG%'c9x${!YP^S3 :=0J1LœH2vd3vWt9Ʀ5uBum!%BR"pvvv 8CSWW*%8 PT8,.8z;HvDլ#oP,z'Y=:pc, !o \K“><_Iz!aFׂ. s|bK̗tv/\|y٣_IvIz'm9AexDt$d?u֏k%4*z1 z&+qXg5&𬯆 JMyB eH}0Dh$P֖ J}X@H0y +e•F/kKpt|@dѯ2b(+hɈ|.2{$H|($/hs8dIIsuI(':G"DA"]48oӆMZx*r#[Ý$fO&IZ%TT&s\μȝ-zjaoPTx @%ָ!Ǎ$HDd$\ XwO'0)kfXr:@DR?cczb2QRHlNIJJɤkD&!-.acD$و9DӐjI2U4]J;c?)zyi@$YD.KEe%Dގ˺In@+ 6^Ya ~$^IJI$I`DIܛLdZp9/49]3n@ 9@ %ݑ%N)0F<>>;lf_XvIr"DQD٨%$0"~֍;¢o_?SkXUhCLܝ8`BJwHLNdl$ ^e s<P^xh@+& †If 7i'> %G֔ODѵ.rp@޽i#GjG9 7ԛ3[oe$vvD4׀I#~)7/7y\6="H_X ~{= S j_VaIH2©ۨEu3(8 YM12iIrAxFٔ׊ +p}еe}2G D8Pxq蠛>yQ+7fǏ =x Ix>M*H:rz}VA1xXm$s9Fec#MHy)D/~?o@whA,9rva.n+&Y^dk;5%,50mg2 hX oAy~c]TE܂Fc%O4y$'f 9i׍(6tZq pNayA؂dYAkg|^mH4!3|ȍ;12j-2k\(xL ">[eľe%Wȳ߮K0Jt~hOP5!$ Ee\zB#4aܜ򮔈O!4D*aAP9"sBL0țfezQ2iFذ "]޸[9bx8 VB@p/p ēLd['9+۾$tMJH^"7GD&ۑ;Q|}H3֌ոzd0òL.듫kw٩Nv mSkmL~+yX[:ZP*UrYDq\ /_Y*p uT1$)vv^0 S.8IfX<(IDY('{7v ??n;jGqhHgwaP[_^~pO1 BKHzA&؊#L2Mozl`?%߾Fݶ D{p݂:iT|?04{qp:@Tя0E/Q#>AI }.}sQ>[Y, PQ]yx+vuqr^jZ(zyU WAIEv^^w4.dHm]=y1P 4~s`Q4삓dBG::a$KW(F6±9r|jؓiY_Ni [U |$%b<]InFɻ03X 2To/nT.AШv-/ie8Dׄ '#8@ 9Oel9 I3)KT&罳w@ETDCS:4ʩn#C^:.\v)xxO .:=JA$7|#`plC[IL͉D%HtMM[pɡ5_ΚTXR\d nj {DJ%)qtʐ1OjUX/(KsWֳ"es.jI !,{>'h"jxoaӭJ]A&K$Y:q8EtP늽):%&YSy8e`NJ]a2_pxnΟ2򮔋.._ª S<;%:y9 wGb >&Nr{:;@]aq9`C8{!/ż{@Y|N q"zcB|N fߗOUiHiT Au!)yR2܋7ڕE/I&c&R^ιXpkGf<1ұ#Еx򭷜*Kk $Gy\͡ery jckHpA$[3K[Ҏ]H mvqt ð9~6\;ҋѳeSOB=|7`^~T)=@{D=hXq.z-$Î?S\4.z6(S8K1ҜMKTl5YA[ߴmSʣRh^)X^8tz,hD{\)zܠND|13.x&=؜F,©MP^YcWU[7g4odʐ%I2 pz^pyjf aJ9wzI$?%<u$j8Q'h?G6KEAgM}!v Rb%CӶ kKdedeؗkqR}s୐ؑ˶*#G>p;6<;K^ߨ`w 5N]hГc-& o'#ul?xb&: ;~B1[%ㅱ28Q6[V2Q]eR }g^}Hk6a^{),S%%h y s @JJCljG`3uL2~,6f9V@B([|eQԼѫ{`cw\L?k"uo|Z6Wnn KTWX c gdu-|PcswM\բТ+u-Gcp1:}(c=RB\xh #GL:pR0>#gɓ6掃%&ɨ@$}@IDAT@HX4[>Wwz~#|F ڰsڄ'mɰbLTыev6ڵdmOdfzyH|&ǁ0~҈0ϑA> {k)=,e@ gd@k)?/'G\pl׬{ ZeSf(F#` peiYB`uSCo2 G%=7b&3sUgד6G@% I.) 
n֯W[\cuS[BqsԢi,{lx~X 74+lk3F~FOw͈& kkU1bz^>+GJײfxG˼99I/zin˜Vah2 dVçYef=޺i;ևQE{Xe҅4j$mƺ27 $/th+RT\A1Cr^Ҳ-m=JXQȱţ2޿d$\*e2AFqo*QX4.|M X|}iLeuW:Mp(a~)k<:D+D&^./߹`MT8Qk8if,~aգxg1mpl<ʧ3sTم=x* (ᡄ\eb_BT  AGF4EXvB5RA_][OD =m،i\ nΎOwg{U ȠNϲ~m/Mc)q,`(9u{Yx: *jVf{TWV~x|?~T$vO0KDjW!b{ T^V+UWL_fF3RP #Ƞ']”bmq%$P @6` t`/t7A(p0QȔ?u`з/*D/D ΎDʫrࣟwL] ~427K@rh5JpV}'d%3eQi9x*dZvBKz>~{q7$$/3.#`$X5;+nQkܦ= ֜}[oʦ+l={nUUG]Douqf8bG{;c%b: qWAeu-TYrOaqy+U2FC,Z*4|6ay“ I&/Nη?Cn{M={'70$7뎮6tkV0$MKmazyw 'MeNbo 51găq:QS9^xF)Mlz|>E Uz^U<]@òPYMXC*JR-̼ y^(4Z~Q% >2/H]D {9z$XV0[/{6l9S3ǒ>yIE$'K{I?pMޙev6@ndEuK^m'u|r{N}^e&`tؐG(l"UR-?|*6IZSڿxy2V"1z= %(SAoJTakcYeL`شL ezH@YSc5SeAQIvv=<}%l΋,:!hI,o~uZW ] <$cKO|l(Sco?ܿ܆n]JVP , 'h1Cx ˺2]7GWpɵKO1:9l`SOMOB$jz_Y*U*(*iڙi/=)bIϰ@??)(+oOWu@9̓Mn.xڮG*),K..W=yzm$++xo;ej@ r裐}{ԎJ~,>y }{[툈>23S1OzOHEFv5q}#u/v&t)Xq͹H fݑp.>row&a"=PC,# 1vt$bR]Z:=6Iֿp~npMJ }"˽Ύ@SXdU p%c..%O};~lY~ylyٔD:of=jvۧ`.[xzP0 g/mUz9TO#+w ;nMԭy%3do~iK2J=[@dn~_7 {7a ɲ s9jj롾^Pl+[+P|*Ou1{:; FR pC`+E2lcZTz~$;Fx EЛ]!d&?:DŎ{ܣgԭ@, tD9'(>wfÓiNNY,xhz @lfu 5}:I}Yƒs YH6fEG,y<<} pD=h7Q|KdQ~$2|s]F;UCWZp[~ {US=lRI(a3KyW`ɪȫ_k7_'N a2roCo*TW?$a1O&Z[!_[1X:`&Yy5/5D^*ޮ mYK;ג <`xGZׂ+l bF/@MX-ٮç;zoiwW>RS { y~Pqt?\Vţ{[4qP?h_.f)>g|7$΄#g.DJUL 9@yU-p7Rۂ %2=06'ș UAjq8e8U8/-+r) ȭv6g#&?BDHq}=0Ww1FM-r|d\aR̄uBONM~f>H,zܼBx])ou3 {X7O{\8v aNڤP؉ ?{ bo E5`.˨87wUMRWֈ?w!6YR%A# >8g{Cr!VY[F|YΎvv6N7~+}"fw)n鏣ҪRY)5_|_lڇ[DkI8{1e?z!%oщl怞u[f ~/>5ճA1_6*()&{9hO Z}f 9 g!t'G;FH&g¶a a8B7pvr@u`#9`(F,;!{4԰)Χ睦sڲ;B{N`h?cXVf?, :M5Jz`X#p?S1IVȨΩ_@l G7& kLqsz&=ǩ[j>iy7cn6\Fsp3=n.Nib-G$Km$W@n(+l:^i]oG|'ܨ+.A 6qKFlm)ϺzzZ.B7>=kdL; &ÀhR x m][ʚ3f̚6z{T5/Ӭ0XlrQM\.-Gi7>k.)УIL0yx_Vl<6yƖ!"9 OSGmpz5KC2MȈK-}"< S,G?hOޏ cDCKdY soՑd.ww;& Î?"oSu/-3\]=r`4 nfBGÜEt(e$Acў< W{s~ysq$km,ḧ́amO7`'%t5S^N1Oߛʚ'B_|MjF ] ė-6ؤ9 O_G%_rzz,L\PAVPA$tہ&3Y8PIfB_ QOiI#'~TIjc$p/!}.# CC?A;Vj!Ҵ4LIΚxmg>O 3tJ*7{`ZblDjg%47 o[KimG} uٓx#Jƚ'bǙX6uDN;XB%@͎سYNe1ozY%ujn/|r}#CPߗX„:J!D^Ƙ$yh6K%lsy(/HB<{WUWEo^T0v8Q$5H_~$Dɲ JX7!oN#sBH*L:bth #ݳa$ ڦq\6F|?ҦgIZhЊs(v<9, ׷A>+r5!c =DHf@O=(|<672:'4~dtc t +k /)55nf׿؊8|6qR Kz)}lAϹK/R/XTڊtN/$?Z myw:~j9}Uw̜PdVF2 GF-Qn{kr:4~1Q'z󣟀:yty5FC:-ow%ŷ~&|_1ɟI#g>IH='&Q>__Yuw6L&?ԖBdsPPyCQ8d#x+䏋_e+~q2c3ղҌ/?o(>ԯ!ߓ v:œh/}utNأle 1p ?`{"DE M,2i6SZZ'Ðx:MzdSz#L(B(>(w0h0C25J/@0zɑ.ӝgڎm78иA|V9R'sT _cҍ"R<GѢѵ?NwNpѺ⒧5qQ$mZAH.ŋ&??n)2%DiCGHdօl# 5[_mg2z FSֆ{F!dŚق//%=g+ O[1w` v*F$NHzDEv>j(H?'`J$u8Q(˪+ɑ,"@elKj:(+9}N`ç2M_Drd#S?u8C.{`v0#!99ic*y *cg€H{ijc>95f>J,[*u0,0m$cuӝMEw_wtڥI7$RЬ#CK)cL$6x%eAU_Iq̊G/QRQJp"c.}.7?cO\>ח<7ZdCn }(oI{Rk JJsB𜿔?[m<'BB^$,`~w]g=yXFQ: GKeY@̆UaX3ԋS`a0޸zqWw3[yu贪 ,dbKXG^ ՚[\SF#GܘaڪnKt-?8y-݅,<-GesEZ\i[?H%u@ZsE!Hx캹 ȟv $;", "kG*P]7#!a8._= ;QHIk'? c[-Zk CiP'~ҝȾ_T4>mpl /7MG3P<3#4 +Eŕ3P_M}޸q,+V/*OfE(hl0yi%>Km'(\eq ōHͶOhϮb$&p΢DY܆Aݜ_\Ë5U1["?ua^%կfcC_kp8 +x&4瀝"?Sa5_y\.=1}׳Du <# pC0!Yqany6"(a3((xzj>[ژ1c]1zA"Lrsad""m9 Bd+xxD@ [aԑXF}nwe5KMbu5DaT[h~~C5؂Cԍm +t{*}N tAIC "OR l$E'9Q ˵4aMI#@ K^n-($Ũ4'1@uq >wPaXw%&By,dƁhHz} `U{3I&w$$\! ("(Ǯ ^x/zr誫 "9$!ʝd2$d3S&}~ޯg~]]jCYohw,!Oh0{X q:}ĐHᤨnWCra[+SF@XD"aXUc"@nx‡!Le+"4Hq11v@|\3>0xX[R Z]vyYW_ V~ oO|Nq1b-9Nkѡ`FO\_MZPtt-+c֢8dl :a]BV׻FM$gGnGd a! 
yPȘ#&9uHfl5(jI7KU#`UsԺiso;GB|jFQ$l_uP]w.&ϦXEfAy?˫#dw`588RdE]"A&l{Ȏ1t]*jXNޏ7ȯXBƂscdJKSHn,\V%ϊꃪG?-E 0w(D6hB'$LLǏ0XS,ٵpM#Ãu1z2I*Vn urZZ_dgh,$X\2i$9mb6P!Z=n aJJqRݛM kAWSe,I& j,][9`QGvŋy&C.[6-f|,0g΂ "mND &t@Hd"C_enfbf31cPΤ$qYνF )g{QXqwvETq̽GVV7`&Py)5z0@0KHUʮkR܋kߘ ]I^hU}5]y׋'$cS[nf>-rE ɾ^cY $ _xfgzl/D[wUM 9C?aiet^a@<@>L"\{wIF[5Bb'{T=cMKɮLR/NP}qjF4.Z!v$_x><n'K@h/lcA ` tp-}0LMUt 0 |kD g[-nFOn#b.|+[ܣ'o/^xr@>ʭl5.Zw\q/Ck%_ · $gH*1o0N-`-,)e 5K%Lf͉ۯg꺱iAp ѕ# ,hE&aQR{lv Cnяjݦ Cg\Zoe}^"$|7g^ڜ\Q;Q& iWƪѾV"_$ = $!# ?.Z ֥޽Dk%;.C6Z 5`4CvS߂e쥏VBԊHn?Z="6uj*͉)LHkGe Ȱz+0f foG6TblIn|t';G0QCYr_]=nl2[K6{Ey;6RvLa ңw0-y ϭc~iyew-X^xTZ3l%L}%7?;t$Y<jL`|Q٭pT _xn,.) s,v'vqݯc{T1Wٰ3{@$y(+d/4 0y{7>[v/b|P^qﻟBmZ(׫ḳ6ݚlbؽ6 ݠ7#ٽmF/GI|Ci\rjuܗddWO>pΜz9\Ȏ ^r=2!|EaXύ d5dJap8x8\d?YZeKN pf❇%:J>ʭb‰/u^UisVO cŎ?6O}4EH2onԦ|=(Sc%,$/soXƏx#IFΚN/=oVȉӏ}rsE|үg E{L'i+q7Hzvz㘶洞(1o+a&,)'C !*3<)Mo)&^BNDIhw*"gNZ^L dL1KU 1ރxM0 u{Ԧ{Iqg6K@3 @V9\EXu-$]>y !޶)EG.03.: < i!IrlXa{]֤d(5lb/9]hYeE /<`Os]C1"X+`sz`  򔠰n*QFk({ShK !:eT+YR0[x}HL NG$AF &k*!A)(P5Vg,QCiLFHB~O?a:&`Z2[{T-qlRÀ#G>Z%k67K'] D8`=e8ÈCY|*@r+s%z $Tt Y5$h  +,v QVHO`k}Ԙ-åʐ\5>_>zׯ#GV{xH|+ZS SEE}o}8|,mϕ' !0^)AA; $%S]B Z#Ȣ $T({|I/ԯdFno2'/ l۝^d iO(\=OڠYji(BU5'g9i0 :2v٭UL[4*gxAY>]FB4]7Ѳ_ŀ>y |ul+w6D:i?0fWur~^ڶ+|l~=7[iDEx9NuVh  @ E LKzd2bL5}c`T;~olrH:'E}vάg.~ރH_" #7I$vi$g |81JP#o]21"QBt ^x{{1wys?Nц룲tLg!{eQ&rV*cӎz&'ޞ-eR-H&AvO]|t H5`9kUwWihV ;j̋#Un*83W5C}Գf?K.(: X( o+Q 9Ӏ<e <ؙjVVUìV[zvq/jwgB 9c:zǫ0)=|IٕPyVMӻ }117bC Oy@#.Kf^JX@ݳ. sSd(?"  b.>wWi,H{6㡷f9Q>'\1/Xxz7!f|kqbzӐHVB@x QFdx]>!k/@  3 hP`-:թTk;=ݕWղRVYkf6`&6:<HGdjNPHGbM9ձwqՏ6l\u.'FʍYD tÈ*% cZw~5q˟oy{ˆ$`qPmo0}A`܌,w f;Q.f;V;|\8ii]g3*!p^<.+N(- 6WW6 $bRZ`Y(QjMWVF07~j3ACH\/YWxK%-yiOho^(; {yʔ,y`4O#WXrm`|C)yZv߆#Gw #BytT( 61$6]]Zm6*0 );%`ǧY²8iiX?ӮrS㄀?"kYn͖f];u=ϡcز_ofF bRк k˚1je3տV?cҿەИ}]?n8 ѻ۰h|o`Y§MG/tcdLeqPn`pgA,ݐA&p ټcC -bjc ђNdY3\ lE䗛JB ;EE9Lj ΉnFn͖bC:6}| aksGCDzk3w0b0#C;ZT,@ץa?UKe~GYn&|^_-cj|0@gxq?P2'sJy䇷Ԙnɝ1@W|Qb7>|ݻvR<ƩJxv/(f{Zh0 RBOE /~=`MK5zBvşm?gp(-zZ'B@D_66d$ItF^/>LxfǭXg7OuHyzw$f8pDHׄΆk7tT1MV=d1)ѮS& I&7By{d+`~!T|!YqXH{n}&3b(Q])bʪJvI e&DVa^=#k>g=#0a3oK󏁵\_0wNJ!(P!.2%ƾq-TZv~Vnzu@IDAT+Q#NB˲ -В\^Qݵs&Aoz<>ף+3woR2޽SM*aprn"x" SW0V<ۡ|? 
#t(!-ݖǒ1I (a&148Y(|LA pJa-~aN Xe[¡/9Xe"  gh^%&[0TŶ;~:qT|L}g_g-2o$e|eQ'O2[\e]?flSAvȋO4Or-D@y+j,׌$_U4tͨ>ioYc5ֿ8L38 +?N.FhPUVDkkLP#OmMnhjl5 d{* D[ 1zXàW aíо*^Q["kIb5v $U}xsњd2LKz,-@[ kż!Q/*=L2@(;v1݀&5Eutp:`@_eWњ\ qQ^\M7^O?O/%qioSdKJ!RXs) Xܑ^]kkA 2_Q>\F!L]8fcg`k-+y))žR@7&j IzK'z\R?;s=ѐ#C :G_ h1v}G]4/0X;Z6@ִT ](Vz n8O9o/,/NЮJ;8o>^:Cy~rNj#G6j6s`zmTͳTr_$M(`ܶNvёab8` KS` HAH5YP !*R\ mqʫFqBmLpyI՟M{-Hm>Q9 1]mq <1F A:G4\(ʉNx?}|i]) krtiPY{n*=g1hwt}fx„S'VLkJd) gݦ}?\8i/`_گdcr&wK?4VT0E)C ֭vĽ1'S&ID GյjZȇ43Dy"z(ޑ(|Ay+=F-}mשEGaˁ]Vck^3/[cpߋA#q>E>]?5T&9LRAEõי Q"Eڙ.KjDI$QDR5f(aeiiw]?{ɳwB)tC 3!OуA ]#+6r*q,C p{+==MmT\k'3}XP>62ڰ%\4!߬Ug|&ϖ ϟkv&́Cjwnbf·6- BFtC5+ʑ(˵?òJ!wlhweQ(Iv4IBjKjmB49+ A!yʅ|;Ot8?ud?6!(#~N`]l8[Z2k\|樴9u7N ="=V[c+"Ղm`zw5!߾/X |{ ;svCzɏK!ߜ~X킱Md_mZ!(#CNЈp֫\iYoz<)\yBA s9?Q/dHT7 B 厰tXiy7%]7&uwW(~b3~U*K0;ppabd9[+4ZED\NdKgPsd~p֫C#n SzFAU> ʏ3̓דk4ƹX8RSXh+„''?!A5ۣ<ŎNpݓΣBPg!@x'HZv=YZŲe:`O=ǫ*Aǰl9 ϰk.j,6up μ"-cQ7a!Al<"?2~1o~KfK<&GX('aEzYLTC ԿF_u0vC}(t Ö],}IVIǗbAAFv;2 ۏPy|Z7ۧ)Vr q^ 6r;- "Lt=<0aFԤ>~s$,rn !@>e)F"[ ߻uo@XHACݧY y@` :tJ|q#9R1q`kE)-bcSg6[2$=;_7v KV,4%.bp:_hx0wD06I%҉0o<4lezCr)\ԋ}t+UOK!5-.Uf 9?P1j럚9 =|kLaBgckOf6- BE(_>jF!|:u=\.+ OIphh)ҿg,V-h1~@GR$HS,Qa?MT%* : ׹Qu JtQכw䁷 g:Ć!ƂO6shp t8/y]d> Ǻ *zrE_ _=|sn1ľlm- BI(kZEҁ`ݶ7u}+hy Q3Z֥S`$waK&~a `Y \<&Ue"dv IGګ"[hb[,gLÈ(Uf07btꛅ+gW j`.~=}PvOO-'z >F܆Wu - tKs5&#B[{\Yca8 oܯZ#z=nJ}Ǩ.Cvgf HIi T?kŪNCIahV8FE>M!>ѺGm?p|% Q C7fd׌Nnch9J5ݳkgb d?eSGU u*@2u zԜ|$jJ "& )KvFU.m$:}9MW2nZ;H!Kʷ # 绍k{lԬ(+=N@{d  "[‚0BE3ʨ!j䷿Z7'F9S6"I&^z7?g۬|)f`ޖUʫj[_ Gt7>'$lg9VУO_ 1z r$wM 1;'d>L H`ئҁd]b Ls0YM rRC`4Bc-TCUWHݮ' YMߟk3Cċzr=eq7Jf`+8F^&ՌњH$ɉgV\/8ν_\Rb6Ijˌ#̹"Ȯhxv5;|YsRR.1A-ڡo9VZY!=ɚ#^+  ̗1Ec+JTiCeU+Y1Әsgʍ+9ݽ.m!(k~pk𣗂"X?ѢK=WyP$pzcLl=F}a{R ʒV`tNsQ!0oj2<鉏p[ ӌvc _Ií3r~lT D@7Dٕ@#'C$TE&Ʈ{X$Pk!Yz8b>Ƭ O/G~%HK`vх$qi*|+C쳡Cpm`mAmڇR$DsČ/ڠyj E@7Dp@QUkf^ 1Pic̢MkCg qqe-鿍VSx#Cd5fMV%%#} #'@7#ɩ$ Bi},&2/uP}wi D@wD-]F-{@6|#}mF8u 1:_͢h3t-mԯJozo!d_5A*ҿŽp׶;3_)+;"LsҪuo S ȏ;qA̜g۴B#++ 1)_*g&#o-h\(ʉܤOx_iXU>-r.n7:iɠs dv?A> s}J)R0 If>4 ԕ'&,?qm^BmUdMh\#2 r6M<owkf6rMwyk7w RR6i-dW5=36!@Oe$fWE:8#|F,f 5eW6W,DC&Reb8Phf&G ,O^FhJ6-UC*53;]H(]ޯ7[eՊj;ݣtLfAn݇ B\wSV}]Y\,Y>x jB9,&BOe'IesMZ  Zq&+ CCBH,v*Ya-Q !|ٳPjO E㗘X8 ȑEep0`H0uqE|/zq.A 9!X-3YjYM`ys?> EJ{d k>jEzE~ AgNo%%;om1X>H;n~URS6tEo@'&͖ Ry;=rdx͂Աpnc0wfml$BhOK^ ĐȎgS@2tIG觊PtWud:.U%V6&Θ_wl+xW:q_~+8_#I  @r$9""i G x"^.4tΧw<.ffX'uEf7[is󤿶^0BGAP#caAp}) M=zfL"O:m31|W}IL=S~x-$[Ɋ^R%+@E _wȜ?o6!@OeSIF>UWxkX{SwOإ iBE$4 s~f fUgU z~GXM.8*2ez#Z7t@?e≸xCpwO꾭a/le`r-9S=W $4 u\yiiϵg,.)/~N쳁~mDý3vsmnCֽ s8{Of[mZ!Bh$ha3|j+*rlPbcؤ1i@ra1 `$YgX5k2 {,WmVgLv>@`ghn !Ъ ~<$s#!HzB4ZwN[ߪxr{QW/%3˲:Ezk02O870Wݻ<ͽZocȷIBL#|uܸ `%?u{ ČmZ!@4Oe㋓rjჯ22]-*:f1ʾe]SGsW2-<\׬hI.Q~hQV0d,C^뻈]kZ*c,b|@rlT}W.nIvF<$yJmw$#I{;-?{MG^mѣbc ˽5vOWNEP3FE>8W f~0* N 9 Kc\yz^r+w&4k>'mZ!@ Oe͢\bwo%_~7hReK| ܐ Ja"$K{\G1|ۢ!VbUIMvV}6,+ɲHu `En !Mv=ewq[wIFw $_-‰]x?hfD*O 76V@ ^ xȷ`M,|9I1`|OԍU}!@@SDEW: zT@+ɣ>[9oweĠD>o7ge$H<ZMq旌X#o⧊yl~r#p!I.O%|%ImT3€$).-pIk 'RK/|kly7cp=o_kВ F#Z5$55K@ ѷgCL!LAbd`O}ں?,;V;~WcG=ӽ$kfs̜=u4ݙ[H5W $#InҚc Ulf6( J"sEY)ѐoG?!_<|ߧ$CB V"i"Y CX$k}$љ!hovz|v:ZkXOyq\>򍄞α[pįƎ]^-ZS#1嚺{xruma3`=wl}7%K@] vq;]76%[L-3~3# S:o݊zB!tQ~u -D?X|DYrұ`ҠK񫱻[[¢[QF 0Ve$͸U䌩&Af))߭p3X,OB7ّ*t/|s e80Z68$Lo !@ NٜfG]̑87~y$#QFr}`UX`}ĥ]ƭix_i\2gHq C$fWwBEp S8[dϠŅ۬ro?W,oK튴Qqnmiu!@E^,U ] F^-B ,* ˸U=),e0K_*bz|z_pݶD+?5_vAJJHa#gO={]&Bh=Mq?R"1]-ɸW=C!\K񫱻[[5KژApc Ag\G1A🋖pVj720ȃ8~_X?4")I?B#q(ψUB8:~•Y]rF1L[ k F, P+S -X1lM̔.> _Q!@  "SbC%;G6p&|XQt[O!uRpiEvJ_3_-.M!QnkD=BC>),+SFݺXYas)=$Vt.R$ɵAp;|s#秦lubcWpLHܖs*!@B~6B}!N_z|XM<ș…zakk䪷`[u7 ٓ>r[j2odR?n_$::* "&a[6- }#@DY!"%6=|GâYH cv-M'䛻&^bI ۲ץmB GrcL=&ddv[ @}Қ >I^\H ;(~]GGHX" 9''fdmvKۄ!@Agp^f#cJl32 qȞv.7Z^>x7Zk7mХI vm=;@\)ӳ6juhI!y({s8 
wƆXKM)w[e e`@>6*HڈY!ʋW5v_a-G % @kE\V!@z%T%IRVH(v>([S!߀! Nq6`*{(FaX׃{> HzI sx@ִbKv{FFh.w_9ba&Hrސo-ym6s#Gv[ݢ7$^#\MP5B vFr;LjjOOC ILts7PA׊Zd lbr' /!wZ LSw" {]&B8(wԳ }}$nZ^7(`A,w}}`\!XPCrD6w))q6]+֝c$inT B3Q ԋ!n?k_ AЁ7빠|Z:޾!߾?l c vÐJɏKr$S8\n8 psbz*I7!@DٛEet1ۼq ~6u   ˊ ܏ʶ cL+9/vI6ݢΒ $}~  BWX2k9!U\+ XZykƼb~oe0V|XH{ @@i0!@D}L4G`ߴaJ-+3V;Hr]`= /}[E V~|1^V .&~]GRR,:xR$qtpX^ B e}郤)N:+sXH qWTv3h$EbH`?NYz|co[PM+'$ u3c&Y7W p=K[up8RC8 7&fd6B #@DY":Sb۹rgFA8YHlleӁӒS/Z'YFqk}r  g>>y1 eI6 d5L亾'/4!@x'DSo$u;"&,uNH'=̕k݂kΊ>C,1f&6Ie[cmΐoʪ}LoOÐo8z^Jj_ T1 SgNހTB "ޡ' F(wȲ* 8;A f+§)gzY@4O!ߚvY%D;;#gsZZ!@zA^4Arx et"w֞וB ; ~:GrpUHc!߀)lÎ% אozވdVp&N응!@xDK_$m 9%.E;2j q{g+CF`QX2pJʺ'Bm~ҞP5ks~] LTmE;+3ӽ.m!@xDCO$e+Ⱥ1bos̤|vw DfocG5d &0|Ј+_?sZ/&eLb).}emwlh!@x3DY{$9Ⱦ>!RjoTw g2_"쳔%;٠\zXZ ]CԐo1|*~4YV%@8^rv-(4pB |">H)Fxd'jB@!@xDI[$k2u+;"!)5 PgEtȾûQqA\!D&w5UA@n, :w' _s"B Bw ;dLVpCϢƌ{\E`9WM\k^5&.,,Y3+G?;qv EpS2YsgY.~sZF! Q-CĶkaL_uqC2ϒWmNS_ VRQ&)ǀ$NT8aO2,α7s%?5UYVfkiy m- B |"ʾK d ԜBR<4)!8@W d (,!dewr-*^4BWQ$} iH7IvkhI!3QUz@PiFmG61߀ f2Luh  r !kO1\whI^o_֨!@(D}T2,%-MZ0Zrf=F]-Ǹ /|誒|GyPȷX-eҤǿkzIH䲏V B |">ToR]/eFf( 搹!=9:i B/ j :gX%{cΕ*-eeJd eQȷ RRHxÃ'5zoϪL;B |">b 0^qjzaj"C7^\dàŻ",vJ-@W Jʏ78>hĕS7'*\<) PF[ D<!@,^KI&fz)\Z*.)pv;ل Q47LtBdf 231& ΃nf\Y@5VX\.~ zv7U#?=ə+'7ce#{2 5 Kc* ?"y"K_I6aU pNMxS0!@#UD9-ccB5ȱdC<@6B<9$qxG WB1X<^9n}73CډXfLO`55ף[Ed lpG.*_._ي.STT(0L UIf ;,xTث(R&C! : z$+զG<{?yd\Uy!6UVVS= n<I ,$KSD 'Ԫ0ՁmUzX~UV%am?:,|9F$=Y^q5sgzmMX!/xQG!kQS򣿰"W/jr?6u/O㩏DLFckHe|ml2jdZ8ԩې%p6A@q] J+Yaҿ Vz㇟2b;SVȱ}3.UiLI}hW %l pkzmױ- B  }] $7?M@IDAT$o熃>e~ A9 &kcoM6k); 㛁TU\fGaa`6d$('kDqݍ E ɰJS >W,wA>?Pzo-_ݯL:3W N" c$i\!@\z&&%HrUjny@/h |gd݀4>(4kJ)1KTr(_&45]`q{N*IM]Q ʇrQnHj8鿁N@/ j7qV0kzik:oZ%!@!b4'٘#kGWL`" <2i׮ЌmߴJٵwzM) LbChe1 SWs=֎*Iy#"慉%JRзz~kG(Z4+hp?[0u-zم#Gve>D yrВ BJHB<$%®UmtaE?/t,N\ lH}qxĮL qBTsOI~Zy >-Uw&1I%Q:xd3}22s'0ID! Hz,"{# Mku>=ǒN ~$BhhV3C 狍⤥ >[;SpH}1݂&]0賌8do{FR?|oauD7/@?ҘB =x+Ӹ\ۍ[B-ì$6<4bEɋHQ+Nkn1BzE~6h;KN_d }U oLH'+Hw$BCЛEY*&Šq;pT_5D9KK>vceڂ}LL6מWsTЈp֫\Bɹ:uɢ/aUgUk˝0fVI9xYl~ikմH?iYOr^mn!9G$u,+ymoO2w s3QB megsqSQcuDEt\qGmX(A~%Dqy3D4pĦšK8A9{/:1g lhVDĠ Ć{eظdطm%h۞vhnؓ'ִ8!/.3ŎuA7E~vq;(f˱z-\MS{QrC^l;r&/PN"A˚qsc9ҝs{bqL"e SĖמx`lɒ̡(v:٬9NFO }[~8֟Жd $ rw6$O Pߡ=.8vz9Eg@("{xg\!MQJt`Aߦub,wEԧ s-޾.#[->5z-+2#<Ě/*,,}0 w[.٫-]qneE jQf?6JvNE1/}Y_\ S/ (׊.Џ:oS޹_Y7b3ؤ!;[etHlYذx}kܙscE*S4[0!dtIY9帀p@@@Q>4'LS4:5Gd-\Q';Uh&؂[><̮.Z+B:0'up"gjMZi ޏGE,wMkxx8v@8G՗wXV-]C*qPHQ~V:G_@+߷uhjќmf].N7 hυ4M-ȿFVL57J r}ID>X$ yPS5ɵ( @@KA e,{,>o 42gH}_$zV DY.R/|,N{}og$\WHgLLt}ڑ+GHnU|m.XFVh׾ Jp8"qJ%a1';N s˖xżh֬pX\>r8:QC?#[Wgquُ\i^'nWUmw@3ZZ'oyyoUNIdO!!!q x< WVpB,˱e+YQ/RI. tΤOxoQ=bW~''oQ.-g;]svyX@m>'i#PT6O$sef]Sp">G d;cd>,LáQkJT:t u`;t9^=-5%tN^&㭡]   P 4h\[:LGN4GۗA$riĘ$H#;   PM(XjGP/;S 6ںl'b>mS J[z&nl_1A@@d|^f(#PsyO6P~@C;9^|T{TQC;k@2ޚ>Pa,?  q*d&'oe1񃼲N>Y?A+Tyi,:< Tc5U ՘ed~ch\xtwK]G^|2Cd%dQ/((pBg} *`3?/we^k'뤸E*T)I#2cεY[$SG4>ɵ( @@M([bihCnmԹe`s+IJ{Oc   u eˢ!l `) &B-,H˴ͅE2Sd#$X+Ygv:Fn5K; 6c@ŇU ag_$v5ȿ "Ƃ5@=#B,|Y0FEG ,-ح,lC~kBd7ݽ(ڰfT#6-F~@u ]RR"*SzH߅E ^eEn GW4‰׶@u1ݏn5-m6v;WKafȊ|]4uR%-Vu؂Fnsc9+^mIE[G#ܢi+ǰB. eMl!-ۅ}6{Ca}m.~igJy ^R71#\59@@(l* #3~ vfJSf;;z#5mxscX@C$PrCl7\3T@Y}+]$)⛑&G-]{    @(7V57XjW2W#ofb8H*5eيCX@C'C ӄ>,zZ]ʥg/Yco#PO9(+?o)QJyy^rt]XU@@@Rq+@uֻ9 g- w?Y=~ΆHC@@N.XO.5B u^JSE@5%eHY{   GP>vA.6xpӢx%+ حi;/[?   
'||>8 uGzEEyϐrSoz5pw^86@@@NHB@~mxrOߚPߗ{   #\>N~G`gv6aXrrlc@@@" +B qAXýG:*IM"'t%EJa;FM`TJH)DڮՇ{EF]ޝZڠβimvZؔZ*>dHκ/ w,]s3yٲ]"YlP ZNvó~GtSo)lT/t=Fn-ת{/킋.3;0Xj#!j/⋏P}HK q8t0.m+w.[qTrNQ*YvܣN߱Utݛ!<`S*z9(pDjHmi[=ݏgrwT[:)ۭ}9 )ȚTR:y}~KBe@@@ ' YF}p $VJks/,[("H$b̃0&9\_3G ֹy~CӾ|Rr l!6pu m.X@suX$N۹MkώuOvϿ8mvp&D,e-ly'ohӀ>KL{eז& O"00[הdJ ޒl8X@ P!Րw $O&ѧ] Y0'\ wUt,b9+6 OfqO+hRG8:w[rr%K}a@@@ GV%(w&MLKqW뫍moXMkE%֨OTϦԓ2zR@(9-$TNlpEN)΃TX efMhC"/kʱen\3ֽ/>ʅ>><[~#S6l9z3Ad/l@E֑@;R&ٱ#rڤ:-< =F>o:$FVc+cTl$\Z@@@P=K)~Jy;*EկW"WI$5dX{rpDR!s_֢-.k0eNQ^JO-J~IǷW`*!Gb XR|&"|Q!7Ews0DO 78&sa  Bf++N{imZ+/"n:xsp""hK (#E9ktBk!";E>| B }j[BejB䉜EJȎ4um;5uNB[ hH ikkZcE[xmE.,Ξf>@ `7}aMЇEeܹ[h!ԁ"τϵBh&oFCn .js^3eE3%7e0uNS6,F   DB ,s0!"`@oQ"m5^GVB߾SZxgUK+ Y>#T~f`+&V-M<'TnȝS4H..v$TB[@7Fm:JuHIˡ*f   r=n\vPY wK4xsZ6m}zH`0ZfBP8ش>I_Ot&>̾5 M+"\sP-!|rJfyGuKE?v(3y>xBQ!/9|)afg W7]>"a_:H]Qe[E"M` py,w ϒd8\w5S{ n]j䳬,ηo<7g07A7]#-W ySS?_$5w,{rm"❗Ep#[D*\g/w   5IdS'% 5B 8(e0-KΡwK3+OBhE+5Y+PPQ>ʓ; } 3輵paYgƙuօb}@:AJK&uog>N^T(ǽ_ hH  PGN5WV@feWpL>@@@@zQUH    P/ @(WeH^# }\3+ ӌ^f(#OnM .Ec(?$}L''(&'(Xy im(qX,2eU۲#G/ Ro   PuUgX9XnhCμl^̑yn۸~%A0i2@@@@Leb9)mWi̵m;@UCS0G0eK6QviR}9=!xyD&|D׵bFBsNJZ?!)tci C#z|ڐ9A@XNP6xpp'Rd,r*ښHGH"K>+i?5%ߛ#3PJ踄R"x#`rB"I _ 7a#7|Gur4m SˮmR`g|}[Tҽ^Jm ;t^b*9rxlK3T:r$rZ~%ڝ0P q4`ɼM]T.L1])q>Ct^\N6(! W q+ODwI8{7ȢC f_N.k[{<B=XE$O(-׶CʼJ1cF|{I&%^&L{&Lhvt;B/Ǐ[\}9=o"\@V~~~ú&B\%zǏ][{icZ|cnrHkcW2NEQJ&wɖ_rOHGV䖢/R-n<ͻoB9<6 O)R<|4v^9:X93ܱɣy<7A.Enid܄ |]Z/WI/ٳg;^4$\ #y(ٟR:?n ;&z"^$.'vxs2VJvYt;ȜK{w[oexH[T߹B6n\fT\B"Q:VH/szpo?5CI>8N%F76u+ML:p6(>{f/pLl@ PBsaKJ#V=f %yś͇A>=`瘦qmz*[g7Pۭ #1C)n[Z6>ZeIVdmCΗgƏ]u'<(y=힢|*aЃ@<=x9~ jBS>7ùD:OyjqI8jR~p4NBiR4HI;4cD5){4اKG?V}J0FvyK$<Hv5=1ɓg~D.s:LN+ u>bY("9o{bii?a'*Cv{Z|G f o0E,Y'9Cb:4)!f2ǡ:>*?>$~[o\l@C!@k/HlL9,<@$!K^RlC8ó6:KA"D=N j4YrOӣw?K9;F:ۡW4xQCK 87g8k e@S]MSF,ly"y{b'cF-p6DQG=[&:p*^,UFWԪHiǘC/TuS#~=~ bh;75օًdi,q)>oRzlھ׊_b .4wx=caI/v"xlЬ{?*(TœOmqɂ˂ ,I$(}9*ڵrXkR˓sIG/gD>.C4t':&!T~iTSD 3>D()]vϧu4MC#r=.'mNפ3c>WI$ӽKV#ڠ8j_v5Of&Ǝ=mhjU(?=D.@sx:~ɒrwH:ȶzbnCе'WdGAWkY|uYy@/ȉL;{Yap83ד3)@Q]u/}Nt$ IJBg:6qt|m ,S=^r:.YY/+)_29>~R:қKm|!Q-b[n"G4I0D E7\m3b/Ư/T"rcxY‰ݡ&L%a_.e^W-8~a7FU^ y_y׉>#PYL=3>YOm e=?=Oxڄ_vD3\=gHwܱ"LW7 oKk\ R'Z:9ܶ$^>H&o:)GjA:%[ Y>B,zыoS<+u8:uuJQqxtw(ʓJ9pkS;s'IV9g.m4(Iք2٦EEx6i){2|"ZVw!t[|gWF_9,tdm# &!Ju9C1%e^oJGhgO=dwmLY|SKXqsSjǷw;m疈d0 mѝw- ȰE2ԡn2eXU>\ o vu=N_ wx&(}VZ|{ozH% &Մt{o:f ejc 'CyXd\nkyQTv_նbDb7r)~!Z~K-M G"A+MqA)ŝz%\>=ߤi:2"-4I3 螵Bx^HR nE17TynӒR('K6ՇKx̿o=?C^)eLLWp)rsIOL{_d·;Hvdd@K3)>_@@@ e'$YXV{ g9ҥtCE`x{)lvO( }|RWzȍط.ϣ%Z*$2mDԔc =Nӥ$p ƾ^@>Hn4Y@6մ^rӰ-YY^AR+{g+2/L%nalb SpinKgiU$D|c 'i/PZ4^+AZʲiODa$4 ȟ~mKɤ.=$8 Ш»OÿInBRPؑwUW~b'ݧGX^H8۬͝_tAR-gվwkGܩQPoC:F"piKF9v᭷Wl\CWD9݂o k&_FK\2z(in;֚x8M<;ߥ+gH+)@'+jb*,P+Rw!Pz4E.*+= H8MOSr򘟎r6ȷgŮC};GMDZ˯e(=hORйٷ B@ĖJPN+d= axA;oT(FMHGP3& fw ;?xI,!i6:vRqq*RЃr=$qSWSɅ:[ sAR*&OYNb"ާ,sm?Rִy+ 7A_D᱉c݃EY(۶w11A=; zOtzԳ~Y t@QshG%vN*d92PgK^-GmDb۞ M<웆F$!kUr7xѣ$޽<M}UǶ%pCP{6 =Q䛪hH;Gc.oL9d- |z /- $zy|QlݛE"|uӖwڷ|$KQ4 @X"կ!EReHWOUc> 9򟚡/b'='H׋]GS a̠bPzAeCo#~C9$ӃC)+Nɬ7[/;%' qH G[ґaY@s% >&W<},6/ O_A@c?{$)&f'`OyMx#$rHt;-a̮}-@m@:M=@ojMmI'klelf]5 QĉyX9tVjM(kCv>u ))춀 ݼ5r4z|ExxÊA\w&;/XPBu[Cxuqy3B>c[Ww<.>^_~9pَM`W{>3 VЈ+"B/4OrSUSj4k5Z2j:KdyzZI-ܮ))V$?0.uwJ;-͘,ms)N%۷v<Ьehb4C AfΣqmK7<&acuQ^t w?Mseʧg|[򈄄FZ)9f{cRHx8Zv{Dg4,atn4H{F|jaL@@-Z—^nx$mf$0{9T;UP$G%$<utL4s{lr4raTRCDe38+LS2kx# gOw ~}_ bfW5/'>޳jhSUn[BWyKEDU S4{zDΡv4Z˪Խ4xGz $xM?hBSYIӬn2/7PMOkMK}|f<=nEt\"OK)-Ž}SH$Vy9ܱ$Ko(q%Oy*3pZ\@@5 ?7=CV+;"ͩ.蔲R˱ɒtE}³)=zPsәp"g\޷Cu3έH<)Ph>,}R|gTF3?ٽ^.'F4r#{:w4)&cӽnOн*\"cfx6ܡI-)?^>l1 ]@j~݌_ҩH'L"9g7 WPorׁ5@%PBQCkʪa5~+/~JGb`Rx&}:%'W?[|hs7H4$kzhۦ4#xyvq+ID u~?oG'ֲ> a}  _fl0Qj^6 ,RT՝\Ptx2B-1Pgv+ؒ$Pd߬ߨT eAn{'=d5+MZyKk`1IcdyڿZjtCJ 
i$jS\3c|̏/>ru-+xη]ܯI:UO)da)/]%ur)Cѫ}G ewe+٪lY lFkzxhJꘜ-RvܚZӃq?#I8'ʥĩUF1>rq+I3Yn^' 2==1'>FƏIEML !AF:ӽ~g>-MO[@n(Z_E/$9   YOV3LfWò6E\Oy9Yk'fGgSlzуV>\ćGN$񼗬;BNC_ShD -+Cz9.?$~[癯&Z_& |&'h7pzkU6w//PvP KzLv'c3=r3r+y3$߄ ^7HdLia ?0Vn   P {i2J.kbw>u|=_>R$N=J_sy/w+%ãϵۡo_NaY~T;wx^6"!m xsשK2$z+RkPÎe] iFp^YVLy9nЕV TgOi덛T"(=Ɋl8g&Ǿ4s|73^i(`0B3_L4MՋ˚=IY$z'Kgeڪ2y$@p   PMjGyU05:>hs茼f8c??2]t섫Meuz-L7^T\Di踄ThO)Έ/jY?ʜB־.ߔY2w:sa$& S ]h'{ 7IX$@.5!.spRAg~ ooUBrM)|y{f;(wb_]7_~ddש/=+ y$ΕB4񡩸n2ufBDcَ6,k%)!+b~R{}F¸OF_/z|ίJD()VhN wl2 /d歔g5z^d&GN:X#]Ӓֱ4;{Ϝs/EPQPl VںT`;`@@or{ WiE8:VkF]n:~JKPOl 6]楘 9Gt E% ,YM$C$ޫ kD$@$@@SXu PuJA ]P kzVVf] k跐{++~ٹZ~ݿgGaIA-kHFm߈LOi(󿺎N#r#'p| }疗_!!@)wщ[+e_aJ=8I´ȃRE޵-h$#F~Ub}^+; ng;u2eVxvW * ݿUVZjGZ-vAuZҝi+,WhN̘MT㏳Dx׮?teULտ#,;驩AơG #G4jY89yzT 8 tz簪^h4!EQrnv*Yell+E#^(Z%#EVZ%ǡ*(0C*-DO}MRlneN9?3hnFAZaG?e݌ջք s(B}y}0s^yڝ]<sFb^w<Ɋ\mw i6rs:}=b{Pc`B^yV{?XQ4 &.Fƶ,$e4v~8ⴛK a돺}=[_RPr 0=L'ΒƙB83QNEH:TJLQQHHJ %heY3Y">N5CXnuhdpo蔙?H(+X ;(a<+ c620'v;m8jw(4Ơz \A*֪1?#nm o`7yzbt-ΔbLsϺR վָqv/궐ecdāY9ݨ*V") X٭|#bQzX{3꾅t/mdF93d  9 @\YC W6#Crj׼&&|IcQ+ʈcvyE#ыخW*V/,mB:`עɡ95e59nQqӦuع9 2$o\2Czz|yzer!RPh )_t]5 c UY_ W69bE~ 7Gdppxܢ½#jG>\A`qdjfqzAqRO-7._z)E_L?y ^֟t yLºHH?%`" VMO["S}53`| aGܠ ŲezAބ%OԱM)<Ͳ*tB04d1/36ٯ}[aW|Ew1#ME}"pQYC4`mhGJd\[mBI/zocG\ҷjWgւ:O+ IC1K-J%7ڪ eVRNLBJpJ~{}\tE*! a(| "\늆}jQMx Kp+Bdl╭XiÔaIxc j. M&v8dy܉f9i{ J8U c}<`v7X.QeI#&d\r]$46~sΫL5[Ty}<,n,NS蜨2U^X1Ng JJq)d? f-cQXI/RSO^gԧDҋPbC~QbbZ[{X~R[~H]q r2W-̯Rg5vxjpo%Щ5׈|Ze~vWOکML9 ~Un?se^K8O/2p,φRP]. _wMzQQPliax7:K3ǎ-yq7Oڶ#`ui)mjx%!3+nh!ovuWRN>0jTgP[GAٵQVZܦ9 &YU>Qj5aXW?]?-b..wZz=o߀:[Enܐ~l{ љ-˻P?:j/+<3X.2:'C,\Úܐ`e?]R5X̑% }Ecq鹑ΩR"};-&+&[cjJr_/LQ _B[s=b(ΛDfScg_ ,G<|ȯW |]X@f n/ OIHuhEYmmV°~![ٕfDښkYo`%AY4eK`֫Y'$ve WĮKP(PDC2]l[b.߀ﱘLUa~ tӉXb]}`bT&(5^[ TJ-ܪR)pS[x1fF;̏]>_ iFA9yQLFh-ӱLU>0*YꂚW|__]q^c^$@$ faUuT:?KRvx Ir2 "5jhe)dC[Y?h(q柾#7BJT&یRe˟-ۢMƓ_O>![k]6D>Rr{tll&5C;½@eb#$}3kg_˿{R奸(~}Q^ش{߀OwIHHE[v.Z:5XrVyX%0ָ#}zksfEP&eaA[1?i;.|{/{#';5q1ׇ%v'/3qؐ-Q) ^l)u&(D@]\F)k`ZC`!}0 $߹ZH9r˄fB\/JF1|qŷjd" MO? ʆV7c \֌=S4 8N/1~h d᥵*M1DD!RTn#:1\|N|w7UxDX7RWU|2^<\)ZFo|1õB^cV~;^7=Ku@xq$p((2c]Lj`o&9nJdyPEɈW9pᶻĎ0q[&nFTqG)淵-͜2sðgeiLW:[g+{[]|#mIHh1EY]:]ߔ {ECS7'5x"  C@^x4)w᙮mJr8dpQr}L^}r+ȹsne|/n." 7YsodH)HH&Тeiَc¾xՉn2Ȳgz_oޗ=ZۂDR}2sسsE׾~x ̑z'0h2P6bT{ γ!ΆHH@Z/cm'5䓢m+˿޵x]`׬ņ`ь%f;&Zod}y1yGGELa,~?mw?%)b$ރm8+++ ^sz(eJanL5_p"~յϓ'&_}3\uKzp+i @tEQ_W/j+o2Ujw]<v:&Unp[x]0R ǏߥeFnE\ݏ' rD("pػ2?4oTUhRalj G{({:G|!fߢDyr;%o+M :bU)q1y2I)w8,Y6   8|5COCQg\)H}?/34>O/,(8:`]^ Produced by OmniGraffle 6.6.1 2016-10-04 14:46:19 +0000Canvas 1Layer 1 Compute NodesLinux Bridge - High-availability with VRRPOverviewInternetProvider network Controller NodeSQLDatabaseMessageBusNetworkingManagementML2 Plug-inAPIManagement network10.0.0.0/24Interface 1Metadata AgentDHCP AgentLinux Bridge AgentInterface 1InstanceInterface 2MetadataProcessDHCP NamespaceBridgeBridgeFirewall Network NodesLinux Bridge AgentLayer-3 AgentInterface 3BridgeBridgeRouterNamespaceInterface 1Interface 2Interface 3 Physical Network InfrastructureOverlay network10.0.1.0/24Provider networkAggregate ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-provider-compconn1.graffle0000644000175000017500000001046500000000000030714 0ustar00coreycorey00000000000000]mSȲ |yQN6d7%MQuK؃щI[GWdJlK=hy{zF/7~u]@ePggpZsh?e/>yY_et6/6Q8D%z1:)z~U~(j7_Hn%Ѝϣ`ģ~KPc4K4ؘT]4aślz,W~ﳹdԇ ςTl|c1#NMhA֜9I΅DI8[s"RCH*Lo{1m~ڮ0A"oKIjbUK'syr8eʆU=ɷKѽ -?ӽ$0sէyU?nןKT1O0/QafSlu,SqZ3I;?˜?~7 m0{~غ2$9sgYA>AS6`qotӎ$`HBGZCs*C$s=Q+k%#!GF$u%$qrRP0]]Uי?] 
68FlT>BrrzY28!ڤ`F$]0*W(մrڽ7ѡ<8~Vth7j?5UbA󚎪'I'4Ϸطxo?:Rius,$:FøϐCҋbZc,I:J&%h$ I~|9'16:n {P½\ݽ+!ʿ<$ɁFkiWMvfqlNIvAp 9Ž7bŏttDO>>NSt|;Tgia?|<=(D ǃD|9 G>1\ǘ@㡟􏳯\ BBX4^.X.Q.͇" FY08ttӧOOS,)qfʾ]g-΃aWRB0T' ƮA " \spG=d A`!)EXxBO} tH)]O`37$4PW`GUMa[%DN;pbM^P۫u)uoi柀56C,ӔA_'NT_FXgf`cnd(sQw8[*0a8opO5y0܉8W| 8HRN )jq ER{1DŤAPpyaw0{ \aH&fLr S VFM'AƲ^b39_b1OJ}[3QxkYKEqd kȁ:xI8',Syֶ%Zkcs2ags;ӻ:M&hD37Li%Kc4V44¹cEJ2*P<թ5@!dcKPD1Ew<% ٮf¬y"߈0Xs+B"{gCVA~X18\B20f#Qq_Y8 y~b7$2+/,Yܷ\ra,ج>st%de9Y˜99ǁh]Kp#qBޒ)\|s촻D{0"dcSjIzw [}HXz=0ňkL1a ia8hRvE#Iiқ7KoVKl~[c( Kl,Yb{~[+gϢf҂FDonOaВ%ÇD `'No9,ɳc]'vΒ%7Kn&y[bR4Fggh,KSzRx /.PIFfWj ˮᕒ2%d"^2?rRr+^ `! JYvku w%61D0#2 c Cv*djIdwXq Rx`|-uŖ|,<~~:=}lj_g!\W!}Ƌ!l?z}a?=*JG76/Q3ryxiN4>~BE{}[A1W)9.B*wYF]l]l]sG_WIܣINb. N#c+"QqE1fZ5G Bh VlZ'Q8-qZy/^f~?`0<(s @K&a<p V5`Ң0D m/!h}}'Acl3M^Ix$:q>.h1~ߣ(0u[f sIaزfxX $LDq;Jd^`Ġ.eB}v|D/iͽU Mv٠`@a<.309@]68_sg U*a0oJ!?g"v +"=^┌Cv[0I?(ҽ,d{:LؚӞ΁!SSfr U.ƈQ"Ecu5E`cu5yá߃6L0NiqU}mh9p3'LF ?4LMR,/# ȗwLt'}ӂQI3./=UěeAp lL' (>VLӜ'J'啧SP u6Q!iR;r"e߇zTabk5(=7tk5lDs$]miPn!}o8 e4üAuI[qjMRێ v&zXT },$_W6hYV)h᙮~pVoWRXG:A`?g9V?LϵWTw~KKf8Ӌ^:{ɉ4 +7 tYγgUe;?2=4 U o$+D׾PHyW_7~H7&=|}yq|7؂ɛ&z͏_>}Eo?<2~ /~p{ ̫\iQsъMS=ɹ3 >0xʊaW)5M0@_x#}9mT_AEڠ @(^DfxkGAF-?ӚR8 5 2 1 2@IDATxxUϝM&݆ĆcE++)X!嵿b " HQ!EiwHٙ&fC6ɹϳ;3ߝ{F$F! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! B@! %l !PB>3r=y=\.mK:uwhWtu.fDn4#ׇ$9=4NL9ŏ7&Gqƻk=':ECž1)'_/8$sZnp7T&l AZ=lꪫњo|LD[ևmu=e';^6[ |y_IdוY+(Y+#*N<|l=u%ee}<95ucEaj?eM˙#WM34)tu,@ :8sr/oT՜ɉߣun 705Ɠh@o@9ӜšMI%ᅤ/Iя<)#yDhR@1?4nz%3]f#5d߄az ߡ iK2uy;؃ɝ5!CR3 Kt0ӕ"G@4cYbBw%ŅNeGöyrf]Kz^5 ܏S1,Tt_}sgݑ @S|R?M[4kτR nn@V(U)5Ջ\jOL?|y*VɣFV䞔PQ9Ns_t+ʛ;g?B^ɤ2 *O(uR N<1 2L_pq9Oq "$ʼ8#,<IҌc[Sc;} =ؙT?#g&e 6>*eo%҆$f^;3; GmMvfhz}_'e`䴌{0"mV MV=ʜ= RS܍363BSBk[Cd#k1m12u{KAMpyn—x >~QI?`C"ނnw{14(>&Hݰ!i cKp68wpj;ҽ[W'x|lS^nzFWOLje7aP  L;HC99c";^^ݘ fk&gn v N9mjP=v>lA0Ey,57(?JJ˚a&9Ǐ暏%IdjDin7quy3+-uo-J)'x@ٞ@ yR;"ԏ>M2T.aJ1GO"XҍWX 7]e/C(qhWO3iÛpByO3S]k =ϓ1fw| cUްUPwVOQq]-X[ NE}r{QA: f$ Ncd hNLŽ_;CrFO]v ^Hn'~rBR~8'eReÃ)Tm4_m<q,]E ۞p8(AJI_aTPj`yԔJ}熢 Sq3?JsSUxEc$t: UBxm|5GsuC> I`͵,JƮb%v!4k3- /D+w2+rq'xa9 Fz~ $`~PƘô[* ^pCn`s5p;NS- 9:7S2R߁0NS,Nn eݚ-7s~_1B1}R۠%Lӑ{?psZ9:m9]q;ͦlĔ/.87MG}/P9x2Ŵh8ù7ce7xF8Oci ^g{tk_pQUPw3 GN}x t"q hD $^0ʡn1NCAgTm|$^tLjcBB YeR}{,` w!D(~[guݩX l~nCi[O<:{Pfxc!@1f.{53C *[+6NL>5uZ0}ѣwG9QYH5>F>z T>1:t ѥ|;QVn<ƶb 8%+'c,3%p,ԥ@/ɈAO|@>mJn=:½7JI+bə)Urn >}Nخk$j!gl'*79^5yk~3p*% Ȫc]7S4~BhY^}YsHэ"ee=^eYV;r.c/_G6%jxM]*v7^}m'{q[LK))Pe{}6hs*MLKて j:(kt]0Q7Pwa.\m]?/ok<7.qKYҨq2gnZј%0~Q=zr3&O>~_獚ĦUx1hR>PEU:DzYiYqVteލ.~O|dQ?T()ڧ罇x3'f}:|\zTӴ/?Ue?V8k\M=)[#32< L<'ĩ' sA{go+n S J9[Amt35_Ya knxOe=2v?o[Ʀ:l'\v`Gk\?Ony#Xh;/?Msj^S4b# ƸVzAkPƭ̃akS N\aŁ{.DM ˫JuJfts^uY~ .vy*sBaS|9IKu<8x̄@K~dl_]{0ØV Aʂ|$V aS*cdy̌o<9mjJ4z@]!jkLgRzuh?:=KGt#kZ$;o{^l R7аba hOS~8?onx nhƫb_ۆ$fb~:'z,U?kB iK>wE\iqy̎'#ʌgิx78Q3gw/: 3-ka?|6gɬ9ub;\CCFY_Xl>_h(ܨ#KsʺKY_&B`v[إ#TeM/ؑ6k#ڈ砥gu}GX+ze'AX;*&Txe5GGc-W| zc <6q/`Nt\Dwԫܹ`>+4^lkz0TE*~\54=#`qo5G)3h9Q&K/r& 6IH]ciU p/8DցWdV"7Oqj!hO'ggc$Tkli?WYY<g\^߁USDYnŭ*@y[+pmlH_e8#x1F MuV .0|t X&_]½QG'JqSƤ}\-=va\шOMƩtܧMí(.&.%Pڋ@$w=g'?͋GO)S2S>qn>X o~ 1b`ǯ +.4/>k?K,ȞG@/a*곓e-w5 @qX"n /Ly͖2wG]@GiBbJ|4(5/agUict A-.p¼Y;jJo(^@̲f s{&s%b!4z&V<V!A<ay'ҳ1дb39#e>4Xwy8DfK eYyMkG2/WJ@Y`zJ+q{=rfA_m_w#xAn|~@Y|M˾/)WarԀ">R#/_[8@nΚu/xU/SB6P#>{t/uUt;O Z&EyOXK}mtPҫ/LJH6ViVXQI%~co\9)cЮbeay2Sy]{ݸ>5m""J*ET1`q04Ū`Uw(7(_`h͟lP}0KhcwR2_tx񾉗nd71#u*s 6x| R}JN+ȉFwPqŭa9:an y:wGƝ,ǀi8y(dzzV~ڝh@Ĕ1_⼃pÒOp=_н c rzXעc{g̬h6I9'@=V tSevC//`GW_X.YSL6ړ{B}/,kSWk S,)ȯ.r8q`3Yp R1B^L5x5 ѯhj7xa;< A/1 _u5jHzv!+:W?\цiS VEQˀUE3 &N{a̅y~f>/Q=(}?[4B<0?^؟k(˚rD#0W g;5lt6_6ȵsv>zơnA,ܕ<[. 
㏠.}[Ux-b.^f+':~{KNlf m]"3 ;T;& vA Kl?lM#5iF:a4G|k~Μ/],D㢦Oogox I 0Ev==HxktnF:Io!K=tHw3 Z~S%_Q ɮ(|U=5` * >)Š,TYne{l/0G Eu h0 Q_ nm׈!*rY;*0\ݷ-rҗ{r?M%CT: x"SHw5-w0L9tfo!He-)E`}me;A&{Zv7.=iq'DuwBžP5N?=jx^5Ʊt>wKhB^F=s丗WL ^:a=xTXaBU,ʖBe{`Z,8ƄfBtSԢ!<6+-^kADј ddRpY8_Mo9$43_JJ; L(LuRˮJo u^AesNJ(B܇\On߁ƷWJl,fl(W$l!W$\I{rh#Jnw DM u%ՈJh~g۹.<|&C]eШb Oaed8aHEG%[xiR\gd(:#g@\`ƽ^&~7u_E n'|/F%'. 8[t}_1+g05ѧB'"* WV* ۓs#ʆw:mgzl( > 611GWLzE}~$9h;pY9t\><6iH4qzrPхO+5u I6(“$FC^0Is܃q S.o׮pőb:є{V"=A ?LPDVm6.!DV5h _1& @eOigd gtgHq"ўEpc O//a_v5Jo>)TajWrG$Ϣ bkDDžGSLrܗ aoO_ Fᅶ~m+?NWt p+0[h +ml\cK~CJ{&LO!HL18oi֚t#>' ں"izlkglq)b{{Mgt b*3Mr$ PA @K6;!2` apbp"u,ė#Z#v8˫ \wMlF w'LBsX~BհEX*󐣋ЭmɹVإAa%rE|ցx\V{'lm]5CtByFi-ƛr4+X"G;f$ ,*0㫜I*kg%ӨhНv|E"?"p @SW77i{xy<@ׁ?3%byr4J}ev#w9{Gh$7iNXV\_cf; b%AsP.pЁ8#W1Ghb_߫WNXh1ޅ&;嵷:FGc48-I>DT7oi8 f#.5 dFv%H}tyHDa:9;*o( 䈞慮>z3=9@EXM:5ͿrRP>o,k%T_yivw쬊d@Dy12l'G#P@R[eJжODJYڈ g@T\4y$XrO?) N 'HDp<_ P3Й6((}"@YG|*FnCwM]N++9z&ܧm1bퟃ&ՒB }ٕa^_4$0f^=5atež0rb |gVO@Oi"@J (|CU)sWm)!@th@aZ*Lp{`?Lϒ?ݧ35jΟ%ՓANŐ!꺼ףQQ۸ƣ/@,v͕*.E{YaԻIP[_:(ƽoƢKK hs!c41X󡉰D_Pt*j]gXc X=YI~29Ja =4S=aBr\,z^}L(/oE/mU#¿Ƽԡflp;"P@\wޱ@kVȊ/.@W W/E_C/6lځXC48S'5nົ߂i_Hc8c!Xc.m)KӌmaXGTIDAT!^KEyr+Wc-3%?^Cx`R!hLW]N ӑ^ozSלP\`XNiSDd2NLG!bHN xߥik0ڂ㻴`MzEK_'VµwPʃF]Y'D[;/q\h;!g1!XKU媇JDŽ|_SuC\Q0r|e!>hRb- l,:wzP2L,:d%}N>Sac3^-5Y#3s~k-WN=$C}gh>ts u}xr, +7 moeaq^ԛm>ߴD(瘝S6c\K7h%v'CqKnm.r3!teNjVh9ޖktF"L#3 BLcYyzCܗC\e7}q/D'5&m45y]$s6,΄68&R]i W@6ޙ Sj(ݖww >NM%ru0np/d>aϰ$RnX7$H*Zۧp2Bw A؀^Z0m^zbyz̶ƣpD.…'1kNNwDU/k\A{Oĺc[Z"{5߆5 BO*1_ aGqPʶJW c[qI-sȥNcEMh(xw~sq!4#wO'pH, {,xG>S&͉ɀckkSIN2kՖ]}?:wꀗx`Wt~؈(>}$GT)ӳ&Qs/7YW\m;%Mŷ`/ތ߰<84tՍ؃/ ydkպtH{ Ë4Ik{y2YU =q𺛋/H4# 6vӾ L{({s ܎-싌'ge}M:mϊ:7 !n)Г:F~yI-`t_4"O4fNOj˦+W|*<ES~AT^zimTj_XO~xsEkpMQRjػi!BUM/nt-.Rpm!bN:'@)}a.h_*ܤ[!ЯոIAX ʊQ .7-E%^ְ` _?wHwJm۠aX!x[0eRሔ'}-/.޲[A>yvNLl,aϠώl MdtǡBR}~+W^y'f&лUE([Oمe_+h%v=%N6Ʊ;aBq"`45'S[ķ5"A?@Uhm_yQ vb3s m1^n(DCpۓ!SB(>J!4⣦&j(["5,N5r>gxZd!xEO{ܯB!1hk&1O1bc,3]BD=pQu9I(+P-MJEaTU&f1BS/14@RR4 /hUQP>iv2҂Qq ecG$pBB(o,苮'Oaf݃uq0Ӽaq{bA6// `10 g6TW|c܄ixN* I]8xJ:{(=iZ›Z-."G4\!fYEftBɆx'_q٤6hsSynZF!F=,GrF⛽ WH~2/e٤k$Bfhރ0(rZbV:UʔUXf1[Qs۸bH^)^-2 a*POڈ@$\K >)pWڱ`t/'{W.Pw| Z7j!{,!U^Е^GBݚl0v; @i(_aX-vN|0kv{'.c1r; nt@غAᵏ}T tL! s̭3Mv>,u/ rw֗]!/4*.3{'\WAG%7+–㗢3O g*&b˥?KN]M  "02.d`]:[8D0֣Vvq ޭVF"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D? ; +UIENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-provider-compconn1.svg0000644000175000017500000005642500000000000030113 0ustar00coreycorey00000000000000 Produced by OmniGraffle 6.6.1 2016-09-20 23:42:18 +0000Canvas 1Layer 1Compute NodeLinux Bridge - Provider NetworksComponents and ConnectivityProvider network 1VLAN 1 (untagged)InstanceLinux BridgebrqDHCP NamespaceqdhcpMetadataProcessvethvethtapeth0iptablesPorttaptapPorttapPortInterface 2Interface 2VLAN 1Provider networkAggregatePhysical Network InfrastructureInternet ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-provider-compconn2.graffle0000644000175000017500000001216200000000000030711 0ustar00coreycorey00000000000000]SȲ+tcO6d7%HrU-@'Brd9~{$zƀmmi^M{ſ_7?qsun]ɯ^>{?NsA?u>:xn+Áݠ|Ӌ/N|?yu/vUD'/ыQ;x)4)oy\K|E?M_* .xs÷<ŋaRi,K *y3j: wqi2wF_y/˨ .θهLxcs\Hęd-G**%\BRa0x#˟)q3I;/M?{^+^w3$9+gA=zlǝuO$ipa i!JQNt(Ů3L%#HpP\%R±D@BBK(2LkF^}]z=GpګJ03*@/b\jƊf^SiFK5YfeMf`-qs+<9M>r]M%|: ꪐWNe\[iS ӻIvN[ GpD>=< s%}?ENy]Er|n(_Х1ڧ=/鞦߹@/\>2C!AiVcY"P|$YAy ӯs! 
..S?::3g},9qςwMV.Nh( @p!781ua5+"0Fj%.\q@R 8>8H$XdkfƂ8'b %XQeqq C?/s9@38ȿB{4ZzG|Qy"m'U źKs]<@ vѥIy߂8Q.F:۽.t6@MXr^n9  еDl5 4ڛ -~e<8Y&)A  Qf'fjEBgd$gVk\[ =P)#,Ft}Q@C`@W3-H܉"J3gE` I)=*aR()D@RrT.22 eXe*+ 8d|:=t0" J@F E e< )tF1zI|H @uKcP۸}XpB 7Tpi".6wX΁% {؈yA)Do"(`JsZ) VsT؁Yc+?X6067XyVҹY)QԭFJ*!E3}k}r~ =58> } }lh„8}v<8.;E봞{$EzaOR]5b`d x>')1ڨ5(gIz6YO҅&`-Ӛ+qz\ XarXSFd(+ΓUx83=D{Uc]R2GhXe sC$Ect/QңΫ *BPP%)G9oS^XoK]%"@T`PL% Ff^`Qaa,ɲbNaب`Rjea¬Y  }[VTK }3נ0|:ζ3qƀXѥhiҞYr&{rLEq"&[&q@,L &q@sc;v#hA6;f{JM8U"`q6ܓ7)-Yxfms˵m6ܓ6.,Y`fmcM.tr -Z0&0mv]Cg͂7^)fQ\hCH!s$^0%(F󳙓 1 sD3i*mjef0SܕŒ`x/s)lM-'IƳ,--g.Q<6gl`/!xI>xP)g.H_Ђ&8R1Sf"AfEJ \*fOa6ql]cuӅ<[ϙ<̎,޲'F+ɤTxSRL0)5 Yq6_J@dv:n@=S⊊8HlJGu-XYئ|Ri[|Ԧ'Aj3.A)_$6`CZ[<R(T36%@Z^Z^V-% )'l#):iBUJ ;%&4o@ɣs%hBqfJknca KX۾ 0 %ͪXZ?O3`BCf]I$t:NYΤ>:SRk|>ܕ1’m9 =#BS0tQHcp<%062t oA:w`f9ovkl@Z#0.HP@h်+JTXž90JZ}ۅp yn/(7}Nj3N| қǑ=kj÷@q HKP\ SHAZH-s9bj2;8'lsC z w31ar> b@O6* R9-x1~=c0tfqU5.X8IkJm_MxT^$|S @"ĝ/b+(􃳂O&\:.1i(s@Hd~$>xQr&O[O5cϷo]W~{rG<{)܅{ 럟ϯwB_{M(wU`Ra-xc3QQhNܚDr.$L2Еr.!@qBp[cV#R8{|g/}.2A{Qxs;{_Wݦ>P&FE/Ρ8pMNE߼~Ehk\@ Hѹk KHzYe# -9~04Ly}uɴi49@I3dSfo_yS\ Zn *Df;u%AŴ"MNAXt3t(߯O{v ;{+H.A AQa^qKk][ve㫺Z>z\pu8Oi=mWe S|B -^7././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-provider-compconn2.png0000644000175000017500000035363400000000000030103 0ustar00coreycorey00000000000000PNG  IHDRuusRGB pHYs&:4iTXtXML:com.adobe.xmp 5 2 1 2@IDATx|eߙMBB]l.vg~) (It%PoWH8yމ "*`,HS7Ʉfd|63[;} ʗْ @\\翻silg}?qI\e";~VP↓b +w w*¼qI 8p_ё D#pݬ*]Vn4ZpwF*Kï%5xRvp 9/Hk]q Zc)*JXe@7QFcQfQC2bĈP;\͹'77tyE=GW99z怤.J:G)}eyRX* 4E}s1n&PQfߣp&|T_۪lU(8Rk`Wr}; Z)D+RXe9S6-0ۦOeӣ, _(]ӔtZKh-TEKB`P;׸s?Dsk'XV]E}'륫ժ#hNyHHcZLRaVZ%Cm0 s| 䧣Em} sRRUez'Ҕ4Ze0aZCA8; ':qb=g̋H p>ڋM`ErEH"6hrD&}f1?A\72L,avpO0ј_wmJ+Jd?,.y"ima_4eُ$`Xy⮲>>Io̘ @L}6 2R7욺/VfΜmv0mR<ŞZ>;=__U J4e[n,h[iu֔>x ?MM)odo?F۞ߛ7b{wnGƖI`{!g 4ȼSbSL۴BϿ`t"ns3=i+S\sݡekӣ&O7ӱ*ӟ1}EQu1 y^+GO)TjY9"uN^d\?Si¼# >?ep63ߢ_ ?)H)NffZ SZ:t)X "'$p}QA05A񶸫6߉c+ 'ׂ|ؕߦ-=]%6E^R>q g@\?_7"ܙ(Kp~ y6kC/6 󩢂b #n-'}p~40jqJ3鶍H ?צzV[sf3]#7}r^ ? m;&_?n0azfkc )_7q`*zߥkGD )5 #$=HWc-ϛveU+yNR֎}9n,{&8|>#[\}cK >2{aƗ uʎ̟rc(oS/V&_xcp!1`T!O>KD}Z·$8S҄~ dlsSj~(BG1хBywۉL+HzTX }FK&L]&~e?]TsQ85N_kXdG0veP]ɻczg@8b1ZA$~2"i>L9H? of# Qj܃G݁\סw?(=dSurݫJarï5oNjqG {u4ot\>ebڇ h+?WБ@*p~R3 loua;eb柝_xQ<}vø! 
MPb´dz ]8_Yc(_c4}RktnhǴ,[IحLӫCW"1GƔ ADyu>*ºkl#x,OYh˽O>Z{xpFW7$ 5o.(OB,z~m9}ZJCWgy3woNڼr(nR@fR@#pA/ɵoO vEXto!mhP#{( B!vNqA _<$2f * L|>jI5#dp+%߰{y0X1ES[!+0AM34f' N 4&z=yɞ\/$F܎'bx&כzoe0}^yhY;z,I_G/>Fo*].zW\om 䡂_akO>s]$AR`9C #}rKhe5Y&\s-}LQDž_ٚs|k ?s8u(KڮDI}#AD#g";L:UK";IC9Np +<;$H}<ZN.4;darLan"o,GmPJ`Dx N(Z&N\z[X% l?D;C9Td}PL0 # ?:~xm0[0=0t ̤, ͈%?ZvmKclebЇ@a~cկԦ:f- ¯}6Md}:mxaߛXX_M: FDnGHQcLG$=hceC@ആ4_pHVص?AynBza1]se43Vf,m(, +!f]6|%n u,m 5m!3<Fq7þW}.&&M2we6%a0 x}F:=/oylcaI'E[Ȩ5߅Q@zJHUɬD9Z=3[bmoipڭOS]'"u_[\3ۧvKz?M=felIoUi?w[߫ ]?0i#ةRn2=a{O٭&J4[*$ E7mu؊Й!w52.v0ۨmѨ4oo+\ n^Xh\2T ǭu0:yt~h}Wk`A *[o>˳~ K5ئ#6!hڧ%A7QVgܕnÏ-xZ[fXw Y{ 99:O> fcn}U[K-ߒ9}Cq]C}Ͻ.GsT8 3.- > od(<{^31PuDSְay$T%Tmy; 8uv܈V`̽%p~0eA+2 Y^vtǏ;^op۳\:jd?Fٔ[,"h o|)D0YvEK&D74H;N?LJCe'ɬLI>;޸aXLf8XUz\~]el+?cWv7Fe$+, 7ϐ:PhkK{Ƚ.[xsEvP/Dx)ͅ{ڠ#m/]PIh6^Yˢm7cKl #Q~f } u3a#o=og3NFUa/SZeyۘFiNd4$5ɖ{0I*C NbIGy9OK~o=.;D(yiyѪgGCu%f'@ˊGkO /̈́ךǶzl$GGFf\e+8m)Oo^ n;/ ݋΂oaTcW/g{]߫p0=؟^T4nmFAǠXOGYaWٻ(+-@0mkH(ò{cω!IL}m[,N=ghl&3cRi^.ٶ\[-<k:\o ia۷˷\>&UUiF,,j IOLOŒ|adw9X*< {jvEjSqfq2@\+A:x^ ƪR8_w…Ӌoo%IQďshіCME=oBUH{ &|~eO &1щS9Q¢Mb1=XW va0{/Z:>75LhvRI/ +AT#yF7M9Mߡݾw0^Bto207&gQo 䔾<;)rݩxĘ@&yfʑ@Rjnro2Y,s!tͻ̟2ى :џvk|sQ7n⠇jp N֞M[#{[xsgZ#-]o5aIbyOc/^W6<,m)6|uG dNn^en .+#RE}}w}ӱRoԾ7!ܵ{:wC~kEc#oFzɕ }1H 5 Ym֚H%pyf~++RGWY# 1>~w ^39Wa2w }1#WK&ZK ٷ!:xmq~$@K y\RXs︣]U:KiWei(Plb>n1!*+*}[3 |-PCL~ݡXWYőu3 ߳xZ]o'K^_ N{P$hM2Vo^k>uX<QSV?]v q|G>va3{y'N :]}֗6[w#1U) S#Uuw CeE̛V0}7\i+ikkī  kqaqÕ2TJn>SQS‹QV{hCg!޷\(6|aa/UeP7 ~_cYUnvHXǻp okt}Rڲj`pYo(EF Ж;FY;=0%08zR}CvH1=N/Pg(`ʣ&͘\x^<'h}W%ֈH &!}W{H,G.4tm|0jtTՂɇEgHlm_mza&Lim2 s,J|''o2m3ҧXE*ӸVja`hv~4\ĭiA~/aﭫvtN4U|a;O|e^@7|vQ~"opδl[ u!ui 5~0q29Vv0&*ߑ#j _I!<<:wN^'2JPYԩ(em V}ԨjNE~mZSXOgh}dCO>èB_zuϤm6ġ#ڥ}48d9XgWgۡ'%W.=22ّHL}kSֈb"!п1bݍSfmP9U+ X֜9m }h5ש%J 5:JXzrߴ ϋ8JnQ 3@,Zm- ?&Ћ,R̈́+oA5??Of^n$?&##?b^=+acHsF潯N_E;o}yn?*׶fb2'¿֤*2J:ҫ1 ai{ܼ&F62X샧 X>-vS+m L̬O=[*;ǍۜxqLg"\~kO>tI7'b+LX$(JE$0 @G$ ;fR͛X녙;$cn}`58 z0 LS{B-l+L;|;~%lz ~kBAj/C~&K(, ufn8ƒfX:,&XxIRһOdXʈi`NQV;ɤy̿GW{]?~j-vnbo ~6ۀwY'GFpvi=y<|4ýyt7h>iBzn"< $'Us$?1;$;CW'G8JeR#4%~4' ,o!2yu뱝hqF9-6m;"(ev3yXƝ _XQ42: m;m'71j;P7v'OM9^wVۅ+df̖Я.CEL`?TH]NYYc}Osח $)z,6 @# {̦9s'֨F֣?>_P.vno YyY ¡2_kZb:kSo6L'qYY h:1ˬ IC,i6]S@L<][YQ1l--p!b\ ƙ^f['X (| jyΠR~osӎhLC`|WmbSsd]_z m~Vr\eFN>ㇹk:qRVjB2NZ@8z!=f8Cɾ;262#\1[ɕMOVhL;@nwugh˘ǽq9 ;a>=nȠo.^+✜[³y8L?<=*u˔hZ#q͂0g IqeZ򶜼;wZݲCaҺ^8m_a\W| x+:-<(0!a6Afwl'Y ;a1"Mcs 1z CU($; WoBG]nhtMn] Uz7} eK[f}0爿aul+4Jc3xX,x$fleJ9o,7 n0},F/%1fg=-mcgkND˧/wj+ğ1|,<z~->V0tɦϏ3wm = 6dRUhO'WمgG8*!e~ivGf&v݃T]mib8ѯ ҥ*8 k_)#9o}vtlKUo[)hI5c? C[0v: T"Tjm֕"M}+Û7_J_lkflOŌkeFA.ף1|^6t74` pvQASnx!J+Bӱ` &1' ݻnM!3?`f`嚐m' }Y0١)em @E| A+`5A_ߔ$c/m7C/rL CȣPC_K[vh6g>{>l8#鷥=pI"5=쟰 kG]wO+U(m+M)<#zaWlc dfa ^t-Tmcm7mS`.ߞד/3`hLog=>Y&ܯㆩ}~OH$𤐎H \5yr7]=b'w 7y7@[qXs:ݝ ϶v7 ;z]W6uab7;@1NeH[i~JMfjc0QQFCLMioc9Jx˝ ’"088illIv ̖HHE``1^};6+;C$P6E"  XA32bx=VKl!hM([Sk.$J?ð~fJj@\m=T}7 (!                                                                             
`DiΟ24dNNнi0N$@$@$@$@VjըrymZ-]~cTFi0 DjdZc$@$@$@$@$#@Է Cke*Cσ~e-QyQD0f3|yڄ D%& $StSs&BG_wY.j[e P.*̛B3=    H&9l@4LucqA޽,'U z}甯/gLHHHH^~>iea9~oύW]6S/&7Z Dt$@$@$@$@$@.z#'3!O)fМ,Ebr|.oM%   h }όE.7΢Xsʚ]*&@$@$@$@$@eD?;Eơqm%F(6-$HHHHZ=͢PfzyT>?P&A$@$@$@$@)A=7[˛bR}3$@$@$@$@$2D?=J7[ԣ;ɛbb6 G?CG;5& inn:O$@$@$@$@FZZtffzK%(iV)HHHHRhr0iVHvy5=sG?_WT;L$@"me$@$Hݹ*;te~ i̜Qh"evsqʟ|tQ H%*Ś4<\\74Rj ߼DHZ'ԷveH;ڕZ|4uXtTCA/t 5LSpaފV  $&@QčǢ N`Xefu}̟|a03A/;x9^fcn70+bv~JHQ(x+mz3ēQ*؇s=>ϵ~ WxRŁc\?yǵi Fښ$*7P &1Ն+8pHoY~ A2ʏ{Gb0*7pCoc| ;ZjAN"3]6 =0x\o?3'^m ^eXԕ~ON3k0ҧXExöZeIx!v0`Ξa 3C|rݹ %Hksv 8g)=&C^GؑU1HTf-j85>RǑ_%,XuU11ȹY QXpM#2=~' d%dm9HQ0{0#F޺qԬM*je<3#w﫭9sl۾bE6v:?ʼ*6 oq+%ʵ*t>玲j\Q Vo+K,F'f7`{-޴%!rX(Ҍ@5>Q 8N) L|ekUYUmDi̒_Aʽ^/kCqV[KʠO: {QytE7oKΓ`޵{- vuGmbsW_]4>)I:Ծ^=u0~,<-=jgGG;7Tax}!(hQsiz`K}}x%y OJn_ G=9 $3's$@1H_1EzҗE7LsC*dYqΝSM0m6oB3:~3MԶ u;K|TN̴Oq6,c0c]yOhku?( r` p_d"Ηb(!u( uGm8l?ׁ jzH$@HެP2Ve& muRq-']>[b E+zuwB:[CO!}65(AÉVD^CP յ>R#hC4 vXxߧ^Zژ?\ײo N8oWP!y3.f Zu H"ObQIN8''sK(l3o*?SǘZ-$ bVsvuy+K3!f+,ly?饩3bssN~oiͶ*ZwCyAP#t;s UG´6yca{n0},ڝimN^a秠~UOBL[AZ(PqϷY/ϮoDW @NV,yח̶!a^#&Q["HE#  &M1 $i HHHHDIHHHHE}KB$@$@$@$@M"@Q$lD$@$@$@$@C>qڂ%!    &o6F"    !@Q8m @$˧FOC';iC6j$II+jOwִ :,: v$@-TkRI%2VW]imW̰;42|{P 6e&xr XVm+}HHH PK IK%+shЩ}Gj}̌tOIo@R}l=X\zSǧ^ , '@-U |Zj zq y\- xOmGyy}wR>oҌIZKmAxnzYZ`#,t{%  H-R[o⥥dŨ`|i]zU3<1x|w^zs@GpXz@G$@$@$BɪY36|~fMIx 7/}Wp2j"^ڛ>9&  j,ɨVgg|Gr0 &tJֻ[Gվci{uH$ &($@$@$@DZLZ*QEl\?uJvIV=9H%Y޴2 @$jH"MLZ*ŝ{>ϵ{ iC߄E ǬvF >iо>OIHHc&JDQo^tl[Ķ4C?{l}2   H`RqndR&ꝑ/=DysfrQxvxXk  OZ8TBzyS,^,͒zI vmҵG[\^7`IHR#QUj8w2hDR2^g&Z5R;d;r'̑HHZT Mt-hN5.=};f[$)?đ/"  !@-@CTVwk)vD;M?Y&?x.M6e>;59xFUo/V5H;&'8Z79QF$  HRR܇S 8vhHZEKՋ?So*U];S:!M["kDQlxDzOUBԿ)7M $!I'5h>A$@$@LZ*[V2l_>je?,D@.K6%Rl,»KCG&ڪkW_`{Pn#UzwsO'**-5o7u)w~JO-Sxc^?P_Fݥ:UFO-ZB8Wvs ԿX}'ف#߁R?Q=ʇj/Uw ?03VM]wꟳE^jѥ+ky#!7&# @mږ|J/J~=:Gj]=-7th?Jm,)W ;g[y4ĪbO]^:!j{kO;vW{S_վ$TԺ1V[ BBt˩98Vf}XCԥ2ۤCSY8N>].*cԥS HBU󘹟~ y} v[}wG=gܯ_@k.8Q #?e: ivU!;yߥ]r#  h=hk:ROC PUj))LYn$9qkyHOϗ6;J~uorlϷ(tPRK%qх"}vQ(KyL9.gwU'ӆ}1߹C7͔bll~wburn1IT(do\wj)(GUZ^:5lTtu>ՊS7lvz43q$  MXӘ:v;S i)8C6jWjh-i)b RGvj=TϮ0\.EubS/32ߡ]f01zJOQZ>X"ߴHm\n[hyT#H\paZf#}ձxlm,)SO:W}Mel~xX6zoJ[Ub2HoVO8G:N+nP]\|̚Z|LHHH  D9RΜUxC5i 019&  ^lvfe_>GCZ*4`bkՂ~tP>S/>cnGL6%J$Q_=k:1!'vbudc4t&2[LzscGדFQiF:Jy= H{ãN o-\f&O$@$@$P@G9X;L!ZT9Xw閯fϟxc`on-X\e1CwG~[ߥG|Kؿq8Fbh7w̘q0'cAzc/{m3Hte>V'J#UX fw1yڈɍ,"œ'q=6jB]HHH.D"%e=̧cqZ귍e'!vqܖQFRl4;G̽HzyP;߬c*$ G>WBjъ ~EksrصQ'{tih9k`QL}Yz{Q=uӯ;{V[w`쀳3[bK1HHHE@6xEg"LmE/̌4cisMB" ѐe{Ĭ uq[j='߸Y=\-&CFy'#f켂w%@1rleݔ!w:OjFrE-RmjF1dFyE#/XeS\eؐI whe>K-jOw5HKVG :HHHH`!ZJ+ɇW; 2"5EpK>ޣ13Ah$TW*QsvvU_~JTm()SUU-Skmcþ5?]i^!+SuGԫ2MRQHQxgx̞a? $t^m+]mFOok= PPG~S$T]Q:y;0#+i7L۵EEk#&ZYT':dl>-۴gg< X{5(SOk֫X~T6*lR[o E=΂/զJlf2SuzUʲۨ,VԱCcӽIBPWPY;۶onawlif| #aS\VaT?ݼ3TIDdvν:lةm6ma{TБ@BO22>-+)/P >^+Q"C![UVջ|t*)p!GUx  XhNhϝރ2OGLee/={sؘ1ˌ`2ޫ\}IwfkkA}x&rd٢vɣv;lU="Uᓬj:7`бrb~&-l~oS,Zy:URa0:)RNԻ7?R'*ͷVX^2`Z|**vRm8]R~T^'g] d71:vاG'}C.{J:H*CY>ҟ~Vzm^w/)722L¾^EeޑhRJ6@}Ysqg.q^'۽Yf}wue`PYz/U9ؖ6ew:,k|ڗu%RIi-2Cm&v7}'*͗G&# ._ً 4Ay%eMQ'}R#dv諯;p͗恠OQ,vk" xܥy_C OQIOνZ#mM=urtSvfiܧ۔lT'.QU**+_o"#+"uR $]}tzOTV旪[giȀ{9_!yEjҟj+Y<=λ?T}řGi>Z찖aq' Yo>NB&sJG=`OurNF7*Yո E.WFo^;?-eR; ϶tĊk9&7]<83S9_U(y%eN!'E }v.Str0ۤPGHJݭj߱㴡Ci}}޹WR0=j6ާ{g=:V%׆M"ڤ&77Bi¤lDW9.UUjY;t>d^$ vSQK%Gd[5{RՒr6R.)߇/uF–Um_t0|,cKߥg Wӆ> [En4deEDWGԌ}Η[j\I:JfK%+]nǢVD$;|wvY';a:m3չh_|6Q)2[ Y|N<[wOG)A@;*v?㒜QD}ܫRF)+є蚬d>3F}*YzyMRyEOU`Od\NV' )REԇ7ɋdDvR>)7?("C`#Ozd?yi ObH ;/k$,C">Iuv(#Ѹ6?K}ڥgcQdw>ո!GwGOT eL1-?q#H%c若Qp^RmRvC+wQd/#w6Z #T! 
]ސlx4=}HA*e&WdO+!qMP+l/ ^o7>^oU~^k3-"K%_3RIK9m.:yd>6㍼qn@#`^ҸmU*XQVjBGӓJ]Cϩ Pg;N _ !squ[Ok֫ɏ$ofۨ!;VzhE/k<ӇMDW߯QG[kMXR{W۰Yy͹MM&)I7 - +B"j:5ߣ1U]dO~zԽ7^|?oBnjp?ѥw.Z^}Sڱ:= i~ }ifxV"ޫqD7 , GTZqMJSLK9̸4|&>6X1ԙPOG,klfw#)`~#8 e,xI?Tik{şq[_ճoSkzje?,]V.!Ҭ7yܫqe(#O=fK=wU+6mWQ/L!-ent'4fiF%Q#b:i{4uw=!ZM'LLQ%ekT'` zqհQyK`fN)h3[Ƕ:3sL.:ph W.}}20{ /NNjCd?;ēF阿9KuE'wFq\MKHB4ƀKp;sCNwWllc "{7=IN}V;;;vv7o~h#mwB>) 1BLѹJ;FonQۖ͡=.~\1>N~+Iϱ|Ɂ0o9@/ӵF1=~ߵd4hi{ "*b$)ҩuIe-txQމv^}&k7qq̽nx_NK4w/,K&?HgKi,!!)GL>\HrX6+,8*~u(_̙F$fSY%=}XJM.cyN%axa9C,Scn{O|p8(1Z;4i 4!:q̤OE3~)ʐL| f#xi:ZPBV\>ް.]4s]5o2hI~FZ @ n{.o ;@C_v>UV^j4P' @"}0HwkHߪC=֞UPQw#5D9'ý麬tzD(֠Rќ&;~}( ЖoT54̰ԐK?yqDl i7)!N7|,?o./zr*8B]3S)\ʉ|I%CcEBM%ҁBZ>8W#K`0I`doi9MIǻ.5I|OZ<dznw,NP迴6$O/Q)4R޸nW&'rgD Ď Rym,lX򇜧3铝Gi.m2bLbFv`Ҁ,Էz4.5m;E)>iLuPij KfM%e~ 1 -<<{Y0NxO :a2#_Wo>xl:3ϡÐnolM56 KK_fFAtaƞ+~z~.|?AjCk]D0x 5MSA'|c)!3B/8Ll&lw睡Ϛ{RBO@oJۥvi <6 y63w`QJ=}Oy7mOwg$ u  NsкOM'<f5 .}`{f'`KY& XlUmᐷI4XA9peZ6gt_3[t§w僯|v_Tq^56df=?7d Hu JH?no1w(ʇ/ꟿ.h/[\D.I߯3;)p۬mgP)l^Xo|"J.0g3a;^p;=ڧAn^X>(K$RbpGo~(d=~|i GO ,YszxJ3iM{W@̯ /%xcj睆ӔSA}!ZoQQz˄`zzuƐ.'xkc-gO3tgt8x{As^/,!߈VHX%CVi,3oc簁/z Ymb`.%utv[ӻ o7QF=}+ӚlS9 tn {ٌq/N"`9F=x`GFxل%FtGXOL|@q[ 껖:>7}qJC- ~{9?O3=&;) m;f/'YCY=}=t(woFRzY[/% eeNNh2(g%0%j!:r>I s+'Yt`<m%`/k+ Ƞ+ q]O[*K`hK`\Z"'Yt>ZMċ—~H3d % K@,Yd . ȠGKAܰx%NS u+A.y2ӑ3o @"/BtAѧ~]; M> K}Sa4ZR|"턑_V⯜7֭Ѷ#4ghtQ(*9>6"eYM=-9杣/,x^Y"*ʺFzDpF4jxrRB~O#*aWX XZINO)Y8wv=5!f%%7nƍGgiQz ݽa8{ uZmt,+7QqϥxLiCWyPrxm ʣNO1  5E z-"6 @o4$*zu7j-fQ ?~g”en9p?yΖT\(zM*A\zw +ziy,Y!J]^h:8qf"{x3z= QrV t[(-F3 Z2YH!Yݒ±] ?&'R`* s_U-\S3-J"(4>ѐ5̈*K y=ӐЄhwlXϳr/:L#m94mV-mFJ<;j2*ú*hjpUT:/\+ڶ'dpBc3Lr~4/8<'p[,/*C>`-}j~@*zΕoEo׭W?=0gvm ^7XW\?I0b0,uKzLsJɠTj7p2"2ltKبY|y-K@@H@w,x'9$@{RSg` >-.6Ԛv'I@jh'le >)vEk8Y\z((6ՋǃFnuϞwv/N/TCi_[8,^xd62-v9LG;l gJRh;aE>eѯ_7WoO^UPZ?e>kwRBbz)˔/ \(M0oҪz4*өM}_|x(^BV@?K";y4-KBIxwJCޖ$7a&Ck#/ehYK@.[|,'(iI0il⛝T }>t % rhܼF[@_]?tyS‰I8&ap,y<a ӗCC w9\?V'LG tRSO znSYTWjvTJ`<5|s*ĸhQ^t땳-h|NjoYzQ#@{csUs;v(auڏ>](;ܫI* b^[*'+1hČQ-dPep]p=uӨ!c0XiPo pDzfIzò\/5ƦHR:]t]5G|ЎOJ9 _yVkTۏ?`nA)n_oЫZOY+Gq#A2xިhs\!K@@H߸CbLYKB4Pk|QUU_.ѫV7ӖɆU0vJ#^z8J&FcqRGkn:řM,_u4gR%&LEUT[Ҟ4P˦HHex"UyitJ = } C^0k9Zl4ez,Jl//%2Wʃ?;3,|L,ޑP"" OX-R8r͂N5]I%G16QRF*nSL>L:dIRDKuxҐ17B,g$^Gѷ4Lɩz"hH ko3]gsv-;-%{ q]ԩ.0@=3(73( pT#+9,tr2QvjYCπlLJJ3p" fv J/Y$HIyhM]6}hޜ|WtRo ^@-6JF :t\F9a8rn2&QB~33Ҍ^ʊ7:r]<:}B`!fk9glU|~rTj)&rEFJ,Yd |apݘ`O XzJ1:as~zQ-Is=srR-c؀g otW7#yꈁ\n&h>m%{aXKuQ^?<A=&]6$z(ivu'eIDv*lRa71PxH _F^0vPA ",Թ\W3[=$YV`IZ_ #-FUSޙm0S}쎞R(#%ƚhLNw *|y,Yz?eш>5(2,cŊ ?A1vYJ3u=ZM%|'la/l$]WF\1IJFz{>:!0`=f1}޷zyj<#0bw8NyH௸tZEQ'>&Mf38G;7s]ڭ}$Ӻ& ;qN 8͈Y&Z*omhПqBmEiOY̾Si fLR;uAd]StƳ1DY#ݶ?=D 2PU1:dnp w" f_GΑ9-q'q,_8ybgF][{[V~?V(d{AJtiƨ>R Ɔ-zeJb;Sj|Zz'0h4i7|;δq(xaprdNW߮5/]U(8mw1싆y1g\ͥ)G4NM}N3h>{KW/Yr g{v:m轠2YIp=iAgoGCUb=LIu<8WFJRRQK6`I/).Ғh\v*A)qJK$z&kTo8N>vx_rxgqЈ:^Q %Qr:v{;EhV :%N$+CcNG z@@IDAT5KCjz( 6^ e>{?)1J/@8%(5>K"×T3XnUP%4uMThhlyf|AGK:hvq]ѐ/+h;C*A9V F[F1Phb@^˕@=; n6S҂Fh+iWO$E-Qsەکh#ln:pзO(_:\zpO3u6$h`d6 혡u[Q@K0zt(ބiiqk;?=B B!hq=g7짴^7324=LvMZu=y=FDgtI/=wWh$8q#FOa) ےt큌>5A=.t>_bazLS=7 M$8%w 9`zzH䎬;'pXcm=[K@>>}wxkcjYqwnτM} dC'}"='vWҌ.{2U7IyZ qEqE7,EqyO*]]㬡glҚ{@.O YLr3S1c{Lu,+4N](ii@ ޛ8x @:‾}lNۿy% #rud kRwY("U?'"ΌJY- D M#RUy{AaMֹø5^z[O} p ޘѼvrgkX]_(7p]7T?ׂ"dj赗^{yZ~2R{n$"Rlp >cZ8>Z/  b`?R 35g򎼷5f,5˕rn톼8:wzTꅟϮUP_ WG^>GG8_絓x~m-CFM3[OEz} .>~~ce3<24bj<-e-4,. 
qx D_$nۼ4mӞ`h4dН3UԡOV7ML=.?U MͳؙLa0KBp!!2}y>Yr^E},ض}^G +gr?TwTge^, SA_kitUs7 ܷOv9!HSELgfaГ*6VY~Pko/z۔bA~B;A|]O|y8䗟K:am}{KIqߛ9T?*z7^ysҫ $ŏ0H @z!yS/R=^˦͛fg<@_Min.{ZKB-*j/]%{~J٩x@F;^n>׉=l=GmtuYFN$3ܺ yTNW*-PDpPV5` wSCv .t1=r4"9^<'K@j'Ε+k$LL6I8=eZțw/TУ7]NF$z;mߟ9sbzqؽ&JPzlozyݞUG`yB7*ݗj5'm'V8ͭo|H̐O|bZk|֓m5mkkX8oWIs@R\L Գ,XosY=jێٝW~k,|^o\I{w^clKQ?ܵr],:*nW[w~&, Y,Wo+=5scg2"'!:=*l+6p[پw0U9ӍYFMFe: _F#K* L? 8I{#W)Cvð#NM'oz8&7$DhbY='ۖ1#zOyrnڷjSGg>OP| ԡ_/y(< ޭ4n!z[]I/?& V(V;=[H}+/L| oeطU߲W,7ݮ!l "ߧb8d kn`7=z}5~8aƬqcO1}ZA1`ߧOs]v`mn-<(y%g}}6w:@+0 X1BkwT5y8/77~ z“(Σ߽ay_ڴi-;OoCUzXBO4];]w{OJt@*wJŅZz7a&P0#`8e=+׍ȑI~G:.|q ;TAqoןԿ0mg{<{ ɩ$ S1_>9SQ R:]J<9o/Slo߽Fe$w \S`)@N~'>Us@>Wg h:Z\[ g{+\Q쁷#"%{Tl3Ot3jn+@=oZ^iץyڃzK@"Bdeo6p)j?0~&kJ_]I,+^$kKt 0PKcP+J׫ZGlx} qnKNNqGZZFGa*isWzzꅽ(2P>ꅟ=sZ fnr ]LaԬe_񝥴ĩtu,nfk4cgN-Қ08Gqq9*כ<IP/%6gN2ؘmw\wDjԔQr< mF߱N}7hs +bG}e{Ѭy^h'EhhOiR|,&!KhS/%ݖ꼔$:VWO0:b[S,R ?ޱ݌t,z{/Fֻ{S`ZlzثN[ٿ12X*Wk^H=G*x'O,AʪXy(RW %kFA+#3=|gP?=nS$I. ]X$-=z" ˍ/Jpeұ^lXr ",\;s % tPoP]Ka=h<_vB/X;D#RNՔ\"ȋƑ~:>nZ>WL @94Rj%/KnX/dcJ0|"n0_?ktk1D,"9ȺS=64ѻ[Q3\@Ȁ>X`z&DڒUsF`/q,eAr{R=lnE?d*m( ̅4?.E_ܸv$ʶn!I;kiCYصAAF>Kuw 3¯:~2@ 8oq)iRiw3p'_yg(ߞV)߈i+I؉˓0cKD|IKT`2̍/p"7hlة ep$EjL^T 7_n\lMpWzA+S!Pm;OmBwޡP(W|d??/Ry+)d_\ -bѼoDCOm99'h+h|t[;OOFMrU>M' ?e. QlT6EWиG^l'^@tJZQ)q'xA p'|39ހYRӻvSߵd6 ᥮X,aa7 iO #1#Y{8.K1JCMf:Q@隬 4Z67qQ bMҌT5g 3& 3П-pcUz6|FV\[@~9p55euqx7O~cy>t Z/w-3b+/܃p8qs*/=\(Z 2Xhd,uenŭʕX,@K#L^szõ4LeYF`y  .qmF&^Dsϒ y-.Tx ';CUG|.C䥒c%ϛpXd:'(j`xHv>JulH/5[+AkyNsQV<|p\FtmLc%0祧a.m dǴRRk&2[9f7JXy WyvuR;eU} Թ,ZUfQ}42ѩ>5Mw bM[ߜ2ΠOf1WN%h:'$C"^J*hlYIS)i01":T|@}ݎݵ̤WvynW# 1kPW._~|VZT4ߓ8ŕv7׌d,\xdkX*#wzn% 3ỲUso/zi>pzܺ_ QZXFҶ$;_7zoz^WJ$x]T=уDXu fk5\|~@Ob*ŰҢi%2f^P|A ;|ЏZNS!-~NE]nY .@>`|Q@z8 a'aCtؘ~N{[CȀ/۾|(\Vr/qapN5b#o|aMG1>=5!CcӍ#3i]Q)tE7wNpF+7S^-;c1`7!mt8&ۊO=Mc< ۑSN߷OqjӝC?VKd&( GZ=&P/Rut|8z^YR"'u>7S(sBquwZJ_CҘ+LyL\ÿ Tb?в=ΔngsB~Otz7|cfŷjCu WQZL"/2}0Slf3hTZ\S/~;y'>.7_g>Fb1)1iJA1p:@]>+&=7/@|G㎌I2IJKGr#o/APGo>xE#g|yM~Ec2R1 Q6txu! qͶjo4ѠQH}6gM=su銇^8Z^a VSӥЛj?.]p{:[2Lms;޶ne`Li 6UDgLb|9l$B(2x{SY}zNf'%PAMw()Ri(*Ow0|}|\R},޸ST2\n>ɡ7m%3@ۏN>dӁ/Ӳ9?o~Px k>>mbKV˄\sT"~"Re]1' so{c񘨴T[ EQ "~JL ^sQEݖQJQ`fſ^JF~DŜ'-AEb thLEE͜" ,,%+1v&'=csu67]' S`G{OYBpq3ihj;RO6oRCϼ@ J@xaԜO`=mYQw,iXH@uotEt[NQKhh?X󙑰&dN2"'UUN)΁E숏Ρ t'42m|OFy*LMGά3Y)siޤbCEo8b_lT&VуhZOW%=x"mJC<{(#oR1fRУ2qfk&V!%Fd<R"Ym1Y@"BQt|B*L_F'驺A)/#:a&sVC& %4 }+2O&DJ5}?̟%Ǎ&ϜZP?8 G3PhZ/:wop&u{ tUVlƵX1{ 'oqPF6.*'w3IhU@HEB0)OnƸa》&)CJ] ̂!}*A9TD ߇,-XiyJMh٨.NY :* B)-LΉcb~)7JD,GFP h8~+@ Ĝ@׮rމS 1XGw ~@1&^DqsL$bw?Mq)n@v1@bp ޑU Q@1('k +g#LF.3i8#QG=TlP>G%lf`uƗ$'ЗrGp-@y~}#"W.hMx{1-:p61s 6Ӆ *)4)*j?e@[V6r/vW3pqR&܅F4Svڥ!ӯseͿyh#Ou{2O98͘9v-/(wLzp5Ǻv#f:_K1z*ǬAtf$$? 
Yx$:;d3cRWUK=wA&gaJGہ)(γ0N\l5ohzi:$fLTROkNtZbc񼐋oVf*"b2e K>,qəe > %&  )Hcc(M90 ](>AX;oԷ i?7(VLkwdwo9SPtKT{ҪCg i3ϾFңuTA‹Y p;F Lk;5h7ũ_f35^-v\Rv3g0?*r:_jՒѐ?mB+ot`5Ypt 7Εʐ2*^/x5'VAf[}7G_:kU7a%4Jva~cїQJ7y)Nhx`&S`.kx\kAZ*kpgO9R*3y s0Lp>iP.;`)0{F= |ȿzgk/Ƚ2wtѕ.3< ˶ӊA)8lĆ|AQ*5p7zDMF[a_Nڧٵ;>o8Т"paSD`;$naB0*PWΛh^hut>DVm-tlZBaG ;siW g֛2pcwe ;FFS78ғOW^`{`롿_6\`.l$?vr5#3Rˑ% K$ ޓ\ JLA~' # ֳZHӀ@X*n%`(5e a7m{`۷yW"AÁTgiױ 7*׵8WJe#oc!^o=|U=s5j \8$]Ñ=z4 k;TS-J6hGR=(&"{En+pօ#%}YrpO1 9Ktyپendo;-{ґ%]voŦ7.4cޭcZy[8VW/;!% K  p%V=b1-!sϬy"ڜWD e[3z) >} TrhF FrXp,ʠ4P9E^= ^iӱj5`v|}g Kw;*C:FЫ.hqhnS f@۲d DN>rKB`*5V&WnAgK6'HfOx Pv.ymKpCcVp"jfxo>JO}.[ S{RGvCy$@kߓҹu W  Z:9S{)zvn(9_TYR`91+|zm !%qz?ȻEV}24.=2Rn H]nVz"A hTr'iݟ@ >W?}XTS򦝴18HDk_2N}@%tͩ1,Y% kE;%%iF$&3˅hwqֱµ g#SHBh{O A)ûGAqg0F@/N#o&f& C΁xVyR;=[\)0VNKhwӄ2E#NՠvhQGώ+Gͯg>gv4ȈnMO@Ž3T?y` 1b;{ι9dP?X\ϐ%#:'y׵K/)*hv*x> T՟ߡlx:p~O' l"(`B;FPExj .7x>y `Q &ک@Pk Œ7M)'Ȋ+Yp +:@?U;2,mȄ\ʉ,2jTNJgZ?fW z-e{W%jh<zZ`K'(?$ Oh+6eJKF'> dm%ĶjGyxp;=pQ` .z͖)2Өa Ќ#2ׂ28|So0vc9%5#!Pb\NPGzxn(2?S6tTlԝyH|o.*&'7͘i,> T#r%0$}8HMA!]`^#8^P.uxILˉʢRv\4"H/,nn"TO"#% ػ!5ZK0u8Ӻ8{x HU53&Mۏ ]$r.K&vn2bUk18TϣibIhD6HNDj)I=j*&yFX"%IG0jM:(ʢ11tsN 庾:)ƨL3% A&BSU-`ك;Uu[wӊD%f=b݇JН^[?OU|iEy:u:f$D֭td#iU h K.=@>Ch5؝ƙBɴe!RFӶޥַizk鋹S f8^@8IqEI8+ Ȝ|>>C gҀft3煮*dVKn =vG#^hLnO*eϕnۿӒg u\7X7{tw-#|Ճ̤$W٩Kxhݽ̠;劇gi|)l$W{(3JAFbc(BcY jy9B'SNN'h#N hh{43,^bcq8pW`܀:ӡSMO;e dPO@~H@ $-74\&0mGVyE`;uZsi<&2hzG2upǚ<768<,Ifz&U[9ƧFc^$m Zh jap%ӑJVxYIUnJ2)1JKFvlL 7F $#Yԇ,BNհ]hmQF;9f;%`SE#cuQ5f0a]Qjk`Jh`Ԑ"`cqd操^S )ߖv'@00^ɢ3XnRA1'5C709 @ܴ^O|\@H@!u*I>pƍz*on^SǡTRP{ex5T:^BdK:G9f.Qkg3Q']Dem*DPQt27 @fxtdBy0()m1p*>%)V8`7`n |TPXTJ*+iT`dɆDXDDd妮xCqr'٫SFE;UB.4yܰ=HӲfSLa2o:u0ϓNNJnWҽ䬲dP?\P%׀3gʹZ?1j:SB55Prқe>v/XѢ\_7ן#?f\{`-=n]0m T9p6J4hvԩ4JQ)V)T* 9^b7Q(73am1P)d КTt$n] pct=J28ٹZQ!6(f.X.&xրF$A%`zOT#f_=SNhK} /5:TdqSV ahnCvA ݉w?- q:h£.n1-Z ;rvu3q~utcFxBN DS4U(͋4#kAA NN}ozvԟ?uѸFʇH?><1wR>UO]1DY,Gt1cezhnc -Ĥ#YCπތk5,QGM${iD(ޯm֐U *<>0?f =*PUl2<Aū]dPyz.R*+V1y\fZugkJ,?kp:}`<}T$ 5,{Ɉ` ? 06ϢV{ 41 9وv᝶ON 8׉N͘xd@ H,ҴnҋA )s?AKb/D|\t+h۫Ϯ "VKgXV7)?%nL(mSi1klˤkiՑs ȠE._/$(ѧt]fP[:IpY¾̡gsbUa&}@2rXl/%Lɑ>f~8 X\R\9< B /=AX qt& @ ? |>3 jAa R2@+0)&`e9Pabt}\Qp@bQ[ 1Pa.`P7{`A=s<0[a0SHŌ ژwvjSF m^vD BS-p[b#O-) l<36XA}ݍ0ŇtmX<ܮQjy&@ haFFI슒DvSN۩Ơ7r^F[.+.?ϤZ:>RUe=SnXC?.DSsRz?Trd DRֻr ]-PKہC/e攀4eʹ ;tF`j-&QyCtՇN^pPE~m%{aX JJʄ&=:G$91> X[mf@k1 ކˋYT?.O҄aG 8yLg}"65o?P4aD oČ@^x>ߏ<أ>\7P13@) ZymgX~pkʋ)Zm6Hw)|]n湝j1cO#.x_F3&0y7"@ۨhs6Q;^k߀Ƒ+'Hmzv[^n(VQ4fDR^.GR=p vXj z1PtWA2ttYQ={.lL7AYoJ>a|Ì⏴`?ԒƙѠTZ\Tc*[.>`2W\G"Ev#9Lx`޶0Ȅ/t37[B"$!#\ǚгPbp#{{$eBS/T=4Z:BQ>>@4 ֮BɃʇv Y,Z~+TF m&TΫT4,_tH{^3Vyr? 
HπƸIRR;7Gˀ=vk7AjR;-SķNnA!w۩eN }E6h3DqکOܫN7׏ۧ-n T_SYh)f7,zBmE+<!LQG>O q"cEjɒ#gO_Gǚm{b –7ZM+r; ;V#2pdFi0mߵ٩ 4.^p_Uc`OhIyP\lf@*qc{c퓍%Oça@ϚPسt{/S̒Y3ae^4|Q68ѾolE ϰ M&4B)0X}K e!)n+|!!~6r-کO[N 1T;[Vh>nSK\ <:w]}z J3K;픟ESP`C(-)f:A*(ޠxx?4:AK oP@5wW +}9G (HN= Y޼ LU~'~?OPOړ s}@FwЖ{M>DϘ?<}͔ NFz k81nB;_CSzAk}` < OѿُDOOg@S&=S?TtWbov划gRig|*sFΛbݞ~?~o8Gvמ񇈵dLЉ,>LڐK-+?֭GF ~YKǦ6l:|Jp2O D |b 50aRF@L@>_,>ٿ7ߘY@ y϶q :S>uGFRn WRr:ﻞÓ/Mqn^@6M7/Y]d?znvH`/?!|Til69QHe}=mws*AX(s|?dE& X z)#N|MփU?`a0!pubLL(qL<TjܳȯIFcAQ$mOٔW &h<@V.7:&Ҫ~*0K Wo#対OR*rr"r1z[_@_?ce0뜕j1ஃzc PK ClOEmǃN)sꢾOeq^RvSEr4Nez .ѡBCߔm+̎htyp]}֬ %vh{Ç`ͶMM i]Z4GO~^6܋va{kfg{x,˛7_ L˽A_W0}kj}~ʃOAEO Wʽ!~*)kgP(,QR r$7-S83U6ɍ ~Kv ZCC1jj%P`QV 6gg.I~y/?c(XoGH6$eͶX(5M'_Zהl@S3|„~*e4}ToyKUU#Aqd =?1V.O% HNz5 G)'2-^eHAR6,.>VT*JSEsH٨SPPRm#Wsu,e@36W #KoOwV6 P-b&\y3fOP-n/8E% 2i(x:&H.S8͝6:Es"m깍TWhUT %XJs|!*:<&Mc&ۍֈtaso>HkO1z4m?B m݃uGoDyqBH׏@0gb)Gz/.(22A=i$*(.NѫBbO)B./0ʹ\V0<1:+ӪYqqjW%gEÂ_}Wkn$p\5п2!$XJj:y"-[.7-Įtӄm /$=|D .;m9 K9%u9PC]Bʇ7?3ݢv!d3`t.phȗjZaU*;2318Qzg4UZ憽57uU*AJl.TR% (.$alμfQ4/1හVn:@}v 2&u‚DlT?NOe4PuƱ#@=S]^럆I2COˢ8%Uհ)I;S3`.I ԑ%tu AcT 8A{R,a}[Sfn_+ɆYYNߒ*'O5}؋V@=WHV.OC5ZI̮yjyrAiE}V:G i`8qIq$S&TP%J@*M K3Xt""fʟT.嗈}(o/WT2@.M%uyc)Gz`]Ϧ^8,. _F1lcPV󝊪z15uT,SOݫP%J@*UVITy*/[( cdv8)W5GzTlmAVG{).aͩs٢)@?_o8=]۵<P%J@*+%.ǻ!/OwxllTCcо6nBɄqamP" )7*1ז?I&?1o"E{}S-YX,-]q)}W]?o{^7!yWٷENgVV+ɦv]m~tjvRF8t38JNKXcX s$%-mSQeF==$#8 [?1P\^Aռ[_W. ԽQm^%ُwwv\1ԏ۪s۩0b?wiBiqK**K|)~=Ro8j`r~Oig7 !WY"^4/}aAS]Ur)I ̪62KJ`Kػ^^*c wAPrʸ.$۫mLzٔک+u{NS-wX`*5:pf]S`8Ȼ˫v6駮Nd+W=JU$>\%T>Db л 1/OQ_J8~RhP75?t'=xz >_jN⯽cOv*܅u[xxAzgpzst>KyY7&XxflSWjVoL*W ]TDa1hڨW✿o]7%1T DRDH"PO"+KmU\VMK;YZvGMg#djdLngS;е1%ğLڟ8(*"yH/DwH?#MwxC{s09Yc+p{SWlSeU TPJbW J;rrEVybv;#flrcPiEUVQTh_/) q֑o$yz#tȗ(nȤ35tt!wbKzV[ dq&t_υBz#GSf7SD1Q5#tijzL:@_~coAԕIQm㡽oX=8CXknlWn< ̺v8M(?1xj<(.A8"ߞw5QZ%<Hp[( ;BytYkv1dң4/2<-lz'Hg/n?3~Ai7cFq^s9;ĵܢSQf^l+W1V~ZVkev_`u8oEìuhǥy9q.&"q-ۛoAV.7ЇBp~$^o*x,.xuA gkw?wvpR} A#=7n~8Erww1 B 8 `ں !- ӛ4ȈKzW%y?]cGe4cޘBg} fY[o-֖cmz6Qz?+w?JH17E6uEur|1 hQf3IVe[Nvbfcqgb%gYGƝF'Ϙ}!3?4|,ֶAr[]R&JUY(,b>^Xd\UV37rI ,5w!6xX&~~߫ `KVoJU\ȰL}7m"-8)|mZzfp?Ccs?=}?).B:}"{zg0XŅ$F2e),밙 c ۰nt$ճ0aa*dv7'=6g 8O5J=-(w@1Z\ILa)6c"S7ŃȰ0=s M$uӭ{T:,\Q%kſ\}L4p&nD6YP#/"7O"Akp#0Oҵs`: KbsOecyj젞B$~ix߮輆(2qNwZ.Q%s`gMg M9s?gyzU֔{t{hɔu]Uk^||c?ei)9Fɣ^,*7{ӌδd=?)p{}DGP Z}"mK*4,"y]6R1`edVצ3uR JDPs)kZzfKy{t"UR,ho!38mG<24˃,CgˌYFa> Ka2pf-tݻ4Ldd_O9.kӾ<:Uʩ^)ImS)㮰zҲwQv?u!%h%E='(3'^la%kD2@'6QRK<(ӪǍ%/p-fwtn_L# za#LQF.w= 1WLLgٷ0(e\*4q?R^Yk4oGzD_k];]2T{ xR/ aXP\L[ЙnI%^IYk`ݖ'@=u D&>yPɆ[\F;LG㑰SKTdj5Vb0 uy>k`pTXR=~b1^@bvG\,ejyoZ~`7KJF 5_IK'(ؿ)%7<u|ˢ作Ok/QK[GʺUsS9vrzwwoE~.k>>@?XSE &i+nl%[ok|?;7,'+N}59 ZjT0ӎ) (;{%D G"a)[Ep4C },M8ivVnO7صZ-6KM= ;L*VIKH"_\-{]q]dgئ֋6i=6{ Џp )xJg/7L}2JASGe6DB,zР^9"Y0?DVz#bf!lSVnI:˧7dKMXR6#JO[/;kNJ>}Ţb6c]4 8f%S5fZAJY7JjNRvWN%4S wF3.)Nd!)ʓ%)W(zvJ0SbPh݃ 0GEGs{c@K) /\rgJh4fcV$& bQ/ADOGz{9WZF+33u[Rq)>GZ|5 /$iy5y^OL2TO: l^ijËcYm<xoVei2/5%@-nf U;Cds}gX \*Zkv{K{0EN&aƢd71B:O!k8;G׼-ҟHhs`t0Eޡ8x7F%q)'R2Pv1,@tUr S'|cTX5m9Se`ep?ʋq=U<*p[ggs%?Q1è{Pn= owKW X;t6M*9Ee空 >3Eh`tӘ]AiUh5t.4)0 *flb1ӠֿWth^($ miQܱg< S\F oYEhWL`qV |~>8q.B~7stZw__=àP^Ae=k٦.1UQA}C.<ӡ fvBΑfkf7J 6ž<kXE0ww'B+~;lχap?/#Mm.9U:TPoE?],-xwIU *iնЧtFI'6[QRu𹪚R>tC4:]<@!ɹ-t :/Wu41h0-ٴ 76Ym~C)P6̰Tr] Yx)xR&Q)R $ֲJ|",hۗ¾)2Kw}L_>1(3m+u鿧z/W3DGݧ `y$G%UQI*K٭ UP_/ Ec_?FQ] L_h-9HA^];dDeWPIf!A*@'0n C`|d`DZM׎G1vkH5#UJ v<aTS-^[<:._߾Ow`gW0o-Mnw J?xPʲ?w%g|AA vq3ŞM'yzF䅲n(ˍwE&m"![ج@&0lΦ(hA|ӵTP]M;L`#;8H ۗ [5~O"V^@}|^D>vB8 ~=?L,w4p9mqoKk) T|Ad{oH JF__LJ]ߴq AWoȗAr,*^;,kYK/[>u@"X;[{Ɇ|Myf b;X1⁣b/ϔ|7O]!gSSGmyϕÜH.Ģs jՏVѵ#)A}n?n?JOl.4Gܡ=wLM.ЛK͸{\PijVD=nGPTP^pEJB0Cz>I&u6+W$gPk#Ξ:`FCO >/Fw7^ 14np}|\ 
(-$W0'1ZXu<7ZxGHz5>fl|Lbo6b3G<8u^,RΦܚzMzt-|f"9Z&c3Lu$Wf`BpQcI:xR%r>=)EaT tlM[`u_I3JRP^aRs*,$կ#iv*߫ eJRLI h e L!~b/E @Ͽ===4 *[&3FT(_%ϩ{{J@hӷ~MBwHNd74e^)( GN +j#WTbazSr.bv2;f qLYMyT ^ӼK+JfG0嬝 ih{!]Ж`Fݶ6xES GոOf5WP%J@@J?~CQTىWo`$M>!Axb3VTQWVOB`1DCt&z]X8*G`RhKoɪ«MX?H6}=k> q؋ '$?q bz PA=001NaƟ5@| E646+6{Xo&cw3~=M⾘mt#xР0y6N+s}Q('2oa".:_; bIE, }!fҐh܀諍,ciiѷ0;7#sET P%pUHkx>韈{&LXмsy8/ƚ.I"zRҫ7zcCu& kxك0*I|sHkZ,\@`cSF\(<[>*a]cjfXȖ>t58,a׈$lXj>TA=ZMa9a4=6:@Q`8n4z,]x$)E#&a]㹉8] s(aUjt!w?MF^wx?72D7-^1L|N?wBK/O{I}U^;7z崴Xztz p[yA)emŭjz H)VgNTQV2p-)Ǘ_vỻ9=ޯ ľa@ok\5o{TxQ|g$j𥄬8n]??bӷsaK|pNP'p2^"lbØi֔7@Ap@eP7BYe`@ŏWޫz4,=ȇ Q(ط/LpRN.I% ZKK5M7q"(k{xQ&G4WtZE{6nPyVj Kk[e5T'F5Yfp;1zfMvN0$sۊ9%9yq?r"JΈ0L' Px֑ix?t7)0%aw[{Rj6Ӷl>5 f5y!zϿO.> [g꺨2mS{NJ "0g^@aa)){UyMQS8|9 b+f^:3+cZ災t]3Y{y(>z4@#Q%4<%.#~3.>Z~5K1iL_? /bXp&)44{VG^,Q+H8k"XUP^)IBɛuG;ԏ]3t4}`SNK(6*m\tΞ<8p6>=hJڒK+\Z7F 4#Wwz,)Vw$Ϟ #))H_pI,GX{S{*؞~Wew. ͦS+bHJ1˱c Cl633^U<`5*`ݞ=uޞҳAÕe3+Į.UjYXT3'a#o.?pRy^*IjY$0O:ѕL쾠8Gh봈XYg8cn ^GZ8Ty">ȷ%5F֝SZsMb l~`9 {"|Do*5|ٚVUBLh+[￲VRhY-9n41AGpΔSbOir SSErc_sPspQUJ>,2ǔSR,כkX+i(,M,GoTlhbx."-ҏڟ{:ӝc˴ttrݙ؍^H_5dMojImmI7RVf,[fŲS5*H@:gClXn5@^jAvxn}'e&RUUdM+/'knF0+ M@IDATe~9S~q (+N(_7Ѱ~ l!#|[NmZʲ~Nu5TQK%xfIb"]-,*V /gL}fpwt P) bZBk'ʦ~jvM$9H'ǧb2Ah$_ /84GUW^kioK-zVҴqkD*=w`uUv (L5%`܅_¹,"%i>u<]Ka#/2MKǬv`6Py`t@O-™)f@&2n=\rCUz zgtG ~%\֐=ߚJzz]G< j*Z2=)۰[.H6M߲|<5icm'G4O/kGn%]<_Geb6 =8S-=L}[vSSp˿0`yc==T5okˎ-oKnRmYuҒ֒myo [j]=%ץ=YVjO` "q  AKm .yŚ/wo_`:Y뻨,10f0yajk#),E9,H5gaE=#|1|z Yk8x4> 6]C!6CAh㹳[qp9]0giVCF/oک:hxܚN+| m⠹~-5u+1 C}Gg,em;{kNxaz'!a hnp5~v*Na\cYeRNS.ξ^=LcX mwZ,$S%j.R5Cy&o%=iViV-~H~~%/*,~r{c M~T^SMyQx 5̋s\q +QlEi[0Gk7ǣMحیIy/0ă517=ru=ه`[N_o|4~+ۊM"EeK;YP3OwMR?%LxXB)1AmA1HJ;8n0qfyڲxnŠ޾nP_XVO5 h˪)3GN[#>5i!ýcn&~6C _T B.aI@m$ث-[}51֘ȻajهYYJN[WYU[wF9]̠`ׇ|UVJʯLKE6̨ g b+c>_55pifEf7v|#Gk ovVQ5ԋzudfUM 6L/o^[a3ձ2Y@imfcf=;>Khtqv%o~镾,_LlC&7w?uvjNmYr9jƗR ӑul Fh9*7%p,7S_3@$=N-kċ&PS =1`ߏakj)R[t6rru*եi4Gۜ]!ʪD,RzE2EeTTZA5Xydוͬy ?K񝚷?k<ݾz?mF^ Mk&_UUVVk*0%2|z#հϬA:6V/ֺQi5Ehk`*즩 wb[giʯNEں`ؔ ?a5#x DCq2칆rgc"MAdvR֓;ښ,τ2+ G[QJ !SOƒ}ϭrB/VD>mN߱\u ( {#OcPXom}FBʆTjUe (4/m/$YeE?lOlkÙ<4+=n7\4v.mwu<$=Ōy\3'+ΧNO3 D#Nhղ tvhN5`^rc5mڛLwx޷V+kYmKzhv,ŮiNc{wvZѾK=v @9pX%^F^f$xA") k~Їc-U/q^KN}ALxX5YcPvl7}5>e؋gyL02x*9M A_OYQR{8/~Nh 'pN~zVM\ ]d->o_޾]=+y曵ya&mi&4ZL3ca)G7U ړL*bV9l$kv7^WX4fM}Y: ?^͟_9lk c߸FeO)T#a̴Եnnw9C=cZ]- ޽a<&:GJD*2h3_P:{&[=|sᎠ4 L60a 00j[@V+pRa#NϏ^\:oԹsq[#+odϙo:ycrH@6O;V:OM|g *%~1fb~׻{@}/_RK^eyyo OnO{Y)GC)yݏ赋Dutg&pf;j NWK Vi}/UG 2mOr4$"H3U[̿3/S/_OV]&W꫾s5RQl rs gN_V$y6_f<#v=yp\pα*PMnGF|Xi) >.ϣo7Eрqm I\3 %kV`rbזLk-7zw_sP߈)0- E,//)9kۇn'?dvó&d#?D`oИ{b-"CEvԛR8pGeY[ϋbN4̣=I~Ύ=oM^HUHҾ_*B꒨k̸FIQK9_+3ǎzr6G[瀟WYe^g8M[Qnh$¬Y{"WR^mN2P&NII?Yńww){xca=q|L 9<<)d&zDg<9LP^I< Ֆ_zVKͤUێ @+dϡ{<2 ZZo l]G-y)7>̯^iӤ9;v=Wht y^7/4MP0y>g W9 [7 ZyD;s{I F2 1oCh%V)6bM=z^$6xaڊQF[&XUjjsGd/7Ep6!w%b~BV  0jZϧe@7z@GStϑ(V 8NcfJٲW=7Յ5'o|8[}NS^u^1EhP.k?nonwzvv~Gjc!6>̣BZ `Κ&bo߆̓#cTmMSAI:Ю/Ҽkaǔwd;m.ܰbūp^R[$g-xVW-~{<̼=[GOR"K#BR6m3iKG;T+󩧗rOs䳊_EU* kVi48vaw2dQ}`|c+YOjQp3/TEK2ۏy߾+da8">6{|.lv5f6*aCZrW!%52|~XUj2Wg`׍ewEb C4yXoM,: ;&J5t$_ 6d[yc!M7oW౵<nSv'3mƄ8wWW9XdYgWXjyiiedYOKƝ\><89kkn9ug4r320헦;X@LX]/Dx ;K߮T?v1^Mk5y{xNڒ̴3kw{*@:-> TV?sy^z|4@^\YE𜣯A7e'-`:3ŤM<6O\x9G) Clm Z:?y**k(ׅnLőCrJR󋄵g/@?]?dEJa(_r_5Pi{Qoj՟kg߳_p/_wJsQI{"6, ےZm]UMUu!GJv}N@9Z?桽h饬>h~Hcb{nxFs-txAYp"֮u$ 9*˙)GamU& Zoь]`ts ӐtYCB~Sbu{!e@N7>"vsr+lo/qGdŒrc?J7mZRZuz-O8"O+VA06l"b-9 xlo8n0ZpSF,и_ý7 agWc @/]cn<_l>'+vǬg~R;x%mXt>l ٢hEؤ5a ,Toܿ:Bd~e]Yec_~ִ?cH_-M}oџ=γ!*hNo6gF$n0>oͽPC+b6a.]K|j]w/{c&{| L¦7~ysUoK*׶A=OǢ rsoXHb.+#weԬ551N,\p }yk=j6.\6 os0yt=XuV|28P3P`yE؛ {!3:==W~]OoJ7ky?{Fs{[&%P`98e` Ҏ^ދS.O~.v3Od]-ش咣OU)ɾlgG<4wɢA标,WN x(azyds_/_{q}1M+Z EcBCcV `z)jKKzLW#$֮~>ij. 
{ߠ[VGScm#8wï?sկ/vg^Bo{ڻ}h|/<uΚ联>U[ |KܹeO,Wh~< CT Zg>G Û{//ܸ/+e獯ٳld0rgNj񌉺Qֺ,rU 4#ʾlg=Wf9_Xj*Ӑ~ێ[30%DMg= ~ $vso]O:(*Z6 g, f ˊ=a*v%~r,A],i)1BP6xTvqժBʞI)*8"Auq=6G E ,ggJ ݝJB)yx:r(KV(< %i=&T5Vun0{a"N"{ј(<,e#׮"O=RmQ%6m[{;*?s{!EP֪h j+׶uoZoߪU+ZQd@HB;I&܄7f?sLTSՎ m ۏ@Pg@qovxrC妴Zi%^_5'=<ګr_Q.8uJBl?R }7<ڑ6QMI ?i8#-z]Y={[VYYzy1n\W*2@P",Z҇ +{trum4Z4=6~$D*g2+޶ʜ.ҭ >/}g KgwJ':#{nEgjѺeZE٧kfG}(5W"Ҋb OIchO1p.@`3bNeq64EYquU9ٍغA,WU'e胾QV6:ow/﨨4r vSɵJOhh󞗩W7v?@gmR@K),*>6Σ#IʾC5N}E%415NvnK_dlFv*oKdD ]q|2JXGGR6ܢ"#>6;enYVT|_~ u[,8ƪclw8e3R!F )<3$SvCfxNUTRI#ڱ/=r ԷV8g%q8=DztѾ:9$=cYB(: r|KHC1 McOW _%%g]Ot-n(u 6%Q_G쓥R.(gctD=3Qo4Mt7z`@ ,)QRcsc5co1#w}iPB]#tܪFwIq+ $.bs:ˣZ|w1]:pzxX=4ii#{M4v]t_ @f?=wc@ @,wK8 ˁy( o^NwO7%x?W7n7}tE4_Oz~~G#5I6M!bӣ$o80&\0-0SEe!gdŐ :q>[0Flڴ tM;b1bp#[9f%O16UU;^J^XOfEy'CZJ yFtUІ~zOX%L>8q'FWcAslnz& zc̥ޓHMirG;*h8g;;*lňW!PB>_PzQϽө؁I/$M} ҡb:Uڗr9 .I)'U^j6rb'BCoQ%\0旌hI=Q*eך DUrXz+o񈳗O 莱cpӶB@!(f*mr75= G#owphIB2h2>?`M&Ib-"-2ۺꂞgzf3S *( 9\Tù w`M~Y縮66):va,zqވ9z9@G'oUC~_>]R3; tqU5z+_4^c?hNGIKA>*E UnrqKTI:^<ƑZlHK}2ds-z0f.vv D{]Q@U5oZ?I=.e0ľ zoj{ r88_hbY1)YwݯWI}[qh)?@mr%rkLXݾ*r訊Jr`%%ujYrC$7Hz6|3Vֈzg"6 zBגw12H$ SI'1R6$% /4n6A8끙έ[\w,APlaڴ#A@g(a`6h !Y_fa/"#uApTQUU\Sv57 ,ĴF)^Lm)V|S=R>$%ړ/&Mw6FCX܇8Ύ{Y K^1Um-<!QCƵ1BJW!.w)"[nxwޭkk,71k?yIo{ V: zY@:{uL v "ipۢY|Pj}BNDF<⬈lA8_ho3%gkc5Zp5``AD->QL$h)C"rDsq5Ln<{+Z$3gmv%Qm1w1~|h E>#8;llr3;XᱎE&Qq9$h#_[t.>gԁ8Ȉ71WZ8hqVc]nFCa ~DޏE]tgZXҺ魤P {â ~GE~8 P-Qg"Q@7uclpw@pj5eC'( KʩʩWE1P+rmVH/z&2 KyPG&%+>kAx',ga8 -4`R~8V3}q&6㿺a2}7Nyj }vr:]vIx"xv遷߳k}Y{"n~MXz}\OccNNZM ǁVM(~FN^y٬7p^õhcWU¾q$4E@7??\i糠.9bQ:ne\\ qs @Kf j)濟nMnntl[%n]b~1*-!Uogu 'EǾ m]_q&ř33ېNhT#`Z9_zQ(7tQoyeNhVЪv]"<7>~]نތnd2k\NP\wmY y>=F8FȻ% Sqj8! ^z 7Y,=Ü_Gj?<7OGB@o:S륿#_dY%=glڰeta+nIZ}L h)HStTX{fNԱ?]@Oo'Z}bgIy%U:# g b7[-)RH^"ʉ_W4y=V'e) J¹w+ jDdʃI~feW^ ^dB7h}Ri4Gة]_jCZJFo Mt$+Һ$PRLdl~m=@%eԗQO>nŦ=$/6;߱:i4Ҡ Mc򛇦8'#CiVp|ҷF4Nv i=S1KXl37sMmqc"wZ"DMGU ; F0;WYb&r#YNKۯ@~<~wv&=!גsgk.o\3#zukˁA xIzuaϏ,z4$ > $m-%NNvRnFB@IDAT0g6ڢqnOGN ~\IR0_mܭҧ+'h-N9E|l7c?wW~$FSlTtVSq7,b&,J FiKԹ\8fJM~aC{ټGb%vO uʴ5XHz1MkX RD}'x@˒0'oWxSKIvOԵ!F$#vr _cZ-`fCd` 4 N0=/Iq,ͬu-nOkH{ye?Rmj͟`]2+~xk£8E#G-sc$oϾK{\vM$$Y>GIJKP{Q1۳9Sy/ 2 TXj:ߒBUĩ_Gf @IFua>iYvsɓ,mEY pu.&8"EKg5޼g!k+Ϡ%mIHTlrLfُjڋ6ZoDٚ]e1͗Kn!QԚNGHf󛋨m~RJ(iGx*#۲Nӯ z~jcYs | |-VCA xM"n0X* ~g;^_eH9t t:55˸FssR~$}*Mv}#1N!!)rc50+Υ.1Ul51F_]n!>F0vUHes #%):K|'ۼ7v㜶kؽ̹y ` )sO&3EO7̤.#q덚"ok,ɷ;qJܔj=8 $`H%QE&vStL#hg[NԷnS U$3o†L6K1oE)M?m$\Y-fh@Zş#G3ߗs>L"rNKя8h7j-K_^XujO@'hM.nۢ2+A \NdK Kt\cye{kh ki??.?+"' GFM5Zqg=$陷Vл+ {u#"=Nai}gl3Y}HJr-r)u~*n-ˊID${4~)@V~@-я1(m}&t╿b wι4g|e/[R;z=9`8mZ]e ue[v %`nƴTk5љP A>ʨۚ^ 6RuUds~0)fyd]ȂSr΢=[(P:\J kr~EɗH=~v?p I^#'PxAϽWvQI旅Qɣdo/>Iq.bxɳͿ _!G6Gp:k&:slƓDyTߑ(aIs9FhٻAn   zy*|_X:y9TMJwOKQF#WQ>Pk>KvRzWqZQ,3(f Ō 0*e"/߮Kvrl剣Ts3yps/i&՛]e3['`W+ @١lWQuop{:Bbbޫzіu -9NAaqb'jM}*Uh+?zX(aef]\0Gc?l6%sA@|K޷|;4I@<3ES+c5M;ajUUY },iW[3Q&dhN',A@- [COco]OKzU8,%έuC6v-?xE]o%GY)yVL"3_yzz/1&&>䃥V82 /9Nӗ/6ۮh *4W}XcU;lJ47q(}]"H;دE}":b6Q4;rOw),\w.Iu'^i8z3_,'[.?"x0MҰ~'8k 9QofGTQYEB9ЉӴi}tek~![6Jcx %VfL+R-+"i1ԫkUGk9F#ѥ it6(@Gpu)C̟~VV4[BTG!Eiqʚ} şWKNyEeНWPNA.3Y/IhQ nx6y(Ks F46O^3fB࿐?ΟdBUNn FIGTl=FnJ/+ttΰ>$IZ{}_hk!yJZy򗂉4?Uɾ;)]vU/aAq9+Nne^bEKdߒH}Q',tKҷ7<ͦ{iMZm3 ^a-/S7?Dsta(Jş]r=F.vM+'7]|K(,_BB@ ݚ֧MǢЀ=g XHM0+ ^-SCIc Yb*#xm٫=9&Lb&2>iIt2e=#:#S%۷/I^$l"Sh#1Fv)|yXh^eunٰvqAx"yPVno߶#!I8,q8h):J=.\;W:gDt;5tm2鿟~ާV_lQJL8QLU[|}Oњĕ2ګWa  xB-Vo9k+_RC}$ۉ^Z2Wz!x0~L7zq$bz#o 8n ZɫF{ST! 
b""E0]WT9c޾|1{ <8<**k1sN1Βڞbk; Nр]4Iuo욍gʋ3<w[uWy>iqŰ/we6/:1az6zlYi2ܓ'ުi6 F|go!oΓ}-x8}q;J6}<7J&a#FĻan#="K@ RMjͮ`R!ꅯPQ>/{7>&6ɰITIz]=edʊJZԋ#-^lz"=bR#$DSI'13w7ao Vmg<,͐@& "R%CX*v"ʼnVsAftR% d[%^p2a_^3 7ُC@F߇ -n&-x4ybU5M:<d2 {YQ zq8Sf:>|d0/,%uzzK~1_=Tu㙄BڵۿYyPH[y8A U粈SzTNX2TVɗ^,fxa}j x^LM7Zc PoKjc.^VJqU]Β-׼U1G5i ?  ZHxɏV;5-j/vlO}:1#1 c2uDYp w-4Dd.:tު[suӟs2nD^Dd-lھq?l=teWMtfZt]T¬u׍qn܌FJS{e`)fݮ#{wO_C>"_Z=d(FO@bUī$O1o;:0sQ//R~V(St{nQ@SήK=w҂ys4Q(C\Joyv}{䋰dS# L%ްF/c  CZcZJtiWUO=wҕy/>;3cy 3%NscInF鑗dȺNEA[, " =#ԋ/(;@ꘖb:sec,iy:^wUΜ .'J 74\QW/x IzE˺l/\^DSogxJt4۝j2󑹿T<,dM{)a[/62`-\^8@'Q D=pހ(>@'Q D=pހ(>t6+'[4MS|]%K, D@,4   O. qRE3rVM\ӵ!ƾ V>ysƾ'_rt}nl686/_f9on/6u!yIN5iSrɴXed? Pii@!(%h7hZPtLHy0oi4Z`U%$2Vo5rzD-ac`} -?^qmQ7MQXr0;؏9Phe@ xw^(iMd65kzG[hʆJDԻb0mɒƥXxﻫl1/c{kl 26Ӊ]A&`!:z3b[MQd!ɼ.k>>Pỗ?o kdo'[g|ŭE9ҨX@@op"r9v3m)*\ec-mbRtTVN>ӳϵ{ U %#nړ\Y !H=!2'Pz1v=9D͛ސV;?L~⿿wiU{Ŝ_ul~"qX @BD}(6  M`gԷxwƒ3bJS8x} 3jw^\{/?=x>tX6{6ƶts  X`6.6V}_lɹ8@B@k? @(5㜀~K絊cnP4* ^$z/DV  #PbSEwWA  `~R@-ZX$QR@-ZX$QRr<ΰ%Klc?\P[$%K 6>D@@s!-/^4DܟKQx4tp<eKu!ƾ_{F cRLW>sc gQzی|b?z:x0|+ ]iTN3brq]4RAl< Uf}֯=2ԇB+  gA G5N>vv~˩; /ޮf&ۆ,^xJZ늖ϝqU5} umu\k"}۱  B*E=A@'6oYf3=m,y)QkDSI.R&z'isVۥ}uM9ʽ\.W_OZq3Vehds6E}@@ 8ĤجRIKfMo*rwvܜ~pePI|i[pbҴ?xwZHs_4<!\٭g@p@O}p#j '(o)q3VO},c 7!uY2|eźs,dž-,9*-C_]T6d*~3;\2a\F} ՟З'gkW"SDCPY17ʍ@G@O}GP5@@M&&lM#cN    !E>Fت@HFeA@@@D}0*    R CQY`$ت@J-DJT4%QU(4z6iVA*T0LZIѶ,% &0L@:<9ĭƪ "M2 1@Ӵo&Lulߦz_ p[e=~t~d H KV+R')i Bwפxsr|Iv ;E)n%e't{1wRQiSaqta;'HqUAORѴOZȘV-@`vCA=?.sl2~zT!1.=G_.Կ{ $Ɛ[w'U(;2sDN?>v_g ~KZFA@ @ԇ\ M@9˂=%A?i>izw4v̤+*ftiO3O\1F F(@9t<(v珠Ԥ8{{Nf{ w"w];ŤQ2^C3[s(VB֓xT[s@@ @ԇr >#N?{]X[rϮ竌=JK.(.S5~hޜW_=ׁ.yw&(d xo7) 6:&J{X`cd1oXb@@D=2k:6;q}\b2My%/8h{ wnOdZ>iI8FGS4;s^#}].{rx^gŔ_B9٧;A[lZ|Ɵ@BD}?@x&M%}U&液pϳLU?I3娚YRf5j?wcFz^ᨢy/~.(*d_t9o;Tg h&h}ҒM%Qo&qDW9R^Q GhSr_z%S9!\^xnQrT⪚L:XSb.Um G);=#Ғcg}5w*cӯ.8^Iڸa}CP\W+=GNҖ=G=GNqqXڛ弰PMKq[ Au@|C@lO=WLU=ZnRM) 1M1mzLxjMf}]JW\Vǁ9SQ ry}˕%|kFm`Ϯ9pWXB8@~د{)x7ˀ"~g Б ;6 i =UMg _.4o7eXna&;|pήCշ]NHĞLr=?w־q6?zo#@f̞{>w?tlsӈ=hHTYT?%m[v4+2og?,/(xDW0"#0H{\U"#S 6O;8 Rhش0{7c0koMqh5/9~:6NN@EFQJ7%|ߢq)xiàX Mnu΂ ZC@-~9"s Z+/jvj {3(wlV)3;mnEO\1m4wr  _>=|2̽'9>뜻CarfAϲ|/b}??QC2 ۏ/IYd$/M*:y瑁S1fUSsޜM} D] p K\OrS>ϯ 1M443cA&U֔xRD,xliN195E+ ͔iQ05gW$\v(9$s~9/1TH^tVn!ylmz 7\W=35 ԹC dXSxEc FyiQ@!BWW¹(nK9avӹYf1ߍ#IŰ`f2USrk,ps/0x<u~QZJlmĩ59R(9>ZL("Nv6L*J*(.r/.9td|\=ť)Znlƃy`@ݻJեLg?r@ `18_P*,/j.u,R0WR\9&*Bw"]mY ** G `ZK E'p[@^H#gjg%HiڱeKΔJo>,?Lr71 0'KiPpT}O^ys7QYν}QϽUSzwKVYxN}%QLTP4AmNк-h27)li[8 |C=\AL`#Ͻr]G圑]A(bƜ.5+'ylJBkʸ}(16iM0Ե#MSNm޴0-[uDAQj-<?5@9~ UH Rl};4䍔W5)MyX׾/Xb;iqq/M. ٟc]-׻+sy {o\y@{@Է0֓o\Jy]tQlT$НnlڱsDkG]&MGQXRF;:f//$~{eb5]Yp߽㼱S!#D"y9cIz/F;#TYЏa4|cmaGаLϿ2ly^ȘC Ro)) й-AGGvx9&@S~ݲ;2ut>t6M?մKg'hyo{ϮНW##욛ݛ|F^5S@O}5 L@єB\nut|0͎2mwv6?ҶԿ{jS˩[r, zI,DWT:I<ɧ]ʉ#>ȣljtOl66gDJa 5(%_|Kdz "??M7EE?D+6\ֆzܝ.-Y-m{7CsA9 !vVR Hˢ<4(75<68D_ *t[: Fs ݯbu"nC2fzPf.qj(zo^c*/{C:9beo<>~n*wiQX,X ؞J6'*Rl *{B;3L)iavgnΝ{ےל Rh{o}:48;$粘'{L Y0Pti2AO: צG@. OB?y VMU rGa".yy`c9̹Tqb~\!ԫ%h@Qn:JxNq]&bւbj&Cx}fHgf<,_QwS'SN:} -K;{aL载;Z@_]b442Ez(4E1z,æGĔYk$hڽmfw[7"9,ѣ} bx3EWiJX6K6̀wScNU tX<c1Ϥ~vc`߮h!e[ Ӛ5+VM ZiЖM6?~Y&o f@VEp\x: N9-:bL+XfAڭx8?;<]ZW. 
70&P.cqQ`K҂d͵ [AH,?M $9 \PݶBpUs S}U0YOxj)p9lE"LU$y^w`to,3 v:fڬv n+vBKNO PfUӵ'i~B .<Շ-mg98tyWEm7ZVY:t9аNus1=ڛtW$~^:䓚K"x*QT<1I`ɡ L8!:GK\"L,`e@}{"me>LfsOm#?ZL7dYV7 WM*n;-?~eֿ0S&UM!B$s繱3;3̀20"kgA3$_,=y0A0+!POIDNV{9VyMsWܮtԿcKzB3G896i2.uECyHكu J-bhƌ\f!jמWA(LGr`L k~[[~vLh-$,e1wXp-`s'ٰ+_ =k^/ܮS>޳L+<GLB4k,.5M]xeiԩev{)(c =eE 'ΈC.`c>jDK:OqfnŹ2Æ8֔;@ӴBz{._pc½|dL Ə|.Y1mf$e-mi?X †h68g6Xm.#`IXvjW`ImML֎8櫰Ix"yGsA:1 W ysni@|_[W%]HZ1//XS_^97 \0Gfu -,m0^E=ki )*-4+ѹMSmB.+L2ںXn𐆼8ʹkga΃a.U7cؚv/͝i5;!žDS]Ѿʍ{ |34X?v aBJ!mF1߂ݎiʔY͍qnRWmm ijo(׫a>/K3Y4SޱnF91M 3>?gY@Ɯq`@`cJ~ѯn#,SK)ThS/Z[^U9p3Z?(#B@sΪb;F4h ;M(yfc#Ӕh@N5MDКE3,cqEeѽC ,>e.L'?]ݐl_ vLξjpHD}= S_R&;#F$;ƀx#fl?أCsywf^gX `1cC6{ <ǻ.<_șϔxM![l\9ogA(,Pf4;3="} ٖ#ϔZtܳΜ̊&PX/\`#pHG8މ0}?~-ԓ^@IMs`ܰ=͝Ҽm͐voK(>]@KKo68lSoPqtBǒ(`|6r,?3&|M 8gkw5r;||%?W'|-]U:iOd|$ex "҆`[4ZG*XvRwBxf}ٴW/ -\%M/B%68+`^dVʍ mk @B4_K*:lfTy\Po#`dkb"ިb*TJj!џ I՛+ļCI5zƛ8䗛vE J -Wdٰ 6~B]Z4pq#Nwԝ=Qs.w 9}EAJzuj)<4ZMbQ-r,ϪSHlb_x}C|]aeJ\TS .J'+ g[vu1䡾JΒǠApL]WU4om_y'iCc~HH@0ms MD(,>g{SNsHX.@Os^ 3&;'deOBzlt! !j(kuOpr!f'LkMJ/& Yp&7 Cw}sn+&n4+acm m"zL~F>[(/nAs=F]27.YЅiXP8=n5 sϳNe nÐ9B/ze˄W^I tn}˾wU,ggGOsWhR36*l /Fl~*_`Ǐ= dMC7 '`bYdd6)`h77n2HspA R-ysV\%0X<g,;I|A(eiK9r\C\B=G9wiU9BowhQ\̜-^`&UmQCTH]Qii#isrGhM $4dAgtr+UO+N#y z4({EKlvsdiҨP /R~?jr#q T/`NWnc'ϊ݇Ϳ-{hp 546dKSgW.#I[l֏kB N-N=֯۵r0 Swfb`+UiC1.e!'1UֹuMU j _zˡ`#FI2+xU&VU dV$6GXp QE~-5.4Ҿ!. @or§(6UR{^ײVkV&\ӿM w(zU8 coW+8|{ wW%DZ-[h ׄ Ru豫bwʰG2!qa(!C-K)k[eЎƑץ!_>QݚZfUZNUP=kf ΈC '#dZ( 'JQN dL @HDuW͡"zbɚ} -(rA^rL8o9(wwhR >av %X{!`"" {"ctq 1K:9̑* lc*몣W:@'B}@"08"h6J K`voEE,l>`L3sNo1UM13P3sChO{2\&q1aG]B5v 1[tjjı@ B}.C`w°Q.&PP0367̂~3}G$ ^ôշ nF w9O0+1/^۽Aß&",-hrcEG\,99`LL23p%ҥ7rv[ڍ{xCбkGrcǗNMT&@`rL e6][Z׶]K3rY|1ۥzj;K4|O\w&@vZ 0&P `lzu7quX%. 0&l)`%A`H˰sՃRo۬$2tb"$C_T( 1&PV sUO\b7un-zaT#/Y͝₷JC?[':^8"9[&@ B}L Z)%5WzmEɒӖ3IωEn6].\"cFm,geL  Ɖ(Gܬ}-;jblXzY*ש\*6:$C3&PBƍj$_Kyl'_N*:lZ@ҬZe{mۺ[47My@if]W TjU$jVʢrP =ݥ .p8{!E?-$1O1-UUh1fs872&|Az_P<(3h-!EQsmwnsګE ?{SeXq-+WT?fVqذEP3WG~#u٠ c*fU7q04| 0&p`Kqm];~^fzuef d۫&k8B8 =blߗ ЄTgj.Av=7uرE|xe$vt}TɭZꆢI*ğ+5wFHE]8O&@E"B}E5^yˑz]ݚwkUkW kWkUť%OKw#.lq MaNЌϤ?ncqFB"3`7Ā>]ĝ=[1--f.KܰKHL N 3&(bL b [Et1 ՀO}kukVSd@?J xu؂BxQ.w=8 '^'N/d/LJhSЮO糇3g6e<~CV⁾]{I,_S[QKb̴1q3`L  Eg90&P dnN겝!v0 i*nXY46v4G1@ v>Ch㜑{Ko]w8 %^^5?rg딾])<&$oC ~s)*8,RtΑۋ΋ 0&2)1(&JUWZM*5U3T= 8ӅKOs`W`ZUCYSЈu~ZkRѾh٤uךGĎ}GŖ}GN4LwLw)`L ?,G3&!17`6Unݤye㺢Ж q0J<%=㧍} CF ~M KU=8_&L˅'&(~4nSW5Seo۴:^FN*Vu("0kGkLRU:@Vxךԙ Թd#Y  |O j}[#G/`L /,E1&ٿZvvKX+A^dg"Ob'#BT\_Wp!<&(E,ԗ"|. 0&kx[k. T1 KH8Pӛ:{ !QvF)gt 8WR|]WΟ 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&J,K55n.] l ) EQ߶#)%Y(+4"ـ@eብQ~Y.Xb;^^>3nԨe]%KzسaPVB !48nUUug]K.pgSM^S#>uY11u1cKf̨~B.BכR !aTX$5hɒSi}T)mSF;&_ *"+; )vfĔ k?Om7$"9RIC gө˭`CWEG#!\4v" @]^㯝q2U XƇQ)ƅW~߽,0DQ"&V@KxGԝ=7\21MѠ":#{(qB"_a>>>|\R;B}4) \*P?NsNŮ:¶u9=t)GC0 B|ƽ:$TW?*k `p#)=4yp+l6Oo w|P%}vM=4GF&<7!g7m%JGB_-K 8aR]fE=;{Ct}{꽆4P w)R^*'lތw]∅wAM(|WB䌁cpgɔƥ@tT.9 8Bw-8vK^}WD4hS_#H!tC48"jWI{)"dQICήŔzE PtmKQ* OvϠY)qhKvy@>)|3Q\ k߻5 (omK婏"^w [oIe?71=޽QAsLhoKO~ S1@z=?蟅1Qj_Lv0#FG;p;t! ir|pdR3 sEu+g^tΜM#dN_Ns?-,8ggVT7j׋;RȯQS qrcK~ tV7t$G;. 
T~m CF4H?oׄtkZWaf ZR1 A9]¯3v4&Ц30C f`ClW B8gĝWzSQ0EJ(2{ }qkrlT{89U:LqV1#Y .1G>}##&8YJ?(wMu \{!35JQnw3C̜9S]q_2+_Y,29xh%]T\^5w pY͸AdK[ёЈhw]I OsJӷXÂs:{Һ!_ ̪5dnh^0.sgc2C#cnt}N@B]7M;@y 4˥~0=%gԮ n}1maN-߳Ə|k;Q/;|{ vB"hk4~869KxOhG|~-j0p&.:Y7$"jf9{)Ӣ"&[yX׭j}"'`/P.p]=gHG \K6-}MvHstqI*J^xv;u(BL^}xX0mR5c"*)B>YZy w6v| Fy=fB}9pgk?Erɇpk.-Ίe ;ͯ5 ;7muѐʰ`{AՃKA+śv0R y{is?j/+!ooh,F+ D"l<)t =V%DYׯcL:l&_qm^29m|g]4CΖ8GEzѾjZnMA s3N2c3־E= x.\yV<szڔv ]^`.gS{U'jkcl3p g>'ҹ?{3!=wO*1AmG7+B_:@OY`u]h+^Ė ~"ݡaJOMa`UL^c~}QB#2XT|*R-lR7kQG^8]翸IJGѝ}78($SAV[qʽޕ.&7W:t{wo5ve5]4NsGjB:tͥ;&|'4A'zi҆Q|D4C9NEPGt)Mf +@DFtAZoG=( _HcF*Wny)lt}hmg ȧ&BGZ5LOBCk`i88P^( VIEC8x⭗֘x. ~{x"h {c65tꆱ p/#n՞5; |wTKh %vo|DŽ"ڭ"M f> V0f^"0{n7o@iS)B1+tx+|x fLwvIuiGՆP[~s3&> -e'JK͗s1U~TEY3\#ax=|%LMK{9бmӸiQ6/<"[ż3[U<4$yϨ7AKӡ)d@g$ E[3fK^ϙx?ka &7Y~op1{0y&C8E}23*pܴ1sf,ey6$4%3 B62t.E3볗rP8z[3MM- L6k 5?J+ o9f=@UGLc`j)n'iNܨ|::.P.&=,ϸjG̋ Nӡwq8dSOZqI@"2ܫ̥kE(1YeϨ7iag;C`ghOPT=[%z [e?-KyT\~>C+%I=]_o $u;ÿokUlǴ~tAJF,&C50cvMe r!u@/ޣ!! q0můǴ4^s@Vw[g|{"Gaq^qGUMɋF:jkJ~\7}wxn^GqoIQ$Oǿu*_Mȳwv2ְ3>Qdx޶5k:@P_z ϴdUmu5örüx}qL_u G3~{љynᦝWc/#%{ ( g<1L}։aM-{\nj,*RM] F|G^0Y <抆WVn`o2Zlia!VLg~x)Ãi_JN*4{|Р'|E>Xw1tad3S9g%$pmj[\Q3x[iz6tlH݄(!F +'`Ԇѿ (uAo\k_nE[`ʭo6%6*rם@#fÕ)spn!4;zGMxT.MxF|g+6>ʛg<[EMX( '{Ql]-:$6O!N ,(iRkέ/f_Um.}ѕlhN)nѰT6i!kd+exGP-´Pl~IQVF z y+TOKQ')QaY̘%aj1-R =z?Ff9rC 4.L6?a,Bi7Kͽnil 4KqjABC Jqwpd?a0u)x4rQr}b [ a >prtCqOY,N['Z!&݅fg|qE86 4Qlo7ijTL 7eJFa< QA2UB&NEf2ȾΙAeci}mt( 7AJy]eog63i{'Iһy~}pO fiA𢏢RULw 7pz[M飼ys˿0HEW}s5q2fo[ا Iaʷ;S: v|*Ypi k x9\뚣idwaVרwxv~6cLމ!/.A7.}$%K`OGn2єK,j:m" 8p 6D10>up$Ah)j @/ IDATԇ`R!*Y"]Z/sA_xɰsh^>&.4׹c^ 𣹛*].ہlLa&sG+1>90YCݻ%O-' lDE'zFjNU/{><{Q`+LiRt#}'P+_xumd:{/{UfN׳_C?3 %Ӏ',h43m}:YFo+7NZMe1)C8CP %.1x|=Ҵsd_*]mgBC̴ǪoICmG&ڍǢڔBOB-`"h&+ptc_#6(_9| 0|PoOb^<~E-@V֬O$/-osF\tw٭V]ܒ%@8u.NjdEQBGr:޳ 쀺Eب߭CΆZ%NziϮVlʿ:·X=ÐMMsFdȧf 83聚n|d˛&;7XC_{dL.I1G✑{>OM˪:L\жK5ê#׸k~lTxS( -#D:_q*Nh;0vmPU厩c[DD ֥l#s1v?*mvUG١ؑGPiQ>3G&P^ {M=^uQ@8e% q]GwRG"@V7=|Fk+R>@Oe)l r404O||͂lB/ϼs 'Z7J)6{ƙ9st)UQ!O8Tc ]i33b[g`}d΄DD+( ǡCv Q}Oݖ×Fh&ucC[Ӝ]ў\ȉ/ef{i͝s(O1& D1~6-ϱ+uq0ѠQD˸ w'Q*}a<kS͏69ߵ-7.ܴs,x5¨7| l lKCqkN Kvqiq hHeFC{Fd 's ?Te@) LX񰚢5Q),K"!qM0mQk!}34~hšsIxA;yѥ9 H!Q6ӳ\|&iCRϗ)dEKUܺuА7?V@ !ZoBew lI# yTkp ʢ [Pz8bdCmC60 %MAxٳ10,gߙ%f#H4YZӮwMQ [wMr r>zɪ>)[n^bpywIaa' y}M-'5ry)6NΥ|GYC !+5UN$p=@=,.C!/)MEƔK4u)]jзU%p58ɌH# Շ֩H!bgJV8!Ծ-?xt>644ޛ@A"Uph&!!zӶz|+ym=e7[1d"u/eCav_Mpfxs lGK^3ቃa/2jR\SgaT6T#ǶY6f lx8G> 4P>pcAͥ5׮|J4 dwkn]ݷ3}&h lQQefRZ͛IL BB sS"/sA.Zow ~6-F&Q(aˎ'P% <+p LCa>u:0n6"FP 2$&GtGL}^Xg8ltƁ 0 =܉aC!OpĈL- eP o8:}}wtѰ̺D& x4Q@#`SBȬZWNuhLA=]VYz32LE~;|sXܺSP/y|CTxy:G&r&PorMߜGP}3r? 
ϥ?hjÄMyQ4=Cx&"6mjnmjt.4!pC[&$H=[>$gFǬ~7Tm}C"fhuL @[14]ˣ^C*mC!H@بQ,`eѬ`8ǨCoAa]{ZiN[\ ]Bls]+z"O}Ϙ Ĕ0vԮhC^0P`EpQOV>J`DQ!qHݭ/ܓ_;̞Oh#S :H~M -` 8&8XU3+UsEx}nop 'ګi.SFGPMhJ@*I1Xy fÎ^cי`N/w`~i2C]v=uUhhdLˬ9i*r[J[}c`:G&N->@A P_ێ͋3+( gʛa?X}=Uӓ)$#ӎ0j]wttf Produced by OmniGraffle 6.6.1 2016-09-20 23:42:33 +0000Canvas 1Layer 1Compute NodeLinux Bridge - Provider NetworksComponents and ConnectivityProvider network 1VLAN 101Instance 1Linux Bridge 1brqDHCP Namespace 1qdhcpMetadataProcessvethvethtapeth0iptablesPorttaptapPorttapPortSub-Interface 2.101Instance 2Linux Bridge 2brqDHCP Namespace 2qdhcpMetadataProcessvethvethtapeth0iptablesPorttaptapPorttapPortSub-Interface 2.102Interface 2VLAN 101VLAN 102Provider network 2VLAN 102Provider networkAggregatePhysical Network InfrastructureInternet ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-provider-flowew1.graffle0000644000175000017500000001075700000000000030407 0ustar00coreycorey00000000000000]mSȲto)&8@[Tɑv+HeX mif4M?tO忾^?qsun]/^={_;90ow[[۽^om:GoONhckk*M{/nnn\ϔr;):J➟&Tpi9\&o8 :g?߾߻QrA~ }Uk/&?-+yI/? +_Gez!|5..B_(v !1QE^nλ0Ht|4['sɨ ]qѹq7m8ۆ7!ɹ3gTUJK8„VNaPtRnQ~4drb'm%G&OىĻ)Utp;0Q}+9&ˎ}Ipd40O>F0jzY铞ׁ& _QcٗQQa&ծ&ƛ9X?zgjw^:CnīV л=xa tjz۫U@e w~Vt<6É7 V ȕEq !"0"HOs& RzM;i(7~?o(G".[0iQ>889r * Hb)*$(aTTP 6 aϏN_!Q6PELXJר"\1%.t#"YC]Źi@WםKmi:BS[%>iduzY'Ğ9I ayxv.>;W^S&?A#(ϳ.ߏ̗pgvB8+}/R\%!# .R?:ٳ3}qE} ~D~z'L>%\Ʒ)z2LuH)? :/R"%R±@BDcT Q1,pI5p-ԆS=NT+oWj , X s'H:a'UuBDV,0`I99ߨU IB'%RBTa녕;qɱ KRu%L0.8cAkr1uA;)xFlrdAĨz P](Zq/X .f}<8 >-0%w eqOm x-QT%b!$( V-KŌ~-^zWMjP,_ tn0{savL^h =u[T1j` TB7MrهP}(d>e:+XV,h6gEĐv$Ag;(yNk3ݵR8gXkB@1mB`s:O`K|%<׬^zkbzMkg5CN jkL[fkV^ղD.L" SdXt7S vܖG1K)TͲ0L@cAP(ba5ˆ]ҹǩҊ$-8Q{/m`dȬ >^IԢƩ}fZt-(Kp,l"@ *֏8 DZv ]vv"4UDK@.kBD2%!i*JfB!\5H$SElkUXladЭhVɼe!kB6ʲ,TdDkdB3ŧbeLmiDr*VVc1PSio,ZmF\Ԙ = )&& .ܙSj)9w (_ +j(_+jU N S\"Mvf2#}zm@2]aPw4K_JY D+$k_6۵vEӇD/ 7^xsou0pЁF{V,l@(R`JmI1IzrDR-@M|o4 $)`( bs!VHii }%6M+dk6@:{wx<|85+j uŹf0Uq28ⴊ*5Po~E!k6hikp5#jDN>ImN>6'߽ۉ{wC)ULj7DP 'J|6(^Rpcw7R$T,sPrQ]0v6L҆I|?._V{{(bO4)d͝NxNQp~mUǜnO+UegeO|@r1inE[G}i3 }?p.@F^;Es/ t4 ڢ|ʟ,nzwI%M6pz ]+khBVyiz8n`|ԟ&h@:* Sf 6?GY#u;/a=l", E o$+DWP' N-#F|&f~'{WW ;;{yuC w)ۏ{'/_Û_gw V?c&)tCGcA?(k,)䚻Dr.$L2نv͒7BRIvGD 5 2 1 2@IDATx]|ٻK%TDE{{"bR$ ծ{Q@t ]jo %$䒼|vwʛ7)oM #0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#4*dJ-'6xtl_NG>ɛ*eB4(ꥧ;|Rz2u4kֽM,Z I퀔[22R&㿾p0DԞgqW~Nz@)"ߘNSB}V ܭ ~aZY=vʷ?_Uᖧ0ڷ7cZZ/qmE^]yN ե ,;UI®O_mHQH?|d}B0y?HJRQncoS hWM믔H V@ siz*oB~)ޤeT'!NBG&/ۜ!sfy& miIM7.^_ WOESG'Z~::{WØ )~Hg8bꄔb'45t|j铼޾qm^Sx\5GY$]5ɄE#JOTB3G%xT،3\UMUyEQNsJ0X(!L &O2USJ7ү" KpJ >.SnU w." 2b4eбǦSU*9utjjPbpRh,{%d'OHJj{RNJc>/S*L}dü4ѳu+F} LsW0R95-eHNBhJj,T!~_tj3D:sб96hR֕%.©N܎]"1 xk?>4_?mwqۡOyh(Ñ ۿL,t= qly'm_+/炲#n`Ot̄߅v|"sW"=m>l5v9=8FvFOKK>Kc6GQ(_ C]}Q4Anf2Ғ/ U5O)׫;Q[<28Mepq)B%|M\ﯢ`s !FCN4}>+`j-SX{qGܢ7Q5fM=L1b3?`qTLٰÄ)cO 4%}]wF? 
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-provider-flowew1.svg
[recovered figure text (OmniGraffle 6.6.1, 2016-10-06): "Linux Bridge - Provider Networks: Network Traffic Flow - East/West, Scenario 1". Labels: Provider network 1 (VLAN 101, 203.0.113.0/24); Compute Node 1 with Instance 1, Linux Bridge (brq), veth, hops (1)-(4), VLAN 101; Compute Node 2 with Instance 2, Linux Bridge (brq), veth, hops (9)-(12), VLAN 101; Physical Network Infrastructure with Switch, hops (5)-(8); provider network aggregate]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-provider-flowew2.graffle
[binary OmniGraffle data omitted]
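The scenario above only forwards east/west traffic because, on each compute node, the instance tap device and the VLAN 101 sub-interface are ports of the same brq bridge. A small helper for checking that membership (again a pyroute2 sketch, not part of Neutron; pass whatever brq bridge name actually exists on the host)::

    # Sketch only: list the ports enslaved to a given Linux bridge.
    from pyroute2 import IPRoute

    def bridge_members(bridge_name):
        """Return interface names whose master is the named bridge."""
        ipr = IPRoute()
        try:
            br_index = ipr.link_lookup(ifname=bridge_name)[0]
            return [link.get_attr('IFLA_IFNAME')
                    for link in ipr.get_links()
                    if link.get_attr('IFLA_MASTER') == br_index]
        finally:
            ipr.close()

    # e.g. bridge_members('brq-demo') should list the VLAN sub-interface
    # and the instance tap devices if the wiring matches the figure.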
BT1tH*o}0MOH9qNM>̗̅Rx` yy(ۄ!Z5iܬ u.4z &C|yy0:u]L΄T76o55@gMc09D;46Avxp  xUzNl#pC""&&O9[%"F7DPJ%̌y)a̙PP8w}e^"DQm{ g$mtS]7]h M:~|KzTGZքci:ƪUߍ$YPj[y:tp1 $?=sנ׼;-+0d*JҐ(,9nC7Kpc}N%S=cH{p0Ź9}xfN74>D$yQUYa~zr3tpn´ ũ\}vdCpn7؝vFPozJ^Nd{#9$ lΎۨ~[F`k+4τBUsHM^-qP02$gSܿ۲K;RZ.XI -T\{Ƭד0NHc=AR±:n̤?;:J;epfj\35?1=!7Meo:4 r]~|r &ȍA-{G[1┏f25 |P~B{24k|?G]%vW(L\P r~K:V݊OQ]7j!ٙoZny0_D'B>=Ӑ_[ +L8u c@r77>\w~p]" ڣJRȟPCّ9㑺忱?q7vLptGҸ!oѷF*ۨh|:m_ф ڥý o(TeWn/#̱RyQaHxJGb{z[}`>_9uK$ȋ^'a:im4kPPOtmOAZN'E'{A, RGLIvKp4vep.ԺgNw#Ovt0D9B:8h(AkJ̗ps `.`cҬi<ͻ`F75CS PU x@iTegHW3a0}?h<>jԾYnLz:-漖B7Yv#mwzKѤ@+ U?dBK̾zβ'Vo®Vֳgy?ӳr&AO;z_e/5!QVv[)ڙ/j6\)ߥw] 3ݓ0:֪y^@ǃnN0A~~$ݥ;\Yz_E*|>0LU =mg6%{ @d/?vwUsnB-ΠN ޶3_cNF/ek+S1/PUMn#1w&;oη1_\h';-q03z=:y,3nV\ (yXx{{"IR Ի,K~Htj{{$u=ق 5aD.zlu^ DB_ўv/hWCcA k8MMzVU,?'vJ|#]mx-ctѕʆ *4/f5(_p =o%@aBT4O1P_&^:mGe?F*1RCmQՐX<=' gR|1PȄg#8ALߕ]]bigȖ~n7/ٛP~G/4 m;i(H A4wl owrJ*UE ,03fWs*,w4]-tΐ>syD'ߵK^_b`oY~GQ;Or oЬqg82$(3|匽џ`h¸r'4SW>\ӺQ):1,ҍ?`0vI'WĴporVA%ոBRoN1UwK $|^T2ԄVѽP]?6P@ʾ9&}UP󺰷 GG⧃;ې7 u`X_@HO%)WgHM;ꚡ C0r;w(Z{;i^4Ӳ3 Fu3** YHDŽcT7: Vbڍ"Z|уpk-P7BhВib+5O?.op4Y`h}hO V"AZԑѨ{ϡpOyjHZר :hx ?Gv%%rH,T t.)Ęi4i;:9F(*45e{<0v.3(q%)N|ʛcі/Tu%?EQ=YoKQb&\Zz3(kϣ74PU&%HSn37]j` rd'gfKt~&Ei~qAbYډ5bzSrȓK:/3Hy/Ib5LUu/d5 m48kc~؟Z&}7 OF{\%mJY]&'ri&ڰ7#Ǣ9 =ڵCro="I(,ƥмpĦ\^NT U}+L[T,Dy=ID Ʈ?ԇoՌ.$ u?3IQ RGROٝLq!ծ/q/L(J@'mPM֒VǸBJ ,Y>4EM+xPOjx_!j~1?og&Gyʗ` sj{M&f"XBx9eg,I]&Ĥq~͐OCS BHqxa\LF'2=kܩ,Q]y:3r2t݅!N'7 }vAfTMb{@xr#\lCz J@zf~K9?Hq5Mqi/!B5d^S\Cm8ߺ0u`fsOĉxlZ8`a>`%F Kba9=zZph& `?RBuJ'Ƨk0(}ؽ ]+T_.垟=0yq2}Sx2RdO!I# ΁ jy={R#z7ڴ`ñ7) $sJȯCM1'C{nb!/ҌNAӑ΀:w/2 mՠlV \y,%ȞtR {?sII5<Ї^JJx_ohC4o%ZY7=vaZ>HM0g"3Nv5jڿ(,LgMd.E9l l+ZI,;_C_EZ7}& ͿN\FQr'I.Wz6AeXS0Z'wXO_M˘ʸR% ؆g w4{Ǎk]Vj0[ cHxBҸq61˴r,?lH- јw^|J['IHs&k6z^O90%qꂍYðԫ4KFRCQ&pg3nbgtV1 /Vwkdo0Ό69M< G~UЉ5ȳ 4gO`9CUoS>lGHk) 'BC`CrǃfLܺnP?S>:/EE B_F@'|oC:8,(s#]iIݰI}1^`{ߑz# c 'h!=/vHvQgCfCd$`Txr`?0 Y"-`tJFu7ZLƧ1JG+owWxQQ;!8$/=uqg ,N@~'b)\EqUOXe}=κ5 H1 tEtu87B:D?%m,L}/b4E:od>T4%#Y(͆bGZ '|p > &lGV8,RϏ%# NIy).T/q剄1@k/fu8 j(t,OgGlԯ*p~)]E`\t- Ň4Y= dÉ  IySҞwإ"'"ޤ ʑM]"@ Ppse]+i 鬫JJ'1fUUnHhh'Ƙ)eT]h蘻s?g(u5uzIpn=w-n ䷡Qڤ&pwQ-gԝIblC/p駣' xVڇ FaZ&+LGGTAf10yB}gԆ@|Zw?^#0@QALO@=EӉ$R,XY}|yl2F`fu7fTm %=8f#j4*j/eawwF`bVAr0m"-7(ֿ6B͌2s·+n3crĘ1#@`@ )I{7 =ji 8t y0!? v}bSƍ0#0#0#0#0#0#0#0#0#0#0#0#0#0 ܠ/4q nQ^|>uNڨpq{swh]W\DqHD;)ƺ ({ mCԆG:[D+>zzvh/N:WyAB-\}@ P~9ft&5"}␗u}c(UN\QV\XǪO̹NCnb>EFl6+=8>g.~Mn'BgᦣEJ5MI|k 5M|Hָ4]6`x)vj4ۡ 'F-zRԢR7zk_QΛ^/GCߤUPfLpҤc[n< Ԝux&Gq.{wt`W1VY N[1VϛpWAE8%.': E.ǁC ]0Bo>)jp]RQ ]oݢM͏Viyy.csq0+KIM gu[q=Ϡǟ0Tmax| BoB;6mpM_ jk &Ȁ.ܷ$v_ mHvV|OӤ?tt_}$}lTs@vP }}]T~Q1Mb'3ƺ̽6 ބK]XU׈3pA3&?@hCG,Ճu>6:-gJd~Q1ɮ;:&楧{+"ص{.a-ZTHVV7d.iY/?;0O% ﶉH=hKRqO0GCكuCKzc3ᝉ9ćpOgV:ɹJhE3_K375,YH8HSčOZfmp;}Kk.#Jɏ U qtϘ~Wvx7zso^7 C̓AJZko`͢uh[F$f{/AA-|Ьypɛ{2[~_Dޏ]~+tܛgu~e O{r?^ [fdj~p]xmWٝv$+cJd&Ee*KsG/ qR#&pEs#F.M*.Q{#H{ҘΡ6Ju4Ȣ0ۭь|-eb%&̛A$ iOUj16ބɏ@CHRu˴GrRXM:V2HYmRy?PJih&s$N8s4hPPEJ*g;P6H긧@/@|ݹaU_3fΜ@Ŋ!J/QKV8 ]hOXP;h1;۲ g1hyNG{Ok-P6߇ }pZy4ߙ7}߿vS'UBZq"`W#t6]>EJy,?,4J/F*2%1szrTc~whx,a3xK;Փi͟0؝`",\#q¯/X䓉; G{v LiV! 9a.1Gww$CȷfocUko`P'Hnܺ#ourqsm{7$K03@ ]U3r'kJmzNWo ?/b掖4% گ0j?2q&:BBW`^Qd'{TwJa(;uGyjxK i-2vXm~ef3;`ۂ) ^-V0 cRr#瀡~^43Pի!|Oz0b„yׁn_giڥ/ Sz1;5?s9:U3ԏ*ҶGQ:[V+tVL?Dgޔ-?3r ;)XaQW̿D9} ?1r/"pcVn bhRKh):6і!aq#uIįgh%+^JLߐ4?0WR`,v)i]V`(! 
czdA&z5W/y*۾t9ڞ|b$*ԯQۘiX}KerAE(Pou!cP/)}.'7fTcl;/7T h]3EEMd T=o{ uLAiv/wv';=р n:#$3ߤ)Yx=zL55ԏؐ )\;[F'$+&gdl#fu%\Ҽ +Oĺ_ ٧?t/BzȄ.#<+4V9KOlR7܄v;#.s7<Uy :/4nbe,;zΧKeTTiP@WćQ}qy]jm9_SZ'd( T!Mj2EAlАQY~|~ 5j# |-IB]ǥ2UȞTlfg%2^V_ôüR%ifʣÂn@|BC0zPZJ|w)QSbAxo\t\0 %vP1lTyU\ R^zyG|W&奴 A@("ji'Q X"O#騏/ "I@"m- ѭ׋}0zqjd}:d>u\:[ƩGQpe$h B G~v~[h ԯ.OgWs`gW32J/*E\Jβ3P9+@PYaIKT3f~ &88DxzNT9L=q).t}D K܏ب!"n˒ :Ls'ӳ&|=zIAk<ʻq4sW[P'!dq*e_@; rQfՒ1\#K S,;(^ qNq|e|ŁTwf o0 z)r^L_ژW  `l\wϠ6Aa"CM+f1`?P }x+xm5J' 6/ *vjYڞ +952 5p|8鐣ⲹж-m/<"4ߦd($49S*>` 9ݪoj2M;\MILŚ]C`Ӫ 6y)xF͖i7gW ;K'g*gjywUGH?Vr=dX J5SX)hS.oF:=hgJÜ]VGSQǗM"4`}zN؋jo8-Kb2Yhx ڂ-S2.p|b &0ߋi@~]T;† gan~̻Zt̕+{o/2q4K Q VH s=|ʝXi4k5 m΍ϐˠ~hF+qeO[_T RۚӥV1_5-UIV>vA>kۯNԍoq~\QJlbPy"Td+ C]7JW 5H_s>C*.ZȱrGFsE+8M搎?oX48/5䟪Q;5تq+PA>nܱI5h+5\ϛq]NBq!$с%#5 }^g8BN+wS#G[PYC"Gny@aGz$0x렟^Hqwr `t'b7P("V zkx{ wy<{bA?//ҜOij)V:gWGBN8w)>BFɊ9G~Zv7A&Bq5z^Ā4h3ix/[LTPy85M&THՄnw :J?E4jU>n\k,'U=h.4DChbAC+> X}E\ڪ^fµ6Eggu2C޽m*Uy ?SGJTl~>CLC%PmF`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`FD@Ct|͐F'$Rr#0@(B쓆c1?.?ƀ= ҸWBh0#D0f`PsGI4Ӳ> @&J)g|6QgiY9xBJc9F#rM40#69gBLF:GKE<~:i:Y95B+\.:s\#0@]!7sםxuJH#OpΞ]&0'𽼜vJ #0#аܷԮk(Zh%Ђ+3vf@`FLZV7ELZT0v6\a7ߓ^1U#0#Coüx' EV#f(G0##^gn3`:d||# p#0@"-36hH:jɳR`~_IgBK2%qqTM_P/tHCw{z\Bʗ}"rgJ3w|D,{zʽ]ӍgvEݧ}ݍL '&=TKRqOܫ3.3BLKOƝ%u= iLHwSč37Vm`o•3ug晸/l;9dTu \:qϠckҕ]KEv@#*/ &OCXkvvȿݶ@b"o]FG ½ Ds0Kᠳ^?3PwCx>{ܬ-5C.Lؽ$0_.WtZBC8]-;-ѷK2*"/PL'rOs!z@ەmw:!_ِ #Ј` nc[ iu %Oh}/_HFO7ÙZ`SثiB8tgmMXyW gJHJc-o~(m"_񀙵·,o=3z=$ C,I?(N( LkJ1VWˮ0o2O>qeeo4ABTq'#+0`:#t^S 4ax 1ѴbyPMYj my[ef <+u ;HɈ,zߕBMatOc1wW@dz8# +0UR.E!s܅>MȿhL0nLHrvt'n,@z"?=`F`ϻ4~j4$iC@IDAT!%ۣdG?Eƙdμiz4TI$A BFs2_%,kZ>b(!; TLd |47?xv;ta7|y ,*c4=Xnze sj{VN=fЄ_,RZVƪK xtŭuo]SqBNe9$kcN/j,ߴFs ]^I)&Sq(kONKf7%q<#wf\{ 6~£!B.1Di`p2=q+@]y X`>t8`*'5Kݺn\-0ڟ,_a lw8giFj+Zdx[TP ʋtL,#` !ZԤ&t $Eo: Kv3 4t9Cb|nt}7u%+hC(2NZM֧L^;%Q o̞Ҽ_~%Oß&#?Dn3}݅ޢO{y" vnd?GηD 4@RCeC~pȫ3_{<ׄO1s?Է; ]X՗!' R7F"wd{0@O6@] @Q86<谱H͛{Y}zm UvmІ6$OJoе8eCUNRԝm8'sS z[t>sLǜ+Dho<>n\k,͋~',v9PdBeEab{DELL"D11`\lX-'3@ -*)P&`Fi! i7`F FU1RLFE㏆`f:٨F! # 9F`F2̀+_#0@ N`DF`01#0u3:aF`*# 20# ̀fN`F3x#0#P'4؃8fpo膫:Z:A,q!7ܻ:-{ .Co,M@Hٵ(ie |m_u1)@x4Tŀ=pyETu)I zI2jr|׫[d+*QG.ߤKM~꩑t*i1L؛qYH))YH\j?%䪮 ;!P4۬m|lKGy]=w0Cm!G0Ry;)?ezֆ0+۵h6ʅ'k)\3~ X|qGcGt nڞTX(ۯUxl:督{+v~a>п/.ߙ͗cdb]m+˙xģ+vbp@g8N:oӢ61ꙙX@45|./+mƖ5G Ang/ UN/W7? ldwlly|)\(*s`CQ#_ ˽&WXX=pgɿw,ѦE3ϿG$N>ЀHpd7q79ѩMKMZ 7ѫG+$D€[|û, qbG3O8¤yg [wCڷAï8Xd56"E\/Gt`ѹ.2&ޙHj=D'!t)ly:'rAX$>蔣q!| p,*$KFEp_;&m, ȷfoJ7_j͒ŵ4!-S|1vbO+ͺԔ$O)ɴijENmV6l50g=Rf;խwJPLiOJi}PuR(_$~}(*-IhhD[/IqSҦUѧRlM0l9 "ɪwD2W@,\Lֱz͎Y Dw{y֩)x"E#/;]ƤUdρ' ꄼ&>Ӿ4_(XECڛy헞).=xbۮ}{sO>ZhUCx2pg,;=V'ch[X"~{ MgE\6sO>=R v+'qq=2_ @*X9fC4FA QzŎ=1+_;{ㅧ;aS?N<|EKN3 )g!`og?X-.WءX,]mv}zbsj{ɽR.bd+ |I ,C\n۔ڷN&U:qڽ{6X6ʠ V/"?IO)nѮU kg8f|%)TZ6p5ϋ̗,]{ n:_ٽ#hG XWU{͒V~&-"S"!>6io4":$i\p2W o`t>vP *nTSl:]4uTBb@iSڡUpƵX~3̶BrXsUkگ])Bg#i e8^ ij)Ҋ+ E >. wLY½;pBtڳںEN:)=4MŴHI6~| #bof5ڈ;/\#^dhm'ںMŬ{o<WSf_*( ۥC~ܡ(=ߑHAձEb>;lK Ak Js\nԶ`{dWs C!- ,ƅvtևEWLh.beN@~.>Τ8I1~,rړ] XͷQ% RbE%/uCTv6AsŤڹȴo ?;0Oئ9.T?Ar0/b,2}*_X FϨD }ш38bHT᩾U{goUlڶۜYf/ UīNt"B#n$܃ŊҼ- RIR]ߢkusOIN0|<ۓVY KJ@,Pzm'c .Ybog1Np /$:E\5 V}+_xx4x\}hk? )$!FHgHGcO.M4KJg;B̚ _-&9Qo1 Њj2m쓎?߯7f/`x8]RU_yk+u~bjBeYSoFXi5J̄߫ߖp! 
fhgDjMoyoD3a;ӫS^tS&{Zĸ [hR+ҮRhn?Ujn430@ ]W).{~ qq:B6#faFt=a?0+qzF{saî=G~|/5[qeg?@\Ɇ`Fp+#H/?-YQ4tf3RqLj3:Ԏ #T3a7FpBW0K74?L/mɈY3#ƄmF@g\0ŏpk& U],>qD=#T #E`=ךn")Ժ04n?nLf^:Bw \#3`?@`F@Z`J6fJ`| ~ah!ЦEs1V#А`ܐJieZN@e2ApΩ2#4q7 gFxNe]Oz}]_W]D}MKik jtQ{Q[(̋8ZUd"59IJM6/ֱPlFZ%VXqX@S(v/eeE_:HʟN**)%^Q\Z&❢eJhVqUCVsۯih 5W&7qyczb$F̦:|!ISb5M_*>[Oj(OѪ J.sx1LE] _O,Ha*c>sۯ)@bWU5H>e~\Ĺ> BG>KN^bJJ/-w-Nq5D^ !`Vhx˟P[QISFcMmR~mZqqC*pAE}ػ,G%b`'BH+l#^uھyH;ѺENm{ܹT쩎h2 j^*_ ;_셿u0!ߡ}رJHä✓z UPޛU|B̷>˞rU_O8+,K)EUd1%&m"Ⰰh?Ĵ~;w] >zjfsHRHEý֑PgCV/]&|$h2bܹUh2 TOKJKUgߗ3߶mު'AߤauohۢѺLDt]O̷˞rVWOW._XOz&sY:Fp Z~, O,'Z4_dNm{Ż(.:B[Ŝk=sm6mvH'|//ڙ$X1DwK[ΜaۉfɉBXC¥@ V9ʞR[O+oBcwpќn*0M`p{Q4H.U[aAbs]l3E=L Oaø-"5u(,:NlyxK<^ m+Eng@3`R?Jg-۟M5|C؉V->%yx ~cſ=XpEsT}Rf}s +Z/) N#9iIvLɼ!va+?U. PG4%#yB] 3vW!D}SZWU*n<2VfZDqʿhⵏ̗aERou|+ޗbyBXUu6+c;i퇃+5 U h8<`I]v:D[!$ tnv8Ui/k,=C__ s+kDԹP7Z86 UOS``Vs1PۂUԆf3U|5fΈOt´Hƪ!ڈnFS>neODZĤ6l)>)H>`J! $"J_(*wn~ F̀N;{ RdŬ/Ktv:ݭ:Oc|")+-6l li+®tӁ)牿뱕|@l;]=X΀Kq~m!~cO*ѸdD?m!aS3;pO!Nqª߆`jRuUDfGCoU46Kt3i܉sۯ avO-RCQ^/Cb2l'6!`ee*>:zGEkRW؀ ĬIQq8˚D+ M??[$q\2iԀ[G3gBF0дOuXE6Qz`*Ao?p >A~?b&h^uwJVUO 8S=1899k|WRSi+xm!MsFlL;((*7-X} a&?_槜c1RXvi zKHW0Ư;ܛ=8vZm:qK* SgJ˖NG'fG88==;04WzKϠOC^!ز3T\{O;ߧ"ҥU;ءҲ /aWյESIot^m*tF}|Y,q"Zi.Inaw@8/dS"=zҬk($ndk=TU/9#G~C߬ɧD;祦}OAQ^T+L>}HMϖ2jq C~yV}緿b~91!7i0϶r `5 O)%nOWCUSJw;0[QIcO L$%1O ӕHKX:(-%5{{e>p`I6_ :|-ܰ,N.I` +i)\Tʍ"}`eb\x؇Y ;(oZnjPZj q R/6lٹ/?{JBpd6Rj? p!S"pͼ_;b{:[De5J,1/)o.d!u7M# !hxzLf4O#).?=RTE9\ϛ_vwXo~qVp͂ &=Uq{# Y { `睖w1t¨~[FLvS*[ʫ g(7>uP@>(I~Ѐk v @a[h .=J8]|.p `5{"ky~TXc6S~I1 _դΧU~ķ*?à`*8X8( P~?rhjE:UUu"&Gty'5ݽ$ny)ױCEKjYc&0YʫsO GnC.Z;74eiĽƋ"8^"Lnm(\e R#*jm+c3o  ?=-\聿u<,>l=l)\ЃG潲a{C`>1?`wh@Si=BQ~OELQ@>p6ewX|z?^z- CΘf6y.q_e ಪ]Bd=bh?xzvLC{X+ '@A#|6-dK7P2ሼE 8begLF%xm9%QsXLf _OXlv=pZ g#u'`6VД_槜c?mz.w4/p8Y`8$`q7яkziR.ll9Vmen_Nk9Os@&l&EAA <:ilrTz9"A?Z:F; #ۂL~$~% d`mkBc]Ʃ5/i-|44|>\XAI2$ #W(Q~ t;`O GڥΡ}?2<9K?T\>9k5R%(yN9s.|ƌwnT'rk=T0z1fKEV*ҍ6yɝْ[;}'/@bNR$7j6;G8:,zەgUSIqQ(C=c8_s;7ulQ 5`isݑY.ֻ\pK,d•3xzw$ }ovTuԯjN#- ?=]\~mս3* uY~:ރ &Y,@E_UVs('&o3Ͱ m$i]PLs8ϲ7}pRK=YV4Psړ]H$#<תd!t.Ut㚵UxA qp]ROk 9NyesS f5`c-Nawn13%΃wF>γnݫR7/i֬>bWcqצj ^nfJdЀḑhܪjDAMK41̓pW5}^qVkP歎KKJHÅt{AE;DKnGYցd; E2A`?wq )T[ ,>4Mxf3 `>k; C)6mN.<}; fJ@9CL3Od+GMV8gutfY,<98񅟉,K818?>< qѿg,wSu&ք<:Y6XyGnv^/ɾO-{Bp-òi]3"y儃tޞB޼GYlV\Xc?!}M $D,x&#5lK%oRY.Z韼st}~۟Fs,}G/~Q f0H 4"J'$訤FeSV,Rw9JG匓[ Hou6݁u,ټ`5 fd /:0"m5sf 2\tǃ6б9)y99Sjʷ2s}-*娜>ov*a|9[y_lcqB&%&RbR"gNjƀ;"E{OO,>]svn#wВuFrKxkJ13?/B.Q[jlpv9~kXA&eeEOv4Zn(5 &?. zGqogx#Z1;SSQb{Fk6 ?aex[Y3<X╓yxr62k})k| V,Q(9)Yn`b:~|G[`0gO|n&ɱ<ոvH+Wnyy\D&"MМY3PKޒh| Ibn:k#i८G.OMηCe'wr\Nޞ>ЬB ]!^ N@^Iv0Ki{+XX3k᜕`#56*ocSi,'sF[mqI]c[D(q .烙1J1aǪL쭍A,ۍ;S4M|@Ms02mXkdG|;0kAXqk 6ֵMHap9-6~I3KÉs6^ad;N_}Ns~0C4s3I6rF'2A[6mkH졻wQ`eh 4ފB`B BuƄl,xN4i(.PEu*SE>Z-.^k'IqBHSv[R,as(؇W]GZN*`-t4a3psp_ "BbܕecI4i= BkyU[`kaAN~$m/2V{kr ="I{J9J*PA ``Ccn4~\F*ӡt6۹34g<Ԥn,DZ2b [oO2 :Kg X Dҫ+iOE:R\qeIN54X~\,T!wr r߁i0f4T]3I8LՍ,8xB*֒yb,x!q 0݋vӌjr@&a  {z ! A1b 5_m_5& 7̘C+_ QWѷg]D?nJH5 zf6I Fy&V\|qw< djRMM5 E8Ul" :8 `/{Z+2YYGlzõX(o&7Z_gC}])?F_' Q@q*ldlx<)tX lGLp[X,|4 B=5>&\^2deSTcհo1sr̤NgDKDuJR/*db+fMsg WW#rWu4nt˅9]V厣̮k2lW(p홇o+/l{ja?K/iõCyԨ\*diԚo_=QO=t)Deiiwmu&P{ GQ?k4ҲY.HܯPSCvi2PItLsm}\ȝ.Nhr8Ag3]W;pyPA=®7>Nٷ˂:>5 fهRB )t!QnOYXl4ln D<̦ah&gvca4h#f{w[b5 w>! hPBkyS-",8B#nZ/Ѝ؇oTX{7N{e9u*ڵU;Z5zg6eљ՜7?Ac]sf;tv$ `O'O6 sL٪T&*;$ւ&Ð;K˝:%t(!A34__x}6t87Z"BzvCvh\6 a-Lw+`W`Ajwk4S,/13h Xm8Tвɽ+L__o"Gq@8ؗ|gvuB 6 iy0sVݩw~NIhnl`y/~ 'npzXC aI@ {{~V3pYb0n2j@IuLN*+4Rygjk,\,Ax 3p݃\=["PBweBhcT5ZV(ʂ߹]?)|u~9:I3~P^fއ8M~sO~ߣW5gދD*eן/KHIyoOuThA? 
1Ǽ{N 9{(76<)3:&]tLz{6| p=[6~nukq!#I{x \wJ ާNp_lZ+4-)Тl#8{0ns53i ˤE/;vV%$RЀ8ф)gtMAٮ #F:a4t(h*-w*ަf׻sa|zwBS:f]JX%;vÕnY6,__ kθր#M;*VnaQvСNr9?)E:, tVj_Ӓ]_I>w{?o!]0;DrgWmOR_z#md3ߒpnٷ/!FAAcͪ[em'M;|^Rs& $nH2D/ph-l[' o[zFFDB S kn۲d_P'_t}u6/ 3&Xvg;y*?1Rywk;ݚ߭w<<hֿ5['M|V߷c˿rA}}m%po250gv-[y"xn)ش w;Vi^-ORHM˓z_t3 ^w:_ pv~sÎ?$gQ&s|dLju9"9^PP[[΍oێM20/C˅FCD5Ϡk$ ̃ k_CF2l)>>`2%qLaCW+ p۷pۯn-+ܱ}k 3g>gΥo2y07i:B{c(vaoq0q''aorx/D-Zp$ `35Lq5υgk-y}×1  @E han7_FO!m#񝋬@@Hegnw>³v'pvb j0pZ\ s)қ)d7?ad jwnQ/?VlPkr} .He R0(.Fz"Дˎ/c$] |Q+7p}9 eB{$sN-\[=ё|>1Yzarӕ@/T忣sfqkD2;| x9^rVQQNG?V]I0o>r[H PQ8"p:@E6DT Bg %xBwOˤmڭaOϦ)x`}:qրTdY$mKԏI sgYV #"YO{qV)OW))^8EZKH.,y0s}Zkoa썿'p^DnM1m (|m* E5#cvOO8ZRLÏ t`GΕ?=K>bAL%  (7N)'z4[ߵ5]KQ|!.7iSa/;mE9(8P@BnǠAo()}w wġ6ő1!rPT4cU@vV*P(P@@ S@pv*zEJ;Bi6)9~%>t;I3 ,.h]&B  Xb$og!#h1H  (P(]PptKVB bF*P(P@B袀"_  (P ^"cJI (P(U ;=8Ri1o,Әߍ1]FǼ<& #Oؽo(Pp0IAQBFc)aTlQ4aϼGXj j.&,Y[+e?'M\2Vp԰JATBCӯE_Bʔ;JJE!me%,|駱5c~e!;Fa|)wKFPp4rMYBQN۰g}B&#<l%c\R {rhᔂB "rCy;vR'MNr9`CfVym2p QIBB=\-74uGoRl'ι{Ts3[Ezޤ.Q]nM?0  (P(Pfi$l,4|")o0 |%|-44`fmŹlVky~Ka'B bh+̪FmsNKAs5fvJ;Wfj۵E,AǴ&!I|kg ;hu 8Z> gCB  D9ٹ}0? i ӀaD?tj g69'N|{ Q7`85#"{n`*~v EJ  (M x /ѱi]Q;uh" N;4O {eJ>7phxoȟ-*2~ur`\Nr;ۻ} :dUV(P@@LSɥ—v |[:קӂÑ|7:}RNJ ET7n7Pp]TBQM9BAYq 썍K'j],|t:}ʗ~Ez[{=˲NbyBX~..J+(P(P (Q FntJ6,vTSUrFLiLuUhӪTW>V4ᔂB  Bi $Lj)[*QA >oO/4_;yjߎNxC68a1P(P@B.S[0[տ6/G}}:b쩯k+jЀ!*ɮ/Vgw9@>/|l˟ a o;o;n!j386zgՓMQ R=p ÝfKC XzKV[u)O:/Iʩ)t ,߮ʩC;,R#x;aԬ_.[ͷ`7=Q xoxTj,zsIR(!|d% {0f05,.W:L5 z\*&]NWEQo2AT6k8쎺UZaҵ;֯yNzjanN"7dV2jU :`@LDB &nm)ܺ|3j: #_who~HUݡiv:]*ʕ`6:*N,6USoQ?4XoW-Xr~Fxd(x<"sqn3+QMھjtk+6ھ#}t9?W9q;iT5,=ir:~ɿ =./wȰaTɔ՘ v9u~@ܥRjq8Ԗꆺڲ۷l߸? a 0?nEg7]Ng&@U t[iӮ*]E:۲F > #9u'ou~Y)r\Y Y,QT֘w^5𡙳_| c<5{yN7uYWh֛88> ؐ?Mmc;_Q>Ђ'O2*`{틸nEyC 7b_%5`ȟVfJr]vDقa(HOQpv]Wgߙ6h"+l$Lj wMqHV\Sp ;j\y>G__bG;Rc+rcݯq1_Ca(!4Q`#xv YMйo[TT |Q^7q;KhSj~i2t7"Hk6m<Ý駩lm*#NfOE-N nგiR'~s=ӚS}wgƂm%:b40AkH3 GA=<{'D9_| |_D_#bMiӮ3ox<(lO&Ŝ!+ wRL+0OԌ>/흢 :lT1cNqnlvs|UΨU3!ZL˜fgoŗ-&MQǬU{~=@>'pQ~9L`|=Q齵_OUQ[IPm#CfLj}c.z仯ݒX~٫ἍW_ -_B4{jreG/jVdZ8Gx ) g"mif(:fO/aS$^&)B) ~rzTNʧv?sg>MUt)A >%h2Pqܢe G>^՝WX@LNo['U8z6dWk9L GΙ/ٿc.kcFS._sL4, .#G18dM$qgh ''ũ4:mQ b$ 7e+ .CNu! zmvS~ڵ+JЖ8TmoNv?~,35TgdlzeU3;cL!Gf7u G HJϱ}4aD]4e*)!N}3RN#Xa'_*(=9&7NAOZce#&hй;M<|gz׭^:l@wU( mnݟ-{q&aq _.]GR#B_BNK#_6O\!'SBr Xihbep9pxvp=.侳R7^|iwQ)·#BŐcҨm DB?ݏfje;pf˿:]lMdRӁp]D{ްc?-^ Φ7Ѡg rzPAM;h/Ni'=Ъ-_^G7ߜ}q:?yfwEa9+iB8Bvޟ~=uа,XID4tt WdϽSF/^GJi~^Sh,|mxܚ\F]AK]yI˷-xވfYE+7/n7N W}WCI fZz/WӞ!6~ް5aт͢9||45mwgo&ꮎVZZd^=&=yrVS\T~|_yH[Sru#EClhKeS )7+MtFf9.?;JKJs?3h\IԿo!51.rlv}tQxEkhe~0<%kw=T:qH֤psIg#: : c)%yU]z4p(r.9i ^ WQeM[]҉c)|-j~sǏ{??oH5tyiٺ\z1(z?vF>x\%|w` ׋l"ļ$ Cwyp6 `u9qaH rϩ#oyUFCIquɃ369RX\֮aB#蝖@82ӓh}tb#~h0tir5eGHC~ݯY VRFj9hL۟7Khؕ+#%Q>b렋//nz2׀w_q6 6%ڀxlH73b$~d=,Ͷ;\Є[(CuM7ߝ4R Z\*bNf+oh88s3/B7ix]q,75b4{` ->Κ2:T8;8mn6խ޲ ^m4~f N;W|ӾZ@i[QNPXyv͝lA ,?`M'{fkKfP}i%']m?2=ri,ie6]5s^HpS8 8 ,$׵IG`唲L~Q8;XJYqanvk+"&kj1y0ίcomW|OtuEbl~]ðQv[E@Q.*+] > ڸ4 LXzA[V@ie-jp, <(g&4|m?2-@e}bn]ԫt+g.n.Ta0;rxRl/6tƠ};elU[o< p`LKG'L^UMេA/D ڽ;[ʿ o\5pOR}>]U 6 ޖF}h<so&5BƛxEa {M"Vk< qN K6TJku¯XL7rֽ'7weo=Ƴ(?ǶBPrT(P@Nlk~QP7 [DZ#;69|[:{KF4_RrR(P ːp(u |k/=qh[ 5صɒ^yHeZ+eS(P@@)-o*+Cw·4K+?ת5u,UʦP@@wKy<6-Yդڲɓ(\:ϊe*eS( \`R!Ы[vzrFo:ߋ1;8Gw X<*X/%y D.l {<5+^x2tύ""}&BKa"Guutât;2}O?ˉ8uE ,|ЎxEs#>jW`QRIGBғiʅ]]+|KYjzqtzLl[4 V(8+Bh@Fj;il[PnZ*FC/@UuTY[m#Vu' z%Ǜ9nM9Yi]JCy9@"beM=Yx7 &]X?` Z/@;O^{V43(X $ן j+}z+Բ!ohU|vi8gQG z:Cؼ@PZx%V[hKA1 ,Vjha%NA9̔J߷YX-=nkoÿ'˽PHov&ĵD<~qט's*EL^N,x+TH:a(@y(BFtW{Њ mf"z˟p[X.m{p{;B1(Uvk t Zմi^t4~DDj}Ķپ_\5y"ec;x'w9{'g3SoЮd6cފG HѠ#ʜhVo=ȧ}:G"h{xC>Y,5&QZNmF;Zzv [ &Q:_Ogd Mu$czndmOo:Yu(ǑEգxk"(,G |?_ ^g7=Rhȡj! 
lzzȈn*x{ )Mp**ޏ,;ݨ4Q/HwUP9[6(li*)_&vLP4mߵMt0LRxJ6UNǪQíae_D#z vK7x[unÊiޏ7="CPQ`vk"(b_Q]GE%T*OPleɃd`$Q+PEuba^ϴjr mM_]!br2S9tfP4 )6{ F[rU='o0=A5~`v)\+oW{ɤ~HA/&Wl4ރ?І#;m״,;,dy iln8$М*e$O@Y5sO QA m *Ұ=zJ窙'EEĈ3 ΍Kj,bη;OJa: GGs;a?xxc'C8X(nEZ!|%ᥭ99od-ڮRDg̋J6QI+Xc޶ w'SFZhizbWIvR[LQTRtBpmx?x UWu;+d2H=JLC{*-*H+m " @0U }Kvw}NL,[CF{ITE[BEX*ZNyS1-=#`jͬwBX pc)&+hqJց"!"(iw ilىu-k{'P1k yYخw!퇟iwS{'t |{\K"0߄#RAWeUo{3Q? ;bLH=JYy7(V,pLtwG|`gIp1f(1f B:୘#Q'w]\_XfhS:.f`vT>wwD}Vܽnx=q_+ㆎG\&䋪8RhN$ް;.%qt-hH(s{wGHA8Hv K#VΞYg6 :aY/JkЂt(,n!&cqXFer8Ki By;f+m3Zufb.&UT3󒞳cz!h[$>b$|禀/vѪ+G0y!h]a`RB8eS'7etȯ;PY={8ʅ-~l"jDwF.=MOmt*iمgҥr(hCZ */}9m :V:(:zc%_G;Ԡ zC'  tR^n?|=ѩɢ7wּ{nNeb7HZ8^Ǎ_8к#?_Aa ";=Cޘdmim[[/f)'Qo26ЉTѷ>SHwz7]t5h?鸣֝BMKgLih#Z. :]60f.Y!>:_5)k5ހQG.]MVgK?܁}3hPR4Ť'HjH/d8vVÌ#Za-9s ɱǖ[Hm}iι9}Hb;;kXxC2 }5u7]Nz^i ߔή39O濆ʋl]yj1i@3)Tv/W^G۳nFETǀA tGǍ[9bmnq{mi9nʣh[8tNo%KV=h'd[Gin8mK55dF|붏L+åx}Z47bT|Y 8:۟7(Ǩ~k[h~YKv;c{r[IK0]͝ uC`Vq$aIwy)`;X8`e[PHQuhg ǡɉLj a=KԇP`,8T<\ukwr}ƃ@ _+I[ a)9 [HQoRYEØ6@Q+vF:(ƺf1ݺ#ʟg33!!4bR5֐SрcɁ)7>y'ZdL4.=YU]CA~V:`,yQ 5-h2ͣwFS#Չ9~fu+'DϜ0YxM1' l ?OG%%`tLlޟjҺ` f#޶ ̓L帱m4{>+4&JgU@I2~'#ۦ i}2+#p><li667|. 5 :UZEKb(g7&:9WTQ2>|kRbS ԔN;`_}a7aέ}ٻC|li7aL $ԙWcG ~UO#~h+2_؄ yb І>w 7`?6 C*e!!uŽXCQ~O1p*M|l1`a/P;%K!q.ͦE=5g>p17.Ti4;~K4w//$vr @Yyح=}P*pUTǭyJ@Wq%ڝ@}Lz6!ǚ:KVk^bzc.Y.A@Z>Ҡ7b?Д+mBtA?Xcx ZSU~^,U.`g[l>u n`i`T;Mt 97o9]!="L c[3W;E푳K3fYy {r~J#V{Lҧ7k<]֔;ᑼ:C 0BC -xKK+2S:<w*9# #e oa @ ]iS 4/bGͯ$,?bkS<0fbBXECjnK7ת#+=ogm,y=dqŻ8D;[-$#.K8(.ۗf^ =B 7.E~4Cb[子 A@)**=@խ;SO oc=K.mz^[ZErj20 2A{?s(>6Y~5 `z@7ڲ> -|teIPhw^Žn~N&dx/s#ߙu G&vgΙ3ά*7zZ@IDAT"sZ2QzAC~5Ň /h"* `idWdOkKTjfo'_@pa"-jLz]6AǴ4gis|qWG9or&#}$sU3kJzMuL^{wQ[ݒz"g{`Ab*Y'w *,(`A,Ҥ!-~s7لMI63msΜO9sΜq;ޕ]w[C#/anPj[bXc `ĒvVj@g7.:8\~[c)UhUԏѕg+ʊ b|ʗ~W V\զ%8Oʱn3zՈ6d? V~ =#/?ƋTn6t4_~|il8.)~.ynhK\J˲VT}VXV{3m_= RUʟCiݳ6G;Lad + ڱ~0Z .1/sÖCd]63Y,(.ܞSw#rXZX݋JW p8G9C)UilG>iK|8_y.JEc?V~E46uδƎ9~"I BrD0))f3fEa$}qQ6/ݵM[ c.(*~&\Mw;;WیyzyET \k@ңO1}V)/U-RAlSZ6-e of=Ubg†.Xf(i@,:`I%P l"{bc8TTe9YԆ헅RWYga6@Qp ~FE4.X`FGm,ԋ5>F6]?)H~^[ Egp/9(h3uÒ?,mu#zK8<~č0l(K˞N_xҡXq,.1)N9#b.m kl2b1M;}ե4{īÖSΈFbV-Ⱀ?!"+8>v/s\^I;Ln@%Y%jT:нMcQ)˞NQqT RѶoR_rE]8Y8V.boJ.byXF(2Ms E<*s-ݢqYNJDlxC&4aU&|˝>b;v %:=gᄎn)[,G-uhq 'GUm$Mh[q'zi@˼mU %nB۝a'd[d$/1)/ЬTњؘ/@>k9qg&c6GssXU:qDb# 2K;+os1$Ùĉc_bs/[ܹF},Q"^L 2gtJ``xz2 L-{K2]ⴃYbcD Zh!IkF06󌘌: C3;5o'Ȫ[R#fC*ңӁݣL&b3 ,aKGbl1 [Bj#$b'81gFJ5-(G3hIvY,iR]"2@8TDhhl`{B}xӤD$I7D1i&u٨2_4K/=gY}r'@$}V,j: h31`ӜÙ'r9V^)A垆MBl,_#Z^]?sdMj}NHLoz _kO壺.3+WQn~]mx03O=75vڌѺ*{-O Ulj5 bFII D#4d\pp[R!O%[(n줪4"b<ؕ$LH'`Cu.H>vҜ.\4Kz aX "z~$)N$Y=|9Sm =>!w:QQ'k&%YX?;PE4BT@;>.8Kԁ·VTYb.z4N992pՐ&dJWZ䫊֐|Hft>qFgDLy֞B8y~S`?塬:Э] UĆ8ݪʞyÒ|\^^(@}#b1ݳ4%5A[bv0`M>hqLCdA=Fl-ˑtciZWQ*LyGlyZl4QD b$R8jĤ+|)m<1۾#?ꛚTݫ&Uӌ X?gעFZu8{jI4t:4QQ&@*8<`$w.ļhD Сk;@.<]ʣpD޼q5WItUSn FS&ʪTG,ai;4 'j=yL0#F ؐ&9O30}EӠ$f9J̙8(jz1os!>cZa"mݐ!Jj,' i`?@ r9z)m϶Keƌ" &Ja*e6lT_~^}.]!p[ޑ00u|q1z柢 Ds i0IZ{)! ҄zQcj ֞X6K/1QJ./uZO:ȖzPu|шγ.NnS*vp6xN88UR_0kww! ]8)In&RA ⩪)M/۾iypmgUJa,h&[9}VQ۲8R >L-J8OGCjRkFԩp,zFEgju@UurNy@Jb+9a$q51n;ʦB #0҂YS;m\g9X\泬oGP_sr=ꆗ5L a41 7Ć >K<:05>iMH쑂IFsb#ķcvMCI*eގɐ<DWuIĴmءl]j?~]8iѧI&\,T7}_%Dw&Mi~U0ʟH:POddGfIv!L\o=Sߍ:_Tz. *^L[~t&w{TRa kFL1o8O?rА[h=tj0= ѣy`3DQy HB ';)㡒(r-uFcy_-!HLIL+,xa`+ItOfb%1 |>A*2;&р@m?-'׊:KjHxKe'NdvVNyzҬݶm~j QЊd hg6LHiC4:7Yg?v#^z;wԪ넩qx:ZNJ+h<t>:_ZCdpv9s.5$ _™6[ 0Xp.L5]( ,`@-VJ'_:ߥ3VN+=K}XOV-mH} hufeRC}o l\lٗ^>v IDbh#*lK?{člҿWڮvM+@xoX]TPGf]W6>A/Q[V~<BM? 
z(]ޥ)Mvu@^*.Uwl+/Ybve0Q*iނFp~&h9fS{]bD$ZXažayy9Y6oڼuݪ=@rIҥFxG$\i,gb2]Ǡ½^S^mڝQ1 XE%m??'-7mYj7`۽-KVV32`DI|t+;i5+7 ( Ljq$%ɗ0M8 W3A*wφ6vH$;.vMtkFLJfBҚ\};hF9B\ WiB,W:ƒdr`I%kf]"WF?!1Ҕ{AKR3&c!rrG`H#lHC@B2H`Ȯ$.|IL6Ās'\\:LvWJDd4|S!kh3#&/"a!E>U=JSYB>HF^g,5Keue3C.1F=˗s?VY0x v.C2rNjoF`!31q<+(ye\~bFˀaw (]rF{ﳗ+X~P؋`F@UpwOCtT3` ϕ|0#P}V\,?J0bRCf;`Vm` 6P4Fc`eP6pmi0#,KkGf{@ 8hr\#D}}( mg<aT9̀ 0#J # b8#0@(~"yBW`F(]x%`lp@ɘ#0@Yl`тbD> 3R3` #0UBUR#s3@p#0%ѕW=y:?0#0#]x 0 F1.~0@$"T{]O)?l3GK"cz-#0aT&MUO [87*?FYsg߬.%-3rq~s}#`Xq00# (_LՠWi Uamэ =6W8Ur0h #0ጀ[RtG%k#_R^'IhA*<'F^,VizWa8:ry8cU :#)})˷B=CXr4,ڮ~{6 = (+>Y~hM(]}W6=G+w=3#LN^>gpܟ`4+̗:{ӽP]a=WvOoE`%YzވYGs$_GrsFAs4 &-//ONKk+GEȰM۞/qz7Stm^GjsFA`ҴBo|!~SG'iIϊE9ޘʁŤO`)}SNb*ߓj<$\ f̰Ⱥ1 yb,k _F`,Nw}0񅁖N’E{v\o0nE5^JO}V6Ohπ}5sPHg%fF'+d4կ{½]3{jo,㎣ {[LSXofs8'MF`"zKSӔ%%e'tgiB<^./nX>ӵK!v3SqSz;u -nN|+qv#0@1/h8q`]AU[%eW4%FC(4~C/N2 ؜е.c,29O+bF`@m*n~ G}Ow8_)1 _7tw'ќt] hߤ͙]~ߵG92#P ގnfqOz\=9[H9@NYa_K|ÒRh=#.Q_Gx3#p;?bpTu; ɡ(5i%t]i~fba#D$9LrEH\nqbFX,Ó#bNwE]/m .;.R LO[yE`;> c7p$>` 0joa|^tB؀il{`[+O bUNbddaX#cStu*lY<8qbx|=_F`F]cUM{.n7;Go9}zsȾ`4WF`!:Hbk P${Byc\5"Z6T]w+RLxLh#0]@-[9V]VVu:v}B]̳lG0WF`rSRWx`+օ0l cOo:cg8˭F`F V^ղ]vE>׷'%{~`\ؗ`F`ƌ!O=)~l]v dgz;\y VF`" ZV4} Zf ˌ!*yƺ"P^G`]LwiWMV&ukn!&Df8NF`̆솳]!.jG-7Dk6L;1~^UVj~Mb#c؂P1{s3cBSALo4Wkc@Tu`L/#0DE_ }aȱ㱣j+[7RRFT2 Ag$ + C6/5|W5"'?$NEa6dߨQǃG:0@ pI < ~o2%A '4R օv=YPAG6ɻd8~5H= ~̘aYWp(Xyr߼ '𰮉pFZ]LKkpHF`1.i7EJmzT[_]w=4{@6FyDMwgQϘsh>r>.z<:4+?Zy/T *0_e)f [)|r[FTRb3=HCp#Ʉ'N r0_ZeC,qZѪ+uphv*#Hy:$4<~=s8h _bU,n{ }dƀIc/&ݳdه`F䱩]嵩j݃0{ T!.!PY xTˍB;ih éV6l4[m=> *ݡwU(ڳAg-AJnBtjvm*U+"`FCї}j @Oj_MϷL-*dnЪŘ$$}#y`L)~gL[w{R Ţ) 2a]]}탴ޟLk_ϗrL;\&ηt#0@9PbcHϮ5[Y99IǏ~-fQHnlxt|_ ^*Us1#& r8e|@%`x '#0!`A?4#gtv_: r5hKl檦`P՛犒mfE׿{Ӫ.3%1#TԗRZnTi4ᜁ-X0:pB%\~1KLNQuλ}pNܱ ?cY+8w?$JƷ\̀QB4I-rS㗌#0e""ç2SɎ5OoQ-,|ݘsgHb3m?T뇄~pY?ACPc(`̾#0Gar$iw(B}Ux͖:ˮ`g9t[nbQM"vcఁD)k%2ȑc&GƏ[d`PD'xطx0=ulq+2NEGև?.h4BdZbw_C 햾_кCIJw;?! /&%}}=gsaU{1J.^][lW/9unSlG>5%eus/B i=бBea/ <q*RRt5%EqU-kEM0B9NJy,xljI/lMLV9 Xh!VH0ߏ1ȩޗjXf}SRtU<(k6bŹ&@JI;}J}W7b6~ c+Z皰끔gp׋څ~[ζnkڤ~VR{BАhQ?jZ-yUJZO8{_8@0`J0[8ԱX3I˥yG{!YߩǾ6U5J+bޤLjR P‚4Igܤ>qNmWV:\/BfpzRsa<5PS LLS6!+ZNdq5Dٴ *%:Qr7 EQk* {tqVK޽1?ɓsPBYkUѿQX[:alƻqEwF?qb#W~7vD|f/&OK?vsN'F,ctwxX"glGa 1!&}T,x"ζs K:mVM#:d+/F3}h3ڟz4m&BS^yiY4@Yp"|;ro',:Tx*|CM0A GqnF-Uu#?1&Sd &+Ҏy. ҤEW}r%ޖ$]/!جU>NJMm򎡔}ԿHدʣ72Gub,KbP&.|:?"`jBcY[l ^q:/RPqݚg"MbyHwU/U&[^!7lu4+<2rtd{iH薘g*LeAa9nK.=T:tQE˕^XtP/tkЮ(iFܗ\soT(,Ap̘~?w׷uQ7/rPQs~0*M0BwQ|UNA\׽a*yàqT96tݢ;䁌0Fs3vD)%@?.]HK e:Ļ`&$R^KD:׫::nvݙXhv{Hc4i0)8/aUUw>キ4`@Gxhe;mE 7BVED:@$1 zǎ@h"_EURglz(s:q|cH9 8hjOb [oF=5U4 k&EL*y\mYVAkrzQt_z꧗HQc^D}Jf~Eo}]]D|︉烩J9VC6}Q?9eOS_{)&Vy??OLpE`t6~RZM9<8B2F90#^0nwQ~OER$FM] Hr刁D1UW`(4,l$z n =-#|7y=3 <ƶ}1I&k* 1q9uģ,CVEjȿP>ڈ9-ݭ OJ̯;q[wϦi’CyX aIs]?9W?ڔLJ:|rtb.[Hp=SoRPHѣ3p4nB/4Oʤ72ǀ quM2G2r4`>'w/c$Tp5ΌP_Ccs=uEzQtd\~ ~W8_nj%Aʉ閑Bq:)]M>ե;-aѳi ش>Hk)u2Oڏ%YzVY@Gh&C'[W@d 砳^5bѦij\Xut_)OY ۓc㞍r8=Ks4!9*J2=9ىg~wP芎0e1߲ijkǞ>0C#jj}fp\VxǑ@g"cTH`zv{Zn<*=@Cx[bi)l0_#ܔ &)])̟i4p纡APA2Rn $-}2We5Rԇ¾t@+tsP[2| ?z[?BI#j@mb27ujW L?txogFvcGPT\݁ۖt}?mCaU/Ö|HˏҜoďqi|T[R>!MG1LSW$yWOQ;b *,6@V8?~V&Dr6t_< ;œZyjk]׼g;wD0["R9^r .(O&Ϻȋ*mF:,y[cA*Wu u)9`euXѹCȱZ,וc, yN֐[pJ:ngA;`^y*󌼽[07徔7ǝCGRKh$-C1#b\WQ[syᔗrzC}D:Pt*b&x}-/p&:4^}g{'^7ʔ9xc1&Ca`pa{LH:[/ˣ_G=l2oK#Go0꧌?ԯISWhX7Q-72]ޚ^-w6r,s|c_ty=>Nm-=^Y{7<@0<ͱAx_{ƌz3$R}YWRTIgo6A[j9x~7Gށ6V>b 坦꠴g(C4;&Н+̉hg{#)*[0Vu1+/*qƈ*8qкhRwXDT'->&X"i=|#wf#כ]-1cܧ0:Й1RxmfJJ)naA(% Ƥ;PQΔ\+Ny~({6= DA!A+>>i#G,zVG(CPE3._;ϵ4搻MO {_sȾ>0,0*_BTK(Dg_c|S;\JROorZ,Xqac u?^Oqږ՝~ OmJ2v>7}!za ¿A~.PtcwMA -yaW^0YS_Iy$ |e.T9abX2C:v=1$;oBK۞b/4l@,+R͞rԼK}Rլu V*}b~c0w:M\}ZyԲb i\`pq)Wҝ䱩1; l?<ݥzP,oJ) QdX) V' U;ORm 4 =OWX)Yxi? 
[binary PNG image data omitted]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-provider-flowew2.svg
[SVG figure (OmniGraffle 6.6.1 export): "Linux Bridge - Provider Networks, Network Traffic Flow - East/West Scenario 2". Recoverable labels: Compute Node 1 containing Instance 1 and Instance 2, each with a Linux Bridge (brq) and veth pair on VLAN 101 and VLAN 102; Provider network 1 (VLAN 101, 203.0.113.0/24); Provider network 2 (VLAN 102, 192.0.2.0/24); Physical Network Infrastructure with Switch and Router; numbered traffic-flow steps (1)-(16); Provider network aggregate.]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-provider-flowns1.graffle
[binary OmniGraffle source data omitted]
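[Note: the brq bridge names and VLAN labels in these figures follow the Linux bridge agent's device-naming scheme: one bridge per network named from a truncated network UUID, plus a tagged VLAN subinterface on the physical NIC for each provider segment. The following is a minimal Python sketch of that scheme, assuming the usual "brq" prefix and 11-character UUID truncation; the network UUID in the example is invented for illustration, and this is a simplified sketch rather than the agent's exact code.]

# Minimal sketch of the Linux bridge agent's device-naming conventions,
# as depicted in the figures above. Assumptions: the "brq" prefix and an
# 11-character UUID truncation (names must fit the 15-char IFNAMSIZ limit).

BRIDGE_NAME_PREFIX = "brq"   # per-network bridge prefix
RESOURCE_ID_LENGTH = 11      # UUID characters kept in the bridge name


def get_bridge_name(network_id):
    """Per-network bridge name, e.g. brq006bd859-8c."""
    return BRIDGE_NAME_PREFIX + network_id[:RESOURCE_ID_LENGTH]


def get_subinterface_name(physical_interface, vlan_id):
    """Tagged subinterface on the provider NIC, e.g. eth1.101."""
    return "%s.%s" % (physical_interface, vlan_id)


if __name__ == "__main__":
    # Hypothetical network UUID; VLAN IDs 101/102 match the figures.
    net_id = "006bd859-8cc5-4b85-9e3d-5f7d0e2c9a10"
    print(get_bridge_name(net_id))              # brq006bd859-8c
    print(get_subinterface_name("eth1", 101))   # eth1.101
    print(get_subinterface_name("eth1", 102))   # eth1.102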
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-provider-flowns1.png
[binary PNG image data omitted]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-provider-flowns1.svg
[SVG figure (OmniGraffle 6.6.1 export): "Linux Bridge - Provider Networks, Network Traffic Flow - North/South Scenario". Recoverable labels: Compute Node containing Instance with a Linux Bridge (brq) on VLAN 101; Provider network 1 (VLAN 101, 203.0.113.0/24); Physical Network Infrastructure with Switch and Router; numbered traffic-flow steps (1)-(12); Provider network aggregate.]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-provider-overview.graffle
[binary OmniGraffle source data omitted]
A|(N_ȦژYWR hP9sP砮X:HٽgQ94V=)F$.P>%Znn-eO8{8սN~y䜎H02qU@=Mü9w³w'P};ʤ5!t}/i>ͤoLm&ISDֶƠT=ȆҴARQZlQOt[~ҏx]s8ڭj_vp.$o!Rz/|JsrtsVGTS r>g [Ծ\㊺,q-.C燌$ROZ_tvr뫯Gq/] [_!0[7ֲ'kDjk9^[^{GèRP$lr+."@(8l-};%Â*Go\1(s'kД{ɍ5Y?I G4έlܚm~ѣǕ0L'm%MW_dip# ?Nv<8 2ϚQtᵟF0@(q:pQӭ|ܹr+u GKo Kj1; uؗ 19sD 2aqWkmqT6x-[ku".WeA Am%6~;dx}jyܪ4ya_veWo7MȤ~6tC)$AJnSOiA"D%UZM\R%rIV")a\m~JI?oWz)&\*Ǜ  %'4(0uG7Q6ńdK*jTSDQULAY!,\$/V)׈Gm\D^{uEA9O<ؽ@E?5]}QHo#)jJ|= )R,r ^Sƙ_KSq'ԹuӦ&()U"7o; m ݃< &<.Mz;.>p$nWE6(@%lH*&jY $}VABSēTvVB|]ʠ( &мA!BS.Dž8/ yn/7E|Ov'>ChWG1z2gA ap>=j|m8t8M`9i\eְx3㠛'3>}T=0N *6HOaae ]0蘁p5+%3]@ܤ3ݘqr Fb2Rݤ+&a&,\4Lp!9o( [&6> xc8m'8rS@Ҟmi+yƻaWatKa#aW#UGm;䵛Xyĝ|h%;q`wÙʏ IS3s32g\TOwGuaV/z/sk j ͕;O~ :yPJÐqMf3m7חh7[GA_EW;N=sߦ{,Lk̯t /d#`3\cjF#~q}"9q&lSf T@`.Lt<}w9]4?ŋ8E :!@ъYj}x8HėA65.@%/njѥY4Ue1ꙡsb̛8iiVs5>wzi =Qd]goP|1]< yIǐj8Z.Ju0Oz=9vC/2t`8z~#8Aꎢ]xUCAGlPilo|1èO$EUy¶Ч ެ Sc#1/j`2F =\7!././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-provider-overview.png0000644000175000017500000034635700000000000030057 0ustar00coreycorey00000000000000PNG  IHDRgsRGB pHYs&:4iTXtXML:com.adobe.xmp 5 2 1 2@IDATx|g{vbE%Brѓl" {C`,HQi z{dr\.y6;;3>̌`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&0Hg]O6vr8]ϹׇsZ9m2 S$h]ϧ9Fb8L:a[N](ϸu]k<+daM.#bpM<`=_U҅ioJ^_mBЫΑ#Gz%奣<>1SmW& 2ZS;S_J;2aW=L!z;M)G~Hp\TJ\UU{f)\]pآ#> 0'/NA-$ .ڐ)SJGL|Q^w͉-/_ͳU6z)-/m>s5yԤIJ0EԋKO k2-S7gӯ<򬖸Od?M.qqe >(F?1Up.׼ L TR Lq@(gpN8 @9Y\z5TON=My3.+ٮɻ{h;T2&qvt Д.<'8S#qB(|i-O^餤*dۃ|"cO$#Lk!e}U3/&q+B~k]3櫇Cv]0YÚ) ?Ż(T5q tgZ-Tجo_oc1yj{[3~)}]S7]rmMG#0q,u[p+j爯Mo2u5>s+GTt>eJ\NLu44w9cu z3z rչzG{w_<:ZGQRЄ|J۟}F;SňiϦn 05Bf:Q7jw` ?8wg4n88ln&cpdȳa ƣ*t2[:WglDxo/to'lI].i- X^JP-Ј@Cywdy_!.@l3B8^s/긣rmy ;˶湨2_cL3*I&Eka>ӕAߚ"톂y;dMG l(ޙSq,c ʃ梬n]ӊ5R(ˈkXV!j`)z6tGy< !%.!ޥYs^MM(m&:a 본w:&f{߲!N+FPc{UҤ|e!pXv ]w g e"^zEu:*lU1XW>S.[ ne:cۃ|Rmm"T#*vɜfBH`9㶻3ni롍$uXcByAԀII"AʎUO#R (XFxbl_|Tu:^Lˆ7pjj* \4p l~`pOޮ;pq]>+iy-Ds]} NJ!r9p1w=?g MEgֿ0lW8-U't~.̈0F%]+0a>O&v+R\xN$og2qV:ĈZ ߶eJ<\@}>=Fٟ"'8nW i)f;*6Ӓȁc[tw݅S3WFY,>W`Oݿ/q~JI @WoT>-4 [o }פ8p]I o3]? !0L6wV8]vFJ{ܪa-V1ݐ|8Buz#I7(z;h7t^ |F݆!kBM$wk! eU 0atۣ/WotnbjH6ֻ$ 9y<;M1Ц~f-vK/El mA& x_WL1>nۜLС4<Qrkol J";rH^:_ e@:εƿC]<$|-4a}8yȉj`1̳agn+xx #L]H[ŁE/7' ϻ|!8,G\qÕMJhtmsGC<-lŸ-?FGKl+mE?]馞3 2 o綱Z(rekбM*/x7ax~>l4Ƚ;Btl_jͺM0yyfFVy$(0}wԩK݇6y:ºȼ^}d3M/TNԜq_Mꑆʖ^4S|޲kfT6IУrRYaQ7ѻK5lx@e(|>z}~ѝ~P9`a&>Nx1֙v9}5.ѻ`xcΥ0T٠pL(c|QsEw ^٩70[ٱQ=(Npy ;(=1& M܉5 4Dh;  tUB]+d L@+Ⱦ7+*/og"N"^tEN@F=wm (gL+Z\`_A#_Eׯ;4|a1)0Ⱦ3"lNXg!(cTY`v5.;yk;E>>qMNU1XX{Odzq@"U"(gjYc%zBm`Gw*V 2JjGNNՇ hlAr& J|"OR! Jp"䵆6l9HJnM&-pUYUfBb ?+Os]*t"Vトt vD9r57B_P m=ii4&`( __Y۪)fXYo-E S⫎k{ϓ>fsTj`?_|wR ^@a]^ Qe}܄tm1y{/\³QF;Rϝ|_:Zg 8 >7e ~#4gSk L %髵 P6w;0R4@%5wnj'QvdiiW`U> ;5a Zxy|) a§9!mr\Jaa`X0kùh* *򾎲x ’Wիss5@˗=Bb: կT h:z)%EHpȨ+J RJ'Ŕ/|2QD7W0  Ƈ]+!ǡicZHI\&Axz`E*/Nm#ȒG7 Z7+)co+nOu==ƐA(UvM0mFV4ʆgϘiBvoVjF@ظ<c(^O|#'u=W.ksݖlLW4UZ$3fiOz?Z){+_##! f5xw\2J qqAezh>6f4϶7w C)J6ۣ;3OL߅ {;4)qߘi[gVEK'z鰂/`y4;y?(]rFHvҠc@PڅsS3lg KnT׫|&]huc?009,.gm4N^Ea1f$C~Cp>˳aܗd}aG4;㶞#h8|MgS.B]i{dz&<ÑR (V3fK ݽƭ3ÐM؍FWGNKL|&|! S#-\^z ?̛#hΞ83,u nӽU]yvd- .qBL H5 hgX!.:+Cc F O pG;GO <^n|Tw ~釴~ A P󹷦 R4N30i1ص3ο:WC~H eT~ze{-"w $ )ߟ+-S ͘±CSOós|!yr8}Βqvk.2+|eLKeh̾q/>:5pu<gb6}Eۣ>6!0?1W0CXA:LP`smxP3i1c>1&Ƨp +4!Q՜2Q>&Zn3 m )h3S͋E[Z. 
)ټm솆?V]zuks"nW7uvwtkcueud$2 %GttۡF nT"VciXo %kٰzJzf|;ICnL6`6& 636;> VͰ49&`DMP9`L {LioPzŒI6qN,`L ,ך0AuLPhic]机Wyr#LTܰٝkL 0&jt pL T@|ZZ}!ܘf&[0`kla&pv`9U8 0&@ B4`#@soW^\`VEgАTӌwR!3&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&jwm;6)C fǻh09,&(/S>#I`\Rj'gbs PÓP )X3<-d^rDHIJN`LvPjg8WL 0w F$IBȳb;CJecuս:!MECw"&IxL 0&P 5 0:M`̙;K}o@ƺh8kd:3&@&`3&*N`x]Ί{ qGR,,xz.Is~0}njt-5'Vw?8AKJH&笔"e0Oq-ARA7v%Q¿+Xݭf&~FR:7鯼)3O'\aC[k3zNJDF9SB975no`u'@ @< &'LKiRL]gYz}G2^MqK{Pge&=VմoLL~,x\#ۨ(rM^~whWznZ}GLĭ e3_6dS1w0zvƒЍwl+]d-`L p~ `' iDv~tѭ+Ј'G G|=zvvyWoE^L$4Q ]$lӜ xgR>ժ8QƔ)Mm@qK׺)jݯAe8kA^lB1&ր׵2&Pul]R!0bP>)RXGaq߆wh`/ٟ'pӒo:- G; 3w!@H;һC ^ޗ#B N_<[&` 8?L 0bYC.fwۊH W3:5l3󞴴V99e6+z`^L-O^s_A^UbmX0P3a@7sjm=ǴrI ,_@=嚇-LЌ4rH8иH';5{.žw+ GNfeL 0BlO`L/tqk|Kq'ni *wg\w`! \u?5u~L[6BΝy2[jۓm8OTh[H x.иzkO.y'Ѽ24_Ms7!p `uۀтl3&Pym!N;t{t}މ=WA:-5%A>Vٴsо/E:@X\r6|RACaѲOIpbz*89&@m&/hpu9~0'5`oL{/E9 Si|0&@>%ɧLg.G[YU Tܢ>YFj*_bL 0BENp%dL Jc<,G#"8L 00"/(8M5/<2N z}7 Q! 멐`@`u@۾* xEqN/!,r,V|f$0tTrRFcљ 9&@m˫_Dj"9/H8R& Sg=Xy/ȯr8u=RSKW\.֊Aއp\(G'AtK2RָPBzNHs.Z!r`aM#ޙڋ6riI sOZZmb͙z~.U of@ 0!E|r?jXU搧+RKRߨGsnz}㩱uqvBr#fL:!'8ST2BgqdN5A(AhK.53ْ&[?# p!&ANǣWK1OI<_aL T@8Ou2j[~j>|sL.`<yS T7xלXݳ;"ð`gYż6ǭ'ނ "Y/O"AГg&/HF;SFi:`F{`q.{;o= [Dl4-v ݏRVWۤ✕yyi2yov{b<\;u{]OA> | ($?Ӷ.my9&Ÿ@r{q7S }0lI{8G3u~C˥vmMjח٤mi(#zوU^iLXsKн1vtrڭM87yR|ZZuq= ǹ2mǻ?rޤ2 i}(>9y>PjZ2FFVEԔSXk[q?~J_Gߣ@ÄPF 9I2}@E JEywE%%[.R/wAuB7Qũ(f`/P;l/Gï~2 Grt!i[{>/TfvV>IPZ3ǁ-^jڷ .b?>vHEN@x1uRTE򇏂ы1oM܄c.~ !)*id~ӣ2L^3!P-u5-Rƣ w |/_Je20ymԓp%aQ}ǧSIl8f9s]c^EC摿QA?siBʎ-C^+4ׇF' u"?2q~N䱱* V&$c JggMp UwR| KG]@>w} &HDɈ{?,|{#*.@R7`zO|8ڙ &7B% O4%Rb[+pɹ)c6mLE:Cһ㘳 M5L_ݣvyWoE~^L݄ Q$/M50ɛ>߀FgF/3\1{o_#4ҏW:4A%n;Q7~]&_^teQGԋwNcL TG]y>qxuY,꒛!<>:~yaM)ǻ c;pM6Oi)*ϮA2֦8V<1h^&ZY ה-8';u&o`Ą2duON}6I)V4]! 0 qe^-`A:r( <'iv1aC74:^-s<Ժ_[q [I/l6¼*eJף>*7ۨ1[ \z#|=Or/m3bY$&`bej]PT@)K ~uk%}_=4dd[BUtX"X65FW^؝Јe{gӠ#~;oePZ'+k7:đ3Ky3&.P|/Q:}JRm> H!0W>5(!ܞ]MeZ.6''N |uL"Vv99!?dzҏ{L xX^E5%`' i5R|}bӂJk#*٩I/3"qɏYq!C*=2OkAHWŧ]=velW1X"0&".G_o9:|3m=Чu)awOy{|/a~i'_;>MVa Ȏu8L*R}]ymHѕ##)i;n253=}M|H-mY"j>`&Pk!FKl= ^R ].a@GNv$̈يI4D!ໂF7.)F s)SG`1{+u ?&n Ou3h==g:cL?Q l<{6ԻMq،iH  kHE09ǠT<:6|jAij|h- wuFnm#nDžaދhV6ϸ>$Dlc&PU*Aږjv9f昒s C1f M%#iѨqaSM;6j$ͣ>J6W$wb_YS ^xφnRߑ&oĭ`6)VW" :\iq[ϴLuhFG^B#hKsnkxdmYv/Wb*/e c(!@9F i{.#eU@\`6yXxaMى.l)wB|:g8Epb}{d#v5ǘEuV_1./k>QNt EEE{WQI_ L-4HTu{n9t0aV=gaihGKy `@q"fW'xUצwuO&-ր$C`j1Pd۠3Q1zbzc;G>— GҟF|W&Id٫@蛰 O,NKƔ)_/.*@1*d]M{+vvcwdꚺaA頏),2qX@;_DW 48DPM %hzJ^"`%zzC}V4Gw+vwv[i6O?>A}*Ds! AsSކ;kujaWIIzݼ>60hڗnk! ֿ]|66|@JݞG{UzIJ x TJVh7і]:Zw: <6CѮpuEd2onJ='vj> ޵YSJs E3^׼qHoe#Eaxu@ش'31uTC"ݏ֡GZx'$>\[+`! MPBP8$`L 0&Gpd<`L 0&%P9H&`$ vݶ!arXL k=`L sS? #{bL " JD' 0&`L`$ 0&`L "ĉdL 0&-X-%`L 0&,GD1q"`L 0&j kKIr>`L 0&" QLH&`L 0BR&`L 0  IףyNJ!D &Re-6}|e x=BʎubM--Tw/[*~T`u~wU?|2mCPIOuJ1H޲}&aP`)u@IDAT8N*Zlھ'6;}g.B8mq1l'TGuRb}ʃ펨QOq=U|K>\m D=5@QD3zz|GΊ']T? 
Q ][WpS\C85e |/jD~]^IF˶R1EQ*ax#z~{TJR=O=BbaߵaPu~[<>UpоUcqR Aڶh,6n<No L!.౬JH|T%Ox!|{EWVMt*GlGP}OuCBỘ@4U6*+rG^3t} 8vg(.Z>w8dsV(CrG>!kޣhzz9mTSj~|F@0+\?cEGnapB:<G4PH_MكsV(Crg=!c5aIQ=z:a9zTTpL0-Z?xb֘j^vU$Pı+9Aդ˶eEf}jTpGG6'Jw;\K骍CUr#:|Zd|VU,4?FGٴe!_MhîlbV ȭz¥x|_ւjcni/AgZ&ڽ_J~aIѩusqDOkķ+֊#9yZ.\JZI2błGѠ^~n; V+Oů¦lgva|XR3L)x?:Ϯ꬟z= XpI#olkT;E[SoDd6Mg?&.]\5ݳ8x$Wgk_ԱW(Gaq8c%ojI'ƴRfu)lG$ +| {bl{K 4R6H/ Y?E "NghA Ѕ!85ڰG۹h&{vj-qLR^pʱbDFEv-ĝW s6OTـ }%TgM"> >_L޹ر` x& jm2aMO4O5]OU:'az#ɫ)Lp@DdK2HUQcֶ!T$p@kTCDE~/{C3~\Ru7-4Y=f{x)|6t·XSl̇z"F'@T~[ 6ZpYtX{vѹMsMVsv~d_"OUx4Hje+~q3՛QE*S::2R×fDEGV}g^ԗ궝E>Ɗ.P۶s?vxMOIϽ/8vEFDE5Lc#bşI7^`(-B66$SW `I %$Eف^}i"^U!NP~&ǒ%GIW/F)^J6TԸ]Օ-ߓ't Wl ばHL:)wsB Pj(5.|wռy&C!ya2!_sd kع s!z_ 'x>hM.h=ӝ}Rq)Lj [wY!A}:X~hߪC.EC8 WDI थ䜓Ec;.|+\4ng<̇Ʉ;/ÃHcg żҀl CKDtF1clmvM<"/-zs!q9'`^ m{_YE,ĊoW5*-L@%K16`h 8YňM n/zK~uw͐Rz{rJc`q~߆K@s2=0PۺcX2v|thLjp8*z&&[b xhupBݶ+6Ì@Ě0mѠ q_0o@R\˦du567~kІ,CKmqQ,W)/m!$kL4qgd/~Z-6dЦziw]u4ѕܫM}3_IS|e>|x4c fqŸl+]Nm J\ *s3m˙q5>O_Ra<#^tߧ}"&B?@(6l3a.BbLܟ̨!E4,\7Ahھ߇ߋ-6CoG[# 0='n7LJSf/ ߭2Fc[#Q#Z~M=UMwuo>ô :_q3N0$S!IK؂ :L*"8 nԮr>GqBhT;4ӕնQ/!|ѮEւmb#/ =|)҉I⛪cWC±!5H+J+Hl< \z?Į}-; yGh"Mӑ_߁楚ܚnMpzc:fIPvT4m'N9*&MDhɑ 8վUôh(Cf9nM;7l7ne<W=z|قH W/CN%⊴dߛ mmB43ɨo$nNېl{tl%r μ/ۚ|>|B؈ uE$c`!I[ulI[N16ں}zPWk¶Vd2 N$(sOa.ڡ}$`Bejq`N`$ї?3ߥSsM{wҀA')dG͵eDNQ|\|(ݦ>4Z:Ef1] 0]-K< t̚^ `(׼Ǻ%7q]%!׫ku*|NjzO_T$qiq=+{Syu&P; ݶ9Yr4{5jPbsyA\=LLvwS8q%6a/-ڭ\aK" ‘:ږuE (I6): 4.Wm3ھ-]J52ĢM Ηd; <3X ?uዟ/W,t]qn_Ap=q@ѠٮhBG6MtAc^vi+z`%u[0aGݐ-9 v'nGWn% S軕ėn\o/撿QZ zbNޡ턼B`B2)+^sx0#Pm5 sV @8dcJI'd'~5rYĨW, ׶D?fh 0oBe(bX|}V,)d^·wd5At3RE;1.qtl/pI#l؊F4s }_8S.ʥj)}L QC b!4 nǝFTP4$,D4(I86{q"*ϝ|LGh bc7LA]E"69iaz҂~EᷦI SƆv5ݹ EcL>$S'Y^bA44Vx:;g-/{s،0HcGӞFCo@VJ˨M Cb ϛWTL/KġGȜs1Mn3O+O;cd}YiEh 'sz'+-ТY/YVqFiU8yo}V՞!mV65p7i̝w^&JszHRņ:7@>&89Pc탳އ}aZd 膧2HQ~SR*gK0/oO:_LXLF7d]"@y77c/=4.7̈́syF/uS{*& qϱ1Bq>j %c8h 4n`alӼp0qY 0C9"O+-Y^ORTđ]p;f"gG5 !|iuTg4sZ|-dp 9~5 kX}srP`4(: ЗMgNgBӏHnHkk?TH]t\4ɣB;Ul֏z_Sz|WH]).Guvҭ՘BjъD:4%m7<_!@bR{dc_Ff 9pfuPq"a:rqƳJ0(qЀc΃mQ*s@ObW4a Yʎ 0BCu0$VAcC.mܕ-=5zW^Bcܦ ~ZXRi;rHtjcKx,1¯kYR}دzMjj?lB"&}teMKPSIi}/^ >Ėz{@NW0L/ϗup5 p"Ď @UڪRHrT./C1E U^:LL\V{(iP*jG^FTBЀ,> KP=8@`?L 0L T4i3&%@3X^YR"+TQ6h/42ՕslGG3@n? 
:3&@YjnWltO haο[XiNZBfʠN1ͻIݺKWoמ8+vvv"M|Os@[_,XE2L}g6eKhI{Ң, sX5&m{)8+7g+>gL E%U"@: f a\Sx~/0ߌ4w4u^BysôTzi)[&1riUt\2 aXp*/kJV N{Y[r֫.Z)k-sd6[1&`M pIݑ$9t ڨ~=2";Gn(NPw+ :"71@K,R'8o]68NX,~_"i]wd6+ 1!nl̸n?{&P U螜|Xa!Y};gkt?p([wG}‰] +NUO̅ nЙbFaƊ?s!w {:`[:%hfre ъo/\A81Kzz1#] A7/V$/3(.G|Jc{V̦v#z*t)jaa'S4pIP:/s0{IUy 4@ycOs1AEjÛL;~Z0"5-ܔhoZaX(Gpo絆D+ϑyI`1{3&=ٮZf/ewYđxd?ʡ+붰LݪG߼HJhbA2M̅qGYKax̅(uwJ46'Zb2DZoZ0'Mw.>n fljh[5kޓVf%'= S&Zpşٔ > JjA f=E?S,0&e4n;mYܰY{Hx ]hWO@=s(3|$bcpZB]Xւebo/9[ O{XyiNj]d2d }s9sz0sa;9.q)ܺaoȟ_a7R4 M$<۹aNb!s!|Wh;8̅za&oW5tu⌢9ZiS;#(L֮Ub̸[5߲fNٵn'V4y(PHF]nT i#(p6YGP8L 0 nVw}{']M!1 );D79&anthyg<` @#ׂd>n744WYl,|R'mK^UG=UFMf=E>Sa2&PI75T?|[f @8F4U=d'_" dqrb0=:.p2"z2 (\\'6z YѪdīŴo[w e>`,}ca&Fݼ7UCO0ʌ7㦿*x T82ںca6E&:J|W !W~q=Uyf=E=)ʓ;K*߂}v^۪)N#VP˒^9|hT+Hx ^{ԈAHQwÊwh%1ú?s@Hnj9dMKsN,m SO{=mk44N\^.op_fSN0Ox轢K8.*جzD\T]L e(*dq&M^|iI&&@%jVOך]_~ė8GVؕg./@2hxǛ캷܇Yr9ת Y"s%s(}Mw_/wޯ&-Z~\OҘɬ2ObL F&S7/5p4%5n| yk?C!m#;ȖIϟk>^ѻ  8m+%ě]h@F9f{0c3JԂ Rr_L.zZOmg+ ~LO~*`LWЀ1 Ѩ9uy۵-h|B:f4 ͋\͟KSx,4 LS݇W[[C%jX@g Ǯ0hDgݽ;w7`u?oJzK"\]q[-[]6w-uuW]+ *Rk- PғIɝLBRf&}os='RS5)[m|j¥ 1"}[kkwvoܔ7K;KXB#FۺY\.b/Sԣ"C و[*+\/.cݴ奒t2~XLx6eU꬟pbB;wzwڹq2SZuD E"Spypƥ,)--Bp0U?׭b@g | Jʨ hko6724dp`"B5""䬵÷eB\Avy#RuHpHXH_} nH4X+2M+/==;260/d$[A6OwGq(<@"9w~ :g~G&ٙ %,5+Il6kEe\{L;y'/@e+@AWm;@;Saq@Â(u(ᚲNg.( ٰ"Âi҈ppG6 ˒w(l_w-$ _Wx9!4YZ(^{x}ea}|ܠiܗc˓U 4 rHoqf(<%'#C{r[~v K>&|`EQWL.=HYl8g|xI#BZo[3htJA_5z%vQ F66XѴLeJ} >k1׵ x{&s[Ln04jdz >gf:* f'[J #^'o]^!Ύc$.U?y{p-pNO!*ɣsi톽N @=E-arƄ۝|K[qL6g?Y/25=Y@c 4 hSxD'έ`#?X\AQ{Z.ƶh"ZeNP!Rj$jޕVeYڢ==[侯:t7u# +-JTݬP( p Av(#jB̦Kl$?tNkoFc8N*촘]3Q!4_͜0nK"Fxf(>,n#M0ѥYYh۳J{Ѐ{ShiMFã'[3G̵6 B@!h0??_XBp +3+)#I籥|r\ht&Vz8=}LNkɕ=3e8ъ-]缹ODδT#gݞEvv{nM=Ocizx{&|XV( p8t11,,-彔MɆǕH+n"%Ąsxu^[Ӿ9ef7lH1Zˆ}YdD1p ۳(vvOk}0C!}1]>z(B@!Pxh@3[Yi@WZs!}i͎C5p:nTTbʇSN-g6f-''DBgy\ļY_= iiY)#ůqС;gL[Q( B@!D akKXp OVPn?(\ɲYJ1`-: t6ɻ@ Ji؁I}{8jc;u`ǿ H0S˥\o{N}, l2s[3}ʔ7TC 0=I3ʵlkڧ:SdTf K#nَ]: \QMm;|O'ޮE.|nff0ASbm՝Rnyo.*/^RS-)#ף_MpoUGȲR~a;"p8Gj͠gn V*O).vQdx0M5 ȹ"*O:0Q].$ *-@Khl;T~-Qݯ 7ogGAɉ4OؑTERA{2OҶ'?i]~7?3đWa| mWc2GgǎU] IV+ed/OйU3o~"-н9՚;)ϑҺ;LmzwfV- v]% vp |?4ßF?z힀JNE;ˑ9t˻~o?˯eݶn-8256v{{~1D`!ݧ2 II8иJp -JUAꠦ᥮Vx1_\8,ݸz}qu |JqLt<6 1wao|6n<gT:ʮ'0yg6c}a*л7L4̈́)Vm'J.. @!j8^0$9@oO7\ 8 S`Pc{'je˾%ұ7 }/sJo,(:n_ُC㟷,~ۄz8|Rx;Q.[-טu;AXF\6Vo?H90o5gn' dc+pYn;J.BA&Civ@4pP%R=65uj{^}equ|c1 ]ONI]߽8~A TLt>9IqŤ} uc}5R."4F A^>},%jeDмb+$/Wpٶj 6:!q4 ̞4.я'\ԝ3u4!|iq^ s@whxswY&X OihI3<'MPy{,4uyjfy|~Uilzy` I4fP缵,o'0'OMgeQ'p9kw+wu敌f]:eU$͛m51.x$qQahd)0A[>Β OJz4+Bf!|ysAc ӖU ^lND8I6ЉjOރG@!Xc_k1+ӣ ]?yHEՏW쥾2,QDYG2viIr(8*C<ڟ{pYsϵٽ14jsvG<#xġv͜/(Cz[‰lG>gKAq؁Vn0tmҠD1?5dͼ i.@IDATkܳuKtJs~%y4^TD΅.Llj"! FƉyG04U^\ |3"{XcV"İF+)wb,]sp5ك.;gOߋR$Aab8l~J^..Bd04p}mQ:q6o~InJ ΓЊg˶(zNJM; .gwm⩑ȭ{D6GֽYQVxXf!TC찝eVxT5>4 DW"߮%0r*6c\}??z_p?1ҳrٻ9],VՖL|H'wbR'F*!<&7J8W BT}{ R sͣ9]p hѡ ==eLJϮ{f8qf)HCi Z?3n2D喘׺HT}. ë9g:j(E&/ryo[,h!/~.{ Vsy7OeSJ;=nd@!0Mg>n8fUOh X85ϰ͖7H3?x3g`3W^v$L AY)x?玕Zuz&=&/$^/Z&.foBZ RTR&|w  /V:5ٟ|E4أ]Lq/$u_}{cdSYHg 2{ӕ19^St pwS#Hg/kyd;wz{g3)L~GpXh n2]7yD3ia܆{T/ W;ƏP|8 X&ؽ^}9&wc5;N${$$[^SqG%c]_ܯn SwROJnΨItH%Y@uWڟ v-,zU<05Ѭm80Y.nhH~]F\ד}_>s\fDCh8O3[4t7RA_\I0άp^u/4<\ϝkϯn9=%SPKBբ kuPro#Avl ~KZQGz." 2ؤ[X _ÓU/ּ?tTyy5APoHSy"##I `6'|ؙciW^p@*6YƓłIdƄ`%&2Kv8Hm,!b0{D̳49k/#5ز|:%&L,B2+>[%va׉4Lw=!~GKzú- 8yjgZQaJ%km!6rK ;9]j5n'\um}ҥsmsu:dxb$f70g-;FN!p 8LSP,eN(g uo"Tc]5.R;{c%ZjJ &AŬ_(Dig&ܽu60;滓B`q?K \/4.X];SGaܺګu;/ܾ콮MS\55p#m 0\w#b6N7Gh~ lêMꯙ4f0yG~qC&LOPɞc߽1CS_( cQ8+M[Ș 1n|~⡼ `Ҩ[|zQBc-f~1cCڬE\ '<gOxA>4R#ܿc796-~~,ቑ\V@uFmR0`bԱ{2C^SߺG 8&[IAKG"qy0&.O3ڍ?46' g^){,}G#m.[.ZF"6: :S$A:+"n^j}m;޿Z-pg{n$3OPw'! 
t>h΍]Rvr'\ eYc01_i0Ef;~-o,!Kɷ<_FV:`P#c<?}NM@g1k<{WYH+X#.9/{cwm 8@_J0~>`r{ m`-OD &1f($Fg[wC=᝵iVPo)]֥L#KJrKZn[nx( L`jvJ /F"G@jSA@1cr÷{mA*A(!RˋɃpv&*-8t4-m/=wiǴd /9@|3:}(p\?_p#+xm"myԚ`;Ha;4Og`Jnƒ5݌)Q( v|reѩˍ'_x)5TKJ*pl ]"ۈWىKw{hDb o'ӻztGӟmlK-n/>Y)_upgڟ.\+ ]) @&n^jH@M"x}8q6uKFM=@܁?1A@&QTRFnؓnr4+2^cO'= Ep~cp B@!R2GMw<[9Z G>=˭ť0yܵiݎC;w0cT6rD0ÃP:g&۵k]ޮv% U{-Vh^MkHXQg7Dg xwa.lFŬV(KAྒ}9(+*-pҳÃ?}^D)>n[o>Q!1 5a]Uv%dj\) Yx# `s) @E@2t]}"3?f2lO~+mU ٳGb#MFS !l6ųSEk٭tݠ8fVT[ Je>δCGgBi:W/.OD]$aGku>m@35EDtbe_.;̻y*}t#E"LUZ3(EAQ^YQ ( \W{92"ȷJk?8TCJށ ak[~>^G5vQ8{-cѯ]?p֊S(C]><<1)s%"%ٵ3=s}_M>CxmW~$zn :dOI0 2498p -۴Ghc"ُYLLB]iַ;D/[A#LIeG/:kM;+A =xPF`jt?Ƀt̤#hha1QPOP(?gն~՘h/I}iBI%/c B;t?0/ Jy7~y"۞+qb $I-WV 2ge|Py;Q%L#x!of>&^ek qao(C-PpS+;c0ZNO9IN߮Ej-4RrbMտ5/duPw#|ysZ\MHO7<"GV#rgw” TI(m#;R&]L0흖SD+um h74)=S[Vo;T#A3hAّk/VK~g}\.|ѿ0۬BΞ:^M{pv.+_x3w7oY9Aza`Z: xyp!?tYHkd0l؃Xu#cEqo"fa?`QD1C9vd429N {yBB=B@!82|ͮ/v-Xp}v>i4H֒ J7$߆{b:7NYܽt˶Ml䥤;7r_c'f>7'SIǺ5iz̰n~͢/|[QZ|7Y 7|d% v[׵Ҹ!r6^n{c tp5F4I!&#օ1VP(:Yj;&@ӍWٱ>-Y f(Q) S#4zO_0~`08s.g,]`R/?XS` [{` %Q`|LokjmJm}`zKh VK[QYY >l `$\b~Qyb zB#53w j4v qWr 2%MF`4l6>&1orB@!L87vpѝ0I;vk㠽'QŊ?m1h`l-`;Vhwr1.;o#%7Zo޿HxE MOʬM:66l9rj.X&_^9p^-sﮯwn\&*^g5LĬuL d'Rtbm'~t҄r5?ڝyĆRRlgɨ B#vgvh {lە‡_x4ըx0z2;8幟Wdrb4_|O.4J~ox/l>AJv`Fg#G (zH}ɤ8`Ӭ~Y^\/X=q |3>:%pi+ϝ [74AA bDQ\:+ sjaBDa ozFPQO |1ҠLPVPԋd/ӴgE<6)q۶yk&WjvfpKYfC'~d94}F6!;~p*g/04 S`? Y]P/u<"馏 e'j? \v vd[/l3c oO_d_jHҦ]'3SQP;z<wոC;014͂xP?SEEOdRZe]Ax"6/ kD@;u Ȉp аP'pwߋ{O#'Ba) I}{~W6pkBKTx) ɉԽK'og`wrkC"QEy_=hM_ƙK}-Y #k$,Cc 00DBZyCGѷ wf5a(`,. <xL63|hh||GEFmWWo"x@& VRUWJЂU)u~sΔ(Z3;+& -&uiv6|7:8o^M?}D_ilGǓdZI$1qL|ۥ!mpǹ׊{ψ3s1WGE(d|vo&iz S\zzV?O !<x$ځzǃv0gr֢ p2"(H8#qi> c-loa~ɗ^h9Q|vߚA_NUUZË7>nr4M7jW!x2F xv8_wO?d ("c7ɧiJfJ )A0M_jOTV~36,\>rقh[.o/` .m7:߸Ԓ̔Yl{b托~؁sB@*A(!RˋɃ=!1[[09 i%.hWkc`n`xW%E@j1O{1a})aT 趏2R;XOh4uN xp%||hcw||י6*]/4~sh|pU2wLؾTV/?pI.AݷA$A*Aǟ.7Ze__q;v?6[x< Ipq㸴F!쬼J%i۬wjx(AJ*;qn( H <2ZW}#wT@qAq! 7:{2shɺ݂|9k2i)"ZJib2>F)c HO*XdZӁ Vĥm 6OYg ;n a M.ȥzxK-NݷAq@S4G=\抬w$6K".vs1q/OLcʉ[܂|;'j A: dz:qǃYA6sUY줂Ga0]A~evL spcfLLeGO=?la#ع6q|ױl(Z'+e-(]"z R+AH$r[lpqH RkEkA|pȚO ȳJB s hy1R}tK-עGϻ42vX0R7;&YI~ʮ1!S}Dzi)O30I͡{j;cSECj 7lJAiʿC>@٩V+%2n+pǐҩ&#߲`V 9/X )uٙYK1ˉc;uۺGoB"Aلa-5"qj]o~gq@ ûv!tYJNUS6sU\ S;>zG1Ţw!lzokk\nzU @N6mXb-M/n=t΍F͐f֒$[X^f|is5ftj~N 6ŀ9F;_ܟ۝ׅ:9oe粦oqS3^u9íqQQO^+m{I&h[,|BH;VoV{ j%2~}k~ݿEw[ E DI<*Ol'}ÿ#1|AIZ^PBk#)/ǃG()$[?1z1E,ᇽ_Kxݸxh]Ǒ A~h^c kpEX%"sSk L,.PW9ς23V@q`ɿ~#ҥ<-;6+BWtOt6&ڪ8|[J O V9gE%ق֪K]0#ȷG`D8bW4J׶m( /d;?7kq®>(8|P}ש O \a "ygwrQvtn_FunO7mۯ6o}C)6CBf[?v+Uf豔.XNU&7&(0CpxI U):P} ObsB<%W~'?>"V 5W֚v{΄ 1}дòXVk@G@@jc $\o/y1 ȈQ/墒(_<{w|Rn!ix+^;vlfiȞJCTf)/ z_ 3vJ<PX{>ubGF@u|Z6 Tp Z;B$][G~qj! ak%wcϚ־vrv>Ihr5mlumE{kv5rݫSkۏ6V|b>:s/G$tنb.Z3a:܁"!(<,RWbf b>ars!^yDRBy(Gh"|F@fCvoԴc\I9˯I [K#)ߢCWIFր2AiFe{|3']6̩Nph$|쭙qogUy{/oǖ]}TmՇ{mmiqW:7F^++$➚&%a뺶5jZfYݚeAhg. `}yw7\{@9F&7_9=M2Ѹ!ɴx.3< =n:2Nڝg_%B~=r;"Bh#~ u mړEwfRqҴ1{KkvMD[n̝<&ɷ\7rɍ "oEk60Cs #r`f'"I. q}Ks/+#FpRKxָq[Udo}qǸ3{➁.A{|}.Q"A-ܧϰg>>8c GTD!lR)(~Z,((*Y`0_?#[v~7%\lIěI$n/+t.VG@7 \f. &% :''} YIV+/O&{$-݀"9,cg(`}fN3Ӑ>`@!0w"$!LL>+*,T$ұS;k{%Ңt5')Nݦ 륌` {lNSm)o|<pCW'&\1B3FRA{xtn[FV]%-g_;gg $\wx\<579$tg*]679ܧ` Eo+۸Z**h v^F[(-gɣ<x-\Vn&'ׯ3|vG=yϵF&|X7O{^bSUww=fyv9\-yqKBk,qDi'0;{lz)ŝRZ<[Z xEp 9DMo-[Jy%$'‡n8wx1!w `.x>G+W mTԞB?[iݾb+͆)Or%'e4ߡ3okfdLSLoBSmPl{&/~1ûiookAQv2AMO%37K`͜]Nc{oTIaQ ]ZBp;=`;nyL`Mل/6!Afx)'_Tpd2VUMS7H1 4˅\Nj? 
{屛5:6S6^a=@ O'))K}$S S\2ehZZj!_wQ4v s˿~kg*W, \||*r]CCO-.]x=%CBHa]^=ֳ"&7L4D9W ZU۝Q)J3lc M)$MG]h ~8)2 ~_vf7(wk]g x.x ᣍ҂Fݰ_nZpz\$tQ0\ژ'1mYjĚW4ϩ w)DMl>u;M NuB@!'c=嶨f'x"wp'{ҕwpАG4=j ,D5DҰ=b~]KB1S$Rjz,^ipIviݠ]=yV*W%-G8XePZa O2KPw5sVWu40b"߀Op6c`$aϋw 9̃+ R^mGc޼4xP;Q^ k%auKB}k0d'K+p:?JZpjIx7S6sTOSuuWM ?2 \vha c֌ p!|xЮu2t]uZZ \~,/sr <㔧K¬s4uDw5zQ.8¥ tY~1n@#DT .3$HBbtu*R8'O5.IG`[/5<8Poj%jFcw.Qv:YjM3,V. Y/W脫#pDڃ{4 XKW9r3V<K{ )XHX#`<+Q#p,wC| -;NhW*_׌m|lXU$`?̜3̀un1w%[¼<*ң`Qcю"~2܋b 25ߖ. Ϸ6;qW0&($ȗVUx YE4YI w%:p4`Mjg$]*'FUq6+}/?VG-{p!p,d6{/\F_߷[G/Ź#պ..S՞K_W|pŖtxn}k?p4P)5މA3qvv,8edպ%ZpMw#iXICpSwփ(zj$PٷCG^Y ܩN59l~b.8K{}" ɷ4[C>42uD[.sG^@ܕ!\[fiZƑS`v**]-|Wr*52w<#kfǷRU&Q{fӫzs9&΅pѕްZx(i<uid&CA:f)+4q)}\o%[:~[iڶI2O>>Iqt|zv%Mw0pެ%ߴ;]lYhPrWtp@IDAT:ɷx1S 4j0u'UZyFIn줐@ K":6Yb ;"+y([vauqp;7#̕vѿku~z4/=Idgց_TJg+GSk&^| sT%ESƑ.)g"/X_>%/0޳وOel J͍pIu_1y^K^O.ƳtIzlSӹ?wsNZlS#'rEEALJ.1(0*`*,ݙlt ͝>Z4[ZoL0%FR.1W 2AwG#t+YJO`6'P$X p{T?{%s-ݰK(b=68{[NQ8^XR)ȉ3LCz/w!3~ ڍ|/-~C^c*1(\f W#rp$lU_xBt1jY+TB͕Rj!c7OY#W_FE(ygۦQbSS,v 6Cp/*<1Hw^`wͮ҂i`~p!y(Q+cabkQabh!\ ]ߍ2rAi#Mxl=JP! o>0dK25ڰms&SuA7շ{u'MW|&.9RPw&h_K~>=O?r wS碋=~$LK%֊;%0ƿQ Bz~璐Gcj `Ҵ##˩}wg̍3;v3Dÿb>E1zУs=^6rY/Smw^Q-qT;R:Nn`6kY{Ja&))FT`-x{HQ~«>Юa4GxWox+8"nUG9ȆYkS?6%-6|v#R,Hݝ)9W>_Z&4ؕܶ-߼Ǖ\h  [2rj%"Fq M8̉\*|p_}{|m3kg[Ҁ79GRFLxDѝ. ajfSe/*qb_K#˵;Ք >={_nXݥGy^8ah7y&8.40UK6л_|M.\Q3> o} d+E>Lj/;Y.j͞mP&YCYGD},`nv)&{7XXRu/D&O3*tn=I^^o @`bSMf9#H޸ KD)8EX1p;6r'&笢EViuR]6sشTiI=$;^ts WDb;-5Uġ~2J^-tF}=cOmYݗv7"%6V>)#$s=W 4Crh] io0j.pp.&ָ4。;'VX?]ǚP̴&!L & ƍcxm6w9߇0KIki%qMCpJshtt9r54WqC d< ujicxae IؙL=̬x}9&_)Bj>s$qz,(2'xf TnxL .ZlgXR0ϓIC#dس vcn@ "^'V/C.vg$mi4|KB_3Km|>Go_6Ƌ/AFOl2i+T8C?8\9sϯ¡;.umK1A;[f ew% `=ll26kr&\s{)#}mtz칭;9nC)Xa~V\"aBRLěBMHڛt.xJI~({Cԓo`gl^K:Y~>^(_9?1"%o!`r9$|M;dǘ$XVVpBǪw(gjl@7{W* z(]6wX8\e^s▟as_&LL-T{7n`ְYH +aPgЍNleOU4~Hqb_Zx|/)%b}6-7;]6Z^*:&]&ֵܫےx߫$& _wxAepZMe{3/X(v#,$ĵ=1B@!hE!=ߓNm/8IyLcа~n?\8Ƨ:r:eitͅh},e?"}]4z'|q:Z6kN_y÷n6 bN & YVWCpxv(&t \F`hkD{uhJp emdݓ6t{LF{:5wWwb>f0%eʨi:sc%t^ԛ}S.:&$X:oGo7v6MTQ1| Jm|M9~E`>W'!l#R&Wh4vpӞ)ےogw#/$Y]S!(|7> L~"XMW[H~i٦ݾb^AQO +L-^)T{=z2Rı\&2$'ćګߡ;!*2w=!ϱH(-a& 5^,\pfUpm5]1?Խ$"ڽo4_*v/P!ZMBO,׭^/n{ҴiִQ/˦B87᭗ wTW18XSO.4%cQ*(ފ0IȔ*l !ry$(IRogZwNל.OQ;G8&& wMo=Xp;6bc|3iSirQA`yѹ'N %恤Q(M!wS&q=KE{jNgi!1RS~7̠ť3x?A/Y;81~_g0GKһ_e[H99͹UTSx$MĔ#L j[- 37>cx 8JO*4 bt`f'R.W wQj>uJ!d(b ŕjaZqBj?,i@b=|Ǣ?^tI7ϜL]{C)r*(Ra~][3۹$aZLt6$xEUf Hnn6KXRb"op &8!J*d6MXHS̬2Wְ",!2|V8'LԔdJNNĤD'p0mRS] }C'N^tJY_SNדo|:"{f^Ғ(ȇi`!8K;ٮ2pEr FC,2ѿ?IhDκ̰7B9ua%2GEbuZ\ZZ QО& <{1v~ kN>gN6 ;u@Eb~ˡE[X(?bjaC5.#9YK%A~=IFmDg؈S!y`!}X`V-xz& I-:Xx WPTXMFgSzR bX\naIa\e^eR^, Y܂řLj >M .qDj%nz MC`TOpr\GTdqT_j ߋPГeo剤ng<\tI½Nhj[Kwc]t)?SRxjHA`ΗhgіU2 ffܛh#Hg uA_yLƮUǚF3K*߽V &#<=2CuPu.ɦeX]s[LtpXhGמ vF_/lso4E֬IB^)*sJ)qv I!*^goάwyl>wN[˞2TB\os ^ 2Y8j22F"ܟ:P;p綏.'$أmFR*x#+`!aJ1?,̜6R$%|KA$ 2vIY(^B,_v8[0ut?3Φ:yPu JÁ\){vajNWlZ읭kW⭈vG%yKf*x!>6&XnAa:*!t#d6֏[zp}eEU۷_؅E{}:.owJOΒ'i#Gwg(=@$?z3kglUCTfGsp\Q5k i+ƍ˾Zk=f_yûIb*x$ꑞbY7|^M}?Je]-vphN]+~ٲ*Izl6A۠ o~Cj{r`%'b+;lPF|̞6Uw4l!vBHTE!}GL T< OQ("`e CLl*XxVR:V1-gh}"+x@Ee^]({LSAi^C9~B[UUwߟ3~P7.=`X;0@UaԔTV;oϞ}۷[^)76H87BN,< !}0%uycG๳(HA>2n+}hoIԂShRC^q[`մG: Whj|,fwe9P^ic+Q:}C>f 7zԭSD\R^r1T[ .7M̭{Ȑq'\8z⏵ !viK I{ b@ $ J 4ɋ/B=bUh;dliXp0>22`lYxư',"^›" B  m% յy\B##rfERؽ C ,8oH@)>2*5ۂ)E8WpyV pҬܺ+xtgڄ$FCY5Ԓ$!w$r.kJO/*):IA}@]'P/ރCq\}{ `񯣖-${!Nj6v%_W}eDYU.Aõ{7{AhfI n7;`ӸpjJ> .2n IB LS~`Zڗu:)1qBP?9lVp r |dlgGA0 +( c*<\>ƲQ5!0I HTE\ԖG{x`鵷ޒoE {Mk6!u[K(l.S :E+^$?ió&1>"숉a!C)GL NVmOLFZ6vom-tչ0C & 5K! 
lk 'MH:?x2?sQAoD`{lHr/$`BU÷*ZI >)%K  oԵ|$)gm;xu:xl`8eCTs,0y7lڂ% Y WZ" W `!$pzYjP$uEE*~htGu]Qd\cKC"^ҍpwFw" +6#R0CI%eJx1B_QڰK;R&Kr(!^ٯoP3IpvmN vϤx~Enx7$j$!5j Ǐٖ ])pT|H'o2+::s=rtm/$#|Q0=Ѹ{Qe8,~x%˵; ~e%ZBG~P?P8 j#& ޻~GNP3X`5 7>]RX ~N0C 0C΋p@2*J'$a 8ᛑ8O}w ޚ 7 Zݗ3b +JeЕ[Ba>u[,+=rQ{8+?ZOݻPIy5qjP/Xb+Y@,MUdGxq-۴rczdѝWNj ؾ &3-1VX*hK#JJYb;rm}Xey%tV$βɻgѢo5 7\2N\ݴv{K.Mbûv ;҉eRG(^2aMG ٸk4wW1dn'p <S8v yг6XCezƠ۷-NTGKfՍ߿ 6Y+RJKQEzM @?:)5a}{FHyאmkJNyYMS*c!X&/S&ቾ1FEY+~`0Cn^llҲь>[Wlj- o X{K'd֟tB.H@+˵;gPmqa nfo&*Ծ>΢Belڨ~1zuAgO!OT2iF8q4f=C [&n.ϧ 9_DÙt|WY}&c8~N2A_〼SY[1~ٖ1 "=%dRJntM&I q4U޿,Uw7.zNv&MG\ .c|  NAaPV{M`;l*(út BjhGeOkxJMz4L_1}$C7\R_ҷzX'"C'dQM2,Q+#{2W#u|Lz4;bɷϞL ).zX6}/I5E?|D* еLOK-o u,Dw8W^~I 15u8'<W/YK!}/C`%$ԙ̬ƔJ2νW'N?V>X ST1)Z1APAzzuex a:^mIQK+Y6.(:yRC;y_>8facmNo@ `k$,ВI8^[XJUh*Bg`,x4}0co"mYó+P9axDCӝz܂q/NXgjcWr[A=ƪ߲C{@D *X-T .:^LX;`gU%{Z;8$զ=LnZdψIULo^1iZE!v7Bw&PI>ƽ*dEnGΩ}κŻ w">z 1aeЎ~Hs],ftr X@ x.J@(ǯ$x'L~>i&tz^j:䪪@cD1BSؘ:zZwY`-F} j־wy,}昫ڽ>ّa\ oK7;r|鶲2t= u 6 xht稏r=ƻSa!7^ם5CƿF!n3 +nK԰/z"[8|,i>z_Sj&dkn YsޭL46oȺ#|$Ww.5J-IB[wt}K FR\Vg4Wieڬ^+H *-½Q>XYcaqp-4eDYUD<3֒ԏM؋4h˜BIX䕆zo.yU++LY: ${n5K/yc{&Q3 jw$)` QsP9 FG O`4dPFDݦr?3vݿ4z)![2^=t+4T\!  J(P4[c`pٴpm<1ywoWK, ~oFV# 0'u lS9-maWc bGT=?Ll(gIXTBeCQVY}pZXƳM5NVW i;qCm9V~hwc~<NtPNs\,;I.E]3B!$֢LAb/M&y(q 4~K0>"[ P#e)7miDI3Ӄ1PVY}[InVxsXH{cfKY1[ ܙ~x/G<I[nvh*>P(kжUe& Fܚī̛$6.Ӵ>W]"F06X~w ft>Z."\I ͧŮ1v.dyd@Yw9#9 & *@U>7x4ëFӼ2vL[FWnhxqA<-Z&9fnP n篿xǖs?1vngMNpm\-XNQfZ"Br}b;;M 4r@]:i(;@AOt"$!O,-,wuSX/{a_Jg^T:xuhXd r1L Axz8I9>z&k7EFE¬c*hBlJō8p }eEQvA!ϐy³\`ukM=1jDER˃R<-8z.BV{;tp=nx/ ӼEk*9NvkJ_4@IDAT& K"K7\wtA&·yly%{yt@jEߍ& >#5ռ$̱J 'Ō^f)RYrr<i F]UaR >.d-;LUJVp4 J@#B ⺭( I>3RH6LY}NU$>鱔")<[f+ #:tmB,o~=%,H(W1a~ˍt9:SZ%$r0 nx8^sgͱ4q UtY5SIy5wy4wWOY~S(?4J*~l`Ҋjqߘ=; ؀KRoAhnV & 6I^,Û0I?/o{cI9\g/AMA`/tRa;OU d;11+YV*A&NTе9 Ue;0y8URNX9O6O hTA26wc}ew8(o P:͒^F-%U9^仪~i q&oU"(5ے"V=eyGmI =eT ]LBɂYB%dn=y9%E#!c{wŵc'!fa.Auijx;"S,?ȵN%q;\$gƮ;O|#޿[^!AnjLXM ,d 8KJLdma;0'\[nWܮl!70 ;ivajVcp5Dy($?.q$wp5j?@=Y򝜜lgR]%$ϥ}zہy fH4M?}fz fjP,5v6$l6HOHir }OO L޹0e7+mnG%VS3)>pZEz%ϳ`} W 1yPU7g۳w5`-:,irWЪ(+ؼ_)IqZi/]qHaQ*'8WMEزi9pk& {v};, 6 |5EõeKw6;3XpC`u=ju%[kXݣV{LBuF#wə _ӥkZqcrUę8!&W}W'vmb C%oENG;{v΢ &#eKo9 &fESV'U5^j֓T0>Av[ R?|.ef;-5UH@~2* \5R/&QS%5:j L}=%+tĺY:)ld]xpEcHgQw9;6|K".Ց0;-txv_uhAeX:udrtVB|s WٓX$=>K[g' pvF t̊B$L; W]瑕vS`-ŗH0-}}ف}`Iul&aDF:Fy|¼!ױl$6sۀ?E9` 89sj1]lO3_P Ȩ(/۹z\b4GxBϦIϢ%eR'Ɵ~"X,RRG>">.\0Ƈ/֥?yUXN0 | ܿk |aLAM3Z }uo e`k H/ O+TIƪ: 0޺fɧR2]!w^5]p1^J`Bj6^|~;Mp xgK]NH?%i;9OkK5G;b{_ѿ?Hİ>0ܑ"$aٵ[Ж%'T$n7NHGA7>غ8 ht R L&Ix[% toGLx{uIe;յvu mA;Kj T0xO+ hz" wfϾ-1LV;;;Յ78)E׺vwAwOs[_ڞ|T$@T w o5_懗ϹJsO-Μb wpɄzx iʅRJg!WDӭ/mԔ h@%|(g Jix!b5~dQ V0ĺefԷ ]w&1<nl6"7N ޿#/#!G(ՠNG#.GA4z kHҢ|慄&e'@- ͚rt~V5CU< o-c𾏹 Vj'k$v͆:+<8%I"޺]ъ>)/%^8].7H1>qx?ma'\JBA&0 ]JonIӍ r6 pifMO>[c%pCJí{VƂ ],t:kvp'nx&[fK * b-uyTP|qPneH6M,RfP[ā*,∩19IR< =֏mk ´{9Wx xm I|xݼr=y ̼r1IB =&jn|ɶhϻ>cq'd<2VOV{Ί,^zo _m;^^W޷}#HAvdĻhU|b9Ylxt{l`p]p눋Ɲq1tԪ|,6a%`tUMb[,m#&z%W b05B`56Ou'b*NQJN$홨  Ԡg i( Lx-O8c<Ԝg6G baTi0`ӂ0?92T |rv9OqܹN=33$m].`4Şf ւǥ#GQ_Y-hx3r&tu\OXi7knԪk*+Kۻ`VLH50HAj7xC?gj[$s#_}[z?2rpwƅ5l'YcѼxb橼7dfVᑦ([+962⊀Ws |GtEfiGKN\x˔ F@~A 1#-EI孈=8yœ+SzNN{kkj0]gMa17\m2.9?ڇǏ@ؘě22M2=վs:~jZ?,r+'%% bяjdźqCE3Gz7-q$ ॵqr< z#_Ayؾa;I\^0ގ\ 6+6B)A:Ln3A7e>'T,"| NZvOÉx*I iڛ8k!H9.ɋEwȊIb6O(aCegk5I,s+_fpwx}VNzRaOsa 3ox8ޠu[u̧ <2,!)=cM apN5'l-Uk7WU ~@ybJ<_R$ (0ߜG!),&s}qZ)qюPގRH#3'OuAwdhEë Z*!Fhi,՚7qg;d/z8p @џAѧ: ҜҍF~SrYYYø+׆yƭ?캵G.]i1^3d6jʦ&'^87@ŤoYKL&)S%H2hq玫\v|ٹ NP% 0 .u<`'7tѧ}|[cIuą!Յ4AJy8匫.pDʏ?z m> ܞܽ=sTyCmlF%9#`ƘXLMC~/ sB! 
>q|A@t&(Do'/KU~ͶҒ/חW>¸|gKp,x{rHtރN gLD#9cNnhZqC7u)%˭9siW2[^+'Oo2tpUFFd]}xȎoжrs.KN^We<๒3lR d {l$](G>Rb.;X>C4PJ3ZLg|q#_Zem#,O-6b bEOIڀ$¶& ''K=}J fmNPk\W&qp~уO?ۓ^qƹWTճ<sW#$1iiOtɜ=.xM;;aWF=vu8}ÏNƎ +/I%P@^R1w?xol(nr<t .N&|%]̓CG^뾓CE|_shNH ;+^M CI⬰јHKI Sc}*ֿ{7ͥ6Ee8,/O3SOD^8/ic,AS YeCFcմ㉁Icɟ]]cHK$ {<50t=q״ fG\iP8HLBKj涆팫5Iw9KwmӚ%.exzh w~3~/Q'ڀ[Qvb$)?E3.1A{c`&.db.OI0AN&ʡc;V<̙"c}P߼dv„A+Ǝ68otý@mot>~<{ϓ ْsj92ך=yy;,y);Jeb٤#.x7 Iour9ˎ(Z+*ؓ^hOʢu^=:25Ac'eWCu.R ։u$i7D[ִ/sA@^BN^~ybSpDE>={Fs^SiCBL zhsLdH O?m8K\.>~^Kj7 PU!՜*j+HbD 3H@C̫+_/x:዆8#͉Ic e2t*lY%>zpkwqP7J-+iV<2IvPTz4z894k fJɞ\ItY|`wL曮;T.j.QDXR4a̓umL$8l3?'eҿG\}żsѾN~{#/vpҤN[,FM)9$ר#2(#da~tArQD@bA!=E 6?Yt\Po,gAB,eۥI#*>X9=f;Pcg&tAKJP~~SӴ9_p,#}̟biM& ?2uf:֋U/vF~cNyzW" !w\ &.[IRr@A,A1r4"߆ kiy&^KZV&WBI;AXJx:ti]|H;{Up](~T,q.g ycJ|A2l?x$q^@7]C4r߾@zy%kRRȱyUX-)ө_?O)C S(zJf^^]D1cX9q!]:?SG4p tgԆC+f&fQOPۄ7I!!Nm~\I8p΁|G]P<]!C.TBd߳l+֑mj1#XXCO/J?|]m$׉SdHM;n$jw%7\IϿDe"}MZCr)"W)f JCdv br mHkvVI|}1Y_Db|E2t^B@!8]LsJVM3)8]-lPsݜ0T"M~|y;K.b Wkyy &*)uRXC& 3PAAh0K> ;[nYyۣ"ю¶v#Y~.]>bǏ"U(:$0;UPLƲz 9 ΛRZOe8kbqcOޖ>MG^<*=ˁ<< ҰoʧXӗ{2PBCSdR8ƍA_*.77יBF#jI]ϙc,:&/bWŹEUy౏x-('/_*t -A%{>BrrǚH"O)m 2eYͺܦC:)Kvr!ߞ^$>zHu23ItYYpPo[4vԿ0z Cp☉SDLFcs.e1?`o/pn"ݘjUpN3e_=^=xgm^lUzF@BDQoM"6yCT@-8 ˧_A 6̼q^A~Tzw~UQA驶̹-xݪ @;#&Ra6FܖU=vIJv~QϱVF:Z.(=}#&W4^ m KJJ}/{ aAX:%iXȣ+w]Ri'GxiPҒڈ}&q/.bnM}:zI;q(f i,Ϡ$CvPIOۺMd :g !=OE @In2~3fN7fՋU&s 4<6>{$Ky@a<_\o?71w~'Z0~̿*Ϟ:*{zpIsiȀ۴?|SVDGYRX;;)m^ti6S[Lq K.^P96TV?y d#/UP(PCv*1qe(HMb5tQ AA%>./jѺ>y[n;N։EV˘7/㶤{T_ҧwyM~$6S;9ř vsAB@!C@-¬âc "ӕ ڷxnZLKpn: :4}z:WX 6I`|!n;[{R)ʙIpxoKI+;`SFS ( ?d:j=^YL&KX8Vi*BpTW~3;XROzGoNafXSiU*c^,Ez>{T_xXp,oK7t?W f&Im'EuX!P4D@IbGGfVإGG%*=۸Bx-**60:̏y ϰy<7- pS8~ygG TR(#l: ~N~vWN:'ptڬe;6+䲺x8ý| vGll@ ~K ўvZZ6y=T <{;ǹY<tӰtꞷ%bg6f7W^!P""I̩SeuՎ{/A 96oJ ! ۠ӳxv~, @ <Z̛J88_b=~br%gzIƼhtכ]W(EëM!$ۅ v%ǹF5ٚfnelל`DxPx!PXnivfv.^o}/kK{E+&f`ma~W_ ˾7ESIýRqB)o ='% 󀏢)$! ET`'@YRދDW)6)JHHMH @7;3ɄMH$[3gΜk.(8[/mǛg`E:ǩKݲb AHoRѢ**&ٙk@,U{~߯ +V֕ /tۑ6Vڲ> ~DZU72hE7Aȝ'Z6TΞ0v4K~ZIh n 얺*r5;1pkJ@m- ׮E~I1ߤ,ҟת_%Ğ8|SAvB/\Zˋ׬~϶-{8W$ė8WYf耥RU ij܀ k*޾"dz ;d˂pNٖ-#'&GtM o4ތuy^ %z|19xe{2z+*[E'@prCszöPpNJ#ǎHMIvḵGe s( 3W22J 8l,/>gצo.//-Eâ5m[[YG I}~9ݥC[,:YH*hWzz4eԵ PZ񛲒XQFIEF0guS⹢*C1mSԆk.Yf,iFG.ex /꒒91ki>Y_<+vL2BˠVKNnk̮wZiJs3LZY2ϑJ(=*'r&ep"⋄O-qFw?;@7\a6ו\gCyuلs?UTZ\XpC"Y,ݤpS=$ėOBs}Z*:\oxz? m@n >rX&xM-vk n(ZlmKɒgxaAu A*e+L1G7::x/۔ĻXv+|Uߺ0*FgQv۶UGVJL *],{K_S xXLNډBt u:D+|*Ƌ9,ɣӻM֒aE 8Y3&Ϝ}B:Ь|Z?kRU97ޓvzJcHhQsDRo-*Yp ^ĥ nӟ:)d&廠jrR!$)wNÚ94T{gf^[/HJW1@W1dﱻ&U,]^=Uq)3g߷0i֪d 8$;1JŎc $P]<}&t| w*:.EGO3uRߨ,hy^vE?x/'M2}pm󑧮3<_"ƴNñlD7)#:n!Ir; jj}A(65uz\k](%7)co Ѝ¬XRJ$.&Y3ƔYIOȒ&Ə] Il ۥ<"D )ZUU_qS+ת顈C?[8j(K{:y_fi))jhu!Z RiM c7Bhn"˷rs Ʌgi֑0vH}3;4Ǧ+-ǟt4qqF_߬|7tB>>=:gyU =qKcRv~E"ZBG1K$|]pn\FXC:(!]u,L ЅnDJ8J?@^?efJtGJ8-BM^Ւ;ӏ-[|5ؖ'/ү~:g짃k~m֐=OUE<(Z]FE]cS PP%|cn8bEq"7=y&.[6NS2f`mդ\:<U% -& ,:= oxr &`wJa0j`t- DH:1Zp=N5*΃^V;7ޗvtౌt ާ|c6sթhCZH)4F'3¥|d醗~7=NҪx4/'c;lê PCY 0{^g `kO&VY)Ikq .NOkNշi5k>TVIK)5sMs)D*pIpX>YB|;"Ql\T+hsP! IVvܮZd_Pyop]pkjUx J` g wW#}j,"|K@QP|z[;2Ʒf%cJq‘t^buCn(ZSkT$'';8ĴLԑcj*]v-WExq[pEōN >:Da~奬|q|С3ZPFS&CL2k̸I93ۙFˍeEkyT "peӊ# wu %=5$S'~3+v/"qr ?+USiw>7%87 vcP OyhtYv|/| )ڪw WEr~Uyt{kCl=K5И| LpQqF-蚒SM@w%L)-ȉ|-ka\[H)p׉8a!ZNW^|KF^3-^;WNvvF'X)7f. 
N@~ZQnwJhfMZIrUɨ\J*R0*bv}ü Чe|Z5pk;تJ!MljU.M*gZsb.ID\rXݣSqϿT{yLTy@]T$x:n8J-+^Z` qݵ{sr3G26' x 1q?Tp3GN.l#tEo;lޠ%l[O.R zC(zr0>,}8J5U^0xq*>\g(Wy[Ntt0t~ a$D= r |#t۽cZ`E]A1up+w+VOs-aP?|]Tů%?ù%irI$_x^6Ԟ :oUYHqze4|J `P8pנ4c_ZpVHȘ8- [yǃ-FV^ea$[b U3k -[ PbeKu^f- @҂puP}xzKˡboP|-zG1>y*RwC{.#tݺt4c>~ > bp(yCR lS0WlK˔x^u9xM Đ dU {y3@@w솒\Df `=PexuSgOtO0dA_2ktt.Ǫl]&mTFImsf&Њފ)c5; yH(W`9>ޣi[N ͇!|R(>,ǎC?YE1.[5H٧9YJMx_jv@KoAkqiZ߫;߃`ئ)y'-+Ǎ+7A׵3x2+:sh7'`@%:GA*/c&GpE>@IDAT\t GBB [d{ SAU( (7L Σb}h 1]l`];e/M۠Ŋ_o)l4*%h 7򕿃t׃. Մ^Z7lE@%UJ򟖃ya> ґMJ?SeJ~CEe~;H6~r?ZFRRY @!o>c!&́;\GBSM;7Aа1^廐ջPr0 RΕȨrE``? E{SzzcBr⋷Py_O*~q|>έ[ =#cu7BF!+r>L \ .(X 薲sET]pAÙJH$x{)}W.%sJvSrZb5ɵR^ځ\\Jk,GݺaP_a.FS fĄJcuquÿ00|hH)o(=xU؄ >e&p[Xߩ\I-? 5uZbutn3'~=Bg_Pq^3&p1V/fGό%0R#hLjkN5 ۖ$Uix FR#BEIhC1]'yF**C7 7=Q ﶔؚ,e`:w N"REE|b{C͓W lg%](yu%EfWϽ'|6/gLbRw[qpx6_Nq?1? 2ZɽEq;Ї#a@Cu 1TЦ{" +.)TlM0X@ M޼e'O\_}q]Pm}P; |)\Hڍ(ޣNqËV_"n_)Cn.FT􌈂+n-%\X81&P7VG]@LL}#M`ښhٻ4b]4-;w''=gVD_W \M6q*t;}NaڶJ?O6Aaj1^;߾#=0v$#DN|~ /X  [0I*~c1z cuY Z7 ~!_Ȥ(*RY d,xGB8ߛ! u F_t!:zz >휘@oQY p^GF f|ڛzy`ry{Œ7QIFѺIeh\ǟ}GEy;]%O|b[Q(I4Az:g`}^ h*H 2R>y5,)|4.vvDY} %+&\~Amx3r7@ 5h]Zb:6Yӥ*>CPlEP(J˺ Gݟ9^l5ਮsND'JoD΢6XtGɡ#\aMK0'ۮ5)U*tb:^T;H9"c@qk'HiH‚o:N#ojp'hi_9FQٜ\ĒV| V]cM$P#|_z<#>~dE@U h-?|0HGyDͧvPkͮVd$Y]IXxFZsb.IpVTcŋ+tp ʋ,̄nIZ!Ԗ=[*uJ. (M-qVgۥjHFb:҃.hdOٰ漧X5o;50M`\S´&ޚ]-|P(L4G>\8%:V ~\n7(rk⭵T} ;Mg!Ǘt%X}| \ (X ppt^Ʃ7MAXy/$`UIoJ}q#~ݹ K~$\UTZaʣ\$+Zlm4J2@sCd߸矱nihn7+!F&ӜJ@tKKwlYX/4NM$@܈=aqUӪ\HCwn;_cqW;b'y瞾[ZTyZD®BG*~؅~@A9.eܴ~gƾ?Ie0-ᶱ&NċZbp@">RB[>q_F'ē_EEN}@ERm5Oq$bv_߮Z5(%\khw| FƉs s|vU~CsN&\_9:OimD Z~GuG;Ȝ k7v B@y)%-sQVL`D_н]۷aOQU )d%7C':?oYƱ3ӽ'}$0;^*'w.b!>=Zek:vk <81&PE@Y0ᾌAG 671('@J!)J}G 4qןwJqxFzN@1GĤt>sl&:A>t*VbVVVbſU ڥ{Ϟ~~!ށz]ӧN%I`2vBEQz}~ސk? l2V=_VZ|6Rl |Fz@\~)5pS'QOyBYJRH2E䟌wN }=^VԺ\~mˍyg Je:iz׀ˏwXw9J$XˉxP"VRMĆm|;EU4%>^"s-rPN$2,_K*ѸY?%Q| T6生~ 8堰! vƂ%~4!aY6m@P KeǏLM-՘,N JJh`up/\HYMI?۷Gw,OIZs>TTjSB70/bC 8)ܤF牣SMCe%QF[; {'dٷ5y @Lq7Έs10h"z/EKNyHloEWR,8'U zIR(IF%.QE|ho*TX z"T.N% )ᤌ>WղxH{)dfy2JGz7.~rf~]rf^U[t hr׀8#h+Lbg,!ߘY#H Z J|[+ߪ8ӪNu'I>Hjr٧^W6.D]NH5-tNӖo91Qܑ2~T_nS(^˼W&8A74v~'O' _X࠰H#@13udi&IW#hLjkړ%je@,vDwf htEbvN;R$mD@ YAIEzou]_99]TOe#gJEku!6lMPXu8;]/='h5P^g%Ա:VSNLvCn.xqsrRA %RܪZtF5j0(=d_ ;&lXIkv$M^F5o[F͍{7Pڬk@`'S^*bM# xxqn7'1hzUe̦ 6̬xgA9M쐩 &MiI@S*L 8/'y{3V&@1E/a*HO.e%FxX+͹#v@Fke)[^MNKVy8n<>|F7כMt8.a|'`m;jO1[vӁzb?81}}A2co _^t޷]/$u|,[<MՕBn" з_[fLe2Cim]SSw(NT\q]IzDWX!?cur7َOw7XͅBe/45`LXo22..8=|Hu x 4@\O+$fwpZhVƋs3tۑsU1ŸVy#'qv7?S 0`$&s>J=P#|k&Pv=n>KF&' x8h*rLL}2\N nI]O?썹 YvFnq+3#LY(FnA{_U83̄ͪ ,v=qвcrL +-g50j#6D.!4 8nqlf&,RR gpZ@4+B6.1&ʵ2&pxD3M8 x`qV&`+.әpp[9,1έ찖]OSnLp\ 0&PC krZXwĦ-ٲzt7 Q*Ʒfד]Mj}&IC迄SZ+-%@8FxpZϧ$'}w+6[)v=E;nr}B0ӥx2At(&*^3&`O()-L1U1¿Vx^#\ZkYx_7uFդ+H&cܯzTE>ǜc53&\+;3&?%>vQgbR^Symz&~F&5vALIĤ윁(_2>/s>K]\Oľ [#Ǚ p wp=awֻ%&+| 0'&pd K!lW2{Q70"&%wKS hrcap&tM D/a,*ާ9DM+Gsn\MZZ֘`nM8Wu=aw\/\+`LVJ1H8V۪/qtI.v=cSwUGѤЍHY}8S?>AI1@>vAH 5\io8 be%yZf}w\;nTW1 0g$pg5 4)#+\IU[wדzMgC|b9ǚyIp1&htn %yëPJ 诊MMԖrvz&}Up{L 0! xpV92TZ^$ aDLJXډE6eB FtX6we-T!bQ.: LpbsϝoU\+ 0XXX8O&N&m Bhjܻ隐Z6`L 0 n3*\@Kb?R0^Hc {"D {PBżG,Ȼ4!xӊ[Fm<o>vmoi(&u0&MpmKJ }x,K ${ Q ?jl./:Q"I+ r%}/O?Z{y ֎8"j +L״C~Q)ΗH9 Y1 2Vy7s}ҍT.t(˱rrL 0`\k#0V&?p<62T%ظ[2}En M~>^nQب0) :>WP'r$.GN=%Wͨ fY6c}2z&pj&F сa@k.z}ϾR{d]PѺK@޶O8̆m{3-tx|?_WaI]ǘ`Opc!@FGCž[?yB. |[f:xlqRE ae25w,Pf2Y% }нtl%ш.; 茲Nmx.+1QjlM,e˂,ߋ?տΓ4F3&@w{@KL5>A_pPӱ%յI٬smR\N7&?>ד.=T% 0iI%m,YBOh5;Ge_/\E6K&EY-vaI+"8_$+FH. G:;KeFlxLo3&܀+n0E';xZrb,%$ɀX=jЃ$dQa < AާJ|@`b%lܫn-%9g] ~]}xyT~~~YHH(zoh oDnA\8t4ɑNW*+سŞy ,EawJ`Gp3؍ LI\#s/A'*r=0_*6k(T I4zt]^|V1eDͯ~`ru|aH:O Fo0;|H=O΃Gт.C,TԿhES(:Z^ 0&Op3@LK|{Y$YLO(vh?Zt :A1"otr:K YiI12˨^HN0a!BBhŰmJˍ6NΣ0ex+&?}gL 0! 
xpV@L{vRnO$+$$ёN$ .^]z6'z o-LZc$:w 0&XwKh#3gGWpߏ=v(Y \=DKZSB,zZqgF/jjhö``vV 8@-Ew72x>.amr"/n{~4fQ's~ڒaL 0{`&Μ=}=~tBOŗUrQ'&}o|rh2qz)ΙwEgL 0:^>ZÉ"$iN>WL ((*EK6J ^M8pbL 0W +"L9*MN mLNرpNam2ٶbgt\;`L 8+"2zL18Buv>48| ݾ.L*`O@]0!0sF kdh}.ȟ]NgFҘ%M?ѻXUm# `" Hr=LSg$]+;':۳| TLf ?zl`Ly KDhbs|0]@SaۉOOϖf{|34! a?bOQE &g-7F%&[nSE 0 K #wBΚZy"huqY>I{^X[8{-Ś`As$A*u?( cqb-7#W>H%g02Roe 밓Q&1pΰL@Μ}$Ê/_6P"LND೥m{K"^{'D}2sWǛ1hNt+ y <ŖDZAE&3p))3BQi9g,Ee:YF7lc(x| JS`)vOoQ@D ~rjΔ }8$MQbX͝l)f |$^-#uݢ cX0(-p΃pihY|[p2Z8=a/%H ,B0x@H`B rY:!'t!}1cx 0h/e2sΓhz{Al0wBOh,X4㑶u[NLNy| ]`FЏSfO'XV0D\>xj0JJ gK]m9Xe+w 1(U B z rnȨWZk,POFWTN0?qyx 0{`&P϶140j7F|y~̒uI36.h+&pTCO( (IzQ D-xKg_O}#ڊ{ 3 xn eBe1 >/ԶʓKAQ̫%p&g, `O,AVK"3Xߌbw)&r/;4@ 4g r4 J<UdLXo"0l%p-JCưVh&$xަnyfSſNY6߃e@\VE TByrLJ7.& 1 bt ;D8Tza<_zp~&PIp0Kd;Q#8!7i;ܬ6w7JY,ۅGB>+dv]5#<>WZrnm{(.4M龜?zqwp%rY׭ P|!}~ݴ Nm|;eƊfח(;Z)H\s8\m{3 W81'l`jbRBT~<;o׋"{з[Gg |BTxp{O4U ڌ[ =7|@T}rK*1ph?\; dX~'c. Mû[ 0nVuA$HU^GtIWםW&|r+]+B^]Z_t\hd[`t]&ސLxYs&IfyApnWZ]#3Ɖ߯I?=-!ؓ[qn 6 rG$Up_ -WcsGoߝV^(;7No,kݸvQ?t|;veDk.#pYsp|p lwq^hb,E}u*@v),Dqqb'yΝ/An5 K Tbsz7/h9q(<>nxch3&\} gkCڡ,%YojN"r[]P$1J(4+Hԣ xy.*syrFs;1rt IFO+nb0,YyУk8x4>_Ǝ#v O/VMiJh=|?M!lm5ڛ~V|T⿯O=@F΂Yj4;|E6}-6J45/'ȇJBWE5s[J-:r, z+,vmFSa>1;®ǕPd^EVmݓ]Q8 +FE{(Lq$d#eވ)-Y E$@px7 X6ߜ\7/`|fucRW/~݌7xk#NB~Je$ŝdS Q{Ob=wJ^\V<1Yp(kڬA9o"iCzM^}?*[uLOYf07Zć6 FpSHn 8VjXXg!8gmC|醝gawkQan*p`.p0rPwܾ +k&.TxI5Fg}J4C_|hwiE'8sI/ CnQ@Sl8r*.:f0Q{~8}д^2)#Oֿ?X;`g~\%Mp8R~ߑl"'MXx\p#k9re#DeYMut boeV (WoG?D:6qDK3sbf}ozX_?qj,H!m#?sbi*pFh| Pj1}4UX:Fr5cDՂ1ǵ@i6*WJw a!WfKq'`]pzCo+;|`|j.f?x<tzg_4dsa&L>4Ek|J9Iq)>"\>Z{ PޮgDY)qgJmH'^{A?V9$>_X^?aOuM |uϮh}&t! 9N)=Dnd&)MBʾ /oΫ&󫩮*rwDd& y5d]c|8)cuSF 7Socto-/9E ^7_'$J]P:2,h-8 ;ҬNF G> C1t!iKwR]J<̷c1؏t5 _4porXuRON dVSL0ŵd9Z%Jz VOCD@|qԤD!x@QVJ=F*侠fPi*n&C).)^C[L@>uf%v g>'@G_ԌܤWQaϩu S$ɢNZWn 8/VwXr' ;ȊiN OlG0vdh.Ϣr藍ؼ/kvJ0񚡊׫`GK`'ZR|~۟{7ysGEEQ Lؼ/;tSQ\JBW׾)EBVo)jsoY~[FE-I8Q!_/#yFxC4\7M3ЍHQm3ʢ éu[+޲qA 0qjiH0'?? HLCJڲCd׋N&p>pͰ5ĥ?^o Tj1V%PMd-CQDrAVcY[>R0ma~4|K>*KbZ<äYnW(|,[ӎw^ut!uF"#9Я[<Ġ?ɨ _O^?_Z,H| 0:8Fp}& }o>Q Fæ'fLiPRV\?"eـ][ޠf^귈AmR[?n@י榺ln])G!Ď!a%1ʤ+IU떙SؠQc߉OA=CGy:UnL 5cˣ0Nk'RiV\r×}zU `$f+nWp4&jaJqJA`$iYL0aꭣ?K#K+ Zt^~Ҍr׍Ets*t?]{uq(X[w \'XnV^hTBO)~>Gctr5ܻ(P}GQ][v0=?T'MdE?QVl٥Dn=i|n1p7AP0Bz=a=QGh+hYQfysfG\pU5MTKhc;0j΂²1V[TK x%Vm) =E莑[k($zp"@/ m*Z=dmH.ƧF7\>CzE0'b@lL4LwЖ[vVvk} C0ԧ㋎e+>~¯8k8`L|k_kGelˮ`kP=B Sfmg/ЋN) t#9fW^8yڼꌼ@ }NW)aM`3d}BI_,~Y]=nz =)sqW& it%(ʧ˔g}CKM)V0'=U14Kq ߡ}o("O_n@ wUh@IDATqI/檓gϬC v3DN|^sq :_o/ux)&|Hƽg"Aҿ?4w s-"7TY,Ḑ \~\:AwÇIӗ9]'(0E>%.(.Cð찧]&TzDRCGIɴY|q]"mxGFO0Z_6&d˯AwBox $V]rXSZ&0Q{Μ1j_w"ޣK!pG~l p>h=hBXGf$u1 ;EB|FQxG%rŰ%54yT_Ju[_sN3&M-ͷJPQ2 V[:7l񐳍2$={]8f}=)~3o}kY_}EeӪcikB0B!@Cݑ'΋pw,g$,~0V?t;Rn9MEDcyr9kl܀Q@qw5T`A`A}61s=Kg[Ovnjjݧl=َz}4Φ9w> <)B5,eFpwqfL5. :DIm ߭MB48GwtCxOxqroO_a'*1q5aA89@.TNKƀ|OHՕ"#oj. W8>cĺv|VOBoWD'-C`䮐Ӷz~2-ʭ]|IZ.C=c0y]mbB|)2M!fQـ7׍̺D g)ՌP()) I86M"7͂ϋSKT n h+ |w5+KC=c"n\rZ7BilA ,VTD~KjGh)l]\ _3)JRG=pTleĀ8jh.cTUb WjHJf*.)V܄ʶ_񕵨e:dp{}ʒkۢ^[y3~Zp6RO%{`=ƙ{=E.`fRvFE[Ma2s}u |cAr3YyF׍Ju T9o[ŏ~!cwr=/:: +,FʷuކqNֱ+W VA|]MqjA0~5.6 kAq͟ϺM`7XwAcYIqZjaXXtӣi?4RNgЋ`ϾzZo}7?!xu$TaY+r*&59[?S:uX('r .g1C/m;&aUbuqc%:2eF'8E1rGX}Wn/W:1^VVv&:uqO:Z]~+4|ރsA_m3pya r)-Kˡ\gť'DG(u䅾ƒ䓽f>P#GP8iQAQ s=:+>9ygQ|fw$$fZ(Jl엄"x!$y+y؀$xE(HGZ脐ww7&I7a3p-W?]wX7r~fEu]Rl-Ɓ#5,3mxMro(v$p!i]JeFD?Segnms v0L'7Qڔ+\aeI<.E[[Dj+Nʠ,{4ekrSuRg?2$:ߩkyΤTE0MC֐WwgZT/s+< m olθ{xdaÆpMT*Ou! 
O E<} DƻFR>}[C5/d`B'f~Aϩ qWBCO6^Gia LSA:toٻ['&S E檍;kLqWp9T~;:l&C1!`nvX$·tHVKi] T>h(\IjG%Ýڵdp5U>#QnA6T '_-],+FKLúkÎ}]m92u&ljt[ٺES֢YxΗ >Gٮ}نL̋D1Ϙُyl_H%kU_9F>YKofn S",R !DJ% 5' WK7|Ņ-X^/~\;I*gQ)ȳ1\OyC L*.LLc<:YSK+ү\J$)͜ +RcYy汬\[|`?n}q}z#YeP_U.m.;[kcQLE=^ҧd>ڔ'NjRj֓ηb+.Qs)1(\ aLs9./ N`>e^- *"0&.D@n`|,a p7; O0 ts7SjMq?lQIQ*LayAy#WXmߪIs#ʞL+˕dWw7Gជ~CDR_4D gL%4bg[`}q9V@jC@N dD6uj}gZttkS0IWq!P|\z@ olnh6oa͂_g>=;*DBL"/,?yY\ U_׊Y>}D$׏@hYq_z)w x|j ?(=Q|h~+c1{ʪ ?LrBSc’i]9S:+L%RQuFZ<C>{=pR~79m+ʝW9"PgH3TPc%L~#DCXYPkF@j' 3 hrM,jÕ|:AP7MӣsOvP=yA !~!dd \yۣd_L69+آ`MjSE^)egœ$3x3yDQ hS@{LΕՔ(_cSߞoyo?7Oho_Q"@@'@&(Q hg .y&`z5 dؑ;)LI(o"@[dZFJ`jpU]XnkG#hɢX5~mĸE5"HH.45hU4KuXWjp ]Yl(tl=l]lƝm*\a_+B<_sF"@N{$p>!z{~VO-k,CMnOk;Ŷ97lkabm1aܡn8Hu'D f,"Wri Iws&&K$a]ڧ2_O־U_wf;[e7ifK0"Ni~y[F?*)h,f[rK9ylߡ `w81,[UU /ܹcB+]T8 D:$:T2(@T ,{86ptֺOWWN,Yh眊쇕s0s΃mTT) eww+29eFB5Hw *ؘ2-aeJ %OFn F%%t19on[GXt\е#޹-Ӵ$z$3msc{:[iepc!|Y~ޫkof|4-4WE>;z)V711˩D"H H<7ۺ+ʍ3 qj9[mP:`cIH_*-*a;нmܹO3TE=̡LKc7FƻF2KLۯkm޹s^WTf?F'S$ DZ t"@*'0JOQhkLyV될 mԮDڭsvvVȏflρ kuA37:>w鱛+o %D"P{$מ!@t}SX7+*24Xg4ڜ\kӢ)kբ|l7aBC`s2u516 \wv<'aG2Nyx6FʝTWdPs͟?߇S"@ ~#@PRFDTp}Ja̼twywU'bdbrUW 4=:s("@@] HS9DD#3w+n\ҜqJCmrOFe\)cPr186~|OQ""@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@@ K+wߵPC0:^A;&X?W_Kht}!+1;mFCWk+Vl3qxZ2gQ bVogT,+R;(=Ge99!'mVx¼3QV(<N  #M@!l.ac qJƒ' 7{[o`eRvN8ao&:?({GǻF!Z{^/~1A\+uӃޘVv#?u/6jrUI ⿫I~9'zUE8 @2Mq ">2*uRa1SCI"y~;)-)&ҽs#DT@@ >/謭.\,Ls^`ỊB. [\H_a@_^ WBq#t(/ UL ʙGƹ.8@9?rܛ-& pExeKfիAٙ]m`/Ky΄I5ȭZp>jTG΄zV-L~U4]B˓ҔcD4nDֿ"ZU m.d62=fCh8ox|2c#t,T.BFIG8'ٯF'7dgtm KU\6Z5*)uNz/%!vIuΩwitXZԻz5 % 90,tuQHe8#|ϓ_ 'sAl·v!nYǢʕZVI^ye>dQ&5Y o3~Quކ[;R]B#CTh)=DCtqذa0Ce ᮈKeCVp 1qO9FcY'L n|΃Ju=ɓfd-=w Pbc=Nk SPu iafAG焽Ɩ8UŔlY 0|s>3.KXBӹUEg!~(oL?ZY9m1yMMp>-Ϸd=!qikrȴ\Q̲fhzj7">iaYo(*:1Σ-'2bNfώ5ROp ir0-?/yDv3q54nuIyw%y{pci[!>[ ˓r73[H뢜c!NT΄HSG!6cfsGE>no\|kŧggk[)4h|8]4%~EهjH@$P]'N>q)nΌju  |1嗱_xq-ߎ[]'Wsz$0j(z`clP~`oyH!Ko9ڭ<_A![frQpH9gۜ M k2b)|t碌\Snu88\};y s~ SIwoPӴ!VGi3mJ\y:H [˲O$@8Y`>ahHv/?=:+L rWF9=ʪj*LY\#Xd^Wkw іL)|דφۨq师-7 aNv^bϵi=7#9]7 %@s-햎}nY,d{=3zG~/0g7W(P.2ďrbt=f+SVaWGb]y+Ѳ/XʼnTEЂ/`LY=X}%K(+2qeϫHF[9֚rOWГrdø[*{_#㊂hSAQ 谜 <#^(cm!|/< ߋ]f4msރ9 B"p@j r_ ݉'^|Ggg_sCxGsASx8ϣP)e{+Ch Tf ;ZYſ?C(Kڷ{LןvzQ`-fd5%[:3ǝlnFa\#m! ^hZ=O&:岄^jסU& ~gl'q!n 0BlV)v_OMu~$ G @E.jYW?S$df?fYw>!_r3 t兪XUVc$CS]!lAyF\w1ì("X@ȿu`{+NjeQg¦xV0*tc5 f.9m{e̘W4:a^93ýzyۊ::`yИg c/#JH0D\ډ+SNy>춲Oېyx1u0 yok&KMx&O-݅x/Ds/.zv6sfBK7«}BI5(JZ4˱G357+~ \3iȰdDoSFDLگ_&a&5^YVB뛂qRi9M?-<ݵl^#gA,]I۾R\˜v\Tj>rŭK!uϧ* XsBbC(g]b~5xsoh,x uQu0EQgFP=)=vcoE\P>P_-|˟ύśzi2VըOn>5Vj8|1:&@`Mt}tcڧ`w&w@(*%5(4첃qR0[ #-4߰*U.m{;diqOg&?f<7^9-#MaupӱC/IsˈP-#sq߸`ִp+#Gs:xW\y㙬y-)Λo263e > #ҍdKѝZ w^[ H7höXk(\䙃NEJw'hnc'nF?6ϩN'Ϗٞ`HH勗 BrHIYUϫJ;:+[c~/A;^x9WN0'xpQ3M}G zRlMgc<>v8n|RTSɉ'E!޼/b9U3p3pL+Cb1+ֻKcCHR/\#^jf $ˇ'oUPhJ5V-ھ=~vYa`SVܹ*F~M~,l>D Tz?t}qR:=^u~4 %5uVqvλ?P1fY8Ä+@6 PxB:ǵg\'|92(T뙢ZQP]Udvbk&ޒgӂ5fhPܟ_#j|R[oc^R-{ d s<ޣQQHK{H,_*4B{m0K|#xdkz\(027i,蚲s2#/WlB `؍ƨ2,&2C\}-+΀j 3xL#bAx >xSћ!ˉUDzLؘ-e{3=eA4=v'2>,^=DkD.> HJS!{aZmi &uƮQ V/;Ha 1jP uyd*< Av>OP.Rk)C"UМϕϑ.N~1WuғMcKFl`6/*7_L-dJM˫+O?9Kw'pyx=Hh9gLʹپ֥thxc>۪jg|ha 2ņiN߅0DB{@єW/qU' 2XeqI vuc tT/L77lʕ4mFF:cµ&9,MaulYIUȡ{`.0ͿH ɸ Daf[{H.]FO VCwYAXq+2S] $^@`zs}{+|.' 
GMHul{& ѥ>δ}_"!KLSx"֏xx#Oލ>Ƥqwᡎ%r2y"։-?g{_^%i+ّa$֜*9A5};/dcBN܇/Ms׮ʵ؟v{ ~8RDZD, UbM ʨ~sqUT@͓{)J""?neMP*+}QLj`23f,alBȼ{V6`vUkP8^1͜&_Q |ii"gV8\M &)\pv&'X_x9p@1F J @Uo>RXwM,1Ʌ4L,1L ښn]Vyh=V6.Cj^qmf9R%0MaF7;mUۙ C&qQ#qktW5}h_^ y߁!Y2X5t<(\{}Gv&g…!A0M챬4߰ mWU8FǦ'Uu_yWόl^ޅqضweäkx^Qv]i1q|Owº<&'ރÜvzMŤx\_s`wadsy{t3Pj$mU 3[wq]GJ\`6.4|[oƷD&UVK-]ËN2T%B[t*<& ct%ڄF1*w$\mDrܐdPS}WLc@;h.l;I=m4{xGLt@B5Eހ _ G qohLf³ 4QL\t3·f=m[NX/8p|5my,?;Hq0~} Ma;flZajըO~Bt7=By=ry'| aNB2 [Rν\JUx sy4\'ӎsu6ddބ2 dlBؙ?bsŢiM~y#12v:_roQF|r{S%'anGij_iBZ5N ڡ,FFnf]psZB/}"P'pJOWoB5Ⱦ䔢8()vjN$}{,Ijz154cr(ז)y[zҎw">.UfwFSx83:6hӥ`IB.&~Q`:*jL|s[ ̒=;'q'5 ScO¿8nFC<Vy>.:ó [L+SMWᜅn_QREjq5#P)|cvK·7\(gQ ;Ud-%pzR v_ eXni}N9Z'M(O@Q0HgK1O`k_Rc e2ҭ*W.N(V=[0V'gb2[>18k\,6>ZuqP3MoOth˙86E[ɾl+w0&}yTwz)B E{".k9{7B %vzou"cg#.7}_f)Qz깊uX0k\q;Z8e~Pp[/1SD~hezLԀk:t .B .1A)U{ҭjxЏr2"5J녪;r~{7cvT&LO(fE.0./UI=Xabx؍W{n!E~|[y[5f>+)߄P>s dQTNנ( ~=5 r_MIZiD?n+oߒjhHc@]W-޸S ($!E;ퟩCܤ4:0P@tذQfJr L6{sZhvZ۠1;N 颀oz@\z}5V%;{Nr 3;3p) Y^Kw'iGd>H.>Fylfs@W#Qw$P'8LC 9(*S? Tޔ/o8S1k Z* ٞϙr$ dknU1"o`rpZf+mB{u<ЀFjӟ v0TC}$h6 e_ƒUr\6Q,9.w(.$'_bP,.;|uK":"3c/RO-uJJB/~9+W$@|V1x"Xޔ1X4vMObMtzܪ!I 1+E ͛}PQީNIpy"h?s_ɔ8Ҕg`V1ߵb")ηVݭ?\Ej-K6@u*8sYW-\<~YVl]i%:T3XwԮQ6 \y?Vƀ0U'OJO_mVN~ 1;3_#KKoPt4uhŔ! @ ض /[Q hcDi1ŋa%^s81iW'!m=tg8^30q>XQk}3n q87^Q, )g`:T,P;b:EF|ϮmZqK0 e Ds]AXOъY~DٳgM _7c4xK$;?Ӎ]Fm+8ߡ9Z\:^6awqaM ck1,^{En% 5g58߂kQ5'Eg0gPVv&ZRSF Ls\ve۷9X`kIN' @xA)ˮv{awA[xe41.Lu_~iレ0nCt|brQƤ t"[[<Ҕ.}+&{]R),{-;O3^}3veHߢXT&=eyA6/4 _IDAT|<,_QؔOdt*<#P9A -X׭f.FFh=Cw.CnTB5 u=O'|Kn! IogFW=o ڏxXhDTE5vZf}c; /v|[ |Dg ]ʚIީ!Q*B((LvB8l:@(IbO>d[s4*spߣɝвXVI`L_ u*GCXj*|۸3R?̞@cnݛj[,$8F 8”+D'ԉq-0rLt·̅N$d>Mvжq}8Pl#C7] |-o_Q"h  Mx=s be?h&@8m. WVBTLSv] ϧи F@`޳CqwBRA_!! DRܻr]^ÏBm.\ef&c^1+IW^4(_,q/]uۂAeJbwwT| dmL8~pg",H>- @2FBN ZaG a=%79\Yʷ̲^?Z#XpgB$LLYuGGK]\.ҰMd kOwU4~3Kx3TH}ZHxfA:=_\B\ue0'֔JD?=*LK FK @;v= O14be̍cEAׅg`e!]*kwtk :igK-qA8Uǃ!= MdQiQ:? B!B v~J<B:(VIBz?u_ C^wH,aʭa}S\K~E@r^-{ HΊ4S DI@K iWZD?ȹr_ (SKXDT~+v뤉qۡ^"W -M x> ;WmH+<^XZ{n۫Y`xbHXR/ kZk{}Y41֩WqU H!v9(.b >*1Q\/e:s~-ė}8Y2Nv]}D"@M4DSbVAnlZ [BDonuy؝c]-Yv=hLZ{YldB^y =ds!}//:xxxm@AE.칱csm"j/un]:w/^CQNyJ4Y=6Lײ.DeMGLfk~ \ԧ?S\)z=ݝx&78R#9{H&y4ՕQgDϵyd3ZYEYJJj}L;\Ҧ_}4O D<2dDy"@l}f{͡>]*;#Oj7IENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-provider-overview.svg0000644000175000017500000007565300000000000030070 0ustar00coreycorey00000000000000 Produced by OmniGraffle 6.6.1 2016-09-21 18:40:05 +0000Canvas 1Layer 1 Compute NodesLinux Bridge - Provider NetworksOverviewInternetProvider network Controller NodeSQLDatabaseMessageBusNetworkingManagementML2 Plug-inAPIManagement network10.0.0.0/24Interface 1Linux Bridge AgentInterface 1InstanceInterface 2BridgeBridgeFirewall Physical Network InfrastructureProvider networkAggregateMetadata AgentDHCP AgentMetadataProcessDHCP Namespace ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-selfservice-compconn1.graffle0000644000175000017500000001364400000000000031376 0ustar00coreycorey00000000000000]mSH o>[KDfEHf@)n m2%Ȓ 1`uK-s<\dOtLԍ{At󓏇3t=bt¼L&o϶p0RsjqvS8̓N~GiFQpþ~5ONB#mZc=`DHLTɳ! x \:Mf{|b/r;i6Q3b0>6_s\HędTSJK I }^vCP=Ay9m?~fUh-ǷS_G /r:()?ށ#,~t>$i0&=[z< Өr~\=wX˸)a'K?5Sb'J~bw*'rw~v~__2C뇕+L¿Nsg{9W: {w&J:y$Gs`1FggZIr Ñ')! 
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-selfservice-compconn1.png
[binary payload omitted: PNG rendering of the self-service components-and-connectivity figure]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-selfservice-compconn1.svg
[figure placeholder — "Linux Bridge - Self-service Networks: Components and Connectivity" (Produced by OmniGraffle 6.6.1, 2016-09-20). Recoverable labels: Network Node and Compute Node, each with Linux Bridge brq*, veth pairs, tap ports, and VXLAN 101 / VNI 101 interfaces; Instance with eth0 and iptables; DHCP Namespace qdhcp; Metadata Process; Router Namespace qrouter; Self-service network VNI 101; Overlay network 10.0.1.0/24 on Interface 2 / Interface 3; Provider network VLAN 1 (untagged); Physical Network Infrastructure; Internet; Provider network Aggregate]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-selfservice-flowew1.graffle
[binary payload omitted: OmniGraffle source for the self-service traffic-flow figure (east/west, per the filename)]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-selfservice-flowew1.png
[binary payload omitted: PNG rendering of the self-service traffic-flow figure; the stream continues past the end of this section]
NsقL]?0W,x YܖD۷֫R15#+Jv ,˖ 6yv"O~L1Kf*@]m[, N%VU3u_VusIHO+6ѯ;ͫoHFbqkz6$~bm("uVs9%)vp`[n&+Ͻ/GMک !мްqX_mDJ^#e`Ә/rPe>u$Zپ{?}١<3Pn=a1Hz.,XH|Eۏd Ͻ9]:HŜASGdXƬvĽYnAZZ4/rͳJ+tĝ4V<3쇈()ވ--+kHsYy?.Ӻ\e ~uh+ab֬w9_ROMece765S-kRͪhOW+ɔ{exNcH;!B0",K٘7pAxDmH}˞/,tHEu` Gnֵ|kd;S/` BSWv 㲈@ Zb|R1eO'XD7|XB'tseݴ\wVẖiR3i4SU Rr>yZ$vq57#zFԳ叔߼ PGnIqqѓ,6Tk -xΥFށ=}񍾗0l,o "RTt&ȏ:CӁpډo<͠‹?X0Oǽ_WߡV*N8u=fPiW~0M`!g$vghj4L78 W s +5x}x3r#:;hJA6O^}] ʼnV@"sj`; _U;N~qc⠰jJp^c0i'zgˆΪ=o'-`_2JAx^*C`ۧg?$вͷd|'a-4qY )D]֚(ٔ-,iVDZ֌V0vj߁g#?T|VF-GZ *޷UnSNo6eL~3 s鄯t'}np B@!`R ^3$f-e+EԎQzj&ʓo (@2CuT,E8Z4/XdqqTs3VKK1ӓ73pzi}2&'bT}t##h)Zq ͓2inNftžxEZ[~F%X\5W%c/6w A.WOC݆qI5eU>WP5EiX;X;@)gC#L5SN{ڂ`nnjC.!8&X1YÊ򧇊 ,U M,fg?|.Y7R%{VR~N}ﳩw });Fq=imC%{Vr@R.̠ :f2 <4GE P~2 lj= ^w':͌~Lxc 7*{W }.@7|{u9fQ>c4wSa!'OY^Hl;+Yj?2?UofHV[~ǺѷQA4H-zr p V}-{*ٷ_" bBz?]2~gbj ?D1BD1A4Z`m#k"1~[zV0\g?r6j'pCŠgaiP u;tәƚG]寝>Y}[TZd:2|@k)WJ[jZ-آ -r?|&BByUU15ͺx$CP䎽)ŎQ V_`gZ֮][ nuN ҂՗kVNylx/ѷT_qM ` tEQ|aޗ݃2:+}ev {"_ccꟇD `yk߮5\=|k)CO=2um]^p<UmjHꮰX5eCZrdo{Z,u5k 񘰌WMchIƽ[Rz\њ֎X6Y3ҽ᥍xXlupgLvi5>#@}F"M4ftwHRBܵ?m@%Sp:1ۉ"Zڦ:Dz9 cKZyFl\\?r[4&˭U`+2mH Sn=ra}>hx Xh"{<_yٞ*tO쉚8&fXQo-nhdRͭM*$E6hve;fVʞIˋߣc 7 njxhʨi!Zߒ }$SFRSSIs3+y/~}Hu opukㇴ[WMecK_ɾLX~#ТTExx׉ʱ^p@Z!=m3Zk|g67v{+{L.zވ`iNS` ^ yE6iDLa5 ٗVCKɘmH}˞*" #lKPG*N΍7('BvkYMK^ awRLebIJl㩒mlͲM^crmۺg,.+.'gюSԿb-#Y w%/&7TJ:|6eFFH5u.~]Hu>H1c۶i^G\5Ed@Kx^$:͞}<デ!@B~#YxCUsaT꜎AN3:k &RNY2,3 yYn8gkH CnQU0Os=$ZLe%eid,u6%s= ;'~x4$NJW*K+.L4(̌ ̠4ckMDֻ9PkIxj' }E}h3 eT+,X#i;h !+Rpﬥnj} IŔ[v@L[{=Ҫga)˰+bMaǾ8=]F{J_A.ZbM:@bFzp*rkM62<):u,/m0/ᇨWapol9LwrE{w{g@ R&NG ς^YHPT<JɁC' AxXRq}xj/pP #WsR찏'y/uszZ;}#c .5]`)cU꽷>MY^IؐRK#z{NgowfLkJDʭCEP$Ha2CLɏ%!/{' {uXtсv>gR!!'L OLpcfc2=㲭N}V?go rɅ@i=˓g0z*6|z眎v‡?@ixU !L6IZ\HR6Xi24!zÁn5 RG6oV'&:L 's0 TL`pn|T0jN,VvZ WNa>V;1Iad=:}vsɧ]} z72c-8Kf?Rg!OEZ18`"e24O~ Q&24\VNgbe*I) 㘇 Y q~ pUm $LLy!k7Aq;{)G $ܙeim_״~}EkS&.LHhhp97\fs\3,/}b)&RLX^ÚAhj!~ŏK P Iu]ԯ{K|- _]~mI`K3`k~sDH9 ˡl.aFg.D|ОRو7[aupx30ƞCwcV?P=D$TLL"Ytr0 24 a0KdI)xӳV*쪁y8kRR¤ _3L8=oVCU,YCwlıݛ=feE,>KT؛+S{e߇`7=MkX#ŤNBKU#ND Rw_x-W>^ Hq"_Lb8/|Z%n6LLDv7~U .!ޘH,o! :O93yJ$)<ːާY?Êg3`>oM]f2f?IHNL7|BRZ)m_`2e1cm,vQ{m> ޹L\0䲉>}C=W8܆LjXCdNa/^PYQwd2^(~bR"af'2ѡ_P%H#zoRcb%(LFG,'Aw0޼w0f3a\>Hێ=y _,/֘_0&Ḫ+,|ʙZQ,YM '90g$sm>|1;h;LĂ^ Qi8-N(SXS&3]M]\C]f_JJ Ͷoe7 c~01gTc[ؼI\|_P{LK T wyQ@d O(sNɝ*7n> .D) T͡bmk H6 <;Co@ؠ 7;XL[&P4vR4aX#/ū7Koawh_Nd!!+ gU}|hgP?`Ծ&f!<\ 7<0a]Nʋ^SPHTd2󾵴-V/d{&(n# +&2lIL1276*̍͛{quic:׿{KE0ԯ dpDBT'6'7~}ߗ_?eg+[aC~Vے$ iG_Ed/pǫy05lS_,Smxh5RW.ߚ,t4YIu{{p hxULl9ߜZ'Pp<#TM] pc,հf U6PSS3t;7?{zVfqq6q10_fڿy.! ſY>xJ5K&T4}|Pf jvL"II:Ve6/V329ns ޛc @ki/|6R ڱ ̅*C~89^S-C2,'sfy=.`TyonLE9@kkC)T7?`V?c;W"ګVBKU:-ZHoCwIn]wlmYbU^P&LU7S Mj:1Zk7NvDd ^iMj(09;yӑ捹R5敦Iz`vBg SqSvs @@ lTIh[Y.R0M$d~ @ZǼ)v'"@@ 8[ RUܻC;!0}yN=0z'}p;1rSL9SWBR@@ hf 91pmmх6"L5ߺ 2{%;> GSh< 2-Ǐ h5 SM@@ V%fQ=PV^:$0o L1}m;Iu]+Iյ>h3KKE>Z2Qj SM@@ 2B>&[;fb.7oKa^Jg`>4,n;Ul?m+/=>׌@~r}Z*ڷBxO 90~>{@@ 5t wH=kxxO-w_W2v~6-}H/{P#$ی;UC5 Ȕ%0L@@ ;çJK{0֎6S23 tk r>mC{QV([&/f۫>\SSk[Yq܈sB0`@@  u,s! 
Ԑ>4 P }Rui/ČhYsNhQ"F#0p=mhxo&gwUT2)REcJ."w@@ 4KoFCDjߣ*#4S(r@`'?Y^X}W }4ZDk}ټ2-U$* ͔uX@@ |lm~ R3nV".:Л)uy'B@aKF"L%H+$#{76rx]+};|Sr K/$ɗcުx=]1L55F M =#K}k6pyK@p<*| Cwsګя*:.u$8ִ7OzL<{}~gTvziE<@@ $Hde f}{$ry[~[?viv*׮K\s^d]zYs, Rܱl̀ȸK kxǪ_DXu<, QC )*[-fMxMrBn`$+i5 S;pU*J>d mGDgssşcR3ڱ.72hzU )w;HIơ=lyq4)ʝ:9X'IǬoԨIS5V6soT`!@@ bA`)'kDO!I&O|#w\]z-xhz"jq<]r.5y|^Cd*LX -r\cw&i5"?@@ hYʝB=/Y%YVjƊ3Ev8款sH=3b_#b~XB|}-;@ TjJd) P=A:;Q LcZsikp De e+ۥtyxKn:kS#z:G2sCh{p8vDi:Q\m.<ΌkL8aXipE$@@ hfmo9{av7`HwޫLvyN )#r-9l K+}j`~+S'?{ga2i'=AQE"O@@ q_g@QdȒ3_#(:^}]y"B=GC<۞Hq@ul2%s5#%"qdrB?RkC@@ 8}ٰttXsbiu]}?M>)J;Ɣޱ)ö'C{6gG*<@@ `>rUT94d}ᑚN5`\KLA\8䴺jD~@@ `7}a7ْ!͹S_)r]ŀf+:,pj5V )^kZ0@@ yQ6e  Cllk;cBfx< )LL<~V")Vy²-o')/? U'fOV栅\%Hw>lYLϮ%.YB|k %Mpxxt 5={nÃKp!K#JX)Ⱥ ȔuX;6 ,Wv߮1* /r9νE 9דIY,qf&UZ+~çѳN<G:u@.Ds#wFVZ~?쀯8?<dI^J4^  RuͰ KkRuYusV[B @.FM'wprǠ`~4ei&FO|}:uV]x$t^FOׄ8˂et7:fVU$:ҴGO|C(@ ,.ֲ-͵ZVçe)%h.5eǻ(Ehko:1)>T4^{I% s:a:q[RLSוwJ)9zt6~Egk]s$"k@@+^Jm%a"lVsVG)%-D%wq7Hy/cО? ފ{bpfwr7-2 Sy{DC; KE2OrFT|0u*e2Tx6 1")4M8 ,]Ҟ45`P?O5n4߉%*F:/`l<ܷmti2*Wr8[2E'>7շw:i*T~[_\ڍ8+O= ANgCx(XAܙH0Go8L`>#kWgіx{Jr_ C@w&JI=o\&qr+`lq&3hR}_4:bN ;ۅxy!7Ҩr$%bB#_$>̔N};2.C޿%b.֗*7/бIW $/I8 toך 2$$}~ I>1jҔ 5ښ̽S_NI @W T),ft|?p]X EմG}i \.wç ſi”?Kz+ q;(cJţ'=`دy - U,=!~c@ |~k7q;6p!KD{eڶ#4972cGN^8/珼5{ x}$WA+0JB3q^ueIYd;` dh佛&>C9循a hfC+@eoC[tΜ2<:;iAw}<Ri="4&8q Bw'bxqq=|x)в܏4`P=;2}9Nڿp_IgԀrQ?;6GӦA7_2x~{DX6fCzi^ ` `Zx,A4 S&]Ty%؊-F~]O)I nV0%Uu=8+Ċ.)쟻Ս(X9v:cb 8^?iQ(Xa߸q:Whk+ECPm Khq:iG\yy_H54Yа%w3V? R)Cz$ЫԊOhͮ`O2!e!OFۗΛͨK(G'#枦gCTxYF" ګ#^1ܔ;u(>eIcL&  !h7  +.k nÝA=_E͖L>jr,myKw/29s_O݇_`Ep€~3:+mѹƁ'Y^_x 2/b"~#Dh@:rsh4m#.IztGf tG5\-`qFoHIu+QZ"AlzCZUE(_VyYY[t]s8"kj$u‘t$ˆ0:eUF#Y |p(GU SLݔ`O-3{ưGS !0+wt9>6 hie_5-8 S>pM=+ 2%{G}7-7)pL2LE}J4|#1[N9=Nc.y*͔ݫwf% |*RZID3;6wmoYᣋ,KPk5$;ȔMqg6T;eE~2\:́QtFrFGS&`"[&=tC@. QUJ%pEZ'T]8B'p)$/ @2ʺ!T!>wa\ۧNPQ \0n}-'K h ;_i]WZg$;hGvQ7͡G9d*쀦 9ɏe=YA_Ve,XkdI;o=c8v]ZgmF5a+#0+U{T "2USYͽyI ک3"1HNqА2aJtX;~?3yJ`ĽLI]s2qkW$mD}!@xI! jGƭ,ᄏ00/pݖc8ȑs>Eė-ăr(s]>Uq}eȓZ=Wm1qSF\E1ƐvTV\e*%Y ͔%0L@GWaѓ LHK[MH fL6r?gjLӿtmд-> 2r? '?nMCwcw#}Rs9.}=$e=][&`f!VKKcSsT ČP, @)[;X~wROW4lܪ4EOv] _{0[\A?`H*e3S'nr4݋4;܊cN ݡN 2g88u2DQvL!Iyfyir]ƹu% -"w %×LV T' /]ȕCOS^h=0NwQ+(%7njJS lSd iqVP\\yf:Y h>/q1Yh^؛"ѩ wbș*ȳЬ.Dźwa| f=b2Oк^׾1 5M{dR-Cqk6[/ o8Ə/=ґ =+?_g8v6. 
Б '͸t_6z` 6Hty..7ؽ ;g ,4u>,P5 0p8Z7"vШ߭uQl'8m-)eo=Խ#>Ɔ޲?cHѰ2)e6}}k{W.?_d1<@[k{oMH8*.+ۼ"ם)S˲]ͼVv@6~W[879jœ+q(@ZU5Tvx\ զ˱47Dz_75i/KGrD00j։) =6skR Z߃)%z뤔wFWÕN]!2f*ٻ|;Yùe/' lremr/3~kG@@ ZT$Uk[fnϩ b˧m0N#TkȊ49޴ŷ n" @@ =hs?SseZ6~Ԣ1VGhDS*ICWַ‹B -ݫIBW*TzA+=$X0AP$0>Kp@.HSL[0\dcB*ukwfQa(S@@ }I[Ӎ,.Ͼhk0xZ¬( S@U)B1XPg-=7М+>%0ngMX2e1";@@ $rE&l4h9.^#U٭ځP7v|E])[?B8@@ :RPoIu-=!Td!)5A'nuv\+KJlIMdCٵ!T&ʅ #B|He+p kyUq&nHD 2("@@ Z Mw\PXX+[N=]8/6N8i8sŹ=`\ * XBu[p<*Te($o K݉9 ZXk\50p8|6nWHZBZyqZ Vg:=Sm RU+`&܇).m)wP=(ݼM=/[aM-U*+jՋ)&[ Qo^8n3unkl8ʧ??y=:5 / #STwHdEܱCsNXxmqXRݐR-4Dz;\A1oYIP')fyޑ;cYZsaetRW齝le+9X19+PNu0TrCEμaX !_\ SE FNs,Mʅ5b Yvj~0]q;Ҵ,yĶO2xP[*aV+ӳInRL^B7[2u l.Q1b @q_[1.iAū(FsH qE#P`ZSo{ 6J*If|PVpBs ʥat6mUY&RoKF[eqլLVjj'vѲ2R$)P0\YB"PO i3>UI%W{@(+ _I!Oޚmh6djg 8ep.QCzӰ]g`r6Ӣ:ذኴۯ7ƗM ͙TYFyήT kR;oO(@ޑw\?>=m9{b$6*IEE͂LH #igegCwϵXJ:fhoޤ[w-_}xC?BؘT5rܡ(·:d[ y@_э}2!kyA+CcRHcG)}oX*G%FtSVٮ[we%B$`֎˰q8; :j.ӌ㘚~xю¨J,!@mފ,Y:>6>ts͎dJ{4#= ǜ31ή)~yvRQTȢU#m.zۗ,FYf|y5p]gCN#_qDhG8 6;%ǚ&R+a9EGIqN fO R.fv_aDzzf8hM=+]Z4tľ+T{gk"B0^.;ݑUY,dyE'^"֍.d[oݕcQ m ݪB_ xjMgDiM3N!Y%bF*hBx/7_ry= DDߥ{l,H6b]$p&_P7v#,,C&)k4;H2OfuTd?a{\nS?Ј3WLbAu Hk~Y_.\E?ܖd2ھ}) FfNXNϛ:h39flbC3Af zt_!`'2 |0wT` h|6HY|3,źŬweۈZv;MS_T>&L9a'f~Oim/O:sONТUre XW(*=:q{uɡ u[y5]rQkáG ;U1_Ux#U U<"0dx૎g Е37]9,\>nҵ4wJs(3=-^m<;=/ ѽc[9\6o/0Ho~hs{t8(lhTZF# Iڿq@P}~^JA[Er.x]'ymf w*v(} KPmQwwwҤ2-9mf\ zbo^6fȓ?<ܷOx-G ڔO?m9ztjk}w|s_q^ݩmf:(k.r19Pl\׹3"3i|bG%F܆ W_Tf<~D_譹z)7oT" xXn1>6&a}|X՛w# <">=܏ s%k_dߞ`7&xMVr!ۨ0.z yfeG̒ѝur0N(9u ݓey^cGWt;pNIftإ:ԾM:uʡ(&3ثpZ O#d] |T?͖l}G}s j[X[ۯܭ .$X^Zڪo(nJ5왙7}&w d&3L{7{w9s9A)3I ZC̪{]i 9X 0'  Vma޻tHP׎b6[CMV}GG un13>3.&^](b@IDAT׃:geg)/I3+$D|,.0' WbB1r1f׃I zf욨iZQZoVb$ ^UbX}{K}crtkRٍN'7#{$/r%RR{Y1cf7Wc还Iw~K{3?Q;Ϝu]7Ulxl|[W$Xݵ}p!߾v'0m |ZsSƏF\'"`]pv,-tL7s݄+ǐhϷAjC`G ->qFP ;C@O0ZSTui+&IG"46dAH}]:D}?h檷a;&װԪT z@{ihM떟`21SnEfϳrVs1 IJ:`[(>4T}gQUS5z s&T{ѤבNZϫ 뛘@-)ܶwʢK`پ>jᄚ AAr 5:KKa(IH%+q=B7¤:Xz mEfka Ccm̬*0}۪X>t"AEmxwl[Gx{soX/-d#9|q5dUlh^^YmK`W2ik~b*V5*eAz|Wc ˳h= ϸW݉P}zb}P+LѼ8`7MDMF׸ucˮѺ~P#oܱv?lWoB`$]{=TECΉ',AZaV[_gF-=54vggXeMMcީA'"4Q[tp;kyjE9`iGԧRo!Wd -vP(=ᯭk^*꬚8M/S| *cI|~wϱ!Px&8[lhXwm fٝit*s!1zY:sށz6 '`{抾|5=l{D7\uu A(#9 L~.Ft?e豴{>:;sG.0~O.{hXu_}H65_lShP u3R?Ք>ix1R@0#5C`bblϰq^ǾU=;.d[iҒR[)Fս9ˇ=U`9bMAT{V+myjoNp7nKX݇]?(T kFT3/ZC=aXvRs?Vol5o>x>q EID nz%wC9Lʞ{|ejVE;q?4hVK1q &X> I׌HT3/o¡rzM`w Dfv;dVI  I.LHsyŞ7_b]_.4E~'5h*c?%YT.83uhOL=oyӸ,m޵~9fw̨-'R@4c6괍8kUYNNZ1ԅ8\~gHYۮUY{א&bɲd]'{! 
Z$h8;;k8"63xtl6.3MjL OF(nXTga>t5QC6أ>V0J;^r˃`PY2S\2y@#PË=^ LTc{*>Crڭ&S%U| nLMߗy8iTh9]]=6P׀z{Y憜Dh;>e"^<Hf*yBR"HJz /~t?~uHZ8(^#pQGQ'T}87*(fCw5@z*i9Fh}Kfw||W&"@ ЭLZ.?DܕxgN`w>t>݅vcY"hu4Q/eHq+1Z`)ΌZ?y,ɉL%gH$IfF>p /&HvLۨէ hI.19lHCQNjpH%{L%gH$-2S&VRJw5UU{)nv8X]q+]x؄D_is8.ě>*#5l_YFЍϷ-G:QNrMswŞx,hYf;RD}>n/-ZqW`~tBj `*JыjTӇK)-IHG&`⒘$"L ?/=_|~Z2c(.5K0PI1A׫3 ?'%'bܞe=>pn1H]Нjb%w\t5rUJQSUjy`B{4xϼދc``Ҩzˏ@dzMIļϖi u k<2TVK{g'O`ӵL2aitG;取R)PMQd/W+ B%Tb4J> C&--VӧKWSWdJ_5y GU>Y[(=3Y0Q:6+P+!(=F .ߌ^yU3UG9): "F3%fm=LK9`C՗w6\9S^z$ $λגR%Y!EQote")Jh 3%:S/lug-XzZ׉cԎi!Zi*?y0H*!g߭hֲ5g3SQE!F|d )i4?>Z .[gJ{ֹN jk2i(Mhc4?ν UCawQ܆d|J}'+kGlЬvR'Vy(֔8W9oMIKo}yߒ6L{M& ܌xdu&mq=&ݼ J2S/-\^X˟1Л a['qKtP]fX-t98QGV~ a;;9/ #qMic7zGXt3$qEU3S23ǂ**l:H؆(އw_/.0f#@~kCԁ%&ڌ5^RWf8>| :I"YBdža@lVlq-:/T聝ͱyu\gdLӨZ/y1z܋'~2hՕJoG;T7{dwժJb~·tC@ԁʞs٘jb0n"{ܑuRcezYiWTveoᨾx)qXl)JIKyF9o']3 j+.Bφ2*ͽ<.Ǜv͵tiζv婩]F eU|Jo^N]GNOv`{^]6*Yb oo=R׷k7z7FAX\p~5A>?WNOLX{0!RO8@No+ ^s_:췮`좀(4$Ϡ3^,/*Q)Կ%N%WT(>6\C߽[א橠?b?\* 8FeDž[++5W:kc`.†r+=H]}^m^p3D hWJiHaF5T0O"$ټ}/%zhFVgj?elؕB:܀Ons$uУV73߿K?+4ad [OzT*ՠBF $3uNU Rhg3?@N&C"4%v/} ۬ýM?PژN^G*[odMǠ[j$B"좭^w}7y*]^IU i#{zA AW x?!m#̼6_`=-*UYQG9$**|^^kHiǚWw[V̦GiR=&&etnՀR~h';Suړ)p5>:4'o('oy* v5x;JkgFyJ7HحٯѱYJ}ߎK0PгShxýlVlXTie]BkT 4i(r'o"g%*e~9A9_P࡟x?Fғ6xZTzpH? Ow0etU$ rSdZPiT*0QѶ:nn>\}ߊrkAPb*zR f# %>S;SbDzd4THO%3Ŵ+<3SlKԷ_Iҏ[QqCp̱uo](oL5mۆ]׏D^fɢ8Vo&!%n9>F|6UUV*-3!V*TZDHxi50f8V.r "nE}reeuE?Pu;;G"LHf*LitQnH8/7Agb0Mw0$1\aO>r;y(Ө Wt<ѱXwWn|.;ZAJ/g5#Tv u֨yؿ˪fH#+gp"/Zfʌ LlPM:f]ilxw-ێcXBXǛpbtzvbA'\c~֕zŔ4 Upac}gƦNsuute__b"~~|uv: n tx7+k> R++KuY~%%I@t#|RXbV%Ȭ785xdئC˟r,,C'X >ė(gƊ\`\X8* =ѯׅbLݵNn5j.N$]lDZ\RQ:ї8L ?RFcKYd#k8R;Sj?)]as@$ɉ 9k6XKyX nd e؂?]Th>襫8'r 6Pv}P춖$vg) o}\ur5ihO(6vb>n=5Na^DwkVlvͣshV: p/ ’F*q'"Ҝ+oԬߓ.3Bwe D iTTcYـ& kHf*F~VS@ɾ/j>]&݉}f,m50ݰJ>;T}K*)^i^{Ϙ bH'~EIc, c+5W-b#tsX3A i@,PXLzgiJ7D?2[)`U-5:{1X u#=_#^ɻD 6Saԭ4,o^w`0K?CG˳YZ7_˫`ǫ5v9BRQ Ct6NHT{O33SK8Ko!/ouKZXKͭBx`ų2vL_SH8o_'_NU'*qF=_m#?N3k}GL{7}]$R2UO)V %4-r]EW:REoE#^z6k!7c{rbօEqan\7Q=WL8^kTq>#%tsT$C|Ѻ%Өk@Nfwu}Υ¯L% HTXhxNbՋ k|K|5ֽ_Mᘩ`̚d#"QuQ8*%obKj לo5G6J|<]M"jɔlw]2Vh77M-5} mƏ-52KA%jn{PwJ6ÞZ|qنsw{.ͧa{5{m=o6w0|G )ҕi-Z-3UWGГ_4khO۪6Tl}> јm4'ul2|e"ꕕbTH P Uh6]|m_ QL ṣ7:62 s($;^7hT];q񪛺; S1 .~dɰI]Ɩ?S؅\'Uó5~5CL6MC+~0< =8V 8Rn|L}NH_ue~xpd:ͤro(jzfgj7t:x]Ʈt|`t Q=#g5ާ#9!Y@!N,_lJfÎ穩.rZ,jHɋwc(Aъ㏲WXGcژ6 N"$)VL:Sa@2!kΤ-B{:d8AF0i NrD. 67.{2l,ޅ3P'OQPm:zwȲE+O6Mٝwx罴h#ܦ9Q܆dRB꫖-KvhtLy/ӟJZ.{'@g MRiRWTv r41mǴO 6"/IGp$pc  陕ePR"b͉vaRS]"i;CQ-T{ߓn{:fpذUxWiU|};g3oA{}Ghm1v!}:W%Wٛu57FرRtI*R?eeԷ*>Oa#K"4jӳ!cɝ돭^K:6:{uъRE]^!-5LT:eˤv3LqYυl7!a@SYY^b+OR8iZtl?yI Z%d[2VSɕ4JVyfX:!gƒe"sZל)H;zgJ]ڥg]#$܋E`h):ڷ eoPs=g2^ 3vݔ=`7tMQ;*iyJQi*,iT:*{;0TK\/] t \.3ʻث ~/5KAf2L3Uxv9fe!Zm}1%:}Fo*vD'XQgñDޡʖuX3K@.$3h>WY0%BjS)jbh-01'*v%ò X"e`/j%8j>DUuoMy3x?_WнEg_|eϴų̔jb9śx ?SFantkT=M±0~d<ơ(Efӱ57A{kh+HmtL jNP!"uOP ȭRczW\~Rk{݉vV>BXv'.|̔}ܟ\c=BNThp|vnçsl#Ū=HmZo> ^%kdb9ӽ}K 6u#&NPu'\l[^{O:hY{b]<;aòp.uwiHa l@# F$3xaV^8zvǣ9S@dW&+{' 3(,QX(Tt0l.-p㥪r~3S q}TTWk"6kX"\3ur*$`MSBl7_6$jvHMf R(n Tۿ빟Õ{mGoZ0G>;13dEEᄍG3%?etm4zYW*]dWY2#=66Vşl)|gzn1$I'@22S<UJ0>.ع4;TL* pc ` am鮢b/`nay[Mٚ7 $z08RLX8)G֨#Ƒ񬪬%ܑ`MMd0(@}u;@vvA~ ({))Cc**6'S:f;`H#73]. 
3]n*RIʵ2ry+1͇% ܭӇ:[6eo''6E6P#3Vր뾨<`Fh\}۾jm,(rxfFdT fՆ?m>gj/k]0RŦx cq| r(ETo]OYUU2m| L+;^J'EMMc߻sn/ظN73sGVg;cchIxTU:Nq3<嘱T( 4i`6ů4Ċ%s*ɳwVD;Rٱ'|gRtҩӣ3==x?P`3$6e`0w@TESgpTrMYD0iafFT>NxI$Saj䆝!M.މёN0Ĝ|K 7Z2(fob:^i/K LEUo X=5xf`Nt~j e ;o3[bNOtUNv3:D!fl IbF6 OI =~9D:N}+x{4İ6,3S"fL *-}i233Sz/#D $3y o++i#I'K?_ynL;lѩ,ww}hRqeꀫc.Ag$\]X@hy{"ZΡC]hYoLm/Opaxiv/n#/{4 XͶjؗd-0 s0ܼKs@bUO \puթN9wÜvgJjs`K~p8**/?tpV]3Pqʝ+3S\7#:;<&kA) ۀ'gAδ3je3AB"jҪ)_[j'iBNG|^G 1>;o3avz9_)3 b+xm"1A-‰3E|3I,c(f oE0R7Y,d-IW2 Z}2` ΁!ȗM~~zάHm|T[i;or>7w"/1;@f)`fʔR.Y'paUB*e?3S-mQ ]; D C|1tb{wk'D .TnrGFwK5bgfIcimp[`Z~+"OܛV{iĜ/ȡ򘇻tqLC7d8IX8 x  q*.f*fDfB,=[󽵶*HB2VT厹rH*%1I@\)ΑYLTцi+3NL`;mRrgʎb`DG+[,Xh!V}&b=0Cq~$HE^*X>꾏qWui*Y <%މ;>Z':U'qwQƢjhm7B>RBw H!s~C2\7$IDATh+G aLGn?Svyw݂ =oP b!Q6j1&,OD@!~NZML)h>Qe4 ڔV>Rp$/6RB$#Sg^>`#Q@:D@" $`jGƛQ?7IA$B" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@"-JL۩>]`4B!M1e*K s'*D;1g%bܴ':nBmzm{-iH>:M]Jr&1 R Ň>E1Ve,z}kNzE+7bA˄YUꗤ*{ Dom8yHR6XyW[> ݘZ?5>Bڦ]:ٓ/FzH2<6 1giӉn%-C dy20U"08zأ:e{|Grִ"3oQdK2|9+;тz''w/pn})g_e_N1[Z C,^|4Lfj|X=ܔ&i(ߚT[+)gY+DyFۆ'qv`rn9GbO\d.i7V1LS GXq]TY6g-A EHBi ,ͫ4M3R o]0ɫ}<|zol|OB^%˃`B|E.:DٵD=|^ΟNϙA&ҁn-GBkiFQ$DZZ9;aWl`fEY$Sϴ\yȹ+dz|:'N,W1iԒgJxGe/&oyxFd[pG1l奫^u]#ѓtA3o_7y#U:EޯŌyׄ{+u5"-o1K2@L'YiimӻTUC8,6N"hrfY ub%Ku6w<3YU~WV 2lGul4>;en *۳󦊁xإ-Yj!K)eǡ>a{f l._2 7>'xS1C%[ŠLșvrbG<ĩy7`v30?8C\'{]=,?XbSN8B;ĸqv15q3]@Q,w͙6ۉO?KQ0*eVfnٚZ-۔bݐ;}ka=ݔ;k@dO'A ՚:;ssR'c47%C&`v.fĜz&CiwhЬ0&6, gh3:)K}>f}'D -YԹ'p5rN;gIЭ99n}ys1zs!{l(ͺ)ڿ}hgb;'n0uƉf"I*^ Gچ;aW͛ jD%- &U}7|y boAzD= =</TUY.}83ePn Ws1x ~FXm6"[z$э|F.K1IMQ.?ȓ`&E^xfhdԇ!L:{[|>#5;R@g~MFʠ)nD\魹wL_wT[|uWX4XNHq1!w1`,sU CگR+~x70|πXg~MF v8`")?.[uWTPG z%5(`,3uy);_CȕEsobu^ E 1z=E%ls~WH}ު-4n vϪvNPi'Ki`~זiScC $&hQdS],ˊoƟ>i}guk~,FZ`w@24.dTҴsx.7;3չ+1S>Lhj=nv}[;fya|ɒ&H6acsfקo-ȳwGMx1qƌg'{8=ڷ`дtF2sN0Ү`)o #wB՚om$F1ר`$EyM39`>bǃMj-9 m,R0XԟAeJT[4YO4aۖݰ x(/쬘ݬ=-vxݭ0>ee=K.K.aJt>;nCwf&sVʪ>d.fl|+0FN3Ǧ8s]OgGvbi wVa=$J^_rUY~ f{/V@~/f~(!Ojb7u э@GLC|i3̈ݨ,f-uS.ɀ0*WVem]QUD=Zԋtձ\*Ks6W%q0ZtEQ_lY:<⠶ݤ ܯ+Ǐtv(;çW!·+H 嗻|O3V}~&FxƤ\7bB̼u8Z|*F׉&!Y( #jn^kHc̀ 1ڌE/#`= +38;ve7,t??.l?u u'4j3Zm.5BkdED Q<~jG]ߥi`O4œ6P)}CƎR3?,җ{o·:!^)TGJA3Qϰ,ɝÎ:#X>Ȁ:t`^ c!sF-Ǵi;;-!f0[A_?bJ3߃y> -6}gzfWU?B59ms O!N?aCGAZԩbB4q u|oobM5Y_]?ꊏ6^"$3ΚŻu$Dw~sv]qzP1<yoԏ|ۗ; $Re. KKtKc;:|Q䝾̫ސ3mZWkW)ӲՕ~]luZE5 zQ+v:ZR^q`RaRTV0ׇߎ7Z_ר*'}ƚgl}wT9XѸ(d0RF \<z?/ %8H Z^&~Ys* #@=:?k]_=7mϐ{'$6 wa57w uO(-Цh9Ƚ b)Slgr+V۟TwVk;Y32qBZ1ܜʷl% %Ƀ@4$ء#  F't',s}ڱR7lN o[-gK'<`͙KXT\`7l<>ès.ˏys0ñ=8LMQ6oyX­v_{C o~0+}=;V*= O7mܬ&j/l>Tw;e$ bi{̝8+Er~ڔ/|x?H´!?aUqÀib Y\P_` ϒa/V;e  ظnC[Zfh3uP'~Sqb3Ϯ U`H3\%%ŪR }WVWӌx/۰@B%ɅĻ 9@X7ثʸ}hiOO> }c!.j98k&(͘ѱ>Z0zn~+nUofElUdЁ rŻK0,=hLx&<fl /ؿऀ b╰>oi3y;Y=ʴW+}aܙY.DY)>`'JK+ D p]%!IaV\1{_-'4&V?fh¤eRIY6?dA69H$zh)ߏYt=d *Czm Vl%-ثHA!)J,3Eʁ̍D@" H Mb3eё-MFlFbq{0Rw+F@pedPQjSTgD@"hT32̤ 9q 4Y5.,aPA^wS" y_%2~D@" h-4dj̘Eؿ%YzQK tPF|vҐm _F,HZML1^ع΍"W)Nqh$IǕ`'P(eDYh2f꒻+ z5޹ṡX;^D Pk޼y8:;'̇OY~i oToJb['#ѴX1WWoīa湝A qCƃ^XsgD7~@+@Цu fN0M5es`)wF?:d?wZC1ܕ>JNQԼ9Ӳ_o.R&\Zo@F}1fNvgYxm`ϧȭShSNHT2~δ)ߊo6VtW lXڏxחਧ|ժfvn*@] 3h=FE. ҧw@Cj_gQ,Õ+j68h?l6s=aWCasذYl;})ڲ~lGI">?}9eu N$8uH5OQu,ʨI7OzE#@ҘxrdwQ]B7¿ʆ)vioX\C9 ҝsyG/8JwFG7jj%9sOh7 HCGk-|n5vʵaH)Nd NOw n8j`?< ؗh;B~"mq Ip+SUUy Ui!?&Vi S~-,TiN#յS%2۸aΚs.JYL>!3-Il&E_0o*z(|abޭ}ms¨W ɗN֯sW3Y,5 iIfaK+8Άҽ!g:bg|8 Qu=/CskgTzNC>'/\/Mvw o:ʪkkQR*? 
Àj!'9(":RRlUk?8Å|ՆW {-K1s&gM@ģXL!F͙s%Ւs<]ﲥ*MFgItƶ_(WN^?/؈sW ʪE_c+Œ2/:k?^:@[GY)۫_vqV q'MPʳvGʠwxi0 <Ν/[#/?7lG+0Ҋ6>d췠 b+ٿ߸SCNLw%TM#jwp4LT5KEY7'k `̊g F쇍f?^)R}Զ]s[yjAA cUe즍DK[]i SֶA/vB<}BNދPyN4sWX?O4ˋp]WDf|cg5Ud?ǠY57wѻއ%ԫバ>G,mks>3S Վۀ33M`:;ZDUŅx6};82x+`v;]P|!9lC㙘3}2)V񷛲zyuc|Jѡ~@M#g4L'UI"vhvfJ@}-x ?6W[n(醢`p hࡡvi/) W@́V.z#8-ØJ{дo&HGzOhb,!AOcok5P o4 W@A|ɀ~Kw-@2xԟ{~ ys|>u9_;#>q-85/H;B|QC4x 0;g[क़Nx{Xgv-..{4o?԰qe &fFw<~睦xмV#6;;L+ T%-UP>DfK)S\5GúR"HaGqvk-Y'$WaWs ;0LFYK"m"zCS?rOȝXxw[NX7vcz?&ç4f\PYiUmNLiUEDt9#? 3|[yr^ͪ :kLfqZ. [{dSڧ î$i2Bͪǩ' l@=2y)l@Y~cMީA%q M`2ujsz,{i9Sg}i{a5xbǮoKۂ (r% Aؘ+q~.XsmAzjQUrN..(xFu.hˇ ,Z Produced by OmniGraffle 6.6.1 2016-10-06 18:01:04 +0000Canvas 1Layer 1Linux Bridge - Self-service NetworksNetwork Traffic Flow - East/West Scenario 1Compute Node 1InstanceLinux Bridgebrq(1)(3)(2)(4)(5)VNI 101Self-service network 1VNI 101, 192.168.1.0/24Overlay network10.0.1.0/24Compute Node 2InstanceLinux Bridgebrq(11)(9)(10)(8)(7)VNI 101(6) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-selfservice-flowew2.graffle0000644000175000017500000001204200000000000031055 0ustar00coreycorey00000000000000]ksH< |ڭ%J/ [!0Cfd;SRNE){Z-[m7<9}9>]FGfag^9yK=דϽAfw˽]`gϼ{GCE~޾%6a}&_Ba[PMYTsj?OdIx^as>AG_GYP۲q MY#?e߇oYdaXh h>5 WyU+=4轷U}npɆ>LfSd{fz/8I΅DI8{IE|KH*L/Sb ۍYQUjYUGN&LvLr8M(2y_ݝ]UޟltFߪ/ef$c O&~.y-92މQ>n_>=(b>5A+;h7 ̶0GJ~dFl!IZ*s7xuLlzAԨdޥ7^/02ǟA=m1%Kuw$ip`fCZzz$gBt9H "ḮǤsp Gk&?]&A^ʏͧG-亙d៦%u{Y2[l("\T(IAl`D*%Vh4,ɪH1 *&8Ġ?ٱ]÷nʮrm 0LM*19I3|", ΁HI/%+dWI௓3aitrN>{AyA/DI y><5&_N+ EjA!Iߜ$ H?KG_&> z1AIxG&:L1/6uȉMa}aR5P>݆/3اw M9~5/NQ# H%c'D 1eLI@e$XµP5:De慉>4yƒXڰ>P9wô@7BӮS">Lr:y{ȗXD"!CM<(ykN1+Aad(HKδ='PqTkC!Ai" cE$~\$Cq_At7K=-5B +[THFL 6-|oR%5/%6-l3:$6Gz9[NI*_i) 6%Ip=昖Κszؠ>R)΁$\Z{j]eOB2($,| 0`Q GAQɃa8`al} 0 7 0>67-dqqg V0`, *,6c0R -|J\ *7w|/Mj8iҪ"6#9zQVj$Oi|@9Z.HxT2/G@Jrc{II^FCWԯݘa_QDC[oY-wj `!a Q| 5#xA@r&K9 0 DSDRb.B@͝Z%uwO28s̷+2̈́}~[2 ΄-L|#VUf΄i *!Z 羶g FP:4n&̈́݇bv{3aY/p6hdmr*Sʿ*twT>VX )KlTbbϱfM^Ɯf};XбcoaA% =&P̻f9#6GlkAl[HW{%mH1sEg Vt+:A4^P+;|+;B#$^!k+R $CH!d)VhYvzF`)D2B:t؝ "C،hڬlO:.6G > F+-o߭C}-&뫩Bƭrھ9?nUNstMΈӰNWi?7'wݴ U釛Kؗ=c崀^q2PF 4#IP߶2['5[oTT8q:tĹĹgyT=~lClCI4TXHuxsc!yҕi%ATVn/\IRp\Et"-HkiqI(8"݅U-MY1"&话aa.ä5k_q.aJrFIXl69"t҄ ?kəvOW%!NR :agT`)'jx r_eO??>>8TWwmp{D[+$YIh* WB)$ [L͏vUZB-!w 73 V!FRk*pHoHO,Hz.Tb5 3m^1TQzm0Q|VT-VrڤRK.k*3,t*6kkԱeܖ.ڢKW鸉إSB}аb swstrD{v);J.N*ی%c&邶j]! 
:u:wt1c:ttۭC':gm4uf#3Gf̴ xQ{XL>H"Ab)j%9iK8H*[F44 b$J nk94:hthyW\%5*PBȸ ~/NuD*dG#ZI.p;>bAR0dFڕqdwQI[\ĒA nSN,8.`@I z`ٸs6~?FpSIWjl0gc+RuOJdiNc#0 TE`z)\qY~.`iE"h @oE,( +Iw7;gΙ[wΤ`0M.M6zkCZ4펽\I;/ \i92%( [C?p`2mÍ1pPA9#8`p#PhmgYMQ4qgIo:txf롮Ymlu?}%% oM8U"" @YW#v\/.!~[kʹgralIc" Ĝ,8cL} Fu9X3bQm߉3,TG(tQhBɛ-ZVapN=yi5PEEq3(UT-u#.#;)NI[%d$}S&Qhmsr8t$HpI2 ߲㌻ Za85kĭ$9+e8Z[q9՜ Mhkpt %-7Ӊ2!#p)B#$W]9smJ qq|urCrg;By\QL*w5q;n/L{ɫ0~fyM"0kɑj飢0nɝĮԾ44Ϩ2  e" s6hY43 31XTJ!kgozT(AXla?"#?l5* WA_k7:kzIn4z+m ܅[lz }6GZ+γ$ϓWA -|( i~gxZ+*kxID0 fE=0`=5F?O?^$;Owbb ݯQ|~?.N_vs~|L~ǻhgO/77%(=Un* t [/?Zd+`s1ѰzF!ܷ3N1+AoH*P| v @nE:6a$Z5pC8i,/^~l,AMߏ_z' 9THZS#K֡9TüA1Ȧ2%E3F<^*vzoÕLMfyoU:߅1Q}*M'l-*O ao:]9t Q {0)k!,<2S o uGQpX[HB YМO׸#Ok6S 5 2 1 2@IDATx]|T֟F* ߳bP,HI6 է("$<=` 6DQBnr$d6əo;w̙3g#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0 g2{M~ǣmK_]%?Td# ܹ%+}L-p"E~?gĝ²#: 57+}iC෩njI~}ܛg=B+>i+egΉ&|C ccW a&]3}r澦E((!4:BGۤ&6շOC5bw@5MJM~Hͤ|Q4vƌvw_Nߴnzp)Kɓ%}s}m/07#ccea;R͑wOS癅(Jh1*ʝ=XyS=ٯ6"|O f!4)^єs jƌ_(7u1D%:Ϡ]=c d6R$X!1%Jj aPb)%2 HuB;hFXUW±?#R2뮻SY4#m%#R9$%-`jێx:4XSF``~Y]N~eTԿNK)6A|T~`>Ked{@KgsIuV5R~8N6`d.՛l?sB}i5=XH22߃3(KLs#ùH:ګ?R3 cD;L@c6\jrzΤ 0>Iv.yrYP$&W]` n\|Y? ,AhrF6hqci"h#6, vKORj)IT]5MNV[{WT70 p%P绥(7ᏪJtҗ;J>R6[6\a,d'BhVFJ^z1{Ĕ)U6w9hh^]6oiok@.øiZԔdE]UM9,Z~ {,|K˜,΁W[ۭAHVPam͓n 7H}\6s3'j*Ih ͺvޏ C̀To%)ֶ^X ᥤh4h_#2ӕqF AX3ҩ?zwﱖ7~[[>t.DYKM!tݏ#R(w)';S}H;C4k\Ǭ;`Ҥh]kT]aW#^\2%[t=Х+D\pG4ަ(/~Q_]]s}>i:iUJ틐y[8y %\_0OGgMr{gPBE:Cۇˡnm!=u2ikQWgᢹuI\9Hr3ByxPh_lR˚p[&j|AZ!C\MMSBjr2^}x L_edrh{!Ԥ\NVFy>8((3~).9td{7 eD#C],ַ\wbp{^ ulW (5ὲ%u04Ty7jf~P T ~Hَ&vG3,j$meA8;+JnG?v:~GiR6b)' 0q0!QqZ}Տ1j LԄ"L:{K9rc =mZd:ܗ}4雾 8kI?n ih6RG F.&p)>:CtQVoSקf/'uhy=ҠFǀ11R!α%fCe-7]||/#4j§Vs{is|^dY>/J$ơL6 pЌn"aVZxD:3iK8fF]YqGei+qCG`h94,.7mSj{eu ( H)J M ʃ?UK1dy<:{K^:6!m<7"̿vNIOʣas FQBgoOl]C4xc ]+@eohhZ'2ohB9 KB,`BJ.p͏o?Gz_S?ZOGc~ʅP-P&J)7v[JBRJv@'|vdC/RTM{S#DKB3-glT@EݣYάߋ,?% /"Ԟ Cϰ98\h֥z ̞F/rLUf`%EHm\6XCQT;+[EAyմ4CˍN> i9Yޏ"ktA"Ku#p--~071X&;ᇓu+=TM!FȜ|zU/MRжe@KlT4% B}uRV \!!a&?rXװY[˄o 83u fe 7 I L.8u|~d7=75W}3"P5BmHkㇵ[ J0ɇę5rBu3fd˥qcfr%hgGڲCkNCA/ BI$ a0-Q> Gƌ!-CdWW|a%Ӝcc{eKZx0xdՓqԴ R\f`}ི<&#, [(q !E* ZRQpO0, 6Q]W v>A`;GSFv_. "y&يYi+u)]=_5Z \m)3omD]| a\V&8jdє}UV&AV.u\Mm{ sp.:Ǯ)< Wؑ㡑74tFvh nUѰ0@ANr ]mD3+4Trt=ee'-Ï=|8P :ax/#hGb{4T8i /DJjbm_ϥƺip @Wy+|@^W:eNA0WcOưو խ}7 =$?u(uGN۬SUY7 8oEz|CUҮEaz{nn1B>?cY(Os{P'ESl2{u20^1#](R{quRO[1n߇ 늝!41)nPX ZY}|Rp2eM㼖K4Fw ܄>e4$ 4A3WKʭ~4QxJ:5n_2TjJj0> аb{ũ?&$S?6;=iRUOҶ;hMȫq+YvW>9}d.Vu18-!GoכW]Z2>]kމfE<ܬš{=4+b1Uj]_?MB]>_}f -A #e**:~UE+4@jBFpnSZ 9Bz:]PP^5pq vy0Dd-]P_M'~E>zY~e8埥s9Y˰&5 i&HӘ_y;R=>n+O}j )5E{xT`( oDl3bv萪642g#FtM-j Kvҧ. ,G`"ԶW.$HkWpSA~$:~?Yˡ]y!msNqO l5/'Jۦw?A9Hgtߕ]!a%G]N[X˼~y\!|=.I%UC X-]6 @Qxp8S;w wQY C"m_aіGdmhTg_2g!@;ܮ`XX!BJ:d8IәC8g l0TcQ$?keu|,&4Y3ALt{4f B3tdX{'_pzB*kS v}r ')Gk9yr@ֲ&#b>+㙖j{O)8?4t4sѡ6L ?)“J0NiP 390֏^/-ޞhhHmKDG͐ʛ}?xPudZtWh+ o-CLӪ/m|A+v v8%/qWGd;PcnIn ~6OrXH~'A``XTr'seN9QƃShs&|7—}ګ<)/v+!Mn]i7k&s9Y޹uo8|O'mw|[Us9$J57v[աA,޲[c,zNHwaqp:Xzwr>WſM?;) ]? AEG#-VRys&y_J̞j 5R2. 
sRS}9)_X{Nf ָiZ[]n2dSw.P}ALMxsmH_qNRfwra 2B2& D06]_{XH=5 | SFf_"g͈AoZML]'8ЌjIWWB!1ǘ|!y?j}UkjAs'y߯VUcyWA+5a^"|~r]C /`6K{W }0Dt|N%յ*tv5Nzi7de<[0:/KZ'| 6wǠ]i̼)]Վw|'r vg  Tʑ؂]fH%GXWMh#zjԘޅ\Q2 NE/u҅`fp"΋nϸlR[)`):?;}sY i~wc5(1~9TC2{ޚ|WwJYT= B ]ʹs&Nywvv P#/5,hPT `6]BL}?5%Q6!:v)7r5 UAN;\W(߰ C܏(l{B}Gz~?'ۿ.!}p s&y߰5yr[@n*5W#<2OTP\Mc3giPvIմ %2 }qK"ސM/wHH9@Z|JU.!6-K UrCj`| %sJTD`4!#%5Ol+nB\ {R^aD`k3CheJ4AcH<#Ak׀Zuu]Ԧ:okbYB7%|/0;hB+@G<P;_[i)/@#vap#-ð<=- o E' O?t_a|]{ %Gx7BͩiO 4'u6ҹ\Sw GOغL|iUP>e p q Ž;[~zL MiJXzIz134+i6Z*~F”y90]e W2sNt=92CgiݎWQP"L4eVB\:޻wϺ:O_uNAx6$!D=G٨Q\XD{ewڑ6|EQQRow4\FeuߎOW% gJ 4 (-]grzy.. 6?īw]zg9%%4sU2IXo*7y:gpM̾PEKn99o!o^ Rt .hP.>24*fLE[D4N'<>&h|0d$e+!w}&iGD/IFgfa`&6߭Ȋ=hg:7apxI)g%Ҽ/N|>&amnV|;5.REgϚb}t )VA:-r̮7; CoP9~+m _d\_@4uiTt(u(8 M5Y*XKgG m{O]saT i--lhCG?#u6ROǬKyuiLU⹓2" (HSQndHhS.|ks_Ytj5vƌfj25P0Ā܏׈.rAa<֢Qt%; ;IS >br5DOK8XjJ`]eW @|]+yk\a=!ᝂni7IPc·~1灌/h:n| ԚKsfBD.c?tg0]3^;ҷ:L++{ om~v=2h4eZ潒ު`|ڕ<^YIP~u`MMtyl7]`u(}l'M7 1)*J3:V5ާP.)ƚR4kgE=j@wV@<;Yn%1M.|7fG} m v_ p[` 4LM֎@gJ(1`>yK-m\EK[BòywԖ,@\n-#FtX^Jm1f +&mN{P\ mN)Ǡ ^EtPUeeǜv6roq^;angMiMe,b)iaDIZ ,R&JeR>5Z8!'vY[<8o/(30:6Gi}Τ0X:;.OBwh@ IFagChC$q]ciSf0An vFSvx [c{EF[#FDCFKCR0:aq-:ҹPvlaҌzO)7hA>=fLµ4.Nm'iV ["Pg?Kltv3P ?wJ_bhCU?-:Vktb+'.)mRcƴW :\tf&iE5! l6|Zq(:hE b V:J:u/i?67xq9t_,݅3IE=zOhzXBxerU8@EwǢ9,Ēćf90Mor_3`..w; -j[G`P qHN^|*<ߧzYvYT?ח=ކ%X:|P) j $f >_& {IBCnر~#D[vǒX5:_9PqPI]!=vq )qP8ʜz{ |wMFGY ?f{k ­-CUE5n`(Tr^th_t,CUhǓ^T+ =;o;e*9=i}RO _[+up8H̄`搜I|1M1%Z"vA] pny S68‡އ;5 3Bn\ e?" !ofA?̄@Pyg܂r(7".) C7dMrPR / rP3ϻ`^L8FS6˚oWյ+?2`^`ŢHJ\3WׯeZ-իICm?tCc{ !ujƉT ?Nb*Q)N櫀[Ft-q`& ޅ5 bFqC(h'&Z퇫u*B!û[KݳvWxr8=6~08ayilqLߢoW߂I:UR.ƒ0~7CIX+DG%P)ӕ>Vp, Ա?5IZO;ٖIcwk=bi &ԅ%.يs]`' nDnO4rӋ)IjUI@kf٦HoӍT7Aqk¯.Zoze9m\4J~۝rIWJҸqѵ˚_&l ث&& `]~`,8|˺9wᠴq23P-aj#Cb-I`Wo2LMZT·S&l0Uϗ)kux4~wkhkP [*2<{ ]|k Y˳4b_UthW}ɫi5q::zP^ "Y, zGJN{Ɗʱ.~4QU5tސ,mMQ8'#cCUߴ.R$ ?O-.Zs/ݻWuLQP+3#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#4Xd}p>"sJ?4EZ=T]hH`F`A@ W*a׹49YWD-u&|O'R-&0!l@6[L`F)!tQ) )cz>-ES␱DNTo09CJ9'%L`F%#L9,0/TJ]y`r21RR3)R}lѱ¡F`A -),pYޙ]lpqn֘5ϴF`B`'?gYp8*c{V_s:n1rģ|ϸ Kb#0=CPX69 F`FhjJ N;; ?7}+Nl2@=!PzⓓaDmIu"ؚ"f(/&xJ" (ϧ4Ogs^{vNXNs˙}#}舸8>GdFkRe6L6E@(u+֋giu#K{5%w56Tt;}ӺELC;cF4F֤D[; ?9/>xn3oz(kؽpEkt30 Rd1N A9CӴ'(`eeN>Sf&%˗n0!L Jj=LSBrGjfB B'r3İt;际Bq{(7>! Ձ}_x>ws{JDc4f{ f a#"?inE7|a*jRn* A/'E[4?q2KRv|9Ħ]: \G|[Լs?q{FhrO,צ+C(,|t' uRcމp7~^ uQeZ俆!C2=Wjb޷2?͛5SA RAw=IN,DCvR  ۘ0[@IDATBpb frozPXavo0't[Ӥ2W!KCz߻;,OϋThS٣? 7?J?z[ S!y#pz-_vF6N<7y0>5'Kq{0{ HMgFh|&if p6tP;سxy~vhoЗ,1Ms&ד!x`:!nA=ÛXL)F$y<_O!|ޡq9 QA_{ !ev?nTw%ް'!x˞ߜϼ3DuA?Pnji^`5 f*Ae QDDow2ްL::mz}XƩ['|ߔ|r;ixP&?0@Cv@G@#IM.-9E > Z|pwXxPqZ;~9YwU K!t/e{/ٶ4#ݡxIK%kЬ w<_/y?SBg&G+OCy5y}H\ҕgqt=kHP ҏ2˃F"F[M'c:6@%+u(syׄZ0t0>qoB]A-x!p?M8;ܿ6ϰ9#ӇyشT!qQ4huz~\=麾J7N)&B iш|bvyWց?C$BXvԒ(dUȇRAlϗ}AORIrk~`F )H^4Jw܄?UpBfp;˄-?ѵ_ _ 2egJ f.jv*P #r)m tABes}_E TMy2dXW.T+ g,ORںrhYdve7t"]h ¹g|0MruicA }k4ޕ+}AP\8e^ⰲD "g!AB3PB~Qv+A(khy Fs)6n>BGJ.5)jԣ9;}'2! JCO'Br3.݃&:?o ?3M֤4n9(iX( g.[i[L0u:7kd܊1`5A `9Hn,,G2Ҹ .lo3 +d7"TgLPby:o?s>l{+ށbWx 2oK7; 'S#T" n#ؓt͚~JJO \/cj옺X h'ai5 }Ugש8@<& 6$ J v}/^ u7d}pb1[Puƅ3c||0֤4BmYʙ8ɝp4.~w n_8( P" kF@!pudĕlʀElE.u1ZP('2LW,e Sɚ-[^(R2[^[;]-݇oPP7kI3L2#;);>Z7/o< ?t})4Ekk@F4_%{O349e:Bƿ](S 4af;}͝0a$C{TūYBc0|eƍP )jS2kH1Fɓ۫qr;pGMƦ有ڧ8>՛}/؇r^lO#zB4- f{,Ǚ+4rs%ZH2}]]gg}eF8ƽI|YY8O2^^"Y+8w-ı5<:GeX&ä )2cF{WSXsJ#0#P XHX`F`SXsJM)Iy;4MM35"%l2E&y-1#C6¸۵!]Չ9D}XWj%QLXYAQnM:}fw|w33 Aį! 
+P 90pf.}_±toɩ>j 7 Z8f`_Op7c~/?xxiH@A1,H-υe(uegO?q\cp"7'q5w{>B G|zohc@Ԧ46q@ )ddōk\­K֢es:V`TR,%u1nxďyjFya]r5(505睬Jt:3GJ"~d^_"K|sv@5'L:w=ϝM x,{5ú)A]u$f-Z dujKK&~;Fm?19v_q@< )w?4U'l3eR۟}U'\jS.l/\1(|&Ѥ6xMikStmh7!ŚM{ΥpJD5θ& 6:kI۬ʩEI">1; 7o8l Sf~K!dqP[HS#[$'*,ҒOmI+$~ilVG_*5WO$op.&?.8>xk [j&]w:5)q܃$mЀ7o#6m}R>⒀X #,GGR钝֠K~f8FC *+քlolʘx}{oˎ"o[AOh+"iKJ$>cYhψ?6m/7>ZQy9xؼ?#KtK*oĬ'1{˿?{ jK~ec5)hGBWBL@UkѾMKbE(ekwTºQ ;PgݧR3Eů뷉[.88C"{=Q^˟Nyx֞K2F@< ),5$:R]h*qLneߋ1 _֋w?^߲CtF;AbmVŏkEbG,v 8YPOW( {p4JxyA8⊳Of\|:/N+|Wl,ʜ:* ş UË [Z/<:ԫ)b%*[1ie~ <ѪE˟N-ISY{NmE.koj~Z~w[mOb>)K&/WP,N=_`@( _CU41EJre[)dždxk VA#/eԞQӖ:RJoy~`0@@V‚ef:mv͒74qpvViL\:ՙtC7eiP>\!>7GG+uh_|3]غs.  ڵ5)5CsO~[PhE/;BS= faR ko_sXD"̗_PGj!~ZAȞam߽ [D&מWm^i~ub.:Պw+E$8u[2ЈJTj_ֈrYiě&n@u/Zh>\cun`VwI}C|].ѦU3<\r>ФM= $XYk#@Kt߯ӚmvlBЯSVUĹ=$85xZPQ(u<;vmZsNXӕ9҆2OZȓ Y y:{ם+Uȫ!~[5OqdxKڊJc:Yc磑#oB ]/'iP5o\14U,"22쏌qig&A`4th\3ѩMjX&o;ڱcUc\:NG=h'NeU,֑{v@6reCprpyQ*9.۶u ˩a= H%BP  >X<Um悯#p 'zSzBTd ȲGN7:m%~߰U w.BXKKFb -3} [В;b1{" f;иcf=Q6ozty$[ƭ~["x[Cx5g;vØ#!49h.͒,#],skb1}o3g!?skȥׄxN8pP̤ ocӯ}-. ;~΄|&F?F|`Hv( 5uONjzYTю'xw'>=>~k mIW%;J@R*]?o㑧y SHlGb,e)~K7p3".A~yї-a=<)Dub\aßݼsL~'ѫ**ZAvVmvpœ7)ʥz>$́t#0oly}ֳΫGٜhBRiUhkrM\qnٳ0t.ĖfI2&/O=:nӲ402x!߁rrd(=zlG?P@t&&ع q8'͜/9d6Ze8svQm@C]ImU|)#xiu:qym )N#BS@!k*FN)tz=;Fnp uR(7\ȟwb4mO^sWuhP"q7@c@ (ry` ]rl8eHJHR 68#T )B/F@"[ǎ`.e..sF`F3XHavF`F )\FB2f:ǧ@`:B:2:;޳cF@ uNh@lC|uP}NE޼cu?#u>N݈~oSf` ֤[0?@!@ۀO8ؾ{b;:5xŠ[}qݷ8ׄ#0Aڠq&)&AHlpkm:vXߴ-otP?+G6d8#PR3}F#Cr GֱO\)-JOJn՜p`!BJ،@G;|ٸ$acB_'$ܑ }˟~^ GF&lMP8@B+<7_F]>_7>FޫcE؟``!%j8 #zP+y<2qSaB?;>=/{n#vmv{F` )Ն#0MIQ021;F`b&@i0@ڵn)>o?~`FR]8<#T@6-[e80@eR:`F`l‡Jg;Wݫs=եXgys}4X;e_x@&/itOk7{{$bRzSjo6t=;Y#~u`݆mĎb׾QR_5m±} vs)1oM&+PEa8 >j_ľBi)t$$aWLK$sp"֒ydqV~GDwH`IbL$* _$Q3 m^#MvB`UTT" pME)⠖)]nHX mI2DI )pE+j؜WlUg* 6%hfȉú[[7gWP~z;=КGS b??!Ar - 鶄$U=unZqYJgnFA Ф{f%۟ \-'vu^h$lNfZDD>~M8lZ2Trz3Qzy$(=M Q_F.xϟqМ)hT +_P>#!F#hcݫLppԿ .^#͉ `WV~ #=Q:SCWб=LQv>M>0|1;,iq>ͬ_EzCU,Gd k n3TYJ s3hh`PS!'7 } '4RPBR[WWFcР $h宧m1Ch5^T:P=1홞IowҊMDEvhe; <:AְqNOX@x֛ krQÎ<9Nll$~4~iAt) s!$c:+2R0?lP</+x<>A+S<;d[+OinS1,ݾp_v #)齮N5l=d- 9Npkt&g,cM9 DahV*N^KI;~[tu hHIaӟ* @/!𦗨ICTlR #xB (8xe+ySHYJNU>ԬϹNFP8/O(ʀOtj _ZIeb8zx;꒪ URGR0Se~8ІȚ1nhmD٧274zu{0*YGZQmHs.2rɔ颭k>=ž BGuAuHLἢS!> CQaĈz 6q2Aw"#H7蒝6'Y#R,^4⺀iFQP!10e{2 hU< |001IQeb仪ᥢD!Y+і*_4lX}z!v=ᬜA^T2)TW}GxsL?a~Qg*b\# hڣHRā@ )@|ѣqc%hzU4[M$Yg&x ޣl{c8+5 64LvOwULaVHA#9x9$ sIS/F*҇iBY $ӄ8|XOnS>_c S Y󓕝OI.%ǂzG!_VH͍7tYw'c3n, GcDc/5:B*n ]V-);lR[~/@V )$tGю|8(?lldb(^2$\c͛"SR , d1D/Ѧym ODpZOzR4m%N!`{m bUdY+cآzܖ=O'ri\d+rs4#?)%PB6} 0~&z\zO3~uMʋ_C_閮EM)pZ);Tkh-l8+}kmU@EX9B[ݓ6@1*qh`f3 *f3rwvUD_!Oы7[̿Lt':~u3Y+tT#96hsZ[="OrZyylv5Пtd}4dY|:H*4DBt3utndЉ!q8Fj vc 5a D]tEͷӟ7y.{jm.g-z=?J7Mљ0KO^H&W4񓉅ȎȱiShҗة8ϩ42=uVEc @pYvzy=c<'4&d'/q3Aw&mA2Z 1ry%z=o`6pcɑȚpKa_/Fkb,t~jz-Y|O8d΂ˬfznO2i6|%ɺ0lSUA^Yܢ/w 6殛N2yqR1_]9 ~<9h~J 0VܖO#\q^qΦ 4ڳe5fnӗh5#o^ΞS& vm&7ucr^dHUe|$)&33k)CBhn7#]Q<ڶfL2btQ؞lOHY`Zv9KOiYa2qa4[Fc _ N (ECLt '7{"B[;,4-1vqX ג{oQ1Q_mjR=G`XPQoIp6Jr t&͎Ԇexm߱Ȳ*#MtQ#7o6b'7-cyCZC%/m^ҖR4B?W wXAn#Hf(۲1l;M5ԍ\ON\(OP%0WH9vmb*7ڐvH 2eLuU]lZ@_Y}Xh/n?wNz~oI# i%*2GwIS^nNT,4扥m=O>}~U|pF6)IAև| 徇1yܤn&";+353NО|_?A3)T@G -bN.ΛPB3FpN2HBDic=^h(0Vp*uX*b (D`-/:yΙv7fmi(4 ti!O*J@ s-i  4VoV[GhҢÝll8n'0˛L7麎)<%ƥ6\4qX9l2%َ̚ f󆵌wUʳuM{j4wtj&;gʞX ʫOt_ƭ Rbxc@Lb #X=pV )w0Ձ.6`EWN)"0& kg`zhMj/'36 dzI9:OӼ6byL+dHGek}-qF؀00Uh|AٴzG}qXk0d>=&4e߾=¤HS-6*=/hSl-)v:vSe@R$@Y4!xv 3}\vW a^#` ˯D'xǎ0/#`5jyP 0=`pkE+2=uV5#c6+A0/C8x#vH*s"P }yE|s$s`v’ħWA B hTSU%YO_d#ُ }]MJ[cb`Oy5P*Cca!n1żOoxY!2+2~uV5tO^7I];iec5#' eѦZ߳QӓYVl9@\:ez.і)m"vlx3 -B{*0k*QI9c*5tm:B/!4)qp#k<ġBv!yS-v"+\:{P<%U˰c*<,,? 
],ӾεL]}ճ!Cd䝐G;*& ʣJG!GZ"Uh~S= ag|_ҎswXr}"⥷ԲڤorpAv?ocuWDc|WSձ2JAW)b=@ )5`*oZV!01'gf-":t=јD=rI v \?kr[ Kwz"5*m|)u|~E Ș6(?2_vZY[;wTpjh[c[I1NJ$|$&X1.IӒm @Hf'BJ&IX}+}_H>;N/}Z2/KdQ<醅| )xz*$N'Ag*tVϾ3 =ܒ^ރt_vܧ cɮzZ?+tTI=P_\N?6 ]/\s lMa'svkEX t8X(LiBKtmjt'Ca =[~WJq0|ţ} uۿKK0G= )Z߷I7utijdtA`{`<ϵzu,46r2+\}ȐDLJWyt:*sy5MFYⰪ} ߻7G{ICF)Gv:+ -TZYG&w{S 1E;!%hTP [(wf֢@{ah\ot:s )(gA^Opf&li4 E:; -3 )ȪYA=0.tl +d6 n̻m!v~{ZRfwYP*.X56Gn u zf8/5MoGۏ @jze]tTk܌s6*-b!uyDŭvGPMT{i ,0UA^Jp-N IDH 7 )) hY x<NJB ;aNSfbCH\>V@2;(H r1|ړ XVxb_mm=W4d/`>?W;do6˭A {1Z0"d5c|14 <<ͯOO5#CCߡOo}'!% U:NhIq%2z-2Wȸ+ Xb68K+UḎ7Dt/fZؠ`% *b=-J bm7g%1mS-f؃>jb4tۿ+l506*13X TzŽ_SypX౯H6[2= )|QzoWnzfoqjI¨ A6rg\=7PgyW|?4#f:j#LA%7QQ?E2~p0Pחq|0!ȜW %}3ߛ c]禂 @o_6\LB2&<]p!qЊ yg/6mB>iB< `wYѶ&H'~dOWp 8B8ić :ewR3IS#D#i˺u꿹ݼ>R?_]WQ8=fLPD4+hP̷?! hc\@IDAT>bho,_L[k'S~}׷~`O'3*d zR^tL}k^. ^"\`HFi< <҈Jm F̱C kq+݈3-q) 7ji0je\ t24(h69l8s[׼_cg2Z] x_ s3-6@%4#Is~Y,$U)$:28#}P^¦U Tz)XX6AaE (O},O!v<*qq^C8poW?]ѠQJxu= )Q2ɳOt'?7nr16*JzDnp<=Fm"$+ 4>Q0wdT cȨ4%`Á)hPW_|X\_'!ߙ6X ﲘ`>*s\M@2 ySGf'JpuFP;l"|H?A'Svoy/Q:I+РXN\SР=]B~_v''-[<ĠQ!ЛI BT,)o?+3/\X>o~biԱC9SF4 htрDF[4( 0W UtC49SWQ/ bdC{e/Xf'&qD% |^F郓 Z+NWYx Tm֪ HަKOu'S?|6=ug[̶0 18Ί6>7xhn~㝃ZFe`!f{*HM *:*+7ݼ+]rgǍi. HN$ Xo=u<<)=_F0o::Z7!ߙ^Q#۶οb,]M9vd@[~4U}D(l붿tTUO%TV/x7c/o^FbV2(rG/^YMHP`/yT0Ճa)iϽQ#M-9y4*q״a&`}6zu5Gp׶9m ڦ zGarD9 ^|kjF0ޙW`s:r"pQ>w}{ع!?%SaEӜ2/ Xi3ng+팚+:KBTaԒȎ,p`}|31nzW?L*fl8Ж@ B 50ધ&Go@v|2V##^X BYȳ㊧J"|<4vޓ iܼh̜6GPo=6tѭ!c`8h97@ۇQHlŨ_VN |> M a!o"d *lMMA =kQ<%^Mczͨ_8ncVCuLiw )sAŅ0YETSjvc'vƟM纈Gi {A,qK^`K.G V@Hj>#RP  gW T<l @("mSG=g('/w=sNSzx/L)pvnI$ӃԾV{!5Pvޱr'j_1juA*h%G fƐ񊞅YAE_E E#!% qF G *8=boRAI]QD":;ZkK?\{7[?->Tsc>Mo b1c/5|гPQ+UCu' (vI6 (=˶̟{qV'^z˶礴 5S49#fAGB v+9,i~Ev<;hB}d#y~g7_;}"r w o]C4`4~Cddki;S{ :rwHړG}߃d҃p B @6)Bc~L9JbPZY_CKjoMugHJegPRU(}h!Rp#:&SFPb7"OSvSD2TǫK1Jb@Ut B K%T6+J!]c z%gҒn%Ć.R]AP( @[[!P( ] ]AP( @[[!P( ] ]AP(!`me6}RU @/#/g7 h8tӤ?hEq=g_VٍZUS(@`؋F!=4zWw:D ^&OItjJ}e-;?"?s#4)Y\5B@!̶<1V:OOLp1hd2;iS%`}} ~{oԏE@iR*g B@!Bug ߾O\~t(:V=J^(J 3qwK^_ xk{ KfІߑ#wQ4|g_SS7xZ@S8^+ovC A@ )(EB@!E=:|?Fg뚵Յ>\%y0]B?z{vx{V@֭]ǠW *2/ƣ^}~귟ٶvN<3??D!EtNw>JOF0ȠQ=#Bx⦰vр~361JmBJ0Eiwf\H0*(@ZX;~$~:RW0IZS'H{ط[m[K_ TXrլn5X ~C7Qe#%<:N |#Z|!db;BE-@:pB:ɳl|4-@ k.%l.Q] Ãk7]1 ta&VFa8pܙx8r TiFr>jw~8x@?B@"Q?iw[~/%I@Yv!{rĆo<4\|]PZ Кy R`C' Fhijq1TU@4hP (- !i-Ĭ6_80FW'{o8(/Id_T9tqʳTQp餙;֯|5LPи?s7pH?ʪQjg/>kV=kN,Z@T%o)РH- ـFE.>CQUU̝l"L e;J5oJpi*R@)$cRUF@Jrmߺ_puE%iJ/yK+ x7 uwܳW> t~B{j_>Q (p!@v U|]GMg\=NhxqL]CT@J& И\"PmI~3.ВB\ q"/>2|"WLM@/VgDTPĉE>< (yFB@͉T`)؂@X8ÇV 5^,gmgYa!l"=i: djcg4`ʨHmPLJXnjkhEX|X/zߛs^ЭψY v_1(ɩh)`"6qQz:pl18> R@K2= )lۿ2uPd3^11.>M|oz{(2Y]9_hW1iSP$BZ)@!W@v#I!_ܒXq!Z=a%MHO~+s5M+ezg$#8]xյs8v>U@(~=J"@4pME'(DcaEjVJ-~Cq!¯-fZ3EKo.W!ХͽJA{Y 8@RH]ЩHHY ާ@@Z)@-Gh4KEkw5]S!7!xxG8:ljI #|*'{CF@}MJZ7=˖uFBaEK?hWX{9s{ bWzI.|d1X i~:TcZ.1^T(:Eu(:( W~UOd<|w˵$Sj{N* x&'G)*§qn; :g~yU?@{w7즃;}ď`~*H Fc/++6'Ŷ z+29ީ- +?hT th?vѱ2l^CzuUru]{oG>o:׹SC VIe E@)z Y`C-K=rߎk\䈚I-^!gC`+`,f^JܩA4&:㎨p.N{:t =z8jK#~̜8>itDivkjSQD}݌f؋G?Q(<rq'~{1#BvX1g ?N6kw/=̜ qn'8KJᓕqa^oWΧiJg9k0bxwԐ]@^X?U74g(9bt}Gl6%s&ƛއB⌳B4=zhxBqG ؿnBa!zc6ewу\ENZi(i XL#w7'*zf~E?b=Hd2 o!wΟ1xjC Azmp1j \_tX9ו_]qs< %0 RoyEg[2/2]*@@BZ;N @KOҺ,̥EDҵ4+QN|F.Ι0n5(=Ey.ųEch~WiWWn JY8D7\:Mqpv;ĽZ7'AkƂɧ>SYUM ܼ(=t"Os-CuE#N5~^ik@ "QH|l94Ώ]4Bhhƈ|tm)yi@Q. 
e=^sXKBx9gpggYd9 :7) l  @;z/ h a?Uhɂ,\; XSpMy::a&XP:EcimOIJFQF='*#dl.Y2mO=q iȺ zӑSb,B~l?(~7MG;`#ml@pOu 0IvCe3ua,O'<^A+kTQC!/%͚į~2*?[仚 vh<8Ni (eF,c#ɻ.frL;^W%&M~.b 3K~}- P~WXMme^RXQWѣO-ޟφ3+|Xö~KL #wyQ}`Z3頋fM.~o+ !:./,et&BXɜItm_7ͳomA)͜8R|ɗj ozKޫ>~,c]J_r/2d@)+Ex;?Z[s:aԦ;W'1[J"?h=4 ;X2#;'eVK >Pس@P{{xɥaNJ˔<>G#,x B3ߙ 徇1yܤo ڬ;VVEӁ:oO]=_؆Ah_epۯ\/QWzNS (bH:S3O|Qz/Jyɯ-hDM777oƯj솪{ݤMC!Z@Dȷf-i T (mWh@3rad^ˠ(we耢VBՕH@bogၖ#h۳+}/7;{x+1 к'^[.s;_ CYJGJr:ZqLg).H_{6S )U'@ 0 P!6:P>!j*3-66Qh3mn? [!P(t@g[:)$24Om5Ц0UO"D[0\"arMǛjm3wc"X!s`Y>VO1_mǸd]e\S4+z Vf9 6uŸ]鋟Ů৫jx ]sARڷRJV])O&%S8TXa37.ď=;r 9(6IJ?Wj_gxɰ A@ (\Zt>tdR (EUЦϞM )=JK!PtP(06ʟcc( Poвw=RKMŜ**$U< B x1| hcI%mru$mw#ohF?yY*q&m.2f( TDꋴ"JO=jjd7{|~n5SA+VfWJZvK0 (nh<~{k[^PWzC )Ms=|쥺FҠX'h% R3c#$WW}Hy.']:o]{X,} 8X R,M9AW?Z&OPeВC8>NF l^o+J h°S˃];_?sEh hzsre %8 "" hwki7ӊ {薏++EB! z⍵1k#gv  &2 uSh2 7%I(LdMfE, 3鲹SQh ^D2޿lSMc:mK,~tЛk CS ҂Im[X8)ݥxߟhfb- kbXavpF2z-5>mE} YK7\>.:/'>q#%e6{qG>")؟8x^gu6oJY" <|eŇT;N7L *}Lf]YI:QAk*ƤVtN*_@H~G%=ς,5Ө,a!DaC=UP9mz9n@94tOhHuiB6apUu?E_s t\:oؔtغN+67AE]}]s׈Sh{ yW^:|92 -Jr^VwpY:* 7&8!za<==0K죕 mmDXO'tK?~ʭtLBEUOLb4?Z][Ov&C`RCO"fw?ѬYD> wz h]\HYUDHh| GRBҤ$3` }gaJXKRqQ^X{lHk +6Ќ&gꉶK6_':-Y+і*_bN;:{ +-{C d~7:pl h4؅\Q| \wUuWր6L{tB"Z!ܕ*_t-[0 -V=ɱL:RH?.RY=5Nғ8c|# )?Jp*&wUBI=(ZY+`F\.d^W|MMȦ⟾鍦.#RRsv~2FeNE`yE; |Wu3h`@+?7,]{su{dH+.v5lǂNRp?nj;ZnB@yS?(5f\84Cow]fr&Ejc-|Wu?BOwL[ݻ:.Y-ƪW<,0-ܷX:~]C*i={bԺ7P6R?!S!N'qy^fs6S!Ѹ΄r#OO?!";(w𹺕 ʀ6AY`0].#}!􇿗s'@k ip; Ӯ}Ul 'aΟL uW#=$`O" ^D`|#'Y`$5>ݟ;??L٠V|6Au!zڲΛCϣpJ|A/@[IP\CNf+$[?Azjıbڴ^|#rʡK/̣:yGoSK{x;J.mCdf6)+<ơࡡ~;F|v@6,=v'YEd@:=XcE k5)=]J*+=EwM3"SyyjS4aϥ\O\T'B-%b"^Q#l/ ܜUf 2vLSz Te!r X8OX' 6?A+ޯ\ t?bVn7 nz1?}#@8\aa?`a"jX!֪yح V]ngӕ5{XPw\-GӗDǧ;G@vMo=jM *wn|/Cgp!>=Nk6i'9޵9Ӝ:zwua5J##2GWSe~'nw⑤;4&QqҤ/-{ut/}>)} dT#fMwh p]'/ Of3 'wVg| u8;W{kqkKizJ2[v'չ~B'B ێxF1뷸Jr+蓬M9SeUfL -JYEIa'ۮf^SMnZoR3DV&ˡ/kMˮ8 UXxٯJ56R]k}ci[Agg? v. cG-S2oi0BO!%6r17iWG ^}lYtdY؞8~8C[ȱHu,)TuQAD Qcc AV:B% BV!d{б!@~?,,d^QBĔEa.6֨`ZmFy=U1}yO/u17]]H.+WyK|4L4…dM穟05hFcI~gNj,РlލBs!m@ˣ\*jMJG^" !o?~`&Ii7Й`t<ׯM+!^A`W@3]Z7']sE ҘQ6hB~_9n"}Ml Fv!tD:f(%OPe,#m;ZM2ʣyG}W Mk-;;K` m(ùbw耭ndX^2A8 {uջ꬙´l[EkvUqx\ZAhp!Ĥ0ml8/edxJ_%ƣW!kBY 6\{UfL|Ɲ'2Բf%?>HO;)NW<>G+㣊_GiQC0R[{Mz֫g!kGy- _G1}4Pϛqxo}=]ĝum]D!-7t.i)РX,fz\M-N;mJA~D^H0s=%ΪZZʧ>^L;~SBP_gt#:d͔[۔^guo]Zq_ZۓwzZ hbc/ fe@Z|㍭eY>:6V^Oʥ?[eWZ熼S!sѪr齵#ʽf1ʖBKG'{O )VEhN9NSk dg-AG(k)ӝ}}b[@5?TF@ ) \q&` u5WЛkFrCm€r 3Ey XY9`$آ8񔇟haPY>!52kIq:d5U7o (CbF;XV_ǂŀ )0u:MO9S)lH, :{mٶwG;1% OUTG&'Rh@POƍ2b8t;#O5Vյt'@gcO#\Y u8`z2+|ԣx3K ^z 2 cktzX)ЦT6l1W$, Ƈ`ZM^j|Gx,VY? b=Q6* nQx&>EM V@HKeh[$@{X6*mUӏ hyRĀVI?RPBJ;H"7qf8K$>|ߗ[_T&YoR28.vw}čd>NUȆ4I`@Xcд؀[~htg24`-;jC,ha-v=X2%F5L[,tD!`ŋ/tq½f. Ho; w2xBJG ;~2 Ji[>%t#1sٱ5x$L/L3L3# {f 9yuHY? +o6wI umFym{@IDAT.VH@Ě]zkL˪fRɏ<5y&fXP?3|@ b-pRg ?0pZgBJ皔eG߱"t-l\τXU%@ )Uɣ1L͵'{1Hm$ Wo,;x鞈6%LF `A9YH:M!m k4"};lb[xҵ8Ub~WHg]! 
hB'ϲZHinX v72?zy*fe<S"`zIYY׼GxVhhJQؤ߲æ 7q}0'ϱ`_CN>;)Lw!U;CIݏh0;V|/kF1alǍ%X-le$1L (^2/)KD$qE=X,cƈ"ʷxd7kTJ9,MUQ v3`]|TU?MIfK I,{[]ыP`+*6֊H E>?oIHdL&~3rwy{v\cjDQJ?G7/mw]~~h{A m%XD\,µgtE Ld2ð]^@s!]410ӫdvMkS8쒂fme4uj΍ED9, UXaȲmCE+3F 4'!ʶq'ITM1$::SDaSHM6-mwyDB5mdYQO0(pguEbK9oR,]V$VZb`R^j7X޵Y O` mjLAቜ7"&iUۢuL!pX6BAagӼss\c]U 5XA( g_} {q/*2z~E<.+Z7Mއ/-L[ ۟Flw rrF¹Qi2&[+F}*/ǫBF0*f8UVY"ԝEg%1gSl^X̊Ԑ( "M̔x|x쨍Q^ţ1]ȯݫ:y]'_ؐ_u jmu O⭠t/Q0glVMNj3Dv&XQIkJ_bLXu6F԰ ^ȽAwZ(m:+;g=b ڟ1+Mf0.d[3=e<ᣫk *M0\YRa0Q "FV0#syn9p0;9"`Ƹ,Q1сQ >ӑ7RrCFFcd''h`4_(,jxstz4 A ;wv Gl@ i3^2 ePȤdz"-+[CaAIg|]ŁM" ;^ts9ƅHaT;y #%6FhFqC ڀ_u\܁ 2G3+30 JRuT3ϕ(4@0ѱKtqڞ(*M=xL9L@ff`TQg/p*a z Kd wQ&ޡtt  ōC1Kc$1̜Dd+HXUSezc6ɣ4i*YCŒ K=8cŻ-9NcD(!ZV0)%IJ83^ǎYsԮǩWKY@,+u,Iag`,7@GnĠ]",+X:Ńo@|BYiTɂ%F^6'6o{oP}6IHs+>CGa:6@?W>`,;ݔAy.M(ԕ NԚA-IѤbFa 3)8=1S nG39q 9f<LDg@:] ۥ0sKo(=u6?_8E؀6-=m,7khoMT6}~]-;u99` 0)N>Xy(Q̠ Lvm5cЎ—%9>YTpMfPhLX)  UNݵy\.{ th1=)tGbf1>Pc\0/F@J ZMFí1)Nx^ N~EAa&3dfCIc4dn&GǛq5HH,ǂ5$%ϒ{rZSPvw_UK[ق/iVx:6]2ә] " 2j`bRMTNh$=)jդ^σaPߨ?X_lf_')O\}/ g+TTs}l 5ܣa4a< 0L lauޕ,HTHS.Xx3sq991$pN'|QGXl} B8ΤZЎeĤ_"X'opUx0\{aしatuxt1:ӡ`?fPy iMOQ\u}#1,sܪӠ;hdz\r0Ds*TTsiVp_0ƈTc<r|kf`jAaI 3).M&tr0 ܗ5&RC0c%&\cE^._ǿ}NAKh 4=]). hˮ)IZH…3i{wl!]c_UR}J1((1ceZ͠GW1 9㮗)ޯi ̜} 5v%ڶ33N0Rs01)vϡ3^G>Z[gzt"pЏ1Tn͞JNy<]9^2mٗWn>6Qg3hi" <0- 6&Ek~q9xPS=/NU xXGbV-tȸ23 vQpJaX;\aoИ2\.XTj63,_idy1|yy;ASf{_M]y;T^@+ h`eR4IJ >1ƓJ>`E-=ڷ:lXb?*uD? 􁊏3p{q^IӰ^˫xHVq9ӶY cr|+I 6&%WVndFS0?&|(>Pu)UN;ޏ.[Vδŀ߾n{`dA%)jnm4S1)\K>'˟]6^vrq ^z5VMqC7uIJ+=mkGvϞYnW=k6ꬬ }JAKŏd\'XT= E ;h/0\ضMCeLI6{Q_{GV"j"^m$yr#WmX폙% (z0-V F&EW5OPGc@22R} vֺI6,ZfKx  FW=nR/`J޷kaΪ0CŒ VX', pSIxbJfR,q *0(t\Aa.EьqL ~?ƍ3vf1ݢ݃m剀^{_,@ k@ (@0)B9E '+t8jƕ8eRBYa<8rɜfv(zZ ʠI{P6 ] a-}€C03)L3"W5KP|vPfPP=-0&1#fL?~ ݽP@.w^GBUtc@ݺamL  _J>jiqdڙ1̘TUK3'2e.{_&oUBF}3"LQݵ{|!Pj&ER([Ɋ+> KtI]zKP_T`G?^%r=tfK~@FBG~PvsL >BZӫKKNJS/!tsU5T=z] =סv'=ZV_{/6e:w.„/;/5b ?T%Sd7p;'}*v1$(*!\\و//€6/"m8JS^ Y1xg|ę@@ hsnvbׂTI*--) U>zooT/w5mc((^U/Ң@@ P!׀ü]*oI;N#ThvLJnQ{@@ (0jz~I:`6R,$[o~饋U `Q_@@ PxfJr+TB}1COե_SU1(*=,,_֩ZsKO!fp(g>0Ha:uvGlJWo>!~pIʰUfanp#M[ E`_ dxJܼlTl W\pHni`dl.f#ϝd_C[ƻy쬄E88ua֞A. I RBo&owUP&às!=LXlY@u0"+9ϾJ Hب< ̅F$mD>|@fvX721ǡ>Z,7L|=KlȦM:*4`L x`2bQә@`x𸩟`½!Bs%V>R`Ѩ.r)]RU9_~7L\nn@?0&ɰ$iT"ՔNIuTfoNYFh xX"P.^@UE@1-*W6ՄZ 8].G~cngڶ K$EP&y|^3is+lVlTF@לqVUD㼏~d+$K^$< #[R6cӡv2$HU )$oDˌeF!GDyKx"IF]^A y=~77=u{&&ƺTEOS6jˌp@"&CUQ?_8|mTdRFf;3 E,Mha@!1}UR3)I @@ Jxf(J'q6OK7l{JS.~HAg_wsڥQ6,ƤYﷷbF6E:P4eOQ[~]O6cG͚^JWFfAh&K1.w/+Xar hg[]{jJ-Ix @@ TxT|1_ژ7}A#ǭ&:7Wq"ETA`GB41)Y$=yu%)`{J@}EePXb2rn]Q%Z=<N?/g@[pVkHj>N/Z :и)k5) D~I ^(~ʣ+Q9DB:KZߢů |W&7Sff*dtǢWxIn9p]0&\d=Jaπv!);#(TUP}m/K|WPU;|r(@Spd l/ tV9ì 2*Q@@ Pzޘ>saogH[InY3K/[%brKMë6)e*  IYUg'7$OO/o@yIc#{H뗃kA@ x`Z:w\:=jyBqLF$=E628¦: e]Uk~eI;:nPEuNJljsML&f)lєI|N(?$~H;S`xl{0Νv+/9)8RH-r0۬Ɵ˯eI%ʉ)[,;iszSE>La`-M2gFHM+9 ފ!!|tܴn^6R :50k=pB)EQz$?cX=~DÇ;?v;gH{ g$FR]I2]4e;Z)O3o걳CQ.F-ۤ954.0peĉ@@ ?F^ɥ|( 闒L a1<Ӯ}H~aP _AC%塜W:cRgN\N]Й۶ ' MkWPdRlAmE^El?țG%aP(mэʼiĵ@@ P<-S #ǯ>Su;ka;V_/9@|_U׍F9i|w|pW+krŶB"2DCX (6r,[LCE'բ]oc l6Qή+jDMiFC`ZApIQ8.]z|j˼꺄!#ΰ! 
[WF(Xe "M"t7=|Dh`H,h=D*Mv_hg\ (Q/ldjuO` wjD6oRzeZěQyWKHW:#-T|mAUI-Wo{(D %ۙG0ޅLrϚ&oD]J+NqP9$gһ@r2/W3p_Br?x7 XCyG` T^l8W@CoV;N3Еyp{֜1c⦺}3TDa(}FH"Wt6}q!_\LϞH !?1me_ngw5YPgKu!oV+ST֪ &Cuns.M3bD[YQް6i'~rgRyu76/.mܔIgY%"Lެ%u+H yIPy`LbOhɯ_4ŝ&(W&}&g)il2z|AT?>YLr٤H0*Y 'G@t,wr) (hXS=w3ئ2biW2`Uϻ`L_Ѥ{'L>ba>B#hzШE0#DUhoS)F(3TVGW^H'f7,;6I&u`Pī1WHf-Sބ `>4c\deZYL8Zc(kGHzE0a4&{Ȋ|?t#.,k><&7L\ A`uKY B5YY5;WP޿ <È {ئ-dv֮t($)+ w_7a:JL&r)0>_$DP5HKG/ oꆮ7vlve X38E*L&ZZM)[y,=>cN}ZGM쬸]pi-%7}7~qٷ0۴U%yix ^~,/=PF5Ԕ/M}A& ^A3 4{6LдerzuNm|m4f(CjZ0-[f)ao3ș7ju2aRXij ~q9fH5x>6j2N|s^fHx= Mm,I~쁶=*P۪nQض9|sI'`R}m{5wj:T peV{Cb9xCo;#fKMEPipSi䳈˶o?5#PTvT @%G`%8c+1[4\l WϮ 1f,!ի׀vVfoKPfPpV(2F`~[Pޘoc;~8"͑]v&TS z@ 3qmwl,t 7@rمdX40зE sRk@#`P 6Iq?4b-#Ƭx WXג˯QxDfE0m c_o>LJ7@@ f=Fc'>7oZP hR!" @bjI&J_T%`8n|ՠU42Vy^{T?*q㕒t߾}䡎@U@LL>tG~|KLaa7MiJT_&I2;_ˬ4Rԓ`Qÿf%seEtP߮-!ȯ؀V!e}``Rxa0p]֦;<n ^Qzle WE[pIt*R3մL ˯ t3\P,^len_f_E(+a!з+!`A hPl|>H!UhL1Qldxvv>`Cqh&EcN@ V-EUkPѾulPS@4.ӞM͉>pU:5 G6jd W.\d,w ]t7SX:jv&4D԰;vFG2 ƈ#GB:Sq"}2a0mO2<^nI:$){¿``TD(eq)R1i]{u|Z00HVh/\gޡqSWy,ɤ!CfuaBBSM[<neu7A NW/DqQ.y%D*s’doʩRmA1`c&a%RɡFP5ÉRb4(rdR 0-2<_IWm0b88m>g|hք EfP)~%ֈ5֡e#Uɖۀ",<|eƵ<^?t$1`IX}DF]M:E7F6 je EGZà.Sw˧9kLK#||Gja1!඀0^m#GP%v*٦ź/)-7 [`#RBUy3] j>Vr):Uy3)l0kݰ@cJ5*FXQLU U lQb#0l"9嬈~%~(3;+)ڄJ ,cʋIrt{p9a%L6RiE"m}IUy !^vGWTGώK/ Dhn=wF?ҍ!cX #jӍOQ$3)i8|J@2̓IyMD\:ldS-&< y-.Cp\.ED طVQ `-3Km83 H$DzO?6c YV+u}pb#mNiqGre#,1b@ʕ*zRiO&03?h b/4D0rȒGWGL8}>N QYl,0=Ӛm4 ,TP߮"+'TI*lm$F'~AТst<3J(t<.-:oȚmW~-ɭMc@3 /QaGw&9Ǝl(:JjDԋF&t'fkQxXT73] }Gftנnf;7 \4ݯeYBZ4n ]XҼxLʞ'iՁʪ%ʷbvHBFm8T 'm)|*{qh؋oMN=Jtf-=$9(V;J?_K 17 t˕Dģ1]#hkWD~}: _ӁG[.~.e9c`VcP+驻Z!4wGڐp69T`W6gP L"*orF@̠r%ODL ʸy &fҼ۳ȄW{^}N1u;{q՚yKsPZrURϗ&^)_ʃ1*ԢamsQڲ ]*-黵;ldQ!vhʲ;(k֬5Nw\{5-'g;iS.-wھ&G;1K@րT'~۾{zQ- ۫Hc+Hb/_&SV !U3^ Ż30z 9淝3hꖟ6%ҸL1QVeK:*rYujDSz5YtաF3'?nhiݴ>&~3齯i _QN}6ڞ|2dEX8[j1اׂ:AU@@z5%]:p:CnЕy =KE:V$[lӟ6IΠ/WoޝZˠpGN>';lNz0LCҡ`SM7[:Lw Z7ѫynM:BBo1TX"vB!Ȫ( ` SQOHl\"x#'<'ns pñmqKMKIaXr96MM;htn8f0z}DZ`dk/Ɛp`z< j_ukF#(מgPڰh5_p@΁]mU1PtnݸЬ&t%S#n%a}v",AntX|kCVuiJI0Z@k:!%/.6PT"?@ _93šZ LMU͌K7C+Qw2[kh}}niNGw}. [nlϺҕZAR.IE)&%h\XݥMb6bj5a6ukj8Ǘn~L[QhS`pk7CuA)+2t IF~AA? 
-ץf8T!,`Msv6Vl f-',<ޝ4`Wdc@P'pݨ] Q'-zm(lS$Hqj4/O> J;\yݵ 0fԸ%ΰ ȿRܾp-,Ua[O+7&j hˊխ]aJ 6,}LXzٰtfSK+bw=8Vx֡?벍RqjвI]::yb$= ^ $73?NOD&عOj aqClꑗ3**LJJ)ÞM~>E.."@ _`)̾k}XSQ 3+ Zj5Vbi #zXa(c Ǽtg^2,ٰ-ɽE~][ko/t&HI"驻zԧ}vza2b|O$WXj«y>a#0 '{߮yka[K:mN+a?=؞[?-ֱ' %[nmhź]Zo 6}ƞ0 ׌Zf[ yG[g>r_efW]f{y["v}ZCH J gkK LS™+GyZ~¢{mhMuM s5}ԝo&y 劝S/[) 6#|'Z(6)fk>V˨wnתnep^rLE~ 2vrnED@jS>ޜVОG{f(f؂?TVusvЇ*[!o?e-o-4&mɴ|V8{Ѯb!Y@Ujag.kkbI_˰P\ʇ}LvJa/K",Ad1(m|Hkp2(L@00(\jfW ]V%K#2آO% F6ky*mR&52'`zoĀE0)Qd"-]0 x#|lՖ?suUQQ@"t4J# U 5/:psÇOqR, B 5؀\jugΌHDj˩yG0Ubd5gի0nKHRJ"@@ JDRSE%eP_ԺK^"j5@MU+nq4ׯNw}H-{-IZJ屾/Mf9i+b/YB`)2b"qSj.oJAjX~wٸ^5AK֢3ԢR\\٘=m7P8lGxg߇ӷ {qu}m H(2 VUs{-D@y!JT3R6ݟIzȚyCv,Wf0)̸?.ܢvɋ'7SԤ{1 )җ-,~fRSߣ&W7'1'.M/:U# vgT,Vg77d)@!?_JQ6K5GjGi.XIqv9N Im.c3U Qe.PRZsbv!,oTqy=吼o&@ =a :!{4l۫1*ԣZ"?\<.KE8j')<e\:Tdi?EzWB괸ћ.3kOR谫); Vgp%Lum֗_AڪhvDTŽ9s}w{\nM-Z€L{{p`f軵 Wk [zk w1L2.~6fb7TjEӓw\y?_"g[5{;Mi [gq:K~]ZkZc?ui[,~P= {mv&ʒC=Q@!F#**tIդ^-qW4lM:I[؍7qn9NCg84.z_>~au^kx7# KpegLMZ‘/Cœ9/~.B!w LgY-*]セ}j }jӬn!j$!/:ݪj%Y=][X֘f 9o:-o:쟿z:QT#FGWXVzgIE)aR؀zѬ1^iC$6!L:uQUa4i1tx4=mXjp#6Y[7՘GNqpHB PV.HM$K=7b:rQoHOɑq[=瓃[~Fs$鹞hx f[Жo~A\hNwR Imq7CBݣѲq]6'6.ÅL픙h έ9v*{m**OκSANբ}ٓd Sf2YX [% ۏGزfEnڊo?Nضӳ!+ a[~۩0AeԢaxuo<ޒD|v1VH|ў]=j-_jZг==پUUY=W/D!@@GVh6JG@5l.;Bڅ9ihb.0mjWp^[J,9Akgs2u_R_R̍cf] nTj&WydǼPXɆ_ ٕ"owcK v%v4I,W|FX4[40Jz`ƂUG|46|zƞ1)|CA4xiCuP3Z1CQw0M_2 QܶaJJwN>QGWK|]xPxEȩXL)CN)J}:UκZ,N$zsTxȷ8)3(l '%1$d%k*ȕI:c$*³7X8 o 3_YŹ@@ @۹{V#b'($K;qāAj=*t_ d֠'|\l8~m+w#jӜuprIkDI7d>~UgW&9wnHV`z2)2_TYBI #aXuժԖ_+xIv'+wGʥ{ɇ~`,/}>]` ?62IIR[T(SaaI.2"׺]_l˺k{ :;(8`Tjht^ $ `P´ &Q(!}0w@@ S$I:`s{&m%37dI;1qvѯLmH%WdڷlOa¾rZCk⛐p(X߃tj8b¿5aB+( oA@ V&bi1q( u OFa- LBJ]JRQ\5ozIc}IQZ[]v+Pb]v'؏)GJ%1'}i~$1PY%JzPU#lILJIPiBI`O+< 3qId1]a^z(•sdT-VEag[1)q!~&樐dM~B%vSzKͯn;k}P< 鵜*&!YN߁'Cav'XVyN ̬HbV#{U Jl@ 1l$.3袩.8$[ ɕQuRMi}Ls$zޚt_@3',e8Yy?^V|+2Z k 0)ښ8C m7O;vv'+x7ˁ0\?tDu07q,_bö p9}X;0ҪI=mw+쎽G;ٶ>[Y {8KN_6 dGon4n_~' #|9|Ƹ}XC'jV|V:gjHIgwRʹ|Yz;&QaZ8/0ޘ-!(_u"ʅ/)V]/)/2ɳa ?}Y6+:Dc„g]ɍӪ n<Nܩsq:MzO1TI&=<~ԍmF`x#FlGfvcbtulSm@ښ|ЯZإւ|dlkLN͘(:z"aW<ՌnKZ4%C^lP2 QdKӓ!G(+PՏΠCJlVeK*"ueEbKz$oF] ^.mς+-lw~G纡;E2AZцzBX% ZE(kֶ1k֫S `>!M9~ﲪpKZy#A5biH.^'oM(( xfiV Dwp JjTa#ڲGsLsi7_TZf UKH_y%FD&%Tb&k]3ԶݳL;QԅDGUIIH3YU=5CQ|Xhv!FgMAH<` #ՅKvX>5"iK:sntoL(wm M[i):MUn$kX*(.^eh.G7畕em]+?i;;!FU҄N1)i|p+`%3^l?!䞆C:,;?(NtX%iF~W($cn6oc v֞6U4+=m c[iXR?UvҸNSw㍨ix \{WR@1v!PetiӔ:nM{i bE4{49窱AQ &VMs6ت5lq.yeKo{O9@N]x樍s胀- jڠ _*zN' 2vTT݃}mG@!kI9y\%mf%yyIq~ؙ )zɑϟ>;:h`H,/ Гc Pe%)5bFAMAџADeF{FL::DzC3YK<ɖ@Ԃc Kzsa(7D٠=:.eJha]R\ɭ';1f}c׏1~ݭ%Ё-7rU-n.hVyD{Ѱr@5ūvA7aΆRٔ{@Խ܉sm7{__zEջt::q,=$h<ۢւїRzE0Qc?e/6SUhȱ+,q?&xq~-(`زSscsreCW[^o#NqK DJb`@L8{wٻųڠy* ^dOIm5'TsUN N ;9nc7URoY0]gYl4w" )r "X填=X6A guu?֛oyu3-%h33ay",aᓱsIY%U2ζ4Ui&d6w5͙N)tqt'CFMsn,ay> J5Z*C]$ 0̠VO)O0TJ["+RSt]+|V\fA$Ƥ.Kj5q*؝̅ƞÀ7G{gh|FLX7Xu7Ty9?J#y#Ҧiٮ}i{teݖ{`0?l$NBI lc;UEdu]oOs:t;xơoV;߾zHMJa+Knc03 ¿)LP|آ/GQJ<>.`V2dΎs$N?ӳx?#?tgx";:}LIYU\Yj m&i`q))9$ m%ik`=O5;f Yț|z>&'Rc/:y_ΦȒuP.<15cYCo棩3W_X|8N+3~Փq/ΞtWCSqL-GݓB~?J,?՘ ^ 8eSON Ƞ:o6S K8En*JXțDh "P$Gvq8e40 #7&<;l}$bȜd=gW!?[[ٔst!pLil5)N* $xIx'VpN{lkFb_R N}+9s}NչM@o#h:uwXUٗܧK@0?-I_;,XQa+IWO )ӝOc񒍴d.4 ?`Y`dȰ*4r:pCMd5*>ZZuɴ餗u"#QtKl[ĉ%є^&!h1jiqdOZA)?덮E`+y93aپU(5|IeM yogЮ lbސ pr`7SgsKqiG1ffS]Ӎ5'^ȓ!mҭfwL@I935)$Os) jQB'{K諵;4Zr#Ao6Ў~0L/ӥH ;讑fdjN[sm(2Y[;k.k+֏JP =af*ч4@PoH QG`a6O,̆#lhVŠ NxF.fG_!-/ E]s;k֎d|-*=8h,W96Fg-df ;gkk튤e0,^-Lg ]hm2ŢsscdM@n/=2\v+ۘEұL=d-Lټvа~[^KU-D 7CG&<7a=*5%uwxhPQO{ y:tYE 5kק֞t)0}TPi`=2IƟ)p}~z׼~'_|I٩C_d2mǔ16>2oɟx1܀}{]Jo)pN#n4)`%o } Jf}Y*t)kyߒfκ(~ȮS^ZxMo~+Mi ]Qk.FÔ$zb]k"HGJ+=qzsW7$d>kUFu6}MA7kU aGz5 #HBDc54touvXr-FRv&_ϏϝO~fsLS YRIeNVϭPHϽF>(Nl.^N{G ^0J*_IK6' QcI7+Shy2L2!NvQL-kAdd H s7D'η;*s9|O$Dq0mif_n#?O;]r]樣zQ3g왒2/bN59*wL{d>{ZpMwë>2 
8Tzy,$(Mu-d*2f7є&q@RaաɯQvr>}R^eǦSՃxMGђ՛mtDMdq6W-)WVk%NE6:`wLw,4\HVxM40ŧFm怆~`͞-y _MHA+C I_\w4*NOv7QseTg_LTcGӄ:}}\_E@58 Vdk*/d関#uԛ쀎N&/ 7Ltd2+ hs(*YSQ`OG!U<"m:k IݍEy].$h09yJ_wRK˹ři/ڒK@Fj_/ 9NqqMhТyz&'{s< .dmE&M6qԳKGt:27&gwr\1er.@xrQѧt2OCć8s7{T=uKQ7恳;pfL.hJ,ʉEpsod=tR5GR8aEYZq?Fnv?p;ԟg||s*s#ZqR^H0saj' c}߆'<=6aW5Qʇu{#uB~/~#H R$ۚqPRȆh%& ^I"FM>ІT7| ~"„0סV&s 拨y>mT%GGomuj&u;vȝj13oLpV7$|E?c2<N}Y~( `*E{̞Bnϟ7Wg9IN\q7So U5~ųqf]Z nu;O-"}n'lbʟ(o@y8HhS]dY9s vCM:3,$m1{>ЦUjPBjֆt@$<\qZ#bۆgexW;sڏ{KϜ[>6Gu\ޏ~ICwF84ݘJ0K?⩹/rنwOccJ,!rv &(d{6JN_^ԖcV|L^kW\g8oUSb^?|+3lY[ Hprc?Iq:](^%0[yxy1˩hЊ)0SƓNGm~[5 ;j ի Li"F%vk t㰡wCr䜓74; ӏ44c/{K.eMޤVC qׇ9~'NUR53"L#WX2qƱ[#~ ZG%)Gx9>lk&H%O}3eGM1F=Y38XÄ6;0<N`G<^lcj&.li`ld;[RV1v! [Ͽs ";'fX19å_`6,iWV1 s^xvRtҴ髮y?pڞ L-~ PL@’t0`Z=!|L юR@7m /o7U|my&ʫ҆Z$īP7 ׷sT,ܖdZ32ytZώctXǾ4֡m3߉m߉8ɜLc^Y1kVoaV2f"vǣ{ྐྵs&.B>~=d߹F| 5"2 KRIyAŘza=&7m)qqCDಚڐzk)ڋ]Qr&e7鍻t1iڐFˏIfwn$m+3;kK[\~Q'1~m5Z%Nۭҝ$#Ljn;߬GH4wͧs❈J~zx/wYAI? JN,aQe]} ة3W>-@’t1CxIpJ*>^:+5 L1K[̔o'^1~.)4 =s2h#Vn)}ɰif"ʀ{E`3R^=/>o*tׄ!6t$+>mt]H~'Tp7v>E:6UU֋:OІ `?ʵ1ASz2MFvi?j&;naV#?JL_>tcRb1С*7GIVD*ٓ4m^^ȞoO/W9HPь J4rqڣq&XުuK6Vgt3O?뗼c_UjZE|M+$!=L mH8M{5w}2w g^>y (E\IՉ(Ðn5 QVfkr_DLK*N:P7$JOB;yNQ:;5 qgILOUWw06FbNX }"[%QzVr SVj=yQ6L&rЌs_7Igs{?3D;'S,+Ae/S E;~m)yrL-0Sԯr6'Grl*[#'(suxPϦicms]HW7\$+hի-:~uu~'r쉫?|qdJhli'IeyeFD*O@\mͩŒlW6;i ~F9)ËWٝH8X{54 { VF UrJ*7WJ;,$^od2i2CvюP׋v,L&uzP_PmL-e6ӡIhV.zA2.;`Z"7߯ N^KW?{F WBpQ.63~3 VΡLy߮쌊tW-v403aVl)ƽX=iDݳҨS:Ӑ)|3\ !GG mAtΘ4Y!3dGj_2 ehu[`|#dŵ-ƆxΚ *-_7S:;cnB2@ȑ +!hS)DtJ&2HyjTW;n4ao1bP.ںHTtF>6QL/Р$=ye #dІc n7ʶ-)ͳݰP۵?ܶ5+Wr^fj^`7:9\ngA|LIHwv &8(gͱJe3Hʊ2Sd2~$$`#&mᲒm6:N.^ G!Xt|ʨ%n2Sgu"pq\e1)hr$-٦?'0ڍ(v:y1D7bFہPc}\EueYh6_GzѝyG ?q2'y#מF._U(JwЇVK~υ\gGQ^_Z>m`/N%FMi"dm\!6tLĤK 9_ץW78H0] <ي+lڻCs빼y`.,2p3A ^nDAe/l\a*0%7b1qJKKT *,#d-1j7W`+.9Ɛ&(EhUҙuIïg曳auoDK9ff3O&gՕ2uV-##v%Uul:A{S/mW+ AXB.3@:Iyr"CxVptXx 䠋d{8RyoeWA:h6X8LPw]|yyLR)Z9FPl2`RmaGh"2.AbJРiA\0#Zѩt0qLg>,mvWܤ(V;UM(ԿvV*xv{$ <卷<0󲔴ԗt2WlŤ8#Hq ٧S9zNq_95{ f]%;@""$) iomuO9pۺ1NNΠza?(8sBIR" o=Gݴ)#1&F׈Y@IDATRm~ UM|6x&rvK`FlȠ?:\_>(lE3(-sR[ru!~~~G*Nn#E7XUT \ 3f~W?al.䇺CV =iNjRyw>/_/{jRJwo끠0\x'Ѯ'[ IRzTqJ YI @T mB*pb}^5u 䅛0` {8bj}@LRp,Ҧh SF`'Q4#0(F0ܓd2<8Y#ڪ8E]??>S>{i=zR7Dø}~JdEL5y wX>raRDmoqܠ!Oȯן ;Q0qz~n_7 vVχdDIRӏoI#W4~ S/{ x geeY]po~B *d$@ZIIZ59պ'ΥrvChKt%@_aAf m dANvCn!-ڎębڳe3gϞ @#G%~wodmʯ%+mk4YA|ﮟ][1w#q\xsȑ_ATyosp9 *5Թ;Yp9LLd Ej Yrg>ϲd`]-{H ^Т3/W|9#<}}DמQbZ/4 bz2H4(.v"okI "r1C֛CM,QӮgDR,lۍ۪РluP9~p+.s ϙoD +OV 7m@dEx=zչT6@eDbYRog4|[%` L;e>)D KRB%$EhR d O3y-Ͽ0mhn!ʰ]ÊA&Qaـ0kP%)Kd'iűhvb4ڍl#l0uc)ݺn.^YB֤H CdĬ4`b]Z '4s+,z諿jKMO≆&h3 dO֕dT3I$6fHx"^bB3-&;Hɰ4B ~|jViM&\~3TS5jVlo '&$[6T{?\[-v[k﷤LqN@dbcY!*=U >Nax\W.WU{M՜O)zCGL3IJ)}TSg<*77LG?<)W{G I ^Zfc+Rl穠cZX|fT+K]FX& /E+ܬU/?ld&'w0[i}/)`2.^Y^pdߞl-!, 7L=Y~a5M&mZRq.JTAi6ewJ޾y tHuv~`Z@$,I ~}a ͱ[OX1Dsd`MJ/^WȋpіHѦkSċ?kټw杜o!A2 _&< #¼ JZN &+\w kRPAɶH7=Wb$u};n:-υtkȒ]Mɷ-]Kg eT; 歱=28$q@’1?]R즔 i,[0'S#!dG;QG$ HbF8W$Aa0I L8x^l 4<2sTEV>HKK[`OoM7's-H& IqrLm9Y3%^hWMf!'FT) :9˻-4pLqXφXk4&yĸEXSPuu{ %#[ߪqQv,)]kkN-]">퀤hX ikܿ. 
|N;y unw,U"# ~‚npQUǤĢɦ+BNȋ(%S pF$H-HIJD[܅9Oٖ\׈{̡d/=R%% q;B*o FjGaEs x͖Jݧ'ʋfֻH1TNa%C*Y99ИPj*dMN ۱Ԕ<|C/^LRF?j֔I{^@>qSr_".h7$$U)  )IVFP4ZoEhQ9O!=Vۅ)@'Y7Eh`SV" N-&ƒy~csFݐ"~1 1=f;(^]a- cv"JKԩeC:M,HQӮgH JKPH$@""ЮH@-"-}4r8 Z1(\zu`@Tx%Rhr+H$@$)tU9 Z 8 ?` J$HJ@#lq?D@" @%)A@VɉNI'V" 42\ؽD SHj H~s g`iaeaLXjU-w%G`կPZٱd[iH>.;U;}-4E'z["hHh=*#$Pfh9s ӑKH.~oUtq_C;nլrZC=rW"u#.+HYP079Qbs^zD?YPJAtmK:n^ 9`"2'(88]#LcsD@)D@"h^YQRsj!hT`Rщu^XRjOlimEFIRm^F#do>yD VX )D@"A[}{%3 $1S'5khz]qn<~"5* 㱼`M LWHA$F xmW%G=#pɴ:F2ٲj\+i8t`_#J ߐqmX.r @ IJC" u^#(E6Ѽ_vU <=6̣~ ]j*?N ɣGq12IIR⢛@# 41໐W~1kS:yԺNj˼c/ZB?c>+M%|gѷΛC^ATD[*F9Ym.Jlpd8pΖTPq9;a HIf!}QΙa+ H48| 8hlһQʶ8nPK}hۺ/#>DEjRN9=Iny7VT"D)*GyTML8LrTI|RmItAtјAd4n Ʋ@" =jgM z!~z#ŠqӒ@r=D9w.^ 9I iZ.XC%52EU-b_^,%i]t4\!%{$QCf ` K0{t٥PO5D)u 7h8Ǣ4(wx߁Z *2IvIR@N^}nk5099i ?'t}b*U[B7\4.7t:& + AR\^x_z^rwU8n[?S2ɷ$qmdiƘ'YQeׯoZ 9DI$)>hW$EhO<FP/NwLHbM^. xUi~j빂ms=TEg iUAz˗(we1uE~Ov[6pUS<L>mϦjR*TRnqP<.gّۗ^b*FzvCRAɶGojҦr.0nT֯$*u{E>}R>`?y:+:EMIS}YWFy$8n_qiZVO'}>uvWc׿Κu?Yy+]W^&j>6~J+K9^^=hMN`Z zk%[(p K ݳsMy\ B H ~#( L@!)0񀠔WHa4( εo/\K}ej">BNȖɁ/Oݝoy`FI{n%/g)X!"=/CY^yz֗\c $^L ATA &(`xE-JhȨ;nlqNjP&)-M[9|jbklԳKGӎ)25:>-5=.24 J+v`͛rNrウ󟳠9Tzh)sSA QB!L< 'F p$Q *}qW"?8q$nSTd}V ָ4TXRzyx/VF_/w.ߧR襉'{ Es>_oe*.@PTo0!(#A6pb3k2 n^V֤y5)N0"}',$m1{ȸ_ℊjKIohnt5 ybAgs( p(k/V"Pe aM{EnᣨcZKNqPx<ͦY$RGV (u$!$0szIAxI`Av# )a =Rf/d#D&rzqH.~@ $EhRK'+(4!hK<1Q*Cb lqg/,L!!l}dC}78NϞ ^:AG:3ˏ{OxU)w{@ gM9,s\21hYӬYy-%)ۇ4vp/ޔ5Zyg?9r|SxII/@?z_zw䝠W}C/<|+ uARv杤ɣw{IPKokm'Xn2p5¼,f Jm|0˨{uLO ki`.Z5' Y?AD:t_JϿ|NF@_ )UDEL4vZ9'w{7w~h]Y/ej݇NMo A}w \e)ҒhPotp8"mTdgPٔ16>7VL25Cel2l6E%hHsOݜp <;gc̒;W\Ju1|%S.Р %өɨ/՞f޷{6 Ys StEYnL3-\@o/\M?@5q5;f ë0mLlPnWM Ebc҅})9DPHn'q?d_X͸P)D] IJݜdړc.Pǎ0R_Lhd0a9vyls$uʠ/Wo{4dTvCpaF/aFY ϲ)HQ\1Q:A}g&MQ\8z1;n,~9%K&$j]4*یt$C"Q~ Jܥb^@ւ6}Di:yX)ّ^,+h\֒\BgKhњ ~5"wRv 5 E3`:3LQb8p*x) -(=hfXI)-Щ#=IZE%sPyD#gȗz)=/nipZ_MD'.H˚m@fP~қvs̒k•۴[?_{ӥY֢9~ѿ;d)3Ec׹І |_e۝,,^|0 <|%E9D" ihA˳<.γ?KΏ.DeHn ܄)3;:3deiڲj ?zES1Y$G;.5I?3ss8dԊ\;`2k->? 
[binary figure data omitted; only these SVG text labels survive:]
Produced by OmniGraffle 6.6.1 2016-10-06 18:00:33 +0000 | Canvas 1 | Layer 1
Linux Bridge - Self-service Networks | Network Traffic Flow - East/West Scenario 2
Compute Node: Instance | Linux Bridge brq | (1)(3)(2)(4)
Network Node: Linux Bridge brq | Linux Bridge brq | Router Namespace qrouter | (9)(8)(10)(6)(15)
VNI 101 | VNI 102 | VNI 101 | VNI 102
Self-service network 1: VNI 101, 192.168.1.0/24
Overlay network: 10.0.1.0/24
Instance | Linux Bridge brq | (20)(18)(19)(17)(5)(16)(7)(14)(12)(13)(11)
Self-service network 2: VNI 102, 192.168.2.0/24
././@PaxHeader ... mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-selfservice-flowns1.graffle
[binary OmniGraffle data omitted]
././@PaxHeader ... mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-selfservice-flowns1.png
[binary PNG data omitted]
././@PaxHeader ... mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-selfservice-flowns1.svg
[only these SVG text labels survive:]
Produced by OmniGraffle 6.6.1 2016-10-06 17:59:10 +0000 | Canvas 1 | Layer 1
Linux Bridge - Self-service Networks | Network Traffic Flow - North/South Scenario 1
Compute Node: Instance | Linux Bridge brq | (1)(3)(2)(4)
Network Node: Linux Bridge brq | Linux Bridge brq | Router Namespace qrouter | (5)(9)(8)(12)(13)(10)(11)(7)(6)(14)(15)
VLAN 101 | VNI 101 | VNI 101
Self-service network: VNI 101, 192.168.1.0/24
Overlay network: 10.0.1.0/24
Provider network: VLAN 101, 203.0.113.0/24
Provider network | Aggregate
././@PaxHeader ... mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-selfservice-flowns2.graffle
[binary OmniGraffle data omitted]
co;n|nƸa {ljjs:-7A3^h&#0V ^ ?y+\^nhZ\9O?x}v.}jX3ʟ=F6kVl*mbT4S zu| ]3cUZ MH'6V:4ZH R޸5 N!4Xzg svr Zݟً@z$FEF9_3 cCǏ;+{=L57:_^x^\}ROyE_=7AZ腛ƒD28@u}H >US{fhgY'6+{>*BhE] W7ql+6ѽp(P ޸] p9LZ/Qq_W/0Ey)ot:Wܓ^|1 #Jqjw\Miso~犔)Иcy(И2  I@1zt^Iu=1/06@fA`E!C0!:4!ꞗ8H'm 7\{g.W.E j}P.ɞ%יU+2@0{ylƤQw& ;-vylZP%c(P8_ƫ:3PU~K6 j˒]q=5/ de=K}vzx7xދ%UeDHz2Bg&?Đw9CKϑg N.Аtu 2q}^}K5OSY4AȠt+.p3b Ǧ,W}xF ,=Cù QK &E l"Tfa>K uAbm} xL]3rZ\gxýOJBn#*7u_E$$o$5@CϻY4j/sGǍ V2YN<:I2Tp>8'KK=q3JM!V}"3 vYs4oWdObFk8 ɒE` Xid~w+̴1@E?̔A\4`}ی?ˀ1l}ZSCcחWtZ6BEv.xR3N7U&IuI.MՈ fR;Mxurh:Nr4KE(W8#uҴ# gtm YMqZltiF/Mvƭqj}L6=KgFͦͯ 1&۩#Gfh/&' S~o P;h]BL4fl6Z5:q j"i!a!)i51|ldA$99Ô6N-W(hT߁Z6Ao?W~vP/wM=,- qѤYxT'APAK-*-" M{5ܣiB.xiP= Tv>jK4@ҲMG1֟@`. ,)ߠGYB( lлQ:_siGEq>4V]=bVի̿z..c-&Hȧj~uwȹ#HN(|$MsZ P*bRWiר ѐ_:)iC%4 U-u]_yMD1Cg!͑1h=,ùBrJ9F' 俙MsPd2S (Dp>ޗב9f)I|wk4hS-ZLth jZPg^WF5+R"ޗ_jĮ}(QP+.VM.u׆֠H2iDblYyލ_.л؏֫$S7I`O)R@q0}gk o 5єShFY{K$Z U^KGF,Tj!))ӆz~ƟfTD ZMs5Q9KhzȃU2bD#P-Bb>V2^!pHoa\ 6[x%"Pm$^X,^,sΞfZvh&mF>@ŊI$EHJvaѤ<][<P"6JpQω˧x!kZ)cH:FRwC/<*zyLCx⌏+N`st( 3r[WäPR{5zK }b[l.YH^J$Մ@ y,]?x퀦?`) ˓_T\0'Lq?A] j(ۅ!@cڀP'3{Q ءx<hK'؊]Do1H:5x*a[ӣ 47UŃŪcW]8A x>;F ,.[l"3|^!0`dަe6,;w842z+ݏ;z+rL`&A1'0R7>TK&ZBh:?|>APKK.VQ8|DчG;\i=$PYop*_%#P,a]yؙq~ǻ4ל%1Z^G*KP8{3jFA;9zt;}hj+hZƤ6ڮD!3>]2Ҋͺ1c5ì=}z߲Q,lj\=m\yz{ܸY3b"(iocN-A9Gϥ;//*ր PvcPNs@w78ڢMQzBe n-4Rin~ } #Ľ8\o2\|΍9,e+=3٢AY. #3L2>%rk(KڣY^I Ѥϙ=U_vmKsu+w4+)a*{)2=1]/Vq -MFjy=32P@$~1ؽx /jy34=zbw/m^j<3Q)-Sռ-7t nߤgnv K7{,9ЪJd {#pY Юn-]}hte98۞'}za*ps=?pe3F(n?_f<##C'f$d/);Z~t;%D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$D@" H$ Yq[{1tXՏs_c7vݯ~ґf_'2:)oEEyhhk#ڪ'`1cR+|2C=fcN\>vv| [^F;ר)R0e~bƫQAf.# ~O/U{SkZYryW3?qAuL\`,)2 />2rcqwtk-?v߾2q+ 6+iޯ*/埑e$$)ŀZcsf_)NۊKg2))[Jd{mrCrvTneţp5iW=*o8d硿ZV]l7Qx\lfߛ7ts/}Ѵ:sv(rٝs<@xyzx>Y0!tfiFruW4Cʆ$@hR&$׃lCO81~r-t3T=Bhqr#O8W}?1#tv7_j{&DػL4EwS|E㫻L}Q8 v~=3Q }S% %Ysܤpsv;']>&VqFO쬱ˊ<7Z@WkV 7ݕr蛄YٸHm :HqDFԙqT̟1C^iO+ ]9vf21h0H zM13_Ʒ31fTVna;ȕ~!/ݪx\v$EQB ijmLOxsaoe#ГMMtĜUIc?qݸ @ħsf|괙t1! S<`oRc'Цd+ \7X {#fϞmYaǟr4#?೔sezcf\δM, uF%أאL 8Wڻ 0<7 ޢQ7jߋ;E 4;wFy*G9Gduzc_/PV`YUE(GCmg/Pr+{P 74e7ns!&ZԏVwwo8(?NJgHKuEe9 g9bw7-,V.SU>ooҾȗ\ΏE3t}*nZOin욮Xעӳ!n.1~'^V]GK'j?N~ y,:3&.]ܢR$_7pa˻r7Cƻ?l Ԣ6Q9>-jxخrun0u2Їŕ.8KW QW6BJexf`kK'!*hݰ(G+T"%?Y5z4eQCqUT?W}.+Q2+?xO/Z<>WF22,tE` q8nLMުEZ 1ݹŸaVjf|߉[\.c 'lA+OX?s>y0bJ!N(| u}cŅʸ WeL$+!"$Gu rNg+X~7@~1g,UQLMe<97\P.;\#Npvd6?n\ M"]>dk.hX j0eK6|B,k ͕kWgMuiVw u$S\{QDPUבIz'F\Uɢ_|vXկ=v,CwUvM=(a94/@ /mxtlR7##ahqs25$& XIwyzp}D,GٌqyGAqah<t5g-WʉJ+^{R%Tv3`Gz9 iJNfX+Gdk!*ןOi0>Δ?B>^~v*f(ʷ/Z qbѱ9uBY/l'M f1"bJ5)I-rܩ͵iFч-$kaFb7tnG]/[ŸH2F*R%nHarY컉+O 釠}\)ay.lޣge}}v<42Ia@lX06q%xh># O_wʬe>+F6z'QBA:O Pm$e,庮 q嚈gܟuis޳8XecM+ )@_t%͂w1{hi=|$&셡+8]={r!b F!DDF B$] A Fwg9va1=}P#$>_EcgvΝe~WW_UU2b\Be7^[Ev È/4 gg;-W:Be;_w#!paC ?h( RN羯o{/Gl]wjL){QZfAnMi^-_BJ痐OpG>1eũG\u/E>b[psT䃛oh몸Yefu+❁I ÑszWe`c:ej?7-s5ٗ%&g('Ê<&Sh) q'=N}wV_9֢#K;t¯{{„Ǵ-T/++| u뜝ob5χ&roa )[ȵﭙz=LA9UYK#hj>E)NlÐnmuUC85RZ55GѷTLdEl!=KGbBOHZ_r] dbZ>5]W"m]2GPMjt6o1"/M4tJ6WV7(M}#ʀDH)gZVPMz:Tl&zsPiey"C&FբO:jX)Nt]]oOʑi/2dOb#{ FkZ; %W] h}26دP B2y,agouZơSQ ;FaY/eC&H,"pz>NXSE˅ sԳ^l)[P ]}eʸܞ.5ي8=eJ&%enVivQ5Jù˼%K &<٠2r]{T{ 1HoWRPY[gP.@H ҍqsh h8( }ϼ3MLN?yhSLZlFWSPp/K~i%EE{k4 4{!C,4-{«w_ N/X4!Y0핁#{ Tۡe4pvctFa fqy z|Z SjG!?e{_1w^-,(QVraeE@~iY+g|؏]LmWWhR=A]h\ЍUfcEa0M  ~ۡhʶܩ(Ӕ6*7X;û H/2{Drxvr%3&;&:%_BNGcBXE rD:'~Iͣᰄ@yVB_$k>~!E=EwQ\VK}t- zd5*#> i;uʒY?Eᩖ%ڣAK|rQfYL5mM{cXR^0l`S|;oWTd2Q28l?}Xlba@4:DHQg3(=̾Sg#0d1ͣv[)pJF`RCC{5Gj?ցТ͑c8OA|BΛ`:=#"UyVBNwjSbF Rwrm 9ؖ>$#Y*w]2Ggŗ0RƏgfo<8#I"9,*K{;?t #0I !B]M}$r%ƢqdF8FvPOefʌ#$@ )E!L9b%KZar)86#p#I/[~L`0!L*1^l6cmXA' w;|ouhy>p:~kxFS,з0t["vMҙ/U 27OiCFH]. 
P)~{Dx:h:Uq =||@)p"n_Od>qi'ʅtc&>h0^kL" OhnF)/=C[B;aW)'oMs ж34q̥a!( 7裆///:# vk;)Wb`e_gV}x,2 bޞByA{Jw_Ϋ8۳rI䞴$r^w4<~ZģVk/CT-8'X8Py|P ~BT a {\,Zb'¯'2 R_4<|L4"z?g(/ /+{ݠ_+vАr*◐}RZ6aa٪9U.7@7:fu/Vp:L  /QIu]-IMqM8,GX4j(e%H4l;3W&LmA|O:U*AIG?ZΪX8'nEzǯ__\s݂hH*>kg_4"k%8>2hЫjl\fGwTg^,|B-/CGP<ȄgєL"uҐV4`xߖT"gJ놹X8דOx5o~gpq_V%W@! t깰(ߺ@iBHE] >(hz?ު/./e5P RgYx釃 CjћN L9Z`x7{)%?B;a!w4)GΠp۾t0 n9  %esߡ?wXaޅV\uCUR (6fr| &4k($e0HkCi:{oYBX ߤ^UDQ^Sv\!?6֔{<>j7+G?Ot֔ݜ. >[*HKuW*RmX]rGGp0+ⷠ>&?iB%Os ''̹דOLz| p̄/7ˡGBQثhEtK!4[dBmmZՕƑ 1[y}>cрDhm-(Ohw4}Sa~Ү16@ Ѱ/To8h]/IqUKvG2@tTb^{"<>tbHgk\ģ{eYct>ԮՐ2t]Ue?i@LZfԟ 7Rh,:;ٱmΰiJ>" u^'+Tϼ`{sUU?@mOȀ1uVlֵ.NԿF'QLIxHc5=פaiN+ҳP_˺a^vh[Fa |õv5' @Q: xvW؈/́mw(?ebcQ-KQ/BwݠFhEC1FچYD)hVӼ?khlwdcPݤRZ 4]_ESHTnh~JJv"2 Ǎ͊ nC Р'@ 2 }o7nj(.n zݳD$w4=;)%>!!#w\鏇Msයm#Ɗ) KxuFxEUOAɰK HXߜJy2rzwj^Siݵw:'ָ>vhF`X?wnfQK95žxt#דO"bŞ cI96H+!=A4=Pp7}UqRDv4-?U##IcX`U"^[b6Ŕpb2#MS^'䒊kij=9(ԙǪKNCVprdOWgފ!x7e4S89iX#Uwzb+I4ٿ3 i{: O1m 6 *nw"-afz{ <N?b-sɘ"jI 40I,qp%w% 2^ړ!c+U3?q qiC5SvbvSX9BS4m0~ Nq1q"5)<<#@G{[=:6=|bLub2/G{@[ޘZJZJ.uힻ'7D Produced by OmniGraffle 6.6.1 2016-10-06 18:00:08 +0000Canvas 1Layer 1Linux Bridge - Self-service NetworksNetwork Traffic Flow - North/South Scenario 2Compute NodeInstanceLinux Bridgebrq(15)(13)(14)(12)Network NodeLinux BridgebrqLinux BridgebrqRouter Namespaceqrouter(11)(7)(8)(4)(3)(6)(5)(9)(10)(2)(1)VLAN 101VNI 101VNI 101Self-service networkVNI 101, 192.168.1.0/24Overlay network10.0.1.0/24Provider networkVLAN 101, 203.0.113.0/24Provider networkAggregate ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-selfservice-overview.graffle0000644000175000017500000001436300000000000031346 0ustar00coreycorey00000000000000]isƲ <4,;V,//)UH5D( (I+HکX20e0}NwOO|0EI'JQ5ѓyG/8eчgǻ{{qs{cy]w}}J.\Q\i. 
v~ )>8p~x9tEWa5L_u'{(Y ӧaw 2ʣNA^)<\tN//ipvޠDk<>S23#d~/:Kݲۆ]GQcȷ)ၴJ)0Rx0ц2ԻAz$Tp͎q8n9|$\2<>;q8< Ǐ5~7o'{4Jƿ L9&Aн zoӨpay$_tӭ\V> ZpQ4 X˰)MA*v)ݥģG~?sIrAyG4(wz.q++w?^^QzYy_ݬ,dL̞Ði%əA-W*IxrinȅK2K9RPkPB*@UtZk,_eX'`AL٫Y,0Ɵv|z NjBz&[#to/qiKɥ=;Og=S7+c ɧh:tlrGfak!2gQO(NS.'eسSXB(1@imOKV\µVOi;xv\/3ؿ D:@ )̞͙JƁ\ wA;1e&{OLI|އ_<}v,#'ivFO@evJZIN "oɣN䌜u#:^ [AaIyIð~9!O^pKwRO.}}p,~p76o"DD v?͛( Mud4OjY9Y$7<=9Q坴Θ&{@i7`|BOW` IJ5ϭb ϸ'_xh" |x^YQ^oaiè`e_Y gg50h7?$zϰަ8!L C%p{!9V@0S8;T23J)4zކ^04( 0S56*&֍Kz3Ki\ ea7vٝϹUֆq*KZlx2Z*1^ ccQw0 ~&Za Z_q/8LzifIك(^I>ES%8WD$9[y \Yi8eze( ÙRtSwS\QwTWԳmTJd+ >SRJ=* {;a7kǣ}ЭԠ^jSI%e\MD58&Ae@nO+ *B5DBГ$*79# ,HT0m 7HTHTHTx68>m)A0_宬[l#1B:tku<æ^+ ޼\d"'.!sedME5Kqe) _CMXtc@0>U =Ѕl<\鯵 QS@tq )_k Z0e\ZEr4$2N>&m̷o:yBRɁ1UVSeRC)edX`\SJ 0~Q+ @JhK覙ՊU[-,7VIP݂;|iE"\ٯ6Deҋ ܰ$9Nڵ¸ [`ο =4:iX  VPD력S߾κiC1o9L-њ~>]ʐnHz QcdX@Q gCr+=e9> VFMo܃"8B!$0%Lsģ64?='Rb>C-ӍMdd;f.'.N C0hkxwDbnקZnuC6 ӑ!zλsh0 TG+5_]&|D> gY;V$G" _yh) Y{e˗M)2j<)ǭ h˭MI5p{gvj*@'FIK1D5.BnCXb.n 0S.oxl\^"rube+ J9X))+37v۾čhAABeOsh"b Jb]h^Y7@O88y:%[?N;I)}PA!˭Vxt -X0I2ʟo6ŀ7t =[ۻt?S-'+8NA,j@,ԣZj}-gz *=F/ +QR ^dZJ9c-RJbL `R CGt%d~E^KN9BJи\Ʋ;.Ū "JQ.5y.p˩FB5W\B.ޮ^j)Bϋ2*}UR\5UqG)L|)ɷ?19j@`R)W1TއB+x\v-Դ@VDVDVDVV/A՘qGRy^EkW`1-|ZhixJpRsn)(BwM/g=&e$yEJ;hc)t$;F )Sk>)0 f1#b٧ \kj5ZBPh -b u+xX969G m̱X4neqȺdddd8!Adbb`Ԇɂpɉ'񪵁щw&ri M*`ጀi`5L[ TN/ -,\rK)f-κhB|[kдi2p<9ˋ~qRM@BRDFDFDZdYdI3e@v8u k̀"2"2  WBFeolM%i}zMȸ-ȈxOuFZd\xDF*?;C!54WXW'2Vyo/\ݪքmĪV+2-1>q\L'ܸ2ʸ2.}PTtaJ:rPXop(7s!aЂ<YӤVi> 砇0+|w'QÌ(,6&-f)jrIF?J&˞ *|P'Qᖂ%piƙh –:}WZ`͸zS\I0FPPPPW0KAwuRe(bԭ+]A0;(Bі[!nUMBʔ*I,'m@ `r{%6pm ז0xb`gVO!:2 vK{N)>6$†[V+aMhW͢i_[:T,eT*OՀx &ʓ!|ytV;\Ufd׀$rKhO~awa/S N\ڋq {We)5<4T!)A7^(Zx83>cbv\'\Ne2 P8w KfOJ `ѢXNP6 IqW}6KmM\B&ÚHHHapɣg :npGNSHpHpHpHp@pǿ9nO1Ba:JL^{>&*G͏.Q+mI?k"*!(m%󨗕ā`YtK͉OJ1_t;cgi'Ya$qwv;4z!3&J7/ur~Z?7IOUQ I*ė^veԣGAʽ M k7P0kr7IJolA.+a}qM;}։s9~dYr:H aB\,NlZ :-c*2InsGīިv퍞g_zߟ`N~5nu/?p|ןސW^m~_2ó&Pe0?j@q).Ri"&14}#phCٴ^n" 3s*O.qi,?nPAyn.j5C%5`>:#("Aн zSJK"H1#P[Yt5wv@m Ѝc/Y*.صͨ~OS|$:{eab't1|%R03ZOs3 9 'Yοcv4sLېMY/w/C'eHէC ;S5P?v@p݉?Ee|52tfL҃F_pޛY5{k ^A>Fk;././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-lb-selfservice-overview.png0000644000175000017500000054613300000000000030531 0ustar00coreycorey00000000000000PNG  IHDRL}sRGB pHYs&:4iTXtXML:com.adobe.xmp 5 2 1 2@IDATx|EgФ& "]; EPD+ APA RtlrI<$;3>̌`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&0H'1>GvuGN8Ӫqvz=#չ_t]c)S痯1H'pkf<]-Ϧ:Ez~9LP1Gھ[`JӮR[u]kNg\ZZs5O6&֍f$$x+z>5]7Jݓ7G ]pLHJUOJoESt!Nis=5iOg1mEJ[%a%.A+FD=I~kyebmd[yζvGi񧒫 Rĝw=X=ϲNdXIRC{WqgG><G|jII; SSdkO ]Ś_~l#ܤ Y(yѮ[ 8y^H]IBz(뽺xN)oIgDЏ#KⱦΔE6oz䓧A |" KLme$E=m{Hh$T=4b<o0&Pk ^k-J{ͅPDĦm<dݿF4J袙z/΃`p%7jQ y[ /-< |.\ER?τ N c=~b녽O8sPuA]+תzQPeWIunt9&*N5LOg97?% _ߣJiڻ o=-u맼ӓHTp?!9lMGΜ皺ٷc\3S"|uMɮK*w(m$Eԉ5SUyr 2|Gfi#pآC7@x(ቖce5Ml#3]A(//MgR *R~r-".xrC2ui7{5u%ҩTu:?~tʻ?yg'g3s$`W/@Y2QBBxLI+qPUHAs0ǚ`Eksjbf LVC8I&P*0z`n4ڀSDdCgOq]HL_Saqe#pd¶S;.!턹R؜ķeq+H(#? Δq[B*Ý47$wG?Bz򲧒`2Stj؀+M4嚌2̈́tThkl\QgSuL B0?cN;ikx:Ox}?IMWħmm>C]ي;3Hn5mUۥ(^55;ޕC^G>3qo=PPl1ޙz?X<43&!Hg5p;I .it+5  .-/O.dzF]=O,3؊ bH5 ÓމaH{ xXҹYgrrzHE~Hk3%$娓w7~g> xߺ|<º8y*3mǸ\ Ve2{4#Egb?W8>Ŀ|Lr@&LC[&:2ʙr쎦~Ki |xļGѮ^U[vw'crdūd##h(Ry5=%)gY mHg}$&aM  ]}ieI הt3h7M=zr9{K(~gﵵ% 䃸0!>aU@5U DA877u_!5oBlctdow]Uߖyiݑ`^V8E# čNp]g 9ZyR0'KE'|[ÖuNW ӫyWE1hΏ3*%M, &i^s/?ӳb-?CC/>bf4F0YTt v_Ml̠`6 O>֢NGz<_! 
4 Tg\QVȻQ6Ei>k7A~ 67Uذ| u\t{`^\!~{3!ص@o!{|g XGu׌fYE?3Ѽ[4RS!4tcҚ^5}M>xby{#SoWFw3pA8X(a#v^_GQhjϑ_V{M^x;<H<5yz5y~;lYM!UK|aЋ>0*rSC^w?6+I0wKF:ܶ(g\'A^Jٮ(XWZw][:u'?*ȗ_ABCu!]Cg=U+.>p$4=wz2I[DeS Lnf B6oѥFG">:ҴHʋGoD'Doe!pwQEB_4j}Wh$zw:{UߘM~<]xg5^C!hR{ξ/gϡ]8.J**Ic/bws]f8t[^]ǧT_J;beQBN.=9ߍJN{^JC,9Ă+ŒԤ{t;P彇8{l'] MX9vK:7~/E;'gYxX?]/4-iuPl%@>YÏc v/ޣVOÎxwI#!xx*5"iGD7o8. svxS̶z+<: zʃp w`byT`=KDz -M~ 3 )wW^,\@Еy z``) @ɩwK6NZa=Tg1.`nۜM,D~tux^}a0W6|mߚ0+B/חqQU=!q&Bnȥߕ-A;C׉x܅:Vrka#6!êC\`Mof=hߛx Bc|9>\:dz;eȎj`y]^_*)M$&SnK#Vhz3\1ȶ}G;%7 0w*0tTwdfÿ@` B؄̖&>stk8-5: ^!£N0Z > P;[kB-+REy˲ރbbZ6\@BHpd{[ۛo+y_c\ӥƶtby_ {:JdZ86W0_p>M״;`\Ŀ{ q~dM|U ps]Sj[۔UKOx zwHkE^Y:|Kv|"=sǙcUH *F/9M'Fu~eaǎk/Bh@'0OC;i({snD/<^1=M J{߆lhg)[0waeBG>GwոQL גghNБ A52 qt "R Ky=1G'ir^ }lk]p̻o>|W6ZvWCDt|9N|ۡ7c߭~PˬW_5+H۝hƹ.^,f]'W؝]e2XޱaDB]gr!&_嫙Wɘ2P 3:qΘ.a2t55Ӫ,kIşa.H\;lϑ#zAd֠^UB4'!ps;_?oo(ס~M2>Ey_|| TրWiN' &\_$dP)?Ftƀ\|L`V0nԩ SNG_صUyz:p5Xa6xƝg/4~(C.иy [o)6j4˺FeWXbbg#(~Vze e8K;R6\j@\<LvSi[!b[[ƽi ̇~qU#2U2A/Yi2Ȅʖ_]#h=y4b$GJ oɷVU Ќ3-x}HkGy:a {5RhO؊8koY"g.JH[}<(o'*AfumIä*yB|` kL <!"L&t+YAژzZ@+@w\t| |~BqZY.i#fx-8۾C=$I!C(|'CK9bEDl'Y# 9J TaOv&d=Ku{=B}`?5?s1"@+vL TKC u\ޮ{aW"Vp*_#J S;ZRbT ˟)ɼ$4TA"m֍b6ZQ&v"P>v0zcFRN''N<>M̲ B>7/%<`⏲7x3\(nY1زDc%ic ym0ew!g?Mx:Zi!+p=_ji/LYb~G^4GgPdie ?ks X}M2 VrYůGί,(;ٴGmIyֻn ,(ki!9 Cw)X֫Gn`Lj ^9ZD@ɵŹZFaL{zz|bF=Pc\su'ݖ!F:Ӯm ~(.Bq Ҵ:κg.='"!}OUzy1j+O2Ai}q%|YѪ(o ֛änuddh;E)f{u~;#A| 봺)q~샍St6 Ng:|/sZAJݻVG1㠶_[Iu&0]ɟ, I@0Ba_* cv23N\%VS8+sx5$k,"$ ѹtP/[аmu 0;F5R'a [̻$6#3]K[4߼#iñOd4}g(8 ߁y鵴|l!z=k<~K0nY+ӓ>32x3} I1yOuG{|.Ghoy/-QxrXnc;_Sf<v]c7J MӱCh 3Xe'KWGk<- u8tĒuN9WSө/P*vD]|y|~KP7AzFz9~~A+sFGZQێY/x[X}o%)m{`n#>oJe3` x rGY4پA(d8jkV~ a!I, CGodNL독кB픑i|×,NETw:t yLki#si{[Ip B@}K +E{_;ե UC65/7B'nl1W3)r.N)NV^/͐\-<;=xcMq!W#?X-%ܟoz՞wd&[1Nρ!~xɷ6J*QK-N$^+M7fjo#M:Ny$SmVC4 G4h 3lEm5wM|LB69|[_M+l1쮚kiXoC0&<<=.-y.vW~ב4JE5OT2LÕD UaL=5EwȎ>s}qQ|`L 2 Jd<30< MI0(M&XYO>,{Ǝ 0&@-"x-z\&Pа0ޖ6$8ӮrLT<_ydL 0&(6A cL dΏiB7煙VL [,.,6.ݝa1&=X=ϒKjZw+,(|Еze$$xjT8L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L =m$ۇ40GvffaNgL 0$wGgcqF8)a]"q)32mó쨐䉑K`LvPjgTL 0w\ {.)bJv$]=tճ:Ecw"&JxpH&րWaLN7{vtRg,ׄV c\sτ.ڴqtr ϳs&u\z&@ׅ(JM={4>=f)ǥg1@r{zsoɗӽxWz읉4.bM*ι)Y"0y@q<#RoC搷s%^̿+D\I܉Sx]ʗ85v3ek0tDD>SΗȄyo3M!I.?`AtyLNOOZTZTR)ds Fƻll 3y=j={!Mzi_?:X܉0QQD=^U/T ݵ)ȟt[I[j&fѮވwznC3&ߍsnk]l^#`L pL 0 4 e; ?8KЈ'S^ϙ z6]ӷ}ﳮ_ =7,4m&~ ]$lyӝN;R7>ժ͸~Ɣ)Lc@iKۗ)Yj>W= Y ?Z*\jGCYl(CcL 5kO@ %LF5 A߼n=vFʔߐ]Rxa!x_ mB$;MD~)yL 0&pL 0 H!XB?RBGm9t] T׃5:BM&w5Nꦖܼ"Z+_R 6;>aQz'x#Ɓ:HkMvqt~Ɲ  `LMP0&%!WARh¡o< /Ѷ r=gk9F\c9 U%ֆ5O(y8#tB{ב~7\R˗!Pv`cs-ӳl84/ >h("<[K?Jg<)>`0ˌL 0&Om&0&$H}3Di^ *7]` \M?;uL[6B>6ϝy"GrǛCp"6X764@V/tyT-0?Lh,fSq8X^:r@%6us8dzAmN+^= $]tr7_%hOKHrcNh  _DlZ{>ǕuW]Z| 4m22~"@>&4qt!V{9_oϺ&2 0& k`L HŒW#<9B=X+dYak|M>؝_fB~~^&#Ubq3m `0>6LtݗcJ[Rm(8& LOA;L 0K5usəI_dDA -]ð^))i-p$>{;r NK*14kjzz\D߸k&kg_yI[QE01 87mL%t&e`n#`L ЌvL 0&PiҚc婤ݕ  ؾ\ u+*Tgט`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&Z*:Rʅ{MU>;uH`Ltg8N.u]=2ÝEjnj2&M* &R{SK gL |kB<Nx #9+|UMNU gN 0&PS ﳅL~?)|G8Z\2qc(縘zjioxYj'@>񴁃ZՊIhRv*49&@m&/dpu)"|7ݝtN`RÎ 0B·Lp{^j9 S6r9`UC \}D&3NCA`Ex~#Wo~{Y,#7No1&_B"j'ƄK|g/2&P Fc*G5DNՠYeL Dp}!iA|!, T[8+L L=Xb4Li%Zn‚#eL *-|22& 7۩8L 0'ʾ8pI:΍@N}!5|p;>3`LeWi;N]~\N& 7۩8L 0%ʾ^Rbk6cGeL ,x齧?S8L 0A T}_pĎjI&BF>Gv*g 0&P#﫴^#hUq&'-}-]M^>Jz](*fU@ 0 D|r߅jXUg*RKoLLRd-\3ۅ;j_&$T[0`@$U 9rW45> G'X'mɕa{V[[$GDn~TzgjjR,u9cLgSJw`<\W.FW!_h&]zPN*烯1L<}+RmݧZRcw޽)0 vzU,hmO|>>};d}G8>3H+BONLt:xŃ89\-ӛ0oBoݢQ77мP܁8OxD~1LBZBh^msnJwfd=:$;H*xw})4_w0s OG9 /%\wט|aojl)Ah?v|#Hy m@ .Ϻn4]_aIvg#vzyMc\s{<ax; ;i^^M0?yJ|jjkuq=2}[?sެ2y629mԴd鍾˃)Ϻ\k[p?aOUsKف .B>B]gh3` J|\ -|kxǻ grۨTa/7-ӓv1R6CWoS@+'8S:FXφ[ʬ߲͂ gp}2 
Aňm|"P&tcXUӾ(uPah<s%[*r<i؏b)> &cc>:_ /|#"ŏ$ B46<.T3y3*_X UҖǾDU 8ybxR*p/ɳUU}l-NӤk/+>>&-l‹0zoMMkQEsGRz} GJAܞg?N2Vvh>Zi%~4*}Խ_PD"oeBr1w=X?Dv=S-LJ T-3]h@"LFvbỈ'k+rXaBBpna,ӓEQδи~L,ݝXؔ/DZ?yמ-Vh v=KG4.8N8l^q;׷Jg]Iynӽ L.\I<׼j$C| :Yǿp=yhy~3_vmƃF,O:"7n=ԏ结."(W#yWϘdUіctiF[?E?ּa&c ;rM8Xp*:F{jk2O$&Nׅg^JR T0HHvL)7ݟ_F>vPr}uRhg=a5-Ꙟ2d$&h9 cTSokwa~0">a!&]xe oţ_/6ϝ_7ƣm/[͛ zs]:wRV-WmxGmGk~TGRDU'y(#ɣڹcx7IZ޿Gh- z=8b@#Pm9FôÄSÌPb}3K[p"|v,4Ot{^25r$sYOJp*-ʑ#y(ms3u=cÏth@uP6GٔW ZJ坞Y9 WJ"҂:oHTxn)~%w0αz4X3Ǚi+Ka`?'h<@خΟXݟEF<:fl9éq| .$&T |v4&04!:&v}ap(}iH3?.-20! {Bf7V:Imj1qQ!!m{m;:UuP]E@ߺN~asD](u4^Uyj-o_7-Fh@*emFL20LP5sǺLB*]vh>e Ըu.hdq-1<>~qqδ1[%3ly1;BUth?ENj,֐'\kmwB#"W@G |´VJ Exs}&qèC?XQfOqo&j*hӧG+T}AR')dtgimK?{TWQnީ<42Z-ٖq{c &>i\0倉S0Gyя7 Hv2q%q"fO7DUצbtzEe擏L TXdDZn"ChF~4JI+p ON/qJ],Ǔg60h0.K> /݄r61r@J=޵G=YTͻ#q]^ĉŅzk|*H @:sՎvF!NCokF'E4"2yY`5:zJݺ e-_v4k&5)Ԥ&.ٖ[YS4H.HVe)mhobYm3d4‹'ўMKkIܩSۃd9ㄢZ{{?+q=Hkzd#EeF¿,W{N 0&@@? #{bLF`8L 0&` Ir9`L 0&jkcL2&`L ,ז'`L 0&X3`L 0&P[^[$ 0&`LF`F<&$`L 0&@m!xmy\&`L 0An3:yF?uB4CI]#R;o L]-*~A_[GhTM,F]);vJ-6ymiq*J<~5Jw^dޣ(rzqXp9m~ǣ<QÝSU''@ݮmSm O9:n7))K;J픦Ou}広)vL&w; OɧSC)x2e^]꧟EܵNԔ(\L}o$shhAmت[aڈ1'wChM.mg>|<;3_QplL )k/A8W-E:á-9lr鹽e |?KaāxqRyBVbcr<79_~cLj>ư~TŻ*(a+u ;<4zTS%zw8+ :cE| T;~Ԥ!TQE|+L [*O C|qxtdDށ?آ/HqlIԠQ tN'zFSs,]OP;P>_Q ]O,˻@:5ad jRU4q"^N/E$NmU)ՀQ"x#z~{TpOZ1DB\Awm#v<Ѻ>Dp8kײϕQ A4o,6n5v*4##HBSP%R OC5r#:jmu:xTMNYV#lҢ`_uh#ٖŬ.݋Q[5d1>EJ;UGR/%~CI3 q!6_Neunc+-&EV]W/55Ͼ_#i8ھҳE& WĪ{aE Ah! vßzg2aQĞu&3S>{~t]% Nl*{^ 6&F&WvV<*y:K7яU|7qDġĂ/~pVD9" ]{-}SNB812Eִ̳4"~du8)氷 n5!5%?t5KxUO53<אZt(?vE1Gvlv+ztj-b[:D&)_8bDFEל/dKI< BUw;UDh@I?WSlUn9, qz+EmC$jI6tWMOr.bi= QC3޳3)i\ D{]3wMKG, i) ْD HDעAlsg;EDL4Ym߳_|/sz=~>Zj^pŮWjU]ԊWXͨ^ÔA:@ ,?zQeFb@ٽ"g}r=rw{CoעzЩW"i;b#WhT?NN`YT40!|'~1K VXNC֦!pzj8X\B K(;/:C֋ʰqYanB~,YrwqbđD/Æ9unoUe.,ϖ\謓 BXJLqhMUcy hZ?a5{ʯޣWT3$Wup5@XmLq7\$XvW?^wsp=E0i;mgsO&;an ׳y𘛧뷈v-'xXZ>Dn~J TNZ+#$|xk1-jѸAQd uqw4;WDŏTW3BoUiClO C+E|F5Ƅ^]evx/ENG<{R"衑WڄI1ye,ĊW5-LB#ϖcn>q&^q~qZF:5p0Z/`нw4&z9hoWeh1L 4'1s>Wrv 1O44Q *F~o}_w5y wۧc)u-]f; g)H8}Zün,AJAr-Vk#:ifA}?[o8eO1>Q_L$_{m>c.90%s'4]x,Cf }ozuEYmj/"DLZ+Ρ W3&b6mg+byܳ{&ȨALǪrf!M/+qbQfɰ%َJ(VGϥle&` ]\O_ pU=iѤ.mi|G6AH=ߍ=:ݰ뺭аۣn킖tr1rE;ю@J`}ߥ% VQAĜܵ}a0م)ߡ69eS)_sH"TY[f}n2_v0Xi'LF0dVRN2?!;ȑVQs0%&:_ņvۚ+ y=p俸d`/<_e4XGF^3OŬ׾o,8| ƅvaN0?+?$k6lpY7#aZf̅'uys)ZJSdBXH3MhiqC>xMj9.#m7MʤI.8M=8?ZXKMa.v_ ٰ ]tI?7~cLcC)kR9`yq4#E\tOl2RhѪo.[VqFiU8V՞!m+l!4i̝w_%% ¥|'ȥ[Ҡy4јVvɶueM2"L }(^x)V:lzb'G`)ÝXd 4'P#lShQf26Vk׷yJµ!4Wg`XXw篮^4 x} qVgCKy6GPh&7M5 C,0 ]5W%hC 6G%O? .QGu^j,!O4h6I;Jw7(ͤ=p~6:=Z]6bfL!eV2-u6F] s% M 4h1npM6ȴʜESAƄ,xe@>@P6dyR׶X@`aetM:Z#=7F鯼M= + P%:fbPX~]+:U~-;hWV0lȬ`34]#@C̦!kڂ;D|K~^/jLŎ4wcv0-?fҠ|Y'W&dN1L_U^[b+~?.Rxz^:LL\hvw舱)iP#kT˚B#i]wd6 & 1aKnGl̸nW.="\yuovC+giAX?mLv[v 4aBiJ&KB럭om*p;\Ku_fSix~%ӗMvx{ugeݜ<13c!w˵<5絛 7ٔ8 i`t[ķ0=mi+<(ܼ,s!|ä=v)#[L(NLV64ô0zБcrrt6X  d:pw6:ܟ(#(`=chBSh! 5Xe&@ 72y{ yû|c-B/߰Y"; fqym Pl˜/l@4i&Hmر%n+KT6DW;/Vofiֵv&aҽ5=5>d࿬jTUkTȪ˧ы̅<Йbo.<4:?;ZR| ѽYe3]%A+C+dv| cK~Y/fsXy`ArK~ ~ۘi@q}ŻW[ӽ'g6Ea؅''@8ک8cvʫC]!`L"Q'AqǑc2TƩB~;>t.4h|mq'ӎlO@oe 7kf7 װ~,FC#MǵD;ϑyI-aokLE<7oe&XvֿG%4 G[v[ mx?lHB)VWP2Q pz5WŽnnT&ҳ3ց ֛6̡Iݫs[h{mڡL |˦ 0zRЖ?UdVbngf2ц+MYyHNS(کDbS8_ZP>.`LJ LP(_F{֥ 64gŚߠL$EzQ]`,Eܵ!L=Cj n@ W&sŹb4/~Tثqs&y&g$蛻̕bW>X"07 { v#/kGBmi+"֔#dCf( x9 =:6I2b.Dorms'WP/ʵƒnK~^'*XS;92+’nm[gq1fܣso3=M_y(PXFmg;:"iiBtM1Mq׸$vcv8;'8K;q8v 6`0D7EH E^JHBew+yOyݹ9sN[Nyls%TUTB@!xWOj١#G_ڛّ x=F! W!' B?a'n ᝥ.³XN{;F @ak8km/v Hy*_! 
iQJ HB] et=sAIzA 9`phx(pr|<:Ҏc¾rmFF.̵ڷmOb+AD pQB@!2d{T@6ߵOFK{Mw-qٛS퓗WhA7I(S$=v ףo  ;ؘrEZk?uAAAcM\6hݷx-&@6oDxп/\z# !FLttFe]09Gi-}% @poܷ}2up/m6F" 8!AQ퓗WޝJ7六nќq\%7?:'O,IK7&Қ)N' `=vE)arƄ۝|K[qLzBMVXwt\25},Mi% "16.ǃo()`#?X\AQx@}(=V$$ }WV*1:)1AB g.Paq?RJ+ sdJU2,oϣEu7O~q-zЀݢD B@!PqYd~}!`=qDMtE&n;[nM=Tx"MG{6;dٹ|&:2 E ;S(Ab6gL4: x"rfyk9(pmYocO]cXVKsk3deZ!P(^BİLR6u&W(_,}1ûym]ODO'ݨka{EMՐh֊Oٟgr&þ*ሙgݖEɧk 3'0mR B@!2 cfF쯺-(BکtIvvB8))Ot1L"wǏ36Sn&({ C;uCɥ˓2{p\t B@!6Vpo? గ 6SH;e3)υSY#I F]3fOތ0'ktk|;XͮK[Հ+pV$vÆ 9;;1sf Z+ B@!0OS?Xm7dP-aUSNe`" P EFe:߭:cWO7W&Wo;|O']D.>v4>aXC nCQZ^A2̮)_<}݆_/ CV+Ƅ6c&ODJCoJgo)g~#t.lva4oaRWe󔔖M_nO[r)g uݫ5mN?y`k[m(FǸUh;P~cM3[(&ޱ8 ФqXd;ơ_<_VsϞͦ:O6Ms<+$R덈XK9(ݙJ"&PQ%Uo^b9q"rfZ8k G2&H҆7uQmPS7*@cc.I'T!l!>|fCi=tɷ?E>?|xX^G>ͥCsUwjt}ɉOɝ%jQyEmܓJkvЈ=;g*}f oOw:W9U= "woSB gnx/30Z-ܞM'B#!Ȳ)1nKBH} m}F)'z{T x It(=[ݭrC@An`M/;e@u.IKl?A骷ی uM1{ѻDp}W7_7N*줠~N"hUt[W=T^nU$i6ix^AgY/.LOwejS;|A1=vۨ4{1O3 s*ddӘN<'tmKݳwnw2=) 93F+w3_x(+*M |"ڠB.Pcs/{ho!VT.?ND=t5elB8KϷ8z'@0?=?kY.-۩ w ޟ~Fx;Q.[-טmxBPѨLj:%c7&c p2wWʹ\( UhC^7J)gv%;zwHO]3iP5Y2o0(4L׎D)6j(=C Iii#h4q5F|3Gw@6V}1ҳ/hh4p 2QNdeة(.MGPCeۇJÇƟDh}ɉʮwOW'Zk2 ^^_&6 YY^_wi`9TCa~:OҭLq'çٍ@lr;$͡L' eRu˷$SәHwa⾼RZMя3aw>yN31 GLOe:Jdޟ:?ǁ AT?5Kq=찟U;C8.=z,?N;qpV왻Γ/Lѣo%-G8OD튀7 j$uV!kj8_ ܹb1T1-Bۼ]%Cߝ"B?keho4DD51c@*of"Q֘K]:Q\N1c#tnLt9Iq'Ǥ}d}5RiI_,63-ۼk=6{ q9"؎Q}֗dS9(2"͡=TRVI=%Zuu1Jn sǽ?pfкqxy1<@;i;z^PKk-oDGiΛbpПr/~ǞЂ?YpbܸIwhhlL A+ضH/sD0%egs9x j 褝ʡޱc|RVwy}t9.g}va: ȓ!P;9HVA(Pp4FG#:V[Jхi`y d;n饦*,\ڂw˔q@IDATœ^];Sw_<[x`kyS.Tiбs5]9`3܃߾"Lbje^rj}MG!s]f> @Gqo.eLjEUu0Ăb aO$=~WȰP;th|6|ԋTy@{'yYʶU35y _chzAX0}$M='\ԝ3 x P>´27R] 46MxAZO&#lI3<'MPy[,4qy jfy|~Mlz^zy`哯顛ӄjʲ~Ӿ0:F< d}jk+[W\rU[hܐ%Vn~Ƨ^y.`n<{=o{14AZeE`n@g&UaR<%eӞ8(e~QXG}w^A e[q!ͨ06cIJ˦AL2W2`b t s4~_=fl7Q b4:4 [Sku C)l>o,By'l+Cpf]??I1,?=||s |c(F3yTF0Ox$S/[_cfne E=ӼéGbhɺ]Է['ѽ7ȁST5cҳΧQ{ ^ K*裵)鷏!Z|G`hyLa|K' UHM=o%"6<"{}F%$<J[|ڣ O{5IYi?JhapjW2Jt$U4ofiq1}c$1` M u<Ƴ*x>ygiy7W;+k|l{^zk؜$ wl;?yILa} ~h<¶N*8wˌQ"Vl7ld[sK}e= YIHYG2vi Ir(~9c<:{pXMٽ#hl<svGI<#xv2\/[©lG>gK㺉Cح'8 ICzIYY3 i(nKk ߼ʹc[lM^"t4دkZGSEC\H$&oko Cq]6ȕKnWТ'KXБ5ZgYIg{tE}w2`{+SD;sVb3(yZ _+>H\;Ș F>A/P}W;_;gѲo$Bݓb;7Ni}5 3k]xj$rOq ׃'%5 +oH;lgUp|U ϸ3"  5^bS{NַUW?|#.@I9]".w>+jd&X,1#yA+OގM!l*⾏C7MrGy!?@fC 2w {N/ʀ;%p8߁9f{t6#  2 jSGQQ>[b^6볞c#ëR4F^ 3 fd/h|PL^"0 l,6\[?._IY|,6ec?2 ?酷?%vc6]t7ؿyr x\|yl~JV׫ٮߐi (ܾr@S+n_5oRm5~x1&JxDav ] ?fr;k'XJaqq9:B(8>dn`z%ƽ/IcDЙ>OdƋc(WQsH4}-Ytn p;5H$Ia| c60\=&EDyPnӅg3ƈN:#01]( vu_cIݘkv I"ɷ<546ɖwQnIXWkd I#5ĸ; 긣Һ^3nߊ^X6 a Eր׶hfOBA-"˾/_-fCi8>@|xtƁoOOJ43?`YἜ۔"ixE_Ȩ-]ZLWMECE U.P6 >A=@.vNME2a?!W/M+LNU׉KP!lfdN&0ÓO $XVNR:DZܿcO96-~~≑÷?=|X Jy\إ`6<}'Ĩc*e vpLȓ1)D`2$L\f|7>D6'`^ {,}ǰ.[ZFK(NQ<祆;׶+c"/ 1G+ C>˶{ߕ{h΍[Z~vr'\:u0o -XO_i^n]p?FKhHw-קN+X3.|lfw/kO`&w>b[T]:wlbR8oD! 
+UÓG1wHZDϿAXԹ"n>ٻr5 {?W vv/ۀ9sk0& 7r܆TH$`b{;varILm&ۺ9\7l,;g7_˚}K?G O.%A/XJK.Uhp {|)R1q#;a":_A<2 u=j |!;(f񗭴q@Ĝ:q̬\80-b~1>ܥl}˭"Xz ^5]ћ#_\ZAy<SÃ(2rP-3+bӰ`,@.:IK!f\9 %vُjI5Y@"5/Yӵ| w8DXvM# }1% B@!&6Uo4cԹC97jF߿cfJr R)YmPE AFN\j۲-8F #depx;y`ԇ;َswӟѴ^fgy~UZr/_- Y) @/RU(z̄FTNj0pD@Ca6]uOM]\{!5nѿ oN5ΩB@!H ]c] eQΙPޝe[o^C) 3 &R ӓJ~ۅ\\D6ba6 8p>QG_x})WT*_\c#'3'2mx6A8Цex\v|s"/fJuݞ{ 0W{b]VʓITS,A'nsygΕjP(@m6o!:rxPp"vR~Ɇl^ɦ(xKLҔ5ai/uEqp>0rcr6wiWt1eԿ̨Ǐaz^'"2 ?vmہo2 X_\fU6B@!PA(?J&%å/.]J?cֽ}.jaqڈ=\~¥6[[Xfx@JlLy\V{~^Z-a&!w 8Y@"Wy|9 9)6uwj&/RA&(,.%+tKC׍pŹ2B^k{|SEI0CQܯ*B@!Pm?`l֥O.tOQ LڶX)s̲m3+G$H=GGi8k6ٮ_6+IT}/ZuDEF BZTiyziΩ7Ǘ>MFѣ|Qp_av]QLÃ:wID)3i[o>Q!1 5a\Uvhdj\) Yx# bs) @"Ϝi:Q~"3d"Ga3WXJV,]ӷϠxKxx`%d4bL)+8UZt @4%֊ʲYi)Gczϒ x6Ђz௷\Hյڄ OEV{.zD\V'' ƉnKK}^`wع,e裵;7X!"T5RPuuybFp/~S&I5oam EiR)ӼwL+pwT B s asq|T,I"'0^&X /%%m>h61M [+ ʋ'/9/m׿wcEYssy-<A ?(<(؃ 'ZQqGA@c#b;9>WrZ)8) W; ߻ΤErR#,/-ШuJD/îۦx]屘STFlaEJZp,f1-KHݭP(gǍ\r)w5*i?ED?@kD|AGR2"#1lb8#nwo|7ھHlA1"/FK'_N ~)ݎ ~OF,k^9٧vDvCg*f\PŴ~!1X#'ipnIAQX8?E||1}Z0%:v /\,vkz4c2/ѓi^̥}&yD&v&(UkB@!D}ˬ.D{y@b/AA%I|C3T#A3hAّk7(jƜrlkdӗ@.=i=C}n 헟& }xm_-^HV?kS#gkkYi_s .y9ə~fLqv\䛛/YXwii؋ԯ W2/T^b޾PU(;'Ǎe+ ,=wI4H֒ J7$߆}zp[s8_{gYKI = _wn8C_O]hw8pUВu5 3.)߲⋥uß6eE$ 9lւkVmrŮrZb"@5RVMXC-*w`-Q 5\=z#&-d}0J @{B s쨻l:so5xÀ#Eie 0L׍!8<Fd[s̽>_z11>[k`.:ɰTV=DŽyK^Kĵ?}|m'y|`vCD[ %nK/6uH.1<1[y;BHm,3LmUĤ{C__c)i2K7& FfSc&'nP(@Ƹ1Oc.~T&>=nmMP(bŠWN5u]j0[ 2Ytcv?>U˕%Dhy 9/xwn'OʬMv"6/ >#'m]/8/r[qDcQv,U­5 x{ f'ASPu ADJ.Ux`i%zu_,_%uZ!P<`%v*gU4->Yl f0f/NhZ׋\йieӡbv.SޏȠFg#K (zH>Sǎ,a2U4+rU%8S&+"MP@CxXBKHpńRwn6^)DI6oo =Z \/F Jyت @ Nˋxo94|z{殙|_aW=2ײ3̴gO47Muͷqka]^24 Ӊe"B6,]b53sSg|E S0,N~OU o'piY&l?86U|uLƸQv'Ne3|:P"$l6 J\ j.vZNx˶Ǯ73oL\N&I%DgD!PUjqS[dHn|-9~C_Kˮ3.y-G-N܇ +pF ڿ-a}.ndDBQaZXEժ#b(7wۿݰfGyy)&4`5w U}46CA/z( b*1d+lvr8/#KEY t< |}dVUj/dbL~BKzFSTdEDSXx_L Vܻ=S.R3tK$]U'AKtD() aq=wN8j87ҥm("[xiZp&&1 BMTLlRQ.NXpB;xL63|XEhA6O~{-;h'1'OL`6ʦeQ¾#t)4)Qg:+"-&uiv6|7:ݘXƿv臺v83oZʬgoZL 8'$TeIbKC_ÇZWn4撘oK3#v~f!cݡ\`,+iZfJQ KJ z@SZӕ'GB-_Z/vE l oLBBqƸљlb托C~؁sB@*A(!RˋɃ=!13/L&6ւF@(n@]vz lCC-b JqL7Kun05?!Թ:2loFv o:AEoN[([Ȅ(ےCHYȅZV/?pI.AݷA$A*Aǟ.7Zy_]y/v^.H저cǥݷ5JUŕkjmrpV;5A bfesf$mG`C tPjPo 7<7Q9$FGPF6vPO\&m<%tR^k&G9zW`2fӋSÀ;}^S="ߞ@i|jWSeC 79o! Dow X\x?$$Q> 2?͉ p`ҍ5k曯u ~LЛdG8XfU. A2Ax*y"Smp!4{t: Ѐ'hin wzNAĩro" 8r< č!r` k>%"*m8%)ŤKi -\¶9<҄3>FNʔh[Zx07&{vf%GkLnĄL7˶~gTyY Vwߎ9O-E[ a%MoJI iOƟm]%@рWCKڳ|k|˂i?ib0ܹu̘gf$%]盱R$.r# 22W(eK'vg%$ބDd"ERO63(W0j5Wl 檸Hw|sO$aSECت 8>m𫇶0;ՒF} R̝l Xb-M/n="9yQ3$d/.= -,{٨b\Zl kS4p^)1^ow2^N@'Oh9HT9GM;ÚZ~=KM)oWgjivEEռ>U17sLo(b#M ȵm!`t;ֆ[yU N8^k%-G8"b^)*F@?TVG ȷt:䞼eM7RYE{ufȁ?Oȸ{y*e 7Pdnj`~wEx3T (j#P|3V>}EޗH|{o$Klm[+'][ߕ)k#"tMDLWjC!Њ`򯭊=`%CxVTr-npUJm ="|{F?LD[*_ E[ၘC !f-8]9i_Usq׀:u\!V4A+LxAQ;O[,1N.*"m `dt&m[+ OlZ$q=1"WWḛur2)4A HLPڡQנSߊO_2ϟk|b#]l[hWd(]w  S^A+1H/Vzaaɮs!aF")XRWEP("͆.`o4IDo/"TQK8'ynd +V|oT־G ҴNvzŗo}{T-H7;j>ynȯ,ZDoᅊ캲Kr:̎f%a뺶5kZFi߃ٿ5˂av8ovټ$dλ=/9ڻHDX[rP؞qX:|M=IʭhHXyHJ66"uf44O'4Vg3bݡ򒒯?n c!w6zq +nȘk v  . kniS`u젊 +%?t*(.ڼK!|uHXaU!O\rj̱CXk/ȹҌM Lٜx&9=գ _QJ=F%I=A #t އSk3\"D%4[O_Q|I2cgH?߰_Qu@!x|Km檚 1qGF,OS zDh#:2b6WG?-V/,Y`0~N~g\lIěT[A*\~< $;]pJ*$}^JmZy }M"3'Vhv%KpG 8,ѓ))-ܲr'yid0$ѿwP&Iyg6fjO! |%i6U7TiƟhb`wؼ{ձLGoVSߠ&قˁ50L_4s ނxVe5hpKXnG͜UFΊiw}7N*줠XKA挫uA[n^}ak9cbєMxɟ=x J]K9u7fK,ťl&[U4IC@oFhëٯgEeL0)ŗ)k@淪L7EvFkht#j=s'53O`N < ZPhzV(LGF(oqP #nq(Gý(û%fGvٮ=ju O!<+yx_(3#+7SpαSx0DGOxAkrg@Gt'em׽_{Բ ap,b6{/ߔH+#~\j]NPկf>J;SǏ<#ԭkJgb \0<:C+ ΞP7Xgdպ%ZpMw#_~Wo9ˎ2Ai#Y#6p03+kW 'fƾ Ձv$6]z_r?xIZpb۫f d6 AƏ?K6j98o|x?ig}UF#׶n(5>Y$$7ݗ=esv,Ϸȶ;#VGl 8Bf5jX :s%G#F&XLlm2k5HE2P^q%&n܀X[IF67TRܹRV5NmxIO^bxv%M<|0pwJf:Ak[44;G:B.Вı9 ߃Y.g_kH:j )uM5XdjG=G4j`/}xO6 K֓/ϝ SّϫFBΜ6tM l'/9`. 
{]k4a x?Y+<@UbvR$& L=;PQ Ti%MFؒL&Mtlļ wpzB޲' FqDGN>=ku~v>f =h6ͣIǨSTGG÷LY$;2BcҳΧQ{үVB;ټ,"a/:;f7O'hѓdd=aX?qmƉP>2M/yF |'("g巿P{Ev :DA&DX$1TP6·Ax< >R Y&IgBZC 0@O.ݘ10/pJ'unHzall6 IUf(O璕M0>|ө7[{orl S &PBB|KH') lmEE\iNSNWm}fvwyߞu;Ix4m1JIR*GHH削tՄ!H# 7+9CetnjқnJz0=2R('+&w=xyhv aյ";vW S֭F\gO=d#;=k\Ex֙5?qx8șQ>[;s'G9V9u0?q_)ǤDI" MP@bc)2yJPvfԥiG}KC Мdꝝ&pЀ+ȯP1=)dv{1I<~ m x"~\楏jnĻkw@VZvQ׀!p뗞=UlOerτ`ޘ4Och 8pLJ QAKVA`ȿ%apn`=,V%ZYn7;Hxx$˟Ra~\ 3('ӫ)6v}ɺ=8k\ԋ஽,Wxb +Rqi o!} W N%ɛQA&m=& fZCv]gZs4o(eDÀ"69];K \4Ai" u"[!]c.h{CwEx _}zws !}{r?%NH %ԓ5/eL|q/HzVWCMS3 Sξ7)ل z!H0d]&q. y]VfСo .-K A;x³웹yfzW}O%>m+?}klן6ܞ xك|KUΔXp%XdMRbbI.*bR{kCJ^HxS\VAhXL#mx+<#nWG9Xy@6% ؉^>)YLK*쭄WV m6%-3q=B99/F7p)Y<gu# _1kք34E.%aQ w'<^uMpNk׽Yc mZ>/gdv¯c)I +ͽwڈ~h 6oLGT=r'kUJ\q?}b\Z14'͜:_J+CSh]l#l;"yf^]wỳLMR0͡ūvۼ;LY:h2 L6{lr31Md:ACg/FMTLi\opj\UT\TŏMeYz0 p$+NTR9*;9NIN0?ww o~|4ctL?76\6B,0)|LR$s} 4\7u$b.UbgͿҔķ(PB3ہ J>k"y˭9.Mxm(usd[?Q( Іoxἴmǽ88߆ 2/9~3PybnI;,]MIK^s;nhyK};<>>}}|]Z*4}O4Qn鴗)Ss7ػ19tRA/8HǐnvD?a>49:l㙄{½=#7wzޏtO&# m706 4 o52uIwFaBҐLmycg2|c"T7&AlߧW"~ G/i+lDC?8+,o?-\XKCw^PjvN-23{N:06ٹ p&E_mWmҤvzlJ`NK @ [3_h(6>B?rL2H}Vx@ $?WmJJqO&6&6&N&.Ąqod Z6J x̌'8t$1ٲF?{|Ptk̡H]T"%A;UB؆I po)Z0k,[֧/>\ON =fhn4{2܍6.F~+ʆw=սo.elIOMķPaE@KFumXvp-'5ߌhzgfԺ U G)y  .AAq"C}QsZaoSzjBgW<؈0HqI&w6j.{d3+JߜL-ް3Lnowf&ZV؏Q6-d0`mfzu; $v{>g].em؛<3vb꾓F# 5$<9QpH ֬#=ք)JM}"/FڴX=_< ɮz6Z&u |@*A(aN-/&]Jw !/&=C$Z.FNʔQGd wdC4.$OQфD7šcdbA!KB|_`-4'QGzՙx7$C%Bzb)!p7teHb '8s^K֝ E^x`2w{{i>mnTabPFn;#Nj}aRC $tSn~ l]{ۓ+;C9D0&^~ics.&;72#xt0JD@i]i`/p*J ovE[A<hVх6OzKda];[*pM\`Qe:G'{fC󦆅|#a FH;* A@;xc xʋ,PgլKl:=U~5jnǦVc “EpIs䡩G㲽iT,Ao5! X6(+Vۀ@GPU;!ېT!PA@:pEk9k;fO,}np 70}ќq_}/RVZ_7Yx 9] t\Ic#&JED)X#oڦb'{-Gt;ݨ\ś>vmIUvfMQ^{GvX R퀀"}˾#¥釫'kw s*07Vl'D8p5BsJ(-뎴l)q5j^;ck)#(i,pX+n. 5,\z |lLAp>9/(8vF^b¨  "ߡZACxK`]uQ2r͞2&{I)WiwL/Ҁt%em~vfT p4K2)cW%!S<*!\lӀ&h["k9+t ۢ/S|a,}Tw{VPEC2:f$ҍLjee. LoGgQvF2 雓E xavNc&5*6e]$.O*ɷ7!F@NU. X{qIh4W4\jcAEaͤ[ϷȬԺ,'^x~t:u~OWe| ڶZbi|&zӳٝ }|A9U:KF/^@l!%2:$ԡ)vI$|6. up-DkB AxjC~K ia/H'&\~ M:-m@zd& \/pMHM= zuuB@!P;~(SG ɕkqZy/?vnIw%~"&P7WOqNS?%64\jȹuwFH8&q.!b(+@f Y.tl?Tީ14[_h2A9]U@uꪁjB@! P;2*pT*af):z_:Zm؈J%|'j 4(=yޚJ_iB'%æǗN. -,rgם$Lln>\Ii%HO C,Ovv;9]NtWlt)ﭢAw4wVT[h9~j^c Mx'K &(VRV;v_*P($|K$w=r`O:s>Zvޥ唒hZ٭`lav"5 @]:>=w- Kžקѥ$Ty]b H/7pO5/jeɗNr bbپ D= ted<)H dM <+/ IuU%9ij;}ħJ*a{M [Fׄ {5|ωleyIIt"7GpH{د2 pEua%x#ڻQ + c[fV<fMNXQ"| {=qwٞ1p$ǎr2C>~pa-H$? ΂Y&a['o[oib#UT͜c$h"`?㎍gdMEDJbxWې gkl$ۑ<ڹBkNey+G (~Zl T8s4Vnd|4cnk =rAۻCQVf Զ)o~_7[r+g£r4Z{HBQêwvnjbX?O5]GG Eי 7?! muTu?[xt]T24?5uf O-ζ ;GƐpIX0vܹ nAw{ëpҵ Lݞ8m*\yƌ+K<)$h ܫ፡xo4;qi"VaG@$7Ls`^ \ Q~oaڕTUs]f$\ix"7:Ғ;s΄VBoRbqkmf'(;ȷj}h6꒚Dz4j<%u$Wxe'F*w7-. 
t JS"MP@l.^o?S5 @BLxjB CI›*D6ba/ЀoQ言tV 4 [|73kF+g-VTV;<& wC}M#sCDg+"(C(3LpJZ79bZւ*krnWXeHIM9 `;YccFyn4B `*s qx ҀÜO!^{ Fb}I@p#e2 ;QdY8իced 6ƴް 4{v~|O89tu:B]Δ ;`"a)cxevSs=oHDfM׷yh(kqY}m=y֩-{٥^o/ﭳJo|\Ab+sүM"5]r3pX(YGn\pr~oӞ Sܫ ]??G pJ 1@[y-x6Ir.=.gU+ر~5Q k,pEAÄKoLްxV%GsJv݈ ֧+e@YQfUmW~A>|ߵ=K~ }{ι˪tHqzl%GIGIE%ؑ[Y9xQ xP]ָ!E<.yx!콚W|f轢̹n.ûIb2xꑚdج"P_bH?JڜNQGdnW{WՕtXxAnPGɛJ$^Kh 8 TE]xպ;ʛѓo5C@@11g߰ΰ4WP*D) EX{bA`h6@a~\PaH Z9 7?&Qbƾ]K܈"(4xvB ʥmQ -8a'TeymQc7`l?3M7PyLIP;OT4.n.(:vp+`^-76H8wI6OwDPQoeQ H ݥ)B8='W skl(ւ}GND-9&5 ʭlV^-H{ ͝^:x9:MDk@6m_{~BSG c\yJ+~g%yc3B̦nk}.-.Z}'N?[9yus$X=:1 oqf/rK0?)Hc%mG8\,;`rifPFU}],<],Db_cTGnp WAj@њMnj, j?a[\^Α%l̍}3+5( rJ4 rhK3Z㯔|Oyb_̋" B Db 򍺒opn8BI#rfy8!xTT 74$ ("穔= Z6萡M|ξJQGv:A8Kײ 0|I)u-E;V}ֻsEvI$q]ǎ헗މءÃk%mIC>mș%Ɯ䗛ͣ[\\ay5VRy$(8-M%`򯫆=$ŷB 'Yy8 ^d@QV%Ebr3xy>QP_Ub\b ߚ ݢ=cP[KR7H CP'6”%=L$&XwqJ o*W.VjAkl|3 >0% bc另gy|eyOU?5TmJGGWD9Ys)muܦ1ӌmAmOIV1$<'6RT/#0?l] oBsy ~aZ;%@C>_'Vo"WpIoyIvS3;܀%ޑ& JBtˌur}Oaа9$p9.G׀#ْw:ݳR).&Qag+;=M~v>&v\[n뒰-7 7zZvHU]a;o Z+|ezi4m wv:($r詯] n=޸^e?}Q+e׌#"{CC,Bz+<[LvrCEūptz{mEa b, e$O d*#ll{S2mI>ݔ_^myú~K(:v o2R('3ztIcCtE1픝DeUqQ¼`]3ن@z]UV;dGRZ9Fe=Mf |NzM-<%ȫ%yG(;#Y,gh˾(Ӭ!c"q5O~u6-rpUcymGw,F>wޣiQ:}uUaEGNzvE0i%XGH?@LY{Lq;c mun8[\N/M7~'4+GF͖IΗPFJx''k]yt< Ӎ\6'Ed+E;{g7O70Um iJ$< \ʻ . 5?/(lXj^PQ:yXLhiY >;VjF)n8A;KWOLYl?\Rw#Ónb]TZE-9o0m~ៗ.l,:|{O)ab6eD__xw:AL.MT0iPQqnt!-{+Ӵ{f+h.Jdlo͝3L7>"/k)f@Lu1o6?s4GHM9锔hۯGSFzIvB|, goz ׍.֪{mzt8XH #Q{"!/HSZ#b,oaRQs"QSU)(\c+R~) [o߭MWmȝ!$ 0"2'Pf_M6O:=vk҈9tUuM}Ǒ`ٗm\L;v&j%R=/sr"MObqK|ל4rabݦ;~GQ0m@,G]= xF)L|C"MPw<w ut˨rculPrBz`Q.<a[fH6 X>`kb,LPZ .wII/{MPL՛Ii6/mV(j8v'L5\Zڳo!L6jB{za1-\=_.-|Ϯdį}Zlg$Wمn6YoRǓ0Eg G? )W`r"eXl ~y]SNnkoA}s1o.xr6ن٘\W0q)ǗPʥxc/"Q» &zK8T|5]Kl[[P^*e( pIhwhOr7ۀ%a]-ɝIUqLo^kkp\@B~!La0uwUsnq/,rm?T(lN Lo)=rZ؊tÓzlWOĄ/j<* b*VF@#5WW199B}!®wy;k?`9 Wюo͟Zlal^Zf/8>m.xUX퍄zve;krtoj.$M{o^Pmw8&8.Mb=w~g[_,6ceܾ-|=x}2TM"bdj` nj| |; +6Aa;p!L[?e3`0&Q~mn씄';IcRPoP [ t&h7m-azv/Ͼv}c` <9o<ѾoML2j8<;춸iN'~K58_~Fߩ:^K}B+ WL؜@K7{ɑ'u7li#iΔa|Vәk<, &*SQ;vw?ѹsTw{L6kh;NSaTy}l<{ iL'!l.ö1i] o|c1Q?%۰8'-)skϭfkSQE%pIxxȭ ]$]so`b x7S%۝?k@Plf_lO啔.\6LlmA`䡴l3{Kz"}+(eMKk.<70˻xf*ӂ7?uB(~i:_dK]?fǧȓU.cc,mO!`ȿvh0G& -,JCQ 3r6s#[siȯ8c,9a&!:f/՝ruQ@CD1$x P֭^C/_xQu2s6\\uرɺZ9ّ\ /)<|gi)o y 6 x%a~t^ל{s&fR <cƌu"}MsQDqLk}jGZ<3+9neϕF|9GLjbOu&˴? %FaqIh0ZF4ߖvoWKL*~o y(P<[QF&XLlm2Kp#'UT";x %2 c^PM׾^J7UFϳё&_5m)%oCsݟQuI.}XʰgK>g;PRܚ +Sq&}uu#D} c_MI%U}Zw0Uhne. Fij̛*:{yh0MO{ϝF16XQyɷbbbx1S8:WYIf|}cM)ߑHsrZM!a9cªl#8 ߚ꾵F@ų-5&nh/lތ Q-^5=Y,6-YKD-N=GixһЎcD@%yh/Ee>wIngOFm]N/+R]%QnzFG5P(*B v:Y+? M6 5AtVNrO3>/q61Qfu3U~+0f*3Yȵۊ{KBC{'Kx. j()GsKP5Z>@;# )ys%cbh6Znn.'ςd\q9͛6 η9lq dgn,_i~ Kl Mj(]'ߏ{YkQ0:ǚF5jHR/臙|W{v? xO|@6JVODDJIE5uSb&RgM?q?:Y &|溸j`5V$dg$ :Z_'6uKwxW$ZnݱK3nj|IpQ`:<`595Iٹk̘a.]jjNoʗ9_m-MK!OW۵*&lf/k8YV&v2sSC1e6A*_|o Յ=dfx'݀gL\e/wT V'pF& }xۓ7ʤ869pT7?ƨQTugwbb%E3v6Z^\xTm9~O ,޽x(xjk_2q=+ 2/)i2~m-+|k6^(`qx1Ek֪+x4cL>{ߠKE[mLᱵIbLz!qL7uN&na *PAYA412ȏL31\ƚ &P诃xuO2R$JMM$^@O |Méd|6OJ׆>R5a2:¶dtGj꿲 uؔSb(;^kx1;9Y~:NX~qѡlOzaZ#'evU[ ];)l[е *091|Lܜ;uYye 4 ð g6 p,4v |#5E#fK{衧,4~}W7w]=j-Կ{jeœM<ەҳ WsNˑd6xN+v:LfOyEbcQ)gN矏m+a C%oEN{}q4&ྦྷeKo9%flb$Mu *kٮ%i'9,gij1QjBmz|$'mLS@k)77K#`o<^\b{j*)fBT;1672NE65UU:ǛhDJe|7 {[6[% f$מW${ {F̞Hq5ZۮǦ>ז}3MKR9W>e k&;zFPxg&w\6\ol@Ǩb*$C(SNAV m:Y<_LpVz8;Js:kǣB[HL]t:l7+̢ɅL$>J ~5cΔᄥ!K98E <~Ke 7Fy{pox3&7_\^g)'1 gNOKژMD^xDVPϿ)~V^_g(N aa S3hٰ1Ywޓ?}#x,Z @/o>rlH}򾹆%lnbcn!_`0S ".¬xI107 }<|b |12渃l5q p\͸jx6 4{hq~L4+h#~<#3X~=|K>PkX$ 3Ae(8Q1~ɷ? 
ҐHx̟|kl9-Fb2.Һ%I~O ș8/*j[ ǘ|?3 }ή-;9{/7.;}߰h ﰵ4|]%%*mz^?,Y?x4_ǷtDWC b&M:C;"1F U3H8p ҅8- %Vk֐qywڀd5J1Ҭr#uNUűtnzrr6g[*LUH UoņLC%ra9QD Ri&; w83׼*YGXG(g6O|߰}ChLCKNI ?w-Kitt޾nGR_{=;Up1ZjB$f@pBpIZa6 I{hAU'i t6є!&ziϏDUkq~5#O [ޱd:U@EjM`RGf2ݨ[oNȌuQ*{my: ^T!I׺d?߳ǏD{nKYeP'EhЪn7_{oHrY 6^g&ěMN;QSzJ,j{Ű2kw=àF9.%"゠6 3q6 At_|B'_=9B# 4߇vop~IЂc(ՠ]nWu` .FGRS "!KOIqޠfnT &bh3࣊G^e oevҺJn3l5o._<<5I["v:FJ>n-ss)Wܫ$]??G P S@ˋI_ݙO Z3J 'emI _qAK9gsTz'{XUpYXm`lw>J,Fn/JU*eU;/]kH䳈 nx&[f oًq!:H(PBG_*}Yl&;14 p &? Ev3ZG5Კ5rlc~J|?xx+N$R>"l]r-'ͺv^'Zbdg-lKoc݇ ؃qW ;;\LL3VQ_s1Qɑjuktꗹ6pRFSlʤMĔ[,C<dG9m5`\xTN'J*b ',);=ljb1%;"D?E#1#XMQcϾaODj۞J#!vLm \x-y0h%!15bI2xVY'1%9ldo򘸓n G5$8F;^f2*1=Q'žKX+Z<Hge u8W=xeZy~Ucm5{܉MWĺtN:\F<˲iZ#_Sln6>WR. "ijgj[E91>~n}- ą\œ,TnC aEJʶ#O>GwkM@ Hw-t՜gUۊGVu x oYŋ0`zfN|)'x(81$ȷ|%9wڵ/| QEǛGg]w\ٿ3bLO0}ݹ񭢢E.iB2iW52Y}#c7)9LR.4wEѩ#yJ7-UuSPL C[bǧLx3!ϩx7Wlr['e>%+21%wvb;pxOl,`kAyڵi W,? XMKu:G<Ե< MCv$1Y[&zq`崴f6?gv.0>ڰCʲWp H ݧqqP\Azٗe̹mIbpNQ.ӛ#-Ys;͔ђtS9b#Ot|g]X54{57--LLx!7hG۽|zmt:Ros%o)KEf0\rxv|v>>_]2 o)]Z)53k!=0﫱7ȸtQqyom0.@Lg mV HQ Q+J9UX)>ᆞgW@@S`- gN) HK]"?1 |0lz+D;9y@S ǪOcu.X|c19J's@sFi3Hڴzl%K*oko>F|?3}Vvlqv9#ۣ(rjI{> ")+/+9۞OX??GFX+~ѫ 66jQ%5pu(F>e٭ ūUx@`Mo-#_:lǃ@L2' OB|bjoI⬰n;)$To[DvZN/(}122>E ;ڟgs'hY^Z˫\W|<>ysbIӌpcysa2`M8rFqw/Xtqc4isK Nءot^{?HD`-<_rpCw %$rfLkŅpEE>0&,rJhE;T7xI5}~ /Kyw𵄥OI!]~|M:Wػ?-s6o.PLZp}ļo&|0(bMHĥ&Zbͦ9a#Cy6˗ 4x0"$̒7[{Jd:Jn7FC&Z[r$ϹQH{Җ{o'm7P?'מʮM*@ -UQO֖WCe |tfs=6xX[0d6Dʣ<CL GƯ5THJʈ[zAy#6hA`^Cgvqm.J{3y9Ҿ~Cv#qҲ3)n ~7[Kd|UY:G >o4W_AqHr_?N*-w#JJ"֝TH/GǷVxN*~%wԖB@!v<2SP^kքQX뻔]ۏ?2Ο0z=8wp#YД:7g-kd~a Ԕ ix z $3;Q p p'k,8֡4|?BHtK&YyZ$Ccr\Kჩ7|sksWo&_P`s,)[I/.G]{IL'-Qs#E2w:B?'d&돿Kdl% $A/ב?ȔUoCZ_p9(v NmxΜ AрI[P(1cfTs]̿o?=-|ggEO%z<5vlF.k1 '^A烷>a/hK<L kA$  f'r*&mB-E pS3Jkn7mRqz3v9!W1]5A1OZm:3y&|8WQ?'kn9u_K:vb7C]s̜x̌WVR"_}&sv< /P+$L _y3+lr;G|i`47'/oMpsjGIW~98(w<2e흗WI#$69'pvt͸ԆPK]c |ωjp4yBAi [.8@hrrTx%K-E/-#-.Nf[nsB*#wkۀi4bBmwЄ!'@!zL yj,&q&u'i yQſ8كK1&}:Q#!h}@0yR1]BދXMZUT6Di +:;T^Du_YS[vW( g 쨹pi dpZK*`WdCr%YpK3>k(kCiL~bEA%M[ILL[FfT'%-/.̙Ѡ$+ڃ .>`LyV"$ K#@.D "|*Ų{\MzIJD~t’ [Z?V]/j3灢$o歛=m'-STȖŲp[` }QbhO# 0O6}vݺq;RmF`0 _~[Z|%h_fl-Sg]OPx5* p[x'{pP$T */|dg #\+ µ`f]]3Ik!WN4G~MJ7x|חvIg{'M;--|+1 iVcqQ&f4\Wbm#'Wg ޿B;JR"FWS2qlL}%UiÙ}]]Ov>R?{8ׂJ@[ ŖS` \ c 3瑛᡽{i-VP&߄7{P 5% "Gyh׭Sv#P4(R?^(Ji\`'U 3R{H6m0Q$!튆fHgg씙E'vsFRZ?ZAW o1BB,hX4BjXd0%Ejy/e?۳ 1jX{>ԌX*:63^=FaA-Y4|#PvCT۔TJщ׷ޘo*rTA]`--׬9-p|$CnƬ exw |Suq ˫~ vK}g;sԷb>Z+©OKMv9TiT(32 I8@_gLX8b h+Zq^BBmSDDL022<6Ndo $^`HK4ݎ'_uY OE0^ o^M5.=x~m4Ći I\Ŝs/"$wdF~_@mqۭuQBz+⋨W0Ɔ23{xgrG@ ҦqMJh39c.vYd$ݱ{NDu#5rk|Bڶp}Q#2ٮw5np7|2(]q.TTOA>*v-oGvB #,*2yJ;b-Gr:U5tS[ޚ֙0XֵZ:'u|YTT@,e?"ℳ9bnNH2쎢$^EٞP3zs3(\Mȷ:FflI=i[[ľ muZr& f +8 7`CH`Иmħn]HNv\)6_&4G/.6|S<\KD܈(֭X!Q!l؎TAo:m\C!7=[ wne;OK@z}߼e5-L&Q4,?ϡ5 Sfct3ۤhXKf$^ JdUiiý{ʶљ9@mY/O7mf*ro݅a( uG Y'TH>x'r 3o*! 5xC/xS!$,/ $|lyW˄ׁ[{wbjKܒ _TBێ/)>}+QDM MT/"%`bJ-Bj3ol^PgwXQWV#)jFȜع9foGm~ۣLVD72>:&mڶۛ~YDQۼP5SkrTmĮqmeJX.0!Jl`B)ĐYϸbW?bpN~3eǏ p|AvB.m%%Xݎv:XP'J&(@XCAXHthzM`%FrC=@M5nV})Ly7ɟbJ,Ru&'iXrxҲ&SѭX_")@&o>myH.4 ϸ4\G-gga䔗p4)X_Ԅk)zTihctQ#-NdPPHuB:!UA& )) 6Dz:*$Cir$}k6umIIMih<\بZpܭݐٷgwf_@5$$o{@j&Kmk+uc3"m32gzG+$QY%M[ILL NhYo@Y2M,lS[ROs=u6k)2 %Zm+*j^\@%ф.vz5Y(ѴӪ-ԉH5m(ۭ*f/Nx{I`jZEzl[k˾"~<WwL_evU8SJ?bi9F l4mv3vDt˽qk3o'6zXek*~f`_h?oɄLB#pKKӏ9#uCB# a:5? +XfhQq~Qܵ[&rMnz&N?Z 8>.ILL03s،b |u?d[Po2`Nx3H64&Mo#MVosԂ$+2Ų! 
n^G7p"DU[p"4HSzBVY,}op/qI])<;YNGk۟{Gz`VhzF=^o1.HU: gY{yבMwnqcq>NXMZ |n—7!:}C(\*aUPue2nK.kwk옥`^XZ ɞp͏7o'^#k \[ av䑨MD 4`౯418n|1Z$Z@IDAT'3kŲklh7?"ᴥc:777U W{ SZ8IOƧS@4[vd:Tǟ!$<{$-[7ΡGYK#uل3'*2-,A_xfKy!zQ-I; ^sxYŦ{\AC(6È3Y,> RS-ljH"|_/dOz: ( wj:IN?"V%DIwu HjlMni!w;zm8v>P|Vt<}|l\9ӒHc7[Lҥ_m**h%PBN&8NZYu,oWY %Ac@l['qͷ{!MF9vvE$kzfjp(tzIb_BM #qͷ"Mrn;wn$KuvZ񄈝 48 9Vk7hD" }35ᏼ{/=o?*BpҀ^w3ħnN 2~Yw}v7N܍>ǑvWNG\I/nW_,I曓o&?$EQ'Z$SHxĐ7'ɞrv-Nϫ܄)mDHX&H/>X\*C MCO'1dwMVOu3Rf,TUN_y }{:@$vcsizKXK1H\{5&H%%C~[%am <(VыSQq+W9ʹc7YM!ᾇ:pHyoFc-|EX}2c7@t>gZ+M<нEo 4ˆpL4ap\߮sNƊՠ\OBo'8CIѤUs$X;x=+]GXeheɀhA.=>G^cNM>,}߳9&Pف''PqўS裕V{bh#I fʅZ5ІVEvʵۓz9Oy9@C#8:jl3/;1lҦ?NL-H+pĕ>IDrYU!Mvw )2%bZ=3Y˞˗L\J Kp# \iEIHqRnb|=R Gj[% Cp<K_lc-OTW+>5_AѸ,/[sfb,9?Gq@9;)UGgou*KJ ef]٧>oVP}=!O/ǣ2,CpEsq"9G :=u'-EAMb cƓۀ٬sJ,Vq/WsZk(($ܬ| \8j{8Y2>}w[JOzAq>Z[mZýV w>h,/#!9IzlL<3{<6IVJQxӵ`ޔ;nVT%Aɯ# @wV-ڶ)(`X@!"D|O M͏!ht)U0.ǃG7A'_cݴ6:z%"qX)](Ps+@XXӷCA7 ŹvPnkY}LkZ{ ,9`l{\wBၝ1h`+.!7O^i@"¶ pd+`+8}CgZ>.CgA㖃 otKޏ!8Z)k˯Rt>A@`Z Vw1S)({گkhy8#uN_&)'R=&Wzh}b70\=%d@Є;m۩`ݹ3oJAOVX/[Q~l)b&B}rI0LnK ?[X!(xm(\y9 ,X:g&5N-@[l7- RM,]OlwF@$:'@.)S qS\S`?yG!FM5#d,&CoS*![?h5%8O8r8k%+Spwaǁh0m8LAl\٤ /x{'2U4bhU^L|`PJF-Zk7)EXR"i % V[7^#+ MC=FA P|x?09'`OɶI`Ӊ[@DkO x`ރk+dMX]Mm DwBH4'!+&$?WП<]WȘ5B_vܞqѠaJr1Gdr8dFӍ {Hw)Ο¯M*\Ǚe D `X`3*EM sc;u.^_"!Y? ,tU+e!PR '3!'/h=3ò7->kgHW)W ?h8#H `WJ=#a-354jJD)Fe[O߭s)Yzm-+(u[6o1?[>{Ĩ(`Q`;xXc&}. awR. (ʼbdp=$4nA:Ÿ r R^] {䀡X߭E %EN +؉u\smKK/ h6SZ{ d}_Ekow*EB6ysq.H 'SF6 1oG5J=N'vȴW y()r$nw!{F2HS]ı xC8/G_8KL 9UݒR#JS!Lν߰RO~hFR. ̪޺\֐Swko٦gB*CO g,'ac2l6Wp4ubJ}#DZp cPѾl)IQe6 ᅷ5˪J; `8'&[A!kg]P]Rr񠒽6hV}6PwwT9o/ՒhW}:BLLS̑Uw.O%5OWKG4 N|m/>\o23xpj("`; d-6RO{C-yݷ)f'ڐRT;Lꆮ/lىFp1b"!F?@0A.]BݽAP. /o^-!{|m"41`A;p2!rKiV4(8CE'ni am'5nDCf.a=//oh M@1k)<[qAHnɄ(lZҨE?#"2CmrjШh1`*AGHVKIdL{#!3B1II9+mz< nhvh&*eG Eq8?4tOx[U|B 4SᣩI-l-Ҕ!S41%?CKgcZ0|NP2W/+ hHX._-ӡo1sD|>rJMz~WCdGlLsף5mr}ӥO_O7Aǔym?$];?@N$/*rq!dz[%@~F?kzY!;_Bot> r9#@Gv|/\=fep!g+L 1y9z2iסv'¡-h^Y??G^F6ֿL$W?+VjVed`]y+8gؽzJ4RE3@E 32gٻ/@Q2\rӦKZ9'e`ں$CK/ w:K:ݳq.!GBp(yDq'=KK83_1Op#@:ћ/̐7K }Qyeگ'%-5}$n@+)s69qLOt̴񼺯 Ch,MIWaIfsFK@97 <{P<_A+>EkzѼ|{ `.>i=iK}`^Rv@PNWŢjaMh8z>j$f??+/'JY\np8ħmyQ÷ҧ<^%6ӊ~PK(+,C]Mͮ|SrJ&[)}Dj* 2 ?y$q|S9Ցo^Ȼ|y"޾$T qGMdgm. YQU[)'^J<2.K+>q^4G$xI$43@+X:[fõ)pRח_r>+_@Tѣ1[}ks,m3UwԈG8<&Vժk+ 8w-##dZ\LwSI[l%tzUC "y?M{I\Cp#p]?W k.6wMW'=;PO?a)toiT6se+wGNhmcEbPW+p\FG| J$J}44g VodմpB&/[ /cuh<BQN;"N1l[O\?|m+37  2XHI`Yacb(-Co$}_l/h |/$\N+6ݛ6|؍7=:6do K`- N(K0zF";$ٔdο/_ _䟣o?_5yA/-,ъȎ*W׿ xW?6;gDr|qA`O@e`)$X)G#1!<ۦAcx߾BI3Yc8ԧ +=p)1Ҷ~XcF&4m%?'e`zleB㏔ 8}=۴n߱c`HH#C``N҉\_^2 eZLEgطsִXmUE$ t ?h:HlۮCpN:PkAk1kv.M%F3j/ٿo8?_5>Q*j" R*\0ɁbeoBٟ/.(8;Nx]1ظp3X|K1H\ы/x8m#r|#j }[< gZs&c2;ٴ3y}[LQ n@!_ 4\ђOXΨog8%|Ri DƗ6x'2o٨khe؁Is-|Ruvk#(q{q8Ut[ Pm;z+o'^7A7%)&,T|DSV#9#;^|1~rʕ8=qδkjY||ۗ33#._]LQPRHJ0HWลJ5CMT`IRFDI ao"dVjSD~D)pʜګV^CT^mOť>^n_$ .s5|Yf%g{,#R@XQÕG==irz[xCM$W4kC{=0AXp9oҀ2 "'X;a;":soIP JȽ C/(DiKt^V)ur tR0]OJZwڲ;T9k@Kt#lE UVaɗHlV75_j?T=8(o$߳S&G.Ap92㓦xmzzZ&$?63?ǯےxXUJ'4Dj#di8\.S/0QL-(Ο4\{k౎"=LOcU*FPF[U>)Za*E[f*>#\=W8M̶Kf'#M"@5jG&6p.jL` >0MvF%rjx[s668`*^#(p"G~5on LIPɄ:z{Rm\} + C-6gJ4ߐ߸q4ctY}M*2 O\by!jTl{>\+\v|s3p҄Mh'B]e,_V5;18.u*3g 64`DjiKAݖ[N~q})g#BR_ڪ?FEM( |6@@+)魦|҂AX`r/iL||:k*T\|E~4kRp&GYFە2xZ P E.d&^pw|oL1V:{NNy`,:vѸ&ߣӰk h2]EA6l;@h@4TNGUEO=ʿRԱp4Z='≯N&<ʪyr?唛+w\r2Ӝ a3Lz:~424m FGH. XmQ IRroBf(C[ChvN7U'zL8>i#ǭ818~J 7MR҂U3LV-7CcTS=7?:94Q-|D-*KS3=95tX)ACV8/#P&\Q<pO~~NiC*;+a6$\(2# ϸ)|=gK)0-'6]˗.7}zs(bsqAh}L&ત19)SV}9\ReQ^=m Iۘ^a{)\%eSu:7ɉ̲^mn&qb$_ṥs'Mu&[,!GwSʎm77e ^llrCe9hf͹^ݿԣ) lwޠo>ijPX A}GC %nּO$ ZPR?4|kKN>i qOpO! Bd= KN^PC?9e$|4?B"2=ؑ^x3$'VQS'˸8 @+ /W CyUx$;vpt~f _aO/ _"`N?yq3OXAl3(-F9xp8A,߸HulߕvsLqma(ǹó4]P1=?v ҇jy/ L_ZV7C'u|r]9. 
,)sNx_#3~16+[{ܲ2lOsu`dnu5hzWKbU6iṿI?曁g GB?=B>L~m$ y뮻!9ykΌS9ɕo$Hж-n5iRc0( :=ob`-~[(@DH4DZ,q<U# tgƒ($_jf(U1F ,rTn^E8ECݣ1S<ŀpOt!^jCSu<)5VX2 uuUJBkɕ܊s_8ePd."QV(1@[Io8; mVk[X8ZerA+S(% f`+NX?b{rp,WXVG^LÛqZV7Cڎgm!wCž+c%ʘr[{{$NXSCCK1dAP]I_s# sJ|vȄ!%ɂؔc̒n潂/&%xӹn$zY %r ܜB~9kNBJK7ӪSBl'dK=`N"K97mȘԭTA{K8CPvp-HE9S]w1pw !Hz I 26AǠos:l'L@*zlH؁#YISJ;R<06 ގKOi&ߏhsh{yJth7ȶ'Sf朴wP}i/a&rȾ;[d|l`ow,K|9pAR5N7,5 [r5,/4UE 6!˅6M45ԭ9[R2V錿ecp!t^z4.4S|V"8yR{3/41M }0g ;(A^I . Z \""T|1_X>32a4M =ZE6Yfw5{0QsΙ6+,<3n jԴ\j?uR$Rev0=) q"5 閛m,*:Y4[ِ'#I|IkD=cAU/pLܔxVcׄWr"IJu@$ ~+-Jaip'Ic4ħLƊ`p dhgL~y6 h;Cr(0xJI`hc[ -Ek=rه~ ZT4Db~A <oV;m7HE&&dXy/;$N|8o0pKXR!LUbѮE[qբLq&[ch^3bC|\9DvδA/5g e jlj#ʗ43ͷz+JwRўy%eE`]ȝ`02"4 ϯtXx҈4ŻsW^‰Lv\w6&CMeԷxoz<$!mق4` hEX4Z/ q]> ǯ#E .H0:aRSB دϒhȃ/ۗjl]j݇'77z,}=v96<@A<_v8a?AH-+O◡X7kM6>Kh+j9Mivd/ ީ֛=w0reʜ]P])ǯ7=^Gp]Ufj벭]7u@=jCՒq6T8ǂwhl+"m+"9ZG ~Ö= !Lىc*]ԛ+{2g=:eFMBUP5Ei7ˍ "ƯF 8;D-Hx>N}zۚ;KxwM}&x Ӌ`8O`;艰ϷZ@Ro(Qm_| @oKN+p 49m=weP jJdV|mp3FlE"o(Eo~%h P; h'>b`ϴQh^CTi  dvYb2גS4OjҠ苕u`%h7& P74|_JW$'?ʗ:#ɷ'ӝT- |kP]3e֌f到 ד&(X E,Deerj-e͙\q""L7I@pE2_v2Ca%ėۡ[5xPO{? "ql<Pv"4ߙZH(bkLO3y9 % \Cۊf( p& r_|%yUF1g$ۣPq0#PpQ;d[L u~o~RlriXvD(4Ǭ o;Gcp.0oTp G3|V! 87?q_Ual8vWXVMP.RQw9 /#'s}je%<=+H{XnG-s؟utފ{,.G2!3&(NxJ(JAx$ n~||ϥ-8yD$?:iEPS f=}7 ~^ lmeFigf8_Xm6 /Ƚe!Q.M/@J@-Von{qn< ,EȶSH8ZdxL m Cf0l`7kj4 Szwt𿮀ͣ`Me_t\2%W+d97ٲy $.ڏ;ǷNRuaup AJѓga;3@[5 wgr!Q/2g!C}PIoPW?~`-#t8oarAN!3*i e7E+'ECp]56<ZڑJhUmCzó^VG푌۞6[[崽d(|uq8Jʞßp%߅eؕy n;4 CO}u.W4ik]Zfz-^OccoJ~t-C&|mrZ%uU/Ea 9NIwېDu {wTy'|keYS@k#?7Cq en~l|O}J:mJh mώDwVEMY9ʶ͂ޝQ֮%4 MYpHx50pM 8Р.5-/4Aԃ|${[#іdM2hؗuJiѠv؎Ҿbd"ԣz&1@nmy=:VG&0ߓy\ѬBS m72ӻUXwN~ ͝8.$n~f|Op:b#mOP!4I~d)>͑`9 lnq,RwTnNm rY^& p>y Hs !QތǓ^j|{)mYz+,_]1PT,9o+t">D|( 7rQx)?k[ >Vo'I/JSo=7CqMy8v+ҿٻ*}敼BH'[ -4i, ZZf]*X־*iJS!!BMfܗyy 2dMsw|swϱ|{Y̚z&"j֤-jey hfnCp7PSu5j5 37Y{Voox6u|tl/%,ޘ$"i=7:0bP2b &;ʵAv/Ǒԓuw$Z|\iQu( nд֛qɷ|lK$h|xڇ3PC~:eR"以95⍵-?i  55&7[m8'\<.?2F|_ձh|Xutڐ$~T{Y AOA+kDwq)eF<9'J ƺc\TqGL|;b:YNrC7}`ޣPTZ^n :5eyqpJ+m)F6T$eHZX%pE5 K2*xvö967r[jSv0[.;~|D 0Wf8k;Qvv'Cn0%7urN+k\ylK:wLK~ݻ(VWt,E%;%@2H .ԁh O;6Y-|vrNyl'6e'ߌ7%A7w E N䵰-|mى|m/ 2 F'C7M{(k\h٦s| 87p #M|g]CEuͣUp"$áSͱ~'dpV.T玁3xCi+h|yUHb_pn_a*MmZNfM7 -Gp l5IxDp 't<'qsXU뜡Ya.'i~Bp;m-8\N3[Fd q (D1[}p9Ja.jI"EJL'+nS8 A'|[.'s H"$o'|e#

L-ut]%:/I3kY\<_ 2Gpm{ژ$ѓG1ZmɳOGp,-sP$`Z*rƢJ!/ >L躠<Q9Gp&" 5 Ygi%y#?<‘#:([!;i|GO8ap;jZ#7@QY /o>ys]"J7< vjo6~+W7*Yv8!~s@./c!h2GpX"  O_uuu-贖]1'fhmuu9Y)8 |+[D_lBKӖpi~_^A)o6Ù>8 EOKn`-CUu5΍ƴ˳c誮r#Сˑm8 A'|pnG-+]ҁZNu43.CSڸ<= YmQրn2 K*v0r['~ؒ:-7c5Aro$洑tNpsu#-ODaxd_?a r y`[ﭫf(vo^JEp;i9[#}n7_2.~DczvѨ "J;vEpOwpA]9 8U5=_$] O7/z : 9-zZX[^7Mu:p(2Gphw܎ZIX HjD2$HjB9 (6PLp+ߒo%zGZi&~ؐp%(&I $ueP8 E2̒S+W_M@é<¥ s,)eJ8aݬd`κta$h*bA>_X.MzA%b[sG6G;k$W&?s|aZ-D ! E:8EdLJ5'la6|7;G2o(0A꼟̔W*Lax p- "-Y˄:]?z? GaRÙY@ hwBkұPPRfgK~L]Ȓ5C&#s@ `jd 'Jxy NEe54k cAx|fX'z $;zQaax 7i7*> =|>f9 ݹhQCh[$G `J+jOfžl/:(T~ɷD*EKPtm/8IA^~!}9m|ajJDfa IFʴLRGfJ~J4 s] Qru1.+ہsz<:o,Wן;^SR@2O8vWGp 4-,W:oF仹Dnkxד掴z{ݺقKK^rd(Jpޫ8gPwS  2 a jn)I?D̒qd(Jp@7 C!_f՜2BWplC3֮Vlrjd( pݱ)ύ#Бpޑh8EϞIL%玃d(Jp8c" c+GapVJc!qSr"&"CQoe8v'v4W/z ;?) "|d҂eWjk6_nP\~w'8hCYǠ GC8@@q]%-"UFy)[d(.?以Wߏ#`{x$L۷AJгkG`&u(AiyDu CzAנNp43v݂̀䓠֪a~PU][񷻫aYz?3.Fq{xfl/Ch/ \/c0R}'RI9 3ư >zoJSA}#Ca,|}Mn?G21 ,dJ@ jϞܴA/#^DB(Ǝ6p6FV~g#$,MOeo;h^@+Vek&;gs ,L4H71Vɳp$= #)5 ad !S{sku;2E ~=飐H1fJaI  ;~fyR4Ͱ@_ Ͼv08yg6:{Ġp/23[\L udh>9Ve(v*?Io^mE# 6Fj$-IdEʬͫ?!} ^'59iX}x(~0 : *+(eGy`0Ҳzm+FDCg~H@$hh~7_P<,CW 'ߊy.AGO(Ug㫖N~7Zn%O]QŚr"%76$0"dd]39pS~QpaxALp9>ɸFZN|\n!Yp虜KesA+5Х\W+_S*[g,"RȈuckQ `X5+AXҠ6ޕ2Ygk(?ۚW?Gcclsnd],ϕvCBڒ2aa*Mgၰ@:n[ISG߲E[Ms"F196ZnS,"JV|SL3S~gS /֑{] ~cYUtvsmVv'RС(CqE">$3#q,v[h^V!d(5*k3FnK-jU&'|[y!%(úg/@Q' ͬP-RdwCjY7˻^ue,OC I\*ap0z(Th&-KuGJj0jO<4`Rse0Hx{c,*;B޽' EHH_~yrQboNRpr8 ,~(0ۚ⑵|2ZeY ۻ[0|W0y50|xU@k"$mْtw,xI\!4]ᓟ'܉tژ{"/,_ >qGvA,Vm7ބZs쥹21ͥ'΢=:54GyJ*wwI#MSX5xqA;Vʛpd(6MMOlT"Nmt]l%rHDB=!OQ=b \Auz)OOsYd&7m AtJy XײV 7_v~l˳T8t2WcLO,,/ThyCi'|wdc8'o˴zBQJObd[&EVH.6%I] \$Fu:Óp%| Q.XnVl_m^YWEEB޿KFTTW~C)Lv]l-?-[kәj /#w+FW[St>d![@8{D…)(*@uj l8+KrɠsL@Ծ{<`*$MO]d~T5euo(4EqGS6l`^h#P‰ARɦʊ򋥕BEU5$"$D Z-#PTWWC\ ȯw.쁻FF߮H*-%^o '[G DۥG7D, O!|e Imuu/7ͣ|18f Fyg,4eڷsU {C9=ea'f_(Nx&ϝ:VmmH) $Zş~bxvQ\ 5p:\,/jP^d8GFQEUjnx&+;oOLFO( j ,?Hn/\,@3X4TGۿN3$CIIxRY5 @}4JO86!8'o^ќ %J:arqґ o$,7, ,H #[LFfEN~^3b&F"I9| pؖ_ rR Oz%&d&8 G< W· #3pӁ!1Q4lE i7`3&Dv:MUH5"x3g֨|9T(z8$\Pka_r!~LvBo&A1Y$"?)SQ[톋<) Ȥs qj ;!褯5z6>~ltǙk '|;pn'kLq,eǶUL|5ۥo(C$|WS"yz8wc q IT>\ppg u1\FzHE4:ؚ 0'I N“7O}V~HM~ ])iA ?pRӇŌ'EGUe(Rş.V1J: 8xLy% uZYUܑJo}_@*\ cIw0J-De~>/yh%b~A}`b2$1ЬӉܑuoR;kuM1'oIxʖs|w,SWokj#j+N17bc%) vl^=cqQPzúh[\XlT]墛ङ_j56DavQ'炄V~He8 R,P 9F}ܛO>I+&Af:W1Qw1GpNӒ&RFq*nmi|ΓYKmdM8ҊοYZu3F.G"䛼, .paa&En|h^QБ Q^ +LC4|d'd>~ |Ajz1SQ[Y䅋de"n;M4HkQ=fކ\nht,XY# cp`l^*O|(>XUeGi5jo7ݭ?=*BσZ|VUB ЀR9]H>(K*8?U;Ŀ %?5'pZ8N3Ӄ^ N-Vn3x̸_bO.Ґ~QBn>ֿBUM{xm?AwF.PۀKLKpdjcp^%)TBbOV bDj W\z};O M;![xr"?$z'/ö3CtTS1՘]U?SiW-Q$PChgo!*7v_/n:\^XFJ(,) 㼤7bsqU5d9GoS$PXᅝb%ɢNsr,Ub;eL옴Ť'k+E1v]LM90y8A?p9/$7]cW9{I6dd1E|,gD Il PƳB3ȖbK~X+Cٴ\vMyyE<MmAp=9y!1"ǛZ5}HuݓRXuH+oEwM@NovqQTa-y#C]|nU?=lNzJ>R9n7G9zNq NݪDeyeey-Z_zn/ yHI& Z_Ue|BW ys".;5Ow2hr2bj'9٦DI~­#ԝ|(Om JU~v۲ns4-n:!w7Qгkw[h#WWVь `:׸IC7g#ߟYٶ"_y#SxyRF9΃L\9 mW,^8<]"d& 8:D Q'o&PM0}0inow{aޕ3KO.Oμur-KNMD9Od7.t5|{$:wz%Jo_|{xuչ%o SG R]4(D?SN=GD(U ~ؚrY\ م!A ps]-yF]nk*IpVi4ͣgO"-oTvѣDDQM TKwFJ`Q}Ef%kJRӅvE%u;-޷w\HxEDSP['TW FDO[c"Et,Zŧ_~٫R/) #;Pr2~8K$䣧W̙x4%Ka PI@^,Q=aaZ8 kШs14*..7H \y=Ƌr -p!"̱ؒt bޣ`R6|x K&Iԝ݂U!1'7p٩<(p<' PTZyWX%/ +?ϓ7H]شblZG#,0(e]ѥg4iNďkz\o][ՏJ[p>R4cZ&th㢧yc5F ǐ>c~MHvZVVY {j䋙cNK@K-gVTl?=t-{f:,}zpƇNwDP *{Cp*5J6Ef_+1D8' K%Y 漰4>a2\I| =! 
j!tANRNڼptv3wWT 8gMMdP#P۶dwՓ5Z^8E3-%x9Ynm%e[/-:拾Dŗcv\G遯>ܶ I C@8:9=.))8**O[YQXA'/>jϦ'll9ߧ,RZ- $m#:5~Ϣb !.HޑozfK8xT8q!N/0vCaF0&WOQH=5r q\ƧZA{=|N| G#`pnV|Y*ZJ8 燗?EzymgC 8 h$j3zRR}oG^ a;4J6׊^@XzKZr/ S3+/O]4Z!W 8 @{퓕 bs([\VS%J*R[^UxW w7@t 2o6xO C~pݨ D/ٿj@" G>"fYh @  YzA_t4.Wy{?CpH,&gOIװDDR/f%|zMwыW od].7[}&k^8M85~T%ůÖ$&ܭlFxHEnoy#p}L??CF`J>wzJAT Lm!7tU.zOqg%՜K^$^BZ*#"ⳮ />8S5O+^ RZ̾'?#d8aVg}?ǨOa' /j}I*t.+s3xH|`1'%v'I ^/CׅnjJ6]HɀXxSKGN:{bUq gPHrb^D鏴ACjkDs,z:f^Κz<97#B qtulndpx =|¢t:7," D78|?~bt6Oy艁DoЏK}|0G_F'@o w"چp OJRNM0ˉ߹`E U3C9t.0qEwϽp.{n{Dpʲ/;uJr}pSzzu^d,BJڷ wPǠw7mĈY"?:i(Z q'@S ͻXnҟXIڏp,=xɥRWg^CgV^Pb hF'%?ynh5 rzK_gX9׷-].D@?DdpIW84k]CO|N 'P>#cz]/a'P55({rCH$1B+F%U/-yTj|z%Eij1zwPwNכKr\ȓߔUVK[V[8@Hgs=>K%VlZ;)X=N9qڈ@xcEB^D,N8b?@Y^ذ]$@IDAT599h/,ysS'7\?yA>h?^}k_aw0(hb*I*\ <@+tw[p'⭥):ziSxz{Y rw}UZz،&H#Q{K zޭR "X]GkX6tKS٩V,*J+T. AnEY;˰v8g~][ۖ>j_oR| A$>`iI5aVz\.:qW:%. c@k{wD<'D@4i#4>%Au'U~h2?C8B0HT$kmmq`+[;hΖ+@ji<^:+OJUU;?H|4At~~IAao%ԯ{iH(&ꬢt{_~mnY<"2&}Q({qax[-,wc_Up($1ƩJtwP>~u?rQV,^8-H^Fzo>p7\anCiE6(E8_-ި ?l@6[!X.;)'~kD«p um[>$v~}/ŗC*RU}3,)\mԛJԩwuRԓ.U0ߠYRb] >й?p*A҈QI"Q-\P_,Gea:W3y(O9BZ1{tX5Y_YE=K 11vZc d*F#y*FD@R/Xe*A4=KAEdjcy@ qG~xtN|{L)H!}M״c%>o"C9E#!hV5>kG+G#o?fuU]!g>AZP$MiX$-fv^(MyR}k~C^ڻ2r䍢(EO:<\Ec1DHeM?1>uB l@OB$z_qQ-~1/=0rPTg??1?wRA:zD)mHg6zyuz6ÅB8Sq63#SY9o@bM&aUb8cW~ #`MNۤv cYX/EW5ep0;SaG|ܴc5`y={'!˺Xfdd p5d"NADKYJL\fmsD4p@"jC_AM[a8o~|G#`DON p˿`0t Ww`R.k{ݢhU"&UP^YePQYʹݶ"m ~P7@nh Yz8xǁgUޜmؙDwsޚ??׼<|X dy(&GMq0}%ѻ^2Jv@ &v4\kԄ/:[Ѕ/6 :#d b)6IHL$l)mpT$p'YEI|.||ڞUq$HC#n*jƀShv3K>PVTchtw3 d5"36.jKcv*t͑ {1U3HėjB_.dۭIMoO;Gwq9O|n^2__3UknB%E:aIց˖pǫ(zXiqq3-H)@)KKcV5nnw`'5pN%c*aD$~;p \@`TlO-Xҫglud\/,Ay[Ӌ,`2N'V7LIP6l8ՠC åtvy^?SE=Ƒ ͺ3־> 0c6y@{*=etJJ2Mh~fsA l i|꾍>zݷvbUbwv <ꑝjG9>( \:{movty|(m~LI>ppk!UEA >?8d֘QWl0CQFl^j`Uh ٗ%鑗-_ h58#ґ,xPGj֧+G˟ cOuujD}:b}}=hhRڹ\8tɢ}kmd>Q{o /ЀD,@.ݠN^c>|`SkKCrz봮^n:;j*ȅ!^b{#/KuB$eYk+X>Qv2=J ݞ7|׮ϢSRGL?~88 UBk"m b 0xHqO tv|3A`Z3'|JzZ@O0Ͻ^޺Wx-.ΣKDiksݠ>{;7 .4疱BI%''.n{nw5^ԕMY ׺h;x:M=$ #kȹh6pn{YR::Fc[;cz/ƴd 9o{h,tҽ*߷P<-CG'> -@6Ã/}"CQ侔gLeT\I lìy0h~w?oKtϼa+y'1 ;+}rկ?Wsl?n3d^`cUٶ+5}৭Zy1/aǫ!ho};ͯ7zl߉: ԓ?sYٰ~[w!l?+7pV*ж.֖\}of ˈA6oVf~P .:QWPh)3Bab[Sc/lG_ tZo{É39M܎'PI ݸX2y=,;z"Z\@olQu `pS$ ہgKP'{8zv nj[w$= VmKѤ :W ELhQ{2DŽnk)(Ţ lrNg]DzӳL֗qM?Ϟ' {/;ѢMĚKFhv6&,Ґ1~-?m#Urz6PȪޒEm@QTVxx?xxz8zWQvR,Du}""aѺ6>B{t buu0tVM{0ضP7-Y qdrz/;"I|oҳ[P)G!NLQP^~r e(rj@ye׽ ?ѢCRɳx_i|ic^v ~8<\iHFub٪fYz5-Y^C]_.TNJ6WV= p̀>!diE"RH%fOˆHB)D:jlPgʏ9B퇖c$#$0f u 1:%\u CqC3̒M$z҄[!9uri69O-%^P\2Π~@SP֝BÇpwZ:^VY뼎pkp3XWoY,,=/ìBjzOjS妏8N F"|Dy~,Iy˔F_~q7lM: =ĺC :R0DV)z4a0#9ͷ8˺Bݯ8Ҟc /n\\r>4cPBFuAd:L҄ uIc43hFn҆dȢle^2G"K<x^Z-3s5JT">+D)-dc}hE1 I*N:ht0D%k: t$)K-SA0o$`+u;YxR^CJ'Q6Cukpg斱/(eQz!aYNcZhy`l+#T(OW[tLm4EAvo,T/,@Ak﫤8--WD%ݠg=8K>Y{VY˦*be;w,u'ݯ~-cy %3{S~K^ewH||Ab+3G@38Qr{W ޒg_oyH̼pQ Efe$IPXAc|\ z#Cp-gyKt'7#_WFf4$hyp08ב^H^NoߘI.@ÄS'7b9݆|7>0k`y$׻o$akwL=|}HY뢹ONp'׶Uk (;~1yvM!@-IǰG{zeNA]4_gBfizO;3>@~rCCVY8Nf/9rۻ+ؘx,dijުP:VBp %Hz4e@x}咕ZwcD2NVzzDON.(r4@2EhM2y InqgMavEk8QJOr*[q2qyhAϋ˸C.T6J/[Qb|~ w@Jy0%erϹy|y:Sq={7mjS4ujؚt,{W4@9cy[ُЦtACοL$ti =8}wy(p ڥ;k'rJ^8?i<&^+E")UK+yj9-? 
e9hm=jԮ# d[QAj$@#MY`Q'Z[!w[h1TԖ?qF3$L'tC/,]вe.J{SԇrzlMk9<4w 59G@FwI4~lGjFN0& sZ60G"/~ un8¾lqw-5h@k2<R"yTD_4gP 5B [G0ݴG lGPHs!@>vH0w_dʥ+i& hЩ<8D,[{j*a_u)m'/+t%i;z3x xps(Sq2Fݻ:Iiu(8g1m;7YO>)#IA@ӚDE0 .MF >J8t8>EQz(%J|CJzwT2hg5ԑ$[.>{o iMcn*Wqy|uaJAs2a`c4`2&BG{d8~d-4F`ot9`+)E1̒wo}C^<N8No)=o.A4SyK&sk\)on40E7O䷗f|UI_ D "Lk7oZ7,Lku8}(D1`s5 ALJÈ@h-DŠGm}.|ڟ>JM11,HC=L= ǐ4;X|RAU’qBm{@RB[QWsAg!{#6m&rxMmh{וF{&Ɯ<QpSgsfZJ='rQ]y;ȝMJEjs4oB5#mZN+,؎'eC*jw̤=3[o:rzAڬvŏؼ稾q,dnK\_G_ŠkoKfwWä[ qMcTu֥\\҇ޣ!jvva7@1 Ó}!@$ph( %&nUKoA5Z9k#ԓ`I@M-Ҳ^)hAHOAe[|J='rgrvr2´-hD%Tp㴮c䡅KG9( 9gjPY)K>94~*)Gl{-ɝ\Q]gI$<lΜKI\-POE7YU̦7'nߖ}}#\W Ey]17}|6OtGN6xg:F1_(NGOowWm{T6{Ae_/[[ŝ^#0'q;/èt:|l_D(ξcjpxD.,X-JtW9QB`pi` CwjiDc@r4XqA Ь >h 6N4Js^)hqYD,HJPyh߲?'Ђn: G"kFsnz(qY$^\# gоzMP* ^#7Y0z莑v=C?zt_},سjkJS`@b EI1{ n墱crIo1z4̶Er%;"`l7TtJXVQf8a0%?ZPL!]+ Mq{h DUx\(9&aC$ؚ\q_I\.7eƦ۲'+֪MM q}OEKkε>Ǽ3-CWo(C#`<`D KwNV`ܔލĖ"ޛbT4P[' ck"Q'* 9n0pDȶvYCʣX_"c% k+ԯ>X*@DRqkF+7YL̂xgdb(qXvm~WRVrS.FשmTlxpx&c*ƺPm=O.SbBTf)~|n9(gkd*A/.rgsل51)B.`eM$$" <9(ީw'NOl]Y~zvYQ`!IBBKOyf3͒@B62wl>Zo ybKr;0DAQYJhEɲ(/֬tYvr[)Yw6ԔH@ 8 tID0|q:l-wcm'C}gS_-zwD.Eܳ҄]gJ^b,ТN{_.>H6z'rgcY4IW6MLF.5ϼ>A>vQ،|f5liZn[*w$x଱:lz淞z+\98XUS>0Z!HsYmZY,nPe@ ߻pA ]A]ifE}N|xF8p8l?Vpځ>ե s5_n߷G6aSN/z&&_x> "mS=OO\MDX]3y^;,4Ȓyf1:M ̂(7\dfupFav[Ͽ[;t $0ͧaJ%ov?i%8}Pf vC ^ Z~!~>vb(˷/H,wXiZBsaI).8BNv36'q^ ˚{56 ҒwVތ1)QTRg))\NZx7rae6ϰ=kKe}k|ڍVfOB-F[cy3sAU'>Z&~wmƲVOWb 5Xx3/a}v`ٝN`}1s1!:`qubm/#-]3_w bRڕD{V}̓9_fM]9q;GY%sWAlT՚fٍ ~kG '{u>(h(+G^6>~ 6_5: 4??O;oFr߮8 u] 0?~5iٕ< 0 Wza*ţ:GOV /}-<iN~{~U.an]EzѴl$ѻ? A4Wja?[N_Wy`ֈ>M qH`c~EQ ŏ][>|陛4 '&0q|M(-ߏmoh>ݺ].Y Fn͉{L1Ϳܻl& 5[BBtx/+1% H j-{YU3nͩ˴sp A8f 69OOgUp]L,=JLU*d;_"v^mx^w f+$~L- mh c0-௲t7on3 ߦ|/>]=Nv뾜h?:K5}:9_|dx%k֩{SJQOw89ٸ=DdVV >GqIᒿtСi J4I&wbgŋc StAQ6)<#''RW6;jmLv`bڷ$A4AէuoAM͞Tr2]QZAQy硥;$#;ؽP-X^#૴& Zdb[QHD.#* @=5 єHu4P'Lv\L!^9fqO潙9ʊs$ >3R 18V -a܉mYVAVd֦TVJNYq߃x V*g -O̼:mW($TN_}%7m}_!ۚ'_?jq75sV܊^4Ax`;GsNw-8=(ֹY/)}VEw=L:_xlRC^j%D.ј7p!_m+ƜUUt}Q0ڋZYkTG%q3>TAXb^j}gjK`  Ye-]@ż'4bcVx< oEx hvAi;;4֥,uPSbꍻ?$^+%@,YIPϙ=`kCXǬѫk۠`/%)؛ Uog0\>ǿlާ''&4.qO$  ln 0` dY&a{><*H̙=_cLUD3Olj7"(YrTU9Mמzb?΢1Kc@(^{_  z =1r@;` VGlKH{W:ذz?r++i圧g}aa[*lj xXy/<1Ky=2Fx&Q93G'6`~!0Z;gsYħ*PPnx+]~kgo{FQ8X\qd:%qי8nW gٝzwXĢSpbA`SO<~O}|-i%sS8,j{P |ny!@/RQ8n\& 'GOHf~0BL N}IZvҕwOQ!vw?p:Yw*'Jq@{j=˖۶ 50w7VA lA6;ZBJ rkQ^wyf5Yotp8 ~_\Rn8_LˎCx'^);lEw z&!mP?+,9 w.Ux8aB^޽puD q"!2$;dBRDYT㪢~ U ?ަ™C pÆʍЭK4KO#%{jW?m &0pi+sv_k?ulc+ˆØL{57[A`BWo=ofzۤ0 ,2piphH0|cIFCK.1 DA߂ntbltx{Wg_zѳNXYߒ}rOtF *r7!jSQ7+߭+URϞyg႘havX25oʘt WvB/J'wO ȁonvĹttvf,@6+voXpE?f$w+{RRP|@$}`Dl6=ŷ[Wc߱vӾ 7B,tñ=:k}tŰ#HJ&X5R4{?א]_Ukku%C>= ef~RohJX2JFH{EDZ}cɯ98ëxNbR|'qp[ɺ} UJ# &DDQpod>_ՔOqoAvos}x3oD )IY Y9q;얬 U+%hN]^Qջ[(͇=Q}J5դg=6/BgUQz+cqQ @7DƁ'W('GΜ'}Ξ5'rl;ֿ_7)8@Id_ݚqiُ(z2&6 '"^ޡ $ov;9'nWsl <*PwŨ.Tߊg7~⢡,Y$;`MhTO|F٘>uEEiyNj_sp2L}G׼ F dعQs*CdٻJw\<8(Nqhj*G:W9/.*4bk";{ʟnr N6xfyiFgLЛFL@:{н:c^NOy*#֙ NNXi\ AZ0\p^6d"_pP. zzS}Ɯ`ֵ㝝{mbo.thV-&Hc g2Wa` -{),Y横2 /$!q3^|$.~x֬KTf֬(DW'Mo/o1 [~|Cݺ(#s@;%|C87_=T:.5Jo}ǏSL n%*+L^(B NS]ˆ)ЄƉ Z6V%hZ~in((WQQrIQkRE+dJX<oNsf<9XYŴW^! q؇PשV'?''B{'DiO>6!ͩ#މ] ,,̚5B%)Cs?zr$L($]/&;91]%wç&A±J_TfQ^yJAy7_zs hoB#fTrv8l˰Ν8E05>(8Dzk37_>R\ &:$C6 \)60o#$]"\F B-HeR\='k_|-.qU -h:% J<:azcklg~FL|u~ ݫglzG%`L 0׭rbVC>\?&q<Й%TՇSڀB6JAH:+y#_w]ؖR@K&88U# 0q6jڮ'ڧd{PU7OL8.&r\A؏Q5/bzԬЂ(|ߐG=6N3DU/[ ukNX|91&@1h"'Wx BX#<9X'+qIwy- 0>!x_wmZycwi7:&w:8% cxٌK?ǁgNzhAEv>iԝ]ȑ=N7Xh!!.W41&@=oZA1FUR[ߓzL5Q~oZry}(o~r\BFåÈɞkNX|o$0d6ܽ0>!ۋ7`F𞌢E/nZW >ȷc4 `KBlB'F.soǑi|uه`L!p eU+OQ)g4=z-9uSU?=b"-CU Ey}5HhT|7߈tF[ iB *oΙ!y\zI'i~2BhAڪ|KlӐcvU헓;/o3&@WI6ޞ Cw9Trwرh79 L0յv,(8r't8.tiS[pc F fX:V 9Ȅe gwu:itQ«dk%bL 0'8(V݇ࢡ.m ‘:' $I1J7텂pŅW߲7Cå]׏0X9vaƌ+|cZ^KLV.v} >H>=;0˶! 
[Binary OmniGraffle figure data omitted. Recoverable diagram text (produced by OmniGraffle 6.6.1, 2016-09-21): the figure titled "Linux Bridge - Self-service Networks: Overview" shows the Internet and a Provider network; a Controller Node (SQL Database, Message Bus, Networking Management, ML2 Plug-in, API); a Management network 10.0.0.0/24; Compute Nodes (Interface 1, Interface 2, Linux Bridge Agent, Instance, Bridges, Firewall); a Network Node (Interfaces 1-3, Linux Bridge Agent, Layer-3 Agent, Bridges, Router Namespace, Metadata Agent, DHCP Agent, Metadata Process, DHCP Namespace); the Physical Network Infrastructure; an Overlay network 10.0.1.0/24; a Provider network Aggregate; and a Self-service network.]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-ha-dvr-compconn1.graffle
[Binary OmniGraffle archive data omitted.]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-ha-dvr-compconn1.png
[Binary PNG image data omitted.]
V~?'=>>EC M%G>sO?Uq)QC 6C;깁n;G > ~&UU\x;fae=Lvi^wq'e_eh):z$KP-"Gଣ*{yߥJkmjy 夵m#,V;`[VkamX[I[FM>~Oao?3cnOU۹Kkg{odN@bMT,>[1P:=rjcP֝|e71h Qx},:7p.n0?ųUz_,B5i;R|3nsE7>׬k%" Hh?1biEV11s4BOf07۶k(7iJ֜,-}{M0)" ݰqEs&{1kRisZBz ˸OL;RN< ܪ=!3\tAD?,p 44=:չPΈok.N$I+#NvTgjR+n::>@G-!kXQ@p~!V5/ݨww7yrx]>_3&`ʼn3p ( EF Ja5zʳF+##J 9HQsQt A˩rcD&%Iw#k4 EcY߽禍imJ9UfUct1VY;-vi:410[9eځ`/o`k6lFOhh!Q;뛄m㎷E®Sc.ک 5"7"݁%zOOSЃ287j}1=Jh/onc/aTϏKъ?ڲ:\jU# q+M}Q-M= y~kEֳmu6I@<it$4{YY -bĩJjs:u|UvD#pW3вauD:@F`z:-ɟ#5=MBgcxO/7CY gHZl%YTVEb:Mxp}*iz~^hw[u'Nb@'> ѐEMDE_kK&LAwݎ׎smg2>z4hLNڎOfPwf[0JVee6H) *uDuKc1*m2n? nh$!hǽ&b_߄]ȍ&82mڏگɈA `;yGCVb!lE>Y4{ӴhJ_#wgwa`pQ O:lAcõFUGa\Vh;8O=o?]`]R8% M+NGYo ɇ]]2%IPx 5Ot"MDy2OE #O^|j,97u{o,u*I)!&"o7'iow`sӠuG%u?ºU^ְ 2ݩ 5%حZ8rڴZrqES8 )CHMПCJO=>yA\yd1/x5p YiH;\{PRhT- y ͮ7Qa=cEڡu/f[S3}Q92-a_/4$9C!30s'OJgt%wjcBǗy4& ?1\hVAejt,nڡ0eF#Do,a~L>okMwv4D%Xe0uٯ˼ˆJ9_T;>L-ޑpkq`E4~eɎ[LڧɎv8_G E:ޢEJ]UWyv_j_F@' k7SQ=+|iWy妨\Ek| n&|=.%]lZf(py%@l}LT4r]"×">T Lz5ڈF&Uy}x?ELr5O¹Ro9RN(U SֹH;4a7f[ q,luHˁZ/s'=6ۈ/A~`'k)icC/Vフ9Y(cacG%-;7h˱#SW-[Z,|Y1J)"T04\q IԆ,9H[/RƆ_:>xm z'"a^K|(tF@FNq}q^dbVd?|v( MhDݷL4'|Z#w >ƽ>X Y 1S乸:cn!a?g<}PBo7w";U>Q*WԬ1څ)& ڴ~;&j 80@G_=4!¯߰lnG" >֥'Ϫ{*$j?{|."hƣ 7}}m޺T^`=# k,: fߺ'P8?Qɚn[8vEWM \Tg|r4%> G/(֖bG간46|VjYh4 ?{ՂP$Mg'&w"*m/׫%PTU?4 ۔E4MLB@>qKAF0)IMJn>rD(c^|P# 3}#ܬ#66H_05捎slfu$ʾy #arS/ )R r ;*5]^%d >}#ʦ[υ>mTù:^r_(X?WNQGgG>>ʆXlʋ`x"P U0[jڰ![S(\ F>N =(5 "kvCCewL 08w6t08Г7*îhXM~p\zb];gN>-^naI6iuw54^[[zZGNrevJVіy{:c-hvO#;v)/k.>4E*A辢z05B!? ,ؾL#^)]sN.hj.=&ZH[p1No2(ֲ!aL =84%$a9 ҄$E`1o?UrQble1YE͒0 >E)%KOՋfL i:D+ehwÿR@y0i‘m$bIK 0&XqJ҄a`-AY ۘ#OS#wK-3Y&kAKa&'!^ɞ_;䔔bL Ov`m@JAy14O-2V 8BgL[`jmG.&#axiF,!%&@+!#N$XE=~I뒚ea٫Mr}'XSsE^ BY33&&v Po8;2&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L $?=Ed `[{pc8^3<+& Їu7S1h\{C;7䨍Y.8Gҷ-5]x['n=<{2[^ >+ܵyy}DZNEx@ǽyFokm nX3M*6.R\,:wG]"w*;3&6 { ^_jB,vҹJVRpφwkMw`HM2P:9O|-3'_o\K-0/7HGrP{Y~I_+̩d0^CH?:107gH%WF}w B9yeo-l[ks'X{6"8meX]L]057fL $`L $'`pQ {AЭ6ɾ2־/l\iR"#X[hR͇IxJ"L5SoF)h\@9P֭Th ab48F{-~&LQ43']\j3%]áDߘY^١ ˰#x>,zk/?RV>~!!O/~T (RR߮ҁ2A*<7O صڕ=b?9&4_K2{) ޫM}(f+2^v;ݻ"<GǘQPG~knS!3'b7`f#nI\mIE}E:; Oz vv LSqs Q۶u6ٮiicdL $%qJda Ŋ=PO)6G^Q=8Ol)9(>Qw078z3qJ?RLyk6S:Oإۃ51O L(oFg1?xfq},% Bn7*6,PFCmcהڠ9YY4Dǣ ~@Ԋ5gsٟWv/¶gtFƌ3v`\O~]KyU"O 唙)W+7.RtPo=C 3jSC#9=`:`L $)K$$Ta 8B/A H4ݡ03~xw]sM}ZM7/z&sqiGMNcL;G<}3<^ vȠMCA(ؔac\nϚff7]Ӗ'_G_kͿ)>䚃0{ T49Rt!Ng)4NLB/P/B}Q~@28 ɱPc nXƣzo4=+p 꺨vH&KoL!3Lg8uNLc#eKV$}yʴ0[~wwjTÌ{Ha^ ix Rn )Ưyt!#)w\e\e0)]~Qc1QyJGXBP: FJ3)Кz7SORVx5ZJ(4vP@ڼޥ8LH懘~z:ig6ɿvVl 0&,jUZ*(XZ)#C%h!3>G̽8uޢA_5*+i_/dY[6=,/Ԑ K Fa:%t;QL諠,c~Fدq[^YF:C1{9mZ=7Xuh98QIl!7xU_κXdLu.JՑt:c 7]*o2>L bДcUB?YbZjٍ4-+Y UAG}Q,5VAڥM֖*;3<;6)MY<{_׵ݿBp[i7ouV H C#yeXѴ ˴kj]rBD L^qQWqJ-JYB֯Noؘ 0&t^q%]TX &"켳$l[BJv7,~Z:vaDnb3Y2\󧬴mc/2碑m{(X٧kTgB]';m.̮@\kSz))4bZ% D;eriPJivZ[o=a؍tN^Zl߆io+ mC~ pb0z+6Ck8]ҿTO"=ё>>46nxrT0o&|k :T){*keuP4r9%od\Ogf ]~Sl3 y4}sTE'l\'a[W쀼E l:$ݍQPt6J|ZX+ǁcoQ|Nc%-n>*}=5nx*v7+.R_  l`L֓V,)h;@l>h  PR`nƒt3Qڲ1󱿣v8bmZ>e[TEPnG|(Ђl|J +TbhWpC!_cC˰a~Ri)?l4Hm2֗W ڻCY}4܊8חukP(0|0SqGM,fbmqlgaI_ڊGSHka/!P4NC^K#/Z"OQޣpҤ Z#>_öXGPt&`}dy<w\eB!tF-إv#wJ~Ƈp띾JiE!GOXҪ~2M58߇}L 0%$>`}OH_o]q0xEA0F^yU,@&z)ۉWmMA-6vV.Yi{)kfNH5ꠏl4 9hz\I'#vaulC岩L2; ͮp<6?w'9T]a洡˿$:hӚ +EʹVu=N.z>ouC~⽡MXvRc`L 0$Ss氘hSHqt r7qd@QuǨOP0-`L !x^BL 0&mk>Oz5`L5!ZC*L 0&Z)lTqm6ؼS|HyvƇfL`ũ=ǜ 0FPBS36M@뤿, ONm:9&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&@dVnsdδ,: > TREdL 0L@ I*Y ʼɟ\T82&Pd(8EcT %RB?8#uAas&`@ S_/b; ^Q^֖b,/`" eB@)[K^7y߉2&@"0rڴv}Rj~'W^y-ິ-"ǁ 0&Вe+N9-nR}k o750 0&@fvH[`O s{kK[[L .dWO3 sǯlJ/&`|_?] 
Fq>y{&ץɞB,`B&q)J 0=S`L 0&|4@ޣA5tjkui 0C~)U Yij `L KPW-Es]"9P&*Dq 0Ys`LzX][I<.m% b2&oʼn2obL ;TQ|Ğ֒R,'`IJ e' _;MIʐbL 0vHӿ~r$?ץI@,`@"F+NmD4,(`L yT/QDTGdO! 0B e'@/m-ىdL N  bui+H$ 0'Ȳ+`L 45QT?Q=ץI<,`@F+NnSgxL 0&ꩤ>.MaheU7TVeL 0&`L Gx"@_6/PkXB/١3W%d,`L 4@Osz9i=U!%q۶)1˦f䄆a[n$[Fv/%RE rK&yB`Pi/##z 񺮝<j+BZZĵ"7^жk 0&]6uT7xE@IDAT81j/iD7~\/ՋOGO[3:%/ D|&|~y2ܤ)KH4:Sd&sRo5c"f?`L@`ĔEf藱۷5Ɖ8q|8%OZ$iśQ774p}\b$LDfmGH>fNٴ<~_L 0N 1순iX&!ëBb X{ `HM2PqxNQҶs|)TK}"eR҅k`uVao\jV@@ZH907p0 ^8/} FBxG[f~<@Nn FAV95s4ZJejRTȳ2S20/]c.h ̵p\_Y実u5~̃tSFN s>ˮk,HW\L<|!+5E}|Y"VO!(ߗziVTgWkg '"tf3`Lhh9[_y8";VTbQan0PKHSP܈:U)s%hUk/ub,u&w(O \Rd݌"!T^]{u5P/և^;t^.KD&S(4,u=v7GdIe-Fodk @fʥ9DOK[le_{$<%jݮlu3GFZٙsfTNhbb_۰ `_ݪz.Pd䗲݃P$~BC)=]^FuUË̢[t94(+۾'\RzBy)8=R7y{^MK 3(66wDiˑQPd=cZiofL;fMbL .񖳱W_䐐Nw{o߄Q,1 n(yͱhT'sdsZdߎNQ R2y9Ae>dUn #N~6*8tP8ngYJGrmo>Tl.YbD2!|H% vA6&J?RLlG+lXu.n D(O7f#^,7~1sEMx8?7;\Q2V8 B9gWyJR>(u^/0(z 33Ֆ-2V8.7IÈn$>5"a%@I-eʮi:n)Uk>`U n#m. 0&:[R.H5ܺI,=&*]:nCɺFtr82}1թ֙JA۞} tË/A3QGy1(A B7TƧc\͂PU[LpgF (M ?n /LziF8~ܱ&Ph\A-c HF-r̰HZvf浌u˪#--j2o(Qjs[Jg q݉Mfz~-)Mfh4WEzjy-NLCVkmƕ"1pGZ0 3M&`GZ1`&@B R6<u%fJXGwСu>1%E%t*C9yCO9Q5{<]givG1_,F^rI SdϚ&ԻO]a !Law c MpSZb75GG: oPpr,ԉ##xKe]є@m%̠ifUnL~  dU^T@X˜  ϳ_ps\Hn%֭Dcl~({a}gL %[+g<|ش`"SxPV}q{4'TM]g;m42sW;iϱ5^qRUT&2RS;鋚>m:<j-VTx2^F)+m˶̹}q Ӱ.۸Q1LP쩰[sJd'l\l&_twTa^ˠ/`@}嬈< e2;Ug73(^u %_}⑛xɳX S^(F/eL=ecehR[(a$'jAn6Fb %0UX {>t7ʨ\3-IFo;M뇞 Yߺt|j$d=ж׭2LUԚSA ]6KҫɯP |s*<;2ԇag?Z  A {%-7X!]҈O$aTli}N;nh='aVaTav|kDjıpҤ Zcn_AzV+Lg&@["PW9ky|Cj7,ޭR!҅D]vFj>gN3do[9Qu&Ƣ\WzÌ.^R^t1>IE)>|0DSUk| st~P7yrN^K<迥0h-mkSV@w/qSd̈́3:l4%9Wbt9ẻ7E>o{}v^jԾB1JLݨO wi/~"Z"g]ټqb-As YDי.R]۶yҖ$+\N ixv.6΋a<@h-uTkEgL H Qe*OՋ1`L 0&`+N79L 0&`L HwՋ[c.)JiVL 0&`m+Nm4a9Z#P=|L 0&`Ok' dL 0&`LXqj8;v`L 0&@;!S;Ih&`L 0&` 'SٱK&`L 0& VIBs4`L 0&h8VΎ]2&`L 0&NN`L 0&@ wΎ]2&`L HH?^f|f-I@ykQlȜiYuTREѹoȅB_uɟ4I~ZjUS0xZQ4Fx6H)U4[tiaX6k[m];5{rd6#.rܸ6m9鯵>@(O<,{) ^A mi-۶-eFFA{'"6I x'ͧߜ_~9筷.~HvjQ2'nvObT=IMs(⋕E~˕fu* Z3iIeie 4[mp_N dMD Zy2uWM ^u 'k"6m.ONh?, Zӄ<Ѯ8$-m{Ie4Q"ߧhv9|}v2wJ@%J&J~G{9Io.s"Z㔒խsN9lO}k5z_"TIǁ Nr_{vDgKQw)|9/@BD+O6Uk#;o~@[UYuTS$D3i %wXR1lلv1xX+Wg!XƶD`Ky2us_k*KRr44.\67+%nJd=J~kWMR&;KJ[ާdZaHS~F%}$o#'!'ai u9GȚZBqS/s)dL yIn?3צ x4f^vCɈptl=r\nQ6#.D54z4M^[)ީMg_y*U>fI8тHK8i"oh2#_\yu[$O{~;o8Go^IV5E‚ NXnc =cR~'%9OwvAʓ O:Ho"VQ%E+&L(7S')O9cz JJn f{|꩙:#RKM7U8m+G ?رs%3^$/ӝ]2Dp˓VQ%C+$}(iicK@±II}n͕#~޹pLFI 8|4@Ǟd͘NAUٓӿ Ι@{}e{uJ&& >v 7EQr>@zދF*p~=8LzhSw5Kߔ Ԕ#oNSSn"s^d̘4ӿfWxp˓NݺQKrDPNQS:9޹ \%0$O=m4YQ)+x#)'DuP4MLۙısT|d̘nEO/Log*8 g!pWrj iᛛa8hQ;L6= G"O)4RO6:]MD%=d/u(uWfp{HN5rn 2WT۟W'(*??.36L<*. 
$wĉc5.Oc%@{NDs?)N'_2{*QRV@2ag(6ܐ>Sz 8 LShBvND}ω۶q]n0LS`;ђ.5B83^Tҿ%tO-D|"4|coz7Ǒ= MNPô=ޣ N:ɲlq#8\D$W#ݥ36H<z?5'I_%F҅Ь)3O8H`KFIz7egD @Zu:CQ}̾N=qsbİSN56)MXobsi۳8žziW7OJwD۟ ˶D6\7bڧn}һU>*`.@PtT |&>)^aГD\exjJOJ)`<@1Sܠk',q1*޿7gY5q*M,ABvZD@)W).< wZ.ޭj4EާHGҮE*׮+ǞO2-[)yCGv$>_)_Do|Ýrtj5ٴ,1b=<{YG>:!bpHXv?面_2'g<[U ئk'Qo%z)N;b/1oOGgT|ϢC4[?xɅ',vG<ܻh{_sNȲ͞;m/^}KGu?׋aBiM\鎤͘t,ȓɧ87]O?:'0t[&X" q$U"|e"w9"ŧ*zv,ksCڿ7;8vʩӜ^hxxoJU$CyLj'X*3oyz8?@.ЃY!>Xj^{{b՚uS4 Ezⰽw#,N?CDzxq4+űUnԧv8J(F.+ > ⿨wF}׽s8ȽU=蔞*H yeNe=P>S޲}!EF/ ;ŠNcxK;Uw jp)U ;e8),4BtƑRsê?|v/: o]0u'+‡{hϢW?r:}>]{năwwdޡVcچ\p}CQ|Feb=0w.P,Y8Qn;Jt:1H-Ŵt ug;㪮4Uwc K@BvlBBl:.%aH@mY{lKѕ3TiyG={O?̴xWX8z]Uu-+l@ b96e[AUP|4rpNefp&@"SÅG+) vsM%rN''d}!E}ȹw H~yrq2o>[#W`_p: h"rZe>8>+j1y#52whq Y6p^oԳaEګWl+l_dɬI3M|4lĹҬgC!%Ly>U-Wj9gOtc"@db,VwXz;jH+&jj}5 ꧿uZkA+ƾynpgLftFRsU2_ mQuB#'#`|Ͳu9?dfy˾b9+'@w]: (ko76pKڱ&CXyk%j\SX@f!PE?fm ң}>s׿@sF <^g"OMSd[鞫njH@ijI0bzg viX4HM*AJ2Hn\8]Adt裝.?oܵE2ɏ].̓mo񑚀@"}<1QD{|uW̙*ڛR0"utG S0/!Z~„ Εɣۦ+\{Tf{ gMTKMw J:RS t(P o,>p\ze+\R-W6!*0~Q),$b SS?} 90'+Ew|{xPӴ j)xl*=Ssu3c4qm8!2\JM/sd$ɣJRAՏ1&Zj;@P^8i#:pLi 74oAuh8e6Tσ!P` j.øoomw(k>М'-]oW]إk:4Fvq1Nl~6 &h&@/# a4R+4c\[!Z>pUpcj9vHt0aӪk wLDqׁѣ񈐸Y&bX<:eўy',up~Ms߹S %X&x4kpjP^F̖ K6)!~hsF 4_=|i}U6ڭSF;-%.e3,/js2T#*!<MP|t"t ~cj h usDT;]~fe.>-ft}ll#-E1|}qlC| JG-zЖ;6@k~!L#(V|jX~wjG)6|?t͚4Bu;ZbI>ݰG b!(t|s߸J|*MzAYL , ~>HKS>F TV$6wPʹ@QA0s*Lκ5L"0PîǕ9􁆧6q1,d|<;h&v gNP,}qlC|ؼhuHCms6kzsw6Θ0L>KCdoZlwtq\|85Kr/88I@&p#(1!d|Ta2 D#RQJB J0Qkv(qe[qY =ܬزͣܮ߁)΄QV܃J:~X}[7c4ǠoM^E66Zf&O{QӍݺ&)e4!&0嘠5B(ly ~' $C̈́$ŗn*3}0a̰BE`S6]w{|[65 ^i-,]IPjC};DЋtiXT3'B!x#a\?J*ZjR:pk,fǟn,Ud}2#jmHCZN÷ +& wѾ[hsjU"]:p`:`" Up6/쓧ڼFcڵ.@Z|ykќ"9SDQ8ѩOz]U}Ĉ&{o0?oWo:x{ݖj]`yZ.9dZXTyu4)p9x.aVQN~o'@(G"5DGA2>K #dc|#,\di@:Fx{6nT0m|y$Qn>qsyTH\$ϼ ~^mAni6Li{o̔3ˇk[҇8~gh\Թ<M?TqN{|0jqf4= `4irX{KߺK YB^X^ 281WGvb]٠#u H/zQO@pGF4} D<~?~#?Z}@: ixkҘ᳾}C6C#wc(cr51POfcB9~e??*b}Wnwԃy'M;u04HŸqS~6"zJ#<*_ͥ'pNb:P}1h`Bd|Rg{PR%~5o:z|ʵ2]=s>pt3>r#:_]M1;F:x@SGϵw#㭁3+[ v}{4ߚu޷7/]hF[2N+QBPWŝH3B6c]^8u4ob 1K`24jqbXZ\& >00100ׁ]MtJh~NwhBd0g>mb@cy]rn|&L tu| @g_t5K31`bĀ&L 01`b@84bbĀ&L 01`b@0`2N¬Y!z(&6L 01`b@1wV˫K6y0`2N0c71`b b 0*`bĀ&:MXs<F3yJ^=#2!11`H~7[mb `4;&ܹ h9PVH:x\6#EW89%啵^Wܯ=(=|Bgja>$gcgMTὙ\M=//bx fM `1tC ɏ{ʊ-{`9[^̛:F8 ت3[˷"\1q4`<01ux]r&ͻȐ|*sqÉ{奏6aoANң¤\^d ?'KdcJ'WF=xGv(C'e 2s1Q2µOS?;%`nmHiw\!9a]3MR|L3P;kb&ל6vr,DtkR`ǟ Z 4̭cmˇk4]ϛ,sFrO_++ʓPF5K)Pٺ.=s||DfV^nX8šѪS'Q^Bh>dlWcϞ4Rn[4[ګzm109.`eϑ2@|L0b[!# /)HkZ@ܶh?zZq }nwdBq`(IF41`b  Dns6&&#d*SH0LƆ71е s&2&S )2K[}_LlN>tB=*w^5OH\>JY - >{fV[#0J%/~NsX9rr }}%+3]C6G/N%ywO!52m0.ͮ|kوHvŬ7PCntFuҞá,h@ULϧJ&㔠o6@41@JIlm׈6\nH˔S&.|BT %w"_( KJhs &|h2]4HP-!\ \hh{tT<ʒeSږ-ӌ sW',P%W̙($v=Okv`\1U{uP58~hfwL$B.mZXC;Wrj+&to )oT)]2MاgjҨ`Wl+~EeFJ]&8}m@0@ZI >NzcߺSzH)MpϹ`b@|aJ"/mtnh*oS~M=yOM9gIjt/MD6k6q)|fM!!/!!$5DɯMELܯPѿQ;kٲOUK4wTc}/xiKC;\}-|}ק{r;_ 8&$ΟfkM " '(,Ā`7:dU dDS80jpȐ!y7j6;^^)KGO n,ULw(AX҂Q6բ)1>Lv HD{uh[c 4uvMSب341`bPѯAwyĀhD&F#gpmX4HTA3$&{o0bz G?TnVW'ONSipP]zxɘ+/"ۥ,9NG q\+>VoڸaȷEgM H\ Sr1Mn &L t o=Um1#φmd~ 4Pc4ỵP_ x^SɃUo.MIb1)!y>'?G:ZPFy}Ā0'_M 01`bĀe_H?G&L`n\({J4[7]h;[dk﻾;lHKMU&vi=Wu/ ݭBl^o7'T0GO$ij]u r rb4tggIlY}8" $CgoN 0<+]Gcu0^}O>Ĭ1{Ьbaʹ *Q鞘?:]9NiO&t SXF 8!stYT9RJ[r}?kFhM[Srxr$\8s#46Fw2KLj{dŶR߀v1z΃Ur ,TֈfWʎYN (=-I1sfK_}`1sLE 521`b VHtej(Ȓld h6AB`v{kvJnv\:},1i4i/2a'̯ڥB)Ϛ$ػ:+S8˥q\^D>2iB0&ө//٢Ffh-{ʶ=er9qINNqA@;B$I4J C} #]ңܛ01HtGl$a$\*cY&э644:ն}rYR4jBjҨ^'%K6ț˶I%W\$5)}c9[&SV6Dms2XJrBҹ[.ZF,۲G_["V3:[sf9eɦSݒ-RImV/iHɓlAcFUݵ窒z/"o&sgKvVZ0͸1a]]%$rf)3tu)9:rc|їoD-@UJ ޘ,QdždakZ<f~\6s4*PSdzDXUΥҬ鐔wLWɬmɹT&ZAC\1{8{t/Z8f9ߩ!iZi|K?xQ:kb쏝^厕:K8Yni^_,w tòi]W(`+* i'4 f  c2DL|0 昊y=%*;)(^WOSd$ډzZto2ҕ8 `Le>RY]Dջ=p9tOZӨ 8Qz^|"Kg3l:6֣AU;\<Ą1~'de'2aO\g=1 Wf~'IdAC``ʫdw\l@SՒ#3'&3V^]& pO3rđ.Τ4@*i)M\rA TilhM 
ba^`)4&, H.{f|AY|;tJfC/)󠭥Gs#9IH[/f)( 36j9 Aw/-(%+#"q4!iԄa腏6*Oy4[Ś4cl? P*}1u48 .Kk$+$ĩ(IzKLTZo]$) ~"3ﮐ!t:.; $D#cΙ2J}K}L<61Iu kwhaawYQd!P94Fmj5p0'~*= f~D@蓇@KtM;MJK5a{tnmPȄ1@!>${$*y箙$ԑ&1`aQr`. 'SZc@IDAT}Z~SMRI_hv`Gs> v'L=W%*cl MB &GDM&zeIyE;Ik)D(n!/R=A&֋[S|HiH\0=`Mߩmj@^MN(cGk[iNY  mGke)kr~i\< <YLa̽2M#u?:P6,,),K@RwcoF LiSb]2_GlUf4=t2+9x2eLs`C´CU2KW̙MXҿqjy0\ȽHD= AWDQ{IKYE?]:DzY;v 礹G݁6bmg}&s÷׋)Yab$Dmbf= `/a ^K ;Zy@{}fSˌw T[Ji29gxNk-0\U 6=vS$kn6ccvvjE}6fHxƉZUfSWRFLFcc=oyoҽfE:<tGgx WJQy\u4ɟ䭽H&\8$EvqKkp^/l34mPL~Eʘ_[, 2Un&`}ahί:H:h`n./凗dʲ#.5ucĊiJ,@Cyʷm=Sg6vXL3TPK!Q{?E^ʓk#2 AHpNA@ K]2!Z/p _z,߼GPu$=EU"Ҩ=q*[~L !$L`}9 uzuw~2O0ml78JLKr$_uȑt%}2`Zj;d֠ỷ=I `l.="Nh+O EhLΆ]FC%֘9(cB,Ζ!]Sg^Q&3srd)'>d3#U[R_@ d55$O\^`2[1<k7`TAC0j0!GcmD7ipIƺ<>ޜOÎy@q lxÚezf]!&S>8Su+Αq"d|td]S_=X(~C {+iC6R~JYH\+4cvfsʿ)nhrNӀ{N]cNeZJTG3NU-bsj s% L^k(1Evexp^p $vt~μQa@8CIWpmA)heKϹe|V4l&}.}}vyMrL!/hb:.#]Gɘy t7 t> a|hA34m==h .}c&L t; UU1NcnRh ")o\yjy3UtUҨ=q`b' IDRC,&[d$I OVrK4-u"Cx̼Հp PNHh Æe -:4nP}Ob"/&@!E $%#GϡᎩJhhZ F_;F{s9K`'b|@@8:Y(,fpp3J,iќi>$u^+sFʀ{*cBgŠWn[|WCkaW;oG($Ŀ6GV@l 65O̱(I$1P< 3.4wZ􇶩Gj0~NɊ&8q2=^Sh':'VJ^S\8?$~0`nyL71ito;aDzІ( 6ĀJSao.\`#fyMnTZvsy9RMSQ904TV &D0rOJ ":.= p& r5rK9=#Q5TY>>m Xኒ[ɗo˒?lnOj|S m*КW=F9WHgyG}ɫsdڀ~n$_^~>+ዃOi, H*Ƞ^9z"щ#)PtnXs 栫e<8Xw>o7Rpɲ/ D8S&X|ی ~>GvS5 ^吉}-r՘Ò1N[yXo}^# eLi[ x9SCp 7 DmdOj09' yp1Ǜ~8WsZξs~33+&)͇B?C>ŖyɃ]HƋ.#wF?iW`]C]jHO;rOjwKMe|6xe6;@Hx)d?B Z-/ (-ʖ-_"ٛ&6)˲Y#+ۗ|{kt$fpʚ;qܣ|C]r+"nibtwB%nowCdnxTHi 4VJ,&G߻$ ύpo2&T nt&d"mLl,sD4gHCuIFtAv,T檿*& B~uVK0S|u]ljtX lH yA.N;;ˉM2aoC~0}-sf[Uh7kLYh;*çe&[(1"lIjՌqc v,hՍ2e4Sf(H0e*T$ihf4ܥJrϖQe]jr۶=ѧ)Vy-|&U 1`2N W[@ƈj50ͺƼlb d PJ)uhv.'RdXN|yKI\iH \iq H?4*LUrvڤ#Va?U疏:T >Z.RF 7k`&4R7o1HhUp a )KɦK C,R2@*rH.r6ImbM'l^J0:{&c %۝|Ym7@ˊɕ?a%VX%RR.1B&'(ʨ TyĀ`d?m?XTc汉b`h^Hff䬌MS*fgErdbS#^FY\Τ #. {!rG*'TP>_LK>d-C0^6]@uM[Ep:>^V~,w2|U@{u0Ga2P-R]\&J-V$459ؘ"KRQәdGD#om171U p:!//"u`R ;:W18^~"1Rz/v+jzzub| yV.h pj#9]eLs2rpeC- &L ƀ21'!aymX-/gԤ p)+O6F 0|uttY4H1O)721`b@L)+̢L `ҠqR#[r# Lgȑ&y:kDY ػ"ʂ(iU0fzf9jDWzQWN92Y_ ?"0j O&{vȺn}.LWD 5)ENq -Ta2Nb|'c@3Mk`n-/s4k4$+h'ZK*Esyb@&ݎlr $a@ŀ8خچMx{;W][v2N$^F9eN$h6-z0M)r0kX#Q ܖ$B}-[NeBcV8䑅r4y$d7pƿI~0ܾ+>'S>\0bcylɃi-*Lx}.p#.IƓ~-9P/&:ps_|abJguvU{6֣27oy2eP=NI:ƨsڐwsp$@ "w3l51y0pMݻD5?E#*V%||zG(Svk) )&!ҚZ'45r_%¥'Ii5X6[A?ZfUKF]ߪk8KTKRYucnnh}Ҫ=P)ub׽P+G 9M:gy&$ rSx] ˿*NGw .JQK(gК';1+K+Ev^ɷ\]5 ܾCFp!ު(fX`a$%L^9^Ɖڦx8Ёqg. FΣO}HW#r˙eyAӾ)`ͻSMHBN(HYnkw<C _.9db,dvoLfi'ɯePnLo" %"[f!:R\5:UFqba|ja?s }䡢Dr>)MQNW -'X/Ah2RV{'=j;/Vx@g7zDq(ΙUy?\ݕ1Dޮ3敖# Qx038E~1cZ9I 9Vcl\ ]N "^(w-" ;dH(,cm55 /Ӧ}'T *X/F[uELRZpQ3C-7%W dߗz<6 f{кHQ?L"yiMG0E8CˑsV{.?$\_~LkCO6of>x~rYpNqt‘  &"O'OT 3'egd:eKٺhOGWh8MOzL[3~iS I?KA`S'+YMfjd6B3w@P"%+r;-ᰭf;;[誏#f؞E*"0(rasa?+K]o`pqnc#dvu9bX4`eS*r^%M>`= R=v`*وl1~p+F͝b֟_ 3lUE!ars gh iWsrvMcnYȯqev_ hv' auSH/k4m7pK=4I<3<3<zTTY9J)ٔdLԭqT!мŗdМ=U&i{gC2-jXy W colD5BzZ00<#(Dy`Zl,1C ye^]cyq0}(0q !4>JAE-h18^$D{Brh?v$]Μ1 u;ܹՏLyRՔِxQ\5XP&p[IT׋\&&TR~_& Ҭe QRƨ ;|4I@zhb9=}|m#GGO fVyif Vl+O8AN+I|!1s$}ŴX LoЂ4lEPL[zPKs0FFٚt x݋m1aCSLxtgGwONIr.B9DeRoN:9}}Tk?5 Mz: v߱;+·Я 6LZpI8Ͱҥw0P P'5! 19`JuWʔAR+Org.,&$tL]P4JyOڛZҬ :Di,262f }c~ `5K P)gM^$=m]yr%eHEM}K~фc&''O&59lNGι03U=ƺv4o{WD!x);i _8UQNYkz{] ħE/Ln8#: ِ*g4ya|Rr% [e # 2ܞ+k[ޮaօfdd6ݷ~ ٓ4D)AZRO]CSi3ToʕQ7|g5-WfdȿUR^25 oMM6%MARHb %]/ d^%4N,26Gapٝ2Jyو4814 *q1&48ϸ';Lْۚ{mz\g;"Ƽpl0tmy[RRa~'MMB@1pk-U[O/j9[C i}o8t-}[c1OMX8P?Fɍ7 >S;7XB0N%c:Hk=a=I&]sjZB#O}UKQwHjZnp]sZԠniOzIvM$wj6_SM}+~sA!+S

2M>^_MjI}+ Dxʰz\-. t1m4e-[2%=M3N ? }(cj?&I\|Lk^A,m zqkA{FisᎩ@jkS=vA 朤A>R'~fT$q@t45}@F;BܬML)eއƟks |ZD?pK{yJ>*ڬ?N :9~eg _[N99GK^MiwG-u/V>]&}R4Lhdu֏`<8;z.>0$UciΔc,{G&<^|xLxWX2Mngw|{ $]5xMDe4NYo!M3ˡƐ)'d3 O䝻NO"a;!G5Z?OCei߳Cd˱V F9Y1`lCO:fԼэ%0ͫlN臈}%?/Wr$S&tdy,q"S`i~L?$KJAj+}we\Gf_;#S4%3Dc;AL'ڞ:FIjP *GWyK>o!;x˴q?QKoC^ܼPgļDƘ?K<'=ڻFyC;xAfCZh`\HSfL2N0Y?%j&Z EΔ87~vjXP)_\c]Ni#E3 }o~'1 l$܁RZy|M${u^k\% {}2>f7& d>Į !dLFXfEk\X `hr,!,JA~I^nA|lA0MwLw7M]i?˿PЦՈ8叭l^Z#^*1զb揄ƀҐ`Ypf8@*-.|Td_q]!V!κ3'Uyv9Cd[iGE y!"o3>B]81Y-0 ghqBu]b?p[I<`z50Ovf%^" ޤ3gNf#4@myO#hxN' Q*n'd#p '4=.9Q_'Sai#\ $ vg>RGImx`>'ω-)K>Cײܵ窔^epB#.9eɒ\0KLSn.MojgXI1lIFgN~LQMI`!q >} 6ݝ$~*r6U@xn>yԲ_?xuOqɼhyI}dX({njpRv˱1>X1a̦ݝf++$!ER %au+zվiʀqI Γ$#yT Rz-nK$Jϐiƞ3@|0 gves߃0Y콎!9䭽, ~_!'˗.Ȕó7'H`[}_=_#hл <1 LA!dyD:Ti 7B59gk;bƉh/AgJ诈G)?!3ke_.ym/ƲjehE kF،(T]qp+s;lH_ۡu#y9 CN f{J?'t@l6<1#B} ÅPўQtG~Ѻy,Ā\HTCⒼFZX؊>9_j,}+0!ld&hrI৔Լ2CZsޭ4 ,PN&Fj7QؐڬmS&y0KJ:8RPw)Z6VH0)=Hv)cȽcS$Q#5ESobhw>jV&?F$%-vBA {M s`0)-; %&^7@%)ǎ}hGMSa[dbf͖ѦQ{>|9u$+,=2쐢@&Iz~ Jy>s <w;Bt=kVь?M wG^{ibqFc Of_A;NJF[B<2Nǎ'dQcKTB2 B'zt b #U!bd8{# yulH $}3ezŧ)g|F u 6!g0!A&۳[0w=Nsџ9`b{AwV߳=^FGk`,JB5mh ɉ߃yҝ24)\)Q&n2MmmlTZ-@AGfڡafyNd?prNCSz9U5H4##_ueHfJkX`f@QiW!s3Jt,.F%=uLvz>9v&߭Ā_ 0AG&L̸4yjM <xl*;9TZd֠\ouL5gcKT'wH.xzo6u" م&2!e in韕K+‘Ex{o6bSɡ`0#'W<͇64JUM@o+f7 4{JBtBNS">{cڤuGroڼ b4ӌH4s]]zE; t6&}=K;œ/r36{;0yQ'}OfIir[^I7bx;4d$Ӛ)FB3651Y^VM57A%9_H=24#rC[AYcs`5 |{^3- 82d 5 uSz8޲j)]}71ɔ$ '-GrqMTgvx-r+Αq"{|mcK ߄b$Z]0`s"mΣH~^ZD!͗ "x ZjoAT.J&̓RhuxRفJf7U`E"`> ,0}DfW= "(7Pdn4u9xgMȐh >2c_v]1M6K'4Q^?*ː~-鲹7J-6) WI]G=t5.7xX(Nz)L畖'i{sIOSćON9l{5~m@ Y^)Z(x&H>e뽵KnlWm=c p>htLW_?0(x@'ϚL;Fg[<1N#Ej1^k>{}'!pL K8K9f(ׄ^R{ցIfyѿVu5Rp 5r֭y%(+7M#}YdϷ{s\cW;QU}C#z5J  7%1ẅ́|A3&,K{YLdgw 1-LVfʔ']s;0F6-h_%2LNJ鶆joaG_f9*0'Iwq\=V>w\jt՘YΛ*830Fd =2umc>jyS]~z^rn* $v2Zc;F7[xbԒӤ\-4{ϟ}gK,nmAA Rm+&Ӥ?m̈́w$B$J0<[Mz?vllYVk=C3,7MhȟuP\?z'_g;Q}-āyԪUՌ'x{o9rI+-V&ezheZ:؃8hMh\}zChc䠖/LːoѸL1Vo.ư0D ,sQ0"KU~B8~Nxٽ;U=Wڧ' T0t_w s/(+UZu[wڝpek<1N5$9s~޽u|"<áT[O&9g׫W|UI%uy=0J<8[t}2[F_b#5`r:{y*^]Z~rd.cq-MWibI©K<2NdslϿ~w#ysUďRnJf&Jʩa"Gq$IcR~{H>4ϣi.OQ%:%qj뚹W?Mhb隷"rמּ绺 Œݢj0s^GK|Yqoyucm<|z+eE^0SMhГǔq>9{Kx}`f%uywL1PG~X&A]L{-A!??7yyM5W=־@IDATj=wΝ>Al/I3&ƔFG^xc8k7(fd| oθdᙦ~>̟R>73 C3A]M˗Za.ܪXc⓿_u.0W*eOpmH8u|~v߫?VLqj[ôm#g$geSx%c)>W.{qǺU[!Onf ?;)#53B%|Z$?T#6^ tzkD9ya2(\.H=!gwIf8!w L>o>9t% ;%6|@Ba724Mxb~W8]X?VvL1u)9t\]z /گQDj?'SI:$#`,}wq)>v̜^陙y)):`r9vkc辽8h7jH0:7>'Jx1CUw}̠{ۯ.]S4mqege&b7- TU#qUrVslIc}]{ߵ 0%څ1k&2&6%ݸDO" 5zKEsZniҾačSDMx6z ~ט0!a -z]zϢ1AC_Eٮʥyfe!87kd$qz ZF$tgEB L;T?{Gq^ݩV,rew1nzI jIH!!t! .^dYnȶ.~3Nt:ݝo{[gݝg޶CCvSĩ{ EG\.qII3qBi1LJ.IB03i &rF`9&l vMV}B-HbMz7F$fNZ]\9n_9sZnَ.ڒ+vhZ_U5Qs[~P'aԮ1h#yjV>q 9H"N?NdGshN@ < $Mɳ#;=K„(꾇]sSEj+ћc9U'O|dNmwHEZ&bBC$oOґgDqJ`Ӑ~j.COwdԵ^iq:z*(& ^1J<2Kr 4) JIj7r6rBC@ՙ{OʟD|8Ek2{iWs)t,α~(J|G@B+ e;\ԑ!8I"""x2PzNWPl3_h]PM= */TPf#JMC'Έ:I BAiB8IGt$M_r9BEvg=ܚt1zwp0Y%pϧ\C9\]ÿ'J']ϙYBVæy:A6`q#eVҁRʬJ9Qc(QJvH(7ʏz(Q(F`0I"N!ĩa=G!"㷺ar/x r9~ ,Vph4&R+׋lȶQ{fòϬAAd2d6X3UX9xk ʪD:TMHS8 N.(74N2ճXm.Ӆ̩ zv\"*)5QJTWP^ڛدtלsG臸 Ynג_ ޅGk6B@!u/PV}&N. a0ajK5UWWSF.'qQeg!>Oi`D^ɄrT1ZLýT+ dS4r`F0-hʠS U@x#JN?u\Q/t:E8w=_ܔ@0J TV)T2щd:_M爎bd%8/sDNTш5TO ) yr2ӢoZ"EEGr(7"m-ȧw筣 *bSʂL Ieu>}l -ۘKw^qB|2uk"{X"N7^'0ԢB3#INTDrҍuNNF݂>P8Kqv9_1>[8l _O-ѓEx8fl{. 
vvOJH!O-ĺqNG*TC!N3]6'`4˕>C\d_.H$d\;dGK /,fV;[poo?A*h9ك&[pG9vӰ7.VD-(^wI8z"(Pg'$%WI_5O #27 $&pkBm*b'\Z' qY2-;?5M MsiEs)H)pjpyܥ[iʉEXqjt0y b'(%!Sޖ/-v?&C뤈"E{T @`ޤqf_qHĺ64W44;ᰈCkĭ<{`CgJkIOT(CyhBб52&>[_gdaK&h2#Ab< đ"K |sP'ا y4<냅7)5D HnP>l'aqшP6A6CNhq r-8ke/q*K^qiJ2)e˚R0)*Y]"TDNCN<烋s˕`īw1vN9,|#/洤{~ζ J wmT>x@aWs:yf_~oa~鮱Y}4C ]A` KR$ s9Z-8a.'\SZ} OB#zA8mkU+0g|Np<4AL3e򴆖mM\0T[W%@Tq%t K^"NPs@;#0Ŝv"0eK_9gC|rLdï, yZҺi\v/sAP z1{ /f>h:7 zi!Lugjbwd~{B Ac/4$A;-JNJ?[kZ/kxDF#1zL$b۪Jˊ~RIĚ|'p j&h`^U^Q!sqW "a!.oN -2 է{ N(͡8E@; 0蕝ܲ뿬hȖ,muݢȘu[4 )kOm.84:ݏf-Ex0w=w@[wſ²B/`|fd:,4Iu$Uhs`g؟O WNjFpH'@`ne)߄N)`"O]=Si:kw9x\%8usJf@trK3N) r*&Ks,78 ݮpzs:sgATChv-N 8y]Lz蚁8AsD3kҿQlHsCmۃpU;zD\[6We"oa<6WB@!w`N?ы 6&%9bX 0p$Od sLԚ&._mxw-TbЭhm>Mx3]9 'cMda4^YWIf&U{(3IGҒ6AjG+(t- ^earZƭafEi:fFe(/58G̭~džl< 9 =7i55"Wk*cZݏdYDC;7 [*B ̡9)y8e/'`$%v*ExFf_Fҿ[];(Y;ĩ7xOk>b2 :Q'w"}@P%@ZR|S9IeL|;(9ﻪ`WnOqF{۾G6Asuh29%iNzy8a%o3;lZ .|iۨHnFD!@'@0I-9w Y`.'`#'.djP#Y=4Y80S˪i1;U9 kM<䞱Ӎ#6yG)ۗ9\Be=${ |$ тslߋq | x)2q,xbϣY_8ȰlY={eK;0hVκ#\¬:B L@K$H'm!9óbT^e%OSAzC) ^`%lR5ܽ9Iڎ@GJZtH PG@f*?= }9M 'uc+iُ ÿ1)Q(B4c) R6ȇ6@Ceaٴda):`E\HSӘvt&NRtn?'qO+Z@y;Ø4Y Zzhr;d+ P(^!x/Ό^-0zaߩ|Jl?U\I"E=}6;"kݙMvy4χpzt ]gPQ-)po/U>_U4&oe IʒVZSI+“7o_͹>C`QJX:U~NO>iɭ;ILIv]rhCդzt`j4:ɪ#H(4rPL JԳzs~D@Nu:o& ?"N-ӧDp SkD]֠}{tRGIU!YMrmdƘ!̦rnirFjL6Rj,6UVirC콿~xMwEZK/w0_F.3vXz '} fU"VhߡY=_ÒD5{Q!P(.)琨4NU:Z$oΊOJ'56 헦4S۞NA8+-Ք8?Sv6E=w| <9:h2qrz~4>mPkU#L;>y{ >Xۋ`TI!P NR)<:c"NRG/4yx߳+#L@i>4yGOGmH9~{o?k|`M\ISA;5NA"'~ d B;B@!hgbLSil6(|;a pd5?gܘޚ'~pIS.sk4ڗ~Q 5蟏eu +2ⵤ=g[a04VPQT(4Sg5.)Hӵw{ydt3܁wsŚڡ$0k`Z ?xK<4p?9,+g[a0~~_E'_PQ!P(zz]@p=5{%{)  :(Ӓ\&S[]v{2τ'.!|8=ij9|@[=)uذ@[ޑEjP(Rt衇 RO _~/?SاI4Ma-!(GrVqp: X,*wO]P!8Vz-t:h;\Օ B@ &mV\^I/Gݟ \Khx׵Kw)vǽ ^M&E#ʃ9XqjS; +e䩣ڋTUD!<SH<":K]t#X׀!l+gpQTAA|%HfϭqQ:2h5昕T8i_A.;tZQnY6NڎsꌑEp|\ao2DbF( p/b"MNj3DWz6@=C[E5W=[J?@aBW@:D"] 㔃~n2R6ŻcIzGʭ.zHetї{>͍N "꾫8g]|::] I~l 9uAv@}p#kqQuowYh5\@-qq/=UST%h.qѢN&M'{Z@'$LJIE &NV*V DmgiWyz>kSiw#.l@Uvb_4 K\usdXq7?VβVhVEd{Y VT hk2qh\tf Lфt=UqoJ+WFegmrvK5kS@OsN׾0Z9viz͹TX\&` ၸQ$8,UԳ[Z~4O=>D~F}8+|>t͔QŲ-ݺlL]2RMQjb]tMqAem=eB5TZn3rqi%}z~6N&kb;t=_Rt!t\)o7͠Qْʹ)AL];b"k屃{yG[AN~mnu 'R#Wo~{4]W81fܧ;yŅwc/Wk&K}oE ʖ.z ۋBYPr|4# b˧ַ=bufe-`ߠ]gHB^CɁ#tR{7X(ƚyퟥAt?8'wTE qG0 iӹ Ze_Гs.^ݓ*'o(Dѭ W_tL[]t^?P;q=<[y}>Vq|]Ms82 ."I4R Ň ֓Fw^y!E+.#n3lsj/xX>y<-9p\% -_̹D1v{bJā 4ww€Fc2jP yqy]M[wn3ChIq T}gD;,9h%L&)M32'SELC4V\ EZ2j\H Q\k8 ֠24&iDϽ]Rn̾11>Ӭ'kuרdsOoj}d.W#yP>qy?F+³:}Vz@zɡg 'd@F7y&xW h.DД1h|uT^ig7I\kXA!%wA O{` m׻nhz5˚%h{AO#A_;sN?t6kj7.­q۰ _1Yw%% 3_Vmq`*bA܁d6&4Y1qAOrHOz^}`-TM\aZSIٯWnm{R%Ǒ>cHsg#m{icӬ C+^|wC. 
@vthpF>vWe`uI䑑z z}"]$:xu&A+E4>ɜXQkʍ:( ^`$?oqBlT؏% 2Xv#'ԃzׁ1`d9C=uN S oiX U%d ?w eCUf 'XÈBC6"Jr~Da&K3{#@}g]Off PhEqy)9WOc3ũޛ&=t%@.\'֣aY PCbXfc` #2{s$ڔs9L:r:6'ĵ!*oz MJ@@ %\5L~ UL@ͻq7} +vmXj[+Qʳl{ ܒ{`Xk~Ț3O~5O&ϗn3EM16ʮDffv +4r_: ?όxEKw}YFJ[rҀDߝQf]/V'iҒ\M:^4N n0k.~rhn:FH컵٢'WU9U\tdm缌G{:̄2MZxdpG[\ӇlwaK˫F#@@ c$ݹn95Gk "쌻| -ߔK\2Ah%fHd:>?4Y:{WZrid M IIU<+]1Z&Y1ဦ,&F: dl>Z,#d~Eq#ft6aMX.N!Dbǿ5)5WRۓ܀÷_Bz2L gNAav?&4o)]c43F^rPyi5W(z&dd@tL,)/4bI _])zMHSh)}xLPVeG7lwiQlP SH>%lF~O5 !oZ4adyq ㇈NU|LЫ5qqoKFrpef=G䍍Ut)%E)sI\CE|kk`wfz؈7m}A&rztfArj֪4&{D߬݌@&»ْM"hJG-3kL\Wj ;\"䍀x.!>C.)f[g[^~wТ){v%/DBzD]c:d 3>C?]DxAPbSrõ#r`KHa~oT:T$i7:x ,%48*/tY#h(+tj$1VYi rQ)"^!s@&OS@DigMS$EEyPHA MB NX4 g .΀8xף w~0O6+Z!q3=$'W-U&B0"H9$-ݴ[#Y ~;HV[ϑ%A7rAs2Ga\yJL>x[ssr0jzk@cs A)0\k\>&{(/$cЛ"hne?- K5 A#X#%e$m9 a)"FA'\}\!ЁHո9`4 NғqF~:4qlxt}?v3_$*B6ha52Ҥuǣta2/&* İ6wyZk"BNF (.^o$ SDSTtX6q!"u&'ׂ62#"F_L{CڿzD [c8LW#QpuϹ+K'W[Cx1tC9~qXCwl|L_ SFqXyŀ²BٟO5Q&3< t/)GKgӻ"{ 9Y]/-2ujV9W4z9z^M>,KtXduG9[ƙ9N>)n\VWyA0o7A6f۴ b3Ic.P.D0]}H 6^c,l9XkoʧQFѰL62c̐D>wfNNƧ'-6؄ZSv59_BƧ9]᤻eIo LA*}k}I<`m++>4j=.![qRA6m L@⸿ -^=(|k)Է74{2ty'):(!8|"d'vdE; 9,1q_,wqIJLu N : 5`B`#|y;M$~q" 7_{ Lp}|x_ e}+hd[r%k&S&1>'oP @G.@=h~rtA8ew yu5WA[:]Tp/M߬:{ۀ/(E%ksS]M>'s;/<u;J MwÑR=kp8#F%Z1*"B]s LC^Z &IA'N S㤴M??21 HUXg};'ij˨1Fϊ>;*z`;,nSInojg罩ZHy.@)k!?noN#Mro1zŇGf7贒:IB @d{/}/VZ&/oȥsyz=ߗ_ȇ.۫eHӴ;7Yһ''M͂4U9jes=8'qj^iY@ၬ!?Τ ɝ֭i wM/6) QT) wMQ!DugRJ˜"DߖG7PYWV(& {%{i) BΕnNsy.}Ђ+kuSK xtɌ:Da&8&Wm15 C, >PuyOℜ΢4N|wU: xrY4y)Y8?sm4Mq蓮c9zV@.VUKF.^ZIO2v[.{YF |&8u7ɳ Mn:5"&{0q@k0A- ǻyq]\Cd2@IDAT H$V D(T{ՂBd&Y˺Ns=$DžV*Tĭ5@]S8o7d z-M50T!RVt4&.HT\.?/ݥA 8r^ CLZKdͥ FDc HN:ÝуzѭN£' ۄtє1|V>[;*c3ƊQ T99nE{oG5AYޖ g'3ABZ>Ǥ0?n(p=cx/9ByGNsD1B@!"M&C1-ltq~kFZ,i`ZYߣ[ w4m}XbgC_~tÛT4ԅ8+y߂5;ȼ$Zv]8ėsT5; SE N;?* *ĤJWo%diI wgAgG2}ޛVoGmՙDѡ!O*k;tnBNx&?7;-se,\O>z[:>Ǭ $Lֿ]~{>r] UKPuZNu#ЃHaAXrw G y$o ţ3Kuꓖ,W ةBz¯wn ):?fSF릍],^ͤ ?%d)~5+x͗sYMS\#zŹ?0๯Z^@^E&@oDY8+Dc-DbM-SSsyʚ]6ڍjA!h$9 ߚjNo4׽i[Xo03}aNX gU4q2o8G3e 5shA[p1ڥфhJ'خuE z3`(U lF$G#ٜN+yUFrnCJ æ݇A"`Rv_*hR6GB1hϖ9>[s:;8H2_ /EC봟KZyTm]pA&I} (oT: #y0ѱTۄ9,)DR0hANt~MF6W7M%$Ά(ܳ[צN%~NSU'TO *oTHDfʨzٮ/3H^|^]PZD|^zoc?+O1k>dhGNhp!T$Bd?DuPc}#Qf| Ħ U~W} MA桅&GҏFj ꨍ~U9Uy-ZP( ١<$yr7zK/"cӻmw? {3?t/]U)*R  UpQYEDŽ!mטaүG = bdKP A0IOϑ?٤.o8„QN?sﵢaP;g>9TE2\1i8]1r|Tҕ잘scɭj ǫA౉fEF(Sv2U;s:R%U(8˻|abw&LM_ӤҚ6mvwA{HS _P$S[;M̓&4h!µ`b}+;F4u7U}r ӱfzjRd3±RY)m<*Bs!.BnE\w1{'JꬡÍ.&soi~u}}/ʯPt=\0I?̾)LD=R}.E9K@Fi/x^p6ˢ2̫THt$pqdp6 vcS/@+'l ri)vCTX U( @#xEe6Iws6jYSz5j-2I|""~L["Ήnt#)v])pOZbp? H#YzRĩ<Ϫ- pY\Is/U0xؕ#@eq(ݞ9OfҽhYQ-:I>${B- ܩԭ!(|_sue[{͗t1h"4N]hʤ9hµ;9a @Hq1Mqoɑg (I҄1Q&zF[dƘϪZMګ o|Wfm]bzgsݓm*"NmO`Dƣ!6M{xTR5&7!G:&,]j6q"ω&IrëDkcEo#:ۼŴlek;Ds"2&0wPn_װ-dcW BVG&ZO֞5tw'b nIGgL3@ ҏCNR0h 8}O)w_eLyʛgH.{a}V}P>M1$ϥU՗p?sHiv,}F>nH߾@HYpG:' aqr)#;_}Ŗ=NE(Wyrw%R 2 yn״OrAF]mRO_,B6r mACeWWv#vҟgDŽ.';op8@ Ck37oМ8_Zx 㨓ZEvJUVrVZ9n Xes}P)CIܕ4N2臽4rN7iA~n?Wsi!y?%u>1~yp bU@N 3}մ&}+i0@-IZ-\eۻ#^Rn^}!ߴ "]Lq4*d]?m4 ۆ8!ԯ.2S-=li A#cFA{jKX!.L\Kַ߲n蔥t@-ә{?I(;g=}GNp5ME^=:Itit֐F#ɩ (ږɔYM)3SۘV!FbߧVrwt*=(˳)ܡn-ܡu z RIn=k+.g6eyHSYgS[  i.8It \sxQ9Ե~݇ yd-%TXx=8ex_}?ZKЖG45143CslnQ^)aX P RSLUfAV7ԡ7p#Syx b#"x/{μ?S!hZ" >Mݸ_7w\[յՔMk.\.D~_}?Zª|?9qHpEnbm#'9KVnɥ^^> T6C'~n *z탅t끘)i?0dۅ`όUF3hRX qUdrUi h2GsZU^݅ipÕ}pkN~::M aPBG&ԾE͎R|啶V}8UК)/pETT=0U8 3910N/qX}0Ir&Џ"68Hjb,8s^y&=P- 9d„i/\ yUDٸMd:uGZNJt-2̹/\[ڼv nMdr#TxG})_k c/*Ҫ|Orw MtKęG&3h'$#5 ->g-[m.5PfϧⴧΖұBfҭ^IbWl9mqD;vZD [C^=eMWsdt96qx?kVnQ{Xj mב~yZO[Cq=x7_B7=}ߢ%&:SK! ߾NX~TPNa"\qxAt4$il{Sq45ֻN;ۄYRH5vKn돒#XȑQnwRPDֱMnss?;xagԵ[oi/ESJwHkY7 $^ 4\oHq8ywzX+Api7/yǿR/iMسrHu0-odhjH#G܋h}tU-k7Y ʤٷ si4kBy9#7 FsgNx kw:(z/p`"^yo` 7{Ű&js_wT Ƌ|ġ>Ȟ-Sd_Sqs2o~=蛕hϡS4*36:q{\)1TbFѤ ~Znk1.gw_4l=-h_q["u-mLڞ>;ڟEj&oX/\?" 
!23,zV9`˭~x8[ Pĩ'etf/R #VspwzwfMsZhJdvrPBcԯM1o+D7G'2D}e]%TMaLN>P&~hLw=oeMG,[ Mer8ahG,:SB[srGB&JQ6w1Ҏ9E G }h۹YcǶ[&@Oٔ(<>s2k NFWo RSJx@~rYSz :F׬=)]˩8>յ"Xb6f˷[p:vp~ݓrk> Ny`*8W*屓a#gM9䉚+)HWtkd7Z%v< ˼|CpCVl#tLb/Y<8%; F_J?Ҽ/ՠ=M_Y+tvG_.P.bSoi?ߜhFS+xGߚ*-rCꢈSAU4NahHCE\ }\ni:b525@t4:U'L^;(Y;ĩ7xĭk> $}eĊTԭK{92'&p(Zmxp6@h<ׇG5ȉ5m}K׏L7 HG+*-ϧc7ԮkioCKc{[[gmes՞)]Y:U†GZt/#q!khzꀐGd1w?u)<ahYyֿg^.!-ާ8/VtN:1zs6;aK"^$D*_,e).=׋ pdqYS/pe})Jz 7 9ELW[^:$5kyvӄ$ A# 9zDz6m̛/B·M/rDOBT=h Q0Y^ RCQ44i ̜1M4ut$sLqT&X_2^^zӤo"IB@r2-rRؗv 5/8^'cA?[i2(COϱwj-F$ed#;}<1m_l4K=I+E@^n04 K?]$ﵚxPIHyp{p.lH`%Z1'wWW-syjhN8atx#a8<:NOTRp\'ݾ8y\YW_6]yFm _An?> O6vҕtG%ʱ[ :}>N~#m؄ 1i@ӀM4 ASqFQm+{<֍d;ɦr_^UG4ʍB)m 0BEYdBO=}/LTcM zG 2ݔ㮡W5⎏.V(1%ie)co2JFyS L\ᄱF*JyqY4b[fpBavtvY2'$V#M4 hHBA^i*_WI]C9BZԫiv|~H(Y2qKHyYĞO$Iy|4!B~Cq #&=[8IehKM4 $ U~9||OϼϞDT0W/I9?Q08lLir:X~NtƒM}]J7QpWi`i`L^}Pt.F yP,@S?mc.Gj|y*)L/i( `K w\aelɪ_^d飫'(M[zShNZi@Ӏ$@3啭<}o9s*?unz!W'_N?!!r|x%Azi ;dMdd|zU4]3ik404\JV GpByN`iR[{= gZv51D%sҥfk2Θ[ȧ ϴ*("yHRMT-J6U !D]ӯqn .dx$oٟKh|46HY>'ƱՀj!6{Z@S^1:-a\ayjtDZjmqWu/Ms e Ji8TfN*$=Uo]EYD*!^Jsml SQ.-R|4xwBxSxQ1 0F pBDd4 F{nwv?]Х3kp- ҁh94~LHx;Ԣ(i) eSb>,!ҢYiwaށ|HJ) [5";JLF ]/{Q=/^ } m9pV-pKFpD#~y@@y㭠)BX5חgP[~A]n;_nZeC5!oc7[nk9417fCSG?}i>{2. iB#Brh z䅍m=Oy"a#m?nvKK@/ 8xdVڛV-\8m<;o3+N] Gq@#ԭ6⤁^)Yt%t:~]:v5=2tՂ̉Q)V$J7[fN(igq-+Ɋ8m=gURFfAҮt:͞4NgOp)?W6\i o"4;s6cWFk!!U g2JQ=y9]@}sC.d*+3e+2gXZs 'so35Mtż)[;}PpLnF8!V`m:[G9>G} qTc*,d~OIj^I'XH:[Ό7Nk ٢'5Hejko,Һ.*>ƥ-tT3YYL;f}NQK8BGfR(@xz #yQ4Rh>|8xLt0g\]Hd(іF;ϽKM+p)@p~w<#` Xu,z %~pGc: 4]%B͒M+5'Ξx#w?J}Fn]=-I r Ɂjm 0^弛߸m?xx^=zH%}J:Xq CbgEU ud9H,eɜ)|$}M46/k+<઺blnO*#WiUrG!J .aݼr'_^LYvZ{\ۛ;W(`~c^(υX2FIZJ*Er[iR3 (;y&0&_DTsԻus|H~\N8@6u/°~t:t%|2o/\K^ /\~sφ)WϏXD'9[clGAߒp%㰈(rٖ3^tZhj^+]fFQvI$ t3BB'*i2I~E"/X6b&0RӝaTq;#{K*ON`~Qg,540|_vz r2mȏ{oV*`wrx~u}&^m?dRxk)qlyJ ˩::k"U3vd0D T~o~TV)srx,C׼O]{eum(mjrSMCK4}x{!q>$}9]@ 6's/븎n`׺wDi;{)-o\~6?zR̵U s*o_Ta>D~$INR*\*+:;]ʰ&>vy$Bƫcuٶ[SI?' 
yX'C{~ XH/eXRi-|d( {ߗ,)fK/F'xM)Go3(\-IJhul#a.]˖FfǛ:AQ&'Ȃv/OziN}az;6:QS,eu.NF/1΁}]ȞKktHD7>]hD2ݪH.<ݨ]r9#tif8+V\B5< v?Mi&:zE #dr~3rLT jy⻤<Jr~FDmC'4 v vB@q" ^]Tf+ =w޿u{驷of+@ 2 d!ĺdϕfW`ЁyƳw^/p~#˅5͝Ѵ 5vltva:X| P׊ s|@D$pxb֋ .eNy3٨O6#<4p*A"2qy CR0@:ւMKB2׋w/:P N_ N%ٶ "Q).`I&lpPjEf+We=soba*=l{%zOf*>QB}opЇ_[uvݓbM'cU=y,%4N5<ϩG A,7U{TvS;A,}imLktIv٫fJ^ &1Ht)ۦ[L*>ftc]"Q}K$Y˿&W)1&7/`Єc{f8!~Uеbgп3"4Txp(9%!l}CnxuK>p L;eD ;#`eӀS$9xh)#CH@IDAT O"Aj4:!2#݊8BЂb"a~9i\>,ajk& $ۆYEq뉔r**6G_{JD'!>w݋֮QWp\ #|98K۾Pk'Cwz;VGAw6H#8z2|IO4λ:T]8 E# o{$edtq{:EoX h:"gd+碌M3M཈9S/n.~QcӍTX(?O*L<t"S} eNkEh@o| ھ`^ZX~ubNTP,$Tn_ xR˞y6I{>lGLh:"Xg9)\#eqR@̖ 3ikٝĮdOk)<">͢c ,GE ͧ?G2ˠzPYlu^ZdMC/lj;OfՄS٨/F 4at3"}9uJqP9h=ƬL1אsuZORN<8q-% ,MMs TWmڏ4ppp!^u ±*fCP1Yi(ZVdC^vy(D{-cSu4DƥЬB+5J RT€)ŚBV9)osy X1yœTh,|_ kG}8clEM Udj"OCȀ9OZ'yp/1usogKyy"Ƈh @v::hY ~6wG'mG]˃OG=)= smto|0X~˶Qk?Ҟr|8Uprx`hSJO#8 ҄a>n*ɏ)1ޞv?^F9vJO YtÜ&N dt⎤<߃9\d,LÖ y<5ďy+VJe'U/Df8YLMq LRd;ɹ "v}<:F@ɺGaAB2 ,YuX H7yPc$o:Dbo7841mnaW3Xg}K W]liHOs`/tх/w8;P`#@ewrT6,,;L'aSı[h|& sml +bw d6dA.*mCk21^Ic /c7ד9#mܟLL1hҍ~ e%fyw [Q 8{GgG=s/fRE[HX8h]6H堳m AD/㰋Iv:ȅc'Z/8[:WX_rĊ?5s=a>,9nMsҔ$<~v!iwK=pUa@hF!A>&haYBg6##|_ dw8vc:ė eL|閍GT=|ˡv*ޡGLJS֓y@6i,KP3` 0,y>rGiˁcl]`yt)@T  ׷4دl6^2K|[;IkOoOgyStW>X!vȡk.-=ܬ)iO9!VO1ť!X۾}'# $DŽ퇙< <^f˰D`];s:8GQ *Ńzg#fe&:s*9o':F+Q9݇&IDkfMO_Y"/Ӗq@`ԉLJ2a ʮ|Fv6s4>P&pσ)3#CL!@&He4HNO I='r5 :]LS&g+}Y2ȯ HAʳbjc.594ϐ6=-iSQqCCyaOiþlNhzn%NvK7_WͧV%?_pmrgJpThqY_?ӎ+*bO4UF]}yhx/64$?]aEF¤ӂT=5;{ PmqI}beuCԀl ,2+ #ab4i1sS(B9w( ""u*&y4 *I/QXa)NY;zfM,P35@-V&=wuu 4QEꢸ87HfkcEd4NU圦;Qva?#:7@'٪MJ'Wmx]řOMs88FyvM~ϡDݫ vtTT'5DURXhNEkۭiJ"LS;}B|<X@H(aٗ >l$VV'Y,HpSFq(p˃ =Rd|-{Úeir~T=ݰrbLDʨխ HЄ屢A}~w՛q Kb̥b|0MsWtd^#,z⳷O?ylO $B(h[3kQvўz@c?Yp}E-ǙFlݜTOĽJha :$xFiEpGSq<\>z4yMh6č㟘c.xY{]E?5iSP"m_|b15fa2eebKgiB1cNnyi,ayT Squ|W](t;|g&>|9O&.#@~J=dzmJe2q@(G0S&'y0wP}DɡW\`42eVڴ4kv5SϮ1Mh)XZLJQ#=/ u`rph'2xن W݇V.g%QYy_]KSڤkL*Ίy[F_yY:G|wW!I$,N{X-Yo{xyӊKN]2m-hr7Iy+^x*KMi(M BmyXK-xquӳwm`E3 7[pb^' 8%ꡈgMldԩ׺OU$E] 0 P%#j\/ćk)p}n0ylMCǝ/=d +Qh}i¹ڐnvxs`Q<>w::R`+6 npq4=2Zi7ۃ5 h40h ̟6ީ ]݇sY> G0 N2gC-C:Yا Ӗ Vzŧt?/=hRTH'OsxZ]-fA4IkMuuGx^wӨNL,̢])]#΀?|. 
W=i@@hz,K[DDy:H@NV$]Qn@钣eLW{s/5;z&\$pKQ50sμ,{ZڔnSUuBM #/xv2~:rvbL#8IĸOiNQ*k(SOj 4)Nz"k ii4~ \`:8Mn4RIY;e8L:~8Gw=14 {,hS|?fm{0rBbͩ_{]KФLՀni1%S.^o#| H%xJny8ɧhpDP%ѨḶ34q^}zw䧋?oOfM4i r ح|TAhkq9͞4F#N;s4 Î@al FM0 K 3hz[&z5?u+y@x= ᫭9])I{^ J~Y!I4*,N F)Z4:ٝ~jndҌJI1lG{]WoI*Cڊ4w=ky6|LT*G?+\9zCʱ~w"זF}hGw}M1'4Usi8 $8~]n7RvcU7HUCYCnΈySasuLcvIxe(Ϫg"JKK%{]WD\#'MȆTt4'-;y[;8ab'8p,ݷbf`8;Ns819ŠkRSEƖjn&v(M|*[H|XfOK39CR- x>M-J(Z#SU-^*2pGeک&zhY:r+]yM O'-\~t6=qS*q*)ZTˈPX_ɠ7W{YG<{WiԏGHKqV'UG 7nC4a4fIImN/9:KFC"%򩟠lG&?Yy x,edQz:G 57!ϋƽGh]O߼(m9p9)Ҩ~r+0LCNL{gvW'YT-o|k]M tX#w6NNMph*9K[K (tW_zuf{NtiwyQY8B:w($R@In'ܵIԥgd7<#ϹJy44efdfm{!3rج{Gd*jGg}QঙyUЧ]BxF,29^MϼK.~S^A^sJ%1;|]l(O]{ELmƀprkk%6ogu8vn[sդ~sBܝ:UerdU14ͥVJO|TRe\D72Ǻ艃wir>@)Cy`Cm<ߠ\FeSϵ@#_9?)_eW&Հ9uۜ.XFp 7߮||(5%lur\l[t|H4X%Gx<`T )&f]t.[{.hE2^޴Z*]TOE[Ԡ(9od\,߂![, r2;< wP PEՄ-MWwp ~Wn,т'){{J%Mv[ Vt: [Y>e` j6[(9 `G}5?c<C8V>ŬESSwWIYoTdTwЁpY4ܹe=JnKb;,Z Y[DqXIIz;4R?X448M) }:H 8nX%BLL=5}k@Nl;L4aikq{=~:._`;n gy4&^xo_ȶGҤQjZU I]ϝNG*њjnѫY"J ܷ~Nݘ*v00~zSD15_fO;}U't 4%DqT.FƤEӋP?Wjx-lzGmw}212yڨNR,(SdeO&+0V'y}KRC?me¶<7Ҳ/4avp[X*zmW^1yQ:~)KA`NS ~3_ _J޲9Ĝ&=/eۨU\>GӐqY4&/5ߑgi1'HP4Pf?0z~]&<.vtϓz`}DNeT)ܾ4Ơ!ǵyc1ui"'RqL'`Pftl{l֦+QT6jt\`B0%tb}0$do0ekGaX:8/Ӿ"z"8\I+MHʸC#z^AS_y|=UyZ 4> ޯ+M<%-`7Q'c5j3w Vq& 4T2otD1RwY架NlЀSo@t5J@ Lr[?/I%hPX8颓gxSN1$UIc> \:92W3R]4D޲9Eq5˖,٭fj`xff1RB+Gsw9,[ӒE4OMԀTy[bΔ_Mz2`r۹۔QvߟiJžs'lDy<jj|X8#!-JZ9?X'~R[vPFxs:V\|_&^IR]Vv g$-"AV۰hhدx^~ω IB>7`e #B [F̮GRMԦ{_pݝw9pctY^8I]hKMqyBwϩO[AA^ Go.FD g|fp Ӡ7ӭ+4[@ -Kb6aΙ<6x9'%G zΐ6#&XrTqJKz 0oh89j@S\՝I*<)׵eA _8E:LMC܎C (]֪HX衏LˤSM.e@j^7NzbJy(COϱwj-ݬ'nJ%f<ү^6RQR_y":_P_K~NNFC ^4s|FA^_MO}pofQ-5Cҧk򉐺8]{6B'|9‘`1͘I]/[Bn=%adKMm2hثW~JO?ō{_%b"oEL'u8O/oP=^8z20'YBi|~R?3x/zΐY҉Z_nkѩ c(\7sR-p-?S.~y54Ug~3~4kTFK8㢼W4 ^rx| @d£ Jx~*-ct=}6s#,MkYf>G2#ڿ^˨lb!HEHYq &_`(ӴqPntCR0HzT} [J^eile6؆Ejs0q~|!jh9.#7|C-:`ZL vq hҘf3LTB;hzaڟv5t=%K3)lS%}&KhV, Nu'ʠ4xK쓃*h/Ҁ0+X+~hKu>" N]=DlmRB=̮5ROm.e;<5l8ʮce$ S\]b;Ir8`Es;Sˀ!8Ou.ls|&+9=V`ICΤ..v{R:u}y^^>=>Fեƙ40\OOwN2 2_C;hg;GWYiǗ2D}іz~‘\硧f_{QEQ19oSЄΝLp{ Gs;,=$?},tճYfDk/z5q&G~}O.'',N8i\Ft{~_ l {Qt?Z䄶lL 7;j}nC+_5+{z+Þ襭w9M:NiA@"-DՃ&[CѧxL&vxIs$i۠J3 tɤ|x[ł1?1cabO4BSƦM;J~F^:NsӋeqŠa)B8 ZBG:B@@:()h>9MiGTiz˰w︞^4SI^,^13׊{..} -+D@y~2,39nAN`13htj@Nz~@NT;qoP.mAwSAW4C&3c7SFǥ܉y? ~7"Bٺb,j=]kծ:CD^UV,,LwoP5œNדOyje;iOsd#mQq#jUx,BUtn38G8&|G+K@5i_iMVܾÉrU .׶h7۠nx_,ǫ ?5DyZU8WO=54|C`~,9n[CtFT#=G>7YȥO*@R gKc 4%/(= '߱phzn@ Ə]GY;>r4DM `䈘IXb>eu- 83;5ȝ46çe4zjs8-(#}\Κ1&kj{`5~4 $Z}&ɓQvcCSlKu` 'tcUOkXmUwOؕψN*t{v|w;Э=Jm." g|ɲ)v @SfFXI.>pG "Ӗ#ZzI_nbA}-p}!44Tݗ[;奐[ͥY 8"ꢣU5t y?m)>Jp <4 h4r2SEH&m-G;dh@$L~м`,8ydc ChQEَ!201-`\'R*[>KTiA!BA5=g}xk 0h%\\n}ޮIih}oOpo)I{n&\҂5 px;y^r~aݴRf\1چF#M4 D(f:[h8G[QÔ|"''''7'Gr$!Km>X~>d?%i٬=j \r| 0h8} K: <裦:K2,^$P3Ku'Ys0:ao Cn[Gԓdo02 5_m%=^1[ рQC+_Ӏc8q P%G \dr*4 @XguwY4 ̰몏]tF4!ֳ mP/Nhs{@g֮'_T4"h Ի}Ƞjmh䮿L^W1qh{?7 &&SB_odK8y}C㣑P 3hڸVqT:p@xqcWFuAMm'dӖGꂨLM]Ǟ~k'0OKx Α3ƋyA֕==G*ks\J)UNgw( >v=E.FF~NG{\bS8F M{ B[bPN,B| vjnd_-Cs#[HںGsTM/[8% :gHj(EջU9)që-^I7sd8DAO[>t4$()VL$__BBm<rr;m;wdзj}2IX,4cU~Df||F?v;sE>yt?T+򹤰(bL2عue=Xs\G W'YB3&Jc;]Cp`B]Sx8+ zxbtϯnEJǮ^Hv@z.] (/_&\Iww:.սX-xCy<ٷvSSR +;Dhi TMr 4ɺP$Y܎RM+'H9t4u<)G~k۸EJ*xevT܄=/l?x;@Y4ilNBEĽSL v5`n$FUY]3s %VZWEVѥIGSiy/ǐ+ ?\ap#ak * ۸I]+zy /^^3$yDz/:;vJ\tWn^ɓɍCYwdbڽ%b+/3A͔f @"@EtQ[[ Tvw ;:-7 aW'/U sot׸\MPs@B#%.'mn gK\p]hk5pu;7 Y֦?~t=OҶ [^e{CZN^U*;CInkK1SIYvѹT-;pT9o:2hi==.#R{:J^?y#:lM[8mHW/IW˞_|Y>X ">$pۦFF44p#,p{|WnrYL'Y2[~_Qp!{,l)OZ{,58{O*7=D(mG Mb 4:tZXUtq)ڰ:9"tUÓ_x@"4߲j!48,onf*-3͟*;V8wRK$Y8zs+;S`믜sNrrү_Ɩ = [O/#Hg}ĄXbQrkHI /nu 4hM3{M{G%\d,ȗΝ}8)l l?K.o#L!PO;_yٸ+ DUⴧ8} ϙN+$Br<~)-z†}By,G6O{q\@-Rgjod2 K4=zYx=2[P#MI 4%-v~={{l.4%?IƼuWQ}T{ MaCQ,! 
z'gxzIB(詇bCEiRTtRHϫe_^B)/Kgfg~ίW=lY^egg\y|:Aec'5'@$ж{>URw<.ԪTx)5=۳G:v&+k`*Z`zYV,YN-vAoWsEژUkوۮf;ҎOpW%颶(]^n-y<ƚ|B8 Aw^Uk&<% uUT[6p?Lh/4Uog/nZQU>OWaoߑX^Q{Tʊ/ tT+ ˔8]sϯbcz3n"}}2ϙVSHI-@44Ph޸:${EP6yj^WAU5MT>PN;ɮLO[rfd.-4>Ĵy>|BG}mvC]#:.GOP+ʶdI]aϴ)Gd`,y##nJ!v=@޾V*Lgbbo-m[EpE֮E5XYhR@$L-cjVK"XQ;TOu%o4wěƖI7-)| 0+l>Qn;ftWI/JDML­n_KYTET(E8 SXɾL.v5%u L?W.(IhFX/ǦK0-1h =9{}e0SYP-b2OuĶiƽ@çY/HE8ckKz]D.9?rm?)Xޱ3ټTh/""rQ<ɑŗ (#bj}Erh۔~PfG;ذQQ3w٫>k,8Ws{XDH[65XxBܪ&dZZX@_]$IEڄ@Ee3a0~ pƁ iqU%ʿCNp$Y0gL3KtF1l<,ED j{LHo;C;H}^ p"{uM'çW H3AVO;Y61Pջ^X(CV0֧XT.m\w8^ϒ SZ^H0ea͛@2^y'(eV:\l7ΈE;,+sb`Ywa)y\XH,:z߷ 8\˗r :@ŞU{u5MFM2C7hAܦ 2-ܽ#'b0Chi, zA(tUQI` 'LxrvGP/VP.p_EۂNd]8QՉ| j GhF^( Z-{2ԷG{2 ҤPk{"gQ,Z.6s55cK ?dNp@wZ82dNsQMɃy"QΞKR&r:5t7Aq4A4Ag$oŠء<ֽh+c;HWaf d#ZlR*w%~GEɿV<7};`*,+k_EUm5= Fww TzBDݔy 2ރ;~m2rX>qE} };Α稜Ɓs9|<P̿%8 %""BO^@q@MAwK ̖Q lȑ#yI?.SvZ]FF ?)Yq4}!}DX(3-߀Kju*D } S׶D֫Kk8ѯ'~^|N`n N=?جwU]sFn*Ljq~vb4iV2zZmcR함~:#+˻LL)׫#oK\⨻.Th>W / Bn3;'&aN$#A~ihYplSؕ쀥wmjwi"AڄXXFUS?*JW{Hv<v'$ _W橢6+{l6H&QM"oSglM{O{nLD*2OT`u)#:SffxT9+3_Ojݶ2ʖю5 4,w &?xFá>(S[|c;|ZL9Ʋ zHTqPR NMO,u8TK'(Mj].Ԏ䃁P⛽03tP]NNEl^WnbDT0YaK9}3nlͲ ͪ24R&r9NJH)44BlN5@ŀ}.qzΤ? vj;O (N'~v{gȬs5;P}\ }/[ޅJVWҷBv6+ Ff@b^U42\5x0%67+=lP7Μ7Qf!b?,Ch"nvբA|lr\\IF k!_z^ TJD*74|"^s&^>~ u{߅[^ֱڼ%u/2~'&TotuL8/ΰ&haf&USLv / Tb, $FzP$z L #v8#)2d @E3^]Qk^V IF5?s-lMfvc>6To@(\\, 0u?R˥d8$hcO:(VYVj@w9`Ǜ`ԓڗ/ԥRe@(؊EIMe9W[0.Z2Ua2^eqR$)X7}Gavi#X{8u\Me kJuE{Zhvm|[ns8 IVV>hIGl Eq{JKbn"QWKJ QTXIB-:HJH}%M"yŤMj~'a4XB;.pl9=-pP=I5,,FH3V7]<$Ks{ c\>2NVx Jtd:Fiy%B8/bPX3}z)/C ky!/_3tVK(h![!dW5zD,p`qKbcٗkazM/vKXt1 4їX5pq#9Uym_} |mc a^ԿlPxRibR,˞Yʰ.{}]k.q1x Ƀžc|#;úGuWjc&HEdlg+HvYK K4r~fyST&hLtDBM$N$i*T>iӅBJQA)Qde=}Ml1qf1%eKV=06م&Bto~ XVQDn{vq4>Tzwmh"= omη簉#0hya: 83ܿvVp9VI؃xA9V\m;jhRA'j*Kе6w/vEﲎÙƾ)wF wو[g-p~Gߪx *U{,"ٹB')R]P p>GYœ,w5[/ U'i=u۰PΚ7]^giff7rHs.g;k RگT.^|s4/?땠F8|AjI rAYYc>K*F3 #B0Ҧ,(<6)!p]JNuq8@'4>(`ƵKě[|2jMi*l" ׉]ktNɸ u"$8U? | 3BBjgf<ԉA`$Y!D&uLQ-a [uis٧kFb3}~0m|.VUW5$8W@LV䖙K V.*p=D^ݬ;rN D`%2=<IzGca/Z(Ӏ0D;z$m™}ۆu"c~0SrPUSFV}7&zVzi0 &MhnI>HR# 6!dR($rpPݶ:j}Uݱx>hu?>lܟ"Gو[0.Vpk{>ǎ9NjeyX[}*=lC$i"۾#i>uͪ_"B-]z@Iԉ BN hLu8[44v 8' nooıKϼ487F:L ӓ F:PUܑsa:MB IS_nIҨseIt,' .)9@Tș &RNex?YvllA׉yڟ)+قL{rCńqUN4yHj ksD@ ŀ)vUjEUG@yT/G?챯`dHҊ&,Wnۭ*|+/** i9;|µ68Cvi Lh'rAv8ټGE. 7#xmOO0ҪT>oՊG(ƘVQ8[;r" d-Q9mM3fqjAu䀓?oEjS.,qH< vmۋ-yYÕ-y}M3.J\ir 211FDIlnYeB Kg^TD&$>䢼͟}=GFGjoH$R[$M}rH'ym86U?m >s]h߽e`#l )uM(Skqyj2$EyܤOגL#LqzRuP'Ogiqv@Z]sn;| ur ytzZ~ԺiC'24j*ߜ;8 [vciҮ/Uq H0DW?}'7C7Ua&t1Mhn9 ='z~uQקc\g;ERa^hs m+T mw W*8cjpk+[ue\c׮b5䜤jE o(p6zͬnnteT^K^$)7P'#ӉvSMk~󂗁2=3q>:ǎ njs. 9'f]M]h*!9RyĽIp/$$<x"JQ&_W=WNXt툭Cl1 mlmF-µ#g20 LF/KGCMM,]ߧǭÕ bΗ+A TyѤ{Þ }'?xܮ(:S<~wb l3ɯC6|s{zxCok=*bdj6hذV|:w +yTDb!!1'`%VSuZW ŦFR#POa(Xx~nkoq3#:s 8r`%-jrƨDu5ˁIIe+YAU+ߋt~ Z|+şr~:vo͛7)\\Ah5]Hy1Yv+Ŷk~~mq"[pսͣ6oZ[*ܿI5~#pF%\AL u?rN$ly1Pw8˻V@HvvMlF<U7zchG{z(O@&n$i2_5I{^-C"צmMVƖ~(=~{V#E DꚛVC?=S7zh#pD ;uOyH!Sdvvp"f4D bDOvCH]ӯ.x65k{2F-Z%À_W-^µ+%$9~/5=냙)+ 櫾 ,,77irp|P'uB4"YMF8viȍ}88ZO{9;eb҈ۮf|uk{O %0l31 2 ew!6zY ^o*F-C/ ԉ/dhHs7;6d}_JYm:M k{$+\ި ۧNzGN!![z6Z\]&z( T{K?x3'Cjzż#z } iOf/Xt*!O $/^l>Yd26 8USp_Bե.:{kt'u"O tA*m;|„;tD!TZmeZ2 .]OkGnnΩG_*+:O4H*W)*`Şm4-]*#YE_*qq)~&vZjXgät˟0Ay6O tԈM+Iځb UZD{uwIsTtҋۙڡI9ꑺq* *p~hI垂bE\hĸ5l6d3!{X$VIIC.U>7{=zJʕe>hut](m=A9I%`L6ʂ&^yILeMUt޿LU%sx4"kPwNƱ*w76y[ S']5? 
s q>~W\[G] CxߤCJқy?$}?͘a[7bAFeT>V4MqdJ=g.5#畞i`9uK W8AQ\Zt @.l=.z~r߫C''&ͺW#]xD]&Q=&UAz=-f.uHbk~kiFHF#/Yyw뻆c߼[| ldnj->n u_nsOozmbYx;x.IsDy$1yXU~ J%>JoOU)»k(+dQ_Ӷ-;_ڣ@990iU:@zߨJ15RyR# <hRoEC[_ל,U k^VCU8p_5kK.5R rS%6q[NqMJ|nF_1)p?|z47coYl;b@@zv>fTqD6Mwq .l2#PJ%SYU=˭@?Xy=-2a_p\Gn烙90i%֧~03V:&sPj ϼY '@HWL>Cv~oLyGݮ=_l KII1d3fE|v9 )01_OdvW6A6ڙ,w>ktkB'ٰʲrV2џ2Ujā9Me>'A6/@T;{#%E4Oz%q !Uk$ɒ,+LJ`KTaIgL5N4^Tow EEV2 D8 Cl6F1#\V|l|v"?p A +*yvub8!:YAJYxl!6'Jp 'njk{P3wtj ǎ݁8}i5tS$"F0Sǎh8;N:Y bf>?{03 ?M<`E]9pJMʤ!ΰ.mTIR # PKSfF:9NH<{%Rj4s|K*k9L3K'm{$n<4qU=|tvT [ sgc0t춧ԫUiāIݿ9Ød#vSOJ{N=\u:6o^,CJba;cnr'3#O %HGz4RȳB;=3H(lD-5n`!qf#aKN;yg^b`PSSznv975:Y걶8P=5_A*?vAyFlLTX~wI| >v>9k޼?`>ȅXsٟ]:bN&\mv|G+8rX6-$xnMq89Ix`KL$H~6yZꑝmG.=F^gѽR绾]Z@IDATjDٙ +C_cj^kky<}>k8 59ȓpSqhթ1u]k$w 8>ypa{bJ"a[!O i0Uo-> N,\ HX(EaFmal i f2aZA΁;p'2Ɏ) kVbڑojx4 m_4NLJy-m#*“b\JȂցr%L%WE[юjhRlNJs<Žp$Dt!b=MvHۑW B2T~N)seY$ybQ'$K,pΐBM4 : n\Di%7(Pi$0,y{%*FfX8h#\.ATl__I$wNdD.ǹqLLޠ^I]㟈2rX_;w.=U@%N&(tERH/ ATfۈY+e5Tȳgby^`9ad:5$tK"0op|xKn)YH}N1R%hrF4R$ȁhĩ8)$N i\?i{8tj CսIX_T>;ɬmkY#9]yT!y ؘI~о`&&ӓ~6)qA0IIx-nS}<:DYsxȓ)$̊$8DY1q9x́CYYtM/_PW"txonvV>ONb؁;6 䇱ϘMU\U9q%=myI_U<&ǓcQ+D&dM>fl՟xrCOge+ N,@pp;'8=i^K>F =1515WڄSkRUu4lE Bꉲ*{lcZi$N8W@g&% \dZ L2l~l%敷ejs_W@ 1_c,Uً ?Ox[ XUx? Cezr$IV ?*%v&NMHtYA8)2w]y!D~^94\4ʼFH1MKJᖮrO3 Ki׋/]>]T0f 4Op!;wpPԂ"%p"jt8VgTnHVh.[7KHHIBpv\{QAo\I4i υjzwutmUB&p. Qz%i{ ߴq9% k_<~WqޗV[ ĘVvZϏUջ0bL0wMOei5$A1 H#JM% #wI^t23aF( <u .2AZ+HEk1iVS3BKE_=& m^q\9rXL@טqFޤ%FMym_wPCnuF6 PEsR'I-Xu  aO 47>@E-LiFYQRi+6(&0F3P6ž-.R q KĦZX-ieڑ4t?h{\sw3<#k?YMOJzLVM tY3cc7 Ǔ[h'S^K݄{5n!?= x[|'Q$NmbM)WV=I=M 8HT>U?jB%Dws/'JHM~== J<Q@(US{@)1To}o}*QFt|ޢh5O + 5Kt:]0`CZjt0,l yBd+ j +AsSj@$1/V[ɵ|\(''3? xT . ^_ IAr^Q!Qc &j3$N<5N4V&4%",prD ++l=XW^XO^7VM{;i&> /r<nؒAWv.x8MPuh}Svl#x.;+:FszK#fb,<;3%e g9O@MIV^ldyJBSC'N` nY0&M=, \rI I"B J!D:߆,^KpCV]S M+\|U~Hڬfm=yӒƁFɁ3RąMzJg3k+Pe1P3tcsbSnj AK&k d :[g)/$}ՔpG^XvQQ\r/yc{Ǐu$CT& 8ߧooA*OL! ~# &cm )d(R^fYCX)LL689eFb|S?8@ƭ1Qb[A3R%i))k@4Ίwӱz!qJ21-]vz~B??;_ @NF GhRPj;}Ssf݊0f]$ \_iK#t(,T,ZuQn-=#zZ/ ZF%Á%JL:鱙 LpD"my,( pھ"9N \? 
XL%~A9o^%'Uz]t2[Q=h]4Q{(WP;j(%'ߌP1Z54obԿG7-`=vWL d2 d2H,gQџի$M zBZkcmb9D7O\8pt5h| A_XB8ij4g#Im#x?<øcO=NcQ7 Y,d:9CM*w*Z@uS;W >%N_VKw7r%KezY&w|`:eM/۲v4~q@@9`6p:J]=6Dqk%Q,&Yp q䙋U '$VR WAiSO=zi*2cl ~w;95Nx l6+s:t2bB^tLw4;)XRV|m5ER&r9^P؇AYlmgݪ^5UlN^"y<UJ+] !^|HW{Xtu,("RzOBJSlBӵE7t/} B`.C%,u{s_Nu?857Q~?wM:t J'I^$2#ulM._ %el7 X iBs'm$Y12!'cX  e!btD7y~4n"k)@ȆE~b3F@^GNjxFmZIÛ8f ƛ3=k ?F'*K\ BQTj)X8@w:_ 4Aw[uҞ>M ]"i~$8ު~LHHHffLI[s1D4tnڄ2<-$bBweט&]!yg}0GMƘZa=%PKrE, p~@(Xu$o!}uX-6L~, whڭʉIEV0۲ϋ`U2F!Ⱦ$9a'a)ȇb39Y } XHt"#z 7j kLu< =F<ͺE m&N"Ny1zvU^WZًpcsaHAL"u%h3|>aL "R\~InXxXo|kGE/^L*!y-jW8];u9|h$V|L:1_Ov.-i8P 1iXބ79\{|Mx9gKw/H3mnWQ*,{gg }^fZNgfwV\5qI>v{wnGs?ĀX^\2Eԉ;ʹ_ X/T)2wW,b' _CN: 'H2NN{t{i:Anlx@%9V{iq"Sܯ!Mc|#;úm^mU9杧:yڳ^''I!o&%LDNPh6 q&HHU|Ҧ9 >xB<}#s9s}'iT<7"T^_#K&Ƒ'8-+1aԄ[؇ggȤ}yyBhC:GQ| #_ *#}nu@:J16+Jxm6vy-q@@p7v<t=/Աc)⚪֡Ҋi&]^7ıcUB_Un_`2.-2dp1#I o'I) cgK*<\ BzMdXW߷V6qht,xݔÞ NH$IIm$H~6yZ^YN"<\uL{ލAHG۹3NBV955#s I2x3JzMgZ=i[ɡaQ{LH@큈 @#Pj=/(Z~8 Ȳ1p{%KN*cZ鎕-i8PxqŹ'}Eg.]zrR $=2P \栱:=4OrS/t{*XJ0+$=ӨlMRU]io'F-A!57rC4Ia@~3Ц][.H&r,*/غq,ߠvW|S>%<)b᠉;p#篮WbaAzDq(H'9 DwMDc TFKkΥC&gMTCC8@XY?nʸ991>fONvai9EysR|Q12d4w΍zAypRb9_&wpsZZg#4@?ׂ#["qMU/8nKSq[:eraŤ5ݟI}Wƍ~em&sB3s?{Łlv긄b28ND0GP(^l E8QH8Є s:dg /tMHsmúm1MIr:}ƔG\/TWD`DuM@ I\.G&D 8x7_Dm $N&I(F7K.z @o'\ӷ_m@_$ t1VH+K\ x uӢsLΣ_(7ka1u\'˞(Be4U_&MT^)`r}1=^ 8qB%& @{^je5ǟ(rl>>v"h82yA M4w :gspinhc]k5qR䀭uE'ٻ)zf6'ea(9A1TDRpD<SO=IĜ@EA6{5[=杙٭]]]uMw,Nk~#'i1kHDf(!As'h^_($K͚}Ǵ)SIA-ҭgFAa4' v 6u8Z_>4ğTu&E)m/en渄\h>UM`3K4IfH0U99Y>?ܽyۇEF -=j-Md{;wnP1N2q2隊g'谦49*gydT[ # :QPا NACeRchsSS&?ߏ(Ⱦ¦ק<XNB8q2HDbmuO0OAib 3oN=7cߊ5M|/hǦf;Txq_{PtA]X[ {X;"&^TJχa ٳmNlQf&KhXcA.V1mx2mAb-oibJi@O9:뢕OM{^=ͮWN<SHLz&xY sfy wƬt>@)TDxhwyNnhl!&8cZ^6 Ld4ZTg*7+ B̑srlOp02|=_={D| hnd]jaڶ ;[?{$=]ɟjCwߍL'DPv.zŎ ݆0 <_4|K/Mx!9y9YuXc)Dn~ߚIɒU=@/m)U*+ ;,~H~N ~>+<4>m|\shnqjff$Wb`Z@dѱȳQqHl-܇DF~[\  C+~Mq(UYM4$/Dn਽IڏoC:\S{S@2`&xaRS{.o\ncǒygNO C#\z~޼vMte#LXN=Mwuĉm58٭w g22$=DGz!++hK=gf<1q"K勂FH[x !+hꨊ#?KaLDa6y ' G0I_=E^6 LPٳBd&tĉy-`}y#iv>͗zOʗco Sn*1,^1Nz*C/O&9i+6?І &2j#ub~;4p„Cl1`:,,>Ɗ+h s1.!:~omqrxSMz +5-/+ QZhkC="Y\ToL[qMu u~85-< p6-OwGm׋'"5'ҟ%X EQ}M8^n$M4 5?9BSk,gɩז$ m7*joyrʽK/Lv)xGSg1 p29OyNt~#tE@ +1<cÂvF:BhKuSvMa L.JͭzW|1E[mH{+ۋ3كPy A(ns罿Xy0Zר4cvj{#SYQW(H#XA5y佤1]> @(CT(L&8)Ʃ s+;=фftp(L5xƄ'xA0N!sLNhl:9Csn)@[|2mvJI 0YfofMO!{"p &.o25NZϑf`ݴ'X )Ae緵I].ꚢ@̔͠{E%GLD^*m9p@ݫUX 1c&8R'I '?BOdP͠mXͣX)"dL0Ѷ9s%ܡN{xf 8=l6ܴU<* )phq#2 j6 ?ł)BI#w %14&i`awẚNܛ kLm3XAeVl>( ( &XKryYԔ)h֕oDWΰBe=qzwBf[D6m ׉BL"KNk9řz(/q}ϧgٹ&̇yf8yfGIyZ-Z Bn'(!8jSDC2 (\hXJl}J_IG4DJM]PPt } qhihbI_1 h3=`ݯˀa}KOڎ/WڨcFzZR֍RHt_$[i7FWuOhd( <ڶq a1Ogz_L'H<ɀki'[ӢUƩqdKWi} <]!]Ҝ9=:8UfV3 '-#9PSc^hsj((P(W.5ӦL:PFFSt*UTKSb1L %$HXk 14NxGEe% 9JMmnlɣgatoêյ;x~<~h3Hsd扵b+\X';Yd 7XP8oO?(o0O~7OkCAn4FOSWw"<@%6iV.l6'>N%ޫ.( ( qµ $^g+Z<3A, jœ9+GyO<^Ʊ3oP\}\dG嵠0  * 3g [Ç￞rryvY׽VWf7ҾM`;@IDAT4:}ESzT f%։8'29OTj:Nt@hMw R'ZThPK7p$EȉTz~wl(eC=;f뇋:t'֣G.z_~ZMbt4T`BwW)瓉L@ 2N>ORg4X迏JrN>yU4!N8wiJgEmZ4Iv8@^}*8)PN[:QHICw< 6/yl @! .47ag8:P1Nj(P)ݭ6}1LM{ᩩΨyU %8Ibs ofNـ7-ދ(pe#$]w_;%]zq'ڴfs`&Dw_3ԵeFs?gd_pM4u5ty_JʡoWͧBia2 s PUf=<Э-=ub{b¨ϝ#LWm: ag8w.U3v2O4i/vb!qf7Lj{cER`֬V;}"C㫹v`ՕS`[i( QdXewrv!iӈjdF ~3h7GVlMzuOZKH(>[v>AԣC3lK)7^'L4db2SbGk=:F,^M荍 уQ-Ľ ^A!2BNBw Ze]L4 E+Rfv.u/Wڲ(ڲuoOK8wA04VU_ӘPhIv;Q-;'O| )* I ŜjFޮY6_j,ЅZ4?=-u#zyuBѐVRZFdsq݌9 Zqldyv> j]G c:'amQJMBF Jք]42}ji8x0. 
2ZX!.Q55mQ|jp9~حru.LeW]A(K 3a-'^-&zܗ :p|)=bAWwXgRFc>z;.wFu/j!PvXOꁥ_`]OPݘ((D`zWl\j0X;gΦe.X\/iaw+i`64ip:v=~~A%<* !vP8\M{E_iQ>b>d)^pyN`XvX/zai+R/5-A3_1o^Gskѷ#@yB\ 8͗9oJq [vf`rfr➉;Hwo?94 N`yޗb5ӳ:O52}]aƩZ!f64NgWz4TާsiN58qB adi ^p֍ lx%8m]u6vTπb?r ۨomiڀz˥Ŷ!kH!i=W9(Lci`w ~f`-ao`- 0MV0UK[hxohv>)ڶ}ɥ3fwmC#pBs \#^$pkv?&X9s") n, O4dd4U,9VhThkB c,Xq2mȏ+Wzj(P)ܬyrl6}ʤ+ 8)`Ңkobqb*P< y:Jq|aGÚ//s0k̹!+2+>y}Xh ӑ6@DrQ /Kp ZqڄҀ5Jl‚ܞ 3S <Tj$hRmA'^2o'a^Gi0Sa\vaƷ1Jq!⍅ -8v@`hLJg,ZgM;\O}.ƘB\>^'|ҳVaˍ,<`T>aX<߶֕;we kًIxlqtW=)u((/VTt@xng-бݣeި*N vC*ju8,| M:ַPxYK't,?I݌׻W+848hG'?}-|3ui-" 'Ϧ)ߩU#NB#% ¢XAS|4lh< `tY_zB);| -[պ"hpPQyUAijڰ}h5Ů kp |NaγkH9zѓ4c „qJI'/djJA[><4OiдR[VU>l0Ys65[7髪Z9ߌ7fE 6daW՝BW. N9%;NZr_pTx KXi#tW0N/$2wJ(u\aSt/" :\f)a|%bhKɞ#'{\wX/(;7O& v>I  a|/I8\Ʉ:t6Zh.Cw mQZvVd_&)EgG=BHhyyf#k?sZ19x,e2TD) hz|G<0tUOݣg ;m7=վjJ =m6x7AeCXԪ*Cӵh0?yn#:7:+g3g]EVGO Sς7[+kLIk:!ﺫNoheR֯G66gbM|a;J$=p zơӨkDUM0?y5YNo6prhqW^(z˅; [ʁM 84o?¾U@R$=t 3O9>wJ"}\5K?vq -= $y Ist*%ͳLFhL+Dnr_&yiq-iz8') >*["*,\]-ļF/̄-[fV]Gѡ5(W{WBC3oAݚ4=*<Ԏo19Sj@nA|z?&<y9#_m<3!xc`0M/~t9}ʔqjVu{Y4_]lӈa&k!eWeաSfCڅ %xvx殷OXϟ8?z/kiwQD&JdWU3I9 񈺕ÿ\DQ)aGwqʳHo5O-_ϩ'|$3cӔSA OA9_1H4HU`K};M hkpcg5 U81)Sw8T5O+G7yjR̰(a܃~97X퇇Ӝfz-*\mjC[/RFL\4NVkgHDlVoM ASz>3c;{a0M/B{lh#5y* Q*/nM` _w} /qTy'R}`+( <'JQbHN) ( P@;-hr_"ƩKsq Q9`:u]Μ bۯ0 ߑI#G5}!=Ƽv_I1)cy{5O+H@RޘF^(x|$ӱ ".M@uXr =h[ߊ0/;qȚ j K&qBN1N!<ُ9!S:%ɬ4M!~uv]OlԵs#/vCf^)b|9^*U*0n?v; 7/?TG~LB:=n'|`$F()( ( V+j+Ma8M0Z6U&DVbSOY!dX)I{{l. ܰIZ7֕OO-sv_PK=uxh,b`fUN5O O3FܼaGѰ qr#,g FQ}F4]y"|}(+_( T%B/@6y6ھ%:KGO断IvO"ٝLoM-O"˾ޛL.'Rz>!y}_هb"% -QXGG}GO h%}鎫zƢ55->]hG#ں=*â9=zԵ]Sg#g淍nH0nSYiV~U_6^( ANY 390N!8q2Jv1g;W1-6sKqr!]gkBAub"iba[^~A0tgI˺a:&ͬ%/qК9sFÎ ڦBдOM<]VqyZXyiyמSB(_\[ӳE_Y{ wׯ+<nmǹ)5^^t94NVutk׌7OO^KO!FHL3 L(GmjWN,G&e W 6RdDbRGE0ftM]ZkDhl2}}CF\@R<!q У}3d}:ZMiԣC3Z}?Ђ.}QҦ15[BrYjY@iٔGSQM|-paՀ0ފl:NBdXV,V= XAР 3N,lz|΋S:Qt48y_Cxiբ?Ғn fHMD"Tc|'αF$=LJ7^2p~pAa^d !@}0fZi7 i$]Ԫ1%]#E?6NC^ !NMlRL8ϧYǍaD%M'`5C] ϭh}9X -\Bc`]h`q;@XM0N,}sY9GdpHƋzRW0)Ѱφl*ū.𙢯7 '8ڪX3Gog Zv~y~Î^GR!ۻ|}$^%P\6}Y+^<9|i3~;CF9oG=yN#zg/7'pf7 qNn>Ms˕ p ?xÇ-t~54$ -^,>>aVn-ag` %a$!ŭB; ω7 .|)ڑQ{0ԣm^VA1YzE +v[.,t Ic`ȿrS}/#=J#3ZC":2!LŸ5Of8t"hX83C2vGd WiۖGR=LXւ5ۊ~STb D0$o?^;U!u(P bKw|VsLd/ 8^-6Z騢)|^KI7 Bo%4߅ IVcA/qb 6AgؼX"Yb0ϛ2;_b)8 v,>!dS$6Q5+F9`nB̽r(eM~O¬$6gdfBG Ėlu%쎏/X0˹:7$E/Z5B8I]ĉOH"5䰙9k1 ,f>k R3ٝ |GZј!=c؜voC`X'akdSoo}0`t|xNfd;Bf,PN8hG8\ \/~ l>LSyό0*2O]龜bBƌ?*l_9ͅzqmǑNR%I#pzݐ0`zL}3=7| 9~ 秾>b: CeQKۦ`ڥځs=-cc"o|uq|D]hkZ#M:8y.J#º3Rj^8V4Gxfz5>& FC+ 9zE4g/"s#>zMV^^kn=@T!./mJ:$`],,'3RR/Ez,n/xa:'mkSa)0̨L:qqق+Nc݅83G됙q }ْ( ~ %`al"Z[$36楼=}}ZX?mٸ:r|uwop4\ܷSK| -b9 t - R|]MbuNs?NU?69PTRl2ǛF7cc̏+7oXj/rcΦ9ye7>/6yϒW A \q5+]Pb";F+hvڜ֋eP5ޯʊEFMLD<&4NLs0+[!ce,}\xNպa[zօ2!h2B/ Z K.Hhz\ Š w] $dk]*i$m& 34qqoLY_X> }_Z\},0vG能%v5sYxG\/+`]SS&'xB9Vl8r41p/*UxuB7Bqޡ@g[̡ NGUlծi`hEZU >v؂ZryV)uCVKr,<C&u&ГJ(@WmE4ҙ]̞֩#; /DX6Y K }`9}?iy9K`?`NV~dD7ZP|1{Xݮ!ša'` +N\ oLP*4b*w:ϱo3D?ܥp`׏ޘ<yG)$ϚQN52Á/qtᆪ& bWI8?:ĩKŻ u((08y֩m8Y? BJk_ ޘ͉L#n9Gb'_# !y$𢡄7W`NfE2/\ݯOBew` ƋT4k"ysw䁈$ֿKhl4?qǗCTĢ5 Æ s|P%hܠ7Hycmk$c>g8\u-s)F΀+t!8J_Xhޮq 3K1cfǽOuNGsT.w!F =-FsB+J—}H>y{'{[4S1N^jRQPL/!c.'LzQ(!焐 ^z>@oVCŋ4.؏$,X]Tֽe]gS+w{\󢲴qI?ڟ'Hpb ;mQ)2|ĉ3<ٙڪ94yrȬ9b˗;p` ¾\lJXKg1J_)yc8=`dI\ӛq( ( x\4N v^GUlYhw4:(k%˾f 9zԷ+_ R(x7pRZBG-Y˞?>QWU#Ar^8-fN"#^^ E-A/ILܨ8yQL8^E?!lXI |Qf!'; fՂHv{']iI8(r"ANvU! 
G[viæGw iA,;rfވ3Jc+PP(\5NuWģAz}\ ncs=ĸoUj>,EQyMLTUT* ( Ml^ .#\JfwB#@Qd p1YlLs/aEJD]4Ni3VizQTxzO@rh3"_GQLɧ+HZ<7FS2ЬMꦤdD}'O8+^>VvGµ7>]*.wŦpr]+[qFI5|1N)!|4֌8n% yBffQTȪ_R`-$6  2sH$DX!}Mësqr{#$9=L!DEdʑO\óކ朡yӮC"^F9ҠIG> I!>G@/EePtDm k8{u+PP(XnTL | >e aUxlP7n8GKA#G6ܻϧ1ɐff\6}zgzm9^M'PSFTq4rxwl"L['(,13`t83$ܓ9ާ͒\쀰>8yEEW - 3Z0$wtl3x7]JmUG`eUcԈi F66{߭ϗ*2]3s$Z1No~GՋ,4A s97м!D~ L \m@ ѲI}J((qBGOQDLk6h8u2OK3<}:Рm2u2N}cEE(L.VuC_7])+68b\*zzPDKj#qB|+XiVN\H\tЭiO6rGhXNNeAG79?f~TuZݩm1hb'7d)4YiӖoR5B(>EbVV .5w 5g;t5C\OB$mNc.Vgnm/m8\98$?ֽG t: 3=ԅoc(څqJשS}ӂ_֯@N&\3fnscKY4O:}踡/"/SG 6^Q@Qx ?\{j8J~v最Q#l=a_is(/h^S!IgKR ŷ5FGg_Se#"GXs`B<1.';ذ]?! Wd Ra]bu^q/} ?? f3a Gx,CO9Lxͽ3JzR3+7OMYTUG09wGdLEImRqc+I!,ԑ:VP(?w2N-LNPd.`Gq0Nzh*::)@T, ԯ-úbL>-'*jhڋT]4i!r<;:aIx*[,2C f"J.3# pdۯ`6>,{woG?6~xߞ'z՟C h ~1^Dʘ:F .f"X+i]z5nhkhFڨ?^sb6hZfOvu$ūlz#`~4NW) x!;DfNd |Zu nc-Ersu(\)s^eT&o*5f՘!U} 㝚]>`8EE@ҡo DM!QƱtYo ˴26kR?*Z#67w]dQ=Ìs`\&L:H4\g2+N g %`5[!$ `hj:`ش ZZZ[Wk^n& 0<_wvɤ)~2n1j Kq0Me+#դA]ڱQ .E. .a?q*扶d_|S Uꔢ@m vh>(DAӠqrH5]9]n~kS: xKkQbt{j SkG8DaIbi=8 d3wu"@5{ε&"T<}Zf6} 4]7'}BisjS9uF *+C_>"/̍}|.~9W#:651IX"#˰SB}8gqNfJ)vѩs(ul~u@E#4gNNGQPs_A- ;7EC&LԪq=vG$42cc=ۋ}c5L-Z#03GF@/DGN8? x=׵{"r7K ?ś/aI@K(%ۂn,:#=rr'+-[M>[G y Sl>X=B2A(V^iʠWMj^ =Oy o~qP/#ni(?F]7ABq[@)X!I^* ⢉v mݑ(̔ n,jf-C3!Q}r2u~NWSF Z<ȼw̭fUE=4tc=LNSB(4p ^:b,"brsѹWaw˧[O O5U8ؘ''mqIEă 68 8xN-s'9XohFߜԖ9G@uRA!$\ҷ,}c˺ t&%w'V=/!f6Fٛg0߻H0OҾ)u o3kcii'g_zNE+="wP\ܗdr8(ZG̬5h0RSüE.n=:,M.H}8qB5Iq X@Ț)Կkk!xjk|DCz/2<>t-.&R-bESgӉr/DļU0#:|G]q[u;i7G;J:x2O \yaRx oEuS _Ĺ句ȷqB^[WpnO)Nf'Xe\5̨(1U}P`:]*o^ylo_l"0Ӵ#W1M{fei^,c\= g銭4ɱ1x0>9}BNzl f}ZmLČ6ƃz&neÏ}T3nRz=tͧn0y?Iغ(W$ V`L܁E=;3i43GK' 3k(L0;3rGG0}0wĜ?*‘s?&riSaiס4`H l:sbAoD PV1N%4yk ,S}ZR{Ŕ|<5MO xxO- R!|N MSѫ{Ě OfĦzO4N𫊖}&UOҢBiОش.mNhj$]=5n+L6:L{\@{0Ycs11pR)F'}4~doElL:H[;|iw:NSѩtɮڦ-iP4zP7u> MW9y"B3& q1H;JJwH@Uf.АI<^Q(ImpZ;MPk'ⰴ( /ݡ]>X'놝͞?'i.!I[j4Ŏ>uG ykhO'É3tTǵ<0$p{_BgI)ŗ"8aZCvqHs2`™s0sOजias X;!]!D; l (ҔH?1tn%֕*gcslv8vdѾYS7) xlFT8Xmэ_Y+'lv\2GU >yƋ[y/ 4m֗_Ƒ*+)3^xtT)Hza@E(~+6 yqH`%- vSB[tuDq"Ӹ,^ $1ק0OO -PB|8_#1;oѐ\s.T]/A5̘^m`8CDx @Q_( p By\[qb5g|*_?[/ / ƽ ǟv!?XOߟ늛zN@ЅmFV>$[[^#/3 0]7'}S=0& 5L[;W~:@ |MM\"Bߋ c/[,‰uF9&)^0\Ě-]`njY_oi9i1*9_^6.VE+`,ID)P a[ tCqm*zRjOay~3 jf !ᦹJQ\ϖɟS籺⦞+5(r&Ə y!5PuLwaIX 6̃\]; o a1ė}n(4smS6LoVHd<ĄQFw_5.4nhӎ21{[F/~YOGO5ni3%M (Pnş)4]֯s5cT}}+T^ٱQL$ -Wr%PhX[d7P={T2CX4ړj+sLEd[FqxVm>D,XL'__Crk ٳĩ 69~{0q:–tg6eƼմ`1C79*J'fzXDAW"#9ܙ&WDUbrl}t-|#sfn.qAxQ$%\=!4jӾƛ*G5~G7~AnSlt4-w# K[:Yso 0+hb;:߯emG'm^wh7۽t]j:]Vamf߲^M:qDFЫ9B;P[39,p͠98,I'Vc+)xwyЪط&y7suT50qbHachӭ5N0&?؇Ff{mt0 A/v }5JH66ǻ~qy"}rEwr>?GU6ړt|+ўd{ u`y>a)AT*-*[ː` bQXtU= R8:9c$6*rjv߯&cv39'?5<*QI8XyTSo#Om1Tpf}PuLIKKwLpOET(?V}NoAmHfAg`SETQ1$PLQ uRBS7nڄVl< .#7_6^iE@&?0 #ߛ^m>A,TFϲqM#stRM''49 j )%4J<XT򿝂h#ʖf|qnNWQM\qb$NrxLj;l4ztⷫDܕ(qt/NkP/\f;l_T?G5no'\ҪwCƂP)}avg9?7{ Sŵ3 xTQU@Nju)<ڪ75dC$ /ח-5%8@ x/[or<?!֢T i1,Oz߭' b|q?^`O <>GY=Iˏn'gYh e;qE9 ;Dי1tt}2Zq>xc &T+!+'oN"{@?ٹ,9I,8CZW;HSG;iîh"S{^Riɹ~z!2TH"G4N>QFF!8^Uz-8x[5c ьɣC0R=gX[LIyXJ>&aA8"}NW40WjWh;x LrAHny]`,}D<ܓIwkAOhҦġHV'$wi}t:/u8Ej{1}&8B^￞2X#Lw=6Z4?7#"/~'TQ|P˴}dB+9JD>8k~~arh: *#'4p\4#-=lc/9P2RE&6=!=P!S՚-8\{E>MA{-yvg[$.n7R)KD8ӽ]-[G62[zyA׶R$)x>&z&,ɣf =絔PqU'"PDKyH!𞗟e "oȇV=He'')`!-Qm'`Xnb)f8b,Cb) TYGr)Bt^ˑ=z|X,~M*5D .M~F=wC0[͂oW>SN'RmF:=P]5a=/@W MtHsmBQzs I"Pł/^ߚ,| ~ԷVG1gC;F|aYY+h_:Os lB CU"^s ۲ejғ]o ԏLA}Ιl ϕDFS}0ri F |qQw!d3w }:F dzel+84n6Wguy6+>N`ՔNj`dCdd4vr!`~sCJgcT\JMI&8-zyKM0< h-3/41} !Bȫ+ihj[ꩈݣPZ?%o#5V|/5#U:J$C;.ږC-c[Ki ՒYZyWo;);;pBr-eXϑeF L^^+i4[qUE<`..㵬]A0%X8L LM2Fv'U:%_ +]Ak*_pEJz5s(>;OM| 4WD ϾuEI|#1xOS[`5I/OS3$iHۢ+V˶FBS[0BKLd޼BSqB=xMgrRΚ,Ql5k *W٥yo.Z6=3Sy| `\OIs,E;5K1K  b6v*>m?Kx}W)‘N rߡWޥS"%`0er dIz(4˹'i /u$/k< #CNNDC媇\.E\"x*OSڦE1) lm`L2` 
E[~O3+β”?}h5~%_o.ڤԏJ|-}9ؔ/OD[\U( C NZtFJpu•`G۫.wni,jۦXLa/I|;{L3*)HFR @WE@jZ5h(~= ZzMdhhT ^t=ղ2 öpdu(V%}{/sw:8DBLA)dlK.ƐW>{mO} j]t Dg#uި&pe_0nŧ;3R;kx;L:~]BiTuu_1zB*KaD`)O f/nϦz6EETq2u gs{2ӤRs4Oa:CQӇғ x}ơ!ĵt`wU5)IS{g"U5mKսfLJЇ_N{w ,@t1~TZ*}o'Q{=>ڲ$2T`B#M?GVcXwH>}lY"]L5j&p4TR^%0+0W)4$pwM>ݶp%m?LG J[j=iڄQ-ݎOs=|W)k_H6PKhg^itH+/#nzzUMf֔SyTp/|{2UmNO>,o ߵ:"JUC$9HPfw8BL033{'X} qײkhLN)[^^t~BPi\z5á-45V 'k9ꬍ\a *Z3oRl Բ2 =|WjjU9t6B3XOk3ita?36}NZtDɱՐ`bR.%eQCT-<1ξ6ě2(TN:ub+Ųm\7rqJy]t_[Y`-I8ҍhؔ( *iيCt%ϰd1WԿzwC fg: #qF\é3y12qY*C';;>XjXB4fH.~Mj SѼ")F=[kj{|'Tfj+Z,@<߂O?}6jT(E>Z),]?*P[co9V NuI N^]Z_jߵٓ4}38iuƠ&PI,b(:zX䷆&L-ߑʈ=*c,͟{G~I{ȯ8vדy}~2~A? LOI"Qnŏ?cR/3/b-m%?/=v6rzEMp1xW^ q(ǘ>$4[2AاapxwX4YvJX0 .qҸgB m8ZY2ʜLSUmH?5ƣ>ӯ2o}1^O tr~J:Lza_ x<0z'M3lk@|Nإ*O~o4`"nm'ZUi*G:͘J4|Y|kG+_;Cǡ?];%d'D9=h.gk[NPd$ԠҀ+6]1= 'CL~fNԬ%*WZ';.`$cHZP_[{&ց~0&cAi>Ew܏D?O'ǃoMqm_HVxcq @*嗓x;0w^v8d8dPv@YQ:c630}D3I4:=9YiE >-}xZ!4L*y?UZa76=W?'M=F'yV&PQMIպQe?%b%&x.;Cͼ',&RB=|F ԰vD+qʫGǂ,/l=+rq0K Kez;#8Bpѩz!?RV>A)fZfiʸe$E.]4{9 "O t?ag%%ϺLnG) v6CY |>z͵kJX[{ULD;#fk`_dƩr`NH-@ ׋.:72S'8us QȐN6w: J%P]l71 K?5e~A%D7?}U^l=O7OOw_=E4cǁb6 $Αhk2$_;<7NSM=cnY9^KeA@]Ț۸f.ᯬ92i ܲ2(|&x.I Gd{ުe(Oe~?a1~~B&-mO&p!}{ЖC^^xs%$m/~W҄3o_d1'-ͧqyw$8ڡOh8tjr2{4E7Wiryw_}Pm ev75ch%)?XM6 }+j9Bh8u TczyB[$z,<ݘϪk#!|l:TuhCsuyAQLW K(Ód פɫ!LL1%YˡqmZ6IFC@ZlXGB0V?5#!D'F1L)-q ;"l=bd0v~'_h?Ǒ;FJ)Pn,(w=9!A@R1lh@rf]8n(e% 3^YRAVٔeu MYoL?U Myc1ݱF_89dVb28JƳMFl Ѐ=T Ț͔䭠a=8_,vTݮC\`Igy EcB7krwen)af[vU"O栉F?5y{,]O*)9g>Yx+^z_e_d=+-f?ߗMGi֣2q;O_${ i֐YM9g}o% &P /K)7;f6("=y|YF8OF^İ#@}V;u2A;! 7Nx(j4hSxNz|& ֵu[ &M5co.WqGmnshqj?p*g3fj>A\͙@=r!ܳg I@5P4A 'GV].tBH'<*=A*imlH>OM |`MSNNxXA+hm5Rl*=_AzNz9,xn/o*S?Ɏho{Pf!MXO5M GIf@#(,M+=jr[9\;6 ,)!6I?elaf9_'bUT.3۳Atx{5d4>|3^BwHdS a)3Bm$t_ًWhzG4_Bɯqռ_Bڇ1澔kE2EGP(E';(Jd+Z!k.ql?ɂSkFj3&9 ؔτHvvJuv *+%f~ ,$&MiiuT$:|^o+eL$`Ogͧ; 6|:3nr^nq$]#xgwcWܞ4a@zoYpmoüZ",E]<>Z?:GΥ)=&A M7_>'m}z ZPÏ@, NZϙԴ;Eys:r) YFO򍙡 LN5Rn84R^/oEB0`!4>}<&a6u,(Dc #jTy1Tm>NoE* FO{ƦhZ~ , N ~qqC̿i!OKf_茄}["T$ V3%'&SW#'鏯Oh2ꗮH/-\I%eUnF FįƗ}.љ7,zrGaZ }aʭSdg^'8AxŇ2TI+ w>m/`hLp?xk?B2xu|w?p&aW|A>wwjLig%g45ⵒv,'i(7WN ;QBKw.=͙Ў ږ7M8ΔVgvRόTחRm—5Pu9C52S7w"וttt)Ma7W>^CȁVl4hܰ~Mb[/?]O^8Pl%r1"yYe5O&WG CH:#DS%MRw'P\9e4{s)ۧѢ taW e~畓yRD\}.XwxjcIp3غʊv})zy*[/ ɞ a'B6DXT%'iάS64ϴxfxXS !ǡuB LEo}3RS޺ړO phs<|5ֵ+YUUP 'o.0|N)ODDOqwHS yfJc\#u'`R3)@Yit6:LoJT7P~)MNT`0+?/|}Sq3SJ12LM}{Y~#%[*3MeU5v2T99.?(h*8>h %B| ~c0?=;ܴ| 3:qR|$u${wHKGKL6'G<{^d2~+Lc c m$.-g t Qeu-aڤD@I,<0;',tMr/dM~ R,8b/4]s8E,a34OǙ' 3 xìs'h$kzerY g&msXRӑ#l[~njI"/~eu6sXW]oiS! ql^ShZO0KJid-Ƣ| n(K'{+o2VdI~US y#!Ȱ0㏬Oh6v1#F$6Asz 4vWy4ܵ%cl)xS1کm$tɥT.4߾BgC?:~t'U'24d~sGmT*`';=NSz GI"j9,M`qE =I=YtXǫ_:~8|H :)> =U6Z|ZJgg`@\?7C䴻֔nudc386&ڏeMApj)it)z˨FRY{Ph>ZI]Y&qU8h9E!(*Wق,0mgm +*XkDH[yGqlE}i}cߣ{)kC@*q* pmig&z]%I>)R9 ' xee|q_T@)6%ya0BR`@/05> Ǎ|EO,X`[T=Ixn:/3yo hY0ۙŁQZ^@IDAT喙dC%T3j-IGNӑVk͋xK“+cʇk)h +>$T,yP{6Si]"R`ts` Yҁ|[ej| Hc8:π ZX4#lyT]| OobMp݅h `PSSEKY/;A򆏴%dXmTכR{!vڪңݿs!@An^O؎CNOC.&=Sr/LJVlx%h7?9+)G. 
u8H.*c@זg %I/'7AZK)LR)XA=B#kP@O$G^E ]-HaA/n Z&F!~F{ _k30W)4uԀl}qa>MkOC%:OaRS3${0W^U |A-Dcpi)N ApB;RZ'!N |U`~^_@pn>x(I%ʕ4.ŧFk'y"|ug"v|M'O f]Er$V9h4LӑJ*G/\ԁ7 Gz"0J$xfgOO4Z]XN'y5'9 4R  4oNpjfs/ IR\~MY$P|6HEA$DOYh=k{FڎFGQ486E"XX0aC,n3y-&A29m|ev{nbΫ.81 8P`B4 VԍxITܑ/S|yB S^p6iw<2$9Wt8=O֫E}GƠyxLII .TͼV: &V*f!/@DW B9#*AaMS"cid[`,MIcw%~JC.u+>mÛmƺ^*`*hrP m%iRIHBnZg !)> cNG%b/#OHew=\a%C*( 餱Ʃf0|QYxP>#II,49y" ]K6J]tTqˠ!1}%//Wdd)ĂR1%%1ƈZU kLTP栳lft2^63Qb"$RǮ$87H60@ 乸ukFh'b&ŧ+ѧ%b|ybex ͗ŕүqM0Bp7@;'73M2=8.ۆz5hm| BhOkP'u5aii)'ce-~!kTiwSO;>qRb n946l,@iT&ƍչB P|Kiyb޼I(yBc]{-ԍ&=7HeS>r.6y!3`#fOyД&1@E2mj?l/Z,vJe~u3W_yn\j[( nr{?d8LfCó.dAg6ST-KOL3;#uKʬs(r$hV4!^- 'z>8ŋA(3ac||3y4 'I׶T6Fs(1"\!P(On{1C w̚u|>;-i~) i=1A.L9Yp#hD,h`~E$YL; &z,U /_SjPS X!P(H5;u$iF횟̾xgB?[e);$A★'c}DMM"pu~PRݭM~"&EE 0iJE@kxJv)^m;Jp DC+ ֌ w܁eTbx^׿02f͟?\LZé;%c%CfcF &'q"L^|Ʊq>:a&B 勠4M2ů-xelZQ !tR=8ҵ@?3Ϗ]YnY; >6뛛{x+& E1 LҠQA0,PO'?"xXj׆<$Obx!FϔuPlgjW7l39f (ef5-ؼ h|l֬(ҮjM.Ժ_&9  )P`F2Ilw>O&#yMPSSLBK 'jwUݣ_΃h}GvS3lSB6]9Ճ32jx_v:FҴ]1𦓁7erIӇ4ר:5^hn^'zϜ쓣m 3SO͛wףt<&-?d.isi*0DLJ`#X{N%8;+B@OgfPA,l/E"2j$0{݆*x] nw83F7h}?ąo>i{eun]۩]Ξ}tw>q]_u{Q casI7>跾^,WE'i1YMJp`B@!Pt;T5H!9z =Bl<K,X'8-ͻ*%MK6l4wr*߳ЄFu,Xk׳M{qX>glj>X B@!PSwU&@@'Y] ꣻTM~# Du~,6@{̏xXhbyTH;MFTfW-`.p:Q]z-;U( B#ê} fxbz#l绫n}4]]R4@'zpD=%h7@G( @wE@ NݵgU- wm.MJ35WW&Rdʿ5=B@!6(tjBuXX~3ٹizPdwego }05q FB@!Pt-ԵO?}|}5iZ]J:)xFRӵLQ_?Eg BF_4jWN=O#lm/>9~o_Pi5?5ĮѓŔΖWSYU 9ޤbDJKCRYA2Gc'K(P!WQYe 9ԧO|PO3Ҁ@^Ѡ Ҿ?%T$T}d2~u MOM:GW7: -+k'gɓ}-}_̴G,mKe&ZxǨ^>8e=jF\.7-ٸņj) ,g7"::lI[Ғ4m6a$jf]|x*b>tJjɪ.dtidm~N8M)/O]v77,0Fp?U ZQGsj VcDkW!@!m?@NyN|Gq OS$b\4C䏡#OyqYuZ#+2: L^Wh^h V۩_1v/%:h~}FiWN9{# |$g!&M >z'3hSs/@3-O4aFa:̶K'ÊN"SU@`b9-ސ/4*2] d$JdRZo*]n64J Je^X`}kFJ5ʼn h*%Cxg&bQ{{s ty̧ȡw!@{R&` Ma~7a {7+B_o0r /&r7Yh8,n6ok SۍSc>}Oh ·Y[ۍ2y<a<;0!T޼oś;T l1o.H}tmڀB%^x}`N(|l M^Y*#bU?ѯ_M3꺾65N<--5eg_2oIb֬Y5N/`MQ+4dWPN t3\`Pl9` <|:4ŧa&'}{?yU NCbg/1c&+C ˁJP( ܞzBBӧߞw|r`7q8lD OÈh>J{p*& FTU]Ko~`wEq@ 2j>^KN1 _$ (ya&g{[-PDo~K)#u bYNlWi2*MStP$(AudW{:|j?/V6yXa4 v"Yi~k Eo@hBD2Ɏ㻩O4%n_Je5Zi%F-<}I?:{v81JlpdNKO.w\d=%:M},=sȋ=Ce_@;4k~d0'P } Ӄ>պ9 ɯ 7U4udK:M/+~NzAT2o$PeɞiȃX x;q6%ŽpQR3i3LSlV멨?S8[ ^ чxgjyaLOfW+@i?ɼG|/Dt|Qw 3lN]ܶd{RчQCƳߤB~b'YR^%0V!;ג*Jj;uqۂ]7ґC_rNrMwAda s]|WQN ߔ`4z%a)ŦDjr8ݡ>@#'[,o]tͱS Qt <Ѯwt~oG_?AXڎ-)&ciYJbڴ+|3u8|סBZ>JItI( &,!iQD=aev*k誊.jsUŔ|:ڳ2/^, Gi󟉭f(me4?Qhߥ~ze^tnBJ;7g9gXǫ}8pZVUCV-\+pFOo[(w~ ]e4cnxk6wO3e__C=Lj\GC&v# <~q+D P.|W^490 E_n5j9[oq`A=Si#Iw|%ߒVo?|.ZNH J)=%ysߢJVYrK=PJ-ttrLfSS6Ǣ7Ѫw?D_E|A|tտb6h8] _"7 ^ɍ`Nt`XsBC@+&qMԱɹ(hhT||3^W*ؽJ.Ao7zudKK_/*=zҀ3b*UvY|?ʫMGM[],avAO,4?Z ^8@=pe<5zept=/N[k 8ms '> 8 #im20C3.Gn?k}^-p5q4 M#R尙P??I NsyaU{IG&}3/=t4ꕕ&4}g]/|y\(kfb˞c"COc;QՀKFcg~i,X lQip'}dD#%l"@q,}F?xU[sRo. tbO}o$U:TX?ww RlN=^Ayg"M tdoNψU[J;@貗фC'Q\+;~<I\& 7|߁xFS`u`@Z7(գdb31r4}jǫw [WOa}T pyB#/)bZ@ }ib_^oLHUyɤm{ MQ+&:"{hC}7~ܷ'M >[OzVɂY?^ X#\G}sldg3ˏl/Tώsf[I^eCJHM% -/)m~R:8OWTguOb0`)My݈Bބ0HЖ!񉄁 -濽gJjA,Сvn!Dùqm lU~[ lS%" *oW?\CVQ1 "Lq/vG~-=YSovG[I4o\.&ٌq om'/zy<~|kKt2Mv:EIoPѡB);Bo9Γ|Κby*ʫJp N vJTSjrb(~I1(L<)P kf^'U"?ݾ潱2chV;ï]KWLEIY ɣn km`of{V c00a+Tx@e&2Y D>@h Nm5snVT11O5NU ,$A;q vgϾæ>8w7L]R`O+x@AyJa{ fINc߯w9CrYk B oz_s{Nuc굢q Ni@htKvQc6籚ʷ)S)eeJn-5%!-VF& IѺqRb2&pDPAg =絔JPZdIf=VKy#y@jHHvHݩt6mnD {8 r@([H- QW@?\{GՀyLI 4RhՏM*y{yV^L$5d9$bS/0_^Plk3)y- {VԼyv.24ʬ7oɐoJܾ#0l )-&p5#Om J~5]EIϗCiC5;24MHI²r߱S9祊*?|}%Y~D^dmK7?&`*Ld& &o?!&_{=p̧Ey5|IN)h Mh{i'{*x/JoR"81ezy#y}s$뉇c,c+h_sү~U-}|˔ȳvQ!׵/7$:ΑBUE@iPڑZ1#g1L;8p Z,b/%X|2#jX9޸ Gpe)UۄVjH6˓ yk^H_A8dj&;bu:G;ǺNI& !mXJe1)R8yZ͂*Ip8}FrqBZض1м"9ޓk3Fc*`Rmѻp,ܶi~t3w D? 
&4uU:"f 6cRhꊸ u Ʃ`|-Փ_=WHtO2XB30g8}> afA%St{+ĺ5?ۧח 'Wx M4hC4Xؒi,kp}8Tl<Q4c$]=">b{d k!&i}{{Ȁ~C gS#="P L./G\T/1>b}w~Ao~#ߍk ݽj fx.^|w4ǯ A@zPh0:YHFO/T Ρ/;ODk^)ȁ>(ylȣ!Lp'ф2lwKG܇2#0@j-+#%NƜ@Q0@ ˔Xȧ#hS!jNQ/n-Nz qj[pA1f$Bɱùzi0_ }^mQ\ȕklِC,euO>"ȕ9\&6ӓHz |A1}( FwHNy=:4`omsށ''ff{P@}ĥqZ):]vFI]4Z1zlw'\~O8̾e ũ0q^#ZbS6 Kq>39M;"9)/`܌峠>c̅X k8)RʐkH -%pn?<V@~\ Tiۧ7o.R h4TG^-́C~ӖCZw )P94nXaomӂj=L Hw5NK E"YyQ<}ƥOAœ`JCr,Cqnw6Sk0m?rʩ *nݿwN&茉_-&ĥ<ZCT퀬mt(d'@!͕\j}T ̰֙Ӱ\7/P%&धatJOۯ]*{,Oßq[(O%'1q<4Si f79Po-6x}b^U 3 -9j/vƽ'ĄcR\8 %8oEɗNXpL{~Px_9ĬW\pMa]FeM{LfМ@@iw?Z`ǡfPk9pl4 j2(zqPG{[b)PY 4dB%!SY8v8Ku%?*7~ |$8qMK%H&6A/}@dhԏ7A'Ux$Oҽ_O0ҧ!_ftSWjˏpbRBާ9Yxڴ_28駠 Ki/H J>"O8gTvO@ɉz ʎ¹!:y.L9q߲!,j G$qdBbVp]&s 8p>{m<|wTw|a<8xRL yNj@,bXCЩ029W׋IdM %r qdhÞm3?c ̛:F8Gorڧp$Iku Ͱ'jz_diCvᜳ''C4\E(yfJMBwrМ  gsEt1؎LhL狠10B̢U**|!$^RU!o-?' s3\Q -Eg rq]㪿Zk},k̅ZD({j.B2v*!>}82f֣Y߃H64YHLPJh $S\/˘y:]$qOmES 3C))ZGϚ)FS`,(n?yb\tJ=bΥk4MqU+|)V3/C߻ Z,-#͟ W,{A24Q(Y ^<5Z6kz3s Fт&I?${\"-9^hFoFgw9SgPNI2g;NN:E:i:&xSSˬ5=TP1u.O Kp]èy2#7] i'Bbs茩OAl/SA?wdf3Rr/NtL;>pW^ eWdkn 0CLP2#SA'Yko+5nrZrz}dum4]Ȅ0w=>9[&G$Ĩ%w{8HN0q SQ]./h-.,jXDj֤p)x>zFMUYN8עExKlEE!AˆyiT~7̞ 1͠EkZ@IDAT͠B鉀%܅h'Qih6AjB8shb"\Z2M4[D ,rbCQAruð&] ͊_; Tl\ d6!%z$^3N>pk(x2Yї):'[K#j7t>e?A7@w -U  [OdS8 a`vALֺ{]:[1̷Nd6][zG:Hx/>x}F"j/敮;O!RWj(%Ŋu^qbM*cL?|4 $ʫfP83Ɖ< #:WTõ2GV\:o26NPyJ>М)P{Ri58|QOW' ;4hiЀV逼I0&gЫ@*6wF 2\%"707D| AG S~_?zc'4w w}dg:v?~@fTwh<|^ 2WM Bq0y/;?r=:ď4 |w&oAs0Jnn՞j0e`ЈgBή'aeD||#_:Q߀ k 6U D%ρK~=ju *Ч?| ~R;3IؤmXqnATc@L^|bׯW-)e)j ?ۮ^,.]St{'M<>C \HzQW&d' h0@ZZ'PbO T'db2w3gʠJsӼ)%fRO9:EQ Zdj>*Lhb\{K',䕧gA%,WuiBr4Ꜳ! 4gtaKXK=.3 $nR^'vxuQ( WKp͑wvاNy #%f"`~RΉN"SM ;jn7 \ǝe;.IQh:] Y[~Y+q:u+/(3$kIB.Ti(zXڈ&y=5%!xa,1 G)LYdjK1Q#%3ʇ̮ܳku*sRE[Κ@#整na3@He L*A$%Nfר` <"[[\IZ:yu~!Y3M:irۄnOڻ[3a;uQq:FNJ&Yc#"$6뮤JÆp3臄)yop#m UYeSoBOvRS`Ղs{ a^s&PbgjŸ Fwx%<~lX6|7˂:8ʩ[w^ GOOE4H>uäjqA҄ RpV_Kx![>2Z1a-q(ֆMʫXg3{r37cOz{?teS?@ڈ^͛gEߏ |B/15M6hO<=puK`Աm?l= kzU¼C zpAgb4kqR dA?P_ ʴlvIydblDfSDB")0KSw9$}p)-މK!Up-Vd3xODq=$Yl$#--ϓ[O7֠)9b K”%I[2z8'&.<ur gN^'Dt 8[J z|d&&2#MSƌvizlu#þ5Mz$?+$4ҡi@Gz>)(۟:Z[[Dǽ&NdDy NHNh V7mmmE $`\&X70WBZCy C4QqG 4`QaV4arO”%@a1=u>RSYu|r$"yJ]wA:Fb$uy 5D!)z(lihYc9I8u Gb8rZ8py+.?D\tΣSq^46 SsņVE3*Az=n4"i2MjNQ$% J3Xp!"U 8g324Ь@sG$Lj3$HLjp06aKa>DMnni.H9v0Tֵ7yMAy5\m+TO4^Ym{`y`Z[ds} W_Uw0 j (>vث$$;N*`\Xp Do*+f8~AW0#ld'^qyiD(`hA-Zv 4hr*婼j&Dy}UVF.yĵˍvYuvi&N] *v>7\n|js?I hW8r`^~A < N-kW?)Ž3Jh~V~"(NHK ZpXҬi41[-8/|3 :μ&Pu;O$uB)Q@#L$0!>Nm4yiK48A!OS?[!]6D{]dl%E^;JSpjȩ]7:6#І}6=.’a4qfSr Knzqr/̙;B\T)*"LLTKZ! Nv0C_?NDpB[E8eʫaޓU izBb |/ JV:$NؑIxXaWFrR:\7@bٮ,:bTx쌒 u6p_x5JDD(Qj h ” Fl3OE8bO s:dG#'lc;6[uH<ȱf0R9 y 7ZN!$Ӷĩ $|;/TMIk0}H8[Z :3'Sӻ\z<Ufx~faWYIH$w>j4[C"1[ДHu8G̍bd uwga5Gk+ zA"-a$Q$:7_6i($zwy?<)q\aJ~c@f(l?g 4ȑ|(S<͍-wn{[yn7hgdQG_lv8 4f3pD6jVNDeoS6:CD"4V' ֩i"<}K/±;LI$y1 zY!SY@(4WrJ&1ff ~oX9Whj8QJ‰i P%s&ǖ⤶s#b?s1 o p2<~T#Q+W>^f}R *_L+Q}hާ.+}G)35]hTF;Q߾j8^oq /O2a$uT7~t)ݚXh$>an˱=;N$$z[L:Z2d6Ӷ:Ɍ"gH?dLbHzq"|HD!dYBgq@+G#=](9c%y7Ӑʩ5&BrJw7N. 
r#G' : N 'cπ D򚼒 aG8g"_($HAhENan@[y {_I#xƸ1Ȉ[ mh4,k!:9 נKx r/VMwЫ۽Ch`ڸCz?.|&PY5i1lH_Hq텪\H9P'x/ h':I3;_(M;qM"L(M"Nb}ɹ$L2@jF"dGS?/<$$ 7Dur9{lH7;_)"rL͞mhr.Eo|QFW-*LO@cqba=%FF{O 3'lJiR<d\/~2킒j&N6 ?]2{]kl?@]UWOB(>\;zr^@ q#r1rnNAQ&N3kqI4>pkUمW{iG:gUL+OsL求EW\f,Îw~g#:ʨkܢ*ޤ$7e*g{|@eTy_d8j4-bk,ޱo[ZԂڻmM'$N*H7n=q_)=Og3vy|r0Q M0(rLwܰ\nDky}sfs/S^865~{dF./I&: />WkQiƑOD{PFAerO^m5CbՏ?uLR"zX1gTk2TI]4i4Q~&Smgll1s艓&GF jV0F>ѓr;VLNHDDH=ɂ?auDb9E" ,Nimĩ3љ4vgWȩOk?r:Q4W'F (.ZkVqcJܵND'/Jjy2eeBĂ|&>-{QnږнID$4MDj۶L73=H,O+XN[Nչbl.70D8CBsukx% U?8q}zdǚFr%rqAF>ZjVJ^"DmGI&F(Mq"M(6aEb9H8,=&N~$U!vB?b̦m^3rtaJhb\6ѓК:NOg`OmRqZ'\S@`#MhPC<y6$DwԶԎDMĩm6ߓK|F;˩\mlr*SGD_|qj_NCG/eERÊC9,v'~7EJr uBݷCJl"Mq#:`"PyڎlC!%3 RG[vNi[Sw$}0iǁ@VRɅڐ"GD=嚶\ hrꄂ%?eXkEa8Cqok|ps z߾J9nldW*TzAd9u6weZ1Ƶ!u%yniGI& 6TOmGI$O@}yޙ;Ӛ4/唉S Xv@J+!+`4ne'S> -qPdc4pĚIH8 c2[væY`2b  ϡL?@By C8,yn(ez HRM Nɭ/Գr.F`F `S`_jOQ޴? >zM{tZ&23:㢠'bIA.?t-jCf7}tSG4r4 -[ a9fӤh˩wF`C5Nf^טHӉ3%ɶ40iL"͍s 3LBDv2y0m+<&z,K#0@!)̫SgdV& HCQೝǡZfZmB7XАǿyZ*Y&Z`s5ബ/tpjʥ0#0i+kÖCADGyh҈~Ld4bЈ]G@K #| Sʈ4cxĵ<}StZr:xF`@S`ӀjIVv9-/' ( 砩MdɊA;, 3D٬֗~0YNuJN\#0#80q rE 2WE [Z-@L`XujĶy=`sKZ^~㝃sthuthjɥ2#0h'k TLnK4q8#EnGDzAl^Wkv…VW_ ׁ f9SǤ K MF`?BÑQc EUmk1U{RЪ07W͇i rcF`|k|EOlTt߷VAtxx$mgJ*`#fD1{K8 ,,3 jbM}oY[,vUofObN)]Ź@M}+KLn|]c!1#|- eA,V/ɍRτ ]OÖW,İjUϿHzW/ȀIiɀodi3N6MɎa¨D()];fLC[9p#0@")`γK(:e.#8QlZRZ0.T _1vd"\:wrĩ2|zq6uiFjof>!_ؑpՒiӺ͆SNaHBޙup$Ԩ]HvjDy8UpҒ`laIeD6.b64[IGbLe3'M_CN2|F`F 0`QHLG Z&YSЃ iI0}(A ըu9~eXzt *Ejq+(6ö9.bZ;">[ s2hQ'.HEe5j2%3Dg[ ŵL[HnX9Rcp.#<+ڐj?@%}VM"gJک,EaUmq#vghLq<* HVLCxS9*pь#0@#)puJ{6g| K/4a$9@ӥdauGF$DǠZ }twvTcQsMpPZQ'Q#DL$ M$RgvI|_'(O R2-PF%ŊPH[LUVORljKt.ToAVc~"$3H}ɩ/`F`&NނWU!p$S'Er?U(|DΔ _8s8Qeq\a!5:Q iH)qK3F? Hr510o#&z c k7 ~K:gq퓙&lD]7,7ܩ;9:F`@BS VՕF$ۙ)&%JG%Y 62j*D&x.TtwFICELsp '"zs hA8?)2TF?xQ> /&LD{{f΁o_Hs"_'ŽC##0#G0qPȟ:H׋|h}Nb~dAӽ%38ώqY2Dfm9 af4M#`)FOKd]}=\reRIy(GwST=R"3=:nh44ѴRSn>ԡx"T:;ٟ Ɓ0#x'oB@>zG:gCsDn\<{~lz @ o=7l)WTDgLgN/+#M4qDDg|F`F ` NSu4@4yޣ& uZ׈}2SJ8 ϜsH @Q81#0#L-I$Ҧ6&KφcGA\l Ɖ#0#0@! G'b|@^8'F`F`FSp/?#0#0# l7 r#PDh8[ZO 橹6*P( G1k8zKlP84 zJo9[m|HMySmG+B8!}hk̈́/v"cl*D9/oo\9}+8)Pz] @@=7XZv-MvIeDجڛFID<~O^;'eɒq?<$۹jZJ1jhjF0DGOAƸT*DyA kXtH(GMu:Kg tVW |qeFvJ4vB Kaʘ:6:51.TzU΍N&mW-fF9\ x%źC/8 db(d^Te]XN%B}ͷH~P-px‰" __Նi~}Q"$M<ÿj˩'J8^uԹ?de'y;41 ') >QT'-9H+fuƏHlHCjv~"eXXL\Oޛ4Oeђj% 1MRLn@ܪA&+=% :xWRnTq{{vo$ Z{sw#]K͑2$F25整RXNun4M0*""X4[Q0 ͷ&bZ:իo ǻsl0ԊW"Ue0K˩n x{,B'/J K% 6 z-ػ!?] ?q,c'ARl$jG"P;Ee0 WwY&Pp8Kv m? ?i%|%h.l6(WP%IHQIYZ,q"Mkay+aׄNͯǀy?rAqCbICU(P!|u3.[qCJF&kmٹm*B9"ua?*UJ[2}?;:FЯSPq2_ }BRV98D_iD? aZ'e{Gr66z|>JW׉mHs"!Jty޽i; O S)YOcq=X?p!9 IY I80 e~tr,v@u Iip5ȾNzSfOHí=L1ϸp(1R'&O"AoԳ}g]!=߅glLpiVtBR!0v@P&\gCv$+2:e2g9΋ Gz#!ub !;`ZSgۏbpsO]-s~QupN]NJF (qHK3>s$$1돜ꩳ0R-2I-_k OQhS_  QSjIi ]HGwGG]E}O # U^I@\DE$3u5a0==}0p UV]Ww%Io0 XD_i7t5 ޒYN޶,ǐKSs`kջZd"MY'N(pP0fW||1S,s8iP¼-]t-ZS.y]ϭP*E d\_< 4$,}Y?}g? ?V? #ڠ&{F+u7a@gLenj[ h qPY="7}eNss%6Z垥?rJ!8LS;:\2l6PDDό&88։ HDؐ։h"IdI#PziglYN{ƆڄH1$4Ш> MC͞f$s2P QA_si@IDAT::F8߂fx@e{/И HLe%\Қ,wd!(4Z?BH>HGڷ7.y Mq Q?zVeGU> J~c>8"F´KC+Gu| 1) !wSR^"O8gTfh(?%'F%+; ߇0}G1“ӻڊc0 YtQ? p|.y@0q=K~ꀑ? 
$H+5gL",Z""\Ȅ/f*([K\@^(.h>:V-%BKDׂ1q i,0:VwtD={!Ny\͟bx|g""u@| (5>YX]qhqcM_ж(@9Jjy3@%IhrZS_t]aHr*!>} N)okk==](K, QFEH{vCRqz9]?`9VASSHB y%I:@p{ !\,Bs7}N 6AI#YNBP } J S Kpa<ݮ4Ff!8|ɝ6>I:٣GX$L&?$gNyozO8a>Fs/LW -UR7J5ĥ.y2qTX8v.:A&x11L҇pܑ&4nrZrzWI=HӕyL s׼ac kîxo8Q0|Qr'Lr_SJ*q GrMhkF PLUa_*62~;$^3N>pkXї):'[K#X<t fO-TEf,n0f(:jse>}"7 U2\rji'O#: x;jϖbx!Ξ9[h @1,i&kdsS6_ ١x9 Gu$GAܲ!b| !mE>RS)&('r]p|?hPE>Qv&nހUq_v#x.'8mL)h%3;;Ħ.>GnjnKp5q)c )ToER}滮* d3nE{Rhua9Qvըjv#qᣌ#0#0 P4 ka䝿Q?d޹JD}o`o!$<NyK Nh?DA (UtH~HO(}yHֽ0?d|su]U"50en} .(]щ7`+_ 9i=w$GG_|  '*o`l#^@TW0ic`n_ATG/oA 5h螢MZkІc0b 2{&NYF`F{efڤΤ"u(ܫҷiޔs3PKveMEdG989*䕧gq(Ade b1D纮m#:EL\fvMoi;;goNyJ8حN9=]bn&.M w)JZd)= &ߥ0Vs<ܶv@@Ύ'@css5ww$EqHDtNC֖_AJhh\N!;*F`FB]}KV25Bަ.ɽ 7ZGѮ]sPC;(#"+ij/!ij?q,HȘ4mq/b`F`F`B&N!#0@ { bY[ZP=!`Vj6l]87YZ><šs vӐ˜9F`PCKljmքw9j 6iٶ1?.[Q|EgLH S#0#榋&MU) #ini.e,fQavɧ=Ap$<$a-0# 4,9qCQ1fJG³$?GKCAytGYInµurACB8|t))`F7$CmֺI!i7r\@8ڭ;bvw=tpgmLn@!‘{zZ;.!)L+9`F;G6|l]`L $I ?±x]HwleWTZGC~#x/wjEL#0"@˲dm/mMuF[?Wp#֚>YO"q&9yK^Z\uܴnuz'F,:z uH*O5 Vc]B~:t~\6 h4jP(;(( -SԁZJzO+ &tX["dnN#4 'c{vIH\f^W;VoRAƋ%Mœھג< l. {6~/26M}[) Ӿ%Ms Eu5CRV8%9A|>+~4`TC1̩΅BYC#ڹ)<,E:ʋlkOY"PJ Tgp_( &HRxC&sA0 ]h KεrPvRha<0AّOQFi1lp(`ڬkkю%ە#J &2#MSoxOIƤ-a xWܦQowЭWƫ,>/#&2#Mәo˪Cu>?حi 6ς7XS)Cױdy'B=_ĩglM`lm,lZ{S-m50.ȓZzϛr΂نM+zGH%h,Cg@Hb^x?Ju4iq~:Wg2,⚥IN@f4*tvK9*5fM&EEG;mb{KKS=F8ִ3qB@L=둝>kB;>Thru rrjqh&G&rZ t܌m@K@*}GmM$Ty)C=^ĩGhuFNҴCDD. MWú 10}(Ħ{ iAFR|+iJ`[oo_U/FȖ$LT۱e[l1s艓&GF ju-HKyzMDj6_lhil):{df1ޛ'SGm!EKfzS`7O>k E2 n94D69崲t鼬|XzwJ5~W8 6~\E{{naG&NGpn4RQ` }<4MМ|r~=s8p?'Q|"KѓJffd§W>.DV(̇K?{9/m 3*I"Mڶm"Sҿ40<3';Nq܁6-P❋5vjPHA%;s]c4؈iyd^FjL§k(miVXl; /&EXp/|ƠU~sˡh>&NCIO6*l=%q 5GQPòYAF<غ#QuGQm3[!ЫEQDώJ5QT@T,OO}" ): I/dw$,invs]fvf9\t,)'G8yH.w6tK8]{#!q%MȲDB.3qNx9B.b1x⧽JWS!DwCĉ'eaG K"O*UsYgRfܙX+Ex=>|Xܮ4-Ͽӱ74'gfe[ihArtC7sYУs$h48 m]m;gi>0nO 1Pȓu&BIyk&刷/>@ e2JG%>JRJ6]ϏK5Ck_妜vZL;.-OCpbP'yX}*CKwQ ZӄMtI&N (*c  tYe`ˉ(Ǔ0d4$'A} ]&+gH[:ZL0d r6:<0 w֒B9eF7 ν w&WTLx4q}_rH sR K_,byt$x}i O㯰 ܷ K#OPg8 D VS)Do٧~q .GF3N^b=i ʓ辷F*ń9 $AJ_˾ P_KIU-[.8PA[pUԆ@/_DqKW'wNsi{o0a Zz^ĵg4VWH {5v&p֚ĩ:"3?).Ptt4:0t EU52|OA4W@G+@3_a`*]}S-4L&*S)^ûDL9x_Sښ~ uH=9ѥoaG+e ~`ФB~.8m}3@ڡ/4i~h%SG2eTMa1AV:;bmŞ0O\=_AOtm` 1|>&3S[zu"wv{33iL!@q 8 QWznnIH6@ڏN:Z8 mIO(Svc"'HH-6&o#NT}'@k*'+D|H8Cn_1F F| sEn}"ZʋR!?%AqPe$8|CǖmjrM,nj rlrͳ&4Rnp'uYCuXcU&v³eˆo r eS t =E½۔S:ү(&\nR0sq*8od7rV$l)&}w:qN03ejti./ص^p]wxP|0yo&nqhn>Nsno{Akʮ{Wv0/1\qFp1UQU#M;q+\s[Η]Tb]K*E m]\M0qRxEub3u4:,-fgVe2|Bxq䈐Y!tDK!:8.hK,ܫ$A)m 7mC i_og̶ײ@ІGXQЬ~Щ`ۡ@Joht91ro[ )oH lGݹ?,fc'1d37bZ \pL|D0@xbAO,4,5O}qyC۫fz&F`Saߪj&Mי e}PvIm7 r©gX/:q?ѝ/O^-[^)w4^DhjZ+#7E(:$,w±ߗAFݵ}'wAhQ8N}z#پ{3t}Ku]>:C ɛg)o*d́ chp3]F`ZVͭFwUׅEBL\$4臟ң ϐm= 0ϔU 0 μ54_"+Hjn\](:a{/9/D lZtyf %!|zERO TFзI68L 2=eL2op}SOHs Aǽ 'v-B.=q{LBP08E(+J'W l| R:# ȤHXAnNCEi&Vgs?JJ( ?z3w29"d!o #i _`FS-jenh{;oh멽zfzVn!h(i5j(A7J {'r1!@u`<}Df¬P_ANm'q>M"G57T*6CWTȟ?u]HrsCh2q,E>q$IPyI:dX'_>+:k!NQXF!/!;ӠE8mR#\󼇰k+j8L=i?Aַm0$JPAPk G^d=FS8e4)G#ߞ؜GpʏBU5ڋ_^k.\̅Q< >ȜYψxk%{Nji|WLePAPi < =j;FlN,FAC-P^6;r$kӐѲukmkm%$vnB0;3$ozz%qZ/F|`\7B-Q[TyF{ rThǰMAY%5!{@nsTQɻs,$=6Y^DQZ&t.ֲ0fBңUUsB(Z͎$&Vsp$ Q>6H wmO߂[{F&s߯t n;<82eE@8TUHxS]X5ETTs?^pkH y\^g^ ._)-P߸vIڳn 8ݐ"@\{-fS{*~h?zxz < <d㚪c;̝ܻ-^9tޡ,#Z^f[C@̔_z+ K /Az5m gV<\tͻroΨd@"a͔eych׋v^O>z{=}UBk&+ a#d6\{QY^po޾`).ߢqS\,\_9a S5@DW#_3.dDo(?5Ph AA ^^z6k~)'~Ż1."LwfԒq:=՜qJZq{3i~HnƏSb*U_AP&c^u!H֊|fƋdq]KGf|XoT H̅ !!K+P -m$4Tg~Xܡp<U8vs0VYCjzw FDgXQvi:$Vb$MTY q2Z !ܑBmoDӂFT/Lx4T+k"9g({D:"Y9"MT$Y4Y6]IcIF>d^[G]cF!(&Ǎraivs\R#z-N@q =M h 9~?&0NL{@'_ t: &-&/`Zv)]4N;N{n!j[u[iin\SX/$bA<+~M-UC5Oy]&?vͫ<8y^5Jc3 =MzD@NA7j{v#hM;KHn@F1!i"\ _™fi<ulHg %K4KӿY^\$|Q huOF2}137:k^A'&Nn5QLqN ^#2*$,6yrL+ZZDydi њ [Xz$< W—]OqPl 
N[t''i"<48u~aMA^mdHZjٷ%R䚷n潌V&y~IQIxkƍ\ CC5"7(kZy 0[{\P@g[)>լ}| F4Qqw/ * XD_UتvɁiCNsYW9!U'**L`BS >bH- m` ?-Š=(Z Fi tsD .y&~~#Zaɑ4\Sg z0 sX@b-5toD[b8Pw[cю]ܧ/X#x E!h=YHMj↝Mʡ %ȕL h.ʵsZL/IׅE: )S`ʊQA$tzٲD) 1yB< W—pfi:*pPrj`;wՉ2Nhy wu!C0(vRIݟw-&NѩtƖ,VKYLb K"M$r|גNWf X,Tn\)i[?UusW^ B!j+4dKܳ QT=GYMf|iMlmu8=}TF̲ǩ+Z/@E@\Fb2o_455a8S&N-ߧ5;8Qt TVI^e.(B\Z***"eWg]Eh)EȚJ( Ex#Ը.&† $٬MۿT2L#7]t^C@+c]Įy Ek0qr^-7/.8J  QVSP zʑ8W':YL\EHTeG ]6UF# 2:'JGxowS19c<SMonzfūtwg uPvzm'p440qr8M.єQvx gTHMs`I8dBw=8UR+ tHHXhH&8sF(Uhhh}(~ &Mo+-M#~Ν81\Q#qh\ǩĚAHŸɽ%o=4#G*Ʌ3.BɨG~FfBdo-I<29@==BTJ=WԴ51ARM/"OjĊ0ϑP)5ZQ?'ONPƁب5 /gmnSq1x,OܝWʿh0c@%qCۗ[/d650Vjzz|o@(FE`"ר+Ql_ƘݯZN( "@nidUEɶ*c0RDG%6J嚣u2,JVko˧1ʘ񎮴i8 c؄ǩ'%DG)|d<&NkʄT8H;wrGZ-Q87<~#xiv *c_u*7R6%W쿒S)aݖRD]\us{;6?N{c')5|djE`źn'&'6-Mqͫmo&<;-I_6t#P?UW.`di &NӝEeN&[,/7Z7寛-Ewѳhhy4bçsO>Z, cYqYIqc)G%?!A%"JUH= 䦇aapqZ9ZFJRO.^J?k Z/?PPPcR]yIɽW?kE5_LHʁXo~pt.=,ӪwF#P85)?gou"DI LV}\ǓDĂUSϖg O|w<56oJDDĉ~Ȕ8eǩ{Swa}nf/HA1.p¯k`ޚL߿ZgD}{3~إ3rJv\z`?mϾ >g<&NՃĚ*d~J OxT,RUJɃZdQBulԏ,Ndi"&TQ GFz^~/jɿw0C+R}T=?ղͭg_͔2^WHZ{q_'ΰ/G!MJɲDBo3MJAI:O!O4a pQHQR?q 7'cU>.ʸ$=645/d]q5o7gzHQg<3֞ &NҲה MBiBJl"M#D{_*oW{3Վ _m0qrI)}Y)S:WS%&JDF2=%J>DHrs &T&<7LW>p&Da~]HLu![3Lܻwi"MS:*n30)Iq)hM&l&,w$J?*I!Pg~Ӟ?S;ҸS*o@IDATes7 B"~wWktuE%SقWZamߥ^zxzߍu!{3Յ _o0qrIb D "J+B#jBA#0!矫m9JU%>9j(͉І^j֣u{]QP}>thLq{e7%=LxW #T!ĉ#0#8O%X=$C,Z=B*R ax-XG(lT!YhU `KoJ2\bH;AR%Fv%JrlL2@M8Ą0#0HLT鰸?Xђq]ΨRsIXeF_ǠRSDRpSC*JPa2+L&rկ_PF,2 ,R8mӢUڍWUک/ٙ8RQHSVKإÇg>Cox1}NIoB?k,#P;LjDž2#0 iY8/ò QA!<ChQ T~Q)d"Wfu9[XzE 9 #_ѠkӫlYҠ45/ezӉ4%˟hiz!JY+_ݠo*\AZ YNA}2< h\ W~n ￁ ֚٪5e{#`'{4`F`#KڋF/XH/}=a`NV~)8-;{ (1Yo+VM9ţ$4 V Crrj r刾>Bc/k5``b &:cIY[4&eV$y' ˆ ޘ=fN(Z.qݑ&,@[FS[}n;#0#@7i2tڞz[t;B੻&Q:[&1޽}?82Fw"/;‚N)" : FjD0 Av^1dTZv3Yak M$J?KU fшmVY*l`1#0KZDJ5u[4@#"ʰZT .i24"pe1g&) _觰t1̃P bA:Bpe]Iс? Gd GNg;l>9ut[U3Տ`''E3#0@BJosƁW{5EdN&p%=M?Je%eјŒ2{$AL=#Ұf;GNL=Z5$3w j4*CK ī*IؾX 1:ڻhfs9;#ԁ:ˌ#0@kA`ZpZq'Ҡk$1&S+Zƒ PBw3"q/#TVQ2@"EeqߪDUCT>^:T1"=(2r (v7U2uo%US\gdZ`T+,|`F|]]LOѥKץwJ*$9B 99/˸!b6zeiP0[ ƿ{R,ɰ.nZw8t@[8蝣0#0 D aq&$C4<0qd_x[4.ϤB|!qƒ]%]a\'Wu1#0F6aňj&D>w5mުA=F`h|WduDhhO!F`ƹF`GH)P?~Ű¼SQjF&tDIxwʼsFhL?0#2i:_({&njler.r+e(/S p7[t[)0'Pv࠱$pI (UuVGi3[N$آ%-7NEU !uFzx$&~CKW8e4_F6< =;q/{q@C[4tZSN =8XP .t!HF~_nrM.:A*A1WTwc8E:R8fN-D$8닶r:mIVzϤIS[uǶृ} ¹'W[C+ٖS]E%]b~;8[ HZDE7K2|Qm$(TPlQ-Uc+^x6M0'[!`7Mm^ slKGA@s'hH*25S| i_g8S:_]bFqQ#)Y*pTS-SI=.Ek 4G@'yLdYz9K?)Mi!S~h&X|}H`&z47oɴuTK@Ulƃ ޸"VtɠӁVF$zȵLl&cY-IEsr aهx;_M|:*N@vB\!@g)AP- *ϑFڮՈcJBO$l06\ =O??V6&LLl}rN\4NTYR_Z@S;qA,Jx%JRF)xޛ:~hIpI⡭` 1KgA$ԋS=C^h(ZoMݿgGnz26Aڇ`(% Itz.$HlB^Q5S.8B3qr̲}7F}bfN,^l+ s *)MzB-H{Djw$tOJLUB9X`dF8U7cU*e"Mm5l"KNWyz惕?VflH`8m D.+MJq@<1ILW솈z7::r:#޽5uvzӁSI.]тKoIC.rt+uI^gPQ7 _Ru 5|;ۃ@ ֣c1D!ZBUB?o=x׳y4`E&e0V&3}K{^V^aRYt.ڵ+>{q{~IL<W%<454#?wMI>՘={4w<*ǩ^:K45-tB5Le Kѝ{Ĩ4 #(| *ܭMt˶PтC17Y-I>UFG6\XDkTȘ*f\{dRYATNJY[DȲ5z`O8~6Kgɫß.QΑ)"} v k'da&8wiZvX *Oz;OQǧz-85M״&FnV}v'NzbIݫbi)"hM5 H­mƇx6 BUn-Ho/n7'meގOM\8XfZw܉Bp.-7aNk!ϛ 9@L ?Z_|螺pCa/C^%;?{:#9 7x _3'E9P@7VqF>N8D5OUmX .{M6N)zh:<R`'̓-rB+p-J\Hvi=JηZ%ոcZ@,/f*dg=Z=8 uTa$Fw^J$09S1ATg3ra?N`os w㰫iSPk5"W=z#M8 &XWēi]x􈣕p ySۯyE%ֽ!/c;« hhjo1,V+PPlOYb,wlq9Z=zjC%Kp7;u Mz_8*OʇB*Iq#6phjS;ቛ8u .jԩJ &`F 91?e{cz:$$Br ˟`ޣ0@5?A܋fܐ^rGd?~}RIGF9Y>w ׏Da:GÍ //ǐ͐_o ];V wl;D*hmM~4*\si?d?z .ƿ`ox v?Ns_;4##M, ^Qr J7.g|b?g17" Wk, zMi<]5i>ówQ<@Jwϒͮ{#>V4'DVD "Mo Ah8mh X&x^E_!Vr'۾(4~ĴCCl}u])cg3gذ3 ~1~Z 19%Sto"Y)1V yyq?-RzX_ɧ`Ouz@Typ3PmXWwvede+ S C* vQPHiHiθ9x6v!&\3t|E\%LX:\CQQ܋f%ܴj?<8-qi8]ceN.Y][Fʓ


U-@$>-G$#u/w#aهd(ZmZ5qd?X`Wdrđ@K>nIY7mdDrە`h+O/o%.ܶ=;q. # A 4q.+O GFOh$]Do[5?;U4x]mAx9=~{{{{YTK@,+ *gg䴀 .-N.E*EimJjmmse{x:me2)pޯh8uϑJZjmڥ꤉+3YX<&H*xѪBnkp!׵X^Кc/qaY O]$KLDX Z'B)AZ?sDžX(C,EKZv2w,Z '˴kGV^12ikEQ&j٦ze5ViCϙIBBtM4zٰlOd/PuӛWf9}&4M Tl:AA 뮛] Ʀ $@Fj79j8qĉ@w k1pݡI$"j:G +hkrT.C'dSSpvt? rr9[v LnuD4*L93(Z(@Yzʼn6= 8Q wnd]`+ ӯ|-w'{׍uM’BA }RF6M` mr$1\/'P_rO]v;0P!ANliв51qjY]{,GĿmu3YdReo acSCNO-a!½mhE5 #g5";DFݹ?,f(>F\1-.O8& D$A:p1@_J:X~q-S1 ?<~^Zx5t" Vnَdqp8Wr"5c\|egӘZgt؁m#ѵpo**$%}b7 3^Lv PLF }/YH(:+$,Z#ͦ]pͨ~ЯGu]rju ?!jBK|\\j`UY1-af,] ҶmDkjcؔ ԃ5# jC2i nϑBkq;_KǷq Cװwgvd=^VW=1Xhrɹoru2Huj֟Sy*3Dus'ʅ ns߄ѻ!篚{d;L>v~>, wh̥ c`u"i"pjϿ!!1qw}6FeY z\6sf^XoꞸ 4P:GnlPMhZ+r{g=Jz$êE߳#^< A"y5‚,|yztM@}RVv%T6aX'^EOG 5N^(8j/YkϫBSr(a< `럇w>BqijŏZ8G2V-aEy <ư$?ZIZO7w.U7|4\b ú ik^rpIA S)pbL|&tUZR('Hd5I)-nA֧]?^y8e|lW֦ d-*4wdT#YyGnIܸ|b ~_?,uFH;ď_F8Bpd"$f bo.O\u H w !sf+$o%_0o*9Pa!ퟯOkZrK/pR6?Y/񺗅V3 ΚS7hOTtz|nܷ7uؓ?zWмys Hspvε[%zI"<濞2gAy&YAe&yw^)JOڇ]ѺG{|BuLw?O.hֶW}v? >R 1ܷǡ t43<"[AbJzp]9<]X1N:;/!kpUO9z^g9E6<>u)+^z=tNta0~[韽 HxFdmf}N|v0 μ5?y*}Hj"nF@ƗBPǯ Cm$ N^ hr>="': | woI68Lk80tY}}SO(= `^#_!YwBx+@qqrb~Ƨ@ϑ;|HMa3 ,f,5MNp}:D a!d$aA+#a:ӿHN)a h$f v5_T~vkm~>/}V^?Nͧ}~m 5"@"Z_H/V1w]-ɺ }(o 0'xWgynj} fͥ>q; ðֻ"ٺn׎_aKZw1Pݴ#^\0y rzƽWj{ v꼅KY;'w7֩MYÐqׁK o:0>"_38ls8JD.|D|2P~VmǯC R2q*9dž@ڏz\Tv֨ x5`--ӯϕ(NSNl k{A Gj;N_$F0Zh߰$\/Hv!~rjDI9.#GL)-gv0Q}u/?cڼE,_'U2qj.,'PAPk -EW1j")lIf$)HlΣ{`8X\t*Zϯpu/5ʥՅgdG7_-Uu(ʮ|G%rA=@yj?"{\kS;G%z+ӟY9N0e#+$z"dTT Nզ^t8[4OE_M&a?jD$z>tH|˨y 9h^Y L !^W4Yy4YlI[C4lcKU,GY wE&DBlz ܜ%t=;Aab[μz9Dy"0p9;3ke:EyW8Er;FDO;+++?׹Fm0XɯF\|XcJyO&yS;q+G.jJ>JۘjIL` #'{tt(A"K2~Y&Ou׾Fbzw&ۮXǩvI; `J\:^㫖^&*ܡlk$k'- m]"OZCtr1UE@vŕ-(X›*g5kD[[OZ A:Stpo2Mڞ)A،x߀thl܂Sڴ't:y DP6"' DpѸЂG,Z}>5h#c`%t: m^< cUP0 H([L$׷h~;򎆎|Tu|̔_NrM u>(y?҇Ʋ[}L Of*20bH/ ^1>6ok&DO;B}Ьq/&{F}x3_FH*' 3 󦾢yH'@sq@m+}c5 ܁x~'o kvY(hD :ݢBtvzB$i0C |gANW t6/ AҿJ]H|h3fh\otz旎q댴IhVd.ݐ5=G+?@le{,)IZdLnxKk)k|Ox%n_$,=Q87 b)0.8>6<cN_9P-T]I>5 6{Z7+dB3ݟɠ?)U6*yǂD_\\^8D u~*]nm^<ܻ+gï+l U&^_/Q  pj޸Hb+ ݆ȟXM$ e_د󖄚hĪ648dgr O[yNdמo6C„+xFuZ&ȋtaj&Q(֨ tuB >up;/=[/uG|#USh Nrn>xU=1]d n@T12A^4UJ]؃EnfJBXKs[+Zκ'+7ͺžԦv"+sW~_a]= p†)^!'M(Sq6]߉5" Ov=TV9ޜ>{\K͇$1HbBE?].ss7^1[̸k|Z/UFD?`}C98#!&语~3S2@G^tfoII6ZdeDJ^k2ɞ +~AdCP DPsU(nE&yUjeSu3n ] PeCDU1>W*-F@I:PGDʆ҈&O)Dd(JYYɳ.17pN}e! 
.-ȩ' ivz&Bzm=&!Q*s V, |]c!&K*V e38 \PWkԄYX^^y_|6/nFz#t;%SvIq׏++NȎOwZ ,)% ۸cX&RBQ֒HQ5$E>p"d4J9vDbXA;K'ƢD j|rHMZ6w]}xVF =ě(G\|_l ׹7e=DTΒQҳVԲ:sᵬ[%v'#%sFsy)sk8Du4_a鞮kA%-3V12B3-*VWS~<9F`@ _|=#czHA~s}fG+Qtbsj~8gi嬼e#i67\ttUZMM?G۸MtlГ qD@őg+@/Cfs Sԗ}NsO*7Ԍ,8=)j,mSU÷\"6o 1_)V\)NVR Q3{޵h[_2t}-708)o%gckGi1h m~Bfo<΍Ԉ Ur\f+e>- *ywP=ƍ&eZvFM'kiOZ^Dv*J#CN;IhdC 6D&ɷMP`xv Dn_544a{Aآٶ]P'iԙi uwDY4wH1>} lkA g8Y>yʫ=&p |ֿؗVC}u:]diKTAdčCM#@s3IM{NY\}XTh22s4;}yTyruMt*_o=,Yh:USN T.Ӓ Cl9{a:i{YQܦ8%Wm[K۸IT@lWeEa9X8təF8MBt;r*-tm~N"qV!^G\\P~xbE0H-vY֝o<5iUPa_*AxfʙV酹R몲h2i壅cI.xy?N-uYխ'%Ij>]`̱9E˺yW7P\9|`YYk<$3u8EV ezouZk/w,:i&=N?wB_Wq񼰃eh,ܗ$Nh2ܴ1AA~krTxD m[.ʢk0Z7' :ȯD+6'pl Tí#׊WmSx wDN^ \ )'1"sl-Ԃ.gVfB:xjI4W\Uk-gHx<-m~^dT,wAwr7UpsۛAr=H~^ a-~"F\눦3?qNddB.H.XLInQ[ fE OPo k?*5-S|522ķoR:NmȁMdtvh)wH$>C$B- KI#:4M>I\Xy|?!"$YNaK98DgSn*_êm4 IGD=`&z^ܿ\Ƚ N Gsߎ9p]AWlAlzrn{pS-F01 "{EK.iԿ@IDATntFrX@\'E_M+K`n^M\_AE4o[CVJ _$NE ğ tMFLտ[E~a9POKO0AHdL;l""`E0oٷQz8GX n߼ѣ{1?&M]*&3\C}^^h,wB qQ&L껖FMUKQ732\gU~~\9jWPo>{/AB$L4ղ!zT{Cy*hOyZg2پ^ƶ7#Yydo3~kr_{Uڞ3  XR~krr< }N&=%hǝfOBkC!8[VZ'cϏۯj{cK_0:ܼ`hogVSy2eW";,-xR}&^Sj[u>2T@Rh:#z :}۷}җ78Wh(2O>pQ4a|*WGqUVkLsS&"v23O%U/ߚ J씤UQU& ߭R ]#t3e8G|iGau :w&8莽6/fؔ`nlrpiQO޿vlͿ 5~r..iXd19ʤC:,¼bf'鸈7d_xxhfhFcXgNǥ 4؀) ) p &77v"9p!r8~dBVn'ѧK[D?Ok\E{Mk5#zQohuK}ÏџO6e.56rWӂL8Ӣ>7%P_?s;REY谈'8ydi"igˆOy3%Idz/+q1\3әՈr`|5Yk8uUx(ي/3:\♰yƁ#{&M": {@$9ٯLDSklƿuOgYF[0rW+69{ݘY/my~eA-oNH㠧g?XI~'x"Pe b, ?j)$E zˣ&c{{қn.8ٷ\c' SJ _ڝ/5wկ_2?ؗ$\ _™Pqʘ}8a6SXt`I hȝ>t6V!؞g⟦,:?H@!@F*^z[31ChsF Ǥ`xS5l$ Pj2P k$NH퓙&"01Ǐx*O&O~r% lveMC(,$P:~duߓ02DB>`?JiYn(:qױkzih\UBlv<9NU)L1zk'Ū=*Mb8_#Mq4:9A WuI.;.<߁4PdBNwvtIЮ"N2lO'"8TPS{`x ߪmli.$1w ?Mdr|k"C3;u?ןL Lͣ8l,Pw&Lp)s>ˁ9R3G xu}8S-_zuKiS#:ݩM1a\]AQ}p#plǶM6Xĉ$ ơ2D$\k5#o|UVzδAlhzIq4#4a/\i#dTALLmԩU=Ls[lH@ٱ_YcPWsZѤ5S0d:hW,N^]{:[y 3TL; )_TjȹmyAL2D$BDQ$_.)&DDA|ۦن3y? ,"*M̪w;VOt$~J,Uh$^ w@6gM ҧQpBDP2s >YpEM Ql @TE$) GCD@C{<{n=5 jdd:>''++5+==e_۶qnthI ϴBnrE0!6و4,mt[kXJbYҒCqxǤX7p"8ODH\oE~;?6=!"*n~ޡ_Qs>vB]'^ Nl!q M|(֋4!ϏnBQ/zM;D'""B`8(E,|2W򪪵.<-,-c$E'Mcd P}W(*C;dNZreTy,Nz#J;j9'!;O͜xs#Fs͙y)E`d8s]'25/6S!nD`Ngѯp}y[;D D4!Eqp">x0I-WcwN_#6#Pt!<.:8Ӛ }N<䣬p[yDDĒu&J9b@Q &:8""8Ky"İ6!4ndX,YRJ% ]X:GX<8y(4OOVWiŦ 0#hBLԋ1 I$# X_Qg.c-Yٹeka_@DPg|O 'S$N&?:O>zRS =kaJD'dMNs%ZK Q ZZuo.wUJMvD&H,PFlo:1yZw$^ARL4YO^81Ur0dSO.+-/_ggЇ~恵8%B+Κ#Wmz)$*m{4HSnqx7ȇC !&A~{cHz{kIkKM4Ŝ7Jʒ61tO3-\ p/PabLj`vckۓ87|TBFb[U8fB\iNmXl,],Z;7x>0 W#x]ᐖ[V#QvȸFhpѻs[Dڶh,4@gv'8vI6r0'ryfZEŽ ÑvYݫ;5R/Yd SF`F#! G`OWҼ- "ab 0@W[|`[`SqRfN|ɺ^W^ՁZ9CHӦ5R`C~ݮrAi&Ljǂd㯏 S]ys{&׉k⨑ջWDz1S>N2@1p* G0ua[fEX!){"ǧv)?]E=#-á](t=z_UNz1B>=pCNό¥BF_)JݑVJ `~1aXWua!MGC\m ӖyQQ}qSMuP\d8Śķo1aqՃ33@E lg@ Fu0jd]ċ]  ` F#ftP![-cF_:!C]U1ǏCNJ@5j* )[υ[nʅnU.e;ۈֻ_kӱۄ2 &ӥ:g@7ٿ{; E!8e9N &5[vu0`Z'3ojTa0#Tq`C+۵m밳{s ,vOSON_̝cÁ`jk.7׸][bߤ؎]ݮ=hQq:(|>Om8^Czv{un#o!cW2žGA0%eK L^WFF`©FG`Umi14ׂD(ηH'jА^˥cYx07tnef\9@0Y{C z9vCb D0|̓%XF7UxG90aΠʅ __Y`)<ܸtZzAI'K鸖P:yRU0^ade\t:]?ND:* Bֽxg|0N\yT@xk`QYGcмBXd`[-, _YR>c#",Gc~5VMՂ5=>"AY9X(@YBN]]0t6l#Xj2WG>; -8ǡuoOݾgJOr4#PO`©Ov3#D?|0?m4.b쌏@KXi`w f'"JhK3' >B 2H߈bSC>OZŢ`<3xl#P`©.?=;#PtlCVb._/(/yiPpY77Wlm, +t%О) >NpCf|xύ1#P0T s"@ Lk/Y1v5o(~nO VJ=Av%Z;;;})A$t6΅YƊ%6 u-d0@A 󬸧@G`խz+ؔ }|'>0$&֬VҶrwAr-D(M> 'Ui쿅|ȐoҐxF`L8ՍĽddsȼ!/"s e:ARhڊfMְ:nh|(yFegC(8 bs^|I[ #0@ًƇ`_!ږ-BięΑJ!e/~5"}rWde!iUA`UmB48rAi@Yub^nær,! 
0#!1@-Avg΀勉%ZὦwOޕГ  *$xÐ;S/9P2`7'IAZ.1/VeRhC w#0Ndy\@C;r=hߐHU,d%d\OĔ]6=xgF"PfSjuL  R6ثI| FzkBnW9SDm2?1LA zHP1kD(N߄e-7a2NhX|fF0T?SТXO:0paA,,t{RdZ D ֫mY$r2u\S&q.}9zIְ0ShrT^8j`FD 'O4`1¶a֞NMY0z0R@KâvK ; )wb+{3nCcCLŔ+8]𜺀H P 4 C :ita](YF`9L8 g|1QbJ> Au7AeB;5դQ.6^QDOwHCke@)7a&ߍOJ] ' LBֲ5F/l#0"Sp#_ GLhI2E!IDT@U2qtc.Ƀ>^r"`Ќq!j5WƏ;:7djPd,[;hlx5lNp@o1k b5e*D@vdnt)\yF`"SU# 4`-u_GS :BF 2@@dg>@ װ.@H(.wT.+#u16,mHQbG1B p$q~B0P:E QE+`|RAۭ4[3n&v2x;s w`F"Tㇰ"༌#Mv)(5i8;loh5A #*81]7(˖iTIt8 {*60#^pφ{0~qii $TchDgy ѥJ( G$jT\@1|,*R)S4:,mZRxxP28OfF%L8M`j@Z٩PBr<1:iQ(z&g |WݪhP!aF`&A0Fh#O#pF2oY0'&~Դ@/,=o #aXi(ag[q&xN[0##L8S11_!p >R7[FMiӽDBf|p9L7CPCg֢Ca|a<_B̩oXO,=>3#0NDy<#hRD=zPG!E.A,NPP]BhoI0]gVxJB9|BDE_*6N |fF0T/3`:u1өrǀx̘#J"d,h׷¼iZ\(Ubyccgt)iu9]E 7V +/2#0NLyD#HP=Rȷp|1=T!sx)AF&f,yr&sDgQVB}Rǩ4ό#0!P20_ `r.a4qsŤ" 1=]'u WL #_S9a,:v` ]ϪCATeYTό#08Ջ̃d@੻rv`aC@Yca\X,|ZLo"9q|t6ͰTG4ٓ6.iR)p F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F-q|d쭵?;ut_ܟډFFDO;v*"*pF`ڏ6vq fr牙߸+PR,iR}>/6_UqQH!F,f;Eߵ|PgKC'c,rhs YVފ}]՚I!RpGo&8&h]$L^kEM! c]W񎋎W(+R_,2_`F(ZA8YX܀pz%gPW]bgN2粇E&z3^J9Tw䄱Q1/ΏE ΂v?e;EGRFO";g9Y<,-5:S+RgLKOtC∘ =R]`=U"Je?c'bk9yldqQ+Rwye}*!ߧ;?]+_T#0|Bq91= nb>z1tә΍g+ޅE4_""|9Q;^1#P?8ѭF*C=k(5RܻS*P8V]OM@@}S%D_njqQqh?O6kΔ)GVw/=8ťECq_$'Ry QH)\wÕ5#.A{n|ꮌJ%ʄYElT8T]gsDXeyer+ȼTMsm6ujKߔ F`BN;+Ee+@S_[~~A0C%S\ GG"a=)Nz*DRSCbJ>.aܥ49wu:8&&DO;0͹L:2vi7$DLi ,п۰|oNfC6q{~)_Dtx,R9(x|ѣ/~4OvJwzDg%EMV(g׮.s6g۶VF1cܻO͜Xs6 C {cЏ()Mب󬶠71 \+dC"v_A> GOW'v) hbMᲲccۚ JBBfF|@= 1̎grәyݴ=Sx[|J:̻烎f;O< ܣp,Ki6d#j5ЃCĵγ-o }z1M9p9&5Beoj'"y77vV~)"MjcSg tM{sȂq].E 7\/xWMOeRda]_.c[n[#Nzb3M5x9V`ߌ޾휙7\1J]0@G;56',uW8Wh*(>dw&g9_}CFU"hg^50ˠt$x,t#|RO?cID\俱Jv ʼM kX5k ;poc>ZͰ&gl=4otlGa*9m"`Fi j}fa?mqD"w?mUvLvMo<cn8:4hlPǹ;SCiXMy=^|XT+em^-lgp\\KMߣK34}m)+D' N?6_{P|%o@]*^0J9Wvm KL@Bk"fu_K!@ŝ|0e+kz ;D%nӹM̻ )z]$YtV V!.&?=m,DtIi+_N@IDL@si1?:}XeӬ ;`Sݑ@C1 V.S#ΊaXPh(MH'qb]1fܐa4'ޅ9)s@Z4<1#վ)P̴ 0~vp\SHL F s7nbyW1F*jɓQM9>5&pYc|wwdQާ|]~ > 4Mz^̔߭x2,v&ޅOXyij,sɪVF`N1XG\X< 7wGEUbxR`wE]yaȏ=qa\<cj:G,"&d!C~Ӧ5LJrXG/4A]EgtZ]qTH @D轂2.MB:bz~>שׁsE4QsO>)g6[1sV[p b4AltA4yӰث0;Y9o>`F"`׌nUNXbQæ<*X<ϛZxIq3=D Ա#KAP >e#. t >"7,BMZȿPfDžiX!f[s+ R\"U=Jy_ڵn d !KjګV닦Ί+5^ot{h\Я/߭˖lL? #%T=?p}ut<q(ܒX:L5ApE.IxE'#\.RXӐ2f2!:v$27ge(]vⰅ(,>E{?KwkvzxYi>^fZA?G<EE+z!8Șa*5jYSoxzY!YLvr }e:|Ii;8/frY:3(#P1 +S&| z(6PE1rW~d\(ٺ.W聨gsnSN+gJ_rIZ 6:%#e"~j%he[lfae(< N&S2$b Jok1҄ņ0/xkO^ xl~p#Ǵ?S 8 K_w56O{cnk%34P[E)ƵE[ta.-$׬.R:͋\@b#͘(aҤT.t5~$~7ë !G2u/pu?5crr3tFWii7c<>+݋j#.y8? zȞW=\9CsR{S}Zo{k7 ӔHe0Po3f~*vaR;1x٢QbLq6O cp?G] u,ĒX ` Do:+|D,+,BȆkk3H3v}n.iI1 O"m=);l|rBUs;bA t],sB6U`߆q`R)aQpu=鼴8ҫmKbT|̈X#V"|V|I)ؑ^zbr&;j@YY|; Y#ghz|v[#PWX}EK32P]ʐ/?*Նgd saVkRűَR*KI`T^}Eұ Cn[ ӧ7ni.ZlYq9ʴ_W%cX Wku{{s7Uke'/UM.&hS sq@(tɿ =MV#W 5~h#"1@D$wXeIyh=ţp H7Fa,.~eʔyK ;ay |m߃N#@\*-s0 9"3\Now&9ȷ+߷aXm'mHn:`2f8p^( A aJn_l's#T!DvL۠& N)G>ױRml9\7`nWtiN%sSwZ-[T%:XGz_糠L &[cga< \ z< ̪ O)Jo*[Dp!8n9Yj{';,(0^"P#vUrge*-ʕR@]B@7ܚf zD)Aʇdz0tr,BW 봤k#pVW^TMBu*H+y ܯ?fV[_UV1oڜ=tCƣb1m5\\ͪ1Xygp,߆ɷ! |%v5!5}F6r62|D' ( +3G$n"-}/O#5O@X@-vҏ٫z k=˖_LYk'$`okO'0y@F'6_/bԗ]Y]:^hh R#0#>)kj`4ai\M|LdGwH;>svc4vm.gmm'E6˦a1/ܷO ̹#Rײ[y#!jDTC pNZ,n&F:8 zh~f'WbE[TDHYAY*QFi+᦬wb;aJ(Z, Z̛Xb~D~]OŒ--t>wOSj$Wޖ| j7j>djBXs #0u$OUX Fe-?MfkvwHKiL~3̉rI6lAൃȆ`jц=X,vDa>`Ȣ_ cغ.Itx5G|r(|{i?3e]Dt(tUqĎ]D-"2FŇ9}\paWB a>u\ܫb7{P l&"%?WR"cۻ!6[ZnƁHGW]{5f?J˛=ب=zo`? ".Ļ>v+#w{z;3̟Ps9.c7N? 7zw[_ΆR7?6(q眸~>T.h {x,ex29?lE7Bgμ"G;V){eU#08ѐ}RHWڧ_t(l1 .X^~v괇8y- ,lMn%mTa_mlQ٦IR#r-yPf6y1LެJ4`%"ʪ:RBplTcfQ#-fDXZaVo7l? 
>3389&!l*""`AnM"T5Gs($Vk@pEqE^qQȖsMr@| gg3y晙A MZvx!%ckY0,*ѹRᙒPRktRҳa6J)C1hu1YTҔv\LtVЖm.]-v4[zkIk.֙IݏDos hzl+ S 4$&7tOyaGBN`f͚| ߩoڰѿ|n4=QV+b|;aNT}ko{byݣGo,CK?:}; (aa m.C>[vn"=wvuf~tT >v?\?蓮+D6*ZL7W "tJw:s}}|wM͔®W}O}r9f`kj X4ƛ+JrG2Avykw^`UZ)zjx^q3IHqM:U/ %L=k0b&Z;t 8uOsT:fga͗vUtzu7*\q_ WzF7:ꘊ/N {tr%ZV߰2}^=6LDwYvN,=Sd6@uKe9 @Ek=^^f!?==xi/(.>Fx,6hA#iVŷݼysNEVN9߉,N&^N'Ը?Mnp}.򭌕0\YZk$g`R[1EWu gVIx Kbq}P٫&,Zu=]Zv2ڿ\Ç{<>n/ DLݛ^n47z3N w^]2G$@$k/*{S};'X>TqhAAΩhR'UR^\ORg#UnF+CG*qK)>SGAkQFO)EwnOc4#5#Q{6lSb&Z=W̽v vYY躨]mZ+c۩uǢT!ʔEo*_:{gtmM8zD] 7cЫhv <*G;a6FShuOTSn/}[7j]5J&8sFe]u>=;G\aHߖ{5F-=ӯ;C=yZOʓzHHM.,=:<'O5H*a:o=#&Lxp떊[I$ 4T͘>'t( ԋ}բSoHPb+܌LS=%Pw:UcdV=LT{1A"-<{Wft_kHNp^4טNROdx$1Yt}x}W8MvIN5Gl+V2 ؐDJOG`㍀UD"͋Qjh^w dz.oخ NZnX~o9ߡ 1;5'bj&Zh?M3r O7[y]+Hߟ,r' JޤoP1 srPOa>|G,˝dHmB\7޴˾4n[C-^+. 'haЁ9DNb@/]-;,l"1 Pr5OϠ+gӻ{a>寓k=yI) (L׃Q^]<:/8nۤz[ [Ņ`/FoEH9>!)ÆUb7I4^OᕆAT@]ss7`{|yEf )UȐ3_{OWo&o9qH$@$@$ktFQ\O;Ɯ C-ܟQȴ8 ogȓz믚k:>=Cɭҥ )(h&tԻE9 @m~5*A{n3/jϓNzDTXF/>񉂩xO >IHH>3UagqǛ&v\ij-zoQfc V.Jf/1=u;5N|t%RzXöV /f ނH (#P0:d;!6^yIk\ӷ:hϓwu[:k|Dǎ]BM(]K12KB   #aOi8-(7Sx:c H Tagn:SB)gY9Kz'eIx=ֻF` r/Yg&m%CBp!nf|m7יp?9*8.rO.<IjYo*3fKf~e/u8mT+UtJ)բ^GZbܻNv}4%Vn){Y庽flʑ B :nEJ9DŦ\%(!-μraS.̍$+pw7nURKLC)3L=U햸sTS ?Nu6Y. b>s76WVd~%8=9[SMR8~OX!}}5<owrB(W\f)wO'6\- ' ':1#?pf2v0S*5N`|f 8 {OH:nF|?:s; Ŏ8w|k=,:c\7JT.r8:̕B}Qt?3W=P7n*Fh/)ṷp"]ީ*v`PaaJx1iHP1$y]T6, τHnv+,+xZV `3 @\{DŎS9qKWwҮ捗v[&4߲rn}aPns8.;swPAmQ~}P5,kT*at e#oHV[B#Sj=lR7W^7=8z)2nK\!dHHH`%j hPWɾs'BiU2j&aZҥac|yZ6#0LYAR c.I΁-z^U7yF؏r'zk^@ S  $vJ7n_$Uv'v&։};} gѭu$~.y{q^{Y/tп00F5&u| ӬEޠ܂޶0;sީVNHiݣGolR! xʠi~s_Yתe qX N4Hь~yxƀ 2Ó9R|8u{4ַ_#\ڨX!V}YQraycFǏE3cDIN{NOA:Գi"m Ujc@E7Ui~)Z=M;)퍗(fV 10(y 8tC=~8W/w)A;*MX#:[Q^^#w\ydJB~f Pxo1x! Lq:$ vq+ޅrޝ3S_  ? 4TySN@'dViyr=)?%QtN%k D/g|/pux=TɓSae iohzNg\KђOֈ:amRLa&KGRaۥj"|^$'|:{8-qvD=+eO% 0[6@^׺$շ}60V麗osѲPèHO|m~ZnpNvEg} cPFW#m4~򉔳ds#ur[mAxQ_0ڮ, ̣ABY~OTOw)Am Q9h]s4T_~}g?oUx˲`X/!\Le_k`C#' !aT儍H︿ 54y4"xB$@$& JYK*CtSc?}NM;^wE'ǘR@Pkr)ְʪ?B>Wi萄^H91|cY9[ڃҷK)ÆUg5Jw0M^[kG^a;{h{)]1ޣ;Fc>8h$,ޟQ062V2 #BvRfʪ6hF6:rnG9Wˍ[_cRyZci\\}Ui*3~t~!Z2zzYr~>O޳DOgX-KE킐NTԵJ~KA&k=`-d2VxVw8n9ګ>Vk!e`<֗ݯ׷9ޣ{#qqC ڕ n6wV$@$@$h2{.tPڸ[܄@T0ZRʦ@ :]@)q0?7H1s mߏprwI`׿cqp¨oxucevAVV+yfc(g;V=$Y(igJS[+w臻3\o5@ȠvaY%*T~(,ւ+ƗU|ǹ? eHfHHSs4MPmr/%-cZwHjv!zPnn4 2rix03L"HÐbw^QwŊo* 7)}w>hYt^MkգˆҔ5:7$E:֕N1WSoҎT~uqVܰLԝg4+z~~Y~6 {a~=:6XɲB 0:!m0LI#(ժ"+g v* cV-:ύm3J#IQR;+7pґRhuFekS+s}9vHܤaQOW2p(='kאdvN,I{v։0}b9炓/mqS:o/tjw9XjX8 ,^  8D'ؘ3BʉXytϸdx(q9ai˪^^b`tڻ]0^>,G{!{%xs(ivN^3j/2roaC9P.r4RED'zj F7Cm;4ZZ`;7BI6]7l1I*g / OUъ~*CU=]zEt\>ch_m*߮>J;gbrM5!o~OP_0bWǵw- V%oxm%7}Xdž 7mzJ\FٿBm\RWX`=\w6]%>%TބRx~];{Rz`umVe-..  .>;#KnP‹G/Mo4w73_SÛ<uZ3=#K/ <_I[y&W'#:aŶ7n0 M}N\ޘ9xQfbWT>p=OԩUssđ#k-Van͌`ᅞ>RJSzoyʼnϹ^%[J=]tM;G'Te ԥ /oDWҝ^G>u_/Tbwi_pPQD3gKðt7J=X&R&"&#  @NQa#^P ^(Q`8i]N'   $kflnQ3hb"krGt.47WKoyrF-"'[5*'   @SqZLyB?L n o|獈Mc~   _40{n`:1i*Ew".4Ug!UZ(u_&omHHH/MqJmkv[c4 ֚FFc}6jx5O*'   }@N}o_WqˆS1 c|i, 6JT @ &1y5K]vkxtVS04_ Ygnj!H{1Fȍ*IHHH` deWbOvCSZ mޡV{,$+Hi{S9dOIx-[kPb#jIHHӴax % +M^:￑rl 9OIC+ xC :Tnwu<{d|<]1#wίv}!גPt<͵0:ʹ; . C<9807O>>A_r4DR8R,t a|Bes[.? 
ӧ?JOqh}O>wEVO;jG"lF_?iX+$@$@$@&rT7z6po12r ~iӰ92w\t-g*ք+/hY9-wI3s+7ɠ(3S.}ƠK9p8a` 'C񇣳xC}0ng^=yR*MM:i_ 6gϮ x2rOޙ9"IkG, |Wx/ϓ̊b$@$@$@$&7z ^آN{NR-䢓xWZo:Q;+e C,YJY3_2fKʩzwS J<c:Of` 7ʲ|5j[_8=NpdҮjR9 F̵@[|&ziJ>|IIv-N\t`4   h8xNsłIiGWE|\@ǯ=  Zr_g_4!"KChEIW}/Y4 ϕhe*`ȟVBtzÈաeSF'+?wJa=FCH;Uey}p$jnzhɏK.Ѧ@Sꊱ0'V@_d+)N8,RKk5v9YFڷp16ZS3ӛn^wHNp&Z7J[o!(O$@$@$pPثGfWq!U˾CYCJ3rzÅH0l[8E$q•-=QSZab8UK`5g5ַ 똮Gt륃ۭO: kNN˴pxjS`_kT R3h#M]!f`QH)}Z+Z0hIdK   <`Vڌw Kr;u ?ƋG<׹jMӛI8қB )(h'ꍲ n^X3v+# 49Kǹ Sf0@m.fNaJDFCzfOq   '!fcmƟiMl +x֠.ll,W-2QǷv2{30L!|@}WDǚC }}O$J1?bZ,픑eIA.1B-R&$   z)RRga"Tnz';ͱXGTedl(o짐^XA#C7o Zր*O:pUء F H{t=xsލWyc?~w_+S $E@{z Produced by OmniGraffle 6.6.1 2016-10-04 23:06:33 +0000Canvas 1Layer 1Network NodeCompute NodeOpen vSwitch - High-availability with DVRComponents and ConnectivityProvider network 1VLAN 1 (untagged)InstanceLinux BridgeqbrDHCP NamespaceqdhcpMetadataProcessvethtapeth0iptablesPorttapVNI 101Provider networkAggregateInternet OVS Tunnel Bridgebr-tunOVS Integration Bridgebr-inttapInterface 3PortqvoPortPatchtunPatchintPortInterface 3PortqvbvethRouter NamespaceqrouterVLAN 1 OVS Provider Bridgebr-providerOVS Integration Bridgebr-intInterface 2Patchint-br-providerPatchphy-br-providerPortInterface 2OVS Tunnel Bridgebr-tunPatchintPatchtunInterface 3PortInterface 3Self-service networkVNI 101Overlay network10.0.1.0/24VNI 101PortPortqrInternalTunnel IDInternalTunnel IDInternalVLAN OVS Provider Bridgebr-providerDist Router NamespaceqrouterSNAT NamespacesnatFIP NamespacefipPortInterface 2Patchphy-br-providerPortrfpPortqrPortfprPortfgPortfgPortqrPatchint-br-providerPhysical Network InfrastructureInterface 2VLAN 1InternalVLANPortPortPortsgPortqgDVR internal network ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-ha-dvr-flowew1.graffle0000644000175000017500000001403300000000000030137 0ustar00coreycorey00000000000000][sƒ~N~Ou,hǧKb%HIj "!  e'=H$E~0(` fɿ\aڏG'0$(>o?Gzz/^ϼߞ=ݻꅻ/_x=˷GYvR~'tirpw#xLqnɞ~ݓק{,~ %OvG?GUQ';+i=g)Sh\y w48;Ti4)M*dwt ,gƏA;,|t𸫋3.6|a1W?6n_ -D -=mophCwq7{ %hҠRg}_;ʾIթip=i֤aoM7:{K_E?r>5l_j?PLI+RQ"jj5)0Fp#Z֜aпM=,./neeYaG 7֬hΠk˥qhПyǼ==gJ85٫9rfgr׹:郝tNK|Bv;9#'gqEÌx`PTIv'iioqx /Q\`\i$")eAw`LC܁y!"E;(QFid4|Yt~YB&^Ow9c6f~^% )^Sw2^CCCC[Ur^.ǭarKі[!QB]N2ՖD)xT\R43D!p+eI`"PKbF:9˪Ħ *s[2Ho Qb&5$=фHzHzHz#^ۯ0:p=#ի067aoSgަayNư0An# m>Ǟ'+j\J51 !J-᠈S+yw۱34Mjd4LN:uxt? zܭeR&+M{A[%n^We*E/2UHeV܁:6JKܻr1yZyKӆAZ{Г Z‡-aM`>?;= %RRb + 7sQЂ0%lp 6#$&sMBZb2J}/p;‘վZ[.R+xc@/R t):[X#3}WKF̷҄̇RVsmL}l~F aZki0g7yENӝl*A/n?Mα5?'Š"cfoFW7X)Dԇ7#QkdTL@ՠ2*"zeTg6GD} QFEuD}p~d~ƪUe4H3\$epۘu%Lf,RE[X&5x`Z1V,gwk*p)% Zi*f^\՛+vRv6i\vRL Cmrtk܋ a{\"Oi޴MЕCV,\*o@5 :3Kr)r)r)riK\NTmC0VһlJ+iEZ˭]ƍlK'0LzM6gk#ݟ]|]|# UTng`6PUp !!!81I} WɨGj'O3ˍW~HeyY|Wizp jm%#D\)B#$Y!(*(99㤜w'22$"Fhޖ6yV@Kgeѵ]kњ-0ΡCR T²&,rU͖W4țțțț-q'X>)Dm [IHX,JKʛ@&xVBTTȃȃ  ÉB:N:#HgHgHgHg3tnu=$5Vje[䂲G3Vw^ /lzHdD1-J-i0j rPM3%&\kBńmEQE( + Q#@r/z "4M(7$Vz!sJa(֣Xbb=YmJ Yt1t[tLGр]ڀ5\A12dj;bhbJᐮ6)hieLEbo[ x xBp)8m /T_@,m E0 @sIdA6nڑ M|e8 F;zt)-it2sZ6Une Ѐ>E4"D7Ŗ B;aۻ S; 6AAҏ;BpZP:m^0yJ2fqT}MN+Ys3k+ImT[ޭ-(-)NF%t,-Öأ kzKA5a:hOn m7@{ 5^/:(v٩(۠`tó,8ot) (s 8̮N \p0g>eb#8ŅO( 6=kiRI%?+>y7& XSgok'J;ejP6<i#Ă@D2M-e43wan3*wU}21tIMSp[^+*A$D%-6aN.ǔ4˸LHfHfHfZ)(i?#m%UҶɊxͬSIS y'dd92|*ʡ͉M 0^ T6讲iݠF-AW6C`@pUқ;sRW*9R3ZplkpU?L\C;@7raORZDA0F0TF+UN6\N0M"e.<)< D߆h6XG^DNY82_6탚MtrMSF I)0R7 bKyD1Z =i/RƂhsQ1r`%FX"0_GJ ͅSRc}tm 9@b.?T8aþWJLP%}R7}o]z N 6ʨ6}ogCwR^buThutˍ˛uq7WM\ݼn23wATJ)dD$70DKiᄎ =ْ]GY^AsӨe_=rxxl&9N9\Ǽ yG0( הj#0V*Y|f. 
ÙV\!AIqL=p`;A IwmV:J타R4B)B)BiJ'F.?Qws|*@O}fg'CghcOV/YXV&}WkR>Pq2Go5au+TQ?:acrG<O/s`,L(k^'G`c"'%t/~St{A8OxZ״sԋ:af[o4 k{qtdae:wM]3JӁ03'򽼙/(d8y_^|6[ XyU4WM4{m4@>̞pj=Y&F~1G7eJvAzFSsuxMe+H5O$ wi>C\4 o|9ʺ٭}г_z$g?_%{boD:o{GO_zó.P‘΀tC~91g=b }Ji)&RhaxA}c, hB߷4 3[X3;H8tb:qrby!šC0 0IEKbd:|"uG(" g%y3]z{, 5 2 1 2@IDATx|EgBo+bA`WI(VD') k{տ+ET"UH"Row{6rIK$|7;;;g 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L đ_`~=7} ϒ> FP%+~١ׅߍ|C.r"!?K|):izdl$~] S|Y-5c4i˳fu|,ҋGI,U8E~Q'#*h/zV<7aSUne x7:Ɠ]"q.^)I)UmQR3*eguơ2?+s|W?ե2>սWۦL6N7z8>;l_$ s'=Ҷ?}S`v6|J ƣG] + "oG92[niL^EoVe=*m!T G]io旚?or{DH2 ~VE=fH+wHUqC|k (n";gk{^m$'%xJg?L5lrgPyn L|ʕxvx߻vBg6#]Np@])\sY? wÿ'q&joj\=A(>ڠSWjr&Rj#G(%i[ 잇1a[p\JZ&BVb5JR~"܀ƿBJ ,oMY6ͺ"!ѕ`ږRYYȟo s ѧ̥?ͳ ;cT)ϥzw}ƀЮ1Qg)YY%ޭ=|.܅_x}Z.<^A1D4nDUs直>kTߥ#><*3C;Px\"=ovUklICiK]ݫ}CKUM>ۍ8?3pGeLb*c.3(Zr7b]aic ?;gYP t@2ۗ"dO7R1ߟqӎQU6!xvG5'kI@E^H*=9-рXkhj:4gԷ>SͣvpHNMtxխC%DPߤ>DXFb Ţ%mqIT,XM]!? Tlo:H6ek[)<)kF R CR25 e}>hV@lUfsR2߫{e,)@)EuTUVI gs?%֤6lFgh$^D9;4 rl;bؤ&JbΟTeW? vKτʄ`'7a<@Z0w$^,G vAi% =.fnvѾ@C7 ~޳'*G|xvLL!:' ^T:?3c9=;˟1<b2tw.|̙bQf?KȕKdʯ{(]Xl;vʔz9SjCr @w~дaAH숧G9?#'=;& -ݭOxwNcM_lHz`3^ z;4g"/xo~ #w?uz>7y}k;޺ U KZwmmnP: B܈Jh/?too=)ïK.}AZzuuoL#?拤E$wN)B,0{2&'͛_/<$f`>K믇 eEVaǕ6PMC׋@rI?U򛴢h`8<1٭u}%ݮßW<#FJV;~8qgXrMoTiN뤧KZ#\SaN*`)+`Rtt$ 9T~Zs!u_l{}wePBKSt?cH}o$A[tCmt4ܿѡ6e4S.NDE_W WP7ċ:?BqBJ e^h֎"nt ޸pUFJ=M~@CAj8N ?ź[h*x>O 鈱)ZcaOݾ۶fgzW+EYSAc-,'BtkRݒӶ]ߡ2oC^=7fOH-E|tblw Bp)v!Qux>-KS{ mܐj3Iշ(LCHkAKJӥ7`!D cq@q {cP8^`mG=cDҨĒg A7Rsu>^&{I9ōhfn{}]|)x[uU'uXz:mA{ a8}ȸ&X&4T \77k^eZri"Fenke!-N4Xǭ"=Ȝw6FBpeg\?&Vp<}"UDiXJ*^uWIzfI;BU=G%ZgJ/?t/}l](X׺mgPQWGn:!9U¦QqDqk'zh;])+ڕ:\MZi>c%1YٵdY XfWvTԓq&0M==5~&颔iCZWkg%hm׬(tР>9:? kF!˱l26wC0)!إJBP\VҷI^i~aygkr!&%w%~Wdw]cT" U{iԤE$5>iw-SG==ְf+qF Ņh(Nxoa&Y U ,` ciDQppp߭@!̫(:9&c0=Bp=y78 /O|7b|b /Ǻ_ul,C\ IϳxfPMBaY\%TxN "a^Nx iINz-MMbMQEY;ԓmzՙ6rm=6[)OvU]mpoHqN?Ǡ rk{QT3gXFP/Gؑ>*}?} L16ſ/lk<)ØrR#maUj]Y:ȊRYZQ>tU)Vݴ4JUٶ) =RhE1|/F삦$HƗ>EzØv$k 4gKH׫ıP&K':;Һ‰z5",%q?>hV{؀+܍B tDPjQTOS,c !/._zoʧ:(?`ዐ3 Pgn//݌Hq:u(ejB \hOÓF'`}hNo@ _{nOEB?;so%{Q˅a?ZӴzrifwD)3Ԯ^h3'B&W#'F{I!Ń^;yVDh+Rߝ~Fsv,ODr簙bv?_.p"I$JhN8ߜ *FDssnGv*hϨ.0&7Pj*Ғ?A3ҷN7#h(z{,߄M}q%JIF~>8,$8%}^^T}sSu`&Y{{vğ ȮP::[raL67 i܌|N9Gqeuc&k&b-06 mЭJzulrc:NLx pMj;M\ShraeMB&" @98!#§ V '2-Ih-I,bvy k&t y/2RM&ؗE(m"~udtƔStӤقG)osh5O5y@-\ZS!;B( 2'eC @5OqEGuOcnqPlp'όtLIghHn {_?Kw^#Y)E~eAis\b#/BӵS"lB:3p(^EٚΖ|0chI&nNg{[yoҷY0GPh^%1AZl0c Ǻ0f]}I(W jѱerfU*yx \7/QSF<ۻ6ErSk(=c!֙ǦEXC_m"]@]YKtifSۋAtG Z%}ʻ٦ڹ'}}Uv雱1Oac/~ ڀsO HSr&4_&I Ensw^w<;:bM/.| GzD쌨/eKoSI_/ & 3;xs6Nt@?Z,SpjkbBu6:ö5u?go `޾:E)]sʣTk=uqpy\KPW-i4,AP$#|u{Q $h^,24|]^wzh@i4iiXii8728D$%LCC`/]7*^m4J j)wfrD{`6o 0X=G4eχorC"Wc6(*4JFFv횏صh&z0:P^ɸ߉)e/ ap{>7GX?wwc]37i-qhhkx C^KpT1}aީKC;ƹ$a*H48A(>S`J-Fgt0^-g dא*W~ב=Xg?暴4 lA#~FŸUI+y']FWbP?/s7!f {pw:qi]p?GAsBlC>2Ҵ.1˿݆:,ZTW'&Ra a귢lNg|pw]Ő)oޣ2s`J_s!& 7ߵH qXYΗ敘kW'O7inLq5uJ5-Xq+xОvaaחhy_L?At _ϣYצ _wK"5s94  b]wỳ c~ ,ԍ5nչ. _o@˃=c5舴Yۀ|VneKo]*WX|duǦaMo3p>:_>J=_9}uvYhkqF5>PgN=?ovi3 d[4-Ǝ44PzԮ\culϏW&[O=#Fս=ŝ |uw 04-@em[tʫbhWhS! ̃@rR$BݍJg\fWEgU> HoQW9u.iuЄg?9qBћhR70T4];goNQXR%m[ Rkb 8p2iCOǿ <ӧS&H qPӣ)δ@R[gL`}ǞCfLH0We*y[={+1*[,65xqg!' 4-VH>q.4fs c6/ٶs0UtES[ZPuX 81arenٞ 0}O}&b%R, T3 c3z\ b`)\WgMN0V}"@0n݂ qSRW~F̕b3>FΆ 4<tϷRΠNd9󶮥8f,nxǛۭ\ futWV.`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`1l+#סݞWWaߑS#;s}nCO9JIr]JHsox]#K4)Ϋ?kZ|z${|76ԧdL9^dooL0kB߉)C X",;{ }2ts! 
@7(Y4}>|Ҍm}A:c R;8lD,P',YI-_qvYwE݋pMSɎ] 濅uJaDr6#=Ӥ=˟1 H kħK4U@)Y \oi˾UbFH,hOm ,_I4ׅ2ӖT)]?#~(S}nא~Chz kBE+)@wcXxu#uPrBTyАJ3R|Y-}v|iR} E߼.v`>` &Se^0# ]@t|q|f;ה J30&P_XH/N )@:&wc@Ⱥ~mh9w#iT̟}i{%^dׄX歔do&unRםkyUxư]T+O|xUtB Fl@;K^W ϳΫv=@R[hZ]pv1NjvCVf_v3E2E*kphiv hXHmx"qӃAc"U#cf/ѡmJ7-֢H>S;1%Y2d& .u B4ѝ\vp^k|䞴Iؖga*:Up;msYkQl}s(iN i>˅< {k~;,I \t^+81YoP8w] UfpG`LBA=T)C-ܲKw 1}^<4qԔ(B RL8)F?q5*ɗUp?>a svFy3B}Gzm_Wޮ.(St-Rg e#QSYt=uK 8\9{y!4#?\|MvN5Nrijc aRiSЬS=e q/]ꗐ]7& 9~uGV# Y`u;Լs3>E[qy'EPA'Q*c?#46OOqV{]xͺ2ۋN+)u Օy;n|g l@! ?! !Յ:wINX2sޢw1W gbM |v{d?rK60CRZ"ž3RɝϷ I-'FcMȍ?ρ*p;S8nW!?Ů&b$Dt!n00IRp&Jӊ*?th+2[K0mmJ&њDx'tOo~T|uMSnɗ{Cy{ Cf1dLr'U YM4CQ/%5m* QU٧wɌʘz4!Zi[fٸװa#Q ρp^f+I&2_2$=EZ4se EP4zzq h^;|vޯl+uDE[w@&6\R iaƓ)NKN{.m6,T酧҃ ȯ)5Cu엑t<$@\7j[$ ph7;+CoPY~+lrYHrs0Kk"Vy{蠶kR=##:Ѳ=Ox.$<+-G,z#sWzEKh`wIy  Q#NCzy;'awֶfvÐ׋S{5Bq8>eƭ`;Y~/XpMӸɦnZzaYS#nߣ^ Jy{4*c1;}3{I6zIu#lCB 'B7͞}톮1m:ڃנɿB*eYzjӶG=s|ߖ 5~CXԯv?=~?_a4KCVT; `)dC|ہhʼ-+h xTrVnh-^0os25xFРYE#!N{:B ?0%ВZbh:n u?t<.rUoZL~8orzDa*؍޵:>ُMCLum *>8mUv6JS|z`hMxN{Ÿi8<;o%?,LV]4l!>qWzL;jV(G/.ki֢t>e(=$"SQ5EH"LmÃB!4W ˙&w7tP%E`-#M?\gsGx XPSڇ2m+g4 %*Th]$a guuM|^&TjKp{(_`Twa02>oG>i}O#'XDwy*]!hW  cGm)taC. Βl7t&Omfkpmh|ėْ 4p,6 lΫ♇z3=Rx*-R]tդQ.ζOJ S,Ņ|3=Z'L K,(3*~C c8Jx)l3 Oh&]|ٴXX?&w~fƜލ"_C֓suZ(:mmt 뛴8@>CBϥk辆7O/NG?paJ#w+es'MQ֮Gھ w\v(7PUm*ұ[K c|ӻzX.Ŀ$"?ϗ#ތ%˱thvo!VxB6Lb` +lz\VЮ0!3c|~~G6Owg?&KNU_(U{O@$Bj̷FkfLXWSQBJ= ­-E" Xh(W/<~_Mԛt!y-;VqV*XyǓAkCh&U&G i}[[ A#ٹ8WL7֍U!~^aBc62\\VjGƖ/ S1:wJ"a-=am"p ~V `]0Z5Hkߠ?hς׸ +hYHy MWZ M2 Acl!uhyn,> e'c5V4֮/5fˠIjA}[GD>ɝT ӟmU*[-Aסc2[oH{ZμU쀖O޽Bt{VfHk?LuAŽG㮳'}#BHa-qI6MՀMS7b&hw%#BΌg`jp,q҇2A5؏F\zQCw&42Tvm|>,7xesKqPkepT$TNT둾Wwt}x7h&ݝOE[/kkJDL\ v4f((?mӌ6= B㰞x73* `,*{'uL~;r=ֶ.>+ZmF],e3@5!xJc_M(/ñ8ˌ'VuTZh͙ە"ܼux Ԟμ`>+1>F"ޝ9n\mOMh>g.Y^F[:7,l櫴J;^2`6^?cFލ1Mb*44KtU ^Di̢Z'Q]ޡ;j_A,BGa` TJWTlx]@yyI?inH?XxCF(h n"]q>.f*GrIvq-g5:!aK),g`Y93pPf3PQ|g*[ĉX LU yx&My"2Hlh0A6?yڷIZ#w~ Jm ڑmY_UĢfM6U3Ҟm t}Ÿ{ZX^mC; dnrX͕(v_ZVv_5<Ώʞ72_Y؞ 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0dEیΘ:0so%U2mPG&`L $%d.bޛ9iybĬĢ΄T9[Tw%zJ)LtoA1eL 0&@l 9RBRlXO'| ci!5śypGR絖[s'MibT3&`MS;JkwvlMG,iS!)ՏAܞ~K-`L 0&#tå&ggzmW\)PL)+q?`L 0J`6{ߞfЪ;v苆w44ů|7˟7Lc9&`L 0'2x e'{wI oϧ)~|# 5go2&`%'A^B*R9q'JMRXKͯA<&`L 4$/A'4&!¨]$=xv`L 4m$7dٴQDL}T:T|ؒ 0&`L "ϓeSts@IDAT]4]Z %):Am9L 0&O$G~3vR7}Ioĥ`L K~{I SwmϜ&6-&`L 0 ' Slj-\6oOeL 0&d)Hb ~w|@"M4R"eLq>eN`LI&IPD2F.#kk=E?%'nO6$U]Oi>'cFeLM KڧXoy=2?ڏGDWxpt> -B p+$ඥAHa^Nob9oBhW|VsXT{7-pڤS# "6L 0A5 #8L *BC\;bȓU3fڽh~նkos%KL| MCR~nO^s|ccnfWZS=f&l"n{񗊶ڡq9~[)+Y~(Է!񰣿̴0IoCg`؛z4ꯑ'!@O{ؾ 5M=~oKB44NPv)ӦS{ϳ`ߝ/~wj;2&U72Hq4sF)> pbΆqu>;JO3!) κ"/>~hӎmh5z[\ؕ7F8x#)0&R@&q@\ m 'e3=X b]zfg [qFh[AuC ũ*CqBR Z!p~ww{gc]2oo=WEnHF <戡0EN?j;! 0&XHM 1XhB}f(q?v{wMZ..| *V07)gyzwxS^{iL7:T,sIaMh',8dp'ߚVC,[[:ϗ"73lƴ[UpEP9o! M吳{,%U/_E0؀ Yhe xL 0D'PA1&P9s.OCYHN_K!wn=BVpk!,m.,/LVpnrvoՎ@ F #[-Y[l׿=SA5GmggNIrV 0:_J.z g)ӎRH_Dneb 4N?=}9 ӝ:HG9`L A &5A3jB ;55z_`E+p _4p6V^9܄7a8i[T/")ɔ[8:oDqMQ; 2{S3;\EAlEbz[nWhݨphӐ2IbS0`]f<%|ttݚc&y{zX|:+4XYbg\S5Z̫\_9JWde?g؅.u!ZX,ވ/Eb9;:qlBKL! h*Z{+{ V,zoMӳ,8f<eF{ r.X@@-/}{kRx|-,lXiL''a{vQ8GRK,n׺H7_`J} 0fL n2*vyM)S:b9wҤSjq ܍+=b\|'fm[ HcTR{Gm;ɲ_$m>a۾myЦ6\Z⋞vƪt 0#T%6: eL !udz&66j$0&jAex?2eL 0&؇XH݇9h&`L 0xwd.l@ Hy]J},81&@c"BjcMN H`Y{C `L 0j t`L 0&@}`!sxL 0&``!ZD 0&`L Z9<&`L 0j Z-"v`L 0&PXHo`L 0&@XH;`L 0&o,7q 0&`LZ 0Sq6R[I,hSmj%dPwIY59\7Bm\뽦ҦpOXQ }O|5(!{yN0VMtSNWlӲپmKirZ`ܹ;_/ßجƣm [Ϛ9s\!MULUE55amJ0gL9绉ڔFc7KB>qZ{OIf~D>d3ҵ 9Kړip`Ou-7 E9⫟֪Wo.M-,, {H?V#&P؊ @6% i>jS|{uye"OK,݉jmzO?M׺0ȼҾˆj({v < 4āx OܧXy{˼G 9%%YpX TIK<0..S 88MR~sԞpwŧA,ܟV&kxճk{q'7eƥ%wn/ڶo?w#[P E.S 98@ڔ]:[>ֈS!N^\4&? 
ڿ f'o&保u55hsNL% .iŭ>h߮ty]SsR7eFK&5MaڛpZT#{[MsFlDxFGКTTyYRmyCeT /7{ \#>YkglYsԦhjS=qWV^AQu]`nO[}WBDo_&ܟ{ޓ(RkzF Uc5}+K qkM<wٯ|$#{6=Ov5-Di?WmTWOksſ^X<:z 1Ou24FWZ ^@pɨ}+d +nspglQ;,A$Z{Rsw@yɵ͓IDrUzUqy'mݱ[7bcV? 0hrݭSE6-C馲VQ06#qONy3Z!|# p&E-au;I:HYQv$RbsAd\2GIo}ϯ&6gbGVY?yD nL5T[ TӐ#a&HWWa?.wu4ȸǩTKjS={_qڱXqwEAqCĠCW">$lF- E$KzODqPq]y{Za~Z+^hy $"1 Df-!n:SiriSbͦC& Q31|O#G]ZCW/m!U/$1CB~$GiĹCu ZN4K߭'#Ggї+Ů,}{tՅQ40R|waoiz~5J__7%NG| *~e?UxEPBGcפƭL»1eb,Z+|kwtHq1LIhZc._X$|C\{Ib_{߻=?gN6h/_yӦUsѳko}6-Ͽ[%N9Euid ,E.OG~@'ZSϽظO s&|Zhn;^J[/LxHNSX~\+^-ɝa>Qtꯪj+pCխY|ObNBj&؟gYN4Mݰ% mr?:;ZRLetw,tvDvmp_jL}poqE'=:Yqٯm+qcN6-4Td ?p|cC '+~?7^xïёWʙS>HybIynskc7vZ{:$}TܵGϿ:ŧ#>EgHbnK7q]t) B q"Q# XF{KˬsY[u?[B˞U upk0ؼd1?ox1ztlu!vKĎW(UnR0hH˚/֩94P[tС}GAX} R~ВhLU}HvP4C-/gEsE/#uC=,^nZ 9ꠈur^S/dOĊ_ /IӥX:GaٝW(X44슩D~a1,VrPU[ay1X ?,'|SOr&&N-5,q5=ZFK@#_2$L-?PTqC dvaTM͡6 %j΃i~\uJm>'_]~ƽL%b"+NGaw6?c xBiRYNFg߁ڟ6|U9ߧ%j+6$^z꬇֌ ɬ}5BVָkmB9>V DnO1KFpcۑFX'=i_~zFv-xKT5g5*SYj.4vDH@uXLޣϿgٶSzPMGۄ%աXNX}".5$e?v=2yici+{ ! jhdmvُiktQH.1" *֭hcƥSJ|F_EϚiHiM&l) WpTvۭ?V*VւvN֭2#h 688+uNCCtN HlX"ӡ1͏$@W'woyCu')Vo~Λ Ҵo:&cMipԮ]e)@OSai~zt]pQ2+)XSԇ߿*ycI1m0ܴu!? !73CYwhS XsK#a "RַߵsW%>DV1Iuu}YlhG%n]ԙuuHBjD*tkhqŵ⹷KZK'lw?S+gYVZS>q0hiUaRH4t!oPxpθwB"ט!cS.i#  ΨZo!ϯ6dLO4ч:u{4Ӗ|*к:Zν;0oc;KiV\.NmDs1ǣ'.,D;i0elTJ VNٸU feT16OQTVkެ۾MՇ% mI;']fue+!l)cvLGfC^!|ˎb=mM힮f=I4x;\ih=E"`hi=*Uf ڬAkWIcWeO.t4th h`iM in*2Gcв`TMMY5[ RMfʿT-$!^ "rH،BK?^^v6)uBs,f,npApvU6}L&'FHDJ aSEyO>QAL!M2^g̝ɔ[{9}vkۃO*4UDժsJ߬I\tm`ڃI!2`('ts Ub 2 X&M΢ |Na/4T˺(^GVM_߄ fx37&^VvČdmxP6FB 6fdΧ? cC_0y1dt.>6{IM' b0P'pܩ ~ԑXF(@F{iBKt^_Pޡs.=_X{070<($KQB& ([Տ|YQɵlvqxv8RLtzUćr=7 {T9ACж~7 K͟]G__'+Ρ3/`/'ۮZƣ"˨" lBA~\*05~}kQZ,!@;пJ"5X1tB _mg6h@5LflaJt}l}AŬlei>8oHr}?&t R;r*Ů]NO:Ctw R.B@!oz}֑jv)B G+VIQ&|{_I>es֎.n-;^'x2$v^I3XlVt: _FxȀF^ K^{WqB Q&5Qs @Bk#iWzckw O;7O6\Cyܬ Aز0]AX;z S/^m4}hQwU*cn6viuy Ja5(Ls#ˊs4AB N&5N @r՟͢I4i)/?Oh|x*&t_Tw[ձB@!0y_,銰W;|U XD&U0Bsp/[\*| /5 O>v?m|xVSERSS!vn,^H)fÊX]!^!{JTU'穉+űgx=bAPb1+q ڧ5ܟK @1WzxE,# }K ?zZ ՟8B'ٶt6yx9S_>#RMߨH%]B@!p+p榯ڹhX_㼴c]L C!3y8gߤm%mH Ī^b+?yjzŭQHpDB 0vT9}׊XW\ڤbv/=Loo$-iS/Q ud}B=}|P1/fVl} a'YLr5yMz! G!PĀ&d d:uu^!T'EPcug(?f B@'y^wT:EQ( <@`pUFDP( tNJB@!PdI B@!P(C!HPk B@!P(A@ =N},Żǐ 8-ϫ}~#w.CLeo;!Rwȵ/t*\h$̑U]~%4wx2*Ҕ#rNqKoQ T6)MoNpb6X`s}3Dd8[K_'7S{m ]t6Gm#=Iyzݗ w S@]B5S*^d6wj?3y,W6Vohy*Sۭ]gab8+*>q=_!ÃmfָdMt|+! 7LAd 7._\#4;7d$Y iQkOZ'#xTe)5GO3)yiT GC|kQzyc {\Sl# s{sJ>ATMXӭ@}=f{V'*e chrײ-tyHc } qAy)jtTSi9ZY{Mep麙Ө$; uDy=2BO3z H**AmcgMncР f2qA[%&VaC*d'(W^o&1ď<̦2%' Mm(˽iYhI111iʬ#fS7:H&14j7W  F5:u؊C-4=c"k/d*ۿ`n0 T{22s~tJyy#al Hn&?ͦ?-yπЩ.6Wh}<>!K~!Mna[YFU>dz}^1Ij=ӻLIrz/P&2 XFG3u,kv>U\DD=BeOC5y!#h?z%]'.߲ RݏhxTY3cC>FInO'ZǪ١vOf]cTiB>(Avك}r ZeJB\f-j+m2Bg:~kYQ՞d{&} BA)Zn0?]JP.t(Z^TgWS^v~'q@`قYӋ 'HҦf_Vr_\^eJBTCU庐'+ٳ/sHQe VaSfaQړ!@K Jwpr nvLFJ@_wI( eRz ?}ci4ֱ{[`N@7VKO:$V&)Svgp;=vz⚎N&Qe#+_=OJu܋fC:U͹4t򚤢E'03AQPv>/U \p~ȡB US[ȫT-ĉ)tH(P[;TXs{AGqw=/,[Jf#@r&Z~T{]9U'+I*w$]al^M|3DsRA|ier<D>`7&!GfOF$.c %<6Z;IUIcR˰hr:X9o1JB $&b6n -y)KH MC/x˔Vw+FXK`wt$_JA|rD?/\u0T˟NKC"N՟&\v<$ʡ M#Ķg9d۩ώz'\!r)S #F )K0vhSe$_JAr2u(T ]QR@ޒT4"F40%rEїJYR"i\ȼAoLa=@;6AңmT괉ɉxC|v= ]rÏy.`E oI*`T\EcR u;Ol*d2Or.Wb,SOjLR%ɣK Qdž7U{2~r;Y5ېǮ=&z,W=?yKR b?8O,m YO;<4s'ҷw{O%6of+q=hY|F)'q%F t_ۈ/\L)ȉ0!4|/Tw|z Ҟ3AjFh:Vlכt6M&ϡ㶩7ЪwNЁd2Ajq~b;oa /ox( \&m;azK_. 
>]uv8$7躏{/}#d/QQ.]48 dJ_j&W+Y=zVwh>9y j._Ts3i^A@c̫O.sVg01a(TFk#><~_cnzL5ڂd"\ˈ-񣏾xzNv?<ВY uBv]dZoЧwIիL!r^`M\rMV75jl|-Y%1Пv)e4?X3уbcyVcDg!*Dݦ0}d~I{nu̥c4k+VOOvQl,,2яr p> xɿ[=40fDExV8;^ث9vW@Sb'@۔1$^ٸVe} m-:YM)cmwWy(uU[OrfM[jXCOdv +a}*Y^Bg[AW!H=8a{|LR#焽g4DTםFu"_eX6BOt)TIRf3U5\[iw{Wq%&zcbrTmGh:G+=EKXL}P?<Ӭ\G>&~jh^yOwyV>끡#9~5{O1IgONY3څwSəOA 3^Yi+Ewy PUEyjb`@UE:߇A3Hͽp RC})fȓUH}J]ppw1aptvgt1Lvdo-Mټ Ք{R&,&6b;d&Oo};ujX+u6f7ӗтڝ`*LjjSii#}dL,t{2Ihp3 @/0D^W_U3[e{۳b,Q*eajn޹!>Ok!U5TSш%d[X(q:FZQBL(%ᕴsڿ_XB76_47-?ikgC=/3Y3Mp.`ejQMzj=T=CY#M;aƽ=Cs ZyX:LA 66c@uV 6':hLW܁jLݞ }hSv(̶$g~_%pz>N㲀t}ޡ'C;mY`;ܝ@+K{P{-1X[f29V;rؖvm:[zUy+>aT kb sY+N쵷9uOa!] C褮+h%OwIԦ}eMxtF&hMp]t"H yF;0X,esGidQl5VEOt9<}w'Bs oFMLW:{R@t݇9pJe@IDAT۹>s]S!v31J3hŚ-{+н?CwzHi kQAZOs|*Ƙ??DB`CM 迋VDyMDor@!SeJYI6\/쁒9w LR;۫mTx)O>/ )fR:\:T|\{~>Ó>PcK(WihA?OT/pb/OțxH*m؀^T xTIMCPn:ְ渷%͛X!ZX+ki=jO$.\}OP:q .3zhkR8Rw8R@ޒԁ sXLdaf h[7л='6wG=<%N%^7LLi$g0?N9N#.TH' S-RvY^xh:P U{2*q.~cPoOgD[?/z:P^B&ǩ:.+s iSeSK2(YFl.OȢddE([h*HiEyLa'u 1ƦnveJEEaW c9ԘmRU{ Ki7Op x5:;Vų5 }VQٚA1+oIlLϣ;!G򊡎֨4RyD F֪;H%})Qo \WNY Ce aM[n}Nu,"1{;` Y#')4ET҆-=Q1|S'!INk&ҹSCLP#ՌψeLm˫z}~l˚ғ$(hyrĘb ݴ c,F4aQ%"2ix r5{H{{<eOyB$S!CN 8kml ͉6v.giit>N'؛KO={'bm/^Fb^9jnFLy'/sT(i|JSIIФ:خ{p8:U{2Zy= K.7,Ldk5QsFGm96uQiK")gI$ >#z5jCz8TF{\K2W#-p;ͨ3@:Hnu)d^r`pR9m8pv.O3sޛL o3v\IO2iXDdI*f!QA[l?Lվ;ZM'bxʚSWu_wk:MjiqoyT܅t{(JA8m24 EET -*kTEE{(d%w՞0jʜźh>!攇tLLg%Beը *T7OJS:L꜎dIE#'B(r{NjAmwVvlzXIgdz,%^йZUb*e~T.YNʼn8AurBct"P6N]ג9^e @ V.!tܣ[C1\\:#˓;5 HGD[٪b2z^^:*T J 2Xhefr=}'kbT:˺Xxe5rA]Y l6AV;H2<,4=QTi/Du_h~\י#m`&ǃ = d[o}nnw낁2s0ՋXofs&kQŌNUbWkjGScbrZa?KOlb{W,wƮu%_]͝s#;UhZvu&FPCNEcɾ ׽LA(:!dÞK9 *SZ;yZ]eM ۣiR:IU DU؈ +d:ebaϗyBg8495d. ӣ~ 烤7>HADqWK).0\ɔ mc{\0\towi09h7ΙB̟.f'InBAxԣ+4iC HjOC'wOiLRb=_~ܩ%DU6ذ+4AV`Yˣ Y"I(1ďpSVZ*EgEc cIA9Pփwg=2Y ojRIeJr2ɪU_Nh+9L01IH,6Af x# ;pWK%4b8m4/:Im35BTt)B$:Z M+fMcT^->; G2Xl|))S-S*SeJ=9| eB|F#;+`Jm*džm)H):PM65G;<6A%e#6!X=!ű # NMV7'۔G=?ѦR{mOd;lW\l=Z^JXWTu9S^xbG HG\̓ʷ˿vF+ђT Z2v9FCG+ 1{_ Gg ØH'қlB`}Bd"1OU/%e Bg\eL 7ʳMWla /!1i/IidU# 7=|NMe T?躇wa1CBmgWK":d*!Wm Oo{󹧟) ==!Se@6w$uz|^8yn& ?Y5Z:4"+I^)]y!ekttkOzVˀc -΁7MЗ,=.n\nHFMrC37hp]fCpKl2@v`&؁:M6@ہdK!û{w=}9NH>| < 1ee \eL U6dzpW}j!  j5!~ 6&u -pE)G>it!'}JO:gi{bvYhl6Խ?#pu{մDV?ӌ@Ta@(f"GNcZ"_sQUvi[m d6hN6'Yi RtjBÍ Ȫ8&HSb=yVrܪW 5qHK~0\r/e*eCv6d‡{_jf{գXKE3]te"۔Cu{G1_ɀ#eWڴ 5r"O4[ gMs QOv?u(WG [IФ2>%_u*es,30o؄ rin${gGkr ѝ+) d> 3n1I* tlYބ)`ѽq& < Lt+CK˔xAEːWt;Mپ~ퟷ[ j!'#]|"`2"֜.7E͖?лչ m$(`x6뫙6~uM.^*&c\`#[e[?!;WȊ%[wc2s: Pp޵^~q%;6[;oBg1EhB d;ԝD>Ȼu߉)M9 +_xlT{"d<ぢ;tOg[d/P֘" B|!K4%*pb ~o?FkPa&JeWW{'~aւE3&L9,auKp-AoͿ:G"5y TйC(P?**S W )^nS=]MGۻo׶|jOlKCTp̦ʟMiVih͗$DWcN^i=u$Sy&my)>6|EA)+E@'b3ɩ|iS)+A [I*dV /{ol#H*$*)CR*&oTwp8jeK쀉*SZ":ړhTRtiULRe } BjM,$5*@NCp7K;T,* Re^ <MC'C%ENEP=_Hd3IE>DxJ ]9gL;`b=6\+d *TB(zm=FENQRXd;I{hA?ȩ$jݱ{INq]2mPIB$T4BRII ,%R*S^nFn\>clHyi[e*Y<l7m bF s9:X!P(Y@,aОwC/l.jTQx4`ݙs$@^Jo;oPhW `i 7PkgÕd N"CIj15SgAȿhO#*ER * B@! Sĵjz-6Ӿ<"W$}puJ4ótEtd4m~ՐHY3I-~.Ud-llȁȯ<*sP$\LB@!(P!H^۴_-cy> :kǑ [.vAÑ0D5UZhMQdհCzqh@ EզMiPG@T1U1* @!9h h~⵭q-θ Hǜ.Wo7}|Jj fߜ,f?HI-Vb* KNe 0۠bl Hr#}{\9V2JaРfFxr#,ɟMsZIU) $JNY8x;lP35?`HkT;1Q]OoK\8gڳQ'h-36?Ш|өͰ=%^u^ISB@!Pd)ɐSaP(Lm5I*^8.xxc|̞!dSo5I*^.xXM_M?9IM?B@!PɒS)"O.-Og2 t;}b5UCRLMJeL<ӳWWejIM- B@!fN'ݗAzr>o5hټi?Esp?D]j4qAP1䟨65Z~᨟fs~Zu?e״)9R(mF-}d r%C"T>5vR_3=҇4k  Bf)IɏR?jئ2^Ok&B@!H!L" w"oW%CNe<B[,u*gkQ&`6@-X4IU7{TiC@ԴA^P(@@9&2aG($Ru4;="3T H;dH4DOܐs~'+G 3KC$^B@!P( ?x෷29֞eGOeaڝ-A}.5ʴ'B?;ҪHj>妒E!PL$|v>^DH^iH$dH4⦞K5ܟ8vIB@!H# 9hXS+c'߼VNmciRIdK9/G6)G爨yLER.K@ B `b9]w5uKIIprk9Kl|1$$I$SOERM=P(GIQ4JŒ8@~6PIX0=l2&r)SӃ"YE!P(bD iI/w[ ng;slTUd ǃ/tmvPMpKc;ѥ6:|Gy,޻|*"jT!溒Y!Pd! 
C$6{5I .m YO;<"vnaB^/m]Su~*}3ʘ,<3X j鎛+h7}''H(TĤVϠ,zKu.Jz.&BY E ZY%FW쒌IC ȍ瓋TZ<&F\;}A~o'?G}Kq˴'~G/za7eE?5sg9k:^m~~  bz鵶X?iOFߪHEREP=Rh6D8S2Ď@@SFN_ɣP?{[iQ5vc CNӘY#!M:g 5Et}K 7޳A\ +kS`cujL,SZ,|r99oGY#DNh\d݄QERSzA#`'U>oīf$b=\V4-}ǘ4 o nmwG:^lu?ڣrLkԕ'IPRw&3ϙ:6&If&fKa:wڸȞwKޠ&s=kG'l~?;9X~IUi.mRPTAkT#vm9}l XpB՜e–A=8 U\;Ӭ4*! WqP!d%R6bĕ|fޠF5?f B֜F;6Qƣ%̑ YL}QѲĚg ?sC~2t?V}"Hxز&2'b%)?2(S?[<M7@U[Nn-s"M [KvM!?Q]#;2W+V kS$5]Hd hNR"TdZ'dB9Nmmetߑ4at9lOPck']pf UV2ij$ ȴt鹈CXⴑ9-@A6;33\"QGN1)b!#}a/MDqS%@nɦR)~Pd.)a[hZt{CkWGFUkwК/އиQy^۴Nib-9nfYOjO7w~T/qѧr|L3=v¹d6_o:]d6inVogFbӯAVK5Id*פ*r:`'%AcGՕyx;?uk[niBџ]K4y^}?A[䰋3(yA?U,4_9gre,rw}ЎF#=4 ?MFj3ku3@Y:~tCDCsX,JpYQ9+ll#?eU@u(@V! jJ;Iƚ}QVL#X"2LRc^wΛ1^h7&VC{r5͞{b=kS6gwoKs奧Y [׏1qmf"º=޿͝ʚ݅&mnshӷkz,tS;`Uh79H-?^ckYs>M{g-ys{| t5|5`0AM&=/@|ş؟9P$X4A9gr:[NP&dl6)lndd\鲚4l'4#RKv|.Z~|f|A'^Nj_IGKv#2CEsgX k&RkQ:lW }mQ*_M\B?rEL&҅SݡӂqΠ ՕTWǩE gMN #+72A&:SNf'o`ZwMM nX~yaniƎ,#udPRVOO(r:9/)-v?|5m{{u35=6X pfPĝ+R]uh4k6?3/ln!O~~oL˾F vT ѕd6S~̦tX}ĭɤW+ 4%{5]@IHn>FڋNKOT\ȳa *ITn#JH+ -RIE{O!b AΜ TԐ 5 о-2W@$ BC!X^ޢ:!HN@h;ᤪ,sq`09e)1j\Z3sZŶN/ՍB[ EL C vpndg2XΤ?ʘc $~(@[MlJ!C3[ ~f/q5MiZH|ODU`<δuBR1 gXP?`@?D[$-Q tnri|."\9"r_b>Z~ĉMmO7ꌌبC6F&ڻ?B׎G~Wk{WQruN&DGYHo8ᳫ.K/ l`wv3eXȳ_\t7aS&Sx~Rw?%ܡ OP b~O&[ɨރxgr X0lanMa*fCW`֤1veJ\J[xb@)w2$^G\{kDwK^S׻c9| aLH3X<}*h.kRE qTQT,~L2ޤr#c jqq\LTC j6b G\q|~3q$;E~v&dYPEJ*La?&I2#x@ TY :(P$5vԝ 3{ 1[u*+fS>eô64 "IXr2g=<=&G=ffC=˵M[<3)dЯ5RXIŽ3fnJ#Wfb&A [aB qo%w7=mymJ?SMښtcƛ>: OW gn!"& >%> .Rޟ>v(=r;oSToINCȷ]I]J vފyo+>+㯘Att"Hw=Ʈlw/&:GmmAm,b YZ*x^;~w *jYi)"*bT AR!D 'Gϱ1. &'$ӊK1r>m\R U<[-ɑTGhDڛV61nQ0O2> HINХv ٓ'pyC[P8}g YoacikkLJ\Yp 1)t8T$d. b?WoU~nd},k5 *RhPp5 S)Ў4ml!4YQ^bLjm-ZPDFXow{ǨbO8<C!" zz𙵂V(Ž7(r:$h OGV]o4&A>:Ԫ)ʊlF%E^-,>@09 llx$` dUJ **Wb$R;DN0$U{M^B4e]l=vrC<EGXPe;j['MiϞ)4-jHhR#cIMT 2(nsT-Y1k7ݔQh;ESivTh8/phPAP2ԿoMUI:ERuTEP(F@ӄi $*PGշ}Fjj~XlT7pL^ @P6mEiaANY}ؠ9&!i6 lc6}bE*,}Zv7醫ʆ6;;_C,$qrH*wP$UoDU| ,@/ϭ#&>eArTD@!x"+LK+KyȒ9S x|#msl4IR†axh61QJJr*2XxrD*y.IF-#2ˣB5oӥas 7S`T(ܼf7?,VAyo/3M*臀"abRd Zǘ؇\M7|9!kγ(rKJ*5 n{sͦ[/CeLFJdEHY`-i{abeӣ2П[,w Feqy7{ #BT^QZҹX 9ݻW^\|6![&UTDHވYl]6yxЄѕ_\SWxk.'VmI^mw<ހE36=LhkjZvR  z^tѴbq">>9ݢ\IŇwK ! 
CPy ?צ[0y5rQb6[z;c!}T~sEQl^: mo)9x( @IDAT3xm2zd{vӸ/hLAJۺ7<\|%UERG]U,[[x6zOryy"gi)e}gN]>ôdd_UdOjP&( bӽLTo_pثr&lEEb)Ӝ-C W4eGkS1 *Uޱ| }"fu+Ta>fmt=ȉ 9ݟx^WѨF1'ǽOErS6R94 jAHUP$5UȪxFK._ SqqUe~`"5mVwDzD 1a7 4L[\*;8GN+G+rX* ^FWtОhZX^  9O.TfOp?ż'`r<S%إgYZA{=s/sM8EN$Mg$>@aAx/Hj>"m׆IYW~T9R>_)H$GD{yiB P$5નD`&?tL3pV @:/_<~r_rc,5Qس2JρPHcMC]*sY򛮫iTֿӧ_)4d4<Ͽ¢2MTQF@LdžkJ #(;iF.y#a$>.ҵh{_"hw/+6"y(rECUh@@9=&V*ܿ2AqH0s?لcuM&s))PTSH˒"郉y2x)L4FGZgvQ+U˲b*B8ʁUZ.jjqiٺ ҵL)=[P՗o.߸q?J.E*\>pʓhjfN\+BǦ5O?Jn@EO cIG@Hj⟁HOhm9d=i^4tO4nުm=%:iLͲhh oao!p# .!+#Eδ[gY?,I+ѩS'F|:7ЂB}|Š.V}~?z/`&s$BH]x_marnL1$mn 3G7/5QU5T zںXU_At=SJn5&&> TH3QTf{.!}4A7 NlAhXF4B2vx\jP: ?+s ȐYls-9M"@\`91 BRrrVZsUg͞xM㿞SҦT*hc0[샊!~XP)AC5x5&B&:AC&S0 Y}5I*\![yw@]Oʭ{XMQ>!J9 -ɟlNH@TOG*V.$5b$c[PHjlX=EM.a9ͥt[RDU!Ⱥ M-TTŹpB5< t,EB#9HUKڶ&6臲Ouhd>$2ܟEJft+b*-P?&s|XP1rݚ:DOZHrӊ0pgS1!2T|N @ ֥ϊrH㏞{G1 O&~i`[> rhV`]pj%QưnkR0+)M SIM^ $5)JݩF /%6iJw#M%'>H&$5;J"H&Ƀ1RݒQS͒sVbz^.]/ɟ(J.%[DzO?>̯C 4|D$JrD- )ab4#!Җ,#B'LLX."MI&Ӈd <@CvIm A כ|/"S F҂@X #]tr"mO Ô_4+PWqh]k{+;Wwk,qt0yL1w˂@֬ }3;_AM?y )< #R$% \"`#sxS_B@p"*G  b0DM>2Ysi Аk;7t}9Te?d@S|Fl; B$DRF qY/1>ݲGO_B²SW8&RZܾHvcR i}4}.XfffcQr0$Nj SHq7`c^T7[slMMa8v,QgiRސTC: ʚU8k[U=u:'F&^j*ȠZc?1IٓǴ{,բӦ`'Cr;Z5mLIp1 ]U+eDWBRc!eT_S׫n?B[G%ݖ.v^TWMhHS\ZL(x:!`DXU6Yhu7ҞտiSpl\KL;54FohhבIE*Wш8ђG''٧  D{|)W *+$5K酀Y3Jij9M>`*lC^H#ˬL[Ԓb?RCsKuI酀FP}g4ǎB`SXjz:oは|ƈw_/@Tmq9v@*hO;暢霫:/3p OѦlF(vߦ\PanQ] mL]s,%,}ŊlԤ .Qlc0Q$  ?/U9 I3R #p 8FQ~o)A.%kҜ[_׶3jPé&yuM= HTآ\&'EZK0Iʠ.G_n^rk IMNщ--AKjFF[z~3lPjenEx*dvNK# c(5B7hyߦaejNBuh#Sjz0z7PSVR.8g,M;>uնc᪖O-ޙnrrUݯ(4+fkЖA $5;n%TXTor..6m=ܼɯe8TCR P}\0O/ǒQHJ.]W8Œ"/b~n'M$ V6N6P9^I@/0'1lr-ϢXRۣ^$n #-f >Q:@q qUB׼r#ߎ\C˟j"9|yge2w^|M`!Çf|YFwd GvŬ2P {nYGCuu_l $5J^$jD!ˎ}I#,_QN)A>și5kn HrdB>K#M)~3**.@Pf5$];q.&JuT-r+odL/{[q8M)-nyh]UgO6ӕO&gYT'Ȣd&ԝEP~j8\s2hbĮAѦhdM^m?2d2)ۃ̪2Nj5:AK0U%ΐʯ 2_*dWp\P5­OC>Tpi,kqnSIn MvhϽX1P.?;?pAc js3RĿRmC|Vl;m,k9iݴ`"voPwVsNRIĨ!F/8&D货%뤧h ]nij9]nZ·D*CoA.S{۶K8F#b5;p`$^HStďVn34k_їˤ^p=:tr23ܲ&CVSc~Ѥ1 W]+\VFZy7}Ki}tdWnM}n.͙2F)ݑ>Nj ۱Yzzŏ3?>=oݢY j,b!C,G46;ZN{]ɴam+/ڱa\kw60-4j5@^[( crltv*㡗?XGwm fMԈ1S<+΄nh?~@PER V*GP"@wD=DSGQ渎$G:þqn)ɡH tc_;O{î!vdo&}L'|n@jj}LPa9zt;BqLr'AXtJg -|R G'پ Xs'hxU%\y_=F&3zᓬxkw}tNbT" ?sv??<:Donו;c8[۠&h5ЦUݖcW DgT{Tzf ~2SѺ#ʳn7~ |6_t;N"j_#% z3F[h?eiBfXAc1Q::} /K~k4 *TXj⡿ގVyRLk}uߖ}r Ы{>/YT8ѤA=~]s4jakx(:t4JB3t@VuT,I]8pL#>n0#+5<ll!l ?K9Gh~& 2·.Ď,_zA"ZW.ؚ'D@Sobh DP/} Nc@5,}@-s{kbBR1 g6/˦D}r w T  ?!9AF H^&XzÚK&ruw,/G B``T?QV퍈޶,fca<컟 =͛C9F==ji 5\0Dt(WUv[ .>3 G+54`?!M4sb fРl&EOӎêhv CtFϘ@7.Y9tz'OO䳳IK=t]V4z[yũ4;~GJ'i~ %ѳ23)3+& ò8& Zotg;*G;h:hݘA9!+dǏRO`Ѩ>3q8hiOdd(J8#vb9~28;9=w}#hodsa"7:0ƗȆFSm8t]ާ[5#kKyHޮC'4V^N|al:mp:2@~Ccn6ֲ4qFPQוs[oydYE_+Wn:\q0Ch6-{@GyQS<4{I@!)w˧^.CGy&oں:7PCC#5 ۳:!G%xrnSs@~<bP)jl_p?kTԼ\&YŤ fRI<п3=HG݇kx-LN55େ9$0NB9w7'Wu&>3c(#w:!SgAaт3F~Oĵ=i{̨ LEꮮu&<5aBP9AG}w :]=S~u7|eCpP4bI b!GX{їv(70W&ɪZHD3|bXʤnwP!XY<bH>t؋_b- ߞʤTm?`EnT 8<[=m_H~v\K:l@#qQ-s(p錠}Q'"+lg)}D;K"eLfeLZZ|T~ ,#y/q#R @`҈'o- ݽ{+/"ȓBVu84K+H*&VW?jgudIo /h CXJ1CAa>l揥5TXZwwljV+?>[ﰌ0GDU,=wpFR!Gq!)CZXc%-L;Z $SVHj='-OmƝ^5ϼRWD4,jLTq akCQ.I*[Ro5/Gɬ{G5b#P?Q,~L9[PQ&I#V$n_~s'lY9iN:Q[Oj$E`=/܊CxwMNA||'FꗯDkQa:išXVTfIU4KjgbvLhO؟ Tk*▂,5&?dg3 rrǿ6|TNlзĒ r>0+Gy>osRY7s*BHq t^\!"(}DZZkW8~zs QVdidT>fcTH*‚WaAH 8I6>YZz݁_oWe}?|P1 꾝^_+X0_OoZ/$AHUfhQXS0EHj*?d?QɮO ݁{BR4g`o!|T`+Ͽq}s/.eYF+SJy*4@$aML҇ALurC@ħΒNҰ7OүuV6v4F>}GTZ__$)ݰa^FBDsѧз$8KI*R0%AC/<:@* IM t Tݒ ro˦ܸa+g2ÄGͶ[ռLfqĐO~_׉~~=Ymmn}z~,eTwʊk>XIKKB̠ B-BRTO_Ty?2- t԰W>h(59 =ɾo~]iMK0 A~i/KN9gTe|ݞc2܄2ybYyǮ&knihil8{hO=;SR } }JQF00|S?=#c30Hpm{ek1BvԉBBĨ &zFԐZS1U7ֽg]{\ 6=?+Wz+/wODz3W'MB **B1*VTJ<WTϟYF/HǗt㍕/KǗ /ׁу~nt&-Z3&jSe̻+y9qvAnU6:&-eLp`>ChmLYDH-^N23&_5]| agT[6GxxU-=.,7A [.a}c2Ļ/m)o\^D|y$ I5)/xW\OiXOV>QE?A@':A/t+*,AR Tm 
[Binary image data from the preceding embedded figure removed; no recoverable text.]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-ha-dvr-flowew1.svg
[Figure (SVG, produced by OmniGraffle 6.6.1, 2016-10-06): "Open vSwitch - High-availability with DVR / Network Traffic Flow - East/West Scenario 1". Recoverable labels: Compute Node 1, Compute Node 2, Instance 1, Instance 2, Linux Bridge (qbr), OVS Integration Bridge (br-int), OVS Tunnel Bridge (br-tun), Distributed Router Namespace (qrouter), Self-service network 1 (VNI 101, 192.168.1.0/24), Self-service network 2 (VNI 102, 192.168.2.0/24), Overlay network (10.0.1.0/24), numbered traffic-flow steps (1)-(23).]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-ha-dvr-flowns1.graffle
[Binary OmniGraffle source for the deploy-ovs-ha-dvr-flowns1 figure removed; no recoverable text.]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-ha-dvr-flowns1.png
[Binary PNG image data removed; no recoverable text.]

h6cEXD1Ζ9w̗e OR3D q_;p' dEDC#w"w"w"wwz>ziru{FݳbГtr8{&R]J95"ej$D"E"E"E"]"uaubm-#s9Z#ɥZ-Yó~8<<yOD+e 'Z( p@φ %1 c3 ѮvlhgFӾK%ᱷr@}@}7`@ݚs =WRSL()o]yĔw]'.bfb.0Ś,j~OYYYYtU1lkRNGaJYf4%k09sui2*0~"ZlPFh~Vv7Co#)r߽.%v;؎^RTcL4 N:>}F9M~:Ϡ o֏LuH)ws *>S>V2LǸT<Th|Qef 4-7Tϝ( o1}f y8;s4nes\ё3_֜s1ؕX(kavE"a<ܦYlԴ8EPseBè4v+~d beo'}o{Tp+}9-|ʹWejG@ϳvV&v=JFAôaN7Q[8ښ`IeY5{$[H20 qmW«-2!0Kv fFh˭HkEk'<]S䪻:kHHHH"I1h=pjiBאאV(L[A B&D8Z~ea[Rk x7䢞m܇7yS!"BR-!ŽMɦ??"B"B>\bAe9c7ʡfT-X1R Tg*mc+ O(>|=\\^etOvSkj|IC@J]vȳC0 f8͇7{.t=};qꤿ kqBZoHCpFOnvq+n]iZI,* JPGhAƲA Z%W&aLHΆa(MuνM5 (N8ʾxp{Mc uoQi@7{p[} {A%iSJ USAB%b Q qG6o/i-b_= :0qC@ߔAv2qIc6\rw&śuA^p?Jf(?nt>㊍Q(!G'0M cjat⿅Qr {E/a:y_aNww^p`Qx^|9N؟mfMp-(%ӧv/6*jډN6꽌YEh'eCrB2@|U﬘s4n\ & ƹrG>Y1~,]F3/  4 yE>HyXxLTM(_QPI&Q?J#?ܓ7TA7o7|,ޥz!7)3s^?CY'gEe LChiYY-=N诤0Wx:@Ǝ^lgu__Nſ_{DғƝ=x2GG?Gw?>9[Cs#-tC~(YM}F*gZJZ)6<#3ICp r !̄-s*%h:A]/r7$7=[T(ZQ#'ZP}8:_RZ5.@!χ聪qlgNpnO-E^[_(U;Wi O"uAe2聰t 7RM"r8'$. G3S^v[gYzn\$(\D\ny88y:ZHB Y <+W)Mr@-O,YA@;eSXE-JsT#I./y,0+ru>` wA~BRpgz=@,././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-ha-dvr-flowns1.png0000644000175000017500000044320500000000000027331 0ustar00coreycorey00000000000000PNG  IHDRMsRGB pHYs&:4iTXtXML:com.adobe.xmp 5 2 1 2@IDATx]`ٽK#E@Xc^; Ŋ R$@)JY_bG "RHOݝd/]r܅oಳSMof`0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#d ( 0? OA^P4X{gy zVƬJDЬn^LƧiՉgWdԳN߄.=Z85/{KsI3ݗ7fiJHNe}woHd2rRjWϙt CRR%2h2~|W\kƳ.-VFt{p2|^gS§e5^O, !d4!ǒHD3m"&"lוV$|W?)7Cä䭏Fouw{c(IR; g(^ +kqwxرvxS8{2#cIvL% *|ni<:ztI̶F[<|U`hSN%L[dTzvw?}\Ƈ.[$M{iN+-J]mu9rm++&qVӶ^p/:a0D.p3:z۫OZRO kNaaYBŘ_hbBw*%Vi}h/p/}DK4q wx[Fv^_ F6'bHˮ5VBٹ*Ɠ2/)PA%Jwn.7җwhc嫩S~t4\LR25!b3<@Y"ĭg s; |Ѹq)R31c;ة?FWKHBhbs RlŜ2򗏱m1CY"~S}qOTܓUR]jтLe)?C'^CT@b6KHe?R()ɚIˍ-ۗ?U%a}˾{wfeסn<}c`o¾ĘBzWN: «'%F$\M}[X8^fb_mmBojIyE*0/g IlVі C[(ܥ^jBaJ<$piXxu[ezi|owi ew)1!V=ԜDO%w06qNIۯ.;!7CEui,KP0ྦddؓEm+/1@jewX4;yX?{,b]^Po[edd L${FX#t.揘"Tgaf mW t[9H0}C̐kCq S]Ka/\0O{j5S}NǑfh`r>4 7D]cB1 &`>e_b;C 3 ]Ǐy(pTu36-!vbo4˴vH4`}kQMcIa*SDK0֡0?*`:] d%QWOPلc H; u ?@I"y:P6 uIuLoNtu6 '<=C۞/]n,L mDWKY 54G2Fw(`)~؁vw s3˫X&RHLmԏ?KmA'Ez Qr^8ΫQsCPD9%PAI?Q^d}4.ס';HֻػI+O)VmOjqw`FSI1&KoGd#;w3VW[EaO,o8j_i[4)NߏVm> T%]x|70C_US|0k0*Jh&s:4Ȗ" G/ӣK/k7p6km.S2r`\đ2P7M/>pKĜDmk_a?ǠHLw|4SI44Ǔ`)nϫyrUy4ng@$n &u MiGK+uynw*Łedǯ= e07n؆ONe-qgC|X|_\'-bl § RV&ܯ)ecE89gyBy4EC>d~kvC$\`pDYjl\U=MZ)׹" S ЇNy>.M/leZo/ rtӺFD`~1-nLɫV='+)^[mO,¬1B^ᦤơC{I==M4ylGX KO+ 6Y0.$O?_x4 4P iZ~J,G֊5ֱ),3)~]'bdP?12N[@bM76t܉S3\Te1eqBwW1 {/RXrk1X(L0R>g?kl[N0A;6>;)Ub J]rl)4q4<<6#~b))*Z\H6e׵ejeLvT ,0t ~tVAg G_F9`蚖B%Њ5H2uheO԰W֝8, qz*MJ1{_xY2G"?#kҴ!7 ")Ţ20eh7Qݡ/IAO+[.}תǢU<4&gF@1^qP9 +OwV?D⸑'f!M>0Oۏ.|ըDm{` 6  FOOX/xeɑ`:d7<顸`bZn{]O%CkCýS$30`gk j_D0cĂ,˪6Qդ)hUvc(7J)c2xy&/b\i67m[x.j'4*Iy߄P'FM @o"MVdcQJ[HXbR%y7ܛZ]kNm>uWDi8tĻ4!c| c9 6 `WO} K{8C0 G^Elö< 7]4T[@蚽ZKzh,$F0 ocDW_*ĪC96 .E~Ug'S' p)p%~#e+|p H+;V:nݾ6ؤ:@eA؄NQfcxIz/ tÕh?YU*UIB K}}C3y BUE9vWuȱ]_)rIڦr۽1X?f HnJ[[&YDZP.\zh=,֔HJK}41JPK"0:T687bj9Z$K~tou]5~ dlĔ?~`sOϾ:ld2_uOwfu+b=9I<@ZB As;DYԿQ8숇ez uaiIu6/axB|u[g1yGF)]Ug={4I:r @ս'huVvGv?x8]<򾉇#F,e.3:ޮ%\P-r hA=P!]~v,`zO- OQ+"U˪4g, @u;>qfy=i"Xk佅ODeOjtPٯ¥/{ LHNg7 }0~<%/{i8}/!(,v}u`pJɱ>-acJci Ns"SWC6yWsx6h[HB@eIh ycκ~ϠMaD}-aN4FK)6k ɾ07vyM%LʼtT&ն%a? 
'ul4a;̀}k9aR'0c51(c R]hkܩS#1 c np4٣cl]*Cv| Ů7hP@iX*ŚrfxdJ@uNVKEts8ik1hB{hݎ[cj.}b#e.ěa+lɒPIaMr\Flb" m!iDaqLα\U(juNJ7xL7 KDKIsE^ck9pf]"㭘|RݞA?kR&LV;Iu??s j{p\q6L/A"Bf2]R?+h8_Z%R=z&RLww ѥ 3Fp~vKK/Q@nv " JNu_f>Piw~dw47n7t) OafYA᠀unOuIjsV}pZ4՞eu8Rjk^a!1gd?niT]܃VH'y^ hCp=t~fD(t cA9oSx}iAi_ hE=.}6]:A2> iyl$'<WXDUwXzޅ Db46j ;/ .*ԡ *%1gkӳcM1SI\OHWQ&8*aI CJXWm-Dc[W }ᙌ zSxRntOU`SBCυf+輸bRӅt'G'@$3 c{'M{"ܡB5v"8C`CZûHL&!0N0Ohct5g*O{2Jѿ:aJuxUP~5/5jؿ6qQQ}0|֣sܱ?!ϽV@|RŊRcutA;t׏V}>  fĥ[qCZ-&PhAsq3 [bhSة=X+1 q&IZh\zi7mE8rx&4\C]`>q ,`s; e}eV;@&7a<K=,;vg~qxjN UlE 1DnjK6Ǡq2kH>G&?0cZy.NèBӋ> 3=0}p n\)Z煖PP1(q6&{݌~M4C9 i(ï߆mh"M?9XNF[BH$[#E)b_16= :7>5h6RG'H5}GG!R=m_ mH߳Ѽ*)-SZS {&!ZZ q9%C @CI7[@iHoAb5THVPCݤ+DG]Ct5[t#{oAvZ^{u,s*c4pA{}*ګ,QŔ81Qlx:BBˮZO=T䉶J"R49yҐ 1 cHc$'cW>!1tY=IEҭ1Ҡ/hL iIk+-:_z?ܿn\$E55St8#+<< ME[i&Zxq5>^FfvabCѶbfZO2;(߲4R"5e>eHCq|F`&#{B|. =A苖Ll<I4f դx{FI9MOgh9e@>뚃}nn%b?qqsIЃ IR rIRv]}q-ů}Cy*K؛3A<, [|ik:nl0Bݗ̍`8 򾠹gdNro;޵?'B3W]uĭv@Cu~t?X(7]W[q;qnt n6vر[hOXebw)Q"P!B<4=/a4%ʐ('A:%V?>q7rƟ$-+Ps="iTRbX1NnH74juS:G{ǪG.؄ vkĉ^f'V?{v#pYg,=m vM. j(<}7x'*3<':Rw>~A{))k_WnXg0@~R-RZF}A6>wRvcLcY7]0.(ϚP99PW_vFj4y73uq}ӣaP4_B砬lb@muMhŘdn$svzNu@xGӱ/};qP\<x{~Cn{3%CnaP}*7ҹHS G 8=CHM1YFaF.{\1~23韄={Gץ! @eCy:&h߳T J7m{Qd>AZO8Z:|]o)+7NOƠ-Һ ~LGIz* _) -hᗍ\u4D܌%DKޝV}hK:7b*&;P ل_6@4-QY>N8穄 :~pw"R(xz~+'ie}~_ c_爝Qñ @ٹe/WRGضQ7 = Dr=i9, &Z$(OŠopWboq2>JlIDo\P sXd .gUrbRzYʲ"DyC t\td-Y;N FCiޏ}(egu ̷(mRwi9gdY vqY`# eWWrܻI%m }2z9P}}\^#e7]zNK_KkҤ6۷MB]2ßd"Zcl˲&,d`>G[&*RˁdR&{ުDwqJeS PB^7ZچEEL]v1t&{L ʘU%#1éd]kѺmؓ8Zܟa@uݔ :+@d[MBa̎!=%hH 5⨑Lܫau$[oK,<23Kyyo²ʢcoZmi2z:F~MW4@bGmS'nCі/4[DI@A&A|GIu*c0Mv1 ? wAL10TҠ['v`OMt) $v{ҟLȴl)>{?ԋ0RyE(.2&= `ś.C0q&l{r !i@բިz) \8۶׊{RлM^Z/RӖ(40n׷Cb 7;]OjC mTM;GۈLA<A[>%ݱҿoNQ;L5WAOGπnVH=ߣ-_8- 3C:r6 (

(Օ|Dl}F RP6<!P\f s4%e̤QuF0hCb̙:{'p)uz(V煉ҵB'VbUnwۮ ZCk[]57ܡvKIzi u%H\Wvۍyhx%K+)\4m/ЩSÝQqUV%å$n4{bV8 ,Vu$G Oq0ὁ%DhcվlS?mOX_WZi>txۼmTL`N -sQ>ҾMZjYC,Ә`q}S |\L[WG7pqB\: j1pւ%( Ol :(y~,*%CW 舊>z٘$իHJdN]\m+g^W4q"9SwG یS1wb"-):k){zCH~6^b m+h8jAڛ.ۡ8(Y8r㶾^)4ѽSrTi7S!n@_}1{.uҍhwmyrgj\\1Ns ct ,@ъ$/'jl9,K\oڊbp|A,Mnw}Pߚ'L.@L8P*-7ѓ^F73Sl- ^4G8}wl ӥ 2}Ӣ4( y1~m*IELWaB/{Oz<(CKLk"x! R,Lf`ۣj ܬQh.)"=qP|ѝD}?y,e-O%>RmrA1m[y0T,:N7, e/>5ݸn6l0vcd_b$ب вCF)6(]oT@P3h,phB{,%Ui,˜%XeiRRX,e[/Zl  \Z/1^me䲉R+2:ul!}tx@)y+c)<&7<~k)-]N"wac䝾¶1PI*1V&֌?~7RQW&DMq+s lE76mI2\"і4cs.앤G㖈~NLfy̒iYYkj7)E<6 $}EV#0 ,O 7{rk`WWOT zT vnA襉J2#4O}#N*RG >eۼ/>y:!PJ g[ b0#G {Ņ2⭁8U7AR ]c 3=F`Z\9%[ߌʹ(LP(nb0#4Qht !&ϖ3t ؒ `AC#卓dN%L]Tq|UʅgF7`^ĞSM#F@\ .{I0#0BsIG _}&@(5ĤFhesO++z[GBaF`]+4I qep/}  cҌ#0\Kpe{; e#0#/ LЍau}80I@IDATBk8F7qw^@#qĕ8ԑJ ׇ9#7뙖ϧ4O)1sl|+||ò ^= /{`W^5f]Kv򜮍=>خ(z_(u&'4c,d֖U_y=^.%eGƝ.dKZl6kwށS5}E1{ϙpig)fbGe,SW#P H(L6 C 8QӴ}iܘ AiŝK Ó9-^Vµ8^tRFR!!tUi*P: ^*9ou qz[O͐T<'"=/2}cp2w`yB -> f)ca/= ?9>;w(n47"[=={d|ח:a"=OV `10D v3$iY9Hk0 4&!ݤ5y|-F|+wsP?F v@1"!`#%ӑ8>$8GXj!s&xʄ^sO)`:G%P$abB^12)| aF{A:6 uN p8bv0={0z`~P{ eѾ0fھR=Iͯ6~ǟ,?>|#3yXQ Њ%)Ƅ}C7sxJOP-H6t4D܌Ǻ)GY j;i -G(=~g!a˔@Q_zfAZQ4bߵJߛ|+#{Yw:`̜=5NӠxx &Q;&PIwϾq.%Qv9{KݚIz ߽kȷ8OAG|`^'w"mХ^;wV!NGģAbla#C DBZL=#ݱn.;Vj_׻mUn7LMtSjk+F$wxt u4e yǾ2CXOph P/Y BxyV;d"нA=SjA.Wcޠ$VcY&?F q08lr@ hgΜG%+pɡ~2B)ʡ;M +6P6`厹km B-)$ r+PJrE[m=;Jާ厝!(;G(@ -"J|RSZfu7[&{60Px|޵]Zձ}Lz[4n/V;K5[gmx-ߓoM6U8fC dLɣ14GH zSش%OLkڻ:o{%RP'--k,i[ruwg{ncf ܦ€NpcC{oqƧ'hL񤙷NO<:c>GD wխca4@ѭz%?w{.X)4pšN(aU&ׅ<)leB7,)bwopx M,lsa$OC0 “d7B&V-Dxn^ooLP'F53ܦ̘R+B=H=t]g}Ъ_xﻇb5#plӮpPJ/ů5IWM0@SSSo8]J3U{eq 5MT k8vDZ4TZnИ~G, hf)3͛t:]{M3#ٹ[SZmi6KU3FSvpj+OqUe3j_ UmFpl47Ж@k:2h3(3z TGS0FKM4T5e& MUM9ͮ;vh#u2M5?5BDm埢qۘ7?56=wNXQQ| s,9c|/b{*J<`f}'~\đ%.zښwV G'c R,:ߞv+Ak?1胂ܡp_^Dm;/~8+%΂m~^^t߭]:Wӏ=vbҍS֖^F+ߡ`O9o|w+cާDUtNztd)1$^TbOqab*~ skdYj-#sfi4IL#?;ca{mvtlU$9'#kh:3R[Iqf:RP"ZlVlwB*.Un~6|HhoV# H0(5}h ieB~B$ poɫ\Ty[pSGy,f=1['>Wu60M[zP~6(.H~{QXfA |l3н3}ns@_/lD~usH'.ڸ.$E5A}5eI@Xi׬XM~@<8C_\yj$o$BQPH"#JNKIC ͉/=h>C;<ү;_pU*dѵSF^@њm]ڦj Jt˂Y { m>IN/~~OJ{͚kj#A? 1$M~3oԿ~}dHJPZf/}`1 8ҹ9ES${Li>yo9 Isӳ{tVwgqо}} Wt1k0hmhSUDN 8J)65nJIMKq1tʂ_~ /Hؖ|LLiSdVBO\@Zߢ~}3}|bi|UCMPS~Z|N x+2ڔ%iz=! P2qçCg[(\a#L(sMJ3@[pL,ށ=u2oM8D ̓6#0boDebdvl䣊8˜<# HL`: 0ݭ .;#0#`&U8`Fp`&A#d EQ &'#`&%*hБ=$ #$V L,L`jA Gk]?/[#_q߾f|qfQIckIF6XP:0 E .—?z_O v̼_3_-X*|Mb?wCq6F?, ?v`"\{s}}&}9CotpQ']/|5f0 C% Ïc3@^T"d:Iv[%>dR~ Qێ;?F 6 /0qD[iyM~ޯ+I/ #f'Sc nY,F?boŤofG>؊WV_8 :qI0@խ3}O\N}2;.O̯c?DOFh U=8:#0E :/}0`& <.0MgTl99FU"ڹЌ@C|xh$W.`F Ԃ_N?Ǹ[OZ8wǝ-x+w6tNJut뻹䌙WM9‡K,KU~ۋĶbW2 \1VI\&:wh#ڻ.>Mh=* v ׈7qiG)t]& jW?/]#^|2hOTܦ*VW ~\Zp!4]d(^aJxo%65Km;]Z8}#Mo;)6 V8ܺaXOw^$vbI6O#1M_O_sO N9oY*ZoN"?"= jD%imV6oO1[۴U{:.bI(S+ Ţ^t )>gg-jEe˽iN!!! /ǣQR4ҋS%]rrQm7WUV[wwg{s9s̙33\>16x?ZMZrXݤ}Qt!ޛ.p?krJ',hs& q%`t)8&ױW`+dae&>uo# ݽڌϔL$5L<XZ y:'~ELm44ɳ~e1}s\ýia:ʆ|ayQtdxTw'hYwa+ M'ޢ.?fO)l+J\>U}#6)Nڵ8Vj!Bvږr5S*)WݡWyj) f3ҲKhc2O1}D3k~Ir #龃"C4ϞE*3Od&%/zw|ȼQQ8yaZ4@4p0';#ޖJr_TONj+Q6xDeڎ},es7@|j@fb32&]hن@^rF}-,)cA%YRe^?ی>ȔmI1/( VY #;J􄔺 \V~t]e*cԛy@̓ L?yZ3j1J<`EK2;U|WwzF odBb-PHFG˛l!^hmP͒ɓdiBe9,gS7ﳉ'Ek8LI@fpZҷm<I#@x\ҽf6#wSٯhO]4zP! (vp֗o \>"KOW=g9#pt#-7G];J/_W9dROJ ̓:LX %B:m6~&d |bG}疠WK!P0[Jh 3]1JoPDk7CHC|8ݑ lwL3lsy.cJucW/bK? 
雥4HF!c[/㛺~7RiaVΪc=EP/\˭ gҶ6*ڗ2K⳱'hkceF@en{.<;gj_d |" ,QU%ro5GT},@ 0=Gk49í*^奛fFC2) .oh>-uǻwm %Dqvzi=b?Od ~oe{<MdboƋZx#c2DgL/gbr&>9ne{)kYVC-H- O&%.QAwl >tq|-C< +[/0 ~q+Gi\vS$yPWLN`m'j^b)i*55|U6:T,g кQyO , > ho[, .5v(E&t~a֨]F$.&,$U7A ;2-7y4"ssl1HDxіL,OQU$1gmf .0ކmF*>~Vݞ$S GK/F=)7'8>s;5]WvxENHY3adajiւ.E.T~S n$BݍGi8 '/HmIQ1jwkh/W d マ=Ψ+Li]#FN9y*u(C8DpG\6]%g1=<^ev~&n~;0 v}y^b8RXS}CDK>UQAJ*v!mTF@oX25eVhsmfh= A2r9m''hpUW8*?|[Kj|70Q_;[z0W>f #s@L $|aiOˍ5i+!;;N!H?NZ!ȭ1ENn"O<.[%ݕ }dOo8'= +S[r[W)S2%Go>!i<{|neg#~~',{r@(Mpm kzT־ڎ6&A^3Foȗ0cwwjjx/'yTL@nZ&E'5s+> Z2]]Wl)Wnϋm$?qK!B_7FqРCޛ~ V*-O>zh>S##o)έ2sk1z:RďďU;Z->6 WB?/WH2 '}u['rW, #Df ?$Q^8H+ũ)-oZۧi}i,izyҧtr tYM#z6q  $i)b |=&cbú@gIѕC7x)= X+~bA 6PiudLE?GY,|~g#hr:ldQF@l~J" K6Q$vVڶ@'Mmx@gˆWҌ+  r߹Zv(L8t# R`9^[y =iZVTG =Қ4I(ӽDtl'lm7%.W^_t8@QcuJO[2^Ib -G>*y&09Щv:S#b3}c+D t^Uu$KWCSkk=ܻ"JHWr5m b#ni'ЖJB'üĮK vYPu=w)w!⺁`2[b2wLe4jƄ=$;HFO>5ٓ`( mNAˆx:& jrQʡW׫bhC,xZؓ1bC3vq /M]{6 ;/UѤZ#YEk&4}6VR~VA ÞftZJTk•w{zc1d D|H^!=pSZCrOߒ׀#[T)|;R5ǎG[4yb9W@O:eJ\u Z< Zla^THJ9<H*J=mbeN|4»C]: T;Z={d== L$a c]}|F}bJ?1efwQ+qv%gCUs5!uJǝήW҆@Ϲ߹1utvuTZC `NtLV/偍\)BYv R+Ƭm3K2pS"L_b_}9Y)ضv[_^TDwr5媄wo"S1aC@CȔVES1lPx Wxul!?fo>F qG1m^rqB?g㼕 x(1LcCf{ڽ{p ?*z܃F-b5ȻDSwV2]oP7D7BE%SY>9Z-= a͘yrU26X$";!VJ0)}h1xݙ- t"r"J  eaj S1kBI&].`RIqq$jYﭕcG5شy]|֏,Bܚ))ԛ\Y90mx+/%pp #O %S*ATOQ az4#@kuCxXsN!ca'*f,D23w={!N6sl-L-g,Յt#w-l]{bS¡PÚogyVF@ >fҸIY=lF@{3FZl숍/F G;lfВw%}2eBƂ] %WK( s: B>8dT!Ts淿?*I(|4ŤE>BnAxY =ۯtQ"p#@'M6B7C806Yp>d?&S`;Sr)we.FXxT@b ϕ4qUk([~G>;mlp.cNcl^ |/)SGɰB#=5@(Wyx0>!~K/x5>oDjV*@CÉ-kVp?ׂ%矟TɖTJZ\ۄ5D ,cFI^"mL] E&]0d+?: I6hEO-־ɂ+1OSz}^2oKXLɌa J,]yp/}.cy^ ({2I^e%1 LwyZC@IDAT_#L}^_[h^1m$u?b5/$bo׫P!P|~}yQY||#H\5ϐG C`#@$]"1 G7I%9@P%\lºzC /hc|sTMi)`r I6$=FA<ȭK%e0h %-wx?ʅ! f E}b!E!xpT 1ٶG}b@ ,/^zzݑ>y11bi QsHֈtK2 }NR9iJc-Z.` _<:ts{|Y`{xp |si)2}hCLT' ӕNqԼ/5Ͻkz;O$TY׳`e.`i+=Fn_z5>~oܴFU0QPXjs8fKZ\Mσ4[ؾzUaolߚxl>>̛K>u X30 d<@>x]LI$^!3]=rbί}l^;gǯ>>,w# VY*rU#>!$~_Y c) |0")HBTz'[>qK_o.孂T8O]R FR5+SF%.Le#<.l \`% T(26:$eK쀉|w,LtOR^2p'Խx}hl1@BICab yߕ@B W` ovY>wId*^1R'ѣ>˫ޛۍ;93>OVId$!X-7xI@e \`{lt) W!%|Ocicޔ%/UD *0'hѢ1W^e*uD@TG<ԯF@Y.Vlgg'y&TK@22B@!GdX;L# #(# #* D+sjW @^"`002qLP(1) a۬JRC֩]:85_Po+p;:U*o-;)/KSx8/w ~YRBQ}1 꿊!;zkP5RȒ&X/>ƯYd(Ɋt"6P,SPG~sj^e6R@;Ƀzwd`7u 6[LwB p*)r=sjhr^l[^VoӢE/c\r7iI2tW;LK$Q́c3?.vYA n)tuLhß~h<N=,|4m0n3,B@(GjѪ݃7ys2uOwo3/|"@6REo\^ZTRP8~7^G26߱;\?:D)DtQ:V @ OU^KAr=*.IU,LAW3U=΋RnP^(;ҳLd4Va&@UUI!,B-l`Uz\e[+Kށ*SªL0-Ky`x+fTo.HJ-n/LF;|0}hPYzZf+ПBƫѓo|Lv=4t`)]945 1-xx-vޗ~ݯCa^sQIţ 9@NM=nE}( C~G`qz>[vfM},ǼFV)BiaӫA e^{oϽh`Y!PRH\8fOIGeҤQC#{cFztъͻO3K%.:m +܋)ohDEyk]8VL0JxzWv. =GxJd>QB^z &8%gNsN+wG^ub>Ub3;skݾ$ , o%8,}ސ=-dJ>$T(thM?[<+ߤYp4J3d7ziݶ^HNwgu.c4NϝFn+to|V z4=*py61TGB\I "o|.q7POW,ov{`mwޜmi %~FmhJwV҅s&ѭԞ$S7Fdu4e,斴V:L//Z1)^oU1i 룙:6j*ݮ KuuEҌ|W~އx:0(]Q-ԉ'oלG? dNIT1ֱF6h!h- &J9: hPmJ6 |:$+7?206sl̰t3ap6>֗i`HY}Y_<^zT js0evpb"+۟t*?^j!hPiasmZa<4~YY7Br}Xx,\P}s٭|\I,J'b؝llGOU/AeU@O@$o,@ fjثp&Mcxc dk/*̞du~W2:"x d6=c,Il#O03ZS,իV$( >ǹƈͭ{ %3 FîC f Ʉwٺ~+O#gFR2a/V4> Ī!@r 7FWLUC.U:(uߓO7\\U d԰.XBf?b'Yw|wFuEy14sH>3wVr6FK|0@y4VrO5>}SqSlL,]^|o0 C,+QWt=Ol\lf巯e/%]9,ݫ~]Ajyn&v#8|Sc%ܴqtқ͆/~ЮXݧ4+Vv5}msT6>sLg*Q#m9 Z 1Xx4ֲOz[ ~k9JliTZy  G#r=-QuB;JEɤQD%C||w3GG&VS(021K.$T\ qt%r,C~|mQQqw0Ze\)c:M5EpgBF:ӓ hda\qI0}p B!>z| wA<.>c` -cElꮜރcx^Bx%7s|t>_ύBxc}Z&ƱNp{ۄ cni8FgNK$Ұ޿W6NFTՍ^3^X}OBFjVAW*+$Y֎|Q?vInTv{Tӈ y%(f(`Yx \q l•z^qjO x~7EDeUB>."&q \Ɛc/Yz巒%Ac_Mjj=:Hd+6kr+xZt@FjOEwwНgB/}]:yWw&oD] 2my:6Pm]S"Լ[@%8 %5*Z]?㳢+/ϿO.n`t>+2#H0ٳ)C[E3F젲Gھ {c z ;m<wPEoPG^!-VK"ܕhtpV5Xi29gzvFZ7i-]#X /~{j]=ЏV'V{Ot , E(U'U:vU4kz%x TVptLtL;qeLZOd- x+Reڼ P1!p5ndr(-^`}}o{/`JG9u'k>eB7 <r /Bm:dꪜk]a`4sXsC3u/;{:\]di:.6F7*ą]-eM<6:K:uc -9}a |3aZ~Lx T#r(kA ,ٞF a#hɆ4Ui&kVqߑŎ#8/4qPBkbkjƨY. 
pdJ^O ,K`4xd^2 ][ V޸cً:}#n9tI D7"b>TxrL|Oh7q< z{pݎV?q ^BAto6=D3tһ <2CVC1?cн{B' }hru0\lw熔}yW3یjџ}e}B2TՇnh,S[ 9ZRp ~9Q%)niG2hM3G>}Xx_PƽG_HAMM>@cw&" O'4{?}LnLxiSŇe"0T!7 #tx dV5BG^2Ol:ep`lP_ %3ӓu/e9DwF?{|x-㇞##ţ buA/#]wE>"`DF_t5Fc3oQ9VbcQk̀.8Tn/ v7|:}0OSԂ' WԿH k'?po?'?sVtcseI.G=$^W}!V фC u&%ԕHxO2E/R-y9rt{ iÖ[x7w2]Gy;Z(vHļ pwMKBWUc=⦗ءEi2\3 7y |֙]9t2ݝ@9դ=߈3/ya~z9?b1<)}a:D_9g ejK;Տw4MŊaso0)dzOa} VKtؾI<ZW# }_u 1onn܍"̋I荗|sL$,zNGJg\xeM/൧wcOg9No(g?ĭzV C!=|B ױOF=v"hN/U9_^e,1jJ @;~/&lK50`zF`wJ2.6Z j1Ue!bh@M$an#yT=ip$Ĭ` }o)@WE@ Bwo{0S3 |BTAcE<\c|W4äFXe_܏`g<+9 0cXhCrEEoB [ E~0g9F.`m<:eOINlf!^Nm9G6\O-MZe1|mg>=0w0%0g(壯E\y1??>/vwsL0f@CLjky>C {b!xLUR:ks/erH"5 0i+]ƫ ذ "e9oM<[B '}*-We+c}~/r!~1/-6ڍrX+uߙCL+q*xj5/^}@"*^\*>Xz.m\;ي^P.+ |GA6&j\4B̪zPYYP(C^Kp!\V AޯoV B@!~MX`$ ȞRe􎑺C!Pm0Vz&: * rY fV95B@!W!^s$yc-ۋ$CfCyj޺Vjhq1=% ޳b$%N*/vє1baǛջIB j$(-'ŤzK]Al 74Ӟ,_=F?%1^y]t=󚭴LIQ.>D@@I4~qd}mB)._EW|@ pmND@]Gfp{Aw0LK@S\A:L<`:'꙳>ЉS|_Uw|)w>L/yݽ 4nU)*QD0{}>v{nl}yFݠ|$$X HAc[[@?J<8>~޽2c@gT!2% IРׄK>I7e0ຓ;!7ʴ3,ʛ९)DRy*O%=]*S|>Qe w #%wH1O[YP:;UYg,"T|)w>,SEj -wq=&+OEj]^l2L*u2)UbW}ਇu@^@I~WU=pŧӉVoSFr+DF=i4=S X01^DO1=@N8{::L mI;b莹vFWf+ӷh Az盨TR0RmC/`{x& Is~\@3N::dDITy dYfڶHaK3bsNOVҦ{=H%/Ғϧ2Or`a7^yזiO.<`ג¥gКm{M߾җ{ wi?FyZ?N7LZHVG5m@[أo"ڷQ3IQ(w$=3Ɲv<|H+1ojVU'ck.v“%ד5aI?-ηgţ b5~qZuaٓ=SFKI?|#>6-4{(Z6)ey)+[f"Z]3jJx>^քlb~d>ƕhɎa,ެniӤ9 ML'#7ZUNǬ1y jmVtGVS㱍4bQTI6?oJ[g gqU\Tt;rbJҚ"׵{Js@!@YdvűˋX}3]4w ?{BV*+*Q&~0\:B.ei;D2T Pds OxGG@;}xd½$O!e>D#zpj[5OD"DߐgnqU2Zp5Qm4xed8|-]ţľag?H-ˋ]XA@`üBcLjmzT?:^*崉`-U (h`i{ֽQN\I{ؽ?$j\:B/㋹K`A Gr\sXJnf; 6z5ޓ7 _FwG;L>~ݯ+A o<4ONB/L}'d=З7v ,<dC?6U$^'pt^4 ]yavN6Xhp[z ]]ϸxΆ:S6m4L- ҿ4$sM@^]eb"9-q{Wfh.kj{mx8V0ҽ7^Йj3|) Vs8:Oo L*Oz˙ғWF,x,<ONG٬8 4V"tzW΀R1%ރ@z-RRwǃ@ʔ%C`r6X<=6Uv/oH[@W䰐$fc'@y0,1 ٻ8OMsʳtљW|\J}b%V@Tyme0"CLwgb60N2~OMmD+B/B$/==5'U-]=s ǫ)?U:-I'aO@q4evUt;%(/@ kbb7fZހޫ(fwNhM6SF*JG)Uy~v7.B=%SdzYy T's|Xcܛό~4aT*ORRG5JhHfZ.pGSh O;~4AU-_}9n#P]Ct2y&u@IDAT6G'|h@@%}R' cc\ffC,~nE7Z+r\N0xOEX*S=Qx>1rL?kNACj/+LҌÒ~'2] g*e0,VKTsΘ::/QC n sҵ%3˂> RWe7=*잒`䲣2r{a6(6*a1w?Z0N 7L'ݠ?bKgmlHFL 6 ~rqz OX}eR!_ݣjHA /zMcvrU26z{4bs*e@VjZ3W>/^BVG/o'ھ=g8rt{RX,fvA().0 }@/V;NjNOLrMV}\4bDw^M ^FƢ(2LU;Q}ee Leo`dPvN%ҚP>zŧzO/\^2Zyd .*_$iE#N w/^ ]<-GҒW(C.~ V!6t:MuJљ)Ab o6Ӊl!gR0K%L F{)cR2Mdo|?S ߖeCG\/K`;^ Xk~cu/`I'- (DBZVr:Amwg4p@<v'@bQ4W? 7A 4hLu|,7hEr1;GQM eI 2|y/2oӑowyenSP4sލ^ XoQ>^WgH#+ Q!G <a_]@iaG8D|ʼn"HЕΔ z7ezî-_ZuL#/Yļ EA/(S!ߩ.Wxow)2-]@Cx-S<KY*1@7VOˮ4g<?즇 t^EaEE,w?=`*ӻYOX!!˖7l)n`2͛{/19h0 8Ճ6% % MVjp7ci.gewVM!<{~yBà6 <5U9{{k!Mk~߷jQ'w 6Cl筶Tf Mܼo 7o1 Ji up;ҽy{F l82W#?&.&z#lM 5?عz~(h~]SC6Y|:>e= Pz }+;fK^\{%o 7{jr߾gP[;yOh'Oߔ`Տ߾ӒϾX-1"Y `Ж_߲Cmixu#^x1Gc0RCb@WeõNV߷GʺzW_< .ǃ@Fn[>Y*,>{m-)3K8c9AIyÊ7:f(;,..bd!jb+"c1+`x |};(>!m\3f; "uVoe~4 meʦ~sϊ*)hVd G-ᬩ @Y*?3xXn}OJj툏Oi鿿&P^O,W[| hzu5ҕBp *؃?hx9WbYkV5 ofu8g- q~Wx|Iυed +f@pbqGW5XcY~K9o97=A `d;\6NYŽyoMJE?jR';V>p K>`־u[Z<"$OoXGĝQ `pj}g90Y`8;IhևXǚF a%)KE4;uHhU [/$,|M|nt]^X>6Y$-%-fJ@uևڕ!" En!vTB@! BK@ԄB@D QQB@! BK@ԄB@D QQB@! BK@ԄB@D QQB@! BK@ԄB@D QQB@! BK@ԄB@D QQB@! BK@ԄB@D QQB@! BK@ԄB@D QQB@! BK@ԄB@D QQB@! BK@ԄB@D QQB@! BK@ԄB@D QQB@! BK@ԄB@D QQB@! BK@ԄB@D [H 2>RbtTkm(1az/ Cك5U,?Z\9.B \D JVQj훌.޶^Sʴ?SpNK;C;z|w]7/>`0 B |T #"ef`ms|?=/42JJ6!9mߋljɸ<B rAõۯwݚf=[93aֶ4i{8Wl_g`ܦ?%:o:뺥bxWiWm.%! ,n1t92|Nl$_iۊs(U/ڵ-;ScBCP ' ! @t%eN ~B=GNQKHGюSqVھu*-CmA+^FDܢ4\*bI u#ƒf*c6Gv(us??' VjFU.B@!Z$ƟLui)eKvzo%w>tY~ r&wZ! @D% u2֥>w1m_We:k)%K-7O;aPy_)B@& J@=SB' \iK"Hjӗ"ѡߛS{m!B O@B5>(pq'OǏ'XBK KM-{dC! @$%OaOOII=Kc>*݆̪r\vB@DihS߷lxy{j(|C {7߀%B@! K@, 䛽c^gċ! H ,@`##:x }2 {D , ! @%Fa.++3Rk'WfP5"^jʮB@D XQj/+)qp8)X6S"!-@) VaGecwndXyE0! .X(˾ ,u^0XWB@"+J@p#2[6vK9qOeb%/8E ! 
.SѱX OfQ1Iz ݛnG+--.X6`K("AK_= MkmH*}սBGk-%@bFee%/>x{D_8K>}$$9mxV^OdnR=jN9@/Opei  B@l\6)S0[zx5>ũPPR!JK!}>;5s+"AmOS!ecG6\sS@,iXrT">qL |M(Yqm^Mv`8;%NN$i%#s{Dz %D  B@lg~ ?AfҜ90-tC1ƫqNJ,aÀvr!/3"YQ>Q*i6Ftefft)?謹f)\s+c1},1,S>zf:͸3%:qx(qqu.g{@K` B@M`3u2D:YӟlNpM090~gR_F:-]BVr([rvR$Qކx{^OPoBثhڵs4mQ<\Kl{hyѭ%.lhe\;xw$O:5 ۲paPJ+K/ Pg'=E3&\ߚњ.dk 0||֮nʷ;-YzrC!ֹuW4\3S("s,Qz-%†غ\oW_0U=+]tpnIrK6)w(ޚ_(Z?NP!܄3G37sUM65k\[ +|WW*ī+ȅ|c u0kd5Hel71$! Z$nw8}֛@^2pܼ+εk _|㰨pC #m{#L|qGE|F |4(NֶYKRP ;.ψӐGSXRՉȄw4"{j༰"<7>Ϭ X [,k+jN4NW+$%%8Hu%! N-+PTeӟixjGrkvrgM((^.pR9Qau>2l먷kjA8J a_'4'(q2(Ū$zjK>R%.-sh 3ECۖ"`) t< 'AqSg(fkCđB><4% iexhmdќ̰U:W)inJP3?@ b9PcV׈r#z%3V%gWi+zDnLVp"¬+b~p~pϋubeqRxm-\[uL'=|z88ڝV*)'B@qߡVH||8ϟ}jϵPU!T*A/JO/exB<ķ0kLVgyYpgHN}sG׋= PI#Osh^6<Hf:%Kz9"Y|_Ǽ5o9 IB@D4|`ȃ8bBYl'{nCϩ㫈S]8+XZrmۆz`S8ݯ[)ػ]ˌGtlA {cGKW.yv+\_Cݣq$! "Šr/| 7ϘuO_XD}_F$c o_ɜ~v\ 8V|>&]:V Фف2(D5Yc.<([B@#05uh{m{ qnԆKI4@c2`hWiLG?]= t 9 CM09LfGkB NDKz-<^z ] ->۶ٱqOg'tNZ! po^D}vUGm6G:3hw因 Pt~-p1R+.xRk.yMvB@4-5lY-;AɃjR h3w'SB@`T l c'ZSsT(یP&Ag I *oOq-ܥ{lzlOh/y:"$l ! {*M5j>:hz>m?<ڙpXھ5:'Z#.ɥ/+^FLqͱF'=t/B6yg7ǺE ZlZ@ ꛠISnrϯTzZ,FᆃN_(/8?ڴeRBp ( @X"(VsyBX x䉤v˾B)\y>R4W/C]W>LJOka̺sh+USosjsRiJLUdQb8_ 7y%(uqP[Ad IE7ݥK<*J@Q\c-kw}.WaT؆c@,R^Z~`#x˫?&(XzG#~TZZ+h0r\z-#VV]D$uJ;1'm=f\{\öݙj/1P(!1:ZlCo>xUuWKf'X7櫤޹vJ '?1%]6a$\)ramX;{k! ŷnʠז ;S7s&F+8(!2^:s^YjϺ0lڊ^ ! h coO;`4ʀ]Q5脧i{4m,?vPk/u&ܑx|30ʒma8o2Бm8fzG=Eqz9v=Wd?KUU58.):7 KU{ ~iU+w1 6uU_SaCG c9 ؙbN ǽNA3{++ '͑yy8 [YTJ]M˶Ehς座av: *!Pc]*|=jNڞK,o3#nYR*BI{隽GpO]sЃ5ymbjs]NiI#5U۵G:'#Qcq5nqYΊ IWD@Z=Ʃ$L"ne?RXn2J=9>|X#&. qsIJYyɅ+p u`bS'X9؃,zȾPbܽKm<Btd_Xl{ٲ֪ۢ ˷& ݶ~n/Mcmp>"ݽ${UCً>Ұ{ϟ2堢 84"-lϪbdSFq Yۃ67| 7κbu_OMLŇ \_ (5Tn$ ЮįKA%ĉ\ß\v- =z ~72z|C[c>t]]':d4da=n]k=}fЏwʻ {nczlb۲ \joȚ-(/+=Aa@ c^׍Vgg' clnUHn;U>Q@Xzw~x7FPGi@3(.ܱOj†{ ߏB}?\ث~UB>3q;U3@\?(p5i˲haXhqsܨZ68nxXzз%@ceu<):h 8g-ߺR/`1fx<>~[{I[׾sFp(X7g?O >lWShFk]mo7> 1aUr"N7w8[f]AuW41lG%e><;C ^=vʕoծs@3`r2Ûz~}4*Q+NwB*bPoWĨCq怹(9@\R?G+[z3axؔr_߸/z][:zJw;lA`Ӈ6Сׅ3`cNswh̶(׮:h nwCEfsՈ[q@aa<(M&g}.8akmE{Y곮-2ߺ?AwUP?L/. :c!F}M c"%PqH`nd#`;X*6[Nۄ+<8 B۾o{& 4Pf>exCS:W1i=֍*U[x׃y*sF.d:B.dO rUlùwncs*ӖdYz +d[A-1LWc=RhlHzrUr]OQe~Q숋WVu[#? Z*=>vX+  D!8fER(b]c0oCRU7NR㥺O)J)eM-w(YVKt*Daԏ]+/)n^qsg5wD6ycz>6- s#5' qX8 V]jv`7QƵyp||s&ޏ|?ly6: ujqdwR{A耚ݸSғJh,'Qț#i{1uiN`#:p2.INNaS){cW׷f & a'WmQ49&f] kE(vCAmaǩWQ0@!(h0PtbzaߕѼ Fp _[qүXi?_J|-ici33҆6+qX ۿ_kX8o`i:k}%_+|vaZ(]PS,) ~4ٓ>.C-kdߵ=Vn'VP&lj=nî AAKZܡ4WQU%%j s?TT,$)Ȁw2l"TyH*1cIЌ`xn:S=z M67͛Kl>a3ZF7[3?+NI DGby8*RVW$=IJkyJ C;<0 c %/ncQ†63$Pǭ2˽XH%pYJ7`v|uw*8}gg0e]+7ճ^R9[V|?ݰXO욶1*B,q! @DPhZv6jml0^πM)CyzI̙wzFٶ8Oq =bGQTO04A$g]{놲&J@SҖ{ ! @y#AݥBΨ=rξ|_W viֿ}J\{ykDl41pB@D[eviJ?gtD< pa-TJ/y'Vi3?#V> X[{z5ݶT]׬cH`6~ٯVfn _}PZW! bm2aԭ;C魫J,Z+&{ڂiiʛ$!J[f<:ܧ~鈇kMn{}Bn] ~jDn7], }XOM?Fk_ gG4u Lo~u_h!kM\;w{vߎADahck00$-$(ncDm|vwz"6f偨̉ڧe$}TH'-ڄ3=^K,O5A8{KB@!8\e̦{'NB@&#w {oj;Gƍ0fR$Q-JwP|^B@@fڲd%&?ϚgB@4e*,iP,̊G䁀xZ)! h!{`[H 5'x @֍GI ! @K `~G^5hf.x.ċ1##C N0(%(AQR%&֌m$+w?f!U@~9 %q!}OtŒY7Ee0qIfU4%(b h!7X7x48 -4U`34p E4o Ɉe.#+ OCP_&L ii'կs֬%%>US~5O?~Qq<&̍\,aQn6qFnoIٝÑvsW<K.A$hC|ZB?XPd>,9 (4C?z.ɡL32Rg'Y!H!@ AFQ1$9 @nMx|0ؽ.PЦ~嘆'aLAaG[]?v&@?x&4Ya´,;sڙϛDW3q'&h`8uG~'u8~Gkwf= 9&2R=}gd$կ|.x[Um̂" 3|Hu0kVbt,-w'g6g{0\)g>up<1G wWQe{$u&Z]fB8߱.ĜW]BUSqU񻽻Pm7RTE4ioDZ_Mvim׬!(G_iz$$s)Hתalc.8Ó k1-_qc=޶3׊׎}( p|=#øWWm tMZ?\=>rz:(՗ASL\^5(+Äs  =$xu}Ʉf)[B hpXbוG_us?3\SI=(?P u] 4@IDAT9Ҋ82ƯMȸ.xu_n}]wŒ{{|1 EpȬYga8gy9E({G~{s\Pj+d h{ueZ?4&]7fA#9Y5Ҍ7qWM>nφqδzXqS ^QϝyƊtރ,\ icWxx]kڊ?=s(TdC n&!.%4RD_T$|-}hwt,<}U׃*,(շͼ? 
} ζភ/A5Q0~ ږeaxOzd"JH!Kf38M}z&eE`FQ44Ⱦ!Kmzc;\@Aݠ QB]NC@E{QKfaG]8Фc Ƴ Ǻ0M)bp]O[B״p\;G5[Y58PGa^>b=,`~qQ=y[=Nwnqx0=Q9 >%ZTߡbv|N@,X6cWJG.O-jΰM(_b JOpftE1A7xh/hmد?6 W_\P_u.t+6(@JV5yĎ5j/֪%gD7bl[Ny%mE 9OͼͭGnvN08g2 x -~涪܏N%'8l X!Pȫ=8b BYdS{5zlPe#Q .>cҕm(U=R PPPS>BPHk |!0}V>Lطsg޿ī1AxWnTkůqY'Rk}OCYAͦY&(dNOB@Zɷq-h|jQ.3V1Xo{? *6<ҎY:^>}7ʲ*΁p&~P>@azbY/P~|8. Ælh`@?6 Cݬ@wi8օG8HBO[Gp܈Kϯ>s$ob(oydŨ/bbJ~(Q% F^Hfgn+P&P5DVO/1L7zpZAAu>?I=oQSom4(`WU bO}wp{59_9MQԩ fi{'[M au$_Y|RMh?rx_=0Ƃv5*M?_u*#eCInoui2Ⱦ)zMD%:'v/8!XW-nw;j7]vW) ;5|=OL嬡}vF(m4#77{$?lMkZߝՒz>MLj˗ X/x\0=X^w >444\f1 cW_1+ގ Q#l:R+ |nޑ"1!C0W*,gVjK4XQB@! @͓>DB@!B b h!U!BLpLbA! @!PEP,-JFB@@Zp+"78FɄB  F"`i`Z! @K'@E  ! -Rhΰ4DمB9TO@)B ,y8BnϤe  B@?ZkBO ݲr* &49GĴ'e[p4Ӄ%@С@,o5BQ1(d(%7 -$ `Bҍn}O^rrdG"JA?W=@D \$]K׍ N=-%AqA eWPbM{x}كkw  ! O@i6F{i;W1jφwгq4W'%К[80rڴ~~]f yǵ7g/dbe@ $`%04Dw( |267=]>j\uԻkio"H'ɋq>'?m]x3ۋEE'k9%8_9S@Sd(OC4(!H,4~_vcO6ر^fy*rh' Ax~uS@U7]~gt<敒~_9gc!4prw!`F \QFرKt:^ܾO\=ߛiڪKCO)P*lD"pes@ '@>qcn,R NQu: um3-,TFC";Fw% ?4WgC|"aEt{t3daOM<P9b!0Wta Pjlgs){Nۍ'"b 7@2%@U׬I%YzsS HHNaZ#I!Pֶy$PpGTPVs(OFڍG@c {0TiY4\;`%!J@<&)m CG&*Z=rJG-9 1paA d|8$'Mfs(I $y@O'.#6`IU$܉9ޱP C`ыOBs[i;HL怊."~r#-q >pҒ]cNw/OˢA2( +o/n1KS~q6}ǕL{TO?]sa嵜ȴKy QI<7Χ=:%%aN +bij,N>&%?>oQM|um.(M3b=4iϡsIcWE4>z%28JIXڸTZu"G0;W&]:YzϾۯm: *ޫ`+? % $I!LPAQv J@S=B{QYx6B[p+h7j.YN]|u՛iqx[L%cz+3^~fKԥ}JE8넾 ofFl^g\SaٹoB#pQ$ڱ ݲ:M/> Ze7A?KFm~}j-XJh@ώRFO~^JILw8L?-9PƝH?nY^_,_GaM/*:ӆPNl 퐿 dt 4w: Ecc_)wQK@76И{ Èu|eMsYBٛw &+aX[w8JJi5ڧٔ ]:1>ؿzHIaߏhšPP\FAQiƞBQX?<]蚱#irz3n~a ått _^P6ʡh{"J5A&BA=RZ%@^66 hۢng"=}JEd(ĎۍuϠ7?!pgj%9jzJй'V4W> DKJpO6ۓ M>ݕ6leK'uG@qᠢ)WL' dX?M/<]9sT( }` XuioHqzV)h2XiZH"P9Z Z >~U$ۡ j|lVv;_7\dFႌ.~ac?٦9o,]b>6ws>`WgKX|J `*-]vDYs"P]Qeǝ6M^&mkƝF;5q ۵BXXab?[!:pAyI `iusM7 +?0 GKE1i>9>Н5Y.p!rvSGP搒$p37pOoVlw/#ͯ.{dƙϾY%,qH3eͼ5Nע_on~ >+"Hkfz1,i &?Og&k:x]z?I\`Hs@ x1>k3@!s!ZailpC*8YhGy$N0`oy݊[׵ͦo;~5󺠸 qOϪkr5AsHVVd85Ի}e4͊ yV٧[{+|#+AH[8_ .'߮2 X !ah>_#-4WG- |06{)p>. +`(ù`n8p ܾi-:6'`o~`G`]1 ЫK{򱙼= .eka!m K~}] 0bKIZ-wД2,&u?Y !A eXl+m g~˜q6%4XRn: X]/T]ix^xs}$KECvxNJ &cx$+Υ27Qws"G C @q&iqZeߟ~ݜ ba M V=|vj#NVֆM-2.K@d3)y$j]E hr B@jsQFgwpK J@Kx! +//3 *P|EЈFB@@,=1NC,q-}:] 3Vc`C%EEC ó|<0ϺmyF NI4 ؛kN#W| 7*M-a64Cv٨Cyq?,%-!_@ hIs% eҢ(dSKIc T`{ :gDW!!+ 0 42I>Bݢh`(3 輓7{&GKU{r՗pQ #Mz*9|;oTjDib7hzKQ/Mf%IU&;/ wӒ O3M=}G=OOX{{o3<ƳڗBҵ,[&%a!(( cTa쀟*TdaBzb(Ht OT DiB> }]C$wg#1quyӋsF20 HӸJdZt6IY\ROiޤoM@TvˍoUxi͔-M6*1"/4V| Ծ&S#fţ}rw2 r|[GZxf)ϊ3hWA82 ]eUS;ԋsG:'lq&p.p)훘y{Y%?SƝ,kCeA{'NA:3Y%5͙O6APZ:΁;\[ѴhzZ!8jXt'QG| `m° XB0?JRB@Ϊ @p?&qm{U6 GhĶX/Z/u>lv @ulh ekYP+9- 񃩠4,~?B#` `+v9k^ߗ(;3-y,ZK@>0fpJwl@S97;,R;t5`Ѳuf/roY./C!Yld =NZwϠOz쁺_EZ,T(ׂΡb7%% 4WT⡭`΍2)|%e!ŕ@%*-P+H1 fcF߰ۢ4\4^fՂB0|8,ڭ{ CCCpy  ) U#9"Tng(oS@ܽAڀ'ƀpPrOlfc%Zi!ܼ"TƁB⡋=и.9P(򱜡(d4@Xb o:W(on ?cʒҟPU._ߛt7RO& r)!Np'rtNA矇P:Bhȯ(" ?WQbj¦ ,,`]\)BNÓ`f@XU As%J@_M0PFG _/ Ownu!#˅@l:o@% g*>1Z7m.T\h[xceh5ѿ!o(r9|`3% ^ex`h_aQA۝h&)}8X.?k%Ƨ=Wi}&@3_uq-&*'_'7֐N6z)4Z6>R*]1 (*xѩፕ4ax)doL_'@%nP+zD gdOB_j*{Di)ka^*-ϣv4졸sE?W ApbGm<J٣ixFmMz(|RŷfIK8/}$B-ĊA{ҡy6CsZ*F:՛Dm;+,šaNz{M9%)'ӕoR~/m>өZVeݖ{04Z8- ~NHBBh!?BH;T7lcle˽IVoWwiVw{;j̾ٙ7o޼ оE;tQTIt6 %(|o ` P"HB 6T"d; vQ QfDɮpx]DLO@-[C$zi:'NH [ mt3LkZ@q!@h2FЛ7Kh0b  Ea, QjAn}u 1!;lR\ g|fi0CV5{E"dά¯"5"$$pXiiE]c~L ܹ3`*ßns WnL~9J #]܃k;|lLj~sP/J ]#5k2{mu#2*hh:ёo6%=/;N'w?iA-hE?, p=Rv'z65U构&s¾r?|vDӌs8GGa  a{!o[?v9㆏(|dw8Z{t1ms΍6y[ztD>GmEfT _ؾk!CAŅ\tL&LC5ClZ^ Jm͋=eNe&& jej԰Er:3nxE IgR[ 6\>:|GGL4w-NX o;'\oQ%UASgU-޺:JO_)JK/5+l6- Fa}͑fסmX N}Kw(0tʐv $\nIy[?P_yj=pYà] Q[7 rL; v +X' (^)00}aX.𝀂"?XW1 N^P^}4p `&=fgs/E0}f[i|u6Kg9@ۇF믷!:IBUa ߏu68T> |-K}VK()Ne'H+ NtrMXL@yRރz;` [S {#ҽsVlmG @̿=G9@@~]>^nb99FF@G&@eK_,Sa?$0 $5-f x\xA@qEemL/W? 'K7 ̳ IPZV6S,πSw|$Asw ? 
vԻE@j7ˉ49AV2mє  2k`t*(dKKYG2,1HɻHOInىh\?\qP4XTTJ>,< G0#I]0['/LXMI,sH53]Z9Q]갳Jyhò G`R6q5C8R *>DŽNP`FW Њw*@>؍Ff++#h--T>y@6;#_6 r2v#5(‰]xH T*:vderz/;$wc?a7{Z"7qOap^VjVyh)^f>A-W@ ~sDNJF1(89pNvdמF4AyQ7M])҇gA}}v Y| xh/q~zDc:pA?EPp߰CG\.hym0rtP.4"XDC\Mx;?Guh0`!هi~, O{.hDNb9줞wRSRk hu@O@7#S`zPw2?O-, ΍z$IXaŷ4eI*uXG,&$4 @Ty+kx(95ҁ'O|C{ vVeUUλgp9`K쨖\hNv\=j< ~ 50@0't@ jH O(Z/OnZKTf<] ى/c1EEj #PP[+r P" ,A6:Fڿ⿟}tnh'eapǑf|w551~|; -|w@ڀ@@5 T=Z>i: !)߉X2 p- 5"T%xFgJaET~'y0AշXox+*D<]j/$nM M=ΨW޲9agbt5Q/pPbv5SD^0[XӃr$8 y8%&D:3РPKpi4N~ZlZLHxPݢ:ƅ)*h%M.hhIEK 4ˍv룋o֓ 365(.=Qad`mgRG2N ȝ? 3d+#mH0uCOtVwrXrp,!ɐx^lh 4 >Yi4GD-cTxp&@PBdMRG}{OgM -H> W00b@OvW􏍊C$5= u|<:v: ٳ:MB^vZA##}{m2jo<@i~_U+ir3Gt5!#hb`P`Mb-f˹h'/{ /yaݻg}{f;@?SPJB:ï=9ŖK/S% 7oS F@x7Y+7RixZt?Bqofd',=r?gl8WƑP,U"sl>a0 VAsgι+03q#ɜ b,\-z ̉k '\ ?Ҫo;Fgg}rʩ> HG^}|f9Qb3xs# x9&*[%q} \CoUwkrͲڭkѳ_~ծGF ##@pm[Ӹ^@TƼu;q ,*m\EPՑnv8f| 9"WC)__AzH;Rq!@g \Ŗlt;n-&ExUJ} ?=0 Pgo%d, rj iKOI j^ @!0OtYA+H(`u %G ,6QKvˑ!zVB#` @;\`,$IآYB9aG@PB^irN GXb.aIkxOw[ |`חAjakۑ6@]B$'Q(Z܁$0\x)QX `3 ?J2ќ6@'!`6T>aHr!*X4y>4DiqW#|x!`!M&W Q 9 2@b J2(=ɛY# (L50OqJ;&T@r!Vop vhINh8@\@IDATQ#ʐ 怨n[$sҴR|~irN!G#P@_@pr!#+U;IH3;#$(B@qp ǔ8B Sn5L2'M`RHӤq9yE\eϬrcߦ-!%zJ~bí {egRi'eEw=RS q=Oxv!L[;g#MiF[jI{l2/|_mlsZC@显y9; ל=V3qM&!I 159ApXqK#8)!gjBbW3i7C/~N{yUuRuۄf?R-osW/9I\>O "k%=O=3T۸Щ~= =]^N:KG_d꼋ۛ%Oop >GRO.{YYs{`㰾@}Ǟv;s[5x)#$ u'0~D;d1Jh$[}:8!=r >^6xqŗww2&FO20'}Lvș>.uawsUP9@>^j:h^UnH덃vgfp$㢋MĜ[ =TM`US@@{ V t\J[ u'~DdDXq[Aj7n9,JRNQ@ Fgg (`G@b5?4|@6- -E8a!4[p iMAy?fÃ@0$~ @l`GAօ>1xeX U**(E!2jGpn}rm@(ޒ,[6Ղ2+s1%ia Բ kvLr~OfZH(.++ h Ґ08]w[b)gԎP{2sm@_q\?{c;%4/ } 5bP E\0b! D0<%)#dѠв@$+`-١u&#m0\UzKF/RzT^(*S~%MᡓhQGjJ>N1'Ԟf3Mq2YT )~>g응-ô@H½ i@g1;gpdI0ߏfa0wz*螚W_8ȯ_dz6Xx>]>fyΫ΃%>]} 5zҍ?]O[gEn)Z̟/Vlh f˜R_>8t~=o)8o^~'`4 zeZLOpUcb - =>HRxF ꅝ ~+x}~O/z:y)BO;]hw\yn}w]}> ]ix- AA׶ɃlYh)~,-;UOjAc8//l(yЪ@Ս>R3~΀回?ǿѬ&Ą‚I _1 l~|YV.oVm|yxr8rf7F?x4L9Щ|?(̆j랣B90FaC༱C,ji ?dp2%#=_ 9[JXq$:dN 8^uGZwXPNQ{5%- 4/,k-PQ]}{t.iTS Yʫjaܐ>pcЧG|"$>M 55/~q9h2CBmͅGz?b@ӆͻ ' goF^Gus.: =4&Y`,Z /􇴤>rjr X9rJ$T%ԱPFk Q#;Ekv>4ĚN3/']P‰KN.yWL虑-A~Uj>@aƤ̽?esFB Nȝ?80, )%l3@XPUGaNhmCCc۔v\œGB5N cw߉8Z5pQJ87j4@S_agC4/M(n{'U?i*_LT;U[*'A&X&dZz upjXBcWkɲʼnL6+_mB\H|1@a?F3BZ@-xZ| ៟_} Eg C\-W N%})pSe #篐]…hI+/\w~%l?BYH"p8 bN@W.i*o*w QHup@> A>b Q  a@A=O=2{OOs;|x$S: o#\܎'µ{>w\uk͘c'n\ƣE!dOu,7 lTo~H䎭-gCŠ逖BKmWm]H!K;i9S?}?ˏLsr)’@3k`Ѡ=cȤN}ٛR<Ȑ`x1lx{I~M929<&6RD;6-,@c=5΄=LHi?ťΟ ,-VTɪ5hOi۶IhM<[z/{;w@t`@@9N]Õ pb.4;{)YHVa8 `}k4 8H~gÑ582 p9/ih ,Ao}rKK yŊz;|nnl/{$CN ? g / NgI\?fw#"` OG2;|Ǝr>QQM8l$l:凥h@ŵ2G)G5/ḟ:, 0 %XYrI(_\1Ɨv<_]#qV40pw.? y; .|ͨ `E{&}-@S* -bKzb!ZM#=tZaD|uId{cOi76OhG/d zr[+gPl{Tp* :.sG?&xؾiLz@ jC:'5ӑv1"e(rs=kT~E@BFU5Wtnے]=`B59o7ŔdV+`|v#xE]~ʾ) >tE<Fp| y1BHLxzyV1 ˲8VS89EC~ ! 48XW\2aeaJȧqc:[r `Ͷ}>VKc9~=(kL550͆h+ぽsлPP 'ῧցypw39a1r;>^'XV tzpɤ]T|1yy{]fRiCnnG<*kϥ݌]~|Y7}^ܚ;5t+fh'X{<(q ]@g8PM;N~#AcݤPxFBlp \P HM VJ{/#-IGVܙ6ډPT#sCƀi1xpbG Lc2B -R/܍.[R.?-U:'GB-mi߷ m4]#7յ9toz5P NlI5}1EH DPIL.00YجpYRyse!.z6Ͼ%,]W?~VL6]/}U-hbܰp!8 `66odu" œ9Raت*HO4Bp;tϞ.nsHIT6J08|48j^_h1d1__%t38#1L‹<Xb<4.9(@lir!@@LJ \C^]-&3p5vӑb|k{]Z 8qC`|-@b:ipݴwoD/@-paOUr# _m(Z.T&K%r֧$hDžv) ېd_a=& v^P,[? nZ=A+3o^K`,BHߊ(cA:Qt@}`? "cο-]3f :rk% R<{ҵEA_i8c¿;U'k+\VDJM}4Boo$@zea68²Y! prÃ:xs uH:w-&h/=+yf%u0)K76b}]XIpʇ۝e?*DK#e֜{ρJK E\HAە2v(bQ=*F 9| ƩƷQ~{  c>6$ye@!/w5 an87fQ q2S#QVi-mz+A`p%v~nN>aߑJ(ϐw_Dt*2 Yo@i5"ieu`ikOؕ0D@ n\sww_LAM%^J0VL;}&zkbofYJAt{HԀARd6NMg2Wv XmVTZP֜ƎG/$ĵ Ơ^:D,}>QZ6i|.˚ =3KԆ. 3 XV,pЩg{WQ~XxF@@ϸڸMa! 
D$?V4~fL(?=?-m G4A멶sZ;,bǮ|6FC ActH+-K'z8Q܍Z#VxD`٫ؔA2\ГGȸ2@(!ѽnu4n^G߮qnuNh!Pip@AuQF hPOѭhOhў'-?Twőh(*Bj _PB@{$=I#%fsl6<60rF p*WpB  B_sxd2eSFk{`MvW2@$0qM@~"/f>`MC#ZpSnuQ¨SŮ NU`7oن&}?S'u˛ea#P 0T+~?lpL?i'pd$_i!r 4:ݥNA I7O!Oav>%~fYFz@4a6IԈhS` KnI% 6SQGr`5a*Z)a]BqBVY>GV(ZI}hbd֨ Lv9Q|IK֣ ܂XxUjQ'9Q_B@p%v=Է˗>m0C9QWUw]={=\$&=GѓrnPT|^xp1 rB;F&'†A!`xH5 !@2;Sh* =m%o؅sSLy9D@C7n|e \.hh L5@N|FɎ.%/ 0i@}$,տk|GV^謨> z7O|tX-N~' a4Zr~>Wӝ-:V*^eGjEꖗթ[wMO[.}owlڧKy鐞/9<ND@X$ !H&_i<j^HE>MG:y>,3%ϫ|[XӈC-C¾r?|o1܄?&;jSطcͻQ;?fG$=% ?^{RvQ\&<, 3vhuR൹Q~b#@;7]nAm t5|2d m&Oh7:E W&F6 [5ђ6}Wc96}WcGÿ:m;2wźE@"8C-5dĬvEKD[CKoZƊD)r-$!u2'h_]VlTsAk~"иw1*8٫@4:\ W(3[{Qg$w1d5q.cο~'@Oތ$Iea1r/Q.UuOF1==7ώJOEBRsų:_ ٨u_-6& M0B}794IOwְ2F4薈T&e- /+@}% ;ΌH?Mu&;K/ʝQ~q$!73%  !D2gnMQR]Vkпv-6;j,SdɨҤxQ9Iങܨ3Dׯt=tXWww}dnԺFyE<Rvb۾>ذlr9c blP8FmRMD[D^F-koPc0w&j۝i6"d$w{CjR;H_Bt}2\_]F&L G\ BB``͆r`E-0]"KB xVa0_j(+{; [߁; &)!Q`Ez"]6뾦 ө]U>{!7 dH4>B MNn ~lz'нm*#ӅwD є(HD?mPcƁHPZzMYvH:NP *ȯ!|Di~&p!]C]1_]FіOop*@`˩?wKh_=Ng*QLv;8p⑙ .h Z٠gw\4D7c10 LAà] At&}aX.`Xz;Syoܒ>) hW/i87AP 2Bժ`}Z~L*:Q*w.Wxx<$Cv H~p3izᖳp4$l: ZZ`DL{ỻC/8+NG68C @WEa\MqJK6! .*TFr|^o^+mNV4݊6|㠦ũU!@񜨮ua'>lM`ԁ$\(xQ쓷:;٪p^:Ձ*x l`"*2$i&E @RR"]2D/MG5 eq OIpN:AbV3&OVtDO2Ȑu{[uMqSvDBG[4&'Y KkbBNL 5t}n(`}PFDةPNB !v/:1s`:}'($H!iA̓;|  5%En %AH+Eqz(qCy<~'D' J,C 4=}L'ʊBe4u ŕur'ԔDT7+' ]Ub;"IX \ p,HZ.sKEU!@qJNAwS0wvK|uvJ`ͷݎFxnV[^ei ?vj)6a6j&6 <hg(T/ N7!4%b:J*˰.9][; XZ\"avK [cǾcC@iHitL52kZ-E D0=?pG ֤=y,S{:bʘC|)PoX:n:R 2i󏽏F':jRBBS>h~'bŀ2 FF:V`s.*p˽VGBvE{BWl?D)_"d!ڃOK3 8|y<p*0>RE隝׽$L8w~R=-CaBA 8"A) 5PQWRD8j0&aM!7akP(B.&m$6;I4:n pҋ+&pł`;H1`,rg*GO+u?7+ rH˝ʞڍ%_M . #&!NY!l@3NPwCS*$GJg$wK{o@u=j ْ;hAiH1$oVz2 WN8;UI@$mۧF:~ :2 /.[ j&څ_rCX\)0N?S䲏>vUٓ   w|o7"4Lh VV,#p=p@FZ.FٌkB:NK7< jdj!]u|OUȪPLR` &6 }3v$ (t5bb@i6$οʿrw6L1,:\u-wJeO64@;[:oHJ}Am -p!AP%΁lu`uT<mM%c=wj8;^xoA҄azFG(ꀩQ$2p F4:fH?ECHc ggf xgHwP`|ҙߵRbIu>#ɆTnі;'? VUoX죭?| ɠ6Lm -D/cb`1pZ00Z8A`x[mnܰ왗ScOFFJrڥZM P`  9Y{A^vϞkܔo\5)OAM5S8 K`46=͙\gQr<`k}]}}mImBg:Ԛ. L4wpȅ[+_- D3 ;i*j`Oo7o|gbRLXQ}r1_GҞ}y*0=SRINis&eoѺ{ wݱFӯl8H Y_<  g> ;ⅯCBo.wH`C)& n߲C`2p{QE{soT/S_|+{6AiFk =""P3DFxh  D3IrgZ8ʌFIN{Lܲ;F@#0+FLfYVW[{:`uL֞]`Z@9,K+ ?/wn2׃@N6*[vO:I$8 !x qaWOZmtʖ/z^F[91whx]5~fm ⧶'!6~68D@2 ým{ʀ Y/BU_M7 [w.C_zbvxF`ڥJp%y$E ֵ`L@gmH$@@xB@TڈT/W/B@%I'>n`?,`ڬYydFQ kVi 2g˚ښo:kL;}P >POg:7@9bfE_Q-$0>#fI;&Dd0Edzxk8YS$&&'_,>y-.e?S[@ L`gcq*_pb֣@\QM6N6gvƟZӦM; fR:$C"8q+dcx]#Y{OgvPLG,qRۉ 0KPWCkU`>x Lޅ9p\G79ظK?s3v&\ 4@ M`l -ΑY~c1w!)Q5&0|S@)oΝ;A88GvFl BbzyrWNp axe|yvg F`r~+8oN7G bD͏=%b 2t(0R4N~3G 6;@IDATxiY!MG@iwH ᔠ\ @€2욟9@#P F|4؁2y7偒k)mC |($p)IѤ!Ct욟9zEfƧ(HC J#\pܐ@#^:/@Xhp! K=\ fh|s +7RpA FOvxG+M#[w}0y7wvm#`X! \D{T&6TW@ s2nQ600YHq4#[[5?aQngH&욟96LCOII`K {`Lb+葀;G  Y|z74 (03Lx4W\69Խ,=S#P\S2A3=czpBE8+#. M_d7 Tܷz']e*5]|*  '(*~FW?I:?1g?!hJLp! 4 J*`5#C,\? )l~3yQhFGCZ2fMru~O5?s$1Q(H9& 5z"e4\pQ9Fo&A\ur^# H8~5|'XNg/~Pą0*+LTm}w8FG@TF `!]RveG\Wx#US:.KNzcp]Fg$01?W?mf2V% f`s 2 y..DXhia#?+qU]fUOϞlshƪdB#̳! F͞]&><G@`WM|*@%`,x)!2{g>.'̳!1PY`2Ua -o 3+§{ĀGe^"AIdE@ “>+χQ|?&*<-ViVY 5K{?}Gznz_4^)B\b^]zLL$,kL2PO.*|_qR I׌Ԥ~i".hu?}$j^~= >?3M6D~= =ӏ, ~]LdW88C@ؾ8/8c߱2^X,QȄq(wH9#955_x$|488JO;<_o8>H^/dST¢Z'YOTޞmWn,?gWέ^w$z(m.w{tkSA{]MQnUzO>gζͯTW Z,U&674H kxNV5k~`eUv7^ƽN X!/p>﹓3!-*|$G$QhFЧ#yHzٷ۝!fPRԠ* H(k$W8M,a J0,D)rydd A􊀑1@ #]ʨ.‚<#Soww Of@qfU:U˗ I˕RnԫQ `u-,DY.ѥS(+)G<ڣ׳} cLF- 9/noڡR$!Bs;cӰ=_(8=P!9ݽFh]s+oڡPgn!7WN:N<&?̟I)"ŒKir/n %MZ6KV{:!~ / }MٟG̣Nv[ `t%@ 9][Rlr<4F@~ G0`Q7 ~#G*kHmV+Yۗ!Y`* EHZ"FFVХ7:4{kյtB˭bs.Ϥ%9R ΘCT" 0ZHopT #[u p+VV$I CWC&@F@EN݆W2A ДVj&+VJK#Sq1$;# %|g{뮃ht +(XMy[F lzMo6ެd! FJ~d%)np(T'J%(e! |HSiTU1[>|E#AU}¡O(`D57 НCĢV >TXȝynv5o >/GDGLܬ ͊9ySz Yp2Gf=v B'Y7qmdbO̢E"w,:O`~tϏ. 
%ԌcGzl#?/=J 4ҧO}k[/[$MK#ۚ(Yx{[(AX,[ޞ FY(4/}K=z}iЈ 0BJPP[*{z6jY@hZ~rީ5?>.]GOqƎ<ԫk>]3諕 *L1]1)[J:M;gc^&vr\&z0mr^ G7]rV*hڼ4rH_tG>,!>ϏŢPv׃r:4v=y~EoW' ?}^{"Deh8 A{oOJ[А]:mx1]8D{mO&|@Q//@2/Fk Rn6ũP3b tJcu ee]I=o EM Fmv^%utI8Gm8{kfc Zq ´]%'ԨOY_O?:{ ؇0Ͼ;Kl>J Y;:kݹ-4`E+6[3XaKv3Z,ڲ4ZpL8*dr%* 񹎬Q|h54X'cϡ3Z"4y3E:jVf|Ciyӏ;@J ӡ)~<_zxg?, Y+xO6:~nƦ3aLJo ݆:򥗷X {SE|\!Nv!`T%qd.o@vv =q0?X-4$y [_k ºЧfEXv5>~4^ѻzKi_zk״7#hԱПy%cتqW?qhCWd=āMXC,߰`818a=o{{u˧JZ|#)?O]L ,:~߷/,.振SG?44#WF3B^n#z3n`קErҩ$-䬡ޥ]։oX6Kg齬0}yAKhjhR: Axˮ{V?= ˁQ YNHevwΔݠ:u>Gte[+5y2 rl@Y\q'[E}\|nܨ\ii& K1o)z9kÝ\Ci)V#j~a0ԮJӮy, %.XOԐcOk0 vU4(2-[T|Elb Ρ5[vikgǾR⊃<Ηvl_fRjxyW6k 1ڮe\І'@|J/ۤ-{cJ=3X9@HC k)gO3 Ghȡ̼D-n^'kZR91*Д.Y0膋Զt7hv'E%NdԊ0*`V=k`h cٲxϡԝ,k 1? 1Fr`e`7}ZNNߑM!`#UP7f,b%?[NԔS4 ڭj Ԛk7/d k!V>X1@ߏP LX+}%أyl*8vё@o!E<$`})S7? FgC:L0@jKiǪH(_?GDUAk4 <uG!0^&\(q?=oZKΥd^9:@t`9+։{F /+5uZ{;[@ %,38?5OsOq9"z+]m5lH cBtbnO"QY[#tGPR V\ xٺmU,qkpa]0r(b%2F[PHkzk.RDEvX5^3;eS hd50ۆgyex3;ÎGXC=Yb6g;FV8 :1K@ 4zG6` H+-z''50 4zh_VT*5[h4} 5390BY~z̄oy%Okm΋0;)k妝07WF=ap̄ b -Ao1>` ao1|3}J-o4_IX[|KAQOB }}M绵E,73⡗,/2Wk`Kp&K"oy`B3>QEȺx}ΡSɕojv|ޑT)MPODO= }3Ơ=x B /{~n a:!ƹc%Xnb x|W=el@OҔcԬ76C BbG| jPp31Gy7H  0Y[D֯ 0 /((h13k7]2doq_\S'XFB[f%x6$ >fk|clE^Sa dҧ'G=l߿49f.^\iےq@hѕϹ>>h]WMj598}ϕh;aS[TSцAzF\GR`z7CnodtH: ;DZ"X,6NGrc SvQ^`.Ftc hBw&\iyvNgVxv}1(Ɛ z9Cڒy DZoI %̸yno 뷮zMX:Moit nE|O-,/D@[_zui+z(H=D(H~ 4ΨP[R>zsi p>Ƴs$mۣMEC=UЧ*ـ@J-?0M˭D 8P(MoA@42HP\?|J(uЮ-+j&S|5Qbp o2-Qx|DI3'JF,/f2LAK ߯JQ+ALt7#!GsHpAV 8ĴEr J*N/R^_C`ͻCJ'9-"LmJ,ot29Q٦8ccfbx񱨎p=a:mFO~Ut7O=NYAtFp@ F%N.B K8m`!LL.:~`@X٭_nt~Z^4 "O7_:^[{^ϻOwq/N-/Xcbe&:mC1UP""mbK@B0-ʃXC1ع}\44ᎪԜ\0>{FkL-jlXsspcս^I xmX+!;ՂeqZ\Qa>gУ!$w#L,V\n{czuF}k DKby9a ᅍH8IbV ^X#i/OЋ[^Tt(<6:H{uQߴC|X i+Qr)dar"~?N\/Ai2GC/Ap6&b*^-s՝8H;U{aYNny!;szgkKB z]>dm@4=-vt MX0G[%k9=J^7&; ڒWN:Epwٚҥ: 2--=Th > ]%@[W[E)[]$"TTښФ'dJX' V+TԽ1bw =+kW}+jX[0.,, Y v@dEbdh"im[Q]ël8+ B12?7&WE:rXuJ0NA\5:9{TJGCp%]18Vj;+ʷ-z%=9x/ D 8yq#W‱cy8 aXV;[U[=|@_~NaIX &Ե=W:3N9”ZB=Or?y-aFMyde([ʆ!5]K ׈rgix}{O('k`b0_[pɑ)䥁_Lm1<n,Zw=w0h V;MBHgϢ".g  ) OyU887%&Ng$#{@AFP@mX z㭗r2E ^4hؗ!`zK~hJ3F\.H U"M?T?kBПf`EFP}He>+@Uy5>ZA.Lo iƗi0K@,l8/#XLg+kESISFճdy.`ҝ{#yN>L`tAT"$@k=33&$fBgl 4u^;1!`LSx&TDKh rLϋȭD@C@ qBaj% jm\[#TB'?>̋#VNbKoXbtPxY&PxK!rll{-j.ժ@Y0ZMrѪ(žS+NҷX/ZBU~,3Z]NuPwl&B`7i^Js[G?rfH^u^Mpe-(6PmƧ _I%A:ˋ+Tu$ǟ5TR~x,⺠6S{%o7Ci؎<{ѮeC}dJz_[_^>A_U|ḘY]VӋ ?[/.r\`Z#~j8?laO4j3X/S~m)_2WiQ coofZ{vm)[B  Ͻ^p/+/zRV^ ^(OFDK_fW(c Džߥ}&p))~^o 2Hãm lJ3/ӳ,(<R>ƃNAŸ$D[DM%4m*M(1 @ x22ur~Nmƛ&p@}F)"oP@:gYB/iC'I~St㱒N(;(_Yh  )^ p*u[ f?({/!L/(^[&x"m#TSFoFoFˋK7_C7EGG9]wkD=3=Ϻq?>Qe8ͦW tm;c㡹ˍv:b%/;(^V7,Utt@K?C4l"{vCH31$iqC9?P @Sy`-9Q9IrpVksGݴ41!ŏMt ~eGv`Zz6m@29 o?V`f$ |/`ن濽| [Y@N(߬ޢHHf(,X> ʂz}>r8Lv Ӗ`T2ݎFo޶ [q9)iF&W!̿ oJp+0)M ţK;ߨ*S,̙3Fг7(6 HJ@ < 5z ;\tkc(!5@`f.-+FkaFe_|wPʾxZ}FGrYYtuZ77HU^Ldgy@Uk}po(>v׆s(\bIX L$X9Z!BoLD>%A)OC&4P%+s-h9G>33W[˿t @om S,x՗jx)Tk>'ɴZF %^];QL-NzO`XS}W0b8yi/1/#hi 3&jvx* l%z4 LܰԜU#67V_~\ѐ"NE60*\^Oa`9jeM7\xHU9O2J!@a8-Y= yTW/03'\;z ̤!{0ӎ}:P*A`xGG_=xN$lSF tg88ق9^c`&=U4"-7QYS مdk;zUWPG՛P\)җ L43Ѧ ,qRbȿp6B-x5=/Rl+p3OsNJU4w/؂CXk_ă퓉p3 Pa ȩA(Kɧ V+YFGh_=w_u+yԭs]54tO&NlRs0#'*#?c2 T^Bi_XDǿ%Q85E^cީ=/Rc/[^gBr,;pA|b߬ Dzs鄓hImd.(?7~laVc/A 4җ=*C?x(|ڱPL9~ WFSAn6<\I/~8[اL1 FEoL1= $>ƬKh!1񁇣E`WL>|ݳBkﺔ6?v:Ǟݞ9p8 ~ ǵz&9%/ѼLϜх~ĩg"_t*^soz?ޛMvR?* y d;oSQԹھ ~#oTW;LrӮ=rvy׺ʡ/GG~zqGN᤺i)ܻ&NId"D t!aa5w?fjERD4Tk_Ңw`9nΆ.Y6|czh|ЧuԸv!(ao1bwRZefΗ۩r ڼ]sӁiӡY7aǯQSZ['Jj ҈}7m}^m(Mpզ]x7xg ^!9R;^VaAbyhoO/q7͍euL^A5dr˷͟R'i@aرU?&Dr?|; )GC3rF [J*{pt V=[vӛ.2GW\1Y + !;MXa{kI苸[Wޱ<2Y9wUmX]9Wr'Q}~U1 7ˤANLs:9YД[EB*.{ ;6|xj- 2rig3,>BE}N7J^SZ 0GqF6^P:!Գk~vBO_`/Z>'dK pt.^>USa_mc0;8 "O%]OSSVݯaN%'3] u8ax,Y٭q6:u?MxWl1X|r? 
/Ɏl'Mnl І"Vxflij_bC!)u"zkַ\A6Zyq/L_W+pOߖߵj*mzj 4Ywܕ!(Ox8vs[7[9 ('~9Wm.ц5t4*J~ 7[ e 6,zlljvfL)TWyf\(xeE)8&Q  jq-Lƹt+;< iY}ݕ'"6LC VͰSgaH@PYRϟF{{ Œ"!gaoBz:4~opftmRh٢)p#'򎡀#мꫩ{yB/5;K']J+SG<܀d; zu AF6ۣ:sFos-pf >qswW%{98~i}!jf;%ڰd?}f+.)ԥ霬 Wcu:\`9ˤ˸Pӄe2t޹r:Qvv2d(z1v' $ 4P@1衵eXkPh '#+ft[)8:CTTϥ"o\|60^M(R ^8R}vz| @k4ozZXSЬ75-I\OSiݽ?n}SEQD*z\`ɤhʍM"pL>1^^C?vˤwf ߡKBz>v.krbuP 70.9rIjyWurdn8 Z|Dy3g$ˁZan'Յf/^ㆺGyikR9'8vht {s㾋F h=;+!}5e[K׎[rElOʲwcdj~?:quRw.ǀỷaEͬtd37<ئq _35ʂSQ) ԑBJNnXCwM^gڵFvTo(;VE ;2PI1&gʰrmʾ4CA[ T5ʂDȡEnF znT0?m5T9lfh%F`ЕDr-9߸hq3sCU0,??^ f. 3ĝ@kٺP )3i =X‹`5q=`&FKQ;/\D^˫xi {>7Әq93.o/^rwOG`~o1o7>xo}]T**v׮q,@Z(= [6^G6[2DdQ'U~]s4~Gآ.)xj98rZԸ?Z~aKx8|򪑯O7<9. s挳xF0wdG '6ꙩjʍ^pe@\."/S<'0p4:ʧ}&7~\;nO(NJ G>P zwɥ⮙5{^[`'[Aδ_[~J"13Q v{;" x/q/ JP&cz|LPY7a<߹g©OU8֏&@(.n]؉55. 0Cޝo⑏h){Q}}=mb*N9WEVz>DZes/Y̽#r'Mp w@IDAT>oMc$x?=.OCx!X%]@}>_uH&]5?G=Cj:d >ekT% wH4&%f@4v^K`ffQ]]7WEve 'a4[ 2rs` @iE=zg3ݔ76VDn]oY_ ~#Zj Av^5ʰk)[A_}mjj[6]?IxTS]u\.O*U["9.(5 S'Ure6>=Y+FBB7* B=55!czh5_y r;FV4!ݿdaNηy7<_L f@8bL=l4֠!Zi>WC5^V ,*NDJRm܏?lo+6u*j멒}FU|lx1p98Àr%Jx(˖Cx;e +h~2l!grsz~1/UnZl/d3}[uAMŮٽ}Yp,$.k̇^luS~IH[geAt4ņQ(]fk>+17ZH^VDxƅ{z. n Q (y>xtjFQK9ko ?-n#rӕ}G_f p&79GX?ʲcWuНdȾ/:#)rE᤻&u(B[n[FJ8F/MjEeS>ЧDpE4NA*OA#x;L1P=amA?`)7E@;|X-Q`3``@Zbs^ s@ᨣYyRy,O 8`ԩ rpWj]}wwhkCJ@)+;Pa'Ozi />3&I ϊ? i 8 6 +BR t |+ԏb pDxS1(w4**] :7cWdA79XN}.Pd ^~ *t}5H#73 -. BW}r! {C Fh0v-U޿>$(XPŔ;A4Ȱx9p%@Bڏ=a ; 9|1+!hu 0|%AnZ`fwD B%d6eU-_8o93a rf$% (̗+ԥ+zq*OD>"D)C2n2WIYO;{}gi((䳩ܜFPa0!Ap&]qxzoaX#E~Z΀ <BdјaB](ڏ ^ X:wM"O?A1 5>=PrL7,ta vyˮR?8tEφFن9X9) nmh~C= Ed=aTl_v׳ϩN?LTDz0qzg6:]%sfm?Q̒F-=_Vl*)6 <؝tsmV,C'yeehMUei k7Zar*/VUy8#m*}ѕUzEM4Jn+kBazMgxWRkJ׿zg~'|I 3h(F0{)|&9YY˿ /^ %pr? x^X (TbA-ɨA vѫA1*yh 1 %yPxWRy\@_X4R>"d 7922 9"[ʽZsY\B}v?c;[0 ~!*u{oC 0(II|$BT0E)Cȓ{C~JDy+d>׺ͥ ¾b7IFF,J0CB/zE;_* Bi_?ȈB&?\3Y ITy2/dC~c^/BbӉw{0D+I Pc*x1_(B$J>HȆPD(/xronU #.KкP|^@,ȭQ0,+*DS_l Rd**Cl!~ٱF>H7חLTy<|c+d[q$9(>C'흟ㆀYrdU" Cug q"gD*c%H$F sJoT8CI6%az{6ԝ0IFڅTY" 7s՗ji <'T,`'ٖH$@TzhVh79|wGD@" HyXSIJI?d[" $Z߰ݿs{*~2.@?B'8*H$g~Ro M+A4L#~:ɸD@" H~&FLkH2;R 0K$#0)8v)b3' $ hD@"`FTa_=+xWIĻ뗊c57R 0K$"0?\ˁF%lBxRJ@R/H$AgLAs𥳐k1T e$@Bz}6$^gtB^$0aK$"0iDW -U#65J@j|G D@"7<% TMr:H% ủD@"  Օ3`S-w%aJ>dH" td?W FT8q7'$]D@" 02>+̲6Ox-eANND U9X"%TͫWHIH$RUU!PɷrR3[2W"po&r:E`ҜV;xƇz H+UAVux@ w{-SgX=U^^{j[{V^$ ;quUjZysJm9ب.ٲ%Ɵz+ kر};[.ɕw#QYHБ7>s|+v6Pm[gMZZgsRI1%PaIv'xJ?>I[>V^O$H޼,V'7<{,S*e3 ew8e[v ~Mm_Iy$>6ށwʡr{'W23 +Z3|z&˅߷v꘵Aދ6g3$H$1Ț{j?˾F# 696k/ꄖPUk |0űܦR H. 'ճPlp%>(y=_@ҾBڽ_y˯γ۝rؔ+x7x/( ӟt'< g`=7ؗ^H% w07jQ^ɯ=Hg|aFt)UK&\LK9 ~9g4`ذssϱWG>4Ly/90|R=Y,Y8;'s#qV&gdj-|VU8}%jG @ sxazhxLVcȖ;#3]y@Gxo9s٨f<挆wz" -;{< ЍƟbv57Hk@K_T`EX]xa˃[ ej{-? 
(wM75x,wgz?vc"й3+/fTZ LŜVdʕߵِ3`Lh`@ rD?Mq,鋀Tۋʯ/"&:@Ђ?ɽ"N?9$+y dm8*8ޘv%/Ȩ>c+Eybűܦ7R Hm%=|*V;-,;t Lv[WrylWYXN` S~DZo<{Y%lRzWz,b` P$ŊV8ȔӪXXn%@ARz J?n^TTb8T";`9`NT:Tny,xz_5MS8j(Keϐ HK!?K˜Bcjei*?@G Pu˂?x*B\}޽r@4#DHބ~Vϳ$Iڍ@bi2KoVf~m06@ YM۳^[@(R E$Eh\ ޱ$,Ѳ݇ddfb`}C娝sXϦ>AI6 FT= Z3_SOE4eZ鋀$B2]&?f1nyo{cIdMlQ 6)oi_9Bi_XDǿ%Q8Z>;!44칏~u/$W)D*!BO{ BAt hωpzN3:?ʴLa`q ' P۶J@[6 yWlvqʹ×~rG5;ڻ.͏FyΤiwg A{?xVA0L(wXr)֎#{]_$oLvQQnn=5esYWPMŮft ÎRQX$3r 4ӿ<<^k"4" }{- *@;5ÚC}ў?ov-{I@ڵ8BEds .8ldugkQRk׎uz,V'-ʏUaGެ%+ӒI eM\݋noI~[/;@u{n뺻ir:rG"`&,6 |[lWki([De#JΟOW.ºш7IZA@@ѹK5Uk~gXܙT_j*?+ _z<\`&^r=;OuŌ/TzEQD*a'M ߡKBFׯkN6,z4D iဤAm>/zY| r)gĩF|F"`[A{6NC)ڌΪ:AD J% J#~ђHa@D X3gG$@G# -~3BOWY7oM0|yW,ZɜTݢ:X}YhK3UL gm|)xodWf㩭+Us7+j, F()TR#7 uu{jJGz?줾],]:Q32lƣ, FfeL*fr۹cw{2R hT"zP*-_awggZ/WV@" a~%#B@ȏVsȖfow<YL%e&bV*&Xq`= 4ovn-yʗ}S;"mr)l E@ZI1!ФpJjWdWPeIjGZn^dY[ˇ@ Y}nr ̠hPjI̷_KlC8SO9ҽ*ۛ;`-kW}4xI%AYs/UR6bQ:U/!XLST%WwqUUA 2ER0s!*=]w`)_7m%}*Je>E')18Ypyj+쭕_/Xoo [ aL̋$s"pV8>{p?ZyNWfmͶJbVCL,8B* -187qUn~N?֐EX`w.|z+)ꓣp몯V3+CY )^Omիy55UG- V%l[H :_޴rSΝ|wР1(Ju)GKy@bD+D `YhW1}p W%jS87wHKB@6\sOxǍ8h!NN;fh5`N}1FG~1 VTVޱiMW}PLGo%&y<7ܡg?=wVZnB F>9t[:ExJ]9l٢?T|0~l5$I@r,ZKrxȷ`$L @>=͆C6#`ߔȂ~3~vn/![Tx 4O(2׵?@OSoϡ @;AomVǛO8|a{ÒaT7qK2!*B4sB^6yO3g p$*>l sផ$D@ȼ(v0h8iOx`|87o>kG~EYe>~_o@Bsߋρ%#$#:AƝV<7V4~薇)1<HA{7zY8_-}}okH7a)b%3RI p)Nt­ľJ$DP;?<Ԝ9\@ҕ֘̋)]=>= ʼRr.$H$@" 42D@" R r H$4E@*ie%D@" )D@" HS^f[" H$R 2`Ta˜9U8W^^D@" H%R`2/#؋jZoϽ_TgM]^ vnWUO\os6 &^PV|]/ r}JoTs/;~{ eo?(=l?ruTT%xEee{c q^n%3~5򬼭ٯ] ǷMތ[%[~)VaW 1&pmg>χB{WVNA,*J8'DH%_dw#E.Xmq/NU*u,~= #rdwݙ8 7W|4+M8Y3}3/آ|Fżǒ$JٿGʍ_k=? j)|V6俖,TG4zE-oxH`e)-~~6U|j^09^4FsYë`FDH%_Τ|s#C~2nt??~n=g敇nɯ(o~%{pB@aST*~U aw<3󽝙g:wUbf0WJb7ʿO k+&Y\Y0^C&ZNˎ#}M[~%UnΣ1K"@'X%b{Q"+r„Xdzt{7aϚ|ZI3swa`L]= a0E%D"@@E? m/v ]n6Mq&DC¡En Lb)D4Y4ٟN M)M "@@%@J@)D" ԿJ? D4Y4ٟN M)M "@@%@J@)D" ԿJ? D4Y4ٟN M)M "@@%@J@)D" ԿJ? D4Y4ٟN M)M "@@%l)aCe`q!)نqB0u,#{ŗ_;&^3@ D11 3l۲+ƽ)=e*bB'O2<<(`8V8g؇WOOabMhH hܿoRwkzgh]钿%x5¶wf'h(rĪ98 JnwQz:ExTXjI,7t'D % |tkQ6)MvZda+۟ʠ2E{]$E@1IePk\Uv ̈ DvP;d\f2Ocf3$D5 n&.Yj(v /WˣLp&MJX ~-b0v7kvuZPhN _G D Rbi܆I30C S[h.ޭ:Lo;f=5aM ޻~Ua.A"flM>0NOͩY"@@H Yp6[8"1ޛ޽BwWRP/nŦNdg+H"@RЏU>8sf܁`-s~r^ ?C"@@ PކLe33qE}WLrutu:ӗh Qh"@51^<$ghTskGrVL.Sʂ9IYmp DQ %Q557eupy,>"OQ&D;Իv#r~۾muK^9RskR"iDIW Mg #ՆcmaƿCA?krJ Jc15%f-occtҨg7߉A+r+9\+8{+m wh|Ru\#9&Dpc`LM3Q$ GzYH%"PH  ,eo+ڟ-)#ɍd"@ R/&-"JE19Vckw~C}kSo  D ,a9>~wpt#޺{[Ŀf=z^Sb5;饬}Lʺ1g?g'}uz/lev _ndž?[dK{ ' ncf@ D RbivŸ-z}1Jlwq$S8mdͺÊ6o`k^ȼwk}`L߳m>sɛ XTL߿m妻ÎCBIK(/9½$M;%l!"@#@JaHAyZLKf{ af'L?@x9֢,樞aţm.vÚ|E݈%"@)vyk^)n Aqt?3үx ,`~^}XeG%00I%0ny8__Vy AC"@'@JLIΔe ?>z({]v}CNgL̳mS@GLf޽;ʿ[R sh~^9Őݩk9p^bB26+Kn D<Rho 2߈91<7+MG˭N o۸ٯ/b;yL}5{߮Qm;߫pԁ,0/C H h"_L ?P2vʎCNy ZZwXWw݇+F~ۅզ=jrak=Q qTM_"@@S'8vibߦoY!3/g>9bZfX[a[ԘX׫ths+Ks;qS¢;vc^Wġ\Ʊbu!?"@@ PC {RZMirv,\@ﵬչ]6Wwc\1o}D*6ªU,9ɁҡM%bEN,9(3`TslEN M)w;eJ=fߝ?Ukg^x9~SPjkSn- TY\.7hS͔mq]4—Y"@@U)T):?o- myn6k3J_@ÆBEkp9l s^O"GQ2vIUnɞ"@laW%sxTH}3WKV %<aC㱃0;{] 5Mމ M@[M TMa9n !Gbf5tP )嗈Pܚ<8s~ܻKc 7$['iUJ  D %V;&gQ0Ah0z }_V#-ߚ\NsZy1< >~tff`>j<;rVz׵!dxePdzHjb+\r+zGDц5 r-4 2-PFBE[S̼rMV*hg6SJ+: ihA^ TÅ],kÕQ޽ާ%汸ItbNn17q>5'{`:zRpq.Q=^c [,#299ڝ%]sBZ#-Z.Y!( b"3g;e 6KKNs8 +9WNOV:Hvf^c2q! 
LBN_N+\Y*0x(OgMIr;~zg0x9S#VonJʞ4wJ%4:U~)  &Xҹh'jԕyb, Bed1ם @2J5GN뚴b]2A';g,1 [-%+;|PY׽ fNoD)u5֡x oiqM?A* '*EUom+!RJ!,f@FQGs.Ax @1ה^/-4_Śj.4O#nT [ {Hc&r e7+[w_e y-t^;2q7M[1WAAHN) RPTd.*-΁xO?0`j o'TWUPJk3wWwEeO*Szf{ ͵3E>P(y_IW #a,#Y~T6kg9'>e*2](;&1vmkb\5:_]BF D_GPHWF_JOLsB7 c9ws+}0YӹUEXk2*gJ{9Q KDB,ΜE:~Ѣ/hyCQgS^5!v˔ 8CrM,*DžCSoBALS1҉izOwѫjWO\CqC sG[4ysݿ =dv ](el5-Kq45%SCK,p>.<0#SH7o`˻)*K ]S{ƸFwV(7/TyO'._럋*bKQ+6(-xOʔޮ҃j$E/Bvh,l5^Vr #a4Ϭ1_|M&ff)%Z*̼nR7)-!eJt'D;Cͤ@D#Zk1+|J!I%M]&Z*׻SO4zO` .SY#(…`ِ!ҧC0Et;2R\:>mm1J.*- ] Zƌ}9vOs!ѕsC)+mȰ@`ZK.~uۅJn|_02 ( w=nЊ1/-B-H+log.}ӳƍ+@K7$}Phu'H1<\\ }d06HqӼG>X\) BB~۲\\ۋXEOP.P8C(4߂ 8mMq鬘F6g-A_RȦ`.9S6#Sf_[&g#ZwOXkq%=`H\)݈xsn&e ('+霉rύHF ( "__?dT#A;aLBF?'xs9ưfM)_(ۅJ sY Z&])>uźߡ?TOW#} f!iD+0=)o* [csCA[6G[.,U# (B“#L%ǀޔ|ݩƕ.sWK~]m77zb$ QXJ^*TVIIi7大9M*6K\h5 vr_L,D˴Epw+\ϒ|;KQΆŵt7({()`|.e76 CWow| ˎ}$W_¸ fKe&oZe~ehwDo + -)Gy \}j)[%Z=L3L4J4z3z"b?'*Jf!@|/c"*zN!wȃ2s$W vPX] ~̰p!އL?' e[vsC*|B}Os'g0\ߨ]3@9tŶinMw߆ENrذa;3/CW@2 %X]\ o6恞*qx0EN+@ه&ۛ"NtSlߟ)/}jUR0Ke+Q-Q\m us3c.tYQnC  ѲʱԵUP»c~󊲬wZJwST(?Zrܥ=@*р %/_">h DCp(J4ºA/'5G' w_I4c`cVeyIiRؿ=;Q8yV|h'Q-l˪#~b]~ŷrGW3P֯(FDb v5}9?f,O9ئ8[Jh>!&= |gIM>?܅GJlF;B{`;*˾! ! ]hLp@#l`nﷻtJ1{+p EmmwWϾ;_䤏"^k1ҒIVO݄ۯBK_R}G:Y 4clvc6۴+ [Ip.ѭ=5E{*-&F:X[e]E{ WYb\%:(or!'Zq2DŽ ;Wa>PCsHD?;^cNwwwP{rCNĻ0ZӞ Ev07Ro)ʖ}sq#2|;ƒ-Ò|,9RFܾD;L0|=MVJ4c~T9D44jj ;ގ.`tAE~*r!Jm;E^тcbgM2.љ1 _%.(ѓPS8_a{w'@W8So`{&pĕw|]2#F{_{͈h $PI٨C'wF 56^A'7G%@"dOV[ݵY 6+G^ "MH&PК|jjBX_PG\ͻ?Ńb"FHO D7nF(J^"&4ݨ"q=䴌$8 ů@a%#>1iE*("%1zڌ`>ga.W)d@t݌mg\XJEd$T"P҆G2fp O5)6!fG! \ߎ$z}-I6Oe~*X8q#&"Pg Su8$&b7g]"0 &uA} x!RaCd~TI. !%.7y#DajkB tU83)GtÐQXS9cm=y9"8ԉ fwc?6 @/HտZ<9ct8~ʭ8ښ f!Pew<*>Kb罻_o?^]t7]Ӧ-ݲtT' EݻѕqwBq鎂"[2VH?šaxaL|?W_(>YގQoʰ~(g]^'A3yr)U(Fj,`6Ʈ nq㢁} خh4G|0soC Ʒ.{0<G gXsEu=L|#/יִWu{[v;>Yn¢/v_?brƥ8HK*`a݅"Oelb [doΑP,6ʊQBA=LΆ,hپLQ@e? ˲¦~Fw}ev&Wp;<_ m+Bjg"6fٝBi*ܸ< IWOP d7=(|Xggm;u7pv8l4`Llcc8qM=.^X'*8+]3N#Sc ZnGs6*JOV!Op8t aog`4™1WuNC*Z/yP#_Q^9DZ/}g;#OaӽBs Sx(GI/nt0;c,~ޛ&KDǭ^;u_k/2y.*^P~[;10VگV4zoI>,U{꼎->lbfɈ/@Lm5ytIʉU.̋lƵ 82;h`'y]1xLffB B"p~9&$ tVR>jf8P̎ZYk]˱AE>b vQc)p!dreL;CO@JKj.8C)vȞ̈${ȯxKV #_hC؋fƽʼ ЊU>I +LwW*]48m(AW%7\P]ggSЦHT'e܅ލsL\?>™1bNDAGȼYO6ʿ腲.`'i-'^|r\[4^ֵ']r-@w}RIeˢd+_ &SΖK7Lr\j6;J~Twwir ENLV\t]?f_wX{FΣYXJI1viPq )A^K''q!zhcX9o6kܸ(-2 n^_8] f+oHJuaA )ۼ$ ؟xՉ̻kf{?;V&a(סgP㐓7%;/˝.<_!\oNz&uMx&LarErv,'ʡIifƕM[%]2yܻJT|䩉hwHMˇB0 `[vL@4dSL=\vb_Osp4l,t?êHubNM}F5^r=<} {V09YSƸ$<_r\7v6sSRH;,8vf}tB +}Ϩ`kW/ADKX(-0/Aģ7q6f (P5I1K\- `Wފˬ,䱠D@o]vW͡^95ٽ t'tBW%V 0{Mly̴*ʞQ8o%W؏,N{>EM 7>䴌[Lo ߬njӟ<ӣXE~MٍE{GKJ:i9ڥ ^e;8w*C @Y$__;³>3Gy*vIµ4D(`s[U;V Ct^o%9ݟ]]_I43sc{Zg`bS"11Jso<շWBYP.Zf"&sqj+P(nDn]9ƭac!s/rk[vmkhy6NB#pXslمڷxpBPQxɽC3sRoUՖ/s:ks^Q;o{ZBkHuVrzCIg6*P5^Vp7?N 3NB?k5-bnѪN]Pq%* E=\.zrihy\S% ; Ƿ?񿪛bn͏-PML4ޮOvag<'ߓטL\.0gENF8 c1bPy\S{z1ǀ'\H9OlY\s`fb9X-ZH]bChފn3㝊(WƩn>/b'5/;=19Fz6S o)›SΡ\ts6Zg+x5Edž &}Wr{ZrcТ; +9W@x&Ohg9g-Sc$f5Ma+i&ӬӊGeOIpelxV ~3&L8KW{c(N{1`6I{BƓYx+ɞIiWSOtc1Ot0>ںy?ꁫ Mƿڇ;o>ʾaT[ug̗y1\nw'Gftu V.'sWj@ΕOLdkzרͿ*/Y痨 ]jɀP6_A(1})ҟ'=t׸\$ {">7v%CLpWQ大|/1'2o,Y0 !T(po-̂?ɖ1 1O*\V DuHkͮf+olI~@up,Z3 `Tw"].8y|=\KLͧe(F~ˆ--PV.4DqiZҫ)Qݏ: X"LZK`1| l#\n0Y7|CE+^(P/d} iV驾L'Q šuDQrfT3{Cq|?^ƕOn&*-rE] -{wb(0G=rFH<`{ wf;2w4ClwwDxRYp=SB/Z !*|YL/ʽJœ .ɥ@ټz|DJ/ `-T]h?t$ i.0ƗmJ;d`C'*NZ׉ w0/{yikX7̞ZVzA. שhu>5~{"0ݾEGQG2w5+Te. 
J7rƷwvFrl# @5"D ]|޾ݏZN(!Z2x]g/eX.-h ~̚x8ۋVGֻ\$EP,-l͆Zz.]@@%+;ݹ݃volLff*Y0 >hABe@!}}ݟ(/߃=scܵd?h|oA/GxK3THlDc{4GH89K|Ǯg%$0 OWr-&W_ y2e]֓Pr>%S^@TjϬvMGW݌@ǝG(m53w7iӦVKu*[ܕ CZ\Y"Jr<1~eG+HX[ԊiBAa S+*bɼ @oYha>I3~b]~ŷd{sօvTМ[zv埪25?=KxFK|: {ZtV#L?0s ԛaąA?Ly㐬؋4X(TL̘5.`K0-Kwx32oV$| UDN1t"]B$l]Oi@8ShQ_ h !݆3הH}#&g\ڬe/f[W '&a@gMCagDŖWX,TM{̛IYjI3 k{)V$B!^#{ 9#!bwb&M\+F"ȧ^5A @(}6e]f- (NhQˆrݵ<6J9I@dz]p 6'LF>CD<*cb*˾>{sPp-qU [ /<īb#<+9BJdB)'B 9E {=npV*.d4ZƎc_) .L~*5zNv8%E;G=隐kL.60;2Gzp؉зØx h鹳S_v10S?+R JG0(Dnc&!}ȭqBo5.cP 0͐5]soAQ0WETA8e0ڌ'*,N{2&VvHLy@Շ ! ٶVw V +uo-#.FX#z=;~*ohMY 6'@`~FLv_LS^a0, 7 -|e(*CQdX(_a=\îǷ0]NeFeeoGpq&XZ䡳XzK [¨,iP:vv{¸R؎yȋ5аdWr#eiY ʼef/O'E/ :U []V~9Zf K  ULJ2B 8=a->=8\+\ ,Ͻe.ud뢹 En=IَvU k+!_լ2r~h*Kq V};忖)1$.AU{n+e]kN\v3^{e2,7;GSvj+#z7^2WQ P~j ֒걽 ?*Օa3yu;d˔5%#]|KV򮔃XG+/Z~pBy,(:J WeNLMoDd۵\YJ2@3=7 ǮK엒d&gaDć;)I_?SWgs:d^ gixC~z| ;GMz /5(|=;if8yb}U`l{0?e?_4m쏞v ߒ>Xݦ')0 IrHۭ29D% k=#K~ /T|Eq߼y ov+I 9kT_+kTØy H{Ύ^[ *;3u\&зdUu70U {'.H8᧳4™/ Ŝ^ȇ@IJfP{E:yԝhf<=&$Dm(mIiw9S&.ʠqM^)vTqdIg㓽??|X]vyWQCG?Xx#3.|K0H<4AP ~j.jj2u}ЊvHc(XXM Et#ޒwUϾƸ?֒@9,9 = c "˭uU&øZ7l߭?!m6܋.Kjb\ǂYؾKd]v,פ \3s0.Iiްˬ|b]YZJ;5]|b}[迲wEm=@ &3C}c9 ҽ6gX~|Jcyu_1Z6k8OAдQ"'o"H JK6?z tg;9R*LM-Xx ]ǷWrqSHhӐLxJux8d Qw+"'#&;T\.5oC,A)KaMz3ޒvv%Z f%K0&N,+!pY\c}H`?}zsbVMIكVXsPQ+}lqDIDAT{0kcL~]@j72ODg{.+g9VolƕYOoɒ'-`峵h+Ҧ.ߢ}VYB<xm6̸OyfzI˫ar=#K} 5k^s3bM{,Z~IEMfvQŲfdt(**_O#2 7 Jk1ͣx[μŭ0+%g[ t[y(WIᾢ[n' izo)Cz q\];2bMtҍlo~{-^.']y 3=+}AeIչ*Ic7=ʏʘR1~NWu(1b=Ҋ&y 陆r]VUϾ0Vߚ)<+ T-%4-Oυ+ CuWTꋺw7oz!}K|2هmAA kpM&[z~9-*K݉@ V % aZ(n+{SȝOQʧj8a':kG)`o>OeFyIB_ɍL yJ4'N"N@MxH D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@@#\Q|mIENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-ha-dvr-flowns1.svg0000644000175000017500000013377200000000000027351 0ustar00coreycorey00000000000000 Produced by OmniGraffle 6.6.1 2016-10-06 17:55:27 +0000Canvas 1Layer 1Network NodeOpen vSwitch - High-availability with DVRNetwork Traffic Flow - North/South Scenario 1Provider network 1VLAN 101, 203.0.113.0/24Compute NodeInstanceLinux Bridgeqbr(1)(3)(2)VNI 101Provider networkAggregate OVS Tunnel Bridgebr-tunOVS Integration Bridgebr-int(4)(5)(8)OVS Integration Bridgebr-intOVS Tunnel Bridgebr-tun OVS Provider Bridgebr-providerSNAT Namespacesnat(9)(20)(21)(18)(11)(13)(12)(16)(17)(15)(14)(19)(22)(10)(6)(7)(23)Self-service networkVNI 101, 192.168.1.0/24Overlay network10.0.1.0/24VNI 101VLAN 101 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-ha-dvr-flowns2.graffle0000644000175000017500000001162500000000000030151 0ustar00coreycorey00000000000000]mSȲ |y]L)n [N2$~{$ے%,!U1X=/M?Oտ]Υ 8v Ǐq/~yu_?:07;[΋~?76ݝΑull{8Oˍ+3Rn70$I}*[n/fڧnW_7ipzd'^mobF6ƿ@AKʏy͓B^xgizQpþx/^mDJXh >Uy1:07{Ii27x/ɨσDlt#1#1_tњ~9s 8LqHE\T?Tzz8 Ul':/$Ƿ_*/v]*:_ dƿ;?eˋ.~4s6姜EgQ~\?.T1O|/|MaeE3iԯv 1^%%/w;SIT˿w%^v0N k-Nӽp~q&W+5Za*M;𣴪$Ḿi)3\I-"LbDbb!dZ{ԧПs7)x#[Z/Nt_EA GNt-Ŵ .N#O&G%ΚKp1MZU :^hQNje#UtMMtn*3Q)58VTԔ ă -00Rk*4Ì۬rh] 2U98zк+,CWQJskAn܍=s!1LOOSt|="g<ĺFAlj#%_N¡*qKz7.1>2C!Ai&Bu&X&Q&͇ b K/Z1B5iQ.I־[ -&G-XBi  \m-Kw${҅lK xE~Z~^lsYɭX `WD0LY25=\%uCɤ$φMO=Ə:>o_U6rel&?xEJ\[yt)aƬ\w?$C fV=֜A $._@tDc(a.׉qnЙrVrΑB03 q3ig!4?en.rlZR6KmM,8$`ɴR`*QbjWD Y0ecU!0 kClJ3~2ǖKl֟V{}o4()։љ`P:7$K%h ܷWA OPDc8سZtf+*6c>.qlO|Qztʬg)Y:=Fl,Y6lfٌ.4a0Z*&mZnbD{6 dtP[,! 
b Av%DItL1W"!U QI˹FQ ǔnۣt#+>^-oVR4,Yb֎BjL Ħ%GIl1# &[ڲՒb1̕XEvךݵf̒=F8(d\l͞e!Y'^$mA]dSIDhG˘ExH?&i4.JԈ`,Y^kk=`-2ᥟ]o^D69fB;WC:/I)M1u|P-IyAL1XrW¼I Jsb64x *]lh܍B6djt {ϼ)S/d)fV2UԎ1{Aخ'^[%r 4:g5n8`quƮe*-Jgk R>B]Or.هt]O J5TU PW3*C#иPhˇFtWhJΐdRQQi7F Bc-WB#UZ NԹ.:8 !fAcD9ViVh&&_کۢ(6Je'4"ZҙtRSv(g\[,C2 pE%!5q5J1*`쑴Y#r5Z>[êԮF xɔ6 <"` k$1\IN5negŒ 3KakyM% j袁CEkM1ט/B20ifb~ J&+$1*Cg hƭ<(IƭgĘ WE=cQ-YҳIڬ7f$b 0v-M+kF{^g./ip5?۝vcݠY~fLҰAd+ߪL-mP]!kbobW73w %^*hX]D 1JJMb>tg$jο~P8}(z: #++Sً{*У-\T n <\!6J&ppq^~AE 3=2  o|!"Ȝ?xF"x._4i ,>Q6R JC#/ b,Ц*'%A P>{]d(U/ 'NOBQ*un{WSo\:xI;؍_IQ+I I-33͑79]b'sJ91Nw?)Z,BU8M%47Ef\??Nt|W7V;kCoAfO(R6*cv9Z4Ul 2mV|ȇY7FgATK/Nk1 ajҒ$xN+D()g*ʷ U?K~Z٠_fiO]FK-=sNOV{lғWJ#K_rÌ\.ܿrӻ܋S[O?J0zj0e`xN_su8{7qzṖ0i .N ATxq56f3O&.d~Oݳ->?y1܄[oO{hsW[xs.cPI@`{l6l6j>䚻Dr.$L2ٚvOq$؋?n肙%ߠa&CpS(/i,~a^o? /ԫkL9GAZG-/SFK^f'76ip96ƀYoYC|g{o4H٨5Ll$6{[ߋ@/MWUA2TśKd/aG5~gLLy&Cc0,&+]. O_wuFP#>Ϧk2h,k& rimq|QT]^5G\A#x45ӳJ0 8EDҩ>ՠ쇠k?3]././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-ha-dvr-flowns2.png0000644000175000017500000032760500000000000027337 0ustar00coreycorey00000000000000PNG  IHDR_zsRGB pHYs&:4iTXtXML:com.adobe.xmp 5 2 1 2@IDATx`gRiRa]{}]AAH.xK񉍒`{Wx> Ć vD!@l\.%wI.p٩ `L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&dd3&Z S>d>;_Q,Y֫\> WMgc.O7Kl +@=zҭG{M|VM7K[4%X/=_mlg\>?5)1 樂Ҥ)êQ݌r>5e߱+E#w8vGQf"yv{'>MCCՖbɢ0.B=dıgDCvkK}aƧ>wu=:IP;I"q͡;3:Mg6J33דp'8ϖ.qk& V>n[i|i996ě +_Y5 Z{ֱ[nÔB]a61sd=5&~RhL#{󍳵Q|ЍJaaMBQ,Ӓ9~;(˴6u=r? wPj 5Bb%;NobK!5P@M&?R"vL h!@?{ 0]MB+KJP\tD k+K(. @0XJqZ&,t| @ )k ]J Ž?aPK*e#3}|]I@ļLNpbQ#Os351mJ RlE2򗇲UQ ,?P]*9괃[ruX_ݓSR zƆ*Rx /8=Ћޯ*|Zhe/Gw?K)͜HeTezchE0е}owikYZy*ƍs̜BLٷYbrVVxT"t >ObL<ɛIJG],Y͚^F\=--Y6@#?9"]ӧnVc-%^QdoС&zcLS]d( 4tE=}~ v̛Yr˳xǃKu 9;6`@ Qlh,}AmPxгG l(QwߺC@P }Nnʑ!5Ž1J xгY+lTS GpCʜo^Z+IѤ1Aڞʼ9 Fk$q,˸h ;}~5Uݴ5l6lh!575y⠣Gw@*?*{1%tT=PaTj40~{]KϑO U 938%`A+Nj֛ebtCR{t{$K_@(0 u3 (ɎAβK›aθÓ0JA ZBs1J60;TOof!a2eR#+u͗ɤo ޫggO\`ꮡ 06Nq fp4MN4Mq->&0y>Sa}mDz]"k22u0B\3iQπq7V˃Sԋ2)rO{d>Ff3Sykgwu?>7 7 !Zf秧Bj ?{7>`An\aYl}vi\-`v*Wµ %zw*ϋP (* ;vvxR 1/mXxuϋp}-띮_A0CYׇ!abYLqOG X.2>i^4Kh ޛ=Gпk7PHc;۾]ƻz ]xI ĻIM OB1qZ oBj]?@zA0 ?9Q ;'Yܷ](Mw Vxik63:o^>3Q_R7&a@,E2 {@ yyםl^)^/18žJ ^06l0V\i)=Co;YQ%ҏg2';c]5ؽ \Ŝ4J[TCeyR= a$?hY}B9GJ0:/Rȏ7&-yAU@mӻ; Ly~dke oγvJ'Ol'Pn/ݸ -SW>:3y望/H.0 pae@z1z!7@!Ǿs./.;Azݳi?9Ϲ7cI'xϧaԣk:42L׋oz^1ʰ+ƙTn0s~(36)\eZG9YWx`h*_`N{ռO4y/eCU̔5 e|wC 󴬜[eE|gbɜ %Q~w72׼sMK=tzFw#3&Eiuu 1dqB{>Ep4"!lBHl.vUzԭx󙁌vp%AIzJ^.vRr+-Ho;<)s]AyUX ƒB.! {CG8VL-ITQ~kmxlC`X060 y@"{Ȋ'"℮60IH+*[vYUEsT0#ɳ'gV)d0ΚHYc]? 
ǒ})|ϻQD/IJأ)`AÓ4Y(7ֵUf bl&:T U.{|ڭxq"eY͚қ|[ Kg̏!7A /KR0U?hE*/{²OKĊʚO[g:$>R(\]:c|ϲ^=yubu,<5!xe)]Fz |c+qB8: T*hV8S˶:^ y_CBzNoǎaꧢ"Kf&;ޞ Kyd9{Zc[HBuƇ$=7YCpP 3%m;?4Vws v$ 3֮9GñKg+Żzn0DⰄv1]YB,D.hS|.~t 8`[9s7Ot3!nWuC¬.uBx=A_j~/ۚP{ȣySB9ɧp{:W;j'O&AvnM]paeϤLY_&܋gɮY99ѨgB3 !S?R6TEmg+J^|RTksU| ekzQz9O/ꞇU-^ܑ釹jcNbJz+j"h =jFZD2UD+B 7U֜ZP7ùBSb!>bc6)lwJ Kn k Ia  Qyr6Ɲx ҦN@n-n;:tL|+j'e?[ѳ3^9.1I,S|K~p0q܈{s_IŐl>}@O];Aߡ@T:[t氇-0zy pOV8]iқvOlb쌏fu]+lf}~ 5wO=W#[`#z?p I-aIhYV5_3VDV|T2`e`ED>-ҥH}I`>n e|8pC=N~n/, QRa0+0[HDbWnP-;Ҝ ua0[>i`?EkcX?dJ&;u&ZL}9T=n-[6T>T3m%S~Ean(ЅP}A>˻oռpr;jnp>hlxl sfL`壵)'莦VAAd3*|wa++F9*MNt{GzHmmOBi(^ m,+{M'+a5 eʎz3Xz3B/=GfaUhI}.1ij=v2?h/T5ԟ.AJ8ɏw9okIphd)Uϲ ,2/FgC 6{ƞL"9XlAy=r;H0K@Š\lĝ i&uݙ9ՎƆḏo,R<:>J] |^ {(%߻0ZZЯjͫ:=[U8I,.DF)~[Kd?.iu-S w'@ؠ!W#}ۑRb%-RLb?n[ ]EAӪHHzݓ&jFJEb%JflB Zڵ7סiH?]Gn6Z"`=~hB%Ezo;{6]P+yMvDa8{P4H^Y#?;izJCbU 3[溧!;h0xu؃4 ^rcC]=ek!4>-qUmQYޞ Hi?9Txz\z?焆qi0:/56b `@~0c>A@*Tfbi"cp۱ f0x@ yb*;^ϧ0C!2o Tdܔ,3JJ, wQ>蔛ŶnUa,D9ܳ5D$_*6=~ip^j4Fۚ!vQ~}Gmdx`J??3 XmaH[{OV*0LHhvlF~ i[r OvZlPrT6(ĸT'Ӓ +,{ PK= ߟ-o"%*.۷{Ngý'eCmkRsCCx(͛4&\Qs:{ďٯ,q2IIA&* *_[ۍwR7*{OcV7-X_1E ]i ~w] d|qYʋͧn2 P;0ci"Aed{1)F 0MiAd5V0]J᱇\9[ i؍<༊`bjzN)vh@Y[Dz{VS55vn5!)Jy=MK+/|]+F.^1vlM4a"?T$pb^Ua.[ w?Զnt?ŵiK]ӾCI+lφBDC9feCmj*s0u:=sl[;pG=BSPFP=W٣hR#LC 0'm(p @h !&MDU梿/Qv9*v t@Q1"I7j!"rC^=I_ oW[(wKH&ѰJdE˩R˱3\v״_gxa/okF _7O0WHE;z8'Cy{C %r:vJwjC!sԵd 'XX;.B<ݝdO xK"wPѥƶng39v:Mҗ:pՋ2Z-JmN b 8urWz)Ey}(U~5-l2(F>I 2'L˸ ys"Dh{65}Gd% 7=P.h  AO'P|; [Lg3a  O@>xy }C\1 w%V#n!8FagO v.xb37D݃" LHH,^g/V/-+QZ}^46^3D 6TG ccDcfwLTki@9& DSф`96DzF0ި>HM.m} 5wȡlT&huWӱX! q\qj0{v? '.;J=TCf?:)?5‹ :ǡl\ l^Y{WgP1PwvPcϞ }}< ij1v(/CsQiWeOpi>5紉-]7wG7DQk^Ɨ1pPZ7[iHR-P:B(v|{&E=Xm)Oajv$[E/^ՔXvªT5'J@ 6kyLORC8qv%%䵴cޖI1P~/eb8h|\87?pRt/9%5G{W3en+Fj_\a?eg1\HW-M#"-~>`-CΡ0 CtªP,P>Z8J<l#|~Ư "}bf.lgFW#\X1&+aW[41&09@mMė; eapE'0&Jzh' 0&H|4;cL {̷bUWL]ŌM^jb#3 ұܖ'gэ͏c^_쉟gL9VL j8]L 0]@YdzYJuaq}Egp X8a@}sfo+l 0&##M"OЂuŚc*gf=@Mz.l1 +<)$B/{vx1 *tˡ cu;^0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&X&@ g/Ogg>0g{X֔eޫyoϞͭa`õI KmJHkV`ʦwL^ V7srzXH`ږO&3ݣ Ӳr_wKɯzYvpX1Nw8o9hór7-sbq~p$|lL@i5mSA=03[B=_}lw.6{>{=O U פ{x=W <ܡL/\M&WḵΓ5lfi;AzH8;Y tىG;wX ]-jfgg|DaeG+Kd[=Ww a&s3h2}N\g sa&HN}7~vnk<y?d?6i#0SQ0Cc31,eL&t꘣'-I .@{"\B@8/d?-Mu$+`^Oot.w=뿶̹-weaYla!99 =# ^wi>Qnb{ hCIQh=Zȷ<'n1%*Ck((sPtlGqR0fRGgd99iJ!=qߙH'3zXVҲH H0+8-Fcy=irT;G~#wa[N[#Nz 0w[&9C0Xygg̕G#?"0oYh7!z0|oͰ5ǭ)=+`R1d"S[ ; <ٱK!x FE|) qodZ}i k!Z^aZ/[,KA׮39ƙG}@ʄ"s֜jU$[ 1Fqz$#C_[OMr74k­e s*jYg,ZdYփHˠI4GEyCyirqңDױD,=\`sCK`cNlTzʋq>P=;ܘ݇A=d_b'r2>g=H`t^$\4 a^v@וGoo²ᾪ Z J;oٱG]7 u^8y}T<* 6c F>Fߑnp,W Hyēn;*YCO*Yi6brA ¼-{ ⰬdTKQR-G7Qƹ#r=/ug7i }D1.K1$(L . 
NśPA`e5Eb5W=DpwIZs]>t%fʵB!AƾP!XLjǭQy$y#4Uy^;z^EJGw#6 gJpgzܿ?a69]Jh8'덵W 񍞅JOh 4}ܑE0?hruWc`v2ُ$;2X@y໣hO.*<2aT}Ɣq5Zi0<0o ]p!'f͙h0o]&j%량WApm@>y5QSUn2UԊXl]ؼ1o]+Wl/|Wt0VCjze3QʚB q3rZToг\Cx1C05sa7ф븅B WEzusfEj߱'ƃ̄9E; ?TêSusNjHTe7;wu~]W=@b:efo IOp50~4'-.nPh0[~ H9T|scs>H4}o]7[W6k``l){+"Y ?l@x|¦LQhȬ+Kؚ %^_V[Up%f 1JOcի4D@}F* x4/*Z eoսb#mBYS?˞m,i|y3҅#ZoWeN[cu п3wU,)-,ڗ_ȼレB`KG֗ҒeAPvi6:#' I)Γhԭs#i)|*Vno9eb^IB4n?2:,Vf a 0 3'|S!?3%EJWYQRiҰc_4a D̷.Q6'IefVnZIPjOgo3I ;kb˸K$R/oPVX1a-aiGO/͛Aztcu  ORxa1aAYנwMO=ニԕc;] =yտ%>ڹp!`hu;p5hډ}Yreyj:G7Zp<[z~'nidhM~N|S韃.hq&/pmIJ٘1-Cd2Qqw;ڌD'1Lg+/WB0R:K} í!_V֧s:wt2JDwx} +-K?2Y~91Hi q\t LCmCAxĊ`uǙf,[q-#O;iVi^՞CQUnVlr?,7mGn`յ;4gB.nqB_tLŪsYRK<3cm>n.b?H*Z)`SY7[CD,."|9r~-6y+aw eX~ȯM-GSE m!"W-Y^K}ˍdy7L 69G(#/9 hMէn} O`8@98c˅ V 4Li3Ǝ-qMj!=HYs/JN]?e|R"mvri84 5or?u/ yMd󰟃&NUnhӿ#nYfVr›h)4i(ڭSCC+hß>n= ٤y׶ɔא.hT)w`8ۅ6KyXoVbX#|'n$Rl 0&`L 0!p)YֹJ; #ac+iY;%R} Mjt^XGcLI@IDAT 0&`L 3'YB<+?`,Cҳ C͐B[=ve,g`L 0&h:K?Z.ZZd쑧m ?U1VJy1 `L 0&`L!9/@b9M.?siuXPe?`L 0&@y;VQG&&jeO.@zqԱb`L 0&hQ$CYErm b僖Ӆv&O.a/`L 0&@ |{)j6C>L 0&`L (ϑoQ+jv. i(Ι`L 0&3$瓼OrQZ@弁`3&`L 0#`J^c]{&`L 0&d~IZQł3&`L 0& OrTGGTc&.F`$m.աc҆Ǎ+ŒK&gXfSM`d9QL 08"G `@RwUt,b23͟=}e=K O H+VolxVȸHH8?UPfGJuy®`L pI8l 0&Hʹ JC&OIm$cWq]) &IǧgG4)`L 8hW`L5@ʲ Eޞqހϐ+oIw(YpS0bRrʈ+rkb%`Cy&Z!~)]G)I (sS6,baX 1ӛ1>-3x=V9GzG*a)`!dh!(t:!{K>W:c'gYs;T+OahÎt}btb's:ʹԘ9Crrzb5W)q *P/5O蛦!gN Z摚vvGzL]7B̆n?{,9LuW$L}e Y`M0:tL 0&PA]qN`L 4@ 0/p>7ZC +}akI4yīJ{ ٗ+>O% PKUwI1psXReI7BCJ 8<„5D9[Jtgtq~ad_chY~iyy>o1.H;#naYMLv;4KMޡ:gxfj̅H$vHc'CkϿ!?J/ !`w/R$P2^.ZվϔcLBcy-݋ M nvԄm-%}CEs^35ss)F+jLje`8?=8ޤ UR#<~JM[0!4 b+~H7w ֔U};X_3&|p.`L 4 5ZTJL =yk81EQ0ۑU80&IY`5jo$k nZ@{C+_==C))V't6яBgݎN68aΘ~ f>qhYSo)W5讱5/Kxbka>0R-6C"_*)ͭO=*V0 `m@ hChXi2플y䧰p&Qwu?N˚zFcq%zOU~!`<. ϞWruی1{=!TѪS Wَffx*;xUIR 5YYY`֚bߊ!Y`gg$*;!{ntL 0J{>t3&{2.r.LҳrľϙBlu O`U*ZItt2J>wx} +-K?2Y9D'Iyóh`5*-NW,{W]nսp.1i*V:j}ZYh]5#{)Q1߃U~Er?2=g"t{Y_reòrnsST/"!-.2:K cin^_n 8`_R-Mja `L69L Ć Ra0Л/,3z%I%p =eͧyTZ)6Z&oe~T`˻yhv<[A~,ӻ7isN[W^OKYv}k^| =ɟ0a$g-U=:oa~4V DQ17;y Ǻ(SCf 4/;Y:)ZM; [׭*XA%=We=:7U:a5`L@Vt{|@)t 0&Ж u!v_܈!AҴIC[mEgy;|8Px>AkRrhsܭ׿ڽz=mZB,XS?oMFw#ssXRZ'N>m 4-Uh[,%gL )"Nd ǁ 0&*y0&@d~v`L 0&`L DD0%&`L 0&%]EK3&─sF`m+msh3^i)T2&@k!îZ˛x2&`L 0VNV9L 0&`L`壵)'`L 0&hXh/`L 0&Z V>Z˛x2&`L 0VNV9L 0&`L`壵)'`L 0&hXh/`L 0&Z VYSy&@URBo-cO%v:究9nUe=]Lu8KH٧[c^:-iʷ\ݑ~|fB<' 4M,ROI:uHI>_)ukka^\)i |CiGf[w|G]8OE5Ӵf쑰#TC@RaҎQ^?Zk]uR~4:Q{绂Oms}[I Ӳzꨃ[$% )4oVϿU}rhںҒaOM-X CT(l@ \͏O PҚLJJKY7Am:%0fڔĎA=ztqg> >5 6nOxb ?RB$y2e+Xw͙DeJk(O7sӄIwx<{Mq;oy+AR,j)/ܝX)h1<ȏc|ȊP_by6P"8O{X9w"vwe(cybB|TQGg8;Ar\&QqZKt5}q|}>4Oh䳈Izu$:t4G]]rz)漣RrY;ouG{4Ԋ%Fxy 9HQAy}Vq8B3;Q49FG +YJ>ԓGO+{ox"#wy*rVl %@eg㱔`)*(>TQGȆp:!$ISכ\GE^JZSݔ?^繃V%!EjJcjZo#m-&q ĂS$&'<$SyG,v$DS֛\G;6xT>kF? mNQ*9vˏy}Nv2{ B<~TzTsݸhw||D%H{SL\Gw>b_b™ G#ܽi- xi>qy~Fl 4St8l+D zlN^'֚:pm}q|u?ڹ7l\ uE''*g4&7E!t<; xx]]/S<]"x"ï)M'ro{׉@}|P|bϹj}YU8XDѾW}ǕG4]Gj4& a!ibd@[V[DIYy@|Oaa P LiƑG"s-UԕJ~_|^>?hlMQouM[k I>t{6|]@<}!Aʆ9ltZ7bp0ָo-equb_:tb+§<ɋ],jFux|>onG]{cjL<U O^t5At X8K'U8RB4.aa\gR֒o'ɉ Gck՗/ ?Z/wǨ9ԯϿ[!G51~=Dְ'g{zD׎ֿa 0ΚԨu>ʿr 9t(-/+q 5 F|7/EDuPߞ]3=vū@qs{K+|)n4qǣ^)|VU.zc)-]T|e2Al/oĨOEe(ϾmvV_։/S/wj&mK ń'^=n$(>Yx(3H`戄Q47m\f:wH#,vj/YQCX~ao`G_}HxJ+O/jkpl2cي5⃥?!^-Z)GU׊?VݸY٣8bU&9|bɲdk8߇? ?^lQ"ﻻmwD}axig*TvkC{VՂZNuƠc+5`GPv&S1{Aqyo*&z4)蕸t⍏-Eb !%oALdNI1'98ivĝל%:O}ԚOM؝nxJOVx{N?ZL<ʇbڍh*F%*rݰ~]cV ⍏P}"%9imωît,Li]5U7j=Hzcrqpu )zc h|%q>i#XJtx/l;KEa#]B[+vAxɢ{\{E\}α+V|:K|z2][&oů-PqUgv9(g1>;y-6_~7(Yע1j= h: Yt(HЧƫBOLf4\:Hix#.k1jjՈ'5'>lюH;/z>Cbr8`^vCiK8 ŕU1QqΑAJ ۶BuQE)Z8*[tSRKn.];/\1E{t9}[JkaPO;XV:d }?ro@j؞~Lz@@=E%݃B܁U#;?M$垎aN |4c|~Ҥz:8eJK&ˉC~S*zAwKHGd7(ZU8z͝aOR<.'n;3C=tМS+؎C(7D=0?H,0,R|sZutj*BAKOS <[0jͯ.|T299M}Cb嚍On%Bsw;?Ac/ q:{uUgFG]VqƱ{/J Y. 
,,IHHHuz؎cqr{ަާ;zM=<7{ysO[0mPR< }3ںY1"&`F,eoU &Ew崆ؿtqĜ9z`'ʍ쾵A-{n尘 R8f8w?Xk߰=]ܶ~ݱO-^ P爛f.=nebT`CXmPf,B6s`fǻlK8`cFmV{LL`_T]&%~ +4$]*{oh`k,pAɣ[I(3n>\^h:O6˔駲W<&2()d?6NAxɌw#ح=E5TWĆ`6]IiAe@lkMLwx8}>H8-"fp,ZIqvC=2S a0;udSd1>aaW,[[&;=^B % ].ew͛:.pw6 VO)c6zgDZK$=̌ܓʆw/X9$?paZqCX8:]lYppmyA~Wn˖𩴄]+5A.Òo:m.f o^^5 XC+zj\gOqy`Vd]p,t+"BǮRpVowsF*F!9΄8+|\J\1uz%5ޯZN 81#[SQ_<uǏ. }Kk88\}y㕜% N A$\Zj+㯟 L}4x'o}H?ڽ@g5fQgcR bFhi{a+DQwym/)eӿya-B+&sϝz]{y!;L*ngnpPZPR*} Q zo}9&.7$q/k"C,pP3r _LX0#^$Z jN.֕C'ޤ<>W4y7_#5<#J r/I͇p-g/\P>۲W(KxvXy:޿!3bLٹЎL'wq?N}}W LX!h{Ye8W@>s1]5XF63)xѭT~7#iGu{2o#aiǻw#/k[EE5 b`"/CK̯,1) KTЮM~|01+PE ;@xz!& Z9/>D;8M>BeC}KкoI-ku}bj =G:Ƣp*M:& '8⅁ΗǐL.PtQC NM&:)lsDfƌJ̫aXDr+PPPP/ | .p13ơre|?+#}0PP& ]VG( ( ( ( dH[GA هHQ@Q@Q@Q@Q@Q@Q@Q@Q@Q O(.ʆ:jw\ﷃ9&Y.0;~-T/Z8X2[s7жD]T>󚥄z s:QAnQsfN݂Ư+6R;+3'Ѝ/op_~hk.&rekkMKL yfrxL .Ϙ=y9v8N5遏^|_$RN P{Bu}+UG.߹1/Y&wl (Gli@Gzh8} ᑨ&.j( v5K`sdYmB Y +'wJNqC'i \!_ZMkΣ"'흍{>ZV('eı Bغ37*"T5w3ﬧϤYQ`sX֧n3'g<ʹp B[κܖΡD( (pG qko:+~^?|׷  zSXï>Jˡg}}yx{->*J5ZV*-=,wyew86;uW(KiqBhg /9g:=xUtL+faS@)N]((03Y`0D[Xp,69LXm{fO?~I陷l5ekD*-Ue>}sX @;S*cyts}X<rKd*׷P3[30wXDp>_KX@eh" z (-vmE`x,bzqllٜL71.KNJ;B8(>kF{~T\si=Yl_0-O RեΙ<-a%i1~8?1_Va |úr)t%w?ީ  zYiYwB(ryVŷ UBdS) ( $J|`eAxM6lw~W-y6V@Rǹڿt1M.|HO4HZ;\b&$PȮ!5}w^~Yę6Z8c<}]HBz~H,damKtMbC8V;qjbWh=75yKVz7)L]((0{n\1e#| bFV<@Q _)>ئ4t~~?{1h=zf>/p+f[Z#JL[N$aEc L~7 ۿzF&I)ڝ 3\^8wvչj@=ןOg%GO7`FB^`Bƾ|c̤1xuTDܶo ۭa@ީBk==n6~e?٣T"wQ( ( ( fSߏr߯8`fxGOLMŅ-6X@(4*Ζ Z! |W։V&28]Y.[-c ptt5?-[x3΁s86dyƱh%ύԏypBDpmiwmdV(l3፵E8=cbe%AB ;kӜ(ΘX- N:Nt<(&`D\Ģx}lA)?{+ .R') !uԱAV?-[C=$[kΛKgZ;zdB}@LC*P5ۅP["@>_?6gduΧE;j3CċHf7G_|~b{ rZyǍK:)pV`%UicEǿ%rpG~۹O7"F`~O?p3ľ0CEI!}37YbdD&<'uGAP|a4w_gZ:_~kog #Fnor&6pӁ)"<ϖI2,A8*HJHvJEEEEES@/'X 0;Ćʁı";Mx@x^q#V,osվ){d|uDQ@Q@Q@Q@Q@Q`P@Gf&8P`'\$WGTU?!vԩqYZ~o 8z,0[M]NsYQGGGOXg>5dC/(r#RJ.,Vs558P3ERՙ* T;AVocg?{R7m%DAnR|=;1Je3Oimw[ڸlI+6| & fʹ5D]Z)Fp"ŧ(8E' EE|q.>yㅴuQWcSGTF!g' hUPD SiT;JMeXJM4ýyq5rs#{RW7[[OhH6 ,oAo|RBSB8:jŁ;,&_4wxߑzB%ݦ.v*=;9r1oy՜|HAXxXKo}ciR^52V"sXRnXCc&ϕ|+rKF tOBuX+a*rZl{ga߲&l!:}{=BAڶLs@|+(@搏!. ]p*]d6+,oS/x j{ ڌ*FVC)]SUN{9n'xqk³^x( ith\u~>K;VNM lpv ARKMiʡ6E(/lE< 5F;{O> -og_ܠQ>AҎ{۹$=8&zTYZ(|( MF&v8N/ښ |c9^Lk8Vj[%:Jo(U틅Mcs1ZMy5GS{(J٪u_ݰnb]p*ƺPdS秊HG_aKlQqG8b Q9DC+/oǸ'&^T\h.i=G Yj*Kox"!s\1gӿ}6_VoGx. 
"YϼihiVwkNzݍtE hWiqDA+Mhΰx$HXy=OE|XܜJ@Z(_;rų' PKEQ&ɒܔ $-'qidjg_YV@0N< 5P])Kov( Hjn}jq<a[ְpGH׾;Ss>[ YN~~WI{D/ @ 8.X#@Ɉ;K#() (#SOy|ԣ|xݪv9(y dQ=@_9H3 K"{[ۭm,&}U4~C'*L$fplܯ֊L8=m)Ml ɁY[6(54@!=_^b:q?f<;%]r6t̊y~b'+ ͢}fvm,(rM scƓlݑ *Pi@ , rynܮ\@vLuTUzJQ.Px m >@V|cOߡ(CO T;2AU>L%d d I.lxٖL0zf_ Ⱦk5evzdo+ì0N%k :]'QJSPPPPy|yZ&` La> ,LL:B/}{`Ѿ|I TU9s68ߵ'shq.gԔ/ϵ'зM0: ۿ_o=)E;td@7]׎o 4VO_E=5ҳG2$+2Mi(x.uz7-W=4Jv|GziKhzEho~_Sn} xfh y6q8hg<|ۉ#w\˨ ߶& yi1h5r@p 30#4Sw-D[XDy oo\ɶK ^G_fڇVP@mPHT嫡3q(;3FcE9gϩ,Ͳ LXh'{ ,U@A<:nN7-u'KQON$DG~ܬ}^ſ4і/R++i<&|.:a:WdNnENR@ *Iji6| "C|t+Z-&*q:I1n=>e n=OQKG7ukX2o([衧l+:O)3 !QS |._+K1,u>9J^~G E+Dm0ͪ0 IV}Jt *eI[f:,2@d2S@ ~ im&[E'S)d8]n8By4XS@qCmc&&he_^H<:s59QW1iKT8!1ewk,wY&#/s ];WGgXzKj @Xq/ش:'4送YF[J ߻V2=ښTUΤ+ΝMV5Hr-XK.Hp om :YR9c/v%25no>t^VmKoO][,z޺e+a2d43_kߪd*̳U@ O9-tsF$+nTR@-0[;Jj 5A#Z<饇{\f0=w6GyNSG ',`5v&\v$S,*.rn}{Mm4ٳ&z!:iOs9Ci?\;Ck}-GڃBșPb:~T|?jEs\Ӣl6q}[ EJ^DZ⣆z;-)vlqIxo% ׽sOzۙ럥NpNГo|Hl@ju5tO6q UNвXxO^M,[ɓǏlӯVV&6hRSl?If&fj2ʙvK+RwA/M$PlKIfw#/[.]@W77Dt@tGxGmaL/+N' 'Kޛ'{}R磕kw(˦~Iv@5Uv4].g"|x᰼io`D 唆=u-!Π;w̏ Q=φYOGSU^ vNo=K'OApA Z3\Ydc+;$ TO8vb(o/g"O/!ެJm(UOG;RhY~7b 5feæxD Vo'(D~@դ?>Lw~8|Gtap?[ y}.֐dG}7UW?2,G4@٩J愞p@ZSNTZ}Gs7uN`o&pmWBTQ)YIg9E-EF*?k5I7OE[>c~C /ݭdmS|tG?qP:,?.n5'a Y1O\] _x>>No@/㗠Ϥ}QzýVLYXT9Nmz=N R)`+b.g ֱP7=tX}<]saZiR`t{9LVNuF~|d3xO 2H6Ҕ)(pCg`sN[&2_:o;z A$23:V|Ea)ǀ٬L N50}卮HY?x/zkp8`l| us7\lRϲ}c=]&JJS9!OL^>xժ^a 4ʦ~Dz W2M} \3y*O/!5:Zce DBG+p+xu?רXbf[xꁗ p~)!{Qvd3mC7azSw`GOZ@9>NpWxvbB<HY^챠92^B{O%oŦZ\fz >ѵVOuw8}\n'iG4ŀ{Ro\- 1Q\uz|t(n-=IG4/Wg󞏕#uCc9^@K'XP_ܧj4g@}E(2g1<yo3O"P>7N5S]k@籎ǻVhg_=;^5j|ge%0S3qLD$NLz$ &xogӜq}f% Y`Tᓍ <.I\n/a|&DAL2x?U3%MwXp{T 3ue&8irZpon9L#fcsڝ ~1s~~Sڀ,P~QJYHQ:{FX+Fķ V#AORY#3̕ c<ImH(xd3?gs[Mh +w췘㑭P08o`e~(hu՞駀6 WHPywfy 0eJ_iA?S [bTrXX]W㉤DKT.OgTPzkgцDA~\g?|pz.7ӄoQ`t;{eAV:d&/g[L+K+n ;K'P0Ҕie|' e+($* 8`_++f2"WO'_z`kxhhx Ξlskjp>C{3t[,z " 83K;9} ~N pckʜxjbMY:)kF\IFgvomε·傮u\[ibQ8a0|m-nKVI4u7pi|,s<}9* gjnWhP!0%^pvMpHGG@8epg>k#"(#9we VKPQlg{lw$%B*tjnWfDƓ(?!i )8Z+c=#M*|}C|ebq ?JCON"(SdMw-EeM(#'dRU,𕸣 z3jt7Sa OAF;,[9 ʱXfDF::jIl%%dw-q֢Kؙ+9w"T>k= R>Yr[ƗZ"*..N6+;w5dNL"K:{ AN#`W{KQӈang,~ =~3a,p 84۵v;{<\Qmc!xjxlp⣞/$_Y _<hd~$؛ Ol6rrps#& []A⩥}t:L5al;g+pBjqۧ,vBcEEn ) m9p.VP>X~ `Vx?xPE9%P%HosRɷz+z詑gK KN@jbBJ .t9c)n{3f4QS4Hե<5;bXXcSB!<;Ѐh_YBM.3''_ xJg+  Y@`H `0n7sG;BG(ӣ^ bs#[:9唟|Il(SKFS]Ĺ-!󆝕Th*J,aHhDB*[@`x F\`qi<? xCW8DX-U<cJpN'JB;a3HP<`-6Q%)Ypr Z(2``5"D&>I~Nyg:cK efI^(8> Ath(L]'>9CKw+-U.]'t7}:G~'5)Zws}dgoa/gNv[񆿺I㋍tc> ?D{xQ#}_ \յ$'%ҖD鯛 #_GZ3PuSh3Q/>N@z)T%hVh_Mct:js+)7-TT~:2H=^!w\(5wсo7p)EN3?z=OAў0L;I49ik)$MHba( u˅wP2u?<ȩuVS5 @DtB>.vF4`f |5*B7:~5sT_M)&v ?tmXa zP![YAvc}"5/!Aڵ i L,Vܜ?|'uzbcIH^ c9ޛRk\ݪ4+,bfaDJMXA}/]./|NNK&iQ(L)>_K|<6P<b1 mTj7}/tҕ+星Nv=vw |D]P*Zٺ!yRj68~gjyFݷ0i#!D vbhs.'7EM*ZVM"vv51F)(u6)mCX O/-pN 0@hQO5EﺧC﷞Yj<PV- ]Aw |?HL?v? 
t}}|?L=ayeSht*S<#.pg3ZCL} bʅTpU(!px+!vp+ó`AT t nh!iV<O@S9p_ySی$nB!*|8NV@&xýBՅL=lA׆ֵKso(zS04k전 !bwjuo KV󳻩IT|$|*XYa7gu"~QCm]hNG͞t tR!ADX\CS tF\ƾ3maҟ<ʚn|wԣHaH@h9G8QP")gYh%]VNWB~  t;F[֮z.гH `wx } ^4OI~@[5&1ʆIaH8xkkم&3c2I-(]h-Kkҹ>_W 2xQ(Oq<M嘲cuv1e$'qavZ߄u~s?]>Le ؅]s|Tn', Hd]i2Dw.ͩvEgWB0.($B`: u$ѡ*t"9vPRb A|a)Q 2&"]|%dx* 6`A z\%5/zӁ@?N^\l?vr_c1]71.p@9~}inmj{l= է^"'}@|/C>ڟivPO.ʇ{w##ta(ͭwjv(VmhqSƓb1p0hkV]H~v/k;EF-:F ǁ|VnY٪|Ϳ>+=:/7KM^:wa K7˕~5ʇ^h3eܟ C">R_" C+޾m=4Ģ|s|(/A*i)Po8 ϐϒj6 #z LfC^=\s~ƔkWi˚Ux$.~/T9Vg#P0ХMe3mhz|;MGD ]̦˺ѭ֙ǶPWa>G> k݃(d#ٕW?q6h"'X68 F]`CWvx-q- %!٭f֥ hE#w"6?Ե< 2`0ydouoa]]A_ͮ#2So4EjDPO?0B6{.NqbeAu@W`Kt?G>?};{wN1+ֽ.WW_Ɠp`IZ\a^wD !nWz>UـAGs:Aݯxc?]_$, @6*j@`jQLvwwygk;cQd2[YӦoo^Z ^jaa[p, (  4`.W2c`O#P_[09b(vX̜KA̟bp'{lbCYK;0so9kf~ nUlll~Z*zһwe hmyM6E-;DE/ױie/;deozztiGYYI=R gc>V}aQ,bē!\c& 3B@;0@b<I(@(P6 Y7 AǑ ~I;DH8]u &9(R<ϛ/ѶS;Ɠ(;d {xገ+D?3EsLVh٩-ɯrn?Oj;-,ڤqLltJko8A2N_.k}܂!.@|oboCP0 _JG/W8v_~z+%vlJ P㉮0{: 5Xf&ײqJC}2ЬӒ]s~.O=#L.\_Po>{fW+;KoYp%tnOg$(|lV>e`|Ƭ5L5f" ґϊ7Oh"E/0FP8Ƃc#͏)@O^h/\x9gVZ;r,OWUZHVT?C|FE?hoR`#ѻ}yF݌Rc'>C# bL@lT[(К?碀M\FG dT͠3!>0kG>q+̈́"֘`-P:1Rc>J+KS}~EQ`g~=2jpLG~ۈGc|/1bǓB bW$DQY-dtޫ@ _N[gd)X02^G+Wp5?˚:}&PYX@GTޣӪ-?wd ]23(MMB';3l6ݹWel 8hz5dfH[* ч!<"l;Hak6rɴ4 ڑS2g8?<iZگ/* XܟQ9aL-4ݽ^F!漲,XNwqTUo!Qncʹjv^&3"KM\5@V.gI󆄆P Ō3 hM}~;Z#jhՇVEtД7SkkaC lnF|3Z' w>%CrʡOtxim NT5ᰰfUݿ\n8!߉M!mVZM{ \T>4?Q⩤I7(v[eބLAvR@r2,2_}nyV,e#qe}(d*ђYЅk[ ac;MF\ƏSŨ2fGknXϻ8q"*s=k/wsn. !BnTjg4{(]X ۬z2tfK߼NP}ks̜p]hRn^j3e:~;/mu@Pt3L`Z|r,|da( 㿻[5GJ=1^ @\$88N##YX Nz}1 9MtKF_+~.?5r0C>re*5nde&***$gSlW,]hCoOVY@rW3P7LOLa n.tN6r in?X;?tSہ,<Mv.O'w'7}l^ڲIZWM6QMVN^ yC(l9u< X(O{,!c.ƞu IR7:u EEELQc| Ұo )طmYwΟ7%iᶁsê`P qZ*:=Abo,_R9qApGXG06Z!D0:By8W˕D:sF'+ wo ϊp|o 'a,ٴx0y➸qEC ca7~П=5Զ_(Ge1XQ#5;7;7>TGRTdKP<@ 5eBF!)6@IDAT\, f1BY%@ki=keK4_8sCmJ9sR344~sKN/kpF&Λy:΃lYǢݐ=.'K HbLu:w} /PʇZ+ ( H ^o]?" ̱T<8".Wgז7^~WeMs_#$H)LA,%a?vP-9|Vta@p9qX& H% ;ߜjl}6L' !|ˇ_<3,^Tp6~Z]=_d pmB$]<8Tn2fz=| ON&?JHl"EE|_&_wQ95 VqENwo]ֲʪxvzŸC/2}-| ,>EB`@n&(Ra\l"k(ybaσ(p5Dl hPD1s nӞÆ}ekg t(_VТG!<g4$Dfx|LHѢABBI-Q^[ʯlԊ4G}so'WTy$GQ(Vy.fMWކ-awaW1VG=*ih⽺Nɣ Av6Cv, s<4m\h~#}:F&NgFە>1!1}pVqEYis& M;0O hCN4Il ƃi>jܠ1 @^VrZqe?zs@gZx<忽;(1>i4%Ձȹl巯_ ˗WS]|8TGc'_p}YwaMWކ+AHl—l+ \7AvBi,= :6\޶?ҺTgT07rGP8hF`Q_WYƃ^{ "St"@04'f%șBC[7;K'\sp1%(% &=P(1Ae'?>J~tɍQ_>*,}9Dq{iH)(в%EK|Z VZQ}z VJ_VZnYL]6 W1#=[*'7lBUg8s#tH&{q^ Nli^=ɟ`(ǁ_o?;gʵ}bi`p@Dž/|# oaQo*cO8$xQ4 QoLcBBN#MISYЋl u[ K NGNfXk,}E;lݴo$Ap4pڴO&Wf+/ B!޵xmH0tz#Jw7fMrASΫLr]1$g3*+Gy`ybiY,5{ٻc!dLI;n_Gws8Qb _cCfOэ|w:5;ޅ"'Aq>+Ф޸ 9r?zu^>4o⹶3B?+Du fBFߣ'NiǙ>4=&\`!$tTt cgWp[א9 py T'؏|GuʃkxૼIKKf-YjH(%1/+4KW :2mYeuD7 fL&G @-.+\B #M[%3" a| 50;5i)P}@ 랻#ѠKo2^q}zZ^<̐Â5הk~%v.tȑQt?osUxEG VҎvLƣ E !ь6 c`3C]OS3OE h~T7؏ /U-!(OL͍ޘ`3MN.uu??kGƗ_ #@eu<ݣąU'#P K|fi(z`S| >D@.lM4ݸO< ,zպet>کZmsHgFĪ~`}OunA6)QВfy@Wu1:<g~E pG2MIG0p#k#XQA4߽rlsՅ@/9@O!0oa[p<#]W/9^Ko9#wr8Q[3Vr0Lz>| f*Oq N԰E6ц36 w֕^NDt#.?N=G#VW0G ݆&;wqŸx_( v'8A#0AJf]Q[»zfX{ˮ]@Gr8F`Q__JU+]c5la|{+7a;u]Oˏ6CoAM@G!p8jE\[x5j=>487z{{J,+fD(QZY|t0`h}䓸AnXϞ\_6A(>g {WVL(O" (VQFy\iMPwM9l~boqbf h{d3C \<_G  ǎv;Lf:u&%Ѿ;p"9IA3?5wN~qɿjŴfϪ(9A ##PxJNVcWypkߣQ^XNxiAbN^>sb[ @Uγr#\vy܇ 3nި Uy 2BfG6eqpA+T7Zmqէ7~oΖqn|`3h\ h1~#II9p p;kX x\:=; vZqP6H8j:q't6|#R) N G#AT8R6X`q@.S&>PXr Q+ՀpO8jУ\r#JT5QQLHG .\O6I 8|.ֈ:.|D3u#(h7M~!G@pCe)pځ `@[9"0߅fWOA{( qLg65Sm%:6kz-0@̬+(i8s>b\9WCyu Y8?A˅ΰJp0Rp?A#O %.4V}4MrNks?T=8? 
ND%\bDs8r/vg+|#nDGQ#E W [pXO>`rP-}xL*{K#p#KHL8ջf p/}@L (FG Y_Yf4#9,9ǐX-Ym >5qSg?EMnY Ky>.;1 xˉ-JMO?|*78щ1a'\=:>[LKi$驉IOqpWך:Ui2gJo+>7#.1A$iU:h'M2`̐M Gcrim]Y =2rzxO@j] F׿{a&!5ן:Ηw/ߠ";oAFKSmIdᒹ|y6>'@!-‡|ƠMt U; UH4ծΎZ>%lc@?{yCz=BGz$ H׿ӅżhmS?r% 4MyNY\ pӧg~}ͽK/AFx_Ji.ZtQ;T?ZVYzF)H"j>sr.;׼4 At' 0nH/ JJu'7q~q?&Z FHjoh@=W&x&„6Fe|#.x+xUFqLG%G @"fCss.h IqչA-ⶦq'T_$&>qm.H;r[KaABx UHSuCL(fb&nr/jrџ`zbH3$ p .KlP,:G zN{ʮ-G#A@fWK.Mɔ*Lv$^TxyEz)?ߺH*}G&XP;7"t{knFL􌍁^ZhzkP饳GSm  a+mj~,^"O!#.~pѼNh{jliXfeNx7O})TB"<"^;Y|ɽ)A'G%Ԩfj&={wsyKE,M xB$s[b9ydRZVE,'PZQ^40XZ74qMa'b/ %.4Utԟl_r[yqAfM`V><{s&WG# j>ւFS@yK*n}JQ1LAշXu<%̤IE v%bf\ز Ovټ^7ͽ5VH򏉾`umy.[>-B5͙3gp8A!G'hĐFѰl! } W䑲Z(-FNOյfXj4X| {h?GZ iIV>$xajHR[BBW#NҰza :G(R,JVV}姖~#vDӒV &]䩷hİ9H#P5i>h`D?-l(|n7[A@K΄Nʭbn}7yg^uʄ[/k8;Y)I0>p8エSrtNBNz \tP?Tޗ5y$Vn>+૟= )kz}zuN@'y^FN^K턉#K˟ >'FaK.'&kW/ @+٦0ψ_L,s9wLYmbyN'tJ޶wl&nqZU)35.?&b[$'}{ +4fmԗ<_JKϿr9wS*q2/kb#3 +eXGad9)L-I?o>lל?ˇz |zﮖtϾ2W~Upz8o@i$s%ka$ *Frۮ2񓝞 GK]`ܐ^?j@ SG$~,I-V-y%c׵G}3+ۅ@rp$‡{CپQ \-{G/&Q6WVD|Oj@k=|8H} Qח]hDǏg@FL7{f](ElWPnD?+sF c=AHҒaƯ΁XycB4RT>h?hfIذjJR_k6f3IkqAؼ簤xߟk&ç?n0kv-BIFJ"\v3'AϮ9pŤQҫ/ۯ</4,GsGw⨦3? Z[icC .?Dz]|oLcCnd41~ztAO!{lJ0.w]'Lp |)%څ^:HuG~|{z/>z!IZccox.V5^~y|x, zPJm)Yyƺ>up6 Qn~&0n+Fxڷ^׶K\H&vi3lSfC.<4$ӿ)eեI8rɄaR{1RRE|[i^dI<=U}QK\wIs '+!9$i/={Yi G9m-*qX6j<\}lj94jҗ$E>閏pFCx#LO?ˌvUjI6:1.ǁ5}kg6DdH{0f`O޳K肼j7 3VQSG *¸n?Q>I2"!` .XڴQ艃+ZKGq@dFi}:vHI!%ʇL :zI !f":ьbxkǍ80*-+6IBs(ҝW+m^LzƋkNA.tyVhIΣyИ=3Y&!\=FM(``: @ ]ҤF jW}מ'5%jvnnloGmiNn4f3SWnM\JͽW޾[xɆV$Yu/S /0dIRf w9FM74PI&]O_s|o%LsܼiϾy b%Rρ2àGggyỦ4 ~Ia-QJNE$i0K'G ,񇎟B D8c+j\H m 9G- "wxׂiK=ZvAD7J~!MEgnڜhzvifaߡvi/ܪSFtLZ{ P$`@??{KN7p&~Pˍ]Ɏ9ގ-cr4_qЇ5;c>F.ZtQ;T? nȔ/ju=|c#)_@P(Ng #M ή6G\gh/f?Z^t̾2vɖ>4ؾ3;L` cỎ{nTLzt!ћ? ,wޒRIٸhtԪAJfd4Z;$e$@Po"raP\m.m GwK۷ܪ -f-5?ZOo>n?푮}ov;Kwq8aC@MGHYI:KKsASy@<m_ j4R&W~%DMUr4X 6R)pU!=ٱ{b|%#+/;֢Km`4s9iy&[ZY&D3D'icXj<ԲP"s|^lv_|00hCS>&ZއACBRBDk"OpN9ͽ:/)7KKoخB;ىu6`!oL'9A4 0ЪW"4m$_$Y=ȖdZ]p)j\*I)1 Hp?1wY뤥I"s yiB?!}r*A ͤnwn^#m}៴&?S+DA2Zz=:Svd&mov v?趱żU[cWs_AZY2 /Ȅ&RdC$Գ9V0-I{Rh_Ig84AҤ,߸ Y/Kin.;PKJ}wgy?Q1@g E\@KA#>K5;]q#_|-WoѠ`/5 2ǀ| h(hK2wEsk?UpN+ΐ:9w4L=7LE_ 'LԳcfDq5JN3͜I3x׫=O侁5S9o%sA⛄@n%|3L( wqhkNQ~dA&$A+]Q YWOBB%*fxS~\ j+E{K~Y8iBmK.NZŎhM}x]~0LAʓVbl<Rg_RMFs#Jq-@h'2)#÷N[±wRwxoֲpRr[`dz5/Z u 9/>8 M6]@PncO >$v |A} ;]Zv >BM4GvdΤtMl K"f?i`$'`j>ci8%Fi6w'+PL; lw$ضkw^뽘O)*G/"oJb yBo;0m*\xF5TY</nH~8ʤhǽ 6Z&UH?_Bc?m# >b^9yud~/~&;ə =g$ w$Ǒkn.GQsk:&(& F+j=˹ c:?pTX8Qe>asҺz&ӿp&$[4潦ܽY|AooP<@B5͙71BxpCqHyPLUzesCI|y^m#`%^31&l xUsj84Oξ “7!G#~+D蠐<~-\F=* J0h1G@-l?zGèC 6Xm}6k,^bzX7;Q79%$!ac.~{|OL޻#3]op#$L9aC( 5D1+Ԕh՞38 [׳|hp;|BQ;bqK}(L`|-AZM y9-IX3L\VT.4J#Nu~YeYU5~!G#>F.THGaCA) 7.垃ǡb/9 t= =\*9 Hsͪ0u]aF3Qipg*7;YGqZ~5D}k@.RL lw^r1"p/ v&W큙 ~}Ɓ00aoӶ"H@tp4/앂3?욃i#W9~V6ʿspz(V/-H3 ?}f* `zUgDnv2شVo+~,X nWw0w (Ͻde`uRp,rl-: ck{Kvo;ݴm}( JאrP8%cن}L3?M2wHD?_ _4& ]rFFԌrٍolhFqY}xlmنLVZ Vጧ~q~}xAo1 rBՋpc:w&|Z yŤR@jP:JmE#ْlY-x&Lb-aFxgO$s~ŗyP1/Zt^rKWe3CcV{;1,pb .|@)\NцY*GNJ&Mt r ըVX)@,J59q:]hyGrQ$YK~(PHQ_)&v`t2RҳGऔtPZd]?G@YZ!hrgۇ/YI:Xj/b61M/)ͯM"Jon b(>pVo>ZOo>n?r]l.ov;K#~@r.|7>=%Gs9A@?y*.)P`!ߌA=Sfdv~wb]$Ɛ] ?/3ƁGiNj)lw8DZt̠;'+9ԛ%Fn';N#.ӜT R4ȧXsnۼT2k`RTT{ *"<`D ^js l8:OVmU2&_|D݉Snt:|Ǡ1øI[Y@ 4# ܳ Nrx8!Af׸U=7h h.ffv/ţ>ُΰiWG#p#3P/{슥~A!@E`sNcN$dnܰztF#mkٳ|x6p8q> p8D`LŻ*uRSDs'ndk>ø2xCnpk}.gWyp8!~PǠ#Wc Z2ց1`D A~'``GVPxF]j5boOWF f?&)V@IDAT֥] _6NsBCjp}K|~Csu9 ը"QVl~*ъJ\.([G_tqp,b? 6 QEHt ^2c )`ዟ@Z73mD(R:%4i04.1"*V܈;.)bWAVu(S5r &jLv4V?~7?}6UdT S6%Om6$ {[ueo*uϒz(7{<3@Xqo i, BIDn3 ӄԈߓ`Ŗp?bnZqG)@yG@,#QoQcio@Uו"ȯ;Fr 96JRzBqfl V+\6A҆G#?Gk$- #g'C@B(ZjCb\gKuk#kWs,q T)x-4G': QRZu0)ykQkjp)'9| ]气eO+Zݻv/] }tVHH(m-B9\&XCy=YH7M WsxqӚ`;`~P~DyU7 h~q)|rGi4ZJ0Jo9 5}HI]`u#i4$z>q#.ܬT0YX8 6I FG&mUf?:x{Sfm %O]vX M9ˡeGXTdkX۔ړ? 
X{T`a6mD8kCO߀'sd:OMR6g@U4֠' 擰jKuɽ<Fz?$%vM;xG#^m&fH5NǗ+AbT!aʽŠ)(E7 ƙ24x 0 ?a#$ڎH'&SVTVKP8WA59u\v'7V"Hٔ| 4o yj?d w{jHeJ3,3 848syOJ,O?  < vOlhb?~kt5 ( |2RzDIwr81@CI;W?{)hC4|9@nF* =$a@I<3#; -!? :E¤1cpCvUpWCU\g4oDm@!dODG}O\۸MX_`U7G 0i#9kX#tE$x7XKVPK"Z2,>ڋ[U:UZCMwBƿZN>djE5%A.QNnꮚTۓn2RS7Wom-;AӟPS' z'"jٷ^X&Gnw ょ !B%ZvX+,񈄩UKuhpd9Tml.^%ʋV"GĈZ?f& 1{`uLC#fj4 7a(℃mʿ?5[)e{l*+FUXc:^`ΎۻeuWH*bV-MLV|u|3fAfUO6ypF Rs /jD+nH&;'.Mt}+UoV~ܸGZ(\mSVi>E'kwTm E{C-V76ՁHdOɱ勒֖6JZ˹<0|w]-{t3fƤg˗{G#*b^9%ǁ#ePosB\NWhCcPT08rI .EգGIUp1黩 J)VsR=oT]T0@21t~O:-euɉT]z\z8zr^>&x~{*:ƍ D> +~$b'Q~+!w<DwG GC͉装_ͼF+m:E(Duݤ?|ܦlOge< W?%1 <(Psݧ8#,h"fbuuj0ߋyq8E :< :;$:3$8! D1S^۠׃]zaPj1ry(*?Gt*ſ4<" S<9#hb|QuR=5ǿ˟&npB+S\O}ZMGŏ\}7q80"].ڻfC:+$:op"kМ6]UQGl+Jk )d.}̪1vN1Fہi0AT{j\|0.ƎedW?քߥբ;\{{гp2 Y G#ng|<‡fxepKu >J4NcI& SփHt*?˓oC@^\`)7 ?6T{j+QaO}B[yFCjboH1KC'G#IbV f&@۴:M4u" NH7in/(9G{JIcZ|cS{c@єxMi>1+|P13|SzDMǍlbDR!ō?2Zڟ;"?mW8@[ĬApFg; #$/1^K{&ToxSr?we1i}#@ İcCYI$t_,(êBeN$à+?Uep8@L TޏEmbrH e vhRï@*I4JJ)&ſB42IO70FTK(U3p>,]nFiBooCtmҫ ;)7hRu픱0y&YtɀٷNGΏ3~`Lg%7k39Ix4vjߒgқAdoHvQw]sçSf ̹}Z_ΐh_ )Qo: Z+qy|4wF ZwS4pǰl]20~*/.=kɟ6"x+p8(G 6zf A٩!ep˷+`wLA~.P+&@K*S3-QحS&v{o#S1tncڶ(&>)EJAqIGAǽMF j#ruɵ)\2XGi?pޛ5nJg{pP7[޶AvVo'ͤ'=Zxү6غ&H^~{s6TNs5zI1zvn, 2I3-ј9fm+r? aǁ΃{=G>>u:$/Ap>T:VιW+gmQoB]FfZԆh@ y:_uG^G1}=w}0[}qR*"D5 h-OAK{)OF U¢6xN<[f mp Zpooo>)CzujPw?*`2H+:QGOVJ?+&D\H'?<ϷdB;|mW:/kQC+N_叄C_t-uoKY-]~w#_VF}* q,l-/4XOQ+ev;a~3ͺ#D>6X52PȹjȿsT^ԥ}DL J 6 4WZQ Yiɠ;<~wQTG7}HXՊu(wՃU%;Or٘vrB -]TUHD =5" +KV`A FkꁤYԨ >\xc6xxtwO2-dS5%rsb0c_q ^VN삌؟zc |#D)\ Yz$tt9iPbCcgeM/ )I&LQegv+Mm(HM 3ujFgU>͝5 l<{%EomOz8x> f]5TC4T4%Q \8Z+B(vipQpX")1O7p7 $ ?췽0}%u;G ޯwQ7AɎ~_ö΃2?'dtQyl国?m(xPXJФ鏝jph фD}u%%HB 24 4%+%ÖR \zZړ;mTluD)߬ >]'ʫax_`^jr HzYR|:%)W$rINRAV:XektzI܊lʋG@q*&02sNU_mx}F`ϝPxI)En̺z=ƕB7`29!@ 5&W&CE6H#VȢ\&- ɍB}stWCfoM:oUsϨ) r59S1ix'I IH:G;=fcd ^'_8@, &B̓jf ~ХgAqPs$ddO4+b/m'plř$i~:z1X&%*k %F \F_r|q䧢:D|h/$B527"ා3@CN8HN]R`;~DϬ"ר6r6''sJu=N\~>Qw6vLpJW$k@N biG=7NFHVnʋeN`?ǿ{p8(E \ _GI(s`ҌDH~~ۊ7bș(5&-jDmN-G@Bvk ~?H-dtYҚc[=J.0DVm'j 'RAqۙg% 4 @yW^1-]8s XjA|7GhZuFo=CZ'az#wPW[ઁh_[UNX Af(w~MۓV110;"t*7qd[+ 1j$a˫r\䝤].7|#D5\ю+y6_,oVmox4_KԞ/ҾR_kG>bkz\ESMF{i~g`c$D=5Џ'G#KĴQ~Wl>ڃ)dyD+h?Vif1i$6:LŒa ~~N[8@$Y᣹hGAXQmZ4Vő?0ntlDt*4m4)-8P$ڔ%SG˞'ӟFGqQ4v3#(@R4z9*%Sdh{IП]a@t*{)8S*^?qEA U{" WP?v]Tyq8F b?N\MĦoRcmDcQ糧tF\U N%(~`uJ'ZA'qzʟ!t`SN}1NUVM)ݞT,{V3Yw6Ӫ}D'OUY<(@"‡CkPPd#FJ$j h#d&,  \FrŪng]T`qϵNQ,GY7Du .xv*oSJ'V/TTٳj0X,%Un>QT{J&9Q@!Hx?^נ8S ̈́WYiD?㥽=Gye@A u DgT࿽xFN zumA~Ghپ} Gn4etM)՞.OuwEDٳ+ӟj<]V3⚖dD51+|Pp 7YGRDRDtJe&9jKhV둓7^ש!Y4g* ^9IA)X_xя(|(n_! :eAnVmJ$/w֟!j-}sZNVkAsFEQ{q8@L VY*?=t0ApêcՒHi=ID3^K+{a0Wj?PYcFߏʚ0LR̽:eBZ?Ј4DO_ˀ)m;"a]! 
l TapCOn 6ړQL{/wy׬HٷT1TVMòWϷ'z.Nlwo~#(@ ?:a4HtPeʂS±-HӀ<4B^'>Mr |l\p8PY]l!yi q:O'Ht@U!3мAnS$tP,?o$:q$L6yTTw%4srEiAIn*me} 1y`wt+=DΩ(ys8Z*|5X~ԍ39 MF#LN `~q L*tȬC<z<]qYD%IkA|1著 8KX Wd'Cd`>'͌ U@paX't"!:NIJuLeڶ NӢD"ZS`pđ]*=C4vMбcQXuT"` ˆ~@l6Um*%bmJޞOݿ?q, X"Vן(>Ų̷'  6nO]n5y?̝G#85 ԉIpi &^DYm`٠:n46Ä#?A^9/{,U2HtDt3`j=:aoY8+7 t2W!G=\lFZj2:&Alw\SdڪhSHku ʎZ؛0 jtaǏfiS$tlG5ۊAmbp^:}kI Ίꈴ)y{ Nsߟ8}(}{ןƾ,ϯd:Lz|<*ʁP8Ov{]G@B@M‡H6:M0؁CMOrR vTG;p킮LjB)dW}u%ЧiǃӥUȹOkj#)N)j.6U3 :y:[Uv1dm*ϻI`(f4L+uBa@~N#Ar˜Ei؟&JqbCL>|`^ ~)y>h#t+tF{zu0YЀZKn}qt3SiF-$ F2OJ44PHOKVփ\;:V[dǍ[ŋHH@x׵\ֿ',^*㏔Uk3 fˆ#>㯭-#r~Nl m j>H!]ys4gg@0HHBÐS-:{Gs`'\f\qB;7qӡЫkNslrBIqR@[gb@E`khSQ4)+rj!tTɼj$Q@S |?{E{SRPx?ˑ; $UO=;{*z*R H =̼dfS$ɲ̼3>N]'G+E,w0\S/?3 A8OUto -4@"@8)E/EuckmWhHςl ^_QDGQYÛ'c4܇N4l*gǖ 6[<Ċxx7nY`P18u-?|Z_ѯG'Ubެxb'u$ُT=!c ,.tlmfF?KvA.&& xDzO.4_vkOO+0M̩4VSecBӦ."pR>~eOAg=%_ cj1WUX(STۣs[R:z:.w)R*vu}c[[p+DY,Zi˽W㙢tw+N:O si`*iYxNG_:ؿ%;Oo}oGc V<}ew-ZnO»@ QZ~qe7%Ӯ՟.=K ~U:+aƖ{fVlkղ}X|q#»9+!Z[;;('8 p+.8E E]R}D_z:q(ڹظ݅$^=[?/*T#³D\/?|g!|3Eix8S%dܔ}@ۺꛋUEG9)}\]bYb<ES/?3l( ]<ǃy/+?הޥ\p A$no/76Fz96߳ԯгS/qnub+O0yZhqgY8O{!9`0IV΃]NpX6l#xfסu3j3?ڞܶ<;[0';z:9VߧtyU+^-v Ц.Ńc Wö|KK~euKLB/DS 1Qy8]0ChXzw}Xa̾Z82<_+* Q21>Vq'{Ee)(%bϽoVog]2\GTMП)ʫZSTK0˾UpT6 )/ŻUH'?XϏy] U%_%P/ym/ymӳS)(靐eگ7EG(?y7w)@ Mqw&U1CxL&̊/c:^kw焘Q11%(p(݅Ro[cCO0<yGmݰ~á}_>=Nlb_9C9|ЌUmz:j2A⫏ĢNҭwޱͣbcbI4KLMLqSU| (9t$hc`ɍl޻ẅnѮS!ԃW$qѮqkNIx3RlBܡCO3`)m۵#5RBWmO/^ݏoGaE q*Kl)fE-|Ͷ5:rp jWsvC{Vl_Z;~^ΎSzpk|?{P X`%V Pw|pĽ*>gŃܘ~ Hc)$8?p F/X`?|of շ:O?H&K st 3?S{d݋4VRHحGSZnsMҭR@A^[曅 %{\@BCw)eGA Ԛ@8+ː_r +r+#9BOv}a+<>VNYaEW^@9e-2{/)5}r!%teX'MT߽۶lȑ^$8S=K rfvΌ;8 SRm_X.Ͷ ZKI~':ܸai7ʇ֐pf+|pCϱf}ᫀp#$y_yogd Ihu aXqr2|τ1c\Xv,?c̆?K:tp]tUgtw|xۇ޽e:hҩ{%OkּQ1g4O γiEOy vض_3f.&PwiC @}NJ JĽ0lf[;xޗ5/Oȃk& {j&%9yxə}Mú3E>3)'wrrG~QEsD Mׄ5NvС܈?׳t{۶gl]wM[(qבa ⒹnfcF#`?7={ۯD EXR>l_lţΟ5lgyyO*5Ӧ̚u)7)K[$rzO]˺3p MW?Cmf5>KiZHgѧ#6=}c)҅O〟nVx@lOJH E|8(Ѹcl~R}[ou99g(S]Bð.9"gTW;s݄W eN"YF6 V>Z1~1OSZߟ0;x [%"[*ⳘK~A@@@@$(KZo˶Glf"~\BK13<奫gz:^q"Aj5Bm4dk; F۝Ѳq#Gj6eƌ,Kv&:i;3)H^{=fw'FA GY{ ^eYof ;a   P%(Ub'TMlH{t7ĬYJ,qi!~ )!ϫUpPȧY)x*QLE/ ˈNRR$ h}%e!5 (K% iQa)M(KJMˊ%t._X #jDaX*ҥKsZV/~ )&ٲZط8!(!#GoR6KKô\gXTjJ )dY8jWHhCJARh-[5@`=ϫ=uRfλs>-*ݕאBJ *!ռwIM4\W)\@@- Y~hC{bƋ} iu,+t8e o78X#(@x)@@@@@ @*FA@@@@ <@z     |D|     |G=@ xP>"Q@P>£ D<(_( (Q@@@@"b@@@@ƒH    OGW1     AGx'# #<R@U@xp#Ř)!eW%UG)DBMY~9)W'~J\d6yo5}I)D4(B{}*Htݺ&nXF)[qFt%[:\R|G*rjGFRc,%}[Qu    B)+dڭ^̝IcmGz5w2 WW)vDm NjM-ߖm!|=# D&{#\n=<銚Ӣ8up #d9΋l^t;>ؽiSF    C5v u=*!c4%Fs8F{ T(QUeHF,4: *P8D{DNGXO^Q@#.:    L \>7(]jQ'Q/rcA8 DJol* J;SJي=*앦cq@ @@@@BI {WV[H'+8'ɍZa[?ca[Q<H&כoX,|l    M mVڄeM$でDǛjbOɯ.(]~׿k p     6HV>sy@IDATHxk^v}O}Q3BbxѼ8%_f3> PH@@@@%3-@T Ł>Ƒk[g>&(ٿ[!N׫P!}hҨGPx *߭bLV    4HV>*5 (,֜íK42MOO^@|T((x8þSJʆw+,_䌕Ŏ2e,),mB0HW>bbm߸' ʲ;6+ @@@@@ "Ym\÷ÛZkaX6(9kI@۷a)3@Dh*7x۾y˼m ]宸t^eʶ}$kc@$+ŀܐ/m7K-<24gT2Y⃋|Ab%l     P[|0 ؊G1Wƴ W, ˴fcYic |P=0`F|mdя~Y=.SiYe놵o|ߕlA@@@@ B#QٔiM'DٲwmxM[=rk+F7Wn_u49<Ԋ-[֭ykop+z,/򇍅dhz:Pշz}8  8K_p!ևc+opq-~nfzv`J )\᰸|Ҭӹg|!KLy/l0O "p    C`̔)-z_,|>ϰ A>„(=PpgrWliƳ/>mݺ&J4D{ T'OFY:+֜$34Js]ŷs?_Pwr6g,/    xO="( QK!Wg^V_B V:xOI9mhK-6=7&>eTLL3^=)q&li);m_֯\aٲF.m|k< =@HJML vzZJPoˇW%%LNI 3# [n2aՏжYa፝ /اs}| 7J:XVB` p    ORn\+NN/H)Q$wfl ]dns- gKÁm?ys`>)U7>k'rA@@@@}|nC(nm`E[?RZKA0clNGҶlpiު}n2Ra[L24o{c?F-@P+K=M?6f -ah`'<\>հȏVv_r{9c=+Cx?|Ȗ{+Hp     (#ؠ RfMJ}&XU`J1MJrOaQŠu:;m w} >[ >Qq     G;ǃZQ&SR]DR']Bٴ$WW)\j̜0LcoW}ѥmx />ʇm=+fD!4 Gr&nA9JOl mT7#蝞wF}}*2ahXldnѨ_`}_Ǿ{V |/@@@@5PiRGjxPj>˓˃5ÿ`Yj?U:/xN642/nu* yU|%4M+W2 ?#HplΜ4ayeC7|= 0m1蓽YO    x~#F     L!A0#(@@@@@L@@@@@|hP>32{@@@@@A@h䭷B`wDܸ'ÅP    $ k0}Y|B5YriSNVR8&&Y^x9)Y1] YB%;Nx?;_JDŽ R Ep\@'{ r׆VM%JQΎL'gU7JGJS^t7L*jKļ%|`Reʨ=bwJ֐*ijb    (sޤu Ly98JzђuPu'R_|@sȄH+F^ xm(~qJy 
~8zQODp{fx Qr=UØx)u֓Y4QXW.-WqinLv;G#jZ `     #HF7iYp +8|))o9e%4cSFк+ע4*9ST%+*IK1aV|4j{A    u&j1M#S~)ACZ&7z/,T<%:N>]wJZfBkV9@@@@AG=&}n'Dr%(j쏛)wo/귐;o}t?#cv\;_>NN^t} [0Twqe!JDwܖhA{k샚yP"1rWGy}F\$vI@ƕ@@@@@ @DY@@@@@ @ʁh     I|DRm,     |q@4$P>"6QcP>¸r D(T( 1(a\9 @@@@"HM@@@@˜#e +Fl+EI{af S*]=e2J^    F@@$IC,!ky)5ė4]5щ󥔪    M*jδg8aۥu&NDDBLQŮNbgB˔ eSkВ2RT<@@@@@IWiV (:c׷zuB pH9Ŗf=4 \JIsQCR,KЅT$HrbYl?qp     C*w?<ߓ*QB.]|WrUc=u%>yu A˗ƐGB"$WSfh.+'.Ni\ 59-1"@hR]/2IX Jh*?|mmNj/,uh왳^tYC-Ħĺ|G,"sJ^B 2*a$_PDo#]wXu5ozkhICDCgI ڝeth7z,e^@@@@@œ@+Iz ڵޟIj<L?nӚ񼑆@D1=,թZ_ymPq z@"O    "O0Я-b %:m5& @ @H0rb x66ǏfH @@@@@D*͋m 6$ːլtؘ^! @ DQ:4)obv ;yї ,Sd    A&q]STB6/>dTvT2Flj<)3ԏ~/*-X(8DPJMY$@@@@‚@Y>KyMź+,Wo;/򜉚ҵ@c8 r[ko<]g֢Ob~]#NߪEbmBjbJc)_g8 D!%͍r n9I$ :.uk/hŁ>Ƒ҉1zs-{B4ľOmO7(I_d$sj+ƒ@ DXMv۶h*F[nY_Ğ9 l(ؼVDw&3<|]:&.7\ˈ    a@ "Ǎ+TfsEXL]sYx3ڧ     ^"R`IJs4e}%fv|^4yik`ŎsvK(    yLHYG!22:|k>Q{K{}xNB?@+Zm5@oui Hju,:@hiuO7הW,oD=.V9Y~u/ԟdet&GNQL6hfd.dU@@@@Œ@+6QQ3Ή%G^9򶋶{L;w GNLKYO f3N' YqNuueT_54 ~i!EO!CGI3,IaA@@@@X!d-NsRY|hWFcB Cw|Wf $I.*i[Э*$v%zX~XTF}5)JZ8<ǀ\* 4)O7/'ta'Rړ&`Q@-Ri?FǨϦJK+,\Q(Ytdh[49Ŏ7Zqk[q;68{d[A@@@@%dט)L #x[Y=t8|%d  EXWku%4]ԎBr@zZ25ja*ԅNҪ>VW~y#,e]! \%5\9^ ,<$\ի|$ely2Hv% ]Ȼ%; ]Nnv}"k1DZ[@BFѕ\qe{j s*.lj, j#Y.oJH#]/BF# NNtGt)){@UGi[kVz|5+e:ͲVk).[kF+ ,ʡEJ0z>ghBcI e+;{r|Hd{4+=mz`1&T+qw^Hmīɱn$zqb#@*ˮx?:Yո FN:ɰĄe{EH}YCMRsO*Sb/ڦWI)SggYS{zFг5jN;mu/2Yz#=wJO]ὣF!ۭl#}&5kKnZpd`cR1DɩG0hp|(NJek,ZA e\7Ѯk_D2kLJF hȣٞw%3r'lx.+My6cXRܤ '% V=`zTk#&^3m]1|wO#B%MMQS zœB^F5#sgq$_HO'(Rҋ51ӝ?'E5 KrBc)pأ9*5E Taz֤zVzH VvK 2zaށt L`#3:m)e>IהTf׎&ݽO=Ci$U°&g )P7zfȑJ$&ܕ⯒Rx~pEhNI]& ˚phN6|ò^t96o;QHtăzeitT*S>1/78|rk-P:B{S6n;l'f̢sǰz'=@"Vu˒Ns.[=K*rLk Xכ7sR7;s(q[_p3\5) 8RELMZT-v/4\y3V-4TMi*ؔ.x,%hDA'jt\Jj9]tbSWw:]e=u w/pQ;g1NcYʫ$EiNaV<\GQ:tK%F\V&8vn"W<) Qo.ݐ?]]㉣ԛ ^Fu=z@ViMREc/epƥyd,vVՃKc+ '>:W \N~)ד³&)-NMI/<延qRRvQcSëp)S t%Y5银ϡG>HVz>^S-9"eߺfI,-C̽Z%Qxe=s} 2%iʌ)-˹[TʎӶt8_+?90.NjN@V=C'`5/( *R3>sP>Dv-ѥE̛cǧQ=jH*C2{ˣx:J|O0VȨ‰\BC&^2Kae5Rc-ti=Dͣ!OpiKh iBey`Уɋi[KǛ8IBmTsK<vm2{R}^R"}8Q1ϭzy= y }Ofy7&x VpZ~Wq^w7)fi-w):؁HI6gN^;k&-y'M!<5)v{d8?` q;ɪ"eCTXntTH:5H(MX^4}: NgW8}r}qs}v5C\^s(62 ^U*&=?c=^SU8)bxli/?")*W[kLHYgݕ}L!KpAC}s\{ Ƹ70+%SRP^3״ơT}hWyA|y*ᨬ]IybnkQտ#Pmyj' ^UԫJ٤"K=˝ʽ+*J=~TK:R6ٟ;㑉H(S>{Ёi=1M=g5r1zXOШZߗCu>".$Rcw=VQ2Ę7:agO%jEdŹ'y;U?=Gc?bFJ!Ff>ZaG QΊYH뇐\WRۼ5~e=-ys2dNl!.t׿cvwS~w$7ڰ  ФT ekIȉ0yit8}g^ a*џQ{kk*4Zgw\ g=-"S&YUVS k7 )v)Iɐ K;ƙB ݴt*7ÒaPw )WG];JWU3 aG&h4˞U\]<ɹĠz8\_gyǓ:J69iCynFdɔme;Y9P3'M\\]GR1c~zܸB>\Ԗ=$,S\rRnXw4"-*VKb<"Lw#ov*)<;Zmher,D3:u~U_<~R,b/g1:if3;Ra;*5cB#&3_-Ũ`pG9oJ|#}iYV8-|܌[Zm,{oI7wӟK~2R{ Yzbv?]YHC w{(˘D T hu%cS@?S5>2Z:^U8ZLe)URcmL[ٰDXjaҜF}iUsi<9RحFԀJCޞCEy.WS?Hԫ:{UixMWP116,k(5Xjx2s-ߘI?U$Z3V;W7B4Ǥ-.PYXwOFn \f=O"vnlœ}k{we*f Sm"<TEY+ҡF{H70^̌TCTR<$/2:GUxԋc͇P=TizcӐEWWýQīIP{&8ZOOcW7^UIS{H^`ʿ~0;X;}?](*'NWaC%J<٪Q_Ń%x0 8`x+Y6}Lqu/;uqVe0{i'M=55B{Z]kU8|=IN+NWMhuj|xe)5$   4ܲ+Ms2 qVƠO$n8|}\EV%>M)3֏pg2@   M@ ,5d@ <,R#9 mRSsC))7Ui4= |0VZ3P᥏_Yqc8h֎-BVX)v\,2H@@&S>5OMXCO{Y(uS$+IO EIH8=BwmIp>TLUpIت^S-8w?}'xW $@Ʋ0   d+f)%<({)'hh}nPC" peN[}v0E&>x4Õ5ʜ4[;NfZݶY⽇PB~?;=u^2Z?_wKJRshN4,E{=c-5i֤Tk3kLKKʌ".Ev Jr8g'ZUё΋N`;'; 7w(K\N+G/郜zRj Z| 3)g?Wq8y3.Ϥ>g(6ǤM9Ts"1e뚆n=DAz mNxG?)-~cmwtra"3=S&9iG72o kI?1|rV R^Qy|a!O|wIJtaGǫ8ܣoYjjtGR~49)y՛N> h#O}lָHJKϠNIAY=ۘFGsϤt`+S' /7;=J_ `gS'; O)FPzFJTMJf+4p   G#Р Ӭ{m'YUY:TQQjG(awY{qu{|+4.'*  5hGg-TVh7 Oџ^,HsH0Rϡvf'>4n; ˦ nNlw; VBrCǐ&_hUh\jEީnINIz.өG޺h`Fir㥺t2?ɫKtaNit'&wihc4/I5}zԭv9oz֍;aUkNZn*]4d'I4MYY@-9)7Mxb1Fo/Ycc<Z+LIzYh|tF=žr%$_Z_0'n!c/E^M ]fǸ3uwMRw",kRW9\ 1)мx^S |jJEܔ؏=7QU($Sϸ,c*E@@f nO|tZ>BjqtcIC(xt0=U'ŭ~x#ߥ?!#<wOi༮#4)rXr M^=_wDAJE+Mh))㜚;UuF8R?_()é_q^Za{IcOë/B <4CL ` |dXB=}uϊkm'8D{M*{xh ?PE*ۑS'_NwW#n 
9M`jz*GBx*A!-IDATr4Ye.MLt}{%eXze^+5O{߸84Bɷ6yAc[K]PQ"堏큓߹_\'N<@&=<ԏ=n752Ith˸C4( MȏxyZT1?" IÒ6RNv U_n(qz{ћct:pDGM{Oܵ~| ՚95dMȡt NW߄հ XoyrO8oͶ&_FmEUq蠟̢]jҗE김{PP= .U3ct1u>i[B;j%E:/X\3E{m±@NI9XpTgww+gnn9IHH 49u!%#%׋uS4*:9>,^t֬LhY ?JMO">DmRaOnn#5* Ejh-q/Wtqhw^"96;GL~BNA{_Y5,O&G&;gS $@$@$@$@$8|$9z%s<{f>:ۻ{ []*1UCclvwb3 ёqĜ;Ǥ`h,K- CGͱw! W |er9肐r.YnL@aHHHKƾՃ(*<}_# kdhMzUQ1Igm 逶xIUHh1m_VPщ-bJGh{[_#=Tx_nJBkY8eKVzUV,@$@$@$@$OIaD|ئc,wr\ㅩ 7.o*-nJ%[؛oY3?nuYofc7Jۏ]Aރ:.|F2 m8iNH~xgE_M$@$@$@$mv5yoMejKKϺ&'L IJz_ eI̝)grí젂8R*F;O{nURP/CQXo/y)E 'ﻲ6W;S0\Uc-"mCH!t¼pF!|G!/vPwQ[N&5@ y=)ԋ3[>?T9iKiރ1Қޡ*!.O9Jts9Ă ̅yxKi.=1YHq%##ƔZwO^߲*ga*MAî<}Jw/PK3zfm9x\W;^qr45ȱ#: ˃('~f9GG7iW4߃;V+\]_ 1U) Y>seB6,r*\QTzӨg ~uex4esپ%X?rzv>FUוKpKfffIᔺ8g't8Xh/ʶo!g6Ì2e\20plçY:¸AH1<-w.B cOg|_ +(vBzNBܕ+Ptt߆-;eJ;}mڦŒàt& -s(WVRZ1Qeo 6(= {X%)BMkp GZ!"h7#"C]oDk/N3i3` )eU;lp1^cxż:kzIa,"S{\Np {[N>@O%sei#0qfB^Dx,K0XT^bYʀł5!*fd{b@zM_T7yļ{E iOE߻!2^C9nQ7llZ%dQF ? r^I=:W=ia^c  f3 j' y06;ӼN}f[#ӣУJ5Qej/~(M1;)?q+ބX>dwH/wYu5ԥJau  :&~ Ci/dS XITL27Dy)a88Mx ytS  RTUWa50ܧ CDR1u8Ioel*εN|u,zLّD@Y NA|)mUzU{$GT?$=鷕Y~/ 25=^] krC/ $cR~!Ǣ뾴V{.C$@$@mOMf>-؃?m PXj6+G˯]/"NsLuBt1rjΥRqkRuQtV[;"Fz-q= A|wnK_*mV1F1q lka^`]MA+{e(G=*Dbk7HBCcdF9 ~pk^Ek`iU{|#pCX <mûp Y_=gZ g{# &d] Ck!p~c+&Q_`ƾcvIs,г= 3T F Flo3Do-rU"my'D fzK+DU;h3?l . ȟ!@azߠ b׽c4+l2'A,PMLD&S[P1rRs3ؕsRB^#܎ݫ1P:-L_X*(uG 1Sc㥧ѰS͌/w@a zT=f8o`ɲј~2XB<ѿǓ!חGm &b ѣíxO.t5'Z^Pؿo2+/bxh_20wn9Go*3ߙKF DmӮeŇ3 [mb|B)P[ =LlL+|xѰt/c&Zg(LoA!Ӯh6^Y{p[V͙R8jVc;l5!)'LW;eGXW2-ho-ː텍iʇUHM+<x1sfGL#jح >JPUၫC?/cs ބ1x_qcOn>;(ɽkCxt+lꭦxͶ&Wf280{!߷P,G0=k\+lgsa`N8I/{{@!,t_-VqdkRiYnIa7G⣤01mqoieU=m]fYykGɅ3JZɈ=k☯A<-nOzfJlw9z=\U]W. mujr.e*%To@i7,䟢u_,\7g"NnVezƝ͍[1ի/nbWv3;R{@{2?; + 1ÆX?6#M6CkeV. 70$WO>D< yx%L7lKՖk震C"Ɨa( /9vMy]=tlJ_ \oM! 7w HDƌ+F|# H*.eX2~| D$@$@hÐŤZ[ъݧhh)M)q6HGl -i {z{0,3;j׼ }㎺ k6|W̋)Ј3Ѯ{21>5 WQ[RgZtD!jӲY jL2=bbF^W&*הgC.;T|plL~:KQu]iמFDՆˎރqeGdHH'&3'g>ifR0JI}5Zӊ^,?;(nAޘPXĦávB:zny0 ^ɱPj}RoI֤uj(D 5{MobU.Ld~ޒ 0 }ysS~E6ֆce:\WmCHt߃-ŚrHHomb| xn7e>MZM<D %#]s FOG#Pʈ#T#ʕ 3q22̌g+P!uAl*.ޤm?:Cz*=(IBMMcܮ ٢'nd#= Zh30\v20  h+'ӘѴ54*5KM{rV{ f6ZR?}k ?B4KԘW`oeP_A3O/-Ur4&513?aEyKx|;CMc&P[Tp7G$d'd:Ƅ=lh$Uݘg!t`]R݀N:\vN=tl knJp`8\vp&D].ppse3 KA?~22#IK90voI`Xs~55'QLm.`cigt>C޿%Ota/hOj}eG9#/JV?3ᲣCw'. _$@$@$hS㷿K?:X!\ :hY>d5_Nr zѨe !:[&|#fmmH/ ʬz=KÍ-X|/w&7|)Qll<3ɘ#OOcdF+Gc/HfA .lY@$@$@$@qmy:" (:ರ!CWL5N@Gl7:,\ l85"6;%:߽=pjVzEq] 2c)ѣh/̝S+=O C ̫ؕ9rdQQՕgP. pc?}־vWAz%X}#g[~X{rFE lֆ7]P .4U]Z8h:r~hA`0b> g[.S_^UXhGa4t%'<no$ Ru{VP|ƒNA 1 ?Cn*\ NOY--?r圶lBYKȢ    'vk>ZͶB)n`{(33: jexw>f\nʚ+s5;Ob|MKtVv- Tܾ0Qlֻ//N-<{.j ty {:UbED9ٶg5fW!$KnЇ3݋_wAdė.܌y M\O>m .Y||n`wDA!B%'ELb,N_uw9Gs(. @2mn|e^5/=Fӓu lG3/uʻsV-97ڶoR2W*ݏoHj _#g)æzhA#i .Lӻ DEbꧧk*Ts.Px9^a4,wN?}a|Y/? ̐7)!Us st}`QlMbdčWx*-]KI=p%֘Q` {ڝ1xFۍn=gL[<Ÿ Mr%veo,f/@|`eٍ)2$@$@$@$;;>f剝n8 0(4l( A^3.'`#&XYeD&(I#mt.M:Ln^I*SvcB?2jF32P8?H)t:V')"(@ŰrW(ʵT.=$I#i܃! 
PLFtYWCݳNn~CyփGc[oYvUʈ< bG2.A.b0r8"lmm8=4r!$,hHwG{YYW   @τ7W˼PC("8"l lȵPMQݛ[!~cCK` ftc9?'Nڈۥ0þP֞ mH=;2ޟ߱>& ov;պ=~ye-pT"1#XSQ9&Ϛ 7J)3Xy @اGf2Qw ;w'+0m:8jsN":TupO1 (فZFp(z?f[#ʱH=f>sjˇm˘`A,\7ax{\R5WbV횩Wcf1w:t)7.W' o5CMa-WD*x^avL5stݴ1fƨQ՘y~_j_`47G6\.8!G]!.^;2raɳXn_x&   %p@b$ǞwmTp?4*,W9g@M<> gW:]wA'rz_l4#:S+튗 L]܎z]Պ9۴8LghUŘ:pք:!tٽ90ZQ"t3-X0 N3ċu~ cPϞ Dj~ 3OBan}IHHHG`qjc4+Omb|M:nY]i1^J sJ̗4@]sjLUiԛ1iґ9lRc>ONlk즆uNLز3^h]Bx"2+_ntp W41 N7SЇu~&Mf54fg GX^"NlH㍿J1nhK i,dB~n#<^IQ 쮞nt[M-3 *N>Y,f=0ɽoYoF_Gg^y:}Xqq4}/#QzZEEzGQ|=[?S7$@$@$@$@$@#3m5Iam                                                                                                                                                   6 XRBiIENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-ha-dvr-flowns2.svg0000644000175000017500000010401600000000000027337 0ustar00coreycorey00000000000000 Produced by OmniGraffle 6.6.1 2016-10-06 17:55:59 +0000Canvas 1Layer 1Open vSwitch - High-availability with DVRNetwork Traffic Flow - North/South Scenario 2Provider network 1VLAN 101, 203.0.113.0/24Compute NodeInstanceLinux Bridgeqbr(16)(14)(15)Provider networkAggregateOVS Integration Bridgebr-int(13)(12)Self-service networkVNI 101, 192.168.1.0/24DVR internal networkDistributed Router NamespaceqrouterFloating IPNamespacefip OVS Provider Bridgebr-provider(5)(9)(10)(8)(7)(4)(3)(2)(1)(6)(11)VLAN 101 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-ha-dvr-overview.graffle0000644000175000017500000001522400000000000030424 0ustar00coreycorey00000000000000]mSH< e6-IvI&;E-a YdӒeI6Cb%<>/E] J{I} {{O_x?}ֻAr{gg2wvÃG\cgǗ,qg\+\4 \l:ݬnS\}qh7d_[n'Ã{w_p2ga/L_W;v{G K~)<i~ K+'~KKsšz3lRjMgJSfJM^^x,jnW7A盻e <:f>lZ nύ?ȖGHK4B #Ŗ m(9]`PQGty?n;[:]Oc`F/_bɲ4Eir;0r#.q׮O[]Ĩ}ks,)&~GamJ)Bt"IZ!2҃ e|?qΰ&\xFW/S/kXնO:Wa?.@H'aHÂiSZ:I$!(%cDJJ,amť|ej-c[3>II ZE'WW2lZ`ALY2}}dEUi۰>]?> /$V.ak>hZOܬ"-釠0^ӗ9Ϗj,Y&ZMN˧H|m)Ұ54=yCϴI'zN̜,fH MMSoUX@ n$QӾDS=3㟩}~~`;LwyUVq{6}&O4;'Q_B;$$p0?W'g*i|rFN|v΃tfī__OҰˤ͛( MudWiqw4K^pl@w9cOq,yO^| .p]L{fr 0}ņ%i$%0 lߙd>'WY`PayՖnpʬQ NΔ@uDPc,P -ֆe\l@Vj+ĸ'S !!aڿͽ^Ϧxc'gLl62inOVpR_r:ZsQZT[w %$+$'GViru&?= J 0:L x~PY| iR+xقTKm.ɅfcVx݀jRJZ5>DɄ rT8㣻q>m[AAh.9\J(عQ3K.C(ՊIm ow߻i4Idra/|h~ăcvy|b}y~A{Q½WeRܰҮ f@5"TjUp~Kܚ [ *a70`)iJDq5 ɪ Yw@ v]N?"¸VIDRA+IQŭtqa&Ur)y.c5c.c0qu|hBU |rδi_JKZ(+ 2̯ ḧ́s̗.R2L\.Xy5g0pZGv2Z*JPxR fuŕpURyoW J !tV5`DXEX}:#xiFvИ,k$i$[Lh$A#I>Y atFfYnŕs ,On g+FF==teYHǽAւLLL9yp#C~|-o$ܐq7#M9ogYp 2*ir6`dX=϶azX١_a1WZ09w}$PHv@S\Υò.b2)_=&/7?(Q-"'bӌw&b4A2eF--M1Z8圻H%,[K'%$(;:Lp;]';*<(!|RtA'8(ITE<]IbC(2BR/^dW~;묰0te$%)6=s;@4R9mml˝[B.CMM\ $M2_ {Ff`2! "m/鈌O5*KEF",>;߳-5JF`M*и\hTKz!N/sקYk' 'ai9- y@ }*9v:noV5JumREŵd6.4gt1|rFA25 GRWE@ .W)ŬܶUVM$S#N.'戓q4&TʯTEDJ(TV7u1DI jD;Dgvn0r0 TaP-d +7 W6VOYX*4ZиLh\x΄ƅCuS(gB8q`Etyȫ Fioe j:a U"(m LXq2T?S]$l"AwQ=pT>eĔyuH"{u(b"UC&zMuν^f)r~",dP P# p`Ĉ4juI M)&t[Zn*cIky$ d,91.EVKƢ)qKABBjX?Eix2~3 +Sp(\3Q,@rIk%䆕rҭRArwXի)5WTDbZRJ*UkA;,wY)P)e8ǸB8R,}%ZC8Lz=Y/G@%Ќ55j!lFh,y%z>ւ<#a"A,jV{K K5g 5X$˄KJNHUUPVf&"8. 
V85#T5Zyا`+*imcV]cPG+ }lـM8Sc d_; £wP5KlGK*F"a\G]a.G;@ Uq THɢ"b\䇉0"nDYb:Rϖ(!@[G| 9*,Q EqhPIZj Z\u6 oT7* OwxM/n¹HZZSŤ_uƔcuc2 AViAZ[F1ZX yZmm-ak k N 'sNYSgMh:,cଭ *o{vp HFq}̹I;v>>]uV7ou?'s[]E0I[8 _At:f)%àW3t ׳0?89 Xp|GtEp_--GyE{/QxseMpx*>wE Tqg;QI>DhUCQ+>Ȣ_irX}?~ڳt02'N^ ja}qMo=}- ћ$˒A O0%~x8 e=]%<4obx_xu57zR]7z|/Dӯ;=/.{+?~GݣߠO7{?~,8TuCP n|xhlb{Lh]M9 }Ji)&Rha( Q!PGAGpsx 60>;oS8\'<C)20~Dx VȂuXQVG½ &c8)v;Yt=wv@-ҏ~ ƉAEa256>F"ɺ 2bbK2ݘƞt!x 5 2 1 2@IDATx]|T֟FGjXcA=yJI6l@O|*%,OYE )JKwlv7nIN=s̙s`0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#P#` p=YBׄA:F<^\\5$xe<ͼ•i m-&t ;hCݔC:cǖ6/bkHWAqOjͱf뾭[[sgŖ2^BG5wR=}tڷ=Bx5~c̸wnƽPro()R)p[e > ?W|*4)m.y{7.~]Pi#OMu OM"9yrGoleқ)-vNZǸ-e)jۂÃ=PݥzGC/-dkDe Hvb}7b_3B #l>](um0*bg3o52؄xev8ޣu#B;wyԅMzt5iFp"F])f &|Zk28F.zV`,KMNդKJm3sO>Χz468e[vQ}&Z&L`c0'ﺫ\HHXb-$8X,=HHH7OI_w#Ts#oRs`(s,nK hxnd{Asjk=#͘?|?n۵Rȝx矤 ~W#r&[}D[QDH0+l1|{''4$%~mF j3TjSf]U9hdbOm{MYdO<y`t[B;W)V:R+ݯށSElɩ-|P\ gT(7.`"| VaBP| or_c}9j+,ڈlwJt=Ê'yYk` ۸97(|kJ_7ܞr[۠~; Xb+8p5*7&Rcl0eGV(4ǝOk|_>nkzlBc<7x /E}VRsR:W?tXxNV*c< ݓ49q$k'K$78Zc u%v G?0JnsM-?s7Bw7IAAS0\3Zz;.B(i=x|>+50R<k c$Z5@xzV턧jM+}]S+[F_wH(x oney%g~@ i{LCO FF۬ ܤ3bt{=#:eѼ\\_`G_VZ+ȢCO圎 ɗ+"R ꕉuww6WZ@6{zkeC_{yѿὥ\}Wy?IX %JX4'~M(BIߍ<=ǯ]b?`12iÙקZwڴ׾i2>|iCp;BzotO/5bvs|zim>SwxK&bnkfoOWb550c8Ff,֔hg`Kdnq$t]'i"Qj2c擪W ?~Rw ܏M]tچ7FL>g'r9a`/>% `ReHGpDM/pgc?wW|) a!v3ҫ*Lc0W^mH+z\7{q} =[i 1dt8R>xʱY}%0Ab%fWy-~Qwݵ9iyp=J ?䦋S/hפ|̋6iUw~ 'L =uÍͿULSݳؙ*qНgLe[wnFX|b+'7f0mqg?)V[v7 `'AvM+k/Ŵu}R,~&i(ձR,^:~nX+\f[/q'b],mƬ #M;ajo[hEϨn]i3BZ$\]od,Ѵ99tw85Q>-kk>;8M^P8鿏ʷ GͰx]r k]{Qx'exo7`Z"v#oNM-~5cX܌;?T_*<~b\1%ؽT-`k8`\2>Mjw=$,U̼~,_2/Im`uRδ2d0RB8ّq* JNDŽҺEĔmMjiĠJmHKp`etrMb})R0,S)oW]DW!44=ӯ.W׃I&prL2I:V-ܛaie)F)W>P]ܫjUR{&G[.%u1Nk0`ϨP1ˉxZF(WO(-*ˉ"xQV~*$KhGZtwB7VvaT u5d+6bm·1Ј@XC )4LMˬ,CN!bOː^L[?Lkú`>Qun-vm|:·v~d, IW%]#g;iVȸ_z\eW-빁`BO39*Zwn|_{hczrQO~(QH_O.5?I@ ~Ҵ޸nTi voz~~tiP7"-%Tx_WGנI:j|6 wU~1mRI+*uyQ;:}*$PeɞWz쬀DPJ0^&F++C.!F,Cq@Ӗ)bGmBL zLCF"o{0V1f?Sj8BtTa(4賨*z"q NE/:G;cs^*UxrBG4hq f0=񜉱q4e2D- kH7>dXPq=ޒ0ϷhD3R{.\9Hҡȇ^h7lrMo6o|n}52U-,Gʆ z @R9߻aDV,IOlWϪYYӪ)B?[Jʥ=X=C3f@Е:TN>Ŋ᐀b Zz\wˌUybnՂy#tu??¸L\@w@uB}ZxLok 4Բ\3_mPCj:l+>lLXM‚e4y>q31 .燬ODÈݗAW['jL1ڠ.vdU@龺J0Ra>?!k>_׷ya ;,J~Ƃ:ڴ,[2%,7dڳ#. .H{l`o,L?mE&moHqy߭yFskp|cQ U* W^wbG_$>f\CK@]CesD ͳP`mFй}4h >! ~Uզ_myo 7@ ,0ɄLrms|1 I䦞!nhlKX ɒ1c)1bާh}obw3~i{B0٦0Q{t::!<_?m~Xiƍ{838.~g2ߕF3=3:`&B%.q UX qERo/=l0P{׎ϑ.{X`|<>ǂCCk*盈{2k@aބ)e{0aG{m[Ť U3M}]1vʞBƗ^r|t*{m3R6XwW}=YOp"Ls~Ry,ʠҏà {)!:5Qn&m3gCvVôwg3D fR}vZN; V=t=qFx#LO84!űT+2YaR>,w~~ ?^ xa{vS)LFYG@7߻!hL׽}9`AD8Ƅԉç,Ȅz$ëj~%}x^PĭE,QM c,i0p[C0N*n!ÃUJKɫkqH jV)~* ja«]ʇd6Aꗂ:pqٿy Ɏ؇ h6y/`rx_xݨi\͞9=8]qk|F ba6 0Txt]Y&~*~$œM8,2}^5TÚ6(7U`rCLR}޲oC}؃2>](Dj+U2Q{T 4ݷ}m۲Q2Ъ{G8'di//,0ΞYL^0KqC-b?d{p0lC+, aȴ܌H00?V?pC4wJ`|4`Q>cIo5د9><\m B,T>9;%_ag8BPXk zYΩҖt<\B)rUiJ<*l_0*`򰸐tp84DMӌ?ikz9]EpC;?K)DAʞ>!uX,.只{k178*ڴIsgtF#|B~mU8#qAA/a{y9f ) Xϝ7]qPR ` B=d~Dcvo?3^Eh(G, zC2N3oGe7'vj|~PBY2D?1[uQqt'[Px(*QɠX ;06'VGselh{~ Q~RׂG9`U3\kPVͰN;w|:Mn5E]2O߶*1w/ i3,kh&u/A-Y1= 2J;'ճu5FJ%21b c 4ߟکT]7G=SHwo4H#^s09ˇC5Xt2-C|sΑPø\BLBmAHo  `%;IF~a-ai/DHG6غI1c>((D[>Z蛃%vOC!ˑe`y_wbb6 t 8*=7A{)dӏQ7v)p[dEs5> l`Cِ݆j6fzW`0">aL_:% \7y1 tqm|j^AjumKm@?+t܀1%7hW>+IC,|{1ѥ9li!ŽPQ( 鿲~4T &9Aka3)+ѧÛ.+0ljnU(6T.z Fzc O~ZMDwwE^h1KtIe@߶nZ z7˿GvOc.6hWHճS\‘11>,iM/'=ۭ8U|e}^h Cchޥ,gx]´TrvwXJ_&MT9/#И`F1V#QmaBX?UnQMpg}Ϟ?!ł*nv %da;ЕKh0I^$qpF &3L+9_KMՖKNF$f0BLr2׉>W3F`!M$=uL׸NF`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`F`́cF"r=ŻWZ{cǖZÒ~kz2_$k^#46Qnl=e!?fEtjM Vp2q;u!&Vv&C#02_ #̛7϶`:OHMtX-3<>\6|p_2"IBy0ubFKiqF ~du@Ӵ r 9~EO.5mDa^ο`Fi"rWn#D˵^Yjk !ᒍq/R,zt)7l+!fkʣdZlw[6™w2G*qbL{l23HgR佚CTq? 
>~BDk _AguqJ2탫%>J_<$'"Ove,w"X!V%'Ʒ-VGk]-8 ey2#| L;J-vDB{nk//]#DaSoK & %"P"ip^oZ\kW-W`鈽ʜ[Xg#{䎄o%Qs,{E_e:0~#?DCXޥ*I|U[@⮫ pyDFo'Z!:*jqd ٖ3VTʘv= XYs:ma\[ٲq'Э8h|' @P>"@LG``ՊƔ_ծ:5~̶TV6|/{:d+۶-`/+ zV U%:uVbA=ΊBrL;lVcm\*JF-nHGf>%\MUw]HQgD]FxkNW2;?m D?*ʲ)˦jW \e9Ӫ v9_?Rdob襰$L<͞45˅ gOʭ.N D\=*SW#B4X/j,Mc!a\8Qq\pNǧ.l%qC@S#^|<,V0M[UzOnkZ^S{qqZޭ2ZRD"pJ,ݜh|h@YҮ"̄w̄[oGPPPU-fl.{hЀK LE34|J`gZOYr\{Bhh0FAsJJb;wwUSI (CY DN,܃ǻ ?-k,?5s"|8вV[PR}'0x\M~|<4 Sv D?'.:GZexcgj_y?(J]$$1MG7Pf/ 0,= )#z#~Xƪ@׉ D"@3e U^28OS3wA?9^;Yr,+8+dAVLޘɓ:J]vpJE0fqq^IIOna!Z5BD"@zmQOYJe _=XMb,fbɊa뛯2yP%>ڲ)8w63_R[~<핕S"D"@ $ -.Ju< e*(_|V"ysD"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@9\%0IENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-ha-dvr-overview.svg0000644000175000017500000015200200000000000027611 0ustar00coreycorey00000000000000 Produced by OmniGraffle 6.6.1 2016-10-04 15:57:53 +0000Canvas 1Layer 1 Network Nodes Compute NodesOpen vSwitch - High-availability with DVROverviewInternetProvider network Controller NodeSQLDatabaseMessageBusNetworkingManagementML2 Plug-inAPIManagement network10.0.0.0/24Interface 1Open vSwitch AgentInterface 1Provider networkAggregateInstanceInterface 2FirewallOpen vSwitch AgentOverlay network10.0.1.0/24Self-service networkInterface 1Interface 2IntegrationBridgeProviderBridgeInterface 3TunnelBridgeTunnelBridgeInterface 3 Physical Network InfrastructureDHCP AgentMetadata AgentMetadataProcessDHCP NamespaceIntegrationBridgeLayer-3 AgentRouterNamespaceSNATNamespaceProviderBridgeLayer-3 AgentDist RouterNamespaceFloating IPNamespace ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-ha-vrrp-compconn1.graffle0000644000175000017500000002065500000000000030655 0ustar00coreycorey00000000000000]ksȕ<XڭHpgR#[V$g3-$A4Nj(ă"(P멱dso*LQ>?1^x{~_7nA$ ~ɏ>w9i??=O{4 ?ix&?/n~zA vR$凗4[ Џ/Y_O_t/&CfFSe}FҔ!/_:4rޞ:M.7߿ z_)}8<d12wH THbǣRI_k+%;>w/`|ܣxM/0f}>(>#on'~;u$^?^<ۧt_b:^OItMs[făˋ :OFv|8엛hiXFإt9QH$N?i^zp$Aqқh<ߏztfؓ]xn// }T>(؛wy}$' s0bCϯΙ:TFsKl'UFh ;FXF(%[w<|eH^26vt{jMaeOIC~ʂhO3?Oarv*oi2vîz3$;Ž8p_ "/OBg9>IOǧtyÔx% 0LqgI/'~f_NsJ>I8M*geD6<m6Dl$~( QF0H/`p>H4=`T왻Ó1Uqyo?L,/{pIKPr\(\Rp -iϘOc-d(-,Ɉ==8#50I_’fg*vbU ܏YFF B f8JgĶY2$5TÕ};+.PSnf̝4 f-0)UOf=*WrNI~'9NȔBR g_>1F.k\[|T["@C("qYm YL.HHϗa8޿Ya`T.Ć 5 ħ/p2n f50f|.J2мUp:e gQ #3[rЧsYLwFYxY`S)V%UIUݫpyZ+fhC]_~U#,Z%ɛ4(o,2Ķ&66S>E}RB1%bF[ʴ ɬkd&]<)X~2OMbmBY(_q¸#8'Br|kA{4a1W+kDXڣjΝEqClyT-QDղN<~ŋ˜m+xOA)bCyy+@>B>B>B>&=UY.CD(F]P8N|\$p`2{‘ӐӐӚXA;?~^Li |)m+)!!!!=6 5VqT~4ˢ+"Y  8ATjKBU}>>>U\ 9)F ҹ'ŸW*`d,~tN.$+M٤JnsO8+RldP!v}ߠr ]/Qyβ$'l%ie'#2"ցdUdXՔev&mtWQ?L֜tQ]yvxV """"h&Rgr bhE3.0p+0}+J?FYxG(S +`o \)bTyp |TpbJQ.{[ 1ᱫ=*pap(Ub1ޕi2&Qp(Z|Vʍ~J.'3M4e_}ABYa؎G]EI,?Y{bKG^,ɺu1eA-J~aF0I;i(5*vU65RqCD Q$$$$X`E +xnw{GHbVdn}f~#ThGةi8m6]mt[ j .t%1 癇j "$ 0ɵ1rl î ˎ5m@/a,}ٺ'VKPR¨!a$sٵ%EhTı%䟮*Є-{K; v,lWC`j,h-hhAC Z:`A{^ƗixE8YѾ%٥=',K̀VQ͎zXMq ,& UØjx5<]ZjB*Z]Gt&qۏ#Sg ;]ZKItQ5?yQE4 m""tC[E"*\4@r.Cĩ5 839n! ~EY^Nzn F) I-ԗs'br%E! E5 ?F9ט+.+abx1Kazq+xdrW¸6έvAA醶E7.\q T ;oCAAY:͟5?qاXQ\$#|҂JBrj=6ncpG)q/v\U;ZtX6݁.%,ظCO(u-,[е+H)1r̶&[@VQP"6֬! 
_YgAeGct@VĆ=bҷ~3!mqTPeCfCfFf4F kl5Hl[Il56ؐא׶5U^քS2\!#p?2ŒF7$+CxӰ1RR4õ$(-JWZc db}j.0 D-g-_] w]=v%dyѼFcU{l@EzOfH%\w0Wxhb R]IJ1<_L>h)UI;V` uBv#6`zφ+D9n WpQ2%րy69MaIe0 aO4hAr9 AIy]O^0(ـT^BaBe"UI8~)R'@(*%dZJQZ* :wr,55elΨ4Up V&u]Ӣp?[5϶iꆄue6JQVR3V0Jx[*VS*Q̩Td-p|6F[n 1Y%ֲ[ضv.L2e1II2Ba &ɠ1d:$gI:2 =TLAsWj&|ƝQ*n(h$*DDDD;@LxHw=2#|-Z.bǃKRp%-$nKUꥦAHSVT6ijU^"ئMu GN-]*5BərVKJؼǍ>Ld.[XoIv9,csLٯaSOH  {],pVtA8g23GT+[ qӪW=AOʞ U@A.aI0 .Zo B+ZЊ+`~zE8Y%٥=iL*IɅdV,'{*b 1b70b1 XYٶXK1b N}(+V۟mrin=[F0=In,<|B V6le!nC:*ƨcz}5oW'-h)wuga+‰xrZQ oBܼM 8[Q O2, eATtA*Tp9cuICzJzHoHoHoHo-[ު s[GosCzCz늍Eb;Ab{Ćq'HlHlHl;wRj1Dq'wԇԷA=ڌ8̗ņ5‡./&BnS FU;7.[tb_Jj*]_5Ә*UY l}ﶢU]q~U3f֖KA.],5 }emBh\KweC -TZ^h}WeQhT!2qd\?(D<# Dd \+`>HJ-,7VŞw ]n*kJqS+2;\1߱'BXTFE4:4݌R~`QYdQʐEϢ""""v0tA$?jK.g gp>nǠ3,ܫM !I_ YDvf7X%oėFbo;iEVGVo9@ A)]٩_RC]LZ#ROl3U UViau]7ZV}OTק_ H2f@TP)$)mih]hU,_SAt6CjuVO䬉#2 I|Y:N ΂4\lXS+.|2WuaǓdTB"~NOf-|&Ã5R2NG 쎒7f\J(؆ Q[8 5 {Km[-m"̮R)h~"-v}`i`a'XAa k1Ɲ`bۻna6wp/=Jh -^} ᵲ4vx}Xr,X=qX;6W)Ҡ`7]Ui3@*nnCR 7 '( Nxu7ThQƚQ 3e7;9 s"kxeą P5oar Fo$z#l)MCol}d}|jaW G}&zm*yV?k͔8Rv܊3&JT2zU_c.~dn u2PBhE QM+FMuQh O@Ú(G^)T\xk5 hK8#G> E1L~3櫹S\dYT汷=H(` })BH>H> 7^xE8?Ssd`\pd. 1@{ z Z{  0"dsxEDP5dOJ)sF%$Yf628]f4t{[:;|;ICѯ.t82U)\#u+Y[(` <Q*.X]kLP.;Y]ܣſ|ߗ48; (uƩNTjn38 /1d 3:gKJXk!4XhB㔱FAY ”0,l3aQ  L^MdX?'?ܥxWGQ;vw0x7 Ap ߜ{_b&(3a94C?Ys8euVDY}5, R@Kh `ֶWhDD될5pC +2ezq̀NKcU VZ+j@υ@,0$QTr 8Nq8nώ$hn0|™ +N*߆].07c7k:xӻ2qkx;>fFkNg֪2Y# %= G^0s1ykތ%6uX7ѫ8M㋏AW0]3a^ ֓S.N` |fDm_( =Wx|Dғ_6蝽?L=?ye߿~+?f/d/o6~ŗ!Qyhbs̩PU(y}nӖRi"FPAm@Kp 0r +v&C's)|G80 FçRD|e$| Вϸ< 'Eޞ,#e0y} w8|;LdcR-thu=Lxٓ,"F6xFr!a-SyZrӼؾl[/2|1`ХN9qϧ-yuG]wQCEj҇z ̥ iY'L©dD/Ҿ_+?SFT<_(¬N %x\ %eN73FYVvO/ۅ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-ha-vrrp-compconn1.png0000644000175000017500000105367000000000000030037 0ustar00coreycorey00000000000000PNG  IHDRKVVsRGB pHYs&:4iTXtXML:com.adobe.xmp 5 2 1 2@IDATx|EgvJ WQ_ذw}BBH.xTl 諾"6`GTBi Inw{6%w]rIvw3ߙg `L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&``L 06H`w"gAL~If>;œh̊@`p,sG, JD &|2 $W2 ò0"Z->C]Ld߳i fy 34s㞦4huu?;RV"_DcuY)I)U"/ Cvwv[I]nnwԶSåH}w/AޓOCHe/, B!ԽiL|w"c$"2p)O}ѹ\JQ[Y΍S;lnu3u{{m<9~uϮk/"34.~gm+Km)9LƂ#)Sy7I%ls3Ǝ^s5S+nM֏5B, RY5McΤ7㍈._D_EHJB7o.|O%cy5 ՚J{2 SJ  AsGG ^CkMe!.cs*=2̀>Q[S,FJC̀۱ħQk08Rnݠ\rzwCÎ_Rmj)KN.4^~gcd{o0U+t _1{ڭY(mWB޶dHH5[qχJ)Rф 0(M7 )> uA3*r}ߌss)?RV'OWĻh|u,?)<>1ԕNwP'A۱O}FVa[IuMUl;>Lm|UV@{WA$r7 ]'^O`4j H9hxf,N<g[ʼL4,)^n߱]̛"*k{Ffˢ;Dpz't 5M[[JmuC5Y"eK^,[JJ<`I=ٟG00qRKС@-|^ngD-@+$s2$1ͅA~W}=\o+/l}2:Y_A5Jԕq/ϹpPC*Kþ |ԣ%*o0!͡YyC!C'+4wj^4,g.]Ο84.z2 k>F(yNA{c6&a\xNG\G\51X^M&-k~ϵABHKrs}9/}q8 -{Np2gK*;J*|Kc,B{+HOe *&HI8c/LС-X7IX=fg ̟h`J|aye0^A~+KƺQ;%Aj0k_Գ[Fs'ҡ/^4KG{ C=loAK*Ծwbf)O]5ίx uPٌ VGGNeIB8ӛ4h'*]VY⇰xn]ΜwO,:E"- A3i "71)qp? Le" hܵwQDʴ^y;g6d _ N;+VsaV)YjɔuE{5Eog*˚ y0\S8'u>~K{0LԻ\|"h$Poׂh4oe=ЈAa <rB@ &?B,"UVUP,E7ѳds:uN*K8RZIn\X[*U_1hx2`l߂{o}bU} D4uט taw꺣xMahbY`8sRf\i`)??3D~|16CfD*CP'8ܡL PSÚ2_'SiZ+;J H%cfk\ t 㪔[۪=R,JC2RZڔZa z{})S:GTo>MQֱSjϪ=Ȉ<̜ GѲ}:]?%8Na@,G`?ĵ:Xbx.' w,묔9sa6  ߤ{c.{Iᒟ# #=cܲ2BF>{!7:qafu=+]?7? 
P{xW{P(6Wа'LH+>ނ},˪P)i3srPvw\mB2vixwiRi}Ay*[,qT{~GNR D!Ok|ĺɪF;7#-EF{ uo܏=3b4NܳSPP78C~&@lj,,q~E z#y]|mhGSoY~Ť0wp_,¸}g5lMa k6Y K!BqJoE 0pzKP?:HbӼqyB XGl736.+5< J 脗S%rR:r=ˡz, ?a}cdȼ;8DB:"h!¢0GPNRp4og0.LFqk+:#<]}>T,[ 5Dؙnbx%_q(:G4~cQCؘڭzi`ũ{iy Q{%÷~XWGٯl|g+K=9} !8Ηf9AvUVv0SW{DPYL;}X Esh\FwG GGJ~|ez"su NآlPЩ"/N٢0DB͟a p`)m,Sys~" wDk8^xaLU)J=StI3TeSVyb' (:ψt9ƛ[LA$o"NNdW~n"Af 5Ώ_Vxm#-Va '0y,?\b{mhA[8KvhCwg@(Q!8hFZ#UKX;;[]5]П0S0t%fnӠN޶gLhh)f v4O砽9=YoՅ9}3bOQG>2sUE5":/T _L T`eB# W2t5sBp+,Wa%}%!v\˃~RK#AA흚 F B:-, i5GOpf-; ޞ(ҕ|cЈ(uA"F*QtjҸq|R_5Rͤ =es&֋4s``S2V<:Z]/ ݓ$NMRU ~]W˫r#G0zXάraEzH3Kp_={VvM>{„yӫ P~es٨75*_s +4@=Ftu-H2tmnz(f;P ߙ@[#@"PyWtT74F4h >-O4 q Zf0SCu=+ݸ` u>}ڼxda\}OJXũ0Km;Ҩ3x(8J>萿[CRC=vΕ7~]WގRH'W>3cNNϕPZ~ *] < 7m B;7p> (Ww{l242pD5}-R`t!4(p̌L>ys 䊛^yz!vMFF5TlsQr>2n@k#jm 0hho/,)YQ%ho0=+zS#f p3zayhWV"R2 RP?f/CG'\?B*Y/1Jl<~ aFAnf3Yq4}va>ɖ7Pv <3TN(UKIpC'nEtN9>ic5&;Wfe?pĺ{{Cs'|i= ^{(2cs7RZa֪R9U\ekFn Nr:wRn/bUrW@2U9L|Vr.f(ď1es!GB56")7qݞwz|gO 1` ڄ'a8r꓃1ڋxvKcTPf:ߡ@"Ga.oC@",)xbX}b˷2 ]tO'8':>{1~3hkxf85\ӄ)Zp_L}|K@{':r8(4#@fُE9mlEz; {H!ʺeviäHZ.R3[[bɖpP7݅p.hm;btѬKS>'l;zNw?PpZan#'CIx `o `yc-y!йĚKXK< &wbLв7ʿbkwPdwB'N/+y_fфe/+,oA6nh5KQ)];RVC6Z>f/M=c/˰2˲-wO+ߠy[cv-tvڹ3&@;R792lWu GSAz_iL鲴(;;+C*WѩZvW7Tׅ|IsseG[74<`-+K-#X$%P,851Rb1Z,a@3wZ`@ EI`yM.l.T(O|\RTL 0ex g>`-W:H%v@e"-6,xBa { hf,5spL 0 @3XZ!u ӤSlO&>ezɹ!'ǙPfc֮ _$,`Lq\ξ`LȐP/ɍ,+RB/v޷З\́VJ>r X&Old1&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`IA@O )X&@sOC7+v Jo쬍cOߌV妔q|/>zosTZ3:p-$b2&9sL DM {,Om*%mTKr}N͌: vWAzF1{vm,-Ti7&&\n6-룬)S:ŒHK 0&,Zfc,X&@V2e&u[I"%SWq&Dɿ>uj uR4r#PT[5L6?nNL&ϢCpk1ͬʏ?3&к Zw8uL tQ:KYc[ϽVU>gOKOfRl>0^i wqIA|~t)sy4-D 3n_k/?1bL x,,0M2G k E)b\dJ.Rʙ%|b$}Z_R0'-5yw$4%j!嬢|ϽΈ) ]YGIu#Z~[zWp&LQ|a!FojLw] ހ;n߂bUhtKY0sZ5:~IaJgb)P2k[:PHAE$Gx L񟑞{W+=Sl yn2uE௎tK "( C FWO"z.S| p=.ELe*4 Q7Ѝr1d\pch2_CQM.9:VX2غHR`!#~ա(VХބtTlz]4ZKkiKi v0kV@ٞiﰉ2Z(-C}+l0M?hgB_d[A7uޥ y2ICPhByCQ# LXg34:%m~E P>/WJ5|\Tl-=+n?fuQK ;`L Rs@DRY3/9,0R?Й @݂/Uw)-x43,]/7QXl;]w. u+5m,0H߂萮>Wߥ7Qra? 
;OþLEUWYaV9 {.eXXf^Dzѩ~+caVޔ0/Cʝ!%55dNǷ4WWYkKJǴ 0UULTQ,wt@Q&Y37 `Y)YR?K %*(-U'`X,-f,?k;ðOS#th{&@je+؆DI@XK%h3=GG|{9nG|E'V=2|FmJyl^1* ̶Gc4:CFaZ WYE:@{K1yr/ػuKЇrp8Anv8~~Ș5l՞3 4]ZoPϟ?_ #{ eG;hBkދk?kr<6]kI{  4EWa>*Ph(ӆ?Ӑw.2;'="\ t.kH 0&HWd tb]Z>XlNCwG8~^hQcV׿ɬ2l38~6AS2I >2JorF3=eLP^ЀpUgasA>{WWܰ:λa PtM%~_FST}=ˠ0m˝j|9墔YpILxo܂<~S, R}i 5pꮝ a8y~?wvhFDE 0&mפHː*G8 18Zpe`E_9%{'R 2G4ukwW8ʺĊ^bZKj 5p)Y ZUEK;},텼KEgYv ugv2#PZXq=ҳKy KwU3PtIr}B2E,¬YiXYʽZYoVyƩ@m!$ڼ">yKVgYB E߳l)d" C)R=kKdʆ,E=|lNp+4x J@2CRI{EbM3b:l Nn ^F|(_hn<Ď_U eA5' B}pgwFۭJe,D9rMbL $7YJa@'@b0pBHX콁nb20n+Iϐgwt6-E|nE.V4-7-@E1{| 137?lR#gr%5TOXw!:>PyHQD#E`ARi͊r)O?0_m l* ~iB9i+ӕswX(|B0QqG_XM!0}oҷ8|e}J٨q8нd#K.;6n֨>@Z%Lo%%F*gѲx\U=s4 T~;nU$MQ"4C)Ys?;5Sm(y_78w79\0񻡙vJc`L 0D`e)ф9|&Z-R|60&GýCif4H^(`L 0&$0;y7+(K}/ׄvHV|C `Labڃ7 I`L`e1 0PBܥ_<Zn]C9~*Ɖ`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L%M!y ꧤD"^ 0&bTrC7gOXܼ5M5 g 0#)K^iŁB}Pr-Ut[e-K`&F=L=b%ڂz{?^[3@hm~B,OȰhJ)SZ Nd9`LȗVIK[ 0IqW|7+K%7=,f`L s}XB!B®`L`j; ?58KaHj-oDޫ|*scL 0%e)jK6%i۽$  0AVL<>8KQj9B2&@6jSN ¯L 0h)~%釯6& mkHk"2&ZJj,F%U&Xp?`L mhO-6%/n:{X8&ZP7ZYw: O|aQ`L =ڔKahAZBex-(KXT&`L 0&@tXYJ1ܓxgr}e{s`L $0O<F;R q& 'Q#M&pX)nwރqj׫4WZyq"!Eq lLoZjרwPB٨03`L4E>^/-sݝ񽭔:Q׵SL}3TMgNjߨwd,@IDATno=LJ,Ň#l]Ey Mt+xZ (g#A()+%'4Y88P&@k#3}/N-;m2?ĸYEș@`e,Sx+{P"bf(s}>,ezKl~/Wt\ըOQ(_ 5DYv[3Tl:!wͳ#2OufYV J>XݶLH9K>cL=><i1s^TUJx7!CmJciܳLS!pѥ晝.Wn `1 gܶWp<9LwK(Y`ەzaW8D0łz{9'뽶,L 0d Р:u`ԉ|yr;S sq.F]vE[;חD~rseuBn;u˝q%NsCTܑWE9 }&7m7Cw&P^W!w~ :#^Ea;Ƚ hJj8]?o6˔(J1*R Pas?LemI G.Ek,/+Gi ~ K;͂wKndGn)+ 㐋~>Cc%gx@Р"0,!䦾KZJ3hyc}.| !*e(3:"4#|h]m*>$ռs}C}iב-VlZ%1&@ug}u\]=x={Nijک'WQ)F`<HR?N(uKo]a !v GaY\dEXڛxdfs&,E"q'Z,N}&e}1뱯cគ/Լ)8ЗZ0,4:Y*0B*tgrGm_mL_ag;W~zl20@K1fw}ssqfN_p03JϝV[;p7*r4FS^݀:9+tVJݽY޻*޺&Lkͷl*YT+V^O2,ڽPDΒ@ F4e)S.̿=EBn)X` ߟZntp~e/*yX=O 0&Lb;PS[ʛk%F1p"%(ԕrT"gȞ)MNJs^s;(Tsp]t*e)?F. whfzQvv`74Ła?ܱ7h7r3|1&"`ʽ(&K.Fuf1u(J9_. {{y[ޣg3QV_s(J%gAKdžmk CQÇq5EoX#cfmF$?[VtRC՗;癠t;JLjEvkOT{zذ((Eh:fJ(; 0&b;[) G{.fG>HY,k%?IQUu:#nkf{=bl&d]Qrģ݌,30xf)<6M,8p`dF[vB픫S -)njEB\J N.͛mPƼ#P1+3(TC37"i;O`?U{fbhMZaJM[4 bVrR:i$i4erH1 B\;`L;QaF]YǑY)}U p.ŭXByhO~3mH;rw T j^J* +UT27saltjϭRE{w0cskPc/l;85GazP,ᰊqw6i! (lέ9znon @Y ¯#p>Wd t‡b]ؤZ%T},-,13+Ru A&J;Xk%e&|C!s|t%8*o 2`l*0xU%X)/RFa!گU5eĝwuC##VҖaNJoh;QisیP/ZMx0#%jr(@۝CYs;v^soR{,,M4wRP֮,W; Q7n7 E(z]َ 4E]djRNitr$,78e>?N{iglb=)p-'ܺFհ|n3猳d=d=cjK9TF+x?{0qG(KZvK`fȸin?+ ,[K춬]RZOeVX˯ˍod|1&ZNCQP~%溒/A8KiҷǹFʝLWXp,cYߌ0ʅaa,ΜW,r-F$tL Y GFx?Lgw2'-M%:uNo{7),+eKPM)?MӥkkqbY85o(*;ɄxA#q:Y);qp%T&4~)8LQ}wFWCG8!w}D`p1аy/6~j)hgzaykq\"ճHIdNE:K n 7-˟ӄk]0Tʚ5: 86&# -Bʰ aB3wlV& 0&"՝q{;9;D;aetx+4wWKm6zyAs'ey .VkFTW:r}cJHYܱ7j7r #3KᨰYr}.^ Ǫ6@1!  y& զeѹ/K)Z}|ϴ̏hE1|\Ƭ TP<4E;Pܱ7n֕Nk6QR" m&<ߝ! KҞ9^ 3&ᵴR2&`L 0&$XYj `L 0&`- 2DPBܥ_>`L 0&вԲM |՜/&`L 0&PIqI`L 0&`L !R(l`L 0&`e`L 0&`L V@a#&`L 0&+K\`L 0&`a 1&`L 0&XY2`L 0&C 1&`L$_r>r@wU~l҈y *P([RvƩBʾm5J^s:@u}"ٖꓦ!Xݫde5ъ'NM= R?^_8&JRLGʧ?R_-JYzM+49ְ^RJ!#1CmnIaY `mRi6 ގe1clJwk*r̘{Ҷmo_kk/W!ӴfI{-'|5njCOJJ?;7JKOmYg[96IFߓ Tmϙ/H)d*N(~b,OkU{! 
ۭHK՛TrFb+eeݲS~5y͛{bBn7DH)Ӷ,πL,,U&>z{x4^,Jj+I}[Nع,K^GolBO=ЗkЀ8.S߀&?&F+s5Dnϣ{".4c޹Nٶⲃ9nۗ"MX]I<5՟SXViOn^9v;ݣ QgHR?VºtK/%vSԧ&HCsۯ>S߽oF&>9S?x]]sg,RE:UT!")u,ck">鏶B=41H_KKZbvp׀\k²J]MƤqՀLc/IK`iWvN\So)}ɸMfeIb)QJH =XnDX%s@ĉx7,8 B^$?26p'RjӮ5TM6M5rjEkOnjF5"kp)iKFq%VDRV7v?ܧGgqYG9-6M2u;wヌuBA)'* !"yI>=(Dp@wػ{1/>U۽G*yZڷ;u3ۨ}N]v5hБ!)T̝ ee^|a:i|N]zᚥDA; !/ yuCDܜ;` @%qH$&{q"JI_᧟{=Sq0IKٗoԗ@Zʒ]!GѺINv: G(KB^!"y9 $@>IMO.ID&{q&Z=Quy븍sqpIK {jס=%}=QYҮq!N _$Pűۉ0!#33ߥRǹWgyv)>݋ı:Ҏ>Cǽ+Q5pK+'P{O*5Xlʒ]!SSN;J5M#ܩ{cZ 4eca';w: ~yv)VsWI\LXVmMֻqF%8D {ܵ~ mf)^R*KȀ2,|p8vHOURRd+5"O2Œ]v}Yjz$KZb?n{q,W WJs6*Ι%=꾤n%eɦG$c%Bd3Md@SڹS;]sf)YW{(uI6OyE@|u'?ֈab?cYbjS fr l*-9,mT0 ?6{[ffVmv9cfK?[h3˰)t&d4;.?W% yY\suƭ*0#ԥFt] W7'?.QV_})ܙmjGES};"L1Ľ '"o~8=YÉ }pɉ԰0M~5.e"G3K¤kzFsQT=P6Ak_$ڻX ; _7]v{j\}Dw17Y$b@Jץ׳cDC/rP p/͕}ïi"É `_2ŕg Z_[g߉S -O}I-iݎ,i#:+$%='G_n;uعK*a#J;7Rȫ QRƍ%Ͻ}oi:E)(wCG۸Eb#.VUDiYX~*NY4ġRYrNrFq-^ =X xeI}Vg e #M7"` ם)J7RΘp^{ ױPNۋ_WnϾ a˪ڰ.6Zl= S~Sbպwػ8Env|KOEY!f}'eb/,~r&e]y,R⛥+g}[IKMW(8?03d9w?v{kݡ]zbepd+IŏuDs[I#γ.*_p߈/ALG_~#Gg'\zY\qƑ[m~oX4=nC th&GobeZB2d_0D<"[C{Jż>VzuZ/bWKuE4#xQ ,AeH u3i䫱^Xb' ƻ.1V׽*J!m۟}[ЙHۦ%$~0V2?gwʩmXav 3Z M /ШThn9oii죄US^bxoAa!hk6皿YhAUѯOկr,OBDcf7K1㲗xwm=7:rЧ Kxy3v⢲ڂŦŮwuU,rMvܝb;8.!vY`$IHN؎mY{oF莞F#it~O+{}~,+] Yo mF+%Ҍm˗c%ǻcdR=&Qx- +?)0?OLXQ6X0Nj t\2gw:7_-XnYriV_AzCGzs˫l& Y| ݰ[G6;Ve$P"7mPu~#CgCͻ@łuu0:#[*"t(GoŕcsIDϾQ>VCY׿ ^3J| ˱֣ Х I7F rtr'6o!ΩL?(9{u6td94ښT M_H- ]=CZ, .JG\O-.\mi Bm܉`nW"EFZl $@D-,ܳJi".y8:vo^T&{yV/D}Aħ5r\UNkkoI%# cOVMƁ׌FSJXd*(L p h6F r-{{:ƿ@9)8vV1-=%IAM0S0Y஥ΫQ-":s`27OiQ O v^b, >&IDA^yJg; Nc,8_ А4rCw h+K" oԏG4}\ )L|6S̝$<Ao,KRQ/,L̕_3{>J%K_#gTӔԍ($~d2 ҒEXT(*S6A@s|ϣI`k k ˥)ӍYC-}2 kX_osO 28u?F_^akޱ)aN1Vٷ;;߃8`'2;iH^{90ӟb΃( ͖Jm2W+ e)݋DV8p KIy2u/J67z;s+ftۭa=֐)D,3A`ph- \P2g˓߸G "XQHU 64 a8!(5}l+,V(,=EoYWхC2 A 49ܾz@a vo=MZFך/Ӡ[Jڽᣭ)?tSz7rN9)1]!yΛoWg`}U֗D ,ϴOx~|$:xR],URM=#aA(腪|dNS% uъK@@.f/HwD@ 4 ; BJ4yCkMөAfvyc﫵5s o޻>)'ɕv a/,[4!s%{g.o4sO nRUγmAKks?mi0pCJ43JLK6d;ṏ X74&}?X G& s< 7+ Othh߇K(FsɊcKSZ1}#i4~iQz`ev:O(S"~8`'K aG藝!tJqJ+ҡ.͋*]@ qY*Jfʼ=HOb?N~~^|%F@`jڀ kev)m7lT̕}L;r~Nrz=-Y6 y)3& tԚizz3E-8u]`o;Sp6 ZoS(2Xhk$.>KN\@?h{7 CTm8{DiIG,{fQ4 ,F P om5HUд$.r*-mr?hum -,3 DhSn7jL{.R+k4Ѝ ~XKSh4L#>NYraA㨽-X;o@~h8q4'"п9nIUkOShz ϩ\̚ J*|r%:=&S+'*e>` Lx}@,rh<:E^B;rnjZHr>(td`AaZ)m4OoȇX |Ea#n\0&"Tf#ZSq1a4։!_:D%4ۣv[ܠ܂o۔uJy- ?aʿO?KzQ_֋xNތ5k%z~FsK7@`tHw!uI?zk3 Qў8)ՀƒC;"|z_ O>w_a:5Å O^j0h z|7bbMI FI0$o=g%a\OBk`O=_Mnm}k#yXn{i\ƪP~DQ4vB8Ԛk/jjy2yޟwζmji ^<~x޿)#j;5LeǙ&Kfi-rgQHG4d[b0_B;ԬaOY WB*%&(%2J|WDof`30) 7q)Nk {2`0`0?Z[ZFmfMIAkD+{[gζ[lc_M\oy K` BB` +SDFQGcn:c yI@'`IWi İ2& m`MDA-{ ~~Kh 20`S  1Dw-1ĀcU D&")^db``````````Gb0K=M          €an0`04=]y````[bRv#yX!Aa0KA?&c%>.Fe#/,.8/ݹfJ83{pcR"h{8=5Y%rɰCJ{3]J^_2kFZrEGNԱCd$ ~?RH?Lf.qCDJMMǵ򅛯 X7-F(0>25_L%;#y^eQqaYv:OD2=wa<2퇁ySF7?w,3Q6>(m <*EZya!J+)>z oo^i2V+k$L_bB[Ug0 Ka Yj8 ް̑^>E˖Z{Su{Ef.讫/z960h' #9}F_B1Rmޣކ]`zjd8,㿼~}sd&WM߾Rl-?;+$L(Jb 7ˠ^rUOʗZFꫮ=ݫ*9+`]H?WD0rP,#qVQ֖ޙi2jpg0 Z€,s``o WHZr?#%IqLWג< !On%`c1@bاz3oHLI5$2A.^TZ*lh /n\yHnRnrvs+B9[?zZnbD j[[l [F/;s*Wփs:f)t z<r@DS2mPyD3d sIf`0I&>4y0rǷpkv$goj ~~9]*D`hY=|J&$9ij0ѽv$u!?~h(lyNɬ #d%֒I:t|pًP R!īuɜ3YUF0hf.Ln#OYȹX?`^]gT9o0`0 N.4KSD()}LL`8y`ws[a[53=gOȩNc">zϾQ6:(O!IRX7 fgi@b``,})(Rtywt4e+2D@``H^rbcٺ6Gɐ槁9Aaq:S4LƠ-g. 
zY:y11qr˼/ߌi`rB( w[o "m LGVCzc€Y ]f0ZoP  "W>5)Q^]]UZҊ*EnWfy#Ud)!HYE5CK!4ڤZ4 D#*45MdϒAs`0ЀLBE(Ş^zv \?!/0  ~L/'+;a"|^G"[x&];oX&˲ ~1@&j_"qLӗz9%is)imϛ}c ~1ՙ76U֩p+G] 0KO$ygRKps9/JdAt?[E2S ߙO";x2e;08Bxa{␠-aKOz}]S]M "z^i4nwёվk@Kp'^IZriPqg{4 Ѫ˪H\DE'S4Fj$Db]6ywIMIESl$$z&;'_5;OIe;w{t]".fu{Cjs83߆ICԭLK^295N.3z|bZn0^4t)WTٔ[LtDDIFWD`ceNwtВ gʢ#e,X{<)$.G K6JuIv]OvP?(.)G gnk&BE|y$wDwž2H c dI9vԁ1Xq@@!38zj$eǸGai+}dA5Tre̘ң  …Jxϧ }9ĄxɆt׆pừSXjڝvG|%::1KZbr[I,w;yd -qIɟX'7]1Q&Hl,$&lqvޱ_۔/ժwHwf+u_mL\"YR.5)Rbb}8%Q"0{=tr5~ܶh$7YcZ)+66Z%%EWƌPH/ y(5}r}F`􅿞r #Csc㫫S 5% ޑУ%1ܚ/o)JA4HgnHAۑzV-nDq2mz l^ԄԂQZ>YS?x!Ƣ_P;RQ]S"Si"&CОb%V?X=쬔RL?/_jew Sr\G*0I-A "m )0OFQG@\(͘ {P1߿~J^i8/H< JOQte ]%^}3RXSZ_30L,QJw蔼E('.*1Jd=jɣ(>BV<)˷Ⱦb78_JRI\V OBǪPX%Q0-' Kd_l_UBuLIiL dt^TG'H]T QF]adG/g.PI#)l{1Ցg`ȿ߽Sm^~ T{>]5R#H²h\ '{#=YHsoSw(E>UV++wȁ}UJƩa2<TSTV^)oϓ4Q >ٞ}Ib0#eS9hj̤1 OW$fscڅQjxUƤ8hJIq S&Hql|Q gO`Ӳ)2k-{co0pJ8ISF|mrXi^AxߌhZr%)|ЯwX8Gd;Tnb*&ʕ]>U-AڶO*kd,ϟ,H8wT^` E:~jt+smB{PN{\z*= q̬ @ . ˷ʢ$%:2p,6GPX\} p0qR5j&MRfO\HWCu7t556ـwUR@(?ʦ}䘣s=mϸQ6]'[S{{ٚʺ̛dKrFZ' f>-@+=FѕXaLzDp<:28YEQ1 Ѡ̊*JfM l| 2{Y83Wl|75E7 !ç/HʻExOVr=sQ.:VҒdQB?"0jhnFo?xZE1@1z:м 'q>Յ&Z:&O B96@ٛ`ww;B Mej*>OQVS{[>ZIbƞ LQW"kv(V1cn2D͛ ]2L7rp[ gRȐ7"la!0Q7~a)RS?Zd}K0$$hR9rѴZ'Ђ>VF=/@JNBj{=4EۺF !Ã)I&T*gj N{T>1R$0p#'ÉwdHHƏusOH f&xG,EzLLF`*$/uNjh7¶apKN a8)cȼ.H ,3Q6:$_Nm+/RŨ<뗚!>7DjE˷7LoB3s_!&4aKB9P@6yFtҗVlS3 x?iHDvo^v' H_P)kv[SObOTM"O+:2.ռ L#*]ΆIԱ%p%ᛘ6 Dzt2tGɨC܁ ZkۄnIN턽$U =Reo,ؾ׋zB14J[+e~ď QcG'"x9d"" hj`7%ɿNs3׺$S~Z;zq\?op=󦆓4" $fU8bA*1,ء!Z%/JO3AY31*ګ 蓉@ =Xi!oZCft0;_m8\Mc$|,˶CSux(x–%߾LkT̿&z\7dXh _yM~t4ᦁU #6na@n6}&E$i5a ^-Yڑx d1"^"L0~?mrϷ4`W43pbmu(Y90F#?XCH\EԐAʍ{K7 9L9}B=ЮA0I#1lYe01Z 'oQ>ʜbmwkt8}|2׭yGTԠ~ <; j> FsQSDB"uPE$e.? ^;pl{T#d~4td(Ĕ3 g* O(}0yx0N>Eμ^P4 wOHwٔos#BjdsF7htl}m`FSEQlۄN."zm;@H>ueؠ^ ?Cܵ)*Ia, T!/M4hgם)49=gN o 14+]!) *BE2Umo7hDoL2*|.,/5W/p+u;c{%P&\mScs9,H=sBvr' :?3@V "d-dp84lnbkO6F%kr#O}PhZ@.JlKSG!]CjjaRuU -R`P1h7h? F_m+9sJ5lAoXS~s]?5L[L(1%(@^ ԿLOPTOI*D)XUfBCȰ'@g=X8=c0e YIK2bW1}~8QZ6&o1Y!`0-A]Kp )aTL0000$51Fl[#s'jRp132$:֏$)Pa̛2Jz!}$'Xx4AZ7ѭ?f<?W&z\X#`40z]k0ٷG[GWxpE ߩTDe#skc%o1<|$ }O[[js9O0?Iv]a[}ץʔ~}H>KYӫS?Gʍꪖ)&+U`Hxt*>ō`깡Zmz,/]r7VTk9d2 W{y`b b f;-cC ȨiK R`)2gHaI -OLWVnӇ׸Зvu{f)j&sZ*#\";"a3ߤdeuE@[^(E'8$:.Q@-2hv[m" j_g*qVBf#EjOt R PӲbhΉJCK/+LtDҕU[]v],HO! sFz$t)ǮZ_LQum*"CC,H }L_W+d05\9$Nn/N;p&#Y1 :䶪r0b<0Om5aTu-$Oî)(m'ql?Q&}HLl1tQ 52oeO6ZUJ]M"FMiG$=g.2Jz0Jj9tRfM)!%vDenކ=2sY9&!Wo{ "y5ohaç *Q-Ѷ䝉a(v\*?Z;YƻN2l79)DP91ؗ\cCҗ-L%,!DE*Zṿ/*k'26;Y@] OK5^gN1fvj_d?[&_s$=^LJke1@2JEB9[+CRFSgKUt ,y? *PߎE=6)rI5Bpʊ#vV)dk]`o/%?oo WT`&i*00V]3*FbIQj?Im|ǒz#ěL t h1Ny̰Ȍn>&jlB\+ɿx}\CfGƔуsW}\\4GE3d΃RXT^[kv"gb ޚ Omv{-DA^$$0R` WeHQ'c;o0O to fɏ(Y TJl} W eR$%%*͑7V(3j\\LeTͬp8-X#Aޥr&aT,5D|\~N EA@@>A}̖MELR-<qHT~ňw3lZCKu3i vO||z̬\#y`6|♁9~w}JyKRx>IVAh y` T =+W'S#2K0ݺ27:Efj9~L3XM|7szP)R[Py~4% [ʽQ0?Cw]2 ȧ?mLΨbv#DHG`0;꯺30P0RB ( iޱE+CZ&~ *x?Ϝƌj0vSjNt0ІTdcwÏzѹ~d"j"m=Zݎw$P"1}0F˩Zlx98ӈ1h4F=6?IK2,|: 5Y|+W|$^e@t/UL أh8=Kk1|`_KmIKB$a& ٜ"P0A%X? `Z2@ze˧*! n%""H GI!s|𚆵FʣQa~a0K~ɿڷO %C%{ B:AE %& b-|$t[tsVx0#=]cz-ﯟ˳P׍iXb׏$} sPdF +SEKAP+BY8R1;|>xJ*;iP*A}1Cd4Ro T(`v1 9!Xf&g{iUf+:O!Wf'WĴ&r=}`'+]Mo^^Sׇ?DHk<ٙinFce,F |SǨM0ŠǑ7!э`wcs~=y-O](2{1R`4$ |0!h57yq]aƿZ>>}='?~Y`a1q]!G!#Kt}g!)sʞRx@BK9ô/D[z`AQnrHF *@%>BKVY(H܊te  ?`e{3JkQFf4P% [L]:Qz.F0K=[oaZO/taq`:6r HI}cn$q4Sdn>]'ZE~mq]YĞ_G'#jj(GSp/sD= C &^ښ{vGE^bρ{20#uVRlCȡuDD6 })\V B㛪ӓdpz|ߝuh3"+JDbKVF9?Vqm ;z9.HarP\#A}AcL˜UNjEΨ [ws|sErK_@WEdHf~W q-\%ϟ [u[p̉6u{bQ8_Qy;3o/~$%@2"59}! 
_?uq ޲`Tt|=KEβ 2gHaZjlw?ѝPb|ZMPΒl?-F&'Vր9ϫʄ 7UNV!yTY0WWtUo~펨ߺء1@[p??英QvQ =aD:69zQBB 8~(^ p vU|i ljܛl%H4g ΋{m ېoGPB&p9T|~%O,A#݇YsʸꝒcf+/jfiz0EyeDP佔^w5 GmH$ :Ǖw_ pKOe0R23!UoT$](CdSR^mev!G1&d ҬUÚ:Rd>}49>Qkm6/tk[{̒uur8i-IU$e#zI"$#I{K`Vn8&C =\'8| wt3K2<U/YWr``c1p)?P#W\[Q-;[-9lixj߼' ET;+}Ӹnuػ!ha'$h~ŝ8 mYj4\`E%c]7cmip{tq}|(.Q}LwbBhLeQgg`vK17) 6.wƓr WL _G^fʧ+bpBL;6B}OfZ%;TQ u`78L*ib4 MrΰUE 5~| dLMqOܦ63k| F)bdl&e_͈FdC ᣖ_tL[``? P(/rBZ! i:4)Xh LUƽC@fj4»fc{j ljoCU|Gă*.g6˗f$+wg-u؋$p=)f7{mN3a/*yy<@+y uU<:i>8m^;{z]"jOĄAp*}]IP5cxV#K ! ϻ{Ff8߯\z=I*XE,gI2­'%&JBB,Z[BR cH}Lvoj sș +fiP% C d\:{`` 4 Pj]HKn$WUdh:w{)O#qJGH2?}Ҥ)]H @6l04X*A Jhϖy ~yT缵N[8`bVRR3$jo׻qQ;X>)y*b*!\x/ /u!tp$ ] wB99^4 0WB[=8 0d\J=¿`U^3!X ="Fwًo F)QTRSS$99I5Ȕ’O2A\T]rT=^nfwLLQHéz\e#h(hzW]y.1Ry@i9H/{ki"wOGzID(פ}sNO/"rw*)D:P\.14dgV" )ʎ8_/Ef\/2ߘt\6T(~2CcsY~d(WOmõ7␦U٩ԡQ* Wk{,7tyy;胵ͩ$;%Žl'#ph)njv7b%iN?Q͔4IOOEeG%![Pԯ:Fx;F"@L4ӔH[ᝃj\nip}9 1;=YȖM ؾS['\.G@(WBQTA/\snu%9stu&0to\r[3KD% Lb9nFuJn'2#\뽹L Aiv'\[6jJu­Hgt'^WˑR8k)3BNP\$P-f wd OGbBMj!%vۏKl.x4ĤHeLTG-:I.:@H=‘׫ ID4T)U0c)Q`oИr(y +Y&w$WH:D+\x`-Q,X&NULϑO:@=tbk;uʘ WL9~el>,6E~JOQCZ0P\Z;~ֺb:20L6;Ҙ%ZVlUB]fJKZ;sF& TAN0QL¦wY/{+S*s3 pOm:ПHزo:":W:9P߻iFK$ժ,cNp=x%á_ LO)NUĥD;\ $&rS(Z%\meDI\4.ǹ$Sgz$ X/GJJam^JIEhNtX :Qlv)n-Dع%0LId$<K2UL{Ҙjs8 CYݶ3 ͡c_aUuz 544; [Rý畞Bx{>_mK%+3hZs1 ==y'(F"TSDna?DE:G]ejf:/$fɃڳ5Q䐓AmD)5%]& /J Wʍ#q{0S/(`MFb0XzÐt4Jii̅R<pvlC(@<ꪋQϲKnW s_n5E#Jb{{=(, *5 f8ϒaSl\iܒ)7Ȳ= ^qy2!.KLd;ƚ]J$0Q"i&6:'$C̤h0O!eh롥Nh&2MLp? zژj Gz>TByN@-uf&j$Lsuz{_:B4twv{j\RXTy?QhNxW-yQ!fȁۅx W#Yr?ubczvy#BnU~+I~889z\b*/(慺:IPQCO!zk'%2yF`#Ä:$ jNܿS7{t\#[wi繀`\HCwT߳=\< -ԨPH@QofwQ}3+ä;e:o"3Xn>aTP; =Rc Ȉ>#;iqiZx׾F)ӊ>? ܐ# #c)I kSvЁa4A풌I<fB))IJIjs!=T!sݾb4ӀPԳ Nw$lC%(ʺ(WÇz㞘Ⱦwݍږ8'4,JÄ A{jx\|i73~fl6INS1 {C g]bVk4-*!|@Ӻ1,&=Lb2FdɬXJ:vEd]tϞyioTCNC۞sK0 iwŋ+"v'1Ӟ$dk;0H4 <# Z:~B=CD;jGçN>!AJLoV|z9Y_$#k@O#~'2;nhvd 49|PC{?%=כfdwټ./%V1J6f:Ln(̈́~-鲹fIP  %Vס5md}e\oh1߽v^:{>9s(. ~{ƩEz-R-2AOƪj[\mc=1H #sYo`R, Z$L=Iʼ4Q ϼi^ktj(oշJ"fO% nm:2Vtg#Y"NIx훯rǗr loGXP%͈ݿ(A BRq[dk.wi<@ F)t.:*;*6l%c nqSơ(=P?;dŨZYNtV.{2M$U^3Ij`bxl﬿jH吙IJBu;TBihL^)tpo=e7̘Bw:{D&{m=qsI $2;k>q9e뗽sSĀC˳K^9:C>܆*meraY{jeչrOXPck|ʹHrk^i15zuV#u!vPUv)yY03A/R;TttW~si$ yGi};2j>l_oZ73zB 1[(haZqbId$. Д(c^  I&Ir3vbXՕ 4z4qBy^A{C/pG;O޻CX]^s3- {2HS,~Yw..G9R诤Wn&)#X[pі;ь6cmplи A"] }z71cJ'6Ke%B/E\Qg 5v˥6,J \}]^\3kBֽ%Nye_Nl c"Ykc=nyqg).nޮ'[-wRtֿʍWL>HKm:+#I˙< M+ݒ+YN^go<*T ?JJcJ0џnRP& QQԨ$\8FsA(^p,vmZY³i)4K~dPun`2|?ǥ?v!Cz[Q݌w Uƽ~WkkO璘9H% O9ɱ{_ݾO"m.FY%myOɲ{ bTRx:|s'.FaZ\J"9j*>rks_yŋR@qJk9W}:ݾrʊ-| uyG%s%33]HEg+%N`\mj>xחiܔ{vNt|qRM-)&<qJIvݟTPHcHMhf.4H\oNa}1@IDATbyW~~1q #&R&*N8<4=ڹ4zM <' {}H P|ʏ׬|5o립U'ijq8dq`V?wqk@y{;pzAmU-oƑ{9#c*:Q*Π/ޠq}ǔ|BV{HKQcs^ 5r0u:Aό7XI0mUɵs'yc%#vy9SP d8yٳD.WZkzG)׵hF9GH/sօW>܆hbW?ډ`)%#]6F*j:^,ie;ׯٶs]t,SJKJpefD%Ņi@7A;]?[tosUuTLт}xoVX8hتY4fYdutΒ ^O w{o#`khRC޿7szc@}'|yf\Hg@*٦U; ENȷC͂Gt=7^[f"ԇঙHu.S=V\wyu]ګݝѦ`ߩh4.// rh`ܿt -l1ikhO)&M( ӿzwpIS=67)5-+!))=667p8jkk*+KO(9qhC{w{)jsIG+*!g A[¸A*)3e@H0WL3a.6a{T&'4L%U SrR#.#Y&Y\)"F;4 ExpcjQDfDI&xw&bcxIh1?M/ɧq\AxJϜgNW3d7 n zI\oɛ ֜HUԴo GnzQ SQY-r4LI 1'"Yb4O-&4@S`4Okݻ~-(i_/FKYFLS$kP=5&`v\xw%d%J:.,=‰]}_$} z*F Α[\&ޮ2/ ˡa~>">z;lI۸ B1%@- I0$ J衘1ޫ,dKeut9tUNt'omv۽5 ՗,83Ixr`%eЀ9%tW M(ʆ``I.Qnп. 
Ft7""^#e-3E"-UD;H{$Чs;~f#'e-2aDa9fDΏ뇜h%j&M^(#AeD5vX,!sKDa6lkJ[ 6c@F#x :䮾rӠ0i2 6}+5|6u ^2re]vH1{SG+o"M"H@I87eHl4`|iy7ݗᵾ$]R_fx4c%'Y3%*7 gC}ܬ{HӖ=U%v*ԯk/6%%vo|=pƄ&i8dI e}~w_ 089oϳF B{q=So=xNa7F {k}e yfyLyBЋ)ɿ/+}0| 4&'gT˶HDhtjݲ1} ,ia7?$J y)wP˥WzMK~e[?5H]rm>|7J2ziyq 2÷aHK$7"}\/^+GH|lp{l"X*/e~ |kP}_K ^K|FԨPi[b r -9h6*>R5+搑@I 0_nt2ΕB|:oycl4C5u%PhL yPͩ>`Z6-)` QE.轹j+soH&'p"4}jGLC5K5HHHLeC`om|%#ţ̘ƼVytn]<]$8T\؅u Y^qcSHiI >6R;$ZԴzi fx /%ӂ@#INj[|iې/n%6E춗p/m~Yrl4[ `.5Á"~`&F%]9O$%.S{VѡkFYeR yee<0$XbBQ`e1#VO%jyi:&IJZFQX" P >]JVqoS%iQ9-grDOY.̯T}ܦL.IaQg} 侬LI###K`xf/Z2#HvCaUr6!Z#Akear]N$.""r2mm?aum}Oϖ`ke -GX3ي)'9(Ƅ77"tzKM%k!p. `]EXl PO,0󴖉~\![E͢ *4}d^͖_`teԦeti_ٯD,U"sHHH$0߻[侸~wŭ8֢y6 H?EM򗶑VLY}Ԧ4J;z*vt釙 \qD]S#f/)OBBH˸X56չu,=$+| _I_\y6y*;AQќ"JpPEKHp [}iʅQn^dȗ PnۮSEfScqu%kF)mbEͱ0`<̞I`BrbK#i&X1mI X:R(/,͕mœ;t"i^$j Տ~ZkawRgIpZ6%/(Sg)ߗqC{jigRe;4iIpmB?I *cXHZ!l|~`?OiA#JjzɔBRΫ.?5R.6͆}꛽x. [gpn OK9?bj0\DpNIRgHZ4uZSmOk%4$I_P*8ZB<7NMy׋6JI-c PFqQJ^|nTti]M| ޡȢ!:R:M:Es~->LM ~]^$k*N;.KG]*uڝfwD]V]<$PspR+% _] ni\@['T $C5F}8{EItk #t/9D^ `; j}>w}x%ӆ)iJh|,9X$\=<'eϹx>ϔ`d旚SP ``Է3xG%jtOl,9qD.]Wi-~3_~A,bĻ&jW=%VS(f9SdSZ}S$BP%@9ffrC4C'd | +Ε9b$`$`$u^^{ޫ[u\<-:t.?~H3¿eiJҾ#CQj >mygcRPXX梪:uYG^^*ʓUUڥm傾]K**IfCI%     p}X^A`]0,6GbsA AKbZ_]"eTԟj[q"I;%~\[WϚ#iY^GTP߬&x9g`2O=WݣуzHkD#1geޭݬ'ըj$.SHHH$Ѿ!"@ 1iϯӎ>x]!ȭ r#o1,Ŝg3bpn Dc>^WUfyVD#q%B7YUuA:u#iѓqb+m~o& )###%=Y(N=MuWls)[lXK˕I l"!o !W2LZ0oV%Z*%K})~,H!"Q)z`8Ue>|R~\ZȣҩM كUQ8w&3[Nh<\sbi^Jdh$ܦD4Tqӂ$yë@ $.{\2(ϖ-zi%:=^96lr!0DZHB:ycnp]s c4Eh\9AxՄ~Fë ફYKeM~9g!*;]m9o)|Uw$[P{Rcݭ,iIK`paBi9W>㗶?D%'X+JGy5ubU2S$ʖ*'q R~`}q_.Ѫz[ :̳XY;{OQ NHl- # rlluX%r"xƒL=rJ~XEZDG^~G) ګ)a!|rI!:H'wM%Bt7\xd&{pNbk13^{k cFFFK ?bu81/폁l'Z Ӽ2(ߣd (,,C'T*]#hv:.kSmdޣj(U-kw߽=x<")_HV ݽI3`IV)##&/m %xAտ-b2d$P,F H ͕?&}Jrb )!REԢ𘴍JBtX1NpGُBN[ | OFT]hH]%EIldg8*]QC:^i9Z0f*,@k#+./o@ڱ>4_K FFF]~yOZ h3<``$: F.`-,~@Y;}D;jC.&DG>/f9U{3+(W0[)*ʗCjTٶ JwcEr,m3TJܻҷKhѺs:q}RKhH:,qZujuem]~"x2GSx Mb| ]v|S+8hԎ ./om f)8@$L)(,TQǜQr%$?W 1S$`)a ;H8˧u2 px0M~7#ios}lMlt~II&'ӓ׍G+da,c=ySΒ@DuY6P;XczBrv+5ZT_c=Iroe!u(EM I rŅ(n*e/O7gd= 7ϸpFtW ={s?/KGDh=:2;0ENTp &zT`I$t(42:f"ݹc-ISԥ@f09/9wkwOy#$RvySI+^hկopG[pv Q9xӳN 3=H$!#d4*YȌ|=g0 3(DQ"PRPלJu~BQҺ9UY~[Yڏ m$xE_@oyu>Nfx&OʺWz>"[vLQUgTJX`Ik ~m{!amaҭ}bݧ*ps= )Ų Lw>zϯB.|cz˔y>j]ǟy{ &#EР((0竝ԍrOF20ًXUNmL #ڑ?D#PbN|`zG2+*Zխ-0:xbY-8BL4L&ss+oFUI ч餴m,ײET7s@A?(Oa 0~Fw^edg~'g_A02GC?~#7%P];0ny#P!H!i-(@Xc%K*AhIΛLGMMZ%gt> /44D-4yk= ,Yr-M֥pLDgesl62.]69"90C5^y5§6Xې@)ڻ&_g{dCAB;{*f6fQXA#Ƒ@wŔO2095j5Ѐ@DmCmАaNTP_%# GS9 zUW |ԗ$M=F +n+}nj|`%4MVu-͞/;F$Tgz궫;U|kˀUSZ vu;t"UF[2"GYq }a߮}:A{eÎ%8~O͐+BRWmC~H׭D# - [fj=#Bm?*7HT{T Ac#lVK+A: PdS.o9tifyr#Pi!Q {^tmp#7 `O.%7i5yCmkOc(/ |nCq@O|!fHK%|WH7ݎK%;l84N|?ڟ.iG&9S;dȌF@ژ̐ '~ДviuټrQn*!eن]%ݲ2nHdR߮mӹ%d[<ά6ДerZ&jI_TD_,T߇}QyRT=dŘ䣹dӮC! RZvqwKȩ*.Ojȕ2\c;}&Yg}q4yJhH6mGe`&-]gmfNqTG< +_S@I@+[Jb-'SSF'&nMmzhA*Xa8AHp4si [3OtNW2,YpD(%( .$M̖p Ա^H@hw\u0%-C@Dئ%@~UTrG0&5I% ŨA)16 IsPOZ;RL~1aGi:iM>խc-L˺?߿i".{/XK+ r G'%Dxfn`{~^^{{FsO z腩Hsie%0a=h/~mTo "%8CR*PJϜUQ Es9"0Iy ݇0ˋ E?eBM I0hԙ?c_B\:xB⢡1r*V\=LRNQ"ut}4"4;`!A>~.ZUϒlQ^ɽ˪=Ci߹z 9f̘j[ly 0x+xB:^Ftdb;Wz6{|P |D|[.~_ Y"}]*vS(YҫUr}){Ì+`#a;`SԆ@ 4qs;y:S|atsWt >:Rbl7ҍ;3K0VZD˖݇Tjo ֮(kWu֋-ґ=׌Vjkrmʑ!賷X7W$!:iWE\^ y4iBl}xO%<^V?oit@m#ͧfjCC\Ilջ]'tّs5ۥx#Nh|WX;ݓ+ts@eZ$|ld'&$3a~#+~ +@_ kpX$1&]%6,@*8w^Q"-S T۳C)'0?mCZaZWoÎkuynvyMVN哧#Jmk;&Xu:K :!Hh>F$~2խibȮj™3gloGG?C^ͬ/_= DPsj4/~XN8+.!V:ŚtxzPg=?MP B@ӹkd;BU>-]`ĖkF/_!~ܕ[U9p5ҡU`i͒>qkΜ͑ȹ}1@>_Wv*_^x95Oӿ' u|:eg s(՚Q2+ڡBʩ ylǁYK6*׏f2 WDhZn"_QCI?ݪK2VoSQ;XrE֓ 4"gZ.ww;j 'hŠW%GR\d `|f! 
Á]AKE˃ʸs|umi=q\iY/Ssfxx}01e~WZ7^ 5\+U`7T-3%eL,Ýcn7>o}~iEh&r"Lk FrMjƔ);sڥsmktƿ>.yRQq>J`+}1(Cz:1ti"Tq$|1W3\RMyL{L^` m ; &kF6ؓv.`=hIgj"y17yV **ߜ+u 79$P-RHӱߵ zw>lB6 ]mUŇ *ɻLߺP?9YHe⍰q`pY/k}W0]j0X,w2K+g #‚UF*A!yo.wEͻ&z3/,y~.92%իk9/S.ظlc_Qb5OgC(5&Xz/Gb΃NhFM#> Ǖ4PFM~4g2xUx[?C9NIS9!nBeͶVl-`2@poˆ~msq)ZYˑ0u9A >IW>wV *\+J+g|ld+ZBi@{v2gfIwPώr 4ts[P#̉kd?N/SZL_Y9 ~xlqHr;eϡ!gl=dرCz˥W{~}1 T+-bb6EʗE{NʴH,>@3IR%1u$)uZO;_~koo#VuD.R υWPr# 3 H0t|rsєtj'ϿOqG;R?|T913wݶ(B@+YZcp´ 3GM1$@ }}oh88?.ZF+&t֑4a2<{C$t} \`ڬ0e|w^=B 饪`r$ GnL ?T pu^9)pрxœzuQ~UG˝SiͼC83 me8?H 4i]&dn#""1,eiMS#/yqڷ*Z<ɞ!%L :'ǡA^w74d-ٰ[9R`TY}iSZmS鋑9>IbyR(i̠51;Z-xIK.yC+@]~?~. X0qLŮ5UK4"q0cz\UJ/=:E̾lfĔWc޵0x1)͏ljs **m@2fx%Obbڣv%f^ToW@C2WVe2l[^ڨ*i@ڶҰ%E2Bf]W7_:4՞2aOX.(qMH> CZFI*4OIIlT 5JJ9}mU?pC^Ua>ԔYgĄJvAg1b#FT>z([KsAI9=woa -rV/r)'RE^ H մ&yY4+F]]6N@%8O?!+0~}˥wˠ+/?Gi~y|#}1, &z4udN Ju~8-nMDV9 vi0<]X7.b\uUjs4 ZR> /PwD4P G+y֢.fxfݹZKI'ҖvFMZt*oz=$G "帞|韝>y]'ewr j um6mZ*'|UvѸIw{)Vu{zv=N[ Ԡ I8ZRk%"ÆIDs<:& rͭ,_@}0s܀j.Fd,)3GbBB}幎&5Wrx00kh j:g{l31"xLA=[MzokC>*Np亭t MJ{Z4Is㓋B5oʨE r2e]ޱS1Jt-[mʆ# aa]#%zF'LxmFn;V&,o7W_ϟS9'ܚ=837œy8ׅw04K+<,L1dkh 3ș|9 i]!;Bg`]@)E!)l( 8Xib8 V|EDKXx&Q؇RUٹ'V,pH¦@qّh`Ɏ(xZ٥mtDEFH}ޣ 7 Qsۦ[gΜIM9jj7#}ںS9lj2N~"I@ X}hE&x6\|D'ro6GďCҞ#B9ٝᆗ1 umev˓ ¬n dD"suJDYڧ?ye]{~LyC Z}$I4c-&/2upPu#2]EfT{6Zi8)OD:,;0GsݾO· ⵔM^Mu/tU5xVXD|5=RwȾ=kqH%Y3Ҩmz/o7A3  g<}9%zh'?H :DJ@IDATs&mstW D/I'\ed9m(9[[>ˌJEM\䉻Tav`aP-4c _$uԦ>b:,kaſ|q1ZJPKhx>>quU+1j>r Gp$~̓ O 7S ÀCtfm$I pMāxQ("dkX.$4vIr%͗^?Ggj k"CF>)?fN+"V=&gzS#kW:I"*J75"Fy$'?I$6ZAj :?y`[abw7C@):*Jm@j4;ڥB;D^Qmٲ눜M(s!3K F=7'X;vuK'8 w|-1O܊2`0T@KU뀽 \``憢9OiUTP;rJ:OWV^Ԭ8#L 'CF%*9iSAA,D@9+a"E=8>6a L7ʗ#cR~ |?HL̆gm ?//EZJ0FIxhjء˛Y}\;J+(3!#oJPZǹ%,4BRĄU8oa!KX0~suiGR(J|Bn;ǥDv{ETre,rD tVE>qD+5u%r Q}gH ,1Bv2-KMxmqrTcG;x"@{U6*[~ )'rlv u2Vj֧5=b %J5`FFK0[a`F\F!9{ˮCj'Pj`M}ppjRjK6[$%vOn8hl2al+"{O%:tdfmX}B`'ǽ]\gSj̗ GWAHpK5ȣ护ۅLia N9$xChuw4 &KlXxOѽ@>/ 1ZܲzLE#khw2k0IY-rINKaJK q?Fk£"mrcB'өmY¹1)^s7.[1eJsYqd!}"/ѳTE>%x VEi uW^x6ã8Xd:#-W > DЗH&T%gbR]' Ԃ}w~z.K%fe_vuer=MQڿ#iO_|Ij7%աs@5ﯿɓ |Kt,yM@^'w+[e=]#SSFޤu0c; y'b\|~75@'@ "NIwUu1?}UUc,i im/RN 0ЯC֭v"Ncߔ؊lygsڹ} G+5JKMܦ W*UqE0ޜ:1Oʎf|+}:Q3_!Ͻ' )Wjgn`(s3,4#Zq6/gHfVlv\Ğce"ܩhm}D~DGNv rE+ㆠ-r A=;-_ykpiIh ItI7|3CzwQ3ϰ:WGغ<*2,hTV<L0~]nB~O:Cpmض5fHHj D`=-#+sr;;G,ɒǾGTDs־H E: a3((mЮc!B%ҳ>2T t#'?BT|]ѲޙWjR)[FB$"9&(fw((UUBÃL~nt^8}z$*m,g@J鏫剻,G9A L[I/(3a:#P+D ?ȉ IlŇi-#c0#~ fYa> jj;iD9Rtd(BB8kFٱbL/TeͶ@CI`+=5k s0$J:.;7պg1K&&#   Lpf ¬Zx7ǬȖLXW+A}j*=uIJ?`L(kY Yd5J)Th;כ2Jϯ^ s! "0*5J8_J V%-v4_]HA)}dwY)Yy1IhA'WH_)1B,͔F+?Wթ Iϓ*F(&;cW eͶ@CIORUOFA!h@wRwfۮ^r}eD/=Prl  T*Y;4LJxՉH|ҷ7ĦW1dFϲsWiTzagg>}L\1_97\M|8/c 4h ʒ `KQrlЈN3٘oU'Qԯ[Z'Nڋ+iGP݇gV-*۶n(gV"QtFvQs㪭Z|iCAӥm !sTƃY*#> 0HC'20J(:OjhґmUʔP[47nu>|yq$k+ob[τ`P·&|EQB%B`}ƭ>]'4FЃv@[5K.hR`}>J'.Sv<ҡU ?LϬA~z}7U>#|;,Q9fPGD&*TUǀ 0w6~hźI0+W_htA. ,UƃKZrfm$`$`$м%Pb!R#3C@Uz`Ɂ&I%s_ou%뫯 [gCጽhKJ.1&ח~Vc;[y.-->s+K1"M]%s^!öpDmΌԻ0)Oora(,$ϓUa0ӹa ;&T\yrD 9CэEw^kljvTZ600000hT Q{ƟpDqTi'z`_ @u ,!̌K+ۆԵ{?(먈'UEAx.sHHHHJ@Oզq} եEEuHVm*}Y`FkV֍ EϽ6c 'P˝X?/4 C:K9Y*U gXr$IS@3FRs!|Al]HC.v'˲ %GҮd7*V[`Ge=ȚHHHHH ڏۛ,n(=_![~D]RV$3BSR_6߆`Ge@[eȈX"́&'Mq]_kscV?<ҁvua9{ 0Zoڀ%_k*al9ȅL\Wo``D#"c\Tnؾ Sתݺ!V^R? 
O?nY0 dƱxV|\:Aktͨ9U‚dd$+,N]+Nq忓| մor#3&t*F<;a位̚i$P$'YK SS%@sBm_6Y݁\WH,y`Ăڟ]0؞*1t@)LXX]xky[Eȿ6 P*,#AP>9o TmEsK`e馗K{yPu,.4F\D';UaM 2MYaK鹩I7{mmA8Xv]̃(M:Ͻ6/3f9s3;C"Y$jZaxkkY*cI{ҍmBy]4+fZ8#]:gMy:p\wl%c'ڶ稬ںGrň~ua]O`v;_ #=绢ynk n IolԀ6:P1l3[Q+g|1v0jẔ&Y릨IW|l~qQrLVן)HG>Yy| }F92ҶOg&V 35'|OOqGڋӧ/MVZ]/6n޽ hDE,p=)]kΗJVdeOn.-ehΰ}mmN7Z,׏"pljt0׿X *sWn-s<߹MK7U0zuT>0퐫F @2|,Tٺg\7n̑N[TJNK@ 'msXf-ݢRRzo&>0zFIzTQZĨȫ5aٖ%RGJL5tZ ؊ċ^.ɓ[%X鵻̓]o_--o9w-i]37<7m}nMP*ʳl엿Lz\Rh̢&vO;)~y Nj9K`LJ´imy@7;yރsSõ/Xd*"ҭ::*Lq;, ?whX&<Y~!5֛3<܍INerp"Iv FCsJ"0~߳fi7FM;h8;\k8M!QF p*;SIfjJYٹ2szP%nRne[#VK.iouڧ>((7㹵Syj.9sES*lxE:yzw֥OzKPzܗ%_,Guvn7jyuf?leΙ#6N)Ӧ[Q`zk>p'PMlRB!o*v̢ܶ_E;{=p]5Sxܓ)e$`$`$@7;`Lg|6m[?1y!?GٿN ` 0fZt ʖsf]VGr0Wh>gXNh7VhH>9o8w[⺪6ud9lEsv#u l)KH`T*fݯk[فL(1a[Q.8[o?{&l]W[D.'O˕0#3ppj`lp FJhۆk5tJi-Dĕ)TwœTYw),$P&o ϔmlP ``8 TT4`*E['x`="]㛝'}/&*0'EAaWkZ X9^MAx8g!{Q7sgXq=SeյQ}7iR.h'JlR>J,$#+.%s@ҿ{Gi.CEqtە MF#MHZDʍ SZ*7p(J@t+e%o%xd>[+졢>QqHL4Ë́'G`d|YԦAQ@FK#YL=:ό]nRE<}*$Vǝ ^:mͣTWs 5 ?GIO[~( 0}uW' S5Qރ >A7#FyԦsa ]yfOjNm)sQoR7Ro\_0mb>Vԯ{2rS9` Z!O,s]cȳI6;8HimJTQfb?uF-'H߮&& wXBl0ɖ]e]~i+W +<#[LP/Vup(!s%UVWHӹʺ?`{QUTVv1srFk^)+8?Jl!SX)jmk$3ߔl,;X?5+G_SPhjSl]$zZDа غ7خy.zpalBpobtGSRUpnc{vm_JFz?nR4aÆ9{Gv(UHUZ>4K`R}"6, TL4W&k. ]tT >ܥ4c.&ԣC+G@мڡZfTVN Mo%g "C.y%Ms<ZT'-5, R ~" d": +DB1iL)u HS*7zeDsg̘^[J _$Mc0=@T]*N-Ӻ?$1^d,&6d^yLb>{r#&$ޭs-Xa:[1eMFinx15%ڕDHUC/g]|!%Ԋ=x%qaZfj7ykԌ0#iڼFّ]%7\_ >>Na ,*R_^뺔=NyvA 6\k԰az%oBg:cF(4JA7kGZ@'1~RSSk%ô"bo0B)Q+C B5] 0R'F+8fRoȗ/72 .v9Lyumʴ٥>zL 9y TtD'myP-L뀖Yo-U*ۤO^Z'\JwWhDBM 'd\ $Kblr jkq;jT0`U`#88X)@bPHp9ɡ ȷ6ëXft8J^h,e0<4|G^sG uCH8{-YN掝_aj:?O 2ASÇfDfxQXȱ-4l 9^Z _%|JJ/!87a3dP5Mȡ;ap0%@@Mh]cG~!w䵙)9U F EvTVF5Q>*kSڽ^!Щ*"UJuUú+gA#&fsU%/X!2%4K('*2@ɁI vf!r <|߶*ͩ\ |7=1٢ړEK埳IAק! ݂勍)"%/͗[gDܓC#H&tnD"ĉEz+"ދZ1TϢZc5DdF=ƴhSV11Pq(G|9뒣]$-;Dȹd?uLbxqba1| 7%YD FKL &I~wMQhƞm[@մ-Mi_ +=h~3o6/!ۑ7߽ oG1`?A&&E]n׷P\dR&vx3vdOSG@5fdJ6.9e.|ކ(4e͔KCIe"(NH]ܛ>֊ A&oǰ?A~\^ƅ6d Y$I y+QDȕ&3fxoV?& ̈́^./])2=KZ CGi0JA-G=.//Γ}YU@,F, Xi p4au=N?3uۛ,q.| J{T0DA,N$dSz4IAs0pQ"Pji}5U"iz<ǖ5%5O7iڑv'| (_?X8)5iz{6ͫqX$`QP,vSѾqiCͰft8LxwG?!ٍ;B㛭E>ɞ<p<7eJ,~>f;rO۔>f]-[my;k")\x۶WvςUpk9G2vZn:.+\~ZAm j[Ŋ(Tzbi40\8՟D 16YdKi"GbQ%RdgLtޜ5(cѪUg:K0G6ժE -*m[()+_NMMdUIehG>s|&AQf%e*\`pֽƾɏ/yWz >jpK‚2=gÆ7yi$D9t{Y+ M#ɓAf QQ#Ϙɓ|:1q}v$` @m|6?dI|ȡԢ ~ad(6S\ϕXdHo嗘6Z` viUQVI[wKZ i=k:t2 MQq{q0_;!;XW[#~/?K8o_AeS6- {re󏠒X)Fe/"ϓ@56fi6B?kZ~J$`I@E }[{m!ճn Pі 0XG'`fR-QSD:>Jjϓ  =}|_ycD >5RG^DNQ)8'af4|ns =RKh ~P:UI<@ir*n1[E=k؄ɝ1_d/F7fĽ{o.F8A32}5~ث},mdF5 \Nυ K&-~wΰMw1IaッFve\q5.rg3)29CqɩΕ {TÑw Laf_J*: ebw逽JA;ȶ\2ya܋0ƗcG&>RU; `|;L9tQH}u:!Ҟ>ސM ,(PZ,oFF>sŜ:.FNѡF;z\ <mFgQ?P.i4Vi9#vV E,K@o)#To%K %Y9)X4_bǽ? }Fu4L :7 %{J Wr*j35K|&K tDFMw5Q"PZacOCg./$5K7yN\Wg#YF; <eB{h ۤ" c6cG  CWzdg_1zD\ECQ#(ɹm Q0uHi-?y.O_4h{*qoiyiQHZE9OA`\CnR`{Q5JD'";K>~=5fZ?Kgvuc;>,0ʕ򥜛XRMOls<)SBC}c6"iȢx#p̓C|v6 ?|4>%u(4EOOIy%lCyar0nE ON=>eZDC|s64B{K'Tt"Ї@IDATI)LùQ#SGӰ=KV1gҝm=84: | VfC9d;F;wQ/ӻ~*BizSf\ zuKvC_~ CTx:< ]q{̓GstknWy;o7x_|155>tZE5[>}}!U iy @4}"k,y_5#$hAE kXGmTQ*CexŨ|Vn೗o֍KC@62"U'Ku** K &Hkr*CyT;Ƶ ˠ BdĐ+d4lӥd>An_rr d9C~VNua9nR ;e] Uvm2}CZG*xȂ]Nyhy|ftѯpR`hW4I&&x|lӾѥS^}£Bc`&G. 
7/+/;-7mY6A"8/'ʓrDžrntD0t+j O~τԱW褤2̯ ᚅHwN6v4(m>L|Wd$~;oXK7/ڵL\UOPݓ4zLl|]Թ;NhqxGmK7-e6<zѲoaBتKy- <c\{P?Fe# >) `)37_w J|*BodrtjRwPETȉ|.Yn*݉כr)a2qAtnf-r+۫5|nY},jb(j_mvh"`-Dzlk1lMG>3g#RS% =uAhDt:-<)aqݜuJzGr^@BGBCusNMgOiE'.IV Vڽ<8Y\R}|H^Pdp<#q l]İa!E1S+ڦhۆt!#{3lqmsvS*H6,Ԅ7NAQ<@24tǿ&Onpˀ#czgPrcv)9]f9\_j|`26褖H)<H|Q$%7i$Bv[2k I |J+G%PQA&# HN qt8 /;PUE['hQ$pvr LjM 9z"TZתyĆb"j}"9Q#=#OZ8ɡfqx&.wqt}uʃPdڨcYrVN&/yN9|$Sti+;K߮|c&xHڂw/##w*/V5 UG1q_iS[.~5@栟~SXX*>Q$- 8$ybN} h;w mxGs=NnV(cfTbR-4>9UD@>MW&8ak=#yTsvӔi o0Vi^qW,1Dޗ(P׃H"!.V:8зn6iĔɣ'n޼L>k]A4{Gl0»QnxԳXW9Y_ o(u2zb[e&0>u U*Ҧʺ:W@_&jH6 I%mzc'ʃeMz_^$/9C?zD h/,7%Ͱٯ=ߊz07s?9 5;X?)9f/@?4X`5q L2-+1F~ ۬"F FF)fې$(ȕ]9Ny-*{"g8:9$%9rëUR$ox$YEˡ]#+/ 6ӭxbbw_t8-GHT%ũ8P*~>iJ1D}U_6YXt+YLn)͇X sO?5&G\jLZ/^, Ս`_1r&60) $ P7rdbJ3[yLĝO ۃn;vpv[CE^y *'o~"ho,n`?05_l3F~-6hAl O  1A Z"5dPtݍp$K0$D+ 4+d]9Qqɔ$,|(ɿ vIuji,&D *'ʍV()l'x5OmS5dZ$>M( MZ.zZI@i\r.1f(@"1T` )a '9!_ί!i8AmK~IIf|z]^֜~4K7W S~:YiH&_65F6YlӑS/*κRrMMKp=?sM' o:Azq&48ekk-|y:)I?=b"эQeXM[݋%`&/&&$&]`1fTwivQLäwp21jNw©d`) ),L'9 5Jm"L#|ST^Yu@8&q1^ިD%QTohmDT|/4OYw%ZWZ$P@0QW`L]Os k=>eKz_L9 vmPeR"ﳀ@>pCwIFC,UW E g#3[]Rҍ2ZZ4hgs]P_r=vĶרQ%Rg"ME!A!So\q!xE\ގ@1"sv:E42rျO%# erU^]Wg%5rsҳ \ ktH%"!)- y^'GBiL md$ԓp~)e3CmGahHM~7#*^|U, ԙEuS#)y9kM MU-Ėq~oRuO%qϜm،kꄗ-Xqdso.gAr0ZywO|>uSQ~4c#1\>zؽ_*L^4MAtʻ%w<T5dlw?YW鶊LEkVe"envZ6:>yŦ6C֠V3Οr.//Cxf/qbе[>^Sg:(R~g "ܐ۫eܼ\yp<7dWnnq!ևKNxoHgV5YI"?uu0&D]%; HEM?SQֿ%?ٽ[Ͻ㿮Te|@I $1^A>KB\.NSMjN0r 5JJbc6*iUNФ޲KҏebuO~xld-@oS^b<9p/YwYח.۽O[Hw]ۛ*voo@޸g9JIC;@ dzֽ{]|tKhK3QI QvVmWkQh $?1R׶j-Jv.>aX OS6ɻU}[1 Ɏ*C#?.mr9{ZGل$5mEXMR# iHgZ3Y$JXvoٲ愋+AQ(M@4uc|/5ì:t vw#pgzJQ5J ϧU:1*_eNzɚmSIcB8AgWb 76U=c_mBupG mWRX4''}ÒVS^;T ~ilOHN}fƯ@-6,ץR㪍!CP#]fV[>/OοƿfOj99wcΊϝ=jUbKOArǕ7h2%h¯s3doM*.tEdX8㫙uy1Z$W\THqtb0v 0uA.QSD:V&w4ËW xGlen)d""ҫSݹmy7)֦N} _<=.|P~Qż+qٺ'uA~\a;~<A j*&LJy@_b~$^k({aZm\k;/L `QmNKX}ɯiVl-ThwàCsුgJVGB'.,l;^[ʪ.w,ReֺD s@FAnnVsߵ]vHv*|YgU@DmCmpT}zY)+R*| Z2%}b%s H&jcyT^2e_7"HEDFG(yޝW֪ԩ<S]˟@~?ӎoKNR6|W5`f`Qق:߽=κ>$?~&La[:B^8D4~R(x- }|7R 1mkn規vִ#X+댾va_91W ,Dq>Z// 0c!;%I"X#@ V&o今h V'4csVlE]D(.C%v|oJ ڦJYQ*7vm9?.u|Gk%d4Ȱ7hDR5˳np O۟~ݱc%woZK[;O\w]E|FG]wfD)ICoVݾuVH׺4$.<QkibZoʆi-kKjKatCWw\/)Qq;?˒Pb,7Ϣ H​c?}>{t@p y5۪A4p9Iм0 -A5T0| Kj>~ת*[N9NV=\{iҩ]pիS%Y˵LRM4Fiuf}1'/)(U= n} j_ՠVO/|+ \ ?.CkEy RMթ% I˙ڔciS~LsY+ΊsQyO.uƢI@؇=د[{ P158>m>cI:&'и/äJ&^ Fq=%V[𥺝۶vv=!FK_G1t0,k ;Ap֚6ѴoZV/b[.f@u)q1fHyq80*_b][|_:.3~p EDN(0w˻~m_\u(_2yrmǎL8 > gY6oi>Khz% QzPM(5c fxp8>%[a℗11yk0BK=[ 2x0akT8F>1iiy]N[=Dۻa?f9j` .8"hJXiCn$>.wktqB-yꙝ;;<*yhxx=U"uخP &Ϟ}egԦIFq6.ە_{~[U6jͶ,Q Rш7GeY6@ J}|`984&NWaS%Um$f8ڪwhJ}mG|67WFh`dD!}}S'_ shn뢏5µEgD8n^r=M&R'=lԦ]y"d:g}ՔacӐU;|hAoE%%'JB wrfրDXoC+!:u!`( ( g!4Jw(dᵞT`I~wskX 8ffXHHW2 +K UdQ%Dǂg'GȟȐd`M{q98cA΄?"ۯ6krCyGC Gx@i--$P w/ |r >9-|EwF' |TKAVWdba:j$2b$l<$B%&.gcz6h(:lKMcGDFu H5+M+e=b$P =߻My\k8dQ% aDՄeP1fU0PF&'Mؓp,3WE["r}-(Lػtzn%PYWc7a]/G>VCӿGyvxv.]ZvmK8SI:]ڿWS5`_z+(\l5VV͢3XZ;4X"j71KAל=6k(k%.em4FBDTT]=O:o- TB|w ;66艳AH-MAs)&;;] 7?7 ܮRkKIh\|Ųzܟ'ڋϐ}:Kz jQHE5*矪{N;_I2TƑzJ9OK^frI[Ï&J ]kQoq7߹ƸѨAwoBl?$t9nop'fsH<<p3A;mV$ !'`3,Z\矟gONj7`Ko0iФ\Հc,Љ> k+Ζzvy2mRyd|5[vů͔ʍ%+7_n[/بٲP>_U[k٫Ȭs%^ r8kۼy?d_{a{Ꮝ9w7|mT}vBlE䌞rJ7п\yn?U֒Vbռ{s#ռx%1ˏuL{wߙ FۆGJ信סԘWś3Hv{H5"R|lȅ#w׵Uf0`M&nS \_T6~4 %!G=5WʓA<үygNY:k֦ PuF11aWg 5Nk}Ӌ>t$R5߱}*td֒umdElͥ26SY@s9}IM}`4۵|9w^_̸:VulBi!+6Rڥ1iv@l25xr0|b T\t\wj~.^:quKeɚm*oޝ^ՌmT^ҿfCYÕv>r \>e:U?'\ ;g$>İ~ħcSQ+5 6DH_Z%y-i 8ͯmOv!XsZhhm]{?n\h`W9wgq`9߶ʌ), je#*`& Բs,B9՛Ȣ՛3{f.z$-W#zչr&s 䝯~{K-U\N^-pH+:}rWH'SzDԾ` 8y&p3L4 IڧF{e%?`d~~|,/L%s^](vm[ szb4>3+]X.?#E8-s7Cxddz$I kB+yϠ9_7l%]q8|`p/Y0$=tD@k?bٺOrrg*F8?vB#|@r>7vdҨz^%Jlc~~OA3~_ؑff'4.w vju>nwJ7hSY߯gЮ;3W¿n:5KmgV9Nٰc?@"hv;gE> f$n۾4 f:Xa<.R +dYmW@ A蒀xf6? 
=_,b SB;Jq 8O嘲!jQ"Pj/ 6i,/lP4[féΕ毹d~/j☴)RK^3<P Cг]3ACvl ?^|D;jD!.\y$䙚&n0 `Ȓ@S@΄5!eaf8̇.6i+Vb10&x2i5fJ̋45};!Ẇx\OArVM0cO&KG0NS>XܸS#l]^o9кo4QsyAߩ iG5ԊA4:j~PGr,m` ɿmD! .*&Cix"Hyo Kg A^rJ"]а^pZi$Zy7wъy_z9~(w۞/~HIڱFhFHdd^{ GrZEn]yG[;JpA!dPgmh n$0eʔബOꦞhHax%~*ދG$!|\S^6-!CSS~|04Epc_mgt}ġ9.3(h`v^xb檪֖%+4T|j n^eV˥9Ѯ<; z oFL\3/?[b0T-ھfLؕD0=D3@A'ҡ#Ysu`p=dFA+O"m[6~7_`Β385a??kj;$qؼ)W.6 EM=Zh^…G++ #1=O&xeQi` #PeC``fnŌ\Q j$;w[BЇ0ڼiA؅x@;n{#wu[G#,A%Y"#(ڄËiϒ 8$q݉4>0q5' %|ĽKFkfwH| (L~dT&PX<0_3p}Ǩ~40a\?ia⑓]O|Af;}-mlF,j {)+"''дPa- X ptLξ]K3zvc9p_)֥f,\ Ns%YAiZC?,4 ]T jhF4CSa6G:_gdRYn _g6gi̦ztl- 0O8@Eڦ||h2PB7u ,,@;ۼjL[3{z%:GsʏD Jo<EUѻs;e2I<@;…%fj,RupnʔNgRCh?_NJ|.j3g*"0 \9lذ\xq Ja3BoUcF0#Q2Ö, TO 4㱒o>0 2F :/%J:܈=`T-45Ԏ#  &M4_wdg` 浡 gP Vgͨb T5#X~R]1-֛Λ~r +α#.jo3 l j;jOdvPk̙c_~GzsoAyO]M6tۘ1650uM1+'(VQ7mJh7@KkT&| \Mg@K mS&C0{C¦3`),Q٦.gqҴΫUF"ч xY4%pq|zc"MݸkPeB0q1#m.>32!H =g4u԰ #CfM)6!}zȤ0KI}mK, 4d L>Β^Ȓ@MI߶!FǬ'j0A+W-M|Km&n3RDxJ*S|DXMwohs" ?1^vސ .oKZJ1T~K $!m6u1:`>RxHR0huӓQxN9Rm`aؒ%K ZaxhY2sUB]FOeU+.QZ@IDAT6*U Eh~h3a-*@K ?b9N) iEAZ/|AHh]%0q䋀G6yj%eYkK, edQӱ\Y062(GLy0~"E2R07vy& 4Fī}Q mq@Vn*tݥg4w& 0A $ gU)N؛^ „8kkʒ%&Z.LIbR1QHvO.BWpI[CSfP~^Dۯ9Nu$o|6WZFل䮇(>;ցU\rl!L*i=:}aՐ^A">y^b"e΃Jksvrv.,`GEiMbz- rOzƫyM`:G*,e'V=, X(Yb"l{Ґ+#oRE/oP{9jGI%F',?(# gΫ4r D۪ӧܟ7%MuJX`iڹ?M(ܹ?]%Un \ӊˑˉn}ꓥ} `ٴ$8%|JJ/v1E)}'*lJYnS>՘8ϓ}iȡ]WpKoކ}L1@reFj3Sqa x` Uhm[$Pq ؑC}$,.!{zvlf`!,#nB٦֛%8]Q%=~\vvhB%,3 #yj#ҵ}8=|D>a8,PMM +.YڒwWZID|2,.tB@3~cEPɎ jI^M(A7&y2q9^V? SVaxxu3i7%Z*4w69,{5dz8Nڰ4K':`In$0n6oӨd^MP<09cG&=0~R&e8.9e. O9 (͔H(rDΈ90۾/ %:"T·+ Vho^MȇE;QjYg7o6pdOVܛ]ջ=ZxK 㹪-6<^, X; I~?ІVVo#$H޿C=[_^. B.|2kO$j,*[X*[>YK")S/OUaMFʃ*[Ĉ{s !1LۧGRR1͡LFGm,*@2WęZ ], Ա **ގ}}`QJ-b0i:7w&6:({xXwȟm[pO]P )"jӪH\td]''G3 9Wkbft@V%S>ͧ$`In$.1lL9pKG8WzvnLޢr Ÿ~utOh%K"/:'**"Lf 1M̵۪nt(1]^dr%XkTδ8Pid6YvkQ4KȺ’@J]w&.Aj(y}2 ^U<7LG/?acx&FiY[j|r ETfcnG7VPk- XS 0섷RѴzvj(p9) @z~P_{Q4s+> e2޷dt򧪾;7* +*08R\sЅ0 hu?ބI {U,ɒ{䖲Iػ)l7ͦ8eĉ-*V#! 
@^f`w; 20=ߛ}s9*ٰrAX5ֈ{e=^ ״_ձJkTKBb}J Ɓ r2!UQ*׋F=  t*v~ `#upjOشu$kܪ刣)!y:\tNO{X!.9a6WR:*`"Z>mS}x?%*pF sAwQfi]2[:bu}N 'oTE Ďo?zZn47;Xzz'x6yL\m N*zF :wA !?+ V(W39W>U$t )u3)!V> ?( KUHbH\tܿ̌AvRW3zb<< >h黭DΥgV^z-::_I%H>9msw \߾w^g?Gw G!Ѫ;>9o| g™*=3A `3@i|B^\^) Ok,=(t,s>~Ի, ͂A AS쑕U.5w7Ҁoo!Js' ʯuntc7C @ڸ/"χO9GݱlbFQrb| Zcr|G"UU|~RUr*)y4Eǂ =&sVdP\L2@?:tNCNDDna`f^nTWkL7 ו\.\ EJ_A6GrzL4 dVxM|+35rWx`LͼdC2-y0ؓ c"%&:\`^hRZQ'ݮ{ۏJ\L,3Qn7IBG|˿ /mC'&21>62 <$ķ l6-I6(&jX??RF3{e%׏?_mhh?`\B`7̷t^Dyʝɹ,| &n&Hw&̼kq!O^` YВy8::g][W/ ζ#NVZ2J5O;wԆ)1`Ebdklj3e @8| 5PcÇ6_fd9k W7P߁dm.W9# ʖ]bߞWh驰ù9g18s<97!@HvK%Ɍ P6C2OEƤgd5arǑ|ܿt?ٙ?W);oAIqX3e:| @Ȃc9GW7O/,[o~z7jM<vߺ)q+e&OVx/Qa 9]m3`d_GΫ&E8v]"=&UL /3R'䳏,Sxy;)VM0N;)6SW)_W(b1_#A̩oE!!A2y{8PHI߿ ,l݀@=zk~{XWY]+m/=UbO,[YuP|5$u="/۫1H(<ًW23:ާ.&՛b / l ݟ&dkD> yzjb6⇃Rw>r|(i'pR:瑭qX#ܳgYe{޽LOo6:Dۚ'U ch3c]!!a?djg/5Ǐj8ifA?>HǿowrvgRU2| IE/ZKX@D% -*¶yiz0g#jHSQU+3'RKe8w?!l=D{NʐRTV)g.D]'dˁrӴ49~mys':μr" [q2nxZH ~.ZIs|a$b"1*܈g7^]Y93W;J!n)b (U飅 dԩ b?<%{u\0ΈAsX{xxV|c~#Z)xx< Qu|G+O3\ɼEeKȒY2{(Mӈ52'?H&M11&No*Vn.:D2tv,3ALFube\p^VLrt,󦌑:*^@i4f<e1*k6QKGAko%{[zޕV0ns?tcoۡ3RW_/daC!ao],& F)_-JmU5N F ~C|Xyh ׇ+K(",DeԐDo˾Yj\P7;ݑHZUq㨡EqrZH(AI8;+htsb0NnΩ bٜΤ1e1F6[X;wIˌ oG3tnT|&ijJ܇wm5(!su}pG5KϒY"bb61JwB^/aAn\I]1G>aϡ|VM.6 r/,+ KkGWi>JЅt5-39W*y#̈́5r42H^asQvt9`2إ8PS$7b* NGx~~ E<-vY&Cb>_ZG*0 rKme}a|NY"E1͟e,$suGe#.;ۢ*|XN=8(ܫEf90 ~Kt2SxqƺwҽO=T^xo7Y2C0jo ea .vmdo[̇9AH)N*A4~^ !Iבx~M_Ajfe*>fb>dkD.w/F)Zy]?7>Yi??t>#ڪQQySu{N2uN}Y-6\Mo ˅GLv912zUU-f1.ᡞ%GE<@4~!!!LTU)|Pr 0ؙGH~RAL{#G%GqZa"!혌#0FI$%s[{ w-./<6}qt|!t7?"OqMtr noZȊ LJQzka c|@9GA@30b`l5,9).%t?Y`ݩ꿝`mR*?80ERBjVBp#c"~콁KoɗYu>Aa 𹌉 o  Y7ՍkEɍ][qTf8uGZ+JeaR+J\4o>BQZA(f}(T$_|+EPLi独 A110 w?8UG6' :o>xVJUgJNrToe?|7}I:o(y8UƧFյj}hFM kES%MNRwvxm":-Y%؏#.k$:#b:0C<+jk4TIXToΎo8A  {eIⴐ; gO8}Eo;''^,ߒg](((jPP2X|Dv1!Tk կ֫{?l?}Z+K̛֨:b!&2BEhd{g; !_/5(K?@ .|fĨKU6@$uؑ%hbr).|KfOjn@1LLp %֙7@k+zxZ qeI[czR|QMI7 Kl_f"C8ii O: NPx2ŁtqRE0Rqi5]WV1ܮ>LB9-εdVjIArp;y4rGĩ0\D\B+r)"Qn{ʇ>/O=TYv/axL3 c۬L׶60+BSrq߳[ uEɕ|霉 :ޯ9A}J_tрQl3=|FY%k7bBuL8K.~d< O{WVn$ԣCwC ZQx39W2,ai9az} f}w͖çsZq3Oܣhc|26Kf(xgs=9@k9,xnrۂ2LF J_ٰ^${2zhK(_T7Q{kWDPNG\e ̸(̉4.q䒍# ш_Q#ace]azr-IaN(Ew.lyOo%):2BKep\rFi w{ugN|iMbQcݝrz_PDkfyΝ9ӓŰuRN#{Nd)Qre;z {ߤN)B#(HTܿ2o/IESPhIIQ>c]ڰ*~좰Ú=s!*Oܡ(œ.teIc.^}y>| ]݁x/=7n!n|qT[ms^Ơ( dId`P+R=A(0[v|3{kAN |JYI lNp-;o7Ӳf , 8qI~Wqr8m> 3˻強;1P-bnF-VO>AG:<>_wZI4ݩKcU٪]4RSfO׸cʚm:qbomnӘ9e *֢KEG9H]̹k ʡRPܬ8W/]1^\U˜{NMs`9W4-d#I%ed;!i]lNB'.zt/,UAM3k[0@0f{ ~st-R NmFlzcpL|K7a#G` ӶmbSL;m _sʒ'.rZBCGO߯**.,'R=u?2!=SV̛(KgWkn\5Տm% 0MA):wtrKtu_s*ߕخE#ŴXQr(6rb$,ntO#eh3/^qÇ?=])AIW_}|>[Zeh%LD 2mr{ Gg ByZ%͝D3v9ʦv't< kYtӊz~D\z ZQAqޖSIEIKGt˯3s"ijݝU /"b{_nnv`sUȊ9`łs1rA*cwuRrWҐ_, U䌢\Fvvig\Pi%egD ZÒ;źqT,ce Ujr,hB ;ոwA61|1ZzR_]F?g3*I$p)=bjq2Qw8B|s6wFi |wwL;u'O񐰧f9@n4j}kv\'ץ֌BrnʆpۂxDk}rR}S1bV9H't3YܕDE=t};_QIJ< ;dPM l tjRg_AՉItZ۫@%x@3e#>JvG o 59־o8d .EĻfJӾa9iݿv;w~/3 i 5uw~VGv=5%T[l 럾~!ѳJoePq'DwLG9 $X$ \I76Ix#++\-\s]>g[h[8=])eRÊi慨<!;O"RQ "#!@x|T|X<%Ľ^!`i{ȸ޴SΞ>םʛc @]eŵ'mOQL* JAgǷU^GK(9WG&mz.2SzrW@&Ǝ֭/ۛX/(QQB A0X>ZtFznK3G"`ک+=[6gwQCƬ7iqYԫ%n`+-"k38,{>XeS%@h׻zF;h&F׻vxGI1"H|N{k e9,MI'.VU䗓?#2=CQS9F#{ W.p._<Ҵ޹劣zWcY&LpKvyhظJ:&5yv9+1Pg߂Ķon>(O=LQutoW+g/^||(+@1wIxVA}HokR`v2?!Z{Od!S^OR:sYJIUM,:ē36u-[sT RL(6C@1YNe竁%jocRecW}H_Z#?_),S^v9x}JY''28j$|uA\X],;Ծ)>t9~ζc*(Hv]rj4%]ZQ#O޿HcDB *Y&s&?SJ˫ 7sH}H;x`/ \'g΍9j$9tiִ2-Ó,ƹ$ &/CA1\[, t#Q9r1E4!0(8ʿ&~(5o#GUG2/}Ω QF}$#sI,NPye#6@3ڽEdX_t\ J+ddž)Ys<#ajqF1w 9PMO.SAMR~cejtHf&;]ҝPmB;T@1Z`BJڈY43]Fn FZ[B:բҶ65x+i`z$L;l3ʒeFI̠tIKѕs-28%'7KcԛѺ02I,\RZx8ycyyw(ַ>c"I-gAMqmܓV"Xa!96 .v e.IpohK0VӍU!٣UʈA 99&5J#:}ia 氮WYz4,e=&sa=>W/+ s{M #e2ցoF;^u`PX;7bnW9(LZni "yqnG?z{]ٿPʿ~6鬕˂i.`t5$A e^,yRJv9wA"+m;tUvjQɠBFRvڗyq9פR $Zjo.AAȠ)P;)hxPggkR*G h2MZK 
FHʉpУځA"`͆$+whkD8;P8z-}#䑕zV=<]҉ܫE2W ?xн#$T{g)(i~$Ssw̨&̉+'g'qЬi[r<:<\Q<5n%4HlT8k![hE?o4 aIaǴSS%49%3FH&Y [+a,ҽP њ\ m FŲý.^>yy$?{q:c8&x_5ۈ`kTn348@b#BGgaʹAXfR ř~ p{Ὕ@=Kf1T~ϝ(xsz>="{} &e+㝌 AEWo+E '9/jb(G;ԖBי{UQjj#$jUoo-芷d4mrc|-򴜃?V lx2#rb~hR2Y}EL;ۥ:&;Vu G)S(yBZ8U8%$u2ɂ`p(p2u0T5LCOޮ߹x3ڃ`dقVS),,[1A @_19m#7NJ$GQYgi-#,Bj͖;P$#܇M.`;Բn9|z ?A, NOS-d7y3&=zw9(LuLItI=?RNLKo" R~5?o:)7<҃Tg>H&JHXp~L5ƵZ(jEu=$(\߶@e>o/4(@ń(rR0kC 1.Y5UJ?^ЊRW.!4ġc(fn0t_Av%3n)V-F+eXRb 3]}}ٽ2e OkE? bZ_ʵ亯,3em`G"dƨa753R7R lP(PBC+Ic+o ma `05Oqu:jV-ZԢg.X~.w_Rӽ) ݛ&>:1}Gksj{5mz3m Xmf> &IqRT^#'ns1Vf!fQQ:/Ia6I{_X&(Jĕg8}876#da^"NqцC F֮Gq=w"׻}CNto== $BΩ1sA aYmkZk7$$Du"R'iCb%4L.vPzBfS \e;r4ڝǑx/9&ɭ&*E/&/*D[f"Y:'JX@9:_kIbw} %&ʲ1P ъ*eNd0 L0Ⲫ~rE2z7%$qNf_qGX(K-zN:$QQ C^0Y,Q{ͦ:S5Єx@5xN.-JT2]ȆڪDp/dڷN=Wj%J8$¨ĠD$f&Lgq!#]7Դ7(ڽ)n7Hu%i1?+y~_/. W7CU/f++ݖo:R~A8ft0#K~rNd'\e)*I$:C$( Pr]U@̺\fDiĤ%$ܬ QsU͢0{أ j22ºTWW˓MSEW*@P?!T ";ZӪt#:EW}=7pW%@IDAT66b;g}}l=)dH(N$2pn^ƨ051^iEYrt):@Z|oޯ@^upŷICRZbF  ;ѸRD:U"H;1n︝8 4tǻoQY=uw\o*uP<-QrQiH&'I rD Ҳ-u#&}6ϐ(gȬ$8-v*K GkW*A'x\⫠hD~P2)冽Жd1< ,%ɹ +YyZXB8um#+բr rڸVy%[PR!5d)LG#>/q>`8%%}ı`8i]-)4,5^ƥV68 \՛f>@Ɖ|П3idMǩɼ`<SПg]!='(j,=Z)ʪ#ts/ej]V#0qy"#")۲VGP1Ns\QC28S7%*T\8+g*EipBtJUT*ζ2(VPΏ_'_yҼ׺Vޛ «1sD@L?R!'F >vxJ]X:ǩ,E{»;e&hgO-qwLE֊˿ʁSBeKJɢ+IieK*qvTf')΃Ҕt!yP&Z ê`XE[i#݋+ei2\ﲡDZVMYr!P>Rn܎(]S#fz칞Kh~4Q.@,8CO]G m\B?4)^8&5Pލ?,ZV@ZNGwm?W[bԽ\-LT45GL?|*2aUjP%-W_ZaBVPNulfQA Q֦`oW0tLN@~N\.l=ʒvE O@EH nʆ>brշ%}bW7Uw(:ĭ&I<& ]/8 !jtpQeR ›2& %Z0@w2/=uYK<'gM*;O(IT(29G&a$?GsjRrhQb{UPpuRс}r~hohAc=7s;aL';v5A@c瘻N)+> ua[6?j`WX$ }*>+$znxKY⛚pPQha[~}=tw;n l+sBI)IxiD@%N( 1RP[M;Z._8w'7GW]/ 3]GK DK#@b#B:-w._,dQ2w]G5h뾆;P@g%(qHPq*N+rwl Ϩ}*)b0^*cɰFw9w_8# 8!BJbd8U@JP!Pa8wYn&5xSYsU !%!F BL(+@:r5=x}.um-7Ntz߈@[xNo,{kb¶F E_˗ŴSoi,i@̷Mw^__\p[>z)c*w)^..*3`:PAb'KOܗ]ˆs=MiGd#CVqղ mPUySA͉\v9t¡Ad3d=C䦩{)_=9[D^e;bKg+f瑳VH/TZr=HS&wPBiv纯^NRKayRl-{z*z)H*rB<7gj}YrP1PH1WX⣞Շ' Gfp_BoGiSRXk7'ma)Ɓ{w9w{yCtcDCB mP!rbʫL8 lUu=_S]]Q]8T¬'7"^@Y@n]}dПn95 Abݒby52Fzp0MSi"\a9],|PTdתTdlO|cA5;\&qmK~o9yQ.eN}~UP^xwzh/n}~,]ūH`E!$;)s0cG IPXYin]J@J2g( +b@vL;,Th[%*HTto_)RVX8~ڬѣO=(,"".8(<0P6m6k]}(SÞTH@Ei.x7A%5 t©[ UG:b-Ja39Wd b h凯PTyL8F:@R7'*@t@'T{TU ף39bHWժs8O@!J%r#Yp;@3̝v0i[BY"bǬY 鳄}K~F]\ OʙߗݺpQ nӃ)LjTs!<2($!S*ɑA_YrCλ^*n;VzAms!Þ?RW/&+\9>1 J;&:on><sҀ%HmOtN=, y׭zo^PY"X?g%RQk1'` SMD칝_!)1Mʑ>& hH)c_HJuB莮m~[\f&|JbMq$i U{uYj.Ċ'@1z~ pAH)8a1!RX %Ւr7L%w5xN=󾤼Fa<*ɉqO^sDwک_=N{􅲤;윻.λb\?`ZY2J@@)Ec%>Z!WsSb .8NY|pˋBF)ᔎ!LeLNtca"J &#w PEWȶÙLdb2ب9sl;hrqij Ĝ#]Uj)X/Ej K/1baq(Q)RVpL2~Kj/bx9P>K~90F.zYO%u#rfw0g*fIxT /7Okv=% DFb%&W C!V|O';LK5]+m`13ScBYy)g'޵CO剝wZ\%^X{b8h qqvU3$9q[(Zh.„KdDn ]DGW#* ٜh%hOVr;=v|s)G[|$j};v t([ ;_,ϗ$6y:\5?dL$j)]]o}]]"b˜UJ~,Ued'f?TKywbϜ{IaH,Y+urdE`AH=L 'GɈ8+%H0utim},隳n]$iEIO@qCtS&4zE.zԜn2P52o_AdIBAT*-pA}easUZ'cP>G" l6klU9p*[-OO)cYܛP1RrGrTan[';~-31GYjak-M_UZd/IйPue׎pkR'WVu*9 N}vobg>JRxD8G#\8bP 9ӢLq|H[0d8*+<+eĸᵅ\L;#w0]__+K9k*G#e !ĔMs'KqvG͇,?P<CI"f d/Ň uEVDBCpY)![?s~;&Q r6Ib(C4ErDlSY*չݗz1XF&J4$UwgsߒL;ٽ3guh_Ptuz -? Sy_9$VvspMS<ɋkwK$:)_z ڦQ*V`j->$Szu}m+7Fqk! 6Z9:"TQQr䜣+V",Rj z=Ccu b@@F6`Ps~2@($.6Fbb%*:J)fNiíGZd5B>*b@7OMkq#jm3d3KS F Kněnѳ3?-Yrˣ]{RU%q)@M"{ho_ )D+bpH05 a-kfBDѬ|C 8 Ę X"r#Pb0OJ| ^ڈigکXƞ,2bh j=%7M\񎜹m%{oCը[]ܻ{[,~ yrlvF/|Q&/iDr_Wz8Gѩ%m`wtP g)TmSQ"6TT\hQRnxN!vPyǴA4sz{,6¦|@hMjU[tl7h(uZ%{+JV/`G&1G<ʒP9$闐pQZ8Z*9Xrǘ5ۉ!`iXv9FQz]SA `0b_-Òe*KǡTrGbX`vUɨT"aq F)]ˆi]"$vv쉥!`iXꦦ12}zsQz]SA `0vz;[V -J ypXp zyb&ҲDlh]b%*TB9d,JՔM;mYN[[-y A Gh&1`ܒRvG`M(VEx.*LAP4&%V#i7b۞NoDe16g0 @#GK'$tЮwHʒs фz}߸VN[pAsN[b m 0(.Zdvڈ!0NGReū F[:&^<)z ;]pQ\$_m?#Kո KôSG7My#`؜ p̞ 斃7lk.!cRK֛ 0~ѷdh=o#q")/8m f|\]ZYk:owv~5VJ=r/JC/;47o޴Ӂr}:{h n_ɲ|D,X;ʬ${W#;2x uj)#;B=𪌜 9jh6 A ))$M9. pMSs,>r̙4Jr T\#dx k4DBg]!='p+uK%;(GkY˹?M24W1*PeҒȐqw!ȽN. 
)*OIDH()SF|Jm[Mu>[?ebGb‹;-WsdU>.q"Rtidl .츇5ERWS Hpk `0  `%lbh@TD6m"zljvӁ I"TN]ȗZ\P"CecRcӶ?1e˅CV~,eG5ޕa ޟJmE:|ԏ#V9K,SW?)Nj/)E&}WJց_I}*cf^NlU_Cnz,8)OC?Hwf#n:ﵽkN7F)Da)j?c2eOdÿ"bGJ\TX$ ׊,ɻi{rVOzkSR'T\*W2߂e'C+/8!ե7 j(لMSLwK6(Q﫤ZIaD^ٯG Ŧ L)K&[]\{ٝ'CT]k>P /m e)e\3(nםteƙs#A `0tQ:g[ꋗt\U^C× bZ]ohz\ʫЎvwk[Eq~,7I2lM9Kt*JΕ. te(a<=NG7@ֲ%EY>YR{Tʹh]8[M,sDƏtق17*GwVn0 o]1q z\Oh&Rb%",'Lgldbyu)pOyCe9kA $$<~i`҃8|!KLDV CR!{ڠ܂y]swJX3s*։ny$(cF|RC)4*IY6-9CcTcVRun'ē_? 1 A=Nf/?C1G0H$- y侥}NG9j g''R`Г=j)K'_IQŌRQx/f -C1C-/btU9L};rdoȲO1RBHq4ve%ׂUgȔ?Q{rzwoدđx3#p[v6sHf(eNwO~M RFeLV mVfzw7*JG PfPZO;#)(`sޯj A?!гNB\KA@[*1d'144S Oj.Xdء _1Ne~9aWlv߈u1č6vCOW_^9A `0Q9WUr0:$}- ň0(V[4H()~6" c ęxD\˝6T*h;`%noDljl|eT8V:'R_Z*z~I8OmyXܼ(/vvSRo7460Kuf7A `0nW Jmk;!!!)upѵRV\& Q<wZcH(6:Y';Wuw;ۺ_< VsSfL[+/S܀܇1JthL!ѸW_evk}=\guY6 ?"`%kΖ땵ubDu;|J(:(#:* R[7H*B+,r.i.*zH.̣Dzpޑ!LSe\`RxW`Xfݯ)#(@^ˮ'%%8K%/%t,v BQ"=8YH #"$)6\ݣh/>|>kk  A``#`}}<2Oɬ|Y0ulH.3^#V(J HڨP4Hi]PO D (4ivC(1g%2A26)Vb$6;ߞiۻ_cpC$zT4'IKt$Os1N%e`(ICmX2Z$.wK2\u?;"ɸ,eU5~ڝy߉q2el؝1i\M;FNFYjګ獧`3c,oY+VSOK %*Gnz$M0YAlЧ hB 0Xs"%!:\rBAE)>.NL{MG鬌vk(*(DlBZeD!-OMר݋*)2@GǛP)R1/pEIu9J='vh} ܯ$"= :}gӗȹ썅_|Oe_fd>ހ}H|GJ-22Wf9fxPIn4AR'[b2nx]^BnU)キ!w`[f8A{L;xzn%O#j 莗 ˶4r\m' ;uD) %KS盄 TPԁGBK]V  ]E.Ntv+:xi/WA<6wyg-.Ϫ~n%iNm ywQd){s+ =sTK3t5j\~&yhcr"ɚL;m'eɓhf/T޹=]V-OX0qyg"#"Ʀ&,gɨT"aS.'t#>VP*$9:,JIJ7W\T' ^l\c29w矫@xV¤f`̒O Gnގ2,KnFOW+AE򁳫eȕ&)}uzNS3dV[4J%AϫvTeؙ#=GD?5۳dBR2G{tΥBӊS!;W=V5jG/o0yR}$b&D-вDlh]b3J*H!`-+2~uᔔ!Ieo,x~ѢDEЙSoھ0S?k,K޸]<w2벬PV^\ŒT(צwɛE$)<>F<@[픊ҼH^Z[~*eyz)戶hN?< ʢ־f]WU$/K~Cv [6Tf^D.__;5m}ޔRM*l:MHO8R&|G_//^8AsO/Q"=8YHhVڱ{wilV5;pp |rLcq#n.o5);NS]J3Ginh5QZk\4@]4-lzҴ.bFY[heI[m4=?}w/jYN=&"cGk+RG^Kެd7onSYvubʫL8 ,Udٱokk+p8ʦ9]-KFY "|HPElN^Pw̡gw=pPE^jGNO]Pw1}o=diQly' eBp:)4ZRӰ7l{OM1b #c"#bC\1x#e>Sgs?m6k]]MmemUegϟ:dRQyDʼn(U|MEe)r!7Fz@p3R._#BK%N-6zp#=G`8XWvs(U FY#j]DEI["E:@deTâ@#ɟ\ys>|\7ZѢDei WqF|h*TtM%e%oKI2gP[뤮 _)зI̴R!ly2j`gK]mS$M ~%"v..uURIػ8b[=Ǚ/đM; FY  ;i ;b`njF2a-O&#!76]Vvi(Qi2V%6\J+k$fӄ9-Ueʸd~q{_c)xU)B ƽe˷RPɵI0P_IȠa doKI YwIRV1<ִSO2! M1C6T87E,)lZa20'Y={RQQkA>oTQYbEnik;Ldە5趆ZdgJvIXpo[+2S_Roɞ%#v[[*/EޔO|M*i%r Sxu<˪j?{qWՅHѻ+6Kl؉Oq`WPL56٦7 BPﺾoOs: ItEwқgsߙo3&+Fv!uTE(q2WX`%iY -㚌V5]@O`e@qFI:Cbly(TGY,$g3F18uШ& M&Ug!bc2Q5!%W&wyJZLUy._6SqRxd_괹wH#O Av`/=uI̥ qD& $xr|Ai/6M geoI:K2P?9KyI"0&u6zs"h ʠZo;[<`mE=K2לn]# P9d | 'b'96QBYyI zpSZ?S,#2` {B 'CP͗\^灭 u蜱,;D/)Pjѹ8ijuƿkdmE{`5͂xNp; XKȹ? ;23h %07աwUvEQ] Ek޷? }| ~98jɛ}DŽ`Cmw vzdӠMw"u]I&H\k3W@Ew ;  Nb{`L 0&Є9bM6|jOsVtȩp /AZ, p Ə.ibwL%GsD . I`<*ݒp4X 2&h@Avҳ-55T3 dԔә+p:C4ح0Dn(.&`L 0@H{g$zGt=oGi[J}TtZJ񓯇sF/ i,|9ǭ0=FkxDICc ŵ$ʆO|yC0[`{Η3h?,_`LKsܽb^ UwA_hw^5l4|oڳ '^-^Qi. 
$C.z Ow97jw\(Tw@Miq QpMէ<ɂ%`L t'B~ 6A_(^% w= nP$w<K3R9(jh pG 1)J_$(P'^} c wKxj1 R ĝ?Ymk @2AnOfp"?>XWt98 *:Α#8JH=Pj9n@\89iZ?mCox"Aڔ GVdᱧnIZsC/~ SCb+T0UV3~^wsF a^b:3A瀎t8  d{v 'v:VQQ<"%H|/&`!D@kAˮE}à KIԤ[i!|PsR UǞ rC C =ao>~*POe:aZy*v@CQNtzEC푽Wc&@;OCC1ֳwGn[M'V !y/!}PY_ QP֘\S>9?P(%S-8#&=z0|%dlyNS)eMXoo k 8_$v>L]t5ldEt}~ʣN="uD< 8`L 0'@Ц@xjO8/ 5'1cPDp~qw ;h@Z 4,MCXR_0UJ:#Hz:τ-@b*zXuX`Kr2s#dP_Bi yus] $__ZJs'i5nc i<=bSR 0&@k4Γ0-.l6{t7 ;XԐ [[) >Wod̾QZVNYT+WYQvd_B'oIp={UdNRMAhB1ƖvP)DU Y[k62Z< vk @IDATGTМ% {kp( ; ?0]u2EڈgGx ѭV6s͂>b8Z'Vz|y2&~CG]9э;nz KIu k!x5h!unC=Nb[|}lQƓDouט=FF8Jg<57kE&5^pૅpz""'D:[!@Z{5KSO}8;ro-NrSaL+?\csohͫ%n|Q36~t@ȉm"+~aqjUYdac^nu%j7׊<^r̟ހ\+9ZiǗ7E(B Ish$r>7Fuk<Rl@/Qg@ `ngcy,yJ1&rdMҪ"I :ئ 9K [L;v58In5$Pjth~n)f䞆!T(iOm3uŘxsh;)ZvJsHI.~ƒ8Oj*/##q鄢PrOClP"n/\K/?K|9`L 0` ؓX"d0k % ( aCPԐ+q+'@xzCY0 fͶH tS0toMܪDݏ۩/kŒ/ir^L 0&F^-& 5 t؉'d4h4B\zp Xo,L44RExAЧGʕgPԱY>i>(z;u@}$Tw,L>`TYExnmvT,Q'`L 0pulgzbxuP$A8n&S$FpUW-nhs9J4t,r 0$ʓ_jjSf^HQ>v\۶I;ͳe֣9LħoKshYNGITgX1PΎ 0N{=B4͚lj5(z'zU$EFFl6Q z0C)>90,RZ)ᶡp'ẃz 鮃Bē'ڐ}I+jqf}-޶UکJ ˄}jai%rN^șz P;Z<,fIbT%?TqtǣsIi*v)FJKNWF|-uٮdHnh5DLՃoCjECasbP 3DEFPmhY&bpm(NQE Bj08ZvruVt`҇! ݌ Q$ė<oo9ս_Spm[bۥvZj﯊ 4XG`f9V]pQIoV4>hy ?`g# ͘1']_?vOT+k$X:t%lv0&9F*.O=sbw6lXBC$ s~%v: Ol@f\6;'r@@bCCgCǴZ36޼LvO-Б@.E0HQ֡'bGLi+"BtE^^a(U njA_Y:LHY\mˡyS+1޴Pl{wEp v2C|`j~bL u5w>y)ZSOَr%(M*ѫ|įo?8Vt‘%A턖/>(I=s EFJSigCCCdveB䐝'E@&ZH"P$\/!E%zG%u˪;T3OI9`QJpOm#ba2RyqxVC+,Z7hC!bP9K&"@=Oy˽ahtRכGeioRNȹoaQ Sؾi7oyomi z?V.qލŒ?XBOlh;̺Th-VD\RDCV%A 0puwt"Xmejed {_ܮy4jTԁxVCF7`jTq8*7/_ $GNc + 8~jF" q7B]|N^7+Ff[{+:cﮗ[|rMjBP_%$"U:Z5 R GUHlGzKDOERTd]"g$8=E:/[7,<mB0eImT^*fR;D;[]]!iQQ-~ٳX VΔ 0vLvk$Y-T-J̧K_8CRS9Z@`u:r{x"Püyղ-qqcN?\7ADQ:­8 (YɃӪP-K 6KB0a9ʌ bg'Q7~G%bC%ZCD% $jmrzEGoϖ{MMwQ/#, 7yVCƹ^J4?ng}cŢ/X[Ι 0׵đEwI4G޵ HMF-$=/>}fοCM"9;uGڕ~[~PPZ2B/0QT81yKHBWB8pX}X@<(8E\$(*I4qQ&zG/aɗ{SV`jhX0܌?ˏ1~/ڪ`4m:]WiY釾Xg$bX,-@ޛ^poƺ &5a"J~C-&cCQղqg>~ǫ ꭌ9@:gPJaoCJCzBH k(}D9x B,^lF8|Ǽ- Dn9s7_ofrN]][`K(H>=ɿŶm[%$TLW tM޲,JF]%P<h+ժ=$MOr{Q*Jo^l (X 7|Aꃄ,W!ڟ;/vuZJhbGR7*b#0W0jIچw#zc[.*-uC7lJEeB "SWL?iMZ|iڀxѠ ͶQ,jD.:}2>eVc|V˦Aj[3?l_bm]]Mr 6&V~g8U,EDG_.qlWf GJޓ0.[k[k,J%|<;P[[%/aI<4-<"*hߕ) `"{nL2|=$>! #L)il(YR(mheZ9gLC n iuF<!-SS3*SmMiьw/ۨ%匙hgQkj.laB`L4Џ !v88dYȽjI߻knx^<6 kE^"{!v^iҨ~Iѐ;ȢDB)JX<5b*rmDax w6J~ԙiglTM>,|3dLU'i0Rtڑ _ҔDI ў6:%,+puv*ڧF]Ky0& y&#y(qu5HrNy=Ys.xE]!C(z-QB0,·w6J+Q|X,#gTe|yf:`:KT(Y!$Aب&um-iWi^X,y3`LG:SgGH|I٧XXSGKt :,;s5 J%v6\%0q"&@ȸrpg`S+>dԡ <=Ԃ1 "zU6tn@go۩OLfb)xK ,C.y]?>@ev ! &n-goS)IBaE@Dl_,o5n+ybΕ 0&@pCT\&aEh3}!#{oB io_Ƨqpu,[f.9~^{HH RGbC,4`L 0&%nL t&q o~¢t 8Efq/ blb`sipX {(P29yTT)c;^TMuNzҮxzO@䀢 c.Uʰ_qEg +<>0CI&8 {enu֕taI8`L %?嬙<ސ6E8pPpt9nK!.>LUyjR~u1". lU a)EP¢Sy3aR{d @Oiz*OSGn9WW{z8*Q=G΃0r?yTM }!ȗgL 0&XWΕ 0$pj8?j RF܂&B~7h9,j<5f'L2M8 ? Zs],z@yN3ڻi MuH7IKn ݇%1+<tfR 1p<^/Ipt (Byxq`L 0!b)d ZME>G$S.^ yuSe?[בe(.i<NGA(ѥ}.SX+'\wE@L0 [8q$gL 0&DŒrL tͳ/َ.}pZm5[`fn80$E.W,c볈mժeS~lS=lD+g^Ľx`L {k?; 0#P[ACdrúC OAҍbIa8Os@FEǻ- ƨ^ u #E'E75 =T˔95th5Wpd 0&, ,g@ /t˞ υ-Bڔa ^y:x8_ЉUNʢg!D֋%<rc0t,yj(9RǶHGC s۽u$1M9w"7MC@? KrF+lH4ZWǝym6 xtQzkr118%Z`L 0&X,˛@hnN "O{ÙB!'EqP*m8k[a,yp 0&@@0&`L 0&RX,Tuqa@%@cpc:cv3V2 9`L KRS\N&y :QgS ϝd6Ũ|3U߉ 0&B8.6dȬr'އkVb|`L 6# [Hɪ]+[HͼfFg@M%s`L 0&BYi&NՁ7d4~.w}8:l?mnY0&X,uc!CjghqmTb2Ƃ?{"k ^ D90&`g!b,4`~' ,ԑoZba-oN+z߼3ހ?*Ul]l->~#Dsr`L 0&Z"b%2| 0@ KueeEG{JN曨Sϡ۳m󢺺Rh"lYB`L b {2ոEF| 0& 1 :ͫm|mf-Ls;Eq;~x=[6Ӵmf܈/YS,)R4(R`L 0 X)x.`J,HԉFڕ~8l!&J >jgK~ߗ1y-ndY=q%bF.$a T\ &KH.$uYL xK@%ղQfgwۦݪLPrmA'ărKL?]U(jX[wP7KPAU0. 
`L ti:o^Ѯ:oļ%_Ciw[3i\0"X Z%L?v`S YUhYT=j>5+כLB U"hX$A;_~C4:Mrr gpدIJU$%Pzy[Θ!@UH]6k}]5CNԞtvƬ~\v MnX(wz(b,L xL4DKd *W =vHiQ X.PeQC=jow-Si&,hz{NiYH`"TQ!xbqHp;Uܫ3I@ ʑZbI$:ob/nVAE*f}^xS)oϙs}8@ *Imov0&9^q; `{Z,eU7P^{~֦o@"n]]DYڇ[:>jy=`Mq_}%|^bEE#EbYH4U~;dE#X^#@͟5V_P#iT(2 :K2-􂢫Egp^9ѩ/r1̝nhFl&>S=uA2wvJC C\`vvDo^xkƜ.Z0gf? y3&Щ + 3zMZ"D臓::Jx4hb %Z'J$H`ô7:O.oFl+~ -9ɺ ~;j/ęKKr7BRm~7# .OM*L-'*OXL3xhM2-$貹U, FGTK{r;uc˨& Mwa>#V!F-T t]B@RV[1nE)t4~b#{5ɿ鳊ς=7q"^čĥJ"#]kH_ǟZl@!e%+m*QG <(w5ehOGldY%DH)N֢\גFzps^DzDN*(Z&>}';_:*$.?ʧA>+)ǻ e0=l3:P96օsj'%QcG?z&mq` @}f>FYR @ܯ{ #&P<AoߊPJ5*.~;;Ag*FόsmG~|I# 46m2:nyh Av1RdΒH0J<ʠl1kjIЬx'臝`L- 9J j~ ፗy"G 6FRlX t{:JNʭ́(E< } AQS&ޣhhN&# nNm.d|/^2nhNvmTr-=[bC^c`h-%~R+3[k8lRzɁ 0&MQ7u;|fC"8h ]w,h_쨵uPΞBM9s9JcU(hx* [ s*zcgk% a"ѽdq ۧ!N%I S.\?29 `V{{n^d5%@ 0&@hwAzHM(mKO xtݺ.[K|r<~pοVE!흥[}/g-Ӊ C.~ܣkoGi[J40[\Kɼ>Nlgኙq;|9`RL 0&@ njR~{Tnc/ /;O/6 7TUEa`(4{scw" 1(>C1w97jw\(Tw@Miq Q?Lէ<ɢ4RAB8P1./`L 0 P}k9gbOW*wm#u5PK(^D =Rzt1 ˡHyRg> rPݏ3S4R8# /K(>1za;x}P 5)xΟ ,Cٶ5k  mAbBPlzwu},+Hn?mt,gp*wm^WB9u`TC΁pI&i pxÓ7Ҧ1=Ҡ" =wGZsC/~ SCb+2R{Wy lďP;9dHMх OCS1 cs@LJ_:z YuƳXIFՂ3FyDSH/"< x|)`L 0L@kAˮE}à KIԤ[i!|PsEݳsSqHaG2 | TCo@0,S<`C+OŎu(b6pzQ1jɷNSu=W=!v~i_(znjGn[M'V !y/!}PY33DAZc ~sM_ @}_NAã{k?󕐱y(:NDb-?ֺb(yK;%X.;茱jdJz!}\Q=;]S` s0'!"n@[UWVt KͷbL 0& >{/|$ơH*+ %b#Qmt,D >J+ˠtr? )&+SG|Z0ee UjVͷhbRŔA9_6ʶʶ׫cpޔ*.tq^gӈ>* } ⦧s8etx Dz"Ԝ %;Uѣq΃S?ҿD HzrFdA*8Z VsZ4>UfbJ %)Cor| ~ H_O|h"Uhe{a#'E,;kS0`|;&`FbP5F41(h]{;P VDQB!,/OI%$QgB{ 1=b:,%WNS z د?smhQ`5{.R7buݨ+Nъ0 JBܕI$0`ܽmUTjFM 4T|ʅ(A\"t#[?H% ]HnqO` |&`J@6=*7 ;XԐ [--%IŧjȘ}8p^YT+Wj)٠h헐ɛ5@<٫ 덧pz4 a2Z< vkGTМ% {km^aws$g3&Ôw8Ik#bEq+d\G[{S7 \ ,Z巓< oh9c&`%:ĎА^7ݡ' 1c-Č>684%zNux]̨ ! w:/;'נ,Acnt-^㰼K!:~$pZ< qnS,[dis]kncd"z%<~`@|+&`@ϛ]k-z{ uOGpȓg ~s/@]!^9FE0h;0 )S͇:7x?ag;aȳ ={$ChK/8 f}ե0鶝pUoB5gLUwaHr]pp!}G6]4%[ =-Ϫ7| S9+Nʹu &a LNu}s%#o~+^\cwy)r [#0cs'>ǭP5o6*=_ct-6]c xm daBષՄ$Gmc_|kƹU~b ǐ/ 7h|YͶ>'D:Hsl2l[" jnmܖ4?\+dZ}jA=9ѺUG]?'n)0/ hrӖ))i?u9K.aL 0&CQSyeI"ZM!W4 hj#D4?В!q%Px=o>< /X9S&`L 0&BPA.?`L 04ǵqqF889ƠȲBc g/攂`SA{bvHΆ 0&@8i5[*"b8QQ#}tΚ聺)rAYn>|ˬĠW}nb9&`AH\W[lKc{+AW$hGH+\HQhLke⸨ 4J>a8X00HoE$V%RY:X,"`L xЛ{u9?s)a8"Xtnz!$+WLFgC!jwM\}ѵf(<\Gю"?Y[7.K7n*]qյ5$EQG%RȌGIրF䚇GPT0=Z,^p~uїEI/}8BTdԗŒ/ir^L 0&:tX"dƴ{|7^\~rsjcx8ӎ**Jm8%΂ys aϕ (S.'дh=8AH1ϚɷD Ùt5ߤ i=,OwĭJN#f;S[ӱXj+1N`L x C%rn{m:#[H;z7MNb +,J$w$XmLO;eiQ=$i#tp-jn-# *PlQ@Z-Nt)I4o$05Ewg(P:e4|ŒrL 0&Љ `/)H:ƞ:䃙$/j$sHcKNmxi!%zGGo0)$ė8wD%.1(/S=U둬Y3Y%i+Two54NKWJNPFY%zGlOvK_\&!"AZdR'&|3:%\&Swg0W@ԦV(2,^ :Jޑ3\w޼aw7K[x6ʗ%6Upͣ쩊Ի A8Nup>>pQSLE\p7,z}qM1LnS0! ZG܃;ra;^<[KA[5\0&R )=7/'(2UWJgdZذcʵ8 똬$hs,X" 55ʸj=R)4h=ک#I-p6D];[@+ %r@}Ś۷ąWMiaN R&UJzکM1(f%Vgfn;Z{O4$fcm`UcL /tf&׵?9Zv]?/OKQv]t,e|+vEvlͫS|yҼD lÝ }FۧC"b#£u `~nXLUcG؛$ u+*ܓ恅|%_kל\rCrL%uR,qGYZgm3rG'q%xZ4Q:džixm%N:E;hʃoSY \*&B饞3\o& {eAٿ+Qsge^'mGefBax譵r`-}"Qht]io3_ǂ\9q}u KKRoEƤݹk)Z=~='p׬rQ\0w4ϯ_JI $EQΧ`B7 Xe\TXaUjhM,n4G 7DTM>l{9:NbGԁ&%2X%JC.y9*Q1ù?אcV Y 1Ú )js-+q!4 ҳ͘5wѴsgiA-KAU\&: |Yơ܌KVk.]u` !%I0my <e55n+/Sǥƥ0Y}k܆ Cv%i4qNKs[(#̧^6R_"Iv8T$Ff" ߵ 8"$>|Z λoh_! b#J$H$h JҶ+/rp,kÈ9돌OI53݆\@IDATW? 
[binary figure image data omitted]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-ha-vrrp-compconn1.svg0000644000175000017500000023512400000000000030045 0ustar00coreycorey00000000000000
[SVG figure "Open vSwitch - High-availability with VRRP: Components and Connectivity" (Produced by OmniGraffle 6.6.1, 2016-09-29). Recoverable labels: Network Node (Master and Backup), Compute Node; Provider network 1 / VLAN 1 (untagged); Self-service network / VNI 101 / Overlay network 10.0.1.0/24; Instance, Linux Bridge qbr, DHCP Namespace qdhcp, Metadata Process, Router Namespace qrouter; OVS Integration Bridge br-int, OVS Tunnel Bridge br-tun, OVS Provider Bridge br-provider; patch ports (int/tun, int-br-provider/phy-br-provider); tap/veth/qvb/qvo ports, eth0, iptables; Provider network Aggregate, Internet, Physical Network Infrastructure.]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-ha-vrrp-overview.graffle0000644000175000017500000001444000000000000030621 0ustar00coreycorey00000000000000
[binary OmniGraffle source data for the deploy-ovs-ha-vrrp-overview figure omitted]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-ha-vrrp-overview.png0000644000175000017500000062201500000000000030002 0ustar00coreycorey00000000000000
[binary PNG image data for the deploy-ovs-ha-vrrp-overview figure omitted]

+ǙR?}P,U6;FFu-Kwǁch>9;ؙP/2gVi*zuV Q%ᢍ <3-ɞ Y2J6RWPϞ`z&8 5%*1@Ykn9M$#KUb=ڏ\/bFxNq׎Uyւ8WyVBq:fۮ}jJ>GDm#%(N/B)_=liP/]Ao&(р@Dh $_'v"EY#M˻*o/ :V{Jڸ P4XUX/GJP_ 5`For#)>kq^Kݻ%RHzK *((OAHSbHQJIH-  @zo\Bz糷3ofwoޘeUTwP`>Eq-jyC> lA6Ѽ+ iaE4cn_$o6%:"nDd[ZyE=y_z~C@&ʴ7W7i<#M w&v7[ b)nj>(j}QJ4yP/81p潽0_'S>K8i=McSz2 4LC!`&d?{$/fj y+(p%=Av񕚨/ɁĵO(PPl_7,̱雝e;*yТXNsqBaL= qgrVu.Rf \pd\&jmS׷ۛ'U__tlLBՄ8o?;ZJLi1ϥծ1Y^#"PJ8O/]"QDO-srUwJsgHxJFNDm,5 ,9 6NY,rWz::AzL&Lf9Bg,p^;()N^,$$m2wF yDi/L(M+Bww!\vɯk6+UGvUbz \ uEmQa(!5: $9{7V@EtaevA$_K,&r)3> \Vn08#Dw(_7ÌAٴa3Я{(A‘V0[ e›юm#wk[ 0\8~I&I8&l.$BNղ7B'i&!a6([O5Z+D;ڋZ$Iͯ ٝغ {C@ 8(k:(d{W_и8As2-Yti\@$ْh,Z;X™n Ar7=8'Wƽ-D%4`D:UѲ\C'fLa( ձnÿ=5f8@kbaqjLd<'[nqt!lzaiz jWyA`+O8t"M$i.ݍBx%jD/^O7zul~ m[p̬|zni"^Lzl8+!0~Gƅ:ӵDe=[0e8n`=$yE9xjhx-WM & #'h>9(I GbS':bT,0_m@B i B cK#I #[sCfEyEž8fc1#q|d{t!hȦd?K~e,kI]j'Z$B=#nF4KKzxhAӦ@&8DZGR/EB|@]Dno$ǼpdI[θ98y << &^2=lN_:{nIٗM!\XV\prg7EcVsB}UDԄr0._V$_xʶm8ý݇1nGPAMè)ё{x^Ypr]gO&^f!y+ d3+lQCPK=RIWIcyPp;$|CFv:\W9'H"PFDpe]dHӔF߷ݢ!Rl)ż%hv6N60l}+荙F-7.>n@@my)mgGz |{pof!ނJ[8mǨesןܶmPl@}X@s 7oJː}[Oh-5ʥI K _O9 <ȯ 6h;MWJe2h2t4i!JB-k9e:'S 2tX[Z˓Y1tE%wq6Ck@egsP,?&lj3ܸ oEEj5W\Tx[(npdOVeᮅ72(t8$M3h:(s.F;}Cn L^-ݐI6UU-SFh3CM3hGN!p xkP\J+$ 1,4FҔpzP6gdQ9/W5t  KS)u5_0].ug̏M]VC@B(֍ ./ȶjg! gN%cO¹lV޼0<1'qV8s@M3ɗA T2oF?T g4${ha{iSJ"J";YE$: Tbu+pӨa'.M)-\t^Ȕl3(i:xDB៘g]Gd:g DDLm!&UsVv&GN{$~o WM2]S޺^ksCK&߮Pi" ^caa(HNCqٞ!`ؚw N+o{}:CRL&C}]u.q4->n#\ Kq3^f7Ll5,m;ꪦ9T'{[Tw 89+Y+=C`&NS"r723惂_ mb @pN܄V6,[eZyT gCeލ/Dh!O($&b6Ͳդ{s'>PPVյ@_/xmP(Iud+LڙgT:hC߮wvɉ]]=$qVxbTYU2VnGB%qg1v$@Î0t$gرfϢ^wcshA`3 &ڜnF͂6,˘7D|yh_hB2P+9/fHc^4?mݷY!(8;L6gV&2D< ;]̮6gFNW,0! 8 '8]VVf#]w =zcn};tum&ehf/'$dž_c|ac^ctO-5g0-mOG ~@qaZ2@VGD_Y-\ kw]:6",& /-I'aN2.Jo2eI۰AC=9O4o(%Ar>܋;\M֍U!pdq|P@( >V'6azvh̉\nADiO8"u E'~?`@6,T|z~_pč4ʴ'\ _™ne9eW_w9ft(vM=͜2MX06 I9?=BA[Nj*k!hI/̙ShgՑQƺ+&1sO>ǂfUĈA>NNټI& Dz\ܬDYēO:9,o^kt{/wX +!h+C|h&=U 0ad-ͅ@g)x&`% ?4WC\Z&D%6ߺeO2bhch0IkZѭS |c.7`HS3hػiB" IMnve= Ng*rh,Qlha6D&Ȧ$WQ"C yAs !# X8_bv٢'P qs{޳&j&ʋgNN7mE|0=mgD7})_U&?Hpaa??;Gt ЩSTТȲ₂'O>t! )&L$Vӣc2 \Pmh=׷oѨ{ )hŸ/UjKmj w.oY- {6\T4p[TӭWϖ%n$ʛ(߄DӸV/5*9_qr$O VS{ADYj-BݶdִxVY&Z{29n' Di )=s_ƒaE_"ɤ1&(Z&HMk~72O [~giuE/Ez3!xj@NoIMV(+"; ,yy.hֻgخj҆rַo6*7,PaWVX(?0\ +w|Kc'9PԴ p2|̡F=@@~K]=IQȥ3&X:jzg$tyM"<_Gm/y@rxn@Ð>`4˷Eu[Kըa0.ˉişb +z[U/P+s#wV5'Ti[66N^ƿj_{PO*rb2:eB &IͪU:?M GMc5v ^ŅG>r#ӯ4杻w+& hMc!2h wO{&:ZN>\IJ$Vm=DEk˖2#èm\wkjDټe/rc/!(KQڥ>}NS{rzxbbȄ9<2q#@+pWr 4Qa-݁qȵcHxɖZuIc:JY: <4R%LI4ٔY3$U]"2,eͰq{ ?>8G&lٷ`85dL( 2?\>z3 K֜RH@I킕J%hkhkARV!U9Œ7 p*Mh#L$;~ie˜fa|%+OPH۸Ar!l$ۀ! A!MN`nтzMgͲ0v$+ 7v߷%K.֪⺆J>pZ_$7sV-p^==y(gXB8˘WJNo+T{3jQpBO 7/K* q (gԄ-(AV45eg5q*M[WBMma<{@x mǣ a__>F-K8n:9Jyw?d46Q0 "͍a6"1y f (([N&!`'&R{Urm舼]-Cc{9DG7kQF 7WWpusww78r[U;뵻#ϲzL~ …+AnЭ#IYfr8Dt,ƭ(=C:=\uxCy([^z5өH{)ѣ#lɖllT2 odLP^FAL@fiJ1!ps|-CR/' '?P@x{nAx/3/d\[reCݘArǁqy~#֦֖]oVkrM%z/w$:_E'48aޑcCܔ#L/\h,*霩ܤUFBKK$wR(%ѴM+軸@k=vEUDI^Tlg],&H'{o&´f7\C\Q4 K󫦱kYx؄ܿ8*1jԊu7.pyYӳs!2-D;$2Jz.N"荙9(W16dB@t-'5&Z2PDb\YՂr+T,ME*:Vl6A+/OݨGlKhBd"d6;!'Pk5۸Pu`؆h; UVtc-F[^3nB ,>iGj߾Ls timkȚPi>"h,[' Aie-(SbSjMdYDc$2v6ә6ZGS.rSl*6,\i`m\F"ƥ|۰"Y6CeP%ݶW?-Oyd"y>w=3ʴ/(ɦdm6X&7~Ѽ-^X]oe"~dF@ZSG䘈QDMe06>9:CoZ[A)z벲2O%'Yn3ZzL&D )d\~ 9ȸl_whe\ˁ-/*/ڲM&ˍ(SM*C틏6ag`0Aɲ1<1I$,TޓlO/]RKd熵.M:YUEnPw jRK JM|3o9i;z-;%5s4R^k{"B>i+( Fe6NRދf咟fTSn: -F[n߳3Űp?9( 3Rb^hOW0 RO/lqd!4>f@c.r#ViiOc(U 6-m\LT n," OQ~cf4)Y (j[h2pgSGDY(c\M޳cy~#|_i@]Y`0%S4h]|-?Z"4q#5# r"nJ=a>|-f! f9#&bΰ 5-D(]g;rW.) 
& 7E-Fa8 ߓ.jӕsVB,0AU0i1$h?wo-..q.ʤ-746iZT GmBBv"`DEt3k$C2%&a+9/6O)ѷEu%m9A)NKoEq6;X^TYj֠24K+Uy+V(K*"t,Ѡd>ys*HkL2aSvLf}2UnЇ7)3qVۦ2l*=s/uWQu@AR>/^T#sL~ dr!NtI{"ղ9A[1>>RuUƕyZAh||uru?tn7W&7>V>W*2\y<;xM@YDeS 8"lum2!6NFQfC!Pow&]K93>4AXbqmA~w IE^9I傛3nDYJg"-$;~wܡ)*Y$:pwnn ˯g}2/5==F3VF.Y]^|a+#DZ}d2dZ<'\Tأ48Cl0l*='9'Q،sBrϼ_\llȬ1M$Y(<>ohJ:0w+0jP/S_^4WkU,P/x+˹f0l^zڒ3Ŏah޼8?z%e2Q"˴se@n6.Tk4S 1 #y k"=w8\;*5#Ϯ[3[&M/dz̉8Ӌ4$Ye&L'''nǃNeͽd-j47}|#m=DwW2'̬lh F5#sA*s(Z +` t%B#q%o423"58s +4thr$։Ge`<KHZ;* },OM} ,:k݄-ej \"AIrQ2$xDR'p4^__IHI;|$wKNح99)$)})9 +r!`|zƕ6v[iLQǗLe,KS7/[?w*6.m$,80(;p粦1 {LO]{ ǁ |a@t{69ɱb.k,llIn^nn/ -eeܸx;Tn:e]p`5"Ԏm<pLQ1>~x#ij7# ebyn 1m\3nE2lw&€cuDUnOθ ݻ )/ozS0'tFNˆOj$ q_tTXt/4! "D?yF}P~Pe 4 -mu+Ĉ!f0ZHQA>gztE /886rN\AIIu?J+ ⓸| Ԏ?]BR[ö%Fg؉Z[v_jХƕr;m#f"-5K`0j@}BB/I,?H@Xp$FGG#!RvwnmĉO I~ߔ OLc:zgSb^)6ꎗd0cI1a{smZqWx|Aԭy:v0ZprW$aRWg1-dz" QI-@a{ۚs1^MCZߨ^iVQXQ8_[Rw8xw^l rP)G27y[э<@0@ɚ.nR6K3uJX=@JQ=$%/QVVZrw_ܶm4Ijm]Rczb5qJ~ j_lvdù6Nm\%4Il_>'(B@ۉW.k#C@H>'J=+xŦ3''6 v>;q2>?sCLFlo?Ӥy~v}jv"Xl)|^=xpS}d5mz>aё[piH/kgF$KWWm!;Fy^W`\~ZZrX:CDY"ɂ(@˒Yv^Y @S q>/YOLe #M2űG@_"TVT{ZL[xTHnVXbEy|A *K6Ϗ ?xGF-sgiDmCň2[hK>4vBm4Ղ!h,4i\qN>2No"іLtHf>X@EYu? MxIsKSDIIA幐\,ˈcwHH`@u0q!c!4q!XqQP='Ӕb՜!0G@81Z\V6͓T{H[>(O 'vd CݕKɼpoQǼ$%D~ Fq~2U P OR>d" IxL ط}bciB],'3OĎ!NM<*CH68iGy$19mÆ*G:Gݻ^۷N+'ҡ:9SPɝYuŌRķ5˳WA7^\i4g &G!`Rg tǼȀɾDI~6j2D@8_Y[y ҭeYzh"Ò6j.>r|^Zn ٢uW n٪'9N0O XKDy/ FE7)lo4(ӊ{I2@;7'f21\tl + 9 *KF|_SsýD pm\89wv\j)U$-7NjM\~ƕr3,sgyLZb2V$7uױ͇4KWlWZ;2!rJ 6ZCkc9j5\Ӌ7BWהkPD~(rz2QrpGg,e!f{%`n,fj4Q91:+!hNhoHV9<8=`HQs#p0J.0ZVUԘqrbqP˻uKz&5ȱIqzټWUKhN۸Dqe7(_k4,7C!`oиߐzOZP^(ӗ . JyȄBwe<%&-9nM1oypvpWjt vҥ<;ڷs舄䩡۶dz##~UUF媄8C!P8Jm}Hd0l`w}hBM qnYu:׻w+k~=&U'(8i~Dᷘ$4R91"V3lrOwzand0ltψv=UkܴO]yj f>kOuv*]\ARU,K޽:Z=b\w^-9G>7_ӧ-Dn7 gEftMg$+5ZAVGk5[vL$0lq\2m tL5U`D`=~ZܖqSgϿ7뮪:kV]^誮+dʧ'/RvDުxԸĪ5*#E]F2W@ݮ]hw)|͇@j8$%飪jÞ%n=bA92bϞ̪W')1QuQll28=t,!`C\Θ~Ie`յ^Q^뚧..'mSvFǽhWuf N4C&fBQL1e港j:/;cbTh?໗EpžCOVN[yZߨ^iVQIZi4J&oKxU5gsg?=^J+n y-eIOE XR`*85|-h7B%~z1ɵ!aG qsW' kCU*JY{-c %?ej8 GOL,ωdgtDks3_:9igNIy6s >c^ez|5]Wp ~΋4nfNa{qynS( 6"9!>v>%8+_b̷pyX0`36s%:L  U"v+7ʩzq&wsscյ޳fBHڪAa׈UJ&\ b81;]7u>"@6)1Vb~w[阅G3SRq|')]>S9ZN%vhuq b܋ \@^d{#@n6.Tk4pp1 c+ S-i-+p.o'@]Qr/YvG?gEcߛ59i$b+$ q"| ?Fxtŗ٣rqH$!{=ʉx}B¬»XqO/!HryᇍdCeqL46v]poNpOE{L$WyO/pсY SMgOgйF'(G~<>v楱o]7O#קޅWqN]|LըG,@ uJCqtl,4),4Pۗ&B!ǮDAp|kltS^O69FVØvHF 8pUc~^'gN:5w )VkQA7#)NTe_8Uqts]7e^:/G(Ia I_U͞{ٷc{]rMܨ֝~^07}`&MΦg'H7SCfRssC˚kQWU8$[ c|FVAV0NR9M8~ᇞPK)?_w ~"+_$1 *.qƇ4`p/*hɛ}4/~1sj\\ÞTu>bexUYSoqp[ęH$IHC]aA4^z ?*>!ʥ @ZԘHФ͋D-of̃N+i׻Kje[Q bɠfgES+0!o-=+1Co tSB)5]`OVR3.j}ژmL)FpǺ} ?d}/\Qg}o%u 'P*4Bsdl/5&k"(=jeCK:Un%y6{68s5c' >ǁqpeq4/rrDn-FRޅ x@8{@mf>ܝ}$[{V_‚0!l#lgޞJc _  nqYεU$VӞBYILl3n6LlmS7 q lVR8$[)HҊmk8 vI??u)Sы9aV4F; S(VBײ))8𩦸Zc$o6+-0=Kf{l7]&ߗNrf41g1c)s 3!LJrҩS/w'~2[ ap؇o\>;hgGݗ|6C7)~I:r8H22zmuxb? =z',xvr,՜X2¡w}qJBfgV* S">|jT>\鼊Wrk}_e:D\xS䙊*mF5F=-|Tt#N {ij\㈘hl[9oA/=TAɟM?KiJ]q)qcDR&ReB%Pq%éPr[R5OjW;\^7So0"P,˔uI2IWp,1tCwY.dz!g'6 vWMLrzvCB:~䛟+iQjknB۲#aÏ䥇xI̻交IrP eE=ou\/l(z⪤Iemn~"~ϟQ66mC}oSe9K}7wzsQ{m7mtCvՁx9-_$WjHJBk(?}=GSAjDO+rQw'=RePy/r^x 3uS9DE9ČLPz6y߉$^w jz f!u$>V%' 7 qtF}wZZ)(t0}-n.G *Î%)&aE ?vPA N,L0V%TWg8v$7mnm8NѺǐ[_BI4h=PT`?yzTгfsoDL{+>\:dJ qiσu*7ٳ[ ',HlTzB fb*" +y=bgYL{#f {E@4#ʜhKDY&^zCQ}ΘcmKUVG+,ÖƘ oAT鑲gd:]*#uNJd0.c("gKDZnfN 2-m^omq-^T'G =[emn1#K26ؚ{8B֦vյU&^TҖVM.ZgXqmga!`d,ťhb QV U(\A mhn5ϝ. 
{*l:; "r/`X t'Um4E!hXD"pyꆠJ Hp8 x~!ztE%z.XKV\{'PvTK:]u*D& DLMXKҡNiKxPy^LqϽ ?;_kǞU½ՐQ8p2WԪ@P:;hf.!`5LPrvn{b' {$90D\D-%%Pd2  !`Qd岅d7Z =ͷF I*hwMIj9P0S(\jQOo~,dFʲx7O-^$W;,SW?+(J=`Ye]4s^cA|m٪^:IYՕ6IW8IdSںڊv7V\e]6 'll8{p1U4$y |CnnVꮛ=7{'f7U!7cu_ 83 tID %UN:Y=EK< E~("ځp*~5NA'J |`.4ϔmsA(^\_iO~6cSIļxP WsӠ"(|^@hڎ pn@#pˠQ|9xՐ{W 7q'|kJo;/ Ƃ\s;yztM8zNN~Bڤ@IDAT */_?W~ \ m$kXr~ rvsp(L Π|!9\72$by 9;K;?- EwM;goo^ ]n#hB~! 6d]; .!HP™} ]sҵAϠfrf#d Ne2/2]%YIǃ$(>+ c&K-~a']u]y>fDFsbFq! ?l"WZȹ9ttfiɉS3[B@)=pY.T1=f?q$h50= ʎPp1Yy9$E?eD]_zr'N~6 qQP FD `R|?L4G%+J?•kP0ql_=*$~Dr ҿK$8 'qxrɥvhJ'Pr ڎ ~C+ . kF$;A _ ~"Wҏ~t"k<̞( S9tg '$7eY>ca_2Kʠ IDtA/b"svW;%L2vo0⋩&1i)?Ay֩d:2eK+'u Cnض:nΔgs~TY:;/GJF8dv8 5ǤܫMvt싓 ʶ.44ytr?F]t^:83J?(l2x Ϳ~|rPC3 WGHzK 2J*>30dTfD9ex!kRGaD{lp&Mعx/pA]Â> GD0qVmQ\ci\Ři3Y7}sEF@E"R\}UnrOBLDY\9[s2p}YI{,deߪjo+0^TQA<ŕG31s¾G"F7IsT޾y*Bj "`I p Vc[ڿkKE"n#݋! );Cx] P(Jh[My4>&l4; {0;L(dڪmG0IVLNM /{y;{C̴ BQc+D ]^,e}`pm?ߜ =xՠB1$C~8-$4Β~-G㒃z M`0NAPm43)M5a -ኴ`P/S|mӷv]Q_zxәkY9_a:a Y;㌛6w-K008$wVbڛ;=ۗ$*PU5ШcO o{{szیY*HYoSM .1@$͍'IZFyyuL="ɣl1Ϳ^8Zq} ?S }VUQn]p?#`L ɮU ʭkk ='DsAxIo9+v T}"m%Zb#t"L$IMI2A m!tnK$]^zID]I #@1̎$R|"ך#,oDj&~z>$1Rj+In)O5}@{z# )084I]h$jj ^G<ڨ_Z+Klo>je"@@ < M4Xlg}X+T'~-d'R@ڈ2Y'Y9j. 6nmDN4WSpl{U ot*ћl}$I $tQE6S*_1̙#%ruRϻa[:m.AuZ=%W9@&|XkuFo_wW,&@ZMՂj`_oGũJ{[.qQveʉ t}ER4 "Z'A=,V;"V0IeҜ9MںJco UŢ!dT>D}ڸSd\U'jZ%\[V (${pF^\Oc+Jk9Iv-t\V?ǡq08vo|\E1aaIG6TW6Z=^GEI=ЋjARN8;|’05Ԑj7)IEj7#IŤj [LjԑϠ4!FDȉ+Iلqʭ7`-78ts}wֵvd#fñnG.9E6GB{̔$LmlZ[Gj{b%K__ZDi"/%#`m5IvyVF#z^7]o>~+͆e֬3Y`5 kPZ4̭uɼxDqTݣ્(;޸u-yVS]ٞx'R; e%ћ#im"el~>/a4de/fI1;N*2D{Q G8"ڕ7\Q'ˌ9ԵY`o{tуg\gxU9n6N.3sXޥ#|7t:-L1-Ԟ- 9[ĮQl7y 5Yzps:u|f1eh';`OAnq p,DuFB֋8ȋ}ГJʫ o~B h5x-|t7mmՂ%F^ ,:lK 1V<&N#nX{NЭN٢ |cS 4.U-"R[ K' <跥kI'=|F:~Kw8eN[YkQve=4gdٙa0}B{?|b3Eٝ1/^``bW@,}kA~7\tm!s$,X~yX L/Oa~(Г7}B*d\7~Nkh=уTKDp#>ܹS#J[%z`%a]$m# wn +z 4nQ[{h6?Fl)K^u/\ mh6͝(!ka@QFMGܯgd).H*ۧ9 NACv;<ʴ![ސgbX?v3 x 6̟*,/֞-d-d,Y +6`aյ,4q.&s/34bJdd:#cJCx0O(r/NKNm#Ya|8udIR"s|Q~]ڑJI&C~Yq]NdF(;3%=~=eȓL$yo¼6,_D?x•%J_tHW.?!E8I&$>wok~ڊgoJ@{@C%D_ɓL$ygto{綿:=ۯb+pNg0wٶcUp$jů^Αa,Ɨjja|j_>1#czƉ9 6sݔ(S)x喝>)ÿa\1҃beG$-vL%ia剼 NKHʅ/֗B$z Ơa W` |ݙP94(&-ȓ|4'+B.O•%m?v\PQ:X' z{UbPF]1-$Sy }E!W5B\ 6,ш۟b)܂C (.8 CGdBɱ3_9vwٴ ЛKb):QOEg`BFWo IQȶ!@ zXH)!. BKlvi0 b^q8r > D_ SBj%BJ47`H @TW~t6/_ Q~+ŭ1b{~ۺoTZ<=>hV̕A6]Z47bHP[LuGZeUՕߛV)}kJ1@zwO|4ow߲rٚ*N]tF~ۓp_vOwa$-g+`Ȍxz<<"]gR7iTVʥϱNK?Q ]|z;3%RL.(\~8QgDƉ{ڣ{}CB:uZu ?{);>/(jsIbL5e5Օgܿk_nĆH1=(!Id~oܣLw5e4579w|>B:u:>J!8le=k3&pTUBںʳG߳GvHט7_Zb4jc=CR'(m_m_VSYy}9߷_^/tk/mc9-UT;i4Gak~-{{L/3TE.(5hdD9بMj?Zy萶& d=I#$ej7O8zKmNRr L2L .uJ6*%qIqʝNp胒3wq9$O/"GbuqwF%Qv*kFЪ&M9hI P="N'sS"cY|z>~ɸЍ-?9.I"G$eՕgnyR/I1hg=QDj\nkr\8I6$2P:m'"MăO~n :_~*_nZhVb9Fx ]uwt#"+ 8Vͳ jj7g]BiK3e>VbNS鞏v'A @ pxgO,i@!})V=O)P,À`2=4Q]so2e5|Eu$; ƄG „O#Q"Ӝ&G$G]mt ɹ%'D G5%#_xhN0ձ= C@oa D3d-I ;D'O2d IH  BR# Tae,,x?|w, )YNxGLӃ>@GIS1U VB^HlgEQH}KڽA|ۼAkN)Fr*USt \oh0uT6_jpp,X<{ãOٷڟ$c9Fי L4E^@;@Z4r= Q]Ƙ%?Ւv,ҼvuZHs>9I'&N(&˙LO2o A9b.x /Û*ie\ha̙q03JxbER:E dپM- 0 |'k:l^ ^xt/!L ub^4 >%eC9B%2ʖ>uɠ< eXNDR IM8|Y(&^5RM̽A E >>pDՔQP5"IxdK#AQ~^lδ(u | ov1m"gYoh^&澁 ʾюG@ Z;QƏ^_!|n>jJɗ3X,w1٩N%ԥo XkK@驩Oˏ%! 5@;0+v%2g?qBHǨoD6 =4,y{-9"*0uщV SK\}&I܆$-w"sA>EI@ tِs=2et3c[8\r!1VUa[Yf"۴NFor*84vJ%qH# Imel [|ŏsD@elWQ+w"An &7 Me(=cB# [D;@dV|ɱބHޏzZb^T(҄vgEɞm5ΆYT!†!@8 p J+d2s='UuyQ  zB|^043&/_.i  2<=dQU@SQ]ASoFi1܄t.ƦykCDH#z!_׆ ^D ޶f F 1AgM8v*jjac{`0AI4ߛU AV^x MeƟmi*o)Y~ST1)04m#Y#I觤˅.ژ4RgZK WWŒv\s-D(4~]y;=F_Yc(EMe5F[ȋL?mv K F>INj^…1m `6[HFƍ%d_m_X7Rh*s$\?- dXBɳGIvDʿQVi{#O[waPbCϸN*Xf>4l.YcddYԒ"OmbMem/ѐB;َF|QViy(6'u/V֪,‰"VmZ.k5Mh |/ \h*kcbEB;M|vHv1^ *l<"ɵfzo1 nĴUhM"ܢ;;L&XSn:E=`2~KKBh*dy08m1vIՅi#~´ө̼AU|MX,!S1k TBzExϖzHi|Y=ȤH8,@V݆)Hb)KTJZ?ouV_1 H#y'k:lDYE&? 
啵LAEy)QUkµLf*554LYBg|Y=i*ǍK0MeY%rFCvrY"d&>o B)0*1s0\QVQڈDH'Y#@8,H)̈U_Z#1q 6Z9+@Q0q MeExҚkhU⠋ @ɨr ?Lh's$3gM0dHeH9 `..H7lY%qU?U z5N0nh/6GѰ`oS8JLrVc`H#ph4:p"iBS^| Y ywѲHE`>>fr_-@5Hy}vS;i$[j7z|Dzj,m XB3xb,+`A\VUÆv 3& q#ہFko:pmV_?0 We™Ih4?p޹os#peCEGK'6Nv"Y5|5dA@vC/L& ww p`Fٴ='XuiG,EHSL)w*L\ 8^C'(9pm~M;Թ }8|Ns.^mw?lu$=<ۏ@H΋1J[NJ!bڮT(PlG@h'ۡp…45d ʩ|AUԴ<$+.5)ܲrSz^5[Q-?:Ȱ BIi0jH";8 icP: 3s/ H0T4t$Dx^)[Ț{{F7+YC% d7fs,k^gDY%M˽#7e‹YDdvd1Nb2M4]5(<8C IF  _w(:]%e d[L#}8 2]Cf$tY$Uve"!hs[h_ Ar7x*Th';1HB#ٹXCn(dM6%iH\Z %ŲR)ĂmLpqá`W6iFF) z-|l+PlWߞG\M;b&[@Rf&t 7Rz\ryʊh/B;645/[4>Zsos@kp} lk-1I%ű|β8{ :py:]R ۧ;h@'Δ.87yǰFF]Beɕ刼4i_)GER 7+ Me3$pőCc7y$.('ژ\]Oc][$bK7f wvw棑v3y p=mqhl{ΕW@ a1˥3 ˶,„17:GZB@1}gF%nxMea̴9 dĥ/q:zH4,эD1[UOУKG-^?nt ] >aDEµcm:EÈ b.x3cMBMWQU}'OW Tm5$O⮆{ I7>J <ɗܣN,旆Z{~+ɗ[0b݂aA>A#=yc,TbE piЋK˥G+$B ^qɍ:$Mz]Ј}sbу<j0Ι.(QǾYw^_L`N_mp?"gA]5o[M߳Orё)Iݶd9% @ʕg௪!T[ΖKB}(DaF"] MTdwxXc!zKG+hQn$7%e&\"7|orInKZDǯ{ZF!Y֘L- v~v n?9F"[=dİ41Bd \^晎ˋ#孆{ Z=R3MfifEE8mq.-# cD{ؑjcFr˸ ʗ8C =AۯT4ku/x QBSK\$dOӈ Es gK4'2jdS Qnh@C(%KpTbM嶣Ya2=&'8JI뷼ǙB#?YmDYm-"E );{5f( =Xp =`i*`//Drć.Ҝ/KH6'nl(4'OxaX8AM p1G8N+w͓R/h˖17*j!kg8Gw='ԽGkaʈP[g5d)qԾn,_Ŋ]QcY $^l߾“68S#`bZH²^Id886>i0[2|sj ̼aXu/;d#Ű;GJ}jls $;yn}:|\9?,۴ΖU,+\ ]? o f畔W"zĠ0uð -QQo2 8 }+\\5$Ķ7g):c 8Y4 zZ+f*2ܬTf*oH &p (6J#t)bvѺ<ׁlD8z jx?}ⰾ0() 5p\;tzΑ:μ"l>vt:"lDN.u?8=<2w‚ SGN)ԫ+LG{@B9V#x9Ηu+7GY^3|]݋@cMe?}r{-Piʜ9t4i$/}]F2*%@H hb.u866Ivݸdx7XK2izgw<@`xP Jb;@1¶NVY4st-,ݚlxgOܫL8IFpc\ɳe5d@ U5&Zo?b6.43YqqybHn? s "^͆%%?%a+rοbC 3)x6N^bs;Dɟ#v7tl ԇOÓDٓ7Sv0#ʚ{C1!VlR|}@nQy{qƶly -ϙ{:͉`p98q8nё-W >7·”a=ҴrC5;ڈƜ,y]SX\i ԓp]~܅啵 >\o&͙#lV _MH.?m̜\/o4eM:2zhl"oF\B;K?⺣j$'5Ѽ:~ <`@2d`!5ةRU2O^#橴-cqIYlXfЋ`F:qyO-g1.1ahF Cޞߞ"]PļD)9$8t+>|}I]`NIJxH9f`DNf/Co9["'7=eF1w BSy뵓GH"kQ@IDAT^`|SdG7u֩$ DF&p=uNŽsI.&cP}0qmM:q<#Ǎ@T7"ӔF[~݋6 wݸ!,g{4g\" ώ!FLsvB 4$lCjE(UݻjS{Tj_Iw)q'> 2Ҙ ƀKGlGxɘ^EbwxvFCݒ ;.?DAvd1wr#):/u+AHS?$б~LɈk'sd7%F2AS7(}m,VKML "g4/6 vhu_,՞Fu y 0&H@ 4&ĕ%r VP_mbKE@ !k*E޲s .:ľ.\$+ }՟r>0ꌸJIܟOuHư Ҋ~uAS9oxz(#ĦںҲ^g.88cj,ڞ)bGu"F+t0@0fXD _zheiybX fCIJ 9%SA( RSq@܅i*s>*L:jq*)_3L?dHVjލ8Fr D0KjE@eLmu銚:(`QP` ȠP] <}>n=|hSĸTeT-EB>pUpB0 a8+kS}##-Muubpk .4Iz@9./~Պ۲j*[wRq,4Ke.W$#x i75pG: ;Į *N aácp !Hp0욿@MR%Hr&񵯢3ڢ?Beēp%|]P[`[~f78)Z^QvO⓼+ᓦm]DUFB'f뾷@B0H0$(!Kj,PgCvOET ѣ<2tD!~yr!>9U RhM:a[ OVέ[n\m [ڀ@Rv䫰mɻ}RRv%eg6*Ni-i*{q~T#:TswK+ [. +>6X'mC FB8nfʪj$}бf 'mE,)#$zDVCz׷Ap ó7N|Icg>mQW-?A8A~ʩ]68ƣ{oUl9i*ʷxO 3e/bN^6Hۙ˿IvZ,Z&@D=MF^N-lٴl񉽮\gVTko t-%k<*0`EmM-e2GdH2o2"ˌ$(NADT- E`7Zj&$rA 8 \-th'?sd}kNdHjt3XP$~/VU|v-Er!lwhS%=;h|Oem-dN&dP7w<4'2X@D- )2=qT&0UWWdmXf•~xr \SͰxӣ)!b#¤{,3@Fd=VWe_v*;ـ!DiXjq*H'݌ [9L5:aFӥ'wJKP/j#j+n(#_8,6,ņ7^~TOK._TbyL4l"v2i$/wP#Qv礖%d1}_*&{Z$'Mׯ@1y弁eft)mCP`uf&mFdB.L&1oy&<ʜ,j)l»1yI#M/si=VF^9= ԇO㘈" {ڎHy)!enl \㷿ܜW8[\1<@u<\ )BT> #j6B/( QGD}==}ji*&3⎼Y4jљX|W;4O:k0&=ngBI-]'M9af̆B.Mmx.kEfWw8IZlV_ZC(ͷ1kT~]-9NkyJKz!?ލMe}￶!/F@e4'ܣLČH2 6ܺyװcp 6gPrv}DKF iCjJ;sDSuGC+Q*)X2)[X\/a9<'j=FbZȔi:;)8uOv-]"xڗ5[]٩Ne_J-j$?KuOQv_ӧ|}%&y/Ia"L/tOgE˔ver\r6X*EV@讨\ubjjʃtg~Td ~_ p*ljڄF'#mfK2,7" `+JNX7Js戰\~HxK!(I]G_Qv^e"D٫whVst\g4:M|Eׅ:>8~Sm0'^R[e< K]SU \-",R>Q%,O/Xpa+ f…Bj$ iSg[90@@%0b[^ed2Who\}DC멚)&J#,N|OsV5/8&#~rd(P~g;ThTVݗ2}mX0[kIbN^6Wȟc@"t~ ɼWD-Kd:Q,db(wq3 J-8Q mIDOy*KIe9bEgͥǏo_ . ?j jj rF_OqG$'#@,l Tvr)ɎAR׍G.H#y}m_7I#98R $kM^D{x89))I30@`1öFwq<(U208' +#aR2Fk¢_3{sP|2?v' IVn((8D`"^8?lm9? 
n&a'zJI\;i$"y/져V1ʙ%ԿC$#Ј|mSA"d{*@, LuH{t$<jH2IeO/|"HURqQ/1=m(sH >\U0u0 %-7s^ɾ: vd|+g 䆶K j֡Z/^Jo߼שM1QQD=gD_-H2% `/%8ů|,S:cwm?"q#LőU P3pD(GL\Q禚N,g+rW7%+U>,4urA=J5WNTS,c QeDԒ8Q^eir=?-rT@m~S,fy#^4j_6>n۶'Kj&8j 53>,4U) u~8ϋh5>q1'#"ge"ь9-b2`bS3&Q&ltańz<["9ݷ쑴ixmCFBռ,qJIS3"uLJ!qi'כŝ57 j:5}6\ >Qn~|6 VO0 -z> d CT3ע|BUcW-{l'M1Ԁ=:/?qbHؖ^47`% d2C]l{ MeFd%/~f_X'|!Nzl c`H3[- [BGc}ۃ#DnƗ)dl[}bLkTS];p"IHvu#J.Qvc{ӜZUAF3DQM8䠯,6+AZH@%V$I_]-R:ڶi*ߧ<|I2v_ GM6 a Q?9߃HS;kP bQ;Bh'Frͩ?u0u'Dx{@ nڗ̊ _{Rd(=>x {Yb"*i'ˠ 1j~5*pJISYu:i*;%oWgBH**^ Mv2n$>:cͻY<}XV6/`{4IICj " E !rQ}eLaHYD/^'RmFk`TyA];9jl[~v .u P4ճ'yiHQCd~sgXiɟv]i""VB|Tϗ9"~#1g$d|1^VlKK( tUS3d{@<6N3C}2z9z{W$sNXdtZͫH,s|]Rԫ`dYnߴmPPb%ټeCGzu<`7j*3dEa}Z]$i$`]$Yh$5q@ .,`yoNȕYsvH};Ās ),dI*s4$ˋг<_vni*?KxX8ަlV~u͗%)hWنc:I?vl:*A]*[$"d 縰8ԆRG2}s4V#8EU%*ˋӆkڛ4v2(wa|9HAkwH #'2RDم#˖yyg0b^rY ahkQXXIzn 0hGc!fA(HI~ȓvySԑ[4ͻgvJI\mX dǛDE7w>>{|I:АwKXBYC#"Vى\Vݶd J"yb&3tPv2i$cFV#4p|Aܒu&ŗܛzyEA bI 2I٫A/ 0t$)b࿗S(?9+O5Ȧ>лLmsoI$!3;W0Ji2A #wR,A%$i}M B#M@b hf7NRÕ;h.Iۨ֔jex;'g;Oj͹|̂sgߟ~.ȥ+5#ZF}o?~ 3Rku1?Pdl͗VH%P*hve{6׻`+O\Bf\2*|xdi*osgNm6GR`KwOɊd}zV~>m,:w!3j ʜkǏ=mC@ò"4(GIF^oRO2XUIŞuo^8QSy掬=Sz|*k\k6˥JEuFӾ8OM=E[FsҬJŨz"R%%($ⵂ1GGۖ-zZOA5/s<ƕ%muWϗƟkHDjtt!jEx(~O^OծK8e؀' _iUf7!STm03+mNdsdwxwڞd/յu38l]+ث]'㒝>jj9i%p;T4XE6:Nfֱhp~k'k$S(T?n(i$Om}>os@(͉4HIvsV\l{^qSbwHɮ'X[#)QNJG ٱc3}ڥYIfe G=lP2KIM3>++_hC/6wdz6' OΑa@о̡E,y N<?>35I= @80[z"}.<9l\RY@ hxx'3N8bԙDdo p/];9>8g1&6&1fLy1&}X"DiI%$n.[k}^OHb-HI~"!;*1s|Pd9Y1!`)Z3ntZV #%$-uGUef{B^ " wUꪫu׮ J{I#!!w aRfI//[Ν=;]et,_iի՛ccCRu(IP8N$ P%AwxSj*UcBG2h !J*az3 Q~)*hIhȻٚ { zWvƦg()˥EֻfoWt ^-7EaeOǧE>% Ԕ>% `bn<^,ȸ2~@Y.j9&8"q? 7IGM獲U/1~ 0;,jǫ셋j08 p1dt7ZOME2p0VΕ6e_49#eA޺RŪW+P4` 4ښ13,3\S+VQ-wEZ:We}~\hB~$˖e>D'р𸼞8hЯ: 9DA{*n@WؾlINˋ݊[G XWDnΝ1NV.Y 4]`%G%&:hl,=վ^X9iuuN~)Pe]oS:-+wtqY2& )myoZwJ9Aрk2-{ ̓\:W-ƛ ߦ PNݤ@sr!1 8x"ѩ3˖eJpFlu,{it:twYk^\?C a!Ɛ@_uh?R(|'~bZ#>{TRVNy)6-łE4yx/fٿkʸK۳o82[^ ;i%cO_=b[M{VKsqU@R\CKK@s@Cew*+5m)-5wN>^tp/3sp_BseZM"kӈG'#ԪN|j0:|#wOɐz(_u;W6IJ ﵤ_>?X;4;{:NXzv WjG]:)|mߖZ7'uV/1qHc {wn'R'.]rq{mhR栁ʬ',HٹТH#Fd5QLR:Dao _9G?^ĔOW KAxh`d^u; SM2fk؁n :Z%Qd`~@`Iq]5ʋbQvWp*[r:Dt(%ՏԍDw;` H_ zC?3U¨/{lovF3Q4hh,[S<PE$..3% *@} hƖ_vr^zV.Ĵdnno&uBvz@e⚾Wj:Mf^loJ/>|+x rJu(stר5<:gk/j>fYpw_鞢jqor+fm6V ]*DE׽')ϋmLi,ڸ'?ӝӯ{OPHvDŀ]"Vvš=|ڸ@]C` Wi_DL|:bߴtU?Q Ir6<4^{|:mw KÍz?) tVJJV,}HLcԯK{SSeB>A؋kbeam>, ArVK~\oOw5Bt &@t}SѨ]MMc1w˶%G%\;ztj+2[eia_z>F=콓)<$}|h5@d1^^qE_'U"M_,jI67s :)0UH^2g (c h5e+_}@Ce+e)^MsͅiKMR-W͟E%`ðiyl-CDYl1e9pV֭|pӚ24<M. _hy?EJּbO1s9(MuaCh^p(9:Ơ.Q|9b,rVXxv` oo:HϘ8~6Tfʠ}+ S*-xEIĖp #➭Jp)F w9[95, Cz38 K5 G_XGRY [*h40{ W~{vn՜YEY ҭȐcFޯ|*P4`[ FlQw[V._͟%Հت p {LDa&@32Y!B3GaIPl~ >oͦ­ٷST7s҈BXp $-4Dp~)`A*2X+ Ii59wNmhۡ};8/ FtnsH.+Ѻ]e(&V`n|c[1n*:πġ=qk*dc'}qRr7qF1(P4`S bq[/66y779`ހv`~8*,l9/;TX,m%J>gvVQ2ʻdQHz|}`6g?T]-\wnJ.e  +u&®,W)37Tlef|ӈ>!FolwSw5> JMվ|r[fKLA:5~edUJ8\Á2=UuT5ρ)E+h&ݍi|+' 8OЪwEdQb*6c6Gnl%@ 8hY9UTeHHWHPKffn[6GED_Cp`8Kt }y-sSḎ&*%,,ȁ p? 
9 j< Ki+{gaU?3KPaqn"~◔(-pᐁ|, P >ؗS> KMJZo0Nw0Il(2fh00奓[F^*'|uf>m0|*$JL!o& 'Hι3SJ :Խo`rcTH{Xcv>\a5+ޞpQӈ~=}88NIb9B́u,l5qa3``cn-a f[_҃}E WW r+oKGqK "lmf*:j@<~`hfܾ5͹s,YpϹ+Wsw%G%zpS@Ch{Or]цWhg*aХe}4[J͜*ӝ7^7\O4z\`_nKߙ' BFTzI]Mnь<`JMUhsƠenf,F\nצ-D9D'v'(ˏZ-/nElˁnLƁ p) 緦n׫+)-# xfui%%/>|;f~;հX8Xm^Cn6_ӭ_^H0^S}+ߟ1n>fBG&*MCr˯MP@r6m%cp>)9ށL\OM҇IRYǏF]֭ԁ^QL,5* ?aDc@6& Jئ.g\q%Xr%=-MjIrr傽^u@uحNzvv?4_چ'kqɨ>bYscLۥ3 ^+?| fGHV9c8#QT4Pfh(>ab->yXHFc WS@zCK$u{ hUO{b|*{GnprueHH50yx/fۊwk^ʎֻREN/[̓((k@F=bV['Qb*X|%kpmpIK- [vh<7Y1UnY!uhSKuhL7~0ݨcu:@OO;`p7 ,ZjK7 ׀_XƱp6˜Kp!*qS˨-gZU:k}t߿QL&LG>Hw (N9 5Ŵ3c/`{E 4 K"HUzo(55' pK@IDAT^E6~Uի Á#H  əA0Ul{q܅%ii)@ǽSs-?\MEuѬQo< 2 :ԳmT+nce /o9(ApRZK9tFi=G o&~֬YjUy*O~Vv1c?-_;F+ fAGiXґe.JA'=5aCg0%3a@΋?uց=|m9W Ve)>@FxF$8bMvszA^* TQk$ }um-+KVIF Am6ΥU5MI0g'Śܔ [hm͛ȩt&Iqߖo[}D 5g4>笮3sWZ#()Xf8}SKd;@ZHI<^:#1sT́Lz"6Dťwo;:{ʕgרfp xR'gCE%e+̇1l(׆w bku`ȡ+3q1f _XYmu 6zvNBյF?/沞֝/] `:IDA0_5<La{M* Yd$ۃzNZuWOwzvbM@('Y=uP*{ֹAʉF T^g|Ȍ7BGygnOjXb)AuΤQ>r ;byˉd$-;LB%3\~ؙd(@J1jGlJQkɄ5bk+0r|HQ( ޒƽXqW) CrJS3"QHq- f_k6Xӕ Ki?z52f*hF$QgQ_-{Z}EQcqg/R'RUfssIoh򟘫Y2kfgi#ڡe;kkF|fuT %_6 eDԺIM\z v(//9geF/-כ[ixW4iѾ!Ήy:z-[\dPkoXW}T_\3R\2.(VBeBwQV]a;#)W1kH^eﲭ}<Ӗ儩gk FWPGT;mcKrgm/gߝs8)&)>]]NnnX[tKՠ==iThd% f+3ZBr%W+{ޞ&PViz`ҥH&G\Yԫӝt˹V.ʽЉ*՝'سMfo uK/0R3/2,\@ ΁ sts%μʹQ4Ј@Mk12g\, KGaG)O'kw;?lEUGb{Vwt}o^V% 8:~:(ǧd"i h4X/kF>IͲ.zC6$݇#F+V I0Z[ݥBYhŒkXe]P5UaT-r}{}85X?z.O%|/ե}mȾڵyp;AްGPXCh49p qw̖;9T—ў-z$פD ӹ&|VdۛJint:MO.[J_bo/lMU n/wzbx靯6׏4Gf(7䯪bƍѶ#K|v7Y?mT;vYB\1Bަ|*h=S%4>BSBЙ t\*s[.러GlD!hn;ɥ6}7gٴ$,`\(#^:jBG[ ֔}׬L5vPwqaPӳ {2qi8H}m B^|V^O‚"aml2ׅ.пDsft]o>DG#0G$M0ntjXtiݿ}nG3&.7#FCWSgᚭr&߷\dU6/SwMP@rg 45_l&8:ej*v ؿ֑и u ?g^Yj<} (ޝm=060YP" N偘^-MH%K gH Meg8XėkqI2ӿX(Rkor>1]uMľ)0oW}$5++OU$F[2}4Fչj\\wr֣mα9I"i4nif47b ֪eiKg焔hDR=УnjR>hX"UҨIi_iX}lђN^ZWP,q& >ʶuwMIl.;BrRe 0 X@{A X;[$={$Lmψk3ŸC6R)>!A]t CvP=톻I;_v`;)iut3s( F~tG耧k+%eZBH{Mo!-̉OH&&$Zղ ??lcr+ͱEsor y Ϭa;?dmbN[Zhi@V]D6% MLͦcIolH2fpY7]$؏7`Ji60gvaYu۷P(,Nᡠ {%T%rwXCX0uvߑ&lEՎ( DYbyJ0"P8ߘ` y>41?YE۰MqxmM"qvya4~%U:̕,C}=xQ4hXy)@{NSn^9gT8Ŕ-Qrm2Z: [z~e"9\%b-k;v''\],Bιs,(riG>,o,\)Vw \1Yv=؊߻[%M{P6s'n sWi otMNI̹##_m\\Ts dܑnx=~O֬{g5ӋL㴢e.N체?=IwQ]x檉z;"YdX!v/o=p*Ҙ0t#}Ӱ7_=k4ϙ{l}ЏZyYRX)Z _乄8) 8b!^k+bo XWK'/]l5uA,ůZgOnK k ҖԴ>Wfkxk)pY9k@pd=๕|U9 3 `ɍ \?s])yBaB圅K?T&Ky[L"'-7d~u.>Ivݤg#7pzlһZd8i܀:,89ɈY5@ڴnuxlA{%UUy|]w$igi7u-JD"E,xIz"Ctx~쳥w{bxɚQYt–տZ+33t4vpGnQm?tcuH؂m黬^m4pܔ}3pA}ڸ`i-!)nWNPˮ; `o<[嫍D@B]8:pBt|8ۿaq:@C09^˪eFaLyHe:5' u|,Z:Sܤ{ 9jӃ|}<ܤReEAО3\q81ר+QSNcݷ=_X 69(oI0qF"Ͷ_ͧ-Cno_d5̀A3ty+[ު);/^zfi_Fd"¾88[!HI\t/EmJ¾LeaPNQ9m ثolr#13,$ j@8a01<K'e}sdrTΙ,oWɻ$feK ?,( ٲ 0i'Z~n+w2B0p-Ǣs<;h4ۆk4,}{)i24ܰOH0M{"C/F:U~t3Lk󞛆 +3|j;L6K[Ԏ띴=ђ\dBGTR`z7dV-C2'i`7_} w^tEޛ 5ޱ%oD_ a5l&&v_xtuuhg1= . j‚M-t:ܺXx;<ܔpӹ/yk܊/yO>vn*]oNΗ[ S8ח:7\|e`ap5=יkwR~!]7KDt̊mȶrR¥lZ8}fRSX0W'AsDťW0ՓAO@};V |(rszc#} 5O;Fe++'n! 
`4o ,<NdWl>0=}ک9c/r6 wS zwqY6t_^:N2%,DKͧ0kXkGd;Am\I4!+1{QX%iƐH,039Qʎ&2HZgN ~G#痈'g6u2X-&DR6(H%}>yU'[Նg?ek?4k6IFTS7բXtNk\͒!%O:4S "T觌uE \O}Iy_q];ޙ^Qo塟K]w( --ۜܖT$j*n/sCz73:jgo9,@}S( }Y rݮ({BzJ:Ѽ+6w=V]ۍښ,dGvs|ǟ4iW-YkODnHqaxG@D!:Seڻs^^2U*pxǴf٥ >JPv|)3GdpUan[/-6h;iRԞ𻪎6vb%"@݂-Ɋ^$` *SkWNSAQzUj;[٢,8U% $s{Woǟ#g)G4 ^d6; BfU8Κ,Ds.3☺ %T_']H{wd"Om[y!T=edF7qœ jfLt|WѓFo}Wu.9Xs/o})bOɒ0!k:G>UME4rE"FL&AF‹2` #_lkIJk(1H Xկ3*@ +.d(g"+S9ar$Uƿxyz3-MMO-\Q$Y=&!!J^X-@Ra]:(D3|i; 1h3Cϋj?6.[!~aL72/ !2w+3D yc x=nR8(j?1]n K?ZG2B k9@b.iwm,,.BL䣟Ì;A!h$_SƂ O h)ۊ1V -(_2u-b>^u-cPObBI%@d5lUGU1R92"ӿ&$gk)-Y϶$ؿysvyz}V/[ꊪ֚vr{-luUո[V֚漼tVRp'K Q/,..*fUqZh*p>0W')`+`ۊ%4|+WSd",-y/S~"Ukv\ \̕w9;ɼqY0΍5g%.2:^n&;?XJav6񯫎N^ݾ]3+?_W ߯Pk, YfmJ[u 3PK/Kvn2*&8l!c*2eapw6؝{Ƕ⸪xcq@8PiWc^Ss͜%O^0j^p8 ݛgd#>up 4U8`ȇf ]zeo< otBYvȸWTBmq)L˙ڹvSo#j[VUdz'LX <1IxI>d2STuх FS˔!^bQAL@Y >hkn0ߗ=4m$uAKC1LѲY+=l{CY>ɘ_վ'lMڧڗ!,|H˿2,[M ڞ)!mig?SL~oVPRk~g|J^<*I8LO" M"Ww֢,;33p^G[s5NͿfq!hwD?+_W|}+{ί5w2 !]Ӂd^mҲ^qiy' g i\nU'f_cծ-jC5KcJs;rë! #T6=ʖ%Or.6($d"Xy*Z Lo\MvkW~p>1 aOgVTL'9g=S$% Ӊ4.آ;2ʰAoS@~a)P0>`bȒy9h׿Lx2dtAT9Z ߈vҲU7ݎf=e_RBee@zf}ے.fekH]ǝcD?/\)xwO7*6/˱? N9l;h'zspxM m[G QpL4ul*mk~ 2몺qwpz2ws*K=y%ˠrnE^!edZ P6r&3kJefEwh#\7v#p/\;_YGpr xH`w96"aavw>ˁmHhf0WVio"2y I-e3CgxQ+L-h{liݰ SUl8>`ޓ$EUkl^[42-Sbk{d~-^M٣yə:C:Ib':!)) }ADG TO~+,xu=bnhg7eXvDs#a#}\?XWu*|m:-8+Ƌ@UG.zܹ]ܾ|>KSPd.gyL{!9sʅ?g@4yX%ҙ4V#[␞-p|nCX__ƽ!ڠa l=qQAF0[ܤwpꊵ`j"O~E Zf+MekԄ764ȗ"ͮk ^Ő1 F7xnIs rۆK}G7G1xN]]s"wgBwyRi /w>#}TAkeH(GƽW69A?l#Nt$ <]"W.YdVcvuSxͦi6,}`ဋ;o)xdcviJ MI2n_\6|>ٕ㎱1r>Qb(S/Űh3es4wQN Un5uCt#dy VQ~K:QdZrq6t0[\+˳ ,L ,=;Ө׃88KL5 8 ^ֱ,@.Q2Pf}+t5Q_jM]+E5d},o|<@< b, TckA+ ?q5i<9$6h:bKdiZE3O,.-bFHqXS.2ařFcʃ:SjҖU_:ѬsIsI;|&3hPqɨ^L.gWLRFD3Ÿ{z}~{(Cܴȣ} n0MZȒ`+X ]g|r6(L-)Zi.0%@'c?l7+8 9ɜʚly>OWsMeZKkRXhO.) w֢&j+cXxE7x Okmx#Ia%(vתòkbwGTi 떌E˰ C` _ m\$XԪ|m]];vUvLFzjgHKE.TsXfV9|JF 3;% n'uLېv/{y Lŭi7ؿZw~ 3QMbz̒~]+~[*$ײfse(?٬jRa[4~K,L=pư:VhXdKupe4, 1]0f@`nnB-um<77r ߨyc-V -K_7۞>qhMۢ;Ҳi5ٝǖ '{h *A*T*q 4inml_PU)k-G@ytB.oj~y?}0ե:Ii3]HSnn.6RǶ!PR0SS3 l@KEH~Řp)Ӏ8(EIx9p*aDMC8J5 a֘R-TT\V%:L,zc1 KvcT(?*j259AKDkkXYb߉1Ɓ6.NƥRq`,@/6 "yK :ZdT]M~ܖz[ݾB|L3׫>kҮNpZ>Y}<Stm<ѐRݸ7d;a@?^[+g:oŚm2]+gA,N6 Vc*\R ז]o?\SfpW!勒iXX\b+*Zo;O tQU!W2d%oԳ'RPKl,ij{?.gөK RS q,mxΠien΋T~^tMC^`RWpRlDNɠw 2w1?.PӭA@?ĆFfI+EOjljy]'hd(ϼE@;wbò4Qt43(8.5[=￶=u2_"}(dķTb'9׎%AEe~FȖ#\p`!aBV +lQRA*JnA~'{晫Ь*@e-b_ooˉ CASg7f:̜hKYcTm8~{k7pc(;5eD/Ŗpg@/``v ko'o$~o<=ݼ#f-ٗ8`?,n:j`;һ~>e[۹Uw$K|lcz@n)<2f|i-Ë,uK|C/|cߔ g\wwo2Hz{ฐos.~-h* y"V,v$`q͒aps{1}jLRZwX~CLw_113~=@`@tTp[5KzxhTn/熷5 Hhi2w k/ږ[\]-z([_~[P<ڥ0{cƪ_PAp?̩ew0;4Aa<A ǖ/Ye SAwCl蝗[>FCph'gXU>xXUXLQ4xFa9 d} ucP縊`[sЩrsVtS.< {Y\\gwMc 4\ OvQ1͙"jdN8Tғ/e(O{Q /8LNJi 7W/sZ]y+%: P¢\2 d |ta2 s,֐n%F5$q#K^٫/g)G(hPrj{72 Aj>՞Xi9&Պ(P4` ` P~nӒ+h&tۯƝlYdQSgIc K P:]s.ԺWd?ERw ٪/~٫\XWi.iIo4ߊ5N+S4`' (@z2SES9]S 77zť@f)G:,eZaHNL\nej`I*OKy 5ˮQ 3kZσAz< H.E5@c2EXy2D=<\O'h-wOOU> I+Co4cOĽpIC{o֓+2֩`$íu|.TQ '3" SV0%tL TɁS !S@uhmԝ{S\Bx~ix(¥l|K<`sP_`1BO";؁]qWT,t,V]WՃv''2Z 1֐틺-"#,ŕ[PL ;FSv!!/Ḟ1ҝ|/Mlft~oӍvU?/YB}qu%ڎE+E94Gfol]gn9p0og Q*/ -)<뱦(~Ǘ2s(&)N$qRɰW{EH\b (([0#z13:M}EzCaح»–|Jf.Nywxh.ZcUۓ9_{BjKc-* >ҎRZre'ZOOxRmh|3í!BOWyzѼ& w3aG^9\T٦M;/c@zωx  faw ~@ܴ+WoiLSPS[H[nb@+f5[m=xk6Jv/nNTM@oidՔmT7OLwfl 3RH`Q#+^|3R2r Y9Y;ܡR -*0#ؼ^`OrVѵ@VN/(ig 3 ~q^(b˳,W;fD)ebf]H~rw3]r_Bz2l4DJΡGВ^/G1ĹĴ]UgPf1n?tVXLa)-4qc#zGb\] \)-~qsjfnNHWR$e cdNIJ}2^Lh$'h.Cp:̠%",l# YyW]\u1ML\g>=bI5Ff Au4nSH=-C{v\Z;m9x#yDwGկxo|߀Es-\T\GO .¤JۆGNla=wB>*uoUk_~b^wQznzdko5$"/ɧ) 8XNe KX"L:(=]#l17y0CE dDN|dлnu,|*^ ]vj%Ѕ&!Xi=x0`>c?Ap;̀V7 K2͛=B,>xYŶ-s}Nn/ۑWXBf GT`>Ŷʨ2A?8Nb#^zdad y(8)Ci Ǜg&IzJֵC*R uz `"+VJGa I^[><8PK-^1<$^ SP )'Ý _I{AvLUݘz:gg'Б*ye%=wk"&`,0wzţ`H6 7:UA2;l@IDATbloi Uڨhiig[]hB:nd @\eq7qxzx'Q)/,ື/[f͘0"؏}R9c߯i4Mfzy; $p_Ы 
$_ֳss*Vr VC,+\/6./.nG?TvobHH-zU 䥇n?/,ņ?֓$2= LN;^w+W޼5rnX=ymV{N럘~g6/{}<$$l`tw'mq?[ :\k/ Uo9k0nm=Jh[[.EPqk Bpo]cU]x f}g-Sf/,(@hse,]yNJ(P4T4pZ.[a@V־?.|AHͫU|tKؒꊩVV6 )D"B` K777_ֳ]\?M̀oe ;NeJD]Hm m9pNL77edOܐ}Xq=uUݸ7d۔_ǧ+?/,3Y]?UO-}=?qcҬ/Q7c`\NW\dzDoN5wp@@ahΪ!=:ٛ4wmcP k㹤tVJd?мސQR4h>paL0PreV+q)D4s ,[ –D'⮣@Lcz e=m=-ʭTpǃ}VFURyMtGKL6ѤƟ9ɋ}+}Poqd+|m8452P%e$ }r dxYpѱ=ҬѮFFawEtcHED*'է5]/;=;6oup>}ku; qx9p p&W0V&4ǘm()-~fn/_ MQS4h4`H=-O3\j.qId>}L?,ףNs*[[S tA%ebUG}]/3X*2Y3{5R5ޒ_KC˜NjgLB~j:՗m pp0uY"=ޫ=ThR̯o Ob^*􎞳N[FW3ZlUJ7YSzJotגnRvNᘜNUcHF} UHiG[,bBWb8\kcQJn>;X\Zf~nr0>] xA( +eU(3oZ \@PȞ9̏T?0[dYvTe 0f8W0R TWDzeS,(NC # Ok񽃟('悂Pj/"W*< 1 p*[ZM 뙝] K;hpO{7\&o9q/7[5*a%r!Kdw%P^kOFA}+*΀_3J#s݂ϘAK_`D Dt58ьk46)+ 4ZCYe]&8E>>THljjoJ- |=zh M3H%?&?';(폟M$J `A$! *ʳ=#6Mw〽7R%B8exޟMa\pWe]@ڌ+RAͨݴ 5ҪUO`SJk9ϺOޖW3&^N((\9nSa 8%݁:.G?@v5K4V-%}{\pKyxYv3a I0kEbbuUXd*HyNѾ('u\RSZ/O~- mu\5 =nqf`gʪw/>-wwCxdqOh6I&ȕi>Nm%㿣I AlŒ4xDGGzgm /'&n N()!CP#ZnVe͇Ewe׹fAN$6%E@8ovzIY6>:F%6rJ(bfmɷ-eMFTt Kz&{wC(wE9L|5*oeAR,یܩ}˂:e! 8A~8Q5 ;4yD@?/e}[5ĢffiZ4~<9~6`L t) &~QPA\Cj4]KZA^)5 bCá'٢]_5Lx hnj\B\gUtE !FLr x鰧,=q0/ *=^0<~,NF0q`xgԜw_?˪eAF`crW]\ݻóQ]9Ud{L{7xk)߿NAx1&@V( 1@N[Gb}D6fӦJUj5" Ϯ]:7QgKe A9ad ML3ڑ[HP8*=ȮIF\dbyDWaIX6k? 2byg+T!.O6d+C A/ah1Km9'מb{qrtO]R88LS>&3z p&V(*"Yi]XɒlÁ]LȢD4?e!phP"…&wvݾFC|'̇h,Ć#^(HS}W(8a2?\^\o"{5Mn+O/P.@c e:ߟ^qSҥ'{7xGh~k7?ٕ 't;_.C;5hm4DzZ} -/]"ysY^t@x~93] eD m7股 dɮzA)B9B%ԦΝj܋MXv4T\Ƙ;+>xPrqoXY4PtWӥC\svaU?8aLʉ/ue?U 6K6q"Fzn$Rby?#%ݛ[ӶM+ F9ƺk~Z$&җ9Qld6/}OhIiWڏ0،|N` ϓk-suTj x`L@B(=$H<YMq6$_2c!2#<|mi7ieB^Vel(xFzY DK!6bpwLh{O|['7aX\c6{@_ì[lpBON^\:w;y B<(eKEiAD`\tIóxgsgn*)[9s\IE2FSQ8vqi=~u%`L P0@ q@S m@yX#/؛d6ΜO`@ 3vq#" .bۛC/|{+uI|aSE8VM>1=2zy8;y ~oF|jo!|я7 gS ?mĵx>| v塡C8jHD7skxo3&N^Um~B@5~{=%tz)m_fAwP^ڢ#ѡ[Ul{Sq'~gct2h{hNw5m%}/%h -[͚T8Z|V`Dln:K^6w1KJғTro7RzO$[9w1&tOVUmF,ہ7\FL.>/U-X=.+6<}^8Z([Z' qի,MO{K$KP!K'g,F$O]%ꊣE捝<*MkPgݺK&@0`ފ ]o\tքԥ׺0qLrxv];捃`@\]=ɓaz"3.:R3E}Ix1}ŹZ.cxgGQGJ?Ll3dHgUU^Ss9OK&^ PYϜ| $[v8jZ)>\7/DOsMٌ/ hcD.Sʕ%23ݻdRQA VَV߇Ç[>KspxTq=~;sF괽nq}Ryq^gL :pwn _N'VO(7@ȧKvh*)>/䌏*š.6$m1җ?%͟,tf"Ig|F8ꯠX)b/wڵG6/`H{-.rS&<[}t ˽.?arqBD|BȞ=> 8iZ:[c =.: I R/4Xn/^OZu`L@X(E ]0ۅru=>ڑ֭?8kr|B&KfUkDb}djsc#dL 3:@`.MWP?V2 ~z]<\x/!NB_7pe @Xob#gL :@`.Mn.6@`QJ(D=O`ӗjنUEyE8B #+<(t|V1S~ۼdL ;:.KMq }[(}aF= Ww|P{]./!WE/ ^N?}q%K wSR<$g3Oh.1"ҩ:q@U{OݒL 0`i>.G* }:I`WhD#"ɍԤ廌xǥ׹_d<g(9V$~|p}oHg/XR|74֜s )o1"G_4pyc7WQ^"ϹIyiLpGSs`H;u- :o.zAI80yZ^}[ a`'r'ǜS:pL֏v-'N-dntxGrl$nɘ!!weIS(t/^? 
ߪ O)k|`L Pw9n|.$%t ZvROS |z#0qx*p*]M0F\86ha?O)NEaEӍܻW{wdlUR>C+>[!wW75!i8O'ۼdL E,uܫ% >6N!&1@]oŪKSFosU4l4غ܊E 퇝@n&T/}>\Ds0i"&y#bvfZ-䇬"9+)*McL +:9nw9H*sNkj˄ %.qtE'}z CjyJN\>Zl7S,g2CJB]*iU8&@k`ܚS6EpϚY95q+9RFR.aIJV+%=6~JZtD(J"ZbtL$V`j 9"Nz{C}WOtgL nИr>O8R^ ^v,Y' eX'Xm61̒/cM&~Bcxşa\: DzY:gi7ϊ-c7a*x<0Hjl̻^qp4þeйCLj!}{\ADݵ}2^֢u5~&y1p&Qz &c"5A;0쭚[uo̙!,D {BT8wEu XѢEB]_M?}թDQ\l!uaQ76 7ũ|}s|v['.($FHu%(WJ˖Yi`L`W]#GPM@V㜭a !&dct8$&t(3\v~90E+ףp4 pt1Dt—6~0?};h7R؈3%v/ Ckƿv;hj?u晧ieS$\+-ߝ'+7qA`ᭅPHKN8U[ x1&LETNL9hC;V$K5,=q@d*|l3yE^#ѿ>익Q@^2&tFN:/\F;'HD%E1G8&2BM#S4T&:'@)-Gʪ4 4]۷g,DA|(:|mvf3f&E :( Pݻ@ܹ[wA?,e'ZEꟜgC ];h/XS$mZ8f̛r-)Y>ƉCV8ui~h 1XT4+0q,cCϽoZ`p$o ~͋,z(=Cv9/dL Gޙ-#Y^B(@.+BYKy8awzu36,YtC[Ptpٹ6/`@̎PP@S@;g~™puP=%(u 'BtOP|ƨ$rǗwLϰ)7k I gL)<, HEk6׫77j8tݫ9WY}Ʉw7v^kY0L+N~ ;-3U#qi'eKOQU'Ph1_T^fǗ1&mK)}(Qt a);'Ӫ(ԿЬ qͮmkQ<\.(j*xo`ʡ w#׋iX[\91zQ8S/Dm}׎F NL~ؽ|#?L$}gmil(w(KH,<v{.%8w$ _Je\.Gܬ|&@$pi &jڔ0 ߊ)SqyE(ZRc"Y҄w"Y\eoz9wYSǗ {زzlyާs;U}^䥧݊SܗҍIy 0&\ꙃ>% C̭zߘf֋L>WDĚ_ ]/X$7_["{bկbREm >p]G@/ђ,ާr^t0&L<=uS\Xtp>'.SsۼdL 0(pLb sbm't93݄slF Fíi_kv,yٜ'^ELueޓϼFf #/at=wcrl{6x_h_m8&@-5>JcF{ >ePk~a\:piHINBLPԌkqTJ2/` EdLwFN/aAn(rjEnf%7H]"Wck,ߞf81&@X(7w2&Жl8.<,Ey^ "Iw`EbOk-W̍p}Q(_̜霆BNՋ\ OqX_awsۼdL 0 Lx1P8ڍ;BYdf("Y,_":tp%xd/_"Y 3-F9׃ 0&WlQk^{KJOJY 2,x7hD}" yx0 GB =-I|AL`nTy}}xsYܣYxhs#ce+ufTUKC\5pYʕ- /竺q>L 0=` @ݮ=:ńGܓի8馸FEry "zvW\yejHM27>oᬩEIT JSQ$]$ZZ&Nu$~S61&P>v Nе?n;҄}-ɰ>%1Rzvg/0ʅ* [O&_E`4 /Tř p>0|PvB/7w+1ta4*1ſ eY M)2EwD8XO>~{bTpFjbS5\͉ 0&@r SH.NK{$ږ5!(`19]\4L%`L 4 q YPiA^lw|'Q>\,_.z*DCq4wW8';v#m;!!ʠqASڒQ.rۯ]^VMV4$yΠEE~yZU%/}p9ZQ N?%cYdt4k"%`L xG PBȒL"yӎ5qah(~P$WK7'8}2ny~<wu vdzNUYzD_KZVshRLuD{pIl}Tef*%;$U,)+K?Ŝ!`L p!rp8RkzAB. ]9 uv'ث|7ix_|=ԫLdIlAlYYOJ|ɢ̮u]*kpԳ[wor'%vp8-;l;0Gq*UIzsg);(NrsaJMJ}Փ.< 3./# UQPLY%`L @ Q5XUPn@-N3 pXO|4ȥPXZM rR8 ' XU&Ip݂ofC-G3D"KI\pUCZ2UfDۏ CBp#8ڳP[.=<˝Xfd^f\SRK%$ӂ<294T]yCdoxNl 0&ZN B `*U\\BYF (#ȈE;F0bR+.! #VȮGQJ N&bE8 @ M X!'!QQWK 7'zyY,sxNäI9⯰T CD4Q$6ʆ{R]\>xa_ xWՠH| Ί*ˌ2Vq|`@ԕ򥥲{ I);Ӓ+FM4iJn8(81%x8k_E*6T ӈ-3d) ,$cb :?$'q%vQwsHvO,֝䃵 d%yf2O2f/`X_60EQa]i-exN=ݱƟ/z , _/O,fL Gʞ_4uw 5,ks~4 e?Ip[QQ5BDLFPv8MD=["^Gk؄7'9"¬YI$w킢^5]ϡ~k3?ً 9˱9˫oy %s{ܳKIiM^{06w|Sm^aL 0Pʒ9Tɸtg׎+pmߵ]!CP#\◼4k) P ZV+Z8ǀ⬠w' E ^_ P$ĚO2[%Ysp[yhI;HE; MM8XDC}^)$}W?<5;t94UD,Xi8۞d ̼ы|3/=lUuqP)ٹ({Ά 0&P0}҅+{D?px9XžaH$4›m ZLVeS,A$\BL.k+ZY)Ӈ\.8q"@aCgُ ҳ)IJxnϋ:WHw8 TM7+]\ڱn^Ѯ#'s{>?:p?%{Pg`!(N lukuuEV'?{֪7N.D6 HN[G2P75 -ʵ~B(e/ ShZjq*#2 #deIk8sWt6+M!mQ=bfUvz\ E[kw݂b$ٻ5TT)%_s;VS9jt :EW 0&E B,%L1Q-]:cXdM/A%,#DN1 P 2\lvGɢQ4 !E>mqIlMeDVeL/$M-uwYI$gY Ϗ_(=l3Jj25[l֝'83uArNh_GMM$⮾,͜C5lEp**`{Z(g`4 0H˗\%ˇ~g쳌4<<}$Cs|.'"cL\l\$ ȊZٮW'-Ȓ\ug~ 5Iz6gI˼@IDATmqiy PƢ?ӊt8lJ2g(/ Bcq >%`L @(3P&E__~xsG^ǜ֧:4OY{$HYMivAXdM.%Zľ6f6$S8nA"5_,RGxZ=u/Ovv<{d oIdQ 0FrL@4,ohj_í|'e>7} 2`@2~G+րmtƟ\׆ۇh8УQQPCLf2a6@y/[6 VV(İgjyzd}Q|4M<̀I}!sU|R'gMKpLqCp/HSݩd 1K_TSaw֕s1lPjv+dv7Tnq8@~Vrt5_{\`L h);YȪL_<$DReGܿ߀N>)l1d],*~ӟb=,;Xm?fnWm=òX-rKU]m۲mz#Ӌ*YI(`urG +/P UYVdU}= 6< vh m)4sj*1k U_`L @P*'A&$dM2#D~̸$Qiq,:<|ƾ(2dXoY=m{oj;ْcW~e>b|YI}ȒL"3 h៌=w=[| s&効qNt$ڂ^+&e^zڭ8+Ioxom^2&@`Ң,ZD_L6}ӗ5 e$~=Kw45>]ݣbUodrg/$I(gӳ>Sj-^E&i\RBSl|s^y]! H:Xu*!7Y$ dL @keN_̞eM_Z4 \jpP(Y0sĺ KxvvvU_hZl Lne2 ?ڦ/Y0GN]Qn e՝P4~eRсK7y!+ls7lﳙ33gȐVsk QkK&@ PO%N__$鋛=E2 e⃫Mu[3?T+Hzj?=WCϙX1-Cǔ]ߊ>ϴ#U?x_ζt26~"`;F E03pYrޢbp`EUfS`L@k eQWej8Չı7n}nF[_a$G:J=6^+>| Z\ۉaX䗹%V1kE3y}Z)HO#8h;9'"qbL 0V$,N<uI&o]ݣbUob'-ȞՕ*wN 8aˣ鋊7ߦ6_]kl0Wb:[PȼCN;PBKʤ܀փ cL 0 I(7˽oe$C}ܮH'bR9 .jZD8 ȁk0sLf! x?`rD{VI3#/f"?'&hezʭgOϋ:W:Zٖk9;dDZ-j¬O7A 6Lˆ4拍[2W^n{ |S[Jz]P H&`L`~[ڎP ]nqdkvG|.Up[X6G~@)v IP/ SpvBc7V UIB>61& `538S-v3Aih=ݢdFm~ڟyM\(z]%t3ӺM `˜N\m^/6/`L@X(LE\gVW[nFd8w g{,gz:~/ \.Y%. 
tԢl6Pq*omQgNL 0&/,\&,Op Hڸ3+U鉶<މnپwI"e1TZk#=UUKXryO%`L@X(cZLL6FQp Oce,޿GȽO6ZE ?(`LX(J@2Ő[f3ճp4%Ay$3ޘ?$S/|`s#:4N[`EH$h=s쵭^1`L 4 &aⓘ \v`kfN,dH2f/؇SNZJAEh$.͜hudWE@XF9dRʕU͉ 0&Ok dOIKZB{qf4m3NR* hwGѤ-zRqپϑiCK,W\VmFnYwVV8K&?#a%@XQ+o&Fk$A}!d|{;֎71\H[^KL۸/%+զ2&@3Pn,> @΄}}r :7$DڅeFj_Y]STw{$$1vm=#zNBz)9+ 0& +iȝLq.Ʀ" 5 ],9=ᣌ[o8="^\X-axI]LlާSF\jՑR0UU*0&X({/aM%@,r~9t@F՟ *Ҏ?ڔzu`L 3{\w~UrܥJ pDTG@la/ 2mH~Lu'``Ĉ0Wԓʒa|ϟ>q`LK,Ǘ1ܭbS߈ǣ=(,=hƮ}˟ ﶴOʽۗƨ[K` xW 0& ;:&dm ul UWwU5RTL|ѐ JDG3;qoڐv (۱/ꯦ\#& eo5)#-7ñҷYpr <=uυ%Wcw$؏>W,{`> 9U{‡k]rTi6 `L =A߅܀@Ω78, T#0J2.];<5t* ' !!8bOoK#\T7_Fsi*aL 0 P_N7JQN?M,rl7˵Dɇ_Rjxy6tWy(ss- 3۬Q$'P3ђ| {]{L 0&dX(YquC@s9PԩNU=E+U8҆I~"Y8rٳʡAq6ͿdxŘp>]-8hOHCw 5$9@_-dL 0C5h[ZD` ^v%1Wo*Z< E-*]L"sy8~Pjn~ AH97q&wd~'y`m[Vrk Κ%y"@S6bpGV(5 / \XDd)ulZ>=ꮋU-6\=t8_TI|+Z`dݺ#B^zmL;$YW6/`L`[Dh볳^ /Cq'ʾ!Ǽwm.a]YåN]D=qϒ){j >=/S=3 M7]dL 06HrTn 0B6^.ۓTxaВo]9m/txAu*WySs m 'BBWIf)U&hX(N&M@5˜uʝ89ȵ'KeIl_Ij9XqtyeEv$qLjUɮKObT |*PC\jₙ`L `X( 5r''*v-7`a>9F5g׊qN\鼭u8`O pEۯ +۱uFD#2/JZX35\5& eo5&NbȮY3cyt(X/ *I_lxa¢tݘ 9Ƴjɻ"Z(=3g5jz)_u|iaIIY9[\Q&h1-F:s,o?q'| $_>+-"!; :)"ya#o9` h:;h|Bu5P. 0&Z VEY׬D+-V"z졎]` ],͟ly Ű;bglD>@CeY*K a0p׊`Gs6|$y` mHpH-knD4n?Θ Q78o~#.o,}OhADSV/htu%|1Y aa7dNi](?E=oH~Jl 0&41HnΫEq823k?,˯4+8[`D՚%zTifLeY:©(k29íbL 0&Pn}t-896'pJDu*o~|"vAuG%\:<[Uyl q̽.3Pvî.6"1t_\`L@X(WN>兕g44ĢY%ɯdc<%|alrU"gjreߣGb?x3cwmֶRQ)tl_ PvAk>,ts'%:K+p675ř; ;;1H!-޳ugs#5E+:JX΅ЫbrxԫB< Iꮨ^PM)73$V(Z1HRY?TC_sI|GF0=ִCi(AփaP8 v;%>,{Uj Fv7YYxt\ ԳjO=l`*UAMvJ!N>W`L @PX Uv9g^ ݪZ,$B#[#k{;b{g?la6bdp9ט6MZcSe~ O&+q`z&J.VO] ];]Ӵ>ASoT3>B]{iмNInl47^Fȥm;'kuK ,V?sk`&w,HpN0/t:|׷QIP/pսW:sGweCc`fE|XM$0mVSþK!2X/$c 8X`;^A`Dj u%816Y/I5 `L 0гPnx`h$HUM5eB㕘Ib+޷ He˛$[j,SPT\XghXAۿfҬ;uibu&))OWm?'O/?C14{FZE/Km:Y^^>ɪd'޻2)k=`L 4n72,ٱ%Y5phGUi M_)ϋ:T98bKm \= J-%';.IzFt,m?T7&*&(CfIcym64PQU5y[lm\0bDXEőCQ? H(]Edn`L ^$݉Sv'dv ngY%/{IsMW]]C ʭZ{cv}? :y@;Hz~;0@C<$V]܉Pf{WrRMGKy%i*ۼ`L(zl0݂5GN>%8}fF%a@[A>kH*|b{ 'oj~+E{-N>$ߍ9Ѭtn~Բtp5MpXq:K@JΆ6/`L xC,xzKߧ?)*!֐H 6e*?K !G/)~N$J+/l$RVI+8D6MϼUĐpeuHN8nט`L xG@oeͪh +*If]ֻMς>C_/><<;օabFQ}1N2?iixē+ g߫?7R|rΆG6/`L .26(fZ<խ1}f[ٴ( 2M1l^!RκWB4QeD̋){cP4 ,o%eo VLx`-#a>v-^t+_<%YnI0酱k-T0v^7oGFL|{#O'\|ϣZ`L@/BI0eB; %AKiH*?$ z$?U"\a~zwv'( ȋS$"WEL 0&!DU#aDN;"8a *D2YsԬD>$Bj٬L}'p#gg 娎S`L$aTzͲPi7̴$裗wNX?~Q=ӥS7yHVqY9nBҪUp^2&%j,JGl<ZjHv85V=Ս-ʓ CRP谈+Pg`L@#'L_=#@}.8Rɳ>z'Ǘu}_D2/"#=sd]x 0&^oI/\ v6%@ijukZzfҒlPg֫Q֒dN Cc!Pyx;߱|[11,2&]x)0h~'$4Qlhҹ:)B)7!2$9  ޏADӏUs5?A38NɆx^/ֺ4ǿvOG/rh%$Ol׬;vqz"9;2ztIH,t%hCaožw-eԄds8tyOT_~kl)~.Ig=t4mwQphϰe#vg`?>f9ջ$~ SRi,h`L ̠՟|bzK wI=*XvA׬}{E}vy 7^ e@ylXt=b(xAl2MYܱXxlM`i[enǶ"sj\i]mqD,hyE&c{DΡMEDC2L 0O ,~m{%?ΙI"y+S5ȲͲ沣i }.ҟ}DwfbIys^]~Ts~Ayny*Nhqv>wpD7L~\qT犭E4.' Q/D(nCoG&شiu`uS%sG{]4Yᆠuޭ"fqڰd/:-%"IHSyG҅)975=$t 3g?.V\ش&@#lee]Oh6-Mr{kCEѡ}Ȋ}Ykƃ/eq5(7s]])-ĵ4 ub[}"{/fA8GGM{TYËdiNtĖ'㻙 mѴ@BQմ8Lk, V;B{6},] rb[]%܅Gyk舘 1Wq}-R^p7 i)}-VG8Md[W=omWsr KHnnQ3 `L.B9?}\f F较IswnEVٴ+mɂ^J!mXlwsL>^7+p)K(G':N;_}JmM eȄ%BQ_n/fEߋ P=BΟ< l k$Ѥ QOE%*h}J/5hn0@Ѷ{hՊRj 6^%^i>{Y8uy%=ߣRşf@Xrev]>.;`L mZ(W5OAp'6{MgT\en6Ft=MNӅ"gJEZ ='ˌ֦9)NY+ ᧜*{Ay%ײE}% R,<3*]+hGG/`sJŒ~g3ci#u.J:[O.ԘǐxѬù3HN).e% L 0&p[xќ0_ѧ 0v#_=*b{f, M ?erhvI[7$Cv1^]n^/tX[_wF 1a<& NΑwk8\78T@_G՗RMsׁEaU8,pe?<<țЮWz].1OhE4:śB"[,֡QD #eTB# 7PG$1M.f@5`L T@Xx|1,3^_(lO3EG~,}g}'bWiYFO?#-̛Jnqhڪ=\9DDx_pX!}CJpo|G)Q1+&Bsx6|8㒗L oGfrqɨ-'ʆ]6{ҸMk\ߓheR~kn|yŋsmꁢ:"V$ͼ3ɴrD2x+) edG"XLە.ipXH.TmLɃ%l6,,]x>G}"oXot[X$Yz0&E^7L |  \Ϸjwy `|}%HQ{ޭ:R}=¬W_č8Lyh{ѳ[̮';5a55uYu0&8I([.z뙙rXzfbINjh* no|4 M$'-Xu0;ja֑zbmu|qzct?!wu g $mMs&`u@0 eFEzXr6ָ[A8/m㺯 HFcf[%ɢ=V=T?ݧ˔[+q8/FG%Lr3bc#Ӷ|GZ5H^y){mnA[`x;Q.{,E˗}"`L 0:G ؄2}b'DXza ~ހs7xNg9T׿6ˎ9ENF҆dz{07eO6y#X;uKťQG{py_swLK6No!ާv~ 8݂;g/?zA&`yNv؍.냞-"3{So 0&@]&Bٲ,CR6l|ݺթc ^W\jڰ M#`"PR/*ݾaݲ ]^X4Ü9-,&;+]֦!zCt"T̒Y? 
M#`"ߕsۆ+v|D;nwʡD 7CRF`L &LbR6L~NӯsS{ ncGnPA1Z̝[6oۺ`0嘄qVɔ9$,n=~/?qK~FN{tim7_p( w[7u`VCx^u{EhpkԵJϘ`L (i=jlH"ٲZb QS:G) &/ee(&K2 ̒eQfMFP4/iHy/~!ww>SR ( .{HQ/gR+}Gll"W}dυ;\U:p`L 0&P/ P$e|$H$E$Y0 K0["Ab%v/JMAc~Ν(}{lY9֪&Kt"kϙ`L G,7 .˅H|I g31&b+cDݒ*zw/sn1 yO.S/BJzL53E:?L 0&+`wy$ hnfYI ["(&K8ȴ.>ǟ5\Zl f ^oRJ?z`L 0zNe: 2YJI[ZEBlh"6dm8u9y/}:4S*o ϳÊ_y63&hP·%HPֺ +X\6{ίZAL> ڮHv`L 4(uQ({&͘׃w?(yd3Ҟ᨟c1`L 0:E>:+K.;|`Ū/MYx`L!`:3Z=tSxrf?r o`dϙ`L!zh ؙ@#6mpHfuifhkw48 |L 0& 2&&P_ ^-[(tO5Xx`L hh7 uGނnȯs&`L^~YQގn5Er`L8 +:vVJ]S\MF^dL 0&Pe@ML>HzCw@be*\p`L 0&*"B": A;mjѾi4?)_Ҳr|W<,2`L 0Z'B֑sL p ;?mRУ.RW(Zk3ϙ`L 0`|@  *{Nb7Clrev^fL 0&NLeQ c=RT"+"㮸~wYT2C(f7#q|j-Tloyo|U1tO(ۈW{Be@IDATmm9`L 0&P5,v: sTú8V.*!v6" Wx$MxzGb ^>BZg^.dxpJsCm!ۯZus&`LX(WIIxɾoAV·]f0vR*/HkVޮc歞)yα#ޚ=i`w9}`\bSB6M?n&`LX(W]%H~H)9in5ÑCGW- / 4Mhu(:櫻=1)O{0`aאm tP7 \-4Ljοw;/3&`'pR?W &,^{wk "|um)m6dzw>0q`"{}PÆb?\ThlOW$^ԴEruN`L|,gsȻ9/%K2Dz 'Nu|D:'L{sdRsp"m 5eup%`L 1z',.IzӜLq:'+q;{oƕYb}{nYa;i]~]? `L 0:L-y4OS9ƣimlcKrNq՚a{E.0{tj12?جG$ilX$WTL 0&B-UU;qzlI`C7ܫtYiM߃^D07A/Lr3co0'Їگ\'q`L 0&j@P[j_?>HiZkt.~8C-Z\BvAiU6`@lZlc,BovaV]5u ,-*մe F-r'3 OQP:NҲr|W3&`e`\(q&@,ʶ;"pL4j_&F ԓSt{;zJIuz3$D=ᗵ{j6\ `L 0&P&@066QyyGQ氷DaB0X&dUs&z~8wntxcO-$U[jB\`L 0&P.9);H5!C=:]O#$ Y2 eke3lH~N72},*B80&`ABrTz!"9H}:`bcaNt{XhڸL 0&@p`Y8ms0_@'LS,cc2>֭@*GJIH-#}ϣ㼘`L 0`_5ɍı9B&$I[bQD7ŢLmWa=RR֥so`L 0&ЀPmU Yw-~y!98dHt+ @b1 >.)k|2`L 0&X(߉xòuh+%\ ~*6i~4t7_/<¼ #1K=M=׵մҜ 0&}|'kx._E4~Jg8t!#KJ)ѧi!TaĀco7߾/짗2VjҲc'5o;mqNK)]WӯX$W qT&`D-s6jϒ찋ECOO~ƴp"W K)#lm[r?|Y1Y{|9@^n^nfŋsQQΓ 0&],kJ+q SDק9y3^RGBx<"Mkr Y24DD]}PwFKv qew>a^fk4Z?Q0 H(Z* ^zy"kִ9Ӆ=k"7)3eTωמ=u(W$b?JLWd a5..z>p׵kOT0gL 0&u|UZ"rDGLw +D޻a8d LyI#HZ0ZF":rDpo]D?#l.\.Mµp."G,\!Z"Yk^zE¹_:NcA;% E,K fߞu2$ҡ},)LMuL 0&@=$BT됊~UΛo^2LE?.zNs [b Ǚa^#? 9V̸2砄qU([ UKal-(rCyϾj%Pw-_eNY+BPB[ݣ!aKoܸ"X ڦ]w0&` p9VPYBY5Fyd =ow֡wY}W${7,KE}wy%f̈́-vóm)}G†t^L:;PqG&thwkȈ.!clZS; RbS!H Mkw+L 0&@'Bb=Q:uQBAne27[CBh]:̻4c}%PG _"qШձkhQVc1x0;Km`L 4L=\<r@;G^BFG_RD!hwٛ n]i:|Ց|Enݽik6T]wZ67]1`ъnnIt- ! M>.4Gk;aɕ`L [lQ&\Go1q{DzȈzpNo g)Iz +\,QM>y]hGʶBzvw$"s 7̟S{j'oq"oJX( y<+<%2!pU)`L {vU#O\Lqg'% 9j!%uA3|l]r޲rI3LBfl Dwl9W*O DR}6Т#u|Za国gTି!a -Ø1PC0&@]"t\Rʛan(W$S,;W 0&`5%>5%`L 0&%uZ2QFfe~"9fa2Ά 0&oX( c9]*ݚ䆅" rqHx`L 0&@X(uv2CCe{U"&u\c&`Ld`|ȗ_ڷ{r9eZ=&@-#YypD&`L`\pK7Cw]GZPCSژ" Y,|kȖ3&X(oC޼ݤy԰ք-Di`k]`L  @Zd4ܒy~E>p۸a,M܈*ZWU/,L90&`LL,rR6ZNqzf׮~(:F3]qJKy{5\QD8[̱ȁ 0&` P.dD : Y;m;i/7Xer8/m㺯 g"ę-ʀ 0&(@2Lr9ΞVzABSw-ѧ9|}+&J|>xmݸ%,Y&dQf JF %ڙrB&`CίR NlR sc"Q)lp2u9_gz0C)jO2uG[P=OaaaA☋eIr"jht)Lɘ`L = eN!UQ'71~?஠%-2PL!.^)5eKD}Xb$|l#\.U/C=:VVaiCL*ݾaݲ ]^X4Ü9-޲(o @'Ooj$$L 0& ef^7d#߸Gn8தL>dl44H?~>k,pj~;ڣWxTtAAݍ1 fgܲy˶&1 㬒\'qed@I])q$N`L 9">qr˃`Pwr&S#Lј"1>C/;&OSYz (&K2 LL$-2ad@nO=')CutL 0&@-*"SGNL:oVr@,GOKepLdEZLbx~ZnH|eM&aL3[sCpG~`L 0`%AtY))-ΞIjF _$#J&0EUt6SSeM&L5ٷLڲ$[".cP19+B"j޾9`L K~(2.>q}+RJ$$j,Hw?fC?<+`-%7]i[~VZlP䷐tLWHL 0&P UL%$= 5 G;ٲk,$ɺLSɴΧ5a^DBW,cD'Ȓl uC_n^ 0&@]!WL],47go+K0(ȖHn>$-l fk2[Ie?]jQ%`LP&pø9>#\0},,$fOk4KZVe[ ,ql CjQ?]rqbj;bvR‚5׌ 0& LPQ==a0`k +2P(FO z xAR] —4 G?qIIk`U'P+bTXR_29%f$O *ru`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&jᆐÆE)S&P ([Enz}|pL I{)VKEr1L $$[gЋn1; ԅ;PR_tn+9Tb^a'}l;{ܸPrIsRnJP39R߰& zϺr3;#ЙՃ!Խ0y:Sg^2>mss!7J.)V <& O&u 'x4A) 5krbtʭMLCIu۳?:6YF&$2c:ȍL dٲX A#SԔ~̹uʔƓ}0V{{|ʕ~lcܛbEܺ(TݚT_LJ*T'n\|҇kf%M|n3 Y(QH.e&EI%\qKK s=][/1mIH{攑%obL8i Hވ?SzZ(̟ ]-֚t !ƕ> %/ǟFH8|q)?@(-_nu;Wvl[}a(! 
DYzej:~RI^:S %8un<%oYuCΤ#'C] Eyw< \zok"1=1}㐲cV&J TPڀ~BP|-ABH?:P_랄ɰ6RwcǥZ\g*ྰ`@UoC"Z9H<5܏SR^[CgVķg'O2/;iR Uu*U4" +5xexhl5܆Ki]-JuPCiVzN@u X'XM Rx7;Ab,iw!+mܔ)TNBjqvSida"05YI S>㞎o~zKa{ S# ;_|8p5{$ ·2)tB =|͘".oɑ3nq9j9FNqJSek5xa&&?'ʚ0f^F)ݞa/;ӪSa&-Rp ;wmO⚿2|kT(ﺢHג7^ݯq Iɸ=u|G&x/ZbJٱ#Gi{\|X ؋{Gx]qە&fIQ(={ORR;P\eYzMpGpRs/Hj'fLUA诣(ke􇆇+670uK/7rBd~ -Wv>::xt lQX1&x6Uw{>|H7E8PbJiJFxBՓ.Aв#,3Qd3I$S('^.ZpOAC{C Rj`s6)ۋ(6v7!VE#J谇q+ʜoW븢nk2`)]áFI+P9 |.fڞCж`PSCpʿpdkb)_(xwcV?/mmwpVwϻ|Ve&Z(a9Gfzbi[AV6W2伢"W`(49u<.jƏaNp(ڎ +%_"(Nq^s>Y>ܿ<Ɍ㏠a79ny9}N;#Eݱ|!iv{י;(ȉIIK]BjXh BmC^_`a"鳒&|BăvdP)R9l[ѩS 0 VM|Mzr-,++UEsa3-"; Q3N3ƑVr'S 0='i"Y(q_|fp˹F*ma),nSq ^ /ou!,_$<ܟM8T?z3/}4҅ 9<^|hzl7{){γ;3f8'/)\Cx:^`n^`hgK*/40DKY+,m?v^u|_sy|l4ܤt=dY΂=?X/w51gNi|݈Km/ /os|7t9CߌYBt0Y+I !^)e8bM/r9lfr7q8OW!?l}XS@\_< tpuwSAV~jdQ޸ x p]J"QF9pƯȆH=w$%x})4P^\jp*}]UpK{{1"av9G]}maT>y̽3Ӛ+wA+\Ӌ|2:a{z'Rg^}OOxNs @)^&1uS=(ϴ-K}C;*mby@=1 e|UwKGb_P\s#1 سGB$Z#9cUvoΫJLsNqw#{0T*Xgו> _9!yRl{<+{OÔMÞ LM|ww>l5?4M^kRӼ`hޛy֣d\KK!"âOWʖaPJ:XU>ǖ{W.\:s0Ե`9Y ?x(OUQ{ow:׏G<5Jŷ'|0\w'Ӊ),0zKp] :_;x'An!]8w{Wxw{4RBQ\t)T*|g:ƛfWofG¼G7zAb|6"\Qapg|ƙ^Ǟ8|}^o8}k_C{?S(/+>=QQuѕWw+ܔ `p!]Sfk79O:_jɬÉʦiF(a‰I'9th=?gh ˀUȿJBys|:KG7!e~qMl:9@}c/+=-psP8l=^uNBF$$lʵyl?;?{r~|8h5X+Uw&T}YO[o'\26DnrHr 8uԂQ,l_d}U$(Ys\ [2ژ:R(>[O+37p%nf=W$XߗqI=g;'lʀzu4ʥmfueǼR+Z,!iܿf\vKo%}e*[#h#_\}1^*\6Ͷ[ʛ>8+1a'V0\^zWA e#O3oդ.Zq..>$;ka 2iLu%  g0z.0Z+߀@]+Y*+[kVXHq*(G&X ج;22"a lК192aRj,jc;9ŭ=={]t8q7j,xfo. ”QY*q5lT:1OX(J0 n؛\ _lqAKgbG;2<[ZvMj+Xw=">y|=|V(lYܹUkD'N?"a-L=|oWH@ I@-ҟٸ)Cm;]zp8SsK&,΄E%.ʌשiɫE䞃WZוo.W~]Y#o+%x1|_;q+XpƧdb9ru GgbWxiWYT{1W½V?y|8>PoBG݋[\|e =:{Ľl[QHh?;zDn T*NųC:PZ``SAOMV܊xc4t\p /.id C ժ2>v5M]2ŧS bvDgdE[/<^*/~Yۧ;>͂P<QkX|2w ]ĪHk{Ek }l|e.vKh<`\uh9$K׏iC<Z?GiFv/_rTh%cY%sqh|3 _%޴3X0k„}'t@s~Zk6qg^fNB,L(o@dŭgLC9htd ~"a~g"SS݁*\tZ#֧ꔃϱɓۖyhػFn= w4JC~x&u\9uZvY>Pr31>g'M|6s "ŶkdLxz;Pg e6/f?^ƮEb[x_\h%sSSKY~V!vS8o"BX)I>s5ox9^.ȒiᅣLčcgx+}q`E}{4%MA,P9a>ёv V /0}P |/u1(~C'=g0wBfSTh7ڴX7_Kap!rc(xWg4/aաZxA+~uW'Lk4UXJ++sx!PuUajوxs ,W 1|$pGڣ=̟fB@Fh)D ?9AOMugKay1J_GJԊPVuB { ɥ,,* Y:9f>çCi0ӄW& Hi%9 d/ `8̟fBW;ژޘ@ cj6cT[)=gԁBɗd4p{HNѦi*X+Re{!9^Š~1*ʷr걦L#_ŋL"zi)TU#P+BU[bEٱjիBlB`Z<,u(].7B wz—0No6  Qԋ)ۊE_b$"7Ԗ:+lfNQKDN<ؔ+Re{1e[8|zH_#BBf$=&u(ֵiG;Fv2GP{+IJ}9c&d\_#3`sL h{FX!@=!!J/rrp/G#1>քC|$g:C;D @q/JOsQbLJ\ҲS$gs,ߡW{D5/+.'I*zƾR_cnhΡॱvŽ6- Vp񭮩k;_Ώ 3+[SB6)_%g s ~Mgd>w\mۚABNԓ )s7p0FxTre8&b:'LS% 6֚P&i@JVѫ/عݯ`9`'SK>UP`7oNZN2m;ʯW 7?'˙1&P+jBtn4 lDPݣ#)_^4V' W #!Q4PEeyPLTmQ!L]%@$ V53DQߪUM@0Hq?7(^c\-t Ze6/Rtgv@^ &㻤MjFp^sN\kG^M`~Z4'F͛=qunc~c[i(P^t%C{faK$N*>ZnԔ=+9}\MhcE^ Iv^{*%V_ '(qUM[OVؤ{Yʑq|i'<&hjբLڦ sP^^mbhId܂?0 W|4y>/F ~4FYiiu)B?#w-7ʙ4_Pv Kz68.x򩨧@ g/c_H8&v}HuDȄ)g|;Uk3DM6ԑëI<ѷcY$ׄ"e'@twaXVÆ ۞8,ɶRf>&CG\c(o!ᅭL}oXh(?\ƨdoZ8aö3ai5+鱭pow\m=*V4@Of76"!.|[bSZ0@,rm5ST1>: o1˭t:߈=\ia3Ͻ SN5qo^<>e=I7^~5| }1R'P AMG h!l#ڻ}o}BK3(/YONX{uD!DU#(_iFb g (*/:νoGj2T=ՍC%@bǴ 1sZUh Qnz2&jݢlad.x[KO;lU%e]D2KqX<֟RUJe7xw֭!x{ۥP_LOxi7ZsNI"j=L{pѡ$}?,^~NKcYsq Q]!˚sHWK[/q[}V;Xqz&Dh3m_e&OOýIΉqE ɑ"ѭ}-w覩&m7xDxE=ދGuDjᎼYO]oC'a3H$Mb~#/\.\|8xp l~{aEW\YNEUFm}*rbf&bQeeJ vyfge5UNdi6ѤEj t&߲1M\ݎ<.|,'}y@Ď/Bj)%!(ϓ#>Ax\j-|[ZuםP@s;h;pԴ*Wn`).?YOO|tHZv|U~/0n_A qQz eNes #qm)B쩛uH(p`Iʾ(OYj;i_0{{ wG#UD)CxWʦJW̡vU]׻ 1AwHƾ`v'uk9C⊷P`Y&+(o<^`~"0fҤX!!ml =܉m KɇaRi꧶Z; IV %Ҙbw\>3HE@_llR33irƒHWD5')~E?x L[ T4+q#B4_FR.aH~#*0n;'^IJNSLL(zPŤpwy1®= ڷxU k^!AIDATAO5#ZOG3!KN&WK2 mIgk]?ԉ/-[y` [Kk B?7/%}=E۝g\$,R!mwCx9h&@=k&C<ГBO 4p¦mVSuڅqx/^IqIxJ7/kY빞7 |[Y=l4t(/4{c!/mYq~.t<'e8g&/ƗWBCRWcy[9`L 0OZ[+ Yhwkݚkuw<3F|!:܎Lt6l `њfZ~+8PͰ87EL>c/Ŕ当&5G~}2`L 4,a2-`&@f;l׼^*e6p8JٳjaoىzϨl G>-1&@#Bs>b&P+F%L:bn0hx=* 4${' ftNXuTL`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&fdY aUK mBBBV\@ eXjOL3ǧV*ADӗ 7sg'#ם-)nv4hbȌJ׊qfL 
0&ɮBzz<2W߈xRƿok[t|; KūgOA%EoFJȍ6M|:+Q !hR rhFz2lYh,EphRNmo0 cKe I]>n}L 0&*dxʬ^ F%ԈDSR;7vű+ZD?pUR~Cj2A nݸI*J'-lh\_wG9OB$c:W$wJΆA鞟gOD:.o`L 0& {Eusff7!፠"Ы`֫ *>n^m @ HvM wz[QQ( EE{[Ѫb+Hcgdn,n?ff|w7(ʯ$@,!2v%W{hd3=ٷ{OuY*׮a\?nhd6<]ϳ-_n%m=O۹8AcO/u D"І ŦUjoKºeӘHrϞ];;f5n'S]?xu.WxГ]b~7Qz^q_7FCvp.JI˳>ncLkw .ʡ|sgKe,NyMpmIIҳ=nR=GO.X>eѢN;[a? ;4}Orκ.ٞEp.P_ uەYۮ},ˡ$E\Snr8uQw}7Kth((@y%H=εx'띪%yTzRax(yr&>G,2_˒) gwd8Wf;5z _<ӛh7(Ov$B(CM5X.;mKAI8T㎡X*\0*6HWA(#sU~Kܯ2d.&ARm2":f%{e9qUI}'z"@h[Zբ\+nȆE %U0̂,E$}I?,gRw3}P\uht7}ЫYK'Og>k?bWa!|m_AދP$SQ?Qx{gg_E'kAF)+1Y+, 6O*گXN(}RE -ѮuAc Rw"@ Y:Z_d(dqQqatACn(wC2Dc?*[⻥u” uv,WqEYf ?uWQ8gCt TE(oaQ  0QF1%PD=OBKe=֔IIq,x ömkJWn:}|Fʄ%YW!79dBt#C_kYJלns"@Z@(ʯ.L'sf?6⢸G 9H4>n./g=Уq{O(h,Hw?A$:×3! $HyGq{GBXfG^ V7 gwHH.Yoo&7^l{#Ģ8"Vj{,iq~n2ĢHJlwTĀq_ʒ [F1|Jr+!&Ca8..D}(źb"%qMA =L`~KS?t؋plPwX*gw8SXG29\zzC)aoܱˍ84.B%9$R)c6 G6OE-`F&IкW~"Ǒˡ$/Sm u;ͯC%97UJ˯LK!-g`ʽtoI&%9 "О *%!ir]»Udt!El~io $0w15ݣC6̯QT5C!t56Y3o,^8&a' !Mrњ;`;EMbv^P<[GJ6qd8v{V&i*A =sOojX =¹.8a\8):SLo MlxLi`,E3Vq[@@$<0/>_/ OU +VwAI~17g<۳EyKP Q'݉ 'X6lE _|(-b;`h(MzeF0(4}X{&lA7HN*"{σz~g0:k!ʁZ@:$DO.qlHH6I6!P:ZATcp|jA19 纟oq7ε!!PYYo{A>ZOVZ(9BҝplaHI~0ר5)nnFxFeĹh_yWHܜl :\YPOCn0N4 db0nZOB"@ƟRROQ 3M̱-T$ j,ۙ;;MXR/࿼JXֳ9kFUAA0/Q]rq&/{p4 1VТɤoTYܚ-č^g2k|G-fqwz|-m=׻vY=,dgLd"=p"ОEY=={.\Sڈ@!l$BW$'u][F GP^GriX|6U?b.eGkw ^^.p_ԯ1,7 CHj'q?"I/(]RgyG ~bq,Eä8=d'hc@镖aiuG|+=q09x¤S?0Pq2ʈ2lhĎa7,1H6R Dh5Es4 zDRи@vt-#/"֔h5%Ǯ?y2 e OӋBSY$0wPbre{'5,P|2ުJT[WWnہ1Hҳ.ّ H*tṬ*lJ`ۦmq4 Ԏ:*|_b!cMBwdr ) ؗq2K^]692ƑǢC m+_QjqFݣ)W^Yq z\TV@5'L鯑WW\=E9h9K[4df3( _cAV,|Zq݆M,{/ }W9dnm*!X)sҬi=ǻEɈsvDb9;$3`n(tQJP4>/['E1Ϙq%Co|Ym+tKdQ_"@i 3!#2IH ߓLvݡ%f s)|c20-B/=G_-aHxG.XkJ|9|МC䩛\ ֗oVSb˜]=jClit^z5ʙifFCQO"@ m@8ӭ+BK>\;$_є,eEK %yg㱰:G"ړbF.Xc7%o{55dF0{aT`p@y5FrEf]5!wIw%:cΪxoRV5fND"F |/d9*|q .̰겪hBIXU;L7\q0>:,_V:]oa]8=m'T2x7T`?y,?k̹ٮKޏ3Ql}#05*LqhL(LX6"@ D @+ xCe[Wt}Vc > E.'=pxq_5V,^sV$`;Ƣm'qSt} XX_a2˺ .9>K&yX}J3_-qfqB o?~Sqhi몚Nn謉 D!*P~Ǐ׿=.2CChC xÆ\h5th)yݣpۖ꓃մ!bӖLTGx-VAiGZ0MSGD"@$^V&\vq>4hˣmgojYy7(.]kLI=j*M/{bF뚚"@ DD&(Gbjaz}~0G&`1#a)#GK2vupYqb 'oz~~|C{+λ29n\zzyV\n']Շ^Rc"@ D4@(;.D ,~JLẌ4!{*TXõu!f6UGۅ%6xYSXe?f]X˗:"@ D" Ғ83ߵPV:7](yR D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ D"@ #2;f^IENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-ha-vrrp-overview.svg0000644000175000017500000013667400000000000030030 0ustar00coreycorey00000000000000 Produced by OmniGraffle 6.6.1 2016-10-04 15:21:58 +0000Canvas 1Layer 1 Network Nodes Compute NodesOpen vSwitch - High-availability with VRRPOverviewInternetProvider network Controller NodeSQLDatabaseMessageBusNetworkingManagementML2 Plug-inAPIManagement network10.0.0.0/24Interface 1Open vSwitch AgentInterface 1Provider networkAggregateInstanceInterface 2FirewallProviderBridgeOpen vSwitch AgentLayer-3 AgentOverlay network10.0.1.0/24Self-service networkInterface 1Interface 2IntegrationBridgeProviderBridgeRouterNamespaceInterface 3TunnelBridgeIntegrationBridgeTunnelBridgeInterface 3 Physical Network InfrastructureDHCP AgentMetadata AgentDHCP NamespaceMetadataProcess ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-provider-compconn1.graffle0000644000175000017500000001141200000000000031117 0ustar00coreycorey00000000000000]kSȶ |~?dr@f8$w%lat"$G!T-` iRFZڽwz:~2 gE? 
/ٿ^<}_;Gra0J/86=u9P֫wϜggi:ikL*$IzmBA5y~K_<}bw'o 7(~=ߚ~fBg$̗'GiZQ0H݇o&i[$Xh h>UJ|kZtބqo3doMzMQYɳOY3m8ۆ7 ɹ3gTUJK„VySN荚aP4n~y*oBnv~7.f {_ntߊ/y?|Eh? A1Lus-g9p|m jg^O|/|Aae_I3hwԯ7 61'J~bFL!qRKSwxޥxLhzW=/l 2 ?Nٙ}KGWFf䠞6`qo|Gi]@gI=9#!-]i5)A D NBj7+Bkr!*=mSϙ7hI5maK0l&}qNyY}v)UrcО39oːg` Ъ@ЏhT˝AQSELL*u!Vzt/rAEIb@pG3$1Ƶ9;F܎>\{ {ΐz7Z"=)Q_&v` .5cj%jr +'2 6.n,$ -0X!L"m+'0_튴ohw$2wVs3x>6ǒ%7Kn,gh,+km aXVT*G屻?_Nj/Ezq -"]W1T^-3P`l UCu+\%\om,unK61<NKj˖ln D,qKX PRKqC1! >bS1׋( s3U0a0+߲o^8`%8)ȑnv~3[?^?=H?ZWTΪ#͠0`ZQ[X&)FPep-JmRK>|ր|v_8s4zkK7.!J4 1K<&e`]e `zu Bdw|iz V}0ewTh(&̺ikX2vQkxkx50玿0_N\gqjڶjS<`m8i%ε8ezQϿ@ 0 F0q'rN"Zl6eayq2γgH Kg$=EclE =Ϲzs__g.T6B>s]9WI(r&tk`U݋};=tb4Ź8 ޙYhӺF] n'>rF({ipW{b6pZ̀K"|e@0{ 05;80`e26/X#F{twX DVʯI/UaVdIC3'NZVCˑU$oWԑ7 />{1$ٟelR(8)~N5oD}M2ptOHÞWjlSɻ ֘I'S5-k"M5n&??QA-֌yc86v{@5I/~D(Cy}y*fZ0khP5mK?N1n,jA!6?w~E },*l)lk#3L*4e-ia&b?q?L5 *[/MKw׽Kzwq/HI^j0d.`KxOG 5<_p4>%Ђb<<ߓNKb=pU kx'_q fE= ~kE/ӿ /?߼ٿ_᫳O<;y1܆;˟ޡ7^m~4| /w~{0ýK`u}0L* R|Z~07=@)T[ ;l). 8 &bUJT~ f /x#QV9*by"#ߠfxy_Vpר>FC s>i wU<9(Y3zƲe6ʀqw! p4H*fQ}g$YO6uס_%& UaLH~6l8LAŶ 5 2 1 2@IDATx|EgvR(UPAE+bʫk{_U  EOr vJk}WwQlbCRB ={ٰ\ȥ_gU&TA򶌌FliG#qzslH?~HQ8'P՚ϱ)lcIB(+`>NI+|!}ܽLPPk6n{m9(ߎ ~RvLS%}is$ϯRErFTKRo>"iЉok/< V$boR"YH_oL$144vߑ>m[O铟c{&\ h5㑒ĔHӟxNJn5Bxu9M?yU$3d\(\ah_`>YߙIHIOڴcuA5CuA50GdT܏%+y#MwX|90aοw81馹'fdo\s< f03NbZ!Ro5.T6< ]+E` Jʧ%LZb4^F==ș{8GydzEJzj#xvB<1%Lof8a%F!7e7L fJ1F EiVL+ཁId_7oNqT.>?tëk%pJ{<ׄY)T+sq*s*07'iIuE(Q-4` $.T.դ>{K׵&/ z sYޔs as(?NqF Oqޣsg$#6Â5ov`\o%~ñUfZʑuLJڙ~!GGւU՝=כr s/p Af#x,&7Z1Wb:?a= a&xwx&&uiFx_ i!;^w.ʭn I3Isi9o /C^6}lc=S #C2UR-פBfZrn}K|MOѶ>"YNWBw7;-`:3ӛD{Kk>gho;XP\NjK(k&x2i'p%[ϑ~&:u^g-ހ\pkGw-97 {8=GصzJObu1hKLSd٪'w[OK {< }0ҋbekK!:Ll<ޒ ;mK|QsJ҂rfgjB_x^\}}8څ^N0!4|5Lgg;AA2.e~ X\]s3όPC~}9uUڀXΖ(tRrҟ7QP&>¸!>im }c?R7l?t)c$y3kW)`$>(R#F}z e'zB/J%@6 C4xXF{5ܞn~aҔiR5Ǜ}OFE7~}r'p.tju59wzio $69%A q0B'qIxQg_MZ*?ͥIus7Tv1}mw+ԔRo`-,ڑ3^H[@ai{hv^*St`KIoI$t_,ޓİaܔ_RlD܈`my4 c*Cz u@z!HѳRΐ@gpCsũ=)h $]yT>(֭oz{EPǒ'GBpBN)誴%+֮L/B7߯jurgi쎦 9;*"{ˆ֍2⢺̠Hhmi)NY ž҅iK!zv;[5v 04Src3'%%ӡ i y;^\,H Yd3:+?hp1v)B|ӧyF.oq"Zx0Aޯ >̪x.yPH_XgEn2/y[4t!|aց@ttwsL ~M6!Qg[+ DRkT_}>prB_L1)&g6 Ri)_::7EUQnk$H @q~R{aWz`K-' NK+:cS3N,E!ڣ3m74dtQTCyVYt45T* 1l<[0T:`|i!au'@]އĞy'WLvN55 prnŐBiO -& ?ݔq_x7Y /wUTAN]-bLC9cQ(p alc^[´i&MGHoo%ԸjZג5 ǦzG@5^F Yo {ݱQ1|'N@uJI(%dw3._ @B՘qNIx֐?za]UǸêYn&ܟn Λ.NKiws0~%ueT9i߬cy*E=IμFh r5z~%:gPFS$0@xnIl #A8͒a^xLs4´3a Br 2;Ҵ" DzKnZ6XCM䮰P\ |PHE:0hnw 8F/bIp-L| j;N;p]z2c6t#yIj&)R=αR=-ސnb ! t _797mނ/'شmc/#q1rr;ܜC㑮yBÃaJ9 ߰"c+p 1 1: #wJTZoXOYtfZŲU4)݅@\`N VOW_}UZo=5cYnWSb09CBoLح%B\PGw̿&̀)ŠrG)A`eCB PqAGИJ }뺮C =C;LE9u X~v=@A. S65mp]n_!Bp{h۶̀{m (.ڙ{2~>jR7q`a̓j+0u<#XIF;}:6 }җN?|X3pCiZLQ3fB C_w-!i E4Fd<$Ϊx Nux$Scax/5Ǔv]i5h?G Ku@V ? A8]>iWMC-χT%+JS)O|ˡCjJj+%f_{Hi/ݼYzCiBZPgfz|> &VE =e xos4xw`HʽKڪ|)`Nk(L{P;S~KC$<`3.,@14"{P3d_WBFͲ' j_+mXvu#ʴ}B|Z8!4Z#=bCB$i`=7~eAh$Hh/cfڤH lh :'tcξrodQC) S,;Mxy0N7ֹa,h4= Xl"m*uҿ42Z.Zj@'Ry)YdW.S:.jrFCCa34r8td;'FǸ Q08Xr hhFFEt4#޾ްCzap{U_ڷN'ѝ[a?T/xE 7}\N*[Y)=K(N({41x^TS.YllXZpB yKʔ &ZvXEE x9r[A#ʦul8B[Mw`<^Fg.arɁ+l*ɦQ`aKsw3%Ǟ) b6Sv,&t4vOɪ(ɤe ܜIȁz;AaLOүh̞BЦ8l7 h/guu'@t1u)u|Zl94ɱ" ͦ4mҾ9krd/^fhOEq:\OaTrc*N卦Sw;4ED48x=3Qwaz A$ڜEzL% H_Baoս(yZmA';#2-0ϦǧVMP'kݥ'M 6"Խ#_Շ%mz|I*ٞxBv*`rwNXigib1@瓸K SŧaODoIZc,ϻ<N%Cb)ha:޳aK@*(&_$t>w,{gu'V[{|AL41KD' ܇:-~dz]*\:*3U21\Q)@|o>L&vp~:ٹ\^Ja~ fg ?Ñs1aж( qI8: )82GCߌӱX 7 S6`A@mPlŧcwxZp c f8pO.%f6<#K jZ7pzEc<O}i_Yln㋑/2n)::0q8|,x%/w8Zro0\`"}f.EV\{gEEC:{psq6lh% 4@ÊnCN/¢ ġN$)pGÅU9~L@Űݞ`\Qܵk?c!-Qe*GWIa_qG #&vs?/S]k!/*hWۥq \|^3kcD%SKq%X5v&i$ 5X&;t\Ω]Q;1ujrw٣݇T)% 0a7뷬2Ci0 :QF9~3wB|. 
[binary image data elided (continuation of the preceding figure file)]

neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-provider-compconn1.svg
[SVG figure (OmniGraffle export): "Open vSwitch - Provider Networks / Components and Connectivity". Compute node detail for provider network 1, VLAN 1 (untagged): the instance's eth0 connects through a tap device to a Linux bridge (qbr) that carries the iptables security group rules; a veth pair (qvb/qvo) links that bridge to the OVS integration bridge br-int; patch ports (int-br-provider / phy-br-provider) join br-int to the OVS provider bridge br-provider, whose interface 2 reaches the physical network infrastructure and the Internet. A DHCP namespace (qdhcp) with a metadata process attaches via its own tap port. br-int tags the port with an internal VLAN that maps to provider VLAN 1.]
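For orientation alongside these figures: the wiring they depict is provisioned once per compute node. The following is a minimal sketch, not taken from this archive's deployment guide text; the physical interface name (eth1) and the physical network label (provider) are assumptions.

    # Create the provider bridge from the figure and attach the physical
    # interface (labelled "interface 2" there); names are assumptions.
    ovs-vsctl add-br br-provider
    ovs-vsctl add-port br-provider eth1

    # openvswitch_agent.ini fragment: map the physical network label to
    # the bridge. The agent itself then creates the int-br-provider and
    # phy-br-provider patch ports between br-int and br-provider.
    [ovs]
    bridge_mappings = provider:br-provider

    # Once ports are bound, the internal-VLAN-to-provider-VLAN rewrite
    # that the east/west flow figure below relies on can be inspected:
    ovs-ofctl dump-flows br-provider

The per-port qbr bridge and qvb/qvo veth pair are created automatically when the iptables_hybrid firewall driver is in use, so only the provider bridge and its mapping need manual setup.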
Network InfrastructureInternet OVS Provider Bridgebr-providerOVS Integration Bridgebr-inttapInterface 2PortqvoPortPatchint-br-providerPatchphy-br-providerPortInterface 2PortqvbvethInternal VLANVLAN 1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-provider-compconn2.graffle0000644000175000017500000001351700000000000031130 0ustar00coreycorey00000000000000]SȲ+|1ǞlNd7-n [Nn=mɒ 14/׏~m] z<y zNy-_/hJ@i[ƿ v~tP..WQ<%eO$pa!ɕVPQ0B)@炇sSMн ݀o8%rXxidQyeFEp6N1 ؔv84{ڂI(]m !X[lwxbvsN{'~w&wz?u}F%>џƔLB(9tB9er"AupF*9c~q5spƊNbL_g4|^H\҂Jm5[p:ʘ򴦎0 25n E dQ2/kyq2N FPވ5qQYTQ(,4g bWҹCꤕ?=_nЇ\rsoy0$cjA݉D:7GP_z  ZlIGj 9.G},F?N\JP8}!8 5 !=R !!!6 =6OO>T*eYf֖KANԆJD }hEH8= mhƗ$ (&aY\ď'>hFE~}i ]=n)|Lnm |Q+gGpnN,^n2 i@33MBtoϮ~] ^ K(5-,7)+"""""V  آ ќ\<11wcRPJ5lCͱ;]DtCt['t;^V4{4&PyC cD!Qyޗ78i$E5K&vrXѳPH9znIZ[] 3$?=rFqL̝T ςR<,[NFƈy73,Z..jrRL«OťmJ>k)qo]+cqKGm75ѓ-|+35|+5q+pɆkxv̄K)[7n4i'aki`E6x 4s~;8}r'hɿ*]2`e5 !.F%(((Cѽa:BmA$\hL(!=vrgwsܻ.{շۍ.:շxqӭ>Dа`V-s{RQ3<KYIĽ%y3{Y>T+4EsH*~y0ZFz[42k uK[2n9-n 4\[bx~ dTʸi18?C?F\###blbf6:YgzT*пUC=<g0x:"!9$C ihc' a!C>xWfXtDA-:" tD#bm3 d%PZ卆"=oRshJeAH&s8KODPx"R>Ms8<_K|q`JZy6Bh#B$u)eUS?kUD|88ȈBt8(!6Aˉ3b),EKADD=5̑m}( x0mh&@ _\*I@d1N"N W Zv &m *ή!Rpی+9*u31FR .y*[2$3Q-@VK 4m=G{Trx0K"=KaR)ft GEAjGDU];yn&t^ ߳Xwwi$1_C34M;02F zuUk:;f *MNy_j`If1 @DEj)&U%睼&}2EVWt0nw5N Ɍ?jfp)̂ #pTEֹN hrJ vl7.#'`^a 5+UU@IzEU`CUUTZl |~E [.Rrm< r ' wئ.Ŕ9aS`GEХ &FI+C^L ](I(0}- KB w =72ëBe:r3jP~ vl[i!͒@PHermXT.gGZ``6ԑ)늁/%g 1PCp"20!>/l enNC=Dgx͔ć s-"sbT.Rc?_જiEm(3 gdHнؽ,B[Υ8'aݍsFa .b.n؆QX pnjdImT+&0|0ki^􏵃 $'_tsWtnmg7= jyGFj"/nOY.Pڋ'!.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-provider-compconn2.png0000644000175000017500000043312400000000000030306 0ustar00coreycorey00000000000000PNG  IHDRIsRGB pHYs&:4iTXtXML:com.adobe.xmp 5 2 1 2@IDATx|EgBU"vZ 6$=A|프E_{Ŏ]"*HKHr{߳Ɇ.oݝ3R $@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$@$R  HPhDsc1xk=mz| ]UL(͇単ֿUjE?lH붇j^р9?+kyc))ϙVѶC)׃e'*I$M B,{ ߑvdsרswE*\[h 7&dܦK]CJWnՎUR\Kes>7 )weȽKVnx R"~eۗ܏le,oiCchuDǨ~Q;azv8WFR+7LE=}Si#PzX~Ap(K%QoDiXx=YI0}(s n{'C=sV:_mߔeo=> ZPZ/'BڱhȶLz07{h0gܶ/G$@"@*Q$ $O` e[ K-o='>s5{=gd=ҲWB@_e}O -ݻF/R*;TB1*_Rhs*k%a&֏Nhؖ_= SpRdϻ+K>RDPP/=//B"Kxw]wקuI4$@$hIJVaZ<^. 
Fqx<I ߕ6'Adn.MS(xj]M#JLCoc[Y;zZ<IM Y i1_c>qN o[T^Kϑi5F_[үjʧ}Ci)[>Gu[&Xn8[E'^s49X[Lq'DmHƵmZ[xHz HOvޯQE,Y^cH ?|L{0/=›XC@D{X|òs`} v[vwp+eχ =>YF|\Fw.3gsu[a#U7.c=Rvs3relzY=>96<0,eB?wK֣U80 =}T.ϛܽ}CҺ}9kVrP%\#t^nP%J cT5BHCw ~~rpK$jK"T$м |Sm}T,[MN΁8r=W* LсM>F ˊD~> R&EN ^  ӭH'uMm]὇wߌWHxL2"C9˛U}eyQh ]_gyu_!O'Jy?M'ۡٹbqm Dnw y0+j°_2Ph~ZUƓWJ qMg'+(W+jEsQ} (P梽.i}<,+k^rt䪍!]w(iA[U\;vl'(;W"r>vpU,؊qk֏cț "ߐ [ՅKutB+uV+W =w{.[Dr@_~'(/"_^g(C⟷ )[8jn+_!Ay|1؁ ^jWNh7I?If H"ĊV{Q(4qV[ZQǸPvm ݵ)׆D7!i,Yu%⧺D͏1C.sQ[A7?#ñB1L Escg^#Ñk}Oj !Ř32cu;o'$ XoCSHac&@ʻH Ѫ fP[<8j?Z@^ӿ[p9Ĭ\]AG8~!Zܐ[=;G"jA ӫԄcw+1FYw_w㷰V{p pmLXVL ݶ[4n*} С?Byb%Ù/EىZ+m2`I6( eSt@=Fmtm3Bc]hMh+=C)Wq]]ې6.g@ڃY݄sOomuvYt m/uW9I1"t"I`>YAok;3?727/ٖ=o޲2;f8DxG?Oej鏥ЯִZqo0u%cیrH В|mpe^`q]QtR- n^GSn}X2E}'3pWAsP|ozH1k=ɫ YG0oi%Sj ѿC0bz Sc!w`,m#U<>ù=a%IzvWe;FUa= ag3{$>bB?Qa]xջ]]v$X½e[m[Vl(H5^ڥ.U$N6QA> $1-QE#E(ֻ Qq)Bu/BgU$'͙F%a=p~TqޭOy}O9<ywӹ[Ě'~*.I^3%,8XήF~B9.0;r-FGex4Y)5 B>Wι֫d2| QtRDuj5 ^'uWmim74(o+3+`_(2wa) ns.['WUpÙCHD"KX1Q9SwCШ{.> u~\*kiPʭx"A;Vts z5c&YE(Lɡ1*Nsb{}a:hBb*c}1ZLQ Hw8SvRG}~|XO$tp]i+{حUv-YsoZVqE0䅸m}]R>NYpëY6-\W$)JF~Nw>6[Nju?;}ܸG42uesEkߡוb5ၰ MkHNzWL4/Ŷ(I25Ns Ц! D<ɴ"YSjѨ",1rVA&_<5Ļel7tydߏY~X1]yQCMbuPu>U(p[;F-uJ=_:$~n(Y;JEtM,+I?c% D֢H]%z:=gNpE#nJN@q? el=?1?/"O@*¥M ,7n( Jś!St!_ aw%;\rt3τ^ xw!TR/3^ƛYr\ƣLnz%,Vyޏo~GV޶{PQؿeQ;Pr$c}=Gb*azBM IgUy{5 ꚮ7¹F^)Sho2QxAI6'Un8;uWN2 4l݀wOXP 4!y'ykHp(ۺ0;j?bwl*LҘnsA_('̀$@IAhЭB%cX.֒*S if] ;!)r%&ʅweB,z<QP <*&V<8SB39W‹x%YS!,3QSVD jhqppdоJm n-Y/buBm64.kTC} !6,Hͽ56m=6c[Oz4U_d2ASܸ2`I=ʔ%x$4jF߁Bp#kB#̈{s#޶}ߚ"xtwލ.Q>de!ٞ#)(zp@ruO#3Bi蒢Hy=(N!jn o{4نaax' ;9cK V;DQ6E@kPEt?Ō H 2:@$D&ge-(#.6/^EuÀw#UX✑]x7RKPׇtmDP#da~[!͇6>EKwX)TAմ PK?7DY(LrNTYۀ:.vŮw9?(+TĻewcZݥ;qc&%N$m'- TG@Uçoy6mW2㝓oA0W#ҟH4=u:jm]bnlaڝ-GiQ!B2e᱈=U ,8IG] uIĖ\jٓ)}6e^![GM bq{g$ǀXBRЏ.Do)Rz.sN 69Ie)rW7Ics=Wpq1vg9gT`a.8%)kSygd= n߳Mk6s` Os~J0^=|>@mx骋hiC XϺ Y f&+dlϑpy9GʹGƄ4cL(*3Mџ2Lz`vK'1W0/,>ABn:+'S8*pS~Nf)yuu%  w\ e~Xa9RgIhKI{爂%|Wӡpq6<;ʞ)}~Ʒ*=+2eدCmb.tܑOB oy?%=z=GxVH>`r>8k˔P[~CY_^LO$ H~.oxqkOI#/0 ݸxz/@4CW+eʎ7M24ڿ#Vyʬ!XVnW7K>YasTziuɘ<%.`{|{fX82v՘yuԞw,۷C;{8&M=NUO=yO'|{ B3rf@u+cCDuZ,ܒ @& JЬ b  @8ݮVDL@$@$@$@$@$ВPIjIͺ JJRHHHHH% ڬk$U6QJ DWI# Mm.%!Gܿr93>Zt}?:wߍ6={xZɜ)kFx# HhfI?B}$=;ZQҳ %q'mk MV]Aʸ=|Nz0Mk l[-Xd=EzuͧmP9>u).jY3Ǎ.69 @S$4"iHn?]Yk6 @ikрqΙ1aׇS&(=;R۲_2 m AzUYݶVbgsQ`{BM#<鍯mЁ 8ܒ?zt%nK?y E߼ڐ 4u6zׁ'Gӧs}Ql ,Hgٖ$zn Usl_(&feljulɦ'v<9<OEeC$bPIj1M͊@ P`- qadLʩ ŭ@z(/͎΁;V>Ӻ3'xaTW>][_kieTJBPܠ4;R<'oĤИPӮa\{O}H;%477l\`Χ5t'L;X,^>p=FmWEV!]W\6s2g*66!:o(;{J8}I;,3Բͻd$yS"%ǃ2S>Vvks$/5Q!#xO?KE6kүUZkci\C9'@@c5jwPboц'XH6Ekߍv*?su9)ƧDYKrz}L>5}ӯ:@ڿZW/;<^ps<& d!e H"QPOگߣ*bGV,WO6kppGA$JkX1Z.KmsճPރWq+PxiWZka,AePwi"/ٽgʦ>0kܣj,6 /?u6\?,x{=Yh;F7k^к>4om@UgB.K1 ɀ @%)I"JaN 2|a9Z( b_ÂcW3cO g! شA(#Nxi!p#(R7ZYnN苔CS!x//Í (ILɟop^)m>33ł$YwRsbtP?`8)MyC߰<ˬڛ7Pt*#nEd~GݔeʟáQKݴhX2Uc궰FE_ޏڱTFV߃ 2si݅4d)Þ'({|=Ư9O,>Vnls f2{rs :n~n:nIH`{\@$@ID@=,[ !=\6% ^ űh7rlt%FxJv}'0 FO_߲= QvKĚQ*:EvO#k,_oֶo!v^xCqD}a(!/zu4>^@uj7.~T)wjңPn#޼G sܮsȹNmWvMiKx ]`1{{!mn5W9eIN;䇳"YxD>+_%Vb*4+I-ۂ⩿4' d @KR2@$PA2(.+"j؁ $=P ! /نhQ*/;~3 5߲X^P߲AmlK( (nP"[2{/BEʈ-nIh&P)_Ӄ:Nʮ?p ]=S\%U=Q‹aA{vB:KJò* ե޶v M+z4RMAۖӶޯl[,hU[<)jQE|NץX ŭA;-XyaSvҁ+KXwap)`A]`9nkrFÔHMX/ t܆o4OG%&?^C$@ے@mycދH#}MwJmGIA%DݑY!Jظ}hw'F@(C<ԯlEe5mpd5SУx*3UWR`&rK01IqWL{QRMT>=toMk%. 
Mb W̵u(!-Vtظ*~IAJʬگ|e}MUUPqZX۠>TPuGS%7(iMQb64򵛆g*֎vs}4|ީuM7(ur9϶G} 'Y5e\iojl[OrqpHgCqPa<9Wn'㰈uz=YM&& "@KRR5 C$ ,y[| ʧ*ˊk8v4%g:'2KpbHm Ӗ/gan]a8|oOoY~ZؾF /wMZ3ڨU}L;9}pԕ̏C7,s](b؊017P߲V>[Q23Ud/+\۲GLT?p` o BWhJItj1,hu)C]L ݺ*#Gw F@az.MP[Y&N #l|wu3"D|}v>i5f)N6^yӗEr (V+N%U$ɤz LDeϥ% %@KR6 F-iOu*jaZ旘 t;kTi[_!Խ,ZKȿ1d9g‚ő%Q˔P=0PfXe=zӖ]2J7rk[pRe: Eq\;iSpiEg7m >#?& w;Y @qZdS|r]@kĒHim@߄oW%_PnPxe~-C Ԝjvqvϖ.~A^Nu_Cӈfhh@hex SֶKWYRs HI?30kT~_@fo+]]~yPF⯮cPcX2%ԍHHH \?vlx$4R2ju$w_Z.^ťsopo~8[fJ6ooU66_FJDͧ>jǤе.fDw0TΔ@%{VrW+ԧ=eQCݥyY(tU^;vl'8Rv]OC :KYuϨQZT]ɇa 7Dv\uy44>H~v E0|GdBe    h6DI%-vͦv 4L{kܙ+ 4Ij̕HHZ4P呂"{:\ܢa$@M&d,0 $78ژ\;PrRrג#h$5eHJ@+ZzfHs+mmҹ,:khf @Ѝ}Yd}pԊQo[qv⾼ @'`+Qzjgޞ3fNc֪єPԂHuJ7*[ZY+$-M75f7 @!%--ڶ2VK[<#PD״Q` t**Kk1Ҍ'g+HHHHHZk{:öw* 1C$@•RjmmDy @FV8KAbm[s$/QI>e }V*Y9#$2E$@$@$@$@$@B`rEOihXFrIxDI%Iٶ~κS0uHHHHH$}yPANO;#' gLS.WRAJUA$@$@$@$@$PGt?ojnRe5O$^' X(iln<ս/OW<͜U.nɉ $(wH1it+%GArvZvZ9uMkXKB=Ai|ě&hDP:lvsLy ɹ$g{6͓$@$@Gc˜IHm9na^a ejºa{w(>!Ug+G TR^imR_W,i{Ǟk1ְ֜qlD{E/|-)@-V3 6M?@IDATy6EMS.'!}5?gzx}  !@%q2W :~"1{AWQnж#q 񺊥%*DUY%2N&U)-m+IqBz(%x`jLg*wfiezyNZKa%?ӭ̪>pƅX2ME]J* tSqz0j疐zlə{$@$@AJRcPe$@$Nܻ[j,7?#[s0 ,F۽ӝ7y"a"8XQإd U(,hݽƍcpqN;J]=R;U\7$R94t/DYJ 㥮=*5w+)|X~2'IHKh`bgn$@$@!0dȐ2=K-]}medϐ`tNf ؇Sī7/ϱ3vɭWP>y }#cϱ@bԜb58uqvϘbx rvB3P"2Ѱ} ӹn:6V=> @`@a߶Prh e  H,o]BWa*X SVGrw9~@2m/~❿9S,8rj\KkR a\, `M%mv";N .M7m4oZE'뷪cv&Sy}7eceX+\GlxL$@͙@R'R 6F$@$eJRFvN   d%풵uY.     BJvΛ $+zK֖aHH wO¢H$@$@$0T @'05J%kH$@$ p]K? @%T* @K'@%֟HHHHH*IpHHHHH{O$@$@$@$@$PJ8x@$@$@$@$@$ PIj='    DJR%<     hdɾmkV]KkL[˔mߚ3fNKa'ȶ1C(xjA:#-ꦵۥZۧ@YŊ#k݆"{cQa۶6 ciFh_Y@?m6At}R7]FM"IB $P*y4IsjM%) {X}_@G;i.|(I CE-Ǿg7=TZj-Kw ?,_T cYqOL $}ԸS-ی@I$ǣB>ia6I N)l\90|Z}+im&%  =vT :ׯշH ?3o-XF#w@ş(KM-uѷzvf7&dy2-uu}% |؎L۱8 5Ӻ#F>M{<3sf|ltYT׮zvl[4:uhܿzFb͆Ayt7g|j)7WOWl&֊,n'}A/c=aW3?U&`,n%P靲zݩȏ5S>gmim#&B?Iu<CłپuJ:]=i&NK^rͧ&I{A' Hldn.O6SQ.wP֯SDYYm*u|-m &j)v;d췲4e­{}ǎ(5 Iy2Ŏ0A@;`M= "Ϥԗ @2({tT"̗Le,OkSsME>M9#^<O@]re?kxIk%rAO.|ϲn=EJw@IF}'2S/o*i2vHbF' [/~±Mv%#5)3٬sN $DI֭C]">I~â@uw|HO4}2˧ɨ$0PY|7 x9sr_.:jM-/g?/ $y.]^qJ$OD$SD Q.uM%m˧-=*e|(VpQxSד[*m;*d7ݙ 4}ҶcSK|4F3Oh$;Ed?"YIOݶOV4)$Cvi?)۵ne~/(SāC2\FZ)e $}6|Qd{-Oϰ8$P4}L{J]>M6%ISֱ}Z]a^ޱC wB)da\eҩRN)%|^RQ?<+jsIir@ꗸ}Z]"@;VJV٠V>n m(RϚ 7='|p'Q}VBQ!!SOLA:P]j Po!k^U8KڹVk>1Mw[G"/_/PW]4H?O6mI&8^^VRέi\!sԜ~Sk*âeї>Rr + kԚurQO<[ >g,}W]q1нwT~9gEqQ#W{Wg|VTeb4k7W?ZWզD2lkeM&ċAd$G;`o7Qӯ~uի3Qn(T;S':}dOT7dɟVvݢn^cj咘Tozs܄uפ-Ĕvrhw\-1[/A{V޼$DGZvz9ꕏR\x{3}#/Q)dzEU*b?WIw '۸gnguEEjԳ|ܣJFɹc/6nxr/'.ƊOXLᜤP.YN?fڧo %wUWI*:B a+?>!'jmղU1ܷGuƱT]_+ϿSE]~Q9?/~UJ O.wAP{M=l(V;*[>5ށB*#ܥz~ygTuOZr %vCbòUkM VgQ>Eꔗ de^V&o*1 }v;9An]pa͏:#BG3 }bh~SWgN=wgnPtuG*Uh&UoVRa[I2.^xKGʽugKSSKm**QO2C-XJY{{h͂K֩γv1;#˟>oyw.;j)oܯ&ۻD ImMz՚~w7^Q~$2LbD;hҴrS‰NWi)sRbLM;KWR]wzµ>n5%pܱU@-YVV/sǠ.2 (W*fDddMe.{$H>MvnRΧ;i/vN_-eY<('Loڬ轳vWȓokuɧ2je}D n:b_/ gg]wA{bk5.uINT=Y{ل(I Kn ԭIPKXdՕgzA]F$~N0ӟ/~Q$Hęw|o{hqI͗" yIw*>[N /ztJ'N| BLhh(]wCmr8e+/g>f&Ee7g;99 GRi˦1ƹV^b^ a.(wUI]5+d>s=% RϾg0EDĕDLgP VsO8HǏuJ 5J^^L:{uSܗ 2(~ɧ 8vjoGݔ5IbykSviֆOq¿tĄ^;XW[($bF#)emČA[ଗHYZt'<. m}`ބF/ӄe4)pA~TZjRܴ ܺzB97Lry+UK,Z߯qVo`ZȢ皜Q/Y*p{ bgPBܷɨXA q" ]!XɔL+e2uc;󷔬)m-[{;7(Q< ӊ'A{~'L4{.)Ͽwz;d] 2X#S-)|3mWh@픡lOv68 ,@ݜ)g15ŘNj9͔?Y"!{uiỹ`!7zOC 2kL%+gc&Jk`V<Ԭ<jW]XӈE}{vuŦՒk0*u3tSo&qFc#e.xމ5S*+ӈKŽ_l>;N4ĕJ9+ӋA/YB*YxOG9uwbMKNMA^4'/zY' ܰS k9^+U+Xn=˦Ay^mK ɶn~w S F䆷?kT)4WO_[k %I :wT`d$N=j5J<9^6R~~s"PooUφk>Q;JSZذxX:n rf3}ŏ<ݣ:TfYl O:psàBlsd i ItZ(7/2wt1rѐe[^ҖLR]L hˏ\at[ֿuYg{)lSq긽H'ʄW,tm}B^ݠ$)5 SdGs*Hʷd.}'_(U!dA7Np*~ZKjzDF! 
r2Q&K;Od>A4L %-4w:^<0u>&>sZEu>QoY, X%ﳳ+_dSxXv@˱aX$$oq?mZ@Lz$2 ~D>QRv}/:Dxdv ~BGF˒<籃b{otl>Ugqr Y)xū/u 5F))nƺj0}wީРOՈ{QWf'j:F]AG_^q:{9N ǎ|9!# ;x]JTa,g]k;[wk1R4*{V Wq2Ύ92ױ]٨`q+ey/waY GϾ}{ɡp^RR-j:@I#<%ASdTY^heԕg8X΁ܻ;gl=_Oe^gVeJr% ;M;e=^ V< `Ol .c^-2G@R~pOE>uݗ#dyH>iXne;{c4F,V"w톜-s4Y"Rμ})[Yln~}+, sPdM++R>5>wWo JhT|*8H~cɧO͒}`\ՕQi{{7;$@Ra$l%@BR t\0۸[]i4tGc4Ui޵ޛW=]Q[ , p$>F;U, E4|(;ہGyfXC U,XD( zUc~A7CΚаc@~|?;pF-j"V-| o߂ߟ`@!N2~Z} `A 9"6]܅彭e٪Mض_Μ3|в,$6(eAXz}<^ʇ;B)-ʓ2F՛___'3&Kq~~ @Z j3,rɒr֜2y\P(2/\V{-S]dFv:̷[,$0AT'ZX).3&Ʌgy۲Oveʴ9fOH`Y] `A Z4IтU@[Ff )Ş~gۤw0|а,X AO3 z=ݠ'Nj `A@?L'5rY}5F;#o|,5-jDw4_h^mxxa -$qRzgnYau@hJjCS[%Iz"yǕ'.Ofdeg͕KϚ#>u3$:L|++7՛w#˻3&+7l+rUZGnl.c>6c #kL YƻGY~86z2L(!I·U29IH feIc-X4_X:Wl6.x)$2kD'\X^܀nir_)GkZޕj}kH{ ii'&X(`-YY_j O]?pZ9?#59`J W,̧2cBd a+VB5BBY=4f^h,XLtb(6Ii_a+rd`5HakYjj}vVv?!T&h:`ډ0˝'BD %|h:o*cbj)?D&d=.wġ@]Ad[ܵC{Qj!P>K@@"=F5kʾ6rR.sa1xmzڢVh:3 :E3wKɠhZCNP|(O[0UΙ? + 55FW=vMnzXK!fCOVQO-g ૑tBJ ީC^zw2'?2,U˖d>!MM*>n(\`pԠ筎*ccf&ihY-a >xBSO)UΔX)͐.\,g)ǔoz'__LAqYy{c-A Z<Uh>0I 4Oni$32.Β#ͲMꊲS7J)_~S:HvfY 5ϗ쬨 Ky49'KzL!I3|SZHlOa2S ],Vax-k6K c{^z#ubR?>).'iʟ(,ԑgڇeh/7:oA|n?75c!|2] TPyL DiH9۳$[2Dm>aIS*ᵵe۾ "*!-æjhKI Eڕ!p'YRcn}]Xw>4S~r)5'Z?P@{׀ q4U'o7/oI#:JBRmL|ƻGFx)ғшܲPZfx.lWԙ?|a,T CSC3#/mʣ')2*p=^z}s!"(i'ʘW\d|D̃E g>a_}P@So {p,G/~Gj`]V+,A)peC<;Ms̛ۑShD6d:.Q?p͘ F.{p1ڟx' ծ࿼h=qT>[q0@/ƭ v pq-zI_'䑸$ _;K/W)eE6S]9wޗUViKC뾡^94~\Z;XYzRk#&9UR(CD/k==IF[^\}<.kι}r$'~{P|d^o&OXR ,#JHS"S5򧗯7U }3WxK@'Sc#C?Co'=$6I.W68j\Uv"NXw1X-?$"۱J6SfA76]27Ȭ"4BSEBiwȚ-?]Gz9ܿa'3>-/\!`e˧|J^sچPq:~nC#HZ\\Cqx3;eQT<r{fOCT#FBӷ)7#%; -Ht]F{K7V-/R;IB*be ˷/O .0:eq$p"Y(AHEm"ÍOB#"or|9-mc'gv)-en2A9xDsQr iD_OH/x q>d hY[wgdGlގdL;wdAaFFPsPALPTu&}fˣ3giZk俖.scZqN~4}̩ͨ3`/["~L I|1įq p{R۰!*Q|CraZ &aȺɛU.^-+! @f Fr&TH{pm9@-LtmމJ;A+iojC?EliD+~+yI+Gpɘ"MYjaF.7Btw\%/'$^/rLx __j  Jy`rkvǧNPV$*$?lzӃPDܸ_fB|D;fzLHbST0A@`cJ5snQs\u42jn48L">4KK _-Ur(ߐION(<1$℃1"ZUʔKwY>9/UgG4?Q>IfX6Q?rʉM}SY \A'cU(\!Y. mnO5|i)xjCcA^yij;,s'wc1iC z9149?($s̴POS *MЈ8E:%)Ӑh6U-(ΘMdn|<)sS!|Zv3HZej)@GDM.\PQ$l#[e;7?z,**J`~h[0Deeʵg/az(+=*7  ZK` j:otμ/~62$-رC`=2ҟQH`_ CUL;7U:'ICȹ3`mbPȡ+_s9g=&vJ#Ϝ9E{\n?'ޅAZo `L(g{ B5rWw/2CJ4+EdAĺ)i֒BivJ;|\*ňPb"J9~"A:녅! Iq |Aʊ!}Aj90@`)9 =Mh_[qb՟x$y"p -@N8JKX9;h'|,v ĺn{5*̰la=>1uM;ˮVŽrjUt;kgn@us10KN`^!鶓|}>9٦2Рښ䤁M&Il}<j-қ";NfҐV~8 eLp T6pIM.84; ˆSڛ0S^|ũ'L k7*_ȘMA?ܭN}zȽn;)Ex9&/cGa}`AgΒ+}z&~v\eI1Y4v'F x ҰWΘ`Krht`ީ>Ԙ `d@(k33A0?BYyTg Hv>ޅqe8p- ̵Ǹf-gP~" G.|󓻕C:'fOΚ}\Wcei?K~;TM©9se}%V{:G?N?z&qf^.B`|ֿ.)+]MK,H/S o;.}\23%B,9|.KːktM/?fPcJcT!Ւ-v$8!w+SqdW?ס5- "`݃Knqwt \Иj @jAa܇x?iRV##Q~KTtU탹nhxocKHZcQiNn'Vzڪ.[!3V0`bR׈~T#e%O|W^!NǕ%g)'(Of܀`㔍|z]]#@йYk6A |F.uAwK%s&L?9cg@u~W>z/d! 6w/ $';%3cz㤾S0@>\hBRWm- 4z],-+mYxYvUE|>BUGO[J3/W\ H rڪ`ڃ4/!Ijgb,7!);0; oQY0KEw7l鹹-^[)_3] w;*. 
PNv"?I0\ʂºnA l Uk&fMU; __" <`(MBA4ú% 0mx΍ȹaWL7 ffƞn.rYvקȉJvV# G8S~I$~T*5t HKu<$!.m!iXEY/gLANIyw-~^Xfh9aLO6'L7J{#ǚzs!ΐ.#99L E`$,i0+|L.?_aX߄ih_YGfzYHTmGss߆ kRdΑ|X7ʑ _JU,$"4MQ /vLsjo=Rͅohqa}iǪw 箹Sɏ[twEˣɗgd( bĶn)HJ>yYr笩ߓ}iL|x$[J`~0Sf-\y=03?WEQQv?#?Mj<&f__;}ҒpVG1 ̯17Z%4X@h>[c] -*e4IJHqNnbˇt);<^Dh1Jfc*b#;]=M\-[ukeL|›q9> !]fM|/㝇)-Ӗ}O`;Ea"y7?-BhNwZeAN6$\ID!$%N;$dCT#3d|xE( \yYp*/5;3P?1,q1|\Xx~ ;2KƕJh)H;PM5(ɴCT#O5(NX,w  RGZ*_ى0 е+fW9%[$%ʳ&)Q0=|ݞ!=cudh0V}=`3]ph$gƧ?Ai8p8_V&oos6Wo] Oן !K°tuCJc~yk%~ӞG…ߐ{nZ Hek.n˕^Zn=A#R/Vdhx) *wj@`B$:WMSұLM ErlU- mniLCYa rZ8 AwfwO$` pkk.ƞYuT}H?ȿk.;O@+]9 kkT a4&O4%΍'^ע~@dT.֭7-/4vO!D2.og˅GG Q iCDyWCT8ޕ< !iWXsgQSj/#?'O dg$&3T}ncҕ !HZFOF'/a/tg";{ǩLx$%Eߔ G 7S I>mt$o<n_#u'T~wݖS9oRX#PH\,^E׮|muH4EzB7 -aTMk#JTW֓F)HAx~Z"]&b2lÅIwOsHvưc^Hb$|Ԣ 7# /F>U=ֱ.đP?KEҏM?m>q#ѻ_IJscrDzK}csS+//5J{'SʺpDn߅- 4s dgcdB8 HCk絀zۗJ=U !I1[akQAjO4!iJ2g&]$[@mfIGrs-I=i^0߇||$nIFrjQX IdvuYRE Ov{W9CC>| /d\TQ?V K6i/?W6 $?GRQH%t$x?C4e,_05^x[l%^h)~.P[9ީl?٧z]=BkbJKN;grjWow盘0X'-(No\z_"~z:xKZʰʗoy_XPߓ3g!Kf^9ZV A9Yt7 (g cr7F~Z7NT>yE<"MͧF{:&M3^H0lVd]b5>E*Tvf9rmZ%_? 7FsޣIrG^Ÿ0*^[Ok|y ԴJ+$梹K6DY=֩)Egw>ج[MECeّk8L-0F[eoemQ{\7D(%YHPtL }~ pk`JȼP>c̆}GVmH]WL͒K$jߌh]1ic4+]gz{uǗ.{oxaNswtΣwV܌onc>kΣ}ٖ| в\ɻʛ}`gZ;Z)!w ~vB>ϓD"FuuZ^+eUNI:&3 &'U>ܬJI3Rd?B?E22)ʛe43ͬIIkt;=ࡱ! ^0Mϟ; q;Gla=EԳ^- -$GpujtoAkMDw?Y-6? OyoJ^xr潏 abX23W{4&wҀHcxy"T}t[=Ir':pGrsf,ϲp岪; &pם,/^ǒO*8IJs $YEwޓMr *zWlgU])Ι??9}B鱥x.EL,?3?*h %?gҀ>XJX KNFrԽip6wvQa+?C I0d i#XbIz`z9JAgZ&vM\h")bnώǴõA5vQlwNOx#Mf.> RYDp3áTܗP }CIN|v{߹YuCǩr4\Nܢ@VyvL <5~k `ŅW;4K̙{՞:gOC^c-}4Nq*I 0x<!z^5|NՕ`\! ç3bh"S%g*6TngJTuq%e2k&%'Ki\2BSMTW[bww+Segs1Mv@kD ZHo,d"}dhBO ! &I0KJʋUBc#Ez -!2)/%1Wd>2c>fX/Ξ:[%4htc`LbZMF:wI3iǕ+3^uu'o^C6@GII=uy'x ؛'ljhKV1A%J|yQQ>r9`i )GH^gcZH23 F63ŗn]Pj9 7>1`e'#FMx_nwa|}"EoorJ>i6}mB)2h!劣Ee sY)QHʱyeVI RR9S_q!#i;`nWS 8&/M1hB)P^F@F;OV[Rx?l'd{7nx&'ݽ͍Ͳ3W^yq 翄o.X*=p8`O;$?=Lu;vKW}BBI$P0E*Pp&WϯG0<Ѿ$r xz3cv>RFS>n'9Hm̨i> V2{2M]R)Ev>K% Q;=T gL#*$mv"KZќYv dkc @O K gLwGe~bǶ;xu"]+3_87y3x=Cw ?hCSTX iû(:ޣZ$kzd[B =ڳvA8? YTz K :Z@78Tu}ll0tL~pj(GZ0v}λ:ܝI`܇(v?k߸ӁY=][s#]4 CNRE*Ph߇@ͻ[  W~ᆷI|GT[.^ldۤZijNlgGo BQ)J%I:sE,Bc ZqwI0)lnOG*ש4c[Tcc3 tO@.nw 7St?pR 3si22~[Bד^UC;ÝOsr֔)t-F.iMG$&d#kq˕\n8΄0]-6` wLyy+0'#{Ϟ*kn ʔS{i}ZKRh+ ]ݛWwyQv|+!V }M}!K/(yW}=Ѕ}ṆBStE>Gz1T`FC)Ǵ&7'VHFZtt-JZ;#|e~ Áf%&Ҕahiiܟ֠a81$O{Ke~twOaoh`2"W1l3`)jg`(@GoRAXrㄈn0$9}lL^Q H>4$ <`ixZqdtJuG݉k5Y]Pʤ3Ԟ o#URR ZW/]d{ˤgtOdIH%+m*G*o~D;aã1 Kh*1|{N?XzHnWoBn0gخ|h8;4uRYذذCR:eGbh8/`ffb1:5KlHԀNIz @ߑ&\7(XPb PuZiES9 &<ëim.5vʤNGyO;̀C/T唊ntɅR% *ɠ)ra?Dv.YY+n#$#Lp?]! fD2Nq-1(';:¢)k"~L I4\igh_2əްtz酏-RRs,qa;aāyk*x',iRENM-Q?͢oڈ E{c5 $04,KOR<8\LN6KpB0jwҌ$'v &^O:W d:3X D ΟNiùƦJuq”JyzsA F:2PŖ%Ԭ߄ M)! #-?$~=hɘUB1eOe ltw!կPؘ|Aaa2]:d"H}XAaV5݋gسp?);"j١ɃB )XA.0YAF)n)Ju 'AL@OlvpGi1-jTgH H+J;HS io%Zc*m6=+LfyI]Q\!ӔX3qoBռnWW5(k*M4M(DbNltQs!=_24]drA"W!7&&ҙ<": ƦPSr)'R, v:;zx-s;&~FbaA3nN)BWiCTNLL\tdnsa[|Ngao2Y*k)y94CoV r=!Œn!9& $24+J8y C/4526yAOtg@|*doynH%]o\%CS|~K,MSxY0)uZ AOSHYkhDd#$R$Ѯ$Y|IjAkn]hW#%/6gB8Uo>h,So‰/ADSC4)-hߡ]Oϣᑦ)Ѣ'C!*ȨP=A]߃smAObg0OƪWL.UyX.GG>4iB^LK e FXW"0HK[\c<{0P=Ц Yx¹{`+#h+ߚBwwzmtBhʩfyI ? 
+?HU&d$H)YR HMWGYz0S5`bCK Tc&+mWІl0A=Y-UtJ|9jRW2Jm^巭T!vgcsgfdPJ"Bzt; -UO ֆh΁@|)5Ɂ}tg E:~h^P':@WH[`3^VXž&{߇_ j "Uѫ$%Z@xQ((M&= Yq̟9KoOOu' Lm&Q$9UdM*ӗ$kRSy=`B[°: 4/^[ 3hAK{ G³LytDa;# ;&- {Ql#a,KS`pҦ@Bf$ʈFAW%%3CMBF# "W;e"[gOL'ƚSZz2iwV((؁D,Od`E9raڢ%QH-k*v824P R{un/'ژkztt_Ѡ'_ƀOvtnNf6~G({1,/N1c(L|b0Sb|QqdiD0hhJoO2ӣ6Gb-*c e}9҉ڡF{;r ŵ;UVA˅BD jߵpdg'-fp (آc e,@4_{|@xQa)'qi< IޚcG-*'S6* :) voCTbwYVQ+.$CBS!b%,+aF@G ^Z@o@@f04F,13(fHBXq.C bJXŠ20ۃ=r/dj{ۤB0I@b׌oQ=Ш\U6C`221F?"X%cuLq`kzr 8[-p5z;h 瓔Zԃ5S85 b 9 ͈`>I|b0r7{ JfO /(j]/hv߬iу$w2m%͢ہSCE˃ $B" ¸)=0E0 #%4fNHi !7+ڂ6kDZiL +MS:x-QO-]ޘє'!U>^xĶf,Zܸ`!)A s$ɸa iV& _sG4ùǕ3 ߁P92ȃS+C2J>A2com  8z=nwێ?8+Mԟmvs嵭hwn$ ȈwK9&Tv2shv%FÉ{SC檧B2zO `f55x?E sZb۸Qfp7rHpe%)62 2aF~͐S!=@wكko֑mnSPc  l!'|Xx`ccܛ l ՘Oh>1PA >יi iK?M9m&I3}V[~K ~ˤ\B@MSa0-JVP6{-ƈtD GHS{FNEMS4h5]Ƙ9ۢhDHV&:'7_i3OB0wW뎤;Z#3&qB;Ƒ𑉤=#[ ;,HIY4c _фT:+Y $jO ߎǓ&Z|fv" #0xLp2EQ;A77 Xm0a  db76Xupz 4#DeQ0V\UV~O|!|~-麹7OLPB1rou}԰sX hx۪ϓdeɉQoXoۢ= f D24 ²Gu,u< AlܒFtw~~}fh ug)186?8[/5nXUWi0נ+4m`o¬D{1H|hY7̦C1)i0x< : ӸIZVg>>(]7$E!_#$ ۈZ+V&g^]Lz0?]$\5S~e\ɢi}t.:iq"*z[-ԇLi!H[3>6͸ ω#wxpWj GʱO1w_AE|2֥dh '3 Vaŗ{>]H߀aOK0u1$6ܳ͑Ғ@d-IRgH)MOnw^x$=7ZBŔ,FMwe;ZKʆ)oN)V&5?j1߷yCe`sO ^+=HXEGQSe$"]hW7i!ώTOf|hc< :]6:RxgFΟƣ&cHZz>l \1҃QJTS@ѿ:o]3$Mk{Q*>},(,0`LposWW~fnHy9^;)5 F~p!ln qL1<זYgިG]]6s16̎ܛۥf8}.!me$c{^~2y,)+[#=4=ٲ݇;:G!h sz1h_YV"'a҃IKY{G-[dqt"63mwpޓsHqlojH4n#+>^h y=|h{<&fPBW͟7ٱx}qIGBF7^ q)ω.LTH%bM$m;% 6rER0 9۠^0\%CCIwl]q6-(œ&ɇwN+7F;?a#`;=`B]C9A1Vy_M SC+ˬgsgç%sSk$/茩`k0M TeDɡ;pFMO⍖|k1'| Kdc#I}RM#@?>s74֤)t20wh!Ǻ-V@4Sa{§+Djhʱ{##=@=D?nS#Ɠ&8L~(x/O4sɓO^snr|8@HMR抢‚a -i%MC|?1]GS؟X)_4Fٻ7y9)it%p᝝I܏4ބ$R<2ۡ>tpEyο=ZKMMZ02Ԣ!zʖ>jga!Aܮގ +zvw>‘l%g2 &ɲA[Xs~W~ύyNX=zmܫׁo\4F1ʘkO3T`a}ǔ?=WfFjH}6U¾F{L Sє~dӻ+G71l@ t8ΎGB$!i?v]B9$fp|şz=sPݽޖN/J8:k~B@IDATX^wwWFyl$ʱM|N;1Lo-Ͻ ]Dxlq;:gFǐ7GMco};EF#-!G==BKtdpfIXܸed*vDGr C@͠){v۾mş&s($uDzcۃ )MW߇Ä) -@DƋBRK1x2CsT ︓Ym`#MT+YF'JHZ7Yq$z2Vh A9&7s`Ƈk^jy݉8'V,DOt+OQH"t5O|̃iѯ|VW&0g_c/"lJ 5I QXg-w k?f!`%'exu8iHHKQOO\mmiP2O+V D|ݑW!@d\@M $UЂR"H*.hBGx6(QHҾI\4F?{j@ cf ĻqȉMOpԈP0 GZ@JT$r >ְåQWt۹>g5x y'UǍI?XiX%={e͝_߸o/5(Li`S[uW.5)%"o i!`  D'n]эEH" "W)ff Rh4\n93ь=A ٻ6l}$[zbw {gm6ml_ K_z 'W;ͩmI?Wy,KduhΝs3s=F(;5FN<\=tZEЉaC%AΒ^ᵲ;q[Xvċ׾#~xgbԩ BCKƵ^~_chS6B$"P}iOW2}$+?tLjH/ .CJ}w8c4ZZ3Jyp#BáIjbA3= }vW{`}LdXm!_g4eĸWym(,q-ʳ\}FO {Lg}|4jElc;j IFޖ$a} W|h=$? I}@锖CEn)"s"9z졦^U`dQcSh/o2xQ{C&H;cM߫H)[h$5N;k$>{>PyɵD@"pU.T|1$ح#]! [FCV$LBu/tw#F-֒f7i5[ o{Wj5EdౖeC%DH!ɽ%̓\hLmp62$4+ ƮGagPk<FxCKngI~@mKO_ۺFt8h4\" &W!G" H\\P" @HfMfTJ-^m^< hf8 sul6FU;P 笷tuKz}S" *RH ;/-xh،ʂ~|djq{|tG8tˇ\h|L<&!B_[v^oww4[.l^uuac$mS{-mD@" p-J^!*kH$^AHcaD= Ah ^gM^eAZW͠7Q -57濺:-2ZGFgFZ-+wJ$K$(+H$#p.őzH#~}O.\x#l#|Qn2hB_0uG&h֏{幻tIH^?v&iHfFC6OAC" H\\G" ح1{t'NK]F46Kt>zWˀ UrrۿhE8nF@W{ ]7L%_BNIdazD@" )$SYD@" @y.aà=1L0a>~@w?&R Hoʿ>}Mǽq;hLJ;?+H$nA@ InUV*Hz(֐x g'N[!P@e\_ IkԾ@Zoav[Oe>DH$w+H$@@OlqeڃO.9 ic#ww@-`6weJNȘooWZVڋQk-#CH$F@j܍_" xԑۜ ғ(1Y IJn׭nV-w=6earwQs;e ܒH$nA@jT"  yPA# r{2qG.1rfEÙr5(# ;@{X:_{0D6,@V{Ki+ D90 0r-H܄V" (@"_?d4?҅;;@Q{F~! @Աֻ^>,搦N[DtumC D@";H!wD" #5IK~{3[?ȸƾG𶻟GxE%4Q]ռG~RS] UC=a4 "͸5tc֚W̡` }D@"RHr?yD@"   F`g>K g*Ǥ? 
~#Dya =aZe;k}D=H$*kH$=f(z U`"yvxq{$FHDhDmID@"zzLeDU$[z -m8ϔP">AH$A#vHA(Vɵm.ϒG zyðffg}8Nt8i,g6ju<+ÞBב^0—$ӱvK{}L  =bR.i5ZD9wOl`< < Hȗvcaul`\WQUX e?y{vcu-9Dߜwjfg0Yvut|ʳOq^+;gϕS7S_`7#vaMXp4 D'm27Z۵a|/(?5uٲ;\-  φw% TIg 慌M?EUj`N16QHtx盭jtضW؊"0|{A9>uMW?:>$#܃1q033 ӄPi\'ek{-.GS>M?]/+P=%&Zܧ !/ lMզ}GhNѩcgӢm%v!PD\+%:O ־O3GͿKpՅE¤U*,.i񩷄$E:',<8̢Q9i|\ IEsd9wԲʺOz~_?K,킪3sr<+ *j/gvM2GȍV\(R^O:9(,o>zOtba{XZm{<>3>RZnbp~HOL7Lw.A@~2~c|Q9) e*?p K\ՠbGyy@VKQ1l+}yh4,@Z؇@hzwzVB_SHWܾtIDT̯14u\MH Tds^wz퀼{Y-{dcM'[.FPXh]B|$'H# c$-Sc}a|!V` dR I'5Hdopf 1.H nEfC>6qU*dE{/L$7I!_%ql+Tx8#%X/ǧwPԓBYJZ we>H;.=ߋа0,Ku2}φCdj?RhQ2I>IyM\+_a[sHmozs<rs_6ʊ$ Oo5^ N}LXv`"a>gz: %wܲ3C&46IR;-ǧzPWB;ą'gfDȃ4(IGߓQ\@55/2'RFǻfgc#b;+0@)Bz\By>1>gG‚CB3C]o ":"@Zm.8{婾x 3>g7Ydd;gp:uL|1&8(w&I~+;uoO=uDXCuA)q,K!b"&,I$0|<yyEW"PtB.YFs%.p7:xlV1 I;[,ǧ٩xk| C:SJ:.",8xH I]Nuo9l۸aa!wdуn[Ww79.}ă'ʩ,V4RyuM>RD%6;s@H p.yl^I~c^\*`Ƨ]dL~Dr0?ߣrEC[+P2ml$5~8]>o*}vZi(ǪDcSfCLψ  #Pp܂ADw;+b^/>fIqti4m\ƾ}C hܰ u>۰ڴ~ yzYeEG19\7lzRm8USuf^O #z_w.C3򇹌;C& ɶO$AOK^`6s٩V;S-$\I@C犱˔>:>UCC(51NCgMiFǰ3>^6=BOZ;BChXF2}<»ǸLAA:J³' #ĆƧISBs&3󩶡Vo?@'.p#󥼣Zio}ɥp_~ސyQ3!9]?3c#E@2vMUcSAזo^B@lSLo~-]rdyMSUĂBw]9"۰0n»/dz _X;OΌDG?x#zୋbJ}nYrÉ$)H3_j0QwRuy/n+ˋ,ܥ%C薫#%;Ib^w/fLmU !5J0 f:Y^C7Eh;MCo|IhOAtDgw֢c⃹iw1Ri¨,gP3tEg 9>^v8IGo.76= v<Ù AI#i<~2_\.ڞ)̚kϟ)޵o6qкU SwLzM_B>MKMc0m.Ťhc>,ތZH\||#o0OmIy-}q, N /^6,f0{h W x L Gi\|lc?ג܋kK.≢ޝ7mVؼtx$sX4=l-@ہ7J]֋ J|Ƿͥz݄}GhKQҮ? 0Cg Gb $@p&ȍM7 HJs KJ{V:ZfzG0“-MO&vϽ *nli!e30gah&3UamXf54˟9SF=.}k!1v0߹!KltuX^xTai0 ;7~!jf/y0UK@ R\wq~ ; 1}-³ O:{,z0s~͘E3ua!zdIٗmijmgx~jpcW c(߉]5W~u  兔/fq[PMCH4$ A#@%g頻zؔǩe+hSL1h|`{)M¤boEfr^^efm03]alT5`?\K!|}%NFmo _D8ٮ"wE=kNғ(-)Ď5N~y|SkIAG& mKC34N 2 GEp#ТLP v`ke؟g_b>nk#  %(7`v:SlLavƦnJu m.)nm4vW,d8 p|L!i>;aF,H>ëET_~CYXc[]9N?6l>l7Gʛ;m4"&,GHG#DS ̢?Z+r'!$ԵVXhF禉Hzbb<8W֢3Q1lȁ1،Y0vҫѫN{pM-2M#a6ǓF-w% u~6&r9RO<0w7Բ'~9V 2 n?O5;k"p6x+'_PQm/7{Ha?5ƿx5?#̝>T -o$I8q PܰFB 6hY`k2oqt66qcϾM7"HBHZt^6g߽XaMsLm &GBܿi;,5oxrd4W:[{>]txk~^7Cﰫ]ۋ1>ޥ6Ž!! 8in7 D HR!X"`3h'Zsǹ(FP=Оp <Ֆ$>|pۭ3Gupf:z5PM,9" NMʦ\K$ޥVɝHs; @_D@"0Xl7yl"4Zh8%]FJF gR#-_#ȦW.FN4$cz\IN)+H$fdBnH$p6sWI=p[^\M#B̕jxe}@# ˦K$EᐹAH2,7$v" # ,&HD@ I&K$[$U, srwWnupbF@p%W66|,!H$" !$}4 &&3np#Dm'NWK- B#MRHRkD7補`ߨosq(wWQtD(ݼleW@~*DNzh7o>iTooÕ0[a͝5Ei/' L6_h+zVdB"Nȑr U#iW&R*"j4@K<]CO"ݰ$w/1s(ߟɼ/68 Wl#"焑tg8 +8&rt .Hc 1XIwL }~m!>\"!yqѴ`F>Y֡\?P#b{0vM{z |?or2 K]ɴO}GŎMђxV$%Υs6v͖"j 53QRr58v?r""y( ̂^N3;eCh./QGie-e#_cl^:ү^8Cdg4d\qlU|?-hLMW-NaijBy8gxf 1{ju*hHqB!-x?Rr]շ/3 L6ް08^@qHk!N'"Q;}t͢⽹ (Yb"Ff6Nk$"A)crΒ[SNGlnOm\;m|=&hz~8ca5ͭf%r#7;_m'NSLdMK;@2`/&ҒbRJB4, lO |/6DZz|D:[<=cj^alXF,wIuܵk䮫^H..n aX#p^9:=xb|]XgW-]znp6q?\KwΙܱZ0-.^z]zzmTUH(/6QL;8*؃f=ϛ:Bڸ簘:9!,zC[ a`p\\i Բ]DA\ÔE$I$A@F{* mwn<9R KJ Xz#pWBHH=BƧ[`t<165[TÃR6K.w%‡5)LZ =7ɮ@&pw6Ln\|l !. ,kO[ Ƨ{iPBB(35܆ȰPi?&٥әs&!PNhoYrč͙D}KbgkS.[9B5[c-F`\}̗JHRdAm5 QqHX/ hܰ Zu yL6NW<M+{?2PG6Qb4Q&T6 5zs?Fتyw Ox'D'.IF]3 bKCwC}`BI!~D0 t,Lp,1^Bl%{ƧitU26c?Ze^G@I@֘UԘ,pƧᨇM9z) =Q?L%z~ه޺X\o|gM=p@SUlr穁MF%H"I):g)70< N[ g9G}$*Z֘[(&*zZ%XhN<;LxPΓkD7ٮgPy E@r'R@2pwD&uGWuJBʧ#QȕI#'%1%; f(7&7L. !MA|`l#"X<(m5_o5\]Bz󦊈 Ff ?}C̢q=k.Ro|)|C|5 />CrNb (k!PB/o~I ;QNKx=૔DfPe]8v iϸ #h<*I" z>2ܗ@dDLol13$!p5|`\!WOٷǷᝯA}X8 zs6 ],+,㫶v##D3>D9P[nrsx0_я.Ձ Tż7Bҕ Llj8/ jׁ<!brWBa&e$](ruut(l9DzĤKq( ˪Qx"r\< ;M`_*ef?zΞ@HH<@uAe$3rs S>Hw0$\Oآ/jpS.dye\op1^د(<,X kcz趋(7>U;&u!IOL˵ߚ" Wc,t <ݡH>^e-!|<z-eD@"=zGv޻{ew`Z#I" -\9>q;ƚƙ|~pp XϳwBnO -^u%! #yky%2k2s.-H$@TD(&.kߘ۹nH_e" # B4PZ8s<->y˼ DEb>״J*6ۿr 6akBՌ!Rވ~QbE|#OZӯpp!}?\^L: 0jkc7Vaw풧GJu=? 2`^7p ĸ(4:qt6hdZ:;h5Br(.q;OU \oS}dUP]T0uwQ ]gLD­KYm;/{,ztcrg^GT??``CkzA 48]x :! 
p;o,|EFK{{._JB'h%Vz.j6{&Ƙ%x{ Z 5?]Eϝ~Z z6j QqV5Fp',Uń%FV594/<~wcYD@;-\tkZ0eSxhr8{(P,>G55WA`h 9Sp]ͫmg~NƝJSJIfhv , JwW_U>H`+;F794&z1 T,;/${ ɺ=g}n'dxO@Ra`~>\Ci`_\% ]z?hDoDGKo #i>Mp 9g(*.sxkpsashd5#qSyur["jx0ǃ6LlP :Y0@_WG{ >Hl^%\7A Irƍ|MUL3 SFrKΝ"Ƴ fk$a2S@Ԉc= AfjikǀH12FM100٫p#Σ#CEv*gp*cɢc͍$o' TFf~XOd>MVh[q1u_$I}`,Bǻ>Hl њ?tvFߚ9ӿM{"9'חG/~} jP$I܅Lwww -ђ jit.t[3i+|b8Z:} {ۏȂ!N#Y [")6 6J?mFYcgt׹_ fjud@חΝB| z|GBˣs?Iť/mS91/?I D!N}ޅUG+dt;6}Υs c6l΃7رm\aRi gclʋB^|I) Q'@Xғbg R!pefM9Q,AUbޘGGƹb GKn ;0Vn5 lyFUu<߽7dB^?S90e v)Orc1#DF15ӻ Hc@Z=U.7Y-tf|9cn?ϙD5^Lj;K4Qe1ua2ǮjϿk0ICZ0CE TELO[!`ސvkɔCATe=k|Γ=2 `xKSmzםr!#o=L%oin{SK9Ֆ_1Lzp,91u{Ӗ}]B N%~،پݘ7Ql|r-="c^`4;ŅJ?ϩ#şW?4`$[dkj<籩O^)+ǧ uoѳǑ@R\~nC"2| AMPiu >;,?ĝ 0pzc'UBKXH,-kDiUv%=^8Ҧҩ]G0X>͂r=n?3 !YߠˠQn #BWq)BCnkv,y+ ~riGe'u!!K%ROGQj5cᒜLZ?Rwom==}lnK}ӃFDE{GK jlò t5*tsHe]~7*bp h\IHoZ$)7RHꍇwYN$B23':م(\Z)xun7,)t>fpz'B]4.gsmcO.xxX4Rld]7?TRVm-4icoⲯid}=XN,`4 tշ Dy0<ĉl~ Ig1gˏE?%j3)15>8^B˫a71g]b z)QN8g:@?9@Ǜ馑y>:VFGO̧;cl;u]8))ϛjD}RHQ6B"Ѓ(&<P9#l-# x[1h<1mG("4.=/8اh?O=M_h@uƇDut)+y:VmʪvЇk1_NoDRNvϞM@WRr\,ύU=GCz^SH<\B&ǩz݄Y"8Æ}: N,Pygɾ~n=nGk1.|nwmMx!t8E""+*8À4m쯀%1D_^]"6̃/N H ةǐG釴􊵔8>Z>gvz hlv38ӻ`B!q$*"Lwtc3'D`NW$"Qzg̕tcEr&9v$-hCPI-4ͿB0G C׾]KH$ywyUO"d:me̡#мbx AHpr_sڡ+/<?}J[Gy=XR8o{ DL?": #6S4Emȩ}q:x WЗҍg vҡh:(8^);zm_(>ضs:mO !9/2S貼,ZQzBavq(+ FD_?/L!H]9,9z~6@$mwd δ_K%!pt5"MmC qtt^HlO_oًJ4RA x.5ȍJXl 줘PZ8s<->,+I"=qn=Ar&j}K)܂|CSDk0hqYX)/֨S%Ig9nA˟;RZ཯{\bI@0kMU3~ -ɠe#ga$ _'N&0s4.W܉裒$B.1*ދMq=q{>hOm$4O_43_OX@ ҫCkNŔ냤:Vg4¨m LN&tЪ-t풧m(˧[OQSijlKC)D;EU5T]]MM|EF.W&SebG;L>.e)Fr@`$DShDLMEP.͢KNQ?/:BPF$=B709=!2NT*u*C-f8r&k^|Q}dUP]T0ucbk7wQ |]g.9+ǧDB 3JJKNbioϥKo#_th!aDi @gF$7e᣹mkž·ci0º7gyD@"X$z~um&ZX$/>J{-)EtWQ+c-tP~sh4LixjRGK`T.j QqV5Fa,|Uń%FV5c|gӀXBmQHÌy^RS !1!"dqvyVi09=7- lјe0sݔuNzuWr nM(N|To\ENFeKAwfp Ԥ+,1)F eAɮ]CÓtdSCQ)N1X%, ~u#Lu**ji7eDIZdSG*xd =XyX-mW$e3L(j zrimmb]uE[rڰP]XtEF;auBʿɨ)t)5Q[JZCK u4?[)-.Oҁ3aFOw4$V|B+ɫH7WlY !N(w[C)6ﰫI ! 
("G*4}iCƦfhjR'S]81j@`%NpW4ië7SoVL!䥡ki {ٻlTڱV{m־_^|cBQa%'u=t G% ?[e1dI6` J%N~R=WKnBxeДqyZ,s_hFrrJ wTh38oP4# +כc7v_a|bx F;0 99e>H=ęd\25/o&'ΩGfrb}6i Y&wY8| ^e5R RQR[?ID@vvv 돍D;Oipe#y(Ƨ!$~ zhd@g4@L >2 )YTʲj}!pY@D+viLbo115~X#8H<}ͯnBHUʺDIcs.F(rDz)賍 ;ESVtD= jhV zꈆ֧'Jbb\plQ׈u4@-8ITQ`.:g L=pPf:]]/?DGf}p 44,#J';UE;zPjB :i>72S 8&`mQHKL!ZO ,jmMt_XWtlOmf8u!D0aҡD @xQdś%Yh?SQ KC]dpƶX3|GD]/W+DʠSEuEbـ=(<>Mw;}TxCŴ+ʺ}بLIbsߞ@c͐~>LP~ÔicŌ#RI󦎡TcbRyM_&v#bbK ܩci[LJOEm0<^NIg@*k0dwVnmʵZОIiA~gq_uy]&%Ƌ:ԛB7MeО)2iT=HOO z +\B Hz:Rwh-ϩ#WxU|MD TA$$8-)1)rWXY?ǧjG63yֱ:0%`>儣,?p=80k`ىMo׾b뵷5[MZtCd?OnOmף3xu\$Ef8q~:7=X}BRS{?>:WwMA  Rؔy0+fsbwoy;?$**MQ'G_ikv8!O=>!KHʠHc >YxeN @GJoq 3|VHbX{v;w}eMEȰ vbv @Q @-mUVk;O @<‘NBό`l颚K)=9Il0S-x ixJWݙI=5ɴJ\'ζsnʟ֎Hk 6RjR, V smC x:U~ZTRC#*iwN<ٓPR H 92j9crZ KG+Xqshٕ~W8 Յ,b" qAJyL;Sz'w]gH I 3 AwՋ'1RFՍ^ǝC>ID;KE\2uجP- @2Pyϵ?퇠tIQ%[ P(r7mϒrZSLi4Ȉ"oRwh7?δp-,w߾+R" 7軻`bQ 0hyj`LU'-X !*ñlD"[}c Fs[۰n o6/@dS@d`?$h\gT'yvX tQmDZ((yiЩݘEvUwvww;SR]wh`(*<B#$9Y0]"Y( 6XԅNi˧$ ?_WW2l.c>_曵@0Sc0:#N8^Wwzaw|\[ k;%WUkϪO&ґh|\z&vAsWY{gN<1iI=1Dqr$L”T;-2acpO'E6^)=8zOÅjuViCy"+Sj?ղB i }4@ @\B ](:%$ HFQddu \p1tuwQA]IT^y'SB`➄˜\7LSv$FB(]x\ŵ>U[j˲,m`lPC%cCK$4I^Bc0؀m eUV?Y+Ye%軺wo;{?9('ʋrks6N}b:VfuGV୑i6sgR]sʯC$;8ienZX5%ϒo"+ =ULIzߘ8Jq^Oo4۩a.ҿeijut]A ^^9IB$vkvW5Ih4`Xn&]t'r6>4]38˄7-Y?Ix[4!;|(#7ū3FIA>gʪ LhC6N+/k8h±x1L,YB'3q1@b$ch }3'5F3QR(.BEP4D9Q^P榆0Hg CTq5bQv?_p?07F7Y:; P !l^ʖ0 G~£ ->IBI_U1r#,@0 wAB(s=U"vU}*'r`ڞuv }i&FIߍ=37;scN)*x*}Xhp253ӯ-9M2!8,tlY7l$5Wci|JʝfJ΃fa"n =/dnڼZi4q|vnEQF59k?PS9㒓w4nHf>xQr(wKLt/r2'ؙб 4喤?PgoKV[[֟Mq\??v k͑h#10Z6SuɳOB.@jsM; 1Y}j׼`הl.}OZs.>>vc(*Gr[li(f?4#IMG7ȤJ8{΂"ihxr"mͿp>ON$5'DcCư;sjؕK~Ik*KTRqʵ_6/`(G}ϟ=^sԗ͖$M"`Xgk^YgMg}=75>߳/A5lid b!m{L- -϶OlM-g]=75>߳zznj~|(.p P,`Yˎg_ig*41*`d}S;AobbtJJIMA I^vXx[3- dpDPe 'LZwm&"@�:`~}1EŤ"CC".'D5+'yii)I l L9x^Se d6kuuUUIUyyCYY8Ι""%$6X=IH F&wET2|ՙ\dd$5Ir'ёP&lq^{]ڤrB];yڹm/Y\@i5zi}e t5wmok-kA@Pht,z)4E C0 hs<0aqCɧAMv."c?ʇrFwφhR჊F$|ho|\yM`"H: 5H$AR!i[wP8e%%ŭwGYh "$$U.x8:&jNV0lX:|!$qiiUU]~EB`92JZ a}Q|PAH%Fx?rv8V,^{E$GA {I5I)Q ~c]ȦzIUXn8 čGw$4IbB$|P"%ǙX-p +Pi$wQ}#$ApdtMzwl \Ab D r#hl 9^OLaSSoIơ:|$ YtD0)㨝yEM!*3X éFa##M~DI SCG_1fجMz/& \ཌྷ(iγ#e:8llQFP\J0? eJL> j ZiAS|*,EiiZ G;T)NůF$uh#ڮ H^xSmc-I{gCB!wsrWYY)|dh^@aեJB]6EBXO +]d/{,f4t84If 耦#6e;JMyR͎<5N4s$k(#((GFn*>HHV +~0CNĂ?-kA@uo[-i^x8Pʈv2+;WJYq~UeiP$D0""JLtJN]5J UTrM(3ʎ:H ]/.!+[m9N3;k;g. O)EPbTUc{8L)Jk74H HV>$inAHꗞ@ &5>i9JTK*E9[L\>eE1 v)U|Lh0ckxTImn.BNQWӏ( ?YHGRc| ,=IZD#,,L(6WSf!t(}MS 72)i8J7é܇B 6ec F6qReE]5| … ova %$O/U: I (H99mG 3$3&jl6O4C0sj`Zs(Z`f($A@Ƙ=?o3y(I fvzy0[0`0Q$o ~߯DwQSm/5iyv~o}5v}< ۼz?֜Ye׼-hk"ȧ[C$gpCi'aN-A#MX^G-$$A1 S%'; ՏAs8hܹ -dj OL1<̜9`y~V>79 /%e{v^&o&#=7sV/y||3̽^^39_7ϘCS?W^`p>}9l63Ǽ'=Z_||=αXoc 0c>~[a3 5EI!|?dOoZӓ쐰G~6{vy[:&uȁ-eO/:IҐ -lv;kp$sF5UT{#y?(t~#dIm3y婽LV 1VvB KMyp~ vfx]_ϛ&;gu P3fw||3I'{Q(܇0I+ 0D8"8­_}ͧz(7h3lIMx|A$ l"p;o*2a`bB6˽:d >槏|o˅ vVb{U߆^֨}㩇+ >.kA@4ZlmT^Ò$A@R)a5hv FuPz?;!z#ղWEfv;#sR{C …~{gə/\o#De9GBM"'|oߓ~¥v<7o*VO yild|) !lrDZE5?V;8JP[8o{yI9Z?{dbT}_A0q.xo/vvկ?ûG #eSklcTAסIFs`ܷuG5^M^A5h{YUrL7;;,**"Hﻊup:H}WmUo2;Dzߏ!C:A***x9Vlw &c}0uT>Rz+-.k_:$0!cXrfa&L^ذ/ .İA@hyײ)(ԟ$yFwL/d A@s`:*|gslv'9kyG^;o˜BGA<=vh1Cv={.))IBGx(x   x}}:='嗘 U0`pί_~{}ZK! G|;QAx\t^z̈́Կ^|!b VDmg 3(ӵbo1}iT>X> ?l572/$ #`a]b4Lj8[ٍq5VvZip$A@E@4I6R2A@Z_5`PGc%Z_ y[9S8\Vdy4&?}\{m5b_ǎ8^cmv}ӏo&k܏Ι蹯^m[mޯsyMsV+" U{A@!I~8R4A@|@K5?1QZ  |D>% 4Hj[_#$cS$DRDX@UOەEՔsr+%÷L];YY]őh.*q6v0! 
6>?0~%5qofZP]~\),H_9%7Cv9zgΧ?y_=37:39Q F@ӆ7Sa.[h(Ϲ{!H G#MQx-ZV|,+l6#0 A?b4<~F&H@@ ܂ڸ l~ݔ mzsBhߒ$t@0?r AwKabY՘n p[n7QD &8@y3G<3,,7L2 ߘ֗s:2xƗzafO sϡ{!BZ\,ȧo@O$[nR&vMWIq>*QsH'Æ 0?s;&0&Zbfw9)~T] E@"w@O#Px+*'  ҅~դ-0&*ZU(,O#E j#v$hX~Efv5[y8ux9xVO" iKt^(}d=8ihY~ǩ]0Q~hvm)>Բ;^rx\FLq<2?POO}ۤ I|Ƕ@Nwg ڽ ̭&YI.-@,Tv-fv{@ky% owX>&$O %$ɥmvwԯHf> 5nKOC`压.ձ']ts5.Y/}ut _cSd14ēcX,MZȤNq \Ajl&$O O9AOxa-bycRKC'riƑt1Ej9.!IQAq9Ś"[(6*2=]ǬdnUU[݈`ڟm{"[nR; ~cϢJergQ$ɯ\k7fƚ""IGK26*n1vxf CI|# ٘ y>E&]Z[nt/_什GNٶ˧~L|).ؘ5B.ɄGx铑uuv} UVUst[TF&\C}c7OI[3ŔCcu z6#MTQUGH:_lؕhPty azڒJu*h>u qgYլIbρ6er'AfA]$cXhh,VB:("g51}tE5cgk_ ;?}uzwBuf?AIv1|uMے$EE7go=p$  l|)p مd:2+Rr`:e+,Vm%M7MI]ʍəJ.]b ?[@];'QJ\y;SYz|՘ܒE o,neYI#Of',Ӝijq C+j6;hIb4H4auHR)wL^f^>uNEwhL5N2)ch29}LTO\J7NFc´Fa\=e0&:EyLt 5PN 4~h/pf_I#uSeHR4tgSkIΞަs!`r~R+ט Aϝ/fbMLcF=,+u5 drgvӊM{?r K10 =-{$O}2e~Mf{T\g蟋П- 9^ٿ$)_ym*n6FhPt+x {Ӓ52z^TUg$y3Klf؋@yqњ<>B j>ICUZ+Sdd'WLd@M*7'g^6ۙMҌ -Vm=HRBIAat W̑FvQ ȤNI<]}Y>Vgbd {kM-VLOu񳊘 QMԿG9rME! !sG87%ǨNnca)[AI9];q( ]Fֹ~\9/'QR\uNlgΖW}.ʴ ;û i̛Ek i 4I(q{p[~Bz)~F՜[~*r²`N:AÙ`}n5O}: aIW KOw=M78H;WO?NgC=xdzk C0mӁՋ?+A"?<fv0UԣUL_$яy,e<:k+~`,I4V^ǜ@;)XC #ELneRZ)O^jNo*-|"{ %0oҍ"k!WYi95^ӛLx#3-Y˯1ҋvq6:.Z[QZ۾1h*ӿ|M.))1֕=H5iGV8W6c|;3fy_V4a]=~p7̮ns[gǭ{A (| @pӚm\xa dم+.8rly`m3A@NU:1ٲM4vU6\)xפ~*^l ӤZd'RҘe-Thlt5c.5""Tiy@|Riέ0=iPA"rܮ<f=LtZ#ͦJXLFh}.: ;]$IN]{_" ԋX?Mgz:t"HYArdN&ԫkyL6]8{NlW37u?\/ra`yНON_;aO⼍4nho6£Sl\A"CN@wkF6G fm:%i!L2AqP5xY}$,?y>X.88vNHX&M#wWQj&@H U<{ӔQj2&Di+:6ͧﯢXc ݗ_=qo$*>٠LgSgy+4ƶ{Ϭ~~v6-@#e%MOP:pNenA|OY,vCljRr3'gsJ+\IS,AHm5|&oZ?Oˎ o`Ж՗¥ĝh!w4yTS":U2B%Fa WwƟ1B[`ާ4G1Q@wtJa=<GS뻏9k ׸ YnD6F#m&Pk)ab5-ZҢ`rB7vfc߹y IOMTZ~8FS?&{ /R}µ߾rL4Ni4Df 8ѳ[xpPʦ2:ɤ aj '7tgS*@ |sx 7_D7g,_[Jjc# b-w>ߪF'GYW?Q =t 糗5VwGaݮ`ӳ,Ͽ:@WܕO`K꟟*gCq]، ׃cQK:ҊjO<(W)we{yW{R['A&UJAyG33@g= j>ӍǠML<1h% )Nk#XXB'}jv^qblSS@%`&:ᣤ^]y@d_nQ$3`_9*wir[).y-![[ւ %Oaf9qRfyʍpx֐y9˘H/>doy N<^U\]Y~à n|7vM%N0#Ov}F-ؘzYWV܁B>Eiwp&MO #uOcye<"uz'LĽIqp,}Z46x{WIV`L4hr0FMܭvS;wqzIK yDsp ͊&kb̻tY;ϳ۪^p>q W\.sV;GPGx`l2Ț Ř!`|0KB6 l^mS4zaJkd&O=@iF;lؕM{ٹ  MBfs23չ&o s2%H rcCr&<·S<V2+Hˮ\ ̟2\ǩ!yֽ]Wo7*0yzt w[Qt(wa\^>?BaC״tw(B䭽f\>Li^hrƀyBp9>: Vo;P/=#~$4F@{y?0?(~M'}_"ؼO"(u ʌI :|iNϹsN0!gQtӌ]=ҕ6 QR}LR+42,"-1\ow,a^ [+vg(O?9r'HQO3/$ÅuC+D4AW4t}NK_i?ҒFkaeq#!\/O<½qFpWKs|0>۳i7w_{҂1^x-s\I`O}FvynQV< c/^[}mZuk T+~h@o #m>p-D?3װ&T^tcxZqE3mI %Zn IQ>+'$lC7:]\Ƴ X@0S n<_Y=|T${79HHQ$ zvMяݓU[f^1` [y\F&yX}l{v~=eR>JmrvK5۳c ]9MSUl'?̮ɥ @3L2A5)iC ! E95qin^ ֝i}y,;ABe<Ţ eRN`v Z)@Z  n4WkˍA@hmM8Z$8'\p e@~if{ 팀vn '8Jbf^}A@` 0󻠱, @BCinܫM%Ϛ4^KC bfW!AU8W&ñ ڪ̽"7qvǘߚF7MN啴bVd)79()^S*?LK,\JvgU<[_~  15c>GVT^;FJ&0rRe@{T$㣧Ӗ}iܐ^x.#MivI{s9sxl*('AyEeHb ȓek,6=<"pPKGOpW CG9x,۴rj؝Cl|]|_o%HURU9M  %ė9|*Ξ/^t\!lP׭~^[~|7Tܔחl$D(e0[T0J)|՞܉w9l;D Wyܡ|_Oh%u=tT>]/2՝tMl }^ꑞB9{Qvj`AE% -qʅ݇).xIz[9L3He

[Binary PNG image data omitted: trailing bytes of the preceding figure file in doc/source/admin/figures/; that file's name appears earlier in the archive.]

neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-provider-flowew2.svg
[SVG figure produced by OmniGraffle 6.6.1 (2016-10-06); vector data omitted. Recoverable labels: "Open vSwitch - Provider Networks", "Network Traffic Flow - East/West Scenario 2". Components: Provider network 1 (VLAN 101, 203.0.113.0/24); Provider network 2 (VLAN 102, 192.0.2.0/24); Physical Network Infrastructure with Switch and Router; Provider network aggregate; Compute Node hosting Instance 1 and Instance 2, each attached through a Linux bridge (qbr) to the OVS integration bridge (br-int) and the OVS provider bridge (br-provider); traffic-flow steps numbered (1) through (24).]

neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-provider-flowns1.graffle
[Binary OmniGraffle source data omitted.]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-provider-flowns1.png
[Binary PNG image data omitted.]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-provider-flowns1.svg
[SVG figure produced by OmniGraffle 6.6.1 (2016-10-06); vector data omitted. Recoverable labels: "Open vSwitch - Provider Networks", "Network Traffic Flow - North/South Scenario". Components: Provider network 1 (VLAN 101, 203.0.113.0/24); Physical Network Infrastructure with Switch and Router; Provider network aggregate; Compute Node hosting an Instance attached through a Linux bridge (qbr) to the OVS integration bridge (br-int) and the OVS provider bridge (br-provider); traffic-flow steps numbered (1) through (16).]

neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-provider-overview.graffle
[Binary OmniGraffle source data omitted.]

neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-provider-overview.png
[Binary PNG image data omitted; the file continues beyond this excerpt.]
U#/KMݢ9`v-/X;ϐK`58Fx C-BfzN`w咙@S,X;K3:8Fx XTNq {zMÕKeL q6{x3`L(F Δc7ێs̙5v̮'g]nBp 0fC3kv:X«LU3ppWtI~ľres%9]OW77%L  8{L,8FY=@ ZNŮ'p8B&W>]J ȑD*n9`$d,ٙ@cc_YyppHkU d)/hS M/ ;t|V wk'"BwSO- TLJ H^txiJ㣘`M'@/`\ a6 @u;9FKpNNo4㳿5)mNP jA3\O$/,]jiAQ>Es7o:(.ZO`:>{MpÙ@k /qyY"!1 ܍SҭpZz$Z&*$űNL _x8]_)[ZrLKM(%Xj=k&MDUiUiKMsILd~7NM22{3X+p»~8J> 0ОԱѹM}EHxp/))vվϢLQqnm^%er[1>CsfL]V$݇"*[ I)ˆtw˛$wK[k_C<|76 | ֮ bwMM Lj}bbUE7D&M}r{(8^ӧb짞*tE)lU`YU7Z0313iWe wռ`%wQ,]L(wUAo|%c;}O! pW@ $'M!zw0v:O - = QgU #Rgjp_j/ )6QD]ɩ.zi̎G2MլEI~vAbv:3&RQF^tw Kי@+IMZcቄ[ |jN"+ x8Έq'W!oo]9_X3WwS~|pۘ/A~qEƚ1B @Kz?tBRh6}rn$'M&MG1&'&ڈo#n:-Pp'!3n:G{tE`:٭\i&I-Ƈ3w3*)B.5hG/IA0Lq!QO,M%o ,^@c{v'I& =L &vA~V@ڍ$0i7Mx|񡔔V6݀h _%v=ݤ~. 0&pppuH2^UUK_A|~v7i…Ç0&XlsÍLcwj׍]Ojv7L^515`v7`Lee8h‚V) LJU۪O`_ڻU*'I"BnRU` ` xlx0 n*ˮ' GL++L 0 o a"7%%YV0D0UL!=PFef^^_u=aw/yL *vAiU|2&r33028-4:\?G?qrEד;<>l@KÑeL :89L+.;\2`L{ ޾y1ܔDQH~Y_ `ő/3!q |Ws18lzg j"ҍTJs7QYЩ䈂n fL $ښ_ ^gyw=Kkj~Wԉ5[*P`:F0`^O-2!1hor=aw. 0&j,]McmLy?T8ݜ6G[\O#.7$`>J]P|KQzމ{% rSRVeU?,-Da[Bgh&cbEޥk. . 0&bh7UOw=aw9`L mAZ@T)1?xxkR<4z&sqE`vA x1aT«NB<^,_|(%%ȋyYMaZx|-ˁ30&N-nG'`mOcUUfQC/#ӳϞmI3&!z xs&UDsOs=A)Wg7 J|_W)=dN葱i(UIUR}qI' gD7v7aW1 0O$pO53h!#SBEOדM;O|$}-$8`L mOڒ:v_nP{_X] .M\ݖjs{ UcL Ù E`yEKנKCה`ә>Ker;QVɪ @G O"a"ܵU4ʗ ZK%lp`Or՞$-A.:Vؖ1YXXO&N&5 ~><>o5u&h2MF2"L{O$[a8 ~,0aoA>W J(PT؋{TA݌/f͑vzǦ Ζn \`LXvLrSW%8N$(H23O|OKѪr Hp(] jxp>B GBh?@!(DQ K_:,j8 Kʡ epTRpXZmd5$XT P/3VCUI7JֆǪ驦vQn>\2`LhXG>L $S.D ]~Ƈ?d=Zz*(A+iHPܣK%ztNQgz(*G N{Sw=Zv]5hy[=tV #/. 0&`P, 1\-&Кc+h^g>Ui,/ cZZ -$a[!X5OF.IxBu)HzuSO֗h'nR_/6&`}-dM"r̽QO.'wX7)[saX2EO|wӋד ,f%}$AhAJ$*),'\-\ S;09D'oG+dAE1%քҌ~&P** 9!H iTI>T]oE(yE.Wu] ߽m{nA{:wBNR@SvN.inqi8 =ʑUx|68Na%Ĺ&`y}5a  G = /Eg VIDcn{oCN}#* !" 6nrl}6܋tCy_F`LX1 8E{Yh4^rhPB;DE7 qNo#hQ{Ne8YJKK+Q)}rFlaWQaBjxHİm+-dV9Ia+i9XOh`Luo|&,?|;[Z+ҝ2w L,Kq?`L 4 #T oa&#dBKvn;'l,淿f*9׈x촧UK `L pWP2w~RhNY=f>ޝ1SnW 0&!( f*m>>oBzsH +^tP7pbL 0o zqܥK$tItaJkWؽpNty6EܿYӽ5ҙ`L]X, 03.!\禋M"873_6l7cMy8~9 &< &9&{E!-Eˉ^4S W \OmS>+`L pWr@#&O~.؞bp#9x7"==2?:2Kfu;aL 0%.(w\s"@՝1m)$ŷu!J>>\b{Da)ki8iq%DOx虷B_TUf3 vMQk+c,iiwV6 > 0 +at߷Nx#Z\?A{̚ԧ ZM֤9sRM T]0MW\ǁߌ\O "HxT|[a{GFL 4c:3#<+^}A\}~u .ZߒSOyPսUf/+1xSt Z$*J)"$P'^~~f[ŵƃ\6;p*@Iy%W K’ IUQcmX" ~ϙZG`ƕRpImGlTh~TiP8R&Rq_LVӫu̙r E i.ztNё+ZDQ)?r î}G=nUR =6'ԅM'?bA{RPHA"Apƣ= E}r@4HQz!Kx 0pu4uƣhz;^.nH@Ab̝>偶9-M;SaG-'M{a$˰b/[jG-EAxH *HG[@sXi5 }Ym6Ƌm)~&~(tnKծtHMaT|[!ؚ{ nkVef)1yyx 0f`L`|8h*|foQ6 M5 <դ}fm܁ϑ|N. V E \ByrL'ԇ yNOYVMê]v-xVFGOc 1ǚ>ZDIY>n~C'B߄tw}$/ek6ߺgJim~{N[b"D"4B/D jf7.럝C2m{Z(k"Wg<<F0K~2<Ja.=:?ᾎG\owDYjҴ3^\p@` x{3Gy=ExPp{S~ x~"&9!"9L*,Z6 lGO@IDATלCŷ?`H_oG|Wהg|lح5Z8z}x.ikC:Pp@KV )ctMMp:{9A/'1Jǵ'_%~sC }wO #P0UdxHU.]ϗ+D_--ߍU^(ۇ7lizFظs_ N9qf_ '1XJpd(.7>v@U?YLGy֎PicLFB 6áaeC!wWO~ٻ(d!8 rm@szԾAv Ao <)W?&tƍ\=´ţt ӬSZ;_2f?eWf K#ExsijWb_Y}-4J45/k. 
(9Taksy*{jq M ֳ biha4~' 韀t06ޯ" 0k[?nݖ\莢&QXv3 a0NPvy pE&l@7] =hÂap 73gke5x#`o:(d4g:p:K!(9&˰N|K+*'y WxƌĺZg`6/nv:|=!\&ٰzt~dPv?7Zćw]Rz 8rgay6 x]\YO!6Ίqs_j#/(UVmbԫ\:7o[ A45ve^*:0A{y9|?DЗ7:w&*N)",.-ȡ0G!w9\_gl3%nT (4qyI!4Ԭٝc0γQ߳ko|'؝D˿m!0 Yi{Ϯ1Z[GGrMGTtE|K>o3?ޚpJ!qRƍ'8vu  !@O(652"<DjQ._4#qz1ZCMQ"AP [1f9|K:8=;2,zxt@ v5Yx2hnPZnA3>Kl#(ixj4A."O5w{kEM8?8ZEӺ\Od\o+Gj|V3?9jШuGO]bx8UVUnZ*hp&rxZ]c0B4e??9(̝l8d?ѼN'bLwWP结|i@Nע3חtkU}dԫ斁CR#\aѪM(7h]6E\CkBR;$7#Lo|b Vxi.#Gޒ|l:EQQ&e.3#<4cT=m4OD"^M}=EO^80ߠ_ QFasz0*م?SmcHy?!*k6[נ&Λ\&#/!@o-]|+y9^゠QyܴS> |C}-00LLCxHa쌞h..>Vu7t4Z;V,Vꉬe#HN h}+huGbAQ[{ ؎wxgS|"W w߬\2֖4]4p0$/:y䑧-F7c*&/ǝ$Ӫd B$lx"Qї WκyptMUj+} k+1n㯗yv-dd.4S_BǏka2ҮeI%IH:J_'KE 9{<7uOuUՀ^ӳK; ̬B0B4﹂&-|G;PeE]Mţ(k6Í幖ʓM_-]cysA Fh/\ks~6nAP~ Lb Fy>ZZsaԖi6#>f?<@m%PXEU[[bLwUOd_َ[]iziv5PXUg]h+//kKL0&O>i}%] 5[{FMP}uZ6m/lӁ5dl7*[ׇ_2/^x "0&P@` ?@&4)3r4?]L=o}5֓07Ρ]=O^Ogү1Qpk[cg~(CSp,pI*\#ZI'5'Z}'+_41^ZB8DOg\#Xa# :F OςbXlԷCVaVH^\(m(;> E]<\~gɯ~q2I5h ފ}oz*U9'[{LV`V^VnTM&0k„c}1Oy#o~Y :p6kr;n5՗\&^)ơF,hOT/%<, o>W\A~<ţneQߒ'sZ3/)JӦUf=xPB4<'{ԈnMKVҮJXȵU9ݽbVuh=\Ax ҘbPH1'*-^~'' ߺED4_*‘4|UqLL$xոε Iq1c45j>pfwDM„eʽB7o~Y?x4Z-l{z}pִa95#fA@? vrfԦSlɿ2M.3:*?q]vMϰYkL"͔Le6צ 1TkQd>|jy,wdfV g5 5qjs:2͑vlJY檪ZOU(Z[5 [R_?9`[wry浄`qw.b%4ɛ@8|cK4l_ZXn5W29\scUmt\XA-y;ueJ^E%Xj©qJ&P7^73 [n"q|MBgAжixu>T[Li8ӍMlb2/[_-fhII8ȾqV0;iRH[> 9tm 4+fg&´vY1k!0VO.^S^ K(?@uK: KşX?/W$@oXZ^ڣSiHpP^HSNa.w`?}Z/[5-*'gQUJ녈?+Ewq=cFM|Jq~zLc/ybwDD{`:k!C` l|8|,́[șFTΝ bsZ{qK@ ,X9h]W(-(>&iOddbPjpyr 2)ɖ٢g,`,BlL9dF#ӕBl۵ll'n.-:!u=/o?|VK}ߡ2)ȂcW.Tvm#pb95:V6#GL5ʬ OF;3]shC䞙nOs*$mL OEa2;m9_sY1u<\է&i/i\f!7qwEo{o:o(ߵ{OwnR\ v}2oJ!1  aw"y.;Glܞb„C?,y[_CO8 ա&O ^'ԤzUu]ac/ܔxE+Y~!};4+:Ֆh;/kpb= TkGח{mi' T%IT. !6-,=-w gn{NnXBqjɦ\(z'ú8mEoҒd;NNZep7˖FcZ-&7%W.ZǪM>]"B31>歪.7<&f٦MF搋wNMsM C۰;;3^p)ٙC{tyoذa']դs]B3Bq5\c=zn* )sR\гͅq9S4Οy٭5ro٣s;&S "sƝS{&7Ww7cAr)! McC 24H k`P!wwUw|&P[ Jm܎K۵jj;é[kXM&@#QnB/OZn r6t{.5t ;uݶPCuLtIr[ټq}ѸAxW@sv?b׾b}E`r3|q@Yё]8$`ZZ UkF-gCFBmǷA3E8ecv_9`e` x8<Z/Eߓqj7ayyL>_Z|RD$zbg#Ϻw|w %04̳S٤a٤A} KkW$̉YڒӲi*=3S>?J5w~q6,jaH@la ;|G1Qx~Hna@<@^1^ T@;|k'V®Jˑ@qh s}x-`qqX pOkaHTPm45ąFb!KH笪lONkBdI &T%U#!%U4кg8 >uɥ9׿W2L T 5TW D/k(wh* nw41CFqxUsle;,) "~f;2Liד 0 &PXȱ~k6KޜI&@>XB,Zr鏍fI<&P [:}UAl)wYfL<Ngo7-[O^<8`L`@dLu_^,AՂk&#EwCO8o&ZlZ:J`jcԫ]X+GWg)OE+BU:ՉP&@!x'!|Q߫7IU;X+5veD⇋ǽ蝹8iH1́ 0&j eS DF{bjɖMh88MapϿaBt:$'Y7p`u u:s+kǂoc]SOWÚ)@fVX&?,.Waq'&|VB>` brS,zXM\sάFpmM k ՛vw^IM|)T'vq3L 0&p`܀3SJ=۵ll w`jv&_[tġcib۞нa^+5h+S̕.$wԆ3\w&+ǍS1#&&] 5Qz.󲞝,g[vWm6;o s.Z/r8eF'5/&zN&[n񞑕+:&<&>f5pU^$jm`Gc%3b DD{#a#7S)W-K{vҺwl'5-6MuL?,~Xy3A ҝpIMƕ!r:9mʶ-hm7׶ycQ/$/!twSGY 8¥HvGm %L5ygoB- >XBUOÜ7<5n\N2&8,=N>[/hU0iBi*]aViCCz" [ڧMC@k/C-3iKӳ~PyC@yangvyEG`|׳Zw_;Ql--ZabRX8v?] 
0&@e ^Yr cSM%3K!0Veڪ 4m7&v(Mnˆ"j-6yDn} 6DxhHyWyLbڭC1gRnQBNmW dL 0ZOZuyXyfMS6&W3!rs3('W8rEɑ^zltof*DKw=NGU|d#{y~yURsh?C%v]=H/61dL 0&pX?M 0 v'tՍB{PWf Y-0s6.iGŞĮGl?TS^\zk,`L 0'3({su2ori])^& M:[4/5n|د[.X修?AZl]7DfNȄl'2 q$5 GOCϯܥI[M%!NW 0&7`!0=ҽW*a\Y @ aɧ=X'gF[)7c*h&Y\>`L^U&E̅4DcSFRPH Cmr+# H% MӔvPq:<~|j HL 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0:A@֔VӧS2HMJAwB M KPPsVM/ףnp9?>`&Zh(@B]+x !Au&{wMM3:wmo;ki=)О@a-rJBYB ޔڔKW,"=)iJ^єJ)y6 q6E#'L =lEq\$ls h|5>rx==!bM-7Yxo_zet(eJ7&x3nO*+>~?ބ$Ԝ {WZ?&@ u0> (4*G\FOk\r"DL|%ףN7A DĹG{g }VYͦP" %a#ݞ1q 5Z\J8J9܅;җ'$kIImBKE֧*\U0m}/xeիhzg;ڱA*!yQUw8 @JE3ޕJ\Qgt-+SU\ӊ6W\ȕ+xAfI)晦 FLP$@b(!v+-ňi|' xg\-]ڭB7C{~  aÆ Uh;Xu\F(g ŕ!Lsө9M櫚CJmiG^aO6Ud20gs?zv 5V%HE@BpdL|"~;a_s| |d锁AʛZ}>?piV6FIF]{RhCH% m3S\aLkc1+F'wzB\A/˧kjLj~{`mr\ t~ܹk7={\xn~jP}EJ3=ͤqikD' >&cJS)vy!~ݼ wiu$> YBQRjO9A4oǸ8HO !l!'04oV/<1&-~e* s mzʵ)/Ԭ>< ~T/TCR})wLS=<ڒJ!|/E6:r\ᘴ9SOIv2fNy-[ΓZ·T=5p_U ]4Tnj>V5!ҫPnz؇?țx~3#ېA(}H4|N вs}hcsi_ ; Z9?E(ۭ83ݏ`fʻ~ɍgMp $]ggxӟ<?אm5ā)=OJ}Ouh1'GYB="6Iۊ>N845I7>P^'A*#+iq6Mo/;#_FAxŅXVُ<2 (r \} a{ʛ7CH-fE4+5g{IsHᏇc6(It,:Iv_o h{0mCE&<w %WEX}S0ϠB0AsrJ:e 0{@=\X-zGrXqӥ^߸W/NlRszfO Jy{D hO;riXwEe=}چ83>>}}14?1 q(ÛgWgE?O^B0 t\yPVl?3\]y[7`ٓF  e1&p2|k}2(iT{eE&6ה"{$@4WMT4J'(K7+d)r}t; eHJrGn5yvtP9!P4~YzV|9wR{@;e;43~ RmNnFm;]ǣg{3BZ!<|1m%/cf`"!e>qʵ[ ԧPY`7KM90Knnܥ)&a ɷhR&i-(48%KHXu}o):J\ۊw!ʓކ>={OdF\kAf'wk խQ k6;/Ckb a- VD<%-z t{ш$EwnoRhVm}@ޯeO&Cb f&珌iNF)'YYj &b!& |P:=` ch?I^!|{vC7:B~i͢4x{K s?6'Ѯ2 Yګ9--ch|7sj=0im4C[h4)ƮT v C6b@Puyj&? 2UKٷޱ`u^vJzog ejN87U0b xQa2`TPW:QR Wq0&%Z<r=%y8B}~4>܇9aɰa4buQ!;V[?s-x[7a+i`]R\=`hu( V'|;%%Uǡ U2/S\}^xow/~Y 6cSg .*+yp7dsui JŴ#Xo6S(oV<(*R w-j|pnoibYciІC DyC+t#٘$i6O*Cdw׸1:0؄4ծ A[gw)^]=q1?ӧu{Qb\֥xhxԿ1|l.B/Pfǔ_Sr ڻdFS\Y9Vf}]48 Ģ/ $ف]mI"Nպ,L16lCj''9o 9jxLpg "WaEmiAeP؄`.0ͿΊ" d\{0#W3F ң:htSS;s _vIOF-`}C/A\03>f}wKZqt^Im(Bdx}3-au}נ9wGARvh5L<p{F?_I$9;d_΋|.ɘRcnK=ѯP7>m}6VC~j(d"P%8fBhdh/PFŃt5P_Zɚ <S'^Vҧ;I a$Aی%Lwc?;>G iC|fW5W;3 ̂7&Ҟ}:oH`75ݫM傛DМ4m]Dzp2 )0jPH*+}2<34G('^"1Ii^fc=tr1%tYO̷ +avzA Ӥf{㖵='! o>R{Y=_g7`6 yQSy‰M:LtG-YuLx&<" bh&CF>gh!L;QYȁ 4igʔW>2yr &e4@~$܂Q6+!ӄNXa#4Ҏ-a #-^<)f⃾uL>X-MU6JG%dCɧҮf|л<|^,λrsBRűPMiUi*st@ B̈́&4]sey5L a #u[N@6\Bgz)(o(u> p8$L!rKbL^J^SCsC{. k(hO{N,9F.<bML5'kX0g蝞q4kFBE舼+Z7fWgӈ/(dL؆t"Ғ9)k F00w#uKqQ@hpVIBiB=dQH,sM'Tk|@=?_r;m@aHx%|S9Ek6PU{rF^Wj(*5a%NzDIF^ȣ ީ<,3evń8_ѻ˕]]Xl?1hp'R[Uhvh̽i߷@LN`z-&~Q0܉[P5Rs[ ̂#Ft&0Oj&'ٿu!C`O1<WHO$ꔛB+S5̥W!B&(F"5xQi A|( 5+ -U"+M:@jXu+k'9j`ZzP&ŋ<;RM B E{qjQ5NAzDZ? ';>o/*1 t¾P@@$z;\1 BM- R*j'zg@ <wS\؊k?jffG>daMN7]~Ehr<4&(%p(~V5iG2_sYYo>'i~yk&LO?ZNW%;PyV4$>Gx؍{i!ʖ ?3B0I5c0gi2]R< EAQ䵨{|nq&7qyC+oߒjhHc|I|Foz@)wu_)QN<㟩Ӈ9) 7_ lapnUi L6Ļ3Zh-mИ_{ N 鼀oz@\={wNq 3INmyWE)O{_Oy]Yt 3,OK#[9 *⑨h~v![{ ( PysL!X}>HR?`HwMOlΜ#7DLdGtWtS ,\Ӛ0[ehcdg9( (hDy\m.ᚮ+$٢aJΡ `UWdP4a:KLX\sGDy]]껆;INE+Iqj?TQ䋁* \ɓq~rS X `ysLadQ De  X_&/O|M6UnUwÐwʭȭuV܄"v +)?)py"h?FvknR"yv\h?Feqrjp:^E~t?O1pS"d%W#9pGH[L 7M..vZ6 ز#\l,(СCm׫or_ɜ^ ړ0ZMoIӭ*xONk9㉱'n[lY6MtilV[h.'Tۯ_l#w Ư%|w^zJc|{R\y;U?(g0wЪ~:>Km<n3~aa@Rd\XgފY+~ds:0ToJ_h4Hv|\ /K|2 G=qTTk.[#K`Rqp?C|@zFWF pN@[QnW/6Ǫk&<]8]ݧm.4Ӥə@-%P%^Pb;X7[ *zpLGሑZaUpRܸ }ˁF.;onSHvK&N'sU<fp h ߢXp;A&=8V{WQ;ok7^L+cܷV1k ۰.* >U^Jr;ZƚQ'n&?:pOz_؀{V2zRXX/E8XD=hG6_[gTdk8)c*|ZgDJlKn"'znxh73141wTA5 u;g! zƫW*+y.YatWYF_L!ofzǫDZ0Kp_:MyE ]m9BF}> ?Z@m>Mn ^;=4iI/-z1V3+LO=|$M #^$|d.t8\v ߮4no&EE12t#ѥ塀obܣW#&]d Yբi/?gowa?mX0 ن/!Jʕ~b[͡2| nd]cQ aȆv+^s\V̚0&&Y- @C!a_ ~?y?:̗BHޠgXGom9|oi>?|btcКG >4o b! 
@8|[l#5|Fgc!8Nr7hPhCKTn׋42Vy4>P$Q <@@"* / @%Pc]];S XJŹv+ ;`m}!^;)Tvoޑid'/LLrZU-foTZFkKZeA~ i~t y1j!:g,aҖFzߘDOOi| |BS xG|qYQ$yb| 0& `h){AvyD88IЀzOOFSzf֑I[^"Äjk㊎v0~&շ` 7ΕooJ '˼'v5h Ab0? c>l4]xF oѮRJ&DEéCJH]𜖔T0qyŏ'ki)Y lGG{:1T]}L 0&M5D&pai-lUsqxCpioy d ص>Иv0d!x tM<{3&C(?^wBhd0!|ǎ̀,#N)ֹ[cN<_`=݋z(wb̈⭤'|pGnAt~i32LPN))LiL)Ed[c{>}P;#E/yZxϒso׃L 0&@W[fA1L0#WBQ-*03=0ᘸMsdlea:R`AM o4ե~Mʓ JrtBBSsE}&` `} Ok.ǭ3RgUf1n Py /%/F&';WEp@T Produced by OmniGraffle 6.6.1 2016-09-21 16:20:48 +0000Canvas 1Layer 1 Compute NodesOpen vSwitch - Provider NetworksOverviewInternetProvider network Controller NodeSQLDatabaseMessageBusNetworkingManagementML2 Plug-inAPIManagement network10.0.0.0/24Interface 1Metadata AgentDHCP AgentOpen vSwitch AgentInterface 1MetadataProcessDHCP Namespace Physical Network InfrastructureProvider networkAggregateInstanceInterface 2IntegrationBridgeFirewallProviderBridge ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-selfservice-compconn1.graffle0000644000175000017500000001514300000000000031604 0ustar00coreycorey00000000000000]kSH |:D/ld7!,0)N [6rdN忟,LM VԒ{e '/߉Q//~;uӼ/ͧhz{~{=­7o=8ދ4uuu߉]^$N n}}n8m7꤯?{5jFGw/ܷp0i"/&NN)?<$I~yr&p˯`~|ޏNb''P7ɵҔ\[Sp_ "ܚtKpYԙ6k1?ȆG~nxH R*MHi|c6ɟ?ŝl ={l@.O8?QG/ 2:;;<:& fÚ lS~8Oi e'_OItͦ9g9q⼿݋NO ~4?ܤ}aq(PI&%տpޟFN'4/= M4W2I:_RAm6ĝAғO̡Ð=? +'tUdfT[8vãRR!F9_jp;8%UF+;.@bCRARl> Azg8=Yl邛Ff0>q˟hv&3(-PϩZ&D ~y?N3ȯg tǓ[2W&ҲV8zeSp<gA7.=w9w 03:Jz3= NQ'ā;$18&8:ENI?Y ÔxuЬ( Lʣ$ EJ$;@9G ߥ"/4>q0CYn&BdM$ɚ@ohd獒~^$A[}Fgi? :pΆTyG潇zG?@b=/et9)cUn F) I-ԗ%+aĈ\ k\I.(I5 jLmr!))qx=<~ bHo, D7s\%n3>Z_hannFtCVw/`!v2XnnnnB7JvƧR &̣JdPJH FZAvZ8NU5JmA Xd <^wU*uD**4gurfrZT)K[ZyzӮRP*%ȊIeO!`j9sY\+Jj7?i$jZ 3\rw?&A7ؒ`u 5h]wt@F@9݃ypEc6 {-=0E)1NsMl0N5 "kCab QXo)R2HlHlHl#6)}KN-_[$GIlleCfCf{6UF*k6sP})k0Q2-nِؐ#1skh©gFj~+q?6k-kbNR7SXCz$(-J;sXZc dm\ajZR|^̈́JlKk){p>vxfbm^>( 8\3y/^->{([^-jEi60퓼r露ˍy.4euiGت=XvC܉NB(?buD iDH9ι[pC2R3ӆEN/ASXR w=B\趋0f"'tÓ48c,͸&_-;.äaz'_q'P`^}  "Lǁ"(!(0L.NXB6JB%`gh!w)Es;YϖdnיIqGr_ !\ ) QC@2 WNH)-D,ܪ~l@2 ( UzօThѭS hFwm,ڄo}]& E>P Bzj"RC7 !=>Z O0SЖJXf80땩 J03M ᦬.iyz_31|,T\!=Jvnnnn-b7춀`\^۽ޅAva1)͸,27 n؋먁K8[ŵ.? ҂J,XEuqoE=60K#$.,zI{eHa!f !uWuy;IJu8>A\7̪=ˇ_N ,QHDfVe `paRFh\`X_4B041Ք(]5{(`/z+T~o7F 4LF(4^nތ)5Q@7b)F I9 |F$42,pH%g 7L3bvZ{"|_k jTב8K!ٓvkUo$p#>ׄhEt%4Z1ùYIB#}綯4 Y .9e*Gc1Z2'Y{d5}g=:1tweWC|D7b4dsGlNxa}A拆*bi.9Bܖ >ʌnY[vuzo> S轉ޛHrHr-޼M) gpc6$apbHr%R.R.R.R.RnrDǒ )w92TrII-^He)ɍ,uz0W5%BoA}3LklwEfDfK[:Q1j>P#6U*^b9^̓15SPQ6&í[<mr+dX7XDrd)A2 d0H$Ӓ =M4mY"e ;wr׆ŽRqCD Q$$$$XtoF x+Bp*6>U2p&)" b>Pd9d6]bvzSO.ՆW!=JzHoHoHoHo-%ڽUq4ϡy -:i !!$^IJVz~'h;A>TI|YT(|9aB+=Ɛqk[;76[ pb7J~ӴUu]cZZ`;y(WXvhoϯ|Ƭr)Bڕl ,Bҧϯݺ,@c)иr(o Vz_WfF, Z[2޸0(D<# Dd ֡\+>HJ-,7VŽo]N[b `+XVˊ>T\ &.rkKVXeVbcO Ӆ1t>Ic2i4ta2i,,0 ZJEfE)C=R,,,,ږʢ~k#. 
,#ʗ\ N[ኈ n} I+-.3A5wFYW!-ձs1u*dRó3uT]]o.1~˯}_t˯!bo>YY)tFXtUuA:R) Z Reֺ*j\u#h{jDzm7ykk`Y3},T " sKc)voUh/֬_S^tگZCj1Yc3jq!,Fi2ꆉU|=/} ; pbM\օ OqP <J,9>ԕK$[Ni";JUE{(lGR1ʹ6fEvBNCMRfbhF I0}V V0P"eD,SX WJ隕`9 *kW%˷XF`}wޣ6P՗ r^+SHc'E*LǂXXړi&IYj즫2mfHv@BBąchǽp漛E*{9rrr[kn rgA5򭁱eĹ P5ab Fk$Z#l)MMkm}dh}|j b%WG}&fz'M*Yf?k Rv؊#&ĴP2q,7I~d:Iʕ;odЊV덒D-?1 cz`x6N&[G0\],^8Q:mhuL4ip|/;I;5AC e hE6ހ"@"@>P{ իdOJ)sJ$Yj&"8]PjT@{u'\֡hWDVzʅZfYYK( Ӡ ﻀ0=Q,.񔵘]kLP.[]ܣ?lz~R@zJ}$t)eܽo u)j!ABNW(*9r8Nq8nf$b*GA0s7fg 8>u, pn׬yiԺ gwE pڱ>r?Gx .tiUX F1y?dv婧EAq7~ݾUxx|\UsxЋ:pa|«kOhhtGA&PAvoaZX!`.'Tc_hˆ9K7NKqOs~z0e@gk `i~~.Z=6+~,9#&tU|IF&''eMb7_(\Zىrku|cg׃+x>Ŏq~K}!WgE)6uB[/݃qF09Nzq//d.NWPH[yJ}x[?=>{7Ͻm8V.~7hߖCx8TYv= &s ]qGa1F3 XgZJZ)6<#`l, h"k&~x V0зΩpqy,^ħ~Pd Oޏe?3Y"@ӂ9b>:#ap'_9elNzzuv€H~")om?eB痨;Ũs$$'Y+DAqNXs!UΙN vMK rӼ`Ҟt[/"ܚ;ћ1t wx ?>aw[,PcOkW?t\UJٞz;Z~󢤚O>m9;*+VӳϾyż"f%}z^=t`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-selfservice-compconn1.png0000644000175000017500000055613400000000000030774 0ustar00coreycorey00000000000000PNG  IHDRKXvCsRGB pHYs&:4iTXtXML:com.adobe.xmp 5 2 1 2@IDATx|EgvҨ" UZxU]Ih*$9]PA`_UQbAłAZ&6KrI yNy;3L[!`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`LFFl 0&N`?pLoiKjRJʞҌ5mi'@Q`Q@w-1>!~![dL} 0&Y/> /BEZgͩ0:++$U_;RO=w_|qfuR)U~ {~|ΕiW`-MJs_[4-倐 ,M )K xS2j@ [PmwOP+ɟӻ%#H~,/p WG=IPvk32)7Y"[uns{VlÄPM`s?Ǘ 4E,5Tc,DI^XD*W}n5~&8 L1DS _rt_!tӜ9?sYY]aeljVTet_ǚMY05x _bE-ǻq?Hi*JIJ$d WJd ixަaڊ\Я]h@:]NKjJ( 0&K&t,xS#˹)hF>B~x)a @cxٹQY_48Cw0RfK\HQ(3nX+Jo Іu`6Ү@$yS޽ajs\/QB+֔ꮤ:43= "WpHx_g|ooُ:陆,h ]:on[|Ǿ3&*`e"cq!0ܗ{BsyH Փu/-w`7.6?iXLz1qxm(^4[JMț5)|H:c`~gW uP(> ddkYkl<  (;I? QIݺޛ=f(8)I-'Øw?،gxŅP 0&P a͕"i)Bh=v< g uhŢEt An 6a.-Br^O(9OZ]0.=C_mcMڸHѶJ.(ѳyɫ"ORomЊRn:hé=.;\8ˉi(>GtDrz\DG:A)qMqH/w_n@ ʣ+aL j ݲ\]W5 KNRB}a yRi],"4hp;PcGFBl:,f.DAn ȵAq;e49̓Y#c;ŰqZ0/25kgk>";x1] c bZx7YV(L-Z܍|gU٫<ݟ{0CX"D{HER(eolAӄDSҶ @!^WyA(ZQp{I?%Hո_,4O~~$[~!ӤY@ MjwϥeY~ kiMޞٯdž _Ζcx#2AXtefDDGwB؂t^L Ԋ, [f' %G84Cc9 ߩó9hRDg9SКx!=/qԻ^`_X7%;B!K {>-%2~ ԳEZw}W4%D_/I}Zo(m*h2hGxh3OAKufFi39"L1+=9?Z!h8}]j&~ER:vgnnK ]J *Hs `;d,o_A-ֵŲڑ*-Yi _P>A·#~ZF 4~O3'P>fs9[%V䞞C>zP,Jʇ),cBnX˕ixAzy s)^ó+=QV' 9^P@|i=D YunŸ&XRt[Hw {3~ (J_"^vĎ#ru|A|."C!3poA!+`hi9VIt#\2a?(J*~]D+rnKXQrk&{{G&`uhcJG=ɲݨ%oҤ8mA m9Ua mkayFm;J&t&:ұuyS,|aO@,VZy˟4i,=;xa1T!ߟQQE놢]h qs.e]R^4hB ЋhM&m۴Jy3FЖdd#ٹ7.|pHRG`Ox \h _5{Õ"Hu9>l(WbtH15<'04yhs OZHMܱx اs} (e9kgX}MQ xx4n HRX6+Ng)ZO.PĺP҆cx6 /<OH | )n VN={CPph2s"Fy(Bq07s9C9c(D^k&hvn;4;?'=ǿʽP"ݧ`{cNgq _3&{}#h`oRm%wR=V\Jq ][Jf{|Ϥxt(q[5\i!)nEnPva;^0%.5"Y6ׄ*'Ă@`:4>FOy0Vme 3]ĩǿKOB#g_)S;=GPNDXEI[-uubwZ6,| !}(ryS% V-z"OnBTZ8,CWVܕYɧ݊4醪zR{D(s[Q"3j}x Wldt2MkWx+JdFQ2WS k@YqDU`<HSl(#s+J[h,څ  S.FQjfui/~ VQ'yyxwh`EScL XY5Q ),tlzTåeE$t5ў/Q`7ǎ2NEm5?g9w'-dzwゥyYUn ( Y~Za*Q*ZX#t1#!Ɉ_hĿձGg+&`|}WzXB,!t7GY)O1 o絼 D )FG] 䫑㦮>_AW'aFA4\KbSs(<]j X աY<q_1 &\}7֧GK]{$O/3۬qrUյ7%1ZXfhaUcGb!̺'/LvMx'0#;!2CLQnNфU5F8 l4SUk,(WHNh}_7oRӮTl:!.Sc/͘-Olcߘ@Ҳ;`)/t#u:07hMeJ ,~4zﶯj񯺆C->=Aad=8;EcAoYI@v8#"?Th}Ec͟an18]4ꎻRHS2?Ujvذa56kXepB=]W~1F1څ'bjڄOv[nb9D|XkD8@kȿx醴faujqsl`{}aóxv:y$Vud`'MlPV[(O"_{zl_az k ‡a|A$We xd[R=G㝳תFrfL ԟ,՟!' 
QFEAٹ'aErS҈;#>6x~uJJIqcG(|#]DWmNS~XePmDsscK{YӕwCQ:iD-&J?·GM yrKo9Y+GF:~?~\\KZ\۸ZWO(@%wdkw^vD~Zi#{USyi|`{>moPX"y$V{R"?CYBdxu;̅S}[؍ɷ9?Y]PnL DOYM&P'h <(=m+=ٰcm &mǾ6*cUOT;c֖` u!m[nHɝZK Ca[i7ARPo{4~FO)gAq0p/7nHx a4))[MQnvl Q myְHdD}^*y#KeҊD`%yRTr݈Q~d̒kevReYj=R\1Ў84iF|b+ڦ\LFF8 W[JTF)!{5%4fj>s2[%}+ʠPĵS56+KPb;L4&^@o<i9~ yIM?>`i>|~NJSjpoKCUۖ9mZ<ѷ~뚔< +oGuO=E0ibdvUhFOIG<V6JR S <),'F yMg,`ϫ#7΄zMs۔;)qnKN#UQ|u/=z& cyyp_nDe~tVWLm[kӲw/qCwGHgMӧعMX 7Z;f '',l&eotz]!!|}Ƿ~?6cMm;mb6u <}$VSvLFI&E^Bh<'HOkAnCB]X_цL0LsZ8(aϠ#8#hYh$C 3B%'">#m~FZbc8[]tH[cH9h=w8}4[ДK܉|\J~ TNAuAvGvF4-Sɟ̘wew7h{|ns Q q Qdŧᝈ8z]<ɧYUOc_Z@V#CwvQFv(wP>ا|ێ"l{'Xd=03@],| hʩ̙x Whf#~9X~0&PJG8'0"@ jBCk0r jѨA#z?ѥH7oEDC3gl>*nDW^WOhj ]ܑX?vChc7:1 w,{gp*z݉M+ Fo0KF'IFWo")=,Mzi{4M"/Wyj)>ׅw5 q6kK%(={GYhw#)@$M4jG vrY#;Υkw '',L;UJMV.yH2# 0!6'\:}+WQ0' n(Sfwv[\n<8(˿R͚T `oӈfcߝh[Jm4kq?+×{A%?TK*4L#h]{vGd@cM42D{ ~mMQiFvn fEw~c0gfd.|aOG|kx>,xm/?R5~!/zJ (R%jA2eT4GA+?A7*$37rWRG爝;H;rLip4Ɨc6PO0/6^o%Ba75!u$]lZZq:yy^!̭Py GGhB iKQ#Ii&en*.Eɶ[R+*r_VV£jo4σ :F`]/`MG;8|&$ bk\S F Mٹ m7] e~vܷvR7Kk㶭ɯ^3Y& +5m¬il?; ]\'Qba&?3itMEeY~Yx"e0{riusШ~-eaz]Ja^ i]Xw1gh8]da5fm?{l\q(dȵ2*5rڴ}*Xt`2V /ÉGbermE#c ;*wܸݠQ:4{ѢEzk}q.ܝ&'aufAJ#O$!Rmnu 򴩥Ow]g E/aL ē,œ.@zp5X fn VR-Gt4'ܢT=Mo^uk_*R'茾hc HdV~,k?Ey)mƤh.g}zYx"0RppoǴ9:c ?y=WO^A\;y0bz!%YՏpyBUPv-d=}_(qE3g J߼i_ t@'WQ{=e F1S׽?shzDE  0&-פ!FG6Zn;f8404ä7]1ĔI)p{NipW_F)Z@n{2ؿJv <^|kJ*Ca `=@S=h tј?9M.Hd4Isv_{gAg# qWum(”;k\y*\nx@g{$>ŨYqȴ*rkJBӷ m0. h{UEU^(ſ-!)~0g'UxL 0 P*!g! t3/C$lچ\K}r؅:qȵ)IC")ɒƍتBL.Sh _0+PLH%Z ck;Io#)ZgS"JKQߗZ +d HatfYg6BY-եw lu.FzlQ>4BlV(p&< 3tRT[h2ş ģ޺?}TEKoP4‡0mC?*Oh]Fx(7ۥW>'B QcoS|N?tKVz׭rofb{* dYaTo }αZ.k Wl@c(lў9Yf5+Jah_C"¿ de! >)N0Uã'aEj(Q: [EEa{"Bic7< ~Hn1#%kuNTð>/7Vj(oj)Ca6#B˦e~Q<(G^K!/#OQ#ҤmZo_*^GPnn: x AʀhۥKm~iJiX=vOe#I .ds/3&OE|0&ޏ~KN:*_i`6^]C=⮞1kkRFJVR,ܧm3&N$uH }M}+,4;yVr-}ܴ8yuie Y#<ߔAU!TUa;)+~-R!mRG{yZoMý}y}# ,:3gL ě+K&3& e SMk#c1$08F@it~&^WL 0ixqʞ2&`n |@Yox&?~L 0D%<$jʰ\L 0&l2,^{0v؀3| {-^7`{?V42&'JWxcgo^A@k/K)/{E8L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 4E!=m` z)"V .`K@ C*Rks&/o\&t3@)K~#)F!-BR K(:3%bL 0X@RWb-y=_;5\5&} 0D!qQ}9g b)\ZɓLd9`L6mR Bj>۠.y\ZE`iSN۶7?j1z)@Ch~Fh Rexl^ LÔE>`L D&EIfOUMmJ{ӕM`UhVUdjcNZa!MײTzl 0&< u %M{M"XH&@S,c,ѮwfX(Jtaq`L A P&CTTp 'L 04r?&mMq1o]&a[L 0&T؟cW\%vtL $4T[Y~GI>Щ1&@%vR ^'@ Tz+K!+t&}p$RdL 0#@u%T$paqo`u Tz+K` _cC.a'L 0&`%pEdL 4rS_Jc]}aL 0&м >ꔄ>KahBB_oe Uj4ل҅EeL 0$Sz/cL h ~,5$aQ`L 0&``e):Nl+N{oj/m3gSm ,co;&#ᾜD̽&H:4:Nȸ#pĝ %۶4}B>IzOܫJ{es󂾧s:^ԸAa`Q*%F[Q]%s05i ctyt´/udgۭ2`SzN ósO0p?Is/>&{=ɷfq۞sC< ߛŮ~u;|fL $:ՔqՕ( G+?wQo2?9/'$TjW YJhAgt*DY|Cr6bw@ݫ_?W//51Gŷ*g9/~tfm-3p_DM1n} 6V{973s3,,ˆT% rC8ՎCWTFq_^ݹK̒TV>x^nv\\[gB|劕?יKbDm6c(D$eAy9;|C/1x+n3'xeOT/`D6eg4e{"wZ$yl em2T`d[;NN'yi u=(To̬{uJҮxݺ<Ҧ HƐ/0VNn;]&[Nf@&@B@  Q]Q7\lA 0'%KAޞcݛttYa$6cKQ9J^ef^S$.E Fh@k 38wXz#QbdmVUnv(9z sAQw޵=_ +J$]gò?vc4uT⁎Nx1_2&@mq9BySP@8RJLvMAAJQUމl50R6ges`(zkSĢެZf~"\4151RJѳUh?)p=B<"|CT!*N%r=]HqdW#>PR1*(4UC m)$EnYUI1isp~"/F0bhMJ~JM[,dgW>Z)\6Xԇ |2yi|[!/hXqu)$o0Х Ӿ=(#ǎ@ (:1po*ݺԃ>LToB\RFw4-+3x?3cZu}zs|'`To2x@VRr A)#3% xTi%oJHCČdne %óUcW8FZaF&'ajufz]<>ɏ,M( KDv2٬l$i WZ|q=%T5Z7`= 0&Xj*;cQơ *hݒ4Qo9ݮF鹣DsiFl*rENUGn GQ_ީ4m@GS F_^`Ҝ |艐b8la׺$@`sk1G1MZeYaVȜe.AhD;=;&]nDLuTgcK񱚮?BM{˄y, ck;IEN*| 9/( "W4]aPDV>C^$$K+҅2w(ߠnko+~v?8Q:N)k*WH;b ,NK4x):G ݳX9&@MWeyoGG{U'i91Lxܙ mjg~1sA]?eXb|u;h4<+0)/6MMGn>3Hxd)6bCaFz̏b  p i\ x}ȲC' X +K YT^ W0j)R(B zC&MׅѓM[Zv[ NI۴9J7PBe1{S#ƇQ{[Qa9Y4xti?{L=&ty C: 1 Rew) 8cYȰ(,>`M32bimxN+FޥgJg=6}Jt]O@}O5ܵoUoLM`O7۴ט6YG/)t Ԇ}`N_G'ytU喦m2=N>ZW#/{hD]ճ8fmuJM_-{|"X]Oުpnn[E鯯HՊ1^qt;:7w_,\O\qid YhXسH,ܧm3&N$|04tTkєqՕACԃu:w6McԛœՍ@(O)ND놉]1 |##;pgbHR0G'ME旃8L 
45Mrw%-Hq_??R`49޴.RJ*-j:MxnJnðn/R;vk7ֻ[?k{S&ݛ=eM>Ny/@L 'A*OB!kVQyG#z2˫$QC?唡i2:%5eeͽR9$ 'ͣߙ(1s_x"(N(~dt_=tSG8tn2-%YoR ._*|sW[gr"B|ܕkfɮ韘j"'3PaSy7u$|jqWe%J^$Hḽgn7_wb?gM_ CQ{YV4tDno=Jx0)%ֱm{}dtx`)8#YbKbSl ~<ޤ-8˓/@\D*Ov*|I콷!и% QZC^)Ոy:/Ʀl[}9F~[9mWuـc/w? QWv['qt/K+ qzk_sޯg8&>=Gt~R׽37 v*Q^SH:qo"<鍺BscT4zրy'j;Qr-ƥpUDc' K`zvB)7d&$1L(D!nd@X%Gĉx7d\Lm*""I~NDm,N]H͸̳fYa6@ZsW We)\G#ipOޤॣǹے +w<KXe)=ݟzީ&n];۶!G ]$wN(Nxf@=qpȐaW*lrc^(YaGgNC) +TD$'  ԖSltѥlLbbüQ7Cp$bO}j۾MBw mB(qE%R4/2y$y97Od͗]|+˒0N^S/yJzoRr7bX](oKv[(K4=".-&mh%ӶmZH#K¸4K{SHN4pK >?U\Qo.~CglT>K]vAHͪŒYQ^4)ߓ2LSPy^׃cJm]' T׾b՚ .gm; f6E4@҄lƤzf"sE9T`:ј.5-W gҿ17v}sXݯ"0 |:sٷ]. qdؼuNu{təGW>BK5 ڵqPV! U4n%;d+29ח_%%G'yظe[ |&F;y\뚸we"ɛHET zND)Kb U#_ 6׳8оbG ϼ)&]{LK#y:sPj6 [Ĝ["^8dtxe^,|mr(]ڷ_rR|9pWuڦ7?ϽH^wqk?CW}wW}x{oI^ᗫcA}_|_|4e8_q^/Ẁ~kIIN :z~oG7z Y+b͆͢#:V|; Niha%?YT,}]jF W)8KWvO;VzH7]7 njtz-AkT˕ŧ\,-g+_;vW},bx>/=ܻM(3 1WG4ck:%gs- ;hNїfW^uZ:ugcv-+Z(=5T~ZIWD#LYo޶S}8#_!yo^5vO()MzM\rUզn\Su!}Ţ%mEtBG (,}e;*E|=U66YJf|o/%.w^]ZRRij|`gyQiR߯E~꿡J] Any^=T5)^5:텻sNNۂKK?X!~]DUȴzuk/<.zvük1Uo~,00E@|v! 襧ʄ=WKҞHMbLѭ>P>KtSV;R9*4^5L7ʟTy'x}B: dcw yv[bsFEd}jw# W?J,gD2} RA1*> r Tg{ XEҶE='F ;U]ﬡ/^$[~} v~=PZ:ajq )AlEbSjl`5xW P?d7ҩ#:TzJ}ZA^-O'ߨ&*D#QlwuU韑F[{k9NB wY!daY:KM&$8$؎mYmIH3ѕF#iF==7w{+aYBO $)2hVAzԪ=뎮J)P0qi[4]Mݜ8*[fZ̃k`bn0>6~/WM}Cw7t!hVz{c$A"#y f%eUm݅oOJUBCF`&(14KE}&>RePD"GK䦥Tؔ94xLpӗYH<&rDzy J(GF{n~_w9&T]6I@ӿR~G#px>Z^]bQ a -4̅wuQgV(:yN.QH8Z\ bRD!'<ޗ N$ N o7,B4t C(:D d ?&X{>B@Ϟ] }ީ>Qb>k$>KZVZ +nzܪ,6|S>;;= iШS Kṕ7MHPHM5SpBe-/`=M#uGӜZ՜:PtPatbΤѲrRtD惙~wW(o Q ͫzkjdڸኹc͂/Q[eDy(w^kD7 L|Cb?)69 i\c`Jm}kZHWT׫z/5J4DG),QԝP)af8u;j~߾:M*.pV`Kf* J3 r=t@OzDY]Ud&WiD a}_hޛw_g`}U7x8.w@ +I j(H 1ڼZBUd>N hBPW]6Y)Lj1;"~ (,:G`:x FS\o ͊4/1-'1V}z1j iZ= `٢ qM{A|v<}??{5o y'j~'9[_f_џ]㾻qM~i@+z[X[\:6t :O#o%VG 9ƃ[ 1D_Z$J^G 9 p# AKhSMt)Wм5r)em& -W C,S/ xGŽ '&g/m5uUfj 9}$h?O'e~[yU`4Y`V22svA4Qw"7=@֛,Z^ `( !M+ww*sw( M4gpHE@?5j+ >K" jorW; 5q2WV$>׋(yu뵪߼t+TІs+ftĭWmhI]4 s#2 y.hMݜրjiIUVop{;\ďƵ)Dhߑnw {.R+k4Ѝ~X;*Ӡysߒ'[L|Gwb}ߊ,px*iNDO[\ZHOUt_vz}<3*:׍gʅ߭fϾU뗕?y2b.@3?'!KZxxKnX[COh)\EQ Ma:GS:`AaZiq,o^wW" wEl gO@ 5LcOu *$hG rB uM۪^s lruGGnX$yi 4DVmۯB"3R?}xbظ@}u(Ѭɯܫo:~Oi"F\F׋:JmЗ?qCN)q;0VXh2F `DLWA` Oå!dLm{D/܃cMݜ8X?7`TkLA$v{w7Y{eI9CNkWDt$kcG_u֋xNގ5ke|K=^$|?~Z}@k |?^1i‡?ys(%fNJ 7SJl>:&CPK|N~e#3%ʰр^.lm q}=3&O-D{Gņ?Goԗ!ItYmR B-+x&Hd.FI0$PR0'5X϶=k}}g:-+Ņ> LĎ14 O}j[E1F cݹt3`Lfs4Sil>gN    cCHs f)6`` H 0*+.i@41`ئo7X220000000000Ҁvh0`0d  :~񱿨(DAb4000`}Hh(0GWn1s@P0RPh3  @eBJpT(=CܹStJqOd=>dn0€ `` p 0 6DXϚ(GKURg]ҁcg佂2}p*ˡ祴Z׾pl$37 3w>aQ0Y |g#zԜ79 \߽^I^2R"֎cK岩cd{%~N g3Q\RSq|k:嵌9d $f)&T`oa-dVwG,=I  Oʒ9d˾ru&z߃pGeQjÖ90Ͽݿ]1̞?}U/_xWմ N$|} ʐi R] `|r]&U`qAd0`0 kNXƍ,dpUsḛ\q450Ui}C.c?U־W(GI:|:1A`"o8*OCDxk~9LA2itg0 €,us``o T5HFj?+-CqLNLTRB Ä|  h͜fe\3Λ6N[d.L7:Ku% oY"`jW`knY "?zHռ^/g/TrVo; !rY2rȠ_d=7/S-2·[G7;]IE[O^W͛-K=?(-C CVΞC:9.OߪG&&wM-/HNJT~Ei~WjJ|p{eJ˪n U ʡK}d;E4GV$eD3<\s1_ޭˇq?xζߌ7{(e@3S`r i݅2JG̡@#]7.iA,мi0whawahB21$3ګ+eĐlj.rh@[]ayLft[ტ Dں_QQu*&&4e|%L:6 L GYw]g _~$c9M71o=cǀSMG4$#؄g<0yaK?F A@Ə[mQ4mID'#p'|ԭ43`0`0`0 f)X6000002Ș2<f׭fz +@$1`%en\ {J4[7@-hhcܼ/0@FDzBU=u*KQ_i u5?S.IWIem4Lej;Oj2JC0q~nnߙktLs4@/}O+,1]ϛ1{?6 G = 4E;bkt_TwH @Ғ3IKl(f!!Z^#jMBc4:m,. C4HrKݍ8ܦ# ʲ% aQ5>`sexl) {r 9J.WP[웝Np'!^$LR9(;]F!ư7S~Z10;*k>a3GCx ay n _zc>-y?M1R] [3`%#jT$Ii )iKX͒\*yMg{A{W,BV La@3IJ݊IFž2H ]tJ/*ȕܚK]0PQ`Dw$ $S. 
}a.oDD$ NWB, v|`=e𔺆&ymٰP[5iTDY5; E}ϐWJcH!Xi(e9/ɔ|+WϑT)tB$vɛ[ ^_{.Ze{ȳRKir>FSsΞ) qibrK%Y!0{=R[$^Vy=+;Gi4slr+g.%ħ!ϯ)j/#]UFs&$Z=|_u*7To%Vޫ畁Fv{+4ݎxv IF%啵= {y@1Kc~uP.HkO"v1Nd2ÜcDcIeՎh=#1E |]K5r!DDQZRR\Z!.Ά^%ǧ˱Y`+H\1ۈUC!9]⒤ٖ(nH-\0kTg*A#効,MmjƔWG}o_vwICi.Ci~ɾg$1E3i9p<45f $SH<ĖF0#)aϔs 6Ux͔r{|@ed|}deHιx-{cooa9v4Y0}Y@/iㆫSdE3eSF˗xV_Էwڧh7 ^&AKwfeHqId%rK5yy†Q4|: 0F@ .Usvb:2p,v~J}&:9GLBs8enXҮBFWuGQ6#]N_SYZdhށH Pf*CaN7 OĖ&q 2d4hl9<}\>Wa2CRWHVye1D1M~SyLEc wDՈUBhAlNEhIy8Sj4E%_PQS/`2֫.S>=z^h4nuE(ӻF>񖓙 -n5<ΉIzp{OGvzF<Ǩg (X/o oԄ8!yc e&wo,޳0v\rB<*%a:Umْq fout'4p#0k%ØeG~)aJ_…R\Yω`8qk~] L?BAn.@IDATLMh}Z}6hҨB=T.7Jip4I0@vBn{`4] a{=0:@Qt0ѡ} nx2tdhO{fIK.NN tcr1DIԷ ᭹ Àwⰱ!F,f;loj)+c\@F 4``v{ acjMW?L䊻m.jO #tQCy^~/P4oqO>FN]|_F9ݭo5V ye(5yRмW  O'Ω̣ˠz!M"z(XXnk$hw rd򏞖?]jl$ڡHvje3_Abxap+~1i1ͣ1nVIh!9oߡ=[`kLy+ C󼞛!?xN) dehG)jS??= 4E:RMWa:U JQo'"xВ =D1C !ӖфuiugEM!/Md_.ue~' !N2}3ŏf[tUnOvIU+Fe5u! öP.OC1J{Ӯ$J~x,&$1od^.MvKsS+wՎd_:.NzptWHc2͵ ~oFz͛3kGz>@Qtkt%Q>ihM~?%_8zdiˎG{NJTl:,:+\?xcӠ>uNۺFGKqVؤ3$1y2{he6l0|$ue:g<- O / e)L;s|S,Cn~Rh5OY^}12E O_Ơ)S+CG!W7gV 򻻟%էkuƀws kFV|-@dz['oJrK` zj#Զd{Vb"W5C+Y8֏ [Wzhp1>n,.?^&oVc*+ɦ FfɯoKF63EQ&<$E=l!t)NF~?gnTx#)Q@&P4c@+#ڙK. AjFI xg߳]fX^vU7Hn!gdܨA . BSTZRa-, T! /M4h==g7)t9:=gN  DJD_CSm][Uᯭm[ h}*Ķz2it5NARQT,мpRRrfȒ>Ila.z@%&J@MxkQʌWUp&D 8eqmQԉUOv4(%kŭ m}$L=3%{Voߗ%/I_jwOkS WȖf)8{zU{x|").ͥ=ݜ.sgq#/HxÏnHG?Ԑ/ntw.#r%茄G^2MAW}3\r'˪2e|uum9HS}:K!H'g * 9jlyLc'pxd_g%{CS0o[QY2{|7)Ai8(d_?\nv48M{dG{Nn^>6~oj`FSh^vuz6k-2߅ьv$ى@,?wga/;uEQrԃ[ͬe寙ݾֱ1fP8J/HRj}йU~Ώ'@U*Pg LHBYpa0䟯wB :T!E)lOSO" /d ^{a:&X?ܬ9Ǒpdژ Z& "+^{aN}սuT_^P˪r\Jy44@f'KU&[?Y!DQj_\ST~MEF]>}>dgaZQ'jqȲS]4t|q}sUcr,ObD; idz.ٛqmK!m<&$Gg&)M:gY2EM $`9$NNŃ!1 rDzƉk0J~>vg<)MfL)Vfqg t 0nG d7y{̣/p(kMN)oowh6*Y7 ,ЯZ2C˟R/ ^$%5@ Aj%&&ɹ:Ioz)H\₷^66Q6c1Gx|0/o$/ ?QJRk\X|V)dK5mm ^=/an omf%$L_ڡ&Dt*XUO.[هIY0ɀmF{~}OX"bSv&I  $Ab`̇#ˆH-ǐvڒ1.9p/ R!vRc~IGLe KRRɠ}iuĀ5 |5= ЧW=95RXL+MH,G;o7_Yc bE*%>k kA2;]RR+|5!!&rt,gpZxAT lx YKh;'`fzK(D@..=?ը %˨hv$^u)rDP/'י4w'>OFծBy`6|♁9~qK44إ-eMRZ"kɩ 4+ [!nc2tF%/92KdP}4 [Zm༣?,q T`nIxLc˷ϔAY5ozmwH3?L 6fTVp"@FzSm}=3&O7g@&k l[@:@ؖ;–%ߑ'CxM0  W-T DPL iBm}1i40{3|pt.!k-d`9>"1[I: 5Y.(]0p׬.}%&yI~,MVL)BX8ISS"?Blo`]"o"Kio&yz-i Mư0J|푶6;$I <5':Hcn M84:pQZq2iLWn/Ce1a-&g̙.8ҎY#5E\h6"wr3;ShZ |E 6oW0 : Bzr;Y6Yȅ s^|`q3bb6*ݧ%-,m6low %J.0*-&iA ܞ[C~oWH2JϼVӦY}לMI2uHOy? N꿽Ӯ8.۞VVCORM4ydj K±L;jkMd(RTڭ5š=2f @R@%cd4-4^G~0n-]Gw \3 `1`{x2lރ?DyB[&Bo35޹ >C\ t,WH'cW>2 ˠ JdƎ#R+B&y+EtC ]E7G:NH*rc{%jbMzBŝ01̈wQfDc~ {JSt8e"azRX2O1 d8(SZ\alu2M?]%50qzmO5T[00K{haA tai`ömrȈCcHi?oHHNLz !ۧ&fi ~ٗ yӢ6g} /prbB}0}6G|{״'|8O (nE뜔8Y,!*ǑCsKc= %\V#B[sSdtf ߝlh;ӗQzOTzq/hT1yAe2}\NPDꕋG;S0n9S6cl :h?H!/0Șl|wq)|r#VTI>E#CboKqvdhR&в"ySg]7מ9tɑϯ\(D3ef^@F"?V%ן=[yw \u >te?{Q5ev1dr,3f]xROm1R7CΩIYj+3Eݿ[d!7{-jNq*1>]"Fns[F}:ǹvlx\/ϸN|ms_/3r6G鉢7S㮏w%ڛ7C_'tg`aF_}L6xj{C^",LyÓGh$*U~pJκ&EN3E &cUĻ+z?lOvȴ=SGvLIM]$ץ-Q/"jrU${)/44,?(C1d俊츊DM@V !xhNxKi(ɩ/q#T 7Rj.ݲM^9TU]Xv1d7@ a O!B#,׃,DtDҫ 'c#G o!G'#be[ /(Ʃ<~[͹V Ļ]2n$d,r"2%}k> tL1$ج8ߵɭؖOj듕H{Ϭdb\vܗL o#X1 A+kuf0\ЬLQ9ybTPǢ%2niwcґ1Sds/4M3F y},0F]W]]@HyǪelK]ya!9qE9GH""U]ոsc' kf'`⍗=̐?IӓV`t:`̓9!Ks]݈EֶXN09E06rg"UNM'N4{ވa߿N@ +(+:XۭŮnW󱸧7/R~<IfL?RD|r\ f]»c c;ll;X,ͥROc_֏A`I#Ѓ!*^%_}u^Ӟ̿Gܮ?N{"1,dɮ\cS" O3"rޏzjgk4MOe ZuH f<딕fJ5Aͬځ=]w;j߳ͣsEDP=MZO{ 7qhw `vW!i06 s$+3C23%R uPOw4@yDY+/WJ!=zVx㰣m\V9=CS4b ,񴯀fXw-;Bj|2Fw{&c>)qV]>#"S+(e5v{ {5Ke1[hxw}GڅSK,W Bb{]%s\i\9 YBJ@@msR[7|0=2Fv}uV:WsZP)O_?G2Iᡯͦ$ @}G4$`F܍YqRZ|U$`]!@vtM;k$54FqXcI%? 8"j1axɳwh;C;;BЮe*5AʐLΔ̌ V{gp:r(s=Ogw@C_g<ǜ^W >!DjhP hzlK=C Rq=[pm)#r(?,1ٟVnVpY%&;E+8~ViplGXڞxLID2о4{B}YbP@B۬18Z*$M^9 & Tw\CHѵK=IaB}A; O{uiO+hFzN۰ؓ8 Q =H |ZM쐔:9[[#sa9_Jᓈ1`BnST&JQ=]і*V? 
!hjtK#g`%F3ó$I)QȀV JR1ES^2JH `ǔ1&&|w_|#=YgsZ;܆7e4cnG_d>Pm%#6Ҝ>8N8Ҍ 5sIV}"!Riu1^FK m:+ÚUA2c{ӯ8wfN0E^0G2ܒ m`N@y$^QDӻgF)cZDBS)3hcD Bڍ;>5Q-|o17{+Fnu.: 41GkzhKБP(2cJr*O*KA˜t&Ȅd|ƨ HTtpL#\k3ױrΚwAڢՂ,ڢ~y(}A;25zf)+RĦ2}(5>K21+EmIk` uTdw$#\ޔ*1 4fn 4~Ig/pU'-52,)J7P.I0HI3'Tj,A"sdS}}M<#Hɔk0(c'J[UchYzu^H45zZ]@ u@`(E` \1cֺ$zb:20Lv;֘% Y٪ܻZի͔ $ 8xD@yas2.'QFTT$XgP665v ǖ};:r6N4j^fDVZ9F+)AUrʔ&@4HLՍ.ClFgLjx暽-$!EܒXi3vK xlTN'MB궸."4Xa:Y,ݒ~B6j`ڽ{ѲQJYÔB LΓ$S$q F8 CYݞ3#PLȈ&؋w{C͑iOˆ$Ar."ǥܑd?ye(^KRE 0&?ƿ(Q`t,8 7G5-,j-ͮ Uu2DJP2%˳ Ur8eN0ѧ&ai-2:+^& l~GDȟYY]nr8 JtNtzS2L"$il7"I9#{O[鷔f=hQ1L̻p4׃8,'&'B CIDGYFk0 *Ŏf0!NDѝ/6.Ks.>C`تrAD?hqJw԰20Cbk^D(OeNֈ&?D~ۚqn0$`SN0?E}r{}影,8W఑CN x>H4M2sL6),j ZX&2L7쐐C"Q(ed 2JkcBlJ9z &YqH0Սrv?BG#d\H0K0Gs+Ъ42K` {tڢռiFopKiY5֗DDɣ9=BBxyQ!9#e#cսWXd%Om}(Y|ք!$o6;: 'K_{A1/$֚%IE g"xyoT$䯴Qى<+y_aBMrGEq?zhݣf=GP~;x=<"<ki@B ԨQH%fq%7&aҝ2Y%47|a9⒌RʮohP+4A{GF:rʼn;5V{~vh$ޣ5(L5l j<%=έ-*(<>sISǎu9ׇ10 >BFdd| ]zZ'oJS-Rp1Sl)rsk7Sfôz4U}zWJQjw[a!IAwn_IBϵbx0$Sr):2s x`yef Fh&3ǚou RQ(pw4iJ&+1-Y΀<fBii)JIjs!=TȷQ}`c(N+B}C[z^~Uwf(S('A(8ԂmISK=|xa:hMj[\а( 7@H@IQr[^qH7bx;4hilXhC3.e?{<&Z oysP<QfynO{G\nPjp0 `W1]'ݛ5}1 dH\pz$^0,NGHMҍf"xdE&;ݲ\!B BrOJQzjwZn 84kpx<,¬T, I'sR쒓:2d]t]ϝ#%2y̰Q`@Nwݣ+I0IIi\-q AJ " Z:yB=CD;j^V)<G؊z %7+Yk=t\/j Iܧ SlgC4;2`\ !ĽgXMk<OټȀi(5*߰&idVi;7=j"*9W![攘.n Ʈܻuc>nEdM3LYjw]O;gPJos#{=.{Xor/%x ьڮr~MR}Üon; v |?c.,q%Lqcɫ_xW/в )+=%V/̒,qQ&gEw]rӭ+\.20@$wvV-9!!tLkzt޿y=s޿@ʪ:7I:^xponilg@naMVR`hfS}qd{|Ɋ[3x?#޿iw_{LLݒN m1zo ǾE1U@IDATlhhFY}a. ^>~(!p%`dC)'Do$}4aT{j0LFڛg&u -: cYb_P_O*m)Ť C_־8~wƏ 6#2F(Yl=&ѣ\,_ 'cKcx ~3u <~uQSQ$J芽א*#cuL@+j960r|䆅0ۥ|˪jooaJI@81ۧ jAo%yʧ{y^ϴ@|4}- f%2MUB~ט0{7 <ǹ$xHF:bXD#^2% L'^@^^Ke^VAt˕LK@0GY%O&3̒2`vhi 0Jh2J? %2Gdk,XShbdEK>R6W>َXnŰŦęuciOKJo GPmi0Ы:n~]K{E :'m.QWaԈuj6b%4ʚJ fN)Nyٱ3-֫XMnubY"Vs.$PDfeo(y GV"8"=7^uPa0Jv=?%QBalF3t]RyW#;-A6k>sܬTUo_%60kja0lmM{=\,'~eSb}v,i4B=5$d6LfϒфIb: e*r7=w&`?χŋT>8:@4axS51a \=Ta=4guߨZ_aMh2Gܴ6Ik-h{7z:i}?MtI>+.sBVi%Ouٿz^#@ c@kcpIJLLĖ r|mLϗ=Hއ8K&7K" ReYݖޯ@W|fI{Pd)uցujZGkjj{ϐaB<愛=nMΛߗo߸9"9ri+KDhw:ᅜX?'qP>h( MZ.zu}omw: q'bqvPdER9׾q.jZ-0S )((PB[dʑ%)lW􀼄sd`#%#!##I만|Jρ/f$?Ð@%)N>]YK\^kۼ5;gzm$`$&$sTw6y8l/XyG)h %_R"I/pb ؤ b4i G| ttCj℆+P""$2 $ۘU>!IIFvdHIfR?F"|{ YvL:[f9MdAXl*a?6M}fv']ZY:zL5@ޯ+C9V"S1ҫK|Uͱ*$2o@CFFFFK`?/,)|1!w⎻V=嗃q?n۶4( r3̅8Ф?M݂LH%8 9 K%8TJڄȇpM"C5JMgJ&̓殄rKq۰ km#E?+*_,.%W\M6iUTg"H[-R!@vE#jb 4m$U'A J$D>_rzXu*>s0Ε'鯖oU7*J0{FFF^'+̉!1"g8cMc0 PZt]J)""%l6h0w;ۣuCGsɟ!_mK9:mSKQQ7̚.>.R/SFl{"mHhxcXHZ!3@^h\7|:fPݘ>n̓YxKUH;HMTy9X.Ea6Sy[;tc),ݮtZ,K.~A6(,"j,n,(T\\ߗ%h4Xjfғm+4Yv ]b0$y%^։'P$qqC2^k1FjChJk_/&xFbM2)j~+$LV ( sާ많^25;qCknC`$`$`$O/9HՐ+R7(A+NBJv5YҀ!djO 5Kl%: (m+(LID ڷ!oko/$D>]0iqGbJb›/2Jt$O Qy"62L0\>~Ƕ=S| ۭ5}3`"4   4fKf% -G9/!CFZ@?"\෤@2{"P$A{}Rcp"`Xcj JIJD4J$jJˇH 2_g@)>Y@Oi6V 5q?w*mne -0NIvѕ2 d###@ Kl)' l[>ȠTa$L)4}IzVl.iئ7z e!}Z:9r2nYqLpZ6ot)#)%cz0Rn1icIʵ1Ñ3r-$w_=^Ҩ\cHHHHG$a'9c%f/7FH`\q*#n^&x*] ja͜&0eP TsDoIQ~n1Z>s̸f5X7{g*=kGW]<Al䭚b[Ŋ^ X*000^ [rwYA+`,e/,ׄ*٨4xri\>F.S9|ˀ+ ++(f佡*ٔVi4G(0j("\UwC4Ov:W,+sHHH$4JH?`u\j֮$? 
5os|&8.%5j.jbVuC'Qu0^m+=gO: #w~kmgxWm={vx]a}4%- 600b `S˹L|@+K;1Iw ϒ ϰf$P9Zm٪mcz^Wu<~zYjhgD{wQ+1$!6R=Gdc+!oQ('<Ծۥ]t0Q(aR5OQ^yx ǑK aDTJD{G`bSzm;fFkoaHHHj g[N?ai-0Ҩ5sHH@ֵ{9J̳D]cZ`N}Kr7?Bߊ>eP/`Ƀ_R@׻BMgre$|"ÂڱL$+`" J?[:oةq99Z[f }4A'≑AA.uIX=-О"p>C9Fy>ZiPɓ\[a8\g6Q_^4|W!7!E_rNk 1Tbd Lx<^=R3y$-cg&ˀKɎ}&X&|s9ds0*>].!n{ߗ / ~(vgk;%kpy}sy$Z#U.KmN> N0 '؛塚_Lo20GﰹHi$Pŷ"wY~+ěMVZ&1gII LRd0Jo$8P ,,lZə2}P2z7D5;3^ڀFԕ= ̡\%ox`iG!Ar y3w05}]J@MFhуz(m˵oձ\KLp~0ҳly+ZOҏ2xa0g@y>i1̴e +tǯⷂno^Sk5P*-Iο_$߮ډ_IGRe& ^S<||͵SַOikeCʿՏ_'iIm/ʊ)J~I'Wy]1=iٚRg֭_ w6Hu4(00잇CKJ%4KU@;g^{ ڝ\{7 [^x?Q2\GLKk3R1|T5hI TV7{tag!]H~iQ^`NI 0A#ɼU.-f=;xg{vpmԓЂv߱:'8o>%O(N17.Ga8gնy>%bDVtl-Q1 26l]mH; P]1Lsk2kx>)y HO L~ɷ_?׿Xx3OH C2cr˥)?ś,ټ]xȮ/a#WO^˚|2Vl]y7Y;Q0DDEV??ۛoӓ%SF4 gA&>J42o6/}!~P5!<em /&m y"C}eO4` )n*skѾcA! ##_Y"9[ :!nP`fsr9^U{e(TJm5eu;dP UxhDZ'OԠkv0VAA좡#:/[q|߹O6*憩˖+BRkw"?U$LG;Kd8"u}iD|L&/KةLޯ{*:+UF.pCNaz|}W;.;p~FuKž3L̽TX\R,H5?+X[355z ,k Q%Ў yp/,)pO~67\cO0hOHٴa$(V_vm}.(y{hUk"Uxpoux69[>0Qߒ0yW3~R.#fVDk7oB[O~/WJX#CZaYQV"o13;i~ 'AesQ~ahFj`WY&`~Rƨb`p ]^p B[[__u5a[5lq*39)0Dlc4@e4Myv{*œH\" "GD_(#2F̎BC¨i () C'N+-!9}O: Qa!JDDJBSN_>V#L:˧yK%ϕuyE-ȎUukp'ֳYW_)D+)"$ȉ|hUfU"BLWg-R6m 4Է;:9x{hU]%%,!\x6~z/ y# WLV{@@ԫ};8uFlwA9yr?v+lԃ> =Qyb=ڐ}|9RͩL6=nwY^ld4'h*vi_<,^N qQII(Mwo7+ *FiD۱RBE@$3 d(?Xn TW$[Xu~i)G;Bj7{mZI)ע-oi{ďB|5 X%AC%=A4&nM<3(MB6ÞfCs\NIlײ3|o?xUѢx{w!NhVX7ݓߋ+:jlV lxiOOh"䱴7}[J AV&TFx7u痧"k4,5 cdͮ0'aa4K 9&cm~%6,@g*Ӹ>7wuy~RmS[s6{EJt&r"m>gk6SݜrPx.m.[_-WvCӮDž&vMe"@ej[Đ]s~65eee vZ fR#ͬxӼd@%JvE$ĪYXPwO'ٟDzāG.ˣvW3YNT'd0Z=M)O(s:Β8GؙMn2V{ ]+"4D(^qگ$ܟFޢK2 TQ:XrE}{Eb$\ P[yo䠑|sqpjtk#9䮷X6VpYjKu_xɄZ1`Qճ`Bf! Á,ե!*sUmץ|mmV:[Oz)`(>WZ֕4vѼ_sˏB"{nÞz[[7^Y&#Vիp>2y@9=<9;.I>|b2Lb;µb?e.rk'U(;臾Wzך5^|4|68a%P3NloE2:e[.S5.et9cURj'?&j2P/I˗K7y,by+%1ȥIg'rۑZË~0=-,\'{tTU]jTU|[wnE[y}Ƶ D!7ktC|R\'iY[$-sGu*IVX~G7>^zClxUzo|vwn'[;ŰWy'_O|L;]ruu]~r,!2e[Tm)"{T>m\*g@^˵V`2q1Ȉ`ґJ>7m_:}mdX?=c%wb2/ϥOn`z5 v/~y^|4F[v r:]ǎaDH:}k6  bpWΦ|^?@T<*-]Ke({pDm{rЦ:<$X@^>nr[h\G ֩~8y;g$CD]W oG;<ȮS?¥YcoWn Gvи](a#΁o=~hzdB;w:˪-ǢIr}Ɨwnc][ 9 )6˖R81А@Գ^"L5P??xWZ CNsW(CozNRPzX\T =T#`|\yxarwRX@{ZJ2[{I>rJdN`>=,;@?T5wCqH v5i b_!A`<5եiԣ˃ ؓߞQk}'>َa>t}TFoRTr6c igW0BjO<_Ǒ?Bәҡ}m F"ai@|&ãg]cݱ_ ~Po⎫ɤQT]~-Ӥ=|$\]zyשjN \4/@C)+P1r@7M;:uBy1t>mt/}oC֫sLƪ=, 27OuDüam^hU| 7}%< SݓunJ>$A};.hȨ[yH=OP`En]{N%މtwl1Wycb!2mres䗷]W|']g0mL'UVڂZ&V޽Ay!C +@]߬+>ށ0oSkD]4\8H~# '>W Gu엞RU vUk}4KdL*]Ƈٵ1cij US:QQOIf\Sh,WVDk־?MMLeH9 n@Dj X(B|d~Ӵ ։j'fAX(ƫhGrU +cy@d7DNw|W~xH4?S R` $L6+}I ZU@u;LhTк Ml d @:oj!jܣUr?> ѡBVw~Уj`D^B\ZOUNp:o  ,0B!Ujjr}img h M$rFE5L(( VIl y%}/U -b9ӈ*5ѓF D3I>GG8W@&'"c"[y,- gDN=4ɫA3>TL;w·1穁T4G{9yy35 4wLϤ(z VxGt79\K?F|MD)<kHYY^MR@§o>aԟtIy9n|U},FpQW]N.@$π?!+0~~eЕ;Oi~r|#k,íi#MhȜ| Js$A]~9ۭtտ~6hvsv{d;g7Jjww`q?$}]KXhsS¢Ro3<$k};F3AK@ߴ}2`XZ^C%&ɍi0K^&Ӵh. yRJʋ 摘T>\BFA;~CY0k  jѿ[ 8ds! ruB߭VK@=ڐH!HaI& ]n{̇E~{ֳr_ ,Ev~݆J\L ^iPZT$`a0%X b),9^NߍTLxmC8w7 G"woإK%VBpG+y&^TW] R[K]&܆HqRmm(1<81CjKW5=JY9ҒIYSٗ_}z_1V,ɧUh EfU^.IYٍSa&N+&DãyNVtRJMV! 
oV-.R[]f\tT<FEvuƽv>U1#59lgx˷ DjHIilL# j"~qƑ~S0 |BJM?w['Npm2# !qY]_]Ѭ0Iac/~BbHnIY|hʴ1/N|˺ʰ> e_ݎ] -RIov]x#8RKaAb sQr+,0L)ϕYrLzɤ矍Y7k@ۢ )ֿDڇX_+y P_6:PB$*%N(`SV b 8mH4 0}C:GITd4=,[>|K 8k9mS` -c3f@Aurn}vG*~"Krg`NvRI@cTɇp>+_8QpiĀ2 GԣB9]V0 uq+' uYTD`3kVE` >+lbD{Ћ#X;:zS,wFI_B37\2-Pߌ*y'* UČ'H"`@שB0Gsmص_FRw8eSTlS~]>IW]<r0Xks7a#Hfy$6ZAH1L\,WO%DГ(CtYr9^: oV= Έ)JqdzA\Xⷃ֦ۡ@= TYA2E'Qj㴕?XxYefG \FG0GJb{@3h_B%٭ۣ6YWԊijo6}24.^?4 Aefyc1sVǨ;u;5L/grdEg,`(xjaeNoF(C~w6J:P*|ďhh>\x:Fiv&{Pe؏0yKFLsҨr$}7v>< 9ذ&uiG,U=bG 1*ͫ'-H"` Oؖ06Y`V(EGEm(Pgy]]R)~G`~S۲}Q9{lCԼ=7'\;G勒q1\B+`3@IDATƭ Xrٮ(>@Y%PPM.lk+_UA{ Pb >0=J|]܁kU;5k<{a&x܎O}ā!#_MN7< $Oi]0=kz$}C9PeZځ(5d3œ]>kPs8'g-O݂P%`GN]`r$mCqg 9g{.|`zG2**5,Ce)e;SZ:v"R ,4]ge/[E|H ,u@_m֬ھQf*CF,^T>K}=v"zʱe=d%l :PHlpD: ]{F>~t+5%Ȯ}WH ,1Bv:-K}xmqr&w4 E*.4#䭩oZ(9$$O@[uh*[GL܁0x+|%w@&~޻ٗ_ك}n(Ņ B4U v tƥcZ{7?V5XD*6L"i̠{eq4ztQN}oEζ`IqVe}g5P*vqAAnUKߵ^(14K @O #j6&ړ% gn|u%J ϓ,-40)o%dh߮j8qYsWLa#V!F{տgvɿ\ 2$Z߸e?՛yW!H@W~77Y N AΈA;ڤ]u"#yӴ MA2}xߒIJ ,xK["`Pcj Il*$GzRLt1ӥj}'}hzGҁV>g?؉mUCx C׺>>sai2`ɧoaHcq}=e:`1RU&ΙjCׇ|NwU%YTE^'eeHBru5Hu@ I*-_rިH֬ؼgN|nf$Έ gtTɧ20Ez0/ZC(2J ;jHЄ޶_--cs_ToLEά|'B<,^yt6.[rwK wB= ,!E8gZb6sW yl   4n'W*ʬOɓwMW᳿Zcd5ݝ^;O`< ¡}WoCMлajQgzٳ7hnkN3Taa~)Lsq|AûwۿHxxlPHH6PO@`_;"D-xV'NJH tٺhۊ s 2ٽkG+5JKMܦ WUqE0\:׎9O*f|+3_ Ͻ' )UP*ˡgv*Q6EY)EEl6 ̤߭ށ e=9 * N5\cQY:. rEC+릌B}#wp>!/\/GHBl[&bHcݧ߯{Q{SGxgXQ Y9tNzSo];$ }&; CծLSO˭ȏIUgmV  T/kDMs坩|Λ hS~e6(BӜYvQ\_c"mvGRZ%_?gg5hgyWղ7L=_hzGWPvl0-U.A]am8e /K89qS1,-߼Ws1}d{߬0)[#cxLScd$Ə0>}0Ud`N27DfO7M#qy&j&Yy[KYxhSsr B1;FN6ݢ3)@DS6'*Sx2G̾@%BWWW\EϩTҹ&GKtᇐc,v`%!4`Ȑ!hzl-+WC*5h g@ >|  >"ovGaE}\s4os RylWK4A(uQw/JRiͪ? uÔRgX%iOďy`@I_j"PJ%Àjxyͯdնyy[W [+I43s $,0Heǻ$Vܧ扳{0w4]6:_Z5"3$ZƗ2j`O!V]XChU:O}̓3lGUbh\|WY 4:Ϥ]dt6 殒[~rElRZ4`F̊ H [O¶yeʥcl8> YS,rp5J*JJ\8( 6 E~Þc'MyϱJp-}kr)Sϰ;`d\xFΟ%U%-vG/ԳS>,#'3W~J/;,Yy1IDgOmwɻ/FbVU. )MF+NWУ1^$#UPLlw I=fH$PUOFA!hX_Rwf[[~eE=P~l  T+ra9Y{~NZDނ0 U|Rmgㄞe皳\r\AJzPAd{4xʊ>tƚ:\nM.4sV[Mz_?u%hC#dr^ZՀ%wHzODh0]fN>fC aҧkn%C@4# a߭;6r0۶&>@EE0ɣ)FvQs`]\i9 Y61zsTǃY > p0\ĉ.'j\DT"WN`:[IpU̥Saao!Ryg?|8ԳsGR/T\AV{Γϼ #A; }GL+ ZXb`'ى/|Rz}֡L=H3s$~Fy?X|B'#j^|o\嘡fk[BMRT\|aЊU'4c3`V/?XHHHmJ@kc8H`9#ӥCkbЪDL뾴 ~5*l&|4 i*jn>^rpqCeBGA_ Tn3RM^Y    Ԡt6ymG$ "N/$N(SoTt N6<_MRS:!5xi[Jng_~uSlG0d:eOz PK.r:M_\fΰAH߆G####f@QjO~T*.K?. P ),,TX:J(RNb}hX R(?4|k3~VNDܛ SG$˯NqX_Ά&L/ay$q5[RrNҒ4m+?攑b-,03oJڬ;#($6$f49ErL ( @e79E mGc(EDK$OKi9~4(Kʴ uGzq~X}O168-4NŵKx "C'Ng% ,ğ{D#"c\Tnصk9T{S?,\,dSENP ~X3v - & ×btB>/n*<Ö#R$Qjmmt:?MQG~~=s&kTbIWD}.7`iH`Rm_!YmHA+$_Hf "+2ʫThR?$vg|l  8@&?&'Uڪ{oLFB/oEt)?$lOCj0k)N;@ )6L g>מ|tflPesH3txƆKnOB-.|8#PJ HLϟ $b,)fV{?X;$EIDH BF@):*JmJDn Kڴb1Jek6Ԃx~֫?<23 [&@iӵڒȉ Pٗ @UM;R-DW+_(핥RvzKdn(0EIwLޟV H)}&6A sO?M݂LH20a2EUߔ1d '&AQf%eWUjԣcA?trMwy7{:HW^HtS\}ҍh>ScP<5:RߺLyH^}x>ֵ3fX.I1<$OQR[w@2>.Jd5}jT tp o:kYF]wtsō6ӀI&'PBCBPRJJY*k`!%4H%+~'H;Q"`vxMΔSYu͊7iI˼j$g8{b,yWJ~s:˳5I(mp&<6ov+7ekư}_Zn7X&M%pd:q>[  -po<߳S{phb Ä],ݘ"WN@NOˎmr픑[ /̡Th hPGdΊ (%SAy/lHܧK U@]LxcXjSPR^3FJL]^0wmdl{ؾ]>mYa"hѡ H.&@*ٕf jO&Qobj25Kl%!X#@b^BWpkj6&Hel|)gY~u}7)΢0+R]Ͼx^G]V3*1rNVs>y Nj[>x߸^y)oh t9Ip7s{v+ q 2 6Zw埛m2`2FpQ*ғ.V_HjiLRښDkqأW?Lb\΢R,]4RSfî:ZF&Y i2ht1P}%ʸa6b"B%`?w6v-Y yM%o!o[U5ާOnOijƜZQ~o[3UtQH@q0Oo$AqDD ARH0TDHRK\\DK\lLUuFcl|hRc)?=A۵?[.,[(Wʗ< "-V>+SU09m ,),8`3gT PIUtk;gQ<  x괼7]gղ<"(V]﻽UyV?KqBY%›tM [[)rZ=Ghpr0GS4eaMg0׹hwjMXI-cPN:/aDM! 5*h^R 3[P-bzW]Oa2XBV⍻dڅլ,7UQ>)QczWj?FDJZFNڥdJ40U=Ƃ|#OLwp_.ޝ ݩyI'n7 dnR lS|R5̣z`[iK>ܶff֔+KDyj={4Z=5s?[})4%O=pﻺzhE >J095{)7*?g6%wgx7?_<2crz |ffU *u~?U?_{8bt3vk5,gSMg5KgG;؆CʭHW,ET>yF"h}Ah)X~!0ڴ ź3h.1b+&xhLh:s9t?92Q<̡ Om&{]fNk/˚Kc[KᙪLfKZ>g7X>e ]~n@PDH3:*B%dw~~S}s.*-ufe;pZҼ).vYaa~ sv,U,NXwޟX{"l*-~n۟z[a28 v\2 8r1T [*»vԒBtBNE ˊZ㰯4w}s\T3fϴS6{^@z#ȂwCDZ!tHΒyS ~syf^uuLZI~Ϗ!Oy.] 
9~F擀KcSZG҄iF֜YbhSҧ[5 K[wܧ|fxB4S sSx&#LyVʦj$@пo-(ȓ_| GDk€l¼C{Svݹ0LɁ$\!<)WטA"ioBiPL.-,YoL|'?k̟~W ;K"®m~MH{V[GHJFGۧ[.@9sXn2o7e@GQ. [7%>6B~rD)LxCH1\ 583J\_8JG[Iϫ[v)+zF>qϯ'S$ uI0]{$]ݩލޣS;IE0X)ȿ./dž=]9NI:eVȾ3? . jڞ6g=(r@p%<4*ݻcn,q,Ef!ʋr) =@Z m(՘A9Fd0g{IC ^=PZ̺'j8X+]OY_|3foT?;m`zK3S6ǏPC~tN)qhOKny"4|u}.v ;;*L?,NL g֓ g9,w q*7#B\D=;ǫ9ʑ3#>SiyNMGF/s4<|AHRmT[7W4Z Aӌڤr H^g5P@ b|PDdJBcYr;Mڬ}f#|ƨ-sb+ZQ ~)obShuwΘA0k $YN?0;xw^țNۈ|j /*mo|=?]nsa/]Zz*8> T*\0bCvEt#&j \͎! ˤPd~"kdCkCЁꓵLkY(N  >]dp. ,KTN9"J%c岱^L;zJ[]uw@#_)\՝WG`I>x9RI;RMet@LV%î`<>I9-W┫ʽOaٞflʕ(1 ?F9W-칇wyef fl՝+/)9 p'{!X⤋6Dshgf3>j-`vH*M5U|𚵣d0G`3ԩU3xqwڰёk7fk˒0# t I*m92c'i΃tYVkarȿ"}GS4gz\@^frc(iyifBia8d;ia;2^C'GI)Ot:^k*6y@g}8TAR.,G&^>~J^RT^ LLߩ;#tw",T7R8x)SqIS` KiwٚMp/Sj9r$ r X*$JGf B(0ZI%&xf-r4>$-|?Sy5AlNќ|-L'ڶh)CG2!mӼ1k5^O1hY 4OtbҪî-?(xKjuj߯$@jVCYb\XFr["I}-x'P3?eab|2_O? +Iiwtn=|nz߈Oa&׏< TZg>YG$Pi еˉ#J\C{7㻢GAk4gzڎa(XHshoKT"XGE6:#6zC(qvR{"Ij鹅Sq8Q-Re6ULuH@YYVC04E`Mj*ҠH>801%Q) }waJ>S@IJa1~[Sm)J(d52_gVA-?Ʉ\Pf3:#Q,'NeQdX6)8(yNAבER8{juh5 50+v#xlH(Цrݫ. /~: ^)}Fi /MŰ`匋?h 0 Н4fbrڃ쨨(Mz<(A< [5A3 PXM1] E]Jhi$y/w%vN:hy;Jǐdv]Ws{ k}]_F//ʥ0pmͦD+~cvZCJkSyW284?kkf^#o\ G1n&#=u ob{3{v4JAkQqPrKl$MNeRZZ`تr5fgEJJHe&3Wӏ[]cTX;(u\rkiKrњmL;r 㙋 = i#bDVh?iG3Z>t>y?XGD- ^‚4BI[~]NME Nj*/ _$w&1#FlRem6DEd3suz*S'ˤqqk=m"D |!, )Iqu;z7yjJJS!˻ SAi l$k d3zԈٛ}O%) V T埈>aHB^ylPϾRQw. ZFU:+r<%ݛ_y>jJ@<29_qTSm*O;5JJ#Ԗ0{0dS!au=(1wzpp^Ywsi1/ܰ ye{KL{Oh&/[ڍo M Mfq-v1꒞1V'.g*$QgF8MrEnC3%^05 !Z#|ك# ; -Vje霦F:fi׀~!"4A$& & N%l([i\^!ay]^oGƏJpkP NHNY߰=#SA6!9M?\ttoϯc~+䴻`PlaZhhCc&.>ւ4NogG(A<i 6\B;Q^P5:ڦh_X}Ah7٬EӱÇL ȇ̳rWL3| #vǴqn^Qپo s6% ^a%gz%G(}鬥t: &W1t4e`m8_IQcQ)8@~[6vz*UmO@O8"F-SHn~ȢeB4} ݆Ģ ,D~6ٓys֣]'+fb%%%%%/ &5'^byA3-gcF͞iԁ/OmW@1Muǭ^e\49^5osSG`3tz`# ?bPڠJ%2-WGYQWyh4*$Qmo篠\cm N|XDөszJ p!]ݧ;GUTq agLhڼ?vϡŧ)2p_f]׮5'\9Si#Əބ4xңFMjf!DC0AŕAx۴ *Ehk˩{<ǏJ S%rR?KGΈ'ץuK +Phqkk1Pc>z`5DӹZ)TDU!oxD/qҕmDB%:qුPϼHI@I@I@I@I$O8&auXY:&.n)"Qnc%fok4XV`WPF3+R '}ҁ+="CQ#(ٹjM&O" Qi`RbAiI7ѽ\>ASS^L9mZp8(Ae9/ClEĦwQ'b~r a/(/J޹j%Mo~S^j_D&ṳ󧜢$坲" ߟ[¯6t߹%4jYya|FM#%!KK~4`ZpW2aM[f\ܩ8<Z/?٨v:o9K?uc;@ 횁g͑RF 1$#<EO#}9l|1gKMՠ6.Flξҷ#@dlk5or=uMFS 93b hzd^Ԋ`\ԷYbQN#(Ֆ]I]|.d/Vlv·i;ۃv5ں#ZJ 5+hAԩ:42Q( ڱoҜSƉ|:zH;rj(.u |nxpV̽X?3ԃP+OfzcR 7L}yꐒ@H`bʔt}wC2Y?GA'kX` o`3 t||/DIJ0arׁACa@3ԵCW. Ȑ`GBC.9.966}n\B$$k11M믧E'U ý| @aFq44as#_̌lw- _C*O, ud TmeX.>);@IDAT!M;6fĈme24Z=e!EH`tF}V}8(lđH <3 ;l0h&Zi/+՝6 ~Ko¿S%m+eR e? Zw "vh `AhքU8q:l fy\4[Hޭ u2tEG͕ vM!t$'% ޴pX|p9Qϛ*$Y ̣90茺k_mt?d^YCM3\xrЄ)T?GvXcJy1mNw2 t hv+=`$53%-9u3LJ%.(/}b)%%?G2e=ooH9_飂d/3~OƠ(Z4KlfqLˆe3_KMmjuPcruPtKp%9~/mQqRnxp%ͨ-ͯI5KM𐃚N!C\͵UM b&gB ZvpN,ۑtŽ,M\Y]:B a`T (\5"#PtfD1Wj;MU`m⌖zmM ~F!Jl@3K`~}S;)O#G[6 j8],%(nk* uLI@I%`ǭ?m3>C.qbv͚Ew:޸e0<&~d}>9m#]𸄄O`6>@>QbzzZ&WCsp> cG,:qk]'I' HN {!5|Cf+eļs*J "?,\8b۠/N}+=4oN;&tT|] 0hWثEP˶gso9/l繟o~*/QEVQȥy`;Z͵Vxxleq]lMSjNʢȰ )8(@;I539=?߉ESf:遞A"7?3 )^?:'H?%^lyK))ն@ݕ _0{;t^ֽs/ڴ_28qsXSfL|(!O`@'-=c:u{ϩ)*A?aVs]Wr,xGAuϞv<% DBSt:hf;xxfGM^ pZ:7ɰi M̲ kƒ,Q -Izy @IkK36K{XowܚFu ]FKzwF>%`uMKz{Y>CҜ6:I~aE>|.rqՁۋCsr{x]VVm+ ( } 6,fw7C4{RJZꘄm۶S`sYa lN `k.ڸ @kNۼIi1qk_NIiXNl%uÖi7YGt?ikI9p@i푠h;@J"]\qnX-YFn[ɫ/݌әXnJ+(gTHNDu3YG)žN>y֫mS_Ʋ-HmjbYV~79yE0 |0vK`^vw$^$9`7飙rV`kBr{vcM_N~&nQj+R` hs{2_A:,Ck&xgɷԳ'(r P"qj QTHX rzMCz՚gyPvb i)ϐe8"̢FaaԃF @V}SCɪڧ*r ~ϥRũVp?MϏӳAA"HCW]Ϙ09uf&0uZ FO:Fy8%92$`>/<<$*ƚh1<&9bku -ԵY0unX5Eg ԁJ\1X2$ K^1^Y=g0? 
FYN'|83ڔϗ=E3t}}5JU)֧**|M?YrEW) ( TN01x|Lw2#x\|E)4m1X ڥx3IާZK ^pq5gzC,UVwBfN0?07PxgsMP[u N:S)$MԗABTPP9ȻO)DG32EKp S&ٞ^ w_eBDV!*]S}2stz*-xN&ynWpU@Is=&p8?q1ƃ"}>{ђM[wq؀ 7ج2U-C@~rڳOV?"ۣiF{5X'%}|}j4MM%ص`১ԤCoK-cזiN/ ~B\r2<7/"|(7}]K˗TI+rnB 0P@Ɋl]m+`I"0ᇍ ZռFNA|3|gJ64Y):pdZ6|s?n)_) }2m" !z)~ov",B?琿 _wTE5~"/ۊ)@{乗z#uj9rR, kJd0ޒ_?}`T˻49MEF,*b0vÇy.wGTX}װs'_0 (W5QSf%}PSt8L}}IS.uG7[95ۂ꯭jFg`wh"k&zsi3 MG=AZqFwӄsK翎U(_H^;oߺewq.FBӊqKHԵB;"jM,c,!=v@_l^ם{p #&M(x.aDXH)IYT<@'AlLQb!@q4<&@g? J()~r#?{_Pm}S#srMqmկ ,<ūBW3CΞPq2II;<@F!D3שJX3B۱}ݾuj쌋}ّ(tfbmGw209.=̊k+xda4(q.|`z%a*Aϗ6GzS@IclLcQb3 }=G C{w{]mz孿\?GIM3yt~׵>5J'zβ>}++GE 씇&&=3d LVgF<;uy ,Gq!юk4RղeWO=7nIzGsrD F;L\&m`%S`G mk>?->rx ʆah(0ƒ.~a]<{:^/v&W> )LRDHq|`?``kv U]M\Z%Wt>6c3`g~ˢdf6i@=2uiӌm^V|mʿ N;~r|*F/]b /=ZnoRWִxn4~:sbx_/` 2&NNy@?d2\$*^KQJV}\yWzvk0(|ڌێ?n-]m+Th CුSVC'.k@w\DkiH Z~.ʐպX A%`feZSUGG!WUxVUmh|xRHpJ"}zY)+* Z2%Exp.q0ID.XզS4-Ra$KX.چEWMeOjZ Z*{x)ZlqjVwU&5#Twg\o0_ge~W6$~G ^u?Ls!/< 2)irx[`#T}7Y?| Q"ԧ-~:ħ~SGw?q0rQqI(l^|{E0]Oї@ EcshQXyNDq?T޶%{hǁ^,<^V/}uTtSB`U27kex[jXdEV!4K:Jfx\/fdIb)@h\ʣQ,TRЌ-XvM teN!6mrW^6kA j*Fp\ۻmW ;**!T g1Cgs&(U*tPBU( MX靧F*'S|ʔN9.ಲ .@kR;jS9XKX&LRμNח>K`N-k?>^h!kؼO^<5KpI@͏RW'pO7m=aat^0w &~ПrԴ 1av6DmD4n0×~q{7 9>:IK]:h g&.sն5np~%qaAzdF|qyWX9`z]3V]h@^^Mg;ޯn__2O9,eݶ工̚} /XwU갩3w5, iMe"^jl'ISE0D)?vnP I2ӓWTZN` Rd~wV\TqO#Âus``{f +ud^%OIǂgO'1&C,dA<,DŽ"_i~5ēO!R3ȁ:u HI w_ #L |MEDMAkۊ؜QSS4ۯ_?SRrڇuWM{|D ̯ICajfѣ47Z6͢h)t6~l?,6[f19h6x@17Xj?ᦑ=b 09ׯZ1uDW([=& L xKUP{ŋѠiU{YA>m"ME0hF&OQ&g5Tf.-^]ؕ,eh0i$v NJCdC0y@i$5(1Qz=/ޮKu~#ԺY# Fz oڴ #ݕx}G2OFd6z(42?(vUÃ]3 Ay<`5Jc5%^ eX1^ 텶 :oSj$= k0 b`\6mTWgxő@^s)@[}RFkhړwo|IHM?nnߏEtpԫ[yX;A>)9 itd]wtɹr+3H8Y94ta~\r%2ĘS?H6qT$| ]irQFgy>ӟkD_f@o?s{n '&KتDTئQtuSK#'}`iӮ -fTFƇ?~n7Q ׂ7QT(E鴩=O+ M-d9y\yd;geaq I%!gK2Z\+k|%W^yu:̷|M& <_woX؉6A朥鎫{SέtvM^gJJ] fԹ'3ѓtU- ;)B-q{PYf;rx- Ua\jTU5XG4jd~璍&ӕ5qO>T>:ŷCP.6j]q~g:j?̢}z\ E׉_ԝȤ;.Tþ=ΰ}ݎt$t8m?x*;R,u`HyU\?ԽM4m:孧UWS?Ȁ\ȝr׋(x훆?onC&~y&n@x|Q[7;*00An,N~u艮t_zI%_:K/|޼-bYYßBeeWc 5GrM.XÑHͶTlfӼe1U ژ4Woì [o)P-Dž{fIcmUfVjŦݴj^]wIIG2NW?/=JEЍW/fbkMs)ydzz%-[SͻKE.:K6Ěpֽ;k/9GN7Wriώd6_QG{Ʀ주i@RS},&ʉP[dZ0?4!eȦ?IGkA_5B?t$m W}ᎁ}]K'3s®mpKً , )gL1Qbs8-A9Lk%kt]F=;/O x1jӼ 5 ?.OdX)ᄍ A WlqI?y;4[ SkB&DUF$^mȤn9rWřI j9a7<3~p36 ,MxVT.ޞ@_lZ',/nL%kʼQtIW )e^[נyCw s)^ Ɖ}x`խ]3nh xԻ[;=b9s+aw/,ec/G)t6%o tQl?Amئ`sؖKw=dimfN>eZ \2ZINwnnXk͏}յ2%, v+G%|,W}ꫵdv4;A&z}ѬEk9}ዟt6z4|tV.}2s]~A'v ޙ?p++|k_']9fG;}t6q||¤watpԓG_>mڴNk8qsI)ޗ5% LHNY#; :M;*5~Ҕ)]vG鄟Z\' \@% ;YXt:c?bQaڍAؿ{1`Wt53 &qa?)ͳtjݼ1ɥӬ-1S c 4Z XŃٝUg ܲ簘L_^"}L0pPtvɓq1_T0ٯ]eVI"52ca "3>[DBp7@Cj=r3;c0CH&;_ %?6ar:G=EnKOG״hebZé5hiNix>af%ܡ] /H7&նhʉF|c|&Ktǿq'X*) 4 LX$Sfֽ|xA5+J. 
9Sgj &K'0Φ|lbz pսhF}$&5WkŦ=0q4=IcUA0 =;N~PkI9` 7Bj_{+M$![ )/4K.B>0׿Л]^hX>ͦ ܘ$R" ~ϻ ,`rw?} WW`Mp bPU;6 @)u2^L'M1"J^F@LaA}wtG(K7=8jf'kB'^IM] k* *T[JuVsj[I@I@I.K>a(߅~UJ}əZ9i kW>J`Rje鉲m6>RgeYj$$$o8)˛NeuEXKpQy~E2o뚋KLmq/_]+X9Jc}-En6h$ b'Hڊ="{8p մ&Z*%%&[IT6 EttD3X-Ύ}YLaSdP ~.~w_w "EVu$cn!5tڊG(Ces; W dž_¤٩蟻Q 7S‡s^op msDhmzwoK8,$tQv KaxU5aM4.P s^%O/XdB4"1{RHX"ZVM,I{G]) JIc4k#+ȇ6lE•Cnɒ}f/[6cJSt:gܭӴgw()):t$X9|L7AG5EVu1]:6B_foZV#O-Li8Qm+ ( _&lY91MCl<'Fw]Mfʒ"^L#'%hӮC": ~5I?yK#y5mN\ߗڷNsilSKV]Y]B{ٶE;Įx.s8C![,xVd!6xmTY]m܅+ҡc-UUVV0皴q qTYDZ9.15/MϵC3h}& 2e{*Ce:WI$Pf^Zf}7%iބ:t%dlvqp XVkgB"&Xf\o6e{;87|Ub٬ '( jɠJ!@oe ((g21Z|&A7%&6c-/K yFa* /nƿf&BK%;ܵ_ꬒ@Kٳ4&ʂ5]2Ad9YOё+d9+˩k>r?t#Ʒ%Z"0C˜_6T}ŭv.lߩ({5#GCI)in#TaÆ#N91)9e;r&ɷM\ݩφk:6̳3lvw"Ny#YUbz%JHJϻo״ġ*@ΦE"RijI%8r>7.!Asc5ظl.!1|hl7y9OH؊TG<$$$ G223NgQ$' S͈i=4gzwe1ȟsu}z5Y Ն`-,rX&Uv>"YE5Xj]@ $yTzT$ +'+pEI@I@ID ph o7l?wu#Jn kcm;2%Wf,Xw8C8fΦSP[\VuDdsVp$FѡȘ^"__/ j.aTgf&LH^wbi#M`i6%~;Jk4NxjkR)  ~lk/.1gTi>K`֢\Uq!zdUk%Pɴ8;{2@paf 5Sԋ Nf>ngӌWSDX+ @a!fgdxfo;8#6# i(Ң=Ք:.|͉fzVnđ'v8oƾK엦HIaI`1Brѭb]LjCI$9$6Y=+7<־I ; $낰A]TCo⬝ eQZu?pq簭m2 PM =;&J?_7`1^ݣ7v?~p*jDqm]ӄًr #Qlnq%tMQ˅7j8z}I>[o=Cs^%/ߠ>-MomYQ gL^z!]#n7f&vb#`p֜(eq'7-UDI^19S?pH,,cӻwY vjTǖp~4?X0G ފFu}k߸?(('L mYTeaK#Vx x9$?xWE."]8Jb /.mi9H uBLĠ `q6a;[Lf| A$.?=j?_E _hsF6JI6ʿ8$D˥g9YtQĶ?~I3h'/Ք[~45=Y? ~MLc'6,!$UV9ݧx@˜K)*\CO(B+q^@txwOn\䜫g(z g;A64{記ߧ'>*))Q"xƍOw_[뜝LOٜ'6*c3!ц82\0](![ͦKyhZ8#͢kk$%gZI1y>-I,MRĺf|rsx8ZY5]4͜$Feb 8+k87_+iId̉9bZ Sq _% oE^rG& k%;y[zDkjGS%;9F];Zxй|!Db„w*[MRjpZ;Mf%l>5= (|<ʩi _U^\a\B jR2S"-rKTY(7aؙ8>0Q A0]*shżli"y1SS3N7iѬɦ2-W8r.ݐitzt2a2L&?RBgRJ<}`~jc^s SlJ_/_v |~vV}JiՀ- ]㣖} )^(=zŏbM;BþMڅ38 H*ާ~}/md>YrE b^viX;e) A4/REsv)^՛ʩ5A_CEr{yzh|W^L^(7 ʓs㕕JC5"JyeU*Dl$./R$JqI8yne+Q:\bOsgHOJ>t5QcδmWΥG/4|> 4~(0w?#J}.(ps2 QY- Jղ.RyM_v"7l.uɬLkTQϹ:z(;Sqm kɟ?/o?XҶg<]Pۢ϶N-ioIG!@A%Lp;?^p5U6"ebC\798l }%G.|NދcG{9+5ϡ˚88}:#6 x>&QgB RQxg}0KC#7# BC-#:"#ed 56pD8͜JWRI5=z..VW̦FtۡKtj9-A⩒Qh/iڹ 1Qher3{pn*OD@ZR 烘B:~ai>_ eepp vr?@;a?9 +>2I(rh?Р 'KH'!8=#µv?ʦ-M?lN~}q=Sٿ I91G) G׈|&G/AYJ(G7?m1Eǒ,!oO^.# av3AM&mZ>%0z,Y3<ų.TYmr#a<7M4Ofs {S5U]w ~$SFp|T;r &h6Ƒ{}4''=pCd#w6{ٮ3aP!eN~:'] O΢4IW͛Jݜh&T9un fW_|l g\Ns^-9^!-5sGD೔g 8y/`! g`amz0)d5zqu-c{[0c)sp;OP' W}4N`z%/pD$ys~B.+BB~:rCy'=VhvKy4͘<Xud&}A<9{B YLÚ]4VZ Gڨ3(lGƨwP}h3}x Y%K3;;[< 3V`Q!Ydy<l\^)ͦ%)WB@ݧيGTOnZJR[Gy5y Y̯-:ƒi w0d.9(luBN%lN.ad=)6:H~0&r=9ɥ'~FGݷ"W ?PGw}/`Ipete`{_hiXS -u )bY^ebh)r43'Ӊ"6Ja^ar˅ ۜ% $xrr13AY:\Oh2y'9>RL }0!ED(`~dFXzX؃C4E9) wW|@LS rZfN6wP987vkyl6Xmޗ/ߺ6VM΃4b+р!A<F=A(P]vj a<2,7޾Z 뼊,zщwLplÝ2a^>:KݕfBh\AZ.di8w^G#ʹv4iVOJ{_Kŕ6.<'D.𹇆=<=sI=u8fLuZlvi4RY83M. m IGPǽ#A_?jwskf#]RZRd~pc$nLQE kܵrWm 6V3<╝< reiYF,ӊ2NR N0I6gr' &z0kA؆;Zӏř|nⵍk_x|@K=fR5ÈlK@&LXO[ yj֐r:#KθdD@#0l1z 0B:F:Bl{lX2.&yl[ʚLhw~n-#kqD.ŏP΃sm=>}kYGsQIq"j!ZZ%/LHMHp!J9'49w~!ȃ'#FJݿ\pbm;ʅy:v~me4D#XtP!S} 1P7B'e a ߤ)3{CW oG&ȉ$%?oj \eW㹴#ؙ0D;zOs bUpP{lVb%Lnq(Uz!lR*d2 49&s?#d@t5fڭ }Z}c &n=׎|1R,cwz1\Bc8vb |L"g,eGS//mc; N]ǯz=q 6AY+fKD郀LLFL {3tZ-dl6Օ[O|m6'i3j5>?XV=/;BqM녵lB؏)Yד59O=[ñsvNڏGߋ j?8DQ٤phj{v_zhVz`M@^m ۟{;ljm mgG2<SL# 0c3Oa UZg*t=6gyNk렕[17[HsLa?U+8A 1}:>pl@+czSz*jMˣǕfi牫",@PzR ;nϥdD!LG3D!Ph=r|M*qc*4i2'$5wjtOS#XO_1aG"89-AFMY8֔Õ7m7~DZ56Уq'=?V.M]!`MƂycj5(&Nh/v N Wyt&?8x/[ o%G t&Hd́4C]є${6{zN{+ƕf aTI91!KWHˑmʪluZ6{C1sR:],ڮegXr6Fmlm/Sump99݃8g gFgb2S⨅G/vBuѿdƥ3\[Js'k2V0f0?W/ +.@):ئ6'NJ$FҌTSDQ31 7KJF7S":lNB>'k"ߤ>i:BrDE=_."޿bf6:+rS_'KG2w?y5Ek(MR Ry-V>ܬ5Ȓ9uI&q -MNdF@N])m.?/v敜֋'И9Rn㾱!/ -L>>KKxa+"D};r.C?# '}wYzu"T7>NvˆTxU 9J*j)G^<SyjA!n~N`t2?CZ'sK8SՈ F2YJM(CZsx+=l :|o:8(J=P1UF1%d=KEr'p߯Rr"?߬krK窝:^"KĄ\.Ğo>Kuܠ;fwim. 
'7,ر_MGV4$.Lz!>XqgC\o xu)q\k0ω$aȹky^xy?|RhfAy)<.U%9Y;/4ot2ć-LM[Ah椱E7y<љG"n}w~SH=^0wxH%{:MbBv̓B@־R**g4/Ë9@ |&M[""+jľ(ڣ7voQ =Vܢ3Wľs3hJzNک!KaYYQ ULV/&s)ͪQswRO#eR&/cLJzR\se8{޴$nކ%3HWwԄh&f iPM8afAw#sܝpUD?qFQ6qLrR1Pzr CݽfLJ n}A ) o,Up'6.ZK\N1;+4bjB`id숲/0]ihڦV l.R Δp~I?qѐE < [<.ͣސV#I_d4g)O0AZ5o* }{E"k-?plj)1l01ܸDT͓tV=MaF|kZIϾ?nyFP M3fQ0ӓrBp-s*v I$v4T;)#BCEK]ȃ1l9ߵ -q?oż, o}pPAnکs*^3ƍpO |^3OG.woa{jbz7ǫ{DO߿b3/w8O_-uk*X΄tbPu8gpEiGgΡʹ8vXF~_lfGH#G!~ ûgd_C7BIB@!LQ%\1oSyllu싈pOSkZ>g0Ń-I)b?_{zGë^7 >GCFTdoN QgD bbYP"3 5AP|;L0iY5 ̞:0uZ@g??2c5-H…C6GBlE\ygP[G 8(4Eey5C[BDzCl^[9O>ozx|QڡlIg\!\!ϒ` q,Qǖ̚L|n(ɼI% 9~!w;v SZ!H؟8 Lһ9fe&Rkp$q$J gVI׃$I4xujNw/8&DP,1J\@|tUJ^Di4d$;%^ Y2bX>7{HCKMH` S_o6o2Cތy=e$JiW:`V.޲lY_o/kzRˁ<iBCh^f,!"RǷM`&LނAa{K}Z=T0jP(\:D+`_%9Fu- :]z|3L6Eq71yrD:$Ξ/_bgN%־Sc﫩u)|̸X9zDh3:!✘M-v; ,GY-D8ܒjJ S,G ቉pYF G+qRDA^"L*"C /FtK#Y] ;}qL0o¨=d($(Id$RlnZQ{^/:S5xch (pB-ahxZ%`34k~$C;@LTɃ' ǻكr}p>O.~v%V_W9;K(KsgL{{=POyѱ:vظThx75ۑRB˜ټQ-?o2:9S&Q3rD;P 5 S?{H|ȯYRY7jDd $ٶ%Ż9ASHU"IQ! FК "uv5Oz1 NL 4~D^І("Ħw( 3h?ɷ/6d35p/׋#{ p'H jKdt$W_e7ϔ$V%!Z tAɘu]!Ako )~7Z&K~LYT?h`Zg*fx[;lĵ|8 ^ӑY=½|w\on&J^ݧ\ԙo*ɤhjds\-hKt9$׾y>>1k^20$}WܯX$F;J톤r FsUnKxdzsi R%Z9?[K/ZBkNqDffRIE-;UMC8`S6:;NQD ݝmzƯ-kuY%G^,3a2i4(?۲$Kc""1b =(2Axp*I%Hp&^:ƺ6.djh@A@s*hAb#gd@(W}ѶC)9.dOޑ_W+(@̝!y% KH~c/rpG.20QhΜDqx|&,ʿߕ3Wdr A`ƻu)jjLB$ۏr>%L9]H&ZK/V{YaE yYEVdiޕ1µ%TZ]G_|lX7NL?8JKgz?03?Qk5&@DMS/_Z}RAdD~eđ_tїfL 1:ާ7W7]uZ HEMy0 Q@IDATg4rQtF,こU SCK;?qUuMb@P EDG`^iǷ O&I K3ye✓y$\sV 9(=C֞3\Pq~Ҹ67oL"X!(%v>@KUmqn{;8>| mu.A[;}*<+$:n,M8h@07Zȁao54>qrk>Lx#&A S/qx!KV)џ )?H,32n7LQ,-7;#ܫ7 3cG %GFQmC+8w[1iyP.9GIލ67d;hKS%ck50^Y St8DXK96YH!tl2k%9.Gܛ(!&j}hw!+e{9|!9>ʪjM4u2&SBr*鮹Yv5sTic#I^BL8dsW?>!72_gm+)pNek;45H0YT;uqםdIjxښkͪzÒ)~s҄.>pX@ɒX\NF|$ȣz`}z}k?{pfADV&+*ĺ\\(` ggQ" 8/ '&Ru:]hؼ#鳍GYP r8R!HRe~Lf+1Dgٝr9Ise9MK&5!p+ѕ*ڢ Ÿ򘓹%yLZx̘bQG}5-Ol(c~C#`k;oEjνS"K5›:~Dā'^(t{X۫_("5!pР C??f\,cV34tpc߮֊½'v~wP ތ %UZ&շGQ8a\)[j,-Y/C8<HHy绲}N\EsY.AaDe ]""\%|gY1&GH̏0I jM~nVc.\l2'>?yyiv&Q/pDk 6bOi?q}1-N`_3Ll$; )sC c3SbVCVۀ=ԆbݾK#%Y4=݅} R:qw%h*Vv  DIб o=,9m MDD\F8[Pb&zK^_Q\x"@a{3LzERcr'pNJ?MtnOR(&Mf_h{WdWȣq#DGdPf̎/kJp(^A4_[L,ϱ9YVǴX0䭴j2#eY-%і#kokMmG@bǶ1l]{q ӛ3HN{IPssY(h64<]x'@^T2 1I;AƔVGgbA@K$ 9K!Dcr9Yϙ-A'D(bgS;Q w3 1I&h#NMpքN7/|f@fz4SDHlA`Ź ?{lǽ6DpB!;xN;&* opEMpKY&چ& s̼n:7.;Z y]l^Z|cd  o/A fԎnG?ƔSDuA2 o`@Fg>+[3$olPVL4a k1vzՃH{ZT;uYxt͗aG]DsKH2SV%<ŒX/䱼wfQGDXa,7 "J!l2A f,UGiDsC&K{9]*GjiC L 5&:W 2gOB 9F G+ f9R M3V#IF?FS|BÓu\eZ1_ߧTyuJz{:(m֓Q]~UƒGs S-H_6V=Ȓ+q*@J 76&L_┌$dqv"s||t%f:v|LQ'΋N9:z'tޡQ2'Jҷ q(UNٜ0g$1aۘ(f]Fė\ LD:GwUDsB*%,h׉+zhKYʉc5nr8]$sHR:yH yD%&NIYHsonlhثwf Md:-zOt17Rg~6u4{Wh<`r~yBԙGB@#r"1y' 뢎ndM.a,5L#=z&0N W #1NCm"K褣w9K;$Iz+1(6qJ|%aIm(wvw#"GW=1 xbrTRQ-p/gG@"G&SA-Ln|h$e^ F 2<7;pfqw%P,1GrDa6݂Ci3 3']}iiQLb&Hgź LXKm]U]x<8zl:cH&tC$jB9yt( e%eGĄI9™La|R5ED8L&<+fDYCnu{vj+Ri;λ$LvaBPN4K_9gL`|姕Ef@&&YC ` u[#Q0ѐ[#y~_ӎd"jKC} ڐo:MKUJ3M۽i瀟7Oy`xI n >tz%v!"4ґ .m켫v^IOˈ& L(`,'pa r^m}$Nw:h$,;CI/6R$N!析"Nx@Za&OʎƝyW(U5G/H,|U%FUOW!PCjCSrO'ҕWW@O?މث/B#T|Dx1眃)4ZjS[=e?p(g7郹m/*G1!I.!M%Zjک}9z7%k|Kl#BCE" 5;kZ&f$Vˢg)5M=eBbw3ّ; G/-:aJ_z}^(wbӝ]θT[!<),K1W2(L4ٟ 82ebg4ѓ9CAhD ؀ %6FIᙴJ* ? j#c=Yr5ª|$ᱲINJADi*_aa1ہ!Xv:2FC%WV( ,t2kR 8 J5K6'KcYٯT2Hָ)L€ƒC` IXK%C@ӡMMbXr"KDWP( "NLdgK!F<JyQxy`"& v 9@*A 9Ӥ4JjʪjCc-, iuB@!PxA $H,X~g "?S"&OaUzT;cبvz'B_Ȓ1VgP( t tw2*:Ȓi z߸qVNpsNc,aUB qb$N+9cRҤ lGUyoÇ*¨OXc$DJ=.t $IR%.}P6MjzYr= ;Xp:gwc LNK:M?#0m(5A:U6?q%^ O7\Kz]Mg͔}׷(,:F '(wשׅ紧F@u|ګv:^]"KwOThUNIOf(d3bCL xNG;%!c5 ˻rҢ6QSm HK}2r>M^jB@!P(B@%g!Q{fcRvF -I%u|TTӅGCaOmҪ۴D(4vemgQB#&PMGTtgljś_!h>3V`(\#Jz?;wӍ/3AyQj}4LR>Eŧ2?'>6- س>*ʣD{YOݝ n( B@!Yr, Akɿi3%Q(w&ғ dji5ttQem#&Ў#S%?tgTzWB3kݏ*;a|Z?GwS$ߩt'uD&{7Qə_RSF~r=uwpnjBW^%RsߢE4g֜t! 
[figure (SVG, OmniGraffle 6.6.1 export, 2016-09-27): "Open vSwitch - Self-service Networks / Components and Connectivity" — Network Node and Compute Node; Provider network 1 (VLAN 1, untagged); Instance with Linux bridge qbr (eth0/tap/veth ports, iptables); DHCP namespace qdhcp; metadata process; router namespace qrouter; OVS integration bridge br-int; OVS tunnel bridge br-tun; OVS provider bridge br-provider with patch ports int-br-provider/phy-br-provider; self-service network VNI 101; overlay network 10.0.1.0/24; provider network aggregate; Internet; physical network infrastructure; internal VLAN and internal tunnel-ID tagging on ports]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-selfservice-flowew1.graffle
[binary OmniGraffle data omitted]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-selfservice-flowew1.png
[binary PNG data omitted]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-selfservice-flowew1.svg
[figure (SVG, OmniGraffle 6.1.4 export, 2017-03-02): "Open vSwitch - Self-service Networks / Network Traffic Flow - East/West Scenario 1" — steps (1)-(9) carry traffic from Instance 1 on Compute Node 1 through Linux bridge qbr, OVS integration bridge br-int, and OVS tunnel bridge br-tun onto the overlay network (10.0.1.0/24, VNI 101); steps (10)-(19) deliver it through br-tun, br-int, and qbr on Compute Node 2 to Instance 2 on self-service network 1 (VNI 101, 192.168.1.0/24)]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-selfservice-flowew2.graffle
[binary OmniGraffle data omitted]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-selfservice-flowew2.png

Գ G0L LN֪>tb" }j ib}A`剣DD(D"d*ML</>$9=0Czxq??ft= %=M0=<ب w_{x&5e. KxqqbIs%?WHRB5>^xHrЏAӸ_9&YSF43i .>0@Ǡ. @ )-bF3l,&Og6H/<CTF-j ?{Ҫqc,b >&e 1@h}CI45|CjK~* ZKWo{Z=#Bd.!q a%iQ.4HdG )H;*yk Lݢ 9Cja2鱻ocR&bbcl{L1@ʸas]S08J_öaD!$eDizCmZx@flg\5A_@ɌS;o ;e A$ޓ?aꑃ]hk"nlZ٥aɠJ:?Ajt/{tmebLvAyTtoAaGUcjA9FF90h@b9~*SLZV?>G2Lq{/3<1̡4Ěń9EP*Yq 9H6LTR pN;ߓî F܋q->7?+m>y˴~Ç 6I0 9<ПP?[uO6Q.KALɗ2 ߥ#ʃ6$lL ua^!\*5|"ʎQv,xXUu 3\Axq@aAH ~ffy] #Zkh۬c/ȁ}"TJs.Uo~1;Bbr(=-U6w0-s~c'QCX~p}/nl,` ۈa($ r DʹqMgaGņi]40p,9h {mWO]-P1q49wJCmM 0g9`5-6< з&U< yf@*:,!F/^N,b[*R(!1Iё"}ّQuU(Z&ԑ"S;Gm%ڛV<6Uڱ!+hpHdC-*-P^'~zOޡ'mq 9ڼ%'_6U>jjkg')o -߰~\’G~VE3MHd۔rڷOذs-c~C1"-{ ` j g-XJ ob*Ek5DM.UUy>\QF WXz=]5a}xƴ**MEֵ3>J^@+1/J9$S\t$]92wd;%>thۗA0KTUw9 T~iƂ\3[#'OpS͓?P!Ic)0my ړ~Dvm'N +7wA#I+0-f ;?'rooRv6q5Zױ]Sh۾~>w>{Gln&WuZ!`{L_`Mo}xdݐD1Qr\1yd4A,xy i)!N?wGzc*pZγ`Skβd8bzg_32s=IwM{ma~H;./o(?zf5"M]ORW4>gꖜH`X"oHW^k~9i{ ce>qs}!7WHp2$Lt8:ϸw:{ H?MG2SҾm4Do#mu(joĝ$AzRxhFC!>P }ŵzXz@!07 :P(,G@1T1SO( (juR!PXbcP(FP (,B@!P-L=P("QXIB@!`9ZzB!PE!X M9*;NX+G1G"v Qt]+ rEl aW 6K?ӔiƁ5]VWX\Fg.ӎC[ɗ.qA]\-\ oW(o{ߎn XDM ×n(>8 s="J>* !B-\kW*g{ߎ.^ ;L~ dNy&> pw2jv46gP- ikfr9Pi1uAޞœRXojgmP ~妭a>bmS  E 3S{o gUJ0)CDTȠ!R6)^y QWgR;J[ؤ;xпlPa+3slJV\0u]aۍbBo:-Ւ024mt&+ɹ8˃y).$}2Ɔڋ3y߯vu Y)CR[)s9Lޣx>yN)p;W&fG*D]Qgg$Kˏ2gfMؙ&Zc-4kfZn=if펯Dռʭŷ6wf"vu Y*׽^ǻ9Ɩw gi$fHbi񆽴)!p̈́AahP:|*v`khZ~2;L^3a0]d)yɆ}B28{~I4c}SKuD]Y巴-,c!9:C+ںZۮ!$"4`zwvͣ6Auaƹv2n2r:m:{֖Ց6P[::r:g8Gr Ki\Jޥ3%ʑ9r;s>]+& t/Zx{'X9K'FњijNc?F]-S7|bVŽ>}1LLrl=pd`ڎ6+ۻ0h&Kç6q1ֆG3%o/|b*j`91g‚ SN3d9iDbӌ݃mQa4z@w PtـTѧ+3r^vPCbuH ,yrt? f+Ǚyo"T[&l^1vC۶,lگMC[dXƱF'nI1aYo֛Y0KX*,`i)>*\1G %~>"Q`Bߍq?d-HYЌn4MEכn87^N=l>^bS4lZ~pLLHF?F/ƦI򅓿vO$|Q,jQ<`a!~ ۺ$LDi).2TS3y1 0ywϸjk؏YlvSAen[)?dSTjobUAcy&/'D~> EX!6:!@l|)RJC!P0nP(bVR%P(CU}@!PX *mM]Kz&#Lk-!\ixZ!Y@Am.ع_UyviKprUL3 [/l{fItƭ+S!R`}*(ҷEЭpG5ÞmE[DRH* Ʉz vH%US&=m͵`몘[:NP1ec:I%mSKQ4RՔW\-pjD^a@G 6(70)ǫ.K;_^x:JG{مTZ^Ahz%G*D|V/϶F+Ӷ`㚢muXl)d UN"5UBڴa yѨ=Sm0["5ވ1# x߰Zf9y!dոy5SCJw??zߝK5~ty0Jn>YZEfӯ{RG7saV \SzB*߯C;ۄJ$UHP^E^і@6p>J,8䶭q$* ijk}o0zuHiqs'S%~xcW&pZa5^=]L!ؚJٔ,Dފ"`hM[`OHs7W^Or#iej+佩Xu>ɢj멎C)*F z,hȼ*LDyePCaI[q6:O[ܞmW&6kv]t[1mqQ9x/= IT2(!0-8 +lZ6+Ll8w]5SkrJ Gj٭M0\.oKEUU5-ڰ|PnpYn{xRE-ѩs rfLM[]jdݫ:PY.׋[0K}0(LCQGS]̓rN~!RMY15YhuEۑ y t8=L^m](ڑm}ƻܩ]L!gԑ:!oS/C{hxK,>U<ܭyV?bjIP^{e )&cToH1EiGn3TD$/A `?z3 CAj)%Ӈ3(׍KO^b{EM3]+ӄ($wfqo.۹?3@קC/^@bis!.}y=6!]({|on‚U̓OeDOf)CNWѼZzeR-9Rlu,âO@H-/T0 $/7priՅΆv%p/2JEр館rsI=#Hn-];/2bNS2V7Y@`)8SQe=PΚ@ zh0FR+ K]Ə#X:xC~u݅ߘvcS'%*K,IC-ߑ*lKT߰_([^뙠S7f J7*{Q z7wQ Op9#ش/֓usSWڂ@`KF/yK-5B= _Og Y\\L7s胧1lc6qTA-ueutwæSYCbfuחBx}*O堦Y~FSR'fZϭ-g3#z-3jc- /PBtEkhW9ͮ)Nbt8[SmrgN-wYv'7rW[4 =ÜtE%TKJoVFzmpRJMfof-1TgFtd Kkl27^n *̇zS`u.owg&:IQUgihr S``RQVy]Tnevi*fpB)Ljh5))[8w{`RIÊW6^GlH-C+Р8^NaNjidi:V@ uMBqJכN"bS<7ʠHܶSºyo/o9yC豅]YMȳRJG|IURNh^ E1ǤLcyyn{0*p[*k-4N5244zFzӀlKtrtŵW#K䉼QE:*&6S ExҐԧl+䐗f*j{i5f3ɕ@IDAT_<]IEhh*WӠxJN8#0M򦗦GӨ*t/UW: <#مk[Bb ;lP*,?OKް6Z0 OJMyY>B()޼癋D%xX:$j(Wu,JIBDE5'Z~̩ﱴ-P[G["3 ̧"^Z^^5T >V+W?3*9Ku`6 oi=utg(]K_9XKh hLMŪ.!Ă CrmXv{)CE'' \Nn&CBR!y4l'G NSLyC÷vV׃<)̳Y W -crڻO5mmmo ]=̭JKĆ+`PobN5cm~I1KE҃g6}1By̲݊FUURIM7g3[K1TlU( tf {QLD8IN!?_3t[T]b)AzG'2TlƷy^i*-H0 ;zcXetB>8AϾ|96x~#wkT$UۖҹSC6g I[af0@ӧ[e^ȣ7RxZ"D߭7R՗K9ņb]4%ӿ?C1S;Ępc7rh W!oR۩r\*Ե¢bZz{{7;0]#m[~ePo%9CRZxJ wpu;n0 ms9FºN6e2***pF ѱvxLOf6qf)Uw&0ojXհpb6eSn~oJ8}>=6@WLg<{ KtNgC$vb>3S[e(җ\&љ<)hD&˺{QSP(ęmzvI?yۀ%+juL}g9[WґV=<໪R{8XGMM-9n*:Kɦ FHCwԺ O~笢ПߘM'qy0΃¿JZa/}2omw߱3q.Gs~&^&'e5mZ7y[?мU;Ew_-+j{x^_.2狅M]qϱ&0w<זO~ZC۹_rG[[O\kvxI?frA·4^ji~޸.AT69o{r7}``~<}pX@s ّ=Bɳ4ed_l@xWg,j@[~K7NRs8SXz9+F;ӊ-9Y-K5tە#9/OZ~`W Ka}S(~G֪mN%eh)/;RT3s}(& Mtġ%!h͝6qvTYaa[&j|DIe- &WC_.KVmO灷.vz;}M˙Σ)tmW$/OO^ڶA_;.;%Q&K ,Wfu Q _ѧ_6g'=C-35)# ) 3c-HVH\mo*ZdT߳Kj'lPDc'GD33 sP wy"wZPT RJM*i`砦CzY/1Eo=u[@p9qۈ 
9Ō7Y6($`q+^l&@h,-[*@I8i0) NK,qQZ8aGL'nRf<3L/%>‚D{xykEEe<.׍ LFR'azye/p*|Y[9| }%RF"O>f:;YcH&A06>wqspHfl9c,cɲ92lw[oꧼw$fͣ'ppP>ZMw*녺/GxA2SlCE{ n|?ftx j[xp%D^q0Qqص j jT/w7w^ݤl%~f顸AzY#U^b *߭٬B?yM8d_lf; ̿>5NؗWVrѵ!vXϮ%<Y|T3Hp˘4 ςqjݫsW be,ODq`?x>~D 59kvS =P(O0yO<5g24g;I.`oGX d2n\KW]>ۅ6KcF_oզ a&~qҸ>Ω~jDԕB<9vW4H^<Ǒ!$.wƺ_d CZ-"9Mh!@ɱԉM4YR30UbqhRL%mv@1ΰѕXLx:#-[R rP!AfafMSGmdݩG8Q_Qmy!bLї!]{у^2Lkϔx_le<$3%=^,߸yz#NSqe-={yTI9O=ho. KyFsF}UJ܁9G'a?tRSUsiCYJkM?5L-M=jԊ:9r/a207oo,5CcIvYSEpW2F('TsSk)T(ftty{R [+nX gA ֻ}Γ͗/+JVNp}BXPD^k#'nprMOn < {vg^kK됢)< wp`+QPU^uo+o0qI3-+:XUQUMy _dj_7+E(lտ 3ΊZG:s-|qj"^egCm@{*7o `|H&5}4E S+^^ @c!Opy. g4;;xvdԇg?iuUoF':)g PnT}mj8*W ]7\M Nסbݱ\/?vʒol[ܓ&hGH5Rt07z|ZbjCetP]+ay6k1Ӭ3T?-(?88JkW˜^]kp,}8C}'.RÛ@i2ԿZJ.Ԑ_])mHyqE#*cέUq LK$!h$Ts>@-1ސ`-^}n,%Ǫ)ۍrPdGeU.T !rdWy2xcj5kVNV{*l[[ɁtYRc@g( &oo70)4blUc>])$N ɊZڊZ z@!оO(<ѵ^OSʈiK1TlI?DeT+K%\YDS}EnbMeY჈0s]R^Xsm7?Qlz[ث=dYgEXɹx{*4<-~SXQG/,Y5F\H\-8Uu {HQ(m;>O>yqg7'0q Az[R?9v%bdaF^ʑo=U՞f[lYY+vRG4qJ%>Tqt-% ,)giѦ9tFͩ:zhA1,@w@Z~gRx]lN1_ЖUǾnYL1t b aѤQEV%ؒb V˴6-<&;Wxq97v:qV<n d@YlOlL~#׶Jw_^KMATp5QͲTZUO=#=樛"i/Fm#oT7o~U!D!Y45eZa ydY-'"L ˀ 1aY֖0m̬b;qmf퉠&C/E(W//ǖfze7o L koeP yU[hBV*ODޖ,Ub% y/ :pt֔l\6{@jk{Ȳ~jmOUn4?[}].&GlZNj CaHt 8oN]tOϦ<%$6S[F3XZNKڳ-Q[CLKX%[>Bu,lmYgO)*F ~1si7 9GQsHu)5繶܃HX8&02mUuD[ִ,cYtW%{ї3wS]tJT1Z^^E+"J! WQsHjb7J{7G#unM{ȲP|*oPnaC2Cl&԰m02iߎh ؚeJaf;a濖7DiZNPaovnZ6B\UZMaK,\1G=j^^oQ9]8AabE "osW5989[l `aI{8 Ւvy[PUVKim1=xX,%tvv2gxxрӁU%8Lj*)}ēsz ]XiRΟ?2^IP#=V/iiY[\VItY*LڋNϥԤMBًMi}ShjEGsk}4H-% X^ 5(7Z-<{d.qyx/Ryr&,i{4|之"͎m?F>M}%GsWQph(򜕫Ñ7/~E߼ȱaJO QfF:mqYހ g;f&z:Uǫ[[Fs+yZJ\JbX B5U,YxR>wUO:W4\Metvx#bgT;Iê?$rq ~laSE`^SSK鵯P$/M pUc7bS5A ]-u1K*s c C{]kjiI!O@f>tJA:i?J 3i7b_= zI Pmyu(1!}|sz)11 29{X\\L7s胧1;Tmup=>*Hw*%HCs}5&?m;'N'Uc|QX6҂ZZ0&? '"!ƆyrȌ7K̠ @;iY_!,ʈK**ĔSpjtw N99wxh5& Qfx^zqP(MPujZƏ6 U\7ki("oT e_p *.ۜ.7Sɽ"dGmkbuSDqY<:xh:`\l,7@ROoi?UOhz.'&q<#Ι) ^\NeeTkNJL>i(fqy\ E]c6J]RȵkT7AIPj$:3kN,.:x\iv-LMXhk=5BZLkMйTm'qQf|4VBf|a~6׷?1L.Mp브85M:Zv<J[| p}k(NuR۶@t4֜<L̈(M%xii)_l>m_CaVer\oB4&xi󭦅; U~ՓiYZq:^L7q6?BC|Lj`You5͸ *oذr>s|hrD`qH!55mHѡ[ٚ(= 6w)`:5bba&233D\6п1T׊fⲵx>b ]&_lA>1SOn A"Go4J;nC JFCmQjmؼϟk ij~onwkC *4C9 no,Dz.+3w.R߈8l C X;k|o]ZqL-3Ґ-2l_u5 #gf/i.}lli Z@V[ t >n. @ah;|)@5ׅ4ybdk|G˸RRQ>>}MJ*~ui)"=XO6fƬgwU?L/Zz'8RpsY\&KN]+YO/,nđSvLn/7ಂ7nhkk1 4n<_:z$8#O }"Ֆji5x:JMmAc͝-y u]+,EHzZV-0gĔ'[e-m+09/SVVb "5kܸ#Z7hDuP*,qЪFЂGY? |<9T&~eo7=U>)'LW (:S8~8V^RF%>>Fj׸G]rܸC%:JP" Tdeea]2啘6 Eav*'D_Yy+\>܅5mA89O:\BZuu}5O=٣ ;!1 :L$ slsz7׍q3T폡7cS3Ǎ'LBu/+V痹ǸT[8uqRmI̛*gUQCKkb'f&#;+yyy*'1n"`o`bMA' &\!IM`%o6MfS27m/l }ܩaXƍREx)&]t _nXC%⒚iwJ;LYW8T<,`}./>+Fƙu,:5Sh !7L9M9T̙**LL{KK܄vԩS9csL@s>B9n @w@bK^!#\)C嗛9֡K45miCum?̇NGH=n\˼Gڸ G* T:_`se9j܌dKB@t.FK'H< p-D@" L+H< p-D@" L+H< p-D@" L+H< p-D@" L+H< p-D@" L+H< p-D@" L+H< p-D@" L+H< p-D@" L+H< =-v#Q؂Kmw}|M.^d̜6C^7 H$"p`CxsJdKݘ^g"+--8u>Vop#͞{/|H" OPv;Mxz, Ia7^c^cD6NEBKkY4Y62D D='[4%q]o沸L$?D,Ae1Ոw>W$ 63"ǫD\ DW"ZV۾ cHVW.'k*er2H$_HlGkNGe=qX-TaJ.Z"d2=$ܩ 7v.UAE%oHjh , lƈ2H$_Hb[}_ZGZ.˖"G$9IPhS8 uA" @T_ ZrLhEu YPi:rĨ}DS_X" D$Ai_,lFL^S,u\N$eNEJ|:C.:۫i`^!'`dbD@"+#BB(1sS&&agD'FAm C3{F|h?*Ih;JlODW"~=ToT}z%*0CIݏ{+9loy!H ]]v$˾1pm.X|%@MPi'[_/e+OI$/Cթ0O הV-W޲HC " j mMG^{n0 TR~ I$#%* Yi٨TVVZ D"21jXZ=9}i_i;岸L. 
D@" z)e]>׷?,2l D|"j5␒ UE(E/2l ^-OlXRBF$Ν 8Tdeea/s [[,q{M= {_n%dL,.eR\67dN'-6$&_TpDPs2PVUt=D"LPmSjl 8b`jsh[FuuƥbUO#okDZ_=/yϘÓIm6;J+Pr _(-' 2hajl㱽㳈Uw!O_tUFءE#U]`B*n \}9ywgnعzP|Q)LXYgΔ'0A9Um 9BdS|ZOEKOnXv - -K/V,GryI{c ȏ=Sն HSAË fGV[2CG̖/ʅ) c1g$AXm>Wq#⑼waB\sjo$">WXL/TVɪp )',Q?7&~TniŊȩ3Gk{\cHPVm8Vhb=T2g,ÑIyK"p!S΢%V7Xr5%ap-߄o>_UbçR> y˸P0sdAW _'Q:Yu@d!PUWNcnU+8p̿=$A S1g0z7&*|jlX`MeHGti`IiDcS_Il+;O,+s>bt4c_Fc׿Fekp^(H$/?Gd0&F"#??{IP]{u_f˾t S[Uן޿kwu,{2@AnMaG͑X8k_u bUmVLύCe>}+zs~͕;QuPc^tߊqd=4nAWT"T%l8̺O8IoCk} o q&.%oARJs~jgJՋӪxϴ[ལxHxZM&Xrv\yL] G]A}ׅ8Xl}i..`WjdD"pq V"w̋^hEBhȌkA[qy3hڃ${,{:tlL$^ĠT7NSa4-16[pI rPQsw}ZC/!g­CK~y ܄5Ϥa?DZҽEd@#0|XssGXu^%1sFV5y9Ts[7.GbƔ4]ܚ~~U)K||JӇg!۠-X:& ;.إQb/[~Vj.Cs.`2!%g>s3 ]tAXMlB'?>׌g,e9u]xUˠMHˇn[͍QiźSHc71mű;Ђb޲.m%A8qq(b0kO;~&V ́o`ͩV\5.Od4t8K/GZn@SŖnĽMzYH&"3O\C$aS!x\[:jPVgǜ tD-vt܏ _ w{K)Fݯ#b?ni$џ6! mȉً($bueIר^WjSg[ME}>/!멛Ѿ3Ȩ ةawĹF sջtvq(ĨG.[k ƐVGW0!(V:,zYu泩⽥>V]B.deΐ#թh=j²g=6x%*;OLVę.8M:g%ɡs$@K&0 ^ͥmWvV?6 lN" D^huP2uz E{)=++Ixþ)ch1b:7tH|.&?Z32\!JDѕ@,NJvo}s&d̡Lq:&tO ^\e7q]ǴG5u{ٌݡl+zw9l4ǀ1hة$G^N{> $AA}smfBT,(L =+DNd-}>!¾1}[*q(7Ĺ m |>F$Le6*{*m q=ę{ reLXD揬Y-tLq|vV3ݷlC@D\| jqVptDhug B&ox$!)te_cf"'!m"oykϚ.ڗŋ|n|{|HL,wU/4w ꃣ5&,Sގ%YƲ*|`§{ot9Lg3uaj9'tZBS`1_3]6.ld] b蹦 "d@p8&stC ٷi$x)@IDATt(fP1L'b$DSB#+] MhM4UImJ_aIcp.Zv2F%u{e@:òwݳ҆W[u2r;ae+i*8P)KJ҅Հ>q]6nx[ߪ&еAYgUjM8zz)^#/D ݣ>ǨX &1r\o}X5gߠx͆ r}=gw>x rK ; Xʈ%EC2qkmr؉>h:'cB;rhTCQw#͔:l߹NXPIP}߾ȕJacWdu& LHS`W6>X:йz595,j2Ex(O5{jPhZTG-|q=#M b?_} ^$mՃ' 3 ͔zPWIr=T` CLXy?" F"lqlB5gh sw8Gh{>o({$3oF&t{keZ7ܭ_8[.(Yĵ~F+Z,nHga}rͷ`φ4J'H"pehfx:~ 3{p*=,KH6'O7b\q2_D 6҉ ~pkʻ:w*];p|S3:.Y8ܬ~oQ磵:a,t1 s~׵ %C؆=ZBs1԰#>i/B"v [{P6R%fFUyu ~D+F-B9A!%>{3gWᑯ<$*oK.6 r~S0\L+Rljͼ6M QΔǾ$x>H  x^5`V/hAݸkЙTD@"0ٙ qEh^-T'z̙ݾ,jH.56EL[mm-{L6N#rp^jQ^XUo/l{HILDk[;tX|1yDaZc7Mӛ?M769lv;_ ޯfN Y} [cǩX,1 ' %:`I)hS$>q4#_ppǏEG-˗Ꮿͭ[3gf}OSg'Vn> 9| 昮\4+CV6!}z9Љ>qi?4Cc-̎EΜF ~ ̝Ν">+M-k:k;+*,? 
"( 7aT^.>Xr>|!7}2jZ9O*>Gpzjh07>/>@]x8J4~(]U$seKzv,=Ҧ׃of=cNFꨄ{c6^AT8vɹ{efaϑMw`'wPnH l+{sC}L^ & 1'3LD۸koUrnHMn^I}gnxIbFoMC#].]o}>%}[U^] ʾ̕}s{+сo=p NO:L}WRDKJ9{j6yn3'OY<~m*p]cνE+\u9.7'Թ2EOD?"QFI4#k5w83eW-P^h;"dH2ܘs\8k:*9 EK/RflID<:3(+e&F%'\=l]dpt,LԋqwSƍ!L\E9'o'NcꊏPZ^K@"Ȇ?2Z Pg/T X3$& OqAh͢Np+R]uԨD08 %d2H_yr],?zZ28D\p]".'LK`5X{N=]ړ0W̡9?&U;jw Ͼ:F ?n92.ҞZfFK[hkhϳ`t߉@.3Ƅ7#?<*jzj&74n7&(--%nwbYoy_~̵jԞtM̧OɤeX%M t:J/s`o?0WA:?8#7QJzTCF:PluO:EÁ7b;s3 ,!g#r8Xr/wdguI?]6bъ׃W8WtkVw\÷𽝤`qM|h\'-n3!ŷWwvC2J9>8 #}"xf'BQYX'x♗_^\bY9QG}?(7TP:ʛd[9,[8/!l!Ddόqchǿe[XCVv&ב֛Yt\+/[Dyz'^'}9)c #<gˍYZ#u\1{:>h(% X̯I96SWy++>{WT \LK/29b44z2'R7\tMC`_%뿃Ob:GgЋxӠy"]|s:5Qoƴqt8guN#4$#{< ں9S&!-%iD`$ Mp3-!i$!9 q I@9TiL\$D@͡vC5D@"+gd.@" 9Y%D :P%V(FH5$p"X;C`5=-˖H@ιƅP,@" htNk5Mj!H$DulńY8\pބSbcc)w qȦ>M/6vN fb{oOɡ:ϡ2;;ƔqyvdfQ1[(iؓ;k"Z IEF[6#i|Q8!#vZh-Uɛ}ޫiaص~̈́iiAF<b, 0"ZzsE ?$[iwʷ3)*iN*Nïq'xxn1Hh==jDT-[h/l#LExcq[A{;1[#/ܵW h߃VѡRA'h<3솅*V]L/m0"ֺE +tA%iaޔ󧌦=ΉF*zEl@4oO*LcfCS{jtmq,4j{r]x37o{*ӆyHե޴9iZ:pOFU7DžWn ortZ˻\\G}v*jN~) RZYcכm;6s ؟O89dl0[D[͑|H?֧zL`.7>މ>ށ7{XuWJ;uL]kQ|G2Cy *s_4S ׽Ͻ FHn|6&G[aC+`4gR1NƾRd%s7][y"WEqɫ&N҈kLЩ2̘P.Sdzu]щ6%&md׾ ܙs<_U;iý/f+q_2CSǐq#ZH5mˠ.3ƉZt_ڀQ( I*ظO쩥9C{Y)k!5LtH].ǽε#}|ֵ58aAPmvN^&™t!.؎6 :f*%da&)cd zd &5Wn9mIu,q;()7,ƽųJrd`Ӿ\[m)@/]`ڎf!!G$-PMKNf$q*+Cgx=-ªm"戏T 'aԱbIbz/Xa'IB`b6 ʇ[`}ޓaq%Z#6Ln8v=FL`OV.ЩrL֙N\жIju48kvbzm,IT;nnN ^i˟O{ubǾX@cv|ZTgB12Hfj!A1S2u|(!-ـ.)~^1]pTJP^8Xpj(;!&(S֭+ Q?JPƙuwAI'k񒯯#kkAnus|>z8TGRCP,Y"zF+=ܫ.L2QJT{I>7@6*"Cpd,Bi5^&>)AeeCgiEILzN!,*Tҫۨ?툉 /$8]t ]"UWE^ SrN_s9~G"Gu,yAcHT`UZ+.x>t2aYw_jЌ6$1.Б|}p .ɓf"C 7: NUc/ZQwu沸ρE``\-u%jbYiNg"қt~kh8y8䣑@WL9ɡFRȺ؏fNiJZ$M tD/ 6Dq6wedD02\ G8-HEܗUD@"0L`*Uf ьi0\IP![/eBC8/ZO񺼴n(Bb l[6 Ts1oE[6WTGho h4h}iopF$<U_0}/оOg0?Zɶ:D~ͻ4W6Ac1v}Hb7ٻkK$pA\EiwDThmK&dpغIqX2{$T_yN uoibpl9v$] *~:[4a6OmhfETPM;FG,Ae֧ؓ,(Tu&Ի#v6L RH&9T1PރVU"*2Iτ-FG IL]['2ZΣR[LjޡmQ*=>^<-ӸL::u9C 4z HO#i'Uer2E O_0C=آ]Ÿmbsu㨋8J]b EZmzW>7{nsc:X-f1*$1" *ʹըP쎬P`ߩ*Q6A'_ҏq ;/Cjzv_U;.'{Oaﱳ^HԗGzZYT1VF$An&$-\Vs[( p}UC/Imө K~XwfrcO!1Thg&l6۬ g;\6A'o;Q_:/1ƬN;.Tqp4XHt:GkC\*s o+esdp"\/V 5Ze!|]V!dJE*7m;R8 =׺?<"67Сܴ6Īci E*޴1+ʃmTP2"@4O^c7B ca_;W։H٠%SSuxa)u{e*oloBDc;tLSz20=[e6l8YϰTc|a\19 ~|y<֞Xm!VEZ`%/z^x쮤maC{so'`_Jxx|tƊcHoNyɎWօ;p g95S|"\눘>{=K S3ոjL`JAkl2bFStD&JeS⊄!Nt44,NXS-.B [`Ź:i?` 8kC TTs^.bK^=Z;1腉wvT -f؉ QͳZIC̕h)`ÇpOtzNz{[7Gg;l--N NZ=Id-^!bŁI*⾆~_w/)Dݻh;HL/d'q=GOlFThGL'Zq:\wxܰ AuS{Rj|duZ2>8ɐV^QonJU䞈w~x'^?l{񌏘-pF:HAr:vdWY'uZ| ~Iش[2q$CIFA~ۗxg/[ݎzS,}F#oϏ.i)DɲnAliw>)ro$LY=QWV vZ=6W!'#eX \ |'.9g .*SweE{¡FBq $rr\gvDQIj24 %vWR_]xM;NȜ⑺̡&###M6-]N\"9VǠuG\^:wPkPh#~G܅Ou3ByDpL`ꮬhSJd畣5r_U}֐AVr:V֝GIl8yH^EP%noK-ˆaB1( 8k4j8QՌuFl'nSלQDˆS~$854| đϣn_H-5.Fp̮~wNL/mf\֐QWW i ʍ]B|s=sBl0^lpLf^;҉B&&mu,D[_$ǟnc iZTvd$;|| 5Ƃ)@c*2mܐ&jR :O(gv!0A&'@C=^Z&^Sl<4fhˮSjvqǁԵh6&蓊B>!B.IQf CxH6Z+Hh϶|hƤt`}m_x89NT8l~6 J6N U TrOAexLPm44)^$k=zI6"H72U(6܂8? ԡ 54TCyMP1A:|l ֐e,.{h"d {AojAuj ͧ1c/MS"?+iHI\padQ[;*,PPSQfNP%(E\eEO.0U+fHڋ8c/^jX4&ĥ.4oMsn9ˎtk-mxqTaT^&3B\4ö4z14ʁ{n^xS4:T=LKC&$\沸L.̟? &JZ2ɹ?KF2aqj7ANzN,l];CK/n][e-+1˴c0'7EYAvV&'#wy,Z1wiěǼo?;Bu!%lvNI1|o-lu[3ʠ|2cq&f :|m/y2? 
eIRR"2ɱi!QudJl;<9 &~<9dYɂup!قR%"]׉8Z NE!E̙fef C.L& c ]A+Y(M ܃3sPC$F5D'8fc;R^)qqz9 B[[>IzTnB˸i00Ces -rv.3Jk_p=C JDLK`hBZ&3 JFa]#xͫh9Vڰ4k2PKku ITZ,98'|Ԕd1[+`+MyC|‰i!?T^bfz7!Ʒ{eJ5Z2S]o |+xj&,u)mJiꀶ4sĊBM92Qdcba&2i\/"?-1N'8߾X hCB-޽?g䴼MIrk0V#RNb[hn*} qj2zєZfX7FDY:'񍧃u 2Q+z6Y{׈h3\GV4o*& * ?t'\Hv9 y$Lٟ'QEE qv$| >}MV-$VJBx27lhAɹo//_*v50>đ2gʄbQ6U7ȩr\;h/z[F /`Gd\[cwe?Uqi4tp|/Y)0D +J->":L@YgΔ  9R0 mf ӆWnK Dv()}A%LYbS1-e+ؗn!x3w+ԩ-h#},v/BPL#gQ/@/i2gĔ6uϯdr6:žBmn_p`k|hB9\̉zǕ,Vퟝ*/^Di|bΗ9JWk^G +&KNZ?^/Dv:bD~,qOIIQ0<A(8TRǐe/_^i pXfi|`ǖULy~_Q^& ϐ_&:ۖJm󦎃 T_pbvD[LQS<4j>%#~|8cmȘvP>XdqCJmΗԹ tZLР!S~tl!fDb'f& '<6n#5HH8Z|ߓbJLǓ%4&wq{ 1O YNP(-);E8rnU;j4Y},fld'sܩԑJ{9pߏкṃOG>!tD|hc /A'\&DLb knnʌ5d\qHc:S&|qhрoq\{Ux,<\q?1cpTȬV8T&FLL{K?$Pr[b4GS&,Uw)^[4=Sm49#2 D9Bݑ\>D)+^mׁA`=7??:?ZU?x&AoOnS R#CumqnqH}1R{6}Z*6щ|e9^{T[﹅K.Yw\@IފvuaR<}8!mbHk) jHᖅId@hVsM-7ǧ-B5Խ-˓D1K `J{X~suDXT+).TԈREfE?>قUo#OClvIޕKFY <8/S"Ku v@(ݕ+v\pwW(ᮌڃ"ZБq@!OkZ33]߇#~fc9Leb˓ !PC,P"02sz*`V4Ѩ-KRIP,T"Oca9hPc\HeĥrTʷjJ ! 5G6K+u3r<_C 2#Ň@VE+P酣06Wl-`bRvYD` 0 A4fwyxݧ Rҟ2q-[" 3r+⟨7|ЦnT\IPސgD/xƎNu9lh%nIͿįv!IPC"p#MPMZv ©?\5\r%#8'Am #AU;HcMNlعGNBMC#&΅"8(1!i6a<-8}x,n7F7ƹ%nS4-?< !%;;cʸ<\h2 Sm^se5M{{r'>}͕X4{F%MhFFLS³/Oί . X2j۱v:v^, A%6.5x+H hKEvJQq]jkڷƲv")s:IE}oWHtRlݳog}$!4wb }ȸ/%qe4v8QݺP׺ QdָC]NPY\ki5b'C7# $F' y;nN78ObRr}*,tjZn.Lœ4ײu`˔/O"=q=͸]ensT(k۝tΐ,mYA%ef8w ƉsO"K&tc›<U9랂*~Ow:蹘TցL&44 kx{N)6QQPiEZڜ_ w<̝}9ʺ&"Uu0{mm3!w>Bύa`'Kfܰ]Ii%NЍ%p8)?C@\1z:zϿIXەRe8WY>-^>V",5pDjj߇WWnǣϽ%|w>-uV`Vo?t4M>q&Lu1%n8و=4.[کS~ekk{x7+uJh&FWV)?}>{788|-4T. ¢bC[3_ Q BۗU -Ȟ+YĠgZ֧. }wgO\s~G ՟?u4<N ^Qըr!A Z0 u!y)}ע MR[h n?T+q5"׍ n9 /1n,97H=U[_#,4158.T5(K)o'Nvu,N_&-bb7_5DjZs{bj4\Zm7,ƪmQNs'!35f%*hʇX)f4xxOPFc7Ә0*Ow~%ȣׂܿreӾ-CL><ƔiJ{c `wkbD^F*vIݍ+=QrBq=-C ECT;a/n]c f bʭP+Nidd9Lb<2qY '^)zA }_n`9cpT9lY2s6pfM,"lEcSuTB~ׁq̨?V.562 R jҀ`ooobӆDž`q_!8?(9D I ШB,:x B _LXlKK2-.T^뵘/k`E/Uf79Ũ)cqn$1U$"6Ӌ1k,ҟ)#e?z\J*_/B(dN/>gw 8K̙e jȣL89$z\_w8Ǔ =Cic D=1,'Pۏe$TKH Μ?+6@=c 2=&6c}t }U"*ӛ|?MBNMԕKM.tuq7-s :-.!Lz?I/d44x_nB5yt>Tp)GE5GxyP!Ey%}'UIl,&Β|7D)5&#X$g8t683NnPYz`g̃]1ʄsmf,1VP[^,q ,2eBL"Lq?y|qPSnpuȠO=q]Ũ%2gEM8.& f/B}|voe$a%",¬tKW Ս *s*e%ӻ9Y&uOJЋ֗_&sHs%q&<桮b"}g{QIu`5\'9x3G $&@5fB+ujuʊ B(+J3.V sp0ZbV}u}hDyy9|-<7"k$cA ޚCx a`@IDATE2ݎ$&2;Fcao"_E>WyzSYYjc+PDR=8-|vm57a[qȩ` >H(Key,rY_H& 87<~QIPfOm}׼M_yUms]DS_ŋ-a ڿn`~[YM i<^SI.GA( X1zlCmXDG8ipD0Tv"`7ϏŹ'i_)&$PVX{5KG[BYWeIdiew4:r3Xc'j/.T{u3ophg-Wdx[f _#=W̾< ӃKfLefV'cNŸ,CNbW(pyS#lx੽vsY7{O&*[Iš/E:Yp]}?]Os ljەBg>{g@yL9i,N t4iN ΞidI;՝ݬK$vo;*I޻%ےmwlܱtL 5HHxyyPǀ!{^$Yʪ7^WҜV͝99s EmrݤGoO'~ ʜ}%*P`V~pzpfW(& zLM}⡛WVH%= ӎa"@]T{k%$2 A>chidH3>,*F Sϣ^fE*>yiIfچywW 0&TGi0׆yz*71xk'˪,~JIMk>46颰ŌpnEgL:Kk/Yu 41O!PH*j>%OǚoK05-^9ձNM Qrb_B29 O`l?W_|I$v&G]u>EKlUk(TSl y(q~Yb=]5VJdL=g5*oܗp,G0 ӟ&>CY?XQH!)Pz$s!> ;&^"0m p$)P]N&. :,H!PALCU}kPxtIJb/9TJXYj+K8biᾢ,ΊRx"!dDœk,s0>0V}(n;\ʋ6qLr4rn7Ee n:xBN_NԺ͝8̝ttª]~@TE՟mbJ Iri$SOҾm3xݠ~E4c\Uu:׷> 9v%bd0ZyJ#j7RӚκF>MU!ITG(j8ΒDMtx{}u5ݲlvSK|L+x~~flH_{f5ɔ73*>!Ѵ%z}n(Pzz ƊTss FS.zv{5յu<M߮cSXTlg~+Pj9F#Jf2Q3=b 1_ۙ TEqTjn * dު 4Px}s>iXDAwT>r\SP =StҵJ?1h,LC5e+ֶӝouD,HN5StHv34Q!l=,DΐkL=k8fqOg)Pz.PCᵒmͬIzܤMG)22~0OO<逸%$TG9݇BfzzQ k<*g_nGA/:~Rw{ 4.qQ6xPC xHe _5 % "aH-C)/PE]WCu * p'0ˠ:z -<ζSxdN4X&'WV}3M xVT{y_Ja4E۩PyTp$6K*鬷 XU3!;ixUvFjy.+&戡 BO v(.Sx,ijvWt?yJCf}LUD7OQZznqU9*m@"5u7{0"py^L)۩-mn%4Z>mǾ J}(vwuR{+Ձw_M:v:jCʦ|ɫ3|z{LTEq #8@W|s.uf>|F(gve*:YFWpz_܌PnI'&;SC{v?!1v5#l4&>g?m<`_vb8NX^x/)"6rO/ܕ8KjG\nI獝dvͭ&? 
EѧkHW_9_,NVѼɼ )*&:3lX;[o_53&554p|g4*BGU|-4el v\YUJ?oP\%b$wb ։VsPh,Wl'$g'j8~BT )ϰ;ԻOҍ{OjP0dSg{  xoq9/tЭDBt#bS_v{ Dk:=k`uZ٣<ڇ4<}8}(%<{ uycCݾ-#tMtj#+FN+ZgV|OgȤ?l~\Js=[ eyWCZϒ."LƲ -g=4v:[u-mim#, 训έ2~7VMr~)c 5cB| _Q@QT^qFb_c clK;铯rn{0ڻi(;+z~V̮v`u`Tſn iM7-;GVcS*ǔ ??K\"ŮŖ}v [j{X^zѥ?[QKGTnmh^DtgkGۼ$ty#Fnz2.VvSRDG0?o;/ԷvѢ4D QWUK{?CRWQjMb _W(65bi 8b*@fPqb5D"bGӡ/~:X2]kP+BIT)F;h߱23޼8ϿW"o(?޾]S0!LAc4-\ͩm phzTmj<(m&*cx? ͧ=uJMN>JslU\~ttJTSo&%pźY<>^UUUT/Ϙ{-@F6_I+_0]o~X"]?1RI쩥.N=6yZC l~_ C-O(N.St߷QOwL{rREABXF_ʫ"\;U£iB\3k8@'>ެ p0Y~m=gx_";OXs" iCh"_9.~@x񽤈@%ǰ#b&`؝Jz^P"ٸ3 nNzi.>=͓?sR :&j|? h7XXє+8)G|VV|,UQ_]KwL&4~+P61&jPJ:{OosCiJL5%FO mBjPZ>muX ot>^b4tjC=^']g)3^v@>K KJ&hzFiyES>"~#^ FCiqTU@M=olNQDA YNד|P|z'uv>yMZ'qQ/O%&e̳8>k8<-L֋N`S TR'5tjQҢi)WK-wWl gE[E Q&jȚ zm-6Z8q8ç,օr/ͺCk^ 44yiz=+0~]0<:w h/-+"U镧 gP%,))14&3{už6o6ezhlf(e5dkjdU Ej@n CK|F!@Oj}ض^q-k!)vSuTY+R.{,ԋEwppg}ض nBPFK M&/-zS M(n~eEvܾܮc~׸؋հ' T4s Հb<ߗϛcj\8:`ʣaڜU:Iцjh\y *55Y2,g Fv[$Z[ߣf!‚{)綃ǹ.">(Y@ .cvs[yIxEw;4(른NJ&qHvRI;W_I]TJ9?aQB .e%ӓbU*lˌasBW<:56^h Kv]x=gdX5j}K0C `DPx3!?wYRQGҊt?OUqLvK[9lP;߾J,g1;GؘhO1WP?vB7in`*6kj7/ ej';/[`Pf a2]!Wy-|66ңϽGE]8 50WU'ún}x'QriE~aѷ EZ` GA?ӏz~vU \sf +K~y6< W yf\u'ze+8]annEahy)LH9 TsB0h,w{K/}Hc2z>ɱV!-;C#X ?{!=B~Ah+E\WG8JR>]P h|fG5n,_\GδqGyP͹C[׼EqX<*PZXX 2TUMgNұeEm*Dj{Z^SˎV)9)҄@E~ԉ/ڍcInjϮk_4<̓ugC+D3:𶶊@K.#as|&̛oċ.$-Z>hNx|nGvSs3Fg||cw=.PQ^pn .Sb KG^TRYta4R/[eP8f3!LQB^o7J>1~:&!H-9 ̬"P ^h"&LZxAႆ  ̱En wl:>^s9oOk)*^nh .ꩾGjp<'9O2GvSy,LyؙSp}f Tx͕4|V>I*SJoe.ʛ#_ [u `%@C\ѫCRک@hv$?F?hG1Ge4z[߱&ﰗ%KӻL?)XQ@ &@L@ ME}?7n{Xw˒%~@W{p9zVH7-˜R p^A͵GHB3[C)3;fC TɝD`=Dmu4fzt3p`K:Ta@/k:7b$?yU4W($,bX<B To!-ˑ)XdϿFg/9WHr)PK +z鵳PtR\֢a|)P,K"'l't Khʕ&/ t_~RzXDT[CE4{7`o+F T+thwǢ4ᒿPL_3 (fj/}Pe]#vˇ8X.\Dq4p*-=: @!L'=NvAB,Lwj3 jOO}g?)10)'J^563 tiIK`ț:§Ft3'~4@z,6/ݳ\9d|uAF҅)Rcp N/56 j!!3pztt#N˼Ʊ4:uV?[`t=κtO jv;,K7cnGYfnGG6Jk<sW(@E7H]K#aA=G,VCcGSLt+ȭ SDˋߤ_=FM< 7L'@Ϸn")8DQ!}Xy҅u%IHNOPkIc!eM{v9v PxK d2ѡ#)3xװLF̛MaaaRK {`}ҩO٬TDč<^$/}RRvFu-Tv4ڦVQvxx 0rUPY+*ʎJBRcٵj8Q_ Ԯ.H#"wTIo pzGW AZWINӉ9PQK,c@Eel I"iZꎰ}%*97W´є^x'es1fAFR'_ 4 $I<@~]҆"'}TFP;~+P f,Z DP&fㄬT[eՖ?hf2DH}ɏ%:80FS nilg5}.d˘GcԱHc^eF lSQGI1-Ts!@kNK0JYJ),@ѝFXɝ@@ Cu O`n=9Pɤ#56Ugbʽk ~+h|,G-Pmϰi,g}l/,3O,T(22 Ȑ8?yD;wSSsחm奖wC_,h R+:j=;AF& WҭP>Κtte'])r?ڈ$!X#SDX~[I+nOO!@YcRd8CbeD=Y>V 8ȖǨQ˵ kX}8$*c;:=WxL^?![MxR{ HKQj;J<3 [Pj-b~bYh6 P٬FK#$!U!ԺPm f?2L!\k-/jK1kXY'<@-FzճWo2oq8||k T(g{)AŦψ|uT;@ՆP)XkJ2L*!ݯ2/@,XO+¹:!h [Kpp(i٥ fbZ=l$֖h^~-}l)go8O60{vNaaЕ IPg0~)Pa/BJݩA(+TwL̿6$M?{:ĺ?mMg X3xVǎa5kARLc .Ŗk61:<|M=b`: JE`*J #GB&۳#ڿ$[LIqTV9RBz Ү"BpB5Qb ^DZ&c; ps]X`uC4@ҷ~0V웯bbO61XdmOWRwGy˂;(ӻ3|6 4~֕-|<ãB4$/ah(?+>*cwdvhhU]4De, sLձ@4ճHGM mAv:Wn6yw>" AiHD KJ:bc)7%6C#책L[4.-Qw5vZpW{le>{a25vbC pk!:Tv y~x"BzbbbhlvOݽ`-PVN2Q6xn-8L?K>#-/8%P""hKIIITK.͍QRh1e+:tK;n~jt4dbL=G+Tf>"h46!"|D=S5%)J+=.PQXKJbtt^ORGOu}gz),4ξ(9)DY(S%%$CB"51aHyN 1yP1M;ϩhqqq,4VVGu6>lc +B#eIU+ !:t + NYO/2UQzRB)9kZ644&2"P@*nP*^Fa3P>pD@" P %c@u*HT# jdBD@")P#J$H*P" 8F@ TȫD@5RJ&H$1>D@" P %c@u*HT# jdBD@")P#J$H*P" 8F@ TȫD@5RJ&H$1>D@" P %c@u*HT# jdBD@")P#J$NB¶3d>@mȄ tNDT]_OQLZ|oX >mvp3:DWlCuPBD(MLS.\*rscs3=M!}}.O mEnǁD F#EQH5!,/~Nl-50%Cxʝ#Jp_\[Z蹷PdD8夥RM}*-. clW}˧F?Մk }ծnohlK\d97\vZYxcW-[:=VHpɊo_DS]w6{Qnfuww3%t%d/9Tm)/+~̋.Qvs4)P}G{hڄAŷߧѨtI z ^~n:h{Pɏ삎ƚ"Tv%z՛>X卢ϵW|Px-9TL7_B\{_9S'?\\mhٓ&Qt_M+[b.UT-&,MBs/~+GӉtC_L4Z/r׬v("|˖QTd$=nhzwL5}A1|8v4}fQ}hE-|G@c5wOS k66}?88ru:!318Eo~+,pZawV~&u0kt)^^z"JIUKoB3&oG8t(^ "=ֽ)W߼zl?dzGeW;VXJ_=@cܿҔqcE ]>la|`RS,*lyAwf)wfCno,s(LѨ9;[O_w~\2q%ЮCE7_GJPZ0s?bmfkj;vʅ󸁜Tv3W}M )WxC̓%ej-'ZO6mi{5k҆5hC$!?4:Uu‚]6l[om$HW]%6l燃0=YNvb@ajy辝gW#ٞwy yTĚUHG7n^Ewp5|| ]mĚjWt?}4e|>m޽o+-)%>r2{-?po0Oq_|~4MڽRVB5PHxIV.N{;AiI f‚m 6) 3Ic SdV#-vh8Xs@-Z5t to9^wl 2\YB09uƱ 6xd>+nG_]LrK9^>;: dnTѬi"?U\ނB. 
КyW9SҊ*0sTQU#Lԇ=F B%9.::K/h7B;a C(IP+if hx}j2 q[[E  %sgx}—̙i1'fH#?%KNS眺GI|j2esXѵ>$| q#E|ᣢ:EiG壻51:nvaCӂB+>y܅Uo{uT؜l:]VN4T!H rG zXB tWʩݾNԚ\+ ]z0B{ֳ@o(S/̀$m 򍯋F5uCeWZc yZ (0I9E$ٴqIep f8M 8rB~PF@ >]l.>n^ T` n`;u ^ko ˿k!͞.>jB7/7]5>,x¤uՒ \xa{BP63d>l ^}c *jjaƲP6͞1P{-nS=@mk!:ZpJfQ=5p@A /QJhؐ2wqrߨtR5^ұ!=’Qk6p@غIg*>bgmw5/v]⴫yڻ؍WʶBFGYxp@/EOOgcgz{7$īڪa )a,^;CFE;Sf nPk/Mr@[{ռ2/WwC==ICC-UN.sHu^OnN(g$܇V^DU2(}j@5aq ft:? Ttvw!c[6 $H ̘,ʼp&?P~ؓkbpDIeT#Qhdhu L(=XLzdYBA<; !\0)7 ea )A0;L*6rߦ3̳0v/;owH_}! yF&^!qijmf<^?xk=nS`:%g؝ݱHV}@Sw2|  hC\|w`'< ¬!4o:y cWL4QX1joK;{o .UMԶ=`(\p0e e҅c6xE&;@)ߜcg{Bq]UdXLaJh -m 8G6|}S1c _+Lb})yٶW+oڹ00O>Ez30(W-Lp0`b\nP1%$CmUBol-;luLK1mNCN!\3_ztʫj :/{74t% yّYS/WgS nPSLBtՊ%wvJ afկݸUI}9BM׉g;r,Ep.ӞE8r S]kL`B!S0#m֬%fԵbhEм;vṖ (8DC9ӿMEĉ8<;t>ؓI d62{c/??V\XT0*kki<ۭ0`_÷#bl}hZ~Q fAigK;A A|S11"iu~aNyR@.[6ylF-'hGIDATR` M`{jPt4/`Yv=~(y5,pO> ư01/~q#}2 ֜41W.]BQ+XKXh <Q{A բ ø~h /ZN_#<˓N!˶ bq1 :NK{4]BGR !40?8:*R>n j(apf!GA!4 W~H谹Cݺg͛6Em_k6r{Eht \n;1MOyG0Yxv[9pfhjW$YpL)KnrG"0B@@+I#)PGv˧H܈(7&H$)PsgH^[& g@ε+M" *RznYD@"0u8׮|6DHUeapF@ \$" WᖅI$)PsgH^[& g@ε+M" *RznYD@"0u8׮|6DDvIENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-selfservice-flowew1.svg0000644000175000017500000011423000000000000030461 0ustar00coreycorey00000000000000 Produced by OmniGraffle 6.1.4 2017-03-02 04:52:59 +0000Canvas 1Layer 1Open vSwitch - Self-service NetworksNetwork Traffic Flow - East/West Scenario 1Compute Node 1Instance 1Linux Bridgeqbr(1)(3)(2) OVS Tunnel Bridgebr-tunOVS Integration Bridgebr-int(4)(5)(8)(9)(6)(7)Self-service network 1VNI 101, 192.168.1.0/24Overlay network10.0.1.0/24VNI 101Compute Node 2Instance 2Linux Bridgeqbr(19)(17)(18) OVS Tunnel Bridgebr-tunOVS Integration Bridgebr-int(16)(15)(12)(14)(13)(10)(11)VNI 101 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-selfservice-flowew2.graffle0000644000175000017500000001346300000000000031277 0ustar00coreycorey00000000000000]kWH<zi,_f3CHfLB$;s8gh#$G!9o|d -uKVTWWW=׋ػ N&?=>yI3mEOO?mg_y8/yOwvv8yy{fq5vv^qGdY='v_UQ3{WV ,p_~x3xB?H,w,8=Cl_d4Ugϔ̌y3t u3IРk^FM8= ;џaM4tp5& L\/ŝF5jv\+1zFMHE-NT{GbT7AAkcz-^.R&i=ɷPwg)=a3 vLiN "z?:-4y)$>>%ǧI 9ߡX3 8 [L, $Y0wqF} OD3dGgyM@AIј#qy0>e $̯ǎ]Uǃ}zjO6L|>.^9X@Tp$`Jp_(!.`J*kJ1iID[i^ef6+8kHָ>^5(%4=!c%zENO*Ui<%J8cjjf5!dZ:Ys H, àu'oQ! %tBVY]kpb @kȝ pyWz#o j7k ٔ .n<7(:ۍ":I֬0F!)kPmp3ӧןM;]/&Ruð/_ vFEt^攎f+iGS;znl^XͽcushҩޚPkep΋"EJ܇$E>-ۊ$ȻYiNSOznٿɸ8)Ly"y9O2DC^C^C^C^ZztU>ĶfHb3SA N6;*)RZJ[ʴ@u%ؐؐ#69?Ma7Qu7a O<͔5IAՐ 0| KbQKk@ e9a>'2k ˩j3_6(ūjSk[.վZ[.6r8,ݑ6V ς淉A)ܘFE]?Q<_K>Efe&(R1/" 6(%RRb 0t466r!Ќf4mlc-f.ӌܪ0mWk3'[_HrʥU^yxm~'PWkS0KVڅG|)I5wBj[qWDHDȇr)VfQT voNAƃaXWb Bv#NXEke6}tuFn (#S!3Vl?9;n5͹dΝhݯދ,jݴV}9ɮ^8h.\ NF N( R8 o,WU\ևJ s s?A =vLE҂JIwe.2l=ȣ &Aޥ\ׁMٙu>#E"E|BTR 6ti#TOTLX449)MrWɛPTI4ьrm68nmHlHlAlzfϨ[#3BY VHl%8Mi=Zj$gf}Ѻfjlhi0. 9LqÔ0GٜtB[n\㈹ ȃ7~#HwHwkJwP+w`|͜I I>IVr7 5m-zЛ7)CV҉.c+P  '069:Ax^),16ne9ɶnrj|唕Sft!&@i%T3IBj t*@@@@ׄ@eA:(`=(‡mվgWZI!&E~RnGLwl>gi`1~񯖄Lb ?R/N;U}Vx: Si +R+ԀV xN001bP+6{շVmc[Y))7?H)b0e`D\gQY)L$>vJbц6ii6,ba1Ŵwe2>9pL mz'ڰІ6[ڰ]І">(%JI-j)問ua-wCT*ۨ9me8ƃm̬/IJ} +k.ADΊ;= * -Aݩqti }rK~!a߼g=9e(!|d#t jc7F0̸eLŎۃA Ai-@Ƨ۝0<:Ħ 0Wҽ-M <3 ysH3wm`9M٣L7 a8TIeDŽ1.Sgqxb]/5yx%Z2=GܐM1OM!F)[}HLp | Sh&Vjmo5 Q@ʊvw!*d- zf|ӔRRy! Q+ IK ss⏊-9V<OЭ+P.?WYFA%*⏊z(P2|BECCG tD<6Gfuj Q '6Phhmk\O.B4#׺e>4vݼ6&WmLߏ\3vk3]vӾԋYMGn0Obw[l vgw.>@&\vjʮiH'm}gVG[FE-8TxeתH|O0 &B;&$@# P%;1uhTzaQjEEEE׀E1MP`KKxv Қ5y.grmݑ;[nRLRYkv<`+Hk&3K! 
" " >@\j~D;\9\ prѩIK Fk|T=ѫ +^<dE~K."]rYy@*(@V}k N#B\*B#\Z*ZljaLpgJ~#}MYi8e<%+LYGZEc.Ԯ-8uidk qbnY̛k@yMMM5I3=UO5HKo(%cT.6K* եVnip s4U! >| =^y𞁴uQs.%+%K(/ )Rt<aƻIΙ=9uuy8qrko8ȶ<8X78t-6rM.WQ3ruɢ82 _l4"Wj|3o_V.(Z +Q:COՍZaM+XQ':aʔrp(I=<{yP0lf0֊]o.f;M+%u~p<.jeh{9+6blW7^v֦|M +$>l3ʚ(yu8PSEFP*'>)fJ΢ږVW;[}&gc e a1QaQVO 44 [ߐ8GHO<>P*ަh˱7=(#TmSMG͏Hs{AWouW?iHtNi;T0dfæ>wf(y4^y^ 2hhR*MHA}c, hax HC N \P8J۫$/]:A]^ӫ8:KfP}hIi}x(ʫH$AgBiոh3hTf]΢2LsD_hJ{脯<ʮ*&Q}eEOVuvc:݈6lFQy&dӺb8;<˳n3qR^I0vz6~~9kЉwy$*Tó -TJ 7-m}mW Df n-SsilMeM՗0*j$'LR+]̠;PR[#\?r2b3 >1H'7)E{././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-selfservice-flowew2.png0000644000175000017500000044536000000000000030462 0ustar00coreycorey00000000000000PNG  IHDR>Z@sRGB pHYs&:4iTXtXML:com.adobe.xmp 5 2 1 2@IDATx`UߛM IoRPQXbgbA,@ѕlB %vDĂA^SwfdflHy߫#+&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&PC8'ίQM\"<k9#2&1Ҷ$ˌqv3zL[5ӳ{x9|LV>C=I]aR$#!NT_mvZMM T{%yqw{QUf흝uyx?Ƿ3;0COJ$X %)Wr=xTSJ: ܠܨxŶ! tO'o|deѭ#wɦцP K RiU;d k!3 ]H&U!r%F`R"]@[<ڻY|C&mƚJUQ9tw: f@)z31T`*ȗWgHR쇊} zwݞ_ }?ŀUwbd#z`dfR~RJA{."yH̽6i#[QHNõhp|5?QB+֔ꪤ: I2#ivHNm)@y{nݨ[cP??\6D >Cٗx`H!;'|t&=[v?uIzM3fL<fRsƧ ^lAlj_Wl0uY(7/@Ox\^4ԌSLӸ f:ioܟQЫ噄Yw^֨č`$]]E؊  /9L:Zu{Я&P=w7]忨\Kթٳgȏ̅';G^]!lal])T#)2ICJ'лq+ag_NNwksNDsKڍ&.*R5WMT<Q?PP?Z(TjDa`T#盝Mx* &k7W`!.Xbfr?Ï9iϧ 1Mt܎)_ƴ3MJϵIu xyvi.=s͠Jn>u4'aYY+1N\)JO{qރ86IZg'pG&w0E-jnѴ`hj99tDm/Y ,b.DLu7,Cz X7=mޏxpd25^;5,#,'0^''}ȿT7,~ʥ|Vp5_CY eN49|%7i=M7{7LL3W‘gdGp' Css34sP4vUkHYnpNg< `M4N~4MRs=g*!Cxcs}imsuhEېNGo_B7VH:`ûo5A]]Ynuy0HHk;e\tޣĸpQt,?^8ܶ\: NJl}ݡKNm1/B8: @&"ܽŕ>HiԈ iDtFuB /1T+V'PﬡL4NT` 8TS2oxl;zy.՛h;axTmt58S*pu5*+_ B^jm?t_V oE52; xnÛjWf*P7WQ^gP'xSi!yTw^BԹqJ&=ٔ/m+aSF`hFu0_4J`RM;zlq #0տ1Ց:0u/[;Y[S|ۈTmW8!N|sYfV)Az:FIf'8ލKUe4EOxN@r1va.FcMSmD~TLG9d7Q$\Ue']7 =#}/;urP4Ovv+kuQS&Nl22Nt y[@nG.h_t,?*eCT=ݚ-eQ;P@? ԗײSƥ_i}#" 20{i^^ҔhR=ӟ H `~pnmПe=%p95ivZK|O*߁˱uFAN;M}{رw)Yw)Ӝe*s1_/9֝xvǾׂBſM aqj'[N63ǡ*XaE~DMSq, (;>?PW܉<У2;4p/_w =d&ǟF#n*bY0S{dMݬSyF8Ue~cm j+ZLut)ՍibS^9Oޮ.?5V F)>!koǙzX;atkJ^$URv|2ʟ\|bX}c*4,:@a+*ֽݏvsB%.$8^Þ8UrQaT p0fPMֽ/Q.n EQa͠3]FGq/T#\m7K&aٖ}V~y}x-wÿM$Fl]ad&믿A{#QGkUVZurgm݉Iܬi`RmBs-pw\цukvea65OƩӈ/֣>2vDYBѕԗpy tbGաFz rTld|쒦, 4y ;Õ-4L<{lf[n{.٭==W_]#A)' }vhm^MgߡgZ4b:죡*$ ƭlW, 3nEZBU 4ZWW6bkgz`'(/ŁM]]mb[*@:}4ϱ*~@)uh% ~qmcm几9%(zct@ xWa̗u^[ލ:,~OOkm'ex 4GiWIu24#L=,`[5w viJY*ts.ZnI~~,v+fҤޱV%j%VAjo0 ,KTە^VCc#O!N6>P!Fv9͔K)USMYUyOԚ/aZ\Ui':UW菕j=N/@ sزɢz]tjuahC5xJ GqsvNіP,bCgPk`ne B3"tW 4|{mL_گ($ϣ*)0^>aAnEg3 y N(W [v u?]UԚ)pPF<~u5BM&UTz|&lz'֞CۡÅ:Ef;gOAob7;lΫnNB*_{SkMShIR 8]nW " Ù-#|ӚXw)%Fݽ8, П c̀1@{"} *; ;tizy +õ)x3%%fo ӓBSg'OāgZ=kI~w)@3P|6 g:J`4+>cb @g99ޣE]lUMY#c*]i˵ K٠?Oଢ-7t_z09݌A/>oޫKiZ/g~u,޽BZ{P'm4p;%1 ;To4"G;c͎qF2(4`)A %g- tD٘r˳s}鋜G#0I~q #r ɥIG~: lU%u8NHZ;-$Fz#3GXZL v иEᨳ]˞:Oe,>MkzQY _Pg>cXaa*(xF}hhXy\i]eU !rn\Ug "q.TW޶( ѓ4Qjߺ };Rt&-:]gKӛE {7Mׇ(lOWLFt ;* ԕ@/+]6{D;vD@2N'*ןP=Ipo>oM8iYsډn.݁Ďh3GKSzt=ЦPE/%@LQF 0&`X 0&`qB8Ih&`L 0&^ 0&`BmgU`zEgw`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L >w##%_6_)73,CC3&(M1|U72:p_V?]7<%%.%97ne]uhf{N?i7}lw|ۋNȫiJFI•*MGn>ZyW,r l. 
A=08L14r3̝}>|Ҭ́x8#M1RSךB:t]O{:sսO=V ~դ/̊nWnQ)\i2TM3W⪽:=:z gF3'apf^0 g3ڄ{^uyP߸4ua4!>7%c ,Az2xݧx3g.oO.qa&6> /A\HnVB4%33Ӿz' UeBn(Sא?4dIawlJ";S]ot:V}=%#k"0[Nʸ#y_NR/v&&zF~ISn=ǸKen0 ܝCqC35U#|YB}WH9q&0.Je t*N _X,ݥ0",ʌTXƄ SZS{|Rڭ Sj XY DkR!s^J75+'`򧽽Pw[g'jWr9Kϖf']GASC9RkfϞk,@ƴuBv.{v܃==֋յcid\Oam1\gezi'&j/a)]gpiEzf؄vf Ԙ > 遀1$3`pz8kX#3RPInAa̡Ǔ@*0choNw11DTg8{;57K׫-<;@= ɝ69ns;7Y!ףRiؾeg}^]0uL߸?m^o5(1w݉N  {v4lq3ƍq_VVًkG^ G m`hH%YWp>)Q ?F:Ëtd"~ooRq9Gs?4#Lio?-FH;?fQѤ}4a.Mld 7eIF~wl7Ffg ]Rguq(jF/Ļ ,{JfbuۍJEkOCňй)Ho {S]~{ 0+Llk2OʇlpKji_"BҽtA*nˏ6B̓"OAH  LFP4M/L+)_v @Ep=tZS[L Zh{X# (4apfzhL]Xg2 `?yz{9C1(I4֩ik`}Ob]T\X L}dBiC47ԔR/Y|| ~߂aw14(7LPIvOӳBH: xpeBO7k z\Szނw2#Lw-z 74aԩ\F/~pke W2;4v=VY7B3Bzzm3K߈iO @~?/jR3iʇZ}Y;$8|BE@nN:¶7_CY  O;B.=%HNA'{ltU€`uUF`z&aQgx~SCN,`wSrr<(wnwGIiKy$+n,_ɓZnB^B][?ܵiQۂbos{iҭ-޻ۛ?4%7/ܭA]d[Q~(ۛg DLG|*AU4E*FWn:XXqeJw!잂 ], ɗЛ iq̴χfdu7ѝƧtEg{|7ܑ?Fi^ =4'A: G٣~s7{C~YRhz`Z_ga}kJJu+wi* 3ǧz*tQcj{1a=F#S2XCLwN,:rU![ lu_Fm ̓S$Y<~6 X iNE\NN}de.cTC;s?ќYYޓ!|P%2@B2jźir^KX~4l3<Np].pIt%ȴ$L0@ 4:0 7|O*|3̒R c8N)c4OMX]BFJCxx1 tOg|v$?F~M$,?ǟfs }Fi$Rեm.״.zO4V0 M ZqgSJ|ČF=Q b^uih\ET]itvljd0CO|%;*:K{? [J 2.Jz+z0tR_ao&z#5R F*DiRCےRio>nvBNqE24»*)BMjAEB^X0xlb>loМ w' qLU5 N2eIӖU٭_1lq r.鴋tdG]MM3MFǭ_si.ѳw=ΟBOYџ:-oZ?Jw&Scn$?F~=*eGV opއiỦZE(w[ti}l9jIG|#7iWuCDf+8MCLH[_b% "`W=aTjOM\R6%߂83)?o M[/5 '#͂*חͺ҃(a:3w Z4&՗ K{4 NEr!HZ.E]XfF&wlw D'#c1+uqBфafZNŴ_Ho{; !154-WU_fEA{"z^GVL X'U#3&UlTEmWw}JnQE ngaQ^4} zҴ9X{n*$|tZ7;j4M3:śS'R3&~3s;=$!3>=/T~WZbX =èjSXteg ƕb)0 $U-=tUdi+- ъo \*1k=,l aֲ4w\?{\a̲Pԍ[֦ک9^X,! Aɞw@əpn`l%™kpnkJ^3 HcMPi*cPA(NkbqiqK 8T"̱Kd"fFBWΚۃWթC%ACAeLC4at NQ|-4龇iwL-ئk5Fւi5C[4PgwV>\ڨ3qI;uۂbmFlL?`T͛brEךK^#KzEyuDdi_ۘl #6 3Qt*;M49;4HJȫ=A-]\C'0H\9Z;gdSH_;FlFV'éh4{hHUm~`1$PeCS\}pyjk)aߙX3#%8@9 zL#st,m;v< .pQٙI#xWS`%_lwDQnOJ ct4;>4==!kUA׷t͑ڭ94x&n۠M/j[B"KE*g!zݘ8yK%m(?9kCbh;5vݎ.[z n+A~WwiVNEt4ltBpGQgP*%PcAB:Ѹ[lg,~kKהX'J!["ԟ808i4*'G.'z_;Ac>?i{5_ P0ͷh]'Y;^zWZ8&4wTno:Oh5X ?4pN]&o| $(8ߩil#.Ñ]+VrJ#O\-\*,mI{#m355]DSJhp9kS ?^1hGa{/=¡GN6+ߔG|ؙ( [P7aDU殮ms{.]Q*idZy:+KxK Tob.\܂=jD?TfmoV54ޭPE;itZW*u@jE bM(j M衉[iC,@[YyL#cpv {H ݾIh7|+ L1Vз.tFNqb*t@~-XY:ҷwZOE"uP,h {shb~:eZ6(_:LΰW ӭH#IH gEzL 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&rvgLoy0]I6.cL 0&P (!H%7 k\6Ը4pי$/w>DW))gvPJhBh w{QeXԉ<AD&W$L 0&'&d)n| EH3>!nLHdyh8fL 0&jF 5)3uăީ5sMGCPBzR iIn蕡f 0&@<XkN8ڗYpFF0] xf?RJO[d`L #Gz^Ccy y+ip2ÝY4%=~cmg |T`77iw Ѽ@KB/#Z+ 5=s!0K=slNu Z{pHC7Jދ%nGC2SX0[ FJ;Z# ZaF5ԦsJN 7 z$4<5زSMhRPAHbXFO}yV(!5#}+ӜE 4fga¨pt`B:ُ=4OUCa3s< ޢd?oIBF#rF9̛=wR޷݁j,ovH0q 9l@IDAT>=P)Bw \Kv7>w?C~|S{hl2>.NT&y7fmI2±nvt_Oeÿ?iNle)F^E߼nHT'B锉[qpevo,{~:x),eo^Jf^M}6 0x$#Rdcd1VWaTeC豬atDR¼.)^waJSzV.Yn#;}ct45gQhC$PTV"ݞWbSr* ʳN3>`ҀMI^s ̵_ӬF ZT%[` čjQ<0X1&g"wΑD#8Bo%f1JzSyN=4ƻ -Lw] 4B$8cJmmg<$%'kg.g~x֌$ 9̈́ƮKӸFn0-ϨR4%\ nJM[LCNLP(  [a!t *4c]*l@$OL7u-  jϞ=t@X0"hJa/C4h` (lŻBRıA}p{,f~%I7<z~5iXtFfM;JN&] X/TTkQFMPo)!)H7h S~Eٍ!P=~fL ~ i1ׄPO~VxJR-Lf9aEN hN*Pq+oi%™ʿr1XwfC0վµP,W*Îr""}kHtEf\v0mДcEBof_ `L 0fX 4_BsR(TUFLh%ai.%c`8;>uϡdNA4ey\ RVg[33زU߱ ~գܞ84X5:$/b6.mvM]_wAHyyyB c"3Y&F<S>!L.s5%m YyoWC ;Ip{-۱eܒ HH0e.DJLS]SB>MA8ِrXb634-՛<nJ7OT隖9ggL ~ O}V/ǁbL p{Z-xkpf_`L 0z@z&`L`]]3%Fr^A6`&OѱE&[ioEv 0&B TW(~fL 0&-|mrĘ`L 0P,g&`L`&-G 0&%O(~fL 0&-|mrĘ`L 0P,g&`L`&-G 0&%`01a!BݕT桑kJ=R B.68e8O%]v!)BqiNia)%|/6 ݭirnꝤyRU$㉻ѫ@ܹ@)(ßQ׍-=䓣P穪x#RdQb]>Nu +& 4'śy_ƆЖw{Hu⑇{uIM] >jK+(*?//+O*lVxmy46c( cSaVWLF;α;:x% v>"INz+z`'2@}litFի5=e"G7@|Q4SHpVL :%iӻ^<'n@Qnw̉:bz<5u 8kO'8yr;NQJu?c&hqo3$&G✞8;P5/ziOV4vҬUs>i$)ѨȌ ;ub{ؼinpi{% Μb&`)Tp7M&;3-l :[L.="ƜtWTGK#PpՀ@}˨&ds|{ !3J<]R4FUs.sT$ ok%ayE}ǚ'@u&:%c@}|Hб%PzҧG5/o޾GmQoi1z-0)e+){gWYGc9&yjRYY̿?]o z'aѴ˪a$}ǚgcW>ۮ7_"s@QkB࠮揼-"y"Jr*)'X׋ؼ}x?_$_}v\xb1*#a`J XuJ<' 
@,ZX6ɘ>W/\.*>$1A :pqYao7owh-S 8dq4w>Wko8{Gl.Du~ ƜO+*qbߞ!'Z='~mp],T|T44#9 Vx_ 8*OՄwc0K{ WM‹ј |X؝/C\6xe_VwX,2G\aEXd=SŖ{M狖͓0*S<ۗW͓~n"^2@z qq.*TyK\}qaނb_7ڊ5:^-!kEƒnL- ke2PDe`Y}cO8 OLީUY ~?h7Ѧes\Mqu]z٥- x˟, lnW}up~?]rasN:Ro͙]qmQ޶k/ZG|8Ceg/q%E%: eS= R>{sO>FqS +m-~Bo~ s.g .pO*PCjwK*t`.tN [Nek÷](nx;.;"-[| X<'HM%PF|l1/< :Nѳ8#4ߠH !sߚP)CNk/.h2Y8O&Ԯ=Ve?Z0X^p#z 1T\gV;i.ױmKhq)G;}gq ZŔwFeAkw~SX.MF?~֨39kq ..)9(:;Ț:nֺ7{ZkwNӊr;BV;;G7 ; Y!b]XPbEb_-BN @Z(8@ôZд29DK=iXnDtX&#W4T7z O#>5 y X/Uׅ?Š^gb$>Vzq @JZ$I;G$˰h(ל{xS~lfD{_Ч^rS;쪳DZD.b&ϫxUY߰#u;cMWT u5yE;hs~QG#T} ~dCKwnQDѨ 8/c2+#"EAEźGV0J.=8.e3L I=*g kƮi_ko'(*̦QZTs-D0 ;lE?*ƎZZy::q6iU[g[_mÏW=,4u@TaSge<¢rSg/8ZG-,LVMuXnB>a{Zam툏hwx$82_[nxqh@c!@ke dgZDv`K=M-@]۷ =n.!8 Lo?v 0&p@P.Yow-Mu>RP 0:%OeǙOb;;X1&/l_s&`L Qd 9el*(i02&@ @eEgZbL %^Stm&*`׊)}K<_V/]S;f~Nw84Xp)e &@$x'Jl 0`1wR|ش ',w qJ.S/xxEYL[(ũ笘`%#>%8E!{y$8Յ>3ҕe/q:+&@4x'zl 0؝_(Z&7-wx :&zީ}W&@ Sktl 0.ƉiE>ltg·`L X%Mv 0j Kw-?[x}Cg%ħ*L DMD`L&?H>*Q<{OZr|-݅/ΰo%*g2`Lf*@0zj*fL TM[& 8 \\^L 0OuqF`Lg?<i1fL 0&Xۤ3&?<DyoJ{糭M ʙ8O߼tr5 q/Bk7n!Ů) Oy< nR$ZL}Sh ;Ѫ9҆`WkN]S3j)xS5TAQD-Z.>w Թl"&*F% Esp"> G01"1"uOVOX&Q~79 jؔWlU„[D&ѡdx pן{8,βz@/+׋}u2OJG>LNh~EbBi.Ck#v5"JйХ[(oR"DH4-+vd}?!bтqNԃdv>a`+麁)1orKEXxݶzbQ jSBWR*,rB\rz?q#џ(ovRּEDqyfB#|D|RN>Ipwc[e%1)B.: E#>[4ie?k6n[wŔYx\"I8Ckѳk{ZBGO*KAg@>v%XS_G.[=]IJPfJ&Lfg# ~bk'ES_5x j΂yh~"*OZG*P^yr3o%ʙo$Us~XPxMCb=&D& Ѕ,%s|Jb;'"0G nrJX,YV|`H-M>a \#x%]f_nra,LY ~}xE~ӿ>}J0 JY]63;9ɝ|uzzf'tUsoPW=uΩ* [v^,AQVj{iUN5v>P+rw&V7L4#cd%O.uQ!`j B񁐂961xzZJ,Ӓzu4m\~#T2'+?D{npo͔Hs˽ua"9fEα"b;/jNPNRis:*s*-8~;&$B3FV>~lx\mѨ@Ȩ @ j i/1=KE,Մ^VtI+8b}f)_!5D wy |wCu d= O hdRE[6mNW*Jֻ'ԂW!`.ޜ`Ό.睭XxZi2Ս fNa _Я|Qyf' 2>*~}xٕԬUЪs#̞fR*\NQI;J$_!`Zxغ'JŁ~(}9 eBL7zة~V2+Soj~B)czbTV]P},[UX ɴ-YhJd[Á@(>詆k09!1k1b$^˗d;Ljr&bIp*G"A ?>,'9Tlt~Y"?oIu!\)l2<%yX&vbY|I6*8(4R|+y2RBcŠD X %MEw#dO HP>êaX|:^E=p3#4̰66t}Ի'CPV|{LX ѣ3[*i7mnr Dy |Ftij+EO|طq![ON6\]6}.D>ʤhU@+>S;TKRVOa 4{_7,Ӣ7SVfeaB9SW!@yۼGvtی*>\B__;ѹzRKp'd\Ss}|Nm̗3#S('RM[XFv[^a S0uú6 \#wt"'AQJ'tͣiōhہdcI#اC OsQ4]Y qlӭݼ[QZ/DM!: S}zN!`vr{ ,%.z1@->c0J$$%2 On I eJg,vxtiesfS#w|IWDC~h1pH7Ot:cY=#H8P|2SȜ'*Rs;KwvӸb^مM PF&B&XKGuWk8Oe~?25{*qQ'z_ޖ;ikZK/N."|;"6ʖӗ2_HyT҉Ml<:G-lN՗[!0rdFYyӦbv&f" }Չe~GgX@`0ǻ;Fg(Sjܦ˜L{iڂ6:ydm'~%bػga7B{z 7M˓Y\||.*qa3[D<ұ6:-u/B`Ȝ$F"G:1 W"h'f!HU=[JUe*#=ӆM6??1o3o!U;I>sOZzF:|~OhSԒ16{/zH8Fh&x*Ypst:yn*.0w14mmO߭QViRkE3Lǻg8͟. e#؅[D`W:aB]:y:M<_UY"IPsa+og봿="{%p3H/4!\U'Meա/5=)̆R|E h/JG Cw)V_zz"uKMwVoﱎ|RTYd[b+\#eȱU͸}F21ߴ͙wnhb[QQkhrW?#@dyZLKz\Pbp-J،M)>CaUD?ax$Bx!Үֈ6OiݶCVt1)"pC{9dr#&ϝlg,zH(&*!+x!^C7iAH(?^9B\QBW=AY@kF\aa:@M&"baZu LyÈIŦc~oyG ۿ0B`쑣!' 7Ig{=@ހNy!䴕Ҏ(3 >#{;dp</ ci(L^Ku_1%Xse+5n1Y#vn.S'Gs7of4R Jb.!lW3}^FOn˖XOY"W֝uϱOrU_<:Pd$2gr{[¢uuN! 2qzQ!&*=8|'ZtXkLp/l'Xr~򦏮dy2c'H=k;R+٦{y^K6mc᏶ܱ$6\­ >p^b%>0@X!0L(OW:Tz{rTR$Yy .Px" <*-DsǏVX6>el% ? my6.׏eP@g+v5Ne;ϭ[{֝9?ia3^!00 LKƯHAf썉ٓiD/4~\Hz"V2??n)(> (8IZYCƔ9Lk,zWd0N v%'[XHYdf+W0MfO]u}'(2-=szNN} l]b|J'. |= =oR {=5Uq`&w+BGRk1E1 "T[az~BR)v?pSAOTLUDd)zW9YF~,\IefqեbRFH٩\S(,^ɓ!J_|b3GX11鬣Hd̻6޲z"MݒR<fR\͚DcGJLufVz .W5'D@6@!$yГ+Mc-%g o=YXd1I&ZS*VƖ2suCF竹F$3|'8‡崳R xINPyNm&%=ͮi]Z*ҟg`>9ZcEga[tfۿiQkl:Dծ(-تR*+E:KK돹x-2FPL_BTd(ҺyO,֟yâ2r6Wx0I"JMEQ*$A hY#ح G~Y{9Qd]Hr2xrEOjK4a/B9͸x"Z>(D% Mo[Ldk%2e/{xX+w{2ŷd>*zjhtUµ.^fR]QVJP|x+5>psAb;ڧ'WSwQ n[tY$MD7悟MM5\a"`6mmV m8@@!Q\Wˆl=͞J#+:tƗXhtyQ7`]&Az7"H2S/_MUFxrˣxM'A_xʰnkuAA(f"'F|AphiZCwӘnEtGJZGl}̡ %PJeXtSIi1+=la7=ܠ f{˔DLwong%`Dy"zK WqNS G8G/.%4M^+ @ pqE5X+;;h{!٪-,=&p ؠBT[ۻP Pg@|5Vҙ5~=S"ķq)NVν^Mag7Ovho[w0H%p8Q!:cFu*jbUXb-+%7+SpiV˝)ݞmFl1)>n*bA%҃L4 T(O)D}\`{lӊh$L#o/z+EluLOQ`/yYz8"d* 9D)bXwBBG/z +"PF`@20{.&PƔ! r[X+H06&`ۅlrK|'c<0㫱WE K;<:SRauAm ~3[ż?PlpX`= (XuEfYx`3A5k2 ^'ec- N̊#oC9CPtu nԁd`^HI'onᝇ_1]/G~1wsB4ٿҪeӋ[xXRDAd(=|ͳلV[[|Q^`f2. 
6v#43emRӈ?0zi# m߶-<-b:s?qBq+|k|~G?(cx[/aJ)[P #Aᅕly1Fp+<ӹP(@RТ+7Z!Nk{ (@=֞5k{o{AD&@6S(HHT 0لPx"Jz`Zc5Xu@qV [}H@yHXtX@bJxX%?)^><|RܳIiyx˜ݬ}hw>ZߥyU+bAJÎ;G9ړ|ooW|K,8HVP8 a=96CLY} 0z \]` ttS0!/ Ss~Vt_eeP⫑)  F @IDAT~SuE%m)J  PBp asQñ> Gtq;~{K*GxpwKٍ6 -e j{')L#O`a-=mh%;K0T*l̤&;G# ?~c˻k->y W7-i& ) ˋdϱ=|#|;1$|7n(? eI7Mz;0ē#&UX|P?nW-{S(3o+ \lkX& =ٹAi*rv^*Zx'n Ag{/V,.9SKa!)!D "8SJA%tO?.#Ɛ[M(;2!MA@ټK۱o)16{/p.gx1M B9J\P>waʼn)/Iklwˆmq' ݄fA,+K7^ٺկOZߞ}w}+^DNf-_}T*L̦@SGFC^}-Ps~Y2ci)YJX+HRI{ ḿaK(@[&^,=;|G V8tGakplpʀ eZѹu+3SK1pckx ߜ2z۩i.;OW;WHa%[Ove=05&'d:H$ܱqk++>_Nm/vހ'pb4< QC}'s8[}uw*yg){6r3{|_Nʓ--|tu]l{{F,fT|k=tt5 =~fKfN1kS\pl6.Q~8H<J :;pjZ~77:Pr@PA`n.Z(n..H$j)~)[lRn'ߞdm 3:/ 8A Q3*>9.Wݵxk{EP\>ç6$`MFG`S.H$j8'ůfY9@̼a=u&_0 RƇ=7o.ޠ A))JW :p,$yS6tMgϑfUw#R `fG6B|‡JV0% U|YH)ƒq(?Pe\|O\x)*W)dkyF-> a$ƥLӵWK [I]̬b4D4){+=2 O>+=\<܀QFPvdžklǒ@!C@>ZꌦY?WzGyhG/z* eW)0#G {X.Q-(o+ M=*qQg $)7{yM`μ\Bwf߳[M!UAN@ IlhHR1~KX$q>q$vLݙ<9inسrN ϫ/ÂWŪD@ 5'?Z}ϫ TP<*r9@!y4DWëCAE 2&&m B@!/nڢ鿉*}{/6͈VyR|:Ua B J³pyEB{RG=c~c:R* 0#im|-]{ϏVyR|*UA B ܱ-\99baeԞPbB@!Pt#MwPO֝\!P(߰ӂW3ݠFR|zB@!Pko4_SKXsn) B`ZqX=]sצIJ0#aXeP(/`]%/ni@@)> B@!0>y9Jk)4EB@!Pd>Xo vL#L#S( Bѫ,#5Xs^) B VCWjaj{ ;POU"X!P(@潥 t^iP J *B@!cxR|EeXs V})j B`T];V9R|r B@!0R]2m~wI3瀞=RR|RN=P(ia%YdM(#u RZn=/4݅q|)Hyj$V4CɓĢiˣ~P|~>IEG|njVy&Eag%&xȊIQ;{B ˢݛ\#x*qԝyQdF#|LQm/vgcX1_[rI>#WlvעQQ%WԣsChkW-7~燇>xOxY#ASS頧;)`6Rdm ͺndC6e\@X>߽AFΝ:N?eTZ0}q9y\?CM({^wv޸}g1}_ߍBIh BmUDOx©_v~3RDO%W#7] o~kD1􏝿fLy`取͟M۶0U5ǵ~?` V5@B`0)tO߼?DLKA"kٌa-S]3fv [Xk)?!++=Bd.'n&ś7w!+TR$@L۝;RJ}|`Q)0c߼b?g|W+qN yKoW~,T<_J%eo"~T~D^ҵԺ]&H1cO\ߏ]_wəlHUN5clę|77Z$3gOeGK# eJYE} QPt2x]8UU~s*>c#B )E%_(Oܛswq̦G?SyKS<5l2.T Sx>vdJ!ɓ*W=Xؑ)OIy18I.pƍ?) A)qB@ʔp$ObZ=ibԁ0zdLŒ̘KB%nnw8I[|VO:H )S,VDHZ| ApGXW{SR `6F=6^k /Ca6F:ALz*/+Ҭv[ mPFc(Jm|׸P9d-{/ WPM;k>~d /33*$y VtDc K$bSRHIWp׵2S!wnv?>*ݖqZ[;,v #(/Ч.9*ϦųNNdEs`B''qN`hPNI0ّds+iTe适6w.>$7m]e /M1JR=B jlL&ߵx$[I{NA;e.=13I |RNW6tdvWm?=F8M]Ig!:DlENVnNW_p-Wp>/Dm>5V;e(P{q5u/E#8EXy~5߲l6+BPk?عzy4NR1lehceOș}JlNg KfO޷ܶVo /])ݥx:c:㗍'#@C!6qw a=iL@<ߖ|kg:Jx?NZ4k}3iʸjAKei]Bpzwށ\V7.)M8q6/9.9{5qhe!B4Pg=ɲ{9|Nt_}.o^^E߬hgqtb6=;Vut*=xps;˝V*~f4+.!ڼ ٷhɜIêy` ;0fr'_5ܩYy}ӧS>sYBKM| |/X!ȓ^UQ>b'|o4:&frg\v"3e,Me J9"RZLjf[]UBjh]Ŏ(B1 QHe?=;?Ո3{c6s-,?wYVjZ[vSf ]>q|vE.)=4>(,Cھx+} {X&yʁɶI䧒@@ʔ#ga?ruQ8t kD[~ ʩO..qϿ'8=h4*.aw2Z,Փ"ֽilp{%X~4m({٩̸Sz2H&v"fiA ٓLjQb}u=S PJ}ۮ³ǎ&љ9}鉵Ul]/~ϙ:NX?pKD(6\4l>'?#`&gz=N[M̥|*.=:Fߪ?~]f Q;Xۦ jz,[rO@y?sGt|0l:r /Y>bN];vTpa-=QĽvy7*yT[;c;Kg Bײk +6)R~(6gi!V1>r O4g-0#=q"БSmU!8y leew؃6?ֱq an gEam9Rĵ z+"0狞,DYy]\Gݸ8p^nP`I3&q=oW̝D/OAKٲk>@GoӾ At@]Y6fO6׸nq58@]tc3'IRiQnj1`IjyQyB״ryFL"F߯o??Ob:{%o+]r%$1rre4gW&DU/?>*oϋ q̉b$ϼI?%9/|\UQy$9sWCC#pE8`cUp%!=sQAG^a{qCGoUpi-D,Pp~E|&  L?sYK߻(6=&1yg5@3vΟwV\?df>@w>5&)o?9w^{a%(&*IDN[q#d=TRG >Ff-rÍ#ӆM6?'|%׵r$O6F&;< mj^8@>W{C[k.:MFO_diȔƝuُ?D*y @ٱChR8lKk5#0ۨ._!P(D#Y),> cQ) \bO"J1wu HT6єB(*c #cNV@*'*I(0y1bErTpsNT"R!?`:{QI!`x0YTt@NN( a@xb, E'>]|I1S }"QI!`&Xa+1]Ӣ1동hTF@Y|z~)Ì@{Jg+/+r5ĂHn]̲,~Ic*+̃tu'͢+GaR|L\94@>"0gXKE`Mu[˹v..+q>ԱBlB1'*l=JuJ!>j{{n!ڰeoa:׏Q+ej9^C@Y|t"E*DLB`WSA|Bj[(nZ."_veg#E*k@h^¢,>iG{$KީޤP(n(fFn6(XUJҦ+YG EY v: tn֣tS?w @#We,.pTs7cq$ݥs2'*IP(fE@-(ǬGR|P B@!HFug)'F^CǨa:E<CkMgpةCUeE4ox8*l9C԰5ul/{H%P<2tY}Pɓ-Җϊ˫sjɊWB /,xP(L/Fjd5ɳ Q)@[FZ|u=yйK_ૡX^\IWa,q~D9S0m0.^"?HERU;b8\IJJMev=4Zğ#X͍@~q!Ѩٷ;hÓIT>,5s@z.@$<ȴW_+Zؕ~df)nͫoRWh7B lVzÀpdYL"|80'*-<,E-Ԯo{y2rTZwif ,>(khI’:\ %OQ/cVz'`)S}=zHFڠp ؊+i4Oۿ`h$r[xi) gP~?@ؚ[ۻ؍֘"5o筃-`'"Zlr?X=V[Xִp-;\lŠv[Ҫ"O3]ffsIm$tXr FAJOgya iނ, lh*>~4B`Ϭ䏺 Oɏ#%^y9fGl/LbcRUzDXɩ®2Gh+;=:s^]n,f++LJ `>X:%Oү|Ɂ<Ň%*OTll&-05|~Zq h@?9YH@h{m6A/*W2SFy# }էyO'[h&Y"ܞ[(9k$(rDFͷB'+h΃,zF%Cw%OA-յ)'G:w֞ XLWM4L9hd>P/:>SKSse %{p+"a4t Lz#c{HRNjWHE2k'; [fDy&1ӸyA 2c|հs SR 1/.vgctVLޛ"cжXzGTd 
[binary image data (PNG) removed]
././@PaxHeader mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-selfservice-flowew2.svg
[SVG diagram, produced by OmniGraffle 6.6.1 (2016-10-06): "Open vSwitch - Self-service Networks / Network Traffic Flow - East/West Scenario 2". Labels: Compute Node (Instance 1 and Instance 2, each behind a Linux Bridge qbr; OVS Integration Bridge br-int; OVS Tunnel Bridge br-tun), Network Node (OVS Tunnel Bridge br-tun; OVS Integration Bridge br-int; Router Namespace qrouter), Self-service network 1 (VNI 101, 192.168.1.0/24), Self-service network 2 (VNI 102, 192.168.2.0/24), Overlay network (10.0.1.0/24); numbered traffic-flow steps (1)-(32).]
././@PaxHeader mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-selfservice-flowns1.graffle
[binary OmniGraffle/image data removed]
Sf6=pNg $˒߂F0%Q{gqdU*S*zxAøAm5;i[;z_) yWɮ>zy{w"_;ջxt׏'߿=e~r߀Y-ǡmV@⥵hlb>*VF['Ri"F-0ƌAm@zD< ?-f¡SV9U4_x;Aàv^-9R{S0(-EKPa8-Pv{.^^)cpjr×,Jo{U:Ggu“ȟdSgoQ_~  lW 5 2 1 2@IDATx]`TEy@B ł{={/$+6H6JIw?ѻW(6P@HK$n$͛o77`0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#`KbQ pe{6P=;}w/{]oܺPRJ0Mi;Du;Ce7F ۼ[ӳ=?`ָ{_#K Au:&=!!]6Mgꫯ6j]7*eʘQ? uBӤ*X MҌ裙Rl.ʙD#R]J5U3 h0M$SRB aLăo w?T/)E?r#Df]OʋѭTM+'^&[wln+)lsۼ.5/ʞIy-}PCsOJ$X JP/%WrǤ;8ZT_./SL?˷x>hOz/ %OJvy雚C.\ok՞XL7MAӴGgNH(TheӐ0L8o<) uߢ2}­PJ ב2:[wunMB-ĬiUkJ ɨ[7H=@d̻ YK dKA$)a6+;(cQiKƇh֛۟Fl#>鍆r_mU5-ꍎ\b*iT!LNIY-wnS/xޫt;84G_OѴ;!}\{FKr(-4/C(C';bmbb'O/@?*-'ݦלed{h?&ї&VN0'g|wjtsuٵ?k3gvQ 8GtӋ;)vA 2آj1%;!V<hJuIDuŜ7LQ Ӊ!%gb77~E6@$<}ѭ[;&ѭ-7^DD.)2HO?c|:6_{ƒzT%jXy!盝I~fpCibfr<)ƤdL:JḂ8ioU(3&{33<a/Fs3Ӟ Oi@L~9g ⿈< 3MJ5I<8ZxK5ș9@j,#>_S7oVjA!+O ufe2A[$PG&_@D=/;$8 hѶhsRDe/{YYP<\6 ב3 !&` *᧰~˟=AC`Bl2i|wuNxA=Ffd_|hEx~{ɞǞ򍍘RzՙoPf|ʣvV]Pum_#eN49>J4ICF{OwlQ۫K7rnш{8$N}?;//smL]n ͭ]I 5!Fx=iSnE>fe<`%DZwǯ 9]jL8dvL/m~HzVІ3ŗs 0pNpN6kkw G$>=}BQ@Qb|(ElުkmK(eDȘxPPzW T~({GWpϬm"H[ՊT46Oh@¬kt[j$r ~9"#+3u.¾O99%#\~Ҫ'ߟ>B@vA~^G>PG OOS;T)Uzy-⿈tF.N_|w0FjtVԾtj.}>A9N!EʾԡuVyPW@t:!.7璉Frb9\D?WGdd_0 D¡\%~Lߦ^0tT z nws,[R|OYX>+aeS}lC vAxtAGAQg`x'T*>yi^,\,NDGݦe4j2&ff~& |&5 K\#RAIŨ]a_qjk)&EH .,pm[@n”G/h?z={e !ݚr+U6m v} y }875́nukWvӊ&7=T{iʾU㨾ҔwiR53+ ۟o~p~˭pesxN-^Sw2U?i'[o7n헒}2Y2'B}|EXۋM~)ո&\908$۵m09c4IԹ(ǯu$Llg\t?@$FH}ϦgR$ܝ]I@ͭIuGneh"TK@mP%/Λ}O=eҭ>+OR3009wLW}9)_*TL&oü;aj䱬иmh{;SBc{v>WN43VOͥ|ʡ>ݙ/)29vڥ1;.{MuCKH*Eq ;;&WBuC\,\ER8}-]^gch~c&^z'd_g۲ˁ g!&B]?s+.LDUB 9v'I\8 tk:0?bޚֆ $aUy*z/m9Ҹ즒iΙ8 hvymOaۥk,KϪDO-RL´i %؁T .fջ),wMsUYi!Cd+UYͥ-;rYR^qO}!K1c)!;iH(Sθ ȧCd`Dfj%a,MH0} bMt\A!E͉3/{uasiC#o_Q˾Pe jt 07[Gwcn fVޫC.)2UK`,rF&.N%w8 xar+_w2} Fgk^])^ /tO(^wnVf?_ǶgFlBD{1^p]ʋzR4CT6tTvmoW0_Kd'mWiE )nh:F4[=0? 9?v=\ï C>=P%yj%Іi+Eh\ }pgcO[h 3s4ҋx(_2i<`$gH@aX`*7hBB|易VoZ붞HhAgWDV-+I!n&_8qwwz:Jw],VǓdga=Ġ$=qҎ}9Eh"NݨsuwmʑiDPٮo{E.)2J?*~dS7*I:/"Kj&¶:BNpg406%Xjnt)} 5; =߄Ԡ.k4%oJ{n-BE4cobƩҠϥ7 =ni)燓_%ew'u'tV"Nwi\oW 8YvǟzI P\PLaux#24Hr;eaj7/"D\dtB1 Ѳ+vӳ^ _G37#] ϒ~-wf,Xя&Ƹ Ll!ϸ"O"6D3Cf΢{]]co-<΂2` ۅ(zuʛh5qqc)Ft+Fed i$Pz$\I+^+8k+m7a2L>ߡtw&Z& Uh:$DSer]MĴ]Nc`ɖ3U{ߚ2Fc/86޷_hhwt%!MW1nFTࣦ(E^u Lܐ?"lu'* jӊL+r0hGOKϭj qPoʖ;w|^qL##\m ]Z}J<DŽ<,+K,jG4l ¤m_pH~U$+uOa_u 7PC? 8:4$|D֤ﴊa V/xŸD|Ȥ㰽nLm'Qܺy~Y~m]^uUu(M4>Y`unuyݭе X>U<[ P|wb܂Q$."쩤7ڞ>( ~?/C. SwvݏQHa {;Ɵ1ZAZ$=숾mvX;xm!NɎ%¾EZ/|~Utb*3B`"t(lS6k/Y|dk9gAKnMp2߾$۽JYD݇n"}w* DZd`A:Z#>Ļхq4 >J^ ryGsCA=^ O{Hӏ|%jG6ZtFՄS/F\fYx"W(`iNp~{j2ѓN L4z Sn$ m)UjpE'j瓘++]AjDF*k\CkzФΣ"и%JƵ4X0 _1F n-(>Js@c.N3!yU`xb|[ymL}/74Bڕ/8}0mt]x'U&tk_%H&T>7xJ#l1}QKo)ėP#%j\MaiBUiG1yh/kpp6C[<Ʋ8XX簕A&ߤvޜO4$Կl{Sg]SC)8F{1Tg0;v#|_* *ȯƾFa߭tSG(->|zeTDʨGi?zNDI.ʘM61:7Nڳ)>ݍP7phEWby,BGA#Wqk<1{[[dܕKW5*He?AY50 #/4q ,ݝ/!I/2#E^c)& tdVnz8۰2h{3^^0LNcBB=JM} m˟棬3:W!E ㆘@U[|L4p}5Ϛ T E߄A^e/3 ̤t'M ڸLz%nATmh{Fi;u |ʐF%x~51gF}ef? |Sg7N@gX++y;lk{rE38NGƢl-ؿDw lw9*b,]Qh{qʩl'`+@]W!HpD?'ݭHz?𼄶p}|c>: 8rXۘPG8"ViF'[E]u=V'VǠoAcY.)Rpf`& =m;Vj Q3u/lŧGE^, x_7$l{{k+V;1xEXÐhLy\i[iF-qc*WAϚ ^,Ð?TXII7?7-Ϻ7VͭE}MjѦvSRPܯh!F9;L<==e %ENiN_7!r3#G;8ޓ=PE'fbV+aeM_K6\4 |Ff 1Lc..&/ӋŗY3te{9õ|JP¡LGKSnzYטBt]~mjO=IK>ŧӤ&nWwnS/ -4 Mٱ+q^{F&||ix0wfeϿn_G~rk'i{/*Ϩ)l] 2_~7s'5~s ,r5nQz3o׳O;@nHx)1Wڙh0 |S;Mhjܕn)⋱o-gD.Vvюxff<}7g"a=X*4L %حQǨA.'Gc%nfLWO0~.HfA R^s`|^z)ea8$L,9}y瞲/&HkeȞ&V oo>"#k&K/j*kfezi5 IӿDcxtԈ7B͊{t9 E/ȇcIs,:>Ċ\Jvv7QfaE|]X`d& A^?řVsHOw h鼆o&m~7O&)ck&6΁7h.GiS[ w ޔod?|^ tNҶ3w]L۶d S wy\_c~|=yDCէIެr.0'R~J?~'4֮& ׫R&;O9;V Wp~6lInX'Mj:X#f"nXos0W:}Z;AA{BgFfdjYޣk(u/_ %eԲ[ ̡&#ߥtݓ[! 
[G;Xu~ ]R:11}ʓ"OPI'WMY9P[_V6h< ?% @2OK?faeeS曏фVr/˅)Vr`!: gDŅl /_GE?tYkv}';PX2a( I}db$tДZCh[9߄%qĻo~ 0RvBS$,z !K"a&X?TqL#ɿ]{^h&;~:HMӟ0QX UuTg8w=܃jR3rh+{gZ׵S&G^Bֹ!]WR=M@Im{/О yoaIeJB''+ceMoa`~gn% Thjc0S̵~nǫkxbu%s!~ IR@}֗m8Ðc\p|~ f5SD*t{FzSWmϩ\EgR \Cv:RCm?y#X/79Rru%OLrY\ymE}%1}T#Yb3ѵ1}Dkap'м48N9+1^L0U~xB={NAa2HR!%4tpdu [oK]yerf.uV3Mv Nw(|^aT&qw8 G7MM-Sq#}[t,gi2M)l@|ߓO*}0Mu0}zo_L]R ((+HKu}9Q1.TFح^4Po: {WptC3{}nb2VmwU6,EVsPOMc]R<`JF{IlVjLA^e),A~Kg^D78HC j\dh xv^ Y`Lb$!X)*AK&ܪVh+=:]"5mBc1tfU}>.XYZ~RҞFs ^M$9XnLX߂ƍt'tߘxd 7CGσdnNH=-_( F>Ll(Qt"ΠcBIXN Y0ޖ}4cGAxnd}І:0F}8g^Ȣ7i/L(w*$80WVaC1}"R%Ŏܧ~A-߿|O{"}1S+PR~~J]bB +ޔcp?Jb;f XG U!QE`2({oBJyy z\(H, Qʍ23._&UƅBcU7ս#ALLcyLIaXjk6pmSx)H 0 )`7Ϗ8F>߁s8u)S)O0^)_mdfE!_ayehI7Axm'eW*|BZߺPA `KǸ^-㧽1(S~}W| V$5M6qAcCY/9/]  ?9x DǚǓIT1xwZ5䫝;vm*o6n3L͘-/ą<1#JNH 돢gIVK fJn1QS'j"qKW5 0!Pfj6Xo”hL70r"Zo#.x!:Pn~d 0 %εZ1@b4tֈp0ݴS>I̞=ۅ^gG ܡ *ZtZtN^ĊKnٕy#V;뻉(u_p5󺔩kFP{\Rt%z՞ Nt\vmm#ԥY7אõ[wrQ~']A[pqAܺQ6 /j>hYL6ٺ^z+5?eCk{rʛmzٚ HH0NK\-*+Xֽ듯pT^S;s?Yf~*G̜ qk/pw]\.\0x΢/BCPzz.Wsĭymq H}pj{t\\P e}6?$J46ݳXG7J7ݕ+d˽pħ83ʅ`A0]V]LrƸ9m+~xkV?] .c r;fї~7 ^ەoy 7BMS`@5>5vl>w. "? ceum5K7`?zS] i9_2|#_~Ѳww l Ƶy{*M$4l:$)GSŦ>U2,liG9+3((4kK\YJP3Խ3~45󺔩uڒ„f`tJT$N:S7_ՠGNԥ<ؒ4b2WC5Fp\֚ypp! 廛q]L1+v6-\&tqX0&|>juJ!x,bt9G Z/1^mf0eWo:aKugӽۅӲkáN;ngt7scY842 f>O~C䝾n՟٬uc|ӑ."}ئpԣU5f[~ @G\2ٓd <;:wr{ג^D(Z[,9Rt37~=]{y-HCzZ~DŪ/71Ɯ#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#dL1qa¹ ߑ+#0@D@ G*E[\? Q)f{E?N!=BPr3F$QęeF`tUJhB w[Ir Of\B%R~+ZތwF[0#(5qb{ȼP)u~a1q,P#tJFֽOH~,lW:#0@HM:=Bj⁼L('j@U_ʙzJ rq^u&F`4lg]4C=Ͼk҅EmO} ܬk F`F 4`2՛`?4NvբA@g*`Fi#` r ~*xI.t.*-E>R\F`F 0X'A{fn"_coF`C |K1bOӗ4;#0QA%O E&R3~.s%#0@M!^RqkMA/B"fHǗDX`fCKAf Ci*t>h=lF`z#@W#261D bƏs{?ĤFh/ᏸŸ!q<#0#D fB cҳx_^6|M?4|ʜ"#DEh~{C^<wGx)YquOBgeДPI|$:fdFh?'NNNn@ "="=kD󲽃"{U+0 ^/q=xBC_ѤBG{4%w55xw?0RbHjFe#d,ŽqOAn4=᥎׹xo֑5L=LБb6vK&Nz{zw} 01ELeai3+1{/7I[3!$?Ҁ޸3b9ڳӦ8E&G.c^jb!w 'IK@0H%={>]NCʝt'$ Hσ|BؙkLx¼_jOؿ^ߌ'_ 3ὙÑt|Xq>0续Sv0]sQu#2rΐS?0-M¬()w(eD#_6ʜla'#cNNBuTΡ>:O&!K&ox ߨGN`J xj.'`+}k`bP* ρcP " ]{&&6Lha +  ClW5e)M'5=r]/}n LZ݄Nt' v仞~:tKYYU gs>,rޠL55y9LI.U:E ڬ,o`}ύܹiNE'`ʤIm^x}ai{B!&VL4ʱF `\iѾiG!zUwpFD T`@J@IDAT( NSv901AL`e! -,ypTg{ #UO—бPWJnM*wRb/VMi+­͛K_Pm0=ØIw)+`m |!taɞp naC[IEy,1{౛l H&`b@Mla@9A]^;Y!XwrqQ{s:ådL: 壡=Zv0.r2Xpr,8=B\0*=GjF$ihRNNL:i蜼\FeW`뻉քǭLL!ΰ 3n"D_F ?p2KM>=4J.)%t+miq|7gfmĿA be= +Liӱ"]5ҫ?Ԍ .g~lS!օ؇9$gLPˌRyD!M0}d;p4~|~1H-LlOD4x̶H Jh' 隖9n>Jٺg_iȹ$茣oK)9L;n=兴\*Pz&-mku$(_[=<{npǝ5ftߘA?\Tؾ]-~xOtb9cA ᜿O݁g8d}87t9˯'j$Yl2! ?"=!ّ`bXZ&b.#0@EZ.#0#Pꯊ 4S-qV"f3\\:!@?6#4YXd 0#TEUL؅`Fh0oUcF`"*&0#4Y7٪1#0U`_vaF`,lrF`0㯊 0#0MF{Ϩ E/|Ҵ;>3غR57Ws u.|/}Fe06HI#SXScJӬF}[49oFWNjak$[x= GG!Z$@=ԷdT,^/~\_WlxU=>Ҕ@ď'!6vjS&cq۟1ǓFJ"fN?V`WDX ѥ]2Pإͼ%h#C{ lږTX!<*rg &)le5-.~C.<&IC𓠢6xZ. Yxfsҟ& X@g&;Ey<9yH?wsfܦbjG9HM側P>4Z[Ɵkx?k;qŧ6gfUEuNinƑGl_o9"\{x@Ɣ]ګ-f ~U+)nyH<yJӌ.4_sΉ.^ׯnםwf*q3*bD{ ^,nSġj1sOtJuqt'b<=#kFܺHPj__spBa=$ly'5$0W~T^sbN1k8vkND"qm*f 7GhLQJu(?Y#7o< <8g.]B[לfܦYr1Ҫ]۳x҈km_#{F>\γ/l7c떉Js{>ܦ݂8"#PkL-5W/42-?%4 [ig]d͹os喖Ų5"0Q_(.- oW ~ǎGh@ 6Qx(ʏ[5ޏ~7?5[v[vQ-+Kjt(Q˾\LTЏ%6gz22%7o'5=%oו!Hc#@c&19',b/%UBEGp}ܡhlp:JYڝ'crfjqݦf7\;8z!GN (ä/QWJUriɱ׉O< gU]gMQ04*B63ƿpzeb`~-wU >WaqP[-]\|⿟ƟIxGv1m*NlQ[޾{/B!7#qD+)vO ]7+.MtO9/Twx}ΏbמB /4o+Yt)?KWo~tx/A_$C*S'KJL/z 9u kC^ nhﯗfMFb<4gE1%ךRjYmo7}cw}B) +ݵc'8 %:M hB@f"ky竟:B Nui|Ѡ@4Xo`vl-w@bܓrN=gz6ucw &g .,C,D1}?w.ZO^Es[aRm:Z4` Y1 i% ^"PD_/]%<~n˙WMć.\5p|o2Ҵ%TwO]]&hU4taHoP-䏩4Bkʷ :-sғqUXv޹%?3LjTsY^I !t]{; `sZRC<"V\؆}sOF`"o{ !ޢkI怎m).Bv.#]{G[_uha$WIP#s,_RsӴ! Bu4#poʫ\0Y97aO[xK,B(҉=d%a^\#x[W0 ŋo}K.wC+J>N+f( R)6qRq)^`^XU~9'[i=- wZ9ݑW):o0U`3Z _TcB+2eȿXhߞNϓDdחyOxƜ,To1y=r}yK?o IJJu1>AF:4qlCpx”r)'8TaoD /+c(=#0$s=mn{ր{yTDo4)j! 
?FcJM-d-[Ieo,'?R (ƯnK;Dh$&nT#+*٤սĿu R\RVoQ~hlq'Y;4r2k2}-n4AiLYlբx#2$?2o+A4=аv2}X__7t:4`F1"@{`'ԤG:\G i;b8Kӧ|s<М')2mF`4IwVeEB+$#?s#/ q>a373\vF`F T2`F`o#OFدݘ/L~'4u71:^GWa"}ŗ3@t4N W#::4]Ad5&su5]Rq'QWF^c E~}/Z>Sm3q>/Xk6n([,lFqLFKp}OSQ,bNsGob O&\͆`"CWDZF /-+ݚGnt=zu)k۝#P7 /0"@_\rN$9jӾ e>g0Etdj#P վEb'RhF}jo]O|n%#@.}7'ky_OwEb|&ׅOKOd׳"$F }h8~]hrXFfc!F F?_a#@E00 /z\&ʩ1^7gFh^?piUm|ٴvr!nSv]TRƒfi ĺMRP.,"]}kOLp㲒$ѡmЧ.#(4o]wHC ]UKV .a+Q@s XX&Ц2Ӊ@cvxQ͖SI/,_؋[V]%Z aWT%E sp*ot:pLoP"ُg?-{bԞMEebٚbͦmťeDD6 \{`&lxϵyhf~Z͞3wzIvWv(r['v;żhB9@V&/yJ\|@q#k! jKě, 4A$۔ -kشm(r<PVh(͢Ow/!nD1_O}:3DQ 5HErKh-5{@X< a+ ;߯c߾ 0,<=Ҥc)#үKEDc~E?c=ƒ =튾"D_mp CrL}ٟ' &tGa_J>"?H6ËZ|({ N,^QɄM3e ѧGG|`qL"5*(/)xR g u4'@i/NJ8Z.+z(Z[+|p$uwx}qH.)R Nx,>aeuag?f)-o5$>m"#p[RD1ؒCjm{Tt3̬H:RRG@0m&gB/nW<<~*9 L-_ڒy<'-hV+h/!q,n/+ti4vxuQ.Iң-R0&+iɚe)?v: wGwO9G2~͕\W-CYװ*]D"]Q5&S6Ee$Gk7[}1 ~{DKzQ9cvx&76,g46fI+FT;WXeiDoYסM$)[yuT7ccH -q/)Cz8Ǔbƀ@e~b sUçj* 3v*0|U[/(7?{fqG]:]?w}J3CH $!I(%yTB t{wNͷuw:_Ү3j<~1 <_ gȣ&XTE]'ݣ3y|vӢ[&AHF'7C2CCEh84GǓ'C*.c=KY&ϥoO{ş84I@v(o)x8_X~V U&x7~mڳ)^eE<-A, z {xEksvD ,E4Jsn.ż.)M'RRхE:ohl˿-~|Z_0Ъ>fv޾V3(Ǯql=bg#i Cy &"$0bX˔|Ǯe/:~uƯUАr#*2 Qt!@+Otn%^Jj(Xj_F6}|taI|VKQ/ɾV+SEKt׼& oRvْ)Ի vw"Y;<4>'|Tk<4o t&:>3.V)6BC!>Z?]O{h %Dc?rv Hq5@/muW'l#ern94;CuPe8a٣RǓ=CmtO6 F/tjቇɴ&>{_,a4uqp릀00Ւ:T|9wgd /. Z%d$m\B ,晭ۗ .4;y:Lh%8ex-҃NleRh>MVm(A< 7(L/UW8R>|J.WA4z4@&e!C?WSG1KYџpn)q#Tɔuq8ᰋu!K.p۹YtVEZ%+]Lf!e&m Rs[D}n/uiW2uUWt:]{/"׫,nDu/a.g1 Z^]out4k;\#Uu::%F](8RWr;t#'GuKK+N+x=?5oٹGu/wu&S/+x6ӥBzB)alV!{EPB[4Ghc2klD߯t_ViLob)7-h~L*Qy!U^c̆P*)p~v^Dy4'Z  HmISQ:kq#wh ^d VY=EL&e*7E(~9vr9 9X/;ؓ SڬV 3Ń{\ gOJ{Uen,OD[9^ ]#mߡv P?N7-xn`{|Iݣ?2u5U"b8H|ϴLA-~NRK3){vױe~<}48͑a|2MmŇ0h63y 93!J 3;ՑB ~?V*׸%M%@<.eC`Be }&ҷz"xu2i=Wx>x82ѿ/}u0ʧ0F%@H])NM){,ȡ}^iGՇE@K7I;]%(~ ]tI9 >G$pm؋! 3 ŮzM֮iú@kIbC6xĨ),b~Umŏ: |0ehպdLۃ,I KT>?µ4yP99ldh~Yů_7cW!uKt.DyڹWHgG2<].TH-Tr=ps2VI!׊?\H9nnb^J+aʇ/@Բ7~@).ʊR*--5}ѡw\O0INЏ PLtsӏwF-7]TW}y#[712Y9aTp1藼$n\y!jҰbQwz'pLO(6;/t W;ef? qoߥA]}|Al|&6/:жF +)͋pԷhj?; @R*+㭴JWDæ;kE;r>ȣn=84yEn})O69@^O`3+NPrqRSJilw|.2 8yѸA#ݠ?#]0Q Dh媈fC ayZ4]7($#S!64Wag_$&s?"GCZZL\[\A^l*6|` d{*y~|"\A3 oDR 7{Wbb`,^~[ܝ"b*~2Xa>^I|+3vbs; kjӯ.xaYy`%m 5'\SZxVZ4g$9Xe{yА + U*DaY[1u|k]=R*S$Z][h,O4%k$KLOXv{@a&?(}X-f9< zdc^nv+3J,~bPCύ=I[Z%y 1ۙ\cyc!yb9:nIlųLܣqIzk>ԘF(X,s>a1*fzpA qvoaZ5:՚XsiCMg|qKJݸ0=aX7wwQVTσ%G09&&ANRFVc.^&Mnf[A*ZanG!,} 1*/+ G&UqYWV-ͧUԫ[02++ jdݼ<^ )9S2UP|POXP‹&6I4.HTV{pMr({zNcJe #Wز񩐫lŷ+9B]g!CAHޏdž8=({8% '{3" X@5 +}G4.wB^JAK O>fJsiOG /!{f;@' Tt!IGXB> 7>߿zI(PH(,RpsףR[$m=T^}Dí>}T7MKޘ.ada w$!}?6ْl+p4>Rg-˸_* ly>rx)jFrh wwclšV{=[ҽ/^S| <T'KbfGQMn ⷬS~âs^瓅VrΠ|^[Ìs~Ϲw̾(, BwJ?l*vG-mLKIJ$ۑzтgZ[1}Ub/-2efJXfZdFhvEoX^2 C{C> 1(m^eO&ɫTZ4-Ɓ|&{ISd"8OyZEB=[L|4;:eݲE77PR(I*cד("((ҷ-/xo)gQXQIBcꇥ"eX/LG%6P*-Ge/pJ?لV9 読{ 6nNۑΑڕiJLLU6d+ِ4IF"C+A\Dm7@[~P_Gl$L'|3+Nvr޼7h:װwۦW}jƻLYZG H闯/WcLE2iPle.GIVH *4ZX(\ <7aܴ_XV?/p.şvb,Þk6r%$S2R6`yA/a8kBI 9=/^6>ҝ#;-Rsߒ!qS?Cߦ/ &uI!'??J;*iH $/k .>8 #33L10q(@P(@CTw". |6<=5  -߇ 6q IgMseLU EQ"ѿ.:'NL:ű'G~vjHs_CO,DB{)~)d%94r{#'5f豎 Qj6[b85|<]~;vm^ynmġڷFOZ(̪HJ$jYܶ}6K|Xn^_w>ПX >ܸG/]җּ;7p6yL^% %Ksٿ_>+)+F f}m>r'6Xlv爙߅Tt`\գlR:\[y<{I>eAJ_Fi⇥_>.)+Du`CsÒRq+xGj^ͳ63uJ9^?CE ' &o(6XPR |g6EBa.Pp/`E(BMR(*TP|keļGO󖪚9W_7˧ .IL\+@@ |tY;}Oxm U?zV@! `[yJ_@gD&rF džk5 pR2AUhu,]'ۿ3o~q$|6/UD@_w@IDAT+VK_}* ,RJP2u,>M Wϼ 6y H%#+J&?,Y(zlʗMb/7`#7y.oHcH6dD!T UQPdKqH<@po6VZ}C uGoQ;"K_B(ˆ$~Wa%B&# S2?fCɦoM<8Mw"l> 9Op?}YiGW"d*QsoOx>ƅ`WQG6.tF0 J B@!HV{葄_L;JbB@!h}hfCf7׹˝RwrB@!v|k{p 7PW!N;~( @Xc2p_<K]!ڠU/V(l'/k!˛btR: EB@!u9/4Ooؿ鋹SѯG!Pdy;!I/I$˺+ů,R* B($8:GyxXJ" B ~- =#9 _j}~(  @!`6A~<1ZwL(CJh) |u_!h mAV=[H)_HxU(Da?w)BG]S( Qัnԃ)C@)A^P(Z By._ >{(ş=՗ @#s?A+ůLP$( |E`Xg..喢U!P΢xھVAv~lۀc!U?T1P(; ^22IQͫ, BP0A>VYA@)>P( V4$j B`0 '5*ů#JguMB@!P@ . 
uɪYT(]Y{ܳ7s `kk.8Pe)N6mIyY2'jrgS,ӗtp2%9"6il@(^>RT/R^y%M^ =</i Yd(Ɋt"4P,S@'~*^e4OS%oWt+ 0[L~,@"{`XTEWw@}ckm4M}~zrnM_` y=IfQ^L_|CQ 2$yL,{?Ayj/E4z~1} WKUF` 0`ܰ Ev)iu{hôr󞾛vY?a7o3/\9ZH nߺ`*Si;^LMx?yjwϚֻ-d{gC3] . ~ʳi~8B@ ?:{0lѿ+n}?~_B2oeliw82O*S JQ>Q:}gݣ?4`P#ggC ߽c&VI}0w||3A^Xs~~Λ7{!6v`lTb4TmB]:QY Hn?nH+zV۾W_rcEEy* yr rST\_7~̚V qr.>&LōЖ)ɨtŠW;@.qg\4^sv{_VpB2 y}r3w9PGU26֫ܧ#+x`T:`.wL SD݌:,"_h2-Cɛ qIỲ|斫1rrsN7 1)R;`,TbDTJ:ۄu4$ D>h9VG@@_X(6FTU~6" UU~RHV(K<;*S#D)LC4vkҬX-wCM 妯|c& K_WV4##C  Z^LY #Ѩ. 2ߞ,,ˉ.QIg|@ry=b~uYY\ORWxWe*׳T?2,(par#`dHoh|KB Y G%[b%&ɲ}>TB ;xLi!Rlj" ѨP%/y~S=::ͬ}Go=f< h#f}!P^4,>LZpw7/zL(K;0[ N@/wZ $%``qu~W=<12i\>X[d2iĠkΥ>u`V}78OTmϢgXFw]4w]7,:Pz}:[6 =ɟݯܼ>qC;|'sFJ4nٛ_bFp=G©e )BTTM5.˹=o ^xw%;UϲBԫntM;Wء{czg&zM Jx'5z0Ł4_Uk~m"4Dx yi;iޣ.HFw2(!ZQd1Y&`hQud/NR\ևl§ c%%?SZG,*eaGɀRFAeiҨ^A=&OJ>>yBכ˨i:V&M]0MwWoI#mLDŽ-kFLVlNŏo.]-1\!}$g+f Cm!gj>%ϵy}ڇkDfvwp Wlք  XRΰפ1n  ;ا..撍B_Ċ'G؝nhNN+}*K{JXVrjewȲl.zc᱃w :(ɮdmo%oq?kۭ'u|ȕHSxy!'*P/9L_@mc Bjz}=JPƨp)V$T>ęͭ{ewg"D ݇O O%'޹z~>vxK@4W{YI]z@Sn1pϏV0C'Bk@74k$xdo݇I#:_4HE M􏹋i֔t{@Bn u%k܅kiC!ݯJ2f}VUP17⻗1y+ Ν@6rõʩIh7 A/Z4D<*Q,~}➧-%+rIN7*{-s_}vԼ%[*]_&ePPߗ;:qW|7\ZF@n^0G0?:.=r.,JVĘ5vC$+C39ӕc%-]6shPq̱?i !>>w|]iنJ=^lh-eO6uVNg$6̚2C`40@ϡ6mD|HZmqܬFs\Ι4HĘ~߿n 9j\\ed&@#5q?:M=X_j3n]7q= lb=n|CqGL(2F+]w8 K=lkңXe?Og+MA+!Xr?" PA#.!E!_o%Ϙ%AcOMrjl=:pam#v:3B7g|wvЙkk>!.OΓB83Y7ŠGLwnt15XX{3@qjq#e1{pMXT^{>)8 ƿ [ #UL O*1 i>A=C';$Qʝ/nHX*2Vgq;ۯ$lGrӹBݞ[b# ~xF 7W)VPea]yvߝkE\ q(ԙ`˄.6#+;]+>r oqKu\)V9b(й\ s0woϛD!WpDl m:>n n7$ąZʚxj uH+N[uƌ )>>B _}io/D*+// ocdB>,TDK7? 1*E(nY]Gϊh?hhnn5菷9* xӘ!] n1ƛ7E0%`Ȕ j W]A.C`7 C^ s˹ . ; C#B'{xtưA%'Ǭw%k߄e]{t ;܂ W:Q;CxHᎾ߾_`Z ȏ8"huһ <2ÖGTPO0!@&D7W})g󓏾92>w|Hӎ@&e?\{$?3;7y4uק. n?|Dr=Sm qXFs"g/zqDҽ;d!+Y߯d9DWF\4mT400 F4|GhWMOPgg>Wn<1~OO&IQ~_{ <{kñCD\]^GD?s}/ہf3k 0v9K&9՝B HW? p/?'sVzw~5$^.t{.V_bwouO4/CnD}Yx!,>wa8JB (Y;<313o!Rİ x`R/=-V}@M{Rn#1Uof8ƸN)R3Bґ$t q[&K#կnjP4) \Cg@;@,8x/sBW)~8}WCSI!Hwm=~雼fR)ZMFJj ުV3=\U_,HGO'zF! CIAR!=J:i  x=agǟ<`ӿy2\~51y%R&~,\ [4^q?v"ypU!D X3;ɈF8a,]4+rMXJ^Hs sc֢J$\ h bm-6wwcH\$R2dXoyr}yԯ*r hZy職ZhD&wt]F̻%ǒXT[?ˁ|V=B _pw/r`^|M_ޭ<B9N#-F'N'PO\~=څM~O:8u5 h~{$aâ#ؗs6nt͂9FF[e.c?w?]x5.kFXsYXүplE_7(W7 BxWGeC-۫dVZ#Aeagb@!P>rӟ4JRV\]~ja۔?kЫ+ !%u>@+K_98EJ'zR!P m }zkWdޅ5kdif  \ B{xG}a0a__~<:P?*3yZچoncnO ETUë=~Q8SnsNX?#7[ +6G0)h"@F xa + b]6s\Oo(d'FMX0`'N~2u+_&SdEy*ac{\6]Zt(ߟUD_lK2X. 
=etP@16{5NVCdK*Im o4rOWV|0Ě4zf^yKuʋa1twBmcW+;7+G~B)m}˹Juth,`IgR8}.OYL.}+71W OgRd TP~c+O U4z=РdY/S躞~f_:]X; *6y_^kg"QtĆT[i?7 U:'TB;ϧsǭb+Qݖ'ɛ~Sj#u4_A>PP,[pEB鿶p= 4o)lDf*иgZJ( n}%JK d_08(f^wVP:Oп/#T'D?}[BϾQBKʔ>y½K_/ K_?VOoy+G7S+dPpw>}w1VTHG!{F Vm6h{=~hKȻ mV1m.ICN9ȗy,tbTW]=APeJO0#_&Itv/h`*OTZ-ʼnc.t7ٍ f;OA9D5`ăTO@PeJOOm6TyC Ƽ$3?&(dRLi.$)Q U@9\XY9T(?Cts)?Qs#P2pΥB]%Z##…5MJhJOT/=Y{r-\=MLaTTȔ,_&!asƗ^5KNŕ#iE?SUK@;?DB'NK={Ӛ7]>N58{?Ls%tꂵFYpuuB!E5seةz*r&ѐX4Zm?r9tX--ZCx_?F̴ӵ4J͹O_۟/OHj9NI%@ [şyET5hmz;36z)Tw M:s.Ҁ17ґm/ BHD>\!yדlm{oػ.;{<9U'9Q4qD5mu(|O%YZ Tìɣ +t(}xO|Jtżvg@ M㠻!j~REzOf[ ܑ-9@Ma#YT|mbl)0|ݓ4|7Vw$=yG҆E `B f klVn+OFmN팭' Jgj!cwx[]h{7'&6be|-VZ‚Lг z(e~FԼ6<^KiڸhMrm}OFYf"=Ԑ)yZ~]k~b>FVhhnPK7jҼ#s%QөᓻW=Fc?Mc/yttkTlpr#9+QDdoe)?xK3޺7|z*+&k61U^,rjC25_:QG!t)M9e2/iT/mg%ooT؍l駗'A*"eXsASvZdmNd4\>ꎮhϑnz*ŭ>vbf^Sm㙮iㆈ@w<f(- S -NjuyxFI8]Zdܷ߷N5e3EFkj\?Zf}ʗ 2Ir}.ji?;<> "q(/r5 =cO/> 0Fqÿh+Pݑ| d5~@R)+}F^WfMUCK7ҞD{Nq%HSMN *-)&;~j>߲sӏ^~oy-q)?sL*}>:yȱz5L< rxm n|=Oi& ڇ!#N[y ?^#轏6ESG}G Cd_UN{DH\o;kG8F?jX,;ࡿ ŏS.q;#'٭9KVO']ާ.*IE'jVcµɕ{|Vh_Yd j=p{GB1xClm} Ktwn/;5HW>uݯ+A -~Lو' !Q b2jh=y-hC0AH.$Й*s\PeJڇS02$OhfUykdU(E_owH/Owjq( t\5h,T- -1}t+ =<6BX*p!~yl0}~7NjxWMmp3L/B$/]=|c+,{raXdJGJ&/Bhe}ɓ_;,MmH]'Nv [X*Ұذl%տ}=I-f Ұ>^I{2k1WӛC7tS=<2K%z1_h)TOllHƠ~f- vr0jƶ9}uQM ]F#ZnX27ۖ?{[9/] gbqK^}ʘ*Tc}^#4eeV-Xd<:22aU/@jnjD'!<"V׋߉k<~X;uwԧˋa8^MNCd/އ_QU8}nX# Q˞k#zlL'ýK?]Kmޠ, /%ɦsL}*SO&D ^ѧ>,t'02 -iLI9uQ>.~DSyO=N'+6jckV&%ryt7T(c+#_f Ƭͬ+madEo)A/F4>H4uinCmi2CxIy(ob+J3ryr Vp93SQVTW|wO\>1"25h'!5U. ƌ|@Ҍ '2AT!R#cXTTX\ZUu] Òz;sG,iN|HKv{ŭuWw7ࡤJCˎ ܈ ڨ$ \ )++RޠN A; 0G>jIڸ=¥:$6`62?(ypuLZ{Bˤ1T)U2߸n2USpZ$I'(OQ)SZ<{t_8bYWnSɊk V:+R1,+}B̬=nn]%q+nN}{A8t{X^G`C闗c45/iM @~}6g9 ƍ+b1 [X޺;Giy]X&[IT|'Z+o,OeS-jxYPe=)~kLTTyvH*x[JAgHՌsbWձB@{{W 'X  dD{ !kٽK_~rA}toigϦ] v|bؤk %fP xOLBl ̳]@,Sg!%WIO_!H;+F+OJƤ4e LI)~-סS^v%=)~ +V[{iտ2L]cEIea-m"C(}}qk~g^2'޽طႊi>1hp`~A& Y1IV;tFdA~7Cn5K 3q$ؤw_%70Ƌ.z*]mxF4ߡoO2u4*>QJ0⒊'z_~!UC#@~F, Ɨ?VWD;7ac>*K({"AW:S:0lԪ{ lxe2Ï 4!_|Wļ EE>sT)~T+ EKh2ux'P739RbeNӛ⇋FEƿ7 :pt]1H%.9/C2+_֯`2;&|  ƿ{;!=B(ٯR'E'YP)Db@IDAT2N]ъo I>O5SE4F _mZg/[{!$ǦR Aw(K̿UǎM[LLE>LL%ГY|V,Yȗ ,JzSW?JLyeb[eGλ ˸K`y`h%;4/߲ʋѿyyS:1-f4L5۶eᲷjʾ r|v3{",K|E'qh͎ukv{յ0vr*Iy%2[Pۃ?2J|FNѼFN2avmg涴4bB-KkԖn.JX*/: ^}sִQG(*eufk(~i`&o^iyGx\Wss۶ظvn|XPPۏl3"uZhpZgV@< IRA8/|`)RdNAʈ*KYΐL|^ϊC!Z ,}җPPY:?*eȄ # 2e=6\+DK\+KaAHʈ*Ki<л҆Pb/g#}(|3֦8@lH/+-[^ݭJ<^TNeU2.+`֨` $iKK_3 2T ^n s&l0sY-DsZ,3r<{TrIKB ʞGJC [G@bDz=T"R*rQGC*+ +J%@"*G@PEB@!P(#7( B gP?gJP( P?y  B@!3(ş3YU( @(ş< B@!ϙR* B yOCB@!P JLV)B B@!<J'zB@!P(rs& B@!HPA!P(9R9UPB@!P$RcޠP( A@)*EB@!P(G@)1ToP( @ d"T!P(#7( B gP?gJP( P?y  B@!3(ş3YU( @(ş< B@!ϙR* B yOCB@!P 朡4C~G?&ap` *ӋZC(Yٽ|*ky/磊 #HlE]v*6`Et-(]]wuJ`Az啙;KH %y/9&3ss;o={wj൩,  u0~0qf:c5$E!#B#lFEUҊ0o8C/ i'un(/hu{ae$^A@&MWz9ohbֶJRhsN:Nt}vwCw>g\  @!P%ioݶ;x{X57ip?_gq\u*[Q:63`A@45FR>K<^϶)FZvEsgkkw=$3  M>x#vS`Ơ*6t`Z 40->{'.A^aUQ  T>}Έ}-N;h_0W%N=d87 >6 + A@j@Sa.N6dt s|5_ۘ~5ԤO[BoSpȽ @! 
K7XBzwg@ E:}!*#dA@A  nT[SOm+fQLI;];ܹDžq6l%  B71#'S귒ys;txRmٝXa>vĴY⁀G΂ @C# =S.Զb<:yO/ @( sU(.?IgΧ"w-%O'B\ 0*–^ӯ/mY7rӦwg)=u068L  YVѾMP4st̴߿*,A@C@$j`޷cC  4vx1OH?!M' 64f wećZoq>Rr+ 4f ܅yn#ZqcJfX_< T:A@M6mX堷{v4޺7i a @cg䳾v/[}|iӽ+~X Q @cfLӇn1۶~sU9r=41mvܢvB @x!И?buy/y ^{ ; 0Mbz-ڹA@Z#И%)3d&#??`UpL ӴK^[m ZA@E13~-_kX[֮b!%i`ZoXHA@=Q4d$՞:)R3s J%~ C>\49P[v sgI5/y42L7k, 0Xpo#oe7֌_1}hoI3}V"~~$AnθV-F{jm#E(u%{lφ|7Y^+K-93L72SE u p)Z3~=O??bZߒ2ӏmWSvUμҳݻ95VP 7nkUp>Z1Ezs[yɗ>)(F LkOG/HƋ_5zmVڲZ3~Mxu4=_ WiM ysٔ_a7腅o?pDuݿ}Ǔ"yPzcO'%gS۫t^f/@Y%ND$@FyajO gN `)Ǡ$({w[9׼x(h* AfGM[!e휔#Zq=c& 18p$N9hӘ9OPA9Xj{uT;׸=N1 Ca-D{jow?X?K$@F`䔯`w獔)' 1cj餴(E1lpn'B# o2ViӚ3uklC:2|9 Λ=m!b(3gÜXZd陁뒤_q`\4upH0ھivz!y~_ 1Rt&hƎv͎2s0nS/)bmDm;9֧8?tۍ{z9zK<o˵ĶV$(ݛr) &ՆrݳLŞxט|йZ?3>JF򙑳ޓMjN>n{;;X_F  zcMHr΂OP{}=KKɦµ!ہ~/җ'00,ΞBWrL >W8xXHQW`O|,`05ѓ^ 5ovʤ@̟`b< x~^+qgllȌ1%O5F ԩ*4'xxMA@%<>fy ޥsR|]W ={]sB}A¬`7a_0q j<q00w*yZz\H00(SO62uE=^}hj*Ed x=~fY3{l1}~X.k[K̟Gt8g|:y=&&v9fj -A#>y3u5/c]PA`q0hN_NWV}5C5mno^ A&@>P;>{ކ ȡx ]#P.G^'X->篧G%[,@cGa?}sUG-61h=w-P`8 kP`@c<饂U>7Ca.zB6mmf ;JɄlT!z, ;/}TiǪ Q "*çϛܓ۾ttG*A@˕%ڶ_u\S\(j(oLh<.%:…_|fh~jzWbKvY0F@1:UסV1Ǖg.yp1[s?GxyZ -o4ut*X p--:8 qgs_Ye#>vNLYY@N W*M!a XUUkSNEyEj(xp#EةSXr-4 `sYAww^yӕzNhfd[+A@+ rd~U}׎[>h=+?=3T{d{.}ĸZ&^ fT6zpV`e_ͰfZV"I#@"onzy lv!t߃HGt+:;rĴF~FƬpsYq77~yOywSTo7M n\<4 |'VZϯ;30%㳏@ 5ّwA6T0?>9k7!*֥i{A@jiM(5)ͩ4y1>q;Ѳ]kߥCvg=6uQriz#=]A"BWtlx ߨ/oH\=^Y\t"ClFd; bl Oߚtf 4sÔy=]䅄2rRzE¾]UcZb|򕤩ڷX۲}N}9i\m)rgPnxK;WE' {LEtXFżt/CϳGs esa+g=g C~ jTƠP#8(7ŏ K5.-&h$3/s C&Y;+=B+\๘V˽ ȑ#}hL_hhII,v{_UڜT_W>fRYݬ \:ϫz[_2鳪*WKgI}^L>u?o_* 2?7gܞ3֙\ל!ao1Qo)ppOL zAl@yϩ9>h쁕8ʕ% y/uu\|(-Kf @U*;: en?aZ{D)]>õoe(eU ٞF]?b)?r䩝+KWC {nQP);$ (iEɇ |/s}d{ G`PpĮHi[e_ _=͔}q;^n S< JZ\h1t<O~0}Tw<X pVSk/S.a^ vh5 mę@5 92 CːWJS@gzИa|c U:#=~"-.uhpqo0eضvju)(_ @X|{[bZ@-kg 0ږ}W;>l/W?ߒSeqDIj<~(w?cj(!  gݱ +K=/3A"?vԪ*'=]j4|Jw@H349tޭ?oE%>>/FO?U.0"#T%}UҘU&8cc1cp8pfXr+0Enjԏbt}i=OM3WlS8o{g^ڜX)ꎴ Z59>E*>J?ꚿFޭp[rBNf5<|,Y 4Qɩ= 3rp:,f'x]: Dxy ^U:x/}HvU3Y#vds-E{+POTM>7O?7A޲KM(q_? > 2&CT% dLF͢󃑊`s8;ι7<3Sp$o~Q2e]2}.|lvdz:dwQ*#?8 4lT[=_hxUUO#xH|N5~8J$q0C;ː?(ݺ/IQ7KaCJgEIh"bkSQ/jf:L)EjoX: UD[@\0t4-2xɶQkaA/{1P۝aW}䘴%['1|4E_usCqij̴NT}oTԪq3K#s;陦K'L[Qvc9yUu?W52hӚa }1wUk趇O*1*ki7X{\s>NE&6#*|x>> ==>߷SWق 9]BT/[:= M^BFE#;,(֠ ҶASMp?8pInt$x.H/xusgB6L,_̴Z9llvR!EёEQ䍆IYkpP43`8k,ׅ7x1ޱ}]$7J|:s:YFj|F T)CW &Dm>5C92:zz/-"GItZ|tyIJs0R2İo#KT^n;8>_:d|)Yd$%1": SsbI^g񴃎ACpEeHzs$r-9#j-a3q&؂(8|3Fh ܈:? eC=-}T+[jpB[k .~da ρ-}#ñWT@hIy-ca㧐,`C`6L+p]1]a{ [6xX<5X|FN|`f2q0*fxHGټ/#9A\ h붙a3.f!k0s)KS+Yf=hu[jnT1'MMmn뜨77xBB) X'V Lq X*E|[Ϲ_M`}mCY%{pqQgDh -kc40g5QZdž.yt[KUa(Jx?\d[yv_CgٜjWwۑ߄YBgjSzIM* &I@u1VaЧ }fR逕ݑ6xnX3>=;#lEvlEώMS mao`PQYU!")TP@B{$ =aKt;1`ca(TA04o|~!^ NJ )}{sVzc ˘vuZP?/hpr)@#D\w&"y+:`LAq A_7IUj_~MWg|윇W 9I$ %Yú-!^ aޤ;G>qSG  {|тۊSۆFag~` Ye;A@` QN0(.auYkuv#zKA DpPVdUu. &O]:zʴ…^sԤec&O3hZC@ J RqH"pΆRl#|zS3'00Z>}δkP].C+MhS]FON^цiz|MkP5y絺WS [iUYW ï?\;Q; :{̔{뢂ޔc]E Y3^sA@M~>ABUC"\~I)IS뇃Yfh|ʆ9YBBA @կh}#l<Pl\nPP0G> [_LNH:|< B䩣tW.xP#MCgV=)e!N:7pH=Fʤ8h(×>Y' &ٞx.pJ݊'&߼7Iy#h( <2tٵ_w=#Y൪;tCSO, |^uU漣MK|oU?m"7? \.yҧӋl Wk:ON&@UWQ:&#Cpqy D9 `H5yF;)&k/b(ꋘމ+ƹf65P={̟>݊z)e\sFov[[s^ϴ|YQZ"ާ^ 0WHeL;=J)1}.['%J4z3]f^H0 L~q x{i={x8`P04,Q^]_2ӆ+A!ߧ_U+ ->n;o/vzk}%Gѡ`x#eU%iށr0L~)O{cߍU7p">N;z3} ]Wz ,r^(S.sڿtC>63+QsMD|~ϭ9- d:)dKw?y\iv0 teF3s+.qgcxfj74НɃfCi(=gO}dsI9@,'㙿hcO(]05iUɃ7GMNفtQLr!4X!"o*##/# n)6=`-xZ܇sW5z;0B6gƗ3Tȭo=CWA_K N3/>r:Hzv,Yz2ɕ1}Ƙ?\Iܕ]x 4Ic\O,1P2 h)~o/xBY$ 6 |~s@تe 9vArB7%5;e|0T%fzMѳ;9`~lBC~  "gIɯ6Wc= \†K0"{6<~#0}EJ`-ҏJ0#mlCOuur$} e_Al ;r[0u?dGluO睅 IL_k=0 G_zepsUZ^~t75?вPmvm5;=l_Lczn(+``[mPQg`{`ՠpI/7@A[/yiC%u fL'6wBڼ1Ft4CA@%~a$ @At9HS! @/hvũ#V£JA@D%@Do!gA@A1"9 A@Aq"2ݞ΍U8. P_'& /ԥA@AXv'';^ERߴ>! 
xHOc>wW7Z l):['- wUPAvUOˀ/vpJ `2;q RTe8X~GR$Š%Y8!o.[+ ¸_C? j)G"}͕6mE1bzB\eڦ}-+x|#'PUmړc;~׫}V>2Y K T g$^ 5bQW:zǣFnzr.mzw]*QNI 9/,-{鷴mntR.|ңc}a|P+H"`_2f T=&/B=eczO>(I A:yzp?:O>ͻ,k0>OlʋhHЙBcH" _E=I|5LƠGn e m'Qv[c{@?$C['D$pߛo|V~yړ~^\o#z3yUcuHœ&~uR8P͍;@s[8d (4F'6 ?p:ΐ9p~ccrTp@54P!*~ 9Sڿ~ h%{"&] Gp3j  Doo45,#d2]ab2؋9uBMX"O -[COaٓBtC!LHCz vd|>[ P[=tf뀲,_~V7@tz^Zc|' |QIB|Zf24]Z Åe>nW)V~UQb{%AA'UQcP3}K/XPЕB3^}ToUWػ揀AX_I~D9%>'A6[mر:%$kvϠ:m=?zEubϝ.'$z?gث:2|zܿM2>M|ZSfofKp RLJޫ@iN3[hLghW`"j2").I]&MhmtfߓꖭF/-KϢfqTP覔fcW@3훟|O1NS,"*M:x4_;M2~vCK?B:]+\it4{ 5 ߮@]GYԽcqv?h  ̍&CqͅОt=:y{?\JYhߠ^=׿=P{ul!!6ddSԦe~yԪy,@蟟HQ13ؿ[{A6`fOkL;nAcJёs3o/^]gQ X0:Wl|j>Ma"գs\}M,nۛNo=pkf ;R4k7j|+$fl"wЁtCWn6ss-l)mԦv>_/XU&޺iMWD0o_tL/v38`| r҅gE0ls,2ZS-/~:PX3~H%gҧ߮04: Ϙkϧ.r ;g[Q?>_JڴijsOv& Fw_=tǠx)VhdX+Ⱥ7%lfD{c8+e[)&Al%п5msff\E4M5%a~smZ3[vQ-b̓Wn؉y)KQMf< Ct$HP?Hi ,[s|Kܖ.8HwDO À!0$FZ+N?vFoðh۪mk+*s4'u̩H雰'he 4,~_3daBj1hG1~3hkoA>o>|p?ї3{w,ݳBϏv"xޝs?c5.ͻ9[J>ʰ{ӡ93m; )y⣅ap4W- ?WsK. sjYW0zo9)S糄#HqOiVӌ|uESwn̙=Pv څxTheKVx%t+‹8VЈma_t`g;R7l[F8JkF PUʷg_`w㢪?WPMWv"4T UyeLS8f*Yv"Ug1+J+q P(fuUàwۄzAN`اN*BA tu%?ʈq_QVCJJ5,@ M ?]:tt-n7;\r1` 1AA ,}-~14([AWE A@h JX"1҆c%cH UD@Q|t1 ɂ;aG6*&؁s?— Ɩvc8 ^ލ;}_37]@nwc0=fuo4ܿ<8;.|<$ @1ka/@M`xba߹mK[^Y܎}}kz}д7?B腍f^9޴{cw+ߧ웛y@eL:.4rkЬS3 % G@/ek4_$~E}!86-O^U%;n_v t-r`+o=;6oY ;oCHZA…+PQQ'Ma_5 së9Ɩ k:&i0FEpV;+xkQlkitK 3G"*#гS2Λ?cUIZas_v'z.Po}khx7찲ny4@f,3-er!@Y Hɢ/ UG !..j}{MWگ,$%& aMS{t3v5; jz˟>rf-%Lnt`XgxiWY/~}D/ P.ҟ܌w;mwS,LJ+y "; +\.9?i>]̃G;<A@A 6H"j"`xCª3ӯ(h=lS빜A@| 8"[Y5'c]~rf.G A vU$ +5a؁A@B~ECĚ$jA@h|h_٣H+A@  4~;rYQe&Pa!B @h gAQ]qXHPkmKYf6nw2j)/SBL!)5״% (P|~ߘ6(ت7/m=-5;a}A6T[<U4A7z$}>tvƳ5 `?4 U'e'hj5`(4A0xlWHD y)pk Ebp7Hn銪lu}* UD]i[л_.t_H[(j7maN*(NE4x;XNursGG`TO٫ P.KomZxR=h Zzn 4b8)v;_U!_ 鉊 n7@L#?T{Z,>ZTMY5σQXؑf^N-k. @/~}R唯Ƙ)ֲ>5#trA*ڿXJ%@X#P*嗙wPEFObZ7pk[Ւ^ޛ~Ჵ4jjv1`ut|h9}|p4Ul-G[oL|x"\b:i{A@ p]fK5"˯goŹC;VhOPcmf2QiwaWpLɒy7vۃiWa{[JD:2pLɒ N[7gE3a:={KQt>΢ު%l#쇨Eղfgz< ~Z )sŜ>G njYC_FגD.4)[YG l|%AB{f״n;;םT SVz?>opI ̴ֹ޺{Xqx?{%4co:)&juKnqL_n^k׶ն:uJ.4R੿T70?Z"ȱN~Fݭ k\dݴm{ 9:P0#cj* 5 g&I^c=S eߏw_HX /*X9ϭ9\g zՒ>VsLZV-KLgltLul~vPu^Q5|E~I_oѹ?:΢6]vF*>+M(r6Q`En/*-BU`^3܆~MJ> ((J}F# |/^UIXmN ,O~5?[hB L02~:V%ZkBH5A-X0Άl&LnCMCSoMq|@F@XOTp _7ےxMa?$~m|MՖ u܇`ǽ*p*3Ǐޙ[%N_/Q WGM%ԴZ RkId&ea3w7׺6aFm1Pk EG`!~-C0_KR$ ?Mcpߙ;S{ޝ1W}:ky>Fݼڵ:,m@<9H߸Z&\JC(';hh޿6~=*hkOGI(Ρ"̸N"AN(53;qD_/O^IWUttLH:9#s^fq4*=s7Z躡oUWN}\Ԥ-5cfL'ktF/BeC6͹<>H+ɋiWN9E:}MIWtO7\ŻڴUH2A@(E_#4I^Y\@(?.0j4zwlp$vf8*GK*SۓG{җ(g lAL,ul;WooaW|vr`WK-5z{U'X>HɧP@ky)L^,FxwuX3AiU+A@UfQ +awuJybt(s= Ŏrw'CNZs*(SwGvO|U¢wYB`Əf֎2A ,+a_<+Jj~MOWu5aKNXm0혆&4 ^Q4_e4ittϧ}c(ٜ]WYpKv$ɳ *#4sx4:\`jOIԈE^KRY)/@~U_5:mz 8vωKBOs.#8~F;*B4'i?}~']o2t ̜̻[KR&κ9}i|J? 0,($>#嚖\[ w)Nzr-@^XQooNy{>vyP|+!@{iԯ sk=|!kC]Mψ*ƍ8N[ze !p!Nh@^VMSN^0{}n4z-Lеy7GMC~a95o5 AkA@t574V }giW-xRL\WI'0}x+[><a>p]5| |ikty5'l7h&]R $P^Aoow?;Jc.pόpD8Zid}n.SŢ5Z;fmtw!-eȍ n/pMuQ{]O]Ve˖+6ƀ ئBN  HL $$BBKHm1]ݖmIVo7sO{_tޮ~+on{;;h>A;q)Q|eP=5 .]e4 x} YA;id贛[5!w%7x D5J!'#oo?\aCsebLLI(O| ]M{DtPt[Ws8CnߢY$> kr슩?RbgKĜ#~<_`*ژ&%ܻk* spX^z(x G[Vj""5 wDlCv?ܤb6AX>GB4 G@'TTT}쀟?n}[/'I1/7cԷ.a =t 5Apw"}(EA43G+d;>iQxEO(*^⟞eZ`|4{kaS6W Ou\1b`p*/O,㲻~he F^o;yy؋; $\kh< Rz1\dz?8A}(;uuk # /IT;XP[,f&0&Mt/=E웙] #O>ё>E%86ñ:#{>@!p$xޚO8qYjlo;dw Uy<~{kAW68ܸkrC; }.i< Fq~qP`C#驟}XuWҘ~98 A4lVfšhI3Cz;1"_q_W {mث{V7lCMFߒg# ,pOtëE?{喼^oE:=G p5nQpFpy(PFpqkvx\k|noiUu}TgY P 0{cd``e&, lt?ӧ){uc1&xQǹv@`2SD~<гSAv:0('Y@e*4rSvy4e/u <%<2#S ='P=wNy̓EUMv*¹$?.qznzmjYXsҒNFЀ*ǥM?|XQ%}a@,YPX ǪPe(i(4`jG/< zRph!Ao@ntAMDalEoD'c+m?to(?Iq:]OМUi 8nS@BhjV2`^g<԰Ej_/ FGL >AKS^)0\ZQ}r56bq>n@M^ڧvbi偡LDe m6#;a~t?ހོzåC[k)ط(ױ=xp|(#0mH\kygm4zl(Ql rӰy@8 =&BM`6; ٍNe) 9MbB؄2dM~jJ|LVd*Z?>3V\ {1K!.”?YGU_5 A ,~#`l9E(5v?  
5  f.?{$ 5~Op/)m4Kvo7-ֈZ5^E @Byד7 $}~mѼOlW8/OZ!f7 r/xfܿ>:rCgc0P+&/=xG# 4&{2z4ʯd.SL ٳwܻ}oGo#Qto_Dž?>t Or`Ÿշh̟I⪪ϸL7KDz0j]7ox^?7?])~ٝڼy ʪv Vb_o.ۯ}!fr5&?&C7){) -idƧM#H5}f)ҿX? fᆏjzI/~UbZBP9^V[|jmm5bYw Oxm 7qƘ4~~ʤH7Ι&o>l;!fY= YiTٓ#>l b2\l{e\q~&y@7t%GZ?9QI=@+[,?.??ϯly~s`.OB΃=;[lnvFuOћ*hn*Z2RrC]Ż~knrDyt)b1ʝ'0?xt5;msOfov$/ |t#?<#D}~|?W>nDOwX1Vx Kz )m4O}*`'?i4CŸ1k${dǝc/ A2N|̛=l3[΄9Fc~ZMXs`:B +JGk7}_W.'MP=MUCFy$ ~ IF\úeWXf .c3OA%eZah5V^~w9~j)[d\[>Ưw9j/K?6vqLrxҊr%!9iq=BTZQ뵻n ~1> 8jPnA@3uY!pݻy7nDj+ʡ蕧`1bK1<Crf$bqz7 $i4}\GxtrX ~nْ'G,JT9k/]iM>>hT\(:N> 9\ ͺ簸M:x&O?GKTxp@@3_z^}FC}}W}v"I44?w= ݉\U>/I9[^Pp쫿S'>gU/@@f"NƯBM"mM7~qAv7bz x @ }mkQNu~`4 k;Z=ZQ>G=ֿ6TZ1C =g}׹"?. 233?lkT퓫<k _a!gޙ+Ly}#:wdeĕKa;\N>JWSJK^ןې34Xr!o~z)O~*n,O(y9q>]SsC|_G7bN$Z\])tڦq\WwP)Kw%F[R¶v>0dzs\ZTT4+F+d^֞sdbx Xy~6( ` .?{R9#y?7=O.#?Eҟ4Y3_4nE`~/=fyؖ#Et+eF4~G T[3R1XBsy=0{ !=Ba|#/&h qў/Hqlx4ZӡR֟QS^R {qlP)hƏe>Ư墊ksnod(D>S=7mBCuQN^u|rB` 8=i{]QA3qWLkqTb'I/ 5E'& P$W1'#5z!⤤5=u=Mz*ΧS#@-Z$#H?z#s29]@Lqe+ߧgsPс}J-5~604DGRu(C{ %ѫ}T\k:F+:d !p:ג]c$rrbd h1*;Nj#K>B [!B*Hk?UCzP!A؁K/{q~*S|B_cU0b|TkN 9>Y/?^Srl `kLNNl!#ӭ_#mTs1E\'vEy>AR7̕>Am^8:8G@ScʱXI[cdrrbSfₓ>8*GH'Mƣ6h&YS'yi|\V7zd&u~sTU~;5kкQ3|˵O5Pls +7f$Y? 6]1gDâWņ`84O's H.CFĔ7 k(B"O}{aW O:;^ߡc>GG獃}MzV78m_1+:}߱籝G[E$6hw<+Iܘ (5F~WϚ e_bNVsjc~ZW@%`&,M{$hMvqigw_Ŏߧ_/2+u7]l@ߚYMps&jl'(>=Ip8ToIKvٓ >I3f=74q/>1EG{-~2&}#Izonff[Th+  PLjG,6O73nB~Z( ТI\bNً4;}R9Wa , eğ'ȨUز/iixʵ0nYlW?e⾔qRk_Ml ΃V߂'+- K+gwb#kԎP{2ĵ; I8wCS+}HtGs1I%Xų?,M##Ԟ!O)z70q>87(&B.mV72ӆg 4:q֞@IDAT5Vڥ~GshϏJk䩞IeЉvazw 96wRjO@qٙ`Stv}43{5}F$TbF?Zau(ܪPpJMϙ k g?z+~VXv@IE5|d=9| szm)I J~:+Ie3&4։녝O`HeSV_k뽟9Q{ߑ??F6iᆋ:o+8X dl-^7n-=Nck6jODi ~*.êU: T>)=T]$,}>&Wm c%{y nl(Xq`<10r{G ͻØ!}QЋ[/npĩVB?T7to뽷\zf}ӷ_~6vaS0+ V?,FmVh\[vȥ]iyeQf.=8BˁbY㇢,w˿|4]plnUpғӡOU[JXi$ڬN Iڲ5a#kƛ~;lN}s&BqYeϢ53DZ|r3T6€޽ O̴dyqj~_50aXtx!m%_N _-; ~§%Cs᩟\!wv/W!NN8c` lRZ#7aӮ#tA1y f笅M;zLlE^S:PЧe/|AO^ްg?#e %kOjU3K+a &Ysu pL 22MS"_4Lr@C e~n,N7d\!1ϿՄOf{U hex; N OY;1vZ"8dp\ eˆ`~?7ϙ+ޚz64" fp_"w (u;ҳ&%wDb%76j-=Yr?_):UU '~ ;7_7]||߭h+<,롬+=iҧ]{]!5]Ft"_2/?r2!]Zk ![}j,=F+6O5`G0 Tdާ5Ef+g6^XVz0`< d: LMC }FShHN;-0- GOV@9 }%ӽ2$&` hr{e-:&@%RQaͶ2^708|ҊR͇sO ;;^Zj+вڢFW;Q1k-/£p:qs#;# ˖vcSٲiھ.'(ho+xLD AfšX<;h޵*ؤXSTAj6Yq"JL3GY3fmb ~}e9^i·ڶC'e3?]LSV~@|y=f0rN֋w\-AW4ֺܠ: '+]7Oyu=Z)s :)`D{{kgɦ |Rbqt%n"0K?aN,b'h8:ԙ"L a {pYNY4sAq5||_W.T`l,G x`^\AEW}&Cr?Un!pLz,mG FZgnFA怙=Yn,xd?,MUş>]@fi(Jmͫ9-h-(g!(pMYI(0f4PJ:S$/-@Ytҩ:臝x#a˕[pq"M[)>almw)286GWPPGa^`jirM2?&o(tpPw/6Fs2R`49 rqfrDy#It?I&3A,4[ NJ;`]p0qHW'zf2Etf$?{[>jolj_H6}$? H3߲(vjւG呄%kKpڿ.N{dηYC5Ea7:7,1~m XtDSۜ8,x&GXu !Ɲ^+6?J+w5@Jws8x !ɏ4 $;pvmr*YpCr^ş3 :Q<JdQ+dm97?yU/jg}~6q^CH_9s2:3ne$O- 5s 387 tEkaݎ!ؽ遗Y$wC3=)'!K~%FC:TTtj/A>o>) A8L#};z9=gNsm%>(7s5M{ޑ\v֫дCˑ̣)dۊ'DFFM%MO_cYudo+ޖѣhDpq'>k/ d f ˗$D%9rkm;[ӛb_xk!lu`/>w=r_9F7@ mo/ 8%x iyL`7P=~M[vY}9;9j®'=?ǯBgBKNU z5z=9'm{Z%4%ጃ80|:hD}m/.Ä>#Br,)t00q3ӆ8R5@/b .9K0?q~=ܥ?|tw3,f2yrFrƤq~sYziMA3N0#S;@9ֿ6.M&};1TuRK}dӿ|5_ 5~J&g2DBur[nDjY\PfQ~s?|1D}T-{tE篎 g]/ }@(3c.s\x\̏:3qc~U Ϗ1'̾S6 3_Qww/2AH;kT~E@}h=[ϽEd{oTx.zU~럂ܳ^ JqWKЊtӡOC@/v0?Gy˪Hx0 `\fr8{ 1,½emL~>XAyf)G#@@8!=mʒW5,Zx r64m|ay)W-g>[Vo';`26f%:x Tc\q{ KtiAZxp\y {1 hrH1XnD qsjA +\p {0ß)|0u yZZȉr9 C\\wT-mu VR睆pCen{w6#OA}[j_ kR߀J؁7\4UG Γ6@u@[u;';W.wЏC*<.?<4uIOBv& N:VcB?pmX >x`QPJ$;s:G@I,0q[u8,$N𧔞  I ~4(r$n<HMʇPb%ȏr$v6Mݣ{YvgD"4~k=WQE`H0Nl;D9{@ZgJ׏4J@8y }4ax͎[d ƬY_hO!>X.n|5&WD?h&;Ȩ)@/8oL| ZbIh̎uB??k|Ň-V{jtaBŗgbO; /dFFu !7^ ue7M|)f71 ?E12^=T5.M}cAh^:p7y 0 (loU/٪<9Ԡ>ϜY,2[O5x^N7.q VzQn h͡bf~8W(r^|V c `N@7xfM}RW ?_7Ti vEPSD%0 s<$ϓ3?KL5KIJG#X!Z ыgF*(9d68g6/AJVߋFsݏ>CW:;_RQ\ ~&n_2Rq13PH>}7\&YA}!+@4RZR9o_]'Z0|W^֗0ؾw;f[pi)G3|>+46cul1OT$a8#Cu_׌2%:&{Y^\Z:8Qෛu?2UPk~*ݸLx`ن]"0rX]YCaY͉veM sh\vT,>%g 1>Jka7w%K6RÑ.Sao7#* NLo V`d6A! 
$^me5T-s 4nSag) }' 0&wH Q X IwRčg ;Q/] V /FG VТ3Y)(PBOp2C/@tS"#P!VfldW ~Wh󯦕Ձ%kc2 ~=!,(l$1oqx Nou'ʔS`ۧhǽ6˺Ud⤆t–݇5d+g>#:Du@>v+l{DV:mpd0~,3{DŽ>:DuB>vKE"@ o~}b }&te 1i p \Qha"sݏ8~P{?j\~jI~u j(`MFQR`!lz\c荦?s0qO20KdM_+dy0ÇKu wk)Bz$wQ?Z*eNK!E5+nz19sr^CǺ4\'M%'@ {@Datg`[Dzȿ_~ȷ:5pþQ8 }\Rę'؊`[Lv.z5~=_OZKD,Xmlxj"ڈ]Nt{ވ~RpD]?yZN2}Q(V\\ΜC/k] ~2ut_]׈s1HIV&h&pà4tippiTGή"uep5}Dgrnt\:ƪ ~&bt<'&i pKϨp&t}K0%bYS׺ eizT>B\R0zANf$'9lv}]^-{{vAޣ{?X' XV]4cWx1f$=^~X|4q 1~C ;IѼx9 ߳NKLXJꕷpzxݏibKF>v{ ~#x6$^!M3^B>Yǚ]r׺ed&bSBiy}'q[O0( !͑!Eu?fuNh! h* S˨ZJKBVN|D"8DHn8.r> |G^ H ~tAsv K8y ,*λ=@ 6JSFGc?MVGC˥`:pJUحsovr͈̘T\}(feϯ R(y=pQ7g^!W3"$u6w vޝl[/W7Âk)nP)^BaW7 p%KAL~Ϝ2R>蝙|L/kϟug^M?c|~kWttD `` icѓpTCٓp@n c-߈a9pAʜ693&' g}<" ָ0nˈ70"u6xژy?X 1^Lw`˾aG[F;MoBi)VF0 c+WBk1f`I\Zy:urAEK){Á//+΄5NT8g"xۉ)!"\4m4(Pܴ&KNο~#"(/5&t-C#Ͳӂ9 auSg5q/.Y>:cpd9(à~аp̴Viˆl?LŜ NwwȶYӡ 6uPZM!%0|kΓ;nbtKrH/ oIOpQFJt#zkjq+p8Ok?V(I{:tL9چx:$ /C]C/D &shǒzdC7Sg%AT4Jyj^Uw Ms0C6iykg6L#S 炟!bOyGZSE6=0aX蟛 WmUҰ]X89RuJUeU`HqԤ~: ;)% M{Rc |0V[޹zf;Cձ#n/a7#GK;}B0,zəPt"3yuǝv/so-ƊywrzMO6KѳϾcGfStR(ѳ[ie ?͸=ږn 1^#_ٓrt(횼.dc~z܇S,]RLP'-miԲSL (ޜ -a=&$ b_V*pW>t8] j&Y~ ťһp.u:}`}U<տkGR謪= };6p{uOy7v"ݣ'py>X Mn_lηU&4 _83vpSa=r< F?å HT@7"hFɌ?ci6:M׳D}y/SavhsK`@z/EXK>nx]ǫ;㡥Of@Aag;Us}W4 '.h}|ocAKdLL⧠Ov(IQ|ה4&3kp MCDqi*Ԁh@ޯ{.M oA7njbeO%>^!=w4(>:SZ k-߷<NK.{F}vڇ3½'cÿ11ʇL'5m5|BKD CS[oX} Ccܮ&I.#c|Frgeou4q.Hv~Kb%['Ja࢘8K; Q^oro&nQ?~7(ڌ$Ib˳ޅ{ ~~f585˧i}n1)tGZ2$p?Z>+7Jk_ ̾xV?堢?׺Ɩ~(rߢu-)HF礣6wz"R贴(O9\ XΌDS wIwdPܩk|~.y.scgL₟Ѵ׭W> ".mFlLF# @yYu/t8#h"g&2!QC}PG~D?3K/PO~B} Ed#_]ZCşQ(V ~f3n&K hRJ"Z&ZuHnL/uT~K rQra4b+0qWCc'ti,x*\NL"~Rܱ!|[Olp-Nb1":f\5E -녊hB4$4D7O|$ߤAP_#>*IRO (l%|†`Gأ¿[FƏ-gM&IЫvG3s' PVle% 'Aed]pG5©lH#Kt S?àn: G@٩{\'M z^bQ5Gԯ^Q歧喼>/%1M.G?p UK!{As{K<|UnF&Iv{8Np:qk*Hås;y4| vF:(D73KW[Ʂy`O|w/HO "hLn{() u56#iLp6j[ou_].U5P_ciq2*g1?_eqjI+㵍NtMO3 P+>'nGB[^6gqF hR+60H]h>M#}r#Ai"䡦DL'KtW6nj8[]'%8Yzv4AbF #uzӧ){O|&ף9q{OuUFjG=q56h{.'#F3zyql5.Qx4(GmaȂ8$'9>in^#If6x%A9 c3D!:!> Ņi6T $OIIdH#D/T\K@Y +2h&NOո޴G  <}E%ߪ u wL } KLLrȔ7w{GuXuSvDG[sMaa.5:d1*,Ee 9?.j˦~Eۏ̩O~_0l@7vJ<2`pzRPV&-=i$e6u[Z"ԀG?-E- ~"tLf'\i2GD҃$K$y4?Owwy l v$7+3{"G<6ˀ{`72D_S'?/.Dfp>iv N&$h`,{ ~$O||QgE7߯<ZNTݎ k_&MyOWJ_qv&ʍ mMuP{Rj<>t~':h'a ;?p45B0*^ϔ?ÿr}R ?oi ܩj8^Vp 90"' #Øu_I- ~za\竓#n3MDM#ʦ&-wRH'ZnvF2 2@vC:BNNyqR靤ӼzjI؛d+߃.bƏcfaFwV 3\K&í=U_ppd [o yJwq>j'}EgҢ.}G9Q^)]3{1jf$qU !=-_K>3ɧ!Pk~CUZ~{IJӬڈm.m{徯 f@C+Ŵ>\u-w'ʞɼO[?^/Ʀj[ASJhE>9qU5Om 5O>JZ4>V2YgN/M=?0eaF.5d^?RR4oN( W4?zُ]C>=Ā,o;ۇJ$-} LN=Dv6r *{OS{|w jCԦPBmLy'!JF>V?郥>._k}Sf_8L<)fRSdK6 $nZG+"i{Y9}K[J'ylGxF;?-^@QZ -:g49ΞzSO ΃<^஝+V75Lj?hAӦG@D4qPB?/Wkd4Gu2|:~gxQcZhKLJlF)fd<>W@mTڳ1 [B8q@^DHCMm]c'nqx0gCcmc]mύvݾf!4{Mu-6k&' H&Qo _]?u8?אg*nۼ@hc0tJJ<'G vHNM5g,6Zyk|Om JǤՓ'MU1u>W %,ԑ'рOm!EOtzL;tBX?%vHG,:d˲co8G܋/+ZP_` n&{jO?i>vTڟ*6G%UOGM3%*OvN7I[q#O|Zc^uM&s2{ Xa\&NZ33ϺKNu~yӦMKLLN>Yz?𸜝k|Om%.0:9U y1NJ'N\郦鑠iLg{tZl W= ox^~2Ɵj;aS r|1K_:T?͂KZKsZ9!nacCsR81G5;Qq)ݒp3O2v=#_ثۂ^}MLΉt$Ho+Oq#q|Si@>{즛beAuyc@|@$f|Uֿ;Wj"/7m?w_P +H|x6@#b6g71r\LqҺ,pjz=Vq!硙?OFYJ>C#x}Axx,n.# y-Zq!q̉6PCx֘FW>H:?1Sq3?cJLpBi^˱%a" cJm~>-;3/^8 ܱ/R׽9p"%(|/ @8\;>IC("N7oa0R\ۏ7GhcW0:730Z$ ^~dӟX"{ p1,.C,Q ~Ƅs/dxϠ8@RȬ ቹsۜA`,cU\30VW .i6i]Ab³qb*31_rq+K>vީe׭Ǖo~QAMRYlf~<>S1pD\BAJf0@K£='ov~?B%< Vuj+vZ{\Of\uOq<{u=z_4Q)B_yD(ﶇMDa6|\er1 y|ՖUǞb?NT>ns/=Y8Adtbb?:g.gB@IDATl# O7Hh⦺/#>wx}e o_ XwγyӅ{&}>C&/59ALʰϿIŘ4$?΀U4H 1<9gN:9~+˗/h@N@1觘>m%Oyfi}ǭ𚙚T\\@vytjW>Tc޷_NLb?LuϜ]`/ȯffI>_}I# `Ԡ>B̄A;鐖l ';{ 'lW dow왡&ޝ;r]_!K$Y 8>m?WQu|ԯeg$mkJF~~KtT;.{zV}䌚qHV}}e fQ>9}3 - ~hZ9(OlL_RtP_>Tdߊ0jroNRN.is3c/~`uߞcDA>j|4r3jMZϕws:*e㔽VZs˯%z$:Kᠱ~i?jmSJ?7#@u_pTVC+ _Oc_h5ϭI4`o74[d}dX9\Q Wro =_1Y$IF<^/o}9J2VQe4@,JeoݰUc4#}FG +֮x&k@ϖ/e V a.:GVӂ\ RDrVBQC#'|'%著fil<ƀ7HIgDg4{^Z7!MW"p:"*iЧbQ(/E'A?z64v /w\,Zg߬sN.9缧YS ( 
߀*eM3bHvo~[ʿ> 4 _krY# j39oЏp1sѯH sϗ:k֘:/GyhSϤ#ӏ64KIbDk?%OsyԵc}y}T+{gkeaMtEgq#8Mth>Xu+i]+}|q#ˋ-o{_,[ 0&nc [*5Vgxo.ʈw\xB4ԡ4ảZ]Z1'jsIP\TvPyj {_[ ‹Ч[G[|NB gߥ~0A aE=+`Y%>+ނl5}oH}b 0؟?U oۮuW)ؤ{Ωjn<3kZDeմxϧp#Dz^gP`SpW6p|Пn\lf9_>y1"s'֧n#{bz3ndWfI_3L i.`] 9kTm} [PwY >;(q9eޟ.wJ};YvN[ϝȆ@X΢SYY]ݵ#5*f.U5tovvhM^0ACQC%@Y\e-\F E~CljKֵf2ZJr;?ι5'i)S %jra0ԮJ&IiWe ;XWO03:} :+]1be-*>{{iww`Jʼ/_PA񹶾O/PsCvشܬ=VSOUuڳʹ\u=%7dhVm/U!t߱s2uc 0Ay&7 Ʈp߻=-Bׂz/ZV):"BՎx)+1;v$K/s'2j_pU :ޢ=[(G;/q5YAPJ4uZWv*kF{ WֈKMv^v A6G_Ck&72(% mK3o-BZ{nGO wc. ,@]حz\A qeQeM*9]<-- l+*;,̇(./ܸxp9Yڋq7rH_MǞ2X+?Y+>Oaת40DQ4R#K6|­KTX!W`'aawh{_~ۮP|l͎ӂ+hnQ"XAuzwP UlcP<TJhfw>x xϊ m¬Au^suQΊhׂh{(oPFШȞA'NoT.ZYSxyAϬ%UApEc;JbQcO~p[1A15bm#Ҕ*Hhlx;gGcyij%MN_y=jh(rsW\%9j XB`ŧFRHY<{R7zX(q=0 .ztXt 7X!JgK}H?] ̋>g4f@ <+W/V XX}tyfH7@GJ {m}ߝ taܟ|zy xIŁr7:2C8FίzT<!Bqb܊- tKu\6IN ^q'mg?8O ?aEruW0P |<$:mX?nTӷi릹1 %42Xr]3҆7w02$ۉpG4|-e>i)͜k[e&;4s/З%t)a$6`DJ'Ҳ ;Tmᡯ(Lj~ >t!_HO`=A2m^+nӦa|V:O7{][m"<3NGS;s͖zsTmY6lu FAs =߹f֍8px vTr|ۏo'8,8vQ T˃(K2loXo+TȺd5L mڱg17c41 ޘ)wje b":{un70`Q'B#lCE!mM#|XmcDC+\%<7ܯMúzUK*'Y.;wmwҢQ2.?@߷W*d]x[93UQbH?z(}d5m Xb :E6~ҟCtg-eUx @x n̊01H OuUkЧ[k *Í x>$s!0ᄟ#W!ɘH>_}AkCY>~胔_6"`ZmQky;˛_R[/sP-Mn~U.׌7?suvieٸ"a`G_& "4 ?)nK#2 zˋYL7yR 7wMZa04^7'~1ĴU#VwaX %ͻJJ?ֿ5̡4F̊f"L>HE&#:ڲ;9g{zx;֣5z^T{7hc4,KVޟ“eLDnrwwy<t`9+Kavjoyu@Xk#hԁz#&ŠIa+݁o$c"gӹ:ҸSh2&ʔ?ԃ0=-Zъ痮F.gXBYc7^_gv>GZvZo~N\^gh^`-УSJlbE7č%Zʍ%s#W:tQ^Yvt7oV4s~oc?.y ,_v8C{v󵆖vCK弬 X@є}lpkx1be_?6, kgkID@UNjTTxMi7x Ze9^l]޴@ , }?ApsݓZc V ;/=|kJ-}7tH~.\E>CXQ  nkIt;cd ,E(F1-zK./ ӟ}U0?Ey >')GxNL< Ĺ<SZ~o)~}ŭy߆*HUL(}}yz0;-"\J_U<+v`b5SZuM%F5RƂ`[]43d?sN0]#m1'@$wڇ)?N$h H{3j6ɓ0:j]~ăT{ f9^/ӽce~>^F%wcce3KhzdUp$_O4Xk<"`:ů%7-MAͼ tx YxU! AmN~^J胅(?ץw{L c&/*k8*e"iDBmꠝ<:^FNKN}TC-{Sox~DPNv`GJtwai[$e9:xTֺ$G)slE<&iEZB3!ͯMpy0JZS|ix̂Eho+ 0E 5|/x渦|nEA!"3Y@I(_ۮO@>=',XjS mLc0ֱ0լ{Q Oq9Uջsqq]1nyʞ UqWG~}rMw=HhDg1 j8>h1ߩԦuYP >q\$S*~ 2o 0v OڃhvՓE]#[x_%>鈖TGLRf>2ȊsSa)}nL}=(ȲKy/eAQՠⷨR'KLb~&oohpG'j)pG{[;Cl>&J~x4WW?Ybj?<=|/-߰+hCcdy>Q/0b &.=fj yH0N}:\XJ*A`DG_>I8ZgԷsOi-]Jfj-~\U(?hߛ(偤)f̃\k"?jmՔ+m gCM{Mă8Xcꂸ0@x%`4 : {rֈ`w^k4)h?Եc>dYtO?D?YEw.;&Ҷ ƚw^7wkZp(Z^R]9ShffYrL"R:/o&Skk;t!Fд𖭰#|e eHE>N;jH0ױ\9q MtpaCҵϤ{~olp5ϠsFx Sqtkq ޳9s ~+ޡnj{ξm &Una_Vq.A -?ZG}v, ű'=`iu*ȣ#GЛ~CtS7N41`0ꄘXm|#U㚛{/b.z=iWҶGnRѸsJ"vVQ絷4E,i&>3}stAmءm\VDKQiAyR1]~hL-1U .Zm-Tލ7%i{B8J;4m,= EzWh$؝ThnҹG~.޴jNўYP'/Sqէų(m&6F|iǾ/i3qb( \uOp`gU2Y?{m T΃k7)rcyԉwËFvѠ܎Ci̫jod5TԔ~b1u1F{ݫ_H.GBʇ2=ʌP6ם`Doa۾^}xH [`ϰ^7v;]T 9\c n]|;jwoekA57 k'Gfm!oy˷E" rcؓ Aޱ+ y{ziԔ:Ƹnn.4-&@Yum*$D*mBYmTyh-)ǻ:* &wa Z9 NT`r- %JJ6-Kq9s|o|hq#x𡤫xH ˢS&}2UOBrشU{']L3=XC3GđcVp и)?L_|(u=+3y3wډtw`nϘ ɢc,)͡LMNB0(?W\heQVn7.{ 6hZ- eܧX|?E&(iM;`jE諌y# XtޘgBz1E^? k.؍" cϟF9k)œwt扼zv./>QLY">( cjl1Ot6ʒɕݺ'C14|gcwՇ\@W`A^?{UtC%3-@X_=t4< }B*3xɃ|Õb V![[JS{ z>RufVԥY]up m}rl4{Yw؅!p:d<svI9 4xC_N n+Ѻں6 0jJ~ʚ={lGByNQ6/}oivL)Py$Në\*xaspyL8izzNM}YO-5S8 Q~4Wlnx:(_SX2>y"?;;F5ۭ: fgFIoXF,|reW~w\v, K9̸~Yk)}v«}~3KJi nfp<e~'_ņB~vg3W^)q {Ǯd@ux$ }jo\Ws`|~%}|Ypl ;stwڳnSȑ݉X_$9ޖ݈t=T"m\|:0# Po^ vN:bMc]hMGP[,J`CƗ1Hj&*_<] 4b?6uq? ?=ufՕHůlM>l%riaO];Л4zr߶eiIۖ5GEsXگ A}O8?xq)IF #\btV*AypaFBh6rҀ^i޲|nwT u3Uy_L^5 &'ea6.| DQWR|(O(|?;l3G/0EPurRVth|(4':UD Llop }guή? 
8|DgO[xQ蘿3\IJ1*dа|$g:Lv:t S <?lp)8LX>{C;r2u:!F^tGRjW> tDK\@]rhGa1}[ID(c3敤c {&ʾĶ>0ZVÔZx{YP7g(dQ^UƟK.!ix/sbO{<^½ş ?7q.= x oYsvK&O}XݲU>/tDK0ܣ<b]޳Dx~>%dzc/w{x+poŠ܉w|l筋U{ɾ>#cDBp~Dl'}pjCBBt9oEj@,V;Z)G#) ~pEe 3&"GM C}5dz$F^\؍#@){A6vnus\9,f zWPqfSކh;w){|vިw7a UQ= GŵkIh2o'ZRsjFkJbv<}L} +( 4wx$\:t ?rQAﯪkPjYIgst(rprsrX.6:Duj UU*y*A~^S[;({VE6n|,4r)'7G;h- z09*[@ty)[E'h[|ک֖Gu,`X O?AjkJ JcI[)pkuA.N,K%l|,V,U8X',wd1ޒ%;ݶ}t1e\|܇>NXpq۫Qz۽WGu^n yQ.^/A>JZCV)~ʵr++=$y?(o+mc]=U{ubGU7FJ8riptn}`ݢrIT)זCx8eͫDJ¥6g/jMoZhdꂺ\y}YBYHp] dm˘ي'#~mr_%:8d`!`$]e(b Yj,PpW 6[Ž^wn3 nq\KJ@;Gc#cbpS>c}ZE(o ;IeYx:w˾Ƽap$>O;R%G,|L4WYCOY _|Ƚz!޽&~, "nyl#)~p kLJ}ܲ ӭDD(x F9_Ɋxci0|X-s3`s>ak۠>-RݿD`5KR~Ϻm{m[?a؟4sț((̌#wUg"AA Ww~A.V%z_4N1 fOƟI ]{_~ӯy}bk@(P u4?Og_Aw;);">Xp,Tpgg@IDAT\xMƠ+u(l}Tˏ[:OWt'*A!s"8 4o ~ ,ϏU)Uy#W,WUU!d?r8k4Q/ᒼ^_{^wX%^LC_L~?X*X M~!N#a#Jt07< 0|%A׮ݴZAŷ|NeZp39an rf${K|w3 B< (tBk?F p'>H?aPMcQq(Ҕ_}Xׯ`̷2C5Ճ# 935B7l>~ wXyTڵiâ>=FTz~H y\?(n~fErO4 7bVnYr뙓Ltcg*zgyzݮrhCC9QWVBzfkdM񧵵]R徾:nE_norw^kG@'gJ5 SSs;9jpACs;9\Y6>&M/IJ,"6: uuu\mݪ]7Q"9(>QrOF7f~}-r\6+,) _ϲd ׮DKܫ*-"aȍV֙Pakk>YGoTR}j_yBqCVcC 3PµKJ>q¤1'7r6o3KנK?$<N>fBUyjoȃձ%*~J.Qe jPH<ECO%%Tl#J{akCÊ9$*~`PHG$BTp  "q_o!IIʽ!'bQ IXTo_ZlGqdd­ta+}1 _*}!t0A&2!#}pόn3Ar´'*B慌T;]OÍWIG_J#,7T½/@$SY;6ƵC '!AK$3#0ig[iP,wIk?iO7mI%@b{ݼ.9XZ<./HoK$ 0 xw){bmASR" Ung*)tgGs# Or/H'گY=`?eˈ%yJşƙIҦOw>Ͼ[{W<o#uwn[-Gg[8~UyߧOo3@P~ۧeUjh7q(~bCGˮV˃:VsRVֿ5̱{׆7RloiDIZFJO%){^x%.={WQo>U;ִJ椒hH>wL<!I#z,yg ң}۶6+>A+ n5y{M^Yؽsc\Fi퇓o-?te)A lJ& VUUeva}YK :X',NCGZnOScBїA2g=pʳ:6_te} I|n!lāgR4G_Q|ge|R%C15C.ŗG%[-nyڽ&5o ˶mlw<,0ID"ٯۭVq[;v'2ްa#q93Z0__U/ &wʝky ό|Tح>-'VlS5G=YJ|^4R*HFD قٯwGX髣]o*6%uq𒈲0q@z?drf{Aq.)?{ڴ.EWQ:sC)VӡLu:/Lxe.g $'USÇ^W\:2=Ôo9:+ s/"=~g`JrMG+$,"תbqT[i?c.2{<O|%cE2r8_d65~hˈYVS0oNj&+Z\ Y'~Gh:3kf3JSUOxP/p:f<0 VKȖ+;']x~*xoۨn+<_8jvҝoǃ|HşyOǷ=-nytbS [4b( _ll >n6-ڲwxy KEyeE@*{QÊ|+|5֗òOcU~D9L,6[Rë%n~7eaٵGvb?EXǦC\cf# ?J{ >ҝ)NQ2u  SYVKRƀylWYXU}t-2JJoP,9Wt iZ0_I"2*\6|y,y1Xձ9}X )LG@abÏ[ZMſ>-<s঑Hœ'W8myŢ&~eϐ HߐW@3MEHPO6P#(yzL ˂֕9}.BU.9z?Vr܎Ci̫jopZ{F/|2rl4<:.ˑ"ߔ4! g:lH}Skn>eBųacgC䫪쾃d[ t"Iյަ+ܜzVy;>X\k>BbP]wNV,%_H/e ,YRÁ)?L_|칽cWut#g{< -rQMΘٮP(/Uo(>^rw;c}$_tN&?rtF- 47>#-*Ty-}fѩ{{B"`\٭3Շ8 zDV?~E%A@*lۊ4K~_G&\B[E P|_{vG)ڱ !hKvog_syPyUڐ^?Yr :GWgGv>}7ߖ߁vGAp8V;f|A瓨{c F~+ c/}?._<] 4b?6um]y68n@̣$om˞.}-+k~F`-p}f:)7{]ޓ$ OHyiP/,hQ3i+sD0T\M̤'\5O݊j#li>&(eg~ђ I"wwLؿ}OD H?9yR}bǹmq>xo<oɠ˒y<:eY0X9͙orZ{n:R^=G݁Q0M3!Bv jAYc^͗ehHşI< J){Ow]m`t835GjV L(˂aȬIo֜k=y ۖq o%SyRKlZ 9;cGYq,V-~l{\>b̤{ѰNdqJ.N(Q R7W޻k4fgYh^^Mއ}lVT hQ84Țk|S;#Y:ْ$އo*y߾rE~bV@cKKJ%#!&Ffz#8Y(SU,߬IN/cFc ]_bާjJU7+xq2/̉@rI;#MطmkPcFc 7ZTersCfsgvvG;<B’ay㾋Z'5QQ7+5H(}1 k?>g$% vpe_U'spg;._5qQ==߻~>m3~S(YiBh:p % _o L;ٛnk| J{aCCCCf/g$% vri{sB%=l=δ @\7O9MsTU`܊$! *(V9-.6PLwpgItDPÀ?EoI!q)r&Ymrw$"Ь"i<կ>oф~ Ŏ>}Px?>mW,*!CC e#~xkLD Y\(8,׌:RT,o#U!Wŏ?=:7X7g/*>(yJ_-de|QSQɁh P<^$ID r>MY>}s:)C*vA_mQǃOv_=.^d[>nB€?qOȈ8X83it'G03 G?q/ܷD e rMS0O?0eO'Oy=i;+_5(-*x9}y‹}/?/pHU)sI20eN[4xtrgljFj8=rO00v>"I9hF/ \PsSa>7rW }icZ&1}YH[7 z8GDj\}7g{{W3%Ho̼,%hɏ%f\"  Ї%I" H$ A@* hLD@" H$D ?2[&U" H$RKH$@! 
eLD@" H2`hTueqLx }߅{G>H$fG )AG#+j?~%By?u z Vg]BO?\|?ëabAx e-)ʷ/ѡTo:@}YJf&}/'ZOiS?릆['_m]<}yH$fD@Zf5S񬼡?Գ\nNنx$^'J^Yy0\=3lc #U}8%zJ+};Spy?;̰D8'd:m7kho֝U&]ŧKI^w4z]RA<H$fE@*~有f&~p+&LXЦ[\Em?O8} ɷ)UU\O7H~Mȷ5ܧ_rO^J$@@*F'xGVw=p{:^VΧ}7bc_P9[E橊q?/H$"  3l,{C.yc?x{~ E_z✊U'U/w OVݼSCbqk'D ?riQIyӯ!UyGQi?7xw>PzEh2n^?=4r5Ag8NW~KUJ?rMYffՕ|)bnw;˟&n6x>qh (w]qFϊGRV?/#S?y9U"sw[-D@" eS`tHLD@" h o ,禣`XD #?#]&Z" H235e%D #?#]&Z" H235e%D #?#]&Z" H235e%D #?#]&Z" H235e%D #?#]&Z" H235e%D #?#]&Z" H235e%D #?#]&Z" H235e%D #P222#S}~*w$E-PIu9/SEjZu̵?y8D@"tO:Əpڌv{&nSR ޼Jנ8}nR=jkhM)J$C@*6S|zx< IP&K=Pw~;?m0Wg϶*mCFQn =ԓ,T*([YܽȣD@" $B>-Ϧ-~%YTlo7u;2XG,{΀˸1;\HMM?>ahED~3o+EU|*_k6BA]=*vQ{+* ԑbk:ѡjIA?n-By=Vy.H$a?,,ws@>V:WT_j|R=O*w+KcvӗYc9jmYosцN'.|OX~=o#P" HڍT\]%U1SVV|tk @ {]|T֟n"XgWST"5JvCU)I}.AMJ ;6MMvlH9/{gΜ̜3g(Ije)S!@!Hס+ܬ1#Pv9sr?1U~w?ڶ\'0;|rH@@&ݺQ̆˞^.gX9}X{::lc`;ڟw}7g+H)!@jZT>s W_ ă+P2ou0eB j$kOR Ɓ5.۬p?됳M@B :/' !jlW?o%r-B @`҇\aߙ7כ6F/u7<!P7¡6\y+d,|UޮL6i" ;-|&D Bhg?aHzQ2\67fbzQ *!@12w2׏mq)+G@CD=S"t(iDJƵ(5G@jlNolv?1NDɓ B ,HW':n?ʙmg1mp^Yϰ^R!SqNcg>%;_0ؗ֬%;ůYL˦fҮ};PB K]h75&+U9p0gpsuZay^z{,F?AL)N0,NDL!`Gz>5zДF~WpAfp1]Nby:qb0jنI~UqةxcBW> @8奡AʿC߶s>)U`¬YL#[[]~=x(Leq݋ wog=cW>@n+ؼ8]gVʾ ^?$, *"@ )MӼoXB_)ﵬW~[+gu}zio$tl˴,~"jY xA  z\B_*5Xg.{f=(RkmSYPz@.9g,K kTE˂fB *DRBBDpLa]gҿ0tYe|# C~ }i l+^bCȋm"Z 'BcSwCLEǜm/S쟾`NV?šN;8ϝ5ztiByStB 64VytGխɭB^h13!@@e _jyɝEj}O1Uиǂ鮱UX7!@5 @P^&Woūᬖ3y(v N?L7[6):Tk>:CעֈX*@`"_eM  6du;< '1}~〜S!Km;qV{J#Bӹ=ѕv+2u/mZlx̀љz5ڮԉF.\8_\w3c':);3)-_mE)wJﰝNA7יse:þP6 q>2!l~aD@po #J#Mt{3]ܘNIu X6=]{юt`LW?CyG0w ~J 0MH,}t'3W]XQrGYfTIA}H.L?F [9V톎ݗ/=A@N.\>c[c 3R8fW0=Lq#ZQ+ITcˮ$Gj!P䳼h+ T?.7`vapC`I[hh۠T?+BE+SL 'T*u5SIk+d^ML~շsQ|ߓ"D< qbX!Na$3)gt͎/+Af ~U466\>5k|p\k6>ic }lVd:gEKV}J]~}ənbSCEq>JG2RδM&.EG{;X3y•IH l-qӦ59p; x+ǎXIIRIvP4n@xȎ @~\ޙ^􌅹([!D{Py2L`Aqߊςbڑ&ݢ~y|aC*YP/p0C&y3ӼGS}1W'<[X|3#Źa5.%)](02?{GvdRd1#3 b>}B{PT(#l^s쌹.cJ1ty¼qVN 1 [bw E١[u{`Tf7$O Qh~5i~:5`@CA OLEUj+ttH]!h(#=5yq;%\SA0^6ݛg+4] nqSBLދ05q%^ϽHCWJ &LּoԔM[\hxR .ӝmve~T6sdg5$>*β\;&2vSkB^15j_:BNJB*/t ;'n2%GCR1M@~25f?>ݣ_E;%-ҺXӔᙓt% a WypB̻-cf̈rr3FׁGPB ԉ?I+F[#l( ws\0:w;D$D(#b"35/2P?.jz;:d6I$38=IDAT=C>1'@մ_湒Cp?t'sȁ-B t=|iרCv ]+yl7#Xq{5V#+,p'?%X@(LtJ+2-rZ&?O, Y*̼4Ij KvŹT~%ݶDqfRqF~PP5'v>YeRqnY83Y|Py`Y0//P9V03B?)$ˢSѐj;Uu1\ZBن.D}徊F`\)ns|ӣF;8fMZi#BcNj_B^ sϗ߇ތ=lK Hs:>SD `b^#G(V?z/Y3ǎͅ#LnߞM]J%EQͮ>6'iތcNvdyzvW%fmT 'P(◭H:#Dog.`6tfLԑD*~s˖] ON*=[l Fəm焒rP>1Zd{Z{A*p(#|qϣv7`̙&d c?(E.35 Pa.$~0GdD~AHy IӦO.-?тAa 4+L)DP17XA=A>[6-TS0GAp2V`%P(-SRF7!,|JTy4F̱4__&LFƕ/yK~_47FLsѻ";'B9`)=!R,e9ЯC.́42)V_¸~_H&`MJ#([t)ξX!]Kq'WHYNA|UaYS7z ej⼩83^1=J~!!>f:ꑑJ V}pun?wJpbF]O{с] +|q;П _b7đ7udLN77uj{xkRǞ|O`!(-G\l/P&;tPԎį $5y5̐}|_*G(tRW.-7#*XXx0Ef+@t'[#UNGU[* 7ExwǷoZ1 (X9+RŵYvPٿ˾':=uKd8XLqu>dr* BF(:(mŊ3/VcD*WBq)ˤ 08ŗ:+' +`⌴űC^ c}T~"0[y$U.c^szBh5B}xU"+͌ǒ>Gs9dKy[0Svr 3\/c=`I,E'8̸Aߠj-^Y~0 e5s=οU}9?z{u8VݧӜ_gȹE?^_A$L2=:2JW )r:qD::P8 >`a*ѹ{l+`ζvR ^sp5ī/4+STAw֒Y1'{ Kr=7\`v)31}/c>x"raRх DuZg i2s0ϗqnx&:gXVy7CNQΕW34(r}LRqJ:*GgM*܄58H)cH y,.)|.[,}-!#pڝ&=652/({6Lê#]`CyS=B{4oMw9сͅgxG4!>a[wscg@Ǻ>˫3 Emaw";B cԻ lb9-J *FܗKʿ/^a*tc۲'Jmp&K '^bH[n+LmIkQT -r1W `Jr iPB7\c&DnGY[Cyϯ& O{5Áúb0{7vE(q}lVM(Ь1]G2&ێag@}v鞧 0jUѩ@1ց5E d ̙"hSo Nꤕ쑬8Ļ#P##ޙ?{:HժG?&j7(bylbL tgX"=坎1!p,3<F/F*m(ґl6%1ݼB&QZ^c;Sf`O DU=;_μ/B=ԘEэ{JQ}hp8f_/ONcE+oöUnE( T+R60|'2bCɢ<7?Pi'-ٳK]]1.E8WSc Kc!u&L=3󫜇3{Q8+=I:,T5/KKkAz'oͼ*uJx_sqM4%msc<KN ?_V xऽlI{ 5U;?y ε_[tgx̪~2zC x[Px}՜Ѕ/,nGLaY|\)Ot@( yuտAY!pf1(?a`8AA ;Ykߒihү[$ao]n¢p[?t*\V%+t=XZvPk ]l5~͚')[خl.;!PQSTww.SԨs 7LYa=Ի®Α//iԴwHS%:)4?qS_H(tq[mYuָЦ<4S3*\b^8wtM1b^q!To`^~OW*=]^86jZcat{\_yyf)tsюH/'' 巇*E`iBgx|kbtn"dCqǐϽhBbO 1sݶ{;/įy>ݩ (pN#ioB$m\e鮱1s46y!2~Åm]4A@'=#t8 Bz]Xy9vQ?y}4Of^tyE+sux&؄t<>HLy#M@A/,aZsi Ŷߩ,mPVϷJ 8-v? 
f[sԸߎ0vad'N=z5.|DZOOywmnÍ`9sk\@_?S\]:nH?;cJ—r/KG FǮ^#y_k/7y!Yw(QO ,MQg%X Eނ)O'3 'sn~{' wPx/Ћ2F/HLFA˛@G ~;zgs _#h#Z_g-(,*>$}ü@pZAE˹>->n v3QILqR22ݝ`| c.p}祒oK ":IN;@GHz }椠.|YqyJyy >N /L+.UMJ3g淳+ܝ 4=)Ib9yrNt+=:u cfi ;`CN,QQ5pvK{ 4A֩9Yw'2b! %[h^h F鍑Uo)Uo}y7Hvw~.P伿NOB6 pCA=ʋ'nOvw-3dlzïMkg癘_sMq#Z?}, 1秡B"`?V߃)̱cs/Ƚ{}#toXe1! Qgl? +U')1-X1«_6+4# o@A|E"o!W ?KwQ;sE3K׳xo czvLar'7L4U=Nsf)N'Â9;]ywuؕz wHN0&3^, ;z'%iЀ `i0_as͒.1%\9\ϱz/.}:3߃󻒹]bؾƙ>O!g㓞8ckvӮQ隰GOa yf6t'GS' 8K_uZ:~̟7׼M" >0(I.T~c!xb!wxs@ؓ# %m,roŧ'!PWUgy]ssM M!\ʴq+םƳb[[ew<ٱC |ixE9'-[X\/N_ːIqS-H{ nsm+. .nTELgWtKGQ1>uRaea9{vt0A[ꉛVrBQά4*<HLiz 08VeW홅zZE]>=]_xYgmC9]UakfA0%i3i6n]|+\%<Y.y,˻'p ܱi"y%Kp? 1iE=ߠ tZzuNFVl+=nLLG]?m(X4fL ͧ<ʂPP:py:ihɮPж0lyǙXټUwldx.]Zβ8S-Zը-TnݗO'(*:7w`ZvkRE8-ZUm<وӇ;Ek.|Gl48=~-˿:9Jx?º3 8}S&ǩD^㹓gYT̝?:PK %;ynTP"js|#jYJeRCF9&U&O.PMbSY>24a W$i7L\ KL31r`QgK!D,,6Aspהn^aqӮ$|WH܎JGw<} _Y!<|MSnJM2}ց.W,n=}þ2yLl7qZܘ']<"-t|k`/y*9yWBJڵ8c>=Ol0>E]mQrk|}W?4>}kqu>P객>1}vF5 Y`et}kz;]g99F0'9Y߳w*\N&߫Lu5jm£dД)g1,B>oF喴 ʲyjԿJ }?DE|5-tO[iOFIy !,`4UNm>!~*?)皸躴w:U<s+pɊN-uC>[V|Ovw|3&?tRZDw}|WetCf` zl-Z,s(-dS4?)UA0ʎsCuPdo,l:?Pl)􇺦tս[Cw<=#Q2r'1,PYoe& ߱4/_0G,ęP9'C8#`7<Dkn~+w:R)?yEPpD]c) 4EYBKSgpAa5Ƈ2 e1Zk/t'rUFу a`AjakOFNޟ}!gHOcf̈>Xx3:QNWZ:񧒰m-c|[o1˄0;Ӣs3,ؾBp̃05Q==a!8ۏַZ$m!rX~'N]fcXKza.]AC0Hu.LKk]k4TJ!# t!'㩊;;8f Ma-r1], QF9Xf'@4(X{מ)b#0}!g+q}?urIҒ`5ǝ(4PJvzCx0Lq'Lȶ5SXu'e{ϭg+næLI5r{$o %9p4Po?oўhal\i9Ycm `iŖЗ3wOسGPFq-FσE\. 1x6Z 5&Rm#Rpb[Lu#»AּWGv\Ai/vcBAb,B`pew#{2YR\E*LSTGhA)bۣ9saFɧUC+L2g[xq5]3銵{z-9ݰ TЯg3R8rQ) {},JS5g.@>`IDQ1%-c[ZBr%{-\L B Kd_bpS' Ի=:$e-4ζ[|'WB XFNWOw9K?E4Rmb%/F"Bߑ@FB >QoyR9o |~ga BoWBLe"&.Ǣ?~tL _V!|O)$ Z$4q&&cӚ(`vaRc.E~EYkӜŅ %<U:;c΢[Tv22!_a)g# J[ON}|?(vs|ě= )xKZKFĢaQBW%ʅg_}M9ƕg-A(*yOQ @~yf܈9R"1vܫD{`?;31+x,sAdǹ5P~8[vTXp_$/c=0Whw0,]x@aSo4pOΙ*cQSV;FБ=%GZa(8|.i*\Eb/uWl>9Vc>1 =  ۺ ̋nUy[e?/|cCaQXjq^%ے9X36˫߇u { X Em!'ac)P\Ai(5S`=莸>30js'1̿^"ӕ'\=w+ vO\m)P2~(nAjM3L5g?8[N#Z}8q!O V&fzPT).O*$tQ0~snގ4hbe8٪ϖ $ޝ/r Fbak|vH<Kh0d? 
[binary image data omitted]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-selfservice-flowns1.svg (mtime=1586982281.0)
[figure text: Produced by OmniGraffle 6.6.1, 2016-10-06 18:09:40 +0000. Canvas 1, Layer 1. Title: "Open vSwitch - Self-service Networks / Network Traffic Flow - North/South Scenario 1". Labels: Network Node; Compute Node; Instance; Linux Bridge qbr; OVS Integration Bridge br-int; OVS Tunnel Bridge br-tun; OVS Provider Bridge br-provider; Router Namespace qrouter; Provider network 1 (VLAN 101, 203.0.113.0/24); Provider network Aggregate; Self-service network (VNI 101, 192.168.1.0/24); Overlay network (10.0.1.0/24); VNI 101; VLAN 101; numbered flow steps (1)-(23).]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-selfservice-flowns2.graffle (mtime=1586982281.0)
[binary OmniGraffle data omitted]
^/n VQ" Tl6K4udΥ7K#__D#_]H%8BcKhȓos=51)}8ex}~<<ιq5 9{9E}ioyJ޴N*vǰ?1}ʌ{ͱm8\*}#rrb+U[FuwS_Sh6uzV!4~F@/K8⮯-Ӿm7||Vim1.߮=C#UF®PQ|2t2li=;w=|S"ЈTXȳV5jwl F1TyY յ)V<2yRu$6:zp5)&xnJfk6y}+ut mo^C0+몜Kۆ_Gmn,*O$a" ' X&زڮ#94K~pO79{l;w'mD",Yۙ+v~v~O:qʋ8eD*!jסszKYW|G" RR*gEd+G{oS٢Y8,~]B"b8} ^XP.Ŕa=h^W} " LLNV|V^Z{ЀʵKzzm) ԎN'I"x\5<~r RQדbˣCk5;os h6/H d;hQ)琥<^x*k;")ɣD 8vd=w84س;R1>䮯Ҋos3õ^,m)_R H> Buڳ{D`gh֥R}o *o3[٣{5miGD Ȯdqb^٢[ Tg;-{D 8z`%1rUHlU^h30$H&O&)wɿn㲅3 ?ItC`ݜ{JҾM~$F@Z΁Ͼ?ث꭮3Yqnx7TS͌j7 ϷdeIu{ܵ.fR,ZR9S3B_y]C5\u$jkꪯFmd %xodWfSשׂpg/˂2% ? 21$9J%s 4HDp -3GW;/FY4߂u˲ ۖelተtG6XL9 C|ỗb)_=0 B@ȏVsV}MMUE 8H+<{YRJRYS("d(*o6Md[|~t7gO(i3 BI94[5[׮lkJ5?EZ~Q{f2vmXFdqR]Pvۯx?!WgoR^LѽK=mW +M*~ART4+d1QIU踑Sy۫#쟙[vEYcLS,Jsoi'*'%?)@&Ѝ{o~||/+rgxGoZ +?-aq/aC҇ H H1/peayǽ="H$Dmkʳ,OtgNIN>8B’ay㾃ژ'>o|Wair <!Pb>,~(|4 H1/apeWU'qpWi(nn;J:m/L@i4"JV4HX ůoMn6C{aCCCCf/g$ 0L{uB%]-fA &ڴSxFNLrW* H2ҁb՟CBBkTM4}MtDM q /z؀<-I" 1& D΄CFďO3WGTtN_ⅲ?<5&I"(.a0*)U`kF+&zoa[j0b6'_koc E%WB <^otGTr (t}'ϵ?@gJmLaJ?7*Xq8/r?oMu@7.a@DE'dD ̟4:XDeAɋ[yO"H̋2R9) X[^]02W @wzqo#nZ"`XB)0o5{j)sb'nBj|$3Ŋ+Gڒuhl$wMq?7^qJxe{MOϬXǑ^īr?wg|J6Mn_'er8{(H )7 wQReZS8z,ѹ9#&)f OOC{w 26_Ň.F9*-aoJ$)\ǟBʬ::Ot;x]Tq]9xYAQU^}7=ia[FW^rk+U՛Edzh֚mVH(o~ՙz*uRMjYUQ+ȣD@" oJ>mڻߛJ26t>tI'ey7pg%ܘ,@hC5))fO`{D@" )M<>nhID^3o*EU<*s&@A>.REuݏUPA]ejk2Q98jIAo&n-By ?~Cπ<H$H9O' +HU)v1[nRʚ*Ֆ[7Gwӷy^e~x%܀VtZpZDd(&?t ~$J$@H6dO?*yT\W{ؑՖ)ڐ 49jI~o`>?ެ'`H%SD wQfw/ EQ,c/RmaV(iB  $Oy Mq^X]s$]|4g»a)˄!@9H׹2bk\|_߷E~)w"sG͑!!@^0OS!&a>/[+%/B * @ `y-.9oceAcP B4PAͼ^8 _i6hZN:ф9!@ G6-ZY J=, D B#@N#cyg.3yØ_ 'Ta8X?B #$دXYC}~oq_9F/ : :#3 R'nY|ן'׉cn7߹3e=f씹/1!k6>6Xط+J!@FԼ8K܏5{6s>~ux6Fٳ|qXkn|q/>(̽_&W# B XⴸtWt,bR7}f:s|ᑑhώ2i=:pO~nYz妷XQ]XޯG8| B,$ M`t+㵞4 jfzړl^),uL\~?%/,SW]ѶWjoKjCI B!@V+滚n>?[jݎ8(]tp\ـj~lyWT E#B$p5XRG;71:. B_} ݿ1"պ}u CYYdZº, y!@ `DL ?T>6`Jow*9\T!w BT v*fØWBJ!@ @p{cϾl;)V8Va]CݡD6}D T\q$a&:B Œ 0ZW%j v9_9j?6&{')(O& C ; bK(DC!q׾.n>,ejk}ULkQl_&ʶ=3ǟעXnc;e#3+|e8B +$/Jfyײ^z] pq~Y?2E5O^cLyQ؁&ؖfݬ=E|SOyFPSdB "@? , 3aj%43yV=?)r6W*Gųum5Np "/əM-z}T/Mm"}Lny&B$Ãcˢce/ׄivݮśʼj;݊` ݂ \{fY fŇUޝq{'\mZ:^*,UՂ"!PeHWA ?Kq@)e6cO[- {.ޞʊ/]Vaؿ[+_3 T{˓ <)7{B '$\ }MڥZEf-NN4>=d*w(kvݦvƚٻ, pTsv8:n6fi* ,)?Fa!@UF!k`Դ-S ¼ &,;;*=aŁiɗ B _[H7t:*^2yVaͿҴ?JL4ߍE!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@@`LdmF`=GL8| B BA@ 6h]_K}gQC`3-bbʦ%!sb/c[քo(qܞ8?dzgr ̹>Mc7MW.8`1JW>-&A=ѦZ":ϟUUqޯ8hfJmpxʔ4};Jε]S~\*Ĺ> pLueժљLY}{UeQһNau\+p Q#ܩ/L2*D@opoB:G{3CܐIIs X9;C(Tϻю dV=GyGw_8ߊx9sifoY.jgz&n*Eɍ[mdYQKD$=cF0 5oX;\jLXDg_M4:;foV+,oLM~6+=3㾌q8&=uLsE8x3^v%%9r$Wxg:G[yP8~QQwfk'8^ƾ J#"dZ\D *xBQwz)1"pLFm?Aެ` `wͽoϹUQzbD$Ht`1q1:@EŢ\􋟁M'|$4{(OgLO~ߦ8kVЁF\U\/!œ|< ɞAx - 6ٱA9ʏo?+1+i`ne=ҝ:>ro [$սHI7( =HU([yP^>ҸIJw7/_,,kD5Kw稉'KcԴ̲RzԤqN`vaqAkRqf87nWwȲ kF4}kWd̔|X,fSI~DGf2 a1g,2%@2k [OـNpa3J=VvzNv؊xS(M.Uj;f7!|B_$ƹ4KM,ñ^R2DYюW<$JA&)1|Y(B)y}>EĝBuB)WbX^/ͷ^Ber!w8W8Vo!LS6P{F}賂WTX7bU|3($Ffh"{d(qY9y@ZZ#! P(Зh,(}w;*ޝ'5>7e~It]I;Xr,IEC=$w_v5 'zm%][6"|J fil2kH| vTewLa.,}rt^9j:[BNFB./t1;'n6JO+G#R<1M@~1+@մ1YzQ97_[9:c;-J:RE8A5MTISz]:Wm.3+}uءPQB xD>3Ʀ$ZL_a_~Y*̺4Ij K6RŹT].~9ݾTqfRqF~PP5[v>YeRqn]83Y|PSXf< ihH ]&)AVPybpihhy1Xi/W;_;< f݋%qͱHe'b#/]GsE7!lGd%F;)?? 
4XWC::!'qWKs  [:0<$FQG+AM/nY7Cz,YQp1:z}.~{th|"~߻Iop|u^?u$.ŸʕS-~϶ [QrlE^^r[g3F/LZ88?ѮJM_Fzi4TڸÅwW?&,A )}9Nڌe1Zp82f@q)Ex}Q0榗^v'(x?֧ǑEu &ST+7N eʿs-p 5+(Aerr 0G-%׀ɳ>SṁqK6h;M *~,S#Bf,_K8ȝ+źk[9΄rTk:( %=]葨0X "m)Rfe Z8aoK#mE.{T`r݈UvG)آLqE ڊx|~yT;o^Bm(U>Yx 8G)P\f*ΛJ 8j3z8kǡ`E8k:lj>h%齜qш+@jaR|ϝz: 5!a9ONY0ENS|Xg] S'w`QUa{iw4VB;rŶen܉ENlذaMYW 9P'rN#`0e۱fw_ A1 JO D{,5RxK6aL(O?[[9Fuae(NEW*ۣ6S6wDw$CR8@".2uT Pk\] |пJũOA[Kg;Q?PxJ8L(|}O 838#+rqlW#@VFX}UH),f_4#ؕkNHYdي]`HtqlŢ4V;% 1=s}XD 3x+SV FXE2źi?5}יּM[5ģXuAsØs"32RIz}0sfs$:oܸbl2H]:NH)"Rw:2B|>idJP_zPeZTR15 GRqfl\풋ܢkm r ԡȻp̻0/Ea.`nJ8vn70Abr؁y8N';jƌŘ=iaU[ZzVm_aT~n$q|֐ H:)ؕ"=a(<MU>2n&c*pfc/9bd 0{7vEuْ(uslVM(Ь)]G2&ߎag@}vL鞧: 0j]"_,F:X-/d1r͗9_韘bZ`>~_2ɏs2dw>-(SPV8_)ivZ}~͙z] Z0="^W^1#1'o OlVҡ{F΍f5='ˠS?[s"6q_8)&:@Z:7%'i]A6?FO X TAQ.Πy0Q@DVĶH# h?:4Zcy%^!Pw5/̣܇8"|hp6>_85mLU+ZyKxҧt.=PEap%Xap>,]uu p0?mfHW~y31ef?+ϐok;ϭq"@5G'E)RpQcb;d>W^XZO 8yk5V_@[砄_8WD3["6`i9iC6>K8>;iO&'/kq40z'#߁L?_uM!;TWލjn\BߗؽcRf,9.t +:( yеտ@Y!pf3(_š``4IN1;Y!k-'sZ[$aoVSp1uX}qo!Y(%i~R >СT]z+pKrг!n h™*#qm ZhG[Зӓ߇[Nq!Gt36t|5 P?ovpc8cDJh?@L 1[vw:*( pջۻSW(pJ#iB4m\eݘ iyMwy!2~ą]0Y@'=# 8 B~p~e|&.W!x[p>E1oZ1VcxqP9C'ϨJ\OhSCc.y[SO"3f4ey'!iO.eir,l~:_8 Yqiu2~PJxX%o zz/iqxI%κIGc7!PuS!aNz NN#z5.|_DZ{FFɜuFo õ/G!}׀Cz"pX ʎs2x]oTXJM{T#?] >'0җ|3Kq#o73f@s( Pޗ;nHt0;l`J—r*OGߍ+&F'w?_j2|Px[X +V4+y$?`*9+&Ox ZX"V+?r꿺*vFW-2倓\WPCQɸ6nIb7 $=T;;-AC޶0< |h>kA1WcQ]n'%V> ξQ⣖`71n>4.p8';-+c3ۛzor czVLar'7L 4U}N+ZXJSNsNqk!?p*hS1?+9J vȶ;'8SA@>=Phå|=g/7KĔpuLs%?j뤿\ 02LN~ ^JgbپY'MsHM ϏR^[8sLzzIIdAzdn[cLLGHr@]A #u˳Qumy$Z1,cC;, BQ;mr@7o=نg;/ R2Ƃ*v|zXx4z"}ot 7; 21+K[w_^dZrw@Sy,|BMBv0Hs/NI4vHրQ)LcIq',ۋ[4u-=XF3 F0ZBx =ymU*O>M8T%M%B B B B B B B B B B B B B B B B B B B B B uYKc;NQjy ɫvqgi oNíe3+x){q ZB5M^[tέ8Uk()|G_]ՅR*F=Vӱ= +ŭ\ρQi2on0t'(ݞ秩 .9|WB.OUU`2)n*imsN?Hq̓,Z!K2^{؍ 4f l=oEp eLVyܞs|(^:1*fٻTT]Ο];w%і঻5\a߱8P37_"d>SnVwux"#gƢ.]K8y7^Y6P__ʡˌҮH_ZӴ5 Ҍn{w{E,N! W@,X܎ՇWS#=%1WyŚܲHOQ2љӓ}ցziW,o?{Ҥþ2yLL7qZJ~mᶃ |kLpC<+!%,O7#ܞ׎ې[wen ?{h|.|xa'}bǃk1EԳ"W9h}v%lz,ȚvIxv#e}rO9h;.I&_L՛m£dz&ȔgX9,B>ē7F喴ʲ}jԿj }?DEz)=tO[}NhwIy !,`9UNi1!s~*XHNpMXO w:Yzb䋎bF7:̥EF%L;\>G'{@aBZeM95} R-~A sM2t)GX[0ex=b6O Ĺ2-J:в곎js=r\q~ٛc om5Oc~ ?G nW| ո|`o侇 j@N>[Ļ,/mԤo;x yڣk.?8Jge or P_X$@agp OpYS*G9JP]x,ǙDSE-4EGf J`{о6>ڙX(Iƭ(I#) e ʕQ\?!B˃,qгvaYwx0f4Ue`N{,b] }hؿGk͟ND*Wa=H>K&6@a1ޗrf I?~ΜE7SN~xq%S- inb=8ȷ{JCѸDk$l:<7;+LO3o>sH gO}ܩۄѓ#=4Z*0ۙkT(csD/mp5IL1d>qgi!4ݮy䑐RGi}ɫ}|M$w٦K* 2q,C΃#|6Z&.k4GE~݊Lې:*a0,ypf''ȟ5c4- oA{|bdF3Wk<#w(sb!}=`sFcc-! ?.2H%:wwg-a~؟]52]3|KL[Q:.f;˼^֯7^_T_%C-G K뷛"xH} (”> $Y/|fz˄7Xh"QTLIEy𖖥\GGsEvX GPBj'&XG;;)A8iv=ݔH1gA 1M6_U+I;U3tO ŸL򇇘?} w$pvÏ|->/?G 3JQo_ϯ4,C\^z _Wa]ą`wٴ&4A0E_e·hBҋErHwAh22) fBh&c1ϽVti۾rQh2G>@?t +ʏgˎ I=s}̕':]5޻ gbؔ5.ړ{XԤϜxLj{:ZW V#`庅i?1 ;l4c PK4OtVN(a*Wfi5_kNTk cu6FNYo*y+G[IM$$ 4"G co_%"}B4K הdHSӮhҢɧƍ+)lgI҇O˔kpYQeT'*W D S}5Ƀ3F#BA;!Ίt*hY0ga;mٓ ezR X2$J$tT?l0/zTNn!ĖHߋv: Gg;-qؖ,y^c>ka*j+< ,_ JPY!DO\܅Q;=يaYzү򌼭0庆=q@%I7yĴP4l9h}ąiMSjj;]&&_3G6ũI4㶄@\pZ.IZxQ$Gm5x.'ϣ:-HaB}|S@"!4"_ J(X84B)+@x?΂ or!ph}bqS/.T2B"P+_‹q)0Y)ėk2LX8{D,/Ę Š@ ~Mk' prF{f =rz9p9BhԚt8wF܊S3 ;=`@=hy]8J> zfl ^aUV=Z2g`pJ\'-*Waʭ}O㮗ѰnFWvY:ҳ^|hW{Y 6G^P3Η0אSϩ8m4sq.9ȫ.O&t )4\ywt>86ײeʔ!h<?\LL\d@g킾Dzr/߶w s)|ꊾw|kzvxQ9]EK˲mWyc~g 6χNd$-Dπ^ . 8jT%tP8S2% yAbϤpvE_8ob(* aU ]ˇq_פ u,\|h :Ib F܀W|@rI{a:U~t (^G`B@,I2 {+d onϊx4~!]#WSN•79J;oEFBE&A yzO܍叶^ـ O22蛜4߀K6 ;|VTo*j'/'{8x1<[1=}XIs|(uN#Xە|0늛dE;n5N=GWU5%3WGdY.zI'8+uhWq#RQCy+Lt}C^ bXh\X+Bgq& d5 .v:^>xT m*m|ՍIU;XMJorԌz^.9Ã]q<+iL{$5-]ڭD4{:k>k:ao>.;KV[8gIzKtԵzNej/DۘO|Gzᆫ9++lWTT/( 7qxNGU {nG3! 
JapN37p"֝NjXSc1y`L3,1,L/a_XPyx3 `"lz+&ˑt̢-zxOkhb~0 F]>1,1"mem; υa rg'%i;9x=?L0痂R'9OJUyGw~/r:=!w>_; c1c3ʼ`d& 4Ƿs##01ǎ-M7\psŚ^(5|ÄAIXո.5)!2 +RPTs5mNvj'4\=G]ʢ#̊kmKvXeOKо`)bMq:u=#Wa:X-/D; Hre<+|?o%tԘPXz?PRfMg}Kuc.p[Fn 0>d~t <}FGuRx. (n6\]@1|X(8}#RbOtԢPxIm2us4iDvqT:r,a~R~æɳ0o!(&&c[w!a[wX]=Cu\O֍ܦ]"~O̶u$}sMۆgۉwwgIjKYT&ۉ}tT>~E*OcpҀ!n}Cœ horV]h`Ew;OQc+V0v\ʹ&-Afr(DN-;l)_piB,CeEEg];?,M`˔&Yqu[*FA;Fdjp~%cEU0ѝ:s8{<:I^ S_Eo=b@ {]9]_ fK6XWCn$mu\E{u,gjN|xN?& a8{1 ~ 1E:~U@T5mNi'2w5t껜{7w\'X]ɩ`{mX;=Q6d˭ cslZaX Sjru+"!efcv-LkOrm6zAKWQٰ`Hvyvw ᨒ}@[ ~owRШa`#u̶}2,]ӷEmii sS| _UGig^FV1oaKjt;?|ہSYd~w`|W'?@Sb=H( Ň^w2Ofh#3NQOS#t~"Pv"{F%u|%nSQ¦e[sΰPW0:y@=C qZeK*ro\J!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!P`SIENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-selfservice-flowns2.svg0000644000175000017500000013356700000000000030505 0ustar00coreycorey00000000000000 Produced by OmniGraffle 6.6.1 2016-10-06 18:08:42 +0000Canvas 1Layer 1Network NodeOpen vSwitch - Self-service NetworksNetwork Traffic Flow - North/South Scenario 2Provider network 1VLAN 101, 203.0.113.0/24Compute NodeInstanceLinux Bridgeqbr(23)(21)(22)VNI 101Provider networkAggregate OVS Tunnel Bridgebr-tunOVS Integration Bridgebr-int(20)(19)(16)OVS Integration Bridgebr-intOVS Tunnel Bridgebr-tun OVS Provider Bridgebr-providerRouter Namespaceqrouter(15)(4)(3)(6)(13)(11)(12)(8)(7)(9)(10)(5)(2)(14)(18)(17)(1)Self-service networkVNI 101, 192.168.1.0/24Overlay network10.0.1.0/24VNI 101VLAN 101 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-selfservice-overview.graffle0000644000175000017500000001441400000000000031555 0ustar00coreycorey00000000000000]kSH<zuA2$$a3yw-6bdOKȒll#`u~ss^ػA;I7㟶˿~?wG;=e{=8ۏ/Yv͍V~'p ;irٷ86tY%\8ݨ~ë׻,oa x㾅Q? {a:׎:A8SW,G~ w'&W'48;CjgؤԚ*3ϔ̔,j/W]߅]Gqѱm'-O)ᆴJ)0Rly0+ egq'ۋAGIMnhYyfGٷ8=NqN|-.&7Sv`ǣ4mK1$X``)zd9[{$E7ziyˠO ~a忌nYXFئt9QH/~jwN, /= Ҡi?\N׮ k~΅7o{?Eqx9uPmOsu$pb4,VY9#(<Z%B-(kTkK2K9RPkB $eӋiuMyp6&},<`.MMN]xJmXs.Qliw+{߰5z<.VɻROHCYy+7"RmPϢ87AjQ49-d#K2ki,!MzicOK-_Nzi08ͼx[Λؿ2˪@ n$Qz@ )=3=znt5zƼ).9f飓5rfgrӹPN:I' !AD P!;OY?yŒx5`whqo:$ Lʓ^}i|g~{ tSʠevO?"/|(>! 
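[Note: the following sketch is not part of the source archive. Since the figure above survives only as labels, this is a minimal, illustrative Python restatement of the north/south path that the recovered deploy-ovs-selfservice-flowns2.svg text describes. Every name below is a diagram label, not a Neutron API, and the hop ordering is an assumption read off the figure's numbered steps.]

# Illustrative sketch only -- not shipped in neutron-16.0.0.0b2.dev214.
# Assumes the topology recoverable from the diagram labels above; the hop
# order is inferred from the figure's numbered steps, not from Neutron code.

NORTH_SOUTH_PATH = [
    ("compute", "instance"),     # VM on the self-service network 192.168.1.0/24
    ("compute", "qbr"),          # per-port Linux bridge
    ("compute", "br-int"),       # OVS integration bridge
    ("compute", "br-tun"),       # OVS tunnel bridge, VNI 101 over 10.0.1.0/24
    ("network", "br-tun"),       # overlay terminates on the network node
    ("network", "br-int"),
    ("network", "qrouter"),      # router namespace toward 203.0.113.0/24
    ("network", "br-int"),
    ("network", "br-provider"),  # OVS provider bridge, VLAN 101
]

if __name__ == "__main__":
    for hop, (node, device) in enumerate(NORTH_SOUTH_PATH, start=1):
        print(f"({hop}) {node} node: {device}")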
ҼHH@`,-=oo?;㭘Vjejkn@ǨQމ~〝_ϱ6WF[wPʷΰH*),qszJWGZT:gyhg+*o,o {B-~胠"2O\){,YOӇt*U:ʽuq N^5|2|~3j(Q0rfDr~.סc\J6 Vnѱ|46|1w“(BCcr0D 󜰒/Wߵ0w$> @h& /~,@DžYQ#UG&wx8P6iZ@co\)чW Gw@bdu5:J[m׹zU")Qj98ՎaB:.7N3K&Q; w(J: e~Dyk]gejSEj>#eP[w@QkBv-x]u!JKrFs2t/Bd{@ȂN{ܕ`a&kHHGYpL~ө|]&O^5(l>6:zzOꕈ Uі2GiEH$@$Д{k=`Yy'-剕O$p0}~ޡhsV5b)^rO(gAxr`Y<6pPTÆ*\jMZ|,j U 6q{8è?$V}u]D` |ٯ:'bnv 1yFL>| mIg#K+ > ?Ay鼓 $@ΉkJ!=8["m洲VuX&ľ&mH#uKuDBy?qPF^Wa+]V)ա+MmbLu(Eȭ)B]h*ꬨ;o^eˋ}Յ#[aTEW.Q4)cwWf 9aUjtqG+?@DR')8`Cο×bHuVJr3&_7*xL' 8xb4DEa|]Ku~ַa 2 26 ˴?iZBdW  C[GDg?{mL~ i1Ky^^* M11YUFT9}RCYdknk‘612}bfdxwa]E;#0$KC֖~wK GjRr6_ށe`QxS\NCtUpEMdǺ0qBrj62HH4~iewpݭwi5*Ab&2-א/@g:̚W{UȰquIcڡN ;~/5y?NZ^7d_AI )gEhf(|=|_Z +p;`^K޾?>ö+G}Yè"s<wZ*gbwKl@\?#^GWJXDU}h0s'@ gr_ʻIf†Na^a##g}#߲/ב@e-C ѵ.пK:jW)1˺-E8KLèbkL^Cw /,0)YYC veo+ $@+/ˏ_aq'cII-H*g6|)_Hfcӗ 9SKoMxj@a"S[:8#Ofu >&s_dzB sJwT=2G}B~=`F[-}a]   t]semFoȄQ )ߺw>Cl1ڽE_l)OtxZ9\"nHް@ӛ |ކ(pFʫ+b% K #;R&{|N|7 + @]=;ǻc|OG<[iab]Һeۯ3JmVښ3srI5ؚxS_[*7oGϺ/RS$H֔d~N[`6P|F_ ø1']]u372b<  h*ߋu~PrpGP}ʰW'@IaHߢʼnw u}ٍ{9i^+DYeL}xE։ܐFu^G2qԛg迒;1:&C7oٳ{CPu$IHH{!+G=pLGX¿]^Cߨ^la"[.ގŖ }2$#'x+%ŸdfFFH3Shs\Bp0ǵ8xr/eݾDVhxHH!7'!RE"T>'O}@m)I}(FríoogX"'o|QQ;Cu3z[x9_13玚l+sy1I$@$@$@$ph^}oIf;E7|&g#Co@ٶ>y&82M?Ǻ"'5e3i?v5X7pc5<    8 hs0G>U*-qvuK^T%                                                                                                                                             8,?!aViIENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-selfservice-overview.svg0000644000175000017500000013665100000000000030756 0ustar00coreycorey00000000000000 Produced by OmniGraffle 6.6.1 2016-10-06 18:08:15 +0000Canvas 1Layer 1 Network Node Compute NodesOpen vSwitch - Self-service NetworksOverviewInternetProvider network Controller NodeSQLDatabaseMessageBusNetworkingManagementML2 Plug-inAPIManagement network10.0.0.0/24Interface 1Open vSwitch AgentInterface 1Provider networkAggregateInstanceInterface 2FirewallProviderBridgeOpen vSwitch AgentLayer-3 AgentOverlay network10.0.1.0/24Self-service networkInterface 1Interface 2IntegrationBridgeProviderBridgeRouterNamespaceInterface 3TunnelBridgeIntegrationBridgeTunnelBridgeInterface 3 Physical Network InfrastructureDHCP AgentMetadata AgentDHCP NamespaceMetadataProcess ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/lbaasv2-diagram.png0000644000175000017500000005344100000000000025415 0ustar00coreycorey00000000000000PNG  IHDRM*jzTXtmxGraphModelYo0 kx &!13+ asZ~6 ][ !8ibkӁ{=)ecs8FEPe`*sZҕPKk( g2ILdL)./{q \0x_K:ʤcg#:^f3l/Try}I?^ m|sc$aqNIT`ܰ[`6ґ e\z"=҆K_8C3V”9_VZ(?Aij+ٺ+5C2J3r4F9e` 2$Z nq 0n0@_XU+"Mz{e U},Y2,ZGB邥!q9} >ڨl3:O06 ;d7| "''q +\r1 [zBv@Xg\ɍD6;VŬX",_@XԬa=pN{CYXCD&(eIӳI+8,|XhS=ȵɭ0ǟk$JY[MU+d te=x6ymqH?P۵ 㨙Hw:Z>i8-? R&8Ժ~G~QWF:]ϟZ C~g!%VΤp DVC& ,ް}AQ{]xz!sDmByݠ]ϳT+잳\X2Zvw65])rhmg4ǰP%ԨuA#lۯ{aJ0E^7oU! IDATx^E QP 8FĬQ0AG *AQP (#9 x*  $Q?z/շOW^5sT?    
L`&ug$P)?ONs3="*$N@ W[fFq %Hّ֗@4qq&@,P_l} !@3!`4 bH & DU/̎C0`qiev$`Lp܍P *˙Ale '@3$H4&E}ev$`j܇ ,2;If0&8FhQeble '@3$H421̎ "@3X>Lxh4ψbH 8Ը Y bH & DU/̎C0`qiev$`Lp܍P *˙Ale '@3$H4&E}ev$`j܇ ,2;If0&8FhQeble '@3$H421̎ "@3X>Lxh4ψbH 8Ը Y bH & DU/̎C0`qiev$`Lp܍P *˙Ale '@3$H4&E}ev$`j܇ 2 /K.\M7$ϗ'i_mժԬY{M6rmI:u6I'$:t3蠃w};jrzOҋ+} Gf0= ,l n2ectQ3j׬YcSSFJgtNZfͤO>,[B3غukdMT3~jrfRgt9sL:{?3XYjOn̜.cǎ5rҤIN;O\K{1šfP/kxu0W mf.֭; 뮻:)W: Bf0!,f>;DW\a ˷.Ȼ+{QFf'4|}.Rs^EOgP:5 0f쮻2ݻw7}AG86d>åyYն{oydCaTsX\"VI$t8+!3xyɯjK|?^j+󰈚@E7pyH;0{/X#dc4GQ:38|V63 &c=V,Yb ^ت=34* ` HDH .͠>Io"5w:{{GƜҥK5klz9g1̞=[va9=sO˿gP/I=z/Qg&>A}Xg {+CWfj.ijP'4^W3x'#kԨA3L *,nJY f]9RTb$.ty7͂ -w&H42)Of!>!I ]4fo$=%*(@[>Lh!eeR$B|v!I}CP1@hH{4 KTPԷ |ܙ BʤH >Bؓc$t ˛hoA3 @I@|4 م' A%HL7{# ,x/QAR߂qg$@3)+"h aOJ%@3.oFY^$IfRV&E ,gž7# Kf0]'@DH} ǝI LY.=o*1FH` xOf{ ;$AHY '@]{RTb$.ty7͂ -w&H42)Of!>!I ]4fo$=%*(@[>Lh!eeR$B|v!I}CP1@hH{4 KTPԷ |ܙ BʤH >Bؓc$t ˛hoA3 @I@|j`())Nّ T`pqc 4)CS T`Uq{ (4Pr2 hc@.$@8hqd&$@ ƽH@ 4Hb; DP9 BfzܗH x4KH $@3X @N$6c$@,![ `1t D & Jf0T7 @Rh"vH$@3l H A4 dS$@ O3FL$,dy5  &%H`H @HhCR @3*$`0R1P Ghe$@a C'FI$͠;lH 4It Nfw k4 }  z-#H`  hՆ Cf0HS4 ðHR#@3jvD$#AUaL$@iL6"͠w0  ݑ Ef/= @hgI<"@3 H(h@<-Z(9sͽ*"P~+ViqQG̘1SH"4 gnkBG FBV@A Xˁ03Af023I@Qde Cf- X p"`-"L$QpEQyY$A`$$`%ԊBP 3h3!3D!EIZf ?hтP+"nr B1L̄LP%k5@G FBV@A Xˁ03Af023I@Qde Cf- X p"`-"L$QpEQyY$A`$$`%ԊBP 3h3!3D!EIZf ?hтP+"nr B1L̄L2TӧO:رcK.@'M$^x<ҪUPc&@3awh@YkזիK:udҠAYf_^֭[G\$ B&Q4iC`ȑ2x`KY 2DN"4&%@TEtVpڵTV2 @?lBP1f`UgD9+| 8ذH?4iˆHBA, M1vT42/8:4H{VBa-h׈!Ow]&L z J?lBUqDU6#yʸ`()) >AiZ@Sk H/E_<-~v7>;A?t`1 $\ 苒G ez7u0a4 esI8]i/JiJ7gehYAI8h苒Un@}3*k(a,Pߌ 6 YL'alQEgG}~CFO1(yR6I}SG&@30P6.vo(䑶G}CQqVFf4<(yX7M3$fSI[u}QQ츧hЁQ$pLp솢/JMaRԑÄ & ͥK'ty(y(QPbdmM'/JV2ͨ@i TxVE_<-~v7>;A?t`1 $\ 苒G ez7u0a4 esI8]i/JiJ7gehYA$|GH^3,nO>D&NX .oF,.!E`FQmB7~v3 h2p!+dW^-l馕*4fYn\yNU_e6rGC7J^!:$`|Jg4>جBdg_|EKdڵRfM5jԮ][N;4QtUF!Æ O4c9FFm?7xC,Y";찃˗Kj2_ A}~?죗\N<ۚ DEBQ f0Js #3@GmPh"Jg@6mT.Rԩ,[L."=C3ф`YKT&P4 ǰ'IXg\rƚ[7߼=j̆n^5KSO5\fҤI~zsw-l͠>M|9W_}eL(bNӫ6DU'@3Xuf#!  .}QR 8#@3 -NOiP.^(QJg>NfΈ[xL'aI 4}QH@R&/Jf0Sr%˓0苒vώg= @3"&c d7}QlR&Lf0al.]N4s^IH_ݾ}{3fԭ[JIK{ys~ɣ>*%%% "I:}x͚5q:O_)W4֧z{QGe;>֧uXwuL d̓йx y7! l\H}Af뭷6ԧ«?qM?q/3衇l:kZ_~V}ǛOVt|:/.CL_~>q2}mf2sLwnVV-rIOVtW4 o6e]̿=rW_m֯S;SUV*%č3Gf0sc%\ !)N?t9C믏QKDӁ( b^{53Pi{キ 2D*7oˑG)j*55 ]piwڵ/}ܒ(]$)$:귫[nm^+t 'D S|`pCWii7>K5hZjj԰}Pk2k,EH/63?LrKEu[q񔟛/ַ.W6S75jL8y \n믿n޿Y}%Wh]epuh"{1lj:w\:馛sԨQfFg0v[θ_3SHfF'}@˧_W3#/iA7OR7SH_Ѻ=zƴZojrF=XyGᅲf͚_ҶA[taaS~뮻N>#Ŀ/ryYԎuF\8XBz-ϧ~Z ~5*ٮ}7&Ghepuhzܹͬs`Ԅ\%d4[Jn̶:sQ~fPg(ej{juQ/LJ[l#O?Zg9/i|>IJߤN!yT\`n'1b/5[ a&ݻ̠Y={^zii_~pq3:띿,]Tt>j2.yF=f'|b> EB>!h]fN : D/-錇q:xn4Kz/ɓ&̠jtFMKz_ދfDE 3IEo~̠Mnљ#; 3My>IJߤN!yD e]ff^6mژO]̠c ԙ|32A:5_Tj>u&sǙo*C jf0= O̕_q6n8s~EѸ}mH5AׄپSN•˔KF5q:+~kXgtPS3g̠^ց[gTtѧK|쳏$Oꢗh\^u]EfPsKĹKbfЕȮuoeGEshaٷܓjN j]*3:j$vڙsO ?+1OM H_t[̯̏6M6-gէu% L!]*"@3Ⱥ ]HӛǛL͠]zi/KVFfKX >O5/´PK,1^~=. ɕ.bue6dռ }Ŋ>Ĥfh IDAT_kBMA\ё\Q/o?3F\QuujJEkV@H/4z{>ͯ?!כcX=5zLF]P4j If[ib}C/R S:c3,zn{uD Y4=:먗tEԇKLuYe{ͥ內P(N7d5uvOoYhٲy:= I:ۧ< +[A}֯omZlMizU*Zi5,1?1N5RUے+4ȲT$ u/JE+; hxYcC%lbꚥh6`< (y`W[o|v4~(bI8&@vC%@&0oah%p E_AiZ@Sk L g Uˉu)3&@3/#ALPtX2-X421DdUˉu)3&@3/#ALPtX2-X421DdUˉu)3&@3/#ALPtX2-X421DdUˉu)3&@3/#ALPtX2-X421DdUˉu)3&@3/#ALPtX2-X421DdUˉu)3&@3/#ALPtX2-X421DdUˉu)3&@3/#ALPtX2-X421DdUˉu)3&@3/#ALPtX2-X421DdUˉu)3&@3/#ALPtX2-X421DdUˉu)3&@3/#ALPtX2-X421DdUˉu)3&@3/#ALPtX2-X421DdUˉu)3&@3/#ALPtX2-X421DdUˉu)3&@3/#ALPtX2-X421DdUˉu)3&@3/#ALPtX2-X421DdUˉu)3&@3/#ALPtX2-X421DdUˉu)3&@3/ ӥC2vXҥI&Ʌ^(>j*,~X+Nf0'Pvm^ԩSG/_. 45keݺuAu %&AL]#G.ͪf͚2dׯPL%dӐcY'@3 `AYkזZV2': iaȟu HٰNd.Y"@3%kr. 
ZFY3A@42%L:2h :t(Ĕ"+)L"ch3&8 >=ܽ{w0ab.$#֩0&0ce\ @&څLR:eJrslD8FO=[[" 9lUi٢苒kN+ʎ]9}iu6!ȹe4)苒Un/rn-2i f f2T/JI  bl"2T4;h!܂.Ld ȹP[cD%z>ȹZoIM3$MB>sR E_s+f7(yRŁ/rnQ1,"|0#Vr[}Q8< Y_<*B3X4w|0#~#(yW!~D/rn~TOq,.T{G>sKH< E_<<-/rnE/@B@>sK>|E_<|bƆ/rnŬ_E@>sK4E_ȹ9+ HBCE>s+TwQE\䀬/rn.j!6iCSxf fW}Q)A9 R3oF&NXfm&3gΔ?N:I:t gyK{vmg㎓ݻo]kذa_N;T%Kd~ɵ^+nA>… MGqD `FMqȑRzu#y}2f[n2馛d8b5VPEɃuZ񁀢oEٹ̍𧒒AS[ͫd^9sdǀjv ^Z~tM`n:+#TV- ]kzʟ駟.r\0 d'iXYNJ.F]nf`åg}&={43hՓca&%%%ҷo_:uA:ꨣ,ΞfemQF< {-v׿c=&mQ""|7d5k&4jȘN=\7o1h;wA7|sӧxb?!7fRguN;MjԨḁD꾏>oݘvI&MdҤI&?>9 c6D3XUtR|'ꫯw}wuJ^dRV-Sgm۶pTuꤴ7vAt3x1\=zz/R^x3;Β Θ_&V*73QUiٲ`ڵr.r|ffO͠jJNYrAgԬ5hƍo ^2q޽k㡇2#_~i3lbԧ.8$Jn].ɱҙ33+V(Y^=T̙$-~[Si).kv =Pt3YӦMͥSI/^tE2~xcr|u6Ng;و# eZ7,ol3Zz?5\c\P `믿m~@}ё`>}̥h}m#D3ObNsY63H? B: W_ }XY:Kj֬ifҰ>u3_=jtNKULŨO zaÆe̠@%4^ݏefP)ԇCibaS{h\Eo/P? &Xe&f27m(f  )OZ.b[\ M}Q~ҊY_ҪK3J ņ|0#疐A7/JAEaI4`0R([ʇ(y_Qn2@977V4aUP3rn3(yUi 닜[`47d9z#퇢/JHd."d `ň`F-p苒\%ȹ%$ -_ՂG>sʘ[苒fȹ|- a f" !(yZA!닜[AL3"d4fܢh (y[E-H "i`F-C%Zi(ȹEh3T3rn*QA4rnA]B &2ffB-1苒kCmY_B$Lm!ȹy^V/J`'"`%2`Hm`FߊJ/2}QHOzB9M4nz*ŔrP(䑲t/rn@ii97苒G1j >E-r#͠k|0#Q -}Q(Z!x1ȹy^VG3 f?:A>sz(yY_"Lu{B>s+zx(yxP^/rn^SA !ȹf|E_<| @97 zD>s+Rx-(yxU/rnPB,;F>sKRE_<?"B97?Q #ȹZ$v/JIB9 z BZ! ȹU>/J>J1cC9b֌/} pCMEnU 6ebרfPF-]|SO7,m۶WBt`K YK.h"ܢ$Y3pK袋dժU6i$ 3nCE#AhqN+w}̜9+LW^yŘ͛M7$u-6H, $TbJꫯ?\&Mi 4yO8Qq\H7d}STDuʺ@ndCi+;`Qc:ur*' [ZܶXX"~8c3ϔ]Fz;J]t%/͚5.&a(YYye„ rGh Z`ϝ;ON;SV[("8Ȳ"B : AW_IF_4n8{61`rx衇)޽ 6,^܌%A6Yl ֩l5>^{M:wl^SZ I3XEan̠1Bi &m4.9ɓ;W_}Em ƐqfpΜ9! AlGXIkO?/&!ۡ,@盇L6xc3Sr!8Ⱥ˶":M$)@=d뭷kf2/`2?fP?k "AY$i4V-[;vh#رc)K\H id&\`6u]eܸqrF% :(ti`M<1Bv!$9`Ā7uSNUٕےpe@uJx9Ȗ[n)1.,_-[ff qcÝGl41cis֭SO={ Qs皗V뇳up*b4:dCP1NY. r-HV\vi=23_n9>rX3yn梋̶I3z#~GQg wQd d}Qql49s',ڶZj%|ۣV^mf x c 9"d8$AwTӨ]?^E.n Zp ,0p76Aƍ n Olf!iTN'뮻N͛'z;whݱM姟~]:t cK./4i\x裏 %Dv d &ȽKnF9ド9@iTPYј}',rTk6^S,_\4h k֬˺u6H)֩Sh|ҥ1瞙ٗ$i}Q"F3f0C1nݺ1Z.e,?si5k֔!CH~|e4#B;J2d.ty;2P~ =9]41}Vx3 NCT1.F7|d4A (Ǐ7:uʌϺpV+֩@|G 2 l23KSؼys Nnօ٭2g1ۼK# GA3*N)ԛq 64;iˠAdСW0;)48ɊO E韝%@3^v}/andbO7'L z Hu*~ԤI/iƯ2 `F1b\{fG0Ym`()) >AiZ@Sknʎ;(sΕ޻ָ{hy[W6g6s1Gl_:3 y }Q\N}t݁[ŋ5hv\oA3:w`s?警q*f< n(H٤&0\o60h 6m1z#^>ROY(y`W[oFmF=csOh#^>փV7'Ԫz(QuXg~z5wMfo}RO>OYۧwxK.PE#I?JGڵ-[\ h {.\O>x0O^Spp(Q P߲2l0i۶-xi iHF3f0|֫W/vn'ᤉ(yUDC}ڬ\ZIDAT7:x׼Hzf6+x'OIh@(䑨@Q_yוեM7HlB3 r2~xs?aΝ n/xN苒URk~W˻[|1A,4ese˖ .\h.7o޼ l'_St—XpC8iYdʔ)qv>Dx7ͥc† -(,Ih=ئ/ⴭ%:93& Ao/~ؘnݺË$k[_ibBm}bPQ {I'I-%|4kX 3C\s%ѣGN'8mS% C }hg4 [k֬1CzɱJm}*AgBm3l8(X¼:F_=ŽCf0Ιe!ڵkƍ;vwwN }mq;@>o1mS^l<}43DӦM33mڴ1Sm}&N2}mqKBJL$iz9r-=&NI%8mӥޒ&cǎ_~YN4A@O>=T:Kؾ}Rdm @(ⴭ/ \vP۷ ^exb3KrJ~̼v/_\kӶ{BSO#8B<|4{>: ^zcdms1vb1q|mY NsP6mjE)xˉ%Kf0Yl n3SxW~8bDmYNqp;u`o)mY񸇶W:pO?d,5}t9]uv=$@3(Y IR;,aΝi[AJ8jJֿm=ԕ2~\D=|9CEob-\t6=&@38Y mfp…~B=)/黧~Z&O\m'Oz٨uz֩+ej7j$ _~&4 ` Be%7|O;† uPIO/a;ysFeEW4lֳN])WQ ɨo6y͏m.%@3]\guUǣF/\U&k׮5jm'Oz^AbE:,אz֩+ej7j$e]&)[o5&Nh.+a9Rjiٲ[u9#e̙4Y)L{-z &AviҬY3KhmNf0py71I7]6|s>|Ջ3Y(rLs(\[4 AMDBS9[:tPhS i軮v?c2oQWD=F3z^ֿm= z_:KcҨQ#yg{^@鹥/Wc5xJ@T3OOիW6l2iҤh=У\Tm cZq裏Dg-Z$4} pfXS[b[\֙sO9hC:]Q·o[O3rDfsϙ{,XP]}F fDh4m'OzQ6uz֩+ej7jT%qƙa.$PAFl'OzA9u*nh߶uZEno=..h3*^x|O>+!F `b̯ A)m ZhgRƯvsup-Ȁc9FU&/_2(4Prf/ j[A65\Ѱo[:u_`m%Kիg,rK۶m 2Qzفʰa/vکT`$~{w]{feK=Zo[:@t̙Ҿ}{YbEr]t?*Sa4!u j[ {9c.1c 7 ]v oRK:pzi2f0:=eڴi&5kW.-Z?^zLn fX|mm7|#o,XTf͚ɮ*52fP+қΝk }X^o]|'?cSO=%~ԨQC{9sJg\t&rO?ɥ^*}5mw(Kifj׮4iD&M$'N6H.YvY?j(9(u*X[[omf]r9cWvId!M6>!}?Osߐ: N>d' G/_.:wA$SN5_'hРHSw| 첋}ҳgOWX.Ӆ }?S7o_fϞmLc Ņ^(z,]^Nc XGq;nV0a\k߶u:cqƮJ@hYA ! zвe huN/Ǯ[ re6x`cg͚eO/K޽?6妽˘I5/Bi:+3>rW}yszƌ}e˖nvyg*j ֿm=u6. 
pf5< j[ {g.-\wfsy.j;v(7t1o7_[3oju0J6m䤓N23s1R_U6lhP3غu{tk)SHMj}XցXmֳNYj k xAԶ>AV;#EM^vKC )5jKzR3+ȁhP̠^5wtt3<\uꀰo[:fuJn 2q6և2|:릗k|3mժU2n8cu38C6mZA}:YT/{upС沰~ A;ԇCԘիW{ՔitSͩH3(ᛒJ8z7S7S͠w0 jEj2mԧ^_ݘA}5:}Z8̠=}E }w3|p=w}l6e̠nu[a76UtaZbo[: N]U(:W@Qz*u*h[SWn:+jF@fA `;ysFDW4lֳN])WQ A3i[A6\Ѱo[:u_Ff4hTpm=lO:pEֿm=ԕ2~ DP19Ӷl)N6z=ȲN1uŮS} #K@ԓkMF/vE@. @}&b;B 5v8]ߨEݎIB4*2J D=Fݎ$໾Q㋺]*1j(bgzr]*A on8@6  fvRA4'רANw}u;`)Sz͠0B D=FݮxqonWPԷP?._\nD |7j|Qc IB4*2J D=Fݎ$໾Q㋺]*1j(bgzr]*A on8@6  fvRA4=F]|$j[/SX^~tϘ=@'D* D}Q(@J]/^'G3< .-< E8苒Gq^FH3,+QE#J)`hv4, E_<-~v7>;Yqo< YI(]m񳣾q Ə{IDHAl!y\`ӣXc]PEɣ5B70cԕYUOU(X.M 5;*Gf%A|PY< .f7&8V0zM 3S_% i4eF$@$@$@$IENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/logging-framework.png0000644000175000017500000020770700000000000026110 0ustar00coreycorey00000000000000PNG  IHDRaCysRGBgAMA a pHYs&?IDATx^ UW}'C:Š1VbSj[Fx*0Z)O%~pC3CZlTbSt-sAzbEC-͔fxh*7u]{>޳^{[bòĊCE',Y8q":p@$Q=R/Xn{.UwF驨2i E;wqf̘al(=uv7P/8,a}v䦋 [$!s̱5㉲`+CuJDukJĕ!3:Ti]4Tu/۴(V,| JDa%ט> 6Jx_;Gb}+ +Nj?+Qz0J,=4Ji7S_oh%zF¯LJrD5P>vD xnEʕ+u&^w܇%:uun]:)ze.oTi2k!ʱA`%5SRKُ?ԗal>YJ3FǟK6S6}VaW"m9Z8Y57}}T~mv-[Dcǎ͙LA-g@Z/g&D:f!rW" L[<\20B} sJdcױ;`^ )&Ç̮頾3eYM[Jds(BaDM`Yk`iQ-xo:ԕX+0a ,Ʃ]+-]4{PZ>$#z:^&o[ْ[$Kx[x iy/%D2V"-N.S94r\|9zmkޡCc-SXܕY^u)cDV V"03< DXiPU):b6+ѿ{l-)sD^dRh%2tYߔf10MYHm~V>)_:xm@q}kdej%<} CQRC%H -<2Μ9cg֠:xiއ -۳g-qi\yHY-j)m5 RMja`S:t%;̰BYJdܢh#4O@ˏ/}r~Rmf?ϦsW"wSf*-ZLX4ibǴg%“-="*ϴ%gfΜ-X2U0P/-iOj![OGXzeT/<{>*O\yZ:!l Y]-|"hrIqu}=%ݐy,rM?ÇiVw%RXpԑ`jWf܆Aݕ_a2xyƍIa警0썕+Z"3gZ5JZ[jާ?>? z -BdJQ­ cxWVVZIoۇd׍i= ;PZaV"nÇ\aSi~ ә>e]P uOMy3S7}YO"q%ʇ\D>k) ɨCok j]~R={JO%BRUjzZV.m9;\V"ۃZh7bs6!QFYXVPFM+Jd[/1SW¸惕HB&3+<(@?CG[kP>{ljpSD8UG$-ȔV"[5oPGܖGQ` ?'MTX$|XĭD5o"w=Rwׯ|#xٟŌs% /+r4U򥤕ȔdDio7r_ҕhjK:c% k+_jJ$2m9oܪt k=0>*|{Fƕ\@~qA)rf0ϽuO>\Te?>hS(n/!y.ӊ"t>w%8|~UOoj*"mCkXmQ/VW"5x3iKNZt:w+XZqUû 4sm1"1VzE`J$%v|A+IYImQ}ڒClqՖQIRK>6d%:*xQ K{[^^᾿zC^{Z-}kJ-s+QT,̖|v~ȩPyDWi·>8FB q% ]c+QmX(!Aȑa 4/͐oo޽#Ȃ[l}i|E"tj{Y{&wl\اU( /~ʇV_ x1R-^++4-c1g?#+̙3L`aaAhW[Z C+>zS_A2luu*w?GP"xw%P^-IӶ m4-XQ8e!Xqhdj/Koc&#U+__/4]h> gxʛr5ћ|}Tb_B;uT;ѯC4$go~V-&-bMF^]>bInݻ"ls6EqWeݔ5"m1u5쌅袬D&hn3<}rm"J8} HW}[)Ӱ( $QI\[OgP@¯B7ng%`lZ,~7twYRvNFb?W}«b,;$i_/ukb'bs)P̌l?{ًz6evLDne/>X[kκMǵDIPyQASls"?ty IPyQd:fS$L_a`6QOA]0ܨ'qI4żG  sΕz0^̭•zP̦47Iv= fSQ%uEhth͚Rp˰$;k,;{nmlo… {ccD!&&&2HD`v V6Wp[1$.\@3퐈ln-d5tygϞH/]?ǒ`q-H}TxcX`Ytq3g4u:έ|4 Ο??ig['YJN2+Ā|zH}Th܎[7L1*@C{{amPBIj} /K}816pϡyṵG1;* "d1Ca* Rb5>{_~<놦qt@ht:؛umBB{ףujL5Dh<_HNH/5F26(UfJC$5k:qPGji]Ax}ncY7T4Ba!,CDtgڹwJ_[컋SuGDW YwzT[44\;>5]y>@NՍYHGGsN#4([Ǣ9QGЕ/`3hnXQ-,|g^[*q58%)TC8A6)%圮,y\}aܨJv/bΏt~ObRϑ(hDϯ$HU|.ls `CDDm# /NיFܐ'-Xlwu;>?$+U:1sQ=yG@cwX ,rG+<`q]uT-.=ag+*Qy`.lo6JPsbI5+gӺ)y᱆rxNCkB>8qmgo]dףICnCsNp _/mÇ㺗 5DOVM%cL3N3_COnT3M`^=c+w>XCu g\>'w|o*':4 V~R{!n>g?>eF(1zSo]rv]g.8x^nC6+ i|4iH}*TIW~܆^g ,sOsagA+ +}c 5@W|[V _faa}*<}iCe4>njg$j W|}eCBMgͦfneό@!{q|{Ύُ#j+*PԈcvUOPHT\5Dg-p6DM*#pwRs[ky|Q! $k  Ks%.c Omxڼ bvQĮz'A0WjiFG~Qn=qOخ6u*mЀ7KiP(`]7{F>}*g)iPO(S_~]]E4lu˚;TLu):얧}ԓ ^cۀTLu!Dø9U{: ^cۀtN_krJzmvءCCc*Dklʃ>h_D^k"cۀT"s>e`&"1 HM'q!2͘t bۀLaXDD=ƶSzUDSl x9WEDDDDDDQb $P@DP"vػwo߯}R Gw6u[͐ .,|7nh4,?00 AW6 5<\EDjʔ);v~ tu8LGaCDw $:)aÆhbb~;t u~\dz;$jsD3"1=dѢEQuuKsk_O~,\ؙîxV"oStBG^1Q괾!͟\D]hĨ=z~[#1JSNk m۶6/^lew%q\eh-AD=nĠɔ8,TB΢pG/MFDx[$ qRbQKhCDlQ["F@_AD]N濞D_ZZ{?zJ PaDpImC"ƈiN-c#DD h a"FI1$!ɖK]Pꑸ6Cmٲ%[Lz'j;6BNEk?o>5D~V\#寓Ja&aF6"+/=-)>R˂kB8*,TnWSZvwDFrW$};ӫ98r׮]F_Z4Jcl L}}p=Sz =[/r3)WQ-^\K35Bo˙|$\:얧y\P:XAǡ>uTk["EcccǷ{C]yI( !Fug֏[~Z^qӹzqݡi!Ě^̟ۍHf#\bcϘiR4Bht6fqF|gN ,˩2qg#D!dx$ BCc>|4@'KOS4?CCǡ)4mF>ya>,@c Խ61Єӻ>'"dآ֭Wͅ6B9:~C $5B'OԆhssjC6@Ɉ|ӧmS!@= "*͎%"-*fU4B?Jl#3Ox. w!' 
!4B^ȆEpB=N#!b#DԚFW% >wzl# QR#zj6B*<}ip4Jqt\#0u+7OZmS<:gI3NnUߛ'%*n-~tMLKcIǛئy9-ܱF%}U7m4 [FPԕFmH+FT4ށ&O%DgԑEFHMZhdD$ QxP05C߻?cVM i2|f %0__KOImYr6D>nE]ZoԴϑ]LJ \%^"UemW(ӆH7T 1刓4uĉhҥ={lp秏O=AÍyFM ]:mmfKԻc6BnihuTjcO>cd}T:IÝ?_СCǡI6B'G g|C^&%m 5?+ ǿ便F(wVi DG=˗UVaZ'3{5B<Μ9c54FS?Qu޼RK4f?wEEͷg؊#­!j1CB ƫZlkUyR# 5.@ƨ 粿(PL؊i8n#tyYA\#qk&5@ H<%tAQ\uB#4ޓkYIGQ#?o;-h#!{保PhyR墿(m#/!bw+W%&;V(e0*!m- a:tJb]oг]SJ/&]|)ċWKR_boN}O0f@gz{*U\|;U;-o&7DBRQu{fWmeGDBf &CeVmMyCYQ)ۮ.kixMzfM?O1Ĭ|2[;wӐ-=q;Ne8.83gpN;m13.wIH6CZC{iHI9>v#qY:;T/-!pB.[{] 򓴈ef0CCGiH}tXC8zwat9kصS}]eGU1&v fyaFqx| /pF- ի6Z#dtP˝ٱdgdi0?'_!)w"]-')@qc-KzW-SN-ӟAO+mv$R;v ¿ uG:? >&#e#2mO1V!PZGJo!RkZ &:iq`WK!e^Ɨ>Li{v͚[PN%|;++DGϱPU4 I}F Ӻe.wf>W>, L5>W;O5*] ^C2C]t_5+ɨ6~?_W n~60q?P_l]ZÑע cյ3~wC,/ ܮKb`g<!/Гb!`#DD%&}z!/"3vFϱZz ?xoy)O:IP*HHYZ~p4r^cx蹓gdkDt>U{i/.\Iü~LrMr\un=p+;u~WZǽwgZu?4m-?%j.= ~9_}h[0w@_;|3T<{U֮JȤQ~|^!*dlғjjRZwJ`( ;^e~#u![=Ohz|%֩F/Nߛ)t [^X; wWE|q̻*[=Th\7]B#_Cܷ[+Ҳ24FaXǮPy\}]*(mt_GIey???mBY }/"/>J^ȼ_ZY6 6BJFH;)}N<է2 CR#CwTn#c̀ 揟]~Z[dx|ktN[:I&C%FᡱFy3Aǽwo+\ϧ2O* 42Miuo0E#aC628pQnAhhhFDWڱfdn1﾿ޒ5Bqܕ]wHӺ~2 Z藧xvAtV1>:jkWo0vm1n~Cà7כFj?t%Ի?'ѕ$|/?ϺG7FO[ϱ˃f62i5cցw:հ̆/xʮ?d}t1Vjh(s*9'?a~䭶dgO|LS]g/55a}^CO6BT+vqُF&?|R2I94kė^KX@:>vOXT4{$]2 Qm}]N>,߶N/ 6~Y0 ௳p~/E舶o7H\1;yn{FM545,{g5cρ'g_C/iq &tvZI弨Q/)wvܨFHF`7U9l" !) Z!)hxa"ʱNo Dt^N_MSK#;a&[fBY[N{Ɨy{ 6}Yp'HSnC4O(ɾ2i؇B埒4Jl5|,`+)bFDh#ㄨ#M|iFM$(Yq ̽#kc;^>¤p{`7޺ &x(6BԱ((nji e)@%^OneAѨ%G5U#o5P߳%%.\0W)r7ZIFGF8XQNh/^{F~kOJxSyѮdIj ]QQ5iUa4B ^zqb<֟tN) kN !z_M(=Q|մYJ oWu,[ }Jqx̓S9+"j7|CdkqgDx1~Cgidsrm&[' T7pANv4DT!X&gWjBmı2p+(ⲡn֦W+5Bn]}F(sSF O׮=ET+G u1!+մ{DD=~ 4.e?WDD=~nxN:󊈨د:n}ZwXOD=~ R8^|߼"")˳/zin4S7yf?^O/ݠWo!5Q_|z}MDC?k"b`_ Iu$$fKY&K-Y$ڸqcsѣGÇGv֭[z6JE$/^mڴ̧X,ぁh|HIQBұYDQ℄&.Fv~RI"9rVܞo}kڰajM ""^ .MiG}`SN7{ #>g"""ʉf%$R ""fj-[ !:\Z$DDD %pвS;@oY}FDDD /J3Bcll&5k !JQ0@/^Ԥd7iJkEs΍.\`7('O,_tqƌS>o5믿*QQ]|9;q}(-|Hd ^{ fZ ;ndsʕ?NJ ""8|楏dİXㆥtB[f-^XXB |DDDpԵm6=&pw ""jP#`0Q " 5F Ѯ`2DDD]璄['A%>DDDD)*D:&Bn~(v4Q>g"d裷 V!"""ʛ 7 r$L#TI$P-eX"d2ÝԱx"j$C+<+Çz^yo2CBϣqq*V7)\AD& S~ ӢRw0IJelq qÇP74%z^q5YD~>;QPCq FE~[z友{PyIP}DDh2P_.|fy]֦ $B"gAɐ&AH}IWTB=$qWqH(TK GKr+K"z{Ï'JI\_?~`B 'hqP6/Z.f&Ӥz+ll&Ai+WmnP0>m2P$t#q(ưZBIG}.}~h0CI\>|4MDrqus Djcs쟔/hvINM&CbŊ$j]" usSw1{…6H!j? SO2:OeDDDDbppЦ)$$C*%APQ:$^"|:5>~(L166fSR$CM$H500H\$Q5<I 'ADD L{U)S%(_F$s1NyPRTO2tԩ$u[26 &CgKp!\gWK|nRTO2 "LqL:^ Rɐ{j\g(tuZHWN YJQcK2\L[qd$Cǽ/k2wocDD Q; K'Z@2TN4 i&E(+?gܹb"DD%ɐ;'oϱxbwyw?Ɩ'CDEu Q^r4k.twu3! ;׵YL/$C:Yy,_[ ]x]GG%KJнZ `3pUg }WJN8-Z(6nhKdM\\0ҲvG½˴&I !$Cz6~Jd֨>CgidzH US> CAċ'?{oE1@ &CW C"ǺEDDm i[dk$t5j#7*Gp\7H$* H$K%FqJDDmpHBI@Y!-kwdM_IRWI$C>Yd%euX"O蘐$Wm 3m-.) Eѱ'(ő1ܰ w&QO8'!mo9p?$Zu\ϐ^/.%F l_"H5j ɐrO,7_zE*ǎF҃ ">([.-q 4vT2w^Z]?$AQ@J`7k$n^=CIkHozY*F☠PR$q}%C[KjUV3C3amd9(-p3W4N< L k.qAVbQ"BHeM nRv;:*.n,d*s(|-ɐ^[R={ء,X|n̙3's{+??s!_ ŕڈ:Bh^U X-qįH`S CF ҪdoIN8ddPE(0U\2K.\aԸBɐB1\-BCMghX˒^CHKDc%_*$t_YYyK.1OH"OIP{dev4&^~t&sv:!Ayvlmza0?qvjɐ>?!ʐUs&y#w4qd//( L(cZ%!BB2 5:zaQJ֡ ᱛd_ϯqVd }C]Nz\jt?^Lxw$O:&C$̗F\MFU0b0nӱ+6q3%rՒ!g(de~9$CcMԿ;MoO( Z & BRq !tP( y PzHxvYL1il>d[LA9sƎ)M! <>hVr빡+=N(Gihٟ$HxBrK$É9FɐI@2DW%Dv**4OO}F)j"xMj 'yt4:rR4qۼR;'?\ZhXzfژSe^SD ,G&A $, v{ptvb#OGlsJ .(]L$Lؕ#e{WQS3$ÆFq.JC{(#$: |C$҄+TmI=(9> 10toMg%k" N1.^Ҽ?г3:IEB<-'ʍ&uwq`ta"_NuXdz=%&Q@hn#9 FO̗t=Cky'0O5ܫ>OU9h\$u :H I:z:+B*^߅a7nBe>w$M&CIB&C)w8uhbo|.:q\|{Y=D(7  "E;.\{6I ,u!aCCn ӄ$SJ.Ο?_;J[d_ y䑊zru-%]\3ZE1^PSJ zj|6JW]%U[;p8RDNdȧ %F=vn" 0̗T{ ՍO^IBa.֡Z3:41rvdxM2%q0Mګ+0.i&HjAcWq ? 
Cg^ 3)iXI:qɐn<t]f(w52 mPQniCY(Bi )W\u(X:$`Ha~2ԏ{pijk"r]o]Xf:_w _d}9DKW88&$$D/ 3t֡ e\d(3Ppa7 q Z'B]8wxդHFM ?rPh\;0̩l/B9ˆ(-iy˒ / Jw9:!)](˒ i >IS:<ޗz!` iϥ;?<{$3_=$e')pJٹ$c2=K -OT˙[dh޿ =[Z)Ց A(pd1E'I\wՓ %8/׃&~rE'__ xX'SdHW-0ޠ5{ilk1tO@ݸk"nW$OuɈ*G`:θ~t/ws;zN?-%AW_~#],rAB=puM;8h``tg|BmK93XwpRun]ቈ&C(Q[-cз s'|\":(Э6PDD HVEQ V1X>֡KbQ"ItιOT$B6rGE6%܆.gHeHJE"TJ|{ "@W𪗽_%JRRN .QLoQu Ep u1rF 6j]>=Z`8T/KL{+"PhkK,J,5B nٓ&po\X#Fjes "j~/:u1$?Wc3l2q`1i7$?۟ BKMDD+دoKL\dACW9]У Qp7bL۹;"!"""iL(׵υQ u"pkά6fhs܏J?DO̜y#G ,3,;bP8!}vhtґ#訽0:}khxOb36_ȯ0å߿vl.1 pdȟzUDdhժUve 8q`. W{x|y;t |A;4y:Z5:,sy}uՉoRjksS:;my6=dzJPv=Pa,G1n2#dP#Q_7i8?*nFd(KMvDᆍ5) 80|vHJQ!&]$ºU_ͭO9QG34]/xnt_)zuK9 9 ƽFxq| R&C: M5$iO hЊd>zm5Na+>\{\z|N\>I yS/+2Yy~I-ՠ!P({><ƹ]8 !ni =;zc0qcçi/~Kvdc3?{]_ vnͥ+\ae^!kҭ) Ayu0nz׭OzW(L%L2)qAma!mѝ( QV=ex/|D3jz|$譏a2Xk"ԙA$O]qt#dq_m Ւ!H[}UK(ϜDYa1s4 T, ;9Zנ|6Cm̮:3DV s48N6 TkĬhDDvvKj1^L׿mo͐)$DF:qU=WL0w.}f#"j7IVOw!dtJ qvvØX(,gRL>Ի.[ MYl2[H"sfzOo,1IKB®BDDBk(4oL6rsdcKl=#ebW""|+r?n![PaPvTMc۟ BK]*I)Ajȏ8i$'Z Iep*l2=usvHxmázYb_nxP]':wHQ$ V3!I=|=#v&BIgI痵+?Fy]MĄ=Z`F<{&yk2(I%,q`t2Y9BҰtRS Dh#a̯wva;CIn$̇:Qyp|&n4g{ν31wYy7(R7?t{x[9ceQz;DD z!&/im*}öV+)qynfm:Y꛺dHwߏZgחsNpʼn@/Nph}78,Mݔ@,tP#qԳ#""y'lsIf#key:Y꛺d|a?Q<1mBl4GM'ugO|16]~ do!d(熧an9mqe0NZg.^f7do6 rO`2DDDDf7doH@3ø+)BYhڪ@ """rvӟNiVK>Pu7O'Uѝ" $@|@DDD?ܹ[B;4::d?'ODa`` t-Mۚun߾ݼbhK.> """nvO>|67V 2/=>˗M4g "".g7U>xz zjPGOo[7捤($Zdn#4=v.n:gΜ1 Q#Ms<# Kh1&M8Ƚ4!"""jo'BPhժU57BBE c2DDDD`7gSM?ٔ>""""fvO>|6M#"""n}F8LX)Q^EK%Қ!Zac%0DDDDuY&C)Q'l$$II 6DѮ]Db߾}{d wIϘ1#Zzusرcf>{nfr5"F(vq D,^8ڶmIVN<ȲeܤW%n|l۱ٜ:u """2%V=5G%H{q G$͛7Gccc6nbb`^&Af>/^%$W:zpH ٳ˻9bzn^Fw_nݺhΜ9 cMޛx>gIQ&a66mǵcǎrRti[^E_}K%xMDDD9c5k85Agw"""v2esεkjkRDdSp We9t"""j8eOZ̜NeQZ$DDD 8{%a7N, 5r sCDb sV… or9,ADDD I\ĉvKy Tbٟ ""^3N˗/G9􈈈.acocݚDDDTiNGp3U<,?ܿd.Ówѕ+WlIc,Yn  㚳ǣݻwm6;v<Ώ װ)ogϞ #āja:tȖdc$ ""뗨A-dhƍv ի/# /pR+ !Cte#,%"""jV9ydhѢhlK얘!ADDD-`7*x$=%6J, ""69t 7\ijE>m`ĄdS3\/HDDSؕŋG۷obtE)/.6mvaKS#/~G%,W% ݘسgOt9N8v(;$Sk׮ Au8رcvXj,8|pElٲ(˃k1>}::r䈹P]m#"".5WbD$ H|?Oْ.@ٵU """-$WԩSOn-[Df*O/qJQG'I8p9DDDD]M~z!"""iL1"""dz!"""iL1"""dzi&BTfHQ(oݠҭHB Q me%BDDdƗ`0DD-j ADR`0DDD]cDhcgqX:: #IJVH?#"".3M5Cs( """XW P! 
sD7?t{EDDDH  K>z`9 QHdz$>Y!5&CAL($I ]p 7 !"""=I|$CIaҘ$>Inғc% &CDDD{T$CןSM &CDDnDZF'>Z87idM-Z4 Mw[~G Uωl{_Kdhbb"RR56֭[gk8q"5kֵd #ÅHOJEkJWv Ys̱5&C!ĀDZ4Pz A]WJsK?j^(u mmY4*'A^2-^݈`tz=$tG###Q-M`ҥ`O"ZGJi3^yԫ~DDei kWyeo رZlTW _8_\%j:zq;> ktKS(UWG4=@qԽJr,YR6$CO.\X~=N$*wfR4+n_i&:^5mj.<*i,0>3n)H<=J2|^=wg 3>ˣ,jlQeALjW2iD-|hWbh2R7qyUHX`|R2S]rʸe8lC/iz24ݷNJ.\`ꡧ),u=\Jg޴ӻ$Oz4Y n6Ct>PyWB QZ%޷oI<@Hu6-GOh~h>J4)౛P£@UjuMش(Q"5Qb2D8C,k\ *3;ɐ3]d.Z8s!zRN+<]QjE2es=ohvaMlPYQKX_ ~A̓ 1ڇ]eSb.O7+B"W.2ʕ+uDX'8[Hjv2ϸAht2dHSy@uZ,H*%C1ClذAϸuTu9$4}7%C*p\o_Bqu ﱚygelPEBTg2]a3 I@_{Hw%%C#t@Vsh-MtF'CMrwH N_KgҤǭ>zq R,K."wEMIh"%:uqi8OJ:"Є@Út($duW:z;&_kKI叏Kˉr R+è !J!%:rn8DDD ]/=L HLH2dIDjM@߇DEӎdk$ѣ6"!$ J*#""JE7(-tuf A(N=ɐw"""TK"*$%DIIo߮(I RK A(IJdX,j" """J%R&C&?!'DADD]`u/:1^(H" hHJdh||<DDF_$^*zQlޙm̕-q"wQ6 2$C(wpm;^J !BvQtͩ;vADD 3X'&$(_梋K.ADD;$,L,/fJ%!JPz!}cDD)Z/d(Zl]U$$CODH Q ""`LF%2K% }S=CY Qбc:!j5f]t.^2DhpYiBTk2wDDi8 u̖/_nb'DH{=C}kdH_ ""pv8'a[U6HKTg2H~nrB&ڸqQ`2D`;D %B LiB&-qQ!j$fN>mˁ>1%:~50B&D aڅ cիmu&C.ޡٳgۥYpr}`z*'CaX?ɻ1u &CN%;p]$Cq !dr%!!p3Lex-C244={̻/HDe3D톛ꖗ ?{Yɟmz2#&%CUsNLj!,\dI422RZ+P7#B`;'e6 u{2Mb _{%˳ ݻ]DDԥ=Z"Dycvڐ \]GؕGDD]ۓ!8-ӸWgDy9=stzK!oDDz!I ʧf9 _OL m޼91c9ѣ4M~?ky%.HQd(iw{k\!܎=EͿ&CN -hΜ9dH~~Mzj;zG!!p'%C%Fc7IP|SK 'CR56֭[gk;w$IH~4r!""13PO%1!T ʉCѫ?&zwûp 'zUǘ ^K5|<('4qO H~^uGnm&l2$^-.`~lG""j/lK$IHmɢג$$!x 4~-y $C%jM$>i!}IdhG="+I٪"DW.Au4[BIQ1.dey4Cox )Dm&OdW7%Kr'DEkQlXH4Uò8Mvih|CQ6n&R/xDɐDm"OdeMrEI )ʧvb?6ؠRg>RRb-{:v"6I聣i:* Bs!2&f$Cd: qQ^"q| t]Kjm<%B.O ,2.MHbtΦ3 m9LG2;*Mt؟ƯH Iޡɐw{~d_J6=$μ nLN$C7H㟒Jy)Kr8X:I&C(DgT`kw<БCLG28k2 M|P0iUd!xibś|IlH bj<ٱ'A2ċ1Rnaw+,$8}Q$ 9 $l7KyM\P8(Q2hCq91 B%pۏ+%fJGgՋ@%C]e T#S8^ȇ,ɐP&Hՠ~h> ;}(6$&$śmf4L2u Qn,(li_.".{zYqs557:B2ćP# EoaLG2qP%d#|i|8x-Ϛ Z C&w?pͦG)-&_1Bb@OKC_gD)Hd^ŕH=pג /%dpֹZ0XthYdJ7-l-pz|<($>Lu!D2 Tajɐ0e%ys̙Z!@Y!?Xp,P(Go?+vzfy~F('!z_E+1kMn%C;-]{fi|σc5dHdHa1An"G;/HDb~<0EfU\S8|(C¢4q !MDsz4 rPުd^6;Mr磟c1㸫D-k 6ՊY44̀I4dȉ;Ig]dJ|{vIyObֿ'.5g+Dm]`n]UF=<& !1 U&Dݑ6EvPS)%(w}OK vKR3x:iL(l:i7#wk]f~Q~/(u:,K. jnpW~Șl?0ȴU|΅:jbt.Ul ސ" J 7x&_$I4R&Cs$%CH|(w&C2`W(E6e`Yܘ.*gEU}ʍ$zKdMN|iPӻܞ"ipaɐ>Gh7?=כ 7۵=LE;es%xYCO3d(.ۃD@\Fbs0ܮd(ğ dCCfcA!+Bʢɐqq+BW(wBM}|}d?e`\IW\(#]-N1/ZÆ?(t5/pd(X9Bڨqɐ?;tIv{! 2x))zENM8~`u$C_c(>$C`,L/u-Ǭ6;DDM8~Y1C!n}=c e15 %smQ:5/ǰ5ן'Ռ\zVm{.^#e:bҺ,=DqY#/P a}.]g]g;0ZvH~0e= /DDpWqK=] m0B~}Z/n7Z?&G0\K27smC%m+ᛴ{wq):Op2E0 %q a豉Ƹd(tP౿TjF2Lz~ ^h($a#(c FOCp-5*iîɊFw'xcO>gDTһo:W(nvtddH{FҬ:^!-ƭ_m>JtWW~q?f%CyAf岁kZBD20tǔPg% 1 O܅3HP蘙ZPa7_IP(˅qd()ϡOO"& ųߍy j*MD$Q2ޤ5o|vtdJ!׍Z!$T~OPn$]. 'M׎OM ~QkSIOd)űם$ e~2qgz_zTY`xW0.. k̥$iq)ZnkgX$6 "j4js>nO3s<6pN20T]Ӓ)*!@B!27TgD7ѲP8B*:CҤtb\BBcD ѓ[#ݟZ}  wv0"jōv#x"WrgPsᦫ85C0Wqg!|*̱AW}.|jBg!CDDvWq4$Nў:4.wb/Hߟk- wWۥMIı@ntLPaT~)n`7s  f;ODh`h]I bQ&CMb{vRT!uEnCS(&:t҅6 .GeeeNGă;9  "JdEJ=Éڥ ,#QZFxNAm#T Q')0nDDba6d<~<+!"j}nhD4!D1?r⊄D 9 ADRs%܆QfU"?~AD$Z*}D[?~un𬗠_VOl n5Nwrit'Jzbϼ 3oF/B/z؀@Ol=5n {Y+K)U1rF@rIDu@ɟKG~y"T`td0"z./nl:6 F$C7?tc=B=Y℄%Kv/K"+.]pYݽWKv5f B%eU 5jYbDaK%jˡ+ ?*裷 5Kz.$ذ1wpS+0""N]2= C25'!`2DDDbr7n#2&CB]Wb1""NpDB7jH0""^DB7f;$ՙ @ &CDDW 1""^YB7`8%z!""vH4.Fd0""n7GB7\\!""'%+=$drQ ;a\Q= Q1υQ ug$9dt٩H0 d^5$CӸnP /Js S.45Cd(jx]L$ X7T(< qRBrF#>ޛ6ޚ̼ gh}aULesv]GΊoحՍWo_'#]_]Fw>;u&HydE=BY7$ . b2D46~mSF&F"m I`#s! {aMdi+$*H mؓMЍ>rwn$C4p,U&n?cWR;ZWt>Bᱟ qcD%-F6Wɾ7I3j 6l?t>;Q:-º#Q]h*IRn z&C!͚ v ʑxg2bدr/Gi"D(4?r}GL6p^m`ybb] ' /Zd(;F Q n}2dBp@snhZs*A"y+?Ig( (ϛt&!;<}A&kg!O{tʯP&T}~îaD<1kZ#$OٳQsumxCTZ!'3o|t$Dvk%ࢧAa8MOʠ^cɐ3)? 5&^I!`0˥uP/iB#DK.ę3glt0M!qe[${xG*#VZe^r^cdTyumX2I7z] ,Zfwսfn>]P$mȳ$Cq9 >cgܝ>3qH]( pPmCz^jP =̜938Pra7=v>NC_^c]+$~yPb=U|p$D65 l\2p˚ IZɐ&kSMS1Nw~c e塯ZoB~R+D'n?M MBtPra<Ԥ}BCɐjd=vH~o:a[cٵ^{ bsPB{, j@\2B4щq$/,qusπzE#!Wxoi:㢻!@($0iU'8 -7'$HPh%;J&Cѕ+vn%&>${6Clc? uǎy49e^44yj8] [? 
wq)O:B;>F~xϑk OFDžnK{GdH_6nӻ ci=4z$/'>t{ nrgχzE=k`\:`8Pdi0_<{_á]iiD>`ғ6=$BO HOfo"iCk҄~IP(1qZ?CuP=@Y\¬,g5Zj"G{?PY35C9Z 6&8n17 C`o4#uC̸O&K.-= n"мʳ_v$Ckهn:r>(G7*{7~a?6ril?[ѫZLtcɗIriG;2<}޽_>o]"B3!(Gq`=J׫և4I cOBɱ[zqt'B@~3g}O2V,"*s56~Y$asӀ[GPI=(KۋTO|Ij~'oUf4ܿspFy< ρ8;}x9)Jf'Cwnz]>_7 TB!@'nPCp7Ðsh슕u<Q`ÖHN#ǰH$7b41J㰞QHұ8Y094ʅ{j"EYz6D(sDD@oјkRpKs J@7\Ka\3!i"iG Nםyl2ԴSCeBu]qz '%CI#dIc؝d܌8zl}>n3@"B0s]!hB6P[qP Iti܆шd(Tw+nOq4m2իQQP-z eDD\+ZOA_ ; s=%g/XUa?B=$& np T[LC%Cj$T+4YH;da7&"ʞbm!FXOL[.NwHNJO>T*HQ+vcccztkuMJ_qfu3΃ުo<꛴h3,Q'3G5d>YȐ ֮5>%i5͆ފŋDQ'X~8Xcy0?;XA6\ͺ}7cw^?ِ XQ\lTXn>byD6`f9#_N=B71%""jmA2dcɆl3}}ؘݼԮ]""2B秺?lV`Ev3GϞ>DDm4*E2lI6ڍ\.8]9}nᦫ.ǀ+KcD>ODFzz3D6p]dIdcE6z$N 7c\([ b('41Idɖ=:TtevtȽqnrGo, QDDDjOLu oAv$CW 9l ȹv45.ջ[2gLscC, ""D#&,$D 9Z Z' &<3<}6 kE2~V*8zlUh _tHR{ORPk V$C6I~&lWul1 @2"4;߸3Hlc;|V/1wQjHȶp"E-S/KEMnJ,rR6WDDDUɶ;fٴyܠ&hv2P*ƎƒZON-""" xo!ou'^q[-f'Cߞ&7v!ZqZ6b(iǸf'CӾ!M ܄guU(+s; hv2aGIA; ߺy{eCyl,i.n:w]t | %!1*ڬ~_*I,[]dDDvvlo.67/Fcѷo*ԗ5$CX87{LH1᏷!"zL"l>b7ԩ|MRtz7k =1s>QB! ?P΂%gϯvSJG𽪽DC;HUԐ J`<4 BBF`"$M(u'n ImA"c2P̐ Q >7v|ў={ '-XXZ%ʱQ!O(*0""j8[e2$02I9,X:tȎM6+:s挙#BHϙH Me,e2 WoLTdM!:\&dH!̙3mI:Y״tIϏHa:w˗/ %q|kH$;эj,[6<_] ve |փ+eYMwR U :2C8(-?M@f%C$@ɏ/k2PڡPj  Ow/{*닎;f?TU Wz_{`6%;y6Յ!%Af$Cn25qII]iWM!s%<͛7O | Jg~D(qdX{QJfJ7Eq(pqMiBhwa~Rݒ }& т6дKJ w!wdvQiťDDDԛ p(pu0& \}i$ 0`|ҁ̚ pgMv&nG>s?ιJsї{F{p1>&C eIh2",If#(?PwlOH\*ׇi$wBӠ,鵜E?(9N(?%:gXAU$| eMK-Q*$CP%]`HJ|Evwڡ̣Z  =HkRW}>ӳs($91w9U2дy(ԭw8/i#|MH@Z! ""w7Tu; u4 q>OJP&CՒH[{>ey'.^8VJƾa7Y_qھt7ɲ|ΡS^Z=sQvi C"""Qo2cx0˃a#NnG=z庫]g?R0i!f~+!|4:y2""j"lN6 R"Q05| }d5 %n:d/Sm<:;}sLRdvB7jR~dH՞I?bQ ߼{m*S|;tu]̾tϒ  TLZgz6m'za:,5:wxa0ҳسádņϛɤnz(R_jJJbPq0zL@Sx~qtRKʮ1R_n2!"kМ>"6O3(p \Y\PkA.~#!^ád(p-ih#1mbW *a7d6 x.w73""",f?,&C*iEDDD]nR?K2 NǵzCס,n.;?"""bvNY!]exD}t|\Z6|~:YM$C潠>$"""^vNL(f?T_E;K xm4u!QtVX;v%|5!aɩSŋۡx|Ju+Ogdd$:uB"?js"&!Oc"""ne7-Y$ھ}Orܹ3;wJϧ1Ql-ZdzR]Y6'B BPD>&"""Vvӧq7GXWиV^k]c.;-u1٧|>S>DDDDn m1q={lI "".f7M&kҺ⮑}"""bvxd]M-]L1"""z~aW"&pi2tСI|: Q=f+7O'J1@׫^# Q=f90Pr?q!$K3g4}%}da7JjBM T_n$HCv ) 3"""z~9Fwsd0!"""7=SNbv 5]f%~sJ 3^+ʘ Q=f<>`0QdPޥ50nK/|qFCy8#3"""z~oh3g$^s(!Q} S[>)}LDDDԭfB>&"""VvO!|Ju+٧|>S>DDDDn)Oc"""ne7ϧ1Q >#>,,qPĄDNYĠ6%i͐X'1sW$Bϑ5$NJ`xs%_b^%Bvv('ADyYfE6l>}:wυ ÇG7oΝ^:{K(2իݻwGb1:w\t;\t֯_o޻XFJ̑ """"""" .k$SѰdɒh۶mdxxa.hpp0ڵkPANJ:aB :EԭΧ>^DK.:Զ;uȺ:ӈ(K:9!ѨSD!YK%pph޽ؘbh'N;aviKk#SD""""""[ oٲ]Q<9+QrDDDDDDD=annCSLy^8:nTD̈́;! uhԩ'1D^H"DDDDDDD AI廋,&] 7p~/x2""""""J\(w̚5+Zf.nߍ[yVCD |O6> """"""ܙ-qJ]tit1S;wyPgŋm(*ܹsu<:\}#""""""jȠ+VDw}|###$_˖-Q[MHDO?]vڥC%$""""""jY8&֑Q,닎9bu^ϧ]CKHDDDDDDn$ie/:MLL؟D =~F`m|DT """"""ՍK,va}(:y$O?-)\9QqHt[|ԀQI0G͘1#ZjU~zs;~nlVDSp=]/mԣyf!e """"""\CIhѹsO]kh&nC#lX$ADDDDDD1p^ƍMΦMlI9zh}vsԑM# UwTbDDDDDDDŧHD^@!"""""4SNٟDwú.N"""""""3nk Qpk֬NS<(, tKs1e58Q'f͚B$IQFҀĠ :Ie/?߿, 6ؒ \Dy6!p07xޕp$"""""".< Fp4>>n)Z 1>}9|gtM7ׇfvٚwƌN2yE%HG8ZdX|a-ΝkN;µgXhyxӕNK@r [%L~scǎٮ΁-[;lH """""""贙+%t޸0NcD,W*жonՎk,rb1ڻw;:( 6ljAfH@)wxC hZ|4pD8I t'͖ """"""" u #>zݸ@SADDDDDDDD].) """"""".BQ u !˅:DDDDDDDDBB""""""""rN!uP QPP(RmG$.H\8'_bȠD#()0 BgPR0Z.ADDDDDDDm2GbL"=M`Z̃z#M """"xd`0 Q QFDDDԵB `0 """J `0`QLݢB?z`lc(:y1DDDDDDDDԃĎ5~c˘:1DDDDDDDDi`P;EQ"v 5:y1Sg$K̖Ŏ!"""""""@'OUo. >QLw4;$Il`Q\ t xw""}8qӲN%i-G1DDDDDD! 
"J1W?C#c?Zzu{ɓљ3g>LLLybx<[cȏWoG%u(CD HF':{ D_?xͦn􅹦7>>-Zm?SŬYK.ٹ$shϬwp:J̕hl#;/XrH๱n """cu~;C:WG~Q42C 7۶mzƍ1;Uz2S-ЛWgncȍ# +'%tz: wh1t +)u˕rCDmI\p61O#ePC~$((|Ya=tnIDDD Ԩ!?Os$wP"2jՒ3"\%{9prG]w]s.Q+tRJIwI C]lP+k:](Ǐzl-w`;wUp7;4W7wὸ0o| yVk\\\#։$n]|gFڎD"""a~҆*$>iJ2i[mDJ!%KDvFFFchǎ… Ν;ZG)$u;ж'qԎײ Ka:]ϓߡ4Z;0};{@WH[?O[_B}NtYmyŪ%q%9IJVmϙraD]Iq+W4Oc$4^ý+_N!D;pg[jw%+޽{ͼPCY(wiĺGJuZavdI&m_kO :f܎PĽNQPʝ&n:_y4q[GQfY$<$'I V͝Qw%1&~SFO6*v } 7:|Ѭwߚ1|r;kp.#HdutIsiw~Ix- q:]z~r.^4;rj}t;mr[n."""ԒTȒ0Q"|$﵉nJ( Ghwpg]1t;.PR" S?M ?yW;Ix- qIaui`7vMځ;?}AkA1gui;ZIS*$Snbd&I/\HyNCDINؿsO)Ñ1N___ũd|[%9!O%0S߻B^ HwX}h)lvN_A_Tͭ▋{tI5l;mfͷ|kLGPlQ:pv:41 g z;m,%.v{N#T1Q:_|nnFp痴\9zvkҺw1~^t 7w*;4' t%v u> 8յrR:4BDNi[: u5c-wڤv iG'muy@i?4GW:#F7HeDgu HecbFDw@GW] 1 uPOL15vihOCHCaÆ`Q{\cabllvW4@! tC^:lAkGx4uԊccQH{9Ǥon*$eǐ {!1侦Fv%uvt ]*^ 5DDDDDDm8Zڅ;(pI+WncӤ"1411WF$fDEDC-jRv2}:`ݺu& """""""tYD檳cHe੥CHch """"""")JTt@E=@ R:|R:ydk1,ADDDDD91ŋ-Qv{1B@~dR2,+"""""jPh_\oe/xvQ:_MѤ!g@V """"6@GC(Ag/^) K̐1 tDDCDDDDDt2sjwHNb?'* tGs(^c(F;;N:Um """"CNQ*'2X- ?'S #Ρ$p;~d%:x`k:aP@Kh /SnQCz!"Nzjk:;Zog+^DDDDDbXiI]"o%HQC %fKL*Q<|WW4>>ny RcPB,kgЌ3*^YDDDDDb wHzIY ?!!HT|Ƕony RCSR jWݻ+^ FFDDDD& wHcqJT|I266f} c[!:w;Ҩ!_RQ;:pԩS+^%DDDDD wHc&5vZk*CD =bYBPQ;:6lPl """"61cO~޼1DX\۷~1viCfs;gֻo7c(NdW$ОQp;o]N>mu:BBF^cC=즻o1 jTȤ#l """"6bPN@CDͱQEX#1c$CH$Ah:s jDh\Ѐ;r]"jtHT|qshɒ%щ'Pɳ_a:{pv A"cGXU#b.6CrC9. v 5 {gdǎΝ;M0CB9;@Q?{swȑr}/ """"`PN@CDͷ\E7nJQ>B;p*Y3ȍw DZ'/\`4:BH """"aPN@CDԘDwG-\14#4chvNzB3fX(ADDDDD9cȭꉺ {[.8,dGW;9Z_n&Jz2i#qDrG 儻 $1Dz%H`"Իv2X6;""""" wHd=bQ-p19D߳$Bߡh֬YѣGcرcٳ"""""@ wHr* \ßW(pADm~% 7:chQz,HQcPN@k ,ͩen`O?"J_N[{W݅}}ϋQsЕ+W)kwNSw OVF߻]p"""""":z\߮S$6HQ: %C~}:=U<1o< uIߚUc;r]eP!kGЎqyQxQ& }Q-S1$^-L{Ażz:Kga Xov 儻 $1DD)\M1dB꘺2Xv0$bi;p'uWKDDDDD=%˭5H4;r]yrx4Q:y1HWiGRchx)N $Y.m_b?cGŅ֡}# F ߛSP7ߧ?QÑCp1K%^#ILhGǐGYi`;T(pTjԒ:x'd~'ΤZ(LH`ۖHlBݘy jN[+r"s}~':qۭUsaSߋy:;t|f׿v; wo"u)t9BGAdr1sQ3e_Xf?&R ,)ƣC.\9]BI\jBq;U;Tß/Rj;}t8v'^['"j6|~z~QGÑ-xFe%&dn鹥kٳ'{%Һ}֒%KmK7eJӇ2L۸qc{92۰yW^yeh`̻QwGy y+ tg@@_/gtAgðr`KloB\c -w{5tޯ ЛCI!B T/[x,) Ac ]Ԩ 7|s2in0C!=n{.CU&Cb!-'?m^`HtW4}zvWA4 U{r Eza)YHi>c{cLq0)u˭ѩu7yէvWiW^jժt  Y Ml@n|!=/gHI/k^`H\njq~nuUt'\ utӕ s;4P5&Rȳ!_DŽ9a4]=^WPlZ[nݠ;fkk3i~ڕؐ࢈ GJ0$GMVu+V$g`H/3k0Cr fá w]pZD׷% .QR@zW '8z+]V@C\.JJMOao:Ǖ"D( yb0q/t9N@myZ;^ [DڔJ c| { tlBYzKҺر#c.p9U6`?v (d'W]||߶V~Ϸ UG0TxZgu~ O*RG\]j|iUAkPc (\))8yJU*3;CaLt]A D諙 Oi4ֹr-OVi׺⒱^uӦdwWjW(\ P#+haJVQ.Y|.yS_Gjw`Ȯ{lU +/ɲ~վEs/hM>ş,.:h]g.N֭S}XCto}n) ||+kȇC'z+Di,Unѯz3U>[Ja7m絏u0t/ћ=g1C fdWjclzd>bϚΆ)Y!DrRS%Qu|mQQ% eXv~g~cӿ7ݨ[zwd]q9ָ*j*eWhY sE!z3E 1KPRAr rBCo"tB! u} e*kd lRg>춨ʰn{dC~X|U7m{[>ϵ?O[w@gkO^vuUEA\29HD6>$R)P)W~_}K{zM)!Bk'`=]#WlWзcl޼9}X6 lB-+'uۢ*_ΥK$y嫲^s;e㯵~g;OKj$<_h2c %,zEC&R`z ywu NzW2 $dyQ+춨xn{!ᐂ??O{We׽~~;@ (y]e5~P+`QX7e\-oqKv,zc yW2Wk'nnڜ{ve5m8 ˤd^n>q|Szvۺ }/[gP!Oc\?I>4&@޶yuwN\?N=o9k՜P:j+5&Bi y7%dIO=nbݭֻWnW^C_w鲨pa|X GY! 6Ea,+b˰!V4E4]F=vVe! á Yx/ہ _mܐ} %ؚHPz^@ћ j0l/-u "U:JSZn跹ݚ>:kDS_{i@#ȏg}wSƍ_xܟ3$5G=b_|}z@?ћs(CANcoyl"Pi4y5Ex Yډz~Zkߡ_,$3Hc8dho+7MX4Ҙ@/B0ML yv<`[; F{Msf#;[j0h4h%4`AQsݱ/6]` wo\㾈nq5npR]:WtμyUOpl:3I@D?) 
ߦ~߸ɕ3\msk:W'b{m|neuУ(I=;KΝG#rulzUz_B['uAuycM `A8nWƏ/5ָŢJoKBkmzT؛֑韦_SA >-APtczΟj/]yk>̉֩7lkz߮$(Og_jmy̚3-%d`^Npu+ 4dPMWGډ-j43i<7w}_z Cc!V^p )61R\iZ0Y׼D?hڴ-]C.'& 1b iim'Mk2';M^p FveWYf貱:EBڧ᥉{.ɾYӆ^nVi~]\B3k]NZ;q{)WEQKTr>Jί9Õz Jjmfe5n e0vxv Rdzps bxeevæ*m =QepvrBe cѾY}ӳ˚x8QϿ1#ٽK<-;qKaֆoOz0z:G'WW}jFV*=3cũ%('t+BvbTn{+h5n *LJ04vڬPȳ 񰗆]N5}(@nGlZ{),PedpvL0҉a}@_go=}^`i'\@kݽ-}Cӝ |L6@i:jΗ`!JrKh(hCCPAr\W=QlϢ2$ZyefW>T`$ƾv)};6e狶N*^VSn9󷶹/I`ᄑ3@Wxkѵ%]q*,iQv1ۉ20kVao{om pW*F!PxIJAB&z ٠JN}ک2H}رS渐NhETfnWuzkl)Me2kT֗QKiYuڴ`zOLӃɮns5 Fuu+bvb8ޱd5n QU6гنm؞D˂۶!&kyeUc牽OenwKk[0[0ƻ'[65|Y@C3aO8[{)0AT=Y/y~ڞYP>6~xsKsm_ce; }n?NtVwɆo]<\?")8,VW7/Ƌ?3 Y[B:mTi HKʰk;jplG xS7O*W&k_]VVt4;vu,̱x!5mm̲}}W6rϺ5 w%CnvbW'd /O+|};oŮkxɭeOAy }eE!e% `A(l_PL7!` {v<~,ݮNUa 2y*Ӗeƌozie5P#ܶ,v/2eicN³XXgczɼuCvZmK^ Z?ҭ504j$H Uw#2؞]Yv[A{Y _{y>_b쥰eԄY|o_;}ǂ_ưCr6X@4(nEJKki谽*v9.b ߵ>dGFuLRy+oy7^BV͋ɸCh,~@7SݧY2QѺ3ZŮ 5s[Mkj7}.ogK/}IPS뗶=YAO=sN tA 0TU_suB}aTϡC/O_#̗ Tu:2ӣ4fX'5פvb Cao3_z<ֻӼ.+h96l A+&PBH'yaUUv޼}b9|f~:ڵ$ӟ:tw5S4Y:W}{m/>`z Juˣ.OIjtyyXd}Zn7X0 ?b=@Өb(e+o}:^ZWb!U?p˲ ~,{BĎ!@ɿF؋( ?p||Ls6Σ=ȭף0 4jxEJ Zq$譵宑H& :cN)ZscIu/2݉uknݑL!xvƖiT1y鴗K7#+l]ԏ`H^WU֥b1es-Eeے1~-t^JIw+]vW\0EReb4T?T[(o?Z;guL0$ tX1bCM!WC lzd1a!=t I?u,u{Ug TUޓ}'@ց4ZB}IGҥuo˝R]Nb\-wYۼ5VwE=%@1:g0\: z@;6W/3 AЫ~U'G\ק}.]b~unWj.TK_hl4գa:CbEQEQU}GaO?(_ VXTle 7~-bSEQ,!@Nb@St9%vP-\C/Y㥬2Px\Sv'/*)-?.6) ru2^.u/W*vYdJcraq*;@/MBj`ؗ[`ZM2>\CExeK#ܐ [{~iӱ5v])R]>zN_c 7_`VW9Ps#*u)tٮ5|(c.CB.tU bWE@u9~t,!zs?F)WA` KcJcfY,2} 4Y%CPlb 1[{),bC0pUKX:$`xwkpָ݀(!nW-5*D0LW+gcwe?׀"x0pqeϕ]u껮r nW/! ۇ\RWU˕]Ƹ+yW!BUx!@ j_} ^ʞ/ q ^Oj!ZvBrgzoegL.frdɑ{ϭS#{F[EU}]S#N⯵|~>K}T^d~19˞o:8\\#<נx[Oh]mϾX|Ͳ#cM(}쬷}oU @)4yx4,{xʗ 2P7ԈM7~zH`f[Ov[kKڠchQ6$ m'kx ,aȣ 㶬p= }R$l5*WH/,_aРk=-7ܧ QB>/ Uzha׾N;]k+*_ Γ4 K=\E5<\/[<~0(^ > VO:Fck%=+MV@pVMϤmLچ|( i0T&dHuv0THuUޱ!6 $򪮫vQ6 04*:Lo+R4 {6X@uon=%+9oӐhMzJ4ڧEqyg.9K, <{ۈa!vZU ]f^>./ﵽ2 |?zx<ˬk\3c;U!pQVOzildc-!J.߯FWahX$;sdi >\N5: !EI\MFc1tXt F^;<6caUuzK o3-C=TlO [y 2aC}{*{eUl jl9>WduzevcmB!7oZY%c6Պ+ZWnmܸ1{)(V\pAkر##_7އnd],Y2gYW^yek֭zJ?[VRZV]~K]l@ten_Z6d @{~˖W?>r]c Yظ/F!eCNם6m#_ 9*+4=|)оղ4ث%_vVaW&l(bR|L' s6aQx,c!Sǡ4p۫p\1 f;sנz@B ͛4  k0, (ӬZ*w>tZ={g]VѺTmUȔ7M`̻QwGydf/ FƯpo aɁ>0ԗ_8JP _  }i{ _^Ӈras! 
>Q^ P./K'NeBẨ]eP}X]^>z]fulc{m q(vm+3M(kOTycX,v.?LJBPHxRS % F*O] i @Weme+S9inTjX}}˘nۥǪo?oe^}Ni5l EI_Nn.{Má迤) =aA'NYۗ7 ;}^>.LPf͊kYveղǬ5݅^NʮcNSvyWݧPܭKNԫl !~:U7uƛECФF ʰd^q>H0Cy< F7!A; K(d ߋ>zTeXzz ؠC b5BQϧR1XJE&ʄ"h "\b}`; אڢ3wޙ w yYf?#U{1Yj;W/5 zUNN{Fy㭔 ElOUN/+6eߓ,6Q=6<rVK*PVc- ?2'VK b192~/|sW^%R(.x4vy 3?*h96j= -#ܖkCa }|2[Nl鋒l ?O[tmyq}0Þk6!k4 ʱs-o _Y᳥ ~ndM./VvgOUbLOFhު*}ס>e5sY`_U 2;{60uIv 8m ,aP ++Î<6RUu{ml0"=׾a^XyBμse{( ʬyʼWRe0x+ B{P|ү9/ywY2ck\V@ᗯ 1E_xl#:yETZO;U^{^~$Yz`[NZKpyN]tOѹ?92urt|؆ѱՃ5fe˛sk5k@ݦFSקk4<9KKcQ >˴L?::i] ύi퓲 W}Ӗg^[h[˪mXo _;OǃņF1z>3 I㍨_sgH/ezxv=Fl[QH/v;}lC_xضT *YW|v_mzgy>~fz7>rK`htդPjd.kW;Fnxฟ٫F ۶k4| `αA祟 ?mWK U~屟Yӆ+*;Y1: 쏘mm0?S<8΍A?ܺ ɑk`{oH~}!֗@EW?c_ 2iT1y˱GuQ}άu}?LXe]Wm ?~WZ~_GÞ2qM댍g'Zs+T34k~g^Ҁݹb?|Q6]G=:){^ 骪wLsEaJEYoE쏘mNgv~޽>nvG&~թWQ !6P_ Ǽq4.=dW#ƃ˵ePe}Qe{t>  危TԋӨQi̟һڽ|4&P,ş5g%x"v|?PAeh"H҈z;ҵZxlYe?|NUe2..;}Qc{?%d^mrte.>_;O?A;`N+<5v}aU^e 7]/6~Vu1.+bfN'BX,7>xyc*nvaj) y46A/ߚy5]Bw9N_:qrt^׼!iDt{^J'l 3*Ț6\^9\vz9\e/˪c X4.W-U]VPyqu)3#c[v׈ڢsܓbCܰ\Q8PEؘ4j~CYP˙O i6  _^M-ʴ-[|on_Mnk `4VT?hvvc:zL&OC{X8U|rkfz:]ꙡFuWZۆ*R4rBzL 0CRNcwl B*켪SRYU{iol%{:N[f7Ϟ;ZWj m{ `94ASW_~0_Ͻ| ^MOR#W l5.ꡢP `h:MƲ^GK>5m eJi٪0z- 1}:.!ˣ'Y~O,٠HqQnǃкoPGFLjs }ϡCҵc{ c0r 9i(t>o6 Vn;.3`o+ /^SaJ |2^PqXRP۷ICܶ,.58=Gݟ~]k/nTZM~&GIZ3^=|8B36%d8B tMoi6b-|WL7d[k]:j[?yj @YʞǓG+|KϿu{NHO;;~;PZ]- Gvq Y-`HckBbi";352~(ܔ0E+ ~r?:ܹtί*AK}O X2~fT%i@4؃%fzmZ_[M %KM'G}]e7Cq{e ϧ^A -GCr Ui.pS#NyGE,3cUWAzq1,:rǁ@eS']C!]:ګ\#4W׻VӸ(*p%bX՜Bǀ4g{ ]0\hl*?M:KU7 ?Tk\UƏW7~-cry7]: unZtGe#ۡ@>C!2LF6z04h\aNqt;P芋}(q҇B 1oLqؽH4RZ+zp:BGvfR;ΑOR0G xl4+U6W{cEtU+Sȵ=@voѱbq[(n0]3ƭ2*0 ~Nh(4~g0PB|Vll~;nyUq.tJv]F {0tyK-,\~~f^^枻>trC"4ӧ荩K}(49:;}ذCj/<6'Q4GczXtgU \e]vNמvWHN:6_GcYn ͩks9c77o@Rսҹ692~ν4 {NO4vY00/=4 ]!Օ>u9n-8( ~^~FU=>FeaȊ ]to0>v[[[/m{L~;9v} vM8=M͝wea` U+/<_ t[3ƭj&G7oZ:f8m觙W[?]]l&R  ٨B t3ٮ=eͿy& PKSW_C]vVq|(46͸B[!]xȕ˪#qU8T Y܋i4w^W%c[_YBa}`}0Lgjtykjd|o;4}M\iڪU؃h>0&G(IDATƚj_.B7|sa߁nҟ5jN}@prgF֧9As`TZcɆ1K,iZ~1S+Vhر#~ ]wusՕW^>S^Xe헃.ͭ]cJyM~ j`y ?z[= ^FƍGU0$]zuzu R0$3, -F`hXf ,:gwq/+WoߞEOg_cJyMW]v>llW@L 5󋿖~-mCW*ېϚ߫R" _< dFh{ e)lGSU ϛe0d/[*| Z0$h9>ruPMOOGNM |iz쎿DU ̗ ԃɑ[(<Ꮷ_GgYu'v;NmiT1eOc)A~p\-bòp%Y d:-$eC]e/ϲ_W! {[>zF>w!2n= |ŦWiýcҥ#ǎ+0}x؀衷à4"k6y?s qM\sMg}vkzz:}e";/yǎ+0}m߾u'h׮]35DB{!f>3qTҸ#G$=|HTg$ ҿc-RÇ@}k!4_MA A:D`8 U0tW={Vyr6nܘ>zիںuk|,{ǎì?RI~X"YUVTsv) .hOsu׵O% b˭+t%JDkzMU7В%KGe!oRR㗩m=@?$as*a;y(Hҿ\Ui/]#4.%Ӻc/!e\yb wPp"4Q, Yb Zp @%ae/ ShNWf`HlStZ3!oI~EkQ,*'/d]F0-i; {].򗐕+ Ŀf,=#臤q?-m [ǔ 8RϻԌ`[Ҹ_-5X[K`Hl'v[,CߒB|))B0-i/.U[T F*;跤qTD`8 #A:D`8 #A:D`8 #A:D`8 #A:D`8>EuR:l/JIENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/port-chain-architecture-diagram.png0000644000175000017500000003716000000000000030607 0ustar00coreycorey00000000000000PNG  IHDR=e sRGBgAMA a cHRMz&u0`:pQ< pHYsttfx=IDATx^-Ge C"-4\``hdY ,( 4=(d!+ȆyOr9{VuUMOOOwi)gTկﭏÇo׿ X+p{ |}~۫klG/+`)A!> X+pc OF፵k@QNa(t'V V X_Z']޽s'Vr>|_߻w SpH+` ҡP%`=ꂬQٳg?&M9Z58 %BU㚭⼬XYQ0&OPm>?~O7oKJ$ė_L̴ ?eޢ0(R :ZeZ+e,X8;+plN@!4=-(|%Js?%05Un^8g0y{\X*P[Xm9!Zo\f]-0RFf4B;hhK^  (D[L(TBrAmVBq\L2KtcY*-zO(( $eD!QnÛjDH6:H;g*p d$Ҿǹ(debx%Ik5L4A#xDX۞':d}K3@&-KI/YWsf;QQ(;pj6y(L&UDREy*joDwlFV\ٕ6 2 &Gbxn\MANOm 6W"dve' aiԗi_]hj ZtB!^κ25 /= 8 /qġH@6(J!`r~gX q3 lZ5P(t 7 =PBayDLtB_bnjLi*.>O"c:DRMK,sm{*SFUu0ȗN ܔC()E\Y+`nPUV +`OX+`@+`m>`v Fa>|ѣx(l nePo}7`ó/+pH>}NnF *o/8|Qhz8oFQ)zA m7Qh;¾FsB߅~ x@e(țJt~ 1eV %jbAQP>h! 
WJ(4 ]m(#/M3||B%ר'9Ƈ3v 2> zQZ\=Qh;NQ~K2|Z"=X3w; D=|-htz.Q'U\a(EeFEas:b|K2EG9qrX|/QX}n|QT\=(D8nU4W[ 7 @.0k%M)b, ~ŇXHHk!Xȩǖt^@a(:–*Qxd bܒ 89 BamkEu Ar%Lf'],3jp)%(L䘏N4yoA>," cDgكi#<%ͫoRMmL^Kv ՗wSDIFAPHK*^ĄF){^֬j'tuGa*CLr(?*jWhlF34luW(%M( (?0FqPE:ZRoqYj| "ױw% E}Q*LĊK_,)%M%O2g#*UTGZA 5a7lP]-CB!О%(dpZ`j9(쏨Ux0e IW?]rk lQmT`-O{yFNX1[A! s%R2c*Ea9g3d:բ(\1;I()q3P(w2!+3Pۥd/W@jp'[F  ]{ij*X838u-1V:GsUS=5np1$!(S(%m:t0g(,xD_,L:+ŸR<:#Y&U=m@iҹ~e:}6 @˧&@&;++ȥFP@rv{W(4 }xir+0 0 90 ~6 90 ~6 90 ~6 90 ~6 90 ~6 90 ~& ܹsߗ8wE'GfBxunW^M=[#)W_ 5 +~/+pl>|'̈́0 o]Q+`0 ;V X} X+QN`0 PsV X} X+`} X+`} X++ӧO|AZV |A,/^8ґ{ L)'ofR(kw <}J#(ѣG7)Wn-(4 ?]QhG!vQhQ!vQh (|?jWpK@Ie(Ǐ) )T:'JOFQ+޽{ _|œ'OE ڡ0>|b _\ nR판&+SZ/_,<~x<rA%Qhj(dyt& G02;@ G D QBb0\6 =@0 F\ Pt~#a!IꜢ% KTT6QUUh?jBϞ=٘С%=EHP`l1TVa`R(MňBh,!2M *0"nl H )ęlXHKEeeRv^@ё~YFFQ8[@ҭ)MÇ.Iި4{JX'e>7öKh3 9]Eb:T@iB(K2U hʨ?YN ȟRFQ8WD!;(ƃl  +t!ɂ [`]6ᣡW>~BG!D h Ҧt0 xsRt $(eKD~=1Qhj(LK2OqьzPhv6QS0m4Q5FXB(\B'ai^"x$3 //a(4 7B\8-IG8 i(Q ^GajZ@~J_J(Lwt3(3M)kt+E#U}h)wCFa׬fbPliuQ8ATB!VM&bk«TsP,JXeU ,.?ղ%) maS>^Fa.›ijX\SgKƴc&. ¥P#8WsI,KFwwQMA D4dyU?<ӦB[ȦPXzg4j*0{˹ȮU鰂U,h%4DR -* 5;tH~q% d7CFa m$aB8B".`4 Yw-Jw",>֮:dvU F2BY*̸lZ$_8M!BkBa'k1rգBO9YI1/mE! KNfeD@e#"'~?5i+6bEuSЎwSPGȚțiu+̊´׬Ѧ|hG>'¢^:SZ:hl]Fq:}Z=)G21Ӵ&@d xS".>H?ZJV\ztnN j$8ܙvS@"ʔ"36uj3 V~?YBw 'E<`x GW)(^xqC~CFaF!Sn~G4: W;X*G-kGפFQ~_#عFQFa_#عFQ;B7oQ\_C x4Tة` rھ1 ~ ݻW_|٭_|` @(0}],\+BWw'O|Ǐ8ϟ?oÇWd,gϞ"FrbR2 ~O QZ@gKpCQ~ob'a(wC7?~4-g+djROH_pJԚݚ|dz,vrS4{cD _*L}ǒl1>:7c^RPu=oFQn@!IÐ{Yg 9= ,ء$\s8L>2ct" & Ls5z1 v*9S`"ʖGsb 8i{ފ!BB#( 񙘈 $“) pU$HC|T1kfo7DP(`eSR7߈B~#QH=W$/9s0 ~; #ݒ,NDXN$_DXi|dLPbG1z$H\OFљMp1j(iL3)+3w"9FB䶾1$ҙLC{3tD|WiTfD! )RcX~@(wX%2ZsBah7X" KUcLs$Bó[D7kZLA\LJ;ٵCFa4d^E3G-`DR|y1"$rbNn;!0g9rB|g9q(BrBZ0%z 1UB~ +=e S7>`9KSvj!DC!wҌXtQ҄ #vSLs4uNU+$mdARdHņG8 2 E1\AVY)Qcj=AyJB\6I۪ӬO\=E|3˜+ N\h-TjU7~%\9i;yDlQwd+s4.dUܼO*6QXSvpJ+(#сBL*ǨQh{Q@!tڀuwQh;QX!`I™-`.d5:bEf$"QhQ!vQhQ!vQhQ!vQh0Qx|Y*#ẗ́iԯ^_V !n&Qx3MZ+0Qa(t'V V X_Bw+`Q>`+t7V V X[>ke@)`9w;i3N |WFQ8xssTOlf&!]U6Qh~ໂY( [}b Fa _|s\R(4 =*(?~NjuMV㶁#!/!(T>|Ea"kV(4 a}r0`_Lb_`QxvBGÕ uDx$(nGɕM`r8F2"+>eXQV(Ì,h[Ѧ޴ɓX/pG1 B):`Mp>& za7ˤHf5 SJ*L*gz1iC>*>:1Ss$*358ɍXH i.ε WFacpl!-N s0hToЕct714 VZm4B! ,T&BPTK@Ui)/_`7 ںAfQF $]+QxMtBr9t7y 0>M~h d  AۇJ1 ~7iBDD{$W&RPvCdRC'?=# $F&*LՋqU)[_(& 8owFሊ HCa"8"ה5s)ML0Fi>2 `BaL<ՈkJ\iEuL_N&`II0rAkv^!QVM ѐI #aI)b$Vavℵ'm'y;! ,KNs>uP^>zF fa^Sla\ΔE0d 7砰tD#Iq!]TSp2bbe.TnTʛMw. Kk&OQ,MҬӊD)BYMN 8F63נQHsh*jw/Bn"FQ$~GAAךּAWC: `vᖔk 2FPH(jAasL>FCd4l^*Ժp7 ,w\K)}L]dG8+5Y岉,q0.Uޓv+all+ssR0d8CKfNEB6D\D ZT[RSR\id4LVn]GS5 `JiG!J }i1ڏ88 `"/Vbg  S .9ド X/XliFwQ]8DQƖ( Y~pRxNBה"DMJCe(N#pY2;mtUx⍓s#5,uqCuiUnsx٤T( ܎k0 ~oY!]29XT(w(C\(waC\(waC\(waC\(wa.!x):fBx3Mnj* ^0~0_}z/+pl~盱5 9W(<|V 0 9W(<|V 0 9W(<|V 0 9W`& /<:CjG7RH zRf* /2(;D#mN ]*0u~?DR`3Qwߡb'4S)Ӡs,Ql% _xH:/Qo ZD!af+Vlpc`R -q?cd&9Z B}v7(du CQ/++:2OK*H7PScO:Q]Hw\u!h`F<ݶk%* <5R }ѡ:I /AD6@!GiZꡡ`otl:}"Ma\a)`7B!o{Q|h~fg [C!]rA94{?ٗPZ8`j\2EDRFǪINbVQMnPEfժ]hFK3DhuPHl*LcF%\ɐ) Y4#().AoBall[iaA6rQYd j.ҷ7:(.V 1P=Us(lOr(𽠐&0 nj FQ qUX&r;&(ۈF=)]*Qpw+!GFv9xaTӦE fruPJYU"ci i _FڏOuvP\KiF4"eh۶4EB2z"%u h4Q*Ƒ{j7󾛖MҗD!rB9j`4eSnjו<7Ⱥ1F_ャn5jQf,n7ID)D!r:\(LqP$@owV!{u㥝w@wܺĺSI.iVuOIpmġBݲ >stWi7FDZr܃H7|Y.UhE:wF!JiL K])N!]a,f 8ōm($8vo҃-L 0#(>yL5 tzZJfGC!W%p&l%/,&Z̼ac4juiFP |v^maʻHF`^WJȌRtc+7RE p+VDu/j$KA uk5 )"h4uy;鰕dSЩneJߪ$$S(`Ǖ(G`lꨋ  & oPU 2)6s*֔1Z-. c:-.V\ܗN7? 
ph,Q8{(\Y6V YELiiYdmҀ70@<0e ի<ʊkN;ªMk-1 @a=,1$D m!z,]h LgGE8 ϩ̌i"Z((`K q,!^g9FA2Ti2_7Y}@:X#J6 78½4nPX@NhI(,4h5} ܃BF^F &P]6bi:B50bM%hlwϕr~=:Ku1 78l,Qx  S-ߒMB WQ@:-8S)4@$B]*g9պF%u4 5rK:8A@`_BuN)OP~ej9KS=9σ:f6l6,QK% OܹKgn)I1=[&@C[{ӪȲAv2xy%;B!mb#O@ĴT/ICGB*(D_z`,)]%D_nAH(SiC(D~18GNlmwתW ؈ 4˥2H1J ˍl W){-;rslNGjw_^\C LpA1ԖQH ItqwFҕ=>nɼˋoiׄ(y={AL⏛"piㇷ ]w`(I8c)Μ-xy0-Qt*y#UldW;@{q(%Qx UQAiFP``}7~߶)“Pe̶B:YˋAdܕ=m.fQX&DNm|q{2a <ŷ Sϸi*:Yˋ\uFQk&H{N}'$=XˋoibAI6 @S'yy1Ir&(Is#($tqP.ȕ.AajoD{qv[RIIeswj ܿ:?DrY`OЗ~O#^~g-[bVU:CQ>}>35|hUKP3Q8 AKlT\o跬.kBN-(Eh iV j"*FZ(ZTcN(QxX5 WLn3Vzqv_?q;qI `i/˱/R3V lD~ا ־ "7k>| w+7\(Z[kWhy]4 wѴ wL'V b(\Es31 Upk­Q]ɣ(E*E3PH['JPp(#'(1랋De}eBzLh\.QhA]qӀPnQ8 lﳥdM: WS&c}!],~MLDiTG4ǔ0Gyj]Lg/(D]_*,/+0Bɶ ǻ\ Q5PH̲A}ڍ8kfxW ˔c^8b9Sڱ^z$FIwBӎЇ)O-8=/1ݓi8@l)4Y9{HU ѸhI (,KłvB8VOvm,6|L?4;-ϲ Tv ݡp//QxV(QǩS}I!re5_vYb2SHdAkg_h,і5m@},g3 IBM;e\(O$^ CSk\ % /Yp,_7573<8'o/UxR~`pm´]#wqCmy'#'(9F_,$ó i>>ngOEJ&>hH,XTpA1/vQh=M͹OPGGi\iCһ x9 8Kw7 W춈сEm_=AYv*|Е&k/$&iD^G.mw(~]A.O-FY3^ 6L9\FP(qө>F"E5 qDֶ i@ S-r`TU $kڻ_iɠ.ݰ Ųuvr1 ݒ\z:EVPrsFqrdeJt .F]»p;=U4 ynW@!D-4#U_Nu;$-&ruOP&z$2k(xF<>Bj:WX=~Cl=gl@NG0pN).oF)s !`j d rfK!Ӹr %ۅדiٔGG~4tQYrb#'(c\z8$QϏ`QdlV b). IϴbS(=ZRäEvPCU6Z{|x18Nu!įBiTbil[잠L#}*d] \eKT /DmeQ)r4l<h6n & 9B-*=²E1F!y2tH2 G0|pN"y[iMRśDx3c7=@a> ߉#(KCU?d;շh{ Mś!>+ښ(JjDe Nmuč¡> }c) @ )t^P|6ӎy(=ȋ_rrÚmո$xZ`fu0ؔKNOCCVJcSa]ظ1RJL6sUTJrP ]F9LJO-Tua{Z-(9ɕAO\q}CVF7u&ѓ@^L)nX96M+v!SR (\iqV)ɈU2 GSqep}cdL2FሶHt U8PH NnwetBLUqz1lT-qS-ڍS`] XdL%7ltI%5Pqu*Fw@n0 Vp)q*%'R/78bQ|2hj3U)j(9Vd_lqӳTj;*9XS\r0)(jC!DauxSؿ]&K4W̽aʥ(%LբѺAԳ;%jXT5 塗-\j)䝷(Ƌ 3P08+b)_Bm%E{A!˫Q0ةE2 m1(DxO Du 'ám#qjGmMkN8ԎrDB݃cmzL& i4h“JKuC:{HcpseHV7xP W3UEJ_2i|),~$ZP.) *7zTRUg ?$AbNeTvF`.Lpi\1 Ok+/­( І*an1ƋbkuŐF?5kT6(D3 aFc(FVW i^QS6 OUlM4CFaO < ֶ OeWjl/XNpDOo߾6Nׯܹfo9/޿_6ЧO?ݲ2g}VcM )2OikuIENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/port-chain-diagram.png0000644000175000017500000002750600000000000026132 0ustar00coreycorey00000000000000PNG  IHDRgKsRGBgAMA a cHRMz&u0`:pQ< pHYsttfx.IDATx^/1CCKVЂ(ȊE cp% K 4Zh2R@@F K+# t{ǹ{_wz>}~}o{OR@ X(ڃ..._~J~jۨ^:ڍ-75: @Fk-CԬUk;A LEͺ!_E:[5Ek(jֵDBԬUQS\f]Kl4]۷oyuyj^^{xrs!?~U=e|PdDZo}{-aO[Vy%H7fIN+Fh UlO=^hL3xnooQX75K*@͍~ 0'^ugVj@-jN0y+tF*>^,oRn1no(UKԄÿϟ?PYpF`lx jzI͙>ŭ95'Y1F *jNɒ&.ˁF VQ_ C{JlL/_díIGlq p#=mj.cc̞=Vwp,RR/Go(^|))XhDfe6B;D d>ſ)B{8Ǩ \R>F#佨&8UdPqX=^?>PD͊AAGyfX9=Ҏ[|X8|.5h2z|M4ִ{6%UH4*E55Q_|AUB%1j'NquUCM/Ya6,V ;60E?i*8Ng_@~"uOG A͙QrU֍<fUpm.yg_gjɈ6'AMug^7`y埃3Yo!5pi1b{DM* NMN.>NlK;xXPpr`&Cp/~}EgʼA͙kf\`j[@˛};5VfO5I.`vqVl)L^d;%OF| *?Eu~;>O>|P~yZtvgPqP_c+M DŽ}-$Yh?Sa./,3JE%h+"[fO!%ް^!qv$`x՝kN+O,jWR_|ի|N rw߽{.&lԴ'*[IA?#>(85I\ 2gΡlAɬ˶$rn5s %(WԴ-OQm24[sj>ڐo ypڸȳ~͸c#Hf/15+%^v+j-9ߥ)*P3OQS cPːAggK E N5+`Q `VǠr୨9mTJJ<*2p>|ˬԎ^v;=DnS+8 R Yh=B˅=iւۢ'ڐ|2ʦgYDhZ̘m'X]6Czjn& ڔvjf z݀ 43)Н4Žw2gl6<{5ڳPO tUkYV{0د\5̵0n.;ٺ7ZӧWWW25A_j$\䲙4[A[|[f Z{~1OV2?ŋ򟚳hL,_9ɈuIk ~M]>7n9]^^N`<~x qd}03_lxhnqtgxNLTdO1j?5gQ8z,fĕ!.8퉩dқS%]yΜjS[RvXػCj\<̞ggliCagYR95MݮA[kcЕ HSӏ6BFٿc j-O8 b< ~¿_|D!@.GWdž8gğVf$(C|88;Q1 [K/5͌ӢHfggtR7=-UV3_3ŵ{d"OQtFMHbaC[٬/3fcjJ.1Q {'|ϣ @.nApJŸ8QCؗ-<08uB,烳Ho M.o2DO Py?82W sq6w]uӫjФi{i[MfbVMxcҎRQs$kɗX' j&ۥFhQQQ<.*;QǬOJF(e5x/OiL i6(= a,Oc-C-}I@9T,xjx;j leDUFOM|G/Vv1tW?em~Q}5n)__φfk8[vFuZN AQ{TN [͍ƻAVz\95+1uUɱB[RADh-z31 -]Vپ?0 g-mP3{X]5ʩ9Vi g=uNZX 5[,6 t_f5QD6h܂^cC d>A?eBܯfjX~>=w7AMFktP|͠q3 jYYI/bs Z=M?֪:)#峃EcAF1i&jN]IR_,d7 50 chuzyc-jXf:Mi.jd3߯P0MM^'gEL5+eOEh9eAFj/IKvL`-YlP}#xIanFhgۊ}9pdbayITı1>㰐AVοYP !P{g^ FaApl\lx=985}v> xx4ɣUFTSZ Gi,@KYG-VN0Z6RFtG5 |j<_[>Qƃ]g#sY-mZ|m-U5ʩ9vF ͳ%G'|MȺ8]^9dg֛h!DԱ'3qj]ĺd`zḯ 
g^l%;raXfB7+A&cyT!se֍Ș `KQgXu.,3e>jV>EW XH̓AC'HXkM.Wf%W ~+!KM ðO?wwV'HW3m0@T`.2UP(ech_S'|.O"OHL'%DerB#Cp\"bG ngX tR@ HRw gxcYPQw:';~.~G;h&ҏW5KMG餀gԤͨɰ8HM=#gŬzpjQ. jM;Oΰ6꒥X Pӓ̨i_DDbr1|`dvF(UP3k7JR@ H9Ha Xz{Я9HM )k5_3&tȠ8亻l 5@A_N08њ'rw^/qp) PԄzaTcՓ?⣯v,8쇚XEhOdE[`kR@ V4h8vUHv]*#7d|wvvFS]?}Z Ϟ0f&LC&luDi>h QVR@ H R3)"bPͨ9Ұ2hTNӮ*gԴny8umrhl1,UIjl~#D'Z1G~; ~IL_3kf+SfPJ&Ji |DWB ğƨ,YŷYlwW,JɤR5U8K0^ )}/YW8-i3&K`QD%R@ HKxYspamQn *j@)RDj2'3jJ__58kS;4[1)Y4,J)8$ m6a#BqMQ#el - &Kv2jrURB)R R(J>aJG ÿ" ƅGcvl%ʔ^aI^ix../d%jjdR@ HHDH-8c@8,)iN@M[㚰ebD͹@K)o5[20'{3i&*~lIpj0/X".S۲AppgU;5Iw_ڬ[ NCM>M/rꫯ?=?~+rR@ R嫙 #=_NM[mw4k)!8źPlOV$X*t? mh~h/i-71j~@ H) ~x焇sqq[RjBh<Œ݌ׯ_N芤s+PGM"ӦTB_JP~ *FA7}H jۖRJ)fZ @3ijrޱ=40s@8+$4!Jfc>TS瞝,K26m`X<}+Kl8y<gj426$Ub) h&8Ni 䁠\ G&L9#!^__3' !mO"zjb`<޿όd;tO瀥TUz) h&{ssCIQPm\Z-)Hf(pPmSsp ASGԂ;T/kG3S,Z%R]vj AS_PN@;>"9m\SJmj*y8|'jZQмL H*ЅA6&NAb͜:j$]/|Q5S}M…iDV؃љx~ն,*ث]ic_T7KEM3%J=uɱ:$Xe7DyuI*Ћ Uzɐ k7kf#$y_@ 4bCXYR5WXU$) *HMiTȿg؉FB{ _)&3 ΀ߢa,ɂ8d`V69l~LN|گä@GjZS[89r's=djLMP# "fկKHirXƓ/K=3!S^GH)P@_jZC#O"ӏL<55֎-_ӟţ5gt5)DWOhkq@P]LCAY$H,s!q ; ̓Sm󊚅VdR@ +PGn=QsEK ) j,QSԬ%5)jYR` uO5,GGI)mDͺ'juضf5E:QR@ l[Qf() YwBMVG ^}wwX/9I$Xrs-׎̓:#QR@ T( jVCNOMr;.ww\Tg/d^ Y6,7.g5XGI)Pbz=+6OTQ.%’\(~$~ņI%xAQfa)+W`IjrX"7LBxيL9%5AG2)~3B%x2LkRJ)ШԌAW\Q{o%#9k0Ȯb\(MJjvZxcsb0KR}D͊ CSD)VO/_;YTNkI% |M{ &͍mF#{&E,z.kȟ4oЫ5K)Pyj_뉟cK{GGV&})t&kU@`M' N4$jS'̈#@V]@ԼwqHǏ9FCk.)I SsWPBf*R`FhWllD OJKϝC6r'$jrCkrDS5:YFM 9:?Xc;1F;5n{"8lЂ\ 2O8zZkDA1DlU) Nڨ 8#f nr,G RibeJ=6KEv'dyX3&~ VAޛ Sbbk ZW HXR#8 ˣOrX&g u~U9 4+O) X\ d)^ˡ$6g[h'B+<VdR@ +5ۋDMQsUH)fТYg9:J Hm+ j?QSԬ%5)jYR` uO5,GGI)mDͺ'juضf5E:QR@ l[Qf() YwfgמS7zYݽQR@ HrDr|YsU@m |WuRGI) @t}|(:Ǐ/*ݻJ hTɓ': <|ӧOE~P=IENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/figures/scenario-classic-mt-compute1.svg0000644000175000017500000003441600000000000030070 0ustar00coreycorey00000000000000
[SVG diagram, "Compute Node Overview". Labels: Compute Node; Instance; Macvtap Agent; Linux Network Utilities; VLAN Sub Interface; VLANs; Interface 2 (unnumbered); VLAN network.]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/scenario-classic-mt-compute2.svg
[SVG diagram, "Compute Node Components". Labels: Instances with eth0 interfaces; Macvtap devices; VLAN Sub Interfaces; VLANs; Interface 2 (unnumbered); Project Network 1 192.168.1.0/24; Project Network 2 192.168.2.0/24; VLAN network.]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/scenario-classic-mt-flowew1.png
[binary PNG image data]
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/scenario-classic-mt-flowew1.svg
[figure: "Network Traffic Flow - East/West -- Instances on different networks" (recovered diagram labels)]
  Compute Node 1: Instance 1 -> Macvtap -> VLAN Sub Interface -> eth0
  Compute Node 2: Instance 2 -> Macvtap -> VLAN Sub Interface -> eth0
  Network Node: Black Box
  VLAN Network carrying VLANs
  Project Network 1: 192.168.1.0/24
  Project Network 2: 192.168.2.0/24
[neutron-16.0.0.0b2.dev214/doc/source/admin/figures/scenario-classic-mt-flowew2.png: binary PNG data removed]
[neutron-16.0.0.0b2.dev214/doc/source/admin/figures/scenario-classic-mt-flowew2.svg: recovered diagram labels follow]
[figure: "Network Traffic Flow - East/West -- Instances on same network" (recovered diagram labels)]
  Compute Node 1: Instance 1 -> Macvtap -> VLAN Sub Interface -> eth0
  Compute Node 2: Instance 2 -> Macvtap -> VLAN Sub Interface -> eth0
  VLAN Network carrying VLANs
  Project Network 1: 192.168.1.0/24
[neutron-16.0.0.0b2.dev214/doc/source/admin/figures/scenario-classic-mt-flowns1.png: binary PNG data removed]
[neutron-16.0.0.0b2.dev214/doc/source/admin/figures/scenario-classic-mt-flowns1.svg: recovered diagram labels follow]
[figure: "Network Traffic Flow - North/South -- Instances with a fixed IP Address" (recovered diagram labels)]
  Compute Node 1: Instance -> Macvtap -> VLAN Sub Interface -> eth0
  Network Node: Black Box
  VLAN Network carrying VLANs
  Project Network 1: 192.168.1.0/24
  Internet
[neutron-16.0.0.0b2.dev214/doc/source/admin/figures/scenario-classic-mt-networks.png: binary PNG data removed]
[neutron-16.0.0.0b2.dev214/doc/source/admin/figures/scenario-classic-mt-networks.svg: recovered diagram labels follow]
[figure: "Network Layout" (recovered diagram labels)]
  Controller Node: Interface 1 (10.0.0.11/24)
  Network Node: Interface 1 (10.0.0.12/24), Interface 2 (unnumbered), Interface 3 (unnumbered)
  Compute Node 1: Interface 1 (10.0.0.31/24), Interface 2 (unnumbered)
  Compute Node 2: Interface 1 (10.0.0.32/24), Interface 2 (unnumbered)
  Networks: Management Network (10.0.0.0/24), VLAN network, External Network (203.0.113.0/24), Internet
[neutron-16.0.0.0b2.dev214/doc/source/admin/figures/scenario-classic-mt-services.png: binary PNG data removed]
[neutron-16.0.0.0b2.dev214/doc/source/admin/figures/scenario-classic-mt-services.svg: recovered diagram labels follow]
[figure: "Service Layout" (recovered diagram labels)]
  Controller Node: Networking Management, Networking ML2 Plug-in
  Network Node: Linux Network Utilities; Networking ML2 Plug-in, Networking L3-Agent, Networking Linuxbridge or OVS agent, Networking DHCP Agent, Networking Metadata Agent
  Compute Nodes: Linux Network Utilities, Compute (KVM Hypervisor), Networking ML2 Plug-in, Networking Macvtap agent
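
The Service Layout above corresponds to the Macvtap mechanism-driver scenario these figures document: the ML2 plug-in loads the macvtap driver, each compute node runs the macvtap agent against a physical VLAN interface (eth0 in the diagrams), and the network node keeps a Linuxbridge or OVS agent for routing, DHCP, and metadata. A minimal configuration sketch under those assumptions -- the physical-network label physnet1 and the eth0 mapping are illustrative, not values taken from this tree:

# /etc/neutron/plugins/ml2/ml2_conf.ini (controller node) -- minimal sketch
[ml2]
type_drivers = flat,vlan
tenant_network_types = vlan
# macvtap binds the compute-node ports; linuxbridge (or openvswitch)
# binds the router/DHCP ports on the network node, per the figure
mechanism_drivers = macvtap,linuxbridge

[ml2_type_vlan]
# physnet1 is an assumed label for the "VLAN network" in the layout figure
network_vlan_ranges = physnet1:1:4094

# /etc/neutron/plugins/ml2/macvtap_agent.ini (compute nodes) -- minimal sketch
[macvtap]
# maps the assumed physnet1 label onto the eth0 interface shown in the figures
physical_interface_mappings = physnet1:eth0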
[neutron-16.0.0.0b2.dev214/doc/source/admin/figures/scenario-classic-mt.png: binary PNG data removed (truncated)]
Compute Node 1
[Not supported by viewer]
Network Node
[Not supported by viewer]
Interface 2
(unnumbered)
[Not supported by viewer]
Interface 3
(unnumbered)
[Not supported by viewer]
Internet
[Not supported by viewer]
Interface 2
(unnumbered)
[Not supported by viewer]
VLAN network
[Not supported by viewer]
External  Network
203.0.113.0/24
[Not supported by viewer]
Router
SNAT/DNAT
[Not supported by viewer]
Switch
[Not supported by viewer]
DHCP Service
[Not supported by viewer]
Instance
[Not supported by viewer]
Macvtap
[Not supported by viewer]
Compute Node 2
[Not supported by viewer]
Interface 2
(unnumbered)
[Not supported by viewer]
Instance
[Not supported by viewer]
Macvtap
[Not supported by viewer]
Compute Node X
[Not supported by viewer]
Interface 2
(unnumbered)
[Not supported by viewer]
Instance
[Not supported by viewer]
Macvtap
[Not supported by viewer]
General Architecture
[Not supported by viewer]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/admin/fwaas-v2-scenario.rst0000644000175000017500000000735300000000000024271 0ustar00coreycorey00000000000000Firewall-as-a-Service (FWaaS) v2 scenario
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Enable FWaaS v2
---------------

#. Enable the FWaaS plug-in in the ``/etc/neutron/neutron.conf`` file:

   .. code-block:: ini

      service_plugins = firewall_v2

      [service_providers]
      # ...
      service_provider = FIREWALL_V2:fwaas_db:neutron_fwaas.services.firewall.service_drivers.agents.agents.FirewallAgentDriver:default

      [fwaas]
      agent_version = v2
      driver = neutron_fwaas.services.firewall.service_drivers.agents.drivers.linux.iptables_fwaas_v2.IptablesFwaasDriver
      enabled = True

   .. note::

      On Ubuntu and CentOS, modify the ``[fwaas]`` section in the
      ``/etc/neutron/fwaas_driver.ini`` file instead of
      ``/etc/neutron/neutron.conf``.

#. Configure the FWaaS plug-in for the L3 agent. In the ``AGENT`` section of
   ``l3_agent.ini``, make sure the FWaaS v2 extension is loaded:

   .. code-block:: ini

      [AGENT]
      extensions = fwaas_v2

#. Configure the ML2 plug-in agent extension. Add the following statements to
   ``ml2_conf.ini``, which is usually located at
   ``/etc/neutron/plugins/ml2/ml2_conf.ini``:

   .. code-block:: ini

      [agent]
      extensions = fwaas_v2

      [fwaas]
      firewall_l2_driver = noop

#. Create the required tables in the database:

   .. code-block:: console

      # neutron-db-manage --subproject neutron-fwaas upgrade head

#. Restart the ``neutron-l3-agent``, ``neutron-openvswitch-agent``, and
   ``neutron-server`` services to apply the settings.

   .. note::

      FWaaS v2 is not yet supported by Horizon.

Configure Firewall-as-a-Service v2
----------------------------------

Create the firewall rules and a policy that contains them. Then, create a
firewall group that applies the policy.

#. Create a firewall rule:

   .. code-block:: console

      $ openstack firewall group rule create --protocol {tcp,udp,icmp,any} \
        --source-ip-address SOURCE_IP_ADDRESS \
        --destination-ip-address DESTINATION_IP_ADDRESS \
        --source-port SOURCE_PORT_RANGE --destination-port DEST_PORT_RANGE \
        --action {allow,deny,reject}

   The Networking client requires a protocol value. If the rule is
   protocol-agnostic, you can use the ``any`` value.

   .. note::

      When the source and destination IP addresses are not of the same IP
      version (for example, one is IPv4 and the other IPv6), the command
      returns an error.

#. Create a firewall policy:

   .. code-block:: console

      $ openstack firewall group policy create --firewall-rule \
        "FIREWALL_RULE_IDS_OR_NAMES" myfirewallpolicy

   Separate firewall rule IDs or names with spaces. The order in which you
   specify the rules is important.

   You can create a firewall policy without any rules and add rules later, as
   follows:

   * To add multiple rules, use the update operation.

   * To add a single rule, use the insert-rule operation.

   For more details, see `Networking command-line client
   `_ in the OpenStack Command-Line Interface Reference.

   .. note::

      FWaaS always adds a default ``deny all`` rule at the lowest precedence
      of each policy. Consequently, a firewall policy with no rules blocks
      all traffic by default.

#. Create a firewall group:

   .. code-block:: console

      $ openstack firewall group create --ingress-firewall-policy \
        "FIREWALL_POLICY_IDS_OR_NAMES" --egress-firewall-policy \
        "FIREWALL_POLICY_IDS_OR_NAMES" --port "PORT_IDS_OR_NAMES"

   Separate firewall policy IDs or names with spaces. The direction in which
   you specify the policies is important.

   .. note::

      The firewall group remains in PENDING\_CREATE state until you create a
      Networking router and attach an interface to it.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/admin/fwaas.rst0000644000175000017500000000457000000000000022141 0ustar00coreycorey00000000000000Firewall-as-a-Service (FWaaS)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The Firewall-as-a-Service (FWaaS) plug-in applies firewalls to OpenStack
objects such as projects, routers, and router ports.

The central concepts with OpenStack firewalls are the notions of a firewall
policy and a firewall rule. A policy is an ordered collection of rules. A rule
specifies a collection of attributes (such as port ranges, protocol, and IP
addresses) that constitute match criteria and an action to take (allow or
deny) on matched traffic. A policy can be made public, so it can be shared
across projects.

Firewalls are implemented in various ways, depending on the driver used. For
example, an iptables driver implements firewalls using iptables rules. An
Open vSwitch driver implements firewall rules using flow entries in flow
tables. A VMware NSX driver manipulates NSX devices.

FWaaS v2
--------

The newer FWaaS implementation, v2, provides a much more granular service.
The notion of a firewall has been replaced with firewall group to indicate
that a firewall consists of two policies: an ingress policy and an egress
policy. A firewall group is applied not at the router level (all ports on a
router) but at the port level. Currently, router ports can be specified.
Since the Ocata release, VM ports can also be specified.

FWaaS v1
--------

FWaaS v1 was deprecated in the Newton cycle and removed entirely in the Stein
cycle.

FWaaS Feature Matrix
---------------------

The following table shows FWaaS v2 features.

+------------------------------------------+-----------+
| Feature                                  | Supported |
+==========================================+===========+
| Supports L3 firewalling for routers      | NO*       |
+------------------------------------------+-----------+
| Supports L3 firewalling for router ports | YES       |
+------------------------------------------+-----------+
| Supports L2 firewalling (VM ports)       | YES       |
+------------------------------------------+-----------+
| CLI support                              | YES       |
+------------------------------------------+-----------+
| Horizon support                          | NO        |
+------------------------------------------+-----------+

\* A firewall group can be applied to all ports on a given router in order to
effect this.

For further information, see the
`FWaaS v2 configuration guide <./fwaas-v2-scenario.html>`_.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/admin/index.rst0000644000175000017500000000076500000000000022145 0ustar00coreycorey00000000000000.. meta::
   :description: This guide targets OpenStack administrators seeking
                 to deploy and manage OpenStack Networking (neutron).
   :keywords: neutron, networking, OpenStack

==========================
OpenStack Networking Guide
==========================

This guide targets OpenStack administrators seeking to deploy and manage
OpenStack Networking (neutron).

..
toctree:: :maxdepth: 2 intro config deploy ops migration misc ovn/index archives/index ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/intro-basic-networking.rst0000644000175000017500000005541200000000000025440 0ustar00coreycorey00000000000000.. _intro-basic-networking: ================ Basic networking ================ Ethernet ~~~~~~~~ Ethernet is a networking protocol, specified by the IEEE 802.3 standard. Most wired network interface cards (NICs) communicate using Ethernet. In the `OSI model `_ of networking protocols, Ethernet occupies the second layer, which is known as the data link layer. When discussing Ethernet, you will often hear terms such as *local network*, *layer 2*, *L2*, *link layer* and *data link layer*. In an Ethernet network, the hosts connected to the network communicate by exchanging *frames*. Every host on an Ethernet network is uniquely identified by an address called the media access control (MAC) address. In particular, every virtual machine instance in an OpenStack environment has a unique MAC address, which is different from the MAC address of the compute host. A MAC address has 48 bits and is typically represented as a hexadecimal string, such as ``08:00:27:b9:88:74``. The MAC address is hard-coded into the NIC by the manufacturer, although modern NICs allow you to change the MAC address programmatically. In Linux, you can retrieve the MAC address of a NIC using the :command:`ip` command: .. code-block:: console $ ip link show eth0 2: eth0: mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000 link/ether 08:00:27:b9:88:74 brd ff:ff:ff:ff:ff:ff Conceptually, you can think of an Ethernet network as a single bus that each of the network hosts connects to. In early implementations, an Ethernet network consisted of a single coaxial cable that hosts would tap into to connect to the network. However, network hosts in modern Ethernet networks connect directly to a network device called a *switch*. Still, this conceptual model is useful, and in network diagrams (including those generated by the OpenStack dashboard) an Ethernet network is often depicted as if it was a single bus. You'll sometimes hear an Ethernet network referred to as a *layer 2 segment*. In an Ethernet network, every host on the network can send a frame directly to every other host. An Ethernet network also supports broadcasts so that one host can send a frame to every host on the network by sending to the special MAC address ``ff:ff:ff:ff:ff:ff``. ARP_ and DHCP_ are two notable protocols that use Ethernet broadcasts. Because Ethernet networks support broadcasts, you will sometimes hear an Ethernet network referred to as a *broadcast domain*. When a NIC receives an Ethernet frame, by default the NIC checks to see if the destination MAC address matches the address of the NIC (or the broadcast address), and the Ethernet frame is discarded if the MAC address does not match. For a compute host, this behavior is undesirable because the frame may be intended for one of the instances. NICs can be configured for *promiscuous mode*, where they pass all Ethernet frames to the operating system, even if the MAC address does not match. Compute hosts should always have the appropriate NICs configured for promiscuous mode. As mentioned earlier, modern Ethernet networks use switches to interconnect the network hosts. 
A switch is a box of networking hardware with a large number of ports that forward Ethernet frames from one connected host to another. When hosts first send frames over the switch, the switch doesn't know which MAC address is associated with which port. If an Ethernet frame is destined for an unknown MAC address, the switch broadcasts the frame to all ports. The switch learns which MAC addresses are at which ports by observing the traffic. Once it knows which MAC address is associated with a port, it can send Ethernet frames to the correct port instead of broadcasting. The switch maintains the mappings of MAC addresses to switch ports in a table called a *forwarding table* or *forwarding information base* (FIB). Switches can be daisy-chained together, and the resulting connection of switches and hosts behaves like a single network.

VLANs
~~~~~

VLAN is a networking technology that enables a single switch to act as if it were multiple independent switches. Specifically, two hosts that are connected to the same switch but on different VLANs do not see each other's traffic. OpenStack is able to take advantage of VLANs to isolate the traffic of different projects, even if the projects happen to have instances running on the same compute host. Each VLAN has an associated numerical ID, between 1 and 4094 (IDs 0 and 4095 are reserved by the 802.1Q standard). We say "VLAN 15" to refer to the VLAN with a numerical ID of 15.

To understand how VLANs work, let's consider VLAN applications in a traditional IT environment, where physical hosts are attached to a physical switch, and no virtualization is involved. Imagine a scenario where you want three isolated networks but you only have a single physical switch. The network administrator would choose three VLAN IDs, for example, 10, 11, and 12, and would configure the switch to associate switchports with VLAN IDs. For example, switchport 2 might be associated with VLAN 10, switchport 3 might be associated with VLAN 11, and so forth. When a switchport is configured for a specific VLAN, it is called an *access port*. The switch is responsible for ensuring that the network traffic is isolated across the VLANs.

Now consider the scenario that all of the switchports in the first switch become occupied, and so the organization buys a second switch and connects it to the first switch to expand the available number of switchports. The second switch is also configured to support VLAN IDs 10, 11, and 12. Now imagine host A connected to switch 1 on a port configured for VLAN ID 10 sends an Ethernet frame intended for host B connected to switch 2 on a port configured for VLAN ID 10. When switch 1 forwards the Ethernet frame to switch 2, it must communicate that the frame is associated with VLAN ID 10.

If two switches are to be connected together, and the switches are configured for VLANs, then the switchports used for cross-connecting the switches must be configured to allow Ethernet frames from any VLAN to be forwarded to the other switch. In addition, the sending switch must tag each Ethernet frame with the VLAN ID so that the receiving switch can ensure that only hosts on the matching VLAN are eligible to receive the frame. A switchport that is configured to pass frames from all VLANs and tag them with the VLAN IDs is called a *trunk port*. IEEE 802.1Q is the network standard that describes how VLAN tags are encoded in Ethernet frames when trunking is being used.
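On a typical Linux host you can see 802.1Q tagging in action by creating a VLAN subinterface on top of a physical NIC. The commands below are a minimal illustrative sketch, not part of an OpenStack deployment procedure; the parent interface name ``eth0`` and VLAN ID 10 are assumptions for the example:

.. code-block:: console

   # ip link add link eth0 name eth0.10 type vlan id 10
   # ip link set dev eth0.10 up

Every frame sent through ``eth0.10`` leaves ``eth0`` tagged with VLAN ID 10, which is exactly the format a trunk port is expected to carry.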
Note that if you are using VLANs on your physical switches to implement project isolation in your OpenStack cloud, you must ensure that all of your switchports are configured as trunk ports.

It is important that you select a VLAN range not being used by your current network infrastructure. For example, if you estimate that your cloud must support a maximum of 100 projects, pick a VLAN range outside of that value, such as VLAN 200–299. OpenStack, and all physical network infrastructure that handles project networks, must then support this VLAN range.

Trunking is used to connect different switches. Each frame on a trunk carries a tag that identifies which VLAN the frame belongs to, which ensures that hosts on the same VLAN can communicate even when they are attached to different switches.

.. _ARP:

Subnets and ARP
~~~~~~~~~~~~~~~

While NICs use MAC addresses to address network hosts, TCP/IP applications use IP addresses. The Address Resolution Protocol (ARP) bridges the gap between Ethernet and IP by translating IP addresses into MAC addresses.

IP addresses are broken up into two parts: a *network number* and a *host identifier*. Two hosts are on the same *subnet* if they have the same network number. Recall that two hosts can only communicate directly over Ethernet if they are on the same local network. ARP assumes that all machines that are in the same subnet are on the same local network. Network administrators must take care when assigning IP addresses and netmasks to hosts so that any two hosts that are in the same subnet are on the same local network; otherwise, ARP does not work properly.

To calculate the network number of an IP address, you must know the *netmask* associated with the address. A netmask indicates how many of the bits in the 32-bit IP address make up the network number. There are two syntaxes for expressing a netmask:

* dotted quad
* classless inter-domain routing (CIDR)

Consider an IP address of 192.0.2.5, where the first 24 bits of the address are the network number. In dotted quad notation, the netmask would be written as ``255.255.255.0``. CIDR notation includes both the IP address and netmask, and this example would be written as ``192.0.2.5/24``.

.. note::

   CIDR subnets that include a multicast address or a loopback address cannot be used in an OpenStack environment. For example, creating a subnet using ``224.0.0.0/16`` or ``127.0.1.0/24`` is not supported.

Sometimes we want to refer to a subnet, but not any particular IP address on the subnet. A common convention is to set the host identifier to all zeros to make reference to a subnet. For example, if a host's IP address is ``192.0.2.24/24``, then we would say the subnet is ``192.0.2.0/24``.

To understand how ARP translates IP addresses to MAC addresses, consider the following example. Assume host *A* has an IP address of ``192.0.2.5/24`` and a MAC address of ``fc:99:47:49:d4:a0``, and wants to send a packet to host *B* with an IP address of ``192.0.2.7``. Note that the network number is the same for both hosts, so host *A* is able to send frames directly to host *B*.

The first time host *A* attempts to communicate with host *B*, the destination MAC address is not known. Host *A* makes an ARP request to the local network. The request is a broadcast with a message like this:

*To: everybody (ff:ff:ff:ff:ff:ff). I am looking for the computer who has IP address 192.0.2.7. Signed: MAC address fc:99:47:49:d4:a0*.

Host *B* responds with a response like this:

*To: fc:99:47:49:d4:a0. I have IP address 192.0.2.7.
Signed: MAC address 54:78:1a:86:00:a5.*

Host *A* then sends Ethernet frames to host *B*.

You can initiate an ARP request manually using the :command:`arping` command. For example, to send an ARP request to IP address ``192.0.2.132``:

.. code-block:: console

   $ arping -I eth0 192.0.2.132
   ARPING 192.0.2.132 from 192.0.2.131 eth0
   Unicast reply from 192.0.2.132 [54:78:1A:86:1C:0B]  0.670ms
   Unicast reply from 192.0.2.132 [54:78:1A:86:1C:0B]  0.722ms
   Unicast reply from 192.0.2.132 [54:78:1A:86:1C:0B]  0.723ms
   Sent 3 probes (1 broadcast(s))
   Received 3 response(s)

To reduce the number of ARP requests, operating systems maintain an ARP cache that contains the mappings of IP addresses to MAC addresses. On a Linux machine, you can view the contents of the ARP cache by using the :command:`arp` command:

.. code-block:: console

   $ arp -n
   Address       HWtype  HWaddress          Flags Mask  Iface
   192.0.2.3     ether   52:54:00:12:35:03  C           eth0
   192.0.2.2     ether   52:54:00:12:35:02  C           eth0

.. _DHCP:

DHCP
~~~~

Hosts connected to a network use the Dynamic Host Configuration Protocol (DHCP) to dynamically obtain IP addresses. A DHCP server hands out the IP addresses to network hosts, which are the DHCP clients.

DHCP clients locate the DHCP server by sending a UDP_ packet from port 68 to address ``255.255.255.255`` on port 67. Address ``255.255.255.255`` is the local network broadcast address: all hosts on the local network see the UDP packets sent to this address. However, such packets are not forwarded to other networks. Consequently, the DHCP server must be on the same local network as the client, or the server will not receive the broadcast. The DHCP server responds by sending a UDP packet from port 67 to port 68 on the client. The exchange looks like this:

1. The client sends a discover ("I'm a client at MAC address ``08:00:27:b9:88:74``, I need an IP address")
2. The server sends an offer ("OK ``08:00:27:b9:88:74``, I'm offering IP address ``192.0.2.112``")
3. The client sends a request ("Server ``192.0.2.131``, I would like to have IP ``192.0.2.112``")
4. The server sends an acknowledgement ("OK ``08:00:27:b9:88:74``, IP ``192.0.2.112`` is yours")

OpenStack uses a third-party program called `dnsmasq `_ to implement the DHCP server. Dnsmasq writes to the syslog, where you can observe the DHCP requests and replies::

   Apr 23 15:53:46 c100-1 dhcpd: DHCPDISCOVER from 08:00:27:b9:88:74 via eth2
   Apr 23 15:53:46 c100-1 dhcpd: DHCPOFFER on 192.0.2.112 to 08:00:27:b9:88:74 via eth2
   Apr 23 15:53:48 c100-1 dhcpd: DHCPREQUEST for 192.0.2.112 (192.0.2.131) from 08:00:27:b9:88:74 via eth2
   Apr 23 15:53:48 c100-1 dhcpd: DHCPACK on 192.0.2.112 to 08:00:27:b9:88:74 via eth2

When troubleshooting an instance that is not reachable over the network, it can be helpful to examine this log to verify that all four steps of the DHCP protocol were carried out for the instance in question.

IP
~~

The Internet Protocol (IP) specifies how to route packets between hosts that are connected to different local networks. IP relies on special network hosts called *routers* or *gateways*. A router is a host that is connected to at least two local networks and can forward IP packets from one local network to another. A router has multiple IP addresses: one for each of the networks it is connected to.

In the OSI model of networking protocols, IP occupies the third layer, known as the network layer. When discussing IP, you will often hear terms such as *layer 3*, *L3*, and *network layer*.
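A Linux host only behaves as a router if IP forwarding is enabled in the kernel. As a quick illustration (generic Linux administration, not an OpenStack-specific step), you can inspect and enable IPv4 forwarding with :command:`sysctl`:

.. code-block:: console

   $ sysctl net.ipv4.ip_forward
   net.ipv4.ip_forward = 0
   # sysctl -w net.ipv4.ip_forward=1
   net.ipv4.ip_forward = 1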
A host sending a packet to an IP address consults its *routing table* to determine which machine on the local network(s) the packet should be sent to. The routing table maintains a list of the subnets associated with each local network that the host is directly connected to, as well as a list of routers that are on these local networks.

On a Linux machine, any of the following commands displays the routing table:

.. code-block:: console

   $ ip route show
   $ route -n
   $ netstat -rn

Here is an example of output from :command:`ip route show`:

.. code-block:: console

   $ ip route show
   default via 192.0.2.2 dev eth0
   192.0.2.0/24 dev eth0 proto kernel scope link src 192.0.2.15
   198.51.100.0/25 dev eth1 proto kernel scope link src 198.51.100.100
   198.51.100.192/26 dev virbr0 proto kernel scope link src 198.51.100.193

Line 1 of the output specifies the location of the default route, which is the effective routing rule if none of the other rules match. The router associated with the default route (``192.0.2.2`` in the example above) is sometimes referred to as the *default gateway*. A DHCP_ server typically transmits the IP address of the default gateway to the DHCP client along with the client's IP address and a netmask.

Line 2 of the output specifies that IPs in the ``192.0.2.0/24`` subnet are on the local network associated with the network interface eth0.

Line 3 of the output specifies that IPs in the ``198.51.100.0/25`` subnet are on the local network associated with the network interface eth1.

Line 4 of the output specifies that IPs in the ``198.51.100.192/26`` subnet are on the local network associated with the network interface virbr0.

The output of the :command:`route -n` and :command:`netstat -rn` commands is formatted in a slightly different way. This example shows how the same routes would be formatted using these commands:

.. code-block:: console

   $ route -n
   Kernel IP routing table
   Destination     Gateway         Genmask          Flags   MSS Window  irtt Iface
   0.0.0.0         192.0.2.2       0.0.0.0          UG        0 0          0 eth0
   192.0.2.0       0.0.0.0         255.255.255.0    U         0 0          0 eth0
   198.51.100.0    0.0.0.0         255.255.255.128  U         0 0          0 eth1
   198.51.100.192  0.0.0.0         255.255.255.192  U         0 0          0 virbr0

The :command:`ip route get` command outputs the route for a destination IP address. In the example below, the destination IP address ``192.0.2.14`` is on the local network of eth0 and would be sent directly:

.. code-block:: console

   $ ip route get 192.0.2.14
   192.0.2.14 dev eth0 src 192.0.2.15

The destination IP address ``203.0.113.34`` is not on any of the connected local networks and would be forwarded to the default gateway at ``192.0.2.2``:

.. code-block:: console

   $ ip route get 203.0.113.34
   203.0.113.34 via 192.0.2.2 dev eth0 src 192.0.2.15

It is common for a packet to hop across multiple routers to reach its final destination. On a Linux machine, the ``traceroute`` and more recent ``mtr`` programs print out the IP address of each router that an IP packet traverses along its path to its destination.

.. _UDP:

TCP/UDP/ICMP
~~~~~~~~~~~~

For networked software applications to communicate over an IP network, they must use a protocol layered atop IP. These protocols occupy the fourth layer of the OSI model known as the *transport layer* or *layer 4*. See the `Protocol Numbers `_ web page maintained by the Internet Assigned Numbers Authority (IANA) for a list of protocols that layer atop IP and their associated numbers.

The *Transmission Control Protocol* (TCP) is the most commonly used layer 4 protocol in networked applications.
TCP is a *connection-oriented* protocol: it uses a client-server model where a client connects to a server, where *server* refers to the application that receives connections. The typical interaction in a TCP-based application proceeds as follows: 1. Client connects to server. 2. Client and server exchange data. 3. Client or server disconnects. Because a network host may have multiple TCP-based applications running, TCP uses an addressing scheme called *ports* to uniquely identify TCP-based applications. A TCP port is associated with a number in the range 1-65535, and only one application on a host can be associated with a TCP port at a time, a restriction that is enforced by the operating system. A TCP server is said to *listen* on a port. For example, an SSH server typically listens on port 22. For a client to connect to a server using TCP, the client must know both the IP address of a server's host and the server's TCP port. The operating system of the TCP client application automatically assigns a port number to the client. The client owns this port number until the TCP connection is terminated, after which the operating system reclaims the port number. These types of ports are referred to as *ephemeral ports*. IANA maintains a `registry of port numbers `_ for many TCP-based services, as well as services that use other layer 4 protocols that employ ports. Registering a TCP port number is not required, but registering a port number is helpful to avoid collisions with other services. See `firewalls and default ports `_ in OpenStack Installation Guide for the default TCP ports used by various services involved in an OpenStack deployment. The most common application programming interface (API) for writing TCP-based applications is called *Berkeley sockets*, also known as *BSD sockets* or, simply, *sockets*. The sockets API exposes a *stream oriented* interface for writing TCP applications. From the perspective of a programmer, sending data over a TCP connection is similar to writing a stream of bytes to a file. It is the responsibility of the operating system's TCP/IP implementation to break up the stream of data into IP packets. The operating system is also responsible for automatically retransmitting dropped packets, and for handling flow control to ensure that transmitted data does not overrun the sender's data buffers, receiver's data buffers, and network capacity. Finally, the operating system is responsible for re-assembling the packets in the correct order into a stream of data on the receiver's side. Because TCP detects and retransmits lost packets, it is said to be a *reliable* protocol. The *User Datagram Protocol* (UDP) is another layer 4 protocol that is the basis of several well-known networking protocols. UDP is a *connectionless* protocol: two applications that communicate over UDP do not need to establish a connection before exchanging data. UDP is also an *unreliable* protocol. The operating system does not attempt to retransmit or even detect lost UDP packets. The operating system also does not provide any guarantee that the receiving application sees the UDP packets in the same order that they were sent in. UDP, like TCP, uses the notion of ports to distinguish between different applications running on the same system. Note, however, that operating systems treat UDP ports separately from TCP ports. For example, it is possible for one application to be associated with TCP port 16543 and a separate application to be associated with UDP port 16543. 
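You can observe this separation on a running system. The following :command:`ss` invocation lists listening TCP sockets and open UDP sockets; the sample output is hypothetical and simply illustrates a TCP listener and an unrelated UDP socket sharing port number 16543:

.. code-block:: console

   $ ss -tuln
   Netid  State   Recv-Q  Send-Q  Local Address:Port  Peer Address:Port
   udp    UNCONN  0       0             0.0.0.0:16543       0.0.0.0:*
   tcp    LISTEN  0       128           0.0.0.0:16543       0.0.0.0:*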
Like TCP, the sockets API is the most common API for writing UDP-based applications. The sockets API provides a *message-oriented* interface for writing UDP applications: a programmer sends data over UDP by transmitting a fixed-sized message. If an application requires retransmissions of lost packets or a well-defined ordering of received packets, the programmer is responsible for implementing this functionality in the application code. DHCP_, the Domain Name System (DNS), the Network Time Protocol (NTP), and :ref:`VXLAN` are examples of UDP-based protocols used in OpenStack deployments. UDP has support for one-to-many communication: sending a single packet to multiple hosts. An application can broadcast a UDP packet to all of the network hosts on a local network by setting the receiver IP address as the special IP broadcast address ``255.255.255.255``. An application can also send a UDP packet to a set of receivers using *IP multicast*. The intended receiver applications join a multicast group by binding a UDP socket to a special IP address that is one of the valid multicast group addresses. The receiving hosts do not have to be on the same local network as the sender, but the intervening routers must be configured to support IP multicast routing. VXLAN is an example of a UDP-based protocol that uses IP multicast. The *Internet Control Message Protocol* (ICMP) is a protocol used for sending control messages over an IP network. For example, a router that receives an IP packet may send an ICMP packet back to the source if there is no route in the router's routing table that corresponds to the destination address (ICMP code 1, destination host unreachable) or if the IP packet is too large for the router to handle (ICMP code 4, fragmentation required and "don't fragment" flag is set). The :command:`ping` and :command:`mtr` Linux command-line tools are two examples of network utilities that use ICMP. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/intro-nat.rst0000644000175000017500000000715500000000000022755 0ustar00coreycorey00000000000000.. _intro-nat: =========================== Network address translation =========================== *Network Address Translation* (NAT) is a process for modifying the source or destination addresses in the headers of an IP packet while the packet is in transit. In general, the sender and receiver applications are not aware that the IP packets are being manipulated. NAT is often implemented by routers, and so we will refer to the host performing NAT as a *NAT router*. However, in OpenStack deployments it is typically Linux servers that implement the NAT functionality, not hardware routers. These servers use the `iptables `_ software package to implement the NAT functionality. There are multiple variations of NAT, and here we describe three kinds commonly found in OpenStack deployments. SNAT ~~~~ In *Source Network Address Translation* (SNAT), the NAT router modifies the IP address of the sender in IP packets. SNAT is commonly used to enable hosts with *private addresses* to communicate with servers on the public Internet. `RFC 1918 `_ reserves the following three subnets as private addresses: * ``10.0.0.0/8`` * ``172.16.0.0/12`` * ``192.168.0.0/16`` These IP addresses are not publicly routable, meaning that a host on the public Internet can not send an IP packet to any of these addresses. 
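On Linux, the SNAT behavior described in this section is typically configured with ``iptables``. The rule below is a minimal illustrative sketch only; the private source range and the external interface name ``eth0`` are assumptions for the example, not values from an actual deployment:

.. code-block:: console

   # iptables -t nat -A POSTROUTING -s 192.168.0.0/16 -o eth0 -j MASQUERADE

The ``MASQUERADE`` target rewrites the source address of each matching outbound packet to the address of the outgoing interface, which is the translation described in the remainder of this section.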
Private IP addresses are widely used in both residential and corporate environments. Often, an application running on a host with a private IP address will need to connect to a server on the public Internet. An example is a user who wants to access a public website such as www.openstack.org. If the IP packets reach the web server at www.openstack.org with a private IP address as the source, then the web server cannot send packets back to the sender. SNAT solves this problem by modifying the source IP address to an IP address that is routable on the public Internet. There are different variations of SNAT; in the form that OpenStack deployments use, a NAT router on the path between the sender and receiver replaces the packet's source IP address with the router's public IP address. The router also modifies the source TCP or UDP port to another value, and the router maintains a record of the sender's true IP address and port, as well as the modified IP address and port. When the router receives a packet with the matching IP address and port, it translates these back to the private IP address and port, and forwards the packet along. Because the NAT router modifies ports as well as IP addresses, this form of SNAT is sometimes referred to as *Port Address Translation* (PAT). It is also sometimes referred to as *NAT overload*. OpenStack uses SNAT to enable applications running inside of instances to connect out to the public Internet. DNAT ~~~~ In *Destination Network Address Translation* (DNAT), the NAT router modifies the IP address of the destination in IP packet headers. OpenStack uses DNAT to route packets from instances to the OpenStack metadata service. Applications running inside of instances access the OpenStack metadata service by making HTTP GET requests to a web server with IP address 169.254.169.254. In an OpenStack deployment, there is no host with this IP address. Instead, OpenStack uses DNAT to change the destination IP of these packets so they reach the network interface that a metadata service is listening on. One-to-one NAT ~~~~~~~~~~~~~~ In *one-to-one NAT*, the NAT router maintains a one-to-one mapping between private IP addresses and public IP addresses. OpenStack uses one-to-one NAT to implement floating IP addresses. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/intro-network-components.rst0000644000175000017500000000441200000000000026040 0ustar00coreycorey00000000000000.. _intro-network-components: ================== Network components ================== Switches ~~~~~~~~ Switches are Multi-Input Multi-Output (MIMO) devices that enable packets to travel from one node to another. Switches connect hosts that belong to the same layer-2 network. Switches enable forwarding of the packet received on one port (input) to another port (output) so that they reach the desired destination node. Switches operate at layer-2 in the networking model. They forward the traffic based on the destination Ethernet address in the packet header. Routers ~~~~~~~ Routers are special devices that enable packets to travel from one layer-3 network to another. Routers enable communication between two nodes on different layer-3 networks that are not directly connected to each other. Routers operate at layer-3 in the networking model. They route the traffic based on the destination IP address in the packet header. 
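As a small illustration of this destination-based behavior, a Linux host acting as a router can be instructed to forward traffic for a remote layer-3 network through a neighboring router with a static route. The addresses below are drawn from the documentation ranges, and the interface name is an assumption for the example:

.. code-block:: console

   # ip route add 203.0.113.0/24 via 192.0.2.1 dev eth0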
Firewalls
~~~~~~~~~

Firewalls are used to regulate traffic to and from a host or a network. A firewall can be either a specialized device connecting two networks or a software-based filtering mechanism implemented on an operating system. Firewalls are used to restrict traffic to a host based on the rules defined on the host. They can filter packets based on several criteria such as source IP address, destination IP address, port numbers, connection state, and so on. Firewalls are primarily used to protect hosts from unauthorized access and malicious attacks. Linux-based operating systems implement firewalls through ``iptables``.

Load balancers
~~~~~~~~~~~~~~

Load balancers can be software-based or hardware-based devices that allow traffic to be distributed evenly across several servers. By distributing the traffic across multiple servers, a load balancer avoids overloading any single server and keeps a single server from becoming a single point of failure. This also improves the performance, network throughput, and response time of the servers. Load balancers are typically used in a 3-tier architecture. In this model, a load balancer receives a request from the front-end web server, which then forwards the request to one of the available back-end database servers for processing. The response from the database server is passed back to the web server for further processing.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/admin/intro-network-namespaces.rst0000644000175000017500000000532600000000000025777 0ustar00coreycorey00000000000000.. _intro-network-namespaces:

==================
Network namespaces
==================

A namespace is a way of scoping a particular set of identifiers. Using a namespace, you can use the same identifier multiple times in different namespaces. You can also restrict an identifier set visible to particular processes.

For example, Linux provides namespaces for networking and processes, among other things. If a process is running within a process namespace, it can only see and communicate with other processes in the same namespace. So, if a shell in a particular process namespace ran :command:`ps waux`, it would only show the other processes in the same namespace.

Linux network namespaces
~~~~~~~~~~~~~~~~~~~~~~~~

In a network namespace, the scoped 'identifiers' are network devices; so a given network device, such as ``eth0``, exists in a particular namespace. Linux starts up with a default network namespace, so if your operating system does not do anything special, that is where all the network devices will be located. But it is also possible to create further non-default namespaces, and create new devices in those namespaces, or to move an existing device from one namespace to another.

Each network namespace also has its own routing table, and in fact this is the main reason for namespaces to exist. A routing table is keyed by destination IP address, so network namespaces are what you need if you want the same destination IP address to mean different things at different times - which is something that OpenStack Networking requires for its feature of providing overlapping IP addresses in different virtual networks.

Each network namespace also has its own set of iptables (for both IPv4 and IPv6). So, you can apply different security to flows with the same IP addressing in different namespaces, as well as different routing.

Any given Linux process runs in a particular network namespace. By default this is inherited from its parent process, but a process with the right capabilities can switch itself into a different namespace; in practice this is mostly done using the :command:`ip netns exec NETNS COMMAND...` invocation, which starts ``COMMAND`` running in the namespace named ``NETNS``. Suppose such a process sends out a message to IP address A.B.C.D; the effect of the namespace is that A.B.C.D will be looked up in that namespace's routing table, and that will determine the network device that the message is transmitted through.

Virtual routing and forwarding (VRF)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Virtual routing and forwarding is an IP technology that allows multiple instances of a routing table to coexist on the same router at the same time. It is another name for the network namespace functionality described above.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/admin/intro-os-networking.rst0000644000175000017500000003551000000000000024775 0ustar00coreycorey00000000000000.. _intro-os-networking:

====================
OpenStack Networking
====================

OpenStack Networking allows you to create and manage network objects, such as networks, subnets, and ports, which other OpenStack services can use. Plug-ins can be implemented to accommodate different networking equipment and software, providing flexibility to OpenStack architecture and deployment.

The Networking service, code-named neutron, provides an API that lets you define network connectivity and addressing in the cloud. The Networking service enables operators to leverage different networking technologies to power their cloud networking. The Networking service also provides an API to configure and manage a variety of network services ranging from L3 forwarding and Network Address Translation (NAT) to perimeter firewalls, and virtual private networks.

It includes the following components:

API server
  The OpenStack Networking API includes support for Layer 2 networking and IP Address Management (IPAM), as well as an extension for a Layer 3 router construct that enables routing between Layer 2 networks and gateways to external networks. OpenStack Networking includes a growing list of plug-ins that enable interoperability with various commercial and open source network technologies, including routers, switches, virtual switches and software-defined networking (SDN) controllers.

OpenStack Networking plug-in and agents
  Plugs and unplugs ports, creates networks or subnets, and provides IP addressing. The chosen plug-in and agents differ depending on the vendor and technologies used in the particular cloud. It is important to mention that only one plug-in can be used at a time.

Messaging queue
  Accepts and routes RPC requests between agents to complete API operations. Message queue is used in the ML2 plug-in for RPC between the neutron server and neutron agents that run on each hypervisor, in the ML2 mechanism drivers for Open vSwitch and Linux bridge.

Concepts
~~~~~~~~

To configure rich network topologies, you can create and configure networks and subnets and instruct other OpenStack services like Compute to attach virtual devices to ports on these networks. OpenStack Compute is a prominent consumer of OpenStack Networking to provide connectivity for its instances. In particular, OpenStack Networking supports each project having multiple private networks and enables projects to choose their own IP addressing scheme, even if those IP addresses overlap with those that other projects use. There are two types of network: project and provider networks. It is possible to share any of these types of networks among projects as part of the network creation process.

.. _intro-os-networking-provider:

Provider networks
-----------------

Provider networks offer layer-2 connectivity to instances with optional support for DHCP and metadata services. These networks connect, or map, to existing layer-2 networks in the data center, typically using VLAN (802.1q) tagging to identify and separate them.

Provider networks generally offer simplicity, performance, and reliability at the cost of flexibility. By default only administrators can create or update provider networks because they require configuration of physical network infrastructure. It is possible to change the user who is allowed to create or update provider networks with the following parameters of ``policy.json``:

* ``create_network:provider:physical_network``
* ``update_network:provider:physical_network``

.. warning::

   The creation and modification of provider networks enables use of physical network resources, such as VLANs. Enable these changes only for trusted projects.

Also, provider networks only handle layer-2 connectivity for instances, thus lacking support for features such as routers and floating IP addresses.

In many cases, operators who are already familiar with virtual networking architectures that rely on physical network infrastructure for layer-2, layer-3, or other services can seamlessly deploy the OpenStack Networking service. In particular, provider networks appeal to operators looking to migrate from the Compute networking service (nova-network) to the OpenStack Networking service. Over time, operators can build on this minimal architecture to enable more cloud networking features.

In general, the OpenStack Networking software components that handle layer-3 operations impact performance and reliability the most. To improve performance and reliability, provider networks move layer-3 operations to the physical network infrastructure.

In one particular use case, the OpenStack deployment resides in a mixed environment with conventional virtualization and bare-metal hosts that use a sizable physical network infrastructure. Applications that run inside the OpenStack deployment might require direct layer-2 access, typically using VLANs, to applications outside of the deployment.

Routed provider networks
------------------------

Routed provider networks offer layer-3 connectivity to instances. These networks map to existing layer-3 networks in the data center. More specifically, the network maps to multiple layer-2 segments, each of which is essentially a provider network. Each has a router gateway attached to it which routes traffic between them and externally. The Networking service does not provide the routing.

Routed provider networks offer performance at scale that is difficult to achieve with a plain provider network at the expense of guaranteed layer-2 connectivity. See :ref:`config-routed-provider-networks` for more information.

.. _intro-os-networking-selfservice:

Self-service networks
---------------------

Self-service networks primarily enable general (non-privileged) projects to manage networks without involving administrators.
These networks are entirely virtual and require virtual routers to interact with provider and external networks such as the Internet. Self-service networks also usually provide DHCP and metadata services to instances. In most cases, self-service networks use overlay protocols such as VXLAN or GRE because they can support many more networks than layer-2 segmentation using VLAN tagging (802.1q). Furthermore, VLANs typically require additional configuration of physical network infrastructure. IPv4 self-service networks typically use private IP address ranges (RFC1918) and interact with provider networks via source NAT on virtual routers. Floating IP addresses enable access to instances from provider networks via destination NAT on virtual routers. IPv6 self-service networks always use public IP address ranges and interact with provider networks via virtual routers with static routes. The Networking service implements routers using a layer-3 agent that typically resides at least one network node. Contrary to provider networks that connect instances to the physical network infrastructure at layer-2, self-service networks must traverse a layer-3 agent. Thus, oversubscription or failure of a layer-3 agent or network node can impact a significant quantity of self-service networks and instances using them. Consider implementing one or more high-availability features to increase redundancy and performance of self-service networks. Users create project networks for connectivity within projects. By default, they are fully isolated and are not shared with other projects. OpenStack Networking supports the following types of network isolation and overlay technologies. Flat All instances reside on the same network, which can also be shared with the hosts. No VLAN tagging or other network segregation takes place. VLAN Networking allows users to create multiple provider or project networks using VLAN IDs (802.1Q tagged) that correspond to VLANs present in the physical network. This allows instances to communicate with each other across the environment. They can also communicate with dedicated servers, firewalls, and other networking infrastructure on the same layer 2 VLAN. GRE and VXLAN VXLAN and GRE are encapsulation protocols that create overlay networks to activate and control communication between compute instances. A Networking router is required to allow traffic to flow outside of the GRE or VXLAN project network. A router is also required to connect directly-connected project networks with external networks, including the Internet. The router provides the ability to connect to instances directly from an external network using floating IP addresses. .. image:: figures/NetworkTypes.png :width: 100% :alt: Project and provider networks Subnets ------- A block of IP addresses and associated configuration state. This is also known as the native IPAM (IP Address Management) provided by the networking service for both project and provider networks. Subnets are used to allocate IP addresses when new ports are created on a network. Subnet pools ------------ End users normally can create subnets with any valid IP addresses without other restrictions. However, in some cases, it is nice for the admin or the project to pre-define a pool of addresses from which to create subnets with automatic allocation. Using subnet pools constrains what addresses can be used by requiring that every subnet be within the defined pool. It also prevents address reuse or overlap by two subnets from the same pool. 
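As an illustrative sketch (the pool name, network name, and prefixes here are invented for the example), an administrator might create a pool and then let the Networking service allocate a subnet prefix from it automatically:

.. code-block:: console

   $ openstack subnet pool create --pool-prefix 192.0.2.0/24 \
     --default-prefix-length 26 demo-pool
   $ openstack subnet create --subnet-pool demo-pool \
     --network demo-net demo-subnet

Because no explicit CIDR is given, the second command receives a /26 prefix carved out of the pool, and any later subnets allocated from the same pool are guaranteed not to overlap with it.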
See :ref:`config-subnet-pools` for more information. Ports ----- A port is a connection point for attaching a single device, such as the NIC of a virtual server, to a virtual network. The port also describes the associated network configuration, such as the MAC and IP addresses to be used on that port. Routers ------- Routers provide virtual layer-3 services such as routing and NAT between self-service and provider networks or among self-service networks belonging to a project. The Networking service uses a layer-3 agent to manage routers via namespaces. Security groups --------------- Security groups provide a container for virtual firewall rules that control ingress (inbound to instances) and egress (outbound from instances) network traffic at the port level. Security groups use a default deny policy and only contain rules that allow specific traffic. Each port can reference one or more security groups in an additive fashion. The firewall driver translates security group rules to a configuration for the underlying packet filtering technology such as ``iptables``. Each project contains a ``default`` security group that allows all egress traffic and denies all ingress traffic. You can change the rules in the ``default`` security group. If you launch an instance without specifying a security group, the ``default`` security group automatically applies to it. Similarly, if you create a port without specifying a security group, the ``default`` security group automatically applies to it. .. note:: If you use the metadata service, removing the default egress rules denies access to TCP port 80 on 169.254.169.254, thus preventing instances from retrieving metadata. Security group rules are stateful. Thus, allowing ingress TCP port 22 for secure shell automatically creates rules that allow return egress traffic and ICMP error messages involving those TCP connections. By default, all security groups contain a series of basic (sanity) and anti-spoofing rules that perform the following actions: * Allow egress traffic only if it uses the source MAC and IP addresses of the port for the instance, source MAC and IP combination in ``allowed-address-pairs``, or valid MAC address (port or ``allowed-address-pairs``) and associated EUI64 link-local IPv6 address. * Allow egress DHCP discovery and request messages that use the source MAC address of the port for the instance and the unspecified IPv4 address (0.0.0.0). * Allow ingress DHCP and DHCPv6 responses from the DHCP server on the subnet so instances can acquire IP addresses. * Deny egress DHCP and DHCPv6 responses to prevent instances from acting as DHCP(v6) servers. * Allow ingress/egress ICMPv6 MLD, neighbor solicitation, and neighbor discovery messages so instances can discover neighbors and join multicast groups. * Deny egress ICMPv6 router advertisements to prevent instances from acting as IPv6 routers and forwarding IPv6 traffic for other instances. * Allow egress ICMPv6 MLD reports (v1 and v2) and neighbor solicitation messages that use the source MAC address of a particular instance and the unspecified IPv6 address (::). Duplicate address detection (DAD) relies on these messages. * Allow egress non-IP traffic from the MAC address of the port for the instance and any additional MAC addresses in ``allowed-address-pairs`` on the port for the instance. Although non-IP traffic, security groups do not implicitly allow all ARP traffic. Separate ARP filtering rules prevent instances from using ARP to intercept traffic for another instance. 
You cannot disable or remove these basic (sanity) and anti-spoofing rules. However, you can disable security groups for a port, including the basic and anti-spoofing rules, by setting the port attribute ``port_security_enabled`` to ``False``.

Extensions
----------

The OpenStack Networking service is extensible. Extensions serve two purposes: they allow the introduction of new features in the API without requiring a version change, and they allow the introduction of vendor-specific niche functionality. Applications can programmatically list available extensions by performing a GET on the :code:`/extensions` URI. Note that this is a versioned request; that is, an extension available in one API version might not be available in another.

DHCP
----

The optional DHCP service manages IP addresses for instances on provider and self-service networks. The Networking service implements the DHCP service using an agent that manages ``qdhcp`` namespaces and the ``dnsmasq`` service.

Metadata
--------

The optional metadata service provides an API for instances to obtain metadata such as SSH keys.

Service and component hierarchy
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Server
------

* Provides API, manages database, etc.

Plug-ins
--------

* Manages agents

Agents
------

* Provides layer 2/3 connectivity to instances
* Handles physical-virtual network transition
* Handles metadata, etc.

Layer 2 (Ethernet and Switching)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

* Linux Bridge
* OVS

Layer 3 (IP and Routing)
^^^^^^^^^^^^^^^^^^^^^^^^

* L3
* DHCP

Miscellaneous
^^^^^^^^^^^^^

* Metadata

Services
--------

Routing services
^^^^^^^^^^^^^^^^

VPNaaS
^^^^^^

The Virtual Private Network-as-a-Service (VPNaaS) is a neutron extension that introduces the VPN feature set.

LBaaS
^^^^^

The Load-Balancer-as-a-Service (LBaaS) API provisions and configures load balancers. The reference implementation is based on the HAProxy software load balancer. See the Octavia project for more information.

FWaaS
^^^^^

The Firewall-as-a-Service (FWaaS) API allows you to apply firewalls to OpenStack objects such as projects, routers, and router ports.

.. _intro-overlay-protocols:

==========================
Overlay (tunnel) protocols
==========================

Tunneling is a mechanism that makes the transfer of payloads feasible over an incompatible delivery network. It allows user traffic, in encapsulated form, to cross networks that could not otherwise carry it. Data encryption may be employed to protect the encapsulated payload, so that private user network data can safely traverse the intermediate delivery network.

Generic routing encapsulation (GRE)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Generic routing encapsulation (GRE) is a protocol that runs over IP and is employed when delivery and payload protocols are compatible but payload addresses are incompatible. For instance, a payload may expect to run directly on a data-link layer while it is actually carried over a transport-layer datagram protocol on top of IP. GRE creates a private point-to-point connection and works by encapsulating a payload. GRE is a foundation protocol for other tunnel protocols, but GRE tunnels provide only weak authentication.
.. _VXLAN:

Virtual extensible local area network (VXLAN)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The purpose of VXLAN is to provide scalable network isolation. VXLAN is a Layer 2 overlay scheme on a Layer 3 network. It allows an overlay layer-2 network to spread across multiple underlay layer-3 network domains. Each overlay is termed a VXLAN segment. Only VMs within the same VXLAN segment can communicate.

Generic Network Virtualization Encapsulation (GENEVE)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Geneve is designed to recognize and accommodate the changing capabilities and needs of different devices in network virtualization. It provides a framework for tunneling rather than being prescriptive about the entire system. Geneve flexibly defines the content of the metadata that is added during encapsulation, and so adapts to various virtualization scenarios. It uses UDP as its transport protocol and is dynamic in size, using extensible option headers. Geneve supports unicast, multicast, and broadcast.

.. _intro:

============
Introduction
============

The OpenStack Networking service (neutron) provides an API that allows users to set up and define network connectivity and addressing in the cloud. The project code-name for Networking services is neutron. OpenStack Networking handles the creation and management of a virtual networking infrastructure, including networks, switches, subnets, and routers for devices managed by the OpenStack Compute service (nova). Advanced services such as firewalls or virtual private networks (VPN) can also be used.

OpenStack Networking consists of the neutron-server, a database for persistent storage, and any number of plug-in agents, which provide other services such as interfacing with native Linux networking mechanisms, external devices, or SDN controllers.

OpenStack Networking is entirely standalone and can be deployed to a dedicated host. If your deployment uses a controller host to run centralized Compute components, you can deploy the Networking server to that specific host instead.

OpenStack Networking integrates with various OpenStack components:

* OpenStack Identity service (keystone) is used for authentication and authorization of API requests.
* OpenStack Compute service (nova) is used to plug each virtual NIC on the VM into a particular network.
* OpenStack Dashboard (horizon) is used by administrators and project users to create and manage network services through a web-based graphical interface.

.. note::

   The network address ranges used in this guide are chosen in accordance with RFC 5737 and RFC 3849, and as such are restricted to the following:

   **IPv4:**

   * 192.0.2.0/24
   * 198.51.100.0/24
   * 203.0.113.0/24

   **IPv6:**

   * 2001:DB8::/32

   The network address ranges in the examples of this guide should not be used for any purpose other than documentation.

.. note::

   To reduce clutter, this guide removes command output without relevance to the particular action.
.. toctree::
   :maxdepth: 2

   intro-basic-networking
   intro-network-components
   intro-overlay-protocols
   intro-network-namespaces
   intro-nat
   intro-os-networking
   fwaas

.. _migration-to-vrrp:

==============================
Add VRRP to an existing router
==============================

This section describes the process of migrating from a classic router to an L3 HA router, which is available starting from the Mitaka release.

Similar to the classic scenario, all network traffic on a project network that requires routing actively traverses only one network node regardless of the quantity of network nodes providing HA for the router. Therefore, this high-availability implementation primarily addresses failure situations instead of bandwidth constraints that limit performance. However, it supports random distribution of routers on different network nodes to reduce the chances of bandwidth constraints and to improve scaling.

This section references parts of :ref:`deploy-lb-ha-vrrp` and :ref:`deploy-ovs-ha-vrrp`. For details regarding the infrastructure and configuration needed for an actual L3 HA deployment, read the relevant guide before continuing with the migration process.

Migration
~~~~~~~~~

The migration process is quite simple: bring the router down by setting its ``admin_state_up`` attribute to ``False``, upgrade the router to L3 HA, and then set the router's ``admin_state_up`` attribute back to ``True``.

.. warning::

   Once the migration starts, south-north connections (instances to internet) will be severed. New connections will be able to start only when the migration is complete.

Here is the router we have used in our demonstration:

.. code-block:: console

   $ openstack router show router1
   +-------------------------+--------------------------------------+
   | Field                   | Value                                |
   +-------------------------+--------------------------------------+
   | admin_state_up          | UP                                   |
   | distributed             | False                                |
   | external_gateway_info   |                                      |
   | ha                      | False                                |
   | id                      | 6b793b46-d082-4fd5-980f-a6f80cbb0f2a |
   | name                    | router1                              |
   | project_id              | bb8b84ab75be4e19bd0dfe02f6c3f5c1     |
   | routes                  |                                      |
   | status                  | ACTIVE                               |
   | tags                    | []                                   |
   +-------------------------+--------------------------------------+

#. Source the administrative project credentials.
#. Set the ``admin_state_up`` attribute to ``False``. This will sever south-north connections until ``admin_state_up`` is set to ``True`` again.

   .. code-block:: console

      $ openstack router set router1 --disable

#. Set the ``ha`` attribute of the router to ``True``.

   .. code-block:: console

      $ openstack router set router1 --ha

#. Set the ``admin_state_up`` attribute to ``True``. After this, south-north connections can start.

   .. code-block:: console

      $ openstack router set router1 --enable

#. Make sure that the router's ``ha`` attribute has changed to ``True``.
   .. code-block:: console

      $ openstack router show router1
      +-------------------------+--------------------------------------+
      | Field                   | Value                                |
      +-------------------------+--------------------------------------+
      | admin_state_up          | UP                                   |
      | distributed             | False                                |
      | external_gateway_info   |                                      |
      | ha                      | True                                 |
      | id                      | 6b793b46-d082-4fd5-980f-a6f80cbb0f2a |
      | name                    | router1                              |
      | project_id              | bb8b84ab75be4e19bd0dfe02f6c3f5c1     |
      | routes                  |                                      |
      | status                  | ACTIVE                               |
      | tags                    | []                                   |
      +-------------------------+--------------------------------------+

L3 HA to Legacy
~~~~~~~~~~~~~~~

To return to the classic mode, bring the router down again, turn off L3 HA, and then bring the router back up.

.. warning::

   Once the migration starts, south-north connections (instances to internet) will be severed. New connections will be able to start only when the migration is complete.

Here is the router we have used in our demonstration:

.. code-block:: console

   $ openstack router show router1
   +-------------------------+--------------------------------------+
   | Field                   | Value                                |
   +-------------------------+--------------------------------------+
   | admin_state_up          | DOWN                                 |
   | distributed             | False                                |
   | external_gateway_info   |                                      |
   | ha                      | True                                 |
   | id                      | 6b793b46-d082-4fd5-980f-a6f80cbb0f2a |
   | name                    | router1                              |
   | project_id              | bb8b84ab75be4e19bd0dfe02f6c3f5c1     |
   | routes                  |                                      |
   | status                  | ACTIVE                               |
   | tags                    | []                                   |
   +-------------------------+--------------------------------------+

#. Source the administrative project credentials.
#. Set the ``admin_state_up`` attribute to ``False``. This will sever south-north connections until ``admin_state_up`` is set to ``True`` again.

   .. code-block:: console

      $ openstack router set router1 --disable

#. Set the ``ha`` attribute of the router to ``False``.

   .. code-block:: console

      $ openstack router set router1 --no-ha

#. Set the ``admin_state_up`` attribute to ``True``. After this, south-north connections can start.

   .. code-block:: console

      $ openstack router set router1 --enable

#. Make sure that the router's ``ha`` attribute has changed to ``False``.

   .. code-block:: console

      $ openstack router show router1
      +-------------------------+--------------------------------------+
      | Field                   | Value                                |
      +-------------------------+--------------------------------------+
      | admin_state_up          | UP                                   |
      | distributed             | False                                |
      | external_gateway_info   |                                      |
      | ha                      | False                                |
      | id                      | 6b793b46-d082-4fd5-980f-a6f80cbb0f2a |
      | name                    | router1                              |
      | project_id              | bb8b84ab75be4e19bd0dfe02f6c3f5c1     |
      | routes                  |                                      |
      | status                  | ACTIVE                               |
      | tags                    | []                                   |
      +-------------------------+--------------------------------------+

.. _migration-database:

========
Database
========

The upgrade of the Networking service database is implemented with Alembic migration chains. The migrations in the ``alembic/versions`` directory contain the changes needed to migrate from older Networking service releases to newer ones.

Since Liberty, Networking maintains two parallel Alembic migration branches. The first branch is called expand and is used to store expansion-only migration rules. These rules are strictly additive and can be applied while the Neutron server is running. The second branch is called contract and is used to store those migration rules that are not safe to apply while the Neutron server is running.
The intent of separate branches is to allow invoking those safe migrations from the expand branch while the Neutron server is running, thereby reducing the downtime needed to upgrade the service.

A database management command-line tool uses the Alembic library to manage the migration.

Database management command-line tool
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The database management command-line tool is called :command:`neutron-db-manage`. Pass the ``--help`` option to the tool for usage information.

The tool takes some options followed by some commands:

.. code-block:: console

   $ neutron-db-manage <options> <commands>

The tool needs to access the database connection string, which is provided in the ``neutron.conf`` configuration file in an installation. The tool automatically reads from ``/etc/neutron/neutron.conf`` if it is present. If the configuration is in a different location, use the following command:

.. code-block:: console

   $ neutron-db-manage --config-file /path/to/neutron.conf

Multiple ``--config-file`` options can be passed if needed.

Instead of reading the DB connection from the configuration file(s), you can use the ``--database-connection`` option:

.. code-block:: console

   $ neutron-db-manage --database-connection mysql+pymysql://root:secret@127.0.0.1/neutron?charset=utf8

The `branches`, `current`, and `history` commands all accept a ``--verbose`` option, which, when passed, will instruct :command:`neutron-db-manage` to display more verbose output for the specified command:

.. code-block:: console

   $ neutron-db-manage current --verbose

.. note::

   The tool usage examples below do not show the options. It is assumed that you use the options that you need for your environment.

In new deployments, you start with an empty database and then upgrade to the latest database version using the following command:

.. code-block:: console

   $ neutron-db-manage upgrade heads

After installing a new version of the Neutron server, upgrade the database using the following command:

.. code-block:: console

   $ neutron-db-manage upgrade heads

In existing deployments, check the current database version using the following command:

.. code-block:: console

   $ neutron-db-manage current

To apply the expansion migration rules, use the following command:

.. code-block:: console

   $ neutron-db-manage upgrade --expand

To apply the non-expansive migration rules, use the following command:

.. code-block:: console

   $ neutron-db-manage upgrade --contract

To check whether any contract migrations are pending, and therefore whether offline migration is required, use the following command:

.. code-block:: console

   $ neutron-db-manage has_offline_migrations

.. note::

   Offline migration requires all Neutron server instances in the cluster to be shut down before you apply any contract scripts.

To generate a script of the command instead of operating immediately on the database, use the following command:

.. code-block:: console

   $ neutron-db-manage upgrade heads --sql

.. note::

   The ``--sql`` option causes the command to generate a script. The script can be run later (online or offline), perhaps after verifying and/or modifying it.

To migrate between specific migration versions, use the following command:

.. code-block:: console

   $ neutron-db-manage upgrade <start-revision>:<end-revision>

To upgrade the database incrementally, use the following command:

.. code-block:: console

   $ neutron-db-manage upgrade --delta <# of revs>

.. note::

   Database downgrade is not supported.
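Putting the branch-specific commands together, a typical upgrade that minimizes downtime might look like the following sketch; it simply sequences the commands documented above and assumes a clustered deployment:

.. code-block:: console

   # Safe while neutron-server is running.
   $ neutron-db-manage upgrade --expand

   # Check whether any contract migrations are pending.
   $ neutron-db-manage has_offline_migrations

   # If so, stop all neutron-server instances, then:
   $ neutron-db-manage upgrade --contract

   # Restart neutron-server afterwards.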
To look for differences between the schema generated by the upgrade command and the schema defined by the models, use the :command:`revision --autogenerate` command:

.. code-block:: console

   $ neutron-db-manage revision -m REVISION_DESCRIPTION --autogenerate

.. note::

   This generates a prepopulated template with the changes needed to match the database state with the models.

.. _migration-nova-to-neutron:

=====================================================
Legacy nova-network to OpenStack Networking (neutron)
=====================================================

Two networking models exist in OpenStack. The first is called legacy networking (nova-network) and it is a sub-process embedded in the Compute project (nova). This model has limitations in areas such as creating complex network topologies, extending its back-end implementation to vendor-specific technologies, and providing project-specific networking elements. These limitations are the main reasons the OpenStack Networking (neutron) model was created.

This section describes the process of migrating clouds based on the legacy networking model to the OpenStack Networking model. This process requires additional changes to both compute and networking to support the migration. This document describes the overall process and the features required in both Networking and Compute.

The current process as designed is a minimally viable migration with the goal of deprecating and then removing legacy networking. Both the Compute and Networking teams agree that a one-button migration process from legacy networking to OpenStack Networking (neutron) is not an essential requirement for the deprecation and removal of legacy networking at a future date. This section includes a process and tools which are designed to solve a simple use case migration.

Users are encouraged to take these tools, test them, provide feedback, and then expand on the feature set to suit their own deployments; deployers that refrain from participating in this process, intending to wait for a path that better suits their use case, are likely to be disappointed.

Impact and limitations
~~~~~~~~~~~~~~~~~~~~~~

The migration process from the legacy nova-network networking service to OpenStack Networking (neutron) has some limitations and impacts on the operational state of the cloud. It is critical to understand them in order to decide whether or not this process is acceptable for your cloud and all users.

Management impact
-----------------

The Networking REST API is publicly read-only until after the migration is complete. During the migration, the Networking REST API is read-write only to nova-api, and changes to Networking are only allowed via nova-api.

The Compute REST API is available throughout the entire process, although there is a brief period where it is made read-only during a database migration. The Networking REST API will need to expose (to nova-api) all details necessary for reconstructing the information previously held in the legacy networking database.

Compute needs a per-hypervisor "has_transitioned" boolean change in the data model to be used during the migration process. This flag is no longer required once the process is complete.
Operations impact ----------------- In order to support a wide range of deployment options, the migration process described here requires a rolling restart of hypervisors. The rate and timing of specific hypervisor restarts is under the control of the operator. The migration may be paused, even for an extended period of time (for example, while testing or investigating issues) with some hypervisors on legacy networking and some on Networking, and Compute API remains fully functional. Individual hypervisors may be rolled back to legacy networking during this stage of the migration, although this requires an additional restart. In order to support the widest range of deployer needs, the process described here is easy to automate but is not already automated. Deployers should expect to perform multiple manual steps or write some simple scripts in order to perform this migration. Performance impact ------------------ During the migration, nova-network API calls will go through an additional internal conversion to Networking calls. This will have different and likely poorer performance characteristics compared with either the pre-migration or post-migration APIs. Migration process overview ~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Start neutron-server in intended final config, except with REST API restricted to read-write only by nova-api. #. Make the Compute REST API read-only. #. Run a DB dump/restore tool that creates Networking data structures representing current legacy networking config. #. Enable a nova-api proxy that recreates internal Compute objects from Networking information (via the Networking REST API). #. Make Compute REST API read-write again. This means legacy networking DB is now unused, new changes are now stored in the Networking DB, and no rollback is possible from here without losing those new changes. .. note:: At this moment the Networking DB is the source of truth, but nova-api is the only public read-write API. Next, you'll need to migrate each hypervisor. To do that, follow these steps: #. Disable the hypervisor. This would be a good time to live migrate or evacuate the compute node, if supported. #. Disable nova-compute. #. Enable the Networking agent. #. Set the "has_transitioned" flag in the Compute hypervisor database/config. #. Reboot the hypervisor (or run "smart" live transition tool if available). #. Re-enable the hypervisor. At this point, all compute nodes have been migrated, but they are still using the nova-api API and Compute gateways. Finally, enable OpenStack Networking by following these steps: #. Bring up the Networking (l3) nodes. The new routers will have identical MAC+IPs as old Compute gateways so some sort of immediate cutover is possible, except for stateful connections issues such as NAT. #. Make the Networking API read-write and disable legacy networking. Migration Completed! ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/migration.rst0000644000175000017500000000024500000000000023024 0ustar00coreycorey00000000000000.. _migration: ========= Migration ========= .. toctree:: :maxdepth: 2 migration-database migration-nova-network-to-neutron migration-classic-to-l3ha ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/misc-libvirt.rst0000644000175000017500000001141000000000000023433 0ustar00coreycorey00000000000000.. 
_misc-disable-libvirt-networking: ========================== Disable libvirt networking ========================== Most OpenStack deployments use the `libvirt `__ toolkit for interacting with the hypervisor. Specifically, OpenStack Compute uses libvirt for tasks such as booting and terminating virtual machine instances. When OpenStack Compute boots a new instance, libvirt provides OpenStack with the VIF associated with the instance, and OpenStack Compute plugs the VIF into a virtual device provided by OpenStack Network. The libvirt toolkit itself does not provide any networking functionality in OpenStack deployments. However, libvirt is capable of providing networking services to the virtual machines that it manages. In particular, libvirt can be configured to provide networking functionality akin to a simplified, single-node version of OpenStack. Users can use libvirt to create layer 2 networks that are similar to OpenStack Networking's networks, confined to a single node. libvirt network implementation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ By default, libvirt's networking functionality is enabled, and libvirt creates a network when the system boots. To implement this network, libvirt leverages some of the same technologies that OpenStack Network does. In particular, libvirt uses: * Linux bridging for implementing a layer 2 network * dnsmasq for providing IP addresses to virtual machines using DHCP * iptables to implement SNAT so instances can connect out to the public internet, and to ensure that virtual machines are permitted to communicate with dnsmasq using DHCP By default, libvirt creates a network named *default*. The details of this network may vary by distribution; on Ubuntu this network involves: * a Linux bridge named ``virbr0`` with an IP address of ``192.0.2.1/24`` * a dnsmasq process that listens on the ``virbr0`` interface and hands out IP addresses in the range ``192.0.2.2-192.0.2.254`` * a set of iptables rules When libvirt boots a virtual machine, it places the machine's VIF in the bridge ``virbr0`` unless explicitly told not to. On Ubuntu, the iptables ruleset that libvirt creates includes the following rules:: *nat -A POSTROUTING -s 192.0.2.0/24 -d 224.0.0.0/24 -j RETURN -A POSTROUTING -s 192.0.2.0/24 -d 255.255.255.255/32 -j RETURN -A POSTROUTING -s 192.0.2.0/24 ! -d 192.0.2.0/24 -p tcp -j MASQUERADE --to-ports 1024-65535 -A POSTROUTING -s 192.0.2.0/24 ! -d 192.0.2.0/24 -p udp -j MASQUERADE --to-ports 1024-65535 -A POSTROUTING -s 192.0.2.0/24 ! -d 192.0.2.0/24 -j MASQUERADE *mangle -A POSTROUTING -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill *filter -A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT -A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT -A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT -A FORWARD -d 192.0.2.0/24 -o virbr0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT -A FORWARD -s 192.0.2.0/24 -i virbr0 -j ACCEPT -A FORWARD -i virbr0 -o virbr0 -j ACCEPT -A FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable -A FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable -A OUTPUT -o virbr0 -p udp -m udp --dport 68 -j ACCEPT The following shows the dnsmasq process that libvirt manages as it appears in the output of :command:`ps`:: 2881 ? 
S 0:00 /usr/sbin/dnsmasq --conf-file=/var/lib/libvirt/dnsmasq/default.conf How to disable libvirt networks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Although OpenStack does not make use of libvirt's networking, this networking will not interfere with OpenStack's behavior, and can be safely left enabled. However, libvirt's networking can be a nuisance when debugging OpenStack networking issues. Because libvirt creates an additional bridge, dnsmasq process, and iptables ruleset, these may distract an operator engaged in network troubleshooting. Unless you need to start up virtual machines using libvirt directly, you can safely disable libvirt's network. To view the defined libvirt networks and their state: .. code-block:: console # virsh net-list Name State Autostart Persistent ---------------------------------------------------------- default active yes yes To deactivate the libvirt network named ``default``: .. code-block:: console # virsh net-destroy default Deactivating the network will remove the ``virbr0`` bridge, terminate the dnsmasq process, and remove the iptables rules. To prevent the network from automatically starting on boot: .. code-block:: console # virsh net-autostart --network default --disable To activate the network after it has been deactivated: .. code-block:: console # virsh net-start default ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/misc.rst0000644000175000017500000000025400000000000021766 0ustar00coreycorey00000000000000.. _miscellaneous: ============= Miscellaneous ============= .. toctree:: :maxdepth: 2 fwaas-v2-scenario misc-libvirt neutron_linuxbridge vpnaas-scenario ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/neutron_linuxbridge.rst0000644000175000017500000000204700000000000025123 0ustar00coreycorey00000000000000==================================== neutron-linuxbridge-cleanup utility ==================================== Description ~~~~~~~~~~~ Automated removal of empty bridges has been disabled to fix a race condition between the Compute (nova) and Networking (neutron) services. Previously, it was possible for a bridge to be deleted during the time when the only instance using it was rebooted. Usage ~~~~~ Use this script to remove empty bridges on compute nodes by running the following command: .. code-block:: console $ neutron-linuxbridge-cleanup .. important:: Do not use this tool when creating or migrating an instance as it throws an error when the bridge does not exist. .. note:: Using this script can still trigger the original race condition. Only run this script if you have evacuated all instances off a compute node and you want to clean up the bridges. In addition to evacuating all instances, you should fence off the compute node where you are going to run this script so new instances do not get scheduled on it. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/ops-ip-availability.rst0000644000175000017500000000575600000000000024726 0ustar00coreycorey00000000000000.. 
_ops-ip-availability: ======================= IP availability metrics ======================= Network IP Availability is an information-only API extension that allows a user or process to determine the number of IP addresses that are consumed across networks and the allocation pools of their subnets. This extension was added to neutron in the Mitaka release. This section illustrates how you can get the Network IP address availability through the command-line interface. Get Network IP address availability for all IPv4 networks: .. code-block:: console $ openstack ip availability list +--------------------------------------+--------------+-----------+----------+ | Network ID | Network Name | Total IPs | Used IPs | +--------------------------------------+--------------+-----------+----------+ | 363a611a-b08b-4281-b64e-198d90cb94fd | private | 253 | 3 | | c92d0605-caf2-4349-b1b8-8d5f9ac91df8 | public | 253 | 1 | +--------------------------------------+--------------+-----------+----------+ Get Network IP address availability for all IPv6 networks: .. code-block:: console $ openstack ip availability list --ip-version 6 +--------------------------------------+--------------+----------------------+----------+ | Network ID | Network Name | Total IPs | Used IPs | +--------------------------------------+--------------+----------------------+----------+ | 363a611a-b08b-4281-b64e-198d90cb94fd | private | 18446744073709551614 | 3 | | c92d0605-caf2-4349-b1b8-8d5f9ac91df8 | public | 18446744073709551614 | 1 | +--------------------------------------+--------------+----------------------+----------+ Get Network IP address availability statistics for a specific network: .. code-block:: console $ openstack ip availability show NETWORKUUID +------------------------+--------------------------------------------------------------+ | Field | Value | +------------------------+--------------------------------------------------------------+ | network_id | 0bf90de6-fc0f-4dba-b80d-96670dfb331a | | network_name | public | | project_id | 5669caad86a04256994cdf755df4d3c1 | | subnet_ip_availability | cidr='192.0.2.224/28', ip_version='4', subnet_id='346806ee- | | | a53e-44fd-968a-ddb2bcd2ba96', subnet_name='public_subnet', | | | total_ips='13', used_ips='5' | | total_ips | 13 | | used_ips | 5 | +------------------------+--------------------------------------------------------------+ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/ops-quotas.rst0000644000175000017500000003061600000000000023153 0ustar00coreycorey00000000000000================================ Manage Networking service quotas ================================ A quota limits the number of available resources. A default quota might be enforced for all projects. When you try to create more resources than the quota allows, an error occurs: .. code-block:: console $ openstack network create test_net Quota exceeded for resources: ['network'] Per-project quota configuration is also supported by the quota extension API. See :ref:`cfg_quotas_per_tenant` for details. Basic quota configuration ~~~~~~~~~~~~~~~~~~~~~~~~~ In the Networking default quota mechanism, all projects have the same quota values, such as the number of resources that a project can create. The quota value is defined in the OpenStack Networking ``/etc/neutron/neutron.conf`` configuration file. This example shows the default quota values: .. 
code-block:: ini

   [quotas]
   # number of networks allowed per tenant, and minus means unlimited
   quota_network = 10

   # number of subnets allowed per tenant, and minus means unlimited
   quota_subnet = 10

   # number of ports allowed per tenant, and minus means unlimited
   quota_port = 50

   # default driver to use for quota checks
   quota_driver = neutron.quota.ConfDriver

OpenStack Networking also supports quotas for L3 resources: router and floating IP. Add these lines to the ``quotas`` section in the ``/etc/neutron/neutron.conf`` file:

.. code-block:: ini

   [quotas]
   # number of routers allowed per tenant, and minus means unlimited
   quota_router = 10

   # number of floating IPs allowed per tenant, and minus means unlimited
   quota_floatingip = 50

OpenStack Networking also supports quotas for security group resources: the number of security groups and the number of rules for each security group. Add these lines to the ``quotas`` section in the ``/etc/neutron/neutron.conf`` file:

.. code-block:: ini

   [quotas]
   # number of security groups per tenant, and minus means unlimited
   quota_security_group = 10

   # number of security rules allowed per tenant, and minus means unlimited
   quota_security_group_rule = 100

.. _cfg_quotas_per_tenant:

Configure per-project quotas
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

OpenStack Networking also supports per-project quota limits via the quota extension API.

.. todo:: This document needs to be migrated to using ``openstack`` commands rather than the deprecated ``neutron`` commands.

Use these commands to manage per-project quotas:

neutron quota-delete
  Delete defined quotas for a specified project

neutron quota-list
  Lists defined quotas for all projects

neutron quota-show
  Shows quotas for a specified project

neutron quota-default-show
  Show default quotas for a specified tenant

neutron quota-update
  Updates quotas for a specified project

Only users with the ``admin`` role can change a quota value. By default, the default set of quotas is enforced for all projects, so no :command:`quota-create` command exists.

#. Configure Networking to show per-project quotas.

   Set the ``quota_driver`` option in the ``/etc/neutron/neutron.conf`` file.

   .. code-block:: ini

      quota_driver = neutron.db.quota_db.DbQuotaDriver

   When you set this option, the output for Networking commands shows ``quotas``.

#. List Networking extensions.

   To list the Networking extensions, run this command:

   .. code-block:: console

      $ openstack extension list --network

   The command shows the ``quotas`` extension, which provides per-project quota management support.

   .. note::

      Many of the extensions shown below are supported in the Mitaka release and later.

   .. code-block:: console

      +------------------------+------------------------+--------------------------+
      | Name                   | Alias                  | Description              |
      +------------------------+------------------------+--------------------------+
      | ...                    | ...                    | ...                      |
      | Quota management       | quotas                 | Expose functions for     |
      | support                |                        | quotas management per    |
      |                        |                        | tenant                   |
      | ...                    | ...                    | ...                      |
      +------------------------+------------------------+--------------------------+

#. Show information for the quotas extension.

   To show information for the ``quotas`` extension, run this command:

   ..
code-block:: console $ neutron ext-show quotas +-------------+------------------------------------------------------------+ | Field | Value | +-------------+------------------------------------------------------------+ | alias | quotas | | description | Expose functions for quotas management per tenant | | links | | | name | Quota management support | | namespace | https://docs.openstack.org/network/ext/quotas-sets/api/v2.0 | | updated | 2012-07-29T10:00:00-00:00 | +-------------+------------------------------------------------------------+ .. note:: Only some plug-ins support per-project quotas. Specifically, Open vSwitch, Linux Bridge, and VMware NSX support them, but new versions of other plug-ins might bring additional functionality. See the documentation for each plug-in. #. List projects who have per-project quota support. The :command:`neutron quota-list` command lists projects for which the per-project quota is enabled. The command does not list projects with default quota support. You must be an administrative user to run this command: .. code-block:: console $ neutron quota-list +------------+---------+------+--------+--------+----------------------------------+ | floatingip | network | port | router | subnet | tenant_id | +------------+---------+------+--------+--------+----------------------------------+ | 20 | 5 | 20 | 10 | 5 | 6f88036c45344d9999a1f971e4882723 | | 25 | 10 | 30 | 10 | 10 | bff5c9455ee24231b5bc713c1b96d422 | +------------+---------+------+--------+--------+----------------------------------+ #. Show per-project quota values. The :command:`neutron quota-show` command reports the current set of quota limits for the specified project. Non-administrative users can run this command without the ``--tenant_id`` parameter. If per-project quota limits are not enabled for the project, the command shows the default set of quotas. .. note:: Additional quotas added in the Mitaka release include ``security_group``, ``security_group_rule``, ``subnet``, and ``subnetpool``. .. code-block:: console $ neutron quota-show --tenant_id 6f88036c45344d9999a1f971e4882723 +---------------------+-------+ | Field | Value | +---------------------+-------+ | floatingip | 50 | | network | 10 | | port | 50 | | rbac_policy | 10 | | router | 10 | | security_group | 10 | | security_group_rule | 100 | | subnet | 10 | | subnetpool | -1 | +---------------------+-------+ The following command shows the command output for a non-administrative user. .. code-block:: console $ neutron quota-show +---------------------+-------+ | Field | Value | +---------------------+-------+ | floatingip | 50 | | network | 10 | | port | 50 | | rbac_policy | 10 | | router | 10 | | security_group | 10 | | security_group_rule | 100 | | subnet | 10 | | subnetpool | -1 | +---------------------+-------+ #. Update quota values for a specified project. Use the :command:`neutron quota-update` command to update a quota for a specified project. .. code-block:: console $ neutron quota-update --tenant_id 6f88036c45344d9999a1f971e4882723 --network 5 +---------------------+-------+ | Field | Value | +---------------------+-------+ | floatingip | 50 | | network | 5 | | port | 50 | | rbac_policy | 10 | | router | 10 | | security_group | 10 | | security_group_rule | 100 | | subnet | 10 | | subnetpool | -1 | +---------------------+-------+ You can update quotas for multiple resources through one command. .. 
code-block:: console

      $ neutron quota-update --tenant_id 6f88036c45344d9999a1f971e4882723 --subnet 5 --port 20
      +---------------------+-------+
      | Field               | Value |
      +---------------------+-------+
      | floatingip          | 50    |
      | network             | 5     |
      | port                | 20    |
      | rbac_policy         | 10    |
      | router              | 10    |
      | security_group      | 10    |
      | security_group_rule | 100   |
      | subnet              | 5     |
      | subnetpool          | -1    |
      +---------------------+-------+

   To update the limits for an L3 resource, such as a router or floating IP, you must define new values for the quotas after the ``--`` directive. This example updates the limit of the number of floating IPs for the specified project.

   .. code-block:: console

      $ neutron quota-update --tenant_id 6f88036c45344d9999a1f971e4882723 --floatingip 20
      +---------------------+-------+
      | Field               | Value |
      +---------------------+-------+
      | floatingip          | 20    |
      | network             | 5     |
      | port                | 20    |
      | rbac_policy         | 10    |
      | router              | 10    |
      | security_group      | 10    |
      | security_group_rule | 100   |
      | subnet              | 5     |
      | subnetpool          | -1    |
      +---------------------+-------+

   You can update the limits of multiple resources, including L2 resources and L3 resources, through one command:

   .. code-block:: console

      $ neutron quota-update --tenant_id 6f88036c45344d9999a1f971e4882723 \
        --network 3 --subnet 3 --port 3 --floatingip 3 --router 3
      +---------------------+-------+
      | Field               | Value |
      +---------------------+-------+
      | floatingip          | 3     |
      | network             | 3     |
      | port                | 3     |
      | rbac_policy         | 10    |
      | router              | 3     |
      | security_group      | 10    |
      | security_group_rule | 100   |
      | subnet              | 3     |
      | subnetpool          | -1    |
      +---------------------+-------+

#. Delete per-project quota values.

   To clear per-project quota limits, use the :command:`neutron quota-delete` command.

   .. code-block:: console

      $ neutron quota-delete --tenant_id 6f88036c45344d9999a1f971e4882723
      Deleted quota: 6f88036c45344d9999a1f971e4882723

   After you run this command, you can see that quota values for the project are reset to the default values.

   .. code-block:: console

      $ openstack quota show 6f88036c45344d9999a1f971e4882723
      +---------------------+-------+
      | Field               | Value |
      +---------------------+-------+
      | floatingip          | 50    |
      | network             | 10    |
      | port                | 50    |
      | rbac_policy         | 10    |
      | router              | 10    |
      | security_group      | 10    |
      | security_group_rule | 100   |
      | subnet              | 10    |
      | subnetpool          | -1    |
      +---------------------+-------+

.. note::

   Listing default quotas with the OpenStack command-line client will provide all quotas for networking and other services. Previously, the :command:`neutron quota-show --tenant_id` command would list only networking quotas.

.. _ops-resource-purge:

==============
Resource purge
==============

The Networking service provides a purge mechanism to delete the following network resources for a project:

* Networks
* Subnets
* Ports
* Router interfaces
* Routers
* Floating IP addresses
* Security groups

Typically, one uses this mechanism to delete networking resources for a defunct project regardless of its existence in the Identity service.

Usage
~~~~~

#. Source the necessary project credentials. The administrative project can delete resources for all other projects. A regular project can delete its own network resources and those belonging to other projects for which it has sufficient access.

#. Delete the network resources for a particular project.

   ..
code-block:: console $ neutron purge PROJECT_ID Replace ``PROJECT_ID`` with the project ID. The command provides output that includes a completion percentage and the quantity of successful or unsuccessful network resource deletions. An unsuccessful deletion usually indicates sharing of a resource with one or more additional projects. .. code-block:: console Purging resources: 100% complete. Deleted 1 security_group, 2 ports, 1 router, 1 floatingip, 2 networks. The following resources could not be deleted: 1 network. The command also indicates if a project lacks network resources. .. code-block:: console Tenant has no supported resources. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/ops-resource-tags.rst0000644000175000017500000005032200000000000024416 0ustar00coreycorey00000000000000.. _ops-resource-tags: ============= Resource tags ============= Various virtual networking resources support tags for use by external systems or any other clients of the Networking service API. All resources that support standard attributes are applicable for tagging. This includes: * networks * subnets * subnetpools * ports * routers * floatingips * logs * security-groups * security-group-rules * segments * policies * trunks * network_segment_ranges Use cases ~~~~~~~~~ The following use cases refer to adding tags to networks, but the same can be applicable to any other supported Networking service resource: #. Ability to map different networks in different OpenStack locations to one logically same network (for multi-site OpenStack). #. Ability to map IDs from different management/orchestration systems to OpenStack networks in mixed environments. For example, in the Kuryr project, the Docker network ID is mapped to the Neutron network ID. #. Ability to leverage tags by deployment tools. #. Ability to tag information about provider networks (for example, high-bandwidth, low-latency, and so on). Filtering with tags ~~~~~~~~~~~~~~~~~~~ The API allows searching/filtering of the ``GET /v2.0/networks`` API. The following query parameters are supported: * ``tags`` * ``tags-any`` * ``not-tags`` * ``not-tags-any`` To request the list of networks that have a single tag, ``tags`` argument should be set to the desired tag name. Example:: GET /v2.0/networks?tags=red To request the list of networks that have two or more tags, the ``tags`` argument should be set to the list of tags, separated by commas. In this case, the tags given must all be present for a network to be included in the query result. Example that returns networks that have the "red" and "blue" tags:: GET /v2.0/networks?tags=red,blue To request the list of networks that have one or more of a list of given tags, the ``tags-any`` argument should be set to the list of tags, separated by commas. In this case, as long as one of the given tags is present, the network will be included in the query result. Example that returns the networks that have the "red" or the "blue" tag:: GET /v2.0/networks?tags-any=red,blue To request the list of networks that do not have one or more tags, the ``not-tags`` argument should be set to the list of tags, separated by commas. In this case, only the networks that do not have any of the given tags will be included in the query results. 
Example that returns the networks that do not have either "red" or "blue" tag:: GET /v2.0/networks?not-tags=red,blue To request the list of networks that do not have at least one of a list of tags, the ``not-tags-any`` argument should be set to the list of tags, separated by commas. In this case, only the networks that do not have at least one of the given tags will be included in the query result. Example that returns the networks that do not have the "red" tag, or do not have the "blue" tag:: GET /v2.0/networks?not-tags-any=red,blue The ``tags``, ``tags-any``, ``not-tags``, and ``not-tags-any`` arguments can be combined to build more complex queries. Example:: GET /v2.0/networks?tags=red,blue&tags-any=green,orange The above example returns any networks that have the "red" and "blue" tags, plus at least one of "green" and "orange". Complex queries may have contradictory parameters. Example:: GET /v2.0/networks?tags=blue¬-tags=blue In this case, we should let the Networking service find these networks. Obviously, there are no such networks and the service will return an empty list. User workflow ~~~~~~~~~~~~~ Add a tag to a resource: .. code-block:: console $ openstack network set --tag red ab442634-1cc9-49e5-bd49-0dac9c811f69 $ openstack network show net +---------------------------+----------------------------------------------------------------------------+ | Field | Value | +---------------------------+----------------------------------------------------------------------------+ | admin_state_up | UP | | availability_zone_hints | | | availability_zones | nova | | created_at | 2018-07-11T09:44:50Z | | description | | | dns_domain | None | | id | ab442634-1cc9-49e5-bd49-0dac9c811f69 | | ipv4_address_scope | None | | ipv6_address_scope | None | | is_default | None | | is_vlan_transparent | None | | mtu | 1450 | | name | net | | port_security_enabled | True | | project_id | e6710680bfd14555891f265644e1dd5c | | provider:network_type | vxlan | | provider:physical_network | None | | provider:segmentation_id | 1047 | | qos_policy_id | None | | revision_number | 5 | | router:external | Internal | | segments | None | | shared | False | | status | ACTIVE | | subnets | | | tags | red | | updated_at | 2018-07-16T06:22:01Z | +---------------------------+----------------------------------------------------------------------------+ Remove a tag from a resource: .. 
code-block:: console $ openstack network unset --tag red ab442634-1cc9-49e5-bd49-0dac9c811f69 $ openstack network show net +---------------------------+----------------------------------------------------------------------------+ | Field | Value | +---------------------------+----------------------------------------------------------------------------+ | admin_state_up | UP | | availability_zone_hints | | | availability_zones | nova | | created_at | 2018-07-11T09:44:50Z | | description | | | dns_domain | None | | id | ab442634-1cc9-49e5-bd49-0dac9c811f69 | | ipv4_address_scope | None | | ipv6_address_scope | None | | is_default | None | | is_vlan_transparent | None | | mtu | 1450 | | name | net | | port_security_enabled | True | | project_id | e6710680bfd14555891f265644e1dd5c | | provider:network_type | vxlan | | provider:physical_network | None | | provider:segmentation_id | 1047 | | qos_policy_id | None | | revision_number | 5 | | router:external | Internal | | segments | None | | shared | False | | status | ACTIVE | | subnets | | | tags | | | updated_at | 2018-07-16T06:32:11Z | +---------------------------+----------------------------------------------------------------------------+ Replace all tags on the resource: .. code-block:: console $ openstack network set --tag red --tag blue ab442634-1cc9-49e5-bd49-0dac9c811f69 $ openstack network show net +---------------------------+----------------------------------------------------------------------------+ | Field | Value | +---------------------------+----------------------------------------------------------------------------+ | admin_state_up | UP | | availability_zone_hints | | | availability_zones | nova | | created_at | 2018-07-11T09:44:50Z | | description | | | dns_domain | None | | id | ab442634-1cc9-49e5-bd49-0dac9c811f69 | | ipv4_address_scope | None | | ipv6_address_scope | None | | is_default | None | | is_vlan_transparent | None | | mtu | 1450 | | name | net | | port_security_enabled | True | | project_id | e6710680bfd14555891f265644e1dd5c | | provider:network_type | vxlan | | provider:physical_network | None | | provider:segmentation_id | 1047 | | qos_policy_id | None | | revision_number | 5 | | router:external | Internal | | segments | None | | shared | False | | status | ACTIVE | | subnets | | | tags | blue, red | | updated_at | 2018-07-16T06:50:19Z | +---------------------------+----------------------------------------------------------------------------+ Clear tags from a resource: .. 
code-block:: console $ openstack network unset --all-tag ab442634-1cc9-49e5-bd49-0dac9c811f69 $ openstack network show net +---------------------------+----------------------------------------------------------------------------+ | Field | Value | +---------------------------+----------------------------------------------------------------------------+ | admin_state_up | UP | | availability_zone_hints | | | availability_zones | nova | | created_at | 2018-07-11T09:44:50Z | | description | | | dns_domain | None | | id | ab442634-1cc9-49e5-bd49-0dac9c811f69 | | ipv4_address_scope | None | | ipv6_address_scope | None | | is_default | None | | is_vlan_transparent | None | | mtu | 1450 | | name | net | | port_security_enabled | True | | project_id | e6710680bfd14555891f265644e1dd5c | | provider:network_type | vxlan | | provider:physical_network | None | | provider:segmentation_id | 1047 | | qos_policy_id | None | | revision_number | 5 | | router:external | Internal | | segments | None | | shared | False | | status | ACTIVE | | subnets | | | tags | | | updated_at | 2018-07-16T07:03:02Z | +---------------------------+----------------------------------------------------------------------------+ Get list of resources with tag filters from networks. The networks are: test-net1 with "red" tag, test-net2 with "red" and "blue" tags, test-net3 with "red", "blue", and "green" tags, and test-net4 with "green" tag. Get list of resources with ``tags`` filter: .. code-block:: console $ openstack network list --tags red,blue +--------------------------------------+-----------+---------+ | ID | Name | Subnets | +--------------------------------------+-----------+---------+ | 8ca3b9ed-f578-45fa-8c44-c53f13aec05a | test-net3 | | | e736e63d-42e4-4f4c-836c-6ad286ffd68a | test-net2 | | +--------------------------------------+-----------+---------+ Get list of resources with ``any-tags`` filter: .. code-block:: console $ openstack network list --any-tags red,blue +--------------------------------------+-----------+---------+ | ID | Name | Subnets | +--------------------------------------+-----------+---------+ | 30491224-3855-431f-a688-fb29df004d82 | test-net1 | | | 8ca3b9ed-f578-45fa-8c44-c53f13aec05a | test-net3 | | | e736e63d-42e4-4f4c-836c-6ad286ffd68a | test-net2 | | +--------------------------------------+-----------+---------+ Get list of resources with ``not-tags`` filter: .. code-block:: console $ openstack network list --not-tags red,blue +--------------------------------------+-----------+---------+ | ID | Name | Subnets | +--------------------------------------+-----------+---------+ | 30491224-3855-431f-a688-fb29df004d82 | test-net1 | | | cdb3ed08-ca63-4090-ba12-30b366372993 | test-net4 | | +--------------------------------------+-----------+---------+ Get list of resources with ``not-any-tags`` filter: .. code-block:: console $ openstack network list --not-any-tags red,blue +--------------------------------------+-----------+---------+ | ID | Name | Subnets | +--------------------------------------+-----------+---------+ | cdb3ed08-ca63-4090-ba12-30b366372993 | test-net4 | | +--------------------------------------+-----------+---------+ Limitations ~~~~~~~~~~~ Filtering resources with a tag whose name contains a comma is not supported. Thus, do not put such a tag name to resources. Future support ~~~~~~~~~~~~~~ In future releases, the Networking service may support setting tags for additional resources. 
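As a final illustration of the filtering parameters described above, the same queries can be issued directly against the REST API; the following ``curl`` sketch assumes a valid token in ``TOKEN`` and a Networking endpoint reachable at ``controller:9696`` (both placeholders):

.. code-block:: console

   $ curl -s -H "X-Auth-Token: $TOKEN" \
     "http://controller:9696/v2.0/networks?tags=red,blue&tags-any=green,orange"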
.. _operations:

==========
Operations
==========

.. toctree::
   :maxdepth: 2

   ops-ip-availability
   ops-resource-tags
   ops-resource-purge
   ops-quotas

.. _ovn_dpdk:

===================
DPDK Support in OVN
===================

Configuration Settings
----------------------

The following configuration parameter needs to be set in the Neutron ML2 plugin configuration file under the 'ovn' section to enable DPDK support.

**vhost_sock_dir**
  This is the directory path in which the vswitch daemon on all the compute nodes creates the virtio socket. Follow the instructions in INSTALL.DPDK.md in the openvswitch source tree to learn how to configure DPDK support in vswitch daemons.

Configuration Settings in compute hosts
---------------------------------------

Compute nodes configured with OVS DPDK should set the datapath_type as "netdev" for the integration bridge (managed by OVN) and all other bridges if connected to the integration bridge via patch ports. The following command can be used to set the datapath_type.

.. code-block:: console

   $ sudo ovs-vsctl set Bridge br-int datapath_type=netdev

.. _features:

Features
========

Open Virtual Network (OVN) offers the following virtual network services:

* Layer-2 (switching)

  Native implementation. Replaces the conventional Open vSwitch (OVS) agent.

* Layer-3 (routing)

  Native implementation that supports distributed routing. Replaces the conventional Neutron L3 agent. This includes transparent L3 HA :doc:`routing` support, based on BFD monitoring integrated in core OVN.

* DHCP

  Native distributed implementation. Replaces the conventional Neutron DHCP agent. Note that the native implementation does not yet support DNS features.

* DPDK

  OVN and the ovn mechanism driver may be used with OVS using either the Linux kernel datapath or the DPDK datapath.

* Trunk driver

  Uses OVN's functionality of parent port and port tagging to support the trunk service plugin. One has to enable the 'trunk' service plugin in the neutron configuration files to use this feature (see the sketch after this list).

* VLAN tenant networks

  The ovn driver supports VLAN tenant networks when used with OVN version 2.11 (or higher).

* DNS

  Native implementation. Since version 2.8, OVN contains a built-in DNS implementation.
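For the trunk driver above, enabling the service plugin is a one-line configuration change; a minimal sketch, assuming the plugin list in ``neutron.conf`` currently contains only the router plugin, might look like this:

.. code-block:: ini

   [DEFAULT]
   # Append 'trunk' to whatever service plugins are already enabled.
   service_plugins = router,trunk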
The following Neutron API extensions are supported with OVN:

+----------------------------------+---------------------------+
| Extension Name                   | Extension Alias           |
+==================================+===========================+
| Allowed Address Pairs            | allowed-address-pairs     |
+----------------------------------+---------------------------+
| Auto Allocated Topology Services | auto-allocated-topology   |
+----------------------------------+---------------------------+
| Availability Zone                | availability_zone         |
+----------------------------------+---------------------------+
| Default Subnetpools              | default-subnetpools       |
+----------------------------------+---------------------------+
| Multi Provider Network           | multi-provider            |
+----------------------------------+---------------------------+
| Network IP Availability          | network-ip-availability   |
+----------------------------------+---------------------------+
| Neutron external network         | external-net              |
+----------------------------------+---------------------------+
| Neutron Extra DHCP opts          | extra_dhcp_opt            |
+----------------------------------+---------------------------+
| Neutron Extra Route              | extraroute                |
+----------------------------------+---------------------------+
| Neutron L3 external gateway      | ext-gw-mode               |
+----------------------------------+---------------------------+
| Neutron L3 Router                | router                    |
+----------------------------------+---------------------------+
| Network MTU                      | net-mtu                   |
+----------------------------------+---------------------------+
| Port Binding                     | binding                   |
+----------------------------------+---------------------------+
| Port Security                    | port-security             |
+----------------------------------+---------------------------+
| Provider Network                 | provider                  |
+----------------------------------+---------------------------+
| Quality of Service               | qos                       |
+----------------------------------+---------------------------+
| Quota management support         | quotas                    |
+----------------------------------+---------------------------+
| RBAC Policies                    | rbac-policies             |
+----------------------------------+---------------------------+
| Resource revision numbers        | standard-attr-revisions   |
+----------------------------------+---------------------------+
| security-group                   | security-group            |
+----------------------------------+---------------------------+
| standard-attr-description        | standard-attr-description |
+----------------------------------+---------------------------+
| Subnet Allocation                | subnet_allocation         |
+----------------------------------+---------------------------+
| Tag support                      | standard-attr-tag         |
+----------------------------------+---------------------------+
| Time Stamp Fields                | standard-attr-timestamp   |
+----------------------------------+---------------------------+
| Domain Name System (DNS)         | dns_integration           |
+----------------------------------+---------------------------+
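To check which of these extensions a particular deployment actually
exposes, the advertised network API extensions can be listed from the
client; a minimal sketch:

.. code-block:: console

   $ # list the network API extensions advertised by the server
   $ openstack extension list --network -c Name -c Alias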
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.115043 neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/figures/0000755000175000017500000000000000000000000022546 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/figures/ovn-east-west-2.png0000644000175000017500000026236000000000000026140 0ustar00coreycorey00000000000000
[binary PNG image data omitted: ovn-east-west-2.png]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/figures/ovn-east-west-2.svg0000644000175000017500000055701700000000000026151 0ustar00coreycorey00000000000000
[SVG figure, text labels only: Compute Node A and Compute Node B (Interfaces 1-3), overlay network, VM1-VM6 attached to networks A, B and C, distributed routers R1 and R2 (E/W), Gateway Node 1 and Gateway Node 2 hosting R1/R2 with priorities 1 and 2, Internet/Provider Net]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/figures/ovn-east-west-3.png0000644000175000017500000027424100000000000026142 0ustar00coreycorey00000000000000
[binary PNG image data omitted: ovn-east-west-3.png]
[ovn-east-west-3.svg: pax header garbled in the dump; its SVG text labels match those of ovn-east-west-2.svg]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/figures/ovn-east-west.png0000644000175000017500000025406200000000000026001 0ustar00coreycorey00000000000000
[binary PNG image data omitted: ovn-east-west.png; data truncated at the end of this section]
E`0`ۡ( &.xOY^iD"ZeHR$7- ^Rvϟ)TV dwg<#  !"L/,]i4 ,,, H@4m{Z 6 ׯ_'fH$h4`۱ ei r~?\.?Wnw}}`ܤ!J e^̅gG<迗^8 dNOOcjj rDO2 r(JXXX&:8jE<'Ngg*Q`d۷CYS\3g/+k( azX4`nġx/yܼyJ ÐrW^ŋX]]"0J'GxA_bx>طiA6u\O*u|XXX I_W|UR) ߺri( .Tj4H$x%VWWw)xjZ r7l6#B!anedYXVrN:n!!QN^%I~DQD"d2$ <|npNlx|WlAhhRZ ㈱#2dYnIBbzzzW˲|pݨVH$ᣯgϞfoܔ54(BE>G^j(ַj i<Ń:vw h7oD"޵JҗA7G l6<]MzQϟ?jE049łEΒzEEQH~ e@I 0eTpKRPb`%FCCZ fggIcNrRVUU40( ( "XZotnf#9x4q˅zl6t:M TUkDr@ ʑ8HIL&Cy>|'^IP.au߽L&L&Ȳjafff$7;|3fffdvoxo28 as ݞHxoeIs( 墢?0 t:M*B(Bχ@ @bC?( H$xX 6L='t^zУ_nÙ!y IDATzid.:jhPN t:~w}8AZly쩩)ne$ ,Ka }s)@8C"@\h^yL&FfL& RbQ_BUUt:mZ-HD D*54(BUU<= ۈ vzUbA^f(j5yrCr92IOx^J䚦OgW!v(h%UzHg;aVW^E^G" PFY,\.(9;̱=]Uů?s0fq|>RF7Pohj4qp&놿eyW`n6;[SN^*r\.C}~t3^`0`2HRRBšA4_螕W5R/ɰ~:dYFXD>-,a$݊*4^0ؖEA&G&2p9N:eXZ5FXB\A\,wPo6Qo6.l|$p̎";dYFVC N? CZ?N=<BZ8_M ZcaHުTzQzaPCcLWwAlӽ+, ua'‘7Pw0nH yl6l39QbF#'$<_y+6iA5]mt:PU/[ḧp2u?AUB9K1+:=*m}]QeuX,B`08&TRC4rylh٭VLgddU:RN,>kORhg裡7Oԩ$zL>S54ƔqU|4R4}ܓA>'\.ױ#QwUq\# e%`613=p}di!ɢ pV' $ RiߞCㄪ(}7Δz$)W<(Rw54ƔqU|z7Y]u:Ҁm'l9 g666!v ,//nxt{Sհ9Q({ZGpj 4| pu>sMN/^#zmm &i7쪪bee1%X˒qrpVvHfY|c,#eaF8*yA 1e\.V*Q8#Il5KO4qKrŋB}(d2]|W\sLٗ~UfidDQd2צ&zG2hXYYO' 0#0 C"4MC>(xI,;ts^d3>+8+Yd24 \g씇iVT*}Pa0;; $Hs>MӐN GZfVelmm!D&*2>}cC8& x-( VVVv1;;;6a4nLTj&p f`"޾}ۧϚ&DQn:Vejj%c8+>Ap5<Oj5 >rC44 T]y0???1!_ʕ+`/.$ R)nx^8s)r|>gwraF1?(KZ"J{14@.;o}sܾ}a0D)9ׄ_Z:~8 sq`,c^[Cހ>]0pkʨx"f9ZQ"7$Y4b 00 `  =C&E9jҦꫯW_!BUU|G~to /~><`0òrIUU|\$b`qq[XXWǣ? sMDZq-QDQv9___&fffpݾ/_tGw9lV|P֠80.ԋp ;=VF?@ iq$afC"/ Id<< žW;t: w,{*4Mý{`XfcmӧEXڋ^sikLOOcyyxkkkkɟЊ_d^I'sٌW" Ms~Q5 MQ}`myL{=#٣sq~G0k`TΟrt LS1i?_E=O} ^r> X ln. n"8>3͑r)fO>EZ X\\D$<֭[~$** XB> saGGbJ@|;@P(9t:W_p:pmp;p;0M@!==:'1Oj?vO{1LvD nJV|ߵbo`Y_~%޼yׯ=' / Z׿5V+<6Yl6hv; ‹9gϘ+1 5}709( 2 2yd4etG~Ξsk g@A dO~'?-D(.O< ZXXÇ6RV!I,n70䁛NUU_#my?a\E-O?6h|/_\ </ZhVoYjncP |kģY;z ~^k༜?g9& jh!"&PTUih/ îス0F&S{ʐ$ &oCjX~q\w{c|8h΋"oR ~)>=k|?A²  kt.xz -}n$v ,^>IACtwky?;7>̽c޽??ƃtDݼv;z5QnƕE1L$o<(*ۋix.~Ͽ~kXw Hhcu/$Djs5P,d2g95k`Ƙ2O?( xɓ'5~ɓ'{GX컶 YtgAf3VVV** d_A=zQq};iJrw:G?"IzϞ=Ï~#G?"k`a ߜkgη~EvOK|9V(0Dg}u}w~ߺ{{Cy8z jP9h 4A>IN!aY6)/\;lq]|7o"L9BB Ν;C' ͛7t:c Ce~,//jL ?ӧOgA\~^w%IB*?#%r98)ɋ/--ÍFF<>[~sr?L"/͠p9pplY-c'k駟'Od2 `3???T~y]_]]06PՐL&}i }aiak@w(͆gϞٳgO>dh.ƨ׀50PCc ?Cr?5ۇ<籶F6>o@u{mZo~Pm2Ha{n1e0sggGza ߜ///k\9pDVGVGfz ve`1q8`a#;{? [M g@h USNDVϿ|` >)7)o|vD9!.h_|Z}{NN~RV v9˲w?9X'wޡ풍8?sܺu͜1y ~N |8l^Aه'cJEc*FSؒ.AP(gؒPQQQ7y\rS46$Y>Q7Q5'Kz?Oar\gN|;LΪdPŷð0Ӥ(hI~`0j6eGY9i˿zǭf3N;{r'%au3L>eaDDPjh!T=:4J*TF <>/zDLtnxӱ tp(15 xz`\4MCRERZ27nyK?B 1*E$<| * D&T6ٙX\ , ژt4MWodG=Vo_?GwnC 54ΆzS/=nz(cjXOE|t&=R(XxL(UhJ4MC,4E̶r0o}Fty| DՉJ7ȝx}M{7P&jh)SUr*SQœ}F%`a3. ,: uՇR7 ERO_#!r$2Yx }Wd _=D)7?5J7ʵ:nl&npP6ϦP.,@4ȝ6CU~MrPٱVL@>\LS;=TUECލ4e7OH(.3GϰNBY4'Rxb;w.ڟކ|5‘?,}R.6t:PwzN߱z6m*?`yn,ˠjU%neY,Ţ#xtw^DžerP.S;=0g. 
:^U\|n>ub1񝛴BZvLѕoh.g1pUz r}^|T9p fvmJP(ǀc Uz rϿ~ ep MU5 ߼7p 2YPCc JBP 9a=qTR(BB=E4E4RA07HP( 54( |PUEߵ{dzX rЛ?qnTQ(=U[)yn̑_CP&Mޏp-zRq6]BBB۫S^v=d?a C%`_ӝE 0 xðLv:T %P-5h<`AF FXN@ ۆ7]F'·R#gy Bn}w`Y·GULnt;M#Բ0sge9T2_`]')\C ҮBiWݎÝh24DܶMljj @0 ,&QxMIj9P7?+>h<plIvż󽈒3&_*O9; =(ǃ7PMT7}اo ͟⾂<з|#CLT3d1 ?vSvB#ayo1`X|E_A`yQN~ &7Ho3(g+oTޞ8#,314M~HJoOlD6Ko U~(xmG2حVrtG~jyN r,pRthΒTFG*d25yfg щЍ`tY'JB0000hWu`,`9#WrnGyZo6354 ]#*0 Qo@Rtr h]4K+@n SW-(l|vcY,ῇUndl37 t_44YؼWa(lsO滠Z-f5rPC㌰M0hIqPl(xwJoS U~lDn{iߜ} Lf=%󽍺<.'"V665+=Q:]4vDGP=uȫ ]oq )1H C}F/:~dm;‘\,q}^l$t.dC*rHx\(@MS&BQŷ VvMӐ  ewjY^[C [KWd eHw<3h.8Cf|(V;JBlܶY-*}` 8ûxOdh|tF5͊~4DŽBēi5SJ3v(h2TUEBiqu;LF#4M(Iplg3p 22TUF*CGAwg0öJc!;(I:W@}Y[ePX-fLyȽmyOih%'`Zqkyt(ٯ.ރa?S(ʢ-e όxD jh1s񰵤6ҹ<]ϣJB rG`>"F\T#ɢP sZ`'P*2B@tUo~?MM2{wá fkPUut}GP.|}t:πʹ_Z_a =kby>o;J=BP&x侲,CS54΁ []ϡJBWqdn,΃ٮ z;,HasXޝq_vf`HXHH$%QdK(eŲĶTwoޭ[TwԳk?JlYmɲvYHJ$.;@l 03h$8*_-W]%E@`R[T23I ˊ 3afwd}Aqj;0Ƌy>/{h*eY1ξ~d[LP+g^E/! cht.z+4A Gȴ?Ka$[Ogo?f Sdm6axlZ y<σank%cj%/82Bf(fM94o)<$c:KԷv OM>rhDcBMߴbq4wK\aqBw*wgdR ]f`)t'ǖmE >ܭ%ɦoq7Z|*2dѤty0#TGӪ~a[o L?24ξxfQr&(X' 袷XV3H-ejc"FKy:e2..&P KpÎ;nxGπfaȬbNDPbM";>i7F.t,3a˶0͍Nl8N"J[O/5FX2 tVWLq<^0]5. nO- d c F.njÔ? W(YdiflV :;pNthJ 14:x<ƶNu"C\* p)bS+$%T9iOb2btR6 "),6Wy)fk;f63C>F7*zfD"ѽ{2 _!VI^NGí+p55pvdI:SUz/aaYNՀ:֭Y\+&y\og'Q ; 04:mǗtpUlΝLLNzFN'm5W܆̿o)vW0cbr MDeqd*YޡD#<>\nh/n20Eo 5ut>,AX$=4 9ʤ^{*hWer{z9k]Q5Rv‘.^k7A򱣨@}w"Dl h/;7T==7-;u$ zxImB.kh-:pOPwX t2'Y3h}d 3>2)쮄袷Fb8.^O"gKP1H5/G7f*LrQz׊ hI*uV)qj2F.7"I6fT A#0d25"+'.!2쩀^Kk>2ƶ0^LS%?@SGlpɢ C$%Z-./^5M/us*wSoB`ȆNAMyt Fq{p^d2ͨ@I%h.0 vmG~NV #R(H^nu$U<ٖ"pa"(:dDz(ٖg Y]$ncr֨Zݴja5`bֆ,ˢm`Y*ƸBnsܺ<,dÖeEBkؘˍLD2XzD#8ƜhhkzZa2x%NC8]m%JlyԣFb8:{q{ОWQU4]jIJ%ﮠ!tjӭ̙F*.֐ #G a&w at rQРc-]*4 *9΂BN u Ig `d܉!\o aP]Ed]QNZ:{0I:0 2 zlFD7&`(aG ~'PUVl3mfB;12i003`4l4@QSKHBp{'0tcA$Z%T3(HGc.7;\,Ia6`4dC!W\OBR,Oaˍɤ)RMkՂbe4nI,aV)V*!J $8CZXh p@pwJۋ ;J4 vo9=I8J  G>BA,G4G$E D >Ք۠t睜#m[l4z9Yeg0tG!B!dսCcNB!UG!B!dQA!BYuhB!BV%B!U'IuBH Gt`ht ãNN"  :4B6NA)Cޗ(jAՌl ȝDBY6tQDH4jjG)vmDBn&D12}t nB"hC#dX{:-r,fZ-ЪUܣ8_wMyDn4*T{a6RYmطY;p]=ő1 (-FŎ,*^ oo|t>ǘC KJv'dP8)?N 9 |.DZ8_c#%K܊~@r[eÁd)2טˍٍ1WD;FK~oQ1=ec{1lyi))>2[]=hA^7D'Ӕy|prL}R(!* O/lUD1B.÷~ JS%?6$gY{ʶ,.oQy#V3^JQt[[0ƏktXţ ( $dpz_~2GCR%Àc/F1ٵO<L)P8^>p$"8>Iat[O(?sڇsf~)lIad%^E<΋x<FQ[o!)Me7E0!]xᛜB&Tȡi-7{+v"lJuHoʏ~-IOڏ}鬃p$?7 +)}Z5M5]+q q'<>DfMu$L#˰}[{_# x(HJ4٥Fw 8_yRZ}/mhį?T6j#"ӠOu([#)x_.OeFLt}WRkohgRJȱ=T):ǟ of}8R;QmYhͭxWoaH$c8T]6X<ۨkPҒJ4:Mk?qs{+vۿ%kWT'*^C MoIB1 eSs/ ISUQ-?őm9hn_IJ(RLy7ZfE&7#Z~ހXPdMatT LZ׾h ڜ5ůŅQVGNfݰJ`|`n=Bb֤O867ǘǯzGߦp%f_ ˤ,|||*~GⱞuZdpKj "G> 7_F}K߆JHqtW040oh濿}Qܿo \|SSoϏ_&w n6XrTʄPxy&0־@C -a_Îm lj~vFݳs;IqT[;ԝFB~/$C"(XE`o UO2eb!_4)r7Cefw?Dπ=Qm.dH%3d7>l7%G! 
-7қy#=Y[_>}0o|xvWѧ3jv j)KhR涳Ncm=~r9 |G5Pbp$BI-mN~Xx&3Qd6#+#Q0k3Hjkjk2VyUp̨k)hkD#]fD/=kU 3luh !N;FZ(HN<^28yh_#"me <,Ci(/-NqDtrJ|s MY[Kh&qՎRq(!<8p꾯2{vJS$eEP)-jJqD%)D==)nUC."hR{AkR !$qq[(HnD1B.îGDI'2%q} m赜"c!dA; =FQ"3ʶC*}3!fȷvݦ:ggC&0BH]ooj*lnhHOq׬̚Bv|T Q+AǨЖH!̒iV#ZD#0~:bX O%J(clF{,Fa3ũFp;(V3ҨTffdz;(|n8L{glݨfhN S *%,D)_w* @x.k9 5 IDATAa\?8ˆ/\k<&T*u#).\\37 f!tw,hkײݨFGPv(H1[|c1S#H)M8|-F{ކ2c$R (t8yxW~?#)L;IT8e8{?J_߾}&ͯ {3,89~ e`!\{ x4  nD#A'oAⷑXc..c?3Ϛʅo<ƣȔ6_Q; ,.nT;#\FQomPflC`]ϡsi\Ls\Q|𯰭1P/rZ>cdxu8amD.v #o(ڟAhjRqcWeB_\m% G"I5*O&i^D75^ƠkkﴝH{i@(Ia$%)0;s6SۆF;Ӯy̿ ]>=>:}o!Kc8:Peb{M !3DC/!MR A0\3 N"ΜcS}۲`MӅ?g.c1%K(8H4J7 HLqTd#vV|"m,@[vv#gPB-q #3 )ybQ?8FvlV-㾊h &Y)&}F@Vm+IY27'r(h%<ί*v0B%d8\zX^jYx+| #EBȊ z]!#Vˆҏ;B0"B%dːJ$xG|q} \ Z rq)= /px-),␢+r0[.w;Y;T@z޵?/\ nУ0r~0R RL/ĥӨ~ fB^ŷy ?Dc1`9!A(/8&0׍  :bC*gY_qd[L)Erz2<a:*>j2O^xBZY67K!SVe^5HOz׎TBȚDlI'`[ rLO A\-4!БxaȤ_@!sD"x^nLMMC{QуێQViM0 wӏ ̰Ć,B*BP@RARAH<##?hx<>C+7x?W !H`4a0`0`4a4SW:讂l nCN(=(sއaL&磨eee(!dhjjBGG<ϊ_P2<=Q?>  QYY;vBoo/ߏQ1H~3W4C4gj?b`B6J4Ȇt:؈; A0>>qܸq2 ;wDUU@4~?^FuP(6A.cΝ8prss5V466O\߷V~7VEee%*++&^QA61={K)R,x<4ܽRHhllDff&N8J0S!$B!\t D"+~0P(b*HqA,xܺukEI\|/_lQUUE( Ç~[n-`0 V FBR R LD4099 +g"FyH].^u\xOFiiN" .{a^FR \kA$\. qP*bAy^ldz߫ƣ> χ .~>!7$3 2 ǁeY1 ÈF%## -;w||o>Ss19s;vO> $N( Ƶk.8aT~۷oGgg'jkk Bldgg[H>+W͛j9#J|p8 8p> I\z׮]C(fL&˲ػw/z{{rƛo~B|0/`Yb;F144W{#GPSS^\rEVV`ttP,bÁ!B!l߾ǎoP[[ #, ǃN}}}Bb@*4s-dd2l6,aٳ8vocxxW\Ass3\(JD"addpߗ8b1A,r=zhhhaX&cddd5jwV( i/ ⭷JJ2AAA ,XV\.v;pqO CCCs_'OT?X&:nsf3f3^/qF;v GѣGڊŋhoo_#"''G0t:zQSSN]zFHQAҚ qFҹ%硆B! cddDv{>Hzn4R`@8qD__~?_)dffb & b$dĵk5n(**BV[<ǭ[cak׮Eo#G%g{lFII F#FFFt://tp\Ν;a68!)֒x^q 1(1Mk9YEUU4͢bGטq'sJal6chhN<ϣhoo˗166Nw ˑ<1!dYeeebp:s( i "&''Dobv!Hs |>J8e*##ͅ@oo/`0 JaF'$I@"RfǛA\cc#%9T%oAj]tp8  V+hD,D"0LbmlB]U(0Ya`X+\q171Yyd0b1dgg#??^w+FIk 5~ (//JBII 6bH$8p<Vh4Z|Byy9 (FGG<~CP@?q?~<^+W`hh6mގ<ϣ'JNI' <ϣ l(..hD8͛7q***pqϟn`={y'NhDNNFGG_zG3<ǃK.۳o}>Z[[-HGG"I(//`@NNv;v;4 t: PYYYLg<<<,FQvҥKسg=#G}_L!##. oߞs-!z&###ضmrss166??NN;wh4 .n@aa!L&SRb}]={ӧq)֢ʂdiwkX,&l6磨8uN'z{{qa8px8<|>f3ۗ ED`0cǎƍ|2]&~]]]t!i ֖y}}}0&MMMV-~AAAϐJ(,,D~~>ZZZP__;vѣ8u}]8NLNNe~&m0I~?ZZZV/[;xx!;;&IalƠVsNapp.\ŋw^nb1HRx^ /ێF;+p8``4с~Z|N4™3g0::*n򗟟4DZm6y#EEE(((puܹǎo( imB"̫.`0 ^Tq@ Nb ;;:nٍS䈻¾` Bu! L!JP kKTA&e%̬_p:J*\q\}}=˲D"='h4KrV4۷,<i J8GGGGׇR*aTsc,N( imnP(yAAՂtPP(. +(zŲ^`pיfh4t mT*EUUr|M8;W$ t:4   J%r aB!-b.RbDNٹ泧XqZ-$ Νqܢ%bF\ӒH,yZx<&a%$-ӔՊ~ /8IeR) r98qH$b<8b(| IDAT(,,DFFFf}n)W*Coo8-j!X n{ޢd`NlZ7 \m۶jeUmgu:82<<`x>OLX\.,R T  q_Fb57YVEaa!F#jkkԙE %dCITJ())f VԛFF畜] ,h4"77a^ t $NݻwalllmN*Z, =W*6 Y2)y``pɤl5t:#33 i ј-1^cccp\6̲,222č۹BB7 Sx$%0Rѝy~^^0BIVU08uTC$yXWyN'?oY\.444m^LسgC=$h544$JA0<#|+.H!-;;_k%cxDVV, ZȢKx'u:~ikIӰ&&&p5f@x<y߁u)O>d "#dqh P*8wX%a||P(0L0wŝp[$$0wW_}?k!ܫ"|[o1ozj,nnL&h`.VyLNNNNN.ZveY8qǏ䧴=~_̛.t:fdff.qjycccp:V*((s=Y J4Ȇrq׿5> Be222P( >yFz |>߲RI$:t'Ns=Ç/}>!l|; peDyωD"ס) hZ(Jrq!La,DDR)((#>===ȀRQ`0 Ϸd]pe abp8h4z;r 8qB=d2ߏ\ZAIkN駟~EM0 $ AEEE8|0o~Oшo~hhh'|2oDBb:(鑕DI\[f_ciP`0`0=<qyy I'h 8۷555u`Wx<.&^ݻwc߾}t~_RR3g_*>yUN!$hZ| _'Ѐ[nahhhE s7aё@UUvޝT!k-0 TUU .]Zp}6ńad2TTTk( qؽ{7v ׋6tww޷+W 776>l6l;v Oʺ'T*qa>|AOOz|ٳg{gr<yyyD"AMM 144tvv.t͛7Q^^~uJ(,,DUUʨD:P( ^ǡCp!}>5fģr;w0Faa! 
٘\), ͆G'''t:x0 1QJ"@P@@B`0o) go~BR*//O eYf?~|~#G?Bss3b^z%?8ivԠD⚆b|778q:;;o2 ){ݻN?z G\:NMרLLL`bbnxŒIRT*t:hdl2dShZܾ}8|pJc/~4B*XV\|կzGT*QRRe+p`ǎ0}Nuvڅ;v>`Y@yH2 hxgr!33g㣨ٚ-)4H !!.UA:(/W﵋pE׆ǵEHG,ԄH !mwM/ly^vfv晇s)TswHHX߾}n+ћfV+GjG 1ɱsj {Rŋym@ SOn:w^׹ym۶JϚWThEPR\[ݤ$:wLdd$5ѣG&22TRSSQՄb8Dž8 ;ԩS?0:tp 6?___zV^vODbbg͚#<ⶽngeժUnf͚[ov&11#5BhEƚ5kXbE\Y>CP0vXz= >,nv/^v~X|||܎}|Ggq;_'55ՕpkgOs.]JffG"TzOE-Jp{T*$I*7?)4*yEEX'x۷W9@Az!4 |}믿, 6j?|8gfȑ] <}ǖ-[x)((jűX,v\ڊB#** ¦Mr.޲eK6n܈B(19--w=Eezz233;xW]SO*((( G !4 qz;pw0aL&SO <3h4:tEw1uTW:hbb"SN+8W׉fе97l6ӢE ׵*111$$$,nNMMؒׯktgOOQRRRӧvbƌ̘1un$&& \Z>n8OF:WZOѣGzzZnS\hVO@PBh .*4JkU-J:QQQL25O -Zpop8رc|󍫱СC|7ر{X,uFJJ[x']۫ L;i۶-/_&&&ı1699HOm[^O֭[۷/'N/`̙n5 0UVвe<0{zc4 Oܹ{(U>Kȑ#y嗙3gNQ Ji=| |s6bX0 :DƎ˼>))!C8>~xƏvC?f&O\iʌ3\bkѢE| :Lhh(==/+י>uwйsg<68qzzǙ%GC`ifs [KZf,[̭sbÇ3v -u6t{zV+f]_޸q_ͥgϞ%zya!z4 -J&M#!!~ ;(5k֌UVѿ7n\՝%^l6V+}kњQQ}F#dʕL8ѭh==AY AûvrK*U(EEEO?#gϞJO GCpOUFO@PGCPTpсpe`vCѣ}ƍc֭՞^ !4;v,O>$/_׷߫==Ae=&Ν;޽۱;>>,_3|o#%iSe#ҧM !4M$47o?s=Gbb7(cDzi&>%h!hvڵM(ڵcL<$o#.(^ϐ!CXbMj!4Cƀ;w.7|36G !4*BO Bh=-m8'Ncy@ BƎ͛E#؅+B^8q"6&X|Mj!4 4> y'm@ hQ1"}JBCШ$%%5ॡRXd ;v9 #FŌ3-[mSZC Aȑ#lmSȪUxm@ hQ1"}JBCШi*iSEiѢ+V'_9 "FSƎFΝ;޽ͨs:uW_}ń HII9!F3f [nSFΝ;[mW1bgfԨQl߾rȼeÖ_9~4BItdzwgܨa #AC4ko2qT*E3n7a^{t:nF~gRt0b:jCH{16ck#a8s4C]XHɓDD Mѣz뭬_FS}|^I'^&ɬW$+jyo.q[cl1(Ɓ7͝c Y[nvBkdۇ,W6YQ(n?^\ !4ή-[-1Ʉ,Щ6l`ϦMF:E޽QRnL2)SpB"bf1yDt7  +7dgsbܨ^|#@P1ͨj/0d$vV/_dYƔC–]?_>= uѣyGtWD֑aGe$IB% mF.$o%v}č5 W)hX ߗ_1\ -Iٽy3?}9Ϝu$I|ddd0}ts[~Iv0|U|ӡ@vK6!`XP*U6qL £Cq+ ,|FZw`IذۯPgt: }-lyv5E6_.k?4 .ZD b"薟C?p V,^K'Z@XTTjm޹Eнp~aBS8KHHWSJ+E1h&?7Mk٧l~<8l`wA Dꔠs!{U `ۊ 4fkA^^>?._ʵسP*Uwf̘IDD_Vh| 劌pw?ǾkhI9v8zɄJ$$$aʤ L&vI&Ӎ(JI%Eذrᕘ}ٌdFQmX'l*OdehMD9Gy?EӥK!2>}Aұ_U p!b9q'SRȾxɄϏЈb:u5kJ/Kd8?pq2n~!do[sV9|PҀJG Yo!a[2?vTnbi)$ۉr XU$nFR* jN\hQhm ~>:WGg @R0ER#T6lv~+EaoY:9!cc2Ǝ}\wT7S6 V\F]Y4 :W˫* u{saZ>')̾͜RA. ֝Q{5&zng{BW %,,RS^tߟNV@/*zvX, 9? OsU8ŷys |9[z}4:[eh˹߰h}netdvo?$"2m FR"??K\׵3|imw */cn]1$ƧD|Q(0b82 fL׸6 4Y4Z-"iӑz׬Xʿv@e -& w=k IDAT]w;oRY?E@ p̹4Fz/h}LWpP(ՄDQRtjBAP4%MAg0fS>ĈSOymlj͸n fr) erxdS1έUGBG:^`u6x9oM T*1JATU:tR]al|8lKOǦ(Q)+ 2* Eϙ.ygƉzêsN] NH %2*pv0g2l6o04J2VU8=WXm  8z[vҗ='≧ΝOyߐcpW}͞wk>!4Ķe}.+&J5O&+-ͫ~۹'WCu7hN T̺_6cCV"sAat֋ߎPfU8$29q\YcJP7!=pkaa"4PEQ^kwoU?q FWo V8r4W}'*3ZF\) f}^ZHޥU]㼮6D$I(2˗ /Bh/֚(/4*/'{ob]̽ΜJEA(24RS %JVDFe˼ I+_bc^"^h<5mS쁔.pa-eV^2ٶq1(,,AŴhJvRt0y7O%&2;N$ZzEdpHkHxgFzAhxxbUi"#eRSzqubE_Q#||Q!tN TLP@s/勍+e]{`ߟ,]5=46"2;_eNH΃^zygFzAJesK.֭ I}ap8}M׾#۳_⾻oS 9s.!c&aoU2fVAxrg15jGD^e5u*{Q7 uX|,ȐJ{EjJ 4. Sx9Z"?p*rr1 Q=xKx̧ΙkEFtNylg>kdswҡ:ko~#ZPjT/~mq$ 4j%-iO.QtjU4n}Hxշ0vng٦DZ/1 |sz@ CnGpH>w~3ד;7/U( #2ta|Zs{oEv7T[dr$ &T!4^a_rرʉ>,KQ;=SAٳTe2_8|Ѽ6BWD>]P#z"2>]Iq+6硧yJ눖?@P>[CGRQDy,֘W'"2c4ֆ_U;^ds`ז zaiWV/'܅)T>< ֮ec{6t$<"2$#N$h1:ѣys GDxvDllB)W$q!# sK^EIj!) Z}@+_,׸rJiQ$u!#qŹeGW-uGDFi{_K)M۳] %ar IQKjgH9urUI\;R"42RIhTi߯XUQZt"͚yg&sڮ|ǵgGKذ)H>Q Gq{Zٹ{9op(:٥j֨q-ȔcNz$:t?s`2j4œkEd(}胯gV٥*+2[z> QTi!z4uJ^Ny99Wh_q1-sGlhSfЯL/ZVMjoyvWgDJA{hԞ^tѱo_RN TLNNk-#OƕkpZURFcbȐd¼ ˡ* .^cX8v>4-DN),,,W$T_Xތ))4-DN'Ἦuk=zIpl?u U*̥78q ~3FVs5 碭V䔳8zx͟s]ۅT(Z{vl_M TVQƌT%=?zGcn1K ;$I"qkB߂;z\dHb"iswWcȸhrpNq BhF_@@JRvXTZ) vc}Q?LX6!mxlQ?Wy~Z[I՜WrDH$Uy,u*#=% CpZn@ !AQh`\q߮Q2:% O V՞]oat '**~;"y_u='`B⇕R/{DFy^gGb\$mzgH9ѝ:cG IҦ! ˗ "6]ҥK힉7utf-\WT8uJP9.Y 3Hr͘+W5lCF7e.]8SXͥZ+E.f/:n]:1~xILLS? (,vXEl9U@۶mO{?@pTTnznkz` .=FfJ9ٴz,&%fL;=C$>cڵk-܂lGӧ馛?Zfs^/W2&SGI\m ˰ j$R&˗~KO߇D 6 IҫҶ FTJSF9@f%u&2.[,ܟ7QՌ9N$hu׿Ŏ;P(N3gF`\x4:GDj,rYVl_tܙ6mx՗]:cyd+dgeT(mus's! 
1- a% w~*U6SǏҒaF:#>:VJAA>2/pQR@R62hz/!4ؘ7ow}7wy'/F!J m.\M7SO=ĶIQz+$I=@ح&ڄ}(<@ՆbpFBV*| ZjO#~~~=AWB`L0I&?Ĩ@ < >ロgyk>QBykv ވBb6a2 Ia64l=+= ']23v`ƕ u/99g9vVB ݁Pj FALK_ hnV]˲?ٳYvm]L{ya tXPH`EICCUCX;v䮻bǯ';rQT 2Lv*\R>V6zݻ3sL%o2Bhg}]2yFM^~>$&bhk4 fdk6c*,@VsM7q 70hm4_ȸͦ}.au,"""۷oeo0⦡kˢŋ`Q2}K:-z уAĘ1cxbڳ,[ٶ-# (Jr/gq/߅T@lTѶm [&== )((j"2ZVKPPk׎:TJ4T*.]ʸqx衇/ꕯB~~>cƌa^{;3#'τaGTbi(dzE>}PlX_>$mhզah$IrEΝw][C߿?t.^HAAl64 JHPPoߞRm>>IVe3j(㉌3C9_ed=w9. ֭㡇bϞ=y]JJ ֭^N mڴgϞǻgdd駟b|66kNBA)(|VGΞ%jfpy._ FW_}=C||<111֞``ܸq <+1nݺ1wZ}V}sZmGheg1 dL IDATG-h^~cX?~=JFFZ-ZGlll[qΝˎ;ӧOmozFyp`X:Wf/z1p8xbu:h4ŘDR2g,YRϯ.$['$I/{ 4 ; O9w\wNG>}<" Z-3f^`ƍ57!IRXy/ 8Zwo$4Xs!֭[W/x9s搔DΝmN#(()SsN1 #u41c ,hySL!##{۔&Kpp0ׯ?>9A_fÆ \^msF3<Ü9smJeРAL<)S;1@PY{シ[ z^t#""ukpBo#4(>,Yš5k9^Gaƍ>|ۦ[f͚Evv6}M4 w}6ƌ?^ߠr{#111$$$s?{A_{~zBBBmWtL67|ۦ[J%?sa6GBCP+l޼~Q曼+%-qqq,_~իW{^?/vZZjmsO>$k׮6ܹs8q"9996GЀBCq{/ĺemۖ/ۦ4yuƲe˘:u*7o9Adݺu<\8oSo<裢Wn6 ĴiӼm#J}nm6vE~~>ٳksssٲe ?#JF^O>kΓ`(*. O>~߿e&UVqi^2Y U Xf 'OرcL0;#RtUڴiC||<6^p6l@nn. 1ٳg0ͮzItt"`L0iӦP(hѢEJ"**(㹹\s=T Bh*S\\pJE׮]KL@@@۷w;ŢECVʝw٠?8p:t(-[صkDGGӻw m0h߾V+۷o?gΜ9T*K}fǎرJ"2dׯp4ߡiQPPU8ut҅>}w_|juٓݻס).rrr{˲L˖-iٲq͛Yd *o2>>n:XZHKKcʕdddu]z- ?XlΝCѭ[eYftڕN:Ux_IÁnpP(\!4DZtczF#$ap8(JfGMl6Æ 㮻_k׮o00 .9+ QN18gN@u$ ___Z^Wql߾:t<шNC$v{ݛL&~'N>Z뮣cǎ5B׻UF)4֬YCrr2]w]ia̜9oS\gvl6[ŏ BhXz5{%..Σ; F#@cHHH@VXd8 PokS%`@VheYq8X,JsâzB5 rZj1qQB rc,oݺm۶O֭=rϢcQ"8EGcȑ#R銿$aXJ_=+sandYՃQ^n CA! 2B ,[ˢ1 lP# ?gp;:^T Oc,dzdYFѸ,K !2F|QsotRܹ3,cZkTLEc,۷AP(JaUm).̚Z{(W5 JmZ6m<+WdՊÍїk!pr9$InfsSU+ gnc}||\cqgL𤋮Op80 b6]sLs\ӷ5sv翫BcaCLL+ERVP(\jbX*Oi ))Nl6JZv 8vaeV1"2*ׄN5 (W./'OvQYh!p;cZ]UZ`q^kZ]3H@+m%ep0JuG1NZvZrPn,+NL*:Rt,˴iӆ*X՗wP(\(j^NۨnE1TAՒ \!f͏NRe%<4 ZZCc]qv;,q6@:٠p8\$p/_Jj5C vl*v߬s:wP*[$I(922Te?4qiSřaj-/ oh4ryZ93Z JBb4Q*(j6ʲnw g(,,t PVJ]qM5Fꮢ^-h!pf+3`6K\4 ^Yxp./h\UT`Z].>ŭӷEBhip8ʍ4 Srrr@4ٖ+S9v .|+dAQT0Nqʉ 8:EAw9;Y,^Ϥ:ؿQAxc2+mW]z-厾JIScĒow`0g/tcF:WKel6Ʋs,)vIV|[48w\ 2\+ 8gG܊7;bų+6FƲ.A첬cWOr<^e}nFۓ]CwH:>/۷RmLֵM'P(ڼY;{=dR,aVs+@ʳQ@@Ӹxv5,{&eYM&W9a+4 P* v]{v!l҂=D"XV nnAw a|s+ 4w௬V+!a2 y?V{m4 4:9V.F[p5Lh4}y{   ht0Ǎa;GH$ ƫFn(= ڈ?`9 p^l6Ch4|Ė qy sDOAkloPUUD"$ J%dm%(ر \ŢP(XxGg4L&jjj*ހn8?m+v9b{4X`\#+ R j9Ixl^GPPD"f3WuH$Bpp0 l+3 8D`&#C(p`0@rl'88qksl28 N>k6V,;*i\Ǖh4&琽9֟;țhd2dddሎFtt4bbbt1Ӥs`eWScAAAlƞ6sf4QWW`bHR( *^ h2xS\hDEE!::4vxߡ?H(7oDff&bbbRU2 24+a7AAA5۽A#ߡbAhh(͛bdff8uw|7Hc6JJMeAAAP*(++kg߾D!(DEE!,,k/Z øeQ_c. 
/8r._ P(ɓ1@ nݺ8z([x$%%^ߙˎA\.ǬYP[[ӧO8s 0c dM&Ӧj5"""C p=A@:6^BB.\GB.#&&K\t҅k4z|fGH(b())q5L&Ctt4RRRxжd^eGf x} r[ۣ֕o@sl,{d̘1'O@8cƱVE`` OkkKȍ vHk_OHGƍ(,,lZF~~>N=4˄ECCT*yFXVt:Ʊ,N l\LL2VohtrZuuu lv{צdO'6&"Ů|ZԣAbAmm-[[eWWcEEE3q:X!,,՟K|{ i>7q>=&ZVddd`BZ*hK]vܛDFFz>=C.{H@cwnV.au4箽V~9wwioR eeew)a?"!!<@LhMS^^xhM0 Ν;>}xTcjf[]]6mڄ^z >:9hh4nudRعs'V\ye5N :Rrsx/l6СCի_6/4!!!0LGmm-p+VV}v[7wmkN~~>233j*BM@sjC"pKcǽ4o߾T~"88V*娯Gyy9VksK]CCv܉sn=_;ZC,bjaXKܼy骅 IDATx7bl:]P X,^FbS /|w]n߾G_fn>шzD"({^bɓx7^~(\]d2J%L&z=jjj`ZmjirYvkչSհX, P(@ b*..kq#D' ^V PB2Hhwh6ٌUVy} Fe% r9r9o1ػw/Fcz8Ne vWh4jP(N9gΜA}}=}]#r<6BZ(:];vBJJ FEF''xRܦz 2p*ш={ǂ ^~oH$`-FP d{b ++ Xz__[&\ vkS ޽GRVBUP]] PR T HEUU~X'* h4rC~Ʊke2o> 8O<'vR DX @2Թ޽{ /u/1PIRP(,VfdZvs=8wj[V.1& ?3绫>MRC ~M quO?{WcsmK$DFFrȨjb( AIc<Պk׮ҥKxwr7kWX,F``  BZS㺺:ܹSOr9b1Tc\KՊ .͛x]cB}\crhZncT4V\z.]ªUnS(D"ZvP(P(0=7kkk?bҥѣGr9,YM6aذa#A((..oݪ 0!UG @ss5)0 ?P,Oq5,U&q[~1}Qlٲ'N Q h6Hu<& Gbb"^yt AϞ=û krc0 <^?~d^z%7|S޽{wHyyy-~-{.M&R+WRA:={ʕ+ع]tqٓrXj1rH̛7#eK,O?ĭNb{ \]> F7܋dDZcǠV[ZsYWW-[`̙x衇O*g^Ǯ]py 0I!|W^ŕ+Wo} 0l0 6 iiiHOOdBrr2T*Uc+7n̙3xWv鿦L2SLAnn.:V$&&6yѫǞ={0o<- (++Czz:_a0 t:`6Lz)%%ƍ֭[Jfll6@?OQd!xbfݻ`}me]AA222:Mp2[E~:t:7,XR!((Jٳ'~{ė3cƌhѣG ɄpT*DFFrCءR.6$$%%ͷHOOGNNF#V2\@SQQQ={vt:txb[ҥK8y$4 f3␐rYц޽< 换HYf8w;wF[044[̙3 [o.BᴹN͛7QYY FB̛7 N( .I$L6 ӦM`˖ƍvh41czPL>M&_ dӿnNasaFM&V\Ix;Rԩ5L(,,DEEPZZ|ݻwR);.\iH@H$B||<]AA!%b17^@@zE1B:E!Bq; 4!>aڴi.I;v !!!t p5s|A;v mݻC&!!!~a{!BPAHql4rJ$&&"00 ^-[ 68-׹~z̛7s";;}ñcPRR[>Î;@!rgqF$''C*"11<( n\֮]aÆA"`ٲePzrW`0joEAA^{5,^ϟo9s@Vѣ} ضmObӦMҔ7nys^ƍ=z ,, ={RD~~~ ! <sJ%RRR֭[ݷsNT*7/11]v6l%KwށJBTTt:.\c 5c^4/NNVw&{[<T* v9;( n\ΝYf5z7n*++zhٲeرc4 [caҥN[t)֯_l0 Ç;=_Gff&6oތGyaaa/?!ݻ?M8wb2 2Sq`@vv6Э[7DGGT@xYq z_h.\ߣe5cƌAll,oߎb9ro޽{lDݺuüy x<^voCn0i$>}xܾî]p!̜93gę3g<~ ]׳@k^Þ={<~ ķtYNIԩSq_rz?UVA*6X XYYYNf,^j?Coɒ%Xz5?Y/vʻ_Vo̙3T|=]tBZ]u˗}vG};_iiiغuk]v,::Pb( wT.U[K.۷{nXVz@$y>[n8q"222c589/ ?CCCw~>'MHNMMEQQz 6J#::EEE~[x%H0Kz=c-_k֬aX'`֬Y*6Qh;*a0 f3f3z=b/qqq16m;C?;;הr۷e"Q/]gn7ؽ{7vAs}vL4 2 {Ŏ;?{>3իK/wސJxGn֟J/pGnoܸ+Vڵk^^_ƱՇ~ ķX,wBMWr9+`[A" ::%jWXr% n:L4Eimޠ/_|[F: 4@b7h4z$Z^Wvw#@"sw# TH\N.n/Mfz¾ Eg  ɋ%!:-HTf_2f3lB<`R1hx}CA74LBHgXGPkRb0Vo1!ķh 4 4X]U[ŒB|]U:BOm džA祒B:z= / bI!.Јk♝[X ePw>@#GvuDxhK(ByUKBuu*׭hB`$ЪS^U.VCr+'fNäQgݶq;VŒ>UxnVTz4_VZQMIϡ@ D@MCVFaIvp rp-TC7@."V=1j {>P/mc>vmHF|oF f;=w`vk2z[+rv}=Ƴ˼S0-UTN͋}ݢ v!8SQ]z7 4@ ztŅk܂[^ 4&6s.q8my+(Ğ'o8vUu.^^Y'@;.;wTܛw`u lH1^.ow#b_(z= K,A.!:2zBˤ\]͛ Q*XFs4$);׼VA#l@KFv?y>h2q=ȼx %j+wݺ" f_ c5&h2m?Ͽ¡_p , uVgSZղiH*vIь.^Ds+Pk+(΃G_&]k7oq'(Q%.Ɲrc((*ݗܫ?zgbѬH>_{Y:y\π3.z:|}bHd/cEe.]ZY((X,X,@ _S_m?ޱ[, rr}Y.a%,//".zr2yM?N 0q0"XG z=f(d"0 ]c1v` N'mu'O}a\ο۳}7 قOTf6jjt EЅ~v>]^.Q02\ ȼt fNsjƒ8UpL ʑy (h>'!M* ?#0{$;=~m7oA7%ohxsƉ :46'nk sh<t߭uj r"j.Ks hkZKWw6^ٱא/–=7ı|oscîSR'k|{qr k7|zjsg_¼S1ëVFR}'21#e\Ǡ;Mh q1:791wwTnq8b(7uL?ۓdԨi!`aXQr ݫ]$Nk}_Lgh$N{'pM8 1k?6db|fgz>=hOS^4v}xbmخWPȻ=fȠ{ zceu*wIzk k lC 8UGOu9f^KD\`E +ȴ :}^:& {Lj0gZ8-%p$&-z]kӪZlPGRLO+0}}_bO1pדjȼtwH^,MP g7&chí?ácy z=f i+ܼ cnlhѠ#|P`ácØ$ξ㧚|_6s9ԩ5`3ټO{a߿ĎGpvt&,8K>Ys*3۩lJ[Ӵ|[`4A۠ aמ>'X] r 25Z^}?bP/soRbĠ8wF7b̿9fF-;!>> ._mVH=ƍnoC1mhܼ]ۺʠ@׼و W#L r 1OF8u(<&rμS}][Sw$q[4&n@,3!J``DL1{j"jAxH0͚0<]Ϟ: {"2< F3MfcӦ@& P&‡CtwxBB^V8  TbAJVVzq FSVN 93 HC79ٓDo0R v ܗ?XLu%Š>j6$q\Ǘ7JRdv7veCWW8-&q^^yurIL7 z7ߐD$p>~޲p|+KfsxS#T۾pʌqߩgMiw*/bҹ7A^`4UU? ?MpP̞Ҳ!^֠H!_; 4,.Zp9N.uSÐaN e.߫oXIwW+yΫbo;Ŋ[lյu\8d9k.G38kwZVyu! %DקGws}%&E>ܛէ? l˸(3g\cGNM ^'56dı[tʫQ^U ހQcwJx7d_5J<$-7n.PZn޼ Ojmǝ 8JW$#vy[RZQOm})ך2f$I_{>#i` 6/'{vɵ ȵ۴&\ѢmDZ5Z~M  [kĿO&1C`8^9`2gŴ}R`iX P^ndx'ωNo鳾ܲלx74= ذk/w;>6iH i9H9L—my^A!>.;KO wg$8j$^xc >ıgbR|ݓY\oIDATN.6v<٪o+Պ;pۊ@[eПQ#fDžky(,u1~G8j@;v)-av#K("aPEsIM}ӄk7 xk7oj?v =. 
~U1 }fq9e#fNhebh6Ujw%ˆթ5(ero܂+<'._F hX,kbO=:A6گ/gʎGпw/O omǝ G8N>8N$NSB`1XswҖp'3).t@܏b>B$b܅A盶ztب.3t0}Ouu*boG_wdžZ]{Qj.^_[VUAhu:lvyR/%WDIy%fǯW\5u./CAǷ?ěǖ{߯7@[6[OS~Tdl͞:'MDQYJ**umfi5J7x@ mLڍ5 l1lb}{Np RSIO'p =%3RƵy62-5>a&q5a9M55ϓ(΃Gۃf.^ԣCc葇͋+' Hyu<1snr=)p)z{W؛:v$ҲrPeaݼ įTҮKSq(48-8~i*Jnmͦ^רH.kcb?C `#3p_$L;YZOffkP @Eu w!U1o2bU͎mw:yp;} oƛkB 4;.Avl븯yGRypOТq'3xC/ǏiwJ̟> l?1i-i^z{=AR &qXV.-i##kGHųjrtvhCN1 `|VnFח/]{7-GO.ڍ 3-0qp\ʻ=I#;.0g$|G+1f 8>snhZs ċK್s{nhu:uvxhX%; Ţvl@`[c K^zd]C0y-95 ;nGwX3t0nիWrnn,=mRz/ZpoIHIX4 8&؆Z,r wz| 7 x%}IS>hiƕ778m.Rc@ Brz++a g1xo&{ȤRDB+/| ]8[U=)x0q0(EG‚|!|lEGA,AC=lEܧGw<1 I@+EGݕa@9B bU;t0^X4o'M (F1]%(jb%wXl JǛsOZoiN?wvuj iI6q+ QE; 8'q+@2Į>wMk\}.@DhwvoȖ8l9mN$c<&qܞa` cqj;xRUm{oAMmт Z1M_!x~c]7n/n?ߴ{39slAvXH0aC#,7nM[y٩FaރSڭ g>0껝ȾrO(⡉c1#e[ 4;-v K % 'ih~wJ˹bUxv\΋'Tsؼg?oiGib:=Rab1~1.?9z|.{t*Ɨ[cܗ x(hG@b0 |at "xFZl؂rA@̞BA>N `M?ζvQY~ ztǴq1 &5BZGTV6or@ 1#0Mu@W `Ġ=%C@?U\GUMCH'IѷW<$QZȺtĵNDBO*`H9q*o4 SRQk7 PTV UVAg0pBG(B."<41HEУ[W$wVVKfwb&xL*A^=0Oo 2E"ͣ@0-0.!~C$uZۀ;(*+Gqyj h]4BX$L*E\UD8FEk ݻ B! !B!G!B!( B!B!@B!vhB!B܎ B!BQA!Bq; 4!B!nG!B!( B!B!@B!vhB!B܎ B!BQA!Bq; 4j1èlvQ!Bq+ ٘jܸ 7+@1 AOBn! !Bi#hGe84\9U]dr-Qpe Bbb"222] FvvA!ҡPNo@+`45<3Gvw;cJHH!B@4yMXV"@}[_K?AUi `ƫm !Bڂvpg03z b-u0,-D2hoqkť51m4O>XnG}ٳg?O?4 ,@TT߰Zmøbذaݻ;HHHRq!#<{믿˗X~=^~e:u pB߿Պ-[`ѢE{ * HOOǦMs1:u 7npddddϟzO>+͛O6z~Ґ˗/Ν;Fc;w< &NSNaԠYYY0ͨD^^FkO83g믿Ɯ9s=&B!Bin1imTS ܭWB^R#GҥKa@n0blfT~I0 I&ҥKš5k ѵkW+غu+/"d2g/\DXd T*.\pbL0}!H  +6\%dee!55)))ׯΝ;Ǐc l5vO<dB!tbԣю˲q\0d{IM;=Z-}gϞ.!B=h3ńkP|j+X@HxoD'LF~CbB!hB!B܎hB!B܏ B!BQA!Bq; 4!B!nG!B!( B!B!@B!vhB!B܎ B!BQA!Bq; 4!B!nG!B!( B!2oB!Wn&/IENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/figures/ovn-east-west.svg0000644000175000017500000055112700000000000026016 0ustar00coreycorey00000000000000 image/svg+xml Compute Node A Interface 2 overlay network VM1 VM2 VM3 VM4 VM5 VM6 R1 (E/W) R1 (E/W) R2 (E/W) A B C B   Interface 2 Interface 3 Internet/Provider Net Gateway Node 1 Interface 1 Interface 3 Gateway Node 2 Interface 1 Interface 2 R1 (prio 2) R1 (prio 1) R2 (prio 1) R2 (prio 2)   Compute Node B Interface 2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/figures/ovn-l3ha-bfd-3gw.png0000644000175000017500000035143600000000000026146 0ustar00coreycorey00000000000000PNG  IHDR0bKGD IDATxy|Tϙ5ɾ@* ;[]Rm[koVx[VRwqC7T=B I ٓ263`6zٓsNJ>gQn!B!>Tc]!B!#B!bI!B!vh!B!B!Ba'B!bI!B!vh!B!f bYVN'NۍNCӡRIB!BLphiiؠ """ &&Q.BBq.BѣGr5 IIIL:0%B1m@C!&N>dF#&[rEϭkF@[V_6Wr~` qűxB 4bnN8#G0Lwg/z=jӉljz6**L9qAAA_5~ACfd5|) 礥gsӧ٧Nf36 ٧hh4c4 bb@C!FR[[yyyzj$$$xNxvikkjkk=3@6w\Kػ{Ќ (~ެLތTWUV9l?~FL&~(Bxx8QQQDGG B$BR^^NAA'#FFF }ZN**SVJyc= Y-tZ@1#I 'Wz^F23XmlPL (^*R;,vt:sh4zErrl>h4$$$DlltBӏB1JKK),,ֲ!))ɳrwmv{׎وkVYՙ3Sk[>eݞ`b2.\Cku޴Z-DDD!FFn:mv DhtkDSSϳ !==d^% 4b?~}… v~n*9PTO;+wrr A53nճ[LCl\.H4{*,sÃȎK`V\LIezh855TWWy-!ch!pX,l޼Ndd$K.LK{_䕗1نX3#vmt*]W= >Ni0k.j-%%%466zMHH`֬YM!ĸ$B }qqZ-+WT.gI2HFf ܞJvsj(eSȎg`1|Ϭ]AAA,^p !cltvBat: 29̝|K`AzeB|nQ980;X:zM~c 91c T*m۶Q__ބB% 4bp8PTL<6>O,~Zʾ7k=*$s b_˸GtRt:N={zB!ƊB1L EZ.S ?ˢTO=#;w3`tq2q9p8ڵ"B!ƖB1LW2uPU!@C!IHHhX,jM~y9țe ;āM_5z6mDpp'kooye!O !&hZn7DO1W 6 m|)~侼tQ?oz&zc<j5tBqF !.OIt g~.SBFd53'f3ަUգ5˗e0ܻBѦ!vS^^NII ^gyR6׮Q *uav鯿fƩ>>\Hls+x2QQQ~>K!h@C!NQss3z '55$Pé5v[_AEO3E|>[SIm|gMQQqqqBqF !"͡C())KffUd6mػ -wB nO8w3`Jӽd |z e+Nga0PLƕQq{5|ҹ\.nwy!:%pz .\Fd3:&A2![s}Udm''OwdaZ3c˽1wR"~PT>?kPubimIFVv!v% !8#Bȑ#Rdeew9ZzH_.j)Z奲(Ηene d oexފ*#رcB1H)!D{{gwRRӧO7=7;;=_[pq, 7^tW^CBX8Ot;6oW"¯.ұ9{'fgvn08G_qQ " 
wrl])N^͓;.0zlN'OҦqɑ~|*]|<o;ggMF455… Bh! p\1{l:ڹh ͟2qᕻǵX5=y'OWx[(f`[oibҩi\N͟74.ܼ|\3g.՝Jx箿d#gd_M*gL嗛>Lxþ{h@䙃yM ''NPRRBFF!uJ!hkkݨ{\ZGUsm}?VNŇIJqBf\<3Gcq88a2_rY6]N.7[23a1q$$O?pPƳw:kς/4L֞E =Ni@A'bH!~TWwuJNN~fܸ}y =O6nrܵK%p1i3aq{ؓ[gw}LKGs_3󑾻Jz\n~GxCSSuuuPF!#AN !vήը+6+c@?~?VE)1um_1HJQHe~}}cEu]Ka~R2(Oq (njpJ}`(I78r~"""8z(B!ƎBC~+OTtzP)o|b+ H^^?s=l)+ l_ y&Q-m@LL ٳg3eB BA^bwoe`jl}~Zx?1^-]y" [1Lz=Cw&22&w-b`p!;p\Ԋ z~B`^WV{%?A?gGx,VVg{{{[B!Fd4jA t@d!U m| ]uXơ1X }uoB> 4e<{RZ{BV*A2} |ޝg̐3mt:B1:$BZ'^<9*D| 9g -*GM쵒 W8gJC |賫*i%(' $B$B=U'<]`q{?)=Jp '+ǧe{@𷭛li.x Dm l25o3g 3'.a.S L?RNH{ך'ӦMp!}h!@Wשn;gAZM1?JBXyT12il^FzwERyMrNp ~`fQ)э-'p + !Y(immB ܽb5 NMߟMK%lʥ90/ %:\v%s&_nys 'pϺ<ye4 ٽb_u%ѱ+˾J,lλ)S9?N_%DqsOgKxf^j~!(&ҏ'j<. ݎBz2RB!Fd4b{Pw$胺?ӱjz6u\uO?#*?y Օ,~w:5, lKyGVs'EOV=ݎBèm3p9=lmfY489㊜9/+rf >JԵɲkm&HFbmw{ѪԔ4yx0@KMGobCaσI CRu7{V &5VV=0믧ݻwYx1jzB!FB0)((3g6lv`V.S vfކum&CШ8]IrDm&@~ye$G:k&~Wmo#3TWbChu-hWffcv9pVrwMwry'Y\n;5f{ԊjTN͚5?@aa!ejѢEB:%JKK#%%Ǐ`;&ٝ^[;xoS?T[K<Zd1{Ohz_v%MN jkz]k[TWb沬.VI\# umncQ) OLsWK =G75p.ǨӡRTFD2/~M3)}.jL#99xe˖QVVƁKnCre@w7w}p?{ {,4y[ؾ};еFFvv6aaa!w6HFC!HQrrrXh:EYY?$6#]kdxrl 2z%$=s39*bbbX,d!8&BV\Ijj*ٌ34O:}ʀ|m_WU6 _GDy#$?̮%&ɳBG !z=YYY}^Wy?`J⣒T, T=w@8Pld2?g ! h!{Zrғ1_1 *V4`'?xZݹN!9ɡՐ_[[ 5BI !^aEhP*о+[>{VfϦjܾ3q(..&..FGGB 4kh4U- i1]2޺:XIA3\MM O>$!!!{OB۱T*O͛ٽ{75Mz-mZ jVR7 NNvVANݰYv-iii<#m86Vbz- p bbʉFL'V)8NQݨ\t%99\vwbrؙd q` JVXcƺX7 4t2 hnnf׮]bccQռ{~ww:5kUUUl6?AѣԄ C:)lFO;Hh1ЂW7&V;kXhZ&OLjj*s!88jUGit 3pm˙äаa+bjkk#99VL&'Ou%|@L&-B,Bnn.8pEEELFFFӧOg޼yR__:iiiL>~O?M{`Z zڃ2 ^PиmDtZn$<^K.sΡ͆b, fՊb[]`0`0Add$^O{{;MMM}V~fD(,YX6!b̴iӰTTTnGV0E}Iq&!FbV}1b驈G||<\.hhhƌ37ͦQcj 8t':^XbVRϢyEAѠj=?T*4n\Nj4#J(ih89^yvc"Z-:NYףƺx/ 4d2 ĩ)mj䟻qn7kf`:(//b o00jQըT*NLGG*F,***?* ,Yœ9sѣ}ERa4 ::(F#w-]㻋q߲^6!C&DbPY'W-OZ\EK3j OY3%z F{ :j%%%ErGuu54j8)))L&'IRR)))L>JEEUUU8=4&OLVVٌltr8n϶;shPZ^dRˊ2z57vJT1YV-Zfg۱l@:>I1=y3۱@'Z q*Z1Yj]߹~H蠩1͞,BV{F#!!!DFFRPVVFQQNgW7(JEbb" ڈ`Zinn'Nxmq%LK4{:>*9#[?Xk'µ7RMHO+4c]1ˣj僒>>i"pOgthنhaV:55x>LUU.*zZ'`hoo-nr.'4԰X:%Mֲr\Wy--eܫs|^b650;~g$:8x$-f^ohh &&fK## lwu5K~+Ok#-BW'>;n֣N;FuuW "&&n`=لQ|xcrQp7BF݋KZk^~=hj.L䛳xqBj2ydƺ(/hLd/8Vt#X,D9FZLJD@#X^:.+deeEGG---L&,g,ӉVEzg{2-5mA@ysCFb!ܻx.^FM(C0zﯴgsw #ElDo" sxq6|NWo1ۿ{fA tQUqa}[e@IGm=gߦ#5Kˀ8n>)=;qw# jQ:*S7S""yydfU X?Z5J%Bt{Zᠤ:fy;NڈL`j{D'ߤЇ%%X3w@CZDg۹7ybc. hȁ}ÇH:}&Ȋ髮xk [4t+<(e)S}^P}F(,g_T‹ .l\yh4.֬YCfdz uxh@Fss h4ib$yܽ=cLJo~kkWi;9KR%si;l*-}8^ K.gqrs>*9̏7k|vlD j!1ѭ}쫩?&ȷ,0hX2q:4p3бe JC@L9=JōfUWQ״AFM0ۇϜ&<[xWٹs'^{-6mb#]\!FY9^ߏ ^w˝ K! Fg>;Z̟m 11jh_]ۼ^XK/ ƍo'77w*F bӲgSq5O7s=?O~a{W;iico=,T'GtwuBd)N9s>C#X!&}rpȀڸxᇙ1cw߈S hTp\YXHkciNg7|Q~&ozJQx`*vsl;8g߅^w/#L4.L"sl䙼 f3s=ZoSYWXȽ y|qt:~ߎtqɬS_w~u/*E7/r裏R'hx0}~@&Ej)ij`FlXEvlXk0;ԞӑG%ᆷSE)5MYLh.?2{O½ދh颊S#NMxq2ԫլ+(ZRSSoi/5c VoJZ-/7^}6n4Z&M&Ow <y
HQ|MhL]-J H鋮Uɧ~:¥_'gYPe'5ڵz48s?oW3-* 8P[E^*-bܚ?)ᄃu߼&OBjL^?*ࡺ"{&F]` Y>,><%b].(,g,ObZh4hmmᒊCO>LjD_}5礤xJ='h {|ݷlȡBv零+Vzř+ЩլJs}.{Se2YSFg<{-J1)4g&#k=^f> n8v;j*8hL_w !! zS;cuJ Y Qs80DD8>!$naZX¬.נdS*bn$޹jx@VOIyΖR,KSHi|DjWVNCy~#TZq$ИRý߻ףʑF QQbB'nm&~zu:Šb 4h|/V^;y5?ҦFg-gv|ԊŠ\?k.禦nÇq(VrFEhJQ8].lN'/8_/gU/hL`޺N$-bD77;d㑢v ~gcGu!y uB֞Bv\)>{ e%|^VBbhڙ ["`>D(̎e˸4#nQKJ쨬lffa۱l@:I1颈3TA nye:lA $tj_d4[N|9 ~q8^sMǶo;qUvB 㠄mo4Wpyfn"z1fR~=+x_\)  4&0c4YV-Zi1mK@A53kxu*cR) FAVu\Yp]xhMA=r/ cI@T\N}i62¥A:M`tklذ͛7g 9vգUT1AJōse2xMt;V9;c=T0~)qF KS]3OfAGbXHFc TZZňH >Jŕ9ܳ,MiH~,sS9756@MM8¯>ȳx&tMI񗯶pqtΚǢɣtL6`piO7hL`tTrwmqo386ˇ_|E[1%CC "H'HbڰXtZ,^Xn7.pYF#X% Ў!\3q_V 2z9w% wQȴQ @NIˀdorclQ(sa{~oB4`Nezff3eR"q1AIXXkRU`=::X'|N`%g; y;ȈR3U E ,Hˀ[?(8kԑ-h6SP\}Eԩ]]c1uZ,ttǺ*8(c "=%d[I^apYk]Sg|z՚q7ßYo,9;{(Lh1H1:l*=q#pi׾<滘-=+EyY\qLyvC^!vϧ OjȜ:ųgdN*? 7F͢Y3X4kuߴ}E=#'+bD!/?@M^MAT2[{{;׏l"`ޤe@VWࢃsbn#6nw?ҧlʤDnr5S&a|w6pxXe K)(.í_q˕1mT˽7^GYe5/c'L/ VzٜR'b^=}6NKL{&;;ȉK8m.#BBB5kEEE3c@c Hˀhuf/;Xcw-Xs1hNc{{5sɊe2>*xd_D|ګX36I=wx8Nn7|9MzӢGVsIFddQx0Ocqr z㣒ku>oΞ̸QqJ)ÁngѢEL|cǎ+: 4&t3l)+%`9 ؟}.m&>-Z'aRhZf xxv 2B א:~g9Q?^|O76BMBF6J."m]'OgVlrSy70wV-w?6o]{*˚Wq߲l.+Y1\4-=A@}'v\LAVfj e0FL`mr IDATjeqHq-N[L{1k/epj*e`2r :箹}[mjn'=*z@Ztw=y |D߅].7STpCTU@V7YNT'"B͏EۊlSK+5 ưT#o OS:rYO 1:n7_1%_>y~"-SKsy6wA6~/\{kL "1 ȸΛvҊDG>ájjat+85ڜ(nh1=\b٘+*"u+y%*rO1\x1-6+ڻ6Q(8 e08bݺu9r z+8%hL`Cz޼y#U1?vnb{\i]1H o}uR|'(=VɆϷRz86dsezl; =Fm^ ËfBUBgwsec>;8(v#>'X|MLafFn̢x{`c>8RăW:~3bl e08]wEii)^z)=g?P (Pv\k7>ɢl.~A Q(k7}XܲcGՆ墮[cGIquf/ӶO ×*Oe'Niy ZSru=7&o/\ʚ3: N NW^y 6C+3όbD $ИѸ뮻xteh PϢ흝~b6j]}o'DmFôh@iS# >P&QT$NNM`3:v ʚ5k%yr|A'9X2yκtv?Ⱦ<%i)=*8E\sh5';懟a664ryhmi cPj0{Ssyπ"w(`c\ë.sVp!#';.?g r&`Qr kgiѩգsbL){Zod\|ż|bI1 8HEIZ&QwsiTQ/*l ྉrܻv-7weuJl6`M3Ku! u$g49?HVC:9s=3y@+57|?m>_>3V\s9㶯WEQD <>%qё,=oԕdQmkG1*~QgS*,2ݢM !ή&5ZSmT2ӓ =^ ՄM3Br9Gt:*---S)Yhe$e~^#۱ q_|z{!j^c8踈@N'$ѓs?~n\rZΙ9;i9~ F66"PQF*>پS:> ?߻n133AAAz7V)++9%LCR4f1y4$̧$p wZS'^Wk6 6xmj2ÎT*^jV-"6N^JL&M:9p15\.%[ׯ"Z6] W.vt5cX掭ڜMjz>HO6K8f%h"J4q =9g1%KKyK3J̱\29/ >/3Qs5>~|J2uQc=wyqNs.VV NVe82ng%gKM{зؐ 0[[=)Q@qE 3X齀 e%&{ZB]~3SBzzz&11*V#)SnJ`` {mpo'_jMqͶV~u8V*36 M/Uw_?7&WR܂``h4@D=Dcę5s)j/``0>5-]?[P~|1ʚ ȩKѥH~.-n2>N.1چ7}?ͶVl8YXx{wp$}2 b=oTV!Wj ")͍10%tJbffBޞ֡;KL?11ţ!Y&8wIb\ žC*cؤ`qgN՗63ch^{ow y*젨͍<:X^Z['Pd4tuo7xH80"l pp$%^\./f[߃Ma߈繗G\<Aߗ0[himoF4(rI.쫜Q>vdY)FS#,>a=!>wOp_􅳼]E#vnYo#@aPԪ } Gn0O ^RI񽔪 }pSd6'Ҥ؛[9B )nI~ bec8ky<8o~?=>JFj8 ?^^^RdS!HY)SU`8_jZv(6~%w 7t]Ez~)r=~q[07 A(r vL[~q+̿ǹTv'C#ӖGܺ^UƛgO筏#쁽&JO[‚^Qr 0ֆ>cP2A.g6uwqpJ g l {K\tLQYYᅬ#?8x :۶mc׮]S! H,d!k4#ݰ&"ͅ-x!j E`TNdMOxA@.ՍMroREhz~g2*|R .4VV3` `?V79{c=yS¶}81Wm߱ӮՎ{Js8?o D/?Lb ضm$''o>8zkA Ō3xee%oӟ0 8pE1;+LJ+ %ǯK%%%c@3S5g_vsiDR@_TBb 1gi93zra%;Z6??ݻw;Ç%%c"y4f1c .YGuaY7qdGb,<_7>w<`be+zdYӣ5ZwxP^ACs ` W}{a%$akpИdNb A~A:ߚUTr찲bD`wz %|p6Hn}o/R3Dd "rBAI(;}KHHL*`J8sJ,U*[Z+Yv}k߬"fxtbRf<&ڝɗ_~ɛo9bIIјŘ!YcEbZ)M7܋63 |_kM:PnQOGOIU"lD4-\|៳ SAq!z-^yIMbH,f,Ue@׌TUt%,ƛ1mMBBBb0]GɬIfMN_[d$kѨwȑ#Ybjjj&[<1")7)ˤjhtӭ"g~znRJ?" }(|u"ͭl\ W'DHM}'.%>-knSߌB;$$p7xB.`IicJ2xKK %!!Vv=U"JIјYDnp’aOn|Jyk x '.,X[Zzy?ΖmL6Vw'N)kzm$$A!)569h:eiiOS}]yT9Hu7g9 XIHH_U7lVCRe9-#U^lϳVR2f%ү9@Gg_KiWL&w'kK 5C~?; F.#2zlg{4%LY(J~p5v\.?1gΜ 1%LDhrd@mp.jnkDe3 ?οl;Fg/"񶵛l񧜱$ 5kXf owׯl1%LDR4f9ܰw,p}2m۶hm/) Ao0 x`na.n_DO虳rAJMDQS@Sk+2Avߢ$Qx񭼵SDTj\F9:{{E\"݆z;ϳ>$$& @}gLj4tunj>u_{Dzntݻ7o~z>L||'o6aei n.(fI[ 75AKn&2K""hm  e^ !%jՃQTu8:8ƿ;WY]o[xN81e2K hzt:a<-]#O[nbu:tw ²e~v:UZ,e NscpKɲg|'U+qur@Enj\rڴ*w5_6rn[TBqyRChQԨU2DܺEU>sѮ0@R IDATWs h:N|1L_^ 񡬹附aksWߚ-H}NҪܼUOͭzZhFUU@4 +tTj56!4]{bVg*sRm"o 樓iePaia.θ8 "MM`v,͍9;į=SBBb{+bjEe匡B[N/~! 
P(l2.*~'^A_`??V-b 4")3Ԭ\r^VNH́ OVVF%@Օ"8o er4P4[ˤt9Ή w4 j3I(1k+3jm.a *(*$id/Jai9֖lZ5ˢQ*eT#}3N]I&)3k@q1@ mhkj4Zܼ K?(BEM-5;ElXd|+vT=IWo6RJHMD^ 4 z_Tכ?(ykkfOS^wy{+5?ZZ-*N?řdmX3W:qsv%;;ꂻvXZc6C& VۧV"MEדʙӧI>qG$MϛNu]=7oSV}swKErJ-*!`n\4J}em<;8yip^\CA^FAף=`r9* JBX~zinnfz=j!Q*j 8::bc#5hnk/[T2` @ςИ364K{ŘR,j,-G?pL-˿uVihhza!wf]^r)52ch]Sk!5;"pL1Egh{'~Dn y,.x7*$&Nr H^TV0~,%),{O?Χ_}{عA|z|57*cktzĥvm{dO)ex/ y|g'^{IJn#Ǩեm6Zٿ@#)HIEw`@-B6Z\w0Fdnn.wuz?E^o-l@*&WO'AT|q8a$efs[&ܖvg?֯f8i"(YDᄌ(XYcea(ȭ&DDAqr>VUUEvv6$N$MMM455Rd…0+:TWWs9Fm*:455qyΟ?'k֬! `䙫 "}Wr9[ƳiՊ1YƧrg>rRsڽxQ7ţT*ioo'00NG{{;t:r /|{/qQ9x-+1qH4x5= ?YӾ r8.fbe-MJ%=7zzJD7g9(}TǷ8橤[1ܜ?g syA=B ӃNw.!''{{{֮]Kxx?8s &)888`kkk R< R 466Ӄ^7J{{;mmm^՟Jك7ׯgt#! + ^8>(7?{jߢS!3֯J,{Yk{y/gm5gZ&77RiEz8\.㡵}v;J5zz^~)^$Ecߎ3 V O gS2hUd SFw6""jӋ}˻Ɣ3<CFoYjv^l9;Fqqǚy :H]]Fe`0ӃFAբh͘<>MMM׏{ٟrvMLL 6mo>6@puᇻ~M7DШd@3ٸ ЮZ39NZ^db߽C߿jæ| ?/~ꋼw?UHLGט#)S崫 3FdI9D *8pq[/ KjوY(%1WOʴ$d/旯;@EM_Ljva1=$IɨJ6X }d`|ݹ_5f0ѣ.A@.ceeEKK pÙA[[[JJJ(,,X}@P䄓ӀyAsE}}={eҥl޼y+z?aeEPVZEyy9|g\rXBBBP Q _EJKKquueƍlܸFʢvLlقϾ(A!k(Jmm->,vrӮ=\]K;G !nliKi%k\Z˩3ԛ 9a"s.)MV S-áh8pv8{yQ^ݗv9*nNl\lLIјB Oɰ0WwNj)S](A<9riR ŖrS6[8~ ?~Zd/yڻec"L@T ^yu HE+!6Sؘ ={vXؾ};~~~Ç$::ŋZt: fɒ%^'//oA 88իW#ObC^EijjbǎC.GBa GGG ))㉉$:ɓ'aɒ%X[[իW‚W_}8šJETT˗/ΎvRSSOOOٱc/_0_YY޽{y%}Cߜ1n8vbVsҲ[֮Fg\xv,)f2If@Zo>9+45GGG~pA^um6}exg{(@$Eco_`06j53&+^f!XayMarAj4oa3S]w8pdKZe |rWLHbx0S)Ѯxkgܮlq wg'6NL3 <%RZuӸɑ5ˢY8bB; d8;;j*|}})(( 11?عs'MMM$''ĕ+Wfɒ%t:(((@/mhh 99\T*+V`ٲeXZZ@RRt:<<Wy޶wvRV}BZ<J ݽ*mm۶Jrr2̌G)œ?CZ1D>8p7]lfXIј==w1 CS>NϞ/63a ke ="A)Ъ&ա(E)h{td_/&62lQ)>2Q ^nSnI4r!%ittu DvY A~>~V?>`RdڵDGGPBBB())!))={B||<6mbddd‡~`,ホ$%%Q\\-wBTRRRBbb"X[[rJlmm)((@&QZZJgg'O>$eee|״ȜŢEX4L{j;wn>'''vڅBAtt4QQQs%vލqqq<[LJKKYjׯ_Z͞={(..Ύ|Hr9$&&RQQ#7o&22X;_Nss3 .ˋO?һ҈JN{j ?$K'ef47ۨlzy`Om}U7ȋoKnZtHaڕ6﫴zXEÔHIMM ___7olގ{{b|'<`$Ec n56ٲ 160-^T)Z,18RkӪh@g=5j׃GxGE= =A/ hE`et$Da7o %n!3.]affO?cAJKK/9}4=˗/'66bc奿/xzzGQ]]#O<!!!IRRxxx#pB7!88NGii)`^x>Sٳ&+h4wKZ[[ /`=Bvh@ÇSPP` 277lV^ͥKq ;$''ֆ?vP-GGGV^իܹ`0Ɇ LvICfuk1En?7_(KC8g osf~!)Y, }H$W|uKYN?yG5,pXMƚ5QhhSHH)^W54L0z`(gOϹ*a7N\Lńa?Pyyz=׮]Rɸí[ 77ט-z=' hnn&777oAOOAAAlٲ7+)Jv˵b{F9nݺEee%7JH]vX%CѐEј~&cZJqqq^f !!JEdd䘼ݕJ%7o6z%E[9tꮗ˃+s"1{/,5*,0ު%Q|u"]LEJQ:57}4LdՃ' LsIјDz{ 1,"R<1ZaӔ@{J,"' so8,bef۶ 8hAߟ:n޼ >{G&jkkqpp___\\\شiaaadggS^^΍7# sss4 A փmll %=$''$EC;)9ЗKl"Q'!!7oNJ0s𚞞RSS=y.Xh΃?`cLB Md2dˀ doK]C]XYb*AQIfK&`:Y}4F= kCw1 4q.)●k<>$Ecn {+ wW.}n{"QQ5Ŏ)7)^qIuDci6>>;b{+PMvَFW1qsmZ(*N\br9o{w(*?C(q`jnPIߙ|嗼:#qё\L+^=A1IhZ>{ɸ,"tXIqy%;<@{1vEÄv2,3B7֞@_oDo]Md´$[&c?rYhѐJF!==4 ݝåKHIIa~]]]|ҵp Eoo/YZZϝ;… E.$22(KGG)))j gǎCZZ555( BBBl߾777 44:C&o6iZ;6`jfcqvdRUCa25ZC WQԕ>/e,Tcjþ;t::[o+οu*eѥѐpb|121I\N[FPmÚA,*w<$%Oe@b0 ?c挢1]䮷/g}ZǏ/ ^h4Ο?Obb"ƐcǎF̙3TTTƒ%Kx'HLL$--v:;;G !"'Oi~;;;|.vΝ;qrrbp.\P(Xd X[[˗r Zh㱱ԩS Ξ=˭[P*zΜ9yGذa \p˗/^:::طoߠy$G\rq{eT6VC+6VÐIKK#==}zŊ>___bcc$;;/K̚zM8qaccCee%7ߵbwvvR[[iooG.uV"##o<3xzzȶmX~=$''@dd$+WdŊ|dddpU.\HSSӀRn_h?HDLhXZZӟw}l~ic%с`?_o}[I1 \[$,gHMa'2 1sY|)~~̼B=:LI:+WNhh(2o<:::z*FqqqCOll,׮]#%%L.\h :u%%%466b04雬kFGETrڵAes'ٳ~666j+++6nȪUHKK#11tV;Fbb+-]hܖzH m_ .G!l4,|[-2*W yv1GHHƄӣӑUXlKr ~y o= ? ۦ\.-Tˀ%:4}_@ңӑs:9i4Ґdddd ɐdFc8PPPxzzܬT*!**_L&ښ^ZZrǓ4WmZm,l[ZNS&T*(J+82˗s*IGc|W\2"h^XO^g=eq\d "(jl6˾;omLH>f+c 5kְfz-:~ Q4"-dcꢸr\%sIј`rJ^M9}Hy|E,p Ѩt~HSw7/Zo~I HFPПԬsN{),-cXũ#~~~$$$ R ;5ygggcס`0jqh4@uu<!?# ̌߫^{XX111;w_rˁ[[[찷sssT*UUU( T*:^FAMss?Rٰavvv Q\Va|e?(er^Z2@"(޿M/?;~62ߴQ~ߋbiiɮ]صkׄain7ʀ 1v$Ec/.5 ~vvDrC|||/ii Qhz{Qd<ޗ'2L *$Ȩh䗔"̩-:?~ j3!~DA.A`ʵk5Ckk+`YxR+ GCף AMGCXr%ܐ2Ύ{xΟ?O^^ވ}9z= ;\ڵkUL$(*ۏdJs35_z#xaon?;K7`P?9?l% ~y(1]^<8' X++Tr' x+|Qlr@Nϳ3y/R&Ȅ}f[:x{⊪i~ER4&~iϔ(~7t^wuq /o5R׃ `&xm˄ 9'ԏO 95 a'L:ADQ!gV<4sIG&$2( ɳinnΪUի |ݟ\xOdd*T^x/$x'&//,ʦ|1| fJ=E(6n+5Vtz=%7`0X4Qg9KQc=ELDx>\ymJ? 
:xI\0Zν|]ҢF.OB~}mxswxlkL繗/yGR*w-c¾z4&oOUs7Hҥpzceӡĉm-(e2.L0_W.87GMAj0ݟɚPgjܜ9h`vnw RTVAQYNɑ@"tzaiiI\\+W͛r OOODՑjsss ˩fos~'''{D{n+Hk{INc}ff^!5k/6!2O5ͽ7yuK=pm}F2 ]w.7vuxH8;:>~fވBI$5Y%*97FN/9qrOuvdK%KiX$%UlBHD/DYb 3'$|H ⲱQ1zpd% g,y`'$)?wL?ڶG]^̣aq8Wa!/|24dׇ; (HSKofLi%K8TS3~0l)x!kG@Ʌd UA [g,#PbdE{7"Y)J@WwjT]Zh1vbgْrZɬcGFF}G__ʹBOOfy aWAdd$V$fã///RSSIM5dtkaZX,vV+Z^GѠj_4ݛgFJxz0LK***y#|6ZΞ}9U\zG)QNIB&J xO^b="2-v;{&scn<,}ZbFK}w7Ö8r:'$܈ kR~|4M}}̋```Fˤ(DQ4fт44(pFֿ/p$qΣ;MS0'0Ph>+Tg*PF{0FE||zh=b+L=iVpoooRRRHIIY,zzze``Պnn9# /xxxh,3MPh #,,lZ-fӛr>JLX; G _ͯ?T &|5]͑.H.#iG(--jjj\F0uo~}#ImFhTj1Η_NI⏕|׸k=zET4NdzocXvoz[z_A%[<)Kz@ĩ~v̴lVT`Y|VU(hjFCӾL yx?q({EcY2x߉]AWJj*{/D8N˜ZWĝ@VJјHN$P'h:[u3Z$I87+3XNLMKzLD8/?I;|||\Ptwws5n!իW]U &"11ѥu&&v T1Pu'P'%P;K:N{CaM[tĺ j4ndmعy=;66l؀fTo!˕+Wf<{R[[r|ժUl8 ر|޽{&lPXV=oAרxq7@#snn$8woـ( t!`8}CRcYBf&)°AeTv~l8OT=1Xlfبv(~i> V*u/)<$i)s[חm۶38pO>9tX,oZ,22 6Lz){Gw_x)03rY?VQO B}s G i\H1uZ?Y4vMM $$$Cd.S^^>n ɸիW#pd9wǓҥKs`Xr /^z䙙xcC#3h6/QmÛfQ ##^\BUU m{GFF@zzwCBB 76QWWG}})(Ohh(r{a\})Rſ}=*O(>&hFlKtt_{*027<׭#_70Snٶnil>>4\5jINOYwH \36GժT<ƛY%4( ."F,6wat??x8 U/=F%f'NMa`֍ 9,)091[\JMt$Dtɭ?W^… .r2|i&***0Ϣh֭[)..f``ƒF#uuux{{CfϹu5o,QT$$$PQQƍ'Jűcؼy3+]; IDATŅ شi]=7 SG"C]Dx~~!$ix=r= DSr[wUl\;{A-?$srȜ2("dYlF¶9|,zd8W hr 9!t4%_{śq說VZڰި^G+^1kZVYHKK577m۶qǫyEO?ի_J$믓wOBBBҲ%,,); +#p2|\\kms05ی\JQ''5V{~Ɛ"Y#F¶S>O<(c8AJTCc Ckea:VO?NąPDf.w|$I;)Rnx@7Xf044-[r۶m㗿LDWWwFRd֬YCAA7zt:7<3TFHc^ڲ-huV:K8p4e,Xz=9i)ڼ@DjH6R9Xw߷p~vήb"-)wn',HǾ臨5]AuZML׮](:uR뉉_qxxx0444(oKC;_ɲ#tww:00pJa\a6u͵kرcUUU??׻F?vmT!Z8+'OΓOSk^zƇxyzCtx(6׸ODQ4w !+% TQz*.]==NTN1a䦥:nkߢ(LH r^z|[7Iyyu:DFFJʘrHQGGdiwݍ9u\;n57NG_W˓^nl10PJ;w -;[(GQ#&"lrGmM+, ȸ) O&Ɠ;WYH`qDGBXP?ߦw|_wÜ=;t^|Em??:\|%СC$''Rؿ?+VҸ"## `߾}ܹ&yWݞt:q88NN'6 J%ωᠦFu fhhm۶aX w(G$]]|Ui,$0`ƎXEcRGsORu8!z+KaXGBO=FDD/<3@AAo|^ee%xyyU4V\G]BpVZŏcdME^y{=^{5y'HJre۷Oٳl޼YVˉVVwsp8Xh[[[imm^nJTaw80^T< sh,"A0@dݽ}Xɢ:hiiTpWY__w9ӟS^g۶m9reütRV?!!!{I79>טxyyM9~anN\TUwYEH` e7Qz.((,&EcReD--Vsȿ;NƖV?V;k^ޞtj=7Z!Uڲx}9x2*M`dp8nc6nVThZZ-jOO -v1L&L&]]]L&v;CCC8l6NJBѠQ/ r)pg(;Ul6Y NCѸUp:%ׄIBLfX1-$$QUHU}# ړr־(ܴ-S7 GLx+Nj]F}}=mmmR(&b``qbbbHHH %%Nw.qEZZZ~===QV6@ff&neY *hlld``捇f3f.]^III2itF}}=MMMtvvb2ga:7?#W EdhȢ;o)/.P%-m7?MblBGa$>$ n.)I^< HpXc{g y&.^HYYmӥ.JKKh4$''IRRҌgOf;ƵkצK5tuuqq?Ntt47oX@Q4)(ްF={zztbaHyy9Mvv62w RQQ˗illf:HDoo/SX87ۛ8222HLLTȦUmv;M8N6Z~#_CB?O~d |"ԗRu6YH$I'j-1:Kjy{|_%nap_< ~71#z9B]]݌?VVQjbp8ރfkg@@[l!33A]CI[[Gʕ+SR0j5t:<<< Z(  p8$ ݎ$If, VUFybccٺu;Gaqd[YlF#nmt\b49tG!++͛7ϛ_$YѪП d` 55+V9NAi;pH شj97iio^S_U.zw^ D/Du%-$A~[a*3/:H}}}]$FCCl8NymX0 +'?8q|pV,N'N_NDE"88AGGǴF9zz-VX>x&߭(Ç)//p^`+eX*DQ[\il6cٰZg"N'ϟŋ,_-[yI|2G}Z1rH% &k,SM bٺu7ǔTT?q96شj9{Tq)<|F#,\nd ]TVr% <2@ְm*@Σ² 7=Ns$FSRR'\A@REww7AAAdff #'B󣺺+WP[[=WFwuuaZǭFw}UVCͨ(n=.]p$ƍ |}} 88xL&'>>!F#Fڵ}$#IҸI(,,{Υ¢( ZlJKKٷoV(QՄ=3???zzzh4Nv$ysQQQ?>뱘|'477Ox\dyq Hoo/W^Ӊ``0ᐓƇ\g͕+Wz*رCsľc'Hfl~w )62S7j俳T 7klϠ>\8W(w vlwZ}s QxB.idKԺOǏsQXu:O= TUUQPP'|'˗/'''g̶l\r!V\IZZiiiv.]_|jE-[ƦMٌj={KCC˞@$Ξ=KWW_W+I҄Jl6 6b jkkϧˆ@a6iiiZ7UEQCFYf QXXHYYqqqDGG@zz: zxywy饗"AQ4=zǏOz=:sQXXHss3AAAV$  WJLL6$ V?Lff&0E}qAt ]vb,$߿m"Ebb"W\d2?QQWf夥q5^Jmmlq1]]]׏S6$Iŋ*T$I4 OQ]@?_7>sM՛!ުvBw"ISQQU#r(]/qQ xpW/gMNz{.**rdqF㩬$??7,Yٳg)((,[+Wbٸx"X,^z%={ Z-k׮exzzAAAl6PT/̉'vUF\v?'xbZ]PP0NP$$$&Mhkk2ذa=&gR\\Lcc##N4V{{;z7j*<<}ڭ뉋LJF/I^^k׮e\x|JKK&""P,lZ^ZbVT*]9wΝCj%>>Qihhp8N>S ,fʩS/ܾ$ڵkٽ{7.\7b -[b1)?`K񵴴GݍK.e͚5zL& lذ7xPzzz%㭷^ 6V"v!o4Q4ZZa|&u\L'"Cf:{P*-tvbNʹS(-Wdg}rLѰe/_.oIKKyBCCٰa> [nso%Y,YBxx8PUU۷o'77FCuu5TUUͺu󣲲Qe``'|:bx"N- ǎs9Aff&z(hzH6l<}Gii)sU^F.عs'٨T*._̙3gdeD Ci ىl&00+VPQQAww<"EXD̵ŦCEQvGd,YBSS'OɓXkגK]]\rjv;BXX:hkkCӱ~zVZ'ׯ_eeeT*$I466j*éA$I|̸*եK@ll,6m"**A())ٳdee~zVXAMM ۷GC@@eee!I><={jٵk999h4>8^|Ekx)+eeefoVKvv6ZQhnnO>R6 x"111-EFF>W*Z+4 Iii)۶mҳ+/Ç]rj5/ 
j'&&f󐞞͛WJww7ϟw˖ ___z!rsseKE~~> n- IaåK[,N8Ν;=Ĵj4x9 ƍˣsQRR²eX~=>,&sQTTb!>>V+K._NTTO=˖-trE hmm%""ݻwd'N{n9vEEE9p>@=l' tٰ"tJ jX,Znscw4\*** www7mv;$%%OGG& _NII VvExK⩧7ސNcc#ѷ3sMdvZ[[B$<<<\rQFG€U*DGG+ls94%%eyVE)--+"@Q4twwLOOwd`X~KӲ2]…1bZX,HJppYΞ=Koo/Z*FٍTfTU… rU <̄BXղj*Ktl޼UVz7x,x L&K'+ CCC,]۷{YY,lh4oߎ[EN$9>W}91 ~b"_ KvY&ׯJHHȸBIꢵ@BCC'44| ʨ0 fjkkKNdo)..OI8Q6TFEEMh4innC _X,׻FHZF$4 AAAB9v0fh, iq݌Kr0,vTz9ӓ(F#:tA $..Jrh ܲ2FXXKAꈏsndUTTPQQ1BBBHII!%%eusluȩ{>yr:Nt:iii!yGE'Oը"##'""uֹ(\tIj@9[Pd-6c Q &wtt${6eoA-<\X4\/ϭ01 BOOsfeeMx >>>lܸ7pp8իx{{tRh4$&&ヷ7Fbccj!N*Jh%;;EјiZWlv;ߠV%Q,*aw8!T;pPPP=DQ瞜 iiiK6Ijj[%l6S\\,{xljwy'Or96mD[[7]A>ctm`` $''  IDAT҃p8\{P}:H{{;&QVׯsuv\D%44JQQ---T* ~p*++ ==6Wy1Yn( |ZlnZG#DO(\vppx~a(-->///°|k_C IfN eCHpp0Z*S1Y}zt:(b2q2񤤤8m+HDm CN'hECA.fl8࣏>Cg}ÇIJJ"##xzzz(,,ŋ@nn.k֬pG^^9BCC\'x|c``}!$ItuZqG6R233`ɒ%F멯^hDVzjjr)N:%AQSS$I9rшJtra֮]ˣ>ʶm8s %%%455Ndd^^VqoREcOXA388(ǃ///9Tʖv;[NV&8y$bbbt *++`ժUXɓ'immE?IW\K(ʛN6lć"55qnd䲐4>F;lʕ+$IT7-0 wmmm>OOO"##ijj|||CVrJfɓ'$<<4z====r%Ο?FAPZZJll,6lW^SNQ^^.u&UVVKiZ7| RSSIOO]vmBþ}())AӱuVrrr8}4Z>yjkk c}SNa6 ###Ԅ`s kOc28uGԩS,_\044]r3&z{>!3ȑڴ2woRPv{?(mNm6OtJJJ(,,ח~Ο?@aa!---pqނ׻b 6l@^^.\ܹs,\:tj:;;$~vt5T*.\Wx*++%""B5R#9sΝq!jjj~d>۸s ;NN8Kظq#7o|;a``!L&K.\N#''E[]]MBBK.eɒ%Zv 8pǎ#//O [PPs((( ++\]F~~K(ЍJZmJٶv#3gx`Z9GJ z=ё$޺DT/EwX05N$"%%%(2bgg'㤤Mtt-Ce5 +V 77W_Qptžl3]'Fh ZOOO^c3R~L*9 Ia QQQ466ǔqq(cp(KIVVTztttCxx8!!!T[&h{{{MMM:tzl6ۄꑾ=T=|ᇄ/-,**Ba2CCCTUUMRR\c;BBBx'NpI/_ZFV344D]]΅MF+iT"v 'vK_BD~߬qD5~ EChLJmql5"((Μ93.trorc vN-&f3ttt,t+2(rs#}"""i$0  j5*^DQN<#klfhh<',Y^h(,E%<<\5_nV+7`7"V%sGcٰZhZ||| LHa܎w: mD˖y1͜9s/pL/۶mcQXX@t:'t ,2z-%#9);SeLRRR(,,… ۍ0]\V燏ZVZ=v]dv5  usCD׳l2bcch4N8N'<&ɽ>*&8PE\Zly4222ꢶv$2Jgg_ڢ(V I>N![*fQO\\OLSas1ݞ;Z`$ղ`ID?&n#DAT*DFFOjn&Lup!3pBϯmxnX|>m՟bC`Đt_ yG?{U6L&7H-TEE,슂bOW[uU\{[+NQw $BHm1$C$$}]\9=9yyFcuǾ_Be=To, 4$_wH^6BM~K@hrgA5s%^lRoMv`h"gu\FyUJnI:'8qQd&J 8Vzqݝ9s搔Djj*]1LQ3qD,TC_ۻkZ9bee%eee]n $^^^ԣ h8bӕO.W__ߣP2   eWpUW]Eii)wթ=MMMdeeeUR*Bh4b2䕪ֿAP0fNgkVѨolDo0V xN/KMg'h%qS´A1Wy])&A}4i z}} o/Xe-s?YdG6RP3ݔf}BΨKc35M5rk}bMf HCS[ZKʍ{V]pMDs@P;VpZC+΁dihAm TokmV'`x>kF8!*$-oR}a.Š7A~x)Eɳ?PS_ϵW,ݾT//@z\[ dd21nnn5\T.zK_ljsZEQӳCD M_(o*EKK F&l6#BܸGVE9ג Xbcck% 5557&hZY'""PWM~?GQFCNw?oe #Yl`mTm'c=v&- QBu=(y}D]eK}!'~~@DZ9^\E?j.!#',ȉwg|00c1dʗ5t9O$yfE1L06KpTx{Sܻ,i6u(jkkfĈAh4v(z޽[^RRB`` hZ '&&fz=.EVV*++FfeI|||zݎV35LTWWK/P4px 4#cQ e0ytL:C8[NqѬl6moxÂ; H\CRɎ3>x͹(Сml}mg184+J*M VggKH>=:?hU25j& cIީuZƲzr+ƍCuVlΈdbƍ6ㄧ'vKDDŀUKOOgȑn dddXrDQdĉC+ 'b Wl +{L&N8q7bpq}`Ν,[!hf36ۺP16!'8|x Q8*pRxڳV+/wl]\}OƏѧ,i 0it"S'c|݁7Oך={6klKK }'11Om4lܸRSL{%K[o<~8;UJYLQQEEEjh̙3O.Kp2rfܹ|=ʨQJxGsPɓ9r=//;v0{M _f37oD E/F.;ZV4ROdQ؄g[Eo^a-"${'yE1[,GEu: PHꉶIcF ao/iE9ւ7ot:6mDff&fͲipС̞=WEFFxb6md[QQAUUaaa 6Q`̙3w'9f͛7s).EÉqƑB^^Mxd>X,SPPAx0iҤ>CBBÇ/wl|l0a+حF&A}c&t^aHTTIޞHHT dzyx0~, VQӧSSSӡv^^N"..I&zt;lj*3uTA`ӦM6rqq1~~~0( f*ʨ2kf.¥h8wݴf XL&rrr(**+_&w#1 P\\izHH+W`_t)PPPKRR&Mr O]]رcj`` /9'J9ɓaVj9.G57Asd[v#<8~u-:.;o#fX݃ Q FT =md`\d /62X$rrrӓ8NddY,ɓTUu]c&66+VaE3LBxx8_~eUkjj,OZ___كb&%?Q\~}vp1p$1ЂÃn?Ci^Onn.N" |m0.Itt4+Ww𵏏oxl ޽{9|0cǎeL~~>Ǐ'//UV^PpL2)?""$A8u3~{SϤ1mg=)ҏ }]hhjf1=0|B/@=B#w04 =BpAr>KP0E5ΝK||<_}]+9r#G " 6PzYTWWSUUEUUXPV3o8LE???BBB k|?\14 Kh"$77Cۛ#F0n8]xzzd.R9Wm*l #F`5Xi/L"M$$jS.{w(8k!\DFg)"^Z.f1Ȏŭkp&Qm٭L HfU3MHU^xj ?TTװ~pΘO_lց"$y᳄,tX g,Mrr26څTTTPWW@RDpp0AAA@kGϙX= jkk=0LFo+<==嘖 ,) _t"A1zhF XS³ j3&&i*3k,f͚EMM QPP`S_"6684GP3&#g Az *zr" \w5{ FM--罏ú[e1#ؗv6̜(>$lC[1#b;X~FsuT*X}R'`$3kMʴ0WoraWAv(Jhh(ӦMt:yҭ1 dbijZy內y&jjfR4g:tZ>>*Upp0r+_'d~eDэi_Ef:pݢ|u5>nZB!xLh4⇭DGE$j(kZw$>ތKl~8(?_1̤o%C o|ŗ1]@%LY<m{AH^dQTd2_J֚y)$$?sm $} J{g q)NDa(..>ۖHPPsg%** ^{^{cǎa2{xNc֭<rXEFk%O>!--z|||3g-_~|7{スj@PPPn[ZZtz, ۶md21sLY j TIwts'c߿t %ҩP\ZΎ)r׍+ 
duW`2inq<'QTuulP{?X,t;nc򴚆Nڧ󓒒X`Zr^}UBBBǏߧ^ݓPWGӑO7n+;NL&$Ibڵp1~mBCC6;Xݳ1ͼ;Kk.~'RRR:?vg\(Az}vZ^}U.9"{5k{.ᙞθql=̝; t:tM,XÇijj⩧Y`ŋ>}z׏?hlr+ `Μ95#] ?f*9DοP㱢DFMc {}/l8@n, %U465!^ gPSW/`FIa:/#^땝JljxO$qQøw(fƍ#t$ЪMdEAĎ\Z:|}^^X5ݍ0ceO$`Xh3fয়~@+{6 _~yPPP@tt49rs:ճGD8R Y8,\q877zQyX|ɓ<#FH/̄ {(**^ 88XNw.~-;v젩IhOhh(OZWBu0j~G(fl'+*S.{A~VqϪGp_}__}%F&XSG[ ;n\tI)}?<[ Ov$vI#GronDVS~ X,cq$aՒye2>bĄ[{3 se(Z[/RNa8YC]m:j]зFˉ͠כg#<&VNrg3]R4G ooӧ;O---ӻKx o(ϗ'9}4,]BALL ӧOg߾}] /ٳgGnnn>#t.TKcV1xHc{Ag=4j5uQɴů7ԨUx8x̚>iqtҊ'4v[бT ^,gCÌIH;Ņ~dtǰe8qzJ w`9~ƗpoRjeՐ_y /C9y3'OS>V^֭[غu+W>贍qq۷lBCClܸ˴v?6ld2޽{;KuB?SYYISSiii޽FXf_{뭷sNZZZ;0]v1}ty뭷_v^Ub`swt9شl8 djBX:\`?l$. dx!Y٠D o8S?<{;͚fܢc<«<қ| \#:B O&*?9`d( =sÔVJ 3| w`DQ(*P:W2Hi.K"bL*-lx '>c;zCVʫT[K~p?D2,$t6N5VJ;Cuu5k׮E$ `+}+;{6---,\*Y~= .d…:ak\ ±4< +ӧOw^Ƕl^yQ({*P[ű=OSWlD-4wi+')/m]au'FhA M˴E/#+-dK?r wXXRJaI)Fח!-b`hnѢܢnaDQ`,?1klr%z]%&cs{ XuW)񥥱d$LDVmd^~͍o^ARkRvN(@nYnx Wcemm-/[%Ki2Vvlf\fam՝s1#\tD썜r#O&%ݺ%i^/IG2OƶJ}~`0Bwo)@ZG7`ڵ}ݼ+;={ظK/2/zFMWg:<3=`kɹ߻禛n-{:IGm ڊc VȮX{fOH&~v]GDNx@ ?oo͙e3wc'sΕ'1Fh_SNš5~pW/L$541XMcdl {{00lӧOgڢ.Kd6 Gsivc0q-FKC8{e6[4!!bkb)%;RcJEbPTRJYU]OĉS=OB?X,_/u?ߏONtFeM-?GJ [x@yAƌ1ii+.EÉ8_i d&#''E7lDQ˃p&Eh)Inz]M@w+:|.f5z C׳qF6mZ /-$F| P(a6f?oC|s:'_P$U#* _DBƣ3W_}EVVZ O<9*M#~abccgaݺusDMYwSSqAT" XwAP4lKp K(GSC dYҨHrԈZG@%M?Qv :x''//ɓ'f gZ{9~)?&﫯>iU8H|'aoM j6eϦu̼M+7_aD; "W߱7h_7U+ͅsI gX,̎)C#%#|y 1IoldrnD&{Ф>_o8pO>Dgԩ,XT(L&j(; *dB;KeEbPz#"$u_q6X ^WCI/oA/&:2''M6?e˖Nyy\tK՚5kxG:ͬ1Lx/"CW_eU8ⅆ?JM%k^f!wq 8C)X{7ܫ?*}]|A/^LZZSNGGG/a26SSJUAb1X,]+g%1G+ZaD&,t,B}uueG?x"! ֎|￟5kpu[Rw|I. μg}򸼯խXFJ qdLWGbK;Ƨl5j5s$koSR^k #_ٴ-e/Nk Hi$\ypqN9$;_~gw-0'9]8mm oE4_zHJ 3GBd2'_轷>~˃̾wa]I`6rvO_w\ʕ+yygm> 6^Asc 2S[qNX%bB9jma;HD&\EE~tv(ȒhRV %># =RRR>|8^{-QQQl߾ӧwpz%k_;#Gn_Mɴ7MAwّ ~x$Ϧyo#G  Ov05ɓO|R2:!Ve}ǁCkts2IgZR3Enŕe'”'HTNRR]ߝ#!IO|Ɩc26uzԒ%KP*L<"-[Ʈ]Geݺu?~YfkSw!y3<|]]AP6t{|c { `eϡT:GRV-ZĶmۨԩSDGGp 7܀Zݩ~RRR矹M|d I3{IQw=۶% JǍru_3Oȟ(##T9yX:Iĥh8&Dۓz"Kwnp>0B`Us8@Sy dKBCDf%Yn֘^֭[)..`ʕDFF|rHNNc35k֭?DdGp+D[Ďq\Dx j/q{ K"x،^~lZ-JDSSw`-"b5O<ߏQQQlذ~NZH,s6a6:5d!GiaJA IZ’RT+}LH<EsZbD QLSs ~ C%D[.GusōR̢hx1#W}?׭W *ـ$h= Ac ݿy1={nyW=z4~)/E>VKx0~ҽ%K܈JXsq10(*&OuHՄD9Rɗ_~a{NN7o&;m]2f m5HWJUI2NS9 xrd,_d=c"voh?{]`@#4"Y5u ul\Kp2IRЀbiz8OQ,+u揄l``24fnQT`P(Tx2Q]W^/2e ټy3w# oU*G/&55Uoh4p=r-8Iɴ7*9㱢 77N.b fTƠ#zu^>xL&Š+0Yy1vXXb& l- 4bٺD QP !a1zTDn5]hiC?_~l Q .ks2Qm+'vbg g$"nUHD rhۭ)k/_ξ};Mdd$+V>át845e`S}Qn Qdʅ ᤈApq!ڵk6l7xPw.J wUEL#~m(*A蚭1M$<2=ئĎԳ+Vˆd1:|m0d~-# g(Wqwp1Q77njoGt7RTگkHĩgPubm!Tnn.EÅ '։0L<<{7ͬ{> ̅5e9sXz5_|b!/CrWV2DG]O;Q  Ok7(D%,>BΈI#S=Bh+Z0&~d?&Cc6] :-`U8olچj9c…h8)k=<< s~_Mt'qҺN.Fr&GĎ.x#RyHf|mSQn'0촷1RI0]?@๘LfgmM&wyWŹ}u4t h k]~l@DNq|h?/k.i}ͅШygkat Sڥ u%1|ԊݠQiu Mvڛ֚9u'H(<6.V+:{G K’9; *Y;~%*5D_KAʋFܴ?>fjúvK4^_6^㰘Hށg~gFpY4zKp vhߊͅZc@00՟3\XzR߰)'nď-bcB'9odB}5hh }sG/$z s'5l(Ԗe$0|*  *\d.:7p SSX]2?OxBbހ \ܮT>޸)M&$ JJ{+}i4Fޛ.ٲM7`8@ Hi fI~R Z`01[.խf:iFS1h*mI3#=/k<|yϐe}HiI T5RIdpiDO,${kbf*'YUHϽrޖ.UT:J[[_=?OƼAcc#7|xUUoپ};˗ϟ*yNZt -u[˾-Mg+oǢkΙ*N `QDKfq^f?p:P{ZnBuEE6Fڐ(G]CBcN[s<}7sHeWO=)soGcaz- ^1BI>{枛 Y.\==LS=|LmCC4wx^OgU3c0fs#=@ohLog?#&q- """')) F3XRRRyU)d…y|'y4_s.=OPY.˻%疊zFhSA.1TH;JbQ?G_X[d xA{;Ekjn[Yv+aκa/8׾>"WBW}m{'o|?O瓐si?;&R3A `w88Zy?c:5dgyP*j8xZ()՚/>?3bl=v5nl󝞉N֡N?ݓ]Җ+_w2$־:vgB>@w#idJҊ@Ŗro3 T=^OrwVZX6ې]V?Œ~=Ot0F瑿Nt3׼C{;c} NN^lskV>sxU f~DyVV34\RՒghL3 2ٺ{*p:]h4SZ#I՚A?\afN0:*&3|LqJ\N [hz Irgit ߂E/7;? 
Ժv;q1rEaٲH^p1l6螻-zJ>HK96N#C5аOi,f!r4T*yգ߱/ ͭ{ |s] 7`e{S&ƴ$?Vj9S7Wk%|4S`:m5l?C@](˳VOYvPYڛI%7>7RшhYinn8aUv͛7sM7嘏(>jaibt${!'8[س;d\Kr4WyZh;R21<{Q$:x6jp2iBS(` Tx^M(E8LNGQBJ!VkJOsIF͒ɿ.i~OQPPdǹA:`RzZϘVA;|`$j$%I^x!˖-IlJEQQ7o@^DDes˞۰%oO5oP=:BIHO;wInb lte.?Iݡ,+)pcx8 ί4=8ZDXK'p4f3.8To"j`*8KrsՏ]Uuhh91BAF:H\1=@1•J!Ə&:~ЄȲ^QIʥ|Flm 7s_K3E g9Qd=h-Hj5I_%#ᔰi+?e.> RVV$]fG0v YɧyʐΖ{Iӳnd>:mse/`"f4MF5Ϙ LpUOHX,yאs9ᑾ" (a/p:]-|F5w:FO#}p4̇o]$DQsS{& Vʙ+LJEgAd؛X7?szR"YAwK'(,`QkC}FU*-:Ԛ?VPk|#|pb IDATz*MT}]EJ IRJ_'wldGҠN'4>TZԓc?Aք6S/GeFWA1i&"c m?8]fiOy0cגw!19e'9r85؋rA v}>K VLh\ƦϾ`fcnO>;WV*VkC5חwvowr,;cƇ&8 i> !܎ĎxO(MHV15cGQh&=ў`ٽDΧJRwoaK;fv}#'&dLkJsҟjOl2]'"ܩU<᝿'.Lg V+Cv;{=caRIx'gftvu*ީNDJpȣAS֠׳lQ#",4KWj|W 5y](y"HyQnFr+!I6ltE cA1S?Lݙp lLI梳1 Gcj4|㲋+4?rEZ3)"FBAEJ;Syqс5kF{\\H¬q L`䯸R_]LeC ,rv̛k?E  ЄOQ\8892"tL,}N$痝8SOSyRlNzEQ&u923>v8TSoNJ"1-"*e-侟 X<醛=iIE9 ^z8gRO i͢l$ Z$iNjP*E9 R0Y R/C gˆ 0"ch!z-m;zx4pV+q)gyFD%,!,2ގ2V6N2bg*ԚI(VK RiAC4YߤS.}DY[ovJ '16Ȳ餣cmk렻ϳxqב4D$RwZykIBUCR6t1#y\l5%Ǘ^cygKjWh՜RRE$ ͭ橿`wo0~tz~=!T՚mJ qz8' Ջ29d[yK%G܌LWx]p.W."ql;G^ # ]q+.w?64S)\yzXػsOgLBEZ^$b`.7*EQ"H˹ܯud~8Z+.߸VKK{'|#8և()*`W;fM})InɚCQ1Ʈʠ־'(;dwDr2^6Gcٹϼ#)"}o=-7='½ZsqW/{,,@d#(&&pjtx8~u',|*8~#u0S&_-iItNg}D - zM$,S_*M] 2egrسѧ$T>5INo~౿HML|S0EeZ: Ia$|6U&clt"¸o1Ѧ5/Sl4*0={n6=p4fW> n׉ oSsi(u#zJ 99SMܿӟn.1~,ro|OlCS׮3ҸWYJƻdgqF2yp4[z^*_U5dz/x}?_EGq&!ֿNFcKO*=`L]N+{Hw>ϘJB޲[I q@0 Y$JQ̋,֌UYOhz$2!)'ݤDuзәŋek7\٤_dtbZE,^E`\Rwsh>BG?E Y:3SMd:;mYuZz*.;YL%m8 f/o_Nmûf3`݄2g@'Y-*;*´gD(^ t!&R^JH.( sxϢ>$5s `s?GQ'*҈Vwې۶yvH$I\yMhG8`e5O@>³VN^h@y=X^K.Q1#HNbq^!:ʪjx탏ijmU\{隀[~2|8/ct+eJ| B_*(VANNXG˦Fi79x W.')~*Ap{rA^Bl47]}9i~n^! 3)7)"KVY˖`d Q6mG`.۞IZPs9e4:q GDeE*G^Ea `)hۉ;=5!8 %ɮ*I;F d"16CА bn1habjI0*ęK՞eYC8D[g7o})fߊFfazi $M^>DGX(9rرXm6z4wAU]f{jW(.,aR1m+[Gn-ք&nKƁmVK wTj-.oPESA-C.=69!g(Cr(&c K9oŲx5OF ʦ_Pzb0@p<I|UtQ@U(;Y,}v[g,4<•wA0S)zǧCNL磟v` 188aGQSπ+ +4DABl4rYY3dbv:LCs('+!hdI~( Ґ@VlDX0FV`I,`]6G JEXh)EzWۜ9-o:ЂcEQ$ *i.Oڑ7*€r2l=Vh>>waeB1_rqEqZK`L}.Z]U0Tbħ˂} ݭHHtn7Ӊ>AG8a[HΚcG^崍yOQ'Yz7 +(8v3f!]@0$Oxd&{6ҭ?'Z\<+vmuWU K_=rH.->CgΖ]((TZ>shAK wC&VBl̢7Q_QY SJ!A4$dֆ-5ŧ¥7j8\!<2ez_h,۩-{JrނF;}ZEZZ0Tc!k[$dQk7Nz; k棛I^Ш h/$[,ihAz6kXt%5n䪟⎀DF ~-k(.ƭ1 ׊;`QkB+Tx5@Wn+ӂS:,47ysw%*.PO$2:Ș,-|ktPIEѓ|QqN@ ka룯<#4 ]ȐW)}h +.~hq=.H{0&|_1D$+4m@TrrF;:'B "j{#:$ď b,%v2\yOn݇ޏV/F8iRjڛ+Gc"c -{χG'KPq2hIվk·Ok@ 8uj @)CjKĥއ `ӛ%"*ct. Nf%ۑpG1P$b◒htp:f\RihoJH-1"uJ(̎F<$`Y`~}OtZ'n!,(ؔXzbWru<ԚP"L F19>+T>uG^I\C֢oߛ@ȲRj Pωwx}1:CiٲgډҢV)>1N똁J9vtNob՚CAH.e ]!]Z(9Vz ]U. 
~e:Wh~3B gK^5c lmX:&X(Bя!ZD ]vok5:3.yT^0ݐ<]cWP>D=&B #9bt@ 8=lwWUEoSW%X**r -ITdY SX4T546*V_tz,Z&L GCtNU3ǭ"/@LҊY,ﭥbۺeڐθ!2`nne9Nd<=a4OExt:i$ݕTyxJSW(Lc[4TBCdL>yo{tmTj۱,Y%L&5$PꇳfWN*ߢd33pѴ^9 7eIRO  OZ_qʽa7{t:#˾O*7;;8n%pA" At cӿxR&C%Yu#S4?07RU ) /%.i=nȞ<SkB)]a1U# 7f)`N@r qƘѹTScC'Hμ̢oRְMARi(ZCLqB!*!EGvTC 䌋 h=@Jqy}M% Ji49 K_ͮ![Gv>3ӛ_/Ϊ-cuv=᦬}h;Iܥ7b5d/iڋQ7 ߿:\xE IDATGTB͌2 n K_FQ#- &mávtbWflWnlZW`4>V,8uUO90c:E]hۣӛK^,.͍+1wU6 r0 *pSf 㥩ԏ_@Q\Xܑ:H#<2cLQ9Bjz;ʦqlcr-D gɏ~#L&?IJJBv3G?s;3rbBb$xl&hJ@uCYW 8D `j݋";M>g\v {g]TZgֹ"WJS{^Ww%E=|N$ч%xg 8u>,G簾ӟıc((({eժU-uyM!z3";Q*uȘvϫH1˧XXRw?NoaϘZJ̏:E=,}uVQVK3VK3  $ܔzr}+Zj`W,ռ+o#&&|Ax 7eg:wr Q}Y=,\FKJ4ABJ3:(S%%:[#sd>!U9No":Sd@ 枑nG'ڇnzBHDT[h-A⭯pd\.~_ƽwz0PkB/%T'ѝJU;JK 84PBJ@GC ~=Otzƌy䯼GfI!"twJQswZdOԣqnZn3E55 2R2v(]>8^_a-@^xD~_r}QRR݈K=Lw?VK ̂kI^p?/)`:"!~!k&n·`zTlkjQjAz;,D_8zSm0-+DQQv;o6gy&vuQ__O~~>yyyy fpMѐeJ5& :%xV=:ɏ?v[Gv=JogL N"chYp(2VK }u'7f A0YD:vMiN8g}FNN\r .n Ie't>T<ΠsLJCz5fe>~Jq+i(gO>5\w$i}V@T=(>X17ѲGFq4*KQ]U10f_Q0c;'ʽN<,Y’%W|O?hSqwp}rJϻi ,;;2*r}Ep:qF***/IJJ ׿&66kζ`N SK?#jK^ENEB1`-[\zk2?ף{?53aW$ng˖-7K.+_ :j{=y"##?)7n$55o 26?y}-߀1:ן6ԣV/#Eذap4p4AQ"^_Qv Q+Po^ )) .grvJbYrν-{溏EqPJrm@+?js4!2:ngj5E8EOu#;f `݄Gf2&zKGN:w3S]a}+"cE_nwaժU ⮨T__1H*;*ҧ=3߆)hVc6)oh4XV֯_tt:q88*L hF>1=س&|LNCv F=E3p9QW֡'~q<\4W!IzǸXnz^{o I\F|h[shH^KFHyv4t:>,VFIGC04~Ѳ {&kW]]g},ˬ]Q31`n[GO5}q=a#&mԢ#IDD-+-KpF>ټy3ǎ{;Ὰhu/UoP&BS{wגW>jƯa6QF^rrt3  (aQd'igLd}5c/ . 6_0WQk^Q_'bzDQFv _QsJ }%??#T]1&ʽ.gߖ&w٭D/9Q}^.Ojl桇a@pRGC0X^zW>.;-}KK {/EEEf 6&kZhu^\=%`^ŷmf3w-Ƕsp8 ,=?}n6_W+\. ??0ǎ9hdhȷY`nrS;19dDяh|h`,LIrR-:BLZJO(XNF4Y~=z+\}\~\y0U "!x[?ak7&UXXOzH}Xa"D3ɡ)<4n˞H\kçXza ~Pi|5JNAprh 0E9q B\v|׫¸KIIItFdd$NJśoGg@w=`g,Dng=zNJJJPTTWWKRWWǃ>q a4 > _aT/p<*pShSWmXz0Ko $ܔzr|3IYYmmm-({68jM(Kn".L?ZvrȮ!xo5GM+++_&!!:Lr=մ=)*Y:[vǧCfq==>FK቞woo~f6o̓O>IMM /"?l&䝹4&Fv8ȲoKJJ 6m?hx9ኢxzYvjSKQf:%x&J!dݏaia䯸Slxk1{d˘L&&L":.l,9>,G=ի]>邓1zyԐȏc,YB{{;ׯwhHUGGiii|;3G0_p9T ǶǧEQk~lrv} Y;Xt֏1044ڵk}FÁ?x)Ap2Bv<6#4<•w>qHڍ7K/ %44iR+9cr 2GF=UE_wQ:a#ܘ1&u뮻u~3Ϫqii)III<444p 7p 7xIIIl۶z.hV>PkB[~޸WA1 _5GSBBBD[A1Ny\!w6r3>+QK+ 9#Z~|IKD0mJ,r + IzĥӨ,G1wUy㥖)G;h1ct{v 6Dcc#񄅅QWWŋ)--kk_XVq8zj\./p4s7eQ /,6RT^4\.{㕸h|:Nꪫ_1iZ198u4< ~_s='(((O~̻7c+֐ zsLi~E|{=\hg0@x.:C2͕+#u-BsO514qBRpJ_M@ 8ti6Idr-*SƦ,"s=gϢ EjxGQUU%e+pΜ9[G?ь3"9l-peQ ͯX8jJG:(.G'qcBe`9 ץZ(RcƦ]{M4j|5_Be}߆\ ]F/$ 4nte#d=rߤU_@boB3"|kA$~:{1__ q馛bE}}=jjj1d9(TFdlGFv5s!wc<`"T'r(Z rl|*>[T|Aa|hj JcR( d AkÿOZo3*j|Eւ`" ATn\>}cX,w 1BJZC|xB8?sπ IDAT.ڏۡԘ! !8mo(:(x埞+r1za(%d-|T722_A]]22b+R_X"ͯЛ\V/dq1H.>r-?C?.m+(y[mctB XtY?f 7(`XD@&W# gSLȐS|mܲ}fB 4HNx?V!4V}Tz]fnl𸮃d7r0غ+7{Zp2yq̿앶)FlM薰Z2u~ugaa'F_AV Aq[9m34}JrˠM+Fdֺ`s?}rV9Exӣ`-?O h`X9EA}33G6E\æ4sR>!+ \Z~E^0ܬ@V[~gS?ƆBz֖_7>ڌ7f2Pۿ :}2Qu]v ×=`*I){=\^Q(Ӡ3KSXX0خ!B(z>)Ȉ`5{akRϵCi~<{㎧4$= 4Hr 7pUx#.ڿ% 47yP.7[/#wJ|5DdVdm̚Fz(1!!zQI N[ǼM&4)~FF4MO;/pw5l& Xdބگ.3Bޠ$26 \Gdo[R]x~ ,wQqywE8mcgXw`2_AH,e3Ag,Ez=\㝳.<)<~x]1$U&@g,!FG9e›E5- mbxtͶG[ȝ>P`Ϊ,s%~BV$0ى0H[v~^(! ]{(sOc|䪴QQU[lj+n HY2dJBHBeDZވ;<1c`(}4 < 8j8zx'J6=\"\(B7} =2YmBg+HBN6.6'Q1cbMȊ@$|o^eي1 .{tbxfaöo޸94׾4cX}.4\X pN;C z n8MzAX = Ufkݲg~'{Oa)C&c47{6e*#|s\300Z*LDBH2@$r UQa.ky6 BLK àudEsOcn0Z6ϖ&;Zm xh~!O&h(]S014Rg(!|k & 1s5_rҪ+m_~g_D( hiv7 =LY5 6lhdU TLhp;zg| `ܴleœ- ao”|e b !8i~}i^րWPLBR L P8c &zDzKMOP1E&Կ]V~9`%7Z.^CB34DMOCp0} Yzgr6P?~,+GF%D@$iƬ y;0Zt<`ywޔʸl q⳰ _xM+\|FY* "cˑWFq2N[笽sza90,;u_BBǕ12p&c?TϠ3i}F z5)LxF[˜9f4K%*4B֚x$B I+pnV,aοFbYv}-U +)n琙 ne=֋Ax *aY@Gf7h_aW`SBV01RrBƇp;z&4lHL#p:APXrK>5x+Aǻa3pwa9F |"O^(a>vDV3X~ȼޟWBvtKN[ۜ3a T>uwP濹K8`dCN}+&uo٧1r,&0U_Y 4Hrx?NIC}%M 9J0 fk-2rn1s2.\;]=\y}`BI<M,#r{ ZCAD ]%!/ nEٖ/IiB?(r2vL†ޖ$7S"=s IU(e 2g^AF$`sj}LY4hY6XhSW! BH2ceiW5{PP~6y:r;aSƼ&R g l Z#dY7cl<XVkѾe9=g΀`e=2sw ,XұCA:.?#brzc ͯ j^u^? 
i}#Ϣkr`Sɴ.]W_ œue FHQA1s3XN>(sӲڥ@0H\[Tq9y7,rrދuwƥ<"Z;X ="n`9Tj 4< &Q|eF՗ 0ewAVBV5e9dބ]G3Bc1>|57e8wcgBvYB7>nlY\ ro^ B@Dl ;u"G>`j"#BC`DLVieQL`f2\\0dϞ_>>G. ZZJD 'O *d@Qd .=@둑 ( |m{Ʃ-ۇC#9/}(7" X&Zxx5! |~A Qʒ_ BD̃^AnV&*acy P"GZo!'7>` YZ~r&tq?Lcc6'GFFdnטmW*Wfxc&BRh#lHBf"@sG'>\j. 'Xd0BRXFBbP>n9\p0X/qql;JMqȲ.?K*eM$dO.]蜯W)P)h!  0 T"'su6. 'p"oCaDșT2j %Z慒c;W0 mXl(-kduI5|afcCY 򳳐 B^.3rc9u4` G;8>4\ظcX*`4!(Ј꿎WŕkmhT* p\[ `Bh{$ 3Ls:ݷ7m"|=ܔ{TnVn^m7@Jb 鳨sq+Fػى.Ϊ;0TEx|i5j|޻jMd@c>W~gL`dӱ i53>M¸ A˜JX^Ev5+PG ݎavB@YxS8%%de\O?1[uy[Q(1Kq[Q\o=8Y˯Јͨ_~ sKl^1sr\ʱ%Ə~KiYfa*USEp._X_=%dwĹ{'n)2o/Ai4aq_0qأy,vz0c0h*;Niq >8.m3 oEX{Db% _1AMqׯ?ga&TA !G~}Mࡀ(ž5.ZJd^_ŌK-פנjP0ݽF_>9MpR|x@n/d .Uɔ#dB0(ΣR1 P*W75wtaGuU*%jYO>+|a3wǓ<@ -RA\j&=Q#{=%J>Zv[׶֗R{"oT;xJ+|UJ|ˏ0R[c\eر&y)g,݁wJ{7_>1AƣIsfq׏Y,^2dmȔMd Wgy5Y||ˏAQϽeB#(zu8\.!_y~X\/=Zfy%&[},ƽ`w]8Y!%4DQij/uvlK`RնqfmɘMl1+RY/=&!y?+kz-:?[}(K`R$ix(2݅܉/>#5[)h|plL ݵ[p`M ,Q갍;F/%I~&:odyX}PJ8t0킜$Éo+=\W;J`R_Y2?+XZ7˫be2<}*>R6/=/+*ݓgbR tQE`0?0˫IjeKԍ:*(Y>wݲŅ|Q%IC801nۋtSKDI;$xV}o|Jּ/6!xSZ^bB҃B Dz\%y , !$r[P[p ʿ:GH/YLU$jhӢ|Q]A Ƈg\"ֵvuK ɲ,=h/!$a W 4}2Qy(ȱRo!q,l}tjjPxUz\Q\VBMos8ѻ:,K@CĘIuё !ܲ6fOBVVq VBa7}Ş/_¦;J=_n_-qH|tW 4Z;tOtCWHpH4HDKPR/=|uW|lFe , ShQ0 â)4ո NmGxu jmr,K׺zXK<+G.cG!Wc`(Ư2$)m\nV,^Qnd# o`sDR"C> =/-IɎ.$2MH0tuv'+kptblkKBcop6Ag05悽P 7,kN5Y""!rD?T2J]6cg+M; ~(f"]7>!|~(4>p >3$􉴌 IDATkmW&:}3颽ZMw,ZSx΍L6|"==U.9]Sjh 瑅$' A ]{|< n|4i|CPsQ{K<'m[r[ռ^Ye#Oq۟?EzHED&)ѣ1Օd@cՊH|G.]9xόeh=Z?Dz|r8=Z]IFAA(.,0GR1 fyerJSRP!dfJy5¿ [ M,\*q`2 "G#W)hLBLh"zE`@Q!$Dj!RP74!d,d CB!!,HTC !hB!B B!)iC$2 J1*rW (;1]i%و~fG?;BI$N+=$*d2Y苴_J'mP*SBYYy,_$!TzmM`IjDYih/H`I_BmSD'~Q$@g`|SYIKCV# 4Ȣl() UIwZzd]KB!חK=(G~5))dzSKDVEP ƒ.! .QrnѨ۴>! wݲKz>9DYK|LIBbmȒm*/şQý<'dA.|:f!R*G?{ \kG('X ò ʋpMuPU dJ^~=mAX[a`sEd]B!s,)FeI1|`w" %X+8 :dCVͽ!@ 'y} W?4183U.f5ÌH7%HBHT ۊZ%@ĕAu.!BI0JoK!B; 4!B!qG!B!$( B!B!S<رchll˲ҿ<ѣt|>˱m۶i_konCP@Th4o2`0>dggCT&8FKK >(J(JTTT`HK5ry?~nV`@;܌GJRDFFoߎ~u@#I7XŖ-[h`p1ː8* 999ؾ};233]'N@}}=<JJJf\.?~ d2J%Z-v؁.yr.^/***cǎ9301|>>?r y^[b8DQL&*v֭C]] C\>. f7oA@__N> /5L&l۶ %%%REz. 9993r9Q\\rСCR?ydddDS  4::_P(زe ***b^c2`2~v/"n7 V+}єǏ^%%%馛Oa11 ɓxgr|94>c| .撓*E1忇y^>|Z300g}PHj߶mjkkWԉ .N'233Q[[;eY"777fñcpAr())Y'\SS9Ӊtd裏ık+l||!PSSuш[Z-Z[[?墓*nҸT- *++n: ;;;%MN8z(\. rAVVӡ0 d2^/s%d lvttyTUUaΝjjJEQDGG.]/~Qގ7|cb>T*6l؀;wBСC׾'^>|vf[n]rcWf6144gƩk +СCԠ|k6aX`40>>RFkk+}]sͧb>233aXʱ\.{'˅bppKO˲Z0h41qFcBɓ'qx^lذ!EƸ\869|>kB@MM 6nܸe6b[o555qf2V<c5h,7|.\@EEłg]Qh4V`\HOoq< jb@VbXINt:d2d2Aw`ʲ,?A\vBHzx1<<e 5*}N8:=#[*^[ZZg)L&0p 7pia()=L 4+WpmI?exUfƮAQS"P*PAG>׏eYdffJ7ٺTSb,12 zE n7\.WLώL&jEZZڜpq !_F~~4L/K:S#F\/^{BPHH`cA "t8zJǁax<\iF2 m>#'R\ 4~~,J?e0 ^/\.׌B*@8 Iq$c(1c`2s9&9CVVtO8Undl rŴl6l6YYYPaJ#rT<t@ HsItVFbteYt:Ƙsr >8jwr*W#)Sv,B"33S{y<x<FhZhyͦţ@cFP(Bq tff& X*͑nS$G=(:L&92lIS\B&*jNS0R.ғ /& & @ʕ+Oz%w+ˁ@^>Z:.,BTJApZ J />}-/ݠhh`X "q|>AD@cT*T*y__Z[[t:Q\\ڑnT0E<++ YYYŸG&/5{dL* BR3Qz$2 UUUїң*CBTBB@ZZl61>>L4:P|XP===8y$|>~? PZZbj?W(b%%%B~PMMMv)ze0[rnn.rss+eMMMݻ}#UZ#yx^<RƲ,***PQQ >xO1!;ގ@ ixT R\Bf.D@WW~mx^8p`IԩE>G0Ҹ|yKK˔I$,weYH ~JW&2.Ǥš@cL齰(**oeaÆTS%a, Z 122!e*))YԄҹ̕A+J[NznqYih_nn.jjj|T;F`0׋@ B~TE rT/ ϗk.› m6aE  J@, 4 RdB__4WTUUIp"z4/Ǒ t:zcN ::*=^=kE*r `xxn[Q\\ҤʫFۥ}"[/M)B z0~?6l M#-- b&7S+ȹ+IJ,x^466nK]w2UH˻hZ }>jRTJ)Y~?\.Vk}m׮]q: n:џ+RWX,mmm( S=aQDQ .漼S-X%yy@E"  q "++KLI/ H75J%jl۶M'Rj]q IKK  aZQ\\s-_CV&݌d U/_ QRR0cbVZ@AL!zrJ\Iš@cD&gN q%aϞ=1. SYm"DQx<|`YV 4r9l"uR"Gafۥ4NEJCS&W#& oii[o#eZT,O BjV b¹,wHR󡯯OjJIe6ocҽmqVw4E~,J%AuRkkk2t**=,Jc׋v8NT*)-knfYʜD &D8 q\.\.N'Bu]!!dfƠbsz<ϣW JťE=UZ?䌊n p\|1O#瑑Z ˅@ KtF^1VqݒlZTZƍ fh,  \#U#b`߾}30/ !&MOKKCff&ryLL !GRICV3*cddXJEF( r9Z6&լg "\.._ R |h4x<)T.5sBFE_~=Vz i*;N[^RAz#h^q#==JRJyk0L6x<K⍍`ِ Z '5XDt:r-~KJS9ər#`ټStfp8T*Rc<^? 4,gchh v. 
CCCK>n0\?ɕcǃ>FhZ0 #U P(ñ,LrfhiiAFFlhZpR*wBז7ʂVE 8n[S`ۥZ`@nn.J%^/l6TwXT4 ~@jl6j`0Hux$ B(hʲfÇ~fdffJ?F4L&i^TS!Ј3 Պ .0Rf,F,d`0aO`xx|`pIb޽1͝?* ǏbdLD*999P(x⒏ kjFF~i0 X,dffd2Ij4P]>@сJ7NՊn-.i%, q"3 .{=?|쿖+ѕcQp8;#!ٳ0L!RR''CQш~`Y~ HkBl7nӧ;@";;ZX,`&.ɫ}XdwQ |2p[ogɟ뭟V?q;N(Js\(Ј=###xGCaxx(bxxS_le9&Wp^xIjr帩8unn_CCCHKK7 PnNcZ*j A`0P(P(˅B@#bpp1"F[Jz48nRv0Dss3d28ւ(**q`f>O,|H!% 0صkږ%ϝHtQl.rY}<,޶}~Wj_(Ј3af"GZ-fA$j1M6}.g6 IDAT,sY9yޔԱԞHKK[Nu~?.]Sbn1ne(Q@#X7PDҷvy<^;Ap /h \rhkkC]]jkkQZZqnoPUU]v= !e :###kbΝ|'xbJXэGAOOq9-\ g?/"  4,GC>|ؼy*  ?9zE7D;gΜAuu5jkkqƸwK/!++ >`\3D_YœO>VK/ aP]]-֭gA?;,+{ U*Rl*FcWXlb$jbQ/n6b-(XHPJ>e0i <<N;sfSVViW@ t[^^8KRdddx7ڒ4# @+kjj{nJ)wƴi?(((@@@ __vm&% RSSqel޼Yok 4TL DDD~|GFHHBBB0bĈ> Jӧ@E/^p7A|ݍ?FFFطo222G5%%[wR?Ɖ' ^UUU?>Q^^<ryf*X644D@@fϞ|Ν; CTT8u^b|g*ڢXwgΜAcc#֮] gggp\ףR)rrrp1lڴ"144en,,,#)) 530nܸvuY]x<\x زeAEgkkt[!!! ȑ#{ 8x 1aU_PbĉXjjkkS9s&- vmȿ7&(={;wDcc#0fDDDU]a߾}0aƏ4$P1Ek`ĈصkYf!88XiӮ~p@@x _ i卌addd 66077ǔ)SPgu'@ c0ٳa겮 BCCASVY] BܼyiiiT$цԦ)ޖ``Q^^cbԨQJ"HLLħ~w1w\TUUu:PêU榶Z GEHHmCYB!㑕>Le _LMMq=4553fPzꨱ܌3g|ˬ+$IaG{{{>^E4imL[yźlllį WWWj)@G 5pwwGZZ}XhQU477~9>CUlܸ?3|}}Jg2=)!68댧'mۆs!>>NNNݾ$666ѣpwwY\ t;Kƍ|ree%݋9s @e5VݻF[beYp7oơCPSSXXXtoii)ۇ+V]]H+V@rr2._ܭ[CTTFlݺ 5BVwR3d`0`fۻSR%%%ؿ?֬YC X]?I$ddd ::7n)X9::b۶m8v}:O]II ={BX,455a>|K{0so z9[6 |DHKKCzz:PD`0`Xn122jw^ (,,DUU?BL0CR)/@CKX, :Dϑ B{l6m= >$ .AAA=C SӧOWڟ7..rz.ٴ̘1 qqqZ:t(1l0|Wx AAc$ tׯLMM'Ojm5k:t/^L¡CˑJ[vňCYYN:]v̙3y#A'U&d97nP[PHܹ!!!044Ě5k4PzRUC*_E~~>{=\{3Z`466Ծo_=zȑ#Xd mKxx8`mm wwwXXXɓ'{/Ah:xP\\ LU]TT 7!We {'p)899 $PUm6\R32U5-,,sN3 |||MXd -nWzh7<~'$bS^F;X2jcKC T S INUmm{-w"`07W'dd?h58r"e@q͸.r _7^_M#.jjyS&ckG#+.),:9jD@4|HBleD0`ktLcGcGYȰY,1yl(L4\®=':\^ÆR?xr"|02bdjB" .NT`/O*fiDݓvlpsuw3ɑ ,D"11 ;?Al=U DB%a˷P\^ղ?µpIvqIƬvB$8{:'ڠk]W@/O&EqyVq,2KJi|% ŇƊy3>L+#Nj:j2=Υ (JOBbIfѺ=|~"1ll m]C~]"HCmvޤ[[Eeٖ{0EeTھh }C]1iLr 1 Zx<\9_()9;aBH }!yRP9Hmϝ2a8}wFTѓg2lwÂizMuI\0`#Ʋ95~4XD(p@(YGޝt̍ٮfdh~Dq*;-'M--@_/-w&#@f̍Ѯc2RB,|`sؐv J˰ xmޏfj_rfr [3h  _Y&tX޺ رk}%8|.exƷNyj774^CO05lL_œLn'bngg/PYF@Gx/5Y'|mݦ]]0iL0;0 #ڝn.ΰ%'_BFxX%H$R445ѓg2"9֛$.&44tb "6L %Z "RA6Φ7]`B(ks js iaA=P56C_Gq'!%d445;(yo% A$}u&L1܌VwL&ͨ??p-\qZn0|بu`T*ɋWȱH "Jzue`2pF:IʁP(Gzc;=S}C#3ƎL0>nԶ6&9&?FAi}#!-<b D)t7DN!K%eZȱGd<cgNq}LUSg( :ϫ*`qWW4ԁ7h쵹<~I`́9 `& nJH{i`c9+̈́5cB_i;kDgv;uckO!L|LX]FRPH55%En#+KA V *Q^]rB ʮ3+`́ Y,Lt :#3L5}cM ǁ)Fk,@ qTicLSD[?,uۍ)pD<+IDATՍ8:V)Yeɱw,7aNmXgw[>?}!NL$1ҦߥVEͦnS|:lzexg,,N_[ePfpSg,9sGXpn4U'|k-;`5{]ԭõĶG׽wck7j`"XBzРv-aҊ*;܍vTSj6A'PfѦ[HtzbHNDdK+Yw͒;x_s]pX~m4`[݆BuIjV|IS/53dia){?V T`[KDaY^/1U%QerL1@wcݕ_r:s1n524ho.S'Sjq" dQaYW Ju17ꨑ;Ha2[.f0K/vLY:P[= d\^^}\qy|cEL"s13m?.[ūEeM-<>ۊ+h6fNsLeBCT*ʼn+q*cI+91EmL%|w[4Hyu m԰dt#s&NjB}ީ< A*05lL.*1Uvhκ0 0/Ph =Xg60.({rq*&C w! XgjfdR5vp\u2??c>]פ>Cmqrhu bbXo)7qw4 Z² xwc~U&L:1ZMcO Ֆ1yJ$RcωTEƒtL0WWRFSMMun@L@Fv. 
[unrecoverable binary image data: compressed payload of a PNG figure; ends here]
[figure labels recovered from an SVG whose tar header was not preserved: OVN L3 HA BFD topology with three gateway nodes. Compute Node A (VM1 VM2 VM3) and Compute Node B (VM4 VM5 VM6), each with Interface 2 and Interface 3. Gateway Node 1: R1 (prio 3), R2 (prio 2). Gateway Node 2: R1 (prio 1), R2 (prio 1). Gateway Node 3: R1 (prio 2), R2 (prio 3). BFD links between all nodes. ARP/gARP for FIPs/r.IP at the gateways. Internet/Provider Net.]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/figures/ovn-l3ha-bfd-failover.png0000644000175000017500000035643600000000000027262 0ustar00coreycorey00000000000000
[unrecoverable binary image data: PNG payload of ovn-l3ha-bfd-failover.png]
[unrecoverable binary image data: continuation of the ovn-l3ha-bfd-failover.png payload]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/figures/ovn-l3ha-bfd-failover.svg0000644000175000017500000052110000000000000027253 0ustar00coreycorey00000000000000
[figure labels recovered from ovn-l3ha-bfd-failover.svg: OVN L3 HA BFD failover diagram. Compute Node A (VM1 VM2 VM3) and Compute Node B (VM4 VM5 VM6) on the overlay network; Gateway Node 1 (Interface 1/2/3) and Gateway Node 2 (Interface 1/2) facing Internet/Provider Net. Router priority labels: R1 (p 2), R1 (prio 1), R2 (p 1), R2 (prio 2). BFD monitoring on the inter-node links. ARP/gARP for FIPs/r.IP.]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/figures/ovn-l3ha-bfd.png0000644000175000017500000027145400000000000025445 0ustar00coreycorey00000000000000
[unrecoverable binary image data: PNG payload of ovn-l3ha-bfd.png, continuing beyond this section]
[binary image data omitted]
[figure text labels: Compute Node A; Compute Node B; Interface 1; Interface 2; Interface 3; Internet/Provider Net; Gateway Node 1; Gateway Node 2; BFD monitoring; overlay network; R1 (prio 1); R1 (prio 2); R2 (prio 1); R2 (prio 2); VM1-VM6; ARP/gARP for FIPs/r.IP]
././@PaxHeader xustar mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/figures/ovn-north-south-distributed-fip.png
[binary PNG image data omitted]
././@PaxHeader xustar mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/figures/ovn-north-south-distributed-fip.svg
[SVG figure; text labels: Compute Node A; Compute Node B; Interface 2; Interface 3; overlay network; VM1-VM6; R1 (E/W); R2 (E/W); R1 (E/W + Local FIPs); A; B; C; Internet/Provider Net; Gateway Node 1; Gateway Node 2; Interface 1; R1 (prio 1); R1 (prio 2); R2 (prio 1); R2 (prio 2)]
././@PaxHeader xustar mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/figures/ovn-north-south.png
[binary PNG image data omitted]
ݷ(.[S۩e# cӹXmLI7^ƎoL݂C g&.{y;/7sYF&}!חI\:l]LxC3+]z /6(//'33ӫ@ 87!.h:i(k/rm9UUTuVӈa8hFkm'8zFFE&MKK Зh`_mŵ)?O_߻ Uv)Iɐix&'&q/aixenVdOecёrbZ]a *N29a p̘8$$r;o`1vwZ~ͦ 5&6t&p̀5ܢ0#c@ii)"@ 2BhL"[pā=x}.(42~G@R*N` <} WlZRz9)" LyF!SmsiqHgj!XH @.~<$QաungGey!SD } ^ y WuGGpB !@ 8o0LPZıck9FQKUU ?H%RC%S$w+F8/\(99];p*yGQtL,o}<.#`AcU/?}pù}-Iࡍ/\{˛Zj-(K )!9vrm9G[:іk˱oN>"UdFgNZxii}4t/{JN|_8WaBoo_5kĖxp6* Ю!-:H(kiG\;`06:`j|L6N}V iDɯWOZdI+(Ӷݻdgףg⯎d3@cdEDIEEi ̮]l(@V<"H @ BhNCwqv/ZRʔ$%ɤGğvCX;s-/xkt:Eum6?LIm7z@gg'mmm jOѬofsf9kelX~GHPH@sot@ ΉNAqM{B&qS7ɱɊ"3:Фa2Q^64p!r9W_}5oţ7c"ߛ,D\3Xcqݯؗ7 d3qg~ITg7kj2m4FQko}z;v,6 $)øNF)S:J%RU@V) A&H•H%|@ ưQx4i)h,`Ovf_> Ú#LFfTf"n2ٱ3ĐqLgë@㐣8z>x9G]rj8P[d)=4K //[oVZ1/x{l܊T"؁Zρ.SnOL A!$`BBPE &LF2`E0Uo2Hu$Q("UN1##Bhձv{kv/kOERhbV,fԸEόa۝m2K'Wp9<:7E~G_:: x>CO ئ60L ==޴j mb`[D^R")c%ʉT Adhu4 ! $$%68VxV9A @&[^gpybm6& RIrt4=CVC<&pc]Bmn + r`rd~wnasf^)x|gV2)R 9p;଻r`[im512XM,q8CcLb5$&BJX *,Z/.vNz=E-EU3 Y`DCF"Rɤ1{D|Fxx.1 w×`UzwБ!z]ywT~8pl,Ⱥ#ȭE.ebiٟ9{ *s*v ̚ǫv`[clS| @o-!^3Zfƒ|pث%CGFE-*W\bnP(,C0)fb&yӢoNWGMW ';NR^Frm9Zzo6JK)m/e}zgZfjT&eΘ9IÄ "J BC h7t뎬㫊|hu4UY~"222ϧ&xp5gfo N5Ɨ_l!JwD1?xg ~j TjtF#<_m g5e[37%nG;m{(nmbKL'JiN :_2sJJXk+ռ:jul,ಔl)li ֓\DTK.eΜ9<ط ==BwX0TLvd3qR{2)Q-zrsɭu)Ø=fS|MKrXߓ@ 8;!\PwΪ6}MeU*p 7ps w_BW]^+@ԙW]/\AJ͑:S{0Yjd'&1&<؆npC v  u\=i2>TR)\HlH/߃agM_W;Mʌer|w|&6cO7qu$6wγ!Lv;<B~6=^?o|T4ٱqܵqV׏LTz3 IDATFĖvB.W|dff ,, A)SMvl۹.SrZ8xb }ՙtlƶmζ.fY2.drr/BC ;|q ^-xON|fO) iM\;ZF=^h{{yH>[+Jhb$|/kX0}wk1{߶ώe2PC ;Fs朢C>k|2EGT^|H(:]_~[j:P'E J|ZTFUkQV}nhwϽe'&)4 Tʧ7|V j]g~Llђ^Yt^D|x2Px`?j RܹsDD2N,qWwUsGאG^}Z44A|P)a),/=2ߋ@  !4 z<&/|Jc1FȹHV7 x`G4 ,k'O0\s' =[Mz 2p8P3dC&t C.97`'GDܭ$l).7W's׻o ZBsO7B/ic7vFp׍ハ/nRmzM==V}[A{FmQጭn@j9߄)S2XI]'_rHVkXiPş4XDn}Sxz^eݑu;9ߊZqKE+`?W+-'N)7zj22ϱ6=f3 {`z6jkkl̘1G|???IFbUוxs3NXr9~I |g ־&]&*z)jn4Y߱ʉtefqQH(mk_^zܿ BZDj5}_o+ge2!ʴmTtjv&N¿qVEzA_xԵ :ʴe粳j';wR^qluW5/|@"eXY+I M:W BC 8oiѷ{f}>11>vq7S㦞c /^>x `$@OO3gg}}/ejr lʶaw+'G[ȫvvX!|g5T,`}ksE3nu< 3Cp%hȉ<v}"ƍ7{k5{M.]6v2CP+ܖ3/Z @SS}r9^ckC5,潝?5wE*".}6g˿^Noo/jwCΧntK˼T&2'9E ƾ}l,ҍy+H4Rnz+ߝ]QRF `49lw=~)i+qWYRK0[~KNnxwߠM*Աܒ3Xq&2wc֬Y^t:!-"Ӎng1t&w4y ^NpafJ9{c-JJxx|YÃa"@pp8_>m{rm4YwM Q[Ni kP<$$\9#M`Cmn".$cJ:8qxtƻ|ŦAhZ-"Jtww@sNjPt$M"=oܽ=moDanr*S8ϙ0WS˟❣\si>_]zam F;y!>,5 +2Wd@p^#h#Τ?rnLXwNT1J>oՕ/j$y~n -9I sOezFV,X>DC c _ύ :~8PS=V8vcr&$nqڮNrj8PGsO7S{\"\ulo$%<5ӇHp,2.K*4>ou6mbKnz֟IU! ~}VWG SuNWEfLsREgNrr2z,/k|; %xxjѯ'FƞF׻KpC e_H O%.v`b>=~f;㩫{f %ξdDl=DDDJww7>oooޞ’pO9|0<oiSM`GnqVmw /%J8\9=0F_V2ϴ{x_[ @FT4s MJ%FGJX </<&[_R E;7= أ!vm3u/]7f$%ǚyaTحȔ>.W{r?2T1k=A]]L>AKPȍԥ6[QJWE~12v?c DjJr}3 ˩K/n;v`4IJJb̙6We|oBdswܺc3w0l9ݘ}#ϭ|`B7QO 8]NΏ@ZkqI{z1PtDwøHY Aռ%H a~r7d:p---rϟϦMxP.!*OA%[$8XeR R)\l[ 8zi$ 馛G"JN+٤AX[WKyұ<;<$(huT"OWrݤ)#f9P>u]4qp3`IvҚ f2sLE&&I]k`.6f*B@HO/?I\{̞=<,cSLaܸq}cF]Z"Tj{,|RΊ7^ xp?&uŗZs)7gVGu @0:ܻ^=K{zd:/|GɲOkO^~vXc~^R}Wgb߾}X,R)&L`ǎ*t+ʤ2 vp w8f#bCca2yےիIOO穧␶HLy#qz SGZS;JDBkRAHtR ~(o:H gѢE,^jkkq8H$&MDFUp^t;zi4QQtF#QKI#Ns~y.FZ-ܻ^6lpiO MbM뙟<,9Bhrk>ZC}wM?jf94xmuu!IaJ\cʎϋ?')7om̽ 77י)66Lg}Fss/J/gԩc6IHH瞣0 aY3C"XIԑ։M*0-T x][ )))7ihh?U 3fF:_)h營hW.Sg8juq0ǴHgɩWQ ?G~D[hJ'${p-Bh;voî?`-)!$\ ƋaOpPn:'=bxމT|;{*0h4O{0D9BII :]haaadff2af̘Ahh(.󤧧3a>#+= ZCT+ Vy<̐ 15KCiIF & [}V FVAdd$jRIOOZnL&cرdffP\5ht&#+9PWC^] 5]=xkMKwˮbrp6hidgkXѥ3ϳ+ip;Bh6w5||ܵ\0ř>))_n@؊B*xnRRdEDz wJᠪ2L&]VL&h4NWW_^B<$$8㉈njikk^kMuu5ׯ̵nY.ìcAVAVT*.R6;쮫v;?/,⒐( $ rTfs3LX,/<^4t[WÁ4b۹*s"]˾+o:Rkz$>OyǶ=qkΘ9|vgk.N@wʴe\;TrZ'Qh17N<+紽u$ʉdE_GJee%UUUFjFCpp0 LT*jb0 zz"'R]]M[[_Fll,ӧOGRQYY_Ç= _HR2337o999466;7S=cN^T*EAtt4QQQhD݈_ܡ}q8NoDkkG XRSSA"`)((\Nzz:( V+eeTWTS__O[[:)>J%U*h1 <Yp &1Vklv$~4efՊp<rDL&CT: E|Y^7-\97)4[9BFа3@޿%-zbH"_SDbEP4Kx.#&8f-cgI>,>ʡZ:4 %-[y. 
x!wO?r5wAףj`0^ ݎL&CPF!$$Hý.+++)))f ӐJ$&&lPEL6 #aᕂWm*WȚ5f϶|bH˻xySbC1j RqJgxZ?wˢhhhp Nk'cnj%3-8^66d&BU:^';W{\~0p/~6&t {؈g@0rlhc}/^l ,}٨IIOO{)/>J|볧 ƳvRp'Nd~~oIW8`l6 W|qquh1eqb~ 7{g#O&&wfbMgwM%*h/D"+&HğV]wUb!Fke۫^ٖ۶0=at3s$XS#"|O  ܺbWϘp:ءA"cngAg?Uۯ2~ݑu<0j9),З*+ 4X>790߿.F\$zo`۝(D5o,f޿Ed/Q#zNw6LV+QjaKU~U#edgIjXM[+b0YMgelb[#$AaS y  R216In"/fȮ#AyGy_C-Ǹ&5<@& BRѐM\t2PNأqchjk^фdr)pQG88 7 7HejoCQ6ȑp4Ȏjx52.8c4"v Q)ADGCT\Twv/@_bi@$ B?#OϾھUVt3~]n|+ړlu#K.9 w6z4q$Ee'k4S E&KqL`|Z*r'v"h\H-J+9Vv4i w '10hd7_x:$p"DJs81F*))y6؉2;8;[^sf/ȞOqP׹)F/tRt>&ę堲&U+ +`˗ҹ3?}*Q9l b졯PL*s_38#r˔[֑0Z8R6¾m!AJ|O8:t{ٲs;aXH$Dh)F'HQK] KҊ.lv;jE*aJEBgqᠳS"yI~~v&3F#]=`$'#62Pjt־>~NWCzp'Q:I|o>U;x=`H_?Ƴ..-Ʉθg~Á B#tKۨ(wPJQpLIY9> SOx_opQEw(~~Cf/&Vj)*t*iL|e6ؽy=7GW[w:_w&A@oN;?RIlY\V`!mk#Ku\lAx(@[G&K.bz9\R"J%f#N§qooٸYɡ݅X~28k ‚X(Nk{_^ ytvH]ƕr۵m 9|a|v&_dv\ `t6J'yל87vKtBBz6!dǥs*J] ǗRը\A ~eDQjy%T|~ld8OQ=oՖa|q]8:зZPB9O#wsNy轇0X >N7옭+΃ȧQcgqn*-kVr֍Ι(¥].JYU# С@ъu?f)kxYF=tRQU,HL`m-_=-}-*%pHL'k3V+Yy|x0<ôOfȕg =Aʪjsώ-^B㡬͐:kB'cj<⫴uvy~VR;[d%h%tDdX+/b˚ 3d^tJ{w`צ `e~'ңy'QܜƋ)lW·|WK]66oN\"Hi1:o%^`2ß?$'߱.ŋxΝQ|čZ^_I}{8JATx(O P^*~F~a U_Mff=f/r8|CUXp?,>Bc&PY[ϋoGGWOplYR=Y\k&{mk< S'-sWBAJ 3ƍFέcF]ăwO\raF2]=N%^CƄyxyf9~,-@wxȀ+~-wؽ M*XgdۼdM5};>^9/+cm1<SN~hE/j)(+EExy?g~6mUZE>GOij,LJ:Iq ^z!{ݹ]M\zAd[/"ä:v@BSEَU$JADq/([}wNoo12җ`^y=(6z.3m톁+8d29:N;x3N.x ޚ Î)Ta2++/|fu X"{}Idu_ MFSЭ0Ȣ q wo꒰%|c7UίUǿD 8Pܷk!ycV+&~]'+ldijy%/$2T*%>x/! P\w?+^3RV.oVЩ+̿>%|)Vپ{o3x~S-%"QU/_?0, d௵U0;Y[NeK=/U}w՝mEq}bՒTntfE4\SR>1h )UnNdm^f<[MJ~/GfʸF@K{;Md(|6[,dً`}g?>$ 5=ނ/G3m˧mb?~:F%v|~ٻ!K+y#~oEoݩ!nIAI[l1Q~c.7=O˶K良_/7~S`0?5Lug%dzrZZJFVg{.dzrٕ8s_nxKjI*>xX߼2CShWo+ ,_M&3o|pG>fj~|7KvۿAv^oO1o\"@BrojqZEf(Gz-8;;cYz_{!kYhTGp~ZkbMJdyQUÇOI Q|}\?ua0cZijmSg~k,Gzi[`]Qmg;nhU \XY4{wӱآ[YhL1|J}S]X(e3UFjsLl-t&E)=J!us,J=t/15T-z _~j`va/4*-?ٱ0J:uib7A|ɇѨ35AR]n~ν\YgNyM$ȌD9@|HOKۯ\|8Bf!7'#}}\.%7*A8r0r亃Ӷuٸ2׋8Wd 8N\]Ng?+u~w更Bym֪5Hi"Q1=^{qLiiY'mSCQ{x>?3Y2AS'H6B݃JD! b##$>:(bͼbH%)>g׆R\̿Im ̲Ԉzw X`>W??sv,G?$*_?}b4Nih3(' {d @hP ᄒ%g$ǿS__ukGq˖5;Ov8vn\GBL006?!jݻɿFo?}i/Id8Ts5{{1[,>aǹMNC)'+ZR#FZѰfȯ%=ov|~/a)ŽtZԣ{'L=* df/yN,J>5Owf%wLv^gyݬ[ yEc Ro_ooغͫf1Gd0l$>)ceǤn)A"j{fi;7fcZz{)nrfqX-ݧ]hدVnscÊÎٲf%a!wvPCv R;-J3|S.ܦ"}X:Za0[W"<5;+73x9 z3/K>^xR'#3]1)jhinEղ. #-3Vd1">I޼z0_aOOTޚ,.#3H_@a.BV+g+m^[QJ-<S #~?2OpP5<S\(flZJN/'ske{vikMckf?2VKZʌYhL"uw+֮ b΋4 t Aw|[$#4j5Wb ^rCF[݋KR)l.W4_(kk 5_gN14Q](mM[2))idVeNcKf?QaFV/]ZBc]K%$H}>^~A~d#̭ÚerEm-uu9=VT!>ѭ* Uu)FiƉRLW3T|UzBtx!39ߢ8ZdʵSgx}h|yN?-sF %W=8WcW{O^v|rmZѴvA/u?S}fy+|umc}'GDWOMmҶXfb-F~~&u_}, _:m4]t(nG}wwT({x4m*3=-JxV.+ؑ{/q> au'F]שb4VFR7ƩKtɣ"PVU#m$\Hffѓp LJxfZy(oo㟾8ʶyv` f19zqDQ8z_g)GR ؄R!7yo)a {`Q!(ddfHgG'Ʒ(1xyѫ#"eU5,[05둅$q6cKXp4yXO=]~[FbG4_:K<:3fեg: gHI}ʿ1dUUW1/(kyEc`\t*kb8]qcy'K-;/ |cuLYe5XV]u3N+g }8t<ǖY_rT<60ak7ᅢd+K[zJ~|tƁz3'M{9|B>LjXW';v{oRՉVGψʟPrjxu<ƸȪq^F=b(V YhxYhL7fƟV6|uίw-SO /ΞvיS#@}zd4@:g: ıOcCk{!A>` ֵ߾J}S ?3$Dy2iz@|%r3_O *>[ ־>KҸ=9(HJH(WYϗ̊e4m^ʽoJ}SqJc=h[+WVߥH}"3~d1IAcu D ?);F6#@pnAfGPL?_i:-yˉϜ{W;}o4۹gbQ t<[׮GBޜo](tRVrk^]`uLۓO0m{F>ЛM|| RNI?87 )h2α?ڧ%$쿠a!7|" &Nj9/J-fK6y<ޛ|? 82A{(;?:qXAz۳\{eƆlBRͭbSB)!Ŵ* BG2Q݅lKfs<齜*6%#?@kݺO7q:;R2&qq+#-3G,4<,4&Qi萶e8|v4Mm@M1}xOϰ,6ka}&CcJ⼾Ƕy{+c[ ሢHmc3~a)`8knӅU9\RćO>C]w}C$6=QçV>~:Dܢ&~xdts>n.(0`\_,A{T(1t0bhEƔ@ltw wFu?^seR.fޅ? kV3sA@T`6[?۪ߡF^`W_lUzss2+n$hpC]R(u>TuvWG:ޭFԮ sEr"}h8 wNJezZ7{PZ='$(o?0ѣL1IS2ǝ2#hL_q*R[ȓ3l5LF#[Cn馸4)Gש]4w%4Զz E X`>u4u aGEPL Wدhtbw F6|+;FSo𒇧522S٪r,.fԴem/gRPRۺ730[,d^PjxsCHɢJ*ERMp9Quױ0DNc.33 @6|#s dyZΊ OEh̖ ,^V;'[GW0jZ !(Bc >XE+_XږW3d "YY-I. 
FFŖOtC 2JBcwd_m֏6}0rMF3\9 Nckdd{{E/CKBM22S,4fsG>^/[ss 铎ИjZr1Y鶑 ߉@[[thB_@ʓ+>jw]Yb,'~z"d^@OށϕyҔk6&h~% l{>vnr?(Mը?OO&L7ИȆo'<@6m$KB#`KUЈ"a ,_D );NĄj-}-Rm%)(ƞbdy۰us'qStٗN})ndt>z+-ZHE$fgA3唑ndשΛFmu(dzs3[<%ޒeT~E*@gBbCnMBYhL__QA j/'tNڝ3Thz͉K67DLwmAH`C=X3 -}XM>ˌEvDgI베^Rq։M(pNbdf< 鳴vt:]6~E4nj8Ua[&>9{8ERw[ A^+vwnۈV3l(2G퇞'8|6f b >T-K_cJ-_I:Ξm'e<4}kL[׮b7ىB 24d:ï<( +-x)h*蝣E$(:dz]>M셆Up2G򳃹l{)k+ۚFɍ Z@>+A&j/=D[rcvdɠB1xn>Fc ՊDW$4Zh$HUpw'4 0fl|سG?CqVVI-nQ[E~o߰-kVzАBc3W ߉rی֟H >tZ-z!^x-:nDLnTL(DE;`}ptJi׷c/zRY %17/_lU&rb㥶%hi@!bBC&6ٹq>' FiչPj{wn sLE3jRz)~JsPOS^RS%Ϥ-L[xL94DDZDD ;ʝ (8LhM&꺻H rr!p1Nd{Ogܽc˔V`,?O!OXd{m"$ȵfm9ړRFfBc2 ߅z dC_?$0~.s*"sw B`Ab;7fLeiշ:ڋ8(Lhx|4a)|Jj/9kxjxH0/~||x]>cUԪilԑSS 2꺻8TQ{P6wFSoZ7}$FSZY Ɇ  LDMs+ed\#NTd1Cˆ>>c*ܦdHm5ѕЈ^v׺L;ٻ}3;tq,3kJnM.^f3ٌd@Thh4T*|||PbWtm~N룹١_nool6X,RjQ*jt:*@O "44Xff5Cg0NK..;; ɓ(J:[}9hjj*hooz~A 44xILL$(hjSN6*ʜx{ngxj8 rY:&$$0'IR|g>Fjkke{{;]]]Xq">>__ׅe`}V*++ݭHrBBr#Иj]rpF7(:.8֮/2hkk#??1 "477s€k]LL ˖-cٲe9-TQ\- ~*jZqu*++illpEoo/Ǔ̢Ejeם"ZQ'RKG_~IAAFh2>ИfgZ)//'??R%Oqy|}}?>iii$%%P̵Rߓύ78y$ڎZjkkOIIIaDGGOkrC)}FYBy)$/*`@hl8@}W'Ax&MGE:7+"^z{! Zͦ)((w㵵F^^j FJJ i>4t%FUJLQV'Mmmm JR f, VU nENNl۶|9lO?ܹsNʧ XVJ%qqqlܸQmXk׮I* (sUo͛gOnsרURcspu*ϖphE! _%ӨW^7Oq slW_z#2ίtLiɴZ-111(M8:Hcc#UUUذZh4b0tuuIhkkٳG VӧOs)6wT(KX{V+---v0 _{5V^͞={s YhfZ:uLKJRAӹENN) ?*=A, .\lڴm۶.Uny񸌌 V\III 999̛7իWtoZ 11$vECC׮]Ƹ8||lxcXϩ@Ֆf] $&AgVz8 pShڄjU괮8Q^* {\Шܾa-   7jXʼn8x/r}AP*Ahh(iii,\pDw&A$002^Jyy9FYT* sCmmmMͼ曬];C'l;}_DQdҥlٲJ)**" h\?KRRIIIz)Fnp2($${:x o~DQE\xӐl STn݊  $$$PUU 8DQܹs裏 (.E7qqqL&jkk),,`ͬ^r())ƍDFFV`0P__OSS*Ul5 & ^ZfժU_ ͥD⨪bҥTUUj|M/l_ `6>^+Bsp===x{{Ϛ5kصk.\ܹs?^ebp&""*++Ed6mDBBfBAow%&&85_}YYlg}6Ld{n,X ,]7rErssaҥl޼g}:9qO&((i0a` ;++ׯΝ;Yjr>C^IIIJ`0sϑ1l3ヂgSHjX)!a\ou-=d9* 004JͨxH9{IHvxƲ)1|%u阯.fŜΥNʶuXb9:j8SΖ-[HJJ,^u͛ǦMx'hkkܹsdggsRSSYf &|JJJ0߿_nKK ΝFÆ Xn>>>M^^&XJ% ~>}ׯ;|<nE JErr2ҤJll,QPP@rr27o&m.\@uu5>>>H.K,2555زe k׮ۛZ?NAA*`^JOO0jʕr5;\SSñcػw=5D3nDQw&2j5qqqĠT*1 TWW_p)֮]ٰad "::@ss3hZ/^ ͔cXHOOgƍÉ'A#ڵk==] H쉋VDӱa֮]K~~>Ν_$%%͛7s}k.Ο?OSS~;YYYHWW~)555DFFrdDQ,b޽T*EzJJJ(**bٲeÉZU IDAT{nXN7%n(}|zԥ|oޥAlByL{Ѧo_GGcp7e"4Z;:9s/s=SXh׭fQaf;??ŋK}{FN8ۛaB **(^#&&͛7{nvA^^,]k׮q5j5]wEzz:J+WpYI Aw BBBXz;, 挌 9@3b._̍7xbw(NGJJ ٳgIOOgӦM<3444˗c͚5deeC?555444RXnQ__ϟ' P(DFFJg,GDD]EE.]bJP.\z?FZvuu!h_v:,X t:(++o̙3ףhXt):u\z= ,`Ν9eh)`0d`` =4f4)..q|/hAR(*m 'dEIK C0= mG}؜ˬ(b*;$u?o6Jg뚕"fnffWLwV#bl4Vjkk9t%%%eeeOee%\LL- 6QMBB Z&--/Jj%//oݣHf! l3|YYʃXl2FCBB:ׯs.]Ğ={gڵ,_FÕ+WP( rrrEDyt:%%%={*|||HJJrMlrk4(,,tʚ"77ansiz{{ϗRkc4%#3X4BJ"55kJq.]"33?////_ΪUf֭[9r䈴ٳ9g*, >>NՊF6((I\i2&m^Se4p1)hZɻra_woGO.S'?H@ߞ۩/RTT$ 4;::x f3ѤCKK mmmR[[˥K'55{>>,Ze˖95z>s h?oՊ@jXMh4_s6'V*19 ?SU11[utt83طEDDHQikk"""{aٲeRQQAyy9ɤ`N7j gҥ\pAWXX( 104 JEll,$\FD/fa.Ӣ(REZMhh(Ys5L`LppɘnA`` ᄄ  )bJ @4((hRT(>Z);` ͥTnzzzA~~>)))J狢HUUEEEqU(,\E7bZl5Sa4iEw a9^>|eBGaMLg*t( ͚0ͿiOnBG9p5Css3aaaHu3ѣdQQEE+Nh"-Z64Xj. Ǻ& tyjZz=III۷BAff*&&ٸq hhhXrh4> Trܶ3dΰ h3c("TWWNHHR\\,eH1͘L&xTkkr;rNh4U?t::^OZZK,f:;;ˈȊ+X~=zJKKimmEՒ?uuu|DVsU"""HNN&00nnܸNc֭,]tTMffþ]6PT]wRCmBy|)yw&/p*4cEx=vbw6Aee%,^ik0pϟG׳d>bbb7 33nJcc#=!!!5Ե!!!,[ : BII>g2 $$NB"""wSWWG]]f˗a"""ף{6;9,}gN;s8! !Ki @-[+nv~{.-PoAr8$N|߷-lICh,Y![xGfyyJ%DDDPUUO||<%%%0|2_%&&JFl6;e -DA>t2|HSKDDbzz{{IMMeӦM\\;v@ӡT*ߟ-[P(Nǻ+prrr!שނ\-ܙѧv}]>ٰaٳg###C ^e˖O?{KJm߾JŶmhkkϏAXz5?0:t>l2.\KGGcc4T_ qɺaFc ) ZSCv|\R̙!A .{/p<"Df3۷oDՒg}ƞ={&77 I;wAXx1+W$,, 9rf+Vb v%ݻrss{ȑ#8qƬ "_|SZf gΜ!//immjj:%T˖-Kww7/_&00T6l@jj*MMM9r?={rJv:ĢE'((̩S(,,d```\g[^N3YΏ d(,"Jl?a0ti3Fe RQnST=R1Z]ssG,j8p*o]wEdd$7p۶mK.` ((uֱtRZ-z}QTT(,^+VN [oEDDqqqr.]JAAk׮q)̙Ê+:k. 
g߭`ĉddd@rr2r-ʍ7(%2R:00@||<)))4R&@RInn.aaaF_ʗ_~˹9z(G[-WLwi)wY-\;ӧ&&&LΜ9Cll,֢VjaaaRjItt4K.қ1K.Q466?(\MM ҥKTUUH||kmmʕ+N~2 n?Ç|r.]DOOwu`}>шF;b0ѐQΎvX`pp%4bc]Xݧo溗j&03믿βeX|9L&nvΝ;Gtt4(Qgm|^{ VXArr2ϟ8G;ɸfbPVVFmm4+Q[[˦M}hjjޣA5kSAϟ4@NNiiiqaigժURQQ!%N>2Аq:A.^HCCDGGc0LMM hZ, OFDDNdee0N0V^Ogg'̙3TWW@pp0%7immO?ERXXHaaF{=bbbXp!G:|'22M6fiСCa0GS\\<8ϱ pE#+(g2xul:pm8NZ{s̉eFfA:ɓ'?>Nㄆӧkj[HFW^ߟ뮻+VpY;ƙ3gaʕ,_={P^^NGGf)Wfr/FJKKQ*N\JCHqx#V %%FΟ?ɓ'ld!cut:ӧOJOO&iCQYYI||8~FXX}}}԰sN˒%Khmmb ---RmɱxbN>tm3Oa.:ܹs… tvvbXtRd)s}ILR:Ue""-Cb,/6sdW gNИ@[^wfsQ(:uJg0ۥٳgpB] {Xt)/"x J86t$%%z]*@ji ?q3)J""" ^DQtð!IIIJ?3[ sX5 jJRt*d2qg2g222!ˆA 00PJ;8&+WdΜ9>|J+}h #44pBCC R,|m0LEgg':N7.7+ʕ+tā gݵ C3zhpZhf̓gQ|*feXQQQdffra'w*T! d2aXƌ0 t:ڨvfGPFرc|m;?111$$$PYYIKKK[f2Bf-lsww7 B X,s188``0륔!22tZАBeJhdggS^^r\EskHHKFWhDK]__]]]ҋh_HHYYY4448 gg[];39Bmm"h4쐋(J)((] +]n &+2;r^Z $=,sJ]#h,~"nB4!# LͿըɂ99"pY&&-ZٳSw۰hPTZl3dǩhX@ff&V"""B 1V˜9sHMMQ;B3$""4)[pK2] P/^L[[ STJ)Q5łlK+ٹKz{{|2.]rz6 Y\6v'dpg3~ lϊ8 8y @fJ2 g`N1cOf,Yӧ)))14b_/ CTTQQQtttL[[[l6c6=JRELLCm"zq bɒ%,YDJ_\__OCC QQQRF$}=?Q_K#kGK 盇f4 "-6蹒9p|RB? ]&Jr;2s&|h~<7XZ :XDJj(a{b!#9'6zjVZECC/_b\h43g.SJvG||< HY]]]R' $44T7Z+wˆoDDD`6蠽nzW;BCCT;AݑG;ZKm3p:^)c``@T*% J%Bxx[N|j-ji^:TSoFhe~BШ?>½0L_wQ%?Կ})@e\,Ry%}z[hlm ccr_|ڶ'&&顾FrH0T*R@bbS 1gZ&::hKmUl1l: J%y\mܪ!,4|_6|ttJҡ3 . Zv肂$?c0 ePZ-qqqN9گ#j*:]n_Օ'B@%mCҖ$,y],:2]'8V?#.̺TUps5:CXH64tީݝ_=76b/i̞=ٳ~/HWWI&I󓄿J_ӑygr )&gヂxYhL|{/t~~~{=6w)[mTpQQՓ&+rtKL%u*q̌+5.u|Y^މERP:kCL@ *&kjҚƲeruW5}NAj }." +=*4,+U(鋗gNK39}#_v@d5 111cֲ4Guq-م&)z:0i[?۶mcѢE,_|ܕ"Ξ=w72/&ޫr-I+3"((u|Ih;:꾣&-Bم2 AX4w7+jUA[׭u'u]ӧ|zm۶~zf͚5%mnvISSe˖M v!6m^9ŋimm%++˧Rʚfjkku@f͚q留Bu÷vZΜ9#jX,IJJ"##猇(RYYP(린eM^^uuuREN:ŋYhyyy ===9sϻ,$rJ-Z4-zz;!QI+_pAPq jsle9[Wx{/@N{=VRSSr!"TWWK~lذ)h[nAPpa#Gp1222&%%7^OEE%%%#V'UVqFW9Z?4g^̼I/;rHh\Fb}zDDzz9Zw)k2] -ML&ndCF>XDDjԼ/srw'8t(F+WPUUEBB111W}Ļf]ruVfqN7LZZ| fJKK)--EbccIHH ))I [-ZZZyLal޼Y1S)*&,Ќ=3].Z[voL` bQbuU[wBc]z&}iE0{Fh\,7oeʕTi&/_Q)++ 233IMM%99k.r S85sئkGno' gN)S@@aaa*7, z>z{{t/k4nF')32 ŋ ?v>;00@UUUUUAxxW]f줣j5~;yyymõٳIII;vevQijjSNzB``X8FJoo/tuu;˗/g͚5>?2UVO: T &ޗ(4nHaHhT幵M躳cK x>V!9>Ҫ D*kLI&GR$Xv-YYYl߾{{{9{,gϞEPERRqqqK0ܙ0RvhiiFaݺu\R ADo!+=Xl[khzTR)GRU(ol|&͸E>t1|999lݺ/3gθ|z4+/urZ֡^pl0 'uza޼ytMM{DYjEEE8qbDfVѾ& !??)nCMgik'}NRMJh } `oȸA8Zށ^-ym4XiM:+jx;_;*=N=]Z`<@MWn?P($33vɍcCPNll,R5Nf8A e_ʨzɠVIMM%33,]eA !! E@bWW:~FaX$äV &((`ˆ&**jڎVI"Þ%*4W헖=6e>X3`ӐF^ÓXn +5sKU8T9>ڷcNh]SSCkk+]]]jhbbb&66d9.*RY.XdEdodd$,Ykɶ@n VK``/JCdm _DD˖-c2kVjkkioo:N`r NT* $$$Dpg}~dddx;<=﵆ RxJJʘL&nmb7yaU탖ΠLi}mFNd\^(,ݐqտp쨙 B9P*Fbzj}/p7NU'B ..8Vr3 ˦-i? dl̥Kr1X  o4MF Yh %%%Q__X͚5kOW^W^L&z)~9ϟᥢP *μQlݺUʊV0͛'Td2P\\N#::[oKp7uV}-0=E}{{.U.?ˍweO)3FaU*^h QUEzL?؏ENY  't[/}*j0[,d$mX&+%A~YZ5+אO!; 6o#m~[lO#yyyp m6l"}v)yj@TB5jzz:񄇇[d:/_>L& Qٲe /7n_N抟 tf3P*l޼@M74oѣGəpdyXXv˗/wD&Fp\v;geCUפX|@Zh!ؖv$4J 7e$o/x LCKSVüLfeHJJBA@T7 jjj{WXx1~~~0` namW|Ix m&sk_pã>(3w\`ƍ8qbO3`2xwygٺu+/*? yF1ʹw0Em3|~lOpPZW^y[RRR29썼MŬųgڵ\p@ZZ<~~~7p'Oa4wy*[XҵV[^PP={&)) RIff&.\`͚5FJ/uP]]MZZ={kxuv5E(--u*HhN+\nW)je_H v ̍+]9g9NI`.M`ߤŏ4ȟϞPʼn'o^AQ@zRiIçAZμdj$>l̙3.3g?v"##'|LI} ̟?͛7sAéS(//w}*HΟ?믿N\\IIItB>t4|7o ry ٱcw}+WO~&"/2 ,੧7HY9FԸqqq L=A}f?&ӛ|QwϹ{Flei L@os .WTqJ,,8X{3SIOph=0 D?Zu F'li? 
Yh =ctCAlKHIBm2e3|w)l^3gP]]9Tɢ8U qVN#,,!=jdd䘹- *~~~< @}5랗b N:s=s=7\V|4ܽ~qM|p {qK-^Fnиv9NiyǕ:f$:\ʴM,R`|kG/qZ"† {14 =23 6pq3w^T*?QZU-KػM2TzF샙 L7dL7|^?v5oR#-8ʯaaat:zb_h4;phjjpg*rJx/~17\{>]7'17&nv};kJpAˠ+sR38P}`O q-0 J>Fwo/gKJxo!y]^|->[H ?e g" %셆RaM x{h}0,4|h{94 %%%<#k-ֵw^y~mc5!Ǖ+W>߽{7===sS'11>SL&UUU9rD1Qy7`˖-(KEEŤs^φ hoo'**wy 6aNnnw-tmL)'w_v|O s- k {aon?c ~AKHH.?<(RTTěo)o:MhtFtRZZ0-l2^|EH6o=Ӎ BOȵbA <3t^@Oөn5o|L:ykέ^Աc2Ԟ9QsX [o}kBc]z&/=]\G $sfpP]w]hn렱No#a=6c~?1 t ur+V83 '`f?Yh M{)8-~ػMm@6ܫOȗ;TuU6vo/$4_ڎΠ#L뾯~nl<t蝳Tuw}֎NoF FX"|j ott2h2M< zL\*`Kp`0aNdd6"2 mdֆOo0С뢧ofS(R9xx_K:لiä|돏Kh<f#z/}ȣu A`mz&/:LJk"O҈GGHfx::u`.\o3e@fJPh DnSDGx22^A>L4|3&h"(WOqmհ`vW 9>vd|%*B/B71qly ɥbvn! V%r3_x2RHN$#9@9D(gٹf0DQZ,:ΣIBL[5MyGfB~FۮS.^eյc<9ϱsŬ[ns3(UU-},O ;ҽsoʼ_ ?1Hhc!^$4p+D[죮[G D? BFr1iqbys>7MIj?և'+5y٧5k=ed|Yh 3]V~;zzv"8Mm<}eF?8wǩk` O,+5[ZM=wR^ilĎ}"C%T!Au eJ 6Ei7Pi\Nٚ_o?o٧2Yh̊@> L\C)/h {"#\YG0QJ!3Džʂ%FJ*I9 IDAT0A\,ϟ|w}[8"Id5Io| Dyg, YW' ށ^.]fn1So񛣿ͳo?G!L,Zљk7_tj4]^yƖ6GIMBFEm= 2*Y6ZEj $.H@>猷! zyWM,Q>TjNLeFy~gN>b/://R]tudt;xxwuZWAdf/k~Oy4B *RdCЯVUH9}SKF9Zf8;/_tfEꪽvM{{"}IIv~aS^miJWŮ]mD8u?X0T6sjd>[(n ޘ"jJ.WG;+BCf e= 0αsz+fi8q1~"T ma 3:պNu.? V{gBN6֯۔ {C5u-Fbqb%Uz]/#_ =٩)T5>XfX64`@ ~72GT N{)RCjXW&#id4rXlįN2K)ژ;3%&oVU'+Fli='/-''>}W3RVIue[^6(R܊ ֿyvȰPZ;]I+W-ĪJ5M|299㊇dDvXlv!>L%DD\Z?eh! +]nϊ"9tc7yki;'6КNYD#6 T*EKUm}nMcj7kt4jcr1Q  F/4i_CCvNBFo/d14I1*S8L>3 ]Kw?mxva.W3K:98ގj_ƙ72_ۆ(@U' ?gs>k2uˣ—Ey}}Rܫ1SG% ~m}j[YhL'd4UiS f4{{ 5 P XS>/R*ٴv70 ;y3b(Qo=ۢCj[4B>L4z#0?aPv0d𬩔eFTC <~Km8\{XZ ֤ :6>(-o}@ٜ)BMfe\ 2<s?) q..w3tB،;|Ngcj0Mל iƝx:FCF[ M.Rx~,c/!~m9y`QRLHq? PQ_pK-nA#+xxqgrsnhC DG?# &/KS"# f=ohx3wΙk-f<;^GQ)_ Jd@D꯸s>~U*&,DE_ m"}J~|L1h2񟯾J--S5EҲ!"*L.JW; ̏~M&;5BGLF[u ϊȚ– l0µ~Ց{2őG6z\`pYڭpo(nAR>'{{c^m~uWTwUYoױ~~~@VGWUMbmb$иz}̆J~M,/TvuF'd'Nȸ6 `º?D6fn䭳oVe͸)qK>|yrcr-rtx d !uZ_[;;r<3j2t%d'j.~s0s46~3_>K]*.=ӣ׹vn3Qjs[m~i;(5?^#uGXrc#¥嶎N[u݋e歿`wMKʙy>@͙W(=7Quu6=(Z&=+~FP{52>2J݀hK!(V>xT3Ñ?ۀ6bkNH:?~Yh\]Z;a"|! /! 1vf&=_pe'c.q `KqѮ3ɴʉ"mf<Džƿ b~[M=wRzU WD]5G G?6*RZnjk80FH|V!9>Ҫ D*kLI&4(hL~]rzKrȔhCRȿ3J c軪~ GL歈d#rMb dnPCXt\vBR8vi߯LYhx.YS*=e&=_1Wuxd +]vD&@%[Q'<(Tږ,O1+rsp^pBB_ϳk~ nt-cѨX,T3;#ͭk|]<@bl [﨔*~#:y.2G ڭYP(FzEhϷ8lUGa1G<dz"S뱘,#ʯcz RÒ?ܫ4~4 KYu4-vpoN[fd$UpNj᷿ -wrr2 d> 0eEYWrqG#ѯc7LP g=K$+x{#-xCTeD͑'XD׳ t>>d2c29`A Blt== |bCi[ jQ b&و?5Y: z%n˫t Bûvt3 * O /azv;3hj g*xM71Y,rt]D v\A%Eږ0 "3$4tUn:=(%X i\9޿g.+lA Qٱ=Pyexб[*v Ndۘ}$`iy 61kD%6N>'Xd.Jm+ YLG_7# ƼF uKDZRV/]4c=A~ \{F>El`fl_{m!v?%&Rh1Q't;F=OKN;asiA`qE:瓼q. o{zmm^q?"""ΒM9 IyuK8~bE{_9wV:̛͎}]EdOBfD$с/Y#KVj2.->2##ZLR }^/}Vmw8SZ?u:9'-[Lp˶4nԱHװz;54BܮK#'/I f=H(3>dEwxTU?wzz$$BhHUTQok[{A],,"JzHBL2)I&|'9o.s|lZ4>lE j~E/+}Ȏz5v˯+)Ƿ۔Ae|b. 9XwjJ,ъ[H/̳]u[>gfrsxAS(/95ŗވ 2]Ю3$I]]3J"dE[9-Rd[nUs*r؞ލ3,f4uI؜C ۦ:\M{v}Ik5\#n_ ݻO'һo㙟=N/\*? e˞j_Eܛ kNu]W~yiZT]v١e= ʣ5rUcthts+J.kx/~݅  o夰ef^sEVTJdx%bCжVɑxp8ٴ{G-GN"#v5{CʅC ᡡ((TTԣ:9ŭkj1zMOn[aw@9P_HLyyw [^?g}xd#6KF-aL~bIK.SΙ>۞þ'ٸ}7X<ו6lK7(o}^oG>ɫS")ώq%iL{e1eF+3ch4c&Z 'xYzzKO/^~?&ϼϼ whK FfudYWaF&sfL ThfMhp|ת)t(x鵆E#<$^h"Q`c*ڪ8d?') \tO1!miȍ\4LgEjrs#$8[+iuНJ$&&üo<(E~(k LLwo]޸S^!͹V*/Bh#"Xt;S9|u' FQ/=`phhzp>+rtgN@I:?/z \”ʢٳcGS-k#DlN}XPi.FBv lIߏ,WHd +R6ƐMZ9󎎕PL; -/Rv]{NA_/NMtD8C탖Xa,$ʝǍӢ;w={?[UX΍P4I{n*[~s\?uZ˿Z`ߔ#%ur$-6vgjwBO{oZ8lwfF{e ,D=^ub-O?cDjW~EQmQ~wͿ)1=wx+w o Ugp4f#daj c1t̛9MMnQR5u!i;|VH !y{qzXT/>F_h!&~Iy0YVp8>ai H'eAqY9g 8[XLYEqZ XvȕSA7qLTVא{r/슉8i$ngW¢*ew꾛o8pw+Mlf`\wBZK^ IDAT?39;_DuP, ˻_!+2AL= A#i?wa|yntW?FUSO?Bz\Л2*mf3e,&qÇ䶛a3 Er,46%\lJ:h%' Zd]ǢS<#R2",߳XfGG?ʀεdC\ewOAkpRR|U<:;dU\|!Aڕ >,?9A[u,]TqMK+*ye]2G ⾅0Ȳ̹Rjjk$z4YHPE嫿eî@n . 
[ a S= Ǯ`jךJ} jhYj+ik L9˾22;2LU\V΋2=t02V58r_Mރ+Loez.&2?xw@8Txv/>#zGǭV1> @~zA1.eeUVw[س{~~]~<}ێ%4`Wb7߳%}zN$iG]@ɺTIIXH03W^NRUS}'6*HB; +2nVm?:QĘa9_RJaP6^\ k}6[&&1?7]qgUC3=;[pR[ãnFޑȧp*Nf+$(Cs2'kkٶ71A|HrbGGPQUtEZ i}i n}MGjO3Vbvdf9~*G=g2xdBKkBS}GUj!@lT$w4A)/Tc؜6ϸ]x?zñlIϡ|,& QL ca._F3&~ϾvW~ HtD8sgNea.N=P][KyU5u@fZ"BGk^7#]i8S[pmh)qQ<~$tkJA+ZlLH\t~)LK%ݏHKMa53:|bAp kI: kyrNd X/DjDRSܯ/:+<ϩa캔VTY:J#6scpPp:8'_3`9$8~}IKM$##l^w{;K$jK>ǟLc'zr44Ofq:,}B ٻtw בD̮'~ _؊pG׳6{-rF{gq&GV}BVƹ#n1Qt!dd4b2"Q-n>/pFqô?mǾ~Lm~yzӀZ5Ew2&oZ8͑l̦o HĐ) * ̶Oё(*-ЉL&3'O->#KQñFOL.#IKM! j}0 @jt*GA,j o{ hrg( b5zv-ň#p2WNg}FsEP^YjlbX"@pNZHGǏk^#rU g\,=#6NLsQq8RPTj̮Jqʕu#>,q,!+ -[2Hn NVOHɄd$*~+ϳ!gӓ!@Zc-(-VC EAF(v?j[Evy6KZ{#u~bLNZmu_q=$7It+HTVdYyARӾ mAq[ш $ar[4!(e;w*@FM-mkgMu.*b~?!,oxYmW'g G٭hM Ag@(vgh:-=)ISݴPU#,ώ|{65;ABXX~s iш # 3j(T0(;fV3)jn!^zjUY;ߙ̶3W*ڌ2H .;f8BkLr\~us QUjsyQPXLEu5:ů2 6+NSfv}tR Z qQ2l3.u43UgHg.y=C{P"AWlU%~9'K\꼅ć7xM1otit=zOk;k209j[?g᠅~<˻1*צ,"Do%UUgHf6'srq8%Oxhi*:BXmv_XDqY9f͆nh"ij/ v| lfs`E'/I /c5g~_LyٙX`xd3v( HD^Afb㮽:qYHđØ6vqK=.B,,22?_ o"s>Lw뙏mY |/U`R\o8ٮÙἅ(0d@?Mn'dj{U0wnޞB!݆gopzv[L4AF& .nVӸ} ><=`Z\q E%XV}Z GƙS mcI@(Cٴ{/gZX%l=$*HЙ9$[!T@w,9;`9F<txL1'n@QƼ90HҗJo;O}*L(?~lhdT~} b#8JJ!K>8[XYd"3'KWFy31k8Q}!`=|y;fsB-`FzvʓGR~)GMm )Nkꝏ3x4׹slk-DʽQT0͜Jtd>+OoQk\U_Ȋ̵ˮtv乘oWnL7qÇ`4~gm[49?Jؼg[_D_8Ol{YGYeUϢ##\.S؋АHF>yxHY1 ؜}5_N k 8r+sWڻ23y&Vlr u:43 PNF+e|xp# |ɶ^##B;}*G@; }C 6+6uf n;vc;&-`@ Xq8f3[v:u^pL;A)};MPײVYб;^/ĆlJDur[9_Qb/>=b5QY]ֽؾjy;ٴOr$h߰c!I[n_q;;w:{aXyr$3[='#݃ڏC|S~'.,P꽁nl8L|L]@>[XJhtΝ?bH 2q-33'b?HTԏ Ү\.em\.C 2Ndt|j JXt5MS~=+`kV6l`FM\8V}dȽn`Pd5e55|wעv$L&[Ngc+. [;p:pD3"m iѐ3ԯ\/$I̚8> gT׺6V} A(CX41䃕SIǰګ;xx|r+~_Q` T׬LȢٳwq4j'=#˜9~ lنN8z`Zt+awdW8Qz} W<,+lԘZ^[N*ͪPe Zb"6?G '{\9[k9u欟$wcku~\Հk7ՁSXR|Hy[q4 nF{%=(˾p+:aˆamhkp8߯u_ ;r9<~"'kdf/2/U%fn+>䒀 g\ zXLN3f=;x/9_ʗqEQXrl\@+i_sؘ͹2ЭX1r@PIX`5Z餰u[w]L5[fd[;/V{sgLƙsͭXkƕ=6?xϽj3pt"ORzEq2>[ 6xŤtbWKi`pE?Gy%FG6diΈZ9XW:_f:bn<{#48;?kGw-%,?o3^w8ʺmdvdc7{a:F=dYa}d{dE+k!221IUHtmQB)vPE_ߌF3c Z` j bwobynQ F;|I)od z~t׭]J1SSan/%#RsA_-hΏȴMu& 'o|_~%~ݶj RJ%8 ld\Q ?NGu}6Kݘ6vT%n?h%-+|O}ϖ-LI1jjovlT$Kn[VR26'_@RDQYR `!. Z"^Szfc:[X_}*z0jz!$P~|,bȀ}8]AS÷K˖# ,b5ykER};bʇPgqM6:s{4F0xW7hmCJFr0AƲJt]j;7X# k|}A&#?zvs/z-y]rUe\Zt vh-,5n%æ8pFJDT$J BRdt_ ߵWAMg,L~D.dtם`jIEQd:/EäkzG4i)̙ޘ8p${k F;oLm1wvT2Bhlܕ% cM\յ?JFt=XF9jmGl\hiN'A&{u:e T]FSRkײ̶f]{x&Tq8\  BI!P-Ʀ{)l3 9| si&gG$XSOU+|!Ofa>A:asOw|O^<XgfK>=c&@Kkh?zB=k؁I=|1XSMB/u)W˥\/!_x>VE˝Eմ%\w:x&e~N5ƫ ;3C%,ZBP- +zlxݑFzw]bt׺DžEh4ܷFtZ-5|}WW P4e֨c庫+Tw֩:s]^:q!t:.S9v+u~=WT]Zj ǎVv{\^kr/י_`OlsE%+5ms]q2KsZ(6ס/:h"A!ܥ>C=>q:Wh" ݻ1ţ8-;ZE̬MR\VܻN_') ^YUfDb'L[1;3QZܣ"/.ct=f:볛I&b\<4!zb<* |_4rz<-q%Mˁtyz+**8R'ybw ;c &ڤuN`,Vk6'Fމ}pi0xKJV׺"FMU,$3jrG,~\?Km?U+cDU_^{p4+[Q"I،K Fh]"=5Jbm ytRg1IXH03ǻƛPe;b IDAT*B0w^c++FT4dE07һid]W~%hpؒeQl-9xW:7n 4%3-oN[ՠ^Y.b6wE\Qxܬkx>= JbQ${nu8^okqǍVS[WVװOdju@dYawDOE[KC h̟5]==C^]_L)6H)}>R<6|U\xHSqk%m%D1*muPk6B NFb1piUTϊAgg`>$쮶=w:~`WZ?'T+\xhi)m.@p%Ȋ¡E0bz|4uc-G(dCj)j 3ǻ73ĸ'G>"2Om?NY{ՠ^F˳b`d\Nd&5Aa84J0h ^mhca4mv|Y:v)a0첝v 'Oi"@p%xY4Tj6 UmnVeE#@: 2 7/.hXkaOm;r](SzXm JJ;wej5"C!,ˠd8;/?t~79@r+k͸IkY7ht&)M|Z-j4~0ho>~ijocD&a~6g^˫{^%;yRϞ"s .b4Z~EeTՈ3P4Dv8C1V az5Zyy $@+|>>Y ҹ7_2Wk5Zn6.APSWGai_$%pװvgВ!@pש[L4a!3EBlK )I¼ H ]@hHng3L${}/_=RwR~v;>rȰ!,v(J9YޣMA<~(ٚU  f-wp-ӓSx|$t 55v8ew?Ț;rsP#xuB<΄>t Šmq^M1o*OV#a;;}kvUMH U)tQe瞡Gloҁդ_ށRs)GO_;SxX=\^xwFsdwFؕK#SkY\hHLKu+_yew~OKH靨*4 Yy|%eeqUT L-qhH;d'˨@iS/9W]VqWW6Ź5Y\.E&.:h6BEeq|8vړ!(w0+"ӇN~SxoHŹ+z. eo4:jNhuLk;%#8/G1Ob g-kxn HW=LG$,g >7b\Dmjc Mh֐YZ Rngh(s-JT% ӆ1+%YQIjL,NJ +)jazrJł%HuYu̿ũeXL +D0*&|k o Ka}? 
e %JiCoBW)= ǀEjAQW̥hF&ܹ'V1h?$,5Mэ'{=8"(6yV޹w$I~u>hhDf]OWn44ag͉c4!Ì>3qzիC%nV8|u=K\\ (KܕҒyw\|]󬭮˝gesgh>kiXl_ctV<58QA,uuZ5 \OL4LIY s.seEG2EqHHt#):KMvKY!7}Αu?f8NF=yfդđYZQwd竫'd\u )(<<~>i $D3>sk`B TV`a Jv**PEo_SӋ z@Y$Igk>hmKܕyk}ͳ"֛kyZ>zN E1yVhBuwАJҾhYs\F#Qo:kEv4w{&1gkLdQ @3\Fkd_p[f~h(|y_ NJŷ_˵_sIEcK) Ks`u88\^؝ڼh5j6CT5xiT]ʌtV_Dw4Kaq?H2Zm&U %,qWJKYFY16o̵:pjKFNFԛ9ī\UOl5g|zDw zÅ,eVFxhuAMݼo_mk WP]ծ,qlSFC,4<6>k=]r.#!tse봢(OCuJjnw Q?;{C%rG+^MQzy^^ks|q>s%3W;}.R5IVqs4:HޮKOhQRZٶ9 Z|Y'mek.g/7ςﹶy7׊SΈ .ht)@>QPxq׋C h6v@pECnkE?@p,; \$NPJF(vDB 07-@,uM(΁P4@ЩʞW률C ,ΈP4@ЩitϏ6X h/uECY@ h`Dx[aF (@js&_G̙~|=(%Iч Rϗ EC5E u:n;3jh6>Zz!uWf98kߓicF5{fMGO~]*N(rqIX<:'P]Tf&?~,F$̘[ j㡚.]mBZ_D7J >Mum-ew3lq3z8s6cȊ+hгxujhVEC9K1bfO7`UBk^Hzy5H](mSS#\wCCxtuh4<>fwA:%)`nTL}Fy چ<*[~Fi;r}Gg u(PfpvQ# Vw9 nla,m/}ÁL֞!H@+9(J88v^B11nECX4h ``ŰM 0=6s{٦|H6Z_5oh)ήEHpc a^imǼK:z{$)t~rw\D&I(]LJdzn]@f3o~zѧԙ]V YQHnʮ^E 1x wh:BtIn6t:rZv`I=uW+wBO}uf 9g (yy׫)0'uNik aFEҧW+ :G=7eY#h_1I÷L?}a٨Nžٜ:W, I#`Y  Ag@(.Kx~^ K8t" jj-V鈊wBOCdjOE6l_uYAFǏW]%#g9mXvp?c' 9͚M=Z;*QL9S'b4&//<Ξ=*;k4z=&`1LZ YZl6nt:eW$;d'aUgN{b4tMTT=m=R(ʅEC)"+ Dtq6~{bwZNt^p}2 hZL&z0ԟ-ȤT\W+j壛?J/ 'O}+UkZj i)TԐ[p*))/tqKkZf]t:QaFGԳ4:t'NP^^5@$TGV3`2HNNfС 0} --ӧOsrss),,T/\nmxH"YRjyɶmHJJ"!!AXFh-6M=͖#2%%%Gnn.~_C޽IJJO>DF6?/\9ּo y\U4gKZ_xh(Cw)~jX ͚=r\Jmm-w- ǎرcF ĸqHHHhS=iuW!P k׮ ,,C2t6" ApoEP|222ȠyqW,S\\Lzz:zR_ ^w:k𨇹-VU5{*iqi)>h''<|{^"ba۶mر$ ɄhD#I:V$I8խbp8jr<Ȁ޽{K~F)//gÆ >|Yl߾۷ԩS6l_ P4aѸrv;wfݗogڵۗSҧO>U}1.&IFQ,ՊnWc<, v{b l̙3[`![EwuV***::Ad?NNN] f3**++{B1++,ƍGݠ:vg{1E&NqAoueI6lݧIh,[ ُ6[֟Yʱ+&M6;B ( ˜SLa e IDATɤqqRRRpuuV㬮 ***?eR0ЀZfڴiǣP(ԩSx{{3b?NLL r}xD w:ϗJaa!<DOV֯_INNh4O{,u9x&>dV5O?: 7G7$~(x9~ypIc...1%ёFqq1/_ٳ1m4-[Fuu5'Orƌ999a6%Fa׮]9sɄZfܸq8;;KnV0x`j5锖Jh48py8x1LFxx8aaa"$$r׿EHHӦM'? Ӝ8qBݥLDD ^OAAd2ƎɓnOmm-㩩!##ZSS֭[yG{uA"nP믿n'2j54:DFFRTTDJJ gΜ&+V$.\@nn.>>>J+8;;cX(++#??ẑ d2q;FYYdffJ}h4ƎKEEYYY6Š٣{9r$;v-[P]]9ŅhRSSܚd2(6%oOsҾVemkq->˚kx{}~'N}:&LՕ{7 !88X^Oyy9TWWEBB锔Hc!55Ur=!4vK%~l2rbcc%+Vjj\^^ׯ'""iӦxb̙Ù3gpG͍}B~~> IPWWǾ}8ujƌ'мfMy[SSZۛ+Wؘ]j]HKKcȐ!>?|UpxgtɊ1+X{`-M& lcV ڱzj~Cx;̍ˌ3&i_&1lذN'bm 9‘#GڝVG[JEee% :Ze̘Bۻ%KL&#::Z*6 HnGV7ˣLڗ9˔V5`,+EEEV),,Ltt4QQQJ 4*ɉPBCC%A(ˉh4;wN nihB+HKK9d2puu7nr7j(.^/R1c={6lݺ AAAӓ(I9880bm>}GB':V!1|#&=16[}BwQ _?LS}WdsruurmX~^ <<<$ Fٌ`@R{% s:;;#[d#h+;k2f3J>JE}}=Ťpy, nnnDGGh]#d2JfȐ!8qBJl'x'xɓ''t^ \*p PU(w} ºB~|>Z:m th][[KAA2i$233IOO???FbLu...{֎ '{mۮD\.RdԐ/M]\\ D#ݣbv}) ,dЕ R)݇('%&-Z F;ڮv'hL&ϯB1 x{{,e/qssrTgR[$XApqq lll6|bb"r#GpyrssmRA =Θ; ٿ 3U?~v`S&6\! 
Y:tu6^LLqUk׾gɒ%RrhmFMM RaT*jjjDW_I.Lnnn(`SB[Z/Xﷵiuu5N-=܃/'Nܹsg-wwڤh4\tI* Pt2~!%ݦ9/Ƕ|u:+Z|pttdҤIq)._3888jILL9d J??._&Jb{G}+WOJ.\AoP<- iii=.'h%&bL¢ў&bAYY`²fKӓy摐RjAR#G2{l jPTTFR/٣IիW۹;fn͚5lذTK-[ƍٳg'N`޽<#̞=z} ~<<:QIh'{zFNj?fg`\['U;f.^HDDaaaDEEQYYzϣP(1bSL,8p=z4\!88RK:u*wGJշ[?jkk|rtݳ킈U$ >\SPP@AAT*1͌5)SV1͜?hnnnR=/r6Ⱦ}8q"3gd̙$%%q  44W@DjjMmL o%3N舶.։5[Jhh(%%%dee/S[[KHH?СCo>qsscСSSSC]]?˖/SN離#GLyy9!!!G~~~Y fŊ[$%%aMjj* !XXT)}^9N4W g<QTTDyy5slhŋ1h "##INNf֬YTTTCbb"'Nٳg9pjƏo# qpp@&7fB=ʑ#GHHH 11iӦq;ƩS#,,L5 * EC gE.@?F: Ν;GDDDFF)))!''^waa!'##www &eB^L&۷KKӧOKPɅKדaXњuֱn:i槣* mۨUYǟ{ɨymJl}K.d⣏>jСht \z N`]܈$??r!ƎKUUFGGG)**vQ/\, yyyiii{R&*++ټy3RAXL&˫!22p x"OfСL6 &cΞ=Kyy9^^^X,jjjV_y3HM&0h )))Ir=tl۶+W͓Hl6iSСC ??}qA&L@bb"~;{*jJ%6i4{`وe MztF[/ozX'K>acװɳo՚jU*888H  mRvф'eē vAV;LZMMMdgg#h4/鲯WRXXH`` AAAP(   ",,R^ʥKrqqbtEBC`TZ.}+VN#==RIuu5k֬&M5f#((儆vz gggbbbضLii)ݦHxx8>>>S\\L&ڊկ@p騟JBhQ4YDyG w3~\,}9sU?M]\\-#F#UUU킒fBRcL&L&f-NNN <ry1_׊J,*EEEd2Q[[+ \$\\\pttDP@cc# B*g&z="""q`#'%S"22;|XW!gggIxXZ(J   bFiRJ" 4Μ9cxA 1 p  >)=.qR8a&~4ɀI#<{;JŨQ^[,.z\.hez,",,\JJJE:@]@R1h |}}[BC`[݄Ec:[6lPUU ׺v#ppp 44NQuݻ1u]}<:wO 4gZ{Zȍ뷽Ϋ^+؟yygGl0Fju OOO|||ѱv3U*H}KKK70mfbF`O!KjtBhtGGV݉GS^^Nyyd)x{{VQkfRӱw^~oD/Loˌ3HOO瞓{7P*^Z:;0vXf͚,\5;>>R`hZ]1pppEr=rss˫W ^F[r3HuA<Ʌ3ҧn!.1ӓdEjTUUI/NzL&OOOT*>>>lhˉ'~C1i$&%%aÆ1dvލlF.jl6c0P*455˲ez4*>7}@>B򅜜̘1cOr}Dqdőn]ƏۥBJ%][Ecc#.\Yh% }DFFXt),,X(L&iii6@`pB#۽KKK{(, 6.]5\]]۹G,ZcJGmCll,{EQ^^Nhh(&Bz=,]/[Lr';~8,\\LDze؞zC=JUψck՞_T_ _򼄄(++F%66WH7BYYjvmZ$>> .H2331Lv- .]j#!!aF$*d1ѵ!QT nU&-- a6IOOQO|G{.Tg uWPP>`;d"''$233z=yyycbbx/U?#RTj|}};,טf4 eeeTTTt#<<xfzu M=|Rl_-k@ *//|t:(--@R|r)}W8;;3{l… y:m̌3_xqaÆ~nߚZ,[+&-M&^ʱ+zXėveN}Θ;7󂂂~9vX %%%;5 NNNRkRL8! IDATL&c41  z= hZt=swYvLL O=۶m#77֒^^^xyy鉋ug{jll2B`ʔ)̜9SXdvCwOnn.?IMMeݺu9ooomFEEbHqrJ%===dX555h4ncbb{{~޼y=jː!Czܯ@ b>OrpzQǓ;#˺wpp`֬YL0Çsv k\bxHÊ+8woaESZZ*śrIdY?CL&պQOd2o޼vy{B uJgĺuSO=_|!XjGرchfnSb-e899"[U}455IoZƚ>>>̝;aú=T],~$4PUߦoF^}p$N |ϟό38w)))жb:S<==9r$F 2Fŋ9z(]c6%o( aT}%P8)>|z Z͡Ckё3f0qD8y$Z>f3Z6}Ehh( 6L7cE6M=zj]ƙ3zY4d<{ŅD#++,rssݻ___ԽAPJTTQQQBt$$$0f t݊DnN'E[[vmzBC`wJ!*/TUU+sN~m/_9̜9iӦ˗̼ᙨ\Hȑ#IO~¿ )oJڅD?5ѣ4555RQQ!YLX,rɍ#7I\/}%yLU}-d2)Ƽy󨭭%''\rssTxxyyV """tS:Y\(˕Mz}|/Bh쎢"i;#6>^z%Kpʕ^kptt$>>xL&1??K"dIer6|7+*+V 22r@o(w/pi?/dVeYȀ"oy].>DzGd^ !hmqNiZy}~_3[:Z\NPP4茗_~~_tg^i(wQ Ŷ,_ɓ'SUU̖-[XbE}˜]LQDŽB'C‡g>`7pR8]3XPȆ%6>,?8|V… w4cfϞ=CAVsֵdvubaݺu5~|Odd$|!!!888-ȯ~+9y$~!ovU5 $''Hnn.G^ÃvC-rdkO *vfFOJ-uMnswIn+wOf222$KUYK@¢!+ j[AAVY9x O=(?8+W?'$%%b quuۻcgrrdQ`0pA"""!!!8pB.CKOee%?x>uء+KOvv62yr K@%+V_ýn DII ˗/dرîE4NThmeי///1LұJ}@uYY6mojGՏX'O]!vqqAR 2e o&۷|/3(sH,lK6#~ޛ^ 濏5kDjj*_~%7nGa޽@sAGy7vؿ``Ȑ!7L455Idee%z>>ܹիW9~8&L 88 t:FQr3hllDTJ /__ߚ"0RLd 4ɥ>ܑCJ7@{ y@PR6z BRR6l܇RSSٰaIIIo0=z4hZƍyg믿wCmm-펵L0x>x~G9|0O>$<҂Ƒ#Gsqpp ::&KE߈A ,]tR>)GRTWtg{lclJհ4aݺu[Nj{{okٳgw^.\kidf̘ŋYxͱaÆIv[{nxv5kHb/dɒ%̞=^O`` 6mbӦM+Rj_ϕ+Wlb;eĊU+tX3=|$4(>˦@YzθW,% %4[nFwʦMOoz=prp; 7;K.7/&ȨQ?@!Z ``~ƌÙ3g8{,k׮^=kt"_;co4Vʊ1+~ܟ`ɩ?<<űjeٟX 7 <ҟf>C㥚K.ě__^[4sH1?f>Nz[}ǦFJY 9kl6ߔ ^Ofn\)FCjxA#!"MkBٳL4SzXBO][on-)WjPѹBht3f`ΝDŽ H"^BC`UgDzG ]I&1k,Μ9ɓl<@O]Vh$&j,kqڞGo&NοIo{t]sS QsoRa }^wɰa8s ϟg(h`}z}bK PZiq5G/8Wr蚅 rСrZDޟ_d%Bh/**b%iq-+W[j]u螻ÇSSӳh]3>C z" wwFw/"ggEYi@L&cș39,`@>__iVu_{RJ߲2Xi{!4/ܧ-AhuF2DM }!`i6#5O?C|2˗/k݊u `R$iHޑ@0(d i)viLQ-̿;J/tVqS^^R^O !4NrYhHܗ\xiӦ׿۷~z<==Z*SMO& mi}P!4:yg;6Oti}wuG32Cg>A uu q_jYv-s{ԩSL0OqPQY?>9g)se)*.%&&ww5m4Ţșb>%u#??ko_+?ʦMu$4^Ha?03rëxo K{v+fb흶䥑M\/\'@_ג'CB֜PzbpBd2ÇyvJN'%ϊ`X!cg?TJ fj NL/o=ՏXXi_O neܕM̄gВ^hW1Tp#2زZh2-2waU[ L{ccBiC)oz?FףT*))-g]-ČgWv\N[Ǟgn>ߘ2ie\F7{Ō\.G&!d,J<w>qwK :ۮlc݂u9 m hցJ=ïcSOlߎQzL^ܡC|G-Լ?sMK-Vm?n*/%L'f$Y"~U}^ݟZ IDATFccފ@0,] mW5Vq:h*IMM`{?IAh.6Z.&~,‡ٳcuuueΜ9R 9X̦߫hb!s'_A޼v\gn. 
( OI A k/$Xsۏ=7SWqS|)7A#qT:{ &!q:zCZm46r(Ѝ-`@I}OyQ90<9;@<> \s'ލ@Ђtii{\sRS|tYX,ݹ[/AgG%n,2cÛ>Ψ70`{ B t |dӓeM6%eIMȒl!j0c "h,&Y`}=H;gssG70ذՊQ1ڟ)¢W7ifhVoڔ'*(@I,4=K3qP@Ī-l޺]mJ]"1qmi:|lv;9yٮrc珟򽿰{X֣ldƘѣٸq#$2<+oz•2GC!`nv!nٻ$qaҎ!6`ƒةKnXD5?Mߏ&;.>3g A΄1 pHmٻm\Wmbe) ߶gN9_3b@WVSXV4j5Ybż˻u4YfNҭ鴰U,k,u9FK(LgW0L_),b]% u\~nץk}͂+q:%l}'3dL ԏ 7}rb S%iܱȎRz"aqWW~)KUD  (VS$8q]| &:S^ZC1:shC"Eh(]-{z%IbYŋ& (fZM>}1`gJ+Y BnͫVp셅d:\DPx8 ˳rs0jssƐJ'uHx$Ne` Bo0պC$C/^{C^}b2AӡVk(,Dn`'xuz `rq$u SO Y>aϾCpH*,Qhu9D^énKpjp3ҼY)Sunx>jN?Nb "6=\Ohh8I5̳[ޝM^yoOѻc?;漌e&urNaASF@VdX=?eCEQLx BҖjB ;m ! ٬Y9"8YίR ǎ eJF%U+Z$'~Aq'>Ⱦ45)HTH.g{+{nۆVح/AU{--3/V_MAhD ɻ3z&!4Ng0{p`BT2w,@^xɜ-йu+ޏΨT*wViFl\"~ ٹe-nO>~/Vi>aԖfBSD0(v8W9qp{@-d=Ԋ=kp\d*2.}@KW٪BNQY*H#VXv]ͽu,^k=?i,?@k|O [Qe6Gӭy7سߕʲUa0#45Ƃ=-wXcSxD4 ߎo]QeU(2oDzD$t6 jFQ:4uI!~>!p)4Y;ܓVBVEdk||~^=2s|tfFKHIqN @;BɆ8ń&`F' WSX\@kNn?}9Fqe CPIZ$š~M2d"1%7tS|uY=?`"mblW2*E3SPPht~KM~ "wg:3|$ȯIU!'Ν`DEEUsfc҄5uJEZ7kGkui/緇kT)(4&GJ6,rĚTKנYv+v Vhe+=t?̃OLvzQs ,Y(]{JhnxȣQ+8+xgh% w/(^ځ\E S Rz%'ӽ];40b{*ԵȪ6#c]Zn78dr,K#Z 1 V2JْY_|_}Q}ӗ62]N`ػBZUu+II:vQ%M1fن\+lPFLl<o_1˗g1fm7Xsr]K!M^Ԫ=S΢|l2xh ;SUM^~s%]HT?xϯXaP)Cߠh(4:OG#pr0,m#ѦS'_'uTw_ "ܱNI߹F˯}ŠRi/nytZ5-"Cү=/>r#!A|t K. —ӕP˯[@Kj$IL'.5 +jt$猐?yOw~*1/<|#A:ǕJ͐pσaӖ~[5 cX4 tAa* Zs(AzĽ$NxR!cTR qiD*(Eh(4*\Dj'2:fY;v,smHDllT]pd`N<͇@ej(NE|$Ilӯkj}Wj#}Crw)_ OAѦl#TީZS3?.XʡИ|Wbf1m4N_/21FGn u+Uӵ5p$y)[X:kj]nWM]1ĿnN^]o-RUOAxMT\BE[L?k/Eh(4*Fxa0* pnpb!26c۶^#J2BR c0*ާy")'{SNt^? ~.Hmq¥S5$W^SP}cG5H`#}Aw#¥n3 3ycM9žçYn/^1&RqԴ!a7`40o⳴yl"XU?͈ɘZ\fW/kSZ %uDήr]zC(ۛ)qJy϶g1)\](BCQYyl%]/$"P.Mߡ܀>}c<>VpQX-ݹӯi2И2Y[JCZF2t EԵ\fNI+E6RShZCK\SV;ZxRF+>k}#ricf;Uf( s+g_vtR>[:'cjXQ\yvU)2A IB}VdA.Ш8e\]DFUv\5O%j\^M" Brqf~6X Z&z!>^-%vS2X(tjVvs4t|.2QDkNqAnP+s״*X.%WcѣՅ"4VBUHO{ޣg\Z.fM&,{N]zYJAxx%2i#@ώct/v *5/($˕)(8/;,Thr4}?ElPi/71GV9WJ禟>hjߌJdVvZȨnE3֍'HuTk)F\UQi FcD{C}Ixkݮt3ӧA ړ>U*;*qƉӧqaY@لbAr+KCSTh8].9Át6L%˟umZUqh}ufN)(է.yq(*(6i AmLߖ1]b/$6*~h"C$U;^^FĵlYNA!xxj#ZS9ͣ"jA-߇q.w5|-\\8cQ0ވغ]7!1 P\3"smKDvLTNQncw *'!|$LB BJv)SV AˤMU94shhŀ\6f͎M5wqeFoj[T9{:QKwO䶑= 78DQı욳U}OA!P KF%לXX&,೸5}]fEؗ_hVQVk嚾 4wϗD>*"CNsi%<{!i9n~~k3wB'"/eWIsvzAa6}3 Eh(4 +_ΨȨym9y3֯g ظ6q1t@T\OV:u< $1j(Qd1Rhj>Kq(٤gØ<J'%.p<[rz寴t5m96o~ИX/L؞O=Gq4ȌIY&l׹hSvfW~iOL8o~}3ņ Ap9=)g IA;^»_Pk#2A Bec[Ǝ Q؛͹t77)\=m%nzEaVVzaIajJ].݈(?T ys"ZՋʮwbUw~.\N֭+_Ѩpa2VjY׮r $QPTE6:N,ٟ554#%>?طk #>3#uӣܴ&by|G<79QA! OwBxxX3f<73G kvjo $EEY8>BU<칐^!;Iqьܝ֭byG }C߱Y?S}&2<{n&;u0IhA8rϓwd;9{WJdxuۂLW!u:.z=܃FwEe14o"u(n_3qDbF˅R5Ě]gn.6KݎVe]wѿnߧ~o~\Lnզ͓S ~T*U(> JaMhzwiCΝۉswq~ ]Xh3kp('rNX*eܻz߶aGj8V!~2u!TӼ^BF#"q:1I6lGF'Ag3wRxL9Dd^ʫLEK)NPCkVe}Uaq2n8vW)\](S ~g展%;LIr#!~?_>7Wp% ƹsL@(d pOY]瞌9Ǐ4}tbI6g-6U+ IDAT(*(en]}6j5IImv q\飙A(ItfM.KEZm: Io]]SQ9uzґcL_$+Eh(]HMǶ[ ]/2j?||f6j>¡eĵiKS͌32d'/m,I'wPt!PUP9#Gʯ/.=}{۷oq 鯻M+٠Թ@\.7KQhmtQfģ"CFS-η܍/6P+iS[1cs?ɼx""qeWsAӍ2TUj]ƩS /({'`d\ZvxhUZy4+`UP5[6ti9}jŊپע75ԍ%T,m.TN':6_~;TJҦ|!2DyzU,8e?/)/6P+ q*|O?{> T~]@Ub4D%9r=5-pu-3tjl6SN宻bȑ\p8==ep& ˯W㻜ؼy3&Mbwˣ+( 1#_?y=M'OZfܩ15GmZzW" P[Q97T佼řBF ~#0gcƔ<==~Y^_H*)WsqzVVs1 bRܒ~!EΟ1cGf̘1XV^Etb fF|k _o<"gQȞ={馛/ e4:b5d;sYY۽1<+?A kdzQdwfdᑅ3"4/)ȓMS v?o¨!Hj^/Rj5b~?pV(&.s>h+zIrq!+QŇ$IÙ6ztIl62sLzѣ WHG>)3 >gHD9ml>9rƌ|Q3r(Qb:ǂ\.f}\EQ%וƼ.^מMhu),,dʕ^w= */c!-+ퟱf237zLj|;`Zyg0D7g$.]@V7y(NպoͽsY{.[8Զ\7> 6}?Oڵk7_Usv^fKTxSN1b^z%ndrV^dnr/eL\ V^TB{C霁U9_asr.f`<d!Ç#B@F5Pa4;U3nDv;"H-i4p|ڛK[gw8qS. 
~!ǖ-n_߿z͛7Rl6;dL,juB :b,3r_٬,:wLBBB֛.1Wypf2!"+4\Ϟ$=-̌$Ƶ "8K1|$J8$W-DF$<2łh`4t9N T>N&)9mիg%bcL<}|W_R~aUe!PPR?]Xy|%/y9dff2bq2>iO=o~@XT,-b ,Hv^ܙNKѳS3.v|9Vת{nMۧ>Ʉ`n!\u.R8,Q Ltd(wqo6^y֭[ͧc|^ΥU!_eT3P:C)pk0t]'iId*ɲDZJD%H8\(vsL~1G.:I.8wo6Vb}%F'FFBD ~C?p9Ы ĉ5k[ uױyJͨ 6 KOg֨Օ UW1́4  A&2$IrQ`I9yJ[MNNcOG_g@0Pkci"zlaȑj> GѶ}wU= p,+}䴑ATΦpvKݠjU"& }j!88nA&WJ⫯b„ l`dnϭ|mV*>+!CնJ= ]P@leȑL<'|s2!G滟oj$ ɂJaa!ёMLt8a˷󧿾M#Ilץ 7~"BGq1d%-H$*$ɍPZNEb ^K6m[hӦl(<3,]Q| >ٳqYÊ*)rwu$)uؑI&y&v]j6I`9 .($]zTdG̘1u/r缒SípU"4¼ףGcZxڵ+'N17@Aa!VĢaЪTD˅$nV\ VeĈ :y !!5\CVnn9Ϧ-۱rID%&AGhh(zbРADDD0vX4?-^ͫKDx&3j\2rx"G1)DZn͹sʢ"N'(DDDKvС}ׯZտ~ǽ?ueJhGtl:A˽@FI>@Hݓu;ÉI"j0P q\t:j5h߾=ѕ>qD^y6nH+=mۖ3f|rnK>p_zу޽{ ԅ?]BbRE]*Tpت)[n¹sx"t:hZ QQQѪU+DQ}3M75+U/0x4|ʌFo~DFV6}'loɱ(yD 0D-[Ϯ] GaٲesnzIbb"aaaȑ#;w˅` ,,(ZjUA n/^sTzY98b,&fё!Da17n*,,dQLbbىCY86_9Ȍ8% +ӃE3%s >L-gM<)V^p0~x|N8AVVnBXXU$/ges!.-zb,LLL wu~[Jv֪z!9s?`zPrrrΦ`hnٳYޯI@-@@No߾7Ro~mΝ _J "!$هy.rd?s ?=`$  K/Щtm;LL7|SHJJ")) EVFHHXq p}Y,Xڵkr0Z$J#sdd$_=]w]k~~>k֬Gˉ: "A3f4]oƁ8~,BCCiٲ%={W^Җhd/ sW2Syȱ "r#l޼~+ѣ=z4P2p8tge>w|;>k +|j=KSrw׻bBC?ֽ]dfn un7<999,XV錾b1aO}$_O>}hx駙9s&ͫ жm[ڶm[ Χ~J=xY*ܸ9_p5הRD W. >%Ϟc 'GδiH~l޽{}a^cㅍяoNgLB霡|G>Oi W0P)?Fݎӧ7Tz9?,/_^ >!// iկpW&xUXVQ ^Ea2v_ Og뙭.OO2sL_< r7y<Vv W;P .|'=0x7xþDRkO#IJöv4fIҐ%Q,`{PO]7D~oU۫bL+*)HGa\}@EEz7[:ȇ[? y/_NTT_/ j |N>^'V3FS&ūC_ɾO"M3( ǖ T l=d NSmKd'7Uh&unjt 7N^/؝v<6 5Z~^x.]JVje= !4A>jK.e展*cҥ?~W&69&w܉*@D`bܸJf`<;2X{n-y4ħ~hSZƵW7E$dI^` 7$w@ɤp_ e˖#h"}Vnc!%[N *N;ի\C=j/ yoۙmFJE &#?4s!8; j/wJh믿ΫJAAAMj],0Cjm۔Oʢ_L?X!^͢7ndt'e66 %x>іҝ SNO?DZZmlxmaG0%iˎ. E WPh0tE%;<*/nk:w̐!CxmUM}X}j Z'uodΝ6KAN|@ /% (si< ( o%==V6]:uc xO0Vu'$+egpܒ~ALR iqwooڜf|uW۾1c7`4h4#&&;^a6y}e@&z'Odȑ[;W&^Lh?<{-~G/)U2¸{ywx뭷|hmGeŊ$'jnl{csǺu$ Fl]vk.`P'}F27-ӦMSN<$qFv܉V2^բjQDDDЩS'Q_rXf {fѶm[LR999,^Z-: c֚ya -+-,-UΒ%Kyg+Iʸ۽\(3˟vY\o_>NN#99Kx>3zNvźu(((ytM诸֬Y#NGDD}Wݩp z… bm=X<̽-c?h4T*z=-ZO>DFFt?K/Ĝ9sjIذa;v젰6mлwJgݺujt:&~Ѯ];_(**"99~ l6~WN:k/j=g>갖Y&Xd iii=z &`ۙ5kjZn%$$гgOxÓ<A ecDZqV^M~~>aaa<쳈bI&$I9s-[`PzM6mFhh(_}&LBRѢE*1FC||*A'7~qթ"~WN4L@h4) gxpzVg\.t5\S벛7oNcMJJ {{Ϳo7 γR[ΔS';ŋ˫V\^Oѣ*e˖76.kN>ͯŋ W^:u*~-&MS `61 (;vOwp9 h"N< @i۶mKnwzAe\шhDRFnn-4~a>6mDxx8+V6#l6c4EIjˉ|͛ǹs爈gϞ>iłlF2ˇj"4. 挛ǿ Xbҥ ^{ma00 lٕ'(p b(K`ѢEdeeB^$aQaBAA?FuVa(L6)SеkW:uTc `v#InJ%STh(BC(K]r%{tҵkZt;f *1LX,ArvQv_B@p\ >I&sѵkWTjP"0f7OYRp8|rˉ3g2rH:tA ((HNz_AiޞѡCY,F# IRkD K^ʨG IDATshZwNǎQN Tq 4IdGkL cƌ|Hqyt:juek=cԩw},Zn^q)REh(Ƚ\"B vʊ e|+O4Le qwTl)v]WCJ'.K_AjcOVh};)"(hZ<5ͨ%Mȷ~g}'Q[ѴQ UY`t:$v1vR=>hDj'e{=iSKЪ2EVk+K+ VRɱVdŇGxx7Pn oXSn-߃h4ɑEICǞHSt>A()/F\.W*\x7=ËeτQ̥T6oZt)=:TY^j4w:ʢ)Tө;ۍfj6hGKt==An_Mji\.W=H<6LJg(BCJԩR~.^$IU\Jϻ+O:L}UXXX|E< z  7h\-=S3zt:+}!BSl,{v....gPF ͕Ḛ$??oBePA^>;u|:w|EC{%Yd;WՖ6U/$QTT$,z"4*I, =|/՜ -ƼS$IP^v+Sr{޽j"{o"U_9 ީSޝ# ( ِ$̨қ^Qz(b Inn/BU(BCL7ɶ$Icو,\JIU5EQl6Ǿh7 oɓF٣k0{/?W>(S 3uZx h*S7=ޟ[V,|eGUzU&?BE ez{E * .M d J){ٻA=::Дj)JU&zwZ"u)4'/{7=@ZwQiS2%I%BХwHADXV뺖EAWן.MPRI:IHe2}xsf&dZ&纼<3<o);{4f^Z^el>iP`jext:T*Ŝ=hX+{K2YHGts2Mu,w̿CYNN:Ŵ&r}p-Yugc! hD]]mLj3&ӺѶw^V;W\) ?| {lX]h45#NR0 lYwŶ:,oO ug)V˶|>O9mEm==PLl3.y\&k7Z7t-w,zR>疛9Z/j=IuuIge~>0[*= x<d2Y7dyYYQ__-C```?gN2+4̆G>>>6ըLYM˲t:qh>{h4圫̰3C{0Lh4/MYVch[L&B(BӱW4ەJk7H(N+ӸhZvNCCC{_gԝFL&L&^V0:Ma:#D"յZ-;m0^v?R Vs" 9z=i1W*++!  $snc~t$AF'njolldLd <:{P]] C̘αe7e())^Ghh(X,F`` |>*++=zN!b0V@L!v[~zަs,s;b9t 4 J%{uIWkkkt:w@ 466{=0 uFqEENg919םVLps4$ QPP`#<<9{i90ˇ21f???f&cOje`Bbd2*^h2xsz=~GHR#,, g1ë/@GF'gY}6222B6,іH$H$Φi߸XCvv6:s@@ҬYF#1w\!##UUU3gd|wHc0BBNe:~~~(--mgy߲0!(([.Vz!x˺K/7oرcz*@&!<<'NtGrVf$އNr/ݻ7qY?~ GLL 8̝e M*b̙ٳgQ]];wAAA>}C2:fĎw c=^ ұYwp%?~RlK.l3;Uk|>˗/Gqq1>7n$ Ñلt:]Uf0Ѐ8KPPMmޕg@,3{dL>'NdTWWm;|asT*iӦq:r9rSu i hj5bbbзo_ܺu 0 G^^&O=4zW&,P(sAd2ARv=`T*v85`"""lמ w@2χ? 
/mt:rash"}gmEW471Axx88{k8b8`4l߾u0}vT*s1gl<;e]brdBzz:!5v1n؈PwãPNN+${P1;YU[{ð:˺s/.\@ll?,㡶s)^qqq2eJLhKSVV'hKq$&&:TcR*af8Zmm-6oތW^y .:Y_hhh`W7iMAzڵ +W|z˺xJX,T*H$j 9=zxeDN:T*m/رc}Yŵ؄8NsHCII ̙pJFnr:T*ؾ};^x̓4+???466ô-Z ZkP(䬇 eeeDlT w"%%ovvr9R)PVVlںkllĮ]0gǡ)& uuu`BFL&zXkܾ}iiixwbl:]>rFNNhę3g HK/ Pwy'NK"@.CAV& }6.ϟGmm-{=ߝ]}}=F#`4Nfpa~&H$q|@VCTBӁC&  `0`ժUN)?!a.h4(D"HRHRjQjo>1Gvqu닀vw(Jd2khĹsPWWիW{9d2׳;3W}}}m-եFݻ#G:sQ X,FUUL&D";\Eb޽:t(ϟH$^gox<CѠZ|>~~~H$hDff&***f>L~h4سgFc:8 |>rb?JOXbOdhhh@MM ;[]K/N_~x駝Yl`<0rD"Bx{'r/xsE:6߶H$Bhh(L}}=B!d2ݠNd7p^+w.BϷ59Ů]:|.C*B({A55l^]L&\t o{gw[ZV*BT55r VZuB1D"mDd2d2ҥKѭ[7IR,Y7o9KGxOP;PTT{M`B\^AVs5(ѣqI7tdq=a]㜜dggwO`֭?~}PPPg^ÇCVW^yݻwm 4L&]GwTAgϞXz5Amktػw/r9V\IAwk׮qcjt βRď?ÇcܹN)Xd ~vu:sѣhhhEKDRRyĉok-벶[nŌ3裏:+ .DFF>ٓ^]]݋szP1p@vFii)pMzzT*`0.&Rrr2ƌm۶!;; III-&-6`0 55<ӟ\Qd!h" ۷iiiիW/SwHOO4YkH$v:t7oބJb+ A.h4"%%ݻw7x:#QFaԨQj8~8222h BPvH3T*88iiiΆV^gWjl@oRaaa5kVT*txB]ʕ+8}4`08ˊ666bϞ=2e 쎢{@3g08w1\pZ]000]ܹs »{.LfJ۷QQQ`ܹAK$aԩ:u*snBNN{mCCO=zĞ'00ӦMkqN畫ӇݠS#;;/^d?z]jTaʕ m_N%%%xGRv\R &Ci@ĸ(^ BHkBvx+i;уc:E!Bq8 4!aԩvI8qPTx衇p syx<8qyXH$?v!B!:Z+WD||<|}}ѷo_㏮: l2lܸf 6`ܹ<lذ}YYYf;w.N8bl۶ _|v!ȑƦMXx8@8qYn He˖αFd|Xh.^躃qٳgǏgkll9ŋcҔ6m¼y8s^ƌnݺ!((ݻw\.G^^ˎܻwrN*9f͚יvd݋7x| ޽m۶!22B< ġKdd$V^Enα\.Ǻu0l0( <3HJJ]z<$H0o<_o׮]P(3f {_||[׀*Qtt4Ν >'M766__|3_|1a>|ة4:d2᧟~¯LdffbժU.99( YL96 X`$ :mڴ cǎV@@&LXpai2p-gpd'@bb Yw/vލ#G`ƌ1cΝ;c:x DDD 22oc r"Ͳl\&Oޚ IDATcǎ_z)Z brgĜJKK[9Ϸy`0`ѢEO?i7[d ֬Y\nyk׮+]rw}3f 00)))o駟:脴WbǎvE=#_'&&"55۶mTWq> Ghh(x< JL<ƥjoh4bҥ{.Z gߣDGGcHOOǓO>󢢢es?ǁx;؄#{èj5QXX퉳_"zl=1/_kbر00sLgx( 4^^``ZP(%9.**͛3szgee5;L8#Kb֬Y{Ν;6Sw* ;v  Ho>ܹpf=N{g5kW^AϞ=!O4ݽ?ey) G}>i&X֭sxy=M{;&-b4my4[9 J쒮yQHpד8s7`ʕh4HHH1ag٣|}WꫯZ6ұP"Z֍%e͚5XfEH8{DsNK #:x~}R)NwsW~ 4ZjgPy2\[vp_`ĝbv|lKÐK!ފ8mS^UvTxK(pK79whhu:li?f_qFr _O=K p4,*kj]B؟t3'"$'w/]{BBn.gs#bz$NQثP T"AxhC ]\–'`7 4ލhIB\,7VNOo#ҳl kzj -Q&bxNL& 57uoEw;͞,&2 ,z3Zn } ťP6`4$#'RSM==RX wwqp[ zu8kΤkOqoq< 4dp^eUWRrlwEztsfO/R,9 IR>>YQU,z.{e9;W:ѥe(™,}SǎS#72?XkTq<<.^AiE4:-dу`@R",۞;woֳ8 Gq5Wnx⑇%(NLFj%(Ç`%[w q7y`>|oBR!,$G!m+]%cR3{͛[p%&J**ۃ$ŧdo58k7PRQ V b"c4,xz݃~yiC(Wn=yB>әuaLx@w_XV=vU;%IµWc6Wh*3vCPt NGai2._[./ixlXrjb[^}'Nc㗭Ώsz%Dލ 7گh:Mҡ9Sm]3`g\}wno_[߀9ȾA|l7_J%{_ƕkȽsXȗWbރPkxt&[][7 K˰q>+.-u9WRu@Umz}.^i1i԰6F\~'g20=yLǠ;M]{FKWq9&g:N4Z-.6=: _cG>{;ӱsݢ":52w"=ф\ͻ>={ H%$+8k8Nı#bq8x*yNiYؿѪNFChF~x٥_=j`+ R$uP 2u WڜlNZi}Ogd<Ϧ>~1Z[~R e~y֐)UuDh$Yg\ je2\ÿ~‘r.SƌniA:v$n-m_]W/k,vt:=r ?ge|w6=z5uL߲5ߢG=LOK9`*\& oiwz,3f_懳ٗq&ǩ;N_;" ́KAQ ʫ}/'ϰ Cvs: #K9y(rkI0C[c35^[1I˫8#55od ze#|Ec;mU왞<;\&;ֿ9N%q>~e[y DVW}W}7,?n ̚Ժ!^֨RX!寽n}{Rrg[m0$k_M0[vQiգc=VoՁrcfnFX(p3M(r2Ieg?! 
Ee묵8)O?g\Ziiy^bX:ɺI2ԓէv< q 0qdtV<3/׋WPP\oOQ 8 G&q\<7Z9wpui%sPNW*þ38RQŭ_:(7$Gǝ`T'j"[^m^5HV1T*u>3{\ckmNuz={XԐ-g׉U0UV *]Br&wȺ<6q'IZoll2þCy$#8%qqU$5fy[>{ekM)ך4j8eI+5#cGaSKܷ$9wɵq1ȱش&B6&֪Kcw]>nPrvmnX߶xAiL$bq3X!ucM<,*?<ذp$, MkmN}B|6ذ&8NTj ;N;嫜Tl16CRH |̞2_o18qKv )(.Eb+wd$8rmI,NX\h4nI a`)UHpGod†]{٫2__[eЛQ!f$ť((2_bnxg:qĀ~4WR&̻UprB  f: 7ns^i=vd?w >|"U1, {h's*%P5Z̘0@K;dhxD٬R{.!l6%[έ;ǙurĜt-hĿ~ɮ&3dA{O{ -;c.*H%q',Lb֤dMO#`7p{S˄ ıR:dM57ϙǮ{bd q,Ab"ñG͋ [ҧ!Zxu<=c*WRJ&4ypffqZQ;dK'gHYYpHE(*FͭalFO<z%`H˼.z:# @dX(<ʫqqmq n[ӉÇlw.Y<qΨF0fpKJtU<6q\>N ӳ;e7m2)\Iy%^|zxrGa8?ywJ7:Eeȸ|WroKA}U{`fC'Õ\'pI6ıIN)l50',~mxz#*p&39=q[{4y מOBNa^ד#@.Gz8Ʉ%8x*sh/ƿd|h֣./GgG=h/7o= oD>>xslܽ^l~2 x~~Xx>mnf!}W"O؂aCp%M& b;ݎm/ŨAqtz7f@;dbOD닗~dPT6vxth%; Ǧqvu 1q%/Q'WPC&2ooRR1vx<u;QEu-w9=&IOOqEpN¢%NıG5 < h䖋s~UunSs8KנShƵ7l(bƪcȡ T>_=bq5"xbP4/-]omH>ꔴ;5h^Y=b Dz%=YG׉jS-gYPtADhvqy YF*HAF_TcI͟>=bi.b}6"Ga8y2V=%8`$GY؞1Crn3 %a$,nܾ$iI{S&c3I&5Nzz cΔmTYSYaޣ[ q={85+(*Al.@Um- $b1"Bw|w4CVTAOnQ9hJ|ݹ}m~$u>~'+7-9S&q!|։#bȺvJ a!!0bdw_/4TT@*oBB!~I6?=~ } .乱t*[w`Fܗq1xg(p!/^vgUQY9>pTӆRBظvc 5) @=Ü)?ʼvai9a+bꘑi !mSTVcip&339aҨa=eb{gbOq&3yꎃGM!Vñs% gjKg}X: @ү7z`}x#?ܾ{"kIeu .˜MAX61NFP 9Gl4~I_o݁hL>$Q0HǙY8k 1`&'GW7/C9uX+|a DvR)Y&54(QQ]{e[\rJ%6n4&J=LeM-6{!à>IqM>; Aܭso\\ɻɹB BƍƣUB;*ф_/^Ʊs/,rwq!M0f@L5 ~RCqFv:2 |kX(BC!i9bBh4BѢ(*-Gu]x< &%#jsH(w9yqwq1zab@be:Ʉ+q ܸoO!"CpwqH(6q+(*+GiE%T (!q|> 2IDATb1ETݢ">& /PRQsrN [܉<ztCĞܷ7$bDZFFga0j] B@ ;ze# Ee娩JAZU$b1|(B5,]A 3t<hB!B6#B!8B!@B!phB!B B!BQA!Bq8 4!B!G!B!( B!8B!@B!phB!B B!BQA!Bq8 4!B!G!B!(p#FE!B!ġ.@g(u7XD=Cpo! !Bi'B)8T\;V^d@Cf[!vҎxn,wB!C@EnBjj}AFVjfE%:uC!(pBd& |m | 4o$]Ez~u'Bi 4\/`Ы7LFw ToϨO@] mtnMy&N $&&bc| f͚y[og}PVV#,, 111oLa\1x`s}A.c8r<7Cxx8>1 x駡hǛ*K/{aو7|7bڴikz!<}ѣ-]Kǟڵ1B!3 4`РθP] sy BvUi6 mg41k, 2%%%ذa^}U9s`{9?gΜ^Guu5*** bȑ-]s)̘1~-fϞ1B! 4NY{z3L5Wgd5sQPP5k@,cXt)6nưað}yRӧ1a\rXv-R)v^{ ۶mc_D"dz  <<K,BK$''~;v Bƍkx_~eD"BL2b) 33)))HNNF޽qChZ;wׯg`@PPN˗K.4h褤$@}}=F#nܸR Cb̛7/F޽>ʩP(y㑒*cĈHKKCNNF꺳K.8z(oN!BH+Pd|z_H/V$}Q A9.vBzz: ,Xkbر-\G x<v܉J$$$ 88 . jh" 8ƍҲepE,YlJK|? AAAXn{ bA,O>4hQ(8v6oތm>6B!΀gBBX?̀N[׃3 `a0?{+rSۮ(..Fp/B! v)Ͽ?(CzX}` 2}CF}x 2!B:1BUY8v`$A8D=’9RDXX~tE"B! 3u(EjE2DxDt>~.&!B!A!B!h!B!( B!8B!@B!phB!B B!BQA!Bq8 4!B!G!B!( B!8B!@B!phB!B B!B ,sw!!B!^gIENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/figures/ovn-north-south-distributed-fip.svg0000644000175000017500000060363000000000000031465 0ustar00coreycorey00000000000000 image/svg+xml Compute Node A Interface 2 overlay network VM1 VM2 VM3 VM4 VM5 VM6 R1 (E/W) B A B C R2 (E/W) R1(E/W + Local FIPs)   Interface 2 Interface 3 Internet/Provider Net Gateway Node 1 Interface 3 Gateway Node 2 Interface 1 Interface 2 R1 (prio 2) R1 (prio 1) R2 (prio 1) R2 (prio 2)   Interface 3 Interface 3 Compute Node B Interface 2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/figures/ovn-north-south.png0000644000175000017500000036127200000000000026361 0ustar00coreycorey00000000000000PNG  IHDR0bKGD IDATxw|TUiL&=$J VTEŊ뮻kߺյ U4 $g2GȐܙ E<~I9}2jE @ .땗z@ imml6c6Zxxx\.S@pc2:1Lm===ߟ=z3V W 2:%W&PRRbVzF3z!4 ǏS^^nwˋ@ 2V3:(x)Uh 4+uuY=r9s/I BhD^^'Nl6Adt4jz&94 me2[);[zyx0W,1&'-uS($&&ҫW}m@ BC 222@T7.E|o5m2Ϊ( gQL٬ >Js-O Bh@zz:^"dD i|c>i ח1cƠV rC @ T [(N^W\.G鉷7(ҁ9z(X{llsu!" ;PpUHN@2qDaAgz)++ݝ d2VݻwcXFʂKat RV W gef@>ǎcZ aԨQ @ @ XV̤zVQ(ft:i'//2<==6z4/Da8gp.RdL_F$$$o7GL]]:@kkNTT*B% @SHjj*k*0[BvVFjkk#C {X|K8? 
S`\"%( * TWWЀbd2~~~DppMt D @ ###f%..0F|!j[ >zk ')2(O eE6+VKH|z 2ՕuһYhC{N(Jj52 łb`08M$T* #""`%BhO3g8qЖbDDD ;OC~2V`8B`b\<7'Pǿwncua8Lu̢Զ6JEhh(hZ 5^jWzIMM 555zgM>}U@p @ \l IKK #F jüi=mab;[d.6{)w_?©p3ws5mx, p#E%t6'CƄ$Q^ZJII ---h8p aaan|@pBC .&z;v`4 `̘1U˖ҨwaQwew\eʮo6[d“'رc]5 ???' @ \b gW@ HfJK*D,\@Jd8%i){~)ˮD}Z>#G\.G׳w^*++]M !4"Q^^dB.@Uc._d7RHe.7t;+wNJl\D߻#/[|ߗ1clȑ#466O  !4"nKjqؕKLw^(ݷ8@tcYȢ `x2a<<<0L:tH2@ .-BhE=uTY}R:l6˔n1p@җ]O{8YWEsܲVˈ#崴b@ !$tR`X\ Jl]L3}Sd} l.jª!0V~./--sˁ3:<.}D[9J[+:[3\[S\=w^O߾}0 8]@ ~~7&4z= .܃$,v8?<^]u5+u9XQ֭[ &= G @ HRZ=[biItB,ƹ.%EIn{&jmT p_BhEbPZZj? nrrHw.sfHB80N|&elq%2ڢ|Y,[/Fb@ Q^ j%//lzZfbDk{q^~W.LR]_xΌw *&pJKKm@c F @pE_۞&&ooZ*-^x{xll&7004* ~j?ZT^ |Bpr1mݻ7 GyCC[Eϙ^:8sNq=7§ jǏɓ@ BC \ެXi 误+Q+ C =X:{y ʩSζ] &>>VA{1p-ȹA='i{#|K{xc*#%6VdB!߿׏A ?+Bh+*mGP ԡbjṩPx8&Tʀs <<Fa_Osk۹%6RMc1 Q *R쥾}2l0*++߿?n+!4eI]lFzy:=r&;4߿:!CP*-V+~VåM.SnZ :нص,| s@cL52D30'0o۷fgXZrOjZ/$ʛ>{g3?PZMJ_7~̵}V#G,qqqʆ_#9g\Z:m~)>ܽG:u*>W Ea͐"] swkI+-7K.b{o`Z]۩mguܦFr_t,eyL&ƈ#lQ@pY/,j&,uױ>s=))Xqn$TبlقިGß''D֛g>L&>9|w z/,;h|,;h8^V"3̧y^10_޾q.wa6Bl_~L/|lwnaå#++RRR1bp_BhKNA}7.sgbt>6 Y?I1tgVgr{a\Ը2 , <&| j[q~H5sya1aa#WLM/>=:d3.N0՗Ǐ a4m `gw.!| 2x8YQEQ}߲}xlZJKl+rF♭ń k/is!x* ;+6&.. @ @ $.s3VpK(RL n !(i[{]F***FJeۯZMqmM͵.\?ڷ2؝upX>! dP03q3whN||x?F; za`X8w}łpmD6:!9=ȮUo'0X4puI†0gOJJJ$44TDKgbpߜO}MvM}23n&3f29f2JOI Ndɵ<9 xRz鋟LΤclX|<|RyV%6 ~=WTTg[>p:rH=҅;PymKkɈZ\G 6?FPWSekS0H]yɈdR`= jkMgۗnL9>ɃYBL=̲\NDZc?~| C @Qլ<m5Vf'fxnys>Mz627 ɑȔ^Sڕ EFF?޽+VB{.\~j;l}뒆:Z[ў|ۦŠfk>x K0gs ϭkJl[uI3ҹe?~'ő#G7(!4E]\|~s23Sf#Umي/f`@>cɱF#--m٨7-V6ө*gk2M d2*ɩ髯[ ꁿ)E.:| TW3MFbҐR\LuK31 "RyUSvYKϤ<"Ϥ,f6O'ߟ:rrr.(k*Cgۿ%&|9 +WRⴝғә?9\pD(l6gofKnhߠ=t|>||644>o(*`6K{2%7z>sTrd@ pH',M^ʷ`6HSL= nW}at&{ .skNe{\hjB =;d;_EDLxߕ@m$l PQ R8grn]=4̟?Jjjjüvl5+h7HF=-#c?KO8=}zVNol -NT`E >Jr6rcu8WT1k#3׳){M&cb;ӹ5F^×es1;am] 5Z8^qBMEs%%T4WKыiӰX,2gb]#): w̪!wbйt#q%}3q&|:WMovgfdϑ9M{|feh6Yj&/^ߋcbX@ \!ReeJ>JԲTF]by]И%%>UWq i[Rv#a6g;-cf_>/OFEV 5rلޤh10Zer|<|ohjl6ڊZ&2(NNB6]Y §+ct-rrJ`UjWv$/WgK^g9Uy \.X$}<|l]t%=ztk\@ #@ p 4K4%j%CÆ^xZ֝^ٜe M3f2;a6vfGG?JoW=y41ZhUZg x9k3f\Vӿg[. ^`63s߰lRko49Vqc$=2 xv{۹ Iϗ_םFPx0 $-~7\c 6"}?Yhs?k Jy׿2Mdا\OCk=rF>˖Y\LEEAAADNfYٻm]b=|g=߭%Λ'&]ړnriM<Qn7;< TWW%8xs曼4%V]^&6l u:)j(ɯ'>?ϝћ<u~j? #.0]F WNFey+sw'A/\gұ:87I&kgdϑ091ݱ@ACYY:m {ЦGxOj[Z؟3뾡ɅQT:8 ^߽. !ynλ*]۳;Oeg˺|&'~rY$dԖo߾'ޮ#uÛG<ȃ#d_><&+w{πNגSchptکo'4Y2?}Ȁmea~Xg#odJ˼Ax`6+WO8mמ|w{:}}{qdBbAP;7K&d '''&ѣwO~؄dm`Q\R_gor9f(N,=ޅ˒?R)cn??amF@8[4藙m٩ɘ={6o\8Es NiS)u]MvM6YYddSRu:IO2~=10d z#O:̯@ T!h5*^'NJgЅ7 ^2K=˃1~17fXnpyS>tjH Nw=h1ryg;x(]l󣾾lz聏E[p}PIV+ kk6=;F~́x1ԃCRDMg_rI ՟چxꪫ jo%<+J̿Cߠ`J( bdyiUW3&:g),=iߢ|zkU簾iGh%S 0Q=#1|~g&L!T~3{GX}S+J@m}r l=tua4DJEh'tvB';1cu6]MVMy:}hubHh'4A W@ys9~7IڡC9,iubcH_kЙt"}#Mܚx+_jfcF?r IDAT8rI*Ur۹k]LW'ʲ1o.ӧ)))TVVRVVFXXKffݔZf>k`jBTVӻ~\bJߑ_SwǏd4|=54Ȑ3k k5e2޾6֝8m~Ĩh޿e>kp￉Ƌe7NaCOmGE0O0Xg`oE)hc]I9nrng3du?_Z3P,߻QI(wjml&:6SNeȑ9Җ<266Kψ#sC]}k=5dVgQɪ]Sʕۉ!aC >`%yȇY4|,ͯgyr-@ʋ=aR !/i,aiR&/I:Ȟ#yp0Jk>3n&gUlmAi4ђMJ⿷oW.ɕйϚSWS"8^ZMmz?bI+)nL_7|KzIdFDE9g7{{΁X+'6  ՟`Z75Qasg+Ӓi$Vx8wrĮ] C0Y-MM|t,kbؔeo$f3=j ƧZT*ۉ#%%Պ/}q]\SId23m£#:tdkٱl{dHgthFEC 7BhW GˏYsbx/ ={x|}koSY{K&c\^_`;ZgN=:E-e_~5Uv{k{e4 ogy) v/ޝ踗~u;Nc{Rv$#ǗLZ ga\?0Ui)\?p03aj7^ mΥOe_r9^dG%WWWk&l hQȚY}1&<CPdk?BhWes? QyjS̊mB9Wm=;`Eɱm-9 {%}Qwkic/m9Yft3d".III,Yp7%x2C55JDRj;NEc>( H@Nď~+:ZFyS#A^Z;˛&Cm~ 񎥌wS ɬWDȬ+XK5H{YGg/2LCғmwDjL1T)JHJ7ٮ76^NZYem㴚s gjPU bc"\P8@p ,\!. 
;_`K:B͝񱏻 eMe{]:U-UrcY;߉ʫchhdY2^SQY2f 7ѭRf~+Rb6:t(O?ELw.==CFPڊ Ȫ"?3SCZEۜk:}Io?"JIQD, FA||<ML1@ώ4B޼;\X+_/'21zbT)H-KHyg#KsZwc&31f"c&3 x+BC ٕg?u^*/ _>==ƾ}q :ڏ{#ṽ;wy/=9 |<|7hG-vp{s6URRBSSÆ =ΛoIAs YJw'˶jU847[7q %lZyߵ{h*xc8&]}OZ:5i^s]A lh[61~[|՞fUW1{.ƶmxqȱ@YY mowJ%cƌ+Ɇ76\D8ZQ}̭cBîwS K_EKK YZ{=@6W3=v8.*+xq׋˂ڗ<Β1K V'g9Qy¡ÛG/G17dmsC\&gfL^4zc{Kp_kk+YYY6؞y6!Z ׾ӽnl8ń+<"]ˉ8ֲطoiM lk;Paᬾnɺw};+-8>.FdkVP!Iˎer eztNgrJ Y/h`t&/{7M&:B̓#/B`6"}/y3gꃽylc<>6mQvnxHji;R̿f n`fL|վb)BC .sK6-!.\&7}d1ac?(l(tӓ'=ɢ]GW5P(xm2-2"%gj+g00 @~}m&C&ӏ!(5t4 + 3o;]Lupwb`?98n̉qe꼅YL*%)))$&&@yy9IIIp$-ɢbkq,?׿26gfvDWuqq]u4؞ٜY2\v\!S0&r [ogC Ffu&m~MَN?C†t9[y9^qܡ./>~:b0bՅ{gg=5֜qI-KuNoYQh"Av~cō+gFs$77f--<6mϮ-j߹};4¾M`] Gst%2ZW_C[>}j_\\Q~Ds-k% b] :/y}ۿ7KF/a%7ɯg{&=3׳>s=PJOh!4U'[z6ׯ}leŜ:ew]%W񻡿 vI+K㩭bg1Dhr'Ӕϰ iJFCR ɤUP_קfujgݑgqut6wƵoul62[\X E9Sblڵ˔cž>J>g 9\ĉ)-m;-8qxjL&ܼ b"顽8Q"|"xd#<2 ,K]Ffu]r^2{iX4|7@ )gl5j>і!!wʌW7.%.suL˿ׅLߎSUu;/GE!s<`02YQGdVB"6 {ou  2KV_%ټ\Ŵi&f϶eR[;g7Ƹqnّ"RS5dV+:gwe`"\ouqۓ˹wqS!"L ᥕD2;;3i$Νˎ;DDD0lc4Ze[N .>AA$R$&"}_ 6,HZC#"گ{K 砰;{s5G ;G?!߰a9MF;SkScuPo1n-uzwS6F0>7czǽ\zuXMY~Pr۝Zv,V?x (JƌÆ ,ޞXd2q2ih&IjŤD;@ss?3sv&zTT]d7ns%%%r9'OVĩzCE5&n \Pe5\&16+ ,K]ƺ$7*ccc|K0KF f͉5ܿ~jt5kr{+3^ۣ{MٛXqCn@M 7o}-~Ƹ}ye`hdbagjt-%1$%&4y)FCdb{;?}tڵ Jbȑ\S(RSF٧hL1Yk%E 8R$2{V 15xU* jz%fBY.Ǫۯ/+(-fT3Ft:Z6KIgĈ>|fH޽\BJɩSjfx~<=sbHX{JJY%`|xf!;E)Bh? <V]iwo`_V޸1co}k=G&/.CƝ/ ;Y?{pO ޷?OΗ">=)| :D$-wqwzߏhD.;8|͝YFQE!U"a+J xxh&[ x-ㅇsm/LrM%YUmovh#Y jF<#RX2Lg [-kϏ &0qD (,,j"߿?}8p9bZ, ]EN{u|} z=A!D˲ܘf6fm䍃o-gdԪA!xrܓ4_. !4CŇ}]_8t!_z7sߺ(j(>4l(o]yI5Z9XXނ\Q4e՛%2-}|vχVs+3Va4e,HZouСCH( ֭[Gyyyww\.'>>AQ\\` ,,zJ}Ҭ>H^SYq "}P3sw9_j޿}|xyOnښ.jTV=kg9Pʕ<8A$=% !{FJru?4y)_Z2SwujGVVk4Q(zoVbۈ{{{Bhh(X,jjj1cccILL$??5k֐eWoP*0 vG<==4iSN3g`$Ve2JJe\.sv;Aފs7˹+g ƅ.XIm8ǗQLa^p7`]{G/Ъxyq"`\ h04Ae~[~['<;M}_Zs-⻬;ǰzRs(oJlmI~?9Hzw}fS6'&D|';k{E&Μ9CQQWwwwz=ZVqX,444u9#Tttt+'%|a~XǓngɳ ^ڳK4?fFk#xDٵ!~:%f;}Nөu-屫z,]e{^B⁑l]7~ }Av'[h66$+s4Zd;QżmohlgɵңkT= ep OmzUއ|#~&Z "5p+b|cfc>9e#hm2&g>Ň>t9?!lq3_E[|><2~1b:၇׿Z``2cŗ]T//g6}EJ0o%|bD$޾,b|x|G~$5ײ|q YnmWnnĎ@+Z&_ybRsIϥ8Ԓ_MkԒ?2һ$_/k߭u\CA]>TΩ$L| :uǁao^^{͏'C+Vzʩd6c2[0BHAm y5ՔQA9C1د!ˬ>~h OAުPd7p y`؂V1txyxj9SNiE%4MffsOQT2Cٿ=?EU_d$ZJ۬-Z2p{.ikp:KD'Q~ FF<6fmtIw_u(o*gwn.T_huSɔ) {Cj4z z>#20hUy<'Ϟ'LE,zZEhPC3:nChng7HXVs<{sXQժp@  zG3*n:(E\M&GNq ``Ҹ1$OH$<84# @^q {76@0 L7wÍFR%%Vk.$ ?o.hj \9YVJm7v"k%6)pc07`hlTzc֋Юp80-4L7`nĐ3g&" ̙<6͹3xy`ńh4pV{=qÚW22(?h Y9UqR$1oH߫s 5$`h7fc2i4hi̸5ӧ.j7l6;[}_&sOwwb2`Bjdv Q7'|=n#rJyb]2B<ŭQL YɃD 圇փyP,TTP\V]#$Q O[St .%`B 7{*Zk6pVu#cw$A|kU5GKýg]Fd ] ?f*0{=xPU[#ϔ j$d@Xe,2K7#BH .;d6^7yUiyza ÷=ؼ[fv18jΝ&t7'V>]Ӑ&Ə; v{|a :dzu]mpn?SjUHsQ^2j [aCAױL;R\⭂CKDEx-5u9^ǽfD+Zmۺý"IX2nvP_NS?c*g*ϰdg3%r *kv9?ig~z#d2?!yƃ 1!aTJvq&'7?^xҨ>l]5#C!F *g ֢WXU. gaP ah5xu.V!yޅbeh2x}: l:^"?3=h26gɳ+c\DAk&3{2<Sz PZ^TF Ѩ T@Byzܧ2xaفy%#IoJ<f_n{=!a7lݓiKw<C/9uoZcHܑ<{!Vs|rΪbjE%!7^Dt 3ɨZUS@|P<}$CUmo~dQGcЋj{be<:ғ82xNଧU껗2ˌh+cY'4f͖fߡڸE`08BeW>Q F=|}ҮIRwy FFUEN+wVf)Lf Z9ϜU G+nT=,ÿmL\Ҩ|o齨HںLeM-.X&tRsQ/ؖYdۑ. >'ڴVtRV߿Zda<Ϳ~46==ӏ#j"/}-FEu 5K4j5O.Q|{EeD5h2ΚqW8!t vIL""|x?S DQ3~(*UuTrK|Z]gK>vPډF), YWlܙB54b~翑zO?a@цMJa^E,_I-e/yLGjc1SL{$FC7a ȩ,d ٹyL7H9[d:c2y5J}Jœ3vDkVm̚{SxY^N}{o6dy籆s\׎J%7rr. MCn,.cln>ׇ<{قᠬ-)߯uًUlwN}c#E-l]C#ݱ88_RoI={Vť|mW=׊043k6oBi/[p;cGuÕ ˉsqA*`ڤA-l zZKLxr!10[ j7<VR?C竸Q'0< ;sr ̬r%HϧMn"-@9m?f㙍}! 
[binary PNG image data omitted: tail of the preceding figure payload]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/figures/ovn-north-south.svg0000644000175000017500000057347400000000000026375 0ustar00coreycorey00000000000000
[SVG figure omitted: OVN north/south traffic diagram. Recoverable labels: Compute Node A, Compute Node B, Interface 1-3, overlay network, VM1-VM6, R1 (E/W), R2 (E/W), Internet/Provider Net, Gateway Node 1, Gateway Node 2, R1/R2 (prio 1, prio 2)]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/igmp.rst0000644000175000017500000000604300000000000022573 0ustar00coreycorey00000000000000.. _ovn_igmp:

=======================================================
IP Multicast: IGMP snooping configuration guide for OVN
=======================================================

How to enable it
~~~~~~~~~~~~~~~~

In order to enable IGMP snooping with the OVN driver, the following
configuration needs to be set in the ``/etc/neutron/neutron.conf`` file
of the controller nodes:

.. code-block:: ini

   # OVN reuses the OVS option, therefore the option group is [ovs]
   [ovs]
   igmp_snooping_enable = True
   ...

.. end

Upon restarting the Neutron service, all existing networks
(Logical_Switch, in OVN terms) will be updated in OVN to enable or
disable IGMP snooping based on the ``igmp_snooping_enable``
configuration value.
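A minimal sketch of the restart-and-verify cycle follows. It assumes a
systemd based deployment where the Neutron API unit is named
``neutron-server``; unit names vary between distributions and
deployment tools:

.. code-block:: bash

   # Re-read neutron.conf, including the new [ovs] igmp_snooping_enable value
   $ systemctl restart neutron-server

   # Confirm that the multicast options now appear on the logical switches
   $ ovn-nbctl --columns=other_config list Logical_Switch

.. end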
.. note::

   Currently the OVN driver does not configure an IGMP querier in OVN,
   so ovn-controller will not send IGMP queries to retrieve IGMP
   membership reports from active members.

OVN Database information
~~~~~~~~~~~~~~~~~~~~~~~~

The ``igmp_snooping_enable`` configuration from Neutron is translated
into the ``mcast_snoop`` and ``mcast_flood_unregistered`` options set
in the ``other_config`` column of the ``Logical_Switch`` table in the
OVN Northbound Database:

.. code-block:: bash

   $ ovn-nbctl list Logical_Switch
   _uuid               : d6a2fbcd-aaa4-4b9e-8274-184238d66a15
   other_config        : {mcast_flood_unregistered="true", mcast_snoop="true"}
   ...

.. end
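The same options can also be read back one switch at a time with
``ovn-nbctl get``; a short sketch is below (the UUID is taken from the
listing above; substitute one from your own deployment). Since Neutron
owns these rows, treat this as a read-only debugging aid rather than a
way to change the configuration:

.. code-block:: bash

   # Read a single key from the other_config column of one switch
   $ ovn-nbctl get Logical_Switch d6a2fbcd-aaa4-4b9e-8274-184238d66a15 other_config:mcast_snoop
   "true"

.. end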
To find more information about the IGMP groups learned by OVN, use the
command below (populated only when ``igmp_snooping_enable`` is True):

.. code-block:: bash

   $ ovn-sbctl list IGMP_group
   _uuid               : 2d6cae4c-bd82-4b31-9c63-2d17cbeadc4e
   address             : "225.0.0.120"
   chassis             : 34e25681-f73f-43ac-a3a4-7da2a710ecd3
   datapath            : eaf0f5cc-a2c8-4c30-8def-2bc1ec9dcabc
   ports               : [5eaf9dd5-eae5-4749-ac60-4c1451901c56, 8a69efc5-38c5-48fb-bbab-30f2bf9b8d45]
   ...

.. end

.. note::

   Since an IGMP querier is not yet supported in the OVN driver,
   restarting the ovn-controller service(s) will result in OVN
   unlearning the IGMP groups and broadcasting all the multicast
   traffic. This behavior can have an impact when updating/upgrading
   the OVN services.

Extra information
~~~~~~~~~~~~~~~~~

When multicast IP traffic is sent to a multicast group address which is
in the **224.0.0.X** range, the multicast traffic will be flooded, even
when IGMP snooping is enabled. See `RFC 4541 section 2.1.2`_::

    2) Packets with a destination IP (DIP) address in the 224.0.0.X
       range which are not IGMP must be forwarded on all ports.

The permutations from the different configurations are listed below; a
quick way to exercise them follows the list:

* With IGMP snooping disabled: IP multicast traffic is flooded to all
  ports.
* With IGMP snooping enabled and a multicast group address **not in**
  the 224.0.0.X range: IP multicast traffic **is not** flooded.
* With IGMP snooping enabled and a multicast group address **is in**
  the 224.0.0.X range: IP multicast traffic **is** flooded.
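One way to exercise these permutations is to join a group from one VM,
send to it from another, and watch the ``IGMP_group`` table shown
above. The sketch below assumes the guests have the ``socat`` utility
installed and that the guest NIC is named ``eth0`` (both assumptions;
any IGMP-capable receiver works):

.. code-block:: bash

   # Receiver VM: join 225.0.0.120 (outside 224.0.0.X, so snooping applies)
   # and print the first datagram received
   $ socat -u UDP4-RECVFROM:9999,ip-add-membership=225.0.0.120:eth0 STDOUT

   # Sender VM: push one datagram to the group
   $ echo hello | socat -u STDIN UDP4-DATAGRAM:225.0.0.120:9999

.. end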
.. _`RFC 4541 section 2.1.2`: https://tools.ietf.org/html/rfc4541
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/index.rst0000644000175000017500000000033700000000000022746 0ustar00coreycorey00000000000000===============================
OVN Driver Administration Guide
===============================

.. toctree::
   :maxdepth: 1

   ovn
   features
   routing
   igmp
   tutorial
   refarch/refarch
   dpdk
   troubleshooting
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/ovn.rst0000644000175000017500000000612100000000000022436 0ustar00coreycorey00000000000000.. _ovn_ovn:

===============
OVN information
===============

The original OVN project announcement can be found here:

* https://networkheresy.com/2015/01/13/ovn-bringing-native-virtual-networking-to-ovs/

The OVN architecture is described here:

* http://www.ovn.org/support/dist-docs/ovn-architecture.7.html

Here are two tutorials that help with learning different aspects of OVN:

* https://blog.spinhirne.com/posts/an-introduction-to-ovn/a-primer-on-ovn/
* https://docs.ovn.org/en/stable/tutorials/ovn-sandbox.html

There is also an in-depth tutorial on using OVN with OpenStack:

* https://docs.ovn.org/en/stable/tutorials/ovn-openstack.html

OVN DB schemas and other man pages:

* http://www.ovn.org/support/dist-docs/ovn-nb.5.html
* http://www.ovn.org/support/dist-docs/ovn-sb.5.html
* http://www.ovn.org/support/dist-docs/ovn-nbctl.8.html
* http://www.ovn.org/support/dist-docs/ovn-sbctl.8.html
* http://www.ovn.org/support/dist-docs/ovn-northd.8.html
* http://www.ovn.org/support/dist-docs/ovn-controller.8.html
* http://www.ovn.org/support/dist-docs/ovn-controller-vtep.8.html

or find a full list of OVS and OVN man pages here:

* http://docs.ovn.org/en/latest/ref/

The openvswitch web page includes a list of presentations, some of which
are about OVN:

* http://openvswitch.org/support/

Here are some direct links to past OVN presentations:

* `OVN talk at OpenStack Summit in Boston, Spring 2017 `_
* `OVN talk at OpenStack Summit in Barcelona, Fall 2016 `_
* `OVN talk at OpenStack Summit in Austin, Spring 2016 `_
* OVN Project Update at the OpenStack Summit in Tokyo, Fall 2015

  - `Slides `__
  - `Video `__

* OVN at OpenStack Summit in Vancouver, Spring 2015

  - `Slides `__
  - `Video `__

* `OVS Conference 2015 `_

These blog resources may also help with testing and understanding OVN:

* http://networkop.co.uk/blog/2016/11/27/ovn-part1/
* http://networkop.co.uk/blog/2016/12/10/ovn-part2/
* https://blog.russellbryant.net/2016/12/19/comparing-openstack-neutron-ml2ovs-and-ovn-control-plane/
* https://blog.russellbryant.net/2016/11/11/ovn-logical-flows-and-ovn-trace/
* https://blog.russellbryant.net/2016/09/29/ovs-2-6-and-the-first-release-of-ovn/
* http://galsagie.github.io/2015/11/23/ovn-l3-deepdive/
* http://blog.russellbryant.net/2015/10/22/openstack-security-groups-using-ovn-acls/
* http://galsagie.github.io/sdn/openstack/ovs/2015/05/30/ovn-deep-dive/
* http://blog.russellbryant.net/2015/05/14/an-ez-bake-ovn-for-openstack/
* http://galsagie.github.io/sdn/openstack/ovs/2015/04/26/ovn-containers/
* http://blog.russellbryant.net/2015/04/21/ovn-and-openstack-status-2015-04-21/
* http://blog.russellbryant.net/2015/04/08/ovn-and-openstack-integration-development-update/
* http://dani.foroselectronica.es/category/openstack/ovn/
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.115043 neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/refarch/0000755000175000017500000000000000000000000022514 5ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.127043 neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/refarch/figures/0000755000175000017500000000000000000000000024160 5ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/refarch/figures/ovn-architecture1.png0000644000175000017500000062224000000000000030237 0ustar00coreycorey00000000000000
[binary PNG image data omitted: ovn-architecture1.png]
TɆ%^E\T(r"Cyo!ÇQ@"",(uu 0`vqVC?߷`-&bѿ_ 3)fޠPi``?[]Tw!E1ߠC지v, ?z!AFBG + 0x̸[X 골Hz[,Fa1|߂1C9R6[LW@:zt"wz6mۏgC5;;jo`* Φ?m* 3ۓGX;A79\rq8 ɘz܈@t$Pij-}z:I=H; H{m[@Wn+2 B"C$ {c"jf!FEX*1#d "ܰR =zgW .D=J}[ڞ-ۻӇņ&4_HY .~:T@YDiu_jKFi:1ck1BU'!BQ@,F`k,4%;T|+-i^~<A,Yۖn#;ey1CC]f :E8vEl<[Zc:xݺ ))FB/gte=,Mz+Js^0000x'+#ьYY@zr޷Fqsxpj}~M@700 5HOrnҩJ% BM+W^!Ν;juG1O0[TY[OqY FdX\Eu`L!;; $ТD.75!l'&)Qcd!:"s>jgMna%h13wVTԱa^Kc2uvXN Oĕ.ط} xlR!kew0000H~%ѩYX@8Gʼnj۫!E48X&{4U kp8\ %{F!awӪ,6c'-zECs{>VodͶKOnoc{,rÃT@田e6[|}8:$FT@~嗸jI_g[opk(*%lbesTzmO7aI&;s| 눾h>ix2tUbf>w3]LBBL856;{SXѥ{M|}:9bk搔W d: ]5 HQ h502/%@U._Z&Y̻Ѡ[Ar3ټF/Q)dSە;e>ޚ ӹ[0,{-%l}9 f9]AHSyeUs)͊g*@$+{,)!*,{{n>' |N) -v3:3u<}8/#&2ԛ#?ψ\{0WV|ĝ~&a,̢}{&x<͆]'(r ?n!ęR>pŎFM]?U\pK;9i k(~="Z|/sK $ҩ$:"5+*6["\:T_qTB[gB/`vEbNU<ʗVrˌLݧ?2Ry:8Y5Q)| kefQDP[kۯqb:7 g𣣧︨P'1H+9]ĜK3qh2~[c3\6;SzE6{HURT^˛k2cbn@^銚:xc_r c&2vU76t? Ov( SFPmk 뻼 btƏn`RZYS  tX>_8u k"2 ᓅnu*5M-\E]:]NQ dCgy]Dr-  p:MVڈ $%>3EU"\-Rb-AnS <0D #\|K< @\ָ+륪 :@p?bK~ <۴HkۡKb@^^e6O`d@.8]>xRz.,hrΟYS#9.6qxo‚ږҚ&[8xĘn|8Q!w7 NNVGPm40~h2#'4ǡnȰ \n^+/eXTcq48[eX*_6}{_ΟHM`moeModOf6b"CRy_6Nm`N NdxxeW{m iْښ`pP[Zaju'*<9oOMdax}Qt\4M~5eDlR7gBUwL͘8Hl_{X'٦+t+.q)\3+`-+`-S|߿z?w9) Ѿc!B.J1I HQ}ؼ/7M;CGݭlwkUNa%)#Sp4xclvv:){Dnɯu N k'{N0mt_wx(+n7ZA1 L`N\MWNB]þc-eǡmvE0w>}4<OŎ",d^99ݒd° ,.^KQYMhkR̞a=8rn|^TlL+6frvIty#.n(#I;f6~ۆ_ DcbdޓMΕV1*dxDЫG] ~ mGqL6;UzNG&}LI!:2[ʮ[U<* ۭO=Ӿm^0(a!hhU'l LЉB j-$ޮ"}n5b@5VjK՟њ`si 2h0X*  rPO?|o>pGHUDm~bw Q]A!;8LxS.G:J ϲDnU%5mQE'2TZժ;Ruyd fܻ"W:\Դ%JP (JjZ% 5!5Ns/HH#iAFۭVKK}J8h\T(@jZjdaw2&hBAJr)1(HSi黅",흻 MTuDTXoRuv =ɤ0 {eH]<]đ"؈ t~U֟;g%ֻ*vKlv}XU|rޕ!%@Jl~O>lm}4G fh]S,N#*,)%Ͽn9ꭙ h$<$+7 og; ,8w.rӤ^l7dlٟ PR\$PGy8R6'^W)BT^]ϛC*h2VPo?CbdP;聉TQSus>b)E'0h޽Vk`S\Hᤉ%瘐~3j,xz -kn5AJM  !tKVnp2̗tdR?$$/?g=% ˍW`xy'ؼbRIF J*;9+Q$1}]rVx䜮9bUX~O[35@_0vc M-LJnfw%rK(rLAdZ@ yo R ~S`Z&&4/YiRT$|׳IL/ﺀΕ'ܾTvaLqE oO3b aVv%@A;vpߡ<)=Bj34. Wz?K9cSYZ7 坽J;X eu\!xha1QHx AtpդA8C_ŬЭxm:ˮ#նgt(7_v)U; i5=ʫx}n4Mb2)~hxܠvƉ3X{8q+4k{a8SFAi5+7b@r f.w›ͩ MP|tO;* ʪAJ{3.+'h-f)/׏у ʫT1\{p Kk@UC}gfƤ)r#?E"TE?9[[}/k3cBJ+ 4f6I#ݪj8sԅK q ^5'oVwqE- VoozF4G6Eҥ.4(РVhJP͊1F&I`Q50ѷh1ھW\d5}Ã|Y}m씜fSK9 is,?-Sɫmkcg‘= a8(j  Jn%+RR2 k5))@L1]$}{THMKoS2>08dv8T {. A&zx'27(Q|HfܟJyR$9COWɽJMBx]`qK ݳ~xyj-`DŽ>/!2ptτ~ߡJ?Hɨˀe" ~_:rDK055-fݷҖ =S\TU=~.Kb :'UTKcImu UӶ6 >o9i6V) !&W!tC[xH}h6BLdp'~(c{2J#y{6e w.=f64G0󈔢ڔL~ jy}[1iz tٮݱ@ mϩ&CV@Z +y!BRhC/L/¯Ws"ua3HK)~;Vnm/G˗?.dݍY'EYtѫiWzIt-4ZA-ЄS;+EO!쬌ħVXjm:³BfG) _Rj yϳAK \Y( -dԞ w߫K( P@ +j5u7oy* Y.k%i#ʿAN;KH)B@Bٝ8Z$(%RGBxsn*7əKK]ɐ%ŵ.?njZMb_R ֐oFsԴPJ}\}KŽU̚p~ɲ&ڒ B(Ri_+ fzzg:4 "cXJjԄruΑ)G$5S8`Y4t9q0܅z.@t\jݠ,ZRf1!Z7j-\'|{V'[8~v2/h<`(х2/3-[/2.uAz>wPj yڡ/km6 [*uGhb)D,hR6sۑ*b܋ }iRۍURme9.^ =hj߃hw³OTeo$&HTz;-ʉfJg,z-X!RK! ИJHP@ ΚN84M[h`&iԘ꼫}p&@ݰv4`SҳO=Gyc_ڠ1 `F@PW6~5'^Jjɂ`By%.t)zEvXw \sΣ_WH!fn/e2u⻼{?x>T]ijZ,oF.L_0}#CiR?0ժ,{fKwEƢDŽ=i `s@- $J%h' Z>Vt0ջLuO{>ip5*C/=7ɦ=')18%W;DHU5\,_wK}IOS}9uAz.LlzKUD=_'| -S!DAHf+G*Xx"Zrݲ%O[S:5-}7B9D+ njv> RDfmXVԤ'AE=T.HB*pHw`6oY1q[_}ꂧP>kԥRq߆6D>@ZU 2{X"cZ-T2#]!i/+mo}{~Z/A^CHyԴCHY 'cTv]>RHV9+@.G4*Rn_,6&@\ HENa/j[ φ/>R pj\2Z ˺:o"z3YO.zR_ Nϝa\Voę.]Bݥf@+$=Ɇ eYƢ)Ae?KY?k@Eyq5ɤT_ :Q K4o޲iR*@k \6`6mi-+blvhJ#C<"$wetBY2koТ6N2UY- x600]U@xr3d^Qϼ/y/O|1d5'_>!GNu؏Sy>zܢ悫OQvLmX MD[ʋna34jZgMJIIE-%1m_:ꧣ.^U#[ md=v޵AYdn@*t~o@W9K+q]ȗjyƢ?#v l%V"҄x'%}T.Ao`{} v!mbi~Y-"S^&؁ %]Mܘ^yfAbYmiq GOR/ࠐt=p88dOwhaāH)<]‰2]Yf/ 1a5gyŕۙoө:;JX<(/& tg[ɥ|+_,ہw.[@CVffOmGq% &%&GpT;n "r =" ŭIzgژ-fN敲X>~az6|'#b125ב3.潵;۟#8@ܢJ(bx|8uv'Y%V bB)?T4\n7#:O``r,nM#<$ ʪ8SBf֘o |Uy I. 
˅kACk#Ǜc8nLw-wPPrx'&U!G6;յv|{&UI&y)*kyuN"ÂRR/Rt&-sxМ+|htn(<\0R2lS7P*"@f뙰&Hy~D:58(Qd Y-M/: =STLEMfJLdAkڸ0r*'+7ⳭGl}:>tWo1+ Oaw5}.߶evZl Hɦako.&3I7F 'U_ފ?m*#'#9;_죠уZ5{칶/D_RŪu*#$#c)&u3)("*kys^dRp:|4խŤr![8r8ٵ{ l 60Yd#<, t8r&wyo9&y}hDU&SVUg["%<|4FH`zO}쏿:DEM#S1 ?ݻSYLyUn9duD6959)C.H ӰM vKNJ_n٨n8W"mͬɃ;X82O콭Xz?B.ۗ/Yz WM?WngOf{24?\6T;P#3mbVѴ(ٖɚmc|&x44{<>k* ]Rc~g]n! IDATC100000ty&!kﶄ\ vGİ~̘П!L?uh!-fjYhuΔ^fr0ys2!d喒__A`TV暽h$lbܱ$ng+?2< Ob¸I̝:ͺtKc R7@/uz8fN'5.20%' 48֋~ved)_R\nZ;~ioj OE—3J(r+ W\guWz< 'QAT6`;AH7ﺌ㹍ݒzw))&2$(_\Q[/Nqy ‚~ ?z b-k Ch荾}`nct1L8[I=#Qrll?&Oդ 2*CB]H) =wfUvS]k'.*I:;faǿj@.ӗ:"eUWKQ^]GNpQa 7M۫tBCseqLӏ3B/L 3,E X'@ݟ0⣛ZGb"C(@1{F7Ciދ-Ǐm662kDBP^cM$n Ѳ²7Hgru S<o%"dبPb'c ZhvDߤƸcXT@W|fշJ-&cÛ_J\ԅ-اUVcr3ADeQ0x&Zy% ke4l 7\^X YZ/^7, #r岮w %(kκ#C#C4SsWDX̄|&,NiINPZicӞl;p+^׫G 8<:u'K#:YGW\˭!7GX>OqE-1!\>kRls~mla 8Ni )!:< q[H c>඙9xK;m(:o&f\.|˭q1 ω3E8]n‚z.|c9Lbb̞2 :o?̻m AɆiݍm/? b8o~$Yn ʪPUakX,&kseÙ>aGr9 {ȁrE嵀^m`r ~Ou$7V略Y%KluM|t>Iqb1lˤ|\9?ԓ}V ,8;fe$~g7p#8[B5w~G!ekYc<7g7%KmɲlpƘ^BP TpuBr&$\$$nm{\լf.Ǭ֒-ɲ-yjwov{~6(=RO3Q$^Tޒ:OJ%&Ķ,1Ngd]8K|e-ߏwA/znO ǴG[VT[[$qɤć=GO0mbnxMH{e,٤m5wv!2ѤąsiH{n/GZH#Ǜ1n_=c= 1u7 8^Amc' qܵvl]ӯbk0贴w+ΐdBJ4&dPRJsG/&q4 P!q7?>s43%+E{wwvJq<ΛPEEጡ9TE@ ,@$kś1cIO'_ڎ)iLH=ͦ 'ͻ+{er/2cR ]V|MYd]p 1&{9t\Inl׷LʌgBJ !mjl2Z&Ji,7ØZnYYV3$WږäGpϵsܦ"^2#tp?'ԄH",F;z)*i`)TweCUSDk߿faf/kHb x|~./ׄƖ"bdz" Ú+1,?m ڴJp{}EYxH٠OP W/̂,6pϑf<^?qQr)*$0܌8VSU!2Y1|ΥHDwvJ[NlV¡&n_=AԷ(*IEd )\8{P~ӈ 3fdzn:țy>1?@T9|+Ƞ9h5jv35;`s̬Up)Y sfj5g-{I:b2\X@@ ƌs  ú XUW99o:ɑf\a&\;Zb[{9P]kg10Tp;-}8\^ rn v:H`_:\K 7n_$AN{mHz"T*(}g?'Z d}wi մw6n]N;hnA,¢¬`w xC/g)*9*3'biRR'39N#%>n%5-ܾz:C_(E&չB~ '}ɳ:G I ܒbtՁ/393]VG*B $ &g%eۦxNP:_=2W/BKmh跣Baw2q>7εW7!1too;v5\~GzRT|VzlNo>U-,~O ;5vJ1JeUN:Oz *3|@06QGQ!@:QFje!,Ƚ>?vaLHFZZeRILH!7#m>r"$fqusCĨʻ4J>%&DzbԠcjSZ\Eg;%+ X57'dZԅ5]VcÉ0~;57?>BsG/mVNb[Y,6 <f )&|ox=ܻq-܊fJY.h `q [_2wG` >uy6 wP U5Vˏ[Fj(m ێ10ck|7!I𥛆&G5W4R.^=V3>\jHBen/i&(aP&kƾ P//8%e@ Ƃ PR @Bz}}ؘ4peB˪Ѩ7+a 3j`t[H̟,=>߸}1ՍT5tRZJymJY97QI=9)QǺ[q_\MIM 'Z).ke圉XL:.W͛y,(9I7鵻HviQ|e3wv xgC} {t$xцMH佝eԷteqb`oEC 2ywGeJ^ԜD"Ìl?Tf祱0h |pJQ%7=և&WT;Ϟ'9=9gH~~s:iJBq^$4v%eH3V{%LLq3͑S>̦]eLLtSZJI-+ G{$|'q5,~sKx?$y @ SK@6wv\.\ u&hT؜ i1C$uŔT^~ mVrbC%խlUμiƇ#IP^ۆJ%f$oB;RqIZh@S}Jv1}R 鵹h%:̄$I,(f2.Iqt:(*ky;udm0F׵ 2NN'$8цAG98o=]OwB/ ¾yz ޣ"/1ssby4z?07#[WMifRF\H,v2\dQ\sf\5VY>Duc'oo;F\^yGErɬ?rV͇*(eεWpx~=SZ FKj?E)MUlͻM*uKί"wO)hpKa{:>@ %@$xK%YVeTR:el[EY {El-bkpjN"w:NpŔTv؆ tZbaFnY1pSW J '6҂uJOK+Oz $&'EUl=(ٲ`He%f :)XR"x!U4q(XIUf97v~\,vc+a 7a]VNygss_IcKBLq`esyie%ًbԓVxaZ Rꚻ2Q8^<^-B?yp j^CspaPpܲr:git{ 37sn[5>LzR"j"-1^ˏX@Qc:E,5ŅhdВFX]kg0@<E<órE(4+@p֜1&'Ċ1 &&35;M|Lf ۯM˕À29ipa4KD$^7e BRb׃NI?3Y4-^;npy,څs)6FC tÌT*Ve4qv6^jDl9Xm n^Qx܀~]=t?kMDO%@e1yw F}٠#7= ׏lԍ`VKd Eujt'&kGG g@r&3pV&%>^ 7*D|FX4i4Ӛ BՏBb\Ʉ]Z(`c>DFL*/!:6HZ 1>:q `F&`;}EH0zt3bG&OO@pԌjkcc5 `wٯPEϿ"[u:QӐ(>Kk{讋lШcAB·CIpi=VA翥x:b+T5:H+8~s@ \xLHnNJڻe/{d[͚F?FaB2u1/1|5ut@(#ou@ ,]w GN' };dD|U4҉y?.Bx!<ڼ G/\l|_c+a߼7X-sE]@ .YT˟Ɗ7 ?Y ?@iu+%-t{ci,zy33-%7V^ lTޛf$hZT"Kd4__ 3ht#۝قu+2o*f[qu9VMJ\ݘ&8="Sﵻi%ih7zN 31 tVkw;k=f2mAQAf<_bo5#y-qG0 2$aV2Mdģj5e[6O} @ Ώ1_OnAak[ć٨%+D0f˾l]N qO%tz4ϓ/Ei ܸə\o ΅SHrL1Cp4[y㓣9ތ_s<ғcQ\D(s`D3@ .qƼOEsu!ܖۆ<*U2#L<,-eDIWNYVFaKQ'Zvǥ]!1mb22d% )vW/E#9./߲y{q::v{P,{w)I߄w/@ ΋1 [Q؟yic6lUJHoGl\M :@GG}}v<p|O1&n.Z+.Χ ' +p?EB%Th|v@ gϸ$ZF7oqly<ӆ^ﵻl`jN"adF6wahtх^CӢjP.&^eg|ռIuB|\[Ǒfܒf IDAT7vcF?Iؤkb=z>F\51]}iն9Now.D z1>Kq;z k77cMJ2@2ApQ$&gƳhN/w`0M 1ile%ɘKzba&}V.+a3 NZ ڼ @{\//֯7ݒ$(I@3W\/1,˵r z 9/֯_:VV,H, )Aqra}⓿F"2c0ses7`]}AjsGZe~r ώ8euabBd"@D,&ty @hpM՗Wż}lOmc$iVOy_UUPB î)ѧFE:l]bc㎚(9zSPDv81n* ]'Ez,VK7tvy8~Yz`NJ%E][ rq:]=~?qq7 t+>OKl4~`@ :H~vQ&TbERIe},ktz}Kna&oo:v pQT߷~}[V .ZZ|~q{}dPTâc6#Ol-v9]\׭CCw Oey\" .-^׋\̟O>Nj 
mXt}x`:W=.ux5CQs"Z@c4W]c@"my"sAW͟Lan6#˞'h%_2_;Þ\θnh30,wtOu$%loYė-$ņ[YrV7Ay&D0ĸJ4~7d g?>>GB%a/=r_>1q{1% d؅F5x .G3.q,NO-7g>'mB.J&͟oWaނqPtwPI Ѩkfb6x_aXoӮr6.#9.o߹^Ie};&& ;M-qz-+efp]x|>}{0%+ni>Wđfh˰[[{jܸeWCὝl{@l0sR*G+[xf*XɠOmssi5<7ͻpGYx超$Feq6,׮vNٹ\$?doq@f$nΰ g9@YmNuCNͭ+g0s,Qc9ޣ0pL캺=>cu"Ȳ4wqTi@qE#sgw1#7I(nac}*04,"丈ak'%>h^?/ξs?|t8nqgL_CMcQI*nV4px3vI ./mU%FVKuxw{)˛xC|~nXZ+ JmpH]Ն07CMl{|Heys/%- :!!&|{-?FfR4i=~tNc,Q_st^0e{^1'w.b#2%5C-83{Jkqǚd;VF}r(Ur{WZ=xw,ats?JY:?5>o^7e `GE[mtV^^8w\^)m;u~$4DcS_.ȵNjolA [hPzFND `D]jNTIRcGs"} )pa+uXF=f7 ۆ٠cDu}!,88鵿gI U*${Jl06L{-8#X-e4h*U/lϨ [CqvØڭgP-?Jњ$ N8mP`Gt"-F?oo;o{#'-.je=HiDG{,IgA߄ύu e.8dy6ӿ(sI0hc8O@0f\0" Xz7K\KiQy;Tw@Geտ,*o@i=cGSIs4Mڂ!\ёfn[J ߰> `"\ zvbs4"Hyx~ScG̷8uj ` 0Ussy WK;1ܩTYydN9{v{}t@/˹gѹrJ5.q$!@cg\ .ha3;YO`IŤp{ync3& Berf<1&:61q-pIQ]V)BcT%DSm,B` Gao2=7ʆa>^׎^PEv^KaHNjkWuzQpr1(bL0\P20?p$Jn;Icɠ[[{s{* Hanha'Ie6Aicr/[$s]j*>f&OۭP'f2ps3]^.%.Bw=Gۆhj2dv~:n9Ǡ2sٕ 3qQU\uT*LfV^cj>wLjhh!AQ=FaW+CxjBHx~zi޳,8=a+]Եtc'*DVJ4v'a0=7E4|ZCp,E :B$&Ckiksn":|ڗSX6k"i%Me<(ﱯYYܸ|^9uPn_Q[4-9yTV& _3& [ kGlAҿ@|sK{NhR=O4w-+ iCQv1(yi&񫇯t+YXE0ssY4#.CDHKh.P%dv#t'_S}&󯭣0:mhύ߳<4w.?Q?w-2 }L΃3׎5/7VZ dtZ5y~iA|o ?+Ϭ: gV^(%ci(LWm*?ƻ;ʨomSUs'15geEe ն1*#` ۊ5%RmwooI c+ Ǿv%.$B_CVKC tΟN/F="IÕX>!)^KG;_:>Tл\佳|{nЯ˅]kyȲFmc'˛(m;w-#9.jesqiCJhaiOg}PQ>:̝W_(:zGhnYԤGa)}",1 5l+ 'i`hn/ ~ : Y)1ޓspaPq> [pSEm m@p͝G#V| ݆`l3.?8,+9j_u>'?Cz.^ L>W63k/7sri#;0'/܌3y~^\,$ņ.!2X`yS9+M/jbғDzY!a1JS}VL:-W/BZb$GZ(QBۭ<kM%Xm.fMIEW4hfI14Y^4jKdh).oAǴAoDsG/DBv˺yguKYviwYH (ikOf͂)HlUcuEYkB^^}_329TKA&oՐ!;քŸ$Iq]8qڦ.*Kˋ{7!{^$}t-@6#8݊7b͂!Vd3xg{)'c,d%P:fMIF]s7s#Tđf^޷T6t,|ݒ>)铔U$(:8rc-|󳋑$Zu߀M<մt<5V$Fp͢)(6{c$rxO$Dž(mc_I=?g=EZGYm+GZs{l+V('Dž_dr6lSvOJ&7=mh⭭P\5/y=VG{]dՇgLN؅/3sr*3l-Tr|8:5 '`wy(ivi&Kvy%i} /ߩR Isrs9|^.~յ# B<^؝!n".)%,ur&>lOK { 2jm'C?N .2=}Nz-sSɱn/KGLi[VRUA[wv NIYy;rVͥODʼn6jۘ:~aa]96m]6Vjse%[v-I©=Fyx}~ && m㓢*[{9lY$I|DX T7tg?ܰt ;0#ٸ )|,ß^Eqy#(eLʈ'bd߱_̚A_~ҩ'RqZܩZnYY/l635;Jw/nD[HZzfpں=$?.^\*DNZY5両s?x@񵝸<^ZL\;Vʹ$> L-=P.֕WdJ:zEZMHKTԵj/ah*.I\IQ/leM~Nۉ=9C76 :%S b">5mA'LJc2hɛp2w5B@NMzd,4J$)KNZ, =J #2m+ُe( ۮrW8'^.!vyԵvRIO@iuPݨ,>я_<m9Le}٩弍AaɀRLE|~NM+ywJԤ%EƘx5N(.ʹGZ8Ƴ?>9-C9]^򳓸vq>Kfdj_eڻmwxo{)#Zn9 (re&r sI ckQb@ Tk D]o/Steeem2s}YMI1'wiKjl Z (.od2ڂa(sb >r.ǁ7Me.#OhٵU )W(߁W+_·xI U/Ǩ qN HȰ۝q 2xOVvj{e!*Ib l?XC SIMC'`jN"ݼr2+\<+鵻idQI[v+sӹj$o3C[l__?mXxkxch*#).oPҫe쉃 2(.o hc'I[3~CILJͻ OHjwJ#Lw\Ít:qԄHj[U·&$Gxq]6\/.odZl/g,.%JЁG"39N㣦`iihgRf`>A.T{H%EAC}%͙ZKkfrn.G*[n蠤U61#7e6vCLx~T6!ˊm [߯x(A;@Kg]Nx~sz+Bu| Q1}_11"2>ߌͰUat6 ^7^AmSm\|AaM\$Ln܂GbDSqcp휐=wWys? .U`uhѵlV /ʙ{;Ji$%>bdkqn[Ȳ,CVj KF"ekk,aUII]+IpsGmNZB$fîCME5 P>L^i=ľl`cH`Vξ IDAT< =j.TBcH`ol{i S dT* Yync1.kfd׷yީTu6Mm N⽝eёq1"Ìk]s`rC@p١E鷿N[Yeu@7~zǙG 7QRBUcNp9I\7's|ŔAGXܱf&08,E#~{jۨja6͈#;gfjv"ū8t.a91J[%'MIjT!Iw/ {nq>v'OLU(iZEZB$sSCa䧓J0~̜JvJ,Ǫ:z}DG؟kf`hT27s'8׷ï2P4^r8$>"#:͟CNZ,z*:5Fx;q=TԶp{jܵv`(,˴vبodMˉ415; pogg~j@q뤴cU-gu-^=}N rU7tRwZzc B˖Ty}gZ)Y =?<#gV_HD~vIg>U:^N{^SpLJ:fV̞8 #JIуrMà :- g zNZ fyCn{u4zch"<O@^VG S=l)D k&39[VL  gZ@5+/5'Yy|_ۉ0;%=!:i9|%{jYǛ}|t*W1֤G2sR*E <N3lsّmxj$b sdrQLG]Oc[/n.@B&RBQhjc 3L@mME.e֢*SuaRШ5tZ4c0c,Hs8?%T5N:LQYEe tY- .x#?!Jj;i|mS< ̂ P5o~tWiר0ztzZfL *PQFÎCtZܺrzҧm%l+i1LF:jzL&$xd:dKw~o&;> *N2;h4*">_ (6T1I % ]RQaq}";tZ*Zs FC\ y?a$ 61kb&SϑL0=+'tt]*0*w]?[.#6h&ޞ ָ iz&3f*NF+8aV N qch Fl}Z!M}{% ?$AjT٬9ILfh4HI9TCxx UP]$wUfxd2rC8l9}ÂX֎nFwvxwqNFҌ%8f^!7.i8fvajA:+pWa e&hfun4!d9jtyغN4MyH%زf=>cm̝͂~1:oFSz&aù_! 
]x$0 eKj# %9Dج!@FvDV(nM7L ,gRU0"f0lV㜤$@\xN8V̄d {C TySs^GC?=%f߃@ȖOwOQb?AQ6„6 'o!M 55?EsMv).w$#( f GM,F\nbc$%ƓHRb{nÔ4-ʙy|y>v:veӢdzYL,jxȣ}EǪ8VfauG'e 5M^J*8VLj*TU!#96g뾊K* VFBJKX,F0Yʦx&8̞F~aUenn5o$ L-Hmo>=LlWM?l]= ㈱PLIe)z@kPo=;JQUEEF= ٲxztyK+Bԫ;w2{R6 m4<*I Я+Uf1v{|}~ po"b& &Ղfna1LC~w2LF=17r#Ѧ1^l FaĂSvW *73n<pNrC$nZ2L!xClSF?퍶,(y^ﴟ}ld>:\M~V"WLe2cQQ>Jꀾ75  Y\`7(F=ܳ,bX-^>GqHSHal8y ,3>zLN6Svh2Ir\_vu`4LpD'KfSRلh&H[Tp {ղ:_k)Y7(6p}ѻY(`@pLMpAU-Tzw咕QUiԻ$0%?&*jZY:d<^?G*ir)d=ע<>`{~AV2i"ghus߶QO]j4e$riQ^t6nJ8JCk>^h>^(65P]ơFۻxiA* +5:YUfLȠ}|ZtŌ ]nWַS{حz t9$IRYFmODJb 3 20F*x"*kۨnjd4q?9M?ήgoG}-g)~6dHvnȉ<5D'3 17.ܩ9lٍ($0m'a$iq+@*-wB0 $j XPML3&dRf&88r1עyTջ(ԋp3ŻwؘZμ)tv)n^\ǎU/#==̚E,4-'6Md2*j[H%9>lW6fN<{lu|fOf^yOc3X-),Splۗ>djA::I d"I$EspI~vRtޏS ֟cӳ"YL>R BboKjX_?4ESp|tg?d4p)r0Z/n:@ j62ojnalW]9Om1ce2vKKt[>/=eNMVe$ﰝILK7`q{}jy)^U@s<^lD}[4N $It |MQ Gw "QU}L[;1 ';O3a5xoAx91q.^5nBax:w€y>]0Ei a#-9`~}|H{nFpJV /kE?j?o]iNbf zNYwBvrt]o 6>fWIX&}lɥ2hL>!H$I%=p|\!3CNg$U활m n9g3'-|8bžvA܊ :Lea"Ô3٭%}spڭ8%QXi컴rJ$IeNQS@ gGX$ID $It ɃV+`&T`$I,D$I{;V˾cщhd߱L뗒< ODWJ$<D$IU^ze'Osn6O>k+;9R8$ ` wOX*I'iR;{hBɢHPX,2hkE0$k&~.)v,x$I,X zW $p8rmsGZHwS6J>A8t{esͻ˩oq*Yq\p"N\8ٛFSͽ,OrB wLdrZ݌KOkfc5Bzv݅db|v+j]ǩm@Afr+($'z>:rD9 l?PI7@Lnr2^ :؋ѠrԷu$) 1,U$Ű| o(.D' fcĬ |[ܼQ-0Y)q\4Xk=ϛ@G# ܴd )#7ksԞtd'{#VIsDpso8PZC;Jc[-򨅵#WhIeKg{fY {:黆Ɗ:B!܌ c|'qD#@Y375DUN}j.: }7=klW@fJ<Y٘Ff| )qT]wo~jrhhd߱ZTUEZ 9)4}=5w*Pƶ.n1eo9@rY ?oMNJB 8pW9woEUiIJZj6qwum Y`ZfM}jZB_{5f?.NԶ2gr6_s1n#ߺEtCX EU~dv N;oqc6i(zC; IDAT7>8zKìZ:MM4wqrf$Dzrd#ín5|٧0{b2a&It!o}(4 eoݾsD>8| ze%+Zq|c>]=J'y}Q@6~4 @R`B>*oԵHE@}%zxfiHEU]16vϻpqL&)N]s|ى(u8f&6Ɗl"3E؅nZ< >!{OKtF'tۏ?5w#|| Y-w/19sXO;HhNMgwu{i]5N9^%It Y HS{b?ջ?4  eG[ky遽S+i{l{|\g0 ϬI^fE}wFQp˫ޮnx?z02&G.Wv eKd[~vI4uڶ4wce3נ`z.SsX9V @({ *f6k4Gƺ X( f뽿q MRV|bV9_`yM_O-I4J U"mxfz?&V_5Y3eיTUmG(l;[_{UlFhG~ǘps&9ǟLJ'"]ttP&1޳zOE\$j;sEEyCm^Y˛yO9 gb0j6a5N[?!7 ـ?慍yT0*c d%qD#׳BF{KjeMk`blgU9v&da1Ĵl_ɑ̞EVJ.}e$pio8t y}3$IG ڈ13TXĸғ JQHh7d- lݰKn6ca[G7Ϭ߅l 5)[C.#FK{׀xAϽmë54W;#4j@pe'+NYz˭'%ߨ(Ko0qyD܀ Ҙp)lwFtd`8o=D8,PWf7P5;UG]5krz?[m)qܸxy'!NVjun|y}3_sƳe_1V30{3kvSU[qFb4'hZr;&܏)X_F8,q-{ˣ]KnhB:'h`PQU9ӂ4z e"PUO 6 ߏ~x[=:[-i$E8xyTC| $N1&*ܴ !`DZ,lRp:bd":N.VUX{׺=q7`lx^_Ɋ{-jlm=OpLjtb7"pEQ 9X`r5^jF޲VcL( dԥc <~vW{9HQF4z^~||v1F':vak HԏK޾/ 3GJp wǺeʢS=֝o?~->>jF~z󁼞2q ViL҆ ꁸx֯GD0D'EMJn$7AU!x7qN;fI Ҩ1\HwWAVDzRU1u_` "r]=}HOW=^'|7´`:^ef9t{{ci9D;[a$tWRJIȜVw7=N']j]eWSC;o\w\0w`Pݘ&biYRi42K, ~aMCY&QB$IKB4Mʑ.4Juv_Z@M_/ 2! 􇦡i4zD$iU:Z+Km3PKiOo PYrd'N*oaKM&i$IJ!?Ƒ.4JtvKۣׄ~rצvӗe0ɦ4Vzo=iZ=myLII.y' 뵔*$IQz6A{p'/2r6*kۆ4ҥu~ݍ>qkGхl$i 9% "PG Hˊ*#YSz??f-Yf-\/.-Z*uxϮD h\- 2$i8- ,cZkezI. 
B"<4@DYI퉲'!#6!.6-=d5ۅY„5Ɖ].O{KSiaz4Gnq 2$i8} >P?<K%It=TĦa8^"9} ={O*m==@%[?$ISN @K῁D(;^ۺuwy$IҘ(ڷ+B(t0zF#琐7)`^c`"7nWAj70U `X#n"-2BWj t[HXzfkfa^%I:~vm׃{׊a(.~$I.xU kO?a<] X@w @.'Z"?8M ))W2)'P?o|b]ϸt©I2QdF-`~`  }~.r$IC|4liAQ1l[|!tMP|\>z[A~(tŭ'q [8p&'_++ )tXfģKeﴌXY$I:Ag[U Q| jKϞ|K^#Ity>&~ ( cQ6@ dqUG-~v5sYue+f_vN?ϾuS~VtkG|v>ɋ"ifT ( IX8PN(,X:#dV{b8sz`X㭏8Xюa31>l=6m{m/m;Iuc7$tF*=0~8kV䓑dOY./gl|'=z" `V?"s~)E 'M*3dIt[%f5((XػMv|xLRMAQ4BQ5U$ Nkf c(B%,Q`Jo}%i 􎋴`D;^_cqv3Jt}̅0aOi;=Fф_Y?$ S=-iu .\>qx!>{v嵝fbT5viCDm7.šR>sz?kà|fI4xxQ3ڵ <Y4:$}Rv!HգvƤ&+]00߃jm1ǘh Pϟ0H`G[r2#ݦv#Nww?sƱEywwArN*<(?$J kvZve):Q|$ICIT'׮^̓W@oI7|oY;MX- 9)1M3iEo11GLktE=&29W$ hzfzA<\zhqdD^VþvjZdrҭ]6yH$I@5=|Q|I+#ƬGu'XWNSE8u%'ݘ*n`RNJфvmG[KO0LYPXpչ>fR4wxzVЉv0&JAޭkJN,FUq, 2ve\l4$ɤؒ$I=-ătIXsMGml?]c5r;5/mh3W2&9Msu,:lAQs+ rs ݻ&8>j10>IilLqG_M]$铑$IEP6o*4J*wƅYT7u1d7BGWQ%;5cqKg`Jum>:<~L& V%7\TbMxaAC*5e\,vhNcnwJ{SMOlT;vКytzpJJtzW2$I"ݯzk`ɲH'cby L>8 ɠg:%@RS֥'<[S\1 9)3g8ڸq1ZǓ$D$XY=vc_1~1ha"`:(zs(2TJڌ<ɒҟm~m]+<.@D$ld"I4T5Rk>e/aL2eA P&3~~[}vB聈$IҘ#IaTAXi(2<b j7%3ƞ@0̱&Qʉ6TU/xy7߾^ B$I6Ia[͞._Mel200o~jn\4UUc'Ϛ+ٜ0$I2$IF*,vhV޹8r( ^5`X  pfjI&I4L\ w0l4b >n[>52Q_Jz @ X[AdZI H$ 0 (8ݻ~+4:9dSI@ѻD1C $IDսG&M]NN8}gIJMt`0lv# p6{-I"$I"-'ssQ㊥ә }MVJ_ y[]0~$Inڵ!J$ *XY*}h = O0R/<' & G)%I:ņ &kRQLBL@R @qqhիWPG @$IA5lHC~Y_ۨ7E$_~9`0|IQBSU*=BdEY{^PUVb$I0 ?lE#Y钤wnLJo򲅄 O$ W^y%h4>*xdEUUvW^VL 䨐T0 -BpȟGHQzBx[o78䷒$I007r˺p9ő.=D>(| te$I(..]Qf)#xS H?HJW#>,`*x&uΏ0NSQo_ذaã˗/62$I ̉,~F4kh;Mr51 Q^Y碶UUY43/G.1fOqI-mnb6g%?ൽ]>0wJvix Z=aG|㾫1Fr_o+->IaÆ !BhG~(0E?[1`1U{QyM) ҧSXkcDd"I4jF]oö#1֧;H'o +%D<=^]08p?l_tݶ9O+z 9#AN6Y\!ys Ǎl$i)..!Eqkv[զBSQH-S~DҀ^W?sӳ¦_7* nZjվayS#@ $IC,3ɲjj^|Χ˺h7>djA1:9z2TԶpz.]=~~~P+r‰(‰:w\n_f7`$)B4tIJpO# wJEIfH9YM5呤3F1GӴ ~&!D`);[w/_5M *JpЯ$(]ޣ ˚e/B|]^qq[ou'FH$ ٽ ( @6[9(¸D#ۻ8TI̟æ-E(5dƓOm`( f!aiǕ3>ywGiyS o}X;; u`8_5'_LMpջpz 3>]ݴ|K"I#_iڝBgvMWQ4Mt>PeݪUaÆ9B_a`MoAc@`q98AUnڶԥ.K/ͽ3d"I4fE~;tDKҏ j;6%`^ IDATR^ dS;&e`zvCYM+X-~%:pڭx>6)㽽e%:Ʋy㣭F^rEV&i煍sW6b ~mGPUO!;=Z#P&d0cBIx}A^r6~TƚQKSob6c~Դp$y7,PɑaP%5 BiFjN$}!R*!G}>o?CUomޠ/8=ײ* Mo3mqB{L&Ӻ{Zj^{ =(i;>..}Q;H$ Q.aMC.Q61&Q%  rL=Gkx~TUa4=1 |<>N6#4yhlJ~Rg'_pYh`|v2W@KG/{#zƮ̝ͪe=1r)npXǚvMgcsi:'[tv ˜4<v1d%QUUkcKذaBЏ, *;<ވ/BfAq(7!ZR&__h 6B^VX$MOPJ!"MӲZUx<`(5M۱zj~>CE $ICobuv ]=zW8{tyA>>V/RF'7#ܿ.*jZ9r%5a n&]<1! 5q`w*D(_Bu B^Vfru&D*s=Ǽ$kf0kBFe[@I!@@Q_7\kQuTRBIP;Q8(<<%_B~v h8gm$6=)f_UUaqq]5ї)J( !-4 !TEQb9@(B !Q寚_`D $IC8Q7TL>_ɾc\`"Yq!ذMTUaWLaCtSX8c\t{yM+KXh2 R,Ge}.7zBht-l,;PHɉ:׀Vջnp(/^na5&?!=oUP8n5a*hGnyVǑV/$nl2b0{0oFJ0 n~E*WY:N㔟itE@[ߵ'}#g#11/*zWse/l}YK,v !4ms|B#OU2Q}Ѩ-3RQTJ`X,nnBܢ(wÆ τBvmUCC~H$ !3{Z5R8U˦q n + 'Zѿi6&K !j6QTش-MO aۋh9,>z )̟˻S橗w`g ?kr&~-Sf1z\xmQ6uqx Y'H ͆4K 6njlVY:|=̦=tjn[NӎjЭ_ 6Ӂ|>ߧΞ?y(C`P`ʲWQ(̾V߅XTϕUz؎>MUX(d* vK߅avXQez}k]<(`?|g&q$I2+5?{uwO@ I W.4 W;qOq8:u/qMclƀ{i#$4Sr3gGBg6k?93)E%+ 303's&&Q߬<1`LITQ -U4odʘ,hP贚~3Qub#9|#ݒ`?Ie42]xo)/'#Lql} I`BZgJʥj6_WcN tf1/xcbj$-Aj%$ LM慵GPT`fgs[6g(ck``-9996gd0B#-쫋0~I♇vK\nF1c%`ɬt<='#kBZ шq>s022fI#8c7Og}H&É"^\2֯_xڴZ]{haDDU uՠ;~H9Oz#1 㢔zhBߚLeׯkŊM 1$PϞD ƜA7(0X}S|tZb5JǤ#L>uzzxRUNESo g[ihln\˸Qְ 6D ({9 ) QԪ+a:v;)>̽ ****=JbH咓N43gɠet|:*WR#N-XV.7s իWkZ]n^^hJ,ƠE Ut. ! 
!Bj׬YQDQrYPA@Gh(룆kYhd*W=e]6;n )5jQ(/_~G }\.ö0B}$;j RZ;W1U /]%k.*HXP߿o 3 N~_+n͎vLHp-b0{3 *=\hVhǃMesj*f8%g6gzFK2Hh<_EI pvtzէ* RRǵ1~in=}nv)DùK.``@q巧dw&LglrQ,X****D 4\fs¡1~F$Ņ R~JqU RY<-uHۯΦ7"OOYQ}9YA|t c.++k xIb1DˡDcn6W@⛹;t3U\~4׵ZBe5fܳd f%A!;Oײ?<h{nyRvrۀchvD|6FcQsbƄ8LLItxtCg+(n"!&WGO_wm0j`ٚ԰y_>VIcxˡ3>ƿk؝.W3r w/oxAw[/S"LV 2e)s̃˺[N5rK^=f$2"Vt!19Q3Dqu9WKehz oŵGBpύ(neR*,8]bXkVcW7@?,H#96km`X"C8YփeƎ崴; G /laÞ"ۉ6hIzbPx:Vu~5B]õjH؉8X]NAte:Rj;J4*@TTTT y ̚6 PҒ"ClD -668MEm3S}Dft‚ lN)#j4(Oގ,r{ya& >ޗ3(&35щd$GDKws++@&2m\' kmMZ#Ȥѱ̟{6*LaޤNp4% NcҟӥoiOϒh;UH6Sb :-6ҚRA @:)Ĵۀtq,LAE z (ߤ羛269̔Hʛ/oxAfɢ%S\@fJD1‚Ls}_3 }grHZ>!Kym[>\ ?:R ed8A[@v [׷P\=~O ©iSP^ϑ9tv:Kgg֡hF %ؽK}e Dwri$/q࡙8]ÙFZiins';QQF}fA~Y'Qӆå`sq #sbplX]A__G4R |DEEEeǟ+eGyūٝ[#7D`BG 0hAKv;?:Lr\83 7->SV#/h'hlr@3fΖcsWwXF=ND:yiV:?:̺')h 24& gJstd: 7up!t<|Y-M|6*dbJ$H))RDel?&e℔,)ŨאɨcӾ0>%qL%̑: g|JNUi_ &‚L(nfw3ҝ53:1='+o#y =_|gqz=)]ZXN!8x |Ed[ȺtUs"Lcσ}HĈyD'b׳kS{F1oӛIw !h4zz.t@! 5V1=*fG,Em?lqR ݠQ0Sb`à}Ob1Q̚H( yoI(cLb$Y#8QTCH@w{N-ǨoʔqD^^kG.+%nDy0@X?ew8H殛&c œذ{O2&1)cFr(k@N  :%© |r0TgғMxH%@kM_‚L7aco}7.ƧDb ab0X ?)mNi7[)jz/?QWX9' 7oʋv%߅RJ@vvk>X/U)?-#b5zY.κT{3mv76.amnqY4UWӖ<Ǚ}޲6| ӊNoᴵ/ho)ڭX{S;oN7ۤM}mD͘ؿ N5VfA -B|՜|*@TTTToWtdsFr4?~|ɠ.iix<϶.&2!-gZܫM*_9%gOHb$nǁt32AhW t]Z/>:hp`kgDd@z#yI+bVLϤsZ9?[W|,ʚh 0 2jxjUeT7p #3HɈH%$8#Ovhl!XcyI@w♘I`g!Kg&3?KzS9yw ! 褔/N%o Yɢ氵vX1:Rrb׋̽$Τd)n` Į 4 g->>kNtx C|k#<W|/L]6-e%i½[8m1m4PbӅ(N IDAT?| a]Tek ]b0ϕM1|=j㍢CX A]tN!#)}! !: C{7}H.8H9t=u'!&4ӒsN*ݬ^Zg<7?O@ֹm]XX;JR]n1{W9"@+P[XLጞ)V2AQ C҉\p52fD.;x4^ѐ?V'>@ާ 8:Mt sc]ֶrʟ 0~QuQQ$E0G@Lw.1*}NMS; -VM!WCl6?"Nllc -+#2(3U(=[{G&mxksz;DiF2o&:~zb'`<+Ttz3Q|6(>*.7@?3W1+-k@NSQQQQ^p{UT|$weuȤϵBJ=Q l|nǬ[[KH&&q:cnmvsu^N$ݹ_'&iO~™:q) IۿS4 !);> 5E[1D<^L>zhkN{i;֫Ğ`j?a-mݺuӳ'QQQQFTIM u }70w(n9U#:ۼkc94"%cp\X4|X.|.'xf!6zHx^mMeɺiFO^EőRɉeGaiWIrmMԕS5b0vcg?3[AB}nzLbo-?Ao "eC$3;@<~e9ԕn'uCέ3gڂNϓfM%=dU /]`inBM*cwkxS~$yy"N-hiSܡE_5{…* 7n s8jQ<r =m^=vFi;#{G#o*Ωfi.cۏ hSmnajN~Enz*Z0T]1tm>|xIlRek׮[ree?{ aDByed%Ag1nFyMV@FF 8]n\ʟV+0%aWsnfcB8VCGRRD[a 6j#.2b4h !<{]Z@TX 6LFmv4B`oʺV\n7!f-ʹ[Dɂ$Hh  :zel4`,5Cm/=(?MK5P3EJ!@fсFFCآv:/VZH&S+A{ɹ&A4Ie?h@tBlR!й5?4iR. !IQ*lʢȥpS$O#).'ȟVΪ&pj*]N'g~&t{)H4n7Ͽ*E |y$Ć`sxqnN?gR2-p=ʵZah17;e4YyYf7?z| QuxHyuX:ki 'Su[:(h@5!%lޯ覯5_>0%KuF3O>PǜxR;T[(jz4uUbԃl$ߌBu|%"|MqbEϱ3 躾P0X D%-T֪khh:jnUU #iPWܳ)imasr{ dԈp +8xgE`;*uFJLRFFd9X( 2jeBj,+uW/~&&ql߯lG""C8UTC/9pm gF,/!@g"udz'KZhrhZ;h 6҂I1FBC!<,S+!)BNw/S8 S+x CW :8.fDEEEe,FAP\Աluv~DX1Qo$o}t OOװLGX-Gh%rLj7j(lVbuWfqi޳דYOMEMlZ2;77dS5@]zc!{ffOL ꚕϙ,%W6Q#:/ga}_jCu}WzR&!HAAՆ WBi1DdD!  @z`! @(dP A]nǰs~_\eB]k U 3thaRdk,`͖V5WVGdHs'ױpZ*' eѡ X~5m6LIL4qǖ4tOLx u3H Ebwo%.2Ә:6~9d$GQUʞc%8\.BL,Feۼɣ3zJ32>5b,[&C߯l3vT6@~`!m?EIM`눋 &=9 @b~gg< Dw޽΄@gBVl6؎݊rάY2akqhM@pf Nۯxhw c/Zrjn^~}+&^4j/vQT⌶w4Xܫ"FNZE{kj(sΨ8;޸W3LN4/>ҩ\k@ӹ >&(] 2O]oB9r_'EChŒDfd&X'+#@ck43`uf 2l> [.?F`41|Շ=JyU{7t13z~jyfJvC/pkɋ bǺ[n`ݺu륔9nqg_-?{[=i@tORp:ڽ:U_Ͼ~cy1o޿rz_> S`Yh4n1?Ϧ+  8} U 3PX 5(A~YݟPSHlX쿨-u8DXeB$3)%#B4ѐr/q.f#afݽgxt `lt#cҐD xͫWsU}nxH7vY$8iZ-6r__nKz۳/E[йУx71IܰlZ-#( 0xA4[֏hpQ6MSpy =*4((wRJ3<*@TTTTHyZ}nPQ)0`p[l?W6\]|Tc#ML bX uZ4̀\_oqO(y{Y.=R=yi˳wVy<_{-y"P~ۓiN EC'b6S] Sh4:CIC S< [G8M8lM>@[ʼiGioڡ34)*>e_pm^LHG+_sә0#p[3C|\;(kK5ثLJU޹x/I)Rt3TriSU2TftK~ϳ孼]yP@BL0f˓fXN:jѢVJjڹs~,#"0u/Lap[ %')S @hsO0gMuHVZSnF!֭ ;;͟y[_lG1m7\ f-CLYmvoOAg,5Bq9;׳ͻۄHnycg@Mv>ۍ[=ï? 
Nm;ٷI}.{bF-dʗwz>|awnQ=JAW>oaν밶`6t#>_/_|b !n]|Ǟ\NrʠjMӁ 6 G.+>X8;QUFL?TG}C V\.7R!Bk"uXn|ƶo'~7 s3HfHusBs3FwB5M!yo==7J?G׻ެ\2W=,`ݺus?5{{nvIx9OF{7Be?>z@TTTT._EV5F^n{T;c$^!!ӹJađۥeBP\BuC;E J+h鑠?t l2$ԶGvT6`hp8n_"ɞ~[gkgP;: `xlX,^eg|ڵ&!tGxGn++9=A=b|ܭ cg?᭿Wĥ,ak&s{wsQumJKɱ$*9Fo l'btNlUdaT>s(?ѭOR0o4:#;X14ad-yʒ%E[#4QQQQDx`F b±[U2nc f*^l=3%0u*W>`qV"o|x)!@?6 ۍ ˆ4E;o\!į.j_w7NgT+ }G˘2cOϟni"o6K{PyĹ|z!j?aMK tj6{a4U5)"ͬKͺc-c(:'e+@,OB!oŊVw>Tr M-,2ۣru0 &S*( @lwrł^ rrrGJn۽2;;OǎPo};ڪ9wLY vO"6xC[zyJ:㦺h+QIMYZ*?RUqSe io|OS), Pⴷ 8s, g-Ĩ/bm-#o/ \~ ˖-{[qҖ=,KS#k#Z[.E*Wןa۰tڹ3 \O!IO8 IDAT]]nnvx\QQQQ:SQNS&㤲 2PԁQ%#){f+&^m;SF7 6jABFq8[pAؽL" *ZH dabRxx5)>x<!p΢~3W\Vq&۪z-93N+ԕvxEߤ|/3 { FieߧkKs>T~ԛ0C4PSgg?vuDM{ʩ^hgϭW>NBV}&-zjU̚6TriW#4 G37PDk!Pz,LfR"Y{NTp915T3pQ~>sZikUb[Cvk[%ֶ~h-Kέo,U-U}\VZWR Vj=-g6E KtPXR0i\%hG']nT0BFPN8ޡ ?6'NjO6aw.#9xf6~Z̸Q$Dy؝n;kigC3,;4Z 2cBr{kd %5>BBúIAmD `1:C{0u뺿W6?-P[%8)WgsN%'h?&\aDRB>\:2`]WrKbq{$u;UâA9! 1&_s?mhx$oǮcx<"dвbv &)/8k=~w.s즶ʪEc `x ?wx%SD(n7Rl7i$UvK&L`T\ֲX.P2;LFWJbaS mHTܶ}E!})֓]]â tπQi%gξ}nYr/r1b-P H$HH{$$$@!M*6@l{ޥ3cյ+Uœ9Hk=yϿ(fH^|lє eN:xڝᔐɣS%|/U>f~\_ć[b|hh8VewQUOs0mlV]#d_2uUZNaXZ/^?Ĩ ̿t$iIjE@r/#$H4ttRNU4D }Av[k<F B8K|rQݣhˋl/]_Wu4pۭ-_ Vy(* OLeJf*ŕ -y~yvg11N+wgM Niڶ?]v@.Ӈ9(X=L8X46 6뛋p̘۬<C]* &n g=gΜ9~Z~oU? ѱIY 9Nښ(ͺYH))((HBLB0J( YQE IqyMV+Ff5'Nb}Q״qiY;YtVm<[D##q c"1.+#2ZU5¶Ķ֓$:ڧgEm\B-p-vXOS-!ğɓs'z.ibtpʛJQ: [O(e3\~lyp32LltFZ(N7NހUtpfJ+u$j3d كL|p.X`mpB(P(g?HŬ0Hדm`UOXOzrkv#A)gݮ +l;PAޚ Jʩ|ke.;.ޒul=PNIe#W1}l `pjKgrM\5mI]i7MaDžxHItaa$eѢEe˖1 crԩSzM^="VG^H~XK)B[G4L}(P:t6O(/bWKYgaqb1RB{k5Z[L[CA,Z]j~,Zfص7i#hnhb-;v+#GGJk!xO?QwO%@ ,c8›ܕzǵWY^N8),e3F28#gg"ȭ]p.06!:}^4z/VF".;߿}:5 ~N 6Kby!Yc1xAv+]pc1- Iɇl7\<1ݶ/Y ')Au^>]3wܒH)eddLHOO7߼o̜d/ vpBJ ;!oƵ{DxUgyf WU}K?kܛp5']ތeD}si#h&6==T !r:u ۤ#?pOvvs(P(Bcdo ngo8xX**.=A 9>gIѰXש\#vc֓(Z>3tIJ%M{^gϞl 9$]|!)t]O?},??RVVVxƍG=_o=JI"lZ8'RZQJwAfPg)`rb0{x=! Q W\qExR at…z8 BP!}^j$ǩ{i%u{حIn&gu]C-ZܕLK)FuQ^ wZ&;@R4mX ?yrss/3gϥw6KKK]uU_Zr$!57"Wk`4J+Åψ>DC_)5 ͛7' J)?BRuȑ#fuuX]׽VAC >әhf 0H d?n' !i s{:NQDP(z؇BOi.f 3#? sVgL̄=mQ<gYeVYfBi'eٳ^KX,/X`0 h; <*6;k֬-Zt=sc$oOBlƟΝ[lX+|XR!IFFRm;(BDqfzTw@ B%JP6d0DݏG!>իyPsV+,/H)]l٥i~ *0 //P8RaS !)9sfhٲeo\J9tnʼyn[)ݘv !V[njRxRH(ZfXJ)^|(}+ J)4M*Gu]3gΜ^[J BEM%[hKw'#W?x`aHZKP7)M_5M2`0 7RJ) biښvΝ[s|RJ%`u^^^igee}<}ݗbf?iEJY-4r߳>q:YV)e 3M~UtCg䦝eQ(^&S;Rq̼ǵW*,a |CFoi8 MOb̰b]o$FڵٝW;H BPSQUߺ. %ϟov"[bСCIYhQ-pW~~M+B nb_=j3M3+++RJm ">Ke a?+h`TFI+6fs!uaeI ƸRՂsg6! 
qn;>;9^u c~X!6/g'`3=ĉZl6'.@eSD5P]_6Bx|AY^))ɽ_gM|q8x>PX!CÛ+Zpu,0ܵz%Gy6w+V]#1ή~05 >L5a3'Kۭ\bbAtK h,t{麮H)?^y}BP;N r518ljQ2&wx~H {T?IO=>Ϛ=BtBFJ,X*!Aä$>&CĚUnv;H!)1GE#S |84lF7feem4 ˛1M]׷ G0E>]FQ(>'HEKQYBT5S@Hg!@hn~q\2dhq5ʽx `7fÆE`;p$%Ƒ?XP tqB?BMA(piO&(6 @tŒ4!t!+M۠skK(P(~ReM$:)'ToIh!܀㶅pV 4!٬Ǻt]@ 21ev@ CЄ,]bc\K_R 1Xm -]DŽߓRy< qCxp!pK),sB"0a?n?\`{_iBExg\/ti9 B0plmg]׀pUalLSig!XtB`踜vb]44xC&aGD5 ӎ .E\ӎӌRBi.4)y|cUOq4s.i~B @]l-erӵCϞ={wAAoL|077Eo]D Bei94MtZ)I}Ox6lO5p/8?~j5 ')ђ.{C%-GJQTR7_v+Vۥɉq:7stlsifyݑT]]X{c kW_QlBD[It~t|m:))~m Cۛ[T$ j"vz +-8V^KJb weOcHz(XϮRn?|KIIdKOlW B/.GJj$D~ IDAT!לđX8 }|.0lQ)N?WWacLYS$$2luc^B,RB=R̛7ێI7 !BP)$ۿݵZﺈt~謁B.\nwLӼ@n!D ;;@nnDӴ=BnfΜ]=;(P(N^ ;,x K9pə)c`j<Ɔy<^_1RJʫ5oFX4MPRY+7/]IV ANzrVKd鹥k} đijEܟX5< N%Si6k4%>>7Ȥx-+8TeRtML=+WѬ-Q)WͯRZkziiH{ۡP({w>( =1k6rss' !㤔WJ)ohhgnnn\hQՉ3 BĊp!|)kj-!.%!a0HnnM@zz͛̇%qD F&q @"048PW6 v̀=|xڂ Wk{)WKI:m&$?o;dfi,_|ٜ9s^Yt鋺o0Msug;v,]ק18&p,Z*''|}]EQ( E'lZuF LMp9l$Ą'DW5n3.&9>Z2N,O;]#{7@s;UkZ 벓Kv(N'[-4T60B&~wyyyO޾}SX|a)ѾioCA{gII5 AUԔH =m XBTI)GGjٻm*)4~ =C҈F4٪6_{Ll_R ø.??kYYYu8YP(NWk0/"7z[EƺiZHU݇-츛hLo./^=$R q.[_I~P4ϩJ4ͻya^7?$xMj0©uhMYWz.R:l:WkkG7Lh+BXAj tn)XqD<hK~=2_؁>ZJ6`J)'fҥ| DV( )3u@Vm;k[Î%Kbw+ؒKrS։e1rLίfPZv01i;uXu 8zz/'_V\ Ѷ4%>`4sJ|9Ip,zyo޻$zG_t[Kj~ԫ]iZr$o $4Ak`o8IB,0.Q&'LYTWW,0j `BVV..촠" B6n@7ԫb]v2B\+?o^\ͶOចַn v(c۾bN_\s!Ut&20 4zu3B| EВ#:Jt%nt8ԫHADb˜"ݑlCH"1xǧ=7os7 c[^^Q:MBP(:a|ytnju!RbI đ<,US#9MFJ+ 1.\s9??v+  i[N„ihZ3KƘ)/c@8jm30Le C) mݯQ}vU [~#4MLYx;#//X,w/Zh=oR>AG?WEUk{ihT"1"2 )^|7!ʚ{ҥ ,ħ BJ| v[(=&a=Uﴭ-NgErs%ŷWJKhoR-4y0 ͋ʰ8GG~~~r8@ 8 0wAhyRbCDUYUWi_mmҭ8lh{`~SOlΞlٲaIӴYK. E4*KP( E~g m^3raHk<Q t9RZOIe#BIik#2<:k~7?.1@>~^=zOWeӾ'1t@"-~Mѵ*tSq?qB )'9yQ( Bk.҃ n9¦ŧ{Kl@6+wv#\<1!8^Ku]x}QX\Kb+0L>QL/k.b6~d6=dNb4+؆bADew'dl=PE8z<6FQi%y>X֝P) 븉0Qғp;?uڍvQt:Sk+80EWfjk*> ͛7୷JI:+,BP(ƺ%dJ$m̔v>֏ͅp۬qXJ),Y̜:G8lWX;K{V-r$߾eIqN".`E4xLWwvnum,YbsI;Qwiwt]H6D$kĹ#\[T [xN>TDP( E2L>[ wPYZ##i\5~ĺWF"ғ[X-VFE|e5>Z[Dp,\wPZ[:?C7Oe4*JX+,.? 7<ݷDц%K::6V@D~eхotnjk^?_>)'>9Q( B+( euYKFxEG/IhY=cgy!!p##9nI >!' ?Vá|5(g~<|-pIh l9ʼFbU2əd B?p}k+"h9ѕHs^ku>w{:kKk !1>M=8s L`8VSobD|{6 +:&х&|; +BJ\S[xggyƕP+@,@Qm0 ~h蟖ԫuhH^rτa2p(:94@4.NvR(P( 9T\a cq:Z]iX@j=죃(`Ga]pA=go|M{JYa$} '~aߑjb Ji-Gi^<<1Ӈ2,=:kwl eN|QPPpi/ͫ!~7_=nӓ"8gяԫ(FL:q9c1 &?#lB[|P(dg"_ɣBP(a_qߠX~y_& OfdI7@ˎ(­Wiiɫk度 | 0MpX-8.зnÐ{-vbhTN4hs۱U_Y{1eʔr)eG ,O1AmفdGғG!ڼl&Oنq/c 43gʂ /^8PDP(乳Z֤pUHhJDuu.[uV=~# VJ)jhh8dɒ@zCA}!d;_;:Q Drpk~HLs1qD?<ٳgr޼y7lp%K>' Bp;NiJ}܎NCs WM=0W(")0X!myo_\lG?QHh~dM6oԫnQgDsFpWVsb`TAщsۉF$b@ qņ_[#ܢ%Am3gB"SJ N%@ Bfܰ UR^Jq*~ۋ¥Xt 6ݢO+gi"4g̙3@ƃ@g&Jz҉:ٝ"'8-w-((եtPVl!Ŀ|>߅+VHbF`) E@4\4cxAEem# gG|/5x{>!$IEt:lʟBɮCU-4LbdN봞!MXӶvRSgLyaҥ5M{WJgZvx}$~v:VoHWѮ-5v {? ʱ.m0qs[}'qp!>NH Bhj0bpnl$+RQ$2}݇zD `#De\bIJ'6.ӎbA>Y3KRipʹ'9VB-KOb~qKJo`YCT5/I;I00!/ŢaE 6T㴑BW2RJǂn.[D{4UmEj\<31mp[fávt6}e4$-]*"NqX>2]2CG瞊~m]khU!^XwRʁ?.QDP(B, .AdO),o4@~ΟiDGbBf`8cIߏ~`>5 R3nh? DžҨ T[;mjg ?/gM$9IMC%ϭ`46-0$]㘒RR5X3 b7G)gRS˷STRrXge̐~ghs)%B#g 0~WB:nX>]G? 
ŵ5+"8mv>R@{kHOswawk !"5깩 !#???K Bۉ4M HwPS[GcO(d`foBhjuۈ_RHJJra~Îfal>e'e m!K缑ywC²/oOɈO㝽Y}`_-V 'm%Ǚ4 )׳p?3e7K  ~[ n 4%ktP&$bu?eʃ4"D"E}ۏ|;sb©U+Q&J( E!q9A\\ x}> i"Tz.V͆".MBb5d]2Aڏ{J f꘴}K*>lhPz (KoGmmlUP5oRt'Ǝ/o욥_i{iS x`G уNwg `428VSMF|wLy> /EFFB+ysv~ qKK_Q΄2~*ܵakٲ1+s eu4 II+.dBj:GkkH&_[7\D+]q:zALSJ%ED6N.+ QVBu_a,jitsFj7TaǣyQ^Zޮ _KMCA< qM1)SZ*Ç>8cwj*:($ A32x{cG?Re o"V>Ht}'`hr ^zGCό'xmF~f~/6g\}g>fȌkxcVTXhܳ^'e8[Nrwn_%//\"@tMsR\_ǗJ] կ?|;_v~^Mי9t^EW!~ njժG K) <.~]BP1Ej!"_boPVOf#&> Ll=a_F_?n2Gr@s:bWRѽ*IG .A@y?쮬e%l"0֡K$T(ڎ>V)/G nKJ X^At'XD1!Md5{ObTJjs7m ُ?lwB IDAT}_ƔSQ2͋wlo_Ӿj~޻-}^V/i l0$$rk/쫪`"&,."]u m#1 4-PDP( gALLa2>~=HMtS\@m[ȁ$کmڝ7n@͙$9^GێHIr3r`BcVFgr^[?VM d1zpigu=S{pP!ۥDs5MtrtLBĶjن6m~3>mqK|_i&y#vlpB!ƥ?*aqq&itN[bqF=\{61`  BP(:w^;Xl?Fu:vΤ> Ey<^X-SǤqӌpƐ&2m&p:,sp*kTzxk!>A 6Yc,l[FG4 ,rtKn鰠:o6ySrWmۿc~KeOyY;#MѡɈs{vQw"Fo:]ܠ$!p5SFBJ( B8h/ / aҮUjo4Ð4xĹmic;vhN;.$hh<:׌fYc5ckvZtXA;p}fLݼemNHYU6̏kB}mHPӤBu){mnHcKx{Mׯ4PWDA:9ٝz"^ZnԲEӴNs5ZyyD[O]ߝn{lKqm3'EGDG^WToڒ%K,K,>,%@ B8 ;TfE]-ᧂU?&BpZ{&'㮱#xO J>Yߞ@  Vb^T%A}G O;,ifVMӾSUly$EltJt8ͯxHvt^r={VN'%Gbj 1Sы!UJFc0u<_#b<$WT7 eB]RJɊPDP(sZS|k tk"OF]+00Nډ<K èX,y>gCIЦ#*zYNMgdj*D9&EUh6{\9\x Esxe5d n Xwi21}}cKNj4wݘi_2Wwnxb9 `Ki w'Wl^]*:DiS%}m#P,ɨ}Eߵ`0Zvv]\@PDP(s&O㧤~^6Hq9\Mҍ<Ȟi^!++ќi0twNɦ^]3~_mZã;GOQU^|xNN7h*o=/^܋k`Cy#5>/7o|g2y*_~qxncK1|CNs= -]ZE? 5m~2MvVb{ =E&p݈L|닏S9+0s?ni_{̙3 ' BPHiqu{ )aw?-jz'q Fh"uMG \l;wS~'=GCzVOOUFu}WM?\v(e \ |oR| nW!?>97 ^ooco\ַ'}3/Bݎfi*o~f5}>q6utvQTH[K^cwxl出4VT+") Bۻ33d ]vQtvw4$PK_?m+׵owNWvUQ[w녂j+JE((^!@&3gf&3I0Cxޯ׼&af8$s|XjM NΖcxcPkABXaZڬ5QܻoMG]z5 wc8N>0L+HҖ Č)-'0c~3R[[[ 34I@Bq ؒ zxᵽEr[GCn<ްo: P9/wfSr9`۠*u}1[eʆ#˞2 W*C$s.&N~@$;L4ctNn*xsLT1 ܾ5C5z'8#}>ߏP՝i#iB!9"*uUo@6wo:s&0E#8{3 G\.'<.8 _ۢr-+@ ~2/!ݦ" \<ѣ4G48C9"b8.r6B׃b(ǻiqBrڎU__$ GQ<裇~גGu–l1Q1ՐWAՋ͚ezlYΦnC-60 _mmΝ;y֬Y󞞞di$B!)odUt 5=7[hYf mP[lĺoeȋ$ B!8vqVe (G#0 )( +ԍH0aV*G fsr؍ ^T$Nd b= (B=ιꪫf⋗\re&Ѝ].;*![{1dn꿩WY0Ww1ԱCgO̙3ƙ6Q93Wo۶Gn8+a|n/~d#s>}~LI?Ӿm9Ӿ%( Cl$Bhc=OVZn5!i} ETͭ=xár eWqP-FQNZ|;j͚5/0-̼G\j'$c{Ҡ>? x" Wz˱):Dٶm?O:צic;H"B4MEew ֍Pm8M!mP LEz]DjTjEvBqe(/CU)td7vuu}i[;zߔzʘ nA_vMzfJ1}q;s:nc|+ͲD!Ljp cOs_+Z:t0v$Qj 86xPYQ)PQQ"6VՏ;3$":NOT~͏fM &)LZ=9r͏!-)rj_={$B!D].PU3ǎ-pxٯЈV؃ XU (.AQ 13@DQXj~i yOk)H;a2$Gik*Ef(3GL!bKb].hˁҲLBW( Ӑ4( 4՚v[/"]Nli|3ɪMtAv D?q͏tctlǒ=9'A|a555/xaF$ B! MCǺZš: ,IiJ@l6k]~28ڮ"vvCUAV-О!ϸF?dHlʰAǖfXkUDt=3___h6I@3@8M Ӈ|#5p:0MN~Ųi$*|ɤd(=ɯXTq$y+ nKc ;kP?D5kCTCrnlW}ƛ59TزVA~}v`O}?u-3O2M,;I@3&ڐ =1v@#13`G^A!|T E(C1gi[ VnKp+Ot+<Ɯ:?? %|zύ^34Vor4Jb8@s픺aO~D!>ǩ o_s1($̌}HݴJ9z"_ffnatnykǁԗ;0< & ihhX(wL|[\@2+AH'vo2YNH qlƛ ><OwO&˒[Јw& !!K$-.Gw:W  3jFfn=tj,N[|plkCt X\tRZ*wOuSAfފ|q%&]U$G,+ۋji?nll۷ fD$B@`ţx#;܆=CxG/io6zN@@;|yni?#;ٵEm6M}kyֹK>LFCDQ)-$c*e"]U@p_(9oy`!ɚz WmۅpKۗ/_m"Z?Me$-XB0?/쏿@R.Qr#;><~]{?>ٵs#ͭ䳎bO;y]=/7bƅS0㢳PY53QiڿܐF$ Bq1.ͶhEǐCg"JD?x%K.=ݦǁV fj<} xU"Q8t߾G[{*lNJܽXLɳ* 3ݘځ݆݆XTCU`(* /1!T݀#a(3kx+_ʽ]]]y/ijj\WWךCA!l~+?_8ױmO3ቱbztw_}s`]:a]NU@1āheW]k[Z~2@,l9ۯ=vh+#{;&---?ٳg[fͿOH;S'@nCCCGSW@!l M.݀;8Uh3 kFB47-po(:FDvi͙3&$ Bb4]rlsӎon7ή4ܿ*.+f{HJ g]OD"cn۴0zF8+8"u~صwvm{nx^X-r\,}Xg`0l9x ֹ:εD\>SL`Fmm}ݱ{'OdИ>5 '|gZ~U[=b7y{۲ʡJ3 TØDqO˰*5C3+f{|ٲe Dd-4P__aI@"LԓZSܰVQ#ůl~emaMy))+/$9ED<MU5L!XG"_xgZX5_gZ mWι0zo@ϲ*}*?UUSOyCݾcǎ;Z|Q":g/d ~7xæMF"M|JW7"Nxkj+4D1!AI933M+Vxm….P HQ/73;ѻ^Q$a1t:0k,3H&5kִ8_QaUU[SSB~## BLrcR-y|nݺa0Mf3N˩SN=rHn-[LuJUU/4MCDM qGSnhhXP]]}i((=1wגob 4M{K : 0T frn]8ND?^xV͙3^WW7cݺuw"FD1fuuu' ""1fV}>[E٠}*֭[筩Y `HMC!ȇB'gT*޽ & nX V!U-DMY>G_$$uR?oR;M Yְwsn`1K@ ~TEH܇fgϞ`D4ͭ6|jtEQjjkk/bW}>=D44"J0z{GuuuիLӴ_ŋ4H3LC#ioy}}D$3'*paӓ%׃{W":78i~i_\H"yPݩ~f4ͤN~[JBKuE?n0lӥo78J0: < `.~F׮s4 cܹsMD 9BDafODgK/t(+Xkl/.566"%|MCCrUU3hty0Cb IDAT?m"!¥ ZDՔ'X٩(J{ZEQp. 
[... binary PNG image data omitted (end of embedded figure, IEND chunk) ...]
neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/refarch/figures/ovn-architecture1.svg
[SVG figure; drawing markup omitted. Recoverable diagram labels (OVN reference architecture): Controller Node running neutron-server with the Networking ML2 Plug-in; Database Node running the OVN Northbound Service (ovn-northd), the OVN Northbound Database (ovsdb-server, ovnnb.db), and the OVN Southbound Database (ovsdb-server, ovnsb.db); Compute Nodes running the OVN Controller Service (ovn-controller), the OVN Metadata Agent, the OVS Local Database (ovsdb-server, conf.db), and the OVS Data Plane (ovs-vswitchd: Switching, HA Routing, Access Control) serving Instances over OpenFlow; Gateway Nodes running the OVN Controller Service (ovn-controller), the OVS Local Database (ovsdb-server, conf.db), and the OVS Data Plane (ovs-vswitchd: Switching, HA Routing, Access Control); Overlay Network links between nodes; Internet connectivity, which for compute nodes is only mandatory for distributed floating IP.]
neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/refarch/figures/ovn-compute1.png
[... binary PNG image data omitted ...]
[binary PNG image data omitted]
neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/refarch/figures/ovn-compute1.svg
[SVG figure, compute node reference architecture; recoverable labels: networking-ovn-metadata-agent; Open vSwitch; Interface; METADATA Namespace ovn-meta / haproxy (three instances); Integration Bridge br-int; Internet, "only mandatory for distributed floating IP or direct vm connectivity"; Overlay Network; Overlay network; Provider network; Other prov. network; Provider Bridge br-provider; Interface; Other network; Interface; Provider Bridge br-other]
neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/refarch/figures/ovn-hw.png
[binary PNG image data omitted]
[SVG figure, hardware reference layout; recoverable labels: Controller Nodes: Interface 1, 1-2 CPU, 8 GB RAM, 100+ GB Storage; Compute Nodes: Interface 1, Interface 2, Interface 3, 2-4+ CPU, 8+ GB RAM, 100+ GB Storage, Internet ("only mandatory for distributed floating IP"); Database Nodes: Interface 1, Interface 2, Interface 3, 1-2 CPU, 8 GB RAM, 100+ GB Storage, Internet; Gateway Nodes: Interface 1, 1-2 CPU, 8 GB RAM, 100+ GB Storage; Management Network; Overlay Network]
neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/refarch/figures/ovn-services.png
[binary PNG image data omitted]
ؑB C[+F끞0q:SK{{JkW Ǖ {k=Ѭ˿K) Ne2xܟl Hڂoˎc^"Uw,lreĒ=_F<8U>mEf* tq+ƻlM?<+ 9ζp.LFt{RΆpZ#e"\UV\8ePDe7[un!(֣6؋/,:XUNRRn^42W"o۾IԢŖ2] 7DlػG8Φ=-g2WQJ]-Ǿ@9Q0 P (َSB*SKǸvpVHDm>_E偲RG-sCL Gpe|'S7wL&gUezroD̶oMH~eJjh <7V5, QSޘU_^mKbNj@.+ X'3/|F\Rs_ gԗ%n(yD(TeDJi`3(==}P.|Gޏ 3+ή^VQ.4Q5tSK{|v();Zo;' w]qB: O(<^m+7*MEKm_FST`'dՀo*>L|^,ccX!o%TyZb$4Dkh1 lhR=F.9 ؕ!vЫn,|-}HD+Pa {5Fh;+(i`ZQ5bGƈ9i-aWhA ǎߊ6xjd<>kƌjdmNe6rc^uKoNzzUm^2[rkNb2ǩ=IU&ꎮqj8c}]%:1aSva_f1ڋ믿7\\kCM{PC9I 55^8NǸq@{Ou閛֥2sS\=eFN$zg/e*脫#0eBMU>VN/Ou^<=iSb5oq<+B "z(rQBDyìjqEnyqaA-3 _Y Pt;u{7;Ə2<s/Sˊ qra{JB]xE5ʖE,PUɍ6g=ifIrjioNRddvU =8McI*Ⲿ:'-h?},86wb68I2!d~&OO4Q8nykD9YЭV ŽLT8Rit}K,]+d*z@(nr>T)ߐJG[+̈AF|L'm0Gz#IQ?>jmΑdT|P"YQ/^ }% lt+H@\hvcG qIkUes[\p)X\V+X]]@ yÅ;Ζcvb[yIa„D1jE˪3o O MF6qr}*W @wAU m%PڶάɊ^,!ƇqYym94L v@GU!rr?Y_IQc]\.: H;2+\?@[i'FjDNaEYŅr,t0e1x-LkqB08$Kv|d4?IZ7%#-0pʟZ:fe(7AǢq]R:@q^eguƤ抄rK\I/~_ zCS$'Hp\'UǡW% y&7|#ݐ!B5,S+C@ǩ_dp Dխ@pj7 Y ;C@O}Oˋ b_]'9cbPAA(aMDчgòq„MWZk !VԢO߆^(Nx"-?Kw(Qn=@gUdV+,:<8mAB[ftW;4E/6::cE9SB?xGyIcu{#*)$Ƃ}.k%7ee3 x]oϪA|y4_(+?Mv /U\`M/>j<-2{Rm{^U_^KIw{\LײԝGM>qIc\![f?JTqu}Zn:t'DmRrI/ n;YCS ŧ zB^A6Ӽ ȮP)'͚cb 8֪瀑 Q9*{ks+ |\i &^ZrxKI]q TECC%W9鉓]*>q]# ʱ*+.G6+^_(Q0 b)t&{' VV\ؤTV69TVZ9zgZ7A6w^2ҫ>aSv3U=OS.#͝r+BLH"ms8^8{ĬWEIT.B@ B6l_AI32KO+dsoh$KzRсkM9FQTfٔsK=yjhk*kݓ9EϫL?zC/"\U7"k]|0Tq=)TEw}YŅ ZwdVGfoyE5WQx9O޺:M\_Xtp3cZ=kb@r!^X_WKVaSϞ}a ŝ7wNKBV _5#r&07UB A\d~Y  s<ϴS"D;FV ff$@J7"# gR:1hq)+ .Lq,IVmU/./vy1O {[k!0x?k"]SC8qf;NnW({e$?œ{2đb{,?PmoMW^`7T *96?8XNhNLޝUOdWGID)R=-K :bvS(}V_AD/ա,㭀k+D)TH|$~= l-niǁ?Hؗ RW,~68_AK|DE_>rE,r6טZ:uWٔ\f T4ڀFg#j k\S]4wcZH2F[Q^BF?/ Ok YuNn13)`")",CZs.bKtUĂ{$Dˍ V8aFIF}v %A;m J.s؁J.`8Q6}>ӖI;e M*j%UHJAnPqQ ًEׁv ]UtjCD֤j~͠m]0ꦴm,hʴP(RxV+um;0ɣ3eܞP( $ 6hj&[_V .}Y\R |=FTܹ ʆNj&4C)̊ۘ"P*3as[PVN nRYqSVy[֮i8Z$D?<[˵R SM" VZk«*hi˺ %'rs F(r_b'xө K# d -%q2lj^D,ivlՂز#|v k͸qR(H굊Z {pxGm%ǮNN+:{\+E$E@},$LmՆ\%ӥ~p7r i>\)oGՓG4RХj;:qX}hv%ڤs.!q*@ݽ`5%Fj%wM%ʼnyN$_(-PCf> l"Tg+v"7{3;ב[Mo)NKܽz:g q*\|Hpk&MqoCgf0:̝ޑY8$?P|UY1vŇ5 Á ˓ J:CH4D]kn^Fp=vs!q[,kou`q|aIJ$C]wy[^*dX;dZY[cOT'9|7J(bc[qfTDU_&oRAb|uJom(rYbzo^A#U񂞢t]9c vоO/X\\=ezU<+dq\?y1R4oqhO @̩:1~C$ufNt %l {InE{2XTV/x#/ %E*dUO]kI>7bYce{WI`U5q@'.tct|t`2ݘrs[:DsqB;vۻy UbZb{TFҩW0 '@{ Hp޾@EߎpES"$E`ݸe|qݡmqhJV,7%>]v2@ItVhdM-=ݖ!!w6F̾2KG犰}>$EuKR?_*ɀ[wM?e&Ǘ7D?Dvh/EU_V:mVyIڲ1h/S"S+Q IDATy ]+( T8?Az^^ JZ=ԸeU\89rGRꪌz$7Co @^ x ^ppC\H!!|8c?h^S8|MZ__o: uZ^ے@*mw'WQMKǼ>w}UM:_W(W&԰YI]ˈ/nJَS->Ct5݅EcAf@ȫ'UTp"aa識r^|(%+]ѤvS}B]Ey7 eFcAyԿ7$h!_ q8C/1o"|l M;2+~wdp ! Q^%g~$1OwdV|,𥪄A qmc(8zJrbNF~ *wE$`ԪSf.W[WA{rMDʜ=H~ x[,k-!{_"Kq G8 CN]_]G_=#XF֨عh(9ibzh0t fLhp*eDԍ E$ڸgm_ʧHLU]gڄmKF~FҬ p G\@л= KKKtEl{( A~bi(-RkGJϡćEg,s"Ǯ%o(i"RSG0K-;(}d$dC4{bC O%w z*wp-HOMѭ"k:BB*RW+wGF9RDY!(bϪKc|fxˀWF3\*\C⨶Nl`w) @<N^(l3͏cHR.[MLiƢ>Rba[OUt:UPof ]!\ʭAU.g_hSVZhb&(UR.wPY2WE= z2ќ(B3+eŁ2 O, ("I+M#@~O& R8kP =NW0f燑i钺+VZ΁<8}=f~z#?%g_Q졥ԲOp,VʒOqZlY2X&! GT%7|-pv/}@5y | )g-rOsvfx"g<Ybϧ? ZTV1m8۶,}$gm(O ٍg>&E͵W4s\k/A/) Va"NEB0D͈:Z7>z-"I "*|Rw tgÔ{Q,;zS;E64pmYӧj>I0bl>mA0_T}(-m͆U؈_V#H7屒7>1{ 'CD; s-u?`,/_3JC PUo i?6ւ%.gZh9l%)9f_XLUApE q׵*-+p<mlZӧ)Gȵ'Q*iqwkVO^e,8yj+]= _;Wk#%q6:dv9Xܵ7G%";>8U n59 tI-<XKmcYˊ qS!T _u>kz8auKPb+}Ee"1DkghCˀ#3oڶ!{7l_=dCRV | Y{VϡEfن7xY'p0l 421J/H>DAYEd+ܬഔ>s!Is[#mjVqHC9/_ v+@$6ˆ>õsUhK%9}Z>f~vpdTiw^< ׳!:eªdW_➧Xw`$[f{8XxJT6) dbY=Fݐ'auB+V6ojsoO^E]M'JyG'op%ʁըX*G`Ʋiy|u>50U!*EER/\,SKǨ9]U^]ArK]_h`?ӲiOv@{Xz51G}kNϵkB{am莖cOr/2zlmHim믿7m}cT&8y[{'8NZ~:{Ŋ>7dڹ{|cQ+/-aOϰAJ|$%lj0ſeEyi53t^=Á;5>kו ˦vm?\oó2 2  `0 w` `0 f)`0WcKmq} a` `0GJu1&R`0 0 `0 `h3a0 `0 ` `0 6(`0 0 `0 `h3LZ`$Lpth9: p4Hl0 6M[!%g%CUmc~P׭[9P)3rj T<;UUΞ}wU\ڝI#-S$D?2;Z΃OÑlݸgZ D؍ur 82z:@$]Qq]]uӖ.:ꟁW } JQD@ _[6ݺ0quozљFqZQcL`"R|tҬ93褥dWP̍Da7yG׳8c sL׺xAFz<_2«o橇z![v[z8C#ue! 
y&-Gˆ{մ>p^ữ>ӊ7xWVMW71XJVW=v"m3`LG9]q]I$ضM?<`'^9pݓ2[sS~C&\yvap5ӯ:d]`!G}U3jt۷m6]xV"L onؖc[Ju(²o6U.ݺ3{iI&߻tҼgR*jYzl(TyGo}jŇGpoA4;6ighiM-[SSyVmfzkk(5SIvVF*Ĝy_Bݡws,‰mUp잉e RY똻hKv^V+X+C\p!Fhc܋3o.[S :f!YBA8~zgH\skcOvŒ%3>fZVV[Za}UTkyӯ=Î;{Z4Z`30#7pTg,ȁpQ.ڈ}r8xkޗt{@9wFLB*ݚpw7EqqcYl WmϠ/}9M8 r'=ci,(vQCzs~cR32;uO}Yhg0iӕUa@32 [ykāhB!x~ xҽ:al;IaH16Lw݁4hs⯜X2rdgs9Gpb/N@ NFzm S9={ۏ{A#άMGN0߻6ClFfkE~eHLrb7,ehӫ߀<;t~Qt8D;!MD=A zd5hە&~p$!L7{CF =#s@vNNb=@ggf|bژr9ݺ/!: ҍoSlվ@4hmmIϜ(zٹ@5` 2 _n ᷇bz?!Lp.NN.{Vo "9G''=^ou LaZҸh4mwv"#=XaeU.h|$ rg:ze>k']hHS|^͑kvD3a6NJul+)6冫8ط9]23NN4_qm}cݳB~>'h9zx\a/ n$Fͻ'`:M `=FB-$a>jciNw*'UU*s.Jܶn4Lw:HvjL{z{]6 ah͢}Q!@Yβٺ4ͱ3I } al!+n'^%7׻GWe9[\WyuNqJpeՆΤ[ɸ6m5 ӻGI}j,[ʪ^^.z㶄ݲ-C CZ]^~wfMdfx[`Y͓/}@F0s˕#,.]dx=6Gd-we|rU5!ef0|`O;3Yy8M0fIBVl&{&]$T붲yNe_)y8v5[p]rjƭ Vzdgѯg7jH;kk5(LM(Ly~tI[ Yy/Eӯgي Mټ鏽ON_h8/Y'Է;7mJoz4oKy?RUS^D8|`&{$6`c: .<.iqe{/o]GCq)|%;k5Sߟ 0g!TΘ1mhWX~+ݵ чGt^~w1 O|ȑ&4C F('t-Isade۟~Gಳ0=vw/> ksݰ0,[fϡ*m gvv/YŢqOgێ*ޜ /U5a\Wy.==/sd=+m?8 WYsrƺ=|s2ze^̵m ?<N=rdʹ*jظ=Ko[*xu rٙ1z>gޒ3Cl4C2S-w ز}gm[5}>?4wK`[E5ϽUN"7;6+nჅp'oxOquF_Կ!"2?| ٜ߬PWDQl^s6gs}|KVʘ96x9Oi^zŊ|{8~plב%+3+8u&f`h)Z77+628u}reaW]b{0.__IoYon]ǟ{_^z#jRlRAnvw(rfr6 lWcjVWR9d!A,ٗk{F7lgze#K-915(E#]ڼmgJ d{dwirۊ""*Wofhܺ~w L֚ŷ>ùuG {.rw u} Έof֝tIRZ}')G3¿]Ɓ#/ ]ٟk.8p_G8ăRV1VBV 8nZ^U88븱̙,zYj-l܊j S+Ѣ|pEvH Epaft>',Y͛c,]M[+ٙ`XVt>yi^Q*;aiʑap^~w17H_d~ʑ9dfqdkQϿZG8;+gw,lfᲵ) FkxlRAF_\=rU {dqQ^Y>*:͘IuO _ٜz(^}oo}΁#1鼣;wʦuoAnvVEh{8xt_1ϿP OA#aKT`7ɊI3԰:f=Ueμ/g{]WK cg5!# j;>/:em:b`.ǂcF j͔>b5ǎX4fvޞO#G! ƪuzKz@&¬׫73jH-UG|&>^?X @nLNkO^.? 8S(R2c:!O5ټ7;f`V  pj`5yfnmt5ٝ>AEJۭd>;-񞷪 )g&"ğjNe~\5u^OºY>0\.oePYiuUtPNg j˲hlvogfŷ{;8cOK'LoD{9EnqUO`ݒoa,zhm8{ o(44}:mBGM}ouͻqoj1W T#GchȜj6檩C մ!;gWrLLp>ZNe+MIQͷ2z]8Ml۟GfW?۷$Di33}\F;NDuzWP]LYuXWC9enߝ@dH#\x ją|U"B5-WK[\@gk6|&R~OŽtzabBP]\9i %ª $2TGeM#_GtrN O}v:v*`T1oq  .* >nMv*jq\vtC̜<6 8ZXHa6-r'NCZD ^p8ݸ*[8\.-وbBof|F'D(dϤ²F#9EiSC}K){=(K'謹z{&5ybvWIA /nbkwW%cX6ڈքv3lY;e p LjwN #289#y/} 2m?2&⓵{یc4RJh}M<럍*fcz.9`f7kq$\4SGc͎,v)dG)#OJfA||sI EТTS^HeMs8 ?yS0,Iz̞IJ/wr07Ӷ3tie ͖.E <ΉPJ]G1,\4/s/qjooW|_a}? !:Ӈs8]G y̽T79RТ N:iA;!:;fݯv``2pv*ᙬ; ?~QO.bdTsg6隍nb4*8[FayN F k7DTXEp1isKi7s^i`!164'ni$jiW3&p)=G r> kbLxis0ep^/!:ԧg&!:).`gX_ ilN‚ I⊉H|:gsZ9{ ?馺?q ߎEb@8.JH_s`R  cr 5}J c}?`\j 4<s7\y-D>a\4oɱa\nk.ӏ!}cq0zAV'VG#URI!,ȟcu !4$ "6L%.2Ȟ窩t &ιSGGGGGGGGGG ^C7q0Y5 c`Rš(—}K̰Ѿ)(#96aDz q Kd>f~舘qݴE09ߑRʭl;oepht6L(&2edv.=9\>aO(瓵{ C[%R3 Fh=ٔU7( soݮ,jZmBܦgsP}r%ðι)ASdNCG8zjmFbèo$DŽHAi v'a(Bp}%(?&2KQ'9ae6[0,Gm֡+xqK?De]~&uv[&[p{J/Aą!0LzD&Nذ;mQNdD|A$m'Y~)A~}] lN#vdQ|k&>۝.'6'·_B8*yN_9y-`W]Dy~&]>K ˍu+'b3XSss7xM:6Q CJ-!֝c90=GxÍWSRUwj~Sh'DbL(u Tsh v+& u"OӒj Jk uTP'tjA4tr#%|Dzzj`nb4kz{.锭$(s&aw۔ ̄a:\oA}"ٸ'Vl~x*k}<ɺZc1h-8ZJF~kkZK9Ωb6g&!J m4d޼7yb׻u]52,ȟ] ŷ2 򧺾E4LهI̙>:lVY7$EKE6k~f"u>N`P~ _u;CRY-&f]<pnom ?36͛7W؛Yi; s9gf6 S[El'رّy%x&B:Njձ؈OM+b2rxlݟ W>n&9.j䖱f[&I1: FT 5 D2_ vKbǞ~Szs3}.lfB߹LF`,Gaw6$puxߴx*oFie=ͰmV=s;8Lb`Rf-{spu:kg0ml?cof1V3ΝLX?zk Mvmjwy3m+oV^pElݗco|8e 8 M`+koq oXӫYn7P]okOW7`s8;܌Q{|TUBVA%1AdTe_qFP%WĔɺZ-i|Kkof|GqECH 1̜<?ƣk.!$DH'H9| vgq8& Bk)w7Pd'*,̞P!f39WΑrBNVa.`53Vo`Df3 9 ^GAu}aAZπֵvDcfe{1jlv^S9 !Ϯ#[8 C,ov`Q u|''a0(u(dB IYucƸ\>a م[xL_̺0ndprO;& *j jؔFkMSfV<&ob6 )#  L 'ca3zgCvMs؈`b# 5Ω+F_AIU=ϼ"mۮ׭s$DDIU=|s }43[蛄U  _o9>N|dAdVRV98Z2* |3 dxLF= Rkk מ7hQ_GKʯhSLr T^5w*v30) }Z^L2 Ibl?XfؖashL`cB 4UI#])i2t,CDkFhiӟt06?U\*9sHn9G +Yf{&c$q/CNQ[鎿$#.̍ fo1qрj!v,iønp-!mv.E\?cT @eM#/ܪJU+G@.?;2y3m"|d`exM:/o 1a4ٝ5ڸ "Y9%`EqS:`Vn57u`!96p͡`?w n햬ٖAYuɱa\2l ٺ/B6.2$&)& )%}f Xjw$FrvĀX|"99GpECٗiK*׍W&"MXM`lLa\ƒ۪MѥlL B n(lٓߧynoUx@9[k_{ *4:kYQ6ߵ7]6~@Ś_yy'o:?rlEĤ[AvQEJ\D0{*6ɡc8,v}`b_>VaE.eLnCkFx-\4eՍ$ń9|f3'bDXv*HBtc$'1眀sv2ed C]3|dX^9i WNj c;ux=}3&b$roa4&)6Wf@b$)h xidSXQȀ߄di ̨f}? 3@+#+h!}pn9&[٥_! 
wD!&&#$n:::g!"}i;*3iD2Iuӆ{Lv#C֊7ӶjG?l9BYum ⦵? 9LpuZ&*,W%G1(x[At+.2ı5V?L٧oĈYGGGB%>*:*j1 LןCySNqTawоLtsp!9O !AIGG]U' t骜w-ImM]PRГ{Z碒ҭv\RGG\6Vk.7n*=ɜv[hyz|>uzֿw BYzEuH)՚Z@; F>UZEn A,@o-~~wTPdeD{;ݤ[TӻTԵ<۪2LHO^WAޠ=g'ld3]M94777:~(*̧p$4`R\.gsEQA-\euiy7v=vIccջ~QDTvܭx]nvr${PX_ker5jhIwi$ jo mB+mǑkGzmkwdǬ hN-lDU%.q_n@kn N qnVAمUK8>6ΉSU6XV\tM%_/g;2&)Ou6ÿ?HhNh봥F~i5v2j|j<6J{ew=,V+ߓ*Obt-McU|#˷J?̌B1읕MuuKfڽpS^#e|y;6t&::qBƾmBB7/?WŬgX)NF\.mvlW}hD^^r |قg͛l4n;3ix2!(.h&٥>RJ5_hGkz7 +.EQbHʣw\JXifS7tQUչc;g1*H/vo&9z,D(Xʒrjo:Z$P{nz- (dZqE˿Aqn"X&hE3aw{zhzŪGm#:>hlv~Q> no\7hE3Zе"+W7o^Qjj%B?hB!B*)sM]rl)no[{0V)-Kg.8,bUp/P[]OW(DVz:0n׾f YXVKaTټc+o|-zrEںqO'A=}8&hꟗ.en?eZs$}*IKqi1$ڀ穟on1ӧ͎M` E1"b n3wR L ΢eͽ3oH)5G7mX{$}gBM%Z[TMfOʹU4߰ ~?ݒ8]nm>?WiTzhmt..ԜR>iii+^fMrmmm"BTUuBm?}l_!+..9ȡ?K݀&Xz>g#nW56*; ٮhۯV"`4hGED ApҢ*[SW؄&TGZu5`»]{Ș9BCcC|TVdn"587mq] :~g2 ܊HWUw__߅Drh-*n\(l) qqqo;:uu5}p;\ z1oFkpҢ2.mDIzmgڕ CDT\tۣ[HUee\Dhv+;n^rUŃƌ{8؇i5N[ScE_ٸn-mTN-Lh>wY6=C9N=w҆|h=tWسӢ/ /*\Q\\|t\GCMh \I7r؛jk?z~ MCO e?qKo. 5͜]W}@ZI{x#ƹ_OZ }BEW]}M#ZZч ?fϞYZZȇ7!Q(ZO܍ĊW#ˎdԾGviW!NMM}r޼y>wtz8AݫBpO/7|bA/Ўf&.z3wr4Ԃ eMT,h*=Zgj2ښ˃W3z.\m7Z\CXGs.4dڣ<mkݟtWhݿxK{y^駟$xĒ%~a(-0Vϭ D͊Gl}OcM3I{B?ϝ;WBsy:iZ:'Zlx;ٓM_PvX7ڳlDlI=-j \oz s =iXst<{ÇYwMZQ}i״[l?w ˗/Z,B6&ŀf!D9s-^{\)@F~e~!KMEpǯwNoCB7xx(DA0ThyA*!H&C&$F0P'T n0eBF7u;]-jmOZtE/|`E_;K_ޅx br))u/xVfYK_GO^A G>O$c4.PL_MI)sqSRxAEsNB`-Z?]^-הYGGYwܟ F=QI^0 w!aao"c¥%y<}D!`2|g`4* |'HU=}Fy L& [sjFՎKB-Zuπ=`;A@8,%9_(^ZR! kxsx@IR2`1rAPlZc\R,)1))*+U٥O&<(]_^2--mάܲJdUqwb&#(N$F.EHE0o4%  npxT):E$>>UIV\w_rܷHxeʕ}ujwʕ+]e5u[g'L1*+Wfl<92`3G!Rd RJPg,]?euD{NC K/3K;|A0ttOGGGGHqnnY;!B!e݀[,ߡ]HPD*E@0yɑH Ѣ"4 JjU@j۩6 Hd1H=H!Q }ziRiuU-G@ ejs;ZU( r- R!KHvdvkIwR(w*ޥ0)2[=H U:N@Rí5D,PU0v!eD@=b4曌tرI)BܡJڕQOɑZ4!v:ژ˟ކ=_J*JzcsS~b0T5CJymŲ VeWA$ݘ4 Cy,]R!$RmY&RշX/..I,$ i2Y<Ƹe~f7 Vs"˨]B . Y-SSS7oޟP.(hC VUQ$%38< dR̟ /NR X3:+sˀ$2矙{ʯp$ k/Y㋶P/}+:""-LkpEBK%?B(VDQG҅USu󖴴4`V`z655/&o /={y00h?M >W5T[p{6{7yUGG羧HA<&LBɠG6X?[c~՜DSRmπ D˻=l@q|i/-~S Wg wŕFH-<Xx;0w |?%gRxkv( 6AOD½BDŽ/\b4H{p,x3i[JJRKSR*<5zE @<.:'j\;_QTÛaVwXE)M5]6Ne.$lWWϧjkkaO={ʻ@){yɳPn[8^j`<{߾:kD*--*UU͛6@OgP?])\;=0::5FgN^>bf޼y Rfٞ͝:b%&UQ{))5}'bϩ'UĽhm!?ޱzuZz5AGG׾pPQF8e.>Sg}'Ϣ&c{5 "$R&ܘQOKm$ j$Ůah?v#E.KF n[HUB\n!?p taQLkbܝ&Dtge&8uDY,(VaQ+ɡ௘Q]]/_/,QuVd _O,%FKg][U69O>(h4N3 k̙S;vR0Mdwoۅz-320^;tF s~\R vώ3~W7_}c MFWB0_ܳRj*?sCI;Zn;S^FQ@m|"Fq >)؟54޺spS~$V],U9CxQvlaMSշ?!a]t6}ONSR2bMLK1tT.t3u-X| ? RRRB' PTo.SbƚS{g$1ιz'i]<eA*162;~G%D͜EBh!td-!d5ݡ@>NTC[oZH)SRR~-|E!Z{Q}'$`ܿ e,åw0td%~/W`4FN7cP+Ags 40!E[J ˦c } &>?֧A]ivXEַ 3WLXDS滆SMK`=-@>p#,!q9P(F'[* wH¢M M%RʁBE OX"]LqBOU}LMNop Rg:L^{gnyb-o^nqWWtᢗH[M~4$"@(ZY9k)`4@E/au!!ќ|@i.\ _'N+k$)GDu|MP}ElGPGGGԠg~`}rzΘƧ~FV `X~ϟ/]z{d8vFtvv/]4n$E4 )׳f9gΜ9sPXP5-;K;|TUMnW\2@GqbxT< )25j}!`;xЊnԷ*w^:\v"k|g̙棩3gopx@>Y?!/ =tPT8_IKKRJ!,xzL?Tb1̅Lt'3c]F;n mN4 &? %o*ۭVkg㣨I6ZBi, (&=)~vhO $TЛ !޷,-t:3sν+!:'&&8&MyP` m``````𿅆8~g vRΕR !MLL|V!B`'oD#YnA\c3Mjj#RM%{@ "Bs늙D E0}9UӨ쇷6hwv ,蚒򒪪?`i!R*P0 :o_L>|gEJ޿"0Κ`4M{ 7{^III#"DDU*BޓFsJKqNe G]A N+j"d2={vJ}䃡`'qQѯP ?GDH!&SYHM3HY =YAӴ!x$))[qyJ=zӷu6#-7Ӽ FJB%eqЊߙG N_[テة ~Φ3-?RjZ_WCP0 #,&+@1$?ټ7Fcml?YgFL<3].I C-8ZX;aRuPL‹?}_x<#򛤤$@jY<^t@Dz(9]F,._p4?ܴ ù:|y 0+&&Kub2?7ol"2qS ٟ͆t.kێؐ΁n`Qx{&*4lF߱W- $sٰٓB@[N2 C46и8.őud```pFit|Ev%{J~5<.\Ȯ&S3aC\ɾ3Pm6Ӈ+V`8Po5޲oQ9`]UB\o3nr%;u/hq,Rjn{Vc;??2,N9fq.jw>٪q^TH ;SVJMy<t` /pw7GD0Yw㍧5Ju96jD޽Wo %&w(=IHMWma8~|_xB&FZՊE lG|6׏ݻPš6Esq8{I !IǏ#BUy؎ Xg_b5͹^|eo7Xncui?- ~3?UzJe,k4ns5&. 
b\S.cwݧdr|~1]sq6,ܻ9YMӼ;k஁hh-y<H,!B(f3b1ܪ &!/Y" .mF߅pyL<޾w⍌cJk9jӠ!őإ;OX841 [Ƕj]_{a,yjmf$Ǻkm-5:x}>N|=}o̊wHHnw>R!|_~Kuѭ_oZGB&Ӳ%7JQ.D&H2A?f2A/.SyF膟83ZDXT̾5hpͻ$ivK~jv|vkD6{V5Xv.vTbF(f.*e'hGt-^ezdntEz,;pbD4H+_#*,~?^T¢NKh١PG'w'7U[>شEv3ڸ؋W5+Yz`/OOpw_*σ۶S*e<f 1,;]pyv4Ogz%u,22wٴ;t泭k?dH8ZlGuC` RyEGȶKU C\#lV3)apEE`MSd+#8=cc9T|L|&D 6iV%6yj}{ʅ~mQ&tEӈH2KK٩ e.'23hӠ<Μ,"#.+8wgc2'8p=z\.p;=[z8L?4Rӈޝ+|3_AJwyJN?%6|؎A.|9{f&!L&99.]|1PJ'}K/+n7',a˽'I:LX=TY[na-p+E c:~<\еk3LN&~xw9W"1+`q׮h.Mowobz'W/4csX5x0BhQ.}=:}7{V_OD~ wa!m`iЀf#FtpN/ﳤ[sr&1M.Ǝ%w`23 i '~KQ&rҖrdۨn&K]Ki :#L._&sחy ѢlIIBjslǧ<cm؛ls X.r~m o&IowdBv-Cm>~.x\ƭqQl3:Q+ vEN˸sTb>\ GMF9TP^_Fq>Q\x4 l 59\P_O-_S#Gro;EqKY$]LMըv'h(Q-ku&6TZj-4c\FwY?uԢTS\l[ɄZƏo{«)v Z~>|o=mD%4@hӆG~U=o]#3000+/~pƍJh>j3Dwb~=i~Xk]Ak5١ۓjUyه?6a8| z 墻Y*Ν1:g47]t6Ѡ8p; #:MѸ0TwQ{`/>X?{8 ,a ]AVC5- ;ФJswSW&8N&XSkc{Z~°niͼ~XqNɢVrez)?Ex4?[UߒCGe;DXj\:LSs]Qt* b e坡n\ٳUK>@&WyعstwRpmܤ *W^C]2~jQuQ+udt̊BbȪptfSvSHS7_1VγG^{"p\#%AhfeÁ˨QYr&1 #/)@dFEBM2}E\Tp*!fs |w .jTSD5x. vd1k:6j~UݚFi_*N"4 !(m}Ǭg-y`TFu8ڛrA%}^GjY_+(=xw 5>gvvQ3JHF40ʱcsJ Ti8`s3Y k6r&,$AA84kV~+Q-jeV]/+,YkO? @T\/xH/0c/JCJ}?MAʺcF6NkS9t2~pĿuLСb69?xvڙ)Knm<9*bBØckN){vI9<|Z3ӇIDZ"Z4`遽A+_ņ c[piQv7VZr2$\{-%wuTyּ9hܕUGV}Gڧ 1=z%$6N?̾Wv-ݞ|)شoŕkrܹZEDv46%݃3':4jDdǎHLb=3j&!E۷dP2BDmK#Xܥ:&N$}{~;r?\a2[i 샋a(GBM[TrҖ9b&?րf&phxuGU;izHXt.Nց"v݆^D8y͇o94qx[V %7ߢJi:o ͚^h2^CR Q9_9>t!j%2"cK,2 MJũ[BhEvY)%^d|k{:>cr{mHFq!6R~ۙ{1;Am'2LDDbX8V\TcO\ʫEt4.Nj=AHTG:kx'&&.Ϟ=4{lSRR*D^SC8ow?3DzfwHl ݭwx,.d{o޸B5CP(r82_=oF3[8&t }9rNUbgQSe[x IDAT=}!,,nMV !F `9]`vѤLpvm3ijkI1j% lciCA-:~y9m>LAHEA8J3U}a4ѭ3˿ :ҧ!më4}~ .zΗ=8GLAO!57~Q|f'$s7ULD ְld2ث͔U|M ']5"3Th,n7s*W?4w)9uQr/7'5H _QOJJ-B{olͯ|j9xhWD/s9ˏ?(5};׼}Z$2$\||yswno|+bVl6dQh c~wʵ1ˏI/*]&|7ߕ/?{iPAG P\Wi֌P+%YTE׉(u!œz")o*2x(Z+Y;}9o؀hq8Y˖_G7<@59K;Ua?~JyԲ22LM%Oz*l>JHH ڿ.=F3nNxcȠIX7,-ß|Rk_[tQ;rl\Zx#B6? R?Wͭϗ塚Ɔ4F{Ms]Qy{$?ϯdp:m.7?>M*/nUb*K.WSdJC#;9YU[ kFU!P49ħTSѴiITEQ*[kA*ℐ,zV06rK}~͛)\1|9˗WIo[ lѣT‘Ucvc>y?{JK1]J~UPw5F N PΚɎAE7nܺRRR|t>u]OLNw'XW.j8vWTUwv])T_wgNbGT/&iG|FV4MF 8qѣGﮫ<;m s ?q煍+YO9Ţ 0?&UU !^?cfwʱ-XS`]Sz6(8շVtCQe[SլOJ9_Ӵ[njl4{C00000008/FDtBLBw.`Yt868w5j_JJ4(.￿;oa5dj=4LA][:a$f jStTxʕSOP}4(*C2R>2jԨm 3fwͻ<$$xȑ50飅o`'Z ( 8+(9<*&VR$Хՠ zN1c 55qJJsQQQW^raN*/uiThRAh)Ad @՗S4 Tq~l4QD\Jȑ#w(rb:cǎ=97MFJA;0 o{}> B{Fj5i7ƿGy͇KibrII%z7ݹG/܂ů@]iP sھ 'j<ܦ=w\'FLOKѯfEQ=z 'B3P0 *!T GtcZ5ۆ~e3)kVJtR&EU`L!89_ l? LqTǚ7uW%CRK/_3f┮On6s&N+x,XTJ9zkh؃O!hE +fe ku2˗TAxd!ēRʅBGWZS+B| y8.\. :SȺ>Zn!,>sѣ%&&r$%%%-JA _04ʷ=U(q'+ ԗZMQGXoYbZ`/0|mz]3gjz 0Fog|@T;!kUi@3uӢ Nm62mPhjnv,Ңq)ư mBq0 . a#RՊ'4T IC8S?;eҫnMqk؟}HZpV'yi4Bf 4g+WKJJBwXv_+ST:s %=yP!̗Z{0|noԓ{{ij}ip HjI<RؼƒIxHEi֬?#E-Yu2#swF6ޑR->#ChʊЯ]A(Pmd@Jͳqղ7`U0]ތ_o~xʒ0Vv ՘8-\Ǘ|#li%6),B3𩐜Q)e1cֺLE@ЦQ'pECrU3'>yII b .\LUFZ J!yDg_~Ў:j]U-!o;!q#qE𞭛{\.'׉~V/ $M57V3_J>Z= iہ#RV}Z>x*ܡaҒsZ<6(Hfdꎮ.d25^44cZX WwәќK*ƔO\J΍y]-Ə0$@m ҮCBuеTC4XB%p9QIK{]bgVZ%QeǬR.%u+G&qgR[ P9ʼnW%99 7]ڴHNNRRR.OMMڕI6UK+3ծI*5%p D{'rqaV=<D?RNd‷A˿Иx o8cuAH5&JBB |RCB)mҔ]0YG]x=˯ݙʅ1a`p6VMIpյIl`5ZVWi@""9p;:RJW~] ПIx;K o5h+ᑑ2,Ozj}+5ٴd{*ݾ-?܆\@٤Ǔo&YM$u%%7\zuK~;p$uBSDDT篬"q6JPR)MJ߷ - ;=@DuhlkR[=37-PY(qQ o؋HV=ₒm-۹"Q d{r7^#m/z[uwQ!M.BjQ.;}f9tqIXxD!xzߵ+ٰot]tx$WR={o.j1u-mt Tıs\m\ ֈۤk?-O߾%R|nt9#}:}pY>Mʡ^ ޤWdћRSS_RKJ9N]&=Hxh#aL^1Xr]$ARb eۃ7̱F)D3?Q42;e4/$V=MJy㗏3CM3f̻ P ͪEgLXDl_mڢվafCaqfZgz"vrM#"fr.L+]HM8;6]wd߮ t(ߍ k R(arr~ՋL9.99r[~_Wva-А90 * n$eTV~-N5;4o% UUUSԕJ>ėo05DY@*wuݾןgCÁjޏL>+*_|-}]2*c/+_ZGJ[+٨P&J@&'''KJJ:K)xy؜+[ٜ=BPK) X'm@*_\YM)S5|(F˜Zeł3Ԃbm6MxlBU*e4 aV5B\nB.Na^Ӵicƌx<BpGP H|`GR`h?Ƃ̣~w9T|K9x¥ &zղwܴ~}-}d008Ecn9@CϺǵаƙ2t8w~:! 
"O.П.t,]9gB𚕅"_A|biCE r>^GTP 8ߨ~-߶f0e'|T'j(M$Q%Io[7bBW*( ,3ūKVoYTەۤy(gS"|Z].ERlfRpMBz & 4vc}6Kdzw-7@?v*EIїPq!b:]C !{>-)k_m_tŠ_,o 7Wu= mߒMWe×-# _*OQ+}qox~~>IJJRTU5~ܹC% !nzTգ1!~R~2lذEQ1]HH}]VOZ7ȉ ;*~1XeVUى+*QQْ) EQud0!K) !6Y,-W_}uرcw9"`"EU'ƍW뢖'˻ɥx`4!A8-.g"a]T/<*2{0Zm@8,Tw7j**5{a<>Ji"w{?kn|{Y9 nW\.\ x,g{Q'O`,L3']l;"9K1j3|A| f/Rfl┾k5PЯ[ _7Q94|ߊTVߏߌRYd;g*&p|羲䯟c-A 3?INvԖ>ILL&555MJpG޽{,XX1HJ9._I)3UU5!!a[rrgMJ){GYR&Mˤ(5+*جThBPb ,4Ōj6UP!$LVJԸqvcǎ}cѢE N1!߿ߖnѢd2E7VBcƌٹ`4M)e \y3Ԯ?1&P*?a끁B NЧ]R=/^{t{á0:F ˅˙ :M H20|3=h= 100ۦ>Z"w϶kpgxcۑ`IYE38T91 3/#0W9#爀q%|3ϗtg&׿3gGls68UR>͜9Ӗ,Xieȑ#kΞ=JQ4ʕ+%%%h!qOLMMtwR>H9!eR~0lذVB;wnO[1Cz}22 IDAT)%M6Yf͟?zL߀KMH)? !~QU˵{θr 59@DH q36/ g%^GKw,kN" tɯOgg |ygS,gp=1emቈb'~of[ >`ܹnǙBW M3V2Ĝ3U+(X*t?D|)*وO|u֬瞸ڢ}3}%oz)ӧI3RO<&-Zp8b&L_oՒHf2Y,LwGñ())}6<BS&䆊۴5Uw#'$"$|P0COvTE$AQbl`t`W l|hb+bEE3l&u$s3e&#܊w}<(Fǔcw&Ý##i+2PPR|\a>#R 68]VˬReFc%@G4_~JizܭJ؄pu3jBo=+ :*䲟+K^ W@ [6ǣ 1.م jDL/m\bND}pS=9]Bte䅷1a/ c1(;qow`Ĉ St ↊bc+#=Vw*4pGQǶgkmo-(,Y~_W۴ȶϥ(HDCp,W݊Q,Ÿgʽun ~]-v,۹km}%KӁ=y~}#rm SXZ|^c?Xَc;Q¦.>եgkHk}?pR94Ե9Nǰ΄_& iӴ15 9)1Gv(mTCaSU6o/dMٰM댨KAcO=P&c&g'n]VoWa [,B#T<1bk=۾/> z.."A_( C6c/u}z bi9#H'9HX~l]OYrW৲whlx0뻟CCzn|AM9e3%J6l[ٲmE"۶a6* 1110!F $'%ҼQ|kxl/*fu,[>c*|'*7; 6fp3+F}4CzWI9ڶ/ugt7m>5dՃ EMߢ-q xG+/jۑ<={aBlLߣڷixT6l[_X~m^?ͻ PA+mW`D}y\rR"=m%>i;{ܲ|/ T8vǞe4hV#מcגx #iNovbCDjAc柪*ST≳g:q A^`A_1CD8 ہF{P%wwqT/y4!'ȳ#p‘ksP(EUǭ04͛޻R5 QM3VFE~ /iӦ $3۶/مyZowRme$t9q5~/O7 ۺԕzmܣIGQǾ(X4. 3ׯꦿ-G#7yq+VL'&©T5 ͩ~طE귢-[1Q* 8JCÆ\qf'i^^ީ;{]j`fo7qIw{,R!s~KQQqPm|֖e۝:˲n)))Y8l۶N`x=VФ:nP-C BVCF'{F ,..n'"p'0Vw޵. .RG;-%exK!0P(6Ț a}ѣSuv/pmjjjnmeoK S!l7{2#34jw) d4〗ً'M /")s>9.O*Qw;gkTNp/+tN 0uv݀CƎ"qȗ<`8sC:ռ(CD2槤}X\ T4.*ZcB FXk ~P)sPhmJJʻU*0NfN7h=]cw1NPUA[ro38,e:<ɓ|]l^[PPM7Tܲ#MpxiPXX7W6VVVV׸߮ߣ-VRRlС* FvضS Xhvvv0edd̋30cm>0 q% K?q G|4EdUzzuU z6N6-im CD9 4h/UD ٶO R>iҤfqqqӿ ֭[nv>ǟ|G8_heY1"KUcDdSNN>g`J0쪪GȒ&M|V5>>~i7 "r\c&LWn2 3sU U4+~i qV*U~6"rpP(i9|{[nnnl||vG||01[ԲOTg4U}XӧUߵh'p93,w;聾ğir#qL>k@{~"`08[E3nfq~eYEzifffȲVߟVzSD}4ljYV" x 8Xe*HہH?12MlS ѤIK)SjO+".Uu'L 8w8r|qt&[:1(-H}Eλ 8NU| 2U7n͸L~6oE1"reFFFg)20/p"KݗV3oQRRR>r48pU! t322N*2ƌ;ĉy;nlw"vDq/n`\X]I WaX4s HqGLq]|uzp?8EUNmP(8poWZ5kc5 -NgO?TUȌٳgwիWY̹s6oID]pa7/'"Wp\ˁe"̶^q AKqwT!RTAyU195q*0k/i7Kwu if:R9IrrrZ9^TRt'WNv+#j~-V7m~&˲EUm[53i3 L<`4ܸqi][ 2dk( Nz5l˹wl^_pNNqޛU}4Q{^R Zp q㖋14q0[6~~_;5{!DE-\j=۾pa_|zi7g<۶_tE*[fƌ-}>_9qE$lU.???^,m-\Cc%77A\\\p4tB>vV?lx-N4Au0oDd9@q߫tDzl@ piTMOKjbbb mq`B@bccM$ ^odUuQMTU`f0y:]7U;Uw_jEcu1;;M0v 0`Ku*CȯڶHx;Qly{ԆOREX ~W>}P=.իԶAĤ((i,(-;TVTȌόVtwQ {蜜\ \D3Mz(&8n1J3Mc套UŶeY1ZnfUNDkӦP(URR۷0=[7W\>C^^^+7G{8?locԈg49I[9s\G to& Q?S{0֨j6:KaT ]<n.9qq5)ոqY6lXs233OET̲G~I$J܅bq Q 7[p6ٸ@]PR.+`f`;(6}d~̙Db`Z.} ن1UT_I˶C]}\jZ.fa/nŊSE#71%ogwU$Ƹ!U 'D^F }sƇBEؠ+M K%_gŧ!"Vfq 'U8I,B IDATOBݶ6pҶĂ"}&a+cbbY5CUzE};˓䏔J~eY&U8Guj"peqqe-NRUHMaÆygY"rv &&eaaY'833˲F$˲zgfV!fgggg"D U| |lYYut2 +r5DV8J57AзZ \.pM1tzUp"BsLwa-wyyyG8ujSȰq*?J*qr^ 7J'o*\*MZAfz!p]mUzX9 ‡=qqq_[yyy l27+ߛkgAнVnNnmģT57t_kfx}LkѿسD*qe 2zV:aD=s;5 # ضmOIUVU۷EEEeAԪ#OJJ*z܎fIIIW qevIII8 ϫ0UTDLmBQQqqG8i"ZU?m"RVZVF%"U ?\rNM0/K,)K3hРiST5VD^IOO>Ccq>c~;ãLQQ7#{J?w1555,%l?0$&Z_j&UJ>Vc]Zs9N0kp-RSSV!KPUy:|9@DH!Ċ%iiiU D\2Ӏ~`-obpTCUFE] bcaq.('L B=<̙3m2P(tiU,gz/ϟ/"9jjjG+݄4)X6‘~vt?KJ`}IY߫?nT3}Wܓ=>P b<\N6_&"m]WZn7cܳ{[݅ya9Dpc>6-:&wsR\>,11ȓ?@IWk_J}*o\VrZɖ ~9v%S" YضæP(t 4Pa}N R[q]"M~."*۶L4T "]TT9 o1MUb;= Sj޽x%2_64TD,ּav""[UuIaaX'RLTU8p]=”)S:8eW28hPXHK cp_ i+ߞE59lBLUf:1gzIw Q6Oe8׮R FXrDuvO[>}G=*"SqSo^TޫcI0 QF P(t8W5yKT5xR Xdj?r-5br ,Hڵy@Й*jT`|| "׻wkdLwgM<<e^ N4%$-=v ~:C8RK Q 0_ jF4 };NDlU)_rB7C9DsDHHz?m(V[CM09DIDn))) */.ᱏ@^Th]Hݢ;[+6Ú[ZDgU=ͭHs:+e#[Mڎ_QSʨ5^. 
N(T1R<ãg?KVbW2JlnUqjUNVnW ;PݍK_y;7tGt"R^V<_0ǀFӻSմP(JN뼦#5jdgg2 ƌ,!"322^ݕy'|+ x+==}eYO'$$;fԕa.?W% T;v+%333cw s<9q<"0##ǧzEIIY0&>>~aMR,{u0< F܍pB_`:n hBC*PdX;Lss+Vyf'9x֬YE D4ѱֲ7.:܏pBF><< ]u?jSWuT뿀pxiiTFM_|/s8\pĿQbIՍQ{-h m_^ W]]6l8G5(W12TNP3f{ﯾt=2dȯ@14U$##6˲Rɼg7'8۶}A~ ,+OD-z]R<c$[B_UTE{+` iPX}PU !eMnĈTUxTj֓ƔGv܏MY{XYC|>_HM5t(ܹs}[l((,,| pږ~aWpcĈ0z_w|U} u֖ze}RB3 I6 cp}<;;&9,$UWD.}q@˲>PG~ `pnުM\\m8>hР] -˺ 7%a LOO*I[u9$dbYVWdff ;s4բ`ML| H quͲc" D]rssc7lp?nVv@NNN+qf⾟Wȴq"eMP[-*<8OD322.s3n@`p] oMEUG222-\~'|c.1<)jiU KBgc[u}qi:ǝ*o_x~8\(Z몫F~_0l޼iiij]3rk8D $|BqU:ë,aÝ8ˈd;`ԩ E}0%"rx8FUgD6~UwEb85pDaU8N~du頪咽jHp?`T0 LU=666qL|褪i㜭-^ UQhƍjḵ,됨kx]UNoWՑe]8TEJӤI"uUX`q0TfÆ W鮪n^õ֊vaZ!f(`x/> J} Xwܭ=Q5r. uhguINV/l5}-nЀ{Q9/Wg̘RDU4TG)mD7ʵ;+Ya]Ͽ i|/ßw̚5 /ˊTSX0+S ~oҤIZ.``U=2p8hbKG-Z txm[@dtFFFNlb:)"𛪞UEeSe~L˲oG"MXUd8ia.J$QڔG_eYe2&LHPzeff;[,:_UwG6WoEdA}aYVPyx#33(sTmA"rֲkbF&>ZbgUɋ!U{~͍~lhߞgI8>>F|T50bTU &Wk~MMc8\pĨz  ݦmnpsזu .}< EVƪUoݺ'gY;a(uaU]Vs=,"A[ ej,pʔ)Mm>ԸU$$$Ę9¡E((3.ZG5+z>U-:"rtX'1u+|,`VII-@ oc//(Xƍk=tݻ_"1;US]#/'Rfqqq,))WsZ2YyhpUM+iGb"v] >=za(jG=vdĈ P'}=v 1–e!"Dv.ugL0췫FS2W0jt)nDDDU#m94iiÆ "}]1MSUJeT0INN(--ϪjuZ8Sz"2ILLաm~OxO / U$unݻwntᐆ ń÷uPlj|}7R}G)i(hܘmʲ |{95("vI~5e˖-ہrssc֖dԛy?kc9 p@ 𯼼EHU5tE7l-1AhcxF=%zrt8eXϏ#;; EWqr}3'O>h&".7[T b1.RreUܵk.eD 0Dv64iRC|8;Wje֭K 8 XEI"R#zASXJQQO111ֈo̸uoG[pg`xe[p8|u߾}ז NR *Ҹ^: eJ'4ѱNB55Cl?D˟~bs6MܻgϞG_RQkz{UDJHHe(Tjx?V_Մo6ڂȥ7OJ ;"9Vspg 8a]UuU柖=0pHvcp5 "?8.:gdggkFHiDA@`n_ȿ`~d`im۾u֯l.na\ TTTz\\>E8#wetMeE`0EU Ø/Eo0|qu~)k@rƍذaCt:t`08 Ĉ9@#qdggfvw4SL\V;2`]qPP'" //f ,XgqqcgPƵq7y2mO=6DMh RD~W׸o 'Ӥ/3+%yJ:q,k q,+=*"=),-{T5ihV_~tVNݭzT=-](7HHJJ1 3i5˯` NO233zLs8`,,Q߀pȮݦi~ȃ~#"Dsda ʏupx+l>wӪ:,hyӁIPTu#p8?ض0>LmqlVIa >q"$"/"rgDE}0 c3&c74U}xWU;{ōd/8s{dU߿ !"ew,*UQ"24fʶ#Q5UuD8>=r-|I4#1V=PDU}h("Ufı,W7!60Klb"v†%Kʎw]:x0q},JǓ:L+3j'_ekѧsWU_EE|8`?=LMIm?y5#@U}ڴ*_1."-_RJL j]aݢќ2G/ƵM63_ 7 U{z 蝀*e9vR~]4O{]Ǫ {uaRSS;QO }%>Sr}2eʔCmQUEcfƌ-Uu&q1.ڸq8;N+w7 A#1lIIINgY~];vJmXx1>c9W/۷g_H zDeδԉf(޲5_|}I-[IhڔSi8~Y>;Nn(ڸ,:va|}<Ztŋe,ԶizRe ̞Mڵ6|><<y$W`}Ŋknz11_\@aY1gN;ɝ:X/xݺ^+Vc^waK[̙3?[5/߈3g6X*"_q&&~퇹Ѧ@N&m[l;r&2;^ᾗ|o*ȫ4FtS}Ǩ*7uceGM|0'. hMd~Zb!㎻wX߱<<Xu pnқX۶/rwq%f9730 8b4I%'yox Ŀ`[q25"}{27ZypJJoqXۼR_zgykԈ,wGI8ꪫv㍬#:wś6߬?-ۨQm@|fl_tJئ g"M-\HAu:g]︃C֭4҅[kڔE  <~[. IDATW$ :;"mӧٳO ogMIKK+ NszДFP~aP&Ik`Ill"=,:DUWW}P),XW_uc7XЄmk)ܲUVzDtk2JF`=YWt@!0 Nѻw'm(17TC;ySUTua76nx>o% ;}=kr1P(EU/McYplѬY[3n7Ǝax3@+q}ͬ]6w10Oԧ _xLq1j͘QQᙳbgzq|f&?? 
/'E R}ųg3qsi~~9N.}4Os [# 5wUx堃OgqTbJEMz I=g8~8/>>W{q3dEDU ,*`-jM3l=˼S39L~\w)OgƤ\nI{QTu>4mݙsF b|l+))yz| GOfwx{ZC羜qKq[ļY 8-;z0(פM7ϜGUޏvp8\~r nDXc .`-0XiƕJ kD)y_ ;= <<*!##KN-w1JJ@W{A-ZcqgJJJYњ)/7 gfy߄ 4`'WRi P};M:;GMƍ4?r|o^~TW]hݥ /642}۠oST]XHƍ^apu1Gʌ qǕT|6~<[ ҙn1u6.ZƟ~"9Rp`Ϟ̻N׸DLA$m[yXdYàᡇVnխz"On ]uPʆ듟?~ĭ3\iwȩ\4!nY'xcyl߼'(7Nv|wQνMOHBGzo*E;RBh?>hP> h@!;ZB{~fI6 ݽwٻs9Ǒ0OfLD꿮Nn\6Ruc^D#;kk-i #ۅY/g[fb dݾ| 1-hк_){Oan[vZVDZuO[i "ќ9s*Q>4fcsqߘ =j9Ǝtd>ɖc&;9֔Z3M sv3 ;gƿ?vv*3{z/ |U8_ùfh"4eefTOzVywXyU:z+Adލ#ϓUmْ,ʬ'n]Bbc[~r#5O.oHf҇Ĵm @?nQ# 32JXV>,&J}aKL0$-m𕈼cRRҡJDcoGn7x.Fr37W=<>\8Q1'[ Vp<ʯsQq;Capp4=?d]%oY<{[Qw%Eyʷ׏#pti,.}k5uqj\@މq?;kX,M;_'"*&UvѢQ(f'-SI&=-",s^?pU}f1"98; F>999ab!99rХrթwn}g5 =̻YIII=졡(9d~o{r2ul{jJ`a͛wy||>jȴc#}a(sQ;+91j̄Fl01|Aɨɏ  P"erV{L+s_242|KLՎqKp;0nϹcoأ.e̙vm%}Rrg 0+ݻcvl8qO(?(y3b\{v6'0TY(@\.4t*N6/_|M)uܹs;URv""";xf.rE%r1yݬXQqqurh j)ZlY>'ӞьU;cfwU=~/q=M0e񗷔g1\6Zu4Z_8G*۝"(F HOMM ,zLB= ~pc<.”ZNxh=;{ᘘ nNN>5|yʩ / 'DO861s YHzzz4muBCj՛p`wje $7{>t7o!vm.1b[p,KǛoNۼ:˛aqڑJ^FouZ'&Ҵwo|Ow_Ml6 Wa!͛dXQ~*Zh `b̙ty 22؛8]|1]~#FB_1 qb"?V Iw sΝkƃ brNz!pn T>P+ahq DyFKCNs۸+/[%?{jan&Y*RJP` E.g9'C׃vewz/3ۊ_9"ÿSrBWƦUi"2B):0!TO_{җzZ.R",x衷mc&L=}إƎO$kƄq3~&KP bܺ{%0K6l̄[4$H.>f e(zƄqO15:G>#M [@aq6+T̋(M W1ʼn*Ȳ0˕Rq/̘'ƽM3Bԕ&gRc 7gM'>7ޱZD|oq;{fLtL6q+Lhd1!'Y;l{4QcEI.>vgL܈^*kıKFp3 E'0l̄\'"'{S6"OzϾ_)]m̚5=+ yıý}ߨP&5b̄+pYNacǿ(Y>}¸ۼ:])UP2aƄqċ\ıG0)%xEɛ'{羧[eKɀ'6|̋+>qu~B,"3O҈g'6@C3ܪaϾwMiϋ3 isɑ Z1{ܢacD*}q{_)u1wQ#L齇6i̼c'wڠ[F_r3&gJd.> Vcrb)|65ۿ[ yb>/r*嗱0|۹g2N_nDr '9x1YX蓻Xk\1~<@NlN~-plgoZ{ss7?+W٬tV8@z^Կ:n֓' ct4/;êYRn@) x,g,:ڿ~ РiR2t=޷N'(8?VJM~/pA>lZ6T;%Vc\?5<2i(rr b_PSM9꣬u:UJ[~ KOOhX5aı}P\#"+ f4ZPBzh(H 내Poǣi4+iQ ByCpfzis>(U_ BQV\Ϣkij) Bi*@\s*xez)v=zP1%zW11K -ě ߘxU=Mk H jJ4^5/f󔡮Oet,^V(:6X(cXt,^#^+)ѱxuQIz^[}:xu:|:T#^<:VRc!J@:FLX@R %]S Z^LUX5jb`uJ2B'G*36{9߂@eZ:wȳp|`uNDݺdmI[0;"rՂ j'$$ް19~;Pd,@BEO'[ >!ǖ}M G3M.7S ~.+U0\lC_?m)}›4nן#F}ׁ^]z{q^Hk{ 7E\%c}x ,vٹ QN/UnVV#?J= "?K"r餯8\ @ph)n=XO ߓ1Hה"b` 5S^(Q[ioэ@4:q2K| 墉/}>@ e=Z4pXSq<}W|pouu=eiJzaThu Mhv€i.ݝ* E=+ d9@;5cŽA> ߂;eRwݮiʭy>a43J-P2 icrv{Uǎe,BMT(Շ nmv }wx~4d h:l/3{R:64a&pQ_j!jW?{/h:`h|6_fՆѭy>_{nlD %~jsX\a9୷MWK{D0| ~.q\DZa`Ƅq_qTᬉ_ԫRDd{RRR?ZCQKcuK* s::^+V qDD$$$դј5v⤤&*2 }msUGNiYđX]/(#" W$ƾsٶegQvwӼ"ca/&;k{P :_3kp$W%VsԷuziz%.G>[8cǒwb{)$La{<%}KeڣW>p5 ø~РA>Ĝ094&"(" *Y0T ө֬lPC}3%E5pZ~LR=W[W҄υcSLJQL.W? /[/?{?24`Yfض~F;ETi경(˾ˡdдlmǹ8E}Kc1AEm6Ȑ޺9`u\4?~ԀrD-[Jx+HzJzĴjExz؎'g.6T5ܽtqʎMMQŌǡ>_L+JbM9Ru]) \Ѳ(4=ݫDOȀ}С㙘M:n0nw;KpFnY*;ִlUq @eĶmd+6m9թߧꔢl~L G7/~#S+kr!e|{ʟ/U 4UӱdpaKK3119W0 [|⭂,,,Jõ&U/&r t}04ͽza>_b3-JLREE P,S ]fWhp8swlJF T隒D?04 }[O5MSUvUE۽@DZN*vz+9*L&po{YKSa\ZeO!RUr,[Eh%4NÆմi>9 gJ`pG5bDO2n億l"oP*2+{2*0E;s.`i LLLML$))) "$8/TiV#cfFx#Ok~;zNLf\DEU8*VЇ@eQQz=j_x)#ke#aC&7iڭ_~&ÇRIS&g9;ڥb%DTSFw)㸭!۲ "K-K#}ꑘ͘YRj2`nLMM=.,&[N8CZiܣ]SaCtFTfǶju'RVekiekRQ9U}EDkY=p R"rORRks#Gtbr=eYa,]r2(#UߪiѢywoeeŚ[YKBBA]@)>:uj@2zXX!z k(rcgQNHZӇz35kgipt411xĠ]-(ИV,\v-k׮s52WnVkD11Vh4TMה]˭[M^˲[o%M$_)F}r>Zl6JD#8Rꁅ (L j% VCol(ą>IW؏Ȩj,"e.,ˌeX%|ӿ |׷o_3Dt0Ljl6sK}R۷֭$S0YK׮M#]}ߵ?{Q@]/r%>=KE[k_|'EHVn'cJP@l6ܽn-ZD>IHIO#avwm'1n\xѸOʢCy4ӔHtY9h۴ArV(.] ]D2t]O2s 2$ SSɮr^7 IDAT T>337мc _Qc7{ӠeӒq&qd@VaceĖ +s(&R*9p&&&arV3t".S)lzzMצEW]Lgj>գGl>}뤍k\M03i/ի}-ޝطp!\~Ϙnݴ<'ᥗr`bB:I_}-;M2n]j_{-WLP O=W^a-'-W sfdV9:e);{QmɏcM7ѳ'&9s_R#fM1Dy H7o^S")1@rd ;V Lx)]KPU۩R3XуBkAdl £[m "9>+EDl !ѥӂoIF5*% 4JkptI 'V BbJ]םSk/79j͛l֬@ ୄuk\t0LzWq/DӴy~իwSA~njZjd뫯b se6͚mYXs^}5r~$ll(<F.]>FV%(<\v~=zۮ|ÇAeX{t.P72K8c!kŊRcwħXu.*G3MpMIh '60`?)"+NRaMIn? . 
ƲW$ x_i}sаU4m?RMoco3~ $=7>HCFvnUj W2Тp>I\̀cp]y̜ܚ/_i5$^Ł9z>!_e7ഝkvOhNC1h79K=1N>=aE(T^b4l_ &Һl^_̾.ZhzK>녭-+o#͚5JӴ@!f3EBB«vy_SJ5jԀK.Is6oz5VSX[\ǕOY,l?WtGV*7!oF+p:8 72Qn؀tD2OId)z&?#嗱;V>~:8~F7:ةM 4㢋.ji퉉{X,w [Is&J)IOOh[nCߝ+h@UcGC}V]na;EQ~O"JA~>.{n^]*(sOtw&Rha0[M%*y'vRqO\m}8wȮU=1 6-ON1eiopA[c ϯ^p/4O? .'Y[R);cXbb~v&&& 9EBBJBy%==yyzjޜ 02T+ȞW_ǨQD6lX;jԈU\wxjr@rPeÇ/\H=i=hg{~ϞMiҫ~J۶m~Ƕof#/##nʎ'kE fܹy\USͨIhT:M׭khق ]PJPJJL¤B0~[4htKH#u @7phbmO~/[J.xeԹar?9:Y#*\π]?GLjB\.l\44Ũݸ'w׆WZ/.5{ sQvpz2kXE]6viӦDvsabr~b:& 隦]vMKK{cJ)FTySOщt[ iv\#hFn Z@o|' fg_u۷gς( p{ٿpledfr /&M7]n,(8tyD6mJٳ}򪫏J44*14?:{~lQJ}=}ժUKJJPXX󔤤T `mn߾U]P1{tX6PvdR|h~|(n-rRٕ40b+8ʾh&!D@=itK(nWQr_gͺ$N'B"դb}9d"]>r=|l|)))=s09'8pޠ8 <naԩS\@)kfcѤI(0?^y>/c0y!ns;wo{ܴGW`v/Xc+T8]Q܇wI+]x1w_-;_xb'oہۣ}oleQGj|T{yݻvh;̤f|*.VXnس~{6boIAiv976o,Mm؍fގeXiYz @DLK r:k+иSZ{d==oOT\S太r=MIC{grri[LYRV"pQ5<h[o]k?O#u w:yys~=|#xY3627 ;|2m'N0O~=T~4#{ǎpp">j׎cDz;-ﺋ^JQVتɓYSh&o_Usel>kttB|zxe9t˕'R&&VDLQה=GHxmZ^tsu}Ixt#.Ru5={_wȘ躵ў'KƗNBNF.1Py/ndZ\~OIVe}OXtʕ >~uZZҥK Thbbr`.699rsܹ @)gBCWeݺlq&A6~y:fn}92,)חL 23Kձ;Ʀ> ؖo04p8ruNnm֡L8 *{ r5FzC""">Kiii}51)ȑ#iii9Q@B\&.1_ {6aw:ȱ+Xiآ(u_g#edހᲳ,u$q0}ûb/8MsȆQe _NƖhZM:Bulr31U}'l~ُ+\B[ϮQx>Dܫz_D4Mabbrb`H D˖N(P˅5bQI(Pe}qקS[Ueki }׫W/|iڭըjb/PJ)*鵋^'k߲҅U?gl}Z}}?ȁ jn6/I?'~hFӃ8'#j6.z&ydl]#0vBxfF6`YUeMIE?V|m˾dn:zv|YuPTT#M41190C L <`]bbφ2:Jq:N]ܐr}NUbW(&uʲTOlӇ-oᆎ"8PW r6iZO/Ʃչ~(fɎl8vՋ\DqРA1119/1g0Lyu2iMHZaޔe]/@ B%Erk^OޭQY0p8.>tJ=wV71gEk* 򊤝Q͓v7vhV 09/p:{/-X \P@EIAEawMgm u+]) ((sOEOOdl=5"7`kiL~\a4#aR-R 覄Q.gp:2;9\%4Ms@)\` <v6ra1uFKȷZɳZ}NEY?ŭ&k(NUNyʖ%}{ \XwXML|DDD d+72%Sп({Uep˾+˨8Ÿ  ĉ;at0LD8Р0TѭT֚N]Ӳ J~ 4*p&QZ۵ョ/K-71 o/;\']j/4'NWQ*CIdݧO욦Us09Pބ^&{P"kh:OO)k *kęqznUC(*5>)))VJ41)io6,~WsJXILgwPхU('e ;r.b!qc4hw0'!!09'H xMد2nAKLL\fm`{q}FAP')*1fOGO1N3U~WxV2uG~iApp@d$%%m&&HHH$nysչa~y"(tfWAo6&2.Tdj?l8v9?/-8$$$$6]ד1119̎019+x:a%Oa@"ڵ|NڶSk^ujEܺu(è  ; }gd6#bܬYnݺ-E乤񘘜 .}VJv:?iY-X7ASD>\؍(r\8hN^f Ry2+W2WLQD@A'ܑZ ʉ ełB+"W$ruȱRMo)")@ pobbG`Rʔa"WjgO`CqUO]t@No-B\ c= ]>k҄11;r:=[, 0`2 ,󁫁l2n׮]o=5\DXel\\bP؍F4]q<~{QzK }J9J F+1kʍX4 0D-A$j5_W7j6VDv _UGɹ`DؠCQoxxqx}JJJhHHbb$~-RX33#ׯz/~Ln:' צ Rz3ªW: cdsavm8~[B! tTe_."}~ \ĤjRRR"BBBfYa<OS32k=|i ҆+7 H\~rWTu@lpZsEmA; O8 ޽{wn@(DDHJJZm#_!HP0X ! }R?boeD×X-X?w" Աkb=~$tS@paS (lT|RꋰÇuW"<+ӏ= 4ʿqU+:u]yx\`R6SfjT\K/*M;HOhr2gΜ8]ןRJ=,۰aC4kVdFivΏ>(`ԩAiZ_h:)"ہKR󊊊n?j><4ͻNCP%+ٻ++h#xMDR6PDXE$O?t]4mbbrc:&@SI?Tv\/O:acϟ\y*K)a;P6xF0@УGݺ{ѝN[(nт-[4^T~9!\ĉnfSJ݄-?q]|1?O&1E6NعLDFGz/ V-%Ar4ZQ_E)5 ;uw}w Da4M0TR'a@\8RŹe SJ<+""kƌ)))Vi})1|v*)**>trkpހA Q!X.&&5&==paW):q VP܁gYiUVMNN6 衡ϋ3xIII;v@0y+6mɴ"rRnʜR|W˭811110M%; Ȁ2C+YLXts&5ܮzh)X|rZi IDATZpfTJOvR8טUJNLG;RM)܆a-))iaî4mгT k~>yylhXp ZbIMMP)WR"!"RD䘮6<r 5MsQv]IIIyaJ`dZ#nw,P[Ӵ8j)"-EiE?$n|mXfW2DAJϴdO> )ϓ^3Ƃ G- 0P]5M?1tPwudvWJgkQJ""iiȑβu]6]OIzJjN pz.hNg&&&&`i HǭÅfoYa~R0OO#99Y֭`W)k, ݾСzRUUxIGeJtԄ aÆ]Hόb}6eʔémqJlyO8řl;';RDdZ>x]SXQe3 W#Rd|rIjq3wMoDdA̛{R(ď7 Ҙɟ?2 ~aY{RdƂzc|ObN`I.Ǔ7B'&;\D"t4BBMӾ۫W/[oi3mTJLHHX]|}Ĉa"ǻ'Э SSc/SJ vRqhJ tG9 t]o4X X)"""J+D\9wnMZ*?|&xUP$ >LLXPJ:լMDYJm"1((hɀʅ=͟T,>ɑ8LLLLt0L$ބZI?+)BDܹs= hjgx.2Km%k׮TaHDq 4KCuRqT o1̝;P6Jas͚5ߜMP <쓉y)HԇS',`2`>'-˲qoW ⸂g~o{L:5QFAAA}TU*H2%l֋SJ9sY,"K)t^UJ}%"o',QvΖk{;eW#)t0LT+~`` W˿ BI~١wQ~lIOHBBOHWU+rY)U@P *^b*%A HGj%$!!ovM I@Ϟ23;A΄"[)u;KB^=+,4MiM,Ϗ*BӅbV  1Bd) uX6)%052 nUf_>:oB? W_`ѢE>>>c i!F<&͞F -uh67˔0],AD۰r:M\fnϱQ(J`(e#I`5ō7j:8:ḯaL$4-HR*((8wglX .s{ OL0PS( BPCn\ B>]¿&lBqNx̥ѾJP 5 MBP()oB#l(!E . dʅ;/ V(]@P( E,&` 0Bfl?X Ņl#XgTxbP( OR WKXB5²HDcèڴMK`X B܍`(zMEPS(u7SXTG(x82Hx cD2Y:w"YXo%@P(.J`- Ni0a,{Ud'{ [j` Eh,ȃfM(KA-pP(%0.P$9xs<G6]T?&KwQG)) biSU+ˁUNs68l`.YJ~8g國/blj>,n64 G.Ņf3t ]E \UhS_X_6*. 
J`rgtBv`DBfK)/yG5ԅYmٲR\mU%vT(_44o Ѕh)]K)~SG:@=Y;πó9VV( ~^#O "9|cTn[H`<`y[&)5~gO?f^x̄ˊM@' s'e ]K`-~2p`;MnĊņC^!9|z}f<`tC:LrHf❦WEJڏl6a/&bbb"c!ne9&ʻ!ȔRH)gXV 4o3tyrŢ}f;U]p̆I.2 {6[s0:-͟>y]]2KXyώ7sA_?]FPH1jT)q?>9-/_ZW0F1`Ot{a(&t>›do4 -c  !ӹF1fԛF.<Ѵ)Ծis`4&}VE.yP 5  OH}W.m ^:̑0݇Cѓ^-C[q?Ob"" qÇSgQ'6lRh"oo/aBNs쓛~hE*heYYd@ m;ߌ |.s!fTS[ " ⦋I\HٰHa p/oWLcmE'%oMX%.iS~RWĒ'MkQv͠Qt8/ewb+X{P$ӊ@Szڣ[3R>#'쮓oEX VСmm3 $艜wll| 'G)+W 9eVNa t 4]^{1 ٰKCDi/mL(zbėHp#@|eyW=} r .7m90Di+l5v:<PsFga`Ԧ[,Nݘf@)eK) pIGvÇ ^<|΍O%FEV3z.~o3:_(Ç_0QE Q?[ꗙ^OIBq2rG$?|eʫiϨIn@ ?wf S|?7C$߽.KCC]=xh.з^H}ʔ&0I\'!Aä.).`i bbbbOJ9g7mJKD%jQ-B| 3A(m۶[.|=`W2Zh8hgO‰ k?AP\r`H}q睋Lf.Aǟz_ԏiSCSi1E>{Bhӟ(VQ,vY‰6!dɒf))˿E&d``iTTTsI1/|Eg}8m[Up]1|`!pKUH0f=L'a@3y܈MP(.PFM6T@J]+vփ ?ͽ)'řܑ4Nq1fZO…&<用,ICzI:\蹅{=8ƿ⿿f3'S4C|&.I̞x&&O9 OZ|lՖ~7F ><|ر^yAd'~~] !t\ 581\5iE}.uMxHRi)&ɔP8p9 1\ 7j/nqVx. sfTwúIP=m6_08}Ⱥ,I"5Q@ C₲m` &~; IڧƸVT;$ѥ;_ +xQu/fAKgV/o Mζϻ-[ roJh|q% =^DH)_lg_Iʃ0#f!xx)j"bbb^Ru[neoFyb$ms8.t4?sUhԣ޻)炏f۳;yDBBDJui!8J$еB0:RwtMD6[ctEo,P3bk! xآz*sS $27GM47͖ B7FT]47jG[c#_DJX3xx*CwпG:!|+|>G\XX{m'ܜymP@s6[côBe5_B}hR,gRc(?~8} n$s+ d2 ȅEJ2Y},gR-=y#RX\lځrʐX=٣ M[lxMoר3Lvhڶ x~Br%CkD\4rrPXyB%Hr+Av C'rZA;/B3EzSoYݻ7faBl&O݀J1z<$r'i#'tZo8گYEP܉LB#u\w9a?r˿N^0OvڢH!{g4*$8 )J\#LW,>cD*01 IDATݑ;vo)t:l;XG^uShSc<Rhn,yyfŊoz ﭠ1wV /IXDAXz| O E =!`4Mj\i5dkެ53 T@HyҺ6[dIH=w= F mW'jGCg_ ibjt:t{<b%R-ڏt r w4Cqܹ .=RK̟?!065].Єhj|.j$@PZJXtoލ` M Z!VMsUs5MT<(~&MxRxLa_@8ϤZqj>T7gXAU iOe2R$xUEj(*~FTܖɓ%^h|l!ĄEUcf%z}`thoϙ, 08;,Q /!%F׵ڻV.Q 1|VU0^ڢ8f2Gu]P d{6`4-XbuHEoZ*%zh"@ \'5 )ug"^[W+91KʅEEPpʑ܌T3rPeU 7 2U&#!<%iBBBO!Dk9Eaflƚr/s]b|-k/?BqH!E# Ewz=VqWBcU.R5=[yF)SASЩ.%#f 3䀔RR-D { E))*p4 E,[MfJ)b0^&x͔䓅X޽֨µɳutrFxZQS]j Oˢu|Rq&)OnRV,:J`D?BP(KBB!B̔R[\rFG<[ ᅚ hRdR2@5MW1ejCP( Bq^lٲ_Bh)帨F7ѥilBhU9(ETpryO\j:5 OrhLy 7^J9nk@ BP( yKtt,.5@vz?K|=FrxQ$kT UssAq_7pƜ ""1Edfn!~թӚ̃)EJ6&&jA BP( yGLL̍BT!uÆ G !к5s#nչFo5CY=j.Sݼ*7=SǗBBhRocbb PCP( BqbŊ@)B)%** @'<r/]jjXɤU$VH^M]Er6yR /1cÓt AJVttjQ%0 BP\tXM&K[D2g_nٺ Sx#Yi>'**j@܆d9XiHdh(!  a *7 *EsA΢ŗ fr Rw$5[P( Evxk %!;>4_֛g¢6!'2hԀ2ыBoX"t>*H,((xSRU=,"~ܱ1}ªJ4!LJƯDx4>M|u_ɪH%1QpR~dɒzkFu(P( E-z pWT9\r X ><T4 e9Lȱr-E5Wn˲];<n.HSgEQ~-"-XeW!1KiӀǪ B"O|##(LM%\Kt ܙK9{uR.@P.vr2ϔhk<`Fa20pB+g; Ϊ̰V7Ѫ4]/<<΄fUSI=t[l[z;)Vz,mѱ{/iن:Km-$ϮZNP^& X6~o.Sxc~>z+#ZѭQS*] x&^]2{/4^ɭ;IL\٥;z]\;v[cK>;R'nzH)!>h=ztP!➚CV= PWJMLj63SV6;Ÿ'doV kĕp84I9Yj6s} %%7-'s$5Ć5 gtIۆ i8"C c8z7W2K4W渐=̦Ȣ\x _@p,+jBU_!|F-BY;.*a+,D\g BP\uBGя?H` XgsoϺn3f`gϿ+/94уy88gN{6$V430[޸95#?x77NAN H橭8 " /BHZ0/ 逷_dچ0߰fk ٧w,.9wL{:>= D%"{ )'nĩXM&ލW~_6q 4MyXu"t25i7{vrǗ!!;F,qas|ч+#[qWms]vY\ђރ_FZ~t«7/?nr4jK?4]Cund mditt+I5m?Cnbp̝fSvz/.s$^yE"FmIEp(k +vRmi}ˢ[X1w_ԕ߱d"CBMl)6h(^/wlhՆ¸opD4ۜsY/uq\߶=4+fV`[͚0mv%%"]úRt]2k`um?|<qs\t<)y o>C1GJ𔻻 0z wjJ̜YcOe82u>̙?>gי}LN|[fH\P\]H۴!v ~~X(LK#O[Rb WFn3+ {J uB^\;w׺5y'N E ل^y%BARKp0))o݊ lop[w6A|r|ۻxӪrRak+ _Þz;pب?닣 ^OaϤ POT¼URt-t"A˅$n*]Ѣ%ZNffУiscٕTu%yHIW ؅0? 
qҩ o\V7E;}G91f3Nk9NRN6ܡ+Vym1!fC` P' !hXi*05;w'(7~+tQrK 7.|~CʤeO?f[q-V>8'\Ϣ[Ȳ3`l}v21|VDIO2 rO)7̪疔3xgJZ^eH˥G|y׽2ߴWpUdknS&MrK,޷~ g#ykz>skjWFxo_e֭{BܻvZƾdף4g&mӦ$0:2H-gǙnx#Vr , ̙822pdd߾=V`EӡCoӆ6c[IoiZu)NH>mև80{6GBΘ_Vi 6v,Ǐ'worïukǧY3nǑAHެ:͛|"!!8eþS9`z/^L #/0͋naϠ%wѦs>y=dK\:_IqC8F\0m/ȎSv,X@,B<̜sk} ⒫gk'0#pJ @nѶd/^m>}kƝU؝N4}p,&]5.9=1#i֐[j=؟rK~$T.ϟ\o݈&["߃ܾ#KֶBDVT#mk__˞HF:19ʝ RJK~)e쨸\5UMvdUl9q * EUv;x#Np% ܳ<±O?ըQ\Bz&Y}e ЬV9B`Nd 3;͛1?#FgI^O 1{O9C|,!!47Sџ,6?DI4u5VmL?zChN\RХ\Fl$sS?њ oo@".Kef:8=2Iu?U=Tu|4)[TJ'qXQjzVw"A6ȥ}(NPLF !ܵAsVeRYv|f`_ॽeZE#RG) L{VvґR[%E`#gO:< G?&>l;w;n@~|eF5Kя?.oۖv?NWb »Q#6,aJ'' ysXTکi Ļqc h7uI/A3y+ws9tŽx.o-"XsXmj$0]֟8wl;!>;'hԀtf܊h0zH6lZ3IYBQMbu 1 ɗThvܼX$EnFwԕ)ׯ!*|,cF|r"*Y!e!h曊D]Yq0NQ!?7kTF{655___JⲘB`X㌡ԼHݰUJFLF$,[W_eѣx;ܿ?re$ĔΙ9 =ڳK` mے?'O端&Hz){_~ J`ACP(G#мڻ^+9k2`H9L>> WΙoR5?A4rT#IWlkznwAw"u'؝F7#q~!'{i'BZr.3i+an&+i;&/BH܄hGNWc-u{ IspgTcX8`{b')'{d, #7C +WL5wtܤJ[ >,9!F).\xxJ!{ns_;~]p:˕*=y@(HNRaU.Rř Q*CPfp,͘0iO% x|ͤSXH{N'($mgjyŭ“⼼b%\(+ 320yWf@KXX40r:+^L#?yrY4kLJ?t[nɓi?aBɱABPqa *VH۴6cƐs`o>E47@w8&۶a ?i6ҧ&M*y'x#M;nLOD )]|KN^n__w#'m?RwmKoȆS}gǯ'Q|d??G~ 0;TF9$#}ZMN>úgeJy|" nq}*{O"bxfeLcoqCw8T~NeN9MA(o8 3&v/1{+ߛluG]yFUٵ}= 9[0!u)@ /92w>Ӣw7jРA'[Ďfd^\H<(U/Q,͉/` С®F7JȻ(WXX%`ONfϿMBtYWEP(jVM %?_I:V)uJ]]S+Wr@ IDAT/[%(7o'm[wr?N\dC~iQ:gjJ;užL!C6v,|k֬'e:|##i|, vId F.ûI;v$W=vUWd%oghnŐ~y1wo]r+nFhf N,O*t` iEO9Uܮmb W0YpI˯w&5yƈպt'U4'r6I:PyEUamjrĝLYߢi){եi ^NZ5lI3=vZΆ#5ovlWйIt)|&cfޝwj>@NMh?BT":$ⳳhHІ|V;obhϾ,n N?|)] jרʋe;f5NcUݮ B`$&2|}+-]JBt4ciЀQ~ӉfbC:46 K ̕ǩ+2}:Ǝ]]wqj*/DΝ;O42_TP(ݻKDEkQ   ]wQn4t&;wIl5^2Bٱ?F0| ?z]FDrWb_ǯU;Xչ3KP'.,qI+:۶%}6vtQ)Z#mӦJ 'Odcsm(-[܆.YYEaj*q_])$^Mgڷ\?B`ذa[bbbK)-_!CV`-x?{C#\Y|it|eN4p$5<]JۻP&dן.GS9Vx%Wr8mU% q'whQDa?RmGED)%.qYB4hP,p5**:ypr9[űW}B-ޔ{/R߷m[#%[Fbc!rW;ڿg*sU]lr7ظ0'’`%_zIvd7!$!HK M1ƽw˶dV]W3?Օdu|-͜3g|n#<ÑgsD"H$I7XiYf5kZӾ7m.Rk׷|>^;]') .?ku*5 Vbvw,cҤIQ?2"5B\!!וH$D"Nvvv!0{ڴiOKOjz93=9ug!`G/9!'J*y0’.BQߍ7.;--U!D˖-sk^`H$D"H?|>S@ c\<=usO& :1%x:>,ӎ&y˗/aȑ[wqWH$D"\u\"`6 ذa_~xh/Љ./Z`9]|-hG]Ny鼳PM#e;se˖|@(?u:+,XE5EJ"H$Dr|֭ϷmڵWXc=6*Ƨ1 Q^wsbQRЯGA\ ё$Tܹs'#x E ;;iH$D"\,_E1 VQk׮{|ߚksvOiu P"wO;E/ZG=BHsyN6XQ+-[6{0H$D"\,_e!@(/ZtiD4=]z4@t~^^|sA"‚"MQMB +VXNQ"E D"H$Uϊ+:Y[`dgg A롺FuЧ dsAVy缴Xu iEQ^~>'R`H$D2@W ɠ:jKkk㊢(}mWD" !s45eg8ynk=@kޑ#5^~ 3BӼox7>Enn ީcrnaU!wt\~(y-7v{5jP+(>޵׻8wOPtSKX@4ДwfHc+Be)$3iJ)4$QWYBY]vB|&'V܇f0|#>ۏirwh{kk;#ivۿ ]h hB];\"Rd>k{zuэhC/{ھxnk 9(ĸ}$WYRt>erŭN(!-_F)(-Hu܇rADK ~.H߻0=$TjBOR_a3q7-OTLFޝ75T8w| @;|}}$G6u7^9o|aOՎˁ/>O>$J'EМ>;В?pwj=bx9Ĩ Y`(p͡5g:0֜C߅ -搈SnWٜHIoWWEe2ZyڵEY9u_ݻ b}spa+\ җ5zwOoTUݯi~mQcC8{1L# !Nҙ)9w|jʏB9G٤Ho4}?V>訐а;3蓘m6l- ~٣E;8$Uٹ=7݁ޡmZ pmyo{EهΙYȠ)Ciן8g[7t~-ͧ+K>4{E'$.RUtiS~vmG=mˤ/rU) RQgwNl0y͋^EByHn'CpdXN/ŋ/&ϐ-{OsLB/bկzh?Q0نz`Me>.iCo`I$Q /|}C>:a`u imظFm>d[ ~\B[Cc&lbHn7^]S^^} j@ռ@[UQOB£;e͖?,~y3ǏhMj!`ڴi;E֭7. 
[binary PNG data omitted — hardware architecture figure; the embedded SVG text labels the diagram: Controller Nodes (Interface 1; 1-2 CPU; 8 GB RAM; 100+ GB storage), Compute Nodes (Interfaces 1-3; 2-4+ CPU; 8+ GB RAM; 100+ GB storage; the Internet-facing interface is only mandatory for distributed floating IP), Database Nodes (Interfaces 1-3; 1-2 CPU; 8 GB RAM; 100+ GB storage; Internet), Gateway Nodes (Interface 1; 1-2 CPU; 8 GB RAM; 100+ GB storage), connected by a Management Network and an Overlay Network]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/refarch/figures/ovn-services.png0000644000175000017500000027675700000000000027331 0ustar00coreycorey00000000000000
[binary PNG data omitted — service layout figure; the embedded SVG text labels the diagram: Controller Node (neutron-server with the Networking ML2 plug-in, OVN mechanism driver, and OVN layer-3 service plug-in), Database Node (OVN northbound service ovn-northd; OVN northbound and southbound databases, each an ovsdb-server), Compute Nodes (nova-compute with the KVM hypervisor, OVN metadata agent, OVN controller service ovn-controller, OVS local database ovsdb-server, OVS data plane ovs-vswitchd), Gateway Nodes (OVN controller service, OVS local database, OVS data plane)]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/refarch/launch-instance-provider-network.rst0000644000175000017500000012010300000000000031636 0ustar00coreycorey00000000000000
.. _refarch-launch-instance-provider-network:

Launch an instance on a provider network
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

#. On the controller node, source the credentials for a regular
   (non-privileged) project. The following example uses the ``demo`` project.

#. On the controller node, launch an instance using the UUID of the provider
   network.

   ..
code-block:: console $ openstack server create --flavor m1.tiny --image cirros \ --nic net-id=0243277b-4aa8-46d8-9e10-5c9ad5e01521 \ --security-group default --key-name mykey provider-instance +--------------------------------------+-----------------------------------------------+ | Property | Value | +--------------------------------------+-----------------------------------------------+ | OS-DCF:diskConfig | MANUAL | | OS-EXT-AZ:availability_zone | nova | | OS-EXT-STS:power_state | 0 | | OS-EXT-STS:task_state | scheduling | | OS-EXT-STS:vm_state | building | | OS-SRV-USG:launched_at | - | | OS-SRV-USG:terminated_at | - | | accessIPv4 | | | accessIPv6 | | | adminPass | hdF4LMQqC5PB | | config_drive | | | created | 2015-09-17T21:58:18Z | | flavor | m1.tiny (1) | | hostId | | | id | 181c52ba-aebc-4c32-a97d-2e8e82e4eaaf | | image | cirros (38047887-61a7-41ea-9b49-27987d5e8bb9) | | key_name | mykey | | metadata | {} | | name | provider-instance | | os-extended-volumes:volumes_attached | [] | | progress | 0 | | security_groups | default | | status | BUILD | | tenant_id | f5b2ccaa75ac413591f12fcaa096aa5c | | updated | 2015-09-17T21:58:18Z | | user_id | 684286a9079845359882afc3aa5011fb | +--------------------------------------+-----------------------------------------------+ OVN operations ^^^^^^^^^^^^^^ The OVN mechanism driver and OVN perform the following operations when launching an instance. #. The OVN mechanism driver creates a logical port for the instance. .. code-block:: console _uuid : cc891503-1259-47a1-9349-1c0293876664 addresses : ["fa:16:3e:1c:ca:6a 203.0.113.103"] enabled : true external_ids : {"neutron:port_name"=""} name : "cafd4862-c69c-46e4-b3d2-6141ce06b205" options : {} parent_name : [] port_security : ["fa:16:3e:1c:ca:6a 203.0.113.103"] tag : [] type : "" up : true #. The OVN mechanism driver updates the appropriate Address Set entry with the address of this instance: .. code-block:: console _uuid : d0becdea-e1ed-48c4-9afc-e278cdef4629 addresses : ["203.0.113.103"] external_ids : {"neutron:security_group_name"=default} name : "as_ip4_90a78a43_b549_4bee_8822_21fcccab58dc" #. The OVN mechanism driver creates ACL entries for this port and any other ports in the project. .. 
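code-block:: console

      # Not part of the original walkthrough: a minimal sketch of how the
      # objects created in these steps can be inspected as they appear,
      # assuming the ovn-nbctl utility is installed and pointed at the OVN
      # northbound database:
      $ ovn-nbctl list Logical_Switch_Port
      $ ovn-nbctl list Address_Set
      $ ovn-nbctl list ACL

   The ACL entries created for this port look like the following.

   ..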
code-block:: console

      _uuid               : f8d27bfc-4d74-4e73-8fac-c84585443efd
      action              : drop
      direction           : from-lport
      external_ids        : {"neutron:lport"="cafd4862-c69c-46e4-b3d2-6141ce06b205"}
      log                 : false
      match               : "inport == \"cafd4862-c69c-46e4-b3d2-6141ce06b205\" && ip"
      priority            : 1001

      _uuid               : a61d0068-b1aa-4900-9882-e0671d1fc131
      action              : allow
      direction           : to-lport
      external_ids        : {"neutron:lport"="cafd4862-c69c-46e4-b3d2-6141ce06b205"}
      log                 : false
      match               : "outport == \"cafd4862-c69c-46e4-b3d2-6141ce06b205\" && ip4 && ip4.src == 203.0.113.0/24 && udp && udp.src == 67 && udp.dst == 68"
      priority            : 1002

      _uuid               : a5a787b8-7040-4b63-a20a-551bd73eb3d1
      action              : allow-related
      direction           : from-lport
      external_ids        : {"neutron:lport"="cafd4862-c69c-46e4-b3d2-6141ce06b205"}
      log                 : false
      match               : "inport == \"cafd4862-c69c-46e4-b3d2-6141ce06b205\" && ip6"
      priority            : 1002

      _uuid               : 7b3f63b8-e69a-476c-ad3d-37de043232b2
      action              : allow-related
      direction           : to-lport
      external_ids        : {"neutron:lport"="cafd4862-c69c-46e4-b3d2-6141ce06b205"}
      log                 : false
      match               : "outport == \"cafd4862-c69c-46e4-b3d2-6141ce06b205\" && ip4 && ip4.src == $as_ip4_90a78a43_b549_4bee_8822_21fcccab58dc"
      priority            : 1002

      _uuid               : 36dbb1b1-cd30-4454-a0bf-923646eb7c3f
      action              : allow
      direction           : from-lport
      external_ids        : {"neutron:lport"="cafd4862-c69c-46e4-b3d2-6141ce06b205"}
      log                 : false
      match               : "inport == \"cafd4862-c69c-46e4-b3d2-6141ce06b205\" && ip4 && (ip4.dst == 255.255.255.255 || ip4.dst == 203.0.113.0/24) && udp && udp.src == 68 && udp.dst == 67"
      priority            : 1002

      _uuid               : 05a92f66-be48-461e-a7f1-b07bfbd3e667
      action              : allow-related
      direction           : from-lport
      external_ids        : {"neutron:lport"="cafd4862-c69c-46e4-b3d2-6141ce06b205"}
      log                 : false
      match               : "inport == \"cafd4862-c69c-46e4-b3d2-6141ce06b205\" && ip4"
      priority            : 1002

      _uuid               : 37f18377-d6c3-4c44-9e4d-2170710e50ff
      action              : drop
      direction           : to-lport
      external_ids        : {"neutron:lport"="cafd4862-c69c-46e4-b3d2-6141ce06b205"}
      log                 : false
      match               : "outport == \"cafd4862-c69c-46e4-b3d2-6141ce06b205\" && ip"
      priority            : 1001

      _uuid               : 6d4db3cf-c1f1-4006-ad66-ae582a6acd21
      action              : allow-related
      direction           : to-lport
      external_ids        : {"neutron:lport"="cafd4862-c69c-46e4-b3d2-6141ce06b205"}
      log                 : false
      match               : "outport == \"cafd4862-c69c-46e4-b3d2-6141ce06b205\" && ip6 && ip6.src == $as_ip6_90a78a43_b549_4bee_8822_21fcccab58dc"
      priority            : 1002

#. The OVN mechanism driver updates the logical switch information with the
   UUIDs of these objects.

   .. code-block:: console

      _uuid               : 924500c4-8580-4d5f-a7ad-8769f6e58ff5
      acls                : [05a92f66-be48-461e-a7f1-b07bfbd3e667,
                             36dbb1b1-cd30-4454-a0bf-923646eb7c3f,
                             37f18377-d6c3-4c44-9e4d-2170710e50ff,
                             7b3f63b8-e69a-476c-ad3d-37de043232b2,
                             a5a787b8-7040-4b63-a20a-551bd73eb3d1,
                             a61d0068-b1aa-4900-9882-e0671d1fc131,
                             f8d27bfc-4d74-4e73-8fac-c84585443efd]
      external_ids        : {"neutron:network_name"=provider}
      name                : "neutron-670efade-7cd0-4d87-8a04-27f366eb8941"
      ports               : [38cf8b52-47c4-4e93-be8d-06bf71f6a7c9,
                             5e144ab9-3e08-4910-b936-869bbbf254c8,
                             a576b812-9c3e-4cfb-9752-5d8500b3adf9,
                             cc891503-1259-47a1-9349-1c0293876664]

#. The OVN northbound service creates port bindings for the logical ports and
   adds them to the appropriate multicast group.

   * Port bindings

     .. code-block:: console

        _uuid               : e73e3fcd-316a-4418-bbd5-a8a42032b1c3
        chassis             : fc5ab9e7-bc28-40e8-ad52-2949358cc088
        datapath            : bd0ab2b3-4cf4-4289-9529-ef430f6a89e6
        logical_port        : "cafd4862-c69c-46e4-b3d2-6141ce06b205"
        mac                 : ["fa:16:3e:1c:ca:6a 203.0.113.103"]
        options             : {}
        parent_port         : []
        tag                 : []
        tunnel_key          : 4
        type                : ""

   * Multicast groups

     ..
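code-block:: console

        # Not part of the original walkthrough: both southbound tables touched
        # in this step can be listed directly; a sketch assuming ovn-sbctl has
        # access to the OVN southbound database:
        $ ovn-sbctl list Port_Binding
        $ ovn-sbctl list Multicast_Group

     The multicast group entry for this datapath follows.

     ..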
code-block:: console _uuid : 39b32ccd-fa49-4046-9527-13318842461e datapath : bd0ab2b3-4cf4-4289-9529-ef430f6a89e6 name : _MC_flood ports : [030024f4-61c3-4807-859b-07727447c427, 904c3108-234d-41c0-b93c-116b7e352a75, cc5bcd19-bcae-4e29-8cee-3ec8a8a75d46, e73e3fcd-316a-4418-bbd5-a8a42032b1c3] tunnel_key : 65535 #. The OVN northbound service translates the Address Set change into the new Address Set in the OVN southbound database. .. code-block:: console _uuid : 2addbee3-7084-4fff-8f7b-15b1efebdaff addresses : ["203.0.113.103"] name : "as_ip4_90a78a43_b549_4bee_8822_21fcccab58dc" #. The OVN northbound service translates the ACL and logical port objects into logical flows in the OVN southbound database. .. code-block:: console Datapath: bd0ab2b3-4cf4-4289-9529-ef430f6a89e6 Pipeline: ingress table= 0( ls_in_port_sec_l2), priority= 50, match=(inport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" && eth.src == {fa:16:3e:1c:ca:6a}), action=(next;) table= 1( ls_in_port_sec_ip), priority= 90, match=(inport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" && eth.src == fa:16:3e:1c:ca:6a && ip4.src == {203.0.113.103}), action=(next;) table= 1( ls_in_port_sec_ip), priority= 90, match=(inport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" && eth.src == fa:16:3e:1c:ca:6a && ip4.src == 0.0.0.0 && ip4.dst == 255.255.255.255 && udp.src == 68 && udp.dst == 67), action=(next;) table= 1( ls_in_port_sec_ip), priority= 80, match=(inport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" && eth.src == fa:16:3e:1c:ca:6a && ip), action=(drop;) table= 2( ls_in_port_sec_nd), priority= 90, match=(inport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" && eth.src == fa:16:3e:1c:ca:6a && arp.sha == fa:16:3e:1c:ca:6a && (arp.spa == 203.0.113.103 )), action=(next;) table= 2( ls_in_port_sec_nd), priority= 80, match=(inport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" && (arp || nd)), action=(drop;) table= 3( ls_in_pre_acl), priority= 110, match=(nd), action=(next;) table= 3( ls_in_pre_acl), priority= 100, match=(ip), action=(reg0[0] = 1; next;) table= 6( ls_in_acl), priority=65535, match=(ct.inv), action=(drop;) table= 6( ls_in_acl), priority=65535, match=(nd), action=(next;) table= 6( ls_in_acl), priority=65535, match=(ct.est && !ct.rel && !ct.new && !ct.inv), action=(next;) table= 6( ls_in_acl), priority=65535, match=(!ct.est && ct.rel && !ct.new && !ct.inv), action=(next;) table= 6( ls_in_acl), priority= 2002, match=(ct.new && (inport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" && ip6)), action=(reg0[1] = 1; next;) table= 6( ls_in_acl), priority= 2002, match=(inport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" && ip4 && (ip4.dst == 255.255.255.255 || ip4.dst == 203.0.113.0/24) && udp && udp.src == 68 && udp.dst == 67), action=(reg0[1] = 1; next;) table= 6( ls_in_acl), priority= 2002, match=(ct.new && (inport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" && ip4)), action=(reg0[1] = 1; next;) table= 6( ls_in_acl), priority= 2001, match=(inport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" && ip), action=(drop;) table= 6( ls_in_acl), priority= 1, match=(ip), action=(reg0[1] = 1; next;) table= 9( ls_in_arp_rsp), priority= 50, match=(arp.tpa == 203.0.113.103 && arp.op == 1), action=(eth.dst = eth.src; eth.src = fa:16:3e:1c:ca:6a; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = fa:16:3e:1c:ca:6a; arp.tpa = arp.spa; arp.spa = 203.0.113.103; outport = inport; inport = ""; /* Allow sending out inport. 
*/ output;) table=10( ls_in_l2_lkup), priority= 50, match=(eth.dst == fa:16:3e:1c:ca:6a), action=(outport = "cafd4862-c69c-46e4-b3d2-6141ce06b205"; output;) Datapath: bd0ab2b3-4cf4-4289-9529-ef430f6a89e6 Pipeline: egress table= 1( ls_out_pre_acl), priority= 110, match=(nd), action=(next;) table= 1( ls_out_pre_acl), priority= 100, match=(ip), action=(reg0[0] = 1; next;) table= 4( ls_out_acl), priority=65535, match=(!ct.est && ct.rel && !ct.new && !ct.inv), action=(next;) table= 4( ls_out_acl), priority=65535, match=(ct.est && !ct.rel && !ct.new && !ct.inv), action=(next;) table= 4( ls_out_acl), priority=65535, match=(ct.inv), action=(drop;) table= 4( ls_out_acl), priority=65535, match=(nd), action=(next;) table= 4( ls_out_acl), priority= 2002, match=(ct.new && (outport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" && ip6 && ip6.src == $as_ip6_90a78a43_b549_4bee_8822_21fcccab58dc)), action=(reg0[1] = 1; next;) table= 4( ls_out_acl), priority= 2002, match=(ct.new && (outport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" && ip4 && ip4.src == $as_ip4_90a78a43_b549_4bee_8822_21fcccab58dc)), action=(reg0[1] = 1; next;) table= 4( ls_out_acl), priority= 2002, match=(outport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" && ip4 && ip4.src == 203.0.113.0/24 && udp && udp.src == 67 && udp.dst == 68), action=(reg0[1] = 1; next;) table= 4( ls_out_acl), priority= 2001, match=(outport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" && ip), action=(drop;) table= 4( ls_out_acl), priority= 1, match=(ip), action=(reg0[1] = 1; next;) table= 6( ls_out_port_sec_ip), priority= 90, match=(outport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" && eth.dst == fa:16:3e:1c:ca:6a && ip4.dst == {255.255.255.255, 224.0.0.0/4, 203.0.113.103}), action=(next;) table= 6( ls_out_port_sec_ip), priority= 80, match=(outport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" && eth.dst == fa:16:3e:1c:ca:6a && ip), action=(drop;) table= 7( ls_out_port_sec_l2), priority= 50, match=(outport == "cafd4862-c69c-46e4-b3d2-6141ce06b205" && eth.dst == {fa:16:3e:1c:ca:6a}), action=(output;) #. The OVN controller service on each compute node translates these objects into flows on the integration bridge ``br-int``. Exact flows depend on whether the compute node containing the instance also contains a DHCP agent on the subnet. * On the compute node containing the instance, the Compute service creates a port that connects the instance to the integration bridge and OVN creates the following flows: .. code-block:: console # ovs-ofctl show br-int OFPT_FEATURES_REPLY (xid=0x2): dpid:000022024a1dc045 n_tables:254, n_buffers:256 capabilities: FLOW_STATS TABLE_STATS PORT_STATS QUEUE_STATS ARP_MATCH_IP actions: output enqueue set_vlan_vid set_vlan_pcp strip_vlan mod_dl_src mod_dl_dst mod_nw_src mod_nw_dst mod_nw_tos mod_tp_src mod_tp_dst 9(tapcafd4862-c6): addr:fe:16:3e:1c:ca:6a config: 0 state: 0 current: 10MB-FD COPPER speed: 10 Mbps now, 0 Mbps max .. 
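code-block:: console

        # Not part of the original walkthrough: a sketch of how these flows
        # can be inspected on the compute node itself, assuming standard Open
        # vSwitch tooling and the default integration bridge name br-int:
        $ ovs-vsctl list-ports br-int
        $ ovs-ofctl dump-flows br-int

     The port details and flows for this instance follow.

     ..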
code-block:: console cookie=0x0, duration=184.992s, table=0, n_packets=175, n_bytes=15270, idle_age=15, priority=100,in_port=9 actions=load:0x3->NXM_NX_REG5[],load:0x4->OXM_OF_METADATA[], load:0x4->NXM_NX_REG6[],resubmit(,16) cookie=0x0, duration=191.687s, table=16, n_packets=175, n_bytes=15270, idle_age=15, priority=50,reg6=0x4,metadata=0x4, dl_src=fa:16:3e:1c:ca:6a actions=resubmit(,17) cookie=0x0, duration=191.687s, table=17, n_packets=2, n_bytes=684, idle_age=112, priority=90,udp,reg6=0x4,metadata=0x4, dl_src=fa:16:3e:1c:ca:6a,nw_src=0.0.0.0, nw_dst=255.255.255.255,tp_src=68,tp_dst=67 actions=resubmit(,18) cookie=0x0, duration=191.687s, table=17, n_packets=146, n_bytes=12780, idle_age=20, priority=90,ip,reg6=0x4,metadata=0x4, dl_src=fa:16:3e:1c:ca:6a,nw_src=203.0.113.103 actions=resubmit(,18) cookie=0x0, duration=191.687s, table=17, n_packets=17, n_bytes=1386, idle_age=92, priority=80,ipv6,reg6=0x4,metadata=0x4, dl_src=fa:16:3e:1c:ca:6a actions=drop cookie=0x0, duration=191.687s, table=17, n_packets=0, n_bytes=0, idle_age=191, priority=80,ip,reg6=0x4,metadata=0x4, dl_src=fa:16:3e:1c:ca:6a actions=drop cookie=0x0, duration=191.687s, table=18, n_packets=10, n_bytes=420, idle_age=15, priority=90,arp,reg6=0x4,metadata=0x4, dl_src=fa:16:3e:1c:ca:6a,arp_spa=203.0.113.103, arp_sha=fa:16:3e:1c:ca:6a actions=resubmit(,19) cookie=0x0, duration=191.687s, table=18, n_packets=0, n_bytes=0, idle_age=191, priority=80,icmp6,reg6=0x4,metadata=0x4, icmp_type=136,icmp_code=0 actions=drop cookie=0x0, duration=191.687s, table=18, n_packets=0, n_bytes=0, idle_age=191, priority=80,icmp6,reg6=0x4,metadata=0x4, icmp_type=135,icmp_code=0 actions=drop cookie=0x0, duration=191.687s, table=18, n_packets=0, n_bytes=0, idle_age=191, priority=80,arp,reg6=0x4,metadata=0x4 actions=drop cookie=0x0, duration=75.033s, table=19, n_packets=0, n_bytes=0, idle_age=75, priority=110,icmp6,metadata=0x4,icmp_type=135, icmp_code=0 actions=resubmit(,20) cookie=0x0, duration=75.032s, table=19, n_packets=0, n_bytes=0, idle_age=75, priority=110,icmp6,metadata=0x4,icmp_type=136, icmp_code=0 actions=resubmit(,20) cookie=0x0, duration=75.032s, table=19, n_packets=34, n_bytes=5170, idle_age=49, priority=100,ip,metadata=0x4 actions=load:0x1->NXM_NX_REG0[0],resubmit(,20) cookie=0x0, duration=75.032s, table=19, n_packets=0, n_bytes=0, idle_age=75, priority=100,ipv6,metadata=0x4 actions=load:0x1->NXM_NX_REG0[0],resubmit(,20) cookie=0x0, duration=75.032s, table=22, n_packets=0, n_bytes=0, idle_age=75, priority=65535,icmp6,metadata=0x4,icmp_type=136, icmp_code=0 actions=resubmit(,23) cookie=0x0, duration=75.032s, table=22, n_packets=0, n_bytes=0, idle_age=75, priority=65535,icmp6,metadata=0x4,icmp_type=135, icmp_code=0 actions=resubmit(,23) cookie=0x0, duration=75.032s, table=22, n_packets=13, n_bytes=1118, idle_age=49, priority=65535,ct_state=-new+est-rel-inv+trk, metadata=0x4 actions=resubmit(,23) cookie=0x0, duration=75.032s, table=22, n_packets=0, n_bytes=0, idle_age=75, priority=65535,ct_state=-new-est+rel-inv+trk, metadata=0x4 actions=resubmit(,23) cookie=0x0, duration=75.032s, table=22, n_packets=0, n_bytes=0, idle_age=75, priority=65535,ct_state=+inv+trk,metadata=0x4 actions=drop cookie=0x0, duration=75.033s, table=22, n_packets=0, n_bytes=0, idle_age=75, priority=2002,ct_state=+new+trk,ipv6,reg6=0x4, metadata=0x4 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, duration=75.032s, table=22, n_packets=15, n_bytes=1816, idle_age=49, priority=2002,ct_state=+new+trk,ip,reg6=0x4, metadata=0x4 
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, duration=75.032s, table=22, n_packets=0, n_bytes=0, idle_age=75, priority=2002,udp,reg6=0x4,metadata=0x4, nw_dst=203.0.113.0/24,tp_src=68,tp_dst=67 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, duration=75.032s, table=22, n_packets=0, n_bytes=0, idle_age=75, priority=2002,udp,reg6=0x4,metadata=0x4, nw_dst=255.255.255.255,tp_src=68,tp_dst=67 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, duration=75.033s, table=22, n_packets=0, n_bytes=0, idle_age=75, priority=2001,ip,reg6=0x4,metadata=0x4 actions=drop cookie=0x0, duration=75.032s, table=22, n_packets=0, n_bytes=0, idle_age=75, priority=2001,ipv6,reg6=0x4,metadata=0x4 actions=drop cookie=0x0, duration=75.032s, table=22, n_packets=6, n_bytes=2236, idle_age=54, priority=1,ip,metadata=0x4 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, duration=75.032s, table=22, n_packets=0, n_bytes=0, idle_age=75, priority=1,ipv6,metadata=0x4 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, duration=67.064s, table=25, n_packets=0, n_bytes=0, idle_age=67, priority=50,arp,metadata=0x4,arp_tpa=203.0.113.103, arp_op=1 actions=move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[], mod_dl_src:fa:16:3e:1c:ca:6a,load:0x2->NXM_OF_ARP_OP[], move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[], load:0xfa163ed63dca->NXM_NX_ARP_SHA[], move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[], load:0xc0a81268->NXM_OF_ARP_SPA[], move:NXM_NX_REG6[]->NXM_NX_REG7[],load:0->NXM_NX_REG6[], load:0->NXM_OF_IN_PORT[],resubmit(,32) cookie=0x0, duration=75.033s, table=26, n_packets=19, n_bytes=2776, idle_age=44, priority=50,metadata=0x4,dl_dst=fa:16:3e:1c:ca:6a actions=load:0x4->NXM_NX_REG7[],resubmit(,32) cookie=0x0, duration=221031.310s, table=33, n_packets=72, n_bytes=6292, idle_age=20, hard_age=65534, priority=100,reg7=0x3,metadata=0x4 actions=load:0x1->NXM_NX_REG7[],resubmit(,33) cookie=0x0, duration=184.992s, table=34, n_packets=2, n_bytes=684, idle_age=112, priority=100,reg6=0x4,reg7=0x4,metadata=0x4 actions=drop cookie=0x0, duration=75.034s, table=49, n_packets=0, n_bytes=0, idle_age=75, priority=110,icmp6,metadata=0x4,icmp_type=135, icmp_code=0 actions=resubmit(,50) cookie=0x0, duration=75.033s, table=49, n_packets=0, n_bytes=0, idle_age=75, priority=110,icmp6,metadata=0x4,icmp_type=136, icmp_code=0 actions=resubmit(,50) cookie=0x0, duration=75.033s, table=49, n_packets=38, n_bytes=6566, idle_age=49, priority=100,ip,metadata=0x4 actions=load:0x1->NXM_NX_REG0[0],resubmit(,50) cookie=0x0, duration=75.033s, table=49, n_packets=0, n_bytes=0, idle_age=75, priority=100,ipv6,metadata=0x4 actions=load:0x1->NXM_NX_REG0[0],resubmit(,50) cookie=0x0, duration=75.033s, table=52, n_packets=0, n_bytes=0, idle_age=75, priority=65535,ct_state=-new-est+rel-inv+trk, metadata=0x4 actions=resubmit(,53) cookie=0x0, duration=75.033s, table=52, n_packets=13, n_bytes=1118, idle_age=49, priority=65535,ct_state=-new+est-rel-inv+trk, metadata=0x4 actions=resubmit(,53) cookie=0x0, duration=75.033s, table=52, n_packets=0, n_bytes=0, idle_age=75, priority=65535,icmp6,metadata=0x4,icmp_type=135, icmp_code=0 actions=resubmit(,53) cookie=0x0, duration=75.033s, table=52, n_packets=0, n_bytes=0, idle_age=75, priority=65535,icmp6,metadata=0x4,icmp_type=136, icmp_code=0 actions=resubmit(,53) cookie=0x0, duration=75.033s, table=52, n_packets=0, n_bytes=0, idle_age=75, priority=65535,ct_state=+inv+trk,metadata=0x4 actions=drop cookie=0x0, duration=75.034s, table=52, n_packets=4, n_bytes=1538, idle_age=54, priority=2002,udp,reg7=0x4,metadata=0x4, 
nw_src=203.0.113.0/24,tp_src=67,tp_dst=68 actions=load:0x1->NXM_NX_REG0[1],resubmit(,53) cookie=0x0, duration=75.033s, table=52, n_packets=0, n_bytes=0, idle_age=75, priority=2002,ct_state=+new+trk,ip,reg7=0x4, metadata=0x4,nw_src=203.0.113.103 actions=load:0x1->NXM_NX_REG0[1],resubmit(,53) cookie=0x0, duration=2.041s, table=52, n_packets=0, n_bytes=0, idle_age=2, priority=2002,ct_state=+new+trk,ipv6,reg7=0x4, metadata=0x4,ipv6_src=::2/::2 actions=load:0x1->NXM_NX_REG0[1],resubmit(,53) cookie=0x0, duration=75.033s, table=52, n_packets=2, n_bytes=698, idle_age=54, priority=2001,ip,reg7=0x4,metadata=0x4 actions=drop cookie=0x0, duration=75.033s, table=52, n_packets=0, n_bytes=0, idle_age=75, priority=2001,ipv6,reg7=0x4,metadata=0x4 actions=drop cookie=0x0, duration=75.034s, table=52, n_packets=0, n_bytes=0, idle_age=75, priority=1,ipv6,metadata=0x4 actions=load:0x1->NXM_NX_REG0[1],resubmit(,53) cookie=0x0, duration=75.033s, table=52, n_packets=19, n_bytes=3212, idle_age=49, priority=1,ip,metadata=0x4 actions=load:0x1->NXM_NX_REG0[1],resubmit(,53) cookie=0x0, duration=75.034s, table=54, n_packets=17, n_bytes=2656, idle_age=49, priority=90,ip,reg7=0x4,metadata=0x4, dl_dst=fa:16:3e:1c:ca:6a,nw_dst=203.0.113.103 actions=resubmit(,55) cookie=0x0, duration=75.033s, table=54, n_packets=0, n_bytes=0, idle_age=75, priority=90,ip,reg7=0x4,metadata=0x4, dl_dst=fa:16:3e:1c:ca:6a,nw_dst=255.255.255.255 actions=resubmit(,55) cookie=0x0, duration=75.033s, table=54, n_packets=0, n_bytes=0, idle_age=75, priority=90,ip,reg7=0x4,metadata=0x4, dl_dst=fa:16:3e:1c:ca:6a,nw_dst=224.0.0.0/4 actions=resubmit(,55) cookie=0x0, duration=75.033s, table=54, n_packets=0, n_bytes=0, idle_age=75, priority=80,ip,reg7=0x4,metadata=0x4, dl_dst=fa:16:3e:1c:ca:6a actions=drop cookie=0x0, duration=75.033s, table=54, n_packets=0, n_bytes=0, idle_age=75, priority=80,ipv6,reg7=0x4,metadata=0x4, dl_dst=fa:16:3e:1c:ca:6a actions=drop cookie=0x0, duration=75.033s, table=55, n_packets=21, n_bytes=2860, idle_age=44, priority=50,reg7=0x4,metadata=0x4, dl_dst=fa:16:3e:1c:ca:6a actions=resubmit(,64) cookie=0x0, duration=184.992s, table=64, n_packets=166, n_bytes=15088, idle_age=15, priority=100,reg7=0x4,metadata=0x4 actions=output:9 * For each compute node that only contains a DHCP agent on the subnet, OVN creates the following flows: .. 
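code-block:: console

        # Not part of the original walkthrough: the same inspection works on a
        # node that hosts only the DHCP agent for the subnet; a sketch
        # assuming the integration bridge is again named br-int:
        $ ovs-ofctl dump-flows br-int

     On such nodes the flow table looks like the following.

     ..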
code-block:: console cookie=0x0, duration=189.649s, table=16, n_packets=0, n_bytes=0, idle_age=189, priority=50,reg6=0x4,metadata=0x4, dl_src=fa:16:3e:1c:ca:6a actions=resubmit(,17) cookie=0x0, duration=189.650s, table=17, n_packets=0, n_bytes=0, idle_age=189, priority=90,udp,reg6=0x4,metadata=0x4, dl_src=fa:14:3e:1c:ca:6a,nw_src=0.0.0.0, nw_dst=255.255.255.255,tp_src=68,tp_dst=67 actions=resubmit(,18) cookie=0x0, duration=189.649s, table=17, n_packets=0, n_bytes=0, idle_age=189, priority=90,ip,reg6=0x4,metadata=0x4, dl_src=fa:16:3e:1c:ca:6a,nw_src=203.0.113.103 actions=resubmit(,18) cookie=0x0, duration=189.650s, table=17, n_packets=0, n_bytes=0, idle_age=189, priority=80,ipv6,reg6=0x4,metadata=0x4, dl_src=fa:16:3e:1c:ca:6a actions=drop cookie=0x0, duration=189.650s, table=17, n_packets=0, n_bytes=0, idle_age=189, priority=80,ip,reg6=0x4,metadata=0x4, dl_src=fa:16:3e:1c:ca:6a actions=drop cookie=0x0, duration=189.650s, table=18, n_packets=0, n_bytes=0, idle_age=189, priority=90,arp,reg6=0x4,metadata=0x4, dl_src=fa:16:3e:1c:ca:6a,arp_spa=203.0.113.103, arp_sha=fa:16:3e:1c:ca:6a actions=resubmit(,19) cookie=0x0, duration=189.650s, table=18, n_packets=0, n_bytes=0, idle_age=189, priority=80,icmp6,reg6=0x4,metadata=0x4, icmp_type=136,icmp_code=0 actions=drop cookie=0x0, duration=189.650s, table=18, n_packets=0, n_bytes=0, idle_age=189, priority=80,icmp6,reg6=0x4,metadata=0x4, icmp_type=135,icmp_code=0 actions=drop cookie=0x0, duration=189.649s, table=18, n_packets=0, n_bytes=0, idle_age=189, priority=80,arp,reg6=0x4,metadata=0x4 actions=drop cookie=0x0, duration=79.452s, table=19, n_packets=0, n_bytes=0, idle_age=79, priority=110,icmp6,metadata=0x4,icmp_type=135, icmp_code=0 actions=resubmit(,20) cookie=0x0, duration=79.450s, table=19, n_packets=0, n_bytes=0, idle_age=79, priority=110,icmp6,metadata=0x4,icmp_type=136, icmp_code=0 actions=resubmit(,20) cookie=0x0, duration=79.452s, table=19, n_packets=0, n_bytes=0, idle_age=79, priority=100,ipv6,metadata=0x4 actions=load:0x1->NXM_NX_REG0[0],resubmit(,20) cookie=0x0, duration=79.450s, table=19, n_packets=18, n_bytes=3164, idle_age=57, priority=100,ip,metadata=0x4 actions=load:0x1->NXM_NX_REG0[0],resubmit(,20) cookie=0x0, duration=79.450s, table=22, n_packets=6, n_bytes=510, idle_age=57, priority=65535,ct_state=-new+est-rel-inv+trk, metadata=0x4 actions=resubmit(,23) cookie=0x0, duration=79.450s, table=22, n_packets=0, n_bytes=0, idle_age=79, priority=65535,ct_state=-new-est+rel-inv+trk, metadata=0x4 actions=resubmit(,23) cookie=0x0, duration=79.450s, table=22, n_packets=0, n_bytes=0, idle_age=79, priority=65535,icmp6,metadata=0x4,icmp_type=136, icmp_code=0 actions=resubmit(,23) cookie=0x0, duration=79.450s, table=22, n_packets=0, n_bytes=0, idle_age=79, priority=65535,icmp6,metadata=0x4,icmp_type=135, icmp_code=0 actions=resubmit(,23) cookie=0x0, duration=79.450s, table=22, n_packets=0, n_bytes=0, idle_age=79, priority=65535,ct_state=+inv+trk,metadata=0x4 actions=drop cookie=0x0, duration=79.453s, table=22, n_packets=0, n_bytes=0, idle_age=79, priority=2002,ct_state=+new+trk,ipv6,reg6=0x4, metadata=0x4 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, duration=79.450s, table=22, n_packets=0, n_bytes=0, idle_age=79, priority=2002,ct_state=+new+trk,ip,reg6=0x4, metadata=0x4 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, duration=79.450s, table=22, n_packets=0, n_bytes=0, idle_age=79, priority=2002,udp,reg6=0x4,metadata=0x4, nw_dst=203.0.113.0/24,tp_src=68,tp_dst=67 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, 
duration=79.450s, table=22, n_packets=0, n_bytes=0, idle_age=79, priority=2002,udp,reg6=0x4,metadata=0x4, nw_dst=255.255.255.255,tp_src=68,tp_dst=67 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, duration=79.452s, table=22, n_packets=0, n_bytes=0, idle_age=79, priority=2001,ip,reg6=0x4,metadata=0x4 actions=drop cookie=0x0, duration=79.450s, table=22, n_packets=0, n_bytes=0, idle_age=79, priority=2001,ipv6,reg6=0x4,metadata=0x4 actions=drop cookie=0x0, duration=79.450s, table=22, n_packets=0, n_bytes=0, idle_age=79, priority=1,ipv6,metadata=0x4 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, duration=79.450s, table=22, n_packets=12, n_bytes=2654, idle_age=57, priority=1,ip,metadata=0x4 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, duration=71.483s, table=25, n_packets=0, n_bytes=0, idle_age=71, priority=50,arp,metadata=0x4,arp_tpa=203.0.113.103, arp_op=1 actions=move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[], mod_dl_src:fa:16:3e:1c:ca:6a,load:0x2->NXM_OF_ARP_OP[], move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[], load:0xfa163ed63dca->NXM_NX_ARP_SHA[], move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[], load:0xc0a81268->NXM_OF_ARP_SPA[], move:NXM_NX_REG6[]->NXM_NX_REG7[],load:0->NXM_NX_REG6[], load:0->NXM_OF_IN_PORT[],resubmit(,32) cookie=0x0, duration=79.450s, table=26, n_packets=8, n_bytes=1258, idle_age=57, priority=50,metadata=0x4,dl_dst=fa:16:3e:1c:ca:6a actions=load:0x4->NXM_NX_REG7[],resubmit(,32) cookie=0x0, duration=182.952s, table=33, n_packets=74, n_bytes=7040, idle_age=18, priority=100,reg7=0x4,metadata=0x4 actions=load:0x1->NXM_NX_REG7[],resubmit(,33) cookie=0x0, duration=79.451s, table=49, n_packets=0, n_bytes=0, idle_age=79, priority=110,icmp6,metadata=0x4,icmp_type=135, icmp_code=0 actions=resubmit(,50) cookie=0x0, duration=79.450s, table=49, n_packets=0, n_bytes=0, idle_age=79, priority=110,icmp6,metadata=0x4,icmp_type=136, icmp_code=0 actions=resubmit(,50) cookie=0x0, duration=79.450s, table=49, n_packets=18, n_bytes=3164, idle_age=57, priority=100,ip,metadata=0x4 actions=load:0x1->NXM_NX_REG0[0],resubmit(,50) cookie=0x0, duration=79.450s, table=49, n_packets=0, n_bytes=0, idle_age=79, priority=100,ipv6,metadata=0x4 actions=load:0x1->NXM_NX_REG0[0],resubmit(,50) cookie=0x0, duration=79.450s, table=52, n_packets=0, n_bytes=0, idle_age=79, priority=65535,ct_state=-new-est+rel-inv+trk, metadata=0x4 actions=resubmit(,53) cookie=0x0, duration=79.450s, table=52, n_packets=6, n_bytes=510, idle_age=57, priority=65535,ct_state=-new+est-rel-inv+trk, metadata=0x4 actions=resubmit(,53) cookie=0x0, duration=79.450s, table=52, n_packets=0, n_bytes=0, idle_age=79, priority=65535,icmp6,metadata=0x4,icmp_type=135, icmp_code=0 actions=resubmit(,53) cookie=0x0, duration=79.450s, table=52, n_packets=0, n_bytes=0, idle_age=79, priority=65535,icmp6,metadata=0x4,icmp_type=136, icmp_code=0 actions=resubmit(,53) cookie=0x0, duration=79.450s, table=52, n_packets=0, n_bytes=0, idle_age=79, priority=65535,ct_state=+inv+trk,metadata=0x4 actions=drop cookie=0x0, duration=79.452s, table=52, n_packets=0, n_bytes=0, idle_age=79, priority=2002,udp,reg7=0x4,metadata=0x4, nw_src=203.0.113.0/24,tp_src=67,tp_dst=68 actions=load:0x1->NXM_NX_REG0[1],resubmit(,53) cookie=0x0, duration=79.450s, table=52, n_packets=0, n_bytes=0, idle_age=79, priority=2002,ct_state=+new+trk,ip,reg7=0x4, metadata=0x4,nw_src=203.0.113.103 actions=load:0x1->NXM_NX_REG0[1],resubmit(,53) cookie=0x0, duration=71.483s, table=52, n_packets=0, n_bytes=0, idle_age=71, priority=2002,ct_state=+new+trk,ipv6,reg7=0x4, metadata=0x4 
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=79.450s, table=52, n_packets=0, n_bytes=0, idle_age=79, priority=2001,ipv6,reg7=0x4,metadata=0x4 actions=drop
cookie=0x0, duration=79.450s, table=52, n_packets=0, n_bytes=0, idle_age=79, priority=2001,ip,reg7=0x4,metadata=0x4 actions=drop
cookie=0x0, duration=79.453s, table=52, n_packets=0, n_bytes=0, idle_age=79, priority=1,ipv6,metadata=0x4 actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=79.450s, table=52, n_packets=12, n_bytes=2654, idle_age=57, priority=1,ip,metadata=0x4 actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=79.452s, table=54, n_packets=0, n_bytes=0, idle_age=79, priority=90,ip,reg7=0x4,metadata=0x4, dl_dst=fa:16:3e:1c:ca:6a,nw_dst=255.255.255.255 actions=resubmit(,55)
cookie=0x0, duration=79.452s, table=54, n_packets=0, n_bytes=0, idle_age=79, priority=90,ip,reg7=0x4,metadata=0x4, dl_dst=fa:16:3e:1c:ca:6a,nw_dst=203.0.113.103 actions=resubmit(,55)
cookie=0x0, duration=79.452s, table=54, n_packets=0, n_bytes=0, idle_age=79, priority=90,ip,reg7=0x4,metadata=0x4, dl_dst=fa:16:3e:1c:ca:6a,nw_dst=224.0.0.0/4 actions=resubmit(,55)
cookie=0x0, duration=79.450s, table=54, n_packets=0, n_bytes=0, idle_age=79, priority=80,ip,reg7=0x4,metadata=0x4, dl_dst=fa:16:3e:1c:ca:6a actions=drop
cookie=0x0, duration=79.450s, table=54, n_packets=0, n_bytes=0, idle_age=79, priority=80,ipv6,reg7=0x4,metadata=0x4, dl_dst=fa:16:3e:1c:ca:6a actions=drop
cookie=0x0, duration=79.450s, table=55, n_packets=0, n_bytes=0, idle_age=79, priority=50,reg7=0x4,metadata=0x4, dl_dst=fa:16:3e:1c:ca:6a actions=resubmit(,64)

.. _refarch-launch-instance-selfservice-network:

Launch an instance on a self-service network
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

To launch an instance on a self-service network, follow the same steps as
:ref:`launching an instance on the provider network
<refarch-launch-instance-provider-network>`, but using the UUID of the
self-service network.

OVN operations
^^^^^^^^^^^^^^

The OVN mechanism driver and OVN perform the following operations when
launching an instance.

#. The OVN mechanism driver creates a logical port for the instance.

   .. code-block:: console

      _uuid               : c754d1d2-a7fb-4dd0-b14c-c076962b06b9
      addresses           : ["fa:16:3e:15:7d:13 192.168.1.5"]
      enabled             : true
      external_ids        : {"neutron:port_name"=""}
      name                : "eaf36f62-5629-4ec4-b8b9-5e562c40e7ae"
      options             : {}
      parent_name         : []
      port_security       : ["fa:16:3e:15:7d:13 192.168.1.5"]
      tag                 : []
      type                : ""
      up                  : true

#. The OVN mechanism driver updates the appropriate Address Set object(s)
   with the address of the new instance:

   .. code-block:: console

      _uuid               : d0becdea-e1ed-48c4-9afc-e278cdef4629
      addresses           : ["192.168.1.5", "203.0.113.103"]
      external_ids        : {"neutron:security_group_name"=default}
      name                : "as_ip4_90a78a43_b549_4bee_8822_21fcccab58dc"

#. The OVN mechanism driver creates ACL entries for this port and any
   other ports in the project.
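   These objects can also be read back directly from the OVN northbound
   database; a minimal sketch, assuming the ``ovn-nbctl`` client on the
   controller node can reach the northbound database and using the
   ``selfservice`` logical switch name that appears later in this
   walkthrough:

   .. code-block:: console

      # ovn-nbctl list Address_Set
      # ovn-nbctl acl-list neutron-6cc81cae-8c5f-4c09-aaf2-35d0aa95c084

   The raw ACL rows for this example follow:

.. 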
code-block:: console _uuid : 00ecbe8f-c82a-4e18-b688-af2a1941cff7 action : allow direction : from-lport external_ids : {"neutron:lport"="eaf36f62-5629-4ec4-b8b9-5e562c40e7ae"} log : false match : "inport == \"eaf36f62-5629-4ec4-b8b9-5e562c40e7ae\" && ip4 && (ip4.dst == 255.255.255.255 || ip4.dst == 192.168.1.0/24) && udp && udp.src == 68 && udp.dst == 67" priority : 1002 _uuid : 2bf5b7ed-008e-4676-bba5-71fe58897886 action : allow-related direction : from-lport external_ids : {"neutron:lport"="eaf36f62-5629-4ec4-b8b9-5e562c40e7ae"} log : false match : "inport == \"eaf36f62-5629-4ec4-b8b9-5e562c40e7ae\" && ip4" priority : 1002 _uuid : 330b4e27-074f-446a-849b-9ab0018b65c5 action : allow direction : to-lport external_ids : {"neutron:lport"="eaf36f62-5629-4ec4-b8b9-5e562c40e7ae"} log : false match : "outport == \"eaf36f62-5629-4ec4-b8b9-5e562c40e7ae\" && ip4 && ip4.src == 192.168.1.0/24 && udp && udp.src == 67 && udp.dst == 68" priority : 1002 _uuid : 683f52f2-4be6-4bd7-a195-6c782daa7840 action : allow-related direction : from-lport external_ids : {"neutron:lport"="eaf36f62-5629-4ec4-b8b9-5e562c40e7ae"} log : false match : "inport == \"eaf36f62-5629-4ec4-b8b9-5e562c40e7ae\" && ip6" priority : 1002 _uuid : 8160f0b4-b344-43d5-bbd4-ca63a71aa4fc action : drop direction : to-lport external_ids : {"neutron:lport"="eaf36f62-5629-4ec4-b8b9-5e562c40e7ae"} log : false match : "outport == \"eaf36f62-5629-4ec4-b8b9-5e562c40e7ae\" && ip" priority : 1001 _uuid : 97c6b8ca-14ea-4812-8571-95d640a88f4f action : allow-related direction : to-lport external_ids : {"neutron:lport"="eaf36f62-5629-4ec4-b8b9-5e562c40e7ae"} log : false match : "outport == \"eaf36f62-5629-4ec4-b8b9-5e562c40e7ae\" && ip6" priority : 1002 _uuid : 9cfd8eb5-5daa-422e-8fe8-bd22fd7fa826 action : allow-related direction : to-lport external_ids : {"neutron:lport"="eaf36f62-5629-4ec4-b8b9-5e562c40e7ae"} log : false match : "outport == \"eaf36f62-5629-4ec4-b8b9-5e562c40e7ae\" && ip4 && ip4.src == 0.0.0.0/0 && icmp4" priority : 1002 _uuid : f72c2431-7a64-4cea-b84a-118bdc761be2 action : drop direction : from-lport external_ids : {"neutron:lport"="eaf36f62-5629-4ec4-b8b9-5e562c40e7ae"} log : false match : "inport == \"eaf36f62-5629-4ec4-b8b9-5e562c40e7ae\" && ip" priority : 1001 _uuid : f94133fa-ed27-4d5e-a806-0d528e539cb3 action : allow-related direction : to-lport external_ids : {"neutron:lport"="eaf36f62-5629-4ec4-b8b9-5e562c40e7ae"} log : false match : "outport == \"eaf36f62-5629-4ec4-b8b9-5e562c40e7ae\" && ip4 && ip4.src == $as_ip4_90a78a43_b549_4bee_8822_21fcccab58dc" priority : 1002 _uuid : 7f7a92ff-b7e9-49b0-8be0-0dc388035df3 action : allow-related direction : to-lport external_ids : {"neutron:lport"="eaf36f62-5629-4ec4-b8b9-5e562c40e7ae"} log : false match : "outport == \"eaf36f62-5629-4ec4-b8b9-5e562c40e7ae\" && ip6 && ip6.src == $as_ip4_90a78a43_b549_4bee_8822_21fcccab58dc" priority : 1002 #. The OVN mechanism driver updates the logical switch information with the UUIDs of these objects. .. 
code-block:: console _uuid : 15e2c80b-1461-4003-9869-80416cd97de5 acls : [00ecbe8f-c82a-4e18-b688-af2a1941cff7, 2bf5b7ed-008e-4676-bba5-71fe58897886, 330b4e27-074f-446a-849b-9ab0018b65c5, 683f52f2-4be6-4bd7-a195-6c782daa7840, 7f7a92ff-b7e9-49b0-8be0-0dc388035df3, 8160f0b4-b344-43d5-bbd4-ca63a71aa4fc, 97c6b8ca-14ea-4812-8571-95d640a88f4f, 9cfd8eb5-5daa-422e-8fe8-bd22fd7fa826, f72c2431-7a64-4cea-b84a-118bdc761be2, f94133fa-ed27-4d5e-a806-0d528e539cb3] external_ids : {"neutron:network_name"="selfservice"} name : "neutron-6cc81cae-8c5f-4c09-aaf2-35d0aa95c084" ports : [2df457a5-f71c-4a2f-b9ab-d9e488653872, 67c2737c-b380-492b-883b-438048b48e56, c754d1d2-a7fb-4dd0-b14c-c076962b06b9] #. With address sets, it is no longer necessary for the OVN mechanism driver to create separate ACLs for other instances in the project. That is handled automagically via address sets. #. The OVN northbound service translates the updated Address Set object(s) into updated Address Set objects in the OVN southbound database: .. code-block:: console _uuid : 2addbee3-7084-4fff-8f7b-15b1efebdaff addresses : ["192.168.1.5", "203.0.113.103"] name : "as_ip4_90a78a43_b549_4bee_8822_21fcccab58dc" #. The OVN northbound service adds a Port Binding for the new Logical Switch Port object: .. code-block:: console _uuid : 7a558e7b-ed7a-424f-a0cf-ab67d2d832d7 chassis : b67d6da9-0222-4ab1-a852-ab2607610bf8 datapath : 3f6e16b5-a03a-48e5-9b60-7b7a0396c425 logical_port : "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" mac : ["fa:16:3e:b6:91:70 192.168.1.5"] options : {} parent_port : [] tag : [] tunnel_key : 3 type : "" #. The OVN northbound service updates the flooding multicast group for the logical datapath with the new port binding: .. code-block:: console _uuid : c08d0102-c414-4a47-98d9-dd3fa9f9901c datapath : 0b214af6-8910-489c-926a-fd0ed16a8251 name : _MC_flood ports : [3e463ca0-951c-46fd-b6cf-05392fa3aa1f, 794a6f03-7941-41ed-b1c6-0e00c1e18da0, fa7b294d-2a62-45ae-8de3-a41c002de6de] tunnel_key : 65535 #. The OVN northbound service adds Logical Flows based on the updated Address Set, ACL and Logical_Switch_Port objects: .. 
code-block:: console Datapath: 3f6e16b5-a03a-48e5-9b60-7b7a0396c425 Pipeline: ingress table= 0( ls_in_port_sec_l2), priority= 50, match=(inport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" && eth.src == {fa:16:3e:b6:a3:54}), action=(next;) table= 1( ls_in_port_sec_ip), priority= 90, match=(inport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" && eth.src == fa:16:3e:b6:a3:54 && ip4.src == 0.0.0.0 && ip4.dst == 255.255.255.255 && udp.src == 68 && udp.dst == 67), action=(next;) table= 1( ls_in_port_sec_ip), priority= 90, match=(inport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" && eth.src == fa:16:3e:b6:a3:54 && ip4.src == {192.168.1.5}), action=(next;) table= 1( ls_in_port_sec_ip), priority= 80, match=(inport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" && eth.src == fa:16:3e:b6:a3:54 && ip), action=(drop;) table= 2( ls_in_port_sec_nd), priority= 90, match=(inport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" && eth.src == fa:16:3e:b6:a3:54 && arp.sha == fa:16:3e:b6:a3:54 && (arp.spa == 192.168.1.5 )), action=(next;) table= 2( ls_in_port_sec_nd), priority= 80, match=(inport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" && (arp || nd)), action=(drop;) table= 3( ls_in_pre_acl), priority= 110, match=(nd), action=(next;) table= 3( ls_in_pre_acl), priority= 100, match=(ip), action=(reg0[0] = 1; next;) table= 6( ls_in_acl), priority=65535, match=(!ct.est && ct.rel && !ct.new && !ct.inv), action=(next;) table= 6( ls_in_acl), priority=65535, match=(ct.est && !ct.rel && !ct.new && !ct.inv), action=(next;) table= 6( ls_in_acl), priority=65535, match=(ct.inv), action=(drop;) table= 6( ls_in_acl), priority=65535, match=(nd), action=(next;) table= 6( ls_in_acl), priority= 2002, match=(ct.new && (inport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" && ip6)), action=(reg0[1] = 1; next;) table= 6( ls_in_acl), priority= 2002, match=(inport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" && ip4 && (ip4.dst == 255.255.255.255 || ip4.dst == 192.168.1.0/24) && udp && udp.src == 68 && udp.dst == 67), action=(reg0[1] = 1; next;) table= 6( ls_in_acl), priority= 2002, match=(ct.new && (inport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" && ip4)), action=(reg0[1] = 1; next;) table= 6( ls_in_acl), priority= 2001, match=(inport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" && ip), action=(drop;) table= 6( ls_in_acl), priority= 1, match=(ip), action=(reg0[1] = 1; next;) table= 9( ls_in_arp_nd_rsp), priority= 50, match=(arp.tpa == 192.168.1.5 && arp.op == 1), action=(eth.dst = eth.src; eth.src = fa:16:3e:b6:a3:54; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = fa:16:3e:b6:a3:54; arp.tpa = arp.spa; arp.spa = 192.168.1.5; outport = inport; inport = ""; /* Allow sending out inport. 
*/ output;) table=10( ls_in_l2_lkup), priority= 50, match=(eth.dst == fa:16:3e:b6:a3:54), action=(outport = "e9cb7857-4cb1-4e91-aae5-165a7ab5b387"; output;) Datapath: 3f6e16b5-a03a-48e5-9b60-7b7a0396c425 Pipeline: egress table= 1( ls_out_pre_acl), priority= 110, match=(nd), action=(next;) table= 1( ls_out_pre_acl), priority= 100, match=(ip), action=(reg0[0] = 1; next;) table= 4( ls_out_acl), priority=65535, match=(nd), action=(next;) table= 4( ls_out_acl), priority=65535, match=(!ct.est && ct.rel && !ct.new && !ct.inv), action=(next;) table= 4( ls_out_acl), priority=65535, match=(ct.est && !ct.rel && !ct.new && !ct.inv), action=(next;) table= 4( ls_out_acl), priority=65535, match=(ct.inv), action=(drop;) table= 4( ls_out_acl), priority= 2002, match=(ct.new && (outport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" && ip6 && ip6.src == $as_ip6_90a78a43_b549_4bee_8822_21fcccab58dc)), action=(reg0[1] = 1; next;) table= 4( ls_out_acl), priority= 2002, match=(ct.new && (outport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" && ip4 && ip4.src == $as_ip4_90a78a43_b549_4bee_8822_21fcccab58dc)), action=(reg0[1] = 1; next;) table= 4( ls_out_acl), priority= 2002, match=(outport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" && ip4 && ip4.src == 192.168.1.0/24 && udp && udp.src == 67 && udp.dst == 68), action=(reg0[1] = 1; next;) table= 4( ls_out_acl), priority= 2001, match=(outport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" && ip), action=(drop;) table= 4( ls_out_acl), priority= 1, match=(ip), action=(reg0[1] = 1; next;) table= 6( ls_out_port_sec_ip), priority= 90, match=(outport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" && eth.dst == fa:16:3e:b6:a3:54 && ip4.dst == {255.255.255.255, 224.0.0.0/4, 192.168.1.5}), action=(next;) table= 6( ls_out_port_sec_ip), priority= 80, match=(outport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" && eth.dst == fa:16:3e:b6:a3:54 && ip), action=(drop;) table= 7( ls_out_port_sec_l2), priority= 50, match=(outport == "e9cb7857-4cb1-4e91-aae5-165a7ab5b387" && eth.dst == {fa:16:3e:b6:a3:54}), action=(output;) #. The OVN controller service on each compute node translates these objects into flows on the integration bridge ``br-int``. Exact flows depend on whether the compute node containing the instance also contains a DHCP agent on the subnet. * On the compute node containing the instance, the Compute service creates a port that connects the instance to the integration bridge and OVN creates the following flows: .. code-block:: console # ovs-ofctl show br-int OFPT_FEATURES_REPLY (xid=0x2): dpid:000022024a1dc045 n_tables:254, n_buffers:256 capabilities: FLOW_STATS TABLE_STATS PORT_STATS QUEUE_STATS ARP_MATCH_IP actions: output enqueue set_vlan_vid set_vlan_pcp strip_vlan mod_dl_src mod_dl_dst mod_nw_src mod_nw_dst mod_nw_tos mod_tp_src mod_tp_dst 12(tapeaf36f62-56): addr:fe:16:3e:15:7d:13 config: 0 state: 0 current: 10MB-FD COPPER .. 
code-block:: console cookie=0x0, duration=179.460s, table=0, n_packets=122, n_bytes=10556, idle_age=1, priority=100,in_port=12 actions=load:0x4->NXM_NX_REG5[],load:0x5->OXM_OF_METADATA[], load:0x3->NXM_NX_REG6[],resubmit(,16) cookie=0x0, duration=187.408s, table=16, n_packets=122, n_bytes=10556, idle_age=1, priority=50,reg6=0x3,metadata=0x5, dl_src=fa:16:3e:15:7d:13 actions=resubmit(,17) cookie=0x0, duration=187.408s, table=17, n_packets=2, n_bytes=684, idle_age=84, priority=90,udp,reg6=0x3,metadata=0x5, dl_src=fa:16:3e:15:7d:13,nw_src=0.0.0.0,nw_dst=255.255.255.255, tp_src=68,tp_dst=67 actions=resubmit(,18) cookie=0x0, duration=187.408s, table=17, n_packets=98, n_bytes=8276, idle_age=1, priority=90,ip,reg6=0x3,metadata=0x5, dl_src=fa:16:3e:15:7d:13,nw_src=192.168.1.5 actions=resubmit(,18) cookie=0x0, duration=187.408s, table=17, n_packets=17, n_bytes=1386, idle_age=55, priority=80,ipv6,reg6=0x3,metadata=0x5, dl_src=fa:16:3e:15:7d:13 actions=drop cookie=0x0, duration=187.408s, table=17, n_packets=0, n_bytes=0, idle_age=187, priority=80,ip,reg6=0x3,metadata=0x5, dl_src=fa:16:3e:15:7d:13 actions=drop cookie=0x0, duration=187.408s, table=18, n_packets=5, n_bytes=210, idle_age=10, priority=90,arp,reg6=0x3,metadata=0x5, dl_src=fa:16:3e:15:7d:13,arp_spa=192.168.1.5, arp_sha=fa:16:3e:15:7d:13 actions=resubmit(,19) cookie=0x0, duration=187.408s, table=18, n_packets=0, n_bytes=0, idle_age=187, priority=80,icmp6,reg6=0x3,metadata=0x5, icmp_type=135,icmp_code=0 actions=drop cookie=0x0, duration=187.408s, table=18, n_packets=0, n_bytes=0, idle_age=187, priority=80,icmp6,reg6=0x3,metadata=0x5, icmp_type=136,icmp_code=0 actions=drop cookie=0x0, duration=187.408s, table=18, n_packets=0, n_bytes=0, idle_age=187, priority=80,arp,reg6=0x3,metadata=0x5 actions=drop cookie=0x0, duration=47.068s, table=19, n_packets=0, n_bytes=0, idle_age=47, priority=110,icmp6,metadata=0x5,icmp_type=135, icmp_code=0 actions=resubmit(,20) cookie=0x0, duration=47.068s, table=19, n_packets=0, n_bytes=0, idle_age=47, priority=110,icmp6,metadata=0x5,icmp_type=136, icmp_code=0 actions=resubmit(,20) cookie=0x0, duration=47.068s, table=19, n_packets=33, n_bytes=4081, idle_age=0, priority=100,ip,metadata=0x5 actions=load:0x1->NXM_NX_REG0[0],resubmit(,20) cookie=0x0, duration=47.068s, table=19, n_packets=0, n_bytes=0, idle_age=47, priority=100,ipv6,metadata=0x5 actions=load:0x1->NXM_NX_REG0[0],resubmit(,20) cookie=0x0, duration=47.068s, table=22, n_packets=15, n_bytes=1392, idle_age=0, priority=65535,ct_state=-new+est-rel-inv+trk, metadata=0x5 actions=resubmit(,23) cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0, idle_age=47, priority=65535,ct_state=-new-est+rel-inv+trk, metadata=0x5 actions=resubmit(,23) cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0, idle_age=47, priority=65535,icmp6,metadata=0x5,icmp_type=135, icmp_code=0 actions=resubmit(,23) cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0, idle_age=47, priority=65535,icmp6,metadata=0x5,icmp_type=136, icmp_code=0 actions=resubmit(,23) cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0, idle_age=47, priority=65535,ct_state=+inv+trk,metadata=0x5 actions=drop cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0, idle_age=47, priority=2002,ct_state=+new+trk,ipv6,reg6=0x3, metadata=0x5 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, duration=47.068s, table=22, n_packets=16, n_bytes=1922, idle_age=2, priority=2002,ct_state=+new+trk,ip,reg6=0x3, metadata=0x5 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) 
cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0, idle_age=47, priority=2002,udp,reg6=0x3,metadata=0x5, nw_dst=255.255.255.255,tp_src=68,tp_dst=67 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0, idle_age=47, priority=2002,udp,reg6=0x3,metadata=0x5, nw_dst=192.168.1.0/24,tp_src=68,tp_dst=67 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, duration=47.069s, table=22, n_packets=0, n_bytes=0, idle_age=47, priority=2001,ipv6,reg6=0x3,metadata=0x5 actions=drop cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0, idle_age=47, priority=2001,ip,reg6=0x3,metadata=0x5 actions=drop cookie=0x0, duration=47.068s, table=22, n_packets=2, n_bytes=767, idle_age=27, priority=1,ip,metadata=0x5 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0, idle_age=47, priority=1,ipv6,metadata=0x5 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, duration=179.457s, table=25, n_packets=2, n_bytes=84, idle_age=33, priority=50,arp,metadata=0x5,arp_tpa=192.168.1.5, arp_op=1 actions=move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[], mod_dl_src:fa:16:3e:15:7d:13,load:0x2->NXM_OF_ARP_OP[], move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[], load:0xfa163e157d13->NXM_NX_ARP_SHA[], move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[], load:0xc0a80105->NXM_OF_ARP_SPA[], move:NXM_NX_REG6[]->NXM_NX_REG7[], load:0->NXM_NX_REG6[],load:0->NXM_OF_IN_PORT[],resubmit(,32) cookie=0x0, duration=187.408s, table=26, n_packets=50, n_bytes=4806, idle_age=1, priority=50,metadata=0x5,dl_dst=fa:16:3e:15:7d:13 actions=load:0x3->NXM_NX_REG7[],resubmit(,32) cookie=0x0, duration=469.575s, table=33, n_packets=74, n_bytes=7040, idle_age=305, priority=100,reg7=0x4,metadata=0x4 actions=load:0x1->NXM_NX_REG7[],resubmit(,33) cookie=0x0, duration=179.460s, table=34, n_packets=2, n_bytes=684, idle_age=84, priority=100,reg6=0x3,reg7=0x3,metadata=0x5 actions=drop cookie=0x0, duration=47.069s, table=49, n_packets=0, n_bytes=0, idle_age=47, priority=110,icmp6,metadata=0x5,icmp_type=135, icmp_code=0 actions=resubmit(,50) cookie=0x0, duration=47.068s, table=49, n_packets=0, n_bytes=0, idle_age=47, priority=110,icmp6,metadata=0x5,icmp_type=136, icmp_code=0 actions=resubmit(,50) cookie=0x0, duration=47.068s, table=49, n_packets=34, n_bytes=4455, idle_age=0, priority=100,ip,metadata=0x5 actions=load:0x1->NXM_NX_REG0[0],resubmit(,50) cookie=0x0, duration=47.068s, table=49, n_packets=0, n_bytes=0, idle_age=47, priority=100,ipv6,metadata=0x5 actions=load:0x1->NXM_NX_REG0[0],resubmit(,50) cookie=0x0, duration=47.069s, table=52, n_packets=0, n_bytes=0, idle_age=47, priority=65535,ct_state=+inv+trk,metadata=0x5 actions=drop cookie=0x0, duration=47.069s, table=52, n_packets=0, n_bytes=0, idle_age=47, priority=65535,icmp6,metadata=0x5,icmp_type=136, icmp_code=0 actions=resubmit(,53) cookie=0x0, duration=47.068s, table=52, n_packets=0, n_bytes=0, idle_age=47, priority=65535,icmp6,metadata=0x5,icmp_type=135, icmp_code=0 actions=resubmit(,53) cookie=0x0, duration=47.068s, table=52, n_packets=22, n_bytes=2000, idle_age=0, priority=65535,ct_state=-new+est-rel-inv+trk, metadata=0x5 actions=resubmit(,53) cookie=0x0, duration=47.068s, table=52, n_packets=0, n_bytes=0, idle_age=47, priority=65535,ct_state=-new-est+rel-inv+trk, metadata=0x5 actions=resubmit(,53) cookie=0x0, duration=47.068s, table=52, n_packets=0, n_bytes=0, idle_age=47, priority=2002,ct_state=+new+trk,ip,reg7=0x3, metadata=0x5,nw_src=192.168.1.5 
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53) cookie=0x0, duration=47.068s, table=52, n_packets=0, n_bytes=0, idle_age=47, priority=2002,ct_state=+new+trk,ip,reg7=0x3, metadata=0x5,nw_src=203.0.113.103 actions=load:0x1->NXM_NX_REG0[1],resubmit(,53) cookie=0x0, duration=47.068s, table=52, n_packets=3, n_bytes=1141, idle_age=27, priority=2002,udp,reg7=0x3,metadata=0x5, nw_src=192.168.1.0/24,tp_src=67,tp_dst=68 actions=load:0x1->NXM_NX_REG0[1],resubmit(,53) cookie=0x0, duration=39.497s, table=52, n_packets=0, n_bytes=0, idle_age=39, priority=2002,ct_state=+new+trk,ipv6,reg7=0x3, metadata=0x5 actions=load:0x1->NXM_NX_REG0[1],resubmit(,53) cookie=0x0, duration=47.068s, table=52, n_packets=0, n_bytes=0, idle_age=47, priority=2001,ip,reg7=0x3,metadata=0x5 actions=drop cookie=0x0, duration=47.068s, table=52, n_packets=0, n_bytes=0, idle_age=47, priority=2001,ipv6,reg7=0x3,metadata=0x5 actions=drop cookie=0x0, duration=47.068s, table=52, n_packets=9, n_bytes=1314, idle_age=2, priority=1,ip,metadata=0x5 actions=load:0x1->NXM_NX_REG0[1],resubmit(,53) cookie=0x0, duration=47.068s, table=52, n_packets=0, n_bytes=0, idle_age=47, priority=1,ipv6,metadata=0x5 actions=load:0x1->NXM_NX_REG0[1],resubmit(,53) cookie=0x0, duration=47.068s, table=54, n_packets=23, n_bytes=2945, idle_age=0, priority=90,ip,reg7=0x3,metadata=0x5, dl_dst=fa:16:3e:15:7d:13,nw_dst=192.168.1.11 actions=resubmit(,55) cookie=0x0, duration=47.068s, table=54, n_packets=0, n_bytes=0, idle_age=47, priority=90,ip,reg7=0x3,metadata=0x5, dl_dst=fa:16:3e:15:7d:13,nw_dst=255.255.255.255 actions=resubmit(,55) cookie=0x0, duration=47.068s, table=54, n_packets=0, n_bytes=0, idle_age=47, priority=90,ip,reg7=0x3,metadata=0x5, dl_dst=fa:16:3e:15:7d:13,nw_dst=224.0.0.0/4 actions=resubmit(,55) cookie=0x0, duration=47.068s, table=54, n_packets=0, n_bytes=0, idle_age=47, priority=80,ip,reg7=0x3,metadata=0x5, dl_dst=fa:16:3e:15:7d:13 actions=drop cookie=0x0, duration=47.068s, table=54, n_packets=0, n_bytes=0, idle_age=47, priority=80,ipv6,reg7=0x3,metadata=0x5, dl_dst=fa:16:3e:15:7d:13 actions=drop cookie=0x0, duration=47.068s, table=55, n_packets=25, n_bytes=3029, idle_age=0, priority=50,reg7=0x3,metadata=0x7, dl_dst=fa:16:3e:15:7d:13 actions=resubmit(,64) cookie=0x0, duration=179.460s, table=64, n_packets=116, n_bytes=10623, idle_age=1, priority=100,reg7=0x3,metadata=0x5 actions=output:12 * For each compute node that only contains a DHCP agent on the subnet, OVN creates the following flows: .. 
code-block:: console cookie=0x0, duration=192.587s, table=16, n_packets=0, n_bytes=0, idle_age=192, priority=50,reg6=0x3,metadata=0x5, dl_src=fa:16:3e:15:7d:13 actions=resubmit(,17) cookie=0x0, duration=192.587s, table=17, n_packets=0, n_bytes=0, idle_age=192, priority=90,ip,reg6=0x3,metadata=0x5, dl_src=fa:16:3e:15:7d:13,nw_src=192.168.1.5 actions=resubmit(,18) cookie=0x0, duration=192.587s, table=17, n_packets=0, n_bytes=0, idle_age=192, priority=90,udp,reg6=0x3,metadata=0x5, dl_src=fa:16:3e:15:7d:13,nw_src=0.0.0.0, nw_dst=255.255.255.255,tp_src=68,tp_dst=67 actions=resubmit(,18) cookie=0x0, duration=192.587s, table=17, n_packets=0, n_bytes=0, idle_age=192, priority=80,ipv6,reg6=0x3,metadata=0x5, dl_src=fa:16:3e:15:7d:13 actions=drop cookie=0x0, duration=192.587s, table=17, n_packets=0, n_bytes=0, idle_age=192, priority=80,ip,reg6=0x3,metadata=0x5, dl_src=fa:16:3e:15:7d:13 actions=drop cookie=0x0, duration=192.587s, table=18, n_packets=0, n_bytes=0, idle_age=192, priority=90,arp,reg6=0x3,metadata=0x5, dl_src=fa:16:3e:15:7d:13,arp_spa=192.168.1.5, arp_sha=fa:16:3e:15:7d:13 actions=resubmit(,19) cookie=0x0, duration=192.587s, table=18, n_packets=0, n_bytes=0, idle_age=192, priority=80,arp,reg6=0x3,metadata=0x5 actions=drop cookie=0x0, duration=192.587s, table=18, n_packets=0, n_bytes=0, idle_age=192, priority=80,icmp6,reg6=0x3,metadata=0x5, icmp_type=135,icmp_code=0 actions=drop cookie=0x0, duration=192.587s, table=18, n_packets=0, n_bytes=0, idle_age=192, priority=80,icmp6,reg6=0x3,metadata=0x5, icmp_type=136,icmp_code=0 actions=drop cookie=0x0, duration=47.068s, table=19, n_packets=0, n_bytes=0, idle_age=47, priority=110,icmp6,metadata=0x5,icmp_type=135, icmp_code=0 actions=resubmit(,20) cookie=0x0, duration=47.068s, table=19, n_packets=0, n_bytes=0, idle_age=47, priority=110,icmp6,metadata=0x5,icmp_type=136, icmp_code=0 actions=resubmit(,20) cookie=0x0, duration=47.068s, table=19, n_packets=33, n_bytes=4081, idle_age=0, priority=100,ip,metadata=0x5 actions=load:0x1->NXM_NX_REG0[0],resubmit(,20) cookie=0x0, duration=47.068s, table=19, n_packets=0, n_bytes=0, idle_age=47, priority=100,ipv6,metadata=0x5 actions=load:0x1->NXM_NX_REG0[0],resubmit(,20) cookie=0x0, duration=47.068s, table=22, n_packets=15, n_bytes=1392, idle_age=0, priority=65535,ct_state=-new+est-rel-inv+trk, metadata=0x5 actions=resubmit(,23) cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0, idle_age=47, priority=65535,ct_state=-new-est+rel-inv+trk, metadata=0x5 actions=resubmit(,23) cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0, idle_age=47, priority=65535,icmp6,metadata=0x5,icmp_type=135, icmp_code=0 actions=resubmit(,23) cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0, idle_age=47, priority=65535,icmp6,metadata=0x5,icmp_type=136, icmp_code=0 actions=resubmit(,23) cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0, idle_age=47, priority=65535,ct_state=+inv+trk,metadata=0x5 actions=drop cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0, idle_age=47, priority=2002,ct_state=+new+trk,ipv6,reg6=0x3, metadata=0x5 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, duration=47.068s, table=22, n_packets=16, n_bytes=1922, idle_age=2, priority=2002,ct_state=+new+trk,ip,reg6=0x3, metadata=0x5 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0, idle_age=47, priority=2002,udp,reg6=0x3,metadata=0x5, nw_dst=255.255.255.255,tp_src=68,tp_dst=67 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, 
duration=47.068s, table=22, n_packets=0, n_bytes=0, idle_age=47, priority=2002,udp,reg6=0x3,metadata=0x5, nw_dst=192.168.1.0/24,tp_src=68,tp_dst=67 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, duration=47.069s, table=22, n_packets=0, n_bytes=0, idle_age=47, priority=2001,ipv6,reg6=0x3,metadata=0x5 actions=drop cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0, idle_age=47, priority=2001,ip,reg6=0x3,metadata=0x5 actions=drop cookie=0x0, duration=47.068s, table=22, n_packets=2, n_bytes=767, idle_age=27, priority=1,ip,metadata=0x5 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, duration=47.068s, table=22, n_packets=0, n_bytes=0, idle_age=47, priority=1,ipv6,metadata=0x5 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, duration=179.457s, table=25, n_packets=2, n_bytes=84, idle_age=33, priority=50,arp,metadata=0x5,arp_tpa=192.168.1.5, arp_op=1 actions=move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[], mod_dl_src:fa:16:3e:15:7d:13,load:0x2->NXM_OF_ARP_OP[], move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[], load:0xfa163e157d13->NXM_NX_ARP_SHA[], move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[], load:0xc0a80105->NXM_OF_ARP_SPA[], move:NXM_NX_REG6[]->NXM_NX_REG7[], load:0->NXM_NX_REG6[],load:0->NXM_OF_IN_PORT[],resubmit(,32) cookie=0x0, duration=192.587s, table=26, n_packets=61, n_bytes=5607, idle_age=6, priority=50,metadata=0x5,dl_dst=fa:16:3e:15:7d:13 actions=load:0x3->NXM_NX_REG7[],resubmit(,32) cookie=0x0, duration=184.640s, table=32, n_packets=61, n_bytes=5607, idle_age=6, priority=100,reg7=0x3,metadata=0x5 actions=load:0x5->NXM_NX_TUN_ID[0..23], set_field:0x3/0xffffffff->tun_metadata0, move:NXM_NX_REG6[0..14]->NXM_NX_TUN_METADATA0[16..30],output:4 cookie=0x0, duration=47.069s, table=49, n_packets=0, n_bytes=0, idle_age=47, priority=110,icmp6,metadata=0x5,icmp_type=135, icmp_code=0 actions=resubmit(,50) cookie=0x0, duration=47.068s, table=49, n_packets=0, n_bytes=0, idle_age=47, priority=110,icmp6,metadata=0x5,icmp_type=136, icmp_code=0 actions=resubmit(,50) cookie=0x0, duration=47.068s, table=49, n_packets=34, n_bytes=4455, idle_age=0, priority=100,ip,metadata=0x5 actions=load:0x1->NXM_NX_REG0[0],resubmit(,50) cookie=0x0, duration=47.068s, table=49, n_packets=0, n_bytes=0, idle_age=47, priority=100,ipv6,metadata=0x5 actions=load:0x1->NXM_NX_REG0[0],resubmit(,50) cookie=0x0, duration=192.587s, table=52, n_packets=0, n_bytes=0, idle_age=192, priority=65535,ct_state=+inv+trk, metadata=0x5 actions=drop cookie=0x0, duration=192.587s, table=52, n_packets=0, n_bytes=0, idle_age=192, priority=65535,ct_state=-new-est+rel-inv+trk, metadata=0x5 actions=resubmit(,50) cookie=0x0, duration=192.587s, table=52, n_packets=27, n_bytes=2316, idle_age=6, priority=65535,ct_state=-new+est-rel-inv+trk, metadata=0x5 actions=resubmit(,50) cookie=0x0, duration=192.587s, table=52, n_packets=0, n_bytes=0, idle_age=192, priority=2002,ct_state=+new+trk,icmp,reg7=0x3, metadata=0x5 actions=load:0x1->NXM_NX_REG0[1],resubmit(,50) cookie=0x0, duration=192.587s, table=52, n_packets=0, n_bytes=0, idle_age=192, priority=2002,ct_state=+new+trk,ipv6,reg7=0x3, metadata=0x5 actions=load:0x1->NXM_NX_REG0[1],resubmit(,50) cookie=0x0, duration=192.587s, table=52, n_packets=0, n_bytes=0, idle_age=192, priority=2002,udp,reg7=0x3,metadata=0x5, nw_src=192.168.1.0/24,tp_src=67,tp_dst=68 actions=load:0x1->NXM_NX_REG0[1],resubmit(,50) cookie=0x0, duration=192.587s, table=52, n_packets=0, n_bytes=0, idle_age=192, priority=2002,ct_state=+new+trk,ip,reg7=0x3, metadata=0x5,nw_src=203.0.113.103 
actions=load:0x1->NXM_NX_REG0[1],resubmit(,50)
cookie=0x0, duration=192.587s, table=52, n_packets=0, n_bytes=0, idle_age=192, priority=2001,ip,reg7=0x3,metadata=0x5 actions=drop
cookie=0x0, duration=192.587s, table=52, n_packets=0, n_bytes=0, idle_age=192, priority=2001,ipv6,reg7=0x3,metadata=0x5 actions=drop
cookie=0x0, duration=192.587s, table=52, n_packets=25, n_bytes=2604, idle_age=6, priority=1,ip,metadata=0x5 actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=192.587s, table=52, n_packets=0, n_bytes=0, idle_age=192, priority=1,ipv6,metadata=0x5 actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
cookie=0x0, duration=192.587s, table=54, n_packets=0, n_bytes=0, idle_age=192, priority=90,ip,reg7=0x3,metadata=0x5, dl_dst=fa:16:3e:15:7d:13,nw_dst=224.0.0.0/4 actions=resubmit(,55)
cookie=0x0, duration=192.587s, table=54, n_packets=0, n_bytes=0, idle_age=192, priority=90,ip,reg7=0x3,metadata=0x5, dl_dst=fa:16:3e:15:7d:13,nw_dst=255.255.255.255 actions=resubmit(,55)
cookie=0x0, duration=192.587s, table=54, n_packets=0, n_bytes=0, idle_age=192, priority=90,ip,reg7=0x3,metadata=0x5, dl_dst=fa:16:3e:15:7d:13,nw_dst=192.168.1.5 actions=resubmit(,55)
cookie=0x0, duration=192.587s, table=54, n_packets=0, n_bytes=0, idle_age=192, priority=80,ipv6,reg7=0x3,metadata=0x5, dl_dst=fa:16:3e:15:7d:13 actions=drop
cookie=0x0, duration=192.587s, table=54, n_packets=0, n_bytes=0, idle_age=192, priority=80,ip,reg7=0x3,metadata=0x5, dl_dst=fa:16:3e:15:7d:13 actions=drop
cookie=0x0, duration=192.587s, table=55, n_packets=0, n_bytes=0, idle_age=192, priority=50,reg7=0x3,metadata=0x5, dl_dst=fa:16:3e:15:7d:13 actions=resubmit(,64)

* For each compute node that contains neither the instance nor a DHCP agent
  on the subnet, OVN creates the following flows:

  .. code-block:: console

     cookie=0x0, duration=189.763s, table=52, n_packets=0, n_bytes=0, idle_age=189, priority=2002,ct_state=+new+trk,ipv6,reg7=0x4, metadata=0x4 actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)
     cookie=0x0, duration=189.763s, table=52, n_packets=0, n_bytes=0, idle_age=189, priority=2002,ct_state=+new+trk,ip,reg7=0x4, metadata=0x4,nw_src=192.168.1.5 actions=load:0x1->NXM_NX_REG0[1],resubmit(,53)

.. _refarch-provider-networks:

Provider networks
-----------------

A provider (external) network bridges instances to physical network
infrastructure that provides layer-3 services. In most cases, provider
networks implement layer-2 segmentation using VLAN IDs.

A provider network maps to a provider bridge on each compute node that
supports launching instances on the provider network. You can create more
than one provider bridge, each one requiring a unique name and an underlying
physical network interface to prevent switching loops. Provider networks and
bridges can use arbitrary names, but each mapping must reference valid
provider network and bridge names (see the sketch after this introduction).

Each provider bridge can contain one ``flat`` (untagged) network and up to
the maximum number of ``vlan`` (tagged) networks that the physical network
infrastructure supports, typically around 4000.

Creating a provider network involves several commands at the host, OVS, and
Networking service levels that yield a series of operations at the OVN level
to create the virtual network components.
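To illustrate the mapping format mentioned above: a node with two provider
bridges carries both ``physnet:bridge`` pairs in a single comma-separated
``ovn-bridge-mappings`` value. A minimal sketch, where the second network
and bridge names are hypothetical:

.. code-block:: console

   # ovs-vsctl set Open_vSwitch . \
     external-ids:ovn-bridge-mappings=provider:br-provider,provider2:br-provider2

Each pair on the node must reference a distinct bridge.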
The following example creates a ``flat`` provider network ``provider``
using the provider bridge ``br-provider`` and binds a subnet to it.

Create a provider network
~~~~~~~~~~~~~~~~~~~~~~~~~

#. On each compute node, create the provider bridge, map the provider
   network to it, and add the underlying physical or logical (typically
   a bond) network interface to it.

   .. code-block:: console

      # ovs-vsctl --may-exist add-br br-provider -- set bridge br-provider \
        protocols=OpenFlow13
      # ovs-vsctl set Open_vSwitch . external-ids:ovn-bridge-mappings=provider:br-provider
      # ovs-vsctl --may-exist add-port br-provider INTERFACE_NAME

   Replace ``INTERFACE_NAME`` with the name of the underlying network
   interface.

   .. note::

      These commands provide no output if successful.

#. On the controller node, source the administrative project credentials.

#. On the controller node, to enable this chassis to host gateway routers
   for external connectivity, set ``ovn-cms-options`` to
   ``enable-chassis-as-gw``.

   .. code-block:: console

      # ovs-vsctl set Open_vSwitch . external-ids:ovn-cms-options="enable-chassis-as-gw"

   .. note::

      This command provides no output if successful.

#. On the controller node, create the provider network in the Networking
   service. In this case, instances and routers in other projects can use
   the network.

   .. code-block:: console

      $ openstack network create --external --share \
        --provider-physical-network provider --provider-network-type flat \
        provider
      +---------------------------+--------------------------------------+
      | Field                     | Value                                |
      +---------------------------+--------------------------------------+
      | admin_state_up            | UP                                   |
      | availability_zone_hints   |                                      |
      | availability_zones        | nova                                 |
      | created_at                | 2016-06-15 15:50:37+00:00            |
      | description               |                                      |
      | id                        | 0243277b-4aa8-46d8-9e10-5c9ad5e01521 |
      | ipv4_address_scope        | None                                 |
      | ipv6_address_scope        | None                                 |
      | is_default                | False                                |
      | mtu                       | 1500                                 |
      | name                      | provider                             |
      | project_id                | b1ebf33664df402693f729090cfab861     |
      | provider:network_type     | flat                                 |
      | provider:physical_network | provider                             |
      | provider:segmentation_id  | None                                 |
      | qos_policy_id             | None                                 |
      | router:external           | External                             |
      | shared                    | True                                 |
      | status                    | ACTIVE                               |
      | subnets                   | 32a61337-c5a3-448a-a1e7-c11d6f062c21 |
      | tags                      | []                                   |
      | updated_at                | 2016-06-15 15:50:37+00:00            |
      +---------------------------+--------------------------------------+

   .. note::

      The value of ``--provider-physical-network`` must refer to the
      provider network name in the mapping.

OVN operations
^^^^^^^^^^^^^^

.. todo: I don't like going this deep with headers, so a future patch will
   probably break this content into multiple files.

The OVN mechanism driver and OVN perform the following operations during
creation of a provider network.

#. The mechanism driver translates the network into a logical switch in
   the OVN northbound database.

   .. code-block:: console

      _uuid               : 98edf19f-2dbc-4182-af9b-79cafa4794b6
      acls                : []
      external_ids        : {"neutron:network_name"=provider}
      load_balancer       : []
      name                : "neutron-e4abf6df-f8cf-49fd-85d4-3ea399f4d645"
      ports               : [92ee7c2f-cd22-4cac-a9d9-68a374dc7b17]

   .. note::

      The ``neutron:network_name`` field in ``external_ids`` contains
      the network name and ``name`` contains the network UUID.

#. In addition, because the provider network is handled by a separate
   bridge, the following logical port is created in the OVN northbound
   database.
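   Both the logical switch and this port can be inspected from any node
   with access to the northbound database; a minimal sketch, assuming the
   switch name from the listing above:

   .. code-block:: console

      # ovn-nbctl show neutron-e4abf6df-f8cf-49fd-85d4-3ea399f4d645

   The ``localnet`` port row itself looks like this:

.. 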
code-block:: console _uuid : 92ee7c2f-cd22-4cac-a9d9-68a374dc7b17 addresses : [unknown] enabled : [] external_ids : {} name : "provnet-e4abf6df-f8cf-49fd-85d4-3ea399f4d645" options : {network_name=provider} parent_name : [] port_security : [] tag : [] type : localnet up : false #. The OVN northbound service translates these objects into datapath bindings, port bindings, and the appropriate multicast groups in the OVN southbound database. * Datapath bindings .. code-block:: console _uuid : f1f0981f-a206-4fac-b3a1-dc2030c9909f external_ids : {logical-switch="98edf19f-2dbc-4182-af9b-79cafa4794b6"} tunnel_key : 109 * Port bindings .. code-block:: console _uuid : 8427506e-46b5-41e5-a71b-a94a6859e773 chassis : [] datapath : f1f0981f-a206-4fac-b3a1-dc2030c9909f logical_port : "provnet-e4abf6df-f8cf-49fd-85d4-3ea399f4d645" mac : [unknown] options : {network_name=provider} parent_port : [] tag : [] tunnel_key : 1 type : localnet * Logical flows .. code-block:: console Datapath: f1f0981f-a206-4fac-b3a1-dc2030c9909f Pipeline: ingress table= 0( ls_in_port_sec_l2), priority= 100, match=(eth.src[40]), action=(drop;) table= 0( ls_in_port_sec_l2), priority= 100, match=(vlan.present), action=(drop;) table= 0( ls_in_port_sec_l2), priority= 50, match=(inport == "provnet-e4abf6df-f8cf-49fd-85d4-3ea399f4d645"), action=(next;) table= 1( ls_in_port_sec_ip), priority= 0, match=(1), action=(next;) table= 2( ls_in_port_sec_nd), priority= 0, match=(1), action=(next;) table= 3( ls_in_pre_acl), priority= 0, match=(1), action=(next;) table= 4( ls_in_pre_lb), priority= 0, match=(1), action=(next;) table= 5( ls_in_pre_stateful), priority= 100, match=(reg0[0] == 1), action=(ct_next;) table= 5( ls_in_pre_stateful), priority= 0, match=(1), action=(next;) table= 6( ls_in_acl), priority= 0, match=(1), action=(next;) table= 7( ls_in_lb), priority= 0, match=(1), action=(next;) table= 8( ls_in_stateful), priority= 100, match=(reg0[1] == 1), action=(ct_commit; next;) table= 8( ls_in_stateful), priority= 100, match=(reg0[2] == 1), action=(ct_lb;) table= 8( ls_in_stateful), priority= 0, match=(1), action=(next;) table= 9( ls_in_arp_rsp), priority= 100, match=(inport == "provnet-e4abf6df-f8cf-49fd-85d4-3ea399f4d645"), action=(next;) table= 9( ls_in_arp_rsp), priority= 0, match=(1), action=(next;) table=10( ls_in_l2_lkup), priority= 100, match=(eth.mcast), action=(outport = "_MC_flood"; output;) table=10( ls_in_l2_lkup), priority= 0, match=(1), action=(outport = "_MC_unknown"; output;) Datapath: f1f0981f-a206-4fac-b3a1-dc2030c9909f Pipeline: egress table= 0( ls_out_pre_lb), priority= 0, match=(1), action=(next;) table= 1( ls_out_pre_acl), priority= 0, match=(1), action=(next;) table= 2(ls_out_pre_stateful), priority= 100, match=(reg0[0] == 1), action=(ct_next;) table= 2(ls_out_pre_stateful), priority= 0, match=(1), action=(next;) table= 3( ls_out_lb), priority= 0, match=(1), action=(next;) table= 4( ls_out_acl), priority= 0, match=(1), action=(next;) table= 5( ls_out_stateful), priority= 100, match=(reg0[1] == 1), action=(ct_commit; next;) table= 5( ls_out_stateful), priority= 100, match=(reg0[2] == 1), action=(ct_lb;) table= 5( ls_out_stateful), priority= 0, match=(1), action=(next;) table= 6( ls_out_port_sec_ip), priority= 0, match=(1), action=(next;) table= 7( ls_out_port_sec_l2), priority= 100, match=(eth.mcast), action=(output;) table= 7( ls_out_port_sec_l2), priority= 50, match=(outport == "provnet-e4abf6df-f8cf-49fd-85d4-3ea399f4d645"), action=(output;) * Multicast groups .. 
code-block:: console

     _uuid               : 0102f08d-c658-4d0a-a18a-ec8adcaddf4f
     datapath            : f1f0981f-a206-4fac-b3a1-dc2030c9909f
     name                : _MC_unknown
     ports               : [8427506e-46b5-41e5-a71b-a94a6859e773]
     tunnel_key          : 65534

     _uuid               : fbc38e51-ac71-4c57-a405-e6066e4c101e
     datapath            : f1f0981f-a206-4fac-b3a1-dc2030c9909f
     name                : _MC_flood
     ports               : [8427506e-46b5-41e5-a71b-a94a6859e773]
     tunnel_key          : 65535

Create a subnet on the provider network
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The provider network requires at least one subnet that contains the IP
address allocation available for instances, the default gateway IP address,
and metadata such as name resolution.

#. On the controller node, create a subnet bound to the provider network
   ``provider``.

   .. code-block:: console

      $ openstack subnet create --network provider --subnet-range \
        203.0.113.0/24 --allocation-pool start=203.0.113.101,end=203.0.113.250 \
        --dns-nameserver 8.8.8.8,8.8.4.4 --gateway 203.0.113.1 provider-v4
      +-------------------+--------------------------------------+
      | Field             | Value                                |
      +-------------------+--------------------------------------+
      | allocation_pools  | 203.0.113.101-203.0.113.250          |
      | cidr              | 203.0.113.0/24                       |
      | created_at        | 2016-06-15 15:50:45+00:00            |
      | description       |                                      |
      | dns_nameservers   | 8.8.8.8, 8.8.4.4                     |
      | enable_dhcp       | True                                 |
      | gateway_ip        | 203.0.113.1                          |
      | host_routes       |                                      |
      | id                | 32a61337-c5a3-448a-a1e7-c11d6f062c21 |
      | ip_version        | 4                                    |
      | ipv6_address_mode | None                                 |
      | ipv6_ra_mode      | None                                 |
      | name              | provider-v4                          |
      | network_id        | 0243277b-4aa8-46d8-9e10-5c9ad5e01521 |
      | project_id        | b1ebf33664df402693f729090cfab861     |
      | subnetpool_id     | None                                 |
      | updated_at        | 2016-06-15 15:50:45+00:00            |
      +-------------------+--------------------------------------+

If using DHCP to manage instance IP addresses, adding a subnet causes a
series of operations in the Networking service and OVN.

* The Networking service schedules the network on an appropriate number of
  DHCP agents. The example environment contains three DHCP agents.

* Each DHCP agent spawns a network namespace with a ``dnsmasq`` process
  using an IP address from the subnet allocation.

* The OVN mechanism driver creates a logical switch port object in the OVN
  northbound database for each ``dnsmasq`` process.

OVN operations
^^^^^^^^^^^^^^

The OVN mechanism driver and OVN perform the following operations during
creation of a subnet on the provider network.

#. If the subnet uses DHCP for IP address management, OVN creates logical
   ports for each DHCP agent serving the subnet and binds them to the
   logical switch. In this example, the subnet contains two DHCP agents.

   .. code-block:: console

      _uuid               : 5e144ab9-3e08-4910-b936-869bbbf254c8
      addresses           : ["fa:16:3e:57:f9:ca 203.0.113.101"]
      enabled             : true
      external_ids        : {"neutron:port_name"=""}
      name                : "6ab052c2-7b75-4463-b34f-fd3426f61787"
      options             : {}
      parent_name         : []
      port_security       : []
      tag                 : []
      type                : ""
      up                  : true

      _uuid               : 38cf8b52-47c4-4e93-be8d-06bf71f6a7c9
      addresses           : ["fa:16:3e:e0:eb:6d 203.0.113.102"]
      enabled             : true
      external_ids        : {"neutron:port_name"=""}
      name                : "94aee636-2394-48bc-b407-8224ab6bb1ab"
      options             : {}
      parent_name         : []
      port_security       : []
      tag                 : []
      type                : ""
      up                  : true

      _uuid               : 924500c4-8580-4d5f-a7ad-8769f6e58ff5
      acls                : []
      external_ids        : {"neutron:network_name"=provider}
      load_balancer       : []
      name                : "neutron-670efade-7cd0-4d87-8a04-27f366eb8941"
      ports               : [38cf8b52-47c4-4e93-be8d-06bf71f6a7c9, 5e144ab9-3e08-4910-b936-869bbbf254c8, a576b812-9c3e-4cfb-9752-5d8500b3adf9]

#.
The OVN northbound service creates port bindings for these logical ports and adds them to the appropriate multicast group. * Port bindings .. code-block:: console _uuid : 030024f4-61c3-4807-859b-07727447c427 chassis : fc5ab9e7-bc28-40e8-ad52-2949358cc088 datapath : bd0ab2b3-4cf4-4289-9529-ef430f6a89e6 logical_port : "6ab052c2-7b75-4463-b34f-fd3426f61787" mac : ["fa:16:3e:57:f9:ca 203.0.113.101"] options : {} parent_port : [] tag : [] tunnel_key : 2 type : "" _uuid : cc5bcd19-bcae-4e29-8cee-3ec8a8a75d46 chassis : 6a9d0619-8818-41e6-abef-2f3d9a597c03 datapath : bd0ab2b3-4cf4-4289-9529-ef430f6a89e6 logical_port : "94aee636-2394-48bc-b407-8224ab6bb1ab" mac : ["fa:16:3e:e0:eb:6d 203.0.113.102"] options : {} parent_port : [] tag : [] tunnel_key : 3 type : "" * Multicast groups .. code-block:: console _uuid : 39b32ccd-fa49-4046-9527-13318842461e datapath : bd0ab2b3-4cf4-4289-9529-ef430f6a89e6 name : _MC_flood ports : [030024f4-61c3-4807-859b-07727447c427, 904c3108-234d-41c0-b93c-116b7e352a75, cc5bcd19-bcae-4e29-8cee-3ec8a8a75d46] tunnel_key : 65535 #. The OVN northbound service translates the logical ports into additional logical flows in the OVN southbound database. .. code-block:: console Datapath: bd0ab2b3-4cf4-4289-9529-ef430f6a89e6 Pipeline: ingress table= 0( ls_in_port_sec_l2), priority= 50, match=(inport == "94aee636-2394-48bc-b407-8224ab6bb1ab"), action=(next;) table= 0( ls_in_port_sec_l2), priority= 50, match=(inport == "6ab052c2-7b75-4463-b34f-fd3426f61787"), action=(next;) table= 9( ls_in_arp_rsp), priority= 50, match=(arp.tpa == 203.0.113.101 && arp.op == 1), action=(eth.dst = eth.src; eth.src = fa:16:3e:57:f9:ca; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = fa:16:3e:57:f9:ca; arp.tpa = arp.spa; arp.spa = 203.0.113.101; outport = inport; inport = ""; /* Allow sending out inport. */ output;) table= 9( ls_in_arp_rsp), priority= 50, match=(arp.tpa == 203.0.113.102 && arp.op == 1), action=(eth.dst = eth.src; eth.src = fa:16:3e:e0:eb:6d; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = fa:16:3e:e0:eb:6d; arp.tpa = arp.spa; arp.spa = 203.0.113.102; outport = inport; inport = ""; /* Allow sending out inport. */ output;) table=10( ls_in_l2_lkup), priority= 50, match=(eth.dst == fa:16:3e:57:f9:ca), action=(outport = "6ab052c2-7b75-4463-b34f-fd3426f61787"; output;) table=10( ls_in_l2_lkup), priority= 50, match=(eth.dst == fa:16:3e:e0:eb:6d), action=(outport = "94aee636-2394-48bc-b407-8224ab6bb1ab"; output;) Datapath: bd0ab2b3-4cf4-4289-9529-ef430f6a89e6 Pipeline: egress table= 7( ls_out_port_sec_l2), priority= 50, match=(outport == "6ab052c2-7b75-4463-b34f-fd3426f61787"), action=(output;) table= 7( ls_out_port_sec_l2), priority= 50, match=(outport == "94aee636-2394-48bc-b407-8224ab6bb1ab"), action=(output;) #. For each compute node without a DHCP agent on the subnet: * The OVN controller service translates the logical flows into flows on the integration bridge ``br-int``. .. code-block:: console cookie=0x0, duration=22.303s, table=32, n_packets=0, n_bytes=0, idle_age=22, priority=100,reg7=0xffff,metadata=0x4 actions=load:0x4->NXM_NX_TUN_ID[0..23], set_field:0xffff/0xffffffff->tun_metadata0, move:NXM_NX_REG6[0..14]->NXM_NX_TUN_METADATA0[16..30], output:5,output:4,resubmit(,33) #. For each compute node with a DHCP agent on a subnet: * Creation of a DHCP network namespace adds two virtual switch ports. 
The first port connects the DHCP agent with ``dnsmasq`` process to the integration bridge and the second port patches the integration bridge to the provider bridge ``br-provider``. .. code-block:: console # ovs-ofctl show br-int OFPT_FEATURES_REPLY (xid=0x2): dpid:000022024a1dc045 n_tables:254, n_buffers:256 capabilities: FLOW_STATS TABLE_STATS PORT_STATS QUEUE_STATS ARP_MATCH_IP actions: output enqueue set_vlan_vid set_vlan_pcp strip_vlan mod_dl_src mod_dl_dst mod_nw_src mod_nw_dst mod_nw_tos mod_tp_src mod_tp_dst 7(tap6ab052c2-7b): addr:00:00:00:00:10:7f config: PORT_DOWN state: LINK_DOWN speed: 0 Mbps now, 0 Mbps max 8(patch-br-int-to): addr:6a:8c:30:3f:d7:dd config: 0 state: 0 speed: 0 Mbps now, 0 Mbps max # ovs-ofctl -O OpenFlow13 show br-provider OFPT_FEATURES_REPLY (OF1.3) (xid=0x2): dpid:0000080027137c4a n_tables:254, n_buffers:256 capabilities: FLOW_STATS TABLE_STATS PORT_STATS GROUP_STATS QUEUE_STATS OFPST_PORT_DESC reply (OF1.3) (xid=0x3): 1(patch-provnet-0): addr:fa:42:c5:3f:d7:6f config: 0 state: 0 speed: 0 Mbps now, 0 Mbps max * The OVN controller service translates these logical flows into flows on the integration bridge. .. code-block:: console cookie=0x0, duration=17.731s, table=0, n_packets=3, n_bytes=258, idle_age=16, priority=100,in_port=7 actions=load:0x2->NXM_NX_REG5[],load:0x4->OXM_OF_METADATA[], load:0x2->NXM_NX_REG6[],resubmit(,16) cookie=0x0, duration=17.730s, table=0, n_packets=15, n_bytes=954, idle_age=2, priority=100,in_port=8,vlan_tci=0x0000/0x1000 actions=load:0x1->NXM_NX_REG5[],load:0x4->OXM_OF_METADATA[], load:0x1->NXM_NX_REG6[],resubmit(,16) cookie=0x0, duration=17.730s, table=0, n_packets=0, n_bytes=0, idle_age=17, priority=100,in_port=8,dl_vlan=0 actions=strip_vlan,load:0x1->NXM_NX_REG5[], load:0x4->OXM_OF_METADATA[],load:0x1->NXM_NX_REG6[], resubmit(,16) cookie=0x0, duration=17.732s, table=16, n_packets=0, n_bytes=0, idle_age=17, priority=100,metadata=0x4, dl_src=01:00:00:00:00:00/01:00:00:00:00:00 actions=drop cookie=0x0, duration=17.732s, table=16, n_packets=0, n_bytes=0, idle_age=17, priority=100,metadata=0x4,vlan_tci=0x1000/0x1000 actions=drop cookie=0x0, duration=17.732s, table=16, n_packets=3, n_bytes=258, idle_age=16, priority=50,reg6=0x2,metadata=0x4 actions=resubmit(,17) cookie=0x0, duration=17.732s, table=16, n_packets=0, n_bytes=0, idle_age=17, priority=50,reg6=0x3,metadata=0x4 actions=resubmit(,17) cookie=0x0, duration=17.732s, table=16, n_packets=15, n_bytes=954, idle_age=2, priority=50,reg6=0x1,metadata=0x4 actions=resubmit(,17) cookie=0x0, duration=21.714s, table=17, n_packets=18, n_bytes=1212, idle_age=6, priority=0,metadata=0x4 actions=resubmit(,18) cookie=0x0, duration=21.714s, table=18, n_packets=18, n_bytes=1212, idle_age=6, priority=0,metadata=0x4 actions=resubmit(,19) cookie=0x0, duration=21.714s, table=19, n_packets=18, n_bytes=1212, idle_age=6, priority=0,metadata=0x4 actions=resubmit(,20) cookie=0x0, duration=21.714s, table=20, n_packets=18, n_bytes=1212, idle_age=6, priority=0,metadata=0x4 actions=resubmit(,21) cookie=0x0, duration=21.714s, table=21, n_packets=0, n_bytes=0, idle_age=21, priority=100,ip,reg0=0x1/0x1,metadata=0x4 actions=ct(table=22,zone=NXM_NX_REG5[0..15]) cookie=0x0, duration=21.714s, table=21, n_packets=0, n_bytes=0, idle_age=21, priority=100,ipv6,reg0=0x1/0x1,metadata=0x4 actions=ct(table=22,zone=NXM_NX_REG5[0..15]) cookie=0x0, duration=21.714s, table=21, n_packets=18, n_bytes=1212, idle_age=6, priority=0,metadata=0x4 actions=resubmit(,22) cookie=0x0, duration=21.714s, table=22, n_packets=18, n_bytes=1212, 
idle_age=6, priority=0,metadata=0x4 actions=resubmit(,23) cookie=0x0, duration=21.714s, table=23, n_packets=18, n_bytes=1212, idle_age=6, priority=0,metadata=0x4 actions=resubmit(,24) cookie=0x0, duration=21.714s, table=24, n_packets=0, n_bytes=0, idle_age=21, priority=100,ipv6,reg0=0x4/0x4,metadata=0x4 actions=ct(table=25,zone=NXM_NX_REG5[0..15],nat) cookie=0x0, duration=21.714s, table=24, n_packets=0, n_bytes=0, idle_age=21, priority=100,ip,reg0=0x4/0x4,metadata=0x4 actions=ct(table=25,zone=NXM_NX_REG5[0..15],nat) cookie=0x0, duration=21.714s, table=24, n_packets=0, n_bytes=0, idle_age=21, priority=100,ip,reg0=0x2/0x2,metadata=0x4 actions=ct(commit,zone=NXM_NX_REG5[0..15]),resubmit(,25) cookie=0x0, duration=21.714s, table=24, n_packets=0, n_bytes=0, idle_age=21, priority=100,ipv6,reg0=0x2/0x2,metadata=0x4 actions=ct(commit,zone=NXM_NX_REG5[0..15]),resubmit(,25) cookie=0x0, duration=21.714s, table=24, n_packets=18, n_bytes=1212, idle_age=6, priority=0,metadata=0x4 actions=resubmit(,25) cookie=0x0, duration=21.714s, table=25, n_packets=15, n_bytes=954, idle_age=6, priority=100,reg6=0x1,metadata=0x4 actions=resubmit(,26) cookie=0x0, duration=21.714s, table=25, n_packets=0, n_bytes=0, idle_age=21, priority=50,arp,metadata=0x4, arp_tpa=203.0.113.101,arp_op=1 actions=move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[], mod_dl_src:fa:16:3e:f9:5d:f3,load:0x2->NXM_OF_ARP_OP[], move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[], load:0xfa163ef95df3->NXM_NX_ARP_SHA[], move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[], load:0xc0a81264->NXM_OF_ARP_SPA[], move:NXM_NX_REG6[]->NXM_NX_REG7[], load:0->NXM_NX_REG6[],load:0->NXM_OF_IN_PORT[],resubmit(,32) cookie=0x0, duration=21.714s, table=25, n_packets=0, n_bytes=0, idle_age=21, priority=50,arp,metadata=0x4, arp_tpa=203.0.113.102,arp_op=1 actions=move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[], mod_dl_src:fa:16:3e:f0:a5:9f, load:0x2->NXM_OF_ARP_OP[], move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[], load:0xfa163ef0a59f->NXM_NX_ARP_SHA[], move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[], load:0xc0a81265->NXM_OF_ARP_SPA[], move:NXM_NX_REG6[]->NXM_NX_REG7[], load:0->NXM_NX_REG6[],load:0->NXM_OF_IN_PORT[],resubmit(,32) cookie=0x0, duration=21.714s, table=25, n_packets=3, n_bytes=258, idle_age=20, priority=0,metadata=0x4 actions=resubmit(,26) cookie=0x0, duration=21.714s, table=26, n_packets=18, n_bytes=1212, idle_age=6, priority=100,metadata=0x4, dl_dst=01:00:00:00:00:00/01:00:00:00:00:00 actions=load:0xffff->NXM_NX_REG7[],resubmit(,32) cookie=0x0, duration=21.714s, table=26, n_packets=0, n_bytes=0, idle_age=21, priority=50,metadata=0x4,dl_dst=fa:16:3e:f0:a5:9f actions=load:0x3->NXM_NX_REG7[],resubmit(,32) cookie=0x0, duration=21.714s, table=26, n_packets=0, n_bytes=0, idle_age=21, priority=50,metadata=0x4,dl_dst=fa:16:3e:f9:5d:f3 actions=load:0x2->NXM_NX_REG7[],resubmit(,32) cookie=0x0, duration=21.714s, table=26, n_packets=0, n_bytes=0, idle_age=21, priority=0,metadata=0x4 actions=load:0xfffe->NXM_NX_REG7[],resubmit(,32) cookie=0x0, duration=17.731s, table=33, n_packets=0, n_bytes=0, idle_age=17, priority=100,reg7=0x2,metadata=0x4 actions=load:0x2->NXM_NX_REG5[],resubmit(,34) cookie=0x0, duration=118.126s, table=33, n_packets=0, n_bytes=0, idle_age=118, hard_age=17, priority=100,reg7=0xfffe,metadata=0x4 actions=load:0x1->NXM_NX_REG5[],load:0x1->NXM_NX_REG7[], resubmit(,34),load:0xfffe->NXM_NX_REG7[] cookie=0x0, duration=118.126s, table=33, n_packets=18, n_bytes=1212, idle_age=2, hard_age=17, priority=100,reg7=0xffff,metadata=0x4 actions=load:0x2->NXM_NX_REG5[],load:0x2->NXM_NX_REG7[], 
resubmit(,34),load:0x1->NXM_NX_REG5[],load:0x1->NXM_NX_REG7[],
    resubmit(,34),load:0xffff->NXM_NX_REG7[]
cookie=0x0, duration=17.730s, table=33, n_packets=0, n_bytes=0,
    idle_age=17, priority=100,reg7=0x1,metadata=0x4
    actions=load:0x1->NXM_NX_REG5[],resubmit(,34)
cookie=0x0, duration=17.697s, table=33, n_packets=0, n_bytes=0,
    idle_age=17, priority=100,reg7=0x3,metadata=0x4
    actions=load:0x1->NXM_NX_REG7[],resubmit(,33)
cookie=0x0, duration=17.731s, table=34, n_packets=3, n_bytes=258,
    idle_age=16, priority=100,reg6=0x2,reg7=0x2,metadata=0x4 actions=drop
cookie=0x0, duration=17.730s, table=34, n_packets=15, n_bytes=954,
    idle_age=2, priority=100,reg6=0x1,reg7=0x1,metadata=0x4 actions=drop
cookie=0x0, duration=21.714s, table=48, n_packets=18, n_bytes=1212,
    idle_age=6, priority=0,metadata=0x4 actions=resubmit(,49)
cookie=0x0, duration=21.714s, table=49, n_packets=18, n_bytes=1212,
    idle_age=6, priority=0,metadata=0x4 actions=resubmit(,50)
cookie=0x0, duration=21.714s, table=50, n_packets=0, n_bytes=0,
    idle_age=21, priority=100,ip,reg0=0x1/0x1,metadata=0x4
    actions=ct(table=51,zone=NXM_NX_REG5[0..15])
cookie=0x0, duration=21.714s, table=50, n_packets=0, n_bytes=0,
    idle_age=21, priority=100,ipv6,reg0=0x1/0x1,metadata=0x4
    actions=ct(table=51,zone=NXM_NX_REG5[0..15])
cookie=0x0, duration=21.714s, table=50, n_packets=18, n_bytes=1212,
    idle_age=6, priority=0,metadata=0x4 actions=resubmit(,51)
cookie=0x0, duration=21.714s, table=51, n_packets=18, n_bytes=1212,
    idle_age=6, priority=0,metadata=0x4 actions=resubmit(,52)
cookie=0x0, duration=21.714s, table=52, n_packets=18, n_bytes=1212,
    idle_age=6, priority=0,metadata=0x4 actions=resubmit(,53)
cookie=0x0, duration=21.714s, table=53, n_packets=0, n_bytes=0,
    idle_age=21, priority=100,ip,reg0=0x4/0x4,metadata=0x4
    actions=ct(table=54,zone=NXM_NX_REG5[0..15],nat)
cookie=0x0, duration=21.714s, table=53, n_packets=0, n_bytes=0,
    idle_age=21, priority=100,ipv6,reg0=0x4/0x4,metadata=0x4
    actions=ct(table=54,zone=NXM_NX_REG5[0..15],nat)
cookie=0x0, duration=21.714s, table=53, n_packets=0, n_bytes=0,
    idle_age=21, priority=100,ipv6,reg0=0x2/0x2,metadata=0x4
    actions=ct(commit,zone=NXM_NX_REG5[0..15]),resubmit(,54)
cookie=0x0, duration=21.714s, table=53, n_packets=0, n_bytes=0,
    idle_age=21, priority=100,ip,reg0=0x2/0x2,metadata=0x4
    actions=ct(commit,zone=NXM_NX_REG5[0..15]),resubmit(,54)
cookie=0x0, duration=21.714s, table=53, n_packets=18, n_bytes=1212,
    idle_age=6, priority=0,metadata=0x4 actions=resubmit(,54)
cookie=0x0, duration=21.714s, table=54, n_packets=18, n_bytes=1212,
    idle_age=6, priority=0,metadata=0x4 actions=resubmit(,55)
cookie=0x0, duration=21.714s, table=55, n_packets=18, n_bytes=1212,
    idle_age=6, priority=100,metadata=0x4,
    dl_dst=01:00:00:00:00:00/01:00:00:00:00:00 actions=resubmit(,64)
cookie=0x0, duration=21.714s, table=55, n_packets=0, n_bytes=0,
    idle_age=21, priority=50,reg7=0x3,metadata=0x4 actions=resubmit(,64)
cookie=0x0, duration=21.714s, table=55, n_packets=0, n_bytes=0,
    idle_age=21, priority=50,reg7=0x2,metadata=0x4 actions=resubmit(,64)
cookie=0x0, duration=21.714s, table=55, n_packets=0, n_bytes=0,
    idle_age=21, priority=50,reg7=0x1,metadata=0x4 actions=resubmit(,64)
cookie=0x0, duration=21.712s, table=64, n_packets=15, n_bytes=954,
    idle_age=6, priority=100,reg7=0x3,metadata=0x4 actions=output:7
cookie=0x0, duration=21.711s, table=64, n_packets=3, n_bytes=258,
    idle_age=20, priority=100,reg7=0x1,metadata=0x4 actions=output:8
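Judging from the flow and logical-flow listings in this guide, ovn-controller
maps the OVN logical pipelines onto the integration bridge with a fixed
offset: ingress logical tables appear starting at OpenFlow table 16, egress
logical tables at table 48, and tables 32-34 and 64 handle output. To
correlate an OpenFlow rule with the logical flow it implements, you can list
the southbound logical flows and compare the match fields; for example,
filtering on one of the addresses above. This is only a debugging aid, and
by default these commands must run from the node containing the OVN
databases.

.. code-block:: console

   $ ovn-sbctl lflow-list
   $ ovn-sbctl lflow-list | grep 203.0.113.101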
.. _refarch-refarch:

======================
Reference architecture
======================

The reference architecture defines the minimum environment necessary
to deploy OpenStack with Open Virtual Network (OVN) integration for
the Networking service in production with sufficient expectations
of scale and performance. For evaluation purposes, you can deploy this
environment using the :doc:`Installation Guide ` or `Vagrant `_.
Any scaling or performance evaluations should use bare metal instead of
virtual machines.

Layout
------

The reference architecture includes a minimum of four nodes.

The controller node contains the following components that provide enough
functionality to launch basic instances:

* One network interface for management
* Identity service
* Image service
* Networking management with ML2 mechanism driver for OVN (control plane)
* Compute management (control plane)

The database node contains the following components:

* One network interface for management
* OVN northbound service (``ovn-northd``)
* Open vSwitch (OVS) database service (``ovsdb-server``) for the OVN
  northbound database (``ovnnb.db``)
* Open vSwitch (OVS) database service (``ovsdb-server``) for the OVN
  southbound database (``ovnsb.db``)

.. note::

   For functional evaluation only, you can combine the controller and
   database nodes.

The two compute nodes contain the following components:

* Two or three network interfaces for management, overlay networks, and
  optionally provider networks
* Compute management (hypervisor)
* Hypervisor (KVM)
* OVN controller service (``ovn-controller``)
* OVS data plane service (``ovs-vswitchd``)
* OVS database service (``ovsdb-server``) with OVS local configuration
  (``conf.db``) database
* OVN metadata agent (``ovn-metadata-agent``)

The gateway nodes contain the following components:

* Three network interfaces for management, overlay networks, and provider
  networks.
* OVN controller service (``ovn-controller``)
* OVS data plane service (``ovs-vswitchd``)
* OVS database service (``ovsdb-server``) with OVS local configuration
  (``conf.db``) database

.. note::

   Each OVN metadata agent provides the metadata service locally on its
   compute node in a lightweight way. Each network accessed by the
   instances on a compute node has a corresponding
   ``ovn-metadata-$net_uuid`` namespace, inside which an ``haproxy``
   instance funnels metadata requests to the ``ovn-metadata-agent`` over
   a Unix socket. Such a namespace can be very helpful for debugging, for
   example to access the local instances on the compute node. If you log
   in as root on such a compute node, you can execute:

   .. code-block:: console

      # ip netns exec ovn-metadata-$net_uuid ssh user@my.instance.ip.address

Hardware layout
~~~~~~~~~~~~~~~

.. image:: figures/ovn-hw.png
   :alt: Hardware layout
   :align: center

Service layout
~~~~~~~~~~~~~~

.. image:: figures/ovn-services.png
   :alt: Service layout
   :align: center

Networking service with OVN integration
---------------------------------------

The reference architecture deploys the Networking service with OVN
integration as described in the following scenarios:

.. image:: figures/ovn-architecture1.png
   :alt: Architecture for Networking service with OVN integration
   :align: center

With the ovn driver, all the E/W traffic that traverses a virtual router
is completely distributed, going from compute node to compute node without
passing through the gateway nodes.
N/S traffic that needs SNAT (without floating IPs) will always pass
through the centralized gateway nodes, although, as soon as you have more
than one gateway node, the ovn driver will make use of the HA capabilities
of OVN.

Centralized Floating IPs
~~~~~~~~~~~~~~~~~~~~~~~~

In this architecture, all the N/S router traffic (SNAT and floating IPs)
goes through the gateway nodes. The compute nodes don't need connectivity
to the external network, although it could be provided if you want some
instances to have direct connectivity to that network.

For external connectivity, gateway nodes have to set ``ovn-cms-options``
with ``enable-chassis-as-gw`` in the ``Open_vSwitch`` table's
``external_ids`` column, for example:

.. code-block:: console

   $ ovs-vsctl set open . external-ids:ovn-cms-options="enable-chassis-as-gw"

Distributed Floating IPs (DVR)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In this architecture, the floating IP N/S traffic flows directly from/to
the compute nodes through the specific provider network bridge. In this
case, compute nodes need connectivity to the external network.

Each compute node contains the following network components:

.. image:: figures/ovn-compute1.png
   :alt: Compute node network components
   :align: center

.. note::

   The Networking service creates a unique network namespace for each
   virtual network that enables the metadata service.

   Several external connections can optionally be created via provider
   bridges. Those can be used for direct VM connectivity to the specific
   networks or for the use of distributed floating IPs.

.. _refarch_database-access:

Accessing OVN database content
------------------------------

OVN stores configuration data in a collection of OVS database tables.
The following commands show the contents of the most common database
tables in the northbound and southbound databases. The example database
output in this section uses these commands with various output filters.

.. code-block:: console

   $ ovn-nbctl list Logical_Switch
   $ ovn-nbctl list Logical_Switch_Port
   $ ovn-nbctl list ACL
   $ ovn-nbctl list Address_Set
   $ ovn-nbctl list Logical_Router
   $ ovn-nbctl list Logical_Router_Port
   $ ovn-nbctl list Gateway_Chassis

   $ ovn-sbctl list Chassis
   $ ovn-sbctl list Encap
   $ ovn-sbctl list Address_Set
   $ ovn-sbctl lflow-list
   $ ovn-sbctl list Multicast_Group
   $ ovn-sbctl list Datapath_Binding
   $ ovn-sbctl list Port_Binding
   $ ovn-sbctl list MAC_Binding
   $ ovn-sbctl list Gateway_Chassis

.. note::

   By default, you must run these commands from the node containing
   the OVN databases.

.. _refarch-adding-compute-node:

Adding a compute node
---------------------

When you add a compute node to the environment, the OVN controller
service on it connects to the OVN southbound database and registers
the node as a chassis.

.. code-block:: console

   _uuid               : 9be8639d-1d0b-4e3d-9070-03a655073871
   encaps              : [2fcefdf4-a5e7-43ed-b7b2-62039cc7e32e]
   external_ids        : {ovn-bridge-mappings=""}
   hostname            : "compute1"
   name                : "410ee302-850b-4277-8610-fa675d620cb7"
   vtep_logical_switches: []

The ``encaps`` field value refers to tunnel endpoint information for
the compute node.

.. code-block:: console

   _uuid               : 2fcefdf4-a5e7-43ed-b7b2-62039cc7e32e
   ip                  : "10.0.0.32"
   options             : {}
   type                : geneve

Security Groups/Rules
---------------------

Each security group maps to two Address_Set entries in the OVN NB and SB
tables, one for IPv4 and another for IPv6, which are used to hold the IP
addresses of the ports that belong to the security group, so that rules
with ``remote_group_id`` can be applied efficiently.
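As an illustrative sketch (the protocol, port, and group arguments here
are arbitrary examples, not part of the walkthrough), a rule that
references a remote group can be created as follows:

.. code-block:: console

   $ openstack security group rule create --ingress --protocol tcp \
     --dst-port 22 --remote-group default default

Because the resulting ACL matches on the address sets shown below rather
than on the individual member addresses, adding or removing a port in the
group only updates the contents of the address sets.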
OVN operations
~~~~~~~~~~~~~~

#. Creating a security group will cause the OVN mechanism driver to
   create two new entries in the Address Set table of the northbound DB:

   .. code-block:: console

      _uuid               : 9a9d01bd-4afc-4d12-853a-cd21b547911d
      addresses           : []
      external_ids        : {"neutron:security_group_name"=default}
      name                : "as_ip4_90a78a43_b549_4bee_8822_21fcccab58dc"

      _uuid               : 27a91327-636e-4125-99f0-6f2937a3b6d8
      addresses           : []
      external_ids        : {"neutron:security_group_name"=default}
      name                : "as_ip6_90a78a43_b549_4bee_8822_21fcccab58dc"

   In the above entries, the address set names include the protocol
   (IPv4 or IPv6, written as ``ip4`` or ``ip6``) and the UUID of the
   OpenStack security group, with dashes translated to underscores.

#. In turn, these new entries will be translated by the OVN northd
   daemon into entries in the southbound DB:

   .. code-block:: console

      _uuid               : 886d7b3a-e460-470f-8af2-7c7d88ce45d2
      addresses           : []
      name                : "as_ip4_90a78a43_b549_4bee_8822_21fcccab58dc"

      _uuid               : 355ddcba-941d-4f1c-b823-dc811cec59ca
      addresses           : []
      name                : "as_ip6_90a78a43_b549_4bee_8822_21fcccab58dc"

Networks
--------

.. toctree::
   :maxdepth: 1

   provider-networks
   selfservice-networks

Routers
-------

.. toctree::
   :maxdepth: 1

   routers

.. todo: Explain L3HA modes available starting at OVS 2.8

Instances
---------

Launching an instance causes the same series of operations regardless
of the network. The following example uses the ``provider`` provider
network, ``cirros`` image, ``m1.tiny`` flavor, ``default`` security
group, and ``mykey`` key.

.. toctree::
   :maxdepth: 1

   launch-instance-provider-network
   launch-instance-selfservice-network

.. todo: Add north-south when OVN gains support for it.

Traffic flows
-------------

East-west for instances on the same provider network
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

East-west for instances on different provider networks
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

East-west for instances on the same self-service network
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

East-west for instances on different self-service networks
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. _refarch-routers:

Routers
-------

Routers pass traffic between layer-3 networks.

Create a router
~~~~~~~~~~~~~~~

#. On the controller node, source the credentials for a regular
   (non-privileged) project. The following example uses the ``demo``
   project.

#. On the controller node, create a router in the Networking service.

   .. code-block:: console

      $ openstack router create router
      +-----------------------+--------------------------------------+
      | Field                 | Value                                |
      +-----------------------+--------------------------------------+
      | admin_state_up        | UP                                   |
      | description           |                                      |
      | external_gateway_info | null                                 |
      | headers               |                                      |
      | id                    | 24addfcd-5506-405d-a59f-003644c3d16a |
      | name                  | router                               |
      | project_id            | b1ebf33664df402693f729090cfab861     |
      | routes                |                                      |
      | status                | ACTIVE                               |
      +-----------------------+--------------------------------------+

OVN operations
^^^^^^^^^^^^^^

The OVN mechanism driver and OVN perform the following operations when
creating a router.

#. The OVN mechanism driver translates the router into a logical router
   object in the OVN northbound database.

   ..
code-block:: console _uuid : 1c2e340d-dac9-496b-9e86-1065f9dab752 default_gw : [] enabled : [] external_ids : {"neutron:router_name"="router"} name : "neutron-a24fd760-1a99-4eec-9f02-24bb284ff708" ports : [] static_routes : [] #. The OVN northbound service translates this object into logical flows and datapath bindings in the OVN southbound database. * Datapath bindings .. code-block:: console _uuid : 4a7485c6-a1ef-46a5-b57c-5ddb6ac15aaa external_ids : {logical-router="1c2e340d-dac9-496b-9e86-1065f9dab752"} tunnel_key : 3 * Logical flows .. code-block:: console Datapath: 4a7485c6-a1ef-46a5-b57c-5ddb6ac15aaa Pipeline: ingress table= 0( lr_in_admission), priority= 100, match=(vlan.present || eth.src[40]), action=(drop;) table= 1( lr_in_ip_input), priority= 100, match=(ip4.mcast || ip4.src == 255.255.255.255 || ip4.src == 127.0.0.0/8 || ip4.dst == 127.0.0.0/8 || ip4.src == 0.0.0.0/8 || ip4.dst == 0.0.0.0/8), action=(drop;) table= 1( lr_in_ip_input), priority= 50, match=(ip4.mcast), action=(drop;) table= 1( lr_in_ip_input), priority= 50, match=(eth.bcast), action=(drop;) table= 1( lr_in_ip_input), priority= 30, match=(ip4 && ip.ttl == {0, 1}), action=(drop;) table= 1( lr_in_ip_input), priority= 0, match=(1), action=(next;) table= 2( lr_in_unsnat), priority= 0, match=(1), action=(next;) table= 3( lr_in_dnat), priority= 0, match=(1), action=(next;) table= 5( lr_in_arp_resolve), priority= 0, match=(1), action=(get_arp(outport, reg0); next;) table= 6( lr_in_arp_request), priority= 100, match=(eth.dst == 00:00:00:00:00:00), action=(arp { eth.dst = ff:ff:ff:ff:ff:ff; arp.spa = reg1; arp.op = 1; output; };) table= 6( lr_in_arp_request), priority= 0, match=(1), action=(output;) Datapath: 4a7485c6-a1ef-46a5-b57c-5ddb6ac15aaa Pipeline: egress table= 0( lr_out_snat), priority= 0, match=(1), action=(next;) #. The OVN controller service on each compute node translates these objects into flows on the integration bridge ``br-int``. .. 
code-block:: console # ovs-ofctl dump-flows br-int cookie=0x0, duration=6.402s, table=16, n_packets=0, n_bytes=0, idle_age=6, priority=100,metadata=0x5,vlan_tci=0x1000/0x1000 actions=drop cookie=0x0, duration=6.402s, table=16, n_packets=0, n_bytes=0, idle_age=6, priority=100,metadata=0x5, dl_src=01:00:00:00:00:00/01:00:00:00:00:00 actions=drop cookie=0x0, duration=6.402s, table=17, n_packets=0, n_bytes=0, idle_age=6, priority=100,ip,metadata=0x5,nw_dst=127.0.0.0/8 actions=drop cookie=0x0, duration=6.402s, table=17, n_packets=0, n_bytes=0, idle_age=6, priority=100,ip,metadata=0x5,nw_dst=0.0.0.0/8 actions=drop cookie=0x0, duration=6.402s, table=17, n_packets=0, n_bytes=0, idle_age=6, priority=100,ip,metadata=0x5,nw_dst=224.0.0.0/4 actions=drop cookie=0x0, duration=6.402s, table=17, n_packets=0, n_bytes=0, idle_age=6, priority=50,ip,metadata=0x5,nw_dst=224.0.0.0/4 actions=drop cookie=0x0, duration=6.402s, table=17, n_packets=0, n_bytes=0, idle_age=6, priority=100,ip,metadata=0x5,nw_src=255.255.255.255 actions=drop cookie=0x0, duration=6.402s, table=17, n_packets=0, n_bytes=0, idle_age=6, priority=100,ip,metadata=0x5,nw_src=127.0.0.0/8 actions=drop cookie=0x0, duration=6.402s, table=17, n_packets=0, n_bytes=0, idle_age=6, priority=100,ip,metadata=0x5,nw_src=0.0.0.0/8 actions=drop cookie=0x0, duration=6.402s, table=17, n_packets=0, n_bytes=0, idle_age=6, priority=90,arp,metadata=0x5,arp_op=2 actions=push:NXM_NX_REG0[],push:NXM_OF_ETH_SRC[], push:NXM_NX_ARP_SHA[],push:NXM_OF_ARP_SPA[], pop:NXM_NX_REG0[],pop:NXM_OF_ETH_SRC[], controller(userdata=00.00.00.01.00.00.00.00), pop:NXM_OF_ETH_SRC[],pop:NXM_NX_REG0[] cookie=0x0, duration=6.402s, table=17, n_packets=0, n_bytes=0, idle_age=6, priority=50,metadata=0x5,dl_dst=ff:ff:ff:ff:ff:ff actions=drop cookie=0x0, duration=6.402s, table=17, n_packets=0, n_bytes=0, idle_age=6, priority=30,ip,metadata=0x5,nw_ttl=0 actions=drop cookie=0x0, duration=6.402s, table=17, n_packets=0, n_bytes=0, idle_age=6, priority=30,ip,metadata=0x5,nw_ttl=1 actions=drop cookie=0x0, duration=6.402s, table=17, n_packets=0, n_bytes=0, idle_age=6, priority=0,metadata=0x5 actions=resubmit(,18) cookie=0x0, duration=6.402s, table=18, n_packets=0, n_bytes=0, idle_age=6, priority=0,metadata=0x5 actions=resubmit(,19) cookie=0x0, duration=6.402s, table=19, n_packets=0, n_bytes=0, idle_age=6, priority=0,metadata=0x5 actions=resubmit(,20) cookie=0x0, duration=6.402s, table=22, n_packets=0, n_bytes=0, idle_age=6, priority=0,metadata=0x5 actions=resubmit(,32) cookie=0x0, duration=6.402s, table=48, n_packets=0, n_bytes=0, idle_age=6, priority=0,metadata=0x5 actions=resubmit(,49) Attach a self-service network to the router ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Self-service networks, particularly subnets, must interface with a router to enable connectivity with other self-service and provider networks. #. On the controller node, add the self-service network subnet ``selfservice-v4`` to the router ``router``. .. code-block:: console $ openstack router add subnet router selfservice-v4 .. note:: This command provides no output. OVN operations ^^^^^^^^^^^^^^ The OVN mechanism driver and OVN perform the following operations when adding a subnet as an interface on a router. #. The OVN mechanism driver translates the operation into logical objects and devices in the OVN northbound database and performs a series of operations on them. * Create a logical port. .. 
code-block:: console _uuid : 4c9e70b1-fff0-4d0d-af8e-42d3896eb76f addresses : ["fa:16:3e:0c:55:62 192.168.1.1"] enabled : true external_ids : {"neutron:port_name"=""} name : "5b72d278-5b16-44a6-9aa0-9e513a429506" options : {router-port="lrp-5b72d278-5b16-44a6-9aa0-9e513a429506"} parent_name : [] port_security : [] tag : [] type : router up : false * Add the logical port to logical switch. .. code-block:: console _uuid : 0ab40684-7cf8-4d6c-ae8b-9d9143762d37 acls : [] external_ids : {"neutron:network_name"="selfservice"} name : "neutron-d5aadceb-d8d6-41c8-9252-c5e0fe6c26a5" ports : [1ed7c28b-dc69-42b8-bed6-46477bb8b539, 4c9e70b1-fff0-4d0d-af8e-42d3896eb76f, ae10a5e0-db25-4108-b06a-d2d5c127d9c4] * Create a logical router port object. .. code-block:: console _uuid : f60ccb93-7b3d-4713-922c-37104b7055dc enabled : [] external_ids : {} mac : "fa:16:3e:0c:55:62" name : "lrp-5b72d278-5b16-44a6-9aa0-9e513a429506" network : "192.168.1.1/24" peer : [] * Add the logical router port to the logical router object. .. code-block:: console _uuid : 1c2e340d-dac9-496b-9e86-1065f9dab752 default_gw : [] enabled : [] external_ids : {"neutron:router_name"="router"} name : "neutron-a24fd760-1a99-4eec-9f02-24bb284ff708" ports : [f60ccb93-7b3d-4713-922c-37104b7055dc] static_routes : [] #. The OVN northbound service translates these objects into logical flows, datapath bindings, and the appropriate multicast groups in the OVN southbound database. * Logical flows in the logical router datapath .. code-block:: console Datapath: 4a7485c6-a1ef-46a5-b57c-5ddb6ac15aaa Pipeline: ingress table= 0( lr_in_admission), priority= 50, match=((eth.mcast || eth.dst == fa:16:3e:0c:55:62) && inport == "lrp-5b72d278-5b16-44a6-9aa0-9e513a429506"), action=(next;) table= 1( lr_in_ip_input), priority= 100, match=(ip4.src == {192.168.1.1, 192.168.1.255}), action=(drop;) table= 1( lr_in_ip_input), priority= 90, match=(ip4.dst == 192.168.1.1 && icmp4.type == 8 && icmp4.code == 0), action=(ip4.dst = ip4.src; ip4.src = 192.168.1.1; ip.ttl = 255; icmp4.type = 0; inport = ""; /* Allow sending out inport. */ next; ) table= 1( lr_in_ip_input), priority= 90, match=(inport == "lrp-5b72d278-5b16-44a6-9aa0-9e513a429506" && arp.tpa == 192.168.1.1 && arp.op == 1), action=(eth.dst = eth.src; eth.src = fa:16:3e:0c:55:62; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = fa:16:3e:0c:55:62; arp.tpa = arp.spa; arp.spa = 192.168.1.1; outport = "lrp-5b72d278-5b16-44a6-9aa0-9e513a429506"; inport = ""; /* Allow sending out inport. */ output;) table= 1( lr_in_ip_input), priority= 60, match=(ip4.dst == 192.168.1.1), action=(drop;) table= 4( lr_in_ip_routing), priority= 24, match=(ip4.dst == 192.168.1.0/255.255.255.0), action=(ip.ttl--; reg0 = ip4.dst; reg1 = 192.168.1.1; eth.src = fa:16:3e:0c:55:62; outport = "lrp-5b72d278-5b16-44a6-9aa0-9e513a429506"; next;) Datapath: 4a7485c6-a1ef-46a5-b57c-5ddb6ac15aaa Pipeline: egress table= 1( lr_out_delivery), priority= 100, match=(outport == "lrp-5b72d278-5b16-44a6-9aa0-9e513a429506), action=(output;) * Logical flows in the logical switch datapath .. 
code-block:: console Datapath: 611d35e8-b1e1-442c-bc07-7c6192ad6216 Pipeline: ingress table= 0( ls_in_port_sec_l2), priority= 50, match=(inport == "5b72d278-5b16-44a6-9aa0-9e513a429506"), action=(next;) table= 3( ls_in_pre_acl), priority= 110, match=(ip && inport == "5b72d278-5b16-44a6-9aa0-9e513a429506"), action=(next;) table= 9( ls_in_arp_rsp), priority= 50, match=(arp.tpa == 192.168.1.1 && arp.op == 1), action=(eth.dst = eth.src; eth.src = fa:16:3e:0c:55:62; arp.op = 2; /* ARP reply */ arp.tha = arp.sha; arp.sha = fa:16:3e:0c:55:62; arp.tpa = arp.spa; arp.spa = 192.168.1.1; outport = inport; inport = ""; /* Allow sending out inport. */ output;) table=10( ls_in_l2_lkup), priority= 50, match=(eth.dst == fa:16:3e:fa:76:8f), action=(outport = "f112b99a-8ccc-4c52-8733-7593fa0966ea"; output;) Datapath: 611d35e8-b1e1-442c-bc07-7c6192ad6216 Pipeline: egress table= 1( ls_out_pre_acl), priority= 110, match=(ip && outport == "f112b99a-8ccc-4c52-8733-7593fa0966ea"), action=(next;) table= 7( ls_out_port_sec_l2), priority= 50, match=(outport == "f112b99a-8ccc-4c52-8733-7593fa0966ea"), action=(output;) * Port bindings .. code-block:: console _uuid : 0f86395b-a0d8-40fd-b22c-4c9e238a7880 chassis : [] datapath : 4a7485c6-a1ef-46a5-b57c-5ddb6ac15aaa logical_port : "lrp-5b72d278-5b16-44a6-9aa0-9e513a429506" mac : [] options : {peer="5b72d278-5b16-44a6-9aa0-9e513a429506"} parent_port : [] tag : [] tunnel_key : 1 type : patch _uuid : 8d95ab8c-c2ea-4231-9729-7ecbfc2cd676 chassis : [] datapath : 4aef86e4-e54a-4c83-bb27-d65c670d4b51 logical_port : "5b72d278-5b16-44a6-9aa0-9e513a429506" mac : ["fa:16:3e:0c:55:62 192.168.1.1"] options : {peer="lrp-5b72d278-5b16-44a6-9aa0-9e513a429506"} parent_port : [] tag : [] tunnel_key : 3 type : patch * Multicast groups .. code-block:: console _uuid : 4a6191aa-d8ac-4e93-8306-b0d8fbbe4e35 datapath : 4aef86e4-e54a-4c83-bb27-d65c670d4b51 name : _MC_flood ports : [8d95ab8c-c2ea-4231-9729-7ecbfc2cd676, be71fac3-9f04-41c9-9951-f3f7f1fa1ec5, da5c1269-90b7-4df2-8d76-d4575754b02d] tunnel_key : 65535 In addition, if the self-service network contains ports with IP addresses (typically instances or DHCP servers), OVN creates a logical flow for each port, similar to the following example. .. code-block:: console Datapath: 4a7485c6-a1ef-46a5-b57c-5ddb6ac15aaa Pipeline: ingress table= 5( lr_in_arp_resolve), priority= 100, match=(outport == "lrp-f112b99a-8ccc-4c52-8733-7593fa0966ea" && reg0 == 192.168.1.11), action=(eth.dst = fa:16:3e:b6:91:70; next;) #. On each compute node, the OVN controller service creates patch ports, similar to the following example. .. code-block:: console 7(patch-f112b99a-): addr:4e:01:91:2a:73:66 config: 0 state: 0 speed: 0 Mbps now, 0 Mbps max 8(patch-lrp-f112b): addr:be:9d:7b:31:bb:87 config: 0 state: 0 speed: 0 Mbps now, 0 Mbps max #. On all compute nodes, the OVN controller service creates the following additional flows: .. 
code-block:: console cookie=0x0, duration=6.667s, table=0, n_packets=0, n_bytes=0, idle_age=6, priority=100,in_port=8 actions=load:0x9->OXM_OF_METADATA[],load:0x1->NXM_NX_REG6[], resubmit(,16) cookie=0x0, duration=6.667s, table=0, n_packets=0, n_bytes=0, idle_age=6, priority=100,in_port=7 actions=load:0x7->OXM_OF_METADATA[],load:0x4->NXM_NX_REG6[], resubmit(,16) cookie=0x0, duration=6.674s, table=16, n_packets=0, n_bytes=0, idle_age=6, priority=50,reg6=0x4,metadata=0x7 actions=resubmit(,17) cookie=0x0, duration=6.674s, table=16, n_packets=0, n_bytes=0, idle_age=6, priority=50,reg6=0x1,metadata=0x9, dl_dst=fa:16:3e:fa:76:8f actions=resubmit(,17) cookie=0x0, duration=6.674s, table=16, n_packets=0, n_bytes=0, idle_age=6, priority=50,reg6=0x1,metadata=0x9, dl_dst=01:00:00:00:00:00/01:00:00:00:00:00 actions=resubmit(,17) cookie=0x0, duration=6.674s, table=17, n_packets=0, n_bytes=0, idle_age=6, priority=100,ip,metadata=0x9,nw_src=192.168.1.1 actions=drop cookie=0x0, duration=6.673s, table=17, n_packets=0, n_bytes=0, idle_age=6, priority=100,ip,metadata=0x9,nw_src=192.168.1.255 actions=drop cookie=0x0, duration=6.673s, table=17, n_packets=0, n_bytes=0, idle_age=6, priority=90,arp,reg6=0x1,metadata=0x9, arp_tpa=192.168.1.1,arp_op=1 actions=move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[], mod_dl_src:fa:16:3e:fa:76:8f,load:0x2->NXM_OF_ARP_OP[], move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[], load:0xfa163efa768f->NXM_NX_ARP_SHA[], move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[], load:0xc0a80101->NXM_OF_ARP_SPA[],load:0x1->NXM_NX_REG7[], load:0->NXM_NX_REG6[],load:0->NXM_OF_IN_PORT[],resubmit(,32) cookie=0x0, duration=6.673s, table=17, n_packets=0, n_bytes=0, idle_age=6, priority=90,icmp,metadata=0x9,nw_dst=192.168.1.1, icmp_type=8,icmp_code=0 actions=move:NXM_OF_IP_SRC[]->NXM_OF_IP_DST[],mod_nw_src:192.168.1.1, load:0xff->NXM_NX_IP_TTL[],load:0->NXM_OF_ICMP_TYPE[], load:0->NXM_NX_REG6[],load:0->NXM_OF_IN_PORT[],resubmit(,18) cookie=0x0, duration=6.674s, table=17, n_packets=0, n_bytes=0, idle_age=6, priority=60,ip,metadata=0x9,nw_dst=192.168.1.1 actions=drop cookie=0x0, duration=6.674s, table=20, n_packets=0, n_bytes=0, idle_age=6, priority=24,ip,metadata=0x9,nw_dst=192.168.1.0/24 actions=dec_ttl(),move:NXM_OF_IP_DST[]->NXM_NX_REG0[], load:0xc0a80101->NXM_NX_REG1[],mod_dl_src:fa:16:3e:fa:76:8f, load:0x1->NXM_NX_REG7[],resubmit(,21) cookie=0x0, duration=6.674s, table=21, n_packets=0, n_bytes=0, idle_age=6, priority=100,reg0=0xc0a80103,reg7=0x1,metadata=0x9 actions=mod_dl_dst:fa:16:3e:d5:00:02,resubmit(,22) cookie=0x0, duration=6.674s, table=21, n_packets=0, n_bytes=0, idle_age=6, priority=100,reg0=0xc0a80102,reg7=0x1,metadata=0x9 actions=mod_dl_dst:fa:16:3e:82:8b:0e,resubmit(,22) cookie=0x0, duration=6.673s, table=21, n_packets=0, n_bytes=0, idle_age=6, priority=100,reg0=0xc0a8010b,reg7=0x1,metadata=0x9 actions=mod_dl_dst:fa:16:3e:b6:91:70,resubmit(,22) cookie=0x0, duration=6.673s, table=25, n_packets=0, n_bytes=0, idle_age=6, priority=50,arp,metadata=0x7,arp_tpa=192.168.1.1, arp_op=1 actions=move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[], mod_dl_src:fa:16:3e:fa:76:8f,load:0x2->NXM_OF_ARP_OP[], move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[], load:0xfa163efa768f->NXM_NX_ARP_SHA[], move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[], load:0xc0a80101->NXM_OF_ARP_SPA[], move:NXM_NX_REG6[]->NXM_NX_REG7[],load:0->NXM_NX_REG6[], load:0->NXM_OF_IN_PORT[],resubmit(,32) cookie=0x0, duration=6.674s, table=26, n_packets=0, n_bytes=0, idle_age=6, priority=50,metadata=0x7,dl_dst=fa:16:3e:fa:76:8f actions=load:0x4->NXM_NX_REG7[],resubmit(,32) cookie=0x0, 
duration=6.667s, table=33, n_packets=0, n_bytes=0, idle_age=6, priority=100,reg7=0x4,metadata=0x7 actions=resubmit(,34) cookie=0x0, duration=6.667s, table=33, n_packets=0, n_bytes=0, idle_age=6, priority=100,reg7=0x1,metadata=0x9 actions=resubmit(,34) cookie=0x0, duration=6.667s, table=34, n_packets=0, n_bytes=0, idle_age=6, priority=100,reg6=0x4,reg7=0x4,metadata=0x7 actions=drop cookie=0x0, duration=6.667s, table=34, n_packets=0, n_bytes=0, idle_age=6, priority=100,reg6=0x1,reg7=0x1,metadata=0x9 actions=drop cookie=0x0, duration=6.674s, table=49, n_packets=0, n_bytes=0, idle_age=6, priority=110,ipv6,reg7=0x4,metadata=0x7 actions=resubmit(,50) cookie=0x0, duration=6.673s, table=49, n_packets=0, n_bytes=0, idle_age=6, priority=110,ip,reg7=0x4,metadata=0x7 actions=resubmit(,50) cookie=0x0, duration=6.673s, table=49, n_packets=0, n_bytes=0, idle_age=6, priority=100,reg7=0x1,metadata=0x9 actions=resubmit(,64) cookie=0x0, duration=6.673s, table=55, n_packets=0, n_bytes=0, idle_age=6, priority=50,reg7=0x4,metadata=0x7 actions=resubmit(,64) cookie=0x0, duration=6.667s, table=64, n_packets=0, n_bytes=0, idle_age=6, priority=100,reg7=0x4,metadata=0x7 actions=output:7 cookie=0x0, duration=6.667s, table=64, n_packets=0, n_bytes=0, idle_age=6, priority=100,reg7=0x1,metadata=0x9 actions=output:8 #. On compute nodes not containing a port on the network, the OVN controller also creates additional flows. .. code-block:: console cookie=0x0, duration=6.673s, table=16, n_packets=0, n_bytes=0, idle_age=6, priority=100,metadata=0x7, dl_src=01:00:00:00:00:00/01:00:00:00:00:00 actions=drop cookie=0x0, duration=6.674s, table=16, n_packets=0, n_bytes=0, idle_age=6, priority=100,metadata=0x7,vlan_tci=0x1000/0x1000 actions=drop cookie=0x0, duration=6.674s, table=16, n_packets=0, n_bytes=0, idle_age=6, priority=50,reg6=0x3,metadata=0x7, dl_src=fa:16:3e:b6:91:70 actions=resubmit(,17) cookie=0x0, duration=6.674s, table=16, n_packets=0, n_bytes=0, idle_age=6, priority=50,reg6=0x2,metadata=0x7 actions=resubmit(,17) cookie=0x0, duration=6.674s, table=16, n_packets=0, n_bytes=0, idle_age=6, priority=50,reg6=0x1,metadata=0x7 actions=resubmit(,17) cookie=0x0, duration=6.674s, table=17, n_packets=0, n_bytes=0, idle_age=6, priority=90,ip,reg6=0x3,metadata=0x7, dl_src=fa:16:3e:b6:91:70,nw_src=192.168.1.11 actions=resubmit(,18) cookie=0x0, duration=6.674s, table=17, n_packets=0, n_bytes=0, idle_age=6, priority=90,udp,reg6=0x3,metadata=0x7, dl_src=fa:16:3e:b6:91:70,nw_src=0.0.0.0, nw_dst=255.255.255.255,tp_src=68,tp_dst=67 actions=resubmit(,18) cookie=0x0, duration=6.674s, table=17, n_packets=0, n_bytes=0, idle_age=6, priority=80,ip,reg6=0x3,metadata=0x7, dl_src=fa:16:3e:b6:91:70 actions=drop cookie=0x0, duration=6.673s, table=17, n_packets=0, n_bytes=0, idle_age=6, priority=80,ipv6,reg6=0x3,metadata=0x7, dl_src=fa:16:3e:b6:91:70 actions=drop cookie=0x0, duration=6.670s, table=17, n_packets=0, n_bytes=0, idle_age=6, priority=0,metadata=0x7 actions=resubmit(,18) cookie=0x0, duration=6.674s, table=18, n_packets=0, n_bytes=0, idle_age=6, priority=90,arp,reg6=0x3,metadata=0x7, dl_src=fa:16:3e:b6:91:70,arp_spa=192.168.1.11, arp_sha=fa:16:3e:b6:91:70 actions=resubmit(,19) cookie=0x0, duration=6.673s, table=18, n_packets=0, n_bytes=0, idle_age=6, priority=80,icmp6,reg6=0x3,metadata=0x7,icmp_type=135, icmp_code=0 actions=drop cookie=0x0, duration=6.673s, table=18, n_packets=0, n_bytes=0, idle_age=6, priority=80,icmp6,reg6=0x3,metadata=0x7,icmp_type=136, icmp_code=0 actions=drop cookie=0x0, duration=6.673s, table=18, n_packets=0, 
n_bytes=0, idle_age=6, priority=80,arp,reg6=0x3,metadata=0x7 actions=drop cookie=0x0, duration=6.673s, table=18, n_packets=0, n_bytes=0, idle_age=6, priority=0,metadata=0x7 actions=resubmit(,19) cookie=0x0, duration=6.673s, table=19, n_packets=0, n_bytes=0, idle_age=6, priority=110,icmp6,metadata=0x7,icmp_type=136,icmp_code=0 actions=resubmit(,20) cookie=0x0, duration=6.673s, table=19, n_packets=0, n_bytes=0, idle_age=6, priority=110,icmp6,metadata=0x7,icmp_type=135,icmp_code=0 actions=resubmit(,20) cookie=0x0, duration=6.674s, table=19, n_packets=0, n_bytes=0, idle_age=6, priority=100,ip,metadata=0x7 actions=load:0x1->NXM_NX_REG0[0],resubmit(,20) cookie=0x0, duration=6.670s, table=19, n_packets=0, n_bytes=0, idle_age=6, priority=100,ipv6,metadata=0x7 actions=load:0x1->NXM_NX_REG0[0],resubmit(,20) cookie=0x0, duration=6.674s, table=19, n_packets=0, n_bytes=0, idle_age=6, priority=0,metadata=0x7 actions=resubmit(,20) cookie=0x0, duration=6.673s, table=20, n_packets=0, n_bytes=0, idle_age=6, priority=0,metadata=0x7 actions=resubmit(,21) cookie=0x0, duration=6.674s, table=21, n_packets=0, n_bytes=0, idle_age=6, priority=100,ipv6,reg0=0x1/0x1,metadata=0x7 actions=ct(table=22,zone=NXM_NX_REG5[0..15]) cookie=0x0, duration=6.670s, table=21, n_packets=0, n_bytes=0, idle_age=6, priority=100,ip,reg0=0x1/0x1,metadata=0x7 actions=ct(table=22,zone=NXM_NX_REG5[0..15]) cookie=0x0, duration=6.674s, table=21, n_packets=0, n_bytes=0, idle_age=6, priority=0,metadata=0x7 actions=resubmit(,22) cookie=0x0, duration=6.674s, table=22, n_packets=0, n_bytes=0, idle_age=6, priority=65535,ct_state=-new+est-rel-inv+trk,metadata=0x7 actions=resubmit(,23) cookie=0x0, duration=6.673s, table=22, n_packets=0, n_bytes=0, idle_age=6, priority=65535,ct_state=-new-est+rel-inv+trk,metadata=0x7 actions=resubmit(,23) cookie=0x0, duration=6.673s, table=22, n_packets=0, n_bytes=0, idle_age=6, priority=65535,ct_state=+inv+trk,metadata=0x7 actions=drop cookie=0x0, duration=6.673s, table=22, n_packets=0, n_bytes=0, idle_age=6, priority=65535,icmp6,metadata=0x7,icmp_type=135, icmp_code=0 actions=resubmit(,23) cookie=0x0, duration=6.673s, table=22, n_packets=0, n_bytes=0, idle_age=6, priority=65535,icmp6,metadata=0x7,icmp_type=136, icmp_code=0 actions=resubmit(,23) cookie=0x0, duration=6.674s, table=22, n_packets=0, n_bytes=0, idle_age=6, priority=2002,udp,reg6=0x3,metadata=0x7, nw_dst=255.255.255.255,tp_src=68,tp_dst=67 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, duration=6.674s, table=22, n_packets=0, n_bytes=0, idle_age=6, priority=2002,udp,reg6=0x3,metadata=0x7, nw_dst=192.168.1.0/24,tp_src=68,tp_dst=67 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, duration=6.673s, table=22, n_packets=0, n_bytes=0, idle_age=6, priority=2002,ct_state=+new+trk,ipv6,reg6=0x3,metadata=0x7 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, duration=6.673s, table=22, n_packets=0, n_bytes=0, idle_age=6, priority=2002,ct_state=+new+trk,ip,reg6=0x3,metadata=0x7 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, duration=6.674s, table=22, n_packets=0, n_bytes=0, idle_age=6, priority=2001,ip,reg6=0x3,metadata=0x7 actions=drop cookie=0x0, duration=6.673s, table=22, n_packets=0, n_bytes=0, idle_age=6, priority=2001,ipv6,reg6=0x3,metadata=0x7 actions=drop cookie=0x0, duration=6.674s, table=22, n_packets=0, n_bytes=0, idle_age=6, priority=1,ipv6,metadata=0x7 actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, duration=6.673s, table=22, n_packets=0, n_bytes=0, idle_age=6, priority=1,ip,metadata=0x7 
actions=load:0x1->NXM_NX_REG0[1],resubmit(,23) cookie=0x0, duration=6.673s, table=22, n_packets=0, n_bytes=0, idle_age=6, priority=0,metadata=0x7 actions=resubmit(,23) cookie=0x0, duration=6.673s, table=23, n_packets=0, n_bytes=0, idle_age=6, priority=0,metadata=0x7 actions=resubmit(,24) cookie=0x0, duration=6.674s, table=24, n_packets=0, n_bytes=0, idle_age=6, priority=100,ipv6,reg0=0x2/0x2,metadata=0x7 actions=ct(commit,zone=NXM_NX_REG5[0..15]),resubmit(,25) cookie=0x0, duration=6.674s, table=24, n_packets=0, n_bytes=0, idle_age=6, priority=100,ip,reg0=0x2/0x2,metadata=0x7 actions=ct(commit,zone=NXM_NX_REG5[0..15]),resubmit(,25) cookie=0x0, duration=6.673s, table=24, n_packets=0, n_bytes=0, idle_age=6, priority=100,ipv6,reg0=0x4/0x4,metadata=0x7 actions=ct(table=25,zone=NXM_NX_REG5[0..15],nat) cookie=0x0, duration=6.670s, table=24, n_packets=0, n_bytes=0, idle_age=6, priority=100,ip,reg0=0x4/0x4,metadata=0x7 actions=ct(table=25,zone=NXM_NX_REG5[0..15],nat) cookie=0x0, duration=6.674s, table=24, n_packets=0, n_bytes=0, idle_age=6, priority=0,metadata=0x7 actions=resubmit(,25) cookie=0x0, duration=6.673s, table=25, n_packets=0, n_bytes=0, idle_age=6, priority=50,arp,metadata=0x7,arp_tpa=192.168.1.11, arp_op=1 actions=move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[], mod_dl_src:fa:16:3e:b6:91:70,load:0x2->NXM_OF_ARP_OP[], move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[], load:0xfa163eb69170->NXM_NX_ARP_SHA[], move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[], load:0xc0a8010b->NXM_OF_ARP_SPA[], move:NXM_NX_REG6[]->NXM_NX_REG7[],load:0->NXM_NX_REG6[], load:0->NXM_OF_IN_PORT[],resubmit(,32) cookie=0x0, duration=6.670s, table=25, n_packets=0, n_bytes=0, idle_age=6, priority=50,arp,metadata=0x7,arp_tpa=192.168.1.3,arp_op=1 actions=move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[], mod_dl_src:fa:16:3e:d5:00:02,load:0x2->NXM_OF_ARP_OP[], move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[], load:0xfa163ed50002->NXM_NX_ARP_SHA[], move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[], load:0xc0a80103->NXM_OF_ARP_SPA[], move:NXM_NX_REG6[]->NXM_NX_REG7[],load:0->NXM_NX_REG6[], load:0->NXM_OF_IN_PORT[],resubmit(,32) cookie=0x0, duration=6.670s, table=25, n_packets=0, n_bytes=0, idle_age=6, priority=50,arp,metadata=0x7,arp_tpa=192.168.1.2, arp_op=1 actions=move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[], mod_dl_src:fa:16:3e:82:8b:0e,load:0x2->NXM_OF_ARP_OP[], move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[], load:0xfa163e828b0e->NXM_NX_ARP_SHA[], move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[], load:0xc0a80102->NXM_OF_ARP_SPA[], move:NXM_NX_REG6[]->NXM_NX_REG7[],load:0->NXM_NX_REG6[], load:0->NXM_OF_IN_PORT[],resubmit(,32) cookie=0x0, duration=6.674s, table=25, n_packets=0, n_bytes=0, idle_age=6, priority=0,metadata=0x7 actions=resubmit(,26) cookie=0x0, duration=6.674s, table=26, n_packets=0, n_bytes=0, idle_age=6, priority=100,metadata=0x7, dl_dst=01:00:00:00:00:00/01:00:00:00:00:00 actions=load:0xffff->NXM_NX_REG7[],resubmit(,32) cookie=0x0, duration=6.674s, table=26, n_packets=0, n_bytes=0, idle_age=6, priority=50,metadata=0x7,dl_dst=fa:16:3e:d5:00:02 actions=load:0x2->NXM_NX_REG7[],resubmit(,32) cookie=0x0, duration=6.673s, table=26, n_packets=0, n_bytes=0, idle_age=6, priority=50,metadata=0x7,dl_dst=fa:16:3e:b6:91:70 actions=load:0x3->NXM_NX_REG7[],resubmit(,32) cookie=0x0, duration=6.670s, table=26, n_packets=0, n_bytes=0, idle_age=6, priority=50,metadata=0x7,dl_dst=fa:16:3e:82:8b:0e actions=load:0x1->NXM_NX_REG7[],resubmit(,32) cookie=0x0, duration=6.674s, table=32, n_packets=0, n_bytes=0, idle_age=6, priority=100,reg7=0x3,metadata=0x7 actions=load:0x7->NXM_NX_TUN_ID[0..23], 
set_field:0x3/0xffffffff->tun_metadata0, move:NXM_NX_REG6[0..14]->NXM_NX_TUN_METADATA0[16..30],output:3 cookie=0x0, duration=6.673s, table=32, n_packets=0, n_bytes=0, idle_age=6, priority=100,reg7=0x2,metadata=0x7 actions=load:0x7->NXM_NX_TUN_ID[0..23], set_field:0x2/0xffffffff->tun_metadata0, move:NXM_NX_REG6[0..14]->NXM_NX_TUN_METADATA0[16..30],output:3 cookie=0x0, duration=6.670s, table=32, n_packets=0, n_bytes=0, idle_age=6, priority=100,reg7=0x1,metadata=0x7 actions=load:0x7->NXM_NX_TUN_ID[0..23], set_field:0x1/0xffffffff->tun_metadata0, move:NXM_NX_REG6[0..14]->NXM_NX_TUN_METADATA0[16..30],output:5 cookie=0x0, duration=6.674s, table=48, n_packets=0, n_bytes=0, idle_age=6, priority=0,metadata=0x7 actions=resubmit(,49) cookie=0x0, duration=6.674s, table=49, n_packets=0, n_bytes=0, idle_age=6, priority=110,icmp6,metadata=0x7,icmp_type=135,icmp_code=0 actions=resubmit(,50) cookie=0x0, duration=6.673s, table=49, n_packets=0, n_bytes=0, idle_age=6, priority=110,icmp6,metadata=0x7,icmp_type=136,icmp_code=0 actions=resubmit(,50) cookie=0x0, duration=6.674s, table=49, n_packets=0, n_bytes=0, idle_age=6, priority=100,ipv6,metadata=0x7 actions=load:0x1->NXM_NX_REG0[0],resubmit(,50) cookie=0x0, duration=6.673s, table=49, n_packets=0, n_bytes=0, idle_age=6, priority=100,ip,metadata=0x7 actions=load:0x1->NXM_NX_REG0[0],resubmit(,50) cookie=0x0, duration=6.674s, table=49, n_packets=0, n_bytes=0, idle_age=6, priority=0,metadata=0x7 actions=resubmit(,50) cookie=0x0, duration=6.674s, table=50, n_packets=0, n_bytes=0, idle_age=6, priority=100,ip,reg0=0x1/0x1,metadata=0x7 actions=ct(table=51,zone=NXM_NX_REG5[0..15]) cookie=0x0, duration=6.673s, table=50, n_packets=0, n_bytes=0, idle_age=6, priority=100,ipv6,reg0=0x1/0x1,metadata=0x7 actions=ct(table=51,zone=NXM_NX_REG5[0..15]) cookie=0x0, duration=6.673s, table=50, n_packets=0, n_bytes=0, idle_age=6, priority=0,metadata=0x7 actions=resubmit(,51) cookie=0x0, duration=6.670s, table=51, n_packets=0, n_bytes=0, idle_age=6, priority=0,metadata=0x7 actions=resubmit(,52) cookie=0x0, duration=6.674s, table=52, n_packets=0, n_bytes=0, idle_age=6, priority=65535,ct_state=+inv+trk,metadata=0x7 actions=drop cookie=0x0, duration=6.674s, table=52, n_packets=0, n_bytes=0, idle_age=6, priority=65535,ct_state=-new+est-rel-inv+trk,metadata=0x7 actions=resubmit(,53) cookie=0x0, duration=6.673s, table=52, n_packets=0, n_bytes=0, idle_age=6, priority=65535,ct_state=-new-est+rel-inv+trk,metadata=0x7 actions=resubmit(,53) cookie=0x0, duration=6.673s, table=52, n_packets=0, n_bytes=0, idle_age=6, priority=65535,icmp6,metadata=0x7,icmp_type=136, icmp_code=0 actions=resubmit(,53) cookie=0x0, duration=6.673s, table=52, n_packets=0, n_bytes=0, idle_age=6, priority=65535,icmp6,metadata=0x7,icmp_type=135, icmp_code=0 actions=resubmit(,53) cookie=0x0, duration=6.674s, table=52, n_packets=0, n_bytes=0, idle_age=6, priority=2002,ct_state=+new+trk,ip,reg7=0x3,metadata=0x7, nw_src=192.168.1.11 actions=load:0x1->NXM_NX_REG0[1],resubmit(,53) cookie=0x0, duration=6.670s, table=52, n_packets=0, n_bytes=0, idle_age=6, priority=2002,ct_state=+new+trk,ip,reg7=0x3,metadata=0x7, nw_src=192.168.1.11 actions=load:0x1->NXM_NX_REG0[1],resubmit(,53) cookie=0x0, duration=6.670s, table=52, n_packets=0, n_bytes=0, idle_age=6, priority=2002,udp,reg7=0x3,metadata=0x7, nw_src=192.168.1.0/24,tp_src=67,tp_dst=68 actions=load:0x1->NXM_NX_REG0[1],resubmit(,53) cookie=0x0, duration=6.670s, table=52, n_packets=0, n_bytes=0, idle_age=6, priority=2002,ct_state=+new+trk,ipv6,reg7=0x3, metadata=0x7 
actions=load:0x1->NXM_NX_REG0[1],resubmit(,53) cookie=0x0, duration=6.673s, table=52, n_packets=0, n_bytes=0, idle_age=6, priority=2001,ip,reg7=0x3,metadata=0x7 actions=drop cookie=0x0, duration=6.673s, table=52, n_packets=0, n_bytes=0, idle_age=6, priority=2001,ipv6,reg7=0x3,metadata=0x7 actions=drop cookie=0x0, duration=6.674s, table=52, n_packets=0, n_bytes=0, idle_age=6, priority=1,ip,metadata=0x7 actions=load:0x1->NXM_NX_REG0[1],resubmit(,53) cookie=0x0, duration=6.674s, table=52, n_packets=0, n_bytes=0, idle_age=6, priority=1,ipv6,metadata=0x7 actions=load:0x1->NXM_NX_REG0[1],resubmit(,53) cookie=0x0, duration=6.674s, table=52, n_packets=0, n_bytes=0, idle_age=6, priority=0,metadata=0x7 actions=resubmit(,53) cookie=0x0, duration=6.674s, table=53, n_packets=0, n_bytes=0, idle_age=6, priority=100,ipv6,reg0=0x4/0x4,metadata=0x7 actions=ct(table=54,zone=NXM_NX_REG5[0..15],nat) cookie=0x0, duration=6.674s, table=53, n_packets=0, n_bytes=0, idle_age=6, priority=100,ip,reg0=0x4/0x4,metadata=0x7 actions=ct(table=54,zone=NXM_NX_REG5[0..15],nat) cookie=0x0, duration=6.673s, table=53, n_packets=0, n_bytes=0, idle_age=6, priority=100,ipv6,reg0=0x2/0x2,metadata=0x7 actions=ct(commit,zone=NXM_NX_REG5[0..15]),resubmit(,54) cookie=0x0, duration=6.673s, table=53, n_packets=0, n_bytes=0, idle_age=6, priority=100,ip,reg0=0x2/0x2,metadata=0x7 actions=ct(commit,zone=NXM_NX_REG5[0..15]),resubmit(,54) cookie=0x0, duration=6.674s, table=53, n_packets=0, n_bytes=0, idle_age=6, priority=0,metadata=0x7 actions=resubmit(,54) cookie=0x0, duration=6.674s, table=54, n_packets=0, n_bytes=0, idle_age=6, priority=90,ip,reg7=0x3,metadata=0x7, dl_dst=fa:16:3e:b6:91:70,nw_dst=255.255.255.255 actions=resubmit(,55) cookie=0x0, duration=6.673s, table=54, n_packets=0, n_bytes=0, idle_age=6, priority=90,ip,reg7=0x3,metadata=0x7, dl_dst=fa:16:3e:b6:91:70,nw_dst=192.168.1.11 actions=resubmit(,55) cookie=0x0, duration=6.673s, table=54, n_packets=0, n_bytes=0, idle_age=6, priority=90,ip,reg7=0x3,metadata=0x7, dl_dst=fa:16:3e:b6:91:70,nw_dst=224.0.0.0/4 actions=resubmit(,55) cookie=0x0, duration=6.670s, table=54, n_packets=0, n_bytes=0, idle_age=6, priority=80,ip,reg7=0x3,metadata=0x7, dl_dst=fa:16:3e:b6:91:70 actions=drop cookie=0x0, duration=6.670s, table=54, n_packets=0, n_bytes=0, idle_age=6, priority=80,ipv6,reg7=0x3,metadata=0x7, dl_dst=fa:16:3e:b6:91:70 actions=drop cookie=0x0, duration=6.674s, table=54, n_packets=0, n_bytes=0, idle_age=6, priority=0,metadata=0x7 actions=resubmit(,55) cookie=0x0, duration=6.673s, table=55, n_packets=0, n_bytes=0, idle_age=6, priority=100,metadata=0x7, dl_dst=01:00:00:00:00:00/01:00:00:00:00:00 actions=resubmit(,64) cookie=0x0, duration=6.674s, table=55, n_packets=0, n_bytes=0, idle_age=6, priority=50,reg7=0x3,metadata=0x7, dl_dst=fa:16:3e:b6:91:70 actions=resubmit(,64) cookie=0x0, duration=6.673s, table=55, n_packets=0, n_bytes=0, idle_age=6, priority=50,reg7=0x1,metadata=0x7 actions=resubmit(,64) cookie=0x0, duration=6.670s, table=55, n_packets=0, n_bytes=0, idle_age=6, priority=50,reg7=0x2,metadata=0x7 actions=resubmit(,64) #. On compute nodes containing a port on the network, the OVN controller also creates an additional flow. .. code-block:: console cookie=0x0, duration=13.358s, table=52, n_packets=0, n_bytes=0, idle_age=13, priority=2002,ct_state=+new+trk,ipv6,reg7=0x3, metadata=0x7,ipv6_src=:: actions=load:0x1->NXM_NX_REG0[1],resubmit(,53) .. todo: Future commit Attach the router to a second self-service network ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
todo: Add after NAT patches merge.

Attach the router to an external network
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. _refarch-selfservice-networks:

Self-service networks
---------------------

A self-service (project) network includes only virtual components, thus
enabling projects to manage such networks without additional configuration
of the underlying physical network. The OVN mechanism driver supports
Geneve and VLAN network types with a preference toward Geneve. Projects
can choose to isolate self-service networks, connect two or more together
via routers, or connect them to provider networks via routers with
appropriate capabilities. Similar to provider networks, self-service
networks can use arbitrary names.

.. note::

   Similar to provider networks, self-service VLAN networks map to a
   unique bridge on each compute node that supports launching instances
   on those networks. Self-service VLAN networks also require several
   commands at the host and OVS levels. The following example assumes
   use of Geneve self-service networks.

Create a self-service network
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Creating a self-service network involves several commands at the
Networking service level that yield a series of operations at the OVN
level to create the virtual network components. The following example
creates a Geneve self-service network and binds a subnet to it. The
subnet uses DHCP to distribute IP addresses to instances.

#. On the controller node, source the credentials for a regular
   (non-privileged) project. The following example uses the ``demo``
   project.

#. On the controller node, create a self-service network in the
   Networking service.

   .. code-block:: console

      $ openstack network create selfservice
      +-------------------------+--------------------------------------+
      | Field                   | Value                                |
      +-------------------------+--------------------------------------+
      | admin_state_up          | UP                                   |
      | availability_zone_hints |                                      |
      | availability_zones      |                                      |
      | created_at              | 2016-06-09T15:42:41                  |
      | description             |                                      |
      | id                      | f49791f7-e653-4b43-99b1-0f5557c313e4 |
      | ipv4_address_scope      | None                                 |
      | ipv6_address_scope      | None                                 |
      | mtu                     | 1442                                 |
      | name                    | selfservice                          |
      | port_security_enabled   | True                                 |
      | project_id              | 1ef26f483b9d44e8ac0c97388d6cb609     |
      | router_external         | Internal                             |
      | shared                  | False                                |
      | status                  | ACTIVE                               |
      | subnets                 |                                      |
      | tags                    | []                                   |
      | updated_at              | 2016-06-09T15:42:41                  |
      +-------------------------+--------------------------------------+

OVN operations
^^^^^^^^^^^^^^

The OVN mechanism driver and OVN perform the following operations
during creation of a self-service network.

#. The mechanism driver translates the network into a logical switch in
   the OVN northbound database.

   .. code-block:: console

      uuid                : 0ab40684-7cf8-4d6c-ae8b-9d9143762d37
      acls                : []
      external_ids        : {"neutron:network_name"="selfservice"}
      name                : "neutron-d5aadceb-d8d6-41c8-9252-c5e0fe6c26a5"
      ports               : []

#. The OVN northbound service translates this object into new datapath
   bindings and logical flows in the OVN southbound database.

   * Datapath bindings

     .. code-block:: console

        _uuid               : 0b214af6-8910-489c-926a-fd0ed16a8251
        external_ids        : {logical-switch="15e2c80b-1461-4003-9869-80416cd97de5"}
        tunnel_key          : 5

   * Logical flows

     ..
code-block:: console Datapath: 0b214af6-8910-489c-926a-fd0ed16a8251 Pipeline: ingress table= 0( ls_in_port_sec_l2), priority= 100, match=(eth.src[40]), action=(drop;) table= 0( ls_in_port_sec_l2), priority= 100, match=(vlan.present), action=(drop;) table= 1( ls_in_port_sec_ip), priority= 0, match=(1), action=(next;) table= 2( ls_in_port_sec_nd), priority= 0, match=(1), action=(next;) table= 3( ls_in_pre_acl), priority= 0, match=(1), action=(next;) table= 4( ls_in_pre_lb), priority= 0, match=(1), action=(next;) table= 5( ls_in_pre_stateful), priority= 100, match=(reg0[0] == 1), action=(ct_next;) table= 5( ls_in_pre_stateful), priority= 0, match=(1), action=(next;) table= 6( ls_in_acl), priority= 0, match=(1), action=(next;) table= 7( ls_in_lb), priority= 0, match=(1), action=(next;) table= 8( ls_in_stateful), priority= 100, match=(reg0[2] == 1), action=(ct_lb;) table= 8( ls_in_stateful), priority= 100, match=(reg0[1] == 1), action=(ct_commit; next;) table= 8( ls_in_stateful), priority= 0, match=(1), action=(next;) table= 9( ls_in_arp_rsp), priority= 0, match=(1), action=(next;) table=10( ls_in_l2_lkup), priority= 100, match=(eth.mcast), action=(outport = "_MC_flood"; output;) Datapath: 0b214af6-8910-489c-926a-fd0ed16a8251 Pipeline: egress table= 0( ls_out_pre_lb), priority= 0, match=(1), action=(next;) table= 1( ls_out_pre_acl), priority= 0, match=(1), action=(next;) table= 2(ls_out_pre_stateful), priority= 100, match=(reg0[0] == 1), action=(ct_next;) table= 2(ls_out_pre_stateful), priority= 0, match=(1), action=(next;) table= 3( ls_out_lb), priority= 0, match=(1), action=(next;) table= 4( ls_out_acl), priority= 0, match=(1), action=(next;) table= 5( ls_out_stateful), priority= 100, match=(reg0[1] == 1), action=(ct_commit; next;) table= 5( ls_out_stateful), priority= 100, match=(reg0[2] == 1), action=(ct_lb;) table= 5( ls_out_stateful), priority= 0, match=(1), action=(next;) table= 6( ls_out_port_sec_ip), priority= 0, match=(1), action=(next;) table= 7( ls_out_port_sec_l2), priority= 100, match=(eth.mcast), action=(output;) .. note:: These actions do not create flows on any nodes. Create a subnet on the self-service network ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A self-service network requires at least one subnet. In most cases, the environment provides suitable values for IP address allocation for instances, default gateway IP address, and metadata such as name resolution. #. On the controller node, create a subnet bound to the self-service network ``selfservice``. .. code-block:: console $ openstack subnet create --network selfservice --subnet-range 192.168.1.0/24 selfservice-v4 +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | allocation_pools | 192.168.1.2-192.168.1.254 | | cidr | 192.168.1.0/24 | | created_at | 2016-06-16 00:19:08+00:00 | | description | | | dns_nameservers | | | enable_dhcp | True | | gateway_ip | 192.168.1.1 | | headers | | | host_routes | | | id | 8f027f25-0112-45b9-a1b9-2f8097c57219 | | ip_version | 4 | | ipv6_address_mode | None | | ipv6_ra_mode | None | | name | selfservice-v4 | | network_id | 8ed4e43b-63ef-41ed-808b-b59f1120aec0 | | project_id | b1ebf33664df402693f729090cfab861 | | subnetpool_id | None | | updated_at | 2016-06-16 00:19:08+00:00 | +-------------------+--------------------------------------+ OVN operations ^^^^^^^^^^^^^^ .. 
todo: Update this part with the new agentless DHCP details

The OVN mechanism driver and OVN perform the following operations during
creation of a subnet on a self-service network.

#. If the subnet uses DHCP for IP address management, create logical
   ports for each DHCP agent serving the subnet and bind them to the
   logical switch. In this example, the subnet contains two DHCP agents.

   .. code-block:: console

      _uuid               : 1ed7c28b-dc69-42b8-bed6-46477bb8b539
      addresses           : ["fa:16:3e:94:db:5e 192.168.1.2"]
      enabled             : true
      external_ids        : {"neutron:port_name"=""}
      name                : "0cfbbdca-ff58-4cf8-a7d3-77daaebe3056"
      options             : {}
      parent_name         : []
      port_security       : []
      tag                 : []
      type                : ""
      up                  : true

      _uuid               : ae10a5e0-db25-4108-b06a-d2d5c127d9c4
      addresses           : ["fa:16:3e:90:bd:f1 192.168.1.3"]
      enabled             : true
      external_ids        : {"neutron:port_name"=""}
      name                : "74930ace-d939-4bca-b577-fccba24c3fca"
      options             : {}
      parent_name         : []
      port_security       : []
      tag                 : []
      type                : ""
      up                  : true

      _uuid               : 0ab40684-7cf8-4d6c-ae8b-9d9143762d37
      acls                : []
      external_ids        : {"neutron:network_name"="selfservice"}
      name                : "neutron-d5aadceb-d8d6-41c8-9252-c5e0fe6c26a5"
      ports               : [1ed7c28b-dc69-42b8-bed6-46477bb8b539,
                             ae10a5e0-db25-4108-b06a-d2d5c127d9c4]

#. The OVN northbound service creates port bindings for these logical
   ports and adds them to the appropriate multicast group.

   * Port bindings

     .. code-block:: console

        _uuid               : 3e463ca0-951c-46fd-b6cf-05392fa3aa1f
        chassis             : 6a9d0619-8818-41e6-abef-2f3d9a597c03
        datapath            : 0b214af6-8910-489c-926a-fd0ed16a8251
        logical_port        : "a203b410-97c1-4e4a-b0c3-558a10841c16"
        mac                 : ["fa:16:3e:a1:dc:58 192.168.1.3"]
        options             : {}
        parent_port         : []
        tag                 : []
        tunnel_key          : 2
        type                : ""

        _uuid               : fa7b294d-2a62-45ae-8de3-a41c002de6de
        chassis             : d63e8ae8-caf3-4a6b-9840-5c3a57febcac
        datapath            : 0b214af6-8910-489c-926a-fd0ed16a8251
        logical_port        : "39b23721-46f4-4747-af54-7e12f22b3397"
        mac                 : ["fa:16:3e:1a:b4:23 192.168.1.2"]
        options             : {}
        parent_port         : []
        tag                 : []
        tunnel_key          : 1
        type                : ""

   * Multicast groups

     .. code-block:: console

        _uuid               : c08d0102-c414-4a47-98d9-dd3fa9f9901c
        datapath            : 0b214af6-8910-489c-926a-fd0ed16a8251
        name                : _MC_flood
        ports               : [3e463ca0-951c-46fd-b6cf-05392fa3aa1f,
                               fa7b294d-2a62-45ae-8de3-a41c002de6de]
        tunnel_key          : 65535

#. The OVN northbound service translates the logical ports into logical
   flows in the OVN southbound database.

   .. code-block:: console

      Datapath: 0b214af6-8910-489c-926a-fd0ed16a8251  Pipeline: ingress
        table= 0( ls_in_port_sec_l2), priority= 50,
          match=(inport == "39b23721-46f4-4747-af54-7e12f22b3397"),
          action=(next;)
        table= 0( ls_in_port_sec_l2), priority= 50,
          match=(inport == "a203b410-97c1-4e4a-b0c3-558a10841c16"),
          action=(next;)
        table= 9( ls_in_arp_rsp), priority= 50,
          match=(arp.tpa == 192.168.1.2 && arp.op == 1),
          action=(eth.dst = eth.src; eth.src = fa:16:3e:1a:b4:23;
            arp.op = 2; /* ARP reply */ arp.tha = arp.sha;
            arp.sha = fa:16:3e:1a:b4:23; arp.tpa = arp.spa;
            arp.spa = 192.168.1.2; outport = inport;
            inport = ""; /* Allow sending out inport. */ output;)
        table= 9( ls_in_arp_rsp), priority= 50,
          match=(arp.tpa == 192.168.1.3 && arp.op == 1),
          action=(eth.dst = eth.src; eth.src = fa:16:3e:a1:dc:58;
            arp.op = 2; /* ARP reply */ arp.tha = arp.sha;
            arp.sha = fa:16:3e:a1:dc:58; arp.tpa = arp.spa;
            arp.spa = 192.168.1.3; outport = inport;
            inport = ""; /* Allow sending out inport.
*/ output;)
        table=10( ls_in_l2_lkup), priority= 50,
          match=(eth.dst == fa:16:3e:a1:dc:58),
          action=(outport = "a203b410-97c1-4e4a-b0c3-558a10841c16"; output;)
        table=10( ls_in_l2_lkup), priority= 50,
          match=(eth.dst == fa:16:3e:1a:b4:23),
          action=(outport = "39b23721-46f4-4747-af54-7e12f22b3397"; output;)

      Datapath: 0b214af6-8910-489c-926a-fd0ed16a8251  Pipeline: egress
        table= 7( ls_out_port_sec_l2), priority= 50,
          match=(outport == "39b23721-46f4-4747-af54-7e12f22b3397"),
          action=(output;)
        table= 7( ls_out_port_sec_l2), priority= 50,
          match=(outport == "a203b410-97c1-4e4a-b0c3-558a10841c16"),
          action=(output;)

#. For each compute node without a DHCP agent on the subnet:

   * The OVN controller service translates these objects into flows on
     the integration bridge ``br-int``.

     .. code-block:: console

        # ovs-ofctl dump-flows br-int
        cookie=0x0, duration=9.054s, table=32, n_packets=0, n_bytes=0,
            idle_age=9, priority=100,reg7=0xffff,metadata=0x5
            actions=load:0x5->NXM_NX_TUN_ID[0..23],
            set_field:0xffff/0xffffffff->tun_metadata0,
            move:NXM_NX_REG6[0..14]->NXM_NX_TUN_METADATA0[16..30],
            output:4,output:3

#. For each compute node with a DHCP agent on the subnet:

   * Creation of a DHCP network namespace adds a virtual switch port that
     connects the DHCP agent and its ``dnsmasq`` process to the
     integration bridge.

     .. code-block:: console

        # ovs-ofctl show br-int
        OFPT_FEATURES_REPLY (xid=0x2): dpid:000022024a1dc045
        n_tables:254, n_buffers:256
        capabilities: FLOW_STATS TABLE_STATS PORT_STATS QUEUE_STATS
        ARP_MATCH_IP
        actions: output enqueue set_vlan_vid set_vlan_pcp strip_vlan
        mod_dl_src mod_dl_dst mod_nw_src mod_nw_dst mod_nw_tos
        mod_tp_src mod_tp_dst
        9(tap39b23721-46): addr:00:00:00:00:b0:5d
            config:     PORT_DOWN
            state:      LINK_DOWN
            speed: 0 Mbps now, 0 Mbps max

   * The OVN controller service translates these objects into flows on
     the integration bridge.

     ..
code-block:: console cookie=0x0, duration=21.074s, table=0, n_packets=8, n_bytes=648, idle_age=11, priority=100,in_port=9 actions=load:0x2->NXM_NX_REG5[],load:0x5->OXM_OF_METADATA[], load:0x1->NXM_NX_REG6[],resubmit(,16) cookie=0x0, duration=21.076s, table=16, n_packets=0, n_bytes=0, idle_age=21, priority=100,metadata=0x5, dl_src=01:00:00:00:00:00/01:00:00:00:00:00 actions=drop cookie=0x0, duration=21.075s, table=16, n_packets=0, n_bytes=0, idle_age=21, priority=100,metadata=0x5,vlan_tci=0x1000/0x1000 actions=drop cookie=0x0, duration=21.076s, table=16, n_packets=0, n_bytes=0, idle_age=21, priority=50,reg6=0x2,metadata=0x5 actions=resubmit(,17) cookie=0x0, duration=21.075s, table=16, n_packets=8, n_bytes=648, idle_age=11, priority=50,reg6=0x1,metadata=0x5 actions=resubmit(,17) cookie=0x0, duration=21.075s, table=17, n_packets=8, n_bytes=648, idle_age=11, priority=0,metadata=0x5 actions=resubmit(,18) cookie=0x0, duration=21.076s, table=18, n_packets=8, n_bytes=648, idle_age=11, priority=0,metadata=0x5 actions=resubmit(,19) cookie=0x0, duration=21.076s, table=19, n_packets=8, n_bytes=648, idle_age=11, priority=0,metadata=0x5 actions=resubmit(,20) cookie=0x0, duration=21.075s, table=20, n_packets=8, n_bytes=648, idle_age=11, priority=0,metadata=0x5 actions=resubmit(,21) cookie=0x0, duration=5.398s, table=21, n_packets=0, n_bytes=0, idle_age=5, priority=100,ipv6,reg0=0x1/0x1,metadata=0x5 actions=ct(table=22,zone=NXM_NX_REG5[0..15]) cookie=0x0, duration=5.398s, table=21, n_packets=0, n_bytes=0, idle_age=5, priority=100,ip,reg0=0x1/0x1,metadata=0x5 actions=ct(table=22,zone=NXM_NX_REG5[0..15]) cookie=0x0, duration=5.398s, table=22, n_packets=6, n_bytes=508, idle_age=2, priority=0,metadata=0x5 actions=resubmit(,23) cookie=0x0, duration=5.398s, table=23, n_packets=6, n_bytes=508, idle_age=2, priority=0,metadata=0x5 actions=resubmit(,24) cookie=0x0, duration=5.398s, table=24, n_packets=0, n_bytes=0, idle_age=5, priority=100,ipv6,reg0=0x4/0x4,metadata=0x5 actions=ct(table=25,zone=NXM_NX_REG5[0..15],nat) cookie=0x0, duration=5.398s, table=24, n_packets=0, n_bytes=0, idle_age=5, priority=100,ip,reg0=0x4/0x4,metadata=0x5 actions=ct(table=25,zone=NXM_NX_REG5[0..15],nat) cookie=0x0, duration=5.398s, table=24, n_packets=0, n_bytes=0, idle_age=5, priority=100,ipv6,reg0=0x2/0x2,metadata=0x5 actions=ct(commit,zone=NXM_NX_REG5[0..15]),resubmit(,25) cookie=0x0, duration=5.398s, table=24, n_packets=0, n_bytes=0, idle_age=5, priority=100,ip,reg0=0x2/0x2,metadata=0x5 actions=ct(commit,zone=NXM_NX_REG5[0..15]),resubmit(,25) cookie=0x0, duration=5.399s, table=24, n_packets=6, n_bytes=508, idle_age=2, priority=0,metadata=0x5 actions=resubmit(,25) cookie=0x0, duration=5.398s, table=25, n_packets=0, n_bytes=0, idle_age=5, priority=50,arp,metadata=0x5, arp_tpa=192.168.1.2,arp_op=1 actions=move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[], mod_dl_src:fa:16:3e:82:8b:0e,load:0x2->NXM_OF_ARP_OP[], move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[], load:0xfa163e828b0e->NXM_NX_ARP_SHA[], move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[], load:0xc0a80102->NXM_OF_ARP_SPA[], move:NXM_NX_REG6[]->NXM_NX_REG7[],load:0->NXM_NX_REG6[], load:0->NXM_OF_IN_PORT[],resubmit(,32) cookie=0x0, duration=5.378s, table=25, n_packets=0, n_bytes=0, idle_age=5, priority=50,arp,metadata=0x5,arp_tpa=192.168.1.3, arp_op=1 actions=move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[], mod_dl_src:fa:16:3e:d5:00:02,load:0x2->NXM_OF_ARP_OP[], move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[], load:0xfa163ed50002->NXM_NX_ARP_SHA[], move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[], load:0xc0a80103->NXM_OF_ARP_SPA[], 
move:NXM_NX_REG6[]->NXM_NX_REG7[],load:0->NXM_NX_REG6[], load:0->NXM_OF_IN_PORT[],resubmit(,32) cookie=0x0, duration=5.399s, table=25, n_packets=6, n_bytes=508, idle_age=2, priority=0,metadata=0x5 actions=resubmit(,26) cookie=0x0, duration=5.399s, table=26, n_packets=6, n_bytes=508, idle_age=2, priority=100,metadata=0x5, dl_dst=01:00:00:00:00:00/01:00:00:00:00:00 actions=load:0xffff->NXM_NX_REG7[],resubmit(,32) cookie=0x0, duration=5.398s, table=26, n_packets=0, n_bytes=0, idle_age=5, priority=50,metadata=0x5,dl_dst=fa:16:3e:d5:00:02 actions=load:0x2->NXM_NX_REG7[],resubmit(,32) cookie=0x0, duration=5.398s, table=26, n_packets=0, n_bytes=0, idle_age=5, priority=50,metadata=0x5,dl_dst=fa:16:3e:82:8b:0e actions=load:0x1->NXM_NX_REG7[],resubmit(,32) cookie=0x0, duration=21.038s, table=32, n_packets=0, n_bytes=0, idle_age=21, priority=100,reg7=0x2,metadata=0x5 actions=load:0x5->NXM_NX_TUN_ID[0..23], set_field:0x2/0xffffffff->tun_metadata0, move:NXM_NX_REG6[0..14]->NXM_NX_TUN_METADATA0[16..30],output:4 cookie=0x0, duration=21.038s, table=32, n_packets=8, n_bytes=648, idle_age=11, priority=100,reg7=0xffff,metadata=0x5 actions=load:0x5->NXM_NX_TUN_ID[0..23], set_field:0xffff/0xffffffff->tun_metadata0, move:NXM_NX_REG6[0..14]->NXM_NX_TUN_METADATA0[16..30], output:4,resubmit(,33) cookie=0x0, duration=5.397s, table=33, n_packets=12, n_bytes=1016, idle_age=2, priority=100,reg7=0xffff,metadata=0x5 actions=load:0x1->NXM_NX_REG7[],resubmit(,34), load:0xffff->NXM_NX_REG7[] cookie=0x0, duration=5.397s, table=33, n_packets=0, n_bytes=0, idle_age=5, priority=100,reg7=0x1,metadata=0x5 actions=resubmit(,34) cookie=0x0, duration=21.074s, table=34, n_packets=8, n_bytes=648, idle_age=11, priority=100,reg6=0x1,reg7=0x1,metadata=0x5 actions=drop cookie=0x0, duration=21.076s, table=48, n_packets=8, n_bytes=648, idle_age=11, priority=0,metadata=0x5 actions=resubmit(,49) cookie=0x0, duration=21.075s, table=49, n_packets=8, n_bytes=648, idle_age=11, priority=0,metadata=0x5 actions=resubmit(,50) cookie=0x0, duration=5.398s, table=50, n_packets=0, n_bytes=0, idle_age=5, priority=100,ipv6,reg0=0x1/0x1,metadata=0x5 actions=ct(table=51,zone=NXM_NX_REG5[0..15]) cookie=0x0, duration=5.398s, table=50, n_packets=0, n_bytes=0, idle_age=5, priority=100,ip,reg0=0x1/0x1,metadata=0x5 actions=ct(table=51,zone=NXM_NX_REG5[0..15]) cookie=0x0, duration=5.398s, table=50, n_packets=6, n_bytes=508, idle_age=3, priority=0,metadata=0x5 actions=resubmit(,51) cookie=0x0, duration=5.398s, table=51, n_packets=6, n_bytes=508, idle_age=3, priority=0,metadata=0x5 actions=resubmit(,52) cookie=0x0, duration=5.398s, table=52, n_packets=6, n_bytes=508, idle_age=3, priority=0,metadata=0x5 actions=resubmit(,53) cookie=0x0, duration=5.399s, table=53, n_packets=0, n_bytes=0, idle_age=5, priority=100,ipv6,reg0=0x4/0x4,metadata=0x5 actions=ct(table=54,zone=NXM_NX_REG5[0..15],nat) cookie=0x0, duration=5.398s, table=53, n_packets=0, n_bytes=0, idle_age=5, priority=100,ip,reg0=0x4/0x4,metadata=0x5 actions=ct(table=54,zone=NXM_NX_REG5[0..15],nat) cookie=0x0, duration=5.398s, table=53, n_packets=0, n_bytes=0, idle_age=5, priority=100,ip,reg0=0x2/0x2,metadata=0x5 actions=ct(commit,zone=NXM_NX_REG5[0..15]),resubmit(,54) cookie=0x0, duration=5.398s, table=53, n_packets=0, n_bytes=0, idle_age=5, priority=100,ipv6,reg0=0x2/0x2,metadata=0x5 actions=ct(commit,zone=NXM_NX_REG5[0..15]),resubmit(,54) cookie=0x0, duration=5.398s, table=53, n_packets=6, n_bytes=508, idle_age=3, priority=0,metadata=0x5 actions=resubmit(,54) cookie=0x0, duration=5.398s, table=54, n_packets=6, 
n_bytes=508, idle_age=3, priority=0,metadata=0x5 actions=resubmit(,55)
cookie=0x0, duration=5.398s, table=55, n_packets=6, n_bytes=508,
    idle_age=3, priority=100,metadata=0x5,
    dl_dst=01:00:00:00:00:00/01:00:00:00:00:00 actions=resubmit(,64)
cookie=0x0, duration=5.398s, table=55, n_packets=0, n_bytes=0,
    idle_age=5, priority=50,reg7=0x1,metadata=0x5 actions=resubmit(,64)
cookie=0x0, duration=5.398s, table=55, n_packets=0, n_bytes=0,
    idle_age=5, priority=50,reg7=0x2,metadata=0x5 actions=resubmit(,64)
cookie=0x0, duration=5.397s, table=64, n_packets=6, n_bytes=508,
    idle_age=3, priority=100,reg7=0x1,metadata=0x5 actions=output:9

neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/routing.rst

.. _ovn_routing:

=======
Routing
=======

North/South
-----------

The different configurations are detailed in the
:doc:`/admin/ovn/refarch/refarch`.

Non distributed FIP
~~~~~~~~~~~~~~~~~~~

North/South traffic flows through the active chassis for each router for
SNAT traffic, and also for FIPs.

.. image:: figures/ovn-north-south.png
   :alt: L3 North South non-distributed FIP
   :align: center

Distributed Floating IP
~~~~~~~~~~~~~~~~~~~~~~~

In the following diagram we can see how VMs with no floating IP (VM1, VM6)
still communicate through the gateway nodes using SNAT on the edge routers
R1 and R2, while VM3, VM4, and VM5 have an assigned floating IP and their
traffic flows directly through the local provider bridge/interface to the
external network.

.. image:: figures/ovn-north-south-distributed-fip.png
   :alt: L3 North South distributed FIP
   :align: center

L3HA support
~~~~~~~~~~~~

The OVN driver implements L3 high availability transparently; you do not
need to enable any configuration flags. As soon as more than one chassis
is capable of acting as an L3 gateway to the specific external network
attached to the router, the driver schedules the router gateway port to
multiple chassis, making use of the ``gateway_chassis`` column on OVN's
``Logical_Router_Port`` table.

In order to have external connectivity, either:

* some gateway nodes have ``ovn-cms-options`` with the value
  ``enable-chassis-as-gw`` in the ``external_ids`` column of the
  ``Open_vSwitch`` table, or

* if no gateway node has the ``external_ids`` column set to that value,
  all nodes are eligible to host gateway chassis.

Example of how to enable a chassis to host gateways:

.. code-block:: console

   $ ovs-vsctl set open . external-ids:ovn-cms-options="enable-chassis-as-gw"

At a low level, this functionality is implemented mostly by OpenFlow rules
with bundle active_passive outputs. The ARP responder and router
enablement/disablement are handled by ovn-controller. Gratuitous ARPs for
FIPs and router external addresses are periodically sent by ovn-controller
itself.

BFD monitoring
^^^^^^^^^^^^^^

OVN monitors the availability of the chassis via the BFD protocol, which is
encapsulated on top of the Geneve tunnels established from chassis to
chassis.

.. image:: figures/ovn-l3ha-bfd.png
   :alt: L3HA BFD monitoring
   :align: center

Each chassis that is marked as a gateway chassis will monitor all the other
gateway chassis in the deployment as well as compute node chassis, to let
the gateways enable or disable routing of packets and ARP
responses/announcements.
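To confirm that the BFD sessions toward the other chassis are actually
established, you can query Open vSwitch directly on any node. This is only
an illustrative sketch: ``ovs-appctl bfd/show`` summarizes all BFD sessions
handled by ``ovs-vswitchd``, while the tunnel interface name ``ovn-hv2-0``
in the second command is a hypothetical example (list the real names with
``ovs-vsctl list-ifaces br-int``).

.. code-block:: console

   # ovs-appctl bfd/show
   # ovs-vsctl get Interface ovn-hv2-0 bfd_status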
Each compute node chassis will monitor each gateway chassis via BFD to
automatically steer external traffic (SNAT/DNAT) through the active chassis
for a given router.

.. image:: figures/ovn-l3ha-bfd-3gw.png
   :alt: L3HA BFD monitoring (3 gateway nodes)
   :align: center

The gateway nodes monitor each other in a star topology. Compute nodes do
not monitor each other because that is not necessary.

Failover (detected by BFD)
~~~~~~~~~~~~~~~~~~~~~~~~~~

Look at the following example:

.. image:: figures/ovn-l3ha-bfd-failover.png
   :alt: L3HA BFD monitoring failover
   :align: center

BFD monitoring from the compute nodes will detect that the tunnel endpoint
to gateway node 1 is down, so traffic that needs to reach the external
network through the router will be directed to the lower-priority chassis
for R1. R2 stays the same because gateway node 2 was already the
highest-priority chassis for R2.

Gateway node 2 will detect that the tunnel endpoint to gateway node 1 is
down, so it will become responsible for the external leg of R1, and its
ovn-controller will populate flows for the external ARP responder, traffic
forwarding (N/S), and periodic gratuitous ARPs. Gateway node 2 will also
bind the external port of the router (represented as a chassis-redirect
port in the southbound database).

If gateway node 1 is still alive, it will detect the failure of interface 2
because it no longer sees any other nodes. No mechanism exists yet to
detect external network failure, so as a good practice we recommend
carrying all interfaces over a single bonded interface with VLANs.

Supported failure modes are:

- the gateway chassis becomes disconnected from the network (tunneling
  interface)
- ovs-vswitchd is stopped (it is responsible for BFD signaling)
- ovn-controller is stopped, as ovn-controller removes itself as a
  registered chassis

.. note:: As with the VRRP or CARP protocols, this detection mechanism only
   works for link failures, not for routing failures.

Failback
~~~~~~~~

L3HA behavior in OVN is preemptive (at least for the time being): routers
are balanced back to their original chassis, which avoids any of the
gateway nodes becoming a bottleneck.

.. image:: figures/ovn-l3ha-bfd.png
   :alt: L3HA BFD monitoring (Fail back)
   :align: center

East/West
---------

East/West traffic with the OVN driver is completely distributed, which
means that routing happens internally on the compute nodes, without the
need to go through the gateway nodes.

Traffic going through a virtual router, different subnets
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Traffic that goes through a virtual router from one virtual network/subnet
to another flows directly from compute node to compute node, encapsulated
as usual, while all the routing operations, such as decrementing the TTL or
switching MAC addresses, are handled in OpenFlow at the source host of the
packet.

.. image:: figures/ovn-east-west-3.png
   :alt: East/West traffic across subnets
   :align: center

Traffic across the same subnet
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Traffic across a subnet happens as described in the following diagram.
Although this kind of communication does not make use of routing at all
(just encapsulation), it is included for completeness.

.. image:: figures/ovn-east-west-2.png
   :alt: East/West traffic same subnet
   :align: center

Traffic goes directly from instance to instance through ``br-int`` when
both instances live on the same host (VM1 and VM2), or via encapsulation
when they live on different hosts (VM3 and VM4).

neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/troubleshooting.rst

.. _ovn_troubleshooting:

===============
Troubleshooting
===============

The following sections describe common problems that you might encounter
during or after the installation of the OVN ML2 driver with Devstack, and
possible solutions to these problems.

Failure launching VMs
---------------------

Disable AppArmor
~~~~~~~~~~~~~~~~

On Ubuntu, you might encounter libvirt permission errors when trying to
create OVS ports after launching a VM (visible in the nova compute log).
Disabling AppArmor might help with this problem; check out
https://help.ubuntu.com/community/AppArmor for instructions on how to
disable it.

Multi-Node setup not working
----------------------------

Geneve kernel module not supported
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

By default, OVN creates tunnels between compute nodes using the Geneve
protocol. Older kernels (< 3.18) do not support the Geneve module, and
hence tunneling cannot work. You can check with the command
``lsmod | grep openvswitch`` (``geneve`` should show up in the result
list).

For more information about which upstream kernel version is required for
support of each tunnel type, see the answer to "Why do tunnels not work
when using a kernel module other than the one packaged with Open vSwitch?"
in the `OVS FAQ `__.

MTU configuration
~~~~~~~~~~~~~~~~~

This problem is not unique to OVN, but it is amplified by the possibly
larger size of the Geneve header compared to other common tunneling
protocols (VXLAN). If you are using VMs as compute nodes, make sure that
you either lower the MTU size on the virtual interface or enable
fragmentation on it.

neutron-16.0.0.0b2.dev214/doc/source/admin/ovn/tutorial.rst

.. _ovn_tutorial:

==========================
OpenStack and OVN Tutorial
==========================

The OVN project documentation includes an in-depth tutorial on using OVN
with OpenStack.

`OpenStack and OVN Tutorial `_

neutron-16.0.0.0b2.dev214/doc/source/admin/shared/deploy-config-neutron-common.txt

.. code-block:: ini

   [DEFAULT]
   core_plugin = ml2
   auth_strategy = keystone

   [database]
   # ...

   [keystone_authtoken]
   # ...

   [nova]
   # ...

   [agent]
   # ...

See the `Installation Tutorials and Guides `_ and
`Configuration Reference `_ for your OpenStack release to obtain the
appropriate additional configuration for the ``[DEFAULT]``, ``[database]``,
``[keystone_authtoken]``, ``[nova]``, and ``[agent]`` sections.
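For orientation only, a filled-in version of the elided sections often
looks something like the following sketch. The connection string, endpoint
URLs, and credentials here are placeholders, not recommended values; take
the authoritative settings from the installation guide for your release.

.. code-block:: ini

   [database]
   # Placeholder connection string; substitute your own host and password.
   connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron

   [keystone_authtoken]
   # Placeholder Identity service endpoints and service credentials.
   www_authenticate_uri = http://controller:5000
   auth_url = http://controller:5000
   auth_type = password
   project_domain_name = default
   user_domain_name = default
   project_name = service
   username = neutron
   password = NEUTRON_PASS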
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/shared/deploy-ha-vrrp-initialnetworks.txt0000644000175000017500000001035500000000000030410 0ustar00coreycorey00000000000000Similar to the self-service deployment example, this configuration supports multiple VXLAN self-service networks. After enabling high-availability, all additional routers use VRRP. The following procedure creates an additional self-service network and router. The Networking service also supports adding high-availability to existing routers. However, the procedure requires administratively disabling and enabling each router which temporarily interrupts network connectivity for self-service networks with interfaces on that router. #. Source a regular (non-administrative) project credentials. #. Create a self-service network. .. code-block:: console $ openstack network create selfservice2 +-------------------------+--------------+ | Field | Value | +-------------------------+--------------+ | admin_state_up | UP | | mtu | 1450 | | name | selfservice2 | | port_security_enabled | True | | router:external | Internal | | shared | False | | status | ACTIVE | +-------------------------+--------------+ #. Create a IPv4 subnet on the self-service network. .. code-block:: console $ openstack subnet create --subnet-range 198.51.100.0/24 \ --network selfservice2 --dns-nameserver 8.8.4.4 selfservice2-v4 +-------------------+------------------------------+ | Field | Value | +-------------------+------------------------------+ | allocation_pools | 198.51.100.2-198.51.100.254 | | cidr | 198.51.100.0/24 | | dns_nameservers | 8.8.4.4 | | enable_dhcp | True | | gateway_ip | 198.51.100.1 | | ip_version | 4 | | name | selfservice2-v4 | +-------------------+------------------------------+ #. Create a IPv6 subnet on the self-service network. .. code-block:: console $ openstack subnet create --subnet-range fd00:198:51:100::/64 --ip-version 6 \ --ipv6-ra-mode slaac --ipv6-address-mode slaac --network selfservice2 \ --dns-nameserver 2001:4860:4860::8844 selfservice2-v6 +-------------------+--------------------------------------------------------+ | Field | Value | +-------------------+--------------------------------------------------------+ | allocation_pools | fd00:198:51:100::2-fd00:198:51:100:ffff:ffff:ffff:ffff | | cidr | fd00:198:51:100::/64 | | dns_nameservers | 2001:4860:4860::8844 | | enable_dhcp | True | | gateway_ip | fd00:198:51:100::1 | | ip_version | 6 | | ipv6_address_mode | slaac | | ipv6_ra_mode | slaac | | name | selfservice2-v6 | +-------------------+--------------------------------------------------------+ #. Create a router. .. code-block:: console $ openstack router create router2 +-----------------------+---------+ | Field | Value | +-----------------------+---------+ | admin_state_up | UP | | name | router2 | | status | ACTIVE | +-----------------------+---------+ #. Add the IPv4 and IPv6 subnets as interfaces on the router. .. code-block:: console $ openstack router add subnet router2 selfservice2-v4 $ openstack router add subnet router2 selfservice2-v6 .. note:: These commands provide no output. #. Add the provider network as a gateway on the router. .. 
code-block:: console $ openstack router set --external-gateway provider1 router2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/shared/deploy-ha-vrrp-verifyfailoveroperation.txt0000644000175000017500000000125100000000000032132 0ustar00coreycorey00000000000000#. Begin a continuous ``ping`` of both the floating IPv4 address and IPv6 address of the instance. While performing the next three steps, you should see a minimal, if any, interruption of connectivity to the instance. #. On the network node with the master router, administratively disable the overlay network interface. #. On the other network node, verify promotion of the backup router to master router by noting addition of IP addresses to the interfaces in the ``qrouter`` namespace. #. On the original network node in step 2, administratively enable the overlay network interface. Note that the master router remains on the network node in step 3. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/shared/deploy-ha-vrrp-verifynetworkoperation.txt0000644000175000017500000001702600000000000032023 0ustar00coreycorey00000000000000#. Source the administrative project credentials. #. Verify creation of the internal high-availability network that handles VRRP *heartbeat* traffic. .. code-block:: console $ openstack network list +--------------------------------------+----------------------------------------------------+--------------------------------------+ | ID | Name | Subnets | +--------------------------------------+----------------------------------------------------+--------------------------------------+ | 1b8519c1-59c4-415c-9da2-a67d53c68455 | HA network tenant f986edf55ae945e2bef3cb4bfd589928 | 6843314a-1e76-4cc9-94f5-c64b7a39364a | +--------------------------------------+----------------------------------------------------+--------------------------------------+ #. On each network node, verify creation of a ``qrouter`` namespace with the same ID. Network node 1: .. code-block:: console # ip netns qrouter-b6206312-878e-497c-8ef7-eb384f8add96 Network node 2: .. code-block:: console # ip netns qrouter-b6206312-878e-497c-8ef7-eb384f8add96 .. note:: The namespace for router 1 from :ref:`deploy-lb-selfservice` should only appear on network node 1 because of creation prior to enabling VRRP. #. On each network node, show the IP address of interfaces in the ``qrouter`` namespace. With the exception of the VRRP interface, only one namespace belonging to the master router instance contains IP addresses on the interfaces. Network node 1: .. 
code-block:: console # ip netns exec qrouter-b6206312-878e-497c-8ef7-eb384f8add96 ip addr show 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: ha-eb820380-40@if21: mtu 1450 qdisc noqueue state UP group default qlen 1000 link/ether fa:16:3e:78:ba:99 brd ff:ff:ff:ff:ff:ff link-netnsid 0 inet 169.254.192.1/18 brd 169.254.255.255 scope global ha-eb820380-40 valid_lft forever preferred_lft forever inet 169.254.0.1/24 scope global ha-eb820380-40 valid_lft forever preferred_lft forever inet6 fe80::f816:3eff:fe78:ba99/64 scope link valid_lft forever preferred_lft forever 3: qr-da3504ad-ba@if24: mtu 1450 qdisc noqueue state UP group default qlen 1000 link/ether fa:16:3e:dc:8e:a8 brd ff:ff:ff:ff:ff:ff link-netnsid 0 inet 198.51.100.1/24 scope global qr-da3504ad-ba valid_lft forever preferred_lft forever inet6 fe80::f816:3eff:fedc:8ea8/64 scope link valid_lft forever preferred_lft forever 4: qr-442e36eb-fc@if27: mtu 1450 qdisc noqueue state UP group default qlen 1000 link/ether fa:16:3e:ee:c8:41 brd ff:ff:ff:ff:ff:ff link-netnsid 0 inet6 fd00:198:51:100::1/64 scope global nodad valid_lft forever preferred_lft forever inet6 fe80::f816:3eff:feee:c841/64 scope link valid_lft forever preferred_lft forever 5: qg-33fedbc5-43@if28: mtu 1500 qdisc noqueue state UP group default qlen 1000 link/ether fa:16:3e:03:1a:f6 brd ff:ff:ff:ff:ff:ff link-netnsid 0 inet 203.0.113.21/24 scope global qg-33fedbc5-43 valid_lft forever preferred_lft forever inet6 fd00:203:0:113::21/64 scope global nodad valid_lft forever preferred_lft forever inet6 fe80::f816:3eff:fe03:1af6/64 scope link valid_lft forever preferred_lft forever Network node 2: .. code-block:: console # ip netns exec qrouter-b6206312-878e-497c-8ef7-eb384f8add96 ip addr show 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: ha-7a7ce184-36@if8: mtu 1450 qdisc noqueue state UP group default qlen 1000 link/ether fa:16:3e:16:59:84 brd ff:ff:ff:ff:ff:ff link-netnsid 0 inet 169.254.192.2/18 brd 169.254.255.255 scope global ha-7a7ce184-36 valid_lft forever preferred_lft forever inet6 fe80::f816:3eff:fe16:5984/64 scope link valid_lft forever preferred_lft forever 3: qr-da3504ad-ba@if11: mtu 1450 qdisc noqueue state UP group default qlen 1000 link/ether fa:16:3e:dc:8e:a8 brd ff:ff:ff:ff:ff:ff link-netnsid 0 4: qr-442e36eb-fc@if14: mtu 1450 qdisc noqueue state UP group default qlen 1000 5: qg-33fedbc5-43@if15: mtu 1500 qdisc noqueue state UP group default qlen 1000 link/ether fa:16:3e:03:1a:f6 brd ff:ff:ff:ff:ff:ff link-netnsid 0 .. note:: The master router may reside on network node 2. #. Launch an instance with an interface on the additional self-service network. For example, a CirrOS image using flavor ID 1. .. code-block:: console $ openstack server create --flavor 1 --image cirros --nic net-id=NETWORK_ID selfservice-instance2 Replace ``NETWORK_ID`` with the ID of the additional self-service network. #. Determine the IPv4 and IPv6 addresses of the instance. .. 
code-block:: console $ openstack server list +--------------------------------------+-----------------------+--------+----------------------------------------------------------------+--------+---------+ | ID | Name | Status | Networks | Image | Flavor | +--------------------------------------+-----------------------+--------+----------------------------------------------------------------+--------+---------+ | bde64b00-77ae-41b9-b19a-cd8e378d9f8b | selfservice-instance2 | ACTIVE | selfservice2=fd00:198:51:100:f816:3eff:fe71:e93e, 198.51.100.4 | cirros | m1.tiny | +--------------------------------------+-----------------------+--------+----------------------------------------------------------------+--------+---------+ #. Create a floating IPv4 address on the provider network. .. code-block:: console $ openstack floating ip create provider1 +-------------+--------------------------------------+ | Field | Value | +-------------+--------------------------------------+ | fixed_ip | None | | id | 0174056a-fa56-4403-b1ea-b5151a31191f | | instance_id | None | | ip | 203.0.113.17 | | pool | provider1 | +-------------+--------------------------------------+ #. Associate the floating IPv4 address with the instance. .. code-block:: console $ openstack server add floating ip selfservice-instance2 203.0.113.17 .. note:: This command provides no output. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/shared/deploy-ha-vrrp.txt0000644000175000017500000000645700000000000025174 0ustar00coreycorey00000000000000This architecture example augments the self-service deployment example with a high-availability mechanism using the Virtual Router Redundancy Protocol (VRRP) via ``keepalived`` and provides failover of routing for self-service networks. It requires a minimum of two network nodes because VRRP creates one master (active) instance and at least one backup instance of each router. During normal operation, ``keepalived`` on the master router periodically transmits *heartbeat* packets over a hidden network that connects all VRRP routers for a particular project. Each project with VRRP routers uses a separate hidden network. By default this network uses the first value in the ``tenant_network_types`` option in the ``ml2_conf.ini`` file. For additional control, you can specify the self-service network type and physical network name for the hidden network using the ``l3_ha_network_type`` and ``l3_ha_network_name`` options in the ``neutron.conf`` file. If ``keepalived`` on the backup router stops receiving *heartbeat* packets, it assumes failure of the master router and promotes the backup router to master router by configuring IP addresses on the interfaces in the ``qrouter`` namespace. In environments with more than one backup router, ``keepalived`` on the backup router with the next highest priority promotes that backup router to master router. .. note:: This high-availability mechanism configures VRRP using the same priority for all routers. Therefore, VRRP promotes the backup router with the highest IP address to the master router. .. warning:: There is a known bug with ``keepalived`` v1.2.15 and earlier which can cause packet loss when ``max_l3_agents_per_router`` is set to 3 or more. Therefore, we recommend that you upgrade to ``keepalived`` v1.2.16 or greater when using this feature. 
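A minimal sketch of the high-availability options mentioned above, as they
might appear in ``neutron.conf``, follows; the values shown are
illustrative assumptions, not requirements.

.. code-block:: ini

   [DEFAULT]
   # Create new routers as HA (VRRP) routers by default.
   l3_ha = true
   # Number of network nodes each HA router is scheduled to.
   max_l3_agents_per_router = 3
   # Optional override for the hidden VRRP heartbeat network type.
   l3_ha_network_type = vxlan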
Interruption of VRRP *heartbeat* traffic between network nodes, typically
due to a network interface or physical network infrastructure failure,
triggers a failover. Restarting the layer-3 agent, or failure of it, does
not trigger a failover provided ``keepalived`` continues to operate.

Consider the following attributes of this high-availability mechanism to
determine practicality in your environment:

* Instance network traffic on self-service networks using a particular
  router only traverses the master instance of that router. Thus, resource
  limitations of a particular network node can impact all master instances
  of routers on that network node without triggering failover to another
  network node. However, you can configure the scheduler to distribute the
  master instance of each router uniformly across a pool of network nodes
  to reduce the chance of resource contention on any particular network
  node.

* Only supports self-service networks using a router. Provider networks
  operate at layer-2 and rely on physical network infrastructure for
  redundancy.

* For instances with a floating IPv4 address, maintains state of network
  connections during failover as a side effect of 1:1 static NAT. The
  mechanism does not actually implement connection tracking.

For production deployments, we recommend at least three network nodes with
sufficient resources to handle network traffic for the entire environment
if one network node fails. Also, the remaining two nodes can continue to
provide redundancy.

neutron-16.0.0.0b2.dev214/doc/source/admin/shared/deploy-provider-initialnetworks.txt

The configuration supports one flat or multiple VLAN provider networks.
For simplicity, the following procedure creates one flat provider network.

#. Source the administrative project credentials.

#. Create a flat network.

   .. code-block:: console

      $ openstack network create --share --provider-physical-network provider \
        --provider-network-type flat provider1
      +---------------------------+-----------+
      | Field                     | Value     |
      +---------------------------+-----------+
      | admin_state_up            | UP        |
      | mtu                       | 1500      |
      | name                      | provider1 |
      | port_security_enabled     | True      |
      | provider:network_type     | flat      |
      | provider:physical_network | provider  |
      | provider:segmentation_id  | None      |
      | router:external           | Internal  |
      | shared                    | True      |
      | status                    | ACTIVE    |
      +---------------------------+-----------+

   .. note::

      The ``--share`` option allows any project to use this network. To
      limit access to provider networks, see :ref:`config-rbac`.

   .. note::

      To create a VLAN network instead of a flat network, change
      ``--provider-network-type flat`` to ``--provider-network-type vlan``
      and add ``--provider-segment`` with a value referencing the VLAN ID.

#. Create an IPv4 subnet on the provider network.

   .. code-block:: console

      $ openstack subnet create --subnet-range 203.0.113.0/24 --gateway 203.0.113.1 \
        --network provider1 --allocation-pool start=203.0.113.11,end=203.0.113.250 \
        --dns-nameserver 8.8.4.4 provider1-v4
      +-------------------+----------------------------+
      | Field             | Value                      |
      +-------------------+----------------------------+
      | allocation_pools  | 203.0.113.11-203.0.113.250 |
      | cidr              | 203.0.113.0/24             |
      | dns_nameservers   | 8.8.4.4                    |
      | enable_dhcp       | True                       |
      | gateway_ip        | 203.0.113.1                |
      | ip_version        | 4                          |
      | name              | provider1-v4               |
      +-------------------+----------------------------+

   ..
important:: Enabling DHCP causes the Networking service to provide DHCP which can interfere with existing DHCP services on the physical network infrastructure. Use the ``--no-dhcp`` option to have the subnet managed by existing DHCP services. #. Create a IPv6 subnet on the provider network. .. code-block:: console $ openstack subnet create --subnet-range fd00:203:0:113::/64 --gateway fd00:203:0:113::1 \ --ip-version 6 --ipv6-address-mode slaac --network provider1 \ --dns-nameserver 2001:4860:4860::8844 provider1-v6 +-------------------+------------------------------------------------------+ | Field | Value | +-------------------+------------------------------------------------------+ | allocation_pools | fd00:203:0:113::2-fd00:203:0:113:ffff:ffff:ffff:ffff | | cidr | fd00:203:0:113::/64 | | dns_nameservers | 2001:4860:4860::8844 | | enable_dhcp | True | | gateway_ip | fd00:203:0:113::1 | | ip_version | 6 | | ipv6_address_mode | slaac | | ipv6_ra_mode | None | | name | provider1-v6 | +-------------------+------------------------------------------------------+ .. note:: The Networking service uses the layer-3 agent to provide router advertisement. Provider networks rely on physical network infrastructure for layer-3 services rather than the layer-3 agent. Thus, the physical network infrastructure must provide router advertisement on provider networks for proper operation of IPv6. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/shared/deploy-provider-networktrafficflow.txt0000644000175000017500000000200400000000000031345 0ustar00coreycorey00000000000000The following sections describe the flow of network traffic in several common scenarios. *North-south* network traffic travels between an instance and external network such as the Internet. *East-west* network traffic travels between instances on the same or different networks. In all scenarios, the physical network infrastructure handles switching and routing among provider networks and external networks such as the Internet. Each case references one or more of the following components: * Provider network 1 (VLAN) * VLAN ID 101 (tagged) * IP address ranges 203.0.113.0/24 and fd00:203:0:113::/64 * Gateway (via physical network infrastructure) * IP addresses 203.0.113.1 and fd00:203:0:113:0::1 * Provider network 2 (VLAN) * VLAN ID 102 (tagged) * IP address range 192.0.2.0/24 and fd00:192:0:2::/64 * Gateway * IP addresses 192.0.2.1 and fd00:192:0:2::1 * Instance 1 * IP addresses 203.0.113.101 and fd00:203:0:113:0::101 * Instance 2 * IP addresses 192.0.2.101 and fd00:192:0:2:0::101 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/shared/deploy-provider-verifynetworkoperation.txt0000644000175000017500000000603200000000000032271 0ustar00coreycorey00000000000000#. On each compute node, verify creation of the ``qdhcp`` namespace. .. code-block:: console # ip netns qdhcp-8b868082-e312-4110-8627-298109d4401c #. Source a regular (non-administrative) project credentials. #. Create the appropriate security group rules to allow ``ping`` and SSH access instances using the network. .. include:: shared/deploy-secgrouprules.txt #. Launch an instance with an interface on the provider network. For example, a CirrOS image using flavor ID 1. .. 
code-block:: console $ openstack server create --flavor 1 --image cirros \ --nic net-id=NETWORK_ID provider-instance1 Replace ``NETWORK_ID`` with the ID of the provider network. #. Determine the IPv4 and IPv6 addresses of the instance. .. code-block:: console $ openstack server list +--------------------------------------+--------------------+--------+------------------------------------------------------------+--------+---------+ | ID | Name | Status | Networks | Image | Flavor | +--------------------------------------+--------------------+--------+------------------------------------------------------------+--------+---------+ | 018e0ae2-b43c-4271-a78d-62653dd03285 | provider-instance1 | ACTIVE | provider1=203.0.113.13, fd00:203:0:113:f816:3eff:fe58:be4e | cirros | m1.tiny | +--------------------------------------+--------------------+--------+------------------------------------------------------------+--------+---------+ #. On the controller node or any host with access to the provider network, ``ping`` the IPv4 and IPv6 addresses of the instance. .. code-block:: console $ ping -c 4 203.0.113.13 PING 203.0.113.13 (203.0.113.13) 56(84) bytes of data. 64 bytes from 203.0.113.13: icmp_req=1 ttl=63 time=3.18 ms 64 bytes from 203.0.113.13: icmp_req=2 ttl=63 time=0.981 ms 64 bytes from 203.0.113.13: icmp_req=3 ttl=63 time=1.06 ms 64 bytes from 203.0.113.13: icmp_req=4 ttl=63 time=0.929 ms --- 203.0.113.13 ping statistics --- 4 packets transmitted, 4 received, 0% packet loss, time 3002ms rtt min/avg/max/mdev = 0.929/1.539/3.183/0.951 ms $ ping6 -c 4 fd00:203:0:113:f816:3eff:fe58:be4e PING fd00:203:0:113:f816:3eff:fe58:be4e(fd00:203:0:113:f816:3eff:fe58:be4e) 56 data bytes 64 bytes from fd00:203:0:113:f816:3eff:fe58:be4e icmp_seq=1 ttl=64 time=1.25 ms 64 bytes from fd00:203:0:113:f816:3eff:fe58:be4e icmp_seq=2 ttl=64 time=0.683 ms 64 bytes from fd00:203:0:113:f816:3eff:fe58:be4e icmp_seq=3 ttl=64 time=0.762 ms 64 bytes from fd00:203:0:113:f816:3eff:fe58:be4e icmp_seq=4 ttl=64 time=0.486 ms --- fd00:203:0:113:f816:3eff:fe58:be4e ping statistics --- 4 packets transmitted, 4 received, 0% packet loss, time 2999ms rtt min/avg/max/mdev = 0.486/0.796/1.253/0.282 ms #. Obtain access to the instance. #. Test IPv4 and IPv6 connectivity to the Internet or other external network. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/shared/deploy-secgrouprules.txt0000644000175000017500000000276000000000000026510 0ustar00coreycorey00000000000000.. 
code-block:: console $ openstack security group rule create --proto icmp default +------------------+-----------+ | Field | Value | +------------------+-----------+ | direction | ingress | | ethertype | IPv4 | | protocol | icmp | | remote_ip_prefix | 0.0.0.0/0 | +------------------+-----------+ $ openstack security group rule create --ethertype IPv6 --proto ipv6-icmp default +-----------+-----------+ | Field | Value | +-----------+-----------+ | direction | ingress | | ethertype | IPv6 | | protocol | ipv6-icmp | +-----------+-----------+ $ openstack security group rule create --proto tcp --dst-port 22 default +------------------+-----------+ | Field | Value | +------------------+-----------+ | direction | ingress | | ethertype | IPv4 | | port_range_max | 22 | | port_range_min | 22 | | protocol | tcp | | remote_ip_prefix | 0.0.0.0/0 | +------------------+-----------+ $ openstack security group rule create --ethertype IPv6 --proto tcp --dst-port 22 default +------------------+-----------+ | Field | Value | +------------------+-----------+ | direction | ingress | | ethertype | IPv6 | | port_range_max | 22 | | port_range_min | 22 | | protocol | tcp | +------------------+-----------+ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/shared/deploy-selfservice-initialnetworks.txt0000644000175000017500000001053700000000000031345 0ustar00coreycorey00000000000000The configuration supports multiple VXLAN self-service networks. For simplicity, the following procedure creates one self-service network and a router with a gateway on the flat provider network. The router uses NAT for IPv4 network traffic and directly routes IPv6 network traffic. .. note:: IPv6 connectivity with self-service networks often requires addition of static routes to nodes and physical network infrastructure. #. Source the administrative project credentials. #. Update the provider network to support external connectivity for self-service networks. .. code-block:: console $ openstack network set --external provider1 .. note:: This command provides no output. #. Source a regular (non-administrative) project credentials. #. Create a self-service network. .. code-block:: console $ openstack network create selfservice1 +-------------------------+--------------+ | Field | Value | +-------------------------+--------------+ | admin_state_up | UP | | mtu | 1450 | | name | selfservice1 | | port_security_enabled | True | | router:external | Internal | | shared | False | | status | ACTIVE | +-------------------------+--------------+ #. Create a IPv4 subnet on the self-service network. .. code-block:: console $ openstack subnet create --subnet-range 192.0.2.0/24 \ --network selfservice1 --dns-nameserver 8.8.4.4 selfservice1-v4 +-------------------+---------------------------+ | Field | Value | +-------------------+---------------------------+ | allocation_pools | 192.0.2.2-192.0.2.254 | | cidr | 192.0.2.0/24 | | dns_nameservers | 8.8.4.4 | | enable_dhcp | True | | gateway_ip | 192.0.2.1 | | ip_version | 4 | | name | selfservice1-v4 | +-------------------+---------------------------+ #. Create a IPv6 subnet on the self-service network. .. 
code-block:: console $ openstack subnet create --subnet-range fd00:192:0:2::/64 --ip-version 6 \ --ipv6-ra-mode slaac --ipv6-address-mode slaac --network selfservice1 \ --dns-nameserver 2001:4860:4860::8844 selfservice1-v6 +-------------------+------------------------------------------------------+ | Field | Value | +-------------------+------------------------------------------------------+ | allocation_pools | fd00:192:0:2::2-fd00:192:0:2:ffff:ffff:ffff:ffff | | cidr | fd00:192:0:2::/64 | | dns_nameservers | 2001:4860:4860::8844 | | enable_dhcp | True | | gateway_ip | fd00:192:0:2::1 | | ip_version | 6 | | ipv6_address_mode | slaac | | ipv6_ra_mode | slaac | | name | selfservice1-v6 | +-------------------+------------------------------------------------------+ #. Create a router. .. code-block:: console $ openstack router create router1 +-----------------------+---------+ | Field | Value | +-----------------------+---------+ | admin_state_up | UP | | name | router1 | | status | ACTIVE | +-----------------------+---------+ #. Add the IPv4 and IPv6 subnets as interfaces on the router. .. code-block:: console $ openstack router add subnet router1 selfservice1-v4 $ openstack router add subnet router1 selfservice1-v6 .. note:: These commands provide no output. #. Add the provider network as the gateway on the router. .. code-block:: console $ openstack router set --external-gateway provider1 router1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/shared/deploy-selfservice-networktrafficflow.txt0000644000175000017500000000147600000000000032041 0ustar00coreycorey00000000000000The following sections describe the flow of network traffic in several common scenarios. *North-south* network traffic travels between an instance and external network such as the Internet. *East-west* network traffic travels between instances on the same or different networks. In all scenarios, the physical network infrastructure handles switching and routing among provider networks and external networks such as the Internet. Each case references one or more of the following components: * Provider network (VLAN) * VLAN ID 101 (tagged) * Self-service network 1 (VXLAN) * VXLAN ID (VNI) 101 * Self-service network 2 (VXLAN) * VXLAN ID (VNI) 102 * Self-service router * Gateway on the provider network * Interface on self-service network 1 * Interface on self-service network 2 * Instance 1 * Instance 2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/admin/shared/deploy-selfservice-verifynetworkoperation.txt0000644000175000017500000001274200000000000032756 0ustar00coreycorey00000000000000#. On each compute node, verify creation of a second ``qdhcp`` namespace. .. code-block:: console # ip netns qdhcp-8b868082-e312-4110-8627-298109d4401c qdhcp-8fbc13ca-cfe0-4b8a-993b-e33f37ba66d1 #. On the network node, verify creation of the ``qrouter`` namespace. .. code-block:: console # ip netns qrouter-17db2a15-e024-46d0-9250-4cd4d336a2cc #. Source a regular (non-administrative) project credentials. #. Create the appropriate security group rules to allow ``ping`` and SSH access instances using the network. .. include:: shared/deploy-secgrouprules.txt #. Launch an instance with an interface on the self-service network. For example, a CirrOS image using flavor ID 1. .. 
code-block:: console $ openstack server create --flavor 1 --image cirros --nic net-id=NETWORK_ID selfservice-instance1 Replace ``NETWORK_ID`` with the ID of the self-service network. #. Determine the IPv4 and IPv6 addresses of the instance. .. code-block:: console $ openstack server list +--------------------------------------+-----------------------+--------+----------------------------------------------------------+--------+---------+ | ID | Name | Status | Networks | Image | Flavor | +--------------------------------------+-----------------------+--------+----------------------------------------------------------+--------+---------+ | c055cdb0-ebb4-4d65-957c-35cbdbd59306 | selfservice-instance1 | ACTIVE | selfservice1=192.0.2.4, fd00:192:0:2:f816:3eff:fe30:9cb0 | cirros | m1.tiny | +--------------------------------------+-----------------------+--------+----------------------------------------------------------+--------+---------+ .. warning:: The IPv4 address resides in a private IP address range (RFC1918). Thus, the Networking service performs source network address translation (SNAT) for the instance to access external networks such as the Internet. Access from external networks such as the Internet to the instance requires a floating IPv4 address. The Networking service performs destination network address translation (DNAT) from the floating IPv4 address to the instance IPv4 address on the self-service network. On the other hand, the Networking service architecture for IPv6 lacks support for NAT due to the significantly larger address space and complexity of NAT. Thus, floating IP addresses do not exist for IPv6 and the Networking service only performs routing for IPv6 subnets on self-service networks. In other words, you cannot rely on NAT to "hide" instances with IPv4 and IPv6 addresses or only IPv6 addresses and must properly implement security groups to restrict access. #. On the controller node or any host with access to the provider network, ``ping`` the IPv6 address of the instance. .. code-block:: console $ ping6 -c 4 fd00:192:0:2:f816:3eff:fe30:9cb0 PING fd00:192:0:2:f816:3eff:fe30:9cb0(fd00:192:0:2:f816:3eff:fe30:9cb0) 56 data bytes 64 bytes from fd00:192:0:2:f816:3eff:fe30:9cb0: icmp_seq=1 ttl=63 time=2.08 ms 64 bytes from fd00:192:0:2:f816:3eff:fe30:9cb0: icmp_seq=2 ttl=63 time=1.88 ms 64 bytes from fd00:192:0:2:f816:3eff:fe30:9cb0: icmp_seq=3 ttl=63 time=1.55 ms 64 bytes from fd00:192:0:2:f816:3eff:fe30:9cb0: icmp_seq=4 ttl=63 time=1.62 ms --- fd00:192:0:2:f816:3eff:fe30:9cb0 ping statistics --- 4 packets transmitted, 4 received, 0% packet loss, time 3004ms rtt min/avg/max/mdev = 1.557/1.788/2.085/0.217 ms #. Optionally, enable IPv4 access from external networks such as the Internet to the instance. #. Create a floating IPv4 address on the provider network. .. code-block:: console $ openstack floating ip create provider1 +-------------+--------------------------------------+ | Field | Value | +-------------+--------------------------------------+ | fixed_ip | None | | id | 22a1b088-5c9b-43b4-97f3-970ce5df77f2 | | instance_id | None | | ip | 203.0.113.16 | | pool | provider1 | +-------------+--------------------------------------+ #. Associate the floating IPv4 address with the instance. .. code-block:: console $ openstack server add floating ip selfservice-instance1 203.0.113.16 .. note:: This command provides no output. #. On the controller node or any host with access to the provider network, ``ping`` the floating IPv4 address of the instance. .. 
code-block:: console

      $ ping -c 4 203.0.113.16
      PING 203.0.113.16 (203.0.113.16) 56(84) bytes of data.
      64 bytes from 203.0.113.16: icmp_seq=1 ttl=63 time=3.41 ms
      64 bytes from 203.0.113.16: icmp_seq=2 ttl=63 time=1.67 ms
      64 bytes from 203.0.113.16: icmp_seq=3 ttl=63 time=1.47 ms
      64 bytes from 203.0.113.16: icmp_seq=4 ttl=63 time=1.59 ms
      --- 203.0.113.16 ping statistics ---
      4 packets transmitted, 4 received, 0% packet loss, time 3005ms
      rtt min/avg/max/mdev = 1.473/2.040/3.414/0.798 ms

#. Obtain access to the instance.

#. Test IPv4 and IPv6 connectivity to the Internet or other external
   network.

neutron-16.0.0.0b2.dev214/doc/source/admin/shared/keepalived-vrrp-healthcheck.txt

The health of your ``keepalived`` instances can be automatically monitored
via a bash script that verifies connectivity to all available and
configured gateway addresses. In the event that connectivity is lost, the
master router is rescheduled to another node. If all routers lose
connectivity simultaneously, the process of selecting a new master router
will be repeated in a round-robin fashion until one or more routers have
their connectivity restored.

To enable this feature, edit the ``l3_agent.ini`` file:

.. code-block:: ini

   ha_vrrp_health_check_interval = 30

Where ``ha_vrrp_health_check_interval`` indicates how often in seconds the
health check should run. The default value is ``0``, which indicates that
the check should not run at all.

neutron-16.0.0.0b2.dev214/doc/source/admin/vpnaas-scenario.rst

======================================================
Virtual Private Network-as-a-Service (VPNaaS) scenario
======================================================

Enabling VPNaaS
~~~~~~~~~~~~~~~

This section describes the settings for the reference implementation.
Vendor plugins or drivers may have different setup procedures and may
provide their own manuals.

#. Enable the VPNaaS plug-in in the ``/etc/neutron/neutron.conf`` file by
   appending ``vpnaas`` to ``service_plugins`` in ``[DEFAULT]``:

   .. code-block:: ini

      [DEFAULT]
      # ...
      service_plugins = vpnaas

   .. note::

      ``vpnaas`` is just an example for the reference implementation. The
      value depends on the plugin that you are going to use. Set a suitable
      plugin for your own deployment.

#. Configure the VPNaaS service provider by creating the
   ``/etc/neutron/neutron_vpnaas.conf`` file as follows (``strongswan`` is
   used in the Ubuntu distribution):

   .. code-block:: ini

      [service_providers]
      service_provider = VPN:strongswan:neutron_vpnaas.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default

   .. note::

      There are several kinds of service drivers. Depending upon the Linux
      distribution, you may need to override this value. Select
      ``libreswan`` for RHEL/CentOS; the configuration will look like this:
      ``service_provider = VPN:openswan:neutron_vpnaas.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default``.
      Use the appropriate one for your deployment.

#. Configure the VPNaaS plugin for the L3 agent by adding the following
   section to ``/etc/neutron/l3_agent.ini`` (``StrongSwanDriver`` is used
   in the Ubuntu distribution):

   .. code-block:: ini

      [AGENT]
      extensions = vpnaas

      [vpnagent]
      vpn_device_driver = neutron_vpnaas.services.vpn.device_drivers.strongswan_ipsec.StrongSwanDriver

   .. note::

      There are several kinds of device drivers. Depending upon the Linux
      distribution, you may need to override this value. Select
      ``LibreSwanDriver`` for RHEL/CentOS; the configuration will look like
      this:
      ``vpn_device_driver = neutron_vpnaas.services.vpn.device_drivers.libreswan_ipsec.LibreSwanDriver``.
      Use the appropriate driver for your deployment.

#. Create the required tables in the database:

   .. code-block:: console

      # neutron-db-manage --subproject neutron-vpnaas upgrade head

   .. note::

      In order to run the above command, you need to have the
      `neutron-vpnaas `__ package installed on the controller node.

#. Restart the ``neutron-server`` on the controller node to apply the
   settings.

#. Restart the ``neutron-l3-agent`` on the network node to apply the
   settings.

Using VPNaaS with endpoint group (recommended)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

IPsec site-to-site connections support multiple local subnets, in addition
to multiple peer CIDRs. The multiple local subnet feature is triggered by
not specifying a local subnet when creating a VPN service. Backwards
compatibility with single local subnets is maintained by providing the
subnet in the VPN service creation.

To support multiple local subnets, a capability called "endpoint groups"
has been added. Each endpoint group defines one or more endpoints of a
specific type, and can be used to specify both local and peer endpoints for
IPsec connections. The endpoint groups separate the "what gets connected"
from the "how to connect" for a VPN service, and can be used for different
flavors of VPN in the future. Refer to `Multiple Local Subnets `__ for more
detail.

Create the IKE policy, IPsec policy, VPN service, local endpoint group, and
peer endpoint group. Then, create an IPsec site connection that applies the
above policies and service.

#. Create an IKE policy:

   .. code-block:: console

      $ openstack vpn ike policy create ikepolicy
      +-------------------------------+----------------------------------------+
      | Field                         | Value                                  |
      +-------------------------------+----------------------------------------+
      | Authentication Algorithm      | sha1                                   |
      | Description                   |                                        |
      | Encryption Algorithm          | aes-128                                |
      | ID                            | 735f4691-3670-43b2-b389-f4d81a60ed56   |
      | IKE Version                   | v1                                     |
      | Lifetime                      | {u'units': u'seconds', u'value': 3600} |
      | Name                          | ikepolicy                              |
      | Perfect Forward Secrecy (PFS) | group5                                 |
      | Phase1 Negotiation Mode       | main                                   |
      | Project                       | 095247cb2e22455b9850c6efff407584       |
      | project_id                    | 095247cb2e22455b9850c6efff407584       |
      +-------------------------------+----------------------------------------+

#. Create an IPsec policy:

   .. code-block:: console

      $ openstack vpn ipsec policy create ipsecpolicy
      +-------------------------------+----------------------------------------+
      | Field                         | Value                                  |
      +-------------------------------+----------------------------------------+
      | Authentication Algorithm      | sha1                                   |
      | Description                   |                                        |
      | Encapsulation Mode            | tunnel                                 |
      | Encryption Algorithm          | aes-128                                |
      | ID                            | 4f3f46fc-f2dc-4811-a642-9601ebae310f   |
      | Lifetime                      | {u'units': u'seconds', u'value': 3600} |
      | Name                          | ipsecpolicy                            |
      | Perfect Forward Secrecy (PFS) | group5                                 |
      | Project                       | 095247cb2e22455b9850c6efff407584       |
      | Transform Protocol            | esp                                    |
      | project_id                    | 095247cb2e22455b9850c6efff407584       |
      +-------------------------------+----------------------------------------+

#. Create a VPN service:

   ..
code-block:: console $ openstack vpn service create vpn \ --router 9ff3f20c-314f-4dac-9392-defdbbb36a66 +----------------+--------------------------------------+ | Field | Value | +----------------+--------------------------------------+ | Description | | | Flavor | None | | ID | 9f499f9f-f672-4ceb-be3c-d5ff3858c680 | | Name | vpn | | Project | 095247cb2e22455b9850c6efff407584 | | Router | 9ff3f20c-314f-4dac-9392-defdbbb36a66 | | State | True | | Status | PENDING_CREATE | | Subnet | None | | external_v4_ip | 192.168.20.7 | | external_v6_ip | 2001:db8::7 | | project_id | 095247cb2e22455b9850c6efff407584 | +----------------+--------------------------------------+ .. note:: Please do not specify ``--subnet`` option in this case. The Networking openstackclient requires a router (Name or ID) and name. #. Create local endpoint group: .. code-block:: console $ openstack vpn endpoint group create ep_subnet \ --type subnet \ --value 1f888dd0-2066-42a1-83d7-56518895e47d +-------------+-------------------------------------------+ | Field | Value | +-------------+-------------------------------------------+ | Description | | | Endpoints | [u'1f888dd0-2066-42a1-83d7-56518895e47d'] | | ID | 667296d0-67ca-4d0f-b676-7650cf96e7b1 | | Name | ep_subnet | | Project | 095247cb2e22455b9850c6efff407584 | | Type | subnet | | project_id | 095247cb2e22455b9850c6efff407584 | +-------------+-------------------------------------------+ .. note:: The type of a local endpoint group must be ``subnet``. #. Create peer endpoint group: .. code-block:: console $ openstack vpn endpoint group create ep_cidr \ --type cidr \ --value 192.168.1.0/24 +-------------+--------------------------------------+ | Field | Value | +-------------+--------------------------------------+ | Description | | | Endpoints | [u'192.168.1.0/24'] | | ID | 5c3d7f2a-4a2a-446b-9fcf-9a2557cfc641 | | Name | ep_cidr | | Project | 095247cb2e22455b9850c6efff407584 | | Type | cidr | | project_id | 095247cb2e22455b9850c6efff407584 | +-------------+--------------------------------------+ .. note:: The type of a peer endpoint group must be ``cidr``. #. Create an ipsec site connection: .. code-block:: console $ openstack vpn ipsec site connection create conn \ --vpnservice vpn \ --ikepolicy ikepolicy \ --ipsecpolicy ipsecpolicy \ --peer-address 192.168.20.9 \ --peer-id 192.168.20.9 \ --psk secret \ --local-endpoint-group ep_subnet \ --peer-endpoint-group ep_cidr +--------------------------+--------------------------------------------------------+ | Field | Value | +--------------------------+--------------------------------------------------------+ | Authentication Algorithm | psk | | Description | | | ID | 07e400b7-9de3-4ea3-a9d0-90a185e5b00d | | IKE Policy | 735f4691-3670-43b2-b389-f4d81a60ed56 | | IPSec Policy | 4f3f46fc-f2dc-4811-a642-9601ebae310f | | Initiator | bi-directional | | Local Endpoint Group ID | 667296d0-67ca-4d0f-b676-7650cf96e7b1 | | Local ID | | | MTU | 1500 | | Name | conn | | Peer Address | 192.168.20.9 | | Peer CIDRs | | | Peer Endpoint Group ID | 5c3d7f2a-4a2a-446b-9fcf-9a2557cfc641 | | Peer ID | 192.168.20.9 | | Pre-shared Key | secret | | Project | 095247cb2e22455b9850c6efff407584 | | Route Mode | static | | State | True | | Status | PENDING_CREATE | | VPN Service | 9f499f9f-f672-4ceb-be3c-d5ff3858c680 | | dpd | {u'action': u'hold', u'interval': 30, u'timeout': 120} | | project_id | 095247cb2e22455b9850c6efff407584 | +--------------------------+--------------------------------------------------------+ .. 
note:: Please do not specify ``--peer-cidr`` option in this case. Peer CIDR(s) are provided by a peer endpoint group. Configure VPNaaS without endpoint group (the legacy way) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Create the IKE policy, IPsec policy, VPN service. Then, create an ipsec site connection that applies the above policies and service. #. Create an IKE policy: .. code-block:: console $ openstack vpn ike policy create ikepolicy1 +-------------------------------+----------------------------------------+ | Field | Value | +-------------------------------+----------------------------------------+ | Authentication Algorithm | sha1 | | Description | | | Encryption Algorithm | aes-128 | | ID | 99e4345d-8674-4d73-acb4-0e2524425e34 | | IKE Version | v1 | | Lifetime | {u'units': u'seconds', u'value': 3600} | | Name | ikepolicy1 | | Perfect Forward Secrecy (PFS) | group5 | | Phase1 Negotiation Mode | main | | Project | 095247cb2e22455b9850c6efff407584 | | project_id | 095247cb2e22455b9850c6efff407584 | +-------------------------------+----------------------------------------+ #. Create an IPsec policy: .. code-block:: console $ openstack vpn ipsec policy create ipsecpolicy1 +-------------------------------+----------------------------------------+ | Field | Value | +-------------------------------+----------------------------------------+ | Authentication Algorithm | sha1 | | Description | | | Encapsulation Mode | tunnel | | Encryption Algorithm | aes-128 | | ID | e6f547af-4a1d-4c28-b40b-b97cce746459 | | Lifetime | {u'units': u'seconds', u'value': 3600} | | Name | ipsecpolicy1 | | Perfect Forward Secrecy (PFS) | group5 | | Project | 095247cb2e22455b9850c6efff407584 | | Transform Protocol | esp | | project_id | 095247cb2e22455b9850c6efff407584 | +-------------------------------+----------------------------------------+ #. Create a VPN service: .. code-block:: console $ openstack vpn service create vpn \ --router 66ca673a-cbbd-48b7-9fb6-bfa7ee3ef724 \ --subnet cdfb411e-e818-466a-837c-7f96fc41a6d9 +----------------+--------------------------------------+ | Field | Value | +----------------+--------------------------------------+ | Description | | | Flavor | None | | ID | 79ef6250-ddc3-428f-88c2-0ec8084f4e9a | | Name | vpn | | Project | 095247cb2e22455b9850c6efff407584 | | Router | 66ca673a-cbbd-48b7-9fb6-bfa7ee3ef724 | | State | True | | Status | PENDING_CREATE | | Subnet | cdfb411e-e818-466a-837c-7f96fc41a6d9 | | external_v4_ip | 192.168.20.2 | | external_v6_ip | 2001:db8::d | | project_id | 095247cb2e22455b9850c6efff407584 | +----------------+--------------------------------------+ .. note:: The ``--subnet`` option is required in this scenario. #. Create an ipsec site connection: .. 
code-block:: console $ openstack vpn ipsec site connection create conn \ --vpnservice vpn \ --ikepolicy ikepolicy1 \ --ipsecpolicy ipsecpolicy1 \ --peer-address 192.168.20.11 \ --peer-id 192.168.20.11 \ --peer-cidr 192.168.1.0/24 \ --psk secret +--------------------------+--------------------------------------------------------+ | Field | Value | +--------------------------+--------------------------------------------------------+ | Authentication Algorithm | psk | | Description | | | ID | 5b2935e6-b2f0-423a-8156-07ed48703d13 | | IKE Policy | 99e4345d-8674-4d73-acb4-0e2524425e34 | | IPSec Policy | e6f547af-4a1d-4c28-b40b-b97cce746459 | | Initiator | bi-directional | | Local Endpoint Group ID | None | | Local ID | | | MTU | 1500 | | Name | conn | | Peer Address | 192.168.20.11 | | Peer CIDRs | 192.168.1.0/24 | | Peer Endpoint Group ID | None | | Peer ID | 192.168.20.11 | | Pre-shared Key | secret | | Project | 095247cb2e22455b9850c6efff407584 | | Route Mode | static | | State | True | | Status | PENDING_CREATE | | VPN Service | 79ef6250-ddc3-428f-88c2-0ec8084f4e9a | | dpd | {u'action': u'hold', u'interval': 30, u'timeout': 120} | | project_id | 095247cb2e22455b9850c6efff407584 | +--------------------------+--------------------------------------------------------+ .. note:: Please do not specify ``--local-endpoint-group`` and ``--peer-endpoint-group`` options in this case. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.1310432 neutron-16.0.0.0b2.dev214/doc/source/cli/0000755000175000017500000000000000000000000017757 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/cli/index.rst0000644000175000017500000000050400000000000021617 0ustar00coreycorey00000000000000================================ Command-Line Interface Reference ================================ .. Add links to neutron, OSC and its network plugin command reference once their CLI reference is available in neutronclient repo. .. toctree:: :maxdepth: 1 neutron-debug neutron-sanity-check neutron-status ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/cli/neutron-debug.rst0000644000175000017500000002063400000000000023274 0ustar00coreycorey00000000000000.. This file is manually generated, unlike many of the other chapters. ============= neutron-debug ============= The :command:`neutron-debug` client is an extension to the :command:`neutron` command-line interface (CLI) for the OpenStack neutron-debug tool. This chapter documents :command:`neutron-debug` version ``2.3.0``. For help on a specific :command:`neutron-debug` command, enter: .. code-block:: console $ neutron-debug help COMMAND .. _neutron-debug_usage: neutron-debug usage ~~~~~~~~~~~~~~~~~~~ .. code-block:: console usage: neutron-debug [--version] [-v] [-q] [-h] [-r NUM] [--os-service-type ] [--os-endpoint-type ] [--service-type ] [--endpoint-type ] [--os-auth-strategy ] [--os-cloud ] [--os-auth-url ] [--os-tenant-name | --os-project-name ] [--os-tenant-id | --os-project-id ] [--os-username ] [--os-user-id ] [--os-user-domain-id ] [--os-user-domain-name ] [--os-project-domain-id ] [--os-project-domain-name ] [--os-cert ] [--os-cacert ] [--os-key ] [--os-password ] [--os-region-name ] [--os-token ] [--http-timeout ] [--os-url ] [--insecure] [--config-file CONFIG_FILE] ... 
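For example, a probe can be created with the ``probe-create`` subcommand described below, pointing the tool at the L3 agent's interface driver configuration; the network ID here is purely illustrative:

.. code-block:: console

   $ neutron-debug --config-file /etc/neutron/l3_agent.ini probe-create 4cef03d0-1d02-40bb-8c99-2f442aac6ab0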
Subcommands ----------- ``probe-create`` Create probe port - create port and interface within a network namespace. ``probe-list`` List all probes. ``probe-clear`` Clear all probes. ``probe-delete`` Delete probe - delete port then delete the namespace. ``probe-exec`` Execute commands in the namespace of the probe. ``ping-all`` ``ping-all`` is an all-in-one command to ping all fixed IPs in a specified network. .. _neutron-debug_optional: neutron-debug optional arguments ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ``--version`` Show program's version number and exit ``-v, --verbose, --debug`` Increase verbosity of output and show tracebacks on errors. You can repeat this option. ``-q, --quiet`` Suppress output except warnings and errors. ``-h, --help`` Show this help message and exit ``-r NUM, --retries NUM`` How many times the request to the Neutron server should be retried if it fails. ``--os-service-type `` Defaults to env[OS_NETWORK_SERVICE_TYPE] or network. ``--os-endpoint-type `` Defaults to ``env[OS_ENDPOINT_TYPE]`` or public. ``--service-type `` DEPRECATED! Use --os-service-type. ``--endpoint-type `` DEPRECATED! Use --os-endpoint-type. ``--os-auth-strategy `` DEPRECATED! Only keystone is supported. ``--os-cloud `` Defaults to env[OS_CLOUD]. ``--os-auth-url `` Authentication URL, defaults to env[OS_AUTH_URL]. ``--os-tenant-name `` Authentication tenant name, defaults to env[OS_TENANT_NAME]. ``--os-project-name `` Another way to specify tenant name. This option is mutually exclusive with --os-tenant-name. Defaults to env[OS_PROJECT_NAME]. ``--os-tenant-id `` Authentication tenant ID, defaults to env[OS_TENANT_ID]. ``--os-project-id `` Another way to specify tenant ID. This option is mutually exclusive with --os-tenant-id. Defaults to env[OS_PROJECT_ID]. ``--os-username `` Authentication username, defaults to env[OS_USERNAME]. ``--os-user-id `` Authentication user ID (Env: OS_USER_ID) ``--os-user-domain-id `` OpenStack user domain ID. Defaults to env[OS_USER_DOMAIN_ID]. ``--os-user-domain-name `` OpenStack user domain name. Defaults to env[OS_USER_DOMAIN_NAME]. ``--os-project-domain-id `` Defaults to env[OS_PROJECT_DOMAIN_ID]. ``--os-project-domain-name `` Defaults to env[OS_PROJECT_DOMAIN_NAME]. ``--os-cert `` Path of certificate file to use in SSL connection. This file can optionally be prepended with the private key. Defaults to env[OS_CERT]. ``--os-cacert `` Specify a CA bundle file to use in verifying a TLS (https) server certificate. Defaults to env[OS_CACERT]. ``--os-key `` Path of client key to use in SSL connection. This option is not necessary if your key is prepended to your certificate file. Defaults to env[OS_KEY]. ``--os-password `` Authentication password, defaults to env[OS_PASSWORD]. ``--os-region-name `` Authentication region name, defaults to env[OS_REGION_NAME]. ``--os-token `` Authentication token, defaults to env[OS_TOKEN]. ``--http-timeout `` Timeout in seconds to wait for an HTTP response. Defaults to env[OS_NETWORK_TIMEOUT] or None if not specified. ``--os-url `` Defaults to env[OS_URL] ``--insecure`` Explicitly allow neutronclient to perform "insecure" SSL (https) requests. The server's certificate will not be verified against any certificate authorities. This option should be used with caution. ``--config-file CONFIG_FILE`` Config file for interface driver (You may also use l3_agent.ini) neutron-debug probe-create command ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ..
code-block:: console usage: neutron-debug probe-create NET Create probe port - create port and interface, then place it into the created network namespace. Positional arguments -------------------- ``NET ID`` ID of the network in which the probe will be created. neutron-debug probe-list command ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code-block:: console usage: neutron-debug probe-list List probes. neutron-debug probe-clear command ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code-block:: console usage: neutron-debug probe-clear Clear all probes. neutron-debug probe-delete command ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code-block:: console usage: neutron-debug probe-delete Remove a probe. Positional arguments -------------------- ```` ID of the probe to delete. neutron-debug probe-exec command ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code-block:: console usage: neutron-debug probe-exec Execute commands in the namespace of the probe. neutron-debug ping-all command ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code-block:: console usage: neutron-debug ping-all --timeout All-in-one command to ping all fixed IPs in a specified network. Probe creation is not needed for this command. A new probe is created automatically. It will, however, need to be deleted manually when it is no longer needed. When there are multiple networks, the newly created probe will be attached to a random network and thus the ping will take place from within that random network. Positional arguments -------------------- ```` ID of the port to use. Optional arguments ------------------ ``--timeout `` Optional ping timeout. neutron-debug example ~~~~~~~~~~~~~~~~~~~~~ .. code-block:: console usage: neutron-debug probe-create Create a probe namespace within the network identified by ``NET_ID``. The namespace will have the name of qprobe- .. note:: For the following examples to function, the security group rules may need to be modified to allow the SSH (TCP port 22) or ping (ICMP) traffic into the network. .. code-block:: console usage: neutron-debug probe-exec "ssh " SSH to an instance within the network. .. code-block:: console usage: neutron-debug ping-all Ping all instances on this network to verify they are responding. .. code-block:: console usage: neutron-debug probe-exec dhcping -s Ping the DHCP server for this network using dhcping to verify it is working. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/cli/neutron-sanity-check.rst0000644000175000017500000001762100000000000024572 0ustar00coreycorey00000000000000.. This file is manually generated, unlike many of the other chapters. ==================== neutron-sanity-check ==================== The :command:`neutron-sanity-check` client is a tool that performs various sanity checks on the Networking service. This chapter documents :command:`neutron-sanity-check` version ``10.0.0``. neutron-sanity-check usage ~~~~~~~~~~~~~~~~~~~~~~~~~~ ..
code-block:: console usage: neutron-sanity-check [-h] [--arp_header_match] [--arp_responder] [--bridge_firewalling] [--config-dir DIR] [--config-file PATH] [--debug] [--dhcp_release6] [--dibbler_version] [--dnsmasq_version] [--ebtables_installed] [--icmpv6_header_match] [--ip6tables_installed] [--ip_nonlocal_bind] [--iproute2_vxlan] [--ipset_installed] [--keepalived_ipv6_support] [--log-config-append PATH] [--log-date-format DATE_FORMAT] [--log-dir LOG_DIR] [--log-file PATH] [--noarp_header_match] [--noarp_responder] [--nobridge_firewalling] [--nodebug] [--nodhcp_release6] [--nodibbler_version] [--nodnsmasq_version] [--noebtables_installed] [--noicmpv6_header_match] [--noip6tables_installed] [--noip_nonlocal_bind] [--noiproute2_vxlan] [--noipset_installed] [--nokeepalived_ipv6_support] [--nonova_notify] [--noovs_conntrack] [--noovs_geneve] [--noovs_patch] [--noovs_vxlan] [--noovsdb_native] [--noread_netns] [--nouse-syslog] [--nova_notify] [--noverbose] [--novf_extended_management] [--novf_management] [--nowatch-log-file] [--ovs_conntrack] [--ovs_geneve] [--ovs_patch] [--ovs_vxlan] [--ovsdb_native] [--read_netns] [--state_path STATE_PATH] [--syslog-log-facility SYSLOG_LOG_FACILITY] [--use-syslog] [--verbose] [--version] [--vf_extended_management] [--vf_management] [--watch-log-file] neutron-sanity-check optional arguments ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ``-h, --help`` show this help message and exit ``--arp_header_match`` Check for ARP header match support ``--arp_responder`` Check for ARP responder support ``--bridge_firewalling`` Check bridge firewalling ``--ip_nonlocal_bind`` Check that the ip_nonlocal_bind kernel option works with network namespaces. ``--config-dir DIR`` Path to a config directory to pull ``*.conf`` files from. This file set is sorted, so as to provide a predictable parse order if individual options are over-ridden. The set is parsed after the file(s) specified via previous --config-file arguments, hence over-ridden options in the directory take precedence. ``--config-file PATH`` Path to a config file to use. Multiple config files can be specified, with values in later files taking precedence. Defaults to ``None``. ``--debug, -d`` Print debugging output (set logging level to ``DEBUG`` instead of default ``INFO`` level). ``--dhcp_release6`` Check dhcp_release6 installation ``--dibbler_version`` Check minimal dibbler version ``--dnsmasq_version`` Check minimal dnsmasq version ``--ebtables_installed`` Check ebtables installation ``--icmpv6_header_match`` Check for ICMPv6 header match support ``--ip6tables_installed`` Check ip6tables installation ``--iproute2_vxlan`` Check for iproute2 vxlan support ``--ipset_installed`` Check ipset installation ``--keepalived_ipv6_support`` Check keepalived IPv6 support ``--log-config-append PATH, --log_config PATH`` The name of a logging configuration file. This file is appended to any existing logging configuration files. For details about logging configuration files, see the Python logging module documentation. Note that when logging configuration files are used then all logging configuration is set in the configuration file and other logging configuration options are ignored (for example, ``logging_context_format_string``). ``--log-date-format DATE_FORMAT`` Format string for %(asctime)s in log records. Default: None. This option is ignored if ``log_config_append`` is set. ``--log-dir LOG_DIR, --logdir LOG_DIR`` (Optional) The base directory used for relative ``log-file`` paths. This option is ignored if ``log_config_append`` is set.
``--log-file PATH, --logfile PATH`` (Optional) Name of log file to output to. If no default is set, logging will go to stderr as defined by ``use_stderr``. This option is ignored if ``log_config_append`` is set. ``--noarp_header_match`` The inverse of --arp_header_match ``--noarp_responder`` The inverse of --arp_responder ``--nobridge_firewalling`` The inverse of --bridge_firewalling ``--nodebug`` The inverse of --debug ``--nodhcp_release6`` The inverse of --dhcp_release6 ``--nodibbler_version`` The inverse of --dibbler_version ``--nodnsmasq_version`` The inverse of --dnsmasq_version ``--noebtables_installed`` The inverse of --ebtables_installed ``--noicmpv6_header_match`` The inverse of --icmpv6_header_match ``--noip6tables_installed`` The inverse of --ip6tables_installed ``--noip_nonlocal_bind`` The inverse of --ip_nonlocal_bind ``--noiproute2_vxlan`` The inverse of --iproute2_vxlan ``--noipset_installed`` The inverse of --ipset_installed ``--nokeepalived_ipv6_support`` The inverse of --keepalived_ipv6_support ``--nonova_notify`` The inverse of --nova_notify ``--noovs_conntrack`` The inverse of --ovs_conntrack ``--noovs_geneve`` The inverse of --ovs_geneve ``--noovs_patch`` The inverse of --ovs_patch ``--noovs_vxlan`` The inverse of --ovs_vxlan ``--noovsdb_native`` The inverse of --ovsdb_native ``--noread_netns`` The inverse of --read_netns ``--nouse-syslog`` The inverse of --use-syslog ``--nova_notify`` Check for nova notification support ``--noverbose`` The inverse of --verbose ``--novf_extended_management`` The inverse of --vf_extended_management ``--novf_management`` The inverse of --vf_management ``--nowatch-log-file`` The inverse of --watch-log-file ``--ovs_geneve`` Check for OVS Geneve support ``--ovs_patch`` Check for patch port support ``--ovs_vxlan`` Check for OVS vxlan support ``--ovsdb_native`` Check ovsdb native interface support ``--read_netns`` Check netns permission settings ``--state_path STATE_PATH`` Where to store Neutron state files. This directory must be writable by the agent. ``--syslog-log-facility SYSLOG_LOG_FACILITY`` Syslog facility to receive log lines. This option is ignored if ``log_config_append`` is set. ``--use-syslog`` Use syslog for logging. Existing syslog format is **DEPRECATED** and will be changed later to honor RFC5424. This option is ignored if ``log_config_append`` is set. ``--verbose, -v`` If set to ``false``, the logging level will be set to ``WARNING`` instead of the default ``INFO`` level. ``--version`` show program's version number and exit ``--vf_extended_management`` Check for VF extended management support ``--vf_management`` Check for VF management support ``--watch-log-file`` Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified path instantaneously. It makes sense only if ``log_file`` option is specified and Linux platform is used. This option is ignored if ``log_config_append`` is set. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/cli/neutron-status.rst0000644000175000017500000000347400000000000023534 0ustar00coreycorey00000000000000.. This file is manually generated, unlike many of the other chapters. ============== neutron-status ============== The :command:`neutron-status` provides routines for checking the status of Neutron deployment. .. _neutron-status_usage: neutron-status usage ~~~~~~~~~~~~~~~~~~~~ .. 
code-block:: console usage: neutron-status [-h] [--config-dir DIR] [--config-file PATH] Categories are: * ``upgrade`` Detailed descriptions are below. You can also run with a category argument such as ``upgrade`` to see a list of all commands in that category:: neutron-status upgrade These sections describe the available categories and arguments for :command:`neutron-status`. Command details --------------- ``neutron-status upgrade check`` Performs a release-specific readiness check before restarting services with new code. This command expects to have complete configuration and access to databases and services. **Return Codes** .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - All upgrade readiness checks passed successfully and there is nothing to do. * - 1 - At least one check encountered an issue and requires further investigation. This is considered a warning but the upgrade may be OK. * - 2 - There was an upgrade status check failure that needs to be investigated. This should be considered something that stops an upgrade. * - 255 - An unexpected error occurred. **History of Checks** **16.0.0 (Ussuri)** * A check was added for NIC Switch agents to ensure nodes are running with kernel 3.13 or newer. This check serves as a notification for operators to ensure this requirement is fulfilled on relevant nodes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/conf.py0000644000175000017500000002256100000000000020515 0ustar00coreycorey00000000000000# Copyright (c) 2010 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # # Neutron documentation build configuration file, created by # sphinx-quickstart on Tue May 18 13:50:15 2010. # # This file is execfile()d with the current directory set to its containing # dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import logging import os import sys # NOTE(amotoki): In case oslo_config.sphinxext is enabled, # when resolving automodule neutron.tests.functional.db.test_migrations, # sphinx processes tests/functional/__init__.py, # eventlet.monkey_patch() is called and monkey_patch() tries to access # the pyroute2.common.__class__ attribute. It raises a pyroute2 warning and # causes a sphinx build failure due to warning-is-error = 1. # To pass the sphinx build, ignore the pyroute2 warning explicitly. logging.getLogger('pyroute2').setLevel(logging.ERROR) # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here.
BASE_DIR = os.path.dirname(os.path.abspath(__file__)) NEUTRON_DIR = os.path.abspath(os.path.join(BASE_DIR, "..", "..")) sys.path.insert(0, NEUTRON_DIR) # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', 'sphinx.ext.todo', 'openstackdocstheme', 'sphinx_feature_classification.support_matrix', 'oslo_config.sphinxext', 'oslo_config.sphinxconfiggen', 'oslo_policy.sphinxext', 'oslo_policy.sphinxpolicygen', ] # Project cross-reference roles openstack_projects = [ 'neutron', 'nova', ] # openstackdocstheme options repository_name = 'openstack/neutron' bug_project = 'neutron' bug_tag = 'doc' todo_include_todos = True # Add any paths that contain templates here, relative to this directory. templates_path = [] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Neutron' copyright = u'2011-present, OpenStack Foundation.' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # Version info from neutron.version import version_info as neutron_version release = neutron_version.release_string() # The short X.Y version. version = neutron_version.version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. # unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = [] # The reST default role (for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['neutron.'] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = ['_theme'] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. 
Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%Y-%m-%d %H:%M' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'neutrondoc' # -- Options for LaTeX output ------------------------------------------------ # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, # documentclass [howto/manual]). latex_documents = [ ('pdf-index', 'doc-neutron.tex', u'Neutron Documentation', u'Neutron development team', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True # Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664 latex_use_xindy = False latex_domain_indices = False latex_elements = { 'makeindex': '', 'printindex': '', 'preamble': r'\setcounter{tocdepth}{3}', } # -- Options for oslo_config.sphinxconfiggen --------------------------------- _config_generator_config_files = [ 'dhcp_agent.ini', 'l3_agent.ini', 'linuxbridge_agent.ini', 'macvtap_agent.ini', 'metadata_agent.ini', 'metering_agent.ini', 'ml2_conf.ini', 'neutron.conf', 'openvswitch_agent.ini', 'ovn.ini', 'sriov_agent.ini', ] def _get_config_generator_config_definition(conf): config_file_path = '../../etc/oslo-config-generator/%s' % conf # oslo_config.sphinxconfiggen appends '.conf.sample' to the filename, # strip file extentension (.conf or .ini). 
output_file_path = '_static/config-samples/%s' % conf.rsplit('.', 1)[0] return (config_file_path, output_file_path) config_generator_config_file = [ _get_config_generator_config_definition(conf) for conf in _config_generator_config_files ] # -- Options for oslo_policy.sphinxpolicygen --------------------------------- policy_generator_config_file = '../../etc/oslo-policy-generator/policy.conf' sample_policy_basename = '_static/neutron' linkcheck_anchors_ignore = [ # skip gerrit anchors r'\/q\/.*', r'q\,.*', r'\/c\/.*' ] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.1350431 neutron-16.0.0.0b2.dev214/doc/source/configuration/0000755000175000017500000000000000000000000022057 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/configuration/config-samples.rst0000644000175000017500000000073000000000000025520 0ustar00coreycorey00000000000000========================== Sample Configuration Files ========================== .. toctree:: :maxdepth: 1 samples/neutron.rst .. toctree:: :maxdepth: 1 samples/ml2-conf.rst samples/linuxbridge-agent.rst samples/macvtap-agent.rst samples/openvswitch-agent.rst samples/sriov-agent.rst samples/ovn.rst .. toctree:: :maxdepth: 1 samples/dhcp-agent.rst samples/l3-agent.rst samples/metadata-agent.rst samples/metering-agent.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/configuration/config.rst0000644000175000017500000000151600000000000024061 0ustar00coreycorey00000000000000======================= Configuration Reference ======================= This section provides a list of all configuration options for various neutron services. These are auto-generated from neutron code when this documentation is built. Configuration filenames used below are filenames usually used, but there is no restriction on configuration filename in neutron and you can use arbitrary file names. .. only:: html For sample configuration files, refer to :doc:`config-samples`. .. toctree:: :hidden: config-samples .. toctree:: :maxdepth: 1 neutron.rst .. toctree:: :maxdepth: 1 ml2-conf.rst linuxbridge-agent.rst macvtap-agent.rst openvswitch-agent.rst sriov-agent.rst ovn.rst .. toctree:: :maxdepth: 1 dhcp-agent.rst l3-agent.rst metadata-agent.rst metering-agent.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/configuration/dhcp-agent.rst0000644000175000017500000000017200000000000024623 0ustar00coreycorey00000000000000============== dhcp_agent.ini ============== .. show-options:: :config-file: etc/oslo-config-generator/dhcp_agent.ini ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/configuration/index.rst0000644000175000017500000000020100000000000023711 0ustar00coreycorey00000000000000.. _configuring: =================== Configuration Guide =================== .. toctree:: :maxdepth: 1 config policy ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/configuration/l3-agent.rst0000644000175000017500000000017100000000000024222 0ustar00coreycorey00000000000000============ l3_agent.ini ============ .. 
show-options:: neutron.az.agent neutron.base.agent neutron.l3.agent ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/configuration/linuxbridge-agent.rst0000644000175000017500000000022600000000000026221 0ustar00coreycorey00000000000000===================== linuxbridge_agent.ini ===================== .. show-options:: :config-file: etc/oslo-config-generator/linuxbridge_agent.ini ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/configuration/macvtap-agent.rst0000644000175000017500000000020600000000000025336 0ustar00coreycorey00000000000000================= macvtap_agent.ini ================= .. show-options:: :config-file: etc/oslo-config-generator/macvtap_agent.ini ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/configuration/metadata-agent.rst0000644000175000017500000000021200000000000025460 0ustar00coreycorey00000000000000================== metadata_agent.ini ================== .. show-options:: :config-file: etc/oslo-config-generator/metadata_agent.ini ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/configuration/metering-agent.rst0000644000175000017500000000021200000000000025512 0ustar00coreycorey00000000000000================== metering_agent.ini ================== .. show-options:: :config-file: etc/oslo-config-generator/metering_agent.ini ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/configuration/ml2-conf.rst0000644000175000017500000000016200000000000024225 0ustar00coreycorey00000000000000============ ml2_conf.ini ============ .. show-options:: :config-file: etc/oslo-config-generator/ml2_conf.ini ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/configuration/neutron.rst0000644000175000017500000000016200000000000024302 0ustar00coreycorey00000000000000============ neutron.conf ============ .. show-options:: :config-file: etc/oslo-config-generator/neutron.conf ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/configuration/openvswitch-agent.rst0000644000175000017500000000022600000000000026256 0ustar00coreycorey00000000000000===================== openvswitch_agent.ini ===================== .. show-options:: :config-file: etc/oslo-config-generator/openvswitch_agent.ini ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/configuration/ovn.rst0000644000175000017500000000013600000000000023413 0ustar00coreycorey00000000000000======= ovn.ini ======= .. show-options:: :config-file: etc/oslo-config-generator/ovn.ini ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/configuration/policy-sample.rst0000644000175000017500000000074400000000000025374 0ustar00coreycorey00000000000000================== Sample Policy File ================== The following is a sample neutron policy file for adaptation and use. 
The sample policy can also be viewed in :download:`file form `. .. important:: The sample policy file is auto-generated from neutron when this documentation is built. You must ensure your version of neutron matches the version of this documentation. .. literalinclude:: /_static/neutron.policy.yaml.sample ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/configuration/policy.rst0000644000175000017500000000070000000000000024105 0ustar00coreycorey00000000000000================ Policy Reference ================ Neutron, like most OpenStack projects, uses a policy language to restrict permissions on REST API actions. The following is an overview of all available policies in neutron. .. only:: html For a sample policy file, refer to :doc:`/configuration/policy-sample`. .. toctree:: :hidden: policy-sample .. show-policy:: :config-file: etc/oslo-policy-generator/policy.conf ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.1350431 neutron-16.0.0.0b2.dev214/doc/source/configuration/samples/0000755000175000017500000000000000000000000023523 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/configuration/samples/dhcp-agent.rst0000644000175000017500000000040500000000000026266 0ustar00coreycorey00000000000000===================== Sample dhcp_agent.ini ===================== This sample configuration can also be viewed in `the raw format <../../_static/config-samples/dhcp_agent.conf.sample>`_. .. literalinclude:: ../../_static/config-samples/dhcp_agent.conf.sample ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/configuration/samples/l3-agent.rst0000644000175000017500000000037300000000000025672 0ustar00coreycorey00000000000000=================== Sample l3_agent.ini =================== This sample configuration can also be viewed in `the raw format <../../_static/config-samples/l3_agent.conf.sample>`_. .. literalinclude:: ../../_static/config-samples/l3_agent.conf.sample ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/configuration/samples/linuxbridge-agent.rst0000644000175000017500000000045000000000000027664 0ustar00coreycorey00000000000000============================ Sample linuxbridge_agent.ini ============================ This sample configuration can also be viewed in `the raw format <../../_static/config-samples/linuxbridge_agent.conf.sample>`_. .. literalinclude:: ../../_static/config-samples/linuxbridge_agent.conf.sample ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/configuration/samples/macvtap-agent.rst0000644000175000017500000000042400000000000027004 0ustar00coreycorey00000000000000======================== Sample macvtap_agent.ini ======================== This sample configuration can also be viewed in `the raw format <../../_static/config-samples/macvtap_agent.conf.sample>`_. .. 
literalinclude:: ../../_static/config-samples/macvtap_agent.conf.sample ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/configuration/samples/metadata-agent.rst0000644000175000017500000000043100000000000027127 0ustar00coreycorey00000000000000========================= Sample metadata_agent.ini ========================= This sample configuration can also be viewed in `the raw format <../../_static/config-samples/metadata_agent.conf.sample>`_. .. literalinclude:: ../../_static/config-samples/metadata_agent.conf.sample ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/configuration/samples/metering-agent.rst0000644000175000017500000000043100000000000027161 0ustar00coreycorey00000000000000========================= Sample metering_agent.ini ========================= This sample configuration can also be viewed in `the raw format <../../_static/config-samples/metering_agent.conf.sample>`_. .. literalinclude:: ../../_static/config-samples/metering_agent.conf.sample ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/configuration/samples/ml2-conf.rst0000644000175000017500000000037300000000000025675 0ustar00coreycorey00000000000000=================== Sample ml2_conf.ini =================== This sample configuration can also be viewed in `the raw format <../../_static/config-samples/ml2_conf.conf.sample>`_. .. literalinclude:: ../../_static/config-samples/ml2_conf.conf.sample ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/configuration/samples/neutron.rst0000644000175000017500000000037100000000000025750 0ustar00coreycorey00000000000000=================== Sample neutron.conf =================== This sample configuration can also be viewed in `the raw format <../../_static/config-samples/neutron.conf.sample>`_. .. literalinclude:: ../../_static/config-samples/neutron.conf.sample ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/configuration/samples/openvswitch-agent.rst0000644000175000017500000000045000000000000027721 0ustar00coreycorey00000000000000============================ Sample openvswitch_agent.ini ============================ This sample configuration can also be viewed in `the raw format <../../_static/config-samples/openvswitch_agent.conf.sample>`_. .. literalinclude:: ../../_static/config-samples/openvswitch_agent.conf.sample ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/configuration/samples/ovn.rst0000644000175000017500000000036400000000000025062 0ustar00coreycorey00000000000000.. _samples_ovn: ============== Sample ovn.ini ============== This sample configuration can also be viewed in `the raw format <../../_static/config-samples/ovn.conf.sample>`_. .. 
literalinclude:: ../../_static/config-samples/ovn.conf.sample ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/configuration/samples/sriov-agent.rst0000644000175000017500000000041200000000000026510 0ustar00coreycorey00000000000000====================== Sample sriov_agent.ini ====================== This sample configuration can also be viewed in `the raw format <../../_static/config-samples/sriov_agent.conf.sample>`_. .. literalinclude:: ../../_static/config-samples/sriov_agent.conf.sample ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/configuration/sriov-agent.rst0000644000175000017500000000017600000000000025053 0ustar00coreycorey00000000000000=============== sriov_agent.ini =============== .. show-options:: :config-file: etc/oslo-config-generator/sriov_agent.ini ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.139043 neutron-16.0.0.0b2.dev214/doc/source/contributor/0000755000175000017500000000000000000000000021562 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/contributor/alembic_migrations.rst0000644000175000017500000004737600000000000026165 0ustar00coreycorey00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Convention for heading levels in Neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.) .. _alembic_migrations: Alembic Migrations ================== Introduction ------------ The migrations in the alembic/versions contain the changes needed to migrate from older Neutron releases to newer versions. A migration occurs by executing a script that details the changes needed to upgrade the database. The migration scripts are ordered so that multiple scripts can run sequentially to update the database. The Migration Wrapper --------------------- The scripts are executed by Neutron's migration wrapper ``neutron-db-manage`` which uses the Alembic library to manage the migration. Pass the ``--help`` option to the wrapper for usage information. The wrapper takes some options followed by some commands:: neutron-db-manage The wrapper needs to be provided with the database connection string, which is usually provided in the ``neutron.conf`` configuration file in an installation. The wrapper automatically reads from ``/etc/neutron/neutron.conf`` if it is present. If the configuration is in a different location:: neutron-db-manage --config-file /path/to/neutron.conf Multiple ``--config-file`` options can be passed if needed. 
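For example, a deployment that keeps its plugin configuration in a separate file might check the current database version like this (the paths below are illustrative):

.. code-block:: console

   $ neutron-db-manage --config-file /etc/neutron/neutron.conf \
     --config-file /etc/neutron/plugins/ml2/ml2_conf.ini current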
Instead of reading the DB connection from the configuration file(s) the ``--database-connection`` option can be used:: neutron-db-manage --database-connection mysql+pymysql://root:secret@127.0.0.1/neutron?charset=utf8 The ``branches``, ``current``, and ``history`` commands all accept a ``--verbose`` option, which, when passed, will instruct ``neutron-db-manage`` to display more verbose output for the specified command:: neutron-db-manage current --verbose For some commands the wrapper needs to know the entrypoint of the core plugin for the installation. This can be read from the configuration file(s) or specified using the ``--core_plugin`` option:: neutron-db-manage --core_plugin neutron.plugins.ml2.plugin.Ml2Plugin When giving examples below of using the wrapper the options will not be shown. It is assumed you will use the options that you need for your environment. For new deployments you will start with an empty database. You then upgrade to the latest database version via:: neutron-db-manage upgrade heads For existing deployments the database will already be at some version. To check the current database version:: neutron-db-manage current After installing a new version of Neutron server, upgrading the database is the same command:: neutron-db-manage upgrade heads To create a script to run the migration offline:: neutron-db-manage upgrade heads --sql To run the offline migration between specific migration versions:: neutron-db-manage upgrade : --sql Upgrade the database incrementally:: neutron-db-manage upgrade --delta <# of revs> **NOTE:** Database downgrade is not supported. Migration Branches ------------------ Neutron makes use of alembic branches for two purposes. 1. Independent Sub-Project Tables ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Various `Sub-Projects <../contributor/stadium/guidelines.html>`_ can be installed with Neutron. Each sub-project registers its own alembic branch which is responsible for migrating the schemas of the tables owned by the sub-project. The neutron-db-manage script detects which sub-projects have been installed by enumerating the ``neutron.db.alembic_migrations`` entrypoints. For more details see the `Entry Points section of Contributing extensions to Neutron `_. The neutron-db-manage script runs the given alembic command against all installed sub-projects. (An exception is the ``revision`` command, which is discussed in the `Developers`_ section below.) 2. Offline/Online Migrations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Since Liberty, Neutron maintains two parallel alembic migration branches. The first one, called 'expand', is used to store expansion-only migration rules. Those rules are strictly additive and can be applied while neutron-server is running. Examples of additive database schema changes are: creating a new table, adding a new table column, adding a new index, etc. The second branch, called 'contract', is used to store those migration rules that are not safe to apply while neutron-server is running. Those include: column or table removal, moving data from one part of the database into another (renaming a column, transforming single table into multiple, etc.), introducing or modifying constraints, etc. The intent of the split is to allow invoking those safe migrations from 'expand' branch while neutron-server is running, reducing downtime needed to upgrade the service. For more details, see the `Expand and Contract Scripts`_ section below. 
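To sketch how this split reduces downtime, the two branches can be applied separately with the wrapper's ``--expand`` and ``--contract`` options (verify that your release provides these options):

.. code-block:: console

   # Safe while neutron-server is still running the old code:
   $ neutron-db-manage upgrade --expand

   # Requires neutron-server to be stopped (short downtime window):
   $ neutron-db-manage upgrade --contract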
Developers ---------- A database migration script is required when you submit a change to Neutron or a sub-project that alters the database model definition. The migration script is a special python file that includes code to upgrade the database to match the changes in the model definition. Alembic will execute these scripts in order to provide a linear migration path between revisions. The neutron-db-manage command can be used to generate migration scripts for you to complete. The operations in the template are those supported by the Alembic migration library. .. _neutron-db-manage-without-devstack: Running neutron-db-manage without devstack ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When, as a developer, you want to work with the Neutron DB schema and alembic migrations only, it can be rather tedious to rely on devstack just to get an up-to-date neutron-db-manage installed. This section describes how to work on the schema and migration scripts with just the unit test virtualenv and mysql. You can also operate on a separate test database so you don't mess up the installed Neutron database. Setting up the environment ++++++++++++++++++++++++++ Install mysql service ''''''''''''''''''''' This only needs to be done once since it is a system install. If you have run devstack on your system before, then the mysql service is already installed and you can skip this step. Mysql must be configured as installed by devstack, and the following script accomplishes this without actually running devstack:: INSTALL_MYSQL_ONLY=True ./tools/configure_for_func_testing.sh ../devstack Run this from the root of the neutron repo. It assumes an up-to-date clone of the devstack repo is in ``../devstack``. Note that you must know the mysql root password. It is derived from (in order of precedence): - ``$MYSQL_PASSWORD`` in your environment - ``$MYSQL_PASSWORD`` in ``../devstack/local.conf`` - ``$MYSQL_PASSWORD`` in ``../devstack/localrc`` - default of 'secretmysql' from ``tools/configure_for_func_testing.sh`` Work on a test database ''''''''''''''''''''''' Rather than using the neutron database when working on schema and alembic migration script changes, we can work on a test database. In the examples below, we use a database named ``testdb``. To create the database:: mysql -e "create database testdb;" You will often need to clear it to re-run operations from a blank database:: mysql -e "drop database testdb; create database testdb;" To work on the test database instead of the neutron database, point to it with the ``--database-connection`` option:: neutron-db-manage --database-connection mysql+pymysql://root:secretmysql@127.0.0.1/testdb?charset=utf8 You may find it convenient to set up an alias (in your .bashrc) for this:: alias test-db-manage='neutron-db-manage --database-connection mysql+pymysql://root:secretmysql@127.0.0.1/testdb?charset=utf8' Create and activate the virtualenv '''''''''''''''''''''''''''''''''' From the root of the neutron (or sub-project) repo directory, run:: tox --notest -r -e py37 source .tox/py37/bin/activate Now you can use the ``test-db-manage`` alias in place of ``neutron-db-manage`` in the script auto-generation instructions below. When you are done, exit the virtualenv:: deactivate Script Auto-generation ~~~~~~~~~~~~~~~~~~~~~~ This section describes how to auto-generate an alembic migration script for a model change. You may either use the system installed devstack environment, or a virtualenv + testdb environment as described in :ref:`neutron-db-manage-without-devstack`. 
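If you use the virtualenv + testdb environment, substitute the ``test-db-manage`` alias and the ``testdb`` database for ``neutron-db-manage`` and the ``neutron`` database in the steps below. For example, resetting and upgrading the test database looks like this:

.. code-block:: console

   $ mysql -e "drop database testdb; create database testdb;"
   $ test-db-manage upgrade heads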
Stop the neutron service. Work from the base directory of the neutron (or
sub-project) repo. Check out the master branch and do ``git pull`` to ensure
it is fully up to date. Check out your development branch and rebase to
master.

**NOTE:** Make sure you have not updated the ``CONTRACT_HEAD`` or
``EXPAND_HEAD`` yet at this point.

Start with an empty database and upgrade to heads::

    mysql -e "drop database neutron; create database neutron;"
    neutron-db-manage upgrade heads

The database schema is now created without your model changes. The alembic
``revision --autogenerate`` command will look for differences between the
schema generated by the upgrade command and the schema defined by the
models, including your model updates::

    neutron-db-manage revision -m "description of revision" --autogenerate

This generates a prepopulated template with the changes needed to match the
database state with the models. You should inspect the autogenerated
template to ensure that the proper models have been altered.

When running the above command you will probably get the following error
message::

    Multiple heads are present; please specify the head revision on which
    the new revision should be based, or perform a merge.

This is alembic telling you that it does not know which branch (contract or
expand) to generate the revision for. You must decide, based on whether you
are making contracting or expanding changes to the schema, and provide
either the ``--contract`` or ``--expand`` option. If you have both types of
changes, you must run the command twice, once with each option, and then
manually edit the generated revision scripts to separate the migration
operations.

In rare circumstances, you may want to start with an empty migration
template and manually author the changes necessary for an upgrade. You can
create a blank file for a branch via::

    neutron-db-manage revision -m "description of revision" --expand
    neutron-db-manage revision -m "description of revision" --contract

**NOTE:** If you use the above commands, check that the migration is created
in a directory named after the current release. If not, please raise the
issue with the development team (IRC, mailing list, launchpad bug).

**NOTE:** The "description of revision" text should be a simple English
sentence. The first 30 characters of the description will be used in the
file name for the script, with underscores substituted for spaces. If the
truncation occurs at an awkward point in the description, you can modify the
script file name manually before committing.

The timeline on each alembic branch should remain linear and not interleave
with other branches, so that there is a clear path when upgrading. To verify
that alembic branches maintain linear timelines, you can run this command::

    neutron-db-manage check_migration

If this command reports an error, you can troubleshoot by showing the
migration timelines using the ``history`` command::

    neutron-db-manage history

Expand and Contract Scripts
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Under the obsolete "branchless" design, a migration script indicated a
specific "version" of the schema and included directives that applied all
necessary changes to the database at once. If we look for example at the
script ``2d2a8a565438_hierarchical_binding.py``, we will see::

    # .../alembic_migrations/versions/2d2a8a565438_hierarchical_binding.py

    def upgrade():

        # .. inspection code ...
        op.create_table(
            'ml2_port_binding_levels',
            sa.Column('port_id', sa.String(length=36), nullable=False),
            sa.Column('host', sa.String(length=255), nullable=False),
            # ... more columns ...
        )

        for table in port_binding_tables:
            op.execute((
                "INSERT INTO ml2_port_binding_levels "
                "SELECT port_id, host, 0 AS level, driver, segment AS segment_id "
                "FROM %s "
                "WHERE host <> '' "
                "AND driver <> '';"
            ) % table)

        op.drop_constraint(fk_name_dvr[0], 'ml2_dvr_port_bindings', 'foreignkey')
        op.drop_column('ml2_dvr_port_bindings', 'cap_port_filter')
        op.drop_column('ml2_dvr_port_bindings', 'segment')
        op.drop_column('ml2_dvr_port_bindings', 'driver')

        # ... more DROP instructions ...

The above script contains directives that fall under both the "expand" and
the "contract" categories, as well as some data migrations. The
``op.create_table`` directive is an "expand"; it may be run safely while the
old version of the application still runs, as the old code simply doesn't
look for this table. The ``op.drop_constraint`` and ``op.drop_column``
directives are "contract" directives (the drop column more so than the drop
constraint); running at least the ``op.drop_column`` directives means that
the old version of the application will fail, as it will attempt to access
columns which no longer exist. The data migrations in this script add new
rows to the newly added ``ml2_port_binding_levels`` table.

Under the new migration script directory structure, the above script would
be stated as two scripts, an "expand" and a "contract" script::

    # expansion operations
    # .../alembic_migrations/versions/liberty/expand/2bde560fc638_hierarchical_binding.py

    def upgrade():

        op.create_table(
            'ml2_port_binding_levels',
            sa.Column('port_id', sa.String(length=36), nullable=False),
            sa.Column('host', sa.String(length=255), nullable=False),
            # ... more columns ...
        )


    # contraction operations
    # .../alembic_migrations/versions/liberty/contract/4405aedc050e_hierarchical_binding.py

    def upgrade():

        for table in port_binding_tables:
            op.execute((
                "INSERT INTO ml2_port_binding_levels "
                "SELECT port_id, host, 0 AS level, driver, segment AS segment_id "
                "FROM %s "
                "WHERE host <> '' "
                "AND driver <> '';"
            ) % table)

        op.drop_constraint(fk_name_dvr[0], 'ml2_dvr_port_bindings', 'foreignkey')
        op.drop_column('ml2_dvr_port_bindings', 'cap_port_filter')
        op.drop_column('ml2_dvr_port_bindings', 'segment')
        op.drop_column('ml2_dvr_port_bindings', 'driver')

        # ... more DROP instructions ...

The two scripts are present in different subdirectories and are part of
entirely separate versioning streams. The "expand" operations are in the
"expand" script, and the "contract" operations are in the "contract"
script.

For the time being, data migration rules also belong to the contract branch.
The expectation is that live data migrations will eventually move into
middleware that is aware of the different database schema elements to
converge on, but Neutron is not there yet.

Scripts that contain only expansion or contraction rules do not require a
split into two parts.

If a contraction script depends on a script from the expansion stream, the
following directive should be added to the contraction script::

    depends_on = ('<expand-revision-id>',)
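For illustration, here is a minimal sketch of a contract script declaring
such a dependency; the revision identifiers are reused from the example
above, the down_revision placeholder and the dropped column are illustrative
only::

    # .../versions/liberty/contract/4405aedc050e_hierarchical_binding.py
    from alembic import op

    # revision identifiers, used by alembic (values are illustrative)
    revision = '4405aedc050e'
    down_revision = '<previous contract revision>'

    # The expand-branch revision that must be applied before this script.
    depends_on = ('2bde560fc638',)

    def upgrade():
        op.drop_column('ml2_dvr_port_bindings', 'driver')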
Expand and Contract Branch Exceptions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In some cases, we have to have "expand" operations in contract migrations.
For example, the table 'networksegments' was renamed in a contract
migration, so all operations on this table are required to be in the
contract branch as well. For such cases, we use the
``contract_creation_exceptions`` that should be implemented as part of such
migrations. This is needed to make the functional tests pass.

Usage::

    def contract_creation_exceptions():
        """Docstring should explain why we allow such an exception
        for the contract branch.
        """
        return {
            sqlalchemy_obj_type: ['name']
            # For example: sa.Column: ['subnets.segment_id']
        }

HEAD files for conflict management
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In the directory ``neutron/db/migration/alembic_migrations/versions`` there
are two files, ``CONTRACT_HEAD`` and ``EXPAND_HEAD``. These files contain
the ID of the head revision in each branch. The purpose of these files is to
validate the revision timelines and prevent non-linear changes from entering
the merge queue.

When you create a new migration script with neutron-db-manage, these files
will be updated automatically. But if another migration script is merged
while your change is under review, you will need to resolve the conflict
manually by changing the ``down_revision`` in your migration script.

Applying database migration rules
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

To apply just the expansion rules, execute::

    neutron-db-manage upgrade --expand

After the first step is done, you can stop neutron-server and apply the
remaining, non-expansive migration rules, if any::

    neutron-db-manage upgrade --contract

and finally, start your neutron-server again.

If you have multiple neutron-server instances in your cloud, and there are
pending contract scripts not applied to the database, a full shutdown of all
those services is required before 'upgrade --contract' is executed. You can
determine whether there are any pending contract scripts by checking the
message returned from the following command::

    neutron-db-manage has_offline_migrations

If you are not interested in applying safe migration rules while the service
is running, you can still upgrade the database the old way, by stopping the
service and then applying all available rules::

    neutron-db-manage upgrade heads

It will apply all the rules from both the expand and the contract branches,
in the proper order.

Tagging milestone revisions
~~~~~~~~~~~~~~~~~~~~~~~~~~~

When a named release (liberty, mitaka, etc.) is done for neutron or a
sub-project, the alembic revision scripts at the head of each branch for
that release must be tagged. This is referred to as a milestone revision
tag. For example, there is a patch that tags the liberty milestone revisions
for the neutron-fwaas sub-project. Note that each branch (expand and
contract) is tagged.

Tagging milestones allows neutron-db-manage to upgrade the schema to a
milestone release, e.g.::

    neutron-db-manage upgrade liberty

Generation of comparable metadata with current database schema
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The directory ``neutron/db/migration/models`` contains the module
``head.py``, which provides all database models at the current HEAD. Its
purpose is to create comparable metadata with the current database schema.
The database schema is generated by the alembic migration scripts. The
models must match, and this is verified by a model-migration sync test in
Neutron's functional test suite. That test requires all modules containing
DB models to be imported by head.py in order to make a complete comparison.
When adding new database models, developers must update this module,
otherwise the change will fail to merge.
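For illustration, the comparable metadata can be inspected along these
lines; the ``get_metadata()`` helper is assumed here, and the real
functional test plumbing differs::

    from neutron.db.migration.models import head

    # head.py imports every module that defines DB models, so the metadata
    # below is assumed to describe the complete model-defined schema.
    metadata = head.get_metadata()  # assumed helper
    for table_name in sorted(metadata.tables):
        print(table_name)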
Client command extension support
================================

The client command extension adds support for extending the neutron client
while considering ease of creation. The full document can be found in the
python-neutronclient repository:
https://docs.openstack.org/python-neutronclient/latest/contributor/client_command_extensions.html

Contributing new extensions to Neutron
======================================

Introduction
------------

Neutron has a pluggable architecture, with a number of extension points.
This documentation covers aspects relevant to contributing new Neutron v2
core (aka monolithic) plugins, ML2 mechanism drivers, and L3 service
plugins. This document will initially cover a number of process-oriented
aspects of the contribution process, and proceed to provide a how-to guide
that shows how to go from 0 LOC's to successfully contributing new
extensions to Neutron. In the remainder of this guide, we will try to use
practical examples as much as we can so that people have working solutions
they can start from.

This guide is for a developer who wants to have a degree of visibility
within the OpenStack Networking project. If you are a developer who wants to
provide a Neutron-based solution without interacting with the Neutron
community, you are free to do so, but you can stop reading now, as this
guide is not for you.

Plugins and drivers for non-reference implementations are known as
"third-party" code.
This includes code for supporting vendor products, as well as code for
supporting open-source networking implementations.

Before the Kilo release these plugins and drivers were included in the
Neutron tree. During the Kilo cycle the third-party plugins and drivers
underwent the first phase of a process called decomposition. During this
phase, each plugin and driver moved the bulk of its logic to a separate git
repository, while leaving a thin "shim" in the neutron tree together with
the DB models and migrations (and perhaps some config examples).

During the Liberty cycle the decomposition concept was taken to its
conclusion by allowing third-party code to exist entirely out of tree.
Further extension mechanisms have been provided to better support external
plugins and drivers that alter the API and/or the data model.

In the Mitaka cycle we will **require** all third-party code to be moved out
of the neutron tree completely.

'Outside the tree' can be anything that is publicly available: it may be a
repo on opendev.org for instance, a tarball, a pypi package, etc. A
plugin/drivers maintainer team self-governs in order to promote sharing,
reuse, innovation, and release of the 'out-of-tree' deliverable. It should
not be required for any member of the core team to be involved with this
process, although core members of the Neutron team can participate in
whichever capacity is deemed necessary to facilitate out-of-tree
development.

This guide is aimed at you as the maintainer of code that integrates with
Neutron but resides in a separate repository.

Contribution Process
--------------------

If you want to extend OpenStack Networking with your technology, and you
want to do it within the visibility of the OpenStack project, follow the
guidelines and examples below. We'll describe best practices for:

* Design and Development;
* Testing and Continuous Integration;
* Defect Management;
* Backport Management for plugin specific code;
* DevStack Integration;
* Documentation.

Once you have everything in place you may want to add your project to the
list of Neutron sub-projects. See :ref:`add-remove-projects-to-stadium` for
details.

Design and Development
----------------------

Assuming you have a working repository, any development to your own repo
does not need any blueprint, specification or bugs against Neutron. However,
if your project is a part of the Neutron Stadium effort, you are expected to
participate in the principles of the Four Opens, meaning your design should
be done in the open. Thus, it is encouraged to file documentation for
changes in your own repository.

If your code is hosted on opendev.org then the gerrit review system is
automatically provided. Contributors should follow the review guidelines
similar to those of Neutron. However, you as the maintainer have the
flexibility to choose who can approve/merge changes in your own repo.

It is recommended (but not required, see the policies document) that you set
up a third-party CI system. This will provide a vehicle for checking the
third-party code against Neutron changes. See `Testing and Continuous
Integration`_ below for more detailed recommendations.

Design documents can still be supplied in the form of Restructured Text
(RST) documents, within the same third-party library repo. If changes to the
common Neutron code are required, an RFE may need to be filed. However,
every case is different and you are invited to seek guidance from Neutron
core reviewers about what steps to follow.
Testing and Continuous Integration
----------------------------------

The following strategies are recommendations only, since third-party CI
testing is not an enforced requirement. However, these strategies are
employed by the majority of the plugin/driver contributors that actively
participate in the Neutron development community, since they have learned
from experience how quickly their code can fall out of sync with the rapidly
changing Neutron core code base.

* You should run unit tests in your own external library (e.g. on
  opendev.org, where the Jenkins setup comes for free).

* Your third-party CI should validate third-party integration with Neutron
  via functional testing.

The third-party CI is a communication mechanism. The objective of this
mechanism is as follows:

* it communicates to you when someone has contributed a change that
  potentially breaks your code. It is then up to you, as the maintainer of
  the affected plugin/driver, to determine whether the failure is transient
  or real, and resolve the problem if it is.

* it communicates to a patch author that they may be breaking a
  plugin/driver. If they have the time/energy/relationship with the
  maintainer of the plugin/driver in question, then they can (at their
  discretion) work to resolve the breakage.

* it communicates to the community at large whether a given plugin/driver
  is being actively maintained.

* A maintainer that is perceived to be responsive to failures in their
  third-party CI jobs is likely to generate community goodwill.

It is worth noting that if the plugin/driver repository is hosted on
opendev.org, due to current openstack-infra limitations, it is not possible
to have third-party CI systems participating in the gate pipeline for the
repo. This means that the only validation provided during the merge process
to the repo is through unit tests. Post-merge hooks can still be exploited
to provide third-party CI feedback, and alert you of potential issues. As
mentioned above, third-party CI systems will continue to validate Neutron
core commits. This will allow them to detect when incompatible changes
occur, whether they are in Neutron or in the third-party repo.

Defect Management
-----------------

Bugs affecting third-party code should *not* be filed in the Neutron project
on launchpad. Bug tracking can be done in any system you choose, but by
creating a third-party project in launchpad, bugs that affect both Neutron
and your code can be more easily tracked using launchpad's "also affects
project" feature.

Security Issues
~~~~~~~~~~~~~~~

Here are some answers to how to handle security issues in your repo, taken
from a mailing list message on the topic:

- How should your security issues be managed?

  The OpenStack Vulnerability Management Team (VMT) follows a documented
  process which can basically be reused by any project-team when needed.

- Should the OpenStack security team be involved?

  The OpenStack VMT directly oversees vulnerability reporting and disclosure
  for a subset of OpenStack source code repositories. However, they are
  still quite happy to answer any questions you might have about
  vulnerability management for your own projects even if they're not part
  of that set. Feel free to reach out to the VMT in public or in private.

  Also, the VMT is an autonomous subgroup of the much larger OpenStack
  Security project-team. They're a knowledgeable bunch and quite responsive
  if you want to get their opinions or help with security-related issues
  (vulnerabilities or otherwise).

- Does a CVE need to be filed?

  It can vary widely.
  If a commercial distribution such as Red Hat is redistributing a
  vulnerable version of your software, then they may assign one anyway even
  if you don't request one yourself. Or the reporter may request one; the
  reporter may even be affiliated with an organization who has already
  assigned/obtained a CVE before they initiate contact with you.

- Do the maintainers need to publish OSSN or equivalent documents?

  OpenStack Security Advisories (OSSA) are official publications of the
  OpenStack VMT and only cover VMT-supported software. OpenStack Security
  Notes (OSSN) are published by editors within the OpenStack Security
  project-team on more general security topics and may even cover issues in
  non-OpenStack software commonly used in conjunction with OpenStack, so
  it's at their discretion as to whether they would be able to accommodate a
  particular issue with an OSSN.

  However, these are all fairly arbitrary labels, and what really matters in
  the grand scheme of things is that vulnerabilities are handled seriously,
  fixed with due urgency and care, and announced widely -- not just on
  relevant OpenStack mailing lists but also preferably somewhere with
  broader distribution like the Open Source Security mailing list. The goal
  is to get information on your vulnerabilities, mitigating measures and
  fixes into the hands of the people using your software in a timely manner.

- Anything else to consider here?

  The OpenStack VMT is in the process of trying to reinvent itself so that
  it can better scale within the context of the "Big Tent." This includes
  making sure the policy/process documentation is more consumable and
  reusable even by project-teams working on software outside the scope of
  our charter. It's a work in progress, and any input is welcome on how we
  can make this function well for everyone.

Backport Management Strategies
------------------------------

This section applies only to third-party maintainers who had code in the
Neutron tree during the Kilo and earlier releases. It will be obsolete once
the Kilo release is no longer supported.

If a change made to out-of-tree third-party code needs to be back-ported to
in-tree code in a stable branch, you may submit a review without a
corresponding master branch change. The change will be evaluated by core
reviewers for stable branches to ensure that the backport is justified and
that it does not affect Neutron core code stability.

DevStack Integration Strategies
-------------------------------

When developing and testing a new or existing plugin or driver, the aid
provided by DevStack is incredibly valuable: DevStack can help get all the
software bits installed and configured correctly and, more importantly, in a
predictable way. For DevStack integration there are a few options available,
and they may or may not make sense depending on whether you are contributing
a new or existing plugin or driver.

If you are contributing a new plugin, the approach to choose should be based
on the Extras.d Hooks' externally hosted plugins. With the extras.d hooks,
the DevStack integration is co-located with the third-party integration
library, and it leads to the greatest level of flexibility when dealing with
DevStack based dev/test deployments.

One final consideration is worth making for third-party CI setups: if
Devstack Gate is used, it does provide hook functions that can be executed
at specific times of the devstack-gate-wrap script run. For example, the
Neutron Functional job uses them. For more details see
devstack-vm-gate-wrap.sh.
Documentation
-------------

For the layout of the documentation directory structure, see the effective
neutron guide.

Project Initial Setup
---------------------

The how-to below assumes that the third-party library will be hosted on
opendev.org. This lets you tap into the entire OpenStack CI infrastructure
and can be a great place to start from to contribute your new or existing
driver/plugin. The list of steps below is a summarized version of what you
can find on http://docs.openstack.org/infra/manual/creators.html. They are
meant to be the bare minimum you have to complete in order to get you off
the ground.

* Create a public repository: this can be a personal opendev.org repo or any
  publicly available git repo, e.g. ``https://github.com/john-doe/foo.git``.
  This would be a temporary buffer to be used to feed the one on
  opendev.org.

* Initialize the repository: if you are starting afresh, you may
  *optionally* want to use cookiecutter to get a skeleton project. You can
  learn how to use cookiecutter on
  https://opendev.org/openstack-dev/cookiecutter. If you want to build the
  repository from an existing Neutron module, you may want to skip this step
  now, build the history first (next step), and come back here to initialize
  the remainder of the repository with other files being generated by the
  cookiecutter (like tox.ini, setup.cfg, setup.py, etc.).

* Create a repository on opendev.org. For this you need the help of the
  OpenStack infra team. It is worth noting that you only get one shot at
  creating the repository on opendev.org. This is the time you get to choose
  whether you want to start from a clean slate, or you want to import the
  repo created during the previous step. In the latter case, you can do so
  by specifying the upstream section for your project in
  project-config/gerrit/project.yaml. Steps are documented on the
  Repository Creator's Guide.

* Ask for a Launchpad user to be assigned to the newly created core team.
  Steps are documented in the infra manual.

* Fix, fix, fix: at this point you have an external base to work on. You can
  develop against the new opendev.org project, the same way you work with
  any other OpenStack project: you have pep8, docs, and python CI jobs that
  validate your patches when posted to Gerrit. For instance, one thing you
  would need to do is to define an entry point for your plugin or driver in
  your own setup.cfg, similarly to how it is done in the setup.cfg for ODL.

* Define an entry point for your plugin or driver in setup.cfg.

* Create a third-party CI account: if you do not already have one, follow
  the instructions for third-party CI to get one.

Internationalization support
----------------------------

OpenStack is committed to broad international support. Internationalization
(I18n) is one of the important areas to make OpenStack ubiquitous. Each
project is recommended to support i18n.

This section describes how to set up translation support. The description in
this section uses the following variables:

* repository: ``openstack/${REPOSITORY}`` (e.g., ``openstack/networking-foo``)
* top level python path: ``${MODULE_NAME}`` (e.g., ``networking_foo``)

oslo.i18n
~~~~~~~~~

* Each subproject repository should have its own oslo.i18n integration
  wrapper module ``${MODULE_NAME}/_i18n.py``. The details are found at
  https://docs.openstack.org/oslo.i18n/latest/user/usage.html.

  .. note:: The **DOMAIN** name should match your **module** name
     ``${MODULE_NAME}``.

* Import ``_()`` from your ``${MODULE_NAME}/_i18n.py``.

.. warning:: Do not use ``_()`` in the builtins namespace which is
   registered by **gettext.install()** in ``neutron/__init__.py``. It is
   now deprecated, as described in the oslo.i18n documentation.
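For illustration, a minimal ``${MODULE_NAME}/_i18n.py`` wrapper along the
lines described in the oslo.i18n usage documentation might look like the
sketch below; the module name ``networking_foo`` is hypothetical::

    # networking_foo/_i18n.py  (illustrative sketch)
    import oslo_i18n

    DOMAIN = 'networking_foo'  # DOMAIN must match the module name

    _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)

    # The primary translation function using the well-known name "_"
    _ = _translators.primary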
Setting up translation support
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You need to create or edit the following files to start translation support:

* setup.cfg
* babel.cfg

We have a good example for an oslo project at
https://review.opendev.org/#/c/98248/.

Add the following to ``setup.cfg``::

    [extract_messages]
    keywords = _ gettext ngettext l_ lazy_gettext
    mapping_file = babel.cfg
    output_file = ${MODULE_NAME}/locale/${MODULE_NAME}.pot

    [compile_catalog]
    directory = ${MODULE_NAME}/locale
    domain = ${MODULE_NAME}

    [update_catalog]
    domain = ${MODULE_NAME}
    output_dir = ${MODULE_NAME}/locale
    input_file = ${MODULE_NAME}/locale/${MODULE_NAME}.pot

Note that ``${MODULE_NAME}`` is used in all names.

Create ``babel.cfg`` with the following contents::

    [python: **.py]

Enable Translation
~~~~~~~~~~~~~~~~~~

To update and import translations, you need to make a change in
project-config. A good example is found at
https://review.opendev.org/#/c/224222/. After doing this, the necessary jobs
will be run and push/pull a message catalog to/from the translation
infrastructure.

Integrating with the Neutron system
-----------------------------------

Configuration Files
~~~~~~~~~~~~~~~~~~~

The ``data_files`` in the ``[files]`` section of ``setup.cfg`` of Neutron
shall not contain any third-party references. These shall be located in the
same section of the third-party repo's own ``setup.cfg`` file.

* Note: Care should be taken when naming sections in configuration files.
  When the Neutron service or an agent starts, oslo.config loads sections
  from all specified config files. This means that if a section [foo] exists
  in multiple config files, duplicate settings will collide. It is therefore
  recommended to prefix section names with a third-party string, e.g.
  [vendor_foo].

Since Mitaka, configuration files are not maintained in the git repository
but should be generated as follows::

    tox -e genconfig

If a 'tox' environment is unavailable, then you can run the following script
instead to generate the configuration files::

    ./tools/generate_config_file_samples.sh

It is advised that subprojects do not keep their configuration files in
their respective trees and instead generate them using a similar approach as
Neutron does.

**ToDo: Inclusion in OpenStack documentation?** Is there a recommended way
to have third-party config options listed in the configuration guide in
docs.openstack.org?

Database Models and Migrations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

A third-party repo may contain database models for its own tables. Although
these tables are in the Neutron database, they are independently managed
entirely within the third-party code. Third-party code shall **never**
modify neutron core tables in any way.

Each repo has its own *expand* and *contract* alembic migration branches. A
third-party repo's alembic migration branches may operate only on tables
that are owned by the repo.

* Note: Care should be taken when adding new tables. To prevent collision of
  table names it is **required** to prefix them with a vendor/plugin string.

* Note: A third-party maintainer may opt to use a separate database for
  their tables. This may complicate cases where there are foreign key
  constraints across schemas for DBMS that do not support this well.
  Third-party maintainer discretion advised.
The database tables owned by a third-party repo can have references to
fields in neutron core tables. However, the alembic branch for a
plugin/driver repo shall never update any part of a table that it does not
own.

**Note: What happens when a referenced item changes?**

* **Q:** If a driver's table has a reference (for example a foreign key) to
  a neutron core table, and the referenced item is changed in neutron, what
  should you do?

* **A:** Fortunately, this should be an extremely rare occurrence. Neutron
  core reviewers will not allow such a change unless there is a very
  carefully thought-out design decision behind it. That design will include
  how to address any third-party code affected. (This is another good reason
  why you should stay actively involved with the Neutron developer
  community.)

The ``neutron-db-manage`` alembic wrapper script for neutron detects alembic
branches for installed third-party repos, and the upgrade command
automatically applies to all of them. A third-party repo must register its
alembic migrations at installation time. This is done by providing an
entrypoint in setup.cfg as follows:

For a third-party repo named ``networking-foo``, add the
alembic_migrations directory as an entrypoint in the
``neutron.db.alembic_migrations`` group::

    [entry_points]
    neutron.db.alembic_migrations =
        networking-foo = networking_foo.db.migration:alembic_migrations

**ToDo: neutron-db-manage autogenerate** The alembic autogenerate command
needs to support branches in external repos. Bug #1471333 has been filed
for this.

DB Model/Migration Testing
~~~~~~~~~~~~~~~~~~~~~~~~~~

There is a template functional test that third-party maintainers can use to
develop tests for model-vs-migration sync in their repos. It is recommended
that each third-party CI sets up such a test, and runs it regularly against
Neutron master.

Entry Points
~~~~~~~~~~~~

The Python setuptools installs all entry points for packages in one global
namespace for an environment. Thus each third-party repo can define its
package's own ``[entry_points]`` in its own ``setup.cfg`` file. For example,
for the ``networking-foo`` repo::

    [entry_points]
    console_scripts =
        neutron-foo-agent = networking_foo.cmd.eventlet.agents.foo:main
    neutron.core_plugins =
        foo_monolithic = networking_foo.plugins.monolithic.plugin:FooPluginV2
    neutron.service_plugins =
        foo_l3 = networking_foo.services.l3_router.l3_foo:FooL3ServicePlugin
    neutron.ml2.type_drivers =
        foo_type = networking_foo.plugins.ml2.drivers.foo:FooType
    neutron.ml2.mechanism_drivers =
        foo_ml2 = networking_foo.plugins.ml2.drivers.foo:FooDriver
    neutron.ml2.extension_drivers =
        foo_ext = networking_foo.plugins.ml2.drivers.foo:FooExtensionDriver

* Note: It is advisable to include ``foo`` in the names of these entry
  points to avoid conflicts with other third-party packages that may get
  installed in the same environment.

API Extensions
~~~~~~~~~~~~~~

Extensions can be loaded in two ways:

#. Use the ``append_api_extensions_path()`` library API. This method is
   defined in ``neutron/api/extensions.py`` in the neutron tree. See the
   sketch below for an illustration.

#. Leverage the ``api_extensions_path`` config variable when deploying. See
   the example config file ``etc/neutron.conf`` in the neutron tree where
   this variable is commented.
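For illustration, a minimal sketch of the first approach; the
``networking_foo.extensions`` package is hypothetical::

    # Somewhere in the third-party plugin's initialization code
    # (illustrative sketch only).
    from neutron.api import extensions as api_extensions

    import networking_foo.extensions  # hypothetical extensions package

    # Ask Neutron to also scan this package for API extension descriptors.
    api_extensions.append_api_extensions_path(
        networking_foo.extensions.__path__)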
Service Providers
~~~~~~~~~~~~~~~~~

If your project uses service provider(s) the same way VPNAAS does, you
specify your service provider in your ``project_name.conf`` file like so::

    [service_providers]
    # Must be in form:
    # service_provider = <service_type>:<name>:<driver>[:default][,...]

In order for Neutron to load this correctly, make sure you do the following
in your code::

    from neutron.db import servicetype_db

    service_type_manager = servicetype_db.ServiceTypeManager.get_instance()
    service_type_manager.add_provider_configuration(
        YOUR_SERVICE_TYPE,
        pconf.ProviderConfiguration(YOUR_SERVICE_MODULE, YOUR_SERVICE_TYPE))

This is typically required when you instantiate your service plugin class.

Interface Drivers
~~~~~~~~~~~~~~~~~

Interface (VIF) drivers for the reference implementations are defined in
``neutron/agent/linux/interface.py``. Third-party interface drivers shall be
defined in a similar location within their own repo.

The entry point for the interface driver is a Neutron config option. It is
up to the installer to configure this item in the ``[default]`` section. For
example::

    [default]
    interface_driver = networking_foo.agent.linux.interface.FooInterfaceDriver

**ToDo: Interface Driver port bindings.** ``VIF_TYPE_*`` constants in
``neutron_lib/api/definitions/portbindings.py`` should be moved from neutron
core to the repositories where their drivers are implemented. We need to
provide some config or hook mechanism for VIF types to be registered by
external interface drivers. For Nova, selecting the VIF driver can be done
outside of Neutron (using the new os-vif python library?). Armando and
Akihiro to discuss.

Rootwrap Filters
~~~~~~~~~~~~~~~~

If a third-party repo needs a rootwrap filter for a command that is not used
by Neutron core, then the filter shall be defined in the third-party repo.

For example, to add rootwrap filters for commands in the repo
``networking-foo``:

* In the repo, create the file: ``etc/neutron/rootwrap.d/foo.filters``

* In the repo's ``setup.cfg`` add the filters to data_files::

    [files]
    data_files =
        etc/neutron/rootwrap.d =
            etc/neutron/rootwrap.d/foo.filters

Extending python-neutronclient
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The maintainer of a third-party component may wish to add extensions to the
Neutron CLI client. Thanks to https://review.opendev.org/148318 this can now
be accomplished. See the Client Command Extensions document.

Other repo-split items
~~~~~~~~~~~~~~~~~~~~~~

(These are still TBD.)

* Splitting policy.json? **ToDo** Armando will investigate.

* Generic instructions (or a template) for installing an out-of-tree plugin
  or driver for Neutron. Possibly something for the networking guide, and/or
  a template that plugin/driver maintainers can modify and include with
  their package.

============================
So You Want to Contribute...
============================

For general information on contributing to OpenStack, please check out the
contributor guide to get started. It covers all the basics that are common
to all OpenStack projects: the accounts you need, the basics of interacting
with our Gerrit review system, how we communicate as a community, etc.

Below we cover the more project-specific information you need to get started
with Neutron.

Communication
~~~~~~~~~~~~~

.. This would be a good place to put the channel you chat in as a project;
   when/where your meeting is, the tags you prepend to your ML threads,
   etc.

- IRC channel: #openstack-neutron
- Mailing list's prefix: [neutron]
- Team Meeting: This is the general Neutron team meeting.
  The discussion in this meeting is about all things related to the Neutron
  project, like community goals, progress with blueprints, bugs, etc. There
  is also an ``On Demand Agenda`` at the end of this meeting, where anyone
  can add a topic to discuss with the Neutron team.

  - time: http://eavesdrop.openstack.org/#Neutron_Team_Meeting
  - agenda: https://wiki.openstack.org/wiki/Network/Meetings

- Drivers team meeting: This is the meeting where the Neutron drivers
  discuss new RFEs.

  - time: http://eavesdrop.openstack.org/#Neutron_drivers_Meeting
  - agenda: https://wiki.openstack.org/wiki/Meetings/NeutronDrivers

- Neutron CI team meeting: This is the meeting where upstream CI issues are
  discussed every week. If you are interested in helping our CI to stay
  green, that's a good place to join and help.

  - time: http://eavesdrop.openstack.org/#Neutron_CI_team
  - agenda: https://etherpad.openstack.org/p/neutron-ci-meetings

- Neutron QoS team meeting: This is the meeting of the Neutron Quality of
  Service subteam.

  - time: http://eavesdrop.openstack.org/#Neutron_QoS_Meeting

- Neutron L3 team meeting: This is the meeting of the Neutron L3 subteam,
  where all issues related to IPAM, L3 agents, etc. are discussed.

  - time: http://eavesdrop.openstack.org/#Neutron_L3_Sub-team_Meeting
  - agenda: https://etherpad.openstack.org/p/neutron-l3-subteam

Contacting the Core Team
~~~~~~~~~~~~~~~~~~~~~~~~

.. This section should list the core team, their irc nicks, emails,
   timezones etc. If all this info is maintained elsewhere (i.e. a wiki),
   you can link to that instead of enumerating everyone here.

The list of current Neutron core reviewers is available on gerrit. The
overall structure of the Neutron team is available in :ref:`Neutron teams`.

New Feature Planning
~~~~~~~~~~~~~~~~~~~~

.. This section is for talking about the process to get a new feature in.
   Some projects use blueprints, some want specs, some want both! Some
   projects stick to a strict schedule when selecting what new features
   will be reviewed for a release.

The Neutron team uses ``RFE (Request for Enhancements)`` to propose new
features. An RFE should be submitted as a Launchpad bug first (see the
section :ref:`reporting_a_bug`). The title of an RFE bug should start with
the ``[RFE]`` tag. Such RFEs need to be discussed and approved by the
:ref:`Neutron drivers team`. In some cases an additional spec proposed to
the Neutron specs repo may be necessary. The complete process is described
in detail in :ref:`Blueprints guide`.

Task Tracking
~~~~~~~~~~~~~

.. This section is about where you track tasks - launchpad? storyboard? is
   there more than one launchpad project? what's the name of the project
   group in storyboard?

We track our tasks in Launchpad. If you're looking for some smaller, easier
work item to pick up and get started on, search for the 'Low hanging fruit'
tag. The list of all official tags which the Neutron team is using is
available on :ref:`bugs`. Every week, one of our team members is the
:ref:`bug deputy`, and at the end of the week that person usually sends a
report about new bugs to the mailing list
openstack-discuss@lists.openstack.org or talks about it at our team meeting.
This is also a good place to look for some work to do.

.. _reporting_a_bug:

Reporting a Bug
~~~~~~~~~~~~~~~

.. Pretty self explanatory section, link directly to where people should
   report bugs for your project.

You found an issue and want to make sure we are aware of it? You can do so
on Launchpad. More info about Launchpad usage can be found in the OpenStack
docs.
Getting Your Patch Merged
~~~~~~~~~~~~~~~~~~~~~~~~~

.. This section should have info about what it takes to get something
   merged. Do you require one or two +2's before +W? Do some of your repos
   require unit test changes with all patches? etc.

All changes proposed to Neutron or one of the Neutron stadium projects
require two +2 votes from Neutron core reviewers before one of the core
reviewers can approve the patch by giving a ``Workflow +1`` vote. More
detailed guidelines for reviewers of Neutron patches are available at
:ref:`Code reviews guide`.

Project Team Lead Duties
~~~~~~~~~~~~~~~~~~~~~~~~

.. this section is where you can put PTL specific duties not already listed
   in the common PTL guide (linked below) or if you already have them
   written up elsewhere, you can link to that doc here.

Neutron's PTL duties are described very well in the common PTL duties
guide. In addition to what is described in that guide, Neutron's PTL duties
are:

- triage new RFEs and prepare the Neutron drivers team meeting,

- maintain the list of :ref:`stadium projects` health - whether each project
  has active team members and whether it is following community and
  Neutron's guidelines and goals,

- maintain the list of :ref:`stadium projects lieutenants` - check that
  those people are still active in the projects, that their contact data is
  correct, and whether there is someone new who is active in the stadium
  project and could be added to this list.

Over the past few years, the Neutron team has followed a mentoring approach
for:

- new contributors,
- potential new core reviewers,
- future PTLs.

The Neutron PTL's responsibility is to identify potential new core reviewers
and help with their mentoring process. Mentoring of new contributors and
potential core reviewers can of course be delegated to other members of the
Neutron team. Mentoring of future PTLs is the responsibility of the Neutron
PTL.

CI Status Dashboards
====================

Gerrit Dashboards
-----------------

- Neutron priority reviews
- Neutron master branch reviews
- Neutron subproject reviews (master branch)
- Neutron stable branch reviews
- Neutron Infra reviews

These dashboard links can be generated by the `Gerrit Dashboard Creator`_.
Useful dashboard definitions are found in the ``dashboards`` directory.

.. _Gerrit Dashboard Creator: https://github.com/openstack/gerrit-dash-creator

Grafana Dashboards
------------------

Look for the neutron and networking-* dashboards by name on Grafana, for
instance:

* Neutron
* Neutron-lib
Setting Up a Development Environment
====================================

This page describes how to set up a working Python development environment
that can be used to develop Neutron on Ubuntu, Fedora or Mac OS X. These
instructions assume you're already familiar with Git and Gerrit, the code
review system and repository mirror used by OpenStack; if you aren't, please
see `this Git tutorial`_ for an introduction to using Git and `this guide`_
for a tutorial on using Gerrit and Git for code contribution to OpenStack
projects.

.. _this Git tutorial: http://git-scm.com/book/en/Getting-Started
.. _this guide: http://docs.openstack.org/infra/manual/developers.html#development-workflow

Following these instructions will allow you to run the Neutron unit tests.
If you want to be able to run Neutron in a full OpenStack environment, you
can use the excellent `DevStack`_ project to do so. There is a wiki page
that describes `setting up Neutron using DevStack`_.

.. _DevStack: https://opendev.org/openstack/devstack
.. _setting up Neutron using Devstack: https://wiki.openstack.org/wiki/NeutronDevstack

Getting the code
----------------

Grab the code::

    git clone https://opendev.org/openstack/neutron.git
    cd neutron

About ignore files
------------------

In the .gitignore file, add patterns to exclude files created by tools
integrated into the project's recommended workflow, such as test frameworks,
rendered documentation and package builds.

Don't add patterns to exclude files created by your personal tooling, for
example editors, IDEs or your operating system. These should instead be
maintained outside the repository, for example in a ``~/.gitignore`` file
added with::

    git config --global core.excludesfile '~/.gitignore'

This ignores such files for all repositories that you work with.

Testing Neutron
---------------

See :ref:`testing_neutron`.
Effective Neutron: 100 specific ways to improve your Neutron contributions
==========================================================================

There are a number of skills that make a great Neutron developer: writing
good code, reviewing effectively, listening to peer feedback, etc. The
objective of this document is to describe, by means of examples, the
pitfalls and the good and bad practices that we as a project encounter on a
daily basis and that make us either go slower or accelerate while
contributing to Neutron.

By reading and collaboratively contributing to such a knowledge base, your
development and review cycle becomes shorter, because you will learn (and
teach to others after you) what to watch out for, and how to be proactive in
order to prevent negative feedback, minimize programming errors, write
better tests, and so on and so forth... in a nutshell, how to become an
effective Neutron developer.

The notes below are meant to be free-form and brief by design. They are not
meant to replace or duplicate OpenStack documentation, or any project-wide
documentation initiative like peer-review notes or the team guide. For this
reason, references are acceptable and should be favored, if the shortcut is
deemed useful to expand on the distilled information. We will try to keep
these notes tidy by breaking them down into sections if it makes sense. Feel
free to add, adjust or remove as you see fit. Please do so, taking into
consideration yourself and other Neutron developers as readers. Capture your
experience during development and review and add any comment that you
believe will make your life and others' easier.

Happy hacking!

Developing better software
--------------------------

Plugin development
~~~~~~~~~~~~~~~~~~

Document common pitfalls as well as good practices done during plugin
development.

* Use mixin classes as a last resort. They can be a powerful tool to add
  behavior, but their strength is also a weakness, as they can introduce
  unpredictable behavior to the MRO, amongst other issues.

* In lieu of mixins, if you need to add behavior that is relevant for ML2,
  consider using the extension manager.

* If you make changes to the DB class methods, like calling methods that
  can be inherited, think about what effect that may have on plugins that
  have controller backends.

* If you make changes to the ML2 plugin or components used by the ML2
  plugin, think about the effect that may have on other plugins.

* When adding behavior to the L2 and L3 db base classes, do not assume that
  there is an agent on the other side of the message broker that interacts
  with the server. Plugins may not rely on agents at all.

* Be mindful of required capabilities when you develop plugin extensions.
  The Extension description provides the ability to specify the list of
  required capabilities for the extension you are developing. By declaring
  this list, the server will not start up if the requirements are not met,
  thus avoiding undetermined behavior at runtime.

Database interaction
~~~~~~~~~~~~~~~~~~~~

Document common pitfalls as well as good practices done during database
development.

* ``first()`` does not raise an exception.

* Do not use ``delete()`` to remove objects.
  A delete query does not load the object, so no sqlalchemy events can be
  triggered that would do things like recalculate quotas or update revision
  numbers of parent objects. For more details on all of the things that can
  go wrong using bulk delete operations, see the "Warning" sections in the
  sqlalchemy ``delete()`` documentation.

* For PostgreSQL, if you're using GROUP BY, everything in the SELECT list
  must be an aggregate (SUM(...), COUNT(...), etc.) or used in the GROUP
  BY.

  The incorrect variant:

  .. code:: python

     q = query(Object.id, Object.name,
               func.count(Object.number)).group_by(Object.name)

  The correct variant:

  .. code:: python

     q = query(Object.id, Object.name,
               func.count(Object.number)).group_by(Object.id, Object.name)

* Beware of the ``InvalidRequestError`` exception. There is even a Neutron
  bug registered for it. Bear in mind that this error may also occur when
  nesting transaction blocks, and the innermost block raises an error
  without a proper rollback. Consider whether savepoints can fit your use
  case.

* When designing data models that are related to each other, be careful
  about how you model the relationships' loading strategy. For instance, a
  joined relationship can be very efficient over others (some examples
  include router gateways or network availability zones).

* If you add a relationship to a Neutron object that will be referenced in
  the majority of cases where the object is retrieved, be sure to use the
  lazy='joined' parameter to the relationship so the related objects are
  loaded as part of the same query. Otherwise, the default method is
  'select', which emits a new DB query to retrieve each related object,
  adversely impacting performance. For example, see patch 88665, which
  resulted in a significant improvement since router retrieval functions
  always include the gateway interface.

* Conversely, do not use lazy='joined' if the relationship is only used in
  corner cases, because the JOIN statement comes at a cost that may be
  significant if the relationship contains many objects. For example, see
  patch 168214, which reduced a subnet retrieval by ~90% by avoiding a join
  to the IP allocation table.

* When writing extensions to existing objects (e.g. Networks), ensure that
  they are written in a way that the data on the object can be calculated
  without an additional DB lookup. If that's not possible, ensure the DB
  lookup is performed once in bulk during a list operation. Otherwise a
  list call for 1000 objects will change from a constant small number of DB
  queries to 1000 DB queries. For example, see patch 257086, which changed
  the availability zone code from the incorrect style to a database
  friendly one.

* Sometimes in code we use the following structures:

  .. code:: python

     def create():
         with context.session.begin(subtransactions=True):
             create_something()
             try:
                 _do_other_thing_with_created_object()
             except Exception:
                 with excutils.save_and_reraise_exception():
                     delete_something()

     def _do_other_thing_with_created_object():
         with context.session.begin(subtransactions=True):
             ....

  The problem is that when an exception is raised in
  ``_do_other_thing_with_created_object``, it is caught in the except
  block, but the object cannot be deleted in the except section because the
  internal transaction from ``_do_other_thing_with_created_object`` has
  been rolled back. To avoid this, nested transactions should be used. For
  such cases a helper function ``safe_creation`` has been created in
  ``neutron/db/_utils.py``. So, the example above should be replaced with:
  .. code:: python

     _safe_creation(context, create_something, delete_something,
                    _do_other_thing_with_created_object)

  where a nested transaction is used in the
  ``_do_other_thing_with_created_object`` function.

  The ``_safe_creation`` function can also be passed the
  ``transaction=False`` argument to prevent any transaction from being
  created, just to leverage the automatic deletion-on-exception logic.

* Beware of ResultProxy.inserted_primary_key, which returns a list of the
  last inserted primary keys, not the last inserted primary key:

  .. code:: python

     result = session.execute(mymodel.insert().values(**values))
     # result.inserted_primary_key is a list even if we inserted a unique row!

* Beware of pymysql, which can silently unwrap a list with a single element
  (and hide a wrong use of ResultProxy.inserted_primary_key, for example):

  .. code:: python

     e.execute("create table if not exists foo (bar integer)")
     e.execute(foo.insert().values(bar=1))
     e.execute(foo.insert().values(bar=[2]))

  The 2nd insert should crash (list provided, integer expected). It crashes
  at least with the mysql and postgresql backends, but succeeds with
  pymysql because it transforms the statements into:

  .. code:: sql

     INSERT INTO foo (bar) VALUES (1)
     INSERT INTO foo (bar) VALUES ((2))

System development
~~~~~~~~~~~~~~~~~~

Document common pitfalls as well as good practices done when invoking system
commands and interacting with linux utils.

* When a patch requires a new platform tool or a new feature in an existing
  tool, check if common platforms ship packages with the aforementioned
  feature. Also, tag such a patch with ``UpgradeImpact`` to raise its
  visibility (as these patches are brought up to the attention of the core
  team during team meetings). More details are in the review guidelines.

* When a patch or the code depends on a new feature in the kernel or in any
  platform tools (dnsmasq, ip, Open vSwitch, etc.), consider introducing a
  new sanity check to validate deployments for the expected features. Note
  that sanity checks *must not* check for version numbers of underlying
  platform tools, because distributions may decide to backport needed
  features into older versions. Instead, sanity checks should validate
  actual features by attempting to use them.

Eventlet concurrent model
~~~~~~~~~~~~~~~~~~~~~~~~~

Document common pitfalls as well as good practices done when using eventlet
and monkey patching.

* Do not use with_lockmode('update') on SQL queries without protecting the
  operation with a lockutils semaphore (see the sketch below). For some
  SQLAlchemy database drivers that operators may choose (e.g. MySQLdb) it
  may result in a temporary deadlock by yielding to another coroutine while
  holding the DB lock. The following wiki provides more details:
  https://wiki.openstack.org/wiki/OpenStack_and_SQLAlchemy#MySQLdb_.2B_eventlet_.3D_sad
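For illustration only, a minimal sketch of the recommended pattern; the
model class and the semaphore name are hypothetical::

    from oslo_concurrency import lockutils

    def get_router_for_update(context, router_id):
        # Serialize coroutines around the locking query so that an eventlet
        # greenthread switch cannot happen while the DB row lock is held.
        # 'Router' is a hypothetical model class.
        with lockutils.lock('router-update'):
            query = context.session.query(Router)
            return query.filter_by(id=router_id).with_lockmode('update').one()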
Mocking and testing
~~~~~~~~~~~~~~~~~~~

Document common pitfalls as well as good practices done when writing tests, any test. For anything more elaborate, please visit the testing section.

* Prefer low level testing to full path testing (e.g. don't test the database via client calls). The former is to be favored in unit testing, whereas the latter is to be favored in functional testing.

* Prefer specific assertions (assert(Not)In, assert(Not)IsInstance, assert(Not)IsNone, etc) over generic ones (assertTrue/False, assertEqual) because they raise more meaningful errors:

  .. code:: python

     def test_specific(self):
         self.assertIn(3, [1, 2])
         # raise meaningful error: "MismatchError: 3 not in [1, 2]"

     def test_generic(self):
         self.assertTrue(3 in [1, 2])
         # raise meaningless error: "AssertionError: False is not true"

* Use the pattern "self.assertEqual(expected, observed)", not the opposite; it helps reviewers to understand which one is the expected/observed value in non-trivial assertions. The expected and observed values are also labeled in the output when the assertion fails.

* Prefer specific assertions (assertTrue, assertFalse) over assertEqual(True/False, observed).

* Don't write tests that don't test the intended code. This might seem silly but it's easy to do with a lot of mocks in place. Ensure that your tests break as expected before your code change.

* Avoid heavy use of the mock library to test your code. If your code requires more than one mock to ensure that it does the correct thing, it needs to be refactored into smaller, testable units. Otherwise we depend on fullstack/tempest/api tests to test all of the real behavior and we end up with code containing way too many hidden dependencies and side effects.

* All behavior changes to fix bugs should include a test that prevents a regression. If you made a change and it didn't break a test, it means the code was not adequately tested in the first place; that's not an excuse to leave it untested.

* Test the failure cases. Use a mock side effect to throw the necessary exceptions to test your 'except' clauses (see the sketch after this list).

* Don't mimic existing tests that violate these guidelines. We are attempting to replace all of these, so more tests like them create more work. If you need help writing a test, reach out to the testing lieutenants and the team on IRC.

* Mocking open() is a dangerous practice because it can lead to unexpected bugs like `bug 1503847 `_. In fact, when the built-in open method is mocked during tests, some utilities (like debtcollector) may still rely on the real thing, and may end up using the mock rather than what they are really looking for. If you must, consider using `OpenFixture `_, but it is better not to mock open() at all.
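Here is a minimal, runnable sketch of testing a failure path with a mock side effect; the code under test and all names are hypothetical:

.. code:: python

   from unittest import mock

   import testtools


   def reset_port(client, port_id):
       """Hypothetical code under test with an 'except' clause."""
       try:
           client.update_port(port_id, admin_state_up=False)
       except RuntimeError:
           return False
       return True


   class TestResetPort(testtools.TestCase):

       def test_reset_port_update_failure(self):
           client = mock.Mock()
           # Force the failure path: the next call raises RuntimeError.
           client.update_port.side_effect = RuntimeError('boom')
           self.assertFalse(reset_port(client, 'port-id'))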
Documentation
~~~~~~~~~~~~~

The documentation for Neutron that exists in this repository is broken down into the following directories based on content:

* doc/source/admin/ - feature-specific configuration documentation aimed at operators.
* doc/source/configuration - stubs for auto-generated configuration files. Only needs updating if new config files are added.
* doc/source/contributor/internals - developer documentation for lower-level technical details.
* doc/source/contributor/policies - neutron team policies and best practices.
* doc/source/install - install-specific documentation for standing-up network-enabled nodes.

Additional documentation resides in the neutron-lib repository:

* api-ref - API reference documentation for Neutron resources and API extensions.

Backward compatibility
~~~~~~~~~~~~~~~~~~~~~~

Document common pitfalls as well as good practices done when extending the RPC Interfaces.

* Make yourself familiar with :ref:`Upgrade review guidelines `.

Deprecation
+++++++++++

Sometimes we want to refactor things in a non-backward compatible way. In most cases you can use `debtcollector `_ to mark things for deprecation. Config items have `deprecation options supported by oslo.config `_. The deprecation process must follow the `standard deprecation requirements `_. In terms of neutron development, this means:

* A launchpad bug to track the deprecation.
* A patch to mark the deprecated items. If the deprecation affects users (config items, API changes) then a `release note `_ must be included.
* Wait at least one cycle and at least three months of linear time.
* A patch that removes the deprecated items. Make sure to refer to the original launchpad bug in the commit message of this patch.

Scalability issues
~~~~~~~~~~~~~~~~~~

Document common pitfalls as well as good practices done when writing code that needs to process a lot of data.

Translation and logging
~~~~~~~~~~~~~~~~~~~~~~~

Document common pitfalls as well as good practices done when instrumenting your code.

* Make yourself familiar with `OpenStack logging guidelines `_ to avoid littering the logs with traces logged at inappropriate levels.

* The logger should only be passed unicode values. For example, do not pass it exceptions or other objects directly (LOG.error(exc), LOG.error(port), etc.); see the sketch after this list. See https://docs.openstack.org/oslo.log/latest/user/migration.html#no-more-implicit-conversion-to-unicode-str for more details.

* Don't pass exceptions into LOG.exception: the exception is already implicitly included in the log message by the Python logging module.

* Don't use LOG.exception when there is no exception registered in the current thread context: Python 3.x versions before 3.5 are known to fail on it.
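A minimal, self-contained sketch of these logging rules; ``_validate`` and the port dict are hypothetical:

.. code:: python

   import logging

   LOG = logging.getLogger(__name__)


   def _validate(port):
       # Hypothetical helper that rejects ports without an id.
       if 'id' not in port:
           raise ValueError("port has no id")


   def process_port(port):
       try:
           _validate(port)
       except ValueError:
           # Good: pass a format string plus arguments; LOG.exception
           # appends the traceback itself, so the exception object is
           # never handed to the logger.
           LOG.exception("Failed to process port %s", port.get('name'))
           # Bad: LOG.error(exc) or LOG.exception(exc) -- the logger
           # should only ever receive text.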
Project interfaces
~~~~~~~~~~~~~~~~~~

Document common pitfalls as well as good practices done when writing code that is used to interface with other projects, like Keystone or Nova.

Documenting your code
~~~~~~~~~~~~~~~~~~~~~

Document common pitfalls as well as good practices done when writing docstrings.

Landing patches more rapidly
----------------------------

Scoping your patch appropriately
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* Do not make multiple changes in one patch unless absolutely necessary. Cleaning up nearby functions or fixing a small bug you noticed while working on something else makes the patch very difficult to review. It also makes cherry-picking and reverting very difficult. Even apparently minor changes such as reformatting whitespace around your change can burden reviewers and cause merge conflicts.

* If a fix or feature requires code refactoring, submit the refactoring as a separate patch from the one that changes the logic. Otherwise it's difficult for a reviewer to tell the difference between mistakes in the refactor and changes required for the fix/feature. If it's a bug fix, try to implement the fix before the refactor to avoid making cherry-picks to stable branches difficult.

* Consider your reviewers' time before submitting your patch. A patch that requires many hours or days to review will sit in the "todo" list until someone has many hours or days free (which may never happen). If you can deliver your patch in small but incrementally understandable and testable pieces, you will be more likely to attract reviewers.

Nits and pedantic comments
~~~~~~~~~~~~~~~~~~~~~~~~~~

Document common nits and pedantic comments to watch out for.

* Make sure you spell correctly, the best you can; no-one wants rebase generators at the end of the release cycle!

* The odd pep8 error may cause an entire CI run to be wasted. Consider running validation (pep8 and/or tests) before submitting your patch. If you keep forgetting, consider installing a git `hook `_ so that Git will do it for you.

* Sometimes, new contributors want to dip their toes with trivial patches, but we at OpenStack *love* bike shedding and their patches may sometimes stall. In some extreme cases, the more trivial the patch, the higher the chances it fails to merge. To ensure we as a team provide/have a frustration-free experience, new contributors should be redirected to fixing `low-hanging-fruit bugs `_ that have a tangible positive impact to the codebase. Spelling mistakes and docstring fixes are fine, but there is a lot more that is relatively easy to fix and has a direct impact on Neutron users.

Reviewer comments
~~~~~~~~~~~~~~~~~

* Acknowledge them one by one by either clicking 'Done' or by replying extensively. If you do not, the reviewer won't know whether you thought it was not important, or you simply forgot. If the reply satisfies the reviewer, consider capturing the input in the code/document itself so that it's there for reviewers of newer patchsets to see (and for other developers when the patch merges).

* Watch for the feedback on your patches. Acknowledge it promptly and act on it quickly, so that the reviewer remains engaged. If you disappear for a week after you posted a patchset, it is very likely that the patch will end up being neglected.

* Do not take negative feedback personally. Neutron is a large project with lots of contributors with different opinions on how things should be done. Many come from widely varying cultures and languages, so the English, text-only feedback can unintentionally come across as harsh. Getting a -1 means reviewers are trying to help get the patch into a state that can be merged, it doesn't just mean they are trying to block it. It's very rare to get a patch merged on the first iteration in a way that makes everyone happy.

Code Review
~~~~~~~~~~~

* You should visit the `OpenStack How To Review wiki `_.

* Stay focused and review what matters for the release. Please check out the Neutron section of the `Gerrit dashboard `_. The output is generated by this `tool `_.

IRC
~~~~

* IRC is a place where you can speak with many of the Neutron developers and core reviewers. For more information you should visit the `OpenStack IRC wiki `_. The Neutron IRC channel is #openstack-neutron.

* There are weekly IRC meetings related to many different projects/teams in Neutron. A full list of these meetings and their dates/times can be found in `OpenStack IRC Meetings `_. It is important to attend these meetings in the area of your contribution and possibly mention your work and patches.

* When you have questions regarding an idea or a specific patch of yours, it can be helpful to find a relevant person in IRC and speak with them about it. You can find a user's IRC nickname in their launchpad account.

* Being available on IRC is useful, since reviewers can contact you directly to quickly clarify a review issue. This speeds up the feedback loop.

* Each area of Neutron or sub-project of Neutron has a specific lieutenant in charge of it. You can most likely find these lieutenants on IRC; it is advised however to try and send public questions to the channel rather than to a specific person if possible. (This increases the chances of getting faster answers to your questions.) A list of the areas and lieutenants' nicknames can be found at :doc:`Core Reviewers `.

Commit messages
~~~~~~~~~~~~~~~

Document common pitfalls as well as good practices done when writing commit messages. For more details see `Git commit message best practices `_. This is the TL;DR version with the important points for committing to Neutron.

* One liners are bad, unless the change is trivial.

* Use ``UpgradeImpact`` when the change could cause issues during the upgrade from one version to the next.
* ``APIImpact`` should be used when the api-ref in neutron-lib must be updated to reflect the change, and only as a last resort. Rather, the ideal workflow includes submitting a corresponding neutron-lib api-ref change along with the implementation, thereby removing the need to use ``APIImpact``.

* Make sure the commit message doesn't have any spelling/grammar errors. This is the first thing reviewers read, and such errors can be distracting enough to invite -1's.

* Describe what the change accomplishes. If it's a bug fix, explain how this code will fix the problem. If it's part of a feature implementation, explain what component of the feature the patch implements. Do not just describe the bug, that's what launchpad is for.

* Use the "Closes-Bug: #BUG-NUMBER" tag if the patch addresses a bug. Submitting a bugfix without a launchpad bug reference is unacceptable, even if it's trivial. Launchpad is how bugs are tracked, so fixes without a launchpad bug are a nightmare when users report the bug from an older version and the Neutron team can't tell if/why/how it's been fixed. Launchpad is also how backports are identified and tracked, so patches without a bug report cannot be picked to stable branches.

* Use the "Implements: blueprint NAME-OF-BLUEPRINT" or "Partially-Implements: blueprint NAME-OF-BLUEPRINT" tags for features so reviewers can determine if the code matches the spec that was agreed upon. This also updates the blueprint on launchpad so it's easy to see all patches that are related to a feature.

* If it's not immediately obvious, explain what the previous code was doing that was incorrect (e.g. code assumed it would never get 'None' from a function call).

* Be specific in your commit message about what the patch does and why it does this. For example, "Fixes incorrect logic in security groups" is not helpful because the code diff already shows that you are modifying security groups. The message should be specific enough that a reviewer looking at the code can tell if the patch does what the commit says in the most appropriate manner. If the reviewer has to guess why you did something, lots of your time will be wasted explaining why certain changes were made.
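Pulling these rules together, a hypothetical commit message (the bug number and all specifics are placeholders, not a real patch) could look like::

    Fix gateway port lookup during router deletion

    The previous code assumed the gateway port always existed, so router
    deletion failed with an AttributeError when the gateway had been
    removed concurrently. Handle the missing port explicitly and add a
    regression test for the concurrent removal case.

    Closes-Bug: #1234567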
Dealing with Zuul
~~~~~~~~~~~~~~~~~

Document common pitfalls as well as good practices done when dealing with OpenStack CI.

* When you submit a patch, consider checking its `status `_ in the queue. If you see a job failure, you might as well save time and try to figure out in advance why it is failing.

* Excessive use of 'recheck' to get tests to pass is discouraged. Please examine the logs for the failing test(s) and make sure your change has not tickled anything that might be causing a new failure or race condition. Getting your change in could make it even harder to debug what is actually broken later on.

=================
Contributor Guide
=================

This document describes Neutron for contributors of the project, and assumes that you are already familiar with Neutron from an :doc:`end-user perspective `.

Basic Information
-----------------

.. toctree::
   :maxdepth: 2

   contributing

Neutron Policies
----------------

.. toctree::
   :maxdepth: 2

   policies/index

Neutron Stadium
---------------

.. toctree::
   :maxdepth: 2

   stadium/index

Developer Guide
---------------

In the Developer Guide, you will find information on Neutron's lower level programming APIs. There are sections that cover the core pieces of Neutron, including its database, message queue, and scheduler components. There are also subsections that describe specific plugins inside Neutron. Finally, the developer guide includes information about Neutron testing infrastructure.

.. toctree::
   :maxdepth: 2

   effective_neutron
   development_environment
   ovn_vagrant/index
   contribute
   neutron_api
   client_command_extensions
   alembic_migrations
   upgrade_checks
   testing/index

Neutron Internals
-----------------

.. toctree::
   :maxdepth: 2

   internals/index
   modules

OVN Driver
----------

.. toctree::
   :maxdepth: 2

   ovn/index

Dashboards
----------

There is a collection of dashboards to help developers and reviewers located here.

.. toctree::
   :maxdepth: 2

   dashboards/index

Subnet Pools and Address Scopes
===============================

This page discusses subnet pools and address scopes.

Subnet Pools
------------

Learn about subnet pools by watching the summit talk given in Vancouver [#]_.

.. [#] http://www.youtube.com/watch?v=QqP8yBUUXBM&t=6m12s

Subnet pools were added in Kilo. They are relatively simple. A SubnetPool has any number of SubnetPoolPrefix objects associated to it. These prefixes are in CIDR format. Each CIDR is a piece of the address space that is available for allocation. Subnet Pools support IPv6 just as well as IPv4.

The Subnet model object now has a subnetpool_id attribute whose default is null for backward compatibility. The subnetpool_id attribute stores the UUID of the subnet pool that acted as the source for the address range of a particular subnet.

When creating a subnet, the subnetpool_id can be optionally specified. If it is, the 'cidr' field is not required. If 'cidr' is specified, it will be allocated from the pool assuming the pool includes it and hasn't already allocated any part of it.
If 'cidr' is left out, then the prefixlen attribute can be specified. If it is not, the default prefix length will be taken from the subnet pool. Think of it this way: the allocation logic always needs to know the size of the subnet desired. It can pull it from a specific CIDR, prefixlen, or default. A specific CIDR is optional and the allocation will try to honor it if provided. The request will fail if it can't honor it.

Subnet pools do not allow overlap of subnets.

Subnet Pool Quotas
~~~~~~~~~~~~~~~~~~

A quota mechanism was provided for subnet pools. It is different than other quota mechanisms in Neutron because it doesn't count instances of first class objects. Instead it counts how much of the address space is used.

For IPv4, it made reasonable sense to count quota in terms of individual addresses. So, if you're allowed exactly one /24, your quota should be set to 256. Three /26s would be 192. This mechanism encourages more efficient use of the IPv4 space, which will be increasingly important when working with globally routable addresses.

For IPv6, the smallest viable subnet in Neutron is a /64. There is no reason to allocate a subnet of any other size for use on a Neutron network. It would look pretty funny to set a quota of 4611686018427387904 to allow one /64 subnet. To avoid this, we count IPv6 quota in terms of /64s. So, a quota of 3 allows three /64 subnets. When we need to allocate something smaller in the future, we will need to ensure that the code can handle non-integer quota consumption.

Allocation
~~~~~~~~~~

Allocation is done in a way that aims to minimize fragmentation of the pool. The relevant code is here [#]_. First, the available prefixes are computed using a set difference: pool - allocations. The result is compacted [#]_ and then sorted by size. The subnet is then allocated from the smallest available prefix that is large enough to accommodate the request.

.. [#] neutron/ipam/subnet_alloc.py (_allocate_any_subnet)
.. [#] http://pythonhosted.org/netaddr/api.html#netaddr.IPSet.compact
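A rough sketch of this strategy using netaddr directly (this is not the actual code in neutron/ipam/subnet_alloc.py, and the pool contents are made up):

.. code-block:: python

   import netaddr

   pool = netaddr.IPSet(['10.0.0.0/16'])
   allocations = netaddr.IPSet(['10.0.0.0/24', '10.0.1.0/24'])

   # Set difference, compacted into the fewest possible CIDRs.
   available = pool - allocations
   available.compact()

   # Sort smallest prefixes first (larger prefixlen means a smaller
   # prefix) and take the first one that can still fit a /26 request.
   cidrs = sorted(available.iter_cidrs(),
                  key=lambda cidr: cidr.prefixlen, reverse=True)
   candidate = next(cidr for cidr in cidrs if cidr.prefixlen <= 26)
   print(next(candidate.subnet(26)))  # 10.0.2.0/26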
Address Scopes
--------------

Before subnet pools or address scopes, it was impossible to tell if a network address was routable in a certain context because the address was given explicitly on subnet create and wasn't validated against any other addresses. Address scopes are meant to solve this by putting control over the address space in the hands of an authority: the address scope owner. They make use of the already existing SubnetPool concept for allocation.

Address scopes are "the thing within which address overlap is not allowed" and thus provide more flexible control, as well as decoupling of address overlap from tenancy.

Prior to the Mitaka release, there was implicitly only a single 'shared' address scope. Arbitrary address overlap was allowed, making it pretty much a "free for all". To make things seem somewhat sane, normal users are not able to use routers to cross-plug networks from different projects, and NAT was used between internal networks and external networks. It was almost as if each project had a private address scope.

The problem is that this model cannot support use cases where NAT is not desired or supported (e.g. IPv6) or where we want to allow different projects to cross-plug their networks.

An AddressScope covers only one address family. But, they work equally well for IPv4 and IPv6.

Routing
~~~~~~~

The reference implementation honors address scopes. Within an address scope, addresses route freely (barring any FW rules or other external restrictions). Between scopes, routing is prevented unless address translation is used.

For now, floating IPs are the only place where traffic crosses scope boundaries. When a floating IP is associated to a fixed IP, the fixed IP is allowed to access the address scope of the floating IP by way of a 1:1 NAT rule. That means the fixed IP can access not only the external network, but also any internal networks that are in the same address scope as the external network. This is diagrammed as follows::

    +----------------------+       +---------------------------+
    | address scope 1      |       | address scope 2           |
    |                      |       |                           |
    | +------------------+ |       |   +------------------+    |
    | | internal network | |       |   | external network |    |
    | +-------------+----+ |       |   +--------+---------+    |
    |               |      |       |            |              |
    |       +-------+--+   |       |     +------+------+       |
    |       | fixed ip +-----------------+ floating IP |       |
    |       +----------+   |       |     +-+---------+-+       |
    +----------------------+       |       |         |         |
                                   | +-----+----+ +--+-------+ |
                                   | | internal | | internal | |
                                   | +----------+ +----------+ |
                                   +---------------------------+

Due to the asymmetric route in DVR, and the fact that DVR local routers do not know the information of the floating IPs that reside in other hosts, there is a limitation in the DVR multiple hosts scenario. With DVR in multiple hosts, when the destination of the traffic is an internal fixed IP in a different host, a fixed IP with a floating IP associated can't cross the scope boundary to access the internal networks that are in the same address scope as the external network. See https://bugs.launchpad.net/neutron/+bug/1682228

RPC
~~~

The L3 agent in the reference implementation needs to know the address scope for each port on each router in order to map ingress traffic correctly.

Each subnet from the same address family on a network is required to be from the same subnet pool. Therefore, the address scope will also be the same. If this were not the case, it would be more difficult to match ingress traffic on a port with the appropriate scope. It may be counter-intuitive, but L3 address scopes need to be anchored to some sort of non-L3 thing (e.g. an L2 interface) in the topology in order to determine the scope of ingress traffic. For now, we use ports/networks. In the future, we may be able to distinguish by something else like the remote MAC address or something.

The address scope id is set on each port in a dict under the 'address_scopes' attribute. The scope is distinct per address family. If the attribute does not appear, it is assumed to be null for both families. A value of null means that the addresses are in the "implicit" address scope which holds all addresses that don't have an explicit one. All subnets that existed in Neutron before address scopes existed fall here.

Here is an example of how the json will look in the context of a router port::

    "address_scopes": {
        "4": "d010a0ea-660e-4df4-86ca-ae2ed96da5c1",
        "6": null
    },

To implement floating IPs crossing scope boundaries, the L3 agent needs to know the target scope of the floating ip. The fixed address is not enough to disambiguate because, theoretically, there could be overlapping addresses from different scopes. The scope is computed [#]_ from the floating ip fixed port and attached to the floating ip dict under the 'fixed_ip_address_scope' attribute. Here's what the json looks like (trimmed)::

    {
        ...
        "floating_ip_address": "172.24.4.4",
        "fixed_ip_address": "172.16.0.3",
        "fixed_ip_address_scope": "d010a0ea-660e-4df4-86ca-ae2ed96da5c1",
        ...
    }
.. [#] neutron/db/l3_db.py (_get_sync_floating_ips)

Model
~~~~~

The model for subnet pools and address scopes can be found in neutron/db/models_v2.py and neutron/db/address_scope_db.py. This document won't go over all of the details. It is worth noting how they relate to existing Neutron objects. The existing Neutron subnet now optionally references a single subnet pool::

    +----------------+        +------------------+        +--------------+
    | Subnet         |        | SubnetPool       |        | AddressScope |
    +----------------+        +------------------+        +--------------+
    | subnet_pool_id +------> | address_scope_id +------> |              |
    |                |        |                  |        |              |
    |                |        |                  |        |              |
    |                |        |                  |        |              |
    +----------------+        +------------------+        +--------------+

L3 Agent
~~~~~~~~

The L3 agent is limited in its support for multiple address scopes. Within a router in the reference implementation, traffic is marked on ingress with the address scope corresponding to the network it is coming from. If that traffic would route to an interface in a different address scope, the traffic is blocked unless an exception is made.

One exception is made for floating IP traffic. When traffic is headed to a floating IP, DNAT is applied and the traffic is allowed to route to the private IP address, potentially crossing the address scope boundary. When traffic flows from an internal port to the external network and a floating IP is assigned, that traffic is also allowed.

Another exception is made for traffic from an internal network to the external network when SNAT is enabled. In this case, SNAT to the router's fixed IP address is applied to the traffic. However, SNAT is not used if the external network has an explicit address scope assigned and it matches the internal network's. In that case, traffic routes straight through without NAT. The internal network's addresses are viable on the external network in this case.

The reference implementation has limitations. Even with multiple address scopes, a router implementation is unable to connect to two networks with overlapping IP addresses. There are two reasons for this. First, a single routing table is used inside the namespace. An implementation using multiple routing tables has been in the works, but there are some unresolved issues with it. Second, the default SNAT feature cannot be supported with the current Linux conntrack implementation unless a double NAT is used (one NAT to get from the address scope to an intermediate address specific to the scope and a second NAT to get from that intermediate address to an external address). A single NAT won't work if there are duplicate addresses across the scopes. Due to these complications, the router will still refuse to connect to overlapping subnets. We can look into an implementation that overcomes these limitations in the future.
Agent extensions
================

All reference agents utilize a common extension mechanism that allows for the introduction and enabling of a core resource extension without needing to change agent code. This mechanism allows multiple agent extensions to be run by a single agent simultaneously. The mechanism may be especially interesting to third parties whose extensions lie outside the neutron tree.

Under this framework, an agent may expose its API to each of its extensions, thereby allowing an extension to access resources internal to the agent. At layer 2, for instance, upon each port event the agent is then able to trigger a handle_port method in its extensions.

Interactions with the agent API object are in the following order:

#. The agent initializes the agent API object.
#. The agent passes the agent API object into the extension manager.
#. The manager passes the agent API object into each extension.
#. An extension calls the new agent API object method to receive, for instance, bridge wrappers with cookies allocated.

::

    +-----------+
    | Agent API +--------------------------------------------------+
    +-----+-----+                                                  |
          |                                   +-----------+        |
          |1                               +--+ Extension +--+     |
          |                                |  +-----------+  |     |
    +---+-+-+---+  2  +--------------+  3  |                 |  4  |
    | Agent     +-----+ Ext. manager +-----+--+   ....    +--+-----+
    +-----------+     +--------------+        |           |
                                              |  +-----------+  |
                                              +--+ Extension +--+
                                                 +-----------+

Each extension is referenced through a stevedore entry point defined within a specific namespace. For example, L2 extensions are referenced through the neutron.agent.l2.extensions namespace.

The relevant modules are:

* neutron_lib.agent.extension: This module defines an abstract extension interface for all agent extensions across L2 and L3.

* neutron_lib.agent.l2_extension:
* neutron_lib.agent.l3_extension: These modules subclass neutron_lib.agent.extension.AgentExtension and define a layer-specific abstract extension interface.

* neutron.agent.agent_extensions_manager: This module contains a manager that allows extensions to load themselves at runtime.

* neutron.agent.l2.l2_agent_extensions_manager:
* neutron.agent.l3.l3_agent_extensions_manager: Each of these modules passes core resource events to loaded extensions.

Agent API object
----------------

Every agent can pass an "agent API object" into its extensions in order to expose its internals to them in a controlled way. To accommodate different agents, each extension may define a consume_api() method that will receive this object.

This agent API object is part of neutron's public interface for third parties. All changes to the interface will be managed in a backwards-compatible way.

At this time, on the L2 side, only the L2 Open vSwitch agent provides an agent API object to extensions. See :doc:`L2 agent extensions `. For L3, see :doc:`L3 agent extensions `.
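A minimal sketch of the shape of such an extension, assuming the neutron_lib L2 extension interface; the class and its behavior are illustrative, not a real in-tree extension:

.. code-block:: python

   from neutron_lib.agent import l2_extension


   class ExampleExtension(l2_extension.L2AgentExtension):

       def initialize(self, connection, driver_type):
           """Set the extension up once the agent has loaded it."""
           self.driver_type = driver_type

       def consume_api(self, agent_api):
           # The agent hands over its API object; keep a reference so
           # the handlers below can reach agent internals in a
           # controlled way.
           self.agent_api = agent_api

       def handle_port(self, context, data):
           """Called by the agent on each port update event."""

       def delete_port(self, context, data):
           """Called by the agent when a port is deleted."""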
The relevant modules are:

* neutron_lib.agent.extension
* neutron_lib.agent.l2_extension
* neutron_lib.agent.l3_extension
* neutron.agent.agent_extensions_manager
* neutron.agent.l2.l2_agent_extensions_manager
* neutron.agent.l3.l3_agent_extensions_manager

API Extensions
==============

API extensions are the standard way of introducing new functionality to the Neutron project; they allow plugins to determine whether they wish to support the functionality or not.

Examples
--------

The easiest way to demonstrate how an API extension is written is by studying an existing API extension and explaining the different layers.

.. toctree::
   :maxdepth: 1

   security_group_api

Extensions for Resources with standard attributes
-------------------------------------------------

Resources that inherit from the HasStandardAttributes DB class can automatically have the extensions written for standard attributes (e.g. timestamps, revision number, etc) extend their resources by defining the 'api_collections' on their model. These are used by extensions for standard attr resources to generate the extended resources map.

Any new addition of a resource to the standard attributes collection must be accompanied by a new extension to ensure that it is discoverable via the API. If it's a completely new resource, the extension describing that resource will suffice. If it's an existing resource that was released in a previous cycle having the standard attributes added for the first time, then a dummy extension needs to be added indicating that the resource now has standard attributes. This ensures that an API caller can always discover if an attribute will be available.

For example, if Flavors were migrated to include standard attributes, we would need a new 'flavor-standardattr' extension. Then as an API caller, I will know that flavors will have timestamps by checking for 'flavor-standardattr' and 'timestamps'.
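A hedged sketch of how a model opts in, assuming the neutron_lib model base classes; 'Widget' is a made-up resource, not a real Neutron model:

.. code-block:: python

   from neutron_lib.db import model_base
   import sqlalchemy as sa

   from neutron.db import standard_attr


   class Widget(standard_attr.HasStandardAttributes,
                model_base.BASEV2, model_base.HasId):
       """A hypothetical resource exposed under the 'widgets' collection."""

       name = sa.Column(sa.String(255))

       # Tells the standard attr service plugins (timestamps, revisions,
       # tags, ...) which API collections this model appears under.
       api_collections = ['widgets']
       collection_resource_map = {'widgets': 'widget'}
       tag_support = True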
Current API resources extended by standard attr extensions:

- subnets: neutron.db.models_v2.Subnet
- trunks: neutron.services.trunk.models.Trunk
- routers: neutron.db.l3_db.Router
- segments: neutron.db.segments_db.NetworkSegment
- security_group_rules: neutron.db.models.securitygroup.SecurityGroupRule
- networks: neutron.db.models_v2.Network
- policies: neutron.db.qos.models.QosPolicy
- subnetpools: neutron.db.models_v2.SubnetPool
- ports: neutron.db.models_v2.Port
- security_groups: neutron.db.models.securitygroup.SecurityGroup
- floatingips: neutron.db.l3_db.FloatingIP
- network_segment_ranges: neutron.db.models.network_segment_range.NetworkSegmentRange

Neutron WSGI/HTTP API layer
===========================

This section will cover the internals of Neutron's HTTP API, and the classes in Neutron that can be used to create Extensions to the Neutron API.

Python web applications interface with webservers through the Python Web Server Gateway Interface (WSGI) - defined in `PEP 333 `_.

Startup
-------

Neutron's WSGI server is started from the `server module `_ and the entry point `serve_wsgi` is called to build an instance of the `NeutronApiService`_, which is then returned to the server module, which spawns an `Eventlet`_ `GreenPool`_ that will run the WSGI application and respond to requests from clients.

.. _NeutronApiService: http://opendev.org/openstack/neutron/tree/neutron/service.py
.. _Eventlet: http://eventlet.net/
.. _GreenPool: http://eventlet.net/doc/modules/greenpool.html

WSGI Application
----------------

During the building of the NeutronApiService, the `_run_wsgi` function creates a WSGI application using the `load_paste_app` function inside `config.py`_ - which parses `api-paste.ini`_ - in order to create a WSGI app using `Paste`_'s `deploy`_.

The api-paste.ini file defines the WSGI applications and routes - using the `Paste INI file format`_. The INI file directs paste to instantiate the `APIRouter`_ class of Neutron, which contains several methods that map Neutron resources (such as Ports, Networks, Subnets) to URLs, and the controller for each resource.

.. _config.py: http://opendev.org/openstack/neutron/tree/neutron/common/config.py
.. _api-paste.ini: http://opendev.org/openstack/neutron/tree/etc/api-paste.ini
.. _APIRouter: http://opendev.org/openstack/neutron/tree/neutron/api/v2/router.py
.. _Paste: http://pythonpaste.org/
.. _Deploy: http://pythonpaste.org/deploy/
.. _Paste INI file format: http://pythonpaste.org/deploy/#applications

Further reading
---------------

`Yong Sheng Gong: Deep Dive into Neutron `_

Calling the ML2 Plugin
======================

When writing code for an extension, service plugin, or any other part of Neutron, you must not call core plugin methods that mutate state while you have a transaction open on the session that you pass into the core plugin method.

The create and update methods for ports, networks, and subnets in ML2 all have a precommit phase and a postcommit phase. During the postcommit phase, the data is expected to be fully persisted to the database, and ML2 drivers will use this time to relay information to a backend outside of Neutron. Calling the ML2 plugin within a transaction would violate this semantic because the data would not be persisted to the DB; and, were a failure to occur that caused the whole transaction to be rolled back, the backend would become inconsistent with the state in Neutron's DB.

To prevent this, these methods are protected with a decorator that will raise a RuntimeError if they are called with a context that has a session in an active transaction. The decorator can be found at neutron.common.utils.transaction_guard and may be used in other places in Neutron to protect functions that are expected to be called outside of a transaction.
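A minimal sketch of the idea behind such a guard (this is not the actual neutron.common.utils.transaction_guard implementation):

.. code-block:: python

   import functools


   def transaction_guard(f):
       """Raise if the decorated method is called inside a transaction."""

       @functools.wraps(f)
       def inner(self, context, *args, **kwargs):
           # SQLAlchemy sessions expose whether a transaction is open;
           # refuse to run when one is, since the postcommit semantics
           # described above would be violated.
           if context.session.is_active:
               raise RuntimeError(
                   "Method %s cannot be called within a transaction."
                   % f.__name__)
           return f(self, context, *args, **kwargs)
       return inner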
Profiling Neutron Code ====================== As more functionality is added to Neutron over time, efforts to improve performance become more difficult, given the rising complexity of the code. Identifying performance bottlenecks is frequently not straightforward, because they arise as a result of complex interactions of different code components. To help community developers to improve Neutron performance, a Python decorator has been implemented. Decorating a method or a function with it will result in profiling data being added to the corresponding Neutron component log file. These data are generated using `cProfile`_ which is part of the Python standard library. .. _`cProfile`: https://docs.python.org/3/library/profile.html Once a method or function has been decorated, every one of its executions will add to the corresponding log file data grouped in 3 sections: #. The top calls (sorted by CPU cumulative time) made by the decorated method or function. The number of calls included in this section can be controlled by a configuration option, as explained in :ref:`config-neutron-for-code-profiling`. Following is a summary example of this section: .. code-block:: console Oct 20 01:52:40.759379 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: DEBUG neutron.profiling.profiled_decorator [None req-dc2d428f-4531-4f07-a12d-56843b5f9374 c_rally_8af8f2b4_YbhFJ6Ge c_rally_8af8f2b4_fqvy1XJp] os-profiler parent trace-id c5b30c7f-100b-4e1c-8f07-b2c38f41ad65 trace-id 6324fa85-ea5f-4ae2-9d89-2aabff0dddfc 16928 millisecs elapsed for neutron.plugins.ml2.plugin.create_port((, , {'port': {'tenant_id': '421ab52e126e45af81a3eb1962613e18', 'network_id': 'dc59577a-9589-4617-82b5-6ee31dbdb15d', 'fixed_ips': [{'ip_address': '1.1.5.177', 'subnet_id': 'e15ec947-9edd-4793-bf0f-c463c7ff2f62'}], 'admin_state_up': True, 'device_id': 'f33db890-7958-440e-b07b-432e40bb4049', 'device_owner': 'network:router_interface', 'name': '', 'project_id': '421ab52e126e45af81a3eb1962613e18', 'mac_address': , 'allowed_address_pairs': , 'extra_dhcp_opts': None, 'binding:vnic_type': 'normal', 'binding:host_id': , 'binding:profile': , 'port_security_enabled': , 'description': '', 'security_groups': }}), {}): Oct 20 01:52:40.759379 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 247612 function calls (238220 primitive calls) in 16.943 seconds Oct 20 01:52:40.759379 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: Ordered by: cumulative time Oct 20 01:52:40.759379 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: List reduced from 1861 to 100 due to restriction <100> Oct 20 01:52:40.759379 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: ncalls tottime percall cumtime percall filename:lineno(function) Oct 20 01:52:40.759379 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 4/2 0.000 0.000 16.932 8.466 /usr/local/lib/python3.6/dist-packages/neutron_lib/db/api.py:132(wrapped) Oct 20 01:52:40.759379 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 1 0.000 0.000 16.928 16.928 /opt/stack/neutron/neutron/common/utils.py:678(inner) Oct 20 01:52:40.759379 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 20/9 0.000 0.000 16.884 1.876 /usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/strategies.py:1317() Oct 20 01:52:40.759379 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 37/17 0.000 0.000 16.867 0.992 /opt/stack/osprofiler/osprofiler/sqlalchemy.py:84(handler) Oct 20 01:52:40.759379 ubuntu-bionic-vexxhost-sjc1-0012393267 
neutron-server[19578]: 37/17 0.000 0.000 16.860 0.992 /opt/stack/osprofiler/osprofiler/profiler.py:86(stop) Oct 20 01:52:40.759379 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 8/3 0.005 0.001 16.844 5.615 /usr/local/lib/python3.6/dist-packages/neutron_lib/db/api.py:224(wrapped) Oct 20 01:52:40.759379 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 1 0.000 0.000 16.836 16.836 /opt/stack/neutron/neutron/plugins/ml2/plugin.py:1395(_create_port_db) Oct 20 01:52:40.759379 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 1 0.000 0.000 16.836 16.836 /opt/stack/neutron/neutron/db/db_base_plugin_v2.py:1413(create_port_db) Oct 20 01:52:40.759379 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 1 0.000 0.000 16.836 16.836 /opt/stack/neutron/neutron/db/db_base_plugin_v2.py:1586(_enforce_device_owner_not_router_intf_or_device_id) Oct 20 01:52:40.759379 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 1 0.000 0.000 16.836 16.836 /opt/stack/neutron/neutron/db/l3_db.py:522(get_router) Oct 20 01:52:40.759379 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 1 0.000 0.000 16.836 16.836 /opt/stack/neutron/neutron/db/l3_db.py:186(_get_router) Oct 20 01:52:40.759379 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 34/22 0.000 0.000 16.745 0.761 /usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/loading.py:35(instances) Oct 20 01:52:40.759379 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 39/8 0.000 0.000 16.727 2.091 /usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/elements.py:285(_execute_on_connection) Oct 20 01:52:40.759379 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 39/8 0.001 0.000 16.727 2.091 /usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py:1056(_execute_clauseelement) Oct 20 01:52:40.759379 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 17/13 0.000 0.000 16.704 1.285 /usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/strategies.py:1310(get) Oct 20 01:52:40.759379 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 20/14 0.001 0.000 16.704 1.193 /usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/strategies.py:1315(_load) Oct 20 01:52:40.759379 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 19/14 0.000 0.000 16.703 1.193 /usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/loading.py:88() Oct 20 01:52:40.759379 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 76/23 0.001 0.000 16.699 0.726 /opt/stack/osprofiler/osprofiler/profiler.py:426(_notify) Oct 20 01:52:40.759379 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 39/8 0.001 0.000 16.696 2.087 /usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py:1163(_execute_context) Oct 20 01:52:40.759379 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 75/23 0.000 0.000 16.686 0.725 /opt/stack/osprofiler/osprofiler/notifier.py:28(notify) #. Callers section: all functions or methods that called each function or method in the resulting profiling data. This is restricted by the configured number of top calls to log, as explained in :ref:`config-neutron-for-code-profiling`. 
Following is a summary example of this section: :: Oct 20 01:52:40.767003 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: Ordered by: cumulative time Oct 20 01:52:40.767003 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: List reduced from 1861 to 100 due to restriction <100> Oct 20 01:52:40.767003 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: Function was called by... Oct 20 01:52:40.767003 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: ncalls tottime cumtime Oct 20 01:52:40.767003 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: /usr/local/lib/python3.6/dist-packages/neutron_lib/db/api.py:132(wrapped) <- 2/0 0.000 0.000 /usr/local/lib/python3.6/dist-packages/neutron_lib/db/api.py:224(wrapped) Oct 20 01:52:40.767003 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: /opt/stack/neutron/neutron/common/utils.py:678(inner) <- Oct 20 01:52:40.767003 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: /usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/strategies.py:1317() <- 3 0.000 0.000 /opt/stack/osprofiler/osprofiler/profiler.py:426(_notify) Oct 20 01:52:40.767003 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 1 0.000 16.883 /usr/local/lib/python3.6/dist-packages/neutron_lib/db/api.py:132(wrapped) Oct 20 01:52:40.767003 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 2 0.000 0.000 /usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py:69(__init__) Oct 20 01:52:40.767003 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 1 0.000 0.000 /usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py:1056(_execute_clauseelement) Oct 20 01:52:40.767003 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 1 0.000 16.704 /usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py:3281(one) Oct 20 01:52:40.767003 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 0 0.000 0.000 /usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py:3337(__iter__) Oct 20 01:52:40.767003 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 1 0.000 0.000 /usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py:3362(_execute_and_instances) Oct 20 01:52:40.767003 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 1 0.000 0.000 /usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py:1127(_connection_for_bind) Oct 20 01:52:40.767003 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 1 0.000 0.000 /usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/strategies.py:1310(get) Oct 20 01:52:40.767003 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 1 0.000 0.000 /usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/strategies.py:1315(_load) Oct 20 01:52:40.767003 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 1 0.000 0.000 /usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/strategies.py:2033(load_scalar_from_joined_new_row) Oct 20 01:52:40.767003 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 1/0 0.000 0.000 /usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py:840(_checkin) Oct 20 01:52:40.767003 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 1/0 0.000 0.000 /usr/local/lib/python3.6/dist-packages/webob/request.py:1294(send) Oct 20 01:52:40.767003 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: /opt/stack/osprofiler/osprofiler/sqlalchemy.py:84(handler) <- 16/0 0.000 0.000 
/usr/local/lib/python3.6/dist-packages/sqlalchemy/event/attr.py:316(__call__) Oct 20 01:52:40.767003 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: /opt/stack/osprofiler/osprofiler/profiler.py:86(stop) <- 16/0 0.000 0.000 /opt/stack/osprofiler/osprofiler/sqlalchemy.py:84(handler) #. Callees section: a list of all functions or methods that were called by the indicated function or method. Again, this is restricted by the configured number of top calls to log. Following is a summary example of this section: :: Oct 20 01:52:40.788842 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: Ordered by: cumulative time Oct 20 01:52:40.788842 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: List reduced from 1861 to 100 due to restriction <100> Oct 20 01:52:40.788842 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: Function called... Oct 20 01:52:40.788842 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: ncalls tottime cumtime Oct 20 01:52:40.788842 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: /usr/local/lib/python3.6/dist-packages/neutron_lib/db/api.py:132(wrapped) -> 1/0 0.000 0.000 /usr/local/lib/python3.6/dist-packages/oslo_db/api.py:135(wrapper) Oct 20 01:52:40.788842 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 1 0.000 16.883 /usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/strategies.py:1317() Oct 20 01:52:40.788842 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: /opt/stack/neutron/neutron/common/utils.py:678(inner) -> 1 0.000 0.000 /usr/local/lib/python3.6/dist-packages/neutron_lib/context.py:145(session) Oct 20 01:52:40.788842 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 1 0.000 16.928 /usr/local/lib/python3.6/dist-packages/neutron_lib/db/api.py:224(wrapped) Oct 20 01:52:40.788842 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 1 0.000 0.000 /usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/session.py:2986(is_active) Oct 20 01:52:40.788842 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: /usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/strategies.py:1317() -> 1 0.000 0.000 /usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/default.py:579(do_execute) Oct 20 01:52:40.788842 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 2 0.000 0.000 /usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/default.py:1078(post_exec) Oct 20 01:52:40.788842 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 2 0.000 0.000 /usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/default.py:1122(get_result_proxy) Oct 20 01:52:40.788842 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 0 0.000 0.000 /usr/local/lib/python3.6/dist-packages/sqlalchemy/event/attr.py:316(__call__) Oct 20 01:52:40.788842 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 1 0.000 0.000 /usr/local/lib/python3.6/dist-packages/sqlalchemy/event/base.py:266(__getattr__) Oct 20 01:52:40.788842 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 15/3 0.000 0.000 /usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/loading.py:35(instances) Oct 20 01:52:40.788842 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 1 0.000 0.000 /usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/strategies.py:1317() Oct 20 01:52:40.791161 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 1 0.000 0.000 /usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/strategies.py:1318() Oct 20 01:52:40.791161 
ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: 3 0.000 0.000 /usr/local/lib/python3.6/dist-packages/sqlalchemy/util/langhelpers.py:852(__get__)

.. _config-neutron-for-code-profiling:

Setting up Neutron for code profiling
-------------------------------------

To start profiling Neutron code, the following steps have to be taken:

#. Add the following line to the ``[default]`` section of ``/etc/neutron/neutron.conf`` (code profiling is disabled by default):

   .. code-block:: console

      enable_code_profiling = True

#. Add the following import line to each module to be profiled:

   .. code-block:: python

      from neutron.profiling import profiled_decorator

#. Decorate each method or function to be profiled as follows:

   .. code-block:: python

      @profiled_decorator.profile
      def create_subnet(self, context, subnet):

#. For each decorated method or function execution, only the top 50 calls by cumulative CPU time are logged. This can be changed by adding the following line to the ``[default]`` section of ``/etc/neutron/neutron.conf``:

   .. code-block:: console

      code_profiling_calls_to_log = 100

Profiling code with the Neutron Rally job
------------------------------------------

Code profiling is enabled for the ``neutron-rally-task`` job in Neutron's check queue in Zuul. Taking advantage of the fact that ``os-profiler`` is enabled for this job, the data logged by the ``profiled_decorator.profile`` decorator includes the ``os-profiler`` ``parent trace-id`` and ``trace-id``, as can be seen here:

.. code-block:: console

   Oct 20 01:52:40.759379 ubuntu-bionic-vexxhost-sjc1-0012393267 neutron-server[19578]: DEBUG neutron.profiling.profiled_decorator [None req-dc2d428f-4531-4f07-a12d-56843b5f9374 c_rally_8af8f2b4_YbhFJ6Ge c_rally_8af8f2b4_fqvy1XJp] os-profiler parent trace-id c5b30c7f-100b-4e1c-8f07-b2c38f41ad65 trace-id 6324fa85-ea5f-4ae2-9d89-2aabff0dddfc 16928 millisecs elapsed for neutron.plugins.ml2.plugin.create_port((, , {'port': {'tenant_id': '421ab52e126e45af81a3eb1962613e18', 'network_id': 'dc59577a-9589-4617-82b5-6ee31dbdb15d', 'fixed_ips': [{'ip_address': '1.1.5.177', 'subnet_id': 'e15ec947-9edd-4793-bf0f-c463c7ff2f62'}], 'admin_state_up': True, 'device_id': 'f33db890-7958-440e-b07b-432e40bb4049', 'device_owner': 'network:router_interface', 'name': '', 'project_id': '421ab52e126e45af81a3eb1962613e18', 'mac_address': , 'allowed_address_pairs': , 'extra_dhcp_opts': None, 'binding:vnic_type': 'normal', 'binding:host_id': , 'binding:profile': , 'port_security_enabled': , 'description': '', 'security_groups': }}), {}):

Community developers wanting to use this to correlate data from ``os-profiler`` and the ``profiled_decorator.profile`` decorator can submit a ``DNM`` (Do Not Merge) patch, decorating the functions and methods they want to profile, and optionally:

#. Configure the number of calls to be logged in the ``neutron-rally-task`` job definition, as described in :ref:`config-neutron-for-code-profiling`.

#. Increase the ``timeout`` parameter value of the ``neutron-rally-task`` job in the `.zuul yaml file`_. The value used for the Neutron gate might be too short when logging large quantities of profiling data.

.. _`.zuul yaml file`: https://github.com/openstack/neutron/blob/master/.zuul.yaml

The ``profiled_decorator.profile`` and ``os-profiler`` data will be found in the ``neutron-rally-task`` log files and ``HTML report`` respectively.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/db_layer.rst0000644000175000017500000001147500000000000026104 0ustar00coreycorey00000000000000
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

   Convention for heading levels in Neutron devref:
   =======  Heading 0 (reserved for the title in a document)
   -------  Heading 1
   ~~~~~~~  Heading 2
   +++++++  Heading 3
   '''''''  Heading 4
   (Avoid deeper levels because they do not render well.)

Neutron Database Layer
======================

This section contains some common information that will be useful for developers who need to make db changes.

Difference between 'default' and 'server_default' parameters for columns
------------------------------------------------------------------------

For columns it is possible to set 'default' or 'server_default'. What is the difference between them, and when should each be used? The explanation is quite simple:

* ``default`` - the default value that SQLAlchemy will specify in queries for creating instances of a given model;
* ``server_default`` - the default value for a column that SQLAlchemy will specify in the DDL.

In summary, 'default' has no effect in migrations, and only 'server_default' should be used there. To keep migrations in sync with models, the server_default parameter should also be set on the model. If a default value in the database is not needed, 'server_default' should not be used. The declarative approach can be bypassed (i.e. 'default' may be omitted in the model) if the default is enforced through business logic.

Database migrations
-------------------

For details on the neutron-db-manage wrapper and alembic migrations, see Alembic Migrations.

.. _testing-database-migrations:

Tests to verify that database migrations and models are in sync
----------------------------------------------------------------

.. automodule:: neutron.tests.functional.db.test_migrations

.. autoclass:: _TestModelsMigrations
   :members:

The Standard Attribute Table
----------------------------

There are many attributes that we would like to store in the database which are common across many Neutron objects (e.g. tags, timestamps, rbac entries). We have previously been handling this by duplicating the schema to every table via model mixins. This means that a DB migration is required for each object that wants to adopt one of these common attributes. This becomes even more cumbersome when the relationship between the attribute and the object is many-to-one, because each object then needs its own table for the attributes (assuming referential integrity is a concern).

To address this issue, the 'standardattribute' table is available. Any model can add support for this table by inheriting the 'HasStandardAttributes' mixin in neutron.db.standard_attr. This mixin will add a standard_attr_id BigInteger column to the model with a foreign key relationship to the 'standardattribute' table.
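For illustration, here is a hedged sketch of a model adopting the mixin. The ``Widget`` model and its table are hypothetical; ``HasStandardAttributes`` and the ``neutron_lib`` base classes are real, but check the current tree for their exact requirements. The ``api_collections`` property it declares is explained just below.

.. code-block:: python

   import sqlalchemy as sa
   from neutron_lib.db import model_base

   from neutron.db import standard_attr


   class Widget(standard_attr.HasStandardAttributes,
                model_base.BASEV2, model_base.HasId):
       """Hypothetical model, shown only to illustrate the mixin."""

       __tablename__ = 'widgets'

       name = sa.Column(sa.String(255))
       # server_default (not default), so the value is carried in the
       # DDL, per the default/server_default discussion above.
       admin_state_up = sa.Column(sa.Boolean, nullable=False,
                                  server_default=sa.sql.true())
       # Required by HasStandardAttributes: the API collections this
       # resource may appear under (see the explanation below).
       api_collections = ['widgets']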
The model will then be able to access any columns of the 'standardattribute' table and any tables related to it.

A model that inherits HasStandardAttributes must implement the property 'api_collections', which is a list of API resources that the new object may appear under. In most cases, this will only be one (e.g. 'ports' for the Port model). This is used by all of the service plugins that add standard attribute fields to determine which API responses need to be populated.

A model that supports the tag mechanism must implement the property 'collection_resource_map', a dict mapping 'collection_name' to 'resource_name' for API resources, and must also set 'tag_support' to True.

The introduction of a new standard attribute only requires one column addition to the 'standardattribute' table for one-to-one relationships, or a new table for one-to-many or one-to-zero relationships. Then all of the models using the 'HasStandardAttributes' mixin will automatically gain access to the new attribute.

Any attributes that will apply to every neutron resource (e.g. timestamps) can be added directly to the 'standardattribute' table. For things that will frequently be NULL for most entries (e.g. a column to store an error reason), a new table should be added and joined to in a query, to avoid a large number of NULL entries in the database.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/db_models.rst0000644000175000017500000000425600000000000026252 0ustar00coreycorey00000000000000
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Relocation of Database Models
=============================

This document is intended to track, and to notify developers, that db models in neutron will be centralized and moved to a new tree under neutron/db/models. This was discussed in [1].

The reason for relocating db models is to solve the cyclic import issue that arises while implementing oslo versioned objects for resources in neutron. The underlying problem is that the Mixin classes and db models for some resources in neutron live in the same module. Mixin classes contain methods that fetch, add, update, and delete data via queries. These queries will be replaced with versioned objects, and the definitions of the versioned objects will use the db models. The object modules will therefore import the models, and the Mixins would need to import those objects, which would end up in a cyclic import.

Structure of Model Definitions
------------------------------

We have decided to move all model definitions to neutron/db/models/, with no further nesting after that point. A deprecation helper has already been added so that relocation does not break third-party plugins that use those models. All relocated models need to use this helper, which emits a warning and returns the new class when the old one is used; a sketch of the pattern follows. Some examples of relocated models are [2] and [3].
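The helper itself lives in the neutron tree; purely as a hedged sketch of the pattern, debtcollector provides an equivalent "moved class" shim (the ``Widget`` model and module paths here are hypothetical, and the helper actually used in neutron may differ):

.. code-block:: python

   # old_location.py -- kept in place only so that third-party imports
   # keep working after the model was relocated.
   from debtcollector import moves

   from neutron.db.models import widget as widget_model

   # Importing Widget from this old module now emits a
   # DeprecationWarning and returns the relocated class.
   Widget = moves.moved_class(widget_model.Widget, 'Widget', __name__)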
In the future, if you define new models, please make sure they are kept separate from the mixins and live under the neutron/db/models/ tree.

References
~~~~~~~~~~

[1]. https://www.mail-archive.com/openstack-dev@lists.openstack.org/msg88910.html
[2]. https://review.opendev.org/#/c/348562/
[3]. https://review.opendev.org/#/c/348757/
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/dns_order.rst0000644000175000017500000001252700000000000026301 0ustar00coreycorey00000000000000
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

   Convention for heading levels in Neutron devref:
   =======  Heading 0 (reserved for the title in a document)
   -------  Heading 1
   ~~~~~~~  Heading 2
   +++++++  Heading 3
   '''''''  Heading 4
   (Avoid deeper levels because they do not render well.)

Keep DNS Nameserver Order Consistency In Neutron
================================================

In Neutron subnets, DNS nameservers are prioritized by the order in which they are given when the subnet is created or updated. This means that if you create a subnet with multiple DNS servers, the order will be retained, and guests will receive the DNS servers in that order. The same applies to update operations on subnets that add, remove, or update DNS servers.
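The sections below use the legacy ``neutron`` CLI that was current when this document was written. For reference, a roughly equivalent reordering with the unified ``openstack`` client would look like the following (hedged; check ``openstack subnet set --help`` for the exact option names and whether both flags may be combined in one call)::

    openstack subnet set 1a2d261b-b233-3ab9-902e-88576a82afa6 \
        --no-dns-nameservers \
        --dns-nameserver 3.3.3.3 --dns-nameserver 2.2.2.2 --dns-nameserver 1.1.1.1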
Get Subnet Details Info
-----------------------

::

    changzhi@stack:~/devstack$ neutron subnet-list
    +--------------------------------------+------+-------------+--------------------------------------------+
    | id                                   | name | cidr        | allocation_pools                           |
    +--------------------------------------+------+-------------+--------------------------------------------+
    | 1a2d261b-b233-3ab9-902e-88576a82afa6 |      | 10.0.0.0/24 | {"start": "10.0.0.2", "end": "10.0.0.254"} |
    +--------------------------------------+------+-------------+--------------------------------------------+

    changzhi@stack:~/devstack$ neutron subnet-show 1a2d261b-b233-3ab9-902e-88576a82afa6
    +------------------+--------------------------------------------+
    | Field            | Value                                      |
    +------------------+--------------------------------------------+
    | allocation_pools | {"start": "10.0.0.2", "end": "10.0.0.254"} |
    | cidr             | 10.0.0.0/24                                |
    | dns_nameservers  | 1.1.1.1                                    |
    |                  | 2.2.2.2                                    |
    |                  | 3.3.3.3                                    |
    | enable_dhcp      | True                                       |
    | gateway_ip       | 10.0.0.1                                   |
    | host_routes      |                                            |
    | id               | 1a2d26fb-b733-4ab3-992e-88554a87afa6       |
    | ip_version       | 4                                          |
    | name             |                                            |
    | network_id       | a404518c-800d-2353-9193-57dbb42ac5ee       |
    | tenant_id        | 3868290ab10f417390acbb754160dbb2           |
    +------------------+--------------------------------------------+

Update Subnet DNS Nameservers
-----------------------------

::

    neutron subnet-update 1a2d261b-b233-3ab9-902e-88576a82afa6 \
        --dns_nameservers list=true 3.3.3.3 2.2.2.2 1.1.1.1

    changzhi@stack:~/devstack$ neutron subnet-show 1a2d261b-b233-3ab9-902e-88576a82afa6
    +------------------+--------------------------------------------+
    | Field            | Value                                      |
    +------------------+--------------------------------------------+
    | allocation_pools | {"start": "10.0.0.2", "end": "10.0.0.254"} |
    | cidr             | 10.0.0.0/24                                |
    | dns_nameservers  | 3.3.3.3                                    |
    |                  | 2.2.2.2                                    |
    |                  | 1.1.1.1                                    |
    | enable_dhcp      | True                                       |
    | gateway_ip       | 10.0.0.1                                   |
    | host_routes      |                                            |
    | id               | 1a2d26fb-b733-4ab3-992e-88554a87afa6       |
    | ip_version       | 4                                          |
    | name             |                                            |
    | network_id       | a404518c-800d-2353-9193-57dbb42ac5ee       |
    | tenant_id        | 3868290ab10f417390acbb754160dbb2           |
    +------------------+--------------------------------------------+

As shown in the above output, the order of the DNS nameservers has been updated. New virtual machines deployed to this subnet will receive the DNS nameservers in this new priority order. Existing virtual machines that have already been deployed will not be immediately affected by changing the DNS nameserver order on the neutron subnet. Virtual machines that are configured to get their IP address via DHCP will detect the DNS nameserver order change when their DHCP lease expires or when the virtual machine is restarted. Existing virtual machines configured with a static IP address will never detect the updated DNS nameserver order.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/external_dns_integration.rst0000644000175000017500000000346400000000000031413 0ustar00coreycorey00000000000000
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
Integration with external DNS services
======================================

Since the Mitaka release, neutron has had an interface defined to interact with an external DNS service. This interface is based on an abstract driver that can be used as the base class to implement concrete drivers to interact with various DNS services. The reference implementation of such a driver integrates neutron with OpenStack Designate.

This integration allows users to publish *dns_name* and *dns_domain* attributes associated with floating IP addresses, ports, and networks in an external DNS service.

Changes to the neutron API
--------------------------

To support integration with an external DNS service, the *dns_name* and *dns_domain* attributes were added to floating ips, ports, and networks. The *dns_name* specifies the name to be associated with a corresponding IP address, both of which will be published to an existing domain with the name *dns_domain* in the external DNS service.

Specifically, floating ips, ports, and networks are extended as follows:

* Floating ips have a *dns_name* and a *dns_domain* attribute.
* Ports have a *dns_name* attribute.
* Networks have a *dns_domain* attribute.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/i18n.rst0000644000175000017500000000241700000000000025076 0ustar00coreycorey00000000000000
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

   Convention for heading levels in Neutron devref:
   =======  Heading 0 (reserved for the title in a document)
   -------  Heading 1
   ~~~~~~~  Heading 2
   +++++++  Heading 3
   '''''''  Heading 4
   (Avoid deeper levels because they do not render well.)

Neutron Stadium i18n
====================

* Refer to the oslo_i18n documentation for the general mechanisms that should be used: https://docs.openstack.org/oslo.i18n/latest/user/usage.html

* Each stadium project should NOT consume the _i18n module from neutron-lib or neutron.

* It is recommended that you create a {package_name}/_i18n.py file in your repo, and use that; a sketch follows. Your localization strings will also live in your repo.
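A minimal sketch of such a ``_i18n.py`` module, following the oslo_i18n usage documentation linked above (``networking_foo`` is a placeholder package name):

.. code-block:: python

   # networking_foo/_i18n.py -- per the oslo.i18n usage docs; adjust
   # DOMAIN to your own package name.
   import oslo_i18n

   DOMAIN = "networking_foo"

   _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)

   # The primary translation function using the well-known name "_"
   _ = _translators.primary


   def get_available_languages():
       return oslo_i18n.get_available_languages(DOMAIN)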
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.1510432
neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/images/0000755000175000017500000000000000000000000025026 5ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/images/live-mig-ovs-hybrid.png0000644000175000017500000033420000000000000031333 0ustar00coreycorey00000000000000
[binary PNG image data omitted: live-mig-ovs-hybrid.png, the rendered "live-migration with ovs-hybrid plug" sequence diagram generated from the PlantUML source below]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/images/live-mig-ovs-hybrid.txt0000644000175000017500000001140100000000000031361 0ustar00coreycorey00000000000000
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Generated using plantuml.
@startuml
title live-migration with ovs-hybrid plug : nova<->neutron interactions

participant nova_conductor
participant nova_compute1
participant nova_compute2
participant neutron_server
participant neutron_l2_agent2
participant neutron_l2_agent1
participant neutron_l3_agent2
participant neutron_l3_agent1

nova_conductor -> nova_compute1 : live_migrate
activate nova_compute1
nova_compute1 -> nova_compute2 : RPC.call : pre_live_migrate()
activate nova_compute2
nova_compute2 -> neutron_server : REST : list_port()
activate neutron_server
neutron_server -> nova_compute2
deactivate neutron_server

group port plugged on host2
nova_compute2 -> nova_compute2 : plug_vifs()
activate neutron_l2_agent2
neutron_l2_agent2 -> neutron_server : RPC.call : get_devices_details_list_and_failed_devices(devices : [port])
activate neutron_server
neutron_server -> neutron_l2_agent2
deactivate neutron_server
neutron_l2_agent2 -> neutron_server : RPC.call : update_device_list(devices_up : [port])
activate neutron_server
neutron_server -> neutron_l2_agent2
deactivate neutron_server
note over neutron_server
port status is never changed
since port is not bound to host2
end note
deactivate neutron_l2_agent2
end

nova_compute2 -> nova_compute1
deactivate nova_compute2

group proactive dvr router creation
nova_compute1 -> neutron_server : REST : update_port('binding:profile'={'migrating_to':'host2'})
activate neutron_server
neutron_server -> neutron_l3_agent1: RPC.cast(fanout) : port_update(port)
activate neutron_l3_agent1
destroy neutron_l3_agent1
note over neutron_l3_agent1
"migrating_to" does not match host
end note
neutron_server -> neutron_l3_agent2: RPC.cast(fanout) : port_update(port)
activate neutron_l3_agent2
note over neutron_l3_agent2
proactively create DVR router
end note
deactivate neutron_l3_agent2
neutron_server -> nova_compute1
deactivate neutron_server
end

note over nova_compute1, nova_compute2
libvirt handles the live-migration
end note

note left of nova_compute1
live migration succeeded
end note

nova_compute1 -> nova_compute1 : post_live_migration

group port unplugged on host1
nova_compute1 -> nova_compute1 : unplug_vifs()
activate neutron_l2_agent1
neutron_l2_agent1 -> neutron_server : RPC.call : update_device_list(devices_down : [port])
activate neutron_server
neutron_server -> neutron_server : update_port_status(DOWN)
neutron_server -> neutron_l2_agent1
deactivate neutron_l2_agent1
note over neutron_server
port status changed to DOWN
since port is bound to host1
end note
deactivate neutron_server
end

nova_compute1 -> nova_compute2 : RPC.cast : post_live_migration_at_destination()
deactivate nova_compute1
activate nova_compute2
nova_compute2 -> neutron_server : REST : update_port({'binding:host_id':'host2', 'binding:profile':{}})
activate neutron_server
neutron_server -> neutron_server : update_port_status(DOWN)
neutron_server -> neutron_l2_agent1 : RPC.cast(fanout) : port_update(port)
activate neutron_l2_agent1
destroy neutron_l2_agent1
note over neutron_l2_agent1
port not hosted on host1
end note
neutron_server -> neutron_l2_agent2 : RPC.cast(fanout) : port_update(port)
activate neutron_l2_agent2
neutron_server -> nova_compute2
deactivate nova_compute2
deactivate neutron_server

group port_update processed by agent that really hosts the port
neutron_l2_agent2 -> neutron_server : RPC.call : get_devices_details_list_and_failed_devices(devices : [port])
activate neutron_server
neutron_server -> neutron_server : update_port_status(BUILD)
neutron_server -> neutron_l2_agent2
deactivate neutron_server
neutron_l2_agent2 -> neutron_server : RPC.call : update_device_list(devices_up : [port])
activate neutron_server
neutron_server -> neutron_server : update_port_status(ACTIVE)
neutron_server -> neutron_l2_agent2
deactivate neutron_server
note over neutron_server
port status changed to ACTIVE
since port is now bound to host2
end note
deactivate neutron_l2_agent2
end group

@enduml
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/images/live-mig.png0000644000175000017500000035071300000000000027256 0ustar00coreycorey00000000000000
[binary PNG image data omitted: live-mig.png]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/images/live-mig.txt0000644000175000017500000001163200000000000027303 0ustar00coreycorey00000000000000
..
      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.


      Generated using plantuml.
@startuml

title live-migration with ovs-normal plug : nova<->neutron interactions

participant nova_conductor
participant nova_compute1
participant nova_compute2
participant neutron_server
participant neutron_l2_agent2
participant neutron_l2_agent1
participant neutron_l3_agent2

nova_conductor -> nova_compute1 : live_migrate
activate nova_compute1

nova_compute1 -> nova_compute2 : RPC.call : pre_live_migrate()
activate nova_compute2
nova_compute2 -> neutron_server : REST : list_port()
activate neutron_server
neutron_server -> nova_compute2
deactivate neutron_server
nova_compute2 -> nova_compute2 : plug_vifs()
nova_compute2 -> nova_compute1
deactivate nova_compute2

group proactive dvr router creation
nova_compute1 -> neutron_server : REST : update_port('binding:profile'={'migrating_to':'host2'})
activate neutron_server
neutron_server -> neutron_l3_agent1: RPC.cast(fanout) : port_update(port)
activate neutron_l3_agent1
destroy neutron_l3_agent1
note over neutron_l3_agent1
"migrating_to" does not match host
end note
neutron_server -> neutron_l3_agent2: RPC.cast(fanout) : port_update(port)
activate neutron_l3_agent2
note over neutron_l3_agent2
proactively create DVR router
end note
deactivate neutron_l3_agent2
neutron_server -> nova_compute1
deactivate neutron_server
end

note over nova_compute1, nova_compute2
libvirt handles the live-migration
end note

group port plugged on host2
note over nova_compute2
libvirt creates tap device
end note
neutron_l2_agent2 -> neutron_server : RPC.call : get_devices_details_list_and_failed_devices(devices : [port])
activate neutron_l2_agent2
activate neutron_server
neutron_server -> neutron_l2_agent2
deactivate neutron_server
neutron_l2_agent2 -> neutron_server : RPC.call : update_device_list(devices_up : [port])
activate neutron_server
neutron_server -> neutron_l2_agent2
deactivate neutron_server
note over neutron_server
port status is never changed
since port is not bound to host2
end note
deactivate neutron_l2_agent2
end

group port unplugged on host1
note over nova_compute1
libvirt destroys the VM
and corresponding tap device
end note
neutron_l2_agent1 -> neutron_server : RPC.call : update_device_list(devices_down : [port])
activate neutron_l2_agent1
activate neutron_server
neutron_server -> neutron_server : update_port_status(DOWN)
neutron_server -> neutron_l2_agent1
deactivate neutron_l2_agent1
note over neutron_server
port status changed to DOWN
since port is bound to host1
end note
deactivate neutron_server
end

note left of nova_compute1
live migration succeeded
end note

nova_compute1 -> nova_compute1 : post_live_migration
nova_compute1 -> nova_compute1 : unplug_vifs()
nova_compute1 -> nova_compute2 : RPC.cast : post_live_migration_at_destination()
deactivate nova_compute1
activate nova_compute2
nova_compute2 -> neutron_server : REST : update_port({'binding:host_id':'host2', 'binding:profile':{}})
activate neutron_server
neutron_server -> neutron_server : update_port_status(DOWN)
neutron_server -> neutron_l2_agent1 : RPC.cast(fanout) : port_update(port)
activate neutron_l2_agent1
destroy neutron_l2_agent1
note over neutron_l2_agent1
port not hosted on host1
end note
neutron_server -> neutron_l2_agent2 : RPC.cast(fanout) : port_update(port)
activate neutron_l2_agent2
neutron_server -> nova_compute2
deactivate nova_compute2
deactivate neutron_server

group port_update processed by agent that really hosts the port
neutron_l2_agent2 -> neutron_server : RPC.call : get_devices_details_list_and_failed_devices(devices : [port])
activate neutron_server
neutron_server -> neutron_server : update_port_status(BUILD)
neutron_server -> neutron_l2_agent2
deactivate neutron_server
neutron_l2_agent2 -> neutron_server : RPC.call : update_device_list(devices_up : [port])
activate neutron_server
neutron_server -> neutron_server : update_port_status(ACTIVE)
neutron_server -> neutron_l2_agent2
deactivate neutron_server
note over neutron_server
port status changed to ACTIVE
since port is now bound to host2
end note
deactivate neutron_l2_agent2
end group

@enduml
././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/images/under-the-hood-scenario-1-ovs-compute.png
22 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/images/under-the-hood-scenario-1-ovs-comp0000644000175000017500000061276700000000000033306 0ustar00coreycorey00000000000000
[binary PNG image data omitted: "under the hood" scenario 1 OVS compute-node diagram; the file carries Adobe XMP (iTXt) metadata ahead of the image data]
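Aside: both diagrams end with the same status handshake: the server flips the port to BUILD when the hosting l2 agent asks for device details, and to ACTIVE once the agent reports the device up. A hedged observer sketch follows, reusing the hypothetical neutron client and port_id from the earlier aside; it is illustrative only, not code from this tarball.

import time

def wait_for_port_active(neutron, port_id, host='host2', timeout=60):
    # Poll show_port() until the migrated port is ACTIVE on the destination
    # host; DOWN and BUILD are the intermediate states shown in the diagrams.
    deadline = time.time() + timeout
    while time.time() < deadline:
        port = neutron.show_port(port_id)['port']
        if port['status'] == 'ACTIVE' and port['binding:host_id'] == host:
            return port
        time.sleep(2)
    raise RuntimeError('port %s not ACTIVE on %s within %ss'
                       % (port_id, host, timeout))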
vѸ#T.#-9 [SAr8;7`K'IbfOB~$Pʺ% N5[ѭ$~kN){B̷hLE˾mYb\pv{JٷayCi˭{,F#oQ:KzH&.)m77[af䗦4 VXlY8U^e]_231+?IguB@!P<Yy`)$b*>Q&e>l gQ+"b֬-|XJB^[ >tkCvVH}.V( ZBÔY˔CINOKa~&RG(: GY\AX /{Rֳ){TNB@!P ''{/n@S>TI,w*Ϗ@~k~m r̤ҙW¡yK /@) 0_ 0"]Nٸ0`ya@yc!DP1=p[n惚EQq:b@" %q4{G!bX.{je`(S¸.gyt`Ef:hXPnUeFsp]--,o?q=1V>aOU2Wye8rfnA^0,~ھߨ[-u"!E1QҞC[&n*.")wGNru0qjI{&yyr^3+ZnXb衄\deYp㔉\izea7ie!]N&-e=ʺqq^$\T됇 .ltZn3HVeȫ!O{c?^1^'ฟ3M$)'f8Sc<;oIeX\&p[y$8MI{7;2LٓtC)5ISUvCR} ܂O?3V!#2%n77*z|~K<(ӓgxN-Xز Wb }OA{4G%B :~0c㬒BB້@׮@' a|9QYxqk7 hq\BE!i׾̼,{$䳱~Ou¼Ͽy{5 x8HAy2]y?mjau,[/#/RǟgޯIɟ3$ǿ1~̼_F$?->EAyپyK2+K^8 @u;}x?8&G=II$ ?(㓯A=+rݷ߸^3p~KNqq/){ZkĨ̺ 2ճqYy~oz $̬C}ŋ5taۤ}[VI17-u൷v(8yqLڱhqL۹p1޷;e!y8&hY[GIBk&qn7Dž@qEQ]{32lW~YϝgX`+X2,tB\Wo2~ 8H,b[ݰ\/( ױamڭ]D`Yri7#əX\`IEA X,IȨX%,O2.I$X !$jdXrr%dSrXTbYsr:h!ЭҊEY,y"٩qhac;K\Xw'@a-]Mpkb- e[H%ŶyZ/˶BÜ#9 !{׾F,t͆/иQ7~=dܣU#Yn K;=@e!mAȲ/_Ϻ7gM:$FnGjc?}!+Y!R7 B2yhض[X_T28b^!xR m5֩ъn|}T':$V_x%_ĶHJ![+ԭ֚:$I~ף~_o wu:43,kߌˏ8Ӑ"'ܪߋH9iN:vXw7(S炛$ѭ5ZV ]J kC|;67d*-e2fX+ }qZR5bQ9~s*VMw _~9k1.P ȑC6EŪUPffG9T$7g\G.ʋ!ljB@!Oذa^|E:*UjT2-reB!B@\v A h5,v OZ~tR$`S= 1. ~[P<??6mڄKI2樺?Yy5Bx45qwwxK/D_4'^TR<rj՘}0,?Ȍ>H4UY(={ֈ%Nm2%˕`lXQnSy<玟C8}žz艣-KPM^2^ƍЧO*UꞚ2h%&O"eف򞀼ϋD"*ݻwF wK S$>Š.W<8J6 rU*ޒJbO%I- Eށz| -s\Μj]#@W=3n66dlA'tN+~|+!EKkͺ~|T=/=ئZb׿cK\xp$)|iMJJ¢Euq8Jw@cLjL8:uWWz gK cG8s*'?Rg#--`%q'(hH#xuPƸZt*)&VlE2?C3S$.M54AYtSt.o9q q5^g-lv:A$ޙ!a =\ItDͳS }MYڂg=}$vs=h-{SSNK4:yd 0{ָ̮?$4^m{Cao*ԉߵּ:WnDTJT ]OyGc?BC~ʒ{YPc`_p\QUԬSMsgI2|l_w,quqC@ʨݴ, >mY>wqV=aع4ҬQ{o@ :x/-ߏ8yzFFiIlỶagm~f`ѧmq$*K.ޟPy \Y_~}VUa!K`iYb>kj]!@ˈc0~_Vٟx~pSX,i5=a.w=QZ[/'krvW|/E@)_cdzZOW$R1=Jq+Ū 6&ڥ+3v-u+ 3AG1oл[\?wqLDQbs]FJl Ӕ񃕏*CTt٘I$rCG·=g@zP=ViL#t18/Zn/Bgk9ㇲjBZ$U{CZ$~}L{' @Z^i䀃KTFO xXvS |9c> g)ߦT O#; .,_;$BVx~1N+;NeC%uTJnh vޚQ7tn^n[ .>ekpJ8sH 2|cQCQLV(R,BZr*Ҕf{}=55{OjW9=񌤫X{*/z6>@xY__3R3oU_qGJ`kgeۆ54u~9&5a94;~ {FsO7{.⟹P@rR*c 5~OGrJhh 8s.ҟ˞@#/ bS\9WOE|L:ʶW qL8`DzUa^^k=w7vArB]c_4iu4%%&֚/Q_qi"K ' jMUHCKCPSNbI+DI^y"D#휝55|Xh춴QZW0yz:uW蓊8ӧ"K60B@IqѸ 6,eg)T⁳+*fzlPp*ww[D?{`Ka5t{:W @yTd?o,-qU'%ک.aᇿ`k/TOh Vcۖ3(W""Zˊ1_/rJqi9\|Qy5{WD@@lt,ST撌׽ؿ}abyܿK-kURp~@Ú`tE!N,k?&m,- '#M  GLdm1d[]ޝڕA=ՐXtXX3<3`BHH +. -ΔEc'7­FU3-DUz|> ;fLüǡzՠ(?'oA(ts&F^8z/к 98~1 ѩ6xꇑhۤu)]”8\e"%(B2৕ߢEm's=| }ł?RK ݓi)H6zJͨۓ$O㳑еX)uIW#* 5zBMd\Feؤ2b+芴pcXű-h6|K4s_ɞ55Gw* RРQI\6 c.Ebbn>Z1GKC19=-qQLc.\$VvLu#0y#+ ~?yq Yp8=Eol>p݄]qڱN#7 zlּ @~pv+`M1 uq7AHޯrb,.awWH9 Hq:4e3;,>x=K+tgZ]PO4h $مgeͮ% CM^BUHfoX P5oO_\V~?xcPEĻd(h??U[Lhף,zJ_GQU2zܔ8Ilpb.K*eY$}/\gRp`ifa lxJn&}Ap=7G_wC@ UEVca uH}rsqs G;o7pӹ@isaGް Ckr0W7K|kX)"¾0oLFd>7?^ ~A談r;×>ŊOa7W,Ǩp ,QW\iݠ}vmۈYo=ͺaވ{Jqw`s}ʜN ۋôUYQ;|ƫ"/LkFQ.N@l%\"k8{b钐8$63TzUT䉽.ؑ:BKߖ!MpeukUڣOOE|@)6c;Q8[ O+mR}%FT.$Ԣ@2Vbݶ0.t`ߗM3#< 2mzʸ"Z46ރժcZ|bD)?*ȣyo_hKYע(_P׽$q;+q\Kc^8]?:< tiq¨ӣ⎮ܙ8 tq4"W瑛]Ĥ&vg?㗦#*Öz.W%H0yۋ3=,L@龜?\*% R\ݏ?CO"6@LŶxrP#T+YgR#4- {CLR uF=kLK>Uӷ d*5㳼%cBBˆrAD3%#}Y,&nDX"8b^T|gƉ3ѨPڍڗ gt}o<5^o;1]E4S}3⪐wgp9dm>S]Cŷ׆e=j{0mMlEB”+ض*bɔ5ps·lV Lع9 N|&} }foWMT8Şo/E}ѾA/޸sXD&Šitك߾݅_CbG~bO6\k=ZScdX{҃d,֌A߮|i12uGHp>} CZ‹jL$ ~4  ^2Nu v!)qWpxcZCxsuN>4[-*;w#B5uYç|K*)V3'B¥ Rq1vk u }S~kPMWip,RP>sCI0ھlod0%J׺w/L60ӿ+tvFZߜC}{k/^_<Դ4F[(~ߌ5fT-ykIJ؊5'EMRL|'L~[/&gng+są,E/5[W{Ox ɵr}WI! ЁT,\ R_f邎5+rր/4*-~p3bv(Z#iC#!>G%sڳJ\K ;j]i? 
l-4𤚁< )#oޒU:7 H0ž%k GޙWבS]'ƾS;1mr= Kif#D6q2;5wD%h}f@g_)-G~WBd-:Q?#_=*+r#l 7^eHرMaCbyrP\jUl%o&7eɺyJuCvBBƨ^B'aWtZ,fD܆]?FW|{.Ӭ mئ<ǁ N7.C}xTnoH}ɒ!CCYQ$a|d}t Ҹ#[BlaAvoYOEӤ=-?nhz2 \mѢcV*}'q@IDAT١ ]z6>v3A%엙d.#>YR3ճϢfͫQF?)*GˁfRdYbqu-Q/5K1؇5UNpyIx t=ܸ*ᖾ#qw[OYS{J 3:{=*7툄ToԪa7U>rK&[kDq\މJG8S>Nݳ#B DX]h/Ţńq[ (_ZjVb̤DlHj'ʏ!8rs>!h[9l=q)$>]yo[hu0{6+C} ?ضIkMm9$pӆ$\r!C).DX$p9j*c"\gZ +2EPjip ;'*%]yڐmҵrFlI%k~Y.ÛK^"C}^Cy\>wףb{: Vў2t' ;_vˁ%t ~Tҭ$0{I'KT%l,$nʩ,̑VDbl"z,?ӗ2z7;CX"+'W'F0qC?T %aa5ˎ|wK=4!%}sKg(T&ɔ?ie [;8p{!7x3_Œ\m)O䙓{[۠/h`c+Vr4z1d3iSn#&=ө_COr͍Ĩm,2?⍗;9rο4Zp%Et} M#HW}swmK?t գ#ܼ<]C|Un?sx26zGgXbQJb@ig\t^l\Q+ϊAbаκAr~8s|4QwAȋtq,Γ88ۏ;jw̡N8rg&78::.D7q")ӈ~ 1q, uʣj*ZԒ |^RN/dʃ yEBû]GE8_);WesehZ/32wx 1tB_UT`6ekÍe}DL!"CI$sQMKn0a?NNhHneG4a z5}A;Gz*(2.lXS{O[2Sr 9{O8ka|tؓ,8+칖Ŗ$!]Hq!:FsFAPYUʳ( o}Hu/}kz^ŋE Q `G&pU/7S|h'LI†y ]q=PA8ZYVH`,Xqol+) 3glj'*:g[$>^^8+,v5`5p mA20v:NWi%JnÙԩ%jqD(_N|plW٫{%b#յ- |RBBKu2 if'sB҅KX*\kf2eѵ}ԨYzQNNq_v-7=l811:Xݻ`ضp]o2ϱ(ZkË1IC~,J"ltDň'C%lZVԁPx 4_iD䲆 2u=-Ez$Wc#)99Ia<+>!c=;c|'C&8ZtmϷ_Q]t3(xV)pp!$2|cPJu'c6C yUνo5-%~]pgDkpx9v/Gb/0yH:CЪu*GD˒aXb'eQhg+qSN]&8VXuDzOA/'kOk:~R`8y ߖ] X9=|3ӟP%=?耊+3,Mvɖ3NN~C5Q ;t Xj!֯ qK0\{z;|J$Y 4l͘5zn-U:DmQ^S UY\Ͳ~t_[oٲ.h_kc ,u%noFun~fI7(O_qp[8j/[~t [VmgcgSJ/mEWgQ8 f'+'yڈ  Kj޼~͵kױaJ\+`մ 4ah>oB`J<9PNc>pVfb[Cǎ0rH"\֩w_Э[w t0@->ɸ~-SOZע׎[%ұmV1|eN_,{K܍DCKpF|-]v<q l[‡_"$5@顐F밤{ ap=%  Ϝf煢hC~CmrOw"o1tP0񇧇_\wCh䒟HX+Z`Y3N.P[b۲q8n=CH2 hz`Ifߨ`t_(Q^*7[42a-YndѰqi2XHs~Ix[~%2H9(~10xT^@Ü<$/Y;oŀuaL3f.gϣ͓[BuGDFDa›jFʔ._y 6Cbi]7dˮp IPHթӧx\L-bIt}GyvņOΏ#'ϯ@\3۾ Ы81>GպKy5][R%RoI qNhe_jתO;.Ft|oN閇$?k)_0_h- h+JkX6=WtяWs^eh,DIRѻP|2 #1~ %Q4֮ |l?AqyHºsHD}D2H,Q C_]_?p`p?,+77S?(Y+ Ə?ogM x.qIȞSp J#Od8e9e.YXI0t_i '/b+[6X|yP`{b#;\G= /0hDŽ~Oc j@.<$(eƲ ŭ[z>"f5^uɩ ^'z,{g#>( tlcGHr/N]A!Eg&!GK/WAv=0fxݿq/˼R9p@@S;$Z]tܸ h$ IݺwGu1|``rP͏ƭoG^d~ST^+2<…e%+o'teh63&q5OŻSߢnȂlGN yȏ9үfa#. QxLJt"'vqOg}Kmfh6m9C$Ri\l+>nnXeg"Eݸ)<0_|=A4oielI1ܸ{Gޥ73Tu;2'p3F^aѰ~ |KrEHw%bi²=+b3NSL-Xǎ}'cr՚S}q]`T]I@B衅:& MQH*(Ҥ;T.wK"&Kx {ʾΔ.$i̚ gG|j=ƼO>aC:3Р.H?w`N]C^Cn~  伷ᣟ*iROʋ~^Kaӕo-$Faܤls4v4kKXr]C1y1$O]X3Fn1u~V+u#J$ O߾S~A wDtB" 篞~nw ~VՉdA{mJ"+;kiރ_! gYf>}Wщ*?l H2x&vmvVOqb-b \ᄋlLmNJ>ARMѝ \Bp.wD^i-g"a)9GgY#`w@tlw2}a1Drl=F.0B&>IxZfh{"?t\ѽOWtns|43|Tf걐G3?pliPBØj|cMwo~74epj,6l68~ꆞ}\6Zu)X~rW6Z0IS^G?#%5Uİ jֽ{5}++I%:5cb tq@нgn nƌdͩOZ;1+GXۑ D@5q_i` ##-{/հA -hs ڝ99+bK\åc۞p)C^)ey /GrT4Ѵ\&fls>UYSA \~z2>¥j\uQ)cšCy $fZ {6 G݇_dgԥtc5vklI@f~d6)N8eC e۶m;/o6ldDhKڰN_z5 ps"e0d 2;q1p#Qtz4,sWWOngpC5 73*vTUJP:٭xr $$WC' V =n+ɔp.Gt榖}@xnVZƠ0Nʢi~9[.wIy Ww_P_ {(ݻBisАwoѮTqg\(K{:S?, %&^hDVVMK7mm(QI4qNm#wi`>Rʖ],A'SRqfy\Y+dnRO;wtܥ6޺ec)&3* Jk۲eKBXNT" Րϓ (iVm ZœO6dU[Zk,,;|G +8xAh'*iZҗFr)fFK:TfSMd̓]vE1:CN!e_N훯q!(s+,{sN~':vO++`[et1ΐ6")'72jO)N܂s+-l\@J!XCF-%"{{{xgb_B@B}T j n?}b˱jҬB)31̉Uq8^njBWGE|Qʋ,3;;\ 7kWb̙>tV`:Z,ra!ra%*[:m+uj=-1iLHQX߲@i#tiŲW?ݰ`0racTE\1L}d MhY4Be-hŸJ'Q@IC#.Xqccd ڗoa5W=Rg<"2̰{n,Z4 IqXnR+%X#ohd9߾O^]_kڌy&,%ͯ"X80cƂ'!}ceNL1?}4>pN:qs7cΪ%Tb0f&Bwi']3(OW|Ӓsc q>5 3>2(o F>0%> z쒳 Dwv|*j+"MtO!-T0H\i@|qh%<[`0|hw~3L*c$!^͇"qi.?hKKOի0h@g,\0zt÷M$<ퟓW'a 7ʿ?EW{2wĊ"!>Np9k\D<^pԜ+M}CB-[~ȑAqwŚw3aCu9K3s-i!&/{w})uoXʟGA d1KהϱG1iҋ23"=@"C?,>OSWLƮ}[зw ig:E_xY|$8N:SbzVU~ˉV}i<0")lBnݣ#(F9-cQh_ftM,$y7/͇q&DG(Y?u+y)Hy0eS\`\^Q Е[{HLL@Ͱ-Կ?tuJ.e ŝ?. 
n^y/wm>f ]dl h6\Y zKE[œ:ጡG<@7,qh: ù7X&N ؕ%&n4\<~ IlM={{):e>'z֋hӣ5~Z?nZׯE]he7n;;gbJ4I 5P?08w$MR6 0°SoQ$1xp?/<8:"l`cgQ'+TG_ |Kt6m?oA1xȋ.YA˂'OLcc<ϟ?[7ų0ٳ߱ O6(:WOApn^6u*)F^?4B(j bR{.u|ksfQ&+XHEqh{\8xQ/ODn=o?xyyݣ\<k$a`a)IHL#6QQሌK.uBDq10F灃e`a ^]"Zسz 藻XSw#f"Hciyne}ܮW[ ^N`ϯcuy7ɋ;lq%it \G%&!:&&cD]CH ܏zqc@47h\ ava֋[q0/N}H4۷_N?_V`\<|̂G%s~ڏi";u3$Z> K ; &aii#h23H#14){n!|47VBP.< KǙP5dR`X\ۅ] gé|5/6cWR5xzyb rI:~>G>GX :Stpp&<AVVe VB$%A.CaH[XȪk,>4JO^pUC,ː6u}"%!W]ùk /y"'ӝ0S@s!"_!8e )̨3: =zI`[Z>PKcMiBS QNuR|^}&V) xФEF_'sɬ'rzOz^_tB^(Af]v̀c B* ?,zo#Yd|d48~tK}OX}#Lp>s.^Q#,,QȹLe͐ihD3k֠11hyE%'xU9`Cc ݆vA$#~<Naҳ%D& !O_4k׎v [Ff$Z9y0W@<}С2ܹ.БY۠~'O`A|/?qNs.Db9p57Fy񿉘4=Xe Y(14C@p+Y5O1S0FZr 9>cĔ79K+I1}[ 0I yRypLIO-N:2J Kތ=Yf%w3ȷWd^;zzYȓ~ddj-|ѨECwJZ b(n*MB3S3-#\w|lў"agHS=<`AqwtT3Ü7g%qf ZM-Ѧ2-c9'1'Cӷ6o@&Q#6"|8<3&=LŸ쀋,OZN^> fg,!hEU.]~1m2MjN:> "pm|";c鮺`O>5%%0h~n4f.R$FƤ]h^E5);~߸6YT$ R8@v.N5sh[e8~Nl?0lhr)zqA`5I¹{sFй _CyN@6$-\ܿ=&1{4h{kψI&GV$>o1\B[;b(Ll~6M0w[c~?'&Q3:w j]Fi͒Ő΁-Ш G"al>x*&6 PRnj8JcGV]lp]\ѭƑ ̛!=Dy?-X $LG X.~\h׳+2.oǗmohպ UB9Bđ;!p놠B{po}3V Ǘ8ڞ>CXY i#[IL_Pौ''xQȍѮwO(Ī_%`۶Fٿq%ۣSV%{MŸ~t_!0m K)b G֢?`.bAtiNZ[6R3p!!SA \agt yI҇Q-gB wݴ CY9»`v،SwC#dš@ C0(nƓ]h4lEM0dio%wR@Nt?y`7d~AdGof]1rBbHs Vӽ) b Vܑ] [d(wRmbwђ|>YC[=tucHĦC}OfjrX^roe>k}+4zBi H;?" jA,d]M6eÖ|&’D\䐿#YbaF5֖ 6+8X X>9gz\OSuFr97eѫFEO"^.  N۲ۅDؘ:v:,Dѹ&yI} I[5FUG96u&&"zGys)yR1] 6%!ʠe瑗6c^}O$ 깥AJciqSe(N)+bjd[1_X4擽KV@7MaKl:¡MNH:M\Dދiv遜aP5Sh}1^HD$Y'Ұ#@ie?x,( 8h,ő8pj.1|{ 40jk#!x;4 Ogc]SndJ*>]I( &f'-ǡ ߓtk} $k{B%d&6%R"C<^z2}aOgl˚^G{-ʥD63s ]FO_G~$f+d߿ 8u4iY rEF;&0v; oOt pM _ ZՑg Α!whLrK+b YEWFq^aOA[j\ډK"!NϿn}(P]GIm0Ohu?RӬ h{DдauL2;Zjh2zϠA|,?M_ȁLRZnݳ?m yۏ-$Ե%vl'gd `?򫆪<~$5F7?Bcv) A@\БLnZM\=; x4`! oY )|T _5ߕOBoSCWҀ71f ٯc` eh7O#6'xz80SX[J k'JX6A\hqhNؘShG&Q-%Z%启D]Kœܨg*딚D/#ʥ +T#[!7M`kŒD*'UJ`[M{ҳA@~Ђ>XK8!R 0er0c=LZ$Ijd(qN~GEGĩM!ZJ(P t=q?`Xx%^!O9 ē\i$,PDꐚɿ;+@L4P8Ͽmh6#EF&Ke{TRK:c vNQIT?$DS8םBQ l]V.%7LQ32-+=+ 3'SI瞼of?Z2sx=ٓV0]1=JwI ~XJU=%aWҹ*'YyP-N͵qzGC_5ۚ\8N𹢙JNIG/[70BvOEf,]Hs((5Ӵ ͅw*Bpa@N|kEω( AVyH,eC禢CN-@mQ%dmYUNQh^$%xŻPKRqJ+^릐KL[䫎-4QIx]@,YfkHt@= E%1T"+TF@}ɹ t(,a/GzFg0 vMnYGJ(HVndYS\uLMNi/F|\K,0Ryw",+/j^ynƩ O3L!$E򾼤)+?cKzI͞c7$EkoR<UI]tPj(Sc4u,bfxp&E>㣊Iy0)̯2rxyIB VWlyS5`uS%}y\0}LkqGbr"|5?C2aIΌ4ǫM%r 2Ъ >ۚTر(ޖDr?c ؊ʸDfQW^$:1;qq4, Y>?G<|8?n;EO&ӀZV c*|uwk$l%RӠ'g'.mgH簹ӑ',k%PxFNf6M4UDt҄B0Cͧ0֫%Sx7@IDATj>4u(RD;]N׬&LG]J#y t1Y*^Kފ|5LLg($,W)5պa!azrp"35ϊ; ԥӳ+0+31w^=M!1E|r9~Ȼl'Rwtaܲ#۶׷i@WL]$T,~v դB.-UkaܪS1؍ѫB=L%ENQڭ8{/Sz7X]Rێog_6;k}=>ĒЦ=Z&$;-[zgi=u~ T oyL|:y%tóDގB@KrAi*ͼ$ #;oPr7,Wj5 wCh/!K:#b mmy ?Q9GIrr,5&/_Ͷ>{F\?֊&9PPЪ)Tj;%6 'v+%F]-"<3ҐKB﫷oG\D!H'5u\0nx( e&B.2 7G~ƾHPKH -У.hk csQ'e0vxK8X#n%:0"st@Wq%,wlL`GoĽ@g I;sinZt9ToɦБM7v}QƮZ'iF|j# 㪏|X|GE&\"3 =?YMmԏbʒ8`% 6ͽOe}nY:4EEY);)xpEbb&MR`d(++0|gKЄSR61LokүU91S-wv,C&4qX&Zd&=+*OO4n {Y\t\^iO[vQ曨Oz; -5$ UL^w!)9 ]~ߪ2Rxv{;aO4Bps$e5ФfUI]G\5-[ٺ.Vߡ79~]iI 0.9NDJ:E|z-CRT,OEMZU98Fj&ԐECOsDUHp<{dK!=dD"//K3U>͞x( w06O*و> +gFb z6IFI+P6oH+;3h׭2+y2ǿԎ>S. z5RpʔN;/lWm Ͻ0I1oB{g :GL SbXP֬UENҝl4u|?k=D'"=tsm#S{qQߜH;ʒH-gG}b{b{07?QaowI:b;1EM^mp7'nbaD62v[TN_uS zex\lKİW -M=Uq*wU[u\V4J?g"de*2Z\"-3۸{cټhkM˜1'c[]ZLP:;BW^~AL"SIѹgO^u9HFi#6PBc0(A댿=p-6#z ;)Ї\fMn~sjy3Hӫg 3#ɓE TwM) R@H@V.mœ6Wjijֱ۳~Ȍѩ5]%O~h7%H麒##ݧF$Aɞ*pbdeդ0=!)9!zwk+c"7r ies0^T^>Yw?D H-8[.suncC%-s)WEӕmU̘ǭqUS̷) V4Bd|)+EesTB;֯_OBevaT 3U)#Cxq=ؑO2\ r]-A)E<=Uפ;( 9d]I*pUO[Dtt=F'}+g'}hx+BA,#qam^a݈8s!'OV94MA| (8w%#_U=y`iLޭmv A0g@UQ C/ZszU=Vk] '{jSQـ 06֫b}R S)l s Sޝ3oriq^UQ &ɩzHpU QW0sP=rIIpʲ|eM՚Kߍ{/7T0GJνhJKIFe$/^! +Ck:i׽}XL~v:uƌU sS-ANڿ uo5*R~?5 gůy sn[T0JP"ib<#((4l10g`Te!_wHbB xR8<u',#OŒkr/Rh!HO~s~ ΪI0WGcr4ORpS8+[W`4N-(lsR BtDq%$'P]pIir'3HafD^ޏ w`h 6M01yB^ ?QKdDx!GƔK]WaړVv?(p\$&$l6_G|~ B|O>ι2)ɸÐ q(]!մl-ܢ:9YHUf5 ;lFz/ }ag]e896bSNhu ! 
ٿ> 3&#O♗RCYQ䈖|>6np0Ӈ<1p~D>.| [nߚ(IY)vPINP #K9rX[~H| CO@zf6zưŕKqr-ض/->@nf O/)2phWtkv9@7\1`p/bgUsmu}x *ȧ$CҍȐCR34sR8󃍹.\9_;>o ['/4<$8#4ꁴ){$P"9&g{ZZJuakc 7!EVXG4+9PݷJ\c.EKcx88@Qt&JE߯?#^%Yvt(|%$05֧> %M6ly*푟v0vD=?oV@;(7lDZ+97`ǹKP#pv.O+hk_+ٌ뉚2:{Sd4q<+򒮄U? [BJ=sdU}l[4 ˗A8V/3=nX~ A;"'6wl[`uKg|΁;'|F@O4~^j L#eR)``FKOBnnnQ E*|ⅲQ@Bx'bA+x:}2b] fظC(`da{rWxNXCl|yg`gch$d'!x"q\. cJ?6ωr( %S> X3v G/n]:T_sac`BP Lů+><['h̜q*7'6}VV j g PU). _-YQ1e??c`c[Z.bSu(3?8yW̞ϳ1g(FM<iVoچ0:D3&5 nZi7`bݞxv%Z:ς .?1JJZի&RB--6dd5I"-2 IMBJZZGkKF|!nBAڒXh/Qk?4l 0kƿ3&~K)HI_lXP CF< mC/~M9 B$rɬ%$ eT o:1E7?Gkcxn^XZ@qH)z xgQԲWa2Ct#Kg5khx\-a`|\֖KB-u&FrZ %"r/7 RE~yc"?(V0Bc&6sx>g~Ʋu!9CFtM<Z0Y?wcO.2~f߶_KroM\B*V5dƾN~e77*s֌!~:&p_<[h tRh@˶N{*2/ .lCXMbM`txfK++%R. 4Aڀ e^f8v(q;c]jo#%= K"& f ѻw0khf'A 3Jg GPi 9F@6\&̓b 0h͑J˩y1x6'jF=h;f~&h GT dr /ENz"ZaNoBO.Ry /բF|"P^yKVceo8m sB߿=|Ҍ\)i3!syuȭ1}>$9! rvÇrE\' X+B|8"<;_;2aո@¢I7 +DJnd2LMAn}dBc:e}XUDMO L`lf /o_EaM|kH 3\?-r[U\_wdޞ)jQ/Qw6rG5DxQۏq<|? s؀X7}xBJAHKYT:.#Ȳڴ]E)Vj~[s\p H1zh([}+>, u*'Vcm iD̷dA~5󐛕!aig]7!aR]"Cs$DF>DJRRNW0>ݻAYn~y䦍IBv/F=d6e=b&V4penYHKM%{<3Yi4CB)s9)IoIv $&E"&1)Y%2qƕ7'ZLZS48= Dc+X( Ɇ$ee"3-U6ᙃ2[qj2qjPCGQH~/`V&)4& jʃ6f8VQ 1ץM/u]Ж\mJxf!xk$< [#ۏ#";)gnAX: ?PG0%U(#S Cs=`} K:zK󑓓A#2B;i4Stc[M`K-)+7BbµWa} K9sOG2jSaH<-ؘ~}^WC85ꆡcaۯ }T,XNÐH* %Rs -Ǝ[omOlJsGe`i3: OfFOXaB/#}a>_( nd"ʧZϞLF^@%\j@L2pHq ɑk.ռa| fgw/H`W`Eˆ4yp*Of]Vdg>=9*>~$bv׌N6BΟ;}.0G 9BwpLp*;{^NzƐ6Ǡms'" ׷|_#,0|`(buh>e%`>Ӫ[n,޲8ݩ LNٽ6[ˣĹ#"Va9 0I#'⎷q7CE< 7Bv?  Ѻ{hYߛjƟ;p<. {v~ oxxk (%YF$yg.mؽwl|}qi2ƒPFBcrf v̛bKqw8|'FPAcBO-swnZ!4}>t9Z毁Jb$1гBLؤVƼhDabHqeFnIeMTB/K맯Q`?nĠAٖi j}1]ٵ}oyrk%aԬGvԲ 5tMqR45 Y28{S"ƝkW$g2j=¶P2Nc9cb&#45L*_wgb +6WDpv2v@9m~&w7Wh 2ib]1U>z}g`v X? ñ8x#<'~od ֢ͨ0(pqca/z>'.^N_Nx2+(1zH"[[Sb)pˁL'ӵR-ar}A\jw7#=N >u4l6oBn6GWL-Oo)\;] C f4THBкm5D9Syh߾!L pV&q$ض ]NMq2 GK_HŽoaۙh(ӯao5h6pz&4K9D_Ħ h{Or zvYYX hUqKH|]f` WS᪦ H8a|+^xo ˿ƌH UQK`g02 d ӻ1cd!\d<4Yx’~ֺ'_sSP@c\Կ0!<Ǭlp O ^cjSVoE>wyc%m=dKUPHjLCZ*7tk? dǪm vlE UA.ׇ]!Adv>;sѰ? EO)G"{uFUnM'IHhBcTޝ;逸 ˝`HK '@nK&!@ 6ͼy377|aN8k+x.a 7 1޴DqwlC` J2l<VVI9="5 s}n2_sYSoo11mHPװpz.myrj}\~^7p( {C&PAPW Ɩ:B .AAЩeDZ]9j+tmbh{)- =**+[Z@-4՞ƺoR kH_WC&[R2ci:.OB;*KGS"Nu֔%m mU6R2ٯjm3U9RGj|׵fΣIF]l>( OS l|O 2dh ]C 0`(UD#OJ]A0oUu .P6gT 61-N;-WS{v̟`4gSY<XSSfr'| ?l^.FNcBCl"ŵ8Žq/ QU *up7j;d릢W#֬x %&-vDjv1?/,]Ш50w^b %-hmPDIe_t^@-R6l܊w]`!C p'?նLVT9gqDX Q[ehWL%n4u2Mά3n<(72Y3^ǜPZoPU$SmD0+'&?}s /bwV׽xk,m-y#x"2U s-aCK(-$κZxt #Mhh3ϱ_QBA@\ k{?Kf *:-~Aa]Gڂ0ioRZSJ0PBN{k'%@DDnͩO"y{5º⭏kaK ҦXuTŸcGJ7KQUjuj9g 6Cxi}FrK:,>QS7'{Ud݌Ne*XbthWL*3$*RS7yy_X۸@^T`ez/ p8cs=ah1(IkYo(齆? nu Dwo =[g2oq w3L{xBK~Zfp%H6GW`vO|f,[WY^\,H4=*!JOٓ&~;RR9ALW#2uBe1ֶ0W\CW-6@@vؾ\1ƿMiirݭ tTYYz K{{ `eE9U|jCKaF. B@oQUb=\蹚\Er)sd*+fU]Fc,!"ZJ?<[ $gq-` d ụӪ9\Hk6fSoLƈr[ÆD|^S{">XbF難1 mt-vV`IZk`~o#pW+(5/ʴ`%MCs*^HJ6 1'*>W6UĵnA&RoXObHN!΂muinʉ\Pil~ѫ8"m\=7ə`OG#GV`LnOܜ\%79Kg0>iC%ƻwps8P(ꍯJJKsq O!ۍpޛl+e(i]߶˽hvk11utG/9'סG!M%s$j刍M$۫uTEP 1>nuU|ܛԛUXK;zz@֠^%<ߔXSo7D&!`J cWC[.J&=w>܅ԫf V]]mn"$tj3!CdAC&!@->S6K,,x3N :N=_g /𙕕ӧ|*0yyt ˇ:m᠑w0c\$Nqe&e-5Kc 22 &WD[Hy2Rvk.TׄΪ@!h WXRUxv. | C(Q,䄚L\HO!~PQqg8,-̐ܲ@R s+,s3fE^Yqn`D[D1s\7'?;F=n|@8`k3 (+I@u?ZMO2}xڛ/p nJO7wZq [D&mL03\ -/#Jj1΅sՕP9G,niP\O|Z̖xhh [;TdQU;; ^@%Â56X:DWRٰaBUݓ cO -SqC2Q4pc܎v=bŝQB1]LyϦ|8gRHk2 W߇'O.I`ɸ*˱QCiquq,%+>C cbPPYE&IfpFB9A'g/$I3MAQAH_e/Ƅ0?)mۄgA "~|g.Gƹt:saĐ\vuq_MS,JT\</&RԳp a3.EfN2iocjb4=[-HK?A#dO|8mYnb0e|C8#&^-n-h;/e}y'JPYx}I(I^,D%`߉m~7rzl+GQG'ΥT-A˩8 }Y,[4 ێGyvlXƝk8G$Mt no}PRnk P(cBڪ!(܍uj ,SN% mV  L *VByGj"c]xw%=b#'w2NL(JcCА 넂؝ 嵏@_ s75xڨ *_y%(}':=a*c2zăRpQC^@DP&߈?Ɯ,9cJB폇 F#^n<2$h䌎%uc_]f",Ae)8qt7;d.LVHOقRQZEqtEIf,vťAb{@m3?|WC_Qѓ^\At&6qpXPoNjqUD/!H!~Dmꈽc "))qsBiI1 Slŏ+^Dpৈ_A} o{5j9_Hxg,VoY=q$YqwΆP5Ǹo܇nL J\OL=HR0%Qp(Zdێs 1Bey!:Ɗ*? 
g ADa`SGJ/htY .}'v?n;IoB5];vGt 4Ԥ⧯B@<4Q>d]vD ~X23&̂F0[֠1}c?7޳i%O|v}O Ae! žafn.~ۿ;Ea#a~4UЧH|[˾Q-b:ziNLFkMF]x6(T2~Q2bW\Y^N5%ϛ͹ގZ* z׳B_zvC1$ Gydk8/janm1װ\:|zazɭp' z ^&S3=%`]VAFxʧ8V KbU0mbRߖ???g;jk؟h;VKK4V8-ܳ> ?1{9 :3S :%×߉^F_d$xN0a^ė0yڻpU#?60eʓb:Sgk(U %N’C`xU{0=kFJxylVW|~( _/Á]=ryr-*{M~H\EIض/8^G| ܋*٭AK{A`OJTSI}<:۽j+llt8g?<:)qh㇞]cޅ1Q}ů'WS>\6zL D ]t.?-őGqJ;#㊸بSFĶM^oJiiixM3\N7=hEx֍XV% o Tƹ 2' s1?#OJXէ?#sBOJyfmrhLLquu9Cgt {JD%TMC|xFROp>Tn\} eo>dB @<֖&`_I ?t e9SșPI=( ^ e9&TaKHiJ&d&ۄ^GK<@0Q7ȶS"mE w[T5|u%(_eP:##z@IDAT3~%SJgfj/DEBT,߱} a(xΞJ 0䦣 V-G0c: -1z:D9JMI 7V8inF5d* v7tZ( +w!)]!e#RSN&w66gΤU1bYv=, 1p3ۼ,B9z+16Kڂ3),HKDFkӟZ%̌>$SXf BȔr[#! $y޴s'q2:|,?Ǫi^NJ\f=IN e%\젶rin GEj\R#kjyh %;C2×e+G} VW(k;Nd`iX8=*~ؼCmڇ}B||In:BJ 2)UJHfn9I<*UNpv LiUmSs{X[ЏGOOk{gXZ;L:"lpq''jdY{)q3nj5cqAEcPB,UMҽ+-Y!|5N['wh̲?k[ZS3TpB%"毈 }jr N+^GpbTLoBYSc+y?T0,> *+*wW [+YZ aQwa7o_4;VƞpwS7ar!cU 909F#BX[:ln P8?`,nzvtAg:sjO4KK={нb@',,%)얝obX֤˂eg[I\\5ikʑ=ta#2SOSe)V#)E^,6oBUƽH {N&RPs~" sCy"^aΓy>oE;Ж8B7du$g|,A.dǣ8{[@I)߆e Wۢ߀!\zs# $J"QCc_"q7P'@Å:8UeBy3pf6h ,l"-BD)]nX[yۙ>:@t]9_@Z aCK=MP\\3A p,E!?k? h+1dRKѿ~;[Hyz]R.x)uL:w.Ou#b6iQ$koRS& =CfspuC-ݢ_L:EpguD7c rD"=IMi \"b%+8">S0>iwk]C`ȸl7JKl`F)-i?TPu1awb`'jI!uѧk( 2 ÂQ?0[w 456 lo_ Or{Hxm4ogOJ=# s㑣ʌ[ki3$YʌY:V>.UVҒjQ}ahUYU$a)|(Eqs0hUd|ϋ|a8#&IVk/ jT¿. gz'Sro%c4Y)Ȗ'~*@R}(: iDg-+TV<!081bٹ׾jA. N] 8 }_!ŕ< (nh 7Xrjjjt :Ck4`H򻕜:[XHz7PR z^@7"Erj 9NY ϿDj /GIL-wuAq6_b~rWjЫ#&,R]p:w*C1ajS\bp!b. Is>OQ)t]^ 7 륥W*T @uPΐyٔ5ܔHu6R Bn\2NeB+`J:jyYcqa=Gݲȱ T a {r{ߩG0?qi0$`s"'ㅀLK[7I/R#*h=z,Kr ;8tXy݈Iڊ ǠhaFGrjP02j31!R܊sJ:Ii}],¼&?EbM2,m-=tce)tV/F7i/lvxlcemMj`D [Ju7vRI^)N][Es7g@=+WIGۛ,ߴUseo\ض:\6;}[QfIztρ4*;&5E8_Rθ*ÁP A`Fj%3\ŏ`^Kե~& Bz7n#On]es7ii&]};$)ba3"ͼ٬2Hhl ~6֛MKjma2T>^mo#Q=kF0XYe>ޅ:k 4o؅>zhD+#'ݸ< 쌍v"/@M~TJ:O25X8;L #_9j|oVx@7:l>Z剒r5*L(ɭus4EEM5nj颫#sHo75EWV~D4LX*c+ԁ%6wXLun ,2IkZ⹣[s7^.AÚ?}s J]QQ|QoT|c t#TN=/޴]#k(|{ݴVeOnkL|6cuFX#>[{džm8`MqoAv >+G7:]lQo Cg8]]j:Wfh :qb2jh;s01DqyL--^*.Fc [(L=d= kBY4TEA+s6W! js14F1!3+?] Nԁ9Jߝ]m%N0х zzݜmvajˉ3ғ-vGkZSQ^R`22yKE - r)ս'!)-85PlI]=٤)0> ˠBDтsEn<շK J"$ӓaC^m3&|-wǬ;{aGk1cYHFGTh{P ?Ξ5iJT)Y=ƀAY3#Cutddnqss|jt5##4\;Uz(iɓ#٨~-cz_]9<kohL.3Q`(k!%3tz%z{@pLl*>ݽe6nA&6-3 _l=gvӹ킕)SᔺV",3Y 7~pRK |w>;Y& E.ݴرn=~{G㩾QH_6":lbg7ADX zW o EÚtW0bRT7d'Ǧjk t'sc_>h}a4ucQXg uϥ+֜M:t Oƍ ۶d" Oykngf;~d䯾h &JqR00rT?][V!އBI)G(WU˅I>N<1{i-h R{cgfP碃KCjNIHsύ!yiy&^J:J,-kxjtU\A޻M cc`/u/vc1ǽfWçᵭ;'-k`kSMbx}&p䡅tDORG2xb"rAx>*b竰y< c1 $A66sS#&a ̾g +A[8Ov+3G]5b ?zwaj-~焯[@^͚띌W.cj8u&YVNrXx(:qGh&ƨpϳ&µpG bǤBݙ 0=q Ol\T vcHl{f&bd81&c+tޒ %I7uKƚ@E` [1/r5\-EoB-_~!{>rۡO]_~Zjy;!T7mn@㰮-ni3iIn3z $.akg)p 'Î.9[-m& ;JN`[f0`R[9B* )ex xXUfx@s)22wt7` emʫ] s=X8h0$teؕ!xj;z;`֦$bd^x<1*lO$XbC>TS@-{_h:fX_7a$K-ݻ\k9?t{ۨ:x4J]_Y^mޮoh׍a~SW{|#n8;hb~\`DOBmNtUTOQ5SJjv\Futv>dpTA0Siaa,|њ S~[-,%%ְh5e"D-YĄ̮AV Ѱ`Zk)5f| כªu0X/'8*~6n'[JBǹq*:S./4}Cɯa9 =%Ak_ǖ㮖Dq'yyBHWZ?0515V.$吴JDzRa&-$V '"YaMIE%>%'Gz〷_#.PY.*jPiùyUIڭd1D67*s{]>K\9.M|9񼨉cB 8->|0Ӈh~A%0!fuEមcQN巔xN %-|Qה׵cHxa] PZ<;OoIbr }cp 7{xgliB0Еω߭Ph'_/57|MԿg ooO&!!ȓ1DZJ$(+#`FoffaہDdafa^Wcd#$%OσD{7/D\|% HMCI&5;NToPOc\ٸU`g+5KޚpCnj*VI&QBEvhqN/79<9"#== {b1" T%v # Qݻ+hk%waBW ΆixeQ,MCMF6eJ%ѻWWؘLDbh0axOe?&`Pޑu#+ez+L77p 1tu9GwO*/>*{ sIp/|);n u5bd=},p*|`᳹dO̠4W#&NGZi5$/ 0u@#Za}v˜ҺmGd-ma4`;B>(BRA;w÷{&TYq. 
`լywTÉ>lغiRjB7b5gkXXí9T""I\w)~g NRC !4 O}1 ;P^LoCZi)X6B0~o# tF (bBH %\w\d~=>@r= zJL< b԰`*8:3%:bj`h gzӕϏɴTY +k5marbC*t^3ݳ s-qD"RE;gsn#`_g$؇OE%%.0.DZܰ?b2x9`纵`ض/nL Te'kH&Nu58| ]mGcu2j4ۂv73 尹tM[_zaxa'2zΨ$R^\ TUԐQC0* K5(+yT9x {t4)]9'QŇόE3R'7/+~=b%C:ctg8䃔 bߞ#Ձy" -/@i%< -+GS _MУ?ULQ(4v݌H :U'Mx鸿R|OYj pS31^|\/VB97x9 =6ANBeʈB3н%Ycz#l)e@PTTF%D%TB)dHJƅ_ڊ*Wșs??"?.ǹr| ۙ% |v2#:q"W^H]'`$%ݱ'ƒc&gOMFi)qg/ gQwMȮ.(Wq70/>0t2]p Lr&`A ?n؍ <;'Iml1}O-0~eo-Bzsį}곇\8s٢ߐcjC|ħi1}\0ƞADؓVGOegʇHA@X36MNsfJrBV7ҝ5!?q} #^I);|{a}`BFLfFb3RUlbsX#>c䁳(b(TǦ397rѐ6-5Mr-b6 // mcXg+r lb4OLAǿG4#cf{b"u%#1ƬYh+u3_.Y3Y: > pLfo7Ni_dkqqşzhu3 }1L G}O=aA1;| UϵBon4;Zd/URkgd_x-vã0'yZzعӟ$.>'ع`NL_t;l㾨>CUx̅;+o~:GG³"?ݬ+&T]2iÒy/͠pWZ' or%KXptbRBD.6FH(LkՂ&}\qY PYM{sGK qE5Iw̛5UKWíixQ5BP1~ShDy9_nL@cgAkaJeUڄ0s/9 7"˶q5Ll?“E{`δaxrZ7XoK"u??~j}xLd\£Ɯ(|8x1~ed9^N}^.+Z9bL)-:܌=pלxWHj )2!tӅQ/iTTc1Pt?؈b% `8ylT 0 ﻘ&L|>!0&̜9d~dʿGK?rjO9:* #d$ToIvt*$@P>PS7[w"p}u$ Zf >\8NƁJ{ ;I)/fJr86L=?nG[Q[1JLIB1QGHpOfC0B="{w2hia  1>8v # aKbhg%Ǒr\#+ՎGY :B J ̽# Yb&MF Usʟp4r煻SBR5dnq#:.f6&f8L ZȚuœ6p8놾RO$jHP"DXX|M|K|]x^/~ݛAP24,_oAqs1:tAFR5-Wxf | bY?_ !@ra;(tVMπ5 {o7_/^1577ucRG泖[!URleEN$njj4ZSC 2Q͋5M<7Qyc./ j Wd2P~\APR!4< ʼn G[&]a碎X Y2КoW`9=f&R%/,jﱶ=3  2xG"i%ح 3>,ۛ=糜@+RcDuArTI4lvfW{ggNzsvB9<&E U? u10od0͏*=@L4&xE}z\g#%j˖5nqha}J rG27#aqGgywCU &Òny4)obÂdD]DOaw7%' ̮ {N>F&O.sa.)32I8ݟSX`ng_o?&ϙD펁w1f5bU 領s`;'xp?')4Ÿ xu0)gWSW!٩C`sYv!ҺK }8gkPbjc1l$˫ixh3S!XFM[t<!as#l#[t /Yқ4LPJc#Ԗ].%}D W7L o,E̠PJE!?C>'zDLBixB)Z<705dL#+l`;i1O;iqk7SEK|y 63<]#dhF Ot@*mK{2+$nx9Έ~u uÛtE9* >~{6 ?OxSi4ý=(aZ0DrTs|v? j[`ѕKwT_ LkP$&+ @}0gV#^]_m"@K?ם@:,'ǚtqUS7@N81$ 6alzYߌc;/?1}CPěʚg/1 P#4JWؽn?`o#mі]v֍rP*'-{N.4hХ %Ci*$W"2BNɈ2IX,yŕ<Ტ_yӍeH8YT|{tOoq(] {NqS<,R=,c$&&vЭ[(uo7lP<׵+ 2q|^x52&%݂8cFz|< |&s?wJy?)7đ3ƹmwrj̙Z`DG-v OG"# S Ț 7 UWCZXcF-_J D.6 k&_C*/$1~<"l<0kd`&^1xD? SY'ٙm/ӳn> eI!CYQ-+Y~L]VH~$rԏa#iWc0JI* aaaW'C:!@d8wM3:ُ ?K.p'&a|3gA4&.zR WKW>sey1"Uw!>k⒑ůӂi*9h~S D'ùtl|6&DpS4=>\c\a92-5T(p'3M2{%n\qhL^{z#k!Ä7Cڰ} _mLn2ZJ̞Owbѳ\a]/_e-lXɵZ9SbG2[ķ&?M$\:X_.xpi̙AVB¸Rm]˵1jj P˸$=#rjLQ3Bݹۤb"ȩB@4'J8>g^=d&51/e|p Qd} pjYH;˕p?ץ -H-ubг``t6=5.w]uC20k4J3 ḲSLn6TbuG[]Es:֣vyxLK ZuT|^h^ vn] g{qDeBfrt/dt;*j[/4Ba$gӽ- JKH,F-KBI;yooi]g/6Wu2aݭHo>nū %jK:=-K;*}nHKKP^^E JlEGTNfZ9QQ^ftv@"J> 07ǯ[ŀxf%qx 9<"}څ5T*fwѸL,eW'D O6-a %4#CCE4榈C/_bϺ:fRD s1?\b6Hsbva9g SO3+ޙ3A@3,qaas,s730.׺{]]]]mN֎]6;IڴBp0c#!n55k0h@\+a͓ZzbL&2 tN05k|_z%WVQ4<%9t PZb ԔV"T:~q7N;ISb=͓E۽fĺ|}x}ʼfk"%%EoZ_ !5{-g89Ti"XtQa'&XQj? 39gKUkH3b[֩X06o> l0bd8O'%yDնD)ujWuS;)#MkufN?ބwL)ljժT4=rlPTb2(|3PN=o#ugָXۇ dݻۢHw7G ~ny2Ii}י .NܱjEx0ZSBPy޹8AHpẑU 4leWAE7ÇáC-tŸʮ57 ݦZ3s-,, jjnF@VB$Z˕kϺ E0o+29z+qsz\oR 2*-[>*u׍Z=zSO Uw~w#q]k shۅT4/VyKl w.K,݉ض4/DjZ.BCp]=0}]?slE]j 7oK]3@WT%nWoBnq}㴴RҪ-1>>;Gѯ_xr0 ]TeRNU\ba%J7jj HdڊbxX0ΜI]fNwO>]rÚl@H?Z/ BP@QSoJn}Qd!;0nT lyw 6.Pݍp_cȐ0__Xv Gƒx%wTr@&Sْ8vWw:=?_}C :3(+Ue,V9{a\QnTro&7Aqqiز%VŎ1puS[;J~IRz>\yΰA|d-&MFvypqh-28*tfܨcG Zzѷm#rP`'-ZFogԤr@倸+pO,-_?բBAryM'gOrn)(!! VsU?۫u옋*| JSߤРOle}Ƴktl[`̘Xv쓏7gnmQ]wis@bJL swct$T*ᾶx+ 66AAn\a ;ccj4FFzE])\Wi jy-e0c%.Uv-Z?LƗ_m_lΓպЗ:u rkڙprkju%e+-֮N6\H*Ei5GKNPK%ji4-Je]PP[ce)GaQ)zhu &Lp?6@S#44uغ N6%v-{eBUrj-]qmJ<Դdtr嵸t`m_E; ̙Яo(lhQz ^ +O[4@WU\ զ-55M!C9 B|ruLPjV5iawA,,kלx8>-:r >:(̯?IUi\`i0Vh4nl"ܖEUr 'jM |K9q]cTSLL-'oumvV*]??L ֟ƍbQ ܖsm݁8&G31-=~-S+go<~Le7P tQv2AMNY #>![b̜f's@jYS&eؽ{zfW*?TU1bA22\I_QQ?}{b}cq`!<LNtܜyh4ZMr '߆;f.),7T%OYf98Wݻ}K,'O,(zmu]rYr]Y$X.R i(!Օ7iX][ſ=dp=hm 9^K9O&J !yM!>*b_5]roI=rRh2D]+?Ҕ$s\7nk:m5}*게%h_q_vK/s%UVJ^|F=>)WʗgXTmXRł&IVSe3_@7y>.|Ya\#mUMJ"aSK I/. 
}}RdI6W=[rI^Jf2jF/W)T)yY/0aZ957UDy>Cnڤ\_&zfo |PU6M Lx!4UsV5M)!ޓu/|F^ƋL;7)WY\/U!͌u)SWG}'G6V7df^#Ɉo/*A OFp2UT\+49$^npaX%@)tHtt 〬j'JLQlyV,uslðd.ܕ_V8ˢPZAmUq#RW=ft.(a,o|kxcڄxe u]>d5+ =Tbf$ڦx8k`^.aHLM"qpm t-ߩ%~]"s)ȓؽߋcGC6筡T^XI}`x1)Nx؏ʆ²;쬒w@e9yJd9:adte?p{N{(ԁqJ ,hO IckBƍ'8|T1o0rmA+HSt>@2Y @^ >ÑUa|d96X5Dq6O4E˯Ŏ>8|f%go EìzEQ=?ړ^-mYY_&C)QV5COI}8HN/׊ >X&R;{FKuҭzJ-1`7ۈ4&}(;fh*B B /eaOrLa-KIp¦>͓8%>-=}B >t SN>p}(7sΈҧ% %阡Cɫg؇Umx*ߵCWg-CɋT|ϔ>3C l1޶ َBt vUUDss@ӕ?QPN4**xk{<\ۆŧBig^O>E%W`,{ Ev+fg? ١6BZl>9.7 $gm8.wOWx"kGfS6!ر.6. l2'(9䇟osZ NEeXa bA밗ʀ{SZB+XztvE7x}pPq*=҉{!v؇*pKT88! ¬_#TXQ1.A~aoZ &ړ6RZyIU5?uV֫G.yaִE)W .HqJ?vVSЉqwi}ʩe}%Hαǧs‘ц\-60#:S O~]8cѽ6&v>GH+|*nP짲PI["O)1dyeڞLQ]9.._hˢٜ0?17AQ׿On+BT\a!6 a JX$Lj[h[c,e*|&JǓwBhY7KօKGeFq N(F%WYN^( vm >.90\=mxTr3M C89WpO&Rb,}Fc J.\YzeP-o~Yɶ>&y($g1ws #D1(cS1c;Zc?*VYdψ+%ICTr6c|"ƼrbliS٘#kpM6'qC7='Fؙʆ e9{|r+9Q6$ƿ[ˏ|ߘ'[ҿ)}h~MFT~/|GZ[XZT8㵿F+/]T|6ITr˕:( S6eCz' NQ>}CcJKAh@ \='#هu&S'ߤmr{ ~g6xfwP\Wa-7qF jR9pp`֬YxW~s;ShP)XZ$)D v~U XU}Hђw*IJ] ˴ARiKGb=Ci,tM:& hQ nXv I}t g|r6Axs$^V {Tp } gz|] vNg8!XT79FL O2b%H倗fvVU M,xZdBd[*TEN'ӝh5'hj떴>X!V᫏K)iu3Y1DBY".KGst A[QP!ENUT i?<|BYә((bzSH Oh @#|,{+RZ%XB]('*f m:xžy̘9y(O,|3'(REn DbJsRdɐ?xZ RQL[4](/е倮ȔN7ʽ;jSRM5:Rѕ$ njkķQɕČcĿXEbND6yp:<=JYC"V*䕫 +0d{0̘~?-5 >\n>EQ2`+\>'1ģ480F^ ?1VRA2QI9ӡl&tN63w0^א/HOQ.$Ƥ ɓ3! ȇ\*o|6ǰyӓ{}6 eV<cduHVBT 0fr<2V<87:xrb{1=m&ڔ;+rc5X1] (ﱬT֘8'iJJ&XLrZuV-yQI'R_SrL?g?%bt0|7Onxwai{QvJ_>T҇8LJ~&7\ e,++wb"+ /8)?"e[1ĽI ,(f}.Vw'-sC>_hbQBÇ8~)fd7ǝ@x"fF|K^[֧O wOkyJf:46/گ׮J|ERx_V&$Ѵ+kZ,}K2庤?+!yh3-ô ͜G~cg``-N"*vJ X[p%qR+$vbUUgbQ.q{XZe@|FY~4KKPgk:j>d^)C#YԱ>t@˕6;s^ O~+-FQrFөȎy2*y0Á}f֩$#/XrҼG߅7+mfBZtW$pɕ( "#XW5~QH )PJKBޗغDt%jXF÷B|GQ/̐*`戏?ˠQ}W&7U% r(sq~iSXQFS N f@*]goXk<%PdwE|eRxc4 mt׈-/цr4b-ӂ7(1(;wZ[:#1^H]3ndh+/&|z]y+.{]HKVW+[2*m*TH)in-{&rZc/0k{:-sZ񊓨AY~:|[<ZYrl^~ډl*6vػm+< 6(/_HIK1?xMN f%wƳp0(Ys9|<|OF:Zr/a_vN8/}T(kP\9̥25]3fHVh# -? #h,N$d")3k6lg~‚2YO 8<,Zc+_8?.ńd(J i/'%Lm_Mݹa;;߬ǮRT۱/(yIDd||! YIQwPNx 'sB-'v;„֣+6PSYmDT HoM(LMPۯr@r,p5I,*.򓃹J믎j\ޟJmSt5-+~xȊN'pj,3Ѻ=DZm9vrr|u~s3+D9/NPњ w*_LŋtB 6# OCh+mu'9pUҚ+<Ϩ+ ѪgﲀZ-|b_-v+ξ(8q4\z 5x|hAX7dDad_;Ƴc@#[d@iu P¯݌x oPVeQ_; J0;qPС,4f}ok'>u/OSpxv? p ~R"q{0W&M?b[akd>Z,W $igťr%F5_D剝0sE2j&ŇFakqD2v퉗F A 2 /'q+=as^ux{|ZQuS<"+{;y%x`W&Ei%*i)eܜAn~9B}P0gVڬLD(တ QH. !#Л|47\}E;.~ |ص%ǎwN脇lFθG1rOF?>p!?->X/?z n}v z~2y}֒87NM*TRPk楅_-Ç'"]*2ɯ{QA㭟Ɩ]@*@9nT>x%B5|BGcڭkA+x:ACA,<;VWpȫ]|p_7,m;ws a\-H.1.XgQ^g_ +?mF6}3OSoC Bl)"fubVݳvaj.\:fJe4v]<?BS,F_ xp]*(❛g >Y8a(I§pK\=5< Ga|;'/~8-, 5'MSo>O.V~BFkoCj5⫅V<n܅/5i$]xan8,8m}o;,-kN@gksV⭹Qkor5!`: VvaăDD_snJ;ر#Om3 'mxtT+Ε["knpĉrSIV&[&*T\!<ufO2+Q)=qG\]ѯ_7<>d-0α4f^[GcD>0̼z'vϋ#=QÃ]\aCe86c@L {lɂ(OJDc;Gc7p?nTRy]f~T<<41w`&ahyn 0;6w?=GgAU|*֟Mˆ?ų/߅;)2 f/'XY޸CCu/6-Y[> =}%I-7Pzp/ 58 aR_*xcخ"-ܭ0ij{!=# GO'ZЗvAXX29jU [O t{?lĹt "EM#K<@5Tj88p:iƂϟ#Kq<@WKݼhm ?cѡF`0,U3ӚeKDm wdЪT+>͕\JKʐ["Bii2wveWy/o#7iyQ }EQј`WG[p/iә![R1q+>FB ~咘8]ge̓Q$\%BzBQUR Hմ%gUlJqx1v<^Uv ^oxrdhZnׯ߅KGK> <޸gA&u<gNv>*v#=+`;;;VaAY>ﲆlБ'@cnf2A19>wѵ_VatkćPg%ރ, w#dV>Zh͘kA˷kH0rĬVbe'ޙ9kow{ЩK(,EDM7=qy6PQl1C +v4S<ҴM 9 ]|8 j`nu#mܶ}~ Cv'蟸g8l 26&_^Юo#_riZw;/>2'vm_mG\vQ;ٚ.!xu8t2 KHT YF>/t UxvmPm9<. ?3apK5LI-Bzz2N%raRLJ"(ijS t&!G*o>^6f tS%XØ[ v8E;[;a}678n=CoX9^S~4۪}1ct[$'ة,^߯qi ™3I8z2ooYZ3K 8r"Gu\/:kh1KYϗZ~4~Tx8n:Shm0C"C*'|aSRqmm6>qd׸ycybOԕ`}ú('h{3,h0Zyn~RM*T ~orCzNhܷ`h` zWp{u3wcJn7빛wg/ ϊ;SÖ֨̂2fG9<55<nU%}O}۰u7R 7ґ]808!mx ƍ@UU8}: E'N8BNjdtT56u:A}gHdluWaim&mVX3f[lnYÜp$z%ާbb;ޭ#~[_ٌ23G|QTAx3™Xʴ2K_1aYRߎ Wd0BH+8JwCFڢo{#>&עƌ*+ =le*!"1pjIYv:pc(/xj<[k-l?6S:N.CLo O{@0#'Q܊~pbI]x+L 2 eJ;Lb/7T%pPbIxOK:tQhlQW.2\3Yב0`J_'/ OVrGOb &}{Ҹ2+l5,KʘYf{VtEtzkei. 
>:!WWYe ԻŻ[+J%|(1~Vs=0l5Tcb!U>c d #$X$I˥M$M>b\b2# 1wӿ7zvΫfN;_o?P J6XZdaAza‰݂; d)ʀOh`#Y"a,1_!C2Jo+,H_\3J6Cʐ$e V2\1@[3֩o+ZI䩡RG_ؽ7-%,\9 YbE{ylj޵ WeȻ"y,"7 uN(}4Vk<‹z ,.?X\c2Ƽ[lhws,&( +V `#E ]|><<8vT6Jhi0/q}cEAs정f9lpyu|?@h<,) c)Hו/RtniDsݙ0oq,CRd4p ȋpʩ8"mgdN^1Lᛖ@9ڞa@vx7ellH?h :R:*2Ů B&_Pq[!IJ=vwn=yOd;|J~=3ﲁӂ<;FE^K!ts qq8{"-0/wSΧY˦fK+I+-? mOM*zTLYJ۞/K+0;H H?@ xhM|G}N~X /c{bxA`9vLޣ-mٟڲ0QSpT7>8B23o? -* 9PykOTjRaSPĞ`W+|.Lb)e fav,Kj(GЅ˭ZrQc;KыiT$6>+\Qw6QjdHCQRن4we;0^.SU? ;B ' :e1'o;Q:{VTKʁB[EOaT4\GbKInz1Y`L215f"14:ow%tnH^Hy?p@G TD%e:4QyfbE-"yGtQy$X_ {P3>YPɂqkI0-mDw0*)TxA}El7 *NX$I] ]1+WhkrbZ.L G{O'n䅬!@IDAT^H2ŷ_, ..7F|$b|\}2|3`H-&4i~@\~oz.Eu[{IN8r,#F|' f. fo2aLoCdjo |h ߢ(乫`߬ώ?(wOm|7o_)X^56X)h ?y?UMڼَ+ڋJ1 ^>oɌ_Tt-n 'c>OJQ| .uE2}!aW8b͐Iʭ|_S .&g2mbyUXw:QŷANߗ&u\ċVǻo,Vo'ws1mg%"oQܩWr;&| oBov+ti7*((T4"T%rI9 r2Qdd|.&RqqeQz?xQqcĊ"r]wي9vpY1[KTHU6aږ?!R:I=G({_7[dY_]oHQm(sm'.oy6aՈb^`}a;xne}ۏ>O9ڇ["X&uY^6so [\n/ۘUV9dh$K3ppţW]3 ŋo}9 +ʹZjXݹevP  ^-'6cj~-4Of\Sb w s)3UK㦓Ê* vhxWp!N#?'KėD[2e<ջ\!'bYB:u//C։o#UpqU&vS6I;(Kj!UUP౺r \xhMTr4͝ܮ66K ŽibiydtT?͈oyWԲ9 XۓYOqb _tōgH'^J50O}T@DCa5Tj&rWE'CkFFK_C}65<&TðginQb63l)Q#?5z#_fhZE<+G$-E/h i&2L0$2WSA *Q];U?FK/fjz|S y^a1oQɽBlWUɽ hy`)qdx"F%)'Y UKkn 6tsP+G7tDɮ,+FC4B5s@h$YFG|{]'|f!!hʉð"m2qCXk_ׇx# th߾x7jJ}d?yŸ:u oFi\sUc1K>55hhՊT{ɍVpػM&b (^%i<ˬՊGrK-4$Y?3Y ])n_C`싾m;q,&Jg` ,B#0y ;ZFA#{K}_[E"wG"x(WePϋى?Bw0xZIAh<65?+ct=;PAa.C+1K?`P/&#йhlv?0{ rmnTg`.yؾw7F`[̟6d٠oȱRZDon† \h |W1hRi. C~\8v, p"^ջ`)C 0CSfCf# <կZK-ܸX_-G-y>!z|HaQ. Oc@7<'#ȹh߃G O`׃s]PnK|F%l7o xbv~;^EP,9aޞ%Lo KްS W9p 8K0/Jzcm˒-w@{zÖʡMu GxWTQ|\>)?~X3;̗aF'h,`52bռ%Ӿ'KVXRXeʼ$ڶ_G@kh_ػm)KL_||Oj,VٹcCxT4Y}%شɂǻDZn&nSCOwCp;d1_Z{/= ^\ֶ}ᘷ`4/cKнU;^q[QS oyÎЮp`߽,-%_~\wG^Kw[Z;-F= *Ge]`e~w KjRA%TTP1@-LTVD@JBc9tś;'꒿kq6v"B]`| MpL&.)BmeӖ=/K~#9POG>+ç??j[Ok3;'js[svUea$ډϧPϜRϾ1}9C}W3pƕu1 W}x߱brp? )/L~sd5v* Ycdh׹{a ,Sq1{`Lz,El;+] >T ~B.(7 ey!V*X}8+]F6v =Pj`ᰱGnJ4>|}"Y]{'v3 ΓG-ʉտAiӧP'b_ wM9oqo԰>&:ٛN"ک!څbh5`0j %|b@hŠݯzu âKt< @PHTWIbɔ\c\ ߾/-mX']QPce"?3\ӛz4I (q7@HX ># QTFrPǷ[1ЯWph-*)U,:(fE@'tA'o . wdXlG?:?a+c0umOx@oHͷ9zQsxO#l?mBz2גv'_ܾ]!zض?=j2:7CU,cbɊ6>QA574n+s; o;5! >8tVڎeݦOբSxqX{>n\j.?gS}wKA6xe|h>rO33i#++?|p/9ڥ\V!q4y}#c#c4a-3ʕ+<ͰمP9)2U8r^/1좞o`D^h QU_L?F L G$-;/N.Ғr(*zR)RDY7Yk/a[3,ޛ^ՊUj4I2R [K?† hCdt6oGېV {_&8u*ܞ)]NhreD~q4v0C~c}(3'+}JR:Rڊׂ^gj[ɡs')PRT-⍧F˜| ߆H>oYis>*+֢yZ8RɝʤdA07mPVin=3cm谗znԯZ%$ Onn.𚝝˗/ŋ4ABB<Ґ|C,)Q)2I˯O~c)@>A '>ٸ\ffN"˧ ~ {;K%: T=WO۰0nӹ5kDSf2+pVAR! 5][;+YhS5+r^ٗӰ r8Q" !R] +;;(%[;yB:'_ƶ]>< 9IgX:ZB*gB$%-ƅz4 yF`ƴI(CȦZ-"SpX(L\4Sa)3,\G6WؾɨxޑR~Rݘ$a{hG{lQfJf%zB|~fo ~[9Eh䖉O>mIWhn,L,`Dht&bf~@S Ls "ZM0+V>iiػo/:K/!J4"Vr u-x<71u*>Qч!?˶}Jo_@A%)R 'м|nw"[I8]GN##5(1oml O/9A=w4m77T"emY`c~HD/k (9KRXyđ7{rU@ Sϐ>EvzgG`Ҕ⭀e(DӳFOQ^Fb.ň|n|LP9,D㝸Z3ubgD=USjx])P\f/OŶ]p0Xi6-iM~ o-n-x!p(ŕ87TF3.jhץY(McwEo`D(#綾Hi`-k*mlI#;o'#P8=9Fn"s*+G ~xgkaӰD'OW)]ock7G#jz~" ߟtC&!XNX,7Fa!Ly4}z/Nò9~I(7U!38O0m)4"EB{i?~Ǚ ?H 7w< 3SX}2dʈ@*pKZ-oLPWMm#z48ͱ[8ps%W }*O |<'bǁ#Pej:.pus"} 7Qޔ=4Fj%(Wv*)7/~+9Ux[xY"ݠ &o R ޚE]l[ZVe\ŀ0A }JJxSB)yib9~13R ٤{R'/66}YS^gqr>;@6$@ۺ|l=xuZ?cPd/QlNat @8dow'AFZӏ깢J= Fp0Ŕ#O6R@r٬_VHW¤ceshԕͬZ< >w/`JpB6q# ӃR-wxo3zG%{{CrQٴ Y'#f,o6؏* G~n0wqF300X &V(}.eŰ2x8CycƊ#HA!MqETR3!$[!"l^36߄gOr奏@4j׈+6o8y:qHɓ`yak[]Qf hikr *79: WӲyasiG>6/%dXz( QfU<КJaq ~8y.ۊ%-o+-<{ ?Ƣ{s|;}"lzyqcOF ݛu~׮E#[`5?5.p@o_U?k~g<+:]͛7*jT\s pE/w} jnjCÊc&*41C!l"I+(7},z<v;V1,X{5䗻 Iu&&_5|*βL_oSPi5RGz+joJ~ Y-Ծd1 IJj(Qjx__$Z=t|夌VOJG+CTCj@VUʡҵ.V,%]@GLxt _ܵ^<N\w(8TͿVS:TEn;^:Jߣ5k1aν1mT3{Z_]:ۭ+itXtWP@+8p =A@d>_:SXt(@? 
u[nzضb;}{zBq%oW/IJQ$GŋO}eF-RMeܧE)E~%{l@TU!9kM2O)6(uq:oEYR:wW]Q)u9y2xifJwu-rJn}RQfMUE4)H3u)PFo FMEaAhRÛiI 4T`]^c俖K ^:g B>:=TSmn$(Ϲ7_)0gF4] ?oi&J#xPI9o]wA O3>w%/iCpȧM  ( hOs.@LH{ʇJ=ʹO pC\t&Bu HqŵkKyQ$L)X^2W#*>Xy,YmJ9vX< 3$&D)|K֦437G(\6nL :7[t7FQУn!}g\!qm2N[rJX[ߧuݹk+BBr:'O[j.ЯjYY!{)hGqǑ#qLt;(pC SD@#ƉVsjo8CԪQ^g_)IC51P'J^Gbҝ@8Ԧ^[$;G/?|{WfZ"nǮe @;q{r]Q!S9rGSV S(bg{ 8e[kU蓒Y>3 N5bĖؔCE:4V|fQI.3z069<,-ck?ZZ:Nb^1%QQXn66[D)s`lӦpwWڨf^{&h۶ /~5m;ߙyH&nUtP`-J$k/QՁsY˿zUfViܚw]VGi\.]Ns%.%*};bbKě߱< MMk*uO]ܻ.'zwߔ[yPMh`"JM}Ir~nu6Y#dWFo6[ .T'etwcM?]BRW]NSYkVڨsi$cV 1#18@iZ"Zb3lV|IkIiHDJKCqՇ1`CGavP~4"u'U;#4Q91k5Ir|mՁ9hɨ3*Njr2㯢p0^RkB_'I>ga ek_Ju}<n!%)) qB=G$]x}%IkFn9nފ^Ёbd!.d}غZ+n" Ix0TjyE3˫^RT$)הTV{(˾5sc`99!I:F-Z6?o5Tg3<Ҧ!#](}ef- j|itMz]4|-#/ NNeJ;BW|HEʸxS" U$vb^*]ZJe,R'Ly$SER\M-oΧIJ)%R3pes0Ǚ! rUiwHK=Fw20O¢Z5r"evf^?siJ>뗻;B"B"d Q\RJb (-Qc${D܅x$;->i 2\[dI*tyșBid[ a a#P+mFF4g0"!;93ge]Wt_wWmѱy:T"s$=g @{{? 57nKܱ'Nݭ1;9shG3|1ui6Ut?͂w 8iP_r9G|50 i/$LEJ;ƌ&b bIӯXi`cdo_{ k_/͒@#珧# 񈾘ʈp󄇇= %*Rqx tfmeDw:ϧĴqYp2EtrO;,诸|%A14J%vUɇ,)Ix29eqwo]7_i$$ߛ Fpb԰ /\+ig5[<)=Of:&OM¥1+83/% |90Uu/JU>9RYsu;<#hPEK9ue"SN`SDNΤ˳ld"1& 4KDF$5˔ agB;z…F//Gؘ+\_#e&Szo*n?)$5+n&82#ڠ'`f ^s/y(/XS슝Coo̯W] Me'hnK<4jc$ok=F0c-w!?k&vnNGqdv?X@X0xwjL'z~9JLdEKx/ gJ) vâ>L#J \}[<$4/wl⒵ G=qpp|,X #FG¬A3iF]23;<+=0G\z+9czX0S)QJ y8 쇍 Κr=}/Z eHuOq8-,[ |Z74֑ư|#lᣇGIҸH%u5]̚m9T!PdwDxOWw;cqlyɯI1=!(v+:SQ uwW~tm?/Ue }\yE "Ro7TQQ?F`qr#vu'@S5ʪ/B*-)k*a*;dD}JFq+ +~14o-#R7 $cd j(̐뺤]DyA*SW(ȭb]^Ű,+8Ä:fxHڻn@xXo}ۢAlP1VႚY w5 -$̡sfKG FA$H֮vf0*$L&-˝K{|du>|ҸҖh42GR|<OÈ6 ߷ϟ-G'7ȽB֮X$O|1046rŖ-zLWK"3GCaS.\D( W!|Uzv&wm1;[J sDo߆A$ ?ԅ_Ζۢ^'`.aX>±};1wC)LQTZdԃYCN\sf.tkF#4 TE@!Ehtf&\\_"c'  VAK_[*Y'ƵHn^Eȧ:?۴" E Ʀ"|QP*Y"D L-vf0vFx 0#XˀA$uIG;A9d,F\{L݃Q:v@Hkl[s )]0| C"{H9r hKv.-R)c1jò(i2S3QL r# \׬" /Xc2,xo?zƆ7)AzBZ-톆K4LM:1]0ޚAQK*P=a>r,/ů}MMѦSOT(WIBwJ?}IL^v@P* JݲDɂBc幖I<+y2΢D6F^-`619JMLK]Ŋ6WT a"USO(p%emZf$ ;ϫ#\}(p)PN\!ڍEgbK.8p`^r6TU9WԒ(l Lذ6UA = Wш(sH.0Ű%>_נ(@@\\{CW˪;U = 2qb204}x}h{,8fU | ph2{?_J!{%ĆEfΓT@L)K )+7&9QqO~U2K'C}dPBRg莈y0&H G~&hGAS<72SޯaײC?xH919Z^y8/M1HRzF)xbbnU 4 Ml~N$˖ X=B"پ%^ 1iF7dRobhM #Sq9NZ{" (UG:b%_BSC)xfs[QRTBwTjDk8r"CFgꅍX!%2Ak! r)5 N{ Fep)V ҸS-N\@$8 DrјK\ne"bb/! cCVcE[B[آoShEX[x:4JuId+EQÐ2rfE[t f[(3nhcY\3 BW iøWzHJOD2 P31w8\,qbggk@+.u'%3~ƾ, Ƅ8{CQP#U їٿHWf(pW\]bߐ L =0dH#|1| D~;xSJ}DEPFJ" ~Nش{ ~ @yaaaZ&*E:hbmM[ ?>#a~EƢ*i!0-#xb|#z(N]"练ZqX! GiI ܃m!@okJ5 ҰYS9y}ףDT4CD~m=p~nI1Ԉ0X;:֢d")uТ=~n>6囑Vui<_6cϩ OT0JK?[#lvȂ ÂnX&e|@МQ -pz:̳k qvPtqu6[ΩCƺ'gc/P/EN>xkz{:#[wROT1X[Iy=m:ʳbEjii{Tg֪Npu|hKOf4 p|wڟۤ~.(p)PyH (pD}  97tq50׻v4z;!Gio) a_5g:.-%*e]QyZwJqm5ޮsW\^m#1̦m_-غ rNlɈzYX ݂y} Bl1X.Vw@IDAT؊gRаo? {=D!"' CTLkCo'p ~qΜ{H"$ ܽh'3[2uÀ1bnxu5v9˗YIh'ЦNakplՌ(kxcu gUX8FBwƣmlXD%vmc]WPqsB}|e b y cVPSN+NE*u;NaյL%qXCVUJ 1Z jR.^1d9tl;TJotׇ;aO6;,aApK>|MZ,wG9wjo.c;N!.^x+ `ffHdmlLann$? x壁-J$juĶEue%$d5BܕIGD7(-B~Pj\f:w$,]5q$%Tv S>P(0+O\//+;I]: hSƏ'/ ޻K)9ߤ.By|{%Mpi0RI!9K\j@$85_3)ә{jPO~RF-ROb:TS@uۚ>I/FuSKT-}IYME{sEvڕߚf$rAM :Fue4r VFGӎ'5N8sړ#KjJkYh~vҎ}雺f5n9խi<#ɫ5s])#k"u$%ǧOlHV)L$4w__}F_) 6?:TeQHY]H~;::9c}Y[r0_ !1c]aF]TɱxOJ\Yp(H7oGE@/ نmq8{` Fi"eA0W[e#H90 O);tFQs)a¦©eؼy#!\@4$R@WvM9C Z55T~55l[s}걄դ#"T=3͸$SerRvnOU/y Ry˫kVzwZ5;2K lVYW#m]v2>Ϋ}M#r휫w;Nr+j!]uuIG;BJ_l Fa*^zq!p}l%^ԑ`ŸSA 5h2Q[^b#V9}ը2NQ9\=\nWc/#5(@0@3ގ߹~׀B?}jEVzLvǷOB7;HJPSޑ!Px;^A32o&"i|u7,O 乒4u@6rSE7hVTGLdZ.p,nz>t[( -J!ҵ?$&^ok]t {E'z]/hR^^:M05ǁ AuP@)%Upr,U%B kTRF$H19׏^s:_I9~뒎whBh2)6nifJ:YҴ)taHڨ*e V&s0ym`,~+R)݄'Y5%j%y+xʸ^*e^ 2SSIK$/ tc^˭I&C2}Rw!KJBj&SP4qhu1ޑb}N'tפK: (w )8?CNU=h`'^bri6<> cМZo4:u.#&i)[jކS׫}]WS>Hb9QpFsX ҴkXSo438AO&hZVS[^; 8+{g$!f,a'r*Ji-NMV$GWnĎKWsec;. 
yt'$Ibeع7;./lل[s+ KǦ+Xy%~oVb׫nv<D»we,^v[Z5: (p#jd2z;F[ h#[R^Ey韋CL(]*gRYѯߠ8W-32W )8"㒗)ʊQTD'\&&0fx+7yO~C)AN>êӇ!RǾ1xQ>=4"H!U.݈QW_@77=26ƣaY r )ւHSbQ蜧DKgd܌v+\) w)Ԑ?[w*Z[QGLOg!J ErJ)e1iWD8663nF6 +,[Ӫǭ<=FgbXG9W[^J*TD4渥^ICAWBqA.&2xP'ys&3)‘fY [7Iqո  b:pt7f= ܴ!:6b.}^WEq ]"41 Nm]֔Ѕ K{,C-cbpT B4wvݍ4+7OLgN&r@T9k~9s\\LpƓSb {">Qsx+08Ás9d-,@'lbq/OaoWlXOY(诿XhB\. pdfXw ׎uHGXcظh^͂#wnS|r,$4o7&V>-F'O`[Hu$1D4{uLq>s(o}w,ڌ/R u"}ߖp쮝X9{ ,P7EPEGpNKfCp:Vz^ BF8ZZEon,4Wf" mg$64&ɋ=fmó/wtRW5 aJ0/aҔn\!') |'΂g{b dWv~<]3qdVƤ: ܃ v V+{{{=HvvY-ƫSzX{_O?F~\lW\1/⧏RR9Q}u;;r p b{`bo`jaB]a~,6dOGLJi_c1 pQY^__ cx9a `,݆O]2go4lm|9nG{M) klm_.Ī(3d9' ^<6hW$g"8|̜yܶ_q24i|$SwsIu%Iık( tl9up4b\<,k F,GRvG_7%,KWˬ'rp9Rg%[0vB^ l &qԆH;KG`GԘD"I[#Fb8f|€ƅ O_MyU>ED%UJSlm捇@K8"-֔?9xJ1ys8*Y>KzDEԻ/C ]QH>(ܘ`L-v)I j8$ZGT+ol]CŽ :jR6Udj/ޠ5u8ls"+u49!]j3KxUR&Tl1 pbzdbpm{4q.,_ÛvL7N:WGMBJk/v Z o?(%n0ehHU.VquH7Cnj p"5Χ~uH>谄,FӕԪ U;RCwj(~uͩ}~]{􎨸$.gd 5?|WWtme {318rܭ':[@e s8Yq7Rr;Fwk@.A{afiɐzHM6cш8qN{!̈́JBUP~$($赶4@)X SH%ػ`).OJ=im_|* m {FF pg/E%BCeo~J2,}nNBb,dHW|L?z9?{P8س$}ee`HdZkG$澿#l굟pAXNjipw˦x~֧aQș/ o{7a!hfL_B!xWEofwKRx1̫ԃ0Fnz.L`2>~p +k'AMѳ-.ށ~-~yt} ~cEɥM11N+̉%ӿAqM 7D_\aCX5 |`A5"e4C1+z&HմRh0 +]縕fQZ\;3RV.m >,I>(j9LбrHDq\1Zq{vc*6P!Uclߴ; DcY10th'.B},#nI[U:̵,FǗþ8L iGlR9^3FG?%#~]M5zF5䜢{R>W1 .ߎ# #!%dfbˈNsZH'AJK5N[3%m|tPA.&=eoͰN$ڣN 1|9^-#"‹@KFhܒ% qvYsAw=ץp-_PI)@YZD@0XGH6)|1QBC x?GR& \KllaxI' šW) 6?FEH\ Spڴ%l~}Nk3D؍z,\6կO]` PCFq[㷷Uh_n>XvV#T=s[q-eeL:#; <K6.z4_b܋xscV'ۓ&-='aD4_ȕ ѐ* =(/mj:=[xϥsL*D.]YbSiᏯ ;N[äAZoլ,p#4Q]Wqȶ;7{4{;7a sWä>d!"vLYtuG!J*G54xTNrdFj#4_?W}xCϙz.I㰏fId>svHƎv*!<+/3=Cx񂥝 ٚ|z} uL`Ñoѥ=ڌObw,5}by˜Riq~ң)',׀֔fo]Z+'^F3J=X14Yc` 4ƥsu쵘ȪҦl+b2>q0SlM'0{yYTT0ް5餒*ώ1+ m~ #R^N7uH+u-BttAoFCOϚps PP NU8T0n*C_?d7m)˚[7F``]<֘ $CXjW[SnÜ(Ј_ 4>:s cs `L_1ʂ@Uh[gסtVO֖ڧ(7wŸFHc.&>tDrBm`OpP4),ۢc}6W00_!GW 1`ؚ$1^XKtMVoLNk:gРo4 Tp"9[(4w1QgHj]g# .G!i:;6,,Isz%WBݔbUc^T@,0䓯,'02PXmƏE"n?dKي/BY:$[*Z3?`K:燬r)@TzZۿ>Òzk7u: ܯHz~% QBZb*}2P"+)j:-QCvuSpݵ(->^B1Ou)-,h$֔ P")tZ .T/IRX3x_t7RC9%64]gOG֣^.8~34EH=%uJFh\"! nȡ2 vJv12~ybn w%J-fMyB*8y2Uiika$OLIzƶh$5.z=8]V tRgY_׍ wӥKH&펽Shɟ|ɡQ~EI! '%]!DZe̜4A RjHhˤ$ 'x!FX؋}?xm1`B5d)aʚwX[r>״gST=ׂ}J⊪zЩʵsDǢyԗ5'[j_(.]J )rfi@s4#=kLjGkRP֓!]W7JdENq֖f7]!V=_re*^S'۫HGX̔ÛyCD-!ۖ.a>9}]L>ڄUN+Gߔ^ 5dyo8A &2Zj2e zT>V4˧.a8(}m7#󦛻.ܗypw?/Y!k˟>}3TO 0jlI6 :Rņ;lXPl&UB%Md!Ā Ny{w'*RK3];ZQg%غm?~[Z0Vo 7ɭV%26sܨͭ-OяH# tbOޢKѦM̟?gƔg~>i#EXs+srOestE+[7hc[4`Ri8Qm']Z1# Dl< HMB^Z&B;@3xx\KIloZbWհz{t}IwnLܘWW-2a|,b"n8LXXLc ;Wx@)Klʫph6|8n݊%`X5u5,Ut"@`vвWQ̈́<- .JDjRAQ<?m޴|d+Dů(('o.rsSS8CJˁ|ѯ[ n?;V$ߛ\])\{`=WVe.b )]qNgOg_Eu-P -=>7 ~ Bd›Ϋg}b~BUq"^֧B )yZU-6X"U,aY,[ li !ƕ(B`&:?)ۆlcZ\O-_#uTB`!,&c2JT 1]; \5T={T>9 c\ڹc'ك ?lBaq! 
t-/?pw0~E+Mu$% LZםdb |VЊwqA rRlj/ s3 p3rPVP#w% B#4 |e}tWp[뮦•{IXɄ gIѺ28$*,0ǩ8#ρv%XA8 _WaN|ǥ#1!|F]R8"Bi+JK<c0df )-}!\sj*l* #7GkN dd!h66 bŚ}(_&3obNZ‹4*[tJb8ٜÙx>}͚ Dn(_eYʾOWߓ>}o4pP}?mcGGG+;%t4(xa:tHKMCv\6-XŒeV:3jcHf@; <;N, v5dP ɒH@{ yt1' &ry5K4̗S;lR?'NaGw"<93 A.(cǠI7#7 xNp1_#\:ebP~[ #'&.Z ^~n hΉ50+%N^ F8(܌,k#NFW.^]>f؛Xx]ɺ|&2a<犾[>Co=RV&HBi9C?P¥[:;[{O{.NƋ Dhvj~(?tH;#bJ//>ps 'n%x6^=gÌ{`ͼC[}~*f?hEƝ`2lE CO+dұwKm⏌N1~c`Hc_) 8W/)o4w©#rr (͢|'klrпR$$disMyW^9%p) iQl7.ʿ9G kFj \k<%P b_s,{xƕL  /&c͚e5͚bj sٿnMف^ȭAQӷqS WrС㇜D_Nul10K6a= UZKlXYْ睤kL0LP2ls.dafحpWEJN:5#%o;3b6ddD9P±1I$l H٭kDsfj@0k ͛^54q|v:>8P%2foF4[ 4 Ə?<ؼ)khtTL}HNN1hɩ&vɽ9Se_#z򮾤{|ȽX%j@cʥ9c2JBF'mT57=|ec>|7ԫ8VH7= UԐ##2‚vՌ~.lE-m} bjh"E*@oȫ-X#QZZ"<aa4CP9[-u Đf?~EAf*@@&%p$@em|a}iqE WWߝUh?Rh'ݴ;vgۻ7q)O%}-9E+)}c!0uC59&2g@?_K,\%H1+wD"!;#d"v٣KYҿzk"Ev:Ns[<~5ft5VjB(%M3,;ϻ3VKYAxE{Rr-#䷆kjj@ tB}HIr%%p%!'쌩%@dffiݻҔZj[5Ԥ hJZ06}˂O& ui_mb:eU3 iGr[ 'o|0a=t?mx  an GS3*UIlL%/ gRg!ԙTEg_ٗڅ <5_ϻrW%%ƛ" !Ʃq4q%i ,Pۈ;\AhT 4%g"wjwSS +D ]-V [[XȹVg@-C [3H|C !~jY5/a u0I^PGiV`zۘQ [|1=ɔA[\([î S;#Ƚan!y 6 r?:Azi g0Fzgp={f# 2>BWv)?u캸 U3J`Bqe^7W E| h-U>Ť.l$Dj:Ag; [B֎bv(4N@qEu#r2+5F/% 9ayIe^K -93j1n8¶R9^h W3AՔU[ !)I$mb6hįsEO#jtK#!rKAYJ9Wz):xbUDoֱHOwq90Feėս_E+ɧ  HIAzZӐDP{t.22 X Z s-[Agm^> 5o ?۠=W+fW&j)))ؼy3vE[>0@+Uf PL~.ݕm_P\t" Hko<>$Cµn㏎CC4\>۲?c'UbWV F\& 'Qs 4TZ6$PScN.k6pnCފ EE8s@ t0?3Cb}krt3"3 >8y81Dv$šB}h'? vyP﹭I𛂓N#I&R#\WGWih޼95j;HDCSRӕAK-x^Hٳg駟A&-?`q0o+"c& F,*w &~<>8S7#ۈ*H}<7`P-M]VoCT†\HVZ@ IW@{ˠڟ2Nc8b~̨"; ߑ+\qVZXSԌe \S,k}Jc8_ i3g':ͻ7`x A*.* Z5䷥>efƕLiv8aC^srsi>n#բB0d |׭EץJӨɽTI5vލ~!**&}OOOj3@ŌUJ@|b6˖= w #(x3i'K1߱fsh2_/VWoqQ=),5t*Ē1X4+P poӤ~ԎD4j%^y`O QgSfԐ+a,|&}<4,;?Y©S9X`~I'r&-4CnwN;a<"4h Uupyޅ.67\Sj}~r:GU!\]NVTfb'8l:ՆSew)rwcI 9Ղ6Eb0gQ}?|8wQAz/e^Y V|4jI m"l9U]2:D[ Vq~t옹wks*Wj,m,H7pt.T/)I&14GX> F;c=y!Q3Q#[5 vOQhבL\mZH7PUE v9@ɀjA޸oD`۱jj1}9ǰhڱ1<}qC7OW*'eSj RRQpΖԡFOv܌IJEj '2pll?}I)*i35KK̜RL&Ep$Bd|;OߌYSWp>!qoL|>yb[3 Dz>y'1x8E854KQQA=P8m/\ Ǧ3,Jʑ"%µ*N&Jr9ԶB:)) гVÕLjk&@IDAT:2 J98']srx?L"fM^V{×6ҳ2P2ad{p }o"sǢw;gq,_i35Y 醹ԑYV+CM4Hۻ +7=_K,;BD߁-9Yb}Y6q},0.CoFB'NY0%F!ҕԴm94 |՗ꁋ5܋!2`1%%UqYbˍu {48X*]%Ú^!Tڢ{bE 6.^9zD`Dh+mhj&ᒵ%,?C:_arjIjH?D|Zs: i `\*/qCkYC]p"9: C@ 'p(. BfHڟ ;F˱1Fo\8cGѴk 7[X ›  [|éI-i Q P)z« 54e ?4EA9`FͿ͍L{-,?c`p/L$w&+gĂ)'i| vFM@{1Zvt!&g2M&*+PRf @_D> 1;2Ф{$^߇h9s $n݇ )$Ey9Bmh,#Ep'W8Xk`N*^[l8f  /u.CCw%%Pփxoɭ}A|9r/ILƓ0J(TsstN^ţuŮ]Ì-R,-mLwhm8T$g{98@CSU,0R0x.KYҷpBر# :Se9Q=x#ci"}ZU9Xb߶xH=l܆m,?-o33.h8bfkoS΍23؈~CUKI~or%5Iih=1~Й.2s>ºbr8@Nt_ıՋrLi8eW,f;>|i EGwcۺ䤘b⭸e0}J"f9^.VH;rVͺ8 i\iv<"lͩm 'o [1Qj[SƟB'0zG`P|MZe`Co  ` *Q|AzV@l|DX٪FSae+cn4e9 vRǦ`L=k={T{ڌl$3Y ޜd%Ŷ^*Q-ldbkN~^>;UXQ7Ь:u `+ z5?=+czŴGC$E6 Zw{IehI1IY{I\$qM  M<ʱzVEgad{&ie:mӏbx4 mv@,CyEY9"0oħJ-NnZ9#_Kdѩ,,Xc&D|&n~Qxdudj;L45sXcGwÚ4a|#HXa3ENFz\8iCbQj[C)6qEay8'}~x =;c_p!5,kdxpzor_F+2"$qǿ1]RjLz`s,N i^7!#c:g9+6VM0H邧rWtA|v>Em~CH(dRg _W3rWѬFvӗ3q"/ϗBItޙc{ D"b,ϻ[V=m:/B\}Zg`($砪E!:ԱO D#]1vK ctrj(Wr.oUr]DrD%s1 (WVY/8>$".x[㚋WGWKs˼:o^+ud WM@>wMf8̞ vZRz@[:!EiƜ29PWu[A6o |=|3Ns!zG lTSB#)Җszu=Okj_4+} /_QgS]PقEG/r /P*һGF"~yjY!}aFiHmu-S82SWݮeEe0hfjχJ]tuwmDZjAN&6D~8=ܽaILYNT?Umե2o`բH==Q !NA-`4^ GO7'VEa2V̍Eㇽ2wjs-=[7cs?Ǥ=Kr)p O6r84vS%.u>axq`!zSzFཟn#Q#(- "[Q grE(Iɐ^bAR86SY,ns-H>AݢeOGA-B!8n6pщBbFmSjaAsPvM'̏j98H 9VҩN1$0O "on"v݄,D?9rzEj;klyrYZS@8ڙbB' &h% [߁co 1efi~M{ÓÊHސn%m >}? AvS ΜR ջ{I9S(Ԕϯ !⽸ZWg(*n * Z0JYa6nZ["/Ѓ"}tȑ'UZd_us帩cpm<= oe 3P\.[ɛe]܋헜k^cA;`:.ٰB@nxOk09g+v!C}eYGF+u+א1Q,^< ? CƊC" Q GgjjqC]Ε~3UqX ׾V~ PBQGmlLF \$/ i.@@,_Kzm\zeӗ}h?\$S~5vd)( mzb;3`AǰKeјưL쐗 2hPkWgiÐ/gQz[.CCC*I"5N3鄦wocʡ(atHMeV-?{hڪJFp)vc<|W?tXf? 
nc*9 6&5ڎ){7`>`h`kd~=|`QZ *Yq 50~%qgزtr9Z c#N[ NۦQ:x.egMG[T+tΚXv4!73t:` Y G9*ki_%Ѫ](0u3(HQ;\a U bY[t%yȥI \GK s=l܃c;"lG<mY̥{ 0=!0>JE{-I $ N2zGo`c9sEzC%H;ip79pvMLd2gT✰@t4dYF;Mٺ^#ȽDA]o 1)Og#7u [m)E|?7DxRe9ьvaZr-//UoLhpz=j1daŸQ2/=90楷:/gTVF:M/LͨE1e^3@)K+qZI1z9rJKJ"r֣f2xN&@OrR믨8VPj2Lv%ї0'f/h%,ƌrujWA&欟b˂(w?4 mD,L}q|d;x?Ǣ%0s":AS{QrliV4g,#Aorn#? 8U M~eNyS^AGRee)ee:mf.$r/NSzHQV&??y?^̉ۘ.$ /:[vCwbÆ,[b/03Do gGطžje?bbAwwƪ?thhq5IJ,#L)E< +HuلK5l`czcu\τ#iNn>tY&dG5pJG mVz=9-yO!C~{0sTK:i-QhAvzҬIi?}1 N@TAQ|G |tx O&#%nl`T ۆQEbXhy%>-ѳ_l>A8y &<2~Zĭ?6mr>ag~'2i{sCqlxlV"f!txQ~_#~'N[:iV 0>Gs:-YHrBӥ ,^-L2-凗᳷fa@o;ؓnYJ4VuYDOь[1uFNO܃E?ŗ'uܦx37֎,q:-E0ޣ`n;*yKRF>H__;MC7㣗rPGi3gc0w;"?!_>=6yOVf{&B>1OECkwmJV˿zM1]9ח$Ĵi=z9Q}6y.MS![>ۯ #sDܞM}"lݾՑ`ؿWĦ!8W,Î]{Q?3BhڢEdc!M|]qNz]-;v %ىDJ`>+9iKn$%9qzN&c?`Fg2۷$-wt*gH`ش}NΤz= ϽX#?ہf ?Gz?و5Q{vRf1@Y+|fOǑ2;ypN0`%jw &}0}4ꐇsE{,vQ{)u+N;?z & H.ǐJ?bݟʱZb|JS{ Z vnc9xBhVVkW[6!=d' :wjYAmEz$M99?ƦD4, 2K^4 'C[ɉ)/AL#(ۅ[yPkeJOu+kq.N)v|bfC_@ڣv3H3KkxG? C 5s!4=B]yɅx#0 ^~v֢-#EfAhBda`˜"ڄř ){"k;An}"@ÇK!h:倊 ;D3CjOڪsm%$Å]1GȯKA-O;طrz& }Ƀ&0z= x#o|nSC b.rV:mxsh5= XAA` ` _{س} 1݁{:$ҁo].u rg?/c6'm\o?1_̇_0=cgqq<8a+>WޙNhşݘ2So{=`kg]&6P y4L?4l:gĝ7C_6z2Y2:(s|2iz{1V ZOEt%ʷOV&iS N|[h;;[.g|ezBRxN;?r|3-bM*& v19_UHrYZKV$*e* eCjYQ_uSA4mK\E>'?A~V5S[c8OF:U^jک׊kPCw{~RHu~>jRS3s wu.y* ~撖s m5\oVH;ԧHQ(EgPJ$O#>ed]KۍF>Sz&&[P#" 3qy)MmBD#n%@D?>6&~i~XLnJqfǝNǷSfw߅CQ9PQ/LMfh9h&09;,*1v= &?fCeryAϒXDIj%gr\KbF 4/Z[P_8 |0cN8/ɟN7_^48T=;?^X6c8=*|Bd1x6ҳ]RyдY,7L6|7V״ߕWsltX M-ag`Ś`skGa<S-Dth⽢jV1[=~?_CcI"N;0L]Թ^y18ɡav{IB0d@PWH$bV'Wc&㍡obȮ#!+!HTI ZQJ*Ri>"_./|H>1Ñʭ۹kUh^k(gu~ԫ @HΡsZW(~9&ڝ˹v5oʐZ)WEVj} m0\.>z 9+ r8"ٚV[%]_SR򜜫rf~rv2U=_- _>pP/9f(B2]mWSV|aT+`g֕*d >86Bс#4Bp.hAn D8ߎ](me h#He\$j)eCkC+, )-/XI3.9>.y"%bYǐ4 KػK#-K=ѭGGj/H3]wNs Pۻm+#WvΥBSLĞ?'^B6 `t2ेf^c'ԾŘqtkp9-{ /1Co}L7;%{*Ysq8;f8kJz8st`=kG %meƬ5+dLF pK4w2~}'3Lzsl6iy /V솓AF ܸ0`7%:fg$ęB3a50S Cz*;"zzY:߅Z4=8q#bAFL5 k(DbNZG蘱ٴ厳WМNe:ڊqt+7;i?ps)e*Ҫ;E4X,7cD0Xm4WiG$̆K;MGq /u}Bnݩ"ɤ1^0|ȸ;^I4y֤ T<}Δޑ dnZD{fۖ^ ZVL4] (3c _. +G4ȑY,tP)؎~2Wd|( g+35Jƒ<򩄷 kcde\,݊f!vGWq5#,?UV/v.z }]qgIHjݐ}] Vi/s$?wRHs t]cuW>Gr$5O|P˪,Ю{|[t1=:)` oSfh("W |e^XF=Oě\/wy5oT2ЦN:r!"HڣPL@Xtfus$ga ~?7"iIE42$8y0<(Ù<8 ytP.I>Uk4Q]ѹu0%{]Z%W cs/ң>{o<:GЄDud&fމX,^Mb]ȍILZ?Yi'tR5`9 ' \O6p@"ӋKKdH6ו>b8z[Y2>`%pCK@' tOi?O6AH!'$JL ÙSeHZ:C]^DZ hd>o˜gs Ll54C3lϷ萠5<&B pr*F,/KOqdRx20GMW/ v:9Ȑ#IJ; :;Q2Ñ$ISG.>_RWQTq1Uͭ!~jڐvԅqf ?y Gx 2&E̩|8sGT^匧Fg_'$tMhy"*L,܂G3cqq6iZ TKػ ȰKKIs6n{ur.0w@Ϩlc&avsKXHz8!95| |fR\WRq>[:=z[ Y?mxs֐|{_¡S(O7_\{|,Uiس7ctNܷٙy>/폏c£#SO^-`P.Xg:x Gcv_ Tlw:a%Y&$2:O^ a+(wPHA$@Fg;bIj0&C@o9BC=#1o㈢YCr\2!7!ZZ¾oNwV(SժƤH7B.gutq9&n* N`{ӱB)j'fx;|W.Ù| R'6304X[ &7xWS"k毀 nI^ʩz]eIz`bo\F~Os Τ~!g%' :z{,^xm<^|H_x{w9Bg0,uX΁߭}ሹH;7=ٕmR*ns $xea#i,QG&&?9vLE#:ގLGXua^hLw/\LǦuS0I{%t-,`pYٝW蒴Ѷ-i͔k& \ΖT_BjyJ8V\lݫ )Od"2U++e0b|rp,jggZHr~ȿZp:@Pf:˺G>/`(c0) 4:Υs.-CW>ef}.F0r쒀ZXyd9rj?0:]rj)A'WK۽'s@hת5\<@ 1M g45HNςCл8o'?E*O>hkl}7v:]l;bG=5QQjٶ=63Gh[#n\4)4VYHwaLf~ W ~߄a;#Ustc'>RrvFSk$$2%_|C_Gil#D$sJ^Cl\ʫ L<>D.-niCF&Yyq s*eOT l5xj TWщ:v†/1({X5k\̆#PN=9'֒9L7$yM;㾷?w]C ElBv"Ὁ_a{ DTg+m-fHQ=GCMB#4#˕ ,9 8 1v:dneWMLdo%!J؏_>XpA2ͲB0ɄHVZ0P_xX/]^a/~a̞}G E37Ĥd<!L|deم"% u-Y.?! m`'3" G,д_'vYa}2[7(.&QMĽy0ComW af̜~wzҜ5㋞_?[ÇI :j;e=@EY)V~{qj&Y+:_} =9nWmp0m:!aZ̙' Yz FRcCȌ+Cg1$vw#mj(z EjV8~fO#pۈvvq8{Iަ7Rqc:vhDؼK1Tv~7,~}7cFEuukJ] 2hpox e^l_]_E)4T.M1J"M"Jr>CΕŪMF/\!h|}a0~|[?g3RI;Ѳ:t=Hic36WW{ߢأd}|cb^nl?s?f:7_Ac f̘wQ[Knz^wDg/ӧ'+vTPH;!@ n{i,݁ޝ;wʙ3ߜ9T4.m>[6Ӊ"UJڹcpnBzt0h@=Fԥ"E=¯c?Er,-u\O?z#i*xrE8[7 7L1gM%SquS`F7V{#1a*gYJ@\fL_? ^EHčiIUD k%/e)ꘐBFz5G(npv8+Aۑ$|(`)6+40Ue{x=4_Fr&l)/4>i<oݺ?5WQ-ȑyp#ޞ*z MC޵xyg |*\7^D9g]1{*#H[]bpߖǔ'$? ]8߼q Ss& ħCcX'V|bظ9"uX Gg`Gy UkqDWvE iǎ";7=;z9ވ`\C0(v(=QOE3?ןx-:K vC&/| L. 
(3-\Y7o W& ˍ@8+@jK4t"6T*.^* mN_dٍo>º4$XϠ1oOCDGr0mpu5m[{ozKI~.nKll '0:Fҵ0 ak]FPzХX!x'!˦ y*--Nް >N3|W79>dM3Gjhɩ^5IOkp3>TX b1Rw7+}^pL Vcn:}O>u<>m@aٴᖣ p2G:R_LAD}B?DΥg=FsCE:ˆʆO-Uۯ c/;GMj%^I___LdŘFoi򪮲 ?^Ct dXdt78U[U34D.eفdW‘(ɡE LourļDAW@g#e r:Fx:aػ ,3܎̧Uhd~" FjA%? *[#D"<\2wAgҴT`^t*ܪV|1`H,_ Х[OD҂Oa:3UuxI(8LCu8*WO&Qs='׭2\Jb]_%I9yJIӌr 8AT[x~D_7752Xcͽs t`gggWWWC^ 37A(4NY}7 "t k@R%LUt,ACXN7_6_LS@N=Q޳ 5Ǝy7,;Jptim߿ZMC]Q5v"'/_CڞQTmķonA 74+*`C<} e*B)ږwZ;,qR9&%`@IDAT]#MՊ\Y-`x~ŬkbBT DHgdݏ#87yvAlH(p+V;mV$<[D x\ WZv,z;'6mJ@z/?% .˾ٌZ]2뵪JKBsl,z>p  K,I>m~BzV1|2hfV ܐ-mh!շ-3dT,;{XޱAЏ dZů zQNWddӡ۵U"k1<1X"&#b֭4PE**]@nrqqСCǟb˖۷7wy3N-SŢ\3ZZ1bn嶭X\} y_Z7%Tyz>pqC }ՃH]>luny0!Eύ_y3>現{o\4i_xD60gdS7̊eؐc oQŚ|VPB%f"n{z>z#ؼ^AE3mWf@H3|#\;Np@4s4e:lR/[AyB~Q!$ 41@HzFKSUxն[лwhaҖt2{mTݖj}v1c{`_7 ɓ/D :_,܌y_(YkvOX<<rQ)JzMTh_Z>J~tD HDyž uSNx$g+K6?5MͅǘfeXh9NT. I$wRw7,-nGKN3U:kh/0>cyM,hK.</QlVDFj"9۰:#'}Ƙxύ{.\=SV=cL'.du\s# 9Q;P7G6i޷lZ3'sɒDGj{77v_JB M:Ү*-XU[J+VQ3}xl][*w)Ski F1]%\==ݡt%w^iF nZЛWu%bO:ڑn=OEUqPMej5o XЦ5勭J 耨Fi):UrZ6DdGxfn@hƋ`,!95w+izRgoC_ v зױ6vn2ԾqkamwҜ# TSvdj }Y5kMVWkKqڊrP8wQk'[ңY+l,jU+GՄZ2V1Ӊ7FXOl@ΰ= \ [kQ Sls-MP.Yb s>մKZ1WWACSR+zO8j8hmu13M1'"D~8/tIJߖ17+$< """kP܇{yw=QJy0]GZN))4G"ct6𸶸TlÞk)kW,2}\;iq.(a{S,lʗ ''AaoO|z 9U֦N:rd <廑Y;6*[.vka]C h+[j/S^Oui_lܕL#V!嵾s9RUOJ, YMPLir-O[ HXكVĊN5Z~BI}NG,6e ``KkPV~ʩNqӦ0 ma}(+߄tOTxPPUzAg4/Kx8rb)/݀`[عCrYQfL// :G(O1!X0vЮ;:g%MjQ`̝;~;|Śv-W֪*zwTtE2  D?:44eӖr?@It<;WD]}5ȣĺD>{dVsSҒ2qC)u2]Z/M wڵ+ϟ$ziٯ Еv.[ ~mCN`IWy|c$=DB 11>/\4HU8i4,gT`K;Z$!掸Kٖ$u%m]ʬŒZ fNq˗_,EuZ ֝og+ *肁6C9Ql}1/ZprwrKC toLqu",,bɒE0)ؠ˿m18frD'J-ɭ1 q!iL[4nAt8Hr=Xm)iJDj_'{Ù" BЭQRN#Yr5zgN1җg'Bp5janK7\W!=PN*8"ZڼNOڤ_nZhij}Sl(Р$(Lnݢ[S+^gYy9e//LUƖ%0^LbJ6MIٌYRġC[}Ŧ.6EmƸ|Vgdr F(QׂmK8  fkVķ/¼Y/wR瑎*6{Y^,_Χ:¦6CQZ: ҂B-r6}BWQK:Ph!jV#j,Y 8(ij'R "T i'*6&/lC7_qL!fl_Α#8? &QYP*άym%y?/:wG _o'RQTS2<7@s"o[mhF'7!ft>f&CdEl7`b|};JE:Le7Ykt0tu-#1mhD|D?R% 1aI{Bp t!g(B{(LMši(I,__ڍ \ 2DY545H;Rk/=wT HRТDƙ9;f~5 f(#"m2#~&b+sK_bͱ2wxhrӎPy_WBO<`"^WHK<Ҡz}wQ9?};vb#w"=(cH70W2'b|'d3 ޳a-Rsrruf^&h^ Gf)0dqs14\_xvN̳6x!gfx Uv|7;xefq'HX#zz^gg_S~-^['tW3K%x }9r00~5>U@re12 q;a|۰.y7\E7EXlݪ෢>xC pGm9]E/@Oה<1݃#6 f}CΖem.jh'Q@P̶nr(:S`X Vkb$%jʲ&)ii`a"bGkm U#p,Pf-yH-$gW8ۍGĵz)<Ǒétq"íը*Ác~ݔCyNGק܅kۿbע<5kD Pv.7Dz˸p#ץn/ǎ #Ncwލ`Vcw\z[B Kg4깹9}{戻rs{1f }zr{XOees_<1ým(#lBCmq0g]ÿE 9>0 C€X -X5$ !TlbFUF"!POA2D"<ut}ɏX {PSUT 34}ĬpD3;V )NڹOᕯ}c9vRei>-Qwiu8BmEQ~lqJx"!*ܜc&9r@t2u a׵DFѦmEŽLYpz;jm^)e3v' .6bі/6Y8a]pIH_; lҹ1tb4kD dxjd傡Gⲃg)cs!wͿQ^hs:4s7jͤ0](p9S!'.IE]f\SO.IAKdzUP@Wɢ3Td(/G-O@{8Lj\C lBN;x[L٦$.ztF6R!sZ"k#=ѭ(Ό1só.JT;*0ZsP3ec>M~5k)Gh^rtW8@Y@zъ,7^âZ 165q[yx"͚QaR3 :Kڍcpl_&8IʼnĤw}2~`ܭpd M-H%VuѸc"IT n(/P)SE)7®=.o'ôYiI%l jrA,~w3F=n/A؋UQlp<'x3B\ gXN$[.K7AiCHJ/O;Gl|RxSgb֗G`s?2HZM5R)lNY oHd mCꚣ2Alj79lT6|Ơ@\FD`G,`xNW{OWScAѮKt4& (pf "9Qf)u>C6E nߙ -P& [sŧ'Tw@6P`o&\`F0UQX A@Lޚ 3' x UhYD ꥔cxݮ.mu{G ߴO}gXo .:~|tv޶~*jE'0+삀\hjс8z4 WV4n]᳼pOrSyҫQQjggǓ3Gwv웃F4'azKpj8 !CHy P^u>I܌:L㇗մJ5 % CTooǀ  W_GgEDBcmMQp#qh}"E!GGKL\-1yH,qS݂0w @kckq9]sōFtf"~)6sCMblf l6q$&\/~(ۙyzu*҂-F7z8Ǯ ݈j 4R*w H%Bkz*lT`R-1(Жq:?sPLEG@rQ'@ץ kẃiѧgՍ;qIQơ}zW#2ƴA/-'VZ%Mń_D؞D>T?ɸp=v=|DED{P&!2()N^^Fxf\!@("V\& :Plr"@[VkkXJDI?#`jW2cym]4#cx9os#~{j迈~c> 9AyE5|_Uy`9)HSZ{Lᄚ*'k [Kn{uP6b^fF(gO'L]ZLvt5\X~rO5<=`Dʹ5SJ 1 #9&V7C7­ [[ϢH{$,*@[L~j:aWz =`(Iȉ7LcQ)OocǪ]ݭ7{9sP4'3)(pP`޼yxewPVu>jq&IKw*FA \ @9r*0fDpq啔᭥mJ>~s\m]|W}53gaη? 
_Ac1Hphҽ3ëܴ́vmt>(,mLWlt>A7:]?ViK\ <a#()S>%AP+H:)uPʺnNҖ3պS_I59WSv~iO: uȆT-h˰ڦ?yPPUj Ur1HJ1\{@q-*g'l?6ޭ}N%F44u 3CB0 V8-&qὝ;^no$L遟~އ%KG?Sa=#۟6EFδpA}3b&RNK F8NS5߭k]|@k!y_Xjm|u$~t%ݩޮSu8]'஥NTn TqxNlےp&$w!N mZIChH"uHi۝M8÷O$\|$ϖ>9UH6~L  ~Wi}EYE 39>œY_'&BBל䫎KbL'd"=9YNA.KhjR,]jͨ1`'#""M9grϖb& tZ pFMhY̨La F(Yׅ%&^HMtQ\Uǒs++<^j_"ej7b b͈q1|2k̐å]>f5 \?JKH Soo:ݴa$|a duT|rp;?X9n!*x-h7ڀuss@~7#-CZj!2JY.RSӰm>TPYN],<%哵tb`M Zb`xڦ!T,q#)Q@ƉhamE^Vwԋ=`Z›™ح n)TAg0Um*KrZaokoo+C nD1n/T0 EYS& \l } 䞉|݆n:]Yixiue̍.&C*nhf O6UZjN[Ix/áZ C^1h.e !ݓݦkƄVA# є誫0k؁RQՅy"pCx{NzrpE8ݼ([%\w?~eOAN2_R pC2ˮڏSo6\-߿F 2 <>N5<⫤̈|%&&t (C [RrS Y'sDkDѓЭ ^ot7/{$5{ eMnjhNj5zThԼ*4PFAӊ5A]:9ZÓ\U//'".Η. ⓖX@|.Mcb.8]?2kKR" "1p@P'anGS!a=Xrȧ >)p/nb{'XI#WGETݓ*AN= t %7k{D"D.6/J.!{`/ 6b*FEE-M9=U!C|L_,O0=\x7ϩ~0e/p}"+m3l({VEI;QR4}! f"k@$n\," Q?N/h8VlxA3WAt?u+ޯHܸ6$M7g4N 7޶;`EyH37><\:S1SNSDNLDCѨӦp(pY\IUVaonzX,A8DjzE5É_EE#?d;dSZ;:;rAɃh~8= i!v: k#"b$ëסb43# z@TAEjF8T[omHo]͡|{K VSģRV3ZK\53g于yg |$6Y,CJ z^) 6Gmכ#2SM= Lgff;QJXv?&Deשo pT+ؐ1_~U9epΕl&zZB@GF4+Vśfc#S0^qnCp"p:v.ߍ=xy >_ޞC児`I Hl& _OAMqh{Qpnߊٳgc ´ǯG|8^g@X2Ԝw?_L_&n[` suD11pLW#51 `+q?Cnx_ЧO5Yy$N(\Qp p~3]`4[Bj9DYb舢/CUN*UûJ( |-\E{U<&囲"\H;NwŘרnPjJٵ|DC@p_ɵg,%+"ܭ+F=XB`F4úy{ }b>NM̀o@39OCtB&7ꑞmv`D|}"ܝh}{b((Jb@ԃNLW?8 GC;]-/[w^r-RݖsrgК8n*nCzf(Dؙqf|e'x㍩ t#E͚Z@%NI:oGZӇG`JpZEOKDjriS^W3_Y|k)e3_ETY(ʙ]< 7SH%]Z[hxAQ`NYxzlihURJ(QW }1큖֡bGb)Oj?mѱ@3b r"#GRadljQKKJɲzriۊ5DgA~].Z)5ӜtL_V˓%r UXsX̟`&'/vY}$T\̞" Qez?jmތ׿݌H-݄AcQCd֓vhrp'G*)wd5 ^k <0" q ~gUX@K!E'R6S7DU|Es)\ \ w[^EDL¥m]ʙJ1QҒrN䣌 *`YZ wX "rwS=JI6pp4& 59x'UzXs* wuTWoi?"3 WAaj| 7$2hưLCu(ƣyzoJQHy`rs#lNR% ,ߎ s{d tSW噯ĿM!m穀Ìz8/=C;e64|6ŰaaOww;Ey͊\uUXLq& [ہE1XU0@IDAT6ŝ? \ ĉ x{'Gc9Jc|m,_q@f&@ 1))pzˏ:Wq3>z=ep"ʮ'\zruzdgmOf @*OXSiƒ~vvMn\@XP;Dq"e< \[BZw 3R!0̃O{_C<ɞDLM@7Nj wWGǏFB퉮((Cx#`n TlOI:wYq*H ZWZˍFXjIqJƪUP^PhW`!`>t="P|Aƾ-AƯ z`GqN"/+Vص(Zs h_*qUx-ɩWީMW& \m-@@:(<_3r_G걫go}% ߆\~NКF "Q+G\쐖R8)#+% 4T[YEY:r_=q Õ\"Ӑ; b;{YMEB:P̅ʮ%hőlK_GV"ۊw*"4:<8ji;o['WnZ SQMr-g=%sH QR]1ޜؾ_]y;O$&|ݥ1۾b77GKkt; +Z19Xo[w;ܾ!+#G{nl] 'R2aiDzt(:V:n&/` pQf#0{,zCʤj$e Xz)^ D kv,x5 "hH@S,0M숦Ŏ . @MԼp pnh S^f#xPl YѦ8ZRk^};r:z5U6NKL%HMʳ-iǚ@Z!]SNSiS2B3mQn4yR <<6MnJvyD-UͣU8) 6aqjTh<(ji/8n w[g/~ =u͢- ;J C w  (GQ^觓S{<%-HXSʑ>ʝP_DFy[sĨ9vs*S ZP[û#2$`1wJ:bx^l$(ېWJ"#u+?^֤ŤH L{&1ViUvPF'bpuu8v-߅t@q\_:|4 {!= om(2=M&Qmjʗ+FEW*r 7h@SZEiEe-9z:ԖD财Ey&GږjTZUSƱ V|Z½Qi-JQ%EUpsbE!KaxO.P~:>~t}.LUz8JIͺ \;F7Y"ZA^rgEW5+fQ9ҐMd{)Wkid.'yM9Z@; 펩N&0?+\cqd1G!j$~ %WAE.-S|@ӑKEᖻcš݅oq+Mn_3JjnGUu&lF*E#9BQY0t$daGp'ݼK3=ׯ,>A-w OBC>Up E4Y0?/^%y4lј0im_J+ŭݎ(,ra(\7~F֜}'2e82r=nXmu2,\!ҭ%x[d zrgDvvf i[:/ȧ،:jWkAE_'t ?wR,ڋ&ܹ`[6iPpT9U0=?7U&7X'H; WwJ0#7?{F4qdy J<"MMC`i'>d]P8E2ٻ{/{o@EE((*b.(E jBz$^9'7$!xYٝݳ;;;MB~@[l"4٩Cg0k^|62k`"5x"; "^j-Q\BjBiMz!ˢݓ d==c[)'g? Ⱦ^`K?Hˆic`w {8^{ľxQ 3]A@)4^"钑"X.n@ tz8d>{jOg|^QhgsI.M}y,4?S`GOuCSlh9$Bx9baٷ3,OD[ Ծ8l=?,|9vRm:w@mh,XDOOa )(oߎ@ X|="n?^{ 0$fN`⃖mM!xcpfr,4% 7` m  ߣ5o>][p1qPVAӶ{1-[0.|(el6tEr9O&(f |10x#x` A𘗣@eDK&gyI' +I}҄Ǖ'N᎘beؙaaw i ьRxC }a}lm(etE˫'h E푕K8I`~;OAaMн{(Ӊ͍PjXxyxXyrK4Z`#@coX4vs^Ƚ)ΈŶ9h9:Geи!Ե'Š'3;k<(o oӋ<#hx<,ᡰ#xW&fS+PL:x:`ǚ? plW2O 0!,t? hvNT3+p¾D"v' %#jHbH[E@Y$?[ ̥?.T8}c~vDyI 5/ ko̲wkDh҂bD?p6nkԌ<ѾWTFYlE!b; KG:2-q6mafh`FgWo}RftBQr#&|jgO︨x\IF*gs!fI۵:4W<5_Sjg-x\J)% Ϻ$J?7-NV2S10kHTxak<D2򠉕)GRN<|3CRҰ8- [>Frx۠5}ϑZ^;>/,O6tV) uqZ)e;g=jۍP*DfBLA,+ZC(S2+>*AΦ1,.@inDT"iaC]w. 
C/jdECn:Z݆T+-gyn7˯18W{D_Rb PyK/8SI Q6zItA}<50ګ٤ϐ}X?/r,O{}IMQ x:3ܳ3oB߁R>ۏh,[y:@f^)=37Նenk, n;TtHT{ٻ WN&9ً6C zǖ+Ӽ,|g:O{݂uX!Imyt>?g 9-.1琐Ls%UNʛF`Eرtlo۫9SbP/ ?֡7O욳 fФ-ZOgX&.kFh hֳPۚe/pr{y;bΐ{J:5qHH䢠k iZaI[ 쇽釱]06jh`?.xnf C#/=fÂ6/b`(d>~z:`!LJnK,l9&Zbs ^h;iggA,BΚ+cJ|wJa1`^M0IN28nJ(_eXKk,|gr2wno.@hـK;N,1_ů;\g1 f'zuEW"³r"TyE6a"2 ygd8+`IMnP+v "́Dk-uz8| r-@jqOZl͡A0"rkH hY@}CG"_+ *x m '?~^s_0Lx8 ޻=', `A!#dg&@v!hc4RP52AέIH06pkg5\-Ըzc3QC)b:-{?u0w9YڵipX+ C[/<6}<|^@ Vhڧ/zvvҢ [evmϦM04FG >Ӗ`{?fٍ|C^:H 8aԟPj`>`FXp~|;>a›ؼ$h䵇+K6I왻 Gmi(ؿWmAc1ضc 6 )+O>"lVD w;'{*[lݾ 6 k5xLȫO=/FGfGI'H4ԙ"H?2*[n I Neb DCW`R_MrlyrEP\b2n l;i Y8i<7n`>A&rѱG=P^"`B]U7:&&QeLJwip{"1 'ibΜ}k|A-7VV:? : p9y .(V{`uv<7&BxZNΊHbVPJX&cScF>TJ[%*'P)m8[K=$cy?z4$vFt5=mI~>x sz+MuI2I$ga1E3C 5l%#.eT>I#g塘8kHQL)%h5u&i6+:td؁:{g,r"4()^KKb_[+3O fJ3=ڽ_.&͉}L;BlmoMU >#Ѱ}U ;̐|SW?&Fn|vzHTa1Nbd_G'=2@)N==C:CC3k|oOuС)+@(;̓'Xxwf/:aNC+q^x?0 ޟRKhqDˀ4˿ోi(/q@"JEJ/p@.ꅧpSLVW*^~S(YTeW+wT߫Wj풮|vyW;2_gqU\ڮ]<' MtPQu1r8 w%<0kTQRZF%, ܠ%O+иESċCWs">Z STN_2eP_7sǽ\}δְiHt؎ nood^^w7Q: YY8p~;O`묝ЧC'1x۴E @ {*R),KNB닼ӵ'|po:YlEZlEzm\jR[y+ZQqk"RцI';~Z_:[ɋ)uNqfxgx Q[BX:x(GbY{tV i }r.g=҉AkЈN(Mحv.Bz-[⿯-×b089qf|9UTN9c;PxuYEXDcF/CDycXՊXh6u7"Z"so/+J'υ\9A=rowVC)^t~%8uii~a@_-{̯s5ʩbBॕX{kv2k'1y;DBUE_Kb}&UIQI\jת)iΰ8r4;l×_n+AChK^ \!dǵ}k~\оQQ/z޸zN_Gܠ fPkR݊#z+BWZuu^y6'f?>5(cBk236`ƒp nзgnvAԬIr\=qNzcH\Y_DĜBPfzйԎ0y%dhlѐUfhJs2 ѨCcX1HǚlD> cWo4ིkǴL֙Av*M,C0B#RNFRejmT!.^.wsߙ*slt#Y$|]Z;I25̠|KjPT6%jV:i2L}yMA+V P$p *ZݏOGaV ψf8w( y. Gaw<0y*q=VM`BIqaZnLԨ*%^FQɪHu7_H̺> .ת:Vz5J6mKV/Zy%^Fզvt`a7XwFNЗn @i8,>"Ӆ.z_G:]FXD1㳭wd2B匩{A們ʏ ww'wcͼ߫l'nN=BSmBt-^\6ZmFl8/rc3CRc.?V%Sm 9RG{]7i={f1 O-LF\LD& 2Z6UOZY(P_#hdJ}7/i@e` Q)L1xP8դnIhF)ptBIR)4%8"G|d@';LI0 wi@M@ m*#IhSS{1y"APQ'鸟1dL<ݝ 9? MNgp0ޡ@n]jnm8edֲ̞D4 v=SNEL͵6 '}QcݰoR,X@2 /]BO7dܩ7!FV SÎ;P𸌨KljG^A6<&9KD`(9MK2g&ҏ]$uI̪C谸Gq@Lж[x!'BxiP0D.bEȦƴA6hl±=1/4OFoȹ pdn9צA'l['e_]'ٴ2BޅR;e()>5{Hl܆ٖ.ch׺2WnVw=KGLZ'mBƅE@^=fnճ~Ǐ]crju˖> ר+G(QN\EV+T:.D>A <_&/@BG`2"t}CN3jpMcoG2%'YFI.?ݱz) M!Bi}60e0rq7vsݜQ@D_U-M:8޶4!GCHwS.rX%/K^[L~!فo6Q7 2?ۓt7h7AY=v,og>BjN0BOX\.O'=( Ehsaφ l>,R7l(ls>ylch!M} Pҕ#9=o!B1C#_,O-%rjl3 +SeD di.? =ٿӹ' q+L`^1?}:PCˏ/r|3lO󕴶efj߼dsi^ooBɋxآbXvti߹a"V3\ ^ō]Q@GEq!fuhQ`?\ɟN0BrXD/Ѷ֊(?x`}H=O[8bGn Xr#bbc KM*$y6e+ Dp1ٰYN+}#CEWCe)KMT(eH"_24"PGX%+ yݖs\->ƛV "K`iNڴ#929c2윳Ado7ba|_Lbl 8l%>4_l%Yt4vN}~ C G`Kߠڃw54N?ӛh3a"&=|q C!~S0qU 2t8ʃ'PzQA+0mjczu6{F~/$y.Byz:R\j1 5/sV*Ov)aO~VL;~2)WCpEx&hHBzb,=1`t3L0%_F %cת0BMEur}4|h8$)|P#Ֆ±ŪbKfYvy Uuq3&f{7yXpr D&־@ƉVBl85p+uh_ dNeQy9'mN#rQGUrEkַ)җ+MMzD~g(hsR㹋N~a\ ` yhPdr`+` M)[8.ޑM2EzE׃]6@hV7)XeH}vȂ+B<u9݀\vK󟆓q*D>[XvD4-gv4ܹ(ȸ29i> cPMk8Ҧ(CZ`gOݸ}p/V#>["| _/[ 76?> J\Ibprbw'ڙ?8laj.ᛉSd}`Fij/KE~ݞz=ɛ~C 5Gk^ xa϶= V}09g[)UhB_Ǧh2Î}xHDW`xbZ כaU <ڒYifAC -m<^@ra.߄ =KLѸk{xPuulfė1$lh[wf~m(9˖L-i bks`h^tH0"^K*ͣTkEX9nC\#|}&8yCGx*&f4q,pmSȭjR+j3&;✤xxU ǖmd6V%NWW"h[?>{roE Maja"z[/=aPd hAbNg}E|Ncj-c'{!}݇Llp5W:}-ĻamT#Ch:ZiEGI:Eh&bCN' qZyWT8#h[:-+x(40+@`Zң5㏃tbC(vFZ8AX"A,X`hn;Gp>'XɀkZٗ@؟iVF2S@Y e4K=+)nz|gpKLmUPG|}EކΕx*eEXUk="b\,FaѾ[ "2"NtիW dRSK_J aNtgvJ]T/xIFʭpyM"rIG-$GZ2I[n!Tu $B/뗍kHG^_ÚA@&)@U(F |'t0g].oeMz:  ҉h$'!Sk4oAFb]W#^BBDY̓yڃV#C!dqF(Љi\(#\ݧ 5CSB9LNJjK'GҮϙWݩkí0)ȉM=o}[J4ѣq? 
yl:z$dOO=*rҲᣝ@O̘9$ޞ9ִ+9=/h91 MxDo ׫%Xy>GK((҇(@;[,rv1> Ǭ@C =YQD8FwE S ­Wk$Z:5pr0CIA..%glK576-8dm`]y&jkD?{f%Q5!ؠeRV®C$BoυA*Lm hګ}lTC"˔g&aXQ~5ٿ1,x"MZI%GB$/slOj+/HsE۞DQ< B]>:)kg֩]y_oM;Ɯij|AmW=6g5ضmKe6fk '`e !<5R8)d"2^yץOtJ9*ERAԺʺB:H)Z^^*BO6ݛBJ&]9R<5sȴ[JE5Smj^uߔ;7'/ I0zt[ \D| f,؃]ߴ7WJNwu%vF6߿͔Q΅\Xw9:Mqz3a2$%p$ k9CW/'?D1!C&_وTuޟJ3?ǂg͗0`n9Ocso#kh(Xo㗼mln*>1d ـw',9EQ(Q0p@IЩKM̄ J`DI'LޏQIgCz\Yx*8=B\-gV\劔/d%62C'ԕe7CG64V4T{ݸ@}\$+x14=|1)NUJRoKB*|\_aK~/OC#tߔ.;B{w<1?EP Aj;Y9s& )#&o.BS0iDݽJyaZe7  OXu$;$#[vlCmI]~rga'G~}cZ^#]QJ^k2;?Kc+ o˫vToz*_l*~DVd/ax,t .DS슕L1wTy*EakigGaxu>/E"1HqMmAO+3< А&Qlc3`3F´ %+Wsigjn)j8(M@tIk0%I *!.O.Mq@yYH%Xly#μoOe)aFFND+PA 9OŌh3W4-12lFJ /'lXYO(D|)L}6w8j(4sLVu ;ZFދ?spO+LLNe ۈ)>>@njb.Rj@@z:R?\2p~q<``mBӳJɐu r/ |xX 4R BBS(&G ` f…c| ECC}rO,.SlH8Jc?]leeE Wwj"Vp|4ƎyD$&8X1'<^m9KWoa~1 ɤ<*#7 Q?f`Xq$~toIiW0DPjrZ*n@r$`F4YV08 y_j+U8h^io&s)u`tVȞ۳SpR:ijB̊(OU=@L; yQ>n)l| O/o]%1qt:=;a)O: qX3g?gw/q2b )Ov|vƎhEA& NeY`gгK9o,J!6BxvZ}vnxH{W6 wFpG-wcokɸ/ȝb{+=ݹn: "8"Gu aZSb!ȸ^nXF!H4Ԯ3DR/!56Lj(^70Ѫ>sZtQc! [Ӧp!8à0k~< ל)Bb݂Cmq=o,&r"`VP[|@[g7Ua#NʖS+  uv-Pƕ)/5YvŸu{Wf#NчL4 ^OdP y|9m?N ' F BEC,^Fo z~!v;2_ꇆ9 3wx\+ %V9w> ~]2Rr$/0} XVSW?|>e2bĂ gtcj9Ui黗"95qp 23 Q8qfs) d>4)xwy*Cv=pj'.$J;je-ט8}oO@l iq4'[J%D9ּ1|~ JE9l=̄t: $cOT|6mCю&YQx&r}M{%/OǢ;49!ƒvt-څ"oY2/c7般'8p^@PlMYX4%E(fl<֞HDܑx$KQb:`nf6m_U ^7s@<ьȫ;%U~\C6(ȼlvd^܊Ru%&?#'.h7dAh8p/Bg>zef<9kyf/uNz| ڿi HWzyGQ&!)&XjcYY)Ī"?HE X|Nܕ 5.3JKY0cDj'x{{#g[4o]v-mnQsVp@)E%a۠Z:Q\srWtyia[pXǿ?All\Б ywnq/uĴ+Lx|\5mܠה| 9aJh[ ^&-Yb߹[?_#ᅏ{A+fuOCR9卣 .ko` : .&&rS{.G[j%SiRYr %ٻaB\,eHKc4=bNÂЎZ#Sh}PWtNi|a) )piw{ 4c%ŽBGJ1v"jB(v'|y  } pdhKRM"y* nb#K?urίH9#g ڍDI(%P%GaGc51=[_\[q{R.# ":wIlR;> F;ޕf *YrCd%== 6TH2vE5*F@Ch(K8EsT3Pe%JУP} 3mp]!Ę ifTUb$P) S\maA;p3k<3p6")ҋdѥ1{rkwb(6m۶ȑ#¥#!!IIIHOKMO&.\Fnnbb+N%rFv.>P3\\H6Prd,LhH/XKXX+p)vvpuuUNBY^K{ޱƌGj_z \SQf3#}ؤ OX):]ʀg`ӓU2>(ۼ>;D_*9`6(Z +,=q?!)@5 ]`Ue~vuw'ۈ1:GI)؉gw@AB.lccݽ~`CPT={s~/rOôfc}CBτC9uc5vE/sٳ[c\L+ >gPqgY`a^>l)y5sA $yXc1}ȲÄ1ce/ߢk.\\0Q_')6$S8Xc5XiAc{#Řh5Wm<@FcQHrߕN=oE*-r C -CiPd r~\PBoY_.⬅G-u-Hk<5Sy/ԜyIr;7?._VQֈac"vږt`y)b:>v Rj4 "y;A̟^T8_ӈ?X dlذYNg#ԥ7#.` 'Nk#c{|L oôMW! v^뒡5OrL!S>M8W]E,K_fTA5H'#at zIOXu0 D???\h' Nbv9(1RY&rs8z3‚龬$w@nEՄ9q9Ǔ4p2)!L|fЍWcp_\  (Wós78[T<n]@^c2Eg@ጝNE'oXdgFq`GxKégyhss!B#` k@(q֖w:*̇Dac />BȆ}49> p9r ¸ ]v Hm2_f 4H^9ޡliL3_ rglزj8bЋ0C0-n~g#GIꨦ\Ǝl{vG KL8(>w-ڄ3hɹŠ$ɧp`F(59>z+9v7ᅙ.Xp'0A=X8cN\_ z4u**>B~*[z:pzOǃ%}"d5Fe uy Egɖse))֘[JR[{q9Y477iخL'ToYiIڨ #H~ޱ2mo8.=bw+׍Wp^j0H+įC]_sRΘ$aD(^K>)u cRC mFfA?4P1a%J~/ْ17èv6 2n Rp-õaS,g Wa!ْ~TFiWv/u[--*}A7ޥ]ܫjΨJf;.~@dꟑ#B2K2f<{a͗#MU_2'ƺ =1擖 ٘Gn\5CubV|n6cYj[^bԤ"""""""""pM!kj:\yd֐.?S[3K9,(qg6[9~sE@%9u@é 2sQ^?x%#y(-%WDFҕP /7CEhF9DsonTh._SǩG\^̢znʚw%QBy*+ݬf h6Nmut1u5݌J_J>E0œ>x._) p%,_ %j~;c볱m)t}UD"ui^/^*Rj> *ra89xmI iBd3:"i*3wac\=+Hbrwc>=hHnނMGfv qEŠU8ek].Lo?4Q8}4ϗ͟': ۆ!c–stvDK%zZ])ʤXwf}dNfU·KK,ya|9q6N2$r2d,,LN$?L }*?a+mkLǤ6s W{Ъ8$NeKk\+LۥHHƊ 3 #m|f|+/%aBF&cĕ`3T=OGrV9GvDSwb6GEHJ)㗥'P};NVW\bLq^{>Q+A0~y1J<F+C#Llкo;; J;=^x{(lVrL!Jrlj!k.Wg`ͪi,^.1+/5v !$JscxQ6o+ 7 O>@im砅e>>"Jx5jG&HXQ aKB24q`8bҥ} IJu#Ib$eh_O0%Mȷ4_?,|x4Rk{yZ9Bs|1!g ~syȞ+z ƳS/GO? 
\4 2kH;#6THZkw}7~y#߾^ŧdpgUë{<cǾ!H6%8D.DDksdG m KbH$mqPT&i5ʱQ[.'RN  oB.ض _;z4O"UcX`Q~5Lp.v\0-d)VHDQI$"$/KIK6UJ,=GmGu@yQ *1lXkC_Xtuu k`gmg@XjJMɕ!5(9gWRh8 4ܙ`WwT撨Dض) Mvi{Ll[0H+˄ BZJh X[UHiԔx; K_ق4w;uEX=FcHJ"v.|`{f$6Z[?wj8M!JLˬ춬*P*Č%*sUTTT!Xȿ"q鹥Sª$J.l!2k#f3 jnZS⹟Fp~̢Wc[_SPV/DŽK"~e:=J?4ᓈ6'%!H('$AڝKvȁyxsq"v` FHȢQ-Fm1}yQ#QBkG[Y@#;KKXqY&(-#N(#=-4(:lvQ3k r~CT^&L*@__ c̻74{?NF9闠4TaKm֠UlW:lʙk[rܟ2|A.Aq^4^6(̣!\Łbb./Z 31{ RSPQՑHuO\EE#P|S2PN~@i 3:Z#n2 3;eo*\zvW@Ћh6ڠ@k'W%]$?2>nyv".9c wV~/q8u$:/Ēu{ #{p!S}{b mVh7-+Dl 5c!C ^Jѩ#g?ImVjsX>s|l5?lDn6=N,/?}3 8?RhW#IH3mƇdE3v#z̃pAB嗢8% >8 R4JrE=""""p!@'K@mA]I=C@X&* X8ӍrhL"aCfi<:̿O,L6ϟ,D/BE/Sae򁗥5zvEs˴X@REkM?[ {Gbh id Sڬb] 髵%{'֘Ï 6}F/KUU -|q=޺{,yίM-6hOzZ!Ӊ/<nf0WX{a`lV- mS@ 6k@|EQ[3ւ|bBsol7mCAqUDyql|aEWerd+ `nh\jJ^aùNTSՀsJKܟ?uJY #tG?7Xj*Ic*eN,6.#NH>\W~~p?`9)oT $ljozPR>P*ϻ-=թ$YmEE@E@EPH!(..z^vW"]qe~ [xzhؽ3uA~c4&`}Xkd(o 'R,9X $KƶjxU~s%IH&1^ IL|0.+h xܕzD״&˦,ǩ#GnmXP.EV9ex4k$+&t X8_Fɤs.1 P<4Izghж'.ZQ%_!$4]F5^k eV~-#PZJ)JjPձCT3);'fZ,:I81 {AқZ .$ Ӹ=JDO-ԫm>9wζay90^5s6k1i.^XVtͻ楏_ͩ"=g._,O6_JՂ*KNŀrsQЁ$MAJ6MXRwT FGuln{Lkb֢$[mAҵy6Nk]\#CV7# }[Lm+>ٳsZ2jdoi΄+//a8DҭLr ,S-Z_N}NM=`j*KɓװWN[RTw =4|}kŭݔi,+h~EB>k׬Fb*7HV?2Mj]58-Ӡvkp9W 5+/gS\5Ԡ:uIZ-U575xg6{3(RjR"9WC3'u8< <}t&-ZA#>BdJZ$$n߉ Gtls *Ƒ@H$ZUaF|s^&tUsy'!^ ʞbͅgiܧXSq˰sNBL3h~{ E7 ۙ;P~^1OsW aVX[W?f4XeشzuTY/BfN<\aKO A,JedƌnÅ*ZS^s,9#1:mQIQE@C6[qJғY_g'נÒo$RՕOTDK$X$&p`l:XSoi$Fa]zKcRuoLnhʗCg"KzE(I ļLx^,֠$!&1:}YtZ@_t=Aرg|?y`:GwÓ Y.^&`jvH,z!$12BOڛat7h7u7bC5_UoK*Nro*3SI9$G2YH_0:/M)3BK TNIޙ(*fX]姥e22'gI0TTD)\%`B-K*,y>gMC9KOQN䒩a C2OBǪ,xvmwuKxiPY!.0ch02ϙ, 2)Z+zT;䣉%s-ĵTSUC b G$7:T IK·:ΜEƹ|ڰwK3i2Pʰ>psfMƱ)Ѡ"7)i e6$Aq b'܇,Iö<>|PƾpxMTWԡ,G%e5V<l뇄~쟰".|Bm=b淭3(3oBexyXS,r֟KVI%fRPP{"n4 7a{l6M{ QiGB!R@5]quTҖ8| 5x`~6 '2h&1C%n~Vtp'xS!> Nk*@Rb!LldJj0ޚ+SCbjhB5v%g_iQ q<ë8ɏNrpIZjƌ cHma '"ӺIok0wxD;gü_5js+֒bWV&8OM~.IzW0 v%._"t7?gEj8{6/"6ڍro1כ;`K_aY| )a:Wk3 99A=`᣾^;$5S<0 ,1On[AΓq+Gמu`Դ~&.Q#v""""w!U`TL:FQnTG+v?&\NF/ӔӿOW;W5XZF¼^*\ P siqhz$e@U.r5ݘ8Y#KB5%YHLCχGK_Cs1ڊRNpsvqrv %싎]Qʨ[g-rLH/NPR8xI ?gϢ-c͘ 3SAWq >=qgq0{®icQk<4M|UmwX8en/{ӁDV-+LkYٺ#+}G? ߮wp2ii{>O{Wͩ h]؋8sN+,]DB >ʼnXQ3ǹ=۰:ѝP<ؿ?]/y7*5tAŊӢ}}÷QaMr[ 4|^@?X%@޷ DB6bDC}5bx9ƫrʨyUTTT0BBd)z|f IJrE!q(5R Fnhk^+M?8SRC#R\xGأ}|k^]]~VOhֱBOu̢utFvH.Tr0ajĮމGoGl_-Cڸy+ X+_ښR )&~^TBХ_4%uCqp4ZG4?vnWT"eo*Z hAK;CDEZQGn2t5؜]ԸerO/fa}|F8#p~5ە?7ԨܿTJE@ yv0A@hk^# 9hJ)_r+W!;:Ni.ن&ơ\/ E`_>=?5urck zT,ƒQD9%c*^SzG0HdK8]ƾᜅ-<4>l2 @CGn`\cl&9ls|v)ݷ'~fxI B7Ff Qt6,RQw5Д,+c0H4y>Tg撴-lmnSv ^9XU緦X?z 'SjjxΒ.^s~ z\f3azرu?n@ͣɗ +It$(ȅ1+n¸_vmhHkK^AȐ4Ӛ q}yvebۏcؐ0|7k,W y7iFO57VIڌ@s)*Vo-MRLC HA+?ũ!bzԓ(/SKB toH$j|Hv`^v;dAfx Yj ÿSWZ~Aszv `I~l-d(TrXQGs΁Еq;0 8C7hҚRP2J}%I& UaϷbpHE-čk6 f O&3v0>B0K/FHo*!񲠧'w]G]ۏ2nՖQ$q>m;IM|gP$IhSfPCIhMQ.-Abp8~j*鞫bdWC̾;{s'+5;b{PXmdń*w66.X6@Qf1^(M/G'|ۅBTTzy< K7+vupjt;i{!x Wz;qVS/tpBFYX)zƑ/R.\R& r WWܿa~Cc2$z\5oRxc"`1:隓ֱbaKуδ74Q(+#z hwӷOF^5IN(,{EըYVftxO$}ŇV& X3WXWd=غvvpp1xQƉkw?]HvELy=Pw˦S\Yyv*EoJ%[: ]=oÅqJq-H֕/!<$JTxΔNwߌ/_,@#^x='܍Pmu?,ƂDdۏ*kG"16Q$ѷvƧ(G#&fW+8e+wgS8p>mOU MuFLl|I>2;+  T |X;nQ-挵桺?# 3 ; nڹbPJ-plFc`â;3;*Oa8Q[p/߰_Ù1t% 3EpwѣDH􏴬6"/F@t~) yxm.<%/Rm*:.RVN5OWnA@.3'?7ADW=V;?Uڡ }R7<̄]Ȓ47 }B\;iN-@O Ut<^!RN*G7DϛÕTT@̷][ñٔY{aԣ7!(yR]m 17E I=ZFVZXP`7UPl3_[QZhnڔ@]V\R-n 3]$3+^%yVEWvمHLHB܂X:ij8q"r%2,jWvTAxdA8U'a0Fݟ%YBBuI%b졫(=-/-CyqLgvKtci1ge|Z+ B^X^ZۨG( |;кR'aUL#Y]QH:P! 8VTs>ۚA@P7(Bw18V|>!:#4SJ2Oz{|/\~'3aB*~e%|^۔t% (U)\ OV%(MQ};\6(QЪc((A4Lda5o&!2+ chX"TXlYNG^5`#ٗ=`%eǥvR~9ZakRHO(L+e\r12B!i5wڌ > 7`HoB1o x~i3y]:MRia8"!=9fL9rկCYmyo,UWXZם")-,7R⟃8}0'w=Ľ݋֭i/N¿xԮ Ϭ ! D$رn? )¾mAQHɊ*t),kn0%Gm;Kvni[@ FF$iGoǝ1H4:dߎ[PTDH{DP.PcW! 
FPݸ 䙼?ط.gH.^ Pv׳H]Bu/<sV[oE}uV,]{ǖio 0wl7W8;ɉƌ`jAJťeSH1!eFOW 7 *#q#mio$Hu h_hgc,xxVJm7Es{Bo Ifo?nJr±|iI΁…hcz3%9f^ꚝx=Νsw3D=Ug$kSM!g~usҰ&٫aD ^I2I-UQ( [5"/3yH:I΂DwI< F6mj}_JrRw ck$B401ec:Dꁝކc, " Y5Nn%3~!D 'Wb_<+p`s .ܻl+fI˝I]Nz hĸ8x8&)l+R]lg,~x0Zކ5 `&pRѺ -3=oFztwqڵ<<=0b y(88x ;k`Ÿ<U槊aK (.$UeU Zs"򂟟i!ԭ`#TIb O Ok_/Ec eRϤE*G|B6ҥX= .^לKKAƔfYj"_R5|SUR| F~^> P\Xs4)oĿ* ?]*&VY U_*-׶ؙBbɨkvvptt+=24G -~~}礒\ξXgS/70I-6hO Qy)q09:r,Љ\2%"ޤ!]әs6ʹLrC_aKn6o(E^~!n}ř#͛)TbtF~#lF9 ߈6 dWA!2>~}ks8N9A$;p8e'v}7v͝}:KahV^0{0#5ֽ !E}-#-+0Rq)ayK9 Xz&9G@n3ցzPjN?D5o__þ{^ћU[H^LP!Q(,,T>eeTRN\GȮ˵z_or/ ZZiW!O!&I?0CꝐ']R.$@Ut5@ËaMdj ixŦj1g7 6B@㻗G|X3qX5`UXt*wGҸ~i:_/ߨVm' 4rzw $:V\S)P LA7O͒NI1wRO"Ad&kt"y2| 8Lddg0|<>`xF"iɨ`xq#W$*JKr(e.#t{:1EE*+\d#DXHuhТp(? [GʘO1x߽ mAعa3<_&~^_u6SּML*z2,sDZˑ i vgR/t b ;FغB揢8}8} YH-'5.W}ټ)%v=hlx4WOC>8?f݈^M***3G04bb''aTá,/3޻/L}O$s#[h*2 20n8l8X93g[о`p2kMb@gv.h߹/:5Eˋppl%T"gn}LldǒΡ${D62>XͨO) # ^ENvM[\٣9F=6"QZ꧌x=]Kf#sTRmew劓8u(=}ѵeUmDF:'0~ۏN 0xXؖpuXn0[֮I>ZCخ XfK?eRzKGkձ͕OtBA#{xD#5a6%1JN9^.0Gc!4y\ Na]Ц`:MKxp EDE% !]NNC":)V<mh;Mt!5KOFUizL&-]e!̱wTZwwH[\^ݽazn8SG+D777, L@ Ǎ JԮ֡"""D@%yҫ-e$[ktRL* //O>*Es}$V޺UTxO0K`x1zG Jݹ'Ӽ7zsxe~KPԣE/"|k>}ή<9Bc&7E1t?vx폸DOk*LGZ6nc8r$t?`gAjT Jqí{Q{o?6Jb{?l+fWc ¸C^Ƹ4ŅU~,]^"7w OxΌ.7WnT\ZN:Wߙ: nciDkF/>}(O痷Itkp}o67X Uטՠg~s4,+@le1`^-o~>!3#ޑ ' b= +1v)2Q|_Ս&88nn2۟8@|BG9"M'Tڹҥ؟V-"""/F@%\0tZz*̹lJƥ^V8RQ.eNMe cw+"g`쥸# Xֽ#bѯ]8f}%][o#!(٫O0z哌3nFeJ|</C{?k,Ɲ8Xy\*<Ͼb` S%wm(mQk}*EUeH>_|<!nA{J'q_0kg8c!bĢuFMsm7+v{Jij[pve;\1"Knz)Y>I7D9= 3^+O?v|p. ~S_/OYWIr }zINID)ŷq!$T^DDjkj(EeAv?+Rٗc\v -ᵰ Ǯds+l#][x7Hd5=W4[tgKc$iWev?mp ƿ\u\J2||mO;#x"v?Z.55Jre*E]~ ɌwTi3}oolqt65v;coq%A;f|)ޏks7OӧC۶hoQ̱BKЎh}W!:Hn}EP 8S{0>)bO|W USJ}`|8`w_y\{Axӏ3w ?b *^CF羃P2-Gu6&(E)N@_+%WJG++Jh6pv>[ˍjޗ(8f+,1[)ĺ#XV(t*= ~`6L\LqFbZab,b\7.7ͳa|6~8XEŁhe}ר-$CP/h=3mB߰k:@D᫛*** PI6$J+CͿ-^n z:>cO\C24"DFGљb]c‰X~T O|tG?ErEzWr`+lZ Stz^e<3t4"yu`,_ˎupDv)T響</%0NUY>[W_yx71ԫGIA|1WQQ$r9HK^B^J^=EP IU /ߞ;p7G9?0aX0cded,‚oiW[c{=ٟN9 :Ai.zPwUSQ&eؽe2hKT4Z #S&T] /ӞO]y-Ao&SwgFu=X-No2*|***W1*DG빽jp EˠXhSH-sX4s!,냞6X᛭$ORUŒFqZfa-ZOk$$dY7ª$۶%SoT[ի:Gn 29ƻv eBy  w:L +''u?ħwUb*btb;܅^юdv̙w/_Fe(x#,3tO^7]WѨG^gcU;ïJ K$x 82sDVyCM*** *.j?f}AAA\Qr7-H7rejKjVvijk᮸ (˾39 ǥ903rs;y:29@DV}E&h;9BfW LaWjzѹp1\4Ll_[_|&zj$&$"`e#F|܂fLįg>gሳnL@oKx_ۂPg9=:Oc*%ph K>su`in2Y9D*(,-if}+C[DR63vd^\/r}5)U|XPD|+1O Czm4 Ç&ʉ4N3(c2:bm]+ 8G}C >oS}sŬO(B(;4.N05E(5mUZښ;K9F ;IhE) %knr' pKuQ'˅-EVx\𣸡UELqǴSTۃ M1e ; WQNM{na=D1u;GL"}߯m/|z "OBRf^^sc1&}.;"zAHӱ2˓!T=+;.E2ǦaG~( E՚ fM.Fv0wq1 Qqg(?)5sAX<!ӶIA?m\Vbi͆ԑH0O(\(0tBGLT,bSLA)|1͟ {sYѐШ=Ix4ERzϏeR;E 8'Ќ 飶fܟeL7Jfw+1yA 'ֺ^tе,:*k7O0RӲGzˊQRCX>ey!-BiqųYkbeVX\шృT9NhZt[X4k*Qq>anJ,6n9X|/.e䆠jK.8s u ⌔e puO<|W&bKʨ5Av86Q_#1=<|`RE]wB,luU.pu~lԥCl9nށb87neï0ԥ  ̄5c;h7'ct+Kxf+ѾP=Rq)& Tt> *%5 Fzt jF^ |j!\6$]`'*y9`~[ >Ô)r"Vә2״mm7iwjWA"m*ǗR1J\ؔZ3W>ou`zPg7ŁE2(n`߷bt Jo(>2{XDwUY:VıPxfRS@"7v'أgORH?+C^1] u;  ڏg)3bw`ӲŒ[1 g\%cd"M? .Rb ~Q⓺zrJ6A4;אK ' p#6o98E1ζƨq#vTB!MlW;3yE[ j4:og,B&-IFLYjhY~6~Z5W.W=5[܇2}czewq99!׹ 0݄ Hiucp)&+H@'@ݫظq Б@vq11 RSk fxOC9:"s%|B#D@WN/&JX)M%hJXKBT3;m"[S.[삧4٩goXЌ;< #goH$M3=p\Hpp񂟟,ebkyp0Vrl<:BN!T]e9EA|ggmUgqC6h۱=Y ɗRF;FQ5`Mˆ~2ȌۃKi['6 ;/lLBJ J|ц2B *[wѮ 8}zE#GeP\\]s mn3yJD|Q 72n8V>As(|C(C&U`' 9ZMŶ='f_@_4_la++<KfMј0-Y94' 5ܚSdnmJˊF 5"% rBFV/ļ *DO&qwyTY,tc6#.HlVl2a0j;a'k 7uE½|9w*C*Gg#pi߰Œ`;A&,NРŷa[!ػs*P~b7 /n[Ibg4",qį34CM!VomM~5OƵL5\i={G|/:ː~W}|R'Z@\Acę(Ŋ\j/CbS)Cq ĞXvPWM06^&MܚhJk 1p\o&\^ko!~߶|k*fJ' #o oq.ZMP+ Lh# NH2ǽ&2J7]^E{. 
ټ>PHdғsL' >Kҥ^DZ1/V"/Ur8IDATٗrw>TH 2W ddf =#woԱ( `}^B',j%AeY1 Ka`~v8u)%K^C_CI$b̄X|̛9ѻa)xwޮpv >C;I pÜKh"L~mmլU|06 {|dq&m[)&!|<;vzX_,KVX\̔`db9B*A+to Hz )jsnhB|ڄ _{/9˖,V?KCz ْ=$*zRBS'f~1:xlsF~QovA](5XJ\ 7_yl!WͺxM y;7t@ *tScpB"m'G( ưnpC+.;%d:P =ʗAO^]ɸ7`M;W݆ ƪJu?Z\$;XaKS]" Q$97p#!V@~ _ '7SSK'v bOGiEGF`MIAʕKQqd7T?U:}O q+" ޘ>=B`iϾ#hFkP/F N$PZ?ȟiVLV!Կyb;) NE*-zj+n%AX=R!s[q[ja–=1ً JZWeX"V(Aʙضb:a;‚\\% W]us|j K!%עCkg8fBcd3i3i&&DUXLvWH^R >ml')i0% fI31dK2S+&r%p"?ߪRk(?p4<coĖ:tmu(/ }3@(@.YeDBE0mB TDn$֐GHEHxU!751ڴGc䤕X[ƒfDW d_ӧiPA!V,pV@-gLѿf [t"VLSwTt4Ty 0Gغ$`$$7sjG;m;W&|3Oc_̜{åEj IWap1lDY8L> IXhHE2+r$^<2Ϟ,9d.0;aOn B+1g~ipGz&eږ=AapE@aN zn~[20r$8ZKlb&f<=zmDwi_U' 4 4:MF*1IVts̢۬+tӣت0݀01ʄ9+16>L!4z؍z<836mMIRh}خOWݦ䅊H:p,V) Y"3@/OF7}R($rs a$-_ @3iu$<} 3C;j]%((TՕ" \$ Jd:ly#m?%n(҉k7&-< cT%& g#mcFu0gX={ V1k$vYY{].N!xm`_mˌbR,kgQAc%5hzФ[J YBZѠW)<ޜp&eJ-`/Ŷ_6GZ!3ܔ(Kq,`a Vhս?~b`).Ĺc')嫏N#¡ʹ*v4g:zbwB#!l(l];‘K~{zv9Nh&jQ3im&I({[I}Ƨ -5 w4ts 0 vBb5 kګU/{ia}CRr-ѶK2Sg5n ݳ$KH6U!':bո5ڸYFID01"*6|\kdEc ZAr3m:K/`ߡSx(tǐP&ϛ"I ?z|dޥ$Lú[N6&hp կ~zCy[Ýt$_S_jq'$@קJᯐE$ 9E ܉`Lܶ8i9Ѐ돽[s2C_wkrMCWWL[C cY^rab %  \WOKptW1.X=[Q,·$ ǯQjRԱx‘+pBtȕй֐}l$hKsErJ58DE[r")-L(Vz~x{0,w-qpt<C\XZBf@a)}XTz^OJ`og}q6tC&ꛋs`7vAdbz #&9Ǣ#xr8J E"1WkcN%1yև0_Fqs|9Iֈ_)VS@2{-ww^{id*a6ov}y)p0ĉ l5|wIes6&{#n5 ы6HS{+?cvQKYx}d k;r.:03@HBWMB5_#zZxG_:s z6^ah~0s&K h( z߅&U:r ,l"M`؇boPP S %"'?vT.WBA~H|1)|_Up *ҌXj;. t>O<ӼV1d7:t4NIe .6u1pDPN1[ )#Bt59bbΞ@Hji{@i36fF()) MaKIF88& Շ ]C,):&Nz:=>A͠eR  ;9s3fw֐?3Zɐ2{.NZad錠@Z[Q /co+d?`Px Rz2kt"o%#N&.$>+X6Mm=ll&>AkBuڢMf;Tį3c"mÆ:Q!d?HZ 0uJ6IM<8 N0u%3%j-6 xG/܆wUO h=R̬zFsٙtCUݖ2cvn_ 0f8=.;':ۉg+98㹃0>PI}ۼ%(-? ({7vW' p"@v% < |a GC"TvO:@MYHpnEU_.'޹j[&^w+1yA0{)K w3?>5:qjַ+BOKc=bF^t(r ThLҨ"mVr킷ǍƂ ߈8h BGջ_~.zXtX2zV7zv^P YVլ `3sI ٓ-hO'40˯3n ˳9 pO@'w8 T ,$y0:7-d؝`p7blQ*?ǰ?QƜ' <.rߚ$@=4) (F2uBj& _K& (K/' 4\63 4wo*'^? ax8N" Ny Epy8Nx><' p' p@K'EnKxųO蟚"8Nx³kkaJ$_D^p@!jXo p@-.rkYOc`ˋqB$Bn ߸E`Y ӡHnӢsS")[.;{}~Ze{ȒR((UVVOZ ߏ8z +8 ܺu ׯ_aR|-'P?CB~J| ' < .r߇8N8NI8N8Nx\> 5' p' p@&En>=q' p' pOB'8N8Nhmҧ78N8NIp$>' p' pMMq' p' < .r߇8N8NI?be> IENDB`././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/images/under-the-hood-scenario-1-ovs-netns.png 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/images/under-the-hood-scenario-1-ovs-netn0000644000175000017500000024575600000000000033324 0ustar00coreycorey00000000000000PNG  IHDRY1 pHYs  iTXtXML:com.adobe.xmp 5 2 1 m @IDATx`TUkzr #Պ *6tu]uŵ V\ׂWlTKBK#:}I0 `#ry{{9sd.*`L 0&m ȻmϹL 0&`|!0&`L`A/>`L 0&X5`L 0nNa7L 0&`A`L 0&93&`L!_L 0&Xv `L 0| 0&`L`A/>`L 0&X5`L 0nNa7L 0&`A`L 0&93&`L@`L} zx{zBp [ U5k0jLc=aNt6;<|ߪNn::Xvs=aݜ 9X^\\Jw^9!+s]9^+l_T# , GZJ<ܵgݶ.Abfo!}&A~䭏aL O?}7 gu*1v8&)᭫%( GH(Zv:իˁ ='^O,֔`Λ/a^n{xw6>L cmruL 0 XPQPJkc'* F Ymm:;V|?݆A]i!)^ MLO 3odX,5plPOu5j h nA֜Q,瘳* k0+  ]"̝3 vl{Nh|$MM[j&Sݡױ8L 0sH@$9 _,g4\Y^s?EFn 4~ٻ!Z ʕH|<ᰙ}l<زf5KjI1(P{rK աa⊪|[-V? 4=Z/GJfu r߽ ;F0O` K^ăca4=y44SJ*PCwa=49AWů?.B2 zBB#I~0U/#c?^H? 
9G!Q tsb׶ض,rzzoiqaZ_""B);Qs{{H,zGZZqB?`Kaڙ8d#DD!LI3`C͹HguUG_K^>5r!i>MLaygo"O<4&a01!n7cټ}h8Vp!'`ƌC}o1Wb'B 5o+ۭES#\f8vWN1;xvl#7}.6lεQ㑇cyQ^aw>g]Aj f< œuma|nuHv1FL'9+|o~Eƃ%:6r>9r7)Io8aLhV`9Dq Q^Rǐ_7`ۅ?Sɿ]㧕 r 8=:aTLIˊ]xW.O7zEll}n=8~yʇbL oU8޸xEЫU ðaCr*ÈXVbʎ=pWlEnNd6x" !OYU(HKf4 e$܊j68Nsﭵ8WFKGb74!#1ht*ٻe^]N9b4d7?,$G9 ԕ!@zB%2%#ǐ{29`jDƆf*I+GByGÀ}J~/GbHLV l«{w@?]1:~"vN8w0,}5s9epYO4REpXdlS`bܸ%cZ;ZI44Tě%2*`tvZh3y5?)I:l}߽\tzh;cPq]pR64L۹%aR'tAv2r!䝫}!: F|j<9p *^T i͝Tаh^Vi \HC73j2Jw 0& sL t Zu46S$\;hHXĀ q#:>F2Fږt&qCHMiu!D-VZQ+9pZaw菎\&5tF HjbIrY aq?u[%H4 W_oFrdjxBp ţ Ihc[XbLG@Da"p-}[?.u?"[Fqih+rh;I}צ> C0WJlė}  fUraLB8@Džh  0&p FbBBj jE+PhAavNpI> [E9H%bɪU H9 ؾ}+6@m}vlZEud qㅬP /g Vڀ=GЦ_Ľ#ϾC]lBW_aؾa3TK^a˰m&:X@[zl'md#ˤQTZЄDR[#OnL #P]WsGH`8bBaVa-̭@UAlTLQ)՛BS6 ?x_a{n.&/_|4&a{s3 O\FM„[sBa->JC'!Ԭ*i-g~;$1)Ip߭[vHH2oy6M21ҡKIt:,N'qH\?7M\ve0H b?s(p8m־_YGqD) "b0pA|Cyҡ!`/t&xtE$G&#Ұ񡈋gy| I pHL:o>&o^ɩ0p-a,])y{gn˥fSbĄpK~^̞п߄wq޸0&p> hȩt|,&9&`kG W;-nBdP_s WPycEc(Gn|#]D>6boـÛ%Se+4 %Y;w?#/ ʋpP6A`y#I Q[jVlܸ71h)ҼL8:ⓒÊ;zzZDy&xx?o؊?Y52d0Nu&Q>kC5܂ [v֩EC1o*tN>R" 'r=tE56D'#@ &dz$''XA&ڕ v˕3&`L9q `L 0&ЮX+^ 0&` ŽL 0&hW,/W`L 0Oa?GB&`L +튗+gL 0&@'#n!`L 0&ڕYe*),,Ċ˱u6iR4k+;yr cǡGh4GhlhJաƍaHg6;%1)2jT*X|}J wwwg~g,*0j{gY^Eaa|}j~t:PUUAIɘ6m$ _ Nb;f2|aog^.Uxڨ_9ĴGw^~6(Ŝk.L 0v Ƿ=5۱!A?pUJaMUf, Vhp3a-܈KޕF4޴ǯL 0]Oh= 6w/Vjr"b + s|(Xz<{ ja؋[ 㭇A5,ZWOgbp*ؽ̻ᩢaL<;Vl5&0B2.I@]ztaX: כӽG-q5` *1js=H ;+OӲ=2&iI;{y?)s`;q@~,Yz*d׻!KGøH6iXnJ1du3o=aqb: wwѷaضw! $ x}Rh Y20ܜ?= %# 0&ѽTU\7h8؂Uؽi)JjJUL,P_QVin/G `0F!:1N GXXa*/∂px5`'CM ÊOx7ĸ'aW_hא EFw>.jnvNjW+0o_+}0]WbS^&#& Z/7{QL0HrlRCOrFsnfh*DNF%zBSS'&beAط6AHxrþ C.q 6w."QhCh߫1jM=FETj7rkʍ魃X K"wói7@'p-Mw+҄71)< m" Mv9,z,Oᅧ^8/Xn'\SGR~Fb(D>7˚|@4oo7VD0YQĜ']G7Hҡ0&_ >;7¢Rh)DBua1 CoDƕS@ d#|\}u8PY^y\"y{bӷ i/U#o؏,?^Gs`VGxqذ[ [a8J6 ## =.}1apQ%.L 0`t |ܗ[_(y6"Zџ(ªc"T/f!Mؖ4?Nes;{BiٰBZ(B5O!l 3&p6ĽJA@LPo#0nn7F'f'TA0tӏ^Yj*.54 Z̯vs#a9le(L tON nɟ[֝w[6lz6Ҫw\E6w}Zo`m% 'r_GBc_tRP/%1޴~/9J ;\$U/Kx8cL@|v`]bHWti}ժ}&Rz:ԈP]B8ҿͣ(-+`L-e$~/_Z$yKr~`gO@JQ')ʳdL{8PDڱEiĪQ@s~}L 0&8C =?݂io#צSFZS?^[FQ( LD8(ūI1!44 Ҳ^b#aqqEN#-D"C6b;ǔuªr >$rŜQ~e &/\(xt EIp"'{B6Ȩ EsV(Bvkuz+( ws\DvJWA?WVh[W+.dgl?uKk >,*/nr d ҃/Q-USDz\mĺ-iB1(] 5sb,ZNz X9A~ZknBR-$ՙw-.\:+_X`ؾj!~]rF ס'Na|,[ u_| F%xqruLؿE][KB--]Ga?2 zw!`mYrGC/ZG|=C1n|2|| Mc@;B;,*LEd-|ϼz Wb ^Z: |L<3?\9}? ?oc՛Vc[}ɲi(BzaXd/ އ菸u)u| _۱D!hܺ _1VFBiډ|'["[*i⚘@G$ CEslk,F n+):HC&{)nRp*N Xa4jPr'WقxzaɊz t= )4̘1rOWGf/8vvs srwQ|چWsP}^Q+OYPX||.z:8J˓CG^y5N‘PgU|10hlX{9\>! ލ;/*^s>OY"sF, 6ޛ~G0cpO4`*d4|ծe%.ߔB2ߟgT~.DX,mBjiFck^2rj"mBYn\uEҰ\F߰+CC;B<fz_q;n_C4G.9zaRԠnI3Xv BȊGI9>}}+%3(z 6p`/aw |+{7Ð1v\wUje4>=a6H9B=((,1 gж4ф* ECr¿/Jo55\?ف-_ >衪'Q(o ݧ)149jZpuxZn9"yh|Nt>xEQ1> ͓۸/o5 #%<,uIӕU(XK70G(|> 3W6%>\ܨE$"/f10yD$̍N3PƄ୹pǞX7xcw   omvshYR?f_C_,~a,t@*TTJSxJqݸaH,tVCHvc I::ݿ3׭GVҍ2xt=Aŗ$cK<h lammt %Kش߆YIs1h]mU5<(_KG;x:w3R6QR9l9Pz=124wτ!_S{D=^$h c{> ];9Dxr8K U*'6oލe~ĉWQXЦFL 0&@$0uT̚5 (xƎߵPw޺bHU,&aմ,vE+-E"HK9nkX"{9a^j)K$<"iQLq[~(-6'܉mvG`ޘ6m:VpaL{{''e:[&i I 2c gƮ][q?D":.Wx=u%ȯ=0K3)c~);.0n`C5 (0(ʁanIp\k;5!F[%P/V .Lh 4I!_ |MV.?7'^]$\3 ӧ?=iX `ݐكg6Dd Ԛ-R ˮ…l)=NBQ!ǔ;K`İ$Q@6.Lh Mh4xq7bŊعs.q.' <1f$%%IO5aLKH}B12*vڥ:Gz 5Xv7]LhCor'*q$pI{S1曥e`m#(8>M~Q"0Z&7oB乆b? 
9L*rZ!&\ g1)73 v"ıy;BBR,.N>o7\$%UgiNŤs@?zšO|9%3[bY>O냿P#GQIՔ9J2cyQr]jsG< =@)IEux<qB,˥5L jq&y;|++K5bXK'4=GEy{ v` G1Y` Bꏑ}ċK`O!H]z2v*ڕeE, 5 Qјh0xof(S_ Cѐ 0K􆓆tE~vhUFHr Z-|:xQNYua>l4]mDozLJM}{H^K8hY,y$C.אa8ڕHDJr4vA5W<>5 mŠ1{RJ<F5C.nvP] oU>^:4ILz(cW r qF/&׌ð\C絑zO ƚC8ZZF?"$AQ ѣG; ISn\Xv]gL3l$"\©њzTPNZMb0qt@8EqS)fZus0$"#ȫ,G#)FA^QvF( _&!9\>w#ˉiUE V^y+H̰%jQ[&4ԡކhx݀ՅU> C\B@tns3h/Db5=Y2 k+QVV@0)i?QS i=z%Ǧz44PQUo s7 7 Mg'@:|_HP EugbA}s&.apSȊLvj< {"NaTU(Kp!){HĎu$ۯD$}Ba+/ %#iWv ]'喆*t6M{&\RM(-A$kL<|A6B!r 5/ R}#i# QLL(jH((#E$ Eawgs.&甲Fqb0֩EUi!oA0`AJ `L\inYgF]c=mkL &/\sFpISzEXIP*$;>Lp f!@q$2U* qz!5(xarebU#9 !55O melGI~>ju/Z|(A7S|LթVwuK$&;ashH$  %iɴlX:9P;1jPOn\Fb] gL HQAfNH6m; ډ""? 1AMu`֫e__k>Ւ,l}54_ ")DHb.G%>z}JB0җpUbO})&S;+zԴEiciQC˹Jro^NKi &@;8@#q Nm)BD~\2DZC.L ˀ 0f4tfc^2uǣioCa^Ue!0>m,Ev. >wasiҙpUR͚RmCeJ}_ P(yUŅUfP%;_w&p d=^(Gn=|c9?#99UuqZ<| CN鐂CAs$4lyM9USX)HWiYׇݚn/mCY.ۍs ^ňLV&|@IDATG<ˆ|4|ohxx襶v&#y$ ʠC *|~zOdyr<9!7x#0(-+$L]zɧu=`ݜ n~taG#3 H846vvv)r!)9 9ٛhg; tiD-YѪM(.EEE-L7YhnRӉe$h59\h*;Jka# aSӆKN)ܬ*KģeP!P76{B H[qPMl@yA-- ~prG@X whaiWk8>SAIyݓX +c8TE(<' nu{˽gL B:$|fw7ٚ!%Εđ`$+w!=GE8hh~Xȑc8v FY1 Ebi4$T$l4gb1uX'WSGsW Sbdg!*!A ȒCAiCаVLrR8,5I5(8Z ދFyz#(>^8{S& J~س3$rS]CzQl#Fٽ ѷbh0&v,Ίؐq!cv$Juk($*'˘Bbh̓(: '#!ƉɆ& ; :a94%%Ѽھ7l tID?C!ѓW(.rFǕ+-9Ia`ߏZ7">JEt/%06YH)5CĴBu#AWjAK ohv QQAE 1GBQ4WQy| t@b!'|3kb G:d=Ps';"f1Š;6UPL58t +ÑRF`* %IZ_5s8gX3\ F4Y?ˁ"~-Zwx{0ţ^G/o9fiS؝Q׌'`ŌS!fL+?.#xG?}? w'O|k/QYoL /x)X!^.n8@-lA.ǯ#/,Ndw[ RиcPP C=c3Ѯ7Liˬ8e>9u=LՕdM-׎Z:Zp.n;wp_ &1(EP0g읰6Hd΁1<An8VpE%6r+HL"E8⼅EaذXgdxSNLҭQL 0Ly<3aqqxb|-&O ~aQ6Uxf!;?nzӧƜY/bg [8FR0VU7G1S ,85A[Ӟ};i]Wcob\\Q, ϸ&JM a /SŁ`-}Ӣ~W Y䡪@!|$@ ]CP$$M쇿BQ?̟?|5[R{Ȼ4f R1CʃMApء:F cRb+(c.HTi2a#10qKWCCN(eR;יljj7RzK ־A)S 8+r̩RIpS( JK. woCpazK!kN7 U&L 0v'C페БXsjTC_yIN"?Q 7b5Sy*M¶'y=: 0M&ǻ)i'hySO>k$d}ڲ # %Hihx8G$zՖ-roq{oN>zO(HHR ҫ)nɒU,[tU?Vu{{̾[CHRhp`1[>♷։rXp7y60,J* 4xP9ki cu!ؖjtShdډڍzg̪a" jR}N)Nxx8g÷Ux?#Z>]NtF!Θ=r!0"R¶_BЧNH z:-d h@8o?q AvJd~m7?>tlJ(1sP5Hi@/*߻^|G?7bɥ?,gGEpRlPQ JC W {_D͞P5|T9%6k1;i؇^r-XKĭIx;+/@wacI՘v3^2 mm\{M zMJ)jE^^ztXpp3G\ttN NcMe =K0ZB `{G;JJҍ_pCѬ=jsT1rro\AIj:՞v :xֿk h LU h@8U{N{3g~sG(E{7no.k^O~cˉ(Q "MIS_GQL_~JK) Y9 ;+|EDH#xX[Zϊ >3$h!zi'vn5,!* -[,Z,\#V#0??7_gx0_Tk;qPi*GYp; !Hfa@]p)/KÜ9)ã˖9N'M9]Ƈ!$˒Ș#CkgRwY(1Z=?,ƨm2D'p9~haf/Mr;Ggn0w|EQP0<^}&lFr!jьM]#5|׏t;4vx\s_ށ8/Č|f -Ɠg-\%t0R=HOIEt" StTޔdᄈnż9X4͠@n/n9,ݘ??cV jkMލE|dWPx544 8]A&R{zdm,SFO^^ӽ.)nO;$՛[Ƅ4"K/>zXb-.ZBB!J;VsAfzǎ}pO3JũEH**˰eFTהc,?s= AhXsWAP.^mDggV·%9yu}MMӛOo0 =,ؼyZ$1l2񁑞IL1cZU ((A\l8cs&&pd|lx^`m ΝQU&&C~Drb58AHcztK==>`r5+p^RgzoToĘGަ0!UrAGW^ه gӻ14 <;߿57ݴo?7y;b$:RGQڷzboT&xpŕK0gnl)!z oQj7s,2xurr3 B?Y@6YBv6n:͇q .K >.BI%U667w׋mk%vGС@`ؠ0:(,YgF<νiG O.:k]vu/cEiyԑL;m+fDE@fCii *+Wogx x Tr.H kjZQ{x'%kf:xq$n 8" #*`%jp]#*>Mcn2̘/#hu)b!io/v>oxy ȓa -v {P9g<8j h Ѐ)F|!7/>-]ƁzbX{ v p\03ɊD|/59rM ^`x:gj *Q[ۆFUǡl`%L:Ly#&J#480u'}@f>~$ sˠ"H"=H8";фAW鯚''/(D'_\:k}c= eOk_>5K;d\QK(bb & lʱ`:$P[o4%kN.0\&'c 5İ.F6O^w}J\·*J`)>P.afD],N:0sk( R/)SmI̙ ^L SDS 8$$99O7=e.`$nJ̰;N E2%E2{Qc' j(E's4E{ vj Z`s(4(կ°p3߇=&FR_zz,Ϡa8RG[:EuӕNמ%ƯkI[O m\[=44~Eyc_j6H`mQ%kXfD%^܁+! 8Q)SS .ӕp$j4 <}Z?Q@/ klr+3!3+JE9D04ԈY:K×_K)cU'p*wMMMЀ()􎦀P@$yqq0&04ĄĄ~u`BBVQ,bye͠HK}魦(񨢏i h h L|vP(bZNKV-w"O\/йǢOtwQ@B%VS@S@S`) REgJ*>><^|@Q3bSPTT T\]i h h ه.A 01om.aƑ62v(zlz(UWk h Ѐte~GQJiq&IIVo4Elc,{ΜTTx/ѧMZMMM2NS@S) H6lńБd,lc8dZiL '54NO A|D7EIcHH`{/ ǂMJ kK V2UΎ^,\ɜȖ1mW"2f>)/obʾD<7֕s >L%/}6>րp$Th h h F={QQaXȼ>:ttt!22l>/'s al7pGp Z1EB ߴێ*}( O<]6DF0 6NkpX:YdfƢ`N \e׀pNM)xv#"#"h ě%_ZEqU+ƄI=𙏖I}Q@>Ǒ@b3ab{CȽE)kh}&gZ0cMn . 
[binary PNG image data omitted: tail of the preceding image file, ending with its IEND chunk]
././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/images/under-the-hood-scenario-1-ovs-network.png 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/images/under-the-hood-scenario-1-ovs-netw0000644000175000017500000050465700000000000033333 0ustar00coreycorey00000000000000
[binary PNG image data omitted: under-the-hood-scenario-1-ovs-network.png, a documentation diagram of the "under the hood" scenario 1 Open vSwitch network topology]
Kkz`/x-:zm\IS賜fh&  E~Xp3*iVP˶Af%Ff I}u<`؊[@o(I)p<^;쇣~Tr8/:[wƍac& =.`" 4#y*ؐ_y~~9(CN{b}F+^?.YI{Y;¨4j`ا%&WGsfb_>]:yql{N݈V}+ EJsư[^q9cO @ߡppt89Ei|V"a(JPF>A[F=jϟ?D@^Ked2 #ȭGZNVP^1 qF+"betG:4Sgn`DJbi (r@ѫW(COvNkcDFIlTF\(/wU=KJD6cf H8ٸ/(/wqxx52#!:^hoZĭc "ASGvQ0zz‘!}Ă vWG_U0AXvwؕr=mơHyU*?ZeABz@J}phSϟ| =^y1Vݢg>3@ݼ#uc &̙&wuͰdEavj.8B/^c.o=WN{\ Urґ e`DrY;BJlb߄c":{t g#6uCt 3//_H֨O>(}_|{/_KgP{6"`8 –`Wñ`iOme"Ijjb2= 9dzQ8$}XhKU#t5#[P"-IeQ( x6K0$[B"7r5[<ȷ0>;Տ/~6g6H苛ϖIAĆᣏm9 ؎0o ePG͕$Cy~X=qq@rБ( C0k~hjjd5`}omh+Tv kGO?o>ˆDҞ &L#ۯLjZ]S|>Dn铺.e>L|Wf%cw \2oa5B$u)z ޭu4-j䐞j F~:ckTh$)Q>M53u*lФⒶ!F "?|Ub[x+6^Nh3>DQRHЏV4O"]q%A3+bm]g۬ZOxwp7V.\h@ xl ؏7Zsbjr%t+bC '>-~ju??6(T'AlwoDHDÙ N>"xѧ*5hۿ`J?K{kml}:#g£}-4 ?mQ|HWc%Ё쉝mH)GH)N\)^Wh[X>!n&={ +24 w .D_Ci6sH  ; kJh^W d#׾-:qi?z.4 Evti־_4\hՂ!xQ,\k^7}[⹀ xқ#Y⁶ۣ}׎/I.W{8ttnavEP ]뺠mvУ#{!;!ev~vҿjT;"qPٕ&"ti~ d˛)z, mgĉt,}s9<vxп%?ҡ|B=I޻7cPFJ YDw6-#ǯ,Vr֊zn>5?Π&{\gMVTPjd3|s=D]>/0䋯՗&cijýiXӊ{2k }(k6GFݵtEIͪX"ɞДo\%rF=ocad]/ioR,’dm]-dq _^rbc.E0 Y,x;񍉠Z}y?Kr"nHI-^̦ <ܓ,S?iEYl&ujda^hlc2܋&Lf9ٹ\{;6֕`K;n8xxdb҅ǖ "$'pBBW!p.Jqq_4 U% VC:ϼL.d jlS!i12mO xoXpDpd0:茮tAhD <ɠԼڒ)=WÇ}Ks򻶆}s4UCF1Ho%K-$uל$~Xʳ}|gvN_zýunXWkcśЋCu}"!dnqE?-ԯ]zS9qvx:|x/ oq!c}wѩuŀHrZZrX|ki|MKI&1si{7Ʈu?66 =^˷ZRa}0A1bQL["w_Wpo"77V}/{Z֋lL`IqPq\O^A>LJ9'D$Aj }:2<=C:T9V{Fd=zlIWcWb }z'YtLH-'TF)dx*F,oYG+ȡC Lz8I͡6f t_DZڭ+Ν;L[,2*{*t9B@!P%JJJp!O> L8#GҴ,  [khVWW8{U89Y3\,?k+nƦgIpUBM~u -l̈*j$e(+a6z )-,?_XJr[MXDGG#66mڴ?11?gX+{ΠU+ B6Gbb"I32<2l,r95e|]M .W'4y~Yi! gy#$I2H`m/D֞2Μѣ||u mpp0#uۀ`WWױ^YYSP( s[!)//Ge%_c42W 魍*v.Pe^X A.v-##GH1){EɤP( B@!pZq B@!P( @B@JB@!P( "gPUB@!P( @B@JB@!P( "gPUB@!P( @B@JB@!P( P(.S#OtGN/S% &"(OTsz@sƥI9lc ;s]*_!Ptdi:Q q8i&3B߲U%yHJ˫\D[{фNF̶~.6)˱Нj؝ qLN?"žeVYc)9  $l v?̕X 9"|  x՟5   TM'9u侙x-9vb]@(+p cî`JO|<kpUl0tֳ,{ >w-{m/N,T$}_Ͻ/iUX釰=B֖qKG_}yJsZ #n+ʡP~pt2[+q1" URD'`::9 7%dUE{( w=txw`;wC 䍊JٴEiq!lp5h/wJ+`@m_GX+Pͺ]P]ER^^bdw3u5W#y6N(/AYa<=(iokahRRpH{9 >zqOM؛*}_3؆ Z;1qrvF,:_:ME5R'ö'Қ وn.. ?oaqזW1 \=h_K̫*a6Ñe57mP(-@@(DYSg1zd!V3rm}7݆}ig= ck6'\6W jGaױ,tނ:cw?ʩѩt1K ówij3CY7a->tj*_$XկF݇LO 8&b҃Oaڀ w?þ ʮ5ޘl4 Uj lо]G+_NM7==l`Єq|j<<$V@c{xll 삗ߍ;"diq967%x)X ?xa%|i1~YsC1 Y@<".>^^$+$ɧ_ExXYE?o^èxkԷB@!8#&ϨuB@!heT"(M7VU ΫW[R1qۤ+aL\I7݉'ބ*j#ԘD yX ywATڟ-c%(4Y/LX9>z_x. W`DŽQKCKϼ,mLGa&:Xج&S5 !qW`OǨKØ>m:>ۖKNkF"s'>/sE]dopЮ0ހwڦs"}G\ & ߖSZ\s=HD~'͟B&7VnETh 3mea*ƒ܏|ȁ0=5*) ?bP(v[,mG?9;k]_]ij3?Q{̺4`m˵훷j[nV/]Fs쮭>X%y]6wi/N|Բ+w99i/ɚC,-/mw}I9ZqQo#Y?Rίj6]]efUdm}k%%ZIinMڍ4ZsڨvZ۴#'ZEҷo;e#?/HߵVZS97iph}1Svh}hʌ[ZY6/~@ Gj-#򴻆Ehn4GP#5"EwWۜS;Y3\VsvgOG-Y<==P+mP( egg zB@ 1!=ѧK|m#/=VA;[NJL ߙhBc0Y#GbQS7Pa~_mvs1m[K7!5+e'U~N۲h5͌ ; U|i7&(c݃jFs7sT Gnxz n^xm^ lӏ?~,.s`[lTGyoO!p^S{J ~gdf8-YxWO_.ѵ~?xm˫-7;1I;:c߷or=A4:a9* ?@s?[^!P:ĵϒ,ֽBiCs*OʫU6.E˕e m6 Bų81'V S{mK Ƽt#|5|*LgfvC3h6zj_Y:u:6tvDD⯓'וUw~8;rW1:pF7h0 }.^PR^}WjD]sojHpni3PXrךQtO Np+nĶ.R(-G@ޖcr*04IDATFLYSh*>Î}1Ko)323 ~T6J|w@N덞=֕o="`ǔ[Gb&$yyIM O[}*]@A_bz= hHLs [MNao!*j/f<$~X3 OZ2RQG[E6='&J2Gz&)L~YhAb<+J1mx7D;0-؉!,>(? F$J7ܜlq25b!`ü+q1Si}RJ BEk-JeS(3HLt}eQ# <꓏cuexQf&^ܾiHl̺i8 ?btPLd݆^x}1_^OX_-µ'Un+=;pK2/y lݴO+p ̨vh&&37(()]#\ss߂-n8f OpnN5Zf[?\?#lFaeg 6024oOB|p["ؐ9k6mo?KntFo$Qj nޜ\ZZۡ  `;L.T( ֌@M:acFT :[($؄eVSExv1kHg%`۱J\6 Dg$7dѳ!iVd`a+k'wO'~3F\l?<GwnU+#4>L8#uϰ_ʌS|;;a3pFh*}[`.V!AN(OsF^:;ƠW|;򓏰tz'w<8 MRuW_ ض']܄/B(/NCfyyzEz#,1}= c#6j2ԃtDfA.e#܇4#o S  !ַ~1#2P(i`S) nPX\ Ww/~{e(DZ:mo.:k.((< Q?JE]:*l8LWOojMJ(+Dfa+x"4*J.:/z(g!BU!%).~[a1#z%Fxxx2ESy!eSk/$Sy1R3@ك|`. Open vSwitch agent API ~~~~~~~~~~~~~~~~~~~~~~ * neutron.plugins.ml2.drivers.openvswitch.agent.ovs_agent_extension_api Open vSwitch agent API object includes two methods that return wrapped and hardened bridge objects with cookie values allocated for calling extensions:: #. request_int_br #. 
#. request_tun_br

Bridge objects returned by those methods already have new default cookie
values allocated for extension flows. All flow management methods (add_flow,
mod_flow, ...) enforce those allocated cookies.

Linuxbridge agent API
~~~~~~~~~~~~~~~~~~~~~~

* neutron.plugins.ml2.drivers.linuxbridge.agent.linuxbridge_agent_extension_api

The Linux bridge agent extension API object includes a method that returns an
instance of the IptablesManager class, which is used by the L2 agent to manage
security group rules::

#. get_iptables_manager

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/l2_agents.rst0000644000175000017500000000023200000000000026166 0ustar00coreycorey00000000000000L2 Agent Networking
-------------------

.. toctree::
   :maxdepth: 3

   openvswitch_agent
   linuxbridge_agent
   sriov_nic_agent
   l2_agent_extensions

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/l3_agent_extensions.rst0000644000175000017500000000241100000000000030264 0ustar00coreycorey00000000000000..
    Licensed under the Apache License, Version 2.0 (the "License"); you may
    not use this file except in compliance with the License. You may obtain
    a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    License for the specific language governing permissions and limitations
    under the License.

    Convention for heading levels in Neutron devref:
    =======  Heading 0 (reserved for the title in a document)
    -------  Heading 1
    ~~~~~~~  Heading 2
    +++++++  Heading 3
    '''''''  Heading 4
    (Avoid deeper levels because they do not render well.)

L3 agent extensions
===================

L3 agent extensions are part of a generalized L2/L3 extension framework. See
:doc:`agent extensions <agent_extensions>`.

L3 agent extension API
----------------------

The L3 agent extension API object includes several methods that expose router
information to L3 agent extensions::

#. get_routers_in_project
#. get_router_hosting_port
#. is_router_in_namespace
#. get_router_info

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/layer3.rst0000644000175000017500000004523100000000000025517 0ustar00coreycorey00000000000000..
    Licensed under the Apache License, Version 2.0 (the "License"); you may
    not use this file except in compliance with the License. You may obtain
    a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    License for the specific language governing permissions and limitations
    under the License.

    Convention for heading levels in Neutron devref:
    =======  Heading 0 (reserved for the title in a document)
    -------  Heading 1
    ~~~~~~~  Heading 2
    +++++++  Heading 3
    '''''''  Heading 4
    (Avoid deeper levels because they do not render well.)
Layer 3 Networking in Neutron - via Layer 3 agent & OpenVSwitch =============================================================== This page discusses the usage of Neutron with Layer 3 functionality enabled. Neutron logical network setup ----------------------------- :: vagrant@bionic64:~/devstack$ openstack network list +--------------------------------------+---------+----------------------------------------------------------------------------+ | ID | Name | Subnets | +--------------------------------------+---------+----------------------------------------------------------------------------+ | 6ece2847-971b-487a-9c7b-184651ebbc82 | public | 0d9c4261-4046-462f-9d92-64fb89bc3ae6, 9e90b059-da97-45b8-8cb8-f9370217e181 | | 713bae25-8276-4e0a-a453-e59a1d65425a | private | 6fa3bab9-103e-45d5-872c-91f21b52ceda, c5c9f5c2-145d-46d2-a513-cf675530eaed | +--------------------------------------+---------+----------------------------------------------------------------------------+ vagrant@bionic64:~/devstack$ openstack subnet list +--------------------------------------+---------------------+--------------------------------------+--------------------+ | ID | Name | Network | Subnet | +--------------------------------------+---------------------+--------------------------------------+--------------------+ | 0d9c4261-4046-462f-9d92-64fb89bc3ae6 | public-subnet | 6ece2847-971b-487a-9c7b-184651ebbc82 | 172.24.4.0/24 | | 6fa3bab9-103e-45d5-872c-91f21b52ceda | ipv6-private-subnet | 713bae25-8276-4e0a-a453-e59a1d65425a | 2001:db8:8000::/64 | | 9e90b059-da97-45b8-8cb8-f9370217e181 | ipv6-public-subnet | 6ece2847-971b-487a-9c7b-184651ebbc82 | 2001:db8::/64 | | c5c9f5c2-145d-46d2-a513-cf675530eaed | private-subnet | 713bae25-8276-4e0a-a453-e59a1d65425a | 10.0.0.0/24 | +--------------------------------------+---------------------+--------------------------------------+--------------------+ vagrant@bionic64:~/devstack$ openstack port list +--------------------------------------+------+-------------------+----------------------------------------------------------------------------------------------------+--------+ | ID | Name | MAC Address | Fixed IP Addresses | Status | +--------------------------------------+------+-------------------+----------------------------------------------------------------------------------------------------+--------+ | 420abb60-2a5a-4e80-90a3-3ff47742dc53 | | fa:16:3e:2d:5c:4e | ip_address='172.24.4.7', subnet_id='0d9c4261-4046-462f-9d92-64fb89bc3ae6' | ACTIVE | | | | | ip_address='2001:db8::1', subnet_id='9e90b059-da97-45b8-8cb8-f9370217e181' | | | b42d789d-c9ed-48a1-8822-839c4599301e | | fa:16:3e:0a:ff:24 | ip_address='10.0.0.1', subnet_id='c5c9f5c2-145d-46d2-a513-cf675530eaed' | ACTIVE | | cfff6574-091c-4d16-a54b-5b7f3eab89ce | | fa:16:3e:a0:a3:9e | ip_address='10.0.0.2', subnet_id='c5c9f5c2-145d-46d2-a513-cf675530eaed' | ACTIVE | | | | | ip_address='2001:db8:8000:0:f816:3eff:fea0:a39e', subnet_id='6fa3bab9-103e-45d5-872c-91f21b52ceda' | | | e3b7fede-277e-4c72-b66c-418a582b61ca | | fa:16:3e:13:dd:42 | ip_address='2001:db8:8000::1', subnet_id='6fa3bab9-103e-45d5-872c-91f21b52ceda' | ACTIVE | +--------------------------------------+------+-------------------+----------------------------------------------------------------------------------------------------+--------+ vagrant@bionic64:~/devstack$ openstack subnet show c5c9f5c2-145d-46d2-a513-cf675530eaed +-------------------+--------------------------------------+ | Field | Value | 
+-------------------+--------------------------------------+ | allocation_pools | 10.0.0.2-10.0.0.254 | | cidr | 10.0.0.0/24 | | created_at | 2016-11-08T21:55:22Z | | description | | | dns_nameservers | | | enable_dhcp | True | | gateway_ip | 10.0.0.1 | | host_routes | | | id | c5c9f5c2-145d-46d2-a513-cf675530eaed | | ip_version | 4 | | ipv6_address_mode | None | | ipv6_ra_mode | None | | name | private-subnet | | network_id | 713bae25-8276-4e0a-a453-e59a1d65425a | | project_id | 35e3820f7490493ca9e3a5e685393298 | | revision_number | 2 | | service_types | | | subnetpool_id | b1f81d96-d51d-41f3-96b5-a0da16ad7f0d | | updated_at | 2016-11-08T21:55:22Z | +-------------------+--------------------------------------+ Neutron logical router setup ---------------------------- :: vagrant@bionic64:~/devstack$ openstack router list +--------------------------------------+---------+--------+-------+-------------+-------+----------------------------------+ | ID | Name | Status | State | Distributed | HA | Project | +--------------------------------------+---------+--------+-------+-------------+-------+----------------------------------+ | 82fa9a47-246e-4da8-a864-53ea8daaed42 | router1 | ACTIVE | UP | False | False | 35e3820f7490493ca9e3a5e685393298 | +--------------------------------------+---------+--------+-------+-------------+-------+----------------------------------+ vagrant@bionic64:~/devstack$ openstack router show router1 +-------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | Field | Value | +-------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ | admin_state_up | UP | | availability_zone_hints | | | availability_zones | nova | | created_at | 2016-11-08T21:55:30Z | | description | | | distributed | False | | external_gateway_info | {"network_id": "6ece2847-971b-487a-9c7b-184651ebbc82", "enable_snat": true, "external_fixed_ips": [{"subnet_id": "0d9c4261-4046-462f- | | | 9d92-64fb89bc3ae6", "ip_address": "172.24.4.7"}, {"subnet_id": "9e90b059-da97-45b8-8cb8-f9370217e181", "ip_address": "2001:db8::1"}]} | | flavor_id | None | | ha | False | | id | 82fa9a47-246e-4da8-a864-53ea8daaed42 | | name | router1 | | project_id | 35e3820f7490493ca9e3a5e685393298 | | revision_number | 8 | | routes | | | status | ACTIVE | | updated_at | 2016-11-08T21:55:51Z | +-------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ vagrant@bionic64:~/devstack$ openstack port list --router router1 +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+--------+ | ID | Name | MAC Address | Fixed IP Addresses | Status | +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+--------+ | 420abb60-2a5a-4e80-90a3-3ff47742dc53 | | fa:16:3e:2d:5c:4e | ip_address='172.24.4.7', subnet_id='0d9c4261-4046-462f-9d92-64fb89bc3ae6' | ACTIVE | | | | | ip_address='2001:db8::1', subnet_id='9e90b059-da97-45b8-8cb8-f9370217e181' | | | b42d789d-c9ed-48a1-8822-839c4599301e | | fa:16:3e:0a:ff:24 | ip_address='10.0.0.1', subnet_id='c5c9f5c2-145d-46d2-a513-cf675530eaed' | ACTIVE | | 
e3b7fede-277e-4c72-b66c-418a582b61ca | | fa:16:3e:13:dd:42 | ip_address='2001:db8:8000::1', subnet_id='6fa3bab9-103e-45d5-872c-91f21b52ceda' | ACTIVE |
    +--------------------------------------+------+-------------------+---------------------------------------------------------------------------------+--------+

See the `Networking Guide
<../../admin/deploy-ovs-selfservice.html#create-initial-networks>`_ for more
detail on the creation of networks, subnets, and routers.

Neutron Routers are realized in OpenVSwitch
-------------------------------------------

.. image:: images/under-the-hood-scenario-1-ovs-network.png

"router1" in the Neutron logical network is realized through a port
("qr-0ba8700e-da") in OpenVSwitch - attached to "br-int"::

    vagrant@bionic64:~/devstack$ sudo ovs-vsctl show
    b9b27fc3-5057-47e7-ba64-0b6afe70a398
        Bridge br-int
            Port "qr-0ba8700e-da"
                tag: 1
                Interface "qr-0ba8700e-da"
                    type: internal
            Port br-int
                Interface br-int
                    type: internal
            Port int-br-ex
                Interface int-br-ex
            Port "tapbb60d1bb-0c"
                tag: 1
                Interface "tapbb60d1bb-0c"
                    type: internal
            Port "qvob2044570-ad"
                tag: 1
                Interface "qvob2044570-ad"
            Port "int-br-eth1"
                Interface "int-br-eth1"
        Bridge "br-eth1"
            Port "phy-br-eth1"
                Interface "phy-br-eth1"
            Port "br-eth1"
                Interface "br-eth1"
                    type: internal
        Bridge br-ex
            Port phy-br-ex
                Interface phy-br-ex
            Port "qg-0143bce1-08"
                Interface "qg-0143bce1-08"
                    type: internal
            Port br-ex
                Interface br-ex
                    type: internal
        ovs_version: "1.4.0+build0"

    vagrant@bionic64:~/devstack$ brctl show
    bridge name     bridge id               STP enabled     interfaces
    br-eth1         0000.e2e7fc5ccb4d       no
    br-ex           0000.82ee46beaf4d       no              phy-br-ex
                                                            qg-39efb3f9-f0
                                                            qg-77e0666b-cd
    br-int          0000.5e46cb509849       no              int-br-ex
                                                            qr-54c9cd83-43
                                                            qvo199abeb2-63
                                                            qvo1abbbb60-b8
                                                            tap74b45335-cc
    qbr199abeb2-63  8000.ba06e5f8675c       no              qvb199abeb2-63
                                                            tap199abeb2-63
    qbr1abbbb60-b8  8000.46a87ed4fb66       no              qvb1abbbb60-b8
                                                            tap1abbbb60-b8
    virbr0          8000.000000000000       yes

Finding the router in ip/ipconfig
---------------------------------

The neutron-l3-agent uses the Linux IP stack and iptables to perform L3
forwarding and NAT. In order to support multiple routers with potentially
overlapping IP addresses, neutron-l3-agent defaults to using Linux network
namespaces to provide isolated forwarding contexts. As a result, the IP
addresses of routers will not be visible simply by running "ip addr list" or
"ifconfig" on the node. Similarly, you will not be able to directly ping
fixed IPs.

To do either of these things, you must run the command within a particular
router's network namespace. The namespace will have the name
"qrouter-<UUID of the router>".

..
image:: images/under-the-hood-scenario-1-ovs-netns.png For example:: vagrant@bionic64:~$ openstack router list +--------------------------------------+---------+-------------------------------------------------------------------------+ | ID | Name | Status | State | Distributed | HA | Project | +--------------------------------------+---------+-------------------------------------------------------------------------+ | ad948c6e-afb6-422a-9a7b-0fc44cbb3910 | router1 | Active | UP | True | False | 35e3820f7490493ca9e3a5e685393298 | +--------------------------------------+---------+-------------------------------------------------------------------------+ vagrant@bionic64:~/devstack$ sudo ip netns exec qrouter-ad948c6e-afb6-422a-9a7b-0fc44cbb3910 ip addr list 18: lo: mtu 16436 qdisc noqueue state UNKNOWN link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo inet6 ::1/128 scope host valid_lft forever preferred_lft forever 19: qr-54c9cd83-43: mtu 1500 qdisc noqueue state UNKNOWN link/ether fa:16:3e:dd:c1:8f brd ff:ff:ff:ff:ff:ff inet 10.0.0.1/24 brd 10.0.0.255 scope global qr-54c9cd83-43 inet6 fe80::f816:3eff:fedd:c18f/64 scope link valid_lft forever preferred_lft forever 20: qg-77e0666b-cd: mtu 1500 qdisc noqueue state UNKNOWN link/ether fa:16:3e:1f:d3:ec brd ff:ff:ff:ff:ff:ff inet 192.168.27.130/28 brd 192.168.27.143 scope global qg-77e0666b-cd inet6 fe80::f816:3eff:fe1f:d3ec/64 scope link valid_lft forever preferred_lft forever Provider Networking ------------------- Neutron can also be configured to create `provider networks <../../admin/archives/adv-features.html#provider-networks>`_. L3 agent extensions ------------------- See :doc:`l3_agent_extensions`. Further Reading --------------- * `Packet Pushers - Neutron Network Implementation on Linux `_ * `OpenStack Networking Guide <../../admin/index.html>`_ * `Neutron - Layer 3 API extension `_ * `Darragh O'Reilly - The Quantum L3 router and floating IPs `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/linuxbridge_agent.rst0000644000175000017500000000365600000000000030017 0ustar00coreycorey00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Convention for heading levels in Neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.) L2 Networking with Linux Bridge =============================== This Agent uses the `Linux Bridge `_ to provide L2 connectivity for VM instances running on the compute node to the public network. A graphical illustration of the deployment can be found in `Networking Guide <../../admin/deploy-lb-provider.html#architecture>`_. In most common deployments, there is a compute and a network node. 
On both the compute and the network node, the Linux Bridge Agent will manage
virtual switches, connectivity among them, and interaction via virtual ports
with other network components such as namespaces and underlying interfaces.
Additionally, on the compute node, the Linux Bridge Agent will manage
security groups.

Three use cases and their packet flows are documented as follows:

1. `Linux Bridge: Provider networks <../../admin/deploy-lb-provider.html>`_

2. `Linux Bridge: Self-service networks <../../admin/deploy-lb-selfservice.html>`_

3. `Linux Bridge: High availability using VRRP <../../admin/deploy-lb-ha-vrrp.html>`_

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/live_migration.rst0000644000175000017500000001765700000000000027333 0ustar00coreycorey00000000000000..
    Licensed under the Apache License, Version 2.0 (the "License"); you may
    not use this file except in compliance with the License. You may obtain
    a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    License for the specific language governing permissions and limitations
    under the License.

    Convention for heading levels in Neutron devref:
    =======  Heading 0 (reserved for the title in a document)
    -------  Heading 1
    ~~~~~~~  Heading 2
    +++++++  Heading 3
    '''''''  Heading 4
    (Avoid deeper levels because they do not render well.)

Live-migration
==============

Let's consider a VM with one port migrating from host1 with nova-compute1,
neutron-l2-agent1 and neutron-l3-agent1 to host2 with nova-compute2,
neutron-l2-agent2 and neutron-l3-agent2.

Since the VM that is about to migrate is hosted by nova-compute1, nova sends
the live-migration order to nova-compute1 through RPC.

Nova Live Migration consists of the following stages:

* Pre-live-migration

* Live-migration-operation

* Post-live-migration

Pre-live-migration actions
--------------------------

Nova-compute1 will first ask nova-compute2 to perform pre-live-migration
actions with a synchronous RPC call. Nova-compute2 will use the neutron REST
API to retrieve the list of the VM's ports. Then, it calls its vif driver to
create the VM's port (VIF) using plug_vifs().

In the case Open vSwitch Hybrid plug is used, Neutron-l2-agent2 will detect
this new VIF, request the device details from the neutron server and
configure it accordingly. However, the port's status won't change, since this
port is not bound to nova-compute2.

Nova-compute1 calls setup_networks_on_hosts. This updates the Neutron ports'
binding:profile with the information of the target host. The port update RPC
message sent out by the Neutron server will be received by neutron-l3-agent2,
which proactively sets up the DVR router.
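The binding:profile update described above can be sketched as follows. This
is illustrative only: it uses python-neutronclient to show the shape of the
call, while nova builds the request through its own network API, and
``keystone_session`` and ``port_id`` are assumed to exist in the caller's
scope::

    from neutronclient.v2_0 import client

    # 'neutron' is assumed to be an authenticated client instance.
    neutron = client.Client(session=keystone_session)

    # Record the migration target host in the port's binding profile. The
    # 'migrating_to' key is the one removed again once migration finishes
    # (see the post-live-migration stage below).
    neutron.update_port(
        port_id,
        {'port': {'binding:profile': {'migrating_to': 'host2'}}})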
If pre-live-migration fails, nova rolls back and the port is removed from
host2. If pre-live-migration succeeds, nova proceeds with
live-migration-operation.

Potential error cases related to networking
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* Plugging the VIFs on host2 fails

  As the live migration operation has not yet started, the instance remains
  active on host1.

.. _live_mig_operation:

Live-migration-operation
------------------------

Once nova-compute2 has performed pre-live-migration actions, nova-compute1
can start the live-migration. This results in the creation of the VM and its
corresponding tap interface on node 2.

In the case Open vSwitch normal plug, linux bridge or MacVTap is being used,
Neutron-l2-agent2 will detect this new tap device and configure it
accordingly. However, the port's status won't change, since this port is not
bound to nova-compute2.

As soon as the instance is active on host2, the original instance on host1
gets removed and with it the corresponding tap device. Assuming OVS-hybrid
plug is NOT used, Neutron-l2-agent1 detects the removal and tells the neutron
server to set the port's status to DOWN state with RPC messages.

There is no rollback if failure happens in the live-migration-operation
stage. TBD: Errors are handled by the post-live-migration stage.

Potential error cases related to networking
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* Some host devices that are specified in the instance definition are not
  present on the target host. Migration fails before it really starts. This
  can happen with the MacVTap agent. See bug
  https://bugs.launchpad.net/bugs/1550400

Post-live-migration actions
---------------------------

Once live-migration has succeeded, both nova-compute1 and nova-compute2
perform post-live-migration actions. Nova-compute1, which is aware of the
success, will send an RPC cast to nova-compute2 to tell it to perform
post-live-migration actions.

On host2, nova-compute2 sends a REST call
"update_port(binding=host2, profile={})" to the neutron server to tell it to
update the port's binding. This will clear the port binding information and
move the port's status to DOWN. The ML2 plugin will then try to rebind the
port according to its new host. This update_port REST call always triggers a
port-update RPC fanout message to every neutron-l2-agent. Since
neutron-l2-agent2 is now hosting the port, it will take this message into
account and re-synchronize the port by asking the neutron server for details
about it through RPC messages. This will move the port from DOWN status to
BUILD, and then back to ACTIVE. This update also removes the 'migrating_to'
value from the portbinding dictionary. It does not clear the profile
completely, as indicated by {}, but just removes the 'migrating_to' key and
value.

On host1, nova-compute1 calls its vif driver to unplug the VM's port.
Assuming Open vSwitch Hybrid plug is used, Neutron-l2-agent1 detects the
removal and tells the neutron server to set the port's status to DOWN state
with RPC messages. For all other cases this happens as soon as the instance
and its tap device got destroyed on host1, as described in
:ref:`live_mig_operation`.

If neutron hasn't already processed the REST call
"update_port(binding=host2)", the port status will effectively move to BUILD
and then to DOWN. Otherwise, the port is bound to host2, and neutron won't
change the port status, since it's not bound to the host that is sending RPC
messages.

There is no rollback if failure happens in the post-live-migration stage. In
the case of an error, the instance is set into "ERROR" state.

Potential error cases related to networking
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* Portbinding for host2 fails

  If this happens, the vif_type of the port is set to 'binding_failed'. When
  Nova tries to recreate the domain.xml on the migration target, it will
  stumble over this invalid vif_type and fail. The instance is put into
  "ERROR" state.

Post-Copy Migration
-------------------

Usually, Live Migration is executed as pre-copy migration. The instance is
active on host1 until nearly all memory has been copied to host2.
If a certain threshold of copied memory is met, the instance on the source
gets paused, the rest of the memory is copied over, and the instance is
started on the target. The challenge with this approach is that migration
might take an infinite amount of time when the instance is writing heavily to
memory.

This issue gets solved with post-copy migration. At some point in time, the
instance on host2 will be set to active, although a huge number of memory
pages still reside only on host1. The phase that starts now is called the
post_copy phase. If the instance tries to access a memory page that has not
yet been transferred, libvirt/qemu takes care of moving this page to the
target immediately. New pages will only be written to the source. With this
approach the migration operation takes a finite amount of time.

Today, the rebinding of the port from host1 to host2 happens in the
post_live_migration phase, after migration has finished. This is fine for the
pre-copy case, as the time window between the activation of the instance on
the target and the binding of the port to the target is pretty small. This
becomes more problematic for the post-copy migration case. The instance
becomes active on the target pretty early, but the port binding still happens
after migration has finished. During this time window, the instance might not
be reachable via the network. This should be solved with bug
https://bugs.launchpad.net/nova/+bug/1605016

Flow Diagram
------------

OVS Normal plug, Linux bridge, MacVTap, SR-IOV
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. image:: images/live-mig.png

OVS-Hybrid plug
~~~~~~~~~~~~~~~

The sequence with RPC messages from neutron-l2-agent processed first is
described in the following UML sequence diagram.

.. image:: images/live-mig-ovs-hybrid.png

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/ml2_ext_manager.rst0000644000175000017500000000267500000000000027365 0ustar00coreycorey00000000000000..
    Licensed under the Apache License, Version 2.0 (the "License"); you may
    not use this file except in compliance with the License. You may obtain
    a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    License for the specific language governing permissions and limitations
    under the License.

    Convention for heading levels in Neutron devref:
    =======  Heading 0 (reserved for the title in a document)
    -------  Heading 1
    ~~~~~~~  Heading 2
    +++++++  Heading 3
    '''''''  Heading 4
    (Avoid deeper levels because they do not render well.)

ML2 Extension Manager
=====================

The extension manager for ML2 was introduced in Juno (more details can be
found in the approved `spec `_). The feature allows extending ML2 resources
without actually having to introduce cross-cutting concerns to ML2. The
mechanism has been applied for a number of use cases, and extensions that
currently use this framework are available under `ml2/extensions `_.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/network_ip_availability.rst0000644000175000017500000001356200000000000031225 0ustar00coreycorey00000000000000..
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Network IP Availability Extension ================================= This extension is an information-only API that allows a user or process to determine the amount of IPs that are consumed across networks and their subnets' allocation pools. Each network and embedded subnet returns with values for **used_ips** and **total_ips** making it easy to determine how much of your network's IP space is consumed. This API provides the ability for network administrators to periodically list usage (manual or automated) in order to preemptively add new network capacity when thresholds are exceeded. **Important Note:** This API tracks a network's "consumable" IPs. What's the distinction? After a network and its subnets are created, consumable IPs are: * Consumed in the subnet's allocations (derives used IPs) * Consumed from the subnet's allocation pools (derives total IPs) This API tracks consumable IPs so network administrators know when their subnet's IP pools (and ultimately a network's) IPs are about to run out. This API does not account reserved IPs such as a subnet's gateway IP or other reserved or unused IPs of a subnet's cidr that are consumed as a result of the subnet creation itself. API Specification ----------------- Availability for all networks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GET /v2.0/network-ip-availabilities :: Request to url: v2.0/network-ip-availabilities headers: {'content-type': 'application/json', 'X-Auth-Token': 'SOME_AUTH_TOKEN'} Example response :: Response: HTTP/1.1 200 OK Content-Type: application/json; charset=UTF-8 .. code:: { "network_ip_availabilities": [ { "network_id": "f944c153-3f46-417b-a3c2-487cd9a456b9", "network_name": "net1", "subnet_ip_availability": [ { "cidr": "10.0.0.0/24", "ip_version": 4, "subnet_id": "46b1406a-8373-454c-8eb8-500a09eb77fb", "subnet_name": "", "total_ips": 253, "used_ips": 3 } ], "tenant_id": "test-project", "total_ips": 253, "used_ips": 3 }, { "network_id": "47035bae-4f29-4fef-be2e-2941b72528a8", "network_name": "net2", "subnet_ip_availability": [], "tenant_id": "test-project", "total_ips": 0, "used_ips": 0 }, { "network_id": "2e3ea0cd-c757-44bf-bb30-42d038687e3f", "network_name": "net3", "subnet_ip_availability": [ { "cidr": "40.0.0.0/24", "ip_version": 4, "subnet_id": "aab6b35c-16b5-489c-a5c7-fec778273495", "subnet_name": "", "total_ips": 253, "used_ips": 2 } ], "tenant_id": "test-project", "total_ips": 253, "used_ips": 2 } ] } Availability by network ID ~~~~~~~~~~~~~~~~~~~~~~~~~~ GET /v2.0/network-ip-availabilities/{network\_uuid} :: Request to url: /v2.0/network-ip-availabilities/aba3b29b-c119-4b45-afbd-88e500acd970 headers: {'content-type': 'application/json', 'X-Auth-Token': 'SOME_AUTH_TOKEN'} Example response :: Response: HTTP/1.1 200 OK Content-Type: application/json; charset=UTF-8 .. 
code:: { "network_ip_availability": { "network_id": "f944c153-3f46-417b-a3c2-487cd9a456b9", "network_name": "net1", "subnet_ip_availability": [ { "cidr": "10.0.0.0/24", "ip_version": 4, "subnet_name": "", "subnet_id": "46b1406a-8373-454c-8eb8-500a09eb77fb", "total_ips": 253, "used_ips": 3 } ], "tenant_id": "test-project", "total_ips": 253, "used_ips": 3 } } Supported Query Filters ~~~~~~~~~~~~~~~~~~~~~~~ This API currently supports the following query parameters: * **network_id**: Returns availability for the network matching the network ID. Note: This query (?network_id={network_id_guid})is roughly equivalent to *Availability by network ID* section except it returns the plural response form as a list rather than as an item. * **network_name**: Returns availability for network matching the provided name * **tenant_id**: Returns availability for all networks owned by the provided project ID. * **ip_version**: Filters network subnets by those supporting the supplied ip version. Values can be either 4 or 6. Query filters can be combined to further narrow results and what is returned will match all criteria. When a parameter is specified more than once, it will return results that match both. Examples: :: # Fetch IPv4 availability for a specific project uuid GET /v2.0/network-ip-availabilities?ip_version=4&tenant_id=example-project-uuid # Fetch multiple networks by their ids GET /v2.0/network-ip-availabilities?network_id=uuid_sample_1&network_id=uuid_sample_2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/objects_usage.rst0000644000175000017500000007756000000000000027147 0ustar00coreycorey00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Convention for heading levels in neutron devref: ======= Heading 0 (reserved for the title in a document) ------- Heading 1 ~~~~~~~ Heading 2 +++++++ Heading 3 ''''''' Heading 4 (Avoid deeper levels because they do not render well.) Objects in neutron ================== Object versioning is a key concept in achieving rolling upgrades. Since its initial implementation by the nova community, a versioned object model has been pushed to an oslo library so that its benefits can be shared across projects. `Oslo VersionedObjects`_ (aka OVO) is a database facade, where you define the middle layer between software and the database schema. In this layer, a versioned object per database resource is created with a strict data definition and version number. With OVO, when you change the database schema, the version of the object also changes and a backward compatible translation is provided. This allows different versions of software to communicate with one another (via RPC). OVO is also commonly used for RPC payload versioning. OVO creates versioned dictionary messages by defining a strict structure and keeping strong typing. Because of it, you can be sure of what is sent and how to use the data on the receiving end. .. 
_Oslo VersionedObjects: https://docs.openstack.org/oslo.versionedobjects/latest/ Usage of objects ---------------- CRUD operations ~~~~~~~~~~~~~~~ Objects support CRUD operations: :code:`create()`, :code:`get_object()` and :code:`get_objects()` (equivalent of :code:`read`), :code:`update()`, :code:`delete()`, :code:`update_objects()`, and :code:`delete_objects()`. The nature of OVO is, when any change is applied, OVO tracks it. After calling :code:`create()` or :code:`update()`, OVO detects this and changed fields are saved in the database. Please take a look at simple object usage scenarios using example of DNSNameServer: .. code-block:: Python # to create an object, you can pass the attributes in constructor: dns = DNSNameServer(context, address='asd', subnet_id='xxx', order=1) dns.create() # or you can create a dict and pass it as kwargs: dns_data = {'address': 'asd', 'subnet_id': 'xxx', 'order': 1} dns = DNSNameServer(context, **dns_data) dns.create() # for fetching multiple objects: dnses = DNSNameServer.get_objects(context) # will return list of all dns name servers from DB # for fetching objects with substrings in a string field: from neutron_lib.objects import utils as obj_utils dnses = DNSNameServer.get_objects(context, address=obj_utils.StringContains('10.0.0')) # will return list of all dns name servers from DB that has '10.0.0' in their addresses # to update fields: dns = DNSNameServer.get_object(context, address='asd', subnet_id='xxx') dns.order = 2 dns.update() # if you don't care about keeping the object, you can execute the update # without fetch of the object state from the underlying persistent layer count = DNSNameServer.update_objects( context, {'order': 3}, address='asd', subnet_id='xxx') # to remove object with filter arguments: filters = {'address': 'asd', 'subnet_id': 'xxx'} DNSNameServer.delete_objects(context, **filters) Filter, sort and paginate ~~~~~~~~~~~~~~~~~~~~~~~~~ The :code:`NeutronDbObject` class has strict validation on which field sorting and filtering can happen. When calling :code:`get_objects()`, :code:`count()`, :code:`update_objects()`, :code:`delete_objects()` and :code:`objects_exist()`, :code:`validate_filters()` is invoked, to see if it's a supported filter criterion (which is by default non-synthetic fields only). Additional filters can be defined using :code:`register_filter_hook_on_model()`. This will add the requested string to valid filter names in object implementation. It is optional. In order to disable filter validation, :code:`validate_filters=False` needs to be passed as an argument in aforementioned methods. It was added because the default behaviour of the neutron API is to accept everything at API level and filter it out at DB layer. This can be used by out of tree extensions. :code:`register_filter_hook_on_model()` is a complementary implementation in the :code:`NeutronDbObject` layer to DB layer's :code:`neutron_lib.db.model_query.register_hook()`, which adds support for extra filtering during construction of SQL query. When extension defines extra query hook, it needs to be registered using the objects :code:`register_filter_hook_on_model()`, if it is not already included in the objects :code:`fields`. To limit or paginate results, :code:`Pager` object can be used. It accepts :code:`sorts` (list of :code:`(key, direction)` tuples), :code:`limit`, :code:`page_reverse` and :code:`marker` keywords. .. 
code-block:: Python # filtering # to get an object based on primary key filter dns = DNSNameServer.get_object(context, address='asd', subnet_id='xxx') # to get multiple objects dnses = DNSNameServer.get_objects(context, subnet_id='xxx') filters = {'subnet_id': ['xxx', 'yyy']} dnses = DNSNameServer.get_objects(context, **filters) # do not validate filters dnses = DNSNameServer.get_objects(context, validate_filters=False, fake_filter='xxx') # count the dns servers for given subnet dns_count = DNSNameServer.count(context, subnet_id='xxx') # sorting # direction True == ASC, False == DESC direction = False pager = Pager(sorts=[('order', direction)]) dnses = DNSNameServer.get_objects(context, _pager=pager, subnet_id='xxx') Defining your own object ------------------------ In order to add a new object in neutron, you have to: #. Create an object derived from :code:`NeutronDbObject` (aka base object) #. Add/reuse data model #. Define fields It is mandatory to define data model using :code:`db_model` attribute from :code:`NeutronDbObject`. Fields should be defined using :code:`oslo_versionobjects.fields` exposed types. If there is a special need to create a new type of field, you can use :code:`common_types.py` in the :code:`neutron.objects` directory. Example:: fields = { 'id': common_types.UUIDField(), 'name': obj_fields.StringField(), 'subnetpool_id': common_types.UUIDField(nullable=True), 'ip_version': common_types.IPVersionEnumField() } :code:`VERSION` is mandatory and defines the version of the object. Initially, set the :code:`VERSION` field to 1.0. Change :code:`VERSION` if fields or their types are modified. When you change the version of objects being exposed via RPC, add method :code:`obj_make_compatible(self, primitive, target_version)`. For example, if a new version introduces a new parameter, it needs to be removed for previous versions:: from oslo_utils import versionutils def obj_make_compatible(self, primitive, target_version): _target_version = versionutils.convert_version_to_tuple(target_version) if _target_version < (1, 1): # version 1.1 introduces "new_parameter" primitive.pop('new_parameter', None) In the following example the object has changed an attribute definition. For example, in version 1.1 :code:`description` is allowed to be :code:`None` but not in version 1.0:: from oslo_utils import versionutils from oslo_versionedobjects import exception def obj_make_compatible(self, primitive, target_version): _target_version = versionutils.convert_version_to_tuple(target_version) if _target_version < (1, 1): # version 1.1 changes "description" if primitive['description'] is None: # "description" was not nullable before raise exception.IncompatibleObjectVersion( objver=target_version, objname='OVOName') Using the first example as reference, this is how the unit test can be implemented:: def test_object_version_degradation_1_1_to_1_0(self): OVO_obj_1_1 = self._method_to_create_this_OVO() OVO_obj_1_0 = OVO_obj_1_1.obj_to_primitive(target_version='1.0') self.assertNotIn('new_parameter', OVO_obj_1_0['versioned_object.data']) .. note:: Standard Attributes are automatically added to OVO fields in base class. Attributes [#]_ like :code:`description`, :code:`created_at`, :code:`updated_at` and :code:`revision_number` are added in [#]_. :code:`primary_keys` is used to define the list of fields that uniquely identify the object. In case of database backed objects, it's usually mapped onto SQL primary keys. 
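For instance, a minimal usage sketch (the DNSNameServer object with composite
primary keys is the same one used in the examples above; the values are
illustrative):

.. code-block:: Python

    # All primary key fields together identify a single object, so all of
    # them need to be supplied when fetching one object by primary key:
    dns = DNSNameServer.get_object(context, address='8.8.8.8',
                                   subnet_id='xxx')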
For immutable object fields that cannot be changed, there is a :code:`fields_no_update` list, that contains :code:`primary_keys` by default. If there is a situation where a field needs to be named differently in an object than in the database schema, you can use :code:`fields_need_translation`. This dictionary contains the name of the field in the object definition (the key) and the name of the field in the database (the value). This allows to have a different object layer representation for database persisted data. For example in IP allocation pools:: fields_need_translation = { 'start': 'first_ip', # field_ovo: field_db 'end': 'last_ip' } The above dictionary is used in :code:`modify_fields_from_db()` and in :code:`modify_fields_to_db()` methods which are implemented in base class and will translate the software layer to database schema naming, and vice versa. It can also be used to rename :code:`orm.relationship` backed object-type fields. Most object fields are usually directly mapped to database model attributes. Sometimes it's useful to expose attributes that are not defined in the model table itself, like relationships and such. In this case, :code:`synthetic_fields` may become handy. This object property can define a list of object fields that don't belong to the object database model and that are hence instead to be implemented in some custom way. Some of those fields map to :code:`orm.relationships` defined on models, while others are completely untangled from the database layer. When exposing existing :code:`orm.relationships` as an ObjectField-typed field, you can use the :code:`foreign_keys` object property that defines a link between two object types. When used, it allows objects framework to automatically instantiate child objects, and fill the relevant parent fields, based on :code:`orm.relationships` defined on parent models. In order to automatically populate the :code:`synthetic_fields`, the :code:`foreign_keys` property is introduced. :code:`load_synthetic_db_fields()` [#]_ method from NeutronDbObject uses :code:`foreign_keys` to match the foreign key in related object and local field that the foreign key is referring to. See simplified examples: .. code-block:: Python class DNSNameServerSqlModel(model_base.BASEV2): address = sa.Column(sa.String(128), nullable=False, primary_key=True) subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id', ondelete="CASCADE"), primary_key=True) class SubnetSqlModel(model_base.BASEV2, HasId, HasProject): name = sa.Column(sa.String(attr.NAME_MAX_LEN)) allocation_pools = orm.relationship(IPAllocationPoolSqlModel) dns_nameservers = orm.relationship(DNSNameServerSqlModel, backref='subnet', cascade='all, delete, delete-orphan', lazy='subquery') class IPAllocationPoolSqlModel(model_base.BASEV2, HasId): subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id')) @obj_base.VersionedObjectRegistry.register class DNSNameServerOVO(base.NeutronDbObject): VERSION = '1.0' db_model = DNSNameServerSqlModel # Created based on primary_key=True in model definition. # The object is uniquely identified by the pair of address and # subnet_id fields. Override the default 'id' 1-tuple. primary_keys = ['address', 'subnet_id'] # Allow to link DNSNameServerOVO child objects into SubnetOVO parent # object fields via subnet_id child database model attribute. # Used during loading synthetic fields in SubnetOVO get_objects. 
foreign_keys = {'SubnetOVO': {'subnet_id': 'id'}} fields = { 'address': obj_fields.StringField(), 'subnet_id': common_types.UUIDField(), } @obj_base.VersionedObjectRegistry.register class SubnetOVO(base.NeutronDbObject): VERSION = '1.0' db_model = SubnetSqlModel fields = { 'id': common_types.UUIDField(), # HasId from model class 'project_id': obj_fields.StringField(nullable=True), # HasProject from model class 'subnet_name': obj_fields.StringField(nullable=True), 'dns_nameservers': obj_fields.ListOfObjectsField('DNSNameServer', nullable=True), 'allocation_pools': obj_fields.ListOfObjectsField('IPAllocationPoolOVO', nullable=True) } # Claim dns_nameservers field as not directly mapped into the object # database model table. synthetic_fields = ['allocation_pools', 'dns_nameservers'] # Rename in-database subnet_name attribute into name object field fields_need_translation = { 'name': 'subnet_name' } @obj_base.VersionedObjectRegistry.register class IPAllocationPoolOVO(base.NeutronDbObject): VERSION = '1.0' db_model = IPAllocationPoolSqlModel fields = { 'subnet_id': common_types.UUIDField() } foreign_keys = {'SubnetOVO': {'subnet_id': 'id'}} The :code:`foreign_keys` is used in :code:`SubnetOVO` to populate the :code:`allocation_pools` [#]_ synthetic field using the :code:`IPAllocationPoolOVO` class. Single object type may be linked to multiple parent object types, hence :code:`foreign_keys` property may have multiple keys in the dictionary. .. note:: :code:`foreign_keys` is declared in related object :code:`IPAllocationPoolOVO`, the same way as it's done in the SQL model :code:`IPAllocationPoolSqlModel`: :code:`sa.ForeignKey('subnets.id')` .. note:: Only single foreign key is allowed (usually parent ID), you cannot link through multiple model attributes. It is important to remember about the nullable parameter. In the SQLAlchemy model, the nullable parameter is by default :code:`True`, while for OVO fields, the nullable is set to :code:`False`. Make sure you correctly map database column nullability properties to relevant object fields. Database session activation --------------------------- By default, all objects use old ``oslo.db`` engine facade. To enable the new facade for a particular object, set ``new_facade`` class attribute to ``True``: .. code-block:: Python @obj_base.VersionedObjectRegistry.register class ExampleObject(base.NeutronDbObject): new_facade = True It will make all OVO actions - ``get_object``, ``update``, ``count`` etc. - to use new ``reader.using`` or ``writer.using`` decorators to manage database transactions. Whenever you need to open a new subtransaction in scope of OVO code, use the following database session decorators: .. code-block:: Python @obj_base.VersionedObjectRegistry.register class ExampleObject(base.NeutronDbObject): @classmethod def get_object(cls, context, **kwargs): with cls.db_context_reader(context): super(ExampleObject, cls).get_object(context, **kwargs) # fetch more data in the same transaction def create(self): with self.db_context_writer(self.obj_context): super(ExampleObject, self).create() # apply more changes in the same transaction ``db_context_reader`` and ``db_context_writer`` decorators abstract the choice of engine facade used for particular object from action implementation. Alternatively, you can call all OVO actions under an active ``reader.using`` / ``writer.using`` context manager (or ``session.begin``). In this case, OVO will pick the appropriate method to open a subtransaction. 
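For example, a short sketch of the latter pattern (``ExampleObject`` and its
``name`` field are illustrative; ``db_api`` is assumed to be the module that
provides the ``context_manager`` used by the examples below):

.. code-block:: Python

    # Both OVO calls below share one transaction: OVO detects the active
    # writer context and opens subtransactions instead of new transactions.
    with db_api.context_manager.writer.using(context):
        obj = ExampleObject.get_object(context, id=object_id)
        obj.name = 'new-name'
        obj.update()  # persisted within the surrounding transaction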
Synthetic fields ---------------- :code:`synthetic_fields` is a list of fields, that are not directly backed by corresponding object SQL table attributes. Synthetic fields are not limited in types that can be used to implement them. .. code-block:: Python fields = { 'dhcp_agents': obj_fields.ObjectField('NetworkDhcpAgentBinding', nullable=True), # field that contains another single NeutronDbObject of NetworkDhcpAgentBinding type 'shared': obj_fields.BooleanField(default=False), 'subnets': obj_fields.ListOfObjectsField('Subnet', nullable=True) } # All three fields do not belong to corresponding SQL table, and will be # implemented in some object-specific way. synthetic_fields = ['dhcp_agents', 'shared', 'subnets'] :code:`ObjectField` and :code:`ListOfObjectsField` take the name of object class as an argument. Implementing custom synthetic fields ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Sometimes you may want to expose a field on an object that is not mapped into a corresponding database model attribute, or its :code:`orm.relationship`; or may want to expose a :code:`orm.relationship` data in a format that is not directly mapped onto a child object type. In this case, here is what you need to do to implement custom getters and setters for the custom field. The custom method to load the synthetic fields can be helpful if the field is not directly defined in the database, OVO class is not suitable to load the data or the related object contains only the ID and property of the parent object, for example :code:`subnet_id` and property of it: :code:`is_external`. In order to implement the custom method to load the synthetic field, you need to provide loading method in the OVO class and override the base class method :code:`from_db_object()` and :code:`obj_load_attr()`. The first one is responsible for loading the fields to object attributes when calling :code:`get_object()` and :code:`get_objects()`, :code:`create()` and :code:`update()`. The second is responsible for loading attribute when it is not set in object. Also, when you need to create related object with attributes passed in constructor, :code:`create()` and :code:`update()` methods need to be overwritten. Additionally :code:`is_external` attribute can be exposed as a boolean, instead of as an object-typed field. When field is changed, but it doesn't need to be saved into database, :code:`obj_reset_changes()` can be called, to tell OVO library to ignore that. Let's see an example: .. code-block:: Python @obj_base.VersionedObjectRegistry.register class ExternalSubnet(base.NeutronDbObject): VERSION = '1.0' fields = {'subnet_id': common_types.UUIDField(), 'is_external': obj_fields.BooleanField()} primary_keys = ['subnet_id'] foreign_keys = {'Subnet': {'subnet_id': 'id'}} @obj_base.VersionedObjectRegistry.register class Subnet(base.NeutronDbObject): VERSION = '1.0' fields = {'external': obj_fields.BooleanField(nullable=True),} synthetic_fields = ['external'] # support new custom 'external=' filter for get_objects family of # objects API def __init__(self, context=None, **kwargs): super(Subnet, self).__init__(context, **kwargs) self.add_extra_filter_name('external') def create(self): fields = self.get_changes() with db_api.context_manager.writer.using(context): if 'external' in fields: ExternalSubnet(context, subnet_id=self.id, is_external=fields['external']).create() # Call to super() to create the SQL record for the object, and # reload its fields from the database, if needed. 
In the :code:`ExternalSubnet` example above, the :code:`get_object(s)` methods
do not have to be overwritten, because :code:`from_db_object()` takes care of
loading the synthetic fields in a custom way.

Standard attributes
-------------------

The standard attributes are added automatically in the metaclass
:code:`DeclarativeObject`. When adding a standard attribute, it has to be
added in ``neutron/objects/extensions/standardattributes.py``. It will then be
added to all relevant objects that use the :code:`standardattributes` model.
Be careful when adding something there, because it could trigger a change in
the objects' :code:`VERSION`. For more on how standard attributes work,
check [#]_.

RBAC handling in objects
------------------------

RBAC is currently implemented for resources like Subnet (*), Network and
QosPolicy. Subnet is a special case, because access control of a Subnet
depends on the Network RBAC entries.

The RBAC support for objects is defined in ``neutron/objects/rbac_db.py``. It
defines the new base class :code:`NeutronRbacObject`. The new class wraps
standard :code:`NeutronDbObject` methods like :code:`create()`,
:code:`update()` and :code:`to_dict()`. It checks whether the :code:`shared`
attribute is defined in the :code:`fields` dictionary and adds it to
:code:`synthetic_fields`. Also, :code:`rbac_db_model` is required to be
defined in the Network and QosPolicy classes.

:code:`NeutronRbacObject` is a common place to handle all operations on the
RBAC entries, like getting the information on whether a resource is shared or
not, and creating and updating the entries. By wrapping the
:code:`NeutronDbObject` methods, it manipulates the 'shared' attribute while
the :code:`create()` and :code:`update()` methods are called.

The example of defining the Network OVO:
.. code-block:: Python

    class Network(standard_attr.HasStandardAttributes, model_base.BASEV2,
                  model_base.HasId, model_base.HasProject):
        """Represents a v2 neutron network."""

        name = sa.Column(sa.String(attr.NAME_MAX_LEN))
        rbac_entries = orm.relationship(rbac_db_models.NetworkRBAC,
                                        backref='network', lazy='joined',
                                        cascade='all, delete, delete-orphan')

    # Note the base class for Network OVO:
    @obj_base.VersionedObjectRegistry.register
    class Network(rbac_db.NeutronRbacObject):
        # Version 1.0: Initial version
        VERSION = '1.0'

        # rbac_db_model is required to be added here
        rbac_db_model = rbac_db_models.NetworkRBAC
        db_model = models_v2.Network

        fields = {
            'id': common_types.UUIDField(),
            'project_id': obj_fields.StringField(nullable=True),
            'name': obj_fields.StringField(nullable=True),
            # shared is required to be added to fields
            'shared': obj_fields.BooleanField(default=False),
        }

.. note:: The :code:`shared` field is not added to the
   :code:`synthetic_fields`, because :code:`NeutronRbacObject` requires adding
   it by itself; otherwise, :code:`ObjectActionError` is raised. [#]_

Extensions to neutron resources
-------------------------------

One of the methods to extend neutron resources is to add an arbitrary value to
the dictionary representing the data, by providing an
:code:`extend_(subnet|port|network)_dict()` function and defining a loading
method.

From the DB perspective, all the data will be loaded, including all declared
fields from DB relationships. The current implementation for core resources
(Port, Subnet, Network etc.) is that the DB result is parsed by
:code:`make_<resource>_dict()` and :code:`extend_<resource>_dict()`. When an
extension is enabled, :code:`extend_<resource>_dict()` takes the DB results
and declares new fields in the resulting dict. When the extension is not
enabled, the data will be fetched, but will not be populated into the
resulting dict, because :code:`extend_<resource>_dict()` will not be called.
Plugins can still use objects for some work, but then convert them to dicts
and work as they please, extending the dict as they wish. For example:

.. code-block:: Python

    class TestSubnetExtension(model_base.BASEV2):
        subnet_id = sa.Column(sa.String(36),
                              sa.ForeignKey('subnets.id', ondelete="CASCADE"),
                              primary_key=True)
        value = sa.Column(sa.String(64))
        subnet = orm.relationship(
            models_v2.Subnet,
            # here is the definition of loading the extension with Subnet model:
            backref=orm.backref('extension', cascade='delete', uselist=False))

    @oslo_obj_base.VersionedObjectRegistry.register_if(False)
    class TestSubnetExtensionObject(obj_base.NeutronDbObject):
        # Version 1.0: Initial version
        VERSION = '1.0'

        db_model = TestSubnetExtension

        fields = {
            'subnet_id': common_types.UUIDField(),
            'value': obj_fields.StringField(nullable=True)
        }

        primary_keys = ['subnet_id']
        foreign_keys = {'Subnet': {'subnet_id': 'id'}}

    @obj_base.VersionedObjectRegistry.register
    class Subnet(base.NeutronDbObject):
        # Version 1.0: Initial version
        VERSION = '1.0'

        fields = {
            'id': common_types.UUIDField(),
            'extension': obj_fields.ObjectField(
                TestSubnetExtensionObject.__name__, nullable=True),
        }

        synthetic_fields = ['extension']

    # when defining the extend_subnet_dict function:
    def extend_subnet_dict(self, session, subnet_ovo, result):
        value = subnet_ovo.extension.value if subnet_ovo.extension else ''
        result['subnet_extension'] = value

The above example is the ideal situation, where all extensions have objects
adopted and enabled in core neutron resources.

By introducing the OVO work in tree, the interface between the base plugin
code and registered extension functions hasn't been changed. Those still
receive a SQLAlchemy model, not an object. This is achieved by capturing the
corresponding database model on :code:`get_***/create/update`, and exposing it
via :code:`.db_obj`.
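As a usage sketch of that behaviour (hypothetical code; the variable and
function names are illustrative, not from the tree):

.. code-block:: Python

    # the OVO captures the SQLAlchemy model it was built from
    subnet = Subnet.get_object(context, id=subnet_id)

    # legacy extension functions that expect a model keep working
    legacy_extend_subnet_dict(session, subnet.db_obj, result)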
Removal of downgrade checks over time
-------------------------------------

While the code to check object versions is meant to remain for a long period
of time, in the interest of not accruing too much cruft, it is not intended to
be permanent. OVO downgrade code should account for code that is within the
upgrade window of any major OpenStack distribution. The longest currently
known is for Ubuntu Cloud Archive, which upgrades across four releases,
meaning that during the upgrade the control nodes would be running a release
that is four releases newer than the one running on the computes.

Known fast forward upgrade windows are:

* Red Hat OpenStack Platform (RHOSP): X -> X+3 [#]_
* SuSE OpenStack Cloud (SOC): X -> X+2 [#]_
* Ubuntu Cloud Archive: X -> X+4 [#]_

Therefore, OVO version downgrade code should be removed in the fifth cycle
after it was introduced. For example, if an object version was introduced in
Ocata, then it can be removed in Train.

Backward compatibility for tenant_id
------------------------------------

All objects can support :code:`tenant_id` and :code:`project_id` filters and
fields at the same time; it is automatically enabled for all objects that have
a :code:`project_id` field. The base :code:`NeutronDbObject` class has support
for exposing :code:`tenant_id` in dictionary access to the object fields
(:code:`subnet['tenant_id']`) and in the :code:`to_dict()` method. There is a
:code:`tenant_id` read-only property for every object that has
:code:`project_id` in :code:`fields`. It is not exposed in the
:code:`obj_to_primitive()` method, which means that :code:`tenant_id` will not
be sent over the RPC callback wire.

When filtering/sorting by :code:`tenant_id`, the filters should be converted
to expose the :code:`project_id` field. In the long run the API layer should
translate it, but as a temporary workaround it can be done at the DB layer
before passing filters to the objects' :code:`get_objects()` method, for
example:

.. code-block:: Python

    def convert_filters(filters):
        if 'tenant_id' in filters:
            filters['project_id'] = filters.pop('tenant_id')
        return filters

    def get_subnets(context, filters):
        filters = convert_filters(filters)
        return subnet_obj.Subnet.get_objects(context, **filters)

A :code:`convert_filters` helper is available in
``neutron_lib.objects.utils`` [#]_.

References
----------

.. [#] https://opendev.org/openstack/neutron/tree/neutron/objects/base.py?h=stable/ocata#n258
.. [#] https://opendev.org/openstack/neutron/tree/neutron/db/standard_attr.py?h=stable/ocata
.. [#] https://opendev.org/openstack/neutron/tree/neutron/objects/base.py?h=stable/ocata#n516
.. [#] https://opendev.org/openstack/neutron/tree/neutron/objects/base.py?h=stable/ocata#n542
.. [#] https://docs.openstack.org/neutron/latest/contributor/internals/db_layer.html#the-standard-attribute-table
.. [#] https://opendev.org/openstack/neutron/tree/neutron/objects/rbac_db.py?h=stable/ocata#n291
.. [#] https://access.redhat.com/support/policy/updates/openstack/platform/
.. [#] https://www.suse.com/releasenotes/x86_64/SUSE-OPENSTACK-CLOUD/8/#Upgrade
.. [#] https://www.ubuntu.com/about/release-cycle
.. [#] https://opendev.org/openstack/neutron-lib/tree/neutron_lib/objects/utils.py
Open vSwitch L2 Agent
=====================

This Agent uses the `Open vSwitch`_ virtual switch to create L2 connectivity
for instances, along with bridges created in conjunction with OpenStack Nova
for filtering.

ovs-neutron-agent can be configured to use different networking technologies
to create project isolation. These technologies are implemented as ML2 type
drivers, which are used in conjunction with the Open vSwitch mechanism driver.

VLAN Tags
---------

.. image:: images/under-the-hood-scenario-1-ovs-compute.png

.. _Open vSwitch: http://openvswitch.org

GRE Tunnels
-----------

GRE Tunneling is documented in depth in the
`Networking in too much detail `_ post by RedHat.

VXLAN Tunnels
-------------

VXLAN is an overlay technology which encapsulates MAC frames at layer 2 into a
UDP header. More information can be found in `The VXLAN wiki page. `_

Geneve Tunnels
--------------

Geneve uses UDP as its transport protocol and is dynamic in size using
extensible option headers. It is important to note that currently it is only
supported in newer kernels (kernel >= 3.18, OVS version >= 2.4). More
information can be found in the `Geneve RFC document. `_

Bridge Management
-----------------

In order to make the agent capable of handling more than one tunneling
technology, to decouple the requirements of segmentation technology from
project isolation, and to preserve backward compatibility for OVS agents
working without tunneling, the agent relies on a tunneling bridge, or br-tun,
and the well known integration bridge, or br-int.

All VM VIFs are plugged into the integration bridge. VM VIFs on a given
virtual network share a common "local" VLAN (i.e. not propagated externally).
The VLAN id of this local VLAN is mapped to the physical networking details
realizing that virtual network.

For virtual networks realized as VXLAN/GRE tunnels, a Logical Switch (LS)
identifier is used to differentiate project traffic on inter-HV tunnels. A
mesh of tunnels is created to other Hypervisors in the cloud. These tunnels
originate and terminate on the tunneling bridge of each hypervisor, leaving
br-int unaffected. Port patching is done to connect local VLANs on the
integration bridge to inter-hypervisor tunnels on the tunnel bridge.
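For illustration, that patch wiring is roughly equivalent to the following
``ovs-vsctl`` commands (a sketch assuming the default ``patch-tun`` /
``patch-int`` port names; the agent sets this up programmatically rather than
via the CLI):

::

    ovs-vsctl add-port br-int patch-tun -- set Interface patch-tun type=patch options:peer=patch-int
    ovs-vsctl add-port br-tun patch-int -- set Interface patch-int type=patch options:peer=patch-tun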
For each virtual network realized as a VLAN or flat network, a veth or a pair
of patch ports is used to connect the local VLAN on the integration bridge
with the physical network bridge, with flow rules adding, modifying, or
stripping VLAN tags as necessary, thus preserving backward compatibility with
the way the OVS agent used to work prior to the tunneling capability (for more
details, please look at https://review.opendev.org/#/c/4367).

Bear in mind that this design decision may be overhauled in the future to
support existing VLAN-tagged traffic (coming from NFV VMs, for instance)
and/or to deal with potential QinQ support natively available in Open vSwitch.

Tackling the Network Trunking use case
--------------------------------------

Rationale
~~~~~~~~~

At the time the first design for the OVS agent came up, trunking in OpenStack
was merely a pipe dream. Since then, lots has happened in the OpenStack
platform, and many deployments have gone into production since early 2012.

In order to address the `vlan-aware-vms `_ use case on top of Open vSwitch,
the following aspects must be taken into account:

* Design complexity: starting afresh is always an option, but a complete
  rearchitecture is only desirable under some circumstances. After all,
  customers want solutions...yesterday. It is noteworthy that the OVS agent
  design is already relatively complex, as it accommodates a number of
  deployment options, especially in relation to security rules and/or
  acceleration.
* Upgrade complexity: being able to retrofit the existing design means that an
  existing deployment does not need to go through a forklift upgrade in order
  to expose new functionality; alternatively, the desire to avoid a migration
  requires a more complex solution that is able to support multiple modes of
  operation.
* Design reusability: ideally, a proposed design can easily apply to the
  various technology backends that the Neutron L2 agent supports: Open vSwitch
  and Linux Bridge.
* Performance penalty: no solution is appealing enough if it is unable to
  satisfy the stringent requirement of high packet throughput, at least in the
  long term.
* Feature compatibility: VLAN `transparency `_ is for better or for worse
  intertwined with vlan awareness. The former is about making the platform not
  interfere with the tag associated with the packets sent by the VM, and
  letting the underlay figure out where the packet needs to be sent out; the
  latter is about making the platform use the vlan tag associated with the
  packet to determine where the packet needs to go. Ideally, a design choice
  to satisfy the awareness use case will not have a negative impact on solving
  the transparency use case. Having said that, the two features are still
  meant to be mutually exclusive in their application, and plugging subports
  into networks whose vlan-transparency flag is set to True might have
  unexpected results. In fact, it would be impossible from the platform's
  point of view to discern which tagged packets are meant to be treated
  'transparently' and which ones are meant to be used for demultiplexing (in
  order to reach the right destination). The outcome might only be predictable
  if two layers of vlan tags are stacked up together, making guest support
  even more crucial for the combined use case.

It is clear by now that an acceptable solution must be assessed with these
issues in mind.
The potential solutions worth enumerating are:

* VLAN interfaces: in layman's terms, these interfaces allow demuxing the
  traffic before it hits the integration bridge, where the traffic will get
  isolated and sent off to the right destination. This solution is `proven `_
  to work for both iptables-based and native ovs security rules (credit to
  Rawlin Peters). This solution has the following design implications:

  * Design complexity: this requires relatively small changes to the existing
    OVS design, and it can work with both iptables and native ovs security
    rules.
  * Upgrade complexity: in order to employ this solution no major upgrade is
    necessary, and thus no potential dataplane disruption is involved.
  * Design reusability: VLAN interfaces can easily be employed for both Open
    vSwitch and Linux Bridge.
  * Performance penalty: using VLAN interfaces means that the kernel must be
    involved. For Open vSwitch, being able to use a fast path like DPDK would
    be an unresolved issue (`Kernel NIC interfaces `_ are not on the roadmap
    for distros and OVS, and most likely never will be). Even in the absence
    of an extra bridge, i.e. when using the native ovs firewall, and with the
    advent of userspace connection tracking that would allow the `stateful
    firewall driver `_ to work with DPDK, the performance gap between a pure
    userspace DPDK-capable solution and a kernel-based solution will be
    substantial, at least under certain traffic conditions.
  * Feature compatibility: in order to keep the design simple once VLAN
    interfaces are adopted, and yet enable VLAN transparency, Open vSwitch
    needs to support QinQ, which is currently lacking as of 2.5 and with no
    ongoing plan for integration.

* Going full OpenFlow: in layman's terms, this means programming the dataplane
  using OpenFlow in order to provide tenant isolation and packet processing.
  This solution has the following design implications:

  * Design complexity: this requires a big rearchitecture of the current
    Neutron L2 agent solution.
  * Upgrade complexity: existing deployments will be unable to work correctly
    unless one of the following actions takes place: a) the agent can handle
    both the 'old' and 'new' way of wiring the data path; b) a dataplane
    migration is forced during a release upgrade, and thus it may cause
    (potentially unrecoverable) dataplane disruption.
  * Design reusability: a solution for Linux Bridge will still be required to
    avoid widening the gap between Open vSwitch and Linux Bridge (e.g. OVS has
    DVR but LB does not).
  * Performance penalty: using OpenFlow will allow leveraging the user space
    and fast processing given by DPDK, but at a considerable engineering cost
    nonetheless. Security rules will have to be provided by a `learn based
    firewall `_ to fully exploit the capabilities of DPDK, at least until
    `user space `_ connection tracking becomes available in OVS.
  * Feature compatibility: with the adoption of OpenFlow, tenant isolation
    will no longer be provided by means of local vlan provisioning, thus
    making the requirement of QinQ support no longer strictly necessary for
    Open vSwitch.

* Per trunk port OVS bridge: in layman's terms, this is similar to the first
  option, in that an extra layer of mux/demux is introduced between the VM and
  the integration bridge (br-int), but instead of using vlan interfaces, a
  combination of a new per-port OVS bridge and patch ports to wire this new
  bridge with br-int will be used.
  This solution has the following design implications:

  * Design complexity: the complexity of this solution can be considered in
    between the above mentioned options, in that some work is already
    available since `Mitaka `_ and the data path wiring logic can be partially
    reused.
  * Upgrade complexity: if two separate code paths are assumed to be
    maintained in the OVS agent to handle regular ports and ports
    participating in a trunk, with no ability to convert from one to the other
    (and vice versa), no migration is required. This is done at the cost of
    some loss of flexibility and maintenance complexity.
  * Design reusability: a solution to support vlan trunking for the Linux
    Bridge mech driver will still be required to avoid widening the gap with
    Open vSwitch (e.g. OVS has DVR but LB does not).
  * Performance penalty: from a performance standpoint, the adoption of a
    trunk bridge relieves the agent from employing kernel interfaces, thus
    unlocking the full potential of fast packet processing. That said, this is
    only doable in combination with a native ovs firewall. At the time of
    writing the only DPDK-enabled firewall driver is the learn based one
    available in the `networking-ovs-dpdk repo `_.
  * Feature compatibility: the existing local provisioning logic will not be
    affected by the introduction of a trunk bridge, therefore use cases where
    VMs are connected to a vlan transparent network via a regular port will
    still require QinQ support from OVS.

To summarize:

* VLAN interfaces (A) are compelling because they will lead to a relatively
  contained engineering cost at the expense of performance. The Open vSwitch
  community will need to be involved in order to deliver vlan transparency.
  Irrespective of whether this strategy is chosen for Open vSwitch or not,
  this is still the only viable approach for Linux Bridge and is thus pursued
  to address Linux Bridge support for VLAN trunking. To some extent, this
  option can also be considered a fallback strategy for OVS deployments that
  are unable to adopt DPDK.

* OpenFlow (B) is compelling because it will allow Neutron to unlock the full
  potential of Open vSwitch, at the expense of development and operations
  effort. The development is confined within the boundaries of the Neutron
  community in order to address vlan awareness and transparency (as two
  distinct use cases, i.e. to be adopted separately). Stateful firewall (based
  on ovs conntrack) limits the adoption for DPDK at the time of writing, but a
  learn-based firewall can be a suitable alternative. Obviously this solution
  is not compliant with the iptables firewall.

* Trunk Bridges (C) try to bring the best of options A and B together as far
  as OVS development and performance are concerned, but they come at the
  expense of maintenance complexity and loss of flexibility. A Linux Bridge
  solution would still be required, and QinQ support will still be needed to
  address vlan transparency.

All things considered, as far as OVS is concerned, option (C) is the most
promising in the medium term. Management of trunks and ports within trunks
will have to be handled differently and, to start with, it is sensible to
restrict the ability to update ports (i.e. convert) once they are bound to a
particular bridge (integration vs trunk). Security rules via iptables are
obviously not supported, and never will be.

Option (A) for OVS could be pursued in conjunction with Linux Bridge support,
if the effort is seen as particularly low-hanging fruit.
However, a working solution based on this option positions the OVS agent as a
sub-optimal platform for performance sensitive applications in comparison to
other accelerated or SDN-controller based solutions. Since further data plane
performance improvement is hindered by the extra use of kernel resources, this
option is not at all appealing in the long term.

Embracing option (B) in the long run may be complicated by the adoption of
option (C). The development and maintenance complexity involved in options (C)
and (B) respectively poses the existential question as to whether investing in
the agent-based architecture is an effective strategy, especially if the end
result would look a lot like other maturing alternatives.

Implementation VLAN Interfaces (Option A)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This implementation doesn't require any modification of the vif-drivers, since
Nova will plug the vif of the VM the same way as it does for traditional
ports.

Trunk port creation
+++++++++++++++++++

A VM is spawned passing to Nova the port-id of a parent port associated with a
trunk. Nova/libvirt will create the tap interface and will plug it into
br-int, or into the firewall bridge if using the iptables firewall. In the
external-ids of the port, Nova will store the port ID of the parent port. The
OVS agent detects that a new vif has been plugged. It gets the details of the
new port and wires it. The agent configures it in the same way as a
traditional port: packets coming out from the VM will be tagged using the
internal VLAN ID associated with the network, and packets going to the VM will
be stripped of the VLAN ID. After wiring it successfully, the OVS agent will
send a message notifying the Neutron server that the parent port is up.
Neutron will send back to Nova an event to signal that the wiring was
successful. If the parent port is associated with one or more subports, the
agent will process them as described in the next paragraph.

Subport creation
++++++++++++++++

If a subport is added to a parent port but no VM was booted using that parent
port yet, no L2 agent will process it (because at that point the parent port
is not bound to any host). When a subport is created for a parent port and a
VM that uses that parent port is already running, the OVS agent will create a
VLAN interface on the VM tap using the VLAN ID specified in the subport
segmentation id. There's a small possibility that a race might occur: the
firewall bridge might be created and plugged while the vif is not there yet.
The OVS agent needs to check if the vif exists before trying to create a
subinterface.

Let's see how the models differ when using the iptables firewall or the ovs
native firewall.

Iptables Firewall
'''''''''''''''''

::

    +----------------------------+
    |            VM              |
    |   eth0           eth0.100  |
    +-----+-----------------+----+
          |
      +---+---+       +-----+-----+
      | tap1  |-------| tap1.100  |
      +---+---+       +-----+-----+
          |                 |
          |                 |
      +---+---+         +---+---+
      | qbr1  |         | qbr2  |
      +---+---+         +---+---+
          |                 |
          |                 |
    +-----+-----------------+----+
    |  port 1          port 2    |
    |  (tag 3)         (tag 5)   |
    |                            |
    |           br-int           |
    +----------------------------+

Let's assume the subport is on network2 and uses segmentation ID 100. In the
case of hybrid plugging, the OVS agent will have to create the firewall bridge
(qbr2), create tap1.100 and plug it into qbr2. It will connect qbr2 to br-int
and set the subport ID in the external-ids of port 2.

*Inbound traffic from the VM point of view*

The untagged traffic will flow from port 1 to eth0 through qbr1.
For the traffic coming out of port 2, the internal VLAN ID of network2 will be
stripped. The packet will then go untagged through qbr2 where iptables rules
will filter the traffic. The tag 100 will be pushed by tap1.100 and the packet
will finally get to eth0.100.

*Outbound traffic from the VM point of view*

The untagged traffic will flow from eth0 to port 1 going through qbr1, where
firewall rules will be applied. Traffic tagged with VLAN 100 will leave
eth0.100 and go through tap1.100, where the VLAN 100 is stripped. It will
reach qbr2 where iptables rules will be applied, and then go to port 2. The
internal VLAN of network2 will be pushed by br-int when the packet enters
port 2, because it's a tagged port.

OVS Firewall case
'''''''''''''''''

::

    +----------------------------+
    |            VM              |
    |   eth0           eth0.100  |
    +-----+-----------------+----+
          |
      +---+---+       +-----+-----+
      | tap1  |-------| tap1.100  |
      +---+---+       +-----+-----+
          |                 |
          |                 |
          |                 |
    +-----+-----------------+----+
    |  port 1          port 2    |
    |  (tag 3)         (tag 5)   |
    |                            |
    |           br-int           |
    +----------------------------+

When a subport is created, the OVS agent will create the VLAN interface
tap1.100 and plug it into br-int. Let's assume the subport is on network2.

*Inbound traffic from the VM point of view*

The traffic will flow untagged from port 1 to eth0. The traffic going out from
port 2 will be stripped of the VLAN ID assigned to network2. It will be
filtered by the rules installed by the firewall and reach tap1.100. tap1.100
will tag the traffic using VLAN 100. It will then reach the VM's eth0.100.

*Outbound traffic from the VM point of view*

The untagged traffic will flow and reach port 1, where it will be tagged using
the VLAN ID associated with the network. Traffic tagged with VLAN 100 will
leave eth0.100 and reach tap1.100, where VLAN 100 will be stripped. It will
then reach port 2. It will be filtered by the rules installed by the firewall
on port 2. Then the packets will be tagged using the internal VLAN associated
with network2 by br-int, since port 2 is a tagged port.

Parent port deletion
++++++++++++++++++++

Deleting a port that is an active parent in a trunk is forbidden. If the
parent port has no trunk associated (it's a "normal" port), it can be deleted.
The OVS agent doesn't need to perform any action; the deletion will result in
a removal of the port data from the DB.

Trunk deletion
++++++++++++++

When Nova deletes a VM, it deletes the VM's corresponding Neutron ports only
if they were created by Nova when booting the VM. In the vlan-aware-vm case
the parent port is passed to Nova, so the port data will remain in the DB
after the VM deletion. Nova will delete the VIF of the VM (in the example,
tap1) as part of the VM termination. The OVS agent will detect that deletion
and notify the Neutron server that the parent port is down. The OVS agent will
clean up the corresponding subports as explained in the next paragraph.

The deletion of a trunk that is used by a VM is not allowed. The trunk can be
deleted (leaving the parent port intact) when the parent port is not used by
any VM. After the trunk is deleted, the parent port can also be deleted.

Subport deletion
++++++++++++++++

Removing a subport that is associated with a parent port that was not used to
boot any VM is a no-op from the OVS agent perspective. When a subport
associated with a parent port that was used to boot a VM is deleted, the OVS
agent will take care of removing the firewall bridge (if using the iptables
firewall) and the port on br-int.
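For illustration, the VLAN subinterface lifecycle from the subport creation
and deletion steps above roughly corresponds to the following commands (a
sketch assuming tap device ``tap1`` and segmentation ID 100; the agent uses
its own interface management code rather than these exact invocations):

::

    # check that the vif exists before creating the subinterface
    ip link show tap1

    # subport creation: add the VLAN subinterface and bring it up
    ip link add link tap1 name tap1.100 type vlan id 100
    ip link set tap1.100 up

    # subport deletion: remove the subinterface
    ip link del tap1.100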
Implementation Trunk Bridge (Option C)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This implementation is based on this `etherpad `_. Credits to Bence Romsics.
The option use_veth_interconnection=true won't be supported; it will probably
be deprecated soon, see [1]. The IDs used for bridge and port names are
truncated.

::

    +--------------------------------+
    |              VM                |
    |   eth0               eth0.100  |
    +-----+--------------------+-----+
          |
    +-----+--------------------------+
    |   tap1                         |
    |                                |
    |          tbr-trunk-id          |
    |                                |
    |  tpt-parent-id  spt-subport-id |
    |                 (tag 100)      |
    +-----+-----------------+--------+
          |                 |
          |                 |
          |                 |
    +-----+-----------------+---------+
    |  tpi-parent-id   spi-subport-id |
    |  (tag 3)         (tag 5)        |
    |                                 |
    |             br-int              |
    +---------------------------------+

tpt-parent-id: trunk bridge side of the patch port that implements a trunk.

tpi-parent-id: int bridge side of the patch port that implements a trunk.

spt-subport-id: trunk bridge side of the patch port that implements a subport.

spi-subport-id: int bridge side of the patch port that implements a subport.

[1] https://bugs.launchpad.net/neutron/+bug/1587296

Trunk creation
++++++++++++++

A VM is spawned passing to Nova the port-id of a parent port associated with a
trunk. Neutron will pass to Nova the bridge where to plug the vif as part of
the vif details. The os-vif driver creates the trunk bridge tbr-trunk-id in
plug() if it does not exist. It will create the tap interface tap1 and plug it
into tbr-trunk-id, setting the parent port ID in the external-ids. The OVS
agent will be monitoring the creation of ports on the trunk bridges. When it
detects that a new port has been created on the trunk bridge, it will do the
following:

::

    ovs-vsctl add-port tbr-trunk-id tpt-parent-id -- set Interface tpt-parent-id type=patch options:peer=tpi-parent-id
    ovs-vsctl add-port br-int tpi-parent-id tag=3 -- set Interface tpi-parent-id type=patch options:peer=tpt-parent-id

A patch port is created to connect the trunk bridge to the integration bridge.
tpt-parent-id, the trunk bridge side of the patch, is not associated with any
tag. It will carry untagged traffic. tpi-parent-id, the br-int side of the
patch port, is tagged with VLAN 3. We assume that the trunk is on network1,
which on this host is associated with VLAN 3. The OVS agent will set the trunk
ID in the external-ids of tpt-parent-id and tpi-parent-id.

If the parent port is associated with one or more subports, the agent will
process them as described in the next paragraph.

Subport creation
++++++++++++++++

If a subport is added to a parent port but no VM was booted using that parent
port yet, the agent won't process the subport (because at this point there's
no node associated with the parent port). When a subport is added to a parent
port that is used by a VM, the OVS agent will create a new patch port:

::

    ovs-vsctl add-port tbr-trunk-id spt-subport-id tag=100 -- set Interface spt-subport-id type=patch options:peer=spi-subport-id
    ovs-vsctl add-port br-int spi-subport-id tag=5 -- set Interface spi-subport-id type=patch options:peer=spt-subport-id

This patch port connects the trunk bridge to the integration bridge.
spt-subport-id, the trunk bridge side of the patch, is tagged using VLAN 100.
We assume that the segmentation ID of the subport is 100. spi-subport-id, the
br-int side of the patch port, is tagged with VLAN 5. We assume that the
subport is on network2, which on this host uses VLAN 5. The OVS agent will set
the subport ID in the external-ids of spt-subport-id and spi-subport-id.
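The external-ids written by the agent can be inspected on a live system; for
example (a sketch assuming the truncated port names used above):

::

    ovs-vsctl get Interface spt-subport-id external_ids
    ovs-vsctl get Interface spi-subport-id external_ids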
*Inbound traffic from the VM point of view*

The traffic coming out of tpi-parent-id will be stripped by br-int of VLAN 3.
It will reach tpt-parent-id untagged and, from there, tap1. The traffic coming
out of spi-subport-id will be stripped by br-int of VLAN 5. It will reach
spt-subport-id, where it will be tagged with VLAN 100, and it will then get to
tap1 tagged.

*Outbound traffic from the VM point of view*

The untagged traffic coming from tap1 will reach tpt-parent-id and, from
there, tpi-parent-id, where it will be tagged using VLAN 3. The traffic tagged
with VLAN 100 from tap1 will reach spt-subport-id. VLAN 100 will be stripped,
since spt-subport-id is a tagged port, and the packet will reach
spi-subport-id, where it's tagged using VLAN 5.

Parent port deletion
++++++++++++++++++++

Deleting a port that is an active parent in a trunk is forbidden. If the
parent port has no trunk associated, it can be deleted. The OVS agent doesn't
need to perform any action.

Trunk deletion
++++++++++++++

When Nova deletes a VM, it deletes the VM's corresponding Neutron ports only
if they were created by Nova when booting the VM. In the vlan-aware-vm case
the parent port is passed to Nova, so the port data will remain in the DB
after the VM deletion. Nova will delete the port on the trunk bridge where the
VM is plugged. The L2 agent will detect that and delete the trunk bridge. It
will notify the Neutron server that the parent port is down.

The deletion of a trunk that is used by a VM is not allowed. The trunk can be
deleted (leaving the parent port intact) when the parent port is not used by
any VM. After the trunk is deleted, the parent port can also be deleted.

Subport deletion
++++++++++++++++

The OVS agent will delete the patch port pair corresponding to the deleted
subport.

Agent resync
~~~~~~~~~~~~

During resync, the agent should check that all the trunks and subports are
still valid. It will delete stale trunks and subports, using the procedure
specified in the previous paragraphs according to the implementation.

Further Reading
---------------

* `Darragh O'Reilly - The Open vSwitch plugin with VLANs `_

Open vSwitch Firewall Driver
============================

The OVS driver has the same API as the current iptables firewall driver,
keeping the state of security groups and ports inside of the firewall. The
class ``SGPortMap`` was created to keep that state consistent; it maps from
ports to security groups and vice versa. Every port and security group is
represented by its own object encapsulating the necessary information.
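A minimal sketch of such a bidirectional mapping (an illustrative shape only,
not the actual ``SGPortMap`` implementation):

.. code-block:: Python

    class SGPortMap(object):
        """Illustrative two-way map between ports and security groups."""

        def __init__(self):
            self.port_to_sgs = {}  # port_id -> set of security group ids
            self.sg_to_ports = {}  # sg_id -> set of port ids

        def associate(self, port_id, sg_id):
            # keep both directions consistent on every update
            self.port_to_sgs.setdefault(port_id, set()).add(sg_id)
            self.sg_to_ports.setdefault(sg_id, set()).add(port_id)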
.. note:: The Open vSwitch firewall driver uses ``register 5`` to identify the
   port related to the flow and ``register 6`` to identify the network, used
   in particular for conntrack zones.

Ingress/Egress Terminology
--------------------------

In this document, the terms ``ingress`` and ``egress`` are relative to a VM
instance connected to OVS (or a netns connected to OVS):

* ``ingress`` applies to traffic that will ultimately go into a VM (or into a
  netns), assuming it is not dropped
* ``egress`` applies to traffic coming from a VM (or from a netns)

::

               .                                      .
       _______|\                              _______|\
       \ ingress \                            \ ingress \
       /_______  /                            /_______  /
              |/         .-----------------.        |/
              '          |                 |        '
                         |                 |-----------( netns interface )
  ( non-VM, non-netns )--|       OVS       |
  ( interface: phy,      |                 |------------( VM interface )
    patch )              |                 |
               .         '-----------------'         .
              /|________                             /|________
             / egress  /                            / egress  /
             \ ________\                            \ ________\
              \|                                     \|
               '                                      '

Note that these terms are used differently in OVS code and documentation,
where they are relative to the OVS bridge, with ``ingress`` applying to
traffic as it comes into the OVS bridge, and ``egress`` applying to traffic as
it leaves the OVS bridge.

Firewall API calls
------------------

There are two main calls performed by the firewall driver in order to either
create or update a port with security groups - ``prepare_port_filter`` and
``update_port_filter``. Both methods rely on the security group objects that
are already defined in the driver and work similarly to their iptables
counterparts. The definition of the objects will be described later in this
document. ``prepare_port_filter`` must be called only once during port
creation, and it defines the initial rules for the port. When the port is
updated, all filtering rules are removed, and new rules are generated based on
the available information about security groups in the driver.

Security group rules can be defined in the firewall driver by calling
``update_security_group_rules``, which rewrites all the rules for a given
security group. If a remote security group is changed, then
``update_security_group_members`` is called to determine the set of IP
addresses that should be allowed for this remote security group. Calling this
method will not have any effect on existing instance ports. In other words, if
the port is using security groups and its rules are changed by calling one of
the above methods, then no new rules are generated for this port.
``update_port_filter`` must be called for the changes to take effect.

All the machinery above is controlled by security group RPC methods, which
means the firewall driver doesn't have any logic about which port should be
updated based on the provided changes; it only performs actions when called
from the controller.

OpenFlow rules
--------------

At first, every connection is split into ingress and egress processes based on
the input or output port respectively. Each port contains the initial
hardcoded flows for ARP, DHCP and established connections, which are accepted
by default. To detect established connections, a flow must first be marked by
conntrack with an ``action=ct()`` rule. An accepted flow means that ingress
packets for the connection are directly sent to the port, and egress packets
are left to be normally switched by the integration bridge.

Connections that are not matched by the above rules are sent to either the
ingress or egress filtering table, depending on their direction.
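When studying these tables on a live system, the flows can be dumped per table
with ``ovs-ofctl``; a small sketch (assuming the integration bridge is named
``br-int``):

::

    # egress pipeline entry table
    ovs-ofctl dump-flows br-int table=71

    # ingress filtering table
    ovs-ofctl dump-flows br-int table=82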
Flows based on security group rules are kept in separate tables to make these
rules easy to detect during removal.

Security group rules are treated differently for those without a remote group
ID and those with a remote group ID. A security group rule without a remote
group ID is expanded into several OpenFlow rules by the method
``create_flows_from_rule_and_port``. A security group rule with a remote group
ID is expressed by three sets of flows. The first two are conjunctive flows,
which will be described in the next section. The third set matches on the
conjunction IDs and does accept actions.

Flow priorities for security group rules
----------------------------------------

The OpenFlow spec says a packet should not match against multiple flows at the
same priority [1]_. The firewall driver uses 8 levels of priorities to achieve
this. The method ``flow_priority_offset`` calculates a priority for a given
security group rule. The use of priorities is essential with conjunction
flows, which will be described later in the conjunction flows examples.

.. [1] Although OVS seems to magically handle overlapping flows under some
   cases, we shouldn't rely on that.

Uses of conjunctive flows
-------------------------

With a security group rule with a remote group ID, flows that match on nw_src
for remote_group_id addresses and match on dl_dst for port MAC addresses are
needed (for ingress rules; likewise for egress rules). Without conjunction,
this results in O(n*m) flows, where n and m are the numbers of ports in the
remote group ID and the port security group, respectively.

A conj_id is allocated for each (remote_group_id, security_group_id,
direction, ethertype, flow_priority_offset) tuple. The class ``ConjIdMap``
handles the mapping. The same conj_id is shared between security group rules
if multiple rules belong to the same tuple above.

Conjunctive flows consist of 2 dimensions. Flows that belong to dimension 1 of
2 are generated by the method ``create_flows_for_ip_address`` and are in
charge of the IP address based filtering specified by their remote group IDs.
Flows that belong to dimension 2 of 2 are generated by the method
``create_flows_from_rule_and_port`` and modified by the method
``substitute_conjunction_actions``, and they represent the portion of the rule
other than its remote group ID. Those dimension 2 of 2 flows are per port and
contain no remote group information. When there are multiple security group
rules for a port, those flows can overlap. To avoid such a situation, flows
are sorted and fed to the ``merge_port_ranges`` or ``merge_common_rules``
methods to rearrange them.
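To make the two dimensions concrete before the full example, here is a
schematic sketch of how a conjunctive match replaces the O(n*m) flow product
(the addresses, register value, port and conj_id are illustrative, not taken
from the example that follows):

::

    # dimension 1/2: one flow per address in the remote group
    nw_src=10.0.0.3 actions=conjunction(10,1/2)
    nw_src=10.0.0.4 actions=conjunction(10,1/2)

    # dimension 2/2: one flow for the per-port, non-remote-group portion
    tcp,reg5=0x2,tp_dst=22 actions=conjunction(10,2/2)

    # a packet matching one flow in each dimension matches conj_id=10
    conj_id=10,tcp,reg5=0x2 actions=<accept>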
Rules example with explanation
------------------------------

The following example presents two ports on the same host. They have different
security groups, and ICMP traffic is allowed from the first security group to
the second security group. Ports have the following attributes:

::

    Port 1
      - plugged to the port 1 in OVS bridge
      - IP address: 192.168.0.1
      - MAC address: fa:16:3e:a4:22:10
      - security group 1: can send ICMP packets out
      - allowed address pair: 10.0.0.1/32, fa:16:3e:8c:84:13

    Port 2
      - plugged to the port 2 in OVS bridge
      - IP address: 192.168.0.2
      - MAC address: fa:16:3e:24:57:c7
      - security group 2:
        - can receive ICMP packets from security group 1
        - can receive TCP packets from security group 1
        - can receive TCP packets to port 80 from security group 2
        - can receive IP packets from security group 3
      - allowed address pair: 10.1.0.0/24, fa:16:3e:8c:84:14

|table_0| contains a low priority rule to continue packets processing in
|table_60|, aka the TRANSIENT table. |table_0| is left for use by other
features that take precedence over the firewall, e.g. DVR. The only
requirement is that after such a feature is done with its processing, it needs
to pass packets for processing to the TRANSIENT table. This TRANSIENT table
distinguishes the ingress traffic from the egress traffic and loads into
``register 5`` a value identifying the port (for egress traffic based on the
switch port number, and for ingress traffic based on the network id and
destination MAC address); ``register 6`` contains a value identifying the
network (which is also the OVSDB port tag) to isolate connections into
separate conntrack zones. For VLAN networks, the physical VLAN tag is also
used as an extra match rule for this identification.

::

    table=60, priority=100,in_port=1 actions=load:0x1->NXM_NX_REG5[],load:0x284->NXM_NX_REG6[],resubmit(,71)
    table=60, priority=100,in_port=2 actions=load:0x2->NXM_NX_REG5[],load:0x284->NXM_NX_REG6[],resubmit(,71)
    table=60, priority=90,dl_vlan=0x284,dl_dst=fa:16:3e:a4:22:10 actions=load:0x1->NXM_NX_REG5[],load:0x284->NXM_NX_REG6[],resubmit(,81)
    table=60, priority=90,dl_vlan=0x284,dl_dst=fa:16:3e:8c:84:13 actions=load:0x1->NXM_NX_REG5[],load:0x284->NXM_NX_REG6[],resubmit(,81)
    table=60, priority=90,dl_vlan=0x284,dl_dst=fa:16:3e:24:57:c7 actions=load:0x2->NXM_NX_REG5[],load:0x284->NXM_NX_REG6[],resubmit(,81)
    table=60, priority=90,dl_vlan=0x284,dl_dst=fa:16:3e:8c:84:14 actions=load:0x2->NXM_NX_REG5[],load:0x284->NXM_NX_REG6[],resubmit(,81)
    table=60, priority=0 actions=NORMAL

The following table, |table_71|, implements ARP spoofing protection and IP
spoofing protection, allows traffic related to IP address allocation (dhcp,
dhcpv6, slaac, ndp) for egress traffic, and allows ARP replies. It also
identifies not-tracked connections, which are processed later with information
obtained from conntrack. Notice the ``zone=NXM_NX_REG6[0..15]`` in ``actions``
when obtaining information from conntrack. It says every port has its own
conntrack zone, defined by the value in ``register 6`` (the OVSDB port tag
identifying the network). It's there to avoid accepting established traffic
that belongs to a different port with the same conntrack parameters.

The very first rule in |table_71| is a rule removing conntrack information for
a use case where the Neutron logical port is placed directly on the
hypervisor. In such a case, the kernel does a conntrack lookup before the
packet reaches the Open vSwitch bridge. Tracked packets are sent back for
processing by the same table after the conntrack information is cleared.

::

    table=71, priority=110,ct_state=+trk actions=ct_clear,resubmit(,71)

The rules below allow ICMPv6 traffic for multicast listeners, neighbour
solicitation and neighbour advertisement.
::

    table=71, priority=95,icmp6,reg5=0x1,in_port=1,icmp_type=130 actions=resubmit(,94)
    table=71, priority=95,icmp6,reg5=0x1,in_port=1,icmp_type=131 actions=resubmit(,94)
    table=71, priority=95,icmp6,reg5=0x1,in_port=1,icmp_type=132 actions=resubmit(,94)
    table=71, priority=95,icmp6,reg5=0x1,in_port=1,icmp_type=135 actions=resubmit(,94)
    table=71, priority=95,icmp6,reg5=0x1,in_port=1,icmp_type=136 actions=resubmit(,94)
    table=71, priority=95,icmp6,reg5=0x2,in_port=2,icmp_type=130 actions=resubmit(,94)
    table=71, priority=95,icmp6,reg5=0x2,in_port=2,icmp_type=131 actions=resubmit(,94)
    table=71, priority=95,icmp6,reg5=0x2,in_port=2,icmp_type=132 actions=resubmit(,94)
    table=71, priority=95,icmp6,reg5=0x2,in_port=2,icmp_type=135 actions=resubmit(,94)
    table=71, priority=95,icmp6,reg5=0x2,in_port=2,icmp_type=136 actions=resubmit(,94)

The following rules implement ARP spoofing protection:

::

    table=71, priority=95,arp,reg5=0x1,in_port=1,dl_src=fa:16:3e:a4:22:10,arp_spa=192.168.0.1 actions=resubmit(,94)
    table=71, priority=95,arp,reg5=0x1,in_port=1,dl_src=fa:16:3e:8c:84:13,arp_spa=10.0.0.1 actions=resubmit(,94)
    table=71, priority=95,arp,reg5=0x2,in_port=2,dl_src=fa:16:3e:24:57:c7,arp_spa=192.168.0.2 actions=resubmit(,94)
    table=71, priority=95,arp,reg5=0x2,in_port=2,dl_src=fa:16:3e:8c:84:14,arp_spa=10.1.0.0/24 actions=resubmit(,94)

DHCP and DHCPv6 traffic is allowed to instances, but DHCP servers are blocked
on instances.

::

    table=71, priority=80,udp,reg5=0x1,in_port=1,tp_src=68,tp_dst=67 actions=resubmit(,73)
    table=71, priority=80,udp6,reg5=0x1,in_port=1,tp_src=546,tp_dst=547 actions=resubmit(,73)
    table=71, priority=70,udp,reg5=0x1,in_port=1,tp_src=67,tp_dst=68 actions=resubmit(,93)
    table=71, priority=70,udp6,reg5=0x1,in_port=1,tp_src=547,tp_dst=546 actions=resubmit(,93)
    table=71, priority=80,udp,reg5=0x2,in_port=2,tp_src=68,tp_dst=67 actions=resubmit(,73)
    table=71, priority=80,udp6,reg5=0x2,in_port=2,tp_src=546,tp_dst=547 actions=resubmit(,73)
    table=71, priority=70,udp,reg5=0x2,in_port=2,tp_src=67,tp_dst=68 actions=resubmit(,93)
    table=71, priority=70,udp6,reg5=0x2,in_port=2,tp_src=547,tp_dst=546 actions=resubmit(,93)

The following rules obtain conntrack information for valid IP and MAC address
combinations. All other packets are dropped.

::

    table=71, priority=65,ip,reg5=0x1,in_port=1,dl_src=fa:16:3e:a4:22:10,nw_src=192.168.0.1 actions=ct(table=72,zone=NXM_NX_REG6[0..15])
    table=71, priority=65,ip,reg5=0x1,in_port=1,dl_src=fa:16:3e:8c:84:13,nw_src=10.0.0.1 actions=ct(table=72,zone=NXM_NX_REG6[0..15])
    table=71, priority=65,ip,reg5=0x2,in_port=2,dl_src=fa:16:3e:24:57:c7,nw_src=192.168.0.2 actions=ct(table=72,zone=NXM_NX_REG6[0..15])
    table=71, priority=65,ip,reg5=0x2,in_port=2,dl_src=fa:16:3e:8c:84:14,nw_src=10.1.0.0/24 actions=ct(table=72,zone=NXM_NX_REG6[0..15])
    table=71, priority=65,ipv6,reg5=0x1,in_port=1,dl_src=fa:16:3e:a4:22:10,ipv6_src=fe80::f816:3eff:fea4:2210 actions=ct(table=72,zone=NXM_NX_REG6[0..15])
    table=71, priority=65,ipv6,reg5=0x2,in_port=2,dl_src=fa:16:3e:24:57:c7,ipv6_src=fe80::f816:3eff:fe24:57c7 actions=ct(table=72,zone=NXM_NX_REG6[0..15])
    table=71, priority=10,reg5=0x1,in_port=1 actions=resubmit(,93)
    table=71, priority=10,reg5=0x2,in_port=2 actions=resubmit(,93)
    table=71, priority=0 actions=drop

|table_72| accepts only established or related connections, and implements
rules defined by security groups. As this egress connection might also be an
ingress connection for some other port, it's not switched yet but is
eventually processed by the ingress pipeline.
All established or new connections defined by security group rules are
``accepted``, which will be explained later. All invalid packets are dropped.
In the case below, we allow all ICMP egress traffic.

::

    table=72, priority=75,ct_state=+est-rel-rpl,icmp,reg5=0x1 actions=resubmit(,73)
    table=72, priority=75,ct_state=+new-est,icmp,reg5=0x1 actions=resubmit(,73)
    table=72, priority=50,ct_state=+inv+trk actions=resubmit(,93)

Important in the flows below is ``ct_mark=0x1``. Connections that were marked
as no longer valid by a rule introduced later carry this value. Those are
typically connections that were allowed by some security group rule which was
since removed.

::

    table=72, priority=50,ct_mark=0x1,reg5=0x1 actions=resubmit(,93)
    table=72, priority=50,ct_mark=0x1,reg5=0x2 actions=resubmit(,93)

All other connections that are not marked and are established or related are
allowed.

::

    table=72, priority=50,ct_state=+est-rel+rpl,ct_zone=644,ct_mark=0,reg5=0x1 actions=resubmit(,94)
    table=72, priority=50,ct_state=+est-rel+rpl,ct_zone=644,ct_mark=0,reg5=0x2 actions=resubmit(,94)
    table=72, priority=50,ct_state=-new-est+rel-inv,ct_zone=644,ct_mark=0,reg5=0x1 actions=resubmit(,94)
    table=72, priority=50,ct_state=-new-est+rel-inv,ct_zone=644,ct_mark=0,reg5=0x2 actions=resubmit(,94)

The following flows mark established connections that weren't matched by the
previous flows, which means they no longer have an accepting security group
rule.

::

    table=72, priority=40,ct_state=-est,reg5=0x1 actions=resubmit(,93)
    table=72, priority=40,ct_state=+est,reg5=0x1 actions=ct(commit,zone=NXM_NX_REG6[0..15],exec(load:0x1->NXM_NX_CT_MARK[]))
    table=72, priority=40,ct_state=-est,reg5=0x2 actions=resubmit(,93)
    table=72, priority=40,ct_state=+est,reg5=0x2 actions=ct(commit,zone=NXM_NX_REG6[0..15],exec(load:0x1->NXM_NX_CT_MARK[]))
    table=72, priority=0 actions=drop

In the following |table_73|, all detected ingress connections are sent to the
ingress pipeline. Since the connection was already accepted by the egress
pipeline, all remaining egress connections are sent to the normal
flood'n'learn switching in |table_94|.

::

    table=73, priority=100,reg6=0x284,dl_dst=fa:16:3e:a4:22:10 actions=load:0x1->NXM_NX_REG5[],resubmit(,81)
    table=73, priority=100,reg6=0x284,dl_dst=fa:16:3e:8c:84:13 actions=load:0x1->NXM_NX_REG5[],resubmit(,81)
    table=73, priority=100,reg6=0x284,dl_dst=fa:16:3e:24:57:c7 actions=load:0x2->NXM_NX_REG5[],resubmit(,81)
    table=73, priority=100,reg6=0x284,dl_dst=fa:16:3e:8c:84:14 actions=load:0x2->NXM_NX_REG5[],resubmit(,81)
    table=73, priority=90,ct_state=+new-est,reg5=0x1 actions=ct(commit,zone=NXM_NX_REG6[0..15]),resubmit(,91)
    table=73, priority=90,ct_state=+new-est,reg5=0x2 actions=ct(commit,zone=NXM_NX_REG6[0..15]),resubmit(,91)
    table=73, priority=80,reg5=0x1 actions=resubmit(,94)
    table=73, priority=80,reg5=0x2 actions=resubmit(,94)
    table=73, priority=0 actions=drop

|table_81| is similar to |table_71|; it allows basic ingress traffic for
obtaining an IP address and for ARP queries. Note that the vlan tag must be
removed by adding ``strip_vlan`` to the actions list prior to injecting the
packet directly into a port. Not-tracked packets are sent on to obtain
conntrack information.
::

    table=81, priority=100,arp,reg5=0x1 actions=strip_vlan,output:1
    table=81, priority=100,arp,reg5=0x2 actions=strip_vlan,output:2
    table=81, priority=100,icmp6,reg5=0x1,icmp_type=130 actions=strip_vlan,output:1
    table=81, priority=100,icmp6,reg5=0x1,icmp_type=131 actions=strip_vlan,output:1
    table=81, priority=100,icmp6,reg5=0x1,icmp_type=132 actions=strip_vlan,output:1
    table=81, priority=100,icmp6,reg5=0x1,icmp_type=135 actions=strip_vlan,output:1
    table=81, priority=100,icmp6,reg5=0x1,icmp_type=136 actions=strip_vlan,output:1
    table=81, priority=100,icmp6,reg5=0x2,icmp_type=130 actions=strip_vlan,output:2
    table=81, priority=100,icmp6,reg5=0x2,icmp_type=131 actions=strip_vlan,output:2
    table=81, priority=100,icmp6,reg5=0x2,icmp_type=132 actions=strip_vlan,output:2
    table=81, priority=100,icmp6,reg5=0x2,icmp_type=135 actions=strip_vlan,output:2
    table=81, priority=100,icmp6,reg5=0x2,icmp_type=136 actions=strip_vlan,output:2
    table=81, priority=95,udp,reg5=0x1,tp_src=67,tp_dst=68 actions=strip_vlan,output:1
    table=81, priority=95,udp6,reg5=0x1,tp_src=547,tp_dst=546 actions=strip_vlan,output:1
    table=81, priority=95,udp,reg5=0x2,tp_src=67,tp_dst=68 actions=strip_vlan,output:2
    table=81, priority=95,udp6,reg5=0x2,tp_src=547,tp_dst=546 actions=strip_vlan,output:2
    table=81, priority=90,ct_state=-trk,ip,reg5=0x1 actions=ct(table=82,zone=NXM_NX_REG6[0..15])
    table=81, priority=90,ct_state=-trk,ipv6,reg5=0x1 actions=ct(table=82,zone=NXM_NX_REG6[0..15])
    table=81, priority=90,ct_state=-trk,ip,reg5=0x2 actions=ct(table=82,zone=NXM_NX_REG6[0..15])
    table=81, priority=90,ct_state=-trk,ipv6,reg5=0x2 actions=ct(table=82,zone=NXM_NX_REG6[0..15])
    table=81, priority=80,ct_state=+trk,reg5=0x1 actions=resubmit(,82)
    table=81, priority=80,ct_state=+trk,reg5=0x2 actions=resubmit(,82)
    table=81, priority=0 actions=drop

Similarly to |table_72|, |table_82| accepts established and related
connections. In this case we allow all ICMP traffic coming from
``security group 1``, which in this case contains only ``port 1``. The first
four flows match on the IP addresses, and the next two flows match on the ICMP
protocol. These six flows define conjunction flows, and the next two define
actions for them.

::

    table=82, priority=71,ct_state=+est-rel-rpl,ip,reg6=0x284,nw_src=192.168.0.1 actions=conjunction(18,1/2)
    table=82, priority=71,ct_state=+est-rel-rpl,ip,reg6=0x284,nw_src=10.0.0.1 actions=conjunction(18,1/2)
    table=82, priority=71,ct_state=+new-est,ip,reg6=0x284,nw_src=192.168.0.1 actions=conjunction(19,1/2)
    table=82, priority=71,ct_state=+new-est,ip,reg6=0x284,nw_src=10.0.0.1 actions=conjunction(19,1/2)
    table=82, priority=71,ct_state=+est-rel-rpl,icmp,reg5=0x2 actions=conjunction(18,2/2)
    table=82, priority=71,ct_state=+new-est,icmp,reg5=0x2 actions=conjunction(19,2/2)
    table=82, priority=71,conj_id=18,ct_state=+est-rel-rpl,ip,reg5=0x2 actions=strip_vlan,output:2
    table=82, priority=71,conj_id=19,ct_state=+new-est,ip,reg5=0x2 actions=ct(commit,zone=NXM_NX_REG6[0..15]),strip_vlan,output:2,resubmit(,92)
    table=82, priority=50,ct_state=+inv+trk actions=resubmit(,93)

There are some more security group rules with remote group IDs. Next we look
at the TCP related ones.
An excerpt of the flows that correspond to those rules: :: table=82, priority=73,ct_state=+est-rel-rpl,tcp,reg5=0x2,tp_dst=0x60/0xffe0 actions=conjunction(22,2/2) table=82, priority=73,ct_state=+new-est,tcp,reg5=0x2,tp_dst=0x60/0xffe0 actions=conjunction(23,2/2) table=82, priority=73,ct_state=+est-rel-rpl,tcp,reg5=0x2,tp_dst=0x40/0xfff0 actions=conjunction(22,2/2) table=82, priority=73,ct_state=+new-est,tcp,reg5=0x2,tp_dst=0x40/0xfff0 actions=conjunction(23,2/2) table=82, priority=73,ct_state=+est-rel-rpl,tcp,reg5=0x2,tp_dst=0x58/0xfff8 actions=conjunction(22,2/2) table=82, priority=73,ct_state=+new-est,tcp,reg5=0x2,tp_dst=0x58/0xfff8 actions=conjunction(23,2/2) table=82, priority=73,ct_state=+est-rel-rpl,tcp,reg5=0x2,tp_dst=0x54/0xfffc actions=conjunction(22,2/2) table=82, priority=73,ct_state=+new-est,tcp,reg5=0x2,tp_dst=0x54/0xfffc actions=conjunction(23,2/2) table=82, priority=73,ct_state=+est-rel-rpl,tcp,reg5=0x2,tp_dst=0x52/0xfffe actions=conjunction(22,2/2) table=82, priority=73,ct_state=+new-est,tcp,reg5=0x2,tp_dst=0x52/0xfffe actions=conjunction(23,2/2) table=82, priority=73,ct_state=+est-rel-rpl,tcp,reg5=0x2,tp_dst=80 actions=conjunction(22,2/2),conjunction(14,2/2) table=82, priority=73,ct_state=+est-rel-rpl,tcp,reg5=0x2,tp_dst=81 actions=conjunction(22,2/2) table=82, priority=73,ct_state=+new-est,tcp,reg5=0x2,tp_dst=80 actions=conjunction(23,2/2),conjunction(15,2/2) table=82, priority=73,ct_state=+new-est,tcp,reg5=0x2,tp_dst=81 actions=conjunction(23,2/2) Only dimension 2/2 flows are shown here, as the others are similar to the previous ICMP example. There are many more flows, but only the port ranges that cover 64 to 127 are shown for brevity. The conjunction IDs 14 and 15 correspond to packets from security group 1, and the conjunction IDs 22 and 23 correspond to those from security group 2. These flows are from the following security group rules, :: - can receive TCP packets from security group 1 - can receive TCP packets to port 80 from security group 2 and these rules have been processed by ``merge_port_ranges`` into: :: - can receive TCP packets to port != 80 from security group 1 - can receive TCP packets to port 80 from security group 1 or 2 before translating to flows, so that there is only one matching flow even when the TCP destination port is 80. The remaining rule is L4 protocol agnostic. :: table=82, priority=70,ct_state=+est-rel-rpl,ip,reg5=0x2 actions=conjunction(24,2/2) table=82, priority=70,ct_state=+new-est,ip,reg5=0x2 actions=conjunction(25,2/2) Any IP packet that matches the previous TCP flows matches one of these flows, but the corresponding security group rules have different remote group IDs. Unlike the above TCP example, there's no convenient way of expressing ``protocol != TCP`` or ``icmp_code != 1``. So the OVS firewall uses a different priority than the previous TCP flows so as not to mix them up.
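Returning to the TCP flows above: port ranges are encoded as masked ``tp_dst`` matches. The following is a minimal sketch of how such a range-to-mask decomposition can be computed; it is illustrative only (the function name is made up) and is not the actual Neutron implementation:

.. code-block:: python

    def port_range_to_masks(start, end):
        """Decompose the inclusive range [start, end] into (value, mask)
        pairs suitable for tp_dst=<value>/<mask> matches on a 16-bit
        port field."""
        results = []
        while start <= end:
            # Largest power-of-two block aligned at 'start' ...
            size = start & -start or 1 << 16
            # ... shrunk until it fits inside the remaining range.
            while size > end - start + 1:
                size //= 2
            results.append((start, 0xffff & ~(size - 1)))
            start += size
        return results

    # Splitting the range 64-127 at port 80 (which has its own flows
    # above) yields exactly the masks shown:
    # port_range_to_masks(64, 79)  -> [(64, 0xfff0)]
    # port_range_to_masks(81, 127) -> [(81, 0xffff), (82, 0xfffe),
    #                                  (84, 0xfffc), (88, 0xfff8),
    #                                  (96, 0xffe0)]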
The mechanism for dropping connections that are not allowed anymore is the same as in |table_72|. :: table=82, priority=50,ct_mark=0x1,reg5=0x1 actions=resubmit(,93) table=82, priority=50,ct_mark=0x1,reg5=0x2 actions=resubmit(,93) table=82, priority=50,ct_state=+est-rel+rpl,ct_zone=644,ct_mark=0,reg5=0x1 actions=strip_vlan,output:1 table=82, priority=50,ct_state=+est-rel+rpl,ct_zone=644,ct_mark=0,reg5=0x2 actions=strip_vlan,output:2 table=82, priority=50,ct_state=-new-est+rel-inv,ct_zone=644,ct_mark=0,reg5=0x1 actions=strip_vlan,output:1 table=82, priority=50,ct_state=-new-est+rel-inv,ct_zone=644,ct_mark=0,reg5=0x2 actions=strip_vlan,output:2 table=82, priority=40,ct_state=-est,reg5=0x1 actions=resubmit(,93) table=82, priority=40,ct_state=+est,reg5=0x1 actions=ct(commit,zone=NXM_NX_REG6[0..15],exec(load:0x1->NXM_NX_CT_MARK[])) table=82, priority=40,ct_state=-est,reg5=0x2 actions=resubmit(,93) table=82, priority=40,ct_state=+est,reg5=0x2 actions=ct(commit,zone=NXM_NX_REG6[0..15],exec(load:0x1->NXM_NX_CT_MARK[])) table=82, priority=0 actions=drop .. note:: Conntrack zones on a single node are now based on the network to which a port is plugged in. That makes a difference between traffic that stays on the hypervisor and east-west traffic. For example, if a port has a VIP that was migrated to a port on a different node, then the new port won't contain conntrack information about previous traffic that happened with the VIP. OVS firewall integration points ------------------------------- There are three tables where packets are sent once they have gone through the OVS firewall pipeline. The tables can be used by other mechanisms that are supposed to work with the OVS firewall, typically L2 agent extensions. Egress pipeline ~~~~~~~~~~~~~~~ Packets are sent to |table_91| and |table_94| when they are considered accepted by the egress pipeline, and they will be processed so that they are forwarded to their destination by being submitted to a NORMAL action, which results in Ethernet flood/learn processing. Two tables are used to differentiate between the first packets of a connection and the following packets. This was introduced for performance reasons, to allow the logging extension to only log the first packets of a connection. Only the first accepted packet of each connection session will go to |table_91| and the following ones will go to |table_94|. Note that |table_91| merely resubmits to |table_94|, which contains the actual NORMAL action; this allows having a single place where the NORMAL action can be overridden by other components (currently used by the ``networking-bagpipe`` driver for ``networking-bgpvpn``). Ingress pipeline ~~~~~~~~~~~~~~~~ The first packet of each connection accepted by the ingress pipeline is sent to |table_92|. The default action in this table is DROP because at this point the packets have already been delivered to their destination port. This integration point is essentially provided for the logging extension. Packets are sent to |table_93| if processing by the ingress filtering concluded that they should be dropped. Upgrade path from iptables hybrid driver ---------------------------------------- During an upgrade, the agent will need to re-plug each instance's tap device into the integration bridge while trying not to break existing connections. One of the following approaches can be taken: 1) Pause the running instance in order to prevent a short period of time where its network interface does not have firewall rules. This can happen due to the firewall driver calling OVS to obtain information about the OVS port.
Once the instance is paused and no traffic is flowing, we can delete the qvo interface from the integration bridge, detach the tap device from the qbr bridge and plug the tap device back into the integration bridge. Once this is done, the firewall rules are applied for the OVS tap interface and the instance is started from its paused state. 2) Set drop rules for the instance's tap interface, delete the qbr bridge and related veths, plug the tap device into the integration bridge, apply the OVS firewall rules and finally remove the drop rules for the instance. 3) Compute nodes can be upgraded one at a time. A free node can be switched to use the OVS firewall, and instances from other nodes can be live-migrated to it. Once the first node is evacuated, its firewall driver can then be switched to the OVS driver. .. |table_0| replace:: ``table 0`` (LOCAL_SWITCHING) .. |table_60| replace:: ``table 60`` (TRANSIENT) .. |table_71| replace:: ``table 71`` (BASE_EGRESS) .. |table_72| replace:: ``table 72`` (RULES_EGRESS) .. |table_73| replace:: ``table 73`` (ACCEPT_OR_INGRESS) .. |table_81| replace:: ``table 81`` (BASE_INGRESS) .. |table_82| replace:: ``table 82`` (RULES_INGRESS) .. |table_91| replace:: ``table 91`` (ACCEPTED_EGRESS_TRAFFIC) .. |table_92| replace:: ``table 92`` (ACCEPTED_INGRESS_TRAFFIC) .. |table_93| replace:: ``table 93`` (DROPPED_TRAFFIC) .. |table_94| replace:: ``table 94`` (ACCEPTED_EGRESS_TRAFFIC_NORMAL) neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/ovn/acl_optimizations.rst .. _acl_optimizations: ======================================== ACL Handling optimizations in ovn driver ======================================== This document presents the current problem with ACLs and the design changes proposed to core OVN, as well as the necessary modifications to be made to ovn driver to improve their usage. Problem description =================== There are basically two problems being addressed in this spec: 1. While in Neutron a ``Security Group Rule`` is tied to a ``Security Group``, in OVN ``ACLs`` are created per port. Therefore, we'll typically have *many* more ACLs than Security Group Rules, resulting in a performance hit as the number of ports grows. 2. An ACL in OVN is applied to a ``Logical Switch``. As a result, ``ovn driver`` has to figure out which Logical Switches to apply the generated ACLs to for each Security Group Rule. Let's highlight both problems with an example: - Neutron Networks: NA, NB, NC - Neutron Security Group: SG1 - Number of Neutron Security Group Rules in SG1: 10 - Neutron Ports in NA: 100 - Neutron Ports in NB: 100 - Neutron Ports in NC: 100 - All ports belong to SG1 When we implement the above scenario in OVN, this is what we'll get: - OVN Logical Switches: NA, NB, NC - Number of ACL rows in Northbound DB ACL table: 3000 (10 rules * 100 ports * 3 networks) - Number of elements in acl column on each Logical_Switch row: 1000 (10 rules * 100 ports).
This is how, for example, the ACL match fields for the default Neutron Security Group would look:: outport == <port1_uuid> && ip4 && ip4.src == $as_ip4_<sg_uuid> outport == <port2_uuid> && ip4 && ip4.src == $as_ip4_<sg_uuid> outport == <port3_uuid> && ip4 && ip4.src == $as_ip4_<sg_uuid> ... outport == <portN_uuid> && ip4 && ip4.src == $as_ip4_<sg_uuid> As you can see, all of them look the same except for the outport field, which is clearly redundant and makes the NB database grow a lot at scale. Also, ``ovn driver`` had to figure out for each rule in SG1 which Logical Switches it had to apply the ACLs on (NA, NB and NC). This can be really costly when the number of networks and ports grows. Proposed optimization ===================== In the OpenStack context, we'll be facing this scenario most of the time, where the majority of the ACLs will look the same except for the outport/inport fields in the match column. It would make sense to be able to substitute all those ACLs with a single one which references all the ports affected by that SG rule:: outport == @port_group1 && ip4 && ip4.src == $port_group1_ip4 Implementation Details ====================== Core OVN -------- There's a series of patches in Core OVN that will enable us to achieve this optimization: https://github.com/openvswitch/ovs/commit/3d2848bafa93a2b483a4504c5de801454671dccf https://github.com/openvswitch/ovs/commit/1beb60afd25a64f1779903b22b37ed3d9956d47c https://github.com/openvswitch/ovs/commit/689829d53612a573f810271a01561f7b0948c8c8 In summary, these patches are: - Adding a new entity called Port_Group which will hold a list of weak references to the Logical Switch ports that belong to it. - Automatically creating/updating two Address Sets (<name>_ip4 and <name>_ip6) in the Southbound database every time a new port is added to the group. - Support for adding a list of ACLs to a Port Group. As the SG rules may span across different Logical Switches, we used to insert the ACLs in all the Logical Switches that have ports belonging to a given SG. Figuring this out is expensive, and this new feature is a huge gain in terms of performance when creating/deleting ports. ovn driver ---------- In the OpenStack integration driver, the following changes are required to accomplish this optimization: - When a Neutron Security Group is created, create the equivalent Port Group in OVN (pg-<security_group_id>), instead of creating a pair of Address Sets for IPv4 and IPv6. This Port Group will reference the Neutron SG id in its ``external_ids`` column. - When a Neutron Port is created, the equivalent Logical Port in OVN will be added to those Port Groups associated with the Neutron Security Groups this port belongs to. - When a Neutron Port is deleted, we'll delete the associated Logical Port in OVN. Since the schema includes a weak reference to the port, when the LSP gets deleted, it will also be automatically deleted from any Port Group entry where it was previously present. - Instead of handling SG rules per port, we now need to handle them per SG, referencing the associated Port Group in the outport/inport fields. This will be the biggest gain in terms of processing since we don't need to iterate through all the ports anymore. For example:
.. code-block:: python -def acl_direction(r, port): +def acl_direction(r): if r['direction'] == 'ingress': portdir = 'outport' else: portdir = 'inport' - return '%s == "%s"' % (portdir, port['id']) + return '%s == "@%s"' % (portdir, utils.ovn_name(r['security_group_id'])) - Every time a SG rule is created, instead of figuring out the ports affected by its SG and inserting an ACL row which will be referenced by different Logical Switches, we will just reference it from the associated Port Group. - For Neutron remote security groups, we just need to reference the automatically created Address_Set for that Port Group. As a bonus, we are tackling the race conditions that could happen in Address_Sets right now when we're deleting and creating a port at the same time. This is thanks to the fact that the Address_Sets in the SB table are generated automatically by ovn-northd from the Port_Group contents, and the Port Group references actual Logical Switch Ports. More info at: https://bugs.launchpad.net/networking-ovn/+bug/1611852 Backwards compatibility considerations -------------------------------------- - If the schema doesn't include the ``Port_Group`` table, keep the old behavior (Address Sets) for backwards compatibility. - If the schema supports Port Groups, then a migration task will be performed from an OvnWorker. This way we'll ensure that it'll happen only once across the cloud, thanks to the OVSDB lock. This will be done right at the beginning of the ovn_db_sync process to make sure that when neutron-server starts, everything is in place to work with Port Groups. This migration process will perform the following steps: * Create the default drop Port Group and add all ports with port security enabled to it. * Create a Port Group for every existing Neutron Security Group and add all its Security Group Rules as ACLs to that Port Group. * Delete all existing Address Sets in the NorthBound database which correspond to a Neutron Security Group. * Delete all the ACLs in every Logical Switch (Neutron network). We should eventually remove the backwards compatibility and migration path. At that point we should require OVS >= 2.10 from the neutron ovn driver. Special cases ------------- Ports with no security groups ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When a port doesn't belong to any Security Group and port security is enabled, we, by default, drop all the traffic to/from that port. In order to implement this through Port Groups, we'll create a special Port Group with a fixed name (``neutron_pg_drop``) which holds the ACLs to drop all the traffic. This PG will be created automatically when we first need it, avoiding the need to create it beforehand or during deployment. neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/ovn/data_model.rst .. _data_model: =========================================== Mapping between Neutron and OVN data models =========================================== The primary job of the Neutron OVN ML2 driver is to translate requests for resources into OVN's data model. Resources are created in OVN by updating the appropriate tables in the OVN northbound database (an ovsdb database). This document looks at the mappings between the data that exists in Neutron and what the resulting entries in the OVN northbound DB would look like.
Network ------- :: Neutron Network: id name subnets admin_state_up status tenant_id Once a network is created, we should create an entry in the Logical Switch table. :: OVN northbound DB Logical Switch: external_ids: { 'neutron:network_name': network.name } Subnet ------ :: Neutron Subnet: id name ip_version network_id cidr gateway_ip allocation_pools dns_nameservers host_routes tenant_id enable_dhcp ipv6_ra_mode ipv6_address_mode Once a subnet is created, we should create an entry in the DHCP Options table with the DHCPv4 or DHCPv6 options. :: OVN northbound DB DHCP_Options: cidr options external_ids: { 'subnet_id': subnet.id } Port ---- :: Neutron Port: id name network_id admin_state_up mac_address fixed_ips device_id device_owner tenant_id status When a port is created, we should create an entry in the Logical Switch Ports table in the OVN northbound DB. :: OVN Northbound DB Logical Switch Port: switch: reference to OVN Logical Switch router_port: (empty) name: port.id up: (read-only) macs: [port.mac_address] port_security: external_ids: {'neutron:port_name': port.name} If the port has extra DHCP options defined, we should create an entry in the DHCP Options table in the OVN northbound DB. :: OVN northbound DB DHCP_Options: cidr options external_ids: { 'subnet_id': subnet.id, 'port_id': port.id } Router ------ :: Neutron Router: id name admin_state_up status tenant_id external_gw_info: network_id external_fixed_ips: list of dicts ip_address subnet_id :: OVN Northbound DB Logical Router: ip: default_gw: external_ids: Router Port ----------- :: OVN Northbound DB Logical Router Port: router: (reference to Logical Router) network: (reference to network this port is connected to) mac: external_ids: Security Groups --------------- :: Neutron Port: id security_group: id network_id Neutron Security Group id name tenant_id security_group_rules Neutron Security Group Rule id tenant_id security_group_id direction remote_group_id ethertype protocol port_range_min port_range_max remote_ip_prefix :: OVN Northbound DB ACL Rule: lswitch: (reference to Logical Switch - port.network_id) priority: (0..65535) match: boolean expressions according to security rule Translation map (sg_rule ==> match expression) ----------------------------------------------- sg_rule.direction="Ingress" => "inport=port.id" sg_rule.direction="Egress" => "outport=port.id" sg_rule.ethertype => "eth.type" sg_rule.protocol => "ip.proto" sg_rule.port_range_min/port_range_max => "port_range_min <= tcp.src <= port_range_max" "port_range_min <= udp.src <= port_range_max" sg_rule.remote_ip_prefix => "ip4.src/mask, ip4.dst/mask, ipv6.src/mask, ipv6.dst/mask" (all match options for ACL can be found here: http://openvswitch.org/support/dist-docs/ovn-nb.5.html) action: "allow-related" log: true/false external_ids: {'neutron:port_id': port.id} {'neutron:security_rule_id': security_rule.id} Security groups map three Neutron objects to one OVN-NB object, which enables us to do the mapping in various ways, depending on OVN capabilities. The current implementation will use the first option in this list for simplicity, but all options are kept here for future reference. 1) For every <port, security rule> pair, define an ACL entry:: Leads to many ACL entries. acl.match = sg_rule converted example: ((inport==port.id) && (ip.proto == "tcp") && (1024 <= tcp.src <= 4095) && (ip.src==192.168.0.1/16)) external_ids: {'neutron:port_id': port.id} {'neutron:security_rule_id': security_rule.id} 2) For every <port, security group> pair, define an ACL entry:: Reduces the number of ACL entries.
Means we have to manage the match field when a specific rule changes. example: (((inport==port.id) && (ip.proto == "tcp") && (1024 <= tcp.src <= 4095) && (ip.src==192.168.0.1/16)) || ((outport==port.id) && (ip.proto == "udp") && (1024 <= tcp.src <= 4095)) || ((inport==port.id) && (ip.proto == 6) ) || ((inport==port.id) && (eth.type == 0x86dd))) (This example is a security group with four security rules) external_ids: {'neutron:port_id': port.id} {'neutron:security_group_id': security_group.id} 3) For every <security group>, define an ACL entry:: Reduces the number of ACL entries even further, but increases management complexity. example: (((inport==port.id) && (ip.proto == "tcp") && (1024 <= tcp.src <= 4095) && (ip.src==192.168.0.1/16)) || ((outport==port.id) && (ip.proto == "udp") && (1024 <= tcp.src <= 4095)) || ((inport==port.id) && (ip.proto == 6) ) || ((inport==port.id) && (eth.type == 0x86dd))) || (((inport==port2.id) && (ip.proto == "tcp") && (1024 <= tcp.src <= 4095) && (ip.src==192.168.0.1/16)) || ((outport==port2.id) && (ip.proto == "udp") && (1024 <= tcp.src <= 4095)) || ((inport==port2.id) && (ip.proto == 6) ) || ((inport==port2.id) && (eth.type == 0x86dd))) external_ids: {'neutron:security_group': security_group.id} Which option to pick depends on OVN match field length capabilities, and the trade-off between better performance due to fewer ACL entries and the complexity of managing them. If the default behaviour is not "drop" for unmatched entries, a rule with the lowest priority must be added to drop all traffic ("match==1"). Spoofing protection rules are added by OVN internally and we need to ignore these automatically added rules in Neutron. neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/ovn/database_consistency.rst .. _database_consistency: ================================ Neutron/OVN Database consistency ================================ This document presents the problem and proposes a solution for the data consistency issue between the Neutron and OVN databases. Although the focus of this document is OVN, this problem is common enough to be present in other ML2 drivers (e.g. OpenDaylight, BigSwitch, etc...). Some of them already have a mechanism in place for dealing with it. Problem description =================== In a common Neutron deployment model there can be multiple Neutron API workers processing requests. For each request, the worker will update the Neutron database and then invoke the ML2 driver to translate the information to that specific SDN data model. There are at least two situations that could lead to some inconsistency between the Neutron and the SDN databases, for example: .. _problem_1: Problem 1: Neutron API workers race condition --------------------------------------------- .. code-block:: python In Neutron: with neutron_db_transaction: update_neutron_db() ml2_driver.update_port_precommit() ml2_driver.update_port_postcommit() In the ML2 driver: def update_port_postcommit(): port = neutron_db.get_port() update_port_in_ovn(port) Imagine the case where a port is being updated twice and each request is being handled by a different API worker. The method responsible for updating the resource in OVN (``update_port_postcommit``) is not atomic and is invoked outside of the Neutron database transaction.
This could lead to a problem where the order in which the updates are committed to the Neutron database is different from the order in which they are committed to the OVN database, resulting in an inconsistency. This problem has been reported at `bug #1605089 `_. .. _problem_2: Problem 2: Backend failures --------------------------- Another situation is when the changes are already committed in Neutron but an exception is raised upon trying to update the OVN database (e.g. lost connectivity to the ``ovsdb-server``). We currently don't have a good way of handling this problem. Obviously, it would be possible to try to immediately roll back the changes in the Neutron database and raise an exception, but that rollback itself is an operation that could also fail. Plus, rollbacks are not very straightforward when it comes to updates or deletes. In a case where a VM is being torn down and OVN fails to delete a port, re-creating that port in Neutron doesn't necessarily fix the problem. The decommission of a VM involves many other things; in fact, we could make things even worse by leaving some dirty data around. I believe this is a problem that would be better dealt with by other methods. Proposed change =============== In order to fix the problems presented in the `Problem description`_ section, this document proposes a solution based on Neutron's ``revision_number`` attribute. In summary, for every resource in Neutron there's an attribute called ``revision_number`` which gets incremented on each update made on that resource. For example:: $ openstack port create --network nettest porttest ... | revision_number | 2 | ... $ openstack port set porttest --mac-address 11:22:33:44:55:66 $ mysql -e "use neutron; select standard_attr_id from ports where id=\"91c08021-ded3-4c5a-8d57-5b5c389f8e39\";" +------------------+ | standard_attr_id | +------------------+ | 1427 | +------------------+ $ mysql -e "use neutron; SELECT revision_number FROM standardattributes WHERE id=1427;" +-----------------+ | revision_number | +-----------------+ | 3 | +-----------------+ This document proposes a solution that will use the `revision_number` attribute for three things: #. Perform a compare-and-swap operation based on the resource version #. Guarantee the order of the updates (`Problem 1 `_) #. Detect when resources in Neutron and OVN are out-of-sync But, before any of the points above can be done, we need to change the ovn driver code to: #1 - Store the revision_number corresponding to a change in OVNDB ------------------------------------------------------------ To be able to compare the version of the resource in Neutron against the version in OVN, we first need to know which version the OVN resource is present at. Fortunately, each table in the OVNDB contains a special column called ``external_ids`` which external systems (like Neutron) can use to store information about their own resources that correspond to the entries in OVNDB. So, every time a resource is created or updated in OVNDB by ovn driver, the Neutron ``revision_number`` corresponding to that change will be stored in the ``external_ids`` column of that resource. That will allow ovn driver to look at both databases and detect whether the version in OVN is up-to-date with Neutron or not. #2 - Ensure correctness when updating OVN ----------------------------------------- As stated in `Problem 1 `_, simultaneous updates to a single resource will race and, with the current code, the order in which these updates are applied is not guaranteed to be the correct order.
That means that, if two or more updates arrive, we can't prevent an older version of that update from being applied after a newer one. This document proposes creating a special ``OVSDB command`` that runs as part of the same transaction that is updating a resource in OVNDB, to prevent changes with a lower ``revision_number`` from being applied in case the resource in OVN is already at a higher ``revision_number``. This new OVSDB command basically needs to do two things: 1. Add a verify operation to the ``external_ids`` column in OVNDB so that if another client modifies that column mid-operation the transaction will be restarted. A better explanation of what "verify" does is described in the doc string of the `Transaction class`_ in the OVS code itself, I quote: Because OVSDB handles multiple clients, it can happen that between the time that OVSDB client A reads a column and writes a new value, OVSDB client B has written that column. Client A's write should not ordinarily overwrite client B's, especially if the column in question is a "map" column that contains several more or less independent data items. If client A adds a "verify" operation before it writes the column, then the transaction fails in case client B modifies it first. Client A will then see the new value of the column and compose a new transaction based on the new contents written by client B. 2. Compare the ``revision_number`` from the update against what is presently stored in OVNDB. If the version in OVNDB is already higher than the version in the update, abort the transaction. So basically this new command is responsible for guarding the OVN resource by not allowing old changes to be applied on top of new ones.
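What follows is a minimal sketch of what such a command could look like. The class name, the ``REVISION_KEY`` constant and the ``RevisionConflict`` exception are illustrative assumptions; the real command would implement the OVSDB command interface used by the driver:

.. code-block:: python

    class CheckRevisionNumberCommand(object):
        """Guard an OVN resource against out-of-order updates."""

        def __init__(self, resource_row, neutron_revision):
            self.resource_row = resource_row
            self.neutron_revision = neutron_revision

        def run_idl(self, txn):
            # 1. Verify external_ids: if another client modifies the
            #    column mid-operation, the whole transaction is restarted.
            self.resource_row.verify('external_ids')
            ovn_revision = int(
                self.resource_row.external_ids.get(REVISION_KEY, -1))
            # 2. Abort when OVN already holds a newer version.
            if ovn_revision > self.neutron_revision:
                raise RevisionConflict(
                    'OVN is at revision %d, update carries revision %d' %
                    (ovn_revision, self.neutron_revision))
            external_ids = dict(self.resource_row.external_ids)
            external_ids[REVISION_KEY] = str(self.neutron_revision)
            self.resource_row.external_ids = external_ids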
Here's a scenario where two concurrent updates come in the wrong order and how the solution above will deal with it: Neutron worker 1 (NW-1): Updates a port with address A (revision_number: 2) Neutron worker 2 (NW-2): Updates a port with address B (revision_number: 3) TXN 1: NW-2's transaction is committed first and the OVN resource now has RN 3 TXN 2: NW-1's transaction detects the change in the external_ids column and is restarted TXN 2: NW-1's new command now sees that the OVN resource is at RN 3, which is higher than the update version (RN 2), and aborts the transaction. There's a bit more needed for the above to work with the current ovn driver code; basically we need to tidy up the code to do two more things. 1. Consolidate changes to a resource in a single transaction. This is important regardless of this spec: having all changes to a resource done in a single transaction minimizes the risk of having half-applied changes written to the database in case of an eventual problem. This `should be done already `_ but it's important to have it here in case we find more examples like that as we code. 2. When doing partial updates, use the OVNDB as the source of comparison to create the deltas. Being able to do a partial update on a resource is important for performance reasons; it's a way to minimize the number of changes that will be performed in the database. Right now, some of the update() methods in ovn driver create the deltas using the *current* and *original* parameters that are passed to them. The *current* parameter is, as the name says, the current version of the object present in the Neutron DB. The *original* parameter is the previous version (current - 1) of that object. The problem with creating the deltas by comparing these two objects is that only the data in the Neutron DB is used. We need to stop using the *original* object for it, and instead we should create the delta based on the *current* version of the Neutron DB against the data stored in the OVNDB, to be able to detect the real differences between the two databases. So in summary, to guarantee the correctness of the updates this document proposes to: #. Create a new OVSDB command that is responsible for comparing revision numbers and aborting the transaction, when needed. #. Consolidate changes to a resource in a single transaction (should be done already). #. When doing partial updates, create the deltas based on the current version in the Neutron DB and the OVNDB. #3 - Detect and fix out-of-sync resources ----------------------------------------- When things are working as expected, the above changes should ensure that the Neutron DB and OVNDB are in sync. But what happens when things go bad? As per `Problem 2 `_, things like temporarily losing connectivity with the OVNDB could cause changes to fail to be committed and the databases to get out-of-sync. We need to be able to detect the resources that were affected by these failures and fix them. We already have the means to do it: similar to what the `ovn_db_sync.py`_ script does, we could fetch all the data from both databases and compare each resource. But, depending on the size of the deployment, this can be really slow and costly. This document proposes an optimization for this problem to make it efficient enough so that we can run it periodically (as a periodic task) and not manually as a script anymore. First, we need to create an additional table in the Neutron database that would serve as a cache for the revision numbers in **OVNDB**. The new table schema could look like this: ================ ======== ================================================= Column name Type Description ================ ======== ================================================= standard_attr_id Integer Primary key. The reference ID from the standardattributes table in Neutron for that resource. ONDELETE SET NULL. resource_uuid String The UUID of the resource resource_type String The type of the resource (e.g. Port, Router, ...) revision_number Integer The version of the object present in OVN acquired_at DateTime The time that the entry was created. For troubleshooting purposes updated_at DateTime The time that the entry was updated. For troubleshooting purposes ================ ======== ================================================= For the different actions (create, update and delete) this table will be used as follows: 1. Create: In the create_*_precommit() method, we will create an entry in the new table within the same Neutron transaction. The revision_number column for the new entry will have a placeholder value until the resource is successfully created in OVNDB. In case we fail to create the resource in OVN (but succeed in Neutron) we still have the entry logged in the new table, and this problem can be detected by fetching all resources where the revision_number column value is equal to the placeholder value. The pseudo-code will look something like this: .. code-block:: python def create_port_precommit(ctx, port): create_initial_revision(port['id'], revision_number=-1, session=ctx.session) def create_port_postcommit(ctx, port): create_port_in_ovn(port) bump_revision(port['id'], revision_number=port['revision_number']) 2. Update: For updates it's simpler: we need to bump the revision number for that resource **after** the OVN transaction is committed, in the update_*_postcommit() method.
That way, if an update fails to be applied to OVN, the inconsistencies can be detected by a JOIN between the new table and the ``standardattributes`` table where the revision_number columns do not match. The pseudo-code will look something like this: .. code-block:: python def update_port_postcommit(ctx, port): update_port_in_ovn(port) bump_revision(port['id'], revision_number=port['revision_number']) 3. Delete: The ``standard_attr_id`` column in the new table is a foreign key constraint with ``ONDELETE=SET NULL`` set. That means that, upon Neutron deleting a resource, the ``standard_attr_id`` column in the new table will be set to *NULL*. If deleting a resource succeeds in Neutron but fails in OVN, the inconsistency can be detected by looking at all resources that have a ``standard_attr_id`` equal to NULL. The pseudo-code will look something like this: .. code-block:: python def delete_port_postcommit(ctx, port): delete_port_in_ovn(port) delete_revision(port['id']) With the above optimization it's possible to create a periodic task that can run quite frequently to detect and fix the inconsistencies caused by random backend failures.
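A sketch of how the periodic task could fetch the inconsistent resources using the new table, in the same pseudo-code style as above; the SQLAlchemy model names and the placeholder constant are illustrative assumptions:

.. code-block:: python

    INITIAL_REV = -1  # placeholder written by create_*_precommit()

    def get_inconsistent_resources(session):
        # Creations that never made it to OVNDB.
        create_failed = session.query(OVNRevisionNumbers).filter_by(
            revision_number=INITIAL_REV).all()
        # Updates committed to Neutron but not applied to OVNDB.
        update_failed = session.query(OVNRevisionNumbers).join(
            StandardAttribute,
            OVNRevisionNumbers.standard_attr_id == StandardAttribute.id,
        ).filter(
            OVNRevisionNumbers.revision_number !=
            StandardAttribute.revision_number,
        ).all()
        # Deletions that succeeded in Neutron but failed in OVNDB
        # (the foreign key was set to NULL by ONDELETE).
        delete_failed = session.query(OVNRevisionNumbers).filter_by(
            standard_attr_id=None).all()
        return create_failed + update_failed + delete_failed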
.. note:: There's no lock linking both database updates in the postcommit() methods. So, it's true that the method bumping the revision_number column in the new table in the Neutron DB could still race but, that should be fine because this table acts like a cache and the real revision_number has been written in OVNDB. The mechanism that will detect and fix the out-of-sync resources should detect this inconsistency as well and, based on the revision_number in OVNDB, decide whether to sync the resource or only bump the revision_number in the cache table (in case the resource is already at the right version). References ========= * There's a chain of patches with a proof of concept for this approach, they start at: https://review.openstack.org/#/c/517049/ Alternatives ============ Journaling ---------- An alternative solution to this problem is *journaling*. The basic idea is to create another table in the Neutron database and log every operation (create, update and delete) instead of passing it directly to the SDN controller. A separate thread (or multiple instances of it) is then responsible for reading this table and applying the operations to the SDN backend. This approach has been used and validated by drivers such as `networking-odl `_. An attempt to implement this approach in *ovn driver* can be found `here `_. Some things to keep in mind about this approach: * The code can get quite complex as this approach is not only about applying the changes to the SDN backend asynchronously. The dependencies between each resource as well as their operations also need to be computed. For example, before attempting to create a router port, the router that this port belongs to needs to be created. Or, before attempting to delete a network, all the dependent resources on it (subnets, ports, etc...) need to be processed first. * The number of journal threads running can cause problems. In my tests I had three controllers, each one with 24 CPU cores (Intel Xeon E5-2620 with hyperthreading enabled) and 64GB RAM. Running 1 journal thread per Neutron API worker caused ``ovsdb-server`` to misbehave when under heavy pressure [1]_. Running multiple journal threads seems to be causing other types of problems `in other drivers as well `_. * When under heavy pressure [1]_, I noticed that the journal threads could come to a halt (or really slow down) while the API workers were handling a lot of requests. This resulted in some operations taking more than a minute to be processed. This behaviour can be seen `in this screenshot `_. .. TODO find a better place to host that image * Given that the 1 journal thread per Neutron API worker approach is problematic, determining the right number of journal threads is also difficult. In my tests, I've noticed that 3 journal threads per controller worked better, but that number was purely based on ``trial & error``. In production this number should probably be calculated based on the environment; perhaps something like `TripleO `_ (or any upper layer) would be in a better position to make that decision. * At least temporarily, the data in the Neutron database is duplicated between the normal tables and the journal one. * Some operations like creating a new resource via Neutron's API will return `HTTP 201 `_, which indicates that the resource has been created and is ready to be used, but as these resources are created asynchronously one could argue that the HTTP codes are now misleading. As a note, the resource will be created in the Neutron database by the time the HTTP request returns, but it may not be present in the SDN backend yet. Given all considerations, this approach is still valid and the fact that it's already been used by other ML2 drivers makes it more open for collaboration and code sharing. .. _`Transaction class`: https://github.com/openvswitch/ovs/blob/3728b3b0316b44d1f9181be115b63ea85ff5883c/python/ovs/db/idl.py#L1014-L1055 .. _`ovn_db_sync.py`: https://github.com/openstack/networking-ovn/blob/a9af75cd3ce6cd6685b6435b325c97cacc83ce0e/networking_ovn/ovn_db_sync.py .. rubric:: Footnotes .. [1] I ran the tests using `Browbeat `_, which basically orchestrates `Openstack Rally `_ and monitors the machine's resource usage. neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/ovn/distributed_ovsdb_events.rst .. _distributed_ovsdb_events: ================================ Distributed OVSDB events handler ================================ This document presents the problem and proposes a solution for handling OVSDB events in a distributed fashion in ovn driver. Problem description =================== In ovn driver, the OVSDB Monitor class is responsible for listening to OVSDB events and performing certain actions on them. We use it extensively for various tasks, including critical ones such as monitoring for port binding events (in order to notify Neutron/Nova that a port has been bound to a certain chassis). Currently, this class uses a distributed OVSDB lock to ensure that only one instance handles those events at a time. The problem with this approach is that it creates a bottleneck because, even if we have multiple Neutron Workers running at the moment, only one is actively handling those events. This problem is highlighted even more when working with technologies such as containers, which rely on creating multiple ports at a time and waiting for them to be bound. Proposed change =============== In order to fix this problem, this document proposes using a `Consistent Hash Ring`_ to split the load of handling events across multiple Neutron Workers.
A new table called ``ovn_hash_ring`` will be created in the Neutron database where the Neutron Workers capable of handling OVSDB events will be registered. The table will use the following schema: ================ ======== ================================================= Column name Type Description ================ ======== ================================================= node_uuid String Primary key. The unique identification of a Neutron Worker. hostname String The hostname of the machine this Node is running on. created_at DateTime The time that the entry was created. For troubleshooting purposes. updated_at DateTime The time that the entry was updated. Used as a heartbeat to indicate that the Node is still alive. ================ ======== ================================================= This table will be used to form the `Consistent Hash Ring`_. Fortunately, we already have an implementation in the `tooz`_ library of OpenStack. It was contributed by the `Ironic`_ team, which also uses this data structure to spread the API request load across multiple Ironic Conductors. Here's how a `Consistent Hash Ring`_ from `tooz`_ works:: from tooz import hashring hring = hashring.HashRing({'worker1', 'worker2', 'worker3'}) # Returns set(['worker3']) hring[b'event-id-1'] # Returns set(['worker1']) hring[b'event-id-2'] How OVSDB Monitor will use the Ring ----------------------------------- Every instance of the OVSDB Monitor class will be listening to a series of events from the OVSDB database, and each of them will have a unique ID registered in the database which will be part of the `Consistent Hash Ring`_. When an event arrives, each OVSDB Monitor instance will hash the event UUID and the ring will return one instance ID, which each instance will then compare with its own ID; the instance whose ID matches will process the event (see the sketch below). Verifying status of OVSDB Monitor instance ------------------------------------------ A new maintenance task will be created in ovn driver which will update the ``updated_at`` column of the ``ovn_hash_ring`` table for the entries matching its hostname, indicating that all Neutron Workers running on that hostname are alive. Note that only a single maintenance instance runs on each machine, so the writes to the Neutron database are optimized. When forming the ring, the code should check for entries where the value of the ``updated_at`` column is newer than a given timeout. Entries that haven't been updated in a certain time won't be part of the ring. If the ring already exists, it will be re-balanced.
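A minimal sketch of how an OVSDB Monitor instance could combine the ring lookup with the liveness check described above. The class and the ``get_alive_nodes()`` helper are illustrative assumptions:

.. code-block:: python

    from tooz import hashring

    NODE_TIMEOUT = 60  # seconds without a heartbeat before a node is dropped

    class HashRingEventHandler(object):

        def __init__(self, node_uuid):
            # The unique ID this worker registered in the ovn_hash_ring table.
            self.node_uuid = node_uuid

        def should_handle(self, event_uuid):
            # get_alive_nodes() is assumed to return the node_uuid values
            # from ovn_hash_ring whose updated_at heartbeat is fresher
            # than NODE_TIMEOUT.
            ring = hashring.HashRing(get_alive_nodes(NODE_TIMEOUT))
            # The ring maps the event UUID to exactly one registered node;
            # only that node processes the event.
            return self.node_uuid in ring[event_uuid.encode('utf-8')]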
The current implementation shares the same problem, if the instance holding the current OVSDB lock is killed abruptly, events will be lost until the lock is moved on to the next instance which is alive. One could argue that the current implementation aggravates the problem because all events will be lost where with the distributed mechanism **some** events will be lost. As far as distributed systems goes, that's a normal scenario and things are soon corrected. Ideas for future improvements ----------------------------- This section contains some ideas that can be added on top of this work to further improve it: * Listen to changes to the Chassis table in the OVSDB and force a ring re-balance when a Chassis is added or removed from it. * Cache the ring for a short while to minimize the database reads when the service is under heavy load. * To greater minimize/avoid event losses it would be possible to cache the last X events to be reprocessed in case a node times out and the ring re-balances. .. _`Consistent Hash Ring`: https://en.wikipedia.org/wiki/Consistent_hashing .. _`tooz`: https://github.com/openstack/tooz .. _`Ironic`: https://github.com/openstack/ironic ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/ovn/index.rst0000644000175000017500000000040000000000000026216 0ustar00coreycorey00000000000000.. ================ OVN Design Notes ================ .. toctree:: :maxdepth: 1 data_model native_dhcp ovn_worker metadata_api database_consistency acl_optimizations loadbalancer distributed_ovsdb_events l3_ha_rescheduling ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/ovn/l3_ha_rescheduling.rst0000644000175000017500000001670400000000000030647 0ustar00coreycorey00000000000000.. _l3_ha_rescheduling: =================================== L3 HA Scheduling of Gateway Chassis =================================== Problem Description ------------------- Currently if a single network node is active in the system, gateway chassis for the routers would be scheduled on that node. However, when a new node is added to the system, neither rescheduling nor rebalancing occur automatically. This makes the router created on the first node to be not in HA mode. Side-effects of this behavior include: * Skewed up load on different network nodes due to lack of router rescheduling. * If the active node, where the gateway chassis for a router is scheduled goes down, then because of lack of HA the North-South traffic from that router will be hampered. Overview of Proposed Approach ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Gateway scheduling has been proposed in `[2]`_. However, rebalancing or rescheduling was not a part of that solution. This specification clarifies what is rescheduling and rebalancing. Rescheduling would automatically happen on every event triggered by addition or deletion of chassis. Rebalancing would be only triggered by manual operator action. Rescheduling of Gateway Chassis ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In order to provide proper rescheduling of the gateway ports during addition or deletion of the chassis, following approach can be considered: * Identify the number of chassis in which each router has been scheduled - Consider router for scheduling if no. 
of chassis < *MAX_GW_CHASSIS*. *MAX_GW_CHASSIS* is defined in `[0]`_. * Find the list of chassis where the router is scheduled and reschedule it, up to *MAX_GW_CHASSIS* gateways, using the list of available candidates. The master chassis association is not modified, so as not to interrupt network flows. Rescheduling is an event-triggered operation which will occur whenever a chassis is added or removed. When it happens, ``schedule_unhosted_gateways()`` `[1]`_ will be called to host the unhosted gateways. Routers without gateway ports are excluded from this operation because they are not connected to provider networks and have no gateway ports. More information about this can be found in the ``gateway_chassis`` table definition in the OVN NorthBound DB `[5]`_. Chassis that have the ``enable-chassis-as-gw`` flag enabled in their OVN southbound database table are the ones eligible for hosting the routers. Rescheduling of a router depends on the currently set priorities. Each chassis is given a specific priority for the router's gateway, and the priority increases with increasing value (i.e. 1 < 2 < 3 ...). The highest prioritized chassis hosts the gateway port. The other chassis are selected as slaves. There are two approaches to rescheduling supported by ovn driver right now: * Least loaded - select the least-loaded chassis first, * Random - select a chassis randomly. A few points to consider for the design: * If there are 2 chassis C1 and C2, where the routers are already balanced, and a new chassis C3 is added, then routers should be rescheduled only from C1 to C3 and from C2 to C3. Rescheduling from C1 to C2 and vice-versa should not be allowed. * In order to reschedule the router's chassis, the ``master`` chassis for a gateway router will be left untouched. However, for the scenario where all routers are scheduled on only one chassis which is available as gateway, the addition of a second gateway chassis would schedule the router gateway ports at a lower priority on the new chassis. The following scenarios are possible and have been considered in the design: * Case #1: - System has only one chassis C1 and all router gateway ports are scheduled on it. We add a new chassis C2. - Behavior: All the routers scheduled on C1 will also be scheduled on C2 with priority 1. * Case #2: - System has 2 chassis C1 and C2 during installation. C1 goes down. - Behavior: In this case, all routers would be rescheduled to C2. Once C1 is back up, routers would be rescheduled on it. However, since C2 is now the new master, routers on C1 would have lower priority. * Case #3: - System has 2 chassis C1 and C2 during installation. C3 is added to it. - Behavior: In this case, routers would not move their master chassis associations. So routers which have their master on C1 would remain there, and the same for routers on C2. However, lower prioritized candidates of existing gateways would be scheduled on the chassis C3, depending on the type of scheduler used (Random or LeastLoaded). Rebalancing of Gateway Chassis ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Rebalancing is the second part of the design; it assigns a new master to already scheduled router gateway ports. Downtime is expected in this operation. Rebalancing of routers can be achieved using an external CLI script. A similar approach has been implemented for DHCP rescheduling `[4]`_. The master chassis gateway can be moved only to another, previously scheduled gateway chassis. Rebalancing of a chassis occurs only if the number of scheduled master chassis ports per provider network hosted by the given chassis is higher than the average number of hosted master gateway ports per chassis per provider network. This dependency is determined by the formula: avg_gw_per_chassis = num_gw_by_provider_net / num_chassis_with_provider_net Where: - avg_gw_per_chassis - average number of scheduled master gateway chassis within the same provider network. - num_gw_by_provider_net - number of master chassis gateways scheduled in the given provider network. - num_chassis_with_provider_net - number of chassis that have connectivity to the given provider network. The rebalancing occurs only if: num_gw_by_provider_net_by_chassis > avg_gw_per_chassis Where: - num_gw_by_provider_net_by_chassis - number of master gateways hosted on the given chassis for the given provider network. - avg_gw_per_chassis - average number of scheduled master gateway chassis within the same provider network.
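The two conditions above can be read together as a simple predicate. A sketch, with argument names mirroring the variables defined above (illustrative only):

.. code-block:: python

    def should_rebalance(num_gw_by_provider_net_by_chassis,
                         num_gw_by_provider_net,
                         num_chassis_with_provider_net):
        # Average number of scheduled master gateways per chassis
        # within the same provider network.
        avg_gw_per_chassis = (num_gw_by_provider_net /
                              num_chassis_with_provider_net)
        # Rebalance only when this chassis hosts more master gateways
        # for the provider network than the average chassis does.
        return num_gw_by_provider_net_by_chassis > avg_gw_per_chassis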
The following scenarios are possible and have been considered in the design: * Case #1: - System has only two chassis C1 and C2. Both chassis host the same number of gateways. - Behavior: Rebalancing doesn't occur. * Case #2: - System has only two chassis C1 and C2. C1 hosts 3 gateways. C2 hosts 2 gateways. - Behavior: Rebalancing doesn't occur, so as not to continuously move gateways between the chassis in a loop. * Case #3: - System has two chassis C1 and C2. In the meantime, a third chassis C3 has been added to the system. - Behavior: Rebalancing should occur. Gateways from C1 and C2 should be moved to C3, up to avg_gw_per_chassis. * Case #4: - System has two chassis C1 and C2. C1 is connected to provnet1, but C2 is connected to provnet2. - Behavior: Rebalancing shouldn't occur because of the lack of chassis within the same provider network. References ~~~~~~~~~~ .. _`[0]`: https://opendev.org/openstack/neutron/src/commit/f73f39f2cfcd4eace2bda14c99ead9a8cc8560f4/neutron/common/ovn/constants.py#L171 .. _`[1]`: https://opendev.org/openstack/neutron/src/commit/f73f39f2cfcd4eace2bda14c99ead9a8cc8560f4/neutron/services/ovn_l3/plugin.py#L318 .. _`[2]`: https://bugs.launchpad.net/networking-ovn/+bug/1762694 .. _`[3]`: https://developer.openstack.org/api-ref/network/v2/index.html?expanded=schedule-router-to-an-l3-agent-detail#schedule-router-to-an-l3-agent .. _`[4]`: https://opendev.org/x/osops-tools-contrib/src/branch/master/neutron/dhcp_agents_balancer.py .. _`[5]`: http://www.openvswitch.org/support/dist-docs/ovn-nb.5.txt neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/ovn/loadbalancer.rst .. _loadbalancer: ================================== OpenStack LoadBalancer API and OVN ================================== Introduction ------------ Load balancing is essential for enabling simple or automatic delivery scaling and availability, since application delivery, scaling and availability are considered vital features of any cloud. Octavia is an open source, operator-scale load balancing solution designed to work with OpenStack. The purpose of this document is to propose a design for how we can use OVN as the backend for OpenStack's LoadBalancer API provided by Octavia.
Octavia LoadBalancers Today --------------------------- A detailed design analysis of Octavia is available here: https://docs.openstack.org/octavia/queens/contributor/design/version0.5/component-design.html Currently, Octavia uses the built-in Amphora driver to fulfill load balancing requests in OpenStack. An amphora can be a virtual machine, container, dedicated hardware, appliance or device that actually performs the task of load balancing in the Octavia system. More specifically, an amphora takes requests from clients on the front-end and distributes these to back-end systems. An amphora communicates with its controllers over the LoadBalancer's network through a driver interface on the controller. Amphorae need a placeholder, such as a separate VM/container, for deployment, so that they can handle the LoadBalancer's requests. Along with this, they also need a separate network (termed the lb-mgmt-network) which handles all Amphorae requests. Amphorae have the capability to handle L4 (TCP/UDP) as well as L7 (HTTP) LoadBalancer requests and provide monitoring features using HealthMonitors. Octavia with OVN ---------------- The OVN native LoadBalancer currently supports L4 protocols, with support for L7 protocols planned for future releases. It also does not currently have any monitoring facility. However, it does not need any extra hardware/VM/container for deployment, which is a major positive point when compared with Amphorae. Also, it does not need any special network to handle the LoadBalancer's requests as they are taken care of by OpenFlow rules directly. And, though OVN does not have support for TLS, it is in the works and, once implemented, can be integrated with Octavia. The following section details how OVN can be used as an Octavia driver. Overview of Proposed Approach ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The OVN Driver for Octavia runs under the scope of Octavia. The Octavia API receives calls and forwards them to the OVN Driver. **Step 1** - Creating a LoadBalancer The Octavia API receives a LoadBalancer creation request on a network and issues it to the OVN Provider driver. The OVN driver creates a LoadBalancer in the OVN NorthBound DB and asynchronously updates the Octavia DB with the status response. A VIP port is created in Neutron when the LoadBalancer creation is complete. The VIP information, however, is not updated in the NorthBound DB until the Members are associated with the LoadBalancer's Pool. **Step 2** - Creating LoadBalancer entities (Pools, Listeners, Members) Once a LoadBalancer is created by OVN in its NorthBound DB, users can now create Pools, Listeners and Members associated with the LoadBalancer using the Octavia API. With the creation of each entity, the LoadBalancer's *external_ids* column in the NorthBound DB will be updated and the corresponding Logical and OpenFlow rules will be added for handling them. **Step 3** - LoadBalancer request processing When a user sends a request to the VIP IP address, the OVN pipeline takes care of load balancing the VIP request to one of the backend members. More information about this can be found in the ovn-northd man pages. OVN LoadBalancer Driver Logic ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * On startup: Open and maintain a connection to the OVN Northbound DB (using the ovsdbapp library). On first connection, and anytime a reconnect happens: * Do a full sync. * Register a callback when a new interface is added to a router or deleted from a router.
OVN LoadBalancer Driver Logic
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* On startup: open and maintain a connection to the OVN Northbound DB
  (using the ovsdbapp library). On first connection, and anytime a
  reconnect happens:

  * Do a full sync.
  * Register a callback for when a new interface is added to or deleted
    from a router.

* When a new LoadBalancer L1 is created, create a row in OVN's
  ``Load_Balancer`` table and update its entries for name and network
  references. If the network on which the LoadBalancer is created is
  associated with a router, say R1, then add the router reference to the
  LoadBalancer's *external_ids* and associate the LoadBalancer with the
  router. Also associate the LoadBalancer L1 with all those networks which
  have an interface on the router R1. This is required so that Logical
  Flows for inter-network communication using the LoadBalancer L1 are
  possible. Also, during this time, a new port is created via Neutron
  which acts as a VIP port. The information of this new port is not
  visible in OVN's NorthBound DB until a member is added to the
  LoadBalancer.

* If a new network interface is added to the router R1 described above,
  all the LoadBalancers on that network are associated with the router R1
  and all the LoadBalancers on the router are associated with the new
  network.

* If a network interface is removed from the router R1, then all the
  LoadBalancers which have been solely created on that network (identified
  using the *ls_ref* attribute in the LoadBalancer's *external_ids*) are
  removed from the router. Similarly, those LoadBalancers which are
  associated with the network but not actually created on that network are
  removed from the network.

* A LoadBalancer can either be deleted with all its child entities using
  the *cascade* option, or its members/pools/listeners can be individually
  deleted. When the LoadBalancer is deleted, its references and
  associations to all networks and routers are removed. This might change
  in the future once the association of LoadBalancers with
  networks/routers is changed to *weak* from *strong* [3]. The VIP port is
  also deleted when the LoadBalancer is deleted.

OVN LoadBalancer at work
~~~~~~~~~~~~~~~~~~~~~~~~

The OVN Northbound schema [5] has a table to store LoadBalancers. The
table looks like::

    "Load_Balancer": {
        "columns": {
            "name": {"type": "string"},
            "vips": {
                "type": {"key": "string", "value": "string",
                         "min": 0, "max": "unlimited"}},
            "protocol": {
                "type": {"key": {"type": "string",
                                 "enum": ["set", ["tcp", "udp"]]},
                         "min": 0, "max": 1}},
            "external_ids": {
                "type": {"key": "string", "value": "string",
                         "min": 0, "max": "unlimited"}}},
        "isRoot": true},

There is a ``load_balancer`` column in the Logical_Switch table (which
corresponds to a Neutron network) as well as in the Logical_Router table
(which corresponds to a Neutron router), referring back to the
``Load_Balancer`` table.

The OVN driver updates the OVN Northbound DB. When a LoadBalancer is
created, a row in this table is created, and when listeners and members
are added, the ``vips`` column is updated accordingly. The
Logical_Switch's ``load_balancer`` column is also updated accordingly.

The ovn-northd service, which monitors for changes to the OVN Northbound
DB, generates OVN logical flows to enable load balancing, and
ovn-controller, running on each compute node, translates the logical flows
into actual OpenFlow rules.

The status of each entity in the Octavia DB is managed according to [4].
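The NB DB updates described above are done through ovsdbapp. The following
minimal sketch uses ovsdbapp's generic ``db_create`` command; the
connection details and column values are illustrative only:

.. code-block:: python

    # Minimal sketch of how a driver could write a Load_Balancer row
    # using ovsdbapp; connection details and values are illustrative.
    from ovsdbapp.backend.ovs_idl import connection
    from ovsdbapp.schema.ovn_northbound import impl_idl

    idl = connection.OvsdbIdl.from_server('tcp:127.0.0.1:6641',
                                          'OVN_Northbound')
    conn = connection.Connection(idl=idl, timeout=60)
    nb_api = impl_idl.OvnNbApiIdlImpl(conn)

    # Create the row for a new LoadBalancer; vips stays empty until
    # members are added, as described above.
    nb_api.db_create(
        'Load_Balancer',
        name='973a201a-8787-4f6e-9b8f-ab9f93c31f44',
        external_ids={'neutron:vip': '10.0.0.10'}).execute(
            check_error=True)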
Below are a few examples of what happens when LoadBalancer commands are
executed, and of what changes in the Load_Balancer Northbound DB table.

1. Create a LoadBalancer::

    $ openstack loadbalancer create --provider ovn --vip-subnet-id=private lb1

    $ ovn-nbctl list load_balancer
    _uuid        : 9dd65bae-2501-43f2-b34e-38a9cb7e4251
    external_ids : {
        lr_ref="neutron-52b6299c-6e38-4226-a275-77370296f257",
        ls_refs="{\"neutron-2526c68a-5a9e-484c-8e00-0716388f6563\": 1}",
        neutron:vip="10.0.0.10",
        neutron:vip_port_id="2526c68a-5a9e-484c-8e00-0716388f6563"}
    name         : "973a201a-8787-4f6e-9b8f-ab9f93c31f44"
    protocol     : []
    vips         : {}

2. Create a pool::

    $ openstack loadbalancer pool create --name p1 --loadbalancer lb1
      --protocol TCP --lb-algorithm SOURCE_IP_PORT

    $ ovn-nbctl list load_balancer
    _uuid        : 9dd65bae-2501-43f2-b34e-38a9cb7e4251
    external_ids : {
        lr_ref="neutron-52b6299c-6e38-4226-a275-77370296f257",
        ls_refs="{\"neutron-2526c68a-5a9e-484c-8e00-0716388f6563\": 1}",
        "pool_f2ddf7a6-4047-4cc9-97be-1d1a6c47ece9"="",
        neutron:vip="10.0.0.10",
        neutron:vip_port_id="2526c68a-5a9e-484c-8e00-0716388f6563"}
    name         : "973a201a-8787-4f6e-9b8f-ab9f93c31f44"
    protocol     : []
    vips         : {}

3. Create a member::

    $ openstack loadbalancer member create --address 10.0.0.107
      --subnet-id 2d54ec67-c589-473b-bc67-41f3d1331fef --protocol-port 80 p1

    $ ovn-nbctl list load_balancer
    _uuid        : 9dd65bae-2501-43f2-b34e-38a9cb7e4251
    external_ids : {
        lr_ref="neutron-52b6299c-6e38-4226-a275-77370296f257",
        ls_refs="{\"neutron-2526c68a-5a9e-484c-8e00-0716388f6563\": 2}",
        "pool_f2ddf7a6-4047-4cc9-97be-1d1a6c47ece9"=
            "member_579c0c9f-d37d-4ba5-beed-cabf6331032d_10.0.0.107:80",
        neutron:vip="10.0.0.10",
        neutron:vip_port_id="2526c68a-5a9e-484c-8e00-0716388f6563"}
    name         : "973a201a-8787-4f6e-9b8f-ab9f93c31f44"
    protocol     : []
    vips         : {}

4. Create another member::

    $ openstack loadbalancer member create --address 20.0.0.107
      --subnet-id c2e2da10-1217-4fe2-837a-1c45da587df7 --protocol-port 80 p1

    $ ovn-nbctl list load_balancer
    _uuid        : 9dd65bae-2501-43f2-b34e-38a9cb7e4251
    external_ids : {
        lr_ref="neutron-52b6299c-6e38-4226-a275-77370296f257",
        ls_refs="{\"neutron-2526c68a-5a9e-484c-8e00-0716388f6563\": 2,
                  \"neutron-12c42705-3e15-4e2d-8fc0-070d1b80b9ef\": 1}",
        "pool_f2ddf7a6-4047-4cc9-97be-1d1a6c47ece9"=
            "member_579c0c9f-d37d-4ba5-beed-cabf6331032d_10.0.0.107:80,
             member_d100f2ed-9b55-4083-be78-7f203d095561_20.0.0.107:80",
        neutron:vip="10.0.0.10",
        neutron:vip_port_id="2526c68a-5a9e-484c-8e00-0716388f6563"}
    name         : "973a201a-8787-4f6e-9b8f-ab9f93c31f44"
    protocol     : []
    vips         : {}

5. Create a listener::

    $ openstack loadbalancer listener create --name l1 --protocol TCP
      --protocol-port 82 --default-pool p1 lb1

    $ ovn-nbctl list load_balancer
    _uuid        : 9dd65bae-2501-43f2-b34e-38a9cb7e4251
    external_ids : {
        lr_ref="neutron-52b6299c-6e38-4226-a275-77370296f257",
        ls_refs="{\"neutron-2526c68a-5a9e-484c-8e00-0716388f6563\": 2,
                  \"neutron-12c42705-3e15-4e2d-8fc0-070d1b80b9ef\": 1}",
        "pool_f2ddf7a6-4047-4cc9-97be-1d1a6c47ece9"="10.0.0.107:80,20.0.0.107:80",
        "listener_12345678-2501-43f2-b34e-38a9cb7e4132"=
            "82:pool_f2ddf7a6-4047-4cc9-97be-1d1a6c47ece9",
        neutron:vip="10.0.0.10",
        neutron:vip_port_id="2526c68a-5a9e-484c-8e00-0716388f6563"}
    name         : "973a201a-8787-4f6e-9b8f-ab9f93c31f44"
    protocol     : []
    vips         : {"10.0.0.10:82"="10.0.0.107:80,20.0.0.107:80"}
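The listener step above is essentially a string transformation over
``external_ids``: the listener's port is combined with the pool's member
addresses to produce the ``vips`` entry. A small, purely illustrative
Python sketch of that mapping (the function name is hypothetical):

.. code-block:: python

    # Purely illustrative: derive the vips entry for a listener from the
    # VIP address, the listener port and the pool's member endpoints.
    def build_vip_entry(vip_ip, listener_port, member_endpoints):
        key = "%s:%d" % (vip_ip, listener_port)
        value = ",".join(member_endpoints)
        return {key: value}

    print(build_vip_entry("10.0.0.10", 82,
                          ["10.0.0.107:80", "20.0.0.107:80"]))
    # {'10.0.0.10:82': '10.0.0.107:80,20.0.0.107:80'}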
As explained earlier in the design section:

- If a network N1 has a LoadBalancer LB1 associated with it and one of its
  interfaces is added to a router R1, LB1 is associated with R1 as well.
- If a network N2 has a LoadBalancer LB2 and one of its interfaces is
  added to the router R1, then R1 will have both LoadBalancers, LB1 and
  LB2. N1 and N2 will also have both LoadBalancers associated with them.
- However, note that though network N1 has both LB1 and LB2 associated
  with it, only LB1 has a direct reference to N1, since LB1 was created on
  N1. This is visible in the ``ls_ref`` key of the ``external_ids`` column
  in LB1's entry in the ``load_balancer`` table.
- If a network N3 is added to the router R1, N3 will also have both
  LoadBalancers (LB1, LB2) associated with it.
- If the interface to network N2 is removed from R1, network N2 will only
  have LB2 associated with it. Networks N1 and N3 and router R1 will have
  LoadBalancer LB1 associated with them.

Limitations
-----------

The following actions are not supported by the OVN Driver:

- Creating a LoadBalancer/Listener/Pool with an L7 protocol.
- Creating HealthMonitors.
- Currently only one algorithm is supported for pool management
  (Source IP Port).
- Creating Listeners and Pools with different protocols; they should be of
  the same protocol type.

The following issue exists with OVN's integration with Octavia:

- If creation/deletion of a LoadBalancer, Listener, Pool or Member fails,
  then the corresponding object will remain in the DB in a PENDING_*
  state.

Support Matrix
--------------

A detailed matrix of the operations supported by the OVN Provider driver
in Octavia can be found at
https://docs.openstack.org/octavia/latest/user/feature-classification/index.html

Other References
----------------

[1] Octavia API:
https://docs.openstack.org/api-ref/load-balancer/v2/

[2] Octavia Glossary:
https://docs.openstack.org/octavia/queens/reference/glossary.html

[3] https://github.com/openvswitch/ovs/commit/612f80fa8ebf88dad2e204364c6c02b451dca36c

[4] https://docs.openstack.org/api-ref/load-balancer/v2/index.html#status-codes

[5] https://github.com/openvswitch/ovs/blob/d1b235d7a6246e00d4afc359071d3b6b3ed244c3/ovn/ovn-nb.ovsschema#L117

.. _metadata_api:

==============================
OpenStack Metadata API and OVN
==============================

Introduction
------------

OpenStack Nova presents a metadata API to VMs similar to what is available
on Amazon EC2. Neutron is involved in this process because the source IP
address is not enough to uniquely identify the source of a metadata
request, since networks can have overlapping IP addresses. Neutron is
responsible for intercepting metadata API requests and adding HTTP headers
which uniquely identify the source of the request before forwarding it to
the metadata API server.

The purpose of this document is to propose a design for how to enable this
functionality when OVN is used as the backend for OpenStack Neutron.

Neutron and Metadata Today
--------------------------

The following blog post describes how VMs access the metadata API through
Neutron today.

https://www.suse.com/communities/blog/vms-get-access-metadata-neutron/

In summary, we run a metadata proxy in either the router namespace or the
DHCP namespace. The DHCP namespace can be used when there's no router
connected to the network. The one downside of the DHCP namespace approach
is that it requires pushing a static route to the VM through DHCP so that
it knows to route metadata requests to the DHCP server IP address.
* The instance sends an HTTP request for metadata to 169.254.169.254.
* This request hits either the router or the DHCP namespace, depending on
  the route in the instance.
* The metadata proxy service in the namespace adds the following info to
  the request:

  * Instance IP (X-Forwarded-For header)
  * Router or Network ID (X-Neutron-Router-Id or X-Neutron-Network-Id
    header)

* The metadata proxy service sends this request to the metadata agent
  (outside the namespace) via a UNIX domain socket.
* The neutron-metadata-agent service forwards the request to the Nova
  metadata API service, adding some new headers (instance ID and Tenant
  ID) to the request [0].

For proper operation, Neutron and Nova must be configured to communicate
together with a shared secret. Neutron uses this secret to sign the
Instance-ID header of the metadata request to prevent spoofing. This
secret is configured through ``metadata_proxy_shared_secret`` in both the
nova and neutron configuration files (optional).

[0] https://opendev.org/openstack/neutron/src/commit/f73f39f2cfcd4eace2bda14c99ead9a8cc8560f4/neutron/agent/metadata/agent.py#L175
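The signature is a standard HMAC over the instance ID. A minimal sketch of
the signing step, assuming an HMAC-SHA256 digest (the exact digest used
may vary by release):

.. code-block:: python

    # Minimal sketch of signing the Instance-ID header with the shared
    # secret; assumes HMAC-SHA256.
    import hashlib
    import hmac

    def sign_instance_id(shared_secret, instance_id):
        return hmac.new(shared_secret.encode('utf-8'),
                        instance_id.encode('utf-8'),
                        hashlib.sha256).hexdigest()

    # Nova recomputes the same HMAC with its copy of the secret and
    # rejects the request if the signature header differs.
    print(sign_instance_id('s3cr3t', 'INSTANCE_UUID'))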
Neutron and Metadata with OVN
-----------------------------

The current metadata API approach does not translate directly to OVN.
There are no Neutron agents in use with OVN. Further, OVN makes no use of
its own network namespaces that we could take advantage of, the way the
original implementation makes use of the router and DHCP namespaces.

We must use a modified approach that fits the OVN model. This section
details a proposed approach.

Overview of Proposed Approach
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The proposed approach is similar to the *isolated network* case in the
current ML2+OVS implementation: we would be running a metadata proxy
(haproxy) instance on every hypervisor, for each network a VM on that host
is connected to.

The downside of this approach is that we'll be running more metadata
proxies than we do now in the case of routed networks (one per virtual
router), but since haproxy is very lightweight and the proxies will be
idling most of the time, it shouldn't be a big issue overall. The major
benefit of this approach, however, is that we don't have to implement any
scheduling logic to distribute metadata proxies across the nodes, nor any
HA logic. This can be evolved in the future, as explained below in this
document.

Also, this approach relies on a new feature in OVN that we must implement
first, so that an OVN port can be present on *every* chassis (similar to
*localnet* ports). This new type of logical port would be *localport*, and
we will never forward packets over a tunnel for these ports. We would only
send packets to the local instance of a *localport*.

**Step 1** - Create a port for the metadata proxy

When using the DHCP agent today, Neutron automatically creates a port for
the DHCP agent to use. We could do the same thing for use with the
metadata proxy (haproxy). We'll create an OVN *localport* which will be
present on every chassis, and this port will have the same MAC/IP address
on every host. Eventually, we can share the same Neutron port for both
DHCP and metadata.

**Step 2** - Routing metadata API requests to the correct Neutron port

This works similarly to the current approach. We would program OVN to
include a static route in DHCP responses that routes metadata API requests
to the *localport* that is hosting the metadata API proxy.

Also, in case DHCP isn't enabled or the client ignores the route info, we
will program a static route in the OVN logical router which will still get
metadata requests directed to the right place. If the DHCP route does not
work and the network is isolated, VMs won't get metadata, but this already
happens with the current implementation, so this approach doesn't
introduce a regression.

**Step 3** - Management of the namespaces and haproxy instances

We propose a new agent called ``neutron-ovn-metadata-agent``. We will run
this agent on every hypervisor, and it will be responsible for creating
and managing the OVS interfaces, network namespaces and haproxy processes
used to proxy metadata API requests.

**Step 4** - Metadata API request processing

Similar to the existing Neutron metadata agent,
``neutron-ovn-metadata-agent`` must act as an intermediary between haproxy
and the Nova metadata API service. ``neutron-ovn-metadata-agent`` is the
process that has access to the host networks where the Nova metadata API
exists. Each haproxy will be in a network namespace that is not able to
reach the appropriate host network. Haproxy will add the necessary headers
to the metadata API request and then forward it to
``neutron-ovn-metadata-agent`` over a UNIX domain socket, which matches
the behavior of the current metadata agent.

Metadata Proxy Management Logic
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In neutron-ovn-metadata-agent:

* On startup:

  * Do a full sync. Ensure we have all the required metadata proxies
    running. For that, the agent will watch the ``Port_Binding`` table of
    the OVN Southbound database and look for all rows with the ``chassis``
    column set to the host the agent is running on. For all those entries,
    make sure a metadata proxy instance is spawned for every ``datapath``
    (Neutron network) those ports are attached to. The agent will keep a
    record of the networks it currently has proxies running on by updating
    the ``neutron-metadata-proxy-networks`` key of the ``external-ids``
    column of the OVN ``Chassis`` record in the OVN Southbound database
    that corresponds to this host. As an example, this key could look like
    ``neutron-metadata-proxy-networks=NET1_UUID,NET4_UUID``, meaning that
    this chassis is hosting one or more VMs connected to networks 1 and 4,
    so we should have a metadata proxy instance running for each. Ensure
    any running metadata proxies no longer needed are torn down.

* Open and maintain a connection to the OVN Northbound database (using the
  ovsdbapp library). On first connection, and anytime a reconnect happens:

  * Do a full sync.
  * Register a callback for creates/updates/deletes to
    Logical_Switch_Port rows to detect when metadata proxies should be
    started or torn down. ``neutron-ovn-metadata-agent`` will watch the
    OVN Southbound database (``Port_Binding`` table) to detect when a port
    gets bound to its chassis. At that point, the agent will make sure
    that there's a metadata proxy attached to the OVN *localport* for the
    network this port is connected to.

* When a new network is created, we must create an OVN *localport* for use
  as a metadata proxy. This port will be owned by ``network:dhcp`` so that
  it gets auto-deleted upon the removal of the network, and it will remain
  ``DOWN`` and not bound to any chassis. The metadata port will be created
  regardless of the DHCP setting of the subnets within the network, as
  long as the metadata service is enabled.
* When a network is deleted, we must tear down the metadata proxy instance
  (if present) on the host and delete the corresponding OVN *localport*
  (which will happen automatically as it's owned by ``network:dhcp``).

Launching a metadata proxy includes:

* Creating a network namespace::

    $ sudo ip netns add <NAMESPACE>

* Creating a VETH pair (OVS upgrades that upgrade the kernel module will
  make internal ports go away, to be brought back later by OVS scripts.
  This may cause some disruption. Therefore, VETH pairs are preferred over
  internal ports)::

    $ sudo ip link add <IFACE>0 type veth peer name <IFACE>1

* Creating an OVS interface and placing one end in that namespace::

    $ sudo ovs-vsctl add-port br-int <IFACE>0
    $ sudo ip link set <IFACE>1 netns <NAMESPACE>

* Setting the IP and MAC addresses on that interface::

    $ sudo ip netns exec <NAMESPACE> ip link set <IFACE>1 address <MAC_ADDRESS>
    $ sudo ip netns exec <NAMESPACE> ip addr add <IP_ADDRESS>/<PREFIX> dev <IFACE>1

* Bringing the VETH pair up::

    $ sudo ip netns exec <NAMESPACE> ip link set <IFACE>1 up
    $ sudo ip link set <IFACE>0 up

* Setting ``external-ids:iface-id=NEUTRON_PORT_UUID`` on the OVS interface
  so that OVN is able to correlate this new OVS interface with the correct
  OVN logical port::

    $ sudo ovs-vsctl set Interface <IFACE>0 external_ids:iface-id=<NEUTRON_PORT_UUID>

* Starting haproxy in this network namespace.

* Adding the network UUID to
  ``external-ids:neutron-metadata-proxy-networks`` in the Chassis table
  for our chassis in the OVN Southbound database.

Tearing down a metadata proxy includes:

* Removing the network UUID from our chassis.
* Stopping haproxy.
* Deleting the OVS interface.
* Deleting the network namespace.

**Other considerations**

This feature will be enabled by default when using the ``ovn`` driver, but
there should be a way to disable it, so that operators who don't need
metadata don't have to deal with the complexity of it (haproxy instances,
network namespaces, etcetera). In this case, the agent would not create
the Neutron ports needed for metadata.

There could be a race condition when the first VM for a certain network
boots on a hypervisor, if it does so before the metadata proxy instance
has been spawned. Right now, the ``vif-plugged`` event to Nova is sent out
when the ``up`` column in the OVN Northbound database's
Logical_Switch_Port table changes to True, indicating that the VIF is now
up. To overcome this race condition, we want to wait until all network
UUIDs to which this VM is connected are present in
``external-ids:neutron-metadata-proxy-networks`` in the Chassis table for
our chassis in the OVN Southbound database. This will delay the event to
Nova until the metadata proxy instance is up and running on the host,
ensuring the VM will be able to get the metadata on boot.

Alternatives Considered
-----------------------

Alternative 1: Build metadata support into ovn-controller
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

We've been building some features useful to OpenStack directly into OVN.
DHCP and DNS are key examples of things we've replaced by building them
into ovn-controller. The metadata API case has some key differences that
make this a less attractive solution:

The metadata API is an OpenStack-specific feature. DHCP and DNS, by
contrast, are more clearly useful outside of OpenStack.

Building metadata API proxy support into ovn-controller means embedding an
HTTP and TCP stack into ovn-controller. This is a significant degree of
undesired complexity.

This option has been ruled out for these reasons.
Alternative 2: Distributed metadata and High Availability
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In this approach, we would spawn a metadata proxy per virtual router or
per network (if isolated), thus reducing the number of metadata proxy
instances running in the cloud. However, scheduling and HA would have to
be considered. On the other hand, we wouldn't need the OVN *localport*
implementation.

``neutron-ovn-metadata-agent`` would run on any host that we wish to be
able to host metadata API proxies. These hosts must also be running
ovn-controller.

Each of these hosts will have a Chassis record in the OVN southbound
database created by ovn-controller. The Chassis table has a column called
``external_ids`` which can be used for general metadata however we see
fit. ``neutron-ovn-metadata-agent`` will update its corresponding Chassis
record with an external-id of ``neutron-metadata-proxy-host=true`` to
indicate that this OVN chassis is capable of hosting metadata proxy
instances.

Once we have a way to determine the hosts capable of hosting metadata API
proxies, we can add logic to the OVN ML2 driver that schedules metadata
API proxies. This would be triggered by Neutron API requests.

The output of the scheduling process would be setting an ``external_ids``
key on a Logical_Switch_Port in the OVN northbound database that
corresponds with a metadata proxy. The key could be something like
``neutron-metadata-proxy-chassis=CHASSIS_HOSTNAME``.

``neutron-ovn-metadata-agent`` on each host would also be watching for
updates to these Logical_Switch_Port rows. When it detects that a metadata
proxy has been scheduled locally, it will kick off the process to spawn
the local haproxy instance and get it plugged into OVN.

HA must also be considered. We must know when a host goes down so that all
metadata proxies scheduled to that host can be rescheduled. This is almost
exactly the same problem we have with L3 HA: when a host goes down, we
need to trigger rescheduling gateways to other hosts. We should ensure
that the approach used for rescheduling L3 gateways can be utilized for
rescheduling metadata proxies as well.

In neutron-server (the OVN mechanism driver), introduce a new OVN driver
configuration option:

* ``[ovn] isolated_metadata=[True|False]``

Events that trigger scheduling a new metadata proxy:

* If isolated_metadata is True:

  * When a new network is created, we must create an OVN logical port for
    use as a metadata proxy and then schedule it to one of the
    ``neutron-ovn-metadata-agent`` instances.

* If isolated_metadata is False:

  * When a network is attached to or removed from a logical router, ensure
    that at least one of the networks has a metadata proxy port already
    created. If not, pick a network, create a metadata proxy port and then
    schedule it to an agent. At this point, we need to update the static
    route for the metadata API.

Events that trigger unscheduling an existing metadata proxy:

* When a network is deleted, delete the metadata proxy port if it exists
  and unschedule it from a ``neutron-ovn-metadata-agent``.

To schedule a new metadata proxy:

* Determine the list of available OVN Chassis that can host metadata
  proxies by reading the ``Chassis`` table of the OVN Southbound database.
  Look for chassis that have an external-id of
  ``neutron-metadata-proxy-host=true``.
* Of the available OVN chassis, choose the one "least loaded", i.e. the
  one currently hosting the fewest metadata proxies.
* Set ``neutron-metadata-proxy-chassis=CHASSIS_HOSTNAME`` as an
  external-id on the Logical_Switch_Port in the OVN Northbound database
  that corresponds to the Neutron port used for this metadata proxy.
  ``CHASSIS_HOSTNAME`` maps to the hostname row of a Chassis record in the
  OVN Southbound database.

This approach has been ruled out for its complexity, although we have
analyzed its details deeply because, eventually, and depending on the
implementation of L3 HA, we may want to evolve to it.

Other References
----------------

* Haproxy config --
  https://review.openstack.org/#/c/431691/34/neutron/agent/metadata/driver.py
* https://engineeringblog.yelp.com/2015/04/true-zero-downtime-haproxy-reloads.html

.. _native_dhcp:

=============================================
Using the native DHCP feature provided by OVN
=============================================

DHCPv4
------

OVN implements native DHCPv4 support which caters to the common use case
of providing an IP address to a booting instance, by providing stateless
replies to DHCPv4 requests based on statically configured address
mappings. To do this, it allows a short list of DHCPv4 options to be
configured and applied at each compute host running ovn-controller.

The OVN northbound DB provides a table ``DHCP_Options`` to store the DHCP
options, and a logical switch port has a reference to this table.

When a subnet is created and enable_dhcp is True, a new entry is created
in this table. The ``options`` column stores the DHCPv4 options. These
DHCPv4 options are included in the DHCPv4 reply by ovn-controller when the
VIF attached to the logical switch port sends a DHCPv4 request.

In order to map the DHCP_Options row to the subnet, the OVN ML2 driver
stores the subnet id in the ``external_ids`` column.

When a new port is created, the ``dhcpv4_options`` column of the logical
switch port refers to the DHCP_Options row created for the subnet of the
port. If the port has multiple IPv4 subnets, then the first subnet in
``fixed_ips`` is used.

If the port has extra DHCPv4 options defined, then a new entry is created
in the DHCP_Options table for the port. The default DHCP options are
obtained from the subnet's DHCP_Options row and then overridden with the
port's extra DHCPv4 options. In order to map the port's DHCP_Options row
to the port, the OVN ML2 driver stores both the subnet id and the port id
in the ``external_ids`` column.

If the admin wants to disable native OVN DHCPv4 for any particular port,
then the admin needs to define ``dhcp_disabled`` with the value ``true``
in the extra DHCP options, e.g.::

    neutron port-update <PORT_ID> \
        --extra-dhcp-opt ip_version=4,opt_name=dhcp_disabled,opt_value=true

DHCPv6
------

OVN implements native DHCPv6 support similar to DHCPv4. When a v6 subnet
is created, the OVN ML2 driver will insert a new entry into the
DHCP_Options table only when the subnet's ``ipv6_address_mode`` is not
``slaac`` and enable_dhcp is True.
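To make the subnet/port mapping concrete, the dictionaries below sketch
what a subnet-level and a port-level ``DHCP_Options`` row could contain.
The option set and all values are illustrative only; the actual options
depend on the subnet configuration:

.. code-block:: python

    # Illustrative contents of two DHCP_Options rows; values are made up.
    subnet_dhcp_options = {
        'cidr': '10.0.0.0/24',
        'external_ids': {'subnet_id': 'SUBNET_UUID'},
        'options': {'server_id': '10.0.0.1',
                    'server_mac': 'fa:16:3e:11:22:33',
                    'lease_time': '43200',
                    'router': '10.0.0.1'},
    }

    # Port-level row: the subnet defaults are overridden with the port's
    # extra DHCP options; both ids are stored for the reverse mapping.
    port_dhcp_options = dict(subnet_dhcp_options)
    port_dhcp_options['external_ids'] = {'subnet_id': 'SUBNET_UUID',
                                         'port_id': 'PORT_UUID'}
    port_dhcp_options['options'] = dict(subnet_dhcp_options['options'],
                                        mtu='1442')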
.. _ovn_worker:

===========================================
OVN Neutron Worker and Port status handling
===========================================

When a logical switch port's VIF is attached to or removed from the OVN
integration bridge, ovn-northd updates ``Logical_Switch_Port.up`` to
'True' or 'False' accordingly.

In order for the OVN Neutron ML2 driver to update the corresponding
Neutron port's status to 'ACTIVE' or 'DOWN' in the DB, it needs to monitor
the OVN Northbound DB. A Neutron worker is created for this purpose.

The implementation of the OVN worker can be found here -
'networking_ovn.ovsdb.worker.OvnWorker'.

The Neutron service will create 'n' API workers, 'm' RPC workers and 1 OVN
worker (all of these workers are separate processes).

API workers and RPC workers will create an ovsdb idl client object
('ovs.db.idl.Idl') to connect to the OVN_Northbound DB. See the
'networking_ovn.ovsdb.impl_idl_ovn.OvsdbNbOvnIdl' and
'ovsdbapp.backend.ovs_idl.connection.Connection' classes for more details.

The OVN worker will create a 'networking_ovn.ovsdb.ovsdb_monitor.OvnIdl'
class object (which inherits from 'ovs.db.idl.Idl') to connect to the
OVN_Northbound DB. On receiving OVN_Northbound DB updates from the
ovsdb-server, the 'notify' function of 'OvnIdl' is called by the parent
class object. OvnIdl.notify() passes the received events to the
ovsdb_monitor.OvnDbNotifyHandler class.
ovsdb_monitor.OvnDbNotifyHandler checks for any changes in
'Logical_Switch_Port.up' and updates the Neutron port's status
accordingly.

If the 'notify_nova_on_port_status_changes' configuration option is set,
then Neutron will notify nova on port status changes.

ovsdb locks
-----------

If there are multiple Neutron servers running, then each Neutron server
will have one OVN worker which listens for the notify events. When
'Logical_Switch_Port.up' is updated by ovn-northd, we do not want all the
Neutron servers to handle the event and update the Neutron port status. In
order for only one Neutron server to handle the events, ovsdb locks are
used.

At start, each Neutron server's OVN worker will try to acquire a lock with
the id 'neutron_ovn_event_lock'. The OVN worker which has acquired the
lock will handle the notify events. In case the Neutron server holding the
lock dies, ovsdb-server will assign the lock to another Neutron server in
the queue.

More details about the ovsdb locks can be found here: [1] and [2]

[1] - https://tools.ietf.org/html/draft-pfaff-ovsdb-proto-04#section-4.1.8

[2] - https://github.com/openvswitch/ovs/blob/branch-2.4/python/ovs/db/idl.py#L67

One thing to note is that the OVN worker (with OvnIdl) does not carry out
any transactions to the OVN Northbound DB. Since the API and RPC workers
are not configured with any locks, the use of the ovsdb lock on the
OVN_Northbound and OVN_Southbound DBs by the OVN workers will not have any
side effects on the transactions done by these API and RPC workers.
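A minimal sketch of the locking behaviour described above, using the
ovs.db.idl API directly; the connection address and schema file path are
illustrative and vary by deployment:

.. code-block:: python

    # Minimal sketch of the ovsdb lock usage; connection details and the
    # schema file location are illustrative.
    from ovs.db import idl as ovs_idl

    helper = ovs_idl.SchemaHelper('/usr/share/ovn/ovn-nb.ovsschema')
    helper.register_all()
    idl = ovs_idl.Idl('tcp:127.0.0.1:6641', helper)

    # Every server asks for the same named lock; ovsdb-server grants it
    # to exactly one client at a time.
    idl.set_lock('neutron_ovn_event_lock')

    # ... later, inside the event-handling loop ...
    if idl.has_lock:
        pass  # this worker is the one that handles the notify events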
Handling port status changes when neutron server(s) are down
------------------------------------------------------------

When the Neutron server starts, the OVN worker receives a dump of all
logical switch ports as events. 'ovsdb_monitor.OvnDbNotifyHandler' then
syncs up if there are any inconsistencies in the port status.

OVN Southbound DB Access
------------------------

The OVN Neutron ML2 driver needs to acquire chassis information (hostname
and physnet combinations). This is required initially to support routed
networks. Thus, the plugin will initiate and maintain a connection to the
OVN SB DB during startup.

.. Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain
   a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
   implied. See the License for the specific language governing
   permissions and limitations under the License.

   Convention for heading levels in Neutron devref:
   =======  Heading 0 (reserved for the title in a document)
   -------  Heading 1
   ~~~~~~~  Heading 2
   +++++++  Heading 3
   '''''''  Heading 4
   (Avoid deeper levels because they do not render well.)

Neutron Open vSwitch vhost-user support
=======================================

Neutron supports using Open vSwitch + DPDK vhost-user interfaces directly
in the OVS ML2 driver and agent. The current implementation relies on
multiple configuration values and includes runtime verification of Open
vSwitch's capability to provide these interfaces.

The OVS agent detects the capability of the underlying Open vSwitch
installation and passes that information over RPC via the agent
'configurations' dictionary. The ML2 driver uses this information to
select the proper VIF type and binding details.

Platform requirements
---------------------

* OVS 2.4.0+
* DPDK 2.0+

Configuration
-------------

.. code-block:: ini

    [OVS]
    datapath_type=netdev
    vhostuser_socket_dir=/var/run/openvswitch

When OVS is running with DPDK support enabled, and the ``datapath_type``
is set to ``netdev``, then the OVS ML2 driver will use the ``vhost-user``
VIF type and pass the necessary binding details to use OVS+DPDK and
vhost-user sockets. This includes the ``vhostuser_socket_dir`` setting,
which must match the directory passed to ``ovs-vswitchd`` on startup.

What about the networking-ovs-dpdk repo?
----------------------------------------

The networking-ovs-dpdk repo will continue to exist and undergo active
development. This feature just removes the necessity for a separate ML2
driver and OVS agent in the networking-ovs-dpdk repo. The
networking-ovs-dpdk project also provides a devstack plugin which allows
automated CI, a Puppet module, and an OpenFlow-based security group
implementation.
.. Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain
   a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
   implied. See the License for the specific language governing
   permissions and limitations under the License.

   Convention for heading levels in Neutron devref:
   =======  Heading 0 (reserved for the title in a document)
   -------  Heading 1
   ~~~~~~~  Heading 2
   +++++++  Heading 3
   '''''''  Heading 4
   (Avoid deeper levels because they do not render well.)

Neutron Plugin Architecture
===========================

`Salvatore Orlando: How to write a Neutron Plugin (if you really need to) `_

Plugin API
----------

.. automodule:: neutron.neutron_plugin_base_v2

.. autoclass:: NeutronPluginBaseV2
   :members:

.. Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain
   a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
   implied. See the License for the specific language governing
   permissions and limitations under the License.

   Convention for heading levels in Neutron devref:
   =======  Heading 0 (reserved for the title in a document)
   -------  Heading 1
   ~~~~~~~  Heading 2
   +++++++  Heading 3
   '''''''  Heading 4
   (Avoid deeper levels because they do not render well.)

Authorization Policy Enforcement
================================

Like most OpenStack projects, Neutron leverages oslo_policy [#]_. However,
since Neutron loves to be special and complicate every developer's life,
it also "augments" oslo_policy capabilities with:

* A wrapper module with its own API: neutron.policy;
* The ability to add fine-grained checks on attributes for resources in
  request bodies;
* The ability to use the policy engine to filter out attributes in
  responses;
* Some custom rule checks beyond those defined in oslo_policy.

This document discusses Neutron-specific aspects of policy enforcement,
and in particular how the enforcement logic is wired into API processing.
For any other information please refer to the developer documentation for
oslo_policy [#]_.

Authorization workflow
----------------------

The Neutron API controllers perform policy checks in two phases during the
processing of an API request:

* Request authorization, immediately before dispatching the request to the
  plugin layer for ``POST``, ``PUT``, and ``DELETE``, and immediately
  after returning from the plugin layer for ``GET`` requests;
* Response filtering, when building the response to be returned to the API
  consumer.

Request authorization
~~~~~~~~~~~~~~~~~~~~~

The aim of this step is to authorize processing for a request or reject it
with an error status code. This step uses the ``neutron.policy.enforce``
routine. This routine raises ``oslo_policy.PolicyNotAuthorized`` when
policy enforcement fails. The Neutron REST API controllers catch this
exception and return:

* A 403 response code on a ``POST`` request or a ``PUT`` request for an
  object owned by the project submitting the request;
* A 403 response for failures while authorizing API actions such as
  ``add_router_interface``;
* A 404 response for ``DELETE``, ``GET`` and all other ``PUT`` requests.

For ``DELETE`` operations the resource must first be fetched. This is done
by invoking the same ``_item`` [#]_ method used for processing ``GET``
requests.
This is also true for ``PUT`` operations, since the Neutron API implements
``PATCH`` semantics for ``PUTs``.

The criteria to evaluate are built in the ``_build_match_rule`` [#]_
routine. This routine takes the following input parameters:

* The action to be performed, in the ``<operation>_<resource>`` form,
  e.g. ``create_network``;
* The data to use for performing checks. For ``POST`` operations this
  could be a partial specification of the object, whereas it is always a
  full specification for ``GET``, ``PUT``, and ``DELETE`` requests, as
  resource data are retrieved before dispatching the call to the plugin
  layer;
* The collection name for the resource specified in the previous
  parameter; for instance, for a network it would be "networks".

The ``_build_match_rule`` routine returns an ``oslo_policy.RuleCheck``
instance built in the following way:

* Always add a check for the action being performed. This will match a
  policy like ``create_network`` in ``policy.json``;
* Return for ``GET`` operations; more detailed checks will be performed
  anyway when building the response;
* For each attribute which has been explicitly specified in the request,
  create a rule matching policy names in the form
  ``<operation>_<resource>:<attribute>``, and link it with the previous
  rule with an 'And' relationship (using ``oslo_policy.AndCheck``); this
  step will be performed only if the ``enforce_policy`` flag is set to
  ``True`` in the resource attribute descriptor (usually found in a data
  structure called ``RESOURCE_ATTRIBUTE_MAP``);
* If the attribute is a composite one, then further rules will be created;
  these will match policy names in the form
  ``<operation>_<resource>:<attribute>:<sub_attribute>``. An 'And'
  relationship will be used in this case too.

As all the rules to verify are linked by 'And' relationships, all the
policy checks must succeed in order for a request to be authorized. Rule
verification is performed by ``oslo_policy`` with no "customization" from
the Neutron side.
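The net effect of this match rule construction can be reproduced with
plain oslo_policy. The sketch below is illustrative only: the rule names
follow the ``<operation>_<resource>(:<attribute>)`` pattern described
above, while the rules Neutron actually loads come from its policy files
and registered defaults:

.. code-block:: python

    # Illustrative only: emulate the checks that _build_match_rule would
    # produce for creating a shared network.
    from oslo_config import cfg
    from oslo_policy import policy

    enforcer = policy.Enforcer(cfg.CONF)
    enforcer.set_rules(policy.Rules.from_dict({
        'admin_or_owner': 'is_admin:True or tenant_id:%(tenant_id)s',
        'create_network': 'rule:admin_or_owner',
        'create_network:shared': 'is_admin:True',
    }))

    target = {'tenant_id': 'project-a', 'shared': True}
    creds = {'tenant_id': 'project-a', 'is_admin': False}

    # create_network passes (the caller owns the network), but the
    # per-attribute rule for 'shared' requires admin, so the combined
    # AndCheck built by _build_match_rule would fail.
    print(enforcer.enforce('create_network', target, creds))         # True
    print(enforcer.enforce('create_network:shared', target, creds))  # False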
.. _response_filtering:

Response Filtering
~~~~~~~~~~~~~~~~~~

Some Neutron extensions, like the provider networks one, add attributes to
resources which are not meant to be consumed by all clients. This might be
because these attributes contain implementation details, or because they
are meant only to be used when exchanging information between services,
such as Nova and Neutron.

For this reason the policy engine is invoked again when building API
responses. This is achieved by the ``_exclude_attributes_by_policy`` [#]_
method in ``neutron.api.v2.base.Controller``. This method, for each
attribute in the response returned by the plugin layer, first checks if
the ``is_visible`` flag is True. In that case it proceeds to checking
policies for the attribute; if the policy check fails, the attribute is
added to a list of attributes that should be removed from the response
before returning it to the API client.

The neutron.policy API
----------------------

The ``neutron.policy`` module exposes a simple API whose main goal is to
allow the REST API controllers to implement the authorization workflow
discussed in this document. It is a bad practice to call the policy engine
from within the plugin layer, as this would make request authorization
dependent on configured plugins, and therefore make API behaviour
dependent on the plugin itself, which defies Neutron's tenet of being
backend agnostic.

The neutron.policy API exposes the following routines:

* ``init``
  Initializes the policy engine, loading rules from the json policy
  (files). This method can safely be called several times.
* ``reset``
  Clears all the rules currently configured in the policy engine. It is
  called in unit tests and at the end of the initialization of the core
  API router [#]_ in order to ensure rules are loaded after all the
  extensions are loaded.
* ``refresh``
  Combines init and reset. Called when a SIGHUP signal is sent to an API
  worker.
* ``set_rules``
  Explicitly sets the policy engine's rules. Used only in unit tests.
* ``check``
  Performs a check using the policy engine. Builds match rules as
  described in this document, and then evaluates the resulting rule using
  oslo_policy's policy engine. Returns True if the check succeeds, False
  otherwise.
* ``enforce``
  Operates like the check routine but raises if the check in oslo_policy
  fails.
* ``check_is_admin``
  Enforces the predefined context_is_admin rule; used to determine the
  is_admin property for a neutron context.
* ``check_is_advsvc``
  Enforces the predefined context_is_advsvc rule; used to determine the
  is_advsvc property for a neutron context.

Neutron specific policy rules
-----------------------------

Neutron provides two additional policy rule classes in order to support
the "augmented" authorization capabilities it provides. They both extend
``oslo_policy.RuleCheck`` and are registered using the
``oslo_policy.register`` decorator.

OwnerCheck: Extended Checks for Resource Ownership
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This class is registered for rules matching the ``tenant_id`` keyword and
overrides the generic check performed by oslo_policy in this case. It is
used for those cases where Neutron needs to check whether the project
submitting a request for a new resource owns the parent resource of the
one being created. Current usages of ``OwnerCheck`` include, for instance,
creating and updating a subnet.

This class also supports owner checks for extension parent resources, i.e.
parent resources introduced by service plugins, such as the router and
floating IP owner checks for the ``router`` service plugin. Developers can
register the extension resource name and service plugin name, as
registered in neutron-lib, in ``EXT_PARENT_RESOURCE_MAPPING``, which is
located in ``neutron_lib.services.constants``.

The check, performed in the ``__call__`` method, works as follows:

* Verify whether the target field is already in the target data. If yes,
  then simply verify whether the value for the target field in the target
  data is equal to the value for the same field in the credentials, just
  like ``oslo_policy.GenericCheck`` would do. This is also the most
  frequent case, as the target field is usually ``tenant_id``;
* If the previous check fails, extract a parent resource type and a parent
  field name from the target field. For instance, ``networks:tenant_id``
  identifies the ``tenant_id`` attribute of the ``network`` resource. For
  the extension parent resource case, ``ext_parent:tenant_id`` identifies
  the ``tenant_id`` attribute of the registered extension resource in
  ``EXT_PARENT_RESOURCE_MAPPING``;
* If no parent resource or target field could be identified, raise a
  ``PolicyCheckError`` exception;
* Retrieve a 'parent foreign key' from the ``_RESOURCE_FOREIGN_KEYS`` data
  structure in ``neutron.policy``. This foreign key is simply the
  attribute acting as a primary key in the parent resource.
  A ``PolicyCheckError`` exception will be raised if such a 'parent
  foreign key' cannot be retrieved;
* Using the core plugin, retrieve an instance of the resource having the
  'parent foreign key' as an identifier;
* Finally, verify whether the target field in this resource matches the
  one in the initial request data. For instance, for a port create
  request, verify whether the ``tenant_id`` of the port data structure
  matches the ``tenant_id`` of the network where this port is being
  created.

FieldCheck: Verify Resource Attributes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This class is registered with the policy engine for rules matching the
'field' keyword, and provides a way to perform fine-grained checks on
resource attributes. For instance, using this class of rules it is
possible to specify a rule for granting every project read access to
shared resources.

In policy.json, a FieldCheck rule is specified in the following way::

    field:<resource>:<field>=<value>

This will result in the initialization of a FieldCheck that will check for
``<field>`` in the target resource data, and return ``True`` if it is
equal to ``<value>``, or return ``False`` if ``<field>`` either is not
equal to ``<value>`` or does not exist at all.
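For example, a rule set allowing any project to read networks marked as
shared could be expressed as sketched below. This only illustrates the
rule syntax described above; the surrounding rules and defaults in a real
policy file differ:

.. code-block:: python

    # Sketch: a policy set where shared networks are readable by anyone.
    # In a real deployment these rules live in policy.json / policy.yaml.
    policy_rules = {
        'admin_or_owner': 'is_admin:True or tenant_id:%(tenant_id)s',
        # FieldCheck: grant access when the network's 'shared' attribute
        # is True, regardless of ownership.
        'get_network': 'rule:admin_or_owner or field:networks:shared=True',
    }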
Guidance for Neutron API developers
-----------------------------------

When developing REST APIs for Neutron it is important to be aware of how
the policy engine will authorize these requests. This is true both for
APIs served by Neutron "core" and for the APIs served by the various
Neutron "stadium" services.

* If an attribute of a resource might be subject to authorization checks,
  then the ``enforce_policy`` attribute should be set to ``True``. While
  setting this flag to ``True`` for each attribute is a viable strategy,
  it is worth noting that this will require a call to the policy engine
  for each attribute, considerably increasing the time required to
  complete policy checks for a resource. This could result in a
  scalability issue, especially in the case of list operations retrieving
  a large number of resources;
* Some resource attributes, even if not directly used in policy checks,
  might still be required by the policy engine. This is for instance the
  case of the ``tenant_id`` attribute. For these attributes the
  ``required_by_policy`` attribute should always be set to ``True``. This
  will ensure that the attribute is included in the resource data sent to
  the policy engine for evaluation;
* The ``tenant_id`` attribute is a fundamental one in Neutron API request
  authorization. The default policy, ``admin_or_owner``, uses it to
  validate whether a project owns the resource it is trying to operate on.
  To this aim, if a resource without a tenant_id is created, it is
  important to ensure that ad-hoc authZ policies are specified for this
  resource.
* There is still only one check which is hardcoded in Neutron's API layer:
  the check to verify that a project owns the network on which it is
  creating a port. This check is hardcoded and is always executed when
  creating a port, unless the network is shared. Unfortunately a solution
  for performing this check in an efficient way through the policy engine
  has not yet been found. Due to its nature, there is no way to override
  this check using the policy engine.
* It is strongly advised not to perform policy checks in the plugin or in
  the database management classes. This might lead to divergent API
  behaviours across plugins. Also, it might leave the Neutron DB in an
  inconsistent state if a request is not authorized after it has already
  been dispatched to the backend.

Notes
~~~~~

* No authorization checks are performed for requests coming from the RPC
  over AMQP channel. For all these requests a neutron admin context is
  built, and the plugins will process them as such.
* For ``PUT`` and ``DELETE`` requests a 404 error is returned on request
  authorization failures rather than a 403, unless the project submitting
  the request owns the resource to update or delete. This is to avoid
  conditions in which an API client might try to find out other projects'
  resource identifiers by sending out ``PUT`` and ``DELETE`` requests for
  random resource identifiers.
* There is no way at the moment to specify an ``OR`` relationship between
  two attributes of a given resource (e.g.: ``port.name == 'meh' or
  port.status == 'DOWN'``), unless the rule with the or condition is
  explicitly added to the policy.json file.
* ``OwnerCheck`` performs a plugin access; this will likely require a
  database access, but since the behaviour is implementation specific it
  might also imply a round-trip to the backend. This class of checks, when
  involving retrieving attributes for 'parent' resources, should be used
  very sparingly.
* In order for ``OwnerCheck`` rules to work, parent resources should have
  an entry in ``neutron.policy._RESOURCE_FOREIGN_KEYS``; moreover, the
  resource must be managed by the 'core' plugin (i.e. the one defined in
  the core_plugin configuration variable).

Policy-in-Code support
----------------------

Guideline on defining in-code policies
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The following are the guidelines for policy definitions. Ideally we should
define all available policies, but in the Neutron policy enforcement it is
not practical to define them all, because we check all attributes of a
target resource in :ref:`response_filtering`. Considering this, we have
special guidelines for the "get" operation.

* All ``<operation>_<resource>`` policies must be defined for all types of
  operations. Valid actions are ``create``, ``update``, ``delete`` and
  ``get``.
* ``get_<resources>`` (get plural) is unnecessary. The Neutron API layer
  uses the single-form policy ``get_<resource>`` when listing resources
  [#]_ [#]_.
* Member actions for individual resources must be defined. For example,
  ``add_router_interface`` of the ``router`` resource.
* All policies with attributes on "create", "update" and "delete" actions
  must be defined. A
  ``<operation>_<resource>:<attribute>(:<sub_attribute>)`` policy is
  required for attributes with ``enforce_policy`` in the API definitions.
  Note that it is recommended to define it even if the rule is the same as
  for ``<operation>_<resource>``, from the documentation perspective.
* For a policy with attributes of "get" actions, like
  ``get_<resource>:<attribute>(:<sub_attribute>)``, the following
  guideline applies:

  * A policy with an attribute must be defined if the policy is different
    from the policy for ``get_<resource>`` (without attributes).
  * If a policy with an attribute is the same as for ``get_<resource>``,
    there is no need to define it explicitly. This is for simplicity: we
    check all attributes of a target resource in the process of
    :ref:`response_filtering`, so defining them all would lead to very
    long policy definitions for "get" actions in our documentation, which
    would not be helpful for operators either.
  * If an attribute is marked as ``enforce_policy``, it is recommended to
    define the corresponding policy with the attribute. This is for
    clarification. If an attribute is marked as ``enforce_policy`` in the
    API definitions, for example, the Neutron API limits setting such an
    attribute to admin users only, but allows regular users to retrieve
    its value.
    If policies for the attribute are different across the types of
    operations, it is better to define all of them explicitly.

Registering policies in neutron related projects
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Policy-in-code support in Neutron is a bit different from other projects,
because the Neutron server needs to load policies in code from multiple
projects. Each Neutron related project should register the following two
entry points, ``oslo.policy.policies`` and ``neutron.policies``, in
``setup.cfg`` like below:

.. code-block:: none

    oslo.policy.policies =
        neutron = neutron.conf.policies:list_rules
    neutron.policies =
        neutron = neutron.conf.policies:list_rules

The above two entries are the same, but they have different purposes.

* The first entry point is a normal entry point defined by oslo.policy,
  and it is used to generate a sample policy file [#]_ [#]_.
* The second one is specific to Neutron. It is used by the
  ``neutron.policy`` module to load the policies of Neutron related
  projects. The ``oslo.policy.policies`` entry point is used by all
  projects which adopt oslo.policy, so it cannot be used to determine
  which projects are Neutron related; this is why the second entry point
  is required.

The recommended entry point name is the repository name: for example,
'neutron-fwaas' for FWaaS and 'networking-sfc' for SFC:

.. code-block:: none

    oslo.policy.policies =
        neutron-fwaas = neutron_fwaas.policies:list_rules
    neutron.policies =
        neutron-fwaas = neutron_fwaas.policies:list_rules

Apart from registering the ``neutron.policies`` entry point, the other
steps to be done in each Neutron related project for policy-in-code
support are the same as for all OpenStack projects.

References
----------

.. [#] `Oslo policy module `_
.. [#] `Oslo policy developer `_
.. [#] API controller item_ method

   .. _item: http://opendev.org/openstack/neutron/tree/neutron/api/v2/base.py?id=2015.1.1#n282
.. [#] Policy engine's build_match_rule_ method

   .. _build_match_rule: http://opendev.org/openstack/neutron/tree/neutron/policy.py?id=2015.1.1#n187
.. [#] exclude_attributes_by_policy_ method

   .. _exclude_attributes_by_policy: http://opendev.org/openstack/neutron/tree/neutron/api/v2/base.py?id=2015.1.1#n132
.. [#] Policy reset_ in neutron.api.v2.router

   .. _reset: http://opendev.org/openstack/neutron/tree/neutron/api/v2/router.py?id=2015.1.1#n122
.. [#] https://github.com/openstack/neutron/blob/051b6b40f3921b9db4f152a54f402c402cbf138c/neutron/pecan_wsgi/hooks/policy_enforcement.py#L173
.. [#] https://github.com/openstack/neutron/blob/051b6b40f3921b9db4f152a54f402c402cbf138c/neutron/pecan_wsgi/hooks/policy_enforcement.py#L143
.. [#] https://docs.openstack.org/oslo.policy/latest/user/usage.html#sample-file-generation
.. [#] https://docs.openstack.org/oslo.policy/latest/cli/index.html#oslopolicy-sample-generator

.. Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain
   a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
   implied.
   See the License for the specific language governing permissions and
   limitations under the License.

   Convention for heading levels in Neutron devref:
   =======  Heading 0 (reserved for the title in a document)
   -------  Heading 1
   ~~~~~~~  Heading 2
   +++++++  Heading 3
   '''''''  Heading 4
   (Avoid deeper levels because they do not render well.)

Composite Object Status via Provisioning Blocks
===============================================

We use the STATUS field on objects to indicate when a resource is ready,
setting it to ACTIVE so external systems know when it's safe to use that
resource. Knowing when to set the status to ACTIVE is simple when there is
only one entity responsible for provisioning a given object: when that
entity has finished provisioning, we just update the STATUS directly to
ACTIVE. However, there are resources in Neutron that require provisioning
by multiple asynchronous entities before they are ready to be used, so
managing the transition to the ACTIVE status becomes more complex. To
handle these cases, Neutron has `the provisioning_blocks module `_ to
track the entities that are still provisioning a resource.

The main example of this is with ML2, the L2 agents and the DHCP agents.
When a port is created and bound to a host, it's placed in the DOWN
status. The L2 agent now has to set up flows, security group rules, etc.
for the port, and the DHCP agent has to set up a DHCP reservation for the
port's IP and MAC. Before the transition to ACTIVE, both agents must
complete their work, or the port user (e.g. Nova) may attempt to use the
port and not have connectivity. To solve this, the provisioning_blocks
module is used to track the provisioning state of each agent, and the
status is only updated when both complete.

High Level View
---------------

To make use of the provisioning_blocks module, provisioning components
should be added whenever there is work to be done by another entity before
an object's status can transition to ACTIVE. This is accomplished by
calling the add_provisioning_component method for each entity. Then, as
each entity finishes provisioning the object, provisioning_complete must
be called to lift the provisioning block.

When the last provisioning block is removed, the provisioning_blocks
module will trigger a callback notification containing the object ID for
the object's resource type with the event PROVISIONING_COMPLETE. A
subscriber to this event can now update the status of this object to
ACTIVE or perform any other necessary actions.

A normal state transition will look something like the following:

1. A request comes in to create an object.
2. Logic on the Neutron server determines which entities are required to
   provision the object and adds a provisioning component for each entity
   for that object.
3. A notification is emitted to the entities so they start their work.
4. The object is returned to the API caller in the DOWN (or BUILD) state.
5. Each entity tells the server when it has finished provisioning the
   object. The server calls provisioning_complete for each entity that
   finishes.
6. When provisioning_complete is called on the last remaining entity, the
   provisioning_blocks module will emit an event indicating that
   provisioning has completed for that object.
7. A subscriber to this event on the server will then update the status of
   the object to ACTIVE to indicate that it is fully provisioned.

For a more concrete example, see the section below.
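The sketch below shows how a server-side component might follow this
workflow, assuming the module's entry points named above
(``add_provisioning_component``, ``provisioning_complete``) and the
neutron-lib callback registry; the entity name is made up:

.. code-block:: python

    # Sketch of the workflow above; 'my-backend' is a made-up entity name.
    from neutron.db import provisioning_blocks
    from neutron_lib.callbacks import registry
    from neutron_lib.callbacks import resources

    ENTITY = 'my-backend'

    def start_provisioning(context, port_id):
        # Step 2: block the ACTIVE transition until our entity is done.
        provisioning_blocks.add_provisioning_component(
            context, port_id, resources.PORT, ENTITY)

    def finish_provisioning(context, port_id):
        # Step 5: lift our block; when the last block is removed, a
        # PROVISIONING_COMPLETE callback event fires for the port.
        provisioning_blocks.provisioning_complete(
            context, port_id, resources.PORT, ENTITY)

    def _port_provisioned(resource, event, trigger, **kwargs):
        # Step 7: a subscriber would update the port status to ACTIVE here.
        pass

    registry.subscribe(_port_provisioned, resources.PORT,
                       provisioning_blocks.PROVISIONING_COMPLETE)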
ML2, L2 agents, and DHCP agents
-------------------------------

ML2 makes use of the provisioning_blocks module to prevent the status of ports from being transitioned to ACTIVE until both the L2 agent and the DHCP agent have finished wiring a port.

When a port is created or updated, the following happens to register the DHCP agent's provisioning blocks:

1. The subnet_ids are extracted from the fixed_ips field of the port and then ML2 checks to see if DHCP is enabled on any of the subnets.
2. The configuration for the DHCP agents hosting the network is looked up to ensure that at least one of them is new enough to report back that it has finished setting up the port reservation.
3. If either of the preconditions above fail, a provisioning block for the DHCP agent is not added and any existing DHCP agent blocks for that port are cleared to ensure the port isn't blocked waiting for an event that will never happen.
4. If the preconditions pass, a provisioning block is added for the port under the 'DHCP' entity.

When a port is created or updated, the following happens to register the L2 agent's provisioning blocks:

1. If the port is not bound, nothing happens because we don't know yet if an L2 agent is involved, so we have to wait until a port update that binds it.
2. Once the port is bound, the agent-based mechanism drivers will check if they have an agent on the bound host and, if the VNIC type belongs to the mechanism driver, a provisioning block is added for the port under the 'L2 Agent' entity.

Once the DHCP agent has finished setting up the reservation, it calls dhcp_ready_on_ports via the RPC API with the port ID. The DHCP RPC handler receives this and calls 'provisioning_complete' in the provisioning module with the port ID and the 'DHCP' entity to remove the provisioning block.

Once the L2 agent has finished setting up the port, it calls the normal update_device_list (or update_device_up) via the RPC API. The RPC callbacks handler calls 'provisioning_complete' with the port ID and the 'L2 Agent' entity to remove the provisioning block.

On the 'provisioning_complete' call that removes the last record, the provisioning_blocks module emits a callback PROVISIONING_COMPLETE event with the port ID. A function subscribed to this in ML2 then calls update_port_status to set the port to ACTIVE. At this point the normal notification is emitted to Nova, allowing the VM to be unpaused.

In the event that the DHCP or L2 agent is down, the port will not transition to the ACTIVE status (as is the case now if the L2 agent is down). Agents must account for this by telling the server that wiring has been completed after configuring everything during startup. This ensures that ports created on offline agents (or agents that crash and restart) eventually become active.

To account for server instability, the notifications about port wiring being complete must use RPC calls so the agent gets a positive acknowledgement from the server, and it must keep retrying until either the port is deleted or it is successful.

If an ML2 driver immediately places a bound port in the ACTIVE state (e.g. after calling a backend in update_port_postcommit), the provisioning_blocks module will not have any impact on that process.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/quality_of_service.rst0000644000175000017500000004723100000000000030216 0ustar00coreycorey00000000000000..
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Convention for heading levels in Neutron devref:
=======  Heading 0 (reserved for the title in a document)
-------  Heading 1
~~~~~~~  Heading 2
+++++++  Heading 3
'''''''  Heading 4
(Avoid deeper levels because they do not render well.)

Quality of Service
==================

The Quality of Service advanced service is designed as a service plugin. The service is decoupled from the rest of the Neutron code on multiple levels (see below).

QoS extends core resources (ports, networks) without using mixins inherited from plugins, but through an ml2 extension driver.

Details about the DB models, API extension, and use cases can be found here: `qos spec `_.

Service side design
-------------------

* neutron.extensions.qos: base extension + API controller definition. Note that rules are subattributes of policies and hence embedded into their URIs.
* neutron.extensions.qos_fip: base extension + API controller definition. Adds qos_policy_id to floating IPs, enabling users to set/update the binding QoS policy of a floating IP.
* neutron.services.qos.qos_plugin: QoSPlugin, the service plugin that implements the 'qos' extension, receiving and handling API calls to create/modify policies and rules.
* neutron.services.qos.drivers.manager: the manager that passes object actions down to every enabled QoS driver and issues RPC calls when any of the drivers require RPC push notifications.
* neutron.services.qos.drivers.base: the interface class for pluggable QoS drivers that are used to update backends about new {create, update, delete} events on any rule or policy change, including precommit events that some backends could need for synchronization reasons. The drivers also declare which QoS rules, VIF drivers and VNIC types are supported.
* neutron.core_extensions.base: contains an interface class to implement core resource (port/network) extensions. Core resource extensions are then easily integrated into interested plugins. We may need to have a core resource extension manager that would utilize those extensions, to avoid plugin modifications for every new core resource extension.
* neutron.core_extensions.qos: contains the QoS core resource extension that conforms to the interface described above.
* neutron.plugins.ml2.extensions.qos: contains the ml2 extension driver that handles core resource updates by reusing the core_extensions.qos module mentioned above. In the future, we would like to see a plugin-agnostic core resource extension manager that could be integrated into other plugins with ease.

QoS plugin implementation guide
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The neutron.extensions.qos.QoSPluginBase class uses method proxies for methods relating to QoS policy rules. Each such method is generic in the sense that it is intended to handle any rule type. For example, QoSPluginBase has a create_policy_rule method instead of both create_policy_dscp_marking_rule and create_policy_bandwidth_limit_rule methods.
The logic behind the proxies allows a call to a plugin's create_policy_dscp_marking_rule to be handled by the create_policy_rule method, which will receive a QosDscpMarkingRule object as an argument in order to execute behavior specific to the DSCP marking rule type. This approach allows new rule types to be introduced without requiring a plugin to modify code as a result. As would be expected, any subclass of QoSPluginBase must override the base class's abc.abstractmethod methods, even if only to raise NotImplementedError.

Supported QoS rule types
~~~~~~~~~~~~~~~~~~~~~~~~

Each QoS driver has a property called supported_rule_types, where the driver exposes the rules it's able to handle. For a list of all rule types, see: neutron.services.qos.qos_consts.VALID_RULE_TYPES.

The list of supported QoS rule types exposed by neutron is calculated as the common subset of rules supported by all active QoS drivers.

Note: the list of supported rule types reported by the core plugin is not enforced when accessing QoS rule resources. This is mostly because otherwise we would not be able to create rules while at least one of the QoS drivers in the gate lacks support for the rules we're trying to test.

Database models
~~~~~~~~~~~~~~~

The QoS design defines the following two conceptual resources to apply QoS rules to a port, a network or a floating IP:

* QoS policy
* QoS rule (type specific)

Each QoS policy contains zero or more QoS rules. A policy is then applied to a network or a port, making all rules of the policy applied to the corresponding Neutron resource.

When applied through a network association, policy rules may or may not apply to neutron internal ports (like router, dhcp, etc.). The QosRule base object provides a default should_apply_to_port method which could be overridden. In the future we may want to have a flag in QoSNetworkPolicyBinding or QosRule to enforce such type of application (for example when limiting all the ingress of router devices on an external network automatically).

Each project can have at most one default QoS policy, although it is not mandatory. If a default QoS policy is defined, all new networks created within this project will have this policy assigned, as long as no other QoS policy is explicitly attached during the creation process. If the default QoS policy is unset, no change to existing networks will be made.

From the database point of view, the following objects are defined in the schema:

* QosPolicy: directly maps to the conceptual policy resource.
* QosNetworkPolicyBinding, QosPortPolicyBinding, QosFIPPolicyBinding: define the attachment between a Neutron resource and a QoS policy.
* QosPolicyDefault: defines a default QoS policy per project.
* QosBandwidthLimitRule: defines the rule to limit the maximum egress bandwidth.
* QosDscpMarkingRule: defines the rule that marks the Differentiated Service bits for egress traffic.
* QosMinimumBandwidthRule: defines the rule that creates a minimum bandwidth constraint.

All database models are defined under:

* neutron.db.qos.models

QoS versioned objects
~~~~~~~~~~~~~~~~~~~~~

For QoS, the following neutron objects are implemented:

* QosPolicy: directly maps to the conceptual policy resource, as defined above.
* QosPolicyDefault: defines a default QoS policy per project.
* QosBandwidthLimitRule: defines the instance bandwidth limit rule type, characterized by a max kbps and a max burst kbits. This rule also has a direction parameter to set the traffic direction, from the instance's point of view.
* QosDscpMarkingRule: defines the DSCP rule type, characterized by an even integer between 0 and 56. These integers are the result of the bits in the DiffServ section of the IP header, and only certain configurations are valid. As a result, the list of valid DSCP mark values is: 0, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 46, 48, and 56.
* QosMinimumBandwidthRule: defines the minimum assured bandwidth rule type, characterized by a min_kbps parameter. This rule also has a direction parameter to set the traffic direction, from the instance's point of view. The only direction implemented so far is egress.

Those are defined in:

* neutron.objects.qos.policy
* neutron.objects.qos.rule

For the QosPolicy neutron object, the following public methods were implemented:

* get_network_policy/get_port_policy/get_fip_policy: returns the policy object that is attached to the corresponding Neutron resource.
* attach_network/attach_port/attach_floatingip: attach a policy to the corresponding Neutron resource.
* detach_network/detach_port/detach_floatingip: detach a policy from the corresponding Neutron resource.

In addition to the fields that belong to the QoS policy database object itself, synthetic fields were added to the object that represent lists of rules that belong to the policy. To get a list of all rules for a specific policy, a consumer of the object can just access the corresponding attribute via:

* policy.rules

The implementation is done in a way that will allow adding a new rule list field with little or no modification to the policy object itself. This is achieved by smart introspection of existing available rule object definitions and automatic definition of those fields on the policy class. Note that rules are loaded in a non-lazy way, meaning they are all fetched from the database on policy fetch.

For QosRule objects, an extendable approach was taken to allow easy addition of objects for new rule types. To accommodate this, fields common to all types are put into a base class called QosRule that is then inherited into type-specific rule implementations that, ideally, only define additional fields and some other minor things.

Note that the QosRule base class is not registered with the oslo.versionedobjects registry, because it's not expected that 'generic' rules should be instantiated (and to suggest just that, the base rule class is marked as ABC).

QoS objects rely on some primitive database API functions that are added in:

* neutron_lib.db.api: those can be reused to fetch other models that do not have corresponding versioned objects yet, if needed.
* neutron.db.qos.api: contains database functions that are specific to QoS models.

RPC communication
~~~~~~~~~~~~~~~~~

Details on the RPC communication implemented in the reference backend driver are discussed in `a separate page `_.

The flow of updates is as follows:

* If a port that is bound to the agent is attached to a QoS policy, then the ML2 plugin detects the change by relying on the ML2 QoS extension driver, and notifies the agent about a port change. The agent proceeds with the notification by calling get_device_details() and getting the new port dict that contains the new qos_policy_id. Each device details dict is passed into the L2 agent extension manager, which passes it down into every enabled extension, including QoS. The QoS extension sees that there is a new, unknown QoS policy for a port, so it uses ResourcesPullRpcApi to fetch the current state of the policy (with all the rules included) from the server.
After that, the QoS extension applies the rules by calling into the QoS driver that corresponds to the agent.

* For floating IPs, a ``fip_qos`` L3 agent extension was implemented. This extension receives and processes router updates. For each update, it goes over each floating IP associated to the router. If a floating IP has a QoS policy associated to it, the extension uses ResourcesPullRpcApi to fetch the policy details from the Neutron server. If the policy includes ``bandwidth_limit`` rules, the extension applies them to the appropriate router device by directly calling the l3_tc_lib.
* On an existing QoS policy update (this includes any change to the policy or its rules), the server pushes the new policy object state through the ResourcesPushRpcApi interface. The interface fans out the serialized (dehydrated) object to any agent that is listening for QoS policy updates. If an agent has seen the policy before (it is attached to one of the ports/floating IPs it maintains), it applies the updates to the port/floating IP. Otherwise, the agent silently ignores the update.

Agent side design
-----------------

Reference agents implement QoS functionality using an `L2 agent extension <./l2_agent_extensions.html>`_.

* neutron.agent.l2.extensions.qos defines the QoS L2 agent extension. It receives handle_port and delete_port events and passes them down into the QoS agent backend driver (see below). The file also defines the QosAgentDriver interface. Note: each backend implements its own driver. The driver handles low level interaction with the underlying networking technology, while the QoS extension handles operations that are common to all agents.

For the L3 agent:

* neutron.agent.l3.extensions.fip_qos defines the QoS L3 agent extension. It implements the L3 agent side of floating IP rate limiting. For all routers, if a floating IP has QoS ``bandwidth_limit`` rules, the corresponding TC filters will be added to the appropriate router device, depending on the router type.

Agent backends
~~~~~~~~~~~~~~

At the moment, QoS is supported by the Open vSwitch, SR-IOV and Linux bridge ml2 drivers.

Each agent backend defines a QoS driver that implements the QosAgentDriver interface:

* Open vSwitch (QosOVSAgentDriver);
* SR-IOV (QosSRIOVAgentDriver);
* Linux bridge (QosLinuxbridgeAgentDriver).

For the networking back ends, QoS supported rules, and traffic directions (from the VM point of view), please see the table: `Networking back ends, supported rules, and traffic direction <../../admin/config-qos.html#id1>`_.

Open vSwitch
++++++++++++

The Open vSwitch implementation relies on the new ovs_lib OVSBridge functions:

* get_egress_bw_limit_for_port
* create_egress_bw_limit_for_port
* delete_egress_bw_limit_for_port
* get_ingress_bw_limit_for_port
* update_ingress_bw_limit_for_port
* delete_ingress_bw_limit_for_port

An egress bandwidth limit is effectively configured on the port by setting the port Interface parameters ingress_policing_rate and ingress_policing_burst. That approach is less flexible than linux-htb, Queues and OvS QoS profiles, which we may explore in the future, but which will need to be used in combination with openflow rules.

An ingress bandwidth limit is effectively configured on the port by setting a Queue and an OvS QoS profile of linux-htb type for the port.
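As a rough illustration of how an agent-side driver uses these functions (the function names come from the list above; the keyword argument names, bridge name and port name are assumptions, so treat this as a sketch rather than a reference):

::

    from neutron.agent.common import ovs_lib

    br = ovs_lib.OVSBridge('br-int')

    # egress limit, from the VM point of view: implemented with the
    # Interface ingress_policing_rate/ingress_policing_burst parameters
    br.create_egress_bw_limit_for_port('tap0', max_kbps=10000,
                                       max_burst_kbps=8000)
    print(br.get_egress_bw_limit_for_port('tap0'))

    # ingress limit, from the VM point of view: implemented with a
    # linux-htb Queue and OvS QoS profile
    br.update_ingress_bw_limit_for_port('tap0', max_kbps=10000,
                                        max_burst_kbps=8000)

    br.delete_egress_bw_limit_for_port('tap0')
    br.delete_ingress_bw_limit_for_port('tap0')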
The Open vSwitch DSCP marking implementation relies on the recent addition of the ovs_agent_extension_api OVSAgentExtensionAPI to request access to the integration bridge functions:

* add_flow
* mod_flow
* delete_flows
* dump_flows_for

The DSCP markings are in fact configured on the port by means of openflow rules.

.. note:: As of the Ussuri release, QoS rules can be applied to direct ports with hardware offload capability (switchdev); this requires Open vSwitch version 2.11.0 or newer and a Linux kernel based on kernel 5.4.0 or newer.

SR-IOV
++++++

The SR-IOV bandwidth limit and minimum bandwidth implementation relies on the new pci_lib function:

* set_vf_rate

As the name of the function suggests, the limit is applied on a Virtual Function (VF). This function has a parameter called "rate_type", whose value can be set to "rate" or "min_tx_rate" for enforcing the bandwidth limit or the minimum bandwidth, respectively.

The ip link interface has the following limitation for bandwidth limits: it uses Mbps as the unit of bandwidth measurement, not kbps, and does not support floating point numbers. So if the limit is set to something less than 1000 kbps, it is set to 1 Mbps. If the limit is set to something that does not divide into 1000 kbps chunks, then the effective limit is rounded to the nearest integer Mbps value.

Linux bridge
++++++++++++

The Linux bridge implementation relies on the new tc_lib functions.

For the egress bandwidth limit rule:

* set_filters_bw_limit
* update_filters_bw_limit
* delete_filters_bw_limit

The egress bandwidth limit is configured on the tap port by setting traffic policing on the tc ingress queueing discipline (qdisc). Details about the ingress qdisc can be found in the `lartc how-to `__. The reason why the ingress qdisc is used to configure the egress bandwidth limit is that tc works on traffic which is visible from the "inside bridge" perspective. So traffic incoming to the bridge via the tap interface is in fact outgoing from Neutron's port. This implementation is the same as what Open vSwitch does when ingress_policing_rate and ingress_policing_burst are set for a port.

For the ingress bandwidth limit rule:

* set_tbf_bw_limit
* update_tbf_bw_limit
* delete_tbf_bw_limit

The ingress bandwidth limit is configured on the tap port by setting a simple `tc-tbf `_ queueing discipline (qdisc) on the port. It requires the value of the HZ parameter configured in the kernel on the host. This value is necessary to calculate the minimal burst value which is set in tc. Details about how it is calculated can be found `here `_. This solution is similar to the Open vSwitch implementation.

The Linux bridge DSCP marking implementation relies on the linuxbridge_extension_api to request access to the IptablesManager class and to manage chains in the ``mangle`` table in iptables.

QoS driver design
-----------------

The QoS framework is flexible enough to support any third-party vendor. To integrate a third party driver (one that just wants to be aware of the QoS create/update/delete API calls), one needs to implement 'neutron.services.qos.drivers.base' and register the driver during the core plugin or mechanism driver load; see the neutron.services.qos.drivers.openvswitch.driver register method for an example, and the sketch after the notes below.

.. note:: All the functionality MUST be implemented by the vendor; neutron's QoS framework will just act as an interface that passes the received QoS API requests through to the driver and helps with database persistence for the API operations.

.. note:: The L3 agent ``fip_qos`` extension does not have a driver implementation; it directly uses the ``l3_tc_lib`` for all types of routers.
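The sketch below shows the rough shape of such a driver, following the description above. Treat it as an assumption-laden outline rather than a reference: the exact ``DriverBase`` constructor arguments and the supported-rules validator format should be checked against the in-tree drivers, and every name prefixed with ``myvendor`` is made up:

::

    from neutron.services.qos.drivers import base
    from neutron.services.qos import qos_consts

    # Rule types this hypothetical backend can enforce. The in-tree
    # drivers also provide per-parameter validator specs in these dicts.
    SUPPORTED_RULES = {qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: {}}


    class MyVendorQoSDriver(base.DriverBase):

        @staticmethod
        def create():
            return MyVendorQoSDriver(
                name='myvendor',
                vif_types=['ovs'],       # VIF types the backend can bind
                vnic_types=['normal'],   # VNIC types the backend supports
                supported_rules=SUPPORTED_RULES,
                requires_rpc_notifications=False)

        def update_policy(self, context, policy):
            # push the new policy state (and its rules) to the backend
            pass


    def register():
        # instantiating the driver is enough; DriverBase takes care of
        # registering it with the QoS driver manager
        MyVendorQoSDriver.create()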
Configuration
-------------

To enable the service, the following steps should be followed:

On the server side:

* enable the qos service in service_plugins;
* for ml2, add 'qos' to extension_drivers in the [ml2] section;
* for L3 floating IP QoS, add 'qos' and 'router' to service_plugins.

On the agent side (OVS):

* add 'qos' to extensions in the [agent] section.

On the L3 agent side:

* for floating IP QoS support, add 'fip_qos' to extensions in the [agent] section.

Testing strategy
----------------

All the code added or extended as part of the effort got reasonable unit test coverage.

Neutron objects
~~~~~~~~~~~~~~~

Base unit test classes to validate neutron objects were implemented in a way that allows code reuse when introducing a new object type. There are two test classes that are utilized for that:

* BaseObjectIfaceTestCase: class to validate basic object operations (mostly CRUD) with the database layer isolated.
* BaseDbObjectTestCase: class to validate the same operations with models in place and the database layer unmocked.

Every new object implemented on top of one of those classes is expected to either inherit existing test cases as is, or reimplement them, if it makes sense in terms of how those objects are implemented. Specific test classes can obviously extend the set of test cases as needed (e.g. you need to define new test cases for those additional methods that you may add to your object implementations on top of the base semantics common to all neutron objects).

Functional tests
~~~~~~~~~~~~~~~~

Additions to ovs_lib to set bandwidth limits on ports are covered in:

* neutron.tests.functional.agent.test_ovs_lib

New functional tests for tc_lib to set bandwidth limits on ports are in:

* neutron.tests.functional.agent.linux.test_tc_lib

New functional tests for test_l3_tc_lib to set TC filters on router floating IP related devices are covered in:

* neutron.tests.functional.agent.linux.test_l3_tc_lib

New functional tests for the L3 agent floating IP rate limit:

* neutron.tests.functional.agent.l3.extensions.test_fip_qos_extension

API tests
~~~~~~~~~

API tests for basic CRUD operations for ports, networks, policies, and rules were added in:

* neutron-tempest-plugin.api.test_qos

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/quota.rst0000644000175000017500000004410200000000000025445 0ustar00coreycorey00000000000000..

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Convention for heading levels in Neutron devref:
=======  Heading 0 (reserved for the title in a document)
-------  Heading 1
~~~~~~~  Heading 2
+++++++  Heading 3
'''''''  Heading 4
(Avoid deeper levels because they do not render well.)

Quota Management and Enforcement
================================

Most resources exposed by the Neutron API are subject to quota limits. The Neutron API exposes an extension for managing such quotas.
Quota limits are enforced at the API layer, before the request is dispatched to the plugin.

Default values for quota limits are specified in neutron.conf. Admin users can override those default values on a per-project basis. Limits are stored in the Neutron database; if no limit is found for a given resource and project, then the default value for such resource is used.

Configuration-based quota management, where every project gets the same quota limit specified in the configuration file, has been deprecated as of the Liberty release.

Please note that Neutron supports neither per-user quota limits nor quota management for hierarchical multitenancy (as a matter of fact Neutron does not support hierarchical multitenancy at all). Also, quota limits are currently not enforced on RPC interfaces listening on the AMQP bus.

Plugins and ML2 drivers are not supposed to enforce quotas for resources they manage. However, the subnet_allocation [#]_ extension is an exception and will be discussed below.

The quota management and enforcement mechanisms discussed here apply to every resource which has been registered with the Quota engine, regardless of whether such resource belongs to the core Neutron API or one of its extensions.

High Level View
---------------

There are two main components in the Neutron quota system:

* The Quota API extensions.
* The Quota Engine.

Both components rely on a quota driver. The neutron codebase currently defines two quota drivers:

* neutron.db.quota.driver.DbQuotaDriver
* neutron.quota.ConfDriver

The latter driver is however deprecated.

The Quota API extension handles quota management, whereas the Quota Engine component handles quota enforcement. This API extension is loaded like any other extension. For this reason plugins must explicitly support it by including "quotas" in the supported_extension_aliases attribute.

In the Quota API simple CRUD operations are used for managing project quotas. Please note that the current behaviour when deleting a project quota is to reset quota limits for that project to configuration defaults. The API extension does not validate the project identifier with the identity service.

In addition, the Quota Detail API extension complements the Quota API extension by allowing users (typically admins) to retrieve details about quotas per project. Quota details include the used/limit/reserved count for the project's resources (networks, ports, etc.).

Performing quota enforcement is the responsibility of the Quota Engine. RESTful API controllers, before sending a request to the plugin, try to obtain a reservation from the quota engine for the resources specified in the client request. If the reservation is successful, then it proceeds to dispatch the operation to the plugin.

For a reservation to be successful, the total amount of resources requested, plus the total amount of resources reserved, plus the total amount of resources already stored in the database should not exceed the project's quota limit.

Finally, both quota management and enforcement rely on a "quota driver" [#]_, whose task is basically to perform database operations.

Quota Management
----------------

The quota management component is fairly straightforward.

However, unlike the vast majority of Neutron extensions, it uses its own controller class [#]_. This class does not implement the POST operation. List, get, update, and delete operations are implemented by the usual index, show, update and delete methods.
These methods simply call into the quota driver for either fetching project quotas or updating them.

The _update_attributes method is called only once in the controller lifetime. This method dynamically updates Neutron's resource attribute map [#]_ so that an attribute is added for every resource managed by the quota engine. Request authorisation is performed in this controller, and only 'admin' users are allowed to modify quotas for projects. As the neutron policy engine is not used, it is not possible to configure which users should be allowed to manage quotas using policy.json.

The driver operations dealing with quota management are:

* delete_tenant_quota, which simply removes all entries from the 'quotas' table for a given project identifier;
* update_quota_limit, which adds or updates an entry in the 'quotas' table for a given project identifier and a given resource name;
* _get_quotas, which fetches limits for a set of resources and a given project identifier;
* _get_all_quotas, which behaves like _get_quotas, but for all projects.

Resource Usage Info
-------------------

Neutron has two ways of tracking resource usage info:

* CountableResource, where resource usage is calculated every time quota limits are enforced by counting rows in the resource table and reservations for that resource.
* TrackedResource, which instead relies on a specific table tracking usage data, and performs explicit counting only when the data in this table are not in sync with actual used and reserved resources.

Another difference between CountableResource and TrackedResource is that the former invokes a plugin method to count resources. CountableResource should therefore be employed for plugins which do not leverage the Neutron database.

The actual class that the Neutron quota engine will use is determined by the track_quota_usage variable in the quota configuration section. If True, TrackedResource instances will be created, otherwise the quota engine will use CountableResource instances. Resource creation is performed by the create_resource_instance factory method in the neutron.quota.resource module.

From a performance perspective, having a table tracking resource usage has some advantages, albeit not fundamental. Indeed the time required for executing queries to explicitly count objects will increase with the number of records in the table. On the other hand, using TrackedResource will fetch a single record, but has the drawback of having to execute an UPDATE statement once the operation is completed. Nevertheless, CountableResource instances do not simply perform a SELECT query on the relevant table for a resource, but invoke a plugin method, which might execute several statements and sometimes even interact with the backend before returning.

Resource usage tracking also becomes important for operational correctness when coupled with the concept of resource reservation, discussed in another section of this chapter.

Tracking quota usage is not as simple as updating a counter every time resources are created or deleted. Indeed a quota-limited resource in Neutron can be created in several ways. While a RESTful API request is the most common one, resources can be created by RPC handlers listening on the AMQP bus, such as those which create DHCP ports, or by plugin operations, such as those which create router ports.

To this aim, TrackedResource instances are initialised with a reference to the model class for the resource for which they track usage data.
During object initialisation, SqlAlchemy event handlers are installed for this class. The event handler is executed after a record is inserted or deleted. As a result, usage data for that resource will be marked as 'dirty' once the operation completes, so that the next time usage data is requested, it will be synchronised by counting resource usage from the database.

Even if this solution has some drawbacks, listed in the 'Exceptions and Caveats' section, it is more reliable than solutions such as:

* Updating the usage counters with the new 'correct' value every time an operation completes.
* Having a periodic task synchronising quota usage data with actual data in the Neutron DB.

Finally, regardless of whether CountableResource or TrackedResource is used, the quota engine always invokes its count() method to retrieve resource usage. Therefore, from the perspective of the Quota engine there is absolutely no difference between CountableResource and TrackedResource.

Quota Enforcement
-----------------

Before dispatching a request to the plugin, the Neutron 'base' controller [#]_ attempts to make a reservation for the requested resource(s). Reservations are made by calling the make_reservation method in neutron.quota.QuotaEngine. The process of making a reservation is fairly straightforward:

* Get current resource usages. This is achieved by invoking the count method on every requested resource, and then retrieving the amount of reserved resources.
* Fetch current quota limits for requested resources, by invoking the _get_tenant_quotas method.
* Fetch expired reservations for selected resources. This amount will be subtracted from resource usage. As in most cases there won't be any expired reservation, this approach actually requires fewer DB operations than doing a sum of non-expired, reserved resources for each request.
* For each resource calculate its headroom, and verify the requested amount of resources is less than the headroom.
* If the above is true for all resources, the reservation is saved in the DB, otherwise an OverQuotaLimit exception is raised.

The quota engine is able to make a reservation for multiple resources. However, it is worth noting that because of the current structure of the Neutron API layer, there will not be any practical case in which a reservation for multiple resources is made. For this reason, performance optimisations avoiding repeated queries for every resource are not part of the current implementation.

In order to ensure correct operations, a row-level lock is acquired in the transaction which creates the reservation. The lock is acquired when reading usage data. In case of write-set certification failures, which can occur in active/active clusters such as MySQL galera, the decorator neutron_lib.db.api.retry_db_errors will retry the transaction if a DBDeadLock exception is raised. While non-locking approaches are possible, it has been found out that, since a non-locking algorithm increases the chances of collision, the cost of handling a DBDeadlock is still lower than the cost of retrying the operation when a collision is detected. A study in this direction was conducted for IP allocation operations, but the same principles apply here as well [#]_. Nevertheless, moving away from DB-level locks is something that must happen for quota enforcement in the future.

Committing and cancelling a reservation is as simple as deleting the reservation itself.
When a reservation is committed, the resources which were committed are now stored in the database, so the reservation itself should be deleted. The Neutron quota engine simply removes the record when cancelling a reservation (i.e.: the request failed to complete), and also marks quota usage info as dirty when the reservation is committed (i.e.: the request completed correctly). Reservations are committed or cancelled by respectively calling the commit_reservation and cancel_reservation methods in neutron.quota.QuotaEngine.

Reservations are not perennial. Eternal reservations would eventually exhaust projects' quotas because they would never be removed when an API worker crashes whilst in the middle of an operation. Reservation expiration is currently set to 120 seconds, and is not configurable, not yet at least. Expired reservations are not counted when calculating resource usage. While creating a reservation, if any expired reservation is found, all expired reservations for that project and resource will be removed from the database, thus avoiding build-up of expired reservations.

Setting up Resource Tracking for a Plugin
-----------------------------------------

By default plugins do not leverage resource tracking. Having the plugin explicitly declare which resources should be tracked is a precise design choice aimed at limiting as much as possible the chance of introducing errors in existing plugins.

For this reason a plugin must declare which resources it intends to track. This can be achieved using the tracked_resources decorator available in the neutron.quota.resource_registry module. The decorator should ideally be applied to the plugin's __init__ method.

The decorator accepts as input a list of keyword arguments. The name of the argument must be a resource name, and the value of the argument must be a DB model class. For example:

::

    @resource_registry.tracked_resources(network=models_v2.Network,
                                         port=models_v2.Port,
                                         subnet=models_v2.Subnet,
                                         subnetpool=models_v2.SubnetPool)

This will ensure network, port, subnet and subnetpool resources are tracked. In theory, it is possible to use this decorator multiple times, and not exclusively on __init__ methods. However, this would eventually lead to code readability and maintainability problems, so developers are strongly encouraged to apply this decorator exclusively to the plugin's __init__ method (or any other method which is called by the plugin only once during its initialization).

Notes for Implementors of RPC Interfaces and RESTful Controllers
----------------------------------------------------------------

Neutron unfortunately does not have a layer which is called before dispatching the operation to the plugin which can be leveraged both from RESTful and RPC over AMQP APIs. In particular the RPC handlers call straight into the plugin, without doing any request authorisation or quota enforcement.

Therefore RPC handlers must explicitly indicate if they are going to call the plugin to create or delete any sort of resources. This is achieved in a simple way, by ensuring modified resources are marked as dirty after the RPC handler execution terminates. To this aim developers can use the mark_resources_dirty decorator available in the module neutron.quota.resource_registry, as shown in the sketch below.

The decorator will scan the whole list of registered resources, and store the dirty status for their usage trackers in the database for those resources for which items have been created or destroyed during the plugin operation.
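For illustration, a short sketch of the RPC handler case; only the decorator and its module come from the text above, while the handler class and method are made up:

::

    from neutron.quota import resource_registry


    class MyPluginRpcCallback(object):

        def __init__(self, plugin):
            self.plugin = plugin

        @resource_registry.mark_resources_dirty
        def create_dhcp_port(self, context, **kwargs):
            # this handler creates a port without going through the API
            # layer; the decorator marks the 'port' usage tracker dirty
            # once the handler returns, keeping quota counters in sync
            return self.plugin.create_port(context, **kwargs)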
Exceptions and Caveats
----------------------

Please be aware of the following limitations of the quota enforcement engine:

* Subnet allocation from subnet pools, in particular shared pools, is also subject to quota limit checks. However these checks are not enforced by the quota engine, but through a mechanism implemented in the neutron.ipam.subnetalloc module. This is because the Quota engine is not able to satisfy the requirements for quotas on subnet allocation.
* The quota engine also provides a limit_check routine which enforces quota checks without creating reservations. This way of doing quota enforcement is extremely unreliable and is superseded by the reservation mechanism. It has not been removed to ensure off-tree plugins and extensions which leverage it are not broken.
* SqlAlchemy events might not be the most reliable way for detecting changes in resource usage. Since the event mechanism monitors the data model class, it is paramount for correct quota enforcement that resources are always created and deleted using object relational mappings. For instance, deleting a resource with a query.delete call will not trigger the event. SQLAlchemy events should be considered as a temporary measure adopted as Neutron lacks persistent API objects.
* As CountableResource instances do not track usage data, when making a reservation no write-intent lock is acquired. Therefore the quota engine with CountableResource is not concurrency-safe.
* The mechanism for specifying for which resources usage tracking is enabled relies on the fact that the plugin is loaded before quota-limited resources are registered. For this reason it is not possible to validate whether a resource actually exists or not when enabling tracking for it. Developers should pay particular attention to ensuring resource names are correctly specified.
* The code assumes usage trackers are a trusted source of truth: if they report a usage counter and the dirty bit is not set, that counter is correct. If it's dirty, then surely that counter is out of sync. This is not very robust, as there might be issues upon restart when toggling the use_tracked_resources configuration variable, as stale counters might be trusted upon for making reservations. Also, the same situation might occur if a server crashes after the API operation is completed but before the reservation is committed, as the actual resource usage is changed but the corresponding usage tracker is not marked as dirty.

References
----------

.. [#] Subnet allocation extension: http://opendev.org/openstack/neutron/tree/neutron/extensions/subnetallocation.py
.. [#] DB Quota driver class: http://opendev.org/openstack/neutron/tree/neutron/db/quota/driver.py#n30
.. [#] Quota API extension controller: http://opendev.org/openstack/neutron/tree/neutron/extensions/quotasv2.py#n40
.. [#] Neutron resource attribute map: http://opendev.org/openstack/neutron/tree/neutron/api/v2/attributes.py#n639
.. [#] Base controller class: http://opendev.org/openstack/neutron/tree/neutron/api/v2/base.py#n50
.. [#] http://lists.openstack.org/pipermail/openstack-dev/2015-February/057534.html

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/retries.rst0000644000175000017500000001533300000000000025775 0ustar00coreycorey00000000000000..

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Convention for heading levels in Neutron devref:
=======  Heading 0 (reserved for the title in a document)
-------  Heading 1
~~~~~~~  Heading 2
+++++++  Heading 3
'''''''  Heading 4
(Avoid deeper levels because they do not render well.)

Retrying Operations
===================

Inside of the neutron_lib.db.api module there is a decorator called 'retry_if_session_inactive'. This should be used to protect any functions that perform DB operations. This decorator will capture any deadlock errors, RetryRequests, connection errors, and unique constraint violations that are thrown by the function it is protecting.

This decorator will not retry an operation if the function it is applied to is called within an active session. This is because the majority of the exceptions it captures put the session into a partially rolled back state so it is no longer usable. It is important to ensure there is a decorator outside of the start of the transaction. The decorators are safe to nest if a function is sometimes called inside of another transaction.

If a function is being protected that does not take context as an argument, the 'retry_db_errors' decorator function may be used instead. It retries the same exceptions and has the same anti-nesting behavior as 'retry_if_session_inactive', but it does not check if a session is attached to any context keywords. ('retry_if_session_inactive' just uses 'retry_db_errors' internally after checking the session.)

Idempotency on Failures
-----------------------

The function that is being decorated should always fully clean up whenever it encounters an exception so it's safe to retry the operation. So if a function creates a DB object, commits, then creates another, the function must have a cleanup handler to remove the first DB object in the case that the second one fails. Assume any DB operation can throw a retriable error.

You may see some retry decorators at the API layers in Neutron; however, we are trying to eliminate them because each API operation has many independent steps that make ensuring idempotency on partial failures very difficult.

Argument Mutation
-----------------

A decorated function should not mutate any complex arguments which are passed into it. If it does, it should have an exception handler that reverts the change so it's safe to retry. The decorator will automatically create deep copies of sets, lists, and dicts which are passed through it, but it will leave the other arguments alone.

Retrying to Handle Race Conditions
----------------------------------

One of the difficulties with detecting race conditions to create a DB record with a unique constraint is determining where to put the exception handler, because a constraint violation can happen immediately on flush or it may not happen all of the way until the transaction is being committed on the exit of the session context manager.
So we would end up with code that looks something like this:

::

    def create_port(context, ip_address, mac_address):
        _ensure_mac_not_in_use(context, mac_address)
        _ensure_ip_not_in_use(context, ip_address)
        try:
            with context.session.begin():
                port_obj = Port(ip=ip_address, mac=mac_address)
                do_expensive_thing(...)
                do_extra_other_thing(...)
                return port_obj
        except DBDuplicateEntry as e:
            # code to parse columns
            if 'mac' in e.columns:
                raise MacInUse(mac_address)
            if 'ip' in e.columns:
                raise IPAddressInUse(ip)

    def _ensure_mac_not_in_use(context, mac):
        if context.session.query(Port).filter_by(mac=mac).count():
            raise MacInUse(mac)

    def _ensure_ip_not_in_use(context, ip):
        if context.session.query(Port).filter_by(ip=ip).count():
            raise IPAddressInUse(ip)

So we end up with an exception handler that has to understand where things went wrong and convert them into appropriate exceptions for the end-users. This distracts significantly from the main purpose of create_port.

Since the retry decorator will automatically catch and retry DB duplicate errors for us, we can allow it to retry on this race condition, which will allow the original validation logic to be re-executed and raise the appropriate error. This keeps the validation logic in one place and makes the code cleaner.

::

    from neutron.db import api as db_api

    @db_api.retry_if_session_inactive()
    def create_port(context, ip_address, mac_address):
        _ensure_mac_not_in_use(context, mac_address)
        _ensure_ip_not_in_use(context, ip_address)
        with context.session.begin():
            port_obj = Port(ip=ip_address, mac=mac_address)
            do_expensive_thing(...)
            do_extra_other_thing(...)
            return port_obj

    def _ensure_mac_not_in_use(context, mac):
        if context.session.query(Port).filter_by(mac=mac).count():
            raise MacInUse(mac)

    def _ensure_ip_not_in_use(context, ip):
        if context.session.query(Port).filter_by(ip=ip).count():
            raise IPAddressInUse(ip)

Nesting
-------

Once the decorator retries an operation the maximum number of times, it will attach a flag to the exception it raises further up that will prevent decorators around the calling functions from retrying the error again. This prevents an exponential increase in the number of retries if they are layered.

Usage
-----

Here are some usage examples:

::

    from neutron.db import api as db_api

    @db_api.retry_if_session_inactive()
    def create_elephant(context, elephant_details):
        ....

    @db_api.retry_if_session_inactive()
    def atomic_bulk_create_elephants(context, elephants):
        with context.session.begin():
            for elephant in elephants:
                # note that if create_elephant throws a retriable
                # exception, the decorator around it will not retry
                # because the session is active. The decorator around
                # atomic_bulk_create_elephants will be responsible for
                # retrying the entire operation.
                create_elephant(context, elephant)

    # sample usage when session is attached to a var other than 'context'
    @db_api.retry_if_session_inactive(context_var_name='ctx')
    def some_function(ctx):
        ...

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/rpc_api.rst0000644000175000017500000001447100000000000025737 0ustar00coreycorey00000000000000..

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Convention for heading levels in Neutron devref:
=======  Heading 0 (reserved for the title in a document)
-------  Heading 1
~~~~~~~  Heading 2
+++++++  Heading 3
'''''''  Heading 4
(Avoid deeper levels because they do not render well.)

Neutron RPC API Layer
=====================

Neutron uses the oslo.messaging library to provide an internal communication channel between Neutron services. This communication is typically done via AMQP, but those details are mostly hidden by the use of oslo.messaging and it could be some other protocol in the future.

RPC APIs are defined in Neutron in two parts: client side and server side.

Client Side
-----------

Here is an example of an rpc client definition:

::

    import oslo_messaging

    from neutron.common import rpc as n_rpc


    class ClientAPI(object):
        """Client side RPC interface definition.

        API version history:
            1.0 - Initial version
            1.1 - Added my_remote_method_2
        """

        def __init__(self, topic):
            target = oslo_messaging.Target(topic=topic, version='1.0')
            self.client = n_rpc.get_client(target)

        def my_remote_method(self, context, arg1, arg2):
            cctxt = self.client.prepare()
            return cctxt.call(context, 'my_remote_method',
                              arg1=arg1, arg2=arg2)

        def my_remote_method_2(self, context, arg1):
            cctxt = self.client.prepare(version='1.1')
            return cctxt.call(context, 'my_remote_method_2', arg1=arg1)

This class defines the client side interface for an rpc API. The interface has 2 methods. The first method existed in version 1.0 of the interface. The second method was added in version 1.1. When the newer method is called, it specifies that the remote side must implement at least version 1.1 to handle this request.

Server Side
-----------

The server side of an rpc interface looks like this:

::

    import oslo_messaging


    class ServerAPI(object):

        target = oslo_messaging.Target(version='1.1')

        def my_remote_method(self, context, arg1, arg2):
            return 'foo'

        def my_remote_method_2(self, context, arg1):
            return 'bar'

This class implements the server side of the interface. The oslo_messaging.Target() defined says that this class currently implements version 1.1 of the interface.

.. _rpc_versioning:

Versioning
----------

Note that changes to rpc interfaces must always be done in a backwards compatible way. The server side should always be able to handle older clients (within the same major version series, such as 1.X).

It is possible to bump the major version number and drop some code only needed for backwards compatibility. For more information about how to do that, see https://wiki.openstack.org/wiki/RpcMajorVersionUpdates.

Example Change
~~~~~~~~~~~~~~

As an example minor API change, let's assume we want to add a new parameter to my_remote_method_2. First, we add the argument on the server side. To be backwards compatible, the new argument must have a default value set so that the interface will still work even if the argument is not supplied. Also, the interface's minor version number must be incremented.
So, the new server side code would look like this:

::

    import oslo_messaging


    class ServerAPI(object):

        target = oslo_messaging.Target(version='1.2')

        def my_remote_method(self, context, arg1, arg2):
            return 'foo'

        def my_remote_method_2(self, context, arg1, arg2=None):
            if not arg2:
                # Deal with the fact that arg2 was not specified if needed.
                pass
            return 'bar'

We can now update the client side to pass the new argument. The client must also specify that version '1.2' is required for this method call to be successful. The updated client side would look like this:

::

    import oslo_messaging

    from neutron.common import rpc as n_rpc


    class ClientAPI(object):
        """Client side RPC interface definition.

        API version history:
            1.0 - Initial version
            1.1 - Added my_remote_method_2
            1.2 - Added arg2 to my_remote_method_2
        """

        def __init__(self, topic):
            target = oslo_messaging.Target(topic=topic, version='1.0')
            self.client = n_rpc.get_client(target)

        def my_remote_method(self, context, arg1, arg2):
            cctxt = self.client.prepare()
            return cctxt.call(context, 'my_remote_method',
                              arg1=arg1, arg2=arg2)

        def my_remote_method_2(self, context, arg1, arg2):
            cctxt = self.client.prepare(version='1.2')
            return cctxt.call(context, 'my_remote_method_2',
                              arg1=arg1, arg2=arg2)

Neutron RPC APIs
----------------

As discussed before, RPC APIs are defined in two parts: a client side and a server side. Several of these pairs exist in the Neutron code base. The code base is being updated with documentation on every rpc interface implementation that indicates where the corresponding server or client code is located.

Example: DHCP
~~~~~~~~~~~~~

The DHCP agent includes a client API, neutron.agent.dhcp.agent.DhcpPluginAPI. The DHCP agent uses this class to call remote methods back in the Neutron server. The server side is defined in neutron.api.rpc.handlers.dhcp_rpc.DhcpRpcCallback. It is up to the Neutron plugin in use to decide whether the DhcpRpcCallback interface should be exposed.

Similarly, there is an RPC interface defined that allows the Neutron plugin to remotely invoke methods in the DHCP agent. The client side is defined in neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.DhcpAgentNotifyAPI. The server side of this interface that runs in the DHCP agent is neutron.agent.dhcp.agent.DhcpAgent.

More Info
---------

For more information, see the oslo.messaging documentation: https://docs.openstack.org/oslo.messaging/latest/.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/contributor/internals/rpc_callbacks.rst0000644000175000017500000003132400000000000027101 0ustar00coreycorey00000000000000..

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Convention for heading levels in Neutron devref:
=======  Heading 0 (reserved for the title in a document)
-------  Heading 1
~~~~~~~  Heading 2
+++++++  Heading 3
'''''''  Heading 4
(Avoid deeper levels because they do not render well.)

.. _rpc_callbacks:
Neutron Messaging Callback System
=================================

Neutron already has a `callback system `_ for in-process resource callbacks where publishers and subscribers are able to publish and subscribe for resource events.

This system is different, and is intended to be used for inter-process callbacks, via the messaging fanout mechanisms.

In Neutron, agents may need to subscribe to specific resource details which may change over time. And the purpose of this messaging callback system is to allow agent subscription to those resources without the need to extend or modify existing RPC calls, or to create new RPC messages.

A few resources which can benefit from this system:

* QoS policies;
* Security Groups.

Using a remote publisher/subscriber pattern, the information about such resources could be published using fanout messages to all interested nodes, minimizing messaging requests from agents to the server since the agents get subscribed for their whole lifecycle (unless they unsubscribe).

Within an agent, there could be multiple subscriber callbacks to the same resource events; the resource updates would be dispatched to the subscriber callbacks from a single message. Any update would come in a single message, doing only a single oslo versioned objects deserialization on each receiving agent.

This publishing/subscription mechanism is highly dependent on the format of the resources passed around. This is why the library only allows versioned objects to be published and subscribed. Oslo versioned objects allow object version down/up conversion. [#vo_mkcompat]_ [#vo_mkcptests]_

For the VO's versioning schema look here: [#vo_versioning]_

versioned_objects serialization/deserialization with the obj_to_primitive(target_version=..) and primitive_to_obj() [#ov_serdes]_ methods is used internally to convert/retrieve objects before/after messaging.

Serialized versioned objects look like::

    {'versioned_object.version': '1.0',
     'versioned_object.name': 'QoSPolicy',
     'versioned_object.data': {'rules': [
                                   {'versioned_object.version': '1.0',
                                    'versioned_object.name': 'QoSBandwidthLimitRule',
                                    'versioned_object.data': {'name': u'a'},
                                    'versioned_object.namespace': 'versionedobjects'}],
                               'uuid': u'abcde',
                               'name': u'aaa'},
     'versioned_object.namespace': 'versionedobjects'}

Rolling upgrades strategy
-------------------------

In this section we assume the standard Neutron upgrade process, which means upgrading the server first and then upgrading the agents:

:doc:`More information about the upgrade strategy `.

We provide an automatic method which avoids manual pinning and unpinning of versions by the administrator, which could be prone to error.

Resource pull requests
~~~~~~~~~~~~~~~~~~~~~~

Resource pull requests will always be ok because the underlying resource RPC does provide the version of the requested resource id / ids. The server will be upgraded first, so it will always be able to satisfy any version the agents request.

Resource push notifications
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Agents will subscribe to the neutron-vo-<resource_class_name>-<version> fanout queue which carries updated objects for the version they know about. The versions they know about depend on the runtime Neutron versioned objects they started with.

When the server upgrades, it should be able to instantly calculate a census of agent versions per object (we will define a mechanism for this in a later section). It will use the census to send fanout messages on all the versions in the span a resource type has.
For example, if neutron-server knew it had rpc-callback aware agents with
versions 1.0 and versions 1.2 of resource type "A", any update would be sent
to neutron-vo-A_1.0 and neutron-vo-A_1.2.

TODO(mangelajo): Verify that, after an upgrade is finished, any unused
messaging resources (queues, exchanges, and so on) are released as older
agents go away and neutron-server stops producing new message casts.
Otherwise, document the need for a neutron-server restart after the rolling
upgrade has finished if we want the queues cleaned up.

Leveraging agent state reports for object version discovery
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

We add a row to the agent db for tracking agent known objects and version
numbers. This resembles the implementation of the configuration column.

At startup, agents now report not only their configuration but also their
subscribed object type / version pairs, which are stored in the database and
made available to any neutron-server requesting them::

    'resource_versions': {'QosPolicy': '1.1',
                          'SecurityGroup': '1.0',
                          'Port': '1.0'}

There was a subset of Liberty agents depending on QosPolicy that required
'QosPolicy': '1.0' if the qos plugin was installed. We were able to identify
those by the binary name (included in the report):

* 'neutron-openvswitch-agent'
* 'neutron-sriov-nic-agent'

This transition was handled in the Mitaka version, but it's not handled
anymore in Newton, since upgrades are only supported across a single major
version step.

Version discovery
+++++++++++++++++

With the above mechanism in place, and considering the exception of
neutron-openvswitch-agent and neutron-sriov-nic-agent requiring QosPolicy
1.0, we discover the subset of versions to be sent on every push
notification.

Agents that are in the down state are excluded from this calculation. We use
an extended timeout for agents in this calculation to make sure we're on the
safe side, especially if the deployer marked agents with low timeouts.

Starting with Mitaka, any agent interested in versioned objects via this API
should report its resource/version tuples of interest (the resource
type/version pairs it's subscribed to).

The plugins interested in this RPC mechanism must inherit AgentDbMixin,
since this mechanism is only intended to be used from agents at the moment,
while it could be extended to be consumed from other components if
necessary.

The AgentDbMixin provides::

    def get_agents_resource_versions(self, tracker):
        ...

Caching mechanism
'''''''''''''''''

The version subset per object is cached to avoid DB requests on every push,
given that we assume that all old agents are already registered at the time
of upgrade. The cached subset is re-evaluated (to cut down the version sets
as agents upgrade) after
neutron.api.rpc.callbacks.version_manager.VERSIONS_TTL.

As a fast path to update this cache on all neutron-servers when upgraded
agents come up (or old agents revive after a long timeout, or even after a
downgrade), the server registering the new status update notifies the other
servers about the new consumer resource versions via cast.

All notifications for all calculated version sets must be sent, as
non-upgraded agents would otherwise not receive them. It is safe to send
notifications to any fanout queue, as they will be discarded if no agent is
listening.
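As a concrete illustration of the census described above, here is a minimal
sketch in plain Python (the function and variable names are hypothetical,
not the actual Neutron implementation) of folding the reported
resource_versions mappings into a per-resource-type version set::

    def collect_version_census(agent_reports):
        """Fold each agent's reported resource_versions mapping into the
        set of versions known per resource type.
        """
        census = {}
        for resource_versions in agent_reports:
            for resource_type, version in resource_versions.items():
                census.setdefault(resource_type, set()).add(version)
        return census

    reports = [{'QosPolicy': '1.1', 'SecurityGroup': '1.0'},
               {'QosPolicy': '1.0', 'Port': '1.0'}]

    census = collect_version_census(reports)
    # census['QosPolicy'] == {'1.0', '1.1'}, so a QosPolicy update must be
    # pushed on both the neutron-vo-QosPolicy_1.0 and
    # neutron-vo-QosPolicy_1.1 fanout queues.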
Topic names for every resource type RPC endpoint
------------------------------------------------

neutron-vo-<resource_type>-<version>

In the future, we may want to get oslo messaging to support subscribing to
topics dynamically; then we may want to use
neutron-vo-<resource_type>-<resource_id>-<version> instead, or something
equivalent which would allow fine granularity for the receivers to get only
the information that is interesting to them.

Subscribing to resources
------------------------

Imagine that you have agent A, which has just been asked to handle a new
port that has an associated security group and QoS policy. The agent code
processing port updates may look like::

    from neutron.api.rpc.callbacks.consumer import registry
    from neutron.api.rpc.callbacks import events
    from neutron.api.rpc.callbacks import resources


    def process_resource_updates(context, resource_type, resource_list,
                                 event_type):
        # send to the right handler which will update any control plane
        # details related to the updated resources...
        pass


    def subscribe_resources():
        registry.register(process_resource_updates, resources.SEC_GROUP)
        registry.register(process_resource_updates, resources.QOS_POLICY)


    def port_update(port):
        # here we extract sg_id and qos_policy_id from port..
        sec_group = registry.pull(resources.SEC_GROUP, sg_id)
        qos_policy = registry.pull(resources.QOS_POLICY, qos_policy_id)

The relevant function is:

* register(callback, resource_type): subscribes callback to a resource type.

The callback function will receive the following arguments:

* context: the neutron context that triggered the notification.
* resource_type: the type of resource which is receiving the update.
* resource_list: list of resources which have been pushed by the server.
* event_type: will be one of CREATED, UPDATED, or DELETED; see
  neutron.api.rpc.callbacks.events for details.

Without underlying oslo_messaging support for dynamic topics on the receiver
side, we cannot implement a per "resource type + resource id" topic:
rabbitmq seems to handle tens of thousands of topics without suffering, but
creating hundreds of oslo_messaging receivers on different topics seems to
crash. We may want to look into that later, to avoid agents receiving
resource updates which are uninteresting to them.

Unsubscribing from resources
----------------------------

To unsubscribe registered callbacks:

* unsubscribe(callback, resource_type): unsubscribe from a specific
  resource type.
* unsubscribe_all(): unsubscribe from all resources.

Sending resource events
-----------------------

On the server side, resource updates could come from anywhere: a service
plugin, an extension, anything that updates, creates, or destroys the
resources and that is of any interest to subscribed agents.

A callback is expected to receive a list of resources. When the resources in
the list belong to the same resource type, a single push RPC message is
sent; if the list contains objects of different resource types, the
resources of each type are grouped and sent separately, one push RPC message
per type. On the receiver side, resources in a list always belong to the
same type. In other words, a server-side push of a list of heterogeneous
objects will result in N messages on the bus and N client-side callback
invocations, where N is the number of unique resource types in the given
list, e.g. L(A, A, B, C, C, C) would be fragmented into L1(A, A), L2(B),
L3(C, C, C), and each list pushed separately.

Note: there is no guarantee in terms of the order in which the separate
resource lists will be delivered to consumers.
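Before looking at the publisher code, the following sketch (illustrative
only, not the actual Neutron implementation) shows one way such a
heterogeneous list can be grouped by resource type, yielding the per-type
lists that are then pushed one RPC message at a time::

    import itertools

    def fragment_by_type(resources):
        """Group a mixed resource list into per-type lists, mirroring the
        fragmentation behaviour described above.
        """
        key = lambda resource: type(resource).__name__
        return {resource_type: list(group)
                for resource_type, group
                in itertools.groupby(sorted(resources, key=key), key=key)}

    # L(A, A, B, C, C, C) -> {'A': [a1, a2], 'B': [b1], 'C': [c1, c2, c3]};
    # each of the three lists is then pushed in its own message, with no
    # ordering guarantee between them.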
The server/publisher side may look like::

    from neutron.api.rpc.callbacks.producer import registry
    from neutron.api.rpc.callbacks import events


    def create_qos_policy(...):
        policy = fetch_policy(...)
        update_the_db(...)
        registry.push([policy], events.CREATED)


    def update_qos_policy(...):
        policy = fetch_policy(...)
        update_the_db(...)
        registry.push([policy], events.UPDATED)


    def delete_qos_policy(...):
        policy = fetch_policy(...)
        update_the_db(...)
        registry.push([policy], events.DELETED)

References
----------

.. [#ov_serdes] https://github.com/openstack/oslo.versionedobjects/blob/ce00f18f7e9143b5175e889970564813189e3e6d/oslo_versionedobjects/tests/test_objects.py#L410
.. [#vo_mkcompat] https://github.com/openstack/oslo.versionedobjects/blob/ce00f18f7e9143b5175e889970564813189e3e6d/oslo_versionedobjects/base.py#L474
.. [#vo_mkcptests] https://github.com/openstack/oslo.versionedobjects/blob/ce00f18f7e9143b5175e889970564813189e3e6d/oslo_versionedobjects/tests/test_objects.py#L114
.. [#vo_versioning] https://github.com/openstack/oslo.versionedobjects/blob/ce00f18f7e9143b5175e889970564813189e3e6d/oslo_versionedobjects/base.py#L248

Guided Tour: The Neutron Security Group API
===========================================

https://wiki.openstack.org/wiki/Neutron/SecurityGroups

API Extension
-------------

The API extension is the 'front end' portion of the code, which handles
defining a `REST-ful API`_ that is used by projects.

.. _`REST-ful API`: https://opendev.org/openstack/neutron/tree/neutron/extensions/securitygroup.py

Database API
------------

The Security Group API extension adds a number of `methods to the database
layer`_ of Neutron.

.. _`methods to the database layer`: https://opendev.org/openstack/neutron/tree/neutron/db/securitygroups_db.py

Agent RPC
---------

This portion of the code handles processing requests from projects after
they have been stored in the database. It involves messaging all the L2
agents running on the compute nodes, and modifying the iptables rules on
each hypervisor.
* `Plugin RPC classes `_

  * `SecurityGroupServerRpcMixin `_ - defines the RPC API that the plugin
    uses to communicate with the agents running on the compute nodes
  * SecurityGroupServerRpcMixin - defines the API methods used to fetch
    data from the database, in order to return responses to agents via the
    RPC API

* `Agent RPC classes `_

  * The SecurityGroupServerRpcApi defines the API methods that can be
    called by agents, back to the plugin that runs on the Neutron
    controller
  * The SecurityGroupAgentRpcCallbackMixin defines methods that a plugin
    uses to call back to an agent after performing an action called by an
    agent.

IPTables Driver
---------------

* ``prepare_port_filter`` takes a ``port`` argument, which is a
  ``dictionary`` object that contains information about the port, including
  the ``security_group_rules``.
* ``prepare_port_filter`` appends the port to an internal dictionary,
  ``filtered_ports``, which is used to track the internal state.
* Each security group has a `chain `_ in iptables.
* The ``IptablesFirewallDriver`` has a method to convert security group
  rules into iptables statements.

Segments extension
==================

Neutron has an extension that allows CRUD operations on the ``/segments``
resource in the API, which corresponds to the ``NetworkSegment`` entity in
the DB layer. The extension is implemented as a service plug-in.

.. note:: The ``segments`` service plug-in is not configured by default. To
   configure it, add ``segments`` to the ``service_plugins`` parameter in
   ``neutron.conf``.

Core plug-ins can coordinate with the ``segments`` service plug-in by
subscribing callbacks to events associated with the ``SEGMENT`` resource.
Currently, the segments plug-in notifies subscribers of the following
events:

* ``PRECOMMIT_CREATE``
* ``AFTER_CREATE``
* ``BEFORE_DELETE``
* ``PRECOMMIT_DELETE``
* ``AFTER_DELETE``

As of this writing, ``ML2`` and ``OVN`` register callbacks to receive
events from the ``segments`` service plug-in. The ``ML2`` plug-in defines
the callback ``_handle_segment_change`` to process all the relevant
segments events.

Segments extension relevant modules
-----------------------------------

* ``neutron/extensions/segment.py`` defines the extension
* ``neutron/db/models/segment.py`` defines the DB models for segments and
  for the segment host mapping, which is used in the implementation of
  routed networks.
* ``neutron/db/segments_db.py`` has functions to add, retrieve and delete
  segments from the DB.
* ``neutron/services/segments/db.py`` defines a mixin class with the
  methods that perform API CRUD operations for the ``segments`` plug-in. It
  also has a set of functions to create and maintain the mapping of
  segments to hosts, which is necessary in the implementation of routed
  networks.
* ``neutron/services/segments/plugin.py`` defines the ``segments`` service
  plug-in.

Service Extensions
==================

Historically, Neutron supported the following advanced services:

#. **FWaaS** (*Firewall-as-a-Service*): runs as part of the L3 agent.
#. **VPNaaS** (*VPN-as-a-Service*): derives from the L3 agent to add VPNaaS
   functionality.

Starting with the Kilo release, these services are split into separate
repositories, and more extensions are being developed as well. Service
plugins are a clean way of adding functionality in a cohesive manner while
keeping it decoupled from the guts of the framework. The aforementioned
features are developed as extensions (also known as service plugins), and
more capabilities are being added to Neutron following the same pattern.

For those that are deemed 'orthogonal' to any network service (e.g. tags,
timestamps, auto_allocate, etc.), there is an informal `mechanism `_ to
have these loaded automatically at server startup. If you consider adding
an entry to the dictionary, please be kind and reach out to your PTL or a
member of the drivers team for approval.

#. http://opendev.org/openstack/neutron-fwaas/
#. http://opendev.org/openstack/neutron-vpnaas/

Calling the Core Plugin from Services
-------------------------------------

There are many cases where a service may want to create a resource managed
by the core plugin (e.g. ports, networks, subnets). This can be achieved by
importing the plugins directory and getting a direct reference to the core
plugin:

.. code:: python

    from neutron_lib.plugins import directory

    plugin = directory.get_plugin()
    plugin.create_port(context, port_dict)

However, there is an important caveat. Calls to the core plugin should, in
almost every case, not be made inside of an ongoing transaction. This is
because many plugins (including ML2) can be configured to make calls to a
backend after creating or modifying an object. If the call is made inside
of a transaction and the transaction is rolled back after the core plugin
call, the backend will not be notified that the change was undone. This
will lead to consistency errors between the core plugin and its configured
backend(s).

ML2 has a guard against certain methods being called with an active DB
transaction to help prevent developers from accidentally making this
mistake. It will raise an error that says explicitly that the method should
not be called within a transaction.
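As a rough illustration of this caveat, consider the sketch below (the
``do_service_db_work`` helper is made up for the example, and the session
handling is simplified):

.. code:: python

    from neutron_lib.plugins import directory


    def do_service_db_work(context):
        pass  # stand-in for the service's own database updates


    def create_service_port(context, port_dict):
        # Wrong (sketched in the comment): if the enclosing transaction
        # rolls back after create_port() has already notified a backend,
        # the backend and the database diverge.
        #
        #   with context.session.begin(subtransactions=True):
        #       do_service_db_work(context)
        #       directory.get_plugin().create_port(context, port_dict)
        #
        # Better: complete the service's own DB work first, then call the
        # core plugin outside of any active transaction.
        do_service_db_work(context)
        plugin = directory.get_plugin()
        return plugin.create_port(context, port_dict)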
Services and agents
===================

A usual Neutron setup consists of multiple services and agents running on
one or multiple nodes (though some exotic setups potentially may not need
any agents). Each of those services provides some of the networking or API
services. Among those of special interest are:

#. neutron-server, which provides API endpoints and serves as a single
   point of access to the database. It usually runs on nodes called
   Controllers.
#. The Layer2 agent, which can utilize Open vSwitch, Linuxbridge or other
   vendor-specific technology to provide network segmentation and isolation
   for project networks. The L2 agent should run on every node where it is
   deemed responsible for wiring and securing virtual interfaces (usually
   both Compute and Network nodes).
#. The Layer3 agent, which runs on a Network node and provides East-West
   and North-South routing plus some advanced services such as FWaaS or
   VPNaaS.

For the purpose of this document, we refer to all services, servers and
agents that run on any node simply as "services".

Entry points
------------

Entry points for services are defined in setup.cfg under the
"console_scripts" section. Those entry points should generally point to
main() functions located under the neutron/cmd/... path.

Note: some existing vendor/plugin agents still maintain their entry points
in other locations. Developers responsible for those agents are welcome to
apply the guideline above.

Interacting with Eventlet
-------------------------

Neutron extensively utilizes the eventlet library to provide an
asynchronous concurrency model to its services. To utilize it correctly,
the following should be kept in mind.

If a service utilizes the eventlet library, then it should not call
eventlet.monkey_patch() directly, but instead maintain its entry point
main() function under neutron/cmd/eventlet/... If that is the case, the
standard Python library will be automatically patched for the service on
entry point import (monkey patching is done inside the `python package file
`_).

Note: an entry point 'main()' function may just be an indirection to a real
callable located elsewhere, as is done for reference services such as DHCP,
L3 and the neutron-server.

For more info on the rationale behind the code tree setup, see `the
corresponding cross-project spec `_.
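As an illustration, a minimal eventlet-aware entry point module might look
like the sketch below; the service name is hypothetical, and the entry
point would be wired up in setup.cfg under "console_scripts" (e.g.
``neutron-my-service = neutron.cmd.eventlet.my_service:main``)::

    # hypothetical file: neutron/cmd/eventlet/my_service.py
    #
    # Importing anything under neutron.cmd.eventlet monkey patches the
    # standard library before any service code runs, so the entry point
    # itself stays a thin indirection.

    from neutron.my_service import service  # hypothetical implementation


    def main():
        service.main()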
Connecting to the Database
--------------------------

Only the neutron-server connects to the neutron database. Agents may never
connect directly to the database, as this would break the ability to do
rolling upgrades.

Configuration Options
---------------------

In addition to database access, configuration options are segregated
between neutron-server and agents. Both services and agents may load the
main ``neutron.conf``, since this file should contain the oslo.messaging
configuration for internal Neutron RPCs, and it may contain host-specific
configuration such as file paths. In addition, ``neutron.conf`` contains
the database, Keystone, and Nova credentials and endpoints strictly for
neutron-server to use.

In addition, neutron-server may load a plugin-specific configuration file,
yet the agents should not. As the plugin configuration is primarily
site-wide options and the plugin provides the persistence layer for
Neutron, agents should be instructed to act upon these values via RPC.

Each individual agent may have its own configuration file. This file should
be loaded after the main ``neutron.conf`` file, so the agent configuration
takes precedence. The agent-specific configuration may contain
configurations which vary between hosts in a Neutron deployment, such as
the ``local_ip`` for an L2 agent. If any agent requires access to
additional external services beyond the neutron RPC, those endpoints should
be defined in the agent-specific configuration file (e.g. nova metadata for
the metadata agent).
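For example, an L3 agent would typically be launched with the main file
first and its own file last, so that the agent file takes precedence for
any overlapping options (the paths are illustrative)::

    $ neutron-l3-agent --config-file /etc/neutron/neutron.conf \
                       --config-file /etc/neutron/l3_agent.ini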
L2 Networking with SR-IOV enabled NICs
======================================

SR-IOV (Single Root I/O Virtualization) is a specification that allows a
PCIe device to appear to be multiple separate physical PCIe devices. SR-IOV
works by introducing the idea of physical functions (PFs) and virtual
functions (VFs). Physical functions (PFs) are full-featured PCIe functions.
Virtual functions (VFs) are "lightweight" functions that lack configuration
resources.

SR-IOV supports VLANs for L2 network isolation; other networking
technologies, such as VXLAN/GRE, may be supported in the future.

The SR-IOV NIC agent manages the configuration of the SR-IOV Virtual
Functions that connect VM instances running on the compute node to the
public network.

In most common deployments, there are compute nodes and network nodes. A
compute node can support VM connectivity via an SR-IOV enabled NIC. The
SR-IOV NIC agent manages the Virtual Functions admin state. Quality of
service is partially implemented with the bandwidth limit and minimum
bandwidth rules. In the future it will manage additional settings, such as
additional quality of service rules, rate limit settings, spoofcheck and
more.

A network node will usually be deployed with either Open vSwitch or Linux
Bridge to support network node functionality.

Further Reading
---------------

`Nir Yechiel - SR-IOV Networking – Part I: Understanding the Basics `_

`SR-IOV Passthrough For Networking `_

Add Tags to Neutron Resources
=============================

The tag service plugin allows users to set tags on their resources. Tagging
resources can be used by external systems or any other clients of the
Neutron REST API (and NOT backend drivers). The following use cases refer
to adding tags to networks, but the same is applicable to any other Neutron
resource:

1) Ability to map different networks in different OpenStack locations to
   one logically same network (for multi-site OpenStack)
2) Ability to map IDs from different management/orchestration systems to
   OpenStack networks in mixed environments; for example, for project
   Kuryr, map a docker network id to a neutron network id
3) Leverage tags by deployment tools
4) Allow operators to tag information about provider networks (e.g.
   high-bandwidth, low-latency, etc.)
5) New features like get-me-a-network or a similar port scheduler could
   choose a network for a port based on tags

Which Resources
---------------

The tag system uses the standard attribute (standardattr) mechanism, so it
targets resources that have that mechanism. Some resources with the
standard attribute don't fit the tag support use cases (e.g.
security_group_rule). If tag support is added to a new resource, the
resource model should inherit HasStandardAttributes and then implement the
properties 'api_parent' and 'tag_support'. The change must also include a
release note for API users.

Current API resources extended by the tag extensions:

- floatingips
- networks
- network_segment_ranges
- policies
- ports
- routers
- security_groups
- subnetpools
- subnets
- trunks

Model
-----

A tag is not a standalone resource; a tag is always related to an existing
resource. The following shows the tag model::

    +------------------+        +------------------+
    |     Network      |        |       Tag        |
    +------------------+        +------------------+
    | standard_attr_id +------> | standard_attr_id |
    |                  |        |       tag        |
    |                  |        |                  |
    +------------------+        +------------------+

The tag model has only two columns, and the tag column is just a string.
Tags are defined per resource: a tag is unique within a resource, but the
same tag can be repeated across resources.

API
---

The following shows the basic API for tags. A tag is regarded as a
subresource of a resource, so the API always includes the id of the
resource related to the tag.
Add a single tag on a network::

    PUT /v2.0/networks/{network_id}/tags/{tag}

Returns `201 Created`. If the tag already exists, no error is raised; it
just returns `201 Created`, because the `OpenStack Development Mailing List
`_ discussion concluded that a PUT should have no issue updating an
existing tag.

Replace the set of tags on a network::

    PUT /v2.0/networks/{network_id}/tags

with request payload::

    {
        'tags': ['foo', 'bar', 'baz']
    }

Response::

    {
        'tags': ['foo', 'bar', 'baz']
    }

Check if a tag exists or not on a network::

    GET /v2.0/networks/{network_id}/tags/{tag}

Remove a single tag on a network::

    DELETE /v2.0/networks/{network_id}/tags/{tag}

Remove all tags on a network::

    DELETE /v2.0/networks/{network_id}/tags

PUT and DELETE for collections are the motivation for `extending the API
framework `_.

.. note:: Much of this document discusses upgrade considerations for the
   Neutron reference implementation using Neutron's agents. It's expected
   that each Neutron plugin provides its own documentation that discusses
   upgrade considerations specific to that choice of backend. For example,
   OVN does not use Neutron agents, but does have a local controller that
   runs on each compute node. OVN supports rolling upgrades, but
   information about how that works should be covered in the documentation
   for the OVN Neutron plugin.

Upgrade strategy
================

There are two general upgrade scenarios supported by Neutron:

#. All services are shut down, the code is upgraded, then all services are
   started again.
#. Services are upgraded gradually, based on operator service windows.

The latter is the preferred way to upgrade an OpenStack cloud, since it
allows for more granularity and less service downtime. This scenario is
usually called 'rolling upgrade'.

Rolling upgrade
---------------

Rolling upgrades imply that during some interval of time there will be
services of different code versions running and interacting in the same
cloud. This puts multiple constraints onto the software:

#. older services should be able to talk with newer services.
#. older services should not require the database to have an older schema
   (otherwise newer services that require the newer schema would not work).

`More info on rolling upgrades in OpenStack `_.

Those requirements are achieved in Neutron by:

#. having the Neutron server keep backwards compatibility code to deal with
   older messaging payloads, if the Neutron backend makes use of Neutron
   agents.
#. isolating a single service that accesses the database (neutron-server).
To simplify the matter, it's always assumed that the order of service
upgrades is as follows:

#. first, all neutron-servers are upgraded.
#. then, if applicable, neutron agents are upgraded.

This approach allows us to avoid backwards compatibility code on the agent
side and is in line with other OpenStack projects that support rolling
upgrades (specifically, nova).

Server upgrade
~~~~~~~~~~~~~~

Neutron-server is the very first component that should be upgraded to the
new code. It's also the only component that relies on the new database
schema being present; other components communicate with the cloud through
AMQP and hence do not depend on a particular database state.

Database upgrades are implemented with alembic migration chains. The
database upgrade is split into two parts:

#. ``neutron-db-manage upgrade --expand``
#. ``neutron-db-manage upgrade --contract``

Each part represents a separate alembic branch. The former step can be
executed while the old neutron-server code is running. The latter step
requires *all* neutron-server instances to be shut down. Once it's
complete, neutron-servers can be started again.

.. note:: Full shutdown of neutron-server instances can be skipped
   depending on whether there are pending contract scripts not applied to
   the database::

       $ neutron-db-manage has_offline_migrations

   The command will return a message if there are pending contract scripts.

:ref:`More info on alembic scripts `.

Agents upgrade
~~~~~~~~~~~~~~

.. note:: This section does not apply when the cloud does not use AMQP
   agents to provide networking services to instances. In that case, other
   backend specific upgrade instructions may also apply.

Once neutron-server services are restarted with the new database schema and
the new code, it's time to upgrade Neutron agents. Note that in the
meantime, neutron-server should be able to serve AMQP messages sent by
older versions of agents which are part of the cloud.

The recommended order of agent upgrade (per node) is:

#. first, L2 agents (openvswitch, linuxbridge, sr-iov).
#. then, all other agents (L3, DHCP, Metadata, ...).

The rationale of the agent upgrade order is that the L2 agent is usually
responsible for wiring ports for other agents to use, so it's better to
allow it to do its job first and then proceed with other agents that will
use the already configured ports for their needs.

Each network/compute node can have its own upgrade schedule that is
independent of other nodes.

AMQP considerations
+++++++++++++++++++

Since it's always assumed that the neutron-server component is upgraded
before agents, only the former should handle both old and new RPC versions.
The implication of that is that no code that handles UnsupportedVersion
oslo.messaging exceptions belongs in agent code.

Notifications
'''''''''''''

For notifications that are issued by neutron-server to listening agents,
special consideration is needed to support rolling upgrades. In this case,
a newer controller sends a newer payload to older agents.

Until we have a proper RPC version pinning feature to enforce an older
payload format during upgrade (as is implemented in other projects like
nova), we leave our agents resilient against unknown arguments sent as part
of server notifications. This is achieved by consistently capturing those
unknown arguments with keyword arguments and ignoring them on the agent
side, and by not enforcing newer RPC entry point versions on the server
side.
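A minimal sketch of that convention on the agent side (illustrative only,
not an actual Neutron interface) looks like this::

    class MyAgentCallback(object):
        """Hypothetical agent-side endpoint for server notifications."""

        def port_update(self, context, port=None, **kwargs):
            # A newer server may add arguments to this notification;
            # capturing them with **kwargs keeps the older agent working,
            # at the cost of a less strict RPC API.
            self.handle_port(port)

        def handle_port(self, port):
            pass  # stand-in for the real processing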
This approach is not ideal, because it makes the RPC API less strict.
That's why other approaches should be considered for notifications in the
future.

:ref:`More information about RPC versioning `.

Interface signature
'''''''''''''''''''

An RPC interface is defined by its name, version, and (named) arguments
that it accepts. There are no strict guarantees that arguments will have
the expected types or meaning, as long as they are serializable.

Message content versioning
''''''''''''''''''''''''''

To provide better compatibility guarantees for rolling upgrades, RPC
interfaces could also define a specific format for the arguments they
accept. In the OpenStack world, this is usually implemented using the
oslo.versionedobjects library, relying on the library to define the
serialized form for arguments that are passed over the AMQP wire.

Note that Neutron has *not* adopted the oslo.versionedobjects library for
its RPC interfaces yet (except for the QoS feature).

:ref:`More information about RPC callbacks used for QoS `.

Networking backends
~~~~~~~~~~~~~~~~~~~

A backend software upgrade should not result in any data plane disruptions.
Meaning, e.g., the Open vSwitch L2 agent should not reset flows or rewire
ports; the Neutron L3 agent should not delete namespaces left by an older
version of the agent; the Neutron DHCP agent should not require immediate
DHCP lease renewal; etc.

The same considerations apply to setups that do not rely on agents.
Meaning, e.g., an OpenDaylight or OVN controller should not break data
plane connectivity during its upgrade process.

Upgrade testing
---------------

`Grenade `_ is the OpenStack project that is designed to validate upgrade
scenarios. Currently, only the offline (non-rolling) upgrade scenario is
validated in the Neutron gate. The upgrade scenario follows these steps:

#. the 'old' cloud is set up using the latest stable release code
#. all services are stopped
#. code is updated to the patch under review
#. new database migration scripts are applied, if needed
#. all services are started
#. the 'new' cloud is validated with a subset of tempest tests

The scenario validates that no configuration option names are changed in
one cycle. More generally, it validates that the 'new' cloud is capable of
running using the 'old' configuration files. It also validates that
database migration scripts can be executed.

The scenario does *not* validate AMQP versioning compatibility.

Other projects (for example Nova) have so-called 'partial' grenade jobs,
where some services are left running using the old version of code. Such a
job would be needed in the Neutron gate to validate rolling upgrades for
the project. Until that time, it's all up to reviewers to catch
compatibility issues in patches on review.

Another testing gap concerns the split migration script branches. It's
assumed that an 'old' cloud can successfully run after 'expand' migration
scripts from the 'new' cloud are applied to its database, but that's not
validated in the gate.

.. _upgrade_review_guidelines:

Review guidelines
-----------------

There are several upgrade-related gotchas that should be tracked by
reviewers.

First things first, a piece of general advice to reviewers: make sure new
code does not violate requirements set by the `global OpenStack deprecation
policy `_. Now to specifics:

#. Configuration options:

   * options should not be dropped from the tree without waiting out the
     deprecation period (currently one development cycle long), with a
     deprecation message issued if the deprecated option is used.
   * option values should not change their meaning between releases.
#. Data plane:

   * agent restart should not result in data plane disruption (no Open
     vSwitch ports reset; no network namespaces deleted; no device names
     changed).

#. RPC versioning:

   * no RPC version major number should be bumped before all agents have
     had a chance to upgrade (meaning, at least one release cycle is needed
     before compatibility code to handle old clients is stripped from the
     tree).
   * no compatibility code should be added to the agent side of AMQP
     interfaces.
   * server code should be able to handle all previous versions of agents,
     unless the major version of an interface is bumped.
   * no RPC interface arguments should change their meaning, or names.
   * new arguments added to RPC interfaces should not be mandatory. This
     means that the server should be able to handle old requests without
     the new argument specified. Also, if the argument is not passed, the
     old behaviour before the addition of the argument should be retained.
   * the minimal client version must not be bumped for server initiated
     notification changes for at least one cycle.

#. Database migrations:

   * migration code should be split into two branches (contract, expand)
     as needed. No code that is unsafe to execute while neutron-server is
     running should be added to the expand branch.
   * if possible, contract migrations should be minimized or avoided to
     reduce the time when API endpoints must be down during a database
     upgrade.

Module Reference
================

.. toctree::
   :maxdepth: 3

.. todo:: Add in all the big modules as automodule indexes.
Neutron public API
==================

The Neutron main tree serves as a library for multiple subprojects that
rely on different modules from the neutron.* namespace to accommodate their
needs. Specifically, advanced service repositories and open source or
vendor plugin/driver repositories do it.

Neutron modules differ a lot in their API stability, and there is no part
of it that is explicitly marked to be consumed by other projects. That
said, there are modules that other projects should definitely avoid relying
on.

Breakages
---------

The Neutron API is not very stable, and there are cases when a desired
change in the neutron tree is expected to trigger breakage for one or more
external repositories under the neutron tent. Below you can find a list of
known incompatible changes that could trigger, or are known to have
triggered, those breakages. The changes are listed in reverse chronological
order (newer at the top).

* change: QoS plugin refactor

  - commit: I863f063a0cfbb464cedd00bddc15dd853cbb6389
  - solution: implement the new abstract methods in
    neutron.extensions.qos.QoSPluginBase.
  - severity: Low (some out-of-tree plugins might be affected).

* change: Consume ConfigurableMiddleware from oslo_middleware.

  - commit: If7360608f94625b7d0972267b763f3e7d7624fee
  - solution: switch to oslo_middleware.base.ConfigurableMiddleware; stop
    using neutron.wsgi.Middleware and neutron.wsgi.Debug.
  - severity: Low (some out-of-tree plugins might be affected).

* change: Consume sslutils and wsgi modules from oslo.service.

  - commit: Ibfdf07e665fcfcd093a0e31274e1a6116706aec2
  - solution: switch to using oslo_service.wsgi.Router; stop using
    neutron.wsgi.Router.
  - severity: Low (some out-of-tree plugins might be affected).

* change: oslo.service adopted.

  - commit: 6e693fc91dd79cfbf181e3b015a1816d985ad02c
  - solution: switch to using the oslo_service.* namespace; stop using ANY
    neutron.openstack.* contents.
  - severity: low (plugins must not rely on that subtree).

* change: oslo.utils.fileutils adopted.

  - commit: I933d02aa48260069149d16caed02b020296b943a
  - solution: switch to using the oslo_utils.fileutils module; stop using
    the neutron.openstack.fileutils module.
  - severity: low (plugins must not rely on that subtree).

* change: Reuse caller's session in DB methods.

  - commit: 47dd65cf986d712e9c6ca5dcf4420dfc44900b66
  - solution: Add context to args and reuse.
  - severity: High (mostly undetected, because 3rd party CI run Tempest
    tests only).

* change: switches to oslo.log, removes neutron.openstack.common.log.

  - commit: 22328baf1f60719fcaa5b0fbd91c0a3158d09c31
  - solution: a) switch to oslo.log; b) copy the log module into your tree
    and use it (may not work due to conflicts between the module and
    oslo.log configuration options).
  - severity: High (most CI systems are affected).

* change: Implements reorganize-unit-test-tree spec.

  - commit: 1105782e3914f601b8f4be64939816b1afe8fb54
  - solution: Affected code needs to update existing unit tests to reflect
    the new locations.
  - severity: High (mostly undetected, because 3rd party CI run Tempest
    tests only).

* change: drop linux/ovs_lib compat layer.

  - commit: 3bbf473b49457c4afbfc23fd9f59be8aa08a257d
  - solution: switch to using neutron/agent/common/ovs_lib.py.
  - severity: High (most CI systems are affected).
.. meta::
   :keywords: ovn, networking-ovn, OpenStack, neutron

===========
OVN backend
===========

.. toctree::
   :maxdepth: 1

   tools.rst

.. _ovn_tools:

OVN Tools
=========

This document offers details on Neutron tools available for assisting with
using the Open Virtual Network (OVN) backend.

Patches and Cherry-picks
------------------------

Overview
^^^^^^^^

As described in the `ovn-migration blueprint `__, Neutron's OVN ML2 plugin
has been merged into the Neutron repository as of the Ussuri release. With
that, special care must be taken to apply Neutron changes to the proper
stable branches of the networking-ovn repo.

.. note:: These scripts are generic enough to work on any patch file, but
   they are particularly handy with the networking-ovn migration.

tools/files_in_patch.py
^^^^^^^^^^^^^^^^^^^^^^^

Use this to show the files that are changed in a patch file.

.. code-block:: console

   $ # Make a patch to use as example
   $ git show > /tmp/commit.patch
   $ ./tools/files_in_patch.py /tmp/commit.patch | grep .py
   tools/download_gerrit_change.py
   tools/files_in_patch.py
   tools/migrate_names.py

tools/download_gerrit_change.py
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

This tool is needed by ``migrate_names.py`` (see below), but it can be used
independently. Given a Gerrit change id, it will fetch the latest patchset
of the change from `review.opendev.org `__ as a patch file. The output can
be stdout or an optional filename.

.. code-block:: console

   $ ./tools/download_gerrit_change.py --help
   Usage: download_gerrit_change.py [OPTIONS] GERRIT_CHANGE

   Options:
     -o, --output_patch TEXT  Output patch file. Default: stdout
     -g, --gerrit_url TEXT    The url to Gerrit server
                              [default: https://review.opendev.org/]
     -t, --timeout INTEGER    Timeout, in seconds  [default: 10]
     --help                   Show this message and exit.

   $ ./tools/download_gerrit_change.py 698863 -o /tmp/change.patch
   $ ./tools/files_in_patch.py /tmp/change.patch
   networking_ovn/ml2/mech_driver.py
   networking_ovn/ml2/trunk_driver.py
   networking_ovn/tests/unit/ml2/test_mech_driver.py
   networking_ovn/tests/unit/ml2/test_trunk_driver.py

tools/migrate_names.py
^^^^^^^^^^^^^^^^^^^^^^

Use this tool to modify the names of the files in a patch file so it can be
converted to/from the `legacy networking-ovn `__ and `Neutron `__
repositories.

The mapping of how the files are renamed is based on ``migrate_names.txt``,
which is located in the same directory where ``migrate_names.py`` is
installed. That behavior can be modified via the ``--mapfile`` option. More
information on how the map is parsed is provided in the header section of
that file.

.. code-block:: console

   $ ./tools/migrate_names.py --help
   Usage: migrate_names.py [OPTIONS]

   Options:
     -i, --input_patch TEXT   input_patch patch file or gerrit change
     -o, --output_patch TEXT  Output patch file.
                              Default: stdout
     -m, --mapfile PATH       Data file that specifies mapping to be
                              applied to input
                              [default: /home/user/openstack/neutron.git
                              /tools/migrate_names.txt]
     --reverse / --no-reverse Map filenames from networking-ovn to Neutron
                              repo
     --help                   Show this message and exit.

   $ ./tools/migrate_names.py -i 701646 > /tmp/ovn_change.patch
   $ ./tools/migrate_names.py -o /tmp/reverse.patch -i /tmp/ovn_change.patch --reverse
   $ diff /tmp/reverse.patch /tmp/ovn_change.patch | grep .py
   < --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py
   < +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py
   > --- a/networking_ovn/ml2/mech_driver.py
   > +++ b/networking_ovn/ml2/mech_driver.py
   <... snip ...>

   $ ./tools/files_in_patch.py /tmp/ovn_change.patch
   networking_ovn/ml2/mech_driver.py
   networking_ovn/ml2/trunk_driver.py
   networking_ovn/tests/unit/ml2/test_mech_driver.py
   networking_ovn/tests/unit/ml2/test_trunk_driver.py

================================================
Deploying a development environment with vagrant
================================================

The vagrant directory contains a set of vagrant configurations which will
help you deploy Neutron with the OVN driver for testing or development
purposes. We provide a sparse multinode architecture with clear separation
between services. In the future we will include all-in-one and
multi-gateway architectures.

.. toctree::
   :maxdepth: 2

   prerequisites
   sparse-architecture

.. _prerequisites:

=====================
Vagrant prerequisites
=====================

These are the prerequisites for using the vagrant file definitions:

#. Install `VirtualBox `_ and `Vagrant `_. Alternatively, you can use the
   parallels or libvirt vagrant plugin.

#. Install plug-ins for Vagrant::

       $ vagrant plugin install vagrant-cachier
       $ vagrant plugin install vagrant-vbguest

#. On Linux hosts, you can enable instances to access external networks
   such as the Internet by enabling IP forwarding and configuring SNAT from
   the IP address range of the provider network interface (typically
   vboxnet1) on the host to the external network interface on the host. For
   example, if the ``eth0`` network interface on the host provides external
   network connectivity::

       # sysctl -w net.ipv4.ip_forward=1
       # sysctl -p
       # iptables -t nat -A POSTROUTING -s 10.10.0.0/16 -o eth0 -j MASQUERADE

   Note: These commands do not persist after rebooting the host.

.. _sparse-architecture:
===================
Sparse architecture
===================

The Vagrant scripts deploy OpenStack with Open Virtual Network (OVN) using
four nodes (five if you use the optional ovn-vtep node) to implement a
minimal variant of the reference architecture:

#. ovn-db: Database node containing the OVN northbound (NB) and southbound
   (SB) databases via the Open vSwitch (OVS) database and ``ovn-northd``
   services.
#. ovn-controller: Controller node containing the Identity service, Image
   service, control plane portion of the Compute service, control plane
   portion of the Networking service including the ``ovn`` ML2 driver, and
   the dashboard. In addition, the controller node is configured as an NFS
   server to support instance live migration between the two compute nodes.
#. ovn-compute1 and ovn-compute2: Two compute nodes containing the Compute
   hypervisor, ``ovn-controller`` service for OVN, metadata agents for the
   Networking service, and OVS services. In addition, the compute nodes are
   configured as NFS clients to support instance live migration between
   them.
#. ovn-vtep: Optional. A node to run the HW VTEP simulator. This node is
   not started by default but can be started by running "vagrant up
   ovn-vtep" after doing a normal "vagrant up".

During deployment, Vagrant creates three VirtualBox networks:

#. Vagrant management network for deployment and VM access to external
   networks such as the Internet. Becomes the VM ``eth0`` network
   interface.
#. OpenStack management network for the OpenStack control plane, OVN
   control plane, and OVN overlay networks. Becomes the VM ``eth1`` network
   interface.
#. OVN provider network that connects OpenStack instances to external
   networks such as the Internet. Becomes the VM ``eth2`` network
   interface.

Requirements
------------

The default configuration requires approximately 12 GB of RAM and supports
launching approximately four OpenStack instances using the ``m1.tiny``
flavor. You can change the amount of resources for each VM in the
``instances.yml`` file.

Deployment
----------

#. Follow the pre-requisites described in
   :doc:`/contributor/ovn_vagrant/prerequisites`.

#. Clone the ``neutron`` repository locally and change to the
   ``neutron/vagrant/ovn/sparse`` directory::

       $ git clone https://opendev.org/openstack/neutron.git
       $ cd neutron/vagrant/ovn/sparse

#. If necessary, adjust any configuration in the ``instances.yml`` file.

   * If you change any IP addresses or networks, avoid conflicts with the
     host.
   * For evaluating large MTUs, adjust the ``mtu`` option. You must also
     change the MTU on the equivalent ``vboxnet`` interfaces on the host to
     the same value after Vagrant creates them. For example::

         # ip link set dev vboxnet0 mtu 9000
         # ip link set dev vboxnet1 mtu 9000

#. Launch the VMs and grab some coffee::

       $ vagrant up

#. After the process completes, you can use the ``vagrant status`` command
   to determine the VM status::

       $ vagrant status
       Current machine states:

       ovn-db                    running (virtualbox)
       ovn-controller            running (virtualbox)
       ovn-vtep                  running (virtualbox)
       ovn-compute1              running (virtualbox)
       ovn-compute2              running (virtualbox)

#. You can access the VMs using the following commands::

       $ vagrant ssh ovn-db
       $ vagrant ssh ovn-controller
       $ vagrant ssh ovn-vtep
       $ vagrant ssh ovn-compute1
       $ vagrant ssh ovn-compute2

   Note: If you prefer to use the VM console, the password for the ``root``
   account is ``vagrant``.
   Since ovn-controller is set as the primary in the Vagrantfile, the
   command ``vagrant ssh`` (without specifying the name) will connect ssh
   to that virtual machine.

#. Access OpenStack services via command-line tools on the
   ``ovn-controller`` node, or via the dashboard from the host by pointing
   a web browser at the IP address of the ``ovn-controller`` node.

   Note: By default, OpenStack includes two accounts: ``admin`` and
   ``demo``, both using password ``password``.

#. After completing your tasks, you can destroy the VMs::

       $ vagrant destroy

.. _neutron_blueprints:

Blueprints and Specs
====================

The Neutron team uses the `neutron-specs `_ repository for its
specification reviews. Detailed information can be found on the `wiki `_.
Please also find additional information in the reviews.rst file.

The Neutron team does not enforce deadlines for specs. These can be
submitted throughout the release cycle. The drivers team will review these
on a regular basis throughout the release and, based on the load for the
milestones, will assign them to milestones or move them to the backlog for
selection into a future release.

Please note that we use a `template `_ for spec submissions. It is not
required to fill out all sections in the template. Review of the spec may
require filling in information left out by the submitter.

Sub-Projects and Specs
----------------------

The `neutron-specs `_ repository is only meant for specs from Neutron
itself, as well as the advanced services repositories. This includes FWaaS
and VPNaaS. Other sub-projects are encouraged to fold their specs into
their own devref code in their sub-project gerrit repositories. Please see
the additional comments in the Neutron teams :ref:`section ` for reviewer
requirements of the neutron-specs repository.

.. _request-for-feature-enhancement:

Neutron Request for Feature Enhancements
----------------------------------------

In Liberty the team introduced the concept of feature requests. Feature
requests are tracked as Launchpad bugs, by tagging them with a set of tags
starting with `rfe`, enabling the submission and review of feature requests
before code is submitted. This allows the team to verify the validity of a
feature request before the process of submitting a neutron-spec is
undertaken, or code is written. It also allows the community to express
interest in a feature by subscribing to the bug and posting a comment in
Launchpad.

The 'rfe' tag should not be used for work that is already well-defined and
has an assignee. If you are intending to submit code immediately, a simple
bug report will suffice. Note that the temptation to game the system
exists, but given the history in Neutron for this type of activity, it will
not be tolerated and will be called out as such in public on the mailing
list.

RFEs can be submitted by anyone, and by having the community vote on them
in Launchpad, we can gauge interest in features. The drivers team will
evaluate these on a weekly basis along with the specs.
RFEs will be evaluated in the current cycle against existing project priorities and available resources. The workflow for the life of an RFE in Launchpad is as follows:

* The bug is submitted and will by default land in the "New" state. Anyone can make a bug an RFE by adding the `rfe` tag.
* As soon as a member of the neutron-drivers team acknowledges the bug, the `rfe` tag will be replaced with the `rfe-confirmed` tag. No assignee or milestone is set at this time. The importance will be set to 'Wishlist' to signal the fact that the report is indeed a feature or enhancement and there is no severity associated with it.
* A member of the neutron-drivers team replaces the `rfe-confirmed` tag with the `rfe-triaged` tag when he/she thinks it's ready to be discussed in the drivers meeting. The bug will be in this state while the discussion is ongoing.
* The neutron-drivers team will evaluate the RFE and may advise the submitter to file a spec in neutron-specs to elaborate on the feature request, in case the RFE requires extra scrutiny, more design discussion, etc.
* The PTL will work with the Lieutenant for the area identified by the RFE to evaluate resources against the current workload.
* A member of the Neutron release team (or the PTL) will register a matching Launchpad blueprint to be used for milestone tracking purposes, and for identifying the responsible assignee and approver. If the RFE has a spec, the blueprint will have a pointer to the spec document, which will become available on `specs.o.o. `_ once it is approved and merged. The blueprint will then be linked to the original RFE bug report as a pointer to the discussion that led to the approval of the RFE. The blueprint submitter will also need to identify the following:

  * Priority: there will be only two priorities to choose from, High and Low. It is worth noting that priority is not to be confused with `importance `_, which is a property of Launchpad Bugs. Priority gives an indication of how promptly a work item should be tackled to allow it to complete. High priority is to be chosen for work items that must make substantial progress in the span of the targeted release, and deal with the following aspects:

    * OpenStack cross-project interaction and interoperability issues;
    * Issues that affect the existing system's usability;
    * Stability and testability of the platform;
    * Risky implementations that may require complex and/or pervasive changes to the API and the logical model;

    Low priority is to be chosen for everything else. RFEs without an associated blueprint are effectively equivalent to low priority items. Bear in mind that, even though staffing should take priorities into account (e.g. by giving more resources to high priority items over low priority ones), the open source reality is that they can both proceed at their own pace, and low priority items can indeed complete faster than high priority ones, even though they are given fewer resources.

  * Drafter: who is going to submit and iterate on the spec proposal; he/she may be the RFE submitter.
  * Assignee: who is going to develop the bulk of the code, or the go-to contributor, if more people are involved. Typically this is the RFE submitter, but not necessarily.
* Approver: a member of the Neutron team who can commit enough time during the ongoing release cycle to ensure that code posted for review does not languish, and that all aspects of the feature development are taken care of (client, server changes and/or support from other projects if needed - tempest, nova, openstack-infra, devstack, etc.), as well as comprehensive testing. This is typically a core member who has enough experience with what it takes to get code merged, but other resources amongst the wider team can also be identified. Approvers are volunteers who show a specific interest in the blueprint specification, and have enough insight in the area of work so that they can make effective code reviews and provide design feedback. An approver will not work in isolation, as he/she can and will reach out for help to get the job done; however he/she is the main point of contact with the following responsibilities:

  * Pair up with the drafter/assignee in order to help overcome development blockers.
  * Review patches associated with the blueprint: approver and assignee should touch base regularly and ping each other when new code is available for review, or if review feedback goes unaddressed.
  * Reach out to other reviewers for feedback in areas that may fall outside his/her zone of confidence.
  * Escalate issues, and raise warnings to the release team/PTL if the effort shows slow progress. Approver and assignee are key parts to land a blueprint: should the approver and/or assignee be unable to continue the commitment during the release cycle, it is the Approver's responsibility to reach out to the release team/PTL so that replacements can be identified.
  * Provide a status update during the Neutron IRC meeting, if required.

  Approver `assignments `_ must be carefully identified to ensure that no-one overcommits. A Neutron contributor develops code himself/herself, and if he/she is an approver of more than a couple of blueprints in a single cycle/milestone (depending on the complexity of the spec), it may mean that he/she is oversubscribed.

  The Neutron team will review the status of blueprints targeted for the milestone during their weekly meeting to ensure a smooth progression of the work planned. Blueprints for which resources cannot be identified will have to be deferred.

* In either case (a spec being required or not), once the discussion has happened and there is positive consensus on the RFE, the report is 'approved', and its tag will move from `rfe-triaged` to `rfe-approved`.
* An RFE can occasionally be marked as 'rfe-postponed' if the team identifies a dependency between the proposed RFE and other pending tasks that prevent the RFE from being worked on immediately.
* Once an RFE is approved, it needs volunteers. Approved RFEs that do not have an assignee but sound relatively simple or limited in scope (e.g. the addition of a new API with no ramifications in the plugin backends) should be promoted during team meetings or on the ML so that volunteers can pick them up and get started with neutron development. The team will regularly scan `rfe-approved` or `rfe-postponed` RFEs to see what their latest status is, and mark them incomplete if no assignees can be found or they are no longer relevant.
* As for setting the milestone (both for RFE bugs and blueprints), the current milestone is always chosen, assuming that work will start as soon as the feature is approved. Work that fails to complete by the defined milestone will roll over automatically until it gets completed or abandoned.
* If the code fails to merge, the bug report may be marked as incomplete, unassigned and untargeted, and it will be garbage collected by the Launchpad Janitor if no-one takes over in time. Renewed interest in the feature will have to go through the RFE submission process once again.

In summary:

+------------+-----------------------------------------------------------------------------+
| State      | Meaning                                                                     |
+============+=============================================================================+
| New        | This is where all RFEs start, as filed by the community.                    |
+------------+-----------------------------------------------------------------------------+
| Incomplete | Drivers/LTs - Move to this state to mean, "more needed before proceeding"   |
+------------+-----------------------------------------------------------------------------+
| Confirmed  | Drivers/LTs - Move to this state to mean, "yeah, I see that you filed it"   |
+------------+-----------------------------------------------------------------------------+
| Triaged    | Drivers/LTs - Move to this state to mean, "discussion is ongoing"           |
+------------+-----------------------------------------------------------------------------+
| Won't Fix  | Drivers/LTs - Move to this state to reject an RFE.                          |
+------------+-----------------------------------------------------------------------------+

Once the triaging is complete (i.e. the discussion is over) and the RFE is approved, the tag goes from 'rfe' to 'rfe-approved', and at this point the bug report goes through the usual state transition. Note that the importance will be set to 'wishlist', to reflect the fact that the bug report is indeed not a bug, but a new feature or enhancement. This will also help RFEs that are not followed up by a blueprint stand out in the Launchpad `milestone dashboards `_.

The drivers team will be discussing the following bug reports during their IRC meeting:

* `New RFEs `_
* `Incomplete RFEs `_
* `Confirmed RFEs `_
* `Triaged RFEs `_

RFE Submission Guidelines
-------------------------

Before we dive into the guidelines for writing a good RFE, it is worth mentioning that, depending on your level of engagement with the Neutron project and your role (user, developer, deployer, operator, etc.), you are more than welcome to have a preliminary discussion of a potential RFE by reaching out to other people involved in the project. This usually happens by posting mails on the relevant mailing lists (e.g. `openstack-discuss `_ - include [neutron] in the subject) or on the #openstack-neutron IRC channel on Freenode. If current ongoing code reviews are related to your feature, posting comments/questions on gerrit may also be a way to engage. Some amount of interaction with Neutron developers will give you an idea of the plausibility and form of your RFE before you submit it. That said, this is not mandatory.

When you submit a bug report on https://bugs.launchpad.net/neutron/+filebug, there are two fields that must be filled: 'summary' and 'further information'. The 'summary' must be brief enough to fit in one line: if you can't describe it in a few words, it may mean that you are either trying to capture more than one RFE at once, or that you are having a hard time defining what you are trying to solve at all.

The 'further information' section must be a description of what you would like to see implemented in Neutron.
The description should provide enough details for a knowledgeable developer to understand what the existing problem in the current platform is that needs to be addressed, or what the enhancement is that would make the platform more capable, both from a functional and a non-functional standpoint. To this aim, it is important to describe 'why' you believe the RFE should be accepted, and to motivate why Neutron would be a poorer platform without it. The description should be self-contained, and no external references should be necessary to further explain the RFE.

In other words, when you write an RFE you should ask yourself the following questions:

* What is it that I (specify what user - a user can be a human or another system) cannot do today when interacting with Neutron? On the other hand, is there a Neutron component X that is unable to accomplish something?
* Is there something that you would like Neutron to handle better, i.e. in a more scalable or more reliable way?
* What is it that I would like to see happen after the RFE is accepted and implemented?
* Why do you think it is important?

Once you are happy with what you wrote, add 'rfe' as a tag, and submit. Do not worry, we are here to help you get it right! Happy hacking.

Missing your target
-------------------

There are occasions when a spec will be approved and the code will not land in the cycle it was targeted at. For these cases, the workflow to get the spec into the next release is as follows:

* During the RC window, the PTL will create a directory named '' under the 'backlog' directory in the neutron specs repo, and he/she will move all specs that did not make the release to this directory.
* Anyone can propose a patch to neutron-specs which moves a spec from the previous release into the new release directory. The specs which are moved in this way can be fast-tracked into the next release. Please note that it is required to re-propose the spec for the new release.

Documentation
-------------

The above process involves two places where any given feature can start to be documented - namely in the RFE bug, and in the spec - and in addition to those, Neutron has a substantial :doc:`developer reference guide ` (aka 'devref'), and user-facing docs such as the :doc:`networking guide `. So it might be asked:

* What is the relationship between all of those?
* What is the point of devref documentation, if everything has already been described in the spec?

The answers have been beautifully expressed in an `openstack-dev post `_:

1. RFE: "I want X"
2. Spec: "I plan to implement X like this"
3. devref: "How X is implemented and how to extend it"
4. OS docs: "API and guide for using X"

Once a feature X has been implemented, we shouldn't have to go back to its RFE bug or spec to find information on it. The devref may reuse a lot of content from the spec, but the spec is not maintained and the implementation may differ in some ways from what was intended when the spec was agreed. The devref should be kept current with refactorings, etc., of the implementation.

Devref content should be added as part of the implementation of a new feature. Since the spec is not maintained after the feature is implemented, the devref should include a maintained version of the information from the spec.

If a feature requires OS docs (4), the feature patch shall include the new, or updated, documentation changes. If the feature is purely a developer-facing thing, (4) is not needed.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/contributor/policies/bugs.rst0000644000175000017500000012264000000000000025070 0ustar00coreycorey00000000000000.. _neutron_bugs:

Neutron Bugs
============

Neutron (client, core, FwaaS, VPNaaS) maintains all of its bugs in the following Launchpad projects:

* `Launchpad Neutron `_
* `Launchpad python-neutronclient `_

Neutron Bugs Team In Launchpad
------------------------------

The `Neutron Bugs `_ team in Launchpad is used to allow access to the projects above. Members of the above group have the ability to set bug priorities, target bugs to releases, and perform other administrative tasks around bugs. The administrators of this group are the members of the `neutron-drivers-core `_ gerrit group. Non-administrators of this group include anyone who is involved with the Neutron project and has a desire to assist with bug triage.

If you would like to join this Launchpad group, it's best to reach out to a member of the above-mentioned neutron-drivers-core team in #openstack-neutron on Freenode and let them know why you would like to be a member. The team is more than happy to add additional bug triage capability, but it helps to know who is requesting access, and IRC is a quick way to make the connection. As outlined below, the bug deputy is a volunteer who wants to help with defect management. Permissions will have to be granted assuming that people sign up for the deputy role. Permissions won't be given freely; a person must show some degree of prior involvement.

.. _neutron_bug_deputy:

Neutron Bug Deputy
------------------

Neutron maintains the notion of a "bug deputy". The bug deputy plays an important role in the Neutron community. As a large project, Neutron is routinely fielding many bug reports. The bug deputy is responsible for acting as a "first contact" for these bug reports and performing initial screening/triaging. The bug deputy is expected to communicate with the various Neutron teams when a bug has been triaged. In addition, the bug deputy should be reporting "High" and "Critical" priority bugs.

To avoid burnout, and to give everyone a chance to gain experience in defect management, the Neutron bug deputy is a rotating role. The rotation will be set on a period (typically one or two weeks) determined by the team during the weekly Neutron IRC meeting and/or according to holidays. During the Neutron IRC meeting we will expect a volunteer to step up for the period. Members of the Neutron core team are invited to fill the role; however, non-core Neutron contributors who are interested are also encouraged to take up the role. This contributor is going to be the bug deputy for the period, and he/she will be asked to report to the team during the subsequent IRC meeting. The PTL will also work with the team to assess that everyone gets his/her fair share at fulfilling this duty. It is reasonable to expect some imbalance from time to time, and the team will work together to resolve it to ensure that everyone is 100% effective and well rounded in their role as *custodian* of Neutron quality. Should the duty load be too much in busy times of the release, the PTL and the team will work together to assess whether more than one deputy is necessary in a given period.
The presence of a bug deputy does not mean the rest of the team is simply off the hook for the period; in fact, the bug deputy will have to actively work with the Lieutenants/Drivers, and these should help in getting the bug report moving down the resolution pipeline.

During the period a member acts as bug deputy, he/she is expected to watch bugs filed against the Neutron projects (as listed above) and do a first screening to determine potential severity, tagging, logstash queries, other affected projects, affected releases, etc.

From time to time bugs will be filed and auto-assigned by members of the core team to get them to a swift resolution. Obviously, the deputy is exempt from screening these.

Finally, the PTL will work with the deputy to produce a brief summary of the issues of the week to be shared with the larger team during the weekly IRC meeting and tracked in the meeting notes. If for some reason the deputy is not going to attend the team meeting to report, the deputy should consider sending a brief report to the openstack-discuss@ mailing list in advance of the meeting.

Getting Ready to Serve as the Neutron Bug Deputy
------------------------------------------------

If you are interested in serving as the Neutron bug deputy, there are several steps you will need to follow in order to be prepared.

* Request to be added to the `neutron-bugs team in Launchpad `_. This request will be approved when you are assigned a bug deputy slot.
* Read this page in full. Keep this document in mind at all times as it describes the duties of the bug deputy and how to triage bugs, particularly around setting the importance and tags of bugs.
* Sign up for neutron bug emails from LaunchPad.

  * Navigate to the `LaunchPad Neutron bug list `_.
  * On the right hand side, click on "Subscribe to bug mail".
  * In the pop-up that is displayed, keep the recipient as "Yourself", and name your subscription something useful like "Neutron Bugs". You can choose either option for how much mail you get, but keep in mind that getting mail for all changes - while informative - will result in several dozen emails per day at least.
  * Do the same for the `LaunchPad python-neutronclient bug list `_.

* Configure the information you get from `LaunchPad `_ to make additional information visible, especially the 'age' of the bugs. You accomplish that by clicking the little gear on the left hand side of the screen at the top of the bugs list. This provides an overview of information for each bug on a single page.
* Optional: Set up your mail client to highlight bug email that indicates a new bug has been filed, since those are the ones you will be wanting to triage. Filter based on email from "@bugs.launchpad.net" with "[NEW]" in the subject line.
* Volunteer during the course of the Neutron team meeting, when volunteers to be bug deputy are requested (usually towards the beginning of the meeting).
* View your scheduled week on the `Neutron Meetings page `_.
* During your shift, if it is feasible for your timezone, plan on attending the Neutron Drivers meeting. That way if you have tagged any bugs as RFE, you can be present to discuss them.

Bug Deputy routines in your week
--------------------------------

* Scan 'New' bugs to triage. If a bug doesn't have enough info to triage, ask for more info and mark it 'Incomplete'. If you can confirm it yourself, mark it 'Confirmed'. Otherwise, find someone familiar with the topic and ask for his/her help.
* Scan 'Incomplete' bugs to see if they got more info. If so, move them back to 'New'.
* Repeat the above routines at least for bugs filed during your week. If you can, do the same for older bugs.
* Take note of the bugs you processed. At the end of your week, post a report on the openstack-discuss mailing list.

Plugin and Driver Repositories
------------------------------

Many plugins and drivers have backend code that exists in another repository. These repositories may have their own Launchpad projects for bugs. The teams working on the code in these repos assume full responsibility for bug handling in those projects. For this reason, bugs whose solution would exist solely in the plugin/driver repo should not have Neutron in the affected projects section. You should add Neutron (or any other project) to that list only if you expect that a patch is needed to that repo in order to solve the bug.

It's also worth adding that some of these projects are part of the so-called Neutron `stadium `_. Because of that, their release is managed centrally by the Neutron release team; requests for releases need to be funnelled and screened properly before they can happen. The release request process is described :ref:`here `.

.. _guidelines:

Bug Screening Best Practices
----------------------------

When screening bug reports, the first step for the bug deputy is to assess how well written the bug report is, and whether there is enough information for anyone else besides the bug submitter to reproduce the bug and come up with a fix. There is plenty of information on the `OpenStack Bugs `_ page on how to write a good bug `report `_ and how to tell a good bug report from a bad one. Should the bug report not adhere to these best practices, the bug deputy's first step would be to redirect the submitter to this section, invite him/her to supply the missing information, and mark the bug report as 'Incomplete'. For future submissions, the reporter can then use the template provided below to ensure speedy triaging. Done often enough, this practice should (ideally) ensure that, in the long run, only 'good' bug reports are going to be filed.

Bug Report Template
~~~~~~~~~~~~~~~~~~~

The more information you provide, the higher the chance of speedy triaging and resolution: identifying the problem is half the solution. To this aim, when writing a bug report, please consider supplying the following details and following these suggestions (a condensed example is sketched after this list):

* Summary (Bug title): keep it small, possibly one line. If you cannot describe the issue in fewer than 100 characters, you are probably submitting more than one bug at once.
* Further information (Bug description): unlike other bug trackers, Launchpad does not provide a structured way of submitting bug-related information; everything goes in this section. Therefore, you are invited to break down the description into the following fields:

  * High level description: provide a brief sentence (a couple of lines) of what you are trying to accomplish, or would like to accomplish differently; the 'why' is important, but can be omitted if obvious (not to you of course).
  * Pre-conditions: what is the initial state of your system? Please consider enumerating resources available in the system, if useful in diagnosing the problem. Who are you? A regular user or a super-user? Are you describing service-to-service interaction?
  * Step-by-step reproduction steps: these can be actual neutron client commands or raw API requests; grab the output if you think it is useful.
    Please consider using `paste.o.o `_ for long outputs, as Launchpad formats the description field poorly, making the reading experience somewhat painful.

  * Expected output: what did you hope to see? How would you have expected the system to behave? A specific error/success code? The output in a specific format? Or more than a user was supposed to see, or less?
  * Actual output: did the system silently fail (in this case log traces are useful)? Did you get a different response from what you expected?
  * Version:

    * OpenStack version (specific stable branch, or git hash if from trunk);
    * Linux distro, kernel. For a distro, it's also worth knowing specific versions of client and server, not just the major release;
    * Relevant underlying processes such as openvswitch, iproute, etc.;
    * DevStack or other *deployment* mechanism?

  * Environment: what services are you running (core services like DB and AMQP broker, as well as Nova/hypervisor if it matters), and which type of deployment (clustered servers); if you are running DevStack, is it a single node? Is it multi-node? Are you reporting an issue in your own environment or something you encountered in the OpenStack CI Infrastructure, aka the Gate?
  * Perceived severity: what would you consider the `importance `_ to be?

* Tags (Affected component): try to use the existing tags by relying on auto-completion. Please refrain from creating new ones; if you need new "official" tags_, please reach out to the PTL. If you would like a fix to be backported, please add a backport-potential tag. This does not mean you are going to get the backport, as the stable team needs to follow the `stable branch policy `_ for merging fixes to stable branches.
* Attachments: consider attaching logs; truncated log snippets are rarely useful. Be proactive, and consider attaching redacted configuration files if you can, as that will speed up the resolution process greatly.
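A condensed, entirely hypothetical example of how such a report can look is sketched below; every name, address and version in it is made up for illustration only::

    Summary: Floating IP unreachable after router admin state is toggled

    High level description: as a regular user I expect an instance with a
    floating IP to remain reachable after the router is disabled and
    re-enabled.
    Pre-conditions: one network/subnet, one router, one instance with a
    floating IP; regular (non-admin) user.
    Step-by-step reproduction steps:
        $ openstack router set --disable my-router
        $ openstack router set --enable my-router
        $ ping <floating-ip>
    Expected output: ping replies resume once the router is enabled again.
    Actual output: the floating IP remains silently unreachable.
    Version: OpenStack Train, Ubuntu 18.04, single node DevStack.
    Environment: all core services on one node, ML2/OVS.
    Perceived severity: High.
    Tags: l3-ipam-dhcp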
Bug Triage Process
~~~~~~~~~~~~~~~~~~

The process of bug triaging consists of the following steps:

* Check if a bug was filed for a correct component (project). If not, either change the project or mark it as "Invalid".
* For bugs that affect documentation, proceed as follows. If the documentation affects:

  * the ReST API, add the "api-ref" tag to the bug.
  * the OpenStack manuals, like the Networking Guide or the Configuration Reference, create a patch for the affected files in the documentation directory in this repository. For a layout of how the documentation directory is structured, see the `effective neutron guide <../effective_neutron.html>`_
  * developer documentation (devref), set the bug to "Confirmed" for the project Neutron, otherwise set it to "Invalid".

* Check if a similar bug was filed before. Rely on your memory if Launchpad is not clever enough to spot a duplicate upon submission. You may also check already verified bugs for `Neutron `_ and `python-neutronclient `_ to see if the bug has been reported. If so, mark it as a duplicate of the previous bug.
* Check if the bug meets the requirements of a good bug report, by checking that the guidelines_ are being followed. Omitted information is still acceptable if the issue is clear nonetheless; use your good judgement and your experience. Consult another core member/PTL if in doubt. If the bug report needs some love, mark the bug as 'Incomplete', point the submitter to this document and hope he/she turns around quickly with the missing information.

If the bug report is sound, move on:

* Revise tags as recommended by the submitter. Ensure they are 'official' tags. If the bug report talks about deprecating features or config variables, add a deprecation tag to the list.
* As deputy, one is usually excused from processing RFE bugs, which are the responsibility of the drivers team members.
* Depending on ease of reproduction (or if the issue can be spotted in the code), mark it as 'Confirmed'. If you are unable to assess/triage the issue because you do not have access to a repro environment, consider reaching out to the :ref:`Lieutenant `, the go-to person for the affected component; he/she may be able to help: assign the bug to him/her for further screening. If the bug already has an assignee, check that a patch is in progress. Sometimes more than one patch is required to address an issue; make sure that there is at least one patch that 'Closes' the bug, or document/question what it takes to mark the bug as fixed.
* If the bug indicates a test or gate failure, look at the failures for that test over time using `OpenStack Health `_ or `OpenStack Logstash `_. This can help to validate whether the bug identifies an issue that is occurring all of the time, some of the time, or only for the bug submitter.
* If the bug is the result of a misuse of the system, mark the bug either as 'Won't fix', or 'Opinion' if you are still on the fence and need other people's input.
* Assign the importance after reviewing the proposed severity. Bugs that obviously break core and widely used functionality should get assigned "High" or "Critical" importance. The same applies to bugs that were filed for gate failures.
* Choose a milestone, if you can. Targeted bugs are especially important close to the end of the release.
* (Optional) Add comments explaining the issue and possible strategies for fixing/working around the bug. Also, as good as some are at adding all thoughts to bugs, it is still helpful to share the in-progress items that might not be captured in a bug description or during our weekly meeting. In order to provide some guidance and reduce ramp-up time as we rotate, tagging bugs with 'needs-attention' can be useful to quickly identify what reports need further screening/eyes on.

Check for bugs with the 'timeout-abandon' tag:

* Search for any bugs with the timeout-abandon tag: `Timeout abandon `_. This tag indicates that the bug had a patch associated with it that was automatically abandoned after timing out with negative feedback.
* For each bug with this tag, determine if the bug is still valid and update the status accordingly. For example, if another patch fixed the bug, ensure it's marked as 'Fix Released'. Or, if that was the only patch for the bug and it's still valid, mark it as 'Confirmed'.
* After ensuring the bug report is in the correct state, remove the 'timeout-abandon' tag.

You are done! Iterate.

Bug Expiration Policy and Bug Squashing
---------------------------------------

More can be found at this `Launchpad page `_. In a nutshell, in order to make a bug report expire automatically, it needs to be unassigned, untargeted, and marked as Incomplete.

The OpenStack community has had `Bug Days `_ but they have not been wildly successful. In order to keep the list of open bugs at a manageable number (more like <100, rather than closer to 1000), at the end of each release (in feature freeze and/or during less busy times), the PTL, with the help of the team, will go through the list of open (namely new, opinion, in progress, confirmed, triaged) bugs, and do a major sweep to have the Launchpad Janitor pick them up.
This gives 60 days grace period to reporters/assignees to come back and revive the bug. Assuming that at regime, bugs are properly reported, acknowledged and fix-proposed, losing unaddressed issues is not going to be a major issue, but brief stats will be collected to assess how the team is doing over time. .. _tags: Tagging Bugs ------------ Launchpad's Bug Tracker allows you to create ad-hoc groups of bugs with tagging. In the Neutron team, we have a list of agreed tags that we may apply to bugs reported against various aspects of Neutron itself. The list of approved tags used to be available on the `wiki `_, however the section has been moved here, to improve collaborative editing, and keep the information more current. By using a standard set of tags, each explained on this page, we can avoid confusion. A bug report can have more than one tag at any given time. Proposing New Tags ~~~~~~~~~~~~~~~~~~ New tags, or changes in the meaning of existing tags (or deletion), are to be proposed via patch to this section. After discussion, and approval, a member of the bug team will create/delete the tag in Launchpad. Each tag covers an area with an identified go-to contact or :ref:`Lieutenant `, who can provide further insight. Bug queries are provided below for convenience, more will be added over time if needed. +-------------------------------+-----------------------------------------+--------------------------+ | Tag | Description | Contact | +===============================+=========================================+==========================+ | access-control_ | A bug affecting RBAC and policy.json | Miguel Lavalle | +-------------------------------+-----------------------------------------+--------------------------+ | api_ | A bug affecting the API layer | Akihiro Motoki | +-------------------------------+-----------------------------------------+--------------------------+ | api-ref_ | A bug affecting the API reference | Akihiro Motoki | +-------------------------------+-----------------------------------------+--------------------------+ | auto-allocated-topology_ | A bug affecting get-me-a-network | N/A | +-------------------------------+-----------------------------------------+--------------------------+ | baremetal_ | A bug affecting Ironic support | N/A | +-------------------------------+-----------------------------------------+--------------------------+ | db_ | A bug affecting the DB layer | Nate Johnston | +-------------------------------+-----------------------------------------+--------------------------+ | deprecation_ | To track config/feature deprecations | Neutron PTL/drivers | +-------------------------------+-----------------------------------------+--------------------------+ | dns_ | A bug affecting DNS integration | Miguel Lavalle | +-------------------------------+-----------------------------------------+--------------------------+ | doc_ | A bug affecting in-tree doc | Akihiro Motoki | +-------------------------------+-----------------------------------------+--------------------------+ | fullstack_ | A bug in the fullstack subtree | Rodolfo Alonso Hernandez | +-------------------------------+-----------------------------------------+--------------------------+ | functional-tests_ | A bug in the functional tests subtree | Rodolfo Alonso Hernandez | +-------------------------------+-----------------------------------------+--------------------------+ | fwaas_ | A bug affecting neutron-fwaas | Nate Johnston | 
+-------------------------------+-----------------------------------------+--------------------------+ | gate-failure_ | A bug affecting gate stability | Slawek Kaplonski | +-------------------------------+-----------------------------------------+--------------------------+ | ipv6_ | A bug affecting IPv6 support | Brian Haley | +-------------------------------+-----------------------------------------+--------------------------+ | l2-pop_ | A bug in L2 Population mech driver | Miguel Lavalle | +-------------------------------+-----------------------------------------+--------------------------+ | l3-bgp_ | A bug affecting neutron-dynamic-routing | Ryan Tidwell | +-------------------------------+-----------------------------------------+--------------------------+ | l3-dvr-backlog_ | A bug affecting distributed routing | Yulong Liu/ | | | | Brian Haley | +-------------------------------+-----------------------------------------+--------------------------+ | l3-ha_ | A bug affecting L3 HA (vrrp) | Brian Haley | +-------------------------------+-----------------------------------------+--------------------------+ | l3-ipam-dhcp_ | A bug affecting L3/DHCP/metadata | Miguel Lavalle | +-------------------------------+-----------------------------------------+--------------------------+ | lib_ | An issue affecting neutron-lib | Neutron PTL | +-------------------------------+-----------------------------------------+--------------------------+ | linuxbridge_ | A bug affecting ML2/linuxbridge | N/A | +-------------------------------+-----------------------------------------+--------------------------+ | loadimpact_ | Performance penalty/improvements | Miguel Lavalle | +-------------------------------+-----------------------------------------+--------------------------+ | logging_ | An issue with logging guidelines | Matt Riedemann | +-------------------------------+-----------------------------------------+--------------------------+ | low-hanging-fruit_ | Starter bugs for new contributors | Miguel Lavalle | +-------------------------------+-----------------------------------------+--------------------------+ | metering_ | A bug affecting the metering layer | N/A | +-------------------------------+-----------------------------------------+--------------------------+ | needs-attention_ | A bug that needs further screening | PTL/Bug Deputy | +-------------------------------+-----------------------------------------+--------------------------+ | opnfv_ | Reported by/affecting OPNFV initiative | Drivers team | +-------------------------------+-----------------------------------------+--------------------------+ | ops_ | Reported by or affecting operators | Drivers Team | +-------------------------------+-----------------------------------------+--------------------------+ | oslo_ | An interop/cross-project issue | Bernard Cafarelli/ | | | | Rodolfo Alonso Hernandez | +-------------------------------+-----------------------------------------+--------------------------+ | ovn_ | A bug affecting ML2/OVN | Jakub Libosvar/ | | | | Lucas Alvares Gomes | +-------------------------------+-----------------------------------------+--------------------------+ | ovn-octavia-provider_ | A bug affecting OVN Octavia provider | Maciej Jozefczyk/ | | | driver | Brian Haley | +-------------------------------+-----------------------------------------+--------------------------+ | ovs_ | A bug affecting ML2/OVS | Miguel Lavalle | 
+-------------------------------+-----------------------------------------+--------------------------+ | ovs-fw_ | A bug affecting OVS firewall | Miguel Lavalle | +-------------------------------+-----------------------------------------+--------------------------+ | ovsdb-lib_ | A bug affecting OVSDB library | Terry Wilson | +-------------------------------+-----------------------------------------+--------------------------+ | qos_ | A bug affecting ML2/QoS | Slawek Kaplonski | +-------------------------------+-----------------------------------------+--------------------------+ | rfe_ | Feature enhancements being screened | Drivers Team | +-------------------------------+-----------------------------------------+--------------------------+ | rfe-confirmed_ | Confirmed feature enhancements | Drivers Team | +-------------------------------+-----------------------------------------+--------------------------+ | rfe-triaged_ | Triaged feature enhancements | Drivers Team | +-------------------------------+-----------------------------------------+--------------------------+ | rfe-approved_ | Approved feature enhancements | Drivers Team | +-------------------------------+-----------------------------------------+--------------------------+ | rfe-postponed_ | Postponed feature enhancements | Drivers Team | +-------------------------------+-----------------------------------------+--------------------------+ | sg-fw_ | A bug affecting security groups | Brian Haley | +-------------------------------+-----------------------------------------+--------------------------+ | sriov-pci-pt_ | A bug affecting Sriov/PCI PassThrough | Moshe Levi | +-------------------------------+-----------------------------------------+--------------------------+ | tempest_ | A bug in tempest subtree tests | Rodolfo Alonso Hernandez | +-------------------------------+-----------------------------------------+--------------------------+ | troubleshooting_ | An issue affecting ease of debugging | Nate Johnston | +-------------------------------+-----------------------------------------+--------------------------+ | unittest_ | A bug affecting the unit test subtree | Rodolfo Alonso Hernandez | +-------------------------------+-----------------------------------------+--------------------------+ | usability_ | UX, interoperability, feature parity | PTL/Drivers Team | +-------------------------------+-----------------------------------------+--------------------------+ | vpnaas_ | A bug affecting neutron-vpnaas | Dongcan Ye | +-------------------------------+-----------------------------------------+--------------------------+ | xxx-backport-potential_ | Cherry-pick request for stable team | Bernard Cafarelli/ | | | | Brian Haley | +-------------------------------+-----------------------------------------+--------------------------+ .. _access-control: Access Control ++++++++++++++ * `Access Control - All bugs `_ * `Access Control - In progress `_ .. _api: API +++ * `API - All bugs `_ * `API - In progress `_ .. _api-ref: API Reference +++++++++++++ * `API Reference - All bugs `_ * `API Reference - In progress `_ .. _auto-allocated-topology: Auto Allocated Topology +++++++++++++++++++++++ * `Auto Allocated Topology - All bugs `_ * `Auto Allocated Topology - In progress `_ .. _baremetal: Baremetal +++++++++ * `Baremetal - All bugs `_ * `Baremetal - In progress `_ .. _db: DB ++ * `DB - All bugs `_ * `DB - In progress `_ .. _deprecation: Deprecation +++++++++++ * `Deprecation - All bugs `_ * `DeprecationB - In progress `_ .. 
_dns: DNS +++ * `DNS - All bugs `_ * `DNS - In progress `_ .. _doc: DOC +++ * `DOC - All bugs `_ * `DOC - In progress `_ .. _fullstack: Fullstack +++++++++ * `Fullstack - All bugs `_ * `Fullstack - In progress `_ .. _functional-tests: Functional Tests ++++++++++++++++ * `Functional tests - All bugs `_ * `Functional tests - In progress `_ .. _fwaas: FWAAS +++++ * `FWaaS - All bugs `_ * `FWaaS - In progress `_ .. _gate-failure: Gate Failure ++++++++++++ * `Gate failure - All bugs `_ * `Gate failure - In progress `_ .. _ipv6: IPV6 ++++ * `IPv6 - All bugs `_ * `IPv6 - In progress `_ .. _l2-pop: L2 Population +++++++++++++ * `L2 Pop - All bugs `_ * `L2 Pop - In progress `_ .. _l3-bgp: L3 BGP ++++++ * `L3 BGP - All bugs `_ * `L3 BGP - In progress `_ .. _l3-dvr-backlog: L3 DVR Backlog ++++++++++++++ * `L3 DVR - All bugs `_ * `L3 DVR - In progress `_ .. _l3-ha: L3 HA +++++ * `L3 HA - All bugs `_ * `L3 HA - In progress `_ .. _l3-ipam-dhcp: L3 IPAM DHCP ++++++++++++ * `L3 IPAM DHCP - All bugs `_ * `L3 IPAM DHCP - In progress `_ .. _lib: Lib +++ * `Lib - All bugs `_ .. _linuxbridge: LinuxBridge +++++++++++ * `LinuxBridge - All bugs `_ * `LinuxBridge - In progress `_ .. _loadimpact: Load Impact +++++++++++ * `Load Impact - All bugs `_ * `Load Impact - In progress `_ .. _logging: Logging +++++++ * `Logging - All bugs `_ * `Logging - In progress `_ .. _low-hanging-fruit: Low hanging fruit +++++++++++++++++ * `Low hanging fruit - All bugs `_ * `Low hanging fruit - In progress `_ .. _metering: Metering ++++++++ * `Metering - All bugs `_ * `Metering - In progress `_ .. _needs-attention: Needs Attention +++++++++++++++ * `Needs Attention - All bugs `_ .. _opnfv: OPNFV +++++ * `OPNFV - All bugs `_ .. _ops: Operators/Operations (ops) ++++++++++++++++++++++++++ * `Ops - All bugs `_ .. _oslo: OSLO ++++ * `Oslo - All bugs `_ * `Oslo - In progress `_ .. _ovn: OVN +++ * `OVN - All bugs `_ * `OVN - In progress `_ .. _ovn-octavia-provider: OVN Octavia Provider driver +++++++++++++++++++++++++++ * `OVN Octavia Provider driver - All bugs `_ * `OVN Octavia Provider driver - In progress `_ .. _ovs: OVS +++ * `OVS - All bugs `_ * `OVS - In progress `_ .. _ovs-fw: OVS Firewall ++++++++++++ * `OVS Firewall - All bugs `_ * `OVS Firewall - In progress `_ .. _ovsdb-lib: OVSDB Lib +++++++++ * `OVSDB Lib - All bugs `_ * `OVSDB Lib - In progress `_ .. _qos: QoS +++ * `QoS - All bugs `_ * `QoS - In progress `_ .. _rfe: RFE +++ * `RFE - All bugs `_ * `RFE - In progress `_ .. _rfe-confirmed: RFE-Confirmed +++++++++++++ * `RFE-Confirmed - All bugs `_ .. _rfe-triaged: RFE-Triaged +++++++++++ * `RFE-Triaged - All bugs `_ .. _rfe-approved: RFE-Approved ++++++++++++ * `RFE-Approved - All bugs `_ * `RFE-Approved - In progress `_ .. _rfe-postponed: RFE-Postponed +++++++++++++ * `RFE-Postponed - All bugs `_ * `RFE-Postponed - In progress `_ .. _sriov-pci-pt: SRIOV-PCI PASSTHROUGH +++++++++++++++++++++ * `SRIOV/PCI-PT - All bugs `_ * `SRIOV/PCI-PT - In progress `_ .. _sg-fw: SG-FW +++++ * `Security groups - All bugs `_ * `Security groups - In progress `_ .. _tempest: Tempest +++++++ * `Tempest - All bugs `_ * `Tempest - In progress `_ .. _troubleshooting: Troubleshooting +++++++++++++++ * `Troubleshooting - All bugs `_ * `Troubleshooting - In progress `_ .. _unittest: Unit test +++++++++ * `Unit test - All bugs `_ * `Unit test - In progress `_ .. _usability: Usability +++++++++ * `UX - All bugs `_ * `UX - In progress `_ .. _vpnaas: VPNAAS ++++++ * `VPNaaS - All bugs `_ * `VPNaaS - In progress `_ .. 
_xxx-backport-potential:

Backport/RC potential
+++++++++++++++++++++

The list of all ``Backport/RC potential`` bugs for stable releases can be found on Launchpad. A pointer to the Launchpad page with the list of such bugs for any stable release can be built using the link:

https://bugs.launchpad.net/neutron/+bugs?field.tag={STABLE_BRANCH}-backport-potential

where ``STABLE_BRANCH`` is always the name of one of the 3 latest releases (for example, ``train-backport-potential`` for the stable/train branch).

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/contributor/policies/code-reviews.rst0000644000175000017500000001536700000000000026527 0ustar00coreycorey00000000000000.. _code_review:

Neutron Code Reviews
====================

Code reviews are a critical component of all OpenStack projects. Neutron accepts patches from many diverse people with diverse backgrounds, employers, and experience levels. Code reviews provide a way to enforce a level of consistency across the project, and also allow for the careful onboarding of contributions from new contributors.

Neutron Code Review Practices
-----------------------------

Neutron follows the `code review guidelines `_ as set forth for all OpenStack projects. It is expected that all reviewers are following the guidelines set forth on that page. In addition to that, the following rules are to be followed:

* Any change that requires a new feature from Neutron runtime dependencies requires special review scrutiny to make sure such a change does not break a supported platform (examples of those platforms are the latest Ubuntu LTS or CentOS). Runtime dependencies include but are not limited to: kernel, daemons and tools as defined in ``oslo.rootwrap`` filter files, runlevel management systems, as well as other elements of the Neutron execution environment.

  .. note:: For some components, the list of supported platforms can be wider than usual. For example, the Open vSwitch agent is expected to run successfully in a Win32 runtime environment.

  #. All such changes must be tagged with ``UpgradeImpact`` in their commit messages.
  #. Reviewers are then advised to make an effort to check if the newly proposed runtime dependency is fulfilled on supported platforms.
  #. Specifically, reviewers and authors are advised to use existing gate and experimental platform-specific jobs to validate those patches. To trigger experimental jobs, use the usual protocol (posting a ``check experimental`` comment in Gerrit). CI will then execute and report back a baseline of Neutron tests for platforms of interest and will provide feedback on the effect of the runtime change required.
  #. If the review identifies that the proposed change would break a supported platform, advise the author to rework the patch so that it's no longer breaking the platform. One of the common ways of achieving that is gracefully falling back to alternative means on older platforms; another is hiding the new code behind a conditional, potentially controlled with an ``oslo.config`` option.

     .. note:: The Neutron team retains the right to remove any platform conditionals in future releases. Platform owners are expected to accommodate in due course, or otherwise see their platforms broken. The team also retains the right to discontinue support for unresponsive platforms.

  #. The change should also include a new `sanity check `_ that would help interested parties to identify their platform limitation in a timely manner.
* Special attention should also be paid to changes in Neutron that can impact the Stadium and the wider family of networking-related projects (referred to as sub-projects below). These changes include: #. Renaming or removal of methods. #. Addition or removal of positional arguments. #. Renaming or removal of constants. To mitigate the risk of impacting the sub-projects with these changes, the following measures are suggested: #. Use of the online tool `codesearch `_ to ascertain how the proposed changes will affect the code of the sub-projects. #. Review the results of the non-voting check and 3rd party CI jobs executed by the sub-projects against the proposed change, which are returned by Zuul in the change's Gerrit page. When impacts are identified as a result of the above steps, every effort must be made to work with the affected sub-projects to resolve the issues. * Any change that modifies or introduces a new API should have test coverage in neutron-tempest-plugin or tempest test suites. There should be at least one API test added for a new feature, but it is preferred that both API and scenario tests be added where it is appropriate. Scenario tests should cover not only the base level of new functionality, but also standard ways in which the functionality can be used. For example, if the feature adds a new kind of networking (like e.g. trunk ports) then tests should make sure that instances can use IPs provided by that networking, can be migrated, etc. It is also preferred that some negative test cases, like API tests to ensure that correct HTTP error is returned when wrong data is provided, will be added where it is appropriate. * It is usually enough for any "mechanical" changes, like e.g. translation imports or imports of updated CI templates, to have only one +2 Code-Review vote to be approved. If there is any uncertainty about a specific patch, it is better to wait for review from another core reviewer before approving the patch. .. _spec-review-practices: Neutron Spec Review Practices ----------------------------- In addition to code reviews, Neutron also maintains a BP specification git repository. Detailed instructions for the use of this repository are provided `here `_. It is expected that Neutron core team members are actively reviewing specifications which are pushed out for review to the specification repository. In addition, there is a neutron-drivers team, composed of a handful of Neutron core reviewers, who can approve and merge Neutron specs. Some guidelines around this process are provided below: * Once a specification has been pushed, it is expected that it will not be approved for at least 3 days after a first Neutron core reviewer has reviewed it. This allows for additional cores to review the specification. * For blueprints which the core team deems of High or Critical importance, core reviewers may be assigned based on their subject matter expertise. * Specification priority will be set by the PTL with review by the core team once the specification is approved. Tracking Review Statistics -------------------------- Stackalytics provides some nice interfaces to track review statistics. The links are provided below. These statistics are used to track not only Neutron core reviewer statistics, but also to track review statistics for potential future core members. 
* `30 day review stats `_
* `60 day review stats `_
* `90 day review stats `_
* `180 day review stats `_

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/contributor/policies/contributor-onboarding.rst0000644000175000017500000000431600000000000030621 0ustar00coreycorey00000000000000Contributor Onboarding
======================

For new contributors, the following is useful onboarding information.

Contributing to Neutron
-----------------------

Work within Neutron is discussed on the openstack-discuss mailing list, as well as in the #openstack-neutron IRC channel. While these are great channels for engaging Neutron, the bulk of discussion of patches and code happens in gerrit itself.

With regard to gerrit, code reviews are a great way to learn about the project. There is also a list of `low or wishlist `_ priority bugs which are ideal for a new contributor to take on. If you haven't done so already, you should set up a Neutron development environment so you can actually run the code. DevStack is the usual convenient way to set up such an environment. See `devstack.org `_ or `NeutronDevstack `_ for more information on using Neutron with devstack.

Helping with documentation can also be a useful first step for a newcomer. Here is a list of tagged documentation and API reference bugs:

* `Documentation bugs `_
* `Api-ref bugs `_

IRC Information and Etiquette
-----------------------------

The main IRC channel for Neutron is #openstack-neutron.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/contributor/policies/gate-failure-triage.rst0000644000175000017500000002305300000000000027744 0ustar00coreycorey00000000000000Neutron Gate Failure Triage
===========================

This page provides guidelines for spotting and assessing neutron gate failures. Some hints for triaging failures are also provided.

Spotting Gate Failures
----------------------

This can be achieved using several tools:

* `Grafana dashboard `_
* `logstash `_

For checking gate failures with logstash, the following query will return failures for a specific job:

> build_status:FAILURE AND message:Finished AND build_name:"check-tempest-dsvm-neutron" AND build_queue:"gate"

Dividing that count by the total number of jobs executed:

> message:Finished AND build_name:"check-tempest-dsvm-neutron" AND build_queue:"gate"

gives the failure rate in the selected period for the given job (for example, 12 failures out of 120 finished runs is a 10% failure rate). It is important to remark that failures in the check queue might be misleading, as the problem causing the failure is most of the time in the patch being checked. Therefore it is always advisable to work on failures that occurred in the gate queue. However, check queue failures are a precious resource for assessing frequency and determining the root cause of failures which manifest in the gate queue.

The step above will provide a quick outlook of where things stand. When the failure rate rises above 10% for a job in 24 hours, it's time to be on alert. 25% is amber alert. 33% is red alert. Anything above 50% means that somebody from the infra team probably already has a contract out on you. Whether you are relaxed, in alert mode, or freaking out because you see a red dot on your chest, it is always a good idea to check the elastic-recheck pages on a daily basis. Under the `gate pipeline `_ tab, you can see gate failure rates for already known bugs.
The bugs on this page are ordered by decreasing failure rates (for the past 24 hours). If one of the bugs affecting Neutron is among those at the top of that list, you should check that the corresponding bug is already assigned and somebody is working on it. If not, and there is no good reason for that, it should be ensured somebody gets a crack at it as soon as possible.

The other part of the story is to check for `uncategorized `_ failures. This is where failures for new (unknown) gate-breaking bugs end up; on the other hand, infra errors causing job failures also end up here. It should be the duty of the diligent Neutron developer to ensure the classification rate for neutron jobs is as close as possible to 100%. To this aim, the diligent Neutron developer should adopt the procedure outlined in the following sections.

.. _troubleshooting-tempest-jobs:

Troubleshooting Tempest jobs
----------------------------

1. Open logs for failed jobs and look for logs/testr_results.html.gz.
2. If that file is missing, check console.html and see where the job failed.

   1. If there is a failure in devstack-gate-cleanup-host.txt it's likely to be an infra issue.
   2. If the failure is in devstacklog.txt it could be a devstack, neutron, or infra issue.

3. However, most of the time the failure is in one of the tempest tests. Take note of the error message and go to logstash.
4. On logstash, search for occurrences of this error message, and try to identify the root cause for the failure (see below).
5. File a bug for this failure, and push an :ref:`Elastic Recheck Query ` for it.
6. If you are confident with the area of this bug, and you have time, assign it to yourself; otherwise look for an assignee or talk to Neutron's bug czar to find an assignee.

Troubleshooting functional/fullstack job
----------------------------------------

1. Go to the job link provided by Jenkins CI.
2. Look at logs/testr_results.html.gz for which particular test failed.
3. More logs from a particular test are stored at logs/dsvm-functional-logs/ (or dsvm-fullstack-logs for the fullstack job).
4. Find the error in the logs and search for similar errors in existing launchpad bugs. If no bugs were reported, create a new bug report. Don't forget to put a snippet of the trace into the new launchpad bug. If the log file for a particular job doesn't contain any trace, pick the one from testr_results.html.gz.
5. Create an :ref:`Elastic Recheck Query `

Advanced Troubleshooting of Gate Jobs
-------------------------------------

As a first step of troubleshooting a failing gate job, you should always check the logs of the job as described above. Unfortunately, sometimes when a tempest/functional/fullstack job is failing, it might be hard to reproduce it in a local environment, and it might also be hard to understand the reason for such a failure from only reading the logs of the failed job. In such cases there are some additional ways to debug the job directly on the test node in a ``live`` setting.

This can be done in two ways:

1. Using the `remote_pdb `_ python module and ``telnet`` to directly access the python debugger while in the failed test.

   To achieve this, you need to send a ``Do not merge`` patch to gerrit with changes as described below:

   * Add an iptables rule to accept incoming telnet connections to remote_pdb. This can be done in one of the ansible roles used in the test job.
     For example, in the ``neutron/roles/configure_functional_tests`` file for
     functional tests::

       sudo iptables -I openstack-INPUT -p tcp -m state --state NEW -m tcp --dport 44444 -j ACCEPT

   * Increase the ``OS_TEST_TIMEOUT`` value to make the test wait longer when
     remote_pdb is active to make debugging easier. This change can also be
     done in the ansible role mentioned above::

       export OS_TEST_TIMEOUT=999999

     Please note that the overall job will be limited by the job timeout, and
     that cannot be changed from within the job.

   * To make it easier to find the IP address of the test node, you should add
     a command to the ansible role so that it prints the IPs configured on the
     test node. For example::

       hostname -I

   * Add the package ``remote_pdb`` to the ``test-requirements.txt`` file.
     That way it will be automatically installed in the venv of the test
     before it is run::

       $ tail -1 test-requirements.txt
       remote_pdb

   * Finally, you need to import and call the remote_pdb module in the part of
     your test code where you want to start the debugger::

       $ diff --git a/neutron/tests/fullstack/test_connectivity.py b/neutron/tests/fullstack/test_connectivity.py
       index c8650b0..260207b 100644
       --- a/neutron/tests/fullstack/test_connectivity.py
       +++ b/neutron/tests/fullstack/test_connectivity.py
       @@ -189,6 +189,8 @@ class TestLinuxBridgeConnectivitySameNetwork(BaseConnectivitySameNetworkTest):
            ]

            def test_connectivity(self):
       +        import remote_pdb; remote_pdb.set_trace('0.0.0.0', port=44444)
       +
                self._test_connectivity()

     Please note that discovery of public IP addresses is necessary because by
     default remote_pdb will only bind to the ``127.0.0.1`` IP address. The
     above is just one possible method; there could be other ways to do this
     as well.

   When all the above changes are done, you must commit them and go to the
   `Zuul status page `_ to find the status of the tests for your
   ``Do not merge`` patch. Open the console log for your job and wait there
   until ``remote_pdb`` is started. You then need to find the IP address of
   the test node in the console log. This is necessary to connect via
   ``telnet`` and start debugging. It will be something like::

     RemotePdb session open at 172.99.68.50:44444, waiting for connection ...

   An example of such a ``Do not merge`` patch described above can be found at
   ``_.

   Please note that after adding new packages to the ``test-requirements.txt``
   file, the ``requirements-check`` job for your test patch will fail, but
   that is not important for debugging.

2. If root access to the test node is necessary, for example, to check if VMs
   have really been spawned, or if router/dhcp namespaces have been configured
   properly, etc., you can ask a member of the infra-team to hold the job for
   troubleshooting. You can ask someone to help with that on the
   ``openstack-infra`` IRC channel. In that case, the infra-team will need to
   add your SSH key to the test node, and configure things so that if the job
   fails, the node will not be destroyed. You will then be able to SSH to it
   and debug things further. Please remember to tell the infra-team when you
   finish debugging so they can unlock and destroy the node being held.

The above two solutions can be used together. For example, you should be able
to connect to the test node with both methods:

* using ``remote_pdb`` to connect via ``telnet``;
* using ``SSH`` to connect as root to the test node.

You can then ask the infra-team to add your key to the specific node on which
you have already started your ``remote_pdb`` session.
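Once the ``RemotePdb session open`` line appears in the console log, you can
attach to the session with any telnet client. As an illustrative sketch only
(the IP address and port below are the hypothetical values from the console
log example above), you can also attach using Python's standard library::

    import telnetlib

    # Attach the local terminal to the remote Pdb session opened by the
    # failing test; IP/port must be taken from the job's console log.
    conn = telnetlib.Telnet('172.99.68.50', 44444)
    conn.interact()

From there the usual Pdb commands (``list``, ``pp``, ``continue``, ...) work
as in a local debugging session.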
Root Causing a Gate Failure
---------------------------

Time-based identification, i.e. find the naughty patch by log scavenging.

.. _elastic-recheck-query:

Filing An Elastic Recheck Query
-------------------------------

The `elastic recheck `_ page has all the current open ER queries. To file one,
please see the `ER Wiki `_.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/policies/gerrit-recheck.rst0000644000175000017500000000306000000000000027020 0ustar00coreycorey00000000000000Recheck Failed CI jobs in Neutron
=================================

This document provides guidelines on what to do in case your patch fails one
of the Jenkins CI jobs. In order to discover potential bugs hidden in the code
or tests themselves, it's very helpful to check failed scenarios to
investigate the cause of the failure. Sometimes the failure will be caused by
the patch being tested, while other times the failure can be caused by a
previously untracked bug. Such failures are usually related to tests that
interact with a live system, like functional, fullstack and tempest jobs.

Before issuing a recheck on your patch, make sure that the gate failure is not
caused by your patch. A failed job can also be caused by an infra issue, for
example being unable to fetch things from external resources like git or pip
due to an outage. Such failures outside of the OpenStack world are not worth
tracking in launchpad, and you can recheck after leaving a couple of words
about what went wrong. Data about gate stability is collected and visualized
via `Grafana `_.

Please do not recheck without providing the bug number for the failed job. For
example, do not just put an empty "recheck" comment but find the related bug
number and put a "recheck bug ######" comment instead. If a bug does not exist
yet, create one so other team members can have a look. It helps us maintain
better visibility of gate failures. You can find how to troubleshoot gate
failures in the :ref:`Gate Failure Triage ` documentation.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/policies/index.rst0000644000175000017500000000212000000000000025225 0ustar00coreycorey00000000000000..
      Copyright 2014 Hewlett-Packard Development Company, L.P.

      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.

Neutron Policies
================

In the Policies Guide, you will find documented policies for developing with
Neutron. This includes the processes we use for blueprints and specs, bugs,
contributor onboarding, core reviewer memberships, and other procedural items.
.. toctree::
   :maxdepth: 3

   blueprints
   bugs
   contributor-onboarding
   neutron-teams
   gate-failure-triage
   code-reviews
   release-checklist
   thirdparty-ci
   gerrit-recheck
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/policies/neutron-teams.rst0000644000175000017500000004166300000000000026736 0ustar00coreycorey00000000000000.. _neutron_teams:

======================
Neutron Team Structure
======================

Neutron Core Reviewers
======================

The `Neutron Core Reviewer Team `_ is responsible for many things related to
Neutron. A lot of these things include mundane tasks such as the following:

* Ensuring the bug count is low
* Curating the gate and triaging failures
* Working on integrating shared code from projects such as Oslo
* Ensuring documentation is up to date and remains relevant
* Ensuring the level of testing for Neutron is adequate and remains relevant
  as features are added
* Helping new contributors with questions as they peel back the covers of
  Neutron
* Answering questions and participating in mailing list discussions
* Interfacing with other OpenStack teams and ensuring all are moving in the
  same direction
* Reviewing and merging code into the neutron tree

In essence, core reviewers share the following common ideals:

1. They share responsibility in the project's success.
2. They have made a long-term, recurring time investment to improve the
   project.
3. They spend their time doing what needs to be done to ensure the project's
   success, not necessarily what is the most interesting or fun.

A core reviewer's responsibility doesn't end with merging code. The above
lists add context around these responsibilities.

.. _core-review-hierarchy:

Core Review Hierarchy
---------------------

As Neutron has grown in complexity, it has become impossible for any one
person to know enough to merge changes across the entire codebase. Areas of
expertise have developed organically, and it is not uncommon for existing
cores to defer to these experts when changes are proposed. Existing cores
should be aware of the implications when they do merge changes outside the
scope of their knowledge. It is with this in mind that we propose a new system
built around Lieutenants through a model of trust.

In order to scale development and responsibility in Neutron, we have adopted a
Lieutenant system. The PTL is the leader of the Neutron project, and
ultimately responsible for decisions made in the project. The PTL has
designated Lieutenants in place to help run portions of the Neutron project.
The Lieutenants are in charge of their own areas, and they can propose core
reviewers for their areas as well. The core reviewer addition and removal
policies are described below. The Lieutenants for each system, while
responsible for their area, ultimately report to the PTL. The PTL may opt to
have regular one-on-one meetings with the lieutenants. The PTL will resolve
disputes in the project that arise between areas of focus, core reviewers, and
other projects. Please note Lieutenants should be leading their own area of
focus, not doing all the work themselves.

As was mentioned in the previous section, a core's responsibilities do not end
with merging code. They are responsible for bug triage and gate issues among
other things. Lieutenants have an increased responsibility to ensure gate and
bug triage for their area of focus is under control.
Neutron Lieutenants
~~~~~~~~~~~~~~~~~~~

The following are the current Neutron Lieutenants.

+------------------------+---------------------------+----------------------+
| Area                   | Lieutenant                | IRC nick             |
+========================+===========================+======================+
| API                    | Akihiro Motoki            | amotoki              |
+------------------------+---------------------------+----------------------+
| DB                     | Nate Johnston             | njohnston            |
+------------------------+---------------------------+----------------------+
| Built-In Control Plane | Miguel Lavalle            | mlavalle             |
+------------------------+---------------------------+----------------------+
| Client                 | Akihiro Motoki            | amotoki              |
+------------------------+---------------------------+----------------------+
| Docs                   | Akihiro Motoki            | amotoki              |
+------------------------+---------------------------+----------------------+
| Infra                  | Rodolfo Alonso Hernandez  | ralonsoh             |
|                        +---------------------------+----------------------+
|                        | YAMAMOTO Takashi          | yamamoto             |
+------------------------+---------------------------+----------------------+
| L3                     | Brian Haley               | haleyb               |
|                        +---------------------------+----------------------+
|                        | Miguel Lavalle            | mlavalle             |
|                        +---------------------------+----------------------+
|                        | Yulong Liu                | liuyulong            |
+------------------------+---------------------------+----------------------+
| Testing                | Rodolfo Alonso Hernandez  | ralonsoh             |
+------------------------+---------------------------+----------------------+

Some notes on the above:

* "Built-In Control Plane" means the L2 agents, DHCP agents, SGs, metadata
  agents and ML2.
* The client includes commands installed server side.
* L3 includes the L3 agent, DVR, Dynamic routing and IPAM.
* Note these areas may change as the project evolves due to code refactoring,
  new feature areas, and libification of certain pieces of code.
* Infra means interactions with infra from a neutron perspective.

.. _subproject_lieutenants:

Sub-project Lieutenants
~~~~~~~~~~~~~~~~~~~~~~~

Neutron also consists of several plugins, drivers, and agents that are
developed effectively as sub-projects within Neutron in their own git
repositories. Lieutenants are also named for these sub-projects to identify a
clear point of contact and leader for that area. The Lieutenant is also
responsible for updating the core review team for the sub-project's
repositories.
+-------------------------+-----------------------------+-------------------+
| Area                    | Lieutenant                  | IRC nick          |
+=========================+=============================+===================+
| networking-bgpvpn /     | Lajos Katona                | lajoskatona       |
| networking-bagpipe      +-----------------------------+-------------------+
|                         | Thomas Morin                | tmorin            |
+-------------------------+-----------------------------+-------------------+
| neutron-dynamic-routing | Ryan Tidwell                | tidwellr          |
+-------------------------+-----------------------------+-------------------+
| neutron-fwaas           | Nate Johnston               | njohnston         |
+-------------------------+-----------------------------+-------------------+
| neutron-vpnaas          | YAMAMOTO Takashi            | yamamoto          |
|                         +-----------------------------+-------------------+
|                         | Dongcan Ye                  | yedongcan         |
+-------------------------+-----------------------------+-------------------+
| networking-midonet      | Ryu Ishimoto                | ryu25             |
|                         +-----------------------------+-------------------+
|                         | YAMAMOTO Takashi            | yamamoto          |
+-------------------------+-----------------------------+-------------------+
| networking-odl          | Lajos Katona                | lajoskatona       |
+-------------------------+-----------------------------+-------------------+
| networking-ovn          | Lucas Alvares Gomes         | lucasagomes       |
+-------------------------+-----------------------------+-------------------+
| networking-sfc          | Dharmendra Kushwaha         | dkushwaha         |
+-------------------------+-----------------------------+-------------------+

Existing Core Reviewers
-----------------------

Existing core reviewers have been reviewing code for a varying number of
cycles. With the new plan of Lieutenants and ownership, it's fair to try to
understand how they fit into the new model. Existing core reviewers seem to
mostly focus on particular areas and are cognizant of their own strengths and
weaknesses. These members may not be experts in all areas, but know their
limits, and will not exceed those limits when reviewing changes outside their
area of expertise. The model is built on trust, and when that trust is broken,
responsibilities will be taken away.

Lieutenant Responsibilities
---------------------------

In the hierarchy of Neutron responsibilities, Lieutenants are expected to
partake in the following additional activities compared to other core
reviewers:

* Ensuring feature requests for their areas have adequate testing and
  documentation coverage.
* Gate triage and resolution. Lieutenants are expected to work to keep the
  Neutron gate running smoothly by triaging issues, filing elastic recheck
  queries, and closing gate bugs.
* Triaging bugs for the specific areas.

Neutron Teams
=============

Given all of the above, Neutron has a number of core reviewer teams with
responsibility over the areas of code listed below:

Neutron Core Reviewer Team
--------------------------

`Neutron core reviewers `_ have merge rights to the following git
repositories:

* `openstack/neutron `_
* `openstack/python-neutronclient `_

Please note that as we adopt the system above with core specialty in
particular areas, we expect this broad core team to shrink as people naturally
evolve into an area of specialization.

Core Reviewer Teams for Plugins and Drivers
-------------------------------------------

The plugin decomposition effort has led to having many drivers with code in
separate repositories with their own core reviewer teams.
For each one of these repositories in the following repository list, there is
a core team associated with it:

* `Neutron project team `_

These teams are also responsible for handling their own specs/RFEs/features
if they choose to use them. However, by choosing to be a part of the Neutron
project, they submit to oversight and veto by the Neutron PTL if any issues
arise.

.. _specs-core-reviewer-team:

Neutron Specs Core Reviewer Team
--------------------------------

Neutron `specs core reviewers `_ have +2 rights to the following git
repositories:

* `openstack/neutron-specs `_

The Neutron specs core reviewer team is responsible for reviewing specs
targeted to all Neutron git repositories (Neutron + Advanced Services). It is
worth noting that specs reviewers have the following attributes which are
potentially different from code reviewers:

* Broad understanding of cloud and networking technologies
* Broad understanding of core OpenStack projects and technologies
* An understanding of the effect approved specs have on the team's
  development capacity for each cycle

Specs core reviewers may match core members of the above-mentioned groups, but
the group can be extended to other individuals, if required.

.. _drivers_team:

Drivers Team
------------

The `drivers team `_ is the group of people who have full rights to the specs
repo. This team, which matches the `Launchpad Neutron Drivers team `_, is
instituted to ensure a consistent architectural vision for the Neutron
project, and to continue to disaggregate and share the responsibilities of the
Neutron PTL. The team is in charge of reviewing and commenting on
:ref:`RFEs `, and working with specification contributors to provide guidance
on the process that governs contributions to the Neutron project as a whole.
The team `meets regularly `_ to go over RFEs and discuss the project roadmap.
Anyone is welcome to join and/or read the meeting notes.

Release Team
------------

The `release team `_ is a group of people with some additional gerrit
permissions primarily aimed at allowing release management of Neutron
sub-projects. These permissions include:

* Ability to push signed tags to sub-projects whose releases are managed by
  the Neutron release team as opposed to the OpenStack release team.
* Ability to push merge commits for Neutron or other sub-projects.
* Ability to approve changes in all Neutron git repositories. This is required
  as the team needs to be able to quickly unblock things if needed, especially
  at release time.

Code Merge Responsibilities
===========================

While everyone is encouraged to review changes for these repositories, members
of the Neutron core reviewer group have the ability to +2/-2 and +A changes to
these repositories. This is an extra level of responsibility not to be taken
lightly. Correctly merging code requires not only understanding the code
itself, but also how the code affects things like documentation, testing, and
interactions with other projects. It also means you pay attention to release
milestones and understand if a patch you're merging is marked for the release,
which is especially critical during feature freeze. The bottom line here is
that merging code is a responsibility Neutron core reviewers have.

Adding or Removing Core Reviewers
---------------------------------

A new Neutron core reviewer may be proposed at any time on the
openstack-discuss mailing list.
Typically, the Lieutenant for a given area will propose a new core reviewer
for their specific area of coverage, though the Neutron PTL may propose new
core reviewers as well. The proposal is typically made after discussions with
existing core reviewers. Once a proposal has been made, three existing Neutron
core reviewers from the Lieutenant's area of focus must respond to the email
with a +1. If the member is being added by a Lieutenant from an area of focus
with fewer than three members, a simple majority will be used to determine if
the vote is successful. Another Neutron core reviewer from the same area of
focus can vote -1 to veto the proposed new core reviewer. The PTL will mediate
all disputes for core reviewer additions.

The PTL may remove a Neutron core reviewer at any time. Typically when a
member has decreased their involvement with the project through a drop in
reviews and participation in general project development, the PTL will propose
their removal and remove them. Please note there is no voting or vetoing of
core reviewer removal. Members who have previously been core reviewers may be
fast-tracked back into a core reviewer role if their involvement picks back up
and the existing core reviewers support their reinstatement.

Neutron Core Reviewer Membership Expectations
---------------------------------------------

Neutron core reviewers have the following expectations:

* Reasonable attendance at the weekly Neutron IRC meetings.
* Participation in Neutron discussions on the mailing list, as well as
  in-channel in #openstack-neutron.
* Participation in Neutron-related design summit sessions at the OpenStack
  Summits.

Please note in-person attendance at design summits, mid-cycles, and other code
sprints is not a requirement to be a Neutron core reviewer. The Neutron team
will do its best to facilitate virtual attendance at all events. Travel is not
to be taken lightly, and we realize the costs involved for those who partake
in attending these events.

In addition to the above, code reviews are the most important requirement of
Neutron core reviewers. Neutron follows the documented OpenStack `code review
guidelines `_. We encourage all people to review Neutron patches, but core
reviewers are required to maintain a level of review numbers relatively close
to other core reviewers. There are no hard statistics around code review
numbers, but in general we use 30, 60, 90 and 180 day stats when examining
review stats.

* `30 day review stats `_
* `60 day review stats `_
* `90 day review stats `_
* `180 day review stats `_

There are soft-touch items around being a Neutron core reviewer as well.
Gaining trust with the existing Neutron core reviewers is important. Being
able to work together with the existing Neutron core reviewer team is critical
as well. Being a Neutron core reviewer means spending a significant amount of
time with the existing Neutron core reviewer team on IRC, the mailing list, at
Summits, and in reviews. Ensuring you participate and engage here is critical
to becoming and remaining a core reviewer.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/policies/release-checklist.rst0000644000175000017500000001412500000000000027515 0ustar00coreycorey00000000000000Pre-release check list
======================

This page lists things to cover before a Neutron release and will serve as a
guide for future release managers.
Server
------

Major release
~~~~~~~~~~~~~

A major release is cut once per development cycle and has an assigned name
(Liberty, Mitaka, ...).

Prior to a major release,

#. consider blocking all patches that are not targeted for the new release;
#. consider blocking trivial patches to keep the gate clean;
#. revise the current list of blueprints and bugs targeted for the release;
   roll over anything that does not fit there, or won't make it (note that no
   new features land in master after the so-called feature freeze is declared
   by the release team; there is a feature freeze exception (FFE) process
   described in more detail in the release engineering documentation:
   http://docs.openstack.org/project-team-guide/release-management.html);
#. start collecting state for targeted features from the team. For example,
   propose a post-mortem patch for neutron-specs as in:
   https://review.opendev.org/#/c/286413/
#. revise deprecation warnings collected in the latest Jenkins runs: some of
   them may indicate a problem that should be fixed prior to release (see the
   deprecations.txt file in those log directories); also, check whether any
   Launchpad bugs with the 'deprecation' tag need a clean-up or a follow-up in
   the context of the release being planned;
#. check that release notes and sample configuration files render correctly,
   and arrange clean-up if needed;
#. ensure all doc links are valid by running ``tox -e linkcheck`` and
   addressing any broken links.

The new major release process contains several phases:

#. the master branch is blocked for patches that are not targeted for the
   release;
#. the whole team is expected to work on closing remaining pieces targeted for
   the release;
#. once the team is ready to release the first release candidate (RC1), either
   the PTL or one of the release liaisons proposes a patch for the
   openstack/releases repo. For example, see:
   https://review.opendev.org/#/c/292445/
#. once the openstack/releases patch lands, the release team creates a new
   stable branch using the hash values specified in the patch;
#. at this point, the master branch is open for patches targeted to the next
   release; the PTL unblocks all patches that were blocked in step 1;
#. if additional patches are identified that are critical for the release and
   must be shipped in the final major build, the corresponding bugs are tagged
   with -rc-potential in Launchpad, fixes are prepared and land in the master
   branch, and are then backported to the newly created stable branch;
#. if patches landed in the release stable branch as per the previous step, a
   new release candidate that includes those patches should be requested by
   the PTL in the openstack/releases repo;
#. eventually, the latest release candidate requested by the PTL becomes the
   final major release of the project.

The release candidate (RC) process allows for stabilization of the final
release. The following technical steps should be taken before the final
release is cut:

#. the latest alembic scripts are tagged with a milestone label. For example,
   see: https://review.opendev.org/#/c/288212/

In the new stable branch, you should make sure that:

#. the .gitreview file points to the new branch (see the example after this
   list);
#. if the branch uses constraints to manage gated dependency versions, the
   default constraints file name points to the corresponding stable branch in
   the openstack/requirements repo;
#. if the branch fetches any other projects as dependencies, e.g. by using
   tox_install.sh as an install_command in tox.ini, git repository links point
   to the corresponding stable branches of those dependency projects.
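For illustration, a stable branch ``.gitreview`` typically looks like the
following; the ``defaultbranch`` value is an example only and depends on the
series being branched::

    [gerrit]
    host=review.opendev.org
    port=29418
    project=openstack/neutron.git
    defaultbranch=stable/ussuri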
Note that some of those steps may be covered by the OpenStack release team.

In the opened master branch, you should:

#. update CURRENT_RELEASE in neutron.db.migration.cli to point to the next
   release name.

While preparing the next release and even in the middle of development, it's
worth keeping the infrastructure clean. Consider using these tools to
declutter the project infrastructure:

#. declutter Gerrit::

     /tools/abandon_old_reviews.sh

#. declutter Launchpad::

     /pre_expire_bugs.py neutron --day

Minor release
~~~~~~~~~~~~~

A minor release is created from an existing stable branch after the initial
major release, and usually contains bug fixes and small improvements only. The
minor release frequency should follow the release schedule for the current
series. For example, assuming the current release is Rocky, stable branch
releases should coincide with milestones R1, R2, R3 and the final release.

Stable branches can also be released more frequently if needed, for example,
if there is a major bug fix that has merged recently.

The following steps should be taken before claiming a successful minor
release:

#. a patch for the openstack/releases repo is proposed and merged.

The minor version number should always be bumped when a new release contains a
patch which introduces, for example:

#. a new OVO version for an object,
#. a new configuration option,
#. a requirements change,
#. an API-visible change.

The above list doesn't cover all possible cases. Those are only examples of
fixes which require a bump of the minor version number, but there can also be
other types of changes requiring the same. Changes that require the minor
version number to be bumped should always have a release note added. In other
cases, only the patch number needs to be bumped.

Client
------

Most tips from the Server section apply to client releases too. Several things
to note though:

#. when preparing for a major release, pay special attention to client bits
   that are targeted for the release. The global openstack/requirements freeze
   happens long before the first RC release of server components. So if you
   plan to land server patches that depend on a new client, make sure you
   don't miss the requirements freeze. After the freeze is in action, there is
   no easy way to land more client patches for the planned target. All this
   may push an affected feature to the next development cycle.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/policies/thirdparty-ci.rst0000644000175000017500000001330400000000000026707 0ustar00coreycorey00000000000000Neutron Third-party CI
======================

What Is Expected of Third Party CI System for Neutron
-----------------------------------------------------

As of the Liberty summit, Neutron no longer *requires* a third-party CI, but
it is strongly encouraged, as internal neutron refactoring can break external
plugins and drivers at any time.

Neutron expects any Third Party CI system that interacts with gerrit to follow
the requirements set by the Infrastructure team [1]_ as well as the Neutron
Third Party CI guidelines below. Please ping the PTL in #openstack-neutron or
send an email to the openstack-discuss ML (with subject [neutron]) with any
questions. Be aware that the Infrastructure documentation as well as this
document are living documents and undergo changes. Track changes to the
infrastructure documentation using this url [2]_ (and please review the
patches) and check this doc on a regular basis for updates.
What Changes to Run Against
---------------------------

If your code is a neutron plugin or driver, you should run against every
neutron change submitted, except for docs, tests, tools, and top-level setup
files. You can skip your CI runs for such exceptions by using ``skip-if`` and
``all-files-match-any`` directives in Zuul. You can see a programmatic example
of the exceptions here [3]_.

If your code is in a neutron-\*aas repo, you should run against the tests for
that repo. You may also run against every neutron change, if your service
driver is using neutron interfaces that are not provided by your service
plugin (e.g. firewall/fwaas_plugin_v2.py). If you are using only plugin
interfaces, it should be safe to test against only the service repo tests.

What Tests To Run
-----------------

Network API tests (git link). Network scenario tests (the test_network_* tests
here). Any tests written specifically for your setup.

http://opendev.org/openstack/tempest/tree/tempest/api/network

Run with the test filter: 'network'. This will include all neutron-specific
tests as well as any other tests that are tagged as requiring networking. An
example tempest setup for devstack-gate::

    export DEVSTACK_GATE_NEUTRON=1
    export DEVSTACK_GATE_TEMPEST_REGEX='(?!.*\[.*\bslow\b.*\])((network)|(neutron))'

Third Party CI Voting
---------------------

The Neutron team encourages you to NOT vote -1 with a third-party CI. False
negatives are noisy to the community, and have given -1 votes from third-party
CIs a bad reputation. Really bad, to the point of people ignoring them all.
Failure messages are useful to those doing refactors, and provide you feedback
on the state of your plugin.

If you insist on voting, note that by default the infra team will not allow
voting by new third-party CI systems. The way to get your third-party CI
system to vote is to talk with the Neutron PTL, who will let infra know the
system is ready to vote. The requirements for a new system to be given voting
rights are as follows:

* A new system must be up and running for a month, with a track record of
  voting on the sandbox system.
* A new system must correctly run and pass tests on patches for the third
  party driver/plugin for a month.
* A new system must have logfile and retention setups similar to the one
  described below.

Once the system has been running for a month, the owner of the third party CI
system can contact the Neutron PTL to have a conversation about getting voting
rights upstream.

The general process to get these voting rights is outlined here. Please follow
that, taking note of the guidelines Neutron also places on voting for its CI
systems.

A third party system can have its voting rights removed as well. If the system
becomes unstable (stops running, stops voting, or starts providing inaccurate
results), the Neutron PTL or any core reviewer will make an attempt to contact
the owner and copy the openstack-discuss mailing list. If no response is
received within 2 days, the Neutron PTL will remove voting rights for the
third party CI system. If a response is received, the owner will work to
correct the issue. If the issue cannot be addressed in a reasonable amount of
time, the voting rights will be temporarily removed.

Log & Test Results Filesystem Layout
------------------------------------

Third-Party CI systems MUST provide logs and configuration data to help
developers troubleshoot test failures. A third-party CI that DOES NOT post
logs should be a candidate for removal, and new CI systems MUST post logs
before they can be awarded voting privileges.
Third party CI systems should follow the filesystem layout convention of the
OpenStack CI system. Please store your logs so they are viewable in a web
browser, in a directory structure. Requiring the user to download a giant
tarball is not acceptable, and will be grounds for not allowing your system to
vote from the start, or for cancelling its voting rights if this changes while
the system is running.

At the root of the results, there should be the following:

* console.html.gz - contains the output of stdout of the test run
* local.conf / localrc - contains the setup used for this run
* logs - contains the detailed test logs of the test run

The above "logs" must be a directory, which contains the following:

* Log files for each screen session that DevStack creates and launches an
  OpenStack component in
* Test result files
* testr_results.html.gz
* tempest.txt.gz

List of existing plugins and drivers
------------------------------------

https://wiki.openstack.org/wiki/Neutron_Plugins_and_Drivers#Existing_Plugin_and_Drivers

References
----------

.. [1] http://ci.openstack.org/third_party.html
.. [2] https://review.opendev.org/#/q/status:open+project:openstack-infra/system-config+branch:master+topic:third-party,n,z
.. [3] https://github.com/openstack-infra/project-config/blob/master/dev/zuul/layout.yaml
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.1550434
neutron-16.0.0.0b2.dev214/doc/source/contributor/stadium/0000755000175000017500000000000000000000000023230 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/stadium/governance.rst0000644000175000017500000003535100000000000026120 0ustar00coreycorey00000000000000..
      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.

      Convention for heading levels in Neutron devref:
      =======  Heading 0 (reserved for the title in a document)
      -------  Heading 1
      ~~~~~~~  Heading 2
      +++++++  Heading 3
      '''''''  Heading 4
      (Avoid deeper levels because they do not render well.)

Stadium Governance
==================

Background
----------

Neutron grew to become a big monolithic codebase, and its core team had a
tough time making progress on a number of fronts, like adding new features,
ensuring stability, etc. During the Kilo timeframe, a decomposition effort
started, where the codebase got disaggregated into separate repos, like the
`high level services `_, and the various third-party solutions for
`L2 and L3 services `_, and the Stadium was officially born.

These initiatives gave the various individual teams in charge of the smaller
projects the opportunity to iterate faster and reduce the time to feature.
This has been due to the increased autonomy and implicit trust model that made
the lack of oversight of the PTL and the Neutron drivers/core team acceptable
for a small number of initiatives.
When the proposed `arrangement `_ allowed projects to be `automatically `_
enlisted as a Neutron project based simply on description and desire for
affiliation, the number of projects included in the Stadium started to grow
rapidly, which created a number of challenges for the PTL and the drivers
team. In fact, it became harder and harder to ensure consistency in the APIs,
architecture, design, implementation and testing of the overarching project;
all aspects of software development, like documentation, integration, release
management, maintenance, and upgrades started being neglected for some
projects, and that led to some unhappy experiences.

The point about uniform APIs is particularly important, because the Neutron
platform is so flexible that a project can take a totally different turn in
the way it exposes functionality, making it virtually impossible for the PTL
and the drivers team to ensure that good API design principles are being
followed over time. In a situation where each project is on its own, that
might be acceptable, but allowing independent API evolution while still under
the Neutron umbrella is counterproductive.

These challenges led the Neutron team to find a better balance between
autonomy and consistency and lay down criteria that more clearly identify when
a project can be eligible for inclusion in the `Neutron governance `_.

This document describes these criteria, documents the steps involved in
maintaining the integrity of the Stadium, and explains how to ensure this
integrity is maintained over time when modifications to the governance are
required.

When is a project considered part of the Stadium?
-------------------------------------------------

In order to be considered part of the Stadium, a project must show a track
record of alignment with the Neutron `core project `_. This means showing
proof of adoption of practices as led by the Neutron core team. Some of these
practices are typically already followed by the most mature OpenStack
projects:

* Exhaustive documentation: it is expected that each project will have a
  :doc:`developer `, :doc:`user/operator ` and `API `_ documentation
  available.

* Exhaustive OpenStack CI coverage: unit, functional, and tempest coverage
  using OpenStack CI (upstream) resources so that `Grafana `_ and
  `OpenStack Health `_ support is available. Access to CI resources and
  historical data by the team is key to ensuring stability and robustness of a
  project. In particular, it is of paramount importance to ensure that DB
  models/migrations are tested functionally to prevent data inconsistency
  issues or unexpected DB logic errors due to schema/models mismatch. For more
  details, please look at the following resources:

  * https://review.opendev.org/#/c/346091/
  * https://review.opendev.org/#/c/346272/
  * https://review.opendev.org/#/c/346083/

  More Database related information can be found on:

  * :doc:`/contributor/alembic_migrations`
  * :doc:`/contributor/internals/db_layer`

  Bear in mind that many projects have been transitioning their codebase and
  tests to fully support Python 3+, and it is important that each Stadium
  project supports Python 3+ the same way Neutron core does. For more
  information on how to do testing, please refer to the
  :doc:`Neutron testing documentation `.

* Good release footprint, according to the chosen `release model `_.

* Adherence to deprecation and `stable backports policies `_.

* Demonstrated ability to do `upgrades `_ and/or `rolling upgrades `_, where
  applicable.
  This means having grenade support on top of the CI coverage as described
  above.

* Client bindings and CLI developed according to the OpenStack Client
  `plugin model `_.

On top of the above mentioned criteria, the following also are taken into
consideration:

* A project must use, adopt and implement open software and technologies.

* A project must integrate with Neutron via one of the supported, advertised
  and maintained public Python APIs. REST API does not qualify (the project
  python-neutronclient is an exception).

* It adopts neutron-lib (with related hacking rules applied), and has proof of
  good decoupling from Neutron core internals.

* It provides an API that adopts API guidelines as set by the Neutron core
  team, and that relies on an open implementation.

* It adopts modular interfaces to provide networking services: this means that
  L2/7 services are provided in the form of ML2 mech drivers and service
  plugins respectively. A service plugin can expose a driver interface to
  support multiple backend technologies, and/or adopt the flavor framework as
  necessary.

.. _add-remove-projects-to-stadium:

Adding or removing projects to the Stadium
------------------------------------------

When a project is to be considered part of the Stadium, proof of compliance
with the aforementioned practices will have to be demonstrated, typically for
at least two OpenStack releases. Application for inclusion is to be considered
only within the first milestone of each OpenStack cycle, which is the time
when the PTL and Neutron team do release planning, and have the most time
available to discuss governance issues.

Projects that are part of the Neutron Stadium typically have the first
milestone to get their house in order, during which time reassessment happens;
if removed because of a substantial failure to meet the criteria, a project
cannot reapply within the same release cycle in which it was evicted.

The process for proposing a repo into openstack/ and under the Neutron
governance is to propose a patch to the openstack/governance repository. For
example, to propose networking-foo, one would add the following entry under
Neutron in reference/projects.yaml::

    - repo: openstack/networking-foo
      tags:
        - name: release:independent

Typically this is a patch that the PTL, in collaboration with the project's
point of contact, will shepherd through the review process. This step is
undertaken once it is clear that all criteria are met. The next section
provides an informal checklist that shows what steps a project needs to go
through in order to enable the PTL and the TC to vote positively on the
proposed inclusion.

Once a project is included, it abides by the Neutron :doc:`RFE submission
process `, where specifications to neutron-specs are required for major API as
well as major architectural changes that may require core Neutron platform
enhancements.

Checklist
---------

* How to integrate documentation into docs.o.o: The documentation website has
  a section for `project developer documentation `_. Each project in the
  Neutron Stadium must have an entry under the 'Networking Sub Projects'
  section that points to the developer documentation for the project,
  available at ``https://docs.openstack.org//latest/``. This is a two-step
  process that involves the following:

  * Build the artefacts: this can be done by following example
    https://review.opendev.org/#/c/293399/.
  * Publish the artefacts: this can be done by following example
    https://review.opendev.org/#/c/216448/.

  More information can also be found on the `project creator guide `_.
* How to integrate into Grafana: Grafana is a great tool that provides the
  ability to display historical series, like failure rates of OpenStack CI
  jobs. A few examples that added dashboards over time are:

  * `Neutron `_.
  * `Networking-OVN `_.
  * `Networking-Midonet `_.

  Any subproject must have a Grafana dashboard that shows failure rates for at
  least the Gate and Check queues.

* How to integrate into neutron-lib's CI: there are a number of steps required
  to integrate with neutron-lib CI and adopt neutron-lib in general. One step
  is to validate that neutron-lib master is working with the master of a given
  project that uses neutron-lib. For example, `patch `_ introduced such
  support for the Neutron project. Any subproject that wants to do the same
  would need to adopt the following few lines:

  #. https://review.opendev.org/#/c/338603/4/jenkins/jobs/projects.yaml@4685
  #. https://review.opendev.org/#/c/338603/3/zuul/layout.yaml@8501
  #. https://review.opendev.org/#/c/338603/4/grafana/neutron.yaml@39

  Lines 1 and 2 respectively add a job to the periodic queue for the project,
  whereas line 3 introduces the failure rate trend for the periodic job to
  spot failure spikes, etc. Make sure your project has the following:

  #. https://review.opendev.org/#/c/357086/
  #. https://review.opendev.org/#/c/359143/

* How to port api-ref over to neutron-lib: to publish the subproject API
  reference into the `Networking API guide `_, you must contribute the API
  documentation into neutron-lib's api-ref directory as done in the
  `WADL/REST transition patch `_. Once this is done successfully, a link to
  the subproject API will show under the published `table of content `_. An
  RFE bug tracking this effort effectively initiates the request for Stadium
  inclusion, where all the aspects as outlined in this document are reviewed
  by the PTL.

* How to port API definitions over to neutron-lib: the most basic steps to
  port API definitions over to neutron-lib are demonstrated in the following
  patches:

  * https://review.opendev.org/#/c/353131/
  * https://review.opendev.org/#/c/353132/

  The `neutron-lib patch `_ introduces the elements that define the API, and
  testing coverage validates that the resource and actions maps use valid
  keywords. API reference documentation is provided alongside the definition
  to keep everything in one place. The `neutron patch `_ uses the Neutron
  extension framework to plug the API definition on top of the Neutron API
  backbone. The change can only merge when there is a released version of
  neutron-lib.

* How to integrate into the openstack release: every project in the Stadium
  must have release notes. In order to set up release notes, please see the
  patches below for an example on how to set up reno:

  * https://review.opendev.org/#/c/320904/
  * https://review.opendev.org/#/c/243085/

  For release documentation related to Neutron, please check the
  :doc:`/contributor/policies/index`. Once everything is set up and your
  project is released, make sure you see an entry on the release page
  (e.g. `Pike `_). Make sure you release according to the project's declared
  release `model `_.

* How to port OpenStack Client over to python-neutronclient: client API
  bindings and client command line interface support must be developed in
  python-neutronclient under the `osc module `_.
  If your project requires one or both, consider looking at the following
  examples of how to contribute these to python-neutronclient according to the
  OSC framework and guidelines:

  * https://review.opendev.org/#/c/340624/
  * https://review.opendev.org/#/c/340763/
  * https://review.opendev.org/#/c/352653/

  More information on how to develop python-openstackclient plugins can be
  found at the following links:

  * https://docs.openstack.org/python-openstackclient/latest/contributor/plugins.html
  * https://docs.openstack.org/python-openstackclient/latest/contributor/humaninterfaceguide.html

  It is worth prefixing the commands being added with the keyword `network `_
  to avoid a potential clash with other commands with similar names. This is
  only required if the command object name is highly likely to have an
  ambiguous meaning.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/stadium/guidelines.rst0000644000175000017500000002254300000000000026120 0ustar00coreycorey00000000000000..
      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.

      Convention for heading levels in Neutron devref:
      =======  Heading 0 (reserved for the title in a document)
      -------  Heading 1
      ~~~~~~~  Heading 2
      +++++++  Heading 3
      '''''''  Heading 4
      (Avoid deeper levels because they do not render well.)

Sub-Project Guidelines
======================

This document provides guidance for those who maintain projects that consume
the main neutron or neutron advanced services repositories as a dependency. It
is not meant to describe projects that are not tightly coupled with Neutron
code.

Code Reuse
----------

At all times, avoid using any Neutron symbols that are explicitly marked as
private (those have an underscore at the start of their names). Try to avoid
copy-pasting code from Neutron to extend it. Instead, rely on the enormous
number of plugin entry points provided by Neutron (L2 agent extensions, API
extensions, service plugins, core plugins, ML2 mechanism drivers, etc.)

Requirements
------------

Neutron dependency
~~~~~~~~~~~~~~~~~~

Subprojects usually depend on neutron repositories, by using the
-e https://... scheme to define such a dependency. The dependency *must not*
be present in requirements lists though, and instead belongs in the tox.ini
deps section. This is because future pbr library releases do not guarantee
that -e https://... dependencies will work. You may still put some versioned
neutron dependency in your requirements list to indicate the dependency for
anyone who packages your subproject.

Explicit dependencies
~~~~~~~~~~~~~~~~~~~~~

Each neutron project maintains its own lists of requirements. Subprojects that
depend on neutron while directly using some of those libraries that neutron
maintains as its dependencies must not rely on the fact that neutron will pull
the needed dependencies for them. Direct library usage requires that this
library is mentioned in the requirements lists of the subproject, as
illustrated below.
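For instance, a subproject that imports ``oslo.config`` directly would list it
in its own requirements file rather than relying on neutron to pull it in; the
version specifier here is illustrative only::

    # requirements.txt of the subproject
    oslo.config>=5.2.0  # Apache-2.0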
The reason to duplicate those dependencies is that the neutron team does not
stick to any backwards compatibility strategy with regard to requirements
lists, and is free to drop any of those dependencies at any time, breaking
anyone who relies on those libraries being pulled in by neutron itself.

Automated requirements updates
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

At all times, subprojects that use neutron as a dependency should make sure
their dependencies do not conflict with neutron's. Core neutron projects
maintain their requirements lists by utilizing a so-called proposal bot. To
keep your subproject in sync with neutron, it is highly recommended that you
register your project in the openstack/requirements:projects.txt file to
enable the bot to update requirements for you.

Once a subproject opts in to global requirements synchronization, it should
enable check-requirements jobs in project-config. For example, see
`this patch `_.

Stable branches
---------------

Stable branches for subprojects should be created at the same time as the
corresponding neutron stable branches. This is to avoid situations where a
postponed cut-off results in a stable branch that contains some patches that
belong to the next release. This would require reverting patches, and this is
something you should avoid.

Make sure your neutron dependency uses the corresponding stable branch of
neutron, not master.

Note that to keep requirements in sync with core neutron repositories in
stable branches, you should make sure that your project is registered in
openstack/requirements:projects.txt *for the branch in question*.

Subproject stable branches are supervised by the horizontal
`neutron-stable-maint team `_.

More info on the stable branch process can be found on
`the following page `_.

Stable merge requirements
-------------------------

Merges into stable branches are handled by members of the
`neutron-stable-maint gerrit group `_. The reason for this is to ensure
consistency among stable branches, and compliance with policies for stable
backports.

For sub-projects who participate in the Neutron Stadium effort and who also
create and utilize stable branches, there is an expectation around what is
allowed to be merged in these stable branches. The Stadium projects should be
following the stable branch policies as defined on the
`Stable Branch wiki `_. This means that, among other things, no features are
allowed to be backported into stable branches.

.. _guideline-releases:

Releases
--------

It is suggested that sub-projects cut new releases from time to time,
especially for stable branches. It will make the life of packagers and other
consumers of your code easier.

Sub-Project Release Process
~~~~~~~~~~~~~~~~~~~~~~~~~~~

All subproject releases are managed by the
`global OpenStack Release Managers team `_. The `neutron-release team `_
handles only the following operations:

* Making stable branches end of life

To release a sub-project, take the following steps:

* For projects which have not moved to post-versioning, we need to push an
  alpha tag to avoid pbr complaining. A member of the neutron-release group
  will handle this.
* A sub-project owner should modify setup.cfg to remove the version (if you
  have one), which moves your project to post-versioning, similar to all the
  other Neutron projects. You can skip this step if you don't have a version
  in setup.cfg.
* A sub-project owner `proposes `_ a patch to the openstack/releases
  repository with the intended git hash.
  `The Neutron release liaison `_ should be added in Gerrit to the list of
  reviewers for the patch.

  .. note::

     New major tag versions should conform to `SemVer `_ requirements, meaning
     no year numbers should be used as a major version. The switch to SemVer
     is advised at the earliest convenience for all new major releases.

  .. note::

     Before Ocata, when releasing the very first release in a stable series, a
     sub-project owner would need to request a new stable branch creation
     during Gerrit review, but not anymore.
     `See the following email for more details `_.

* The Neutron release liaison votes with +1 for the openstack/releases patch.
* The releases will now be on PyPI. A sub-project owner should verify this by
  going to an URL similar to `this `_.
* A sub-project owner should next go to Launchpad and release this version
  using the "Release Now" button for the release itself.
* If a sub-project uses the "delay-release" option, a sub-project owner should
  update any bugs that were fixed with this release to "Fix Released" in
  Launchpad. This step is not necessary if the sub-project uses the
  "direct-release" option, which is the default. [#jeepyb_release_options]_
* The new release will be available on `OpenStack Releases `_.
* A sub-project owner should add the next milestone to the Launchpad series,
  or if a new series is required, create the new series and a new milestone.

.. note::

   You need to be careful when picking a git commit to base new releases on.
   In most cases, you'll want to tag the *merge* commit that merges your last
   commit into the branch. `This bug`__ shows an instance where this mistake
   was caught. Notice the difference between the `incorrect commit`__ and the
   `correct one`__ which is the merge commit.
   ``git log 6191994..22dd683 --oneline`` shows that the first one misses a
   handful of important commits that the second one catches. This is the
   nature of merging to master.

.. __: https://bugs.launchpad.net/neutron/+bug/1540633
.. __: https://github.com/openstack/networking-infoblox/commit/6191994515
.. __: https://github.com/openstack/networking-infoblox/commit/22dd683e1a

To make a branch end of life, follow the following steps:

* A member of neutron-release will abandon all open change reviews on the
  branch.
* A member of neutron-release will push an EOL tag on the branch.
  (e.g. "icehouse-eol")
* A sub-project owner should request the infrastructure team to delete the
  branch by sending an email to the infrastructure mailing list, not by
  bothering the infrastructure team on IRC.
* A sub-project owner should tweak jenkins jobs in project-config if any
  exist.

References
~~~~~~~~~~

.. [#jeepyb_release_options] http://lists.openstack.org/pipermail/openstack-dev/2015-December/081724.html
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/stadium/index.rst0000644000175000017500000000342400000000000025074 0ustar00coreycorey00000000000000.. _neutron_stadium:

..
      Copyright 2014 Hewlett-Packard Development Company, L.P.

      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied.
   See the License for the specific language governing permissions and
   limitations under the License.

Neutron Stadium
===============

This section contains information on policies and procedures for the so-called Neutron Stadium. The Neutron Stadium is the list of projects that show up in the OpenStack `Governance Document `_.

The list includes projects that the Neutron PTL and core team are directly involved in, and manage on a day to day basis. To do so, the PTL and team ensure that common practices and guidelines are followed throughout the Stadium, for all aspects that pertain to software development, from inception, to coding, testing, documentation and more.

The Stadium is not intended to be a VIP club for OpenStack networking projects, or an upper tier within OpenStack. It is simply the list of projects the Neutron team and PTL claim responsibility for when producing Neutron deliverables throughout the release `cycles `_.

For more details on the Stadium, and what it takes for a project to be considered an integral part of the Stadium, please read on.

.. toctree::
   :maxdepth: 3

   governance
   guidelines

././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.1550434
neutron-16.0.0.0b2.dev214/doc/source/contributor/testing/0000755000175000017500000000000000000000000023237 5ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/testing/ci_scenario_jobs.rst0000644000175000017500000003375600000000000027276 0ustar00coreycorey00000000000000

.. Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain
   a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

   Convention for heading levels in Neutron devref:
   =======  Heading 0 (reserved for the title in a document)
   -------  Heading 1
   ~~~~~~~  Heading 2
   +++++++  Heading 3
   '''''''  Heading 4
   (Avoid deeper levels because they do not render well.)

Neutron jobs running in Zuul CI
===============================

Tempest jobs running in Neutron CI
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In upstream Neutron CI there are various tempest and neutron-tempest-plugin jobs running. Each of those jobs runs with a slightly different configuration of Neutron services. Below is a summary of those jobs.
::

  +----------------------------------------------+----------------------------------+---------+-------+-------------+-----------------+----------+-------+--------+------------+-------------+
  | Job name                                     | Run tests                        | python  | nodes | L2 agent    | firewall        | L3 agent | L3 HA | L3 DVR | enable_dvr | Run in gate |
  |                                              |                                  | version |       |             | driver          | mode     |       |        |            | queue       |
  +==============================================+==================================+=========+=======+=============+=================+==========+=======+========+============+=============+
  |neutron-tempest-plugin-api                    |neutron_tempest_plugin.api        | 3.6     | 1     | openvswitch | openvswitch     | legacy   | False | False  | True       | Yes         |
  +----------------------------------------------+----------------------------------+---------+-------+-------------+-----------------+----------+-------+--------+------------+-------------+
  |neutron-tempest-plugin-designate-scenario     |neutron_tempest_plugin.scenario.\ | 3.6     | 1     | openvswitch | openvswitch     | legacy   | False | False  | True       | No          |
  |                                              |test_dns_integration              |         |       |             |                 |          |       |        |            |             |
  +----------------------------------------------+----------------------------------+---------+-------+-------------+-----------------+----------+-------+--------+------------+-------------+
  |neutron-tempest-plugin-dvr-multinode-scenario |neutron_tempest_plugin.scenario   | 3.6     | 2     | openvswitch | openvswitch     | dvr_snat | False | True   | True       | No          |
  |(non-voting)                                  |                                  |         |       |             |                 | dvr_snat |       |        |            |             |
  +----------------------------------------------+----------------------------------+---------+-------+-------------+-----------------+----------+-------+--------+------------+-------------+
  |neutron-tempest-plugin-scenario-linuxbridge   |neutron_tempest_plugin.scenario   | 3.6     | 1     | linuxbridge | iptables        | legacy   | False | False  | False      | Yes         |
  +----------------------------------------------+----------------------------------+---------+-------+-------------+-----------------+----------+-------+--------+------------+-------------+
  |neutron-tempest-plugin-scenario-openvswitch   |neutron_tempest_plugin.scenario   | 3.6     | 1     | openvswitch | openvswitch     | legacy   | False | False  | False      | Yes         |
  +----------------------------------------------+----------------------------------+---------+-------+-------------+-----------------+----------+-------+--------+------------+-------------+
  |neutron-tempest-plugin-scenario-openvswitch-\ |neutron_tempest_plugin.scenario   | 3.6     | 1     | openvswitch | iptables_hybrid | legacy   | False | False  | False      | Yes         |
  | iptables_hybrid                              |                                  |         |       |             |                 |          |       |        |            |             |
  +----------------------------------------------+----------------------------------+---------+-------+-------------+-----------------+----------+-------+--------+------------+-------------+
  |tempest-integrated-networking                 |tempest.api (without slow tests)  | 3.6     | 1     | openvswitch | openvswitch     | legacy   | False | False  | True       | Yes         |
  |                                              |tempest.scenario                  |         |       |             |                 |          |       |        |            |             |
  |                                              |(only tests related to            |         |       |             |                 |          |       |        |            |             |
  |                                              |Neutron and Nova)                 |         |       |             |                 |          |       |        |            |             |
  +----------------------------------------------+----------------------------------+---------+-------+-------------+-----------------+----------+-------+--------+------------+-------------+
  |neutron-tempest-linuxbridge                   |tempest.api (without slow tests)  | 3.6     | 1     | linuxbridge | iptables        | legacy   | False | False  | True       | Yes         |
  |                                              |tempest.scenario                  |         |       |             |                 |          |       |        |            |             |
  |                                              |(only tests related to            |         |       |             |                 |          |       |        |            |             |
  |                                              |Neutron and Nova)                 |         |       |             |                 |          |       |        |            |             |
  +----------------------------------------------+----------------------------------+---------+-------+-------------+-----------------+----------+-------+--------+------------+-------------+
  |tempest-multinode-full-py3                    |tempest.api (without slow tests)  | 3.6     | 2     | openvswitch | openvswitch     | legacy   | False | False  | True       | No          |
  |(non-voting)                                  |tempest.scenario                  |         |       |             |                 |          |       |        |            |             |
  +----------------------------------------------+----------------------------------+---------+-------+-------------+-----------------+----------+-------+--------+------------+-------------+
  |neutron-tempest-dvr-ha-multinode-full         |tempest.api (without slow tests)  | 3.6     | 3     | openvswitch | openvswitch     | dvr      | True  | True   | True       | No          |
  |(non-voting)                                  |tempest.scenario                  |         |       |             |                 | dvr_snat |       |        |            |             |
  |                                              |                                  |         |       |             |                 | dvr_snat |       |        |            |             |
  +----------------------------------------------+----------------------------------+---------+-------+-------------+-----------------+----------+-------+--------+------------+-------------+
  |neutron-tempest-iptables_hybrid               |tempest.api (without slow tests)  | 3.6     | 1     | openvswitch | iptables_hybrid | legacy   | False | False  | True       | Yes         |
  |                                              |tempest.scenario                  |         |       |             |                 |          |       |        |            |             |
  |                                              |(only tests related to            |         |       |             |                 |          |       |        |            |             |
  |                                              |Neutron and Nova)                 |         |       |             |                 |          |       |        |            |             |
  +----------------------------------------------+----------------------------------+---------+-------+-------------+-----------------+----------+-------+--------+------------+-------------+
  |tempest-slow-py3                              |tempest slow tests                | 3.6     | 2     | openvswitch | openvswitch     | legacy   | False | False  | True       | Yes         |
  +----------------------------------------------+----------------------------------+---------+-------+-------------+-----------------+----------+-------+--------+------------+-------------+
  |neutron-tempest-with-uwsgi                    |tempest.api (without slow tests)  | 3.6     | 1     | openvswitch | openvswitch     | legacy   | False | False  | True       | No          |
  |(non-voting)                                  |tempest.scenario                  |         |       |             |                 |          |       |        |            |             |
  |                                              |(only tests related to            |         |       |             |                 |          |       |        |            |             |
  |                                              |Neutron and Nova)                 |         |       |             |                 |          |       |        |            |             |
  +----------------------------------------------+----------------------------------+---------+-------+-------------+-----------------+----------+-------+--------+------------+-------------+
  |tempest-ipv6-only                             |tempest smoke + IPv6 tests        | 3.6     | 1     | openvswitch | openvswitch     | legacy   | False | False  | True       | Yes         |
  +----------------------------------------------+----------------------------------+---------+-------+-------------+-----------------+----------+-------+--------+------------+-------------+
  |neutron-ovn-tempest-ovs-release               |Various tempest api, scenario     | 3.6     | 1     | ovn         | ovn             | ---      | False | False  | True       | Yes         |
  |                                              |and neutron_tempest_plugin tests  |         |       |             |                 |          |       |        |            |             |
  +----------------------------------------------+----------------------------------+---------+-------+-------------+-----------------+----------+-------+--------+------------+-------------+
  |neutron-ovn-tempest-slow                      |tempest slow tests                | 3.6     | 2     | ovn         | ovn             | ---      | False | False  | True       | No          |
  +----------------------------------------------+----------------------------------+---------+-------+-------------+-----------------+----------+-------+--------+------------+-------------+
  |neutron-ovn-tempest-multinode-ovs-master      |Various tempest api, scenario     | 3.6     | 2     | ovn         | ovn             | ---      | False | False  | True       | No          |
  |                                              |and neutron_tempest_plugin tests  |         |       |             |                 |          |       |        |            |             |
  +----------------------------------------------+----------------------------------+---------+-------+-------------+-----------------+----------+-------+--------+------------+-------------+
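If you want to run roughly the same test selection as one of these jobs against your own DevStack, you can invoke tempest locally with the matching test regex. The following is only a sketch: it assumes a standard DevStack layout with tempest checked out under ``/opt/stack/tempest`` and the neutron-tempest-plugin installed into tempest's virtualenv.

::

    $ cd /opt/stack/tempest
    $ # Roughly the selection used by neutron-tempest-plugin-scenario-openvswitch
    $ tox -e all -- neutron_tempest_plugin.scenario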
Grenade jobs running in Neutron CI
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In upstream Neutron CI there are various Grenade jobs running. Each of those jobs runs with a slightly different configuration of Neutron services. Below is a summary of those jobs.

::

  +--------------------------------+---------+-------+-------------+-------------+----------+-------+--------+------------+-------------+
  | Job name                       | python  | nodes | L2 agent    | firewall    | L3 agent | L3 HA | L3 DVR | enable_dvr | Run in gate |
  |                                | version |       |             | driver      | mode     |       |        |            | queue       |
  +================================+=========+=======+=============+=============+==========+=======+========+============+=============+
  | neutron-grenade-multinode      | 3.6     | 2     | openvswitch | openvswitch | legacy   | False | False  | True       | Yes         |
  +--------------------------------+---------+-------+-------------+-------------+----------+-------+--------+------------+-------------+
  | neutron-grenade-dvr-multinode  | 3.6     | 2     | openvswitch | openvswitch | dvr      | False | False  | True       | Yes         |
  |                                |         |       |             |             | dvr_snat |       |        |            |             |
  +--------------------------------+---------+-------+-------------+-------------+----------+-------+--------+------------+-------------+

Columns description

* L2 agent - agent used on nodes in the test job,
* firewall driver - driver configured in the L2 agent's config,
* L3 agent mode - mode(s) configured for the L3 agent(s) on test nodes,
* L3 HA - value of the ``l3_ha`` option set in ``neutron.conf``,
* L3 DVR - value of the ``router_distributed`` option set in ``neutron.conf``,
* enable_dvr - value of the ``enable_dvr`` option set in ``neutron.conf``
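As an illustration of how the last three columns map to configuration, the sketch below shows the ``neutron.conf`` values that a job like ``neutron-tempest-dvr-ha-multinode-full`` effectively runs with. This is only an illustration: the file path is a typical one, and appending a second ``[DEFAULT]`` section like this relies on the config parser merging duplicate sections, so in practice you would usually edit the existing section instead.

::

    $ sudo tee -a /etc/neutron/neutron.conf <<EOF
    [DEFAULT]
    l3_ha = True
    router_distributed = True
    enable_dvr = True
    EOF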
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/testing/coverage.rst0000644000175000017500000001506400000000000025572 0ustar00coreycorey00000000000000

.. Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain
   a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

   Convention for heading levels in Neutron devref:
   =======  Heading 0 (reserved for the title in a document)
   -------  Heading 1
   ~~~~~~~  Heading 2
   +++++++  Heading 3
   '''''''  Heading 4
   (Avoid deeper levels because they do not render well.)

Test Coverage
=============

The intention is to track merged features or areas of code that lack certain types of tests. This document may be used both by developers that want to contribute tests, and by operators that are considering adopting a feature.

Coverage
--------

Note that while both API and scenario tests target a deployed OpenStack cloud, API tests are under the Neutron tree and scenario tests are under the Tempest tree. It is the expectation that API changes involve API tests, agent features or modifications involve functional tests, and Neutron-wide features involve fullstack or scenario tests as appropriate.

The table references tests that explicitly target a feature, and not a job that is configured to run against a specific backend (thereby testing it implicitly). So, for example, while the Linux bridge agent has a job that runs the API and scenario tests with the Linux bridge agent configured, it does not have functional tests that target the agent explicitly. The 'gate' column is about running API/scenario tests with Neutron configured in a certain way, such as what L2 agent to use or what type of routers to create.

* V - Merged
* Blank - Not applicable
* X - Absent or lacking
* Patch number - Currently in review
* A name - That person has committed to work on an item
* Implicit - The code is executed, yet no assertions are made

+------------------------+------------+------------+------------+------------+------------+------------+
| Area                   | Unit       | Functional | API        | Fullstack  | Scenario   | Gate       |
+========================+============+============+============+============+============+============+
| DVR                    | V          | L3-V OVS-X | V          | X          | X          | V          |
+------------------------+------------+------------+------------+------------+------------+------------+
| L3 HA                  | V          | V          | X          | 286087     | X          | X          |
+------------------------+------------+------------+------------+------------+------------+------------+
| L2pop                  | V          | X          |            | Implicit   |            |            |
+------------------------+------------+------------+------------+------------+------------+------------+
| DHCP HA                | V          |            |            | amuller    |            |            |
+------------------------+------------+------------+------------+------------+------------+------------+
| OVS ARP responder      | V          | X*         |            | Implicit   |            |            |
+------------------------+------------+------------+------------+------------+------------+------------+
| OVS agent              | V          | V          |            | V          |            | V          |
+------------------------+------------+------------+------------+------------+------------+------------+
| Linux Bridge agent     | V          | X          |            | V          |            | V          |
+------------------------+------------+------------+------------+------------+------------+------------+
| Metering               | V          | X          | V          | X          |            |            |
+------------------------+------------+------------+------------+------------+------------+------------+
| DHCP agent             | V          | V          |            | amuller    |            | V          |
+------------------------+------------+------------+------------+------------+------------+------------+
| rpc_workers            |            |            |            |            |            | X          |
+------------------------+------------+------------+------------+------------+------------+------------+
| Reference ipam driver  | V          |            |            |            |            | X          |
+------------------------+------------+------------+------------+------------+------------+------------+
| MTU advertisement      | V          |            |            | X          |            |            |
+------------------------+------------+------------+------------+------------+------------+------------+
| VLAN transparency      | V          |            | X          | X          |            |            |
+------------------------+------------+------------+------------+------------+------------+------------+
| Prefix delegation      | V          | X          |            | X          |            |            |
+------------------------+------------+------------+------------+------------+------------+------------+

* Prefix delegation doesn't have functional tests for the dibbler and pd layers, nor for the L3 agent changes. This has been an area of repeated regressions.
* The functional job now compiles OVS 2.5 from source, enabling testing of features that we previously could not.

Missing Infrastructure
----------------------

The following section details missing test *types*. If you want to pick up an action item, please contact amuller for more context and guidance.
* The Neutron team would like Rally to persist results over a window of time, and to graph and visualize this data, so that reviewers could compare average runs against a proposed patch.
* It's possible to test RPC methods via the unit test infrastructure. This was proposed in patch 162811. The goal is to provide developers a lightweight way to rapidly run tests that target the RPC layer, so that a patch that modifies an RPC method's signature could be verified quickly and locally.
* Neutron currently runs a 'partial-grenade' job that verifies that an OVS version from the latest stable release works with neutron-server from master. We would like to expand this to the DHCP and L3 agents as well.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/testing/db_transient_failure_injection.rst0000644000175000017500000000307500000000000032223 0ustar00coreycorey00000000000000

.. Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain
   a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

   Convention for heading levels in Neutron devref:
   =======  Heading 0 (reserved for the title in a document)
   -------  Heading 1
   ~~~~~~~  Heading 2
   +++++++  Heading 3
   '''''''  Heading 4
   (Avoid deeper levels because they do not render well.)

Transient DB Failure Injection
==============================

Neutron has a service plugin to inject random delays and Deadlock exceptions into normal Neutron operations. The service plugin is called 'Loki' and is located under neutron.services.loki.loki_plugin.

To enable the plugin, just add 'loki' to the list of service_plugins in your neutron-server neutron.conf file.

The plugin will inject a Deadlock exception on database flushes with a 1/50 probability, and a delay of 1 second with a 1/200 probability when SQLAlchemy objects are loaded into the persistent state from the DB. The goal is to ensure the code is tolerant of these transient delays/failures that will be experienced in busy production (and Galera) systems.
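For example, you could enable it like this (a minimal sketch: the config path, the assumption that ``router`` is currently the only service plugin, the use of the ``crudini`` tool, and the DevStack ``devstack@q-svc`` unit name all depend on your environment):

::

    $ # Add 'loki' to the service_plugins list in neutron.conf
    $ sudo crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins router,loki
    $ # Restart neutron-server so the plugin is loaded
    $ sudo systemctl restart devstack@q-svc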
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/testing/fullstack.rst0000644000175000017500000000253500000000000025766 0ustar00coreycorey00000000000000

.. Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain
   a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

   Convention for heading levels in Neutron devref:
   =======  Heading 0 (reserved for the title in a document)
   -------  Heading 1
   ~~~~~~~  Heading 2
   +++++++  Heading 3
   '''''''  Heading 4
   (Avoid deeper levels because they do not render well.)

Full Stack Testing
==================

Goals
-----

* Stabilize the job:

  - Fix the L3 HA failure
  - Look into non-deterministic failures when adding a large amount of tests (possibly bug 1486199).
  - Switch to kill signal 15 to terminate agents (bug 1487548).

* Convert the L3 HA failover functional test to a full stack test
* Write DVR tests
* Write additional L3 HA tests
* Write a test that validates DVR + L3 HA integration after https://bugs.launchpad.net/neutron/+bug/1365473 is fixed.

././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.1550434
neutron-16.0.0.0b2.dev214/doc/source/contributor/testing/images/0000755000175000017500000000000000000000000024504 5ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/testing/images/fullstack_multinode_simulation.png0000644000175000017500000012124000000000000033526 0ustar00coreycorey00000000000000

[binary PNG image data omitted: fullstack_multinode_simulation.png]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/testing/index.rst0000644000175000017500000000231300000000000025077 0ustar00coreycorey00000000000000

.. Copyright 2010-2011 United States Government as represented by the
   Administrator of the National Aeronautics and Space Administration.
   All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain
   a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

   Convention for heading levels in Neutron devref:
   =======  Heading 0 (reserved for the title in a document)
   -------  Heading 1
   ~~~~~~~  Heading 2
   +++++++  Heading 3
   '''''''  Heading 4
   (Avoid deeper levels because they do not render well.)

=======
Testing
=======

.. toctree::
   :maxdepth: 2

   testing
   fullstack
   coverage
   template_model_sync_test
   db_transient_failure_injection
   ci_scenario_jobs
   ovn_devstack

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/testing/ovn_devstack.rst0000644000175000017500000006710600000000000026465 0ustar00coreycorey00000000000000

.. _ovn_devstack:

=========================
Testing OVN with DevStack
=========================

This document describes how to test OpenStack with OVN using DevStack. We will start by describing how to test on a single host.
Single Node Test Environment
----------------------------

1. Create a test system.

It's best to use a throwaway dev system for running DevStack. Your best bet is to use either CentOS 8 or the latest Ubuntu LTS (18.04, Bionic).

2. Create the ``stack`` user.

::

    $ git clone https://opendev.org/openstack/devstack.git
    $ sudo ./devstack/tools/create-stack-user.sh

3. Switch to the ``stack`` user and clone DevStack and Neutron.

::

    $ sudo su - stack
    $ git clone https://opendev.org/openstack/devstack.git
    $ git clone https://opendev.org/openstack/neutron.git

4. Configure DevStack to use the OVN driver.

The OVN driver comes with a sample DevStack configuration file you can start with. For example, you may want to set some values for the various PASSWORD variables in that file so DevStack doesn't have to prompt you for them. Feel free to edit it if you'd like, but it should work as-is.

::

    $ cd devstack
    $ cp ../neutron/devstack/ovn-local.conf.sample local.conf

5. Run DevStack.

This is going to take a while. It installs a bunch of packages, clones a bunch of git repos, and installs everything from these git repos.

::

    $ ./stack.sh

Once DevStack completes successfully, you should see output that looks something like this::

    This is your host IP address: 172.16.189.6
    This is your host IPv6 address: ::1
    Horizon is now available at http://172.16.189.6/dashboard
    Keystone is serving at http://172.16.189.6/identity/
    The default users are: admin and demo
    The password: password
    2017-03-09 15:10:54.117 | stack.sh completed in 2110 seconds.

Environment Variables
---------------------

Once DevStack finishes successfully, we're ready to start interacting with OpenStack APIs. OpenStack provides a set of command line tools for interacting with these APIs. DevStack provides a file you can source to set up the right environment variables to make the OpenStack command line tools work.

::

    $ . openrc

If you're curious what environment variables are set, they generally start with an ``OS_`` prefix::

    $ env | grep OS
    OS_REGION_NAME=RegionOne
    OS_IDENTITY_API_VERSION=2.0
    OS_PASSWORD=password
    OS_AUTH_URL=http://192.168.122.8:5000/v2.0
    OS_USERNAME=demo
    OS_TENANT_NAME=demo
    OS_VOLUME_API_VERSION=2
    OS_CACERT=/opt/stack/data/CA/int-ca/ca-chain.pem
    OS_NO_CACHE=1

Default Network Configuration
-----------------------------

By default, DevStack creates networks called ``private`` and ``public``. Run the following command to see the existing networks::

    $ openstack network list
    +--------------------------------------+---------+----------------------------------------------------------------------------+
    | ID                                   | Name    | Subnets                                                                    |
    +--------------------------------------+---------+----------------------------------------------------------------------------+
    | 40080dad-0064-480a-b1b0-592ae51c1471 | private | 5ff81545-7939-4ae0-8365-1658d45fa85c, da34f952-3bfc-45bb-b062-d2d973c1a751 |
    | 7ec986dd-aae4-40b5-86cf-8668feeeab67 | public  | 60d0c146-a29b-4cd3-bd90-3745603b1a4b, f010c309-09be-4af2-80d6-e6af9c78bae7 |
    +--------------------------------------+---------+----------------------------------------------------------------------------+

A Neutron network is implemented as an OVN logical switch. The OVN driver creates logical switches with a name in the format ``neutron-<network UUID>``.
We can use ``ovn-nbctl`` to list the configured logical switches and see that their names correlate with the output from ``openstack network list``:: $ ovn-nbctl ls-list 71206f5c-b0e6-49ce-b572-eb2e964b2c4e (neutron-40080dad-0064-480a-b1b0-592ae51c1471) 8d8270e7-fd51-416f-ae85-16565200b8a4 (neutron-7ec986dd-aae4-40b5-86cf-8668feeeab67) $ ovn-nbctl get Logical_Switch neutron-40080dad-0064-480a-b1b0-592ae51c1471 external_ids {"neutron:network_name"=private} Booting VMs ----------- In this section we'll go through the steps to create two VMs that have a virtual NIC attached to the ``private`` Neutron network. DevStack uses libvirt as the Nova backend by default. If KVM is available, it will be used. Otherwise, it will just run qemu emulated guests. This is perfectly fine for our testing, as we only need these VMs to be able to send and receive a small amount of traffic so performance is not very important. 1. Get the Network UUID. Start by getting the UUID for the ``private`` network from the output of ``openstack network list`` from earlier and save it off:: $ PRIVATE_NET_ID=$(openstack network show private -c id -f value) 2. Create an SSH keypair. Next create an SSH keypair in Nova. Later, when we boot a VM, we'll ask that the public key be put in the VM so we can SSH into it. :: $ openstack keypair create demo > id_rsa_demo $ chmod 600 id_rsa_demo 3. Choose a flavor. We need minimal resources for these test VMs, so the ``m1.nano`` flavor is sufficient. :: $ openstack flavor list +----+-----------+-------+------+-----------+-------+-----------+ | ID | Name | RAM | Disk | Ephemeral | VCPUs | Is Public | +----+-----------+-------+------+-----------+-------+-----------+ | 1 | m1.tiny | 512 | 1 | 0 | 1 | True | | 2 | m1.small | 2048 | 20 | 0 | 1 | True | | 3 | m1.medium | 4096 | 40 | 0 | 2 | True | | 4 | m1.large | 8192 | 80 | 0 | 4 | True | | 42 | m1.nano | 64 | 0 | 0 | 1 | True | | 5 | m1.xlarge | 16384 | 160 | 0 | 8 | True | | 84 | m1.micro | 128 | 0 | 0 | 1 | True | | c1 | cirros256 | 256 | 0 | 0 | 1 | True | | d1 | ds512M | 512 | 5 | 0 | 1 | True | | d2 | ds1G | 1024 | 10 | 0 | 1 | True | | d3 | ds2G | 2048 | 10 | 0 | 2 | True | | d4 | ds4G | 4096 | 20 | 0 | 4 | True | +----+-----------+-------+------+-----------+-------+-----------+ $ FLAVOR_ID=$(openstack flavor show m1.nano -c id -f value) 4. Choose an image. DevStack imports the CirrOS image by default, which is perfect for our testing. It's a very small test image. :: $ openstack image list +--------------------------------------+--------------------------+--------+ | ID | Name | Status | +--------------------------------------+--------------------------+--------+ | 849a8db2-3754-4cf6-9271-491fa4ff7195 | cirros-0.3.5-x86_64-disk | active | +--------------------------------------+--------------------------+--------+ $ IMAGE_ID=$(openstack image list -c ID -f value) 5. Setup a security rule so that we can access the VMs we will boot up next. By default, DevStack does not allow users to access VMs, to enable that, we will need to add a rule. We will allow both ICMP and SSH. 
:: $ openstack security group rule create --ingress --ethertype IPv4 --dst-port 22 --protocol tcp default $ openstack security group rule create --ingress --ethertype IPv4 --protocol ICMP default $ openstack security group rule list +--------------------------------------+-------------+-----------+------------+--------------------------------------+--------------------------------------+ | ID | IP Protocol | IP Range | Port Range | Remote Security Group | Security Group | +--------------------------------------+-------------+-----------+------------+--------------------------------------+--------------------------------------+ ... | ade97198-db44-429e-9b30-24693d86d9b1 | tcp | 0.0.0.0/0 | 22:22 | None | a47b14da-5607-404a-8de4-3a0f1ad3649c | | d0861a98-f90e-4d1a-abfb-827b416bc2f6 | icmp | 0.0.0.0/0 | | None | a47b14da-5607-404a-8de4-3a0f1ad3649c | ... +--------------------------------------+-------------+-----------+------------+--------------------------------------+--------------------------------------+ 6. Boot some VMs. Now we will boot two VMs. We'll name them ``test1`` and ``test2``. :: $ openstack server create --nic net-id=$PRIVATE_NET_ID --flavor $FLAVOR_ID --image $IMAGE_ID --key-name demo test1 +-----------------------------+-----------------------------------------------------------------+ | Field | Value | +-----------------------------+-----------------------------------------------------------------+ | OS-DCF:diskConfig | MANUAL | | OS-EXT-AZ:availability_zone | | | OS-EXT-STS:power_state | NOSTATE | | OS-EXT-STS:task_state | scheduling | | OS-EXT-STS:vm_state | building | | OS-SRV-USG:launched_at | None | | OS-SRV-USG:terminated_at | None | | accessIPv4 | | | accessIPv6 | | | addresses | | | adminPass | BzAWWA6byGP6 | | config_drive | | | created | 2017-03-09T16:56:08Z | | flavor | m1.nano (42) | | hostId | | | id | d8b8084e-58ff-44f4-b029-a57e7ef6ba61 | | image | cirros-0.3.5-x86_64-disk (849a8db2-3754-4cf6-9271-491fa4ff7195) | | key_name | demo | | name | test1 | | progress | 0 | | project_id | b6522570f7344c06b1f24303abf3c479 | | properties | | | security_groups | name='default' | | status | BUILD | | updated | 2017-03-09T16:56:08Z | | user_id | c68f77f1d85e43eb9e5176380a68ac1f | | volumes_attached | | +-----------------------------+-----------------------------------------------------------------+ $ openstack server create --nic net-id=$PRIVATE_NET_ID --flavor $FLAVOR_ID --image $IMAGE_ID --key-name demo test2 +-----------------------------+-----------------------------------------------------------------+ | Field | Value | +-----------------------------+-----------------------------------------------------------------+ | OS-DCF:diskConfig | MANUAL | | OS-EXT-AZ:availability_zone | | | OS-EXT-STS:power_state | NOSTATE | | OS-EXT-STS:task_state | scheduling | | OS-EXT-STS:vm_state | building | | OS-SRV-USG:launched_at | None | | OS-SRV-USG:terminated_at | None | | accessIPv4 | | | accessIPv6 | | | addresses | | | adminPass | YB8dmt5v88JV | | config_drive | | | created | 2017-03-09T16:56:50Z | | flavor | m1.nano (42) | | hostId | | | id | 170d4f37-9299-4a08-b48b-2b90fce8e09b | | image | cirros-0.3.5-x86_64-disk (849a8db2-3754-4cf6-9271-491fa4ff7195) | | key_name | demo | | name | test2 | | progress | 0 | | project_id | b6522570f7344c06b1f24303abf3c479 | | properties | | | security_groups | name='default' | | status | BUILD | | updated | 2017-03-09T16:56:51Z | | user_id | c68f77f1d85e43eb9e5176380a68ac1f | | volumes_attached | | 
+-----------------------------+-----------------------------------------------------------------+ Once both VMs have been started, they will have a status of ``ACTIVE``:: $ openstack server list +--------------------------------------+-------+--------+---------------------------------------------------------+--------------------------+ | ID | Name | Status | Networks | Image Name | +--------------------------------------+-------+--------+---------------------------------------------------------+--------------------------+ | 170d4f37-9299-4a08-b48b-2b90fce8e09b | test2 | ACTIVE | private=fd5d:9d1b:457c:0:f816:3eff:fe24:49df, 10.0.0.3 | cirros-0.3.5-x86_64-disk | | d8b8084e-58ff-44f4-b029-a57e7ef6ba61 | test1 | ACTIVE | private=fd5d:9d1b:457c:0:f816:3eff:fe3f:953d, 10.0.0.10 | cirros-0.3.5-x86_64-disk | +--------------------------------------+-------+--------+---------------------------------------------------------+--------------------------+ Our two VMs have addresses of ``10.0.0.3`` and ``10.0.0.10``. If we list Neutron ports, there are two new ports with these addresses associated with them:: $ openstack port list +--------------------------------------+------+-------------------+-----------------------------------------------------------------------------------------------------+--------+ | ID | Name | MAC Address | Fixed IP Addresses | Status | +--------------------------------------+------+-------------------+-----------------------------------------------------------------------------------------------------+--------+ ... | 97c970b0-485d-47ec-868d-783c2f7acde3 | | fa:16:3e:3f:95:3d | ip_address='10.0.0.10', subnet_id='da34f952-3bfc-45bb-b062-d2d973c1a751' | ACTIVE | | | | | ip_address='fd5d:9d1b:457c:0:f816:3eff:fe3f:953d', subnet_id='5ff81545-7939-4ae0-8365-1658d45fa85c' | | | e003044d-334a-4de3-96d9-35b2d2280454 | | fa:16:3e:24:49:df | ip_address='10.0.0.3', subnet_id='da34f952-3bfc-45bb-b062-d2d973c1a751' | ACTIVE | | | | | ip_address='fd5d:9d1b:457c:0:f816:3eff:fe24:49df', subnet_id='5ff81545-7939-4ae0-8365-1658d45fa85c' | | ... +--------------------------------------+------+-------------------+-----------------------------------------------------------------------------------------------------+--------+ $ TEST1_PORT_ID=97c970b0-485d-47ec-868d-783c2f7acde3 $ TEST2_PORT_ID=e003044d-334a-4de3-96d9-35b2d2280454 Now we can look at OVN using ``ovn-nbctl`` to see the logical switch ports that were created for these two Neutron ports. The first part of the output is the OVN logical switch port UUID. The second part in parentheses is the logical switch port name. Neutron sets the logical switch port name equal to the Neutron port ID. :: $ ovn-nbctl lsp-list neutron-$PRIVATE_NET_ID ... fde1744b-e03b-46b7-b181-abddcbe60bf2 (97c970b0-485d-47ec-868d-783c2f7acde3) 7ce284a8-a48a-42f5-bf84-b2bca62cd0fe (e003044d-334a-4de3-96d9-35b2d2280454) ... These two ports correspond to the two VMs we created. VM Connectivity --------------- We can connect to our VMs by associating a floating IP address from the public network. 
::

    $ openstack floating ip create --port $TEST1_PORT_ID public
    +---------------------+--------------------------------------+
    | Field               | Value                                |
    +---------------------+--------------------------------------+
    | created_at          | 2017-03-09T18:58:12Z                 |
    | description         |                                      |
    | fixed_ip_address    | 10.0.0.10                            |
    | floating_ip_address | 172.24.4.8                           |
    | floating_network_id | 7ec986dd-aae4-40b5-86cf-8668feeeab67 |
    | id                  | 24ff0799-5a72-4a5b-abc0-58b301c9aee5 |
    | name                | None                                 |
    | port_id             | 97c970b0-485d-47ec-868d-783c2f7acde3 |
    | project_id          | b6522570f7344c06b1f24303abf3c479     |
    | revision_number     | 1                                    |
    | router_id           | ee51adeb-0dd8-4da0-ab6f-7ce60e00e7b0 |
    | status              | DOWN                                 |
    | updated_at          | 2017-03-09T18:58:12Z                 |
    +---------------------+--------------------------------------+

DevStack does not wire up the public network by default, so we must do that before connecting to this floating IP address.

::

    $ sudo ip link set br-ex up
    $ sudo ip route add 172.24.4.0/24 dev br-ex
    $ sudo ip addr add 172.24.4.1/24 dev br-ex

Now you should be able to connect to the VM via its floating IP address. First, ping the address.

::

    $ ping -c 1 172.24.4.8
    PING 172.24.4.8 (172.24.4.8) 56(84) bytes of data.
    64 bytes from 172.24.4.8: icmp_seq=1 ttl=63 time=0.823 ms

    --- 172.24.4.8 ping statistics ---
    1 packets transmitted, 1 received, 0% packet loss, time 0ms
    rtt min/avg/max/mdev = 0.823/0.823/0.823/0.000 ms

Now SSH to the VM::

    $ ssh -i id_rsa_demo cirros@172.24.4.8 hostname
    test1

Adding Another Compute Node
---------------------------

After completing the earlier instructions for setting up DevStack, you can use a second VM to emulate an additional compute node. This is important for OVN testing as it exercises the tunnels created by OVN between the hypervisors.

Just as before, create a throwaway VM, but make sure that this VM has a different host name. Having the same host name for both VMs will confuse Nova and will not produce two hypervisors when you query the nova hypervisor list later. Once the VM is set up, create the ``stack`` user::

    $ git clone https://opendev.org/openstack/devstack.git
    $ sudo ./devstack/tools/create-stack-user.sh

Switch to the ``stack`` user and clone DevStack and neutron::

    $ sudo su - stack
    $ git clone https://opendev.org/openstack/devstack.git
    $ git clone https://opendev.org/openstack/neutron.git

OVN comes with another sample configuration file that can be used for this::

    $ cd devstack
    $ cp ../neutron/devstack/ovn-compute-local.conf.sample local.conf

You must set SERVICE_HOST in local.conf. The value should be the IP address of the main DevStack host. You must also set HOST_IP to the IP address of this new host. See the text in the sample configuration file for more information.

Once that is complete, run DevStack::

    $ cd devstack
    $ ./stack.sh

This should complete in less time than before, as it's only running a single OpenStack service (nova-compute) along with OVN (ovn-controller, ovs-vswitchd, ovsdb-server). The final output will look something like this::

    This is your host IP address: 172.16.189.30
    This is your host IPv6 address: ::1
    2017-03-09 18:39:27.058 | stack.sh completed in 1149 seconds.

Now go back to your main DevStack host. You can use admin credentials to verify that the additional hypervisor has been added to the deployment::

    $ cd devstack
    $ . openrc admin
    $ ./tools/discover_hosts.sh
    $ openstack hypervisor list
    +----+------------------------+-----------------+---------------+-------+
    | ID | Hypervisor Hostname    | Hypervisor Type | Host IP       | State |
    +----+------------------------+-----------------+---------------+-------+
    | 1  | centos7-ovn-devstack   | QEMU            | 172.16.189.6  | up    |
    | 2  | centos7-ovn-devstack-2 | QEMU            | 172.16.189.30 | up    |
    +----+------------------------+-----------------+---------------+-------+
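To actually exercise the tunnels between the two nodes, you can boot an additional VM and pin it to the new hypervisor. This is only a sketch: it reuses the variables defined earlier, host pinning with ``--availability-zone zone:host`` requires admin credentials, and your host name will differ. ::

    $ openstack server create --nic net-id=$PRIVATE_NET_ID \
        --flavor $FLAVOR_ID --image $IMAGE_ID --key-name demo \
        --availability-zone nova:centos7-ovn-devstack-2 test3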
You can also look at OVN and OVS to see that the second host has shown up. For example, there will be a second entry in the Chassis table of the OVN_Southbound database. You can use the ``ovn-sbctl`` utility to list chassis, their configuration, and the ports bound to each of them::

    $ ovn-sbctl show

    Chassis "ddc8991a-d838-4758-8d15-71032da9d062"
        hostname: "centos7-ovn-devstack"
        Encap vxlan
            ip: "172.16.189.6"
            options: {csum="true"}
        Encap geneve
            ip: "172.16.189.6"
            options: {csum="true"}
        Port_Binding "97c970b0-485d-47ec-868d-783c2f7acde3"
        Port_Binding "e003044d-334a-4de3-96d9-35b2d2280454"
        Port_Binding "cr-lrp-08d1f28d-cc39-4397-b12b-7124080899a1"
    Chassis "b194d07e-0733-4405-b795-63b172b722fd"
        hostname: "centos7-ovn-devstack-2.os1.phx2.redhat.com"
        Encap geneve
            ip: "172.16.189.30"
            options: {csum="true"}
        Encap vxlan
            ip: "172.16.189.30"
            options: {csum="true"}

You can also see a tunnel created to the other compute node::

    $ ovs-vsctl show
    ...
    Bridge br-int
        fail_mode: secure
        ...
        Port "ovn-b194d0-0"
            Interface "ovn-b194d0-0"
                type: geneve
                options: {csum="true", key=flow, remote_ip="172.16.189.30"}
        ...
    ...

Provider Networks
-----------------

Neutron has a "provider networks" API extension that lets you specify some additional attributes on a network. These attributes let you map a Neutron network to a physical network in your environment. The OVN ML2 driver is adding support for this API extension. It currently supports "flat" and "vlan" networks.

Here is how you can test it:

First you must create an OVS bridge that provides connectivity to the provider network on every host running ovn-controller. For trivial testing this could just be a dummy bridge. In a real environment, you would want to add a local network interface to the bridge, as well.

::

    $ ovs-vsctl add-br br-provider

ovn-controller on each host must be configured with a mapping between a network name and the bridge that provides connectivity to that network. In this case we'll create a mapping from the network name "providernet" to the bridge "br-provider".

::

    $ ovs-vsctl set open . \
      external-ids:ovn-bridge-mappings=providernet:br-provider

If you want to enable this chassis to host a gateway router for external connectivity, then set ovn-cms-options to enable-chassis-as-gw.

::

    $ ovs-vsctl set open . \
      external-ids:ovn-cms-options="enable-chassis-as-gw"

Now create a Neutron provider network.

::

    $ openstack network create provider --share \
      --provider-physical-network providernet \
      --provider-network-type flat

Alternatively, you can define connectivity to a VLAN instead of a flat network:

::

    $ openstack network create provider-101 --share \
      --provider-physical-network providernet \
      --provider-network-type vlan --provider-segment 101

Observe that the OVN ML2 driver created a special logical switch port of type localnet on the logical switch to model the connection to the physical network.

::

    $ ovn-nbctl show
    ...
    switch 5bbccbbd-f5ca-411b-bad9-01095d6f1316 (neutron-729dbbee-db84-4a3d-afc3-82c0b3701074)
        port provnet-729dbbee-db84-4a3d-afc3-82c0b3701074
            addresses: ["unknown"]
    ...

    $ ovn-nbctl lsp-get-type provnet-729dbbee-db84-4a3d-afc3-82c0b3701074
    localnet

    $ ovn-nbctl lsp-get-options provnet-729dbbee-db84-4a3d-afc3-82c0b3701074
    network_name=providernet

If VLAN is used, there will be a VLAN tag shown on the localnet port as well.

Finally, create a Neutron port on the provider network.

::

    $ openstack port create --network provider myport

or if you followed the VLAN example, it would be:

::

    $ openstack port create --network provider-101 myport
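As a quick sanity check (a sketch; the IDs will differ in your environment), you can confirm that a corresponding logical switch port was created for ``myport`` on the provider network's logical switch::

    $ PORT_ID=$(openstack port show myport -c id -f value)
    $ PROVIDER_NET_ID=$(openstack network show provider -c id -f value)
    $ ovn-nbctl lsp-list neutron-$PROVIDER_NET_ID | grep $PORT_ID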
Skydive
-------

`Skydive `_ is an open source real-time network topology and protocols analyzer. It aims to provide a comprehensive way of understanding what is happening in the network infrastructure. Skydive works by utilizing agents to collect host-local information, and sending this information to a central agent for further analysis. It uses Elasticsearch to store the data.

To enable Skydive support with OVN and DevStack, enable it on the control and compute nodes.

On the control node, enable it as follows:

::

    enable_plugin skydive https://github.com/skydive-project/skydive.git
    enable_service skydive-analyzer

On the compute nodes, enable it as follows:

::

    enable_plugin skydive https://github.com/skydive-project/skydive.git
    enable_service skydive-agent

Troubleshooting
---------------

If you run into any problems, take a look at our :doc:`/admin/ovn/troubleshooting` page.

Additional Resources
--------------------

See the documentation and other references linked from the :doc:`/admin/ovn/ovn` page.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/contributor/testing/template_model_sync_test.rst0000644000175000017500000001224200000000000031060 0ustar00coreycorey00000000000000

.. Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain
   a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

   Convention for heading levels in Neutron devref:
   =======  Heading 0 (reserved for the title in a document)
   -------  Heading 1
   ~~~~~~~  Heading 2
   +++++++  Heading 3
   '''''''  Heading 4
   (Avoid deeper levels because they do not render well.)

Template for ModelMigrationSync for external repos
==================================================

This section contains a template for a test which checks that the Python models for database tables are synchronized with the alembic migrations that create the database schema. This test should be implemented in all driver/plugin repositories that were split out from Neutron.

What does the test do?
----------------------

This test compares models with the result of existing migrations. It is based on `ModelsMigrationsSync `_ which is provided by oslo.db and was adapted for Neutron. It compares core Neutron models and vendor specific models with migrations from Neutron core and migrations from the driver/plugin repo. This test is functional - it runs against MySQL and PostgreSQL dialects. The detailed description of this test can be found in the Neutron Database Layer section - :ref:`testing-database-migrations`.

Steps for implementing the test
-------------------------------

1.
1. Import all models in one place
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Create a module ``networking_foo/db/models/head.py`` with the following
content:

::

    from neutron_lib.db import model_base

    from networking_foo import models  # noqa
    # Alternatively, import separate modules here if the models are not in
    # one models.py file


    def get_metadata():
        return model_base.BASEV2.metadata

2. Implement the test module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The test uses ``external.py`` from Neutron.  This file contains lists of
table names which were moved out of Neutron:

::

    VPNAAS_TABLES = [...]

    FWAAS_TABLES = [...]

    # Arista ML2 driver Models moved to openstack/networking-arista
    REPO_ARISTA_TABLES = [...]

    # Models moved to openstack/networking-cisco
    REPO_CISCO_TABLES = [...]

    ...

    TABLES = (FWAAS_TABLES + VPNAAS_TABLES + ...
              + REPO_ARISTA_TABLES + REPO_CISCO_TABLES)

The test also uses **VERSION_TABLE**, the name of the database table that
contains the revision id of the head migration.  It is preferred to keep
this variable in
``networking_foo/db/migration/alembic_migrations/__init__.py`` so it will be
easy to use in the test.

Create a module ``networking_foo/tests/functional/db/test_migrations.py``
with the following content:

::

    from oslo_config import cfg

    from neutron.db.migration.alembic_migrations import external
    from neutron.db.migration import cli as migration
    from neutron.tests.functional.db import test_migrations
    from neutron.tests.unit import testlib_api

    from networking_foo.db.migration import alembic_migrations
    from networking_foo.db.models import head

    # EXTERNAL_TABLES should contain all names of tables that are not
    # related to current repo.
    EXTERNAL_TABLES = set(external.TABLES) - set(external.REPO_FOO_TABLES)


    class _TestModelsMigrationsFoo(test_migrations._TestModelsMigrations):

        def db_sync(self, engine):
            cfg.CONF.set_override('connection', engine.url, group='database')
            for conf in migration.get_alembic_configs():
                self.alembic_config = conf
                self.alembic_config.neutron_config = cfg.CONF
                migration.do_alembic_command(conf, 'upgrade', 'heads')

        def get_metadata(self):
            return head.get_metadata()

        def include_object(self, object_, name, type_, reflected,
                           compare_to):
            if type_ == 'table' and (name == 'alembic' or
                                     name == alembic_migrations.VERSION_TABLE
                                     or name in EXTERNAL_TABLES):
                return False
            else:
                return True


    class TestModelsMigrationsMysql(testlib_api.MySQLTestCaseMixin,
                                    _TestModelsMigrationsFoo,
                                    testlib_api.SqlTestCaseLight):
        pass


    class TestModelsMigrationsPsql(testlib_api.PostgreSQLTestCaseMixin,
                                   _TestModelsMigrationsFoo,
                                   testlib_api.SqlTestCaseLight):
        pass

3. Add functional requirements
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

A separate file ``networking_foo/tests/functional/requirements.txt`` should
be created containing the following requirements that are needed for
successful test execution:

::

    psutil>=3.2.2  # BSD
    psycopg2
    PyMySQL>=0.6.2  # MIT License

An example implementation of this test can be found in the neutron-vpnaas
repository.
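As a usage sketch — the tox environment name and the test path here are
assumptions that differ per repository — the test is typically executed
through the functional tox environment against locally available MySQL and
PostgreSQL databases:

::

    $ tox -e functional -- networking_foo.tests.functional.db.test_migrations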
.. include:: ../../../../TESTING.rst

.. _upgrade_checks:

Upgrade checks
==============

Introduction
------------

The CLI tool ``neutron-status upgrade check`` contains checks which perform
a release-specific readiness check before restarting services with new code.
For more details see the neutron-status command-line client page.

3rd party plugins checks
------------------------

The Neutron upgrade checks script allows stadium and 3rd party projects to
add their own checks.  The ``neutron-status`` script detects which
sub-projects have been installed by enumerating the
``neutron.status.upgrade.checks`` entrypoints.  For more details see the
Entry Points section of Contributing extensions to Neutron.
Checks can be run in random order and should be independent from each other.

The recommended entry point name is a repository name, for example
``neutron-fwaas`` for FWaaS and ``networking-sfc`` for SFC:

.. code-block:: ini

   neutron.status.upgrade.checks =
       neutron-fwaas = neutron_fwaas.upgrade.checks:Checks

The entry point should be a class which inherits from
``neutron.cmd.upgrade_checks.base.BaseChecks``.  An example of a checks
class can be found in ``neutron.cmd.upgrade_checks.checks.CoreChecks``.
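A minimal checks class might look like the following sketch; the check name
and message are illustrative only, and the ``Result`` API is assumed from
oslo.upgradecheck:

.. code-block:: python

   from oslo_upgradecheck import upgradecheck

   from neutron.cmd.upgrade_checks import base


   class Checks(base.BaseChecks):

       def get_checks(self):
           # Each entry pairs a human-readable check name with a callable.
           return [('Example check', self.example_check)]

       @staticmethod
       def example_check(checker):
           # Inspect configuration or database state here and return
           # SUCCESS, WARNING or FAILURE accordingly.
           return upgradecheck.Result(
               upgradecheck.Code.SUCCESS, 'Everything is ready')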
============
Introduction
============

This document describes how features are listed in
:doc:`general_feature_support_matrix` and
:doc:`provider_network_support_matrix`.

Goals
~~~~~

The objective of this document is to inform users whether or not features
are complete, well documented, stable, and tested.  This approach ensures a
good user experience for those well-maintained features.

.. note::

   Tests are specific to particular combinations of technologies.  The
   plugins chosen for deployment make a big difference to whether or not
   features will work.

Concepts
~~~~~~~~

These definitions clarify the terminology used throughout this document.

Feature status
~~~~~~~~~~~~~~

* Immature
* Mature
* Required
* Deprecated (scheduled to be removed in a future release)

Immature
--------

Immature features do not have enough functionality to satisfy real world use
cases.  An immature feature is a feature being actively developed, which is
only partially functional and upstream tested, most likely introduced in a
recent release, and that will take time to mature thanks to feedback from
downstream QA.

Users of these features will likely identify gaps and/or defects that were
not identified during specification and code review.

Mature
------

A feature is considered mature if it satisfies the following criteria:

* Complete API documentation including concept and REST call definition.
* Complete Administrator documentation.
* Tempest tests that define the correct functionality of the feature.
* Enough functionality and reliability to be useful in real world scenarios.
* Low probability of support for the feature being dropped.

Required
--------

Required features are core networking principles that have been thoroughly
tested and have been implemented in real world use cases.  In addition, they
satisfy the same criteria as any mature feature.

.. note::

   Any new drivers must prove that they support all required features
   before they are merged into neutron.

Deprecated
----------

Deprecated features are no longer supported; only security-related fixes or
development will happen towards them.

Deployment rating of features
-----------------------------

The deployment rating shows only the state of the tests for each feature on
a particular deployment.

.. important::

   Despite the obvious parallels that could be drawn, this list is
   unrelated to the DefCore effort.  See InteropWG.
# doc/source/feature_classification/general_feature_support_matrix.ini

[driver.ovs]
title=Open vSwitch

[driver.linuxbridge]
title=Linux Bridge

[driver.odl]
title=Networking ODL
link=https://docs.openstack.org/networking-odl/latest/

[driver.midonet]
title=Networking MidoNet
link=https://docs.openstack.org/networking-midonet/latest/

# TODO(slaweq): change it to neutron docs when it will be merged with
# networking-ovn already
[driver.ovn]
title=Networking OVN
link=https://docs.openstack.org/networking-ovn/latest/

[operation.Networks]
title=Networks
status=mandatory
api=core
cli=openstack network *
notes=The ability to create, modify and delete networks.
  https://docs.openstack.org/api-ref/network/v2/#networks
driver.ovs=complete
driver.linuxbridge=complete
driver.odl=complete
driver.midonet=complete
driver.ovn=complete

[operation.Subnets]
title=Subnets
status=mandatory
api=core
cli=openstack subnet *
notes=The ability to create and manipulate subnets and subnet pools.
  https://docs.openstack.org/api-ref/network/v2/#subnets
driver.ovs=complete
driver.linuxbridge=complete
driver.odl=complete
driver.midonet=complete
driver.ovn=complete

[operation.Ports]
title=Ports
status=mandatory
api=core
cli=openstack port *
notes=The ability to create and manipulate ports.
  https://docs.openstack.org/api-ref/network/v2/#ports
driver.ovs=complete
driver.linuxbridge=complete
driver.odl=complete
driver.midonet=complete
driver.ovn=complete

[operation.Router]
title=Routers
status=mandatory
api=router
cli=openstack router *
notes=The ability to create and manipulate routers.
  https://docs.openstack.org/api-ref/network/v2/#routers-routers
driver.ovs=complete
driver.linuxbridge=complete
driver.odl=complete
driver.midonet=complete
driver.ovn=complete

[operation.Security_Groups]
title=Security Groups
status=mature
api=security-group
cli=openstack security group *
notes=Security groups are set by default, and can be modified to control
  ingress & egress traffic.
  https://docs.openstack.org/api-ref/network/v2/#security-groups-security-groups
driver.ovs=complete
driver.linuxbridge=complete
driver.odl=complete
driver.midonet=complete
driver.ovn=complete

[operation.External_Nets]
title=External Networks
status=mature
api=external-net
notes=The ability to create an external network to provide internet access
  to and from instances using floating IP addresses and security group
  rules.
driver.ovs=complete
driver.linuxbridge=complete
driver.odl=complete
driver.midonet=complete
driver.ovn=complete

[operation.DVR]
title=Distributed Virtual Routers
status=immature
api=dvr
notes=The ability to support the distributed virtual routers.
  https://wiki.openstack.org/wiki/Neutron/DVR
driver.ovs=complete
driver.linuxbridge=missing
driver.odl=partial
driver.midonet=complete
driver.ovn=partial

[operation.L3_HA]
title=L3 High Availability
status=immature
api=l3-ha
notes=The ability to support the High Availability features and extensions.
  https://wiki.openstack.org/wiki/Neutron/L3_High_Availability_VRRP.
driver.ovs=complete
driver.linuxbridge=complete
driver.odl=partial
driver.midonet=missing
driver.ovn=partial

[operation.QoS]
title=Quality of Service
status=mature
api=qos
notes=Support for Neutron Quality of Service policies and API.
  https://docs.openstack.org/api-ref/network/v2/#qos-policies-qos
driver.ovs=complete
driver.linuxbridge=partial
driver.odl=partial
driver.midonet=complete
driver.ovn=complete

[operation.BGP]
title=Border Gateway Protocol
status=immature
notes=https://docs.openstack.org/api-ref/network/v2/#bgp-mpls-vpn-interconnection
driver.ovs=complete
driver.linuxbridge=unknown
driver.odl=unknown
driver.midonet=complete
driver.ovn=unknown

[operation.DNS]
title=DNS
status=mature
api=dns-integration
notes=The ability to integrate with an external DNS as a Service.
  https://docs.openstack.org/neutron/latest/admin/config-dns-int.html
driver.ovs=complete
driver.linuxbridge=complete
driver.odl=complete
driver.midonet=missing
driver.ovn=complete

[operation.Trunk_Ports]
title=Trunk Ports
status=mature
api=trunk
notes=Neutron extension to access lots of neutron networks over a single
  vNIC as tagged/encapsulated traffic.
  https://docs.openstack.org/api-ref/network/v2/#trunk-networking
driver.ovs=complete
driver.linuxbridge=complete
driver.odl=missing
driver.midonet=missing
driver.ovn=complete

[operation.Metering]
title=Metering
status=mature
api=metering
notes=Meter traffic at the L3 router levels.
  https://docs.openstack.org/api-ref/network/v2/#metering-labels-and-rules-metering-labels-metering-label-rules
driver.ovs=complete
driver.linuxbridge=complete
driver.odl=missing
driver.midonet=missing
driver.ovn=unknown

[operation.Routed_Provider_Networks]
title=Routed Provider Networks
status=immature
notes=The ability to present a multi-segment layer-3 network as a single
  entity.
  https://docs.openstack.org/neutron/latest/admin/config-routed-networks.html
driver.ovs=partial
driver.linuxbridge=partial
driver.odl=missing
driver.midonet=missing
driver.ovn=partial

=======================
General Feature Support
=======================

.. warning::

   Please note, while this document is still being maintained, this is
   slowly being updated to re-group and classify features using the
   definitions described in here:
   :doc:`feature_classification_introduction`.

This document covers the maturity and support of the Neutron API and its API
extensions.  Details about the API can be found at
`Networking API v2.0 <https://docs.openstack.org/api-ref/network/v2/>`_.

When considering which capabilities should be marked as mature the following
general guiding principles were applied:

* **Inclusivity** - people have shown ability to make effective use of a
  wide range of network plugins and drivers with broadly varying feature
  sets.  Aiming to keep the requirements as inclusive as possible avoids
  second-guessing how a user wants to use their networks.

* **Bootstrapping** - a practical use case test is to consider that the
  starting point for the network deployment is an empty data center with new
  machines and network connectivity.  Then look at what are the minimum
  features required of the network service in order to get user instances
  running and connected over the network.

* **Reality** - there are many networking drivers and plugins compatible
  with neutron, each with their own supported feature set.
.. support_matrix:: general_feature_support_matrix.ini

Neutron Feature Classification
==============================

.. toctree::
   :maxdepth: 2

   feature_classification_introduction
   general_feature_support_matrix
   provider_network_support_matrix
# doc/source/feature_classification/provider_network_support_matrix.ini

[driver.ovs]
title=Open vSwitch

[driver.linuxbridge]
title=Linux Bridge

[driver.odl]
title=Networking ODL
link=https://docs.openstack.org/networking-odl/latest/

[driver.midonet]
title=Networking MidoNet
link=https://docs.openstack.org/networking-midonet/latest/

[driver.ovn]
title=Networking OVN
link=https://docs.openstack.org/networking-ovn/latest/

[operation.VLAN]
title=VLAN provider network support
status=mature
driver.ovs=complete
driver.linuxbridge=complete
driver.odl=unknown
driver.midonet=missing
driver.ovn=complete

[operation.VXLAN]
title=VXLAN provider network support
status=mature
driver.ovs=complete
driver.linuxbridge=complete
driver.odl=complete
driver.midonet=missing
driver.ovn=missing

[operation.GRE]
title=GRE provider network support
status=immature
driver.ovs=complete
driver.linuxbridge=unknown
driver.odl=complete
driver.midonet=missing
driver.ovn=missing

[operation.Geneve]
title=Geneve provider network support
status=immature
driver.ovs=complete
driver.linuxbridge=unknown
driver.odl=missing
driver.midonet=missing
driver.ovn=complete

========================
Provider Network Support
========================

.. warning::

   Please note, while this document is still being maintained, this is
   slowly being updated to re-group and classify features using the
   definitions described in here:
   :doc:`feature_classification_introduction`.

This document covers the maturity and support for various network isolation
technologies.

When considering which capabilities should be marked as mature the following
general guiding principles were applied:

* **Inclusivity** - people have shown ability to make effective use of a
  wide range of network plugins and drivers with broadly varying feature
  sets.  Aiming to keep the requirements as inclusive as possible avoids
  second-guessing how a user wants to use their networks.

* **Bootstrapping** - a practical use case test is to consider that the
  starting point for the network deployment is an empty data center with new
  machines and network connectivity.  Then look at what are the minimum
  features required of the network service in order to get user instances
  running and connected over the network.

* **Reality** - there are many networking drivers and plugins compatible
  with neutron, each with their own supported feature set.

.. support_matrix:: provider_network_support_matrix.ini
Welcome to Neutron's documentation!
===================================

.. include:: _intro.rst

.. We use different index pages for HTML and PDF documents for better TOC.
   Please ensure to update pdf-index.rst when you update the index below.

Installation Guide
------------------

.. toctree::
   :maxdepth: 2

   Installation Guide <install/index>

Networking Guide
----------------

.. toctree::
   :maxdepth: 3

   admin/index

Configuration Reference
-----------------------

.. toctree::
   :maxdepth: 2

   configuration/index

CLI Reference
-------------

.. toctree::
   :maxdepth: 2

   cli/index

OVN Driver
----------

.. toctree::
   :maxdepth: 2

   ovn/index

Neutron Feature Classification
------------------------------

.. toctree::
   :maxdepth: 2

   feature_classification/index

Contributor Guide
-----------------

.. toctree::
   :maxdepth: 2

   contributor/index

API Reference
-------------

Go to https://docs.openstack.org/api-ref/network/ for information about the
OpenStack Network API and its extensions.

Search
------

* Neutron document search: search the contents of this document.
* OpenStack wide search: search the wider set of OpenStack documentation,
  including forums.

===========================
Networking service overview
===========================

OpenStack Networking (neutron) allows you to create and attach interface
devices managed by other OpenStack services to networks.  Plug-ins can be
implemented to accommodate different networking equipment and software,
providing flexibility to OpenStack architecture and deployment.

It includes the following components:

neutron-server
  Accepts and routes API requests to the appropriate OpenStack Networking
  plug-in for action.

OpenStack Networking plug-ins and agents
  Plug and unplug ports, create networks or subnets, and provide IP
  addressing.  These plug-ins and agents differ depending on the vendor and
  technologies used in the particular cloud.  OpenStack Networking ships
  with plug-ins and agents for Cisco virtual and physical switches, NEC
  OpenFlow products, Open vSwitch, Linux bridging, and the VMware NSX
  product.

  The common agents are L3 (layer 3), DHCP (dynamic host IP addressing),
  and a plug-in agent.

Messaging queue
  Used by most OpenStack Networking installations to route information
  between the neutron-server and various agents.  Also acts as a database to
  store networking state for particular plug-ins.

OpenStack Networking mainly interacts with OpenStack Compute to provide
networks and connectivity for its instances.
Install and configure compute node
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The compute node handles connectivity and security groups for instances.

Install the components
----------------------

.. code-block:: console

   # zypper install --no-recommends \
     openstack-neutron-linuxbridge-agent bridge-utils

.. end

Configure the common component
------------------------------

The Networking common component configuration includes the authentication
mechanism, message queue, and plug-in.

.. include:: shared/note_configuration_vary_by_distribution.rst

* Edit the ``/etc/neutron/neutron.conf`` file and complete the following
  actions:

  * In the ``[database]`` section, comment out any ``connection`` options
    because compute nodes do not directly access the database.

  * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue
    access:

    .. path /etc/neutron/neutron.conf
    .. code-block:: ini

       [DEFAULT]
       # ...
       transport_url = rabbit://openstack:RABBIT_PASS@controller

    .. end

    Replace ``RABBIT_PASS`` with the password you chose for the
    ``openstack`` account in RabbitMQ.

  * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure
    Identity service access:

    .. path /etc/neutron/neutron.conf
    .. code-block:: ini

       [DEFAULT]
       # ...
       auth_strategy = keystone

       [keystone_authtoken]
       # ...
       www_authenticate_uri = http://controller:5000
       auth_url = http://controller:5000
       memcached_servers = controller:11211
       auth_type = password
       project_domain_name = default
       user_domain_name = default
       project_name = service
       username = neutron
       password = NEUTRON_PASS

    .. end

    Replace ``NEUTRON_PASS`` with the password you chose for the
    ``neutron`` user in the Identity service.

    .. note::

       Comment out or remove any other options in the
       ``[keystone_authtoken]`` section.

  * In the ``[oslo_concurrency]`` section, configure the lock path:

    .. path /etc/neutron/neutron.conf
    .. code-block:: ini

       [oslo_concurrency]
       # ...
       lock_path = /var/lib/neutron/tmp

    .. end

Configure networking options
----------------------------

Choose the same networking option that you chose for the controller node to
configure services specific to it.  Afterwards, return here and proceed to
:ref:`neutron-compute-compute-obs`.

.. toctree::
   :maxdepth: 1

   compute-install-option1-obs.rst
   compute-install-option2-obs.rst

.. _neutron-compute-compute-obs:

Configure the Compute service to use the Networking service
-----------------------------------------------------------

* Edit the ``/etc/nova/nova.conf`` file and complete the following actions:

  * In the ``[neutron]`` section, configure access parameters:

    .. path /etc/nova/nova.conf
    .. code-block:: ini

       [neutron]
       # ...
       auth_url = http://controller:5000
       auth_type = password
       project_domain_name = default
       user_domain_name = default
       region_name = RegionOne
       project_name = service
       username = neutron
       password = NEUTRON_PASS

    .. end

    Replace ``NEUTRON_PASS`` with the password you chose for the
    ``neutron`` user in the Identity service.

    See the Nova compute service configuration guide for the full set of
    options, including overriding the service catalog endpoint URL if
    necessary.

Finalize installation
---------------------

#. The Networking service initialization scripts expect the variable
   ``NEUTRON_PLUGIN_CONF`` in the ``/etc/sysconfig/neutron`` file to
   reference the ML2 plug-in configuration file.  Ensure that the
   ``/etc/sysconfig/neutron`` file contains the following:

   .. path /etc/sysconfig/neutron
   .. code-block:: ini

      NEUTRON_PLUGIN_CONF="/etc/neutron/plugins/ml2/ml2_conf.ini"

   .. end

#. Restart the Compute service:

   .. code-block:: console

      # systemctl restart openstack-nova-compute.service

   .. end

#. Start the Linux bridge agent and configure it to start when the system
   boots:

   .. code-block:: console

      # systemctl enable openstack-neutron-linuxbridge-agent.service
      # systemctl start openstack-neutron-linuxbridge-agent.service

   .. end
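Once the agent is running, you can optionally verify from a node with the
OpenStack client (typically the controller) that it registered itself with
the Networking service.  This is a suggested sanity check rather than part
of the original steps, and the output below is trimmed and illustrative:

.. code-block:: console

   $ openstack network agent list
   +-----+--------------------+----------+-------+-------+---------------------------+
   | ID  | Agent Type         | Host     | Alive | State | Binary                    |
   +-----+--------------------+----------+-------+-------+---------------------------+
   | ... | Linux bridge agent | compute1 | :-)   | UP    | neutron-linuxbridge-agent |
   +-----+--------------------+----------+-------+-------+---------------------------+

.. end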
Networking Option 1: Provider networks
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Configure the Networking components on a *compute* node.

Configure the Linux bridge agent
--------------------------------

The Linux bridge agent builds layer-2 (bridging and switching) virtual
networking infrastructure for instances and handles security groups.

* Edit the ``/etc/neutron/plugins/ml2/linuxbridge_agent.ini`` file and
  complete the following actions:

  * In the ``[linux_bridge]`` section, map the provider virtual network to
    the provider physical network interface:

    .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
    .. code-block:: ini

       [linux_bridge]
       physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME

    .. end

    Replace ``PROVIDER_INTERFACE_NAME`` with the name of the underlying
    provider physical network interface.  See
    :doc:`environment-networking-obs` for more information.

  * In the ``[vxlan]`` section, disable VXLAN overlay networks:

    .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
    .. code-block:: ini

       [vxlan]
       enable_vxlan = false

    .. end

  * In the ``[securitygroup]`` section, enable security groups and
    configure the Linux bridge iptables firewall driver:

    .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
    .. code-block:: ini

       [securitygroup]
       # ...
       enable_security_group = true
       firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

    .. end

* Ensure your Linux operating system kernel supports network bridge filters
  by verifying all the following ``sysctl`` values are set to ``1``:

  .. code-block:: ini

     net.bridge.bridge-nf-call-iptables
     net.bridge.bridge-nf-call-ip6tables

  .. end

  To enable networking bridge support, typically the ``br_netfilter``
  kernel module needs to be loaded.  Check your operating system's
  documentation for additional details on enabling this module (an example
  check follows this list).
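As a sketch of what that check might look like on a typical Linux host
(making the module persist across reboots is distribution-specific and not
shown here):

.. code-block:: console

   # modprobe br_netfilter
   # sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables
   net.bridge.bridge-nf-call-iptables = 1
   net.bridge.bridge-nf-call-ip6tables = 1

.. end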
Return to *Networking compute node configuration*

Networking Option 1: Provider networks
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Configure the Networking components on a *compute* node.

Configure the Linux bridge agent
--------------------------------

The Linux bridge agent builds layer-2 (bridging and switching) virtual
networking infrastructure for instances and handles security groups.

* Edit the ``/etc/neutron/plugins/ml2/linuxbridge_agent.ini`` file and
  complete the following actions:

  * In the ``[linux_bridge]`` section, map the provider virtual network to
    the provider physical network interface:

    .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
    .. code-block:: ini

       [linux_bridge]
       physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME

    .. end

    Replace ``PROVIDER_INTERFACE_NAME`` with the name of the underlying
    provider physical network interface.  See
    :doc:`environment-networking-rdo` for more information.

  * In the ``[vxlan]`` section, disable VXLAN overlay networks:

    .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
    .. code-block:: ini

       [vxlan]
       enable_vxlan = false

    .. end

  * In the ``[securitygroup]`` section, enable security groups and
    configure the Linux bridge iptables firewall driver:

    .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
    .. code-block:: ini

       [securitygroup]
       # ...
       enable_security_group = true
       firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

    .. end

* Ensure your Linux operating system kernel supports network bridge filters
  by verifying all the following ``sysctl`` values are set to ``1``:

  .. code-block:: ini

     net.bridge.bridge-nf-call-iptables
     net.bridge.bridge-nf-call-ip6tables

  .. end

  To enable networking bridge support, typically the ``br_netfilter``
  kernel module needs to be loaded.  Check your operating system's
  documentation for additional details on enabling this module.

Return to *Networking compute node configuration*

Networking Option 1: Provider networks
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Configure the Networking components on a *compute* node.

Configure the Linux bridge agent
--------------------------------

The Linux bridge agent builds layer-2 (bridging and switching) virtual
networking infrastructure for instances and handles security groups.

* Edit the ``/etc/neutron/plugins/ml2/linuxbridge_agent.ini`` file and
  complete the following actions:

  * In the ``[linux_bridge]`` section, map the provider virtual network to
    the provider physical network interface:

    .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
    .. code-block:: ini

       [linux_bridge]
       physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME

    .. end

    Replace ``PROVIDER_INTERFACE_NAME`` with the name of the underlying
    provider physical network interface.  See
    :doc:`environment-networking-ubuntu` for more information.

  * In the ``[vxlan]`` section, disable VXLAN overlay networks:

    .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
    .. code-block:: ini

       [vxlan]
       enable_vxlan = false

    .. end

  * In the ``[securitygroup]`` section, enable security groups and
    configure the Linux bridge iptables firewall driver:

    .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
    .. code-block:: ini

       [securitygroup]
       # ...
       enable_security_group = true
       firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

    .. end

* Ensure your Linux operating system kernel supports network bridge filters
  by verifying all the following ``sysctl`` values are set to ``1``:

  .. code-block:: ini

     net.bridge.bridge-nf-call-iptables
     net.bridge.bridge-nf-call-ip6tables

  .. end

  To enable networking bridge support, typically the ``br_netfilter``
  kernel module needs to be loaded.  Check your operating system's
  documentation for additional details on enabling this module.

Return to *Networking compute node configuration*
Networking Option 2: Self-service networks
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Configure the Networking components on a *compute* node.

Configure the Linux bridge agent
--------------------------------

The Linux bridge agent builds layer-2 (bridging and switching) virtual
networking infrastructure for instances and handles security groups.

* Edit the ``/etc/neutron/plugins/ml2/linuxbridge_agent.ini`` file and
  complete the following actions:

  * In the ``[linux_bridge]`` section, map the provider virtual network to
    the provider physical network interface:

    .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
    .. code-block:: ini

       [linux_bridge]
       physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME

    .. end

    Replace ``PROVIDER_INTERFACE_NAME`` with the name of the underlying
    provider physical network interface.  See
    :doc:`environment-networking-obs` for more information.

  * In the ``[vxlan]`` section, enable VXLAN overlay networks, configure
    the IP address of the physical network interface that handles overlay
    networks, and enable layer-2 population (a filled-in example follows
    this list):

    .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
    .. code-block:: ini

       [vxlan]
       enable_vxlan = true
       local_ip = OVERLAY_INTERFACE_IP_ADDRESS
       l2_population = true

    .. end

    Replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with the IP address of the
    underlying physical network interface that handles overlay networks.
    The example architecture uses the management interface to tunnel
    traffic to the other nodes.  Therefore, replace
    ``OVERLAY_INTERFACE_IP_ADDRESS`` with the management IP address of the
    compute node.  See :doc:`environment-networking-obs` for more
    information.

  * In the ``[securitygroup]`` section, enable security groups and
    configure the Linux bridge iptables firewall driver:

    .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
    .. code-block:: ini

       [securitygroup]
       # ...
       enable_security_group = true
       firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

    .. end

* Ensure your Linux operating system kernel supports network bridge filters
  by verifying all the following ``sysctl`` values are set to ``1``:

  .. code-block:: ini

     net.bridge.bridge-nf-call-iptables
     net.bridge.bridge-nf-call-ip6tables

  .. end

  To enable networking bridge support, typically the ``br_netfilter``
  kernel module needs to be loaded.  Check your operating system's
  documentation for additional details on enabling this module.
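For instance, on a compute node whose management interface uses 10.0.0.31 (a
hypothetical address chosen to match the install guide's example
architecture; substitute your own management IP), the section would read:

.. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
.. code-block:: ini

   [vxlan]
   enable_vxlan = true
   local_ip = 10.0.0.31
   l2_population = true

.. end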
Return to *Networking compute node configuration*.

Networking Option 2: Self-service networks
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Configure the Networking components on a *compute* node.

Configure the Linux bridge agent
--------------------------------

The Linux bridge agent builds layer-2 (bridging and switching) virtual
networking infrastructure for instances and handles security groups.

* Edit the ``/etc/neutron/plugins/ml2/linuxbridge_agent.ini`` file and
  complete the following actions:

  * In the ``[linux_bridge]`` section, map the provider virtual network to
    the provider physical network interface:

    .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
    .. code-block:: ini

       [linux_bridge]
       physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME

    .. end

    Replace ``PROVIDER_INTERFACE_NAME`` with the name of the underlying
    provider physical network interface.  See
    :doc:`environment-networking-rdo` for more information.

  * In the ``[vxlan]`` section, enable VXLAN overlay networks, configure
    the IP address of the physical network interface that handles overlay
    networks, and enable layer-2 population:

    .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
    .. code-block:: ini

       [vxlan]
       enable_vxlan = true
       local_ip = OVERLAY_INTERFACE_IP_ADDRESS
       l2_population = true

    .. end

    Replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with the IP address of the
    underlying physical network interface that handles overlay networks.
    The example architecture uses the management interface to tunnel
    traffic to the other nodes.  Therefore, replace
    ``OVERLAY_INTERFACE_IP_ADDRESS`` with the management IP address of the
    compute node.  See :doc:`environment-networking-rdo` for more
    information.

  * In the ``[securitygroup]`` section, enable security groups and
    configure the Linux bridge iptables firewall driver:

    .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
    .. code-block:: ini

       [securitygroup]
       # ...
       enable_security_group = true
       firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

    .. end

* Ensure your Linux operating system kernel supports network bridge filters
  by verifying all the following ``sysctl`` values are set to ``1``:

  .. code-block:: ini

     net.bridge.bridge-nf-call-iptables
     net.bridge.bridge-nf-call-ip6tables

  .. end

  To enable networking bridge support, typically the ``br_netfilter``
  kernel module needs to be loaded.  Check your operating system's
  documentation for additional details on enabling this module.

Return to *Networking compute node configuration*.
Networking Option 2: Self-service networks
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Configure the Networking components on a *compute* node.

Configure the Linux bridge agent
--------------------------------

The Linux bridge agent builds layer-2 (bridging and switching) virtual
networking infrastructure for instances and handles security groups.

* Edit the ``/etc/neutron/plugins/ml2/linuxbridge_agent.ini`` file and
  complete the following actions:

  * In the ``[linux_bridge]`` section, map the provider virtual network to
    the provider physical network interface:

    .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
    .. code-block:: ini

       [linux_bridge]
       physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME

    .. end

    Replace ``PROVIDER_INTERFACE_NAME`` with the name of the underlying
    provider physical network interface.  See
    :doc:`environment-networking-ubuntu` for more information.

  * In the ``[vxlan]`` section, enable VXLAN overlay networks, configure
    the IP address of the physical network interface that handles overlay
    networks, and enable layer-2 population:

    .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
    .. code-block:: ini

       [vxlan]
       enable_vxlan = true
       local_ip = OVERLAY_INTERFACE_IP_ADDRESS
       l2_population = true

    .. end

    Replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with the IP address of the
    underlying physical network interface that handles overlay networks.
    The example architecture uses the management interface to tunnel
    traffic to the other nodes.  Therefore, replace
    ``OVERLAY_INTERFACE_IP_ADDRESS`` with the management IP address of the
    compute node.  See :doc:`environment-networking-ubuntu` for more
    information.

  * In the ``[securitygroup]`` section, enable security groups and
    configure the Linux bridge iptables firewall driver:

    .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini
    .. code-block:: ini

       [securitygroup]
       # ...
       enable_security_group = true
       firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

    .. end

* Ensure your Linux operating system kernel supports network bridge filters
  by verifying all the following ``sysctl`` values are set to ``1``:

  .. code-block:: ini

     net.bridge.bridge-nf-call-iptables
     net.bridge.bridge-nf-call-ip6tables

  .. end

  To enable networking bridge support, typically the ``br_netfilter``
  kernel module needs to be loaded.  Check your operating system's
  documentation for additional details on enabling this module.

Return to *Networking compute node configuration*.
Install and configure compute node
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The compute node handles connectivity and security groups for instances.

Install the components
----------------------

.. todo: https://bugzilla.redhat.com/show_bug.cgi?id=1334626

.. code-block:: console

   # yum install openstack-neutron-linuxbridge ebtables ipset

.. end

Configure the common component
------------------------------

The Networking common component configuration includes the authentication
mechanism, message queue, and plug-in.

.. include:: shared/note_configuration_vary_by_distribution.rst

* Edit the ``/etc/neutron/neutron.conf`` file and complete the following
  actions:

  * In the ``[database]`` section, comment out any ``connection`` options
    because compute nodes do not directly access the database.

  * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue
    access:

    .. path /etc/neutron/neutron.conf
    .. code-block:: ini

       [DEFAULT]
       # ...
       transport_url = rabbit://openstack:RABBIT_PASS@controller

    .. end

    Replace ``RABBIT_PASS`` with the password you chose for the
    ``openstack`` account in RabbitMQ.

  * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure
    Identity service access:

    .. path /etc/neutron/neutron.conf
    .. code-block:: ini

       [DEFAULT]
       # ...
       auth_strategy = keystone

       [keystone_authtoken]
       # ...
       www_authenticate_uri = http://controller:5000
       auth_url = http://controller:5000
       memcached_servers = controller:11211
       auth_type = password
       project_domain_name = default
       user_domain_name = default
       project_name = service
       username = neutron
       password = NEUTRON_PASS

    .. end

    Replace ``NEUTRON_PASS`` with the password you chose for the
    ``neutron`` user in the Identity service.

    .. note::

       Comment out or remove any other options in the
       ``[keystone_authtoken]`` section.

  * In the ``[oslo_concurrency]`` section, configure the lock path:

    .. path /etc/neutron/neutron.conf
    .. code-block:: ini

       [oslo_concurrency]
       # ...
       lock_path = /var/lib/neutron/tmp

    .. end

Configure networking options
----------------------------

Choose the same networking option that you chose for the controller node to
configure services specific to it.  Afterwards, return here and proceed to
:ref:`neutron-compute-compute-rdo`.

.. toctree::
   :maxdepth: 1

   compute-install-option1-rdo.rst
   compute-install-option2-rdo.rst

.. _neutron-compute-compute-rdo:

Configure the Compute service to use the Networking service
-----------------------------------------------------------

* Edit the ``/etc/nova/nova.conf`` file and complete the following actions:

  * In the ``[neutron]`` section, configure access parameters:

    .. path /etc/nova/nova.conf
    .. code-block:: ini

       [neutron]
       # ...
       auth_url = http://controller:5000
       auth_type = password
       project_domain_name = default
       user_domain_name = default
       region_name = RegionOne
       project_name = service
       username = neutron
       password = NEUTRON_PASS

    .. end

    Replace ``NEUTRON_PASS`` with the password you chose for the
    ``neutron`` user in the Identity service.

    See the Nova compute service configuration guide for the full set of
    options, including overriding the service catalog endpoint URL if
    necessary.

Finalize installation
---------------------

#. Restart the Compute service:

   .. code-block:: console

      # systemctl restart openstack-nova-compute.service

   .. end

#. Start the Linux bridge agent and configure it to start when the system
   boots:

   .. code-block:: console

      # systemctl enable neutron-linuxbridge-agent.service
      # systemctl start neutron-linuxbridge-agent.service

   .. end
Install and configure compute node
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The compute node handles connectivity and security groups for instances.

Install the components
----------------------

.. code-block:: console

   # apt install neutron-linuxbridge-agent

.. end

Configure the common component
------------------------------

The Networking common component configuration includes the authentication
mechanism, message queue, and plug-in.

.. include:: shared/note_configuration_vary_by_distribution.rst

* Edit the ``/etc/neutron/neutron.conf`` file and complete the following
  actions:

  * In the ``[database]`` section, comment out any ``connection`` options
    because compute nodes do not directly access the database.

  * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue
    access:

    .. path /etc/neutron/neutron.conf
    .. code-block:: ini

       [DEFAULT]
       # ...
       transport_url = rabbit://openstack:RABBIT_PASS@controller

    .. end

    Replace ``RABBIT_PASS`` with the password you chose for the
    ``openstack`` account in RabbitMQ.

  * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure
    Identity service access:

    .. path /etc/neutron/neutron.conf
    .. code-block:: ini

       [DEFAULT]
       # ...
       auth_strategy = keystone

       [keystone_authtoken]
       # ...
       www_authenticate_uri = http://controller:5000
       auth_url = http://controller:5000
       memcached_servers = controller:11211
       auth_type = password
       project_domain_name = default
       user_domain_name = default
       project_name = service
       username = neutron
       password = NEUTRON_PASS

    .. end

    Replace ``NEUTRON_PASS`` with the password you chose for the
    ``neutron`` user in the Identity service.

    .. note::

       Comment out or remove any other options in the
       ``[keystone_authtoken]`` section.

  * In the ``[oslo_concurrency]`` section, configure the lock path:

    .. path /etc/neutron/neutron.conf
    .. code-block:: ini

       [oslo_concurrency]
       # ...
       lock_path = /var/lib/neutron/tmp

    .. end

Configure networking options
----------------------------

Choose the same networking option that you chose for the controller node to
configure services specific to it.  Afterwards, return here and proceed to
:ref:`neutron-compute-compute-ubuntu`.

.. toctree::
   :maxdepth: 1

   compute-install-option1-ubuntu.rst
   compute-install-option2-ubuntu.rst

.. _neutron-compute-compute-ubuntu:

Configure the Compute service to use the Networking service
-----------------------------------------------------------

* Edit the ``/etc/nova/nova.conf`` file and complete the following actions:

  * In the ``[neutron]`` section, configure access parameters:

    .. path /etc/nova/nova.conf
    .. code-block:: ini

       [neutron]
       # ...
       auth_url = http://controller:5000
       auth_type = password
       project_domain_name = default
       user_domain_name = default
       region_name = RegionOne
       project_name = service
       username = neutron
       password = NEUTRON_PASS

    .. end

    Replace ``NEUTRON_PASS`` with the password you chose for the
    ``neutron`` user in the Identity service.

    See the Nova compute service configuration guide for the full set of
    options, including overriding the service catalog endpoint URL if
    necessary.

Finalize installation
---------------------

#. Restart the Compute service:

   .. code-block:: console

      # service nova-compute restart

   .. end

#. Restart the Linux bridge agent:

   .. code-block:: console

      # service neutron-linuxbridge-agent restart

   .. end

Networking (neutron) concepts
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

OpenStack Networking (neutron) manages all networking facets for the
Virtual Networking Infrastructure (VNI) and the access layer aspects of the
Physical Networking Infrastructure (PNI) in your OpenStack environment.
OpenStack Networking enables projects to create advanced virtual network
topologies which may include services such as a firewall and a virtual
private network (VPN).

Networking provides networks, subnets, and routers as object abstractions.
Each abstraction has functionality that mimics its physical counterpart:
networks contain subnets, and routers route traffic between different
subnets and networks.

Any given Networking set up has at least one external network.  Unlike the
other networks, the external network is not merely a virtually defined
network.  Instead, it represents a view into a slice of the physical,
external network accessible outside the OpenStack installation.  IP
addresses on the external network are accessible by anybody physically on
the outside network.

In addition to external networks, any Networking set up has one or more
internal networks.  These software-defined networks connect directly to the
VMs.  Only the VMs on any given internal network, or those on subnets
connected through interfaces to a similar router, can access VMs connected
to that network directly.

For the outside network to access VMs, and vice versa, routers between the
networks are needed.  Each router has one gateway that is connected to an
external network and one or more interfaces connected to internal networks.
As with a physical router, machines on subnets connected to the same router
can reach one another, and machines can access the outside network through
the gateway for the router.

Additionally, you can allocate IP addresses on external networks to ports on
the internal network.  Whenever something is connected to a subnet, that
connection is called a port.  You can associate external network IP
addresses with ports to VMs.
This way, entities on the outside network can access VMs.

Networking also supports *security groups*.  Security groups enable
administrators to define firewall rules in groups.  A VM can belong to one
or more security groups, and Networking applies the rules in those security
groups to block or unblock ports, port ranges, or traffic types for that
VM.

Each plug-in that Networking uses has its own concepts.  While not vital to
operating the VNI and OpenStack environment, understanding these concepts
can help you set up Networking.  All Networking installations use a core
plug-in and a security group plug-in (or just the No-Op security group
plug-in).  Additionally, Firewall-as-a-Service (FWaaS) is available.

Install and configure controller node
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Prerequisites
-------------

Before you configure the OpenStack Networking (neutron) service, you must
create a database, service credentials, and API endpoints.

#. To create the database, complete these steps:

   * Use the database access client to connect to the database server as
     the ``root`` user:

     .. code-block:: console

        $ mysql -u root -p

     .. end

   * Create the ``neutron`` database:

     .. code-block:: console

        MariaDB [(none)]> CREATE DATABASE neutron;

     .. end

   * Grant proper access to the ``neutron`` database, replacing
     ``NEUTRON_DBPASS`` with a suitable password:

     .. code-block:: console

        MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' \
          IDENTIFIED BY 'NEUTRON_DBPASS';
        MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' \
          IDENTIFIED BY 'NEUTRON_DBPASS';

     .. end

   * Exit the database access client.

#. Source the ``admin`` credentials to gain access to admin-only CLI
   commands:

   .. code-block:: console

      $ . admin-openrc

   .. end

#. To create the service credentials, complete these steps:

   * Create the ``neutron`` user:

     .. code-block:: console

        $ openstack user create --domain default --password-prompt neutron

        User Password:
        Repeat User Password:
        +---------------------+----------------------------------+
        | Field               | Value                            |
        +---------------------+----------------------------------+
        | domain_id           | default                          |
        | enabled             | True                             |
        | id                  | fdb0f541e28141719b6a43c8944bf1fb |
        | name                | neutron                          |
        | options             | {}                               |
        | password_expires_at | None                             |
        +---------------------+----------------------------------+

     .. end

   * Add the ``admin`` role to the ``neutron`` user:

     .. code-block:: console

        $ openstack role add --project service --user neutron admin

     .. end

     .. note::

        This command provides no output.

   * Create the ``neutron`` service entity:

     .. code-block:: console

        $ openstack service create --name neutron \
          --description "OpenStack Networking" network

        +-------------+----------------------------------+
        | Field       | Value                            |
        +-------------+----------------------------------+
        | description | OpenStack Networking             |
        | enabled     | True                             |
        | id          | f71529314dab4a4d8eca427e701d209e |
        | name        | neutron                          |
        | type        | network                          |
        +-------------+----------------------------------+

     .. end

#. Create the Networking service API endpoints:
code-block:: console $ openstack endpoint create --region RegionOne \ network public http://controller:9696 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 85d80a6d02fc4b7683f611d7fc1493a3 | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | f71529314dab4a4d8eca427e701d209e | | service_name | neutron | | service_type | network | | url | http://controller:9696 | +--------------+----------------------------------+ $ openstack endpoint create --region RegionOne \ network internal http://controller:9696 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 09753b537ac74422a68d2d791cf3714f | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | f71529314dab4a4d8eca427e701d209e | | service_name | neutron | | service_type | network | | url | http://controller:9696 | +--------------+----------------------------------+ $ openstack endpoint create --region RegionOne \ network admin http://controller:9696 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 1ee14289c9374dffb5db92a5c112fc4e | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | f71529314dab4a4d8eca427e701d209e | | service_name | neutron | | service_type | network | | url | http://controller:9696 | +--------------+----------------------------------+ .. end Configure networking options ---------------------------- You can deploy the Networking service using one of two architectures represented by options 1 and 2. Option 1 deploys the simplest possible architecture that only supports attaching instances to provider (external) networks. No self-service (private) networks, routers, or floating IP addresses. Only the ``admin`` or other privileged user can manage provider networks. Option 2 augments option 1 with layer-3 services that support attaching instances to self-service networks. The ``demo`` or other unprivileged user can manage self-service networks including routers that provide connectivity between self-service and provider networks. Additionally, floating IP addresses provide connectivity to instances using self-service networks from external networks such as the Internet. Self-service networks typically use overlay networks. Overlay network protocols such as VXLAN include additional headers that increase overhead and decrease space available for the payload or user data. Without knowledge of the virtual network infrastructure, instances attempt to send packets using the default Ethernet maximum transmission unit (MTU) of 1500 bytes. The Networking service automatically provides the correct MTU value to instances via DHCP. However, some cloud images do not use DHCP or ignore the DHCP MTU option and require configuration using metadata or a script. .. note:: Option 2 also supports attaching instances to provider networks. Choose one of the following networking options to configure services specific to it. Afterwards, return here and proceed to :ref:`neutron-controller-metadata-agent-obs`. .. toctree:: :maxdepth: 1 controller-install-option1-obs.rst controller-install-option2-obs.rst .. 
_neutron-controller-metadata-agent-obs: Configure the metadata agent ---------------------------- The metadata agent provides configuration information such as credentials to instances. * Edit the ``/etc/neutron/metadata_agent.ini`` file and complete the following actions: * In the ``[DEFAULT]`` section, configure the metadata host and shared secret: .. path /etc/neutron/metadata_agent.ini .. code-block:: ini [DEFAULT] # ... nova_metadata_host = controller metadata_proxy_shared_secret = METADATA_SECRET .. end Replace ``METADATA_SECRET`` with a suitable secret for the metadata proxy. Configure the Compute service to use the Networking service ----------------------------------------------------------- .. note:: The Nova compute service must be installed to complete this step. For more details see the compute install guide found under the `Installation Guides` section of the `docs website `_. * Edit the ``/etc/nova/nova.conf`` file and perform the following actions: * In the ``[neutron]`` section, configure access parameters, enable the metadata proxy, and configure the secret: .. path /etc/nova/nova.conf .. code-block:: ini [neutron] # ... auth_url = http://controller:5000 auth_type = password project_domain_name = default user_domain_name = default region_name = RegionOne project_name = service username = neutron password = NEUTRON_PASS service_metadata_proxy = true metadata_proxy_shared_secret = METADATA_SECRET .. end Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron`` user in the Identity service. Replace ``METADATA_SECRET`` with the secret you chose for the metadata proxy. See the :nova-doc:`compute service configuration guide ` for the full set of options including overriding the service catalog endpoint URL if necessary. Finalize installation --------------------- .. note:: SLES enables apparmor by default and restricts dnsmasq. You need to either completely disable apparmor or disable only the dnsmasq profile: .. code-block:: console # ln -s /etc/apparmor.d/usr.sbin.dnsmasq /etc/apparmor.d/disable/ # systemctl restart apparmor .. end #. Restart the Compute API service: .. code-block:: console # systemctl restart openstack-nova-api.service .. end #. Start the Networking services and configure them to start when the system boots. For both networking options: .. code-block:: console # systemctl enable openstack-neutron.service \ openstack-neutron-linuxbridge-agent.service \ openstack-neutron-dhcp-agent.service \ openstack-neutron-metadata-agent.service # systemctl start openstack-neutron.service \ openstack-neutron-linuxbridge-agent.service \ openstack-neutron-dhcp-agent.service \ openstack-neutron-metadata-agent.service .. end For networking option 2, also enable and start the layer-3 service: .. code-block:: console # systemctl enable openstack-neutron-l3-agent.service # systemctl start openstack-neutron-l3-agent.service .. end ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/controller-install-option1-obs.rst0000644000175000017500000002013700000000000027412 0ustar00coreycorey00000000000000Networking Option 1: Provider networks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Install and configure the Networking components on the *controller* node. Install the components ---------------------- .. 
code-block:: console # zypper install --no-recommends openstack-neutron \ openstack-neutron-server openstack-neutron-linuxbridge-agent \ openstack-neutron-dhcp-agent openstack-neutron-metadata-agent \ bridge-utils .. end Configure the server component ------------------------------ The Networking server component configuration includes the database, authentication mechanism, message queue, topology change notifications, and plug-in. .. include:: shared/note_configuration_vary_by_distribution.rst * Edit the ``/etc/neutron/neutron.conf`` file and complete the following actions: * In the ``[database]`` section, configure database access: .. path /etc/neutron/neutron.conf .. code-block:: ini [database] # ... connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron .. end Replace ``NEUTRON_DBPASS`` with the password you chose for the database. .. note:: Comment out or remove any other ``connection`` options in the ``[database]`` section. * In the ``[DEFAULT]`` section, enable the Modular Layer 2 (ML2) plug-in and disable additional plug-ins: .. path /etc/neutron/neutron.conf .. code-block:: ini [DEFAULT] # ... core_plugin = ml2 service_plugins = .. end * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: .. path /etc/neutron/neutron.conf .. code-block:: ini [DEFAULT] # ... transport_url = rabbit://openstack:RABBIT_PASS@controller .. end Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in RabbitMQ. * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure Identity service access: .. path /etc/neutron/neutron.conf .. code-block:: ini [DEFAULT] # ... auth_strategy = keystone [keystone_authtoken] # ... www_authenticate_uri = http://controller:5000 auth_url = http://controller:5000 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = neutron password = NEUTRON_PASS .. end Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron`` user in the Identity service. .. note:: Comment out or remove any other options in the ``[keystone_authtoken]`` section. * In the ``[DEFAULT]`` and ``[nova]`` sections, configure Networking to notify Compute of network topology changes: .. path /etc/neutron/neutron.conf .. code-block:: ini [DEFAULT] # ... notify_nova_on_port_status_changes = true notify_nova_on_port_data_changes = true [nova] # ... auth_url = http://controller:5000 auth_type = password project_domain_name = default user_domain_name = default region_name = RegionOne project_name = service username = nova password = NOVA_PASS .. end Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in the Identity service. * In the ``[oslo_concurrency]`` section, configure the lock path: .. path /etc/neutron/neutron.conf .. code-block:: ini [oslo_concurrency] # ... lock_path = /var/lib/neutron/tmp .. end Configure the Modular Layer 2 (ML2) plug-in ------------------------------------------- The ML2 plug-in uses the Linux bridge mechanism to build layer-2 (bridging and switching) virtual networking infrastructure for instances. * Edit the ``/etc/neutron/plugins/ml2/ml2_conf.ini`` file and complete the following actions: * In the ``[ml2]`` section, enable flat and VLAN networks: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2] # ... type_drivers = flat,vlan .. end * In the ``[ml2]`` section, disable self-service networks: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. 
code-block:: ini [ml2] # ... tenant_network_types = .. end * In the ``[ml2]`` section, enable the Linux bridge mechanism: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2] # ... mechanism_drivers = linuxbridge .. end .. warning:: After you configure the ML2 plug-in, removing values in the ``type_drivers`` option can lead to database inconsistency. * In the ``[ml2]`` section, enable the port security extension driver: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2] # ... extension_drivers = port_security .. end * In the ``[ml2_type_flat]`` section, configure the provider virtual network as a flat network: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2_type_flat] # ... flat_networks = provider .. end * In the ``[securitygroup]`` section, enable ipset to increase efficiency of security group rules: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [securitygroup] # ... enable_ipset = true .. end Configure the Linux bridge agent -------------------------------- The Linux bridge agent builds layer-2 (bridging and switching) virtual networking infrastructure for instances and handles security groups. * Edit the ``/etc/neutron/plugins/ml2/linuxbridge_agent.ini`` file and complete the following actions: * In the ``[linux_bridge]`` section, map the provider virtual network to the provider physical network interface: .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini .. code-block:: ini [linux_bridge] physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME .. end Replace ``PROVIDER_INTERFACE_NAME`` with the name of the underlying provider physical network interface. See :doc:`environment-networking-obs` for more information. * In the ``[vxlan]`` section, disable VXLAN overlay networks: .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini .. code-block:: ini [vxlan] enable_vxlan = false .. end * In the ``[securitygroup]`` section, enable security groups and configure the Linux bridge iptables firewall driver: .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini .. code-block:: ini [securitygroup] # ... enable_security_group = true firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver .. end * Ensure your Linux operating system kernel supports network bridge filters by verifying all the following ``sysctl`` values are set to ``1``: .. code-block:: ini net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables .. end To enable networking bridge support, typically the ``br_netfilter`` kernel module needs to be loaded. Check your operating system's documentation for additional details on enabling this module. Configure the DHCP agent ------------------------ The DHCP agent provides DHCP services for virtual networks. * Edit the ``/etc/neutron/dhcp_agent.ini`` file and complete the following actions: * In the ``[DEFAULT]`` section, configure the Linux bridge interface driver, Dnsmasq DHCP driver, and enable isolated metadata so instances on provider networks can access metadata over the network: .. path /etc/neutron/dhcp_agent.ini .. code-block:: ini [DEFAULT] # ... interface_driver = linuxbridge dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq enable_isolated_metadata = true .. end Create the provider network --------------------------- Follow `this provider network document `_ from the General Installation Guide. Return to *Networking controller node configuration*. 
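A note on the bridge-filter prerequisite mentioned above: the exact commands vary by distribution, but a minimal sketch for loading the module and verifying the values might look like this (``br_netfilter`` ships with mainline kernels):

.. code-block:: console

   # modprobe br_netfilter
   # sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables

.. end

Both values should report ``1``; if not, they can be set with ``sysctl -w`` and persisted under ``/etc/sysctl.d/``.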
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/controller-install-option1-rdo.rst0000644000175000017500000001776300000000000027426 0ustar00coreycorey00000000000000Networking Option 1: Provider networks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Install and configure the Networking components on the *controller* node. Install the components ---------------------- .. code-block:: console # yum install openstack-neutron openstack-neutron-ml2 \ openstack-neutron-linuxbridge ebtables .. end Configure the server component ------------------------------ The Networking server component configuration includes the database, authentication mechanism, message queue, topology change notifications, and plug-in. .. include:: shared/note_configuration_vary_by_distribution.rst * Edit the ``/etc/neutron/neutron.conf`` file and complete the following actions: * In the ``[database]`` section, configure database access: .. path /etc/neutron/neutron.conf .. code-block:: ini [database] # ... connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron .. end Replace ``NEUTRON_DBPASS`` with the password you chose for the database. .. note:: Comment out or remove any other ``connection`` options in the ``[database]`` section. * In the ``[DEFAULT]`` section, enable the Modular Layer 2 (ML2) plug-in and disable additional plug-ins: .. path /etc/neutron/neutron.conf .. code-block:: ini [DEFAULT] # ... core_plugin = ml2 service_plugins = .. end * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: .. path /etc/neutron/neutron.conf .. code-block:: ini [DEFAULT] # ... transport_url = rabbit://openstack:RABBIT_PASS@controller .. end Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in RabbitMQ. * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure Identity service access: .. path /etc/neutron/neutron.conf .. code-block:: ini [DEFAULT] # ... auth_strategy = keystone [keystone_authtoken] # ... www_authenticate_uri = http://controller:5000 auth_url = http://controller:5000 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = neutron password = NEUTRON_PASS .. end Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron`` user in the Identity service. .. note:: Comment out or remove any other options in the ``[keystone_authtoken]`` section. * In the ``[DEFAULT]`` and ``[nova]`` sections, configure Networking to notify Compute of network topology changes: .. path /etc/neutron/neutron.conf .. code-block:: ini [DEFAULT] # ... notify_nova_on_port_status_changes = true notify_nova_on_port_data_changes = true [nova] # ... auth_url = http://controller:5000 auth_type = password project_domain_name = default user_domain_name = default region_name = RegionOne project_name = service username = nova password = NOVA_PASS .. end Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in the Identity service. * In the ``[oslo_concurrency]`` section, configure the lock path: .. path /etc/neutron/neutron.conf .. code-block:: ini [oslo_concurrency] # ... lock_path = /var/lib/neutron/tmp .. end Configure the Modular Layer 2 (ML2) plug-in ------------------------------------------- The ML2 plug-in uses the Linux bridge mechanism to build layer-2 (bridging and switching) virtual networking infrastructure for instances. 
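Before stepping through the individual options, it may help to see where they lead. Assuming the example values used throughout this guide, the relevant parts of ``/etc/neutron/plugins/ml2/ml2_conf.ini`` end up roughly as follows (the steps below set each option in turn):

.. code-block:: ini

   [ml2]
   type_drivers = flat,vlan
   tenant_network_types =
   mechanism_drivers = linuxbridge
   extension_drivers = port_security

   [ml2_type_flat]
   flat_networks = provider

   [securitygroup]
   enable_ipset = true

.. end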
* Edit the ``/etc/neutron/plugins/ml2/ml2_conf.ini`` file and complete the following actions: * In the ``[ml2]`` section, enable flat and VLAN networks: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2] # ... type_drivers = flat,vlan .. end * In the ``[ml2]`` section, disable self-service networks: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2] # ... tenant_network_types = .. end * In the ``[ml2]`` section, enable the Linux bridge mechanism: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2] # ... mechanism_drivers = linuxbridge .. end .. warning:: After you configure the ML2 plug-in, removing values in the ``type_drivers`` option can lead to database inconsistency. * In the ``[ml2]`` section, enable the port security extension driver: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2] # ... extension_drivers = port_security .. end * In the ``[ml2_type_flat]`` section, configure the provider virtual network as a flat network: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2_type_flat] # ... flat_networks = provider .. end * In the ``[securitygroup]`` section, enable ipset to increase efficiency of security group rules: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [securitygroup] # ... enable_ipset = true .. end Configure the Linux bridge agent -------------------------------- The Linux bridge agent builds layer-2 (bridging and switching) virtual networking infrastructure for instances and handles security groups. * Edit the ``/etc/neutron/plugins/ml2/linuxbridge_agent.ini`` file and complete the following actions: * In the ``[linux_bridge]`` section, map the provider virtual network to the provider physical network interface: .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini .. code-block:: ini [linux_bridge] physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME .. end Replace ``PROVIDER_INTERFACE_NAME`` with the name of the underlying provider physical network interface. See :doc:`environment-networking-rdo` for more information. * In the ``[vxlan]`` section, disable VXLAN overlay networks: .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini .. code-block:: ini [vxlan] enable_vxlan = false .. end * In the ``[securitygroup]`` section, enable security groups and configure the Linux bridge iptables firewall driver: .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini .. code-block:: ini [securitygroup] # ... enable_security_group = true firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver .. end * Ensure your Linux operating system kernel supports network bridge filters by verifying all the following ``sysctl`` values are set to ``1``: .. code-block:: ini net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables .. end To enable networking bridge support, typically the ``br_netfilter`` kernel module needs to be loaded. Check your operating system's documentation for additional details on enabling this module. Configure the DHCP agent ------------------------ The DHCP agent provides DHCP services for virtual networks. * Edit the ``/etc/neutron/dhcp_agent.ini`` file and complete the following actions: * In the ``[DEFAULT]`` section, configure the Linux bridge interface driver, Dnsmasq DHCP driver, and enable isolated metadata so instances on provider networks can access metadata over the network: .. path /etc/neutron/dhcp_agent.ini .. code-block:: ini [DEFAULT] # ... 
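# The three options that follow select the Linux bridge interface driver,
# the dnsmasq-based DHCP driver, and isolated metadata (so instances on
# provider networks can reach the metadata service over the network).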
interface_driver = linuxbridge dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq enable_isolated_metadata = true .. end Create the provider network --------------------------- Follow `this provider network document `_ from the General Installation Guide. Return to *Networking controller node configuration*. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/controller-install-option1-ubuntu.rst0000644000175000017500000002002400000000000030144 0ustar00coreycorey00000000000000Networking Option 1: Provider networks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Install and configure the Networking components on the *controller* node. Install the components ---------------------- .. code-block:: console # apt install neutron-server neutron-plugin-ml2 \ neutron-linuxbridge-agent neutron-dhcp-agent \ neutron-metadata-agent .. end Configure the server component ------------------------------ The Networking server component configuration includes the database, authentication mechanism, message queue, topology change notifications, and plug-in. .. include:: shared/note_configuration_vary_by_distribution.rst * Edit the ``/etc/neutron/neutron.conf`` file and complete the following actions: * In the ``[database]`` section, configure database access: .. path /etc/neutron/neutron.conf .. code-block:: ini [database] # ... connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron .. end Replace ``NEUTRON_DBPASS`` with the password you chose for the database. .. note:: Comment out or remove any other ``connection`` options in the ``[database]`` section. * In the ``[DEFAULT]`` section, enable the Modular Layer 2 (ML2) plug-in and disable additional plug-ins: .. path /etc/neutron/neutron.conf .. code-block:: ini [DEFAULT] # ... core_plugin = ml2 service_plugins = .. end * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: .. path /etc/neutron/neutron.conf .. code-block:: ini [DEFAULT] # ... transport_url = rabbit://openstack:RABBIT_PASS@controller .. end Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in RabbitMQ. * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure Identity service access: .. path /etc/neutron/neutron.conf .. code-block:: ini [DEFAULT] # ... auth_strategy = keystone [keystone_authtoken] # ... www_authenticate_uri = http://controller:5000 auth_url = http://controller:5000 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = neutron password = NEUTRON_PASS .. end Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron`` user in the Identity service. .. note:: Comment out or remove any other options in the ``[keystone_authtoken]`` section. * In the ``[DEFAULT]`` and ``[nova]`` sections, configure Networking to notify Compute of network topology changes: .. path /etc/neutron/neutron.conf .. code-block:: ini [DEFAULT] # ... notify_nova_on_port_status_changes = true notify_nova_on_port_data_changes = true [nova] # ... auth_url = http://controller:5000 auth_type = password project_domain_name = default user_domain_name = default region_name = RegionOne project_name = service username = nova password = NOVA_PASS .. end Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in the Identity service. * In the ``[oslo_concurrency]`` section, configure the lock path: .. 
path /etc/neutron/neutron.conf .. code-block:: ini [oslo_concurrency] # ... lock_path = /var/lib/neutron/tmp .. end Configure the Modular Layer 2 (ML2) plug-in ------------------------------------------- The ML2 plug-in uses the Linux bridge mechanism to build layer-2 (bridging and switching) virtual networking infrastructure for instances. * Edit the ``/etc/neutron/plugins/ml2/ml2_conf.ini`` file and complete the following actions: * In the ``[ml2]`` section, enable flat and VLAN networks: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2] # ... type_drivers = flat,vlan .. end * In the ``[ml2]`` section, disable self-service networks: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2] # ... tenant_network_types = .. end * In the ``[ml2]`` section, enable the Linux bridge mechanism: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2] # ... mechanism_drivers = linuxbridge .. end .. warning:: After you configure the ML2 plug-in, removing values in the ``type_drivers`` option can lead to database inconsistency. * In the ``[ml2]`` section, enable the port security extension driver: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2] # ... extension_drivers = port_security .. end * In the ``[ml2_type_flat]`` section, configure the provider virtual network as a flat network: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2_type_flat] # ... flat_networks = provider .. end * In the ``[securitygroup]`` section, enable ipset to increase efficiency of security group rules: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [securitygroup] # ... enable_ipset = true .. end Configure the Linux bridge agent -------------------------------- The Linux bridge agent builds layer-2 (bridging and switching) virtual networking infrastructure for instances and handles security groups. * Edit the ``/etc/neutron/plugins/ml2/linuxbridge_agent.ini`` file and complete the following actions: * In the ``[linux_bridge]`` section, map the provider virtual network to the provider physical network interface: .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini .. code-block:: ini [linux_bridge] physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME .. end Replace ``PROVIDER_INTERFACE_NAME`` with the name of the underlying provider physical network interface. See :doc:`environment-networking-ubuntu` for more information. * In the ``[vxlan]`` section, disable VXLAN overlay networks: .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini .. code-block:: ini [vxlan] enable_vxlan = false .. end * In the ``[securitygroup]`` section, enable security groups and configure the Linux bridge iptables firewall driver: .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini .. code-block:: ini [securitygroup] # ... enable_security_group = true firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver .. end * Ensure your Linux operating system kernel supports network bridge filters by verifying all the following ``sysctl`` values are set to ``1``: .. code-block:: ini net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables .. end To enable networking bridge support, typically the ``br_netfilter`` kernel module needs to be loaded. Check your operating system's documentation for additional details on enabling this module. Configure the DHCP agent ------------------------ The DHCP agent provides DHCP services for virtual networks. 
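The agent drives ``dnsmasq`` under the hood; if in doubt that the binary is present, a quick, non-destructive check is:

.. code-block:: console

   # dnsmasq --version

.. end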
* Edit the ``/etc/neutron/dhcp_agent.ini`` file and complete the following actions: * In the ``[DEFAULT]`` section, configure the Linux bridge interface driver, Dnsmasq DHCP driver, and enable isolated metadata so instances on provider networks can access metadata over the network: .. path /etc/neutron/dhcp_agent.ini .. code-block:: ini [DEFAULT] # ... interface_driver = linuxbridge dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq enable_isolated_metadata = true .. end Create the provider network --------------------------- Follow `this provider network document `_ from the General Installation Guide. Return to *Networking controller node configuration*. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/controller-install-option2-obs.rst0000644000175000017500000002235100000000000027413 0ustar00coreycorey00000000000000Networking Option 2: Self-service networks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Install and configure the Networking components on the *controller* node. Install the components ---------------------- .. code-block:: console # zypper install --no-recommends openstack-neutron \ openstack-neutron-server openstack-neutron-linuxbridge-agent \ openstack-neutron-l3-agent openstack-neutron-dhcp-agent \ openstack-neutron-metadata-agent bridge-utils .. end Configure the server component ------------------------------ * Edit the ``/etc/neutron/neutron.conf`` file and complete the following actions: * In the ``[database]`` section, configure database access: .. path /etc/neutron/neutron.conf .. code-block:: ini [database] # ... connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron .. end Replace ``NEUTRON_DBPASS`` with the password you chose for the database. .. note:: Comment out or remove any other ``connection`` options in the ``[database]`` section. * In the ``[DEFAULT]`` section, enable the Modular Layer 2 (ML2) plug-in, router service, and overlapping IP addresses: .. path /etc/neutron/neutron.conf .. code-block:: ini [DEFAULT] # ... core_plugin = ml2 service_plugins = router allow_overlapping_ips = true .. end * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: .. path /etc/neutron/neutron.conf .. code-block:: ini [DEFAULT] # ... transport_url = rabbit://openstack:RABBIT_PASS@controller .. end Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in RabbitMQ. * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure Identity service access: .. path /etc/neutron/neutron.conf .. code-block:: ini [DEFAULT] # ... auth_strategy = keystone [keystone_authtoken] # ... www_authenticate_uri = http://controller:5000 auth_url = http://controller:5000 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = neutron password = NEUTRON_PASS .. end Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron`` user in the Identity service. .. note:: Comment out or remove any other options in the ``[keystone_authtoken]`` section. * In the ``[DEFAULT]`` and ``[nova]`` sections, configure Networking to notify Compute of network topology changes: .. path /etc/neutron/neutron.conf .. code-block:: ini [DEFAULT] # ... notify_nova_on_port_status_changes = true notify_nova_on_port_data_changes = true [nova] # ... 
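# Credentials the neutron server uses to notify the Compute service (nova)
# of port status and data changes; NOVA_PASS is replaced as described below.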
auth_url = http://controller:5000 auth_type = password project_domain_name = default user_domain_name = default region_name = RegionOne project_name = service username = nova password = NOVA_PASS .. end Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in the Identity service. * In the ``[oslo_concurrency]`` section, configure the lock path: .. path /etc/neutron/neutron.conf .. code-block:: ini [oslo_concurrency] # ... lock_path = /var/lib/neutron/tmp .. end Configure the Modular Layer 2 (ML2) plug-in ------------------------------------------- The ML2 plug-in uses the Linux bridge mechanism to build layer-2 (bridging and switching) virtual networking infrastructure for instances. * Edit the ``/etc/neutron/plugins/ml2/ml2_conf.ini`` file and complete the following actions: * In the ``[ml2]`` section, enable flat, VLAN, and VXLAN networks: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2] # ... type_drivers = flat,vlan,vxlan .. end * In the ``[ml2]`` section, enable VXLAN self-service networks: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2] # ... tenant_network_types = vxlan .. end * In the ``[ml2]`` section, enable the Linux bridge and layer-2 population mechanisms: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2] # ... mechanism_drivers = linuxbridge,l2population .. end .. warning:: After you configure the ML2 plug-in, removing values in the ``type_drivers`` option can lead to database inconsistency. .. note:: The Linux bridge agent only supports VXLAN overlay networks. * In the ``[ml2]`` section, enable the port security extension driver: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2] # ... extension_drivers = port_security .. end * In the ``[ml2_type_flat]`` section, configure the provider virtual network as a flat network: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2_type_flat] # ... flat_networks = provider .. end * In the ``[ml2_type_vxlan]`` section, configure the VXLAN network identifier range for self-service networks: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2_type_vxlan] # ... vni_ranges = 1:1000 .. end * In the ``[securitygroup]`` section, enable ipset to increase efficiency of security group rules: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [securitygroup] # ... enable_ipset = true .. end Configure the Linux bridge agent -------------------------------- The Linux bridge agent builds layer-2 (bridging and switching) virtual networking infrastructure for instances and handles security groups. * Edit the ``/etc/neutron/plugins/ml2/linuxbridge_agent.ini`` file and complete the following actions: * In the ``[linux_bridge]`` section, map the provider virtual network to the provider physical network interface: .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini .. code-block:: ini [linux_bridge] physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME .. end Replace ``PROVIDER_INTERFACE_NAME`` with the name of the underlying provider physical network interface. See :doc:`environment-networking-obs` for more information. * In the ``[vxlan]`` section, enable VXLAN overlay networks, configure the IP address of the physical network interface that handles overlay networks, and enable layer-2 population: .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini .. code-block:: ini [vxlan] enable_vxlan = true local_ip = OVERLAY_INTERFACE_IP_ADDRESS l2_population = true .. 
end Replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with the IP address of the underlying physical network interface that handles overlay networks. The example architecture uses the management interface to tunnel traffic to the other nodes. Therefore, replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with the management IP address of the controller node. See :doc:`environment-networking-obs` for more information. * In the ``[securitygroup]`` section, enable security groups and configure the Linux bridge iptables firewall driver: .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini .. code-block:: ini [securitygroup] # ... enable_security_group = true firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver .. end * Ensure your Linux operating system kernel supports network bridge filters by verifying all the following ``sysctl`` values are set to ``1``: .. code-block:: ini net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables .. end To enable networking bridge support, typically the ``br_netfilter`` kernel module needs to be loaded. Check your operating system's documentation for additional details on enabling this module. Configure the layer-3 agent --------------------------- The Layer-3 (L3) agent provides routing and NAT services for self-service virtual networks. * Edit the ``/etc/neutron/l3_agent.ini`` file and complete the following actions: * In the ``[DEFAULT]`` section, configure the Linux bridge interface driver: .. path /etc/neutron/l3_agent.ini .. code-block:: ini [DEFAULT] # ... interface_driver = linuxbridge .. end Configure the DHCP agent ------------------------ The DHCP agent provides DHCP services for virtual networks. * Edit the ``/etc/neutron/dhcp_agent.ini`` file and complete the following actions: * In the ``[DEFAULT]`` section, configure the Linux bridge interface driver, Dnsmasq DHCP driver, and enable isolated metadata so instances on provider networks can access metadata over the network: .. path /etc/neutron/dhcp_agent.ini .. code-block:: ini [DEFAULT] # ... interface_driver = linuxbridge dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq enable_isolated_metadata = true .. end Return to *Networking controller node configuration*. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/controller-install-option2-rdo.rst0000644000175000017500000002214200000000000027412 0ustar00coreycorey00000000000000Networking Option 2: Self-service networks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Install and configure the Networking components on the *controller* node. Install the components ---------------------- .. code-block:: console # yum install openstack-neutron openstack-neutron-ml2 \ openstack-neutron-linuxbridge ebtables .. end Configure the server component ------------------------------ * Edit the ``/etc/neutron/neutron.conf`` file and complete the following actions: * In the ``[database]`` section, configure database access: .. path /etc/neutron/neutron.conf .. code-block:: ini [database] # ... connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron .. end Replace ``NEUTRON_DBPASS`` with the password you chose for the database. .. note:: Comment out or remove any other ``connection`` options in the ``[database]`` section. * In the ``[DEFAULT]`` section, enable the Modular Layer 2 (ML2) plug-in, router service, and overlapping IP addresses: .. path /etc/neutron/neutron.conf .. code-block:: ini [DEFAULT] # ... 
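# Load the ML2 core plug-in and the layer-3 router service plug-in;
# allow_overlapping_ips lets different networks use overlapping subnet ranges.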
core_plugin = ml2 service_plugins = router allow_overlapping_ips = true .. end * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: .. path /etc/neutron/neutron.conf .. code-block:: ini [DEFAULT] # ... transport_url = rabbit://openstack:RABBIT_PASS@controller .. end Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in RabbitMQ. * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure Identity service access: .. path /etc/neutron/neutron.conf .. code-block:: ini [DEFAULT] # ... auth_strategy = keystone [keystone_authtoken] # ... www_authenticate_uri = http://controller:5000 auth_url = http://controller:5000 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = neutron password = NEUTRON_PASS .. end Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron`` user in the Identity service. .. note:: Comment out or remove any other options in the ``[keystone_authtoken]`` section. * In the ``[DEFAULT]`` and ``[nova]`` sections, configure Networking to notify Compute of network topology changes: .. path /etc/neutron/neutron.conf .. code-block:: ini [DEFAULT] # ... notify_nova_on_port_status_changes = true notify_nova_on_port_data_changes = true [nova] # ... auth_url = http://controller:5000 auth_type = password project_domain_name = default user_domain_name = default region_name = RegionOne project_name = service username = nova password = NOVA_PASS .. end Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in the Identity service. * In the ``[oslo_concurrency]`` section, configure the lock path: .. path /etc/neutron/neutron.conf .. code-block:: ini [oslo_concurrency] # ... lock_path = /var/lib/neutron/tmp .. end Configure the Modular Layer 2 (ML2) plug-in ------------------------------------------- The ML2 plug-in uses the Linux bridge mechanism to build layer-2 (bridging and switching) virtual networking infrastructure for instances. * Edit the ``/etc/neutron/plugins/ml2/ml2_conf.ini`` file and complete the following actions: * In the ``[ml2]`` section, enable flat, VLAN, and VXLAN networks: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2] # ... type_drivers = flat,vlan,vxlan .. end * In the ``[ml2]`` section, enable VXLAN self-service networks: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2] # ... tenant_network_types = vxlan .. end * In the ``[ml2]`` section, enable the Linux bridge and layer-2 population mechanisms: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2] # ... mechanism_drivers = linuxbridge,l2population .. end .. warning:: After you configure the ML2 plug-in, removing values in the ``type_drivers`` option can lead to database inconsistency. .. note:: The Linux bridge agent only supports VXLAN overlay networks. * In the ``[ml2]`` section, enable the port security extension driver: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2] # ... extension_drivers = port_security .. end * In the ``[ml2_type_flat]`` section, configure the provider virtual network as a flat network: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2_type_flat] # ... flat_networks = provider .. end * In the ``[ml2_type_vxlan]`` section, configure the VXLAN network identifier range for self-service networks: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2_type_vxlan] # ... 
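# Reserve VXLAN network identifiers (VNIs) 1 through 1000 for allocation
# to self-service networks.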
vni_ranges = 1:1000 .. end * In the ``[securitygroup]`` section, enable ipset to increase efficiency of security group rules: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [securitygroup] # ... enable_ipset = true .. end Configure the Linux bridge agent -------------------------------- The Linux bridge agent builds layer-2 (bridging and switching) virtual networking infrastructure for instances and handles security groups. * Edit the ``/etc/neutron/plugins/ml2/linuxbridge_agent.ini`` file and complete the following actions: * In the ``[linux_bridge]`` section, map the provider virtual network to the provider physical network interface: .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini .. code-block:: ini [linux_bridge] physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME .. end Replace ``PROVIDER_INTERFACE_NAME`` with the name of the underlying provider physical network interface. See :doc:`environment-networking-rdo` for more information. * In the ``[vxlan]`` section, enable VXLAN overlay networks, configure the IP address of the physical network interface that handles overlay networks, and enable layer-2 population: .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini .. code-block:: ini [vxlan] enable_vxlan = true local_ip = OVERLAY_INTERFACE_IP_ADDRESS l2_population = true .. end Replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with the IP address of the underlying physical network interface that handles overlay networks. The example architecture uses the management interface to tunnel traffic to the other nodes. Therefore, replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with the management IP address of the controller node. See :doc:`environment-networking-rdo` for more information. * In the ``[securitygroup]`` section, enable security groups and configure the Linux bridge iptables firewall driver: .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini .. code-block:: ini [securitygroup] # ... enable_security_group = true firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver .. end * Ensure your Linux operating system kernel supports network bridge filters by verifying all the following ``sysctl`` values are set to ``1``: .. code-block:: ini net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables .. end To enable networking bridge support, typically the ``br_netfilter`` kernel module needs to be loaded. Check your operating system's documentation for additional details on enabling this module. Configure the layer-3 agent --------------------------- The Layer-3 (L3) agent provides routing and NAT services for self-service virtual networks. * Edit the ``/etc/neutron/l3_agent.ini`` file and complete the following actions: * In the ``[DEFAULT]`` section, configure the Linux bridge interface driver: .. path /etc/neutron/l3_agent.ini .. code-block:: ini [DEFAULT] # ... interface_driver = linuxbridge .. end Configure the DHCP agent ------------------------ The DHCP agent provides DHCP services for virtual networks. * Edit the ``/etc/neutron/dhcp_agent.ini`` file and complete the following actions: * In the ``[DEFAULT]`` section, configure the Linux bridge interface driver, Dnsmasq DHCP driver, and enable isolated metadata so instances on provider networks can access metadata over the network: .. path /etc/neutron/dhcp_agent.ini .. code-block:: ini [DEFAULT] # ... interface_driver = linuxbridge dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq enable_isolated_metadata = true .. end Return to *Networking controller node configuration*. 
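Relatedly, the bridge-filter settings above must survive reboots. A hedged sketch of one common way to persist them on systemd-based distributions (the file names are illustrative, not mandated by Neutron):

.. code-block:: console

   # echo br_netfilter > /etc/modules-load.d/neutron.conf
   # cat <<EOF > /etc/sysctl.d/99-neutron.conf
   net.bridge.bridge-nf-call-iptables = 1
   net.bridge.bridge-nf-call-ip6tables = 1
   EOF
   # sysctl --system

.. end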
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/controller-install-option2-ubuntu.rst0000644000175000017500000002222700000000000030154 0ustar00coreycorey00000000000000Networking Option 2: Self-service networks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Install and configure the Networking components on the *controller* node. Install the components ---------------------- .. code-block:: console # apt install neutron-server neutron-plugin-ml2 \ neutron-linuxbridge-agent neutron-l3-agent neutron-dhcp-agent \ neutron-metadata-agent .. end Configure the server component ------------------------------ * Edit the ``/etc/neutron/neutron.conf`` file and complete the following actions: * In the ``[database]`` section, configure database access: .. path /etc/neutron/neutron.conf .. code-block:: ini [database] # ... connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron .. end Replace ``NEUTRON_DBPASS`` with the password you chose for the database. .. note:: Comment out or remove any other ``connection`` options in the ``[database]`` section. * In the ``[DEFAULT]`` section, enable the Modular Layer 2 (ML2) plug-in, router service, and overlapping IP addresses: .. path /etc/neutron/neutron.conf .. code-block:: ini [DEFAULT] # ... core_plugin = ml2 service_plugins = router allow_overlapping_ips = true .. end * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: .. path /etc/neutron/neutron.conf .. code-block:: ini [DEFAULT] # ... transport_url = rabbit://openstack:RABBIT_PASS@controller .. end Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in RabbitMQ. * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure Identity service access: .. path /etc/neutron/neutron.conf .. code-block:: ini [DEFAULT] # ... auth_strategy = keystone [keystone_authtoken] # ... www_authenticate_uri = http://controller:5000 auth_url = http://controller:5000 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = neutron password = NEUTRON_PASS .. end Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron`` user in the Identity service. .. note:: Comment out or remove any other options in the ``[keystone_authtoken]`` section. * In the ``[DEFAULT]`` and ``[nova]`` sections, configure Networking to notify Compute of network topology changes: .. path /etc/neutron/neutron.conf .. code-block:: ini [DEFAULT] # ... notify_nova_on_port_status_changes = true notify_nova_on_port_data_changes = true [nova] # ... auth_url = http://controller:5000 auth_type = password project_domain_name = default user_domain_name = default region_name = RegionOne project_name = service username = nova password = NOVA_PASS .. end Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in the Identity service. * In the ``[oslo_concurrency]`` section, configure the lock path: .. path /etc/neutron/neutron.conf .. code-block:: ini [oslo_concurrency] # ... lock_path = /var/lib/neutron/tmp .. end Configure the Modular Layer 2 (ML2) plug-in ------------------------------------------- The ML2 plug-in uses the Linux bridge mechanism to build layer-2 (bridging and switching) virtual networking infrastructure for instances. 
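As with option 1, a consolidated view of where the following steps lead may help. Assuming the guide's example values, ``/etc/neutron/plugins/ml2/ml2_conf.ini`` ends up roughly as (each option is set individually below):

.. code-block:: ini

   [ml2]
   type_drivers = flat,vlan,vxlan
   tenant_network_types = vxlan
   mechanism_drivers = linuxbridge,l2population
   extension_drivers = port_security

   [ml2_type_flat]
   flat_networks = provider

   [ml2_type_vxlan]
   vni_ranges = 1:1000

   [securitygroup]
   enable_ipset = true

.. end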
* Edit the ``/etc/neutron/plugins/ml2/ml2_conf.ini`` file and complete the following actions: * In the ``[ml2]`` section, enable flat, VLAN, and VXLAN networks: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2] # ... type_drivers = flat,vlan,vxlan .. end * In the ``[ml2]`` section, enable VXLAN self-service networks: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2] # ... tenant_network_types = vxlan .. end * In the ``[ml2]`` section, enable the Linux bridge and layer-2 population mechanisms: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2] # ... mechanism_drivers = linuxbridge,l2population .. end .. warning:: After you configure the ML2 plug-in, removing values in the ``type_drivers`` option can lead to database inconsistency. .. note:: The Linux bridge agent only supports VXLAN overlay networks. * In the ``[ml2]`` section, enable the port security extension driver: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2] # ... extension_drivers = port_security .. end * In the ``[ml2_type_flat]`` section, configure the provider virtual network as a flat network: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2_type_flat] # ... flat_networks = provider .. end * In the ``[ml2_type_vxlan]`` section, configure the VXLAN network identifier range for self-service networks: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [ml2_type_vxlan] # ... vni_ranges = 1:1000 .. end * In the ``[securitygroup]`` section, enable ipset to increase efficiency of security group rules: .. path /etc/neutron/plugins/ml2/ml2_conf.ini .. code-block:: ini [securitygroup] # ... enable_ipset = true .. end Configure the Linux bridge agent -------------------------------- The Linux bridge agent builds layer-2 (bridging and switching) virtual networking infrastructure for instances and handles security groups. * Edit the ``/etc/neutron/plugins/ml2/linuxbridge_agent.ini`` file and complete the following actions: * In the ``[linux_bridge]`` section, map the provider virtual network to the provider physical network interface: .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini .. code-block:: ini [linux_bridge] physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME .. end Replace ``PROVIDER_INTERFACE_NAME`` with the name of the underlying provider physical network interface. See :doc:`environment-networking-ubuntu` for more information. * In the ``[vxlan]`` section, enable VXLAN overlay networks, configure the IP address of the physical network interface that handles overlay networks, and enable layer-2 population: .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini .. code-block:: ini [vxlan] enable_vxlan = true local_ip = OVERLAY_INTERFACE_IP_ADDRESS l2_population = true .. end Replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with the IP address of the underlying physical network interface that handles overlay networks. The example architecture uses the management interface to tunnel traffic to the other nodes. Therefore, replace ``OVERLAY_INTERFACE_IP_ADDRESS`` with the management IP address of the controller node. See :doc:`environment-networking-ubuntu` for more information. * In the ``[securitygroup]`` section, enable security groups and configure the Linux bridge iptables firewall driver: .. path /etc/neutron/plugins/ml2/linuxbridge_agent.ini .. code-block:: ini [securitygroup] # ... enable_security_group = true firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver .. 
end * Ensure your Linux operating system kernel supports network bridge filters by verifying all the following ``sysctl`` values are set to ``1``: .. code-block:: ini net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables .. end To enable networking bridge support, typically the ``br_netfilter`` kernel module needs to be loaded. Check your operating system's documentation for additional details on enabling this module. Configure the layer-3 agent --------------------------- The Layer-3 (L3) agent provides routing and NAT services for self-service virtual networks. * Edit the ``/etc/neutron/l3_agent.ini`` file and complete the following actions: * In the ``[DEFAULT]`` section, configure the Linux bridge interface driver: .. path /etc/neutron/l3_agent.ini .. code-block:: ini [DEFAULT] # ... interface_driver = linuxbridge .. end Configure the DHCP agent ------------------------ The DHCP agent provides DHCP services for virtual networks. * Edit the ``/etc/neutron/dhcp_agent.ini`` file and complete the following actions: * In the ``[DEFAULT]`` section, configure the Linux bridge interface driver, Dnsmasq DHCP driver, and enable isolated metadata so instances on provider networks can access metadata over the network: .. path /etc/neutron/dhcp_agent.ini .. code-block:: ini [DEFAULT] # ... interface_driver = linuxbridge dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq enable_isolated_metadata = true .. end Return to *Networking controller node configuration*. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/controller-install-rdo.rst0000644000175000017500000002575500000000000026033 0ustar00coreycorey00000000000000Install and configure controller node ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Prerequisites ------------- Before you configure the OpenStack Networking (neutron) service, you must create a database, service credentials, and API endpoints. #. To create the database, complete these steps: * Use the database access client to connect to the database server as the ``root`` user: .. code-block:: console $ mysql -u root -p .. end * Create the ``neutron`` database: .. code-block:: console MariaDB [(none)]> CREATE DATABASE neutron; .. end * Grant proper access to the ``neutron`` database, replacing ``NEUTRON_DBPASS`` with a suitable password: .. code-block:: console MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' \ IDENTIFIED BY 'NEUTRON_DBPASS'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' \ IDENTIFIED BY 'NEUTRON_DBPASS'; .. end * Exit the database access client. #. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc .. end #. To create the service credentials, complete these steps: * Create the ``neutron`` user: .. code-block:: console $ openstack user create --domain default --password-prompt neutron User Password: Repeat User Password: +---------------------+----------------------------------+ | Field | Value | +---------------------+----------------------------------+ | domain_id | default | | enabled | True | | id | fdb0f541e28141719b6a43c8944bf1fb | | name | neutron | | options | {} | | password_expires_at | None | +---------------------+----------------------------------+ .. end * Add the ``admin`` role to the ``neutron`` user: .. code-block:: console $ openstack role add --project service --user neutron admin .. end .. note:: This command provides no output.
* Create the ``neutron`` service entity: .. code-block:: console $ openstack service create --name neutron \ --description "OpenStack Networking" network +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | OpenStack Networking | | enabled | True | | id | f71529314dab4a4d8eca427e701d209e | | name | neutron | | type | network | +-------------+----------------------------------+ .. end #. Create the Networking service API endpoints: .. code-block:: console $ openstack endpoint create --region RegionOne \ network public http://controller:9696 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 85d80a6d02fc4b7683f611d7fc1493a3 | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | f71529314dab4a4d8eca427e701d209e | | service_name | neutron | | service_type | network | | url | http://controller:9696 | +--------------+----------------------------------+ $ openstack endpoint create --region RegionOne \ network internal http://controller:9696 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 09753b537ac74422a68d2d791cf3714f | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | f71529314dab4a4d8eca427e701d209e | | service_name | neutron | | service_type | network | | url | http://controller:9696 | +--------------+----------------------------------+ $ openstack endpoint create --region RegionOne \ network admin http://controller:9696 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 1ee14289c9374dffb5db92a5c112fc4e | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | f71529314dab4a4d8eca427e701d209e | | service_name | neutron | | service_type | network | | url | http://controller:9696 | +--------------+----------------------------------+ .. end Configure networking options ---------------------------- You can deploy the Networking service using one of two architectures represented by options 1 and 2. Option 1 deploys the simplest possible architecture that only supports attaching instances to provider (external) networks. No self-service (private) networks, routers, or floating IP addresses. Only the ``admin`` or other privileged user can manage provider networks. Option 2 augments option 1 with layer-3 services that support attaching instances to self-service networks. The ``demo`` or other unprivileged user can manage self-service networks including routers that provide connectivity between self-service and provider networks. Additionally, floating IP addresses provide connectivity to instances using self-service networks from external networks such as the Internet. Self-service networks typically use overlay networks. Overlay network protocols such as VXLAN include additional headers that increase overhead and decrease space available for the payload or user data. Without knowledge of the virtual network infrastructure, instances attempt to send packets using the default Ethernet maximum transmission unit (MTU) of 1500 bytes. The Networking service automatically provides the correct MTU value to instances via DHCP. 
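As a rough check on the arithmetic: VXLAN encapsulation over IPv4 adds about 50 bytes per packet (14 bytes outer Ethernet + 20 bytes IP + 8 bytes UDP + 8 bytes VXLAN), so instances on a VXLAN network carried over a standard 1500-byte underlay should use an MTU of 1500 − 50 = 1450 bytes.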
However, some cloud images do not use DHCP or ignore the DHCP MTU option and require configuration using metadata or a script. .. note:: Option 2 also supports attaching instances to provider networks. Choose one of the following networking options to configure services specific to it. Afterwards, return here and proceed to :ref:`neutron-controller-metadata-agent-rdo`. .. toctree:: :maxdepth: 1 controller-install-option1-rdo.rst controller-install-option2-rdo.rst .. _neutron-controller-metadata-agent-rdo: Configure the metadata agent ---------------------------- The metadata agent provides configuration information such as credentials to instances. * Edit the ``/etc/neutron/metadata_agent.ini`` file and complete the following actions: * In the ``[DEFAULT]`` section, configure the metadata host and shared secret: .. path /etc/neutron/metadata_agent.ini .. code-block:: ini [DEFAULT] # ... nova_metadata_host = controller metadata_proxy_shared_secret = METADATA_SECRET .. end Replace ``METADATA_SECRET`` with a suitable secret for the metadata proxy. Configure the Compute service to use the Networking service ----------------------------------------------------------- .. note:: The Nova compute service must be installed to complete this step. For more details see the compute install guide found under the `Installation Guides` section of the `docs website `_. * Edit the ``/etc/nova/nova.conf`` file and perform the following actions: * In the ``[neutron]`` section, configure access parameters, enable the metadata proxy, and configure the secret: .. path /etc/nova/nova.conf .. code-block:: ini [neutron] # ... auth_url = http://controller:5000 auth_type = password project_domain_name = default user_domain_name = default region_name = RegionOne project_name = service username = neutron password = NEUTRON_PASS service_metadata_proxy = true metadata_proxy_shared_secret = METADATA_SECRET .. end Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron`` user in the Identity service. Replace ``METADATA_SECRET`` with the secret you chose for the metadata proxy. See the :nova-doc:`compute service configuration guide ` for the full set of options including overriding the service catalog endpoint URL if necessary. Finalize installation --------------------- #. The Networking service initialization scripts expect a symbolic link ``/etc/neutron/plugin.ini`` pointing to the ML2 plug-in configuration file, ``/etc/neutron/plugins/ml2/ml2_conf.ini``. If this symbolic link does not exist, create it using the following command: .. code-block:: console # ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini .. end #. Populate the database: .. code-block:: console # su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \ --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron .. end .. note:: Database population occurs later for Networking because the script requires complete server and plug-in configuration files. #. Restart the Compute API service: .. code-block:: console # systemctl restart openstack-nova-api.service .. end #. Start the Networking services and configure them to start when the system boots. For both networking options: .. code-block:: console # systemctl enable neutron-server.service \ neutron-linuxbridge-agent.service neutron-dhcp-agent.service \ neutron-metadata-agent.service # systemctl start neutron-server.service \ neutron-linuxbridge-agent.service neutron-dhcp-agent.service \ neutron-metadata-agent.service .. 
end For networking option 2, also enable and start the layer-3 service: .. code-block:: console # systemctl enable neutron-l3-agent.service # systemctl start neutron-l3-agent.service .. end ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/controller-install-ubuntu.rst0000644000175000017500000002453100000000000026564 0ustar00coreycorey00000000000000Install and configure controller node ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Prerequisites ------------- Before you configure the OpenStack Networking (neutron) service, you must create a database, service credentials, and API endpoints. #. To create the database, complete these steps: * Use the database access client to connect to the database server as the ``root`` user: .. code-block:: console $ mysql -u root -p .. end * Create the ``neutron`` database: .. code-block:: console MariaDB [(none)]> CREATE DATABASE neutron; .. end * Grant proper access to the ``neutron`` database, replacing ``NEUTRON_DBPASS`` with a suitable password: .. code-block:: console MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' \ IDENTIFIED BY 'NEUTRON_DBPASS'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' \ IDENTIFIED BY 'NEUTRON_DBPASS'; .. end * Exit the database access client. #. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc .. end #. To create the service credentials, complete these steps: * Create the ``neutron`` user: .. code-block:: console $ openstack user create --domain default --password-prompt neutron User Password: Repeat User Password: +---------------------+----------------------------------+ | Field | Value | +---------------------+----------------------------------+ | domain_id | default | | enabled | True | | id | fdb0f541e28141719b6a43c8944bf1fb | | name | neutron | | options | {} | | password_expires_at | None | +---------------------+----------------------------------+ .. end * Add the ``admin`` role to the ``neutron`` user: .. code-block:: console $ openstack role add --project service --user neutron admin .. end .. note:: This command provides no output. * Create the ``neutron`` service entity: .. code-block:: console $ openstack service create --name neutron \ --description "OpenStack Networking" network +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | OpenStack Networking | | enabled | True | | id | f71529314dab4a4d8eca427e701d209e | | name | neutron | | type | network | +-------------+----------------------------------+ .. end #. Create the Networking service API endpoints: ..
code-block:: console $ openstack endpoint create --region RegionOne \ network public http://controller:9696 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 85d80a6d02fc4b7683f611d7fc1493a3 | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | f71529314dab4a4d8eca427e701d209e | | service_name | neutron | | service_type | network | | url | http://controller:9696 | +--------------+----------------------------------+ $ openstack endpoint create --region RegionOne \ network internal http://controller:9696 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 09753b537ac74422a68d2d791cf3714f | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | f71529314dab4a4d8eca427e701d209e | | service_name | neutron | | service_type | network | | url | http://controller:9696 | +--------------+----------------------------------+ $ openstack endpoint create --region RegionOne \ network admin http://controller:9696 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 1ee14289c9374dffb5db92a5c112fc4e | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | f71529314dab4a4d8eca427e701d209e | | service_name | neutron | | service_type | network | | url | http://controller:9696 | +--------------+----------------------------------+ .. end Configure networking options ---------------------------- You can deploy the Networking service using one of two architectures represented by options 1 and 2. Option 1 deploys the simplest possible architecture that only supports attaching instances to provider (external) networks; it includes no self-service (private) networks, routers, or floating IP addresses. Only the ``admin`` or other privileged user can manage provider networks. Option 2 augments option 1 with layer-3 services that support attaching instances to self-service networks. The ``demo`` or other unprivileged user can manage self-service networks, including routers that provide connectivity between self-service and provider networks. Additionally, floating IP addresses provide connectivity to instances using self-service networks from external networks such as the Internet. Self-service networks typically use overlay networks. Overlay network protocols such as VXLAN include additional headers that increase overhead and decrease space available for the payload or user data. Without knowledge of the virtual network infrastructure, instances attempt to send packets using the default Ethernet maximum transmission unit (MTU) of 1500 bytes. The Networking service automatically provides the correct MTU value to instances via DHCP. However, some cloud images do not use DHCP or ignore the DHCP MTU option and require configuration using metadata or a script. .. note:: Option 2 also supports attaching instances to provider networks. Choose one of the following networking options to configure services specific to it. Afterwards, return here and proceed to :ref:`neutron-controller-metadata-agent-ubuntu`. .. toctree:: :maxdepth: 1 controller-install-option1-ubuntu.rst controller-install-option2-ubuntu.rst ..
_neutron-controller-metadata-agent-ubuntu: Configure the metadata agent ---------------------------- The metadata agent provides configuration information such as credentials to instances. * Edit the ``/etc/neutron/metadata_agent.ini`` file and complete the following actions: * In the ``[DEFAULT]`` section, configure the metadata host and shared secret: .. path /etc/neutron/metadata_agent.ini .. code-block:: ini [DEFAULT] # ... nova_metadata_host = controller metadata_proxy_shared_secret = METADATA_SECRET .. end Replace ``METADATA_SECRET`` with a suitable secret for the metadata proxy. Configure the Compute service to use the Networking service ----------------------------------------------------------- .. note:: The Nova compute service must be installed to complete this step. For more details see the compute install guide found under the `Installation Guides` section of the `docs website `_. * Edit the ``/etc/nova/nova.conf`` file and perform the following actions: * In the ``[neutron]`` section, configure access parameters, enable the metadata proxy, and configure the secret: .. path /etc/nova/nova.conf .. code-block:: ini [neutron] # ... auth_url = http://controller:5000 auth_type = password project_domain_name = default user_domain_name = default region_name = RegionOne project_name = service username = neutron password = NEUTRON_PASS service_metadata_proxy = true metadata_proxy_shared_secret = METADATA_SECRET .. end Replace ``NEUTRON_PASS`` with the password you chose for the ``neutron`` user in the Identity service. Replace ``METADATA_SECRET`` with the secret you chose for the metadata proxy. See the :nova-doc:`compute service configuration guide ` for the full set of options including overriding the service catalog endpoint URL if necessary. Finalize installation --------------------- #. Populate the database: .. code-block:: console # su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \ --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron .. end .. note:: Database population occurs later for Networking because the script requires complete server and plug-in configuration files. #. Restart the Compute API service: .. code-block:: console # service nova-api restart .. end #. Restart the Networking services. For both networking options: .. code-block:: console # service neutron-server restart # service neutron-linuxbridge-agent restart # service neutron-dhcp-agent restart # service neutron-metadata-agent restart .. end For networking option 2, also restart the layer-3 service: .. code-block:: console # service neutron-l3-agent restart .. end ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/environment-networking-compute-obs.rst0000644000175000017500000000177200000000000030403 0ustar00coreycorey00000000000000Compute node ~~~~~~~~~~~~ Configure network interfaces ---------------------------- #. Configure the first interface as the management interface: IP address: 10.0.0.31 Network mask: 255.255.255.0 (or /24) Default gateway: 10.0.0.1 .. note:: Additional compute nodes should use 10.0.0.32, 10.0.0.33, and so on. #. The provider interface uses a special configuration without an IP address assigned to it. Configure the second interface as the provider interface: Replace ``INTERFACE_NAME`` with the actual interface name. For example, *eth1* or *ens224*. 
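If you are unsure of the interface names on a node, you can list them first; the names and flags below are examples only: .. code-block:: console # ip link show 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 ... 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 ... 3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 ... .. end In this example output, ``eth1`` would serve as the provider interface.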
* Edit the ``/etc/sysconfig/network/ifcfg-INTERFACE_NAME`` file to contain the following: .. path /etc/sysconfig/network/ifcfg-INTERFACE_NAME .. code-block:: bash STARTMODE='auto' BOOTPROTO='static' .. end #. Reboot the system to activate the changes. Configure name resolution ------------------------- #. Set the hostname of the node to ``compute1``. #. .. include:: shared/edit_hosts_file.txt ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/environment-networking-compute-rdo.rst0000644000175000017500000000214500000000000030377 0ustar00coreycorey00000000000000Compute node ~~~~~~~~~~~~ Configure network interfaces ---------------------------- #. Configure the first interface as the management interface: IP address: 10.0.0.31 Network mask: 255.255.255.0 (or /24) Default gateway: 10.0.0.1 .. note:: Additional compute nodes should use 10.0.0.32, 10.0.0.33, and so on. #. The provider interface uses a special configuration without an IP address assigned to it. Configure the second interface as the provider interface: Replace ``INTERFACE_NAME`` with the actual interface name. For example, *eth1* or *ens224*. * Edit the ``/etc/sysconfig/network-scripts/ifcfg-INTERFACE_NAME`` file to contain the following: Do not change the ``HWADDR`` and ``UUID`` keys. .. path /etc/sysconfig/network-scripts/ifcfg-INTERFACE_NAME .. code-block:: bash DEVICE=INTERFACE_NAME TYPE=Ethernet ONBOOT="yes" BOOTPROTO="none" .. end #. Reboot the system to activate the changes. Configure name resolution ------------------------- #. Set the hostname of the node to ``compute1``. #. .. include:: shared/edit_hosts_file.txt ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/environment-networking-compute-ubuntu.rst0000644000175000017500000000212000000000000031126 0ustar00coreycorey00000000000000Compute node ~~~~~~~~~~~~ Configure network interfaces ---------------------------- #. Configure the first interface as the management interface: IP address: 10.0.0.31 Network mask: 255.255.255.0 (or /24) Default gateway: 10.0.0.1 .. note:: Additional compute nodes should use 10.0.0.32, 10.0.0.33, and so on. #. The provider interface uses a special configuration without an IP address assigned to it. Configure the second interface as the provider interface: Replace ``INTERFACE_NAME`` with the actual interface name. For example, *eth1* or *ens224*. * Edit the ``/etc/network/interfaces`` file to contain the following: .. path /etc/network/interfaces .. code-block:: bash # The provider network interface auto INTERFACE_NAME iface INTERFACE_NAME inet manual up ip link set dev $IFACE up down ip link set dev $IFACE down .. end #. Reboot the system to activate the changes. Configure name resolution ------------------------- #. Set the hostname of the node to ``compute1``. #. .. include:: shared/edit_hosts_file.txt ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/environment-networking-controller-obs.rst0000644000175000017500000000164700000000000031113 0ustar00coreycorey00000000000000Controller node ~~~~~~~~~~~~~~~ Configure network interfaces ---------------------------- #. Configure the first interface as the management interface: IP address: 10.0.0.11 Network mask: 255.255.255.0 (or /24) Default gateway: 10.0.0.1 #. 
The provider interface uses a special configuration without an IP address assigned to it. Configure the second interface as the provider interface: Replace ``INTERFACE_NAME`` with the actual interface name. For example, *eth1* or *ens224*. * Edit the ``/etc/sysconfig/network/ifcfg-INTERFACE_NAME`` file to contain the following: .. path /etc/sysconfig/network/ifcfg-INTERFACE_NAME .. code-block:: ini STARTMODE='auto' BOOTPROTO='static' .. end #. Reboot the system to activate the changes. Configure name resolution ------------------------- #. Set the hostname of the node to ``controller``. #. .. include:: shared/edit_hosts_file.txt ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/environment-networking-controller-rdo.rst0000644000175000017500000000202200000000000031100 0ustar00coreycorey00000000000000Controller node ~~~~~~~~~~~~~~~ Configure network interfaces ---------------------------- #. Configure the first interface as the management interface: IP address: 10.0.0.11 Network mask: 255.255.255.0 (or /24) Default gateway: 10.0.0.1 #. The provider interface uses a special configuration without an IP address assigned to it. Configure the second interface as the provider interface: Replace ``INTERFACE_NAME`` with the actual interface name. For example, *eth1* or *ens224*. * Edit the ``/etc/sysconfig/network-scripts/ifcfg-INTERFACE_NAME`` file to contain the following: Do not change the ``HWADDR`` and ``UUID`` keys. .. path /etc/sysconfig/network-scripts/ifcfg-INTERFACE_NAME .. code-block:: ini DEVICE=INTERFACE_NAME TYPE=Ethernet ONBOOT="yes" BOOTPROTO="none" .. end #. Reboot the system to activate the changes. Configure name resolution ------------------------- #. Set the hostname of the node to ``controller``. #. .. include:: shared/edit_hosts_file.txt ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/environment-networking-controller-ubuntu.rst0000644000175000017500000000177500000000000031654 0ustar00coreycorey00000000000000Controller node ~~~~~~~~~~~~~~~ Configure network interfaces ---------------------------- #. Configure the first interface as the management interface: IP address: 10.0.0.11 Network mask: 255.255.255.0 (or /24) Default gateway: 10.0.0.1 #. The provider interface uses a special configuration without an IP address assigned to it. Configure the second interface as the provider interface: Replace ``INTERFACE_NAME`` with the actual interface name. For example, *eth1* or *ens224*. * Edit the ``/etc/network/interfaces`` file to contain the following: .. path /etc/network/interfaces .. code-block:: bash # The provider network interface auto INTERFACE_NAME iface INTERFACE_NAME inet manual up ip link set dev $IFACE up down ip link set dev $IFACE down .. end #. Reboot the system to activate the changes. Configure name resolution ------------------------- #. Set the hostname of the node to ``controller``. #. .. include:: shared/edit_hosts_file.txt ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/environment-networking-obs.rst0000644000175000017500000000733700000000000026734 0ustar00coreycorey00000000000000Host networking ~~~~~~~~~~~~~~~ After installing the operating system on each node for the architecture that you choose to deploy, you must configure the network interfaces. 
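Before changing anything, it can help to record the current addressing and routing state of each node for later comparison, for example: .. code-block:: console # ip addr show # ip route show .. end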
We recommend that you disable any automated network management tools and manually edit the appropriate configuration files for your distribution. For more information on how to configure networking on your distribution, see the `SLES 12 `__ or `openSUSE `__ documentation. All nodes require Internet access for administrative purposes such as package installation, security updates, Domain Name System (DNS), and Network Time Protocol (NTP). In most cases, nodes should obtain Internet access through the management network interface. To highlight the importance of network separation, the example architectures use `private address space `__ for the management network and assume that the physical network infrastructure provides Internet access via Network Address Translation (NAT) or other methods. The example architectures use routable IP address space for the provider (external) network and assume that the physical network infrastructure provides direct Internet access. In the provider networks architecture, all instances attach directly to the provider network. In the self-service (private) networks architecture, instances can attach to a self-service or provider network. Self-service networks can reside entirely within OpenStack or provide some level of external network access using Network Address Translation (NAT) through the provider network. .. _figure-networklayout: .. figure:: figures/networklayout.png :alt: Network layout The example architectures assume use of the following networks: * Management on 10.0.0.0/24 with gateway 10.0.0.1 This network requires a gateway to provide Internet access to all nodes for administrative purposes such as package installation, security updates, Domain Name System (DNS), and Network Time Protocol (NTP). * Provider on 203.0.113.0/24 with gateway 203.0.113.1 This network requires a gateway to provide Internet access to instances in your OpenStack environment. You can modify these ranges and gateways to work with your particular network infrastructure. Network interface names vary by distribution. Traditionally, interfaces use ``eth`` followed by a sequential number. To cover all variations, this guide refers to the first interface as the interface with the lowest number and the second interface as the interface with the highest number. Unless you intend to use the exact configuration provided in this example architecture, you must modify the networks in this procedure to match your environment. Each node must resolve the other nodes by name in addition to IP address. For example, the ``controller`` name must resolve to ``10.0.0.11``, the IP address of the management interface on the controller node. .. warning:: Reconfiguring network interfaces will interrupt network connectivity. We recommend using a local terminal session for these procedures. .. note:: Your distribution enables a restrictive firewall by default. During the installation process, certain steps will fail unless you alter or disable the firewall. For more information about securing your environment, refer to the `OpenStack Security Guide `_. .. 
toctree:: :maxdepth: 1 environment-networking-controller-obs.rst environment-networking-compute-obs.rst environment-networking-storage-cinder.rst environment-networking-verify-obs.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/environment-networking-rdo.rst0000644000175000017500000000720500000000000026727 0ustar00coreycorey00000000000000Host networking ~~~~~~~~~~~~~~~ After installing the operating system on each node for the architecture that you choose to deploy, you must configure the network interfaces. We recommend that you disable any automated network management tools and manually edit the appropriate configuration files for your distribution. For more information on how to configure networking on your distribution, see the `documentation `__ . All nodes require Internet access for administrative purposes such as package installation, security updates, Domain Name System (DNS), and Network Time Protocol (NTP). In most cases, nodes should obtain Internet access through the management network interface. To highlight the importance of network separation, the example architectures use `private address space `__ for the management network and assume that the physical network infrastructure provides Internet access via Network Address Translation (NAT) or other methods. The example architectures use routable IP address space for the provider (external) network and assume that the physical network infrastructure provides direct Internet access. In the provider networks architecture, all instances attach directly to the provider network. In the self-service (private) networks architecture, instances can attach to a self-service or provider network. Self-service networks can reside entirely within OpenStack or provide some level of external network access using Network Address Translation (NAT) through the provider network. .. _figure-networklayout: .. figure:: figures/networklayout.png :alt: Network layout The example architectures assume use of the following networks: * Management on 10.0.0.0/24 with gateway 10.0.0.1 This network requires a gateway to provide Internet access to all nodes for administrative purposes such as package installation, security updates, Domain Name System (DNS), and Network Time Protocol (NTP). * Provider on 203.0.113.0/24 with gateway 203.0.113.1 This network requires a gateway to provide Internet access to instances in your OpenStack environment. You can modify these ranges and gateways to work with your particular network infrastructure. Network interface names vary by distribution. Traditionally, interfaces use ``eth`` followed by a sequential number. To cover all variations, this guide refers to the first interface as the interface with the lowest number and the second interface as the interface with the highest number. Unless you intend to use the exact configuration provided in this example architecture, you must modify the networks in this procedure to match your environment. Each node must resolve the other nodes by name in addition to IP address. For example, the ``controller`` name must resolve to ``10.0.0.11``, the IP address of the management interface on the controller node. .. warning:: Reconfiguring network interfaces will interrupt network connectivity. We recommend using a local terminal session for these procedures. .. note:: Your distribution enables a restrictive firewall by default. 
During the installation process, certain steps will fail unless you alter or disable the firewall. For more information about securing your environment, refer to the `OpenStack Security Guide `_. .. toctree:: :maxdepth: 1 environment-networking-controller-rdo.rst environment-networking-compute-rdo.rst environment-networking-storage-cinder.rst environment-networking-verify-rdo.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/environment-networking-storage-cinder.rst0000644000175000017500000000105300000000000031044 0ustar00coreycorey00000000000000Block storage node (Optional) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you want to deploy the Block Storage service, configure one additional storage node. Configure network interfaces ---------------------------- * Configure the management interface: * IP address: ``10.0.0.41`` * Network mask: ``255.255.255.0`` (or ``/24``) * Default gateway: ``10.0.0.1`` Configure name resolution ------------------------- #. Set the hostname of the node to ``block1``. #. .. include:: shared/edit_hosts_file.txt #. Reboot the system to activate the changes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/environment-networking-ubuntu.rst0000644000175000017500000000676400000000000027476 0ustar00coreycorey00000000000000Host networking ~~~~~~~~~~~~~~~ After installing the operating system on each node for the architecture that you choose to deploy, you must configure the network interfaces. We recommend that you disable any automated network management tools and manually edit the appropriate configuration files for your distribution. For more information on how to configure networking on your distribution, see the `documentation `_. All nodes require Internet access for administrative purposes such as package installation, security updates, Domain Name System (DNS), and Network Time Protocol (NTP). In most cases, nodes should obtain Internet access through the management network interface. To highlight the importance of network separation, the example architectures use `private address space `__ for the management network and assume that the physical network infrastructure provides Internet access via Network Address Translation (NAT) or other methods. The example architectures use routable IP address space for the provider (external) network and assume that the physical network infrastructure provides direct Internet access. In the provider networks architecture, all instances attach directly to the provider network. In the self-service (private) networks architecture, instances can attach to a self-service or provider network. Self-service networks can reside entirely within OpenStack or provide some level of external network access using Network Address Translation (NAT) through the provider network. .. _figure-networklayout: .. figure:: figures/networklayout.png :alt: Network layout The example architectures assume use of the following networks: * Management on 10.0.0.0/24 with gateway 10.0.0.1 This network requires a gateway to provide Internet access to all nodes for administrative purposes such as package installation, security updates, Domain Name System (DNS), and Network Time Protocol (NTP). * Provider on 203.0.113.0/24 with gateway 203.0.113.1 This network requires a gateway to provide Internet access to instances in your OpenStack environment. 
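Once a node is configured, its routing table should reflect this plan; the interface name ``eth0`` and the controller source address below are examples only: .. code-block:: console # ip route show default via 10.0.0.1 dev eth0 10.0.0.0/24 dev eth0 proto kernel scope link src 10.0.0.11 .. end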
You can modify these ranges and gateways to work with your particular network infrastructure. Network interface names vary by distribution. Traditionally, interfaces use ``eth`` followed by a sequential number. To cover all variations, this guide refers to the first interface as the interface with the lowest number and the second interface as the interface with the highest number. Unless you intend to use the exact configuration provided in this example architecture, you must modify the networks in this procedure to match your environment. Each node must resolve the other nodes by name in addition to IP address. For example, the ``controller`` name must resolve to ``10.0.0.11``, the IP address of the management interface on the controller node. .. warning:: Reconfiguring network interfaces will interrupt network connectivity. We recommend using a local terminal session for these procedures. .. note:: Your distribution does not enable a restrictive firewall by default. For more information about securing your environment, refer to the `OpenStack Security Guide `_. .. toctree:: :maxdepth: 1 environment-networking-controller-ubuntu.rst environment-networking-compute-ubuntu.rst environment-networking-storage-cinder.rst environment-networking-verify-ubuntu.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/environment-networking-verify-obs.rst0000644000175000017500000000611100000000000030223 0ustar00coreycorey00000000000000Verify connectivity ------------------- We recommend that you verify network connectivity to the Internet and among the nodes before proceeding further. #. From the *controller* node, test access to the Internet: .. code-block:: console # ping -c 4 openstack.org PING openstack.org (174.143.194.225) 56(84) bytes of data. 64 bytes from 174.143.194.225: icmp_seq=1 ttl=54 time=18.3 ms 64 bytes from 174.143.194.225: icmp_seq=2 ttl=54 time=17.5 ms 64 bytes from 174.143.194.225: icmp_seq=3 ttl=54 time=17.5 ms 64 bytes from 174.143.194.225: icmp_seq=4 ttl=54 time=17.4 ms --- openstack.org ping statistics --- 4 packets transmitted, 4 received, 0% packet loss, time 3022ms rtt min/avg/max/mdev = 17.489/17.715/18.346/0.364 ms .. end #. From the *controller* node, test access to the management interface on the *compute* node: .. code-block:: console # ping -c 4 compute1 PING compute1 (10.0.0.31) 56(84) bytes of data. 64 bytes from compute1 (10.0.0.31): icmp_seq=1 ttl=64 time=0.263 ms 64 bytes from compute1 (10.0.0.31): icmp_seq=2 ttl=64 time=0.202 ms 64 bytes from compute1 (10.0.0.31): icmp_seq=3 ttl=64 time=0.203 ms 64 bytes from compute1 (10.0.0.31): icmp_seq=4 ttl=64 time=0.202 ms --- compute1 ping statistics --- 4 packets transmitted, 4 received, 0% packet loss, time 3000ms rtt min/avg/max/mdev = 0.202/0.217/0.263/0.030 ms .. end #. From the *compute* node, test access to the Internet: .. code-block:: console # ping -c 4 openstack.org PING openstack.org (174.143.194.225) 56(84) bytes of data. 64 bytes from 174.143.194.225: icmp_seq=1 ttl=54 time=18.3 ms 64 bytes from 174.143.194.225: icmp_seq=2 ttl=54 time=17.5 ms 64 bytes from 174.143.194.225: icmp_seq=3 ttl=54 time=17.5 ms 64 bytes from 174.143.194.225: icmp_seq=4 ttl=54 time=17.4 ms --- openstack.org ping statistics --- 4 packets transmitted, 4 received, 0% packet loss, time 3022ms rtt min/avg/max/mdev = 17.489/17.715/18.346/0.364 ms .. end #. 
From the *compute* node, test access to the management interface on the *controller* node: .. code-block:: console # ping -c 4 controller PING controller (10.0.0.11) 56(84) bytes of data. 64 bytes from controller (10.0.0.11): icmp_seq=1 ttl=64 time=0.263 ms 64 bytes from controller (10.0.0.11): icmp_seq=2 ttl=64 time=0.202 ms 64 bytes from controller (10.0.0.11): icmp_seq=3 ttl=64 time=0.203 ms 64 bytes from controller (10.0.0.11): icmp_seq=4 ttl=64 time=0.202 ms --- controller ping statistics --- 4 packets transmitted, 4 received, 0% packet loss, time 3000ms rtt min/avg/max/mdev = 0.202/0.217/0.263/0.030 ms .. end .. note:: Your distribution enables a restrictive firewall by default. During the installation process, certain steps will fail unless you alter or disable the firewall. For more information about securing your environment, refer to the `OpenStack Security Guide `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/environment-networking-verify-rdo.rst0000644000175000017500000000611100000000000030224 0ustar00coreycorey00000000000000Verify connectivity ------------------- We recommend that you verify network connectivity to the Internet and among the nodes before proceeding further. #. From the *controller* node, test access to the Internet: .. code-block:: console # ping -c 4 openstack.org PING openstack.org (174.143.194.225) 56(84) bytes of data. 64 bytes from 174.143.194.225: icmp_seq=1 ttl=54 time=18.3 ms 64 bytes from 174.143.194.225: icmp_seq=2 ttl=54 time=17.5 ms 64 bytes from 174.143.194.225: icmp_seq=3 ttl=54 time=17.5 ms 64 bytes from 174.143.194.225: icmp_seq=4 ttl=54 time=17.4 ms --- openstack.org ping statistics --- 4 packets transmitted, 4 received, 0% packet loss, time 3022ms rtt min/avg/max/mdev = 17.489/17.715/18.346/0.364 ms .. end #. From the *controller* node, test access to the management interface on the *compute* node: .. code-block:: console # ping -c 4 compute1 PING compute1 (10.0.0.31) 56(84) bytes of data. 64 bytes from compute1 (10.0.0.31): icmp_seq=1 ttl=64 time=0.263 ms 64 bytes from compute1 (10.0.0.31): icmp_seq=2 ttl=64 time=0.202 ms 64 bytes from compute1 (10.0.0.31): icmp_seq=3 ttl=64 time=0.203 ms 64 bytes from compute1 (10.0.0.31): icmp_seq=4 ttl=64 time=0.202 ms --- compute1 ping statistics --- 4 packets transmitted, 4 received, 0% packet loss, time 3000ms rtt min/avg/max/mdev = 0.202/0.217/0.263/0.030 ms .. end #. From the *compute* node, test access to the Internet: .. code-block:: console # ping -c 4 openstack.org PING openstack.org (174.143.194.225) 56(84) bytes of data. 64 bytes from 174.143.194.225: icmp_seq=1 ttl=54 time=18.3 ms 64 bytes from 174.143.194.225: icmp_seq=2 ttl=54 time=17.5 ms 64 bytes from 174.143.194.225: icmp_seq=3 ttl=54 time=17.5 ms 64 bytes from 174.143.194.225: icmp_seq=4 ttl=54 time=17.4 ms --- openstack.org ping statistics --- 4 packets transmitted, 4 received, 0% packet loss, time 3022ms rtt min/avg/max/mdev = 17.489/17.715/18.346/0.364 ms .. end #. From the *compute* node, test access to the management interface on the *controller* node: .. code-block:: console # ping -c 4 controller PING controller (10.0.0.11) 56(84) bytes of data. 
64 bytes from controller (10.0.0.11): icmp_seq=1 ttl=64 time=0.263 ms 64 bytes from controller (10.0.0.11): icmp_seq=2 ttl=64 time=0.202 ms 64 bytes from controller (10.0.0.11): icmp_seq=3 ttl=64 time=0.203 ms 64 bytes from controller (10.0.0.11): icmp_seq=4 ttl=64 time=0.202 ms --- controller ping statistics --- 4 packets transmitted, 4 received, 0% packet loss, time 3000ms rtt min/avg/max/mdev = 0.202/0.217/0.263/0.030 ms .. end .. note:: Your distribution enables a restrictive firewall by default. During the installation process, certain steps will fail unless you alter or disable the firewall. For more information about securing your environment, refer to the `OpenStack Security Guide `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/environment-networking-verify-ubuntu.rst0000644000175000017500000000575200000000000030774 0ustar00coreycorey00000000000000Verify connectivity ------------------- We recommend that you verify network connectivity to the Internet and among the nodes before proceeding further. #. From the *controller* node, test access to the Internet: .. code-block:: console # ping -c 4 openstack.org PING openstack.org (174.143.194.225) 56(84) bytes of data. 64 bytes from 174.143.194.225: icmp_seq=1 ttl=54 time=18.3 ms 64 bytes from 174.143.194.225: icmp_seq=2 ttl=54 time=17.5 ms 64 bytes from 174.143.194.225: icmp_seq=3 ttl=54 time=17.5 ms 64 bytes from 174.143.194.225: icmp_seq=4 ttl=54 time=17.4 ms --- openstack.org ping statistics --- 4 packets transmitted, 4 received, 0% packet loss, time 3022ms rtt min/avg/max/mdev = 17.489/17.715/18.346/0.364 ms .. end #. From the *controller* node, test access to the management interface on the *compute* node: .. code-block:: console # ping -c 4 compute1 PING compute1 (10.0.0.31) 56(84) bytes of data. 64 bytes from compute1 (10.0.0.31): icmp_seq=1 ttl=64 time=0.263 ms 64 bytes from compute1 (10.0.0.31): icmp_seq=2 ttl=64 time=0.202 ms 64 bytes from compute1 (10.0.0.31): icmp_seq=3 ttl=64 time=0.203 ms 64 bytes from compute1 (10.0.0.31): icmp_seq=4 ttl=64 time=0.202 ms --- compute1 ping statistics --- 4 packets transmitted, 4 received, 0% packet loss, time 3000ms rtt min/avg/max/mdev = 0.202/0.217/0.263/0.030 ms .. end #. From the *compute* node, test access to the Internet: .. code-block:: console # ping -c 4 openstack.org PING openstack.org (174.143.194.225) 56(84) bytes of data. 64 bytes from 174.143.194.225: icmp_seq=1 ttl=54 time=18.3 ms 64 bytes from 174.143.194.225: icmp_seq=2 ttl=54 time=17.5 ms 64 bytes from 174.143.194.225: icmp_seq=3 ttl=54 time=17.5 ms 64 bytes from 174.143.194.225: icmp_seq=4 ttl=54 time=17.4 ms --- openstack.org ping statistics --- 4 packets transmitted, 4 received, 0% packet loss, time 3022ms rtt min/avg/max/mdev = 17.489/17.715/18.346/0.364 ms .. end #. From the *compute* node, test access to the management interface on the *controller* node: .. code-block:: console # ping -c 4 controller PING controller (10.0.0.11) 56(84) bytes of data. 64 bytes from controller (10.0.0.11): icmp_seq=1 ttl=64 time=0.263 ms 64 bytes from controller (10.0.0.11): icmp_seq=2 ttl=64 time=0.202 ms 64 bytes from controller (10.0.0.11): icmp_seq=3 ttl=64 time=0.203 ms 64 bytes from controller (10.0.0.11): icmp_seq=4 ttl=64 time=0.202 ms --- controller ping statistics --- 4 packets transmitted, 4 received, 0% packet loss, time 3000ms rtt min/avg/max/mdev = 0.202/0.217/0.263/0.030 ms .. end .. 
note:: Your distribution does not enable a restrictive firewall by default. For more information about securing your environment, refer to the `OpenStack Security Guide `_. [binary image files omitted: neutron-16.0.0.0b2.dev214/doc/source/install/figures/hwreqs.graffle and neutron-16.0.0.0b2.dev214/doc/source/install/figures/hwreqs.png]
fHF hrt6i&Zr,%I1 (h/[ڴLwiȆ 0u_I eK}LѡƅC/m-IZ\#lAqEФ2̛}١:X9mOJUNmNŖV\Ye٤H:݋L,'F VC "SMc-SiV鷌h$͟yu_N(Zдs05`[#0C x+Yh"F 8(<W 0#0"E9#P9<.T>F@ìn^;cm``EY;ldF 0(<.'F-0[ja YuH@F@ˆr#@D(pb!@YfQ}n"h,!QUrڨ2QrX,Uv`%eЪy*\7~fcgh[Qdwt??Rn${k"to >_?.$Pvn?_iоUsj{iAku`9UP ic`0".K'a㞣mci8|܃{ |j++^ :ъ|j]Go6Q} 4֊J8v&dS/˩zX6XN SNFe_*"qTzchj"Q:{_Jʬ0OYpפa@!?CjR@pՔB]@q`1dkddާ]~~9#1:*8wt=d5Lu-?#0W9#>=}Vc@IDAT.2qI`E,SQ%1~0w6pbZ4:ɀ +}FU>p~ZgxIT)Za7'Z7d &ү[;xIЧk6|b3Y+_:b܆z+yԛGE_8 \ Y7 2O`;VXRE%нux""h(2i(]Q#0Ar~,iNmZgoLIJTT-!Y 9u.2}A@V{&_ q_ϋdR|-cQN<5 zwj#++yQC-cr|iד;&r᩿ASES6ʐjɵe}=D@YNF;A^+ՂyWznZh@7x~okw·@88gHԦ=Cn KCVùlN8C>ޣC&e?΢``bJw) DL*j:fe%GTzxfxo{ c16RYy<:@l@L-V9 Z^"Z1@hZQVX)ʲZqTuE"&4(.0>dtY󗊡'/^ Ӳr/btxYeeߟC'T{řR\L ZJv-( 6U ZhsAhݎ-oIOIg}]#*+*  GXkQNCk3T/@93# ]/RUZEP$kYi^qyA[?v.žj{ ,E1;\c43r:_@ytc(Gܞ7d:ٯ\ùCV4IhiXjډp$<**`JժJ.W`hFo?~ړ_L3y,~xv!MqN4}\8Ŷ< ;uA!j\Gu+e^5vN9p|u#(R'ܙվ!^^)K8C7v}En zwIW:g8E|^G|2͆ЧKIњ8d-] FZ5s6Yv:%.rL3^?twD3_\ͪ p>$x( U6~ݲ+hqYM} ncyy6-a@ve.E! [+UN}ZpG{wfF6k*BSCkLdŅpiKƯzmc|ubyTz QٗX`ř/50P &.kDy$=yuִ TTVkBu<"ΰ6W6^wç.T%\hnZ=3k{un6o6ҟAOSy]O9S\ύL[U &r]w k:6rIN_͜?\# P֑ŃRlTC5>9) 8{}pL4SE]õRI8e@+<8]ڦ6G8^<{<%)(Kт7OkyhQe H,\yjEЬE3d\{HSrbb P7Z@e2@h(Ss,uK/wl-A5Ƅ5;WcrZp#pCʲջzΛ[<^7 ^)(u4D\8"f|wp.k{U1@8B Oo] E1dƥi5Z⪪*rRFyqUjz`cI4ܐ^*`lN 6D δq`湇vo߃xbŅ.y\YƄ;ojo9Wq*Q<\e qqԽx Pަ)buܺ8#n| O2XF0#L r%ZCIAܩ)͚IVClw ,εrR @ķ?f8}[їxjELJVT?tزMfϼv}al${/7awJ= kz?P3wUrb~>0"y4kAʲ;/7%宻Z5O1Gq#@ >Y K]0'Y#IH*_ _Z޽3'#+K*z3oxIѽ\)BI߀sf5+4PZ k`*g|eE9F"L\(TeL]69wEaHk45['uہ4m¢}/"6BiTB -ٕc2r6qOD V\ywnuxX( G J2E8jl-8V/s-a>T-K-0#Ъ>V;0<30Z @Vczi82S4..zI||bҰӮ[Q&9gc8E˶a(\4E£ 4On+:$_dIiiEle\h:1HRjN<򗘘lɓt2yC'q7?oo*//%"BVWp:[׵\#=r=:T.k_F+A+d-7[(Y8Mp )kgbҫo]uC9-6!>l2d2m+jD];iHsrkayiI>s`CX!!+p d&kvʩA%sמMc"ogT%䭢uK'8phqCZ?ޙ<̣,Ӻqpm0ġX[ K1NH8[A.&5K:Aqk:_}m:c0"C2̪n;Efb 'Y>R.o-%}mBIh4 p߰ SGSU֊?xY~/'…R|fOؔBHR݂֤1,X+ >'N\qz wXzÕ̟?^uuZFˌnΦmCq I嵃u?M4JzA]Kz̗6sFA+8Pju)œ+wRpeK(fKDAD,11Ũ0p3[ ͘ظlxw@E~F-V k$)d E &Fß&PU SxFԊ(3o}rsK~(*'gN?;u~Tkk}BvOO`G xEẕVfRe"J EY)1qqyfeKN.UV}$%V߾{ϟ=uRVJ%@NJҢLB P(XgGl?gsC *x|KbOr[t1cwL6LpBOTftgazUr"F_B(SNkGI$eLV)XsNUpWvƼ3F]w/~?șlLѢT) ϥ5 <7_#и:.11&V[wb;ws1J*I]to?~mXWku{fYfOdL;Ճ@RB(ɡWO-4w?N*3nͷ_WQԅ/.>ko_9#\1H9U7fn m˺KQ9fԵπ>XG賯Yʵ9\Yxf5ewRQ aRbLkr%&3MiQբ?fN27m__zћ鿊`NV$ڤ; [8oKcbcr F@O4dю˞u]۟?SoYMW3ZguəP9 ;| # 5e,U~E2-t XqWhSlLQ0afs[\SCfKp[[Vm~|ofxվ &Ġ_+11gnD9m)Ɂ1 ȏ>ԩkEpoUoMoNzZW ~靳ŧ17DUzi<|m2-/a6 1 r:Ŧ&~4gf;b@Wbm]2X ;p:xn._ &I>n4ߨ8/u9ox:֘8G&9/#h@Њm y1R G+-3k|A-٩J3oJ+o0j -"`G:*a3>WRO6ӧo1#0 "ZC?͜E%rlWJOô#Y6ǜRJW{T購h7>wOfM4W@%9BQBNfyX|>TotBqΞ?(<7Z%IQm 5r7t˩Gq3>|`Qpբ,\29h\w'W/%%Ó^gOg0C8!^&q K#KT_rx\}~_^o0)eG_zz9}U6V,u0[D*Q#y3OEYsۻˇ;r|9'..!ils"dez8wCztb5zM !r]*}1Ƣ,zG\\r}cl|ԅ,5Xl D:ѫx`0#p8Rl+d+3wï߂AMfdJ~lnSbB3|l-x*6y?0l +m<]^8ȥ7;MC=S7`%~r<@D q>5b)ʄvKb?A{w:?/_"h+:l J?e}%],"3@cB@ E$,UDA\2TVJV*CNe4&}圆 u  >}᲼G8"]n|N8UY-#Khخ^G#XN\`?ZQDKUl,kZ#=\+݀K\fI)Y9K -#4t_*.?n2+N3-۹*++'))):K8Lpd4 vzPNNc˩:x0@(^~o[!$?gա*fh ZSmړgARtL<E%0{7h01HY&wrx\ :n÷Έ|Ơ*'Vr*x`? '7G}ZOv8?lu.F&9#i*VO;M,j5ɽm3AFj_X]Nχda |qtl!g]ql{ faHI~v ͽ/ zqu]]G`]Poczth?4 >C! t@aL+oHN#SBADF *^PUlfd$E/$Ud*߱3PYU}d_~^87⍳Kۖqw_,zYѻƏRo6@d`+*"muڵj&+v?{v/߰ )=4yKd5& wŹ֫!`@ç\ǂ '_™ѠtL4Q4U(QEॲJ 9Ɖm\BN kaDmce sp4szC_崡1b9%M0F@ VBR%Uj4H/eX)k.%; Knm#MXdeZPPXJ]Љ,L}:gCl R?z1}SGuTkMi㐒NEw9ւjr壛ټY튯}ޥ]K;C"X~u'e;]+?G8U'6._Fk_|O8gxa̘9$Dڡu\/;lg9r闌B얰xv鏜z#&, 0\#wj24 {O*. 
%Vl|ŵ*\lwlB]?x &r}_B+N3[ὥ`hʼnvơ;1\3)Ey6)4+F2"As gpfGN^y*ڹpUcd0]%tƏ y_Qa;4G8=+ |G3*~8<_4/7s_xgtnY}V9ݹOQHoڬg_$oo@>>ԕR!\M䴡14Pݟ2@`r r}bÅ \5ú_C;{Sny`d*ÓK3pxR<2{:/­şsWZkaUMN"4X 7|Ou%D(%I#XNkF`C@ E" 9n\2B_]8sª>¢C^9A!/pÊeOK/L2'F+4>m3IKf9oYN}És1@d";j}S/w?w^ze98ӆBkX7`;( ބwM|Oq_ rL 3-Iо8\h* zK'_%R#1Ҕמ9nzg;>|`]/REׇ`|zGV |7?x%R(э@r %nԤ( pr4)m96BI&aˆ>ʤ G|"hFAZ0 g}#Ϡ7gimݺ+J,NC1@T# KIX97#Wj8 Ng͛ħ{: "HJ G )BQ}PwEl/%q&|LNL!dS/H%𛂦t7=[X"˩{XYN{F-%*iUӟ_ɠIp,z}kRr0YAAQl)ĴN6)Ф$S^qKn7Iϝ9z|=c3罈\* iO=ts3)v,N6,N<`(gf-:k'xa\0gG}̹=(/˅"Zeq&|hOQOrx\"`@RJEE^Ui`tE݂:$,&F˩ K~ a"ApCP&Mv"et#&%cZSVENЍX(LkNr)p㏗N{J/9~{3~x~1rW!,qYD<|C3g>{ޓ4- ̙E]Ib;bi0 ! I2 Ad.ޑ,oI;lR@?LBS# s<YN=r(Ǖ06#  ލ7hYUW||֕0/wmvqKA,f#%9QJi`Ļ`Ս4TT2GAQfCZfG4@`OhPLI2?^P\9 -x5*_~.c9uO˩{\x/#D'j(QgKŀ_ϙʾQ%%wfCA=]ڶ4Q1;WCRD &;>=:q6:i?%?p㳦<|M.+l]nI>OSgλJE9$;~Aio-,`nDW3A;Fs&%6VjQTbN_ o}~bm`o> lG+Zs s 㰓-Fuj*-imXFkHF͜K7?҃x =:d:V}ĕ&/h2S W\!N2!g,oeG/,i,qu`1CuYvg{An=lj]WCl-SMF%۰)~}FǶoٱJkV.EM(QcoY15mef=ҹ{-s?XNk0r0|W[Nkj-F lQ n1$ ~`}Ѭ$G8%3#j| ϺQMEͺ#-9x18 StzvtZOcF@ЊYhiD}\,D&l$9W7Orp~bu1c4>#88kM4(fuCi+!,AC0@#B hE9 -U kr|ѣ⼣q4E(ҾӰz[xWCQ7 {qfE9VV9~>8r"f>s`"eJ}*(ǥyq 8ծywtk' rySqj|O81/u2ǝ:{ ƒS(eM6mW&`Fog׹o43S@y7Ɂ )QqNBQG ' O0[?)0{à\jrc%0RD8YLN .&ūft} x xp$lI~aXvt瓆zG7_Q9_7Сu0?ԭc\Xq^mZ+{ʧY K!E:L*HM xMxC:@8 TM$qx`\N?п[[d=~zYf,~ a9GD#RB8݄I KUUeIQiyѿw TVUti7텣q#*䖰qw2xgZѯ3N'33+š9 .ꨞ?t u"~vt:}C*Y/ GoIE4? @hqrT雬-?šgԿےAۿ=Ϛ>4߼36Z"--ʉ+srɟ37U?33_{ߗJ̃Ohz/(29F޵y5aq9[nۋO=d:,2˜^,k8ш 22O:(}luJD:@.Z;Y7o Y魵uk/˜n[5CN:v.'?2BZw+8GgC꘭u̗s8v!=ǟ{= #|ʞe'f<7jdQece(? o5OhқTKN?Z}ɯ}ΡyP;\3t˶LӳFD5sZnsz̾N6r p7ztq.ǫV|tC-]<0F#CGЛyPsh;=?.\~Mܜ7?&CN4GD?,s|A(B3$18W2%}w c,2-co bxHpwϑzi n =4ܵOy`Sڧ}x >,9s{,bZܼ˓էgvΰ^}v\Njeok>1߾vvT$oI׋]鞹봝~JZP!giTC@ tX2Y; W8>sGْf<ٗisLNҵKL0z{2+[QkA8ݞ5>C>?9ŗ2o9 ()O5Ua$L:kA!LӆT؆_EUYSzs}}͝y5ss`vr,k[L_˒2prc<ء}+Jte77H*S,[,{g:Z2k ϡJgjsiG#tC:kaQ:H!lZ0ʝ F;[Q9X֛"+qÞ}?]Fۥ0&])IkmAh9T["}/l"'=ǟR4H֡;\/v PLCמwGK@r:m! #@hqV, =vjU|yPi$Lwr7wJCd٣wg]t׎ܠddi 2-62:$eqe8E]zy~|k.E헬sآwY"sX򣞽t'zR~WAAawiY!5#}unW.Yſl۲E6틬0@\^ ogԫrҋruQI-<|q>\|sNơAr?+h<%{@38PxZ7J@ (M8 Kxj8 u{׭[k_7F 5]VVfҠ% \MGYoӁ5Pn˅z\p|BO,Yz\IV~q(I@:@e߷ =UvF~i_( ӡE^0(րO' g d㴋:(jj?2{˺ꩭy$'Δּ3JљI~sw\MeN+W^$n#7N IZ(KWgN>?Vd|̗@9l!p=r@YB@Ygd(C˲~Hb Jg]/0HM*8ٜ= 5θ*ȷ^\~ktFoqöKEμBN$@%U'mSNdӔS 5[} X:}eٜ7S{.>MjrR3[ё=;7Ot6{N5KcWl:Zu*tb|hlOVA#D=|4À%lY0@ 9嶺IgGTm+a.S|Nnsæ8?y|،P |3/d]{/a p_k7:+Ǐ?lq46O" >e_\ᵨTyH>4/U'*+4iRgIEfX YϹլ;~~oG^WO>iMg:N͕ }Unm\>y+gV JqXJ?{ƾwۘ1̤D*Y3=.%zӑ?-$!5v팪x|GWu:V~uX7n]?23~~r$D#^&s̷/C^k}gqńʓk+y$j"~S/Q3J}^tRsƞPХDŽ@> Ak2/&HΧsE@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@mˢGUL_2q{e,g&j=lQ{u@@]eߟeUrUƺk3GYk(7^J(#3Cg@l?b[ @@!ЪrIBa!@k2?yVVc^-GM;|9]/+DŜ+@@*jrIEZ;uoI UgZ|KWZ8'0f`j*w 7HkTh{>bb}.~ikY\ܼ姝_` Һ|׷ ZIDATi瞷aѳ^,y  Q -9dhuB25E@d[ FSH l=ov0#.'H>0݌QqeBj@@4P-27ItKNjg)13ZH? 
:>ɍ{-{{O3C,+F@h@e}qCͬ e Hzf\  69 {0'm޹KQG$}  r(˹Oa"y3qM  pHhRy$uID< }̷L  m,@YM!uʼnz+  @ EW@@h=ֳ%g@@,@܁<  zʭgK  X@yT@@r6EU"p[ UwI`92ƚ2RՔq6%  EyE]rÒgG:jW)A*u%Syr& Jv]YߟUj˒J`nQvzllioNHS[ C?kϖSs ,N;{$3Y<3mFe0qq<3Gs'6mITzӤI=mJeXuĬucEkix˰,) uxub¯T!ʾZcnn7_^\p ucէ /swT}3<7L|`]uMg#Ψ,[itjjv&=N:FZ>;?6pg9c!51/Y1bynH߿m̘MzdEՌtfڈ|m=yraX_]1*)YςdˉM-_ZcnL++[p6l&_5.i:WTM&u$lML  >hQnCk!җ Wcw.i4ƛZo=cvmAnVSwkJ#oun ctyet硒g_Ϲ}ݠ6XZwwobԹ@[nwLvM$i%^)/<pPZѝM$2.CՔ&%0V=~G&oVAwǍ7.=uS][[>$ejf W4fڻT~G-$1SjT~guW'}x2ZFv gGswn &)vs-{)LIn췮N"﬇[u)/جn٤Pk/H#S]:Fre #Պ4Y fcR_5tY_ZA4A] -u~- m.zf}~ܿ5R e/}_bW߳_O_D;ۛ.z]-J"s޹;N/W˾EnT/# p0~ʞZ9vwKr^H7g|NJ>7#Q'-waU\h`4N ʀh|dիɔdQ7vMA&<|VCI7%0X2/;OR Ni1KL4IWǫʺGVךm}SI_Z7]C6"~S|e-eKdd+5䓣ytl64  pP|Ps[PwW {W֥}cI}:z]2 K5҂yYv>!7UiZ LTZ7HsV[W$ܨ붂>~"9 3W]M0W{WnkS+M*-]H4zQc۔IbKV$emdx?vPkƎ }b&%KnzC؆ X@Z5ѹBzsLήTUɍ{tkdX?qѭ)Gj%UUtC:Dоl1.`s].琫ԥsǨUC؆ \8^̌QE2;BԮ@Y>oJR9@0ܪ3?󯖹Y¸YGr  @r(K-dG0wjxf\1i<=_7)1@@lԋ RU@oFzp?{?('Gf')w̨ :3! -YʲE2؇rw[\+2Q<3x  @L͝GdsL:Q꧎`{:iq*  @i< 6^vbQ\&7  ОZ5P]V%W]%#8sn=aE]13O08:n7'-@@&rv5FUL_mH_ ?%Pޤs]ia|x9G@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@iGUL_|q{4gC3W|7{[ޡ;XF.Ɨ'w͌ĸߵzR@@ ڤEb◜K]y:=ieeZ]NZC\cGfT2M8@@@@Gwm @@+7Wo <*LlJf#N-(@@ -??nJ:[A6;y՚# (yKK%oVt{ieen4u[&.G{6;muU׬wy?Ԇ'3BZz-oh:?6qbim>D#57VWuNzmrMNMH cl)HkSF'Ϩ,[2Ɉ+igG~AZɟJՉz~?Y=UJYEpz/F\ /H1򣣿 /XZ돖7^ʱo9Y+77\]Yig㤬c$ϻ峵,Ouw.bWXQک}g]zsytRC}}_t @ve׋W$kr% 4>'I-6tEvTɺ?N{rGF K88W5ɷ-$wEXE&nԥ6>.)ֳWO,sz]F'ARVF7%PNk4h ^b圷 =!ܿeir2Åsz=n?X %Fm,K-Od9ܳb/fϑWڶPy("a.H=G@~Ę Yn|s;P1.,ay$? Α-/.8vN|7欝?jv @ZkzSj.bqA[J{o#>$QY~mUwpl> KtuNo[]QNs1K7v쿦8^yV}2(#vl] к-3WTM&_kK4|-@~ zJ^Jp:NZgTkN˷/$M5xDU;sUrI3pl{BKKswaF:p3m^>|M#֓zɓ \}u]8g1;FZe3%?0+Yo?X[;p6l&_|>+-–D"pO"/_>Yk60iu{ *eDnIm֬YҨմ%컬"=Z/dg{=_Vw˸L70|H$[;_ȊLZ5v$eLoW$~*&)];\ՙ^a}-n:J_}lVfn@N%-ԚK:~dm(owoJ݁W̿{G>V@@hQ>){Wד)w}Ci\T}Zu]-deǟ3-頑9r/+U?1\g{LUfBwf,:1eƎWS^Fsdo%z*R b 5{)z]xK{Ub[˟ʞk> Rnr؏ @GjQkiHaDnΣGAa+o|S"LeҟL:ZbQA`~$I/ܫӺ⛫>:=i RJGg>'eܾcS>ܿ7$J*&Fץ6Pn <݋~_NJݜݖNe*W\1auwk;K7%0X.;OR Ni1KL4xUYhZO}*Kpp~wKKxl򃡲%2ĕҚFcdQyUʵV7vMA&<|~+&{lJu<Εkp3K@[ g-2@\罋`KsK 7-DҒeyҊDI59#fcCgT_س`FQ:X\9O9~ZNnbGϗKƻpe].9.)#A<_&_o_xG?˿3k&]-zL6VaY[OzԂZpi2TJ0"}]H4z'_/:s2xU21X)JK&J%~IڹQcc7ȍ$G}檯gg@ZA@n.zFV:ol{ڵ@ h ݊+ݕ~/ ~Co+8\כ;MǏ?qz%UU4ֿG}٦IX.TUX>*ā^6V^coZ5} @EsP52L9c L{E m.b_Z965/'@9킛f$*2cT# KVf'iֱ԰5Bq;ي  К9 5}h׶5+/yowt|wDs^{LR۝!}wW9y[@~=ru9D@89{ȾNGl}<ʯ 8LyGM7zQot<8eN@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@  [.eTIENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/figures/hwreqs.svg0000644000175000017500000012130300000000000024354 0ustar00coreycorey00000000000000 Produced by OmniGraffle 6.5.2 2016-04-26 14:57:28 +0000Canvas 1Layer 1Controller NodeCompute Node 11-2CPUBlock Storage Node 1Object Storage Node 1Object Storage Node 2Hardware RequirementsCore componentOptional component8 GBRAM100 GBStorage2-4+CPU8+ GBRAM100+ GBStorage1-2CPU4 GBRAM2NIC2NIC1NIC1NIC4+ GBRAM1-2CPU1NIC100+ GBStorage100+ GBStorage/dev/sdb/dev/sdb/dev/sdc/dev/sdb/dev/sdc1-2CPU4+ GBRAM100+ GBStorage/dev/sdc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/figures/network1-services.graffle0000644000175000017500000000770400000000000027255 0ustar00coreycorey00000000000000s۶?O>;i@0ڸ+f4s E$? Y^}db v 04>$ Ĵ*^ ~|-?/7vup1 43ϷwGPmo\G ><}n<ʲыkR`}#d7Pd0{Y9fRz:pˍg;]? 
>cF%GQO}G!125PKkg{s^E<%1O?f^֨"dXq^|ӅI=eƯQk/tz7 gUz?52{uټSx󆤍vT=ΚB4 ZL8r6SJ@ݩ-,b;B:MvmriJ1.8uujdJ:yTRN1߮SЙ[[M\秵LfKnU , C-CҞ3]p6RQNjҭqثcȹTNV7q%VDD*+߯yzvxVBAlܽ~wݼݭHi+XVvi.w\Ɗ%gҞS.d[_׋j=jjv`zQqdᆞ(9:,[zvqI7^/kl49A%QEg5-)܅VuOA ~$f?g;]ݾG|W^2_@2;Hp;O8SPyI}‚p,RQ/i/El7a,V. p@D$/Y0T|@O0~xu7N(M/_sG#sBŸ ǹ G%ukIF)֕Y\ܓ66@yO2It.ײ%ё{s<̹S]Gh9 ʼp<0\S2 > iMAK#a;d{ A!w5|ꀌ;K`G%_= ۆItLK  TJp דtD:>J:@~(itnL 4`lmU.:F1)cks@@H爩DH"$kɇ#mP{؄X@D|:H$q;ك,,P钧ncI@sm;6n-a'p a{ʰ==9a[ɢ߹b)ae%Ao|jIdzg*G"z@o^2P2ͯiNLEҭ'I5*TC%7ݍo6)6Nlp5TD*"q+0?HNuǁ4KBT9 .F\SQ$[]@o<ۜanƶiy=~P 8 :EiܻEU\=JI%G|)"bq՞?gbm/çbk::D!pzg?O7KX޺sDbI\SFVHd${ N09\rERDzD4"1||c_)h:/ 'µ]CȷNC$>&$~DD"><\E7 6<tg t[246FBũ5kG|D>"v RPWlYt8H[`#tHZ[a'>!9rn8w4-}m|15%NC!Vp=݂fpbѵm33n n4 p[4?dܢs(˲+t\3"8)Dԣ-j=He30}#x\ĺ7xAy2;eFw fnj?١;l]?tHBÏ8y̤ރ}i ;N02j.%pPrZ7.E4q4hgUW$V3 }EjmK,AEMl!m=ss]yíc}C59g-'r?GX;n,c9CUf;̑GF[UF*ե eqgWpAMRpE`Ru.fl u AMˆxya'񧠧YjڽOLM<>0#$.+Xs6#Z^)~ܦA3Nj'ZRW+c]!%dW]dmHNbxi枵 qJ1ScBL!STexafmfuVGm[`8ά~3 .™HSY 3;/.Tu7@ı)~\Y"`1v¹#NQC-yZ` 1c5C+˘ΌÆ~RK9 ze=3L:5e5v*:=NZσZs|_:W!n&ґ7J/䋫ϯj@z gl5)%wA\4:9Dsu+•NPg*iJ.jTm$g45'C_kN$OjJ1HS]tû@]UN;yx{{ݍF=թ~IiVDt.Q{j╹K|W0Q.8kTF1>c JgNGx,$_U 4@Ӑ)hDJIg?AN [ۢ /,A\\yA%&xYv_#}OL-~IgIFvz& ]fµك1]@Vsً,x Ԡiy0(g)TU0C3O4 D< 쵞ho7R~8z{:WWgo-N.}w||.܅C~qASknsx?V4*wVR1-OZxc Z1lq\8gptӐR pˑ}JtG|>Tv<'././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/figures/network1-services.png0000644000175000017500000051645000000000000026436 0ustar00coreycorey00000000000000PNG  IHDRsRGB pHYs&:4iTXtXML:com.adobe.xmp 5 2 1 2@IDATx|Eg]){E}W}$ Rr\](ڐDEWW_{{W@Per%ʳ\vwv晙3m 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&Z٪BÁaL \W P`U=dL&p~N%}|>m|=g9ci??)S,yĊc#L׼wQ;;* Njń^FGRўUƅB; P" ]ե00N~[O(O(_w]3c䲖 [[H&sLr꼧Nx CIo [pnhG(4Z:Nz(ݶHi]8e ϐ쌸vswlڎ}}[%N;?OZws}(/CpR+%dB}n٧LpW.[o*)ԃVX}Pd?tK-qtdyw+WR-sSu./pk:W(cyx@>/KwHM!-k]0o eׇoǥ3&v<lxicu$ΥKz ̝PjD%b1iS=4GY;$4i|~Ή-;y>CZ]LvP%æ>{{pSM_2@~+a;kVw8rMNs? 
gç% Ӕ9sPZ θ%%8I=6VhB.^Kc)|Azη0i1ʫ<055yXU̝SOS5[42riA^maCnPiqsYM (NYA5᠉xuGܲl2;Gem>&|Th_G6{Ni>ϞΎMu W KM 켳ș??KWValp{敷(C턼l/%Y#rq 1챹ʼoȗ=n]<ıGN]Hh4R Mq8t#z>āUϿň+5FDRa{Lt:gOŸaxGXjV"^PXQғU.I4!/vF\}[:Wq)@xvm{f_yM!S yy }Lg%DƢ韠7a0NM c454 *V+l~ 8G%>QaIӔv>W\Pi160gOǝsE'deqc:B1˜=/`Q<4RRChQ >9Θ[߃dYgPY@C4ވ^6[a&yTlz5FG h,AR>ϝiT73p;zesw@a?Hހ~fx/3GӴa?)yx :1=nXJ܎' Ay[0zM"!zDz\ޥg(ʺhJ2p+]iQyx + '0ǯ!.qFzHL9帡p>.0go2_"S4V.lX3x*n]=#d+U꺧^5гsozC>?v@)z r!V#h')=HSg`[OO ]yϱ}FJ5}}GV]² ﲼLLh6?g-Az`mT^]gV|TXGkMQFbռ>qB=P=HK1,@Cr?^SH{ИCHXraQlKZN B(Des|iU?'D"I}#'q커h4Z[`crC %LѿγeryPmF x VhƳzGJ @;t&N\}y4^[!Ngi]pE 27 C{΍Ms!?<&xG F(QU7 뜚nҴtTS0MMSVF~hxz=KaHߋkr4/{%K*@tp-ØaxлebrPZ#DuzOr@y*/v~m7T~5O2OޱBE%kJשÿN'wt/5J Y3NK8\=v@X^(/O3F{yW[*'7H'﹭:Y[1J_7 y,>Z:y3oM fI9 Iq w( Sը`Ўԃ,*q+ eFcY ~Y?*!y: n7ˎ@=C7= '{t8첌̨R 'mN GznO5 SUTL̃5]APS EV|τ) rc7L|-)s(:Q\8 "UNq;1{s?YL)c Nun+=nES#lk$;FT93uȌ/] y=;~h9Li,R!7S=HX?dĒ/7+A E_wN/Ww/}_59nW; n n߻T_7BZCoy6DѠ0XҲȴ+8ڼ%<:|q.H1@ٌwD*pNMLaO(QOH/\2:uܸrJn!M>Jfnv+&0cFaO@CPc=>3jht;M1ʖ#ei% :yNg@HgKT+ $9=C t3(gEwG| yS1Es"92"V6bq$е{z h䟄]]E< p%qC`A9pz㉴Qo2db9|[IL@^l]iQP] BWP}Ĵs쮱*%;mی ܘ.'sZ˾emmj Fn7\B Z;\* J{$/U}kHhpQ#Q^n*7዗\J胺 _؊캡kCqǗLMD(9L As+=2 6CdFm+``+ :?EC^ԋz:~R ]T5!'WY3f_ǓF2zw~)uF?pO * d%`IAbBS\(Nі_b+aM%;8_JcKĬG8g}";}cpT(3<ydIwc?YU+fs=L[nHAZGO[pé@ci=@AÔ9C/Z# !=yhϻkߑ0Mi}!^}P>俑H-ACK>ȨWw)qE B ؛xg(Xba.cnN8nuؼY +}~o§rx,Ec0hO{]V.Φ߷Aw/ֆ捈c`Ҕi^a?-\f]<>S6>-D{~V|93Z3VBJ`)#s/z?*iyp*=vm¨ U)"Ӽ%~w)eڕ k=J4)l<*f>_oX'hE?E*hNu')+X\3*$kguۉ8alSq!挝WKs{^"7,'b3b*~YbЮ4.G>DQ ȝJω4 ٗ&M9W{NkcJ&,ԩ?8͙qѢԾ\J<(zcz|bH(DoYUW9EpFOuJN?Ɖ(/> LR}88lV tvj LS  ͨ^FxnM>W"bD3 n ?ʽ펯@k##-E8<)!t*4 չcZ;90_ 3-NUBR&G{*x#ѳ&zw&%>Nؒd(G"^Q ے&0 CLl݋ cٍg <Ŋ`M*y*44X=xXH3лP,n}0Q)U[6WKĝ):AzDc x.ޕ1\&gԌi\ڸFp]ާw14ZZ5$4W,2(V=L@c))Ǐ k1T JEaIWCǍ]^:)(1R!w}|S%#6l/41JwzwҤe.C;lzUb(|Z-V6ZmpCv9" MKݍ Cly2Ǣ0{4veJAxN7矆Jph8 M_'|0LQy`8Ζm!ӵ.QuqRԿRt!QOECbE]sJ1 ߐ?*p>)> tnfz]5Bѻ)[5`7:uX9є/(6~.O;HS;`F<؋>7gGݞys1r0 ,L8$]KUpm ,j Jɉqʯw]&eU^֨ѬVuA2_O"JiWxkl䀅SmƁN(s\qy-+5&F`wC0@9]76oDK[PNå|9\zv \gò2F\|Z5 VRh|DjrN[1bGhߍF~ OP!mfJ7240aw^ F/ܫh^. dUSɨʺtEvbzI5;R9cK,6M/a"0q!5ћ!in mFω7'Q1ׅd EPxL Kvfِ_Gm\qb !~ Hv < yM8r4B1.bP>>8.·!HOB7O |>\HO䗺JCԀ'9o )r 'JCV +_!/P^b|;'Oo@y} %+,;ZN˩hAMp_719XW[uehå7go>4;PƶWJ{E/G בg;ko(N!3~y>7&o$sMɲ(+*Yo+C[(3֏?/ƇZcQޔ 3.hI<ђ&'Hiduf&#P/Cw8r1;3cR(Th$X%Gi޴'PVbVxwo%n $w1~P4݃vK8| "{D_߳{|X}I/Vet6J&E@&TYACF]hPaRC>%h't05 h[ԝ[Lv啾+}4tv!(7w{+L–s0[aە kS+8ճ裟r!(чCH?dik_4$~"|E:3܆S4Yϰx=vއaGxgL(̼_Leڹ0< _Tuꀜ=&Yȋws[Z׋tH a}L#Yj~DSkFSgڷZ_1=v\A9^ח5Vp.bYp5F1V0ꮡ]xjhez͛*0rx51 r MDŽ^eZ?"Z9KF@ MS>o~prO4-ОXWL<54o龮TjĪoZ+ƭ{_bh#l9OVK_-yzfvHfi&`mFF+¼0Ej*RXЗ1J XI_9{q Oej9`+1U9NLZa]]vǂ쾈614Ӥz`O--.הּDPO&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&N 0&RI@w(6VFGRv2“C~ɸcL 0&RETd9L sD( t_B珺w]7wԭI_چ/( Nn3PEcÿCוjJ?X6`L`G 82&9sXs^&KThͩhP\uxCib%M(y>g@{lj 0&Мh6o5JiڕyW]T ZovM`E-'" 0VCV&4emKxd"~FBO*u2dﮤ Ta?ΑhɜizRq8t9|Pbo  ^_*447aXk3BG q^d܇Mú,ʮy:kҲ/ >=;\AѸf EcU$8KL (AXCj6y2ţsj8}.nGe+Y4 ,"+$*n^=v E/}jEs?\ٜLYDF *1&@xd~Fl 0&P/jD Dcx^ź-+[~w_y-nj1X<~ק@Z1JaYsp?}%t̐KJE0—oݵڳtX1HoAp7U`*@gr+,51ݛ<}Dܱ&MG6bsaS&@JJP(n-oYi8s0ȧ.E~$|ãGE`ʢjE,cDb m;ƿ?ӚLI⎹ d$^Av [~C# aO2C@B|r˴u Bvvj$F+^q)uB%% 0&l4?s 0vLs!zeK6ROml ; D1<($(;N37 'R9F}׊*ɀ_u'K6k^H%bѓ5Mk%̝?vBYbO(3b5ţ,=xh0snbqw@ J =ʌo`L ;&v UԪgbv-|E 5zAw1]⾧kC1u e 5 #GeFwgHak]&Z1 ߅>)P M[-5REFWC-|>{޴l:sL 'JhH&H=V6Rϔ%2& td{U2_3`-v_ EAՖpz^6֊5@/~/,' 02(/8|.HN~[O!˺>_tI/b(W/.Ч؁3}! ڙedAz޾$U̷h|`!P=d! 
0&b(3Ρ'GLg4j_qx0v#,%psūxGjeQ@ϩ15+Z ^фS}êT/}0c.mbLb1»+.0 ]({Jt|hIYVۗr5FШ6#kΕIS`L 0FU7B;eL 0=h"ofeRyVKٔxSY5_vN :”Rݒ L]\:,UVs.aT\%fj%m|ga#Ç%q<^bƕ27T"_F'2-VamBYb 6_.Ov-*VF7 ޲Z9^Ǿ,eF4Y!CNe'>nޠ|&J3`L 9</I߮،EW(z OM%^ACFeRֿjeZ/eb+v̱Gg{݋n}*Zc5`L Ce,L 0&*cg!+Ζ,[c}Bt}WXyZI{챮RqA[^瞠ؔᛤѽ"\.>xBSH4y9eL>xVHօC|ީ_.yy>k&@13`L 0&`Q=)R_p`;$^C&;G 0&к qRg`Z`L EQE͙`LE E {`&#F`L <*%zvY(<,&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0! FlMcsg 7-TVR]j;&`B(!J%gyaa"b5.m=fXB\&CEgl;6G-ȫkHxP=&MsmBkkR]gX)% Q%eL 0&Ц E臣胺#4&u]}9f\iyͷUJZ \,dؓD?3B([U~?RI`ΓM& 䝂!#R>CZh\&h?Κ*TJ]iS}5ƒ֘*-֜wfNT[ f % L^fc)l!Gn0\ NRMJ%'k\&hc0cbčy;[SkMѺҚnfneBZ!`L Sɬ@aT.磊9Tqtdeb8?)0++#086@Vc鑙34LJMWX@AJM V4ϔ%0&@Mv݂:_WTUԴLw\5vMKE#O;r$+$C5a4O eͣRUJ u^ 55xڒ.JXf5fNV5/ k<9^L5g3+YFCkhCHya^>SWgtJ*%NY&" e%6Ҷm(Jjwo"F>uU%/]m ))K,rXfS5#xe'mɚ(/pC/™iZVA0yCc`)Ɔ`wL Oc_X/ŷ3Oh[Rږȇk>mu6H uJ/EZ'VIugPjqStQ&I %*\֗>y!o{ 7G 'DΩX а@TzRb:X;P . V&uLքW n{y.;{`BdF[K ,Gy(7:QN迡=rZc,{8'䣯BXV6R WV 45/0@(O}/gq{a OidFnҰ+h7v;b' ]@#K+:Ke\OM&v3*nB|4Oh, 43@YR_ ic]]$ᒓe%N))SfL3˦-h_` E_6qdQ(QM- ҵ+m9gnNtp~pp[a/I'9f 9j&%*m;= /IΛ3e[ 2=^)9$ܗWnUT詸#.DQ⯅y̧v:Ul`XZ[P8Þ8H׵bcp/WW + Gn DhVc6 |qB,r/lu4q`tdvn*[\]ee}({(9GVntEd$\r'o3Uv PFϧ)TNkbjNGLZB}w] 8} B}/|p{D k"gŲ3{}EyմϜDSqޛ4! +9C9 ѹgOQ~0;w^OfU=?afc%=3WK..Ca@\VVRjA0w]7I^*%zOeHCFt%BoʧP &YL .f5Y>;>Pb7DCu/^x!!_󛋍rn7^ZTx0ŁD?ydsZ~eiKAϓ 9 bGe#\UE8#9b k!so)*d9^ViA@* KYx?톯M6ր$v[m5My.OMQ/wǠ Q)>H,ʫA(g0aXuShF݅(x·aF5e&VrW*_@_/2EƩ+]L0Ƶ=e iòΎHœL>;kVrcoH-sOK{,L3ZYgGS$#lXOɶC* r멈fI|VʠiYxV)s̗dWX>՟^ع2G@ՖAx-BG/^y . ^C[6מ;!X|%F2C&?J^Ig)A1xy{ ~5ޣ:a!dAa"q-< e~ޣg\/mhsi&%ŠQљlQ˹b"1FX5JSWWZ5Ye'f9{ݫ,%ϺwQ[h&3p8FT% -if/l{'ઑ!uJƒ5pZ@v o#+w`\p0:UzPO!1 Ļ M0ӴQ{r7v" {P/LAiz`|Rh( RQJ(wDq])aZ߆kic/cmȿSZ>7sr̚ G9T36`E K’eBgz@Qpl$Pwl@ݱsYU񱂅WƧ8ǀKiuf,u1N{kتOVi803I@SLEVq"2SކJGjX'儥1bL 9ޞת0=d@Ӣh92B;t6yCb"]y1KJۍ&JmI) Z0mBOuHvW 鎋Ls:qH%C2~2u*D#?Kr2 u2c*c;[{S[S;ݛv2Fnj ]&E"{Fȗ?13mOg/tcv ٠ݸ \F8>n* 2hG+:|ݖQVLzA̾/ޞ"\NS;&NTTa=r;b=,^]A5D0Q`1pO#"J_7?C7v9\ʲnEJ }$XSgR >%}gHp~Ŏ\LS2W#<p>ZӖ*DiweZ[~/ 3Xw-K(t/?%'1J]0FqvZcXÍR=~v>M;Gx^jOsjyG6j*47 9/h,S'ðf=Q+)LL(NC`Ox9_<& L!Oʚ/諈#)D@TKc{ixe!+)?WBh0d≅( M h4*yDQchV/@;"@OVS`x7գɼөD0I( !l)hXg:[⎣eHj|]3<e{ICuT{'(Oc-QG =#6;M}ҁjL>+̛%sdG]G]>`-k$ .b͇g;ͨ*GRB\h00/pO@#?%oZ0ބ\vU:M10ۥ 7 /xhfzSq4Iz3L1Sif uA' *v]2Z#w.n}w#8Xz;]׹wpݜs>goC" Zc\+0-UPon{Ԫxɼ-gyNT&_ xʝmNqΖs~FZchgc~Ӻhi<~Ds>|F;.n4dx'XnPn~/?%BO2n3淽".0RweYKCAf| 4 ,PF3 7He^փ#Lӣ_5w5OJkSk F{\UmƼ=7YYMdҔg2M1Mɑe3&@8GG2t*\|w L0ZEꛨy? C&Ϫ:V4E`:(dSWc t;l=84=(oMbe#yf 0&@$GJC[}ѧ#YPSB~g)[)N)"'~VZ Χ#]ȗvI QP6nZ橈`%U?)DD6-%VdP¼5?ne7w;0Bif o bXJ 4)DVn(S)g n9.XifMI@u0*5/,siH6HT)R"!ք\ira} ]2:Xݻf>%ifrS[.ZӴ6`.OE08bh1N9llak`Ueo~_\cWO ؏ |BR_]зM3sC=fec&J2ʚ`z˜@ p>MV""fas˩ 3:lߡbˌ鞦姄 W/_#>]~\餫W7[4GJҨ%4mHemZ*5dp>.gםh⨃ٿD#j >h`ٙ~oEo3M]4[푧㧍 )QL+˚?7qbE}0rxPr g?b\*(-X .h*1 Rփ^Fٳl7r- ,H;!?WE5ZHq< vB{Wjꅂ'hsgNE==BmI$"#Bz۫.7\~ P46[m ((( )-w٭>}M-#|8֨P3 +M?DfnZMow1+7^q<=DEMJRpN.9E٭>pCʏV0(|Z.{PB=20ݛ/ 5:Jqu}q8n=4pBGפhǰ {a*~¯Y:K'd́P(0(%` Xe@Z|:fFt("_<'BR(4!g^s{Ѧ\ygA$N PZN,mWOדU884nLR?{N2?`wO5j ݘ VD`])WgF~BGS|R۲>V5.sw=פ: LJc=!RxΦ!Lѡ[PvHvP!|(<{@?KhM cY=.dQ(|[_o';~MU[ʧDW4U4|ӐWOݳ'ni{)7![CU]s2 h_(M)m)53%C}eiD yZŖճg);bm@zODyQa %2Up4AECLkh?fؼ.oLt()>+ txZK"S~e[p? 
;Qۮ E;75 =@OڎI[ʧDUilK3gT> 8eEۡ AweҠP6ߘЧ죸hPRG{P3:wyx m'iSdkDZ*[H`ɕn3{]!eG1*.[ޮt֤62@~Eg|A@_;8J>O{-O}i=`+u69#3YkljLu`ZVN9#MS`{-B'iJCd }gC-SZXs!tF7j@*pwA'JXweA'4n@D6P7yč ?Q?Jݷ޵*ꌢ+l p~XH{kEyϳ̇_ʃpmf <=kJ>-ԳJO>`cq`4nO(oFlDִҼѮSM >v4G~XC]Qe:.U?dG<rនG]<ղ;"9Ν;u:wt`UjGєq ,\cG?4G۽TlH nYR)m"/P8 D4M㷡w-c˅AƸXO_ koQbގ^)9fǰF@[ɧFi0գbΒ%4uoڛMsp]%WBacKm9^>]5Q!6:phiܘ'?}= [gf]SU m(;}O>wxS1KQEX HIPj^|ZPNr קDdw {5PJE-emFΨL >Uw7zGwO=Lx:<̺զ ؞PIEh2lbȨl#h!b[8Y8h m!ˆ4łS\`gEG@!YivCj [ЙhZZA"[K͐,G]֮$>gqƑPZ\x!W^_,me_^!Aة@ P71`!gG˧PƚJʰ@2U|x#p.( ֘@R-?`FM3•:|@aq ?sݻ}0? }^G[n ޏs(my!:|~ |jop`:1bP`DGR&[0M" {S*I!e`5zيUb[i׫8!{G:Uq_=w9apm J\}{_Lx/gDc)|1G۸UP"aM~_ើ@i_VR2"-:GŦ%̿;EKO[r; ZJ}O'ߺ>Oi"ƭ:^;u03:7eY%2".>C<Ҿ_n>l!`㥟[^?CwbϿXWw%C|rҷQv萦*ʆl)x;{r|z`"4h-5x/G#͹X]ز:HNm9ߜы_K(!nʩ RdRз3lRr<"qI|D߃PsV%:LdC<:(/n[-IĻE"{ ?֋ċȾ |?!W P+B)2II қr<}9smDrB0ŭeIWoamC#z~3uТQ!#O66ғǟ,1a"5=C?sA l?R4nYw4X%]J!˗F?8wp·'E7e~b}aeqgW9|z,7cv#yB(S@pC !Na|f*;LPry݀81U߸t*> `xO%dD!ElDNpI%vH$?Avʸ:pBuj5Y O)TNxE1E-d;\!l8!'EEN-BDmLpmuњŊWEb04Ԯn !|Utl^_}kW٦vCbƽu#:BeE⇡n-k.<#Uڨ2GAӻC3rp]ڨ9K70׹ص5sb Ц]/<T[\M&B:K>tPILWvDC$C.qB,<4SzZ6XoE;'5?.4P%}C]O56b%E2׮QU`q~@R$\I&hhڳ =~;gAa(˧p=3iqQ0" MQ_wHS!^{lto4`?Bv=SDȶHcGnwT.2 4wiil D., %[5ǣE _׊Fu"$].B =蹇i,{ہb!CcF-GsҌtW470``@'nwbvs+"A<.@|4$@nirO-JXY(4+cg*5?0  (Si߼̿DS`? W$MآپֳdbEVNi%3:KK@s 2&=5=z+\.'N%ha}:Y0nnrJX--셰@-ADxEѴ^t{BT"@`@id(ÁcEKhBqd"id`cj@x}CTp-# B)hRP%c|[ h7 ~R˥Gi/F (2҄hCUily>Ps)##}w}{ @3{Fj~q4U5!nL:39vXtPPh1{k16'HM5RAz#lsDo}Ѩ8(5k՚ I i.F S˲sh%΀Ep76܌`z 2bP"5lfBM%͕*(M]*cUJj:d@I *!RӄiѺ]"|p:) #CD3#d("M Bd hbAp}>_tnPU?+oX{@6\ ;!$=%z> z? ovo#Qjl:x4ںFT_P83X8wF]pq"94s!覮&X݈*" + 5F4db *xZ6Z=N 9ٛo8WhiFt%U"{lt<;,h'e σ8)^@I >3BUK'5e F˩Ш\mp (\Q #/]IWo3ԙ7hG0iΫE \[QU: j 00_+K/{ KذLzdFa+GUȕ.={GƯa!͏<)נ )0m8OG\MVtBת^vo9QhlUbEYhhF ]P^N&釱n!1oV)0W)\*p9IXwi >!A2Tt+jg1o&XMW`/s{wj TP\ɾ|J5-}x.hq_-8;phfg2~?W]<0QCшfa`0[Ls0 w#o1Ź8y AGrH tV2V)02$o ywd!(͏†8[PBT+iX 6K7SN}'$]ъLV 0,i Φ,g9t@ plX4Z b9#pÔNKctF(ǁZmT{lwDAU^҆b&9'WiFhasۼ~MIw?#PAs:p ˠ΀- vcVclLNͫ`>%:~s ]&KPQm%$  ? 
̯CZHgRAM[H\q{0НhhͭhN({}8 2nܫ#7Qb'n(&ln}ƒoF=Y&4 ֦ "08ru{oE$MMҠ92v|+Z#x FGp2W6\A&f\ wǦjsYY7gAsU~l>ˋ}:HRhtFݻT (4yY;̵34J&LGD!BC^ yi5a4mn;vЊ(oD♂Fs3JKw hC&}l4)dNB((&cȲ_'tn$@W׊#AQ /֑(u>x6~(Oq ` ݝ`^]dn;G!>DE['FDnC #Ѽt[8 =:SވJFg=T[sRd1z%)Pe3g6p^DЄQ 9_i89>T-&ڲqTUs>)H8S:$fw[O8*[l̃lK:VL]Z[|MjBly9kQlt^'3\.=Gwޛh_py P yyq!$t*3@O],Uo6g( :e1[3t8-[;pqϋ#PҦ߀p5}QO"6dD0c?VVn})rb46<41@/ϾJ3d2HMȷHzGW>qd(94xh^#8kB}:7E=ls{c|I1%6 C|0tuu\QQWa c=`o`E|ՁVv(N;d% xİR/O w@9u}E@x)&"O,dڤmFAF NsǙzx{G(\E$-fx9EFbTW|Jn\7##{ F,_^m ڽ PUE E)lazA~OaY&?%,8Ќx&$`7nk0p-3 3jKJ5,sS^?M{uhԇm+M`ЩtϧMRIas_;cPj㮖D[nzj\(DUVES ^r*A&(bDє0r|zWN@ztjv>L<,9/#z+a5e%+1WbCV*#H|{eB,0,A 挌kjٯg.J{6Rd'}|:wvAu{>d.2oJлaeZ\RW'=oӟ;E~KPpȌ>}Fz8 :3AoޢSCذ3X&shΕܕЈё}\YQW!0&;NX1kru]LW=g~1z!hĝԑmo<_jOI/x>"‘h0-+V)aC/(qvȩ&|4 #t߱-ȨʥX&O :u|[tǏ60|voXf1˷ WӮ!rK-qn a6"hs#r%Ѡvul1izzjֻxndAO"xdt&c=c_4 4[C<fhLm1"ش22V.X'`$6+F$plcH23qor,vF+_1;sI"T&4!=ڈ5u8֪=F Zq̯2oDi:4kk7/,w ˫K1Ţĥ+cbGf/Zf.81>/Sj=݅fL|c/t$lX%jP,,sD kwA?-dݞ2 ӯsK1w};x`]t*vqE'n\b8ls9MalYܣz=49x7!eHtj1Ź7$8l0Юi]LR0v0+)7 bDMĄ;Iab-GLqXh"J0a:5z Q_Zŕk"%-]:N[f51nxOI=U/$b|L*؊vS"!ca?zb㞣qB9 jXշ}~b|w3w'nau5' gcڣFf젮5-U*IxAv*b }x*T;?Af(ltn@շw[3^:pq'گm͢$$C{-ƟcpН{Vl/]x)}(!PW.uW4-58#F>8OZ,~[~04?Nԕ'VR#’qKԽjx_$3Ɨs8^2zGa #}gf &H fv]nj$ 2[6+ևp+^`zUF/=mq0eWu˱@Ф^D ti%jC Uyʌ^t`IɝڀYm߼$b!h娡 Av=G-XֻWf>jLDJjhB9zѮGWhNMZA;0[Bhn*nDUY~d5TŽPyMA ~(vlY_Wi;R\@6࿅^hӸ.5cg=BçV."LWZ%71҇<"|jǼcҬjVfyjM (;xsC* [Så7 ]) 31'f{Ps)SV@(^5suN?g B ێ?,VϢQ&T x[>8 ~C}-}R8ESm2xL&w4SPs(^,ZK$&][EYeVvn 6B6P0:=~$NfR(JP^GU `/䢧}q98b^5CrҤa {ons+Mbgr$`3KA%UAL=c}:8}ewbxg<}G303鈛O& PP{م+ד-@>}N ~ՠ }hlaŦ\ӟ#4vmQ\ (g_e?LFŐ@hH!fQ 9}z L -cAL 5jt †'%5F?@C(} bˎ]z;jeXUt&]H`raJ%;ƕ@IDAT%TfT =Ђs??/- H C-2BKd P˅ISph5Z5#7w.8rc˗IQ.4τd>iLQO)7v<>wfssΔ{jSI>ka8>#=270t:aǦٹ˿4[s'#I$Ɵ f*/07c ViȈ(jˍ*DFƌ?ugP3y"t v4&6dîEm/O^>c21=5v7K̿fG[sRx)8&39iU>8n:B9'<VF;7or7#|D>XNC"1>~GxDX1+cl/>Xn7 cvZ"à΂AޟT 0V9ǤTR5r?b1]Jzݳ:+`'ޯN%S i. 2ZkҀ% կb S+j^hN$chmkMZ8=skmuH'{/gw0O&m)肓yU[vta[ ~Z<>f8@8%=ڊAXI3/Ecf 77,3׮:(aC-TtX?I#aC+PLFI4ú t̘>eM;S Ƣ^Wm9 /ReTтl޽Ч A Բ08 4[2 ҢAmYF9kq0 1 N~H)Щɒ oX.NAIf gf5: #No\"o+hhѾ|ut}pU}뽇M u^d]V@ Jа-L'r&ͣs#b _-jo >OGaē'>Ƿz٨%>Ʒ :< LxΟI4w]Ty2pj?ю1@HzEN='ĻVP@@-MbޢIje`aqo():u%̬5 Y7)P11X[/>>V J }05_Vn\ի}Be *"}\_yX?lR=yCդ$42Ϭ;grܕW'cI0̹j/ oAۗ\CtдN⵫W3s9+6GGj0u{<&˕`3$Gk/ow[ݜ>\p;SAfK]ݹߦo| ފuc 8|ؽi'))콆bHe>΁]e@N]kԮ{V,}.69d|մ-fǩqU-RpΠvC^Zj< \!.\*G7tJPwܸ|%awFķCE4*-%#@שQsEmԳ޷=U*RZ & 4O#kXOѸK Qb-3Jկq', arM82$ɭ퟽yB5L6k>E2Kۼb6l5y(GY{ a|?]y`5s_z6+ vVlnY|>o5Զu2y繱2ɒ {dD Dv+dt- xILWԢA^9N8o༹w'L֏W>>ee;̫}MagN6͐in|kRqm;5MedX"<>L( "椏~={ՅN 6aJ6?d6Sf֭ccx2vPW?_@ ptc~ZlOQ~JP dPFAaT\&-9摟׫IOeEw8(#x~7uڻdl;8Zaex-4zhzW%CN\Qj>b&MW{(#23"nbsE)l(f3Rعπ3.7Oxŕhca +^Ψý`/eT!::e5YCfqb)aCQi5d& J#>(Rs{ۋs٪ M gZԮY=Q8?isy˵_\)Op`Js5PyܲRH-gV#|rkk4-|PLvF`(Q"a6H49biQUH{曊AɏfohXL;#nqؼLFLCl3H% ʩPu`6S:^ ϼA)}vԐ 3hD %oFvьf^Ð2Vl{d9SB%Yp (xł+j jZ\i}F.v)Ze΂q0aǻ6?~ زzy-b6,;;k 0bP 3:µL 3a#` SNŮڰ䏥iiW"y9w#à ҁ-`R ~­r8hڨ@dbjj6"p2qWb[Y0&r̸?c78Pؠ& BT?i%`;X@a?wM;bOJ[`mlY6[86mm=Q`1ѩ|Mm{h%evh[Cvм /XDОme1*W)8PhD'prTQ>\? 5l5]۶ Ҳ?1%KMb?jBnp)esgk ش^fBBV 7#ΐ9##=rիI'۽_#"@FB9Ja,6p!}5HTnp[LܬN-CτWk׿L|7TLj 719HXu Y"񟝝q=ڕ)N>n.dqޠA9eFŹ:f8.s82[H` KQP&T,eY~:]`„eM9iTTrc z#h(P؎v h͹hT>h80 0m?1ĺ`\Ȼ" MA\a&9ti))p9Dkh;)>, mO>h30P 0B/na/ىRCFf #v׶]bqLFqB sfu cM [2d}aV% J >X3 ڹA !;Ķ8wHaA/MH^gsQmڭ7qP†:xsn)Z @ [\&͗{Z4[:jvAL6~\][V{L*P{b>(ݪ a{ B D f_Z$L%e7KmqN3 1SJ0|DXp>RgǏ E"QPB;8>|LCL~ZǢ'4?1W ZDIS5)քJ!m.ؕ? 
?.$M'b*U %] |ӱկgo2η*}2CS^ԯUMG ͢hBbRh .@Yӣ-O$Xi;!@ Z6 Ǟ{~e=sYN=C`Ї]E5"BiB;*aCSp<ǽ'mUs^SQoQW 08jDt}%ۧv:)敫iRqsIq`4f ڰ0b^aE (^b)B #UԤm 00Pl 3}bW=%l(S1a+h( Ɛ7QXQ0+N =Um8.P(C JS9):uQ=!~ j9~nyJ)܁0J}0N[Upa5ridmG s#69"nsi]=װv ᬥj2cz&u-gԳhR}:ڐlsX_ fxf}:4q oW>Y&U M6QgL2Խ(`K+E_]&ԼGM ./>YE1 Z}T8 152Ȭq^]羨`GQ1EodTռbEs{ҧEܣ6c Wx*ǬCՃCls06? G1n5U"cEͶui:ؖÿ| 5l\(pBJNL׶y~3nZ2/,Uz<-lS~8Si?Lc 2f j^v9m9ulw~x])!C]S{c JL L}'#r|=I^+hh 9?-Wcj2~8*o\y#;ICՕ<7&i$gI~`f41]ڣ7Y5{|VcXϗѐEn1uM\"u$}eʛVIDOUs]FM%ĀEX̔ ̖~~~?{}Vq҇Gg9{ԗw>>ycwȝlIFyb0g ~丱erk!|Bmb&zf|M\ Eu*ܬ]"i5wfIm4dn/ n |Pׄ'743+E@ 骞dN;玃ۯ {}ʩOgS^ j`:H//2BrǧOQ73ƏYDaԯOg~ ӟ2gEL@30t$3'_I]L?3PKW|N}>%nD5c^?O6i#м鍧w˸A L<t9:m2K7FGdۯɈkRzPLT^_Ša%}=C//'2.%_N5 |%Ւyz,=4YY,2KkFI3%i(GDʳHjyyV[qx6ycaEfee=CFVgT"У߼s>#lNg>yz=-ljVx%d8kd,/k,X_ _}^V09vԢ=L i/̕@h/ydTӗoNA~ =`thr--|Jz>2! p9sXKz@?>K*6 yS:'`صNdE_!p+Bb_F\PptjAK FCcrsI"Z%*Yd3"l]'$hRd,ŐNvY.83 C8/\KNщ4zB #whϺʕ?BO+] nq[klM`Ɨu"* j4JRʃ1"XCf327 {"7t[66gg_ϒbW/nb6_ںvE,҄Z,r>&%_-F㣟QtkD D#DE \*|[w 0gc`ȓg. pXNtW,>@>U=!9H8,^o',Aa5i:QoA0Bڙ>}MG `VRl+W+iBԢa:ew1;-)z;A*[0[z751LVx~wgn paFl:}oW?=>OCq]U`dBqC2K1a2Lӯe mkz鄩ӣ-O$lŹ$gM;v_~l6V}@hHm?|_ݰ +Lu,Ǹlߒc1N|de$i!|xJ JظnႥ欬difa@Ǘ|5b~?o*NI oMiTЩ3^%D*BVAC[1?ڲ>.*3&s%ǽ>ת\{sn˶m\w3*bTtS/ &|<\<-x 41(L-- GX, 1VLHKK=fw4`p=uk18޴i4mfL2Ad"ө )q  B;:=`v+qx;f?N >D|l h=977{~fK1؇T ~+8t9~KC_z|=2r\Fa;֭qd_ż5C"xr\9y2j8i0T/#~xNK,8p ͫg s[&l#F$Ûr]%;p~!b~rѮY=ڍv,f-(MLwO{HrPD-ZGm药TURQjXK ޮ^EP?t>w_{m˾cpF2N-h/88zzz}tT.';IׯJqMNI>>JԨRU7deg#B$㬘Kq .X7-OTpzhF0seb75itLFI݂UXOnRKfmi:EƱ}?-y6ͧ+aCi6e$<=Щ; 2 jaAdJdsں@}E+zņ~ZHfTE\=,7`Rd4''"+!nsh)vsbGGC-XS3 ȸ~:wxi0QBN(7g}^J!˗PmAfR)lpRB9AAwÜeSwRfIWos詯3g؟OJ%#GN_ÀCy%iX L!i{\L<ةWP>K4%,0?Æ2Y)׭fG̚MxM o^РS7:u!L!JLjX[/;DEFɝXN jU?L,LrZ`q'}dFzQ.9%MP2W{XEUÐm2\DACQ`;D_;(]Na6+F#1 ?\KDx12$SI"voXA= hEjazwt)r%e[D܉3",$XthK;ՙt3prB0M-hTE/DIɐ8q/4\;4+8^t .zM\ѪQmq5 2(ȾZILWdzuj"g/^PAF Ϊbu""~U玃ɻA %~5,~\ Q2,⾡X-ViRG,Ev@8\{NMO>i'{?aJIKnЌfpkջ 8QKaWo 1q۠!}zY )aŠ X\23-τ}Mq6,ciZdj 9+N4Rǖ̻j0.C0TR.N9DV,zwl!wa־c$j4WkWIlhׁBFD#2u7yu({2R`0f"VHo95Z4/<8D ?1OsBlUQ; &Mwun!c4ߙObT)1w3ɏb(uaصX󐘷r5 WiVX,c ?;(@OHFEb2Y5DIk}(M= cZ-M۵>A O݂w 3iBу[F/ЇSJAUj6M-ĨwS{2j5(LPpA:ǙBzE:6Tcָ%F>n Vl{D_G;-Z[[,#b6fUH H\7:}C`t(P#-t Ma Uit?v:׸MGj[/mOcandT]oj,Q ?iWO G&=:=j($+-ݸǶ)<(/4EffjKU?IeD ^KMH0a2%@D;؞^qn8 })n&sO-Usq#x!1S t@pK} ΀з$ٜE:7s=7_(ϻlM0G`|ẇBmR}8ՃVSo]h~E ƯPaq)p Zk65ǓV`0WF~N%lh?j`4G -;nRkAQ}JfYFߡ34R)`=y66h%8iWa#B):aT]uѡ֞&26{j@t+qR|*Wh6~b %!һcSLMIwDڡ&kY 2F%g.t& aCMj/3*աfpQ@U /Vc O2cH5K* #۔6[X|ݶivTNZc$jfb)b$* Ee[{}"Df\ >E"@Q9p^aep?ݏ:8W Z&Uc2\ Wi4:ǑǐTi0W׹E0G-]NN|>mҖJ''O[fL҇\`42d<cϞ?i:sGzJ-;4ybpWd!jt8zgg8G&$=:VB~pݾ(- %+~lIa0ז:=878 (HsZMXa߆@!XM)E-I4n >CD4FrRm[,{oއ.b]b<60"D|8Bҕ0y]j^`QsXovN ;h}!81̵5To9%hF J{/ 2rwgu1Wa4rHgageY;Qj+xyѹߠ&[V.I@}<)^S\p4ŭfЬf>Bs?C'?v?ڿ|>h0$hF{)\QCi/ Q]YGA΄ "j7rlUw@Gj+EanjPHCrЗ^{)!{yy8oL vF(5G#_=yVyv/ֹe}RA[K_i?r8@w@>+̕[ӎ2S0[N zgOުI1 U]:5[Q:*LlK[&Щɒ!-m?~| {%3Kc_.ZU_Wn'iE?zoqOJ-T\&E¹K'YyQiX~P4SL{^v'];An\Lo٦8c?Ci$"sѴ/CjRlǻψfnQ})?L?>5(;),"= ?B͈Ǥ~{t$K3'_I Rb6~g~N:Mm i`Nu>z_F"9\3-<>DVcguk)%۾yCmvy4%dKOԱE1U{jC46!3υ=t0b\Q^uĎRp^>l8rsZU$l ja)0?.Et-(Eǽ0y_`-fs-h, a}S&!wk딂 H9|Zlw&2}6;G^ȗoM]e0ԗG/ﺤSw ?\nr8hŨ@0Pb,5p5-oB>/3=BfzL8Js-2s>!o~m]G^(|H^-(/ύ''$~Gyw)@{ nQݼ3?3ފ^|Q<"LyL\ P!r%"NK/xa޵ kN}e Oѩ S Yl'3V:x+W`@j:^9Dk؇Z`ޚ aBE_*=`D:{X`wˢ*ΔF._^kPQA Ɗ)s:LlɈ} T׋ڹǾ'5M;h`0]0 Ԣ|ydPc 0yZ?n@1m=Щ 0 "y, dFZ㦋Ix 0#Va+x] 6Ő>?L3a ,ݸW&dVIC&i~Vo} |!99˼?4:|r4g'[sVX9xfeg9Ü0tf3҅-ns2>X-s? '\\7 hXۤ=G-`q'1.477 rJӭbө'/R<sFƵ]0\*=WCK0UfXfܨ##{JH*Xvrd#n?WUzd. 
NJЍ| yJG^e_9?K0ZcY ̶K*]0[@nDz\)tf Fb)p^s%h(4xt3'7a8bE CL|h ;9K2g V!mRQB% DI rA]d+*JegbA\wۼ< oKA(*T/c(T=BQsEGfn >z w5ah4`]GFulX=NOz_N5鍷]†/]GPc`4cO9KqT(>>Яu fsM0?#hVѠvui6UÇ%لL䌴Bz#1G!vx06d'"cxDi><763!J 2-y14@.0qN&1<\FUi6lW7#тĴ'F/Hz]n$*/dқ aKz|/ro֌lٻĥ+cbD"~:J`( >gTGl^Ֆ٫ߙ&4=kZ Ge>qef%W!CG*{\\G} [1SrBnq\k5 Su^ .bcŷ.l[շBIU5'2a3װR< E3D>DinWm >KsHH%!J#1w';gtɣa xF}u,Feoh#F>8OZtZFkzN%lufԽjx_̘6>~UDnQKd} &dfa+Z،s>rڹ#S=0t=hEZUZ5"ÜB eo{HxkjHztIL"Jkk.ȳU(,N:=)o]fT?{&;Cl˥B{a=IK6ʨ4>>i|| h.CcGqQ18Hzm'] ө䁳xyK +huhYM{65c4{| sz{|gK?;֧mbp5nxzR4l @IDATw1.Ŭ])*U `A{mzQ^`~̫z(EZl7][ ދG ~h4W7qfTCeniR:woN,a)X#_`B}4N͙ S(K*5^a$ʳX1:ш+'K-tnHٶݡΎtI Ӟ-=o{4YvH@a93\T  `͊WS`uΞ Kpt8)2qƉT{k"(U( z@A=')te co9w\Kjɼ~/GPP*2ƾ(h(~nyto6"*W 5|RR$^@k81}D;xNFo/~Eˑl.Ҏؓb#|")<-X}GʄA NM .YHj$V*"fŖ؂aڴqeƨDĠu4asHŐ U}@oѰKGeA%H}Џ٫[i1[v##3##!%=D_Jބ4ٞZFhTY0_:텂*aA΀U tЏ2S*[}zRuXojk,;Bnd.IE+r9rLJx1@DzU+4 ,W (c SE2"m-xNPV'ܘGӪE}eFҴYݿ5x:yp 0zy=]2A nτ/$#J\-#EY~DzXv ,$JGU\|7#`ҽmcѧS YY`o$SؐM)0_6hʴpX~># 1" W9/_ο!A}"!%>/}D~iL(L[îY<~N$8cl:q$n=4 XOƄ?i8-7̹LjƼ*[4gҦ*թ޻ QjKFՊWf&."un!̱aj.kg4cT3^H->!Z/_/a|r+Yo{EzY$3fDh66; &ݡuv,Yկ%PQ ;b ( ڋ#hL, #1!RkHb%p^2w~ը2}0+= .Jg=<" 4 h61gҷCg` >a&MnE(vwԝ?-+WlٱӅBT;`|2̵aQ||>7{pOɕTD7g[wĞl_ %hpo^۴eѪ689{VN_J} s89i}FK| 09C?11J)'Q/-]ڶĝUa1XkW⒤=K 0¹b ls׵`c(c&}p^&~xN# V .Qko6sb@V6 ԂPQ|c1w""UV{Zgbc ҃7W߀`5Q2pjfjm5@8L<trQ::iw["V(4.*&^ S󞣀:,w t@wUڄQJv40RQ@K.mR=Ţu!+; Mm] Std$;"ݹe4c_lBGm6PSB?ǣ/Z-S|pWo<^-""œnXpØ]Nc`輸 |?6 qP~p-dkܩ03 ii){m|z^ yޫ۲r|M{0eߩ\H҄VA$ڵI`hF}ǞmfTُks%dxAHI'$܎hἥ.uֶ]fά~Q4 0EtmU!m{>ޅiGa;e )(l+M]a*#E2@x "9<6Fc]~ u&Odĵp?6Өl 䈦uшHPG>×r-2$( Agi^&ûj(u$|DU͟W BsU:YȜ^`/vZpAN%nsQW'?9PP@VVV~}[mAp0Y^1d Zs;1ɇ\3?áght;Ic_ |Z"] K]Me>&.\w n^nG C҇cK#Gѝ`L`rM{jP^2V&]~x멻eY3f/ct&,Vs? apuBX"!ƀ){d+@ =~3".ψ߅5p$Ok)l{Y As[ %.]uR}cOWvLaoP.Z0~x|vI)':-o%P𓙕mJHI3aL u0dge^Ũp)\PРwIPPfT4KW(#`ڕQm:hМ`TCa*TČz,3"#+ĬLG&]i٢ii)B%m (@ y?yO`l UWGYFܯ9F(6=" )a6wH?Cº@-2_I (?4 X,Eࠒ]:@3w&ay5'ދ +!<& ]&ENo)ۊ]w le.Cf!9q^Ҥm55hR!>$<Ӕ}Nrb"o(*VwQ[|⾄쬬djĊSMÆ2zfFRZJSG;#\# d$i&̨8 #mpWcd_>7iѨV-BJW(X6_7^$rq1;J{EڠAw =X>;KZwNqdw ۘ#h"IjIJ~)̲ue#1 P!GäQ}EhP:j >q7'Tm~q:x|51KmlW4zR CAPZ !qS$Y|\3?w}2 <ȜPwn-qΝhC %lk-I.%(3c؃#TI#Z~JؒF(|*Ag{Z/<:U=;?NY U&]e`}:8Hz3@y&O͇j)0A2`XhA܉Hkg.&†Byi/Z1 & .ā+Ai*`bz=jY1d0Z\Y,w74J H2 C 5߳63U@Ȉ`0S.h(yYc6HJJQ4b>PD άO? ĉ~_5>tp+0_M{p Mn7px6_8E'F0hԻ .#?Oч W6Fp34) a(mHOhI80nCp,Þ6]EFiX+†Qڢ~u82/(#;hD*,c@ǀwcO.%ES:(h(a}AF%w8r5~_G0(C1|΃c(T^҈N0:xq9 KE'QQ?@R:87YΪ3kk)lqHe%Ծ_QбP9ٮXfe6ѷCSD 1n-{zz^׼:BEIwCx1_0}gI1>@Fel)짧Ia}"@&C8QKQYJP̬O'Us1pcɬ5Eb&̣'kUzX/U Zmjv|QD/Nǐ4gS.-}z# }ߔcŷ_=160@3&&,#|ڰrX2hZZ*?Q7vH].s(Gr>y|nfow4V{ -8(>x|@& a)4[nХݰ6+ps_VnNMDń=EYpv,\K {Z†B2d@Be,C=>eY-eaė‰l ?qxpXbtB (ZPEm2"=Q Y\\];7\k48ЖsRPk9t!Cah2L+~I=hZGa7]XkI̅kbx}M5-ڰGv2~`]*hُ"Vk2o,d%lEiD@V2&#qlL\@%`'D48܇ >$pogLn9.ch\ 1AC>pѷj\NipNM`7̊|uSzx _WL_9;Xߵ<"#sOcI藡C1a`G!R9i+Y5Dv4RPj¦S<4L"g_!Kҁbr8`yV fZL~]]V:| !%d[69$zm"~ZEjXCxMEgVВl{>R&7ARߩZU*@#XJ_#R2$&КS'b6"# mLp\h0y d.u i1!)͔g4ޚ8mF<~\d"#S&3Bu(2^=Q'Kge'L@PIXDԬR1t]lwֱE3_;Q 2_`3:DK;̘s=1mFogMH[t,:x5Bօ!qLbO Tw(m\ J>[G?o{C?QKJIjw{)SEkJCU\i(Y2@imJ*ɶ<~NFL9S[<-(acG qOjĊ!.qEΙ& ]Fbr?ow.$c>صe?7? [՛իn"[6n::x?0),cLr䄯ލ\. > 4zŽMј3TujW1ui@thV7f(:1``>fHGߙ˯?r>s0A{ڧHnYOy2g`^vMkKS.p?dp$i3Hs4#ơJ2VFj_vB%?>ΎR{k h@A^RIGg %) kT;:%.?좉=:{ ݮImYERhsI:a³Axz; 8|Ǽo'H8 ";oٖ*g/&+o4?A(p84x@i'L9 I\?+;8+mFa--sk -v$$_gg?P~mt@ ]Se@P94"rLI0 L4GѤxbokO>'ԶXK*@rX(4! 
@ iVn|5)Sa >Hw^d.m Oҩ' 㯾MP"1 WF yiPn7m?x S" _IL:}ڠ;^fV:+wgGC`)FBV`/_g/{oD|I<:SFZL5oo yOtpm&A{ʖ1P0DseJDI *I0鏺X&0rI'z_?K'0m"XK* `Zм>@H2 K(( !M=7=s GtR99/ߚ(͢ػV}.h87*Ec1oSӇOt[24#CF*Uxj[Ӵ4xCBCxph'7̱[I8iD <hV:maRECJв /KV k3Uʋ0?-!ͅƖGE܉x5ʶ1J22өWd@ZmR+xF}G;6H\)Q9 _O`%!9Uز_\MLѧ>"&Jq62՟;2V2Պ0L.\RI}+QӐ$GC7*[c:' >Dl1 :}cL2i5k ACy }l޽LG sgoO_h}ȡ~FSBnm pah8{詤>@Gl%$;"gJ I̗(\hxqzV6EoZRI˶hdLKж K^8 TLlh|Xi7YAŇ| G"ǹ^6γuWGU5`t::ZNdfXwݟ7qa.lw bsb!#}W]G(}ٿ3`'Ǖ{"O[>wIDx'M}/|_ :jq\7r]@A Df [/=rSSkLʼn<-a]l#&c[q=wɨ-QjE1aDOQLx5YF-._'He^+X70kmJ`R V>)M_fB0?ݬq;M8_|Z 6f5 S>ZqN5ȸ.D&C+pBkEit֌-2rO,Qpbc' "̨Hyn] F#CƵĜ%DXLea~JbHYIg\g CŽCՂl@1_GWI屑H|J0%ţHd ,v~G;jU#bqTHI Р.;>R0s$(bGȾL'Ic0,l?(;DC+b.x"(=ۈA`k {3f!3 QA/x`/R2 -R7dqú &}"pL$D'lP0?ib̐}kҍ12+l*@#=l73&e՟.FGaڵq9A4΢eb |qnmO Ev$OW1QP~[Z$ӣb53Smc'uA{(5]LfӲERlSH¡ i@So[]5$Ldt1>4oJW<';Ƅ$[3NhHӸpn{I3Y' @-I22brZ%7_{h(ֈ:U]%x(P@XfNͱ^ɑ΋}"dՠR}+΂ d"&bJCeCP0v91u2 NSдPa"&R@v˾rAC}}n;ѤvYPiZ7\ kwl 훊Y0ksE155u#4(6bS j=!,O?pdfea>ԜaSZlRj:B 1:HtV(a?'6{G-HKcm\U}vPj0Y&3#s ءd!xS$:z9'cjM(xR(&з H_+bKOY, 5wxKL=s8wfx;8Z&H30?VP4ɶ*St:Q.Sw DjUe9dEA 4=Y F6;7>wM&@!h+ <~N"YNre&!97E ܴN5mƹf jVIkAm r ͻ2A+?|߾} RVW&eI\gCJZ7c1 X>4o{w5)EԂ4,߷?ah6tk޲bokOy-_KJEnqMj$B-"* O(hƣ.hƑyà gW\JP== jZŀ#jpG\_P` A0mQR€C+j-ՠ`A)uP+F^:zI.I;|kʶ]†0չ(m߽50@&AуbC[ |lgqTeu섯r@ڽg%TV׫^\$]yh6,g X\4ϲ&u5Ha 2iGT>7yS !\A )`FoK b:ŤQ}--}x.(Dh.(h0`op|+OV ߛVae'u^Js-򴹠Muok Q;}&ަԜfkҪ~LmQ}8@F4H "v/KnyB18/?^u?Bp&dt: =gk8p1OKeiXpe7QQ} 24i >dd]Zf}fHnTK~Z B߿o{8Jgb{3:χ X:+3supg_о[_‡78nRCU)0:~K7~TM-imq&P; @KfsQy_|ma9B' V+6Z:@++s5<diZ'o`Fg p1 >F4*\OUg5W"ŕ:żiqKAC Snl^u!1Вx ,O{сFt|`rJkͨ^ED.j7N}"Bɤm9g:b6҇X̷V ٓ30Lkw]e2KMwEIƞ-{{/>2D ?޻~D^ E99Mh"~3i>zQ!uM/d?0AůC6 #۶IWljU[ex_KZ [ڂgn`i Ͼ:fS7cw@8xBU]\$} k}+!#;YkWǙoOB8P-tnY_PNq*&-T<@ɈX2 xamyh6.eL˾>#&P@ow3%襁0+q f8td Hv)eo cw*)lq6c"{ A̵a6jJG^MlVB RkE KaSel>w͌[̭#R} TH,>pAHֻۣXN}w4N}RbGt "J%w:m&a4Wx``: 1#ԯQIm0m&NT%X,&na4z[c&)d:n'iE0E8vr'zWI3j6{j#irpSwטe2Sh־5B^8&X_H擹uÓ|A#1`)h-EIDeEWf1 g2Mj+7{Alk+k)hv4=k[t ?DysmUiZ_ϋzM3}4ܶz9EF /OaC ^l@o8:YroѶ#?"M |whW-3FFFk \6>"007L՛̧9ƹy}6X.<6ǁ^^MQ$ڌ(h}-6M>t ByF;fZadypXUsnNpfwh@'a%1$B+4!X3u-UQa4"$CIPnh;L)SEX4Ђ#vJ"ܳ^ջ|ҩ׀dט6sÓVpԧCSQ0/8Mݑ6N1oVyg5M+7Wޘۯsd"Ǟ ~\Qڱofaҵ;qHnF) L1wC Zni([Ʋg1I A!&blkIiұ6E9h:hN(W&D /9[A\r#g/?b{S-1WծI-6屈_FU8t0B֜^0"0':Ҿū~3?ߢ& (p)(O~1A Z=iS^2<hNpJA}ӺwuK7hҸ-2m?x S" _I@w4H%.y:,,8#zby~_޴+g_ȯTѩH$*H_;ƌ-ψ/HөY91pV9s`:e0t{ v5FNň33o(̤!jP@sTUiT !J론mZb[>Pq y,tz5eapi4Z#f޷]GN_}cgM's!H6N"Q;JG`[{}.ԾX rhi](jc!Ӯv]y A%Jzcw2肆/ mxr\9~~}~ Cq9Z=?j$ O>:>rWIn~N[5 /~f Cdr=B> kR&JǥkQc&94Ij>nɬE2eCBXl[);(_173J<=_QCy&[B  C>93FxAj˜Af S8 b/4R`[ =vN`A^X,+_'F(hk 1c(T=yEV ^,Yع#-\ A:r@}Fj!Is<#>BוMWںd.2oja͂('w7PlT\^j4\6o4V+ KY{hvc;DžąՈ~_?Bb^w6lu83ԡ4{q|s 5)`hZ,gdxW3LE{O?ȫB\>_ќ>~Ю`IbX)ճԬ<94[!A7b^-hS{1ɺ]N0 ,˶ۿdR;r!>ַOF~m[xb:8 3Y-^7TT-UVV>԰Ă;†sZiOjӏy7%}w5]bرoRH=}4tlѦS 3xߗ4l+R|ܫGCh |=/6?~ixd(px#:N9I5}7 Lk^{ z9ݰ*pL1'[S+>'lZBZAC[>#0UJZL4oA`uM2jBD w),l[bmC=1@歔Bs"P Ӳ-үh @M7F7+dh)C_Ds$=09c{ThJSItMEY~Gդ3'B15 GThH'"oWO"{.gG&Tlw Db{b-H<2`xhp %6Ed}4 `"j5yhaS&D59{柍{m A*Ə ʲ v|onM˰)V Lj{6!?;>sF+ꤠ!8)3E&:qydS<6f4<00$p<|NM447۷B2z|0:D*ۆGqN:휀EBkr0!;$辅kZ|$y~;#Dqú$dON˟D>JɄRl4hE9{ 5HPyͥB^P蝃2u d|fX e3|ǀ:'?E,X|*Oa%0CC2ͱ"v܆x|StAH .l{T!TsJL򿿭Ƌ7 e{ۆbYG,{u8x/M@K`c&c=紴yVՊ C@n t󙈹7ճ 78W+m/b׉76xd"ǟPl?ky]⒃zh9i6_`r];" xta$w #'Ȅ8(%-=y`Z%j.@4{Lf \* FPF`rRzЯSv-!ҏo/~^%[6@uWk`j3ocv&,jFJ9wvj!ѥU#>9Ԑ0h@ǖ$ -S}Q!=re`]о(`0 FBEGhev"8c $Rg&_=iι-{ wb>iVi5*W GÁW]S]8f&M6~E-FcAe 'u31Q3 9O;,[_ Çy%گuJA-1Sy7 jEȸest"}h;&( MRxOwA?D3OX Cڡ y;n״*`1Y({1BM$TPy@_/ÀPP&@qFVS_=X#(ėņP;_/R)(m۟kw-̟öYդN7ejJ e]xg(.E +!GƒL\_@@2A\n;i-uWdXsS4߳uqj4e\w Q|,Pǜc==M8%M5 &,:m`i.|>M %rCMGzD3c^ xWRC0s0wͮʖ)a#JE^sQeHINW*8xaeKG3m~Z .4&{|+\@ e=wDHXy y ”j.lA;BK` >CC'˅u"z!{FxPI7iupP9C-[&uIq# _@څ 
_oFiѩ;]x_p:w'*MgAXG/m0  =F)4C^^ "Ra+6m8xa@*z#j޶q|UU?,#Eb5mZM{XS$"sQ_bPV͝L.Gx_Wn&lC$37e,% }G"fAMO>/5ǪH{ISˏ͜>muGV+N Zuo^ q@7֨e`t=nTI6 Qj@zAq6o݁͞03-'E3eI9K6A(acäPú ^QaRC А`a ZxeqX4ݢ >秖|xVM-Z2M&ɨdpv?wA.95[ scGC+kO`hT"O|1`&y;YCt}2OFC&Z01?60`ޖ%jR $^tE5`.W8`Ve݅Sr8y4n# f(Ln|)8%0k}?07h$P(gDb$E tp51VKAC;yh^J|(ZFx:蝂69B c4_ LAj| S H !wiVacޢM@SeFuB&u a4Pm;]lH:m~V Zws>l4i m:c`ts@ ZOȝV SpUs*9 +w k50_}8#g P ̭s4>8~cuEdK7%}Vv rnS!N("rwJڿ~נU+ @]<0ccbI Im9Jj-܇ktF5.U=§Åe8gʝ!i˥M~O/@AҸmC"_^PFyqgj[-˱Pϸ#7rQ,MJCx{LXj}CZ4 )G2_\2Vk>kimg@ HAқꏭ+rMƜFJL\Ym.'`SK][2ذ'8+ X{#d~7 hRȄe`BVLK&~+ T@uA ?C;+$3 a1_NeY2 Π@ C"-'=1ww]>pR1p><+( -U2<  Z?hlO"ThiWGk܌#mՠ c7801 PT(c F`|ip^z6`hfqm0 R7"MFⵙ; rͧeW|Y Z:Hℂ?5WRƼ *à/[ghwj$wƞ@³R!hN7(S65pfLh]ʹK3&SSF|kJΞ8'zí4ztݷ6Z4*,ЮP6٫ד.:%)-{9xlg*љ00IU{,.XumS:p)ԭ s̛:@=hJ+xXX{hD-hZA܉RP`Xet>s1A+Pԙ}\9)#= mMWO_FԡR"ewɴфh%{ u䆎y7JT4-g%t8[A+ThyDO[\Y>;,A.0 hb/ VYHGIԴT,kk8 B՛F"JڇWԲQ+ ҏ\QͪZ#LzԨѬkn2Ze;_U gHpY(Ep #*9R@u ~wϤ=0w/?z_A hU%QXM99Q-X*aC lNތE ~g _;j0r/v@+xׂ2k6 rٌgLaAФNUBbLj>(6]U&6w n00nKB3V3$]qYk\jwE}䗉;%N^3@FA@aX*xl}-]>׽3ٜ) P;}jޥ#)5MrVT0~;o oA,Z l4 h^H?:z30k9.hCZ$2a=1\}ߡ3;bS:8I\FPOdTQ[1چnFdO[v'p?(T]aMS$wn1m܏(Z_^Lȁ}=F+љ3$|盿$>T.<{h/|GFk@6 5rsqT 謑U! B曛4T>TiVD1;&zWiaB' Kt{0=͸j.t S0ߍGN-PEڠ2sisYͫ.U=)rzǜӗoMٖԷ򛳦OͳX7Np0 *57~zrpFJBA:hO)j4e\ql$.%5$j> I4*66sG~'=z|D@VVV~}[mA hӣ?rp`gQEr]ҪZh+Tu3/~ںYæ?<-mpU-PޞLʍ@Ehnڤ#:QAjH- RoY'=AIj6_R\v7ge^TT/nմ~ G锛FFsg ;;G20i~ 4S`4q+ZP֞6KZsk 1,}JG"&kPk=Qp|@$[\@8~8Wq /aŚ {NJd`tk> aA54eN|ChAZv_pXҤm55hR!>$<_Ӕ}Nrb"ߑxV ;b(\)67LBvVVz^y2zyb)&a_y=3#)-%#*(hA*}9Ǒsypc@oxM @=\&O+`NѸU lG9muAsஔZȵdٱG,HP6 %@"DVPؒ) sٕE:ՆZ ]v8g;bβRNwnn^ɹC+lk t1: 9g]:\M}UukKM@Z…v-  ksV1RkA@1jwJFAi5lN@-FшTVc.9)gr>NOz(t(P ^>Ǯ E&up.QB;8>|O:oeӧ{MTjKNvN͜V/z a OgE.' 6Z-k%lp!@ Z:ń+fA1{ZA1 <>ǘԻ :}XEOTc "PP QUs'L"wMzIU,e|.N܁{5op*\c@ǀ30 S+aC߬yg] @>.8O.r6k.,\d(pX 1#YPcHƀR(p|atnp'}(o8XnP(C HdK7Eߘ]◠ƑG톚7;G&y1MZ2ޅ+I2- X[{GW nGy7}6:VA}eZUlFP+}_y ?h+m<7BNtE_<)ljYa&NʼnPǏ2,,ƼCU%H(aXQmm~Z{Wef{?~`g%*"/\ s̯|D氟e!0S.wSśĞçEJZ*>{Ѯi"׭*`~]thVWݾj~Vg†ZT1ZB"a@;Y)&L+UNEb{ΑN(g{ת}ڳԵ^Ng[}U㧘uO 6[ -^xP&#GzZ2n2_V UKj:ϵnǀ<|vŞ}E ftU'?`5 T*A[k[V\† ?iAP U eT}0 "M 锟ΘMO&cmA>NnB+Š%y`5-l`{wJY&uzQB=3|mS'NuhVxhP/!9U_Mĝ!rzh6Ocs:;Y4Oᜯγo07D%hКի.?_ *vŝL"طC3nΒMF0q%!EluHtjQO=sQL9;?,jTzow}gG4eV)FoڎQ:7Eԫ^ɳ)UuǨBE&lDF~x6dLRg ~Shp\h0QábIی Ii ?,8oM6#jǏkYd4+2X>YW x*Y JIX`ԬR1t:*[:ulQN@2_p~V_v sǁ~rB'xCԨiVu+[Z5)l?(Z6)/+Jb ]/l$fg(!\th^W>u^!* 8QObrva7BԩVISˁ][K $BB,EĈ>6KawVHLM-U**Hs4+[:H VQ3DUy4hNH m#cu1ѿKK:6(68xb=7]I:a|LJXj.L1'[SOVTȻƀ9羂0UҩCzĪKѶZǀ22{cU}^ջNd> 6ozɽMј0bҺw8CvoH"C/oS6:v;MGN_9QhVy~+)[f@f?s\pаVUeiRaa|3",4D,ТA +61 JBآ| 2g0 [44i}sr23}N)<`?]UjǨUJ2 - @ӟ,5tZy(W&D4'fun)!cQ}ERsV @ Z#_KYmC]|†f Je};qI2!JU$ԑ\[ً =/~OQ {Ӈ}S7ЇxbSBC)ר>E˝;]r8$_JX ذ%?W^|9zF浄T^I F w9)❯|gwk- m L`ʋ1*!)ՔuU.?R=YӧhIbV|dy)ݴn5椑0#AU:$Pѱ8>Q`1 |AfA:/l;(ͥ(t2 r e/SFqWe5jp;s1gL"G.=3\"'Ά_}9 G1]! h nK0ʴ'L4|N$Ju[ x0T +Dﻳ!nRqK:AG:_鵠{s&yNuKjɄx3+,I3;R%Eyh6힛*%_GMlX+wӋxqv!Z(Ɩ4j iWS"(JG߀R%K@^9}Qѿs }Dj:’ج}vqU7Fw KBR+My uXawj㵂\OF[5~uMK4\:Q[{c2-i'6{Ӈ="ϊ@Ā_fᑓF6dh7]иEhB 2Oi63?C[Mq]gp:1iC>c8{X#N_ISMsӫV(+Kgop8K:2rUk_ɪPoHfuz2v!i9-sQq?s(M²h== t~aWRžgI (gm[!-m#,at!"ѤJl-͐OW((GSt"g{|?.VlհEZhSѨ'4%J|E)j4|e0v+ KY{hvdKáGo9K躘C" ů/765b|6jQ\s 4}6tǀDԛ?G[ؠ_!Cv}xv|Žܥsr NX@;fF l|o<Wl wiG<6Jd{;`oLؾ~U,ڪ lzq%Щ.x5WMji^ޥIs.A0#0B?\ Чⶠ#LHN[p/L)%t;r=)X7G^by;~tunEVIf{B+QӐ2GR嶞8KkG4,.0Ql#U<v>8mԬ. 
=q Yq|t]cqasbXN}#O3Elӯ70 i!@`, ]|u|͋tc |><1((4{".jS|l 3@D% Q4Vk>k.-uMM9L}O 4<9||hf-iN׭uCC iBqC捾L܌i~Mks]Ѩw Y „ɛ'[f0昗 J9%2VE҇!"">xh4[=[[T  737|`P.4q=zh4v=JRƛ^t\u!{Dr՟{PWqX-֔(dfLKKNs;`lmbqEr2l}ri@7 )Iv24bM4:kԉ<&-.)\ 5!̣(3JmWz.ǜcR6?J;B~m]չhmGdO_EȘoc਀)0` WGԾ1۵} va"[4\^聘?0u&KD|O+D܁{2҄7tJA7fsw?Χ .p $]pX=bET@PB  $zBHOv&wlv7mw9MfvΝ;瞹{Z%dj6nAOsNBRx2>Ǭs/$`e$H&l؉ɽm+ڴ7%\5z2Y2tx}jrrEJ$hn.<%Z'[K/_D,'6U|oA\TD"Fl':4+1q'3W0.-+ZXn7bמO3Pdb˛2RS<_1mN-_d8)aIݼUXוƹrǹWMt͊2Ѡ5`~]%x%ɸc9҄;1Z:Ug~c{~͎ՄoGB1R&r}2:qD{ct}&>DZQpz/D_;~*ģ9Nd49Mma9}YԴwSeV+&L33a܀K:~۪eSR'J2p֘8w Nc5m{C{:+cǂġ&?oo3Hf#-̡@/0𓜖aY/뗾)l{j3(dh4γ:N %;Ŭi;Q`&b`F n춋8JtcqE MYNқ"ՈzW `3sQ; @>tB '.˕AJ!q`"1tM¦XL֊w(O[4SMNڷ%µ&Z7m!\CM1b@ *pƼ`4 ׆cV]eC&0D?&?zHy&]v)}{T/'!,@ٻ̇ 3ܼx|KAU6F-c$3L@N8E)@d& P6CM|~r$#xu1ص7BCO -@J@ ]:`RX:~Lȉ8Ҫh״.1i Yf5هGC{YhiLfQT3cĭyO[ev*}yDO釾@8ӿԒ>$S *^l 7(#z#l#P.#Yʭ"p$/\();Z( Q [i79u-+559Z8Iv6i۷R`o/OvK@VvVzzjjbW1*&284XjBJauWGҩ55n7.] ۆzMZԬ۴isLɀjG3/BL3?'%&Op4~Cn@ XLe$'%$ĝ>yxo) 2X)iHrE&|4&|д<'飪/}>Pc{$ (&{[],@{DMX9sA \JH"3$.3r➫:59@ 'bO9؃Pŏ$ڂ Ww7_~{cuX1j=4@:{FH!flw NA-UjG{j3.++[~ch-hkk 0E1 5vtC$3CE'vZD݃:)aHfdb=;^Zk.ygdA|/%ptkl?Q؎5ås?ԷzT3[/gGg; yJ rs2`rJXk O]t9ڣ4kUHdÑ&SNӂN֠ajr%ؙ"9uaM Kiz-f 7L T]NLSs++Q!IOMJc=GLb\k@IDATYCHiAbNK h+|BBU`u\P)Gc/ A+ǗCƜ[aCa@at󨠋"|h&G2xd8cNZ0p1iɏ9ǜA>xs<^'}ߜrZpeqi.IF"6 ɔ>t !Ʃ䦀Chn)4}¼{ qۼQ%i}b 2FjI~Jy~x][FWϋgGG?Q)O&Z!C(tALw3Ib#bѺ]f2RLԷc3)U,KӪ2ˢxhdinB2*#_:n$c08 ='[2_. ZC璀eVvK2Uw܀>aW1 CxLp9}<6W XA'ԹV ( Z2{ET -~9mZ4wL6MrG8 ZN tǰ\2YSp"VӬfi\yOߨmsAT@fUW׊:ja3[B]FĂ!J2EWcqumU|$Y#,<=nO[WA&#}(LA5uŰX f{2q܈ ~8RPJPLdr G,š[Ϝ H@2I_ćG/CS(1g6HC }4PAǤoE#L:mi)"Zj*8IRЬ~ذ+ZT#[Z#\9c9ioL4GoXTlߑӢY`e!0 T,RGXDᴘ{Rb:ce, QG9 KG%p?i 1z{-Z%zU茶M J`֖!s#Nz<(:?j`7iIC) 5&>)3ֻΌJ}ab4ƂS͗@AC!>Ѩj+jJ0AaEJt^쮾J6YۏAdxq0llf}!_ HrʘGTW;zV6ɝ '{2\eԮ+FAn#7!7bܸWw2cV6f (]S2JP2MU4kU9}=7EFIVUk†_-+ ?0$σĸ"̾MN:˾G|(81ϝ>Q^W^R 4)!5Ur\%ҼB7g0J΅KA02XUA:"FczpрPrmKNF 𝡆BEUq|夙&f@$NFt^f Ԏ#1U᥉#,FX36#iq@|q|B7M8/rh!>5ENjDʭz5Ři-^E{/[{v0 3r;>^SPS .i7UFgy:֖S9đ‰vo ?2/y}Zt0b~S;n a#k>I5Pq]+V(;\dܡMцTv;*xd-Y *}úi䣉sY`F†<שCFPS\V0J,myk zIcnғe;N#mTX.>k>f>kcqq,h)isݪ8dI,Oǣ/vl4B F93,`Y[aCi5cW7dJYTzjgNxQ 57ri~_pN$,)\d?hp[3&o9a: Ā=#V`hJ`%&[ߙWI$cx]7sf+͡y MٳСgKM-wac=Aa  s-3U1"HupC 1Z))%h2͚7y25UQ*hct0@3x|^ˉIdv0aN7>xh%Õ00q#kԹgIy?g X X`MBgȀ|GNIK w<^ߋώ{WBh>,\S !GN!P N1U~8W5¸Y 砶Mֆ4x#19ouө.lWCAN1PbR@d\G^yYOrW 0;+5K0] RG27UWf8] VWcs^gf`\ǹ~)垝8bvnY&:,14GtUiËWցNN}r)ZU9]t 1P k/ʽzٛdx[YxK+&4y![,lrُ/O٩y=G>eA'/HZ-)6-+w+Ӂ;"GuMP4q] N e̷;uaUa;E}˙C:b,b[}d0NjյɱdOsl4?r*́azlL9{^1d /L@N*39vW``鼡=k C:mӤg=;vԘ)ou3*+xUvLziukT-QtFދg1'(8t"N:cTOu{&S͈ Z",IɩbޢbE sCtܝ4oL, ٰ"D%h!uWU FjRD {v|>\nm,ʚCa޽e62Pa)y999̼l9 JjV4aTO+91m _ù s bÆ%XF+5Z7{K Ljǎ};[4 Xa#Cq4j09DN?+CYƧH=E+$ZsG>|g/V* )3L҅["Dr ʼqo7cMA50<2o Fo# |ޭVh$gl@! 0 3Uk="S쇘S!]=1rj. 3e[2Bbp~+\ PѯSw}PI N\SW6*?P=4L~F`va\A_) D .2x[FTf X0kUنy?ͦ>^F<-IcV`)f+F"q$&#ɚway=6."1 횆ldҲ3}ڿSsb_TYY˷ jS 2Iۏ+#m>硞H 7_Nr6jv:!#EE44,ɩbTvЉX\H5Dp4%hK6GĈXiB%'J}}D3CdȐ+i lD tW\ONpsRf֮!R"ش'F2=6Yi/hZY ѫ|dvmEA!xRJBˀ"Cxġ94CD*4;nO-a&x1X!"w*Q 5( yaCG]kB<{G bx6vУ@=}fIӣuIJW"855Y 4]zsJ߹̀wb.b}dѱY=q' ʜ0i﷐b{tP9ŭ̥4۷R)6Egٺ>b)l|M9t+-):Xѷ#LOx^[>>w ?^I-ntAkRGL͉YA"V/ CbʸRƀ F2Vѱy]u&=[!D(}Rd{ RRcg/zC`5qT/IayN}odxp0!;ًCЗv0d#6Y y+6VlqRLV_, A5)>#3[/ޢ~jaa·Sf/ ~>Hw>ɸB3  QDW0#yyH6SC,th"smtpVmu{RkDƕPZe0Lj:7EZZCpsZw:.:7pu($w7wBD&p,: {]7s0t뚝4S RHa[>tCAX!R|d 1`SsۋE^/'^G@ n(A? 
LC ;g(2u_l+*.WCx[sϾrJܫd?i_ ?F=ܱN#RLgѭ=4Jzpj†7ң˕J;4W0ĞS@2gWag.n( vnQ?-83 MldJcPd!_1\e5,{/F.~^.!p ԙX, )mꬅx6;^Ъx[j[Ș}`&*W!T W@EpG5=W؍tUڨ ju@C %qjdwA Rd4EML3Z6AWj@VvJczM5%&!եK؅czvm)}VZPfiߝ}ۋXx NɭۤFZJM:W8qH+.4h BLݱF>`B HquTI]k^"4X;V)PĄu@ GS"MMfͫŁUń<n\Y?LڻL1?7kS~|Y.:QV_j>r.> PTyfT&c22or C5W;iB`vi.㣥fNo{XtXб'-5!d^-ʵ0 5%=/ +&8'j+̝rܦ<$PϥegL?MW9aOi axj*T4ys:f7|?hfCY+=״SKC74,6FS3 Y[h9sìP~S@eA8lRKx䊚= 0+֩C{7H߇ CQx BP@M}p3U8YS@Q+w4ɹ i+cjr2w'ҰSTP>0 3rO\$r@K2k>+{a>Ld~&`g[ F~e' $+M_\wfRe+NX]֣ ëܥ3J(i(p@b:0$mF:e`A|Vȸ,@I}pFm5F.XnQ^j$~00y^ GIm'J ey> `c84ih&km{o9ZaƂrpPՂ!7}~ԪiAiɬ[[Ε;Oiڦ}՘=qxnЕ)Y$U^97/\-W|"~{[X<ǔ4X*{h/=.^F'MLLnj[$ tZ;[i/BN{󗉁]ZB;[/<'/ejX ,†OŊ,N[hiclx[3&\!lX*tm9KK{}do<&[F"-{BLb`L&tiYO8x Z-dŨ#L|l-|G<(s}Fj&H;>n#U؟Ln0w;px0R#gE= >6)ymED5@&ݴSlK&92Rsmӄo;vX"#j`[0̂ܣv!foΚ0@Iw0p5Z X7+춅4 aHU5H胓zR >>c@ cEzH1 Yo:k?|yN4'5) -QE +K[ SN2D(pXоix`FE*F)$Z ]ԌC0駥=oUjѱkO9 AztG&OEv˕+&n25,mB4Xu x_*1!pt0_-وU` =6i`u ]NBqJrR|5KN-_ a*'hmLc#D}C#PH5r'̶8?oo}hNm$(tj95"h흜q<8yn جk7pg}2?{:?(Gs8N ᛱ ΰ4Mf2XWNr`@nh*6>>ն3J̉X:#I2E}I9(v Z&Njpt%p[k;eK UPQ*wfX;}̍ÇB*yʎVFRZ= o2??^#!}JAA"(>X@[c[~Zg/13`F,cT _^M )f}C?{vOP`iJa Gѥ3L<*W9*X`v}:61O39}L'pX0v`g-ҷBÔ 6h&Fh^GA\{K'}éc1[QV0Gqv}d(9r+K h0ݰ@3Т sđFвkq,  =+z(UZPmݳ]$.?4燖hDQj[$bO WiPs[x~F \ipL0 i}'}ˋg"6!+!aƆ2Z#M]Z! 44gi A4eRw(lFY@ҸEiwvl)ɝ$H:jNܸ](d*mYuмж ( >4šs[˹cc®MGrlGi D WS3WMxf]Ԣiͥ=~p?'Pw[EN6ErvT2Z26 4fJf9J!(*'ͧ80𱡉[cWժq+CB{u@h^2dͼ`e( ӼU3o6Ws)krҎ{K~PY>ҹ S٩ڎq_}KAC zmwtUѳؓ'DԪ0ip,C:m f5c:Cf(\3楚y/0WOeT⥤WU/&25OHj :< _\D.34ԼR*J 7:cLh:/V!e 0T*䉌#+2 i <j$]!,CS2ޅ<4*DvW]fY&He俗0r~qUha_+zX%),F$W!-KKG>{w%t[0-M; S`1t ѷR]^HAv0j1 Mw6% c¨皽Y(ESBgw4}1k<YA f%=00TyQ PG_Mىa-Y*aC lN6A hRŭ="7JRg✅?&[ ;Q`"{pssݭ^줍Ƭp!GaC \5IkL9.3`M9y^^yxd6[Vs@6Cf_Go,)):^5 0PpAiFJJ};zRX'ܔ@=`ئ!b}O-^j# 6~j}vV ۹'We N#s]f8w&[@1SH1P|QhN i CfǼ@s}x4`lfn1/މ :jګʹni¹ϴ}/q839w7%5T*JMZj?i|ґIun j&SN) 4rX^oOmO:o@.Qg_L^[ y> Z2 اXٿJ78~T, zt KH:tp2&j" l0gr@!2Rvz7HcĐ `Prt['㇢޴-#s_.l(ՙR30 (YVeoH{A1hB2dyh3\rMf'-W #{%v: aEC'VN|Z &S†l&>[AmRc~e*-'` CQ10w[8pG\.^8ekaeNLoS.}&Rܧy@isq!BA70͈ԫ}Ue꜌p:=$94 u-\!f ye/6orT0 N@"P@z%ݧ2Cn"UZgd,~Wngo4JpHa +K>߯ u dh [򲐇 Ԛ< _ʨBtuRĖm$@}<7%l(3*E:}ZϹ npXň76Yvqo`jv"@9)0a57E>/޼%<jvA8tkpmέ €*l-1b%-#lN*f}Cxmkз2d5 ,1&d́Ct0ctioe<(J` GӰ5H ֚e !u.Ms,V9euAZfڹamXF FsS% &f\-DHlc -3ĬL؇A݀>L:~۪eSRsE:tndA:$b> HJn%p[FꋒGjbЯXT׀0&;d4etZOzf)z }"ǏK߼7s  6hIaCM8ԡ XfgyF[+,c-1=kadvh6~m 0ͼ^Yb[n_WF@2ƈhi:6w{i"+M1Kڣ͝=s9 MpCvHө0#Awa^n7ݾf9Zś 2 TpU"pd\Ͽfm;6 iܴ_Py^yⶀ,驩ׯ_9}$&*&28Wd(P1]i5HZC؁ApӚSQ8~h־s 5 :Si3Hݷ\ūZzzzV`q$def'S'VJ8iR']8}ȁ#{*(hpAs?؏+}c[+# hFaʗ }>:zJ,#y-+hFaF.^c%hxA9ΰd}hڦݓ66_&C#IRj"מ$o>l8&@?<ESnJdߒ) s *u3u*:@k^=g^lQÅ {YR|\¹~<:):V}zyh7 9np ͱEj : cb4˗Vu1{+7 W Vi1{]q+Ёw"k&D ޚ1Yd a&@MJD0tT|y~up/ c S:~%f9-F?v4}#F7~sW*TB5sp(PqA-RpP%dt1`LnI?m6.(JIW!{k@fKϊ>$WlصJ$Cf 6 j:di(pcS*a-ecghziB;8`'c WMҨbfy/sp (ܫqcP _5vh5O8ĀvނE9ʁU) tqڡ*afIǜX8P4cK:7Q`*&NLp&}(g8~X.P(C H`Cy7E<◠G7[ E;p ̊ gz_~_/ukԤ2EE rgn\fķ[ o틬Z`"gQV9P}k%UN%d N({sǪ}ڽԱ^N{[yUuNݧ` |e> od{^5X;!P8'E&13}\0ƈQɧeVhVFY`mסHNݸ$6ޓa9;tߎ37#VG,*4QRԢp,b I[$6H9aTo2 i)Qz Ӆ jN2qQ!⎡w H-Ƙ)rHJEy[?KKP!g?1v0d25DMu%U@߻iI&Lfi̞:}3fM»ݗrC;/sqzy w-Мb7B_tH01!E{AMDo[QtoHܳcWlO3@˳ߋL^pZ 1(%JxE  ?  "NQH=[g_!=< ;zZϛ=%+ا P+idBHfoh7 +FqR:&uLN%FaU x2S̬Gy)HլNW)gR79\+[V50Fv3-b̕0krBw;q*?>7DMLr8lI6%#]FUgSC{GΈ Ƣ 0-Rr3%D}:6S=8$+?俨A7gqj@jW"ŹK hC 4'Y̜GkRwLGm?4=~81k 0WEJbw #װvuѯsN["ő3E&!FUx0 ^mqٳ}zyr~h[Q 5ݻLGϜ'0OS&sT)}=7V(oǏBVYf5aC' t >w1a,&cb>uR9%Ǽ <.f#}%dƭgAR@0V' XԂF@m0wb+·Q,D`Bƽz`̘+#:,>t\% ZLd2g⻿bEuDDߢ N;LA ELNE)M97%38f4 גMWPQQ?&97\mߐ~-Lar:4X 54sX`f~vCj/ 3?s fa.  _fkx66R0Lwj^4aTOPn @h/O0>jP $ :}`-@P#0q=D/c7%ꑗ_769sXgJKInLLqسQN&)TUaT+-ʭh !@fCoI-KjmB2id]j*.^/l_s4mQWEx OtW_i , 7r~j,ӒG"IjTܴL_x%Y~XoL:ŏ;?EK :>;>}ZY[Eg3RS҅{}qʌYY3YMRcHMIrMMˀFJDz4b!  
"Ѷڣ pѭՑ*Va l OS{mɲ{w\yorrϝ8Z59\%lZ5F.ze o NxaZwyz>J\~ʕkAR?\a.~' 0 )&_OQnN}ˉ}7rş#E˅p#2g2߄2z):(y&L|1[kihl*@IDAT>_1{5?K IjۧLf`zR8{IZ<#r񝑊((am[Y٬'o8d(lYWcvnE#Ō&ÌZij63cլ[:Y3x@Y휐PZ/];ƺD#˽70 YnP6I3˷ V> ԔP;HD i]`fGtHVB.[v m'}Q!c舚lax}$|UzeAcL8&oy}#n[Ex8K}!awŘUы: MzGx߾mɭ>B}ێ>>m'ppX zV3T{ND3\uo.E]YͼvULD(U#lP ꜽ=UuMF]5-MXuoKnv rǖcˑO*iKdHAp-Ё;dQ>%nxs>bj}E1y{ Ԡ1ًeVYN3cYI`[3&sw'n,;"(Wi]d^h 1>?翿_\c˩bÒQ%K#7":u߁ľiCz~}vq9j)COrY^9>m4%aDi{-#Pˈ@D'̿X/1j" nifؓ'D$ `✜'n,ZY6Vp kD9MKo>Ln#Q|1R`eC>ex{tB?mIn;;p7Ğ8%p#8FJ9.Y׳4m0Ynf_+x{OEi\ϯђt(c> ;v|l4n:8z=n?_;~SݸTMAC [/]mJ@L#͙ne C֪Q-5/J*h(\ր#<}1qYؿgג%h6HS.ͽ6fnYEfffzi6f1FjS5PfI:}7]}|-=V8^&skе0r\?udMLfagS8_sdϒvi4Aa_Khۘ-oɏ=p'M+>EAZyhDζ5;[ń\6ּҥ$INQgQqb,]q0<e{s:M|]  Z""U%b,K8[4#(K1 C^^}@*&ǏeHŤc\6x9~8m2t^6Q1qg=Un%S-RР`$$x?ׯcbY 9Ո 'mDKO̜(<}>}%3 i"C|b#G-`UWbC!8AcH$hnnAt KHJ?~ib|dlZ,DU9tնzݑ']`MXi@)1|S* }@h gUE~.9_/љZJ-Z*|dQt$_t >˛2RSŴeG?slS1]u]i._5C7+68GShu h>E}9Dh `R.C-<_ Z:Ug~c{~͎ռ{`gDgV'h"[.?V*:|ck)0ǰ9s`KYn Xod_r%(PVpv1 ʼn%vDذ׶eew{\+><1YVEb?4b/\KT81; fS1ܣ؊_Cb52K?8vq7!;d4pZ}s;?3FSBj(xP60< o˨St7fe&El8OpR)aTiY3^U-RB07 [֮]ԱW?Yj@ ͐ u(v]+(Ӹ'^x}1Lbq6E\7>5}v ))rhR"pA%bïbr-=bQeu'm }0cq?z֪x qǘ^ݿs ċoЍOH̴? Yxki"!"$͚uw~1x=4/'qIWAFݑۉN-rNlaːO;@WO~Y쨵*T*\fQ}pDD8#6m@/UVg⯈[#Ec:1}ABQ={񂛧A|{yVo=ct&޴' j?¢+|o~hnn0Vy$S\MtlVO7j%8t ٶqD$C;Rk뚝H~v}D$Cce[4.{AG;~*q!qBF7 ίj4o[X{t2ovvVo045]T>Wcn i&fg>h 迤S1뷭Z:%zMGp77D:媶ոkط=lw{ 90xL/ 8m~lgb eV^^ajO''9-tG23 ^/}{Sx<}fP)io\1ym$`&Tb^I l9 +by&]v F0ejc6E+YG)MCDP/&n tR8wY$r keIa+߽:4ɓQ|`-V4ˋ rFqf2 +{Dѫ`d?]VTd<| Nq{-9#.HfnWQR(0Y3@- vhV¢BڑZQؼ(,O$C-3ۢi&ls7cDZB%m#G#T .-*FN]JMM΄c9NiҺ]M5XKWqSnYY驩ׯ_AƨȈh`b &JN`uqU6kҢfݦMTFȴ`M Uk?z=yiBtVffg9)1xEUL,2N<7 Ҧڨ *|c`g?yOg⃳ (xy-)p8n݀>n6wvG-0T$7:a3IАZ9R9~3++DJЉX)0yQӺ53B$375{cNG%gOϳ*̶&=3~]s;>' IwꍁPѶIϋuGГ5f0GqVzm+ړ}*JAw1.el6HR@s-+߾YlD<>x,V&a`זRF9Sؠ|%:4dɢK,٣is!796ؑobtd.ժeu;WU8oG)؇ܔ0~$b2p\1枌b\s{)4;^# ^+<Xn^S81\KZ8qN}GQ8S9xVp4~z{֫|ǔA:U~V]H@oBMt:իHPJV4Iօ盳E`<$۵)"FtkH/6C6{g3Vۊܫ0[jR7X޶Y+:jʂCU@DVlawZ`["aUh="Ѵzfi])glUʮ4RgrbVLoNJ!MF^n| !>d_r}KACi5J̤oqvW*w/)2 $߇1U~Jy~x][efy(h4>e)^+d(eS<k\/1psS β!<&}E%6+4S۱YnP ,K!D)CEO7|Fyk#XK G` 5-4ǰ>m̺;nZp+(,n(үb|Ie$/6*LFc%lp— qSz7%J LC2iܔf+jy3˩{qX@l1++ꯡGc(d 7ߏʕ %lPAߥ _|bh71GG=qNat>c%kD%.kMcA+E:΍ IT7Հ7LlZ4LJh7%0҄-^quX$g<@MfGePڷӗbSD4VPf`07 yWYL˷BcxY1~>Ҧμ!x)@h39!̓ݒ^Bs)ە+OXWϢ?qv9(լ&n$޾?3,醂yߑӢ=>)9EfRҞ?h nC G!%U+I)4.Ѵ,^X*ݫ6ZI\12lT.E (Y8؟ܴB`J >sߏO= ڍo㲶d觏p$Ȥja˨Sߦl F9xp~o$S jLR}Sa왟?[36r[Uv̨ ޓrJׄ ZI ~W%WK6^_%v9{B苁,hZ|iRD!>Ѩjw^T' .vAd4bo,^w)ٛO  ^I8aXan1!-Y[F tm~@&$UZ|_jh%U`6>Hmktc<Ҳɔ4:}i~<k`U  1e `KGӲ{SڻW췆mV;Ndʨ] V;@GnCnSŸqs0'Od42v2csgܠi,ۮ}O)UKWc4W0T}s;?K6^(SѪ.ۻ@pٻ-ۼGTC(L%lɳ|/3/f } &%C"3ĸRπ=|EQ^yxP> :wytж3')Gfh)jѼkLkঀ]=F(1gXZc,l檽Ć? pj]L>;т5W^=kֲF( !ΐ,٣iU9AIR1CnԼB5ELb̴LPܖ}֌<>sF6dXfi8߉UE dTvSe{'h˩cm9uSZ/O)h-اyw Hξ+hӔap]ذLFiVQHd1ʶ~jɜ4-=2K2rqHu a4XO:!;݄ ' %y6wER֬ *}Jc܌AT)dɤY{qA,n=mU8C.s@F& u=o݄⊦yn *O:ۺfUN\s!w[grY'k)vD2F|/lƟoCcޘӹyw&~qͨ/ .BCi73J'C߫0"OBrY> T㯾_ ,Mu }W97,9 oJ`|/4[}XǔYҏ䡓†ltcX$Y4L: S`rMH6GkQ~r3Yx[:2ri(" z$,WHwG,T?ױXI(4ӊG$eS+iXT-~^W+tE l*3 {#9]xCXϷbv Zl晹 aop4~v#L8~f~9lB}8odj >~[{{{s46^!F0 6A1<3m\90^FYgFP8Vtdq)]ذN:]^D`RCmFan6X*}F\Aa DR!\B?6D1YK 5ɿE2??7F(%s 1K77|5_#;\ު;Uo14CzH˴ؚ@ؒWMZxh!>lv1^pUS?4@%U`VY zy*?ˣZZ Am1 ?h8{G^}cs^߈2 })YTYǏ+U]ذ(<U WF$^Oo?s@ ? 
[binary PNG image data omitted; no readable file header survives for this image in this span]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/install/figures/network1-services.svg0000644000175000017500000014032300000000000026441 0ustar00coreycorey00000000000000
[SVG figure, recoverable text labels only: Produced by OmniGraffle 6.5.2, 2016-04-26 14:56:09 +0000; Canvas 1, Layer 1.
Title: "Networking Option 1: Provider Networks — Service Layout". Legend: Core component / Optional component.
Node boxes: Controller Node; Compute Nodes; Block Storage Nodes; Object Storage Nodes.
Service labels: SQL Database Service; NoSQL Database Service; Message Queue; Identity; Image Service; Compute Management; Networking Management; Networking ML2 Plug-in; Networking Linux Bridge Agent; Networking DHCP Agent; Networking Metadata Agent; Block Storage Management; Block Storage Volume Service; Shared File System Management; Shared File System Service; Network Time Service; Orchestration; Telemetry Management; Telemetry Agent(s); Telemetry Agent; Object Storage Proxy Service; Object Storage Account Service; Object Storage Container Service; Object Storage Object Service; iSCSI Target Service; KVM Hypervisor; Compute; Linux Network Utilities; Database Management.]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/doc/source/install/figures/network2-services.graffle0000644000175000017500000000770100000000000027253 0ustar00coreycorey00000000000000
[binary OmniGraffle data omitted]
[further binary image data omitted; no readable file headers survive in the remainder of this span]
[eʠPD[-y bNy)̔|њL4G<|S#b3yk#D0:Y{{bR>YhBf=gޙ /n M/A:xjT?OLz!穮=n\ڶf94X0dskwUߟ=%=@l`׆^U+ PԜxkܿ(`iқBIB %=d=''1Ng>|g̨̳q,JZ+8op8#hRA;76Z#([|0&¶y潭 ?q^~@L++I3&}Kit!D򓸄kpkR0p''EX|oߋ8 m]H[HxBzk,?שHfiĜ7m+4[LP^9_o/',Vi<;)OdB Y\N3YqXM}gۣ_jG'q#4)1 e_nLh.zq}8YJN:?ܦw@X+V<++5fH"nWF{hx_Ri64KъG:KwJ{vS&hS)B1ş!/Mi8S{ޥ.IH3Rz '.& qN%i'(lmVϡ tG POUQZbT-ٱ eѵSu߆<_%"t%$A ҈RSd] L=б8]kbi"`L@3ҀalTʈ:e2Evd|hB+2U5$(h˓p`4*:2s4CN^U5ap?']0#.<—ƗYu:/ߏ$lhN1^9g͆W0֏J,p0zGW f9诗2Y-W$Jm{'H@Ț3poU5 ?~H*[!KBqiIR m~ Gʖ5<Q+ M %/8֋y?(WJ9P^Jj$h6$i奀QxG1o+: ԡ 0&@% 肆.lfz嗦[o *T9[!@㛚._avv)taz7j%&dR?WZKQ˲f?d#n)܎V͒<N(숄7m Vyٵty졞2MPFo}ZGa/,ӫu中kE`"f^`h!ަǽ \m#{$'ןs!-\ދ -&=ڣW^Yc:!| 6Bs& 3P&d\}ݪa>Pw侊qHAƖ}IRn׭ =.;AV$x#IvW` &K l1*?'zf9gn]"Xb4&3Ԩ"8Lû<& YJA??xqf>"6¯#?Hc. t*K¡Fd~JuvU>V oEL7/pfHy*XlS[pmi:%O,IkFϫLZF%sbVVgdL 0B&44$b]_dvuTGLHqd4Ɯƞz(+h8mj\WP $Va ~(T<_?a" Rcm`/3ĉ 4!^gp҈T%lM pΫ4IQ8M榞 Ύ;{} (jG}F-B6>,O;%Gxk`UJVk x +ͱ6W5 yX>goI:2'Li7Ә`L5A4وt[w+6,i"1ԍb?ΡqqPD.h HR5 44Mp"ePZ/""@FQ9$}Sܜ 0&@"OF髶.@i%IAZ +"^ 92( $H8Z 6ȌN*hw:؉' 8n[2%̸Y{ Ggko(*uL`L 0F&F}8M> j54_4>4v$XfC$|Z ]PAkPbT uS+6&bY\Vj]#v6Y_3&`ABRQua&F͆Q 9Ѕ:fCj5$ti)^AJIa;>`aRCY~&$k`L 0&P_`t[q_6.d'$pa4of]h2(Oł†?UzM&sTSTuL26~s_`L 4hA_e F'SPyh]AG0xI[FkYrK*ދ'ܔh" C[H{ӝhmi3 E9+`L 0&аQ&F.訟a>6g7HPY(iQTP\l&8r`L 0&@EhE†&p}zt*xJR{,VtU&i,|bRP#c^Qu^o5|k8 && /Iz51L 0&@}!P|H{E(ă&hqrpAA\IUrM)S֯{@LEդ>/lP/(6vcه!|4x9zI&: ;L 0&+jԈ;:u.m~:IWui#x]S=G݇%ɔp8T.i/8!a/Bd(ZV}0q1Rq`L 0&̛ޢ.c.0NsA4NxclڶE7oE8tzFgtN'Yq\ꌰAiHyT+ب*H6iHoI4=iDG{&ǻ\_uYCb}eL 0&i)ˈq1V>u@JBS|`e_(αMlxuJ0yt̔뜊Nu 5o6 li#DG!: 0&W$ c/:,Io!hH SzJ=#C-˽,BW//T=~kL 0&@Dի_޷uDXlfZ$4>TZk\r>Y+pw:؉kW =.`L 0Gn}{$ 4@V4y=p]߁re!&KsJU)oӘ`L 0&|([ZJs?| Q;{ UhiIذ"һYT k'!\`L 0& El=Lh0GNzI7V/mQ4.QO_$`L 0&Ѐ H*VDt %Hh6ڰѨ@>(r;&MrO֮ bSy;|Rqn܈е&pX5Ґ`L 0:L2C 0Y,CUl& )Vl'sl]ڵOuhHCyO~P=;j{&yqaTozBpȲhfZ;Ǘ}bǡh_h&xBv:žS.m˔ Ac(viлPHWH]=`L 0&@"@B4alh8b]ۮ}ybޒM_"HY+^ۣ ԆWol< veh$<}A,g/7$tYiײ𭢢~jwu.ڻo<ݖurL 0&`G}D2D<(d=G=-55=cK^|t  G2;v? peޅx[Ś>L53&`L \vz%E]Q;edBHPv4 9%I$h<wۏcaVrL 0&)4``j͢T/l$XjH s%=;bzCS蜾7f᨞cth"ڷjf#4_JI.H@xp4Ʋi3`ЃquC! 2#ݚ5YŰ.C  Otլ#^} =`L 0&@'@I}Bk2_IŒ !PdiQ߯̔Z 9b-JKa K:̠BS@S/#)Z0,b1ϜĒs8$ rχa Y|+N %?IDATC/GϹ{dDZ Zj ԗX7=+X7i02a*仲d.m|}rMwVK~TୋR3:{QR({\1S$TThlcL 0&>Lv>A!}npWf7a$o0Cu^M=5V\'=&h\Ua*%ģwޠi6R.m՜{-V| !✶9ZlDv@[jw CxVkCVm+ZI[L$(t2T-t_7jsC߶C+!ĠzhQ?h'̭]ΐph.x5Q6 Vn/~}?õ6O7K{NM~`w3;6 yWG>N0j|𧻓jWѱpA&`L 0_!0;2'.fyqs "G3ݾ=^ړpY{!d宱0>ЖEa OiK麏Fi^@a)P: 0yX*+Sy T(o.&cIKBW˲S+KaK,;a=p^4. ߭$P<}@=[/Jym+X͓?_$v?S*hY-g#dcH[J\ culTcL 0&PTV4]po%l"_yhE(2kڴhMqNy+|`*@BAy M FXFRtN}}8HKCKCE4 ,lԅQ62&`L*C*MN^ӂ ZM4|K~^ ,͇![}jIX(A'L 0&`9۞r6֟}MņJJL 0&`L \ٸ$!L 0&`LPhm6FLΩ8ǭ+15 #!0u**eb,upBӉz?`L 0_!e(.6_c,q;˪Ozi rnټT+GjpWփ.U^x[~^c6"c& TYaEHEt۾&TNYں?~ #T:92:*̍S 0&j*Whffq\ m&ر:kqVmmMÂU>.ߪԵtbSإǷ˶hy͒вDc8{+Դj媧 0B2nү`fOx5.ڎg4sާʽBRQ`L ,0r7.˺jsIN٫s_)*zu.][sW=GR|nUAB.[S*hPkڇӗ9a#!vP/uޱg~V/yL 0&(CvnݸG7PЮ_>]wkLoƫ:kxf~{n֮i;;7_Z'4vlGPzv (oo6q7$qq3vo%wвs⋅ymwIt.;vr=l}N~?NʾR,$w)ӇEv<ѾE3]Ճ`y}+iP=)yn^7׉a4&`L :ЪDxeV̧#w&_.\/BfNʥFx^F !ն=ڛ6|ۮ]ڵ'XxnwnŒ m?V1;E0բBYd6B cp>I>=̵F{ AyqGLf'>-lD[#>p}x^ԩq8ևp`L 032s]D~erkĵD+U*xou٫XggL 0&9Z SzSw8Hp{^T<3Llh*hmNC[ dv'0H@GS>4-e'L DMK+yF&Wjq]+׋7WF> \V"M!@Ž"6҄mhjZ'.9^̆TSz$i'5 *s*QRKŒJ%?zwSk_T] QQҰK2ׄS]xJ^34kDL 0&El=$lhZ9&lľcT\jLH66ًpMh+(RY!Bi*^xXT;'EnhD69OxL(}f.nrx7~:~ JhѬH0ާhu>,*[^HHQd˦ ^*JG,"@\2^TssҲ bջvsS?&N>/hYrئUץi/kw2=ھ} s0cu^QP(Lh9ck.|)Wd)lX3}Fi?Ȕ5;Q?L60  2")* {]>dw߻=3=+G,ݸG\i+L 0&@'O&P^Hzx~EE4 _E A5N['MڪS@qBͻYKY^į~HL|mȔi@c,L:≻o9V&#׉ӊTQeX!s$YD\>%? 
wkwVcE5HFR#P w5{.Zv$=维Ne=P Xf˒|P`W;P ^`U0&4!so 5EuR1#*SOrH3 nt) ;S*UR`!b܈{4|U5 #rM~"g#h w=\Y%`l`afQXV$ )PrEm#!C47O>!7v{h|S΍kgCFл@;( z7>qmL 0&h~_b)E7$HP}p&''g4i\͗<}.Cݥ3>:N4Zݲ/I:v`׫H^V0AB ^jNe#&`L 0O :,I&7i C'M$rP Ɯo+I|컯@HРHSOFlЮq˪IX({\knU6s`L 0R 44!M[t۰;)BMRSm>E'hy]Ǖ[̬WnϿ$dQATdFU͛g'{޿nR_>e;ې *1Y*Z6dv iy3R'^l,L 0& FE3VPco;tӝwv:AX}MKV+ *|@áҎa- :~`^M0 .1uסBҜ)ok]6rY" UQo=TCxzaYlcSH~$č; %!mK*[GvgH˕֡ `L *tTootC: #ݓrK]u?(lG:$ ;傂 ذq[$T_LZ :'ҀEx;FX hi>Hq' #IoA#DʽB-iĸI({BV'LAxQ2I'k>M&nV3&7|GF9oF/6V*p]k*4ߧ@DEB^|1H 4$TS$`FCӠ{ՠ.adtЌ܀@Fܨ W_|#.(#̒yI kviOXh")̘9!ѶieQxmk9 0&@k>S:۟hrjtk]'G"AE  @!i#7HG>hu. IȩSl3mX,l`}+.9ձ32M(%bHO#aos!gE=YPi$OͤB1q"H$8^:H[܇0ک磼X`S^ig%KRMbXt_$E i8US3>2e-xWЪ|D+FLFQӑ&&URύy>oZv{H/8;WRD&WTfhJqd8 E~ۡ Nb]z9UC`dVh|v7S /g6wdNPUL16n#JAc}>wd5/de܀{Ct}G)7 cEwAI&*H^f6>cQot:662xrݶ90&'3y(:RLH+ K&4  ƎI`4u=R:3(Y'3[[5v, (5$r`%_e% M!!Pdiz:ů~"T5~TLc1MK&[[t߭Yѹ&A#| *POt-~G+"Ob[H~N3QcK􋖂gj1wy4ɓ<G٧\Te "d$Lߥ2tI-u~yFB+g/"G,NqI24YA"oIRMhS3U2F H1'}SHR૚ uZTc!U:Gz}pa2(/$L'1l ķsc?.N s^PپO?/߅8/пn1h{ϊckko~@2Q/72&}I( 5q' Oi=:e58a!C+'i%N΍& }/b(!wz!%Ba {.R@TO$4bΛZ-v&e/͜//?d+dJI8y(xSuYqXM}gۣA`1$H-e0b`/Or a`]x?b32(/[ G"jE?`L +([l}"/jKr;~ӗp' k糢#@HƐ"i.h,NGa}| '7$R|R؀8+$ۢ_υS>W{ͪLoP1H{̘_ݗk{t@Q.yI4at̔렝q`_=Hg4 -uO-\˽Yq1IcՆ>ȣ h)]РrV@@M]׃&%99d (?' e#U7U賦p8?kC zV>{8O=ji# 2BDit[dFA2U4(S*R(]s`L 0C>@ELk5[jR$&pAGq]7"pе>t^J.>#HIAQc>0VQ=}*uÔ}E2QiPT|KHB09HRTZ@"&lonI'ІUyN,j:琝_PLI7ТQ5%LўBe Osi I$8HSBi#{IAT>(\ d?~0^:/9U@C\`u@b줍-Om/diiRJڌ tIz:q4kSF OĿ퐒'ߙwOyGS2/t)Nj* C}(ƐUDi k$ULHK8(v</p(ڗw= ^۷~?ONDr. 3bn_ѪY1shT^:cO!yq=Ԙ0wif߈HاQ-MV5;}0T%+/F'05㲿վbHG$ߏ)Z8CL 0:@B0`5 y{IӦqv8C7vL&Ѫrja\?^9|!&IViXqr _ck9\y!mXdrd̔ژ;H 8(e+98&h:ԉX2\Ӥũi"߇v/v -8aBrV5[ R݁%xGL.E<]@NS9)bJ,>"6!?Hc. t*K¡{UTl`2bͺƥFCxgLtNkp[af&Rs y,]^uC0M\mBwQUӡWugP?vI=x$n$k Ac/hcG'Ch*9_(gN9y %%G>#XXȺϑsN¨i&-˰D܏Z6P"m(LNP^r6zq`%jߖǪjоݦ0kyCp>3iqMU5 }0K|#r:`L 0&|&.)LACsŪv91 zX-J2FOf4*/ߘ_Fy+N&3-/-KEy}FӽR̈́M *+UY=2*=>/2>{:_3&`L%VYIO`:r9`ZTnڻ*5M ӳ{`L 4T5z\[` Cx^[ϬR0}?L 0&`>C' LϪB|f.EC$Af9gl0y\›Ϫuaӿ&I`L 4,iF7 N\6kX&nphN&{V 0&`L TL'5fXՋL9rw+C ϙ7xby*;W~`L 0&<Ia֞6csO*RجgCK%9;`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&P=>{WF,/^ܮǐ̒i؉K&`L 0&pI{MW1Vv\5;>_!PP:3&`L 0&pi !e8Ŀxw!Blvĉ ~C&On&)P!O$Nxi_'~`L 0&(%Pg jI@#e> 7s Z{gwy[-eG0DOӏ*5>YzZ]3B0}{/\L 0&`Lr\Vv3(>pKώޫn\h# XŴb=zw=w 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&~GIENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/figures/network2-services.svg0000644000175000017500000014221000000000000026437 0ustar00coreycorey00000000000000 Produced by OmniGraffle 6.5.2 2016-04-26 14:55:33 +0000Canvas 1Layer 1 Controller NodeSQL DatabaseServiceBlock Storage Nodes Object Storage NodesNetworking Option 2: Self-Service NetworksService LayoutCore componentOptional componentMessage QueueIdentityImage ServiceComputeManagementNetworkingManagementBlock StorageManagementNetwork Time ServiceOrchestrationDatabaseManagementObject StorageProxy ServiceNetworkingL3 AgentNetworkingDHCP Agent Compute NodesKVM HypervisorComputeNetworkingLinux Bridge AgentTelemetryAgentTelemetryAgent(s)NetworkingML2 Plug-inObject StorageAccount ServiceObject StorageContainer ServiceObject StorageObject ServiceBlock StorageVolume ServiceShared File SystemServiceiSCSI TargetServiceNetworkingMetadata AgentNetworkingLinux Bridge AgentLinux NetworkUtilitiesLinux NetworkUtilitiesShared File SystemManagementTelemetryAgentNoSQL DatabaseServiceTelemetryManagement ././@PaxHeader0000000000000000000000000000002600000000000011453 
neutron-16.0.0.0b2.dev214/doc/source/install/figures/networklayout.graffle

[unrecoverable binary data: OmniGraffle document]

neutron-16.0.0.0b2.dev214/doc/source/install/figures/networklayout.png

[unrecoverable binary data: PNG payload]

neutron-16.0.0.0b2.dev214/doc/source/install/figures/networklayout.svg

[SVG exported from OmniGraffle 6.5.2 (2016-04-26); only its text labels survive extraction. The "Network Layout" diagram shows a management network (10.0.0.0/24) and a provider network (203.0.113.0/24, reaching the Internet through NAT). Interface 1 addresses: Controller Node 1 at 10.0.0.11/24, Compute Node 1 at 10.0.0.31/24, Block Storage Node 1 at 10.0.0.41/24, Object Storage Nodes 1 and 2 at 10.0.0.51/24 and 10.0.0.52/24. Interface 2 on the controller and compute nodes is unnumbered. A legend distinguishes core from optional components.]

neutron-16.0.0.0b2.dev214/doc/source/install/index.rst

.. _networking:

=====================================
Networking service Installation Guide
=====================================
After becoming familiar with basic installation, configuration, operation, and troubleshooting of these OpenStack services, you should consider the following steps toward deployment using a production architecture: * Determine and implement the necessary core and optional services to meet performance and redundancy requirements. * Increase security using methods such as firewalls, encryption, and service policies. * Implement a deployment tool such as Ansible, Chef, Puppet, or Salt to automate deployment and management of the production environment. .. _overview-example-architectures: Example architecture ~~~~~~~~~~~~~~~~~~~~ The example architecture requires at least two nodes (hosts) to launch a basic virtual machine (VM) or instance. Optional services such as Block Storage and Object Storage require additional nodes. .. important:: The example architecture used in this guide is a minimum configuration, and is not intended for production system installations. It is designed to provide a minimum proof-of-concept for the purpose of learning about OpenStack. For information on creating architectures for specific use cases, or how to determine which architecture is required, see the `Architecture Design Guide `_. This example architecture differs from a minimal production architecture as follows: * Networking agents reside on the controller node instead of one or more dedicated network nodes. * Overlay (tunnel) traffic for self-service networks traverses the management network instead of a dedicated network. For more information on production architectures, see the `Architecture Design Guide `_, `OpenStack Operations Guide `_, and :doc:`OpenStack Networking Guide `. .. _figure-hwreqs: .. figure:: figures/hwreqs.png :alt: Hardware requirements **Hardware requirements** Controller ---------- The controller node runs the Identity service, Image service, management portions of Compute, management portion of Networking, various Networking agents, and the Dashboard. It also includes supporting services such as an SQL database, message queue, and Network Time Protocol (NTP). Optionally, the controller node runs portions of the Block Storage, Object Storage, Orchestration, and Telemetry services. The controller node requires a minimum of two network interfaces. Compute ------- The compute node runs the hypervisor portion of Compute that operates instances. By default, Compute uses the kernel-based VM (KVM) hypervisor. The compute node also runs a Networking service agent that connects instances to virtual networks and provides firewalling services to instances via security groups. You can deploy more than one compute node. Each node requires a minimum of two network interfaces. Block Storage ------------- The optional Block Storage node contains the disks that the Block Storage and Shared File System services provision for instances. For simplicity, service traffic between compute nodes and this node uses the management network. Production environments should implement a separate storage network to increase performance and security. You can deploy more than one block storage node. Each node requires a minimum of one network interface. Object Storage -------------- The optional Object Storage node contain the disks that the Object Storage service uses for storing accounts, containers, and objects. For simplicity, service traffic between compute nodes and this node uses the management network. Production environments should implement a separate storage network to increase performance and security. 
This service requires two nodes. Each node requires a minimum of one network interface. You can deploy more than two object storage nodes. Networking ~~~~~~~~~~ Choose one of the following virtual networking options. .. _network1: Networking Option 1: Provider networks -------------------------------------- The provider networks option deploys the OpenStack Networking service in the simplest way possible with primarily layer-2 (bridging/switching) services and VLAN segmentation of networks. Essentially, it bridges virtual networks to physical networks and relies on physical network infrastructure for layer-3 (routing) services. Additionally, a DHCP @#TTT``` kR@.GF1Ǯɞ^3&ce|M(!粿?k2VtTTl6n[.IPID>nL~O*TפゥH(JBYYY2l6cll LIN압 d&#@@ PTd:NP(L+&U!(Y!U5 d2V+/F^7k&Ɋ '#L.{II yeR_Qk'ihrٓ]J>HJB*_X\.FFFPRRqh2 E"އ#վ3 J]T>D2<ձE"b1&n_;YrPwӚʮT*R0>>L-! y_#v\`0Ȼiez @\λOx39T*0 e$( Y](B*S鱥R)X}dz @R5i%I52פR5~|&% M5)ʞ{T*Y=LTl\٪#d{2YD"$IB3&S}O~45xfULIkRT~}/++6d2%7[4~?N<X,oEĢ =&DR/va `&|LudQH$0 TNv @4ߙ=1b'_8?@tP%ۖcz‘>>x3\_g!/vd۲QTפ@ H0ezM „/l7ǎl{H5꼦{=T|_ڞkre&d}}fsM „==yν8S]Hu?JO=UYSor2>>/N'a4>Z4 , n7r9xڲ`A$A$%|QXXl[^^h4/`41<ueQZZo*!B!*e1:: J a}/@ &)e!B!H$ʵrr|>(J,Z*js3 ӉAO&:nsG!BH,A~\.0hhh`r`peс^}QʠR`6S#B!rL<|A4˲hkkL&Ciir¼rA01hŨ B!\Z-VZBm Kk4,\04`B!\"::: 144h4r,\0y2N9NtFsy!BB!x+C(bsyh8 j54B!O*l6sl65pm v""B!73\ט˲hmmV\,,,^\B!rY`4 B!|>۹i^t#bK,!B@& ̙3˲o>ZpB-T*lB!rIH$}}}pyMsb bÆ fB!B.eyMNErB!\! ysin6i B!BH"R`&1sB!B.[FK.G};gP(X,L&Db,B!B.;>FrmN e`Xuu\dB!+Z~w:D"9OsN!# ! B!BrH.sF" <9 4i B!BrOrB7'#\.{| !B!Q^^ ˅`0`0P* !B!! QXXߏfN+y4b1r9@;yB!BXOΕD(//GEEE'B!䊥T*nwN{aXvY!B!Rc2'-14-!B!#˹:x{Lՠ !B@ZbTVV6}eFf"B!d0 P(@ad2aB!tۼwL&Cuuu&B!?N:4hD"缩B!H 8`Y6wIAnFlP8!B!$[4r9 l޻NB!BV01f:6~:( B! P( JQRRrh⤿B!B򣷷@+y 4D"jkk!JQUUϤ !B!4u:Fa{ !B!@>0::Ϥ !B!\6ƋMsK!BJ 4!B!$;F#r9! sN^W$$B!Gww7|>:::sN^k2 ZBz>IB!B~3`{@ H$ B!B.cy~#j3iB!BH5Ј>f"d?]vt5]χ]va۶mtعsg^@!ّH$"(gP%B.Ashb׮]8qV\״ !娩ٖf3Pr996.ۦ˳ZFOOL&QVVB!d``6s6v:NIR@aaa>&$~A ࣏>Ž;P^^R Ӊn|3Ayy9r9,Yz*a%o}[ ̝={?Ϲ~aҥK!  /p3::o~󛨩T*d#<ѴX,d"B܉~?|>_k00LltL;e~?^/6l؀|K_BEE^u|k_bݻ3NcΝD"{ qע;wDcc#}{=^3<ddB!d^ؙ^Fy 40:;;,!7g/~q 7/[oSO=~8렦 5k`ɒ%ן|I>5k]wn݊^z >`<B!dC^NB!. ̕ <Xldk?#`Ϟ=YË/u֡ ~CII ?e !ryqٴhr~ðZذaC~-X,Dсv( gB!\z=PZZ \t( $běIAk<8|*,˂eY\صkW}yNgB!\&OLmy 4b1(d2Y>&$D"L&Ν;Zkk+~kB,Vwvv&}D$ XvO!˓nG$ᦹUF^hD"B"Pw rEضmN:zƲ,s׿r0~T*JxpC!+G4~eF^[4(, BV+jkk~7ހ\.Gee%vؑtpz&vލhnnh4 4!y. v.[46ף@|>|R,[,_IB!B2 W]uUY7c4!B!sK@.`0*B]$u !Bɷ|>tvvtd0LB!LT޻N;iB!BH5 3iB!BH5Ј*~B!BH~d2J$CNB!r\.GmmmN'hk8HB!Z|B$Y:y 4R) LK!B/D"9K'-jrLS>v¶m۠ sΔG"|E}}=d2}/1?=l=VZbß};::wwXx1QTT+V?a77⮻yߏ2r,_/gUd20C=\jl߾V+>ϣ UUUx֖V29;L!29eQ `ywaϞ=Ϥ޽Xaدi9c^V*[ne}Q_2Ya~}?/fw>7MvÆ ,0 7ܐvYHWbY6nXXV* _*O7ofa͸ldy8}4h؂c{1&T0d{{{Y NU:s +bB!6mz=F6w^t f;v`aݗeY׿5+ فe3;;wda~m Wm2W8f,YձS?10 ׿Nydd29BHZZZ:U2{h)2b1L&SZ`Y_Wxۿկ"s~la֭jm91-Zv_سg֭[Ǎ伿0y۷oχ?ٖܶ-|DKK n;ew=fP(M/ɾdr!LfHRgPA.ǎH$ªUx׬YP#yHe`` à<ᵑX,׿5 %%% H& b߾}ضmیT*Ƕ;vlcLU_aq+EEEPTkq)޾6m|ݍ_:pO^&!B׋@ a:wٖYB!$&d*6 eee =bjC2Νë[,z 555hjjŽ;`4gO{v8X`:;;~Aӕm.\X; x8q:twws_O=:;;yfTWWc(**Ç:ٞ3B!$L>u:%PWWTuY $]>/eL&1Ɇqw@R'?I}+xgqC,'̪޽{QWW f7p8oŋOG?B!^6S`7n~;s=' ~hjj®]/c׮]8~8m=mzٜ3B!$[1.H$vX,|&M.r@ k~ry@P$ ξP(.Mr 'UV!GE$ʕ+s~lh[q!͛؊ٔRy7PRRk&'#˱n:lܸI7poٖ-]WqhV7p $4,h4P(v鞇L%BEVT*d2Yҡ@\2N0 ~@ V&*~!:::zl!S<^z 7tS2%T#|I]Me޽زeKLDQ%2ٳ~e˖*͙-IwDg}7x<m ʾ\2U29;@!dJRAP4-3}$ʎX,gdO u;yCE IDAT$n&`ҥx3p8?WFׯ_>cd#<|M_gϞٳgy@UU҂kF. CGG6l؀{7y8v;N\[ldb/ JLʖ/Ʉ'xO<:z~3TWW_:wo|xW|Gcc#Ν;_(--Wi߷LC&<^/^/X>T LT{!J׋L@C,vLcw=mnn֣h4 O? ٌ{:L&?vz=l p!:t(kr۷W~m8NH$466'#<2eK޽{! e˖{ yfZߏIIRc O?4{1oŊhnnƿۿ7s=y״GLyұ, ˅~ $((**BII j5( Wp8 .ԩS8{,V+ (V^x:nI$9r6sZM²,9χZ▐[zouVɉP(.X,C,`0EEEY!!S @ss39|>DQFTVV Zŋ8p|>mۆo=I9s "kL72;\sʹc8EQ\xB!4 T*!H,b1ׅ*c||. 
[Binary image data omitted: embedded PNG figure files from the
documentation archive. The install-guide text above is truncated where the
raw image bytes begin.]

neutron-16.0.0.0b2.dev214/doc/source/install/ovn/figures/ovn-initial-resources.svg

[SVG figure "ovn-initial-resources": external/provider network
"datacentre"; a cirros instance at 192.168.99.5; router 1 at
10.0.0.23/192.168.99.1 with floating IP 10.0.0.130; private network
192.23.0.0/24; public network 10.0.0.0/24 (vlan10); undercloud host with
interfaces eth0/eth1.]

neutron-16.0.0.0b2.dev214/doc/source/install/ovn/figures/tripleo-ovn-arch.png

[Binary image data omitted: PNG rendering of the TripleO OVN architecture
diagram.]

neutron-16.0.0.0b2.dev214/doc/source/install/ovn/figures/tripleo-ovn-arch.svg

[SVG figure "tripleo-ovn-arch": controller nodes 1..3 and compute node 1,
each with br-ex and br-int bridges; external/provider network (vlan10);
internal API including NBDB/SBDB (vlan20, 172.17.0.0/24); tenant/Geneve
encapsulation (vlan50, 172.16.0.0/24); control plane 192.24.0.0/24; virt
host net 192.23.0.0/24; undercloud node.]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/ovn/index.rst0000644000175000017500000000023600000000000023322 0ustar00coreycorey00000000000000..

=========================
OVN Install Documentation
=========================

.. toctree::
   :maxdepth: 1

   manual_install.rst
   tripleo_install.rst

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/ovn/manual_install.rst0000644000175000017500000002604500000000000025224 0ustar00coreycorey00000000000000.. _manual_install:

==============================
Manual install & Configuration
==============================

This document discusses what is required for manual installation, or for
integration into a production OpenStack deployment tool, of conventional
architectures that include the following types of nodes:

* Controller - Runs OpenStack control plane services such as REST APIs
  and databases.

* Network - Runs the layer-2, layer-3 (routing), DHCP, and metadata agents
  for the Networking service. Some agents are optional. Usually provides
  connectivity between provider (public) and project (private) networks
  via NAT and floating IP addresses.

  .. note:: Some tools deploy these services on controller nodes.

* Compute - Runs the hypervisor and layer-2 agent for the Networking
  service.

Packaging
---------

Open vSwitch (OVS) includes OVN beginning with version 2.5 and considers
it experimental. The Networking service integration for OVN is now one of
the in-tree Neutron drivers, so it should be delivered with the
``neutron`` package; older versions of this integration were delivered
with an independent package, typically ``networking-ovn``.

Building OVS from source automatically installs OVN. For deployment tools
using distribution packages, the ``openvswitch-ovn`` package for RHEL/CentOS
and compatible distributions automatically installs ``openvswitch`` as a
dependency. Ubuntu/Debian includes ``ovn-central``, ``ovn-host``,
``ovn-docker``, and ``ovn-common`` packages that pull in the appropriate
Open vSwitch dependencies as needed.

A ``python-networking-ovn`` RPM may be obtained for Fedora or CentOS from
the RDO project. A package based on the ``master`` branch of
``networking-ovn`` can be found at https://trunk.rdoproject.org/.

Fedora and CentOS RPM builds of OVS and OVN from the ``master`` branch of
``ovs`` can be found in this COPR repository:
https://copr.fedorainfracloud.org/coprs/leifmadsen/ovs-master/.

Controller nodes
----------------

Each controller node runs the OVS service (including dependent services
such as ``ovsdb-server``) and the ``ovn-northd`` service. Only a single
instance of the ``ovsdb-server`` and ``ovn-northd`` services can operate
in a deployment. However, deployment tools can implement active/passive
high availability using a management tool that monitors service health
and automatically starts these services on another node after failure of
the primary node. See the :doc:`/ovn/faq/index` for more information.

#. Install the ``openvswitch-ovn`` and ``networking-ovn`` packages.

#. Start the OVS service. The central OVS service starts the
   ``ovsdb-server`` service that manages OVN databases.

   Using the *systemd* unit:

   .. code-block:: console

      # systemctl start openvswitch

   Using the ``ovs-ctl`` script:

   .. code-block:: console

      # /usr/share/openvswitch/scripts/ovs-ctl start --system-id="random"

#. Configure the ``ovsdb-server`` component. By default, the
   ``ovsdb-server`` service only permits local access to databases via
   Unix socket. However, OVN services on compute nodes require access to
   these databases.

   * Permit remote database access.

     .. code-block:: console

        # ovn-nbctl set-connection ptcp:6641:0.0.0.0 -- \
          set connection . inactivity_probe=60000
        # ovn-sbctl set-connection ptcp:6642:0.0.0.0 -- \
          set connection . inactivity_probe=60000
        # if using the VTEP functionality:
        # ovs-appctl -t ovsdb-server ovsdb-server/add-remote ptcp:6640:0.0.0.0

     Replace ``0.0.0.0`` with the IP address of the management network
     interface on the controller node to avoid listening on all
     interfaces.

     .. note:: Permit remote access to the following TCP ports: 6640
        (OVS) to VTEPs (if you use VTEPs); 6642 (SBDB) to hosts running
        neutron-server, gateway nodes that run ovn-controller, and
        compute node services such as ovn-controller and
        ovn-metadata-agent; 6641 (NBDB) to hosts running neutron-server.
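     How you open these ports is deployment specific and not prescribed
     by this guide. As a rough sketch with plain iptables -- the source
     network below is illustrative and should match your management
     network:

     .. code-block:: console

        # iptables -I INPUT -p tcp -s 10.0.0.0/24 --dport 6641 -j ACCEPT
        # iptables -I INPUT -p tcp -s 10.0.0.0/24 --dport 6642 -j ACCEPT
        # if using the VTEP functionality:
        # iptables -I INPUT -p tcp -s 10.0.0.0/24 --dport 6640 -j ACCEPT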
#. Start the ``ovn-northd`` service.

   Using the *systemd* unit:

   .. code-block:: console

      # systemctl start ovn-northd

   Using the ``ovn-ctl`` script:

   .. code-block:: console

      # /usr/share/openvswitch/scripts/ovn-ctl start_northd

   Options for *start_northd*:

   .. code-block:: console

      # /usr/share/openvswitch/scripts/ovn-ctl start_northd --help
      # ...
      # DB_NB_SOCK="/usr/local/etc/openvswitch/nb_db.sock"
      # DB_NB_PID="/usr/local/etc/openvswitch/ovnnb_db.pid"
      # DB_SB_SOCK="/usr/local/etc/openvswitch/sb_db.sock"
      # DB_SB_PID="/usr/local/etc/openvswitch/ovnsb_db.pid"
      # ...

#. Configure the Networking server component. The Networking service
   implements OVN as an ML2 driver. Edit the ``/etc/neutron/neutron.conf``
   file:

   * Enable the ML2 core plug-in.

     .. code-block:: ini

        [DEFAULT]
        ...
        core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin

   * Enable the OVN layer-3 service.

     .. code-block:: ini

        [DEFAULT]
        ...
        service_plugins = networking_ovn.l3.l3_ovn.OVNL3RouterPlugin

#. Configure the ML2 plug-in. Edit the
   ``/etc/neutron/plugins/ml2/ml2_conf.ini`` file:

   * Configure the OVN mechanism driver, network type drivers,
     self-service (tenant) network types, and enable the port security
     extension.

     .. code-block:: ini

        [ml2]
        ...
        mechanism_drivers = ovn
        type_drivers = local,flat,vlan,geneve
        tenant_network_types = geneve
        extension_drivers = port_security
        overlay_ip_version = 4

     .. note:: To enable VLAN self-service networks, make sure that OVN
        version 2.11 (or higher) is used, then add ``vlan`` to the
        ``tenant_network_types`` option. The first network type in the
        list becomes the default self-service network type.

        To use IPv6 for all overlay (tunnel) network endpoints, set the
        ``overlay_ip_version`` option to ``6``.

   * Configure the Geneve ID range and maximum header size. The IP
     version overhead (20 bytes for IPv4 (default) or 40 bytes for IPv6)
     is added to the maximum header size based on the ML2
     ``overlay_ip_version`` option.

     .. code-block:: ini

        [ml2_type_geneve]
        ...
        vni_ranges = 1:65536
        max_header_size = 38

     .. note:: The Networking service uses the ``vni_ranges`` option to
        allocate network segments. However, OVN ignores the actual
        values. Thus, the ID range only determines the quantity of Geneve
        networks in the environment. For example, a range of ``5001:6000``
        defines a maximum of 1000 Geneve networks.
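     As a concrete sketch of that sizing note, a deployment that wants to
     cap itself at 1000 Geneve networks could use:

     .. code-block:: ini

        [ml2_type_geneve]
        ...
        vni_ranges = 5001:6000
        max_header_size = 38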
   * Optionally, enable support for VLAN provider and self-service
     networks on one or more physical networks. If you specify only the
     physical network, only administrative (privileged) users can manage
     VLAN networks. Additionally specifying a VLAN ID range for a
     physical network enables regular (non-privileged) users to manage
     VLAN networks. The Networking service allocates the VLAN ID for each
     self-service network using the VLAN ID range for the physical
     network.

     .. code-block:: ini

        [ml2_type_vlan]
        ...
        network_vlan_ranges = PHYSICAL_NETWORK:MIN_VLAN_ID:MAX_VLAN_ID

     Replace ``PHYSICAL_NETWORK`` with the physical network name and
     optionally define the minimum and maximum VLAN IDs. Use a comma to
     separate each physical network. For example, to enable support for
     administrative VLAN networks on the ``physnet1`` network and
     self-service VLAN networks on the ``physnet2`` network using VLAN
     IDs 1001 to 2000:

     .. code-block:: ini

        network_vlan_ranges = physnet1,physnet2:1001:2000

   * Enable security groups.

     .. code-block:: ini

        [securitygroup]
        ...
        enable_security_group = true

     .. note:: The ``firewall_driver`` option under ``[securitygroup]``
        is ignored since the OVN ML2 driver itself handles security
        groups.

   * Configure OVS database access and the L3 scheduler.

     .. code-block:: ini

        [ovn]
        ...
        ovn_nb_connection = tcp:IP_ADDRESS:6641
        ovn_sb_connection = tcp:IP_ADDRESS:6642
        ovn_l3_scheduler = OVN_L3_SCHEDULER

     .. note:: Replace ``IP_ADDRESS`` with the IP address of the
        controller node that runs the ``ovsdb-server`` service. Replace
        ``OVN_L3_SCHEDULER`` with ``leastloaded`` if you want the
        scheduler to select a compute node with the least number of
        gateway ports, or ``chance`` if you want the scheduler to
        randomly select a compute node from the available list of compute
        nodes.

   * Set ``enable-chassis-as-gw`` in ``ovn-cms-options`` in the
     ``external_ids`` column of the ``Open_vSwitch`` table. If the
     chassis also has proper bridge mappings, it will then be selected
     for scheduling gateway routers.

     .. code-block:: console

        # ovs-vsctl set open . external-ids:ovn-cms-options=enable-chassis-as-gw
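     To confirm the option was stored, you can read it back -- a quick
     check; the quoting in the output may vary with the OVS version:

     .. code-block:: console

        # ovs-vsctl get open . external-ids:ovn-cms-options
        "enable-chassis-as-gw"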
#. Start the ``neutron-server`` service.

Network nodes
-------------

Deployments using OVN native layer-3 and DHCP services do not require
conventional network nodes because connectivity to external networks
(including VTEP gateways) and routing occurs on compute nodes.

Compute nodes
-------------

Each compute node runs the OVS and ``ovn-controller`` services. The
``ovn-controller`` service replaces the conventional OVS layer-2 agent.

#. Install the ``openvswitch-ovn`` and ``networking-ovn`` packages.

#. Start the OVS service.

   Using the *systemd* unit:

   .. code-block:: console

      # systemctl start openvswitch

   Using the ``ovs-ctl`` script:

   .. code-block:: console

      # /usr/share/openvswitch/scripts/ovs-ctl start --system-id="random"

#. Configure the OVS service.

   * Use OVS databases on the controller node.

     .. code-block:: console

        # ovs-vsctl set open . external-ids:ovn-remote=tcp:IP_ADDRESS:6642

     Replace ``IP_ADDRESS`` with the IP address of the controller node
     that runs the ``ovsdb-server`` service.

   * Enable one or more overlay network protocols. At a minimum, OVN
     requires enabling the ``geneve`` protocol. Deployments using VTEP
     gateways should also enable the ``vxlan`` protocol.

     .. code-block:: console

        # ovs-vsctl set open . external-ids:ovn-encap-type=geneve,vxlan

     .. note:: Deployments without VTEP gateways can safely enable both
        protocols.

   * Configure the overlay network local endpoint IP address.

     .. code-block:: console

        # ovs-vsctl set open . external-ids:ovn-encap-ip=IP_ADDRESS

     Replace ``IP_ADDRESS`` with the IP address of the overlay network
     interface on the compute node.

#. Start the ``ovn-controller`` service.

   Using the *systemd* unit:

   .. code-block:: console

      # systemctl start ovn-controller

   Using the ``ovn-ctl`` script:

   .. code-block:: console

      # /usr/share/openvswitch/scripts/ovn-ctl start_controller

Verify operation
----------------

#. Each compute node should contain an ``ovn-controller`` instance.

   .. code-block:: console

      # ovn-sbctl show
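   Purely as an illustration -- the hostname, UUID, and address below are
   made up, and the exact layout depends on the OVN version -- each
   registered compute node should appear as a chassis, along these lines:

   .. code-block:: console

      Chassis "b1bb0e2d-c14a-4d93-a308-5a3a25b34c7e"
          hostname: "compute1"
          Encap geneve
              ip: "192.0.2.21"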
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/ovn/tripleo_install.rst0000644000175000017500000002430600000000000025423 0ustar00coreycorey00000000000000.. _tripleo_install:

=============================
TripleO/RDO based deployments
=============================

`TripleO `_ is a project aimed at installing, upgrading and operating
OpenStack clouds using OpenStack's own cloud facilities as the
foundation.

`RDO `_ is the OpenStack distribution that runs on top of CentOS, and can
be deployed via TripleO.

`TripleO Quickstart`_ is an easy way to try out TripleO in a libvirt
virtualized environment. In this document we will stick to the details of
installing a 3 controller + 1 compute in high availability through
TripleO Quickstart, but the non-quickstart details in this document also
work with TripleO.

.. _`TripleO Quickstart`: https://github.com/openstack/tripleo-quickstart/blob/master/README.rst

.. note:: This deployment requires 32GB for the VMs, so your host should
   have at least 32GB of RAM. If you have only 32GB, we recommend
   trimming down the compute node memory in
   ``config/nodes/3ctlr_1comp.yml`` to 2GB and the controller nodes to
   5GB.

Deployment steps
================

#. Download the quickstart.sh script with curl:

   .. code-block:: console

      $ curl -O https://raw.githubusercontent.com/openstack/tripleo-quickstart/master/quickstart.sh

#. Install the necessary dependencies by running:

   .. code-block:: console

      $ bash quickstart.sh --install-deps

#. Clone the tripleo-quickstart and neutron repositories:

   .. code-block:: console

      $ git clone https://opendev.org/openstack/tripleo-quickstart
      $ git clone https://opendev.org/openstack/neutron

#. Once you're done, run quickstart as follows (3 controller HA + 1
   compute):

   .. code-block:: console

      # Exporting the tags is a workaround until the bug
      # https://bugs.launchpad.net/tripleo/+bug/1737602 is resolved
      $ export ansible_tags="untagged,provision,environment,libvirt,\
      undercloud-scripts,undercloud-inventory,overcloud-scripts,\
      undercloud-setup,undercloud-install,undercloud-post-install,\
      overcloud-prep-config"

      $ bash ./quickstart.sh --tags $ansible_tags --teardown all \
          --release master-tripleo-ci \
          --nodes tripleo-quickstart/config/nodes/3ctlr_1comp.yml \
          --config neutron/tools/tripleo/ovn.yml \
          $VIRTHOST

   .. note:: When deploying directly on ``localhost`` use the loopback
      address 127.0.0.2 as your $VIRTHOST. The loopback address 127.0.0.1
      is reserved by ansible. Also make sure that 127.0.0.2 is accessible
      via public keys::

          $ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys

   .. note:: You can adjust RAM/VCPUs if you want by editing
      *config/nodes/3ctlr_1comp.yml* before running the above command. If
      you have enough memory, stick to the defaults. We recommend using
      8GB of RAM for the controller nodes.

#. When quickstart has finished you will have 5 VMs ready to be used: 1
   for the undercloud (TripleO's node to deploy your openstack from), 3
   VMs for controller nodes and 1 VM for the compute node.

#. Log in to the undercloud:

   .. code-block:: console

      $ ssh -F ~/.quickstart/ssh.config.ansible undercloud

#. Prepare overcloud container images:

   .. code-block:: console

      [stack@undercloud ~]$ ./overcloud-prep-containers.sh

#. Run inside the undercloud:

   .. code-block:: console

      [stack@undercloud ~]$ ./overcloud-deploy.sh

#. Grab a coffee; that may take around 1 hour (depending on your
   hardware).

#. If anything goes wrong, go to IRC on freenode, and ask on #oooq

Description of the environment
==============================

Once deployed, two files are present inside the undercloud root
directory: stackrc and overcloudrc, which will let you connect to the
APIs of the undercloud (managing the openstack node), and to the
overcloud (where your instances would live).

We can find the existing controllers/computes this way:

.. code-block:: console

   [stack@undercloud ~]$ source stackrc
   (undercloud) [stack@undercloud ~]$ openstack server list -c Name -c Networks -c Flavor
   +-------------------------+------------------------+--------------+
   | Name                    | Networks               | Flavor       |
   +-------------------------+------------------------+--------------+
   | overcloud-controller-1  | ctlplane=192.168.24.16 | oooq_control |
   | overcloud-controller-0  | ctlplane=192.168.24.14 | oooq_control |
   | overcloud-controller-2  | ctlplane=192.168.24.12 | oooq_control |
   | overcloud-novacompute-0 | ctlplane=192.168.24.13 | oooq_compute |
   +-------------------------+------------------------+--------------+

Network architecture of the environment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. image:: figures/tripleo-ovn-arch.png
   :alt: TripleO Quickstart single NIC with vlans
   :align: center

Connecting to one of the nodes via ssh
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

We can connect to the IP address in the `openstack server list` we showed
before.

.. code-block:: console

   (undercloud) [stack@undercloud ~]$ ssh heat-admin@192.168.24.16
   Last login: Wed Feb 21 14:11:40 2018 from 192.168.24.1

   [heat-admin@overcloud-controller-1 ~]$ ps fax | grep ovn-controller
   20422 ?        S
cirros-0.4.0-x86_64-disk.img

openstack image create "cirros" --file cirros-0.4.0-x86_64-disk.img \
    --disk-format qcow2 --container-format bare --public

openstack network create public --provider-physical-network datacentre \
    --provider-network-type vlan \
    --provider-segment 10 \
    --external --share

openstack subnet create --network public public --subnet-range 10.0.0.0/24 \
    --allocation-pool start=10.0.0.20,end=10.0.0.250 \
    --dns-nameserver 8.8.8.8 --gateway 10.0.0.1 \
    --no-dhcp

openstack network create private

openstack subnet create --network private private \
    --subnet-range 192.168.99.0/24

openstack router create router1

openstack router set --external-gateway public router1
openstack router add subnet router1 private

openstack security group create test
openstack security group rule create --ingress --protocol tcp \
    --dst-port 22 test
openstack security group rule create --ingress --protocol icmp test
openstack security group rule create --egress test

openstack flavor create m1.tiny --disk 1 --vcpus 1 --ram 64

PRIV_NET=$(openstack network show private -c id -f value)

openstack server create --flavor m1.tiny --image cirros \
    --nic net-id=$PRIV_NET --security-group test \
    --wait cirros

openstack floating ip create --floating-ip-address 10.0.0.130 public
openstack server add floating ip cirros 10.0.0.130

.. note:: You can now log in to the instance if you want. In a
   CirrOS >0.4.0 image, the login account is cirros. The password is
   *gocubsgo*.

.. code-block:: console

   (overcloud) [stack@undercloud ~]$ ssh cirros@10.0.0.130
   cirros@10.0.0.130's password:

   $ ip a | grep eth0 -A 10
   2: eth0: mtu 1442 qdisc pfifo_fast qlen 1000
       link/ether fa:16:3e:85:b4:66 brd ff:ff:ff:ff:ff:ff
       inet 192.168.99.5/24 brd 192.168.99.255 scope global eth0
          valid_lft forever preferred_lft forever
       inet6 fe80::f816:3eff:fe85:b466/64 scope link
          valid_lft forever preferred_lft forever

   $ ping 10.0.0.1
   PING 10.0.0.1 (10.0.0.1): 56 data bytes
   64 bytes from 10.0.0.1: seq=0 ttl=63 time=2.145 ms
   64 bytes from 10.0.0.1: seq=1 ttl=63 time=1.025 ms
   64 bytes from 10.0.0.1: seq=2 ttl=63 time=0.836 ms
   ^C
   --- 10.0.0.1 ping statistics ---
   3 packets transmitted, 3 packets received, 0% packet loss
   round-trip min/avg/max = 0.836/1.335/2.145 ms

   $ ping 8.8.8.8
   PING 8.8.8.8 (8.8.8.8): 56 data bytes
   64 bytes from 8.8.8.8: seq=0 ttl=52 time=3.943 ms
   64 bytes from 8.8.8.8: seq=1 ttl=52 time=4.519 ms
   64 bytes from 8.8.8.8: seq=2 ttl=52 time=3.778 ms

   $ curl http://169.254.169.254/2009-04-04/meta-data/instance-id
   i-00000002

././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.1750436 neutron-16.0.0.0b2.dev214/doc/source/install/shared/0000755000175000017500000000000000000000000022124 5ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/shared/edit_hosts_file.txt0000644000175000017500000000136600000000000026037 0ustar00coreycorey00000000000000Edit the ``/etc/hosts`` file to contain the following:

.. path /etc/hosts
.. code-block:: none

   # controller
   10.0.0.11       controller

   # compute1
   10.0.0.31       compute1

   # block1
   10.0.0.41       block1

   # object1
   10.0.0.51       object1

   # object2
   10.0.0.52       object2

.. end

.. warning:: Some distributions add an extraneous entry in the
   ``/etc/hosts`` file that resolves the actual hostname to another
   loopback IP address such as ``127.0.1.1``. You must comment out or
   remove this entry to prevent name resolution problems.
**Do not remove the 127.0.0.1 entry.**

.. note:: This guide includes host entries for optional services in
   order to reduce complexity should you choose to deploy them.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/shared/note_configuration_vary_by_distribution.rst0000644000175000017500000000046600000000000033112 0ustar00coreycorey00000000000000.. note:: Default configuration files vary by distribution. You might
   need to add these sections and options rather than modifying existing
   sections and options. Also, an ellipsis (``...``) in the configuration
   snippets indicates potential default configuration options that you
   should retain.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/verify-option1.rst0000644000175000017500000000270300000000000024305 0ustar00coreycorey00000000000000Networking Option 1: Provider networks
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* List agents to verify successful launch of the neutron agents:

  .. code-block:: console

     $ openstack network agent list

     +--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
     | ID                                   | Agent Type         | Host       | Availability Zone | Alive | State | Binary                    |
     +--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
     | 0400c2f6-4d3b-44bc-89fa-99093432f3bf | Metadata agent     | controller | None              | True  | UP    | neutron-metadata-agent    |
     | 83cf853d-a2f2-450a-99d7-e9c6fc08f4c3 | DHCP agent         | controller | nova              | True  | UP    | neutron-dhcp-agent        |
     | ec302e51-6101-43cf-9f19-88a78613cbee | Linux bridge agent | compute    | None              | True  | UP    | neutron-linuxbridge-agent |
     | fcb9bc6e-22b1-43bc-9054-272dd517d025 | Linux bridge agent | controller | None              | True  | UP    | neutron-linuxbridge-agent |
     +--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+

  .. end

  The output should indicate three agents on the controller node and one
  agent on each compute node.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/verify-option2.rst0000644000175000017500000000313200000000000024303 0ustar00coreycorey00000000000000Networking Option 2: Self-service networks
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* List agents to verify successful launch of the neutron agents:
  .. code-block:: console

     $ openstack network agent list

     +--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
     | ID                                   | Agent Type         | Host       | Availability Zone | Alive | State | Binary                    |
     +--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
     | f49a4b81-afd6-4b3d-b923-66c8f0517099 | Metadata agent     | controller | None              | True  | UP    | neutron-metadata-agent    |
     | 27eee952-a748-467b-bf71-941e89846a92 | Linux bridge agent | controller | None              | True  | UP    | neutron-linuxbridge-agent |
     | 08905043-5010-4b87-bba5-aedb1956e27a | Linux bridge agent | compute1   | None              | True  | UP    | neutron-linuxbridge-agent |
     | 830344ff-dc36-4956-84f4-067af667a0dc | L3 agent           | controller | nova              | True  | UP    | neutron-l3-agent          |
     | dd3644c9-1a3a-435a-9282-eb306b4b0391 | DHCP agent         | controller | nova              | True  | UP    | neutron-dhcp-agent        |
     +--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+

  .. end

  The output should indicate four agents on the controller node and one
  agent on each compute node.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/install/verify.rst0000644000175000017500000002065700000000000022722 0ustar00coreycorey00000000000000Verify operation
~~~~~~~~~~~~~~~~

.. note:: Perform these commands on the controller node.

#. Source the ``admin`` credentials to gain access to admin-only CLI
   commands:

   .. code-block:: console

      $ . admin-openrc

   .. end

#. List loaded extensions to verify successful launch of the
   ``neutron-server`` process:

   .. code-block:: console

      $ openstack extension list --network

      +---------------------------+---------------------------+----------------------------+
      | Name                      | Alias                     | Description                |
      +---------------------------+---------------------------+----------------------------+
      | Default Subnetpools       | default-subnetpools       | Provides ability to mark   |
      |                           |                           | and use a subnetpool as    |
      |                           |                           | the default                |
      | Availability Zone         | availability_zone         | The availability zone      |
      |                           |                           | extension.                 |
      | Network Availability Zone | network_availability_zone | Availability zone support  |
      |                           |                           | for network.               |
      | Port Binding              | binding                   | Expose port bindings of a  |
      |                           |                           | virtual port to external   |
      |                           |                           | application                |
      | agent                     | agent                     | The agent management       |
      |                           |                           | extension.                 |
      | Subnet Allocation         | subnet_allocation         | Enables allocation of      |
      |                           |                           | subnets from a subnet pool |
      | DHCP Agent Scheduler      | dhcp_agent_scheduler      | Schedule networks among    |
      |                           |                           | dhcp agents                |
      | Neutron external network  | external-net              | Adds external network      |
      |                           |                           | attribute to network       |
      |                           |                           | resource.                  |
      | Neutron Service Flavors   | flavors                   | Flavor specification for   |
      |                           |                           | Neutron advanced services  |
      | Network MTU               | net-mtu                   | Provides MTU attribute for |
      |                           |                           | a network resource.        |
      | Network IP Availability   | network-ip-availability   | Provides IP availability   |
      |                           |                           | data for each network and  |
      |                           |                           | subnet.                    |
      | Quota management support  | quotas                    | Expose functions for       |
      |                           |                           | quotas management per      |
      |                           |                           | tenant                     |
      | Provider Network          | provider                  | Expose mapping of virtual  |
      |                           |                           | networks to physical       |
      |                           |                           | networks                   |
      | Multi Provider Network    | multi-provider            | Expose mapping of virtual  |
      |                           |                           | networks to multiple       |
      |                           |                           | physical networks          |
      | Address scope             | address-scope             | Address scopes extension.  |
      | Subnet service types      | subnet-service-types      | Provides ability to set    |
      |                           |                           | the subnet service_types   |
      |                           |                           | field                      |
      | Resource timestamps       | standard-attr-timestamp   | Adds created_at and        |
      |                           |                           | updated_at fields to all   |
      |                           |                           | Neutron resources that     |
      |                           |                           | have Neutron standard      |
      |                           |                           | attributes.                |
      | Neutron Service Type      | service-type              | API for retrieving service |
      | Management                |                           | providers for Neutron      |
      |                           |                           | advanced services          |
      | Tag support for           | standard-attr-tag         | Enables to set tag on      |
      | resources: subnet,        |                           | more L2 and L3 resources.  |
      | subnetpool, port, router  |                           |                            |
      | Neutron Extra DHCP opts   | extra_dhcp_opt            | Extra options              |
      |                           |                           | configuration for DHCP.    |
      |                           |                           | For example PXE boot       |
      |                           |                           | options to DHCP clients    |
      |                           |                           | can be specified (e.g.     |
      |                           |                           | tftp-server, server-ip-    |
      |                           |                           | address, bootfile-name)    |
      | Resource revision numbers | standard-attr-revisions   | This extension will        |
      |                           |                           | display the revision       |
      |                           |                           | number of neutron          |
      |                           |                           | resources.                 |
      | Pagination support        | pagination                | Extension that indicates   |
      |                           |                           | that pagination is         |
      |                           |                           | enabled.                   |
      | Sorting support           | sorting                   | Extension that indicates   |
      |                           |                           | that sorting is enabled.   |
      | security-group            | security-group            | The security groups        |
      |                           |                           | extension.                 |
      | RBAC Policies             | rbac-policies             | Allows creation and        |
      |                           |                           | modification of policies   |
      |                           |                           | that control tenant access |
      |                           |                           | to resources.              |
      | standard-attr-description | standard-attr-description | Extension to add           |
      |                           |                           | descriptions to standard   |
      |                           |                           | attributes                 |
      | Port Security             | port-security             | Provides port security     |
      | Allowed Address Pairs     | allowed-address-pairs     | Provides allowed address   |
      |                           |                           | pairs                      |
      | project_id field enabled  | project-id                | Extension that indicates   |
      |                           |                           | that project_id field is   |
      |                           |                           | enabled.                   |
      +---------------------------+---------------------------+----------------------------+

   .. end

   .. note:: Actual output may differ slightly from this example.

You can perform further testing of your networking using the
`neutron-sanity-check command line client `_.
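For example -- a sketch; point ``--config-file`` at your actual neutron
configuration:

.. code-block:: console

   # neutron-sanity-check --config-file /etc/neutron/neutron.conf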
Use the verification section for the networking option that you chose to
deploy.

.. toctree::

   verify-option1.rst
   verify-option2.rst

././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.1750436 neutron-16.0.0.0b2.dev214/doc/source/ovn/0000755000175000017500000000000000000000000020012 5ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.1750436 neutron-16.0.0.0b2.dev214/doc/source/ovn/faq/0000755000175000017500000000000000000000000020561 5ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/ovn/faq/index.rst0000644000175000017500000001431700000000000022430 0ustar00coreycorey00000000000000.. _ovn_faq:

==========================
Frequently Asked Questions
==========================

**Q: What are the key differences between ML2/ovs and ML2/ovn?**

+---------------+---------------------------+--------------------------------+
| Detail        | ml2/ovs                   | ml2/ovn                        |
+===============+===========================+================================+
| agent/server  | rabbit mq messaging + RPC.| ovsdb protocol on the          |
| communication |                           | NorthBound and SouthBound      |
|               |                           | databases.                     |
+---------------+---------------------------+--------------------------------+
| l3ha          | routers expose an "ha"    | routers don't expose an "ha"   |
| API           | field that can be disabled| field, and will make use of HA |
|               | or enabled by admin with a| as soon as there is more than  |
|               | deployment default.       | one network node available.    |
+---------------+---------------------------+--------------------------------+
| l3ha          | qrouter namespace with    | ovn-controller configures      |
| dataplane     | keepalived process and an | specific OpenFlow rules, and   |
|               | internal ha network for   | enables BFD protocol over      |
|               | VRRP traffic.             | tunnel endpoints to detect     |
|               |                           | connectivity issues to nodes.  |
+---------------+---------------------------+--------------------------------+
| DVR           | exposes the "distributed" | no "distributed" flag is shown |
| API           | flag on routers only      | or available on routers via    |
|               | modifiable by admin.      | API.                           |
+---------------+---------------------------+--------------------------------+
| DVR           | uses namespaces, veths,   | Uses OpenFlow rules on the     |
| dataplane     | ip routing, ip rules and  | compute nodes.                 |
|               | iptables on the compute   |                                |
|               | nodes.                    |                                |
+---------------+---------------------------+--------------------------------+
| E/W traffic   | goes through network nodes| completely distributed in      |
|               | when the router is not    | all cases.                     |
|               | distributed (DVR).        |                                |
+---------------+---------------------------+--------------------------------+
| Metadata      | Metadata service is       | Metadata is completely         |
| Service       | provided by the qrouters  | distributed across compute     |
|               | or dhcp namespaces in the | nodes, and served from the     |
|               | network nodes.            | ovnmeta-xxxxx-xxxx namespace.  |
+---------------+---------------------------+--------------------------------+
| DHCP          | DHCP is provided via      | DHCP is provided by OpenFlow   |
| Service       | qdhcp-xxxxx-xxx namespaces| and ovn-controller, being      |
|               | which run dnsmasq inside. | distributed across computes.   |
+---------------+---------------------------+--------------------------------+
| Trunk         | Trunk ports are built     | Trunk ports live in br-int     |
| Ports         | by creating br-trunk-xxx  | as OpenFlow rules, while       |
|               | bridges and patch ports.  | subports are directly attached |
|               |                           | to br-int.                     |
+---------------+---------------------------+--------------------------------+

**Q: Why can't I use the distributed or ha flags of routers?**

Networking OVN implements HA and distributed routing in a transparent way
for the administrator and users. HA will be automatically used on routers
as soon as more than two gateway nodes are detected. Distributed floating
IPs will be used as soon as they are configured (see next question).

**Q: Does OVN support DVR or distributed L3 routing?**

Yes, it's controlled by a single flag in configuration. DVR will be used
for floating IPs if the ``[ovn] enable_distributed_floating_ip`` flag is
set to ``True`` in the neutron server configuration; it is a
deployment-wide setting. This is in contrast to ML2/OVS, where an admin
could set it per router. The ovn driver does not expose the
"distributed" flag of routers through the API.
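A minimal sketch of that setting in the neutron server configuration:

.. code-block:: ini

   [ovn]
   enable_distributed_floating_ip = True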
**Q: Does OVN support integration with physical switches?**

OVN currently integrates with physical switches by optionally using them
as VTEP gateways from logical to physical networks, and via integrations
provided by the Neutron ML2 framework, hierarchical port binding.

**Q: What's the status of HA for ovn driver and OVN?**

Typically, multiple copies of neutron-server are run across multiple
servers behind a load balancer. The neutron ML2 mechanism driver provided
by ovn driver supports this deployment model. DHCP and metadata services
are distributed across compute nodes, and don't depend on the network
nodes.

The network controller portion of OVN is distributed - an instance of the
ovn-controller service runs on every hypervisor. OVN also includes some
central components for control purposes.

ovn-northd is a centralized service that does some translation between
the northbound and southbound databases in OVN. Currently, you only run
this service once. You can manage it in an active/passive HA mode using
something like Pacemaker. The OVN project plans to allow this service to
be horizontally scaled both for scaling and HA reasons. This will allow
it to be run in an active/active HA mode.

OVN also makes use of ovsdb-server for the OVN northbound and southbound
databases. ovsdb-server supports active/passive HA using replication.
For more information, see:
http://docs.openvswitch.org/en/latest/topics/ovsdb-replication/

A typical deployment would use something like Pacemaker to manage the
active/passive HA process. Clients would be pointed at a virtual IP
address. When the HA manager detects a failure of the master, the virtual
IP would be moved and the passive replica would become the new master.

See :doc:`/admin/ovn/ovn` for links to more details on OVN's
architecture.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/ovn/gaps.rst0000644000175000017500000000522600000000000021503 0ustar00coreycorey00000000000000.. _ovn_gaps:

Gaps from ML2/OVS
=================

This is a list of some of the currently known gaps between ML2/OVS and
OVN. It is not a complete list, but it is enough to be used as a starting
point for implementors working on closing these gaps. A TODO list for OVN
is located at [1]_.

* Port forwarding

  Currently ML2/OVS supports Port Forwarding in the North/South plane.
  Specific L4 ports of the floating IP can be directed to a specific
  FixedIP:PortNumber of a VM, so that different services running in a VM
  can be isolated, and can communicate with external networks easily.

  This is a relatively new extension; support would need to be added to
  OVN. One possible way would be to use the OVN native load balancing
  feature. An OVN load balancer is expressed in the OVN northbound
  load_balancer table. Normally the VIP and its members are expressed
  as [2]_:

  .. code-block:: console

     VIP:PORT = MEMBER1:MPORT1, MEMBER2:MPORT2

  The same could be extended for port forwarding as:

  .. code-block:: console

     FIP:PORT = PRIVATE_IP:PRIV_PORT
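  Purely as an illustration of that idea -- this is not an implemented
  Neutron feature, and the load-balancer name and addresses below are
  made up -- such a mapping could be created with the existing OVN CLI:

  .. code-block:: console

     # ovn-nbctl lb-add pf-fip-lb 203.0.113.10:2222 192.168.99.5:22 tcp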
* Security Groups logging API

  Currently ML2/OVS, with the OpenvSwitch firewall, supports a log file
  where security group events are logged to be consumed by a security
  entity. This gives users a way to check whether an instance is trying
  to execute restricted operations, or to access restricted ports in
  remote servers.

  This is a relatively new extension; support would need to be added to
  OVN.

* QoS DSCP support

  Currently ML2/OVS supports QoS DSCP tagging and egress bandwidth
  limiting. Those are basic QoS features that, while integrated in the
  OVS/OVN C core, are not integrated (or fully tested) in the neutron OVN
  mechanism driver.

* QoS for Layer 3 IPs

  Currently the Neutron L3-agent supports floating IP and gateway IP
  bandwidth limiting based on Linux TC. Networking-ovn L3 had a prototype
  implementation [3]_, based on the Open vSwitch meter [4]_ utility, that
  has since been abandoned. Meters are supported in the userspace
  datapath only, or with kernel versions 4.15+ [5]_.

* QoS Minimum Bandwidth support

  Currently ML2/OVS supports QoS Minimum Bandwidth limiting, but it is
  not supported in OVN.

* BGP support

  Currently ML2/OVS supports making a tenant subnet routable via BGP, and
  can announce host routes for both floating and fixed IP addresses.

References
----------

.. [1] https://github.com/ovn-org/ovn/blob/master/TODO.rst
.. [2] https://github.com/ovn-org/ovn/blob/master/ovn-nb.ovsschema#L160
.. [3] https://review.opendev.org/#/c/539826/
.. [4] https://github.com/openvswitch/ovs/commit/66d89287269ca7e2f7593af0920e910d7f9bcc38
.. [5] https://github.com/torvalds/linux/blob/master/net/openvswitch/meter.h

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/ovn/index.rst0000644000175000017500000000025700000000000021657 0ustar00coreycorey00000000000000.. meta::
   :keywords: ovn, networking-ovn, OpenStack, neutron

==========
OVN Driver
==========

.. toctree::
   :maxdepth: 1

   migration.rst
   gaps.rst
   faq/index.rst

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/ovn/migration.rst0000644000175000017500000003267400000000000022545 0ustar00coreycorey00000000000000.. _ovn_migration:

Migration Strategy
==================

This document details an in-place migration strategy from ML2/OVS to
ML2/OVN, in either ovs-firewall or ovs-hybrid mode, for a TripleO
OpenStack deployment. For non-TripleO deployments, please refer to the
file ``migration/README.rst`` and the ansible playbook
``migration/migrate-to-ovn.yml``.

Overview
--------

The migration process is orchestrated through the shell script
ovn_migration.sh, which is provided with the OVN driver.

The administrator uses ovn_migration.sh to perform readiness steps and
migration from the undercloud node. The readiness steps, such as host
inventory production, DHCP and MTU adjustments, prepare the environment
for the procedure. Subsequent steps start the migration via Ansible.

Plan for a 24-hour wait after the setup-mtu-t1 step to allow VMs to catch
up with the new MTU size. The default neutron ML2/OVS configuration has a
dhcp_lease_duration of 86400 seconds (24h).

Also, if there are instances using static IP assignment, the
administrator should be ready to update the MTU of those instances to the
new value of 8 bytes less than the ML2/OVS (VXLAN) MTU value. For
example, the typical 1500 MTU network value that makes VXLAN tenant
networks use 1450 bytes of MTU will need to change to 1442 under Geneve.
Similarly, on the same underlying network, a GRE-encapsulated tenant
network would use an MTU of 1458, which again becomes 1442 under Geneve.

If there are instances which use DHCP but don't support lease update
during the T1 period, the administrator will need to reboot them to
ensure that the MTU is updated inside those instances.

Steps for migration
-------------------

Perform the following steps in the overcloud/undercloud
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

1. Ensure that you have updated to the latest openstack/neutron version.

Perform the following steps in the undercloud
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

1. Install python-networking-ovn-migration-tool.

   .. code-block:: console

      yum install python-networking-ovn-migration-tool

2. Create a working directory on the undercloud, and copy the ansible
   playbooks:

   .. code-block:: console

      mkdir ~/ovn_migration
      cd ~/ovn_migration
      cp -rfp /usr/share/ansible/networking-ovn-migration/playbooks .

3. Create the ``~/overcloud-deploy-ovn.sh`` script in your ``$HOME``.
   This script must source your stackrc file, and then execute an
   ``openstack overcloud deploy`` with your original deployment
   parameters, plus the following environment files, added to the end of
   the command in the following order:

   When your network topology is DVR and your compute nodes have
   connectivity to the external network:

   .. code-block:: console

      -e /usr/share/openstack-tripleo-heat-templates/environments/services/neutron-ovn-dvr-ha.yaml \
      -e $HOME/ovn-extras.yaml

   When your compute nodes don't have external connectivity and you don't
   use DVR:

   .. code-block:: console

      -e /usr/share/openstack-tripleo-heat-templates/environments/services/neutron-ovn-ha.yaml \
      -e $HOME/ovn-extras.yaml

   Make sure that all users have execution privileges on the script,
   because it will be called by ovn_migration.sh/ansible during the
   migration process.

   .. code-block:: console

      $ chmod a+x ~/overcloud-deploy-ovn.sh
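   A minimal sketch of what that script could look like for the DVR case
   -- the ``<your original deployment parameters>`` placeholder stands
   for options this guide cannot know:

   .. code-block:: console

      #!/bin/bash
      source ~/stackrc
      openstack overcloud deploy \
          --templates \
          <your original deployment parameters> \
          -e /usr/share/openstack-tripleo-heat-templates/environments/services/neutron-ovn-dvr-ha.yaml \
          -e $HOME/ovn-extras.yaml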
4. To configure the parameters of your migration you can set the
   environment variables that will be used by ``ovn_migration.sh``. You
   can skip setting any values matching the defaults.

   * STACKRC_FILE - must point to your stackrc file in your undercloud.
     Default: ~/stackrc

   * OVERCLOUDRC_FILE - must point to your overcloudrc file in your
     undercloud. Default: ~/overcloudrc

   * OVERCLOUD_OVN_DEPLOY_SCRIPT - must point to the deployment script
     described in step 3. Default: ~/overcloud-deploy-ovn.sh

   * PUBLIC_NETWORK_NAME - Name of your public network. Default:
     'public'. To support migration validation, this network must have
     available floating IPs, and those floating IPs must be pingable from
     the undercloud. If that's not possible please configure
     VALIDATE_MIGRATION to False.

   * IMAGE_NAME - Name/ID of the glance image to use for booting a test
     server. Default: 'cirros'. If the image does not exist it will
     automatically download and use cirros during the pre-validation /
     post-validation process.

   * VALIDATE_MIGRATION - Create migration resources to validate the
     migration. The migration script, before starting the migration,
     boots a server and validates that the server is reachable after the
     migration. Default: True.

   * SERVER_USER_NAME - User name to use for logging into the migration
     instances. Default: 'cirros'.

   * DHCP_RENEWAL_TIME - DHCP renewal time in seconds to configure in the
     DHCP agent configuration file. This renewal time is used only
     temporarily during migration to ensure a synchronized MTU switch
     across the networks. Default: 30

   .. warning:: Please note that VALIDATE_MIGRATION requires enough quota
      (2 available floating ips, 2 networks, 2 subnets, 2 instances, and
      2 routers as admin).

   For example:

   .. code-block:: console

      $ export PUBLIC_NETWORK_NAME=my-public-network
      $ ovn_migration.sh .........

5. Run ``ovn_migration.sh generate-inventory`` to generate the inventory
   file - ``hosts_for_migration`` and ``ansible.cfg``. Please review
   ``hosts_for_migration`` for correctness.

   .. code-block:: console

      $ ovn_migration.sh generate-inventory

   At this step the script will inspect the TripleO ansible inventory and
   generate an inventory of hosts, specifically tagged to work with the
   migration playbooks.

6. Run ``ovn_migration.sh setup-mtu-t1``:

   .. code-block:: console

      $ ovn_migration.sh setup-mtu-t1

   This lowers the T1 parameter of the internal neutron DHCP servers by
   configuring ``dhcp_renewal_time`` in
   /var/lib/config-data/puppet-generated/neutron/etc/neutron/dhcp_agent.ini
   on all the nodes where the DHCP agent is running.

   We lower the T1 parameter to make sure that the instances start
   refreshing the DHCP lease quicker (every 30 seconds by default) during
   the migration process. The reason we force this is to make sure that
   the MTU update happens quickly across the network during step 8. This
   is very important because during those 30 seconds there will be
   connectivity issues with bigger packets (MTU mismatches across the
   network). This is also why step 7 is very important: even though we
   reduce T1, the previous T1 value the instances leased from the DHCP
   server will be much higher (24h by default), and we need to wait those
   24h to make sure they have updated T1. After migration, the DHCP T1
   parameter returns to normal values.
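   On each DHCP agent node, the net effect is roughly the following
   change in that ``dhcp_agent.ini`` (a sketch -- the script edits the
   file for you, and the exact section placement may vary):

   .. code-block:: ini

      [DEFAULT]
      dhcp_renewal_time = 30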
7. If you are using VXLAN or GRE tenant networking, ``wait at least 24
   hours`` before continuing. This will allow VMs to catch up with the
   new MTU size of the next step.

   .. warning:: If you are using VXLAN or GRE networks, this 24-hour wait
      step is critical. If you are using VLAN tenant networks you can
      proceed to the next step without delay.

   .. warning:: If you have any instance with static IP assignment on
      VXLAN or GRE tenant networks, you must manually modify the
      configuration of those instances to configure the new Geneve MTU,
      which is the current VXLAN MTU minus 8 bytes. For instance, if the
      VXLAN-based MTU was 1450, change it to 1442. If your instances
      don't honor the T1 parameter of DHCP, they will need to be
      rebooted.

   .. note:: 24 hours is the time based on default configuration. It
      actually depends on the
      /var/lib/config-data/puppet-generated/neutron/etc/neutron/dhcp_agent.ini
      dhcp_renewal_time and
      /var/lib/config-data/puppet-generated/neutron/etc/neutron/neutron.conf
      dhcp_lease_duration parameters (the latter defaults to 86400
      seconds).

   .. note:: Please note that migrating a deployment which uses VLAN for
      tenant/project networks is not recommended at this time because of
      a bug in core ovn; full support is being worked out here:
      https://mail.openvswitch.org/pipermail/ovs-dev/2018-May/347594.html

   One way to verify that the T1 parameter has propagated to existing VMs
   is to connect to one of the compute nodes, and run ``tcpdump`` over
   one of the VM taps attached to a tenant network. If T1 propagation was
   a success, you should see that requests happen on an interval of
   approximately 30 seconds.

   .. code-block:: console

      [heat-admin@overcloud-novacompute-0 ~]$ sudo tcpdump -i tap52e872c2-e6 port 67 or port 68 -n
      tcpdump: verbose output suppressed, use -v or -vv for full protocol decode
      listening on tap52e872c2-e6, link-type EN10MB (Ethernet), capture size 262144 bytes
      13:17:28.954675 IP 192.168.99.5.bootpc > 192.168.99.3.bootps: BOOTP/DHCP, Request from fa:16:3e:6b:41:3d, length 300
      13:17:28.961321 IP 192.168.99.3.bootps > 192.168.99.5.bootpc: BOOTP/DHCP, Reply, length 355
      13:17:56.241156 IP 192.168.99.5.bootpc > 192.168.99.3.bootps: BOOTP/DHCP, Request from fa:16:3e:6b:41:3d, length 300
      13:17:56.249899 IP 192.168.99.3.bootps > 192.168.99.5.bootpc: BOOTP/DHCP, Reply, length 355

   .. note:: This verification is not possible with cirros VMs. The
      cirros udhcpc implementation does not obey DHCP option 58 (T1).
      Please try this verification on a port that belongs to a full Linux
      VM. We recommend checking all the different types of workloads your
      system runs (Windows, different flavors of Linux, etc.).
8. Run ``ovn_migration.sh reduce-mtu``.

   This lowers the MTU of the pre-migration VXLAN and GRE networks. The
   tool will ignore non-VXLAN/GRE networks, so if you use VLAN for tenant
   networks it is fine if you find this step not doing anything.

   .. code-block:: console

      $ ovn_migration.sh reduce-mtu

   This step will go network by network reducing the MTU, and tagging
   with ``adapted_mtu`` the networks which have already been handled.
   Every time a network is updated, all the existing L3/DHCP agents
   connected to such network will update their internal leg MTU, and
   instances will start fetching the new MTU as the DHCP T1 timer
   expires. As explained before, instances not obeying the DHCP T1
   parameter will need to be restarted, and instances with static IP
   assignment will need to be manually updated.
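   One way to spot-check the result -- a sketch, assuming a tenant
   network named ``private`` as in the examples above:

   .. code-block:: console

      $ openstack network show private -c mtu -f value
      1442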
9. Make TripleO ``prepare the new container images`` for OVN.

   If your deployment didn't have a containers-prepare-parameter.yaml,
   you can create one with:

   .. code-block:: console

      $ test -f $HOME/containers-prepare-parameter.yaml || \
          openstack tripleo container image prepare default \
          --output-env-file $HOME/containers-prepare-parameter.yaml

   If you had to create the file, please make sure it's included at the
   end of your $HOME/overcloud-deploy-ovn.sh and
   $HOME/overcloud-deploy.sh.

   Change the neutron_driver in the containers-prepare-parameter.yaml
   file to ovn:

   .. code-block:: console

      $ sed -i -E 's/neutron_driver:([ ]\w+)/neutron_driver: ovn/' $HOME/containers-prepare-parameter.yaml

   You can verify with:

   .. code-block:: console

      $ grep neutron_driver $HOME/containers-prepare-parameter.yaml
      neutron_driver: ovn

   Then update the images:

   .. code-block:: console

      $ openstack tripleo container image prepare \
          --environment-file $HOME/containers-prepare-parameter.yaml

   .. note:: It's important to provide the full path to your
      containers-prepare-parameter.yaml, otherwise the command will
      finish very quickly and won't work (the current version doesn't
      seem to output any error).

   During this step TripleO will build a list of containers, pull them
   from the remote registry, and push them to your deployment's local
   registry.

10. Run ``ovn_migration.sh start-migration`` to kick-start the migration
    process.

    .. code-block:: console

       $ ovn_migration.sh start-migration

    During this step, this is what will happen:

    * Create pre-migration resources (network and VM) to validate the
      existing deployment and the final migration.

    * Update the overcloud stack to deploy OVN alongside reference
      implementation services using a temporary bridge "br-migration"
      instead of br-int.

    * Start the migration process:

      1. generate the OVN north db by running neutron-ovn-db-sync util
      2. clone the existing resources from br-int to br-migration, so OVN
         can find the same resource UUIDs over br-migration
      3. re-assign ovn-controller to br-int instead of br-migration
      4. cleanup network namespaces (fip, snat, qrouter, qdhcp)
      5. remove any unnecessary patch ports on br-int
      6. remove br-tun and br-migration ovs bridges
      7. delete qr-*, ha-* and qg-* ports from br-int (via neutron netns
         cleanup)

    * Delete neutron agents and neutron HA internal networks from the
      database via API.

    * Validate connectivity on pre-migration resources.

    * Delete pre-migration resources.

    * Create post-migration resources.

    * Validate connectivity on post-migration resources.

    * Cleanup post-migration resources.

    * Re-run the deployment tool to update OVN on br-int; this step
      ensures that the TripleO database is updated with the final
      integration bridge.

    * Run an extra validation round to ensure the final state of the
      system is fully operational.

Migration is complete!

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/pdf-index.rst0000644000175000017500000000155200000000000021623 0ustar00coreycorey00000000000000:orphan:

..
      Copyright 2011- OpenStack Foundation
      All Rights Reserved.

      Licensed under the Apache License, Version 2.0 (the "License"); you
      may not use this file except in compliance with the License. You may
      obtain a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.

.. include:: _intro.rst

.. toctree::
   :maxdepth: 2

   install/index
   admin/index
   configuration/index
   cli/index
   ovn/index
   reference/rest-api
   feature_classification/index
   contributor/index

././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.1750436 neutron-16.0.0.0b2.dev214/doc/source/reference/0000755000175000017500000000000000000000000021146 5ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/doc/source/reference/rest-api.rst0000644000175000017500000000022200000000000023420 0ustar00coreycorey00000000000000=============
API Reference
=============

The reference of the OpenStack networking API is found at
https://docs.openstack.org/api-ref/network/.
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.1750436 neutron-16.0.0.0b2.dev214/etc/0000755000175000017500000000000000000000000015716 5ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/README.policy.yaml.txt0000644000175000017500000000020600000000000021651 0ustar00coreycorey00000000000000To generate the sample policy.yaml file, run the following command from
the top level of the neutron directory:

    tox -e genpolicy

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/README.txt0000644000175000017500000000046100000000000017415 0ustar00coreycorey00000000000000To generate the sample neutron configuration files, run the following
command from the top level of the neutron directory:

    tox -e genconfig

If a 'tox' environment is unavailable, then you can run the following
script instead to generate the configuration files:

    ./tools/generate_config_file_samples.sh

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/api-paste.ini0000644000175000017500000000271500000000000020307 0ustar00coreycorey00000000000000[composite:neutron]
use = egg:Paste#urlmap
/: neutronversions_composite
/v2.0: neutronapi_v2_0

[composite:neutronapi_v2_0]
use = call:neutron.auth:pipeline_factory
noauth = cors http_proxy_to_wsgi request_id catch_errors osprofiler extensions neutronapiapp_v2_0
keystone = cors http_proxy_to_wsgi request_id catch_errors osprofiler authtoken keystonecontext extensions neutronapiapp_v2_0

[composite:neutronversions_composite]
use = call:neutron.auth:pipeline_factory
noauth = cors http_proxy_to_wsgi neutronversions
keystone = cors http_proxy_to_wsgi neutronversions

[filter:request_id]
paste.filter_factory = oslo_middleware:RequestId.factory

[filter:catch_errors]
paste.filter_factory = oslo_middleware:CatchErrors.factory

[filter:cors]
paste.filter_factory = oslo_middleware.cors:filter_factory
oslo_config_project = neutron

[filter:http_proxy_to_wsgi]
paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory

[filter:keystonecontext]
paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory

[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory

[filter:extensions]
paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory

[app:neutronversions]
paste.app_factory = neutron.pecan_wsgi.app:versions_factory

[app:neutronapiapp_v2_0]
paste.app_factory = neutron.api.v2.router:APIRouter.factory

[filter:osprofiler]
paste.filter_factory = osprofiler.web:WsgiMiddleware.factory

././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982290.9590416 neutron-16.0.0.0b2.dev214/etc/neutron/0000755000175000017500000000000000000000000017410 5ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982290.9590416 neutron-16.0.0.0b2.dev214/etc/neutron/plugins/0000755000175000017500000000000000000000000021071 5ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.1750436 neutron-16.0.0.0b2.dev214/etc/neutron/plugins/ml2/0000755000175000017500000000000000000000000021563 5ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/neutron/plugins/ml2/.placeholder0000644000175000017500000000000000000000000024034 0ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.1790435 neutron-16.0.0.0b2.dev214/etc/neutron/rootwrap.d/0000755000175000017500000000000000000000000021507 5ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/neutron/rootwrap.d/debug.filters0000644000175000017500000000115200000000000024166 0ustar00coreycorey00000000000000# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user

# format seems to be
# cmd-name: filter-name, raw-command, user, args

[Filters]

# This is needed because we should ping
# from inside a namespace which requires root
# _alt variants allow to match -c and -w in any order
# (used by NeutronDebugAgent.ping_all)
ping: CommandFilter, ping, root
ping6: CommandFilter, ping6, root

# "sleep" command, only for testing
sleep: RegExpFilter, sleep, root, sleep, \d+
kill_sleep: KillFilter, root, sleep, -9

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/neutron/rootwrap.d/dhcp.filters0000644000175000017500000000215200000000000024017 0ustar00coreycorey00000000000000# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user

# format seems to be
# cmd-name: filter-name, raw-command, user, args

[Filters]

# dhcp-agent
dnsmasq: CommandFilter, dnsmasq, root
# dhcp-agent uses kill as well, that's handled by the generic KillFilter
# it looks like these are the only signals needed, per
# neutron/agent/linux/dhcp.py
kill_dnsmasq: KillFilter, root, /sbin/dnsmasq, -9, -HUP, -15
kill_dnsmasq_usr: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP, -15
# dnsmasq kill script filter
kill_dnsmasq_script: CommandFilter, dnsmasq-kill, root

ovs-vsctl: CommandFilter, ovs-vsctl, root
mm-ctl: CommandFilter, mm-ctl, root
dhcp_release: CommandFilter, dhcp_release, root
dhcp_release6: CommandFilter, dhcp_release6, root

# haproxy
haproxy: RegExpFilter, haproxy, root, haproxy, -f, .*
kill_haproxy: KillFilter, root, haproxy, -15, -9, -HUP

# ip_lib
ip: IpFilter, ip, root
find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
ip_exec: IpNetnsExecFilter, ip, root

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/neutron/rootwrap.d/dibbler.filters0000644000175000017500000000141000000000000024500 0ustar00coreycorey00000000000000
# prefix_delegation_agent dibbler-client: CommandFilter, dibbler-client, root kill_dibbler-client: KillFilter, root, dibbler-client, -9 # dibbler kill script filter kill_dibbler_script: CommandFilter, dibbler-kill, root # dibbler-client kill script filter kill_dibbler-client_script: CommandFilter, dibbler-client-kill, root ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/neutron/rootwrap.d/ebtables.filters0000644000175000017500000000044100000000000024661 0ustar00coreycorey00000000000000# neutron-rootwrap command filters for nodes on which neutron is # expected to control network # # This file should be owned by (and only-writeable by) the root user # format seems to be # cmd-name: filter-name, raw-command, user, args [Filters] ebtables: CommandFilter, ebtables, root ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/neutron/rootwrap.d/ipset-firewall.filters0000644000175000017500000000053400000000000026032 0ustar00coreycorey00000000000000# neutron-rootwrap command filters for nodes on which neutron is # expected to control network # # This file should be owned by (and only-writeable by) the root user # format seems to be # cmd-name: filter-name, raw-command, user, args [Filters] # neutron/agent/linux/iptables_firewall.py # "ipset", "-A", ... ipset: CommandFilter, ipset, root ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/neutron/rootwrap.d/iptables-firewall.filters0000644000175000017500000000151600000000000026512 0ustar00coreycorey00000000000000# neutron-rootwrap command filters for nodes on which neutron is # expected to control network # # This file should be owned by (and only-writeable by) the root user # format seems to be # cmd-name: filter-name, raw-command, user, args [Filters] # neutron/agent/linux/iptables_firewall.py # "iptables-save", ... iptables-save: CommandFilter, iptables-save, root iptables-restore: CommandFilter, iptables-restore, root ip6tables-save: CommandFilter, ip6tables-save, root ip6tables-restore: CommandFilter, ip6tables-restore, root # neutron/agent/linux/iptables_firewall.py # "iptables", "-A", ... 
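# (Note: a CommandFilter entry matches on the executable alone and places
# no constraint on its arguments; the RegExpFilter and KillFilter entries
# used in the other rootwrap.d files additionally restrict the argument
# list or the allowed signals.)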
iptables: CommandFilter, iptables, root ip6tables: CommandFilter, ip6tables, root # neutron/agent/linux/iptables_firewall.py sysctl: CommandFilter, sysctl, root # neutron/agent/linux/ip_conntrack.py conntrack: CommandFilter, conntrack, root ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/neutron/rootwrap.d/l3.filters0000644000175000017500000000671400000000000023427 0ustar00coreycorey00000000000000# neutron-rootwrap command filters for nodes on which neutron is # expected to control network # # This file should be owned by (and only-writeable by) the root user # format seems to be # cmd-name: filter-name, raw-command, user, args [Filters] # arping arping: CommandFilter, arping, root # l3_agent sysctl: CommandFilter, sysctl, root route: CommandFilter, route, root radvd: CommandFilter, radvd, root # haproxy haproxy: RegExpFilter, haproxy, root, haproxy, -f, .* kill_haproxy: KillFilter, root, haproxy, -15, -9, -HUP # haproxy kill script filter kill_haproxy_script: CommandFilter, haproxy-kill, root kill_radvd_usr: KillFilter, root, /usr/sbin/radvd, -15, -9, -HUP kill_radvd: KillFilter, root, /sbin/radvd, -15, -9, -HUP # ip_lib ip: IpFilter, ip, root find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.* ip_exec: IpNetnsExecFilter, ip, root # l3_tc_lib l3_tc_show_filters: RegExpFilter, tc, root, tc, -p, -s, -d, filter, show, dev, .+, parent, .+, prio, 1 l3_tc_delete_filters: RegExpFilter, tc, root, tc, filter, del, dev, .+, parent, .+, prio, 1, handle, .+, u32 l3_tc_add_filter_ingress: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, ip, prio, 1, u32, match, ip, dst, .+, police, rate, .+, burst, .+, mtu, 64kb, drop, flowid, :1 l3_tc_add_filter_egress: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, ip, prio, 1, u32, match, ip, src, .+, police, rate, .+, burst, .+, mtu, 64kb, drop, flowid, :1 # For ip monitor kill_ip_monitor: KillFilter, root, ip, -9 # ovs_lib (if OVSInterfaceDriver is used) ovs-vsctl: CommandFilter, ovs-vsctl, root # iptables_manager iptables-save: CommandFilter, iptables-save, root iptables-restore: CommandFilter, iptables-restore, root ip6tables-save: CommandFilter, ip6tables-save, root ip6tables-restore: CommandFilter, ip6tables-restore, root # Keepalived keepalived: CommandFilter, keepalived, root kill_keepalived: KillFilter, root, keepalived, -HUP, -15, -9 # keepalived kill script filter kill_keepalived_script: CommandFilter, keepalived-kill, root # l3 agent to delete floatingip's conntrack state conntrack: CommandFilter, conntrack, root # keepalived state change monitor keepalived_state_change: CommandFilter, neutron-keepalived-state-change, root # The following filters are used to kill the keepalived state change monitor. # Since the monitor runs as a Python script, the system reports that the # command of the process to be killed is python. # TODO(mlavalle) These kill filters will be updated once we come up with a # mechanism to kill using the name of the script being executed by Python kill_keepalived_monitor_py: KillFilter, root, python, -15, -9 kill_keepalived_monitor_py3: KillFilter, root, python3, -15, -9 kill_keepalived_monitor_py36: KillFilter, root, python3.6, -15, -9 kill_keepalived_monitor_py37: KillFilter, root, python3.7, -15, -9 # On e.g. RHEL8, neutron-keepalived-state-change is run by the "system python" # which is /usr/libexec/platform-python3.6, so this should be in filters also.
# Path /usr/libexec isn't in PATH by default so it has to be given here as an # absolute path kill_keepalived_monitor_platform_py: KillFilter, root, /usr/libexec/platform-python, -15, -9 kill_keepalived_monitor_platform_py36: KillFilter, root, /usr/libexec/platform-python3.6, -15, -9 # neutron-keepalived-state-change-monitor kill script filter kill_neutron-keepalived-state-change-monitor_script: CommandFilter, neutron-keepalived-state-change-monitor-kill, root ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/neutron/rootwrap.d/linuxbridge-plugin.filters0000644000175000017500000000117000000000000026710 0ustar00coreycorey00000000000000# neutron-rootwrap command filters for nodes on which neutron is # expected to control network # # This file should be owned by (and only-writeable by) the root user # format seems to be # cmd-name: filter-name, raw-command, user, args [Filters] # linuxbridge-agent # unclear whether both variants are necessary, but I'm transliterating # from the old mechanism brctl: CommandFilter, brctl, root bridge: CommandFilter, bridge, root sysctl: CommandFilter, sysctl, root # ip_lib ip: IpFilter, ip, root find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.* ip_exec: IpNetnsExecFilter, ip, root ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/neutron/rootwrap.d/netns-cleanup.filters0000644000175000017500000000045700000000000025663 0ustar00coreycorey00000000000000# neutron-rootwrap command filters for nodes on which neutron is # expected to control network # # This file should be owned by (and only-writeable by) the root user # format seems to be # cmd-name: filter-name, raw-command, user, args [Filters] # netns-cleanup netstat: CommandFilter, netstat, root ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/neutron/rootwrap.d/openvswitch-plugin.filters0000644000175000017500000000151500000000000026750 0ustar00coreycorey00000000000000# neutron-rootwrap command filters for nodes on which neutron is # expected to control network # # This file should be owned by (and only-writeable by) the root user # format seems to be # cmd-name: filter-name, raw-command, user, args [Filters] # openvswitch-agent # unclear whether both variants are necessary, but I'm transliterating # from the old mechanism ovs-vsctl: CommandFilter, ovs-vsctl, root # NOTE(yamamoto): of_interface=native doesn't use ovs-ofctl ovs-ofctl: CommandFilter, ovs-ofctl, root kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9 ovsdb-client: CommandFilter, ovsdb-client, root # ip_lib ip: IpFilter, ip, root find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.* ip_exec: IpNetnsExecFilter, ip, root # needed for FDB extension bridge: CommandFilter, bridge, root ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/neutron/rootwrap.d/privsep.filters0000644000175000017500000000227100000000000024573 0ustar00coreycorey00000000000000# Command filters to allow privsep daemon to be started via rootwrap. # # This file should be owned by (and only-writeable by) the root user [Filters] # By installing the following, the local admin is asserting that: # # 1.
The python module load path used by privsep-helper # command as root (as started by sudo/rootwrap) is trusted. # 2. Any oslo.config files matching the --config-file # arguments below are trusted. # 3. Users allowed to run sudo/rootwrap with this configuration(*) are # also allowed to invoke python "entrypoint" functions from # --privsep_context with the additional (possibly root) privileges # configured for that context. # # (*) i.e.: the user is allowed by /etc/sudoers to run rootwrap as root # # In particular, the oslo.config and python module path must not # be writeable by the unprivileged user. # oslo.privsep default neutron context privsep: PathFilter, privsep-helper, root, --config-file, /etc/(?!\.\.).*, --privsep_context, neutron.privileged.default, --privsep_sock_path, / # NOTE: A second `--config-file` arg can also be added above, since # many neutron components are installed like that (e.g. by devstack). # Adjust to suit local requirements. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.1790435 neutron-16.0.0.0b2.dev214/etc/oslo-config-generator/0000755000175000017500000000000000000000000022121 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/oslo-config-generator/dhcp_agent.ini0000644000175000017500000000026300000000000024717 0ustar00coreycorey00000000000000[DEFAULT] output_file = etc/dhcp_agent.ini.sample wrap_width = 79 namespace = neutron.az.agent namespace = neutron.base.agent namespace = neutron.dhcp.agent namespace = oslo.log ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/oslo-config-generator/l3_agent.ini0000644000175000017500000000025700000000000024322 0ustar00coreycorey00000000000000[DEFAULT] output_file = etc/l3_agent.ini.sample wrap_width = 79 namespace = neutron.az.agent namespace = neutron.base.agent namespace = neutron.l3.agent namespace = oslo.log ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/oslo-config-generator/linuxbridge_agent.ini0000644000175000017500000000023500000000000026314 0ustar00coreycorey00000000000000[DEFAULT] output_file = etc/neutron/plugins/ml2/linuxbridge_agent.ini.sample wrap_width = 79 namespace = neutron.ml2.linuxbridge.agent namespace = oslo.log ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/oslo-config-generator/macvtap_agent.ini0000644000175000017500000000022500000000000025432 0ustar00coreycorey00000000000000[DEFAULT] output_file = etc/neutron/plugins/ml2/macvtap_agent.ini.sample wrap_width = 79 namespace = neutron.ml2.macvtap.agent namespace = oslo.log ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/oslo-config-generator/metadata_agent.ini0000644000175000017500000000022500000000000025557 0ustar00coreycorey00000000000000[DEFAULT] output_file = etc/metadata_agent.ini.sample wrap_width = 79 namespace = neutron.metadata.agent namespace = oslo.log namespace = oslo.cache././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/oslo-config-generator/metering_agent.ini0000644000175000017500000000023600000000000025613
0ustar00coreycorey00000000000000[DEFAULT] output_file = etc/metering_agent.ini.sample wrap_width = 79 namespace = neutron.base.agent namespace = neutron.metering.agent namespace = oslo.log ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/oslo-config-generator/ml2_conf.ini0000644000175000017500000000020200000000000024313 0ustar00coreycorey00000000000000[DEFAULT] output_file = etc/neutron/plugins/ml2/ml2_conf.ini.sample wrap_width = 79 namespace = neutron.ml2 namespace = oslo.log ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/oslo-config-generator/neutron.conf0000644000175000017500000000103700000000000024463 0ustar00coreycorey00000000000000[DEFAULT] output_file = etc/neutron.conf.sample wrap_width = 79 namespace = neutron namespace = neutron.agent namespace = neutron.db namespace = neutron.extensions namespace = nova.auth namespace = ironic.auth namespace = oslo.log namespace = oslo.db namespace = oslo.policy namespace = oslo.privsep namespace = oslo.concurrency namespace = oslo.messaging namespace = oslo.middleware.cors namespace = oslo.middleware.http_proxy_to_wsgi namespace = oslo.service.sslutils namespace = oslo.service.wsgi namespace = keystonemiddleware.auth_token ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/oslo-config-generator/neutron_ovn_metadata_agent.ini0000644000175000017500000000021700000000000030214 0ustar00coreycorey00000000000000[DEFAULT] output_file = etc/neutron_ovn_metadata_agent.ini.sample wrap_width = 79 namespace = neutron.ovn.metadata.agent namespace = oslo.log ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/oslo-config-generator/openvswitch_agent.ini0000644000175000017500000000026400000000000026353 0ustar00coreycorey00000000000000[DEFAULT] output_file = etc/neutron/plugins/ml2/openvswitch_agent.ini.sample wrap_width = 79 namespace = neutron.ml2.ovs.agent namespace = neutron.ml2.xenapi namespace = oslo.log ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/oslo-config-generator/ovn.ini0000644000175000017500000000016500000000000023426 0ustar00coreycorey00000000000000[DEFAULT] output_file = etc/neutron/ovn.ini.sample wrap_width = 79 namespace = neutron.ml2.ovn namespace = oslo.log ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/oslo-config-generator/sriov_agent.ini0000644000175000017500000000022100000000000025135 0ustar00coreycorey00000000000000[DEFAULT] output_file = etc/neutron/plugins/ml2/sriov_agent.ini.sample wrap_width = 79 namespace = neutron.ml2.sriov.agent namespace = oslo.log ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.1790435 neutron-16.0.0.0b2.dev214/etc/oslo-policy-generator/0000755000175000017500000000000000000000000022153 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/oslo-policy-generator/policy.conf0000644000175000017500000000010300000000000024313 0ustar00coreycorey00000000000000[DEFAULT] output_file = etc/policy.yaml.sample namespace = 
neutron ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/etc/rootwrap.conf0000644000175000017500000000242200000000000020442 0ustar00coreycorey00000000000000# Configuration for neutron-rootwrap # This file should be owned by (and only-writeable by) the root user [DEFAULT] # List of directories to load filter definitions from (separated by ','). # These directories MUST all be only writeable by root ! filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap # List of directories to search executables in, in case filters do not # explicitly specify a full path (separated by ',') # If not specified, defaults to system PATH environment variable. # These directories MUST all be only writeable by root ! exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/etc/neutron/kill_scripts # Enable logging to syslog # Default value is False use_syslog=False # Which syslog facility to use. # Valid values include auth, authpriv, syslog, local0, local1... # Default value is 'syslog' syslog_log_facility=syslog # Which messages to log. # INFO means log all usage # ERROR means only log unsuccessful attempts syslog_log_level=ERROR # Rootwrap daemon exits after this many seconds of inactivity daemon_timeout=600 [xenapi] # XenAPI configuration is only required by the L2 agent if it is to # target a XenServer/XCP compute host's dom0. xenapi_connection_url= xenapi_connection_username=root xenapi_connection_password= ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/lower-constraints.txt0000644000175000017500000000523700000000000021410 0ustar00coreycorey00000000000000alabaster==0.7.10 alembic==0.8.10 amqp==2.1.1 appdirs==1.4.3 astroid==2.1.0 Babel==2.3.4 bandit==1.1.0 bashate==0.5.1 beautifulsoup4==4.6.0 cachetools==2.0.0 cffi==1.13.2 cliff==2.8.0 cmd2==0.8.0 contextlib2==0.4.0 coverage==4.0 ddt==1.0.1 debtcollector==1.19.0 decorator==3.4.0 deprecation==1.0 doc8==0.6.0 docutils==0.11 dogpile.cache==0.6.2 dulwich==0.15.0 eventlet==0.18.2 extras==1.0.0 fasteners==0.7.0 fixtures==3.0.0 flake8-import-order==0.12 flake8==2.6.2 future==0.16.0 futurist==1.2.0 gitdb==0.6.4 GitPython==1.0.1 greenlet==0.4.10 hacking==1.1.0 httplib2==0.9.1 imagesize==0.7.1 iso8601==0.1.11 Jinja2==2.10 jmespath==0.9.0 jsonpatch==1.16 jsonpointer==1.13 jsonschema==2.6.0 keystoneauth1==3.14.0 keystonemiddleware==4.17.0 kombu==4.0.0 linecache2==1.0.0 logilab-common==1.4.1 logutils==0.3.5 Mako==0.4.0 MarkupSafe==1.0 mccabe==0.2.1 mock==3.0.0 monotonic==0.6;python_version<'3.3' mox3==0.20.0 msgpack-python==0.4.0 munch==2.1.0 netaddr==0.7.18 netifaces==0.10.4 neutron-lib==2.2.0 openstackdocstheme==1.30.0 openstacksdk==0.31.2 os-client-config==1.28.0 os-ken==0.3.0 os-service-types==1.7.0 os-vif==1.15.1 os-xenapi==0.3.1 osc-lib==1.8.0 oslo.cache==1.26.0 oslo.concurrency==3.26.0 oslo.config==5.2.0 oslo.context==2.19.2 oslo.db==4.37.0 oslo.i18n==3.15.3 oslo.log==3.36.0 oslo.messaging==5.29.0 oslo.middleware==3.31.0 oslo.policy==1.30.0 oslo.privsep==1.32.0 oslo.reports==1.18.0 oslo.rootwrap==5.8.0 oslo.serialization==2.18.0 oslo.service==1.24.0 oslo.upgradecheck==0.1.0 oslo.utils==3.33.0 oslo.versionedobjects==1.35.1 oslotest==3.2.0 osprofiler==2.3.0 ovs==2.8.0 ovsdbapp==1.0.0 Paste==2.0.2 PasteDeploy==1.5.0 pbr==4.0.0 pecan==1.3.2 pep8==1.5.7 pika-pool==0.1.3 pika==0.10.0 positional==1.2.1 prettytable==0.7.2 psutil==3.2.2 pycadf==1.1.0 pycodestyle==2.4.0
pycparser==2.18 pyflakes==0.8.1 Pygments==2.2.0 pyinotify==0.9.6 pylint==2.2.0 PyMySQL==0.7.6 pyOpenSSL==17.1.0 pyparsing==2.1.0 pyperclip==1.5.27 pyroute2==0.5.7 python-dateutil==2.5.3 python-designateclient==2.7.0 python-editor==1.0.3 python-keystoneclient==3.8.0 python-mimeparse==1.6.0 python-neutronclient==6.7.0 python-novaclient==9.1.0 python-subunit==1.0.0 pytz==2013.6 PyYAML==3.12 reno==2.5.0 repoze.lru==0.7 requests==2.14.2 requestsexceptions==1.2.0 rfc3986==0.3.1 Routes==2.3.1 simplejson==3.5.1 six==1.10.0 smmap==0.9.0 snowballstemmer==1.2.1 Sphinx==1.6.5 sphinxcontrib-websupport==1.0.1 sqlalchemy-migrate==0.11.0 SQLAlchemy==1.2.0 sqlparse==0.2.2 statsd==3.2.1 stestr==1.0.0 stevedore==1.20.0 Tempita==0.5.2 tenacity==4.4.0 testrepository==0.0.18 testresources==2.0.0 testscenarios==0.4 testtools==2.2.0 tooz==1.58.0 tinyrpc==0.6 traceback2==1.4.0 vine==1.1.4 waitress==1.1.0 WebOb==1.8.2 WebTest==2.0.27 wrapt==1.7.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.1830435 neutron-16.0.0.0b2.dev214/neutron/0000755000175000017500000000000000000000000016635 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/__init__.py0000644000175000017500000000202300000000000020743 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import gettext from debtcollector import removals import six if six.PY2: # pylint: disable=unexpected-keyword-arg gettext.install('neutron', unicode=1) else: gettext.install('neutron') six.moves.builtins.__dict__['_'] = removals.remove( message='Builtin _ translation function is deprecated in OpenStack; ' 'use the function from _i18n module for your project.')(_) # noqa ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/_i18n.py0000644000175000017500000000203100000000000020121 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
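# Other neutron modules consume the translation helpers defined below; a
# typical (illustrative) usage pattern, as seen in the agent code later in
# this package, is:
#
#     from neutron._i18n import _
#     raise ValueError(_('respawn_interval must be >= 0 if provided.'))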
import oslo_i18n DOMAIN = "neutron" _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary # The contextual translation function using the name "_C" _C = _translators.contextual_form # The plural translation function using the name "_P" _P = _translators.plural_form def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.1830435 neutron-16.0.0.0b2.dev214/neutron/agent/0000755000175000017500000000000000000000000017733 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/__init__.py0000644000175000017500000000000000000000000022032 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/agent_extensions_manager.py0000644000175000017500000000457000000000000025362 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log import stevedore from neutron.conf.agent import agent_extensions_manager as agent_ext_mgr_config LOG = log.getLogger(__name__) agent_ext_mgr_config.register_agent_ext_manager_opts() class AgentExtensionsManager(stevedore.named.NamedExtensionManager): """Manage agent extensions.""" def __init__(self, conf, namespace): super(AgentExtensionsManager, self).__init__( namespace, conf.agent.extensions, invoke_on_load=True, name_order=True) LOG.info("Loaded agent extensions: %s", self.names()) def initialize(self, connection, driver_type, agent_api=None): """Initialize enabled agent extensions. :param connection: RPC connection that can be reused by extensions to define their RPC endpoints :param driver_type: a string that defines the agent type to the extension. Can be used by the extension to choose the right backend implementation. :param agent_api: an AgentAPI instance that provides an API to interact with the agent that the manager is running in. """ # Initialize each agent extension in the list. for extension in self: LOG.info("Initializing agent extension '%s'", extension.name) # If the agent has provided an agent_api object, this object will # be passed to all interested extensions. This object must be # consumed by each such extension before the extension's # initialize() method is called, as the initialization step # relies on the agent_api already being available. 
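            # A minimal extension shape (sketch for illustration only;
            # "MyExtension" is hypothetical, not part of this module, and
            # real extensions typically implement the AgentExtension
            # interface from neutron_lib) would therefore be:
            #
            #     class MyExtension(object):
            #         def consume_api(self, agent_api):
            #             self.agent_api = agent_api
            #
            #         def initialize(self, connection, driver_type):
            #             pass  # set up RPC endpoints, backend state, etc.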
extension.obj.consume_api(agent_api) extension.obj.initialize(connection, driver_type) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.1950436 neutron-16.0.0.0b2.dev214/neutron/agent/common/0000755000175000017500000000000000000000000021223 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/common/__init__.py0000644000175000017500000000000000000000000023322 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/common/async_process.py0000644000175000017500000002566400000000000024465 0ustar00coreycorey00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import signal import eventlet import eventlet.event from eventlet.green import subprocess import eventlet.queue from neutron_lib.utils import helpers from oslo_log import log as logging from neutron._i18n import _ from neutron.agent.common import ip_lib from neutron.agent.common import utils from neutron.common import utils as common_utils LOG = logging.getLogger(__name__) class AsyncProcessException(Exception): pass class AsyncProcess(object): """Manages an asynchronous process. This class spawns a new process via subprocess and uses greenthreads to read stderr and stdout asynchronously into queues that can be read via repeatedly calling iter_stdout() and iter_stderr(). If respawn_interval is non-zero, any error in communicating with the managed process will result in the process and greenthreads being cleaned up and the process restarted after the specified interval. Example usage: >>> import time >>> proc = AsyncProcess(['ping']) >>> proc.start() >>> time.sleep(5) >>> proc.stop() >>> for line in proc.iter_stdout(): ... print(line) """ def __init__(self, cmd, run_as_root=False, respawn_interval=None, namespace=None, log_output=False, die_on_error=False): """Constructor. :param cmd: The list of command arguments to invoke. :param run_as_root: The process should run with elevated privileges. :param respawn_interval: Optional, the interval in seconds to wait to respawn after unexpected process death. Respawn will only be attempted if a value of 0 or greater is provided. :param namespace: Optional, start the command in the specified namespace. :param log_output: Optional, also log received output. :param die_on_error: Optional, kills the process on stderr output. 
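        For example (values are illustrative only), AsyncProcess(
        ['tail', '-f', '/var/log/syslog'], respawn_interval=30) yields a
        process that, once started, will be respawned 30 seconds after any
        unexpected death or communication error.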
""" self.cmd_without_namespace = cmd self._cmd = ip_lib.add_namespace_to_cmd(cmd, namespace) self.run_as_root = run_as_root if respawn_interval is not None and respawn_interval < 0: raise ValueError(_('respawn_interval must be >= 0 if provided.')) self.respawn_interval = respawn_interval self._process = None self._pid = None self._is_running = False self._kill_event = None self._reset_queues() self._watchers = [] self.log_output = log_output self.die_on_error = die_on_error @property def cmd(self): return ' '.join(self._cmd) def _reset_queues(self): self._stdout_lines = eventlet.queue.LightQueue() self._stderr_lines = eventlet.queue.LightQueue() def is_active(self): # If using sudo rootwrap as a root_helper, we have to wait until sudo # spawns rootwrap and rootwrap spawns the process. self.pid will make # sure to get the correct pid. return utils.pid_invoked_with_cmdline( self.pid, self.cmd_without_namespace) def start(self, block=False): """Launch a process and monitor it asynchronously. :param block: Block until the process has started. :raises utils.WaitTimeout if blocking is True and the process did not start in time. """ LOG.debug('Launching async process [%s].', self.cmd) if self._is_running: raise AsyncProcessException(_('Process is already started')) else: self._spawn() if block: common_utils.wait_until_true(self.is_active) def stop(self, block=False, kill_signal=None, kill_timeout=None): """Halt the process and watcher threads. :param block: Block until the process has stopped. :param kill_signal: Number of signal that will be sent to the process when terminating the process :param kill_timeout: If given, process will be killed with SIGKILL if timeout will be reached and process will still be running :raises utils.WaitTimeout if blocking is True and the process did not stop in time. """ kill_signal = kill_signal or getattr(signal, 'SIGKILL', signal.SIGTERM) if self._is_running: LOG.debug('Halting async process [%s].', self.cmd) self._kill(kill_signal, kill_timeout) else: raise AsyncProcessException(_('Process is not running.')) if block: common_utils.wait_until_true(lambda: not self.is_active()) def _spawn(self): """Spawn a process and its watchers.""" self._is_running = True self._pid = None self._kill_event = eventlet.event.Event() self._process, cmd = utils.create_process(self._cmd, run_as_root=self.run_as_root) self._watchers = [] for reader in (self._read_stdout, self._read_stderr): # Pass the stop event directly to the greenthread to # ensure that assignment of a new event to the instance # attribute does not prevent the greenthread from using # the original event. watcher = eventlet.spawn(self._watch_process, reader, self._kill_event) self._watchers.append(watcher) @property def pid(self): if self._process: if not self._pid: self._pid = utils.get_root_helper_child_pid( self._process.pid, self.cmd_without_namespace, run_as_root=self.run_as_root) return self._pid def _kill(self, kill_signal, kill_timeout=None): """Kill the process and the associated watcher greenthreads.""" pid = self.pid if pid: self._is_running = False self._pid = None self._kill_process_and_wait(pid, kill_signal, kill_timeout) # Halt the greenthreads if they weren't already. 
if self._kill_event: self._kill_event.send() self._kill_event = None def _kill_process_and_wait(self, pid, kill_signal, kill_timeout=None): kill_result = self._kill_process(pid, kill_signal) if kill_result is False: return kill_result if self._process: try: self._process.wait(kill_timeout) except subprocess.TimeoutExpired: LOG.warning("Process %(pid)s [%(cmd)s] still running after " "%(timeout)d seconds. Sending %(signal)d to kill " "it.", {'pid': pid, 'cmd': self.cmd, 'timeout': kill_timeout, 'signal': signal.SIGKILL}) return self._kill_process(pid, signal.SIGKILL) return True def _kill_process(self, pid, kill_signal): try: # A process started by a root helper will be running as # root and need to be killed via the same helper. utils.kill_process(pid, kill_signal, self.run_as_root) except Exception: LOG.exception('An error occurred while killing [%s].', self.cmd) return False return True def _handle_process_error(self): """Kill the async process and respawn if necessary.""" stdout = list(self.iter_stdout()) stderr = list(self.iter_stderr()) LOG.debug('Halting async process [%s] in response to an error. stdout:' ' [%s] - stderr: [%s]', self.cmd, stdout, stderr) self._kill(getattr(signal, 'SIGKILL', signal.SIGTERM)) if self.respawn_interval is not None and self.respawn_interval >= 0: eventlet.sleep(self.respawn_interval) LOG.debug('Respawning async process [%s].', self.cmd) try: self.start() except AsyncProcessException: # Process was already respawned by someone else... pass def _watch_process(self, callback, kill_event): while not kill_event.ready(): try: output = callback() if not output and output != "": break except Exception: LOG.exception('An error occurred while communicating ' 'with async process [%s].', self.cmd) break # Ensure that watching a process with lots of output does # not block execution of other greenthreads. eventlet.sleep() # self._is_running being True indicates that the loop was # broken out of due to an error in the watched process rather # than the loop condition being satisfied. if self._is_running: self._is_running = False self._handle_process_error() def _read(self, stream, queue): data = stream.readline() if data: data = helpers.safe_decode_utf8(data.strip()) queue.put(data) return data def _read_stdout(self): data = self._read(self._process.stdout, self._stdout_lines) if self.log_output: LOG.debug('Output received from [%(cmd)s]: %(data)s', {'cmd': self.cmd, 'data': data}) return data def _read_stderr(self): data = self._read(self._process.stderr, self._stderr_lines) if self.log_output: LOG.error('Error received from [%(cmd)s]: %(err)s', {'cmd': self.cmd, 'err': data}) if self.die_on_error: LOG.error("Process [%(cmd)s] dies due to the error: %(err)s", {'cmd': self.cmd, 'err': data}) # the callback caller will use None to indicate the need to bail # out of the thread return None return data def _iter_queue(self, queue, block): while True: try: yield queue.get(block=block) except eventlet.queue.Empty: break def iter_stdout(self, block=False): return self._iter_queue(self._stdout_lines, block) def iter_stderr(self, block=False): return self._iter_queue(self._stderr_lines, block) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/common/base_polling.py0000644000175000017500000000366200000000000024242 0ustar00coreycorey00000000000000# Copyright 2015 Cloudbase Solutions. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class BasePollingManager(object): def __init__(self): self._force_polling = False self._polling_completed = True def force_polling(self): self._force_polling = True def polling_completed(self): self._polling_completed = True def _is_polling_required(self): raise NotImplementedError() @property def is_polling_required(self): # Always consume the updates to minimize polling. polling_required = self._is_polling_required() # Polling is required regardless of whether updates have been # detected. if self._force_polling: self._force_polling = False polling_required = True # Polling is required if not yet done for previously detected # updates. if not self._polling_completed: polling_required = True if polling_required: # Track whether polling has been completed to ensure that # polling can be required until the caller indicates via a # call to polling_completed() that polling has been # successfully performed. self._polling_completed = False return polling_required class AlwaysPoll(BasePollingManager): @property def is_polling_required(self): return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/common/ip_lib.py0000644000175000017500000000200500000000000023030 0ustar00coreycorey00000000000000# Copyright 2016 Cloudbase Solutions. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os if os.name == 'nt': from neutron.agent.windows import ip_lib from neutron.conf.agent import windows OPTS = windows.IP_LIB_OPTS_WINDOWS else: from neutron.agent.linux import ip_lib from neutron.conf.agent import linux OPTS = linux.IP_LIB_OPTS_LINUX IPWrapper = ip_lib.IPWrapper IPDevice = ip_lib.IPDevice add_namespace_to_cmd = ip_lib.add_namespace_to_cmd ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/common/ovs_lib.py0000644000175000017500000014607600000000000023250 0ustar00coreycorey00000000000000# Copyright 2011 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import itertools import operator import random import time import uuid from neutron_lib import constants as p_const from neutron_lib import exceptions from neutron_lib.services.qos import constants as qos_constants from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import uuidutils from ovsdbapp.backend.ovs_idl import idlutils import debtcollector import six import tenacity from neutron._i18n import _ from neutron.agent.common import ip_lib from neutron.agent.common import utils from neutron.agent.ovsdb import impl_idl from neutron.common import _constants as common_constants from neutron.common import utils as common_utils from neutron.conf.agent import ovs_conf from neutron.plugins.ml2.drivers.openvswitch.agent.common \ import constants UINT64_BITMASK = (1 << 64) - 1 # Special return value for an invalid OVS ofport INVALID_OFPORT = -1 UNASSIGNED_OFPORT = [] # OVS bridge fail modes FAILMODE_SECURE = 'secure' FAILMODE_STANDALONE = 'standalone' # special values for cookies COOKIE_ANY = object() ovs_conf.register_ovs_agent_opts() LOG = logging.getLogger(__name__) OVS_DEFAULT_CAPS = { 'datapath_types': [], 'iface_types': [], } # It's the default queue; all packets not tagged with 'set_queue' will go # through this one QOS_DEFAULT_QUEUE = 0 _SENTINEL = object() CTRL_RATE_LIMIT_MIN = 100 CTRL_BURST_LIMIT_MIN = 25 def _ovsdb_result_pending(result): """Return True if ovsdb indicates the result is still pending.""" # ovsdb can return '[]' for an ofport that has not yet been assigned return result == [] def _ovsdb_retry(fn): """Decorator for retrying when OVS has yet to assign an ofport. The instance's ovsdb_timeout is used as the max waiting time. This relies on the fact that instance methods receive self as the first argument.
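    For instance, _get_port_val() below is wrapped with this decorator, so
    get_port_ofport() keeps re-reading the Interface table until OVS assigns
    a real ofport or the ovsdb_timeout deadline expires.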
""" @six.wraps(fn) def wrapped(*args, **kwargs): self = args[0] new_fn = tenacity.retry( reraise=True, retry=tenacity.retry_if_result(_ovsdb_result_pending), wait=tenacity.wait_exponential(multiplier=0.02, max=1), stop=tenacity.stop_after_delay( self.ovsdb_timeout))(fn) return new_fn(*args, **kwargs) return wrapped class VifPort(object): def __init__(self, port_name, ofport, vif_id, vif_mac, switch): self.port_name = port_name self.ofport = ofport self.vif_id = vif_id self.vif_mac = vif_mac self.switch = switch def __str__(self): return ("iface-id=%s, vif_mac=%s, port_name=%s, ofport=%s, " "bridge_name=%s") % ( self.vif_id, self.vif_mac, self.port_name, self.ofport, self.switch.br_name) class BaseOVS(object): def __init__(self): self.ovsdb_timeout = cfg.CONF.OVS.ovsdb_timeout self.ovsdb = impl_idl.api_factory() self._hw_offload = None def add_manager(self, connection_uri, timeout=_SENTINEL): """Have ovsdb-server listen for manager connections :param connection_uri: Manager target string :param timeout: The Manager probe_interval timeout value (defaults to ovsdb_timeout) """ if timeout is _SENTINEL: timeout = cfg.CONF.OVS.ovsdb_timeout with self.ovsdb.transaction() as txn: txn.add(self.ovsdb.add_manager(connection_uri)) if timeout: txn.add( self.ovsdb.db_set('Manager', connection_uri, ('inactivity_probe', timeout * 1000))) def get_manager(self): return self.ovsdb.get_manager().execute() def remove_manager(self, connection_uri): self.ovsdb.remove_manager(connection_uri).execute() def add_bridge(self, bridge_name, datapath_type=constants.OVS_DATAPATH_SYSTEM): br = OVSBridge(bridge_name, datapath_type=datapath_type) br.create() return br def delete_bridge(self, bridge_name): self.ovsdb.del_br(bridge_name).execute() def bridge_exists(self, bridge_name): return self.ovsdb.br_exists(bridge_name).execute() def port_exists(self, port_name): cmd = self.ovsdb.db_get('Port', port_name, 'name') return bool(cmd.execute(check_error=False, log_errors=False)) def get_bridge_for_iface(self, iface): return self.ovsdb.iface_to_br(iface).execute() def get_bridges(self): return self.ovsdb.list_br().execute(check_error=True) def get_bridge_external_bridge_id(self, bridge, check_error=False, log_errors=True): return self.ovsdb.br_get_external_id(bridge, 'bridge-id').execute( check_error=check_error, log_errors=log_errors) def set_db_attribute(self, table_name, record, column, value, check_error=False, log_errors=True): self.ovsdb.db_set(table_name, record, (column, value)).execute( check_error=check_error, log_errors=log_errors) def clear_db_attribute(self, table_name, record, column): self.ovsdb.db_clear(table_name, record, column).execute() def db_get_val(self, table, record, column, check_error=False, log_errors=True): return self.ovsdb.db_get(table, record, column).execute( check_error=check_error, log_errors=log_errors) @property def config(self): """A dict containing the only row from the root Open_vSwitch table This row contains several columns describing the Open vSwitch install and the system on which it is installed. 
Useful keys include: datapath_types: a list of supported datapath types iface_types: a list of supported interface types ovs_version: the OVS version """ return self.ovsdb.db_list("Open_vSwitch").execute()[0] @property def capabilities(self): _cfg = self.config return {k: _cfg.get(k, OVS_DEFAULT_CAPS[k]) for k in OVS_DEFAULT_CAPS} @property def is_hw_offload_enabled(self): if self._hw_offload is None: self._hw_offload = self.config.get('other_config', {}).get('hw-offload', '').lower() == 'true' return self._hw_offload # Map from version string to on-the-wire protocol version encoding: OF_PROTOCOL_TO_VERSION = { constants.OPENFLOW10: 1, constants.OPENFLOW11: 2, constants.OPENFLOW12: 3, constants.OPENFLOW13: 4, constants.OPENFLOW14: 5, constants.OPENFLOW15: 6, } def version_from_protocol(protocol): if protocol not in OF_PROTOCOL_TO_VERSION: raise Exception(_("unknown OVS protocol string, cannot compare: " "%(protocol)s, (known: %(known)s)") % {'protocol': protocol, 'known': list(OF_PROTOCOL_TO_VERSION)}) return OF_PROTOCOL_TO_VERSION[protocol] class OVSBridge(BaseOVS): def __init__(self, br_name, datapath_type=constants.OVS_DATAPATH_SYSTEM): super(OVSBridge, self).__init__() self.br_name = br_name self.datapath_type = datapath_type self._default_cookie = generate_random_cookie() self._highest_protocol_needed = constants.OPENFLOW10 self._min_bw_qos_id = uuidutils.generate_uuid() # TODO(jlibosva): Revert initial_protocols once launchpad bug 1852221 # is fixed and new openvswitch containing the fix is # released. self.initial_protocols = { constants.OPENFLOW10, constants.OPENFLOW13, constants.OPENFLOW14} self.initial_protocols.add(self._highest_protocol_needed) @property def default_cookie(self): return self._default_cookie def set_agent_uuid_stamp(self, val): self._default_cookie = val def set_controller(self, controllers): self.ovsdb.set_controller(self.br_name, controllers).execute(check_error=True) def del_controller(self): self.ovsdb.del_controller(self.br_name).execute(check_error=True) def get_controller(self): return self.ovsdb.get_controller(self.br_name).execute( check_error=True) def _set_bridge_fail_mode(self, mode): self.ovsdb.set_fail_mode(self.br_name, mode).execute(check_error=True) def set_secure_mode(self): self._set_bridge_fail_mode(FAILMODE_SECURE) def set_standalone_mode(self): self._set_bridge_fail_mode(FAILMODE_STANDALONE) def add_protocols(self, *protocols): self.ovsdb.db_add('Bridge', self.br_name, 'protocols', *protocols).execute(check_error=True) def use_at_least_protocol(self, protocol): """Calls to ovs-ofctl will use a protocol version >= 'protocol'""" self.add_protocols(protocol) self._highest_protocol_needed = max(self._highest_protocol_needed, protocol, key=version_from_protocol) self.initial_protocols.add(self._highest_protocol_needed) def set_igmp_snooping_state(self, state): state = bool(state) other_config = { 'mcast-snooping-disable-flood-unregistered': str(state)} with self.ovsdb.transaction() as txn: txn.add( self.ovsdb.db_set('Bridge', self.br_name, ('mcast_snooping_enable', state))) txn.add( self.ovsdb.db_set('Bridge', self.br_name, ('other_config', other_config))) def create(self, secure_mode=False): other_config = { 'mac-table-size': str(cfg.CONF.OVS.bridge_mac_table_size)} with self.ovsdb.transaction() as txn: txn.add( self.ovsdb.add_br(self.br_name, datapath_type=self.datapath_type)) # the ovs-ofctl commands below in run_ofctl use OF10, so we # need to ensure that this version is enabled ; we could reuse # add_protocols, but doing ovsdb.db_add avoids 
doing two # transactions txn.add( self.ovsdb.db_add('Bridge', self.br_name, 'protocols', *self.initial_protocols)) txn.add( self.ovsdb.db_set('Bridge', self.br_name, ('other_config', other_config))) if secure_mode: txn.add(self.ovsdb.set_fail_mode(self.br_name, FAILMODE_SECURE)) def destroy(self): self.delete_bridge(self.br_name) def add_port(self, port_name, *interface_attr_tuples): with self.ovsdb.transaction() as txn: txn.add(self.ovsdb.add_port(self.br_name, port_name)) if interface_attr_tuples: txn.add(self.ovsdb.db_set('Interface', port_name, *interface_attr_tuples)) return self.get_port_ofport(port_name) def replace_port(self, port_name, *interface_attr_tuples): """Replace existing port or create it, and configure port interface.""" # NOTE(xiaohhui): If del_port is inside the transaction, there will # only be one command for replace_port. This will cause the new port # not be found by system, which will lead to Bug #1519926. self.ovsdb.del_port(port_name).execute() with self.ovsdb.transaction() as txn: txn.add(self.ovsdb.add_port(self.br_name, port_name, may_exist=False)) # NOTE(mangelajo): Port is added to dead vlan (4095) by default # until it's handled by the neutron-openvswitch-agent. Otherwise it # becomes a trunk port on br-int (receiving traffic for all vlans), # and also triggers issues on ovs-vswitchd related to the # datapath flow revalidator thread, see lp#1767422 txn.add(self.ovsdb.db_set( 'Port', port_name, ('tag', constants.DEAD_VLAN_TAG))) # TODO(mangelajo): We could accept attr tuples for the Port too # but, that could potentially break usage of this function in # stable branches (where we need to backport). # https://review.opendev.org/#/c/564825/4/neutron/agent/common/ # ovs_lib.py@289 if interface_attr_tuples: txn.add(self.ovsdb.db_set('Interface', port_name, *interface_attr_tuples)) def delete_port(self, port_name): self.ovsdb.del_port(port_name, self.br_name).execute() def run_ofctl(self, cmd, args, process_input=None): debtcollector.deprecate("Use of run_ofctl is " "deprecated", removal_version='V') full_args = ["ovs-ofctl", cmd, "-O", self._highest_protocol_needed, self.br_name] + args # TODO(kevinbenton): This error handling is really brittle and only # detects one specific type of failure. The callers of this need to # be refactored to expect errors so we can re-raise and they can # take appropriate action based on the type of error. for i in range(1, 11): try: return utils.execute(full_args, run_as_root=True, process_input=process_input) except Exception as e: if "failed to connect to socket" in str(e): LOG.debug("Failed to connect to OVS. Retrying " "in 1 second. Attempt: %s/10", i) time.sleep(1) continue LOG.error("Unable to execute %(cmd)s. 
Exception: " "%(exception)s", {'cmd': full_args, 'exception': e}) break def count_flows(self): flow_list = self.run_ofctl("dump-flows", []).split("\n")[1:] return len(flow_list) - 1 def remove_all_flows(self): self.run_ofctl("del-flows", []) @_ovsdb_retry def _get_port_val(self, port_name, port_val): return self.db_get_val("Interface", port_name, port_val) def get_port_ofport(self, port_name): """Get the port's assigned ofport, retrying if not yet assigned.""" ofport = INVALID_OFPORT try: ofport = self._get_port_val(port_name, "ofport") except tenacity.RetryError: LOG.exception("Timed out retrieving ofport on port %s.", port_name) return ofport @_ovsdb_retry def _get_datapath_id(self): return self.db_get_val('Bridge', self.br_name, 'datapath_id') def get_datapath_id(self): try: return self._get_datapath_id() except tenacity.RetryError: # if ovs fails to find datapath_id then something is likely to be # broken here LOG.exception("Timed out retrieving datapath_id on bridge %s.", self.br_name) raise RuntimeError(_('No datapath_id on bridge %s') % self.br_name) def do_action_flows(self, action, kwargs_list, use_bundle=False): # we can't mix strict and non-strict, so we'll use the first kw # and check against other kw being different strict = kwargs_list[0].get('strict', False) for kw in kwargs_list: if action == 'del': if kw.get('cookie') == COOKIE_ANY: # special value COOKIE_ANY was provided, unset # cookie to match flows whatever their cookie is kw.pop('cookie') if kw.get('cookie_mask'): # non-zero cookie mask raise Exception(_("cookie=COOKIE_ANY but cookie_mask " "set to %s") % kw.get('cookie_mask')) elif 'cookie' in kw: # a cookie was specified, use it kw['cookie'] = check_cookie_mask(kw['cookie']) else: # nothing was specified about cookies, use default kw['cookie'] = "%d/-1" % self._default_cookie else: if 'cookie' not in kw: kw['cookie'] = self._default_cookie if action in ('mod', 'del'): if kw.pop('strict', False) != strict: msg = ("cannot mix 'strict' and not 'strict' in a batch " "call") raise exceptions.InvalidInput(error_message=msg) else: if kw.pop('strict', False): msg = "cannot use 'strict' with 'add' action" raise exceptions.InvalidInput(error_message=msg) extra_param = ["--strict"] if strict else [] if action == 'del' and {} in kwargs_list: # the 'del' case simplifies itself if kwargs_list has at least # one item that matches everything self.run_ofctl('%s-flows' % action, []) else: flow_strs = [_build_flow_expr_str(kw, action, strict) for kw in kwargs_list] LOG.debug("Processing %d OpenFlow rules.", len(flow_strs)) if use_bundle: extra_param.append('--bundle') step = common_constants.AGENT_RES_PROCESSING_STEP for i in range(0, len(flow_strs), step): self.run_ofctl('%s-flows' % action, extra_param + ['-'], '\n'.join(flow_strs[i:i + step])) def add_flow(self, **kwargs): self.do_action_flows('add', [kwargs]) def mod_flow(self, **kwargs): self.do_action_flows('mod', [kwargs]) def delete_flows(self, **kwargs): self.do_action_flows('del', [kwargs]) def dump_flows_for_table(self, table): return self.dump_flows_for(table=table) def dump_flows_for(self, **kwargs): retval = None if "cookie" in kwargs: kwargs["cookie"] = check_cookie_mask(str(kwargs["cookie"])) flow_str = ",".join("=".join([key, str(val)]) for key, val in kwargs.items()) flows = self.run_ofctl("dump-flows", [flow_str]) if flows: retval = '\n'.join(item for item in flows.splitlines() if is_a_flow_line(item)) return retval def dump_all_flows(self): return [f for f in self.run_ofctl("dump-flows", []).splitlines() if 
is_a_flow_line(f)] def deferred(self, **kwargs): return DeferredOVSBridge(self, **kwargs) def add_tunnel_port(self, port_name, remote_ip, local_ip, tunnel_type=p_const.TYPE_GRE, vxlan_udp_port=p_const.VXLAN_UDP_PORT, dont_fragment=True, tunnel_csum=False, tos=None): attrs = [('type', tunnel_type)] # TODO(twilson) This is an OrderedDict solely to make a test happy options = collections.OrderedDict() vxlan_uses_custom_udp_port = ( tunnel_type == p_const.TYPE_VXLAN and vxlan_udp_port != p_const.VXLAN_UDP_PORT ) if vxlan_uses_custom_udp_port: options['dst_port'] = str(vxlan_udp_port) options['df_default'] = str(dont_fragment).lower() options['remote_ip'] = remote_ip options['local_ip'] = local_ip options['in_key'] = 'flow' options['out_key'] = 'flow' # NOTE(moshele): pkt_mark is not supported when using ovs hw-offload, # therefore avoid clearing the mark on encapsulating packets when it's # enabled if not self.is_hw_offload_enabled: options['egress_pkt_mark'] = '0' if tunnel_csum: options['csum'] = str(tunnel_csum).lower() if tos: options['tos'] = str(tos) attrs.append(('options', options)) return self.add_port(port_name, *attrs) def add_patch_port(self, local_name, remote_name): attrs = [('type', 'patch'), ('options', {'peer': remote_name})] return self.add_port(local_name, *attrs) def get_iface_name_list(self): # get the interface name list for this bridge return self.ovsdb.list_ifaces(self.br_name).execute(check_error=True) def get_port_name_list(self): # get the port name list for this bridge return self.ovsdb.list_ports(self.br_name).execute(check_error=True) def get_port_stats(self, port_name): return self.db_get_val("Interface", port_name, "statistics") def get_ports_attributes(self, table, columns=None, ports=None, check_error=True, log_errors=True, if_exists=False): port_names = ports or self.get_port_name_list() if not port_names: return [] return (self.ovsdb.db_list(table, port_names, columns=columns, if_exists=if_exists).
execute(check_error=check_error, log_errors=log_errors)) # returns a VIF object for each VIF port def get_vif_ports(self, ofport_filter=None): edge_ports = [] port_info = self.get_ports_attributes( 'Interface', columns=['name', 'external_ids', 'ofport'], if_exists=True) for port in port_info: name = port['name'] external_ids = port['external_ids'] ofport = port['ofport'] if ofport_filter and ofport in ofport_filter: continue if "iface-id" in external_ids and "attached-mac" in external_ids: p = VifPort(name, ofport, external_ids["iface-id"], external_ids["attached-mac"], self) edge_ports.append(p) return edge_ports def get_vif_port_to_ofport_map(self): results = self.get_ports_attributes( 'Interface', columns=['name', 'external_ids', 'ofport'], if_exists=True) port_map = {} for r in results: # fall back to basic interface name key = self.portid_from_external_ids(r['external_ids']) or r['name'] try: port_map[key] = int(r['ofport']) except TypeError: # port doesn't yet have an ofport entry so we ignore it pass return port_map def get_vif_port_set(self): edge_ports = set() results = self.get_ports_attributes( 'Interface', columns=['name', 'external_ids', 'ofport'], if_exists=True) for result in results: if result['ofport'] == UNASSIGNED_OFPORT: LOG.warning("Found not yet ready openvswitch port: %s", result['name']) elif result['ofport'] == INVALID_OFPORT: LOG.warning("Found failed openvswitch port: %s", result['name']) elif 'attached-mac' in result['external_ids']: port_id = self.portid_from_external_ids(result['external_ids']) if port_id: edge_ports.add(port_id) return edge_ports def portid_from_external_ids(self, external_ids): if 'iface-id' in external_ids: return external_ids['iface-id'] def get_port_tag_dict(self): """Get a dict of port names and associated vlan tags. e.g. the returned dict is of the following form:: {u'int-br-eth2': [], u'patch-tun': [], u'qr-76d9e6b6-21': 1, u'tapce5318ff-78': 1, u'tape1400310-e6': 1} The TAG ID is only available in the "Port" table and is not available in the "Interface" table queried by the get_vif_port_set() method. 
""" results = self.get_ports_attributes( 'Port', columns=['name', 'tag'], if_exists=True) return {p['name']: p['tag'] for p in results} def get_vifs_by_ids(self, port_ids): interface_info = self.get_ports_attributes( "Interface", columns=["name", "external_ids", "ofport"], if_exists=True) by_id = {x['external_ids'].get('iface-id'): x for x in interface_info} result = {} for port_id in port_ids: result[port_id] = None if port_id not in by_id: LOG.info("Port %(port_id)s not present in bridge " "%(br_name)s", {'port_id': port_id, 'br_name': self.br_name}) continue pinfo = by_id[port_id] if not self._check_ofport(port_id, pinfo): continue mac = pinfo['external_ids'].get('attached-mac') result[port_id] = VifPort(pinfo['name'], pinfo['ofport'], port_id, mac, self) return result @staticmethod def _check_ofport(port_id, port_info): if port_info['ofport'] in [UNASSIGNED_OFPORT, INVALID_OFPORT]: LOG.warning("ofport: %(ofport)s for VIF: %(vif)s " "is not a positive integer", {'ofport': port_info['ofport'], 'vif': port_id}) return False return True def get_vif_port_by_id(self, port_id): ports = self.ovsdb.db_find( 'Interface', ('external_ids', '=', {'iface-id': port_id}), ('external_ids', '!=', {'attached-mac': ''}), columns=['external_ids', 'name', 'ofport']).execute() for port in ports: if self.br_name != self.get_bridge_for_iface(port['name']): continue if not self._check_ofport(port_id, port): continue mac = port['external_ids'].get('attached-mac') return VifPort(port['name'], port['ofport'], port_id, mac, self) LOG.info("Port %(port_id)s not present in bridge %(br_name)s", {'port_id': port_id, 'br_name': self.br_name}) def delete_ports(self, all_ports=False): if all_ports: port_names = self.get_port_name_list() else: port_names = (port.port_name for port in self.get_vif_ports()) for port_name in port_names: self.delete_port(port_name) def get_local_port_mac(self): """Retrieve the mac of the bridge's local port.""" address = ip_lib.IPDevice(self.br_name).link.address if address: return address else: msg = _('Unable to determine mac address for %s') % self.br_name raise Exception(msg) def set_controllers_connection_mode(self, connection_mode): """Set bridge controllers connection mode. :param connection_mode: "out-of-band" or "in-band" """ self.set_controller_field('connection_mode', connection_mode) def set_controllers_inactivity_probe(self, interval): """Set bridge controllers inactivity probe interval. :param interval: inactivity_probe value in seconds. 
""" self.set_controller_field('inactivity_probe', interval * 1000) def _set_egress_bw_limit_for_port(self, port_name, max_kbps, max_burst_kbps): with self.ovsdb.transaction(check_error=True) as txn: txn.add(self.ovsdb.db_set('Interface', port_name, ('ingress_policing_rate', max_kbps))) txn.add(self.ovsdb.db_set('Interface', port_name, ('ingress_policing_burst', max_burst_kbps))) def create_egress_bw_limit_for_port(self, port_name, max_kbps, max_burst_kbps): self._set_egress_bw_limit_for_port( port_name, max_kbps, max_burst_kbps) def get_egress_bw_limit_for_port(self, port_name): max_kbps = self.db_get_val('Interface', port_name, 'ingress_policing_rate') max_burst_kbps = self.db_get_val('Interface', port_name, 'ingress_policing_burst') max_kbps = max_kbps or None max_burst_kbps = max_burst_kbps or None return max_kbps, max_burst_kbps def delete_egress_bw_limit_for_port(self, port_name): if not self.port_exists(port_name): return self._set_egress_bw_limit_for_port( port_name, 0, 0) def find_qos(self, port_name): qos = self.ovsdb.db_find( 'QoS', ('external_ids', '=', {'id': port_name}), columns=['_uuid', 'other_config']).execute(check_error=True) if qos: return qos[0] def find_queue(self, port_name, queue_type): queues = self.ovsdb.db_find( 'Queue', ('external_ids', '=', {'id': port_name, 'queue_type': str(queue_type)}), columns=['_uuid', 'other_config']).execute(check_error=True) if queues: return queues[0] def _update_bw_limit_queue(self, txn, port_name, queue_uuid, queue_type, other_config): if queue_uuid: txn.add(self.ovsdb.db_set( 'Queue', queue_uuid, ('other_config', other_config))) else: external_ids = {'id': port_name, 'queue_type': str(queue_type)} queue_uuid = txn.add( self.ovsdb.db_create( 'Queue', external_ids=external_ids, other_config=other_config)) return queue_uuid def _update_bw_limit_profile(self, txn, port_name, qos_uuid, queue_uuid, queue_type, qos_other_config): queues = {queue_type: queue_uuid} if qos_uuid: txn.add(self.ovsdb.db_set( 'QoS', qos_uuid, ('queues', queues))) txn.add(self.ovsdb.db_set( 'QoS', qos_uuid, ('other_config', qos_other_config))) else: external_ids = {'id': port_name} qos_uuid = txn.add( self.ovsdb.db_create( 'QoS', external_ids=external_ids, type='linux-htb', queues=queues, other_config=qos_other_config)) return qos_uuid def _update_bw_limit_profile_dpdk(self, txn, port_name, qos_uuid, other_config): if qos_uuid: txn.add(self.ovsdb.db_set( 'QoS', qos_uuid, ('other_config', other_config))) else: external_ids = {'id': port_name} qos_uuid = txn.add( self.ovsdb.db_create( 'QoS', external_ids=external_ids, type='egress-policer', other_config=other_config)) return qos_uuid def _update_ingress_bw_limit_for_port( self, port_name, max_bw_in_bits, max_burst_in_bits): qos_other_config = { 'max-rate': str(max_bw_in_bits) } queue_other_config = { 'max-rate': str(max_bw_in_bits), 'burst': str(max_burst_in_bits), } qos = self.find_qos(port_name) queue = self.find_queue(port_name, QOS_DEFAULT_QUEUE) qos_uuid = qos['_uuid'] if qos else None queue_uuid = queue['_uuid'] if queue else None with self.ovsdb.transaction(check_error=True) as txn: queue_uuid = self._update_bw_limit_queue( txn, port_name, queue_uuid, QOS_DEFAULT_QUEUE, queue_other_config ) qos_uuid = self._update_bw_limit_profile( txn, port_name, qos_uuid, queue_uuid, QOS_DEFAULT_QUEUE, qos_other_config ) txn.add(self.ovsdb.db_set( 'Port', port_name, ('qos', qos_uuid))) def _update_ingress_bw_limit_for_dpdk_port( self, port_name, max_bw_in_bits, max_burst_in_bits): # cir and cbs should be set in bytes 
instead of bits qos_other_config = { 'cir': str(max_bw_in_bits / 8), 'cbs': str(max_burst_in_bits / 8) } qos = self.find_qos(port_name) qos_uuid = qos['_uuid'] if qos else None with self.ovsdb.transaction(check_error=True) as txn: qos_uuid = self._update_bw_limit_profile_dpdk( txn, port_name, qos_uuid, qos_other_config) txn.add(self.ovsdb.db_set( 'Port', port_name, ('qos', qos_uuid))) def update_ingress_bw_limit_for_port(self, port_name, max_kbps, max_burst_kbps): max_bw_in_bits = max_kbps * p_const.SI_BASE max_burst_in_bits = max_burst_kbps * p_const.SI_BASE port_type = self._get_port_val(port_name, "type") if port_type in constants.OVS_DPDK_PORT_TYPES: self._update_ingress_bw_limit_for_dpdk_port( port_name, max_bw_in_bits, max_burst_in_bits) else: self._update_ingress_bw_limit_for_port( port_name, max_bw_in_bits, max_burst_in_bits) def get_ingress_bw_limit_for_port(self, port_name): max_kbps = None qos_max_kbps = None queue_max_kbps = None max_burst_kbit = None qos_res = self.find_qos(port_name) if qos_res: other_config = qos_res['other_config'] max_bw_in_bits = other_config.get('max-rate') if max_bw_in_bits is not None: qos_max_kbps = int(max_bw_in_bits) / p_const.SI_BASE queue_res = self.find_queue(port_name, QOS_DEFAULT_QUEUE) if queue_res: other_config = queue_res['other_config'] max_bw_in_bits = other_config.get('max-rate') if max_bw_in_bits is not None: queue_max_kbps = int(max_bw_in_bits) / p_const.SI_BASE max_burst_in_bits = other_config.get('burst') if max_burst_in_bits is not None: max_burst_kbit = ( int(max_burst_in_bits) / p_const.SI_BASE) if qos_max_kbps == queue_max_kbps: max_kbps = qos_max_kbps else: LOG.warning("qos max-rate %(qos_max_kbps)s is not equal to " "queue max-rate %(queue_max_kbps)s", {'qos_max_kbps': qos_max_kbps, 'queue_max_kbps': queue_max_kbps}) return max_kbps, max_burst_kbit def get_ingress_bw_limit_for_dpdk_port(self, port_name): max_kbps = None max_burst_kbit = None res = self.find_qos(port_name) if res: other_config = res['other_config'] max_bw_in_bytes = other_config.get("cir") if max_bw_in_bytes is not None: max_kbps = common_utils.bits_to_kilobits( common_utils.bytes_to_bits(int(float(max_bw_in_bytes))), p_const.SI_BASE) max_burst_in_bytes = other_config.get("cbs") if max_burst_in_bytes is not None: max_burst_kbit = common_utils.bits_to_kilobits( common_utils.bytes_to_bits(int(float(max_burst_in_bytes))), p_const.SI_BASE) return max_kbps, max_burst_kbit def delete_ingress_bw_limit_for_port(self, port_name): qos = self.find_qos(port_name) queue = self.find_queue(port_name, QOS_DEFAULT_QUEUE) does_port_exist = self.port_exists(port_name) with self.ovsdb.transaction(check_error=True) as txn: if does_port_exist: txn.add(self.ovsdb.db_clear("Port", port_name, 'qos')) if qos: txn.add(self.ovsdb.db_destroy('QoS', qos['_uuid'])) if queue: txn.add(self.ovsdb.db_destroy('Queue', queue['_uuid'])) def set_controller_field(self, field, value): attr = [(field, value)] controllers = self.db_get_val('Bridge', self.br_name, 'controller') controllers = [controllers] if isinstance( controllers, uuid.UUID) else controllers with self.ovsdb.transaction(check_error=True) as txn: for controller_uuid in controllers: txn.add(self.ovsdb.db_set( 'Controller', controller_uuid, *attr)) def set_controller_rate_limit(self, controller_rate_limit): """Set bridge controller_rate_limit :param controller_rate_limit: at least 100 """ if controller_rate_limit < CTRL_RATE_LIMIT_MIN: LOG.info("rate limit's value must be at least 100") controller_rate_limit = CTRL_RATE_LIMIT_MIN 
self.set_controller_field( 'controller_rate_limit', controller_rate_limit) def set_controller_burst_limit(self, controller_burst_limit): """Set bridge controller_burst_limit :param controller_burst_limit: at least 25 """ if controller_burst_limit < CTRL_BURST_LIMIT_MIN: LOG.info("burst limit's value must be at least 25") controller_burst_limit = CTRL_BURST_LIMIT_MIN self.set_controller_field( 'controller_burst_limit', controller_burst_limit) def set_datapath_id(self, datapath_id): dpid_cfg = {'datapath-id': datapath_id} self.set_db_attribute('Bridge', self.br_name, 'other_config', dpid_cfg, check_error=True) def get_egress_min_bw_for_port(self, port_id): queue = self._find_queue(port_id) if not queue: return min_bps = queue['other_config'].get('min-rate') return int(int(min_bps) / 1000) if min_bps else None def _set_queue_for_minimum_bandwidth(self, queue_num): # reg4 is used to memoize whether the queue has already been set. On # the first visit to table 0 for a packet (i.e. reg4 == 0), set the # queue and memoize it (i.e. load 1 into reg4), then go to table 0 # again. The packet is handled as usual on the second visit to table 0. self.add_flow( table=constants.LOCAL_SWITCHING, in_port=queue_num, reg4=0, priority=200, actions=("set_queue:%s,load:1->NXM_NX_REG4[0]," "resubmit(,%s)" % (queue_num, constants.LOCAL_SWITCHING))) def _unset_queue_for_minimum_bandwidth(self, queue_num): self.delete_flows( table=constants.LOCAL_SWITCHING, in_port=queue_num, reg4=0) def update_minimum_bandwidth_queue(self, port_id, egress_port_names, queue_num, min_kbps): queue_num = int(queue_num) queue_id = self._update_queue(port_id, queue_num, min_kbps=min_kbps) qos_id, qos_queues = self._find_qos() if qos_queues: qos_queues[queue_num] = queue_id else: qos_queues = {queue_num: queue_id} qos_id = self._update_qos( qos_id=qos_id, queues=qos_queues) for egress_port_name in egress_port_names: self._set_port_qos(egress_port_name, qos_id=qos_id) self._set_queue_for_minimum_bandwidth(queue_num) return qos_id def delete_minimum_bandwidth_queue(self, port_id): queue = self._find_queue(port_id) if not queue: return queue_num = int(queue['external_ids']['queue-num']) self._unset_queue_for_minimum_bandwidth(queue_num) qos_id, qos_queues = self._find_qos() if not qos_queues: return if queue_num in qos_queues.keys(): qos_queues.pop(queue_num) self._update_qos( qos_id=qos_id, queues=qos_queues) self._delete_queue(queue['_uuid']) def clear_minimum_bandwidth_qos(self): qoses = self._list_qos( qos_type=qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH) for qos in qoses: qos_id = qos['_uuid'] queues = {num: queue.uuid for num, queue in qos['queues'].items()} ports = self.ovsdb.db_find( 'Port', ('qos', '=', qos_id), columns=['name']).execute(check_error=True) for port in ports: self._set_port_qos(port['name']) self.ovsdb.db_destroy('QoS', qos_id).execute(check_error=True) for queue_uuid in queues.values(): self._delete_queue(queue_uuid) def _update_queue(self, port_id, queue_num, max_kbps=None, max_burst_kbps=None, min_kbps=None): other_config = {} if max_kbps: other_config[six.u('max-rate')] = six.u(str(max_kbps * 1000)) if max_burst_kbps: other_config[six.u('burst')] = six.u(str(max_burst_kbps * 1000)) if min_kbps: other_config[six.u('min-rate')] = six.u(str(min_kbps * 1000)) queue = self._find_queue(port_id) if queue and queue['_uuid']: if queue['other_config'] != other_config: self.set_db_attribute('Queue', queue['_uuid'], 'other_config', other_config, check_error=True) else: # NOTE(ralonsoh): "external_ids" is a map of string-string pairs
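            # e.g. (illustrative values) the map created below looks like:
            #     {'port': '<port-uuid>', 'type': 'minimum_bandwidth',
            #      'queue-num': '42'}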
external_ids = { 'port': str(port_id), 'type': str(qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH), 'queue-num': str(queue_num)} self.ovsdb.db_create( 'Queue', other_config=other_config, external_ids=external_ids).execute(check_error=True) queue = self._find_queue(port_id) return queue['_uuid'] def _find_queue(self, port_id, _type=None): # NOTE(ralonsoh): in ovsdb native library, '{>=}' operator is not # implemented yet. This is a workaround: list all queues and compare # the needed external_ids keys. _type = _type or qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH queues = self._list_queues(port=port_id, _type=_type) if queues: return queues[0] return None def _list_queues(self, _type=None, port=None): queues = self.ovsdb.db_list( 'Queue', columns=['_uuid', 'external_ids', 'other_config']).execute( check_error=True) if port: queues = [queue for queue in queues if queue['external_ids'].get('port') == str(port)] if _type: queues = [queue for queue in queues if queue['external_ids'].get('type') == str(_type)] return queues def _delete_queue(self, queue_id): try: self.ovsdb.db_destroy('Queue', queue_id).execute(check_error=True) except idlutils.RowNotFound: LOG.info('OVS Queue %s was already deleted', queue_id) except RuntimeError as exc: with excutils.save_and_reraise_exception(): if 'referential integrity violation' not in str(exc): return qos_regs = self._list_qos() qos_uuids = [] for qos_reg in qos_regs: queue_nums = [num for num, q in qos_reg['queues'].items() if q.uuid == queue_id] if queue_nums: qos_uuids.append(str(qos_reg['_uuid'])) LOG.error('Queue %(queue)s was still in use by the following ' 'QoS rules: %(qoses)s', {'queue': str(queue_id), 'qoses': ', '.join(sorted(qos_uuids))}) def _update_qos(self, qos_id=None, queues=None): queues = queues or {} if not qos_id: external_ids = {'id': self._min_bw_qos_id, '_type': qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH} self.ovsdb.db_create( 'QoS', type='linux-htb', queues=queues, external_ids=external_ids).execute(check_error=True) qos_id, _ = self._find_qos() else: self.clear_db_attribute('QoS', qos_id, 'queues') if queues: self.set_db_attribute('QoS', qos_id, 'queues', queues, check_error=True) return qos_id def _list_qos(self, _id=None, qos_type=None): external_ids = {} if _id: external_ids['id'] = _id if qos_type: external_ids['_type'] = qos_type if external_ids: return self.ovsdb.db_find( 'QoS', ('external_ids', '=', external_ids), columns=['_uuid', 'queues']).execute(check_error=True) return self.ovsdb.db_find( 'QoS', columns=['_uuid', 'queues']).execute(check_error=True) def _find_qos(self): qos_regs = self._list_qos(_id=self._min_bw_qos_id) if qos_regs: queues = {num: queue.uuid for num, queue in qos_regs[0]['queues'].items()} return qos_regs[0]['_uuid'], queues return None, None def _set_port_qos(self, port_name, qos_id=None): if qos_id: self.set_db_attribute('Port', port_name, 'qos', qos_id, check_error=True) else: self.clear_db_attribute('Port', port_name, 'qos') def get_bridge_ports(self, port_type=None): port_names = self.get_port_name_list() + [self.br_name] ports = self.get_ports_attributes('Interface', ports=port_names, columns=['name', 'type'], if_exists=True) or [] if port_type is None: return ports elif not isinstance(port_type, list): port_type = [port_type] return [port['name'] for port in ports if port['type'] in port_type] def __enter__(self): self.create() return self def __exit__(self, exc_type, exc_value, exc_tb): self.destroy() class DeferredOVSBridge(object): '''Deferred OVSBridge.
This class wraps add_flow, mod_flow and delete_flows calls to an OVSBridge and defers their application until the apply_flows call, in order to perform bulk calls. It also wraps ALLOWED_PASSTHROUGHS calls to avoid mixing OVSBridge and DeferredOVSBridge uses. This class can be used as a context manager; in that case apply_flows is called on __exit__, unless an exception is raised. This class is not thread-safe, which is why a new instance must be created for every use. ''' ALLOWED_PASSTHROUGHS = 'add_port', 'add_tunnel_port', 'delete_port' def __init__(self, br, full_ordered=False, order=('add', 'mod', 'del'), use_bundle=False): '''Constructor. :param br: wrapped bridge :param full_ordered: Optional, disable flow reordering (slower) :param order: Optional, define in which order flows are applied :param use_bundle: Optional, a bool indicating whether --bundle should be passed to all ofctl commands. Default is set to False. ''' self.br = br self.full_ordered = full_ordered self.order = order if not self.full_ordered: self.weights = dict((y, x) for x, y in enumerate(self.order)) self.action_flow_tuples = [] self.use_bundle = use_bundle def __getattr__(self, name): if name in self.ALLOWED_PASSTHROUGHS: return getattr(self.br, name) raise AttributeError(name) def add_flow(self, **kwargs): self.action_flow_tuples.append(('add', kwargs)) def mod_flow(self, **kwargs): self.action_flow_tuples.append(('mod', kwargs)) def delete_flows(self, **kwargs): self.action_flow_tuples.append(('del', kwargs)) def apply_flows(self): action_flow_tuples = self.action_flow_tuples self.action_flow_tuples = [] if not action_flow_tuples: return if not self.full_ordered: action_flow_tuples.sort(key=lambda af: self.weights[af[0]]) grouped = itertools.groupby(action_flow_tuples, key=operator.itemgetter(0)) itemgetter_1 = operator.itemgetter(1) for action, action_flow_list in grouped: flows = list(map(itemgetter_1, action_flow_list)) self.br.do_action_flows(action, flows, self.use_bundle) def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): if exc_type is None: self.apply_flows() else: LOG.exception("OVS flows could not be applied on bridge %s", self.br.br_name) def _build_flow_expr_str(flow_dict, cmd, strict): flow_expr_arr = [] actions = None if cmd == 'add': flow_expr_arr.append("hard_timeout=%s" % flow_dict.pop('hard_timeout', '0')) flow_expr_arr.append("idle_timeout=%s" % flow_dict.pop('idle_timeout', '0')) flow_expr_arr.append("priority=%s" % flow_dict.pop('priority', '1')) elif 'priority' in flow_dict: if not strict: msg = _("Cannot match priority on flow deletion or modification " "without 'strict'") raise exceptions.InvalidInput(error_message=msg) if cmd != 'del': if "actions" not in flow_dict: msg = _("Must specify one or more actions on flow addition" " or modification") raise exceptions.InvalidInput(error_message=msg) actions = "actions=%s" % flow_dict.pop('actions') for key, value in flow_dict.items(): if key == 'proto': flow_expr_arr.append(value) else: flow_expr_arr.append("%s=%s" % (key, str(value))) if actions: flow_expr_arr.append(actions) return ','.join(flow_expr_arr) def generate_random_cookie(): # The OpenFlow spec forbids use of -1 return random.randrange(UINT64_BITMASK) def check_cookie_mask(cookie): cookie = str(cookie) if '/' not in cookie: return cookie + '/-1' else: return cookie def is_a_flow_line(line): # this is used to filter out from ovs-ofctl dump-flows the lines that # are not flow descriptions but mere indications of the type of openflow # message that was used; e.g.: # # #
ovs-ofctl dump-flows br-int # NXST_FLOW reply (xid=0x4): # cookie=0xb7dff131a697c6a5, duration=2411726.809s, table=0, ... # cookie=0xb7dff131a697c6a5, duration=2411726.786s, table=23, ... # cookie=0xb7dff131a697c6a5, duration=2411726.760s, table=24, ... # return 'NXST' not in line and 'OFPST' not in line ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/common/ovsdb_monitor.py0000644000175000017500000001256300000000000024470 0ustar00coreycorey00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_serialization import jsonutils from neutron.agent.common import async_process from neutron.agent.ovsdb import api as ovsdb from neutron.agent.ovsdb.native import helpers from neutron.common import utils LOG = logging.getLogger(__name__) OVSDB_ACTION_INITIAL = 'initial' OVSDB_ACTION_INSERT = 'insert' OVSDB_ACTION_DELETE = 'delete' OVSDB_ACTION_NEW = 'new' class OvsdbMonitor(async_process.AsyncProcess): """Manages an invocation of 'ovsdb-client monitor'.""" def __init__(self, table_name, columns=None, format=None, respawn_interval=None, ovsdb_connection=None): self.table_name = table_name if ovsdb_connection: # if ovsdb connection is configured (e.g. tcp:ip:port), use it, # and there is no need to run as root helpers.enable_connection_uri(ovsdb_connection) cmd = ['ovsdb-client', 'monitor', ovsdb_connection, table_name] run_as_root = False else: cmd = ['ovsdb-client', 'monitor', table_name] run_as_root = True if columns: cmd.append(','.join(columns)) if format: cmd.append('--format=%s' % format) super(OvsdbMonitor, self).__init__(cmd, run_as_root=run_as_root, respawn_interval=respawn_interval, log_output=True, die_on_error=False) self.new_events = {'added': [], 'removed': [], 'modified': []} def get_events(self): self.process_events() events = self.new_events self.new_events = {'added': [], 'removed': [], 'modified': []} return events def start(self, block=False, timeout=5): super(OvsdbMonitor, self).start() if block: utils.wait_until_true(self.is_active) class SimpleInterfaceMonitor(OvsdbMonitor): """Monitors the Interface table of the local host's ovsdb for changes. The has_updates() method indicates whether changes to the ovsdb Interface table have been detected since the monitor started or since the previous access. """ def __init__(self, respawn_interval=None, ovsdb_connection=None): super(SimpleInterfaceMonitor, self).__init__( 'Interface', columns=['name', 'ofport', 'external_ids'], format='json', respawn_interval=respawn_interval, ovsdb_connection=ovsdb_connection ) @property def has_updates(self): """Indicate whether the ovsdb Interface table has been updated. If the monitor process is not active an error will be logged since it won't be able to communicate any update. This situation should be temporary if respawn_interval is set. 
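        A minimal (illustrative) consumer of this property::

            monitor = SimpleInterfaceMonitor()
            monitor.start(block=True)
            if monitor.has_updates:
                events = monitor.get_events()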
""" if not self.is_active(): LOG.error("%s monitor is not active", self.table_name) else: self.process_events() return bool(self.new_events['added'] or self.new_events['removed'] or self.new_events['modified']) def process_events(self): devices_added = [] devices_removed = [] devices_modified = [] dev_to_ofport = {} for row in self.iter_stdout(): json = jsonutils.loads(row).get('data') for ovs_id, action, name, ofport, external_ids in json: if external_ids: external_ids = ovsdb.val_to_py(external_ids) if ofport: ofport = ovsdb.val_to_py(ofport) device = {'name': name, 'ofport': ofport, 'external_ids': external_ids} if action in (OVSDB_ACTION_INITIAL, OVSDB_ACTION_INSERT): devices_added.append(device) elif action == OVSDB_ACTION_DELETE: devices_removed.append(device) elif action == OVSDB_ACTION_NEW: # We'll receive this event for "initial", "insert" # and "modify" actions. If ever needed, the old state # can also be included in the processed event as per # https://tools.ietf.org/html/rfc7047#section-4.1.6 if device not in devices_added: devices_modified.append(device) dev_to_ofport[name] = ofport self.new_events['added'].extend(devices_added) self.new_events['removed'].extend(devices_removed) self.new_events['modified'].extend(devices_modified) # update any events with ofports received from 'new' action for event in self.new_events['added']: event['ofport'] = dev_to_ofport.get(event['name'], event['ofport']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/common/placement_report.py0000644000175000017500000002176100000000000025147 0ustar00coreycorey00000000000000# Copyright 2018 Ericsson # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from neutron_lib import constants as nlib_const from neutron_lib.placement import constants as place_const from neutron_lib.placement import utils as place_utils from oslo_log import log as logging LOG = logging.getLogger(__name__) class DeferredCall(object): '''Store a callable for later calling. This is hardly more than a parameterless lambda, but this way it's much easier to add a __str__ method to help logging. ''' def __init__(self, func, *args, **kwargs): self.func = func self.args = args self.kwargs = kwargs def __str__(self): return '%s(%s)' % ( self.func.__name__, ', '.join([repr(x) for x in self.args] + ['%s=%s' % (k, repr(v)) for k, v in self.kwargs.items()])) def execute(self): return self.func(*self.args, **self.kwargs) class PlacementState(object): '''Represents the desired state of the Placement DB. This represents the state of one Neutron agent and the physical devices handled by it. The sync operation is one-way from Neutron to Placement. The state known by Neutron always overrides what was previously stored in Placement. In order to sync the state known to us on top of another state known to Placement the most generic solution would entail: * Storing state as returned by 'show' methods. 
* Diffing two state objects and representing the diff results in terms of create/update/delete methods as appropriate. * Maybe having an alternate constructor so we can express the current state known to Placement (and queried by us via 'show' methods) as a PlacementState object. That way we could diff between either two heartbeats following each other, or a heartbeat and Placement contents. Fortunately the Placement API has update methods for many of its resources with create-or-update-all semantics. Therefore we have a chance to make this class simpler and only know about 'update' methods. This also avoids the diffing logic. By ignoring 'delete' here, we leave a few cleanup operations for the admin, that needs to be documented. For example deleting no longer used physnet traits. The methods below return DeferredCall objects containing a code reference to one of the Placement client lib methods plus the arguments to be passed to those methods. So you can just execute() those DeferredCalls when appropriate. ''' def __init__(self, rp_bandwidths, rp_inventory_defaults, driver_uuid_namespace, agent_type, hypervisor_rps, device_mappings, supported_vnic_types, client): self._rp_bandwidths = rp_bandwidths self._rp_inventory_defaults = rp_inventory_defaults self._driver_uuid_namespace = driver_uuid_namespace self._agent_type = agent_type self._hypervisor_rps = hypervisor_rps self._device_mappings = device_mappings self._supported_vnic_types = supported_vnic_types self._client = client def _deferred_update_physnet_traits(self): traits = [] for physnet, devices in self._device_mappings.items(): for device in devices: if device in self._rp_bandwidths: traits.append( DeferredCall( self._client.update_trait, name=place_utils.physnet_trait(physnet))) return traits def _deferred_update_vnic_type_traits(self): traits = [] for vnic_type in self._supported_vnic_types: traits.append( DeferredCall( self._client.update_trait, name=place_utils.vnic_type_trait(vnic_type))) return traits def deferred_update_traits(self): traits = [] traits += self._deferred_update_physnet_traits() traits += self._deferred_update_vnic_type_traits() return traits def _deferred_create_agent_rps(self): # While an instance of this class represents a single agent, # that agent is allowed to handle devices of multiple hypervisors. # Since each hypervisor has its own root resource provider # we must create an agent RP under each hypervisor RP. 
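        # For example (illustrative names): a hypervisor RP named
        # 'compute-0' combined with agent type 'NIC Switch agent' yields
        # an agent RP named 'compute-0:NIC Switch agent', parented to the
        # hypervisor RP.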
rps = [] for hypervisor in self._hypervisor_rps.values(): agent_rp_name = '%s:%s' % (hypervisor['name'], self._agent_type) agent_rp_uuid = place_utils.agent_resource_provider_uuid( self._driver_uuid_namespace, hypervisor['name']) rps.append( DeferredCall( self._client.ensure_resource_provider, resource_provider={ 'name': agent_rp_name, 'uuid': agent_rp_uuid, 'parent_provider_uuid': hypervisor['uuid']})) return rps def _deferred_create_device_rps(self): rps = [] for device in self._rp_bandwidths: hypervisor = self._hypervisor_rps[device] rp_name = '%s:%s:%s' % ( hypervisor['name'], self._agent_type, device) rp_uuid = place_utils.device_resource_provider_uuid( self._driver_uuid_namespace, hypervisor['name'], device) rps.append( DeferredCall( self._client.ensure_resource_provider, {'name': rp_name, 'uuid': rp_uuid, 'parent_provider_uuid': hypervisor['uuid']})) return rps def deferred_create_resource_providers(self): agent_rps = self._deferred_create_agent_rps() device_rps = self._deferred_create_device_rps() rps = [] rps.extend(agent_rps) rps.extend(device_rps) return rps def deferred_update_resource_provider_traits(self): rp_traits = [] physnet_trait_mappings = {} for physnet, devices in self._device_mappings.items(): for device in devices: physnet_trait_mappings[device] = place_utils.physnet_trait( physnet) vnic_type_traits = [place_utils.vnic_type_trait(vnic_type) for vnic_type in self._supported_vnic_types] for device in self._rp_bandwidths: rp_uuid = place_utils.device_resource_provider_uuid( self._driver_uuid_namespace, self._hypervisor_rps[device]['name'], device) traits = [] traits.append(physnet_trait_mappings[device]) traits.extend(vnic_type_traits) rp_traits.append( DeferredCall( self._client.update_resource_provider_traits, resource_provider_uuid=rp_uuid, traits=traits)) return rp_traits def deferred_update_resource_provider_inventories(self): rp_inventories = [] for device, bw_values in self._rp_bandwidths.items(): rp_uuid = place_utils.device_resource_provider_uuid( self._driver_uuid_namespace, self._hypervisor_rps[device]['name'], device) inventories = {} for direction, rp_class in ( (nlib_const.EGRESS_DIRECTION, place_const.CLASS_NET_BW_EGRESS_KBPS), (nlib_const.INGRESS_DIRECTION, place_const.CLASS_NET_BW_INGRESS_KBPS)): if bw_values[direction] is not None: inventory = dict(self._rp_inventory_defaults) inventory['total'] = bw_values[direction] inventories[rp_class] = inventory if inventories: rp_inventories.append( DeferredCall( self._client.update_resource_provider_inventories, resource_provider_uuid=rp_uuid, inventories=inventories)) return rp_inventories def deferred_sync(self): state = [] state += self.deferred_update_traits() state += self.deferred_create_resource_providers() state += self.deferred_update_resource_provider_traits() state += self.deferred_update_resource_provider_inventories() return state ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/common/polling.py0000644000175000017500000000471000000000000023243 0ustar00coreycorey00000000000000# Copyright 2015 Cloudbase Solutions. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import eventlet from oslo_config import cfg from oslo_log import log as logging from neutron.agent.common import async_process from neutron.agent.common import base_polling from neutron.agent.common import ovsdb_monitor from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants LOG = logging.getLogger(__name__) @contextlib.contextmanager def get_polling_manager(minimize_polling=False, ovsdb_monitor_respawn_interval=( constants.DEFAULT_OVSDBMON_RESPAWN)): if minimize_polling: pm = InterfacePollingMinimizer( ovsdb_monitor_respawn_interval=ovsdb_monitor_respawn_interval) pm.start() else: pm = base_polling.AlwaysPoll() try: yield pm finally: if minimize_polling: pm.stop() class InterfacePollingMinimizer(base_polling.BasePollingManager): """Monitors ovsdb to determine when polling is required.""" def __init__( self, ovsdb_monitor_respawn_interval=constants.DEFAULT_OVSDBMON_RESPAWN): super(InterfacePollingMinimizer, self).__init__() self._monitor = ovsdb_monitor.SimpleInterfaceMonitor( respawn_interval=ovsdb_monitor_respawn_interval, ovsdb_connection=cfg.CONF.OVS.ovsdb_connection) def start(self): self._monitor.start(block=True) def stop(self): try: self._monitor.stop() except async_process.AsyncProcessException: LOG.debug("InterfacePollingMinimizer was not running when stopped") def _is_polling_required(self): # Maximize the chances of update detection having a chance to # collect output. eventlet.sleep() return self._monitor.has_updates def get_events(self): return self._monitor.get_events() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/common/resource_processing_queue.py0000644000175000017500000001555300000000000027075 0ustar00coreycorey00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import datetime import time from oslo_utils import timeutils from oslo_utils import uuidutils from six.moves import queue as Queue class ResourceUpdate(object): """Encapsulates a resource update An instance of this object carries the information necessary to prioritize and process a request to update a resource. Priority values are ordered from higher (0) to lower (>0) by the caller, and are therefore not defined here, but must be done by the consumer. 
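    For example, a consumer that treats 0 as its highest priority could
    queue an update as (illustrative id and payload)::

        update = ResourceUpdate('net-uuid', priority=0,
                                action='_network_create', resource=payload)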
""" def __init__(self, id, priority, action=None, resource=None, timestamp=None, tries=5): self.priority = priority self.timestamp = timestamp if not timestamp: self.timestamp = timeutils.utcnow() self.id = id self.action = action self.resource = resource self.tries = tries # NOTE: Because one resource can be processed multiple times, this # update_id will be used for tracking one resource processing # procedure. self.update_id = uuidutils.generate_uuid() self.create_time = self.start_time = time.time() def set_start_time(self): # Set the start_time to 'now' - can be used by callers to help # track time spent in procedures. self.start_time = time.time() @property def time_elapsed_since_create(self): return time.time() - self.create_time @property def time_elapsed_since_start(self): # Time elapsed between processing start and end. return time.time() - self.start_time def __lt__(self, other): """Implements priority among updates Lower numerical priority always gets precedence. When comparing two updates of the same priority then the one with the earlier timestamp gets precedence. In the unlikely event that the timestamps are also equal it falls back to a simple comparison of ids meaning the precedence is essentially random. """ if self.priority != other.priority: return self.priority < other.priority if self.timestamp != other.timestamp: return self.timestamp < other.timestamp return self.id < other.id def hit_retry_limit(self): return self.tries < 0 class ExclusiveResourceProcessor(object): """Manager for access to a resource for processing This class controls access to a resource in a non-blocking way. The first instance to be created for a given ID is granted exclusive access to the resource. Other instances may be created for the same ID while the first instance has exclusive access. If that happens then it doesn't block and wait for access. Instead, it signals to the master instance that an update came in with the timestamp. This way, a thread will not block to wait for access to a resource. Instead it effectively signals to the thread that is working on the resource that something has changed since it started working on it. That thread will simply finish its current iteration and then repeat. This class keeps track of the last time that resource data was fetched and processed. The timestamp that it keeps must be before when the data used to process the resource last was fetched from the database. But, as close as possible. The timestamp should not be recorded, however, until the resource has been processed using the fetch data. """ _masters = {} _resource_timestamps = {} def __init__(self, id): self._id = id if id not in self._masters: self._masters[id] = self self._queue = Queue.PriorityQueue(-1) self._master = self._masters[id] def _i_am_master(self): return self == self._master def __enter__(self): return self def __exit__(self, type, value, traceback): if self._i_am_master(): del self._masters[self._id] def _get_resource_data_timestamp(self): return self._resource_timestamps.get(self._id, datetime.datetime.min) def fetched_and_processed(self, timestamp): """Records the timestamp after it is used to update the resource""" new_timestamp = max(timestamp, self._get_resource_data_timestamp()) self._resource_timestamps[self._id] = new_timestamp def queue_update(self, update): """Queues an update from a worker This is the queue used to keep new updates that come in while a resource is being processed. These updates have already bubbled to the front of the ResourceProcessingQueue. 
""" self._master._queue.put(update) def updates(self): """Processes the resource until updates stop coming Only the master instance will process the resource. However, updates may come in from other workers while it is in progress. This method loops until they stop coming. """ while self._i_am_master(): if self._queue.empty(): return # Get the update from the queue even if it is old. update = self._queue.get() # Process the update only if it is fresh. if self._get_resource_data_timestamp() < update.timestamp: yield update class ResourceProcessingQueue(object): """Manager of the queue of resources to process.""" def __init__(self): self._queue = Queue.PriorityQueue() def add(self, update): update.tries -= 1 self._queue.put(update) def each_update_to_next_resource(self): """Grabs the next resource from the queue and processes This method uses a for loop to process the resource repeatedly until updates stop bubbling to the front of the queue. """ next_update = self._queue.get() with ExclusiveResourceProcessor(next_update.id) as rp: # Queue the update whether this worker is the master or not. rp.queue_update(next_update) # Here, if the current worker is not the master, the call to # rp.updates() will not yield and so this will essentially be a # noop. for update in rp.updates(): update.set_start_time() yield (rp, update) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/common/utils.py0000644000175000017500000000633100000000000022740 0ustar00coreycorey00000000000000# Copyright 2015 Cloudbase Solutions. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import socket from neutron_lib.utils import runtime from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils from neutron.conf.agent import common as config from neutron.conf.agent.database import agents_db if os.name == 'nt': from neutron.agent.windows import utils else: from neutron.agent.linux import utils LOG = logging.getLogger(__name__) config.register_root_helper(cfg.CONF) agents_db.register_db_agents_opts() INTERFACE_NAMESPACE = 'neutron.interface_drivers' create_process = utils.create_process kill_process = utils.kill_process execute = utils.execute get_root_helper_child_pid = utils.get_root_helper_child_pid pid_invoked_with_cmdline = utils.pid_invoked_with_cmdline def load_interface_driver(conf, get_networks_callback=None): """Load interface driver for agents like DHCP or L3 agent. :param conf: Driver configuration object :param get_networks_callback: A callback to get network information. This will be passed as additional keyword argument to the interface driver. 
:raises SystemExit of 1 if driver cannot be loaded """ try: loaded_class = runtime.load_class_by_alias_or_classname( INTERFACE_NAMESPACE, conf.interface_driver) return loaded_class(conf, get_networks_callback=get_networks_callback) except ImportError: LOG.error("Error loading interface driver '%s'", conf.interface_driver) raise SystemExit(1) def is_agent_down(heart_beat_time): return timeutils.is_older_than(heart_beat_time, cfg.CONF.agent_down_time) # TODO(bence romsics): rehome this to neutron_lib.placement.utils def default_rp_hypervisors(hypervisors, device_mappings): """Fill config option 'resource_provider_hypervisors' with defaults. Default hypervisor names to socket.gethostname(). Since libvirt knows itself by the same name, the default is good for libvirt. :param hypervisors: Config option 'resource_provider_hypervisors' as parsed by oslo.config, that is a dict with keys of physical devices and values of hypervisor names. :param device_mappings: Device mappings standardized to the list-valued format. """ default_hypervisor = socket.gethostname() rv = {} for _physnet, devices in device_mappings.items(): for device in devices: if device in hypervisors: rv[device] = hypervisors[device] else: rv[device] = default_hypervisor return rv ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.1950436 neutron-16.0.0.0b2.dev214/neutron/agent/dhcp/0000755000175000017500000000000000000000000020651 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/dhcp/__init__.py0000644000175000017500000000000000000000000022750 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/dhcp/agent.py0000644000175000017500000012733100000000000022330 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections import os import threading import eventlet from neutron_lib.agent import constants as agent_consts from neutron_lib.agent import topics from neutron_lib import constants from neutron_lib import context from neutron_lib import exceptions from neutron_lib import rpc as n_rpc from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_service import loopingcall from oslo_utils import fileutils from oslo_utils import importutils from oslo_utils import timeutils import six from neutron._i18n import _ from neutron.agent.common import resource_processing_queue as queue from neutron.agent.linux import dhcp from neutron.agent.linux import external_process from neutron.agent.metadata import driver as metadata_driver from neutron.agent import rpc as agent_rpc from neutron.common import utils from neutron import manager LOG = logging.getLogger(__name__) _SYNC_STATE_LOCK = lockutils.ReaderWriterLock() DEFAULT_PRIORITY = 255 DHCP_PROCESS_GREENLET_MAX = 32 DHCP_PROCESS_GREENLET_MIN = 8 DELETED_PORT_MAX_AGE = 86400 DHCP_READY_PORTS_SYNC_MAX = 64 def _sync_lock(f): """Decorator to block all operations for a global sync call.""" @six.wraps(f) def wrapped(*args, **kwargs): with _SYNC_STATE_LOCK.write_lock(): return f(*args, **kwargs) return wrapped def _wait_if_syncing(f): """Decorator to wait if any sync operations are in progress.""" @six.wraps(f) def wrapped(*args, **kwargs): with _SYNC_STATE_LOCK.read_lock(): return f(*args, **kwargs) return wrapped class DhcpAgent(manager.Manager): """DHCP agent service manager. Note that the public methods of this class are exposed as the server side of an rpc interface. The neutron server uses neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.DhcpAgentNotifyApi as the client side to execute the methods here. For more information about changing rpc interfaces, see doc/source/contributor/internals/rpc_api.rst. """ target = oslo_messaging.Target(version='1.0') def __init__(self, host=None, conf=None): super(DhcpAgent, self).__init__(host=host) self.needs_resync_reasons = collections.defaultdict(list) self.dhcp_ready_ports = set() self.dhcp_prio_ready_ports = set() self.conf = conf or cfg.CONF # If 'resync_throttle' is configured more than 'resync_interval' by # mistake, raise exception and log with message. 
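        # For example (illustrative values): resync_throttle=10 with
        # resync_interval=5 is rejected below, while resync_throttle=1
        # with resync_interval=5 is accepted.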
if self.conf.resync_throttle > self.conf.resync_interval: msg = _("DHCP agent must have resync_throttle <= resync_interval") LOG.exception(msg) raise exceptions.InvalidConfigurationOption( opt_name='resync_throttle', opt_value=self.conf.resync_throttle) self._periodic_resync_event = threading.Event() self.cache = NetworkCache() self.dhcp_driver_cls = importutils.import_class(self.conf.dhcp_driver) self.plugin_rpc = DhcpPluginApi(topics.PLUGIN, self.conf.host) # create dhcp dir to store dhcp info dhcp_dir = os.path.dirname("/%s/dhcp/" % self.conf.state_path) fileutils.ensure_tree(dhcp_dir, mode=0o755) self.dhcp_version = self.dhcp_driver_cls.check_version() self._populate_networks_cache() # keep track of mappings between networks and routers for # metadata processing self._metadata_routers = {} # {network_id: router_id} self._process_monitor = external_process.ProcessMonitor( config=self.conf, resource_type='dhcp') self._pool_size = DHCP_PROCESS_GREENLET_MIN self._pool = eventlet.GreenPool(size=self._pool_size) self._queue = queue.ResourceProcessingQueue() self._network_bulk_allocations = {} def init_host(self): self.sync_state() def _populate_networks_cache(self): """Populate the networks cache when the DHCP-agent starts.""" try: existing_networks = self.dhcp_driver_cls.existing_dhcp_networks( self.conf ) for net_id in existing_networks: net = dhcp.NetModel({"id": net_id, "subnets": [], "non_local_subnets": [], "ports": []}) self.cache.put(net) except NotImplementedError: # just go ahead with an empty networks cache LOG.debug("The '%s' DHCP-driver does not support retrieving of a " "list of existing networks", self.conf.dhcp_driver) def after_start(self): self.run() LOG.info("DHCP agent started") def run(self): """Activate the DHCP agent.""" self.periodic_resync() self.start_ready_ports_loop() eventlet.spawn_n(self._process_loop) if self.conf.bulk_reload_interval: eventlet.spawn_n(self._reload_bulk_allocations) def _reload_bulk_allocations(self): while True: for network_id in self._network_bulk_allocations.keys(): network = self.cache.get_network_by_id(network_id) self.call_driver('bulk_reload_allocations', network) del self._network_bulk_allocations[network_id] eventlet.greenthread.sleep(self.conf.bulk_reload_interval) def call_driver(self, action, network, **action_kwargs): """Invoke an action on a DHCP driver instance.""" LOG.debug('Calling driver for network: %(net)s action: %(action)s', {'net': network.id, 'action': action}) if self.conf.bulk_reload_interval and action == 'reload_allocations': LOG.debug("Call deferred to bulk load") self._network_bulk_allocations[network.id] = True return True if action == 'bulk_reload_allocations': action = 'reload_allocations' try: # the Driver expects something that is duck typed similar to # the base models. 
driver = self.dhcp_driver_cls(self.conf, network, self._process_monitor, self.dhcp_version, self.plugin_rpc) getattr(driver, action)(**action_kwargs) return True except exceptions.Conflict: # No need to resync here, the agent will receive the event related # to a status update for the network LOG.debug('Unable to %(action)s dhcp for %(net_id)s: there ' 'is a conflict with its current state; please ' 'check that the network and/or its subnet(s) ' 'still exist.', {'net_id': network.id, 'action': action}) except exceptions.SubnetMismatchForPort as e: # FIXME(kevinbenton): get rid of this once bug/1627480 is fixed LOG.debug("Error configuring DHCP port, scheduling resync: %s", e) self.schedule_resync(e, network.id) except Exception as e: if getattr(e, 'exc_type', '') != 'IpAddressGenerationFailure': # Don't resync if port could not be created because of an IP # allocation failure. When the subnet is updated with a new # allocation pool or a port is deleted to free up an IP, this # will automatically be retried on the notification self.schedule_resync(e, network.id) if (isinstance(e, oslo_messaging.RemoteError) and e.exc_type == 'NetworkNotFound' or isinstance(e, exceptions.NetworkNotFound)): LOG.debug("Network %s has been removed from the agent " "or deleted from DB.", network.id) else: LOG.exception('Unable to %(action)s dhcp for %(net_id)s.', {'net_id': network.id, 'action': action}) def schedule_resync(self, reason, network_id=None): """Schedule a resync for a given network and reason. If no network is specified, resync all networks. """ self.needs_resync_reasons[network_id].append(reason) self._periodic_resync_event.set() # Yield to allow other threads that may be ready to run. # This helps prevent one thread from acquiring the same lock over and # over again, in which case no other threads waiting on the # "dhcp-agent" lock would make any progress. eventlet.greenthread.sleep(0) @_sync_lock def sync_state(self, networks=None): """Sync the local DHCP state with Neutron. If no networks are passed, or 'None' is one of the networks, sync all of the networks. 
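        For example (illustrative ids): sync_state(['net-a']) resyncs only
        'net-a', while sync_state() or sync_state([None, 'net-a']) resync
        all known networks.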
""" only_nets = set([] if (not networks or None in networks) else networks) LOG.info('Synchronizing state') pool = eventlet.GreenPool(self.conf.num_sync_threads) known_network_ids = set(self.cache.get_network_ids()) try: active_networks = self.plugin_rpc.get_active_networks_info( enable_dhcp_filter=False) LOG.info('All active networks have been fetched through RPC.') active_network_ids = set(network.id for network in active_networks) for deleted_id in known_network_ids - active_network_ids: try: self.disable_dhcp_helper(deleted_id) except Exception as e: self.schedule_resync(e, deleted_id) LOG.exception('Unable to sync network state on ' 'deleted network %s', deleted_id) for network in active_networks: if (not only_nets or # specifically resync all network.id not in known_network_ids or # missing net network.id in only_nets): # specific network to sync pool.spawn(self.safe_configure_dhcp_for_network, network) pool.waitall() # we notify all ports in case some were created while the agent # was down self.dhcp_ready_ports |= set(self.cache.get_port_ids(only_nets)) LOG.info('Synchronizing state complete') except Exception as e: if only_nets: for network_id in only_nets: self.schedule_resync(e, network_id) else: self.schedule_resync(e) LOG.exception('Unable to sync network state.') def _dhcp_ready_ports_loop(self): """Notifies the server of any ports that had reservations setup.""" while True: # this is just watching sets so we can do it really frequently eventlet.sleep(0.1) prio_ports_to_send = set() ports_to_send = set() for port_count in range(min(len(self.dhcp_prio_ready_ports) + len(self.dhcp_ready_ports), DHCP_READY_PORTS_SYNC_MAX)): if self.dhcp_prio_ready_ports: prio_ports_to_send.add(self.dhcp_prio_ready_ports.pop()) continue ports_to_send.add(self.dhcp_ready_ports.pop()) if prio_ports_to_send or ports_to_send: try: self.plugin_rpc.dhcp_ready_on_ports(prio_ports_to_send | ports_to_send) LOG.info("DHCP configuration for ports %s is completed", prio_ports_to_send | ports_to_send) continue except oslo_messaging.MessagingTimeout: LOG.error("Timeout notifying server of ports ready. " "Retrying...") except Exception: LOG.exception("Failure notifying DHCP server of " "ready DHCP ports. Will retry on next " "iteration.") self.dhcp_prio_ready_ports |= prio_ports_to_send self.dhcp_ready_ports |= ports_to_send def start_ready_ports_loop(self): """Spawn a thread to push changed ports to server.""" eventlet.spawn(self._dhcp_ready_ports_loop) @utils.exception_logger() def _periodic_resync_helper(self): """Resync the dhcp state at the configured interval and throttle.""" while True: # threading.Event.wait blocks until the internal flag is true. It # returns the internal flag on exit, so it will always return True # except if a timeout is given and the operation times out. if self._periodic_resync_event.wait(self.conf.resync_interval): LOG.debug("Resync event has been scheduled") clear_periodic_resync_event = self._periodic_resync_event.clear # configure throttler for clear_periodic_resync_event to # introduce delays between resync state events. 
throttled_clear_periodic_resync_event = utils.throttler( self.conf.resync_throttle)(clear_periodic_resync_event) throttled_clear_periodic_resync_event() if self.needs_resync_reasons: # be careful to avoid a race with additions to list # from other threads reasons = self.needs_resync_reasons self.needs_resync_reasons = collections.defaultdict(list) for net, r in reasons.items(): if not net: net = "*" LOG.debug("resync (%(network)s): %(reason)s", {"reason": r, "network": net}) self.sync_state(reasons.keys()) def periodic_resync(self): """Spawn a thread to periodically resync the dhcp state.""" eventlet.spawn(self._periodic_resync_helper) def safe_get_network_info(self, network_id): try: network = self.plugin_rpc.get_network_info(network_id) if not network: LOG.debug('Network %s has been deleted.', network_id) return network except Exception as e: self.schedule_resync(e, network_id) LOG.exception('Network %s info call failed.', network_id) def enable_dhcp_helper(self, network_id): """Enable DHCP for a network that meets enabling criteria.""" network = self.safe_get_network_info(network_id) if network: self.configure_dhcp_for_network(network) @utils.exception_logger() def safe_configure_dhcp_for_network(self, network): try: network_id = network.get('id') LOG.info('Starting network %s dhcp configuration', network_id) self.configure_dhcp_for_network(network) LOG.info('Finished network %s dhcp configuration', network_id) except (exceptions.NetworkNotFound, RuntimeError): LOG.warning('Network %s may have been deleted and ' 'its resources may have already been disposed.', network.id) def configure_dhcp_for_network(self, network): if not network.admin_state_up: return for subnet in network.subnets: if subnet.enable_dhcp: if self.call_driver('enable', network): self.update_isolated_metadata_proxy(network) self.cache.put(network) # After enabling dhcp for network, mark all existing # ports as ready. So that the status of ports which are # created before enabling dhcp can be updated. self.dhcp_ready_ports |= {p.id for p in network.ports} break self._resize_process_pool() def disable_dhcp_helper(self, network_id): """Disable DHCP for a network known to the agent.""" network = self.cache.get_network_by_id(network_id) if network: # NOTE(yamahata): Kill the metadata proxy process # unconditionally, as in the case where a network # is deleted, all the subnets and ports are deleted # before this function is called, so determining if # the proxy should be terminated is error prone. # destroy_monitored_metadata_proxy() is a noop when # there is no process running. self.disable_isolated_metadata_proxy(network) if self.call_driver('disable', network): self.cache.remove(network) self._resize_process_pool() def refresh_dhcp_helper(self, network_id): """Refresh or disable DHCP for a network depending on the current state of the network. """ old_network = self.cache.get_network_by_id(network_id) if not old_network: # DHCP current not running for network. 
return self.enable_dhcp_helper(network_id) network = self.safe_get_network_info(network_id) if not network: return if not any(s for s in network.subnets if s.enable_dhcp): self.disable_dhcp_helper(network.id) return old_non_local_subnets = getattr(old_network, 'non_local_subnets', []) new_non_local_subnets = getattr(network, 'non_local_subnets', []) old_cidrs = [s.cidr for s in (old_network.subnets + old_non_local_subnets) if s.enable_dhcp] new_cidrs = [s.cidr for s in (network.subnets + new_non_local_subnets) if s.enable_dhcp] if old_cidrs == new_cidrs: self.call_driver('reload_allocations', network) self.cache.put(network) elif self.call_driver('restart', network): self.cache.put(network) # mark all ports as active in case the sync included # new ports that we hadn't seen yet. self.dhcp_ready_ports |= {p.id for p in network.ports} # Update the metadata proxy after the dhcp driver has been updated self.update_isolated_metadata_proxy(network) def network_create_end(self, context, payload): """Handle the network.create.end notification event.""" update = queue.ResourceUpdate(payload['network']['id'], payload.get('priority', DEFAULT_PRIORITY), action='_network_create', resource=payload) self._queue.add(update) @_wait_if_syncing def _network_create(self, payload): network_id = payload['network']['id'] self.enable_dhcp_helper(network_id) def network_update_end(self, context, payload): """Handle the network.update.end notification event.""" update = queue.ResourceUpdate(payload['network']['id'], payload.get('priority', DEFAULT_PRIORITY), action='_network_update', resource=payload) self._queue.add(update) @_wait_if_syncing def _network_update(self, payload): network_id = payload['network']['id'] if payload['network']['admin_state_up']: self.enable_dhcp_helper(network_id) else: self.disable_dhcp_helper(network_id) def network_delete_end(self, context, payload): """Handle the network.delete.end notification event.""" update = queue.ResourceUpdate(payload['network_id'], payload.get('priority', DEFAULT_PRIORITY), action='_network_delete', resource=payload) self._queue.add(update) @_wait_if_syncing def _network_delete(self, payload): network_id = payload['network_id'] self.disable_dhcp_helper(network_id) def subnet_update_end(self, context, payload): """Handle the subnet.update.end notification event.""" update = queue.ResourceUpdate(payload['subnet']['network_id'], payload.get('priority', DEFAULT_PRIORITY), action='_subnet_update', resource=payload) self._queue.add(update) @_wait_if_syncing def _subnet_update(self, payload): network_id = payload['subnet']['network_id'] self.refresh_dhcp_helper(network_id) # Use the update handler for the subnet create event. subnet_create_end = subnet_update_end def _get_network_lock_id(self, payload): """Determine which lock to hold when servicing an RPC event""" # TODO(alegacy): in a future release this function can be removed and # uses of it can be replaced with payload['network_id']. It exists # only to satisfy backwards compatibility between older servers and # newer agents. Once the 'network_id' attribute is guaranteed to be # sent by the server on all *_delete_end events then it can be removed. 
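# Illustrative payload shapes handled here (ids are placeholders):
#   {'network_id': <id>}  -> returned directly
#   {'subnet_id': <id>}   -> resolved via the subnet -> network cache
#   {'port_id': <id>}     -> resolved via the port -> network cache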
if 'network_id' in payload: return payload['network_id'] elif 'subnet_id' in payload: subnet_id = payload['subnet_id'] network = self.cache.get_network_by_subnet_id(subnet_id) return network.id if network else None elif 'port_id' in payload: port_id = payload['port_id'] port = self.cache.get_port_by_id(port_id) return port.network_id if port else None def subnet_delete_end(self, context, payload): """Handle the subnet.delete.end notification event.""" network_id = self._get_network_lock_id(payload) if not network_id: return update = queue.ResourceUpdate(network_id, payload.get('priority', DEFAULT_PRIORITY), action='_subnet_delete', resource=payload) self._queue.add(update) @_wait_if_syncing def _subnet_delete(self, payload): network_id = self._get_network_lock_id(payload) if not network_id: return subnet_id = payload['subnet_id'] network = self.cache.get_network_by_subnet_id(subnet_id) if not network: return self.refresh_dhcp_helper(network.id) @lockutils.synchronized('resize_greenpool') def _resize_process_pool(self): num_nets = len(self.cache.get_network_ids()) pool_size = max([DHCP_PROCESS_GREENLET_MIN, min([DHCP_PROCESS_GREENLET_MAX, num_nets])]) if pool_size == self._pool_size: return LOG.info("Resizing dhcp processing queue green pool size to: %d", pool_size) self._pool.resize(pool_size) self._pool_size = pool_size def _process_loop(self): LOG.debug("Starting _process_loop") while True: self._pool.spawn_n(self._process_resource_update) def _process_resource_update(self): for tmp, update in self._queue.each_update_to_next_resource(): method = getattr(self, update.action) method(update.resource) def port_update_end(self, context, payload): """Handle the port.update.end notification event.""" updated_port = dhcp.DictModel(payload['port']) if self.cache.is_port_message_stale(updated_port): LOG.debug("Discarding stale port update: %s", updated_port) return update = queue.ResourceUpdate(updated_port.network_id, payload.get('priority', DEFAULT_PRIORITY), action='_port_update', resource=updated_port) self._queue.add(update) @_wait_if_syncing def _port_update(self, updated_port): if self.cache.is_port_message_stale(updated_port): LOG.debug("Discarding stale port update: %s", updated_port) return network = self.cache.get_network_by_id(updated_port.network_id) if not network: return # treat update as a create event self.reload_allocations(updated_port, network, prio=True) def reload_allocations(self, port, network, prio=False): LOG.info("Trigger reload_allocations for port %s", port) driver_action = 'reload_allocations' if self._is_port_on_this_agent(port): orig = self.cache.get_port_by_id(port['id']) # assume IP change if not in cache orig = orig or {'fixed_ips': []} old_ips = {i['ip_address'] for i in orig['fixed_ips'] or []} new_ips = {i['ip_address'] for i in port['fixed_ips']} old_subs = {i['subnet_id'] for i in orig['fixed_ips'] or []} new_subs = {i['subnet_id'] for i in port['fixed_ips']} if new_subs != old_subs: # subnets being serviced by port have changed, this could # indicate a subnet_delete is in progress. schedule a # resync rather than an immediate restart so we don't # attempt to re-allocate IPs at the same time the server # is deleting them. 
self.schedule_resync("Agent port was modified", port.network_id) return elif old_ips != new_ips: LOG.debug("Agent IPs on network %s changed from %s to %s", network.id, old_ips, new_ips) driver_action = 'restart' self.cache.put_port(port) self.call_driver(driver_action, network) if prio: self.dhcp_prio_ready_ports.add(port.id) else: self.dhcp_ready_ports.add(port.id) self.update_isolated_metadata_proxy(network) def _is_port_on_this_agent(self, port): thishost = utils.get_dhcp_agent_device_id( port['network_id'], self.conf.host) return port['device_id'] == thishost def port_create_end(self, context, payload): """Handle the port.create.end notification event.""" created_port = dhcp.DictModel(payload['port']) update = queue.ResourceUpdate(created_port.network_id, payload.get('priority', DEFAULT_PRIORITY), action='_port_create', resource=created_port) self._queue.add(update) @_wait_if_syncing def _port_create(self, created_port): network = self.cache.get_network_by_id(created_port.network_id) if not network: return new_ips = {i['ip_address'] for i in created_port['fixed_ips']} for port_cached in network.ports: # if in the same network there are ports cached with the same # ip address but different MAC address and/or different id, # this indicate that the cache is out of sync cached_ips = {i['ip_address'] for i in port_cached['fixed_ips']} if (new_ips.intersection(cached_ips) and (created_port['id'] != port_cached['id'] or created_port['mac_address'] != port_cached['mac_address'])): self.schedule_resync("Duplicate IP addresses found, " "DHCP cache is out of sync", created_port.network_id) return self.reload_allocations(created_port, network, prio=True) def port_delete_end(self, context, payload): """Handle the port.delete.end notification event.""" network_id = self._get_network_lock_id(payload) if not network_id: return update = queue.ResourceUpdate(network_id, payload.get('priority', DEFAULT_PRIORITY), action='_port_delete', resource=payload) self._queue.add(update) @_wait_if_syncing def _port_delete(self, payload): network_id = self._get_network_lock_id(payload) if not network_id: return port_id = payload['port_id'] port = self.cache.get_port_by_id(port_id) self.cache.add_to_deleted_ports(port_id) if not port: return network = self.cache.get_network_by_id(port.network_id) self.cache.remove_port(port) if self._is_port_on_this_agent(port): # the agent's port has been deleted. disable the service # and add the network to the resync list to create # (or acquire a reserved) port. self.call_driver('disable', network) self.schedule_resync("Agent port was deleted", port.network_id) else: self.call_driver('reload_allocations', network) self.update_isolated_metadata_proxy(network) def update_isolated_metadata_proxy(self, network): """Spawn or kill metadata proxy. According to return from driver class, spawn or kill the metadata proxy process. Spawn an existing metadata proxy or kill a nonexistent metadata proxy will just silently return. 
""" should_enable_metadata = self.dhcp_driver_cls.should_enable_metadata( self.conf, network) if should_enable_metadata: self.enable_isolated_metadata_proxy(network) else: self.disable_isolated_metadata_proxy(network) def enable_isolated_metadata_proxy(self, network): # The proxy might work for either a single network # or all the networks connected via a router # to the one passed as a parameter kwargs = {'network_id': network.id} # When the metadata network is enabled, the proxy might # be started for the router attached to the network if self.conf.enable_metadata_network: router_ports = [port for port in network.ports if (port.device_owner in constants.ROUTER_INTERFACE_OWNERS)] if router_ports: # Multiple router ports should not be allowed if len(router_ports) > 1: LOG.warning("%(port_num)d router ports found on the " "metadata access network. Only the port " "%(port_id)s, for router %(router_id)s " "will be considered", {'port_num': len(router_ports), 'port_id': router_ports[0].id, 'router_id': router_ports[0].device_id}) all_subnets = self.dhcp_driver_cls._get_all_subnets(network) if self.dhcp_driver_cls.has_metadata_subnet(all_subnets): kwargs = {'router_id': router_ports[0].device_id} self._metadata_routers[network.id] = ( router_ports[0].device_id) metadata_driver.MetadataDriver.spawn_monitored_metadata_proxy( self._process_monitor, network.namespace, dhcp.METADATA_PORT, self.conf, bind_address=dhcp.METADATA_DEFAULT_IP, **kwargs) def disable_isolated_metadata_proxy(self, network): if (self.conf.enable_metadata_network and network.id in self._metadata_routers): uuid = self._metadata_routers[network.id] is_router_id = True else: uuid = network.id is_router_id = False metadata_driver.MetadataDriver.destroy_monitored_metadata_proxy( self._process_monitor, uuid, self.conf, network.namespace) if is_router_id: del self._metadata_routers[network.id] class DhcpPluginApi(object): """Agent side of the dhcp rpc API. This class implements the client side of an rpc interface. The server side of this interface can be found in neutron.api.rpc.handlers.dhcp_rpc.DhcpRpcCallback. For more information about changing rpc interfaces, see doc/source/contributor/internals/rpc_api.rst. API version history: 1.0 - Initial version. 1.1 - Added get_active_networks_info, create_dhcp_port, and update_dhcp_port methods. 1.5 - Added dhcp_ready_on_ports 1.7 - Added get_networks 1.8 - Added get_dhcp_port """ def __init__(self, topic, host): self.host = host target = oslo_messaging.Target( topic=topic, namespace=constants.RPC_NAMESPACE_DHCP_PLUGIN, version='1.0') self.client = n_rpc.get_client(target) @property def context(self): # TODO(kevinbenton): the context should really be passed in to each of # these methods so a call can be tracked all of the way through the # system but that will require a larger refactor to pass the context # everywhere. We just generate a new one here on each call so requests # can be independently tracked server side. 
return context.get_admin_context_without_session() def get_active_networks_info(self, **kwargs): """Make a remote process call to retrieve all network info.""" cctxt = self.client.prepare(version='1.1') networks = cctxt.call(self.context, 'get_active_networks_info', host=self.host, **kwargs) return [dhcp.NetModel(n) for n in networks] def get_network_info(self, network_id): """Make a remote process call to retrieve network info.""" cctxt = self.client.prepare() network = cctxt.call(self.context, 'get_network_info', network_id=network_id, host=self.host) if network: return dhcp.NetModel(network) def create_dhcp_port(self, port): """Make a remote process call to create the dhcp port.""" cctxt = self.client.prepare(version='1.1') port = cctxt.call(self.context, 'create_dhcp_port', port=port, host=self.host) if port: return dhcp.DictModel(port) def update_dhcp_port(self, port_id, port): """Make a remote process call to update the dhcp port.""" cctxt = self.client.prepare(version='1.1') port = cctxt.call(self.context, 'update_dhcp_port', port_id=port_id, port=port, host=self.host) if port: return dhcp.DictModel(port) def release_dhcp_port(self, network_id, device_id): """Make a remote process call to release the dhcp port.""" cctxt = self.client.prepare() return cctxt.call(self.context, 'release_dhcp_port', network_id=network_id, device_id=device_id, host=self.host) def get_dhcp_port(self, port_id): """Make a remote process call to retrieve the dhcp port.""" cctxt = self.client.prepare(version='1.8') port = cctxt.call(self.context, 'get_dhcp_port', port_id=port_id) if port: return dhcp.DictModel(port) def dhcp_ready_on_ports(self, port_ids): """Notify the server that DHCP is configured for the port.""" cctxt = self.client.prepare(version='1.5') return cctxt.call(self.context, 'dhcp_ready_on_ports', port_ids=port_ids) def get_networks(self, filters=None, fields=None): """Get networks. :param filters: The filters to apply. E.g {"id" : ["", ...]} :param fields: A list of fields to collect, e.g ["id", "subnets"]. :return: A list of NetModel where each object represent a network. 
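A usage sketch (the id below is an illustrative placeholder, not a real UUID):

    nets = plugin_rpc.get_networks(
        filters={'id': ['<network-uuid>']},
        fields=['id', 'subnets'])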
""" cctxt = self.client.prepare(version='1.7') nets = cctxt.call(self.context, 'get_networks', filters=filters, fields=fields) return [dhcp.NetModel(net) for net in nets] class NetworkCache(object): """Agent cache of the current network state.""" def __init__(self): self.cache = {} self.subnet_lookup = {} self.port_lookup = {} self._deleted_ports = set() self._deleted_ports_ts = [] self.cleanup_loop = loopingcall.FixedIntervalLoopingCall( self.cleanup_deleted_ports) self.cleanup_loop.start(DELETED_PORT_MAX_AGE, initial_delay=DELETED_PORT_MAX_AGE) def is_port_message_stale(self, payload): orig = self.get_port_by_id(payload['id']) or {} if orig.get('revision_number', 0) > payload.get('revision_number', 0): return True if payload['id'] in self._deleted_ports: return True return False def get_port_ids(self, network_ids=None): if not network_ids: return self.port_lookup.keys() return (p_id for p_id, net in self.port_lookup.items() if net in network_ids) def get_network_ids(self): return self.cache.keys() def get_network_by_id(self, network_id): return self.cache.get(network_id) def get_network_by_subnet_id(self, subnet_id): return self.cache.get(self.subnet_lookup.get(subnet_id)) def get_network_by_port_id(self, port_id): return self.cache.get(self.port_lookup.get(port_id)) def put(self, network): if network.id in self.cache: self.remove(self.cache[network.id]) self.cache[network.id] = network non_local_subnets = getattr(network, 'non_local_subnets', []) for subnet in (network.subnets + non_local_subnets): self.subnet_lookup[subnet.id] = network.id for port in network.ports: self.port_lookup[port.id] = network.id def remove(self, network): del self.cache[network.id] non_local_subnets = getattr(network, 'non_local_subnets', []) for subnet in (network.subnets + non_local_subnets): del self.subnet_lookup[subnet.id] for port in network.ports: del self.port_lookup[port.id] def put_port(self, port): network = self.get_network_by_id(port.network_id) for index in range(len(network.ports)): if network.ports[index].id == port.id: network.ports[index] = port break else: network.ports.append(port) self.port_lookup[port.id] = network.id def remove_port(self, port): network = self.get_network_by_port_id(port.id) for index in range(len(network.ports)): if network.ports[index] == port: del network.ports[index] del self.port_lookup[port.id] break def get_port_by_id(self, port_id): network = self.get_network_by_port_id(port_id) if network: for port in network.ports: if port.id == port_id: return port def get_state(self): net_ids = self.get_network_ids() num_nets = len(net_ids) num_subnets = 0 num_ports = 0 for net_id in net_ids: network = self.get_network_by_id(net_id) non_local_subnets = getattr(network, 'non_local_subnets', []) num_subnets += len(network.subnets) num_subnets += len(non_local_subnets) num_ports += len(network.ports) return {'networks': num_nets, 'subnets': num_subnets, 'ports': num_ports} def add_to_deleted_ports(self, port_id): if port_id not in self._deleted_ports: self._deleted_ports.add(port_id) self._deleted_ports_ts.append((timeutils.utcnow_ts(), port_id)) def cleanup_deleted_ports(self): """Cleanup the "self._deleted_ports" set based on the current TS The variable "self._deleted_ports_ts" contains a timestamp ordered list of tuples (timestamp, port_id). Every port older than the current timestamp minus "timestamp_delta" will be deleted from "self._deleted_ports" and "self._deleted_ports_ts". 
""" timestamp_min = timeutils.utcnow_ts() - DELETED_PORT_MAX_AGE idx = None for idx, (ts, port_id) in enumerate(self._deleted_ports_ts): if ts > timestamp_min: break self._deleted_ports.remove(port_id) if idx: self._deleted_ports_ts = self._deleted_ports_ts[idx:] class DhcpAgentWithStateReport(DhcpAgent): def __init__(self, host=None, conf=None): super(DhcpAgentWithStateReport, self).__init__(host=host, conf=conf) self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS) self.failed_report_state = False self.agent_state = { 'binary': 'neutron-dhcp-agent', 'host': host, 'availability_zone': self.conf.AGENT.availability_zone, 'topic': topics.DHCP_AGENT, 'configurations': { 'dhcp_driver': self.conf.dhcp_driver, 'dhcp_lease_duration': self.conf.dhcp_lease_duration, 'log_agent_heartbeats': self.conf.AGENT.log_agent_heartbeats}, 'start_flag': True, 'agent_type': constants.AGENT_TYPE_DHCP} report_interval = self.conf.AGENT.report_interval if report_interval: self.heartbeat = loopingcall.FixedIntervalLoopingCall( self._report_state) self.heartbeat.start(interval=report_interval) def _report_state(self): try: self.agent_state.get('configurations').update( self.cache.get_state()) ctx = context.get_admin_context_without_session() agent_status = self.state_rpc.report_state( ctx, self.agent_state, True) if agent_status == agent_consts.AGENT_REVIVED: LOG.info("Agent has just been revived. " "Scheduling full sync") self.schedule_resync("Agent has just been revived") except AttributeError: # This means the server does not support report_state LOG.warning("Neutron server does not support state report. " "State report for this agent will be disabled.") self.heartbeat.stop() self.run() return except Exception: self.failed_report_state = True LOG.exception("Failed reporting state!") return if self.failed_report_state: self.failed_report_state = False LOG.info("Successfully reported state after a previous failure.") if self.agent_state.pop('start_flag', None): self.run() def agent_updated(self, context, payload): """Handle the agent_updated notification event.""" self.schedule_resync(_("Agent updated: %(payload)s") % {"payload": payload}) LOG.info("agent_updated by server side %s!", payload) def after_start(self): LOG.info("DHCP agent started") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/dhcp_agent.py0000644000175000017500000000360600000000000022406 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sys from neutron_lib.agent import topics from oslo_config import cfg from oslo_service import service from neutron.common import config as common_config from neutron.conf.agent import common as config from neutron.conf.agent import dhcp as dhcp_config from neutron.conf.agent.metadata import config as meta_conf from neutron.conf.plugins.ml2.drivers import ovs_conf from neutron import service as neutron_service def register_options(conf): config.register_interface_driver_opts_helper(conf) config.register_agent_state_opts_helper(conf) config.register_availability_zone_opts_helper(conf) dhcp_config.register_agent_dhcp_opts(conf) meta_conf.register_meta_conf_opts(meta_conf.SHARED_OPTS, conf) config.register_interface_opts(conf) config.register_root_helper(conf) ovs_conf.register_ovs_opts(conf) def main(): register_options(cfg.CONF) common_config.init(sys.argv[1:]) config.setup_logging() config.setup_privsep() server = neutron_service.Service.create( binary='neutron-dhcp-agent', topic=topics.DHCP_AGENT, report_interval=cfg.CONF.AGENT.report_interval, manager='neutron.agent.dhcp.agent.DhcpAgentWithStateReport') service.launch(cfg.CONF, server, restart_method='mutate').wait() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/firewall.py0000644000175000017500000001456400000000000022124 0ustar00coreycorey00000000000000# Copyright 2012, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import contextlib import six from neutron_lib.api.definitions import port_security as psec from neutron_lib import constants as n_const from neutron_lib.utils import runtime DIRECTION_IP_PREFIX = {n_const.INGRESS_DIRECTION: 'source_ip_prefix', n_const.EGRESS_DIRECTION: 'dest_ip_prefix'} # List of ICMPv6 types that should be permitted (ingress) by default. This list # depends on iptables conntrack behavior of recognizing ICMP errors (types 1-4) # as related traffic. ICMPV6_ALLOWED_INGRESS_TYPES = (n_const.ICMPV6_TYPE_MLD_QUERY, n_const.ICMPV6_TYPE_NS, n_const.ICMPV6_TYPE_NA) # List of ICMPv6 types that should be permitted (egress) by default. ICMPV6_ALLOWED_EGRESS_TYPES = (n_const.ICMPV6_TYPE_MLD_QUERY, n_const.ICMPV6_TYPE_RS, n_const.ICMPV6_TYPE_NS, n_const.ICMPV6_TYPE_NA) def port_sec_enabled(port): return port.get(psec.PORTSECURITY, True) def load_firewall_driver_class(driver): return runtime.load_class_by_alias_or_classname( 'neutron.agent.firewall_drivers', driver) @six.add_metaclass(abc.ABCMeta) class FirewallDriver(object): """Firewall Driver base class. Defines methods that any driver providing security groups and provider firewall functionality should implement. Note port attribute should have information of security group ids and security group rules. 
the port dict should carry: device: interface name; fixed_ips: IPs of the device; mac_address: MAC address of the device; security_groups: [sgid, sgid]; security_group_rules: [rule, rule]. A rule must contain ethertype and direction; a rule may contain security_group_id, protocol, port_min, port_max, source_ip_prefix, source_port_min, source_port_max, dest_ip_prefix, and remote_group_id. Note: source_group_ip in the REST API should be converted by this rule: if direction is ingress, remote_group_ip will be a source_ip_prefix; if direction is egress, remote_group_ip will be a dest_ip_prefix. Note: remote_group_id in the REST API should be converted by this rule: if direction is ingress, remote_group_id will be a list of source_ip_prefix; if direction is egress, remote_group_id will be a list of dest_ip_prefix. remote_group_id also entails ongoing membership update management. """ # OVS agent installs arp spoofing openflow rules. If the firewall is capable # of handling that, the ovs agent doesn't need to install the protection. provides_arp_spoofing_protection = False @abc.abstractmethod def prepare_port_filter(self, port): """Prepare filters for the port. This method should be called before the port is created. """ def apply_port_filter(self, port): """Apply port filter. Once this method returns, the port should be firewalled appropriately. This method should as far as possible be a no-op. It's vastly preferred to get everything set up in prepare_port_filter. """ raise NotImplementedError() @abc.abstractmethod def update_port_filter(self, port): """Refresh security group rules from the data store. Gets called when a port gets added to or removed from the security group the port is a member of, or if the group gains or loses a rule. """ def remove_port_filter(self, port): """Stop filtering port.""" raise NotImplementedError() def filter_defer_apply_on(self): """Defer application of filtering rule.""" pass def filter_defer_apply_off(self): """Turn off deferral of rules and apply the rules now.""" pass @property def ports(self): """Returns filtered ports.""" pass @contextlib.contextmanager def defer_apply(self): """Defer apply context.""" self.filter_defer_apply_on() try: yield finally: self.filter_defer_apply_off() def update_security_group_members(self, sg_id, ips): """Update group members in a security group.""" raise NotImplementedError() def update_security_group_rules(self, sg_id, rules): """Update rules in a security group.""" raise NotImplementedError() def security_group_updated(self, action_type, sec_group_ids, device_id=None): """Called when a security group is updated. Note: This method needs to be implemented by the firewall drivers which use enhanced RPC for security_groups. """ raise NotImplementedError() def process_trusted_ports(self, port_ids): """Process ports that are trusted and shouldn't be filtered.""" pass def remove_trusted_ports(self, port_ids): pass class NoopFirewallDriver(FirewallDriver): """Noop Firewall Driver. Firewall driver which does nothing. This driver is for disabling the firewall functionality.
""" def prepare_port_filter(self, port): pass def apply_port_filter(self, port): pass def update_port_filter(self, port): pass def remove_port_filter(self, port): pass def filter_defer_apply_on(self): pass def filter_defer_apply_off(self): pass @property def ports(self): return {} def update_security_group_members(self, sg_id, ips): pass def update_security_group_rules(self, sg_id, rules): pass def security_group_updated(self, action_type, sec_group_ids, device_id=None): pass ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.1950436 neutron-16.0.0.0b2.dev214/neutron/agent/l2/0000755000175000017500000000000000000000000020250 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l2/__init__.py0000644000175000017500000000000000000000000022347 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.1950436 neutron-16.0.0.0b2.dev214/neutron/agent/l2/extensions/0000755000175000017500000000000000000000000022447 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l2/extensions/__init__.py0000644000175000017500000000000000000000000024546 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l2/extensions/fdb_population.py0000644000175000017500000001704400000000000026034 0ustar00coreycorey00000000000000# Copyright (c) 2016 Mellanox Technologies, Ltd # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from neutron_lib.agent import l2_extension from neutron_lib import constants from neutron_lib.utils import helpers from oslo_config import cfg from oslo_log import log as logging from neutron.agent.linux import bridge_lib from neutron.conf.agent import l2_ext_fdb_population from neutron.plugins.ml2.drivers.linuxbridge.agent.common import ( constants as linux_bridge_constants) from neutron.plugins.ml2.drivers.openvswitch.agent.common import ( constants as ovs_constants) l2_ext_fdb_population.register_fdb_population_opts() LOG = logging.getLogger(__name__) class FdbPopulationAgentExtension( l2_extension.L2AgentExtension): """The FDB population is an agent extension to OVS or linux bridge who's objective is to update the FDB table for existing instance using normal port, thus enabling communication between SR-IOV instances and normal instances. 
Additional information describing the problem can be found here: http://events.linuxfoundation.org/sites/events/files/slides/LinuxConJapan2014_makita_0.pdf """ # FDB updates are triggered for ports with a certain device_owner only: # - device owner "compute": updates the FDB with normal port instances, # required in order to enable communication between # SR-IOV direct port instances and normal port instances. # - device owner "router_interface": updates the FDB with OVS/LB ports, # required in order to enable communication for SR-IOV instances # with floating IPs that are located on the network node. # - device owner "DHCP": updates the FDB with the dhcp server. # When the lease expires a unicast renew message is sent # to the dhcp server. In case the FDB is not updated # the message will be sent to the wire, causing the message # to get lost in case the sender uses a direct port and is # located on the same hypervisor as the network node. PERMITTED_DEVICE_OWNERS = {constants.DEVICE_OWNER_COMPUTE_PREFIX, constants.DEVICE_OWNER_ROUTER_INTF, constants.DEVICE_OWNER_DHCP} class FdbTableTracker(object): """FDB table tracker is a helper class intended to keep track of the existing FDB rules. """ def __init__(self, devices): self.device_to_macs = {} self.portid_to_mac = {} # update macs already in the physical interface's FDB table for device in devices: try: _stdout = bridge_lib.FdbInterface.show(device) except RuntimeError as e: LOG.warning( 'Unable to find FDB Interface %(device)s. ' 'Exception: %(e)s', {'device': device, 'e': e}) continue self.device_to_macs[device] = _stdout.split()[::3] def update_port(self, device, port_id, mac): # check if device is updated if self.device_to_macs.get(device) == mac: return # delete invalid port_id's mac from the FDB, # in case the port was updated to another mac self.delete_port([device], port_id) # update port id self.portid_to_mac[port_id] = mac # check if rule for mac already exists if mac in self.device_to_macs[device]: return try: bridge_lib.FdbInterface.add(mac, device) except RuntimeError as e: LOG.warning( 'Unable to add mac %(mac)s ' 'to FDB Interface %(device)s. ' 'Exception: %(e)s', {'mac': mac, 'device': device, 'e': e}) return self.device_to_macs[device].append(mac) def delete_port(self, devices, port_id): mac = self.portid_to_mac.get(port_id) if mac is None: LOG.warning('Port Id %(port_id)s does not have a rule for ' 'devices %(devices)s in FDB table', {'port_id': port_id, 'devices': devices}) return for device in devices: if mac in self.device_to_macs[device]: try: bridge_lib.FdbInterface.delete(mac, device) except RuntimeError as e: LOG.warning( 'Unable to delete mac %(mac)s ' 'from FDB Interface %(device)s. 
' 'Exception: %(e)s', {'mac': mac, 'device': device, 'e': e}) return self.device_to_macs[device].remove(mac) del self.portid_to_mac[port_id] # class FdbPopulationAgentExtension implementation: def initialize(self, connection, driver_type): """Perform FDB Agent Extension initialization.""" valid_driver_types = (linux_bridge_constants.EXTENSION_DRIVER_TYPE, ovs_constants.EXTENSION_DRIVER_TYPE) if driver_type not in valid_driver_types: LOG.error('FDB extension is only supported for OVS and ' 'linux bridge agent, currently uses ' '%(driver_type)s', {'driver_type': driver_type}) sys.exit(1) self.device_mappings = helpers.parse_mappings( cfg.CONF.FDB.shared_physical_device_mappings, unique_keys=False) devices = self._get_devices() if not devices: LOG.error('Invalid configuration provided for FDB extension: ' 'no physical devices') sys.exit(1) self.fdb_tracker = self.FdbTableTracker(devices) def handle_port(self, context, details): """Handle agent FDB population extension for port.""" device_owner = details['device_owner'] if self._is_valid_device_owner(device_owner): mac = details['mac_address'] port_id = details['port_id'] physnet = details.get('physical_network') if physnet and physnet in self.device_mappings: for device in self.device_mappings[physnet]: self.fdb_tracker.update_port(device, port_id, mac) def delete_port(self, context, details): """Delete port from FDB population extension.""" port_id = details['port_id'] devices = self._get_devices() self.fdb_tracker.delete_port(devices, port_id) def _get_devices(self): def _flatten_list(l): return [item for sublist in l for item in sublist] return _flatten_list(self.device_mappings.values()) def _is_valid_device_owner(self, device_owner): for permitted_device_owner in self.PERMITTED_DEVICE_OWNERS: if device_owner.startswith(permitted_device_owner): return True return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l2/extensions/qos.py0000644000175000017500000002760700000000000023637 0ustar00coreycorey00000000000000# Copyright (c) 2015 Mellanox Technologies, Ltd # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import collections from neutron_lib.agent import l2_extension from neutron_lib import constants from neutron_lib.services.qos import constants as qos_consts from oslo_concurrency import lockutils from oslo_log import log as logging import six from neutron.api.rpc.callbacks.consumer import registry from neutron.api.rpc.callbacks import events from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import resources_rpc from neutron import manager LOG = logging.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class QosAgentDriver(object): """Defines stable abstract interface for QoS Agent Driver. QoS Agent driver defines the interface to be implemented by Agent for applying QoS Rules on a port. 
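For illustration, a driver whose SUPPORTED_RULES include the bandwidth_limit rule type would expose handlers such as (names follow the convention spelled out in the comments below):

    create_bandwidth_limit(port, rule)
    update_bandwidth_limit(port, rule)
    delete_bandwidth_limit(port)
    delete_bandwidth_limit_ingress(port)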
""" # Each QoS driver should define the set of rule types that it supports, and # corresponding handlers that has the following names: # # create_ # update_ # delete_ # # where is one of VALID_RULE_TYPES # There is exception from this rule for deletion of rules with # attribute direction set to ingress (e.g. bandwidth limit rule). # For deletion of such rule types delete handler has following name: # delete__ingress @abc.abstractmethod def initialize(self): """Perform QoS agent driver initialization. """ def create(self, port, qos_policy): """Apply QoS rules on port for the first time. :param port: port object. :param qos_policy: the QoS policy to be applied on port. """ self._handle_update_create_rules('create', port, qos_policy) def consume_api(self, agent_api): """Consume the AgentAPI instance from the QoSAgentExtension class This allows QosAgentDrivers to gain access to resources limited to the NeutronAgent when this method is overridden. :param agent_api: An instance of an agent specific API """ def update(self, port, qos_policy): """Apply QoS rules on port. :param port: port object. :param qos_policy: the QoS policy to be applied on port. """ self._handle_update_create_rules('update', port, qos_policy) def delete(self, port, qos_policy=None): """Remove QoS rules from port. :param port: port object. :param qos_policy: the QoS policy to be removed from port. """ if qos_policy is None: for rule_type in self.SUPPORTED_RULES: self._handle_rule_delete(port, rule_type) if self._rule_type_has_ingress_direction(rule_type): self._handle_rule_delete(port, rule_type, ingress=True) else: for rule in self._iterate_rules(qos_policy.rules): self._handle_rule_delete( port, rule.rule_type, ingress=self._rule_is_ingress_direction(rule)) def _iterate_rules(self, rules): for rule in rules: rule_type = rule.rule_type if rule_type in self.SUPPORTED_RULES: yield rule else: LOG.warning('Unsupported QoS rule type for %(rule_id)s: ' '%(rule_type)s; skipping', {'rule_id': rule.id, 'rule_type': rule_type}) def _handle_rule_delete(self, port, rule_type, ingress=False): handler_name = "".join(("delete_", rule_type)) if ingress: handler_name = "%s_%s" % (handler_name, constants.INGRESS_DIRECTION) handler = getattr(self, handler_name) handler(port) def _handle_update_create_rules(self, action, port, qos_policy): for rule in self._iterate_rules(qos_policy.rules): if rule.should_apply_to_port(port): handler_name = "".join((action, "_", rule.rule_type)) handler = getattr(self, handler_name) handler(port, rule) else: LOG.debug("Port %(port)s excluded from QoS rule %(rule)s", {'port': port, 'rule': rule.id}) def _rule_type_has_ingress_direction(self, rule_type): supported_rule = self.SUPPORTED_RULES[rule_type] if qos_consts.DIRECTION not in supported_rule.keys(): return False return (constants.INGRESS_DIRECTION in supported_rule[qos_consts.DIRECTION]['type:values']) def _rule_is_ingress_direction(self, rule): rule_direction = getattr(rule, "direction", constants.EGRESS_DIRECTION) return rule_direction == constants.INGRESS_DIRECTION class PortPolicyMap(object): def __init__(self): # we cannot use a dict of sets here because port dicts are not hashable self.qos_policy_ports = collections.defaultdict(dict) self.known_policies = {} self.port_policies = {} def get_ports(self, policy): return self.qos_policy_ports[policy.id].values() def get_policy(self, policy_id): return self.known_policies.get(policy_id) def update_policy(self, policy): self.known_policies[policy.id] = policy def has_policy_changed(self, port, policy_id): 
return self.port_policies.get(port['port_id']) != policy_id def get_port_policy(self, port): policy_id = self.port_policies.get(port['port_id']) if policy_id: return self.get_policy(policy_id) def set_port_policy(self, port, policy): """Attach a port to policy and return any previous policy on port.""" port_id = port['port_id'] old_policy = self.get_port_policy(port) self.known_policies[policy.id] = policy self.port_policies[port_id] = policy.id self.qos_policy_ports[policy.id][port_id] = port if old_policy and old_policy.id != policy.id: del self.qos_policy_ports[old_policy.id][port_id] return old_policy def clean_by_port(self, port): """Detach port from policy and cleanup data we don't need anymore.""" port_id = port['port_id'] if port_id in self.port_policies: del self.port_policies[port_id] for qos_policy_id, port_dict in self.qos_policy_ports.items(): if port_id in port_dict: del port_dict[port_id] if not port_dict: self._clean_policy_info(qos_policy_id) return LOG.debug("QoS extension did not have information on port %s", port_id) def _clean_policy_info(self, qos_policy_id): del self.qos_policy_ports[qos_policy_id] del self.known_policies[qos_policy_id] class QosAgentExtension(l2_extension.L2AgentExtension): SUPPORTED_RESOURCE_TYPES = [resources.QOS_POLICY] def initialize(self, connection, driver_type): """Initialize agent extension.""" self.resource_rpc = resources_rpc.ResourcesPullRpcApi() self.qos_driver = manager.NeutronManager.load_class_for_provider( 'neutron.qos.agent_drivers', driver_type)() self.qos_driver.consume_api(self.agent_api) self.qos_driver.initialize() self.policy_map = PortPolicyMap() self._register_rpc_consumers(connection) def consume_api(self, agent_api): """Allows an extension to gain access to resources internal to the neutron agent and otherwise unavailable to the extension. """ self.agent_api = agent_api def _register_rpc_consumers(self, connection): """Allows an extension to receive notifications of updates made to items of interest. """ endpoints = [resources_rpc.ResourcesPushRpcCallback()] for resource_type in self.SUPPORTED_RESOURCE_TYPES: # We assume that the neutron server always broadcasts the latest # version known to the agent registry.register(self._handle_notification, resource_type) topic = resources_rpc.resource_type_versioned_topic(resource_type) connection.create_consumer(topic, endpoints, fanout=True) @lockutils.synchronized('qos-port') def _handle_notification(self, context, resource_type, qos_policies, event_type): # server does not allow to remove a policy that is attached to any # port, so we ignore DELETED events. Also, if we receive a CREATED # event for a policy, it means that there are no ports so far that are # attached to it. That's why we are interested in UPDATED events only if event_type == events.UPDATED: for qos_policy in qos_policies: self._process_update_policy(qos_policy) @lockutils.synchronized('qos-port') def handle_port(self, context, port): """Handle agent QoS extension for port. This method applies a new policy to a port using the QoS driver. Update events are handled in _handle_notification. 
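Resolution order, as implemented below: the port-level qos_policy_id takes precedence over network_qos_policy_id, and when neither is set the port's QoS state is reset.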
""" port_id = port['port_id'] port_qos_policy_id = port.get('qos_policy_id') network_qos_policy_id = port.get('network_qos_policy_id') qos_policy_id = port_qos_policy_id or network_qos_policy_id if qos_policy_id is None: self._process_reset_port(port) return if not self.policy_map.has_policy_changed(port, qos_policy_id): return qos_policy = self.policy_map.get_policy( qos_policy_id) or self.resource_rpc.pull( context, resources.QOS_POLICY, qos_policy_id) if qos_policy is None: LOG.info("QoS policy %(qos_policy_id)s applied to port " "%(port_id)s is not available on server, " "it has been deleted. Skipping.", {'qos_policy_id': qos_policy_id, 'port_id': port_id}) self._process_reset_port(port) else: old_qos_policy = self.policy_map.set_port_policy(port, qos_policy) # Before applying the new rules, the old rules should be cleared, # even if the old_qos_policy is None, # to avoid the data being out of sync before the l2-agent starts. self.qos_driver.delete(port, old_qos_policy) if qos_policy.rules: self.qos_driver.create(port, qos_policy) def delete_port(self, context, port): self._process_reset_port(port) def _policy_rules_modified(self, old_policy, policy): return not (len(old_policy.rules) == len(policy.rules) and all(i in old_policy.rules for i in policy.rules)) def _process_update_policy(self, qos_policy): old_qos_policy = self.policy_map.get_policy(qos_policy.id) if old_qos_policy: if self._policy_rules_modified(old_qos_policy, qos_policy): for port in self.policy_map.get_ports(qos_policy): # NOTE(QoS): for now, just reflush the rules on the port. # Later, we may want to apply the difference # between the old and new rule lists. self.qos_driver.delete(port, old_qos_policy) self.qos_driver.update(port, qos_policy) self.policy_map.update_policy(qos_policy) def _process_reset_port(self, port): self.policy_map.clean_by_port(port) self.qos_driver.delete(port) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l2/extensions/qos_linux.py0000644000175000017500000000220600000000000025042 0ustar00coreycorey00000000000000# Copyright (c) 2017 Cloudbase Solutions # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent.l2.extensions import qos from neutron.agent.linux import tc_lib class QosLinuxAgentDriver(qos.QosAgentDriver): def _get_egress_burst_value(self, rule): """Return burst value used for egress bandwidth limitation. Because Egress bw_limit is done on ingress qdisc by LB and ovs drivers so it will return burst_value used by tc on as ingress_qdisc. 
""" return tc_lib.TcCommand.get_ingress_qdisc_burst_value( rule.max_kbps, rule.max_burst_kbps) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l2/l2_agent_extensions_manager.py0000644000175000017500000000440500000000000026271 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from neutron.agent import agent_extensions_manager as agent_ext_manager from neutron.conf.agent import agent_extensions_manager as agent_ext_mgr_config LOG = log.getLogger(__name__) L2_AGENT_EXT_MANAGER_NAMESPACE = 'neutron.agent.l2.extensions' def register_opts(conf): agent_ext_mgr_config.register_agent_ext_manager_opts(conf) class L2AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager): """Manage l2 agent extensions. The handle_port and delete_port methods are guaranteed to be attributes of each extension because they have been marked as abc.abstractmethod in the extensions' abstract class. """ def __init__(self, conf): super(L2AgentExtensionsManager, self).__init__(conf, L2_AGENT_EXT_MANAGER_NAMESPACE) def handle_port(self, context, data): """Notify all agent extensions to handle port.""" for extension in self: if hasattr(extension.obj, 'handle_port'): extension.obj.handle_port(context, data) else: LOG.error( "Agent Extension '%(name)s' does not " "implement method handle_port", {'name': extension.name} ) def delete_port(self, context, data): """Notify all agent extensions to delete port.""" for extension in self: if hasattr(extension.obj, 'delete_port'): extension.obj.delete_port(context, data) else: LOG.error( "Agent Extension '%(name)s' does not " "implement method delete_port", {'name': extension.name} ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.1990438 neutron-16.0.0.0b2.dev214/neutron/agent/l3/0000755000175000017500000000000000000000000020251 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/__init__.py0000644000175000017500000000000000000000000022350 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/agent.py0000644000175000017500000012473100000000000021731 0ustar00coreycorey00000000000000# Copyright 2012 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # import functools import eventlet import netaddr from neutron_lib.agent import constants as agent_consts from neutron_lib.agent import topics from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as lib_const from neutron_lib import context as n_context from neutron_lib.exceptions import l3 as l3_exc from neutron_lib import rpc as n_rpc from oslo_concurrency import lockutils from oslo_config import cfg from oslo_context import context as common_context from oslo_log import log as logging import oslo_messaging from oslo_serialization import jsonutils from oslo_service import loopingcall from oslo_service import periodic_task from oslo_utils import excutils from oslo_utils import netutils from oslo_utils import timeutils from osprofiler import profiler from neutron.agent.common import resource_processing_queue as queue from neutron.agent.common import utils as common_utils from neutron.agent.l3 import dvr from neutron.agent.l3 import dvr_edge_ha_router from neutron.agent.l3 import dvr_edge_router as dvr_router from neutron.agent.l3 import dvr_local_router from neutron.agent.l3 import ha from neutron.agent.l3 import ha_router from neutron.agent.l3 import l3_agent_extension_api as l3_ext_api from neutron.agent.l3 import l3_agent_extensions_manager as l3_ext_manager from neutron.agent.l3 import legacy_router from neutron.agent.l3 import namespace_manager from neutron.agent.linux import external_process from neutron.agent.linux import pd from neutron.agent.linux import utils as linux_utils from neutron.agent.metadata import driver as metadata_driver from neutron.agent import rpc as agent_rpc from neutron.common import utils from neutron import manager LOG = logging.getLogger(__name__) # Number of routers to fetch from server at a time on resync. # Needed to reduce load on server side and to speed up resync on agent side. SYNC_ROUTERS_MAX_CHUNK_SIZE = 256 SYNC_ROUTERS_MIN_CHUNK_SIZE = 32 # Priorities - lower value is higher priority PRIORITY_RELATED_ROUTER = 0 PRIORITY_RPC = 1 PRIORITY_SYNC_ROUTERS_TASK = 2 PRIORITY_PD_UPDATE = 3 # Actions DELETE_ROUTER = 1 DELETE_RELATED_ROUTER = 2 ADD_UPDATE_ROUTER = 3 ADD_UPDATE_RELATED_ROUTER = 4 PD_UPDATE = 5 RELATED_ACTION_MAP = {DELETE_ROUTER: DELETE_RELATED_ROUTER, ADD_UPDATE_ROUTER: ADD_UPDATE_RELATED_ROUTER} ROUTER_PROCESS_GREENLET_MAX = 32 ROUTER_PROCESS_GREENLET_MIN = 8 def log_verbose_exc(message, router_payload): LOG.exception(message) LOG.debug("Payload:\n%s", utils.DelayedStringRenderer(jsonutils.dumps, router_payload, indent=5)) class L3PluginApi(object): """Agent side of the l3 agent RPC API. API version history: 1.0 - Initial version. 1.1 - Floating IP operational status updates 1.2 - DVR support: new L3 plugin methods added. - get_ports_by_subnet - get_agent_gateway_port Needed by the agent when operating in DVR/DVR_SNAT mode 1.3 - Get the list of activated services 1.4 - Added L3 HA update_router_state. This method was reworked in to update_ha_routers_states 1.5 - Added update_ha_routers_states 1.6 - Added process_prefix_update 1.7 - DVR support: new L3 plugin methods added. 
- delete_agent_gateway_port 1.8 - Added address scope information 1.9 - Added get_router_ids 1.10 Added update_all_ha_network_port_statuses 1.11 Added get_host_ha_router_count 1.12 Added get_networks 1.13 Removed get_external_network_id """ def __init__(self, topic, host): self.host = host target = oslo_messaging.Target(topic=topic, version='1.0') self.client = n_rpc.get_client(target) @utils.timecost def get_routers(self, context, router_ids=None): """Make a remote process call to retrieve the sync data for routers.""" cctxt = self.client.prepare() return cctxt.call(context, 'sync_routers', host=self.host, router_ids=router_ids) @utils.timecost def update_all_ha_network_port_statuses(self, context): """Make a remote process call to update HA network port status.""" cctxt = self.client.prepare(version='1.10') return cctxt.call(context, 'update_all_ha_network_port_statuses', host=self.host) @utils.timecost def get_router_ids(self, context): """Make a remote process call to retrieve scheduled routers ids.""" cctxt = self.client.prepare(version='1.9') return cctxt.call(context, 'get_router_ids', host=self.host) @utils.timecost def update_floatingip_statuses(self, context, router_id, fip_statuses): """Call the plugin update floating IPs's operational status.""" cctxt = self.client.prepare(version='1.1') return cctxt.call(context, 'update_floatingip_statuses', router_id=router_id, fip_statuses=fip_statuses) @utils.timecost def get_ports_by_subnet(self, context, subnet_id): """Retrieve ports by subnet id.""" cctxt = self.client.prepare(version='1.2') return cctxt.call(context, 'get_ports_by_subnet', host=self.host, subnet_id=subnet_id) @utils.timecost def get_agent_gateway_port(self, context, fip_net): """Get or create an agent_gateway_port.""" cctxt = self.client.prepare(version='1.2') return cctxt.call(context, 'get_agent_gateway_port', network_id=fip_net, host=self.host) @utils.timecost def get_service_plugin_list(self, context): """Make a call to get the list of activated services.""" cctxt = self.client.prepare(version='1.3') return cctxt.call(context, 'get_service_plugin_list') @utils.timecost def update_ha_routers_states(self, context, states): """Update HA routers states.""" cctxt = self.client.prepare(version='1.5') return cctxt.cast(context, 'update_ha_routers_states', host=self.host, states=states) @utils.timecost def process_prefix_update(self, context, prefix_update): """Process prefix update whenever prefixes get changed.""" cctxt = self.client.prepare(version='1.6') return cctxt.call(context, 'process_prefix_update', subnets=prefix_update) @utils.timecost def delete_agent_gateway_port(self, context, fip_net): """Delete Floatingip_agent_gateway_port.""" cctxt = self.client.prepare(version='1.7') return cctxt.call(context, 'delete_agent_gateway_port', host=self.host, network_id=fip_net) @utils.timecost def get_host_ha_router_count(self, context): """Make a call to get the count of HA router.""" cctxt = self.client.prepare(version='1.11') return cctxt.call(context, 'get_host_ha_router_count', host=self.host) def get_networks(self, context, filters=None, fields=None): """Get networks. :param context: Security context :param filters: The filters to apply. E.g {"id" : ["", ...]} :param fields: A list of fields to collect, e.g ["id", "subnets"]. :return: A list of dicts where each dict represent a network object. 
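A usage sketch (the id below is an illustrative placeholder, not a real UUID):

    nets = plugin_rpc.get_networks(
        context, filters={'id': ['<network-uuid>']},
        fields=['id', 'subnets'])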
""" cctxt = self.client.prepare(version='1.12') return cctxt.call( context, 'get_networks', filters=filters, fields=fields) class RouterFactory(object): def __init__(self): self._routers = {} def register(self, features, router_cls): """Register router class which implements BaseRouterInfo Features which is a list of strings converted to frozenset internally for key uniqueness. :param features: a list of strings of router's features :param router_cls: a router class which implements BaseRouterInfo """ self._routers[frozenset(features)] = router_cls def create(self, features, **kwargs): """Create router instance with registered router class :param features: a list of strings of router's features :param kwargs: arguments for router class :returns: a router instance which implements BaseRouterInfo :raises: n_exc.RouterNotFoundInRouterFactory """ try: router = self._routers[frozenset(features)] return router(**kwargs) except KeyError: exc = l3_exc.RouterNotFoundInRouterFactory( router_id=kwargs['router_id'], features=features) LOG.exception(exc.msg) raise exc @profiler.trace_cls("l3-agent") class L3NATAgent(ha.AgentMixin, dvr.AgentMixin, manager.Manager): """Manager for L3NatAgent API version history: 1.0 initial Version 1.1 changed the type of the routers parameter to the routers_updated method. It was previously a list of routers in dict format. It is now a list of router IDs only. Per rpc versioning rules, it is backwards compatible. 1.2 - DVR support: new L3 agent methods added. - add_arp_entry - del_arp_entry 1.3 - fipnamespace_delete_on_ext_net - to delete fipnamespace after the external network is removed Needed by the L3 service when dealing with DVR 1.4 - support network_update to get MTU updates """ target = oslo_messaging.Target(version='1.4') def __init__(self, host, conf=None): if conf: self.conf = conf else: self.conf = cfg.CONF self.check_config() self.router_info = {} self.router_factory = RouterFactory() self._register_router_cls(self.router_factory) self._check_config_params() self.process_monitor = external_process.ProcessMonitor( config=self.conf, resource_type='router') self._context = n_context.get_admin_context_without_session() self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host) self.driver = common_utils.load_interface_driver( self.conf, get_networks_callback=functools.partial( self.plugin_rpc.get_networks, self.context)) self.fullsync = True self.sync_routers_chunk_size = SYNC_ROUTERS_MAX_CHUNK_SIZE self._exiting = False # Get the HA router count from Neutron Server # This is the first place where we contact neutron-server on startup # so retry in case its not ready to respond. while True: try: self.ha_router_count = int( self.plugin_rpc.get_host_ha_router_count(self.context)) except oslo_messaging.MessagingTimeout as e: LOG.warning('l3-agent cannot contact neutron server ' 'to retrieve HA router count. ' 'Check connectivity to neutron server. ' 'Retrying... 
' 'Detailed message: %(msg)s.', {'msg': e}) continue break LOG.info("Agent HA routers count %s", self.ha_router_count) self.init_extension_manager(self.plugin_rpc) self.metadata_driver = None if self.conf.enable_metadata_proxy: self.metadata_driver = metadata_driver.MetadataDriver(self) self.namespaces_manager = namespace_manager.NamespaceManager( self.conf, self.driver, self.metadata_driver) # L3 agent router processing green pool self._pool_size = ROUTER_PROCESS_GREENLET_MIN self._pool = eventlet.GreenPool(size=self._pool_size) self._queue = queue.ResourceProcessingQueue() super(L3NATAgent, self).__init__(host=self.conf.host) self.target_ex_net_id = None self.use_ipv6 = netutils.is_ipv6_enabled() self.pd = pd.PrefixDelegation(self.context, self.process_monitor, self.driver, self.plugin_rpc.process_prefix_update, self.create_pd_router_update, self.conf) # Consume network updates to trigger router resync consumers = [[topics.NETWORK, topics.UPDATE]] agent_rpc.create_consumers([self], topics.AGENT, consumers) self._check_ha_router_process_status() def check_config(self): if self.conf.cleanup_on_shutdown: LOG.warning("cleanup_on_shutdown is set to True, so L3 agent will " "cleanup all its routers when exiting, " "data-plane will be affected.") def _check_ha_router_process_status(self): """Check HA router VRRP process status in network node. Check if the HA router HA routers VRRP (keepalived) process count and state change python monitor process count meet the expected quantity. If so, l3-agent will not call neutron to set all related HA port to down state, this can prevent some unexpected VRRP re-election. If not, a physical host may have down and just restarted, set HA network port status to DOWN. """ if (self.conf.agent_mode not in [lib_const.L3_AGENT_MODE_DVR_SNAT, lib_const.L3_AGENT_MODE_LEGACY]): return if self.ha_router_count <= 0: return # HA routers VRRP (keepalived) process count vrrp_pcount = linux_utils.get_process_count_by_name("keepalived") LOG.debug("VRRP process count %s.", vrrp_pcount) # HA routers state change python monitor process count vrrp_st_pcount = linux_utils.get_process_count_by_name( "neutron-keepalived-state-change") LOG.debug("neutron-keepalived-state-change process count %s.", vrrp_st_pcount) # Due to the process structure design of keepalived and the current # config of l3-ha router, it will run one main 'keepalived' process # and a child 'VRRP' process. So in the following check, we divided # number of processes by 2 to match the ha router count. if (not (vrrp_pcount / 2 >= self.ha_router_count and vrrp_st_pcount >= self.ha_router_count)): LOG.debug("Call neutron server to set HA port to DOWN state.") try: # We set HA network port status to DOWN to let l2 agent # update it to ACTIVE after wiring. This allows us to spawn # keepalived only when l2 agent finished wiring the port. self.plugin_rpc.update_all_ha_network_port_statuses( self.context) except Exception: LOG.exception('update_all_ha_network_port_statuses failed') def _register_router_cls(self, factory): factory.register([], legacy_router.LegacyRouter) factory.register(['ha'], ha_router.HaRouter) if self.conf.agent_mode == lib_const.L3_AGENT_MODE_DVR_SNAT: factory.register(['distributed'], dvr_router.DvrEdgeRouter) factory.register(['ha', 'distributed'], dvr_edge_ha_router.DvrEdgeHaRouter) else: factory.register(['distributed'], dvr_local_router.DvrLocalRouter) factory.register(['ha', 'distributed'], dvr_local_router.DvrLocalRouter) def _check_config_params(self): """Check items in configuration files. 
Check for required and invalid configuration items. The actual values are not verified for correctness. """ if not self.conf.interface_driver: msg = 'An interface driver must be specified' LOG.error(msg) raise SystemExit(1) if self.conf.ipv6_gateway: # ipv6_gateway configured. Check for valid v6 link-local address. try: msg = ("%s used in config as ipv6_gateway is not a valid " "IPv6 link-local address.") ip_addr = netaddr.IPAddress(self.conf.ipv6_gateway) if ip_addr.version != 6 or not ip_addr.is_link_local(): LOG.error(msg, self.conf.ipv6_gateway) raise SystemExit(1) except netaddr.AddrFormatError: LOG.error(msg, self.conf.ipv6_gateway) raise SystemExit(1) def _create_router(self, router_id, router): kwargs = { 'agent': self, 'router_id': router_id, 'router': router, 'use_ipv6': self.use_ipv6, 'agent_conf': self.conf, 'interface_driver': self.driver, } features = [] if router.get('distributed'): features.append('distributed') kwargs['host'] = self.host if router.get('ha'): features.append('ha') kwargs['state_change_callback'] = self.enqueue_state_change if router.get('distributed') and router.get('ha'): # Case 1: If the router contains information about the HA interface # and if the requesting agent is a DVR_SNAT agent then go ahead # and create a HA router. # Case 2: If the router does not contain information about the HA # interface this means that this DVR+HA router needs to host only # the edge side of it, typically because it's landing on a node # that needs to provision a router namespace because of a DVR # service port (e.g. DHCP). So go ahead and create a regular DVR # edge router. if (not router.get(lib_const.HA_INTERFACE_KEY) or self.conf.agent_mode != lib_const.L3_AGENT_MODE_DVR_SNAT): features.remove('ha') kwargs.pop('state_change_callback') return self.router_factory.create(features, **kwargs) @lockutils.synchronized('resize_greenpool') def _resize_process_pool(self): pool_size = max([ROUTER_PROCESS_GREENLET_MIN, min([ROUTER_PROCESS_GREENLET_MAX, len(self.router_info)])]) if pool_size == self._pool_size: return LOG.info("Resizing router processing queue green pool size to: %d", pool_size) self._pool.resize(pool_size) self._pool_size = pool_size def _router_added(self, router_id, router): ri = self._create_router(router_id, router) registry.notify(resources.ROUTER, events.BEFORE_CREATE, self, router=ri) self.router_info[router_id] = ri # If initialize() fails, cleanup and retrigger complete sync try: ri.initialize(self.process_monitor) except Exception: with excutils.save_and_reraise_exception(): del self.router_info[router_id] LOG.exception('Error while initializing router %s', router_id) self.namespaces_manager.ensure_router_cleanup(router_id) try: ri.delete() except Exception: LOG.exception('Error while deleting router %s', router_id) self._resize_process_pool() def _safe_router_removed(self, router_id): """Try to delete a router and return True if successful.""" # The l3_ext_manager API expects a router dict, look it up ri = self.router_info.get(router_id) try: self._router_removed(ri, router_id) if ri: self.l3_ext_manager.delete_router(self.context, ri.router) except Exception: LOG.exception('Error while deleting router %s', router_id) return False self._resize_process_pool() return True def _router_removed(self, ri, router_id): """Delete the router and stop the auxiliary processes This stops the auxiliary processes (keepalived, keepvalived-state- change, radvd, etc) and deletes the router ports and the namespace. 
The "router_info" cache is updated too at the beginning of the process, to avoid any other concurrent process to handle the router being deleted. If an exception is raised, the "router_info" cache is restored. """ if ri is None: LOG.warning("Info for router %s was not found. " "Performing router cleanup", router_id) self.namespaces_manager.ensure_router_cleanup(router_id) return registry.publish(resources.ROUTER, events.BEFORE_DELETE, self, payload=events.DBEventPayload( self.context, states=(ri,), resource_id=router_id)) del self.router_info[router_id] try: ri.delete() except Exception: with excutils.save_and_reraise_exception(): self.router_info[router_id] = ri registry.notify(resources.ROUTER, events.AFTER_DELETE, self, router=ri) def init_extension_manager(self, connection): l3_ext_manager.register_opts(self.conf) self.agent_api = l3_ext_api.L3AgentExtensionAPI(self.router_info, self.router_factory) self.l3_ext_manager = ( l3_ext_manager.L3AgentExtensionsManager(self.conf)) self.l3_ext_manager.initialize( connection, lib_const.L3_AGENT_MODE, self.agent_api) def router_deleted(self, context, router_id): """Deal with router deletion RPC message.""" LOG.debug('Got router deleted notification for %s', router_id) update = queue.ResourceUpdate(router_id, PRIORITY_RPC, action=DELETE_ROUTER) self._queue.add(update) def routers_updated(self, context, routers): """Deal with routers modification and creation RPC message.""" LOG.debug('Got routers updated notification :%s', routers) if routers: # This is needed for backward compatibility if isinstance(routers[0], dict): routers = [router['id'] for router in routers] for id in routers: update = queue.ResourceUpdate( id, PRIORITY_RPC, action=ADD_UPDATE_ROUTER) self._queue.add(update) def router_removed_from_agent(self, context, payload): LOG.debug('Got router removed from agent :%r', payload) router_id = payload['router_id'] update = queue.ResourceUpdate(router_id, PRIORITY_RPC, action=DELETE_ROUTER) self._queue.add(update) def router_added_to_agent(self, context, payload): LOG.debug('Got router added to agent :%r', payload) self.routers_updated(context, payload) def network_update(self, context, **kwargs): network_id = kwargs['network']['id'] for ri in self.router_info.values(): ports = list(ri.internal_ports) if ri.ex_gw_port: ports.append(ri.ex_gw_port) port_belongs = lambda p: p['network_id'] == network_id if any(port_belongs(p) for p in ports): update = queue.ResourceUpdate( ri.router_id, PRIORITY_SYNC_ROUTERS_TASK) self._resync_router(update) def _process_router_if_compatible(self, router): # Either ex_net_id or handle_internal_only_routers must be set ex_net_id = (router['external_gateway_info'] or {}).get('network_id') if not ex_net_id and not self.conf.handle_internal_only_routers: raise l3_exc.RouterNotCompatibleWithAgent(router_id=router['id']) if router['id'] not in self.router_info: self._process_added_router(router) else: self._process_updated_router(router) def _process_added_router(self, router): self._router_added(router['id'], router) ri = self.router_info[router['id']] ri.router = router ri.process() registry.notify(resources.ROUTER, events.AFTER_CREATE, self, router=ri) self.l3_ext_manager.add_router(self.context, router) def _process_updated_router(self, router): ri = self.router_info[router['id']] is_dvr_snat_agent = (self.conf.agent_mode == lib_const.L3_AGENT_MODE_DVR_SNAT) is_dvr_only_agent = (self.conf.agent_mode in [lib_const.L3_AGENT_MODE_DVR, lib_const.L3_AGENT_MODE_DVR_NO_EXTERNAL]) old_router_ha_interface = 
ri.router.get(lib_const.HA_INTERFACE_KEY) current_router_ha_interface = router.get(lib_const.HA_INTERFACE_KEY) ha_interface_change = ((old_router_ha_interface is None and current_router_ha_interface is not None) or (old_router_ha_interface is not None and current_router_ha_interface is None)) is_dvr_ha_router = router.get('distributed') and router.get('ha') if is_dvr_snat_agent and is_dvr_ha_router and ha_interface_change: LOG.debug("Removing HA router %s, since it is not bound to " "the current agent, and recreating regular DVR router " "based on service port requirements.", router['id']) if self._safe_router_removed(router['id']): self._process_added_router(router) else: is_ha_router = getattr(ri, 'ha_state', False) # For HA routers check that DB state matches actual state if router.get('ha') and not is_dvr_only_agent and is_ha_router: self.check_ha_state_for_router( router['id'], router.get(lib_const.HA_ROUTER_STATE_KEY)) ri.router = router registry.notify(resources.ROUTER, events.BEFORE_UPDATE, self, router=ri) ri.process() registry.notify( resources.ROUTER, events.AFTER_UPDATE, self, router=ri) self.l3_ext_manager.update_router(self.context, router) def _resync_router(self, router_update, priority=PRIORITY_SYNC_ROUTERS_TASK): # Don't keep trying to resync if it's failing if router_update.hit_retry_limit(): LOG.warning("Hit retry limit with router update for %s, action %s", router_update.id, router_update.action) if router_update.action != DELETE_ROUTER: LOG.debug("Deleting router %s", router_update.id) self._safe_router_removed(router_update.id) return router_update.timestamp = timeutils.utcnow() router_update.priority = priority router_update.resource = None # Force the agent to resync the router self._queue.add(router_update) def _process_router_update(self): if self._exiting: return for rp, update in self._queue.each_update_to_next_resource(): LOG.info("Starting router update for %s, action %s, priority %s, " "update_id %s. Wait time elapsed: %.3f", update.id, update.action, update.priority, update.update_id, update.time_elapsed_since_create) if update.action == PD_UPDATE: self.pd.process_prefix_update() LOG.info("Finished a router update for %s IPv6 PD, " "update_id. %s. Time elapsed: %.3f", update.id, update.update_id, update.time_elapsed_since_start) continue routers = [update.resource] if update.resource else [] not_delete_no_routers = (update.action != DELETE_ROUTER and not routers) related_action = update.action in (DELETE_RELATED_ROUTER, ADD_UPDATE_RELATED_ROUTER) if not_delete_no_routers or related_action: try: update.timestamp = timeutils.utcnow() routers = self.plugin_rpc.get_routers(self.context, [update.id]) except Exception: msg = "Failed to fetch router information for '%s'" LOG.exception(msg, update.id) self._resync_router(update) continue # For a related action, verify the router is still hosted here, # since it could have just been deleted and we don't want to # add it back. if related_action: routers = [r for r in routers if r['id'] == update.id] if not routers: removed = self._safe_router_removed(update.id) if not removed: self._resync_router(update) else: # need to update timestamp of removed router in case # there are older events for the same router in the # processing queue (like events from fullsync) in order to # prevent deleted router re-creation rp.fetched_and_processed(update.timestamp) LOG.info("Finished a router update for %s, update_id %s. 
" "Time elapsed: %.3f", update.id, update.update_id, update.time_elapsed_since_start) continue if not self._process_routers_if_compatible(routers, update): self._resync_router(update) continue rp.fetched_and_processed(update.timestamp) LOG.info("Finished a router update for %s, update_id %s. " "Time elapsed: %.3f", update.id, update.update_id, update.time_elapsed_since_start) def _process_routers_if_compatible(self, routers, update): process_result = True for router in routers: if router['id'] != update.id: # Don't do the work here, instead create a new update and # enqueue it, since there could be another thread working # on it already and we don't want to race. new_action = RELATED_ACTION_MAP.get( update.action, ADD_UPDATE_RELATED_ROUTER) new_update = queue.ResourceUpdate( router['id'], priority=PRIORITY_RELATED_ROUTER, action=new_action) self._queue.add(new_update) LOG.debug('Queued a router update for %(router_id)s ' '(related router %(related_router_id)s). ' 'Original event action %(action)s, ' 'priority %(priority)s. ' 'New event action %(new_action)s, ' 'priority %(new_priority)s', {'router_id': router['id'], 'related_router_id': update.id, 'action': update.action, 'priority': update.priority, 'new_action': new_update.action, 'new_priority': new_update.priority}) continue try: self._process_router_if_compatible(router) except l3_exc.RouterNotCompatibleWithAgent as e: log_verbose_exc(e.msg, router) # Was the router previously handled by this agent? if router['id'] in self.router_info: LOG.error("Removing incompatible router '%s'", router['id']) self._safe_router_removed(router['id']) except Exception: log_verbose_exc( "Failed to process compatible router: %s" % update.id, router) process_result = False return process_result def _process_routers_loop(self): LOG.debug("Starting _process_routers_loop") while not self._exiting: self._pool.spawn_n(self._process_router_update) # NOTE(kevinbenton): this is set to 1 second because the actual interval # is controlled by a FixedIntervalLoopingCall in neutron/service.py that # is responsible for task execution. @periodic_task.periodic_task(spacing=1, run_immediately=True) def periodic_sync_routers_task(self, context): if not self.fullsync: return LOG.debug("Starting fullsync periodic_sync_routers_task") # self.fullsync is True at this point. If an exception -- caught or # uncaught -- prevents setting it to False below then the next call # to periodic_sync_routers_task will re-enter this code and try again. # Context manager self.namespaces_manager captures a picture of # namespaces *before* fetch_and_sync_all_routers fetches the full list # of routers from the database. This is important to correctly # identify stale ones. 
try: with self.namespaces_manager as ns_manager: self.fetch_and_sync_all_routers(context, ns_manager) except l3_exc.AbortSyncRouters: self.fullsync = True def fetch_and_sync_all_routers(self, context, ns_manager): prev_router_ids = set(self.router_info) curr_router_ids = set() timestamp = timeutils.utcnow() router_ids = [] chunk = [] is_snat_agent = (self.conf.agent_mode == lib_const.L3_AGENT_MODE_DVR_SNAT) try: router_ids = self.plugin_rpc.get_router_ids(context) # fetch routers by chunks to reduce the load on server and to # start router processing earlier for i in range(0, len(router_ids), self.sync_routers_chunk_size): chunk = router_ids[i:i + self.sync_routers_chunk_size] routers = self.plugin_rpc.get_routers(context, chunk) LOG.debug('Processing :%r', routers) for r in routers: curr_router_ids.add(r['id']) ns_manager.keep_router(r['id']) if r.get('distributed'): # need to keep fip namespaces as well ext_net_id = (r['external_gateway_info'] or {}).get( 'network_id') if ext_net_id: ns_manager.keep_ext_net(ext_net_id) elif is_snat_agent and not r.get('ha'): ns_manager.ensure_snat_cleanup(r['id']) update = queue.ResourceUpdate( r['id'], PRIORITY_SYNC_ROUTERS_TASK, resource=r, action=ADD_UPDATE_ROUTER, timestamp=timestamp) self._queue.add(update) except oslo_messaging.MessagingTimeout: if self.sync_routers_chunk_size > SYNC_ROUTERS_MIN_CHUNK_SIZE: self.sync_routers_chunk_size = max( self.sync_routers_chunk_size // 2, SYNC_ROUTERS_MIN_CHUNK_SIZE) LOG.error('Server failed to return info for routers in ' 'required time, decreasing chunk size to: %s', self.sync_routers_chunk_size) else: LOG.error('Server failed to return info for routers in ' 'required time even with min chunk size: %s. ' 'It might be under very high load or ' 'just inoperable', self.sync_routers_chunk_size) raise except oslo_messaging.MessagingException: failed_routers = chunk or router_ids LOG.exception("Failed synchronizing routers '%s' " "due to RPC error", failed_routers) raise l3_exc.AbortSyncRouters() self.fullsync = False LOG.debug("periodic_sync_routers_task successfully completed") # adjust chunk size after successful sync if self.sync_routers_chunk_size < SYNC_ROUTERS_MAX_CHUNK_SIZE: self.sync_routers_chunk_size = min( self.sync_routers_chunk_size + SYNC_ROUTERS_MIN_CHUNK_SIZE, SYNC_ROUTERS_MAX_CHUNK_SIZE) # Delete routers that have disappeared since the last sync for router_id in prev_router_ids - curr_router_ids: ns_manager.keep_router(router_id) update = queue.ResourceUpdate(router_id, PRIORITY_SYNC_ROUTERS_TASK, timestamp=timestamp, action=DELETE_ROUTER) self._queue.add(update) @property def context(self): # generate a new request-id on each call to make server side tracking # of RPC calls easier. self._context.request_id = common_context.generate_request_id() return self._context def after_start(self): # Note: the FWaaS' vArmourL3NATAgent is a subclass of L3NATAgent. It # calls this method here. So Removing this after_start() would break # vArmourL3NATAgent. We need to find out whether vArmourL3NATAgent # can have L3NATAgentWithStateReport as its base class instead of # L3NATAgent. 
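        # NOTE: eventlet.spawn_n() runs the loop in a green thread in
        # fire-and-forget mode; unlike eventlet.spawn() it returns no
        # GreenThread handle, so the caller cannot join it or observe any
        # exception it raises.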
eventlet.spawn_n(self._process_routers_loop) LOG.info("L3 agent started") def stop(self): LOG.info("Stopping L3 agent") if self.conf.cleanup_on_shutdown: self._exiting = True for router in self.router_info.values(): router.delete() def create_pd_router_update(self): router_id = None update = queue.ResourceUpdate(router_id, PRIORITY_PD_UPDATE, timestamp=timeutils.utcnow(), action=PD_UPDATE) self._queue.add(update) class L3NATAgentWithStateReport(L3NATAgent): def __init__(self, host, conf=None): super(L3NATAgentWithStateReport, self).__init__(host=host, conf=conf) self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS) self.failed_report_state = False self.agent_state = { 'binary': 'neutron-l3-agent', 'host': host, 'availability_zone': self.conf.AGENT.availability_zone, 'topic': topics.L3_AGENT, 'configurations': { 'agent_mode': self.conf.agent_mode, 'handle_internal_only_routers': self.conf.handle_internal_only_routers, 'interface_driver': self.conf.interface_driver, 'log_agent_heartbeats': self.conf.AGENT.log_agent_heartbeats}, 'start_flag': True, 'agent_type': lib_const.AGENT_TYPE_L3} report_interval = self.conf.AGENT.report_interval if report_interval: self.heartbeat = loopingcall.FixedIntervalLoopingCall( self._report_state) self.heartbeat.start(interval=report_interval) def _report_state(self): num_ex_gw_ports = 0 num_interfaces = 0 num_floating_ips = 0 router_infos = self.router_info.values() num_routers = len(router_infos) for ri in router_infos: ex_gw_port = ri.get_ex_gw_port() if ex_gw_port: num_ex_gw_ports += 1 num_interfaces += len(ri.router.get(lib_const.INTERFACE_KEY, [])) num_floating_ips += len(ri.router.get(lib_const.FLOATINGIP_KEY, [])) configurations = self.agent_state['configurations'] configurations['routers'] = num_routers configurations['ex_gw_ports'] = num_ex_gw_ports configurations['interfaces'] = num_interfaces configurations['floating_ips'] = num_floating_ips try: agent_status = self.state_rpc.report_state(self.context, self.agent_state, True) if agent_status == agent_consts.AGENT_REVIVED: LOG.info('Agent has just been revived. ' 'Doing a full sync.') self.fullsync = True self.agent_state.pop('start_flag', None) except AttributeError: # This means the server does not support report_state LOG.warning("Neutron server does not support state report. " "State report for this agent will be disabled.") self.heartbeat.stop() return except Exception: self.failed_report_state = True LOG.exception("Failed reporting state!") return if self.failed_report_state: self.failed_report_state = False LOG.info("Successfully reported state after a previous failure.") def after_start(self): eventlet.spawn_n(self._process_routers_loop) LOG.info("L3 agent started") # Do the report state before we do the first full sync. self._report_state() self.pd.after_start() def agent_updated(self, context, payload): """Handle the agent_updated notification event.""" self.fullsync = True LOG.info("agent_updated by server side %s!", payload) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/dvr.py0000644000175000017500000000543500000000000021425 0ustar00coreycorey00000000000000# Copyright (c) 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import weakref from neutron.agent.l3 import dvr_fip_ns class AgentMixin(object): def __init__(self, host): # dvr data self._fip_namespaces = weakref.WeakValueDictionary() super(AgentMixin, self).__init__(host) def get_fip_ns(self, ext_net_id): # TODO(Carl) is this necessary? Code that this replaced was careful to # convert these to string like this so I preserved that. ext_net_id = str(ext_net_id) fip_ns = self._fip_namespaces.get(ext_net_id) if fip_ns and not fip_ns.destroyed: return fip_ns fip_ns = dvr_fip_ns.FipNamespace(ext_net_id, self.conf, self.driver, self.use_ipv6) self._fip_namespaces[ext_net_id] = fip_ns return fip_ns def get_ports_by_subnet(self, subnet_id): return self.plugin_rpc.get_ports_by_subnet(self.context, subnet_id) def _update_arp_entry(self, context, payload, action): router_id = payload['router_id'] ri = self.router_info.get(router_id) if not ri: return arp_table = payload['arp_table'] ip = arp_table['ip_address'] mac = arp_table['mac_address'] subnet_id = arp_table['subnet_id'] device, device_exists = ri.get_arp_related_dev(subnet_id) ri._update_arp_entry(ip, mac, subnet_id, action, device, device_exists=device_exists) def add_arp_entry(self, context, payload): """Add arp entry into router namespace. Called from RPC.""" self._update_arp_entry(context, payload, 'add') def del_arp_entry(self, context, payload): """Delete arp entry from router namespace. Called from RPC.""" self._update_arp_entry(context, payload, 'delete') def fipnamespace_delete_on_ext_net(self, context, ext_net_id): """Delete fip namespace after external network removed.""" fip_ns = self.get_fip_ns(ext_net_id) if fip_ns.agent_gateway_port and not fip_ns.destroyed: fip_ns.unsubscribe(ext_net_id) fip_ns.delete() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/dvr_edge_ha_router.py0000644000175000017500000001303200000000000024451 0ustar00coreycorey00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants from neutron.agent.l3 import dvr_edge_router from neutron.agent.l3 import ha_router from neutron.agent.l3 import router_info class DvrEdgeHaRouter(dvr_edge_router.DvrEdgeRouter, ha_router.HaRouter): """Router class which represents a centralized SNAT DVR router with HA capabilities. 
""" def __init__(self, host, *args, **kwargs): super(DvrEdgeHaRouter, self).__init__(host, *args, **kwargs) self.enable_snat = None @property def ha_namespace(self): if self.snat_namespace: return self.snat_namespace.name return None def internal_network_added(self, port): # Call RouterInfo's internal_network_added (Plugs the port, adds IP) router_info.RouterInfo.internal_network_added(self, port) for subnet in port['subnets']: self._set_subnet_arp_info(subnet['id']) self._snat_redirect_add_from_port(port) if not self.get_ex_gw_port() or not self._is_this_snat_host(): return sn_port = self.get_snat_port_for_internal_port(port) if not sn_port: return self._plug_ha_router_port( sn_port, self._get_snat_int_device_name, constants.SNAT_INT_DEV_PREFIX) def add_centralized_floatingip(self, fip, fip_cidr): interface_name = self.get_snat_external_device_interface_name( self.get_ex_gw_port()) self._add_vip(fip_cidr, interface_name) self.set_ha_port() if (self.is_router_master() and self.ha_port and self.ha_port['status'] == constants.PORT_STATUS_ACTIVE): return super(DvrEdgeHaRouter, self).add_centralized_floatingip( fip, fip_cidr) else: return constants.FLOATINGIP_STATUS_ACTIVE def remove_centralized_floatingip(self, fip_cidr): self._remove_vip(fip_cidr) if self.is_router_master(): super(DvrEdgeHaRouter, self).remove_centralized_floatingip( fip_cidr) def get_centralized_fip_cidr_set(self): ex_gw_port = self.get_ex_gw_port() if not ex_gw_port: return set() interface_name = self.get_snat_external_device_interface_name( ex_gw_port) return set(self._get_cidrs_from_keepalived(interface_name)) def external_gateway_added(self, ex_gw_port, interface_name): super(DvrEdgeHaRouter, self).external_gateway_added( ex_gw_port, interface_name) for port in self.get_snat_interfaces(): snat_interface_name = self._get_snat_int_device_name(port['id']) self._disable_ipv6_addressing_on_interface(snat_interface_name) self._add_vips( self.get_snat_port_for_internal_port(port), snat_interface_name) self._add_gateway_vip(ex_gw_port, interface_name) self._disable_ipv6_addressing_on_interface(interface_name) def external_gateway_removed(self, ex_gw_port, interface_name): for port in self.snat_ports: snat_interface = self._get_snat_int_device_name(port['id']) self.driver.unplug(snat_interface, namespace=self.ha_namespace, prefix=constants.SNAT_INT_DEV_PREFIX) self._clear_vips(snat_interface) super(DvrEdgeHaRouter, self)._external_gateway_removed( ex_gw_port, interface_name) self._clear_vips(interface_name) def external_gateway_updated(self, ex_gw_port, interface_name): ha_router.HaRouter.external_gateway_updated(self, ex_gw_port, interface_name) def _external_gateway_added(self, ex_gw_port, interface_name, ns_name, preserve_ips): link_up = self.external_gateway_link_up() self._plug_external_gateway(ex_gw_port, interface_name, ns_name, link_up=link_up) def _is_this_snat_host(self): return self.agent_conf.agent_mode == constants.L3_AGENT_MODE_DVR_SNAT def _dvr_internal_network_removed(self, port): super(DvrEdgeHaRouter, self)._dvr_internal_network_removed(port) sn_port = self.get_snat_port_for_internal_port(port, self.snat_ports) if not sn_port: return self._clear_vips(self._get_snat_int_device_name(sn_port['id'])) def _plug_snat_port(self, port): """Used by _create_dvr_gateway in DvrEdgeRouter.""" interface_name = self._get_snat_int_device_name(port['id']) self.driver.plug(port['network_id'], port['id'], interface_name, port['mac_address'], namespace=self.snat_namespace.name, prefix=constants.SNAT_INT_DEV_PREFIX, 
mtu=port.get('mtu')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/dvr_edge_router.py0000644000175000017500000004105000000000000024002 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as lib_constants from oslo_log import log as logging from neutron.agent.l3 import dvr_local_router from neutron.agent.l3 import dvr_snat_ns from neutron.agent.l3 import router_info as router from neutron.agent.linux import ip_lib from neutron.agent.linux import iptables_manager from neutron.common import utils as common_utils LOG = logging.getLogger(__name__) class DvrEdgeRouter(dvr_local_router.DvrLocalRouter): def __init__(self, host, *args, **kwargs): super(DvrEdgeRouter, self).__init__(host, *args, **kwargs) self.snat_namespace = dvr_snat_ns.SnatNamespace( self.router_id, self.agent_conf, self.driver, self.use_ipv6) self.snat_iptables_manager = None def get_gw_ns_name(self): return self.snat_namespace.name def external_gateway_added(self, ex_gw_port, interface_name): super(DvrEdgeRouter, self).external_gateway_added( ex_gw_port, interface_name) if self._is_this_snat_host(): self._create_dvr_gateway(ex_gw_port, interface_name) # NOTE: When a router is created without a gateway the routes get # added to the router namespace, but if we wanted to populate # the same routes to the snat namespace after the gateway port # is added, we need to call routes_updated here. self.routes_updated([], self.router['routes']) elif self.snat_namespace.exists(): # This is the case where the snat was moved manually or # rescheduled to a different agent when the agent was dead. LOG.debug("SNAT was moved or rescheduled to a different host " "and does not match with the current host. This is " "a stale namespace %s and will be cleared from the " "current dvr_snat host.", self.snat_namespace.name) self.external_gateway_removed(ex_gw_port, interface_name) def _list_centralized_floating_ip_cidrs(self): # Compute a list of addresses this gw is supposed to have. # This avoids unnecessarily removing those addresses and # causing a momentarily network outage. floating_ips = self.get_floating_ips() return [common_utils.ip_to_cidr(ip['floating_ip_address']) for ip in floating_ips if ip.get(lib_constants.DVR_SNAT_BOUND)] def external_gateway_updated(self, ex_gw_port, interface_name): if not self._is_this_snat_host(): # no centralized SNAT gateway for this node/agent LOG.debug("not hosting snat for router: %s", self.router['id']) if self.snat_namespace.exists(): LOG.debug("SNAT was rescheduled to host %s. 
Clearing snat "
                          "namespace.", self.router.get('gw_port_host'))
                return self.external_gateway_removed(
                    ex_gw_port, interface_name)
            return

        if not self.snat_namespace.exists():
            # SNAT might be rescheduled to this agent; need to process like
            # a newly created gateway
            return self.external_gateway_added(ex_gw_port, interface_name)
        else:
            preserve_ips = self._list_centralized_floating_ip_cidrs()
            self._external_gateway_added(ex_gw_port,
                                         interface_name,
                                         self.snat_namespace.name,
                                         preserve_ips)

    def _external_gateway_removed(self, ex_gw_port, interface_name):
        super(DvrEdgeRouter, self).external_gateway_removed(ex_gw_port,
                                                            interface_name)
        if not self._is_this_snat_host() and not self.snat_namespace.exists():
            # no centralized SNAT gateway for this node/agent
            LOG.debug("not hosting snat for router: %s", self.router['id'])
            return

        self.driver.unplug(interface_name,
                           namespace=self.snat_namespace.name,
                           prefix=router.EXTERNAL_DEV_PREFIX)

    def external_gateway_removed(self, ex_gw_port, interface_name):
        self._external_gateway_removed(ex_gw_port, interface_name)
        if self.snat_namespace.exists():
            self.snat_namespace.delete()

    def internal_network_added(self, port):
        super(DvrEdgeRouter, self).internal_network_added(port)

        # TODO(gsagie) some of these checks are already implemented
        # in the base class; think about how to avoid re-doing them
        if not self._is_this_snat_host():
            return

        sn_port = self.get_snat_port_for_internal_port(port)
        if not sn_port:
            return

        ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
            self.router['id'])
        interface_name = self._get_snat_int_device_name(sn_port['id'])
        self._internal_network_added(
            ns_name,
            sn_port['network_id'],
            sn_port['id'],
            sn_port['fixed_ips'],
            sn_port['mac_address'],
            interface_name,
            lib_constants.SNAT_INT_DEV_PREFIX,
            mtu=sn_port.get('mtu'))

    def _dvr_internal_network_removed(self, port):
        super(DvrEdgeRouter, self)._dvr_internal_network_removed(port)

        if not self.ex_gw_port:
            return

        sn_port = self.get_snat_port_for_internal_port(port, self.snat_ports)
        if not sn_port:
            return

        if not self._is_this_snat_host():
            return

        snat_interface = self._get_snat_int_device_name(sn_port['id'])
        ns_name = self.snat_namespace.name
        prefix = lib_constants.SNAT_INT_DEV_PREFIX
        if ip_lib.device_exists(snat_interface, namespace=ns_name):
            self.driver.unplug(snat_interface, namespace=ns_name,
                               prefix=prefix)

    def _plug_snat_port(self, port):
        interface_name = self._get_snat_int_device_name(port['id'])
        self._internal_network_added(
            self.snat_namespace.name, port['network_id'],
            port['id'], port['fixed_ips'],
            port['mac_address'], interface_name,
            lib_constants.SNAT_INT_DEV_PREFIX,
            mtu=port.get('mtu'))

    def initialize(self, process_monitor):
        self._create_snat_namespace()
        super(DvrEdgeRouter, self).initialize(process_monitor)

    def _create_dvr_gateway(self, ex_gw_port, gw_interface_name):
        # connect snat_ports to br_int from SNAT namespace
        for port in self.get_snat_interfaces():
            self._plug_snat_port(port)
        self._external_gateway_added(ex_gw_port, gw_interface_name,
                                     self.snat_namespace.name,
                                     preserve_ips=[])
        self.snat_iptables_manager = iptables_manager.IptablesManager(
            namespace=self.snat_namespace.name,
            use_ipv6=self.use_ipv6)

        self._initialize_address_scope_iptables(self.snat_iptables_manager)

    def _create_snat_namespace(self):
        """Create the SNAT namespace."""
        # TODO(mlavalle): in the near future, this method should contain the
        # code in the L3 agent that creates a gateway for a dvr.
The first step # is to move the creation of the snat namespace here self.snat_namespace.create() return self.snat_namespace def _get_snat_int_device_name(self, port_id): long_name = lib_constants.SNAT_INT_DEV_PREFIX + port_id return long_name[:self.driver.DEV_NAME_LEN] def _is_this_snat_host(self): host = self.router.get('gw_port_host') if not host: LOG.debug("gw_port_host missing from router: %s", self.router['id']) return host == self.host def _handle_router_snat_rules(self, ex_gw_port, interface_name): super(DvrEdgeRouter, self)._handle_router_snat_rules( ex_gw_port, interface_name) if not self._is_this_snat_host(): return if not self.get_ex_gw_port(): return if not self.snat_iptables_manager: LOG.debug("DVR router: no snat rules to be handled") return with self.snat_iptables_manager.defer_apply(): self._empty_snat_chains(self.snat_iptables_manager) # NOTE: float-snat should be added for the # centralized floating-ips supported by the # snat namespace. self.snat_iptables_manager.ipv4['nat'].add_rule( 'snat', '-j $float-snat') self._add_snat_rules(ex_gw_port, self.snat_iptables_manager, interface_name) def update_routing_table(self, operation, route): if self.get_ex_gw_port() and self._is_this_snat_host(): ns_name = self.snat_namespace.name # NOTE: For now let us apply the static routes both in SNAT # namespace and Router Namespace, to reduce the complexity. if self.snat_namespace.exists(): super(DvrEdgeRouter, self)._update_routing_table( operation, route, namespace=ns_name) else: LOG.error("The SNAT namespace %s does not exist for " "the router.", ns_name) super(DvrEdgeRouter, self).update_routing_table(operation, route) def delete(self): super(DvrEdgeRouter, self).delete() if self.snat_namespace.exists(): self.snat_namespace.delete() def process_address_scope(self): super(DvrEdgeRouter, self).process_address_scope() if not self._is_this_snat_host(): return if not self.snat_iptables_manager: LOG.debug("DVR router: no snat rules to be handled") return # Prepare address scope iptables rule for dvr snat interfaces internal_ports = self.get_snat_interfaces() ports_scopemark = self._get_port_devicename_scopemark( internal_ports, self._get_snat_int_device_name) # Prepare address scope iptables rule for external port external_port = self.get_ex_gw_port() if external_port: external_port_scopemark = self._get_port_devicename_scopemark( [external_port], self.get_external_device_name) for ip_version in (lib_constants.IP_VERSION_4, lib_constants.IP_VERSION_6): ports_scopemark[ip_version].update( external_port_scopemark[ip_version]) with self.snat_iptables_manager.defer_apply(): self._add_address_scope_mark( self.snat_iptables_manager, ports_scopemark) def _delete_stale_external_devices(self, interface_name): if not self.snat_namespace.exists(): return ns_ip = ip_lib.IPWrapper(namespace=self.snat_namespace.name) for d in ns_ip.get_devices(): if (d.name.startswith(router.EXTERNAL_DEV_PREFIX) and d.name != interface_name): LOG.debug('Deleting stale external router device: %s', d.name) self.driver.unplug( d.name, namespace=self.snat_namespace.name, prefix=router.EXTERNAL_DEV_PREFIX) def get_snat_external_device_interface_name(self, ex_gw_port): long_name = router.EXTERNAL_DEV_PREFIX + ex_gw_port['id'] return long_name[:self.driver.DEV_NAME_LEN] def get_centralized_fip_cidr_set(self): """Returns the fip_cidr set for centralized floatingips.""" ex_gw_port = self.get_ex_gw_port() # Don't look for centralized FIP cidrs if gw_port not exists or # this is not snat host if (not ex_gw_port or not 
self._is_this_snat_host() or not self.snat_namespace.exists()): return set() interface_name = self.get_snat_external_device_interface_name( ex_gw_port) return set([addr['cidr'] for addr in ip_lib.get_devices_with_ip( self.snat_namespace.name, name=interface_name)]) def get_router_cidrs(self, device): """Over-ride the get_router_cidrs function to return the list. This function is overridden to provide the complete list of floating_ip cidrs that the router hosts. This includes the centralized floatingip cidr list and the regular floatingip cidr list that are bound to fip namespace. """ fip_cidrs = super(DvrEdgeRouter, self).get_router_cidrs(device) centralized_cidrs = self.get_centralized_fip_cidr_set() return fip_cidrs | centralized_cidrs def remove_centralized_floatingip(self, fip_cidr): """Function to handle the centralized Floatingip remove.""" if not self.get_ex_gw_port(): return if not self._is_this_snat_host(): return interface_name = self.get_snat_external_device_interface_name( self.get_ex_gw_port()) device = ip_lib.IPDevice( interface_name, namespace=self.snat_namespace.name) device.delete_addr_and_conntrack_state(fip_cidr) self.process_floating_ip_nat_rules_for_centralized_floatingip() def add_centralized_floatingip(self, fip, fip_cidr): """Function to handle the centralized Floatingip addition.""" if not self.get_ex_gw_port(): return if not self._is_this_snat_host(): return interface_name = self.get_snat_external_device_interface_name( self.get_ex_gw_port()) try: ip_lib.add_ip_address(fip_cidr, interface_name, namespace=self.snat_namespace.name) except ip_lib.IpAddressAlreadyExists: pass except RuntimeError: LOG.warning("Unable to configure IP address for centralized " "floating IP: %s", fip['id']) return lib_constants.FLOATINGIP_STATUS_ERROR self.process_floating_ip_nat_rules_for_centralized_floatingip() # Send a GARP message on the external interface for the # centralized floatingip configured. ip_lib.send_ip_addr_adv_notif(self.snat_namespace.name, interface_name, fip['floating_ip_address']) return lib_constants.FLOATINGIP_STATUS_ACTIVE def _centralized_floating_forward_rules(self, floating_ip, fixed_ip): to_source = '-s %s/32 -j SNAT --to-source %s' % (fixed_ip, floating_ip) if self.snat_iptables_manager.random_fully: to_source += ' --random-fully' return [('PREROUTING', '-d %s/32 -j DNAT --to-destination %s' % (floating_ip, fixed_ip)), ('OUTPUT', '-d %s/32 -j DNAT --to-destination %s' % (floating_ip, fixed_ip)), ('float-snat', to_source)] def _set_floating_ip_nat_rules_for_centralized_floatingip(self, fip): if fip.get(lib_constants.DVR_SNAT_BOUND): fixed = fip['fixed_ip_address'] fip_ip = fip['floating_ip_address'] for chain, rule in self._centralized_floating_forward_rules( fip_ip, fixed): self.snat_iptables_manager.ipv4['nat'].add_rule( chain, rule, tag='floating_ip') def process_floating_ip_nat_rules_for_centralized_floatingip(self): self.snat_iptables_manager.ipv4['nat'].clear_rules_by_tag( 'floating_ip') floating_ips = self.get_floating_ips() for fip in floating_ips: self._set_floating_ip_nat_rules_for_centralized_floatingip(fip) self.snat_iptables_manager.apply() def process_floating_ip_nat_rules(self): if self._is_this_snat_host(): self.process_floating_ip_nat_rules_for_centralized_floatingip() # Cover mixed dvr_snat and compute node, aka a dvr_snat node has both # centralized and distributed floating IPs. 
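        # For illustration only (the addresses below are made up): a
        # centralized floating IP 203.0.113.5 mapped to fixed IP 10.0.0.5
        # by _centralized_floating_forward_rules() results in iptables
        # rules roughly equivalent to:
        #
        #   iptables -t nat -A PREROUTING -d 203.0.113.5/32 \
        #       -j DNAT --to-destination 10.0.0.5
        #   iptables -t nat -A OUTPUT -d 203.0.113.5/32 \
        #       -j DNAT --to-destination 10.0.0.5
        #   iptables -t nat -A float-snat -s 10.0.0.5/32 \
        #       -j SNAT --to-source 203.0.113.5
        #
        # with '--random-fully' appended to the SNAT rule when supported.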
super(DvrEdgeRouter, self).process_floating_ip_nat_rules() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/dvr_fip_ns.py0000644000175000017500000005214300000000000022761 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import os from neutron_lib import constants as lib_constants from neutron_lib.exceptions import l3 as l3_exc from neutron_lib.utils import runtime from oslo_concurrency import lockutils from oslo_log import log as logging from oslo_utils import excutils from neutron._i18n import _ from neutron.agent.l3 import fip_rule_priority_allocator as frpa from neutron.agent.l3 import link_local_allocator as lla from neutron.agent.l3 import namespaces from neutron.agent.l3 import router_info from neutron.agent.linux import ip_lib from neutron.agent.linux import iptables_manager from neutron.common import utils as common_utils from neutron.ipam import utils as ipam_utils LOG = logging.getLogger(__name__) FIP_NS_PREFIX = 'fip-' FIP_EXT_DEV_PREFIX = 'fg-' FIP_2_ROUTER_DEV_PREFIX = 'fpr-' ROUTER_2_FIP_DEV_PREFIX = namespaces.ROUTER_2_FIP_DEV_PREFIX # Route Table index for FIPs FIP_RT_TBL = 16 # Rule priority range for FIPs FIP_PR_START = 32768 FIP_PR_END = FIP_PR_START + 40000 # Fixed rule priority for Fast Path Exit rules FAST_PATH_EXIT_PR = 80000 class FipNamespace(namespaces.Namespace): def __init__(self, ext_net_id, agent_conf, driver, use_ipv6): name = self._get_ns_name(ext_net_id) super(FipNamespace, self).__init__( name, agent_conf, driver, use_ipv6) self._ext_net_id = ext_net_id self.agent_conf = agent_conf self.driver = driver self.use_ipv6 = use_ipv6 self.agent_gateway_port = None self._subscribers = set() path = os.path.join(agent_conf.state_path, 'fip-priorities') self._rule_priorities = frpa.FipRulePriorityAllocator(path, FIP_PR_START, FIP_PR_END) self._iptables_manager = iptables_manager.IptablesManager( namespace=self.get_name(), use_ipv6=self.use_ipv6) path = os.path.join(agent_conf.state_path, 'fip-linklocal-networks') self.local_subnets = lla.LinkLocalAllocator( path, lib_constants.DVR_FIP_LL_CIDR) self.destroyed = False self._stale_fips_checked = False @classmethod def _get_ns_name(cls, ext_net_id): return namespaces.build_ns_name(FIP_NS_PREFIX, ext_net_id) def get_name(self): return self._get_ns_name(self._ext_net_id) def get_ext_device_name(self, port_id): return (FIP_EXT_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] def get_int_device_name(self, router_id): return (FIP_2_ROUTER_DEV_PREFIX + router_id)[:self.driver.DEV_NAME_LEN] def get_rtr_ext_device_name(self, router_id): return (ROUTER_2_FIP_DEV_PREFIX + router_id)[:self.driver.DEV_NAME_LEN] def has_subscribers(self): return len(self._subscribers) != 0 def subscribe(self, external_net_id): is_first = not self.has_subscribers() self._subscribers.add(external_net_id) return is_first def unsubscribe(self, external_net_id): 
        self._subscribers.discard(external_net_id)
        return not self.has_subscribers()

    def allocate_rule_priority(self, floating_ip):
        return self._rule_priorities.allocate(floating_ip)

    def deallocate_rule_priority(self, floating_ip):
        self._rule_priorities.release(floating_ip)

    @contextlib.contextmanager
    def _fip_port_lock(self, interface_name):
        # Use a namespace- and port-specific lock semaphore to allow for
        # concurrency
        lock_name = 'port-lock-' + self.name + '-' + interface_name
        with lockutils.lock(lock_name, runtime.SYNCHRONIZED_PREFIX):
            try:
                yield
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error('DVR: FIP namespace config failure '
                              'for interface %s', interface_name)

    def create_or_update_gateway_port(self, agent_gateway_port):
        interface_name = self.get_ext_device_name(agent_gateway_port['id'])

        # The lock is used to make sure another thread doesn't update the
        # gateway port before we are done initializing things.
        with self._fip_port_lock(interface_name):
            is_first = self.subscribe(agent_gateway_port['network_id'])
            if is_first:
                # Check for subnets that are populated for the agent
                # gateway port that was created on the server.
                if 'subnets' not in agent_gateway_port:
                    self.unsubscribe(agent_gateway_port['network_id'])
                    LOG.debug('DVR: Missing subnet in agent_gateway_port: %s',
                              agent_gateway_port)
                    return
                self._create_gateway_port(agent_gateway_port, interface_name)
            else:
                try:
                    self._update_gateway_port(
                        agent_gateway_port, interface_name)
                except Exception:
                    # If an exception occurs at this point, then it is
                    # good to clean up the namespace that has been created
                    # and reraise the exception in order to resync the router
                    with excutils.save_and_reraise_exception():
                        self.unsubscribe(agent_gateway_port['network_id'])
                        self.delete()
                        LOG.exception('DVR: Gateway update in '
                                      'FIP namespace failed')

    def _create_gateway_port(self, ex_gw_port, interface_name):
        """Create namespace, request port creation from Plugin, then
        configure Floating IP gateway port.
""" self.create() LOG.debug("DVR: adding gateway interface: %s", interface_name) ns_name = self.get_name() self.driver.plug(ex_gw_port['network_id'], ex_gw_port['id'], interface_name, ex_gw_port['mac_address'], namespace=ns_name, prefix=FIP_EXT_DEV_PREFIX, mtu=ex_gw_port.get('mtu')) # Remove stale fg devices ip_wrapper = ip_lib.IPWrapper(namespace=ns_name) devices = ip_wrapper.get_devices() for device in devices: name = device.name if name.startswith(FIP_EXT_DEV_PREFIX) and name != interface_name: LOG.debug('DVR: unplug: %s', name) self.driver.unplug(name, namespace=ns_name, prefix=FIP_EXT_DEV_PREFIX) ip_cidrs = common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips']) self.driver.init_l3(interface_name, ip_cidrs, namespace=ns_name, clean_connections=True) gw_cidrs = [sn['cidr'] for sn in ex_gw_port['subnets'] if sn.get('cidr')] self.driver.set_onlink_routes( interface_name, ns_name, ex_gw_port.get('extra_subnets', []), preserve_ips=gw_cidrs, is_ipv6=False) self.agent_gateway_port = ex_gw_port cmd = ['sysctl', '-w', 'net.ipv4.conf.%s.proxy_arp=1' % interface_name] ip_wrapper.netns.execute(cmd, check_exit_code=False) def create(self): LOG.debug("DVR: add fip namespace: %s", self.name) # parent class will ensure the namespace exists and turn-on forwarding super(FipNamespace, self).create() ip_lib.set_ip_nonlocal_bind_for_namespace(self.name, 1, root_namespace=True) # no connection tracking needed in fip namespace self._iptables_manager.ipv4['raw'].add_rule('PREROUTING', '-j CT --notrack') self._iptables_manager.apply() def delete(self): self.destroyed = True self._delete() self.agent_gateway_port = None @namespaces.check_ns_existence def _delete(self): ip_wrapper = ip_lib.IPWrapper(namespace=self.name) for d in ip_wrapper.get_devices(): if d.name.startswith(FIP_2_ROUTER_DEV_PREFIX): # internal link between IRs and FIP NS ip_wrapper.del_veth(d.name) elif d.name.startswith(FIP_EXT_DEV_PREFIX): # single port from FIP NS to br-ext # TODO(carl) Where does the port get deleted? 
LOG.debug('DVR: unplug: %s', d.name) self.driver.unplug(d.name, namespace=self.name, prefix=FIP_EXT_DEV_PREFIX) # TODO(mrsmith): add LOG warn if fip count != 0 LOG.debug('DVR: destroy fip namespace: %s', self.name) super(FipNamespace, self).delete() def _check_for_gateway_ip_change(self, new_agent_gateway_port): def get_gateway_ips(gateway_port): gw_ips = {} if gateway_port: for subnet in gateway_port.get('subnets', []): gateway_ip = subnet.get('gateway_ip', None) if gateway_ip: ip_version = common_utils.get_ip_version(gateway_ip) gw_ips[ip_version] = gateway_ip return gw_ips new_gw_ips = get_gateway_ips(new_agent_gateway_port) old_gw_ips = get_gateway_ips(self.agent_gateway_port) return new_gw_ips != old_gw_ips def get_fip_table_indexes(self, ip_version): ip_rules_list = ip_lib.list_ip_rules(self.get_name(), ip_version) tbl_index_list = [] for ip_rule in ip_rules_list: tbl_index = ip_rule['table'] if tbl_index in ['local', 'default', 'main']: continue tbl_index_list.append(tbl_index) return tbl_index_list def _add_default_gateway_for_fip(self, gw_ip, ip_device, tbl_index): """Adds default gateway for fip based on the tbl_index passed.""" if tbl_index is None: ip_version = common_utils.get_ip_version(gw_ip) tbl_index_list = self.get_fip_table_indexes(ip_version) for tbl_index in tbl_index_list: ip_device.route.add_gateway(gw_ip, table=tbl_index) else: ip_device.route.add_gateway(gw_ip, table=tbl_index) def _add_rtr_ext_route_rule_to_route_table(self, ri, fip_2_rtr, fip_2_rtr_name): """Creates external route table and adds routing rules.""" # TODO(Swami): Rename the _get_snat_idx function to some # generic name that can be used for SNAT and FIP rt_tbl_index = ri._get_snat_idx(fip_2_rtr) interface_name = self.get_ext_device_name( self.agent_gateway_port['id']) try: # The lock is used to make sure another thread doesn't call to # update the gateway route before we are done initializing things. with self._fip_port_lock(interface_name): self._update_gateway_route(self.agent_gateway_port, interface_name, tbl_index=rt_tbl_index) except Exception: # If an exception occurs at this point, then it is # good to unsubscribe this external network so that # the next call will trigger the interface to be plugged. # We reraise the exception in order to resync the router. with excutils.save_and_reraise_exception(): self.unsubscribe(self.agent_gateway_port['network_id']) self.agent_gateway_port = None LOG.exception('DVR: Gateway setup in FIP namespace ' 'failed') # Now add the filter match rule for the table. 
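        # Roughly the iproute2 equivalent of the call below (all values
        # are placeholders):
        #
        #   ip netns exec <fip-ns> ip rule add from <fip_2_rtr ip> \
        #       iif <fpr-device> table <rt_tbl_index> priority <rt_tbl_index>
        #
        # i.e. packets arriving from a given router's veth are looked up
        # in that router's dedicated routing table.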
ip_lib.add_ip_rule(namespace=self.get_name(), ip=str(fip_2_rtr.ip), iif=fip_2_rtr_name, table=rt_tbl_index, priority=rt_tbl_index) def _update_gateway_port(self, agent_gateway_port, interface_name): if (not self.agent_gateway_port or self._check_for_gateway_ip_change(agent_gateway_port)): # Caller already holding lock self._update_gateway_route( agent_gateway_port, interface_name, tbl_index=None) # Cache the agent gateway port after successfully updating # the gateway route, so that checking on self.agent_gateway_port # will be a valid check self.agent_gateway_port = agent_gateway_port gw_cidrs = [sn['cidr'] for sn in agent_gateway_port['subnets'] if sn.get('cidr')] self.driver.set_onlink_routes( interface_name, self.get_name(), agent_gateway_port.get('extra_subnets', []), preserve_ips=gw_cidrs, is_ipv6=False) def _update_gateway_route(self, agent_gateway_port, interface_name, tbl_index): ns_name = self.get_name() ipd = ip_lib.IPDevice(interface_name, namespace=ns_name) # If the 'fg-' device doesn't exist in the namespace then trying # to send advertisements or configure the default route will just # throw exceptions. Unsubscribe this external network so that # the next call will trigger the interface to be plugged. if not ipd.exists(): LOG.warning('DVR: FIP gateway port with interface ' 'name: %(device)s does not exist in the given ' 'namespace: %(ns)s', {'device': interface_name, 'ns': ns_name}) msg = _('DVR: Gateway update route in FIP namespace failed, retry ' 'should be attempted on next call') raise l3_exc.FloatingIpSetupException(msg) for fixed_ip in agent_gateway_port['fixed_ips']: ip_lib.send_ip_addr_adv_notif(ns_name, interface_name, fixed_ip['ip_address']) for subnet in agent_gateway_port['subnets']: gw_ip = subnet.get('gateway_ip') if gw_ip: is_gateway_not_in_subnet = not ipam_utils.check_subnet_ip( subnet.get('cidr'), gw_ip) if is_gateway_not_in_subnet: ipd.route.add_route(gw_ip, scope='link') self._add_default_gateway_for_fip(gw_ip, ipd, tbl_index) else: current_gateway = ipd.route.get_gateway() if current_gateway and current_gateway.get('gateway'): ipd.route.delete_gateway(current_gateway.get('gateway')) def _add_cidr_to_device(self, device, ip_cidr): to = common_utils.cidr_to_ip(ip_cidr) if not device.addr.list(to=to): device.addr.add(ip_cidr, add_broadcast=False) def delete_rtr_2_fip_link(self, ri): """Delete the interface between router and FloatingIP namespace.""" LOG.debug("Delete FIP link interfaces for router: %s", ri.router_id) rtr_2_fip_name = self.get_rtr_ext_device_name(ri.router_id) fip_2_rtr_name = self.get_int_device_name(ri.router_id) fip_ns_name = self.get_name() # remove default route entry if ri.rtr_fip_subnet is None: # see if there is a local subnet in the cache ri.rtr_fip_subnet = self.local_subnets.lookup(ri.router_id) if ri.rtr_fip_subnet: rtr_2_fip, fip_2_rtr = ri.rtr_fip_subnet.get_pair() device = ip_lib.IPDevice(rtr_2_fip_name, namespace=ri.ns_name) if device.exists(): device.route.delete_gateway(str(fip_2_rtr.ip), table=FIP_RT_TBL) if self.agent_gateway_port: interface_name = self.get_ext_device_name( self.agent_gateway_port['id']) fg_device = ip_lib.IPDevice( interface_name, namespace=fip_ns_name) if fg_device.exists(): # Remove the fip namespace rules and routes associated to # fpr interface route table. 
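                    # (The index computed below is the same per-router value
                    # that _add_rtr_ext_route_rule_to_route_table() used when
                    # installing the gateway routes and the ip rule, so
                    # flushing and deleting by this index undoes exactly
                    # that state.)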
tbl_index = ri._get_snat_idx(fip_2_rtr) # Flush the table fg_device.route.flush(lib_constants.IP_VERSION_4, table=tbl_index) fg_device.route.flush(lib_constants.IP_VERSION_6, table=tbl_index) # Remove the rule lookup # /0 addresses for IPv4 and IPv6 are used to pass # IP protocol version information based on a # link-local address IP version. Using any of those # is equivalent to using 'from all' for iproute2. rule_ip = lib_constants.IP_ANY[fip_2_rtr.ip.version] ip_lib.delete_ip_rule(fip_ns_name, ip=rule_ip, iif=fip_2_rtr_name, table=tbl_index, priority=tbl_index) self.local_subnets.release(ri.router_id) ri.rtr_fip_subnet = None # Check for namespace before deleting the device if not self.destroyed: fns_ip = ip_lib.IPWrapper(namespace=fip_ns_name) if fns_ip.device(fip_2_rtr_name).exists(): fns_ip.del_veth(fip_2_rtr_name) def create_rtr_2_fip_link(self, ri): """Create interface between router and Floating IP namespace.""" LOG.debug("Create FIP link interfaces for router %s", ri.router_id) rtr_2_fip_name = self.get_rtr_ext_device_name(ri.router_id) fip_2_rtr_name = self.get_int_device_name(ri.router_id) fip_ns_name = self.get_name() # add link local IP to interface if ri.rtr_fip_subnet is None: ri.rtr_fip_subnet = self.local_subnets.allocate(ri.router_id) rtr_2_fip, fip_2_rtr = ri.rtr_fip_subnet.get_pair() rtr_2_fip_dev = ip_lib.IPDevice(rtr_2_fip_name, namespace=ri.ns_name) fip_2_rtr_dev = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) if not rtr_2_fip_dev.exists(): ip_wrapper = ip_lib.IPWrapper(namespace=ri.ns_name) rtr_2_fip_dev, fip_2_rtr_dev = ip_wrapper.add_veth(rtr_2_fip_name, fip_2_rtr_name, fip_ns_name) rtr_2_fip_dev.link.set_up() fip_2_rtr_dev.link.set_up() mtu = ri.get_ex_gw_port().get('mtu') if mtu: rtr_2_fip_dev.link.set_mtu(mtu) fip_2_rtr_dev.link.set_mtu(mtu) self._add_cidr_to_device(rtr_2_fip_dev, str(rtr_2_fip)) self._add_cidr_to_device(fip_2_rtr_dev, str(fip_2_rtr)) # Add permanant ARP entries on each side of veth pair rtr_2_fip_dev.neigh.add(common_utils.cidr_to_ip(fip_2_rtr), fip_2_rtr_dev.link.address) fip_2_rtr_dev.neigh.add(common_utils.cidr_to_ip(rtr_2_fip), rtr_2_fip_dev.link.address) self._add_rtr_ext_route_rule_to_route_table(ri, fip_2_rtr, fip_2_rtr_name) # add default route for the link local interface rtr_2_fip_dev.route.add_gateway(str(fip_2_rtr.ip), table=FIP_RT_TBL) def scan_fip_ports(self, ri): # scan system for any existing fip ports rtr_2_fip_interface = self.get_rtr_ext_device_name(ri.router_id) device = ip_lib.IPDevice(rtr_2_fip_interface, namespace=ri.ns_name) if device.exists(): if len(ri.get_router_cidrs(device)): self.rtr_fip_connect = True else: self.rtr_fip_connect = False # On upgrade, there could be stale IP addresses configured, check # and remove them once. 
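# NOTE: "Stale" here means /32 host addresses such as 203.0.113.5/32
# (a hypothetical value) left on the rfp- device by a previous release;
# common_utils.is_cidr_host() selects exactly those host CIDRs below.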
# TODO(haleyb): this can go away after a cycle or two if not self._stale_fips_checked: stale_cidrs = ( ip for ip in router_info.RouterInfo.get_router_cidrs( ri, device) if common_utils.is_cidr_host(ip)) for ip_cidr in stale_cidrs: LOG.debug("Removing stale floating ip %s from interface " "%s in namespace %s", ip_cidr, rtr_2_fip_interface, ri.ns_name) device.delete_addr_and_conntrack_state(ip_cidr) self._stale_fips_checked = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/dvr_local_router.py0000644000175000017500000011237300000000000024177 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import binascii import collections import netaddr from neutron_lib import constants as lib_constants from oslo_log import log as logging from oslo_utils import excutils import six from neutron.agent.l3 import dvr_fip_ns from neutron.agent.l3 import dvr_router_base from neutron.agent.linux import ip_lib from neutron.common import utils as common_utils from neutron.privileged.agent.linux import ip_lib as priv_ip_lib LOG = logging.getLogger(__name__) # xor-folding mask used for IPv6 rule index MASK_30 = 0x3fffffff # Tracks the arp entry cache Arp_entry = collections.namedtuple( 'Arp_entry', 'ip mac subnet_id operation') class DvrLocalRouter(dvr_router_base.DvrRouterBase): def __init__(self, host, *args, **kwargs): super(DvrLocalRouter, self).__init__(host, *args, **kwargs) self.floating_ips_dict = {} # Linklocal subnet for router and floating IP namespace link self.rtr_fip_subnet = None self.rtr_fip_connect = False self.fip_ns = None self._pending_arp_set = set() def migrate_centralized_floating_ip(self, fip, interface_name, device): # Remove the centralized fip first and then add fip to the host ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address']) self.floating_ip_removed_dist(ip_cidr) # Now add the floating_ip to the current host return self.floating_ip_added_dist(fip, ip_cidr) def floating_forward_rules(self, fip): """Override this function defined in router_info for dvr routers.""" if not self.fip_ns: return [] if fip.get(lib_constants.DVR_SNAT_BOUND): return [] # For dvr_no_external node should not process any floating IP # iptables rules. 
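# NOTE: On nodes that do host the fast path, the two rules assembled
# below render roughly as (addresses are hypothetical):
#   PREROUTING: -d 203.0.113.10/32 -i rfp-<router_id> \
#       -j DNAT --to-destination 10.0.0.5
#   float-snat: -s 10.0.0.5/32 -j SNAT --to-source 203.0.113.10
# with ' --random-fully' appended when the iptables backend supports it.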
if (self.agent_conf.agent_mode == lib_constants.L3_AGENT_MODE_DVR_NO_EXTERNAL): return [] fixed_ip = fip['fixed_ip_address'] floating_ip = fip['floating_ip_address'] rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id) dnat_from_floatingip_to_fixedip = ( 'PREROUTING', '-d %s/32 -i %s -j DNAT --to-destination %s' % ( floating_ip, rtr_2_fip_name, fixed_ip)) to_source = '-s %s/32 -j SNAT --to-source %s' % (fixed_ip, floating_ip) if self.iptables_manager.random_fully: to_source += ' --random-fully' snat_from_fixedip_to_floatingip = ('float-snat', to_source) return [dnat_from_floatingip_to_fixedip, snat_from_fixedip_to_floatingip] def floating_mangle_rules(self, floating_ip, fixed_ip, internal_mark): if not self.fip_ns: return [] rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id) mark_traffic_to_floating_ip = ( 'floatingip', '-d %s/32 -i %s -j MARK --set-xmark %s' % ( floating_ip, rtr_2_fip_name, internal_mark)) mark_traffic_from_fixed_ip = ( 'FORWARD', '-s %s/32 -j $float-snat' % fixed_ip) return [mark_traffic_to_floating_ip, mark_traffic_from_fixed_ip] def add_centralized_floatingip(self, fip, fip_cidr): """Implements floatingip in centralized network node. This is a dummy function and is overridden in dvr_edge_router.py to add the floatingip function to the snat namespace. """ def remove_centralized_floatingip(self, fip_cidr): """Removes floatingip from centralized network node. This is a dummy function and is overridden in dvr_edge_router.py to remove the floatingip function from the snat namespace. """ def floating_ip_added_dist(self, fip, fip_cidr): """Add floating IP to respective namespace based on agent mode.""" if fip.get(lib_constants.DVR_SNAT_BOUND): return self.add_centralized_floatingip(fip, fip_cidr) if not self._check_if_floatingip_bound_to_host(fip): # TODO(Swami): Need to figure out what status # should be returned when the floating IP is # not destined for this agent and if the floating # IP is configured in a different compute host. # This should not happen once we fix the server # side code, but still a check to make sure if # the floating IP is intended for this host should # be done. return # dvr_no_external host should not process any floating IP route rules. 
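# NOTE: (sketch) the fast path configured below has two halves: an ip
# rule in the qrouter- namespace sending traffic from the fixed IP to
# table FIP_RT_TBL (_add_floating_ip_rule), and a host route for the
# floating IP in the fip- namespace pointing back across the
# rfp-/fpr- veth pair.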
if (self.agent_conf.agent_mode == lib_constants.L3_AGENT_MODE_DVR_NO_EXTERNAL): return floating_ip = fip['floating_ip_address'] fixed_ip = fip['fixed_ip_address'] self._add_floating_ip_rule(floating_ip, fixed_ip) fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id) # Add routing rule in fip namespace fip_ns_name = self.fip_ns.get_name() if self.rtr_fip_subnet is None: self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate( self.router_id) rtr_2_fip, __ = self.rtr_fip_subnet.get_pair() device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) device.route.add_route(fip_cidr, str(rtr_2_fip.ip)) interface_name = ( self.fip_ns.get_ext_device_name( self.fip_ns.agent_gateway_port['id'])) ip_lib.send_ip_addr_adv_notif(fip_ns_name, interface_name, floating_ip) return lib_constants.FLOATINGIP_STATUS_ACTIVE def _add_floating_ip_rule(self, floating_ip, fixed_ip): rule_pr = self.fip_ns.allocate_rule_priority(floating_ip) self.floating_ips_dict[floating_ip] = (fixed_ip, rule_pr) ip_lib.add_ip_rule(namespace=self.ns_name, ip=fixed_ip, table=dvr_fip_ns.FIP_RT_TBL, priority=int(str(rule_pr))) def _remove_floating_ip_rule(self, floating_ip): if floating_ip in self.floating_ips_dict: fixed_ip, rule_pr = self.floating_ips_dict[floating_ip] ip_lib.delete_ip_rule(self.ns_name, ip=fixed_ip, table=dvr_fip_ns.FIP_RT_TBL, priority=int(str(rule_pr))) self.fip_ns.deallocate_rule_priority(floating_ip) # TODO(rajeev): Handle else case - exception/log? def floating_ip_removed_dist(self, fip_cidr): """Remove floating IP from FIP namespace.""" centralized_fip_cidrs = self.get_centralized_fip_cidr_set() if fip_cidr in centralized_fip_cidrs: self.remove_centralized_floatingip(fip_cidr) return floating_ip = fip_cidr.split('/')[0] fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id) if self.rtr_fip_subnet is None: self.rtr_fip_subnet = self.fip_ns.local_subnets.lookup( self.router_id) if self.rtr_fip_subnet: rtr_2_fip, fip_2_rtr = self.rtr_fip_subnet.get_pair() fip_ns_name = self.fip_ns.get_name() self._remove_floating_ip_rule(floating_ip) device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) device.route.delete_route(fip_cidr, via=str(rtr_2_fip.ip)) return device def floating_ip_moved_dist(self, fip): """Handle floating IP move between fixed IPs.""" floating_ip = fip['floating_ip_address'] self._remove_floating_ip_rule(floating_ip) self._add_floating_ip_rule(floating_ip, fip['fixed_ip_address']) def add_floating_ip(self, fip, interface_name, device): # Special Handling for DVR - update FIP namespace ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address']) return self.floating_ip_added_dist(fip, ip_cidr) def remove_floating_ip(self, device, ip_cidr): fip_2_rtr_device = self.floating_ip_removed_dist(ip_cidr) if fip_2_rtr_device: fip_2_rtr_device.delete_conntrack_state(ip_cidr) def move_floating_ip(self, fip): self.floating_ip_moved_dist(fip) return lib_constants.FLOATINGIP_STATUS_ACTIVE def _get_internal_port(self, subnet_id): """Return internal router port based on subnet_id.""" router_ports = self.router.get(lib_constants.INTERFACE_KEY, []) for port in router_ports: fips = port['fixed_ips'] for f in fips: if f['subnet_id'] == subnet_id: return port def _cache_arp_entry(self, ip, mac, subnet_id, operation): """Cache the arp entries if device not ready.""" arp_entry_tuple = Arp_entry(ip=ip, mac=mac, subnet_id=subnet_id, operation=operation) self._pending_arp_set.add(arp_entry_tuple) def _process_arp_cache_for_internal_port(self, subnet_id): """Function to process the cached arp 
entries.""" arp_remove = set() device, device_exists = self.get_arp_related_dev(subnet_id) for arp_entry in self._pending_arp_set: if subnet_id == arp_entry.subnet_id: try: state = self._update_arp_entry( arp_entry.ip, arp_entry.mac, arp_entry.subnet_id, arp_entry.operation, device=device, device_exists=device_exists) except Exception: state = False if state: # If the arp update was successful, then # go ahead and add it to the remove set arp_remove.add(arp_entry) self._pending_arp_set -= arp_remove def _delete_arp_cache_for_internal_port(self, subnet_id): """Function to delete the cached arp entries.""" arp_delete = set() for arp_entry in self._pending_arp_set: if subnet_id == arp_entry.subnet_id: arp_delete.add(arp_entry) self._pending_arp_set -= arp_delete def _update_arp_entry( self, ip, mac, subnet_id, operation, device, device_exists=True): """Add or delete arp entry into router namespace for the subnet.""" try: if device_exists: if operation == 'add': device.neigh.add(ip, mac) elif operation == 'delete': device.neigh.delete(ip, mac) return True else: if operation == 'add': LOG.warning("Device %s does not exist so ARP entry " "cannot be updated, will cache " "information to be applied later " "when the device exists", device) self._cache_arp_entry(ip, mac, subnet_id, operation) return False except Exception: with excutils.save_and_reraise_exception(): LOG.exception("DVR: Failed updating arp entry") def get_arp_related_dev(self, subnet_id): port = self._get_internal_port(subnet_id) # update arp entry only if the subnet is attached to the router if not port: return None, False interface_name = self.get_internal_device_name(port['id']) device = ip_lib.IPDevice(interface_name, namespace=self.ns_name) device_exists = device.exists() return device, device_exists def _set_subnet_arp_info(self, subnet_id): """Set ARP info retrieved from Plugin for existing ports.""" # TODO(Carl) Can we eliminate the need to make this RPC while # processing a router. subnet_ports = self.agent.get_ports_by_subnet(subnet_id) ignored_device_owners = ( lib_constants.ROUTER_INTERFACE_OWNERS + tuple(common_utils.get_dvr_allowed_address_pair_device_owners())) device, device_exists = self.get_arp_related_dev(subnet_id) for p in subnet_ports: if p['device_owner'] not in ignored_device_owners: for fixed_ip in p['fixed_ips']: self._update_arp_entry(fixed_ip['ip_address'], p['mac_address'], subnet_id, 'add', device=device, device_exists=device_exists) self._process_arp_cache_for_internal_port(subnet_id) @staticmethod def _get_snat_idx(ip_cidr): """Generate index for DVR snat rules and route tables. The index value has to be 32 bits or less but more than the system generated entries i.e. 32768. For IPv4 use the numeric value of the cidr. For IPv6 generate a crc32 bit hash and xor-fold to 30 bits. Use the freed range to extend smaller values so that they become greater than system generated entries. 
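For example (illustrative values, not from the original code): the
IPv4 CIDR '192.168.1.5/24' simply yields 3232235781, the numeric
value of 192.168.1.5, while an IPv6 CIDR is crc32-hashed, xor-folded
to 30 bits and, when the folded value is below 32768, raised by
MASK_30 so it cannot collide with system generated entries.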
""" net = netaddr.IPNetwork(ip_cidr) if net.version == 6: if isinstance(ip_cidr, six.text_type): ip_cidr = ip_cidr.encode() # Needed for Python 3.x # the crc32 & 0xffffffff is for Python 2.6 and 3.0 compatibility snat_idx = binascii.crc32(ip_cidr) & 0xffffffff # xor-fold the hash to reserve upper range to extend smaller values snat_idx = (snat_idx >> 30) ^ (snat_idx & MASK_30) if snat_idx < 32768: snat_idx = snat_idx + MASK_30 else: snat_idx = net.value return snat_idx def _delete_gateway_device_if_exists(self, ns_ip_device, gw_ip_addr, snat_idx): try: ns_ip_device.route.delete_gateway(gw_ip_addr, table=snat_idx) except priv_ip_lib.NetworkInterfaceNotFound: pass def _stale_ip_rule_cleanup(self, namespace, ns_ipd, ip_version): ip_rules_list = ip_lib.list_ip_rules(namespace, ip_version) snat_table_list = [] for ip_rule in ip_rules_list: snat_table = ip_rule['table'] priority = ip_rule['priority'] if snat_table in ['local', 'default', 'main']: continue if (ip_version == lib_constants.IP_VERSION_4 and snat_table in range(dvr_fip_ns.FIP_PR_START, dvr_fip_ns.FIP_PR_END)): continue gateway_cidr = ip_rule['from'] ip_lib.delete_ip_rule(namespace, ip=gateway_cidr, table=snat_table, priority=priority) snat_table_list.append(snat_table) for tb in snat_table_list: ns_ipd.route.flush(ip_version, table=tb) def gateway_redirect_cleanup(self, rtr_interface): ns_ipd = ip_lib.IPDevice(rtr_interface, namespace=self.ns_name) self._stale_ip_rule_cleanup(self.ns_name, ns_ipd, lib_constants.IP_VERSION_4) self._stale_ip_rule_cleanup(self.ns_name, ns_ipd, lib_constants.IP_VERSION_6) def _snat_redirect_modify(self, gateway, sn_port, sn_int, is_add): """Adds or removes rules and routes for SNAT redirection.""" cmd = ['net.ipv4.conf.%s.send_redirects=0' % sn_int] try: ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name) for port_fixed_ip in sn_port['fixed_ips']: # Iterate and find the gateway IP address matching # the IP version port_ip_addr = port_fixed_ip['ip_address'] port_ip_vers = netaddr.IPAddress(port_ip_addr).version for gw_fixed_ip in gateway['fixed_ips']: gw_ip_addr = gw_fixed_ip['ip_address'] if netaddr.IPAddress(gw_ip_addr).version == port_ip_vers: sn_port_cidr = common_utils.ip_to_cidr( port_ip_addr, port_fixed_ip['prefixlen']) snat_idx = self._get_snat_idx(sn_port_cidr) if is_add: ns_ipd.route.add_gateway(gw_ip_addr, table=snat_idx) ip_lib.add_ip_rule(namespace=self.ns_name, ip=sn_port_cidr, table=snat_idx, priority=snat_idx) ip_lib.sysctl(cmd, namespace=self.ns_name) else: self._delete_gateway_device_if_exists(ns_ipd, gw_ip_addr, snat_idx) ip_lib.delete_ip_rule(self.ns_name, ip=sn_port_cidr, table=snat_idx, priority=snat_idx) except Exception: if is_add: exc = 'DVR: error adding redirection logic' else: exc = ('DVR: snat remove failed to clear the rule ' 'and device') LOG.exception(exc) def _snat_redirect_add(self, gateway, sn_port, sn_int): """Adds rules and routes for SNAT redirection.""" self._snat_redirect_modify(gateway, sn_port, sn_int, is_add=True) def _snat_redirect_remove(self, gateway, sn_port, sn_int): """Removes rules and routes for SNAT redirection.""" self._snat_redirect_modify(gateway, sn_port, sn_int, is_add=False) def internal_network_added(self, port): super(DvrLocalRouter, self).internal_network_added(port) # NOTE: The following function _set_subnet_arp_info # should be called to dynamically populate the arp # entries for the dvr services ports into the router # namespace. This does not have dependency on the # external_gateway port or the agent_mode. 
ex_gw_port = self.get_ex_gw_port() for subnet in port['subnets']: self._set_subnet_arp_info(subnet['id']) if ex_gw_port: # Check for address_scopes here if gateway exists. address_scopes_match = self._check_if_address_scopes_match( port, ex_gw_port) if (address_scopes_match and (self.agent_conf.agent_mode in [lib_constants.L3_AGENT_MODE_DVR, lib_constants.L3_AGENT_MODE_DVR_SNAT])): self._add_interface_routing_rule_to_router_ns(port) self._add_interface_route_to_fip_ns(port) self._snat_redirect_add_from_port(port) def _snat_redirect_add_from_port(self, port): ex_gw_port = self.get_ex_gw_port() if not ex_gw_port: return address_scopes_match = self._check_if_address_scopes_match( port, ex_gw_port) if (address_scopes_match and (self.agent_conf.agent_mode in [lib_constants.L3_AGENT_MODE_DVR, lib_constants.L3_AGENT_MODE_DVR_SNAT])): return sn_port = self.get_snat_port_for_internal_port(port) if not sn_port: return interface_name = self.get_internal_device_name(port['id']) self._snat_redirect_add(sn_port, port, interface_name) def _dvr_internal_network_removed(self, port): # Clean up the cached arp entries related to the port subnet for subnet in port['subnets']: self._delete_arp_cache_for_internal_port(subnet) if not self.ex_gw_port: return # Delete DVR address_scope static route for the removed interface # Check for address_scopes here. address_scopes_match = self._check_if_address_scopes_match( port, self.ex_gw_port) if (address_scopes_match and (self.agent_conf.agent_mode in [lib_constants.L3_AGENT_MODE_DVR, lib_constants.L3_AGENT_MODE_DVR_SNAT])): self._delete_interface_route_in_fip_ns(port) self._delete_interface_routing_rule_in_router_ns(port) # If address scopes match there is no need to cleanup the # snat redirect rules, hence return here. return sn_port = self.get_snat_port_for_internal_port(port, self.snat_ports) if not sn_port: return # DVR handling code for SNAT interface_name = self.get_internal_device_name(port['id']) self._snat_redirect_remove(sn_port, port, interface_name) def internal_network_removed(self, port): self._dvr_internal_network_removed(port) super(DvrLocalRouter, self).internal_network_removed(port) def get_floating_agent_gw_interface(self, ext_net_id): """Filter Floating Agent GW port for the external network.""" fip_ports = self.router.get( lib_constants.FLOATINGIP_AGENT_INTF_KEY, []) return next( (p for p in fip_ports if p['network_id'] == ext_net_id), None) def get_snat_external_device_interface_name(self, port_id): pass def get_external_device_interface_name(self, ex_gw_port): fip_int = self.fip_ns.get_int_device_name(self.router_id) if ip_lib.device_exists(fip_int, namespace=self.fip_ns.get_name()): return self.fip_ns.get_rtr_ext_device_name(self.router_id) def enable_snat_redirect_rules(self, ex_gw_port): for p in self.internal_ports: gateway = self.get_snat_port_for_internal_port(p) if not gateway: continue address_scopes_match = self._check_if_address_scopes_match( p, ex_gw_port) if (not address_scopes_match or (self.agent_conf.agent_mode == lib_constants.L3_AGENT_MODE_DVR_NO_EXTERNAL)): internal_dev = self.get_internal_device_name(p['id']) self._snat_redirect_add(gateway, p, internal_dev) def disable_snat_redirect_rules(self, ex_gw_port): for p in self.internal_ports: gateway = self.get_snat_port_for_internal_port( p, self.snat_ports) if not gateway: continue address_scopes_match = self._check_if_address_scopes_match( p, ex_gw_port) if (not address_scopes_match or (self.agent_conf.agent_mode == lib_constants.L3_AGENT_MODE_DVR_NO_EXTERNAL)): internal_dev = 
self.get_internal_device_name(p['id']) self._snat_redirect_remove(gateway, p, internal_dev) def external_gateway_added(self, ex_gw_port, interface_name): # TODO(Carl) Refactor external_gateway_added/updated/removed to use # super class implementation where possible. Looks like preserve_ips, # and ns_name are the key differences. cmd = ['net.ipv4.conf.all.send_redirects=0'] ip_lib.sysctl(cmd, namespace=self.ns_name) self.enable_snat_redirect_rules(ex_gw_port) for port in self.get_snat_interfaces(): for ip in port['fixed_ips']: subnet_id = ip['subnet_id'] device, device_exists = self.get_arp_related_dev(subnet_id) self._update_arp_entry(ip['ip_address'], port['mac_address'], subnet_id, 'add', device=device, device_exists=device_exists) def external_gateway_updated(self, ex_gw_port, interface_name): pass def process_floating_ip_nat_rules(self): """Configure NAT rules for the router's floating IPs. Configures iptables rules for the floating ips of the given router """ # Clear out all iptables rules for floating ips self.iptables_manager.ipv4['nat'].clear_rules_by_tag('floating_ip') floating_ips = self.get_floating_ips() # Loop once to ensure that floating ips are configured. for fip in floating_ips: # If floating IP is snat_bound, then the iptables rule should # not be installed to qrouter namespace, since the mixed snat # namespace may already install it. if fip.get(lib_constants.DVR_SNAT_BOUND): continue # Rebuild iptables rules for the floating ip. for chain, rule in self.floating_forward_rules(fip): self.iptables_manager.ipv4['nat'].add_rule( chain, rule, tag='floating_ip') self.iptables_manager.apply() def external_gateway_removed(self, ex_gw_port, interface_name): # TODO(Carl) Should this be calling process_snat_dnat_for_fip? self.process_floating_ip_nat_rules() if self.fip_ns: to_fip_interface_name = ( self.get_external_device_interface_name(ex_gw_port)) self.process_floating_ip_addresses(to_fip_interface_name) # Remove the router to fip namespace connection after the # gateway is removed. self.fip_ns.delete_rtr_2_fip_link(self) self.rtr_fip_connect = False # NOTE:_snat_redirect_remove should be only called when the # gateway is cleared and should not be called when the gateway # is moved or rescheduled. 
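# NOTE: The presence of 'gw_port' in the router dict is what separates
# a gateway move/reschedule (keep the snat redirect rules) from a real
# gateway removal (tear them down below).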
if not self.router.get('gw_port'): self.disable_snat_redirect_rules(ex_gw_port) def _handle_router_snat_rules(self, ex_gw_port, interface_name): """Configures NAT rules for Floating IPs for DVR.""" self.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING') self.iptables_manager.ipv4['nat'].empty_chain('snat') ex_gw_port = self.get_ex_gw_port() if not ex_gw_port: return ext_device_name = self.get_external_device_interface_name(ex_gw_port) floatingips = self.get_floating_ips() if not ext_device_name or not floatingips: # Without router to fip device, or without any floating ip, # the snat rules should not be added return # Add back the jump to float-snat self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat') rule = self._prevent_snat_for_internal_traffic_rule(ext_device_name) self.iptables_manager.ipv4['nat'].add_rule(*rule) def _get_address_scope_mark(self): # Prepare address scope iptables rule for internal ports internal_ports = self.router.get(lib_constants.INTERFACE_KEY, []) ports_scopemark = self._get_port_devicename_scopemark( internal_ports, self.get_internal_device_name) # DVR local router will use rfp port as external port ext_port = self.get_ex_gw_port() if not ext_port: return ports_scopemark ext_device_name = self.get_external_device_interface_name(ext_port) if not ext_device_name: return ports_scopemark ext_scope = self._get_external_address_scope() ext_scope_mark = self.get_address_scope_mark_mask(ext_scope) ports_scopemark[lib_constants.IP_VERSION_4][ext_device_name] = ( ext_scope_mark) return ports_scopemark def _check_if_floatingip_bound_to_host(self, fip): """Check if the floating IP is bound to this host.""" return self.host in (fip.get('host'), fip.get('dest_host')) def process_external(self): if self.agent_conf.agent_mode != ( lib_constants.L3_AGENT_MODE_DVR_NO_EXTERNAL): ex_gw_port = self.get_ex_gw_port() if ex_gw_port: self.create_dvr_external_gateway_on_agent(ex_gw_port) self.connect_rtr_2_fip() super(DvrLocalRouter, self).process_external() def _check_rtr_2_fip_connect(self): """Checks if the rtr to fip connect exists, if not sets to false.""" fip_ns_name = self.fip_ns.get_name() if ip_lib.network_namespace_exists(fip_ns_name): fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id) if not ip_lib.device_exists(fip_2_rtr_name, namespace=fip_ns_name): self.rtr_fip_connect = False def connect_rtr_2_fip(self): self._check_rtr_2_fip_connect() if self.fip_ns.agent_gateway_port and not self.rtr_fip_connect: ex_gw_port = self.get_ex_gw_port() self.fip_ns.create_rtr_2_fip_link(self) self.set_address_scope_interface_routes(ex_gw_port) self.rtr_fip_connect = True self.routes_updated([], self.router['routes']) def _check_if_address_scopes_match(self, int_port, ex_gw_port): """Checks and returns the matching state for v4 or v6 scopes.""" int_port_addr_scopes = int_port.get('address_scopes', {}) ext_port_addr_scopes = ex_gw_port.get('address_scopes', {}) key = ( lib_constants.IP_VERSION_6 if self._port_has_ipv6_subnet(int_port) else lib_constants.IP_VERSION_4) # NOTE: DVR does not support IPv6 for the floating namespace yet, so # until we fix it, we probably should use the snat redirect path for # the ports that have IPv6 address configured. int_port_addr_value = int_port_addr_scopes.get(str(key)) # If the address scope of the interface is none, then don't need # to compare and just return. 
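# NOTE: Matching scopes let traffic exit through the fip- namespace
# fast path without SNAT; mismatched or missing scopes fall back to
# the snat redirect path, and IPv6 always does, per the NOTE above.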
if int_port_addr_value is None: return False if ((key != lib_constants.IP_VERSION_6) and int_port_addr_scopes.get(str(key)) in ext_port_addr_scopes.values()): return True return False def _delete_interface_route_in_fip_ns(self, router_port): rtr_2_fip_ip, fip_2_rtr_name = self.get_rtr_fip_ip_and_interface_name() fip_ns_name = self.fip_ns.get_name() if ip_lib.network_namespace_exists(fip_ns_name): device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) if not device.exists(): return for subnet in router_port['subnets']: rtr_port_cidr = subnet['cidr'] device.route.delete_route(rtr_port_cidr, via=str(rtr_2_fip_ip)) def _add_interface_route_to_fip_ns(self, router_port): rtr_2_fip_ip, fip_2_rtr_name = self.get_rtr_fip_ip_and_interface_name() fip_ns_name = self.fip_ns.get_name() if ip_lib.network_namespace_exists(fip_ns_name): device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) if not device.exists(): return for subnet in router_port['subnets']: rtr_port_cidr = subnet['cidr'] device.route.add_route(rtr_port_cidr, str(rtr_2_fip_ip)) def _add_interface_routing_rule_to_router_ns(self, router_port): for subnet in router_port['subnets']: rtr_port_cidr = subnet['cidr'] ip_lib.add_ip_rule(namespace=self.ns_name, ip=rtr_port_cidr, table=dvr_fip_ns.FIP_RT_TBL, priority=dvr_fip_ns.FAST_PATH_EXIT_PR) def _delete_interface_routing_rule_in_router_ns(self, router_port): for subnet in router_port['subnets']: rtr_port_cidr = subnet['cidr'] ip_lib.delete_ip_rule(self.ns_name, ip=rtr_port_cidr, table=dvr_fip_ns.FIP_RT_TBL, priority=dvr_fip_ns.FAST_PATH_EXIT_PR) def get_rtr_fip_ip_and_interface_name(self): """Function that returns the router to fip interface name and ip.""" if self.rtr_fip_subnet is None: self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate( self.router_id) rtr_2_fip, __ = self.rtr_fip_subnet.get_pair() fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id) return rtr_2_fip.ip, fip_2_rtr_name def set_address_scope_interface_routes(self, ex_gw_port): """Sets routing rules for router interfaces if addr scopes match.""" for port in self.internal_ports: if self._check_if_address_scopes_match(port, ex_gw_port): self._add_interface_routing_rule_to_router_ns(port) self._add_interface_route_to_fip_ns(port) def create_dvr_external_gateway_on_agent(self, ex_gw_port): fip_agent_port = self.get_floating_agent_gw_interface( ex_gw_port['network_id']) if not fip_agent_port: fip_agent_port = self.agent.plugin_rpc.get_agent_gateway_port( self.agent.context, ex_gw_port['network_id']) LOG.debug("FloatingIP agent gateway port received from the " "plugin: %s", fip_agent_port) self.fip_ns.create_or_update_gateway_port(fip_agent_port) def update_routing_table(self, operation, route): # TODO(Swami): The static routes should be added to the # specific namespace based on the availability of the # network interfaces. In the case of DVR the static routes # for local internal router networks can be added to the # snat_namespace and router_namespace but should not be # added to the fip namespace. Likewise the static routes # for the external router networks should only be added to # the snat_namespace and fip_namespace. # The current code adds static routes to all namespaces in # order to reduce the complexity. This should be revisited # later. 
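# NOTE: (sketch, hypothetical values) for an extra route such as
# {'destination': '198.51.100.0/24', 'nexthop': '192.168.200.1'} the
# branch below replays the route into the fip- namespace table only
# when the nexthop lies inside one of the agent gateway port's subnets.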
if self.fip_ns and self.fip_ns.agent_gateway_port: fip_ns_name = self.fip_ns.get_name() agent_gw_port = self.fip_ns.agent_gateway_port route_apply = self._check_if_route_applicable_to_fip_namespace( route, agent_gw_port) if route_apply: if self.rtr_fip_subnet is None: self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate( self.router_id) rtr_2_fip, fip_2_rtr = self.rtr_fip_subnet.get_pair() tbl_index = self._get_snat_idx(fip_2_rtr) self._update_fip_route_table_with_next_hop_routes( operation, route, fip_ns_name, tbl_index) super(DvrLocalRouter, self).update_routing_table(operation, route) def _update_fip_route_table_with_next_hop_routes(self, operation, route, fip_ns_name, tbl_index): cmd = ['ip', 'route', operation, 'to', route['destination'], 'via', route['nexthop'], 'table', tbl_index] ip_wrapper = ip_lib.IPWrapper(namespace=fip_ns_name) if ip_wrapper.netns.exists(fip_ns_name): ip_wrapper.netns.execute(cmd, check_exit_code=False) else: LOG.debug("The FIP namespace %(ns)s does not exist for " "router %(id)s", {'ns': fip_ns_name, 'id': self.router_id}) def _check_if_route_applicable_to_fip_namespace(self, route, agent_gateway_port): ip_cidrs = common_utils.fixed_ip_cidrs(agent_gateway_port['fixed_ips']) nexthop_cidr = netaddr.IPAddress(route['nexthop']) for gw_cidr in ip_cidrs: gw_subnet_cidr = netaddr.IPNetwork(gw_cidr) # NOTE: In the case of DVR routers apply the extra routes # on the FIP namespace only if it is associated with the # external agent gateway subnets. if nexthop_cidr in gw_subnet_cidr: return True return False def get_router_cidrs(self, device): """As no floatingip will be set on the rfp device. Get floatingip from the route of fip namespace. """ if not self.fip_ns: return set() fip_ns_name = self.fip_ns.get_name() fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id) device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) if not device.exists(): return set() if self.rtr_fip_subnet is None: self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate( self.router_id) rtr_2_fip, _fip_2_rtr = self.rtr_fip_subnet.get_pair() exist_routes = device.route.list_routes( lib_constants.IP_VERSION_4, via=str(rtr_2_fip.ip)) return {common_utils.ip_to_cidr(route['cidr']) for route in exist_routes} def process(self): ex_gw_port = self.get_ex_gw_port() if ex_gw_port: self.fip_ns = self.agent.get_fip_ns(ex_gw_port['network_id']) self.fip_ns.scan_fip_ports(self) super(DvrLocalRouter, self).process() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/dvr_router_base.py0000644000175000017500000000375600000000000024023 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
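# NOTE: This module holds the base class shared by the DVR local and
# edge router implementations: it caches the router's SNAT interface
# ports on each process() pass and maps an internal port to its SNAT
# port by subnet_id (get_snat_port_for_internal_port).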
from neutron_lib import constants from oslo_log import log as logging from neutron.agent.l3 import router_info as router LOG = logging.getLogger(__name__) class DvrRouterBase(router.RouterInfo): def __init__(self, host, *args, **kwargs): super(DvrRouterBase, self).__init__(*args, **kwargs) self.host = host self.snat_ports = None def process(self): super(DvrRouterBase, self).process() # NOTE: Keep a copy of the interfaces around for when they are removed self.snat_ports = self.get_snat_interfaces() def get_snat_interfaces(self): return self.router.get(constants.SNAT_ROUTER_INTF_KEY, []) def get_snat_port_for_internal_port(self, int_port, snat_ports=None): """Return the SNAT port for the given internal interface port.""" if snat_ports is None: snat_ports = self.get_snat_interfaces() if not snat_ports: return fixed_ips = int_port['fixed_ips'] subnet_ids = [fixed_ip['subnet_id'] for fixed_ip in fixed_ips] for p in snat_ports: for ip in p['fixed_ips']: if ip['subnet_id'] in subnet_ids: return p LOG.error('DVR: SNAT port not found in the list ' '%(snat_list)s for the given router ' 'internal port %(int_p)s', { 'snat_list': snat_ports, 'int_p': int_port}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/dvr_snat_ns.py0000644000175000017500000000462000000000000023145 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
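# NOTE: The namespace managed here is named 'snat-<router_id>'. It is
# created with ip_nonlocal_bind disabled (it may back an HA router) and
# with nf_conntrack_tcp_loose=0 so mid-stream TCP conversations are not
# taken over by SNAT.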
from neutron_lib import constants from oslo_log import log as logging from neutron.agent.l3 import namespaces from neutron.agent.linux import ip_lib LOG = logging.getLogger(__name__) SNAT_NS_PREFIX = 'snat-' class SnatNamespace(namespaces.Namespace): def __init__(self, router_id, agent_conf, driver, use_ipv6): self.router_id = router_id name = self.get_snat_ns_name(router_id) super(SnatNamespace, self).__init__( name, agent_conf, driver, use_ipv6) def create(self): super(SnatNamespace, self).create() # This might be an HA router namespaces and it should not have # ip_nonlocal_bind enabled ip_lib.set_ip_nonlocal_bind_for_namespace(self.name, 0) # Set nf_conntrack_tcp_loose to 0 to ensure mid-stream # TCP conversations aren't taken over by SNAT cmd = ['net.netfilter.nf_conntrack_tcp_loose=0'] ip_lib.sysctl(cmd, namespace=self.name) @classmethod def get_snat_ns_name(cls, router_id): return namespaces.build_ns_name(SNAT_NS_PREFIX, router_id) @namespaces.check_ns_existence def delete(self): ns_ip = ip_lib.IPWrapper(namespace=self.name) for d in ns_ip.get_devices(): if d.name.startswith(constants.SNAT_INT_DEV_PREFIX): LOG.debug('Unplugging DVR device %s', d.name) self.driver.unplug(d.name, namespace=self.name, prefix=constants.SNAT_INT_DEV_PREFIX) elif d.name.startswith(namespaces.EXTERNAL_DEV_PREFIX): self.driver.unplug( d.name, namespace=self.name, prefix=namespaces.EXTERNAL_DEV_PREFIX) # TODO(mrsmith): delete ext-gw-port LOG.debug('DVR: destroy snat ns: %s', self.name) super(SnatNamespace, self).delete() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.1990438 neutron-16.0.0.0b2.dev214/neutron/agent/l3/extensions/0000755000175000017500000000000000000000000022450 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/extensions/__init__.py0000644000175000017500000000000000000000000024547 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/extensions/conntrack_helper.py0000644000175000017500000002663100000000000026353 0ustar00coreycorey00000000000000# Copyright (c) 2019 Red Hat Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
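# NOTE: (sketch, hypothetical values) each conntrack helper managed by
# this extension ends up as a raw-table rule of the form:
#   -p udp --dport 69 -j CT --helper tftp
# living in a per-helper chain that is jumped to from the wrapped
# default 'cth' chain hooked into PREROUTING.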
import collections from neutron_lib.agent import l3_extension from neutron_lib import constants from neutron_lib import rpc as n_rpc from oslo_concurrency import lockutils from oslo_log import log as logging from neutron.api.rpc.callbacks.consumer import registry from neutron.api.rpc.callbacks import events from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import resources_rpc LOG = logging.getLogger(__name__) DEFAULT_CONNTRACK_HELPER_CHAIN = 'cth' CONNTRACK_HELPER_PREFIX = 'cthelper-' CONNTRACK_HELPER_CHAIN_PREFIX = DEFAULT_CONNTRACK_HELPER_CHAIN + '-' class ConntrackHelperMapping(object): def __init__(self): self._managed_conntrack_helpers = {} """ router_conntrack_helper_mapping = { router_id_1: set(cth_id_1, cth_id_2), router_id_2: set(cth_id_3, cth_id_4) } """ self._router_conntrack_helper_mapping = collections.defaultdict(set) def set_conntrack_helpers(self, conntrack_helpers): for cth in conntrack_helpers: self._router_conntrack_helper_mapping[cth.router_id].add(cth.id) self._managed_conntrack_helpers[cth.id] = cth def update_conntrack_helpers(self, conntrack_helpers): for cth in conntrack_helpers: if (cth.id not in self._router_conntrack_helper_mapping[cth.router_id]): self._router_conntrack_helper_mapping[cth.router_id].add( cth.id) self._managed_conntrack_helpers[cth.id] = cth def get_conntack_helper(self, conntrack_helper_id): return self._managed_conntrack_helpers.get(conntrack_helper_id) def get_managed_conntrack_helpers(self): return self._managed_conntrack_helpers def del_conntrack_helpers(self, conntrack_helpers): for cth in conntrack_helpers: if not self.get_conntack_helper(cth.id): continue del self._managed_conntrack_helpers[cth.id] self._router_conntrack_helper_mapping[cth.router_id].remove( cth.id) if not self._router_conntrack_helper_mapping[cth.router_id]: del self._router_conntrack_helper_mapping[cth.router_id] def clear_by_router_id(self, router_id): router_cth_ids = self._router_conntrack_helper_mapping.get(router_id) if not router_cth_ids: return for cth_id in router_cth_ids: del self._managed_conntrack_helpers[cth_id] del self._router_conntrack_helper_mapping[router_id] def check_conntrack_helper_changes(self, new_cth): old_cth = self.get_conntack_helper(new_cth.id) return old_cth != new_cth class ConntrackHelperAgentExtension(l3_extension.L3AgentExtension): SUPPORTED_RESOURCE_TYPES = [resources.CONNTRACKHELPER] def initialize(self, connection, driver_type): self.resource_rpc = resources_rpc.ResourcesPullRpcApi() self._register_rpc_consumers() self.mapping = ConntrackHelperMapping() def _register_rpc_consumers(self): registry.register(self._handle_notification, resources.CONNTRACKHELPER) self._connection = n_rpc.Connection() endpoints = [resources_rpc.ResourcesPushRpcCallback()] topic = resources_rpc.resource_type_versioned_topic( resources.CONNTRACKHELPER) self._connection.create_consumer(topic, endpoints, fanout=True) self._connection.consume_in_threads() def consume_api(self, agent_api): self.agent_api = agent_api @lockutils.synchronized('conntrack-helpers') def _handle_notification(self, context, resource_type, conntrack_helpers, event_type): for conntrack_helper in conntrack_helpers: router_info = self.agent_api.get_router_info( conntrack_helper.router_id) if not router_info: return iptables_manager = self._get_iptables_manager(router_info) if event_type == events.CREATED: self._process_create([conntrack_helper], iptables_manager) elif event_type == events.UPDATED: self._process_update([conntrack_helper], iptables_manager) elif 
event_type == events.DELETED: self._process_delete([conntrack_helper], iptables_manager) def _get_chain_name(self, id): return (CONNTRACK_HELPER_CHAIN_PREFIX + id)[ :constants.MAX_IPTABLES_CHAIN_LEN_WRAP] def _install_default_rules(self, iptables_manager, version): default_rule = '-j %s-%s' % (iptables_manager.wrap_name, DEFAULT_CONNTRACK_HELPER_CHAIN) if version == constants.IPv4: iptables_manager.ipv4['raw'].add_chain( DEFAULT_CONNTRACK_HELPER_CHAIN) iptables_manager.ipv4['raw'].add_rule('PREROUTING', default_rule) elif version == constants.IPv6: iptables_manager.ipv6['raw'].add_chain( DEFAULT_CONNTRACK_HELPER_CHAIN) iptables_manager.ipv6['raw'].add_rule('PREROUTING', default_rule) iptables_manager.apply() def _get_chain_rules_list(self, conntrack_helper, wrap_name): chain_name = self._get_chain_name(conntrack_helper.id) chain_rule_list = [(DEFAULT_CONNTRACK_HELPER_CHAIN, '-j %s-%s' % (wrap_name, chain_name))] chain_rule_list.append((chain_name, '-p %(proto)s --dport %(dport)s -j CT ' '--helper %(helper)s' % {'proto': conntrack_helper.protocol, 'dport': conntrack_helper.port, 'helper': conntrack_helper.helper})) return chain_rule_list def _rule_apply(self, iptables_manager, conntrack_helper): tag = CONNTRACK_HELPER_PREFIX + conntrack_helper.id iptables_manager.ipv4['raw'].clear_rules_by_tag(tag) iptables_manager.ipv6['raw'].clear_rules_by_tag(tag) for chain, rule in self._get_chain_rules_list( conntrack_helper, iptables_manager.wrap_name): if chain not in iptables_manager.ipv4['raw'].chains: iptables_manager.ipv4['raw'].add_chain(chain) if chain not in iptables_manager.ipv6['raw'].chains: iptables_manager.ipv6['raw'].add_chain(chain) iptables_manager.ipv4['raw'].add_rule(chain, rule, tag=tag) iptables_manager.ipv6['raw'].add_rule(chain, rule, tag=tag) def _process_create(self, conntrack_helpers, iptables_manager): if not conntrack_helpers: return if (DEFAULT_CONNTRACK_HELPER_CHAIN not in iptables_manager.ipv4['raw'].chains): self._install_default_rules(iptables_manager, constants.IPv4) if (DEFAULT_CONNTRACK_HELPER_CHAIN not in iptables_manager.ipv6['raw'].chains): self._install_default_rules(iptables_manager, constants.IPv6) for conntrack_helper in conntrack_helpers: self._rule_apply(iptables_manager, conntrack_helper) iptables_manager.apply() self.mapping.set_conntrack_helpers(conntrack_helpers) def _process_update(self, conntrack_helpers, iptables_manager): if not conntrack_helpers: return for conntrack_helper in conntrack_helpers: if not self.mapping.check_conntrack_helper_changes( conntrack_helper): LOG.debug("Skip conntrack helper %s for update, as there is " "no difference between the memory managed by agent", conntrack_helper.id) continue current_chain = self._get_chain_name(conntrack_helper.id) iptables_manager.ipv4['raw'].remove_chain(current_chain) iptables_manager.ipv6['raw'].remove_chain(current_chain) self._rule_apply(iptables_manager, conntrack_helper) iptables_manager.apply() self.mapping.update_conntrack_helpers(conntrack_helpers) def _process_delete(self, conntrack_helpers, iptables_manager): if not conntrack_helpers: return for conntrack_helper in conntrack_helpers: chain_name = self._get_chain_name(conntrack_helper.id) iptables_manager.ipv4['raw'].remove_chain(chain_name) iptables_manager.ipv6['raw'].remove_chain(chain_name) iptables_manager.apply() self.mapping.del_conntrack_helpers(conntrack_helpers) def _get_iptables_manager(self, router_info): if router_info.router.get('distributed'): return router_info.snat_iptables_manager return router_info.iptables_manager def 
check_local_conntrack_helpers(self, context, router_info): local_ct_helpers = set(self.mapping.get_managed_conntrack_helpers() .keys()) new_ct_helpers = [] updated_cth_helpers = [] current_ct_helpers = set() ct_helpers = self.resource_rpc.bulk_pull( context, resources.CONNTRACKHELPER, filter_kwargs={ 'router_id': router_info.router['id']}) for cth in ct_helpers: # Split request conntrack helpers into update, new and current if (cth.id in self.mapping.get_managed_conntrack_helpers() and self.mapping.check_conntrack_helper_changes(cth)): updated_cth_helpers.append(cth) elif cth.id not in self.mapping.get_managed_conntrack_helpers(): new_ct_helpers.append(cth) current_ct_helpers.add(cth.id) remove_ct_helpers = [ self.mapping.get_managed_conntrack_helpers().get(cth_id) for cth_id in local_ct_helpers.difference(current_ct_helpers)] iptables_manager = self._get_iptables_manager(router_info) self._process_update(updated_cth_helpers, iptables_manager) self._process_create(new_ct_helpers, iptables_manager) self._process_delete(remove_ct_helpers, iptables_manager) def process_conntrack_helper(self, context, data): router_info = self.agent_api.get_router_info(data['id']) if not router_info: LOG.debug("Router %s is not managed by this agent. " "It was possibly deleted concurrently.", data['id']) return self.check_local_conntrack_helpers(context, router_info) @lockutils.synchronized('conntrack-helpers') def add_router(self, context, data): self.process_conntrack_helper(context, data) @lockutils.synchronized('conntrack-helpers') def update_router(self, context, data): self.process_conntrack_helper(context, data) def delete_router(self, context, data): self.mapping.clear_by_router_id(data['id']) def ha_state_change(self, context, data): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/extensions/port_forwarding.py0000644000175000017500000004746200000000000026245 0ustar00coreycorey00000000000000# Copyright 2018 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
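# NOTE: (sketch, hypothetical values) a port forwarding maps
# <floating_ip>:<external_port> to <fixed_ip>:<internal_port> through a
# nat-table DNAT rule such as:
#   -d 203.0.113.10/32 -p tcp -m tcp --dport 2222 \
#       -j DNAT --to-destination 10.0.0.5:22
# held in a per-forwarding 'pf-' chain reached from the wrapped default
# 'fip-pf' chain hooked into PREROUTING.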
import collections import netaddr from neutron_lib.agent import l3_extension from neutron_lib import constants from neutron_lib import rpc as n_rpc from oslo_concurrency import lockutils from oslo_log import log as logging from neutron.agent.linux import ip_lib from neutron.api.rpc.callbacks.consumer import registry from neutron.api.rpc.callbacks import events from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import resources_rpc from neutron.common import coordination LOG = logging.getLogger(__name__) DEFAULT_PORT_FORWARDING_CHAIN = 'fip-pf' PORT_FORWARDING_PREFIX = 'fip_portforwarding-' PORT_FORWARDING_CHAIN_PREFIX = 'pf-' class RouterFipPortForwardingMapping(object): def __init__(self): self.managed_port_forwardings = {} """ fip_port_forwarding = { fip_id_1: set(pf_id1, pf_id2), fip_id_2: set(pf_id3, pf_id4) } """ self.fip_port_forwarding = collections.defaultdict(set) """ router_fip_mapping = { router_id_1: set(fip_id_1, fip_id_2), router_id_2: set(fip_id_3, fip_id_4) } """ self.router_fip_mapping = collections.defaultdict(set) @lockutils.synchronized('port-forwarding-cache') def set_port_forwardings(self, port_forwardings): for port_forwarding in port_forwardings: self._set_fip_port_forwarding(port_forwarding.floatingip_id, port_forwarding, port_forwarding.router_id) @lockutils.synchronized('port-forwarding-cache') def update_port_forwardings(self, port_forwardings): for port_forwarding in port_forwardings: self.managed_port_forwardings[port_forwarding.id] = port_forwarding @lockutils.synchronized('port-forwarding-cache') def del_port_forwardings(self, port_forwardings): for port_forwarding in port_forwardings: if not self.managed_port_forwardings.get(port_forwarding.id): continue self.managed_port_forwardings.pop(port_forwarding.id) self.fip_port_forwarding[port_forwarding.floatingip_id].remove( port_forwarding.id) if not self.fip_port_forwarding[port_forwarding.floatingip_id]: self.fip_port_forwarding.pop(port_forwarding.floatingip_id) self.router_fip_mapping[port_forwarding.router_id].remove( port_forwarding.floatingip_id) if not self.router_fip_mapping[port_forwarding.router_id]: del self.router_fip_mapping[port_forwarding.router_id] def _set_fip_port_forwarding(self, fip_id, pf, router_id): self.router_fip_mapping[router_id].add(fip_id) self.fip_port_forwarding[fip_id].add(pf.id) self.managed_port_forwardings[pf.id] = pf @lockutils.synchronized('port-forwarding-cache') def clear_by_fip(self, fip_id, router_id): self.router_fip_mapping[router_id].remove(fip_id) if len(self.router_fip_mapping[router_id]) == 0: del self.router_fip_mapping[router_id] for pf_id in self.fip_port_forwarding[fip_id]: del self.managed_port_forwardings[pf_id] del self.fip_port_forwarding[fip_id] @lockutils.synchronized('port-forwarding-cache') def check_port_forwarding_changes(self, new_pf): old_pf = self.managed_port_forwardings.get(new_pf.id) return old_pf != new_pf class PortForwardingAgentExtension(l3_extension.L3AgentExtension): SUPPORTED_RESOURCE_TYPES = [resources.PORTFORWARDING] def initialize(self, connection, driver_type): self.resource_rpc = resources_rpc.ResourcesPullRpcApi() self._register_rpc_consumers() self.mapping = RouterFipPortForwardingMapping() def _register_rpc_consumers(self): registry.register(self._handle_notification, resources.PORTFORWARDING) self._connection = n_rpc.Connection() endpoints = [resources_rpc.ResourcesPushRpcCallback()] topic = resources_rpc.resource_type_versioned_topic( resources.PORTFORWARDING) self._connection.create_consumer(topic, 
endpoints, fanout=True) self._connection.consume_in_threads() def consume_api(self, agent_api): self.agent_api = agent_api def _handle_notification(self, context, resource_type, forwardings, event_type): for forwarding in forwardings: self._process_port_forwarding_event( context, forwarding, event_type) def _store_local(self, pf_objs, event_type): if event_type == events.CREATED: self.mapping.set_port_forwardings(pf_objs) elif event_type == events.UPDATED: self.mapping.update_port_forwardings(pf_objs) elif event_type == events.DELETED: self.mapping.del_port_forwardings(pf_objs) def _get_fip_rules(self, port_forward, wrap_name): chain_rule_list = [] pf_chain_name = self._get_port_forwarding_chain_name(port_forward.id) chain_rule_list.append((DEFAULT_PORT_FORWARDING_CHAIN, '-j %s-%s' % (wrap_name, pf_chain_name))) floating_ip_address = str(port_forward.floating_ip_address) protocol = port_forward.protocol internal_ip_address = str(port_forward.internal_ip_address) internal_port = port_forward.internal_port external_port = port_forward.external_port chain_rule = (pf_chain_name, '-d %s/32 -p %s -m %s --dport %s ' '-j DNAT --to-destination %s:%s' % ( floating_ip_address, protocol, protocol, external_port, internal_ip_address, internal_port)) chain_rule_list.append(chain_rule) return chain_rule_list def _rule_apply(self, iptables_manager, port_forwarding, rule_tag): iptables_manager.ipv4['nat'].clear_rules_by_tag(rule_tag) if DEFAULT_PORT_FORWARDING_CHAIN not in iptables_manager.ipv4[ 'nat'].chains: self._install_default_rules(iptables_manager) for chain, rule in self._get_fip_rules( port_forwarding, iptables_manager.wrap_name): if chain not in iptables_manager.ipv4['nat'].chains: iptables_manager.ipv4['nat'].add_chain(chain) iptables_manager.ipv4['nat'].add_rule(chain, rule, tag=rule_tag) @coordination.synchronized('port-forwarding-{namespace}') def _process_create(self, port_forwardings, ri, interface_name, namespace, iptables_manager): if not port_forwardings: return device = ip_lib.IPDevice(interface_name, namespace=namespace) is_distributed = ri.router.get('distributed') ha_port = ri.router.get(constants.HA_INTERFACE_KEY, None) fip_statuses = {} for port_forwarding in port_forwardings: # check if the port forwarding is managed in this agent from # OVO and router rpc. 
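# NOTE: A create can reach the agent twice, once as an OVO push
# notification and once through the router RPC resync, so the mapping
# below doubles as an idempotency guard.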
if port_forwarding.id in self.mapping.managed_port_forwardings: LOG.debug("Skip port forwarding %s for create, as it had been " "managed by agent", port_forwarding.id) continue existing_cidrs = ri.get_router_cidrs(device) fip_ip = str(port_forwarding.floating_ip_address) fip_cidr = str(netaddr.IPNetwork(fip_ip)) status = '' if fip_cidr not in existing_cidrs: try: if not is_distributed: fip_statuses[port_forwarding.floatingip_id] = ( ri.add_floating_ip( {'floating_ip_address': fip_ip}, interface_name, device)) else: if not ha_port: device.addr.add(fip_cidr) ip_lib.send_ip_addr_adv_notif(namespace, interface_name, fip_ip) else: ri._add_vip(fip_cidr, interface_name) status = constants.FLOATINGIP_STATUS_ACTIVE except Exception: # Any error will causes the fip status to be set 'ERROR' status = constants.FLOATINGIP_STATUS_ERROR LOG.warning("Unable to configure floating IP %(fip_id)s " "for port forwarding %(pf_id)s", {'fip_id': port_forwarding.floatingip_id, 'pf_id': port_forwarding.id}) else: if not ha_port: ip_lib.send_ip_addr_adv_notif(namespace, interface_name, fip_ip) if status: fip_statuses[port_forwarding.floatingip_id] = status if ha_port and ha_port['status'] == constants.PORT_STATUS_ACTIVE: ri.enable_keepalived() for port_forwarding in port_forwardings: rule_tag = PORT_FORWARDING_PREFIX + port_forwarding.id self._rule_apply(iptables_manager, port_forwarding, rule_tag) iptables_manager.apply() self._sending_port_forwarding_fip_status(ri, fip_statuses) self._store_local(port_forwardings, events.CREATED) def _sending_port_forwarding_fip_status(self, ri, statuses): if not statuses: return LOG.debug('Sending Port Forwarding floating ip ' 'statuses: %s', statuses) # Update floating IP status on the neutron server ri.agent.plugin_rpc.update_floatingip_statuses( ri.agent.context, ri.router_id, statuses) def _get_resource_by_router(self, ri): is_distributed = ri.router.get('distributed') ex_gw_port = ri.get_ex_gw_port() if not is_distributed: interface_name = ri.get_external_device_interface_name(ex_gw_port) namespace = ri.ns_name iptables_manager = ri.iptables_manager else: interface_name = ri.get_snat_external_device_interface_name( ex_gw_port) namespace = ri.snat_namespace.name iptables_manager = ri.snat_iptables_manager return interface_name, namespace, iptables_manager def _check_if_need_process(self, ri, force=False): # force means the request comes from, if True means it comes from OVO, # as we get a actually port forwarding object, then we need to check in # the following steps. But False, means it comes from router rpc. if not ri or not ri.get_ex_gw_port() or ( not force and not ri.fip_managed_by_port_forwardings): # agent doesn't hold the router. pass # This router doesn't own a gw port. pass # This router doesn't hold a port forwarding mapping. 
pass return False is_distributed = ri.router.get('distributed') agent_mode = ri.agent_conf.agent_mode if (is_distributed and agent_mode in [constants.L3_AGENT_MODE_DVR_NO_EXTERNAL, constants.L3_AGENT_MODE_DVR]): # just support centralized cases return False if is_distributed and not ri.snat_namespace.exists(): return False return True def _process_port_forwarding_event(self, context, port_forwarding, event_type): router_id = port_forwarding.router_id ri = self._get_router_info(router_id) if not self._check_if_need_process(ri, force=True): return (interface_name, namespace, iptables_manager) = self._get_resource_by_router(ri) if event_type == events.CREATED: self._process_create( [port_forwarding], ri, interface_name, namespace, iptables_manager) elif event_type == events.UPDATED: self._process_update([port_forwarding], iptables_manager, interface_name, namespace) elif event_type == events.DELETED: self._process_delete( context, [port_forwarding], ri, interface_name, namespace, iptables_manager) @coordination.synchronized('port-forwarding-{namespace}') def _process_update(self, port_forwardings, iptables_manager, interface_name, namespace): if not port_forwardings: return device = ip_lib.IPDevice(interface_name, namespace=namespace) for port_forwarding in port_forwardings: # check if port forwarding change from OVO and router rpc if not self.mapping.check_port_forwarding_changes(port_forwarding): LOG.debug("Skip port forwarding %s for update, as there is no " "difference between the memory managed by agent", port_forwarding.id) continue current_chain = self._get_port_forwarding_chain_name( port_forwarding.id) iptables_manager.ipv4['nat'].remove_chain(current_chain) ori_pf = self.mapping.managed_port_forwardings[port_forwarding.id] device.delete_socket_conntrack_state( str(ori_pf.floating_ip_address), ori_pf.external_port, protocol=ori_pf.protocol) rule_tag = PORT_FORWARDING_PREFIX + port_forwarding.id self._rule_apply(iptables_manager, port_forwarding, rule_tag) iptables_manager.apply() self._store_local(port_forwardings, events.UPDATED) @coordination.synchronized('port-forwarding-{namespace}') def _process_delete(self, context, port_forwardings, ri, interface_name, namespace, iptables_manager): if not port_forwardings: return device = ip_lib.IPDevice(interface_name, namespace=namespace) for port_forwarding in port_forwardings: current_chain = self._get_port_forwarding_chain_name( port_forwarding.id) iptables_manager.ipv4['nat'].remove_chain(current_chain) fip_address = str(port_forwarding.floating_ip_address) device.delete_socket_conntrack_state( fip_address, port_forwarding.external_port, protocol=port_forwarding.protocol) iptables_manager.apply() fip_id_cidrs = set([(pf.floatingip_id, str(pf.floating_ip_address)) for pf in port_forwardings]) self._sync_and_remove_fip(context, fip_id_cidrs, device, ri) self._store_local(port_forwardings, events.DELETED) def _sync_and_remove_fip(self, context, fip_id_cidrs, device, ri): if not fip_id_cidrs: return ha_port = ri.router.get(constants.HA_INTERFACE_KEY) fip_ids = [item[0] for item in fip_id_cidrs] pfs = self.resource_rpc.bulk_pull(context, resources.PORTFORWARDING, filter_kwargs={ 'floatingip_id': fip_ids}) exist_fips = set() fip_status = {} for pf in pfs: exist_fips.add(pf.floatingip_id) for fip_id_cidr in fip_id_cidrs: if fip_id_cidr[0] not in exist_fips: if ha_port: ri._remove_vip(fip_id_cidr[1]) else: device.delete_addr_and_conntrack_state(fip_id_cidr[1]) fip_status[fip_id_cidr[0]] = 'DOWN' if ha_port: ri.enable_keepalived() 
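# NOTE: Floating IPs that no longer back any port forwarding were
# removed from the device (or the VIP list) above; they are reported
# as 'DOWN' below and then dropped from the local cache via
# clear_by_fip().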
self._sending_port_forwarding_fip_status(ri, fip_status) for fip_id in fip_status.keys(): self.mapping.clear_by_fip(fip_id, ri.router_id) def _get_router_info(self, router_id): router_info = self.agent_api.get_router_info(router_id) if router_info: return router_info LOG.debug("Router %s is not managed by this agent. " "It was possibly deleted concurrently.", router_id) def _get_port_forwarding_chain_name(self, pf_id): chain_name = PORT_FORWARDING_CHAIN_PREFIX + pf_id return chain_name[:constants.MAX_IPTABLES_CHAIN_LEN_WRAP] def _install_default_rules(self, iptables_manager): default_rule = '-j %s-%s' % (iptables_manager.wrap_name, DEFAULT_PORT_FORWARDING_CHAIN) iptables_manager.ipv4['nat'].add_chain(DEFAULT_PORT_FORWARDING_CHAIN) iptables_manager.ipv4['nat'].add_rule('PREROUTING', default_rule) iptables_manager.apply() def check_local_port_forwardings(self, context, ri, fip_ids): pfs = self.resource_rpc.bulk_pull(context, resources.PORTFORWARDING, filter_kwargs={ 'floatingip_id': fip_ids}) (interface_name, namespace, iptable_manager) = self._get_resource_by_router(ri) local_pfs = set(self.mapping.managed_port_forwardings.keys()) new_pfs = [] updated_pfs = [] current_pfs = set() for pf in pfs: # check the request port forwardings, and split them into # update, new, current part from router rpc if pf.id in self.mapping.managed_port_forwardings: if self.mapping.check_port_forwarding_changes(pf): updated_pfs.append(pf) else: new_pfs.append(pf) current_pfs.add(pf.id) remove_pf_ids_set = local_pfs - current_pfs remove_pfs = [self.mapping.managed_port_forwardings[pf_id] for pf_id in remove_pf_ids_set] self._process_update(updated_pfs, iptable_manager, interface_name, namespace) self._process_create(new_pfs, ri, interface_name, namespace, iptable_manager) self._process_delete(context, remove_pfs, ri, interface_name, namespace, iptable_manager) def process_port_forwarding(self, context, data): ri = self._get_router_info(data['id']) if not self._check_if_need_process(ri): return self.check_local_port_forwardings( context, ri, ri.fip_managed_by_port_forwardings) def add_router(self, context, data): """Handle a router add event. Called on router create. :param context: RPC context. :param data: Router data. """ self.process_port_forwarding(context, data) def update_router(self, context, data): """Handle a router update event. Called on router update. :param context: RPC context. :param data: Router data. """ self.process_port_forwarding(context, data) def delete_router(self, context, data): """Handle a router delete event. :param context: RPC context. :param data: Router data. """ pass def ha_state_change(self, context, data): pass ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.1990438 neutron-16.0.0.0b2.dev214/neutron/agent/l3/extensions/qos/0000755000175000017500000000000000000000000023252 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/extensions/qos/__init__.py0000644000175000017500000000000000000000000025351 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/extensions/qos/base.py0000644000175000017500000001604700000000000024546 0ustar00coreycorey00000000000000# Copyright 2017 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections from neutron_lib import constants from neutron_lib.db import constants as db_consts from neutron_lib import rpc as n_rpc from neutron_lib.services.qos import constants as qos_consts from oslo_concurrency import lockutils from oslo_log import log as logging from neutron.agent.linux import l3_tc_lib as tc_lib from neutron.api.rpc.callbacks.consumer import registry from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import resources_rpc LOG = logging.getLogger(__name__) SUPPORTED_RULES = { qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: { qos_consts.MAX_KBPS: { 'type:range': [0, db_consts.DB_INTEGER_MAX_VALUE]}, qos_consts.MAX_BURST: { 'type:range': [0, db_consts.DB_INTEGER_MAX_VALUE]}, qos_consts.DIRECTION: { 'type:values': constants.VALID_DIRECTIONS} } } # We use the default values to illustrate: # 1. QoS policy does not have some direction `bandwidth_limit`, then we use # the default value. # 2. default value 0 will be treated as no limit. # 3. if one IP's rate was changed from x to 0, the extension will do # a tc filter clean procedure. IP_DEFAULT_RATE = 0 IP_DEFAULT_BURST = 0 class RateLimitMaps(object): def __init__(self, lock_name): self.qos_policy_resources = collections.defaultdict(dict) self.known_policies = {} self.resource_policies = {} self.lock_name = lock_name def update_policy(self, policy): @lockutils.synchronized(self.lock_name) def _update_policy(): self.known_policies[policy.id] = policy return _update_policy() def get_policy(self, policy_id): @lockutils.synchronized(self.lock_name) def _get_policy(): return self.known_policies.get(policy_id) return _get_policy() def get_resources(self, policy): @lockutils.synchronized(self.lock_name) def _get_resources(): return self.qos_policy_resources[policy.id].values() return _get_resources() def get_resource_policy(self, resource): @lockutils.synchronized(self.lock_name) def _get_resource_policy(): policy_id = self.resource_policies.get(resource) return self.known_policies.get(policy_id) return _get_resource_policy() def set_resource_policy(self, resource, policy): """Attach a resource to policy and return any previous policy on resource. """ @lockutils.synchronized(self.lock_name) def _set_resource_policy(): policy_id = self.resource_policies.get(resource) old_policy = self.known_policies.get(policy_id) self.known_policies[policy.id] = policy self.resource_policies[resource] = policy.id self.qos_policy_resources[policy.id][resource] = resource if old_policy and old_policy.id != policy.id: del self.qos_policy_resources[old_policy.id][resource] _set_resource_policy() def clean_by_resource(self, resource): """Detach resource from policy and cleanup data we don't need anymore. 
""" @lockutils.synchronized(self.lock_name) def _clean_by_resource(): if resource in self.resource_policies: del self.resource_policies[resource] for (qos_policy_id, res_dict) in self.qos_policy_resources.items(): if resource in res_dict: del res_dict[resource] if not res_dict: self._clean_policy_info(qos_policy_id) return LOG.debug("L3 QoS extension did not have " "information on floating IP %s", resource) _clean_by_resource() def _clean_policy_info(self, qos_policy_id): del self.qos_policy_resources[qos_policy_id] del self.known_policies[qos_policy_id] class L3QosAgentExtensionBase(object): SUPPORTED_RESOURCE_TYPES = [resources.QOS_POLICY] def consume_api(self, agent_api): self.agent_api = agent_api def _handle_notification(self, context, resource_type, qos_policies, event_type): pass def _process_update_policy(self, qos_policy): pass def _policy_rules_modified(self, old_policy, policy): return not (len(old_policy.rules) == len(policy.rules) and all(i in old_policy.rules for i in policy.rules)) def _register_rpc_consumers(self): registry.register(self._handle_notification, resources.QOS_POLICY) self._connection = n_rpc.Connection() endpoints = [resources_rpc.ResourcesPushRpcCallback()] topic = resources_rpc.resource_type_versioned_topic( resources.QOS_POLICY) self._connection.create_consumer(topic, endpoints, fanout=True) self._connection.consume_in_threads() def _get_tc_wrapper(self, device): return tc_lib.FloatingIPTcCommand(device.name, namespace=device.namespace) def get_policy_rates(self, policy): rates = {} for rule in policy.rules: # NOTE(liuyulong): for now, the L3 agent QoS extensions only # use ``bandwidth_limit`` rules. if rule.rule_type in SUPPORTED_RULES: if rule.direction not in rates: rates[rule.direction] = {"rate": rule.max_kbps, "burst": rule.max_burst_kbps} # The return rates dict must contain all directions. If there is no # one specific direction QoS rule, use the default values. for direction in constants.VALID_DIRECTIONS: if direction not in rates: LOG.debug("Policy %(id)s does not have '%(direction)s' " "bandwidth_limit rule, use default value instead.", {"id": policy.id, "direction": direction}) rates[direction] = {"rate": IP_DEFAULT_RATE, "burst": IP_DEFAULT_BURST} return rates def _get_router_info(self, router_id): router_info = self.agent_api.get_router_info(router_id) if router_info: return router_info LOG.debug("Router %s is not managed by this agent. " "It was possibly deleted concurrently.", router_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/extensions/qos/fip.py0000644000175000017500000003523500000000000024412 0ustar00coreycorey00000000000000# Copyright 2017 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.agent import l3_extension from neutron_lib import constants from neutron_lib.services.qos import constants as qos_consts from oslo_concurrency import lockutils from oslo_log import log as logging from neutron.agent.l3.extensions.qos import base as qos_base from neutron.agent.linux import ip_lib from neutron.api.rpc.callbacks import events from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import resources_rpc from neutron.common import coordination LOG = logging.getLogger(__name__) class RouterFipRateLimitMaps(qos_base.RateLimitMaps): LOCK_NAME = "fip-qos-cache" def __init__(self): """Initialize RouterFipRateLimitMaps The router_floating_ips will be: router_floating_ips = { router_id_1: set(fip1, fip2), router_id_2: set(), # default } """ self.router_floating_ips = {} """ The rate limits dict will be: xxx_ratelimits = { fip_1: (rate, burst), fip_2: (IP_DEFAULT_RATE, IP_DEFAULT_BURST), # default fip_3: (1, 2), fip_4: (3, 4), } """ self.ingress_ratelimits = {} self.egress_ratelimits = {} super(RouterFipRateLimitMaps, self).__init__(self.LOCK_NAME) def find_fip_router_id(self, fip): @lockutils.synchronized(self.lock_name) def _find_fip_router_id(): for router_id, ips in self.router_floating_ips.items(): if fip in ips: return router_id return _find_fip_router_id() def get_router_floating_ips(self, router_id): @lockutils.synchronized(self.lock_name) def _get_router_floating_ips(): return self.router_floating_ips.pop( router_id, []) return _get_router_floating_ips() def remove_fip_ratelimit_cache(self, direction, fip): @lockutils.synchronized(self.lock_name) def _remove_fip_ratelimit_cache(): rate_limits = getattr(self, direction + "_ratelimits") rate_limits.pop(fip, None) _remove_fip_ratelimit_cache() def set_fip_ratelimit_cache(self, direction, fip, rate, burst): @lockutils.synchronized(self.lock_name) def _set_fip_ratelimit_cache(): rate_limits = getattr(self, direction + "_ratelimits") rate_limits[fip] = (rate, burst) _set_fip_ratelimit_cache() def get_fip_ratelimit_cache(self, direction, fip): @lockutils.synchronized(self.lock_name) def _get_fip_ratelimit_cache(): rate_limits = getattr(self, direction + "_ratelimits") rate, burst = rate_limits.get(fip, (qos_base.IP_DEFAULT_RATE, qos_base.IP_DEFAULT_BURST)) return rate, burst return _get_fip_ratelimit_cache() def remove_fip_all_cache(self, fip): for direction in constants.VALID_DIRECTIONS: self.remove_fip_ratelimit_cache(direction, fip) self.clean_by_resource(fip) def clean_router_all_fip_cache(self, router_id): floating_ips = self.router_floating_ips.pop( router_id, []) for fip in floating_ips: self.remove_fip_all_cache(fip) class FipQosAgentExtension(qos_base.L3QosAgentExtensionBase, l3_extension.L3AgentExtension): def initialize(self, connection, driver_type): """Initialize agent extension.""" self.resource_rpc = resources_rpc.ResourcesPullRpcApi() self.fip_qos_map = RouterFipRateLimitMaps() self._register_rpc_consumers() def _handle_notification(self, context, resource_type, qos_policies, event_type): if event_type == events.UPDATED: for qos_policy in qos_policies: self._process_update_policy(qos_policy) def _process_update_policy(self, qos_policy): old_qos_policy = self.fip_qos_map.get_policy(qos_policy.id) if old_qos_policy: if self._policy_rules_modified(old_qos_policy, qos_policy): for fip in self.fip_qos_map.get_resources(qos_policy): router_id = self.fip_qos_map.find_fip_router_id(fip) router_info = self._get_router_info(router_id) if not router_info: continue device = 
self._get_rate_limit_ip_device(router_info) dvr_fip_device = self._get_dvr_fip_device(router_info) if not device and not dvr_fip_device: LOG.debug("Router %s does not have a floating IP " "related device, skipping.", router_id) continue rates = self.get_policy_rates(qos_policy) if device: self.process_ip_rates(fip, device, rates) if dvr_fip_device: self.process_ip_rates( fip, dvr_fip_device, rates, with_cache=False) self.fip_qos_map.update_policy(qos_policy) @coordination.synchronized('qos-floating-ip-{ip}') def process_ip_rate_limit(self, ip, direction, device, rate, burst): tc_wrapper = self._get_tc_wrapper(device) if (rate == qos_base.IP_DEFAULT_RATE and burst == qos_base.IP_DEFAULT_BURST): # According to the agreements of default value definition, # floating IP bandwidth was changed to default value (no limit). # NOTE: l3_tc_lib will ignore exception FilterIDForIPNotFound. tc_wrapper.clear_ip_rate_limit(direction, ip) self.fip_qos_map.remove_fip_ratelimit_cache(direction, ip) return # Finally just set it, l3_tc_lib will clean the old rules if exists. tc_wrapper.set_ip_rate_limit(direction, ip, rate, burst) def _get_rate_limit_ip_device(self, router_info): ex_gw_port = router_info.get_ex_gw_port() if not ex_gw_port: return agent_mode = router_info.agent_conf.agent_mode is_distributed_router = router_info.router.get('distributed') if is_distributed_router and agent_mode == ( constants.L3_AGENT_MODE_DVR_SNAT): # DVR edge (or DVR edge ha) router if not router_info._is_this_snat_host(): return name = router_info.get_snat_external_device_interface_name( ex_gw_port) else: # DVR local router # Legacy/HA router name = router_info.get_external_device_interface_name(ex_gw_port) if not name: # DVR local router in dvr_no_external agent mode may not have # such rfp-device. return namespace = router_info.get_gw_ns_name() return ip_lib.IPDevice(name, namespace=namespace) def _remove_fip_rate_limit(self, device, fip_ip): tc_wrapper = self._get_tc_wrapper(device) for direction in constants.VALID_DIRECTIONS: if device.exists(): tc_wrapper.clear_ip_rate_limit(direction, fip_ip) self.fip_qos_map.remove_fip_ratelimit_cache(direction, fip_ip) def get_fip_qos_rates(self, context, fip, policy_id): if policy_id is None: self.fip_qos_map.clean_by_resource(fip) # process_ip_rate_limit will treat value 0 as # cleaning the tc filters if exits or no action. return {constants.INGRESS_DIRECTION: { "rate": qos_base.IP_DEFAULT_RATE, "burst": qos_base.IP_DEFAULT_BURST}, constants.EGRESS_DIRECTION: { "rate": qos_base.IP_DEFAULT_RATE, "burst": qos_base.IP_DEFAULT_BURST}} policy = self.resource_rpc.pull( context, resources.QOS_POLICY, policy_id) self.fip_qos_map.set_resource_policy(fip, policy) return self.get_policy_rates(policy) def process_ip_rates(self, fip, device, rates, with_cache=True): for direction in constants.VALID_DIRECTIONS: rate = rates.get(direction) if with_cache: old_rate, old_burst = self.fip_qos_map.get_fip_ratelimit_cache( direction, fip) if old_rate == rate['rate'] and old_burst == rate['burst']: # Two possibilities here: # 1. Floating IP rate limit does not change. # 2. Floating IP bandwidth does not limit. 
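                    # NOTE(editor): e.g. a cached (rate, burst) of (1000, 800)
                    # recomputed unchanged from the policy -> skip the tc
                    # call; a cached (0, 0) recomputing to (0, 0) means the
                    # IP stays unlimited and there is no filter to touch.
                    # (values hypothetical)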
continue self.process_ip_rate_limit( fip, direction, device, rate['rate'], rate['burst']) self.fip_qos_map.set_fip_ratelimit_cache( direction, fip, rate['rate'], rate['burst']) else: tc_wrapper = self._get_tc_wrapper(device) if (rate['rate'] == qos_base.IP_DEFAULT_RATE and rate['burst'] == qos_base.IP_DEFAULT_BURST): # Default value is no limit tc_wrapper.clear_ip_rate_limit(direction, fip) else: tc_wrapper.set_ip_rate_limit(direction, fip, rate['rate'], rate['burst']) def _get_dvr_fip_device(self, router_info): is_distributed_router = router_info.router.get('distributed') agent_mode = router_info.agent_conf.agent_mode if is_distributed_router and agent_mode == ( constants.L3_AGENT_MODE_DVR_SNAT): gw_port = router_info.get_ex_gw_port() if gw_port and router_info.fip_ns: rfp_dev_name = router_info.get_external_device_interface_name( gw_port) if router_info.router_namespace.exists() and rfp_dev_name: return ip_lib.IPDevice( rfp_dev_name, namespace=router_info.ns_name) def process_floating_ip_addresses(self, context, router_info): # Loop all the router floating ips, the corresponding floating IP tc # rules will be configured: # 1. for legacy and HA router, it will be all floating IPs to qg-device # of qrouter-namespace in (all ha router hosted) network node. # 2. for dvr router, we can do this simple. No matter the agent # type is dvr or dvr_snat, we can just set all the # floating IP tc rules to the corresponding device: # 2.1 for dvr local router in compute node: # the namespace is qrouter-x, and the device is rfp-device. # 2.2 for dvr edge (ha) router in network node: # the namespace is snat-x, and the device is qg-device. # 3. for dvr local router, if agent_mod is dvr_no_external, no # floating IP rules will be configured. # 4. for dvr router in snat node, we should process the floating # IP QoS again in qrouter-namespace to cover the mixed deployment # with nova-compute scenario. is_distributed_router = router_info.router.get('distributed') agent_mode = router_info.agent_conf.agent_mode LOG.debug("Start processing floating IP QoS for " "router %(router_id)s, router " "distributed: %(distributed)s, " "agent mode: %(agent_mode)s", {"router_id": router_info.router_id, "distributed": is_distributed_router, "agent_mode": agent_mode}) if is_distributed_router and agent_mode == ( constants.L3_AGENT_MODE_DVR_NO_EXTERNAL): # condition 3: dvr local router and dvr_no_external agent return device = self._get_rate_limit_ip_device(router_info) dvr_fip_device = self._get_dvr_fip_device(router_info) if not device and not dvr_fip_device: LOG.debug("No relevant QoS device found " "for router: %s", router_info.router_id) return floating_ips = (router_info.get_floating_ips() + router_info.get_port_forwarding_fips()) current_fips = self.fip_qos_map.router_floating_ips.get( router_info.router_id, set()) new_fips = set() for fip in floating_ips: fip_addr = fip['floating_ip_address'] new_fips.add(fip_addr) rates = self.get_fip_qos_rates(context, fip_addr, fip.get(qos_consts.QOS_POLICY_ID)) if device: self.process_ip_rates(fip_addr, device, rates) if dvr_fip_device: # NOTE(liuyulong): for scenario 4 (mixed dvr_snat and compute # node), because floating IP qos rates may have been # processed in dvr snat-namespace, so here the cache was # already set. We just install the rules to the device in # qrouter-namesapce. 
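                # NOTE(editor): with_cache=False therefore applies the tc
                # filters unconditionally on the rfp- device: the
                # (rate, burst) cache entry for this fip was already
                # refreshed when the snat-namespace device was processed
                # above, so the cache check would wrongly skip it here.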
self.process_ip_rates( fip_addr, dvr_fip_device, rates, with_cache=False) self.fip_qos_map.router_floating_ips[router_info.router_id] = new_fips fips_removed = current_fips - new_fips for fip in fips_removed: if device: self._remove_fip_rate_limit(device, fip) if dvr_fip_device: self._remove_fip_rate_limit(dvr_fip_device, fip) self.fip_qos_map.clean_by_resource(fip) def add_router(self, context, data): router_info = self._get_router_info(data['id']) if router_info: self.process_floating_ip_addresses(context, router_info) def update_router(self, context, data): router_info = self._get_router_info(data['id']) if router_info: self.process_floating_ip_addresses(context, router_info) def delete_router(self, context, data): self.fip_qos_map.clean_router_all_fip_cache(data['id']) def ha_state_change(self, context, data): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/extensions/qos/gateway_ip.py0000644000175000017500000001746500000000000025772 0ustar00coreycorey00000000000000# Copyright 2018 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from neutron_lib.agent import l3_extension from neutron_lib import constants from oslo_log import log as logging from neutron.agent.l3.extensions.qos import base as qos_base from neutron.agent.linux import ip_lib from neutron.api.rpc.callbacks import events from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import resources_rpc from neutron.common import coordination LOG = logging.getLogger(__name__) class RouterGatewayIPQosAgentExtension(qos_base.L3QosAgentExtensionBase, l3_extension.L3AgentExtension): def initialize(self, connection, driver_type): """Initialize agent extension.""" self.resource_rpc = resources_rpc.ResourcesPullRpcApi() self._register_rpc_consumers() self.gateway_ip_qos_map = qos_base.RateLimitMaps( "gateway-ip-qos-cache") def _handle_notification(self, context, resource_type, qos_policies, event_type): if event_type == events.UPDATED: for qos_policy in qos_policies: self._process_update_policy(qos_policy) def _process_router_gateway_after_policy_update( self, router_id, qos_policy): router_info = self._get_router_info(router_id) if not router_info: return ex_gw_port = router_info.get_ex_gw_port() if not ex_gw_port: return interface_name = router_info.get_external_device_name( ex_gw_port['id']) device = self._get_gateway_tc_rule_device( router_info, interface_name) if not device.exists(): return tc_wrapper = self._get_tc_wrapper(device) # Clear all old gateway IP tc rules first. 
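        # NOTE(editor): clearing unconditionally is safe here -- per the
        # note in fip.py, l3_tc_lib ignores FilterIDForIPNotFound, so
        # gateway IPs that never had a filter installed do not raise.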
self._empty_router_gateway_rate_limits(router_info, tc_wrapper) rates = self.get_policy_rates(qos_policy) self.gateway_ip_qos_map.set_resource_policy( router_info.router_id, qos_policy) self._set_gateway_tc_rules( router_info, tc_wrapper, ex_gw_port, rates) def _process_update_policy(self, qos_policy): old_qos_policy = self.gateway_ip_qos_map.get_policy(qos_policy.id) if old_qos_policy: if self._policy_rules_modified(old_qos_policy, qos_policy): router_ids = self.gateway_ip_qos_map.get_resources( qos_policy) for router_id in list(router_ids): self._process_router_gateway_after_policy_update( router_id, qos_policy) self.gateway_ip_qos_map.update_policy(qos_policy) def add_router(self, context, data): router_info = self._get_router_info(data['id']) if router_info: self.process_gateway_rate_limit(context, router_info) def update_router(self, context, data): router_info = self._get_router_info(data['id']) if router_info: self.process_gateway_rate_limit(context, router_info) def delete_router(self, context, data): # Remove the router and policy map in case the router deletion with # gateway. self.gateway_ip_qos_map.clean_by_resource(data['id']) def ha_state_change(self, context, data): pass def process_gateway_rate_limit(self, context, router_info): is_distributed_router = router_info.router.get('distributed') agent_mode = router_info.agent_conf.agent_mode LOG.debug("Start processing gateway IP QoS for " "router %(router_id)s, router " "distributed: %(distributed)s, " "agent mode: %(agent_mode)s", {"router_id": router_info.router_id, "distributed": is_distributed_router, "agent_mode": agent_mode}) if is_distributed_router and agent_mode in ( constants.L3_AGENT_MODE_DVR, constants.L3_AGENT_MODE_DVR_NO_EXTERNAL): # Dvr local router and dvr_no_external agent do not process # gateway IPs. return self._handle_router_gateway_rate_limit(context, router_info) @coordination.synchronized('qos-gateway-ip-{router_info.router_id}') def _empty_router_gateway_rate_limits(self, router_info, tc_wrapper): self.gateway_ip_qos_map.clean_by_resource(router_info.router_id) for ip in router_info.qos_gateway_ips: for direction in constants.VALID_DIRECTIONS: tc_wrapper.clear_ip_rate_limit(direction, ip) router_info.qos_gateway_ips.clear() def _handle_router_gateway_rate_limit(self, context, router_info): ex_gw_port = router_info.get_ex_gw_port() if not ex_gw_port: return interface_name = router_info.get_external_device_name( ex_gw_port['id']) device = self._get_gateway_tc_rule_device(router_info, interface_name) if not device.exists(): return tc_wrapper = self._get_tc_wrapper(device) # Clear all old gateway IP tc rules first. 
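        # NOTE(editor): the clear-then-reapply pattern keeps this method
        # idempotent: stale entries in router_info.qos_gateway_ips (e.g.
        # after a gateway re-plug changed the fixed_ips) lose their filters
        # first, and rules for the current fixed_ips are re-added below.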
self._empty_router_gateway_rate_limits(router_info, tc_wrapper) rates = self._get_rates_by_policy(context, router_info) if not rates: return self._set_gateway_tc_rules(router_info, tc_wrapper, ex_gw_port, rates) def _get_gateway_tc_rule_device(self, router_info, interface_name): is_distributed_router = router_info.router.get('distributed') agent_mode = router_info.agent_conf.agent_mode namespace = router_info.ns_name if (is_distributed_router and agent_mode == constants.L3_AGENT_MODE_DVR_SNAT): namespace = router_info.snat_namespace.name return ip_lib.IPDevice(interface_name, namespace=namespace) def _get_rates_by_policy(self, context, router_info): gateway_info = router_info.router.get('external_gateway_info') if not gateway_info: return policy_id = gateway_info.get('qos_policy_id') if not policy_id: return policy = self.resource_rpc.pull( context, resources.QOS_POLICY, policy_id) self.gateway_ip_qos_map.set_resource_policy( router_info.router_id, policy) return self.get_policy_rates(policy) @coordination.synchronized('qos-gateway-ip-{router_info.router_id}') def _set_gateway_tc_rules(self, router_info, tc_wrapper, ex_gw_port, rates): for ip_addr in ex_gw_port['fixed_ips']: ex_gw_ip = ip_addr['ip_address'] ip_ver = netaddr.IPAddress(ex_gw_ip).version if ip_ver == constants.IP_VERSION_4: self._set_gateway_ip_rate_limit(tc_wrapper, ex_gw_ip, rates) router_info.qos_gateway_ips.add(ex_gw_ip) def _set_gateway_ip_rate_limit(self, tc_wrapper, ex_gw_ip, rates): for direction in constants.VALID_DIRECTIONS: rate = rates.get(direction) if (rate['rate'] == qos_base.IP_DEFAULT_RATE and rate['burst'] == qos_base.IP_DEFAULT_BURST): continue tc_wrapper.set_ip_rate_limit(direction, ex_gw_ip, rate['rate'], rate['burst']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/extensions/snat_log.py0000644000175000017500000000300300000000000024624 0ustar00coreycorey00000000000000# Copyright (c) 2018 Fujitsu Limited # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
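# NOTE(editor): the extension below does not implement packet logging
# itself; SNATLoggingExtension.initialize() resolves a driver from the
# stevedore namespace log_ext.LOGGING_DRIVERS_NAMESPACE under the alias
# 'snat_log' and delegates the actual logging work to that driver.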
from neutron_lib.agent import l3_extension from oslo_log import log as logging from neutron.services.logapi.agent.l3 import base from neutron.services.logapi.agent import log_extension as log_ext from neutron.services.logapi.rpc import agent as agent_rpc LOG = logging.getLogger(__name__) SNAT_LOG_DRIVER = 'snat_log' class SNATLoggingExtension(base.L3LoggingExtensionBase, l3_extension.L3AgentExtension): def initialize(self, connection, driver_type): """Initialize SNAT logging agent extension""" snat_log_cls = self._load_driver_cls( log_ext.LOGGING_DRIVERS_NAMESPACE, SNAT_LOG_DRIVER) self.log_driver = snat_log_cls(self.agent_api) self.resource_rpc = agent_rpc.LoggingApiStub() self._register_rpc_consumers() self.log_driver.initialize(self.resource_rpc) LOG.info("Loaded SNAT logging driver") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/fip_rule_priority_allocator.py0000644000175000017500000000373200000000000026436 0ustar00coreycorey00000000000000# Copyright 2015 IBM Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent.l3.item_allocator import ItemAllocator class FipPriority(object): def __init__(self, index): self.index = index def __repr__(self): return str(self.index) def __hash__(self): return hash(self.__repr__()) def __eq__(self, other): if isinstance(other, FipPriority): return (self.index == other.index) else: return False class FipRulePriorityAllocator(ItemAllocator): """Manages allocation of floating ips rule priorities. IP rule priorities assigned to DVR floating IPs need to be preserved over L3 agent restarts. This class provides an allocator which saves the priorities to a datastore which will survive L3 agent restarts. """ def __init__(self, data_store_path, priority_rule_start, priority_rule_end): """Create the necessary pool and create the item allocator using ',' as the delimiter and FipRulePriorityAllocator as the class type """ pool = set(FipPriority(str(s)) for s in range(priority_rule_start, priority_rule_end)) super(FipRulePriorityAllocator, self).__init__(data_store_path, FipPriority, pool) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/ha.py0000644000175000017500000002451600000000000021223 0ustar00coreycorey00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
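# NOTE(editor): keepalived reports the per-router states 'master',
# 'backup', 'fault' and 'unknown'; TRANSLATION_MAP below folds these
# into the neutron server's HA states (active/standby/unknown) before
# notify_server() batches them to update_ha_routers_states().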
import os import threading import eventlet from neutron_lib import constants from oslo_log import log as logging from oslo_utils import fileutils import webob from neutron.agent.linux import utils as agent_utils from neutron.notifiers import batch_notifier LOG = logging.getLogger(__name__) KEEPALIVED_STATE_CHANGE_SERVER_BACKLOG = 4096 TRANSLATION_MAP = {'master': constants.HA_ROUTER_STATE_ACTIVE, 'backup': constants.HA_ROUTER_STATE_STANDBY, 'fault': constants.HA_ROUTER_STATE_STANDBY, 'unknown': constants.HA_ROUTER_STATE_UNKNOWN} class KeepalivedStateChangeHandler(object): def __init__(self, agent): self.agent = agent @webob.dec.wsgify(RequestClass=webob.Request) def __call__(self, req): router_id = req.headers['X-Neutron-Router-Id'] state = req.headers['X-Neutron-State'] self.enqueue(router_id, state) def enqueue(self, router_id, state): LOG.debug('Handling notification for router ' '%(router_id)s, state %(state)s', {'router_id': router_id, 'state': state}) self.agent.enqueue_state_change(router_id, state) class L3AgentKeepalivedStateChangeServer(object): def __init__(self, agent, conf): self.agent = agent self.conf = conf agent_utils.ensure_directory_exists_without_file( self.get_keepalived_state_change_socket_path(self.conf)) @classmethod def get_keepalived_state_change_socket_path(cls, conf): return os.path.join(conf.state_path, 'keepalived-state-change') def run(self): server = agent_utils.UnixDomainWSGIServer( 'neutron-keepalived-state-change', num_threads=self.conf.ha_keepalived_state_change_server_threads) server.start(KeepalivedStateChangeHandler(self.agent), self.get_keepalived_state_change_socket_path(self.conf), workers=0, backlog=KEEPALIVED_STATE_CHANGE_SERVER_BACKLOG) server.wait() class AgentMixin(object): def __init__(self, host): self._init_ha_conf_path() super(AgentMixin, self).__init__(host) # BatchNotifier queue is needed to ensure that the HA router # state change sequence is under the proper order. self.state_change_notifier = batch_notifier.BatchNotifier( self._calculate_batch_duration(), self.notify_server) eventlet.spawn(self._start_keepalived_notifications_server) self._transition_states = {} self._transition_state_mutex = threading.Lock() def _get_router_info(self, router_id): try: return self.router_info[router_id] except KeyError: LOG.info('Router %s is not managed by this agent. It was ' 'possibly deleted concurrently.', router_id) def check_ha_state_for_router(self, router_id, current_state): ri = self._get_router_info(router_id) if not ri: return ha_state = ri.ha_state if current_state != TRANSLATION_MAP[ha_state]: LOG.debug("Updating server with state %(state)s for router " "%(router_id)s", {'router_id': router_id, 'state': ha_state}) self.state_change_notifier.queue_event((router_id, ha_state)) def _start_keepalived_notifications_server(self): state_change_server = ( L3AgentKeepalivedStateChangeServer(self, self.conf)) state_change_server.run() def _calculate_batch_duration(self): # Set the BatchNotifier interval to ha_vrrp_advert_int, # default 2 seconds. 
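        # NOTE(editor): e.g. with the default ha_vrrp_advert_int of 2 the
        # BatchNotifier holds events for 2 seconds, so rapid master/backup
        # flaps collapse into one update_ha_routers_states() call (the dict
        # built in notify_server() keeps only the last state per router).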
return self.conf.ha_vrrp_advert_int def _update_transition_state(self, router_id, new_state=None): with self._transition_state_mutex: transition_state = self._transition_states.get(router_id) if new_state: self._transition_states[router_id] = new_state else: self._transition_states.pop(router_id, None) return transition_state def enqueue_state_change(self, router_id, state): """Inform the server about the new router state This function will also update the metadata proxy, the radvd daemon, process the prefix delegation and inform to the L3 extensions. If the HA router changes to "master", this transition will be delayed for at least "ha_vrrp_advert_int" seconds. When the "master" router transitions to "backup", "keepalived" will set the rest of HA routers to "master" until it decides which one should be the only "master". The transition from "backup" to "master" and then to "backup" again, should not be registered in the Neutron server. :param router_id: router ID :param state: ['master', 'backup'] """ if not self._update_transition_state(router_id, state): eventlet.spawn_n(self._enqueue_state_change, router_id, state) eventlet.sleep(0) def _enqueue_state_change(self, router_id, state): # NOTE(ralonsoh): move 'master' and 'backup' constants to n-lib if state == 'master': eventlet.sleep(self.conf.ha_vrrp_advert_int) if self._update_transition_state(router_id) != state: # If the current "transition state" is not the initial "state" sent # to update the router, that means the actual router state is the # same as the "transition state" (e.g.: backup-->master-->backup). return ri = self._get_router_info(router_id) if ri is None: return state_change_data = {"router_id": router_id, "state": state, "host": ri.agent.host} LOG.info('Router %(router_id)s transitioned to %(state)s on ' 'agent %(host)s', state_change_data) # Set external gateway port link up or down according to state if state == 'master': ri.set_external_gw_port_link_status(link_up=True, set_gw=True) elif state == 'backup': ri.set_external_gw_port_link_status(link_up=False) else: LOG.warning('Router %s has status %s, ' 'no action to router gateway device.', router_id, state) # TODO(dalvarez): Fix bug 1677279 by moving the IPv6 parameters # configuration to keepalived-state-change in order to remove the # dependency that currently exists on l3-agent running for the IPv6 # failover. ri.ha_state = state self._configure_ipv6_params(ri, state) if self.conf.enable_metadata_proxy: self._update_metadata_proxy(ri, router_id, state) self._update_radvd_daemon(ri, state) self.pd.process_ha_state(router_id, state == 'master') self.state_change_notifier.queue_event((router_id, state)) self.l3_ext_manager.ha_state_change(self.context, state_change_data) def _configure_ipv6_params(self, ri, state): if not self.use_ipv6: return ipv6_forwarding_enable = state == 'master' if ri.router.get('distributed', False): namespace = ri.ha_namespace else: namespace = ri.ns_name if ipv6_forwarding_enable: ri.driver.configure_ipv6_forwarding( namespace, 'all', ipv6_forwarding_enable) # If ipv6 is enabled on the platform, ipv6_gateway config flag is # not set and external_network associated to the router does not # include any IPv6 subnet, enable the gateway interface to accept # Router Advts from upstream router for default route on master # instances as well as ipv6 forwarding. Otherwise, disable them. 
ex_gw_port_id = ri.ex_gw_port and ri.ex_gw_port['id'] if ex_gw_port_id: interface_name = ri.get_external_device_name(ex_gw_port_id) ri._configure_ipv6_params_on_gw( ri.ex_gw_port, namespace, interface_name, ipv6_forwarding_enable) def _update_metadata_proxy(self, ri, router_id, state): # NOTE(slaweq): Since the metadata proxy is spawned in the qrouter # namespace and not in the snat namespace, even standby DVR-HA # routers needs to serve metadata requests to local ports. if state == 'master' or ri.router.get('distributed', False): LOG.debug('Spawning metadata proxy for router %s', router_id) self.metadata_driver.spawn_monitored_metadata_proxy( self.process_monitor, ri.ns_name, self.conf.metadata_port, self.conf, router_id=ri.router_id) else: LOG.debug('Closing metadata proxy for router %s', router_id) self.metadata_driver.destroy_monitored_metadata_proxy( self.process_monitor, ri.router_id, self.conf, ri.ns_name) def _update_radvd_daemon(self, ri, state): # Radvd has to be spawned only on the Master HA Router. If there are # any state transitions, we enable/disable radvd accordingly. if state == 'master': ri.enable_radvd() else: ri.disable_radvd() def notify_server(self, batched_events): translated_states = dict((router_id, TRANSLATION_MAP[state]) for router_id, state in batched_events) LOG.debug('Updating server with HA routers states %s', translated_states) self.plugin_rpc.update_ha_routers_states( self.context, translated_states) def _init_ha_conf_path(self): ha_full_path = os.path.dirname("/%s/" % self.conf.ha_confs_path) fileutils.ensure_tree(ha_full_path, mode=0o755) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/ha_router.py0000644000175000017500000005425500000000000022626 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import shutil import signal import netaddr from neutron_lib.api.definitions import portbindings from neutron_lib import constants as n_consts from neutron_lib.utils import runtime from oslo_log import log as logging from neutron.agent.l3 import namespaces from neutron.agent.l3 import router_info as router from neutron.agent.linux import external_process from neutron.agent.linux import ip_lib from neutron.agent.linux import keepalived from neutron.common import utils as common_utils from neutron.extensions import revisions from neutron.extensions import timestamp LOG = logging.getLogger(__name__) HA_DEV_PREFIX = 'ha-' IP_MONITOR_PROCESS_SERVICE = 'ip_monitor' SIGTERM_TIMEOUT = 10 KEEPALIVED_STATE_CHANGE_MONITOR_SERVICE_NAME = ( "neutron-keepalived-state-change-monitor") # TODO(liuyulong): move to neutron-lib? STATE_CHANGE_PROC_NAME = 'neutron-keepalived-state-change' # The multiplier is used to compensate execution time of function sending # SIGHUP to keepalived process. The constant multiplies ha_vrrp_advert_int # config option and the result is the throttle delay. 
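# NOTE(editor): e.g. ha_vrrp_advert_int = 2 (the default) yields a
# restart throttle delay of 2 * 1.5 = 3 seconds; see
# _init_keepalived_manager(), which feeds this product to the
# KeepalivedManager throttle_restart_value.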
THROTTLER_MULTIPLIER = 1.5 class HaRouterNamespace(namespaces.RouterNamespace): """Namespace for HA router. This namespace sets the ip_nonlocal_bind to 0 for HA router namespaces. It does so to prevent sending gratuitous ARPs for interfaces that got VIP removed in the middle of processing. It also disables ipv6 forwarding by default. Forwarding will be enabled during router configuration processing only for the master node. It has to be disabled on all other nodes to avoid sending MLD packets which cause lost connectivity to Floating IPs. """ def create(self): super(HaRouterNamespace, self).create(ipv6_forwarding=False) # HA router namespaces should not have ip_nonlocal_bind enabled ip_lib.set_ip_nonlocal_bind_for_namespace(self.name, 0) class HaRouter(router.RouterInfo): def __init__(self, state_change_callback, *args, **kwargs): super(HaRouter, self).__init__(*args, **kwargs) self.ha_port = None self.keepalived_manager = None self.state_change_callback = state_change_callback self._ha_state = None self._ha_state_path = None def create_router_namespace_object( self, router_id, agent_conf, iface_driver, use_ipv6): return HaRouterNamespace( router_id, agent_conf, iface_driver, use_ipv6) @property def ha_state_path(self): if not self._ha_state_path and self.keepalived_manager: self._ha_state_path = (self.keepalived_manager. get_full_config_file_path('state')) return self._ha_state_path @property def ha_priority(self): return self.router.get('priority', keepalived.HA_DEFAULT_PRIORITY) @property def ha_vr_id(self): return self.router.get('ha_vr_id') def _check_and_set_real_state(self): # When the physical host was down/up, the 'master' router may still # have its original state in the _ha_state_path file. We directly # reset it to 'backup'. if (not self.keepalived_manager.check_processes() and os.path.exists(self.ha_state_path) and self.ha_state == 'master'): self.ha_state = 'backup' @property def ha_state(self): if self._ha_state: return self._ha_state try: with open(self.ha_state_path, 'r') as f: self._ha_state = f.read() except (OSError, IOError): LOG.debug('Error while reading HA state for %s', self.router_id) return self._ha_state or 'unknown' @ha_state.setter def ha_state(self, new_state): self._ha_state = new_state try: with open(self.ha_state_path, 'w') as f: f.write(new_state) except (OSError, IOError): LOG.error('Error while writing HA state for %s', self.router_id) @property def ha_namespace(self): return self.ns_name def is_router_master(self): """this method is normally called before the ha_router object is fully initialized """ if self.router.get('_ha_state') == 'active': return True else: return False def initialize(self, process_monitor): ha_port = self.router.get(n_consts.HA_INTERFACE_KEY) if not ha_port: msg = ("Unable to process HA router %s without HA port" % self.router_id) LOG.exception(msg) raise Exception(msg) super(HaRouter, self).initialize(process_monitor) self.set_ha_port() self._init_keepalived_manager(process_monitor) self._check_and_set_real_state() self.ha_network_added() self.update_initial_state(self.state_change_callback) self.spawn_state_change_monitor(process_monitor) def _init_keepalived_manager(self, process_monitor): self.keepalived_manager = keepalived.KeepalivedManager( self.router['id'], keepalived.KeepalivedConf(), process_monitor, conf_path=self.agent_conf.ha_confs_path, namespace=self.ha_namespace, throttle_restart_value=( self.agent_conf.ha_vrrp_advert_int * THROTTLER_MULTIPLIER)) config = self.keepalived_manager.config interface_name = 
self.get_ha_device_name() subnets = self.ha_port.get('subnets', []) ha_port_cidrs = [subnet['cidr'] for subnet in subnets] instance = keepalived.KeepalivedInstance( 'BACKUP', interface_name, self.ha_vr_id, ha_port_cidrs, nopreempt=True, advert_int=self.agent_conf.ha_vrrp_advert_int, priority=self.ha_priority, vrrp_health_check_interval=( self.agent_conf.ha_vrrp_health_check_interval), ha_conf_dir=self.keepalived_manager.get_conf_dir()) instance.track_interfaces.append(interface_name) if self.agent_conf.ha_vrrp_auth_password: # TODO(safchain): use oslo.config types when it will be available # in order to check the validity of ha_vrrp_auth_type instance.set_authentication(self.agent_conf.ha_vrrp_auth_type, self.agent_conf.ha_vrrp_auth_password) config.add_instance(instance) def enable_keepalived(self): self.keepalived_manager.spawn() def disable_keepalived(self): if not self.keepalived_manager: LOG.debug('Error while disabling keepalived for %s - no manager', self.router_id) return self.keepalived_manager.disable() conf_dir = self.keepalived_manager.get_conf_dir() shutil.rmtree(conf_dir) def _get_keepalived_instance(self): return self.keepalived_manager.config.get_instance(self.ha_vr_id) def _get_primary_vip(self): return self._get_keepalived_instance().get_primary_vip() def get_ha_device_name(self): return (HA_DEV_PREFIX + self.ha_port['id'])[:self.driver.DEV_NAME_LEN] def ha_network_added(self): interface_name = self.get_ha_device_name() self.driver.plug(self.ha_port['network_id'], self.ha_port['id'], interface_name, self.ha_port['mac_address'], namespace=self.ha_namespace, prefix=HA_DEV_PREFIX, mtu=self.ha_port.get('mtu')) ip_cidrs = common_utils.fixed_ip_cidrs(self.ha_port['fixed_ips']) self.driver.init_l3(interface_name, ip_cidrs, namespace=self.ha_namespace, preserve_ips=[self._get_primary_vip()]) def ha_network_removed(self): if not self.ha_port: LOG.debug('Error while removing HA network for %s - no port', self.router_id) return self.driver.unplug(self.get_ha_device_name(), namespace=self.ha_namespace, prefix=HA_DEV_PREFIX) self.ha_port = None def _add_vips(self, port, interface_name): for ip_cidr in common_utils.fixed_ip_cidrs(port['fixed_ips']): self._add_vip(ip_cidr, interface_name) def _add_vip(self, ip_cidr, interface, scope=None): instance = self._get_keepalived_instance() instance.add_vip(ip_cidr, interface, scope) def _remove_vip(self, ip_cidr): instance = self._get_keepalived_instance() instance.remove_vip_by_ip_address(ip_cidr) def _clear_vips(self, interface): instance = self._get_keepalived_instance() instance.remove_vips_vroutes_by_interface(interface) def _get_cidrs_from_keepalived(self, interface_name): instance = self._get_keepalived_instance() return instance.get_existing_vip_ip_addresses(interface_name) def get_router_cidrs(self, device): return set(self._get_cidrs_from_keepalived(device.name)) def routes_updated(self, old_routes, new_routes): instance = self._get_keepalived_instance() instance.virtual_routes.extra_routes = [ keepalived.KeepalivedVirtualRoute( route['destination'], route['nexthop']) for route in new_routes] super(HaRouter, self).routes_updated(old_routes, new_routes) def _add_default_gw_virtual_route(self, ex_gw_port, interface_name): gateway_ips = self._get_external_gw_ips(ex_gw_port) default_gw_rts = [] instance = self._get_keepalived_instance() for gw_ip in gateway_ips: # TODO(Carl) This is repeated everywhere. A method would # be nice. 
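            # NOTE(editor): illustrative result (addresses hypothetical): a
            # gateway IP of 198.51.100.1 produces the keepalived virtual
            # route '0.0.0.0/0 via 198.51.100.1' on the gateway interface,
            # and an IPv6 gateway produces the '::/0' equivalent, per the
            # n_consts.IP_ANY lookup below.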
default_gw = n_consts.IP_ANY[netaddr.IPAddress(gw_ip).version] default_gw_rts.append(keepalived.KeepalivedVirtualRoute( default_gw, gw_ip, interface_name)) instance.virtual_routes.gateway_routes = default_gw_rts def _add_extra_subnet_onlink_routes(self, ex_gw_port, interface_name): extra_subnets = ex_gw_port.get('extra_subnets', []) instance = self._get_keepalived_instance() onlink_route_cidrs = set(s['cidr'] for s in extra_subnets) instance.virtual_routes.extra_subnets = [ keepalived.KeepalivedVirtualRoute( onlink_route_cidr, None, interface_name, scope='link') for onlink_route_cidr in onlink_route_cidrs] def _should_delete_ipv6_lladdr(self, ipv6_lladdr): """Only the master should have any IP addresses configured. Let keepalived manage IPv6 link local addresses, the same way we let it manage IPv4 addresses. If the router is not in the master state, we must delete the address first as it is autoconfigured by the kernel. """ manager = self.keepalived_manager if manager.get_process().active: if self.ha_state != 'master': conf = manager.get_conf_on_disk() managed_by_keepalived = conf and ipv6_lladdr in conf if managed_by_keepalived: return False else: return False return True def _disable_ipv6_addressing_on_interface(self, interface_name): """Disable IPv6 link local addressing on the device and add it as a VIP to keepalived. This means that the IPv6 link local address will only be present on the master. """ device = ip_lib.IPDevice(interface_name, namespace=self.ha_namespace) ipv6_lladdr = ip_lib.get_ipv6_lladdr(device.link.address) if self._should_delete_ipv6_lladdr(ipv6_lladdr): self.driver.configure_ipv6_ra(self.ha_namespace, interface_name, n_consts.ACCEPT_RA_DISABLED) device.addr.flush(n_consts.IP_VERSION_6) else: self.driver.configure_ipv6_ra( self.ha_namespace, interface_name, n_consts.ACCEPT_RA_WITHOUT_FORWARDING) self._remove_vip(ipv6_lladdr) self._add_vip(ipv6_lladdr, interface_name, scope='link') def _add_gateway_vip(self, ex_gw_port, interface_name): self._add_vips(ex_gw_port, interface_name) self._add_default_gw_virtual_route(ex_gw_port, interface_name) self._add_extra_subnet_onlink_routes(ex_gw_port, interface_name) def add_floating_ip(self, fip, interface_name, device): fip_ip = fip['floating_ip_address'] ip_cidr = common_utils.ip_to_cidr(fip_ip) self._add_vip(ip_cidr, interface_name) return n_consts.FLOATINGIP_STATUS_ACTIVE def remove_floating_ip(self, device, ip_cidr): self._remove_vip(ip_cidr) to = common_utils.cidr_to_ip(ip_cidr) if device.addr.list(to=to): super(HaRouter, self).remove_floating_ip(device, ip_cidr) def internal_network_updated(self, interface_name, ip_cidrs, mtu): self.driver.set_mtu(interface_name, mtu, namespace=self.ns_name, prefix=router.INTERNAL_DEV_PREFIX) self._clear_vips(interface_name) self._disable_ipv6_addressing_on_interface(interface_name) for ip_cidr in ip_cidrs: self._add_vip(ip_cidr, interface_name) def _plug_ha_router_port(self, port, name_getter, prefix): port_id = port['id'] interface_name = name_getter(port_id) self.driver.plug(port['network_id'], port_id, interface_name, port['mac_address'], namespace=self.ha_namespace, prefix=prefix, mtu=port.get('mtu')) self._disable_ipv6_addressing_on_interface(interface_name) self._add_vips(port, interface_name) def internal_network_added(self, port): self._plug_ha_router_port( port, self.get_internal_device_name, router.INTERNAL_DEV_PREFIX) def internal_network_removed(self, port): super(HaRouter, self).internal_network_removed(port) interface_name = self.get_internal_device_name(port['id']) 
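        # NOTE(editor): the parent call above already unplugged the qr-
        # port; what remains is dropping its VIPs from the keepalived
        # instance so the addresses stop being advertised after the next
        # config reload.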
self._clear_vips(interface_name) def _get_state_change_monitor_process_manager(self): return external_process.ProcessManager( self.agent_conf, '%s.monitor' % self.router_id, None, service=KEEPALIVED_STATE_CHANGE_MONITOR_SERVICE_NAME, default_cmd_callback=self._get_state_change_monitor_callback(), run_as_root=True) def _get_state_change_monitor_callback(self): ha_device = self.get_ha_device_name() ha_cidr = self._get_primary_vip() config_dir = self.keepalived_manager.get_conf_dir() state_change_log = ( "%s/neutron-keepalived-state-change.log") % config_dir def callback(pid_file): cmd = [ STATE_CHANGE_PROC_NAME, '--router_id=%s' % self.router_id, '--namespace=%s' % self.ha_namespace, '--conf_dir=%s' % config_dir, '--log-file=%s' % state_change_log, '--monitor_interface=%s' % ha_device, '--monitor_cidr=%s' % ha_cidr, '--pid_file=%s' % pid_file, '--state_path=%s' % self.agent_conf.state_path, '--user=%s' % os.geteuid(), '--group=%s' % os.getegid()] return cmd return callback def spawn_state_change_monitor(self, process_monitor): pm = self._get_state_change_monitor_process_manager() pm.enable() process_monitor.register( self.router_id, IP_MONITOR_PROCESS_SERVICE, pm) LOG.debug("Router %(router_id)s %(process)s pid %(pid)d", {"router_id": self.router_id, "process": KEEPALIVED_STATE_CHANGE_MONITOR_SERVICE_NAME, "pid": pm.pid}) def destroy_state_change_monitor(self, process_monitor): if not self.ha_port: LOG.debug('Error while destroying state change monitor for %s - ' 'no port', self.router_id) return pm = self._get_state_change_monitor_process_manager() process_monitor.unregister( self.router_id, IP_MONITOR_PROCESS_SERVICE) pm.disable(sig=str(int(signal.SIGTERM))) try: common_utils.wait_until_true(lambda: not pm.active, timeout=SIGTERM_TIMEOUT) except common_utils.WaitTimeout: pm.disable(sig=str(int(signal.SIGKILL))) def update_initial_state(self, callback): addresses = ip_lib.get_devices_with_ip(self.ha_namespace, name=self.get_ha_device_name()) cidrs = (address['cidr'] for address in addresses) ha_cidr = self._get_primary_vip() state = 'master' if ha_cidr in cidrs else 'backup' self.ha_state = state callback(self.router_id, state) @staticmethod def _gateway_ports_equal(port1, port2): def _get_filtered_dict(d, ignore): return {k: v for k, v in d.items() if k not in ignore} keys_to_ignore = set([portbindings.HOST_ID, timestamp.UPDATED, revisions.REVISION]) port1_filtered = _get_filtered_dict(port1, keys_to_ignore) port2_filtered = _get_filtered_dict(port2, keys_to_ignore) return port1_filtered == port2_filtered def external_gateway_added(self, ex_gw_port, interface_name): link_up = self.external_gateway_link_up() self._plug_external_gateway(ex_gw_port, interface_name, self.ns_name, link_up=link_up) self._add_gateway_vip(ex_gw_port, interface_name) self._disable_ipv6_addressing_on_interface(interface_name) # Enable RA and IPv6 forwarding only for master instances. This will # prevent backup routers from sending packets to the upstream switch # and disrupt connections. 
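        # NOTE(editor): i.e. 'enable' below is True only on the master
        # instance; on backup nodes the gateway device keeps RA acceptance
        # and IPv6 forwarding off until a failover promotes the router.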
enable = self.ha_state == 'master' self._configure_ipv6_params_on_gw(ex_gw_port, self.ns_name, interface_name, enable) def external_gateway_updated(self, ex_gw_port, interface_name): self._plug_external_gateway( ex_gw_port, interface_name, self.ha_namespace) ip_cidrs = common_utils.fixed_ip_cidrs(self.ex_gw_port['fixed_ips']) for old_gateway_cidr in ip_cidrs: self._remove_vip(old_gateway_cidr) self._add_gateway_vip(ex_gw_port, interface_name) def external_gateway_removed(self, ex_gw_port, interface_name): self._clear_vips(interface_name) if self.ha_state == 'master': super(HaRouter, self).external_gateway_removed(ex_gw_port, interface_name) else: # We are not the master node, so no need to delete ip addresses. self.driver.unplug(interface_name, namespace=self.ns_name, prefix=router.EXTERNAL_DEV_PREFIX) def delete(self): if self.process_monitor: self.destroy_state_change_monitor(self.process_monitor) self.disable_keepalived() self.ha_network_removed() super(HaRouter, self).delete() def set_ha_port(self): ha_port = self.router.get(n_consts.HA_INTERFACE_KEY) if not ha_port: return # NOTE: once HA port is set, it MUST remain this value no matter what # the server return. Because there is race condition between l3-agent # side sync router info for processing and server side router deleting. # TODO(liuyulong): make sure router HA ports never change. if not self.ha_port or (self.ha_port and self.ha_port['status'] != ha_port['status']): self.ha_port = ha_port def process(self): super(HaRouter, self).process() self.set_ha_port() LOG.debug("Processing HA router with HA port: %s", self.ha_port) if (self.ha_port and self.ha_port['status'] == n_consts.PORT_STATUS_ACTIVE): self.enable_keepalived() @runtime.synchronized('enable_radvd') def enable_radvd(self, internal_ports=None): if (self.keepalived_manager.get_process().active and self.ha_state == 'master'): super(HaRouter, self).enable_radvd(internal_ports) def external_gateway_link_up(self): # Check HA router ha_state for its gateway port link state. # 'backup' instance will not link up the gateway port. return self.ha_state == 'master' def set_external_gw_port_link_status(self, link_up, set_gw=False): link_state = "up" if link_up else "down" LOG.info('Set router %s gateway device link state to %s.', self.router_id, link_state) ex_gw_port = self.get_ex_gw_port() ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or self.ex_gw_port and self.ex_gw_port['id']) if ex_gw_port_id: interface_name = self.get_external_device_name(ex_gw_port_id) ns_name = self.get_gw_ns_name() self.driver.set_link_status(interface_name, ns_name, link_up=link_up) if link_up and set_gw: preserve_ips = self.get_router_preserve_ips() self._external_gateway_settings(ex_gw_port, interface_name, ns_name, preserve_ips) self.routes_updated([], self.routes) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/item_allocator.py0000644000175000017500000001203300000000000023620 0ustar00coreycorey00000000000000# Copyright 2015 IBM Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_log import log as logging from neutron._i18n import _ LOG = logging.getLogger(__name__) class ItemAllocator(object): """Manages allocation of items from a pool Some of the allocations such as link local addresses used for routing inside the fip namespaces need to persist across agent restarts to maintain consistency. Persisting such allocations in the neutron database is unnecessary and would degrade performance. ItemAllocator utilizes the local file system to track allocations made for objects of a given class. The persistent datastore is a file. The records are one per line of the format: key<delimiter>value. For example, if the delimiter is a ',' (the default value) then the records will be: key,value (one per line) """ def __init__(self, state_file, ItemClass, item_pool, delimiter=','): """Read the file with previous allocations recorded. See the note in the allocate method for more detail. """ self.ItemClass = ItemClass self.state_file = state_file self.allocations = {} self.remembered = {} self.pool = item_pool read_error = False for line in self._read(): try: key, saved_value = line.strip().split(delimiter) self.remembered[key] = self.ItemClass(saved_value) except ValueError: read_error = True LOG.warning("Invalid line in %(file)s, " "ignoring: %(line)s", {'file': state_file, 'line': line}) self.pool.difference_update(self.remembered.values()) if read_error: LOG.debug("Re-writing file %s due to read error", state_file) self._write_allocations() def lookup(self, key): """Try to look up an item of ItemClass type. See if there are any current or remembered allocations for the key. """ if key in self.allocations: return self.allocations[key] if key in self.remembered: self.allocations[key] = self.remembered.pop(key) return self.allocations[key] def allocate(self, key): """Try to allocate an item of ItemClass type. I expect this to work in all cases because I expect the pool size to be large enough for any situation. Nonetheless, there is some defensive programming in here. Since the allocations are persisted, there is the chance to leak allocations which should have been released but were not. This leak could eventually exhaust the pool. So, if a new allocation is needed, the code first checks to see if there are any remembered allocations for the key. If not, it checks the free pool. If the free pool is empty then it dumps the remembered allocations to replenish the pool. This final desperate step will not happen often in practice. """ entry = self.lookup(key) if entry: return entry if not self.pool: # Desperate times. Try to get more in the pool.
self.pool.update(self.remembered.values()) self.remembered.clear() if not self.pool: # The number of address pairs allocated from the # pool depends upon the prefix length specified # in DVR_FIP_LL_CIDR raise RuntimeError(_("Cannot allocate item of type: " "%(class)s from pool using file %(file)s") % {'class': self.ItemClass, 'file': self.state_file}) self.allocations[key] = self.pool.pop() self._write_allocations() return self.allocations[key] def release(self, key): if self.lookup(key): self.pool.add(self.allocations.pop(key)) self._write_allocations() def _write_allocations(self): current = ["%s,%s\n" % (k, v) for k, v in self.allocations.items()] remembered = ["%s,%s\n" % (k, v) for k, v in self.remembered.items()] current.extend(remembered) self._write(current) def _write(self, lines): with open(self.state_file, "w") as f: f.writelines(lines) def _read(self): if not os.path.exists(self.state_file): return [] with open(self.state_file) as f: return f.readlines() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/keepalived_state_change.py0000644000175000017500000001504200000000000025443 0ustar00coreycorey00000000000000# Copyright (c) 2015 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import sys import threading import httplib2 import netaddr from oslo_config import cfg from oslo_log import log as logging from six.moves import queue from neutron._i18n import _ from neutron.agent.l3 import ha from neutron.agent.linux import daemon from neutron.agent.linux import ip_lib from neutron.agent.linux import utils as agent_utils from neutron.common import config from neutron.conf.agent.l3 import keepalived from neutron import privileged LOG = logging.getLogger(__name__) class KeepalivedUnixDomainConnection(agent_utils.UnixDomainHTTPConnection): def __init__(self, *args, **kwargs): # Old style super initialization is required! agent_utils.UnixDomainHTTPConnection.__init__( self, *args, **kwargs) self.socket_path = ( ha.L3AgentKeepalivedStateChangeServer. 
get_keepalived_state_change_socket_path(cfg.CONF)) class MonitorDaemon(daemon.Daemon): def __init__(self, pidfile, router_id, user, group, namespace, conf_dir, interface, cidr): self.router_id = router_id self.namespace = namespace self.conf_dir = conf_dir self.interface = interface self.cidr = cidr self.monitor = None self.event_stop = threading.Event() self.event_started = threading.Event() self.queue = queue.Queue() super(MonitorDaemon, self).__init__(pidfile, uuid=router_id, user=user, group=group) def run(self): self._thread_ip_monitor = threading.Thread( target=ip_lib.ip_monitor, args=(self.namespace, self.queue, self.event_stop, self.event_started)) self._thread_read_queue = threading.Thread( target=self.read_queue, args=(self.queue, self.event_stop, self.event_started)) self._thread_ip_monitor.start() self._thread_read_queue.start() self.handle_initial_state() self._thread_read_queue.join() def read_queue(self, _queue, event_stop, event_started): event_started.wait() while not event_stop.is_set(): try: event = _queue.get(timeout=2) except queue.Empty: event = None if not event: continue if event['name'] == self.interface and event['cidr'] == self.cidr: new_state = 'master' if event['event'] == 'added' else 'backup' self.write_state_change(new_state) self.notify_agent(new_state) elif event['name'] != self.interface and event['event'] == 'added': # Send GARPs for all new router interfaces. # REVISIT(jlibosva): keepalived versions 1.2.19 and below # contain bug where gratuitous ARPs are not sent on receiving # SIGHUP signal. This is a workaround to this bug. keepalived # has this issue fixed since 1.2.20 but the version is not # packaged in some distributions (RHEL/CentOS/Ubuntu Xenial). # Remove this code once new keepalived versions are available. self.send_garp(event) def handle_initial_state(self): try: state = 'backup' ip = ip_lib.IPDevice(self.interface, self.namespace) for address in ip.addr.list(): if address.get('cidr') == self.cidr: state = 'master' self.write_state_change(state) self.notify_agent(state) break LOG.debug('Initial status of router %s is %s', self.router_id, state) except Exception: LOG.exception('Failed to get initial status of router %s', self.router_id) def write_state_change(self, state): with open(os.path.join( self.conf_dir, 'state'), 'w') as state_file: state_file.write(state) LOG.debug('Wrote router %s state %s', self.router_id, state) def notify_agent(self, state): resp, content = httplib2.Http().request( # Note that the message is sent via a Unix domain socket so that # the URL doesn't matter. 
'http://127.0.0.1/', headers={'X-Neutron-Router-Id': self.router_id, 'X-Neutron-State': state}, connection_type=KeepalivedUnixDomainConnection) if resp.status != 200: raise Exception(_('Unexpected response: %s') % resp) LOG.debug('Notified agent router %s, state %s', self.router_id, state) def send_garp(self, event): """Send gratuitous ARP for given event.""" ip_address = str(netaddr.IPNetwork(event['cidr']).ip) ip_lib.send_ip_addr_adv_notif( self.namespace, event['name'], ip_address, log_exception=False, use_eventlet=False ) LOG.debug('Sent GARP to %(ip_address)s from %(device_name)s', {'ip_address': ip_address, 'device_name': event['name']}) def handle_sigterm(self, signum, frame): self.event_stop.set() self._thread_read_queue.join(timeout=5) super(MonitorDaemon, self).handle_sigterm(signum, frame) def configure(conf): config.init(sys.argv[1:]) conf.set_override('log_dir', cfg.CONF.conf_dir) conf.set_override('debug', True) conf.set_override('use_syslog', True) config.setup_logging() privileged.default.set_client_mode(False) def main(): keepalived.register_cli_l3_agent_keepalived_opts() keepalived.register_l3_agent_keepalived_opts() configure(cfg.CONF) MonitorDaemon(cfg.CONF.pid_file, cfg.CONF.router_id, cfg.CONF.user, cfg.CONF.group, cfg.CONF.namespace, cfg.CONF.conf_dir, cfg.CONF.monitor_interface, cfg.CONF.monitor_cidr).start() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/l3_agent_extension_api.py0000644000175000017500000000567000000000000025254 0ustar00coreycorey00000000000000# Copyright 2016 Comcast # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent.linux import ip_lib class L3AgentExtensionAPI(object): '''Implements the Agent API for the L3 agent. Extensions can gain access to this API by overriding the consume_api method which has been added to the AgentCoreResourceExtension class. The purpose of this API is to give L3 agent extensions access to the agent's RouterInfo object. ''' def __init__(self, router_info, router_factory): self._router_info = router_info self._router_factory = router_factory def _local_namespaces(self): local_ns_list = ip_lib.list_network_namespaces() return set(local_ns_list) def get_router_hosting_port(self, port_id): """Given a port_id, look up the router associated with that port in local namespace. Returns a RouterInfo object (or None if the router is not found). """ if port_id: local_namespaces = self._local_namespaces() for router_info in self._router_info.values(): if router_info.ns_name in local_namespaces: for port in router_info.internal_ports: if port['id'] == port_id: return router_info def get_routers_in_project(self, project_id): """Given a project_id, return a list of routers that are all in the given project. Returns empty list if the project_id provided doesn't evaluate to True. 
""" if project_id: return [ri for ri in self._router_info.values() if ri.router['project_id'] == project_id] else: return [] def is_router_in_namespace(self, router_id): """Given a router_id, make sure that the router is in a local namespace. """ local_namespaces = self._local_namespaces() ri = self._router_info.get(router_id) return ri and ri.ns_name in local_namespaces def get_router_info(self, router_id): """Return RouterInfo for the given router id.""" return self._router_info.get(router_id) def register_router(self, features, router_cls): """Register router class with the given features. This is for the plugin to ovrride with their own ``router_info`` class. """ self._router_factory.register(features, router_cls) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/l3_agent_extensions_manager.py0000644000175000017500000000612300000000000026272 0ustar00coreycorey00000000000000# Copyright (c) 2015 Mellanox Technologies, Ltd # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from neutron.agent import agent_extensions_manager as agent_ext_manager from neutron.conf.agent import agent_extensions_manager as agent_ext_mgr_config LOG = log.getLogger(__name__) L3_AGENT_EXT_MANAGER_NAMESPACE = 'neutron.agent.l3.extensions' def register_opts(conf): agent_ext_mgr_config.register_agent_ext_manager_opts(conf) class L3AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager): """Manage l3 agent extensions.""" def __init__(self, conf): super(L3AgentExtensionsManager, self).__init__(conf, L3_AGENT_EXT_MANAGER_NAMESPACE) def add_router(self, context, data): """Notify all agent extensions to add router.""" for extension in self: if hasattr(extension.obj, 'add_router'): extension.obj.add_router(context, data) else: LOG.error( "Agent Extension '%(name)s' does not " "implement method add_router", {'name': extension.name} ) def update_router(self, context, data): """Notify all agent extensions to update router.""" for extension in self: if hasattr(extension.obj, 'update_router'): extension.obj.update_router(context, data) else: LOG.error( "Agent Extension '%(name)s' does not " "implement method update_router", {'name': extension.name} ) def delete_router(self, context, data): """Notify all agent extensions to delete router.""" for extension in self: if hasattr(extension.obj, 'delete_router'): extension.obj.delete_router(context, data) else: LOG.error( "Agent Extension '%(name)s' does not " "implement method delete_router", {'name': extension.name} ) def ha_state_change(self, context, data): """Notify all agent extensions for HA router state change.""" for extension in self: if hasattr(extension.obj, 'ha_state_change'): extension.obj.ha_state_change(context, data) else: LOG.warning( "Agent Extension '%(name)s' does not " "implement method ha_state_change", {'name': extension.name} ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/legacy_router.py0000644000175000017500000000244700000000000023476 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as lib_constants from neutron.agent.l3 import router_info as router from neutron.agent.linux import ip_lib class LegacyRouter(router.RouterInfo): def add_floating_ip(self, fip, interface_name, device): if not self._add_fip_addr_to_device(fip, device): return lib_constants.FLOATINGIP_STATUS_ERROR # As GARP is processed in a distinct thread the call below # won't raise an exception to be handled. ip_lib.send_ip_addr_adv_notif(self.ns_name, interface_name, fip['floating_ip_address']) return lib_constants.FLOATINGIP_STATUS_ACTIVE ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/link_local_allocator.py0000644000175000017500000000427600000000000025003 0ustar00coreycorey00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from neutron.agent.l3.item_allocator import ItemAllocator class LinkLocalAddressPair(netaddr.IPNetwork): def __init__(self, addr): super(LinkLocalAddressPair, self).__init__(addr) def get_pair(self): """Builds an address pair from the first and last addresses. """ # TODO(kevinbenton): the callers of this seem only interested in an IP, # so we should just return two IPAddresses. return (netaddr.IPNetwork("%s/%s" % (self.network, self.prefixlen)), netaddr.IPNetwork("%s/%s" % (self[-1], self.prefixlen))) class LinkLocalAllocator(ItemAllocator): """Manages allocation of link local IP addresses. These link local addresses are used for routing inside the fip namespaces. The associations need to persist across agent restarts to maintain consistency. Without this, there is disruption in network connectivity as the agent rewires the connections with the new IP address associations. Persisting these in the database is unnecessary and would degrade performance. 
""" def __init__(self, data_store_path, subnet): """Create the necessary pool and item allocator using ',' as the delimiter and LinkLocalAllocator as the class type """ subnet = netaddr.IPNetwork(subnet) pool = set(LinkLocalAddressPair(s) for s in subnet.subnet(31)) super(LinkLocalAllocator, self).__init__(data_store_path, LinkLocalAddressPair, pool) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/namespace_manager.py0000644000175000017500000001331000000000000024247 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from neutron.agent.l3 import dvr_fip_ns from neutron.agent.l3 import dvr_snat_ns from neutron.agent.l3 import namespaces from neutron.agent.linux import external_process from neutron.agent.linux import ip_lib LOG = logging.getLogger(__name__) class NamespaceManager(object): """Keeps track of namespaces that need to be cleaned up. This is a context manager that looks to clean up stale namespaces that have not been touched by the end of the "with" statement it is called in. This formalizes the pattern used in the L3 agent which enumerated all of the namespaces known to the system before a full sync. Then, after the full sync completed, it cleaned up any that were not touched during the sync. The agent and this context manager use method keep_router to communicate. In the "with" statement, the agent calls keep_router to record the id's of the routers whose namespaces should be preserved. Any other router and snat namespace present in the system will be deleted by the __exit__ method of this context manager This pattern can be more generally applicable to other resources besides namespaces in the future because it is idempotent and, as such, does not rely on state recorded at runtime in the agent so it handles agent restarts gracefully. """ ns_prefix_to_class_map = { namespaces.NS_PREFIX: namespaces.RouterNamespace, dvr_snat_ns.SNAT_NS_PREFIX: dvr_snat_ns.SnatNamespace, dvr_fip_ns.FIP_NS_PREFIX: dvr_fip_ns.FipNamespace, } def __init__(self, agent_conf, driver, metadata_driver=None): """Initialize the NamespaceManager. :param agent_conf: configuration from l3 agent :param driver: to perform operations on devices :param metadata_driver: used to cleanup stale metadata proxy processes """ self.agent_conf = agent_conf self.driver = driver self._clean_stale = True self.metadata_driver = metadata_driver if metadata_driver: self.process_monitor = external_process.ProcessMonitor( config=agent_conf, resource_type='router') def __enter__(self): self._all_namespaces = set() self._ids_to_keep = set() if self._clean_stale: self._all_namespaces = self.list_all() return self def __exit__(self, exc_type, value, traceback): # TODO(carl) Preserves old behavior of L3 agent where cleaning # namespaces was only done once after restart. Still a good idea? 
if exc_type: # An exception occurred in the caller's with statement return False if not self._clean_stale: # No need to cleanup return True self._clean_stale = False for ns in self._all_namespaces: _ns_prefix, ns_id = self.get_prefix_and_id(ns) if ns_id in self._ids_to_keep: continue self._cleanup(_ns_prefix, ns_id) return True def keep_router(self, router_id): self._ids_to_keep.add(router_id) def keep_ext_net(self, ext_net_id): self._ids_to_keep.add(ext_net_id) def get_prefix_and_id(self, ns_name): """Get the prefix and id from the namespace name. :param ns_name: The name of the namespace :returns: tuple with prefix and id or None if no prefix matches """ prefix = namespaces.get_prefix_from_ns_name(ns_name) if prefix in self.ns_prefix_to_class_map: identifier = namespaces.get_id_from_ns_name(ns_name) return (prefix, identifier) def is_managed(self, ns_name): """Return True if the namespace name passed belongs to this manager.""" return self.get_prefix_and_id(ns_name) is not None def list_all(self): """Get a set of all namespaces on host managed by this manager.""" try: namespaces = ip_lib.list_network_namespaces() return set(ns for ns in namespaces if self.is_managed(ns)) except RuntimeError: LOG.exception('RuntimeError in obtaining namespace list for ' 'namespace cleanup.') return set() def ensure_router_cleanup(self, router_id): """Performs cleanup for a router""" for ns in self.list_all(): if ns.endswith(router_id): ns_prefix, ns_id = self.get_prefix_and_id(ns) self._cleanup(ns_prefix, ns_id) def ensure_snat_cleanup(self, router_id): prefix = dvr_snat_ns.SNAT_NS_PREFIX self._cleanup(prefix, router_id) def _cleanup(self, ns_prefix, ns_id): ns_class = self.ns_prefix_to_class_map[ns_prefix] ns = ns_class(ns_id, self.agent_conf, self.driver, use_ipv6=False) try: if self.metadata_driver: # cleanup stale metadata proxy processes first self.metadata_driver.destroy_monitored_metadata_proxy( self.process_monitor, ns_id, self.agent_conf, ns.name) ns.delete() except RuntimeError: LOG.exception('Failed to destroy stale namespace %s', ns) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/namespaces.py0000644000175000017500000001202100000000000022736 0ustar00coreycorey00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import functools from oslo_log import log as logging from oslo_utils import excutils from neutron.agent.linux import ip_lib LOG = logging.getLogger(__name__) NS_PREFIX = 'qrouter-' INTERNAL_DEV_PREFIX = 'qr-' EXTERNAL_DEV_PREFIX = 'qg-' # TODO(Carl) It is odd that this file needs this. It is a dvr detail. 
ROUTER_2_FIP_DEV_PREFIX = 'rfp-' def build_ns_name(prefix, identifier): """Builds a namespace name from the given prefix and identifier :param prefix: The prefix which must end with '-' for legacy reasons :param identifier: The id associated with the namespace """ return prefix + identifier def get_prefix_from_ns_name(ns_name): """Parses prefix from prefix-identifier :param ns_name: The name of a namespace :returns: The prefix ending with a '-' or None if there is no '-' """ dash_index = ns_name.find('-') if 0 <= dash_index: return ns_name[:dash_index + 1] def get_id_from_ns_name(ns_name): """Parses identifier from prefix-identifier :param ns_name: The name of a namespace :returns: Identifier or None if there is no - to end the prefix """ dash_index = ns_name.find('-') if 0 <= dash_index: return ns_name[dash_index + 1:] def check_ns_existence(f): @functools.wraps(f) def wrapped(self, *args, **kwargs): if not self.exists(): LOG.warning('Namespace %(name)s does not exist. Skipping ' '%(func)s', {'name': self.name, 'func': f.__name__}) return try: return f(self, *args, **kwargs) except RuntimeError: with excutils.save_and_reraise_exception() as ctx: if not self.exists(): LOG.debug('Namespace %(name)s was concurrently deleted', self.name) ctx.reraise = False return wrapped class Namespace(object): def __init__(self, name, agent_conf, driver, use_ipv6): self.name = name self.ip_wrapper_root = ip_lib.IPWrapper() self.agent_conf = agent_conf self.driver = driver self.use_ipv6 = use_ipv6 def create(self, ipv6_forwarding=True): # See networking (netdev) tree, file # Documentation/networking/ip-sysctl.txt for an explanation of # these sysctl values. ip_wrapper = self.ip_wrapper_root.ensure_namespace(self.name) cmd = ['sysctl', '-w', 'net.ipv4.ip_forward=1'] ip_wrapper.netns.execute(cmd) # 1. Reply only if the target IP address is local address configured # on the incoming interface; and # 2. 
Always use the best local address cmd = ['sysctl', '-w', 'net.ipv4.conf.all.arp_ignore=1'] ip_wrapper.netns.execute(cmd) cmd = ['sysctl', '-w', 'net.ipv4.conf.all.arp_announce=2'] ip_wrapper.netns.execute(cmd) if self.use_ipv6: cmd = ['sysctl', '-w', 'net.ipv6.conf.all.forwarding=%d' % int(ipv6_forwarding)] ip_wrapper.netns.execute(cmd) def delete(self): try: self.ip_wrapper_root.netns.delete(self.name) except RuntimeError: msg = 'Failed trying to delete namespace: %s' LOG.exception(msg, self.name) def exists(self): return self.ip_wrapper_root.netns.exists(self.name) class RouterNamespace(Namespace): def __init__(self, router_id, agent_conf, driver, use_ipv6): self.router_id = router_id name = self._get_ns_name(router_id) super(RouterNamespace, self).__init__( name, agent_conf, driver, use_ipv6) @classmethod def _get_ns_name(cls, router_id): return build_ns_name(NS_PREFIX, router_id) @check_ns_existence def delete(self): ns_ip = ip_lib.IPWrapper(namespace=self.name) for d in ns_ip.get_devices(): if d.name.startswith(INTERNAL_DEV_PREFIX): # device is on default bridge self.driver.unplug(d.name, namespace=self.name, prefix=INTERNAL_DEV_PREFIX) elif d.name.startswith(ROUTER_2_FIP_DEV_PREFIX): ns_ip.del_veth(d.name) elif d.name.startswith(EXTERNAL_DEV_PREFIX): self.driver.unplug( d.name, namespace=self.name, prefix=EXTERNAL_DEV_PREFIX) super(RouterNamespace, self).delete() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3/router_info.py0000644000175000017500000015351500000000000023170 0ustar00coreycorey00000000000000# Copyright (c) 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
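# For reference, Namespace.create() above boils down to running the
# following inside the namespace (a sketch; qrouter-<uuid> stands for a
# namespace name built as shown in namespaces.py):
#
#     ip netns exec qrouter-<uuid> sysctl -w net.ipv4.ip_forward=1
#     ip netns exec qrouter-<uuid> sysctl -w net.ipv4.conf.all.arp_ignore=1
#     ip netns exec qrouter-<uuid> sysctl -w net.ipv4.conf.all.arp_announce=2
#     ip netns exec qrouter-<uuid> sysctl -w net.ipv6.conf.all.forwarding=1  # when use_ipv6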
import abc import collections import netaddr from neutron_lib import constants as lib_constants from neutron_lib.exceptions import l3 as l3_exc from neutron_lib.utils import helpers from oslo_log import log as logging import six from neutron._i18n import _ from neutron.agent.l3 import namespaces from neutron.agent.linux import ip_lib from neutron.agent.linux import iptables_manager from neutron.agent.linux import ra from neutron.common import ipv6_utils from neutron.common import utils as common_utils from neutron.ipam import utils as ipam_utils LOG = logging.getLogger(__name__) INTERNAL_DEV_PREFIX = namespaces.INTERNAL_DEV_PREFIX EXTERNAL_DEV_PREFIX = namespaces.EXTERNAL_DEV_PREFIX FLOATINGIP_STATUS_NOCHANGE = object() ADDRESS_SCOPE_MARK_MASK = "0xffff0000" ADDRESS_SCOPE_MARK_ID_MIN = 1024 ADDRESS_SCOPE_MARK_ID_MAX = 2048 DEFAULT_ADDRESS_SCOPE = "noscope" @six.add_metaclass(abc.ABCMeta) class BaseRouterInfo(object): def __init__(self, agent, router_id, router, agent_conf, interface_driver, use_ipv6=False): self.agent = agent self.router_id = router_id # Invoke the setter for establishing initial SNAT action self._snat_enabled = None self.router = router self.agent_conf = agent_conf self.driver = interface_driver self.use_ipv6 = use_ipv6 self.internal_ports = [] self.ns_name = None self.process_monitor = None def initialize(self, process_monitor): """Initialize the router on the system. This differs from __init__ in that this method actually affects the system creating namespaces, starting processes, etc. The other merely initializes the python object. This separates in-memory object initialization from methods that actually go do stuff to the system. :param process_monitor: The agent's process monitor instance. """ self.process_monitor = process_monitor @property def router(self): return self._router @router.setter def router(self, value): self._router = value if not self._router: return # enable_snat by default if it wasn't specified by plugin self._snat_enabled = self._router.get('enable_snat', True) @abc.abstractmethod def delete(self, agent): pass @abc.abstractmethod def process(self, agent): """Process updates to this router This method is the point where the agent requests that updates be applied to this router. :param agent: Passes the agent in order to send RPC messages. 
""" pass def get_ex_gw_port(self): return self.router.get('gw_port') def get_gw_ns_name(self): return self.ns_name def get_internal_device_name(self, port_id): return (INTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] def get_external_device_name(self, port_id): return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN] def get_external_device_interface_name(self, ex_gw_port): return self.get_external_device_name(ex_gw_port['id']) class RouterInfo(BaseRouterInfo): def __init__(self, agent, router_id, router, agent_conf, interface_driver, use_ipv6=False): super(RouterInfo, self).__init__(agent, router_id, router, agent_conf, interface_driver, use_ipv6) self.ex_gw_port = None self.fip_map = {} self.pd_subnets = {} self.floating_ips = set() ns = self.create_router_namespace_object( router_id, agent_conf, interface_driver, use_ipv6) self.router_namespace = ns self.ns_name = ns.name self.available_mark_ids = set(range(ADDRESS_SCOPE_MARK_ID_MIN, ADDRESS_SCOPE_MARK_ID_MAX)) self._address_scope_to_mark_id = { DEFAULT_ADDRESS_SCOPE: self.available_mark_ids.pop()} self.iptables_manager = iptables_manager.IptablesManager( use_ipv6=use_ipv6, namespace=self.ns_name) self.initialize_address_scope_iptables() self.initialize_metadata_iptables() self.routes = [] # radvd is a neutron.agent.linux.ra.DaemonMonitor self.radvd = None self.centralized_port_forwarding_fip_set = set() self.fip_managed_by_port_forwardings = None self.qos_gateway_ips = set() def initialize(self, process_monitor): super(RouterInfo, self).initialize(process_monitor) self.radvd = ra.DaemonMonitor(self.router_id, self.ns_name, process_monitor, self.get_internal_device_name, self.agent_conf) self.router_namespace.create() def create_router_namespace_object( self, router_id, agent_conf, iface_driver, use_ipv6): return namespaces.RouterNamespace( router_id, agent_conf, iface_driver, use_ipv6) def is_router_master(self): return True def _update_routing_table(self, operation, route, namespace): cmd = ['ip', 'route', operation, 'to', route['destination'], 'via', route['nexthop']] ip_wrapper = ip_lib.IPWrapper(namespace=namespace) ip_wrapper.netns.execute(cmd, check_exit_code=False) def update_routing_table(self, operation, route): self._update_routing_table(operation, route, self.ns_name) def routes_updated(self, old_routes, new_routes): adds, removes = helpers.diff_list_of_dict(old_routes, new_routes) for route in adds: LOG.debug("Added route entry is '%s'", route) # remove replaced route from deleted route for del_route in removes: if route['destination'] == del_route['destination']: removes.remove(del_route) # replace success even if there is no existing route self.update_routing_table('replace', route) for route in removes: LOG.debug("Removed route entry is '%s'", route) self.update_routing_table('delete', route) def get_floating_ips(self): """Filter Floating IPs to be hosted on this agent.""" return self.router.get(lib_constants.FLOATINGIP_KEY, []) def get_port_forwarding_fips(self): """Get router port forwarding floating IPs.""" return self.router.get('_pf_floatingips', []) def floating_forward_rules(self, fip): fixed_ip = fip['fixed_ip_address'] floating_ip = fip['floating_ip_address'] to_source = '-s %s/32 -j SNAT --to-source %s' % (fixed_ip, floating_ip) if self.iptables_manager.random_fully: to_source += ' --random-fully' return [('PREROUTING', '-d %s/32 -j DNAT --to-destination %s' % (floating_ip, fixed_ip)), ('OUTPUT', '-d %s/32 -j DNAT --to-destination %s' % (floating_ip, fixed_ip)), ('float-snat', to_source)] def 
floating_mangle_rules(self, floating_ip, fixed_ip, internal_mark): mark_traffic_to_floating_ip = ( 'floatingip', '-d %s/32 -j MARK --set-xmark %s' % ( floating_ip, internal_mark)) mark_traffic_from_fixed_ip = ( 'FORWARD', '-s %s/32 -j $float-snat' % fixed_ip) return [mark_traffic_to_floating_ip, mark_traffic_from_fixed_ip] def get_address_scope_mark_mask(self, address_scope=None): if not address_scope: address_scope = DEFAULT_ADDRESS_SCOPE if address_scope not in self._address_scope_to_mark_id: self._address_scope_to_mark_id[address_scope] = ( self.available_mark_ids.pop()) mark_id = self._address_scope_to_mark_id[address_scope] # NOTE: Address scopes use only the upper 16 bits of the 32 fwmark return "%s/%s" % (hex(mark_id << 16), ADDRESS_SCOPE_MARK_MASK) def get_port_address_scope_mark(self, port): """Get the IP version 4 and 6 address scope mark for the port :param port: A port dict from the RPC call :returns: A dict mapping the address family to the address scope mark """ port_scopes = port.get('address_scopes', {}) address_scope_mark_masks = ( (int(k), self.get_address_scope_mark_mask(v)) for k, v in port_scopes.items()) return collections.defaultdict(self.get_address_scope_mark_mask, address_scope_mark_masks) def process_floating_ip_nat_rules(self): """Configure NAT rules for the router's floating IPs. Configures iptables rules for the floating ips of the given router """ # Clear out all iptables rules for floating ips self.iptables_manager.ipv4['nat'].clear_rules_by_tag('floating_ip') floating_ips = self.get_floating_ips() # Loop once to ensure that floating ips are configured. for fip in floating_ips: # Rebuild iptables rules for the floating ip. for chain, rule in self.floating_forward_rules(fip): self.iptables_manager.ipv4['nat'].add_rule(chain, rule, tag='floating_ip') self.iptables_manager.apply() def _process_pd_iptables_rules(self, prefix, subnet_id): """Configure iptables rules for prefix delegated subnets""" ext_scope = self._get_external_address_scope() ext_scope_mark = self.get_address_scope_mark_mask(ext_scope) ex_gw_device = self.get_external_device_name( self.get_ex_gw_port()['id']) scope_rule = self.address_scope_mangle_rule(ex_gw_device, ext_scope_mark) self.iptables_manager.ipv6['mangle'].add_rule( 'scope', '-d %s ' % prefix + scope_rule, tag=('prefix_delegation_%s' % subnet_id)) def process_floating_ip_address_scope_rules(self): """Configure address scope related iptables rules for the router's floating IPs. """ # Clear out all iptables rules for floating ips self.iptables_manager.ipv4['mangle'].clear_rules_by_tag('floating_ip') all_floating_ips = self.get_floating_ips() ext_scope = self._get_external_address_scope() # Filter out the floating ips that have fixed ip in the same address # scope. Because the packets for them will always be in one address # scope, no need to manipulate MARK/CONNMARK for them. floating_ips = [fip for fip in all_floating_ips if fip.get('fixed_ip_address_scope') != ext_scope] if floating_ips: ext_scope_mark = self.get_address_scope_mark_mask(ext_scope) ports_scopemark = self._get_address_scope_mark() devices_in_ext_scope = { device for device, mark in ports_scopemark[lib_constants.IP_VERSION_4].items() if mark == ext_scope_mark} # Add address scope for floatingip egress for device in devices_in_ext_scope: self.iptables_manager.ipv4['mangle'].add_rule( 'float-snat', '-o %s -j MARK --set-xmark %s' % (device, ext_scope_mark), tag='floating_ip') # Loop once to ensure that floating ips are configured. 
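# (Each iteration below marks traffic addressed to the floating IP with
# the mark of its fixed IP's address scope, and sends traffic from the
# fixed IP through the float-snat chain, per floating_mangle_rules()
# defined above.)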
for fip in floating_ips: # Rebuild iptables rules for the floating ip. fip_ip = fip['floating_ip_address'] # Send the floating ip traffic to the right address scope fixed_ip = fip['fixed_ip_address'] fixed_scope = fip.get('fixed_ip_address_scope') internal_mark = self.get_address_scope_mark_mask(fixed_scope) mangle_rules = self.floating_mangle_rules( fip_ip, fixed_ip, internal_mark) for chain, rule in mangle_rules: self.iptables_manager.ipv4['mangle'].add_rule( chain, rule, tag='floating_ip') def process_snat_dnat_for_fip(self): try: self.process_floating_ip_nat_rules() except Exception: # TODO(salv-orlando): Less broad catching msg = _('L3 agent failure to setup NAT for floating IPs') LOG.exception(msg) raise l3_exc.FloatingIpSetupException(msg) def _add_fip_addr_to_device(self, fip, device): """Configures the floating ip address on the device. """ try: ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address']) device.addr.add(ip_cidr) return True except RuntimeError: # any exception occurring here should cause the floating IP # to be set in error state LOG.warning("Unable to configure IP address for " "floating IP: %s", fip['id']) def add_floating_ip(self, fip, interface_name, device): raise NotImplementedError() def migrate_centralized_floating_ip(self, fip, interface_name, device): """Implements centralized->distributed floating IP migration. Overridden in dvr_local_router.py """ return FLOATINGIP_STATUS_NOCHANGE def gateway_redirect_cleanup(self, rtr_interface): pass def remove_floating_ip(self, device, ip_cidr): device.delete_addr_and_conntrack_state(ip_cidr) def move_floating_ip(self, fip): return lib_constants.FLOATINGIP_STATUS_ACTIVE def remove_external_gateway_ip(self, device, ip_cidr): device.delete_addr_and_conntrack_state(ip_cidr) def get_router_cidrs(self, device): return set([addr['cidr'] for addr in device.addr.list()]) def get_centralized_fip_cidr_set(self): return set() def process_floating_ip_addresses(self, interface_name): """Configure IP addresses on router's external gateway interface. Ensures addresses for existing floating IPs and cleans up those that should no longer be configured. """ fip_statuses = {} if interface_name is None: LOG.debug('No Interface for floating IPs router: %s', self.router['id']) return fip_statuses device = ip_lib.IPDevice(interface_name, namespace=self.ns_name) existing_cidrs = self.get_router_cidrs(device) new_cidrs = set() gw_cidrs = self._get_gw_ips_cidr() centralized_fip_cidrs = self.get_centralized_fip_cidr_set() floating_ips = self.get_floating_ips() # Loop once to ensure that floating ips are configured.
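# (The loop below classifies each floating IP as: newly added, moved to
# a different fixed IP, migrating from centralized to distributed, or
# unchanged; unchanged entries keep FLOATINGIP_STATUS_NOCHANGE so the
# caller can tell them apart from removed ones.)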
for fip in floating_ips: fip_ip = fip['floating_ip_address'] ip_cidr = common_utils.ip_to_cidr(fip_ip) new_cidrs.add(ip_cidr) fip_statuses[fip['id']] = lib_constants.FLOATINGIP_STATUS_ACTIVE if ip_cidr not in existing_cidrs: fip_statuses[fip['id']] = self.add_floating_ip( fip, interface_name, device) LOG.debug('Floating ip %(id)s added, status %(status)s', {'id': fip['id'], 'status': fip_statuses.get(fip['id'])}) elif (fip_ip in self.fip_map and self.fip_map[fip_ip] != fip['fixed_ip_address']): LOG.debug("Floating IP was moved from fixed IP " "%(old)s to %(new)s", {'old': self.fip_map[fip_ip], 'new': fip['fixed_ip_address']}) fip_statuses[fip['id']] = self.move_floating_ip(fip) elif (ip_cidr in centralized_fip_cidrs and fip.get('host') == self.host): LOG.debug("Floating IP is migrating from centralized " "to distributed: %s", fip) fip_statuses[fip['id']] = self.migrate_centralized_floating_ip( fip, interface_name, device) elif fip_statuses[fip['id']] == fip['status']: # mark the status as not changed. we can't remove it because # that's how the caller determines that it was removed fip_statuses[fip['id']] = FLOATINGIP_STATUS_NOCHANGE fips_to_remove = ( ip_cidr for ip_cidr in (existing_cidrs - new_cidrs - gw_cidrs - self.centralized_port_forwarding_fip_set) if common_utils.is_cidr_host(ip_cidr)) for ip_cidr in fips_to_remove: LOG.debug("Removing floating ip %s from interface %s in " "namespace %s", ip_cidr, interface_name, self.ns_name) self.remove_floating_ip(device, ip_cidr) return fip_statuses def _get_gw_ips_cidr(self): gw_cidrs = set() ex_gw_port = self.get_ex_gw_port() if ex_gw_port: for ip_addr in ex_gw_port['fixed_ips']: ex_gw_ip = ip_addr['ip_address'] addr = netaddr.IPAddress(ex_gw_ip) if addr.version == lib_constants.IP_VERSION_4: gw_cidrs.add(common_utils.ip_to_cidr(ex_gw_ip)) return gw_cidrs def configure_fip_addresses(self, interface_name): try: return self.process_floating_ip_addresses(interface_name) except Exception: # TODO(salv-orlando): Less broad catching msg = _('L3 agent failure to setup floating IPs') LOG.exception(msg) raise l3_exc.FloatingIpSetupException(msg) def put_fips_in_error_state(self): fip_statuses = {} for fip in self.router.get(lib_constants.FLOATINGIP_KEY, []): fip_statuses[fip['id']] = lib_constants.FLOATINGIP_STATUS_ERROR return fip_statuses def delete(self): self.router['gw_port'] = None self.router[lib_constants.INTERFACE_KEY] = [] self.router[lib_constants.FLOATINGIP_KEY] = [] self.process_delete() self.disable_radvd() self.router_namespace.delete() def _internal_network_updated(self, port, subnet_id, prefix, old_prefix, updated_cidrs): interface_name = self.get_internal_device_name(port['id']) if prefix != lib_constants.PROVISIONAL_IPV6_PD_PREFIX: fixed_ips = port['fixed_ips'] for fixed_ip in fixed_ips: if fixed_ip['subnet_id'] == subnet_id: v6addr = common_utils.ip_to_cidr(fixed_ip['ip_address'], fixed_ip.get('prefixlen')) if v6addr not in updated_cidrs: self.driver.add_ipv6_addr(interface_name, v6addr, self.ns_name) else: self.driver.delete_ipv6_addr_with_prefix(interface_name, old_prefix, self.ns_name) def _internal_network_added(self, ns_name, network_id, port_id, fixed_ips, mac_address, interface_name, prefix, mtu=None): LOG.debug("adding internal network: prefix(%s), port(%s)", prefix, port_id) self.driver.plug(network_id, port_id, interface_name, mac_address, namespace=ns_name, prefix=prefix, mtu=mtu) ip_cidrs = common_utils.fixed_ip_cidrs(fixed_ips) self.driver.init_router_port( interface_name, ip_cidrs, namespace=ns_name) for fixed_ip in 
fixed_ips: ip_lib.send_ip_addr_adv_notif(ns_name, interface_name, fixed_ip['ip_address']) def internal_network_added(self, port): network_id = port['network_id'] port_id = port['id'] fixed_ips = port['fixed_ips'] mac_address = port['mac_address'] interface_name = self.get_internal_device_name(port_id) self._internal_network_added(self.ns_name, network_id, port_id, fixed_ips, mac_address, interface_name, INTERNAL_DEV_PREFIX, mtu=port.get('mtu')) def internal_network_removed(self, port): interface_name = self.get_internal_device_name(port['id']) LOG.debug("removing internal network: port(%s) interface(%s)", port['id'], interface_name) if ip_lib.device_exists(interface_name, namespace=self.ns_name): self.driver.unplug(interface_name, namespace=self.ns_name, prefix=INTERNAL_DEV_PREFIX) def _get_existing_devices(self): ip_wrapper = ip_lib.IPWrapper(namespace=self.ns_name) ip_devs = ip_wrapper.get_devices() return [ip_dev.name for ip_dev in ip_devs] def _update_internal_ports_cache(self, port): # NOTE(slaweq): self.internal_ports is a list of port objects, but # when it is updated in the _process_internal_ports() method the # update would have to be done by index, and the index of the element # to update is unknown there. It has to be done based on port_id, and # this method does exactly that. for index, p in enumerate(self.internal_ports): if p['id'] == port['id']: self.internal_ports[index] = port break else: self.internal_ports.append(port) @staticmethod def _get_updated_ports(existing_ports, current_ports): updated_ports = [] current_ports_dict = {p['id']: p for p in current_ports} for existing_port in existing_ports: current_port = current_ports_dict.get(existing_port['id']) if current_port: fixed_ips_changed = ( sorted(existing_port['fixed_ips'], key=helpers.safe_sort_key) != sorted(current_port['fixed_ips'], key=helpers.safe_sort_key)) mtu_changed = existing_port['mtu'] != current_port['mtu'] if fixed_ips_changed or mtu_changed: updated_ports.append(current_port) return updated_ports @staticmethod def _port_has_ipv6_subnet(port): if 'subnets' in port: for subnet in port['subnets']: if (netaddr.IPNetwork(subnet['cidr']).version == 6 and subnet['cidr'] != lib_constants.PROVISIONAL_IPV6_PD_PREFIX): return True def enable_radvd(self, internal_ports=None): LOG.debug('Spawning radvd daemon in router device: %s', self.router_id) if not internal_ports: internal_ports = self.internal_ports self.radvd.enable(internal_ports) def disable_radvd(self): if self.radvd: LOG.debug('Terminating radvd daemon in router device: %s', self.router_id) self.radvd.disable() def internal_network_updated(self, interface_name, ip_cidrs, mtu): self.driver.set_mtu(interface_name, mtu, namespace=self.ns_name, prefix=INTERNAL_DEV_PREFIX) self.driver.init_router_port( interface_name, ip_cidrs=ip_cidrs, namespace=self.ns_name) def address_scope_mangle_rule(self, device_name, mark_mask): return '-i %s -j MARK --set-xmark %s' % (device_name, mark_mask) def address_scope_filter_rule(self, device_name, mark_mask): return '-o %s -m mark ! --mark %s -j DROP' % ( device_name, mark_mask) def _process_internal_ports(self): existing_port_ids = set(p['id'] for p in self.internal_ports) internal_ports = self.router.get(lib_constants.INTERFACE_KEY, []) current_port_ids = set(p['id'] for p in internal_ports if p['admin_state_up']) new_port_ids = current_port_ids - existing_port_ids new_ports = [p for p in internal_ports if p['id'] in new_port_ids] old_ports = [p for p in self.internal_ports if p['id'] not in current_port_ids] updated_ports = self._get_updated_ports(self.internal_ports, internal_ports) enable_ra = False for p in old_ports: self.internal_network_removed(p) LOG.debug("removing port %s from internal_ports cache", p) self.internal_ports.remove(p) enable_ra = enable_ra or self._port_has_ipv6_subnet(p) for subnet in p['subnets']: if ipv6_utils.is_ipv6_pd_enabled(subnet): self.agent.pd.disable_subnet(self.router_id, subnet['id']) del self.pd_subnets[subnet['id']] for p in new_ports: self.internal_network_added(p) LOG.debug("appending port %s to internal_ports cache", p) self._update_internal_ports_cache(p) enable_ra = enable_ra or self._port_has_ipv6_subnet(p) for subnet in p['subnets']: if ipv6_utils.is_ipv6_pd_enabled(subnet): interface_name = self.get_internal_device_name(p['id']) self.agent.pd.enable_subnet(self.router_id, subnet['id'], subnet['cidr'], interface_name, p['mac_address']) if (subnet['cidr'] != lib_constants.PROVISIONAL_IPV6_PD_PREFIX): self.pd_subnets[subnet['id']] = subnet['cidr'] updated_cidrs = [] for p in updated_ports: self._update_internal_ports_cache(p) interface_name = self.get_internal_device_name(p['id']) ip_cidrs = common_utils.fixed_ip_cidrs(p['fixed_ips']) LOG.debug("updating internal network for port %s", p) updated_cidrs += ip_cidrs self.internal_network_updated( interface_name, ip_cidrs, p['mtu']) enable_ra = enable_ra or self._port_has_ipv6_subnet(p) # Check if there is any pd prefix update for p in internal_ports: if p['id'] in (set(current_port_ids) & set(existing_port_ids)): for subnet in p.get('subnets', []): if ipv6_utils.is_ipv6_pd_enabled(subnet): old_prefix = self.agent.pd.update_subnet( self.router_id, subnet['id'], subnet['cidr']) if old_prefix: self._internal_network_updated(p, subnet['id'], subnet['cidr'], old_prefix, updated_cidrs) self.pd_subnets[subnet['id']] = subnet['cidr'] enable_ra = True # Enable RA if enable_ra: self.enable_radvd(internal_ports) existing_devices = self._get_existing_devices() current_internal_devs = set(n for n in existing_devices if n.startswith(INTERNAL_DEV_PREFIX)) current_port_devs = set(self.get_internal_device_name(port_id) for port_id in current_port_ids) stale_devs = current_internal_devs - current_port_devs for stale_dev in stale_devs: LOG.debug('Deleting stale internal router device: %s', stale_dev) self.agent.pd.remove_stale_ri_ifname(self.router_id, stale_dev) self.driver.unplug(stale_dev, namespace=self.ns_name, prefix=INTERNAL_DEV_PREFIX) def _list_floating_ip_cidrs(self): # Compute a list of addresses this router is supposed to have. # This avoids unnecessarily removing those addresses and # causing a momentary network outage.
floating_ips = self.get_floating_ips() return [common_utils.ip_to_cidr(ip['floating_ip_address']) for ip in floating_ips] def _plug_external_gateway(self, ex_gw_port, interface_name, ns_name, link_up=True): self.driver.plug(ex_gw_port['network_id'], ex_gw_port['id'], interface_name, ex_gw_port['mac_address'], namespace=ns_name, prefix=EXTERNAL_DEV_PREFIX, mtu=ex_gw_port.get('mtu'), link_up=link_up) def _get_external_gw_ips(self, ex_gw_port): gateway_ips = [] if 'subnets' in ex_gw_port: gateway_ips = [subnet['gateway_ip'] for subnet in ex_gw_port['subnets'] if subnet['gateway_ip']] if self.use_ipv6 and not self.is_v6_gateway_set(gateway_ips): # No IPv6 gateway is available, but IPv6 is enabled. if self.agent_conf.ipv6_gateway: # ipv6_gateway configured, use address for default route. gateway_ips.append(self.agent_conf.ipv6_gateway) return gateway_ips def _add_route_to_gw(self, ex_gw_port, device_name, namespace, preserve_ips): # Note: ipv6_gateway is an ipv6 LLA # and so doesn't need a special route for subnet in ex_gw_port.get('subnets', []): is_gateway_not_in_subnet = (subnet['gateway_ip'] and not ipam_utils.check_subnet_ip( subnet['cidr'], subnet['gateway_ip'])) if is_gateway_not_in_subnet: preserve_ips.append(subnet['gateway_ip']) device = ip_lib.IPDevice(device_name, namespace=namespace) device.route.add_route(subnet['gateway_ip'], scope='link') def _configure_ipv6_params_on_gw(self, ex_gw_port, ns_name, interface_name, enabled): if not self.use_ipv6: return disable_ra = not enabled if enabled: gateway_ips = self._get_external_gw_ips(ex_gw_port) if not self.is_v6_gateway_set(gateway_ips): # There is no IPv6 gw_ip, use RouterAdvt for default route. self.driver.configure_ipv6_ra( ns_name, interface_name, lib_constants.ACCEPT_RA_WITH_FORWARDING) else: # Otherwise, disable it disable_ra = True if disable_ra: self.driver.configure_ipv6_ra(ns_name, interface_name, lib_constants.ACCEPT_RA_DISABLED) self.driver.configure_ipv6_forwarding(ns_name, interface_name, enabled) # This will make sure the 'all' setting is the same as the interface, # which is needed for forwarding to work. Don't disable once it's # been enabled so as to not send spurious MLDv2 packets out. if enabled: self.driver.configure_ipv6_forwarding(ns_name, 'all', enabled) def _external_gateway_added(self, ex_gw_port, interface_name, ns_name, preserve_ips): LOG.debug("External gateway added: port(%s), interface(%s), ns(%s)", ex_gw_port, interface_name, ns_name) self._plug_external_gateway(ex_gw_port, interface_name, ns_name) self._external_gateway_settings(ex_gw_port, interface_name, ns_name, preserve_ips) def _external_gateway_settings(self, ex_gw_port, interface_name, ns_name, preserve_ips): # Build up the interface and gateway IP addresses that # will be added to the interface. 
ip_cidrs = common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips']) gateway_ips = self._get_external_gw_ips(ex_gw_port) self._add_route_to_gw(ex_gw_port, device_name=interface_name, namespace=ns_name, preserve_ips=preserve_ips) self.driver.init_router_port( interface_name, ip_cidrs, namespace=ns_name, extra_subnets=ex_gw_port.get('extra_subnets', []), preserve_ips=preserve_ips, clean_connections=True) device = ip_lib.IPDevice(interface_name, namespace=ns_name) current_gateways = set() for ip_version in (lib_constants.IP_VERSION_4, lib_constants.IP_VERSION_6): gateway = device.route.get_gateway(ip_version=ip_version) if gateway and gateway.get('via'): current_gateways.add(gateway.get('via')) for ip in current_gateways - set(gateway_ips): device.route.delete_gateway(ip) for ip in gateway_ips: device.route.add_gateway(ip) self._configure_ipv6_params_on_gw(ex_gw_port, ns_name, interface_name, True) for fixed_ip in ex_gw_port['fixed_ips']: ip_lib.send_ip_addr_adv_notif(ns_name, interface_name, fixed_ip['ip_address']) def is_v6_gateway_set(self, gateway_ips): """Check to see if list of gateway_ips has an IPv6 gateway. """ # Note - don't require a try-except here as all # gateway_ips elements are valid addresses, if they exist. return any(netaddr.IPAddress(gw_ip).version == 6 for gw_ip in gateway_ips) def get_router_preserve_ips(self): preserve_ips = self._list_floating_ip_cidrs() + list( self.centralized_port_forwarding_fip_set) preserve_ips.extend(self.agent.pd.get_preserve_ips(self.router_id)) return preserve_ips def external_gateway_added(self, ex_gw_port, interface_name): preserve_ips = self.get_router_preserve_ips() self._external_gateway_added( ex_gw_port, interface_name, self.ns_name, preserve_ips) def external_gateway_updated(self, ex_gw_port, interface_name): preserve_ips = self.get_router_preserve_ips() self._external_gateway_added( ex_gw_port, interface_name, self.ns_name, preserve_ips) def external_gateway_removed(self, ex_gw_port, interface_name): LOG.debug("External gateway removed: port(%s), interface(%s)", ex_gw_port, interface_name) device = ip_lib.IPDevice(interface_name, namespace=self.ns_name) for ip_addr in ex_gw_port['fixed_ips']: prefixlen = ip_addr.get('prefixlen') self.remove_external_gateway_ip(device, common_utils.ip_to_cidr( ip_addr['ip_address'], prefixlen)) self.driver.unplug(interface_name, namespace=self.ns_name, prefix=EXTERNAL_DEV_PREFIX) @staticmethod def _gateway_ports_equal(port1, port2): return port1 == port2 def _delete_stale_external_devices(self, interface_name): existing_devices = self._get_existing_devices() stale_devs = [dev for dev in existing_devices if dev.startswith(EXTERNAL_DEV_PREFIX) and dev != interface_name] for stale_dev in stale_devs: LOG.debug('Deleting stale external router device: %s', stale_dev) self.agent.pd.remove_gw_interface(self.router['id']) self.driver.unplug(stale_dev, namespace=self.ns_name, prefix=EXTERNAL_DEV_PREFIX) def _process_external_gateway(self, ex_gw_port): # TODO(Carl) Refactor to clarify roles of ex_gw_port vs self.ex_gw_port ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or self.ex_gw_port and self.ex_gw_port['id']) interface_name = None if ex_gw_port_id: interface_name = self.get_external_device_name(ex_gw_port_id) if ex_gw_port: if not self.ex_gw_port: self.external_gateway_added(ex_gw_port, interface_name) self.agent.pd.add_gw_interface(self.router['id'], interface_name) elif not self._gateway_ports_equal(ex_gw_port, self.ex_gw_port): self.external_gateway_updated(ex_gw_port, interface_name) elif not ex_gw_port and 
self.ex_gw_port: self.external_gateway_removed(self.ex_gw_port, interface_name) self.agent.pd.remove_gw_interface(self.router['id']) elif not ex_gw_port and not self.ex_gw_port: for p in self.internal_ports: interface_name = self.get_internal_device_name(p['id']) self.gateway_redirect_cleanup(interface_name) self._delete_stale_external_devices(interface_name) # Process SNAT rules for external gateway gw_port = self._router.get('gw_port') self._handle_router_snat_rules(gw_port, interface_name) def _prevent_snat_for_internal_traffic_rule(self, interface_name): return ( 'POSTROUTING', '! -o %(interface_name)s -m conntrack ' '! --ctstate DNAT -j ACCEPT' % {'interface_name': interface_name}) def external_gateway_nat_fip_rules(self, ex_gw_ip, interface_name): dont_snat_traffic_to_internal_ports_if_not_to_floating_ip = ( self._prevent_snat_for_internal_traffic_rule(interface_name)) # Makes replies come back through the router to reverse DNAT ext_in_mark = self.agent_conf.external_ingress_mark to_source = ('-m mark ! --mark %s/%s ' '-m conntrack --ctstate DNAT ' '-j SNAT --to-source %s' % (ext_in_mark, lib_constants.ROUTER_MARK_MASK, ex_gw_ip)) if self.iptables_manager.random_fully: to_source += ' --random-fully' snat_internal_traffic_to_floating_ip = ('snat', to_source) return [dont_snat_traffic_to_internal_ports_if_not_to_floating_ip, snat_internal_traffic_to_floating_ip] def external_gateway_nat_snat_rules(self, ex_gw_ip, interface_name): to_source = '-o %s -j SNAT --to-source %s' % (interface_name, ex_gw_ip) if self.iptables_manager.random_fully: to_source += ' --random-fully' return [('snat', to_source)] def external_gateway_mangle_rules(self, interface_name): mark = self.agent_conf.external_ingress_mark mark_packets_entering_external_gateway_port = ( 'mark', '-i %s -j MARK --set-xmark %s/%s' % (interface_name, mark, lib_constants.ROUTER_MARK_MASK)) return [mark_packets_entering_external_gateway_port] def _empty_snat_chains(self, iptables_manager): iptables_manager.ipv4['nat'].empty_chain('POSTROUTING') iptables_manager.ipv4['nat'].empty_chain('snat') iptables_manager.ipv4['mangle'].empty_chain('mark') iptables_manager.ipv4['mangle'].empty_chain('POSTROUTING') def _add_snat_rules(self, ex_gw_port, iptables_manager, interface_name): self.process_external_port_address_scope_routing(iptables_manager) if ex_gw_port: # ex_gw_port should not be None in this case # NAT rules are added only if ex_gw_port has an IPv4 address for ip_addr in ex_gw_port['fixed_ips']: ex_gw_ip = ip_addr['ip_address'] if netaddr.IPAddress(ex_gw_ip).version == 4: if self._snat_enabled: rules = self.external_gateway_nat_snat_rules( ex_gw_ip, interface_name) for rule in rules: iptables_manager.ipv4['nat'].add_rule(*rule) rules = self.external_gateway_nat_fip_rules( ex_gw_ip, interface_name) for rule in rules: iptables_manager.ipv4['nat'].add_rule(*rule) rules = self.external_gateway_mangle_rules(interface_name) for rule in rules: iptables_manager.ipv4['mangle'].add_rule(*rule) break def _handle_router_snat_rules(self, ex_gw_port, interface_name): self._empty_snat_chains(self.iptables_manager) self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat') self._add_snat_rules(ex_gw_port, self.iptables_manager, interface_name) def _process_external_on_delete(self): fip_statuses = {} try: ex_gw_port = self.get_ex_gw_port() self._process_external_gateway(ex_gw_port) if not ex_gw_port: return interface_name = self.get_external_device_interface_name( ex_gw_port) fip_statuses = self.configure_fip_addresses(interface_name) 
except l3_exc.FloatingIpSetupException: # All floating IPs must be put in error state LOG.exception("Failed to process floating IPs.") fip_statuses = self.put_fips_in_error_state() finally: self.update_fip_statuses(fip_statuses) def process_external(self): fip_statuses = {} try: with self.iptables_manager.defer_apply(): ex_gw_port = self.get_ex_gw_port() self._process_external_gateway(ex_gw_port) if not ex_gw_port: return # Process SNAT/DNAT rules and addresses for floating IPs self.process_snat_dnat_for_fip() # Once NAT rules for floating IPs are safely in place # configure their addresses on the external gateway port interface_name = self.get_external_device_interface_name( ex_gw_port) fip_statuses = self.configure_fip_addresses(interface_name) except (l3_exc.FloatingIpSetupException, l3_exc.IpTablesApplyException): # All floating IPs must be put in error state LOG.exception("Failed to process floating IPs.") fip_statuses = self.put_fips_in_error_state() finally: self.update_fip_statuses(fip_statuses) def update_fip_statuses(self, fip_statuses): # Identify floating IPs which were disabled existing_floating_ips = self.floating_ips self.floating_ips = set(fip_statuses.keys()) for fip_id in existing_floating_ips - self.floating_ips: fip_statuses[fip_id] = lib_constants.FLOATINGIP_STATUS_DOWN # filter out statuses that didn't change fip_statuses = {f: stat for f, stat in fip_statuses.items() if stat != FLOATINGIP_STATUS_NOCHANGE} if not fip_statuses: return LOG.debug('Sending floating ip statuses: %s', fip_statuses) # Update floating IP status on the neutron server self.agent.plugin_rpc.update_floatingip_statuses( self.agent.context, self.router_id, fip_statuses) def initialize_address_scope_iptables(self): self._initialize_address_scope_iptables(self.iptables_manager) def _initialize_address_scope_iptables(self, iptables_manager): # Add address scope related chains iptables_manager.ipv4['mangle'].add_chain('scope') iptables_manager.ipv6['mangle'].add_chain('scope') iptables_manager.ipv4['mangle'].add_chain('floatingip') iptables_manager.ipv4['mangle'].add_chain('float-snat') iptables_manager.ipv4['filter'].add_chain('scope') iptables_manager.ipv6['filter'].add_chain('scope') iptables_manager.ipv4['filter'].add_rule('FORWARD', '-j $scope') iptables_manager.ipv6['filter'].add_rule('FORWARD', '-j $scope') # Add rules for marking traffic for address scopes mark_new_ingress_address_scope_by_interface = ( '-j $scope') copy_address_scope_for_existing = ( '-m connmark ! --mark 0x0/0xffff0000 ' '-j CONNMARK --restore-mark ' '--nfmask 0xffff0000 --ctmask 0xffff0000') mark_new_ingress_address_scope_by_floatingip = ( '-j $floatingip') save_mark_to_connmark = ( '-m connmark --mark 0x0/0xffff0000 ' '-j CONNMARK --save-mark ' '--nfmask 0xffff0000 --ctmask 0xffff0000') iptables_manager.ipv4['mangle'].add_rule( 'PREROUTING', mark_new_ingress_address_scope_by_interface) iptables_manager.ipv4['mangle'].add_rule( 'PREROUTING', copy_address_scope_for_existing) # The floating ip scope rules must come after the CONNTRACK rules # because the (CONN)MARK targets are non-terminating (this is true # despite them not being documented as such) and the floating ip # rules need to override the mark from CONNMARK to cross scopes. 
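# To make the ordering concrete, a sketch of how the IPv4 mangle
# PREROUTING chain evaluates once these rules are in place (chain names
# prefixed with $ are expanded by iptables_manager to its wrapped
# per-agent chains; rule text is illustrative, not literal
# iptables-save output):
#
#   -j $scope        # mark new ingress traffic by input interface
#   -m connmark ! --mark 0x0/0xffff0000 -j CONNMARK --restore-mark
#                    # restore a previously saved scope mark
#   -j $floatingip   # may overwrite the restored mark to cross scopes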
iptables_manager.ipv4['mangle'].add_rule( 'PREROUTING', mark_new_ingress_address_scope_by_floatingip) iptables_manager.ipv4['mangle'].add_rule( 'float-snat', save_mark_to_connmark) iptables_manager.ipv6['mangle'].add_rule( 'PREROUTING', mark_new_ingress_address_scope_by_interface) iptables_manager.ipv6['mangle'].add_rule( 'PREROUTING', copy_address_scope_for_existing) def initialize_metadata_iptables(self): # Always mark incoming metadata requests, that way any stray # requests that arrive before the filter metadata redirect # rule is installed will be dropped. mark_metadata_for_internal_interfaces = ( '-d 169.254.169.254/32 ' '-i %(interface_name)s ' '-p tcp -m tcp --dport 80 ' '-j MARK --set-xmark %(value)s/%(mask)s' % {'interface_name': INTERNAL_DEV_PREFIX + '+', 'value': self.agent_conf.metadata_access_mark, 'mask': lib_constants.ROUTER_MARK_MASK}) self.iptables_manager.ipv4['mangle'].add_rule( 'PREROUTING', mark_metadata_for_internal_interfaces) def _get_port_devicename_scopemark(self, ports, name_generator): devicename_scopemark = {lib_constants.IP_VERSION_4: dict(), lib_constants.IP_VERSION_6: dict()} for p in ports: device_name = name_generator(p['id']) ip_cidrs = common_utils.fixed_ip_cidrs(p['fixed_ips']) port_as_marks = self.get_port_address_scope_mark(p) for ip_version in {common_utils.get_ip_version(cidr) for cidr in ip_cidrs}: devicename_scopemark[ip_version][device_name] = ( port_as_marks[ip_version]) return devicename_scopemark def _get_address_scope_mark(self): # Prepare address scope iptables rule for internal ports internal_ports = self.router.get(lib_constants.INTERFACE_KEY, []) ports_scopemark = self._get_port_devicename_scopemark( internal_ports, self.get_internal_device_name) # Prepare address scope iptables rule for external port external_port = self.get_ex_gw_port() if external_port: external_port_scopemark = self._get_port_devicename_scopemark( [external_port], self.get_external_device_name) for ip_version in (lib_constants.IP_VERSION_4, lib_constants.IP_VERSION_6): ports_scopemark[ip_version].update( external_port_scopemark[ip_version]) return ports_scopemark def _add_address_scope_mark(self, iptables_manager, ports_scopemark): external_device_name = None external_port = self.get_ex_gw_port() if external_port: external_device_name = self.get_external_device_name( external_port['id']) # Process address scope iptables rules for ip_version in (lib_constants.IP_VERSION_4, lib_constants.IP_VERSION_6): scopemarks = ports_scopemark[ip_version] iptables = iptables_manager.get_tables(ip_version) iptables['mangle'].empty_chain('scope') iptables['filter'].empty_chain('scope') dont_block_external = (ip_version == lib_constants.IP_VERSION_4 and self._snat_enabled and external_port) for device_name, mark in scopemarks.items(): # Add address scope iptables rule iptables['mangle'].add_rule( 'scope', self.address_scope_mangle_rule(device_name, mark)) if dont_block_external and device_name == external_device_name: continue iptables['filter'].add_rule( 'scope', self.address_scope_filter_rule(device_name, mark)) for subnet_id, prefix in self.pd_subnets.items(): if prefix != lib_constants.PROVISIONAL_IPV6_PD_PREFIX: self._process_pd_iptables_rules(prefix, subnet_id) def process_ports_address_scope_iptables(self): ports_scopemark = self._get_address_scope_mark() self._add_address_scope_mark(self.iptables_manager, ports_scopemark) def _get_external_address_scope(self): external_port = self.get_ex_gw_port() if not external_port: return scopes = external_port.get('address_scopes', {}) 
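# For illustration only: the gateway port's 'address_scopes' attribute
# is assumed to map IP version strings to scope ids, e.g.
#
#   {'4': '<v4-scope-id>', '6': '<v6-scope-id>'}
#
# so the lookup below yields the IPv4 scope id, or None if the port has
# no IPv4 address scope.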
return scopes.get(str(lib_constants.IP_VERSION_4)) def process_external_port_address_scope_routing(self, iptables_manager): if not self._snat_enabled: return external_port = self.get_ex_gw_port() if not external_port: return external_devicename = self.get_external_device_name( external_port['id']) # Saves the originating address scope by saving the packet MARK to # the CONNMARK for new connections so that returning traffic can be # matched to it. rule = ('-o %s -m connmark --mark 0x0/0xffff0000 ' '-j CONNMARK --save-mark ' '--nfmask 0xffff0000 --ctmask 0xffff0000' % external_devicename) iptables_manager.ipv4['mangle'].add_rule('POSTROUTING', rule) address_scope = self._get_external_address_scope() if not address_scope: return # Prevents snat within the same address scope rule = '-o %s -m connmark --mark %s -j ACCEPT' % ( external_devicename, self.get_address_scope_mark_mask(address_scope)) iptables_manager.ipv4['nat'].add_rule('snat', rule) def process_address_scope(self): with self.iptables_manager.defer_apply(): self.process_ports_address_scope_iptables() self.process_floating_ip_address_scope_rules() @common_utils.exception_logger() def process_delete(self): """Process the delete of this router This method is the point where the agent requests that this router be deleted. This is a separate code path from process in that it avoids any changes to the qrouter namespace that will be removed at the end of the operation. """ LOG.debug("Process delete, router %s", self.router['id']) if self.router_namespace.exists(): self._process_internal_ports() self.agent.pd.sync_router(self.router['id']) self._process_external_on_delete() else: LOG.warning("Can't gracefully delete the router %s: " "no router namespace found", self.router['id']) @common_utils.exception_logger() def process(self): LOG.debug("Process updates, router %s", self.router['id']) self.centralized_port_forwarding_fip_set = set(self.router.get( 'port_forwardings_fip_set', set())) self._process_internal_ports() self.agent.pd.sync_router(self.router['id']) self.process_external() self.process_address_scope() # Process static routes for router self.routes_updated(self.routes, self.router['routes']) self.routes = self.router['routes'] # Update ex_gw_port on the router info cache self.ex_gw_port = self.get_ex_gw_port() self.fip_map = dict((fip['floating_ip_address'], fip['fixed_ip_address']) for fip in self.get_floating_ips()) self.fip_managed_by_port_forwardings = self.router.get( 'fip_managed_by_port_forwardings') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/l3_agent.py0000644000175000017500000000412500000000000022003 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file 
import sys from neutron_lib.agent import topics from oslo_config import cfg from oslo_service import service from neutron.common import config as common_config from neutron.conf.agent import common as config from neutron.conf.agent.l3 import config as l3_config from neutron.conf.agent.l3 import ha as ha_conf from neutron.conf.agent.metadata import config as meta_conf from neutron.conf.plugins.ml2.drivers import ovs_conf from neutron import service as neutron_service def register_opts(conf): l3_config.register_l3_agent_config_opts(l3_config.OPTS, conf) ha_conf.register_l3_agent_ha_opts(conf) meta_conf.register_meta_conf_opts(meta_conf.SHARED_OPTS, conf) config.register_interface_driver_opts_helper(conf) config.register_agent_state_opts_helper(conf) config.register_interface_opts(conf) config.register_external_process_opts(conf) config.register_pddriver_opts(conf) config.register_ra_opts(conf) config.register_availability_zone_opts_helper(conf) ovs_conf.register_ovs_opts(conf) def main(manager='neutron.agent.l3.agent.L3NATAgentWithStateReport'): register_opts(cfg.CONF) common_config.init(sys.argv[1:]) config.setup_logging() config.setup_privsep() server = neutron_service.Service.create( binary='neutron-l3-agent', topic=topics.L3_AGENT, report_interval=cfg.CONF.AGENT.report_interval, manager=manager) service.launch(cfg.CONF, server, restart_method='mutate').wait() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2030437 neutron-16.0.0.0b2.dev214/neutron/agent/linux/0000755000175000017500000000000000000000000021072 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/linux/__init__.py0000644000175000017500000000000000000000000023171 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/linux/bridge_lib.py0000644000175000017500000001126700000000000023535 0ustar00coreycorey00000000000000# Copyright 2015 Intel Corporation. # Copyright 2015 Isaku Yamahata # # All Rights Reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_utils import excutils from neutron.agent.linux import ip_lib # NOTE(toabctl): Don't use /sys/devices/virtual/net here because not all tap # devices are listed here (i.e. 
when using Xen) BRIDGE_FS = "/sys/class/net/" BRIDGE_INTERFACE_FS = BRIDGE_FS + "%(bridge)s/brif/%(interface)s" BRIDGE_INTERFACES_FS = BRIDGE_FS + "%s/brif/" BRIDGE_PORT_FS_FOR_DEVICE = BRIDGE_FS + "%s/brport" BRIDGE_PATH_FOR_DEVICE = BRIDGE_PORT_FS_FOR_DEVICE + '/bridge' def is_bridged_interface(interface): if not interface: return False else: return os.path.exists(BRIDGE_PORT_FS_FOR_DEVICE % interface) def get_interface_ifindex(interface): try: with open(os.path.join(BRIDGE_FS, interface, 'ifindex'), 'r') as fh: return int(fh.read().strip()) except (IOError, ValueError): pass def get_bridge_names(): return os.listdir(BRIDGE_FS) class BridgeDevice(ip_lib.IPDevice): def _ip_link(self, cmd): cmd = ['ip', 'link'] + cmd ip_wrapper = ip_lib.IPWrapper(self.namespace) return ip_wrapper.netns.execute(cmd, run_as_root=True) @classmethod def addbr(cls, name, namespace=None): bridge = cls(name, namespace, 'bridge') try: bridge.link.create() except RuntimeError: with excutils.save_and_reraise_exception() as ectx: ectx.reraise = not bridge.exists() return bridge @classmethod def get_interface_bridge(cls, interface): try: path = os.readlink(BRIDGE_PATH_FOR_DEVICE % interface) except OSError: return None else: name = path.rpartition('/')[-1] return cls(name) def delbr(self): return self.link.delete() def addif(self, interface): return self._ip_link(['set', 'dev', interface, 'master', self.name]) def delif(self, interface): return self._ip_link(['set', 'dev', interface, 'nomaster']) def setfd(self, fd): return self._ip_link(['set', 'dev', self.name, 'type', 'bridge', 'forward_delay', str(fd)]) def disable_stp(self): return self._ip_link(['set', 'dev', self.name, 'type', 'bridge', 'stp_state', 0]) def owns_interface(self, interface): return os.path.exists( BRIDGE_INTERFACE_FS % {'bridge': self.name, 'interface': interface}) def get_interfaces(self): try: return os.listdir(BRIDGE_INTERFACES_FS % self.name) except OSError: return [] class FdbInterface(object): """provide basic functionality to edit the FDB table""" @staticmethod def _execute_bridge(cmd, namespace, **kwargs): ip_wrapper = ip_lib.IPWrapper(namespace) return ip_wrapper.netns.execute(cmd, run_as_root=True, **kwargs) @classmethod def _cmd(cls, op, mac, dev, ip_dst, namespace, **kwargs): cmd = ['bridge', 'fdb', op, mac, 'dev', dev] if ip_dst is not None: cmd += ['dst', ip_dst] cls._execute_bridge(cmd, namespace, **kwargs) @classmethod def add(cls, mac, dev, ip_dst=None, namespace=None, **kwargs): return cls._cmd('add', mac, dev, ip_dst, namespace, **kwargs) @classmethod def append(cls, mac, dev, ip_dst=None, namespace=None, **kwargs): return cls._cmd('append', mac, dev, ip_dst, namespace, **kwargs) @classmethod def replace(cls, mac, dev, ip_dst=None, namespace=None, **kwargs): return cls._cmd('replace', mac, dev, ip_dst, namespace, **kwargs) @classmethod def delete(cls, mac, dev, ip_dst=None, namespace=None, **kwargs): return cls._cmd('delete', mac, dev, ip_dst, namespace, **kwargs) @classmethod def show(cls, dev=None, namespace=None, **kwargs): cmd = ['bridge', 'fdb', 'show'] if dev: cmd += ['dev', dev] return cls._execute_bridge(cmd, namespace, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/linux/daemon.py0000644000175000017500000002012200000000000022704 0ustar00coreycorey00000000000000# Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file 
except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import atexit import fcntl import grp import logging as std_logging from logging import handlers import os import pwd import signal import sys from neutron_lib import exceptions from oslo_log import log as logging import setproctitle import six from neutron._i18n import _ LOG = logging.getLogger(__name__) DEVNULL = object() # Note: We can't use sys.std*.fileno() here. sys.std* objects may be # random file-like objects that may not match the true system std* fds # - and indeed may not even have a file descriptor at all (eg: test # fixtures that monkey patch fixtures.StringStream onto sys.stdout). # Below we always want the _real_ well-known 0,1,2 Unix fds during # os.dup2 manipulation. STDIN_FILENO = 0 STDOUT_FILENO = 1 STDERR_FILENO = 2 def setuid(user_id_or_name): try: new_uid = int(user_id_or_name) except (TypeError, ValueError): new_uid = pwd.getpwnam(user_id_or_name).pw_uid if new_uid != 0: try: os.setuid(new_uid) except OSError: msg = _('Failed to set uid %s') % new_uid LOG.critical(msg) raise exceptions.FailToDropPrivilegesExit(msg) def setgid(group_id_or_name): try: new_gid = int(group_id_or_name) except (TypeError, ValueError): new_gid = grp.getgrnam(group_id_or_name).gr_gid if new_gid != 0: try: os.setgid(new_gid) except OSError: msg = _('Failed to set gid %s') % new_gid LOG.critical(msg) raise exceptions.FailToDropPrivilegesExit(msg) def unwatch_log(): """Replace WatchedFileHandler handlers with FileHandler ones. Neutron logging uses WatchedFileHandler handlers, but they do not support dropping privileges; this method replaces them with FileHandler handlers, which do. """ log_root = logging.getLogger(None).logger to_replace = [h for h in log_root.handlers if isinstance(h, handlers.WatchedFileHandler)] for handler in to_replace: # NOTE(cbrandily): we use default delay(=False) to ensure the log file # is opened before privileges drop. 
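# Rough sketch of the swap performed below (handler values are
# hypothetical): a root-owned log file reopened by WatchedFileHandler
# after rotation fails once privileges are dropped, so
#
#   before: [WatchedFileHandler('/var/log/neutron/l3-agent.log')]
#   after:  [FileHandler('/var/log/neutron/l3-agent.log')]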
new_handler = std_logging.FileHandler(handler.baseFilename, mode=handler.mode, encoding=handler.encoding) log_root.removeHandler(handler) log_root.addHandler(new_handler) def drop_privileges(user=None, group=None): """Drop privileges to user/group privileges.""" if user is None and group is None: return if os.geteuid() != 0: msg = _('Root permissions are required to drop privileges.') LOG.critical(msg) raise exceptions.FailToDropPrivilegesExit(msg) if group is not None: try: os.setgroups([]) except OSError: msg = _('Failed to remove supplemental groups') LOG.critical(msg) raise exceptions.FailToDropPrivilegesExit(msg) setgid(group) if user is not None: setuid(user) LOG.info("Process runs with uid/gid: %(uid)s/%(gid)s", {'uid': os.getuid(), 'gid': os.getgid()}) class Pidfile(object): def __init__(self, pidfile, procname, uuid=None): self.pidfile = pidfile self.procname = procname self.uuid = uuid try: self.fd = os.open(pidfile, os.O_CREAT | os.O_RDWR) fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB) except IOError: LOG.exception("Error while handling pidfile: %s", pidfile) sys.exit(1) def __str__(self): return self.pidfile def unlock(self): fcntl.flock(self.fd, fcntl.LOCK_UN) def write(self, pid): os.ftruncate(self.fd, 0) os.write(self.fd, six.b("%s" % pid)) os.fsync(self.fd) def read(self): try: pid = int(os.read(self.fd, 128)) os.lseek(self.fd, 0, os.SEEK_SET) return pid except ValueError: return def is_running(self): pid = self.read() if not pid: return False cmdline = '/proc/%s/cmdline' % pid try: with open(cmdline, "r") as f: exec_out = f.readline() return self.procname in exec_out and (not self.uuid or self.uuid in exec_out) except IOError: return False class Daemon(object): """A generic daemon class. Usage: subclass the Daemon class and override the run() method """ def __init__(self, pidfile, stdin=DEVNULL, stdout=DEVNULL, stderr=DEVNULL, procname=sys.executable, uuid=None, user=None, group=None): """Note: pidfile may be None.""" self.stdin = stdin self.stdout = stdout self.stderr = stderr self.procname = procname self.pidfile = (Pidfile(pidfile, procname, uuid) if pidfile is not None else None) self.user = user self.group = group def _fork(self): try: pid = os.fork() if pid > 0: os._exit(0) except OSError: LOG.exception('Fork failed') sys.exit(1) def daemonize(self): """Daemonize process by doing Stevens double fork.""" # flush any buffered data before fork/dup2. if self.stdout is not DEVNULL: self.stdout.flush() if self.stderr is not DEVNULL: self.stderr.flush() # sys.std* may not match STD{OUT,ERR}_FILENO. Tough. 
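# A compressed view of the classic Stevens double fork performed below
# (a restatement of the code that follows, not extra behaviour):
#
#   fork()   -> parent _exit(0)   # child is not a process-group leader
#   setsid()                      # new session, detached from any tty
#   fork()   -> parent _exit(0)   # grandchild can never regain a tty
#   dup2(devnull, 0/1/2)          # redirect stdio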
for f in (sys.stdout, sys.stderr): f.flush() # fork first time self._fork() # decouple from parent environment os.chdir("/") os.setsid() os.umask(0) # fork second time self._fork() # redirect standard file descriptors with open(os.devnull, 'w+') as devnull: stdin = devnull if self.stdin is DEVNULL else self.stdin stdout = devnull if self.stdout is DEVNULL else self.stdout stderr = devnull if self.stderr is DEVNULL else self.stderr os.dup2(stdin.fileno(), STDIN_FILENO) os.dup2(stdout.fileno(), STDOUT_FILENO) os.dup2(stderr.fileno(), STDERR_FILENO) if self.pidfile is not None: # write pidfile atexit.register(self.delete_pid) signal.signal(signal.SIGTERM, self.handle_sigterm) self.pidfile.write(os.getpid()) def delete_pid(self): if self.pidfile is not None: os.remove(str(self.pidfile)) def handle_sigterm(self, signum, frame): sys.exit(0) def start(self): """Start the daemon.""" self._parent_proctitle = setproctitle.getproctitle() if self.pidfile is not None and self.pidfile.is_running(): self.pidfile.unlock() LOG.error('Pidfile %s already exists. Daemon already ' 'running?', self.pidfile) sys.exit(1) # Start the daemon self.daemonize() self.run() def _set_process_title(self): proctitle = "%s (%s)" % (self.procname, self._parent_proctitle) setproctitle.setproctitle(proctitle) def run(self): """Override this method and call super().run when subclassing Daemon. start() will call this method after the process has daemonized. """ self._set_process_title() unwatch_log() drop_privileges(self.user, self.group) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/linux/dhcp.py0000644000175000017500000022034200000000000022365 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
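# A short usage sketch of the DictModel helper defined below
# (values are illustrative):
#
#   net = DictModel({'id': 'net-1',
#                    'subnets': [{'cidr': '10.0.0.0/24'}]})
#   net.id               # -> 'net-1' (attribute access to dict keys)
#   net.subnets[0].cidr  # -> '10.0.0.0/24' (nested dicts are upgraded)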
import abc import collections import copy import itertools import os import re import shutil import time import netaddr from neutron_lib.api.definitions import extra_dhcp_opt as edo_ext from neutron_lib import constants from neutron_lib import exceptions from neutron_lib.utils import file as file_utils from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import fileutils from oslo_utils import netutils from oslo_utils import uuidutils import six from neutron._i18n import _ from neutron.agent.common import utils as agent_common_utils from neutron.agent.linux import external_process from neutron.agent.linux import ip_lib from neutron.agent.linux import iptables_manager from neutron.cmd import runtime_checks as checks from neutron.common import utils as common_utils from neutron.ipam import utils as ipam_utils LOG = logging.getLogger(__name__) UDP = 'udp' TCP = 'tcp' DNS_PORT = 53 DHCPV4_PORT = 67 DHCPV6_PORT = 547 METADATA_DEFAULT_PREFIX = 16 METADATA_DEFAULT_IP = '169.254.169.254' METADATA_DEFAULT_CIDR = '%s/%d' % (METADATA_DEFAULT_IP, METADATA_DEFAULT_PREFIX) METADATA_PORT = 80 WIN2k3_STATIC_DNS = 249 NS_PREFIX = 'qdhcp-' DNSMASQ_SERVICE_NAME = 'dnsmasq' DHCP_RELEASE_TRIES = 3 DHCP_RELEASE_TRIES_SLEEP = 0.3 # this variable will be removed when neutron-lib is updated with this value DHCP_OPT_CLIENT_ID_NUM = 61 class DictModel(collections.abc.MutableMapping): """Convert dict into an object that provides attribute access to values.""" __slots__ = ['_dictmodel_internal_storage'] def __init__(self, *args, **kwargs): """Convert dict values to DictModel values.""" temp_dict = dict(*args) self._dictmodel_internal_storage = {} def needs_upgrade(item): """Check if `item` is a dict and needs to be changed to DictModel. """ return isinstance(item, dict) and not isinstance(item, DictModel) def upgrade(item): """Upgrade item if it needs to be upgraded.""" if needs_upgrade(item): return DictModel(item) else: return item for key, value in itertools.chain(temp_dict.items(), kwargs.items()): if isinstance(value, (list, tuple)): # Keep the same type but convert dicts to DictModels self._dictmodel_internal_storage[key] = type(value)( (upgrade(item) for item in value) ) elif needs_upgrade(value): # Change dict instance values to DictModel instance values self._dictmodel_internal_storage[key] = DictModel(value) else: self._dictmodel_internal_storage[key] = value def __getattr__(self, name): try: if name == '_dictmodel_internal_storage': return super(DictModel, self).__getattr__(name) return self.__getitem__(name) except KeyError as e: raise AttributeError(e) def __setattr__(self, name, value): if name == '_dictmodel_internal_storage': super(DictModel, self).__setattr__(name, value) else: self._dictmodel_internal_storage[name] = value def __delattr__(self, name): del self._dictmodel_internal_storage[name] def __str__(self): pairs = ['%s=%s' % (k, v) for k, v in self._dictmodel_internal_storage.items()] return ', '.join(sorted(pairs)) def __getitem__(self, name): return self._dictmodel_internal_storage[name] def __setitem__(self, name, value): self._dictmodel_internal_storage[name] = value def __delitem__(self, name): del self._dictmodel_internal_storage[name] def __iter__(self): return iter(self._dictmodel_internal_storage) def __len__(self): return len(self._dictmodel_internal_storage) def __copy__(self): return type(self)(self) def __deepcopy__(self, memo): cls = self.__class__ result = cls.__new__(cls) memo[id(self)] = result result._dictmodel_internal_storage = copy.deepcopy( 
self._dictmodel_internal_storage) return result class NetModel(DictModel): def __init__(self, *args, **kwargs): super(NetModel, self).__init__(*args, **kwargs) self._ns_name = "%s%s" % (NS_PREFIX, self.id) @property def namespace(self): return self._ns_name @six.add_metaclass(abc.ABCMeta) class DhcpBase(object): def __init__(self, conf, network, process_monitor, version=None, plugin=None): self.conf = conf self.network = network self.process_monitor = process_monitor self.device_manager = DeviceManager(self.conf, plugin) self.version = version @abc.abstractmethod def enable(self): """Enables DHCP for this network.""" @abc.abstractmethod def disable(self, retain_port=False, block=False): """Disable dhcp for this network.""" def restart(self): """Restart the dhcp service for the network.""" self.disable(retain_port=True, block=True) self.enable() @abc.abstractproperty def active(self): """Boolean representing the running state of the DHCP server.""" @abc.abstractmethod def reload_allocations(self): """Force the DHCP server to reload the assignment database.""" @classmethod def existing_dhcp_networks(cls, conf): """Return a list of existing networks ids that we have configs for.""" raise NotImplementedError() @classmethod def check_version(cls): """Execute version checks on DHCP server.""" raise NotImplementedError() @classmethod def get_isolated_subnets(cls, network): """Returns a dict indicating whether or not a subnet is isolated""" raise NotImplementedError() @classmethod def should_enable_metadata(cls, conf, network): """True if the metadata-proxy should be enabled for the network.""" raise NotImplementedError() @six.add_metaclass(abc.ABCMeta) class DhcpLocalProcess(DhcpBase): PORTS = [] def __init__(self, conf, network, process_monitor, version=None, plugin=None): super(DhcpLocalProcess, self).__init__(conf, network, process_monitor, version, plugin) self.confs_dir = self.get_confs_dir(conf) self.network_conf_dir = os.path.join(self.confs_dir, network.id) fileutils.ensure_tree(self.network_conf_dir, mode=0o755) @staticmethod def get_confs_dir(conf): return os.path.abspath(os.path.normpath(conf.dhcp_confs)) def get_conf_file_name(self, kind): """Returns the file name for a given kind of config file.""" return os.path.join(self.network_conf_dir, kind) def _remove_config_files(self): shutil.rmtree(self.network_conf_dir, ignore_errors=True) @staticmethod def _get_all_subnets(network): non_local_subnets = getattr(network, 'non_local_subnets', []) return network.subnets + non_local_subnets def _enable_dhcp(self): """check if there is a subnet within the network with dhcp enabled.""" for subnet in self.network.subnets: if subnet.enable_dhcp: return True return False def enable(self): """Enables DHCP for this network by spawning a local process.""" try: common_utils.wait_until_true(self._enable, timeout=300) except common_utils.WaitTimeout: LOG.error("Failed to start DHCP process for network %s", self.network.id) def _enable(self): try: if self.active: self.disable(retain_port=True, block=True) if self._enable_dhcp(): fileutils.ensure_tree(self.network_conf_dir, mode=0o755) interface_name = self.device_manager.setup(self.network) self.interface_name = interface_name self.spawn_process() return True except exceptions.ProcessExecutionError as error: LOG.debug("Spawning DHCP process for network %s failed; " "Error: %s", self.network.id, error) return False def _get_process_manager(self, cmd_callback=None): return external_process.ProcessManager( conf=self.conf, uuid=self.network.id, 
namespace=self.network.namespace, service=DNSMASQ_SERVICE_NAME, default_cmd_callback=cmd_callback, pid_file=self.get_conf_file_name('pid'), run_as_root=True) def disable(self, retain_port=False, block=False): """Disable DHCP for this network by killing the local process.""" self.process_monitor.unregister(self.network.id, DNSMASQ_SERVICE_NAME) self._get_process_manager().disable() if block: common_utils.wait_until_true(lambda: not self.active) if not retain_port: self._destroy_namespace_and_port() self._remove_config_files() def _destroy_namespace_and_port(self): try: self.device_manager.destroy(self.network, self.interface_name) except RuntimeError: LOG.warning('Failed trying to delete interface: %s', self.interface_name) try: ip_lib.delete_network_namespace(self.network.namespace) except RuntimeError: LOG.warning('Failed trying to delete namespace: %s', self.network.namespace) def _get_value_from_conf_file(self, kind, converter=None): """A helper function to read a value from one of the state files.""" file_name = self.get_conf_file_name(kind) msg = _('Error while reading %s') try: with open(file_name, 'r') as f: try: return converter(f.read()) if converter else f.read() except ValueError: msg = _('Unable to convert value in %s') except IOError: msg = _('Unable to access %s') LOG.debug(msg, file_name) return None @property def interface_name(self): return self._get_value_from_conf_file('interface') @interface_name.setter def interface_name(self, value): interface_file_path = self.get_conf_file_name('interface') file_utils.replace_file(interface_file_path, value) @property def active(self): return self._get_process_manager().active @abc.abstractmethod def spawn_process(self): pass class Dnsmasq(DhcpLocalProcess): # The ports that need to be opened when security policies are active # on the Neutron port used for DHCP. These are provided as a convenience # for users of this class. PORTS = {constants.IP_VERSION_4: [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV4_PORT)], constants.IP_VERSION_6: [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV6_PORT)], } _SUBNET_TAG_PREFIX = 'subnet-%s' _PORT_TAG_PREFIX = 'port-%s' _ID = 'id:' _IS_DHCP_RELEASE6_SUPPORTED = None @classmethod def check_version(cls): pass @classmethod def existing_dhcp_networks(cls, conf): """Return a list of existing networks ids that we have configs for.""" confs_dir = cls.get_confs_dir(conf) try: return [ c for c in os.listdir(confs_dir) if uuidutils.is_uuid_like(c) ] except OSError: return [] def _build_cmdline_callback(self, pid_file): # We ignore local resolv.conf if dns servers are specified # or if local resolution is explicitly disabled. 
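# For orientation, an abridged example of the command line assembled
# below (ids and paths are illustrative):
#
#   dnsmasq --no-hosts --no-resolv
#     --pid-file=/var/lib/neutron/dhcp/<net-id>/pid
#     --dhcp-hostsfile=/var/lib/neutron/dhcp/<net-id>/host
#     --dhcp-optsfile=/var/lib/neutron/dhcp/<net-id>/opts
#     --dhcp-leasefile=/var/lib/neutron/dhcp/<net-id>/leases
#     --dhcp-range=set:subnet-<id>,10.0.0.0,static,255.255.255.0,86400s
#     --dhcp-lease-max=256 --local-service --bind-dynamic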
_no_resolv = ( '--no-resolv' if self.conf.dnsmasq_dns_servers or not self.conf.dnsmasq_local_resolv else '') cmd = [ 'dnsmasq', '--no-hosts', _no_resolv, '--pid-file=%s' % pid_file, '--dhcp-hostsfile=%s' % self.get_conf_file_name('host'), '--addn-hosts=%s' % self.get_conf_file_name('addn_hosts'), '--dhcp-optsfile=%s' % self.get_conf_file_name('opts'), '--dhcp-leasefile=%s' % self.get_conf_file_name('leases'), '--dhcp-match=set:ipxe,175', '--dhcp-userclass=set:ipxe6,iPXE', '--local-service', '--bind-dynamic', ] if not self.device_manager.driver.bridged: cmd += [ '--bridge-interface=%s,tap*' % self.interface_name, ] possible_leases = 0 for subnet in self._get_all_subnets(self.network): mode = None # if a subnet is specified to have dhcp disabled if not subnet.enable_dhcp: continue if subnet.ip_version == 4: mode = 'static' else: # Note(scollins) If the IPv6 attributes are not set, set it as # static to preserve previous behavior addr_mode = getattr(subnet, 'ipv6_address_mode', None) ra_mode = getattr(subnet, 'ipv6_ra_mode', None) if (addr_mode in [constants.DHCPV6_STATEFUL, constants.DHCPV6_STATELESS] or not addr_mode and not ra_mode): mode = 'static' cidr = netaddr.IPNetwork(subnet.cidr) if self.conf.dhcp_lease_duration == -1: lease = 'infinite' else: lease = '%ss' % self.conf.dhcp_lease_duration # mode is optional; if it is not set, skip the dhcp-range if mode: if subnet.ip_version == 4: cmd.append('--dhcp-range=%s%s,%s,%s,%s,%s' % ('set:', self._SUBNET_TAG_PREFIX % subnet.id, cidr.network, mode, cidr.netmask, lease)) else: if cidr.prefixlen < 64: LOG.debug('Ignoring subnet %(subnet)s, CIDR has ' 'prefix length < 64: %(cidr)s', {'subnet': subnet.id, 'cidr': cidr}) continue cmd.append('--dhcp-range=%s%s,%s,%s,%d,%s' % ('set:', self._SUBNET_TAG_PREFIX % subnet.id, cidr.network, mode, cidr.prefixlen, lease)) possible_leases += cidr.size mtu = getattr(self.network, 'mtu', 0) # Do not advertise unknown mtu if mtu > 0: cmd.append('--dhcp-option-force=option:mtu,%d' % mtu) # Cap the limit because creating lots of subnets can inflate # this possible lease cap. cmd.append('--dhcp-lease-max=%d' % min(possible_leases, self.conf.dnsmasq_lease_max)) if self.conf.dhcp_renewal_time > 0: cmd.append('--dhcp-option-force=option:T1,%ds' % self.conf.dhcp_renewal_time) if self.conf.dhcp_rebinding_time > 0: cmd.append('--dhcp-option-force=option:T2,%ds' % self.conf.dhcp_rebinding_time) cmd.append('--conf-file=%s' % self.conf.dnsmasq_config_file) for server in self.conf.dnsmasq_dns_servers: cmd.append('--server=%s' % server) if self.conf.dns_domain: cmd.append('--domain=%s' % self.conf.dns_domain) if self.conf.dhcp_broadcast_reply: cmd.append('--dhcp-broadcast') if self.conf.dnsmasq_base_log_dir: log_dir = os.path.join( self.conf.dnsmasq_base_log_dir, self.network.id) try: if not os.path.exists(log_dir): os.makedirs(log_dir) except OSError: LOG.error('Error while creating dnsmasq log dir: %s', log_dir) else: log_filename = os.path.join(log_dir, 'dhcp_dns_log') cmd.append('--log-queries') cmd.append('--log-dhcp') cmd.append('--log-facility=%s' % log_filename) return cmd def spawn_process(self): """Spawn the process, if it's not spawned already.""" # we only need to generate the lease file the first time dnsmasq starts # rather than on every reload since dnsmasq will keep the file current self._output_init_lease_file() self._spawn_or_reload_process(reload_with_HUP=False) def _spawn_or_reload_process(self, reload_with_HUP): """Spawns or reloads a Dnsmasq process for the network. 
When reload_with_HUP is True, dnsmasq receives a HUP signal, or it's reloaded if the process is not running. """ self._output_config_files() pm = self._get_process_manager( cmd_callback=self._build_cmdline_callback) pm.enable(reload_cfg=reload_with_HUP, ensure_active=True) self.process_monitor.register(uuid=self.network.id, service_name=DNSMASQ_SERVICE_NAME, monitored_process=pm) def _is_dhcp_release6_supported(self): if self._IS_DHCP_RELEASE6_SUPPORTED is None: self._IS_DHCP_RELEASE6_SUPPORTED = checks.dhcp_release6_supported() if not self._IS_DHCP_RELEASE6_SUPPORTED: LOG.warning("dhcp_release6 is not present on this system, " "will not call it again.") return self._IS_DHCP_RELEASE6_SUPPORTED def _release_lease(self, mac_address, ip, ip_version, client_id=None, server_id=None, iaid=None): """Release a DHCP lease.""" if ip_version == constants.IP_VERSION_6: if not self._is_dhcp_release6_supported(): return cmd = ['dhcp_release6', '--iface', self.interface_name, '--ip', ip, '--client-id', client_id, '--server-id', server_id, '--iaid', iaid] else: cmd = ['dhcp_release', self.interface_name, ip, mac_address] if client_id: cmd.append(client_id) ip_wrapper = ip_lib.IPWrapper(namespace=self.network.namespace) try: ip_wrapper.netns.execute(cmd, run_as_root=True) except RuntimeError as e: # if we fail to release a single lease there's # no need to propagate the error further LOG.warning('DHCP release failed for %(cmd)s. ' 'Reason: %(e)s', {'cmd': cmd, 'e': e}) def _output_config_files(self): self._output_hosts_file() self._output_addn_hosts_file() self._output_opts_file() def reload_allocations(self): """Rebuild the dnsmasq config and signal the dnsmasq to reload.""" # If all subnets turn off dhcp, kill the process. if not self._enable_dhcp(): self.disable() LOG.debug('Killing dnsmasq for network since all subnets have ' 'turned off DHCP: %s', self.network.id) return if not self.interface_name: # we land here if above has been called and we receive port # delete notifications for the network LOG.debug('Agent does not have an interface on this network ' 'anymore, skipping reload: %s', self.network.id) return self._release_unused_leases() self._spawn_or_reload_process(reload_with_HUP=True) LOG.debug('Reloading allocations for network: %s', self.network.id) self.device_manager.update(self.network, self.interface_name) def _sort_fixed_ips_for_dnsmasq(self, fixed_ips, v6_nets): """Sort fixed_ips so that stateless IPv6 subnets appear first. For example, if a port with v6 extra_dhcp_opts is on a network with IPv4 and IPv6 stateless subnets, then the dhcp host file will have the below 2 entries for the same MAC: fa:16:3e:8f:9d:65,30.0.0.5,set:aabc7d33-4874-429e-9637-436e4232d2cd (entry for IPv4 dhcp) fa:16:3e:8f:9d:65,set:aabc7d33-4874-429e-9637-436e4232d2cd (entry for stateless IPv6 for v6 options) dnsmasq internal details for processing host file entries: 1) dnsmasq reads the host file from EOF. 2) So it first picks up the stateless IPv6 entry, fa:16:3e:8f:9d:65,set:aabc7d33-4874-429e-9637-436e4232d2cd 3) But dnsmasq doesn't have sufficient checks to skip this entry and pick the next entry, to process the dhcp IPv4 request. 4) So dnsmasq uses this entry to process the dhcp IPv4 request. 5) As there is no ip in this entry, dnsmasq logs "no address available" and fails to send a DHCPOFFER message. 
As we rely on internal details of dnsmasq to understand and fix the issue, Ihar sent a mail to the dnsmasq-discuss mailing list http://lists.thekelleys.org.uk/pipermail/dnsmasq-discuss/2015q2/ 009650.html So if we reverse the order of writing entries in the host file, so that the entry for stateless IPv6 comes first, then dnsmasq can correctly fetch the IPv4 address. """ return sorted( fixed_ips, key=lambda fip: ((fip.subnet_id in v6_nets) and ( v6_nets[fip.subnet_id].ipv6_address_mode == ( constants.DHCPV6_STATELESS))), reverse=True) def _merge_alloc_addr6_list(self, fixed_ips, v6_nets): """Merge fixed_ips to ipv6 addr lists If a port has multiple IPv6 addresses in the same subnet, merge them into one entry listing all the addresses. Creating a single dhcp-host entry with the list of addresses allows dnsmasq to make all addresses available as requests for leases arrive. See dnsmasq-discuss mailing list: http://lists.thekelleys.org.uk/ pipermail/dnsmasq-discuss/2020q1/013743.html """ by_subnet = {} NewFip = collections.namedtuple('NewFip', 'subnet_id ip_address') merged = [] for fip in fixed_ips: if (fip.subnet_id in v6_nets and v6_nets[fip.subnet_id].ipv6_address_mode == ( constants.DHCPV6_STATEFUL)): if fip.subnet_id not in by_subnet: by_subnet.update({fip.subnet_id: []}) by_subnet[fip.subnet_id].append(fip.ip_address) else: merged.append(fip) for subnet_id in by_subnet: addr6_list = ','.join([self._format_address_for_dnsmasq(ip) for ip in by_subnet[subnet_id]]) merged.append(NewFip(subnet_id=subnet_id, ip_address=addr6_list)) return merged def _get_dns_assignment(self, ip_address, dns_assignment): """Get DNS assignment hostname and fqdn In dnsmasq it is not possible to configure two dhcp-host entries mapped to a single client mac address with IP addresses in the same subnet. When receiving a request dnsmasq will match on the first entry in its config, and lease that address. The second entry will never be used. For IPv6 it is possible to add multiple IPv6 addresses to a single dhcp-host entry by placing a list of addresses in brackets, i.e. [addr1][addr2][...]. See dnsmasq mailing list: http://lists.thekelleys.org.uk/pipermail/ dnsmasq-discuss/2020q1/013671.html. Since we cannot have two hostnames in the dhcp-host entry this method picks the first hostname and fqdn it finds matching one of the IPs in the fixed-ips in dns_assignment, or the hostname is generated based on the first fixed-ip. :param ip_address: IP address or a list of IPv6 addresses :param dns_assignment: DNS assignments :return: hostname, fqdn """ hostname, fqdn = None, None ip_addresses = ip_address.replace('[', '').split(']') if dns_assignment: dns_ip_map = {d.ip_address: d for d in dns_assignment} for addr in ip_addresses: # If the dns_name attribute is supported by the ports API, return # the dns_assignment generated by the Neutron server. Otherwise, # generate hostname and fqdn locally (previous behaviour) if addr in dns_ip_map: hostname = dns_ip_map[addr].hostname fqdn = dns_ip_map[addr].fqdn break if hostname is None: hostname = ('host-%s' % ip_addresses[0].replace('.', '-').replace(':', '-')) fqdn = hostname if self.conf.dns_domain: fqdn = '%s.%s' % (fqdn, self.conf.dns_domain) return hostname, fqdn def _iter_hosts(self, merge_addr6_list=False): """Iterate over hosts. For each host on the network we yield a tuple containing: ( port, # a DictModel instance representing the port. alloc, # a DictModel instance of the allocated ip and subnet. 
# if alloc is None, it means there is no need to allocate # an IPv6 address because of stateless DHCPv6 network. hostname, # Host name. fqdn, # Canonical hostname in the format 'hostname[.domain]'. no_dhcp, # A flag indicating that the address doesn't need a DHCP # IP address. no_opts, # A flag indicating that options shouldn't be written ) """ v6_nets = dict((subnet.id, subnet) for subnet in self._get_all_subnets(self.network) if subnet.ip_version == 6) for port in self.network.ports: fixed_ips = self._sort_fixed_ips_for_dnsmasq(port.fixed_ips, v6_nets) # TODO(hjensas): Drop this conditional and option once distros # generally have dnsmasq supporting addr6 list and range. if self.conf.dnsmasq_enable_addr6_list and merge_addr6_list: fixed_ips = self._merge_alloc_addr6_list(fixed_ips, v6_nets) # Confirm whether the Neutron server supports the dns_name attribute # in the ports API dns_assignment = getattr(port, 'dns_assignment', None) for alloc in fixed_ips: no_dhcp = False no_opts = False if alloc.subnet_id in v6_nets: addr_mode = v6_nets[alloc.subnet_id].ipv6_address_mode no_dhcp = addr_mode in (constants.IPV6_SLAAC, constants.DHCPV6_STATELESS) # we don't setup anything for SLAAC. It doesn't make sense # to provide options for a client that won't use DHCP no_opts = addr_mode == constants.IPV6_SLAAC hostname, fqdn = self._get_dns_assignment(alloc.ip_address, dns_assignment) yield (port, alloc, hostname, fqdn, no_dhcp, no_opts) def _get_port_extra_dhcp_opts(self, port): return getattr(port, edo_ext.EXTRADHCPOPTS, False) def _output_init_lease_file(self): """Write a fake lease file to bootstrap dnsmasq. The generated file is passed to the --dhcp-leasefile option of dnsmasq. This is used as a bootstrapping mechanism to avoid NAKing active leases when a dhcp server is scheduled to another agent. Using a leasefile will also prevent dnsmasq from NAKing or ignoring renewals after a restart. Format is as follows: epoch-timestamp mac_addr ip_addr hostname client-ID """ filename = self.get_conf_file_name('leases') buf = six.StringIO() LOG.debug('Building initial lease file: %s', filename) # we make up a lease time for the database entry if self.conf.dhcp_lease_duration == -1: # Even with an infinite lease, a client may choose to renew a # previous lease on reboot or interface bounce so we should have # an entry for it. # Dnsmasq timestamp format for an infinite lease is 0. timestamp = 0 else: timestamp = int(time.time()) + self.conf.dhcp_lease_duration dhcpv4_enabled_subnet_ids = [ s.id for s in self._get_all_subnets(self.network) if s.enable_dhcp and s.ip_version == constants.IP_VERSION_4] for host_tuple in self._iter_hosts(): port, alloc, hostname, name, no_dhcp, no_opts = host_tuple # don't write an ip address which belongs to a dhcp disabled subnet # or an IPv6 subnet. if no_dhcp or alloc.subnet_id not in dhcpv4_enabled_subnet_ids: continue # all that matters is the mac address and IP. the hostname and # client ID will be overwritten on the next renewal. buf.write('%s %s %s * *\n' % (timestamp, port.mac_address, alloc.ip_address)) contents = buf.getvalue() file_utils.replace_file(filename, contents) LOG.debug('Done building initial lease file %s with contents:\n%s', filename, contents) return filename @staticmethod def _format_address_for_dnsmasq(address): # (dzyu) Check if it is a legal IPv6 address; if so, wrap it with # '[]' to let dnsmasq distinguish a MAC address from an # IPv6 address. 
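# e.g. _format_address_for_dnsmasq('2001:db8::1') -> '[2001:db8::1]'
#      _format_address_for_dnsmasq('10.0.0.5')    -> '10.0.0.5'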
if netaddr.valid_ipv6(address): return '[%s]' % address return address def _output_hosts_file(self): """Writes a dnsmasq compatible dhcp hosts file. The generated file is sent to the --dhcp-hostsfile option of dnsmasq, and lists the hosts on the network which should receive a dhcp lease. Each line in this file is in the form:: 'mac_address,FQDN,ip_address' IMPORTANT NOTE: a dnsmasq instance does not resolve hosts defined in this file if it did not give a lease to a host listed in it (e.g.: multiple dnsmasq instances on the same network if this network is on multiple network nodes). This file only defines hosts which should receive a dhcp lease; host resolution itself is defined by the `_output_addn_hosts_file` method. """ buf = six.StringIO() filename = self.get_conf_file_name('host') LOG.debug('Building host file: %s', filename) dhcp_enabled_subnet_ids = [s.id for s in self._get_all_subnets(self.network) if s.enable_dhcp] # NOTE(ihrachyshka): the loop should not log anything inside it, to # avoid potential performance drop when lots of hosts are dumped for host_tuple in self._iter_hosts(merge_addr6_list=True): port, alloc, hostname, name, no_dhcp, no_opts = host_tuple if no_dhcp: if not no_opts and self._get_port_extra_dhcp_opts(port): buf.write('%s,%s%s\n' % ( port.mac_address, 'set:', self._PORT_TAG_PREFIX % port.id)) continue # don't write ip address which belongs to a dhcp disabled subnet. if alloc.subnet_id not in dhcp_enabled_subnet_ids: continue ip_address = self._format_address_for_dnsmasq(alloc.ip_address) if self._get_port_extra_dhcp_opts(port): client_id = self._get_client_id(port) if client_id and len(port.extra_dhcp_opts) > 1: buf.write('%s,%s%s,%s,%s,%s%s\n' % (port.mac_address, self._ID, client_id, name, ip_address, 'set:', self._PORT_TAG_PREFIX % port.id)) elif client_id and len(port.extra_dhcp_opts) == 1: buf.write('%s,%s%s,%s,%s\n' % (port.mac_address, self._ID, client_id, name, ip_address)) else: buf.write('%s,%s,%s,%s%s\n' % (port.mac_address, name, ip_address, 'set:', self._PORT_TAG_PREFIX % port.id)) else: buf.write('%s,%s,%s\n' % (port.mac_address, name, ip_address)) file_utils.replace_file(filename, buf.getvalue()) LOG.debug('Done building host file %s', filename) return filename def _get_client_id(self, port): if self._get_port_extra_dhcp_opts(port): for opt in port.extra_dhcp_opts: if opt.opt_name in (edo_ext.DHCP_OPT_CLIENT_ID, DHCP_OPT_CLIENT_ID_NUM, str(DHCP_OPT_CLIENT_ID_NUM)): return opt.opt_value def _read_hosts_file_leases(self, filename): leases = set() try: with open(filename) as f: for l in f.readlines(): host = l.strip().split(',') mac = host[0] client_id = None if host[1].startswith('set:'): continue if host[1].startswith(self._ID): ip = host[3].strip('[]') client_id = host[1][len(self._ID):] else: ip = host[2].strip('[]') leases.add((ip, mac, client_id)) except (OSError, IOError): LOG.debug('Error while reading hosts file %s', filename) return leases def _read_leases_file_leases(self, filename, ip_version=None): """Read dnsmasq dhcp leases file Read information from the leases file, which is needed to pass to the dhcp_release6 command line utility if some of these leases are not needed anymore. Each line in the dnsmasq leases file is one of the following: * duid entry: duid server_duid There MUST be a single duid entry per file * ipv4 entry: space separated list - The expiration time (seconds since unix epoch) or duration (if dnsmasq is compiled with HAVE_BROKEN_RTC) of the lease. 0 means infinite. 
- The link address, in format XX-YY:YY:YY[...], where XX is the ARP hardware type. "XX-" may be omitted for Ethernet. - The IPv4 address - The hostname (sent by the client or assigned by dnsmasq) or '*' for none. - The client identifier (colon-separated hex bytes) or '*' for none. * ipv6 entry: space separated list - The expiration time or duration - The IAID as a Big Endian decimal number, prefixed by T for IA_TAs (temporary addresses). - The IPv6 address - The hostname or '*' - The client DUID (colon-separated hex bytes) or '*' if unknown The original discussion is on the dnsmasq mailing list: http://lists.thekelleys.org.uk/pipermail/\ dnsmasq-discuss/2016q2/010595.html :param filename: leases file :param ip_version: IP version of entries to return, or None for all :return: dict, keys are IP(v6) addresses, values are dicts containing iaid, client_id and server_id """ leases = {} server_id = None if os.path.exists(filename): with open(filename) as f: for l in f.readlines(): if l.startswith('duid'): if not server_id: server_id = l.strip().split()[1] continue else: LOG.warning('Multiple DUID entries in %s ' 'lease file, dnsmasq is possibly ' 'not functioning properly', filename) continue parts = l.strip().split() if len(parts) != 5: LOG.warning('Invalid lease entry %s found in %s ' 'lease file, ignoring', parts, filename) continue (iaid, ip, client_id) = parts[1], parts[2], parts[4] ip = ip.strip('[]') if (ip_version and netaddr.IPAddress(ip).version != ip_version): continue leases[ip] = {'iaid': iaid, 'client_id': client_id, 'server_id': server_id } return leases def _release_unused_leases(self): filename = self.get_conf_file_name('host') old_leases = self._read_hosts_file_leases(filename) leases_filename = self.get_conf_file_name('leases') cur_leases = self._read_leases_file_leases(leases_filename) if not cur_leases: return v4_leases = set() for (k, v) in cur_leases.items(): # IPv4 leases have a MAC, IPv6 ones do not, so we must ignore if netaddr.IPAddress(k).version == constants.IP_VERSION_4: # treat '*' as None, see note in _read_leases_file_leases() client_id = v['client_id'] if client_id == '*': client_id = None v4_leases.add((k, v['iaid'], client_id)) new_leases = set() for port in self.network.ports: client_id = self._get_client_id(port) for alloc in port.fixed_ips: new_leases.add((alloc.ip_address, port.mac_address, client_id)) # If an entry is in the leases or host file(s), but doesn't have # a fixed IP on a corresponding neutron port, consider it stale. entries_to_release = (v4_leases | old_leases) - new_leases if not entries_to_release: return # If the VM advertises a client ID in its lease, but it's not set in # the port's Extra DHCP Opts, the lease will not be filtered above. # Release the lease only if a client ID is set in the port DB and # there is a mismatch. Otherwise the lease is released when other # ports are deleted/updated entries_with_no_client_id = set() for ip, mac, client_id in entries_to_release: if client_id: entry_no_client_id = (ip, mac, None) if (entry_no_client_id in old_leases and entry_no_client_id in new_leases): entries_with_no_client_id.add((ip, mac, client_id)) entries_to_release -= entries_with_no_client_id # Try DHCP_RELEASE_TRIES times to release a lease, re-reading the # file each time to see if it's still there. We loop +1 times to # check the lease file one last time before logging any remaining # entries. 
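# With the module constants above (DHCP_RELEASE_TRIES = 3,
# DHCP_RELEASE_TRIES_SLEEP = 0.3) the schedule below works out to:
#
#   i = 0..2: attempt a release for each remaining entry, sleep 0.3s,
#             re-read the leases file
#   i = 3:    no release attempt; only check what is still present and
#             log anything left over
#
# (a restatement of the loop that follows, not extra behaviour)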
for i in range(DHCP_RELEASE_TRIES + 1): entries_not_present = set() for ip, mac, client_id in entries_to_release: try: entry = cur_leases[ip] except KeyError: entries_not_present.add((ip, mac, client_id)) continue # if not the final pass, try to release if i < DHCP_RELEASE_TRIES: ip_version = netaddr.IPAddress(ip).version if ip_version == constants.IP_VERSION_6: client_id = entry['client_id'] self._release_lease(mac, ip, ip_version, client_id, entry['server_id'], entry['iaid']) # Remove elements that were not in the current leases file, # no need to look for them again, and see if we're done. entries_to_release -= entries_not_present if not entries_to_release: break if i < DHCP_RELEASE_TRIES: time.sleep(DHCP_RELEASE_TRIES_SLEEP) cur_leases = self._read_leases_file_leases(leases_filename) if not cur_leases: break else: LOG.warning("Could not release DHCP leases for these IP " "addresses after %d tries: %s", DHCP_RELEASE_TRIES, ', '.join(ip for ip, m, c in entries_to_release)) def _output_addn_hosts_file(self): """Writes a dnsmasq compatible additional hosts file. The generated file is sent to the --addn-hosts option of dnsmasq, and lists the hosts on the network which should be resolved even if the dnsmasq instance did not give a lease to the host (see the `_output_hosts_file` method). Each line in this file is in the same form as a standard /etc/hosts file. """ buf = six.StringIO() for host_tuple in self._iter_hosts(): port, alloc, hostname, fqdn, no_dhcp, no_opts = host_tuple # It is compulsory to write the `fqdn` before the `hostname` in # order to obtain it in PTR responses. if alloc: buf.write('%s\t%s %s\n' % (alloc.ip_address, fqdn, hostname)) addn_hosts = self.get_conf_file_name('addn_hosts') file_utils.replace_file(addn_hosts, buf.getvalue()) return addn_hosts def _output_opts_file(self): """Write a dnsmasq compatible options file.""" options, subnet_index_map = self._generate_opts_per_subnet() options += self._generate_opts_per_port(subnet_index_map) name = self.get_conf_file_name('opts') file_utils.replace_file(name, '\n'.join(options)) return name def _generate_opts_per_subnet(self): options = [] subnets_without_nameservers = set() if self.conf.enable_isolated_metadata or self.conf.force_metadata: subnet_to_interface_ip = self._make_subnet_interface_ip_map() isolated_subnets = self.get_isolated_subnets(self.network) for subnet in self._get_all_subnets(self.network): addr_mode = getattr(subnet, 'ipv6_address_mode', None) segment_id = getattr(subnet, 'segment_id', None) if (not subnet.enable_dhcp or (subnet.ip_version == 6 and addr_mode == constants.IPV6_SLAAC)): continue if subnet.dns_nameservers: if ((subnet.ip_version == 4 and subnet.dns_nameservers == ['0.0.0.0']) or (subnet.ip_version == 6 and subnet.dns_nameservers == ['::'])): # Special case: Do not announce DNS servers options.append( self._format_option( subnet.ip_version, self._SUBNET_TAG_PREFIX % subnet.id, 'dns-server')) else: options.append( self._format_option( subnet.ip_version, self._SUBNET_TAG_PREFIX % subnet.id, 'dns-server', ','.join( Dnsmasq._convert_to_literal_addrs( subnet.ip_version, subnet.dns_nameservers)))) else: # use the dnsmasq ip as the nameserver only if no dns-server # was submitted by the server subnets_without_nameservers.add(subnet.id) if self.conf.dns_domain and subnet.ip_version == 6: options.append( self._format_option( subnet.ip_version, self._SUBNET_TAG_PREFIX % subnet.id, "domain-search", ''.join(self.conf.dns_domain)))
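# The block below assembles the per-subnet routing options. Illustrative # (assumed) output for a subnet with gateway 10.0.0.1 and one extra host # route 192.168.1.0/24 via 10.0.0.2, with the tag text abbreviated: # tag:<subnet-tag>,option:classless-static-route,192.168.1.0/24,10.0.0.2,0.0.0.0/0,10.0.0.1 # with the same route list repeated under the Microsoft variant of the # option (WIN2k3_STATIC_DNS) for Windows clients.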
gateway = subnet.gateway_ip host_routes = [] for hr in subnet.host_routes: if hr.destination == constants.IPv4_ANY: if not gateway: gateway = hr.nexthop else: host_routes.append("%s,%s" % (hr.destination, hr.nexthop)) # Add host routes for isolated network segments if ((self.conf.force_metadata or (isolated_subnets[subnet.id] and self.conf.enable_isolated_metadata)) and subnet.ip_version == 4): subnet_dhcp_ip = subnet_to_interface_ip.get(subnet.id) if subnet_dhcp_ip: host_routes.append( '%s/32,%s' % (METADATA_DEFAULT_IP, subnet_dhcp_ip) ) elif not isolated_subnets[subnet.id] and gateway: host_routes.append( '%s/32,%s' % (METADATA_DEFAULT_IP, gateway) ) if subnet.ip_version == 4: for s in self._get_all_subnets(self.network): sub_segment_id = getattr(s, 'segment_id', None) if (s.ip_version == 4 and s.cidr != subnet.cidr and sub_segment_id == segment_id): host_routes.insert(0, "%s,0.0.0.0" % s.cidr) if host_routes: if gateway: host_routes.append("%s,%s" % (constants.IPv4_ANY, gateway)) options.append( self._format_option( subnet.ip_version, self._SUBNET_TAG_PREFIX % subnet.id, 'classless-static-route', ','.join(host_routes))) options.append( self._format_option( subnet.ip_version, self._SUBNET_TAG_PREFIX % subnet.id, WIN2k3_STATIC_DNS, ','.join(host_routes))) if gateway: options.append(self._format_option( subnet.ip_version, self._SUBNET_TAG_PREFIX % subnet.id, 'router', gateway)) else: options.append(self._format_option( subnet.ip_version, self._SUBNET_TAG_PREFIX % subnet.id, 'router')) return options, subnets_without_nameservers def _generate_opts_per_port(self, subnets_without_nameservers): options = [] dhcp_ips = collections.defaultdict(list) for port in self.network.ports: if self._get_port_extra_dhcp_opts(port): port_ip_versions = set( [netaddr.IPAddress(ip.ip_address).version for ip in port.fixed_ips]) for opt in port.extra_dhcp_opts: if opt.opt_name in (edo_ext.DHCP_OPT_CLIENT_ID, DHCP_OPT_CLIENT_ID_NUM, str(DHCP_OPT_CLIENT_ID_NUM)): continue opt_ip_version = opt.ip_version if opt_ip_version in port_ip_versions: options.append( self._format_option( opt_ip_version, self._PORT_TAG_PREFIX % port.id, opt.opt_name, opt.opt_value)) else: LOG.info("Cannot apply dhcp option %(opt)s " "because it's ip_version %(version)d " "is not in port's address IP versions", {'opt': opt.opt_name, 'version': opt_ip_version}) # provides all dnsmasq ip as dns-server if there is more than # one dnsmasq for a subnet and there is no dns-server submitted # by the server if port.device_owner == constants.DEVICE_OWNER_DHCP: for ip in port.fixed_ips: if ip.subnet_id not in subnets_without_nameservers: continue dhcp_ips[ip.subnet_id].append(ip.ip_address) for subnet_id, ips in dhcp_ips.items(): for ip_version in (4, 6): vx_ips = [ip for ip in ips if netaddr.IPAddress(ip).version == ip_version] if len(vx_ips) > 1: options.append( self._format_option( ip_version, self._SUBNET_TAG_PREFIX % subnet_id, 'dns-server', ','.join( Dnsmasq._convert_to_literal_addrs(ip_version, vx_ips)))) return options def _make_subnet_interface_ip_map(self): subnet_lookup = dict( (netaddr.IPNetwork(subnet.cidr), subnet.id) for subnet in self.network.subnets ) retval = {} for addr in ip_lib.get_devices_with_ip(self.network.namespace, name=self.interface_name): ip_net = netaddr.IPNetwork(addr['cidr']) if ip_net in subnet_lookup: retval[subnet_lookup[ip_net]] = addr['cidr'].split('/')[0] return retval def _format_option(self, ip_version, tag, option, *args): """Format DHCP option by option name or code.""" option = str(option) pattern = 
"(tag:(.*),)?(.*)$" matches = re.match(pattern, option) extra_tag = matches.groups()[0] option = matches.groups()[2] # NOTE(TheJulia): prepending option6 to any DHCPv6 option is # indicated as required in the dnsmasq man page for version 2.79. # Testing reveals that the man page is correct, option is not # honored if not in the format "option6:$NUM". For IPv4 we # only apply if the option is non-numeric. if ip_version == constants.IP_VERSION_6: option = 'option6:%s' % option elif not option.isdigit(): option = 'option:%s' % option if extra_tag: tags = ('tag:' + tag, extra_tag[:-1], '%s' % option) else: tags = ('tag:' + tag, '%s' % option) return ','.join(tags + args) @staticmethod def _convert_to_literal_addrs(ip_version, ips): if ip_version == 4: return ips return ['[' + ip + ']' for ip in ips] @classmethod def get_isolated_subnets(cls, network): """Returns a dict indicating whether or not a subnet is isolated A subnet is considered non-isolated if there is a port connected to the subnet, and the port's ip address matches that of the subnet's gateway. The port must be owned by a neutron router. """ isolated_subnets = collections.defaultdict(lambda: True) all_subnets = cls._get_all_subnets(network) subnets = dict((subnet.id, subnet) for subnet in all_subnets) for port in network.ports: if port.device_owner not in constants.ROUTER_INTERFACE_OWNERS: continue for alloc in port.fixed_ips: if (alloc.subnet_id in subnets and subnets[alloc.subnet_id].gateway_ip == alloc.ip_address): isolated_subnets[alloc.subnet_id] = False return isolated_subnets @staticmethod def has_metadata_subnet(subnets): """Check if the subnets has a metadata subnet.""" meta_cidr = netaddr.IPNetwork(METADATA_DEFAULT_CIDR) if any(netaddr.IPNetwork(s.cidr) in meta_cidr for s in subnets): return True return False @classmethod def should_enable_metadata(cls, conf, network): """Determine whether the metadata proxy is needed for a network This method returns True for truly isolated networks (ie: not attached to a router) when enable_isolated_metadata is True, or for all the networks when the force_metadata flags is True. This method also returns True when enable_metadata_network is True, and the network passed as a parameter has a subnet in the link-local CIDR, thus characterizing it as a "metadata" network. The metadata network is used by solutions which do not leverage the l3 agent for providing access to the metadata service via logical routers built with 3rd party backends. """ # Only IPv4 subnets, with dhcp enabled, will use the metadata proxy. 
all_subnets = cls._get_all_subnets(network) v4_dhcp_subnets = [s for s in all_subnets if s.ip_version == 4 and s.enable_dhcp] if not v4_dhcp_subnets: return False if conf.force_metadata: return True if not conf.enable_isolated_metadata: return False if (conf.enable_metadata_network and cls.has_metadata_subnet(all_subnets)): return True isolated_subnets = cls.get_isolated_subnets(network) return any(isolated_subnets[s.id] for s in v4_dhcp_subnets) class DeviceManager(object): def __init__(self, conf, plugin): self.conf = conf self.plugin = plugin self.driver = agent_common_utils.load_interface_driver( conf, get_networks_callback=self.plugin.get_networks) def get_interface_name(self, network, port): """Return interface(device) name for use by the DHCP process.""" return self.driver.get_device_name(port) def get_device_id(self, network): """Return a unique DHCP device ID for this host on the network.""" # There could be more than one dhcp server per network, so create # a device id that combines host and network ids return common_utils.get_dhcp_agent_device_id(network.id, self.conf.host) def _set_default_route_ip_version(self, network, device_name, ip_version): device = ip_lib.IPDevice(device_name, namespace=network.namespace) gateway = device.route.get_gateway(ip_version=ip_version) if gateway: gateway = gateway.get('gateway') for subnet in network.subnets: skip_subnet = ( subnet.ip_version != ip_version or not subnet.enable_dhcp or subnet.gateway_ip is None) if skip_subnet: continue if subnet.ip_version == constants.IP_VERSION_6: # This is duplicating some of the API checks already done, # but some of the functional tests call directly prefixlen = netaddr.IPNetwork(subnet.cidr).prefixlen if prefixlen == 0 or prefixlen > 126: continue modes = [constants.IPV6_SLAAC, constants.DHCPV6_STATELESS] addr_mode = getattr(subnet, 'ipv6_address_mode', None) ra_mode = getattr(subnet, 'ipv6_ra_mode', None) if (prefixlen != 64 and (addr_mode in modes or ra_mode in modes)): continue if gateway != subnet.gateway_ip: LOG.debug('Setting IPv%(version)s gateway for dhcp netns ' 'on net %(n)s to %(ip)s', {'n': network.id, 'ip': subnet.gateway_ip, 'version': ip_version}) # Check for and remove the on-link route for the old # gateway being replaced, if it is outside the subnet is_old_gateway_not_in_subnet = (gateway and not ipam_utils.check_subnet_ip( subnet.cidr, gateway)) if is_old_gateway_not_in_subnet: onlink = device.route.list_onlink_routes(ip_version) existing_onlink_routes = set(r['cidr'] for r in onlink) if gateway in existing_onlink_routes: device.route.delete_route(gateway, scope='link') is_new_gateway_not_in_subnet = (subnet.gateway_ip and not ipam_utils.check_subnet_ip( subnet.cidr, subnet.gateway_ip)) if is_new_gateway_not_in_subnet: device.route.add_route(subnet.gateway_ip, scope='link') device.route.add_gateway(subnet.gateway_ip) return # No subnets on the network have a valid gateway. Clean it up to avoid # confusion from seeing an invalid gateway here. if gateway is not None: LOG.debug('Removing IPv%(version)s gateway for dhcp netns on ' 'net %(n)s', {'n': network.id, 'version': ip_version}) device.route.delete_gateway(gateway) def _set_default_route(self, network, device_name): """Sets the default gateway for this dhcp namespace. This method is idempotent and will only adjust the route if adjusting it would change it from what it already is. This makes it safe to call and avoids unnecessary perturbation of the system. 
""" for ip_version in (constants.IP_VERSION_4, constants.IP_VERSION_6): self._set_default_route_ip_version(network, device_name, ip_version) def _setup_existing_dhcp_port(self, network, device_id, dhcp_subnets): """Set up the existing DHCP port, if there is one.""" # To avoid pylint thinking that port might be undefined after # the following loop... port = None # Look for an existing DHCP port for this network. for port in network.ports: port_device_id = getattr(port, 'device_id', None) if port_device_id == device_id: # If using gateway IPs on this port, we can skip the # following code, whose purpose is just to review and # update the Neutron-allocated IP addresses for the # port. if self.driver.use_gateway_ips: return port # Otherwise break out, as we now have the DHCP port # whose subnets and addresses we need to review. break else: return None # Compare what the subnets should be against what is already # on the port. dhcp_enabled_subnet_ids = set(dhcp_subnets) port_subnet_ids = set(ip.subnet_id for ip in port.fixed_ips) # If those differ, we need to call update. if dhcp_enabled_subnet_ids != port_subnet_ids: # Collect the subnets and fixed IPs that the port already # has, for subnets that are still in the DHCP-enabled set. wanted_fixed_ips = [] for fixed_ip in port.fixed_ips: if fixed_ip.subnet_id in dhcp_enabled_subnet_ids: wanted_fixed_ips.append( {'subnet_id': fixed_ip.subnet_id, 'ip_address': fixed_ip.ip_address}) # Add subnet IDs for new DHCP-enabled subnets. wanted_fixed_ips.extend( dict(subnet_id=s) for s in dhcp_enabled_subnet_ids - port_subnet_ids) # Update the port to have the calculated subnets and fixed # IPs. The Neutron server will allocate a fresh IP for # each subnet that doesn't already have one. port = self.plugin.update_dhcp_port( port.id, {'port': {'network_id': network.id, 'fixed_ips': wanted_fixed_ips}}) if not port: raise exceptions.Conflict() return port def _setup_reserved_dhcp_port(self, network, device_id, dhcp_subnets): """Setup the reserved DHCP port, if there is one.""" LOG.debug('DHCP port %(device_id)s on network %(network_id)s' ' does not yet exist. Checking for a reserved port.', {'device_id': device_id, 'network_id': network.id}) for port in network.ports: port_device_id = getattr(port, 'device_id', None) if port_device_id == constants.DEVICE_ID_RESERVED_DHCP_PORT: port = self.plugin.update_dhcp_port( port.id, {'port': {'network_id': network.id, 'device_id': device_id}}) if port: return port def _setup_new_dhcp_port(self, network, device_id, dhcp_subnets): """Create and set up new DHCP port for the specified network.""" LOG.debug('DHCP port %(device_id)s on network %(network_id)s' ' does not yet exist. Creating new one.', {'device_id': device_id, 'network_id': network.id}) # Make a list of the subnets that need a unique IP address for # this DHCP port. if self.driver.use_gateway_ips: unique_ip_subnets = [] else: unique_ip_subnets = [dict(subnet_id=s) for s in dhcp_subnets] port_dict = dict( name='', admin_state_up=True, device_id=device_id, network_id=network.id, tenant_id=network.tenant_id, fixed_ips=unique_ip_subnets) return self.plugin.create_dhcp_port({'port': port_dict}) def _check_dhcp_port_subnet(self, dhcp_port, dhcp_subnets, network): """Check if DHCP port IPs are in the range of the DHCP subnets FIXME(kevinbenton): ensure we have the IPs we actually need. 
can be removed once bug/1627480 is fixed """ if self.driver.use_gateway_ips: return expected = set(dhcp_subnets) actual = {fip.subnet_id for fip in dhcp_port.fixed_ips} missing = expected - actual if not missing: return LOG.debug('Requested DHCP port with IPs on subnets %(expected)s ' 'but only got IPs on subnets %(actual)s.', {'expected': expected, 'actual': actual}) updated_dhcp_port = self.plugin.get_dhcp_port(dhcp_port.id) actual = {fip.subnet_id for fip in updated_dhcp_port.fixed_ips} missing = expected - actual if missing: raise exceptions.SubnetMismatchForPort( port_id=updated_dhcp_port.id, subnet_id=list(missing)[0]) self._update_dhcp_port(network, updated_dhcp_port) LOG.debug('Previous DHCP port information: %(dhcp_port)s. Updated ' 'DHCP port information: %(updated_dhcp_port)s.', {'dhcp_port': dhcp_port, 'updated_dhcp_port': updated_dhcp_port}) def setup_dhcp_port(self, network): """Create/update DHCP port for the host if needed and return port.""" # The ID that the DHCP port will have (or already has). device_id = self.get_device_id(network) # Get the set of DHCP-enabled local subnets on this network. dhcp_subnets = {subnet.id: subnet for subnet in network.subnets if subnet.enable_dhcp} # There are 3 cases: either the DHCP port already exists (but # might need to be updated for a changed set of subnets); or # some other code has already prepared a 'reserved' DHCP port, # and we just need to adopt that; or we need to create a new # DHCP port. Try each of those in turn until we have a DHCP # port. for setup_method in (self._setup_existing_dhcp_port, self._setup_reserved_dhcp_port, self._setup_new_dhcp_port): dhcp_port = setup_method(network, device_id, dhcp_subnets) if dhcp_port: break else: raise exceptions.Conflict() self._check_dhcp_port_subnet(dhcp_port, dhcp_subnets, network) # Convert subnet_id to subnet dict fixed_ips = [dict(subnet_id=fixed_ip.subnet_id, ip_address=fixed_ip.ip_address, subnet=dhcp_subnets[fixed_ip.subnet_id]) for fixed_ip in dhcp_port.fixed_ips # we don't care about any ips on subnets irrelevant # to us (e.g. 
auto ipv6 addresses) if fixed_ip.subnet_id in dhcp_subnets] ips = [DictModel(item) if isinstance(item, dict) else item for item in fixed_ips] dhcp_port.fixed_ips = ips return dhcp_port def _update_dhcp_port(self, network, port): for index in range(len(network.ports)): if network.ports[index].id == port.id: network.ports[index] = port break else: network.ports.append(port) def _cleanup_stale_devices(self, network, dhcp_port): """Unplug any devices found in the namespace except for dhcp_port.""" LOG.debug("Cleaning stale devices for network %s", network.id) skip_dev_name = (self.driver.get_device_name(dhcp_port) if dhcp_port else None) ns_ip = ip_lib.IPWrapper(namespace=network.namespace) if not ns_ip.netns.exists(network.namespace): return for d in ns_ip.get_devices(): # delete all devices except current active DHCP port device if d.name != skip_dev_name: LOG.debug("Found stale device %s, deleting", d.name) try: self.unplug(d.name, network) except Exception: LOG.exception("Exception during stale " "dhcp device cleanup") def plug(self, network, port, interface_name): """Plug device settings for the network's DHCP on this host.""" self.driver.plug(network.id, port.id, interface_name, port.mac_address, namespace=network.namespace, mtu=network.get('mtu')) def setup(self, network): """Create and initialize a device for network's DHCP on this host.""" try: port = self.setup_dhcp_port(network) except Exception: with excutils.save_and_reraise_exception(): # clear everything out so we don't leave dangling interfaces # if setup never succeeds in the future. self._cleanup_stale_devices(network, dhcp_port=None) self._update_dhcp_port(network, port) interface_name = self.get_interface_name(network, port) # Disable acceptance of RAs in the namespace so we don't # auto-configure an IPv6 address since we explicitly configure # them on the device. This must be done before any interfaces # are plugged since it could receive an RA by the time # plug() returns, so we have to create the namespace first. # It must also be done in the case there is an existing IPv6 # address here created via SLAAC, since it will be deleted # and added back statically in the call to init_l3() below. if network.namespace: ip_lib.IPWrapper().ensure_namespace(network.namespace) ip_lib.set_ip_nonlocal_bind_for_namespace(network.namespace, 1, root_namespace=True) if netutils.is_ipv6_enabled(): self.driver.configure_ipv6_ra(network.namespace, 'default', constants.ACCEPT_RA_DISABLED) if ip_lib.ensure_device_is_ready(interface_name, namespace=network.namespace): LOG.debug('Reusing existing device: %s.', interface_name) # force mtu on the port for in case it was changed for the network mtu = getattr(network, 'mtu', 0) if mtu: self.driver.set_mtu(interface_name, mtu, namespace=network.namespace) else: try: self.plug(network, port, interface_name) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Unable to plug DHCP port for ' 'network %s. Releasing port.', network.id) # We should unplug the interface in bridge side. self.unplug(interface_name, network) self.plugin.release_dhcp_port(network.id, port.device_id) self.fill_dhcp_udp_checksums(namespace=network.namespace) ip_cidrs = [] for fixed_ip in port.fixed_ips: subnet = fixed_ip.subnet net = netaddr.IPNetwork(subnet.cidr) ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen) ip_cidrs.append(ip_cidr) if self.driver.use_gateway_ips: # For each DHCP-enabled subnet, add that subnet's gateway # IP address to the Linux device for the DHCP port. 
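# Example (illustrative): a DHCP-enabled subnet 10.0.0.0/24 whose # gateway_ip is 10.0.0.1 contributes '10.0.0.1/24' to ip_cidrs below.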
for subnet in network.subnets: if not subnet.enable_dhcp: continue gateway = subnet.gateway_ip if gateway: net = netaddr.IPNetwork(subnet.cidr) ip_cidrs.append('%s/%s' % (gateway, net.prefixlen)) if self.conf.force_metadata or self.conf.enable_isolated_metadata: ip_cidrs.append(METADATA_DEFAULT_CIDR) self.driver.init_l3(interface_name, ip_cidrs, namespace=network.namespace) self._set_default_route(network, interface_name) self._cleanup_stale_devices(network, port) return interface_name def update(self, network, device_name): """Update device settings for the network's DHCP on this host.""" self._set_default_route(network, device_name) def unplug(self, device_name, network): """Unplug device settings for the network's DHCP on this host.""" self.driver.unplug(device_name, namespace=network.namespace) def destroy(self, network, device_name): """Destroy the device used for the network's DHCP on this host.""" if device_name: self.unplug(device_name, network) else: LOG.debug('No interface exists for network %s', network.id) self.plugin.release_dhcp_port(network.id, self.get_device_id(network)) def fill_dhcp_udp_checksums(self, namespace): """Ensure DHCP reply packets always have correct UDP checksums.""" iptables_mgr = iptables_manager.IptablesManager(use_ipv6=True, nat=False, namespace=namespace) ipv4_rule = ('-p udp -m udp --dport %d -j CHECKSUM --checksum-fill' % constants.DHCP_RESPONSE_PORT) ipv6_rule = ('-p udp -m udp --dport %d -j CHECKSUM --checksum-fill' % constants.DHCPV6_CLIENT_PORT) iptables_mgr.ipv4['mangle'].add_rule('POSTROUTING', ipv4_rule) iptables_mgr.ipv6['mangle'].add_rule('POSTROUTING', ipv6_rule) iptables_mgr.apply() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/linux/dibbler.py0000644000175000017500000001610700000000000023054 0ustar00coreycorey00000000000000# Copyright 2015 Cisco Systems # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import shutil import jinja2 from neutron_lib import constants as lib_const from neutron_lib.utils import file as file_utils from oslo_config import cfg from oslo_log import log as logging import six from neutron.agent.linux import external_process from neutron.agent.linux import pd from neutron.agent.linux import pd_driver from neutron.agent.linux import utils LOG = logging.getLogger(__name__) PD_SERVICE_NAME = 'dibbler' CONFIG_TEMPLATE = jinja2.Template(""" # Config for dibbler-client. # Use enterprise number based duid duid-type duid-en {{ enterprise_number }} {{ va_id }} # 8 (Debug) is most verbose. 
7 (Info) is usually the best option log-level 8 # No automatic downlink address assignment downlink-prefix-ifaces "none" # Use script to notify l3_agent of assigned prefix script {{ script_path }} # Ask for prefix over the external gateway interface iface {{ interface_name }} { # Bind to generated LLA bind-to-address {{ bind_address }} # ask for address {% if hint_prefix != None %} pd 1 { prefix {{ hint_prefix }} } {% else %} pd 1 {% endif %} } """) # The first line must be #!/usr/bin/env bash SCRIPT_TEMPLATE = jinja2.Template("""#!/usr/bin/env bash exec neutron-pd-notify $1 {{ prefix_path }} {{ l3_agent_pid }} """) class PDDibbler(pd_driver.PDDriverBase): def __init__(self, router_id, subnet_id, ri_ifname): super(PDDibbler, self).__init__(router_id, subnet_id, ri_ifname) self.requestor_id = "%s:%s:%s" % (self.router_id, self.subnet_id, self.ri_ifname) self.dibbler_client_working_area = "%s/%s" % (cfg.CONF.pd_confs, self.requestor_id) self.prefix_path = "%s/prefix" % self.dibbler_client_working_area self.pid_path = "%s/client.pid" % self.dibbler_client_working_area self.converted_subnet_id = self.subnet_id.replace('-', '') def _is_dibbler_client_running(self): return utils.get_value_from_file(self.pid_path) def _generate_dibbler_conf(self, ex_gw_ifname, lla, hint_prefix): dcwa = self.dibbler_client_working_area script_path = utils.get_conf_file_name(dcwa, 'notify', 'sh', True) buf = six.StringIO() buf.write('%s' % SCRIPT_TEMPLATE.render( prefix_path=self.prefix_path, l3_agent_pid=os.getpid())) file_utils.replace_file(script_path, buf.getvalue()) os.chmod(script_path, 0o744) dibbler_conf = utils.get_conf_file_name(dcwa, 'client', 'conf', False) buf = six.StringIO() buf.write('%s' % CONFIG_TEMPLATE.render( enterprise_number=cfg.CONF.vendor_pen, va_id='0x%s' % self.converted_subnet_id, script_path='"%s/notify.sh"' % dcwa, interface_name='"%s"' % ex_gw_ifname, bind_address='%s' % lla, hint_prefix=hint_prefix)) file_utils.replace_file(dibbler_conf, buf.getvalue()) return dcwa def _spawn_dibbler(self, pmon, router_ns, dibbler_conf): def callback(pid_file): dibbler_cmd = ['dibbler-client', 'start', '-w', '%s' % dibbler_conf] return dibbler_cmd pm = external_process.ProcessManager( uuid=self.requestor_id, default_cmd_callback=callback, namespace=router_ns, service=PD_SERVICE_NAME, conf=cfg.CONF, pid_file=self.pid_path) pm.enable(reload_cfg=False) pmon.register(uuid=self.requestor_id, service_name=PD_SERVICE_NAME, monitored_process=pm) def enable(self, pmon, router_ns, ex_gw_ifname, lla, prefix=None): LOG.debug("Enable IPv6 PD for router %s subnet %s ri_ifname %s", self.router_id, self.subnet_id, self.ri_ifname) if not self._is_dibbler_client_running(): dibbler_conf = self._generate_dibbler_conf(ex_gw_ifname, lla, prefix) self._spawn_dibbler(pmon, router_ns, dibbler_conf) LOG.debug("dibbler client enabled for router %s subnet %s" " ri_ifname %s", self.router_id, self.subnet_id, self.ri_ifname) def disable(self, pmon, router_ns, switch_over=False): LOG.debug("Disable IPv6 PD for router %s subnet %s ri_ifname %s", self.router_id, self.subnet_id, self.ri_ifname) dcwa = self.dibbler_client_working_area def callback(pid_file): dibbler_cmd = ['dibbler-client', 'stop', '-w', '%s' % dcwa] return dibbler_cmd pmon.unregister(uuid=self.requestor_id, service_name=PD_SERVICE_NAME) pm = external_process.ProcessManager( uuid=self.requestor_id, namespace=router_ns, service=PD_SERVICE_NAME, conf=cfg.CONF, pid_file=self.pid_path) if switch_over: pm.disable() else: pm.disable(get_stop_command=callback) 
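# Drop the per-requestor working area (the generated client.conf, # client.pid, prefix file and notify.sh script created above); # ignore_errors makes this a no-op if it was never created.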
shutil.rmtree(dcwa, ignore_errors=True) LOG.debug("dibbler client disabled for router %s subnet %s " "ri_ifname %s", self.router_id, self.subnet_id, self.ri_ifname) def get_prefix(self): prefix = utils.get_value_from_file(self.prefix_path) if not prefix: prefix = lib_const.PROVISIONAL_IPV6_PD_PREFIX return prefix @staticmethod def get_sync_data(): try: requestor_ids = os.listdir(cfg.CONF.pd_confs) except OSError: return [] sync_data = [] requestors = (r.split(':') for r in requestor_ids if r.count(':') == 2) for router_id, subnet_id, ri_ifname in requestors: pd_info = pd.PDInfo() pd_info.router_id = router_id pd_info.subnet_id = subnet_id pd_info.ri_ifname = ri_ifname pd_info.driver = PDDibbler(router_id, subnet_id, ri_ifname) pd_info.client_started = ( pd_info.driver._is_dibbler_client_running()) pd_info.prefix = pd_info.driver.get_prefix() sync_data.append(pd_info) return sync_data ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/linux/external_process.py0000644000175000017500000002531100000000000025026 0ustar00coreycorey00000000000000# Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import collections import os.path import eventlet from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import fileutils import psutil import six from neutron.agent.linux import ip_lib from neutron.agent.linux import utils from neutron.common import utils as common_utils from neutron.conf.agent import common as agent_cfg LOG = logging.getLogger(__name__) agent_cfg.register_external_process_opts() agent_cfg.register_process_monitor_opts(cfg.CONF) @six.add_metaclass(abc.ABCMeta) class MonitoredProcess(object): @abc.abstractproperty def active(self): """Boolean representing the running state of the process.""" @abc.abstractmethod def enable(self): """Enable the service, or respawn the process.""" class ProcessManager(MonitoredProcess): """An external process manager for Neutron spawned processes. Note: The manager expects uuid to be in cmdline. """ def __init__(self, conf, uuid, namespace=None, service=None, pids_path=None, default_cmd_callback=None, cmd_addl_env=None, pid_file=None, run_as_root=False, custom_reload_callback=None): self.conf = conf self.uuid = uuid self.namespace = namespace self.default_cmd_callback = default_cmd_callback self.cmd_addl_env = cmd_addl_env self.pids_path = pids_path or self.conf.external_pids self.pid_file = pid_file self.run_as_root = run_as_root or self.namespace is not None self.custom_reload_callback = custom_reload_callback self.kill_scripts_path = cfg.CONF.AGENT.kill_scripts_path if service: self.service_pid_fname = 'pid.' 
+ service self.service = service else: self.service_pid_fname = 'pid' self.service = 'default-service' fileutils.ensure_tree(os.path.dirname(self.get_pid_file_name()), mode=0o755) def enable(self, cmd_callback=None, reload_cfg=False, ensure_active=False): if not self.active: if not cmd_callback: cmd_callback = self.default_cmd_callback cmd = cmd_callback(self.get_pid_file_name()) ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace) ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env, run_as_root=self.run_as_root) elif reload_cfg: self.reload_cfg() if ensure_active: common_utils.wait_until_true(lambda: self.active) def reload_cfg(self): if self.custom_reload_callback: self.disable(get_stop_command=self.custom_reload_callback) else: self.disable('HUP') def disable(self, sig='9', get_stop_command=None): pid = self.pid if self.active: if get_stop_command: cmd = get_stop_command(self.get_pid_file_name()) ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace) ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env, run_as_root=self.run_as_root) else: cmd = self.get_kill_cmd(sig, pid) utils.execute(cmd, run_as_root=self.run_as_root) # In the case of shutting down, remove the pid file if sig == '9': fileutils.delete_if_exists(self.get_pid_file_name()) elif pid: LOG.debug('%(service)s process for %(uuid)s pid %(pid)d is stale, ' 'ignoring signal %(signal)s', {'service': self.service, 'uuid': self.uuid, 'pid': pid, 'signal': sig}) else: LOG.debug('No %(service)s process started for %(uuid)s', {'service': self.service, 'uuid': self.uuid}) def get_kill_cmd(self, sig, pid): if self.kill_scripts_path: kill_file = "%s-kill" % self.service if os.path.isfile(os.path.join(self.kill_scripts_path, kill_file)): return [kill_file, sig, pid] return ['kill', '-%s' % (sig), pid] def get_pid_file_name(self): """Returns the file name for a given kind of config file.""" if self.pid_file: return self.pid_file else: return utils.get_conf_file_name(self.pids_path, self.uuid, self.service_pid_fname) @property def pid(self): """Last known pid for this external process spawned for this uuid.""" return utils.get_value_from_file(self.get_pid_file_name(), int) @property def active(self): cmdline = self.cmdline return self.uuid in cmdline if cmdline else False @property def cmdline(self): pid = self.pid if not pid: return try: return ' '.join(psutil.Process(pid).cmdline()) except (psutil.NoSuchProcess, psutil.AccessDenied): return ServiceId = collections.namedtuple('ServiceId', ['uuid', 'service']) class ProcessMonitor(object): def __init__(self, config, resource_type): """Handle multiple process managers and watch over all of them. :param config: oslo config object with the agent configuration. :type config: oslo_config.ConfigOpts :param resource_type: can be dhcp, router, etc. :type resource_type: str """ self._config = config self._resource_type = resource_type self._monitored_processes = {} if self._config.AGENT.check_child_processes_interval: self._spawn_checking_thread() def register(self, uuid, service_name, monitored_process): """Start monitoring a process. The given monitored_process will be tied to its uuid+service_name, replacing the old one if it existed already. The monitored_process should be enabled before registration, otherwise ProcessMonitor could try to enable the process itself, which could lead to a double enable and, if unlucky enough, two processes running, as well as errors in the logs. :param uuid: An ID of the resource for which the process is running.
:param service_name: A logical service name for this process monitor, so the same uuid provided via process manager can reference several different services. :param monitored_process: MonitoredProcess we want to monitor. """ service_id = ServiceId(uuid, service_name) self._monitored_processes[service_id] = monitored_process def unregister(self, uuid, service_name): """Stop monitoring a process. The uuid+service_name will be removed from the monitored processes. The service must be disabled **after** unregistering, otherwise if process monitor checks after you disable the process, and before you unregister it, the process will be respawned, and left orphaned into the system. :param uuid: An ID of the resource for which the process is running. :param service_name: A logical service name for this process monitor, so the same uuid provided via process manager can reference several different services. """ service_id = ServiceId(uuid, service_name) self._monitored_processes.pop(service_id, None) def stop(self): """Stop the process monitoring. This method will stop the monitoring thread, but no monitored process will be stopped. """ self._monitor_processes = False def _spawn_checking_thread(self): self._monitor_processes = True eventlet.spawn(self._periodic_checking_thread) @lockutils.synchronized("_check_child_processes") def _check_child_processes(self): # we build the list of keys before iterating in the loop to cover # the case where other threads add or remove items from the # dictionary which otherwise will cause a RuntimeError for service_id in list(self._monitored_processes): pm = self._monitored_processes.get(service_id) if pm and not pm.active: LOG.error("%(service)s for %(resource_type)s " "with uuid %(uuid)s not found. " "The process should not have died", {'service': service_id.service, 'resource_type': self._resource_type, 'uuid': service_id.uuid}) self._execute_action(service_id) eventlet.sleep(0) def _periodic_checking_thread(self): while self._monitor_processes: eventlet.sleep(self._config.AGENT.check_child_processes_interval) eventlet.spawn(self._check_child_processes) def _execute_action(self, service_id): action = self._config.AGENT.check_child_processes_action action_function = getattr(self, "_%s_action" % action) action_function(service_id) def _respawn_action(self, service_id): LOG.warning("Respawning %(service)s for uuid %(uuid)s", {'service': service_id.service, 'uuid': service_id.uuid}) self._monitored_processes[service_id].enable() def _exit_action(self, service_id): LOG.error("Exiting agent as programmed in check_child_processes_" "actions") self._exit_handler(service_id.uuid, service_id.service) def _exit_handler(self, uuid, service): """This is an exit handler for the ProcessMonitor. It will be called if the administrator configured the exit action in check_child_processes_actions, and one of our external processes die unexpectedly. """ LOG.error("Exiting agent because of a malfunction with the " "%(service)s process identified by uuid %(uuid)s", {'service': service, 'uuid': uuid}) raise SystemExit(1) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/linux/interface.py0000644000175000017500000005013100000000000023404 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import time import netaddr from neutron_lib import constants from neutron_lib import exceptions from oslo_log import log as logging from oslo_utils import excutils from pyroute2.netlink import exceptions as pyroute2_exc import six from neutron.agent.common import ovs_lib from neutron.agent.linux import ip_lib from neutron.common import utils LOG = logging.getLogger(__name__) def _get_veth(name1, name2, namespace2): return (ip_lib.IPDevice(name1), ip_lib.IPDevice(name2, namespace=namespace2)) @six.add_metaclass(abc.ABCMeta) class LinuxInterfaceDriver(object): DEV_NAME_LEN = constants.LINUX_DEV_LEN DEV_NAME_PREFIX = constants.TAP_DEVICE_PREFIX def __init__(self, conf, **kwargs): self.conf = conf self._mtu_update_warn_logged = False @property def use_gateway_ips(self): """Whether to use gateway IPs instead of unique IP allocations. In each place where the DHCP agent runs, and for each subnet for which DHCP is handling out IP addresses, the DHCP port needs - at the Linux level - to have an IP address within that subnet. Generally this needs to be a unique Neutron-allocated IP address, because the subnet's underlying L2 domain is bridged across multiple compute hosts and network nodes, and for HA there may be multiple DHCP agents running on that same bridged L2 domain. However, if the DHCP ports - on multiple compute/network nodes but for the same network - are _not_ bridged to each other, they do not need each to have a unique IP address. Instead they can all share the same address from the relevant subnet. This works, without creating any ambiguity, because those ports are not all present on the same L2 domain, and because no data within the network is ever sent to that address. (DHCP requests are broadcast, and it is the network's job to ensure that such a broadcast will reach at least one of the available DHCP servers. DHCP responses will be sent _from_ the DHCP port address.) Specifically, for networking backends where it makes sense, the DHCP agent allows all DHCP ports to use the subnet's gateway IP address, and thereby to completely avoid any unique IP address allocation. This behaviour is selected by running the DHCP agent with a configured interface driver whose 'use_gateway_ips' property is True. When an operator deploys Neutron with an interface driver that makes use_gateway_ips True, they should also ensure that a gateway IP address is defined for each DHCP-enabled subnet, and that the gateway IP address doesn't change during the subnet's lifetime. """ return False def init_l3(self, device_name, ip_cidrs, namespace=None, preserve_ips=None, clean_connections=False): """Set the L3 settings for the interface using data from the port. 
ip_cidrs: list of 'X.X.X.X/YY' strings preserve_ips: list of ip cidrs that should not be removed from device clean_connections: Boolean to indicate if we should cleanup connections associated to removed ips """ preserve_ips = preserve_ips or [] device = ip_lib.IPDevice(device_name, namespace=namespace) # The LLA generated by the operating system is not known to # Neutron, so it would be deleted if we added it to the 'previous' # list here default_ipv6_lla = ip_lib.get_ipv6_lladdr(device.link.address) cidrs = set() remove_ips = set() # normalize all the IP addresses first for ip_cidr in ip_cidrs: net = netaddr.IPNetwork(ip_cidr) # Convert to compact IPv6 address because the return values of # "ip addr list" are compact. if net.version == 6: ip_cidr = str(net) cidrs.add(ip_cidr) # Determine the addresses that must be added and removed for address in device.addr.list(): cidr = address['cidr'] dynamic = address['dynamic'] # skip the IPv6 link-local if cidr == default_ipv6_lla: # it's already configured, leave it alone cidrs.discard(cidr) continue if cidr in preserve_ips: continue # Statically created addresses are OK, dynamically created # addresses must be removed and replaced if cidr in cidrs and not dynamic: cidrs.remove(cidr) continue remove_ips.add(cidr) # Clean up any old addresses. This must be done first since there # could be a dynamic address being replaced with a static one. for ip_cidr in remove_ips: if clean_connections: device.delete_addr_and_conntrack_state(ip_cidr) else: device.addr.delete(ip_cidr) # add any new addresses for ip_cidr in cidrs: device.addr.add(ip_cidr) def init_router_port(self, device_name, ip_cidrs, namespace, preserve_ips=None, extra_subnets=None, clean_connections=False): """Set the L3 settings for a router interface using data from the port. 
ip_cidrs: list of 'X.X.X.X/YY' strings preserve_ips: list of ip cidrs that should not be removed from device clean_connections: Boolean to indicate if we should cleanup connections associated to removed ips extra_subnets: An iterable of cidrs to add as routes without address """ LOG.debug("init_router_port: device_name(%s), namespace(%s)", device_name, namespace) self.init_l3(device_name=device_name, ip_cidrs=ip_cidrs, namespace=namespace, preserve_ips=preserve_ips or [], clean_connections=clean_connections) self.set_onlink_routes(device_name, namespace, extra_subnets, preserve_ips) def set_onlink_routes(self, device_name, namespace, extra_subnets, preserve_ips=None, is_ipv6=True): """Manage on-link routes (routes without an associate address) :param device_name: interface name :param namespace: namespace name :param extra_subnets: subnets attached to this interface without an IP address set in this interface :param preserve_ips: IPs or CIDRs not to be deleted from the device on-link route list """ device = ip_lib.IPDevice(device_name, namespace=namespace) new_onlink_cidrs = set(s['cidr'] for s in extra_subnets or []) preserve_ips = set(preserve_ips if preserve_ips else []) onlink = device.route.list_onlink_routes(constants.IP_VERSION_4) if is_ipv6: onlink += device.route.list_onlink_routes(constants.IP_VERSION_6) existing_onlink_cidrs = set(r['cidr'] for r in onlink) for route in new_onlink_cidrs - existing_onlink_cidrs: LOG.debug('Adding onlink route (%s)', route) device.route.add_onlink_route(route) for route in existing_onlink_cidrs - new_onlink_cidrs - preserve_ips: LOG.debug('Deleting onlink route (%s)', route) device.route.delete_onlink_route(route) def add_ipv6_addr(self, device_name, v6addr, namespace, scope='global'): device = ip_lib.IPDevice(device_name, namespace=namespace) net = netaddr.IPNetwork(v6addr) device.addr.add(str(net), scope) def delete_ipv6_addr(self, device_name, v6addr, namespace): device = ip_lib.IPDevice(device_name, namespace=namespace) device.delete_addr_and_conntrack_state(v6addr) def delete_ipv6_addr_with_prefix(self, device_name, prefix, namespace): """Delete the first listed IPv6 address that falls within a given prefix. """ device = ip_lib.IPDevice(device_name, namespace=namespace) net = netaddr.IPNetwork(prefix) for address in device.addr.list(scope='global', filters=['permanent']): ip_address = netaddr.IPNetwork(address['cidr']) if ip_address in net: device.delete_addr_and_conntrack_state(address['cidr']) break def get_ipv6_llas(self, device_name, namespace): kwargs = {'family': utils.get_socket_address_family( constants.IP_VERSION_6), 'scope': 'link'} return ip_lib.get_devices_with_ip(namespace, name=device_name, **kwargs) def check_bridge_exists(self, bridge): if not ip_lib.device_exists(bridge): raise exceptions.BridgeDoesNotExist(bridge=bridge) def get_device_name(self, port): return (self.DEV_NAME_PREFIX + port.id)[:self.DEV_NAME_LEN] @staticmethod def configure_ipv6_ra(namespace, dev_name, value): """Configure handling of IPv6 Router Advertisements on an interface. See common/constants.py for possible values. 
""" cmd = ['net.ipv6.conf.%(dev)s.accept_ra=%(value)s' % {'dev': dev_name, 'value': value}] ip_lib.sysctl(cmd, namespace=namespace) @staticmethod def configure_ipv6_forwarding(namespace, dev_name, enabled): """Configure IPv6 forwarding on an interface.""" cmd = ['net.ipv6.conf.%(dev)s.forwarding=%(enabled)s' % {'dev': dev_name, 'enabled': int(enabled)}] ip_lib.sysctl(cmd, namespace=namespace) @abc.abstractmethod def plug_new(self, network_id, port_id, device_name, mac_address, bridge=None, namespace=None, prefix=None, mtu=None, link_up=True): """Plug in the interface only for new devices that don't exist yet.""" def plug(self, network_id, port_id, device_name, mac_address, bridge=None, namespace=None, prefix=None, mtu=None, link_up=True): if not ip_lib.device_exists(device_name, namespace=namespace): self.plug_new(network_id, port_id, device_name, mac_address, bridge, namespace, prefix, mtu, link_up) else: LOG.info("Device %s already exists", device_name) if mtu: self.set_mtu( device_name, mtu, namespace=namespace, prefix=prefix) else: LOG.warning("No MTU configured for port %s", port_id) @abc.abstractmethod def unplug(self, device_name, bridge=None, namespace=None, prefix=None): """Unplug the interface.""" @property def bridged(self): """Whether the DHCP port is bridged to the VM TAP interfaces. When the DHCP port is bridged to the TAP interfaces for the VMs for which it is providing DHCP service - as is the case for most Neutron network implementations - the DHCP server only needs to listen on the DHCP port, and will still receive DHCP requests from all the relevant VMs. If the DHCP port is not bridged to the relevant VM TAP interfaces, the DHCP server needs to listen explicitly on those TAP interfaces, and to treat those as aliases of the DHCP port where the IP subnet is defined. 
""" return True def set_mtu(self, device_name, mtu, namespace=None, prefix=None): """Set MTU on the interface.""" if not self._mtu_update_warn_logged: LOG.warning("Interface driver cannot update MTU for ports") self._mtu_update_warn_logged = True def set_link_status(self, device_name, namespace=None, link_up=True): ns_dev = ip_lib.IPWrapper(namespace=namespace).device(device_name) if not ns_dev.exists(): LOG.debug("Device %s may concurrently be deleted.", device_name) return if link_up: ns_dev.link.set_up() else: ns_dev.link.set_down() class NullDriver(LinuxInterfaceDriver): def plug_new(self, network_id, port_id, device_name, mac_address, bridge=None, namespace=None, prefix=None, mtu=None, link_up=True): pass def unplug(self, device_name, bridge=None, namespace=None, prefix=None): pass class OVSInterfaceDriver(LinuxInterfaceDriver): """Driver for creating an internal interface on an OVS bridge.""" DEV_NAME_PREFIX = constants.TAP_DEVICE_PREFIX def __init__(self, conf, **kwargs): super(OVSInterfaceDriver, self).__init__(conf, **kwargs) if self.conf.ovs_use_veth: self.DEV_NAME_PREFIX = 'ns-' def _get_tap_name(self, dev_name, prefix=None): if self.conf.ovs_use_veth: dev_name = dev_name.replace(prefix or self.DEV_NAME_PREFIX, constants.TAP_DEVICE_PREFIX) return dev_name def _ovs_add_port(self, bridge, device_name, port_id, mac_address, internal=True): attrs = [('external_ids', {'iface-id': port_id, 'iface-status': 'active', 'attached-mac': mac_address})] if internal: attrs.insert(0, ('type', 'internal')) ovs = ovs_lib.OVSBridge(bridge) ovs.replace_port(device_name, *attrs) def plug_new(self, network_id, port_id, device_name, mac_address, bridge=None, namespace=None, prefix=None, mtu=None, link_up=True): """Plug in the interface.""" if not bridge: bridge = self.conf.OVS.integration_bridge self.check_bridge_exists(bridge) ip = ip_lib.IPWrapper() tap_name = self._get_tap_name(device_name, prefix) if self.conf.ovs_use_veth: # Create ns_dev in a namespace if one is configured. root_dev, ns_dev = ip.add_veth(tap_name, device_name, namespace2=namespace) root_dev.disable_ipv6() else: ns_dev = ip.device(device_name) internal = not self.conf.ovs_use_veth self._ovs_add_port(bridge, tap_name, port_id, mac_address, internal=internal) for i in range(9): # workaround for the OVS shy port syndrome. ports sometimes # hide for a bit right after they are first created. # see bug/1618987 try: ns_dev.link.set_address(mac_address) break except RuntimeError as e: LOG.warning("Got error trying to set mac, retrying: %s", str(e)) time.sleep(1) else: # didn't break, we give it one last shot without catching ns_dev.link.set_address(mac_address) # Add an interface created by ovs to the namespace. if not self.conf.ovs_use_veth and namespace: try: namespace_obj = ip.ensure_namespace(namespace) namespace_obj.add_device_to_namespace(ns_dev) except (pyroute2_exc.NetlinkError, OSError): # To prevent the namespace failure from blasting OVS, the OVS # port creation should be reverted. 
Possible exceptions: # - NetlinkError in case of duplicated interface # - OSError in case of corrupted namespace LOG.warning("Failed to plug interface %s into bridge %s, " "cleaning up", device_name, bridge) with excutils.save_and_reraise_exception(): ovs = ovs_lib.OVSBridge(bridge) ovs.delete_port(tap_name) # NOTE(ihrachys): the order here is significant: we must set MTU after # the device is moved into a namespace, otherwise OVS bridge does not # allow to set MTU that is higher than the least of all device MTUs on # the bridge if mtu: self.set_mtu(device_name, mtu, namespace=namespace, prefix=prefix) else: LOG.warning("No MTU configured for port %s", port_id) if link_up: ns_dev.link.set_up() if self.conf.ovs_use_veth: root_dev.link.set_up() def unplug(self, device_name, bridge=None, namespace=None, prefix=None): """Unplug the interface.""" if not bridge: bridge = self.conf.OVS.integration_bridge tap_name = self._get_tap_name(device_name, prefix) self.check_bridge_exists(bridge) ovs = ovs_lib.OVSBridge(bridge) try: ovs.delete_port(tap_name) if self.conf.ovs_use_veth: device = ip_lib.IPDevice(device_name, namespace=namespace) device.link.delete() LOG.debug("Unplugged interface '%s'", device_name) except RuntimeError: LOG.error("Failed unplugging interface '%s'", device_name) def set_mtu(self, device_name, mtu, namespace=None, prefix=None): if self.conf.ovs_use_veth: tap_name = self._get_tap_name(device_name, prefix) root_dev, ns_dev = _get_veth( tap_name, device_name, namespace2=namespace) root_dev.link.set_mtu(mtu) else: ns_dev = ip_lib.IPWrapper(namespace=namespace).device(device_name) ns_dev.link.set_mtu(mtu) class BridgeInterfaceDriver(LinuxInterfaceDriver): """Driver for creating bridge interfaces.""" DEV_NAME_PREFIX = 'ns-' def plug_new(self, network_id, port_id, device_name, mac_address, bridge=None, namespace=None, prefix=None, mtu=None, link_up=True): """Plugin the interface.""" ip = ip_lib.IPWrapper() # Enable agent to define the prefix tap_name = device_name.replace(prefix or self.DEV_NAME_PREFIX, constants.TAP_DEVICE_PREFIX) # Create ns_veth in a namespace if one is configured. root_veth, ns_veth = ip.add_veth(tap_name, device_name, namespace2=namespace) root_veth.disable_ipv6() ns_veth.link.set_address(mac_address) if mtu: self.set_mtu(device_name, mtu, namespace=namespace, prefix=prefix) else: LOG.warning("No MTU configured for port %s", port_id) root_veth.link.set_up() if link_up: ns_veth.link.set_up() def unplug(self, device_name, bridge=None, namespace=None, prefix=None): """Unplug the interface.""" device = ip_lib.IPDevice(device_name, namespace=namespace) try: device.link.delete() LOG.debug("Unplugged interface '%s'", device_name) except RuntimeError: LOG.error("Failed unplugging interface '%s'", device_name) def set_mtu(self, device_name, mtu, namespace=None, prefix=None): tap_name = device_name.replace(prefix or self.DEV_NAME_PREFIX, constants.TAP_DEVICE_PREFIX) root_dev, ns_dev = _get_veth( tap_name, device_name, namespace2=namespace) root_dev.link.set_mtu(mtu) ns_dev.link.set_mtu(mtu) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/linux/ip_conntrack.py0000644000175000017500000002475400000000000024132 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re import eventlet import netaddr from neutron_lib import constants from neutron_lib import exceptions from oslo_concurrency import lockutils from oslo_log import log as logging from neutron.agent.linux import utils as linux_utils LOG = logging.getLogger(__name__) CONTRACK_MGRS = {} MAX_CONNTRACK_ZONES = 65535 ZONE_START = 4097 WORKERS = 8 class IpConntrackUpdate(object): """Encapsulates a conntrack update An instance of this object carries the information necessary to process a request to update the conntrack table. """ def __init__(self, device_info_list, rule, remote_ips): self.device_info_list = device_info_list self.rule = rule self.remote_ips = remote_ips def __repr__(self): return ('<IpConntrackUpdate(device_info_list=%s, rule=%s, ' 'remote_ips=%s)>' % (self.device_info_list, self.rule, self.remote_ips)) @lockutils.synchronized('conntrack') def get_conntrack(get_rules_for_table_func, filtered_ports, unfiltered_ports, execute=None, namespace=None, zone_per_port=False): try: return CONTRACK_MGRS[namespace] except KeyError: ipconntrack = IpConntrackManager(get_rules_for_table_func, filtered_ports, unfiltered_ports, execute, namespace, zone_per_port) CONTRACK_MGRS[namespace] = ipconntrack return CONTRACK_MGRS[namespace] class IpConntrackManager(object): """Smart wrapper for ip conntrack.""" def __init__(self, get_rules_for_table_func, filtered_ports, unfiltered_ports, execute=None, namespace=None, zone_per_port=False): self.get_rules_for_table_func = get_rules_for_table_func self.execute = execute or linux_utils.execute self.namespace = namespace self.filtered_ports = filtered_ports self.unfiltered_ports = unfiltered_ports self.zone_per_port = zone_per_port # zone per port vs per network self._populate_initial_zone_map() self._queue = eventlet.queue.LightQueue() self._start_process_queue() def _start_process_queue(self): LOG.debug("Starting ip_conntrack _process_queue_worker() threads") pool = eventlet.GreenPool(size=WORKERS) for i in range(WORKERS): pool.spawn_n(self._process_queue_worker) def _process_queue_worker(self): # While it's technically not necessary to have this method (the # 'while True' could just be in _process_queue()), the tests have # to be able to drain the queue without blocking, so _process_queue() # is kept standalone.
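# Flow recap: delete_conntrack_state_by_rule() and # delete_conntrack_state_by_remote_ips() enqueue an IpConntrackUpdate via # _process(); each of the WORKERS green threads loops here, popping one # update at a time and running the corresponding 'conntrack -D' commands.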
while True: self._process_queue() def _process_queue(self): update = None try: # this will block until an entry gets added to the queue update = self._queue.get() if update.remote_ips: for remote_ip in update.remote_ips: self._delete_conntrack_state( update.device_info_list, update.rule, remote_ip) else: self._delete_conntrack_state( update.device_info_list, update.rule) except Exception: LOG.exception("Failed to process ip_conntrack queue entry: %s", update) def _process(self, device_info_list, rule, remote_ips=None): # queue the update to allow the caller to resume its work update = IpConntrackUpdate(device_info_list, rule, remote_ips) self._queue.put(update) @staticmethod def _generate_conntrack_cmd_by_rule(rule, namespace): ethertype = rule.get('ethertype') protocol = rule.get('protocol') direction = rule.get('direction') cmd = ['conntrack', '-D'] if protocol is not None: # 0 is IP in /etc/protocols, but conntrack will throw an error if str(protocol) == '0': protocol = 'ip' cmd.extend(['-p', str(protocol)]) cmd.extend(['-f', str(ethertype).lower()]) cmd.append('-d' if direction == 'ingress' else '-s') cmd_ns = [] if namespace: cmd_ns.extend(['ip', 'netns', 'exec', namespace]) cmd_ns.extend(cmd) return cmd_ns def _get_conntrack_cmds(self, device_info_list, rule, remote_ip=None): conntrack_cmds = set() cmd = self._generate_conntrack_cmd_by_rule(rule, self.namespace) ethertype = rule.get('ethertype') for device_info in device_info_list: zone_id = self.get_device_zone(device_info, create=False) if not zone_id: LOG.debug("No zone for device %(dev)s. Will not try to " "clear conntrack state. Zone map: %(zm)s", {'dev': device_info['device'], 'zm': self._device_zone_map}) continue ips = device_info.get('fixed_ips', []) for ip in ips: net = netaddr.IPNetwork(ip) if str(net.version) not in ethertype: continue ip_cmd = [str(net.ip), '-w', zone_id] if remote_ip and str( netaddr.IPNetwork(remote_ip).version) in ethertype: if rule.get('direction') == 'ingress': direction = '-s' else: direction = '-d' ip_cmd.extend([direction, str(remote_ip)]) conntrack_cmds.add(tuple(cmd + ip_cmd)) return conntrack_cmds def _delete_conntrack_state(self, device_info_list, rule, remote_ip=None): conntrack_cmds = self._get_conntrack_cmds(device_info_list, rule, remote_ip) for cmd in conntrack_cmds: try: self.execute(list(cmd), run_as_root=True, check_exit_code=True, extra_ok_codes=[1]) except RuntimeError: LOG.exception("Failed to execute conntrack command %s", cmd) def delete_conntrack_state_by_rule(self, device_info_list, rule): self._process(device_info_list, rule) def delete_conntrack_state_by_remote_ips(self, device_info_list, ethertype, remote_ips): for direction in ['ingress', 'egress']: rule = {'ethertype': str(ethertype).lower(), 'direction': direction} self._process(device_info_list, rule, remote_ips) def _populate_initial_zone_map(self): """Set up the map between devices and zones based on current rules.""" self._device_zone_map = {} rules = self.get_rules_for_table_func('raw') for rule in rules: match = re.match(r'.* --physdev-in (?P<dev>[a-zA-Z0-9\-]+)' r'.* -j CT --zone (?P<zone>\d+).*', rule) if match: # strip off any prefix that the interface is using short_port_id = ( match.group('dev')[constants.LINUX_DEV_PREFIX_LEN:]) self._device_zone_map[short_port_id] = int(match.group('zone')) LOG.debug("Populated conntrack zone map: %s", self._device_zone_map) def _device_key(self, port): # we have to key the device_zone_map based on the fragment of the # UUID that shows up in the interface name.
This is because the initial # map is populated strictly based on interface names that we don't know # the full UUID of. if self.zone_per_port: identifier = port['device'][constants.LINUX_DEV_PREFIX_LEN:] else: identifier = port['network_id'] return identifier[:(constants.LINUX_DEV_LEN - constants.LINUX_DEV_PREFIX_LEN)] def get_device_zone(self, port, create=True): device_key = self._device_key(port) try: return self._device_zone_map[device_key] except KeyError: if create: return self._generate_device_zone(device_key) def _free_zones_from_removed_ports(self): """Clears any entries from the zone map of removed ports.""" existing_ports = [ self._device_key(port) for port in (list(self.filtered_ports.values()) + list(self.unfiltered_ports.values())) ] removed = set(self._device_zone_map) - set(existing_ports) for dev in removed: self._device_zone_map.pop(dev, None) def _generate_device_zone(self, short_device_id): """Generates a unique conntrack zone for the passed in ID.""" try: zone = self._find_open_zone() except exceptions.CTZoneExhaustedError: # Free some zones and try again, repeat failure will not be caught self._free_zones_from_removed_ports() zone = self._find_open_zone() self._device_zone_map[short_device_id] = zone LOG.debug("Assigned CT zone %(z)s to device %(dev)s.", {'z': zone, 'dev': short_device_id}) return self._device_zone_map[short_device_id] def _find_open_zone(self): # call set to dedup because old ports may be mapped to the same zone. zones_in_use = sorted(set(self._device_zone_map.values())) if not zones_in_use: return ZONE_START # attempt to increment onto the highest used zone first. if we hit the # end, go back and look for any gaps left by removed devices. last = zones_in_use[-1] if last < MAX_CONNTRACK_ZONES: return max(last + 1, ZONE_START) for index, used in enumerate(zones_in_use): if used - index != ZONE_START: # gap found, let's use it! return index + ZONE_START # conntrack zones exhausted :( :( raise exceptions.CTZoneExhaustedError() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/linux/ip_lib.py0000644000175000017500000016245000000000000022712 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
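# Illustrative usage sketch for the helpers in this module; the namespace
# and device names below are assumptions for the example only, not values
# used by Neutron itself:
#
#     ip = IPWrapper()
#     ns = ip.ensure_namespace('example-ns')  # creates netns, brings up 'lo'
#     dev = ns.device('veth-example')
#     dev.link.set_up()
#     dev.addr.add('192.0.2.1/24')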
import errno import re import threading import time import eventlet import netaddr from neutron_lib import constants from neutron_lib import exceptions from oslo_config import cfg from oslo_log import log as logging from oslo_utils import netutils from pyroute2.netlink import exceptions as netlink_exceptions from pyroute2.netlink import rtnl from pyroute2.netlink.rtnl import ifaddrmsg from pyroute2.netlink.rtnl import ifinfmsg from pyroute2 import NetlinkError from pyroute2 import netns from neutron._i18n import _ from neutron.agent.common import utils from neutron.common import utils as common_utils from neutron.privileged.agent.linux import ip_lib as privileged LOG = logging.getLogger(__name__) IP_NONLOCAL_BIND = 'net.ipv4.ip_nonlocal_bind' LOOPBACK_DEVNAME = 'lo' FB_TUNNEL_DEVICE_NAMES = ['gre0', 'gretap0', 'tunl0', 'erspan0', 'sit0', 'ip6tnl0', 'ip6gre0'] IP_RULE_TABLES = {'default': 253, 'main': 254, 'local': 255} IP_RULE_TABLES_NAMES = {v: k for k, v in IP_RULE_TABLES.items()} # Rule indexes: pyroute2.netlink.rtnl # Rule names: https://www.systutorials.com/docs/linux/man/8-ip-rule/ # NOTE(ralonsoh): 'masquerade' type is printed as 'nat' in 'ip rule' command IP_RULE_TYPES = {0: 'unspecified', 1: 'unicast', 6: 'blackhole', 7: 'unreachable', 8: 'prohibit', 10: 'nat'} IP_ADDRESS_SCOPE = {rtnl.rtscopes['RT_SCOPE_UNIVERSE']: 'global', rtnl.rtscopes['RT_SCOPE_SITE']: 'site', rtnl.rtscopes['RT_SCOPE_LINK']: 'link', rtnl.rtscopes['RT_SCOPE_HOST']: 'host'} IP_ADDRESS_SCOPE_NAME = {v: k for k, v in IP_ADDRESS_SCOPE.items()} IP_ADDRESS_EVENTS = {'RTM_NEWADDR': 'added', 'RTM_DELADDR': 'removed'} SYS_NET_PATH = '/sys/class/net' DEFAULT_GW_PATTERN = re.compile(r"via (\S+)") METRIC_PATTERN = re.compile(r"metric (\S+)") DEVICE_NAME_PATTERN = re.compile(r"(\d+?): (\S+?):.*") # NOTE: no metric is interpreted by the kernel as having the highest priority # (value 0). "ip route" uses the netlink API to communicate with the kernel. In # IPv6, when the metric value is not set, the kernel translates it to the # default of 1024: # https://access.redhat.com/solutions/3659171 IP_ROUTE_METRIC_DEFAULT = {constants.IP_VERSION_4: 0, constants.IP_VERSION_6: 1024} def remove_interface_suffix(interface): """Remove a possible "@" suffix from an interface's name. This suffix can appear in some kernel versions, and is intended to specify, for example, a veth's peer. However, this interface name is useless to us as further 'ip' commands require that the suffix be removed. """ # If '@' is not present, this will do nothing. return interface.partition("@")[0] class AddressNotReady(exceptions.NeutronException): message = _("Failure waiting for address %(address)s to " "become ready: %(reason)s") class InvalidArgument(exceptions.NeutronException): message = _("Invalid value %(value)s for parameter %(parameter)s " "provided.") class SubProcessBase(object): def __init__(self, namespace=None, log_fail_as_error=True): self.namespace = namespace self.log_fail_as_error = log_fail_as_error try: self.force_root = cfg.CONF.ip_lib_force_root except cfg.NoSuchOptError: # Only callers that need to force use of the root helper # need to register the option. self.force_root = False def _run(self, options, command, args): if self.namespace: return self._as_root(options, command, args) elif self.force_root: # Force use of the root helper to ensure that commands # will execute in dom0 when running under XenServer/XCP.
return self._execute(options, command, args, run_as_root=True) else: return self._execute(options, command, args) def _as_root(self, options, command, args, use_root_namespace=False): namespace = self.namespace if not use_root_namespace else None return self._execute(options, command, args, run_as_root=True, namespace=namespace) def _execute(self, options, command, args, run_as_root=False, namespace=None): opt_list = ['-%s' % o for o in options] ip_cmd = add_namespace_to_cmd(['ip'], namespace) cmd = ip_cmd + opt_list + [command] + list(args) return utils.execute(cmd, run_as_root=run_as_root, log_fail_as_error=self.log_fail_as_error) def set_log_fail_as_error(self, fail_with_error): self.log_fail_as_error = fail_with_error def get_log_fail_as_error(self): return self.log_fail_as_error class IPWrapper(SubProcessBase): def __init__(self, namespace=None): super(IPWrapper, self).__init__(namespace=namespace) self.netns = IpNetnsCommand(self) def device(self, name): return IPDevice(name, namespace=self.namespace) def get_devices_info(self, exclude_loopback=True, exclude_fb_tun_devices=True): devices = get_devices_info(self.namespace) retval = [] for device in devices: if (exclude_loopback and device['name'] == LOOPBACK_DEVNAME or exclude_fb_tun_devices and device['name'] in FB_TUNNEL_DEVICE_NAMES): continue retval.append(device) return retval def get_devices(self, exclude_loopback=True, exclude_fb_tun_devices=True): retval = [] try: devices = privileged.get_device_names(self.namespace) except privileged.NetworkNamespaceNotFound: return retval for name in devices: if (exclude_loopback and name == LOOPBACK_DEVNAME or exclude_fb_tun_devices and name in FB_TUNNEL_DEVICE_NAMES): continue retval.append(IPDevice(name, namespace=self.namespace)) return retval def get_device_by_ip(self, ip): """Get the IPDevice from the system which has this IP configured. @param ip: look for the device holding this IP. If this is None, None is returned. @type ip: str. """ if not ip: return None cidr = common_utils.ip_to_cidr(ip) kwargs = {'address': common_utils.cidr_to_ip(cidr)} if not common_utils.is_cidr_host(cidr): kwargs['mask'] = common_utils.cidr_mask_length(cidr) devices = get_devices_with_ip(self.namespace, **kwargs) if not devices: # Search by broadcast address.
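# For instance (illustrative values), a lookup for '192.0.2.255/24'
# matches no configured unicast address above, but does match a device
# whose 192.0.2.1/24 address yields that broadcast address.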
broadcast = common_utils.cidr_broadcast_address(cidr) if broadcast: devices = get_devices_with_ip(self.namespace, broadcast=broadcast) if devices: return IPDevice(devices[0]['name'], namespace=self.namespace) def add_tuntap(self, name, mode='tap'): privileged.create_interface( name, self.namespace, "tuntap", mode=mode) return IPDevice(name, namespace=self.namespace) def add_veth(self, name1, name2, namespace2=None): peer = {'ifname': name2} if namespace2 is None: namespace2 = self.namespace else: self.ensure_namespace(namespace2) peer['net_ns_fd'] = namespace2 privileged.create_interface( name1, self.namespace, 'veth', peer=peer) return (IPDevice(name1, namespace=self.namespace), IPDevice(name2, namespace=namespace2)) def add_macvtap(self, name, src_dev, mode='bridge'): privileged.create_interface(name, self.namespace, "macvtap", physical_interface=src_dev, mode=mode) return IPDevice(name, namespace=self.namespace) def del_veth(self, name): """Delete a virtual interface between two namespaces.""" privileged.delete_interface(name, self.namespace) def add_dummy(self, name): """Create a Linux dummy interface with the given name.""" privileged.create_interface(name, self.namespace, "dummy") return IPDevice(name, namespace=self.namespace) def ensure_namespace(self, name): if not self.netns.exists(name): ip = self.netns.add(name) lo = ip.device(LOOPBACK_DEVNAME) lo.link.set_up() else: ip = IPWrapper(namespace=name) return ip def namespace_is_empty(self): return not self.get_devices() def garbage_collect_namespace(self): """Conditionally destroy the namespace if it is empty.""" if self.namespace and self.netns.exists(self.namespace): if self.namespace_is_empty(): self.netns.delete(self.namespace) return True return False def add_device_to_namespace(self, device): if self.namespace: device.link.set_netns(self.namespace) def add_vlan(self, name, physical_interface, vlan_id): privileged.create_interface(name, self.namespace, "vlan", physical_interface=physical_interface, vlan_id=vlan_id) return IPDevice(name, namespace=self.namespace) def add_vxlan(self, name, vni, group=None, dev=None, ttl=None, tos=None, local=None, srcport=None, dstport=None, proxy=False): kwargs = {'vxlan_id': vni} if group: kwargs['vxlan_group'] = group if dev: kwargs['physical_interface'] = dev if ttl: kwargs['vxlan_ttl'] = ttl if tos: kwargs['vxlan_tos'] = tos if local: kwargs['vxlan_local'] = local if proxy: kwargs['vxlan_proxy'] = proxy # tuple: min,max if srcport: if len(srcport) == 2 and srcport[0] <= srcport[1]: kwargs['vxlan_port_range'] = (str(srcport[0]), str(srcport[1])) else: raise exceptions.NetworkVxlanPortRangeError( vxlan_range=srcport) if dstport: kwargs['vxlan_port'] = dstport privileged.create_interface(name, self.namespace, "vxlan", **kwargs) return (IPDevice(name, namespace=self.namespace)) class IPDevice(SubProcessBase): def __init__(self, name, namespace=None, kind='link'): super(IPDevice, self).__init__(namespace=namespace) self._name = name self.kind = kind self.link = IpLinkCommand(self) self.addr = IpAddrCommand(self) self.route = IpRouteCommand(self) self.neigh = IpNeighCommand(self) def __eq__(self, other): return (other is not None and self.name == other.name and self.namespace == other.namespace) def __str__(self): return self.name def __repr__(self): return "<IPDevice(name=%s, namespace=%s)>" % (self._name, self.namespace) def exists(self): """Return True if the device exists in the namespace.""" return privileged.interface_exists(self.name, self.namespace) def delete_addr_and_conntrack_state(self, cidr): """Delete an address
along with its conntrack state This terminates any active connections through an IP. :param cidr: the IP address for which state should be removed. This can be passed as a string with or without /NN. A netaddr.IPAddress or netaddr.Network representing the IP address can also be passed. """ self.addr.delete(cidr) self.delete_conntrack_state(cidr) def delete_conntrack_state(self, cidr): """Delete conntrack state entries Deletes both entries (if present): the original-direction one and the reply one. """ ip_str = str(netaddr.IPNetwork(cidr).ip) ip_wrapper = IPWrapper(namespace=self.namespace) # Delete conntrack state for ingress traffic # If 0 flow entries have been deleted # conntrack -D will return 1 try: ip_wrapper.netns.execute(["conntrack", "-D", "-d", ip_str], check_exit_code=True, extra_ok_codes=[1]) except RuntimeError: LOG.exception("Failed deleting ingress connection state of" " floatingip %s", ip_str) # Delete conntrack state for egress traffic try: ip_wrapper.netns.execute(["conntrack", "-D", "-q", ip_str], check_exit_code=True, extra_ok_codes=[1]) except RuntimeError: LOG.exception("Failed deleting egress connection state of" " floatingip %s", ip_str) def delete_socket_conntrack_state(self, cidr, dport, protocol): ip_str = str(netaddr.IPNetwork(cidr).ip) ip_wrapper = IPWrapper(namespace=self.namespace) cmd = ["conntrack", "-D", "-d", ip_str, '-p', protocol, '--dport', dport] try: ip_wrapper.netns.execute(cmd, check_exit_code=True, extra_ok_codes=[1]) except RuntimeError: LOG.exception("Failed deleting ingress connection state of " "socket %(ip)s:%(port)s", {'ip': ip_str, 'port': dport}) def disable_ipv6(self): if not netutils.is_ipv6_enabled(): return sysctl_name = re.sub(r'\.', '/', self.name) cmd = ['net.ipv6.conf.%s.disable_ipv6=1' % sysctl_name] return sysctl(cmd, namespace=self.namespace) @property def name(self): if self._name: return self._name[:constants.DEVICE_NAME_MAX_LEN] return self._name @name.setter def name(self, name): self._name = name class IpCommandBase(object): COMMAND = '' def __init__(self, parent): self._parent = parent def _run(self, options, args): return self._parent._run(options, self.COMMAND, args) def _as_root(self, options, args, use_root_namespace=False): return self._parent._as_root(options, self.COMMAND, args, use_root_namespace=use_root_namespace) class IpDeviceCommandBase(IpCommandBase): @property def name(self): return self._parent.name @property def kind(self): return self._parent.kind class IpLinkCommand(IpDeviceCommandBase): COMMAND = 'link' def set_address(self, mac_address): privileged.set_link_attribute( self.name, self._parent.namespace, address=mac_address) def set_allmulticast_on(self): privileged.set_link_flags( self.name, self._parent.namespace, ifinfmsg.IFF_ALLMULTI) def set_mtu(self, mtu_size): try: privileged.set_link_attribute( self.name, self._parent.namespace, mtu=mtu_size) except NetlinkError as e: if e.code == errno.EINVAL: raise InvalidArgument(parameter="MTU", value=mtu_size) raise def set_up(self): privileged.set_link_attribute( self.name, self._parent.namespace, state='up') def set_down(self): privileged.set_link_attribute( self.name, self._parent.namespace, state='down') def set_netns(self, namespace): privileged.set_link_attribute( self.name, self._parent.namespace, net_ns_fd=namespace) self._parent.namespace = namespace def set_name(self, name): privileged.set_link_attribute( self.name, self._parent.namespace, ifname=name) self._parent.name = name def set_alias(self, alias_name): privileged.set_link_attribute( self.name,
self._parent.namespace, ifalias=alias_name) def create(self): privileged.create_interface(self.name, self._parent.namespace, self.kind) def delete(self): privileged.delete_interface(self.name, self._parent.namespace) @property def address(self): return self.attributes.get('link/ether') @property def state(self): return self.attributes.get('state') @property def allmulticast(self): return self.attributes.get('allmulticast') @property def mtu(self): return self.attributes.get('mtu') @property def qdisc(self): return self.attributes.get('qdisc') @property def qlen(self): return self.attributes.get('qlen') @property def alias(self): return self.attributes.get('alias') @property def link_kind(self): return self.attributes.get('link_kind') @property def attributes(self): return privileged.get_link_attributes(self.name, self._parent.namespace) @property def exists(self): return privileged.interface_exists(self.name, self._parent.namespace) class IpAddrCommand(IpDeviceCommandBase): COMMAND = 'addr' def add(self, cidr, scope='global', add_broadcast=True): add_ip_address(cidr, self.name, self._parent.namespace, scope, add_broadcast) def delete(self, cidr): delete_ip_address(cidr, self.name, self._parent.namespace) def flush(self, ip_version): flush_ip_addresses(ip_version, self.name, self._parent.namespace) def list(self, scope=None, to=None, filters=None, ip_version=None): """Get device details of a device named <self.name>.""" def filter_device(device, filters): # Accepted filters: dynamic, permanent, tentative, dadfailed. for filter in filters: if filter == 'permanent' and device['dynamic']: return False elif not device[filter]: return False return True kwargs = {} if to: cidr = common_utils.ip_to_cidr(to) kwargs = {'address': common_utils.cidr_to_ip(cidr)} if not common_utils.is_cidr_host(cidr): kwargs['mask'] = common_utils.cidr_mask_length(cidr) if scope: kwargs['scope'] = IP_ADDRESS_SCOPE_NAME[scope] if ip_version: kwargs['family'] = common_utils.get_socket_address_family( ip_version) devices = get_devices_with_ip(self._parent.namespace, name=self.name, **kwargs) if not filters: return devices filtered_devices = [] for device in (device for device in devices if filter_device(device, filters)): filtered_devices.append(device) return filtered_devices def wait_until_address_ready(self, address, wait_time=30): """Wait until an address is no longer marked 'tentative' raises AddressNotReady if times out or address not present on interface """ def is_address_ready(): try: addr_info = self.list(to=address)[0] except IndexError: raise AddressNotReady( address=address, reason=_('Address not present on interface')) if not addr_info['tentative']: return True if addr_info['dadfailed']: raise AddressNotReady( address=address, reason=_('Duplicate address detected')) return False errmsg = _("Exceeded %s second limit waiting for " "address to leave the tentative state.") % wait_time common_utils.wait_until_true( is_address_ready, timeout=wait_time, sleep=0.20, exception=AddressNotReady(address=address, reason=errmsg)) class IpRouteCommand(IpDeviceCommandBase): COMMAND = 'route' def __init__(self, parent, table=None): super(IpRouteCommand, self).__init__(parent) self._table = table def add_gateway(self, gateway, metric=None, table=None, scope='global'): self.add_route(None, via=gateway, table=table, metric=metric, scope=scope) def delete_gateway(self, gateway, table=None, scope=None): self.delete_route(None, device=self.name, via=gateway, table=table, scope=scope) def list_routes(self, ip_version, scope=None, via=None,
table=None, **kwargs): table = table or self._table return list_ip_routes(self._parent.namespace, ip_version, scope=scope, via=via, table=table, device=self.name, **kwargs) def list_onlink_routes(self, ip_version): routes = self.list_routes(ip_version, scope='link') return [r for r in routes if not r['source_prefix']] def add_onlink_route(self, cidr): self.add_route(cidr, scope='link') def delete_onlink_route(self, cidr): self.delete_route(cidr, device=self.name, scope='link') def get_gateway(self, scope=None, table=None, ip_version=constants.IP_VERSION_4): routes = self.list_routes(ip_version, scope=scope, table=table) for route in routes: if route['via'] and route['cidr'] in constants.IP_ANY.values(): return route def flush(self, ip_version, table=None, **kwargs): for route in self.list_routes(ip_version, table=table): self.delete_route(route['cidr'], device=route['device'], via=route['via'], table=table, **kwargs) def add_route(self, cidr, via=None, table=None, metric=None, scope=None, **kwargs): table = table or self._table add_ip_route(self._parent.namespace, cidr, device=self.name, via=via, table=table, metric=metric, scope=scope, **kwargs) def delete_route(self, cidr, device=None, via=None, table=None, scope=None, **kwargs): table = table or self._table delete_ip_route(self._parent.namespace, cidr, device=device, via=via, table=table, scope=scope, **kwargs) class IPRoute(SubProcessBase): def __init__(self, namespace=None, table=None): super(IPRoute, self).__init__(namespace=namespace) self.name = None self.route = IpRouteCommand(self, table=table) class IpNeighCommand(IpDeviceCommandBase): COMMAND = 'neigh' def add(self, ip_address, mac_address, **kwargs): add_neigh_entry(ip_address, mac_address, self.name, self._parent.namespace, **kwargs) def delete(self, ip_address, mac_address, **kwargs): delete_neigh_entry(ip_address, mac_address, self.name, self._parent.namespace, **kwargs) def dump(self, ip_version, **kwargs): return dump_neigh_entries(ip_version, self.name, self._parent.namespace, **kwargs) def flush(self, ip_version, ip_address): """Flush neighbour entries Entries matching the given address are removed from the neighbour cache (ARP or NDP). To flush all entries, pass the string 'all' as the address.
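Example (illustrative; 'device' is assumed to be an IPDevice)::

    device.neigh.flush(4, '192.0.2.7')  # remove one IPv4 neighbour entry
    device.neigh.flush(4, 'all')        # flush the whole IPv4 cache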
:param ip_version: Either 4 or 6 for IPv4 or IPv6 respectively :param ip_address: The prefix selecting the neighbours to flush """ # NOTE(haleyb): There is no equivalent to 'flush' in pyroute2 self._as_root([ip_version], ('flush', 'to', ip_address)) class IpNetnsCommand(IpCommandBase): COMMAND = 'netns' def add(self, name): create_network_namespace(name) wrapper = IPWrapper(namespace=name) wrapper.netns.execute(['sysctl', '-w', 'net.ipv4.conf.all.promote_secondaries=1']) return wrapper def delete(self, name): delete_network_namespace(name) def execute(self, cmds, addl_env=None, check_exit_code=True, log_fail_as_error=True, extra_ok_codes=None, run_as_root=False): ns_params = [] if self._parent.namespace: run_as_root = True ns_params = ['ip', 'netns', 'exec', self._parent.namespace] env_params = [] if addl_env: env_params = (['env'] + ['%s=%s' % pair for pair in addl_env.items()]) cmd = ns_params + env_params + list(cmds) return utils.execute(cmd, check_exit_code=check_exit_code, extra_ok_codes=extra_ok_codes, log_fail_as_error=log_fail_as_error, run_as_root=run_as_root) def exists(self, name): return network_namespace_exists(name) def vlan_in_use(segmentation_id, namespace=None): """Return True if VLAN ID is in use by an interface, else False.""" interfaces = get_devices_info(namespace) vlans = {interface.get('vlan_id') for interface in interfaces if interface.get('vlan_id')} return segmentation_id in vlans def vxlan_in_use(segmentation_id, namespace=None): """Return True if VXLAN VNID is in use by an interface, else False.""" interfaces = get_devices_info(namespace) vxlans = {interface.get('vxlan_id') for interface in interfaces if interface.get('vxlan_id')} return segmentation_id in vxlans def device_exists(device_name, namespace=None): """Return True if the device exists in the namespace.""" return IPDevice(device_name, namespace=namespace).exists() def device_exists_with_ips_and_mac(device_name, ip_cidrs, mac, namespace=None): """Return True if the device with the given IP addresses and MAC address exists in the namespace. """ try: device = IPDevice(device_name, namespace=namespace) if mac and mac != device.link.address: return False device_ip_cidrs = [ip['cidr'] for ip in device.addr.list()] for ip_cidr in ip_cidrs: if ip_cidr not in device_ip_cidrs: return False except RuntimeError: return False else: return True def get_device_mac(device_name, namespace=None): """Return the MAC address of the device.""" return IPDevice(device_name, namespace=namespace).link.address def get_device_mtu(device_name, namespace=None): """Return the MTU value of the device.""" return IPDevice(device_name, namespace=namespace).link.mtu NetworkNamespaceNotFound = privileged.NetworkNamespaceNotFound NetworkInterfaceNotFound = privileged.NetworkInterfaceNotFound IpAddressAlreadyExists = privileged.IpAddressAlreadyExists def add_ip_address(cidr, device, namespace=None, scope='global', add_broadcast=True): """Add an IP address. 
:param cidr: IP address to add, in CIDR notation :param device: Device name to use in adding address :param namespace: The name of the namespace in which to add the address :param scope: scope of address being added :param add_broadcast: should broadcast address be added """ net = netaddr.IPNetwork(cidr) broadcast = None if add_broadcast and net.version == 4: # NOTE(slaweq): if the cidr is /32, net.broadcast is None, so the # same IP address as the cidr should be set as the broadcast broadcast = str(net.broadcast or net.ip) privileged.add_ip_address( net.version, str(net.ip), net.prefixlen, device, namespace, scope, broadcast) def delete_ip_address(cidr, device, namespace=None): """Delete an IP address. :param cidr: IP address to delete, in CIDR notation :param device: Device name to use in deleting address :param namespace: The name of the namespace in which to delete the address """ net = netaddr.IPNetwork(cidr) privileged.delete_ip_address( net.version, str(net.ip), net.prefixlen, device, namespace) def flush_ip_addresses(ip_version, device, namespace=None): """Flush all IP addresses. :param ip_version: IP version of addresses to flush :param device: Device name to use in flushing addresses :param namespace: The name of the namespace in which to flush the addresses """ privileged.flush_ip_addresses(ip_version, device, namespace) def get_routing_table(ip_version, namespace=None): """Return a list of dictionaries, each representing a route. @param ip_version: the IP version of the routes to return, for example 4 @param namespace @return: a list of dictionaries, each representing a route. The dictionary format is: {'destination': cidr, 'nexthop': ip, 'device': device_name, 'scope': scope} """ # oslo.privsep turns lists to tuples in its IPC code. Change it back return list(privileged.get_routing_table(ip_version, namespace)) # NOTE(haleyb): These neighbour functions live outside the IpNeighCommand # class since not all callers require it. def add_neigh_entry(ip_address, mac_address, device, namespace=None, **kwargs): """Add a neighbour entry. :param ip_address: IP address of entry to add :param mac_address: MAC address of entry to add :param device: Device name to use in adding entry :param namespace: The name of the namespace in which to add the entry """ ip_version = common_utils.get_ip_version(ip_address) privileged.add_neigh_entry(ip_version, ip_address, mac_address, device, namespace, **kwargs) def delete_neigh_entry(ip_address, mac_address, device, namespace=None, **kwargs): """Delete a neighbour entry. :param ip_address: IP address of entry to delete :param mac_address: MAC address of entry to delete :param device: Device name to use in deleting entry :param namespace: The name of the namespace in which to delete the entry """ ip_version = common_utils.get_ip_version(ip_address) privileged.delete_neigh_entry(ip_version, ip_address, mac_address, device, namespace, **kwargs) def dump_neigh_entries(ip_version, device=None, namespace=None, **kwargs): """Dump all neighbour entries. :param ip_version: IP version of entries to show (4 or 6) :param device: Device name to use in dumping entries :param namespace: The name of the namespace in which to dump the entries :param kwargs: Callers add any filters they use as kwargs :return: a list of dictionaries, each representing a neighbour.
The dictionary format is: {'dst': ip_address, 'lladdr': mac_address, 'device': device_name} """ return list(privileged.dump_neigh_entries(ip_version, device, namespace, **kwargs)) def create_network_namespace(namespace, **kwargs): """Create a network namespace. :param namespace: The name of the namespace to create :param kwargs: Callers add any filters they use as kwargs """ privileged.create_netns(namespace, **kwargs) def delete_network_namespace(namespace, **kwargs): """Delete a network namespace. :param namespace: The name of the namespace to delete :param kwargs: Callers add any filters they use as kwargs """ privileged.remove_netns(namespace, **kwargs) def list_network_namespaces(**kwargs): """List all network namespace entries. :param kwargs: Callers add any filters they use as kwargs """ if cfg.CONF.AGENT.use_helper_for_ns_read: return privileged.list_netns(**kwargs) else: return netns.listnetns(**kwargs) def network_namespace_exists(namespace, try_is_ready=False, **kwargs): """Check if a network namespace exists. :param namespace: The name of the namespace to check :param try_is_ready: Try to open the namespace to know if the namespace is ready to be operated. :param kwargs: Callers add any filters they use as kwargs """ if not try_is_ready: output = list_network_namespaces(**kwargs) return namespace in output try: privileged.open_namespace(namespace) return True except (RuntimeError, OSError): pass return False def list_namespace_pids(namespace): """List namespace process PIDs :param namespace: (string) the name of the namespace :return: (tuple) the PIDs of the processes running in the namespace """ return privileged.list_ns_pids(namespace) def ensure_device_is_ready(device_name, namespace=None): dev = IPDevice(device_name, namespace=namespace) try: # Ensure the device has a MAC address and is up, even if it is already # up. if not dev.link.exists or not dev.link.address: LOG.error("Device %s cannot be used as it has no MAC " "address", device_name) return False dev.link.set_up() except RuntimeError: return False return True def iproute_arg_supported(command, arg): command += ['help'] stdout, stderr = utils.execute(command, check_exit_code=False, return_stderr=True, log_fail_as_error=False) return any(arg in line for line in stderr.split('\n')) def _arping(ns_name, iface_name, address, count, log_exception): # Due to a Linux kernel bug*, it's advised to spread gratuitous updates # more, injecting an interval between consecutive packets that is longer # than 1s which is currently hardcoded** in arping. To achieve that, we # call the arping tool the 'count' number of times, each issuing a single # ARP update, and wait between iterations. # # * https://patchwork.ozlabs.org/patch/760372/ # ** https://github.com/iputils/iputils/pull/86 first = True # Since arping is used to send gratuitous ARP, a response is # not expected. In some cases (no response) and with some # platforms (>=Ubuntu 14.04), arping exit code can be 1. extra_ok_codes = [1] ip_wrapper = IPWrapper(namespace=ns_name) for i in range(count): if not first: # hopefully enough for kernel to get out of locktime loop time.sleep(2) # On the second (and subsequent) arping calls, we can get a # "bind: Cannot assign requested address" error since # the IP address might have been deleted concurrently. # We will log an error below if this isn't the case, so # no need to have execute() log one as well. extra_ok_codes = [1, 2] first = False # some Linux kernels* don't honour REPLYs.
Send both gratuitous REQUEST # and REPLY packets (REQUESTs are kept for backwards compatibility, in # case some network peers, conversely, honor REPLYs and not REQUESTs) # # * https://patchwork.ozlabs.org/patch/763016/ for arg in ('-U', '-A'): arping_cmd = ['arping', arg, '-I', iface_name, '-c', 1, # Pass -w to set timeout to ensure exit if interface # removed while running '-w', 1.5, address] try: ip_wrapper.netns.execute(arping_cmd, extra_ok_codes=extra_ok_codes) except Exception as exc: # Since this is spawned in a thread and executed 2 seconds # apart, something may have been deleted while we were # sleeping. Downgrade message to info and return early # unless it was the first try. exists = device_exists_with_ips_and_mac(iface_name, [address], mac=None, namespace=ns_name) msg = _("Failed sending gratuitous ARP to %(addr)s on " "%(iface)s in namespace %(ns)s: %(err)s") logger_method = LOG.exception if not (log_exception and (first or exists)): logger_method = LOG.info logger_method(msg, {'addr': address, 'iface': iface_name, 'ns': ns_name, 'err': exc}) if not exists: LOG.info("Interface %(iface)s or address %(addr)s " "in namespace %(ns)s was deleted concurrently", {'iface': iface_name, 'addr': address, 'ns': ns_name}) return def send_ip_addr_adv_notif( ns_name, iface_name, address, count=3, log_exception=True, use_eventlet=True): """Send advance notification of an IP address assignment. If the address is in the IPv4 family, send gratuitous ARP. If the address is in the IPv6 family, no advance notification is necessary, since the Neighbor Discovery Protocol (NDP), Duplicate Address Discovery (DAD), and (for stateless addresses) router advertisements (RAs) are sufficient for address resolution and duplicate address detection. :param ns_name: Namespace name from which GARPs are going to be sent. :param iface_name: Name of the interface from which GARPs are going to be sent. :param address: Advertised IP address. :param count: (Optional) How many GARPs are going to be sent. Default is 3. :param log_exception: (Optional) True if possible failures should be logged on exception level. Otherwise they are logged on WARNING level. Default is True. :param use_eventlet: (Optional) True if the arping command will be spawned using eventlet, False to use Python threads (threading). """ def arping(): _arping(ns_name, iface_name, address, count, log_exception) if count > 0 and netaddr.IPAddress(address).version == 4: if use_eventlet: eventlet.spawn_n(arping) else: threading.Thread(target=arping).start() def sysctl(cmd, namespace=None, log_fail_as_error=True): """Run sysctl command 'cmd' @param cmd: a list containing the sysctl command to run @param namespace: network namespace to run command in @param log_fail_as_error: failure logged as LOG.error execute() doesn't return the exit status of the command it runs, it returns stdout and stderr. Setting check_exit_code=True will cause it to raise a RuntimeError if the exit status of the command is non-zero, which in sysctl's case is an error. So we're normalizing that into zero (success) and one (failure) here to mimic what "echo $?" in a shell would be. This is all because sysctl is too verbose and prints the value you just set on success, unlike most other utilities that print nothing. execute() will have dumped a message to the logs with the actual output on failure, so it's not lost, and we don't need to print it here.
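Example (illustrative option and namespace)::

    if sysctl(['net.ipv4.ip_forward=1'], namespace='example-ns'):
        LOG.warning('Enabling IPv4 forwarding failed')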
""" cmd = ['sysctl', '-w'] + cmd ip_wrapper = IPWrapper(namespace=namespace) try: ip_wrapper.netns.execute(cmd, run_as_root=True, log_fail_as_error=log_fail_as_error) except RuntimeError as rte: LOG.warning( "Setting %(cmd)s in namespace %(ns)s failed: %(err)s.", {'cmd': cmd, 'ns': namespace, 'err': rte}) return 1 return 0 def add_namespace_to_cmd(cmd, namespace=None): """Add an optional namespace to the command.""" return ['ip', 'netns', 'exec', namespace] + cmd if namespace else cmd def get_ipv6_lladdr(mac_addr): return '%s/64' % netaddr.EUI(mac_addr).ipv6_link_local() def get_ip_nonlocal_bind(namespace=None): """Get kernel option value of ip_nonlocal_bind in given namespace.""" cmd = ['sysctl', '-bn', IP_NONLOCAL_BIND] ip_wrapper = IPWrapper(namespace) return int(ip_wrapper.netns.execute(cmd, run_as_root=True)) def set_ip_nonlocal_bind(value, namespace=None, log_fail_as_error=True): """Set sysctl knob of ip_nonlocal_bind to given value.""" cmd = ['%s=%d' % (IP_NONLOCAL_BIND, value)] return sysctl(cmd, namespace=namespace, log_fail_as_error=log_fail_as_error) def set_ip_nonlocal_bind_for_namespace(namespace, value, root_namespace=False): """Set ip_nonlocal_bind but don't raise exception on failure.""" failed = set_ip_nonlocal_bind(value, namespace=namespace, log_fail_as_error=False) if failed and root_namespace: # Somewhere in the 3.19 kernel timeframe ip_nonlocal_bind was # changed to be a per-namespace attribute. To be backwards # compatible we need to try both if at first we fail. LOG.debug('Namespace (%s) does not support setting %s, ' 'trying in root namespace', namespace, IP_NONLOCAL_BIND) return set_ip_nonlocal_bind(value) if failed: LOG.warning( "%s will not be set to %d in the root namespace in order to " "not break DVR, which requires this value be set to 1. 
This " "may introduce a race between moving a floating IP to a " "different network node, and the peer side getting a " "populated ARP cache for a given floating IP address.", IP_NONLOCAL_BIND, value) def get_ipv6_forwarding(device, namespace=None): """Get kernel value of IPv6 forwarding for device in given namespace.""" cmd = ['sysctl', '-b', "net.ipv6.conf.%s.forwarding" % device] ip_wrapper = IPWrapper(namespace) return int(ip_wrapper.netns.execute(cmd, run_as_root=True)) def _parse_ip_rule(rule, ip_version): """Parse a pyroute2 rule and return a dictionary Parameters contained in the returned dictionary: - priority: rule priority - from: source IP address - to: (optional) destination IP address - type: rule type (see RULE_TYPES) - table: table name or number (see RULE_TABLES) - fwmark: (optional) FW mark - iif: (optional) input interface name - oif: (optional) output interface name :param rule: pyroute2 rule dictionary :param ip_version: IP version (4, 6) :return: dictionary with IP rule information """ parsed_rule = {'priority': str(rule['attrs'].get('FRA_PRIORITY', 0))} from_ip = rule['attrs'].get('FRA_SRC') if from_ip: parsed_rule['from'] = common_utils.ip_to_cidr( from_ip, prefix=rule['src_len']) if common_utils.is_cidr_host(parsed_rule['from']): parsed_rule['from'] = common_utils.cidr_to_ip(parsed_rule['from']) else: parsed_rule['from'] = constants.IP_ANY[ip_version] to_ip = rule['attrs'].get('FRA_DST') if to_ip: parsed_rule['to'] = common_utils.ip_to_cidr( to_ip, prefix=rule['dst_len']) if common_utils.is_cidr_host(parsed_rule['to']): parsed_rule['to'] = common_utils.cidr_to_ip(parsed_rule['to']) parsed_rule['type'] = IP_RULE_TYPES[rule['action']] table_num = rule['attrs']['FRA_TABLE'] for table_name in (name for (name, index) in IP_RULE_TABLES.items() if index == table_num): parsed_rule['table'] = table_name break else: parsed_rule['table'] = str(table_num) fwmark = rule['attrs'].get('FRA_FWMARK') if fwmark: fwmask = rule['attrs'].get('FRA_FWMASK') parsed_rule['fwmark'] = '{0:#x}/{1:#x}'.format(fwmark, fwmask) iifname = rule['attrs'].get('FRA_IIFNAME') if iifname: parsed_rule['iif'] = iifname oifname = rule['attrs'].get('FRA_OIFNAME') if oifname: parsed_rule['oif'] = oifname return parsed_rule def list_ip_rules(namespace, ip_version): """List all IP rules in a namespace :param namespace: namespace name :param ip_version: IP version (4, 6) :return: list of dictionaries with the rules information """ rules = privileged.list_ip_rules(namespace, ip_version) return [_parse_ip_rule(rule, ip_version) for rule in rules] def _make_pyroute2_args(ip, iif, table, priority, to): """Returns a dictionary of arguments to be used in pyroute rule commands :param ip: (string) source IP or CIDR address (IPv4, IPv6) :param iif: (string) input interface name :param table: (string, int) table number (as an int or a string) or table name ('default', 'main', 'local') :param priority: (string, int) rule priority :param to: (string) destination IP or CIDR address (IPv4, IPv6) :return: a dictionary with the kwargs needed in pyroute rule commands """ ip_version = common_utils.get_ip_version(ip) # In case we need to add a rule based on an incoming interface, no # IP address is given; the rule default source ("from") address is # "all".
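# For example (illustrative values), ip='192.0.2.0/24' with table='main'
# and priority=100 yields:
#     {'family': AF_INET, 'src': '192.0.2.0', 'src_len': 24,
#      'table': 254, 'priority': 100}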
cmd_args = {'family': common_utils.get_socket_address_family(ip_version)} if iif: cmd_args['iifname'] = iif else: cmd_args['src'] = common_utils.cidr_to_ip(ip) cmd_args['src_len'] = common_utils.cidr_mask(ip) if to: cmd_args['dst'] = common_utils.cidr_to_ip(to) cmd_args['dst_len'] = common_utils.cidr_mask(to) if table: cmd_args['table'] = IP_RULE_TABLES.get(table) or int(table) if priority: cmd_args['priority'] = int(priority) return cmd_args def _exist_ip_rule(rules, ip, iif, table, priority, to): """Check if any rule matches the conditions""" for rule in rules: if iif and rule.get('iif') != iif: continue if not iif and rule['from'] != ip: continue if table and rule.get('table') != str(table): continue if priority and rule['priority'] != str(priority): continue if to and rule.get('to') != to: continue break else: return False return True def add_ip_rule(namespace, ip, iif=None, table=None, priority=None, to=None): """Create an IP rule in a namespace :param namespace: (string) namespace name :param ip: (string) source IP or CIDR address (IPv4, IPv6) :param iif: (Optional) (string) input interface name :param table: (Optional) (string, int) table number :param priority: (Optional) (string, int) rule priority :param to: (Optional) (string) destination IP or CIDR address (IPv4, IPv6) """ ip_version = common_utils.get_ip_version(ip) rules = list_ip_rules(namespace, ip_version) if _exist_ip_rule(rules, ip, iif, table, priority, to): return cmd_args = _make_pyroute2_args(ip, iif, table, priority, to) privileged.add_ip_rule(namespace, **cmd_args) def delete_ip_rule(namespace, ip, iif=None, table=None, priority=None, to=None): """Delete an IP rule in a namespace :param namespace: (string) namespace name :param ip: (string) source IP or CIDR address (IPv4, IPv6) :param iif: (Optional) (string) input interface name :param table: (Optional) (string, int) table number :param priority: (Optional) (string, int) rule priority :param to: (Optional) (string) destination IP or CIDR address (IPv4, IPv6) """ cmd_args = _make_pyroute2_args(ip, iif, table, priority, to) privileged.delete_ip_rule(namespace, **cmd_args) def get_attr(pyroute2_obj, attr_name): """Get an attribute from a PyRoute2 object""" rule_attrs = pyroute2_obj.get('attrs', []) for attr in (attr for attr in rule_attrs if attr[0] == attr_name): return attr[1] def _parse_ip_address(pyroute2_address, device_name): ip = get_attr(pyroute2_address, 'IFA_ADDRESS') ip_length = pyroute2_address['prefixlen'] event = IP_ADDRESS_EVENTS.get(pyroute2_address.get('event')) cidr = common_utils.ip_to_cidr(ip, prefix=ip_length) flags = get_attr(pyroute2_address, 'IFA_FLAGS') dynamic = not bool(flags & ifaddrmsg.IFA_F_PERMANENT) tentative = bool(flags & ifaddrmsg.IFA_F_TENTATIVE) dadfailed = bool(flags & ifaddrmsg.IFA_F_DADFAILED) scope = IP_ADDRESS_SCOPE[pyroute2_address['scope']] return {'name': device_name, 'cidr': cidr, 'scope': scope, 'broadcast': get_attr(pyroute2_address, 'IFA_BROADCAST'), 'dynamic': dynamic, 'tentative': tentative, 'dadfailed': dadfailed, 'event': event} def _parse_link_device(namespace, device, **kwargs): """Parse pyroute2 link device information For each link device, the IP address information is retrieved and returned in a dictionary.
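Example of one returned entry (illustrative values)::

    {'name': 'eth0', 'cidr': '192.0.2.5/24', 'scope': 'global',
     'broadcast': '192.0.2.255', 'dynamic': False, 'tentative': False,
     'dadfailed': False, 'event': None}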
IP address scope: http://linux-ip.net/html/tools-ip-address.html """ retval = [] name = get_attr(device, 'IFLA_IFNAME') ip_addresses = privileged.get_ip_addresses(namespace, index=device['index'], **kwargs) for ip_address in ip_addresses: retval.append(_parse_ip_address(ip_address, name)) return retval def get_devices_with_ip(namespace, name=None, **kwargs): link_args = {} if name: link_args['ifname'] = name devices = privileged.get_link_devices(namespace, **link_args) retval = [] for parsed_ips in (_parse_link_device(namespace, device, **kwargs) for device in devices): retval += parsed_ips return retval def get_devices_info(namespace, **kwargs): devices = privileged.get_link_devices(namespace, **kwargs) retval = {} for device in devices: ret = {'index': device['index'], 'name': get_attr(device, 'IFLA_IFNAME'), 'operstate': get_attr(device, 'IFLA_OPERSTATE'), 'linkmode': get_attr(device, 'IFLA_LINKMODE'), 'mtu': get_attr(device, 'IFLA_MTU'), 'promiscuity': get_attr(device, 'IFLA_PROMISCUITY'), 'mac': get_attr(device, 'IFLA_ADDRESS'), 'broadcast': get_attr(device, 'IFLA_BROADCAST')} ifla_link = get_attr(device, 'IFLA_LINK') if ifla_link: ret['parent_index'] = ifla_link ifla_linkinfo = get_attr(device, 'IFLA_LINKINFO') if ifla_linkinfo: ret['kind'] = get_attr(ifla_linkinfo, 'IFLA_INFO_KIND') ifla_data = get_attr(ifla_linkinfo, 'IFLA_INFO_DATA') if ret['kind'] == 'vxlan': ret['vxlan_id'] = get_attr(ifla_data, 'IFLA_VXLAN_ID') ret['vxlan_group'] = get_attr(ifla_data, 'IFLA_VXLAN_GROUP') ret['vxlan_link_index'] = get_attr(ifla_data, 'IFLA_VXLAN_LINK') elif ret['kind'] == 'vlan': ret['vlan_id'] = get_attr(ifla_data, 'IFLA_VLAN_ID') retval[device['index']] = ret for device in retval.values(): if device.get('parent_index'): parent_device = retval.get(device['parent_index']) if parent_device: device['parent_name'] = parent_device['name'] elif device.get('vxlan_link_index'): device['vxlan_link_name'] = ( retval[device['vxlan_link_index']]['name']) return list(retval.values()) def ip_monitor(namespace, queue, event_stop, event_started): """Monitor IP address changes If namespace is not None, this function must be executed as root user, but cannot use privsep because it is a blocking function and can exhaust the number of working threads. """ def get_device_name(index): try: with privileged.get_iproute(namespace) as ip: device = ip.link('get', index=index) if device: attrs = device[0].get('attrs', []) for attr in (attr for attr in attrs if attr[0] == 'IFLA_IFNAME'): return attr[1] except netlink_exceptions.NetlinkError as e: if e.code == errno.ENODEV: return raise def read_ip_updates(_ip, _queue): """Read Pyroute2.IPRoute input socket The aim of this function is to open and bind an IPRoute socket only for reading the netlink changes; no other operations are done with this opened socket. This function is executed in a separate thread, dedicated only to this task.
""" _ip.bind(async_cache=True) try: while True: ip_addresses = _ip.get() for ip_address in ip_addresses: _queue.put(ip_address) except EOFError: pass _queue = eventlet.Queue() try: cache_devices = {} with privileged.get_iproute(namespace) as ip: for device in ip.get_links(): cache_devices[device['index']] = get_attr(device, 'IFLA_IFNAME') _ip = privileged.get_iproute(namespace) ip_updates_thread = threading.Thread(target=read_ip_updates, args=(_ip, _queue)) ip_updates_thread.start() event_started.set() while not event_stop.is_set(): try: ip_address = _queue.get(timeout=1) except eventlet.queue.Empty: continue if 'index' in ip_address and 'prefixlen' in ip_address: index = ip_address['index'] name = (get_device_name(index) or cache_devices.get(index)) if not name: continue cache_devices[index] = name queue.put(_parse_ip_address(ip_address, name)) _ip.close() ip_updates_thread.join(timeout=5) except OSError as e: if e.errno == errno.ENOENT: raise privileged.NetworkNamespaceNotFound(netns_name=namespace) raise def add_ip_route(namespace, cidr, device=None, via=None, table=None, metric=None, scope=None, **kwargs): """Add an IP route""" if table: table = IP_RULE_TABLES.get(table, table) ip_version = common_utils.get_ip_version(cidr or via) privileged.add_ip_route(namespace, cidr, ip_version, device=device, via=via, table=table, metric=metric, scope=scope, **kwargs) def list_ip_routes(namespace, ip_version, scope=None, via=None, table=None, device=None, **kwargs): """List IP routes""" def get_device(index, devices): for device in (d for d in devices if d['index'] == index): return get_attr(device, 'IFLA_IFNAME') table = table if table else 'main' table = IP_RULE_TABLES.get(table, table) routes = privileged.list_ip_routes(namespace, ip_version, device=device, table=table, **kwargs) devices = privileged.get_link_devices(namespace) ret = [] for route in routes: cidr = get_attr(route, 'RTA_DST') if cidr: cidr = '%s/%s' % (cidr, route['dst_len']) else: cidr = constants.IP_ANY[ip_version] table = int(get_attr(route, 'RTA_TABLE')) metric = (get_attr(route, 'RTA_PRIORITY') or IP_ROUTE_METRIC_DEFAULT[ip_version]) value = { 'table': IP_RULE_TABLES_NAMES.get(table, table), 'source_prefix': get_attr(route, 'RTA_PREFSRC'), 'cidr': cidr, 'scope': IP_ADDRESS_SCOPE[int(route['scope'])], 'device': get_device(int(get_attr(route, 'RTA_OIF')), devices), 'via': get_attr(route, 'RTA_GATEWAY'), 'metric': metric, } ret.append(value) if scope: ret = [route for route in ret if route['scope'] == scope] if via: ret = [route for route in ret if route['via'] == via] return ret def delete_ip_route(namespace, cidr, device=None, via=None, table=None, scope=None, **kwargs): """Delete an IP route""" if table: table = IP_RULE_TABLES.get(table, table) ip_version = common_utils.get_ip_version(cidr or via) privileged.delete_ip_route(namespace, cidr, ip_version, device=device, via=via, table=table, scope=scope, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/linux/ip_link_support.py0000644000175000017500000000717700000000000024701 0ustar00coreycorey00000000000000# Copyright 2014 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import re from neutron_lib import exceptions as n_exc from oslo_log import log as logging from neutron._i18n import _ from neutron.agent.linux import utils LOG = logging.getLogger(__name__) class IpLinkSupportError(n_exc.NeutronException): pass class UnsupportedIpLinkCommand(IpLinkSupportError): message = _("ip link command is not supported: %(reason)s") class InvalidIpLinkCapability(IpLinkSupportError): message = _("ip link capability %(capability)s is not supported") class IpLinkConstants(object): IP_LINK_CAPABILITY_STATE = "state" IP_LINK_CAPABILITY_VLAN = "vlan" IP_LINK_CAPABILITY_RATE = "rate" IP_LINK_CAPABILITY_MIN_TX_RATE = "min_tx_rate" IP_LINK_CAPABILITY_SPOOFCHK = "spoofchk" IP_LINK_SUB_CAPABILITY_QOS = "qos" class IpLinkSupport(object): VF_BLOCK_REGEX = r"\[ vf NUM(?P<vf_block>.*) \] \]" CAPABILITY_REGEX = r"\[ %s (.*)" SUB_CAPABILITY_REGEX = r"\[ %(cap)s (.*) \[ %(subcap)s (.*)" @classmethod def get_vf_mgmt_section(cls): """Parses ip link help output, and gets vf block""" output = cls._get_ip_link_output() vf_block_pattern = re.search(cls.VF_BLOCK_REGEX, output, re.DOTALL | re.MULTILINE) if vf_block_pattern: return vf_block_pattern.group("vf_block") @classmethod def vf_mgmt_capability_supported(cls, vf_section, capability, subcapability=None): """Validate vf capability support Checks whether the given vf capability (and sub-capability, if given) is supported :param vf_section: vf Num block content :param capability: for example: vlan, rate, spoofchk, state :param subcapability: for example: qos """ if not vf_section: return False if subcapability: regex = cls.SUB_CAPABILITY_REGEX % {"cap": capability, "subcap": subcapability} else: regex = cls.CAPABILITY_REGEX % capability pattern_match = re.search(regex, vf_section, re.DOTALL | re.MULTILINE) return pattern_match is not None @classmethod def _get_ip_link_output(cls): """Gets the output of the ip link help command Runs the ip link help command and stores its output Note: 'ip link help' returns an error and writes its output to stderr, so we read the output from there. However, if this issue is ever fixed and the command writes to stdout, we will get the output from there too. """ try: ip_cmd = ['ip', 'link', 'help'] _stdout, _stderr = utils.execute( ip_cmd, check_exit_code=False, return_stderr=True, log_fail_as_error=False) except Exception as e: LOG.exception("Failed executing ip command") raise UnsupportedIpLinkCommand(reason=e) return _stdout or _stderr ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/linux/ipset_manager.py0000644000175000017500000001714200000000000024267 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. import copy import netaddr from neutron.agent.linux import utils as linux_utils from oslo_concurrency import lockutils IPSET_ADD_BULK_THRESHOLD = 5 NET_PREFIX = 'N' SWAP_SUFFIX = '-n' IPSET_NAME_MAX_LENGTH = 31 - len(SWAP_SUFFIX) class IpsetManager(object): """Smart wrapper for ipset. Keeps track of ip addresses per set, using bulk or single ip add/remove for smaller changes. """ def __init__(self, execute=None, namespace=None): self.execute = execute or linux_utils.execute self.namespace = namespace self.ipset_sets = {} def _sanitize_addresses(self, addresses): """This method converts any address to ipset format. If an address has a mask of /0 we need to convert it to a mask of /1, as ipset does not support /0-length addresses. Instead we use two /1's to represent the /0. """ sanitized_addresses = [] for ip in addresses: ip = netaddr.IPNetwork(ip) if ip.prefixlen == 0: if ip.version == 4: sanitized_addresses.append('0.0.0.0/1') sanitized_addresses.append('128.0.0.0/1') elif ip.version == 6: sanitized_addresses.append('::/1') sanitized_addresses.append('8000::/1') else: sanitized_addresses.append(str(ip)) return sanitized_addresses @staticmethod def get_name(id, ethertype): """Returns the given ipset name for an id+ethertype pair. This reference can be used from iptables. """ name = NET_PREFIX + ethertype + id return name[:IPSET_NAME_MAX_LENGTH] def set_name_exists(self, set_name): """Returns true if the set name is known to the manager.""" return set_name in self.ipset_sets def set_members(self, id, ethertype, member_ips): """Create or update a specific set by name and ethertype. It will make sure that a set is created, updated to add / remove new members, or swapped atomically if that's faster, and return added / removed members. """ member_ips = self._sanitize_addresses(member_ips) set_name = self.get_name(id, ethertype) add_ips = self._get_new_set_ips(set_name, member_ips) del_ips = self._get_deleted_set_ips(set_name, member_ips) if add_ips or del_ips or not self.set_name_exists(set_name): self.set_members_mutate(set_name, ethertype, member_ips) return add_ips, del_ips def set_members_mutate(self, set_name, ethertype, member_ips): with lockutils.lock('neutron-ipset-%s' % self.namespace, external=True): if not self.set_name_exists(set_name): # The initial creation is handled with create/refresh to # avoid any downtime for existing sets (i.e. avoiding # a flush/restore), as the restore operation of ipset is # additive to the existing set. self._create_set(set_name, ethertype) self._refresh_set(set_name, member_ips, ethertype) # TODO(majopela,shihanzhang,haleyb): Optimize this by # gathering the system ipsets at start. So we can determine # if a normal restore is enough for initial creation. # That should speed up agent boot up time.
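# For example (illustrative): with IPSET_ADD_BULK_THRESHOLD = 5, a
# two-member change in the branch below is applied with individual
# 'ipset add'/'ipset del' calls, while a six-member change falls back
# to a full swap-based refresh.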
    def set_members_mutate(self, set_name, ethertype, member_ips):
        with lockutils.lock('neutron-ipset-%s' % self.namespace,
                            external=True):
            if not self.set_name_exists(set_name):
                # The initial creation is handled with create/refresh to
                # avoid any downtime for existing sets (i.e. avoiding
                # a flush/restore), as the restore operation of ipset is
                # additive to the existing set.
                self._create_set(set_name, ethertype)
                self._refresh_set(set_name, member_ips, ethertype)
                # TODO(majopela,shihanzhang,haleyb): Optimize this by
                # gathering the system ipsets at start. So we can determine
                # if a normal restore is enough for initial creation.
                # That should speed up agent boot up time.
            else:
                add_ips = self._get_new_set_ips(set_name, member_ips)
                del_ips = self._get_deleted_set_ips(set_name, member_ips)
                if (len(add_ips) + len(del_ips) < IPSET_ADD_BULK_THRESHOLD):
                    self._add_members_to_set(set_name, add_ips)
                    self._del_members_from_set(set_name, del_ips)
                else:
                    self._refresh_set(set_name, member_ips, ethertype)

    def destroy(self, id, ethertype, forced=False):
        with lockutils.lock('neutron-ipset-%s' % self.namespace,
                            external=True):
            set_name = self.get_name(id, ethertype)
            self._destroy(set_name, forced)

    def _add_member_to_set(self, set_name, member_ip):
        cmd = ['ipset', 'add', '-exist', set_name, member_ip]
        self._apply(cmd)
        self.ipset_sets[set_name].append(member_ip)

    def _refresh_set(self, set_name, member_ips, ethertype):
        new_set_name = set_name + SWAP_SUFFIX
        set_type = self._get_ipset_set_type(ethertype)
        process_input = ["create %s hash:net family %s" % (new_set_name,
                                                           set_type)]
        for ip in member_ips:
            process_input.append("add %s %s" % (new_set_name, ip))
        self._restore_sets(process_input)
        self._swap_sets(new_set_name, set_name)
        self._destroy(new_set_name, True)
        self.ipset_sets[set_name] = copy.copy(member_ips)

    def _del_member_from_set(self, set_name, member_ip):
        cmd = ['ipset', 'del', set_name, member_ip]
        self._apply(cmd, fail_on_errors=False)
        self.ipset_sets[set_name].remove(member_ip)

    def _create_set(self, set_name, ethertype):
        cmd = ['ipset', 'create', '-exist', set_name, 'hash:net', 'family',
               self._get_ipset_set_type(ethertype)]
        self._apply(cmd)
        self.ipset_sets[set_name] = []

    def _apply(self, cmd, input=None, fail_on_errors=True):
        input = '\n'.join(input) if input else None
        cmd_ns = []
        if self.namespace:
            cmd_ns.extend(['ip', 'netns', 'exec', self.namespace])
        cmd_ns.extend(cmd)
        self.execute(cmd_ns, run_as_root=True, process_input=input,
                     check_exit_code=fail_on_errors)

    def _get_new_set_ips(self, set_name, expected_ips):
        new_member_ips = (set(expected_ips) -
                          set(self.ipset_sets.get(set_name, [])))
        return list(new_member_ips)

    def _get_deleted_set_ips(self, set_name, expected_ips):
        deleted_member_ips = (set(self.ipset_sets.get(set_name, [])) -
                              set(expected_ips))
        return list(deleted_member_ips)

    def _add_members_to_set(self, set_name, add_ips):
        for ip in add_ips:
            if ip not in self.ipset_sets[set_name]:
                self._add_member_to_set(set_name, ip)

    def _del_members_from_set(self, set_name, del_ips):
        for ip in del_ips:
            if ip in self.ipset_sets[set_name]:
                self._del_member_from_set(set_name, ip)

    def _get_ipset_set_type(self, ethertype):
        return 'inet6' if ethertype == 'IPv6' else 'inet'

    def _restore_sets(self, process_input):
        cmd = ['ipset', 'restore', '-exist']
        self._apply(cmd, process_input)

    def _swap_sets(self, src_set, dest_set):
        cmd = ['ipset', 'swap', src_set, dest_set]
        self._apply(cmd)

    def _destroy(self, set_name, forced=False):
        if set_name in self.ipset_sets or forced:
            cmd = ['ipset', 'destroy', set_name]
            self._apply(cmd, fail_on_errors=False)
            self.ipset_sets.pop(set_name, None)
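
# For reference (editor's addition, not upstream code): with namespace=None,
# a _refresh_set() of 'NIPv4sg-uuid' with one member boils down to roughly
# this root command sequence, which is why readers of the live set never
# observe a partially populated set:
#
#     ipset restore -exist    # stdin: "create NIPv4sg-uuid-n hash:net
#                             #         family inet
#                             #         add NIPv4sg-uuid-n 10.0.0.1/32"
#     ipset swap NIPv4sg-uuid-n NIPv4sg-uuid
#     ipset destroy NIPv4sg-uuid-n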
neutron-16.0.0.0b2.dev214/neutron/agent/linux/iptables_comments.py

# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""iptables comments"""

# Do not translate these comments. These comments cannot contain a quote or
# an escape character because they will end up in a call to iptables and
# could interfere with other parameters.

SNAT_OUT = 'Perform source NAT on outgoing traffic.'
UNMATCH_DROP = 'Default drop rule for unmatched traffic.'
VM_INT_SG = 'Direct traffic from the VM interface to the security group chain.'
SG_TO_VM_SG = 'Jump to the VM specific chain.'
INPUT_TO_SG = 'Direct incoming traffic from VM to the security group chain.'
PAIR_ALLOW = 'Allow traffic from defined IP/MAC pairs.'
PAIR_DROP = 'Drop traffic without an IP/MAC allow rule.'
DHCP_CLIENT = 'Allow DHCP client traffic.'
DHCP_SPOOF = 'Prevent DHCP Spoofing by VM.'
UNMATCHED = 'Send unmatched traffic to the fallback chain.'
INVALID_DROP = ("Drop packets that appear related to an existing connection "
                "(e.g. TCP ACK/FIN) but do not have an entry in conntrack.")
ALLOW_ASSOC = ('Direct packets associated with a known session to the RETURN '
               'chain.')
PORT_SEC_ACCEPT = 'Accept all packets when port security is disabled.'
TRUSTED_ACCEPT = 'Accept all packets when port is trusted.'
IPV6_RA_DROP = 'Drop IPv6 Router Advts from VM Instance.'
IPV6_ICMP_ALLOW = 'Allow IPv6 ICMP traffic.'

neutron-16.0.0.0b2.dev214/neutron/agent/linux/iptables_firewall.py

# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
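
# A rough driver-usage sketch (editor's addition, not upstream code). The
# port dict shape is abbreviated, it assumes the agent runs as root, and it
# uses the defer_apply() helper inherited from the neutron.agent.firewall
# base class to batch rule changes into one iptables-restore transaction:
#
#     fw = IptablesFirewallDriver()
#     with fw.defer_apply():
#         fw.prepare_port_filter(port)      # port: dict with 'device',
#         fw.update_port_filter(port)       # 'fixed_ips', 'mac_address',
#                                           # 'security_groups', ...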
import collections import ctypes from ctypes import util import sys import netaddr from neutron_lib import constants from neutron_lib.utils import helpers from oslo_config import cfg from oslo_log import log as logging from oslo_utils import netutils from neutron.agent import firewall from neutron.agent.linux import ip_conntrack from neutron.agent.linux import ipset_manager from neutron.agent.linux import iptables_comments as ic from neutron.agent.linux import iptables_manager from neutron.agent.linux import utils as a_utils from neutron.common import _constants as const from neutron.common import utils as c_utils LOG = logging.getLogger(__name__) SG_CHAIN = 'sg-chain' SPOOF_FILTER = 'spoof-filter' CHAIN_NAME_PREFIX = {constants.INGRESS_DIRECTION: 'i', constants.EGRESS_DIRECTION: 'o', SPOOF_FILTER: 's'} IPSET_DIRECTION = {constants.INGRESS_DIRECTION: 'src', constants.EGRESS_DIRECTION: 'dst'} comment_rule = iptables_manager.comment_rule libc = ctypes.CDLL(util.find_library('libc.so.6')) def get_hybrid_port_name(port_name): return (constants.TAP_DEVICE_PREFIX + port_name)[:constants.LINUX_DEV_LEN] class mac_iptables(netaddr.mac_eui48): """mac format class for netaddr to match iptables representation.""" word_sep = ':' class IptablesFirewallDriver(firewall.FirewallDriver): """Driver which enforces security groups through iptables rules.""" IPTABLES_DIRECTION = {constants.INGRESS_DIRECTION: 'physdev-out', constants.EGRESS_DIRECTION: 'physdev-in'} CONNTRACK_ZONE_PER_PORT = False def __init__(self, namespace=None): self.iptables = iptables_manager.IptablesManager( state_less=True, use_ipv6=netutils.is_ipv6_enabled(), namespace=namespace) # TODO(majopela, shihanzhang): refactor out ipset to a separate # driver composed over this one self.ipset = ipset_manager.IpsetManager(namespace=namespace) # list of port which has security group self.filtered_ports = {} self.unfiltered_ports = {} self.trusted_ports = [] self.ipconntrack = ip_conntrack.get_conntrack( self.iptables.get_rules_for_table, self.filtered_ports, self.unfiltered_ports, namespace=namespace, zone_per_port=self.CONNTRACK_ZONE_PER_PORT) self._add_fallback_chain_v4v6() self._defer_apply = False self._pre_defer_filtered_ports = None self._pre_defer_unfiltered_ports = None # List of security group rules for ports residing on this host self.sg_rules = {} self.pre_sg_rules = None # List of security group member ips for ports residing on this host self.sg_members = collections.defaultdict( lambda: collections.defaultdict(list)) self.pre_sg_members = None self.enable_ipset = cfg.CONF.SECURITYGROUP.enable_ipset self.updated_rule_sg_ids = set() self.updated_sg_members = set() self.devices_with_updated_sg_members = collections.defaultdict(list) self._iptables_protocol_name_map = {} self._check_netfilter_for_bridges() @staticmethod def _check_netfilter_for_bridges(): """Check if br_netfilter is loaded and the needed flags for IPtables""" log_warning = False if not a_utils.execute( ['sysctl', '-N', 'net.bridge'], run_as_root=True, log_fail_as_error=False, check_exit_code=False): LOG.warning('Kernel module br_netfilter is not loaded.') log_warning = True if not log_warning: for proto in ('arp', 'ip', 'ip6'): key = 'net.bridge.bridge-nf-call-%stables' % proto enabled = a_utils.execute( ['sysctl', '-b', key], run_as_root=True, log_fail_as_error=False, check_exit_code=False) if enabled == '1': status = 'enabled' log_method = LOG.debug else: status = 'disabled' log_method = LOG.warning log_warning = True log_method('Key %(key)s is %(status)s', {'key': key, 
'status': status}) if log_warning: LOG.warning('Please ensure that netfilter options for bridge are ' 'enabled to provide working security groups.') @property def ports(self): return dict(self.filtered_ports, **self.unfiltered_ports) def _update_remote_security_group_members(self, sec_group_ids): for sg_id in sec_group_ids: for device in self.filtered_ports.values(): if sg_id in device.get('security_group_source_groups', []): self.devices_with_updated_sg_members[sg_id].append(device) def security_group_updated(self, action_type, sec_group_ids, device_ids=None): device_ids = device_ids or [] if action_type == 'sg_rule': self.updated_rule_sg_ids.update(sec_group_ids) elif action_type == 'sg_member': if device_ids: self.updated_sg_members.update(device_ids) else: self._update_remote_security_group_members(sec_group_ids) def process_trusted_ports(self, port_ids): """Process ports that are trusted and shouldn't be filtered.""" for port in port_ids: if port not in self.trusted_ports: jump_rule = self._generate_trusted_port_rules(port) self._add_rules_to_chain_v4v6( 'FORWARD', jump_rule, jump_rule, comment=ic.TRUSTED_ACCEPT) self.trusted_ports.append(port) def remove_trusted_ports(self, port_ids): for port in port_ids: if port in self.trusted_ports: jump_rule = self._generate_trusted_port_rules(port) self._remove_rule_from_chain_v4v6( 'FORWARD', jump_rule, jump_rule) self.trusted_ports.remove(port) def _generate_trusted_port_rules(self, port): rt = '-m physdev --%%s %s --physdev-is-bridged -j ACCEPT' % ( self._get_device_name(port)) return [rt % (self.IPTABLES_DIRECTION[constants.INGRESS_DIRECTION]), rt % (self.IPTABLES_DIRECTION[constants.EGRESS_DIRECTION])] def update_security_group_rules(self, sg_id, sg_rules): LOG.debug("Update rules of security group (%s)", sg_id) self.sg_rules[sg_id] = sg_rules def update_security_group_members(self, sg_id, sg_members): LOG.debug("Update members of security group (%s)", sg_id) self.sg_members[sg_id] = collections.defaultdict(list, sg_members) if self.enable_ipset: self._update_ipset_members(sg_id, sg_members) def _update_ipset_members(self, sg_id, sg_members): devices = self.devices_with_updated_sg_members.pop(sg_id, None) for ip_version, current_ips in sg_members.items(): add_ips, del_ips = self.ipset.set_members( sg_id, ip_version, current_ips) if devices and del_ips: # remove prefix from del_ips ips = [str(netaddr.IPNetwork(del_ip).ip) for del_ip in del_ips] self.ipconntrack.delete_conntrack_state_by_remote_ips( devices, ip_version, ips) def _set_ports(self, port): if not firewall.port_sec_enabled(port): self.unfiltered_ports[port['device']] = port self.filtered_ports.pop(port['device'], None) else: self.filtered_ports[port['device']] = port self.unfiltered_ports.pop(port['device'], None) def _unset_ports(self, port): self.unfiltered_ports.pop(port['device'], None) self.filtered_ports.pop(port['device'], None) def _remove_conntrack_entries_from_port_deleted(self, port): device_info = self.filtered_ports.get(port['device']) if not device_info: return for ethertype in [constants.IPv4, constants.IPv6]: self.ipconntrack.delete_conntrack_state_by_remote_ips( [device_info], ethertype, set()) def prepare_port_filter(self, port): LOG.debug("Preparing device (%s) filter", port['device']) self._set_ports(port) # each security group has it own chains self._setup_chains() return self.iptables.apply() def update_port_filter(self, port): LOG.debug("Updating device (%s) filter", port['device']) if port['device'] not in self.ports: LOG.info('Attempted to update port 
filter which is not ' 'filtered %s', port['device']) return self._remove_chains() self._set_ports(port) self._setup_chains() return self.iptables.apply() def remove_port_filter(self, port): LOG.debug("Removing device (%s) filter", port['device']) if port['device'] not in self.ports: LOG.info('Attempted to remove port filter which is not ' 'filtered %r', port) return self._remove_chains() self._remove_conntrack_entries_from_port_deleted(port) self._unset_ports(port) self._setup_chains() return self.iptables.apply() def _add_accept_rule_port_sec(self, port, direction): self._update_port_sec_rules(port, direction, add=True) def _remove_rule_port_sec(self, port, direction): self._update_port_sec_rules(port, direction, add=False) def _remove_rule_from_chain_v4v6(self, chain_name, ipv4_rules, ipv6_rules): for rule in ipv4_rules: self.iptables.ipv4['filter'].remove_rule(chain_name, rule) for rule in ipv6_rules: self.iptables.ipv6['filter'].remove_rule(chain_name, rule) def _setup_chains(self): """Setup ingress and egress chain for a port.""" if not self._defer_apply: self._setup_chains_apply(self.filtered_ports, self.unfiltered_ports) def _setup_chains_apply(self, ports, unfiltered_ports): self._add_chain_by_name_v4v6(SG_CHAIN) # sort by port so we always do this deterministically between # agent restarts and don't cause unnecessary rule differences for pname in sorted(ports): port = ports[pname] self._add_conntrack_jump(port) self._setup_chain(port, constants.INGRESS_DIRECTION) self._setup_chain(port, constants.EGRESS_DIRECTION) self.iptables.ipv4['filter'].add_rule(SG_CHAIN, '-j ACCEPT') self.iptables.ipv6['filter'].add_rule(SG_CHAIN, '-j ACCEPT') for port in unfiltered_ports.values(): self._add_accept_rule_port_sec(port, constants.INGRESS_DIRECTION) self._add_accept_rule_port_sec(port, constants.EGRESS_DIRECTION) def _remove_chains(self): """Remove ingress and egress chain for a port.""" if not self._defer_apply: self._remove_chains_apply(self.filtered_ports, self.unfiltered_ports) def _remove_chains_apply(self, ports, unfiltered_ports): for port in ports.values(): self._remove_chain(port, constants.INGRESS_DIRECTION) self._remove_chain(port, constants.EGRESS_DIRECTION) self._remove_chain(port, SPOOF_FILTER) self._remove_conntrack_jump(port) for port in unfiltered_ports.values(): self._remove_rule_port_sec(port, constants.INGRESS_DIRECTION) self._remove_rule_port_sec(port, constants.EGRESS_DIRECTION) self._remove_chain_by_name_v4v6(SG_CHAIN) def _setup_chain(self, port, DIRECTION): self._add_chain(port, DIRECTION) self._add_rules_by_security_group(port, DIRECTION) def _remove_chain(self, port, DIRECTION): chain_name = self._port_chain_name(port, DIRECTION) self._remove_chain_by_name_v4v6(chain_name) def _add_fallback_chain_v4v6(self): self.iptables.ipv4['filter'].add_chain('sg-fallback') self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP', comment=ic.UNMATCH_DROP) self.iptables.ipv6['filter'].add_chain('sg-fallback') self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP', comment=ic.UNMATCH_DROP) def _add_chain_by_name_v4v6(self, chain_name): self.iptables.ipv4['filter'].add_chain(chain_name) self.iptables.ipv6['filter'].add_chain(chain_name) def _remove_chain_by_name_v4v6(self, chain_name): self.iptables.ipv4['filter'].remove_chain(chain_name) self.iptables.ipv6['filter'].remove_chain(chain_name) def _add_rules_to_chain_v4v6(self, chain_name, ipv4_rules, ipv6_rules, top=False, comment=None): for rule in ipv4_rules: self.iptables.ipv4['filter'].add_rule(chain_name, rule, 
top=top, comment=comment) for rule in ipv6_rules: self.iptables.ipv6['filter'].add_rule(chain_name, rule, top=top, comment=comment) def _get_device_name(self, port): if not isinstance(port, dict): return port return port['device'] def _update_port_sec_rules(self, port, direction, add=False): # add/remove rules in FORWARD and INPUT chain device = self._get_device_name(port) jump_rule = ['-m physdev --%s %s --physdev-is-bridged ' '-j ACCEPT' % (self.IPTABLES_DIRECTION[direction], device)] if add: self._add_rules_to_chain_v4v6( 'FORWARD', jump_rule, jump_rule, comment=ic.PORT_SEC_ACCEPT) else: self._remove_rule_from_chain_v4v6('FORWARD', jump_rule, jump_rule) if direction == constants.EGRESS_DIRECTION: if add: self._add_rules_to_chain_v4v6('INPUT', jump_rule, jump_rule, comment=ic.PORT_SEC_ACCEPT) else: self._remove_rule_from_chain_v4v6( 'INPUT', jump_rule, jump_rule) def _add_chain(self, port, direction): chain_name = self._port_chain_name(port, direction) self._add_chain_by_name_v4v6(chain_name) # Note(nati) jump to the security group chain (SG_CHAIN) # This is needed because the packet may much two rule in port # if the two port is in the same host # We accept the packet at the end of SG_CHAIN. # jump to the security group chain device = self._get_device_name(port) jump_rule = ['-m physdev --%s %s --physdev-is-bridged ' '-j $%s' % (self.IPTABLES_DIRECTION[direction], device, SG_CHAIN)] # Security group chain has to be applied before unfiltered # or trusted ports self._add_rules_to_chain_v4v6('FORWARD', jump_rule, jump_rule, top=True, comment=ic.VM_INT_SG) # jump to the chain based on the device jump_rule = ['-m physdev --%s %s --physdev-is-bridged ' '-j $%s' % (self.IPTABLES_DIRECTION[direction], device, chain_name)] self._add_rules_to_chain_v4v6(SG_CHAIN, jump_rule, jump_rule, comment=ic.SG_TO_VM_SG) if direction == constants.EGRESS_DIRECTION: self._add_rules_to_chain_v4v6('INPUT', jump_rule, jump_rule, comment=ic.INPUT_TO_SG) def _get_br_device_name(self, port): return ('brq' + port['network_id'])[:constants.LINUX_DEV_LEN] def _get_jump_rules(self, port, create=True): zone = self.ipconntrack.get_device_zone(port, create=create) if not zone: return [] br_dev = self._get_br_device_name(port) port_dev = self._get_device_name(port) # match by interface for bridge input match_interface = '-i %s' match_physdev = '-m physdev --physdev-in %s' port_sg_rules = self._get_port_sg_rules(port) if self._are_sg_rules_stateful(port_sg_rules): # comment to prevent duplicate warnings for different devices using # same bridge. 
truncate start to remove prefixes comment = 'Set zone for %s' % port['device'][4:] conntrack = '--zone %s' % self.ipconntrack.get_device_zone(port) else: comment = 'Make %s stateless' % port['device'][4:] conntrack = '--notrack' rules = [] for dev, match in ((br_dev, match_physdev), (br_dev, match_interface), (port_dev, match_physdev)): match = match % dev rule = '%s -m comment --comment "%s" -j CT %s' % (match, comment, conntrack) rules.append(rule) return rules def _get_port_sg_rules(self, port): port_sg_rules = [] if not any(port.get('device_owner', '').startswith(prefix) for prefix in constants.DEVICE_OWNER_PREFIXES): port_sg_ids = port.get('security_groups', []) if port_sg_ids: for rule in self.sg_rules.get(port_sg_ids[0], []): if self.enable_ipset: port_sg_rules.append(rule) break else: port_sg_rules.extend( self._expand_sg_rule_with_remote_ips( rule, port, constants.INGRESS_DIRECTION)) if port_sg_rules: break else: port_sg_rules.extend( self._expand_sg_rule_with_remote_ips( rule, port, constants.EGRESS_DIRECTION)) if port_sg_rules: break return port_sg_rules @staticmethod def _are_sg_rules_stateful(security_group_rules): for rule in security_group_rules: return rule.get('stateful', True) return True def _add_conntrack_jump(self, port): for jump_rule in self._get_jump_rules(port): self._add_raw_rule('PREROUTING', jump_rule) def _remove_conntrack_jump(self, port): for jump_rule in self._get_jump_rules(port, create=False): self._remove_raw_rule('PREROUTING', jump_rule) def _add_raw_rule(self, chain, rule, comment=None): self.iptables.ipv4['raw'].add_rule(chain, rule, comment=comment) self.iptables.ipv6['raw'].add_rule(chain, rule, comment=comment) def _remove_raw_rule(self, chain, rule): self.iptables.ipv4['raw'].remove_rule(chain, rule) self.iptables.ipv6['raw'].remove_rule(chain, rule) def _split_sgr_by_ethertype(self, security_group_rules): ipv4_sg_rules = [] ipv6_sg_rules = [] for rule in security_group_rules: if rule.get('ethertype') == constants.IPv4: ipv4_sg_rules.append(rule) elif rule.get('ethertype') == constants.IPv6: if rule.get('protocol') in const.IPV6_ICMP_LEGACY_PROTO_LIST: rule['protocol'] = constants.PROTO_NAME_IPV6_ICMP ipv6_sg_rules.append(rule) return ipv4_sg_rules, ipv6_sg_rules def _select_sgr_by_direction(self, port, direction): return [rule for rule in port.get('security_group_rules', []) if rule['direction'] == direction] def _setup_spoof_filter_chain(self, port, table, mac_ip_pairs, rules): if mac_ip_pairs: chain_name = self._port_chain_name(port, SPOOF_FILTER) table.add_chain(chain_name) for mac, ip in mac_ip_pairs: if ip is None: # If fixed_ips is [] this rule will be added to the end # of the list after the allowed_address_pair rules. 
table.add_rule(chain_name, '-m mac --mac-source %s -j RETURN' % mac.upper(), comment=ic.PAIR_ALLOW) else: # we need to convert it into a prefix to match iptables ip = c_utils.ip_to_cidr(ip) table.add_rule(chain_name, '-s %s -m mac --mac-source %s -j RETURN' % (ip, mac.upper()), comment=ic.PAIR_ALLOW) table.add_rule(chain_name, '-j DROP', comment=ic.PAIR_DROP) rules.append('-j $%s' % chain_name) def _build_ipv4v6_mac_ip_list(self, mac, ip_address, mac_ipv4_pairs, mac_ipv6_pairs): mac = str(netaddr.EUI(mac, dialect=mac_iptables)) if netaddr.IPNetwork(ip_address).version == 4: mac_ipv4_pairs.append((mac, ip_address)) else: mac_ipv6_pairs.append((mac, ip_address)) lla = str(netutils.get_ipv6_addr_by_EUI64( constants.IPv6_LLA_PREFIX, mac)) if (mac, lla) not in mac_ipv6_pairs: # only add once so we don't generate duplicate rules mac_ipv6_pairs.append((mac, lla)) def _spoofing_rule(self, port, ipv4_rules, ipv6_rules): # Fixed rules for traffic sourced from unspecified addresses: 0.0.0.0 # and :: # Allow dhcp client discovery and request ipv4_rules += [comment_rule('-s 0.0.0.0/32 -d 255.255.255.255/32 ' '-p udp -m udp --sport 68 --dport 67 ' '-j RETURN', comment=ic.DHCP_CLIENT)] # Allow neighbor solicitation and multicast listener discovery # from the unspecified address for duplicate address detection for icmp6_type in constants.ICMPV6_ALLOWED_UNSPEC_ADDR_TYPES: ipv6_rules += [comment_rule('-s ::/128 -d ff02::/16 ' '-p ipv6-icmp -m icmp6 ' '--icmpv6-type %s -j RETURN' % icmp6_type, comment=ic.IPV6_ICMP_ALLOW)] mac_ipv4_pairs = [] mac_ipv6_pairs = [] if isinstance(port.get('allowed_address_pairs'), list): for address_pair in port['allowed_address_pairs']: self._build_ipv4v6_mac_ip_list(address_pair['mac_address'], address_pair['ip_address'], mac_ipv4_pairs, mac_ipv6_pairs) for ip in port['fixed_ips']: self._build_ipv4v6_mac_ip_list(port['mac_address'], ip, mac_ipv4_pairs, mac_ipv6_pairs) if not port['fixed_ips']: mac_ipv4_pairs.append((port['mac_address'], None)) mac_ipv6_pairs.append((port['mac_address'], None)) self._setup_spoof_filter_chain(port, self.iptables.ipv4['filter'], mac_ipv4_pairs, ipv4_rules) self._setup_spoof_filter_chain(port, self.iptables.ipv6['filter'], mac_ipv6_pairs, ipv6_rules) # Fixed rules for traffic after source address is verified # Allow dhcp client renewal and rebinding ipv4_rules += [comment_rule('-p udp -m udp --sport 68 --dport 67 ' '-j RETURN', comment=ic.DHCP_CLIENT)] # Drop Router Advts from the port. 
ipv6_rules += [comment_rule('-p ipv6-icmp -m icmp6 --icmpv6-type %s ' '-j DROP' % constants.ICMPV6_TYPE_RA, comment=ic.IPV6_RA_DROP)] ipv6_rules += [comment_rule('-p ipv6-icmp -j RETURN', comment=ic.IPV6_ICMP_ALLOW)] ipv6_rules += [comment_rule('-p udp -m udp --sport 546 ' '--dport 547 ' '-j RETURN', comment=ic.DHCP_CLIENT)] def _drop_dhcp_rule(self, ipv4_rules, ipv6_rules): # Note(nati) Drop dhcp packet from VM ipv4_rules += [comment_rule('-p udp -m udp --sport 67 ' '--dport 68 ' '-j DROP', comment=ic.DHCP_SPOOF)] ipv6_rules += [comment_rule('-p udp -m udp --sport 547 ' '--dport 546 ' '-j DROP', comment=ic.DHCP_SPOOF)] def _accept_inbound_icmpv6(self): # Allow multicast listener, neighbor solicitation and # neighbor advertisement into the instance icmpv6_rules = [] for icmp6_type in firewall.ICMPV6_ALLOWED_INGRESS_TYPES: icmpv6_rules += ['-p ipv6-icmp -m icmp6 --icmpv6-type %s ' '-j RETURN' % icmp6_type] return icmpv6_rules def _select_sg_rules_for_port(self, port, direction): """Select rules from the security groups the port is member of.""" port_sg_ids = port.get('security_groups', []) port_rules = [] for sg_id in port_sg_ids: for rule in self.sg_rules.get(sg_id, []): if rule['direction'] == direction: if self.enable_ipset: port_rules.append(rule) else: port_rules.extend( self._expand_sg_rule_with_remote_ips( rule, port, direction)) return port_rules def _expand_sg_rule_with_remote_ips(self, rule, port, direction): """Expand a remote group rule to rule per remote group IP.""" remote_group_id = rule.get('remote_group_id') if remote_group_id: ethertype = rule['ethertype'] port_ips = port.get('fixed_ips', []) for ip in self.sg_members[remote_group_id][ethertype]: if ip not in port_ips: ip_rule = rule.copy() direction_ip_prefix = firewall.DIRECTION_IP_PREFIX[ direction] ip_prefix = str(netaddr.IPNetwork(ip).cidr) ip_rule[direction_ip_prefix] = ip_prefix yield ip_rule else: yield rule def _get_remote_sg_ids(self, port, direction=None): sg_ids = port.get('security_groups', []) remote_sg_ids = {constants.IPv4: set(), constants.IPv6: set()} for sg_id in sg_ids: for rule in self.sg_rules.get(sg_id, []): if not direction or rule['direction'] == direction: remote_sg_id = rule.get('remote_group_id') ether_type = rule.get('ethertype') if remote_sg_id and ether_type: remote_sg_ids[ether_type].add(remote_sg_id) return remote_sg_ids def _add_rules_by_security_group(self, port, direction): # select rules for current port and direction security_group_rules = self._select_sgr_by_direction(port, direction) security_group_rules += self._select_sg_rules_for_port(port, direction) # split groups by ip version # for ipv4, iptables command is used # for ipv6, iptables6 command is used ipv4_sg_rules, ipv6_sg_rules = self._split_sgr_by_ethertype( security_group_rules) ipv4_iptables_rules = [] ipv6_iptables_rules = [] # include fixed egress/ingress rules if direction == constants.EGRESS_DIRECTION: self._add_fixed_egress_rules(port, ipv4_iptables_rules, ipv6_iptables_rules) elif direction == constants.INGRESS_DIRECTION: ipv6_iptables_rules += self._accept_inbound_icmpv6() # include IPv4 and IPv6 iptable rules from security group ipv4_iptables_rules += self._convert_sgr_to_iptables_rules( ipv4_sg_rules) ipv6_iptables_rules += self._convert_sgr_to_iptables_rules( ipv6_sg_rules) # finally add the rules to the port chain for a given direction self._add_rules_to_chain_v4v6(self._port_chain_name(port, direction), ipv4_iptables_rules, ipv6_iptables_rules) def _add_fixed_egress_rules(self, port, ipv4_iptables_rules, 
ipv6_iptables_rules): self._spoofing_rule(port, ipv4_iptables_rules, ipv6_iptables_rules) self._drop_dhcp_rule(ipv4_iptables_rules, ipv6_iptables_rules) def _generate_ipset_rule_args(self, sg_rule, remote_gid): ethertype = sg_rule.get('ethertype') ipset_name = self.ipset.get_name(remote_gid, ethertype) if not self.ipset.set_name_exists(ipset_name): # NOTE(mangelajo): ipsets for empty groups are not created # thus we can't reference them. return None ipset_direction = IPSET_DIRECTION[sg_rule.get('direction')] args = self._generate_protocol_and_port_args(sg_rule) args += ['-m set', '--match-set', ipset_name, ipset_direction] args += ['-j RETURN'] return args def _generate_protocol_and_port_args(self, sg_rule): is_port = (sg_rule.get('source_port_range_min') is not None or sg_rule.get('port_range_min') is not None) args = self._protocol_arg(sg_rule.get('protocol'), is_port) args += self._port_arg('sport', sg_rule.get('protocol'), sg_rule.get('source_port_range_min'), sg_rule.get('source_port_range_max')) args += self._port_arg('dport', sg_rule.get('protocol'), sg_rule.get('port_range_min'), sg_rule.get('port_range_max')) return args def _generate_plain_rule_args(self, sg_rule): # These arguments MUST be in the format iptables-save will # display them: source/dest, protocol, sport, dport, target # Otherwise the iptables_manager code won't be able to find # them to preserve their [packet:byte] counts. args = self._ip_prefix_arg('s', sg_rule.get('source_ip_prefix')) args += self._ip_prefix_arg('d', sg_rule.get('dest_ip_prefix')) args += self._generate_protocol_and_port_args(sg_rule) args += ['-j RETURN'] return args def _convert_sg_rule_to_iptables_args(self, sg_rule): remote_gid = sg_rule.get('remote_group_id') if self.enable_ipset and remote_gid: return self._generate_ipset_rule_args(sg_rule, remote_gid) else: return self._generate_plain_rule_args(sg_rule) def _convert_sgr_to_iptables_rules(self, security_group_rules): iptables_rules = [] self._allow_established(iptables_rules) seen_sg_rules = set() for rule in security_group_rules: args = self._convert_sg_rule_to_iptables_args(rule) if args: rule_command = ' '.join(args) if rule_command in seen_sg_rules: # since these rules are from multiple security groups, # there may be duplicates so we prune them out here continue seen_sg_rules.add(rule_command) iptables_rules.append(rule_command) self._drop_invalid_packets(iptables_rules) iptables_rules += [comment_rule('-j $sg-fallback', comment=ic.UNMATCHED)] return iptables_rules def _drop_invalid_packets(self, iptables_rules): # Always drop invalid packets iptables_rules += [comment_rule('-m state --state ' 'INVALID -j DROP', comment=ic.INVALID_DROP)] return iptables_rules def _allow_established(self, iptables_rules): # Allow established connections iptables_rules += [comment_rule( '-m state --state RELATED,ESTABLISHED -j RETURN', comment=ic.ALLOW_ASSOC)] return iptables_rules def _local_protocol_name_map(self): local_protocol_name_map = {} try: class protoent(ctypes.Structure): _fields_ = [("p_name", ctypes.c_char_p), ("p_aliases", ctypes.POINTER(ctypes.c_char_p)), ("p_proto", ctypes.c_int)] libc.getprotoent.restype = ctypes.POINTER(protoent) libc.setprotoent(0) while True: pr = libc.getprotoent() if not pr: break r = pr[0] p_name = helpers.safe_decode_utf8(r.p_name) local_protocol_name_map[str(r.p_proto)] = p_name except Exception: LOG.exception("Unable to create local protocol name map: %s", sys.exc_info()[0]) finally: libc.endprotoent() return local_protocol_name_map def _protocol_name_map(self): 
if not self._iptables_protocol_name_map: tmp_map = constants.IPTABLES_PROTOCOL_NAME_MAP.copy() tmp_map.update(self._local_protocol_name_map()) self._iptables_protocol_name_map = tmp_map return self._iptables_protocol_name_map def _iptables_protocol_name(self, protocol): # protocol zero is a special case and requires no '-p' if protocol and protocol != '0': return self._protocol_name_map().get(protocol, protocol) def _protocol_arg(self, protocol, is_port): iptables_rule = [] rule_protocol = self._iptables_protocol_name(protocol) # protocol zero is a special case and requires no '-p' if rule_protocol: iptables_rule = ['-p', rule_protocol] if (is_port and rule_protocol in constants.IPTABLES_PROTOCOL_MAP): # iptables adds '-m protocol' when the port number is specified iptables_rule += [ '-m', constants.IPTABLES_PROTOCOL_MAP[rule_protocol] ] return iptables_rule def _port_arg(self, direction, protocol, port_range_min, port_range_max): args = [] if port_range_min is None: return args protocol = self._iptables_protocol_name(protocol) if protocol in ['icmp', 'ipv6-icmp']: protocol_type = 'icmpv6' if protocol == 'ipv6-icmp' else 'icmp' # Note(xuhanp): port_range_min/port_range_max represent # icmp type/code when protocol is icmp or icmpv6 args += ['--%s-type' % protocol_type, '%s' % port_range_min] # icmp code can be 0 so we cannot use "if port_range_max" here if port_range_max is not None: args[-1] += '/%s' % port_range_max elif protocol in const.SG_PORT_PROTO_NAMES: # iptables protocols that support --dport, --sport and -m multiport if port_range_min == port_range_max: if protocol in const.IPTABLES_MULTIPORT_ONLY_PROTOCOLS: # use -m multiport, but without a port range args += ['-m', 'multiport', '--%ss' % direction, '%s' % port_range_min] else: args += ['--%s' % direction, '%s' % port_range_min] else: args += ['-m', 'multiport', '--%ss' % direction, '%s:%s' % (port_range_min, port_range_max)] return args def _ip_prefix_arg(self, direction, ip_prefix): # NOTE (nati) : source_group_id is converted to list of source_ # ip_prefix in server side if ip_prefix: if '/' not in ip_prefix: # we need to convert it into a prefix to match iptables ip_prefix = c_utils.ip_to_cidr(ip_prefix) elif ip_prefix.endswith('/0'): # an allow for every address is not a constraint so # iptables drops it return [] return ['-%s' % direction, ip_prefix] return [] def _port_chain_name(self, port, direction): return iptables_manager.get_chain_name( '%s%s' % (CHAIN_NAME_PREFIX[direction], port['device'][3:])) def filter_defer_apply_on(self): if not self._defer_apply: self.iptables.defer_apply_on() self._pre_defer_filtered_ports = dict(self.filtered_ports) self._pre_defer_unfiltered_ports = dict(self.unfiltered_ports) self.pre_sg_members = dict(self.sg_members) self.pre_sg_rules = dict(self.sg_rules) self._defer_apply = True def _remove_unused_security_group_info(self): """Remove any unnecessary local security group info or unused ipsets. This function has to be called after applying the last iptables rules, so we're in a point where no iptable rule depends on an ipset we're going to delete. 
""" filtered_ports = self.filtered_ports.values() remote_sgs_to_remove = self._determine_remote_sgs_to_remove( filtered_ports) for ip_version, remote_sg_ids in remote_sgs_to_remove.items(): if self.enable_ipset: self._remove_ipsets_for_remote_sgs(ip_version, remote_sg_ids) self._remove_sg_members(remote_sgs_to_remove) # Remove unused security group rules for remove_group_id in self._determine_sg_rules_to_remove( filtered_ports): self.sg_rules.pop(remove_group_id, None) def _determine_remote_sgs_to_remove(self, filtered_ports): """Calculate which remote security groups we don't need anymore. We do the calculation for each ip_version. """ sgs_to_remove_per_ipversion = {constants.IPv4: set(), constants.IPv6: set()} remote_group_id_sets = self._get_remote_sg_ids_sets_by_ipversion( filtered_ports) for ip_version, remote_group_id_set in remote_group_id_sets.items(): sgs_to_remove_per_ipversion[ip_version].update( set(self.pre_sg_members) - remote_group_id_set) return sgs_to_remove_per_ipversion def _get_remote_sg_ids_sets_by_ipversion(self, filtered_ports): """Given a port, calculates the remote sg references by ip_version.""" remote_group_id_sets = {constants.IPv4: set(), constants.IPv6: set()} for port in filtered_ports: remote_sg_ids = self._get_remote_sg_ids(port) for ip_version in (constants.IPv4, constants.IPv6): remote_group_id_sets[ip_version] |= remote_sg_ids[ip_version] return remote_group_id_sets def _determine_sg_rules_to_remove(self, filtered_ports): """Calculate which security groups need to be removed. We find out by subtracting our previous sg group ids, with the security groups associated to a set of ports. """ port_group_ids = self._get_sg_ids_set_for_ports(filtered_ports) return set(self.pre_sg_rules) - port_group_ids def _get_sg_ids_set_for_ports(self, filtered_ports): """Get the port security group ids as a set.""" port_group_ids = set() for port in filtered_ports: port_group_ids.update(port.get('security_groups', [])) return port_group_ids def _remove_ipsets_for_remote_sgs(self, ip_version, remote_sg_ids): """Remove system ipsets matching the provided parameters.""" for remote_sg_id in remote_sg_ids: self.ipset.destroy(remote_sg_id, ip_version) def _remove_sg_members(self, remote_sgs_to_remove): """Remove sg_member entries.""" ipv4_sec_group_set = remote_sgs_to_remove.get(constants.IPv4) ipv6_sec_group_set = remote_sgs_to_remove.get(constants.IPv6) for sg_id in (ipv4_sec_group_set & ipv6_sec_group_set): if sg_id in self.sg_members: del self.sg_members[sg_id] def _find_deleted_sg_rules(self, sg_id): del_rules = list() for pre_rule in self.pre_sg_rules.get(sg_id, []): if pre_rule not in self.sg_rules.get(sg_id, []): del_rules.append(pre_rule) return del_rules def _find_devices_on_security_group(self, sg_id): device_list = list() for device in self.filtered_ports.values(): if sg_id in device.get('security_groups', []): device_list.append(device) return device_list def _clean_deleted_sg_rule_conntrack_entries(self): deleted_sg_ids = set() for sg_id in set(self.updated_rule_sg_ids): del_rules = self._find_deleted_sg_rules(sg_id) if not del_rules: continue device_list = self._find_devices_on_security_group(sg_id) for rule in del_rules: self.ipconntrack.delete_conntrack_state_by_rule( device_list, rule) deleted_sg_ids.add(sg_id) for id in deleted_sg_ids: self.updated_rule_sg_ids.remove(id) def _clean_updated_sg_member_conntrack_entries(self): updated_device_ids = set() for device in set(self.updated_sg_members): sec_group_change = False device_info = 
self.filtered_ports.get(device) pre_device_info = self._pre_defer_filtered_ports.get(device) if not device_info or not pre_device_info: continue for sg_id in pre_device_info.get('security_groups', []): if sg_id not in device_info.get('security_groups', []): sec_group_change = True break if not sec_group_change: continue for ethertype in [constants.IPv4, constants.IPv6]: self.ipconntrack.delete_conntrack_state_by_remote_ips( [device_info], ethertype, set()) updated_device_ids.add(device) for id in updated_device_ids: self.updated_sg_members.remove(id) def _clean_deleted_remote_sg_members_conntrack_entries(self): deleted_sg_ids = set() for sg_id, devices in self.devices_with_updated_sg_members.items(): for ethertype in [constants.IPv4, constants.IPv6]: pre_ips = self._get_sg_members( self.pre_sg_members, sg_id, ethertype) cur_ips = self._get_sg_members( self.sg_members, sg_id, ethertype) ips = (pre_ips - cur_ips) if devices and ips: self.ipconntrack.delete_conntrack_state_by_remote_ips( devices, ethertype, ips) deleted_sg_ids.add(sg_id) for id in deleted_sg_ids: self.devices_with_updated_sg_members.pop(id, None) def _remove_conntrack_entries_from_sg_updates(self): self._clean_deleted_sg_rule_conntrack_entries() self._clean_updated_sg_member_conntrack_entries() if not self.enable_ipset: self._clean_deleted_remote_sg_members_conntrack_entries() def _get_sg_members(self, sg_info, sg_id, ethertype): return set(sg_info.get(sg_id, {}).get(ethertype, [])) def filter_defer_apply_off(self): if self._defer_apply: self._defer_apply = False self._remove_chains_apply(self._pre_defer_filtered_ports, self._pre_defer_unfiltered_ports) self._setup_chains_apply(self.filtered_ports, self.unfiltered_ports) self.iptables.defer_apply_off() self._remove_conntrack_entries_from_sg_updates() self._remove_unused_security_group_info() self._pre_defer_filtered_ports = None self._pre_defer_unfiltered_ports = None class OVSHybridIptablesFirewallDriver(IptablesFirewallDriver): OVS_HYBRID_PLUG_REQUIRED = True CONNTRACK_ZONE_PER_PORT = True def _port_chain_name(self, port, direction): return iptables_manager.get_chain_name( '%s%s' % (CHAIN_NAME_PREFIX[direction], port['device'])) def _get_br_device_name(self, port): return ('qvb' + port['device'])[:constants.LINUX_DEV_LEN] def _get_device_name(self, port): device_name = super( OVSHybridIptablesFirewallDriver, self)._get_device_name(port) return get_hybrid_port_name(device_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/linux/iptables_manager.py0000644000175000017500000010146100000000000024744 0ustar00coreycorey00000000000000# Copyright 2012 Locaweb. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # based on # https://github.com/openstack/nova/blob/master/nova/network/linux_net.py """Implements iptables rules using linux utilities.""" import collections import contextlib import difflib import os import re import sys from neutron_lib import constants from neutron_lib import exceptions from neutron_lib.exceptions import l3 as l3_exc from neutron_lib.utils import runtime from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from neutron._i18n import _ from neutron.agent.linux import ip_lib from neutron.agent.linux import iptables_comments as ic from neutron.agent.linux import utils as linux_utils from neutron.common import _constants as n_const from neutron.common import utils from neutron.conf.agent import common as config LOG = logging.getLogger(__name__) config.register_iptables_opts(cfg.CONF) # NOTE(vish): Iptables supports chain names of up to 28 characters, and we # add up to 12 characters to binary_name which is used as a prefix, # so we limit it to 16 characters. # (max_chain_name_length - len('-POSTROUTING') == 16) def get_binary_name(): """Grab the name of the binary we're running in.""" return os.path.basename(sys.argv[0])[:16].replace(' ', '_') binary_name = get_binary_name() # Number of iptables rules to print before and after a rule that causes a # a failure during iptables-restore IPTABLES_ERROR_LINES_OF_CONTEXT = 5 # RESOURCE_PROBLEM in include/xtables.h XTABLES_RESOURCE_PROBLEM_CODE = 4 # xlock wait interval, in microseconds XLOCK_WAIT_INTERVAL = 200000 def comment_rule(rule, comment): if not cfg.CONF.AGENT.comment_iptables_rules or not comment: return rule # iptables-save outputs the comment before the jump so we need to match # that order so _find_last_entry works comment = '-m comment --comment "%s"' % comment if rule.startswith('-j'): # this is a jump only rule so we just put the comment first return '%s %s' % (comment, rule) try: jpos = rule.index(' -j ') return ' '.join((rule[:jpos], comment, rule[jpos + 1:])) except ValueError: return '%s %s' % (rule, comment) def get_chain_name(chain_name, wrap=True): if wrap: return chain_name[:constants.MAX_IPTABLES_CHAIN_LEN_WRAP] else: return chain_name[:constants.MAX_IPTABLES_CHAIN_LEN_NOWRAP] class IptablesRule(object): """An iptables rule. You shouldn't need to use this class directly, it's only used by IptablesManager. """ def __init__(self, chain, rule, wrap=True, top=False, binary_name=binary_name, tag=None, comment=None): self.chain = get_chain_name(chain, wrap) self.rule = rule self.wrap = wrap self.top = top self.wrap_name = binary_name[:16] self.tag = tag self.comment = comment def __eq__(self, other): return ((self.chain == other.chain) and (self.rule == other.rule) and (self.top == other.top) and (self.wrap == other.wrap)) def __ne__(self, other): return not self == other def __str__(self): if self.wrap: chain = '%s-%s' % (self.wrap_name, self.chain) else: chain = self.chain rule = '-A %s %s' % (chain, self.rule) # If self.rule is '' the above will cause a trailing space, which # could cause us to not match on save/restore, so strip it now. return comment_rule(rule.strip(), self.comment) class IptablesTable(object): """An iptables table.""" def __init__(self, binary_name=binary_name): self.rules = [] self.remove_rules = [] self.chains = set() self.unwrapped_chains = set() self.remove_chains = set() self.wrap_name = binary_name[:16] def add_chain(self, name, wrap=True): """Adds a named chain to the table. 
The chain name is wrapped to be unique for the component creating it, so different components of Nova can safely create identically named chains without interfering with one another. At the moment, its wrapped name is -, so if neutron-openvswitch-agent creates a chain named 'OUTPUT', it'll actually end up being named 'neutron-openvswi-OUTPUT'. """ name = get_chain_name(name, wrap) if wrap: self.chains.add(name) else: self.unwrapped_chains.add(name) def _select_chain_set(self, wrap): if wrap: return self.chains else: return self.unwrapped_chains def remove_chain(self, name, wrap=True): """Remove named chain. This removal "cascades". All rule in the chain are removed, as are all rules in other chains that jump to it. If the chain is not found, this is merely logged. """ name = get_chain_name(name, wrap) chain_set = self._select_chain_set(wrap) if name not in chain_set: LOG.debug('Attempted to remove chain %s which does not exist', name) return chain_set.remove(name) if not wrap: # non-wrapped chains and rules need to be dealt with specially, # so we keep a list of them to be iterated over in apply() self.remove_chains.add(name) # Add rules to remove that have a matching chain name or # a matching jump chain jump_snippet = '-j %s' % name self.remove_rules += [str(r) for r in self.rules if r.chain == name or jump_snippet in r.rule] else: jump_snippet = '-j %s-%s' % (self.wrap_name, name) # Remove rules from list that have a matching chain name or # a matching jump chain self.rules = [r for r in self.rules if r.chain != name and jump_snippet not in r.rule] def add_rule(self, chain, rule, wrap=True, top=False, tag=None, comment=None): """Add a rule to the table. This is just like what you'd feed to iptables, just without the '-A ' bit at the start. However, if you need to jump to one of your wrapped chains, prepend its name with a '$' which will ensure the wrapping is applied correctly. """ chain = get_chain_name(chain, wrap) if wrap and chain not in self.chains: raise LookupError(_('Unknown chain: %r') % chain) if '$' in rule: rule = ' '.join( self._wrap_target_chain(e, wrap) for e in rule.split(' ')) self.rules.append(IptablesRule(chain, rule, wrap, top, self.wrap_name, tag, comment)) def _wrap_target_chain(self, s, wrap): if s.startswith('$'): s = ('%s-%s' % (self.wrap_name, get_chain_name(s[1:], wrap))) return s def remove_rule(self, chain, rule, wrap=True, top=False, comment=None): """Remove a rule from a chain. Note: The rule must be exactly identical to the one that was added. You cannot switch arguments around like you can with the iptables CLI tool. 
""" chain = get_chain_name(chain, wrap) try: if '$' in rule: rule = ' '.join( self._wrap_target_chain(e, wrap) for e in rule.split(' ')) self.rules.remove(IptablesRule(chain, rule, wrap, top, self.wrap_name, comment=comment)) if not wrap: self.remove_rules.append(str(IptablesRule(chain, rule, wrap, top, self.wrap_name, comment=comment))) except ValueError: LOG.warning('Tried to remove rule that was not there:' ' %(chain)r %(rule)r %(wrap)r %(top)r', {'chain': chain, 'rule': rule, 'top': top, 'wrap': wrap}) def _get_chain_rules(self, chain, wrap): chain = get_chain_name(chain, wrap) return [rule for rule in self.rules if rule.chain == chain and rule.wrap == wrap] def empty_chain(self, chain, wrap=True): """Remove all rules from a chain.""" chained_rules = self._get_chain_rules(chain, wrap) for rule in chained_rules: self.rules.remove(rule) def clear_rules_by_tag(self, tag): if not tag: return rules = [rule for rule in self.rules if rule.tag == tag] for rule in rules: self.rules.remove(rule) class IptablesManager(object): """Wrapper for iptables. See IptablesTable for some usage docs A number of chains are set up to begin with. First, neutron-filter-top. It's added at the top of FORWARD and OUTPUT. Its name is not wrapped, so it's shared between the various neutron workers. It's intended for rules that need to live at the top of the FORWARD and OUTPUT chains. It's in both the ipv4 and ipv6 set of tables. For ipv4 and ipv6, the built-in INPUT, OUTPUT, and FORWARD filter chains are wrapped, meaning that the "real" INPUT chain has a rule that jumps to the wrapped INPUT chain, etc. Additionally, there's a wrapped chain named "local" which is jumped to from neutron-filter-top. For ipv4, the built-in PREROUTING, OUTPUT, and POSTROUTING nat chains are wrapped in the same was as the built-in filter chains. Additionally, there's a snat chain that is applied after the POSTROUTING chain. """ # Flag to denote we've already tried and used -w successfully, so don't # run iptables-restore without it. use_table_lock = False # Flag to denote iptables supports --random-fully argument _random_fully = None def __init__(self, _execute=None, state_less=False, use_ipv6=False, nat=True, namespace=None, binary_name=binary_name): if _execute: self.execute = _execute else: self.execute = linux_utils.execute self.use_ipv6 = use_ipv6 self.namespace = namespace self.iptables_apply_deferred = False self.wrap_name = binary_name[:16] self.ipv4 = {'filter': IptablesTable(binary_name=self.wrap_name)} self.ipv6 = {'filter': IptablesTable(binary_name=self.wrap_name)} # Add a neutron-filter-top chain. It's intended to be shared # among the various neutron components. It sits at the very top # of FORWARD and OUTPUT. 
for tables in [self.ipv4, self.ipv6]: tables['filter'].add_chain('neutron-filter-top', wrap=False) tables['filter'].add_rule('FORWARD', '-j neutron-filter-top', wrap=False, top=True) tables['filter'].add_rule('OUTPUT', '-j neutron-filter-top', wrap=False, top=True) tables['filter'].add_chain('local') tables['filter'].add_rule('neutron-filter-top', '-j $local', wrap=False) self.ipv4.update({'raw': IptablesTable(binary_name=self.wrap_name)}) self.ipv6.update({'raw': IptablesTable(binary_name=self.wrap_name)}) # Wrap the built-in chains builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}, 6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}} builtin_chains[4].update({'raw': ['PREROUTING', 'OUTPUT']}) builtin_chains[6].update({'raw': ['PREROUTING', 'OUTPUT']}) self._configure_builtin_chains(builtin_chains) if not state_less: self.initialize_mangle_table() if nat: self.initialize_nat_table() def initialize_mangle_table(self): self.ipv4.update( {'mangle': IptablesTable(binary_name=self.wrap_name)}) self.ipv6.update( {'mangle': IptablesTable(binary_name=self.wrap_name)}) builtin_chains = { 4: {'mangle': ['PREROUTING', 'INPUT', 'FORWARD', 'OUTPUT', 'POSTROUTING']}, 6: {'mangle': ['PREROUTING', 'INPUT', 'FORWARD', 'OUTPUT', 'POSTROUTING']}} self._configure_builtin_chains(builtin_chains) # Add a mark chain to mangle PREROUTING chain. It is used to # identify ingress packets from a certain interface. self.ipv4['mangle'].add_chain('mark') self.ipv4['mangle'].add_rule('PREROUTING', '-j $mark') def initialize_nat_table(self): self.ipv4.update( {'nat': IptablesTable(binary_name=self.wrap_name)}) builtin_chains = { 4: {'nat': ['PREROUTING', 'OUTPUT', 'POSTROUTING']}} self._configure_builtin_chains(builtin_chains) # Add a neutron-postrouting-bottom chain. It's intended to be # shared among the various neutron components. We set it as the # last chain of POSTROUTING chain. self.ipv4['nat'].add_chain('neutron-postrouting-bottom', wrap=False) self.ipv4['nat'].add_rule( 'POSTROUTING', '-j neutron-postrouting-bottom', wrap=False) # We add a snat chain to the shared neutron-postrouting-bottom # chain so that it's applied last. self.ipv4['nat'].add_chain('snat') self.ipv4['nat'].add_rule('neutron-postrouting-bottom', '-j $snat', wrap=False, comment=ic.SNAT_OUT) # And then we add a float-snat chain and jump to first thing in # the snat chain. 
self.ipv4['nat'].add_chain('float-snat') self.ipv4['nat'].add_rule('snat', '-j $float-snat') def _configure_builtin_chains(self, builtin_chains): for ip_version in builtin_chains: if ip_version == 4: tables = self.ipv4 elif ip_version == 6: tables = self.ipv6 for table, chains in builtin_chains[ip_version].items(): for chain in chains: tables[table].add_chain(chain) tables[table].add_rule(chain, '-j $%s' % (chain), wrap=False) def get_tables(self, ip_version): return {4: self.ipv4, 6: self.ipv6}[ip_version] def get_chain(self, table, chain, ip_version=4, wrap=True): try: requested_table = self.get_tables(ip_version)[table] except KeyError: return [] return requested_table._get_chain_rules(chain, wrap) def is_chain_empty(self, table, chain, ip_version=4, wrap=True): return not self.get_chain(table, chain, ip_version, wrap) @contextlib.contextmanager def defer_apply(self): """Defer apply context.""" self.defer_apply_on() try: yield finally: try: self.defer_apply_off() except l3_exc.IpTablesApplyException: # already in the format we want, just reraise raise except Exception: msg = _('Failure applying iptables rules') LOG.exception(msg) raise l3_exc.IpTablesApplyException(msg) def defer_apply_on(self): self.iptables_apply_deferred = True def defer_apply_off(self): self.iptables_apply_deferred = False self._apply() def apply(self): if self.iptables_apply_deferred: return return self._apply() def _apply(self): lock_name = 'iptables' if self.namespace: lock_name += '-' + self.namespace # NOTE(ihrachys) we may get rid of the lock once all supported # platforms get iptables with 999eaa241212d3952ddff39a99d0d55a74e3639e # ("iptables-restore: support acquiring the lock.") with lockutils.lock(lock_name, runtime.SYNCHRONIZED_PREFIX, True): first = self._apply_synchronized() if not cfg.CONF.AGENT.debug_iptables_rules: return first LOG.debug('List of IPTables Rules applied: %s', '\n'.join(first)) second = self._apply_synchronized() if second: msg = (_("IPTables Rules did not converge. Diff: %s") % '\n'.join(second)) LOG.error(msg) raise l3_exc.IpTablesApplyException(msg) return first def get_rules_for_table(self, table): """Runs iptables-save on a table and returns the results.""" args = ['iptables-save', '-t', table] if self.namespace: args = ['ip', 'netns', 'exec', self.namespace] + args return self.execute(args, run_as_root=True).split('\n') def _get_version(self): # Output example is "iptables v1.6.2" args = ['iptables', '--version'] version = str(self.execute(args, run_as_root=True).split()[1][1:]) LOG.debug("IPTables version installed: %s", version) return version @property def random_fully(self): if self._random_fully is not None: return self._random_fully version = self._get_version() self.__class__._random_fully = utils.is_version_greater_equal( version, n_const.IPTABLES_RANDOM_FULLY_VERSION) return self._random_fully @property def xlock_wait_time(self): # give agent some time to report back to server return str(max(int(cfg.CONF.AGENT.report_interval / 3.0), 1)) def _do_run_restore(self, args, commands, lock=False): args = args[:] if lock: args += ['-w', self.xlock_wait_time, '-W', XLOCK_WAIT_INTERVAL] try: kwargs = {} if lock else {'log_fail_as_error': False} self.execute(args, process_input='\n'.join(commands), run_as_root=True, **kwargs) except RuntimeError as error: return error def _run_restore(self, args, commands): # If we've already tried and used -w successfully, don't # run iptables-restore without it. 
if self.use_table_lock: return self._do_run_restore(args, commands, lock=True) err = self._do_run_restore(args, commands) if (isinstance(err, exceptions.ProcessExecutionError) and err.returncode == XTABLES_RESOURCE_PROBLEM_CODE): # maybe we run on a platform that includes iptables commit # 999eaa241212d3952ddff39a99d0d55a74e3639e (for example, latest # RHEL) and failed because of xlock acquired by another # iptables process running in parallel. Try to use -w to # acquire xlock. err = self._do_run_restore(args, commands, lock=True) if not err: self.__class__.use_table_lock = True return err def _log_restore_err(self, err, commands): try: line_no = int(re.search( 'iptables-restore: line ([0-9]+?) failed', str(err)).group(1)) context = IPTABLES_ERROR_LINES_OF_CONTEXT log_start = max(0, line_no - context) log_end = line_no + context except AttributeError: # line error wasn't found, print all lines instead log_start = 0 log_end = len(commands) log_lines = ('%7d. %s' % (idx, l) for idx, l in enumerate( commands[log_start:log_end], log_start + 1) ) LOG.error("IPTablesManager.apply failed to apply the " "following set of iptables rules:\n%s", '\n'.join(log_lines)) def _apply_synchronized(self): """Apply the current in-memory set of iptables rules. This will create a diff between the rules from the previous runs and replace them with the current set of rules. This happens atomically, thanks to iptables-restore. Returns a list of the changes that were sent to iptables-save. """ s = [('iptables', self.ipv4)] if self.use_ipv6: s += [('ip6tables', self.ipv6)] all_commands = [] # variable to keep track all commands for return val for cmd, tables in s: args = ['%s-save' % (cmd,)] if self.namespace: args = ['ip', 'netns', 'exec', self.namespace] + args try: save_output = self.execute(args, run_as_root=True) except RuntimeError: # We could be racing with a cron job deleting namespaces. # It is useless to try to apply iptables rules over and # over again in a endless loop if the namespace does not # exist. with excutils.save_and_reraise_exception() as ctx: if (self.namespace and not ip_lib.network_namespace_exists(self.namespace)): ctx.reraise = False LOG.error("Namespace %s was deleted during IPTables " "operations.", self.namespace) return [] all_lines = save_output.split('\n') commands = [] # Traverse tables in sorted order for predictable dump output for table_name in sorted(tables): table = tables[table_name] # isolate the lines of the table we are modifying start, end = self._find_table(all_lines, table_name) old_rules = all_lines[start:end] # generate the new table state we want new_rules = self._modify_rules(old_rules, table, table_name) # generate the iptables commands to get between the old state # and the new state changes = _generate_path_between_rules(old_rules, new_rules) if changes: # if there are changes to the table, we put on the header # and footer that iptables-save needs commands += (['# Generated by iptables_manager'] + ['*%s' % table_name] + changes + ['COMMIT', '# Completed by iptables_manager']) if not commands: continue all_commands += commands # always end with a new line commands.append('') args = ['%s-restore' % (cmd,), '-n'] if self.namespace: args = ['ip', 'netns', 'exec', self.namespace] + args err = self._run_restore(args, commands) if err: self._log_restore_err(err, commands) raise err LOG.debug("IPTablesManager.apply completed with success. 
%d iptables " "commands were issued", len(all_commands)) return all_commands def _find_table(self, lines, table_name): if len(lines) < 3: # length only <2 when fake iptables return (0, 0) try: start = lines.index('*%s' % table_name) except ValueError: # Couldn't find table_name LOG.debug('Unable to find table %s', table_name) return (0, 0) end = lines[start:].index('COMMIT') + start + 1 return (start, end) def _find_rules_index(self, lines): seen_chains = False rules_index = 0 for rules_index, rule in enumerate(lines): if not seen_chains: if rule.startswith(':'): seen_chains = True else: if not rule.startswith(':'): break if not seen_chains: rules_index = 2 return rules_index def _modify_rules(self, current_lines, table, table_name): # Chains are stored as sets to avoid duplicates. # Sort the output chains here to make their order predictable. unwrapped_chains = sorted(table.unwrapped_chains) chains = sorted(table.chains) rules = set(map(str, table.rules)) # we don't want to change any rules that don't belong to us so we start # the new_filter with these rules # there are some rules that belong to us but they don't have the wrap # name. we want to add them in the right location in case our new rules # changed the order # (e.g. '-A FORWARD -j neutron-filter-top') new_filter = [line.strip() for line in current_lines if self.wrap_name not in line and line.strip() not in rules] # generate our list of chain names our_chains = [':%s-%s' % (self.wrap_name, name) for name in chains] # the unwrapped chains (e.g. neutron-filter-top) may already exist in # the new_filter since they aren't marked by the wrap_name so we only # want to add them if they aren't already there our_chains += [':%s' % name for name in unwrapped_chains if not any(':%s' % name in s for s in new_filter)] our_top_rules = [] our_bottom_rules = [] for rule in table.rules: rule_str = str(rule) if rule.top: # rule.top == True means we want this rule to be at the top. our_top_rules += [rule_str] else: our_bottom_rules += [rule_str] our_chains_and_rules = our_chains + our_top_rules + our_bottom_rules # locate the position immediately after the existing chains to insert # our chains and rules rules_index = self._find_rules_index(new_filter) new_filter[rules_index:rules_index] = our_chains_and_rules def _weed_out_removes(line): # remove any rules or chains from the filter that were slated # for removal if line.startswith(':'): chain = line[1:] if chain in table.remove_chains: table.remove_chains.remove(chain) return False else: if line in table.remove_rules: table.remove_rules.remove(line) return False # Leave it alone return True seen_lines = set() # TODO(kevinbenton): remove this function and the next one. They are # just oversized brooms to sweep bugs under the rug!!! We generate the # rules and we shouldn't be generating duplicates. def _weed_out_duplicates(line): if line in seen_lines: thing = 'chain' if line.startswith(':') else 'rule' LOG.warning("Duplicate iptables %(thing)s detected. This " "may indicate a bug in the iptables " "%(thing)s generation code. Line: %(line)s", {'thing': thing, 'line': line}) return False seen_lines.add(line) # Leave it alone return True new_filter.reverse() new_filter = [line for line in new_filter if _weed_out_duplicates(line) and _weed_out_removes(line)] new_filter.reverse() # flush lists, just in case a rule or chain marked for removal # was already gone. 
(chains is a set, rules is a list) table.remove_chains.clear() table.remove_rules = [] return new_filter def _get_traffic_counters_cmd_tables(self, chain, wrap=True): name = get_chain_name(chain, wrap) cmd_tables = [('iptables', key) for key, table in self.ipv4.items() if name in table._select_chain_set(wrap)] if self.use_ipv6: cmd_tables += [('ip6tables', key) for key, table in self.ipv6.items() if name in table._select_chain_set(wrap)] return cmd_tables def get_traffic_counters(self, chain, wrap=True, zero=False): """Return the sum of the traffic counters of all rules of a chain.""" cmd_tables = self._get_traffic_counters_cmd_tables(chain, wrap) if not cmd_tables: LOG.warning('Attempted to get traffic counters of chain %s ' 'which does not exist', chain) return name = get_chain_name(chain, wrap) acc = {'pkts': 0, 'bytes': 0} for cmd, table in cmd_tables: args = [cmd, '-t', table, '-L', name, '-n', '-v', '-x', '-w', self.xlock_wait_time] if zero: args.append('-Z') if self.namespace: args = ['ip', 'netns', 'exec', self.namespace] + args current_table = self.execute(args, run_as_root=True) current_lines = current_table.split('\n') for line in current_lines[2:]: if not line: break data = line.split() if (len(data) < 2 or not data[0].isdigit() or not data[1].isdigit()): break acc['pkts'] += int(data[0]) acc['bytes'] += int(data[1]) return acc def _generate_path_between_rules(old_rules, new_rules): """Generates iptables commands to get from old_rules to new_rules. This function diffs the two rule sets and then calculates the iptables commands necessary to get from the old rules to the new rules using insert and delete commands. """ old_by_chain = _get_rules_by_chain(old_rules) new_by_chain = _get_rules_by_chain(new_rules) old_chains, new_chains = set(old_by_chain.keys()), set(new_by_chain.keys()) # all referenced chains should be declared at the top before rules. # NOTE(kevinbenton): sorting and grouping chains is for determinism in # tests. iptables doesn't care about the order here statements = [':%s - [0:0]' % c for c in sorted(new_chains - old_chains)] sg_chains = [] other_chains = [] for chain in sorted(old_chains | new_chains): if '-sg-' in chain: sg_chains.append(chain) else: other_chains.append(chain) for chain in other_chains + sg_chains: statements += _generate_chain_diff_iptables_commands( chain, old_by_chain[chain], new_by_chain[chain]) # unreferenced chains get the axe for chain in sorted(old_chains - new_chains): statements += ['-X %s' % chain] return statements def _get_rules_by_chain(rules): by_chain = collections.defaultdict(list) for line in rules: if line.startswith(':'): chain = line[1:].split(' ', 1)[0] # even though this is a default dict, we need to manually add # chains to ensure that ones without rules are included because # they might be a jump reference if chain not in by_chain: by_chain[chain] = [] elif line.startswith('-A'): chain = line[3:].split(' ', 1)[0] by_chain[chain].append(line) return by_chain def _generate_chain_diff_iptables_commands(chain, old_chain_rules, new_chain_rules): # keep track of the old index because we have to insert rules # in the right position old_index = 1 statements = [] for line in difflib.ndiff(old_chain_rules, new_chain_rules): if line.startswith('?'): # skip ? 
because that's a guide string for intraline differences continue elif line.startswith('-'): # line deleted statements.append('-D %s %d' % (chain, old_index)) # since we are removing a line from the old rules, we # backup the index by 1 old_index -= 1 elif line.startswith('+'): # line added # strip the chain name since we have to add it before the index rule = line[5:].split(' ', 1)[-1] # IptablesRule does not add trailing spaces for rules, so we # have to detect that here by making sure this chain isn't # referencing itself if rule == chain: rule = '' # rule inserted at this position statements.append('-I %s %d %s' % (chain, old_index, rule)) old_index += 1 return statements ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/linux/keepalived.py0000644000175000017500000005026000000000000023560 0ustar00coreycorey00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import errno import itertools import os import netaddr from neutron_lib import constants from neutron_lib import exceptions from neutron_lib.utils import file as file_utils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import fileutils from neutron._i18n import _ from neutron.agent.linux import external_process from neutron.common import utils VALID_STATES = ['MASTER', 'BACKUP'] VALID_AUTH_TYPES = ['AH', 'PASS'] HA_DEFAULT_PRIORITY = 50 PRIMARY_VIP_RANGE_SIZE = 24 KEEPALIVED_SERVICE_NAME = 'keepalived' KEEPALIVED_EMAIL_FROM = 'neutron@openstack.local' KEEPALIVED_ROUTER_ID = 'neutron' GARP_MASTER_DELAY = 60 HEALTH_CHECK_NAME = 'ha_health_check' LOG = logging.getLogger(__name__) def get_free_range(parent_range, excluded_ranges, size=PRIMARY_VIP_RANGE_SIZE): """Get a free IP range, from parent_range, of the specified size. :param parent_range: String representing an IP range. E.g: '169.254.0.0/16' :param excluded_ranges: A list of strings to be excluded from parent_range :param size: What should be the size of the range returned? 
:return: A string representing an IP range """ free_cidrs = netaddr.IPSet([parent_range]) - netaddr.IPSet(excluded_ranges) for cidr in free_cidrs.iter_cidrs(): if cidr.prefixlen <= size: return '%s/%s' % (cidr.network, size) raise ValueError(_('Network of size %(size)s, from IP range ' '%(parent_range)s excluding IP ranges ' '%(excluded_ranges)s was not found.') % {'size': size, 'parent_range': parent_range, 'excluded_ranges': excluded_ranges}) class InvalidInstanceStateException(exceptions.NeutronException): message = _('Invalid instance state: %(state)s, valid states are: ' '%(valid_states)s') def __init__(self, **kwargs): if 'valid_states' not in kwargs: kwargs['valid_states'] = ', '.join(VALID_STATES) super(InvalidInstanceStateException, self).__init__(**kwargs) class InvalidAuthenticationTypeException(exceptions.NeutronException): message = _('Invalid authentication type: %(auth_type)s, ' 'valid types are: %(valid_auth_types)s') def __init__(self, **kwargs): if 'valid_auth_types' not in kwargs: kwargs['valid_auth_types'] = ', '.join(VALID_AUTH_TYPES) super(InvalidAuthenticationTypeException, self).__init__(**kwargs) class KeepalivedVipAddress(object): """A virtual address entry of a keepalived configuration.""" def __init__(self, ip_address, interface_name, scope=None): self.ip_address = ip_address self.interface_name = interface_name self.scope = scope def __eq__(self, other): return (isinstance(other, KeepalivedVipAddress) and self.ip_address == other.ip_address) def __str__(self): return '[%s, %s, %s]' % (self.ip_address, self.interface_name, self.scope) def build_config(self): result = '%s dev %s' % (self.ip_address, self.interface_name) if self.scope: result += ' scope %s' % self.scope return result class KeepalivedVirtualRoute(object): """A virtual route entry of a keepalived configuration.""" def __init__(self, destination, nexthop, interface_name=None, scope=None): self.destination = destination self.nexthop = nexthop self.interface_name = interface_name self.scope = scope def build_config(self): output = self.destination if self.nexthop: output += ' via %s' % self.nexthop if self.interface_name: output += ' dev %s' % self.interface_name if self.scope: output += ' scope %s' % self.scope return output class KeepalivedInstanceRoutes(object): def __init__(self): self.gateway_routes = [] self.extra_routes = [] self.extra_subnets = [] def remove_routes_on_interface(self, interface_name): self.gateway_routes = [gw_rt for gw_rt in self.gateway_routes if gw_rt.interface_name != interface_name] # NOTE(amuller): extra_routes are initialized from the router's # 'routes' attribute. These routes do not have an interface # parameter and so cannot be removed via an interface_name lookup. 
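# --- Illustrative sketch, not part of the original module ---
# get_free_range() near the top of this module subtracts the excluded
# CIDRs from the parent range with netaddr.IPSet and returns the first
# remaining CIDR wide enough for the requested size. The same
# computation with made-up ranges:
import netaddr

free = netaddr.IPSet(['169.254.0.0/16']) - netaddr.IPSet(
    ['169.254.0.0/24', '169.254.1.0/24'])
for cidr in free.iter_cidrs():
    if cidr.prefixlen <= 24:
        print('%s/24' % cidr.network)  # -> 169.254.2.0/24
        break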
self.extra_subnets = [route for route in self.extra_subnets if route.interface_name != interface_name] @property def routes(self): return self.gateway_routes + self.extra_routes + self.extra_subnets def __len__(self): return len(self.routes) def build_config(self): return itertools.chain([' virtual_routes {'], (' %s' % route.build_config() for route in self.routes), [' }']) class KeepalivedInstance(object): """Instance section of a keepalived configuration.""" def __init__(self, state, interface, vrouter_id, ha_cidrs, priority=HA_DEFAULT_PRIORITY, advert_int=None, mcast_src_ip=None, nopreempt=False, garp_master_delay=GARP_MASTER_DELAY, vrrp_health_check_interval=0, ha_conf_dir=None): self.name = 'VR_%s' % vrouter_id if state not in VALID_STATES: raise InvalidInstanceStateException(state=state) self.state = state self.interface = interface self.vrouter_id = vrouter_id self.priority = priority self.nopreempt = nopreempt self.advert_int = advert_int self.mcast_src_ip = mcast_src_ip self.garp_master_delay = garp_master_delay self.track_interfaces = [] self.vips = [] self.virtual_routes = KeepalivedInstanceRoutes() self.authentication = None self.track_script = None self.primary_vip_range = get_free_range( parent_range=constants.PRIVATE_CIDR_RANGE, excluded_ranges=[constants.METADATA_CIDR, constants.DVR_FIP_LL_CIDR] + ha_cidrs, size=PRIMARY_VIP_RANGE_SIZE) if vrrp_health_check_interval > 0: self.track_script = KeepalivedTrackScript( vrrp_health_check_interval, ha_conf_dir, self.vrouter_id) def set_authentication(self, auth_type, password): if auth_type not in VALID_AUTH_TYPES: raise InvalidAuthenticationTypeException(auth_type=auth_type) self.authentication = (auth_type, password) def add_vip(self, ip_cidr, interface_name, scope): vip = KeepalivedVipAddress(ip_cidr, interface_name, scope) if vip not in self.vips: self.vips.append(vip) else: LOG.debug('VIP %s already present in %s', vip, self.vips) def remove_vips_vroutes_by_interface(self, interface_name): self.vips = [vip for vip in self.vips if vip.interface_name != interface_name] self.virtual_routes.remove_routes_on_interface(interface_name) def remove_vip_by_ip_address(self, ip_address): self.vips = [vip for vip in self.vips if vip.ip_address != ip_address] def get_existing_vip_ip_addresses(self, interface_name): return [vip.ip_address for vip in self.vips if vip.interface_name == interface_name] def _build_track_interface_config(self): return itertools.chain( [' track_interface {'], (' %s' % i for i in self.track_interfaces), [' }']) def get_primary_vip(self): """Return an address in the primary_vip_range CIDR, with the router's VRID in the host section. For example, if primary_vip_range is 169.254.0.0/24, and this router's VRID is 5, the result is 169.254.0.5. Using the VRID assures that the primary VIP is consistent amongst HA router instances on different nodes. """ ip = (netaddr.IPNetwork(self.primary_vip_range).network + self.vrouter_id) return str(netaddr.IPNetwork('%s/%s' % (ip, PRIMARY_VIP_RANGE_SIZE))) def _build_vips_config(self): # NOTE(amuller): The primary VIP must be consistent in order to avoid # keepalived bugs. Changing the VIP in the 'virtual_ipaddress' and # SIGHUP'ing keepalived can remove virtual routers, including the # router's default gateway. # We solve this by never changing the VIP in the virtual_ipaddress # section, herein known as the primary VIP. # The only interface known to exist for HA routers is the HA interface # (self.interface). We generate an IP on that device and use it as the # primary VIP. 
The other VIPs (Internal interfaces IPs, the external # interface IP and floating IPs) are placed in the # virtual_ipaddress_excluded section. primary = KeepalivedVipAddress(self.get_primary_vip(), self.interface) vips_result = [' virtual_ipaddress {', ' %s' % primary.build_config(), ' }'] if self.vips: vips_result.extend( itertools.chain([' virtual_ipaddress_excluded {'], (' %s' % vip.build_config() for vip in sorted(self.vips, key=lambda vip: vip.ip_address)), [' }'])) return vips_result def _build_virtual_routes_config(self): return itertools.chain([' virtual_routes {'], (' %s' % route.build_config() for route in self.virtual_routes), [' }']) def build_config(self): if self.track_script: config = self.track_script.build_config_preamble() self.track_script.routes = self.virtual_routes.gateway_routes self.track_script.vips = self.vips else: config = [] config.extend(['vrrp_instance %s {' % self.name, ' state %s' % self.state, ' interface %s' % self.interface, ' virtual_router_id %s' % self.vrouter_id, ' priority %s' % self.priority, ' garp_master_delay %s' % self.garp_master_delay]) if self.nopreempt: config.append(' nopreempt') if self.advert_int: config.append(' advert_int %s' % self.advert_int) if self.authentication: auth_type, password = self.authentication authentication = [' authentication {', ' auth_type %s' % auth_type, ' auth_pass %s' % password, ' }'] config.extend(authentication) if self.mcast_src_ip: config.append(' mcast_src_ip %s' % self.mcast_src_ip) if self.track_interfaces: config.extend(self._build_track_interface_config()) config.extend(self._build_vips_config()) if len(self.virtual_routes): config.extend(self.virtual_routes.build_config()) if self.track_script: config.extend(self.track_script.build_config()) config.append('}') return config class KeepalivedConf(object): """A keepalived configuration.""" def __init__(self): self.reset() def reset(self): self.instances = {} def add_instance(self, instance): self.instances[instance.vrouter_id] = instance def get_instance(self, vrouter_id): return self.instances.get(vrouter_id) def build_config(self): config = ['global_defs {', ' notification_email_from %s' % KEEPALIVED_EMAIL_FROM, ' router_id %s' % KEEPALIVED_ROUTER_ID, '}' ] for instance in self.instances.values(): config.extend(instance.build_config()) return config def get_config_str(self): """Generates and returns the keepalived configuration. :return: Keepalived configuration string. """ return '\n'.join(self.build_config()) class KeepalivedManager(object): """Wrapper for keepalived. This wrapper permits to write keepalived config files, to start/restart keepalived process. 
""" def __init__(self, resource_id, config, process_monitor, conf_path, namespace=None, throttle_restart_value=None): self.resource_id = resource_id self.config = config self.namespace = namespace self.process_monitor = process_monitor self.conf_path = conf_path # configure throttler for spawn to introduce delay between SIGHUPs, # otherwise keepalived master may unnecessarily flip to slave if throttle_restart_value is not None: self._throttle_spawn(throttle_restart_value) # pylint: disable=method-hidden def _throttle_spawn(self, threshold): self.spawn = utils.throttler(threshold)(self.spawn) def get_conf_dir(self): confs_dir = os.path.abspath(os.path.normpath(self.conf_path)) conf_dir = os.path.join(confs_dir, self.resource_id) return conf_dir def get_full_config_file_path(self, filename, ensure_conf_dir=True): conf_dir = self.get_conf_dir() if ensure_conf_dir: fileutils.ensure_tree(conf_dir, mode=0o755) return os.path.join(conf_dir, filename) def _output_config_file(self): config_str = self.config.get_config_str() LOG.debug("Router %s keepalived config: %s", self.resource_id, config_str) config_path = self.get_full_config_file_path('keepalived.conf') file_utils.replace_file(config_path, config_str) return config_path @staticmethod def _safe_remove_pid_file(pid_file): try: os.remove(pid_file) except OSError as e: if e.errno != errno.ENOENT: LOG.error("Could not delete file %s, keepalived can " "refuse to start.", pid_file) def get_vrrp_pid_file_name(self, base_pid_file): return '%s-vrrp' % base_pid_file def get_conf_on_disk(self): config_path = self.get_full_config_file_path('keepalived.conf') try: with open(config_path) as conf: return conf.read() except (OSError, IOError) as e: if e.errno != errno.ENOENT: raise def spawn(self): config_path = self._output_config_file() for key, instance in self.config.instances.items(): if instance.track_script: instance.track_script.write_check_script() keepalived_pm = self.get_process() vrrp_pm = self._get_vrrp_process( self.get_vrrp_pid_file_name(keepalived_pm.get_pid_file_name())) keepalived_pm.default_cmd_callback = ( self._get_keepalived_process_callback(vrrp_pm, config_path)) keepalived_pm.enable(reload_cfg=True) self.process_monitor.register(uuid=self.resource_id, service_name=KEEPALIVED_SERVICE_NAME, monitored_process=keepalived_pm) LOG.debug('Keepalived spawned with config %s', config_path) def disable(self): self.process_monitor.unregister(uuid=self.resource_id, service_name=KEEPALIVED_SERVICE_NAME) pm = self.get_process() pm.disable(sig='15') def check_processes(self): keepalived_pm = self.get_process() vrrp_pm = self._get_vrrp_process( self.get_vrrp_pid_file_name(keepalived_pm.get_pid_file_name())) return keepalived_pm.active and vrrp_pm.active def get_process(self): return external_process.ProcessManager( cfg.CONF, self.resource_id, self.namespace, service=KEEPALIVED_SERVICE_NAME, pids_path=self.conf_path) def _get_vrrp_process(self, pid_file): return external_process.ProcessManager( cfg.CONF, self.resource_id, self.namespace, pid_file=pid_file) def _get_keepalived_process_callback(self, vrrp_pm, config_path): def callback(pid_file): # If keepalived process crashed unexpectedly, the vrrp process # will be orphan and prevent keepalived process to be spawned. # A check here will let the l3-agent to kill the orphan process # and spawn keepalived successfully. 
if vrrp_pm.active: vrrp_pm.disable() self._safe_remove_pid_file(pid_file) self._safe_remove_pid_file(self.get_vrrp_pid_file_name(pid_file)) cmd = ['keepalived', '-P', '-f', config_path, '-p', pid_file, '-r', self.get_vrrp_pid_file_name(pid_file)] if logging.is_debug_enabled(cfg.CONF): cmd.append('-D') return cmd return callback class KeepalivedTrackScript(KeepalivedConf): """Track script generator for Keepalived""" def __init__(self, interval, conf_dir, vr_id): self.interval = interval self.conf_dir = conf_dir self.vr_id = vr_id self.routes = [] self.vips = [] def build_config_preamble(self): config = ['', 'vrrp_script %s_%s {' % (HEALTH_CHECK_NAME, self.vr_id), ' script "%s"' % self._get_script_location(), ' interval %s' % self.interval, ' fall 2', ' rise 2', '}', ''] return config def _is_needed(self): """Check if track script is needed by checking amount of routes. :return: True/False """ return len(self.routes) > 0 def build_config(self): if not self._is_needed(): return '' config = [' track_script {', ' %s_%s' % (HEALTH_CHECK_NAME, self.vr_id), ' }'] return config def build_script(self): return itertools.chain(['#!/bin/bash -eu'], ['%s' % self._check_ip_assigned()], ('%s' % self._add_ip_addr(route.nexthop) for route in self.routes if route.nexthop), ) def _add_ip_addr(self, ip_addr): cmd = { 4: 'ping', 6: 'ping6', }.get(netaddr.IPAddress(ip_addr).version) return '%s -c 1 -w 1 %s 1>/dev/null || exit 1' % (cmd, ip_addr) def _check_ip_assigned(self): cmd = 'ip a | grep %s || exit 0' return cmd % netaddr.IPNetwork(self.vips[0].ip_address).ip if len( self.vips) else '' def _get_script_str(self): """Generates and returns bash script to verify connectivity. :return: Bash script code """ return '\n'.join(self.build_script()) def _get_script_location(self): return os.path.join(self.conf_dir, 'ha_check_script_%s.sh' % self.vr_id) def write_check_script(self): if not self._is_needed(): return file_utils.replace_file( self._get_script_location(), self._get_script_str(), 0o520) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/linux/l3_tc_lib.py0000644000175000017500000001640300000000000023302 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
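# --- Illustrative sketch, not part of the original module ---
# KeepalivedTrackScript in the preceding keepalived.py emits a small
# bash script that pings every gateway nexthop and exits non-zero on
# the first failure; keepalived's vrrp_script runs it to decide
# failover. A minimal sketch of that generation logic for made-up
# nexthops:
import netaddr

def build_check_script(nexthops):
    lines = ['#!/bin/bash -eu']
    for ip in nexthops:
        ping = 'ping' if netaddr.IPAddress(ip).version == 4 else 'ping6'
        lines.append('%s -c 1 -w 1 %s 1>/dev/null || exit 1' % (ping, ip))
    return '\n'.join(lines)

# build_check_script(['192.0.2.1', '2001:db8::1'])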
import re from neutron_lib import constants from neutron_lib import exceptions from oslo_log import log as logging from neutron.agent.linux import ip_lib from neutron.agent.linux import tc_lib LOG = logging.getLogger(__name__) # NOTE(slaweq): in iproute 4.15 chain value was added to filter output and this # needs to be included in REGEX FILTER_ID_REGEX = re.compile( r"filter protocol ip u32 (fh|chain \d+ fh) (\w+::\w+) *") FILTER_STATS_REGEX = re.compile(r"Sent (\w+) bytes (\w+) pkts *") class FloatingIPTcCommandBase(ip_lib.IPDevice): def _execute_tc_cmd(self, cmd, **kwargs): cmd = ['tc'] + cmd ip_wrapper = ip_lib.IPWrapper(self.namespace) return ip_wrapper.netns.execute(cmd, run_as_root=True, **kwargs) def _get_qdisc_id_for_filter(self, direction): qdiscs = tc_lib.list_tc_qdiscs(self.name, namespace=self.namespace) qdisc_type = (tc_lib.TC_QDISC_TYPE_HTB if direction == constants.EGRESS_DIRECTION else tc_lib.TC_QDISC_TYPE_INGRESS) for qdisc in (qd for qd in qdiscs if qd['qdisc_type'] == qdisc_type): return qdisc['handle'] def _add_qdisc(self, direction): if direction == constants.EGRESS_DIRECTION: tc_lib.add_tc_qdisc( self.name, 'htb', parent='root', namespace=self.namespace) else: tc_lib.add_tc_qdisc( self.name, 'ingress', namespace=self.namespace) def _get_filters(self, qdisc_id): cmd = ['-p', '-s', '-d', 'filter', 'show', 'dev', self.name, 'parent', qdisc_id, 'prio', 1] return self._execute_tc_cmd(cmd) def _get_filterid_for_ip(self, qdisc_id, ip): filterids_for_ip = [] filters_output = self._get_filters(qdisc_id) if not filters_output: raise exceptions.FilterIDForIPNotFound(ip=ip) filter_lines = filters_output.split('\n') for line in filter_lines: line = line.strip() m = FILTER_ID_REGEX.match(line) if m: filter_id = m.group(2) # It matched, so ip/32 is not here. 
continue continue elif not line.startswith('match'): continue parts = line.split(" ") if ip + '/32' in parts: filterids_for_ip.append(filter_id) if len(filterids_for_ip) > 1: raise exceptions.MultipleFilterIDForIPFound(ip=ip) elif len(filterids_for_ip) == 0: raise exceptions.FilterIDForIPNotFound(ip=ip) return filterids_for_ip[0] def _del_filter_by_id(self, qdisc_id, filter_id): cmd = ['filter', 'del', 'dev', self.name, 'parent', qdisc_id, 'prio', 1, 'handle', filter_id, 'u32'] self._execute_tc_cmd(cmd) def _get_qdisc_filters(self, qdisc_id): filterids = [] filters_output = self._get_filters(qdisc_id) if not filters_output: return filterids filter_lines = filters_output.split('\n') for line in filter_lines: line = line.strip() m = FILTER_ID_REGEX.match(line) if m: filter_id = m.group(2) filterids.append(filter_id) return filterids def _add_filter(self, qdisc_id, direction, ip, rate, burst): rate_value = "%s%s" % (rate, tc_lib.BW_LIMIT_UNIT) burst_value = "%s%s" % ( tc_lib.TcCommand.get_ingress_qdisc_burst_value(rate, burst), tc_lib.BURST_UNIT ) protocol = ['protocol', 'ip'] prio = ['prio', 1] _match = 'src' if direction == constants.EGRESS_DIRECTION else 'dst' match = ['u32', 'match', 'ip', _match, ip] police = ['police', 'rate', rate_value, 'burst', burst_value, 'mtu', '64kb', 'drop', 'flowid', ':1'] args = protocol + prio + match + police cmd = ['filter', 'add', 'dev', self.name, 'parent', qdisc_id] + args self._execute_tc_cmd(cmd) def _get_or_create_qdisc(self, direction): qdisc_id = self._get_qdisc_id_for_filter(direction) if not qdisc_id: self._add_qdisc(direction) qdisc_id = self._get_qdisc_id_for_filter(direction) if not qdisc_id: raise exceptions.FailedToAddQdiscToDevice(direction=direction, device=self.name) return qdisc_id class FloatingIPTcCommand(FloatingIPTcCommandBase): def clear_all_filters(self, direction): qdisc_id = self._get_qdisc_id_for_filter(direction) if not qdisc_id: return filterids = self._get_qdisc_filters(qdisc_id) for filter_id in filterids: self._del_filter_by_id(qdisc_id, filter_id) def get_filter_id_for_ip(self, direction, ip): qdisc_id = self._get_qdisc_id_for_filter(direction) if not qdisc_id: return return self._get_filterid_for_ip(qdisc_id, ip) def get_existing_filter_ids(self, direction): qdisc_id = self._get_qdisc_id_for_filter(direction) if not qdisc_id: return return self._get_qdisc_filters(qdisc_id) def delete_filter_ids(self, direction, filterids): qdisc_id = self._get_qdisc_id_for_filter(direction) if not qdisc_id: return for filter_id in filterids: self._del_filter_by_id(qdisc_id, filter_id) def set_ip_rate_limit(self, direction, ip, rate, burst): qdisc_id = self._get_or_create_qdisc(direction) try: filter_id = self._get_filterid_for_ip(qdisc_id, ip) LOG.debug("Filter %(filter)s for IP %(ip)s in %(direction)s " "qdisc already existed, removing.", {'filter': filter_id, 'ip': ip, 'direction': direction}) self._del_filter_by_id(qdisc_id, filter_id) except exceptions.FilterIDForIPNotFound: pass LOG.debug("Adding filter for IP %(ip)s in %(direction)s.", {'ip': ip, 'direction': direction}) self._add_filter(qdisc_id, direction, ip, rate, burst) def clear_ip_rate_limit(self, direction, ip): qdisc_id = self._get_qdisc_id_for_filter(direction) if not qdisc_id: return try: filter_id = self._get_filterid_for_ip(qdisc_id, ip) self._del_filter_by_id(qdisc_id, filter_id) except exceptions.FilterIDForIPNotFound: LOG.debug("No filter found for IP %(ip)s in %(direction)s, " "skipping deletion.", {'ip': ip, 'direction': direction}) 
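# --- Illustrative sketch, not part of the original module ---
# FILTER_ID_REGEX above has to accept both the pre- and post-iproute-4.15
# layouts of 'tc filter show' ("... u32 fh ..." vs. "... u32 chain 0 fh
# ..."). A short demonstration against two fabricated output lines:
import re

FILTER_ID_RE = re.compile(
    r"filter protocol ip u32 (fh|chain \d+ fh) (\w+::\w+) *")

for sample in ("filter protocol ip u32 fh 800::800 order 2048",
               "filter protocol ip u32 chain 0 fh 800::800 order 2048"):
    match = FILTER_ID_RE.match(sample)
    if match:
        print(match.group(2))  # '800::800' in both layouts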
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/linux/of_monitor.py0000644000175000017500000000550100000000000023620 0ustar00coreycorey00000000000000# Copyright 2019 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re import eventlet from neutron.agent.common import async_process class OFEvent(object): def __init__(self, event_type, flow): self.event_type = event_type self.flow = flow class OFMonitor(async_process.AsyncProcess): """Wrapper over 'ovs-ofctl monitor'. This is an interactive OpenFlow monitor. By default, when the object is instantiated, the monitor process is started. To retrieve the pending events, the property "of_events" can be retrieved. NOTE(ralonsoh): 'ovs-ofctl monitor' command is sending existing flows to stdout pipe (startup first messages) and next incoming messages to stderr pipe. That's why this class joins both outputs in one single queue (self._queue). """ EVENT_RE = re.compile( r"event=(?P<action>ADDED|DELETED|MODIFIED) (?P<flow>.*)") def __init__(self, bridge_name, namespace=None, respawn_interval=None, start=True): cmd = ['ovs-ofctl', 'monitor', bridge_name, 'watch:', '--monitor'] super(OFMonitor, self).__init__(cmd, run_as_root=True, respawn_interval=respawn_interval, namespace=namespace) if start: self.start() self._queue = eventlet.queue.Queue() eventlet.spawn(self._read_and_enqueue, self.iter_stdout) eventlet.spawn(self._read_and_enqueue, self.iter_stderr) def _read_and_enqueue(self, iter): for event_line in iter(block=True): event = self._parse_event_line(event_line) if event: self._queue.put(event) @property def of_events(self): events = [] while not self._queue.empty(): events.append(self._queue.get()) return events def _parse_event_line(self, event_line): match = self.EVENT_RE.match(event_line) if match is None: return return OFEvent(match.group('action'), match.group('flow')) def start(self, **kwargs): if not self._is_running: super(OFMonitor, self).start(block=True) def stop(self, **kwargs): if self._is_running: super(OFMonitor, self).stop(block=True) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2030437 neutron-16.0.0.0b2.dev214/neutron/agent/linux/openvswitch_firewall/0000755000175000017500000000000000000000000025330 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/linux/openvswitch_firewall/__init__.py0000644000175000017500000000132500000000000027442 0ustar00coreycorey00000000000000# Copyright 2015 # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent.linux.openvswitch_firewall import firewall OVSFirewallDriver = firewall.OVSFirewallDriver ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/linux/openvswitch_firewall/constants.py0000644000175000017500000000310700000000000027717 0ustar00coreycorey00000000000000# Copyright 2015 # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants OF_STATE_NOT_TRACKED = "-trk" OF_STATE_TRACKED = "+trk" OF_STATE_NEW_NOT_ESTABLISHED = "+new-est" OF_STATE_NOT_ESTABLISHED = "-est" OF_STATE_ESTABLISHED = "+est" OF_STATE_ESTABLISHED_NOT_REPLY = "+est-rel-rpl" OF_STATE_ESTABLISHED_REPLY = "+est-rel+rpl" OF_STATE_RELATED = "-new-est+rel-inv" OF_STATE_INVALID = "+trk+inv" OF_STATE_NEW = "+new" OF_STATE_NOT_REPLY_NOT_NEW = "-new-rpl" CT_MARK_NORMAL = '0x0' CT_MARK_INVALID = '0x1' REG_PORT = 5 REG_NET = 6 # for logging remote group rule REG_REMOTE_GROUP = 7 PROTOCOLS_WITH_PORTS = (constants.PROTO_NAME_SCTP, constants.PROTO_NAME_TCP, constants.PROTO_NAME_UDP) # Only map protocols that need special handling REVERSE_IP_PROTOCOL_MAP_WITH_PORTS = { constants.IP_PROTOCOL_MAP[proto]: proto for proto in PROTOCOLS_WITH_PORTS} ethertype_to_dl_type_map = { constants.IPv4: constants.ETHERTYPE_IP, constants.IPv6: constants.ETHERTYPE_IPV6, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/linux/openvswitch_firewall/exceptions.py0000644000175000017500000000213100000000000030060 0ustar00coreycorey00000000000000# Copyright 2016, Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
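# --- Illustrative sketch, not part of the original module ---
# REVERSE_IP_PROTOCOL_MAP_WITH_PORTS in the preceding constants.py
# inverts neutron-lib's IP_PROTOCOL_MAP for just the port-aware
# protocols, so a numeric protocol from a security-group rule maps back
# to 'tcp'/'udp'/'sctp'. The same inversion with literal values:
IP_PROTOCOL_MAP = {'tcp': 6, 'udp': 17, 'sctp': 132}  # subset, for show
PROTOCOLS_WITH_PORTS = ('sctp', 'tcp', 'udp')

REVERSE_MAP = {IP_PROTOCOL_MAP[p]: p for p in PROTOCOLS_WITH_PORTS}
# REVERSE_MAP == {132: 'sctp', 6: 'tcp', 17: 'udp'}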
from neutron_lib import exceptions from neutron._i18n import _ class OVSFWPortNotFound(exceptions.NeutronException): message = _("Port %(port_id)s is not managed by this agent.") class OVSFWTagNotFound(exceptions.NeutronException): message = _( "Cannot get tag for port %(port_name)s from its other_config: " "%(other_config)s") class OVSFWPortNotHandled(exceptions.NeutronException): message = ("Port %(port_id)s is not handled by the firewall.") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/linux/openvswitch_firewall/firewall.py0000644000175000017500000015652000000000000027520 0ustar00coreycorey00000000000000# Copyright 2015 # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import contextlib import copy import eventlet import netaddr from neutron_lib.callbacks import events as callbacks_events from neutron_lib.callbacks import registry as callbacks_registry from neutron_lib.callbacks import resources as callbacks_resources from neutron_lib import constants as lib_const from neutron_lib.plugins import utils as p_utils from neutron_lib.utils import helpers from oslo_config import cfg from oslo_log import log as logging from oslo_utils import netutils from neutron._i18n import _ from neutron.agent.common import ovs_lib from neutron.agent import firewall from neutron.agent.linux.openvswitch_firewall import constants as ovsfw_consts from neutron.agent.linux.openvswitch_firewall import exceptions from neutron.agent.linux.openvswitch_firewall import iptables from neutron.agent.linux.openvswitch_firewall import rules from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \ as ovs_consts LOG = logging.getLogger(__name__) def _replace_register(flow_params, register_number, register_value): """Replace value from flows to given register number 'register_value' key in dictionary will be replaced by register number given by 'register_number' :param flow_params: Dictionary containing defined flows :param register_number: The number of register where value will be stored :param register_value: Key to be replaced by register number """ try: reg_port = flow_params[register_value] del flow_params[register_value] flow_params['reg{:d}'.format(register_number)] = reg_port except KeyError: pass def create_reg_numbers(flow_params): """Replace reg_(port|net) values with defined register numbers""" _replace_register(flow_params, ovsfw_consts.REG_PORT, 'reg_port') _replace_register(flow_params, ovsfw_consts.REG_NET, 'reg_net') _replace_register( flow_params, ovsfw_consts.REG_REMOTE_GROUP, 'reg_remote_group') def get_segmentation_id_from_other_config(bridge, port_name): """Return segmentation_id stored in OVSDB other_config metadata. :param bridge: OVSBridge instance where port is. :param port_name: Name of the port. 
""" try: other_config = bridge.db_get_val( 'Port', port_name, 'other_config') network_type = other_config.get('network_type') if lib_const.TYPE_VLAN == network_type: return int(other_config.get('segmentation_id')) except (TypeError, ValueError): pass def get_network_type_from_other_config(bridge, port_name): """Return network_type stored in OVSDB other_config metadata. :param bridge: OVSBridge instance where port is. :param port_name: Name of the port. """ other_config = bridge.db_get_val('Port', port_name, 'other_config') return other_config.get('network_type') def get_physical_network_from_other_config(bridge, port_name): """Return physical_network stored in OVSDB other_config metadata. :param bridge: OVSBridge instance where port is. :param port_name: Name of the port. """ other_config = bridge.db_get_val('Port', port_name, 'other_config') return other_config.get('physical_network') def get_tag_from_other_config(bridge, port_name): """Return tag stored in OVSDB other_config metadata. :param bridge: OVSBridge instance where port is. :param port_name: Name of the port. :raises OVSFWTagNotFound: In case tag cannot be found in OVSDB. """ other_config = None try: other_config = bridge.db_get_val( 'Port', port_name, 'other_config') return int(other_config['tag']) except (KeyError, TypeError, ValueError): raise exceptions.OVSFWTagNotFound( port_name=port_name, other_config=other_config) class SecurityGroup(object): def __init__(self, id_): self.id = id_ self.raw_rules = [] self.remote_rules = [] self.members = {} self.ports = set() def update_rules(self, rules): """Separate raw and remote rules. If a rule has a protocol field, it is normalized to a number here in order to ease later processing. """ self.raw_rules = [] self.remote_rules = [] for rule in copy.deepcopy(rules): protocol = rule.get('protocol') if protocol is not None: if protocol.isdigit(): rule['protocol'] = int(protocol) elif (rule.get('ethertype') == lib_const.IPv6 and protocol == lib_const.PROTO_NAME_ICMP): rule['protocol'] = lib_const.PROTO_NUM_IPV6_ICMP else: rule['protocol'] = lib_const.IP_PROTOCOL_MAP.get( protocol, protocol) if 'remote_group_id' in rule: self.remote_rules.append(rule) else: self.raw_rules.append(rule) def get_ethertype_filtered_addresses(self, ethertype): return self.members.get(ethertype, []) class OFPort(object): def __init__(self, port_dict, ovs_port, vlan_tag, segment_id=None, network_type=None, physical_network=None): self.id = port_dict['device'] self.vlan_tag = vlan_tag self.segment_id = segment_id self.mac = ovs_port.vif_mac self.lla_address = str(netutils.get_ipv6_addr_by_EUI64( lib_const.IPv6_LLA_PREFIX, self.mac)) self.ofport = ovs_port.ofport self.sec_groups = list() self.fixed_ips = port_dict.get('fixed_ips', []) self.neutron_port_dict = port_dict.copy() self.allowed_pairs_v4 = self._get_allowed_pairs(port_dict, version=4) self.allowed_pairs_v6 = self._get_allowed_pairs(port_dict, version=6) self.network_type = network_type self.physical_network = physical_network @staticmethod def _get_allowed_pairs(port_dict, version): aap_dict = port_dict.get('allowed_address_pairs', set()) return {(aap['mac_address'], aap['ip_address']) for aap in aap_dict if netaddr.IPNetwork(aap['ip_address']).version == version} @property def all_allowed_macs(self): macs = {item[0] for item in self.allowed_pairs_v4.union( self.allowed_pairs_v6)} macs.add(self.mac) return macs @property def ipv4_addresses(self): return [ip_addr for ip_addr in self.fixed_ips if netaddr.IPAddress(ip_addr).version == 4] @property def 
ipv6_addresses(self): return [ip_addr for ip_addr in self.fixed_ips if netaddr.IPAddress(ip_addr).version == 6] def update(self, port_dict): self.allowed_pairs_v4 = self._get_allowed_pairs(port_dict, version=4) self.allowed_pairs_v6 = self._get_allowed_pairs(port_dict, version=6) # Neighbour discovery uses LLA self.allowed_pairs_v6.add((self.mac, self.lla_address)) self.fixed_ips = port_dict.get('fixed_ips', []) self.neutron_port_dict = port_dict.copy() class SGPortMap(object): def __init__(self): self.ports = {} self.sec_groups = {} # Maps port_id to ofport number self.unfiltered = {} def get_sg(self, sg_id): return self.sec_groups.get(sg_id, None) def get_or_create_sg(self, sg_id): try: sec_group = self.sec_groups[sg_id] except KeyError: sec_group = SecurityGroup(sg_id) self.sec_groups[sg_id] = sec_group return sec_group def delete_sg(self, sg_id): del self.sec_groups[sg_id] def create_port(self, port, port_dict): self.ports[port.id] = port self.update_port(port, port_dict) def update_port(self, port, port_dict): for sec_group in self.sec_groups.values(): sec_group.ports.discard(port) port.sec_groups = [self.get_or_create_sg(sg_id) for sg_id in port_dict['security_groups']] for sec_group in port.sec_groups: sec_group.ports.add(port) port.update(port_dict) def remove_port(self, port): for sec_group in port.sec_groups: sec_group.ports.discard(port) del self.ports[port.id] def update_rules(self, sg_id, rules): sec_group = self.get_or_create_sg(sg_id) sec_group.update_rules(rules) def update_members(self, sg_id, members): sec_group = self.get_or_create_sg(sg_id) sec_group.members = members class ConjIdMap(object): """Handle conjunction ID allocations and deallocations.""" def __new__(cls): if not hasattr(cls, '_instance'): cls._instance = super(ConjIdMap, cls).__new__(cls) return cls._instance def __init__(self): self.id_map = collections.defaultdict(self._conj_id_factory) self.id_free = collections.deque() self.max_id = 0 def _conj_id_factory(self): # If there is any freed ID, use one. if self.id_free: return self.id_free.popleft() # Allocate new one. It must be divisible by 8. (See the next function.) self.max_id += 8 return self.max_id def get_conj_id(self, sg_id, remote_sg_id, direction, ethertype): """Return a conjunction ID specified by the arguments. Allocate one if necessary. The returned ID is divisible by 8, as there are 4 priority levels (see rules.flow_priority_offset) and 2 conjunction IDs are needed per priority. """ if direction not in [lib_const.EGRESS_DIRECTION, lib_const.INGRESS_DIRECTION]: raise ValueError(_("Invalid direction '%s'") % direction) if ethertype not in [lib_const.IPv4, lib_const.IPv6]: raise ValueError(_("Invalid ethertype '%s'") % ethertype) return self.id_map[(sg_id, remote_sg_id, direction, ethertype)] def delete_sg(self, sg_id): """Free all conj_ids associated with the sg_id and return a list of (remote_sg_id, conj_id), which are no longer in use. """ result = [] for k in list(self.id_map.keys()): if sg_id in k[0:2]: conj_id = self.id_map.pop(k) result.append((k[1], conj_id)) self.id_free.append(conj_id) return result class ConjIPFlowManager(object): """Manage conj_id allocation and remote securitygroups derived conjunction flows. Flows managed by this class is of form: nw_src=10.2.3.4,reg_net=0xf00 actions=conjunction(123,1/2) These flows are managed per network and are usually per remote_group_id, but flows from different remote_group need to be merged on shared networks, where the complexity arises and this manager is needed. 
""" def __init__(self, driver): self.conj_id_map = ConjIdMap() self.driver = driver # The following two are dict of dicts and are indexed like: # self.x[vlan_tag][(direction, ethertype)] self.conj_ids = collections.defaultdict(dict) self.flow_state = collections.defaultdict( lambda: collections.defaultdict(dict)) def _build_addr_conj_id_map(self, ethertype, sg_conj_id_map): """Build a map of addr -> list of conj_ids.""" addr_to_conj = collections.defaultdict(list) for remote_id, conj_id_set in sg_conj_id_map.items(): remote_group = self.driver.sg_port_map.get_sg(remote_id) if not remote_group or not remote_group.members: LOG.debug('No member for SG %s', remote_id) continue for addr in remote_group.get_ethertype_filtered_addresses( ethertype): addr_to_conj[addr].extend(conj_id_set) return addr_to_conj def _update_flows_for_vlan_subr(self, direction, ethertype, vlan_tag, flow_state, addr_to_conj): """Do the actual flow updates for given direction and ethertype.""" current_ips = set(flow_state.keys()) self.driver.delete_flows_for_ip_addresses( current_ips - set(addr_to_conj.keys()), direction, ethertype, vlan_tag) for addr, conj_ids in addr_to_conj.items(): conj_ids.sort() if flow_state.get(addr) == conj_ids: continue for flow in rules.create_flows_for_ip_address( addr, direction, ethertype, vlan_tag, conj_ids): self.driver._add_flow(**flow) def update_flows_for_vlan(self, vlan_tag): """Install action=conjunction(conj_id, 1/2) flows, which depend on IP addresses of remote_group_id. """ for (direction, ethertype), sg_conj_id_map in ( self.conj_ids[vlan_tag].items()): # TODO(toshii): optimize when remote_groups have # no address overlaps. addr_to_conj = self._build_addr_conj_id_map( ethertype, sg_conj_id_map) self._update_flows_for_vlan_subr( direction, ethertype, vlan_tag, self.flow_state[vlan_tag][(direction, ethertype)], addr_to_conj) self.flow_state[vlan_tag][(direction, ethertype)] = addr_to_conj def add(self, vlan_tag, sg_id, remote_sg_id, direction, ethertype, priority_offset): """Get conj_id specified by the arguments and notify the manager that (remote_sg_id, direction, ethertype, conj_id) flows need to be populated on the vlan_tag network. A caller must call update_flows_for_vlan to have the change in effect. """ conj_id = self.conj_id_map.get_conj_id( sg_id, remote_sg_id, direction, ethertype) + priority_offset * 2 if (direction, ethertype) not in self.conj_ids[vlan_tag]: self.conj_ids[vlan_tag][(direction, ethertype)] = ( collections.defaultdict(set)) self.conj_ids[vlan_tag][(direction, ethertype)][remote_sg_id].add( conj_id) return conj_id def sg_removed(self, sg_id): """Handle SG removal events. Free all conj_ids associated with the sg_id and clean up obsolete entries from the self.conj_ids map. Unlike the add method, it also updates flows. 
""" id_list = self.conj_id_map.delete_sg(sg_id) unused_dict = collections.defaultdict(set) for remote_sg_id, conj_id in id_list: unused_dict[remote_sg_id].add(conj_id) for vlan_tag, vlan_conj_id_map in self.conj_ids.items(): update = False for sg_conj_id_map in vlan_conj_id_map.values(): for remote_sg_id, unused in unused_dict.items(): if (remote_sg_id in sg_conj_id_map and sg_conj_id_map[remote_sg_id] & unused): sg_conj_id_map[remote_sg_id] -= unused if not sg_conj_id_map[remote_sg_id]: del sg_conj_id_map[remote_sg_id] update = True if update: self.update_flows_for_vlan(vlan_tag) class OVSFirewallDriver(firewall.FirewallDriver): REQUIRED_PROTOCOLS = [ ovs_consts.OPENFLOW10, ovs_consts.OPENFLOW11, ovs_consts.OPENFLOW12, ovs_consts.OPENFLOW13, ovs_consts.OPENFLOW14, ] provides_arp_spoofing_protection = True def __init__(self, integration_bridge): """Initialize object :param integration_bridge: Bridge on which openflow rules will be applied """ self.permitted_ethertypes = cfg.CONF.SECURITYGROUP.permitted_ethertypes self.int_br = self.initialize_bridge(integration_bridge) self.sg_port_map = SGPortMap() self.conj_ip_manager = ConjIPFlowManager(self) self.sg_to_delete = set() self._update_cookie = None self._deferred = False self.iptables_helper = iptables.Helper(self.int_br.br) self.iptables_helper.load_driver_if_needed() self._initialize_firewall() callbacks_registry.subscribe( self._init_firewall_callback, callbacks_resources.AGENT, callbacks_events.OVS_RESTARTED) def _init_firewall_callback(self, resource, event, trigger, payload=None): LOG.info("Reinitialize Openvswitch firewall after OVS restart.") self._initialize_firewall() def _initialize_firewall(self): self._drop_all_unmatched_flows() self._initialize_common_flows() self._initialize_third_party_tables() @contextlib.contextmanager def update_cookie_context(self): try: self._update_cookie = self.int_br.br.request_cookie() yield finally: self.int_br.br.unset_cookie(self._update_cookie) self._update_cookie = None def security_group_updated(self, action_type, sec_group_ids, device_ids=None): """The current driver doesn't make use of this method. It exists here to avoid NotImplementedError raised from the parent class's method. """ def _accept_flow(self, **flow): for f in rules.create_accept_flows(flow): self._add_flow(**f) def _add_flow(self, **kwargs): dl_type = kwargs.get('dl_type') create_reg_numbers(kwargs) if isinstance(dl_type, int): kwargs['dl_type'] = "0x{:04x}".format(dl_type) if self._update_cookie: kwargs['cookie'] = self._update_cookie if self._deferred: self.int_br.add_flow(**kwargs) else: self.int_br.br.add_flow(**kwargs) def _delete_flows(self, **kwargs): create_reg_numbers(kwargs) if self._deferred: self.int_br.delete_flows(**kwargs) else: self.int_br.br.delete_flows(**kwargs) def _strict_delete_flow(self, **kwargs): """Delete given flow right away even if bridge is deferred. Delete command will use strict delete. 
""" create_reg_numbers(kwargs) self.int_br.br.delete_flows(strict=True, **kwargs) @staticmethod def initialize_bridge(int_br): int_br.add_protocols(*OVSFirewallDriver.REQUIRED_PROTOCOLS) return int_br.deferred(full_ordered=True, use_bundle=True) def _drop_all_unmatched_flows(self): for table in ovs_consts.OVS_FIREWALL_TABLES: self.int_br.br.add_flow(table=table, priority=0, actions='drop') def _initialize_common_flows(self): # Remove conntrack information from tracked packets self._add_flow( table=ovs_consts.BASE_EGRESS_TABLE, priority=110, ct_state=ovsfw_consts.OF_STATE_TRACKED, actions='ct_clear,' 'resubmit(,%d)' % ovs_consts.BASE_EGRESS_TABLE, ) def _initialize_third_party_tables(self): self.int_br.br.add_flow( table=ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE, priority=1, actions='normal') self.int_br.br.add_flow( table=ovs_consts.ACCEPTED_EGRESS_TRAFFIC_TABLE, priority=1, actions='resubmit(,%d)' % ( ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE) ) for table in (ovs_consts.ACCEPTED_INGRESS_TRAFFIC_TABLE, ovs_consts.DROPPED_TRAFFIC_TABLE): self.int_br.br.add_flow( table=table, priority=0, actions='drop') def get_ovs_port(self, port_id): ovs_port = self.int_br.br.get_vif_port_by_id(port_id) if not ovs_port: raise exceptions.OVSFWPortNotFound(port_id=port_id) return ovs_port def get_ovs_ports(self, port_ids): return self.int_br.br.get_vifs_by_ids(port_ids) def _get_port_vlan_tag(self, port_name): return get_tag_from_other_config(self.int_br.br, port_name) def _get_port_segmentation_id(self, port_name): return get_segmentation_id_from_other_config( self.int_br.br, port_name) def _get_port_network_type(self, port_name): return get_network_type_from_other_config( self.int_br.br, port_name) def _get_port_physical_network(self, port_name): return get_physical_network_from_other_config( self.int_br.br, port_name) def get_ofport(self, port): port_id = port['device'] return self.sg_port_map.ports.get(port_id) def get_or_create_ofport(self, port): """Get ofport specified by port['device'], checking and reflecting ofport changes. If ofport is nonexistent, create and return one. 
""" port_id = port['device'] ovs_port = self.get_ovs_port(port_id) try: of_port = self.sg_port_map.ports[port_id] except KeyError: port_vlan_id = self._get_port_vlan_tag(ovs_port.port_name) segment_id = self._get_port_segmentation_id( ovs_port.port_name) network_type = self._get_port_network_type( ovs_port.port_name) physical_network = self._get_port_physical_network( ovs_port.port_name) of_port = OFPort(port, ovs_port, port_vlan_id, segment_id, network_type, physical_network) self.sg_port_map.create_port(of_port, port) else: if of_port.ofport != ovs_port.ofport: self.sg_port_map.remove_port(of_port) of_port = OFPort(port, ovs_port, of_port.vlan_tag, of_port.segment_id) self.sg_port_map.create_port(of_port, port) else: self.sg_port_map.update_port(of_port, port) return of_port def is_port_managed(self, port): return port['device'] in self.sg_port_map.ports def prepare_port_filter(self, port): self.iptables_helper.cleanup_port(port) if not firewall.port_sec_enabled(port): self._initialize_egress_no_port_security(port['device']) return try: old_of_port = self.get_ofport(port) of_port = self.get_or_create_ofport(port) if old_of_port: LOG.info("Initializing port %s that was already initialized.", port['device']) self._update_flows_for_port(of_port, old_of_port) else: self._set_port_filters(of_port) except exceptions.OVSFWPortNotFound as not_found_error: LOG.info("port %(port_id)s does not exist in ovsdb: %(err)s.", {'port_id': port['device'], 'err': not_found_error}) except exceptions.OVSFWTagNotFound as tag_not_found: LOG.info("Tag was not found for port %(port_id)s: %(err)s.", {'port_id': port['device'], 'err': tag_not_found}) def update_port_filter(self, port): """Update rules for given port Current existing filtering rules are removed and new ones are generated based on current loaded security group rules and members. """ if not firewall.port_sec_enabled(port): self.remove_port_filter(port) self._initialize_egress_no_port_security(port['device']) return elif not self.is_port_managed(port): try: self._remove_egress_no_port_security(port['device']) except exceptions.OVSFWPortNotHandled as e: LOG.debug(e) else: self.prepare_port_filter(port) return try: # Make sure delete old allowed_address_pair MACs because # allowed_address_pair MACs will be updated in # self.get_or_create_ofport(port) old_of_port = self.get_ofport(port) of_port = self.get_or_create_ofport(port) if old_of_port: self._update_flows_for_port(of_port, old_of_port) else: self._set_port_filters(of_port) except exceptions.OVSFWPortNotFound as not_found_error: LOG.info("port %(port_id)s does not exist in ovsdb: %(err)s.", {'port_id': port['device'], 'err': not_found_error}) # If port doesn't exist in ovsdb, lets ensure that there are no # leftovers self.remove_port_filter(port) except exceptions.OVSFWTagNotFound as tag_not_found: LOG.info("Tag was not found for port %(port_id)s: %(err)s.", {'port_id': port['device'], 'err': tag_not_found}) def _set_port_filters(self, of_port): self.initialize_port_flows(of_port) self.add_flows_from_rules(of_port) def _update_flows_for_port(self, of_port, old_of_port): with self.update_cookie_context(): self._set_port_filters(of_port) # Flush the flows caused by changes made to deferred bridge. The reason # is that following delete_all_port_flows() call uses --strict # parameter that cannot be combined with other non-strict rules, hence # all parameters with --strict are applied right away. 
In order to # avoid applying delete rules with --strict *before* # _set_port_filters() we dump currently cached flows here. self.int_br.apply_flows() self.delete_all_port_flows(old_of_port) # Rewrite update cookie with default cookie self._set_port_filters(of_port) def remove_port_filter(self, port): """Remove port from firewall All flows related to this port are removed from ovs. Port is also removed from ports managed by this firewall. """ if self.is_port_managed(port): of_port = self.get_ofport(port) self.delete_all_port_flows(of_port) self.sg_port_map.remove_port(of_port) for sec_group in of_port.sec_groups: self._schedule_sg_deletion_maybe(sec_group.id) def update_security_group_rules(self, sg_id, rules): self.sg_port_map.update_rules(sg_id, rules) def update_security_group_members(self, sg_id, member_ips): self.sg_port_map.update_members(sg_id, member_ips) if not member_ips: self._schedule_sg_deletion_maybe(sg_id) def _schedule_sg_deletion_maybe(self, sg_id): """Schedule possible deletion of the given SG. This function must be called when the number of ports associated to sg_id drops to zero, as it isn't possible to know SG deletions from agents due to RPC API design. """ sec_group = self.sg_port_map.get_or_create_sg(sg_id) if not sec_group.members or not sec_group.ports: self.sg_to_delete.add(sg_id) def _cleanup_stale_sg(self): sg_to_delete = self.sg_to_delete self.sg_to_delete = set() for sg_id in sg_to_delete: sec_group = self.sg_port_map.get_sg(sg_id) if sec_group.members and sec_group.ports: # sec_group is still in use continue self.conj_ip_manager.sg_removed(sg_id) self.sg_port_map.delete_sg(sg_id) def process_trusted_ports(self, port_ids): """Pass packets from these ports directly to ingress pipeline.""" ovs_ports = self.get_ovs_ports(port_ids) for port_id in port_ids: self._initialize_egress_no_port_security(port_id, ovs_ports=ovs_ports) # yield to let other greenthreads proceed eventlet.sleep(0) def remove_trusted_ports(self, port_ids): for port_id in port_ids: try: self._remove_egress_no_port_security(port_id) except exceptions.OVSFWPortNotHandled as e: LOG.debug(e) def filter_defer_apply_on(self): self._deferred = True def filter_defer_apply_off(self): if self._deferred: self._cleanup_stale_sg() self.int_br.apply_flows() self._deferred = False @property def ports(self): return {id_: port.neutron_port_dict for id_, port in self.sg_port_map.ports.items()} def install_vlan_direct_flow(self, mac, segment_id, ofport, local_vlan): # If the port segment_id is not None/0, the # port's network type must be VLAN type. if segment_id: self._add_flow( table=ovs_consts.TRANSIENT_TABLE, priority=90, dl_dst=mac, dl_vlan='0x%x' % segment_id, actions='set_field:{:d}->reg{:d},' 'set_field:{:d}->reg{:d},' 'strip_vlan,resubmit(,{:d})'.format( ofport, ovsfw_consts.REG_PORT, # This always needs the local vlan. 
local_vlan, ovsfw_consts.REG_NET, ovs_consts.BASE_INGRESS_TABLE) ) def delete_vlan_direct_flow(self, mac, segment_id): if segment_id: self._strict_delete_flow(priority=90, table=ovs_consts.TRANSIENT_TABLE, dl_dst=mac, dl_vlan=segment_id) def initialize_port_flows(self, port): """Set base flows for port :param port: OFPort instance """ # Identify egress flow self._add_flow( table=ovs_consts.TRANSIENT_TABLE, priority=100, in_port=port.ofport, actions='set_field:{:d}->reg{:d},' 'set_field:{:d}->reg{:d},' 'resubmit(,{:d})'.format( port.ofport, ovsfw_consts.REG_PORT, port.vlan_tag, ovsfw_consts.REG_NET, ovs_consts.BASE_EGRESS_TABLE) ) # Identify ingress flows for mac_addr in port.all_allowed_macs: self.install_vlan_direct_flow( mac_addr, port.segment_id, port.ofport, port.vlan_tag) self._add_flow( table=ovs_consts.TRANSIENT_TABLE, priority=90, dl_dst=mac_addr, dl_vlan='0x%x' % port.vlan_tag, actions='set_field:{:d}->reg{:d},' 'set_field:{:d}->reg{:d},' 'strip_vlan,resubmit(,{:d})'.format( port.ofport, ovsfw_consts.REG_PORT, port.vlan_tag, ovsfw_consts.REG_NET, ovs_consts.BASE_INGRESS_TABLE), ) self._initialize_egress(port) self._initialize_ingress(port) def _initialize_egress_ipv6_icmp(self, port): for icmp_type in firewall.ICMPV6_ALLOWED_EGRESS_TYPES: self._add_flow( table=ovs_consts.BASE_EGRESS_TABLE, priority=95, in_port=port.ofport, reg_port=port.ofport, dl_type=lib_const.ETHERTYPE_IPV6, nw_proto=lib_const.PROTO_NUM_IPV6_ICMP, icmp_type=icmp_type, actions='resubmit(,%d)' % ( ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE) ) def _initialize_egress_no_port_security(self, port_id, ovs_ports=None): try: if ovs_ports is not None: ovs_port = ovs_ports.get(port_id) if not ovs_port: raise exceptions.OVSFWPortNotFound(port_id=port_id) else: ovs_port = self.get_ovs_port(port_id) vlan_tag = self._get_port_vlan_tag(ovs_port.port_name) except exceptions.OVSFWTagNotFound: # It's a patch port, don't set anything return except exceptions.OVSFWPortNotFound as not_found_e: LOG.error("Initializing unfiltered port %(port_id)s that does not " "exist in ovsdb: %(err)s.", {'port_id': port_id, 'err': not_found_e}) return self.sg_port_map.unfiltered[port_id] = (ovs_port, vlan_tag) self._add_flow( table=ovs_consts.TRANSIENT_TABLE, priority=100, in_port=ovs_port.ofport, actions='set_field:%d->reg%d,' 'set_field:%d->reg%d,' 'resubmit(,%d)' % ( ovs_port.ofport, ovsfw_consts.REG_PORT, vlan_tag, ovsfw_consts.REG_NET, ovs_consts.ACCEPT_OR_INGRESS_TABLE) ) self._add_flow( table=ovs_consts.ACCEPT_OR_INGRESS_TABLE, priority=80, reg_port=ovs_port.ofport, actions='resubmit(,%d)' % ( ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE) ) tunnel_direct_info = { "network_type": self._get_port_network_type(ovs_port.port_name), "physical_network": self._get_port_physical_network( ovs_port.port_name) } self.install_accepted_egress_direct_flow( ovs_port.vif_mac, vlan_tag, ovs_port.ofport, tunnel_direct_info=tunnel_direct_info) def _remove_egress_no_port_security(self, port_id): try: ovs_port, vlan_tag = self.sg_port_map.unfiltered[port_id] except KeyError: raise exceptions.OVSFWPortNotHandled(port_id=port_id) self._delete_flows( table=ovs_consts.TRANSIENT_TABLE, in_port=ovs_port.ofport ) self._delete_flows( table=ovs_consts.ACCEPT_OR_INGRESS_TABLE, reg_port=ovs_port.ofport ) self.delete_accepted_egress_direct_flow( ovs_port.vif_mac, vlan_tag) del self.sg_port_map.unfiltered[port_id] def _initialize_egress(self, port): """Identify egress traffic and send it to egress base""" self._initialize_egress_ipv6_icmp(port) # Apply mac/ip pairs 
for IPv4 allowed_pairs = port.allowed_pairs_v4.union( {(port.mac, ip_addr) for ip_addr in port.ipv4_addresses}) for mac_addr, ip_addr in allowed_pairs: self._add_flow( table=ovs_consts.BASE_EGRESS_TABLE, priority=95, in_port=port.ofport, reg_port=port.ofport, dl_src=mac_addr, dl_type=lib_const.ETHERTYPE_ARP, arp_spa=ip_addr, actions='resubmit(,%d)' % ( ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE) ) self._add_flow( table=ovs_consts.BASE_EGRESS_TABLE, priority=65, reg_port=port.ofport, dl_type=lib_const.ETHERTYPE_IP, in_port=port.ofport, dl_src=mac_addr, nw_src=ip_addr, actions='ct(table={:d},zone=NXM_NX_REG{:d}[0..15])'.format( ovs_consts.RULES_EGRESS_TABLE, ovsfw_consts.REG_NET) ) # Apply mac/ip pairs for IPv6 allowed_pairs = port.allowed_pairs_v6.union( {(port.mac, ip_addr) for ip_addr in port.ipv6_addresses}) for mac_addr, ip_addr in allowed_pairs: self._add_flow( table=ovs_consts.BASE_EGRESS_TABLE, priority=65, reg_port=port.ofport, in_port=port.ofport, dl_type=lib_const.ETHERTYPE_IPV6, dl_src=mac_addr, ipv6_src=ip_addr, actions='ct(table={:d},zone=NXM_NX_REG{:d}[0..15])'.format( ovs_consts.RULES_EGRESS_TABLE, ovsfw_consts.REG_NET) ) # DHCP discovery for dl_type, src_port, dst_port in ( (lib_const.ETHERTYPE_IP, 68, 67), (lib_const.ETHERTYPE_IPV6, 546, 547)): self._add_flow( table=ovs_consts.BASE_EGRESS_TABLE, priority=80, reg_port=port.ofport, in_port=port.ofport, dl_type=dl_type, nw_proto=lib_const.PROTO_NUM_UDP, tp_src=src_port, tp_dst=dst_port, actions='resubmit(,{:d})'.format( ovs_consts.ACCEPT_OR_INGRESS_TABLE) ) # Ban dhcp service running on an instance for dl_type, src_port, dst_port in ( (lib_const.ETHERTYPE_IP, 67, 68), (lib_const.ETHERTYPE_IPV6, 547, 546)): self._add_flow( table=ovs_consts.BASE_EGRESS_TABLE, priority=70, in_port=port.ofport, reg_port=port.ofport, dl_type=dl_type, nw_proto=lib_const.PROTO_NUM_UDP, tp_src=src_port, tp_dst=dst_port, actions='resubmit(,%d)' % ovs_consts.DROPPED_TRAFFIC_TABLE ) # Drop Router Advertisements from instances self._add_flow( table=ovs_consts.BASE_EGRESS_TABLE, priority=70, in_port=port.ofport, reg_port=port.ofport, dl_type=lib_const.ETHERTYPE_IPV6, nw_proto=lib_const.PROTO_NUM_IPV6_ICMP, icmp_type=lib_const.ICMPV6_TYPE_RA, actions='resubmit(,%d)' % ovs_consts.DROPPED_TRAFFIC_TABLE ) # Allow custom ethertypes for permitted_ethertype in self.permitted_ethertypes: if permitted_ethertype[:2] == '0x': try: hex_ethertype = hex(int(permitted_ethertype, base=16)) action = ('resubmit(,%d)' % ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE) self._add_flow( table=ovs_consts.BASE_EGRESS_TABLE, priority=95, dl_type=hex_ethertype, reg_port=port.ofport, actions=action ) continue except ValueError: pass LOG.warning("Custom ethertype %(permitted_ethertype)s is not " "a hexadecimal number.", {'permitted_ethertype': permitted_ethertype}) # Drop all remaining egress connections self._add_flow( table=ovs_consts.BASE_EGRESS_TABLE, priority=10, in_port=port.ofport, reg_port=port.ofport, actions='ct_clear,' 'resubmit(,%d)' % ovs_consts.DROPPED_TRAFFIC_TABLE ) # Fill in accept_or_ingress table by checking that traffic is ingress # and if not, accept it for mac_addr in port.all_allowed_macs: self._add_flow( table=ovs_consts.ACCEPT_OR_INGRESS_TABLE, priority=100, dl_dst=mac_addr, reg_net=port.vlan_tag, actions='set_field:{:d}->reg{:d},resubmit(,{:d})'.format( port.ofport, ovsfw_consts.REG_PORT, ovs_consts.BASE_INGRESS_TABLE), ) for ethertype in [lib_const.ETHERTYPE_IP, lib_const.ETHERTYPE_IPV6]: self._add_flow( table=ovs_consts.ACCEPT_OR_INGRESS_TABLE, 
priority=90, dl_type=ethertype, reg_port=port.ofport, ct_state=ovsfw_consts.OF_STATE_NEW_NOT_ESTABLISHED, actions='ct(commit,zone=NXM_NX_REG{:d}[0..15]),' 'resubmit(,{:d})'.format( ovsfw_consts.REG_NET, ovs_consts.ACCEPTED_EGRESS_TRAFFIC_TABLE) ) self._add_flow( table=ovs_consts.ACCEPT_OR_INGRESS_TABLE, priority=80, reg_port=port.ofport, actions='resubmit(,%d)' % ( ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE) ) tunnel_direct_info = {"network_type": port.network_type, "physical_network": port.physical_network} self.install_accepted_egress_direct_flow( port.mac, port.vlan_tag, port.ofport, tunnel_direct_info=tunnel_direct_info) def install_accepted_egress_direct_flow(self, mac, vlan_tag, dst_port, tunnel_direct_info=None): if not cfg.CONF.AGENT.explicitly_egress_direct: return # Prevent flood for accepted egress traffic self._add_flow( table=ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE, priority=12, dl_dst=mac, reg_net=vlan_tag, actions='output:{:d}'.format(dst_port) ) # The former flow may not match, that means the destination port is # not in this host. So, we direct the packet to mapped bridge(s). if tunnel_direct_info: patch_ofport = ovs_lib.INVALID_OFPORT if tunnel_direct_info["network_type"] in ( lib_const.TYPE_VXLAN, lib_const.TYPE_GRE, lib_const.TYPE_GENEVE): # Some ports like router internal gateway will not install # the l2pop related flows, so we will transmit the ARP request # packet to tunnel bridge use NORMAL action as usual. port_name = cfg.CONF.OVS.int_peer_patch_port patch_ofport = self.int_br.br.get_port_ofport(port_name) elif tunnel_direct_info["network_type"] == lib_const.TYPE_VLAN: physical_network = tunnel_direct_info["physical_network"] if not physical_network: return bridge_mappings = helpers.parse_mappings( cfg.CONF.OVS.bridge_mappings) bridge = bridge_mappings.get(physical_network) port_name = p_utils.get_interface_name( bridge, prefix=ovs_consts.PEER_INTEGRATION_PREFIX) patch_ofport = self.int_br.br.get_port_ofport(port_name) if patch_ofport is not ovs_lib.INVALID_OFPORT: self._add_flow( table=ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE, priority=10, dl_src=mac, dl_dst="00:00:00:00:00:00/01:00:00:00:00:00", reg_net=vlan_tag, actions='mod_vlan_vid:{:d},output:{:d}'.format( vlan_tag, patch_ofport) ) def delete_accepted_egress_direct_flow(self, mac, vlan_tag): self._delete_flows( table=ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE, dl_dst=mac, reg_net=vlan_tag) self._delete_flows( table=ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE, dl_src=mac, reg_net=vlan_tag) def _initialize_tracked_egress(self, port): # Drop invalid packets self._add_flow( table=ovs_consts.RULES_EGRESS_TABLE, priority=50, ct_state=ovsfw_consts.OF_STATE_INVALID, actions='resubmit(,%d)' % ovs_consts.DROPPED_TRAFFIC_TABLE ) # Drop traffic for removed sg rules self._add_flow( table=ovs_consts.RULES_EGRESS_TABLE, priority=50, reg_port=port.ofport, ct_mark=ovsfw_consts.CT_MARK_INVALID, actions='resubmit(,%d)' % ovs_consts.DROPPED_TRAFFIC_TABLE ) for state in ( ovsfw_consts.OF_STATE_ESTABLISHED_REPLY, ovsfw_consts.OF_STATE_RELATED, ): self._add_flow( table=ovs_consts.RULES_EGRESS_TABLE, priority=50, ct_state=state, ct_mark=ovsfw_consts.CT_MARK_NORMAL, reg_port=port.ofport, ct_zone=port.vlan_tag, actions='resubmit(,%d)' % ( ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE) ) self._add_flow( table=ovs_consts.RULES_EGRESS_TABLE, priority=40, reg_port=port.ofport, ct_state=ovsfw_consts.OF_STATE_NOT_ESTABLISHED, actions='resubmit(,%d)' % ovs_consts.DROPPED_TRAFFIC_TABLE ) for ethertype in 
[lib_const.ETHERTYPE_IP, lib_const.ETHERTYPE_IPV6]: self._add_flow( table=ovs_consts.RULES_EGRESS_TABLE, priority=40, dl_type=ethertype, reg_port=port.ofport, ct_state=ovsfw_consts.OF_STATE_ESTABLISHED, actions="ct(commit,zone=NXM_NX_REG{:d}[0..15]," "exec(set_field:{:s}->ct_mark))".format( ovsfw_consts.REG_NET, ovsfw_consts.CT_MARK_INVALID) ) def _initialize_ingress_ipv6_icmp(self, port): for icmp_type in firewall.ICMPV6_ALLOWED_INGRESS_TYPES: self._add_flow( table=ovs_consts.BASE_INGRESS_TABLE, priority=100, reg_port=port.ofport, dl_type=lib_const.ETHERTYPE_IPV6, nw_proto=lib_const.PROTO_NUM_IPV6_ICMP, icmp_type=icmp_type, actions='output:{:d}'.format(port.ofport) ) def _initialize_ingress(self, port): # Allow incoming ARPs self._add_flow( table=ovs_consts.BASE_INGRESS_TABLE, priority=100, dl_type=lib_const.ETHERTYPE_ARP, reg_port=port.ofport, actions='output:{:d}'.format(port.ofport) ) self._initialize_ingress_ipv6_icmp(port) # DHCP offers for dl_type, src_port, dst_port in ( (lib_const.ETHERTYPE_IP, 67, 68), (lib_const.ETHERTYPE_IPV6, 547, 546)): self._add_flow( table=ovs_consts.BASE_INGRESS_TABLE, priority=95, reg_port=port.ofport, dl_type=dl_type, nw_proto=lib_const.PROTO_NUM_UDP, tp_src=src_port, tp_dst=dst_port, actions='output:{:d}'.format(port.ofport) ) # Track untracked for dl_type in (lib_const.ETHERTYPE_IP, lib_const.ETHERTYPE_IPV6): self._add_flow( table=ovs_consts.BASE_INGRESS_TABLE, priority=90, reg_port=port.ofport, dl_type=dl_type, ct_state=ovsfw_consts.OF_STATE_NOT_TRACKED, actions='ct(table={:d},zone=NXM_NX_REG{:d}[0..15])'.format( ovs_consts.RULES_INGRESS_TABLE, ovsfw_consts.REG_NET) ) self._add_flow( table=ovs_consts.BASE_INGRESS_TABLE, ct_state=ovsfw_consts.OF_STATE_TRACKED, priority=80, reg_port=port.ofport, actions='resubmit(,{:d})'.format(ovs_consts.RULES_INGRESS_TABLE) ) def _initialize_tracked_ingress(self, port): # Drop invalid packets self._add_flow( table=ovs_consts.RULES_INGRESS_TABLE, priority=50, ct_state=ovsfw_consts.OF_STATE_INVALID, actions='resubmit(,%d)' % ovs_consts.DROPPED_TRAFFIC_TABLE ) # Drop traffic for removed sg rules self._add_flow( table=ovs_consts.RULES_INGRESS_TABLE, priority=50, reg_port=port.ofport, ct_mark=ovsfw_consts.CT_MARK_INVALID, actions='resubmit(,%d)' % ovs_consts.DROPPED_TRAFFIC_TABLE ) # Allow established and related connections for state in (ovsfw_consts.OF_STATE_ESTABLISHED_REPLY, ovsfw_consts.OF_STATE_RELATED): self._add_flow( table=ovs_consts.RULES_INGRESS_TABLE, priority=50, reg_port=port.ofport, ct_state=state, ct_mark=ovsfw_consts.CT_MARK_NORMAL, ct_zone=port.vlan_tag, actions='output:{:d}'.format(port.ofport) ) self._add_flow( table=ovs_consts.RULES_INGRESS_TABLE, priority=40, reg_port=port.ofport, ct_state=ovsfw_consts.OF_STATE_NOT_ESTABLISHED, actions='resubmit(,%d)' % ovs_consts.DROPPED_TRAFFIC_TABLE ) for ethertype in [lib_const.ETHERTYPE_IP, lib_const.ETHERTYPE_IPV6]: self._add_flow( table=ovs_consts.RULES_INGRESS_TABLE, priority=40, dl_type=ethertype, reg_port=port.ofport, ct_state=ovsfw_consts.OF_STATE_ESTABLISHED, actions="ct(commit,zone=NXM_NX_REG{:d}[0..15]," "exec(set_field:{:s}->ct_mark))".format( ovsfw_consts.REG_NET, ovsfw_consts.CT_MARK_INVALID) ) def _add_non_ip_conj_flows(self, port): """Install conjunction flows that don't depend on IP address of remote groups, which consist of actions=conjunction(conj_id, 2/2) flows and actions=accept flows. The remaining part is done by ConjIPFlowManager. 
""" port_rules = collections.defaultdict(list) for sec_group_id, rule in ( self._create_remote_rules_generator_for_port(port)): direction = rule['direction'] ethertype = rule['ethertype'] protocol = rule.get('protocol') priority_offset = rules.flow_priority_offset(rule) conj_id = self.conj_ip_manager.add(port.vlan_tag, sec_group_id, rule['remote_group_id'], direction, ethertype, priority_offset) LOG.debug("Created conjunction %(conj_id)s for SG %(sg_id)s " "referencing remote SG ID %(remote_sg_id)s on port " "%(port_id)s.", {'conj_id': conj_id, 'sg_id': sec_group_id, 'remote_sg_id': rule['remote_group_id'], 'port_id': port.id}) rule1 = rule.copy() del rule1['remote_group_id'] port_rules_key = (direction, ethertype, protocol) port_rules[port_rules_key].append((rule1, conj_id)) for (direction, ethertype, protocol), rule_conj_list in ( port_rules.items()): all_conj_ids = set() for rule, conj_id in rule_conj_list: all_conj_ids.add(conj_id) if protocol in [lib_const.PROTO_NUM_SCTP, lib_const.PROTO_NUM_TCP, lib_const.PROTO_NUM_UDP]: rule_conj_list = rules.merge_port_ranges(rule_conj_list) else: rule_conj_list = rules.merge_common_rules(rule_conj_list) for rule, conj_ids in rule_conj_list: flows = rules.create_flows_from_rule_and_port( rule, port, conjunction=True) for flow in rules.substitute_conjunction_actions( flows, 2, conj_ids): self._add_flow(**flow) # Install accept flows and store conj_id to reg7 for future process for conj_id in all_conj_ids: for flow in rules.create_conj_flows( port, conj_id, direction, ethertype): flow['actions'] = "set_field:{:d}->reg{:d},{:s}".format( flow['conj_id'], ovsfw_consts.REG_REMOTE_GROUP, flow['actions'] ) self._add_flow(**flow) def add_flows_from_rules(self, port): self._initialize_tracked_ingress(port) self._initialize_tracked_egress(port) LOG.debug('Creating flow rules for port %s that is port %d in OVS', port.id, port.ofport) for rule in self._create_rules_generator_for_port(port): # NOTE(toshii): A better version of merge_common_rules and # its friend should be applied here in order to avoid # overlapping flows. flows = rules.create_flows_from_rule_and_port(rule, port) LOG.debug("RULGEN: Rules generated for flow %s are %s", rule, flows) for flow in flows: self._accept_flow(**flow) self._add_non_ip_conj_flows(port) self.conj_ip_manager.update_flows_for_vlan(port.vlan_tag) def _create_rules_generator_for_port(self, port): for sec_group in port.sec_groups: for rule in sec_group.raw_rules: yield rule def _create_remote_rules_generator_for_port(self, port): for sec_group in port.sec_groups: for rule in sec_group.remote_rules: yield sec_group.id, rule def delete_all_port_flows(self, port): """Delete all flows for given port""" for mac_addr in port.all_allowed_macs: self._strict_delete_flow(priority=90, table=ovs_consts.TRANSIENT_TABLE, dl_dst=mac_addr, dl_vlan=port.vlan_tag) self.delete_vlan_direct_flow(mac_addr, port.segment_id) self._delete_flows(table=ovs_consts.ACCEPT_OR_INGRESS_TABLE, dl_dst=mac_addr, reg_net=port.vlan_tag) self.delete_accepted_egress_direct_flow( port.mac, port.vlan_tag) self._strict_delete_flow(priority=100, table=ovs_consts.TRANSIENT_TABLE, in_port=port.ofport) self._delete_flows(reg_port=port.ofport) def delete_flows_for_ip_addresses( self, ip_addresses, direction, ethertype, vlan_tag): if not cfg.CONF.AGENT.explicitly_egress_direct: return for ip_addr in ip_addresses: # Generate deletion template with bogus conj_id. 
flows = rules.create_flows_for_ip_address( ip_addr, direction, ethertype, vlan_tag, [0]) for f in flows: # The following del statements are partly for # complying the OpenFlow spec. It forbids the use of # these field in non-strict delete flow messages, and # the actions field is bogus anyway. del f['actions'] del f['priority'] self._delete_flows(**f) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/linux/openvswitch_firewall/iptables.py0000644000175000017500000000700200000000000027504 0ustar00coreycorey00000000000000# Copyright 2017 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as n_const def get_device_port_name(port_id): return ('qvo' + port_id)[:n_const.LINUX_DEV_LEN] def get_iptables_driver_instance(): """Load hybrid iptables firewall driver.""" from neutron.agent.linux import iptables_firewall class HybridIptablesHelper( iptables_firewall.OVSHybridIptablesFirewallDriver): """Don't remove conntrack when removing iptables rules.""" def _remove_conntrack_entries_from_port_deleted(self, port): pass return HybridIptablesHelper() def is_bridge_cleaned(bridge): other_config = bridge.db_get_val( 'Bridge', bridge.br_name, 'other_config') return other_config.get(Helper.CLEANED_METADATA, '').lower() == 'true' class Helper(object): """Helper to avoid loading firewall driver. The main purpose is to avoid loading iptables driver for cases where no ports have hybrid plugging on given node. The helper stores metadata for iptables cleanup into br-int ovsdb Bridge table. Specifically it checks for other_config['iptables_cleaned'] boolean value. 
""" HYBRID_PORT_PREFIX = 'qvo' CLEANED_METADATA = 'iptables_cleaned' def __init__(self, int_br): self.int_br = int_br self.hybrid_ports = None self.iptables_driver = None def load_driver_if_needed(self): self.hybrid_ports = self.get_hybrid_ports() if self.hybrid_ports and self.has_not_been_cleaned: self.iptables_driver = get_iptables_driver_instance() def get_hybrid_ports(self): """Return True if there is a port with hybrid plugging.""" return { port_name for port_name in self.int_br.get_port_name_list() if port_name.startswith(self.HYBRID_PORT_PREFIX)} def cleanup_port(self, port): if not self.iptables_driver: return device_name = get_device_port_name(port['device']) try: self.hybrid_ports.remove(device_name) except KeyError: # It's not a hybrid plugged port return # TODO(jlibosva): Optimize, add port to firewall without installing # iptables rules and then call remove from firewall self.iptables_driver.prepare_port_filter(port) self.iptables_driver.remove_port_filter(port) if not self.hybrid_ports: self.mark_as_cleaned() # Let GC remove iptables driver self.iptables_driver = None @property def has_not_been_cleaned(self): return not is_bridge_cleaned(self.int_br) def mark_as_cleaned(self): # TODO(jlibosva): Make it a single transaction other_config = self.int_br.db_get_val( 'Bridge', self.int_br.br_name, 'other_config') other_config[self.CLEANED_METADATA] = 'true' self.int_br.set_db_attribute( 'Bridge', self.int_br.br_name, 'other_config', other_config) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/linux/openvswitch_firewall/rules.py0000644000175000017500000003247200000000000027044 0ustar00coreycorey00000000000000# Copyright 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import netaddr from neutron_lib import constants as n_consts from neutron._i18n import _ from neutron.agent.linux.openvswitch_firewall import constants as ovsfw_consts from neutron.common import utils from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \ as ovs_consts CT_STATES = [ ovsfw_consts.OF_STATE_ESTABLISHED_NOT_REPLY, ovsfw_consts.OF_STATE_NEW_NOT_ESTABLISHED] FLOW_FIELD_FOR_IPVER_AND_DIRECTION = { (n_consts.IP_VERSION_4, n_consts.EGRESS_DIRECTION): 'nw_dst', (n_consts.IP_VERSION_6, n_consts.EGRESS_DIRECTION): 'ipv6_dst', (n_consts.IP_VERSION_4, n_consts.INGRESS_DIRECTION): 'nw_src', (n_consts.IP_VERSION_6, n_consts.INGRESS_DIRECTION): 'ipv6_src', } FORBIDDEN_PREFIXES = (n_consts.IPv4_ANY, n_consts.IPv6_ANY) def is_valid_prefix(ip_prefix): # IPv6 have multiple ways how to describe ::/0 network, converting to # IPNetwork and back to string unifies it return (ip_prefix and str(netaddr.IPNetwork(ip_prefix)) not in FORBIDDEN_PREFIXES) def _assert_mergeable_rules(rule_conj_list): """Assert a given (rule, conj_ids) list has mergeable rules. The given rules must be the same except for port_range_{min,max} differences. 
""" rule_tmpl = rule_conj_list[0][0].copy() rule_tmpl.pop('port_range_min', None) rule_tmpl.pop('port_range_max', None) for rule, conj_id in rule_conj_list[1:]: rule1 = rule.copy() rule1.pop('port_range_min', None) rule1.pop('port_range_max', None) if rule_tmpl != rule1: raise RuntimeError( _("Incompatible SG rules detected: %(rule1)s and %(rule2)s. " "They cannot be merged. This should not happen.") % {'rule1': rule_tmpl, 'rule2': rule}) def merge_common_rules(rule_conj_list): """Take a list of (rule, conj_id) and merge elements with the same rules. Return a list of (rule, conj_id_list). """ if len(rule_conj_list) == 1: rule, conj_id = rule_conj_list[0] return [(rule, [conj_id])] _assert_mergeable_rules(rule_conj_list) rule_conj_map = collections.defaultdict(list) for rule, conj_id in rule_conj_list: rule_conj_map[(rule.get('port_range_min'), rule.get('port_range_max'))].append(conj_id) result = [] rule_tmpl = rule_conj_list[0][0] rule_tmpl.pop('port_range_min', None) rule_tmpl.pop('port_range_max', None) for (port_min, port_max), conj_ids in rule_conj_map.items(): rule = rule_tmpl.copy() if port_min is not None: rule['port_range_min'] = port_min if port_max is not None: rule['port_range_max'] = port_max result.append((rule, conj_ids)) return result def _merge_port_ranges_helper(port_range_item): # Sort with 'port' but 'min' things must come first. port, m, dummy = port_range_item return port * 2 + (0 if m == 'min' else 1) def merge_port_ranges(rule_conj_list): """Take a list of (rule, conj_id) and transform into a list whose rules don't overlap. Return a list of (rule, conj_id_list). """ if len(rule_conj_list) == 1: rule, conj_id = rule_conj_list[0] return [(rule, [conj_id])] _assert_mergeable_rules(rule_conj_list) port_ranges = [] for rule, conj_id in rule_conj_list: port_ranges.append((rule.get('port_range_min', 1), 'min', conj_id)) port_ranges.append((rule.get('port_range_max', 65535), 'max', conj_id)) port_ranges.sort(key=_merge_port_ranges_helper) # The idea here is to scan the port_ranges list in an ascending order, # keeping active conjunction IDs and range in cur_conj and cur_range_min. # A 'min' port_ranges item means an addition to cur_conj, while a 'max' # item means a removal. result = [] rule_tmpl = rule_conj_list[0][0] cur_conj = {} cur_range_min = None for port, m, conj_id in port_ranges: if m == 'min': if conj_id in cur_conj: cur_conj[conj_id] += 1 continue if cur_conj and cur_range_min != port: rule = rule_tmpl.copy() rule['port_range_min'] = cur_range_min rule['port_range_max'] = port - 1 result.append((rule, list(cur_conj.keys()))) cur_range_min = port cur_conj[conj_id] = 1 else: if cur_conj[conj_id] > 1: cur_conj[conj_id] -= 1 continue if cur_range_min <= port: rule = rule_tmpl.copy() rule['port_range_min'] = cur_range_min rule['port_range_max'] = port result.append((rule, list(cur_conj.keys()))) # The next port range without 'port' starts from (port + 1) cur_range_min = port + 1 del cur_conj[conj_id] if (len(result) == 1 and result[0][0]['port_range_min'] == 1 and result[0][0]['port_range_max'] == 65535): del result[0][0]['port_range_min'] del result[0][0]['port_range_max'] return result def flow_priority_offset(rule, conjunction=False): """Calculate flow priority offset from rule. Whether the rule belongs to conjunction flows or not is decided upon existence of rule['remote_group_id'] but can be overridden to be True using the optional conjunction arg. 
""" conj_offset = 0 if 'remote_group_id' in rule or conjunction else 4 protocol = rule.get('protocol') if protocol is None: return conj_offset if protocol in [n_consts.PROTO_NUM_ICMP, n_consts.PROTO_NUM_IPV6_ICMP]: if 'port_range_min' not in rule: return conj_offset + 1 elif 'port_range_max' not in rule: return conj_offset + 2 return conj_offset + 3 def create_flows_from_rule_and_port(rule, port, conjunction=False): """Create flows from given args. For description of the optional conjunction arg, see flow_priority_offset. """ ethertype = rule['ethertype'] direction = rule['direction'] dst_ip_prefix = rule.get('dest_ip_prefix') src_ip_prefix = rule.get('source_ip_prefix') flow_template = { 'priority': 70 + flow_priority_offset(rule, conjunction), 'dl_type': ovsfw_consts.ethertype_to_dl_type_map[ethertype], 'reg_port': port.ofport, } if is_valid_prefix(dst_ip_prefix): flow_template[FLOW_FIELD_FOR_IPVER_AND_DIRECTION[( utils.get_ip_version(dst_ip_prefix), n_consts.EGRESS_DIRECTION)] ] = dst_ip_prefix if is_valid_prefix(src_ip_prefix): flow_template[FLOW_FIELD_FOR_IPVER_AND_DIRECTION[( utils.get_ip_version(src_ip_prefix), n_consts.INGRESS_DIRECTION)] ] = src_ip_prefix flows = create_protocol_flows(direction, flow_template, port, rule) return flows def populate_flow_common(direction, flow_template, port): """Initialize common flow fields.""" if direction == n_consts.INGRESS_DIRECTION: flow_template['table'] = ovs_consts.RULES_INGRESS_TABLE flow_template['actions'] = "output:{:d}".format(port.ofport) elif direction == n_consts.EGRESS_DIRECTION: flow_template['table'] = ovs_consts.RULES_EGRESS_TABLE # Traffic can be both ingress and egress, check that no ingress rules # should be applied flow_template['actions'] = 'resubmit(,{:d})'.format( ovs_consts.ACCEPT_OR_INGRESS_TABLE) return flow_template def create_protocol_flows(direction, flow_template, port, rule): flow_template = populate_flow_common(direction, flow_template.copy(), port) protocol = rule.get('protocol') if protocol is not None: flow_template['nw_proto'] = protocol if protocol in [n_consts.PROTO_NUM_ICMP, n_consts.PROTO_NUM_IPV6_ICMP]: flows = create_icmp_flows(flow_template, rule) else: flows = create_port_range_flows(flow_template, rule) return flows or [flow_template] def create_port_range_flows(flow_template, rule): protocol = ovsfw_consts.REVERSE_IP_PROTOCOL_MAP_WITH_PORTS.get( rule.get('protocol')) if protocol is None: return [] flows = [] src_port_match = '{:s}_src'.format(protocol) src_port_min = rule.get('source_port_range_min') src_port_max = rule.get('source_port_range_max') dst_port_match = '{:s}_dst'.format(protocol) dst_port_min = rule.get('port_range_min') dst_port_max = rule.get('port_range_max') dst_port_range = [] if dst_port_min and dst_port_max: dst_port_range = utils.port_rule_masking(dst_port_min, dst_port_max) src_port_range = [] if src_port_min and src_port_max: src_port_range = utils.port_rule_masking(src_port_min, src_port_max) for port in src_port_range: flow = flow_template.copy() flow[src_port_match] = port if dst_port_range: for port in dst_port_range: dst_flow = flow.copy() dst_flow[dst_port_match] = port flows.append(dst_flow) else: flows.append(flow) else: for port in dst_port_range: flow = flow_template.copy() flow[dst_port_match] = port flows.append(flow) return flows def create_icmp_flows(flow_template, rule): icmp_type = rule.get('port_range_min') if icmp_type is None: return flow = flow_template.copy() flow['icmp_type'] = icmp_type icmp_code = rule.get('port_range_max') if icmp_code is not None: 
flow['icmp_code'] = icmp_code return [flow] def _flow_priority_offset_from_conj_id(conj_id): "Return a flow priority offset encoded in a conj_id." # A base conj_id, which is returned by ConjIdMap.get_conj_id, is a # multiple of 8, and we use 2 conj_ids per offset. return conj_id % 8 // 2 def create_flows_for_ip_address(ip_address, direction, ethertype, vlan_tag, conj_ids): """Create flows from a rule and an ip_address derived from remote_group_id """ # Group conj_ids per priority. conj_id_lists = [[] for i in range(4)] for conj_id in conj_ids: conj_id_lists[ _flow_priority_offset_from_conj_id(conj_id)].append(conj_id) ip_prefix = str(netaddr.IPNetwork(ip_address).cidr) flow_template = { 'dl_type': ovsfw_consts.ethertype_to_dl_type_map[ethertype], 'reg_net': vlan_tag, # needed for project separation } ip_ver = utils.get_ip_version(ip_prefix) if direction == n_consts.EGRESS_DIRECTION: flow_template['table'] = ovs_consts.RULES_EGRESS_TABLE elif direction == n_consts.INGRESS_DIRECTION: flow_template['table'] = ovs_consts.RULES_INGRESS_TABLE flow_template[FLOW_FIELD_FOR_IPVER_AND_DIRECTION[( ip_ver, direction)]] = ip_prefix result = [] for offset, conj_id_list in enumerate(conj_id_lists): if not conj_id_list: continue flow_template['priority'] = 70 + offset result.extend( substitute_conjunction_actions([flow_template], 1, conj_id_list)) return result def create_accept_flows(flow): flow['ct_state'] = CT_STATES[0] result = [flow.copy()] flow['ct_state'] = CT_STATES[1] if flow['table'] == ovs_consts.RULES_INGRESS_TABLE: flow['actions'] = ( 'ct(commit,zone=NXM_NX_REG{:d}[0..15]),{:s},' 'resubmit(,{:d})'.format( ovsfw_consts.REG_NET, flow['actions'], ovs_consts.ACCEPTED_INGRESS_TRAFFIC_TABLE) ) result.append(flow) return result def substitute_conjunction_actions(flows, dimension, conj_ids): result = [] for flow in flows: for i in range(2): new_flow = flow.copy() new_flow['ct_state'] = CT_STATES[i] new_flow['actions'] = ','.join( ["conjunction(%d,%d/2)" % (s + i, dimension) for s in conj_ids]) result.append(new_flow) return result def create_conj_flows(port, conj_id, direction, ethertype): """Generate "accept" flows for a given conjunction ID.""" flow_template = { 'priority': 70 + _flow_priority_offset_from_conj_id(conj_id), 'conj_id': conj_id, 'dl_type': ovsfw_consts.ethertype_to_dl_type_map[ethertype], # This reg_port matching is for delete_all_port_flows. # The matching is redundant as it has been done by # conjunction(...,2/2) flows and flows can be summarized # without this. 'reg_port': port.ofport, } flow_template = populate_flow_common(direction, flow_template, port) flows = create_accept_flows(flow_template) flows[1]['conj_id'] += 1 return flows ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/linux/pd.py0000644000175000017500000004045000000000000022052 0ustar00coreycorey00000000000000# Copyright 2015 Cisco Systems # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
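# --- Illustrative sketch (not part of the packaged sources) ---
# A minimal, hypothetical trace of merge_port_ranges() from
# neutron/agent/linux/openvswitch_firewall/rules.py above: two otherwise
# identical TCP rules with overlapping destination port ranges, tagged
# with hypothetical conjunction IDs 8 and 16, are split into disjoint
# spans, each carrying every conj_id whose original range covers it.

from neutron.agent.linux.openvswitch_firewall import rules as ovsfw_rules

rule_a = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 6,
          'port_range_min': 20, 'port_range_max': 30}
rule_b = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 6,
          'port_range_min': 25, 'port_range_max': 40}

merged = ovsfw_rules.merge_port_ranges([(rule_a, 8), (rule_b, 16)])
# Overlap resolved into disjoint spans with regrouped conjunction IDs:
#   20..24 -> [8], 25..30 -> [8, 16], 31..40 -> [16]
assert [(r['port_range_min'], r['port_range_max'], ids)
        for r, ids in merged] == [(20, 24, [8]),
                                  (25, 30, [8, 16]),
                                  (31, 40, [16])]
# --- end of sketch ---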
import functools import signal import eventlet from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as n_const from neutron_lib.utils import runtime from oslo_log import log as logging from oslo_utils import netutils import six from stevedore import driver from neutron.common import utils LOG = logging.getLogger(__name__) class PrefixDelegation(object): def __init__(self, context, pmon, intf_driver, notifier, pd_update_cb, agent_conf): self.context = context self.pmon = pmon self.intf_driver = intf_driver self.notifier = notifier self.routers = {} self.pd_update_cb = pd_update_cb self.agent_conf = agent_conf self.pd_dhcp_driver = driver.DriverManager( namespace='neutron.agent.linux.pd_drivers', name=agent_conf.prefix_delegation_driver, ).driver registry.subscribe(add_router, resources.ROUTER, events.BEFORE_CREATE) registry.subscribe(update_router, resources.ROUTER, events.AFTER_UPDATE) registry.subscribe(remove_router, resources.ROUTER, events.AFTER_DELETE) self._get_sync_data() def _is_pd_master_router(self, router): return router['master'] @runtime.synchronized("l3-agent-pd") def enable_subnet(self, router_id, subnet_id, prefix, ri_ifname, mac): router = self.routers.get(router_id) if router is None: return pd_info = router['subnets'].get(subnet_id) if not pd_info: pd_info = PDInfo(ri_ifname=ri_ifname, mac=mac) router['subnets'][subnet_id] = pd_info pd_info.bind_lla = self._get_lla(mac) if pd_info.sync: pd_info.mac = mac pd_info.old_prefix = prefix elif self._is_pd_master_router(router): self._add_lla(router, pd_info.get_bind_lla_with_mask()) def _delete_pd(self, router, pd_info): if not self._is_pd_master_router(router): return self._delete_lla(router, pd_info.get_bind_lla_with_mask()) if pd_info.client_started: pd_info.driver.disable(self.pmon, router['ns_name']) @runtime.synchronized("l3-agent-pd") def disable_subnet(self, router_id, subnet_id): prefix_update = {} router = self.routers.get(router_id) if not router: return pd_info = router['subnets'].get(subnet_id) if not pd_info: return self._delete_pd(router, pd_info) if self._is_pd_master_router(router): prefix_update[subnet_id] = n_const.PROVISIONAL_IPV6_PD_PREFIX LOG.debug("Update server with prefixes: %s", prefix_update) self.notifier(self.context, prefix_update) del router['subnets'][subnet_id] @runtime.synchronized("l3-agent-pd") def update_subnet(self, router_id, subnet_id, prefix): router = self.routers.get(router_id) if router is not None: pd_info = router['subnets'].get(subnet_id) if pd_info and pd_info.old_prefix != prefix: old_prefix = pd_info.old_prefix pd_info.old_prefix = prefix pd_info.prefix = prefix return old_prefix @runtime.synchronized("l3-agent-pd") def add_gw_interface(self, router_id, gw_ifname): router = self.routers.get(router_id) if not router: return router['gw_interface'] = gw_ifname if not self._is_pd_master_router(router): return prefix_update = {} for pd_info in six.itervalues(router['subnets']): # gateway is added after internal router ports. 
# If a PD is being synced, and if the prefix is available, # send update if prefix out of sync; If not available, # start the PD client bind_lla_with_mask = pd_info.get_bind_lla_with_mask() if pd_info.sync: pd_info.sync = False if pd_info.client_started: if pd_info.prefix != pd_info.old_prefix: prefix_update['subnet_id'] = pd_info.prefix else: self._delete_lla(router, bind_lla_with_mask) self._add_lla(router, bind_lla_with_mask) else: self._add_lla(router, bind_lla_with_mask) if prefix_update: LOG.debug("Update server with prefixes: %s", prefix_update) self.notifier(self.context, prefix_update) def delete_router_pd(self, router): if not self._is_pd_master_router(router): return prefix_update = {} for subnet_id, pd_info in router['subnets'].items(): self._delete_lla(router, pd_info.get_bind_lla_with_mask()) if pd_info.client_started: pd_info.driver.disable(self.pmon, router['ns_name']) pd_info.prefix = None pd_info.client_started = False prefix = n_const.PROVISIONAL_IPV6_PD_PREFIX prefix_update[subnet_id] = prefix if prefix_update: LOG.debug("Update server with prefixes: %s", prefix_update) self.notifier(self.context, prefix_update) @runtime.synchronized("l3-agent-pd") def remove_gw_interface(self, router_id): router = self.routers.get(router_id) if router is not None: router['gw_interface'] = None self.delete_router_pd(router) @runtime.synchronized("l3-agent-pd") def get_preserve_ips(self, router_id): preserve_ips = [] router = self.routers.get(router_id) if router is not None: for pd_info in six.itervalues(router['subnets']): preserve_ips.append(pd_info.get_bind_lla_with_mask()) return preserve_ips @runtime.synchronized("l3-agent-pd") def sync_router(self, router_id): router = self.routers.get(router_id) if router is not None and router['gw_interface'] is None: self.delete_router_pd(router) @runtime.synchronized("l3-agent-pd") def remove_stale_ri_ifname(self, router_id, stale_ifname): router = self.routers.get(router_id) if router is not None: subnet_to_delete = None for subnet_id, pd_info in six.iteritems(router['subnets']): if pd_info.ri_ifname == stale_ifname: self._delete_pd(router, pd_info) subnet_to_delete = subnet_id break if subnet_to_delete: del router['subnets'][subnet_to_delete] @staticmethod def _get_lla(mac): lla = netutils.get_ipv6_addr_by_EUI64(n_const.IPv6_LLA_PREFIX, mac) return lla def _get_llas(self, gw_ifname, ns_name): try: return self.intf_driver.get_ipv6_llas(gw_ifname, ns_name) except RuntimeError: # The error message was printed as part of the driver call # This could happen if the gw_ifname was removed # simply return and exit the thread return def _add_lla(self, router, lla_with_mask): if router['gw_interface']: self.intf_driver.add_ipv6_addr(router['gw_interface'], lla_with_mask, router['ns_name'], 'link') # There is a delay before the LLA becomes active. 
# This is because the kernel runs DAD to make sure LLA uniqueness # Spawn a thread to wait for the interface to be ready self._spawn_lla_thread(router['gw_interface'], router['ns_name'], lla_with_mask) def _spawn_lla_thread(self, gw_ifname, ns_name, lla_with_mask): eventlet.spawn_n(self._ensure_lla_task, gw_ifname, ns_name, lla_with_mask) def _delete_lla(self, router, lla_with_mask): if lla_with_mask and router['gw_interface']: try: self.intf_driver.delete_ipv6_addr(router['gw_interface'], lla_with_mask, router['ns_name']) except RuntimeError: # Ignore error if the lla doesn't exist pass def _ensure_lla_task(self, gw_ifname, ns_name, lla_with_mask): # It would be insane for taking so long unless DAD test failed # In that case, the subnet would never be assigned a prefix. utils.wait_until_true(functools.partial(self._lla_available, gw_ifname, ns_name, lla_with_mask), timeout=n_const.LLA_TASK_TIMEOUT, sleep=2) def _lla_available(self, gw_ifname, ns_name, lla_with_mask): llas = self._get_llas(gw_ifname, ns_name) if self._is_lla_active(lla_with_mask, llas): LOG.debug("LLA %s is active now", lla_with_mask) self.pd_update_cb() return True @staticmethod def _is_lla_active(lla_with_mask, llas): for lla in llas: if lla_with_mask == lla['cidr']: return not lla['tentative'] return False @runtime.synchronized("l3-agent-pd") def process_ha_state(self, router_id, master): router = self.routers.get(router_id) if router is None or router['master'] == master: return router['master'] = master if master: for pd_info in six.itervalues(router['subnets']): bind_lla_with_mask = pd_info.get_bind_lla_with_mask() self._add_lla(router, bind_lla_with_mask) else: for pd_info in six.itervalues(router['subnets']): self._delete_lla(router, pd_info.get_bind_lla_with_mask()) if pd_info.client_started: pd_info.driver.disable(self.pmon, router['ns_name'], switch_over=True) pd_info.client_started = False @runtime.synchronized("l3-agent-pd") def process_prefix_update(self): LOG.debug("Processing IPv6 PD Prefix Update") prefix_update = {} for router_id, router in self.routers.items(): if not (self._is_pd_master_router(router) and router['gw_interface']): continue llas = None for subnet_id, pd_info in router['subnets'].items(): if pd_info.client_started: prefix = pd_info.driver.get_prefix() if prefix != pd_info.prefix: pd_info.prefix = prefix prefix_update[subnet_id] = prefix else: if not llas: llas = self._get_llas(router['gw_interface'], router['ns_name']) if self._is_lla_active(pd_info.get_bind_lla_with_mask(), llas): if not pd_info.driver: pd_info.driver = self.pd_dhcp_driver( router_id, subnet_id, pd_info.ri_ifname) prefix = None if (pd_info.prefix != n_const.PROVISIONAL_IPV6_PD_PREFIX): prefix = pd_info.prefix pd_info.driver.enable(self.pmon, router['ns_name'], router['gw_interface'], pd_info.bind_lla, prefix) pd_info.client_started = True if prefix_update: LOG.debug("Update server with prefixes: %s", prefix_update) self.notifier(self.context, prefix_update) def after_start(self): LOG.debug('SIGUSR1 signal handler set') signal.signal(signal.SIGUSR1, self._handle_sigusr1) def _handle_sigusr1(self, signum, frame): """Update PD on receiving SIGUSR1. The external DHCPv6 client uses SIGUSR1 to notify agent of prefix changes. 
""" self.pd_update_cb() def _get_sync_data(self): sync_data = self.pd_dhcp_driver.get_sync_data() for pd_info in sync_data: router_id = pd_info.router_id if not self.routers.get(router_id): self.routers[router_id] = {'master': True, 'gw_interface': None, 'ns_name': None, 'subnets': {}} new_pd_info = PDInfo(pd_info=pd_info) subnets = self.routers[router_id]['subnets'] subnets[pd_info.subnet_id] = new_pd_info @runtime.synchronized("l3-agent-pd") def remove_router(resource, event, l3_agent, **kwargs): router_id = kwargs['router'].router_id router = l3_agent.pd.routers.get(router_id) l3_agent.pd.delete_router_pd(router) del l3_agent.pd.routers[router_id]['subnets'] del l3_agent.pd.routers[router_id] def get_router_entry(ns_name, master): return {'master': master, 'gw_interface': None, 'ns_name': ns_name, 'subnets': {}} @runtime.synchronized("l3-agent-pd") def add_router(resource, event, l3_agent, **kwargs): added_router = kwargs['router'] router = l3_agent.pd.routers.get(added_router.router_id) gw_ns_name = added_router.get_gw_ns_name() master = added_router.is_router_master() if not router: l3_agent.pd.routers[added_router.router_id] = ( get_router_entry(gw_ns_name, master)) else: # This will happen during l3 agent restart router['ns_name'] = gw_ns_name router['master'] = master @runtime.synchronized("l3-agent-pd") def update_router(resource, event, l3_agent, **kwargs): updated_router = kwargs['router'] router = l3_agent.pd.routers.get(updated_router.router_id) if not router: LOG.exception("Router to be updated is not in internal routers " "list: %s", updated_router.router_id) else: router['ns_name'] = updated_router.get_gw_ns_name() class PDInfo(object): """A class to simplify storing and passing of information relevant to Prefix Delegation operations for a given subnet. """ def __init__(self, pd_info=None, ri_ifname=None, mac=None): if pd_info is None: self.prefix = n_const.PROVISIONAL_IPV6_PD_PREFIX self.old_prefix = n_const.PROVISIONAL_IPV6_PD_PREFIX self.ri_ifname = ri_ifname self.mac = mac self.bind_lla = None self.sync = False self.driver = None self.client_started = False else: self.prefix = pd_info.prefix self.old_prefix = None self.ri_ifname = pd_info.ri_ifname self.mac = None self.bind_lla = None self.sync = True self.driver = pd_info.driver self.client_started = pd_info.client_started def get_bind_lla_with_mask(self): bind_lla_with_mask = '%s/64' % self.bind_lla return bind_lla_with_mask ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/linux/pd_driver.py0000644000175000017500000000340400000000000023423 0ustar00coreycorey00000000000000# Copyright 2015 Cisco Systems # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc import six from neutron.conf.agent import common as agent_conf agent_conf.register_pddriver_opts() @six.add_metaclass(abc.ABCMeta) class PDDriverBase(object): def __init__(self, router_id, subnet_id, ri_ifname): self.router_id = router_id self.subnet_id = subnet_id self.ri_ifname = ri_ifname @abc.abstractmethod def enable(self, pmon, router_ns, ex_gw_ifname, lla): """Enable IPv6 Prefix Delegation for this PDDriver on the given external interface, with the given link local address """ @abc.abstractmethod def disable(self, pmon, router_ns): """Disable IPv6 Prefix Delegation for this PDDriver """ @abc.abstractmethod def get_prefix(self): """Get the current assigned prefix for this PDDriver from the PD agent. If no prefix is currently assigned, return neutron_lib.constants.PROVISIONAL_IPV6_PD_PREFIX """ @staticmethod @abc.abstractmethod def get_sync_data(): """Get the latest router_id, subnet_id, and ri_ifname from the PD agent so that the PDDriver can be kept up to date """ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/linux/ra.py0000644000175000017500000001632100000000000022051 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import pwd from itertools import chain as iter_chain import jinja2 import netaddr from neutron_lib import constants from neutron_lib.utils import file as file_utils from oslo_log import log as logging import six from neutron.agent.linux import external_process from neutron.agent.linux import utils RADVD_SERVICE_NAME = 'radvd' RADVD_SERVICE_CMD = 'radvd' # We can configure max of 3 DNS servers in radvd RDNSS section. 
MAX_RDNSS_ENTRIES = 3 LOG = logging.getLogger(__name__) CONFIG_TEMPLATE = jinja2.Template("""interface {{ interface_name }} { AdvSendAdvert on; MinRtrAdvInterval {{ min_rtr_adv_interval }}; MaxRtrAdvInterval {{ max_rtr_adv_interval }}; {% if network_mtu >= constants.IPV6_MIN_MTU %} AdvLinkMTU {{network_mtu}}; {% endif %} {% if constants.DHCPV6_STATELESS in ra_modes %} AdvOtherConfigFlag on; {% endif %} {% if constants.DHCPV6_STATEFUL in ra_modes %} AdvManagedFlag on; {% endif %} {% if dns_servers %} RDNSS {% for dns in dns_servers %} {{ dns }} {% endfor %} {}; {% endif %} {% for prefix in auto_config_prefixes %} prefix {{ prefix }} { AdvOnLink on; AdvAutonomous on; }; {% endfor %} {% for prefix in stateful_config_prefixes %} prefix {{ prefix }} { AdvOnLink on; AdvAutonomous off; }; {% endfor %} }; """) class DaemonMonitor(object): """Manage the data and state of an radvd process.""" def __init__(self, router_id, router_ns, process_monitor, dev_name_helper, agent_conf): self._router_id = router_id self._router_ns = router_ns self._process_monitor = process_monitor self._dev_name_helper = dev_name_helper self._agent_conf = agent_conf def _generate_radvd_conf(self, router_ports): radvd_conf = utils.get_conf_file_name(self._agent_conf.ra_confs, self._router_id, 'radvd.conf', True) buf = six.StringIO() for p in router_ports: subnets = p.get('subnets', []) v6_subnets = [subnet for subnet in subnets if netaddr.IPNetwork(subnet['cidr']).version == 6] if not v6_subnets: continue ra_modes = {subnet['ipv6_ra_mode'] for subnet in v6_subnets} auto_config_prefixes = [ subnet['cidr'] for subnet in v6_subnets if (subnet['ipv6_ra_mode'] == constants.IPV6_SLAAC or subnet['ipv6_ra_mode'] == constants.DHCPV6_STATELESS) ] stateful_config_prefixes = [ subnet['cidr'] for subnet in v6_subnets if subnet['ipv6_ra_mode'] == constants.DHCPV6_STATEFUL ] interface_name = self._dev_name_helper(p['id']) slaac_subnets = [ subnet for subnet in v6_subnets if subnet['ipv6_ra_mode'] == constants.IPV6_SLAAC ] dns_servers = list(iter_chain(*[ subnet['dns_nameservers'] for subnet in slaac_subnets if subnet.get('dns_nameservers') ])) network_mtu = p.get('mtu', 0) buf.write('%s' % CONFIG_TEMPLATE.render( ra_modes=list(ra_modes), interface_name=interface_name, auto_config_prefixes=auto_config_prefixes, stateful_config_prefixes=stateful_config_prefixes, dns_servers=dns_servers[0:MAX_RDNSS_ENTRIES], n_const=constants, constants=constants, min_rtr_adv_interval=self._agent_conf.min_rtr_adv_interval, max_rtr_adv_interval=self._agent_conf.max_rtr_adv_interval, network_mtu=int(network_mtu))) contents = buf.getvalue() LOG.debug("radvd config = %s", contents) # radvd conf file can't be writeable by self/group file_utils.replace_file(radvd_conf, contents, file_mode=0o444) return radvd_conf def _get_radvd_process_manager(self, callback=None): return external_process.ProcessManager( uuid=self._router_id, default_cmd_callback=callback, namespace=self._router_ns, service=RADVD_SERVICE_NAME, conf=self._agent_conf, run_as_root=True) def _spawn_radvd(self, radvd_conf): def callback(pid_file): if not self._agent_conf.radvd_user: radvd_user = pwd.getpwuid(os.geteuid()).pw_name elif self._agent_conf.radvd_user == 'root': radvd_user = None else: radvd_user = self._agent_conf.radvd_user # we need to use -m syslog and f.e. 
not -m stderr (the default) # or -m stderr_syslog so that radvd 2.0+ will close stderr and # exit after daemonization; otherwise, the current thread will # be locked waiting for result from radvd that won't ever come # until the process dies radvd_cmd = [RADVD_SERVICE_CMD, '-C', '%s' % radvd_conf, '-p', '%s' % pid_file, '-m', 'syslog'] if radvd_user: radvd_cmd += ['-u', '%s' % radvd_user] return radvd_cmd pm = self._get_radvd_process_manager(callback) pm.enable(reload_cfg=True) self._process_monitor.register(uuid=self._router_id, service_name=RADVD_SERVICE_NAME, monitored_process=pm) LOG.debug("radvd enabled for router %s", self._router_id) def enable(self, router_ports): for p in router_ports: for subnet in p['subnets']: if netaddr.IPNetwork(subnet['cidr']).version == 6: LOG.debug("Enable IPv6 RA for router %s", self._router_id) radvd_conf = self._generate_radvd_conf(router_ports) self._spawn_radvd(radvd_conf) return # Kill the daemon if it's running self.disable() def disable(self): self._process_monitor.unregister(uuid=self._router_id, service_name=RADVD_SERVICE_NAME) pm = self._get_radvd_process_manager() pm.disable() utils.remove_conf_files(self._agent_conf.ra_confs, self._router_id) LOG.debug("radvd disabled for router %s", self._router_id) @property def enabled(self): return self._get_radvd_process_manager().active ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/linux/tc_lib.py0000644000175000017500000005667000000000000022716 0ustar00coreycorey00000000000000# Copyright 2016 OVH SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
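# --- Illustrative sketch (not part of the packaged sources) ---
# A minimal render of CONFIG_TEMPLATE from neutron/agent/linux/ra.py
# above, approximating what DaemonMonitor._generate_radvd_conf() writes
# for a single SLAAC subnet. The interface name, prefix, DNS server and
# intervals are hypothetical example values.

from neutron_lib import constants
from neutron.agent.linux import ra

conf = ra.CONFIG_TEMPLATE.render(
    ra_modes=[constants.IPV6_SLAAC],
    interface_name='qr-1234abcd-56',
    auto_config_prefixes=['2001:db8::/64'],
    stateful_config_prefixes=[],
    dns_servers=['2001:4860:4860::8888'],
    constants=constants,
    min_rtr_adv_interval=30,
    max_rtr_adv_interval=100,
    network_mtu=1500)
# Expected shape: an "interface qr-1234abcd-56 { ... };" stanza with
# AdvSendAdvert on, Min/MaxRtrAdvInterval 30/100, AdvLinkMTU 1500
# (1500 >= IPV6_MIN_MTU), an RDNSS line, and a SLAAC prefix block
# "prefix 2001:db8::/64 { AdvOnLink on; AdvAutonomous on; };".
# --- end of sketch ---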
import math import re import netaddr from neutron_lib import exceptions from neutron_lib.exceptions import qos as qos_exc from neutron_lib.services.qos import constants as qos_consts from oslo_log import log as logging from pyroute2.iproute import linux as iproute_linux from pyroute2.netlink import rtnl from pyroute2.netlink.rtnl.tcmsg import common as rtnl_common from neutron._i18n import _ from neutron.agent.linux import ip_lib from neutron.common import utils from neutron.privileged.agent.linux import tc_lib as priv_tc_lib LOG = logging.getLogger(__name__) INGRESS_QDISC_ID = "ffff:" MAX_MTU_VALUE = 65535 LATENCY_UNIT = "ms" BW_LIMIT_UNIT = "kbit" # kilobits per second in tc's notation BURST_UNIT = "kbit" # kilobits in tc's notation # Those are RATES (bits per second) and SIZE (bytes) unit names from tc manual UNITS = { "k": 1, "m": 2, "g": 3, "t": 4 } filters_pattern = re.compile(r"police \w+ rate (\w+) burst (\w+)") tbf_pattern = re.compile( r"qdisc (\w+) \w+: \w+ refcnt \d rate (\w+) burst (\w+) \w*") TC_QDISC_TYPE_HTB = 'htb' TC_QDISC_TYPE_TBF = 'tbf' TC_QDISC_TYPE_INGRESS = 'ingress' TC_QDISC_TYPES = [TC_QDISC_TYPE_HTB, TC_QDISC_TYPE_TBF, TC_QDISC_TYPE_INGRESS] TC_QDISC_PARENT = {'root': rtnl.TC_H_ROOT, 'ingress': rtnl.TC_H_INGRESS} TC_QDISC_PARENT_NAME = {v: k for k, v in TC_QDISC_PARENT.items()} TC_CLASS_MAX_FLOWID = 0xffff # NOTE(ralonsoh): VXLAN header: +28 bytes from the outer MAC header (TC # initial offset) # - VXLAN flags: 1 byte # - Reserved: 3 bytes # - VNI: 3 bytes --> VXLAN_VNI_OFFSET = 32 (+32 from the TC initial offset) # - Reserved: 1 byte VXLAN_INNER_SRC_MAC_OFFSET = 42 VXLAN_VNI_OFFSET = 32 class InvalidKernelHzValue(exceptions.NeutronException): message = _("Kernel HZ value %(value)s is not valid. This value must be " "greater than 0.") class InvalidUnit(exceptions.NeutronException): message = _("Unit name '%(unit)s' is not valid.") def convert_to_kilobits(value, base): value = value.lower() if "bit" in value: input_in_bits = True value = value.replace("bit", "") else: input_in_bits = False value = value.replace("b", "") # if it is now bare number then it is in bits, so we return it simply if value.isdigit(): value = int(value) if input_in_bits: return utils.bits_to_kilobits(value, base) else: bits_value = utils.bytes_to_bits(value) return utils.bits_to_kilobits(bits_value, base) unit = value[-1:] if unit not in UNITS.keys(): raise InvalidUnit(unit=unit) val = int(value[:-1]) if input_in_bits: bits_value = val * (base ** UNITS[unit]) else: bits_value = utils.bytes_to_bits(val * (base ** UNITS[unit])) return utils.bits_to_kilobits(bits_value, base) def _get_attr(pyroute2_obj, attr_name): """Get an attribute in a pyroute object pyroute2 object attributes are stored under a key called 'attrs'. This key contains a tuple of tuples. E.g.: pyroute2_obj = {'attrs': (('TCA_KIND': 'htb'), ('TCA_OPTIONS': {...}))} :param pyroute2_obj: (dict) pyroute2 object :param attr_name: (string) first value of the tuple we are looking for :return: (object) second value of the tuple, None if the tuple doesn't exist """ rule_attrs = pyroute2_obj.get('attrs', []) for attr in (attr for attr in rule_attrs if attr[0] == attr_name): return attr[1] return def _get_tbf_burst_value(rate, burst_limit, kernel_hz): min_burst_value = float(rate) / float(kernel_hz) return max(min_burst_value, burst_limit) def _calc_burst(rate, buffer): """Calculate burst rate :param rate: (int) rate in bytes per second. :param buffer: (int) buffer size in bytes. 
    :return: (int) burst in bytes
    """
    # NOTE(ralonsoh): this function is based on
    # pyroute2.netlink.rtnl.tcmsg.common.calc_xmittime
    return int(math.ceil(
        float(buffer * rate) /
        (rtnl_common.TIME_UNITS_PER_SEC * rtnl_common.tick_in_usec)))


def _calc_min_rate(burst):
    """Calculate minimum rate (bytes per second) accepted by Pyroute2

    When creating a TC policy class, this function calculates the minimum
    rate (bytes/sec) accepted by Pyroute2. This method is based on
    pyroute2.netlink.rtnl.tcmsg.common.calc_xmittime

    :param burst: (int) burst in bytes.
    :return: (int) minimum accepted rate in bytes per second.
    """
    return max(8, math.ceil(
        (rtnl_common.TIME_UNITS_PER_SEC * rtnl_common.tick_in_usec *
         burst) / 2**32))


def _calc_latency_ms(limit, burst, rate):
    """Calculate latency value, in ms

    :param limit: (int) pyroute2 limit value
    :param burst: (int) burst in bytes
    :param rate: (int) maximum bandwidth in bytes per second
    :return: (int) latency, in ms
    """
    return int(math.ceil(
        float((limit - burst) * rtnl_common.TIME_UNITS_PER_SEC) /
        (rate * 1000)))


def _handle_from_hex_to_string(handle):
    """Convert TC handle from hex to string

    :param handle: (int) TC handle
    :return: (string) handle formatted to string: 0xMMMMmmmm -> "M:m"
    """
    minor = format(handle & 0xFFFF, 'x')
    major = format((handle & 0xFFFF0000) >> 16, 'x')
    return ':'.join([major, minor])


def _mac_to_pyroute2_keys(mac, offset):
    """Convert a MAC address to a list of filter keys

    For example:
      MAC: '01:23:45:67:89:0a', offset: 8
      keys: ['0x01234567/0xffffffff+8', '0x890a0000/0xffff0000+12']

    :param mac: (string) MAC address
    :param offset: (int) natural number, offset in bytes from the IP header
    """
    int_mac = int(netaddr.EUI(mac))
    high_value = int_mac >> 16
    high_mask = 0xffffffff
    high_offset = offset
    high = {'value': high_value,
            'mask': high_mask,
            'offset': high_offset,
            'key': (hex(high_value) + '/' + hex(high_mask) + '+' +
                    str(high_offset))}
    low_value = (int_mac & 0xffff) << 16
    low_mask = 0xffff0000
    low_offset = offset + 4
    low = {'value': low_value,
           'mask': low_mask,
           'offset': low_offset,
           'key': hex(low_value) + '/' + hex(low_mask) + '+' + str(low_offset)}
    return [high, low]


class TcCommand(ip_lib.IPDevice):

    def __init__(self, name, kernel_hz, namespace=None):
        if kernel_hz <= 0:
            raise InvalidKernelHzValue(value=kernel_hz)
        super(TcCommand, self).__init__(name, namespace=namespace)
        self.kernel_hz = kernel_hz

    @staticmethod
    def get_ingress_qdisc_burst_value(bw_limit, burst_limit):
        """Return burst value used in ingress qdisc.

        If the burst value is not given, it will be set to a default
        fraction of the bandwidth limit, to ensure that limiting TCP
        traffic works well.
        """
        if not burst_limit:
            return float(bw_limit) * qos_consts.DEFAULT_BURST_RATE
        return burst_limit

    def get_filters_bw_limits(self, qdisc_id=INGRESS_QDISC_ID):
        filters = list_tc_filters(self.name, qdisc_id,
                                  namespace=self.namespace)
        if filters:
            return filters[0].get('rate_kbps'), filters[0].get('burst_kb')
        return None, None

    def get_tbf_bw_limits(self):
        qdiscs = list_tc_qdiscs(self.name, namespace=self.namespace)
        if not qdiscs:
            return None, None
        qdisc = qdiscs[0]
        if qdisc['qdisc_type'] != 'tbf':
            return None, None
        return qdisc['max_kbps'], qdisc['burst_kb']

    def set_filters_bw_limit(self, bw_limit, burst_limit):
        """Set ingress qdisc and filter to police ingress traffic on device

        This polices traffic incoming to the interface which, from the
        instance's point of view, means limiting its egress traffic.
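        For example, policing the ingress of an instance's tap interface
        effectively caps that instance's upload (egress) bandwidth.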
""" # because replace of tc filters is not working properly and it's adding # new filters each time instead of replacing existing one first old # ingress qdisc should be deleted and then added new one so update will # be called to do that: return self.update_filters_bw_limit(bw_limit, burst_limit) def set_tbf_bw_limit(self, bw_limit, burst_limit, latency_value): """Set/update token bucket filter qdisc on device This will allow to limit speed of packets going out from interface. It means that it is fine to limit ingress traffic from instance point of view. """ return add_tc_qdisc(self.name, 'tbf', parent='root', max_kbps=bw_limit, burst_kb=burst_limit, latency_ms=latency_value, kernel_hz=self.kernel_hz, namespace=self.namespace) def update_filters_bw_limit(self, bw_limit, burst_limit): self.delete_filters_bw_limit() add_tc_qdisc(self.name, 'ingress', namespace=self.namespace) return self._add_policy_filter(bw_limit, burst_limit) def delete_filters_bw_limit(self): # NOTE(slaweq): For limit traffic egress from instance we need to use # qdisc "ingress" because it is ingress traffic from interface POV: delete_tc_qdisc(self.name, is_ingress=True, raise_interface_not_found=False, raise_qdisc_not_found=False, namespace=self.namespace) def delete_tbf_bw_limit(self): delete_tc_qdisc(self.name, parent='root', raise_interface_not_found=False, raise_qdisc_not_found=False, namespace=self.namespace) def _add_policy_filter(self, bw_limit, burst_limit, qdisc_id=INGRESS_QDISC_ID): # NOTE(slaweq): it is made in exactly same way how openvswitch is doing # it when configuing ingress traffic limit on port. It can be found in # lib/netdev-linux.c#L4698 in openvswitch sources: add_tc_filter_policy(self.name, qdisc_id, bw_limit, burst_limit, MAX_MTU_VALUE, 'drop', priority=49) def add_tc_qdisc(device, qdisc_type, parent=None, handle=None, latency_ms=None, max_kbps=None, burst_kb=None, kernel_hz=None, namespace=None): """Add/replace a TC qdisc on a device pyroute2 input parameters: - rate (min bw): bytes/second - burst: bytes - latency: us :param device: (string) device name :param qdisc_type: (string) qdisc type (TC_QDISC_TYPES) :param parent: (string) qdisc parent class ('root', '2:10') :param handle: (string, int) (required for HTB) major handler identifier (0xffff0000, '1', '1:', '1:0') [1] :param latency_ms: (string, int) (required for TBF) latency time in ms :param max_kbps: (string, int) (required for TBF) maximum bandwidth in kbits per second. :param burst_kb: (string, int) (required for TBF) maximum bandwidth in kbits. :param kernel_hz: (string, int) (required for TBF) kernel HZ. 
:param namespace: (string) (optional) namespace name [1] https://lartc.org/howto/lartc.qdisc.classful.html """ if qdisc_type and qdisc_type not in TC_QDISC_TYPES: raise qos_exc.TcLibQdiscTypeError( qdisc_type=qdisc_type, supported_qdisc_types=TC_QDISC_TYPES) args = {'kind': qdisc_type} if qdisc_type in ['htb', 'ingress']: if handle: args['handle'] = str(handle).split(':')[0] + ':0' elif qdisc_type == 'tbf': if not latency_ms or not max_kbps or not kernel_hz: raise qos_exc.TcLibQdiscNeededArguments( qdisc_type=qdisc_type, needed_arguments=['latency_ms', 'max_kbps', 'kernel_hz']) args['burst'] = int( _get_tbf_burst_value(max_kbps, burst_kb, kernel_hz) * 1024 / 8) args['rate'] = int(max_kbps * 1024 / 8) args['latency'] = latency_ms * 1000 if parent: args['parent'] = rtnl.TC_H_ROOT if parent == 'root' else parent priv_tc_lib.add_tc_qdisc(device, namespace=namespace, **args) def list_tc_qdiscs(device, namespace=None): """List all TC qdiscs of a device :param device: (string) device name :param namespace: (string) (optional) namespace name :return: (list) TC qdiscs """ qdiscs = priv_tc_lib.list_tc_qdiscs(device, namespace=namespace) retval = [] for qdisc in qdiscs: qdisc_attrs = { 'qdisc_type': _get_attr(qdisc, 'TCA_KIND'), 'parent': TC_QDISC_PARENT_NAME.get( qdisc['parent'], _handle_from_hex_to_string(qdisc['parent'])), 'handle': _handle_from_hex_to_string(qdisc['handle'])} if qdisc_attrs['qdisc_type'] == 'tbf': tca_options = _get_attr(qdisc, 'TCA_OPTIONS') tca_tbf_parms = _get_attr(tca_options, 'TCA_TBF_PARMS') qdisc_attrs['max_kbps'] = int(tca_tbf_parms['rate'] * 8 / 1024) burst_bytes = _calc_burst(tca_tbf_parms['rate'], tca_tbf_parms['buffer']) qdisc_attrs['burst_kb'] = int(burst_bytes * 8 / 1024) qdisc_attrs['latency_ms'] = _calc_latency_ms( tca_tbf_parms['limit'], burst_bytes, tca_tbf_parms['rate']) retval.append(qdisc_attrs) return retval def delete_tc_qdisc(device, parent=None, is_ingress=False, raise_interface_not_found=True, raise_qdisc_not_found=True, namespace=None): """Delete a TC qdisc of a device :param device: (string) device name :param parent: (string) (optional) qdisc parent class ('root', '2:10') :param is_ingress: (bool) (optional) if qdisc type is 'ingress' :param raise_interface_not_found: (bool) (optional) raise exception if the interface doesn't exist :param raise_qdisc_not_found: (bool) (optional) raise exception if the qdisc doesn't exist :param namespace: (string) (optional) namespace name """ qdisc_type = 'ingress' if is_ingress else None if parent: parent = rtnl.TC_H_ROOT if parent == 'root' else parent priv_tc_lib.delete_tc_qdisc( device, parent=parent, kind=qdisc_type, raise_interface_not_found=raise_interface_not_found, raise_qdisc_not_found=raise_qdisc_not_found, namespace=namespace) def add_tc_policy_class(device, parent, classid, max_kbps, min_kbps=None, burst_kb=None, namespace=None): """Add a TC policy class :param device: (string) device name :param parent: (string) qdisc parent class ('root', 'ingress', '2:10') :param classid: (string) major:minor handler identifier ('10:20') :param max_kbps: (int) maximum bandwidth in kbps :param min_kbps: (int) (optional) minimum bandwidth in kbps :param burst_kb: (int) (optional) burst size in kb :param namespace: (string) (optional) namespace name :return: """ parent = TC_QDISC_PARENT.get(parent, parent) if not burst_kb: burst_kb = max_kbps * qos_consts.DEFAULT_BURST_RATE # NOTE(ralonsoh): pyroute2 input parameters and units [1]: # - rate (min bw): bytes/second # - ceil (max bw): bytes/second # - burst: bytes # [1] 
https://www.systutorials.com/docs/linux/man/8-tc/ kwargs = {'ceil': int(max_kbps * 1024 / 8), 'burst': int(burst_kb * 1024 / 8)} rate = int((min_kbps or 0) * 1024 / 8) min_rate = _calc_min_rate(kwargs['burst']) if min_rate > rate: LOG.warning('TC HTB class policy rate %(rate)s (bytes/second) is ' 'lower than the minimum accepted %(min_rate)s ' '(bytes/second), for device %(device)s, qdisc ' '%(qdisc)s and classid %(classid)s', {'rate': rate, 'min_rate': min_rate, 'device': device, 'qdisc': parent, 'classid': classid}) rate = min_rate kwargs['rate'] = rate priv_tc_lib.add_tc_policy_class(device, parent, classid, 'htb', namespace=namespace, **kwargs) def list_tc_policy_class(device, namespace=None): """List all TC policy classes of a device :param device: (string) device name :param namespace: (string) (optional) namespace name :return: (list) TC policy classes """ def get_params(tca_options, qdisc_type): if qdisc_type not in TC_QDISC_TYPES: return None, None, None tca_params = _get_attr(tca_options, 'TCA_' + qdisc_type.upper() + '_PARMS') burst_kb = int( _calc_burst(tca_params['rate'], tca_params['buffer']) * 8 / 1024) max_kbps = int(tca_params['ceil'] * 8 / 1024) min_kbps = int(tca_params['rate'] * 8 / 1024) return max_kbps, min_kbps, burst_kb tc_classes = priv_tc_lib.list_tc_policy_classes(device, namespace=namespace) classes = [] for tc_class in tc_classes: index = tc_class['index'] parent = TC_QDISC_PARENT_NAME.get( tc_class['parent'], _handle_from_hex_to_string(tc_class['parent'])) classid = _handle_from_hex_to_string(tc_class['handle']) qdisc_type = _get_attr(tc_class, 'TCA_KIND') tca_options = _get_attr(tc_class, 'TCA_OPTIONS') max_kbps, min_kbps, burst_kb = get_params(tca_options, qdisc_type) tc_class_data = {'device': device, 'index': index, 'namespace': namespace, 'parent': parent, 'classid': classid, 'qdisc_type': qdisc_type, 'min_kbps': min_kbps, 'max_kbps': max_kbps, 'burst_kb': burst_kb} tca_stats = _get_attr(tc_class, 'TCA_STATS') if tca_stats: tc_class_data['stats'] = tca_stats classes.append(tc_class_data) return classes def delete_tc_policy_class(device, parent, classid, namespace=None): """Delete a TC policy class of a device. :param device: (string) device name :param parent: (string) qdisc parent class ('root', 'ingress', '2:10') :param classid: (string) major:minor handler identifier ('10:20') :param namespace: (string) (optional) namespace name """ priv_tc_lib.delete_tc_policy_class(device, parent, classid, namespace=namespace) def add_tc_filter_vxlan(device, parent, classid, src_mac, vxlan_id, namespace=None): """Add a TC filter to match VXLAN traffic based on the VM mac and the VNI. :param device: (string) device name :param parent: (string) qdisc parent class ('root', 'ingress', '2:10') :param classid: (string) major:minor handler identifier ('10:20') :param src_mac: (string) source MAC address to match (VM mac) :param vxlan_id: (int) VXLAN ID (VNI) :param namespace: (string) (optional) namespace name """ keys = [hex(int(vxlan_id << 8)) + '/0xffffff00+' + str(VXLAN_VNI_OFFSET)] keys += [key['key'] for key in _mac_to_pyroute2_keys(src_mac, VXLAN_INNER_SRC_MAC_OFFSET)] priv_tc_lib.add_tc_filter_match32(device, parent, 1, classid, keys, namespace=namespace) def add_tc_filter_match_mac(device, parent, classid, mac, offset=0, priority=0, protocol=None, namespace=None): """Add a TC filter in a device to match a MAC address. 
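    Internally the MAC address is expanded into two u32 match keys via
    _mac_to_pyroute2_keys() above.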
:param device: (string) device name :param parent: (string) qdisc parent class ('root', 'ingress', '2:10') :param classid: (string) major:minor handler identifier ('10:20') :param mac: (string) MAC address to match :param offset: (int) (optional) match offset, starting from the outer packet IP header :param priority: (int) (optional) filter priority (lower priority, higher preference) :param protocol: (int) (optional) traffic filter protocol; if None, all will be matched. :param namespace: (string) (optional) namespace name """ keys = [key['key'] for key in _mac_to_pyroute2_keys(mac, offset)] priv_tc_lib.add_tc_filter_match32(device, parent, priority, classid, keys, protocol=protocol, namespace=namespace) def add_tc_filter_policy(device, parent, rate_kbps, burst_kb, mtu, action, priority=0, protocol=None, namespace=None): """Add a TC filter in a device to set a policy. :param device: (string) device name :param parent: (string) qdisc parent class ('root', 'ingress', '2:10') :param rate_kbps: (int) rate in kbits/second :param burst_kb: (int) burst in kbits :param mtu: (int) MTU size (bytes) :param action: (string) filter policy action :param priority: (int) (optional) filter priority (lower priority, higher preference) :param protocol: (int) (optional) traffic filter protocol; if None, all will be matched. :param namespace: (string) (optional) namespace name """ rate = int(rate_kbps * 1024 / 8) burst = int(burst_kb * 1024 / 8) priv_tc_lib.add_tc_filter_policy(device, parent, priority, rate, burst, mtu, action, protocol=protocol, namespace=namespace) def list_tc_filters(device, parent, namespace=None): """List TC filter in a device :param device: (string) device name :param parent: (string) qdisc parent class ('root', 'ingress', '2:10') :param namespace: (string) (optional) namespace name """ parent = iproute_linux.transform_handle(parent) filters = priv_tc_lib.list_tc_filters(device, parent, namespace=namespace) retval = [] for filter in filters: tca_options = _get_attr(filter, 'TCA_OPTIONS') if not tca_options: continue tca_u32_sel = _get_attr(tca_options, 'TCA_U32_SEL') if not tca_u32_sel: continue keys = [] for key in tca_u32_sel['keys']: key_off = key['key_off'] value = 0 for i in range(4): value = (value << 8) + (key_off & 0xff) key_off = key_off >> 8 keys.append({'value': value, 'mask': key['key_val'], 'offset': key['key_offmask']}) value = {'keys': keys} tca_u32_police = _get_attr(tca_options, 'TCA_U32_POLICE') if tca_u32_police: tca_police_tbf = _get_attr(tca_u32_police, 'TCA_POLICE_TBF') if tca_police_tbf: value['rate_kbps'] = int(tca_police_tbf['rate'] * 8 / 1024) value['burst_kb'] = int( _calc_burst(tca_police_tbf['rate'], tca_police_tbf['burst']) * 8 / 1024) value['mtu'] = tca_police_tbf['mtu'] retval.append(value) return retval ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/linux/utils.py0000644000175000017500000004127000000000000022610 0ustar00coreycorey00000000000000# Copyright 2012 Locaweb. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import glob import grp import os import pwd import shlex import socket import threading import time import eventlet from eventlet.green import subprocess from neutron_lib import exceptions from neutron_lib.utils import helpers from oslo_config import cfg from oslo_log import log as logging from oslo_rootwrap import client from oslo_utils import encodeutils from oslo_utils import excutils from oslo_utils import fileutils from six.moves import http_client as httplib from neutron._i18n import _ from neutron.agent.linux import xenapi_root_helper from neutron.common import utils from neutron.conf.agent import common as config from neutron import wsgi LOG = logging.getLogger(__name__) class RootwrapDaemonHelper(object): __client = None __lock = threading.Lock() def __new__(cls): """There is no reason to instantiate this class""" raise NotImplementedError() @classmethod def get_client(cls): with cls.__lock: if cls.__client is None: if (xenapi_root_helper.ROOT_HELPER_DAEMON_TOKEN == cfg.CONF.AGENT.root_helper_daemon): cls.__client = xenapi_root_helper.XenAPIClient() else: cls.__client = client.Client( shlex.split(cfg.CONF.AGENT.root_helper_daemon)) return cls.__client def addl_env_args(addl_env): """Build arguments for adding additional environment vars with env""" # NOTE (twilson) If using rootwrap, an EnvFilter should be set up for the # command instead of a CommandFilter. if addl_env is None: return [] return ['env'] + ['%s=%s' % pair for pair in addl_env.items()] def create_process(cmd, run_as_root=False, addl_env=None): """Create a process object for the given command. The return value will be a tuple of the process object and the list of command arguments used to create it. """ cmd = list(map(str, addl_env_args(addl_env) + cmd)) if run_as_root: cmd = shlex.split(config.get_root_helper(cfg.CONF)) + cmd LOG.debug("Running command: %s", cmd) obj = utils.subprocess_popen(cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) return obj, cmd def execute_rootwrap_daemon(cmd, process_input, addl_env): cmd = list(map(str, addl_env_args(addl_env) + cmd)) # NOTE(twilson) oslo_rootwrap.daemon will raise on filter match # errors, whereas oslo_rootwrap.cmd converts them to return codes. # In practice, no neutron code should be trying to execute something that # would throw those errors, and if it does it should be fixed as opposed to # just logging the execution error. 
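    # (The rootwrap daemon client's execute() returns a (returncode,
    # stdout, stderr) tuple; e.g. a successful 'ip netns list' call made
    # through here might return (0, 'qrouter-<uuid>\n', '') - illustrative
    # values, not actual output.)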
LOG.debug("Running command (rootwrap daemon): %s", cmd) client = RootwrapDaemonHelper.get_client() try: return client.execute(cmd, process_input) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Rootwrap error running command: %s", cmd) def execute(cmd, process_input=None, addl_env=None, check_exit_code=True, return_stderr=False, log_fail_as_error=True, extra_ok_codes=None, run_as_root=False): try: if process_input is not None: _process_input = encodeutils.to_utf8(process_input) else: _process_input = None if run_as_root and cfg.CONF.AGENT.root_helper_daemon: returncode, _stdout, _stderr = ( execute_rootwrap_daemon(cmd, process_input, addl_env)) else: obj, cmd = create_process(cmd, run_as_root=run_as_root, addl_env=addl_env) _stdout, _stderr = obj.communicate(_process_input) returncode = obj.returncode obj.stdin.close() _stdout = helpers.safe_decode_utf8(_stdout) _stderr = helpers.safe_decode_utf8(_stderr) extra_ok_codes = extra_ok_codes or [] if returncode and returncode not in extra_ok_codes: msg = _("Exit code: %(returncode)d; " "Stdin: %(stdin)s; " "Stdout: %(stdout)s; " "Stderr: %(stderr)s") % { 'returncode': returncode, 'stdin': process_input or '', 'stdout': _stdout, 'stderr': _stderr} if log_fail_as_error: LOG.error(msg) if check_exit_code: raise exceptions.ProcessExecutionError(msg, returncode=returncode) finally: # NOTE(termie): this appears to be necessary to let the subprocess # call clean something up in between calls, without # it two execute calls in a row hangs the second one time.sleep(0) return (_stdout, _stderr) if return_stderr else _stdout def find_child_pids(pid, recursive=False): """Retrieve a list of the pids of child processes of the given pid. It can also find all children through the hierarchy if recursive=True """ try: raw_pids = execute(['ps', '--ppid', pid, '-o', 'pid='], log_fail_as_error=False) except exceptions.ProcessExecutionError as e: # Unexpected errors are the responsibility of the caller with excutils.save_and_reraise_exception() as ctxt: # Exception has already been logged by execute no_children_found = e.returncode == 1 if no_children_found: ctxt.reraise = False return [] child_pids = [x.strip() for x in raw_pids.split('\n') if x.strip()] if recursive: for child in child_pids: child_pids = child_pids + find_child_pids(child, True) return child_pids def find_parent_pid(pid): """Retrieve the pid of the parent process of the given pid. If the pid doesn't exist in the system, this function will return None """ try: ppid = execute(['ps', '-o', 'ppid=', pid], log_fail_as_error=False) except exceptions.ProcessExecutionError as e: # Unexpected errors are the responsibility of the caller with excutils.save_and_reraise_exception() as ctxt: # Exception has already been logged by execute no_such_pid = e.returncode == 1 if no_such_pid: ctxt.reraise = False return return ppid.strip() def get_process_count_by_name(name): """Find the process count by name.""" try: out = execute(['ps', '-C', name, '-o', 'comm='], log_fail_as_error=False) except exceptions.ProcessExecutionError: with excutils.save_and_reraise_exception(reraise=False): return 0 return len(out.strip('\n').split('\n')) def find_fork_top_parent(pid): """Retrieve the pid of the top parent of the given pid through a fork. This function will search the top parent with its same cmdline. 
If the given pid has no parent, its own pid will be returned """ while True: ppid = find_parent_pid(pid) if (ppid and ppid != pid and pid_invoked_with_cmdline(ppid, get_cmdline_from_pid(pid))): pid = ppid else: return pid def kill_process(pid, signal, run_as_root=False): """Kill the process with the given pid using the given signal.""" try: execute(['kill', '-%d' % signal, pid], run_as_root=run_as_root) except exceptions.ProcessExecutionError: if process_is_running(pid): raise def _get_conf_base(cfg_root, uuid, ensure_conf_dir): # TODO(mangelajo): separate responsibilities here, ensure_conf_dir # should be a separate function conf_dir = os.path.abspath(os.path.normpath(cfg_root)) conf_base = os.path.join(conf_dir, uuid) if ensure_conf_dir: fileutils.ensure_tree(conf_dir, mode=0o755) return conf_base def get_conf_file_name(cfg_root, uuid, cfg_file, ensure_conf_dir=False): """Returns the file name for a given kind of config file.""" conf_base = _get_conf_base(cfg_root, uuid, ensure_conf_dir) return "%s.%s" % (conf_base, cfg_file) def get_value_from_file(filename, converter=None): try: with open(filename, 'r') as f: try: return converter(f.read()) if converter else f.read() except ValueError: LOG.error('Unable to convert value in %s', filename) except IOError as error: LOG.debug('Unable to access %(filename)s; Error: %(error)s', {'filename': filename, 'error': error}) def remove_conf_files(cfg_root, uuid): conf_base = _get_conf_base(cfg_root, uuid, False) for file_path in glob.iglob("%s.*" % conf_base): os.unlink(file_path) def get_root_helper_child_pid(pid, expected_cmd, run_as_root=False): """Get the first non root_helper child pid in the process hierarchy. If root helper was used, two or more processes would be created: - a root helper process (e.g. sudo myscript) - possibly a rootwrap script (e.g. neutron-rootwrap) - a child process (e.g. myscript) - possibly its child processes Killing the root helper process will leave the child process running, re-parented to init, so the only way to ensure that both die is to target the child process directly. """ pid = str(pid) if run_as_root: while True: try: # We shouldn't have more than one child per process # so keep getting the children of the first one pid = find_child_pids(pid)[0] except IndexError: return # We never found the child pid with expected_cmd # If we've found a pid with no root helper, return it. # If we continue, we can find transient children. if pid_invoked_with_cmdline(pid, expected_cmd): break return pid def remove_abs_path(cmd): """Remove absolute path of executable in cmd Note: New instance of list is returned :param cmd: parsed shlex command (e.g. ['/bin/foo', 'param1', 'param two']) """ if cmd and os.path.isabs(cmd[0]): cmd = list(cmd) cmd[0] = os.path.basename(cmd[0]) return cmd def process_is_running(pid): """Find if the given PID is running in the system. 
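    The check is based on the existence of the /proc/<pid> directory.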
""" return pid and os.path.exists('/proc/%s' % pid) def get_cmdline_from_pid(pid): if not process_is_running(pid): return [] # NOTE(jh): Even after the above check, the process may terminate # before the open below happens try: with open('/proc/%s/cmdline' % pid, 'r') as f: cmdline = f.readline().split('\0')[:-1] except IOError: return [] # NOTE(slaweq): sometimes it may happen that values in # /proc/{pid}/cmdline are separated by space instead of NUL char, # in such case we would have everything in one element of cmdline_args # list and it would not match to expected cmd so we need to try to # split it by spaces if len(cmdline) == 1: cmdline = cmdline[0].split(' ') LOG.debug("Found cmdline %s for process with PID %s.", cmdline, pid) return cmdline def cmd_matches_expected(cmd, expected_cmd): abs_cmd = remove_abs_path(cmd) abs_expected_cmd = remove_abs_path(expected_cmd) if abs_cmd != abs_expected_cmd: # Commands executed with #! are prefixed with the script # executable. Check for the expected cmd being a subset of the # actual cmd to cover this possibility. abs_cmd = remove_abs_path(abs_cmd[1:]) return abs_cmd == abs_expected_cmd def pid_invoked_with_cmdline(pid, expected_cmd): """Validate process with given pid is running with provided parameters """ cmd = get_cmdline_from_pid(pid) return cmd_matches_expected(cmd, expected_cmd) def ensure_directory_exists_without_file(path): dirname = os.path.dirname(path) if os.path.isdir(dirname): try: os.unlink(path) except OSError: with excutils.save_and_reraise_exception() as ctxt: if not os.path.exists(path): ctxt.reraise = False else: fileutils.ensure_tree(dirname, mode=0o755) def is_effective_user(user_id_or_name): """Returns True if user_id_or_name is effective user (id/name).""" euid = os.geteuid() if str(user_id_or_name) == str(euid): return True effective_user_name = pwd.getpwuid(euid).pw_name return user_id_or_name == effective_user_name def is_effective_group(group_id_or_name): """Returns True if group_id_or_name is effective group (id/name).""" egid = os.getegid() if str(group_id_or_name) == str(egid): return True effective_group_name = grp.getgrgid(egid).gr_name return group_id_or_name == effective_group_name class UnixDomainHTTPConnection(httplib.HTTPConnection): """Connection class for HTTP over UNIX domain socket.""" def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None): httplib.HTTPConnection.__init__(self, host, port, strict) self.timeout = timeout self.socket_path = cfg.CONF.metadata_proxy_socket def connect(self): self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) if self.timeout: self.sock.settimeout(self.timeout) self.sock.connect(self.socket_path) class UnixDomainHttpProtocol(eventlet.wsgi.HttpProtocol): def __init__(self, *args): # NOTE(yamahata): from eventlet v0.22 HttpProtocol.__init__ # signature was changed by changeset of # 7f53465578543156e7251e243c0636e087a8445f # Both have server as last arg, but first arg(s) differ server = args[-1] # Because the caller is eventlet.wsgi.Server.process_request, # the number of arguments will dictate if it is new or old style. 
if len(args) == 2: conn_state = args[0] client_address = conn_state[0] if not client_address: conn_state[0] = ('', 0) # base class is old-style, so super does not work properly eventlet.wsgi.HttpProtocol.__init__(self, conn_state, server) elif len(args) == 3: request = args[0] client_address = args[1] if not client_address: client_address = ('', 0) # base class is old-style, so super does not work properly # NOTE: eventlet 0.22 or later changes the number of args to 2. # If we install eventlet 0.22 or later into a venv for pylint, # pylint complains this. Let's skip it. (bug 1791178) # pylint: disable=too-many-function-args eventlet.wsgi.HttpProtocol.__init__( self, request, client_address, server) else: eventlet.wsgi.HttpProtocol.__init__(self, *args) class UnixDomainWSGIServer(wsgi.Server): def __init__(self, name, num_threads=None): self._socket = None self._launcher = None self._server = None super(UnixDomainWSGIServer, self).__init__(name, disable_ssl=True, num_threads=num_threads) def start(self, application, file_socket, workers, backlog, mode=None): self._socket = eventlet.listen(file_socket, family=socket.AF_UNIX, backlog=backlog) if mode is not None: os.chmod(file_socket, mode) self._launch(application, workers=workers) def _run(self, application, socket): """Start a WSGI service in a new green thread.""" logger = logging.getLogger('eventlet.wsgi.server') eventlet.wsgi.server(socket, application, max_size=self.num_threads, protocol=UnixDomainHttpProtocol, log=logger, log_format=cfg.CONF.wsgi_log_format) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/linux/xenapi_root_helper.py0000644000175000017500000000701300000000000025333 0ustar00coreycorey00000000000000# Copyright (c) 2016 Citrix System. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """xenapi root helper For xenapi, we may need to run some commands in dom0 with additional privilege. This xenapi root helper contains the class of XenAPIClient to support it: XenAPIClient will keep a XenAPI session to dom0 and allow to run commands in dom0 via calling XenAPI plugin. The XenAPI plugin is responsible to determine whether a command is safe to execute. 
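Commands and their stdin are serialized to JSON and forwarded to the
'netwrap' plugin, which returns the command's returncode, stdout and
stderr.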
""" from os_xenapi.client import session from os_xenapi.client import XenAPI from oslo_config import cfg from oslo_log import log as logging from oslo_rootwrap import cmd as oslo_rootwrap_cmd from oslo_serialization import jsonutils from neutron.conf.agent import xenapi_conf ROOT_HELPER_DAEMON_TOKEN = 'xenapi_root_helper' # nosec RC_UNKNOWN_XENAPI_ERROR = 80 MSG_UNAUTHORIZED = "Unauthorized command" MSG_NOT_FOUND = "Executable not found" XENAPI_PLUGIN_FAILURE_ID = "XENAPI_PLUGIN_FAILURE" LOG = logging.getLogger(__name__) xenapi_conf.register_xenapi_opts(cfg.CONF) class XenAPIClient(object): def __init__(self): self._session = self._create_session( cfg.CONF.xenapi.connection_url, cfg.CONF.xenapi.connection_username, cfg.CONF.xenapi.connection_password) def _call_plugin(self, plugin, fn, args): return self._session.call_plugin(plugin, fn, args) def _create_session(self, url, username, password): return session.XenAPISession(url, username, password, originator="neutron") def _get_return_code(self, failure_details): # The details will be as: # [XENAPI_PLUGIN_FAILURE_ID, methodname, except_class_name, message] # We can distinguish the error type by checking the message string. if (len(failure_details) == 4 and XENAPI_PLUGIN_FAILURE_ID == failure_details[0]): if (MSG_UNAUTHORIZED == failure_details[3]): return oslo_rootwrap_cmd.RC_UNAUTHORIZED elif (MSG_NOT_FOUND == failure_details[3]): return oslo_rootwrap_cmd.RC_NOEXECFOUND # otherwise we get unexpected exception. return RC_UNKNOWN_XENAPI_ERROR def execute(self, cmd, stdin=None): out = "" err = "" if cmd is None or len(cmd) == 0: err = "No command specified." return oslo_rootwrap_cmd.RC_NOCOMMAND, out, err try: result_raw = self._call_plugin( 'netwrap.py', 'run_command', {'cmd': jsonutils.dumps(cmd), 'cmd_input': jsonutils.dumps(stdin)}) result = jsonutils.loads(result_raw) returncode = result['returncode'] out = result['out'] err = result['err'] return returncode, out, err except XenAPI.Failure as failure: LOG.exception('Failed to execute command: %s', cmd) returncode = self._get_return_code(failure.details) return returncode, out, err ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2030437 neutron-16.0.0.0b2.dev214/neutron/agent/metadata/0000755000175000017500000000000000000000000021513 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/metadata/__init__.py0000644000175000017500000000000000000000000023612 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/metadata/agent.py0000644000175000017500000003251300000000000023167 0ustar00coreycorey00000000000000# Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import hashlib import hmac from neutron_lib.agent import topics from neutron_lib import constants from neutron_lib import context from neutron_lib import rpc as n_rpc from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_service import loopingcall from oslo_utils import encodeutils import requests import six from six.moves import urllib import webob from neutron._i18n import _ from neutron.agent.linux import utils as agent_utils from neutron.agent import rpc as agent_rpc from neutron.common import cache_utils as cache from neutron.common import ipv6_utils from neutron.conf.agent.metadata import config LOG = logging.getLogger(__name__) MODE_MAP = { config.USER_MODE: 0o644, config.GROUP_MODE: 0o664, config.ALL_MODE: 0o666, } class MetadataPluginAPI(object): """Agent-side RPC for metadata agent-to-plugin interaction. This class implements the client side of an rpc interface used by the metadata service to make calls back into the Neutron plugin. The server side is defined in neutron.api.rpc.handlers.metadata_rpc.MetadataRpcCallback. For more information about changing rpc interfaces, see doc/source/contributor/internals/rpc_api.rst. API version history: 1.0 - Initial version. """ def __init__(self, topic): target = oslo_messaging.Target( topic=topic, namespace=constants.RPC_NAMESPACE_METADATA, version='1.0') self.client = n_rpc.get_client(target) def get_ports(self, context, filters): cctxt = self.client.prepare() return cctxt.call(context, 'get_ports', filters=filters) class MetadataProxyHandler(object): def __init__(self, conf): self.conf = conf self._cache = cache.get_cache(self.conf) self.plugin_rpc = MetadataPluginAPI(topics.PLUGIN) self.context = context.get_admin_context_without_session() @webob.dec.wsgify(RequestClass=webob.Request) def __call__(self, req): try: LOG.debug("Request: %s", req) instance_id, tenant_id = self._get_instance_and_tenant_id(req) if instance_id: res = self._proxy_request(instance_id, tenant_id, req) if isinstance(res, webob.exc.HTTPNotFound): LOG.info("The instance: %s is not present anymore, " "skipping cache...", instance_id) instance_id, tenant_id = self._get_instance_and_tenant_id( req, skip_cache=True) if instance_id: return self._proxy_request(instance_id, tenant_id, req) return res else: return webob.exc.HTTPNotFound() except Exception: LOG.exception("Unexpected error.") msg = _('An unknown error has occurred. 
' 'Please try your request again.') explanation = six.text_type(msg) return webob.exc.HTTPInternalServerError(explanation=explanation) def _get_ports_from_server(self, router_id=None, ip_address=None, networks=None): """Get ports from server.""" filters = self._get_port_filters(router_id, ip_address, networks) return self.plugin_rpc.get_ports(self.context, filters) def _get_port_filters(self, router_id=None, ip_address=None, networks=None): filters = {} if router_id: filters['device_id'] = [router_id] filters['device_owner'] = constants.ROUTER_INTERFACE_OWNERS if ip_address: filters['fixed_ips'] = {'ip_address': [ip_address]} if networks: filters['network_id'] = networks return filters @cache.cache_method_results def _get_router_networks(self, router_id, skip_cache=False): """Find all networks connected to given router.""" internal_ports = self._get_ports_from_server(router_id=router_id) return tuple(p['network_id'] for p in internal_ports) @cache.cache_method_results def _get_ports_for_remote_address(self, remote_address, networks, skip_cache=False): """Get list of ports that has given ip address and are part of given networks. :param networks: list of networks in which the ip address will be searched for :param skip_cache: when have to skip getting entry from cache """ return self._get_ports_from_server(networks=networks, ip_address=remote_address) def _get_ports(self, remote_address, network_id=None, router_id=None, skip_cache=False): """Search for all ports that contain passed ip address and belongs to given network. If no network is passed ports are searched on all networks connected to given router. Either one of network_id or router_id must be passed. :param skip_cache: when have to skip getting entry from cache """ if network_id: networks = (network_id,) elif router_id: networks = self._get_router_networks(router_id, skip_cache=skip_cache) else: raise TypeError(_("Either one of parameter network_id or router_id" " must be passed to _get_ports method.")) return self._get_ports_for_remote_address(remote_address, networks, skip_cache=skip_cache) def _get_instance_and_tenant_id(self, req, skip_cache=False): remote_address = req.headers.get('X-Forwarded-For') network_id = req.headers.get('X-Neutron-Network-ID') router_id = req.headers.get('X-Neutron-Router-ID') # Only one should be given, drop since it could be spoofed if network_id and router_id: LOG.debug("Both network and router IDs were specified in proxy " "request, but only a single one of the two is allowed, " "dropping") return None, None ports = self._get_ports(remote_address, network_id, router_id, skip_cache=skip_cache) LOG.debug("Gotten ports for remote_address %(remote_address)s, " "network_id %(network_id)s, router_id %(router_id)s are: " "%(ports)s", {"remote_address": remote_address, "network_id": network_id, "router_id": router_id, "ports": ports}) if len(ports) == 1: return ports[0]['device_id'], ports[0]['tenant_id'] return None, None def _proxy_request(self, instance_id, tenant_id, req): headers = { 'X-Forwarded-For': req.headers.get('X-Forwarded-For'), 'X-Instance-ID': instance_id, 'X-Tenant-ID': tenant_id, 'X-Instance-ID-Signature': self._sign_instance_id(instance_id) } nova_host_port = ipv6_utils.valid_ipv6_url( self.conf.nova_metadata_host, self.conf.nova_metadata_port) url = urllib.parse.urlunsplit(( self.conf.nova_metadata_protocol, nova_host_port, req.path_info, req.query_string, '')) disable_ssl_certificate_validation = self.conf.nova_metadata_insecure if self.conf.auth_ca_cert and not 
disable_ssl_certificate_validation: verify_cert = self.conf.auth_ca_cert else: verify_cert = not disable_ssl_certificate_validation client_cert = None if self.conf.nova_client_cert and self.conf.nova_client_priv_key: client_cert = (self.conf.nova_client_cert, self.conf.nova_client_priv_key) resp = requests.request(method=req.method, url=url, headers=headers, data=req.body, cert=client_cert, verify=verify_cert) if resp.status_code == 200: req.response.content_type = resp.headers['content-type'] req.response.body = resp.content LOG.debug(str(resp)) return req.response elif resp.status_code == 403: LOG.warning( 'The remote metadata server responded with Forbidden. This ' 'response usually occurs when shared secrets do not match.' ) return webob.exc.HTTPForbidden() elif resp.status_code == 400: return webob.exc.HTTPBadRequest() elif resp.status_code == 404: return webob.exc.HTTPNotFound() elif resp.status_code == 409: return webob.exc.HTTPConflict() elif resp.status_code == 500: msg = _( 'Remote metadata server experienced an internal server error.' ) LOG.warning(msg) explanation = six.text_type(msg) return webob.exc.HTTPInternalServerError(explanation=explanation) else: raise Exception(_('Unexpected response code: %s') % resp.status_code) def _sign_instance_id(self, instance_id): secret = self.conf.metadata_proxy_shared_secret secret = encodeutils.to_utf8(secret) instance_id = encodeutils.to_utf8(instance_id) return hmac.new(secret, instance_id, hashlib.sha256).hexdigest() class UnixDomainMetadataProxy(object): def __init__(self, conf): self.conf = conf agent_utils.ensure_directory_exists_without_file( cfg.CONF.metadata_proxy_socket) def _init_state_reporting(self): self.context = context.get_admin_context_without_session() self.failed_state_report = False self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS) self.agent_state = { 'binary': 'neutron-metadata-agent', 'host': cfg.CONF.host, 'topic': 'N/A', 'configurations': { 'metadata_proxy_socket': cfg.CONF.metadata_proxy_socket, 'nova_metadata_host': cfg.CONF.nova_metadata_host, 'nova_metadata_port': cfg.CONF.nova_metadata_port, 'log_agent_heartbeats': cfg.CONF.AGENT.log_agent_heartbeats, }, 'start_flag': True, 'agent_type': constants.AGENT_TYPE_METADATA} report_interval = cfg.CONF.AGENT.report_interval if report_interval: self.heartbeat = loopingcall.FixedIntervalLoopingCall( self._report_state) self.heartbeat.start(interval=report_interval) def _report_state(self): try: self.state_rpc.report_state( self.context, self.agent_state, use_call=self.agent_state.get('start_flag')) except AttributeError: # This means the server does not support report_state LOG.warning('Neutron server does not support state report.' 
' State report for this agent will be disabled.') self.heartbeat.stop() return except Exception: self.failed_state_report = True LOG.exception("Failed reporting state!") return if self.failed_state_report: self.failed_state_report = False LOG.info('Successfully reported state after a previous failure.') self.agent_state.pop('start_flag', None) def _get_socket_mode(self): mode = self.conf.metadata_proxy_socket_mode if mode == config.DEDUCE_MODE: user = self.conf.metadata_proxy_user if (not user or user == '0' or user == 'root' or agent_utils.is_effective_user(user)): # user is agent effective user or root => USER_MODE mode = config.USER_MODE else: group = self.conf.metadata_proxy_group if not group or agent_utils.is_effective_group(group): # group is agent effective group => GROUP_MODE mode = config.GROUP_MODE else: # otherwise => ALL_MODE mode = config.ALL_MODE return MODE_MAP[mode] def run(self): server = agent_utils.UnixDomainWSGIServer('neutron-metadata-agent') server.start(MetadataProxyHandler(self.conf), self.conf.metadata_proxy_socket, workers=self.conf.metadata_workers, backlog=self.conf.metadata_backlog, mode=self._get_socket_mode()) self._init_state_reporting() server.wait() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/metadata/driver.py0000644000175000017500000002644200000000000023370 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
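# Standalone sketch of the signature scheme used by
# MetadataProxyHandler._sign_instance_id() above. The secret and instance
# id below are made-up example values:
import hashlib
import hmac

secret = b'shared-secret'
instance_id = b'8c6b0b5f-1234-4f32-a831-ee1b0cf2b4de'
# This is the value sent in the X-Instance-ID-Signature header; the nova
# metadata service recomputes the same HMAC with the shared secret to
# verify that X-Instance-ID was set by a trusted proxy.
signature = hmac.new(secret, instance_id, hashlib.sha256).hexdigest()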
import errno import grp import os import pwd from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib import exceptions from oslo_config import cfg from oslo_log import log as logging from neutron._i18n import _ from neutron.agent.l3 import ha_router from neutron.agent.l3 import namespaces from neutron.agent.linux import external_process LOG = logging.getLogger(__name__) METADATA_SERVICE_NAME = 'metadata-proxy' HAPROXY_SERVICE = 'haproxy' PROXY_CONFIG_DIR = "ns-metadata-proxy" _HAPROXY_CONFIG_TEMPLATE = """ global log /dev/log local0 %(log_level)s log-tag %(log_tag)s user %(user)s group %(group)s maxconn 1024 pidfile %(pidfile)s daemon defaults log global mode http option httplog option dontlognull option http-server-close option forwardfor retries 3 timeout http-request 30s timeout connect 30s timeout client 32s timeout server 32s timeout http-keep-alive 30s listen listener bind %(host)s:%(port)s server metadata %(unix_socket_path)s http-request del-header X-Neutron-%(res_type_del)s-ID http-request set-header X-Neutron-%(res_type)s-ID %(res_id)s """ class InvalidUserOrGroupException(Exception): pass class HaproxyConfigurator(object): def __init__(self, network_id, router_id, unix_socket_path, host, port, user, group, state_path, pid_file): self.network_id = network_id self.router_id = router_id if network_id is None and router_id is None: raise exceptions.NetworkIdOrRouterIdRequiredError() self.host = host self.port = port self.user = user self.group = group self.state_path = state_path self.unix_socket_path = unix_socket_path self.pidfile = pid_file self.log_level = ( 'debug' if logging.is_debug_enabled(cfg.CONF) else 'info') # log-tag will cause entries to have the string pre-pended, so use # the uuid haproxy will be started with. Additionally, if it # starts with "haproxy" then things will get logged to # /var/log/haproxy.log on Debian distros, instead of to syslog. uuid = network_id or router_id self.log_tag = "haproxy-" + METADATA_SERVICE_NAME + "-" + uuid def create_config_file(self): """Create the config file for haproxy.""" # Need to convert uid/gid into username/group try: username = pwd.getpwuid(int(self.user)).pw_name except (ValueError, KeyError): try: username = pwd.getpwnam(self.user).pw_name except KeyError: raise InvalidUserOrGroupException( _("Invalid user/uid: '%s'") % self.user) try: groupname = grp.getgrgid(int(self.group)).gr_name except (ValueError, KeyError): try: groupname = grp.getgrnam(self.group).gr_name except KeyError: raise InvalidUserOrGroupException( _("Invalid group/gid: '%s'") % self.group) cfg_info = { 'host': self.host, 'port': self.port, 'unix_socket_path': self.unix_socket_path, 'user': username, 'group': groupname, 'pidfile': self.pidfile, 'log_level': self.log_level, 'log_tag': self.log_tag } # If using the network ID, delete any spurious router ID that might # have been in the request, same for network ID when using router ID. if self.network_id: cfg_info['res_type'] = 'Network' cfg_info['res_id'] = self.network_id cfg_info['res_type_del'] = 'Router' else: cfg_info['res_type'] = 'Router' cfg_info['res_id'] = self.router_id cfg_info['res_type_del'] = 'Network' haproxy_cfg = _HAPROXY_CONFIG_TEMPLATE % cfg_info LOG.debug("haproxy_cfg = %s", haproxy_cfg) cfg_dir = self.get_config_path(self.state_path) # uuid has to be included somewhere in the command line so that it can # be tracked by process_monitor. 
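        # (e.g. <state_path>/ns-metadata-proxy/<uuid>.conf, where state_path
        # is typically /var/lib/neutron - illustrative path)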
self.cfg_path = os.path.join(cfg_dir, "%s.conf" % cfg_info['res_id']) if not os.path.exists(cfg_dir): os.makedirs(cfg_dir) with open(self.cfg_path, "w") as cfg_file: cfg_file.write(haproxy_cfg) @staticmethod def get_config_path(state_path): return os.path.join(state_path or cfg.CONF.state_path, PROXY_CONFIG_DIR) @staticmethod def cleanup_config_file(uuid, state_path): """Delete config file created when metadata proxy was spawned.""" # Delete config file if it exists cfg_path = os.path.join( HaproxyConfigurator.get_config_path(state_path), "%s.conf" % uuid) try: os.unlink(cfg_path) except OSError as ex: # It can happen that this function is called but metadata proxy # was never spawned so its config file won't exist if ex.errno != errno.ENOENT: raise class MetadataDriver(object): monitors = {} def __init__(self, l3_agent): self.metadata_port = l3_agent.conf.metadata_port self.metadata_access_mark = l3_agent.conf.metadata_access_mark registry.subscribe( after_router_added, resources.ROUTER, events.AFTER_CREATE) registry.subscribe( after_router_updated, resources.ROUTER, events.AFTER_UPDATE) registry.subscribe( before_router_removed, resources.ROUTER, events.BEFORE_DELETE) @classmethod def metadata_filter_rules(cls, port, mark): return [('INPUT', '-m mark --mark %s/%s -j ACCEPT' % (mark, constants.ROUTER_MARK_MASK)), ('INPUT', '-p tcp -m tcp --dport %s ' '-j DROP' % port)] @classmethod def metadata_nat_rules(cls, port): return [('PREROUTING', '-d 169.254.169.254/32 ' '-i %(interface_name)s ' '-p tcp -m tcp --dport 80 -j REDIRECT ' '--to-ports %(port)s' % {'interface_name': namespaces.INTERNAL_DEV_PREFIX + '+', 'port': port})] @classmethod def _get_metadata_proxy_user_group(cls, conf): user = conf.metadata_proxy_user or str(os.geteuid()) group = conf.metadata_proxy_group or str(os.getegid()) return user, group @classmethod def _get_metadata_proxy_callback(cls, bind_address, port, conf, network_id=None, router_id=None): def callback(pid_file): metadata_proxy_socket = conf.metadata_proxy_socket user, group = ( cls._get_metadata_proxy_user_group(conf)) haproxy = HaproxyConfigurator(network_id, router_id, metadata_proxy_socket, bind_address, port, user, group, conf.state_path, pid_file) haproxy.create_config_file() proxy_cmd = [HAPROXY_SERVICE, '-f', haproxy.cfg_path] return proxy_cmd return callback @classmethod def spawn_monitored_metadata_proxy(cls, monitor, ns_name, port, conf, bind_address="0.0.0.0", network_id=None, router_id=None): uuid = network_id or router_id callback = cls._get_metadata_proxy_callback( bind_address, port, conf, network_id=network_id, router_id=router_id) pm = cls._get_metadata_proxy_process_manager(uuid, conf, ns_name=ns_name, callback=callback) pm.enable() monitor.register(uuid, METADATA_SERVICE_NAME, pm) cls.monitors[router_id] = pm @classmethod def destroy_monitored_metadata_proxy(cls, monitor, uuid, conf, ns_name): monitor.unregister(uuid, METADATA_SERVICE_NAME) pm = cls._get_metadata_proxy_process_manager(uuid, conf, ns_name=ns_name) pm.disable() # Delete metadata proxy config file HaproxyConfigurator.cleanup_config_file(uuid, cfg.CONF.state_path) cls.monitors.pop(uuid, None) @classmethod def _get_metadata_proxy_process_manager(cls, router_id, conf, ns_name=None, callback=None): return external_process.ProcessManager( conf=conf, uuid=router_id, namespace=ns_name, service=HAPROXY_SERVICE, default_cmd_callback=callback) def after_router_added(resource, event, l3_agent, **kwargs): router = kwargs['router'] proxy = l3_agent.metadata_driver for c, r in 
proxy.metadata_filter_rules(proxy.metadata_port, proxy.metadata_access_mark): router.iptables_manager.ipv4['filter'].add_rule(c, r) for c, r in proxy.metadata_nat_rules(proxy.metadata_port): router.iptables_manager.ipv4['nat'].add_rule(c, r) router.iptables_manager.apply() if not isinstance(router, ha_router.HaRouter): proxy.spawn_monitored_metadata_proxy( l3_agent.process_monitor, router.ns_name, proxy.metadata_port, l3_agent.conf, router_id=router.router_id) def after_router_updated(resource, event, l3_agent, **kwargs): router = kwargs['router'] proxy = l3_agent.metadata_driver if (not proxy.monitors.get(router.router_id) and not isinstance(router, ha_router.HaRouter)): proxy.spawn_monitored_metadata_proxy( l3_agent.process_monitor, router.ns_name, proxy.metadata_port, l3_agent.conf, router_id=router.router_id) def before_router_removed(resource, event, l3_agent, payload=None): router = payload.latest_state proxy = l3_agent.metadata_driver proxy.destroy_monitored_metadata_proxy(l3_agent.process_monitor, router.router['id'], l3_agent.conf, router.ns_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/metadata_agent.py0000644000175000017500000000264700000000000023254 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
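# Illustration of the iptables rule helpers above, assuming neutron_lib's
# usual router mark mask of 0xffff and the 'qr-' internal device prefix
# (port and mark are example values):
#   MetadataDriver.metadata_filter_rules(8775, '0x1') ->
#     [('INPUT', '-m mark --mark 0x1/0xffff -j ACCEPT'),
#      ('INPUT', '-p tcp -m tcp --dport 8775 -j DROP')]
#   MetadataDriver.metadata_nat_rules(8775) ->
#     [('PREROUTING', '-d 169.254.169.254/32 -i qr-+ -p tcp -m tcp '
#       '--dport 80 -j REDIRECT --to-ports 8775')]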
import sys

from oslo_config import cfg
from oslo_log import log as logging

from neutron.agent.metadata import agent
from neutron.common import cache_utils as cache
from neutron.common import config
from neutron.common import utils
from neutron.conf.agent import common as agent_conf
from neutron.conf.agent.metadata import config as meta

LOG = logging.getLogger(__name__)


def main():
    meta.register_meta_conf_opts(meta.SHARED_OPTS)
    meta.register_meta_conf_opts(meta.UNIX_DOMAIN_METADATA_PROXY_OPTS)
    meta.register_meta_conf_opts(meta.METADATA_PROXY_HANDLER_OPTS)
    cache.register_oslo_configs(cfg.CONF)
    agent_conf.register_agent_state_opts_helper(cfg.CONF)
    config.init(sys.argv[1:])
    config.setup_logging()
    utils.log_opt_values(LOG)
    proxy = agent.UnixDomainMetadataProxy(cfg.CONF)
    proxy.run()

neutron-16.0.0.0b2.dev214/neutron/agent/ovn/__init__.py
neutron-16.0.0.0b2.dev214/neutron/agent/ovn/metadata/__init__.py
neutron-16.0.0.0b2.dev214/neutron/agent/ovn/metadata/agent.py

# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
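# [Illustrative aside] main() above follows the usual oslo.config bootstrap
# order: register options on cfg.CONF first, then let config.init() parse
# argv and the configuration files, and only then read values. A
# self-contained sketch of that pattern; the option name below is an
# illustrative stand-in, not an assertion about this package's options.
from oslo_config import cfg as _cfg

_DEMO_OPTS = [
    _cfg.IntOpt('metadata_backlog', default=4096,
                help='Illustrative option used only for this sketch.'),
]


def _register_and_parse(argv):
    _cfg.CONF.register_opts(_DEMO_OPTS)
    _cfg.CONF(argv, project='neutron')  # roughly what config.init(argv) does
    return _cfg.CONF.metadata_backlog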
import collections import re from neutron.agent.linux import external_process from neutron.agent.linux import ip_lib from neutron.agent.ovn.metadata import driver as metadata_driver from neutron.agent.ovn.metadata import ovsdb from neutron.agent.ovn.metadata import server as metadata_server from neutron.common.ovn import constants as ovn_const from neutron.common import utils from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf as config from neutron_lib import constants as n_const from oslo_concurrency import lockutils from oslo_log import log from oslo_utils import uuidutils from ovsdbapp.backend.ovs_idl import event as row_event from ovsdbapp.backend.ovs_idl import vlog import six LOG = log.getLogger(__name__) _SYNC_STATE_LOCK = lockutils.ReaderWriterLock() CHASSIS_METADATA_LOCK = 'chassis_metadata_lock' NS_PREFIX = 'ovnmeta-' MAC_PATTERN = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I) OVN_VIF_PORT_TYPES = ("", "external", ) MetadataPortInfo = collections.namedtuple('MetadataPortInfo', ['mac', 'ip_addresses']) def _sync_lock(f): """Decorator to block all operations for a global sync call.""" @six.wraps(f) def wrapped(*args, **kwargs): with _SYNC_STATE_LOCK.write_lock(): return f(*args, **kwargs) return wrapped class ConfigException(Exception): """Misconfiguration of the agent This exception is raised when agent detects its wrong configuration. Typically agent should resync when this is raised. """ class PortBindingChassisEvent(row_event.RowEvent): def __init__(self, metadata_agent): self.agent = metadata_agent table = 'Port_Binding' events = (self.ROW_UPDATE,) super(PortBindingChassisEvent, self).__init__( events, table, None) self.event_name = self.__class__.__name__ def run(self, event, row, old): # Check if the port has been bound/unbound to our chassis and update # the metadata namespace accordingly. resync = False if row.type not in OVN_VIF_PORT_TYPES: return with _SYNC_STATE_LOCK.read_lock(): try: LOG.info(self.LOG_MSG, row.logical_port, str(row.datapath.uuid)) self.agent.update_datapath(str(row.datapath.uuid)) except ConfigException: # We're now in the reader lock mode, we need to exit the # context and then use writer lock resync = True if resync: self.agent.resync() class PortBindingChassisCreatedEvent(PortBindingChassisEvent): LOG_MSG = "Port %s in datapath %s bound to our chassis" def match_fn(self, event, row, old): try: return (row.chassis[0].name == self.agent.chassis and not old.chassis) except (IndexError, AttributeError): return False class PortBindingChassisDeletedEvent(PortBindingChassisEvent): LOG_MSG = "Port %s in datapath %s unbound from our chassis" def match_fn(self, event, row, old): try: return (old.chassis[0].name == self.agent.chassis and not row.chassis) except (IndexError, AttributeError): return False class ChassisCreateEvent(row_event.RowEvent): """Row create event - Chassis name == our_chassis. On connection, we get a dump of all chassis so if we catch a creation of our own chassis it has to be a reconnection. In this case, we need to do a full sync to make sure that we capture all changes while the connection to OVSDB was down. 
""" def __init__(self, metadata_agent): self.agent = metadata_agent self.first_time = True table = 'Chassis' events = (self.ROW_CREATE,) super(ChassisCreateEvent, self).__init__( events, table, (('name', '=', self.agent.chassis),)) self.event_name = self.__class__.__name__ def run(self, event, row, old): if self.first_time: self.first_time = False else: # NOTE(lucasagomes): Re-register the ovn metadata agent # with the local chassis in case its entry was re-created # (happens when restarting the ovn-controller) self.agent.register_metadata_agent() LOG.info("Connection to OVSDB established, doing a full sync") self.agent.sync() class SbGlobalUpdateEvent(row_event.RowEvent): """Row update event on SB_Global table.""" def __init__(self, metadata_agent): self.agent = metadata_agent table = 'SB_Global' events = (self.ROW_UPDATE,) super(SbGlobalUpdateEvent, self).__init__(events, table, None) self.event_name = self.__class__.__name__ def run(self, event, row, old): self.agent.sb_idl.update_metadata_health_status( self.agent.chassis, row.nb_cfg).execute() class MetadataAgent(object): def __init__(self, conf): self.conf = conf vlog.use_python_logger(max_level=config.get_ovn_ovsdb_log_level()) self._process_monitor = external_process.ProcessMonitor( config=self.conf, resource_type='metadata') def _load_config(self): self.chassis = self._get_own_chassis_name() self.ovn_bridge = self._get_ovn_bridge() LOG.debug("Loaded chassis %s and ovn bridge %s.", self.chassis, self.ovn_bridge) @_sync_lock def resync(self): """Resync the agent. Reload the configuration and sync the agent again. """ self._load_config() self.sync() def start(self): # Open the connection to OVS database self.ovs_idl = ovsdb.MetadataAgentOvsIdl().start() self._load_config() # Launch the server that will act as a proxy between the VM's and Nova. proxy = metadata_server.UnixDomainMetadataProxy(self.conf) proxy.run() # Open the connection to OVN SB database. self.sb_idl = ovsdb.MetadataAgentOvnSbIdl( chassis=self.chassis, events=[PortBindingChassisCreatedEvent(self), PortBindingChassisDeletedEvent(self), ChassisCreateEvent(self), SbGlobalUpdateEvent(self)]).start() # Do the initial sync. self.sync() # Register the agent with its corresponding Chassis self.register_metadata_agent() proxy.wait() def register_metadata_agent(self): # NOTE(lucasagomes): db_add() will not overwrite the UUID if # it's already set. ext_ids = { ovn_const.OVN_AGENT_METADATA_ID_KEY: uuidutils.generate_uuid()} self.sb_idl.db_add('Chassis', self.chassis, 'external_ids', ext_ids).execute(check_error=True) def _get_own_chassis_name(self): """Return the external_ids:system-id value of the Open_vSwitch table. As long as ovn-controller is running on this node, the key is guaranteed to exist and will include the chassis name. """ ext_ids = self.ovs_idl.db_get( 'Open_vSwitch', '.', 'external_ids').execute() return ext_ids['system-id'] def _get_ovn_bridge(self): """Return the external_ids:ovn-bridge value of the Open_vSwitch table. This is the OVS bridge used to plug the metadata ports to. If the key doesn't exist, this method will return 'br-int' as default. """ ext_ids = self.ovs_idl.db_get( 'Open_vSwitch', '.', 'external_ids').execute() try: return ext_ids['ovn-bridge'] except KeyError: LOG.warning("Can't read ovn-bridge external-id from OVSDB. Using " "br-int instead.") return 'br-int' @_sync_lock def sync(self): """Agent sync. This function will make sure that all networks with ports in our chassis are serving metadata. 
Also, it will tear down those namespaces which were serving metadata but are no longer needed. """ metadata_namespaces = self.ensure_all_networks_provisioned() system_namespaces = tuple( ns.decode('utf-8') if isinstance(ns, bytes) else ns for ns in ip_lib.list_network_namespaces()) unused_namespaces = [ns for ns in system_namespaces if ns.startswith(NS_PREFIX) and ns not in metadata_namespaces] for ns in unused_namespaces: self.teardown_datapath(self._get_datapath_name(ns)) @staticmethod def _get_veth_name(datapath): return ['{}{}{}'.format(n_const.TAP_DEVICE_PREFIX, datapath[:10], i) for i in [0, 1]] @staticmethod def _get_datapath_name(namespace): return namespace[len(NS_PREFIX):] @staticmethod def _get_namespace_name(datapath): return NS_PREFIX + datapath def _vif_ports(self, ports): return (p for p in ports if p.type in OVN_VIF_PORT_TYPES) def teardown_datapath(self, datapath): """Unprovision this datapath to stop serving metadata. This function will shutdown metadata proxy if it's running and delete the VETH pair, the OVS port and the namespace. """ self.update_chassis_metadata_networks(datapath, remove=True) namespace = self._get_namespace_name(datapath) ip = ip_lib.IPWrapper(namespace) # If the namespace doesn't exist, return if not ip.netns.exists(namespace): return LOG.info("Cleaning up %s namespace which is not needed anymore", namespace) metadata_driver.MetadataDriver.destroy_monitored_metadata_proxy( self._process_monitor, datapath, self.conf, namespace) veth_name = self._get_veth_name(datapath) self.ovs_idl.del_port(veth_name[0]).execute() if ip_lib.device_exists(veth_name[0]): ip_lib.IPWrapper().del_veth(veth_name[0]) ip.garbage_collect_namespace() def update_datapath(self, datapath): """Update the metadata service for this datapath. This function will: * Provision the namespace if it wasn't already in place. * Update the namespace if it was already serving metadata (for example, after binding/unbinding the first/last port of a subnet in our chassis). * Tear down the namespace if there are no more ports in our chassis for this datapath. """ ports = self.sb_idl.get_ports_on_chassis(self.chassis) datapath_ports = [p for p in self._vif_ports(ports) if str(p.datapath.uuid) == datapath] if datapath_ports: self.provision_datapath(datapath) else: self.teardown_datapath(datapath) def provision_datapath(self, datapath): """Provision the datapath so that it can serve metadata. This function will create the namespace and VETH pair if needed and assign the IP addresses to the interface corresponding to the metadata port of the network. It will also remove existing IP addresses that are no longer needed. :return: The metadata namespace name of this datapath """ LOG.debug("Provisioning datapath %s", datapath) port = self.sb_idl.get_metadata_port_network(datapath) # If there's no metadata port or it doesn't have a MAC or IP # addresses, then tear the namespace down if needed. This might happen # when there are no subnets yet created so metadata port doesn't have # an IP address. if not (port and port.mac and port.external_ids.get(ovn_const.OVN_CIDRS_EXT_ID_KEY, None)): LOG.debug("There is no metadata port for datapath %s or it has no " "MAC or IP addresses configured, tearing the namespace " "down if needed", datapath) self.teardown_datapath(datapath) return # First entry of the mac field must be the MAC address. match = MAC_PATTERN.match(port.mac[0].split(' ')[0]) # If it is not, we can't provision the namespace. Tear it down if # needed and log the error. 
if not match: LOG.error("Metadata port for datapath %s doesn't have a MAC " "address, tearing the namespace down if needed", datapath) self.teardown_datapath(datapath) return mac = match.group() ip_addresses = set( port.external_ids[ovn_const.OVN_CIDRS_EXT_ID_KEY].split(' ')) ip_addresses.add(ovn_const.METADATA_DEFAULT_CIDR) metadata_port = MetadataPortInfo(mac, ip_addresses) # Create the VETH pair if it's not created. Also the add_veth function # will create the namespace for us. namespace = self._get_namespace_name(datapath) veth_name = self._get_veth_name(datapath) ip1 = ip_lib.IPDevice(veth_name[0]) if ip_lib.device_exists(veth_name[1], namespace): ip2 = ip_lib.IPDevice(veth_name[1], namespace) else: LOG.debug("Creating VETH %s in %s namespace", veth_name[1], namespace) # Might happen that the end in the root namespace exists even # though the other end doesn't. Make sure we delete it first if # that's the case. if ip1.exists(): ip1.link.delete() ip1, ip2 = ip_lib.IPWrapper().add_veth( veth_name[0], veth_name[1], namespace) # Make sure both ends of the VETH are up ip1.link.set_up() ip2.link.set_up() # Configure the MAC address. ip2.link.set_address(metadata_port.mac) dev_info = ip2.addr.list() # Configure the IP addresses on the VETH pair and remove those # that we no longer need. current_cidrs = {dev['cidr'] for dev in dev_info} for ipaddr in current_cidrs - metadata_port.ip_addresses: ip2.addr.delete(ipaddr) for ipaddr in metadata_port.ip_addresses - current_cidrs: # NOTE(dalvarez): metadata only works on IPv4. We're doing this # extra check here because it could be that the metadata port has # an IPv6 address if there's an IPv6 subnet with SLAAC in its # network. Neutron IPAM will autoallocate an IPv6 address for every # port in the network. if utils.get_ip_version(ipaddr) == 4: ip2.addr.add(ipaddr) # Check that this port is not attached to any other OVS bridge. This # can happen when the OVN bridge changes (for example, during a # migration from ML2/OVS). ovs_bridges = set(self.ovs_idl.list_br().execute()) try: ovs_bridges.remove(self.ovn_bridge) except KeyError: LOG.warning("Configured OVN bridge %s cannot be found in " "the system. Resyncing the agent.", self.ovn_bridge) raise ConfigException() if ovs_bridges: with self.ovs_idl.transaction() as txn: for br in ovs_bridges: txn.add(self.ovs_idl.del_port(veth_name[0], bridge=br, if_exists=True)) # Configure the OVS port and add external_ids:iface-id so that it # can be tracked by OVN. self.ovs_idl.add_port(self.ovn_bridge, veth_name[0]).execute() self.ovs_idl.db_set( 'Interface', veth_name[0], ('external_ids', {'iface-id': port.logical_port})).execute() # Spawn metadata proxy if it's not already running. metadata_driver.MetadataDriver.spawn_monitored_metadata_proxy( self._process_monitor, namespace, ovn_const.METADATA_PORT, self.conf, bind_address=ovn_const.METADATA_DEFAULT_IP, network_id=datapath) self.update_chassis_metadata_networks(datapath) return namespace def ensure_all_networks_provisioned(self): """Ensure that all datapaths are provisioned. This function will make sure that all datapaths with ports bound to our chassis have its namespace, VETH pair and OVS port created and metadata proxy is up and running. 
:return: A list with the namespaces that are currently serving metadata """ # Retrieve all VIF ports in our Chassis ports = self.sb_idl.get_ports_on_chassis(self.chassis) datapaths = {str(p.datapath.uuid) for p in self._vif_ports(ports)} namespaces = [] # Make sure that all those datapaths are serving metadata for datapath in datapaths: netns = self.provision_datapath(datapath) if netns: namespaces.append(netns) return namespaces # NOTE(lucasagomes): Even tho the metadata agent is a multi-process # application, there's only one Southbound database IDL instance in # the agent which handles the OVSDB events therefore we do not need # the external=True parameter in the @synchronized decorator. @lockutils.synchronized(CHASSIS_METADATA_LOCK) def update_chassis_metadata_networks(self, datapath, remove=False): """Update metadata networks hosted in this chassis. Add or remove a datapath from the list of current datapaths that we're currently serving metadata. """ current_dps = self.sb_idl.get_chassis_metadata_networks(self.chassis) updated = False if remove: if datapath in current_dps: current_dps.remove(datapath) updated = True else: if datapath not in current_dps: current_dps.append(datapath) updated = True if updated: with self.sb_idl.create_transaction(check_error=True) as txn: txn.add(self.sb_idl.set_chassis_metadata_networks( self.chassis, current_dps)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/ovn/metadata/driver.py0000644000175000017500000001725100000000000024170 0ustar00coreycorey00000000000000# Copyright 2017 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
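# [Illustrative aside] The reconciliation done by MetadataAgent.sync()
# above is a plain set difference: every ovnmeta- namespace present on the
# host that no provisioned datapath claims gets torn down. Standalone
# sketch with made-up namespace names:
NS_PREFIX = 'ovnmeta-'


def stale_namespaces(system_namespaces, serving_namespaces):
    serving = set(serving_namespaces)
    return [ns for ns in system_namespaces
            if ns.startswith(NS_PREFIX) and ns not in serving]

# stale_namespaces(('ovnmeta-dp1', 'ovnmeta-dp2', 'qrouter-x'),
#                  ('ovnmeta-dp1',)) -> ['ovnmeta-dp2']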
import errno import grp import os import pwd from neutron.agent.linux import external_process from neutron_lib import exceptions from oslo_config import cfg from oslo_log import log as logging from neutron._i18n import _ LOG = logging.getLogger(__name__) METADATA_SERVICE_NAME = 'metadata-proxy' HAPROXY_SERVICE = 'haproxy' PROXY_CONFIG_DIR = "ovn-metadata-proxy" _HAPROXY_CONFIG_TEMPLATE = """ global log /dev/log local0 %(log_level)s user %(user)s group %(group)s maxconn 1024 pidfile %(pidfile)s daemon defaults log global mode http option httplog option dontlognull option http-server-close option forwardfor retries 3 timeout http-request 30s timeout connect 30s timeout client 32s timeout server 32s timeout http-keep-alive 30s listen listener bind %(host)s:%(port)s server metadata %(unix_socket_path)s http-request add-header X-OVN-%(res_type)s-ID %(res_id)s """ class InvalidUserOrGroupException(Exception): pass class HaproxyConfigurator(object): def __init__(self, network_id, router_id, unix_socket_path, host, port, user, group, state_path, pid_file): self.network_id = network_id self.router_id = router_id if network_id is None and router_id is None: raise exceptions.NetworkIdOrRouterIdRequiredError() self.host = host self.port = port self.user = user self.group = group self.state_path = state_path self.unix_socket_path = unix_socket_path self.pidfile = pid_file self.log_level = ( 'debug' if logging.is_debug_enabled(cfg.CONF) else 'info') def create_config_file(self): """Create the config file for haproxy.""" # Need to convert uid/gid into username/group try: username = pwd.getpwuid(int(self.user)).pw_name except (ValueError, KeyError): try: username = pwd.getpwnam(self.user).pw_name except KeyError: raise InvalidUserOrGroupException( _("Invalid user/uid: '%s'") % self.user) try: groupname = grp.getgrgid(int(self.group)).gr_name except (ValueError, KeyError): try: groupname = grp.getgrnam(self.group).gr_name except KeyError: raise InvalidUserOrGroupException( _("Invalid group/gid: '%s'") % self.group) cfg_info = { 'host': self.host, 'port': self.port, 'unix_socket_path': self.unix_socket_path, 'user': username, 'group': groupname, 'pidfile': self.pidfile, 'log_level': self.log_level } if self.network_id: cfg_info['res_type'] = 'Network' cfg_info['res_id'] = self.network_id else: cfg_info['res_type'] = 'Router' cfg_info['res_id'] = self.router_id haproxy_cfg = _HAPROXY_CONFIG_TEMPLATE % cfg_info LOG.debug("haproxy_cfg = %s", haproxy_cfg) cfg_dir = self.get_config_path(self.state_path) # uuid has to be included somewhere in the command line so that it can # be tracked by process_monitor. 
self.cfg_path = os.path.join(cfg_dir, "%s.conf" % cfg_info['res_id']) if not os.path.exists(cfg_dir): os.makedirs(cfg_dir) with open(self.cfg_path, "w") as cfg_file: cfg_file.write(haproxy_cfg) @staticmethod def get_config_path(state_path): return os.path.join(state_path or cfg.CONF.state_path, PROXY_CONFIG_DIR) @staticmethod def cleanup_config_file(uuid, state_path): """Delete config file created when metadata proxy was spawned.""" # Delete config file if it exists cfg_path = os.path.join( HaproxyConfigurator.get_config_path(state_path), "%s.conf" % uuid) try: os.unlink(cfg_path) except OSError as ex: # It can happen that this function is called but metadata proxy # was never spawned so its config file won't exist if ex.errno != errno.ENOENT: raise class MetadataDriver(object): monitors = {} @classmethod def _get_metadata_proxy_user_group(cls, conf): user = conf.metadata_proxy_user or str(os.geteuid()) group = conf.metadata_proxy_group or str(os.getegid()) return user, group @classmethod def _get_metadata_proxy_callback(cls, bind_address, port, conf, network_id=None, router_id=None): def callback(pid_file): metadata_proxy_socket = conf.metadata_proxy_socket user, group = ( cls._get_metadata_proxy_user_group(conf)) haproxy = HaproxyConfigurator(network_id, router_id, metadata_proxy_socket, bind_address, port, user, group, conf.state_path, pid_file) haproxy.create_config_file() proxy_cmd = [HAPROXY_SERVICE, '-f', haproxy.cfg_path] return proxy_cmd return callback @classmethod def spawn_monitored_metadata_proxy(cls, monitor, ns_name, port, conf, bind_address="0.0.0.0", network_id=None, router_id=None): uuid = network_id or router_id callback = cls._get_metadata_proxy_callback( bind_address, port, conf, network_id=network_id, router_id=router_id) pm = cls._get_metadata_proxy_process_manager(uuid, conf, ns_name=ns_name, callback=callback) pm.enable() monitor.register(uuid, METADATA_SERVICE_NAME, pm) cls.monitors[router_id] = pm @classmethod def destroy_monitored_metadata_proxy(cls, monitor, uuid, conf, ns_name): monitor.unregister(uuid, METADATA_SERVICE_NAME) pm = cls._get_metadata_proxy_process_manager(uuid, conf, ns_name=ns_name) pm.disable() # Delete metadata proxy config file HaproxyConfigurator.cleanup_config_file(uuid, cfg.CONF.state_path) cls.monitors.pop(uuid, None) @classmethod def _get_metadata_proxy_process_manager(cls, router_id, conf, ns_name=None, callback=None): return external_process.ProcessManager( conf=conf, uuid=router_id, namespace=ns_name, service=HAPROXY_SERVICE, default_cmd_callback=callback) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/ovn/metadata/ovsdb.py0000644000175000017500000000551300000000000024010 0ustar00coreycorey00000000000000# Copyright 2017 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
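# [Illustrative aside] create_config_file() above resolves haproxy's user
# and group with the same two-step fallback: treat the configured value as
# a numeric id first, then fall back to a name lookup. Standalone sketch
# of that fallback, with the simplification that an unknown name raises
# KeyError instead of InvalidUserOrGroupException:
import grp as _grp
import pwd as _pwd


def _resolve_user(user):
    """Accept a uid ('42') or a user name ('neutron')."""
    try:
        return _pwd.getpwuid(int(user)).pw_name
    except (ValueError, KeyError):
        return _pwd.getpwnam(user).pw_name


def _resolve_group(group):
    """Accept a gid ('42') or a group name ('neutron')."""
    try:
        return _grp.getgrgid(int(group)).gr_name
    except (ValueError, KeyError):
        return _grp.getgrnam(group).gr_name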
from ovs.db import idl from ovsdbapp.backend.ovs_idl import connection from ovsdbapp.backend.ovs_idl import idlutils from ovsdbapp.schema.open_vswitch import impl_idl as idl_ovs import tenacity from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf as config from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import impl_idl_ovn from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovsdb_monitor class MetadataAgentOvnSbIdl(ovsdb_monitor.OvnIdl): SCHEMA = 'OVN_Southbound' def __init__(self, chassis=None, events=None, tables=None): connection_string = config.get_ovn_sb_connection() ovsdb_monitor._check_and_set_ssl_files(self.SCHEMA) helper = self._get_ovsdb_helper(connection_string) if tables is None: tables = ('Chassis', 'Encap', 'Port_Binding', 'Datapath_Binding', 'SB_Global') for table in tables: helper.register_table(table) super(MetadataAgentOvnSbIdl, self).__init__( None, connection_string, helper) if chassis and 'Chassis' in tables: self.tables['Chassis'].condition = [['name', '==', chassis]] if events: self.notify_handler.watch_events(events) @tenacity.retry( wait=tenacity.wait_exponential(max=180), reraise=True) def _get_ovsdb_helper(self, connection_string): return idlutils.get_schema_helper(connection_string, self.SCHEMA) def start(self): conn = connection.Connection( self, timeout=config.get_ovn_ovsdb_timeout()) return impl_idl_ovn.OvsdbSbOvnIdl(conn) class MetadataAgentOvsIdl(object): def start(self): connection_string = config.cfg.CONF.ovs.ovsdb_connection helper = idlutils.get_schema_helper(connection_string, 'Open_vSwitch') tables = ('Open_vSwitch', 'Bridge', 'Port', 'Interface') for table in tables: helper.register_table(table) ovs_idl = idl.Idl(connection_string, helper) ovs_idl._session.reconnect.set_probe_interval( config.get_ovn_ovsdb_probe_interval()) conn = connection.Connection( ovs_idl, timeout=config.cfg.CONF.ovs.ovsdb_connection_timeout) return idl_ovs.OvsdbIdl(conn) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/ovn/metadata/server.py0000644000175000017500000001645100000000000024204 0ustar00coreycorey00000000000000# Copyright 2017 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
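# [Illustrative aside] _get_ovsdb_helper() above leans on tenacity for
# exponential backoff while the OVN southbound database comes up. The real
# decorator sets no stop condition (it retries until it succeeds and caps
# only the wait at 180s); this standalone sketch adds stop_after_attempt
# and a small multiplier so it terminates quickly, and fakes the flaky
# call:
import tenacity as _tenacity

_calls = {'n': 0}


@_tenacity.retry(wait=_tenacity.wait_exponential(multiplier=0.01, max=180),
                 stop=_tenacity.stop_after_attempt(5), reraise=True)
def _fetch_schema_helper(connection_string):
    _calls['n'] += 1
    if _calls['n'] < 3:                  # fail the first two attempts
        raise ConnectionError('OVSDB not reachable yet')
    return 'schema helper for %s' % connection_string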
import hashlib import hmac from neutron._i18n import _ from neutron.agent.linux import utils as agent_utils from neutron.agent.ovn.metadata import ovsdb from neutron.common import ipv6_utils from neutron.common.ovn import constants as ovn_const from neutron.conf.agent.metadata import config from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils import requests import six from six.moves import urllib import webob LOG = logging.getLogger(__name__) MODE_MAP = { config.USER_MODE: 0o644, config.GROUP_MODE: 0o664, config.ALL_MODE: 0o666, } class MetadataProxyHandler(object): def __init__(self, conf): self.conf = conf self.subscribe() def subscribe(self): registry.subscribe(self.post_fork_initialize, resources.PROCESS, events.AFTER_INIT) def post_fork_initialize(self, resource, event, trigger, payload=None): # We need to open a connection to OVN SouthBound database for # each worker so that we can process the metadata requests. self.sb_idl = ovsdb.MetadataAgentOvnSbIdl( tables=('Port_Binding', 'Datapath_Binding')).start() @webob.dec.wsgify(RequestClass=webob.Request) def __call__(self, req): try: LOG.debug("Request: %s", req) instance_id, project_id = self._get_instance_and_project_id(req) if instance_id: return self._proxy_request(instance_id, project_id, req) else: return webob.exc.HTTPNotFound() except Exception: LOG.exception("Unexpected error.") msg = _('An unknown error has occurred. ' 'Please try your request again.') explanation = six.text_type(msg) return webob.exc.HTTPInternalServerError(explanation=explanation) def _get_instance_and_project_id(self, req): remote_address = req.headers.get('X-Forwarded-For') network_id = req.headers.get('X-OVN-Network-ID') ports = self.sb_idl.get_network_port_bindings_by_ip(network_id, remote_address) if len(ports) == 1: external_ids = ports[0].external_ids return (external_ids[ovn_const.OVN_DEVID_EXT_ID_KEY], external_ids[ovn_const.OVN_PROJID_EXT_ID_KEY]) return None, None def _proxy_request(self, instance_id, tenant_id, req): headers = { 'X-Forwarded-For': req.headers.get('X-Forwarded-For'), 'X-Instance-ID': instance_id, 'X-Tenant-ID': tenant_id, 'X-Instance-ID-Signature': self._sign_instance_id(instance_id) } nova_host_port = ipv6_utils.valid_ipv6_url( self.conf.nova_metadata_host, self.conf.nova_metadata_port) url = urllib.parse.urlunsplit(( self.conf.nova_metadata_protocol, nova_host_port, req.path_info, req.query_string, '')) disable_ssl_certificate_validation = self.conf.nova_metadata_insecure if self.conf.auth_ca_cert and not disable_ssl_certificate_validation: verify_cert = self.conf.auth_ca_cert else: verify_cert = not disable_ssl_certificate_validation client_cert = None if self.conf.nova_client_cert and self.conf.nova_client_priv_key: client_cert = (self.conf.nova_client_cert, self.conf.nova_client_priv_key) resp = requests.request(method=req.method, url=url, headers=headers, data=req.body, cert=client_cert, verify=verify_cert) if resp.status_code == 200: req.response.content_type = resp.headers['content-type'] req.response.body = resp.content LOG.debug(str(resp)) return req.response elif resp.status_code == 403: LOG.warning( 'The remote metadata server responded with Forbidden. This ' 'response usually occurs when shared secrets do not match.' 
) return webob.exc.HTTPForbidden() elif resp.status_code == 400: return webob.exc.HTTPBadRequest() elif resp.status_code == 404: return webob.exc.HTTPNotFound() elif resp.status_code == 409: return webob.exc.HTTPConflict() elif resp.status_code == 500: msg = _( 'Remote metadata server experienced an internal server error.' ) LOG.warning(msg) explanation = six.text_type(msg) return webob.exc.HTTPInternalServerError(explanation=explanation) else: raise Exception(_('Unexpected response code: %s') % resp.status_code) def _sign_instance_id(self, instance_id): secret = self.conf.metadata_proxy_shared_secret secret = encodeutils.to_utf8(secret) instance_id = encodeutils.to_utf8(instance_id) return hmac.new(secret, instance_id, hashlib.sha256).hexdigest() class UnixDomainMetadataProxy(object): def __init__(self, conf): self.conf = conf agent_utils.ensure_directory_exists_without_file( cfg.CONF.metadata_proxy_socket) def _get_socket_mode(self): mode = self.conf.metadata_proxy_socket_mode if mode == config.DEDUCE_MODE: user = self.conf.metadata_proxy_user if (not user or user == '0' or user == 'root' or agent_utils.is_effective_user(user)): # user is agent effective user or root => USER_MODE mode = config.USER_MODE else: group = self.conf.metadata_proxy_group if not group or agent_utils.is_effective_group(group): # group is agent effective group => GROUP_MODE mode = config.GROUP_MODE else: # otherwise => ALL_MODE mode = config.ALL_MODE return MODE_MAP[mode] def run(self): self.server = agent_utils.UnixDomainWSGIServer( 'networking-ovn-metadata-agent') self.server.start(MetadataProxyHandler(self.conf), self.conf.metadata_proxy_socket, workers=self.conf.metadata_workers, backlog=self.conf.metadata_backlog, mode=self._get_socket_mode()) def wait(self): self.server.wait() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/ovn/metadata_agent.py0000644000175000017500000000260400000000000024047 0ustar00coreycorey00000000000000# Copyright 2017 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
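# [Illustrative aside] _sign_instance_id() above is what lets Nova trust
# the X-Instance-ID header: both sides share metadata_proxy_shared_secret
# and compare HMAC-SHA256 digests, and a mismatch is the 403/Forbidden
# case handled earlier in _proxy_request(). Standalone sketch with made-up
# values:
import hashlib as _hashlib
import hmac as _hmac


def _sign_instance_id(shared_secret, instance_id):
    return _hmac.new(shared_secret.encode('utf-8'),
                     instance_id.encode('utf-8'),
                     _hashlib.sha256).hexdigest()

# _sign_instance_id('s3cret', 'inst-1') -> 64 hex chars; Nova recomputes
# the digest on its side and rejects the request when they differ.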
import sys

from neutron.common import config
from neutron.common import utils
from oslo_config import cfg
from oslo_log import log as logging

from neutron.agent.ovn.metadata import agent
from neutron.conf.agent.metadata import config as meta
from neutron.conf.agent.ovn.metadata import config as ovn_meta

LOG = logging.getLogger(__name__)


def main():
    ovn_meta.register_meta_conf_opts(meta.SHARED_OPTS)
    ovn_meta.register_meta_conf_opts(meta.UNIX_DOMAIN_METADATA_PROXY_OPTS)
    ovn_meta.register_meta_conf_opts(meta.METADATA_PROXY_HANDLER_OPTS)
    ovn_meta.register_meta_conf_opts(ovn_meta.OVS_OPTS, group='ovs')
    config.init(sys.argv[1:])
    config.setup_logging()
    ovn_meta.setup_privsep()
    utils.log_opt_values(LOG)
    agt = agent.MetadataAgent(cfg.CONF)
    agt.start()

neutron-16.0.0.0b2.dev214/neutron/agent/ovsdb/__init__.py
neutron-16.0.0.0b2.dev214/neutron/agent/ovsdb/api.py

# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections
import uuid


def val_to_py(val):
    """Convert a json ovsdb return value to native python object"""
    if isinstance(val, collections.Sequence) and len(val) == 2:
        if val[0] == "uuid":
            return uuid.UUID(val[1])
        elif val[0] == "set":
            return [val_to_py(x) for x in val[1]]
        elif val[0] == "map":
            return {val_to_py(x): val_to_py(y) for x, y in val[1]}
    return val

neutron-16.0.0.0b2.dev214/neutron/agent/ovsdb/impl_idl.py

# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
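# [Illustrative aside] val_to_py() above unwraps OVSDB's typed JSON values
# ("uuid"/"set"/"map" pairs) into native Python objects. A worked example
# with a made-up row value in the OVSDB wire format:
import uuid as _uuid

_wire = ['map', [[['uuid', '69f98856-6d4c-4d4c-b146-4f0e4a0c66e7'],
                  ['set', [1, 2, 3]]]]]
# val_to_py(_wire) == {_uuid.UUID('69f98856-6d4c-4d4c-b146-4f0e4a0c66e7'):
#                      [1, 2, 3]}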
from debtcollector import moves from oslo_config import cfg from ovsdbapp.backend.ovs_idl import command from ovsdbapp.backend.ovs_idl import connection from ovsdbapp.backend.ovs_idl import idlutils from ovsdbapp.backend.ovs_idl import transaction from ovsdbapp.backend.ovs_idl import vlog from ovsdbapp.schema.open_vswitch import impl_idl from neutron.agent.ovsdb.native import connection as n_connection from neutron.conf.agent import ovs_conf from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants NeutronOVSDBTransaction = moves.moved_class( impl_idl.OvsVsctlTransaction, 'NeutronOVSDBTransaction', __name__) VswitchdInterfaceAddException = moves.moved_class( impl_idl.VswitchdInterfaceAddException, 'VswitchdInterfaceAddException', __name__) Transaction = moves.moved_class(transaction.Transaction, 'Transaction', __name__) ovs_conf.register_ovs_agent_opts() _connection = None _idl_monitor = None def api_factory(): global _connection global _idl_monitor if _connection is None: _idl_monitor = n_connection.OvsIdlMonitor() _connection = connection.Connection( idl=_idl_monitor, timeout=cfg.CONF.OVS.ovsdb_timeout) return NeutronOvsdbIdl(_connection, _idl_monitor) class OvsCleanup(command.BaseCommand): def __init__(self, api, bridge, all_ports=False): super(OvsCleanup, self).__init__(api) self.bridge = bridge self.all_ports = all_ports def run_idl(self, txn): br = idlutils.row_by_value(self.api.idl, 'Bridge', 'name', self.bridge) for port in br.ports: if not any(self.is_deletable_port(iface) for iface in port.interfaces): continue br.delvalue('ports', port) for iface in port.interfaces: iface.delete() port.delete() def is_deletable_port(self, port): # Deletable defined as "looks like vif port and not set to skip delete" if self.all_ports: return True if constants.SKIP_CLEANUP in port.external_ids: return False if not all(field in port.external_ids for field in ('iface-id', 'attached-mac')): return False return True class NeutronOvsdbIdl(impl_idl.OvsdbIdl): def __init__(self, connection, idl_monitor): max_level = None if cfg.CONF.OVS.ovsdb_debug else vlog.INFO vlog.use_python_logger(max_level=max_level) self.idl_monitor = idl_monitor super(NeutronOvsdbIdl, self).__init__(connection) def ovs_cleanup(self, bridges, all_ports=False): return OvsCleanup(self, bridges, all_ports) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2070436 neutron-16.0.0.0b2.dev214/neutron/agent/ovsdb/native/0000755000175000017500000000000000000000000022336 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/ovsdb/native/__init__.py0000644000175000017500000000000000000000000024435 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/ovsdb/native/commands.py0000644000175000017500000000134600000000000024515 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ovsdbapp.schema.open_vswitch import commands from neutron.common import _deprecate _deprecate._MovedGlobals(commands) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/ovsdb/native/connection.py0000644000175000017500000001102200000000000025043 0ustar00coreycorey00000000000000# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import os import threading from debtcollector import moves from oslo_config import cfg from ovs.db import idl from ovs.stream import Stream from ovsdbapp.backend.ovs_idl import connection as _connection from ovsdbapp.backend.ovs_idl import event as idl_event from ovsdbapp.backend.ovs_idl import idlutils from ovsdbapp import event as ovsdb_event import tenacity from neutron.agent.ovsdb.native import exceptions as ovsdb_exc from neutron.agent.ovsdb.native import helpers from neutron.conf.agent import ovsdb_api TransactionQueue = moves.moved_class(_connection.TransactionQueue, 'TransactionQueue', __name__) Connection = moves.moved_class(_connection.Connection, 'Connection', __name__) LOG = logging.getLogger(__name__) ovsdb_api.register_ovsdb_api_opts() def configure_ssl_conn(): """Configures required settings for an SSL based OVSDB client connection :return: None """ req_ssl_opts = {'ssl_key_file': cfg.CONF.OVS.ssl_key_file, 'ssl_cert_file': cfg.CONF.OVS.ssl_cert_file, 'ssl_ca_cert_file': cfg.CONF.OVS.ssl_ca_cert_file} for ssl_opt, ssl_file in req_ssl_opts.items(): if not ssl_file: raise ovsdb_exc.OvsdbSslRequiredOptError(ssl_opt=ssl_opt) elif not os.path.exists(ssl_file): raise ovsdb_exc.OvsdbSslConfigNotFound(ssl_file=ssl_file) # TODO(ihrachys): move to ovsdbapp Stream.ssl_set_private_key_file(req_ssl_opts['ssl_key_file']) Stream.ssl_set_certificate_file(req_ssl_opts['ssl_cert_file']) Stream.ssl_set_ca_cert_file(req_ssl_opts['ssl_ca_cert_file']) class BridgeCreateEvent(idl_event.RowEvent): def __init__(self, agent): self.agent = agent table = 'Bridge' super(BridgeCreateEvent, self).__init__((self.ROW_CREATE, ), table, None) self.event_name = 'BridgeCreateEvent' def run(self, event, row, old): LOG.debug('%s, bridge name: %s', self.event_name, row.name) self.agent.add_bridge(str(row.name)) class OvsIdl(idl.Idl): SCHEMA = 'Open_vSwitch' def __init__(self): self._ovsdb_connection = cfg.CONF.OVS.ovsdb_connection if self._ovsdb_connection.startswith('ssl:'): configure_ssl_conn() helper = self._get_ovsdb_helper(self._ovsdb_connection) helper.register_all() super(OvsIdl, self).__init__(self._ovsdb_connection, helper) self.notify_handler = 
ovsdb_event.RowEventHandler() @tenacity.retry(wait=tenacity.wait_exponential(multiplier=0.02), stop=tenacity.stop_after_delay(1), reraise=True) def _do_get_schema_helper(self, connection): return idlutils.get_schema_helper(connection, self.SCHEMA) def _get_ovsdb_helper(self, connection): try: return idlutils.get_schema_helper(connection, self.SCHEMA) except Exception: helpers.enable_connection_uri(connection) return self._do_get_schema_helper(connection) def notify(self, event, row, updates=None): self.notify_handler.notify(event, row, updates) class OvsIdlMonitor(OvsIdl): def __init__(self): super(OvsIdlMonitor, self).__init__() self._lock = threading.Lock() self._bridges_to_monitor = [] self._bridges_added_list = [] def start_bridge_monitor(self, bridge_names): if not bridge_names: return self._bridges_to_monitor = bridge_names event = BridgeCreateEvent(self) self.notify_handler.watch_event(event) def add_bridge(self, bridge_name): with self._lock: if bridge_name in self._bridges_to_monitor: self._bridges_added_list.append(bridge_name) @property def bridges_added(self): with self._lock: bridges = self._bridges_added_list self._bridges_added_list = [] return bridges ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/ovsdb/native/exceptions.py0000644000175000017500000000204000000000000025065 0ustar00coreycorey00000000000000# Copyright 2018 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions as e from neutron._i18n import _ class OvsdbSslConfigNotFound(e.NeutronException): message = _("Specified SSL file %(ssl_file)s could not be found") class OvsdbSslRequiredOptError(e.NeutronException): message = _("Required 'ovs' group option %(ssl_opt)s not set. SSL " "configuration options are required when using SSL " "ovsdb_connection URI") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/ovsdb/native/helpers.py0000644000175000017500000000157100000000000024356 0ustar00coreycorey00000000000000# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
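# [Illustrative aside] OvsIdlMonitor above uses a collect-and-drain
# pattern: bridge-create events append under a lock, and reading
# bridges_added atomically swaps in a fresh list so every event is
# consumed exactly once. Standalone sketch of the same pattern:
import threading as _threading


class _BridgeTracker(object):
    def __init__(self, watched):
        self._lock = _threading.Lock()
        self._watched = set(watched)
        self._added = []

    def add(self, name):                 # called from the event thread
        with self._lock:
            if name in self._watched:
                self._added.append(name)

    def drain(self):                     # called from the agent loop
        with self._lock:
            added, self._added = self._added, []
            return added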
import functools from ovsdbapp.schema.open_vswitch import helpers from neutron.agent.common import utils enable_connection_uri = functools.partial( helpers.enable_connection_uri, execute=utils.execute, run_as_root=True, log_fail_as_error=False, check_exit_code=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/ovsdb/native/vlog.py0000644000175000017500000000132300000000000023656 0ustar00coreycorey00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ovsdbapp.backend.ovs_idl import vlog from neutron.common import _deprecate _deprecate._MovedGlobals(vlog) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/resource_cache.py0000644000175000017500000002646600000000000023275 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib import context as n_ctx from neutron_lib import rpc as n_rpc from oslo_log import log as logging from neutron._i18n import _ from neutron.api.rpc.callbacks.consumer import registry as registry_rpc from neutron.api.rpc.callbacks import events as events_rpc from neutron.api.rpc.handlers import resources_rpc from neutron import objects LOG = logging.getLogger(__name__) objects.register_objects() class RemoteResourceCache(object): """Retrieves and stashes logical resources in their OVO format. This is currently only compatible with OVO objects that have an ID. 
""" def __init__(self, resource_types): self.resource_types = resource_types self._cache_by_type_and_id = {rt: {} for rt in self.resource_types} self._deleted_ids_by_type = {rt: set() for rt in self.resource_types} # track everything we've asked the server so we don't ask again self._satisfied_server_queries = set() self._puller = resources_rpc.ResourcesPullRpcApi() def _type_cache(self, rtype): if rtype not in self.resource_types: raise RuntimeError(_("Resource cache not tracking %s") % rtype) return self._cache_by_type_and_id[rtype] def start_watcher(self): self._watcher = RemoteResourceWatcher(self) def get_resource_by_id(self, rtype, obj_id, agent_restarted=False): """Returns None if it doesn't exist.""" if obj_id in self._deleted_ids_by_type[rtype]: return None cached_item = self._type_cache(rtype).get(obj_id) if cached_item: return cached_item # try server in case object existed before agent start self._flood_cache_for_query(rtype, id=(obj_id, ), agent_restarted=agent_restarted) return self._type_cache(rtype).get(obj_id) def _flood_cache_for_query(self, rtype, agent_restarted=False, **filter_kwargs): """Load info from server for first query. Queries the server if this is the first time a given query for rtype has been issued. """ query_ids = self._get_query_ids(rtype, filter_kwargs) if query_ids.issubset(self._satisfied_server_queries): # we've already asked the server this question so we don't # ask directly again because any updates will have been # pushed to us return context = n_ctx.get_admin_context() resources = self._puller.bulk_pull(context, rtype, filter_kwargs=filter_kwargs) for resource in resources: if self._is_stale(rtype, resource): # if the server was slow enough to respond the object may have # been updated already and pushed to us in another thread. LOG.debug("Ignoring stale update for %s: %s", rtype, resource) continue self.record_resource_update(context, rtype, resource, agent_restarted=agent_restarted) LOG.debug("%s resources returned for queries %s", len(resources), query_ids) self._satisfied_server_queries.update(query_ids) def _get_query_ids(self, rtype, filters): """Turns filters for a given rypte into a set of query IDs. This can result in multiple queries due to the nature of the query processing on the server side. Since multiple values are treated as an OR condition, a query for {'id': ('1', '2')} is equivalent to a query for {'id': ('1',)} and {'id': ('2')}. This method splits the former into the latter to ensure we aren't asking the server something we already know. """ query_ids = set() for k, values in tuple(sorted(filters.items())): if len(values) > 1: for v in values: new_filters = filters.copy() new_filters[k] = (v, ) query_ids.update(self._get_query_ids(rtype, new_filters)) break else: # no multiple value filters left so add an ID query_ids.add((rtype, ) + tuple(sorted(filters.items()))) return query_ids def get_resources(self, rtype, filters): """Find resources that match key:values in filters dict. If the attribute on the object is a list, each value is checked if it is in the list. The values in the dicionary for a single key are matched in an OR fashion. 
""" self._flood_cache_for_query(rtype, **filters) def match(obj): for key, values in filters.items(): for value in values: attr = getattr(obj, key) if isinstance(attr, (list, tuple, set)): # attribute is a list so we check if value is in # list if value in attr: break elif value == attr: break else: # no match found for this key return False return True return self.match_resources_with_func(rtype, match) def match_resources_with_func(self, rtype, matcher): """Returns a list of all resources satisfying func matcher.""" # TODO(kevinbenton): this is O(N), offer better lookup functions return [r for r in self._type_cache(rtype).values() if matcher(r)] def _is_stale(self, rtype, resource): """Determines if a given resource update is safe to ignore. It can be safe to ignore if it has already been deleted or if we have a copy with a higher revision number. """ if resource.id in self._deleted_ids_by_type[rtype]: return True existing = self._type_cache(rtype).get(resource.id) if existing and existing.revision_number > resource.revision_number: # NOTE(kevinbenton): we could be strict and check for >=, but this # makes us more tolerant of bugs on the server where we forget to # bump the revision_number. return True return False def record_resource_update(self, context, rtype, resource, agent_restarted=False): """Takes in an OVO and generates an event on relevant changes. A change is deemed to be relevant if it is not stale and if any fields changed beyond the revision number and update time. Both creates and updates are handled in this function. """ if self._is_stale(rtype, resource): LOG.debug("Ignoring stale update for %s: %s", rtype, resource) return existing = self._type_cache(rtype).get(resource.id) self._type_cache(rtype)[resource.id] = resource changed_fields = self._get_changed_fields(existing, resource) if not changed_fields: LOG.debug("Received resource %s update without any changes: %s", rtype, resource.id) return if existing: LOG.debug("Resource %s %s updated (revision_number %s->%s). " "Old fields: %s New fields: %s", rtype, existing.id, existing.revision_number, resource.revision_number, {f: existing.get(f) for f in changed_fields}, {f: resource.get(f) for f in changed_fields}) else: LOG.debug("Received new resource %s: %s", rtype, resource) # local notification for agent internals to subscribe to registry.notify(rtype, events.AFTER_UPDATE, self, context=context, changed_fields=changed_fields, existing=existing, updated=resource, resource_id=resource.id, agent_restarted=agent_restarted) def record_resource_delete(self, context, rtype, resource_id): # deletions are final, record them so we never # accept new data for the same ID. 
LOG.debug("Resource %s deleted: %s", rtype, resource_id) # TODO(kevinbenton): we need a way to expire items from the set at # some TTL so it doesn't grow indefinitely with churn if resource_id in self._deleted_ids_by_type[rtype]: LOG.debug("Skipped duplicate delete event for %s", resource_id) return self._deleted_ids_by_type[rtype].add(resource_id) existing = self._type_cache(rtype).pop(resource_id, None) # local notification for agent internals to subscribe to registry.notify(rtype, events.AFTER_DELETE, self, context=context, existing=existing, resource_id=resource_id) def _get_changed_fields(self, old, new): """Returns changed fields excluding update time and revision.""" new = new.to_dict() changed = set(new) if old: for k, v in old.to_dict().items(): if v == new.get(k): changed.discard(k) for ignore in ('revision_number', 'updated_at'): changed.discard(ignore) return changed class RemoteResourceWatcher(object): """Converts RPC callback notifications to local registry notifications. This allows a constructor to listen for RPC callbacks for a given dictionary of resources and fields desired. This watcher will listen to the RPC callbacks as sent on the wire and handle things like out-of-order message detection and throwing away updates to fields the constructor doesn't care about. All watched resources must be primary keyed on a field called 'id' and have a standard attr revision number. """ def __init__(self, remote_resource_cache): self.rcache = remote_resource_cache self._init_rpc_listeners() def _init_rpc_listeners(self): endpoints = [resources_rpc.ResourcesPushRpcCallback()] self._connection = n_rpc.Connection() for rtype in self.rcache.resource_types: registry_rpc.register(self.resource_change_handler, rtype) topic = resources_rpc.resource_type_versioned_topic(rtype) self._connection.create_consumer(topic, endpoints, fanout=True) self._connection.consume_in_threads() def resource_change_handler(self, context, rtype, resources, event_type): for r in resources: if event_type == events_rpc.DELETED: self.rcache.record_resource_delete(context, rtype, r.id) else: # creates and updates are treated equally self.rcache.record_resource_update(context, rtype, r) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/rpc.py0000644000175000017500000004224600000000000021101 0ustar00coreycorey00000000000000# Copyright (c) 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
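# [Illustrative aside] _get_changed_fields() above is a dict diff that
# drops the bookkeeping fields touched by every update. An equivalent
# standalone sketch over plain dicts:
def _changed_fields(old, new, ignored=('revision_number', 'updated_at')):
    changed = set(new)
    if old:
        changed = {k for k in new if new.get(k) != old.get(k)}
    return changed - set(ignored)

# _changed_fields({'name': 'a', 'revision_number': 1},
#                 {'name': 'b', 'revision_number': 2}) == {'name'}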
from datetime import datetime import itertools import netaddr from neutron_lib.agent import topics from neutron_lib.api.definitions import portbindings_extended as pb_ext from neutron_lib.callbacks import events as callback_events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources as callback_resources from neutron_lib import constants from neutron_lib.plugins import utils from neutron_lib import rpc as lib_rpc from oslo_log import log as logging import oslo_messaging from oslo_utils import uuidutils from neutron.agent import resource_cache from neutron.api.rpc.callbacks import resources from neutron.common import _constants as n_const from neutron import objects LOG = logging.getLogger(__name__) BINDING_DEACTIVATE = 'binding_deactivate' def create_consumers(endpoints, prefix, topic_details, start_listening=True): """Create agent RPC consumers. :param endpoints: The list of endpoints to process the incoming messages. :param prefix: Common prefix for the plugin/agent message queues. :param topic_details: A list of topics. Each topic has a name, an operation, and an optional host param keying the subscription to topic.host for plugin calls. :param start_listening: if True, it starts the processing loop :returns: A common Connection. """ connection = lib_rpc.Connection() for details in topic_details: topic, operation, node_name = itertools.islice( itertools.chain(details, [None]), 3) topic_name = topics.get_topic_name(prefix, topic, operation) connection.create_consumer(topic_name, endpoints, fanout=True) if node_name: node_topic_name = '%s.%s' % (topic_name, node_name) connection.create_consumer(node_topic_name, endpoints, fanout=False) if start_listening: connection.consume_in_threads() return connection class PluginReportStateAPI(object): """RPC client used to report state back to plugin. This class implements the client side of an rpc interface. The server side can be found in neutron.db.agents_db.AgentExtRpcCallback. For more information on changing rpc interfaces, see doc/source/contributor/internals/rpc_api.rst. """ def __init__(self, topic): target = oslo_messaging.Target(topic=topic, version='1.2', namespace=constants.RPC_NAMESPACE_STATE) self.client = lib_rpc.get_client(target) def has_alive_neutron_server(self, context, **kwargs): cctxt = self.client.prepare() return cctxt.call(context, 'has_alive_neutron_server', **kwargs) def report_state(self, context, agent_state, use_call=False): cctxt = self.client.prepare( timeout=lib_rpc.TRANSPORT.conf.rpc_response_timeout) # add unique identifier to a report # that can be logged on server side. # This create visible correspondence between events on # the agent and on the server agent_state['uuid'] = uuidutils.generate_uuid() kwargs = { 'agent_state': {'agent_state': agent_state}, 'time': datetime.utcnow().strftime(constants.ISO8601_TIME_FORMAT), } method = cctxt.call if use_call else cctxt.cast return method(context, 'report_state', **kwargs) class PluginApi(object): '''Agent side of the rpc API. API version history: 1.0 - Initial version. 
1.3 - get_device_details rpc signature upgrade to obtain 'host' and return value to include fixed_ips and device_owner for the device port 1.4 - tunnel_sync rpc signature upgrade to obtain 'host' 1.5 - Support update_device_list and get_devices_details_list_and_failed_devices 1.6 - Support get_network_details 1.7 - Support get_ports_by_vnic_type_and_host 1.8 - Rename agent_restarted to refresh_tunnels in update_device_list to reflect its expanded purpose ''' def __init__(self, topic): target = oslo_messaging.Target(topic=topic, version='1.0') self.client = lib_rpc.get_client(target) def get_device_details(self, context, device, agent_id, host=None): cctxt = self.client.prepare() return cctxt.call(context, 'get_device_details', device=device, agent_id=agent_id, host=host) def get_devices_details_list(self, context, devices, agent_id, host=None): cctxt = self.client.prepare(version='1.3') return cctxt.call(context, 'get_devices_details_list', devices=devices, agent_id=agent_id, host=host) def get_devices_details_list_and_failed_devices(self, context, devices, agent_id, host=None, **kwargs): """Get devices details and the list of devices that failed. This method returns the devices details. If an error is thrown when retrieving the devices details, the device is put in a list of failed devices. """ cctxt = self.client.prepare(version='1.5') return cctxt.call( context, 'get_devices_details_list_and_failed_devices', devices=devices, agent_id=agent_id, host=host) def get_network_details(self, context, network, agent_id, host=None): cctxt = self.client.prepare(version='1.6') return cctxt.call(context, 'get_network_details', network=network, agent_id=agent_id, host=host) def update_device_down(self, context, device, agent_id, host=None): cctxt = self.client.prepare() return cctxt.call(context, 'update_device_down', device=device, agent_id=agent_id, host=host) def update_device_up(self, context, device, agent_id, host=None): cctxt = self.client.prepare() return cctxt.call(context, 'update_device_up', device=device, agent_id=agent_id, host=host) def update_device_list(self, context, devices_up, devices_down, agent_id, host, refresh_tunnels=False): cctxt = self.client.prepare(version='1.8') ret_devices_up = [] failed_devices_up = [] ret_devices_down = [] failed_devices_down = [] step = n_const.RPC_RES_PROCESSING_STEP devices_up = list(devices_up) devices_down = list(devices_down) for i in range(0, max(len(devices_up), len(devices_down)), step): # Divide-and-conquer RPC timeout ret = cctxt.call(context, 'update_device_list', devices_up=devices_up[i:i + step], devices_down=devices_down[i:i + step], agent_id=agent_id, host=host, refresh_tunnels=refresh_tunnels) ret_devices_up.extend(ret.get("devices_up", [])) failed_devices_up.extend(ret.get("failed_devices_up", [])) ret_devices_down.extend(ret.get("devices_down", [])) failed_devices_down.extend(ret.get("failed_devices_down", [])) return {'devices_up': ret_devices_up, 'failed_devices_up': failed_devices_up, 'devices_down': ret_devices_down, 'failed_devices_down': failed_devices_down} def tunnel_sync(self, context, tunnel_ip, tunnel_type=None, host=None): cctxt = self.client.prepare(version='1.4') return cctxt.call(context, 'tunnel_sync', tunnel_ip=tunnel_ip, tunnel_type=tunnel_type, host=host) def get_ports_by_vnic_type_and_host(self, context, vnic_type, host): cctxt = self.client.prepare(version='1.7') return cctxt.call(context, 'get_ports_by_vnic_type_and_host', vnic_type=vnic_type, host=host) class CacheBackedPluginApi(PluginApi): RESOURCE_TYPES = 
[resources.PORT, resources.SECURITYGROUP, resources.SECURITYGROUPRULE, resources.NETWORK, resources.SUBNET] def __init__(self, *args, **kwargs): super(CacheBackedPluginApi, self).__init__(*args, **kwargs) self.remote_resource_cache = None self._create_cache_for_l2_agent() def register_legacy_notification_callbacks(self, legacy_interface): """Emulates the server-side notifications from ml2 AgentNotifierApi. legacy_interface is an object with 'delete'/'update' methods for core resources. """ self._legacy_interface = legacy_interface for e in (callback_events.AFTER_UPDATE, callback_events.AFTER_DELETE): for r in (resources.PORT, resources.NETWORK): registry.subscribe(self._legacy_notifier, r, e) def _legacy_notifier(self, rtype, event, trigger, context, resource_id, **kwargs): """Checks if legacy interface is expecting calls for resource. looks for port_update, network_delete, etc and calls them with the payloads the handlers are expecting (an ID). """ rtype = rtype.lower() # all legacy handlers don't camelcase agent_restarted = kwargs.pop("agent_restarted", None) method, host_with_activation, host_with_deactivation = ( self._get_method_host(rtype, event, **kwargs)) if not hasattr(self._legacy_interface, method): # TODO(kevinbenton): once these notifications are stable, emit # a deprecation warning for legacy handlers return # If there is a binding deactivation, we must also notify the # corresponding activation if method == BINDING_DEACTIVATE: self._legacy_interface.binding_deactivate( context, port_id=resource_id, host=host_with_deactivation) self._legacy_interface.binding_activate( context, port_id=resource_id, host=host_with_activation) else: payload = {rtype: {'id': resource_id}, '%s_id' % rtype: resource_id} if method == "port_update" and agent_restarted is not None: # Mark ovs-agent restart for local port_update payload["agent_restarted"] = agent_restarted getattr(self._legacy_interface, method)(context, **payload) def _get_method_host(self, rtype, event, **kwargs): """Constructs the name of method to be called in the legacy interface. If the event received is a port update that contains a binding activation where a previous binding is deactivated, the method name is 'binding_deactivate' and the host where the binding has to be deactivated is returned. Otherwise, the method name is constructed from rtype and the event received and the host is None. """ is_delete = event == callback_events.AFTER_DELETE suffix = 'delete' if is_delete else 'update' method = "%s_%s" % (rtype, suffix) host_with_activation = None host_with_deactivation = None if is_delete or rtype != callback_resources.PORT: return method, host_with_activation, host_with_deactivation # A port update was received. 
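        # Editorial sketch (not neutron code): the legacy dispatch above
        # derives handler names mechanically, e.g. with rtype 'port':
        #
        #   >>> suffix = 'delete' if is_delete else 'update'
        #   >>> "%s_%s" % ('port', suffix)
        #   'port_update'
        #
        # so a legacy interface only needs methods such as port_update,
        # port_delete, network_update or network_delete to receive calls.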
Find out if it is a binding activation # where a previous binding was deactivated BINDINGS = pb_ext.COLLECTION_NAME if BINDINGS in kwargs.get('changed_fields', set()): existing_active_binding = ( utils.get_port_binding_by_status_and_host( getattr(kwargs['existing'], 'bindings', []), constants.ACTIVE)) updated_active_binding = ( utils.get_port_binding_by_status_and_host( getattr(kwargs['updated'], 'bindings', []), constants.ACTIVE)) if (existing_active_binding and updated_active_binding and existing_active_binding.host != updated_active_binding.host): if (utils.get_port_binding_by_status_and_host( getattr(kwargs['updated'], 'bindings', []), constants.INACTIVE, host=existing_active_binding.host)): method = BINDING_DEACTIVATE host_with_activation = updated_active_binding.host host_with_deactivation = existing_active_binding.host return method, host_with_activation, host_with_deactivation def get_devices_details_list_and_failed_devices(self, context, devices, agent_id, host=None, agent_restarted=False): result = {'devices': [], 'failed_devices': []} for device in devices: try: result['devices'].append( self.get_device_details(context, device, agent_id, host, agent_restarted)) except Exception: LOG.exception("Failed to get details for device %s", device) result['failed_devices'].append(device) return result def get_device_details(self, context, device, agent_id, host=None, agent_restarted=False): port_obj = self.remote_resource_cache.get_resource_by_id( resources.PORT, device, agent_restarted) if not port_obj: LOG.debug("Device %s does not exist in cache.", device) return {'device': device} if not port_obj.binding_levels: LOG.warning("Device %s is not bound.", port_obj) return {'device': device} segment = port_obj.binding_levels[-1].segment if not segment: LOG.debug("Device %s is not bound to any segment.", port_obj) return {'device': device} binding = utils.get_port_binding_by_status_and_host( port_obj.bindings, constants.ACTIVE, raise_if_not_found=True, port_id=port_obj.id) if (port_obj.device_owner.startswith( constants.DEVICE_OWNER_COMPUTE_PREFIX) and binding[pb_ext.HOST] != host): LOG.debug("Device %s has no active binding in this host", port_obj) return {'device': device, constants.NO_ACTIVE_BINDING: True} net = self.remote_resource_cache.get_resource_by_id( resources.NETWORK, port_obj.network_id) net_qos_policy_id = net.qos_policy_id # match format of old RPC interface mac_addr = str(netaddr.EUI(str(port_obj.mac_address), dialect=netaddr.mac_unix_expanded)) entry = { 'device': device, 'device_id': port_obj.device_id, 'network_id': port_obj.network_id, 'port_id': port_obj.id, 'mac_address': mac_addr, 'admin_state_up': port_obj.admin_state_up, 'network_type': segment.network_type, 'segmentation_id': segment.segmentation_id, 'physical_network': segment.physical_network, 'fixed_ips': [{'subnet_id': o.subnet_id, 'ip_address': str(o.ip_address)} for o in port_obj.fixed_ips], 'device_owner': port_obj.device_owner, 'allowed_address_pairs': [{'mac_address': o.mac_address, 'ip_address': o.ip_address} for o in port_obj.allowed_address_pairs], 'port_security_enabled': getattr(port_obj.security, 'port_security_enabled', True), 'qos_policy_id': port_obj.qos_policy_id, 'network_qos_policy_id': net_qos_policy_id, 'profile': binding.profile, 'vif_type': binding.vif_type, 'vnic_type': binding.vnic_type, 'security_groups': list(port_obj.security_group_ids) } LOG.debug("Returning: %s", entry) return entry def get_devices_details_list(self, context, devices, agent_id, host=None): return 
[self.get_device_details(context, device, agent_id, host) for device in devices] def _create_cache_for_l2_agent(self): """Create a push-notifications cache for L2 agent related resources.""" objects.register_objects() rcache = resource_cache.RemoteResourceCache(self.RESOURCE_TYPES) rcache.start_watcher() self.remote_resource_cache = rcache ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/securitygroups_rpc.py0000644000175000017500000002647700000000000024300 0ustar00coreycorey00000000000000# Copyright 2012, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import functools from neutron_lib.api.definitions import rbac_security_groups as rbac_sg_apidef from neutron_lib.api.definitions import stateful_security_group as stateful_sg from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from neutron.agent import firewall from neutron.common import _constants as common_constants from neutron.conf.agent import securitygroups_rpc as sc_cfg LOG = logging.getLogger(__name__) sc_cfg.register_securitygroups_opts() def is_firewall_enabled(): return cfg.CONF.SECURITYGROUP.enable_security_group def _disable_extension(extension, aliases): if extension in aliases: aliases.remove(extension) def disable_security_group_extension_by_config(aliases): if not is_firewall_enabled(): LOG.info('Disabled security-group extension.') _disable_extension('security-group', aliases) _disable_extension(rbac_sg_apidef.ALIAS, aliases) _disable_extension(stateful_sg.ALIAS, aliases) LOG.info('Disabled allowed-address-pairs extension.') _disable_extension('allowed-address-pairs', aliases) class SecurityGroupAgentRpc(object): """Enables SecurityGroup agent support in agent implementations.""" def __init__(self, context, plugin_rpc, local_vlan_map=None, defer_refresh_firewall=False, integration_bridge=None): self.context = context self.plugin_rpc = plugin_rpc self.init_firewall(defer_refresh_firewall, integration_bridge) def _get_trusted_devices(self, device_ids, devices): trusted_devices = [] # Devices which are already added in firewall ports should # not be treated as trusted devices but as regular ports all_devices = devices.copy() all_devices.update(self.firewall.ports) device_names = [ dev['device'] for dev in all_devices.values()] for device_id in device_ids: if (device_id not in all_devices.keys() and device_id not in device_names): trusted_devices.append(device_id) return trusted_devices def init_firewall(self, defer_refresh_firewall=False, integration_bridge=None): firewall_driver = cfg.CONF.SECURITYGROUP.firewall_driver or 'noop' LOG.debug("Init firewall settings (driver=%s)", firewall_driver) firewall_class = firewall.load_firewall_driver_class(firewall_driver) try: self.firewall = firewall_class( integration_bridge=integration_bridge) except TypeError: self.firewall = firewall_class() # The following flag will be set to true if port filter must not be # 
applied as soon as a rule or membership notification is received self.defer_refresh_firewall = defer_refresh_firewall # Stores devices for which firewall should be refreshed when # deferred refresh is enabled. self.devices_to_refilter = set() self._use_enhanced_rpc = None @property def use_enhanced_rpc(self): if self._use_enhanced_rpc is None: self._use_enhanced_rpc = ( self._check_enhanced_rpc_is_supported_by_server()) return self._use_enhanced_rpc def _check_enhanced_rpc_is_supported_by_server(self): try: self.plugin_rpc.security_group_info_for_devices( self.context, devices=[]) except oslo_messaging.UnsupportedVersion: LOG.warning('security_group_info_for_devices rpc call not ' 'supported by the server, falling back to old ' 'security_group_rules_for_devices which scales ' 'worse.') return False return True def skip_if_noopfirewall_or_firewall_disabled(func): @functools.wraps(func) def decorated_function(self, *args, **kwargs): if (isinstance(self.firewall, firewall.NoopFirewallDriver) or not is_firewall_enabled()): LOG.info("Skipping method %s as firewall is disabled " "or configured as NoopFirewallDriver.", func.__name__) else: return func(self, # pylint: disable=not-callable *args, **kwargs) return decorated_function @skip_if_noopfirewall_or_firewall_disabled def init_ovs_dvr_firewall(self, dvr_agent): dvr_agent.set_firewall(self.firewall) @skip_if_noopfirewall_or_firewall_disabled def prepare_devices_filter(self, device_ids): if not device_ids: return LOG.info("Preparing filters for devices %s", device_ids) self._apply_port_filter(device_ids) def _apply_port_filter(self, device_ids, update_filter=False): step = common_constants.AGENT_RES_PROCESSING_STEP if self.use_enhanced_rpc: devices = {} security_groups = {} security_group_member_ips = {} for i in range(0, len(device_ids), step): devices_info = self.plugin_rpc.security_group_info_for_devices( self.context, list(device_ids)[i:i + step]) devices.update(devices_info['devices']) security_groups.update(devices_info['security_groups']) security_group_member_ips.update(devices_info['sg_member_ips']) else: devices = self.plugin_rpc.security_group_rules_for_devices( self.context, list(device_ids)) trusted_devices = self._get_trusted_devices(device_ids, devices) with self.firewall.defer_apply(): if self.use_enhanced_rpc: LOG.debug("Update security group information for ports %s", devices.keys()) self._update_security_group_info( security_groups, security_group_member_ips) for device in devices.values(): if update_filter: LOG.debug("Update port filter for %s", device['device']) self.firewall.update_port_filter(device) else: LOG.debug("Prepare port filter for %s", device['device']) self.firewall.prepare_port_filter(device) self.firewall.process_trusted_ports(trusted_devices) def _update_security_group_info(self, security_groups, security_group_member_ips): LOG.debug("Update security group information") for sg_id, sg_rules in security_groups.items(): self.firewall.update_security_group_rules(sg_id, sg_rules) for remote_sg_id, member_ips in security_group_member_ips.items(): self.firewall.update_security_group_members( remote_sg_id, member_ips) def security_groups_rule_updated(self, security_groups): LOG.info("Security group " "rule updated %r", security_groups) self._security_group_updated( security_groups, 'security_groups', 'sg_rule') def security_groups_member_updated(self, security_groups): LOG.info("Security group " "member updated %r", security_groups) self._security_group_updated( security_groups, 'security_group_source_groups', 
'sg_member') def _security_group_updated(self, security_groups, attribute, action_type): devices = [] sec_grp_set = set(security_groups) for device in self.firewall.ports.values(): if sec_grp_set & set(device.get(attribute, [])): devices.append(device['device']) if devices: if self.use_enhanced_rpc: self.firewall.security_group_updated(action_type, sec_grp_set) if self.defer_refresh_firewall: LOG.debug("Adding %s devices to the list of devices " "for which firewall needs to be refreshed", devices) self.devices_to_refilter |= set(devices) else: self.refresh_firewall(devices) def remove_devices_filter(self, device_ids): if not device_ids: return LOG.info("Remove device filter for %r", device_ids) with self.firewall.defer_apply(): for device_id in device_ids: device = self.firewall.ports.get(device_id) if device: self.firewall.remove_port_filter(device) else: self.firewall.remove_trusted_ports([device_id]) @skip_if_noopfirewall_or_firewall_disabled def refresh_firewall(self, device_ids=None): LOG.info("Refresh firewall rules") if not device_ids: device_ids = self.firewall.ports.keys() if not device_ids: LOG.info("No ports here to refresh firewall") return self._apply_port_filter(device_ids, update_filter=True) def firewall_refresh_needed(self): return bool(self.devices_to_refilter) def setup_port_filters(self, new_devices, updated_devices): """Configure port filters for devices. This routine applies filters for new devices and refreshes firewall rules when devices have been updated, or when there are changes in security group membership or rules. :param new_devices: set containing identifiers for new devices :param updated_devices: set containing identifiers for updated devices """ # These data structures are cleared here in order to avoid # losing updates occurring during firewall refresh devices_to_refilter = self.devices_to_refilter self.devices_to_refilter = set() # We must call prepare_devices_filter() after we've grabbed # self.devices_to_refilter since an update for a new port # could arrive while we're processing, and we need to make # sure we don't skip it. It will get handled the next time. if new_devices: LOG.debug("Preparing device filters for %d new devices", len(new_devices)) self.prepare_devices_filter(new_devices) if self.use_enhanced_rpc and updated_devices: self.firewall.security_group_updated('sg_member', [], updated_devices) # If a device is both in new and updated devices # avoid reprocessing it updated_devices = ((updated_devices | devices_to_refilter) - new_devices) if updated_devices: LOG.debug("Refreshing firewall for %d devices", len(updated_devices)) self.refresh_firewall(updated_devices) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2070436 neutron-16.0.0.0b2.dev214/neutron/agent/windows/0000755000175000017500000000000000000000000021425 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/windows/__init__.py0000644000175000017500000000000000000000000023524 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/windows/ip_lib.py0000644000175000017500000000477700000000000023254 0ustar00coreycorey00000000000000# Copyright 2016 Cloudbase Solutions. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netifaces from oslo_log import log as logging LOG = logging.getLogger(__name__) class IPWrapper(object): def get_device_by_ip(self, ip): if not ip: return for device in self.get_devices(): if device.device_has_ip(ip): return device def get_devices(self): try: return [IPDevice(iface) for iface in netifaces.interfaces()] except (OSError, MemoryError): LOG.error("Failed to get network interfaces.") return [] class IPDevice(object): def __init__(self, name): self.name = name self.link = IPLink(self) def read_ifaddresses(self): try: device_addresses = netifaces.ifaddresses(self.name) except ValueError: LOG.error("The device does not exist on the system: %s.", self.name) return except OSError: LOG.error("Failed to get interface addresses: %s.", self.name) return return device_addresses def device_has_ip(self, ip): device_addresses = self.read_ifaddresses() if device_addresses is None: return False addresses = [ip_addr['addr'] for ip_addr in device_addresses.get(netifaces.AF_INET, []) + device_addresses.get(netifaces.AF_INET6, [])] return ip in addresses class IPLink(object): def __init__(self, parent): self._parent = parent @property def address(self): device_addresses = self._parent.read_ifaddresses() if device_addresses is None: return False return [eth_addr['addr'] for eth_addr in device_addresses.get(netifaces.AF_LINK, [])] def add_namespace_to_cmd(cmd, namespace=None): """Add an optional namespace to the command.""" return cmd ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/agent/windows/utils.py0000644000175000017500000001234200000000000023141 0ustar00coreycorey00000000000000# Copyright 2015 Cloudbase Solutions. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import io import os import eventlet from eventlet import tpool from neutron_lib import exceptions from neutron_lib.utils import helpers from oslo_log import log as logging from oslo_utils import encodeutils import six from neutron._i18n import _ if os.name == 'nt': import wmi LOG = logging.getLogger(__name__) # subprocess.Popen will spawn two threads consuming stdout/stderr when passing # data through stdin. We need to make sure that *native* threads will be used # as pipes are blocking on Windows. 
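# Editorial aside: eventlet.patcher.original() returns the unpatched
# (native) module even when the process has been monkey-patched, so the
# two assignments below pin real OS threads for subprocess pipe handling.
# A minimal illustration, assuming a monkey-patched process:
#
#     import eventlet
#     eventlet.monkey_patch()
#     import threading                      # green, patched module
#     native_threading = eventlet.patcher.original('threading')
#     assert native_threading is not threading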
subprocess = eventlet.patcher.original('subprocess') subprocess.threading = eventlet.patcher.original('threading') def create_process(cmd, run_as_root=False, addl_env=None, tpool_proxy=True): cmd = list(map(str, cmd)) LOG.debug("Running command: %s", cmd) env = os.environ.copy() if addl_env: env.update(addl_env) popen = subprocess.Popen obj = popen(cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, preexec_fn=None, close_fds=False) if tpool_proxy and eventlet.getcurrent().parent: # If we intend to access the process streams, we need to wrap this # in a tpool proxy object, avoding blocking other greenthreads. # # The 'file' type is not available on Python 3.x. file_type = getattr(six.moves.builtins, 'file', io.IOBase) obj = tpool.Proxy(obj, autowrap=(file_type, )) return obj, cmd def _get_wmi_process(pid): if not pid: return None conn = wmi.WMI() processes = conn.Win32_Process(ProcessId=pid) if processes: return processes[0] return None def kill_process(pid, signal, run_as_root=False): """Kill the process with the given pid using the given signal.""" process = _get_wmi_process(pid) try: if process: process.Terminate() except Exception: if _get_wmi_process(pid): raise def execute(cmd, process_input=None, addl_env=None, check_exit_code=True, return_stderr=False, log_fail_as_error=True, extra_ok_codes=None, run_as_root=False, do_decode=True): if process_input is not None: _process_input = encodeutils.to_utf8(process_input) else: _process_input = None obj, cmd = create_process(cmd, addl_env=addl_env, tpool_proxy=False) _stdout, _stderr = avoid_blocking_call(obj.communicate, _process_input) obj.stdin.close() _stdout = helpers.safe_decode_utf8(_stdout) _stderr = helpers.safe_decode_utf8(_stderr) m = _("\nCommand: %(cmd)s\nExit code: %(code)s\nStdin: %(stdin)s\n" "Stdout: %(stdout)s\nStderr: %(stderr)s") % \ {'cmd': cmd, 'code': obj.returncode, 'stdin': process_input or '', 'stdout': _stdout, 'stderr': _stderr} extra_ok_codes = extra_ok_codes or [] if obj.returncode and obj.returncode in extra_ok_codes: obj.returncode = None log_msg = m.strip().replace('\n', '; ') if obj.returncode and log_fail_as_error: LOG.error(log_msg) else: LOG.debug(log_msg) if obj.returncode and check_exit_code: raise exceptions.ProcessExecutionError(m, returncode=obj.returncode) return (_stdout, _stderr) if return_stderr else _stdout def avoid_blocking_call(f, *args, **kwargs): """Ensure that the method "f" will not block other greenthreads. Performs the call to the function "f" received as parameter in a different thread using tpool.execute when called from a greenthread. This will ensure that the function "f" will not block other greenthreads. If not called from a greenthread, it will invoke the function "f" directly. The function "f" will receive as parameters the arguments "args" and keyword arguments "kwargs". """ # Note that eventlet.getcurrent will always return a greenlet object. # In case of a greenthread, the parent greenlet will always be the hub # loop greenlet. if eventlet.getcurrent().parent: return tpool.execute(f, *args, **kwargs) else: return f(*args, **kwargs) def get_root_helper_child_pid(pid, expected_cmd, run_as_root=False): # We don't use a root helper on Windows. 
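    # Editorial sketch (not neutron code): avoid_blocking_call() above is
    # what keeps long native calls, such as the communicate() call in
    # execute(), from starving the event hub. An equivalent standalone use:
    #
    #   >>> import time
    #   >>> # dispatched through tpool.execute when run from a greenthread,
    #   >>> # called directly otherwise
    #   >>> avoid_blocking_call(time.sleep, 1)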
return str(pid) def process_is_running(pid): """Find if the given PID is running in the system.""" return _get_wmi_process(pid) is not None def pid_invoked_with_cmdline(pid, expected_cmd): process = _get_wmi_process(pid) if not process: return False command = process.CommandLine return command == " ".join(expected_cmd) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2070436 neutron-16.0.0.0b2.dev214/neutron/api/0000755000175000017500000000000000000000000017406 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/api/__init__.py0000644000175000017500000000000000000000000021505 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/api/api_common.py0000644000175000017500000004646300000000000022116 0ustar00coreycorey00000000000000# Copyright 2011 Citrix System. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools from neutron_lib.api import attributes from neutron_lib import constants from neutron_lib.db import model_base from neutron_lib import exceptions from oslo_config import cfg import oslo_i18n from oslo_log import log as logging from oslo_serialization import jsonutils from six.moves import urllib from webob import exc from neutron._i18n import _ from neutron.api import extensions from neutron import wsgi LOG = logging.getLogger(__name__) def ensure_if_match_supported(): """Raises exception if 'if-match' revision matching unsupported.""" if 'revision-if-match' in (extensions.PluginAwareExtensionManager. get_instance().extensions): return msg = _("This server does not support constraining operations based on " "revision numbers") raise exceptions.BadRequest(resource='if-match', msg=msg) def check_request_for_revision_constraint(request): """Parses, verifies, and returns a constraint from a request.""" revision_number = None for e in getattr(request.if_match, 'etags', []): if e.startswith('revision_number='): if revision_number is not None: msg = _("Multiple revision_number etags are not supported.") raise exceptions.BadRequest(resource='if-match', msg=msg) ensure_if_match_supported() try: revision_number = int(e.split('revision_number=')[1]) except ValueError: msg = _("Revision number etag must be in the format of " "revision_number=") raise exceptions.BadRequest(resource='if-match', msg=msg) return revision_number def is_filter_validation_enabled(): return 'filter-validation' in (extensions.PluginAwareExtensionManager. 
get_instance().extensions) def get_filters(request, attr_info, skips=None, is_filter_validation_supported=False): return get_filters_from_dict(request.GET.dict_of_lists(), attr_info, skips, is_filter_validation_supported) def get_filters_from_dict(data, attr_info, skips=None, is_filter_validation_supported=False): """Extracts the filters from a dict of query parameters. Returns a dict of lists for the filters: check=a&check=b&name=Bob& becomes: {'check': [u'a', u'b'], 'name': [u'Bob']} """ attributes.populate_project_info(attr_info) is_empty_string_supported = is_empty_string_filtering_supported() skips = skips or [] res = {} invalid_keys = [] check_is_filter = False if is_filter_validation_supported and is_filter_validation_enabled(): check_is_filter = True for key, values in data.items(): if key in skips or hasattr(model_base.BASEV2, key): continue values = [v for v in values if v or (v == "" and is_empty_string_supported)] key_attr_info = attr_info.get(key, {}) if check_is_filter and not key_attr_info.get('is_filter'): invalid_keys.append(key) continue if 'convert_list_to' in key_attr_info: values = key_attr_info['convert_list_to'](values) elif 'convert_to' in key_attr_info: convert_to = key_attr_info['convert_to'] values = [convert_to(v) for v in values] if values: res[key] = values if invalid_keys: msg = _("%s is invalid attribute for filtering") % invalid_keys raise exc.HTTPBadRequest(explanation=msg) return res def is_empty_string_filtering_supported(): return 'empty-string-filtering' in (extensions.PluginAwareExtensionManager. get_instance().extensions) def get_previous_link(request, items, id_key): params = request.GET.copy() params.pop('marker', None) if items: marker = items[0][id_key] params['marker'] = marker params['page_reverse'] = True return "%s?%s" % (prepare_url(get_path_url(request)), urllib.parse.urlencode(params)) def get_next_link(request, items, id_key): params = request.GET.copy() params.pop('marker', None) if items: marker = items[-1][id_key] params['marker'] = marker params.pop('page_reverse', None) return "%s?%s" % (prepare_url(get_path_url(request)), urllib.parse.urlencode(params)) def prepare_url(orig_url): """Takes a link and swaps in network_link_prefix if set.""" prefix = cfg.CONF.network_link_prefix # Copied directly from nova/api/openstack/common.py if not prefix: return orig_url url_parts = list(urllib.parse.urlsplit(orig_url)) prefix_parts = list(urllib.parse.urlsplit(prefix)) url_parts[0:2] = prefix_parts[0:2] url_parts[2] = prefix_parts[2] + url_parts[2] return urllib.parse.urlunsplit(url_parts).rstrip('/') def get_path_url(request): """Return correct link if X-Forwarded-Proto exists in headers.""" protocol = request.headers.get('X-Forwarded-Proto') parsed = urllib.parse.urlparse(request.path_url) if protocol and parsed.scheme != protocol: new_parsed = urllib.parse.ParseResult( protocol, parsed.netloc, parsed.path, parsed.params, parsed.query, parsed.fragment) return urllib.parse.urlunparse(new_parsed) else: return request.path_url def get_limit_and_marker(request): """Return marker, limit tuple from request. :param request: `wsgi.Request` possibly containing 'marker' and 'limit' GET variables. 'marker' is the id of the last element the client has seen, and 'limit' is the maximum number of items to return. If limit == 0, it means we needn't pagination, then return None. 
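    Editorial worked example (illustrative config values): with
    pagination_max_limit=500, a request of limit=1000 is clamped to
    min(500, 1000) == 500, and limit=0 becomes min(500, 0) or 500 == 500;
    with an infinite max limit, limit=0 is returned as (None, None),
    i.e. pagination is disabled.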
""" max_limit = _get_pagination_max_limit() limit = _get_limit_param(request) if max_limit > 0: limit = min(max_limit, limit) or max_limit if not limit: return None, None marker = request.GET.get('marker', None) return limit, marker def _get_pagination_max_limit(): max_limit = -1 if (cfg.CONF.pagination_max_limit.lower() != constants.PAGINATION_INFINITE): try: max_limit = int(cfg.CONF.pagination_max_limit) if max_limit == 0: raise ValueError() except ValueError: LOG.warning("Invalid value for pagination_max_limit: %s. It " "should be an integer greater to 0", cfg.CONF.pagination_max_limit) return max_limit def _get_limit_param(request): """Extract integer limit from request or fail.""" limit = request.GET.get('limit', 0) try: limit = int(limit) if limit >= 0: return limit except ValueError: pass msg = _("Limit must be an integer 0 or greater and not '%s'") % limit raise exceptions.BadRequest(resource='limit', msg=msg) def list_args(request, arg): """Extracts the list of arg from request.""" return [v for v in request.GET.getall(arg) if v] def get_sorts(request, attr_info): """Extract sort_key and sort_dir from request. Return as: [(key1, value1), (key2, value2)] """ attributes.populate_project_info(attr_info) sort_keys = list_args(request, "sort_key") sort_dirs = list_args(request, "sort_dir") if len(sort_keys) != len(sort_dirs): msg = _("The number of sort_keys and sort_dirs must be same") raise exc.HTTPBadRequest(explanation=msg) valid_dirs = [constants.SORT_DIRECTION_ASC, constants.SORT_DIRECTION_DESC] valid_sort_keys = set(attr for attr, schema in attr_info.items() if schema.get('is_sort_key', False)) absent_keys = [x for x in sort_keys if x not in valid_sort_keys] if absent_keys: msg = _("%s is invalid attribute for sort_keys") % absent_keys raise exc.HTTPBadRequest(explanation=msg) invalid_dirs = [x for x in sort_dirs if x not in valid_dirs] if invalid_dirs: msg = (_("%(invalid_dirs)s is invalid value for sort_dirs, " "valid value is '%(asc)s' and '%(desc)s'") % {'invalid_dirs': invalid_dirs, 'asc': constants.SORT_DIRECTION_ASC, 'desc': constants.SORT_DIRECTION_DESC}) raise exc.HTTPBadRequest(explanation=msg) return list(zip(sort_keys, [x == constants.SORT_DIRECTION_ASC for x in sort_dirs])) def get_page_reverse(request): data = request.GET.get('page_reverse', 'False') return data.lower() == "true" def get_pagination_links(request, items, limit, marker, page_reverse, key="id"): key = key if key else 'id' links = [] if not limit: return links if not (len(items) < limit and not page_reverse): links.append({"rel": "next", "href": get_next_link(request, items, key)}) if not (len(items) < limit and page_reverse): links.append({"rel": "previous", "href": get_previous_link(request, items, key)}) return links def is_native_pagination_supported(plugin): native_pagination_attr_name = ("_%s__native_pagination_support" % plugin.__class__.__name__) return getattr(plugin, native_pagination_attr_name, False) def is_native_sorting_supported(plugin): native_sorting_attr_name = ("_%s__native_sorting_support" % plugin.__class__.__name__) return getattr(plugin, native_sorting_attr_name, False) def is_filter_validation_supported(plugin): filter_validation_attr_name = ("_%s__filter_validation_support" % plugin.__class__.__name__) return getattr(plugin, filter_validation_attr_name, False) class PaginationHelper(object): def __init__(self, request, primary_key='id'): self.request = request self.primary_key = primary_key def update_fields(self, original_fields, fields_to_add): pass def update_args(self, 
args): pass def paginate(self, items): return items def get_links(self, items): return {} class PaginationEmulatedHelper(PaginationHelper): def __init__(self, request, primary_key='id'): super(PaginationEmulatedHelper, self).__init__(request, primary_key) self.limit, self.marker = get_limit_and_marker(request) self.page_reverse = get_page_reverse(request) def update_fields(self, original_fields, fields_to_add): if not original_fields: return if self.primary_key not in original_fields: original_fields.append(self.primary_key) fields_to_add.append(self.primary_key) def paginate(self, items): if not self.limit: return items if not items: return [] # first, calculate the base index for pagination if self.marker: i = 0 for item in items: if item[self.primary_key] == self.marker: break i += 1 else: # if marker is not found, return nothing return [] else: i = len(items) if self.page_reverse else 0 if self.page_reverse: # don't wrap return items[max(i - self.limit, 0):i] else: if self.marker: # skip the matched marker i += 1 return items[i:i + self.limit] def get_links(self, items): return get_pagination_links( self.request, items, self.limit, self.marker, self.page_reverse, self.primary_key) class PaginationNativeHelper(PaginationEmulatedHelper): def update_args(self, args): if self.primary_key not in dict(args.get('sorts', [])).keys(): args.setdefault('sorts', []).append((self.primary_key, True)) args.update({'limit': self.limit, 'marker': self.marker, 'page_reverse': self.page_reverse}) def paginate(self, items): return items class NoPaginationHelper(PaginationHelper): pass class SortingHelper(object): def __init__(self, request, attr_info): pass def update_args(self, args): pass def update_fields(self, original_fields, fields_to_add): pass def sort(self, items): return items class SortingEmulatedHelper(SortingHelper): def __init__(self, request, attr_info): super(SortingEmulatedHelper, self).__init__(request, attr_info) self.sort_dict = get_sorts(request, attr_info) def update_fields(self, original_fields, fields_to_add): if not original_fields: return for key in dict(self.sort_dict).keys(): if key not in original_fields: original_fields.append(key) fields_to_add.append(key) def sort(self, items): def cmp_func(obj1, obj2): for key, direction in self.sort_dict: o1 = obj1[key] o2 = obj2[key] if o1 is None and o2 is None: ret = 0 elif o1 is None and o2 is not None: ret = -1 elif o1 is not None and o2 is None: ret = 1 else: ret = (o1 > o2) - (o1 < o2) if ret: return ret * (1 if direction else -1) return 0 return sorted(items, key=functools.cmp_to_key(cmp_func)) class SortingNativeHelper(SortingHelper): def __init__(self, request, attr_info): self.sort_dict = get_sorts(request, attr_info) def update_args(self, args): args['sorts'] = self.sort_dict class NoSortingHelper(SortingHelper): pass def convert_exception_to_http_exc(e, faults, language): serializer = wsgi.JSONDictSerializer() if isinstance(e, exceptions.MultipleExceptions): converted_exceptions = [ convert_exception_to_http_exc(inner, faults, language) for inner in e.inner_exceptions] # if no internal exceptions, will be handled as single exception if converted_exceptions: codes = {c.code for c in converted_exceptions} if len(codes) == 1: # all error codes are the same so we can maintain the code # and just concatenate the bodies joined_msg = "\n".join( (jsonutils.loads(c.body)['NeutronError']['message'] for c in converted_exceptions)) new_body = jsonutils.loads(converted_exceptions[0].body) new_body['NeutronError']['message'] = joined_msg 
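                # Editorial note: at this point a batch such as two
                # HTTP 404s ("port A not found", "port B not found") has
                # been collapsed into a single 404 whose 'message' is the
                # newline-joined concatenation; the shared status code is
                # preserved by reusing the first converted exception below.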
converted_exceptions[0].body = serializer.serialize(new_body) return converted_exceptions[0] else: # multiple error types so we turn it into a Conflict with the # inner codes and bodies packed in new_exception = exceptions.Conflict() inner_error_strings = [] for c in converted_exceptions: c_body = jsonutils.loads(c.body) err = ('HTTP %s %s: %s' % ( c.code, c_body['NeutronError']['type'], c_body['NeutronError']['message'])) inner_error_strings.append(err) new_exception.msg = "\n".join(inner_error_strings) return convert_exception_to_http_exc( new_exception, faults, language) e = translate(e, language) body = serializer.serialize( {'NeutronError': get_exception_data(e)}) kwargs = {'body': body, 'content_type': 'application/json'} if isinstance(e, exc.HTTPException): # already an HTTP error, just update with content type and body e.body = body e.content_type = kwargs['content_type'] return e faults_tuple = tuple(faults.keys()) + (exceptions.NeutronException,) if isinstance(e, faults_tuple): for fault in faults: if isinstance(e, fault): mapped_exc = faults[fault] break else: mapped_exc = exc.HTTPInternalServerError return mapped_exc(**kwargs) if isinstance(e, NotImplementedError): # NOTE(armando-migliaccio): from a client standpoint # it makes sense to receive these errors, because # extensions may or may not be implemented by # the underlying plugin. So if something goes south, # because a plugin does not implement a feature, # returning 500 is definitely confusing. kwargs['body'] = serializer.serialize( {'NotImplementedError': get_exception_data(e)}) return exc.HTTPNotImplemented(**kwargs) # NOTE(jkoelker) Everything else is 500 # Do not expose details of 500 error to clients. msg = _('Request Failed: internal server error while ' 'processing your request.') msg = translate(msg, language) kwargs['body'] = serializer.serialize( {'NeutronError': get_exception_data(exc.HTTPInternalServerError(msg))}) return exc.HTTPInternalServerError(**kwargs) def get_exception_data(e): """Extract the information about an exception. Neutron client for the v2 API expects exceptions to have 'type', 'message' and 'detail' attributes.This information is extracted and converted into a dictionary. :param e: the exception to be reraised :returns: a structured dict with the exception data """ err_data = {'type': e.__class__.__name__, 'message': e, 'detail': ''} return err_data def translate(translatable, locale): """Translates the object to the given locale. If the object is an exception its translatable elements are translated in place, if the object is a translatable string it is translated and returned. Otherwise, the object is returned as-is. :param translatable: the object to be translated :param locale: the locale to translate to :returns: the translated object, or the object as-is if it was not translated """ localize = oslo_i18n.translate if isinstance(translatable, exceptions.NeutronException): translatable.msg = localize(translatable.msg, locale) elif isinstance(translatable, exc.HTTPError): translatable.detail = localize(translatable.detail, locale) elif isinstance(translatable, Exception): translatable.message = localize(translatable, locale) else: return localize(translatable, locale) return translatable ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/api/extensions.py0000644000175000017500000006175600000000000022176 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import imp import os from neutron_lib.api import extensions as api_extensions from neutron_lib import exceptions from neutron_lib.plugins import directory from oslo_config import cfg from oslo_log import log as logging from oslo_middleware import base import routes import webob.dec import webob.exc from neutron._i18n import _ from neutron import extensions as core_extensions from neutron.plugins.common import constants as const from neutron.services import provider_configuration from neutron import wsgi LOG = logging.getLogger(__name__) EXTENSION_SUPPORTED_CHECK_MAP = {} _PLUGIN_AGNOSTIC_EXTENSIONS = set() def register_custom_supported_check(alias, f, plugin_agnostic=False): '''Register a custom function to determine if extension is supported. Consequent calls for the same alias replace the registered function. :param alias: API extension alias name :param f: custom check function that returns True if extension is supported :param plugin_agnostic: if False, don't require a plugin to claim support with supported_extension_aliases. If True, a plugin must claim the extension is supported. ''' EXTENSION_SUPPORTED_CHECK_MAP[alias] = f if plugin_agnostic: _PLUGIN_AGNOSTIC_EXTENSIONS.add(alias) class ActionExtensionController(wsgi.Controller): def __init__(self, application): self.application = application self.action_handlers = {} def add_action(self, action_name, handler): self.action_handlers[action_name] = handler def action(self, request, id): input_dict = self._deserialize(request.body, request.get_content_type()) for action_name, handler in self.action_handlers.items(): if action_name in input_dict: return handler(input_dict, request, id) # no action handler found (bump to downstream application) response = self.application return response class RequestExtensionController(wsgi.Controller): def __init__(self, application): self.application = application self.handlers = [] def add_handler(self, handler): self.handlers.append(handler) def process(self, request, *args, **kwargs): res = request.get_response(self.application) # currently request handlers are un-ordered for handler in self.handlers: response = handler(request, res) return response class ExtensionController(wsgi.Controller): def __init__(self, extension_manager): self.extension_manager = extension_manager @staticmethod def _translate(ext): ext_data = {} ext_data['name'] = ext.get_name() ext_data['alias'] = ext.get_alias() ext_data['description'] = ext.get_description() ext_data['updated'] = ext.get_updated() ext_data['links'] = [] # TODO(dprince): implement extension links return ext_data def index(self, request): extensions = [] for _alias, ext in self.extension_manager.extensions.items(): extensions.append(self._translate(ext)) return dict(extensions=extensions) def show(self, request, id): # NOTE(dprince): the extensions alias is used as the 'id' for show ext = self.extension_manager.extensions.get(id, None) if not ext: raise webob.exc.HTTPNotFound( _("Extension with alias %s 
does not exist") % id) return dict(extension=self._translate(ext)) def delete(self, request, id): msg = _('Resource not found.') raise webob.exc.HTTPNotFound(msg) def create(self, request): msg = _('Resource not found.') raise webob.exc.HTTPNotFound(msg) class ExtensionMiddleware(base.ConfigurableMiddleware): """Extensions middleware for WSGI.""" def __init__(self, application, ext_mgr=None): self.ext_mgr = (ext_mgr or ExtensionManager(get_extensions_path())) mapper = routes.Mapper() # extended resources for resource in self.ext_mgr.get_resources(): path_prefix = resource.path_prefix if resource.parent: path_prefix = (resource.path_prefix + "/%s/{%s_id}" % (resource.parent["collection_name"], resource.parent["member_name"])) LOG.debug('Extended resource: %s', resource.collection) for action, method in resource.collection_actions.items(): conditions = dict(method=[method]) path = "/%s/%s" % (resource.collection, action) with mapper.submapper(controller=resource.controller, action=action, path_prefix=path_prefix, conditions=conditions) as submap: submap.connect(path_prefix + path, path) submap.connect(path_prefix + path + "_format", "%s.:(format)" % path) for action, method in resource.collection_methods.items(): conditions = dict(method=[method]) path = "/%s" % resource.collection with mapper.submapper(controller=resource.controller, action=action, path_prefix=path_prefix, conditions=conditions) as submap: submap.connect(path_prefix + path, path) submap.connect(path_prefix + path + "_format", "%s.:(format)" % path) mapper.resource(resource.collection, resource.collection, controller=resource.controller, member=resource.member_actions, parent_resource=resource.parent, path_prefix=path_prefix) # extended actions action_controllers = self._action_ext_controllers(application, self.ext_mgr, mapper) for action in self.ext_mgr.get_actions(): LOG.debug('Extended action: %s', action.action_name) controller = action_controllers[action.collection] controller.add_action(action.action_name, action.handler) # extended requests req_controllers = self._request_ext_controllers(application, self.ext_mgr, mapper) for request_ext in self.ext_mgr.get_request_extensions(): LOG.debug('Extended request: %s', request_ext.key) controller = req_controllers[request_ext.key] controller.add_handler(request_ext.handler) self._router = routes.middleware.RoutesMiddleware(self._dispatch, mapper) super(ExtensionMiddleware, self).__init__(application) @classmethod def factory(cls, global_config, **local_config): """Paste factory.""" def _factory(app): return cls(app, global_config, **local_config) return _factory def _action_ext_controllers(self, application, ext_mgr, mapper): """Return a dict of ActionExtensionController-s by collection.""" action_controllers = {} for action in ext_mgr.get_actions(): if action.collection not in action_controllers.keys(): controller = ActionExtensionController(application) mapper.connect("/%s/:(id)/action.:(format)" % action.collection, action='action', controller=controller, conditions=dict(method=['POST'])) mapper.connect("/%s/:(id)/action" % action.collection, action='action', controller=controller, conditions=dict(method=['POST'])) action_controllers[action.collection] = controller return action_controllers def _request_ext_controllers(self, application, ext_mgr, mapper): """Returns a dict of RequestExtensionController-s by collection.""" request_ext_controllers = {} for req_ext in ext_mgr.get_request_extensions(): if req_ext.key not in request_ext_controllers.keys(): controller = 
RequestExtensionController(application) mapper.connect(req_ext.url_route + '.:(format)', action='process', controller=controller, conditions=req_ext.conditions) mapper.connect(req_ext.url_route, action='process', controller=controller, conditions=req_ext.conditions) request_ext_controllers[req_ext.key] = controller return request_ext_controllers @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): """Route the incoming request with router.""" req.environ['extended.app'] = self.application return self._router @staticmethod @webob.dec.wsgify(RequestClass=wsgi.Request) def _dispatch(req): """Dispatch the request. Returns the routed WSGI app's response or defers to the extended application. """ match = req.environ['wsgiorg.routing_args'][1] if not match: return req.environ['extended.app'] app = match['controller'] return app def plugin_aware_extension_middleware_factory(global_config, **local_config): """Paste factory.""" def _factory(app): ext_mgr = PluginAwareExtensionManager.get_instance() return ExtensionMiddleware(app, ext_mgr=ext_mgr) return _factory class ExtensionManager(object): """Load extensions from the configured extension path. See tests/unit/extensions/foxinsocks.py for an example extension implementation. """ def __init__(self, path): LOG.info('Initializing extension manager.') self.path = path self.extensions = {} self._load_all_extensions() def get_resources(self): """Returns a list of ResourceExtension objects.""" resources = [] resources.append(ResourceExtension('extensions', ExtensionController(self))) for ext in self.extensions.values(): resources.extend(ext.get_resources()) return resources def get_pecan_resources(self): """Returns a list of PecanResourceExtension objects.""" resources = [] for ext in self.extensions.values(): resources.extend(ext.get_pecan_resources()) return resources def get_actions(self): """Returns a list of ActionExtension objects.""" actions = [] for ext in self.extensions.values(): actions.extend(ext.get_actions()) return actions def get_request_extensions(self): """Returns a list of RequestExtension objects.""" request_exts = [] for ext in self.extensions.values(): request_exts.extend(ext.get_request_extensions()) return request_exts def extend_resources(self, version, attr_map): """Extend resources with additional resources or attributes. :param attr_map: the existing mapping from resource name to attrs definition. After this function, we will extend the attr_map if an extension wants to extend this map. 
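        Editorial example (hypothetical extension data): an extension
        whose get_extended_resources() returns
        {'ports': {'new_attr': {'allow_post': True}}} makes
        attr_map['ports'] gain 'new_attr' here; if the target entry is a
        sub-resource (it has a 'parent' dict and 'parameters'), only its
        'parameters' mapping is updated in place so the parent resource's
        description is kept unmodified.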
""" processed_exts = {} exts_to_process = self.extensions.copy() check_optionals = True # Iterate until there are unprocessed extensions or if no progress # is made in a whole iteration while exts_to_process: processed_ext_count = len(processed_exts) for ext_name, ext in list(exts_to_process.items()): # Process extension only if all required extensions # have been processed already required_exts_set = set(ext.get_required_extensions()) if required_exts_set - set(processed_exts): continue optional_exts_set = set(ext.get_optional_extensions()) if check_optionals and optional_exts_set - set(processed_exts): continue extended_attrs = ext.get_extended_resources(version) for res, resource_attrs in extended_attrs.items(): res_to_update = attr_map.setdefault(res, {}) if self._is_sub_resource(res_to_update): # in the case of an existing sub-resource, we need to # update the parameters content rather than overwrite # it, and also keep the description of the parent # resource unmodified res_to_update['parameters'].update( resource_attrs['parameters']) else: res_to_update.update(resource_attrs) processed_exts[ext_name] = ext del exts_to_process[ext_name] if len(processed_exts) == processed_ext_count: # if we hit here, it means there are unsatisfied # dependencies. try again without optionals since optionals # are only necessary to set order if they are present. if check_optionals: check_optionals = False continue # Exit loop as no progress was made break if exts_to_process: unloadable_extensions = set(exts_to_process.keys()) LOG.error("Unable to process extensions (%s) because " "the configured plugins do not satisfy " "their requirements. Some features will not " "work as expected.", ', '.join(unloadable_extensions)) self._check_faulty_extensions(unloadable_extensions) # Extending extensions' attributes map. for ext in processed_exts.values(): ext.update_attributes_map(attr_map) def _is_sub_resource(self, resource): return ('parent' in resource and isinstance(resource['parent'], dict) and 'member_name' in resource['parent'] and 'parameters' in resource) def _check_faulty_extensions(self, faulty_extensions): """Raise for non-default faulty extensions. Gracefully fail for defective default extensions, which will be removed from the list of loaded extensions. """ default_extensions = set(const.DEFAULT_SERVICE_PLUGINS.values()) if not faulty_extensions <= default_extensions: raise exceptions.ExtensionsNotFound( extensions=list(faulty_extensions)) else: # Remove the faulty extensions so that they do not show during # ext-list for ext in faulty_extensions: try: del self.extensions[ext] except KeyError: pass def _check_extension(self, extension): """Checks for required methods in extension objects.""" try: LOG.debug('Ext name="%(name)s" alias="%(alias)s" ' 'description="%(desc)s" updated="%(updated)s"', {'name': extension.get_name(), 'alias': extension.get_alias(), 'desc': extension.get_description(), 'updated': extension.get_updated()}) except AttributeError: LOG.exception("Exception loading extension") return False return isinstance(extension, api_extensions.ExtensionDescriptor) def _load_all_extensions(self): """Load extensions from the configured path. The extension name is constructed from the module_name. If your extension module is named widgets.py, the extension class within that module should be 'Widgets'. See tests/unit/extensions/foxinsocks.py for an example extension implementation. 
""" for path in self.path.split(':'): if os.path.exists(path): self._load_all_extensions_from_path(path) else: LOG.error("Extension path '%s' doesn't exist!", path) def _load_all_extensions_from_path(self, path): # Sorting the extension list makes the order in which they # are loaded predictable across a cluster of load-balanced # Neutron Servers for f in sorted(os.listdir(path)): try: LOG.debug('Loading extension file: %s', f) mod_name, file_ext = os.path.splitext(os.path.split(f)[-1]) ext_path = os.path.join(path, f) if file_ext.lower() == '.py' and not mod_name.startswith('_'): mod = imp.load_source(mod_name, ext_path) ext_name = mod_name.capitalize() new_ext_class = getattr(mod, ext_name, None) if not new_ext_class: LOG.warning('Did not find expected name ' '"%(ext_name)s" in %(file)s', {'ext_name': ext_name, 'file': ext_path}) continue new_ext = new_ext_class() self.add_extension(new_ext) except Exception as exception: LOG.warning("Extension file %(f)s wasn't loaded due to " "%(exception)s", {'f': f, 'exception': exception}) def add_extension(self, ext): # Do nothing if the extension doesn't check out if not self._check_extension(ext): return alias = ext.get_alias() LOG.info('Loaded extension: %s', alias) if alias in self.extensions: raise exceptions.DuplicatedExtension(alias=alias) self.extensions[alias] = ext class PluginAwareExtensionManager(ExtensionManager): _instance = None def __init__(self, path, plugins): self.plugins = plugins super(PluginAwareExtensionManager, self).__init__(path) self.check_if_plugin_extensions_loaded() def _check_extension(self, extension): """Check if an extension is supported by any plugin.""" extension_is_valid = super(PluginAwareExtensionManager, self)._check_extension(extension) if not extension_is_valid: return False alias = extension.get_alias() if alias in EXTENSION_SUPPORTED_CHECK_MAP: return EXTENSION_SUPPORTED_CHECK_MAP[alias]() return (self._plugins_support(extension) and self._plugins_implement_interface(extension)) def _plugins_support(self, extension): alias = extension.get_alias() supports_extension = alias in self.get_supported_extension_aliases() if not supports_extension: LOG.info("Extension %s not supported by any of loaded " "plugins", alias) return supports_extension def _plugins_implement_interface(self, extension): if extension.get_plugin_interface() is None: return True for plugin in self.plugins.values(): if isinstance(plugin, extension.get_plugin_interface()): return True LOG.warning("Loaded plugins do not implement extension " "%s interface", extension.get_alias()) return False @classmethod def get_instance(cls): if cls._instance is None: service_plugins = directory.get_plugins() cls._instance = cls(get_extensions_path(service_plugins), service_plugins) return cls._instance def get_plugin_supported_extension_aliases(self, plugin): """Return extension aliases supported by a given plugin""" aliases = set() # we also check all classes that the plugins inherit to see if they # directly provide support for an extension for item in [plugin] + plugin.__class__.mro(): try: aliases |= set( getattr(item, "supported_extension_aliases", [])) except TypeError: # we land here if a class has a @property decorator for # supported extension aliases. They only work on objects. 
pass return aliases def get_supported_extension_aliases(self): """Gets extension aliases supported by all plugins.""" aliases = set() for plugin in self.plugins.values(): aliases |= self.get_plugin_supported_extension_aliases(plugin) aliases |= { alias for alias, func in EXTENSION_SUPPORTED_CHECK_MAP.items() if func() } return aliases @classmethod def clear_instance(cls): cls._instance = None def check_if_plugin_extensions_loaded(self): """Check if an extension supported by a plugin has been loaded.""" plugin_extensions = self.get_supported_extension_aliases() missing_aliases = plugin_extensions - set(self.extensions) missing_aliases -= _PLUGIN_AGNOSTIC_EXTENSIONS if missing_aliases: raise exceptions.ExtensionsNotFound( extensions=list(missing_aliases)) class RequestExtension(object): """Extend requests and responses of core Neutron OpenStack API controllers. Provide a way to add data to responses and handle custom request data that is sent to core Neutron OpenStack API controllers. """ def __init__(self, method, url_route, handler): self.url_route = url_route self.handler = handler self.conditions = dict(method=[method]) self.key = "%s-%s" % (method, url_route) class ActionExtension(object): """Add custom actions to core Neutron OpenStack API controllers.""" def __init__(self, collection, action_name, handler): self.collection = collection self.action_name = action_name self.handler = handler class ResourceExtension(object): """Add top level resources to the OpenStack API in Neutron.""" def __init__(self, collection, controller, parent=None, path_prefix="", collection_actions=None, member_actions=None, attr_map=None, collection_methods=None): collection_actions = collection_actions or {} collection_methods = collection_methods or {} member_actions = member_actions or {} attr_map = attr_map or {} self.collection = collection self.controller = controller self.parent = parent self.collection_actions = collection_actions self.collection_methods = collection_methods self.member_actions = member_actions self.path_prefix = path_prefix self.attr_map = attr_map # Returns the extension paths from a config entry and the __path__ # of neutron.extensions def get_extensions_path(service_plugins=None): paths = collections.OrderedDict() # Add Neutron core extensions paths[core_extensions.__path__[0]] = 1 if service_plugins: # Add Neutron *-aas extensions for plugin in service_plugins.values(): neutron_mod = provider_configuration.NeutronModule( plugin.__module__.split('.')[0]) try: paths[neutron_mod.module().extensions.__path__[0]] = 1 except AttributeError: # Occurs normally if module has no extensions sub-module pass # Add external/other plugins extensions if cfg.CONF.api_extensions_path: for path in cfg.CONF.api_extensions_path.split(":"): paths[path] = 1 LOG.debug("get_extension_paths = %s", paths) # Re-build the extension string path = ':'.join(paths) return path def append_api_extensions_path(paths): paths = list(set([cfg.CONF.api_extensions_path] + paths)) cfg.CONF.set_override('api_extensions_path', ':'.join([p for p in paths if p])) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2070436 neutron-16.0.0.0b2.dev214/neutron/api/rpc/0000755000175000017500000000000000000000000020172 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/api/rpc/__init__.py0000644000175000017500000000000000000000000022271 
0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2110438
neutron-16.0.0.0b2.dev214/neutron/api/rpc/agentnotifiers/0000755000175000017500000000000000000000000023213 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/neutron/api/rpc/agentnotifiers/__init__.py0000644000175000017500000000000000000000000025312 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py0000644000175000017500000003713600000000000027370 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import random

from neutron_lib.agent import topics
from neutron_lib.api import extensions
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants
from neutron_lib.plugins import directory
from neutron_lib import rpc as n_rpc
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging

# Priorities - lower value is higher priority
PRIORITY_NETWORK_CREATE = 0
PRIORITY_NETWORK_UPDATE = 1
PRIORITY_NETWORK_DELETE = 2
PRIORITY_SUBNET_UPDATE = 3
PRIORITY_SUBNET_DELETE = 4
# To improve port DHCP provisioning when nova concurrently creates multiple
# VMs, the port_create_end message is classified into two levels: the
# high-priority message is cast to a single agent, while the low-priority
# message is cast to all of the other agents. When a large number of ports
# need to be processed, the high-priority port messages are dispatched to
# different agents, so a port being processed does not block the processing
# of other ports on other DHCP agents.
PRIORITY_PORT_CREATE_HIGH = 5
PRIORITY_PORT_CREATE_LOW = 6
PRIORITY_PORT_UPDATE = 6
PRIORITY_PORT_DELETE = 6

METHOD_PRIORITY_MAP = {
    'network_create_end': PRIORITY_NETWORK_CREATE,
    'network_update_end': PRIORITY_NETWORK_UPDATE,
    'network_delete_end': PRIORITY_NETWORK_DELETE,
    'subnet_create_end': PRIORITY_SUBNET_UPDATE,
    'subnet_update_end': PRIORITY_SUBNET_UPDATE,
    'subnet_delete_end': PRIORITY_SUBNET_DELETE,
    'port_create_end': PRIORITY_PORT_CREATE_LOW,
    'port_update_end': PRIORITY_PORT_UPDATE,
    'port_delete_end': PRIORITY_PORT_DELETE
}

LOG = logging.getLogger(__name__)


class DhcpAgentNotifyAPI(object):
    """API for plugin to notify DHCP agent.

    This class implements the client side of an rpc interface.  The server
    side is neutron.agent.dhcp.agent.DhcpAgent.  For more information about
    changing rpc interfaces, please see
    doc/source/contributor/internals/rpc_api.rst.
""" # It seems dhcp agent does not support bulk operation VALID_RESOURCES = ['network', 'subnet', 'port'] VALID_METHOD_NAMES = ['network.create.end', 'network.update.end', 'network.delete.end', 'subnet.create.end', 'subnet.update.end', 'subnet.delete.end', 'port.create.end', 'port.update.end', 'port.delete.end'] def __init__(self, topic=topics.DHCP_AGENT, plugin=None): self._unsubscribed_resources = [] self._plugin = plugin target = oslo_messaging.Target(topic=topic, version='1.0') self.client = n_rpc.get_client(target) # register callbacks for router interface changes registry.subscribe(self._after_router_interface_created, resources.ROUTER_INTERFACE, events.AFTER_CREATE) registry.subscribe(self._after_router_interface_deleted, resources.ROUTER_INTERFACE, events.AFTER_DELETE) # register callbacks for events pertaining resources affecting DHCP callback_resources = ( resources.NETWORK, resources.NETWORKS, resources.PORT, resources.PORTS, resources.SUBNET, resources.SUBNETS, ) if not cfg.CONF.dhcp_agent_notification: return for resource in callback_resources: registry.subscribe(self._send_dhcp_notification, resource, events.BEFORE_RESPONSE) self.uses_native_notifications = {} for resource in (resources.NETWORK, resources.PORT, resources.SUBNET): self.uses_native_notifications[resource] = {'create': False, 'update': False, 'delete': False} registry.subscribe(self._native_event_send_dhcp_notification, resource, events.AFTER_CREATE) registry.subscribe(self._native_event_send_dhcp_notification, resource, events.AFTER_UPDATE) registry.subscribe(self._native_event_send_dhcp_notification, resource, events.AFTER_DELETE) @property def plugin(self): if self._plugin is None: self._plugin = directory.get_plugin() return self._plugin def _schedule_network(self, context, network, existing_agents): """Schedule the network to new agents :return: all agents associated with the network """ new_agents = self.plugin.schedule_network(context, network) or [] if new_agents: for agent in new_agents: self._cast_message( context, 'network_create_end', {'network': {'id': network['id']}, 'priority': PRIORITY_NETWORK_CREATE}, agent['host']) elif not existing_agents: LOG.warning('Unable to schedule network %s: no agents ' 'available; will retry on subsequent port ' 'and subnet creation events.', network['id']) return new_agents + existing_agents def _get_enabled_agents(self, context, network, agents, method, payload): """Get the list of agents who can provide services.""" if not agents: return [] network_id = network['id'] enabled_agents = agents if not cfg.CONF.enable_services_on_agents_with_admin_state_down: enabled_agents = [x for x in agents if x.admin_state_up] active_agents = [x for x in agents if x.is_active] len_enabled_agents = len(enabled_agents) len_active_agents = len(active_agents) if len_active_agents < len_enabled_agents: LOG.warning("Only %(active)d of %(total)d DHCP agents " "associated with network '%(net_id)s' " "are marked as active, so notifications " "may be sent to inactive agents.", {'active': len_active_agents, 'total': len_enabled_agents, 'net_id': network_id}) if not enabled_agents: num_ports = self.plugin.get_ports_count( context, {'network_id': [network_id]}) notification_required = ( num_ports > 0 and len(network['subnets']) >= 1) if notification_required: LOG.error("Will not send event %(method)s for network " "%(net_id)s: no agent available. 
Payload: " "%(payload)s", {'method': method, 'net_id': network_id, 'payload': payload}) return enabled_agents def _is_reserved_dhcp_port(self, port): return port.get('device_id') == constants.DEVICE_ID_RESERVED_DHCP_PORT def _notify_agents(self, context, method, payload, network_id): """Notify all the agents that are hosting the network.""" payload['priority'] = METHOD_PRIORITY_MAP.get(method) # fanout is required as we do not know who is "listening" no_agents = not extensions.is_extension_supported( self.plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS) fanout_required = method == 'network_delete_end' or no_agents # we do nothing on network creation because we want to give the # admin the chance to associate an agent to the network manually cast_required = method != 'network_create_end' if fanout_required: self._fanout_message(context, method, payload) elif cast_required: admin_ctx = (context if context.is_admin else context.elevated()) network = self.plugin.get_network(admin_ctx, network_id) if 'subnet' in payload and payload['subnet'].get('segment_id'): # if segment_id exists then the segment service plugin # must be loaded segment_plugin = directory.get_plugin('segments') segment = segment_plugin.get_segment( context, payload['subnet']['segment_id']) network['candidate_hosts'] = segment['hosts'] agents = self.plugin.get_dhcp_agents_hosting_networks( context, [network_id], hosts=network.get('candidate_hosts')) # schedule the network first, if needed schedule_required = ( method == 'subnet_create_end' or method == 'port_create_end' and not self._is_reserved_dhcp_port(payload['port'])) if schedule_required: agents = self._schedule_network(admin_ctx, network, agents) if not agents: LOG.debug("Network %s is not hosted by any dhcp agent", network_id) return enabled_agents = self._get_enabled_agents( context, network, agents, method, payload) if method == 'port_create_end': high_agent = enabled_agents.pop( random.randint(0, len(enabled_agents) - 1)) self._notify_high_priority_agent( context, copy.deepcopy(payload), high_agent) for agent in enabled_agents: self._cast_message( context, method, payload, agent.host, agent.topic) def _notify_high_priority_agent(self, context, payload, agent): payload['priority'] = PRIORITY_PORT_CREATE_HIGH self._cast_message(context, "port_create_end", payload, agent.host, agent.topic) def _cast_message(self, context, method, payload, host, topic=topics.DHCP_AGENT): """Cast the payload to the dhcp agent running on the host.""" cctxt = self.client.prepare(topic=topic, server=host) cctxt.cast(context, method, payload=payload) def _fanout_message(self, context, method, payload): """Fanout the payload to all dhcp agents.""" cctxt = self.client.prepare(fanout=True) cctxt.cast(context, method, payload=payload) def network_removed_from_agent(self, context, network_id, host): self._cast_message(context, 'network_delete_end', {'network_id': network_id, 'priority': PRIORITY_NETWORK_DELETE}, host) def network_added_to_agent(self, context, network_id, host): self._cast_message(context, 'network_create_end', {'network': {'id': network_id}, 'priority': PRIORITY_NETWORK_CREATE}, host) def agent_updated(self, context, admin_state_up, host): self._cast_message(context, 'agent_updated', {'admin_state_up': admin_state_up}, host) def _after_router_interface_created(self, resource, event, trigger, **kwargs): self._notify_agents(kwargs['context'], 'port_create_end', {'port': kwargs['port']}, kwargs['port']['network_id']) def _after_router_interface_deleted(self, resource, event, 
trigger, **kwargs): self._notify_agents(kwargs['context'], 'port_delete_end', {'port_id': kwargs['port']['id']}, kwargs['port']['network_id']) def _native_event_send_dhcp_notification(self, resource, event, trigger, context, **kwargs): action = event.replace('after_', '') # we unsubscribe the _send_dhcp_notification method now that we know # the loaded core plugin emits native resource events if resource not in self._unsubscribed_resources: self.uses_native_notifications[resource][action] = True if all(self.uses_native_notifications[resource].values()): # only unsubscribe the API level listener if we are # receiving all event types for this resource self._unsubscribed_resources.append(resource) registry.unsubscribe_by_resource(self._send_dhcp_notification, resource) method_name = '.'.join((resource, action, 'end')) payload = kwargs[resource] data = {resource: payload} if resource == resources.PORT: if self._only_status_changed(kwargs.get('original_port'), kwargs.get('port')): # don't waste time updating the DHCP agent for status updates return self.notify(context, data, method_name) def _only_status_changed(self, orig, new): # a status change will manifest as a bumped revision number, a new # updated_at timestamp, and a new status. If that's all that changed, # return True, else False if not orig or not new: return False if set(orig.keys()) != set(new.keys()): return False for k in orig.keys(): if k in ('status', 'updated_at', 'revision_number'): continue if orig[k] != new[k]: return False return True def _send_dhcp_notification(self, resource, event, trigger, payload=None): action = payload.action.split('_')[0] if (resource in self.uses_native_notifications and self.uses_native_notifications[resource][action]): return data = payload.latest_state if payload.collection_name and payload.collection_name in data: for body in data[payload.collection_name]: item = {resource: body} self.notify(payload.context, item, payload.method_name) else: self.notify(payload.context, data, payload.method_name) def notify(self, context, data, method_name): # data is {'key' : 'value'} with only one key if method_name not in self.VALID_METHOD_NAMES: return obj_type = list(data.keys())[0] if obj_type not in self.VALID_RESOURCES: return obj_value = data[obj_type] network_id = None if obj_type == 'network' and 'id' in obj_value: network_id = obj_value['id'] elif obj_type in ['port', 'subnet'] and 'network_id' in obj_value: network_id = obj_value['network_id'] if not network_id: return method_name = method_name.replace(".", "_") if method_name.endswith("_delete_end"): if 'id' in obj_value: payload = {obj_type + '_id': obj_value['id']} if obj_type != 'network': payload['network_id'] = network_id self._notify_agents(context, method_name, payload, network_id) else: self._notify_agents(context, method_name, data, network_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py0000644000175000017500000001657000000000000026767 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import random from neutron_lib.agent import topics from neutron_lib.api import extensions from neutron_lib import constants from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from neutron_lib import rpc as n_rpc from oslo_log import log as logging import oslo_messaging from neutron.api.rpc.agentnotifiers import utils as ag_utils LOG = logging.getLogger(__name__) # default messaging timeout is 60 sec, so 2 here is chosen to not block API # call for more than 2 minutes AGENT_NOTIFY_MAX_ATTEMPTS = 2 class L3AgentNotifyAPI(object): """API for plugin to notify L3 agent.""" def __init__(self, topic=topics.L3_AGENT): target = oslo_messaging.Target(topic=topic, version='1.0') self.client = n_rpc.get_client(target) def _notification_host(self, context, method, host, use_call=False, **kwargs): """Notify the agent that is hosting the router.""" LOG.debug('Notify agent at %(host)s the message ' '%(method)s', {'host': host, 'method': method}) cctxt = self.client.prepare(server=host) rpc_method = (ag_utils.retry(cctxt.call, AGENT_NOTIFY_MAX_ATTEMPTS) if use_call else cctxt.cast) rpc_method(context, method, **kwargs) def _agent_notification(self, context, method, router_ids, operation, shuffle_agents): """Notify changed routers to hosting l3 agents.""" adminContext = context if context.is_admin else context.elevated() plugin = directory.get_plugin(plugin_constants.L3) for router_id in router_ids: hosts = plugin.get_hosts_to_notify(adminContext, router_id) if shuffle_agents: random.shuffle(hosts) for host in hosts: LOG.debug('Notify agent at %(topic)s.%(host)s the message ' '%(method)s', {'topic': topics.L3_AGENT, 'host': host, 'method': method}) cctxt = self.client.prepare(topic=topics.L3_AGENT, server=host, version='1.1') cctxt.cast(context, method, routers=[router_id]) def _agent_notification_arp(self, context, method, router_id, operation, data): """Notify arp details to l3 agents hosting router.""" if not router_id: return dvr_arptable = {'router_id': router_id, 'arp_table': data} LOG.debug('Fanout dvr_arptable update: %s', dvr_arptable) cctxt = self.client.prepare(fanout=True, version='1.2') cctxt.cast(context, method, payload=dvr_arptable) def _notification(self, context, method, router_ids, operation, shuffle_agents, schedule_routers=True): """Notify all the agents that are hosting the routers.""" plugin = directory.get_plugin(plugin_constants.L3) if not plugin: LOG.error('No plugin for L3 routing registered. Cannot notify ' 'agents with the message %s', method) return if extensions.is_extension_supported( plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS): adminContext = (context.is_admin and context or context.elevated()) if schedule_routers: plugin.schedule_routers(adminContext, router_ids) self._agent_notification( context, method, router_ids, operation, shuffle_agents) else: cctxt = self.client.prepare(fanout=True) cctxt.cast(context, method, routers=router_ids) def _notification_fanout(self, context, method, router_id=None, **kwargs): """Fanout the information to all L3 agents. 
This function will fanout the router_id or ext_net_id to the L3 Agents. """ ext_net_id = kwargs.get('ext_net_id') if router_id: kwargs['router_id'] = router_id LOG.debug('Fanout notify agent at %(topic)s the message ' '%(method)s on router %(router_id)s', {'topic': topics.L3_AGENT, 'method': method, 'router_id': router_id}) if ext_net_id: LOG.debug('Fanout notify agent at %(topic)s the message ' '%(method)s for external_network %(ext_net_id)s', {'topic': topics.L3_AGENT, 'method': method, 'ext_net_id': ext_net_id}) cctxt = self.client.prepare(fanout=True) cctxt.cast(context, method, **kwargs) def agent_updated(self, context, admin_state_up, host): self._notification_host(context, 'agent_updated', host, payload={'admin_state_up': admin_state_up}) def router_deleted(self, context, router_id): self._notification_fanout(context, 'router_deleted', router_id) def routers_updated(self, context, router_ids, operation=None, data=None, shuffle_agents=False, schedule_routers=True): if router_ids: self._notification(context, 'routers_updated', router_ids, operation, shuffle_agents, schedule_routers) def add_arp_entry(self, context, router_id, arp_table, operation=None): self._agent_notification_arp(context, 'add_arp_entry', router_id, operation, arp_table) def del_arp_entry(self, context, router_id, arp_table, operation=None): self._agent_notification_arp(context, 'del_arp_entry', router_id, operation, arp_table) def delete_fipnamespace_for_ext_net(self, context, ext_net_id): self._notification_fanout( context, 'fipnamespace_delete_on_ext_net', ext_net_id=ext_net_id) def router_removed_from_agent(self, context, router_id, host): self._notification_host(context, 'router_removed_from_agent', host, payload={'router_id': router_id}) def router_added_to_agent(self, context, router_ids, host): # need to use call here as we want to be sure agent received # notification and router will not be "lost". However using call() # itself is not a guarantee, calling code should handle exceptions and # retry self._notification_host(context, 'router_added_to_agent', host, use_call=True, payload=router_ids) def routers_updated_on_host(self, context, router_ids, host): self._notification_host(context, 'routers_updated', host, routers=router_ids) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py0000644000175000017500000001146500000000000030261 0ustar00coreycorey00000000000000# Copyright (C) 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
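# Illustrative usage sketch (an addition, not original code) of the
# retry-on-call pattern used by L3AgentNotifyAPI._notification_host above:
# when use_call=True, the synchronous RPC is retried up to
# AGENT_NOTIFY_MAX_ATTEMPTS times on MessagingException before re-raising.
# The client, context and payload names below are hypothetical.
from neutron.api.rpc.agentnotifiers import utils as ag_utils


def notify_host_with_retry(client, context, host, router_ids):
    # Target the agent on one specific host; retry() wraps cctxt.call so a
    # transient messaging failure triggers another attempt.
    cctxt = client.prepare(server=host)
    rpc_call = ag_utils.retry(cctxt.call, max_attempts=2)
    return rpc_call(context, 'router_added_to_agent', payload=router_ids)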
from neutron_lib.agent import topics from neutron_lib.api import extensions from neutron_lib import constants from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from neutron_lib import rpc as n_rpc from oslo_log import log as logging import oslo_messaging from neutron.db import agentschedulers_db LOG = logging.getLogger(__name__) class MeteringAgentNotifyAPI(object): """API for plugin to notify L3 metering agent.""" def __init__(self, topic=topics.METERING_AGENT): self.topic = topic target = oslo_messaging.Target(topic=topic, version='1.0') self.client = n_rpc.get_client(target) def _agent_notification(self, context, method, routers): """Notify l3 metering agents hosted by l3 agent hosts.""" adminContext = context if context.is_admin else context.elevated() plugin = directory.get_plugin(plugin_constants.L3) l3_routers = {} state = agentschedulers_db.get_admin_state_up_filter() for router in routers: l3_agents = plugin.get_l3_agents_hosting_routers( adminContext, [router['id']], admin_state_up=state, active=True) for l3_agent in l3_agents: LOG.debug('Notify metering agent at %(topic)s.%(host)s ' 'the message %(method)s', {'topic': self.topic, 'host': l3_agent.host, 'method': method}) l3_router = l3_routers.get(l3_agent.host, []) l3_router.append(router) l3_routers[l3_agent.host] = l3_router for host, routers in l3_routers.items(): cctxt = self.client.prepare(server=host) cctxt.cast(context, method, routers=routers) def _notification_fanout(self, context, method, router_id): LOG.debug('Fanout notify metering agent at %(topic)s the message ' '%(method)s on router %(router_id)s', {'topic': self.topic, 'method': method, 'router_id': router_id}) cctxt = self.client.prepare(fanout=True) cctxt.cast(context, method, router_id=router_id) def _notification_host(self, context, method, host, **kwargs): """Notify the agent that is hosting the router.""" LOG.debug('Notify agent at %(host)s the message ' '%(method)s', {'host': host, 'method': method}) cctxt = self.client.prepare(server=host) cctxt.cast(context, method, **kwargs) def _notification(self, context, method, routers): """Notify all the agents that are hosting the routers.""" plugin = directory.get_plugin(plugin_constants.L3) if extensions.is_extension_supported( plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS): self._agent_notification(context, method, routers) else: cctxt = self.client.prepare(fanout=True) cctxt.cast(context, method, routers=routers) def router_deleted(self, context, router_id): self._notification_fanout(context, 'router_deleted', router_id) def routers_updated(self, context, routers): if routers: self._notification(context, 'routers_updated', routers) def update_metering_label_rules(self, context, routers): self._notification(context, 'update_metering_label_rules', routers) def add_metering_label_rule(self, context, routers): self._notification(context, 'add_metering_label_rule', routers) def remove_metering_label_rule(self, context, routers): self._notification(context, 'remove_metering_label_rule', routers) def add_metering_label(self, context, routers): self._notification(context, 'add_metering_label', routers) def remove_metering_label(self, context, routers): self._notification(context, 'remove_metering_label', routers) def routers_updated_on_host(self, context, router_ids, host): """Notify router updates to specific hosts hosting DVR routers.""" self._notification_host(context, 'routers_updated', host, routers=router_ids) 
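# Illustrative sketch (added for clarity, not original code) of the per-host
# batching performed by _agent_notification above: routers are grouped by the
# host of each hosting l3 agent, so every host receives a single
# 'routers_updated' cast. get_hosting_agents is a hypothetical stand-in for
# plugin.get_l3_agents_hosting_routers.
import collections


def group_routers_by_host(routers, get_hosting_agents):
    routers_by_host = collections.defaultdict(list)
    for router in routers:
        for agent in get_hosting_agents(router['id']):
            routers_by_host[agent.host].append(router)
    # each (host, batch) pair then maps to one cctxt.cast(..., routers=batch)
    return routers_by_host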
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/api/rpc/agentnotifiers/utils.py0000644000175000017500000000436700000000000024737 0ustar00coreycorey00000000000000# Copyright (c) 2016 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging import oslo_messaging from oslo_utils import excutils LOG = logging.getLogger(__name__) def _call_with_retry(max_attempts): """A wrapper to retry a function using rpc call in case of MessagingException. Retries the decorated function in case of MessagingException of some kind (a timeout, client send error etc). If maximum attempts are exceeded, the exception which occurred during last attempt is reraised. """ def wrapper(f): def func_wrapper(*args, **kwargs): # (ivasilevskaya) think of a more informative data to log action = '%(func)s' % {'func': getattr(f, '__name__', f)} for attempt in range(1, max_attempts + 1): try: return f(*args, **kwargs) except oslo_messaging.MessagingException: with excutils.save_and_reraise_exception( reraise=False) as ctxt: LOG.warning( 'Failed to execute %(action)s. %(attempt)d out' ' of %(max_attempts)d', {'attempt': attempt, 'max_attempts': max_attempts, 'action': action}) if attempt == max_attempts: ctxt.reraise = True return func_wrapper return wrapper def retry(func, max_attempts): """Adds the retry logic to original function and returns a partial. The returned partial can be called with the same arguments as the original function. """ return _call_with_retry(max_attempts)(func) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2110438 neutron-16.0.0.0b2.dev214/neutron/api/rpc/callbacks/0000755000175000017500000000000000000000000022111 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/api/rpc/callbacks/__init__.py0000644000175000017500000000000000000000000024210 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2110438 neutron-16.0.0.0b2.dev214/neutron/api/rpc/callbacks/consumer/0000755000175000017500000000000000000000000023744 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/api/rpc/callbacks/consumer/__init__.py0000644000175000017500000000000000000000000026043 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/api/rpc/callbacks/consumer/registry.py0000644000175000017500000000323200000000000026166 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.api.rpc.callbacks import resource_manager # TODO(ajo): consider adding locking to _get_manager, it's # safe for eventlet, but not for normal threading. def _get_manager(): return resource_manager.ConsumerResourceCallbacksManager() def register(callback, resource_type): # TODO(kevinbenton): remove this on debt collection callback.__dict__['_ACCEPTS_CONTEXT'] = True _get_manager().register(callback, resource_type) def unsubscribe(callback, resource_type): _get_manager().unregister(callback, resource_type) def push(context, resource_type, resource_list, event_type): """Push resource list into all registered callbacks for the event type.""" callbacks = _get_manager().get_callbacks(resource_type) for callback in callbacks: if callback._ACCEPTS_CONTEXT: callback(context, resource_type, resource_list, event_type) else: # backwards compat for callback listeners that don't take # context and resource_type callback(resource_list, event_type) def clear(): _get_manager().clear() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/api/rpc/callbacks/events.py0000644000175000017500000000125400000000000023771 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. CREATED = 'created' UPDATED = 'updated' DELETED = 'deleted' VALID = ( CREATED, UPDATED, DELETED ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/api/rpc/callbacks/exceptions.py0000644000175000017500000000237700000000000024655 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
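# Illustrative usage (an addition, not original code) of the consumer
# registry defined above in neutron/api/rpc/callbacks/consumer/registry.py:
# a callback registered for a resource type receives every pushed batch.
# The callback body and the choice of resource are hypothetical.
from neutron.api.rpc.callbacks.consumer import registry as consumer_registry
from neutron.api.rpc.callbacks import events
from neutron.api.rpc.callbacks import resources


def handle_qos_policy(context, resource_type, policies, event_type):
    # event_type is one of events.CREATED / events.UPDATED / events.DELETED
    for policy in policies:
        pass  # react to the pushed QosPolicy objects here


consumer_registry.register(handle_qos_policy, resources.QOS_POLICY)
# A later push fans the objects out to every registered callback:
# consumer_registry.push(context, resources.QOS_POLICY, [policy],
#                        events.UPDATED)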
from neutron_lib import exceptions from neutron._i18n import _ class CallbackWrongResourceType(exceptions.NeutronException): message = _('Callback for %(resource_type)s returned wrong resource type') class CallbackNotFound(exceptions.NeutronException): message = _('Callback for %(resource_type)s not found') class CallbacksMaxLimitReached(exceptions.NeutronException): message = _("Cannot add multiple callbacks for %(resource_type)s") class NoAgentDbMixinImplemented(exceptions.NeutronException): message = _("RPC callbacks mechanism needs the implementation of " "AgentDbMixin in the plugin, as so far it's only designed " "to work with agents") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2110438 neutron-16.0.0.0b2.dev214/neutron/api/rpc/callbacks/producer/0000755000175000017500000000000000000000000023734 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/api/rpc/callbacks/producer/__init__.py0000644000175000017500000000000000000000000026033 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/api/rpc/callbacks/producer/registry.py0000644000175000017500000000370000000000000026156 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.api.rpc.callbacks import exceptions from neutron.api.rpc.callbacks import resource_manager from neutron.objects import base # TODO(ajo): consider adding locking: it's safe for eventlet but not # for other types of threading. def _get_manager(): return resource_manager.ProducerResourceCallbacksManager() def provide(callback, resource_type): """Register a callback as a producer for the resource type. This callback will be used to produce resources of corresponding type for interested parties. """ _get_manager().register(callback, resource_type) def unprovide(callback, resource_type): """Unregister a callback for corresponding resource type.""" _get_manager().unregister(callback, resource_type) def clear(): """Clear all callbacks.""" _get_manager().clear() def pull(resource_type, resource_id, **kwargs): """Get resource object that corresponds to resource id. The function will return an object that is provided by resource producer. 
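# Illustrative sketch (an addition) of how a caller might handle the callback
# exceptions defined above; safe_pull and its arguments are hypothetical, and
# pull() is the helper defined later in this producer registry module.
from neutron.api.rpc.callbacks import exceptions as cb_exc
from neutron.api.rpc.callbacks.producer import registry as producer_registry


def safe_pull(resource_type, resource_id):
    try:
        return producer_registry.pull(resource_type, resource_id)
    except cb_exc.CallbackNotFound:
        # no producer registered for this resource type
        return None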
:returns: NeutronObject """ callback = _get_manager().get_callback(resource_type) obj = callback(resource_type, resource_id, **kwargs) if obj: if (not isinstance(obj, base.NeutronObject) or resource_type != obj.obj_name()): raise exceptions.CallbackWrongResourceType( resource_type=resource_type) return obj ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/api/rpc/callbacks/resource_manager.py0000644000175000017500000001115200000000000026004 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import collections from neutron_lib.callbacks import exceptions from oslo_log import log as logging import six from neutron.api.rpc.callbacks import exceptions as rpc_exc from neutron.api.rpc.callbacks import resources LOG = logging.getLogger(__name__) # TODO(QoS): split the registry/resources_rpc modules into two separate things: # one for pull and one for push APIs def _validate_resource_type(resource_type): if not resources.is_valid_resource_type(resource_type): raise exceptions.Invalid(element='resource', value=resource_type) @six.add_metaclass(abc.ABCMeta) class ResourceCallbacksManager(object): """A callback system that allows information providers in a loose manner. """ # This hook is to allow tests to get new objects for the class _singleton = True def __new__(cls, *args, **kwargs): if not cls._singleton: return super(ResourceCallbacksManager, cls).__new__(cls) if not hasattr(cls, '_instance'): cls._instance = super(ResourceCallbacksManager, cls).__new__(cls) return cls._instance @abc.abstractmethod def _add_callback(self, callback, resource_type): pass @abc.abstractmethod def _delete_callback(self, callback, resource_type): pass def register(self, callback, resource_type): """Register a callback for a resource type. :param callback: the callback. It must raise or return NeutronObject. :param resource_type: must be a valid resource type. """ LOG.debug("Registering callback for %s", resource_type) _validate_resource_type(resource_type) self._add_callback(callback, resource_type) def unregister(self, callback, resource_type): """Unregister callback from the registry. :param callback: the callback. :param resource_type: must be a valid resource type. 
""" LOG.debug("Unregistering callback for %s", resource_type) _validate_resource_type(resource_type) self._delete_callback(callback, resource_type) @abc.abstractmethod def clear(self): """Brings the manager to a clean state.""" def get_subscribed_types(self): return list(self._callbacks.keys()) class ProducerResourceCallbacksManager(ResourceCallbacksManager): _callbacks = dict() def _add_callback(self, callback, resource_type): if resource_type in self._callbacks: raise rpc_exc.CallbacksMaxLimitReached(resource_type=resource_type) self._callbacks[resource_type] = callback def _delete_callback(self, callback, resource_type): try: del self._callbacks[resource_type] except KeyError: raise rpc_exc.CallbackNotFound(resource_type=resource_type) def clear(self): self._callbacks = dict() def get_callback(self, resource_type): _validate_resource_type(resource_type) try: return self._callbacks[resource_type] except KeyError: raise rpc_exc.CallbackNotFound(resource_type=resource_type) class ConsumerResourceCallbacksManager(ResourceCallbacksManager): _callbacks = collections.defaultdict(set) def _add_callback(self, callback, resource_type): self._callbacks[resource_type].add(callback) def _delete_callback(self, callback, resource_type): try: self._callbacks[resource_type].remove(callback) if not self._callbacks[resource_type]: del self._callbacks[resource_type] except KeyError: raise rpc_exc.CallbackNotFound(resource_type=resource_type) def clear(self): self._callbacks = collections.defaultdict(set) def get_callbacks(self, resource_type): """Return the callback if found, None otherwise. :param resource_type: must be a valid resource type. """ _validate_resource_type(resource_type) callbacks = self._callbacks[resource_type] if not callbacks: raise rpc_exc.CallbackNotFound(resource_type=resource_type) return callbacks ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/api/rpc/callbacks/resources.py0000644000175000017500000000533200000000000024500 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron._i18n import _ from neutron.objects import conntrack_helper from neutron.objects.logapi import logging_resource as log_object from neutron.objects import network from neutron.objects import port_forwarding from neutron.objects import ports from neutron.objects.qos import policy from neutron.objects import securitygroup from neutron.objects import subnet from neutron.objects import trunk # Supported types LOGGING_RESOURCE = log_object.Log.obj_name() TRUNK = trunk.Trunk.obj_name() QOS_POLICY = policy.QosPolicy.obj_name() SUBPORT = trunk.SubPort.obj_name() PORT = ports.Port.obj_name() NETWORK = network.Network.obj_name() SUBNET = subnet.Subnet.obj_name() SECURITYGROUP = securitygroup.SecurityGroup.obj_name() SECURITYGROUPRULE = securitygroup.SecurityGroupRule.obj_name() PORTFORWARDING = port_forwarding.PortForwarding.obj_name() CONNTRACKHELPER = conntrack_helper.ConntrackHelper.obj_name() _VALID_CLS = ( policy.QosPolicy, trunk.Trunk, trunk.SubPort, ports.Port, subnet.Subnet, network.Network, securitygroup.SecurityGroup, securitygroup.SecurityGroupRule, log_object.Log, port_forwarding.PortForwarding, conntrack_helper.ConntrackHelper, ) _TYPE_TO_CLS_MAP = {cls.obj_name(): cls for cls in _VALID_CLS} LOCAL_RESOURCE_VERSIONS = { resource_type: cls.VERSION for resource_type, cls in _TYPE_TO_CLS_MAP.items() } def get_resource_type(resource_cls): if not resource_cls: return None if not hasattr(resource_cls, 'obj_name'): return None return resource_cls.obj_name() def register_resource_class(resource_cls): resource_type = get_resource_type(resource_cls) if not resource_type: msg = _("cannot find resource type for %s class") % resource_cls raise ValueError(msg) if resource_type not in _TYPE_TO_CLS_MAP: _TYPE_TO_CLS_MAP[resource_type] = resource_cls if resource_type not in LOCAL_RESOURCE_VERSIONS: LOCAL_RESOURCE_VERSIONS[resource_type] = resource_cls.VERSION def is_valid_resource_type(resource_type): return resource_type in _TYPE_TO_CLS_MAP def get_resource_cls(resource_type): return _TYPE_TO_CLS_MAP.get(resource_type) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/api/rpc/callbacks/version_manager.py0000644000175000017500000002511700000000000025650 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
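# Illustrative producer-side round trip (added for clarity) for the registry
# and managers above: provide() accepts exactly one producer per resource
# type -- a second registration raises CallbacksMaxLimitReached -- and pull()
# returns whatever NeutronObject that producer builds. The producer function
# below is a hypothetical stub.
from neutron.api.rpc.callbacks.producer import registry as producer_registry
from neutron.api.rpc.callbacks import resources as cb_resources


def produce_qos_policy(resource_type, policy_id, **kwargs):
    # a real producer would load and return a QosPolicy object; returning
    # None is treated by pull() as "resource not found"
    return None


producer_registry.provide(produce_qos_policy, cb_resources.QOS_POLICY)
# policy = producer_registry.pull(cb_resources.QOS_POLICY, policy_id)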
import collections
import copy
import pprint
import time

from neutron_lib.plugins import directory
from oslo_log import log as logging
from oslo_utils import importutils

from neutron.api.rpc.callbacks import exceptions

LOG = logging.getLogger(__name__)

VERSIONS_TTL = 60


# NOTE(mangelajo): if we import this globally we end up with a (very
#                  long) circular dependency; this can be fixed if we
#                  stop importing all exposed classes in
#                  neutron.api.rpc.callbacks.resources and provide
#                  a decorator to expose classes
def _import_resources():
    return importutils.import_module('neutron.api.rpc.callbacks.resources')


def _import_agents_db():
    return importutils.import_module('neutron.db.agents_db')


AgentConsumer = collections.namedtuple('AgentConsumer',
                                       ['agent_type', 'host'])
AgentConsumer.__repr__ = lambda self: '%s@%s' % self


class ResourceConsumerTracker(object):
    """Class passed down to collect consumers' resource versions.

    This class is responsible for fetching the local versions of
    resources, and letting the called function register every consumer's
    resource version.

    This class is currently passed down to the plugin's
    get_agents_resource_versions, as the only expected consumers so far
    are agents.

    Later on, this class can also be used to recalculate, for each
    resource type, the collection of versions that are local or known
    by one or more consumers.
    """

    def __init__(self):
        # Initialize with the local (server) versions, as we always want
        # to send those. Agents, as they upgrade, will need the latest
        # version, and there is a corner case we'd not be covering
        # otherwise:
        #   1) one or several neutron-servers get disconnected from rpc
        #      (while running)
        #   2) a new agent comes up, with the latest version, and it
        #      reports 2 ways:
        #      a) via status report (which will be stored in the database)
        #      b) via fanout call to all neutron servers; this way, all of
        #         them get their version set updated right away without
        #         the need to re-fetch anything from the database.
        #   3) the neutron-servers get back online to the rpc bus, but
        #      they lost the fanout message.
        #
        # TODO(mangelajo): to cover this case we may need a callback from
        # oslo messaging to get notified about disconnections/reconnections
        # to the rpc bus, invalidating the consumer version cache when we
        # receive such a callback.
        self._versions = self._get_local_resource_versions()
        self._versions_by_consumer = collections.defaultdict(dict)
        self._needs_recalculation = False
        self.last_report = None

    def _get_local_resource_versions(self):
        resources = _import_resources()
        local_resource_versions = collections.defaultdict(set)
        for resource_type, version in (
                resources.LOCAL_RESOURCE_VERSIONS.items()):
            local_resource_versions[resource_type].add(version)
        return local_resource_versions

    # TODO(mangelajo): add locking with _recalculate_versions if we ever
    # move out of green threads.
    def _set_version(self, consumer, resource_type, version):
        """Set or update a consumer resource type version."""
        self._versions[resource_type].add(version)
        consumer_versions = self._versions_by_consumer[consumer]
        prev_version = consumer_versions.get(resource_type, None)
        if version:
            consumer_versions[resource_type] = version
        else:
            consumer_versions.pop(resource_type, None)
        if prev_version != version:
            # If a version got updated/changed in a consumer, we need to
            # recalculate the main dictionary of versions based on the
            # new _versions_by_consumer.
            # We defer the recalculation until every consumer version has
            # been set for all of its resource types.
            self._needs_recalculation = True
            LOG.debug("Version for resource type %(resource_type)s changed "
                      "%(prev_version)s to %(version)s on "
                      "consumer %(consumer)s",
                      {'resource_type': resource_type,
                       'version': version,
                       'prev_version': prev_version,
                       'consumer': consumer})

    def set_versions(self, consumer, versions):
        """Set or update a specific consumer's resource type versions.

        :param consumer: should be an AgentConsumer object, with agent_type
                         and host set. This acts as the unique ID for the
                         agent.
        :param versions: should be a dictionary in the following format:
                             {'QosPolicy': '1.1',
                              'SecurityGroup': '1.0',
                              'Port': '1.0'}
        """
        for resource_type, resource_version in versions.items():
            self._set_version(consumer, resource_type,
                              resource_version)
        if versions:
            self._cleanup_removed_versions(consumer, versions)
        else:
            self._handle_no_set_versions(consumer)

    def _cleanup_removed_versions(self, consumer, versions):
        """Check if any version report has been removed, and clean up."""
        prev_resource_types = set(
            self._versions_by_consumer[consumer].keys())
        cur_resource_types = set(versions.keys())
        removed_resource_types = prev_resource_types - cur_resource_types
        if removed_resource_types:
            LOG.debug("Removing stale tracked versions: %s",
                      removed_resource_types)
        for resource_type in removed_resource_types:
            self._set_version(consumer, resource_type, None)

    def _handle_no_set_versions(self, consumer):
        """Handle consumers reporting no versions."""
        if self._versions_by_consumer[consumer]:
            self._needs_recalculation = True
            LOG.debug("Clearing versions for consumer %s", consumer)
        self._versions_by_consumer[consumer] = {}

    def get_resource_versions(self, resource_type):
        """Fetch the versions necessary to notify all consumers."""
        if self._needs_recalculation:
            self._recalculate_versions()
            self._needs_recalculation = False
        return copy.copy(self._versions[resource_type])

    def report(self):
        """Output debug information about the consumer versions."""
        format = lambda versions: pprint.pformat(dict(versions), indent=4)
        debug_dict = {'pushed_versions': format(self._versions),
                      'consumer_versions': format(
                          self._versions_by_consumer)}
        if self.last_report != debug_dict:
            self.last_report = debug_dict
            LOG.debug('Tracked resource versions report:\n'
                      'pushed versions:\n%(pushed_versions)s\n\n'
                      'consumer versions:\n%(consumer_versions)s\n',
                      debug_dict)

    # TODO(mangelajo): add locking if we ever move out of greenthreads.
    def _recalculate_versions(self):
        """Recalculate the _versions set.

        Re-fetch the local (server) versions and expand with consumers'
        versions.
        """
        versions = self._get_local_resource_versions()
        for versions_dict in self._versions_by_consumer.values():
            for res_type, res_version in versions_dict.items():
                versions[res_type].add(res_version)
        self._versions = versions


class CachedResourceConsumerTracker(object):
    """This class takes care of the caching logic of versions."""

    def __init__(self):
        # This is the TTL expiration time; 0 means it is expired at start
        self._expires_at = 0
        self._versions = ResourceConsumerTracker()

    def _update_consumer_versions(self):
        new_tracker = ResourceConsumerTracker()
        neutron_plugin = directory.get_plugin()
        agents_db = _import_agents_db()
        # If you use RPC callbacks, your plugin needs to implement
        # AgentDbMixin so that we know which resource versions your
        # agents consume via RPC; please note that rpc callbacks are
        # only designed to work with agents currently.
        if isinstance(neutron_plugin, agents_db.AgentDbMixin):
            neutron_plugin.get_agents_resource_versions(new_tracker)
        else:
            raise exceptions.NoAgentDbMixinImplemented()
        # preserve last report state so we don't duplicate logs on refresh
        new_tracker.last_report = self._versions.last_report
        self._versions = new_tracker
        self._versions.report()

    def _check_expiration(self):
        if time.time() > self._expires_at:
            self._update_consumer_versions()
            self._expires_at = time.time() + VERSIONS_TTL

    def get_resource_versions(self, resource_type):
        self._check_expiration()
        return self._versions.get_resource_versions(resource_type)

    def update_versions(self, consumer, resource_versions):
        self._versions.set_versions(consumer, resource_versions)

    def report(self):
        self._check_expiration()
        self._versions.report()


_cached_version_tracker = None


# NOTE(ajo): add locking if we ever stop using greenthreads
def _get_cached_tracker():
    global _cached_version_tracker
    if not _cached_version_tracker:
        _cached_version_tracker = CachedResourceConsumerTracker()
    return _cached_version_tracker


def get_resource_versions(resource_type):
    """Return the set of versions expected by the consumers of a resource."""
    return _get_cached_tracker().get_resource_versions(resource_type)


def update_versions(consumer, resource_versions):
    """Update the resources' versions for a consumer id."""
    _get_cached_tracker().update_versions(consumer, resource_versions)


def report():
    """Report resource versions in debug logs."""
    _get_cached_tracker().report()


neutron-16.0.0.0b2.dev214/neutron/api/rpc/handlers/__init__.py (empty)
neutron-16.0.0.0b2.dev214/neutron/api/rpc/handlers/dhcp_rpc.py
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
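# Illustrative usage sketch for the version_manager module above (not part
# of dhcp_rpc.py). It shows how the tracker computes the union of versions
# a push must cover; the agent hosts, the 'QosPolicy' version values and
# the helper name _example_resource_version_tracking are made up for the
# example.
from neutron.api.rpc.callbacks import version_manager


def _example_resource_version_tracking():
    tracker = version_manager.ResourceConsumerTracker()
    old_agent = version_manager.AgentConsumer(agent_type='L2 agent',
                                              host='compute-1')
    new_agent = version_manager.AgentConsumer(agent_type='L2 agent',
                                              host='compute-2')
    # Two agents running different code report different object versions.
    tracker.set_versions(old_agent, {'QosPolicy': '1.0'})
    tracker.set_versions(new_agent, {'QosPolicy': '1.1'})
    # The union ('1.0', '1.1', plus the server's own local QosPolicy
    # version) is returned, so a push is fanned out once per version
    # still in use somewhere.
    return tracker.get_resource_versions('QosPolicy')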
import copy import itertools import operator from neutron_lib.api.definitions import portbindings from neutron_lib.api import extensions from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib.db import api as db_api from neutron_lib import exceptions from neutron_lib.plugins import directory from neutron_lib.plugins import utils as p_utils from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging import oslo_messaging from oslo_utils import excutils from neutron._i18n import _ from neutron.common import utils from neutron.db import provisioning_blocks from neutron.extensions import segment as segment_ext from neutron.quota import resource_registry LOG = logging.getLogger(__name__) class DhcpRpcCallback(object): """DHCP agent RPC callback in plugin implementations. This class implements the server side of an rpc interface. The client side of this interface can be found in neutron.agent.dhcp.agent.DhcpPluginApi. For more information about changing rpc interfaces, see doc/source/contributor/internals/rpc_api.rst. """ # API version history: # 1.0 - Initial version. # 1.1 - Added get_active_networks_info, create_dhcp_port, # and update_dhcp_port methods. # 1.2 - Removed get_dhcp_port. When removing a method (Making a # backwards incompatible change) you would normally bump the # major version. However, since the method was unused in the # RPC client for many releases, it should be OK to bump the # minor release instead and claim RPC compatibility with the # last few client versions. # 1.3 - Removed release_port_fixed_ip. It's not used by reference DHCP # agent since Juno, so similar rationale for not bumping the # major version as above applies here too. # 1.4 - Removed update_lease_expiration. It's not used by reference # DHCP agent since Juno, so similar rationale for not bumping the # major version as above applies here too. # 1.5 - Added dhcp_ready_on_ports. # 1.6 - Removed get_active_networks. It's not used by reference # DHCP agent since Havana, so similar rationale for not bumping # the major version as above applies here too. 
# 1.7 - Add get_networks # 1.8 - Add get_dhcp_port target = oslo_messaging.Target( namespace=constants.RPC_NAMESPACE_DHCP_PLUGIN, version='1.8') def _get_active_networks(self, context, **kwargs): """Retrieve and return a list of the active networks.""" host = kwargs.get('host') plugin = directory.get_plugin() if extensions.is_extension_supported( plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS): if cfg.CONF.network_auto_schedule: plugin.auto_schedule_networks(context, host) nets = plugin.list_active_networks_on_active_dhcp_agent( context, host) else: filters = dict(admin_state_up=[True]) nets = plugin.get_networks(context, filters=filters) return nets def _port_action(self, plugin, context, port, action): """Perform port operations taking care of concurrency issues.""" try: if action == 'create_port': return p_utils.create_port(plugin, context, port) elif action == 'update_port': return plugin.update_port(context, port['id'], port) else: msg = _('Unrecognized action') raise exceptions.Invalid(message=msg) except (db_exc.DBReferenceError, exceptions.NetworkNotFound, exceptions.SubnetNotFound, exceptions.InvalidInput, exceptions.IpAddressGenerationFailure) as e: with excutils.save_and_reraise_exception(reraise=False) as ctxt: if isinstance(e, exceptions.IpAddressGenerationFailure): # Check if the subnet still exists and if it does not, # this is the reason why the ip address generation failed. # In any other unlikely event re-raise try: subnet_id = port['port']['fixed_ips'][0]['subnet_id'] plugin.get_subnet(context, subnet_id) except exceptions.SubnetNotFound: pass else: ctxt.reraise = True if ctxt.reraise: net_id = port['port']['network_id'] LOG.warning("Action %(action)s for network %(net_id)s " "could not complete successfully: " "%(reason)s", {"action": action, "net_id": net_id, 'reason': e}) def _group_by_network_id(self, res): grouped = {} keyfunc = operator.itemgetter('network_id') for net_id, values in itertools.groupby(sorted(res, key=keyfunc), keyfunc): grouped[net_id] = list(values) return grouped def get_active_networks_info(self, context, **kwargs): """Returns all the networks/subnets/ports in system.""" host = kwargs.get('host') LOG.debug('get_active_networks_info from %s', host) networks = self._get_active_networks(context, **kwargs) plugin = directory.get_plugin() filters = {'network_id': [network['id'] for network in networks]} ports = plugin.get_ports(context, filters=filters) # default is to filter subnets based on 'enable_dhcp' flag if kwargs.get('enable_dhcp_filter', True): filters['enable_dhcp'] = [True] # NOTE(kevinbenton): we sort these because the agent builds tags # based on position in the list and has to restart the process if # the order changes. subnets = sorted(plugin.get_subnets(context, filters=filters), key=operator.itemgetter('id')) # Handle the possibility that the dhcp agent(s) only has connectivity # inside a segment. If the segment service plugin is loaded and # there are active dhcp enabled subnets, then filter out the subnets # that are not on the host's segment. 
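        # For example, in a routed network with subnet-1 on segment-A and
        # subnet-2 on segment-B, a host attached only to segment-A gets
        # subnet-2 reported in 'non_local_subnets' and keeps only subnet-1
        # in 'subnets' below.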
seg_plug = directory.get_plugin( segment_ext.SegmentPluginBase.get_plugin_type()) seg_subnets = [subnet for subnet in subnets if subnet.get('segment_id')] nonlocal_subnets = [] if seg_plug and seg_subnets: host_segment_ids = seg_plug.get_segments_by_hosts(context, [host]) # Gather the ids of all the subnets that are on a segment that # this host touches seg_subnet_ids = {subnet['id'] for subnet in seg_subnets if subnet['segment_id'] in host_segment_ids} # Gather the ids of all the networks that are routed routed_net_ids = {seg_subnet['network_id'] for seg_subnet in seg_subnets} # Remove the subnets with segments that are not in the same # segments as the host. Do this only for the networks that are # routed because we want non-routed networks to work as # before. nonlocal_subnets = [subnet for subnet in seg_subnets if subnet['id'] not in seg_subnet_ids] subnets = [subnet for subnet in subnets if subnet['network_id'] not in routed_net_ids or subnet['id'] in seg_subnet_ids] grouped_subnets = self._group_by_network_id(subnets) grouped_nonlocal_subnets = self._group_by_network_id(nonlocal_subnets) grouped_ports = self._group_by_network_id(ports) for network in networks: network['subnets'] = grouped_subnets.get(network['id'], []) network['non_local_subnets'] = ( grouped_nonlocal_subnets.get(network['id'], [])) network['ports'] = grouped_ports.get(network['id'], []) return networks def get_network_info(self, context, **kwargs): """Retrieve and return extended information about a network.""" network_id = kwargs.get('network_id') host = kwargs.get('host') LOG.debug('Network %(network_id)s requested from ' '%(host)s', {'network_id': network_id, 'host': host}) plugin = directory.get_plugin() try: network = plugin.get_network(context, network_id) except exceptions.NetworkNotFound: LOG.debug("Network %s could not be found, it might have " "been deleted concurrently.", network_id) return filters = dict(network_id=[network_id]) subnets = plugin.get_subnets(context, filters=filters) seg_plug = directory.get_plugin( segment_ext.SegmentPluginBase.get_plugin_type()) nonlocal_subnets = [] if seg_plug and subnets: seg_subnets = [subnet for subnet in subnets if subnet.get('segment_id')] # If there are no subnets with segments, then this is not a routed # network and no filtering should take place. if seg_subnets: segment_ids = seg_plug.get_segments_by_hosts(context, [host]) # There might be something to do if no segment_ids exist that # are mapped to this host. However, it seems that if this # host is not mapped to any segments and this is a routed # network, then this host shouldn't have even been scheduled # to. nonlocal_subnets = [subnet for subnet in seg_subnets if subnet['segment_id'] not in segment_ids] subnets = [subnet for subnet in seg_subnets if subnet['segment_id'] in segment_ids] # NOTE(kevinbenton): we sort these because the agent builds tags # based on position in the list and has to restart the process if # the order changes. 
network['subnets'] = sorted(subnets, key=operator.itemgetter('id')) network['non_local_subnets'] = sorted(nonlocal_subnets, key=operator.itemgetter('id')) network['ports'] = plugin.get_ports(context, filters=filters) return network @db_api.retry_db_errors def release_dhcp_port(self, context, **kwargs): """Release the port currently being used by a DHCP agent.""" host = kwargs.get('host') network_id = kwargs.get('network_id') device_id = kwargs.get('device_id') LOG.debug('DHCP port deletion for %(network_id)s request from ' '%(host)s', {'network_id': network_id, 'host': host}) plugin = directory.get_plugin() plugin.delete_ports_by_device_id(context, device_id, network_id) @oslo_messaging.expected_exceptions(exceptions.IpAddressGenerationFailure) @db_api.retry_db_errors @resource_registry.mark_resources_dirty def create_dhcp_port(self, context, **kwargs): """Create and return dhcp port information. If an expected failure occurs, a None port is returned. """ host = kwargs.get('host') # Note(pbondar): Create deep copy of port to prevent operating # on changed dict if RetryRequest is raised port = copy.deepcopy(kwargs.get('port')) LOG.debug('Create dhcp port %(port)s ' 'from %(host)s.', {'port': port, 'host': host}) port['port']['device_owner'] = constants.DEVICE_OWNER_DHCP port['port'][portbindings.HOST_ID] = host if 'mac_address' not in port['port']: port['port']['mac_address'] = constants.ATTR_NOT_SPECIFIED plugin = directory.get_plugin() return self._port_action(plugin, context, port, 'create_port') def _is_dhcp_agent_hosting_network(self, plugin, context, host, network_id): """Check whether a DHCP agent (host) is hosting a network.""" agents = plugin.get_dhcp_agents_hosting_networks(context, [network_id], hosts=[host]) return len(agents) != 0 @oslo_messaging.expected_exceptions(exceptions.NetworkNotFound) @oslo_messaging.expected_exceptions(exceptions.IpAddressGenerationFailure) @db_api.retry_db_errors def update_dhcp_port(self, context, **kwargs): """Update the dhcp port.""" host = kwargs.get('host') port = kwargs.get('port') port['id'] = kwargs.get('port_id') port['port'][portbindings.HOST_ID] = host plugin = directory.get_plugin() try: network_id = port['port']['network_id'] old_port = plugin.get_port(context, port['id']) if (old_port['device_id'] != constants.DEVICE_ID_RESERVED_DHCP_PORT and old_port['device_id'] != utils.get_dhcp_agent_device_id(network_id, host)): return if not self._is_dhcp_agent_hosting_network(plugin, context, host, network_id): LOG.warning("The DHCP agent on %(host)s does not host the " "network %(net_id)s.", {"host": host, "net_id": network_id}) raise exceptions.NetworkNotFound(net_id=network_id) LOG.debug('Update dhcp port %(port)s ' 'from %(host)s.', {'port': port, 'host': host}) return self._port_action(plugin, context, port, 'update_port') except exceptions.PortNotFound: LOG.debug('Host %(host)s tried to update port ' '%(port_id)s which no longer exists.', {'host': host, 'port_id': port['id']}) @db_api.retry_db_errors def get_dhcp_port(self, context, **kwargs): """Retrieve the DHCP port""" port_id = kwargs.get('port_id') plugin = directory.get_plugin() return plugin.get_port(context, port_id) @db_api.retry_db_errors def dhcp_ready_on_ports(self, context, port_ids): for port_id in port_ids: provisioning_blocks.provisioning_complete( context, port_id, resources.PORT, provisioning_blocks.DHCP_ENTITY) def get_networks(self, context, filters=None, fields=None): """Retrieve and return a list of networks.""" # NOTE(adrianc): This RPC is being used by out of tree 
interface # drivers, MultiInterfaceDriver and IPoIBInterfaceDriver, located in # networking-mlnx. plugin = directory.get_plugin() return plugin.get_networks( context, filters=filters, fields=fields) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/api/rpc/handlers/dvr_rpc.py0000644000175000017500000001261000000000000024003 0ustar00coreycorey00000000000000# Copyright 2014, Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.agent import topics from neutron_lib import constants from neutron_lib.plugins import directory from neutron_lib import rpc as n_rpc from oslo_log import helpers as log_helpers from oslo_log import log as logging import oslo_messaging LOG = logging.getLogger(__name__) class DVRServerRpcApi(object): """Agent-side RPC (stub) for agent-to-plugin interaction. This class implements the client side of an rpc interface. The server side can be found below: DVRServerRpcCallback. For more information on changing rpc interfaces, see doc/source/contributor/internals/rpc_api.rst. """ # 1.0 Initial Version # 1.1 Support for passing 'fixed_ips' in get_subnet_for_dvr function. # Passing 'subnet" will be deprecated in the next release. def __init__(self, topic): target = oslo_messaging.Target(topic=topic, version='1.0', namespace=constants.RPC_NAMESPACE_DVR) self.client = n_rpc.get_client(target) @log_helpers.log_method_call def get_dvr_mac_address_by_host(self, context, host): cctxt = self.client.prepare() return cctxt.call(context, 'get_dvr_mac_address_by_host', host=host) @log_helpers.log_method_call def get_dvr_mac_address_list(self, context): cctxt = self.client.prepare() return cctxt.call(context, 'get_dvr_mac_address_list') @log_helpers.log_method_call def get_ports_on_host_by_subnet(self, context, host, subnet): """Get DVR serviced ports on given host and subnet.""" cctxt = self.client.prepare() return cctxt.call(context, 'get_ports_on_host_by_subnet', host=host, subnet=subnet) @log_helpers.log_method_call def get_subnet_for_dvr(self, context, subnet, fixed_ips): cctxt = self.client.prepare() return cctxt.call( context, 'get_subnet_for_dvr', subnet=subnet, fixed_ips=fixed_ips) class DVRServerRpcCallback(object): """Plugin-side RPC (implementation) for agent-to-plugin interaction. This class implements the server side of an rpc interface. The client side can be found above: DVRServerRpcApi. For more information on changing rpc interfaces, see doc/source/contributor/internals/rpc_api.rst. """ # History # 1.0 Initial version # 1.1 Support for passing the 'fixed_ips" in get_subnet_for_dvr. # Passing subnet will be deprecated in the next release. 
target = oslo_messaging.Target(version='1.1', namespace=constants.RPC_NAMESPACE_DVR) @property def plugin(self): if not getattr(self, '_plugin', None): self._plugin = directory.get_plugin() return self._plugin def get_dvr_mac_address_list(self, context): return self.plugin.get_dvr_mac_address_list(context) def get_dvr_mac_address_by_host(self, context, **kwargs): host = kwargs.get('host') LOG.debug("DVR Agent requests mac_address for host %s", host) return self.plugin.get_dvr_mac_address_by_host(context, host) def get_ports_on_host_by_subnet(self, context, **kwargs): """Get DVR serviced ports for given host and subnet.""" host = kwargs.get('host') subnet = kwargs.get('subnet') LOG.debug("DVR Agent requests list of VM ports on host %s", host) return self.plugin.get_ports_on_host_by_subnet(context, host, subnet) def get_subnet_for_dvr(self, context, **kwargs): fixed_ips = kwargs.get('fixed_ips') subnet = kwargs.get('subnet') return self.plugin.get_subnet_for_dvr( context, subnet, fixed_ips=fixed_ips) class DVRAgentRpcApiMixin(object): """Plugin-side RPC (stub) for plugin-to-agent interaction.""" DVR_RPC_VERSION = "1.0" def _get_dvr_update_topic(self): return topics.get_topic_name(self.topic, topics.DVR, topics.UPDATE) def dvr_mac_address_update(self, context, dvr_macs): """Notify dvr mac address updates.""" if not dvr_macs: return cctxt = self.client.prepare(topic=self._get_dvr_update_topic(), version=self.DVR_RPC_VERSION, fanout=True) cctxt.cast(context, 'dvr_mac_address_update', dvr_macs=dvr_macs) class DVRAgentRpcCallbackMixin(object): """Agent-side RPC (implementation) for plugin-to-agent interaction.""" def dvr_mac_address_update(self, context, **kwargs): """Callback for dvr_mac_addresses update. :param dvr_macs: list of updated dvr_macs """ dvr_macs = kwargs.get('dvr_macs', []) LOG.debug("dvr_macs updated on remote: %s", dvr_macs) self.dvr_agent.dvr_mac_address_update(dvr_macs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/api/rpc/handlers/l3_rpc.py0000644000175000017500000004055600000000000023540 0ustar00coreycorey00000000000000# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
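# Illustrative sketch (not part of l3_rpc.py): one hedged example of wiring
# the dvr_rpc.DVRAgentRpcCallbackMixin defined above into an agent-side
# class. The Fake* names are hypothetical; the in-tree OVS agent provides
# the real 'dvr_agent' object with a dvr_mac_address_update() method, which
# is the only contract the mixin relies on.
from neutron.api.rpc.handlers import dvr_rpc


class FakeDVRManager(object):
    def dvr_mac_address_update(self, dvr_macs):
        # A real manager would refresh its local DVR MAC translation table.
        print('refreshing DVR MAC table with: %s' % dvr_macs)


class FakeDVRAwareAgent(dvr_rpc.DVRAgentRpcCallbackMixin):
    def __init__(self):
        self.dvr_agent = FakeDVRManager()


def _demo_dvr_mac_update():
    # The rpc dispatcher invokes the callback like this on a fanout cast:
    FakeDVRAwareAgent().dvr_mac_address_update(
        context=None,
        dvr_macs=[{'host': 'compute-1',
                   'mac_address': 'fa:16:3f:00:00:01'}])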
from neutron_lib.api.definitions import portbindings
from neutron_lib.api import extensions
from neutron_lib import constants
from neutron_lib import context as neutron_context
from neutron_lib.db import api as db_api
from neutron_lib import exceptions
from neutron_lib.exceptions import l3 as l3_exc
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging

LOG = logging.getLogger(__name__)


class L3RpcCallback(object):
    """L3 agent RPC callback in plugin implementations."""

    # 1.0 L3PluginApi BASE_RPC_API_VERSION
    # 1.1 Support update_floatingip_statuses
    # 1.2 Added methods for DVR support
    # 1.3 Added a method that returns the list of activated services
    # 1.4 Added L3 HA update_router_state. This method was later removed,
    #     since it was unused. The RPC version was not changed
    # 1.5 Added update_ha_routers_states
    # 1.6 Added process_prefix_update to support IPv6 Prefix Delegation
    # 1.7 Added method delete_agent_gateway_port for DVR Routers
    # 1.8 Added address scope information
    # 1.9 Added get_router_ids
    # 1.10 Added update_all_ha_network_port_statuses
    # 1.11 Added get_host_ha_router_count
    # 1.12 Added get_networks
    target = oslo_messaging.Target(version='1.12')

    @property
    def plugin(self):
        if not hasattr(self, '_plugin'):
            self._plugin = directory.get_plugin()
        return self._plugin

    @property
    def l3plugin(self):
        if not hasattr(self, '_l3plugin'):
            self._l3plugin = directory.get_plugin(plugin_constants.L3)
        return self._l3plugin

    def update_all_ha_network_port_statuses(self, context, host):
        """Set HA network port to DOWN for HA routers hosted on <host>.

        This will update HA network port status to DOWN for all HA routers
        hosted on <host>. This is needed to avoid the l3 agent spawning
        keepalived when the l2 agent has not yet wired the port. This can
        happen after a system reboot that has wiped out flows, etc., and
        the L2 agent hasn't started up yet. The port will still be ACTIVE
        in the data model and the L3 agent will use that info to mistakenly
        think that the L2 network is ready. By forcing the status to DOWN,
        we require the L2 agent to essentially ack that the port is indeed
        ACTIVE by reacting to the port update and calling update_device_up.
        """
        if not extensions.is_extension_supported(
                self.plugin, constants.PORT_BINDING_EXT_ALIAS):
            return
        device_filter = {
            'device_owner': [constants.DEVICE_OWNER_ROUTER_HA_INTF],
            'status': [constants.PORT_STATUS_ACTIVE]}
        ports = self.plugin.get_ports(context, filters=device_filter)
        ha_ports = [p['id'] for p in ports
                    if p.get(portbindings.HOST_ID) == host]
        if not ha_ports:
            return
        LOG.debug("L3 agent on host %(host)s requested a fullsync, so "
                  "setting HA network ports %(ha_ports)s status to DOWN.",
                  {"host": host, "ha_ports": ha_ports})
        for p in ha_ports:
            self.plugin.update_port(
                context, p,
                {'port': {'status': constants.PORT_STATUS_DOWN}})

    def get_router_ids(self, context, host):
        """Returns IDs of routers scheduled to the l3 agent on <host>.

        This will autoschedule unhosted routers to the l3 agent on <host>
        and then return all ids of routers scheduled to it.
        """
        if extensions.is_extension_supported(
                self.l3plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
            if cfg.CONF.router_auto_schedule:
                self.l3plugin.auto_schedule_routers(context, host)
        return self.l3plugin.list_router_ids_on_host(context, host)

    @db_api.retry_db_errors
    def sync_routers(self, context, **kwargs):
        """Sync routers according to filters to a specific agent.
@param context: contain user information @param kwargs: host, router_ids @return: a list of routers with their interfaces and floating_ips """ router_ids = kwargs.get('router_ids') host = kwargs.get('host') context = neutron_context.get_admin_context() LOG.debug('Sync routers for ids %(router_ids)s in %(host)s', {'router_ids': router_ids, 'host': host}) routers = self._routers_to_sync(context, router_ids, host) if extensions.is_extension_supported( self.plugin, constants.PORT_BINDING_EXT_ALIAS): self._ensure_host_set_on_ports(context, host, routers) # refresh the data structure after ports are bound routers = self._routers_to_sync(context, router_ids, host) pf_plugin = directory.get_plugin(plugin_constants.PORTFORWARDING) if pf_plugin: pf_plugin.sync_port_forwarding_fip(context, routers) LOG.debug('The sync data for ids %(router_ids)s in %(host)s is: ' '%(routers)s', {'router_ids': router_ids, 'host': host, 'routers': routers}) return routers def _routers_to_sync(self, context, router_ids, host=None): if extensions.is_extension_supported( self.l3plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS): routers = ( self.l3plugin.list_active_sync_routers_on_active_l3_agent( context, host, router_ids)) else: routers = self.l3plugin.get_sync_data(context, router_ids) return routers def _ensure_host_set_on_ports(self, context, host, routers): for router in routers: LOG.debug("Checking router: %(id)s for host: %(host)s", {'id': router['id'], 'host': host}) if router.get('gw_port') and router.get('distributed'): # '' is used to effectively clear binding of a gw port if not # bound (snat is not hosted on any l3 agent) gw_port_host = router.get('gw_port_host') or '' self._ensure_host_set_on_port(context, gw_port_host, router.get('gw_port'), router['id'], ha_router_port=router.get('ha')) for p in router.get(constants.SNAT_ROUTER_INTF_KEY, []): self._ensure_host_set_on_port( context, gw_port_host, p, router['id'], ha_router_port=router.get('ha')) else: self._ensure_host_set_on_port( context, host, router.get('gw_port'), router['id'], ha_router_port=router.get('ha')) for interface in router.get(constants.INTERFACE_KEY, []): self._ensure_host_set_on_port( context, host, interface, router['id'], ha_router_port=router.get('ha')) interface = router.get(constants.HA_INTERFACE_KEY) if interface: self._ensure_host_set_on_port(context, host, interface, router['id']) def _ensure_host_set_on_port(self, context, host, port, router_id=None, ha_router_port=False): not_bound = port and port.get(portbindings.VIF_TYPE) in ( portbindings.VIF_TYPE_BINDING_FAILED, portbindings.VIF_TYPE_UNBOUND) if (port and host is not None and (port.get('device_owner') != constants.DEVICE_OWNER_DVR_INTERFACE and port.get(portbindings.HOST_ID) != host or not_bound)): # Ports owned by non-HA routers are bound again if they're # already bound but the router moved to another host. if not ha_router_port: # All ports, including ports created for SNAT'ing for # DVR are handled here try: LOG.debug("Updating router %(router)s port %(port)s " "binding host %(host)s", {"router": router_id, "port": port['id'], "host": host}) self.plugin.update_port( context, port['id'], {'port': {portbindings.HOST_ID: host}}) # updating port's host to pass actual info to l3 agent port[portbindings.HOST_ID] = host except exceptions.PortNotFound: LOG.debug("Port %(port)s not found while updating " "agent binding for router %(router)s.", {"port": port['id'], "router": router_id}) # Ports owned by HA routers should only be bound once, if # they are unbound. 
These ports are moved when an agent reports # that one of its routers moved to the active state. else: if not port.get(portbindings.HOST_ID): active_host = ( self.l3plugin.get_active_host_for_ha_router( context, router_id)) if active_host: host = active_host # If there is currently no active router instance (For # example it's a new router), the host that requested # the routers (Essentially a random host) will do. The # port binding will be corrected when an active is # elected. try: LOG.debug("Updating router %(router)s port %(port)s " "binding host %(host)s", {"router": router_id, "port": port['id'], "host": host}) self.plugin.update_port( context, port['id'], {'port': {portbindings.HOST_ID: host}}) except exceptions.PortNotFound: LOG.debug("Port %(port)s not found while updating " "agent binding for router %(router)s.", {"port": port['id'], "router": router_id}) elif (port and port.get('device_owner') == constants.DEVICE_OWNER_DVR_INTERFACE): # Ports that are DVR interfaces have multiple bindings (based on # of hosts on which DVR router interfaces are spawned). Such # bindings are created/updated here by invoking # update_distributed_port_binding self.plugin.update_distributed_port_binding( context, port['id'], {'port': {portbindings.HOST_ID: host, 'device_id': router_id}}) def get_service_plugin_list(self, context, **kwargs): return directory.get_plugins().keys() def get_host_ha_router_count(self, context, host): return self.l3plugin.get_host_ha_router_count(context, host) @db_api.retry_db_errors def update_floatingip_statuses(self, context, router_id, fip_statuses): """Update operational status for a floating IP.""" with db_api.CONTEXT_WRITER.using(context): for (floatingip_id, status) in fip_statuses.items(): LOG.debug("New status for floating IP %(floatingip_id)s: " "%(status)s", {'floatingip_id': floatingip_id, 'status': status}) try: self.l3plugin.update_floatingip_status(context, floatingip_id, status) except l3_exc.FloatingIPNotFound: LOG.debug("Floating IP: %s no longer present.", floatingip_id) # Find all floating IPs known to have been the given router # for which an update was not received. Set them DOWN mercilessly # This situation might occur for some asynchronous backends if # notifications were missed known_router_fips = self.l3plugin.get_floatingips( context, {'last_known_router_id': [router_id]}) # Consider only floating ips which were disassociated in the API # FIXME(salv-orlando): Filtering in code should be avoided. # the plugin should offer a way to specify a null filter fips_to_disable = (fip['id'] for fip in known_router_fips if not fip['router_id']) for fip_id in fips_to_disable: self.l3plugin.update_floatingip_status( context, fip_id, constants.FLOATINGIP_STATUS_DOWN) def get_ports_by_subnet(self, context, **kwargs): """DVR: RPC called by dvr-agent to get all ports for subnet.""" subnet_id = kwargs.get('subnet_id') LOG.debug("DVR: subnet_id: %s", subnet_id) return self.l3plugin.get_ports_under_dvr_connected_subnet( context, subnet_id) @db_api.retry_db_errors def get_agent_gateway_port(self, context, **kwargs): """Get Agent Gateway port for FIP. l3 agent expects an Agent Gateway Port to be returned for this query. 
""" network_id = kwargs.get('network_id') host = kwargs.get('host') admin_ctx = neutron_context.get_admin_context() agent_port = self.l3plugin.create_fip_agent_gw_port_if_not_exists( admin_ctx, network_id, host) self._ensure_host_set_on_port(admin_ctx, host, agent_port) LOG.debug('Agent Gateway port returned : %(agent_port)s with ' 'host %(host)s', {'agent_port': agent_port, 'host': host}) return agent_port @db_api.retry_db_errors def update_ha_routers_states(self, context, **kwargs): """Update states for HA routers. Get a map of router_id to its HA state on a host and update the DB. State must be in: ('active', 'standby'). """ states = kwargs.get('states') host = kwargs.get('host') LOG.debug('Updating HA routers states on host %s: %s', host, states) self.l3plugin.update_routers_states(context, states, host) def process_prefix_update(self, context, **kwargs): subnets = kwargs.get('subnets') updated_subnets = [] for subnet_id, prefix in subnets.items(): updated_subnets.append(self.plugin.update_subnet( context, subnet_id, {'subnet': {'cidr': prefix}})) return updated_subnets @db_api.retry_db_errors def delete_agent_gateway_port(self, context, **kwargs): """Delete Floatingip agent gateway port.""" network_id = kwargs.get('network_id') host = kwargs.get('host') admin_ctx = neutron_context.get_admin_context() self.l3plugin.delete_floatingip_agent_gateway_port( admin_ctx, host, network_id) def get_networks(self, context, filters=None, fields=None): """Retrieve and return a list of networks.""" # NOTE(adrianc): This RPC is being used by out of tree interface # drivers, MultiInterfaceDriver and IPoIBInterfaceDriver, located in # networking-mlnx. return self.plugin.get_networks( context, filters=filters, fields=fields) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/api/rpc/handlers/metadata_rpc.py0000644000175000017500000000304500000000000024772 0ustar00coreycorey00000000000000# Copyright (c) 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from neutron_lib import constants from neutron_lib.plugins import directory import oslo_messaging class MetadataRpcCallback(object): """Metadata agent RPC callback in plugin implementations. This class implements the server side of an rpc interface used by the metadata service to make calls back into the Neutron plugin. The client side is defined in neutron.agent.metadata.agent.MetadataPluginAPI. For more information about changing rpc interfaces, see doc/source/contributor/internals/rpc_api.rst. 
""" # 1.0 MetadataPluginAPI BASE_RPC_API_VERSION target = oslo_messaging.Target(version='1.0', namespace=constants.RPC_NAMESPACE_METADATA) @property def plugin(self): if not hasattr(self, '_plugin'): self._plugin = directory.get_plugin() return self._plugin def get_ports(self, context, filters): return self.plugin.get_ports(context, filters=filters) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/api/rpc/handlers/resources_rpc.py0000644000175000017500000002664300000000000025235 0ustar00coreycorey00000000000000# Copyright (c) 2015 Mellanox Technologies, Ltd # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections from neutron_lib.agent import topics from neutron_lib import constants from neutron_lib import exceptions from neutron_lib import rpc as n_rpc from oslo_log import helpers as log_helpers from oslo_log import log as logging import oslo_messaging from neutron._i18n import _ from neutron.api.rpc.callbacks.consumer import registry as cons_registry from neutron.api.rpc.callbacks import exceptions as rpc_exc from neutron.api.rpc.callbacks.producer import registry as prod_registry from neutron.api.rpc.callbacks import resources from neutron.api.rpc.callbacks import version_manager from neutron.objects import base as obj_base LOG = logging.getLogger(__name__) class ResourcesRpcError(exceptions.NeutronException): pass class InvalidResourceTypeClass(ResourcesRpcError): message = _("Invalid resource type %(resource_type)s") class ResourceNotFound(ResourcesRpcError): message = _("Resource %(resource_id)s of type %(resource_type)s " "not found") def _validate_resource_type(resource_type): if not resources.is_valid_resource_type(resource_type): raise InvalidResourceTypeClass(resource_type=resource_type) def _resource_to_class(resource_type): _validate_resource_type(resource_type) # we've already validated the resource type, so we are pretty sure the # class is there => no need to validate it specifically return resources.get_resource_cls(resource_type) def resource_type_versioned_topic(resource_type, version=None): """Return the topic for a resource type. If no version is provided, the latest version of the object will be used. """ _validate_resource_type(resource_type) cls = resources.get_resource_cls(resource_type) return topics.RESOURCE_TOPIC_PATTERN % {'resource_type': resource_type, 'version': version or cls.VERSION} class ResourcesPullRpcApi(object): """Agent-side RPC (stub) for agent-to-plugin interaction. This class implements the client side of an rpc interface. The server side can be found below: ResourcesPullRpcCallback. For more information on this RPC interface, see doc/source/devref/rpc_callbacks.rst. 
""" def __new__(cls): # make it a singleton if not hasattr(cls, '_instance'): cls._instance = super(ResourcesPullRpcApi, cls).__new__(cls) target = oslo_messaging.Target( topic=topics.PLUGIN, version='1.1', namespace=constants.RPC_NAMESPACE_RESOURCES) cls._instance.client = n_rpc.get_client(target) return cls._instance @log_helpers.log_method_call def pull(self, context, resource_type, resource_id): resource_type_cls = _resource_to_class(resource_type) cctxt = self.client.prepare() primitive = cctxt.call( context, 'pull', resource_type=resource_type, version=resource_type_cls.VERSION, resource_id=resource_id) if primitive is None: raise ResourceNotFound(resource_type=resource_type, resource_id=resource_id) return resource_type_cls.clean_obj_from_primitive(primitive) @log_helpers.log_method_call def bulk_pull(self, context, resource_type, filter_kwargs=None): resource_type_cls = _resource_to_class(resource_type) cctxt = self.client.prepare() primitives = cctxt.call( context, 'bulk_pull', resource_type=resource_type, version=resource_type_cls.VERSION, filter_kwargs=filter_kwargs) return [resource_type_cls.clean_obj_from_primitive(primitive) for primitive in primitives] class ResourcesPullRpcCallback(object): """Plugin-side RPC (implementation) for agent-to-plugin interaction. This class implements the server side of an rpc interface. The client side can be found above: ResourcesPullRpcApi. For more information on this RPC interface, see doc/source/devref/rpc_callbacks.rst. """ # History # 1.0 Initial version # 1.1 Added bulk_pull target = oslo_messaging.Target( version='1.1', namespace=constants.RPC_NAMESPACE_RESOURCES) @oslo_messaging.expected_exceptions(rpc_exc.CallbackNotFound) def pull(self, context, resource_type, version, resource_id): obj = prod_registry.pull(resource_type, resource_id, context=context) if obj: return obj.obj_to_primitive(target_version=version) @oslo_messaging.expected_exceptions(rpc_exc.CallbackNotFound) def bulk_pull(self, context, resource_type, version, filter_kwargs=None): filter_kwargs = filter_kwargs or {} resource_type_cls = _resource_to_class(resource_type) # TODO(kevinbenton): add in producer registry so producers can add # hooks to mangle these things like they can with 'pull'. return [obj.obj_to_primitive(target_version=version) for obj in resource_type_cls.get_objects(context, _pager=None, **filter_kwargs)] class ResourcesPushToServersRpcApi(object): """Publisher-side RPC (stub) for plugin-to-plugin fanout interaction. This class implements the client side of an rpc interface. The receiver side can be found below: ResourcesPushToServerRpcCallback. For more information on this RPC interface, see doc/source/devref/rpc_callbacks.rst. """ def __init__(self): target = oslo_messaging.Target( topic=topics.SERVER_RESOURCE_VERSIONS, version='1.0', namespace=constants.RPC_NAMESPACE_RESOURCES) self.client = n_rpc.get_client(target) @log_helpers.log_method_call def report_agent_resource_versions(self, context, agent_type, agent_host, version_map): """Fan out all the agent resource versions to other servers.""" cctxt = self.client.prepare(fanout=True) cctxt.cast(context, 'report_agent_resource_versions', agent_type=agent_type, agent_host=agent_host, version_map=version_map) class ResourcesPushToServerRpcCallback(object): """Receiver-side RPC (implementation) for plugin-to-plugin interaction. This class implements the receiver side of an rpc interface. The client side can be found above: ResourcePushToServerRpcApi. 
For more information on this RPC interface, see doc/source/devref/rpc_callbacks.rst. """ # History # 1.0 Initial version target = oslo_messaging.Target( version='1.0', namespace=constants.RPC_NAMESPACE_RESOURCES) @log_helpers.log_method_call def report_agent_resource_versions(self, context, agent_type, agent_host, version_map): consumer_id = version_manager.AgentConsumer(agent_type=agent_type, host=agent_host) version_manager.update_versions(consumer_id, version_map) class ResourcesPushRpcApi(object): """Plugin-side RPC for plugin-to-agents interaction. This interface is designed to push versioned object updates to interested agents using fanout topics. This class implements the caller side of an rpc interface. The receiver side can be found below: ResourcesPushRpcCallback. """ def __init__(self): target = oslo_messaging.Target( namespace=constants.RPC_NAMESPACE_RESOURCES) self.client = n_rpc.get_client(target) def _prepare_object_fanout_context(self, obj, resource_version, rpc_version): """Prepare fanout context, one topic per object type.""" obj_topic = resource_type_versioned_topic(obj.obj_name(), resource_version) return self.client.prepare(fanout=True, topic=obj_topic, version=rpc_version) @staticmethod def _classify_resources_by_type(resource_list): resources_by_type = collections.defaultdict(list) for resource in resource_list: resource_type = resources.get_resource_type(resource) resources_by_type[resource_type].append(resource) return resources_by_type def push(self, context, resource_list, event_type): """Push an event and list of resources to agents, batched per type. When a list of different resource types is passed to this method, the push will be sent as separate individual list pushes, one per resource type. """ resources_by_type = self._classify_resources_by_type(resource_list) LOG.debug( "Pushing event %s for resources: %s", event_type, {t: ["ID=%s,revision_number=%s" % ( getattr(obj, 'id', None), getattr(obj, 'revision_number', None)) for obj in resources_by_type[t]] for t in resources_by_type}) for resource_type, type_resources in resources_by_type.items(): self._push(context, resource_type, type_resources, event_type) def _push(self, context, resource_type, resource_list, event_type): """Push an event and list of resources of the same type to agents.""" _validate_resource_type(resource_type) for version in version_manager.get_resource_versions(resource_type): cctxt = self._prepare_object_fanout_context( resource_list[0], version, rpc_version='1.1') dehydrated_resources = [ resource.obj_to_primitive(target_version=version) for resource in resource_list] cctxt.cast(context, 'push', resource_list=dehydrated_resources, event_type=event_type) class ResourcesPushRpcCallback(object): """Agent-side RPC for plugin-to-agents interaction. This class implements the receiver for notification about versioned objects resource updates used by neutron.api.rpc.callbacks. You can find the caller side in ResourcesPushRpcApi. 
""" # History # 1.0 Initial version # 1.1 push method introduces resource_list support target = oslo_messaging.Target(version='1.1', namespace=constants.RPC_NAMESPACE_RESOURCES) @oslo_messaging.expected_exceptions(rpc_exc.CallbackNotFound) def push(self, context, **kwargs): """Push receiver, will always receive resources of the same type.""" resource_list = kwargs['resource_list'] event_type = kwargs['event_type'] resource_objs = [ obj_base.NeutronObject.clean_obj_from_primitive(resource) for resource in resource_list] resource_type = resources.get_resource_type(resource_objs[0]) cons_registry.push(context, resource_type, resource_objs, event_type) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/api/rpc/handlers/securitygroups_rpc.py0000644000175000017500000003627700000000000026336 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections from neutron_lib.agent import topics from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib import constants from neutron_lib.plugins import directory from neutron_lib import rpc as n_rpc from neutron_lib.utils import net from oslo_log import log as logging import oslo_messaging from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import resources_rpc from neutron.db import securitygroups_rpc_base as sg_rpc_base LOG = logging.getLogger(__name__) class SecurityGroupServerRpcApi(object): """RPC client for security group methods in the plugin. This class implements the client side of an rpc interface. This interface is used by agents to call security group related methods implemented on the plugin side. The other side of this interface is defined in SecurityGroupServerRpcCallback. For more information about changing rpc interfaces, see doc/source/contributor/internals/rpc_api.rst. """ def __init__(self, topic): target = oslo_messaging.Target( topic=topic, version='1.0', namespace=constants.RPC_NAMESPACE_SECGROUP) self.client = n_rpc.get_client(target) def security_group_rules_for_devices(self, context, devices): LOG.debug("Get security group rules " "for devices via rpc %r", devices) cctxt = self.client.prepare(version='1.1') return cctxt.call(context, 'security_group_rules_for_devices', devices=devices) def security_group_info_for_devices(self, context, devices): LOG.debug("Get security group information for devices via rpc %r", devices) cctxt = self.client.prepare(version='1.2') return cctxt.call(context, 'security_group_info_for_devices', devices=devices) class SecurityGroupServerRpcCallback(object): """Callback for SecurityGroup agent RPC in plugin implementations. This class implements the server side of an rpc interface. The client side can be found in SecurityGroupServerRpcApi. For more information on changing rpc interfaces, see doc/source/contributor/internals/rpc_api.rst. 
""" # API version history: # 1.1 - Initial version # 1.2 - security_group_info_for_devices introduced as an optimization # NOTE: target must not be overridden in subclasses # to keep RPC API version consistent across plugins. target = oslo_messaging.Target(version='1.2', namespace=constants.RPC_NAMESPACE_SECGROUP) @property def plugin(self): return directory.get_plugin() def _get_devices_info(self, context, devices): return dict( (port['id'], port) for port in self.plugin.get_ports_from_devices(context, devices) if port and not net.is_port_trusted(port) ) def security_group_rules_for_devices(self, context, **kwargs): """Callback method to return security group rules for each port. also convert remote_group_id rule to source_ip_prefix and dest_ip_prefix rule :params devices: list of devices :returns: port correspond to the devices with security group rules """ devices_info = kwargs.get('devices') ports = self._get_devices_info(context, devices_info) return self.plugin.security_group_rules_for_ports(context, ports) def security_group_info_for_devices(self, context, **kwargs): """Return security group information for requested devices. :params devices: list of devices :returns: sg_info{ 'security_groups': {sg_id: [rule1, rule2]} 'sg_member_ips': {sg_id: {'IPv4': set(), 'IPv6': set()}} 'devices': {device_id: {device_info}} } Note that sets are serialized into lists by rpc code. """ devices_info = kwargs.get('devices') ports = self._get_devices_info(context, devices_info) return self.plugin.security_group_info_for_ports(context, ports) class SecurityGroupAgentRpcApiMixin(object): """RPC client for security group methods to the agent. This class implements the client side of an rpc interface. This interface is used by plugins to call security group methods implemented on the agent side. The other side of this interface can be found in SecurityGroupAgentRpcCallbackMixin. For more information about changing rpc interfaces, see doc/source/contributor/internals/rpc_api.rst. """ # history # 1.1 Support Security Group RPC SG_RPC_VERSION = "1.1" def _get_security_group_topic(self): return topics.get_topic_name(self.topic, topics.SECURITY_GROUP, topics.UPDATE) def security_groups_rule_updated(self, context, security_groups): """Notify rule updated security groups.""" if not security_groups: return cctxt = self.client.prepare(version=self.SG_RPC_VERSION, topic=self._get_security_group_topic(), fanout=True) cctxt.cast(context, 'security_groups_rule_updated', security_groups=security_groups) def security_groups_member_updated(self, context, security_groups): """Notify member updated security groups.""" if not security_groups: return cctxt = self.client.prepare(version=self.SG_RPC_VERSION, topic=self._get_security_group_topic(), fanout=True) cctxt.cast(context, 'security_groups_member_updated', security_groups=security_groups) class SecurityGroupAgentRpcCallbackMixin(object): """A mix-in that enable SecurityGroup support in agent implementations. This class implements the server side of an rpc interface. The client side can be found in SecurityGroupAgentRpcApiMixin. For more information on changing rpc interfaces, see doc/source/contributor/internals/rpc_api.rst. The sg_agent reference implementation is available in neutron/agent """ # mix-in object should be have sg_agent sg_agent = None def _security_groups_agent_not_set(self): LOG.warning("Security group agent binding currently not set. 
" "This should be set by the end of the init " "process.") def security_groups_rule_updated(self, context, **kwargs): """Callback for security group rule update. :param security_groups: list of updated security_groups """ security_groups = kwargs.get('security_groups', []) LOG.debug("Security group rule updated on remote: %s", security_groups) if not self.sg_agent: return self._security_groups_agent_not_set() self.sg_agent.security_groups_rule_updated(security_groups) def security_groups_member_updated(self, context, **kwargs): """Callback for security group member update. :param security_groups: list of updated security_groups """ security_groups = kwargs.get('security_groups', []) LOG.debug("Security group member updated on remote: %s", security_groups) if not self.sg_agent: return self._security_groups_agent_not_set() self.sg_agent.security_groups_member_updated(security_groups) class SecurityGroupServerAPIShim(sg_rpc_base.SecurityGroupInfoAPIMixin): """Agent-side replacement for SecurityGroupServerRpcApi using local data. This provides the same methods as SecurityGroupServerRpcApi but it reads from the updates delivered to the push notifications cache rather than calling the server. """ def __init__(self, rcache): self.rcache = rcache registry.subscribe(self._clear_child_sg_rules, 'SecurityGroup', events.AFTER_DELETE) registry.subscribe(self._add_child_sg_rules, 'SecurityGroup', events.AFTER_UPDATE) # set this attr so agent can adjust the timeout of the client self.client = resources_rpc.ResourcesPullRpcApi().client def register_legacy_sg_notification_callbacks(self, sg_agent): self._sg_agent = sg_agent registry.subscribe(self._handle_sg_rule_delete, 'SecurityGroupRule', events.AFTER_DELETE) registry.subscribe(self._handle_sg_rule_update, 'SecurityGroupRule', events.AFTER_UPDATE) registry.subscribe(self._handle_sg_member_delete, 'Port', events.AFTER_DELETE) registry.subscribe(self._handle_sg_member_update, 'Port', events.AFTER_UPDATE) def security_group_info_for_devices(self, context, devices): ports = self._get_devices_info(context, devices) result = self.security_group_info_for_ports(context, ports) return result def security_group_rules_for_devices(self, context, devices): # this is the legacy method that should never be called since # security_group_info_for_devices will never throw an unsupported # error. raise NotImplementedError() def _add_child_sg_rules(self, rtype, event, trigger, context, updated, **kwargs): # whenever we receive a full security group, add all child rules # because the server won't emit events for the individual rules on # creation. for rule in updated.rules: self.rcache.record_resource_update(context, 'SecurityGroupRule', rule) def _clear_child_sg_rules(self, rtype, event, trigger, context, existing, **kwargs): if not existing: return # the server can delete an entire security group without notifying # about the security group rules. so we need to emulate a rule deletion # when a security group is removed. 
filters = {'security_group_id': (existing.id, )} for rule in self.rcache.get_resources('SecurityGroupRule', filters): self.rcache.record_resource_delete(context, 'SecurityGroupRule', rule.id) def _handle_sg_rule_delete(self, rtype, event, trigger, context, existing, **kwargs): if not existing: return sg_id = existing.security_group_id self._sg_agent.security_groups_rule_updated([sg_id]) def _handle_sg_rule_update(self, rtype, event, trigger, context, existing, updated, **kwargs): sg_id = updated.security_group_id self._sg_agent.security_groups_rule_updated([sg_id]) def _handle_sg_member_delete(self, rtype, event, trigger, context, existing, **kwargs): # received on port delete sgs = set(existing.security_group_ids) if existing else set() if sgs: self._sg_agent.security_groups_member_updated(sgs) def _handle_sg_member_update(self, rtype, event, trigger, context, existing, updated, changed_fields, **kwargs): # received on port update sgs = set(existing.security_group_ids) if existing else set() if not changed_fields.intersection({'security_group_ids', 'fixed_ips', 'allowed_address_pairs'}): # none of the relevant fields to SG calculations changed return sgs.update({sg_id for sg_id in updated.security_group_ids}) if sgs: self._sg_agent.security_groups_member_updated(sgs) def _get_devices_info(self, context, devices): # NOTE(kevinbenton): this format is required by the sg code, it is # defined in get_port_from_device and mimics # make_port_dict_with_security_groups in ML2 db result = {} for device in devices: ovo = self.rcache.get_resource_by_id('Port', device) if not ovo: continue port = ovo.to_dict() # the caller expects trusted ports to be excluded from the result if net.is_port_trusted(port): continue port['security_groups'] = list(ovo.security_group_ids) port['security_group_rules'] = [] port['security_group_source_groups'] = [] port['fixed_ips'] = [str(f['ip_address']) for f in port['fixed_ips']] # NOTE(kevinbenton): this id==device is only safe for OVS. 
a lookup # will be required for linux bridge and others that don't have the # full port UUID port['device'] = port['id'] port['port_security_enabled'] = getattr( ovo.security, 'port_security_enabled', True) result[device] = port return result def _select_ips_for_remote_group(self, context, remote_group_ids): if not remote_group_ids: return {} ips_by_group = {rg: set() for rg in remote_group_ids} filters = {'security_group_ids': tuple(remote_group_ids)} for p in self.rcache.get_resources('Port', filters): port_ips = [str(addr.ip_address) for addr in p.fixed_ips + p.allowed_address_pairs] for sg_id in p.security_group_ids: if sg_id in ips_by_group: ips_by_group[sg_id].update(set(port_ips)) return ips_by_group def _select_rules_for_ports(self, context, ports): if not ports: return [] results = [] sg_ids = set((sg_id for p in ports.values() for sg_id in p['security_group_ids'])) rules_by_sgid = collections.defaultdict(list) for sg_id in sg_ids: filters = {'security_group_id': (sg_id, )} for r in self.rcache.get_resources('SecurityGroupRule', filters): rules_by_sgid[r.security_group_id].append(r) for p in ports.values(): for sg_id in p['security_group_ids']: for rule in rules_by_sgid[sg_id]: results.append((p['id'], rule.to_dict())) return results def _select_sg_ids_for_ports(self, context, ports): sg_ids = set((sg_id for p in ports.values() for sg_id in p['security_group_ids'])) return [(sg_id, ) for sg_id in sg_ids] def _is_security_group_stateful(self, context, sg_id): sg = self.rcache.get_resource_by_id(resources.SECURITYGROUP, sg_id) return sg.stateful ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2150438 neutron-16.0.0.0b2.dev214/neutron/api/v2/0000755000175000017500000000000000000000000017735 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/api/v2/__init__.py0000644000175000017500000000000000000000000022034 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/api/v2/base.py0000644000175000017500000011161300000000000021224 0ustar00coreycorey00000000000000# Copyright (c) 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
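# Illustrative sketch (not part of base.py): the remote-group expansion
# that SecurityGroupServerAPIShim._select_ips_for_remote_group above
# performs, redone with plain dicts instead of the agent's resource cache;
# the helper names and the sample port data are made up.
def _example_ips_by_remote_group(ports, remote_group_ids):
    # Map each remote security group id to the set of fixed IPs and
    # allowed-address-pair IPs of the ports that are members of it.
    ips_by_group = {rg: set() for rg in remote_group_ids}
    for port in ports:
        port_ips = port['fixed_ips'] + port['allowed_address_pairs']
        for sg_id in port['security_group_ids']:
            if sg_id in ips_by_group:
                ips_by_group[sg_id].update(port_ips)
    return ips_by_group


def _demo_remote_group_expansion():
    # Two ports are members of sg-1; the caller turns the aggregated IPs
    # into concrete rules that replace remote_group_id references.
    # Expected result: {'sg-1': {'10.0.0.3', '10.0.0.4', '10.0.0.100'}}
    return _example_ips_by_remote_group(
        [{'fixed_ips': ['10.0.0.3'], 'allowed_address_pairs': [],
          'security_group_ids': ['sg-1']},
         {'fixed_ips': ['10.0.0.4'],
          'allowed_address_pairs': ['10.0.0.100'],
          'security_group_ids': ['sg-1']}],
        ['sg-1'])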
import collections import copy from neutron_lib.api import attributes from neutron_lib.api import faults from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib import constants from neutron_lib.db import api as db_api from neutron_lib import exceptions from neutron_lib import rpc as n_rpc from neutron_lib.services import constants as service_const from oslo_log import log as logging from oslo_policy import policy as oslo_policy from oslo_utils import excutils import webob.exc from neutron._i18n import _ from neutron.api import api_common from neutron.api.v2 import resource as wsgi_resource from neutron import policy from neutron import quota from neutron.quota import resource_registry LOG = logging.getLogger(__name__) class Controller(object): LIST = 'list' SHOW = 'show' CREATE = 'create' UPDATE = 'update' DELETE = 'delete' @property def plugin(self): return self._plugin @property def resource(self): return self._resource @property def attr_info(self): return self._attr_info @property def member_actions(self): return self._member_actions @property def allow_pagination(self): return self._allow_pagination @property def allow_sorting(self): return self._allow_sorting def _init_policy_attrs(self): """Create the list of attributes required by policy. If the attribute map contains a tenant_id policy, then include project_id to bring the resource into the brave new world. :return: sorted list of attributes required by policy """ policy_attrs = {name for (name, info) in self._attr_info.items() if info.get('required_by_policy')} if 'tenant_id' in policy_attrs: policy_attrs.add('project_id') # Could use list(), but sorted() makes testing easier. return sorted(policy_attrs) def __init__(self, plugin, collection, resource, attr_info, allow_bulk=False, member_actions=None, parent=None, allow_pagination=False, allow_sorting=False): if member_actions is None: member_actions = [] self._plugin = plugin self._collection = collection.replace('-', '_') self._resource = resource.replace('-', '_') self._attr_info = attr_info self._allow_bulk = allow_bulk self._allow_pagination = allow_pagination self._allow_sorting = allow_sorting self._native_bulk = self._is_native_bulk_supported() self._native_pagination = self._is_native_pagination_supported() self._native_sorting = self._is_native_sorting_supported() self._filter_validation = self._is_filter_validation_supported() self._policy_attrs = self._init_policy_attrs() self._notifier = n_rpc.get_notifier('network') self._member_actions = member_actions self._primary_key = self._get_primary_key() if self._allow_pagination and self._native_pagination: # Native pagination needs native sorting support if not self._native_sorting: raise exceptions.Invalid( _("Native pagination depends on native sorting") ) if not self._allow_sorting: LOG.info("Allow sorting is enabled because native " "pagination requires native sorting") self._allow_sorting = True self.parent = parent if parent: self._parent_id_name = '%s_id' % parent['member_name'] parent_part = '_%s' % parent['member_name'] else: self._parent_id_name = None parent_part = '' self._plugin_handlers = { self.LIST: 'get%s_%s' % (parent_part, self._collection), self.SHOW: 'get%s_%s' % (parent_part, self._resource) } for action in [self.CREATE, self.UPDATE, self.DELETE]: self._plugin_handlers[action] = '%s%s_%s' % (action, parent_part, self._resource) def _get_primary_key(self, default_primary_key='id'): for key, value in self._attr_info.items(): if value.get('primary_key',
False): return key return default_primary_key def _is_native_bulk_supported(self): native_bulk_attr_name = ("_%s__native_bulk_support" % self._plugin.__class__.__name__) return getattr(self._plugin, native_bulk_attr_name, False) def _is_native_pagination_supported(self): return api_common.is_native_pagination_supported(self._plugin) def _is_native_sorting_supported(self): return api_common.is_native_sorting_supported(self._plugin) def _is_filter_validation_supported(self): return api_common.is_filter_validation_supported(self._plugin) def _exclude_attributes_by_policy(self, context, data): """Identifies attributes to exclude according to authZ policies. Return a list of attribute names which should be stripped from the response returned to the user because the user is not authorized to see them. """ attributes_to_exclude = [] for attr_name in data.keys(): # TODO(amotoki): For now, all attribute maps have tenant_id and # determine excluded attributes based on tenant_id. # We need to migrate tenant_id to project_id later # as attr_info is referred to in various places and we need # to check all logic carefully. if attr_name == 'project_id': continue attr_data = self._attr_info.get(attr_name) if attr_data and attr_data['is_visible']: if policy.check( context, '%s:%s' % (self._plugin_handlers[self.SHOW], attr_name), data, might_not_exist=True, pluralized=self._collection): # this attribute is visible, check next one continue # if the code reaches this point then either the policy check # failed or the attribute was not visible in the first place attributes_to_exclude.append(attr_name) # TODO(amotoki): As mentioned in the above TODO, # we treat project_id and tenant_id equivalently. # This should be migrated to project_id in Ocata. if attr_name == 'tenant_id': attributes_to_exclude.append('project_id') return attributes_to_exclude def _view(self, context, data, fields_to_strip=None): """Build a view of an API resource. :param context: the neutron context :param data: the object for which a view is being created :param fields_to_strip: attributes to remove from the view :returns: a view of the object which includes only attributes visible according to API resource declaration and authZ policies.
""" fields_to_strip = ((fields_to_strip or []) + self._exclude_attributes_by_policy(context, data)) return self._filter_attributes(data, fields_to_strip) def _filter_attributes(self, data, fields_to_strip=None): if not fields_to_strip: return data return dict(item for item in data.items() if (item[0] not in fields_to_strip)) def _do_field_list(self, original_fields): fields_to_add = None # don't do anything if fields were not specified in the request if original_fields: fields_to_add = [attr for attr in self._policy_attrs if attr not in original_fields] original_fields.extend(self._policy_attrs) return original_fields, fields_to_add def __getattr__(self, name): if name in self._member_actions: @db_api.retry_db_errors def _handle_action(request, id, **kwargs): arg_list = [request.context, id] # Ensure policy engine is initialized policy.init() # Fetch the resource and verify if the user can access it try: parent_id = kwargs.get(self._parent_id_name) resource = self._item(request, id, do_authz=True, field_list=None, parent_id=parent_id) except oslo_policy.PolicyNotAuthorized: msg = _('The resource could not be found.') raise webob.exc.HTTPNotFound(msg) body = kwargs.pop('body', None) # Explicit comparison with None to distinguish from {} if body is not None: arg_list.append(body) # It is ok to raise a 403 because accessibility to the # object was checked earlier in this method policy.enforce(request.context, name, resource, pluralized=self._collection) ret_value = getattr(self._plugin, name)(*arg_list, **kwargs) # It is simply impossible to predict whether one of this # actions alters resource usage. For instance a tenant port # is created when a router interface is added. Therefore it is # important to mark as dirty resources whose counters have # been altered by this operation resource_registry.set_resources_dirty(request.context) return ret_value return _handle_action else: raise AttributeError() def _get_pagination_helper(self, request): if self._allow_pagination and self._native_pagination: return api_common.PaginationNativeHelper(request, self._primary_key) elif self._allow_pagination: return api_common.PaginationEmulatedHelper(request, self._primary_key) return api_common.NoPaginationHelper(request, self._primary_key) def _get_sorting_helper(self, request): if self._allow_sorting and self._native_sorting: return api_common.SortingNativeHelper(request, self._attr_info) elif self._allow_sorting: return api_common.SortingEmulatedHelper(request, self._attr_info) return api_common.NoSortingHelper(request, self._attr_info) def _items(self, request, do_authz=False, parent_id=None): """Retrieves and formats a list of elements of the requested entity.""" # NOTE(salvatore-orlando): The following ensures that fields which # are needed for authZ policy validation are not stripped away by the # plugin before returning. 
original_fields, fields_to_add = self._do_field_list( api_common.list_args(request, 'fields')) filters = api_common.get_filters( request, self._attr_info, ['fields', 'sort_key', 'sort_dir', 'limit', 'marker', 'page_reverse'], is_filter_validation_supported=self._filter_validation) kwargs = {'filters': filters, 'fields': original_fields} sorting_helper = self._get_sorting_helper(request) pagination_helper = self._get_pagination_helper(request) sorting_helper.update_args(kwargs) sorting_helper.update_fields(original_fields, fields_to_add) pagination_helper.update_args(kwargs) pagination_helper.update_fields(original_fields, fields_to_add) if parent_id: kwargs[self._parent_id_name] = parent_id obj_getter = getattr(self._plugin, self._plugin_handlers[self.LIST]) obj_list = obj_getter(request.context, **kwargs) obj_list = sorting_helper.sort(obj_list) obj_list = pagination_helper.paginate(obj_list) # Check authz if do_authz: # FIXME(salvatore-orlando): obj_getter might return references to # other resources. Must check authZ on them too. # Omit items from list that should not be visible tmp_list = [] for obj in obj_list: self._set_parent_id_into_ext_resources_request( request, obj, parent_id, is_get=True) if policy.check( request.context, self._plugin_handlers[self.SHOW], obj, plugin=self._plugin, pluralized=self._collection): tmp_list.append(obj) obj_list = tmp_list # Use the first element in the list for discriminating which attributes # should be filtered out because of authZ policies # fields_to_add contains a list of attributes added for request policy # checks but that were not required by the user. They should # therefore be stripped fields_to_strip = fields_to_add or [] if obj_list: fields_to_strip += self._exclude_attributes_by_policy( request.context, obj_list[0]) collection = {self._collection: [self._filter_attributes( obj, fields_to_strip=fields_to_strip) for obj in obj_list]} pagination_links = pagination_helper.get_links(obj_list) if pagination_links: collection[self._collection + "_links"] = pagination_links # Synchronize usage trackers, if needed resource_registry.resync_resource( request.context, self._resource, request.context.tenant_id) return collection def _item(self, request, id, do_authz=False, field_list=None, parent_id=None): """Retrieves and formats a single element of the requested entity.""" kwargs = {'fields': field_list} action = self._plugin_handlers[self.SHOW] if parent_id: kwargs[self._parent_id_name] = parent_id obj_getter = getattr(self._plugin, action) obj = obj_getter(request.context, id, **kwargs) self._set_parent_id_into_ext_resources_request( request, obj, parent_id, is_get=True) # Check authz # FIXME(salvatore-orlando): obj_getter might return references to # other resources. Must check authZ on them too. if do_authz: policy.enforce(request.context, action, obj, pluralized=self._collection) return obj @db_api.retry_db_errors def index(self, request, **kwargs): """Returns a list of the requested entities.""" parent_id = kwargs.get(self._parent_id_name) # Ensure policy engine is initialized policy.init() return self._items(request, True, parent_id) @db_api.retry_db_errors def show(self, request, id, **kwargs): """Returns detailed information about the requested entity.""" try: # NOTE(salvatore-orlando): The following ensures that fields # which are needed for authZ policy validation are not stripped # away by the plugin before returning.
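# Editorial note: the stripping mentioned above ends up in
# _filter_attributes(), which is a plain dict filter; a minimal sketch with
# assumed values:
#   data = {'id': 'p1', 'name': 'port1', 'internal_attr': 'x'}
#   fields_to_strip = ['internal_attr']
#   dict(item for item in data.items() if item[0] not in fields_to_strip)
#   # -> {'id': 'p1', 'name': 'port1'}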
field_list, added_fields = self._do_field_list( api_common.list_args(request, "fields")) parent_id = kwargs.get(self._parent_id_name) # Ensure policy engine is initialized policy.init() return {self._resource: self._view(request.context, self._item(request, id, do_authz=True, field_list=field_list, parent_id=parent_id), fields_to_strip=added_fields)} except oslo_policy.PolicyNotAuthorized: # To avoid giving away information, pretend that it # doesn't exist msg = _('The resource could not be found.') raise webob.exc.HTTPNotFound(msg) def _emulate_bulk_create(self, obj_creator, request, body, parent_id=None): objs = [] try: for item in body[self._collection]: kwargs = {self._resource: item} if parent_id: kwargs[self._parent_id_name] = parent_id fields_to_strip = self._exclude_attributes_by_policy( request.context, item) objs.append(self._filter_attributes( obj_creator(request.context, **kwargs), fields_to_strip=fields_to_strip)) return objs # Note(salvatore-orlando): broad catch as in theory a plugin # could raise any kind of exception except Exception: with excutils.save_and_reraise_exception(): for obj in objs: obj_deleter = getattr(self._plugin, self._plugin_handlers[self.DELETE]) try: kwargs = ({self._parent_id_name: parent_id} if parent_id else {}) obj_deleter(request.context, obj['id'], **kwargs) except Exception: # broad catch as our only purpose is to log the # exception LOG.exception("Unable to undo add for " "%(resource)s %(id)s", {'resource': self._resource, 'id': obj['id']}) # TODO(salvatore-orlando): The object being processed when the # plugin raised might have been created or not in the db. # We need a way to ensure that if it has been created, # it is then deleted def create(self, request, body=None, **kwargs): self._notifier.info(request.context, self._resource + '.create.start', body) return self._create(request, body, **kwargs) @db_api.retry_db_errors def _create(self, request, body, **kwargs): """Creates a new instance of the requested entity.""" parent_id = kwargs.get(self._parent_id_name) body = Controller.prepare_request_body(request.context, body, True, self._resource, self._attr_info, allow_bulk=self._allow_bulk) action = self._plugin_handlers[self.CREATE] # Check authz if self._collection in body: # Have to account for bulk create items = body[self._collection] else: items = [body] # Ensure policy engine is initialized policy.init() # Store requested resource amounts, grouping them by tenant # This won't work with multiple resources. However, because of the # current structure of this controller there will hardly be more than # one resource for which reservations are being made request_deltas = collections.defaultdict(int) for item in items: self._validate_network_tenant_ownership(request, item[self._resource]) # For the ext resources policy check we support two cases: the # parent_id may be in the request body, or it may be in the # request URL, in which case we get it from kwargs.
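# Editorial note (assumed values): for a child collection routed as
# /parents/<parent_id>/children, self._parent_id_name would be 'parent_id'
# and the URL case resolves via kwargs, e.g.:
#   kwargs = {'parent_id': 'uuid-of-parent'}
#   parent_id = kwargs.get(self._parent_id_name)  # -> 'uuid-of-parent'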
self._set_parent_id_into_ext_resources_request( request, item[self._resource], parent_id) policy.enforce(request.context, action, item[self._resource], pluralized=self._collection) if 'tenant_id' not in item[self._resource]: # no tenant_id - no quota check continue tenant_id = item[self._resource]['tenant_id'] request_deltas[tenant_id] += 1 # Quota enforcement reservations = [] try: for (tenant, delta) in request_deltas.items(): reservation = quota.QUOTAS.make_reservation( request.context, tenant, {self._resource: delta}, self._plugin) reservations.append(reservation) except exceptions.QuotaResourceUnknown as e: # We don't want to quota this resource LOG.debug(e) def notify(create_result): # Ensure usage trackers for all resources affected by this API # operation are marked as dirty with db_api.CONTEXT_WRITER.using(request.context): # Commit the reservation(s) for reservation in reservations: quota.QUOTAS.commit_reservation( request.context, reservation.reservation_id) resource_registry.set_resources_dirty(request.context) notifier_method = self._resource + '.create.end' self._notifier.info(request.context, notifier_method, create_result) registry.publish(self._resource, events.BEFORE_RESPONSE, self, payload=events.APIEventPayload( request.context, notifier_method, action, request_body=body, states=({}, create_result,), collection_name=self._collection)) return create_result def do_create(body, bulk=False, emulated=False): kwargs = {self._parent_id_name: parent_id} if parent_id else {} if bulk and not emulated: obj_creator = getattr(self._plugin, "%s_bulk" % action) else: obj_creator = getattr(self._plugin, action) try: if emulated: return self._emulate_bulk_create(obj_creator, request, body, parent_id) else: if self._collection in body: # This is weird but fixing it requires changes to the # plugin interface kwargs.update({self._collection: body}) else: kwargs.update({self._resource: body}) return obj_creator(request.context, **kwargs) except Exception: # In case of failure the plugin will always raise an # exception. 
Cancel the reservation with excutils.save_and_reraise_exception(): for reservation in reservations: quota.QUOTAS.cancel_reservation( request.context, reservation.reservation_id) if self._collection in body and self._native_bulk: # plugin does atomic bulk create operations objs = do_create(body, bulk=True) # Use first element of list to discriminate attributes which # should be removed because of authZ policies fields_to_strip = self._exclude_attributes_by_policy( request.context, objs[0]) return notify({self._collection: [self._filter_attributes( obj, fields_to_strip=fields_to_strip) for obj in objs]}) else: if self._collection in body: # Emulate atomic bulk behavior objs = do_create(body, bulk=True, emulated=True) return notify({self._collection: objs}) else: obj = do_create(body) return notify({self._resource: self._view(request.context, obj)}) def delete(self, request, id, **kwargs): """Deletes the specified entity.""" if request.body: msg = _('Request body is not supported in DELETE.') raise webob.exc.HTTPBadRequest(msg) self._notifier.info(request.context, self._resource + '.delete.start', {self._resource + '_id': id}) return self._delete(request, id, **kwargs) @db_api.retry_db_errors def _delete(self, request, id, **kwargs): action = self._plugin_handlers[self.DELETE] # Check authz policy.init() parent_id = kwargs.get(self._parent_id_name) obj = self._item(request, id, parent_id=parent_id) try: policy.enforce(request.context, action, obj, pluralized=self._collection) except oslo_policy.PolicyNotAuthorized: # To avoid giving away information, pretend that it # doesn't exist if policy does not authorize SHOW with excutils.save_and_reraise_exception() as ctxt: if not policy.check(request.context, self._plugin_handlers[self.SHOW], obj, pluralized=self._collection): ctxt.reraise = False msg = _('The resource could not be found.') raise webob.exc.HTTPNotFound(msg) obj_deleter = getattr(self._plugin, action) obj_deleter(request.context, id, **kwargs) # A delete operation usually alters resource usage, so mark affected # usage trackers as dirty resource_registry.set_resources_dirty(request.context) notifier_method = self._resource + '.delete.end' result = {self._resource: self._view(request.context, obj)} notifier_payload = {self._resource + '_id': id} notifier_payload.update(result) self._notifier.info(request.context, notifier_method, notifier_payload) registry.publish(self._resource, events.BEFORE_RESPONSE, self, payload=events.APIEventPayload( request.context, notifier_method, action, states=({}, obj, result,), collection_name=self._collection)) def update(self, request, id, body=None, **kwargs): """Updates the specified entity's attributes.""" try: payload = body.copy() except AttributeError: msg = _("Invalid format: %s") % request.body raise exceptions.BadRequest(resource='body', msg=msg) payload['id'] = id self._notifier.info(request.context, self._resource + '.update.start', payload) return self._update(request, id, body, **kwargs) @db_api.retry_db_errors def _update(self, request, id, body, **kwargs): body = Controller.prepare_request_body(request.context, body, False, self._resource, self._attr_info, allow_bulk=self._allow_bulk) action = self._plugin_handlers[self.UPDATE] # Load object to check authz # but pass only attributes in the original body and required # by the policy engine to the policy 'brain' field_list = [name for (name, value) in self._attr_info.items() if (value.get('required_by_policy') or value.get('primary_key') or 'default' not in value)] # Ensure policy engine 
is initialized policy.init() parent_id = kwargs.get(self._parent_id_name) # If the parent_id exists, we should get orig_obj with # self._parent_id_name field. if parent_id and self._parent_id_name not in field_list: field_list.append(self._parent_id_name) orig_obj = self._item(request, id, field_list=field_list, parent_id=parent_id) orig_object_copy = copy.copy(orig_obj) orig_obj.update(body[self._resource]) # Make a list of attributes to be updated to inform the policy engine # which attributes are set explicitly so that it can distinguish them # from the ones that are set to their default values. orig_obj[constants.ATTRIBUTES_TO_UPDATE] = body[self._resource].keys() # Then get the ext_parent_id and format it to # ext_parent_<parent_resource>_id if self._parent_id_name in orig_obj: self._set_parent_id_into_ext_resources_request( request, orig_obj, parent_id) try: policy.enforce(request.context, action, orig_obj, pluralized=self._collection) except oslo_policy.PolicyNotAuthorized: # To avoid giving away information, pretend that it # doesn't exist if policy does not authorize SHOW with excutils.save_and_reraise_exception() as ctxt: if not policy.check(request.context, self._plugin_handlers[self.SHOW], orig_obj, pluralized=self._collection): ctxt.reraise = False msg = _('The resource could not be found.') raise webob.exc.HTTPNotFound(msg) if self._native_bulk and hasattr(self._plugin, "%s_bulk" % action): obj_updater = getattr(self._plugin, "%s_bulk" % action) else: obj_updater = getattr(self._plugin, action) kwargs = {self._resource: body} if parent_id: kwargs[self._parent_id_name] = parent_id obj = obj_updater(request.context, id, **kwargs) # Usually an update operation does not alter resource usage, but as # there might be side effects it might be worth checking for changes # in resource usage here as well (e.g.: a tenant port is created when a # router interface is added) resource_registry.set_resources_dirty(request.context) result = {self._resource: self._view(request.context, obj)} notifier_method = self._resource + '.update.end' self._notifier.info(request.context, notifier_method, result) registry.publish(self._resource, events.BEFORE_RESPONSE, self, payload=events.APIEventPayload( request.context, notifier_method, action, request_body=body, states=(orig_object_copy, result,), collection_name=self._collection)) return result @staticmethod def prepare_request_body(context, body, is_create, resource, attr_info, allow_bulk=False): """Verifies required attributes are in request body. It also checks that an attribute is only specified if it is allowed for the given operation (create/update). Attributes with default values are considered optional. The body argument must be the deserialized body.
""" collection = resource + "s" if not body: raise webob.exc.HTTPBadRequest(_("Resource body required")) LOG.debug("Request body: %(body)s", {'body': body}) try: if collection in body: if not allow_bulk: raise webob.exc.HTTPBadRequest(_("Bulk operation " "not supported")) if not body[collection]: raise webob.exc.HTTPBadRequest(_("Resources required")) bulk_body = [ Controller.prepare_request_body( context, item if resource in item else {resource: item}, is_create, resource, attr_info, allow_bulk) for item in body[collection] ] return {collection: bulk_body} res_dict = body.get(resource) except (AttributeError, TypeError): msg = _("Body contains invalid data") raise webob.exc.HTTPBadRequest(msg) if res_dict is None: msg = _("Unable to find '%s' in request body") % resource raise webob.exc.HTTPBadRequest(msg) if not isinstance(res_dict, dict): msg = _("Object '%s' contains invalid data") % resource raise webob.exc.HTTPBadRequest(msg) attr_ops = attributes.AttributeInfo(attr_info) attr_ops.populate_project_id(context, res_dict, is_create) attributes.populate_project_info(attr_info) attr_ops.verify_attributes(res_dict) if is_create: # POST attr_ops.fill_post_defaults( res_dict, exc_cls=webob.exc.HTTPBadRequest) else: # PUT for attr, attr_vals in attr_info.items(): if attr in res_dict and not attr_vals['allow_put']: msg = _("Cannot update read-only attribute %s") % attr raise webob.exc.HTTPBadRequest(msg) attr_ops.convert_values(res_dict, exc_cls=webob.exc.HTTPBadRequest) return body def _validate_network_tenant_ownership(self, request, resource_item): # TODO(salvatore-orlando): consider whether this check can be folded # in the policy engine if (request.context.is_admin or request.context.is_advsvc or self._resource not in ('port', 'subnet')): return network = self._plugin.get_network( request.context, resource_item['network_id']) # do not perform the check on shared networks if network.get('shared'): return network_owner = network['tenant_id'] if network_owner != resource_item['tenant_id']: # NOTE(kevinbenton): we raise a 404 to hide the existence of the # network from the tenant since they don't have access to it. msg = _('The resource could not be found.') raise webob.exc.HTTPNotFound(msg) def _set_parent_id_into_ext_resources_request( self, request, resource_item, parent_id, is_get=False): if not parent_id: return # This will pass most create/update/delete cases if not is_get and (request.context.is_admin or request.context.is_advsvc or self.parent['member_name'] not in service_const.EXT_PARENT_RESOURCE_MAPPING or resource_item.get(self._parent_id_name)): return # Then we arrive here, that means the request or get obj contains # ext_parent. If this func is called by list/get, and it contains # _parent_id_name. We need to re-add the ex_parent prefix to policy. if is_get: if (not request.context.is_admin or not request.context.is_advsvc and self.parent['member_name'] in service_const.EXT_PARENT_RESOURCE_MAPPING): resource_item.setdefault( "%s_%s" % (constants.EXT_PARENT_PREFIX, self._parent_id_name), parent_id) # If this func is called by create/update/delete, we just add. 
else: resource_item.setdefault( "%s_%s" % (constants.EXT_PARENT_PREFIX, self._parent_id_name), parent_id) def create_resource(collection, resource, plugin, params, allow_bulk=False, member_actions=None, parent=None, allow_pagination=False, allow_sorting=False): controller = Controller(plugin, collection, resource, params, allow_bulk, member_actions=member_actions, parent=parent, allow_pagination=allow_pagination, allow_sorting=allow_sorting) return wsgi_resource.Resource(controller, faults.FAULT_MAP)
# ===== neutron-16.0.0.0b2.dev214/neutron/api/v2/resource.py =====
# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utility methods for working with WSGI servers redux """ from oslo_log import log as logging import webob.dec import webob.exc from neutron.api import api_common from neutron.common import utils from neutron import wsgi LOG = logging.getLogger(__name__) class Request(wsgi.Request): pass def Resource(controller, faults=None, deserializers=None, serializers=None, action_status=None): """Represents an API entity resource and the associated serialization and deserialization logic """ default_deserializers = {'application/json': wsgi.JSONDeserializer()} default_serializers = {'application/json': wsgi.JSONDictSerializer()} format_types = {'json': 'application/json'} action_status = action_status or dict(create=201, delete=204) default_deserializers.update(deserializers or {}) default_serializers.update(serializers or {}) deserializers = default_deserializers serializers = default_serializers faults = faults or {} @webob.dec.wsgify(RequestClass=Request) def resource(request): route_args = request.environ.get('wsgiorg.routing_args') if route_args: args = route_args[1].copy() else: args = {} # NOTE(jkoelker) by now the controller is already found; remove # it from the args if it is in the matchdict args.pop('controller', None) fmt = args.pop('format', None) action = args.pop('action', None) content_type = format_types.get(fmt, request.best_match_content_type()) language = request.best_match_language() deserializer = deserializers.get(content_type) serializer = serializers.get(content_type) try: if request.body: args['body'] = deserializer.deserialize(request.body)['body'] # Routes library is dumb and cuts off everything after the last dot # (.) as format. At the same time, it doesn't enforce the format # suffix, which combined makes it impossible to pass an 'id' with dots # included (the last section after the last dot is lost). This is # important for some API extensions like tags where the id is # really a tag name that can contain special characters. # # To work around the Routes behaviour, we will attach the suffix # back to id if it's not one of the supported formats (atm json only).
# This of course won't work for the corner case of a tag name that # actually ends with '.json', but there seems to be no better way # to tackle it without breaking API backwards compatibility. if fmt is not None and fmt not in format_types: args['id'] = '.'.join([args['id'], fmt]) revision_number = api_common.check_request_for_revision_constraint( request) if revision_number is not None: request.context.set_transaction_constraint( controller._collection, args['id'], revision_number) method = getattr(controller, action) result = method(request=request, **args) except Exception as e: mapped_exc = api_common.convert_exception_to_http_exc(e, faults, language) if hasattr(mapped_exc, 'code') and 400 <= mapped_exc.code < 500: LOG.info('%(action)s failed (client error): %(exc)s', {'action': action, 'exc': mapped_exc}) else: LOG.exception('%(action)s failed: %(details)s', { 'action': action, 'details': utils.extract_exc_details(e), } ) raise mapped_exc status = action_status.get(action, 200) body = serializer.serialize(result) # NOTE(jkoelker) Comply with RFC2616 section 9.7 if status == 204: content_type = '' body = None return webob.Response(request=request, status=status, content_type=content_type, body=body) # NOTE(blogan): this is something that is needed for the transition to # pecan. This will allow the pecan code to have a handle on the controller # for an extension so it can reuse the code instead of forcing every # extension to rewrite the code for use with pecan. setattr(resource, 'controller', controller) setattr(resource, 'action_status', action_status) return resource
# ===== neutron-16.0.0.0b2.dev214/neutron/api/v2/resource_helper.py =====
# (c) Copyright 2014 Cisco Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.plugins import constants from neutron_lib.plugins import directory from oslo_log import log as logging from neutron.api import extensions from neutron.api.v2 import base from neutron.quota import resource_registry LOG = logging.getLogger(__name__) def build_plural_mappings(special_mappings, resource_map): """Create plural to singular mapping for all resources. Allows for special mappings to be provided for particular cases. Otherwise, will strip off the last character for normal mappings, like routers -> router, unless the plural name ends with 'ies', in which case the singular form will end with a 'y' (e.g.: policy/policies) """ plural_mappings = {} for plural in resource_map: singular = special_mappings.get(plural) if not singular: if plural.endswith('ies'): singular = "%sy" % plural[:-3] else: singular = plural[:-1] plural_mappings[plural] = singular return plural_mappings def build_resource_info(plural_mappings, resource_map, which_service, action_map=None, register_quota=False, translate_name=False, allow_bulk=False): """Build resources for advanced services.
Takes the resource information and singular/plural mappings, and creates API resource objects for advanced services extensions. Will optionally translate underscores to dashes in resource names, register the resource, and accept action information for resources. :param plural_mappings: mappings between singular and plural forms :param resource_map: attribute map for the WSGI resources to create :param which_service: The name of the service for which the WSGI resources are being created. This name will be used to pass the appropriate plugin to the WSGI resource. It can be set to None or "CORE" to create WSGI resources for the core plugin :param action_map: custom resource actions :param register_quota: it can be set to True to register quotas for the resource(s) being created :param translate_name: replaces underscores with dashes :param allow_bulk: True if bulk creates are allowed """ resources = [] if not which_service: which_service = constants.CORE if action_map is None: action_map = {} plugin = directory.get_plugin(which_service) path_prefix = getattr(plugin, "path_prefix", "") LOG.debug('Service %(service)s assigned prefix: %(prefix)s', {'service': which_service, 'prefix': path_prefix}) for collection_name in resource_map: resource_name = plural_mappings[collection_name] params = resource_map.get(collection_name, {}) if translate_name: collection_name = collection_name.replace('_', '-') if register_quota: resource_registry.register_resource_by_name(resource_name) member_actions = action_map.get(resource_name, {}) controller = base.create_resource( collection_name, resource_name, plugin, params, member_actions=member_actions, allow_bulk=allow_bulk, allow_pagination=True, allow_sorting=True) resource = extensions.ResourceExtension( collection_name, controller, path_prefix=path_prefix, member_actions=member_actions, attr_map=params) resources.append(resource) return resources
# ===== neutron-16.0.0.0b2.dev214/neutron/api/v2/router.py =====
# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License.
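# --- Editorial sketch (not part of the tree): the plural -> singular rule
# implemented by build_plural_mappings() in resource_helper.py above,
# restated as a self-contained helper for illustration:
def plural_to_singular_sketch(plural):
    # 'policies' -> 'policy'; otherwise just drop the trailing character
    if plural.endswith('ies'):
        return plural[:-3] + 'y'
    return plural[:-1]

assert plural_to_singular_sketch('routers') == 'router'
assert plural_to_singular_sketch('policies') == 'policy'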
from neutron.pecan_wsgi import app as pecan_app def APIRouter(**local_config): return pecan_app.v2_factory(None, **local_config) def _factory(global_config, **local_config): return pecan_app.v2_factory(global_config, **local_config) setattr(APIRouter, 'factory', _factory)
# ===== neutron-16.0.0.0b2.dev214/neutron/api/views/__init__.py (empty) =====
# ===== neutron-16.0.0.0b2.dev214/neutron/api/views/versions.py =====
# Copyright 2010-2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from neutron.api import api_common def get_view_builder(req): base_url = req.application_url return ViewBuilder(base_url) class ViewBuilder(object): def __init__(self, base_url): """Object initialization. :param base_url: URL of the root wsgi application """ self.base_url = api_common.prepare_url(base_url) def build(self, version_data): """Generic method used to generate a version entity.""" version = { "id": version_data["id"], "status": version_data["status"], "links": self._build_links(version_data), } return version def _build_links(self, version_data): """Generate a container of links that refer to the provided version.""" href = self.generate_href(version_data["id"]) links = [ { "rel": "self", "href": href, }, ] return links def generate_href(self, version_number): """Create a URL that refers to a specific version_number.""" return os.path.join(self.base_url, version_number, '')
# ===== neutron-16.0.0.0b2.dev214/neutron/auth.py =====
# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
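# --- Editorial sketch (not part of the tree): the href produced by
# ViewBuilder.generate_href() in versions.py above is the base URL joined
# with the version id and a trailing slash (example endpoint assumed):
import os

base_url = 'http://controller:9696/'
href = os.path.join(base_url, 'v2.0', '')
assert href == 'http://controller:9696/v2.0/'
# _build_links() then wraps it as [{'rel': 'self', 'href': href}].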
from neutron_lib import context from oslo_config import cfg from oslo_log import log as logging from oslo_middleware import base import webob.dec import webob.exc LOG = logging.getLogger(__name__) class NeutronKeystoneContext(base.ConfigurableMiddleware): """Make a request context from keystone headers.""" @webob.dec.wsgify def __call__(self, req): ctx = context.Context.from_environ(req.environ) if not ctx.user_id: LOG.debug("X_USER_ID is not found in request") return webob.exc.HTTPUnauthorized() # Inject the context... req.environ['neutron.context'] = ctx return self.application def pipeline_factory(loader, global_conf, **local_conf): """Create a paste pipeline based on the 'auth_strategy' config option.""" pipeline = local_conf[cfg.CONF.auth_strategy] pipeline = pipeline.split() filters = [loader.get_filter(n) for n in pipeline[:-1]] app = loader.get_app(pipeline[-1]) filters.reverse() for filter in filters: app = filter(app) return app
# ===== neutron-16.0.0.0b2.dev214/neutron/cmd/__init__.py =====
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging as sys_logging from oslo_reports import guru_meditation_report as gmr from neutron import version # During the call to gmr.TextGuruMeditation.setup_autorun(), Guru Meditation # Report tries to start logging. Set a handler here to accommodate this. logger = sys_logging.getLogger(None) if not logger.handlers: logger.addHandler(sys_logging.StreamHandler()) _version_string = version.version_info.release_string() gmr.TextGuruMeditation.setup_autorun(version=_version_string)
# ===== neutron-16.0.0.0b2.dev214/neutron/cmd/eventlet/__init__.py =====
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
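# --- Editorial sketch (not part of the tree): pipeline_factory() in auth.py
# above wraps the final app with each filter from right to left, so the
# first name in the paste pipeline becomes the outermost middleware. A
# stand-in with plain callables:
def make_filter(name):
    def _filter(app):
        def wrapped(request):
            return '%s(%s)' % (name, app(request))
        return wrapped
    return _filter

def app(request):
    return 'app:%s' % request

for f in reversed([make_filter('authtoken'), make_filter('keystonecontext')]):
    app = f(app)
assert app('req') == 'authtoken(keystonecontext(app:req))'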
from neutron.common import eventlet_utils eventlet_utils.monkey_patch()
# ===== neutron-16.0.0.0b2.dev214/neutron/cmd/eventlet/agents/__init__.py (empty) =====
# ===== neutron-16.0.0.0b2.dev214/neutron/cmd/eventlet/agents/dhcp.py =====
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent import dhcp_agent def main(): dhcp_agent.main()
# ===== neutron-16.0.0.0b2.dev214/neutron/cmd/eventlet/agents/l3.py =====
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent import l3_agent def main(): l3_agent.main()
# ===== neutron-16.0.0.0b2.dev214/neutron/cmd/eventlet/agents/metadata.py =====
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent import metadata_agent def main(): metadata_agent.main()
# ===== neutron-16.0.0.0b2.dev214/neutron/cmd/eventlet/agents/ovn_metadata.py =====
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent.ovn import metadata_agent def main(): metadata_agent.main()
# ===== neutron-16.0.0.0b2.dev214/neutron/cmd/eventlet/plugins/__init__.py (empty) =====
# ===== neutron-16.0.0.0b2.dev214/neutron/cmd/eventlet/plugins/linuxbridge_neutron_agent.py =====
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import \ neutron.plugins.ml2.drivers.linuxbridge.agent.linuxbridge_neutron_agent \ as agent_main def main(): agent_main.main()
# ===== neutron-16.0.0.0b2.dev214/neutron/cmd/eventlet/plugins/macvtap_neutron_agent.py =====
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.plugins.ml2.drivers.macvtap.agent import ( macvtap_neutron_agent as agent_main) def main(): agent_main.main()
# ===== neutron-16.0.0.0b2.dev214/neutron/cmd/eventlet/plugins/ovs_neutron_agent.py =====
# Copyright (c) 2015 Cloudbase Solutions. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import neutron.plugins.ml2.drivers.openvswitch.agent.main as agent_main def main(): agent_main.main()
# ===== neutron-16.0.0.0b2.dev214/neutron/cmd/eventlet/plugins/sriov_nic_neutron_agent.py =====
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import neutron.plugins.ml2.drivers.mech_sriov.agent.sriov_nic_agent \ as agent_main def main(): agent_main.main()
# ===== neutron-16.0.0.0b2.dev214/neutron/cmd/eventlet/server/__init__.py =====
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
from neutron import server from neutron.server import rpc_eventlet from neutron.server import wsgi_eventlet def main(): server.boot_server(wsgi_eventlet.eventlet_wsgi_server) def main_rpc_eventlet(): server.boot_server(rpc_eventlet.eventlet_rpc_server)
# ===== neutron-16.0.0.0b2.dev214/neutron/cmd/eventlet/services/__init__.py (empty) =====
# ===== neutron-16.0.0.0b2.dev214/neutron/cmd/eventlet/services/metering_agent.py =====
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.services.metering.agents import metering_agent def main(): metering_agent.main()
# ===== neutron-16.0.0.0b2.dev214/neutron/cmd/eventlet/usage_audit.py =====
# Copyright (c) 2012 New Dream Network, LLC (DreamHost) # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Cron script to generate usage notifications for networks, ports and subnets.
""" import sys from neutron_lib import context from neutron_lib.plugins import constants from neutron_lib.plugins import directory from neutron_lib import rpc as n_rpc from neutron.common import config from neutron import manager def main(): config.init(sys.argv[1:]) config.setup_logging() cxt = context.get_admin_context() manager.init() plugin = directory.get_plugin() l3_plugin = directory.get_plugin(constants.L3) notifier = n_rpc.get_notifier('network') for network in plugin.get_networks(cxt): notifier.info(cxt, 'network.exists', {'network': network}) for subnet in plugin.get_subnets(cxt): notifier.info(cxt, 'subnet.exists', {'subnet': subnet}) for port in plugin.get_ports(cxt): notifier.info(cxt, 'port.exists', {'port': port}) for router in l3_plugin.get_routers(cxt): notifier.info(cxt, 'router.exists', {'router': router}) for floatingip in l3_plugin.get_floatingips(cxt): notifier.info(cxt, 'floatingip.exists', {'floatingip': floatingip}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/cmd/ipset_cleanup.py0000644000175000017500000000634200000000000022612 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from neutron.agent.linux import utils from neutron.common import config from neutron.conf.agent import cmd as command LOG = logging.getLogger(__name__) def setup_conf(): """Setup the cfg for the clean up utility. Use separate setup_conf for the utility because there are many options from the main config that do not apply during clean-up. """ conf = cfg.CONF command.register_cmd_opts(command.ip_opts, conf) return conf def remove_iptables_reference(ipset): # Remove any iptables reference to this IPset cmd = ['iptables-save'] if 'IPv4' in ipset else ['ip6tables-save'] iptables_save = utils.execute(cmd, run_as_root=True) if ipset in iptables_save: cmd = ['iptables'] if 'IPv4' in ipset else ['ip6tables'] cmd += ['-w', '10'] # wait for xlock release LOG.info("Removing iptables rule for IPset: %s", ipset) for rule in iptables_save.splitlines(): if '--match-set %s ' % ipset in rule and rule.startswith('-A'): # change to delete params = rule.split() params[0] = '-D' try: utils.execute(cmd + params, run_as_root=True) except Exception: LOG.exception('Error, unable to remove iptables rule ' 'for IPset: %s', ipset) def destroy_ipset(conf, ipset): # If there is an iptables reference and we don't remove it, the # IPset removal will fail below if conf.force: remove_iptables_reference(ipset) LOG.info("Destroying IPset: %s", ipset) cmd = ['ipset', 'destroy', ipset] try: utils.execute(cmd, run_as_root=True) except Exception: LOG.exception('Error, unable to destroy IPset: %s', ipset) def cleanup_ipsets(conf): # Identify ipsets for destruction. 
LOG.info("Destroying IPsets with prefix: %s", conf.prefix) cmd = ['ipset', '-L', '-n'] ipsets = utils.execute(cmd, run_as_root=True) for ipset in ipsets.split('\n'): if conf.allsets or ipset.startswith(conf.prefix): destroy_ipset(conf, ipset) LOG.info("IPset cleanup completed successfully") def main(): """Main method for cleaning up IPsets. The utility is designed to clean-up after the forced or unexpected termination of Neutron agents. The --allsets flag should only be used as part of the cleanup of a devstack installation as it will blindly destroy all IPsets. """ conf = setup_conf() conf() config.setup_logging() cleanup_ipsets(conf) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/cmd/keepalived_state_change.py0000644000175000017500000000130700000000000024571 0ustar00coreycorey00000000000000# Copyright (c) 2015 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent.l3 import keepalived_state_change def main(): keepalived_state_change.main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/cmd/linuxbridge_cleanup.py0000644000175000017500000000501200000000000023773 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
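# The two mappings parsed below are lists of "physnet:device" strings.
# A minimal illustration of the neutron_lib helper (values hypothetical):
#
#     >>> from neutron_lib.utils import helpers
#     >>> helpers.parse_mappings(['physnet1:eth1', 'physnet2:eth2'])
#     {'physnet1': 'eth1', 'physnet2': 'eth2'}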
import sys

from neutron_lib.utils import helpers
from oslo_config import cfg
from oslo_log import log as logging

from neutron.conf.agent import common as config
from neutron.plugins.ml2.drivers.linuxbridge.agent \
    import linuxbridge_neutron_agent

LOG = logging.getLogger(__name__)


def remove_empty_bridges():
    try:
        interface_mappings = helpers.parse_mappings(
            cfg.CONF.LINUX_BRIDGE.physical_interface_mappings)
    except ValueError as e:
        LOG.error("Parsing physical_interface_mappings failed: %s.", e)
        sys.exit(1)
    LOG.info("Interface mappings: %s.", interface_mappings)

    try:
        bridge_mappings = helpers.parse_mappings(
            cfg.CONF.LINUX_BRIDGE.bridge_mappings)
    except ValueError as e:
        LOG.error("Parsing bridge_mappings failed: %s.", e)
        sys.exit(1)
    LOG.info("Bridge mappings: %s.", bridge_mappings)

    lb_manager = linuxbridge_neutron_agent.LinuxBridgeManager(
        bridge_mappings, interface_mappings)

    bridge_names = lb_manager.get_deletable_bridges()
    for bridge_name in bridge_names:
        if lb_manager.get_tap_devices_count(bridge_name):
            continue
        try:
            lb_manager.delete_bridge(bridge_name)
            LOG.info("Linux bridge %s deleted", bridge_name)
        except RuntimeError:
            LOG.exception("Linux bridge %s delete failed", bridge_name)
    LOG.info("Linux bridge cleanup completed successfully")


def main():
    """Main method for cleaning up empty linux bridges.

    This tool deletes every empty linux bridge managed by the linuxbridge
    agent (brq.* linux bridges), except the ones defined using the
    bridge_mappings option in section LINUX_BRIDGE (created by deployers).

    This tool should not be called during an instance create, migrate,
    etc., as it can delete a linux bridge about to be used by nova.
    """
    cfg.CONF(sys.argv[1:])
    config.setup_logging()
    config.setup_privsep()
    remove_empty_bridges()
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/cmd/netns_cleanup.py0000644000175000017500000002420000000000000022606 0ustar00coreycorey00000000000000# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
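# eligible_for_deletion() below keys off the namespace name: it must be a
# known agent prefix immediately followed by a UUID. Illustrative names
# (the UUID is made up):
#
#     qdhcp-8b6b7542-dbe4-44bf-b44b-1a1e0783869a    DHCP agent, candidate
#     qrouter-8b6b7542-dbe4-44bf-b44b-1a1e0783869a  L3 agent, candidate
#     my-manually-created-ns                        ignored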
import itertools
import re
import signal
import time

from neutron_lib import constants
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils

from neutron.agent.common import ovs_lib
from neutron.agent.l3 import dvr_fip_ns
from neutron.agent.l3 import dvr_snat_ns
from neutron.agent.l3 import namespaces
from neutron.agent.linux import dhcp
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import config
from neutron.conf.agent import cmd
from neutron.conf.agent import common as agent_config
from neutron.conf.agent import dhcp as dhcp_config

LOG = logging.getLogger(__name__)

NS_PREFIXES = {
    'dhcp': [dhcp.NS_PREFIX],
    'l3': [namespaces.NS_PREFIX, dvr_snat_ns.SNAT_NS_PREFIX,
           dvr_fip_ns.FIP_NS_PREFIX],
}
SIGTERM_WAITTIME = 10
NETSTAT_PIDS_REGEX = re.compile(r'.* (?P<pid>\d{2,6})/.*')


class PidsInNamespaceException(Exception):
    pass


class FakeDhcpPlugin(object):
    """Fake RPC plugin to bypass any RPC calls."""
    def __getattribute__(self, name):
        def fake_method(*args):
            pass
        return fake_method


def setup_conf():
    """Setup the cfg for the clean up utility.

    Use separate setup_conf for the utility because there are many options
    from the main config that do not apply during clean-up.
    """
    conf = cfg.CONF
    cmd.register_cmd_opts(cmd.netns_opts, conf)
    agent_config.register_interface_driver_opts_helper(conf)
    dhcp_config.register_agent_dhcp_opts(conf)
    agent_config.register_interface_opts()
    return conf


def _get_dhcp_process_monitor(config):
    return external_process.ProcessMonitor(config=config,
                                           resource_type='dhcp')


def kill_dhcp(conf, namespace):
    """Disable DHCP for a network if DHCP is still active."""
    network_id = namespace.replace(dhcp.NS_PREFIX, '')
    dhcp_driver = importutils.import_object(
        conf.dhcp_driver,
        conf=conf,
        process_monitor=_get_dhcp_process_monitor(conf),
        network=dhcp.NetModel({'id': network_id}),
        plugin=FakeDhcpPlugin())
    if dhcp_driver.active:
        dhcp_driver.disable()


def eligible_for_deletion(conf, namespace, force=False):
    """Determine whether a namespace is eligible for deletion.

    Eligibility is determined by having only the lo device or if force
    is passed as a parameter.
    """
    if conf.agent_type:
        prefixes = NS_PREFIXES.get(conf.agent_type)
    else:
        prefixes = itertools.chain(*NS_PREFIXES.values())
    ns_mangling_pattern = '(%s%s)' % ('|'.join(prefixes),
                                      constants.UUID_PATTERN)

    # filter out namespaces without a UUID as the name
    if not re.match(ns_mangling_pattern, namespace):
        return False
    ip = ip_lib.IPWrapper(namespace=namespace)
    return force or ip.namespace_is_empty()


def unplug_device(device):
    orig_log_fail_as_error = device.get_log_fail_as_error()
    device.set_log_fail_as_error(False)
    try:
        device.link.delete()
    except RuntimeError:
        device.set_log_fail_as_error(orig_log_fail_as_error)
        # Maybe the device is an OVS port, so try to delete it there
        ovs = ovs_lib.BaseOVS()
        bridge_name = ovs.get_bridge_for_iface(device.name)
        if bridge_name:
            bridge = ovs_lib.OVSBridge(bridge_name)
            bridge.delete_port(device.name)
        else:
            LOG.debug('Unable to find bridge for device: %s', device.name)
    finally:
        device.set_log_fail_as_error(orig_log_fail_as_error)


def find_listen_pids_namespace(namespace):
    """Retrieve a list of pids of listening processes within the given netns.
    It executes netstat -nlp and returns the set of unique PIDs found.
    """
    ip = ip_lib.IPWrapper(namespace=namespace)
    pids = set()
    cmd = ['netstat', '-nlp']
    output = ip.netns.execute(cmd, run_as_root=True)
    for line in output.splitlines():
        m = NETSTAT_PIDS_REGEX.match(line)
        if m:
            pids.add(m.group('pid'))
    return pids


def wait_until_no_listen_pids_namespace(namespace, timeout=SIGTERM_WAITTIME):
    """Poll listening processes within the given namespace.

    If, after timeout seconds, there are still listening processes in the
    namespace, a PidsInNamespaceException will be thrown.
    """
    # NOTE(dalvarez): This function can block forever if
    # find_listen_pids_namespace never returns, which is really unlikely. We
    # can't use wait_until_true because we might get interrupted by eventlet
    # Timeout during our I/O with the rootwrap daemon and that will lead to
    # errors in subsequent calls to utils.execute, always grabbing the output
    # of the previous command
    start = end = time.time()
    while end - start < timeout:
        if not find_listen_pids_namespace(namespace):
            return
        time.sleep(1)
        end = time.time()
    raise PidsInNamespaceException


def _kill_listen_processes(namespace, force=False):
    """Identify all listening processes within the given namespace.

    Then, for each one, find its top parent with the same cmdline (in case
    this process forked) and issue a SIGTERM to all of them. If force is
    True, a SIGKILL will be issued to all parents and all their children.
    Also, this function returns the number of listening processes.
    """
    pids = find_listen_pids_namespace(namespace)
    pids_to_kill = {utils.find_fork_top_parent(pid) for pid in pids}
    kill_signal = signal.SIGTERM
    if force:
        kill_signal = signal.SIGKILL
        children = [utils.find_child_pids(pid, True) for pid in pids_to_kill]
        pids_to_kill.update(itertools.chain.from_iterable(children))

    for pid in pids_to_kill:
        # Throw a warning since this particular cleanup may need a specific
        # implementation in the right module. Ideally, netns_cleanup wouldn't
        # kill any processes as the responsible module should've killed them
        # before cleaning up the namespace
        LOG.warning("Killing (%(signal)d) [%(pid)s] %(cmdline)s",
                    {'signal': kill_signal,
                     'pid': pid,
                     'cmdline': ' '.join(utils.get_cmdline_from_pid(pid))[:80]
                     })
        try:
            utils.kill_process(pid, kill_signal, run_as_root=True)
        except Exception as ex:
            LOG.error('An error occurred while killing '
                      '[%(pid)s]: %(msg)s', {'pid': pid, 'msg': ex})
    return len(pids)


def kill_listen_processes(namespace):
    """Kill all processes listening within the given namespace.

    First it tries to kill them using SIGTERM, waits until they die
    gracefully, and then kills remaining processes (if any) with SIGKILL.
    """
    if _kill_listen_processes(namespace, force=False):
        try:
            wait_until_no_listen_pids_namespace(namespace)
        except PidsInNamespaceException:
            _kill_listen_processes(namespace, force=True)
            # Allow some time for remaining processes to die
            wait_until_no_listen_pids_namespace(namespace)


def destroy_namespace(conf, namespace, force=False):
    """Destroy a given namespace.

    If force is True, then dhcp (if it exists) will be disabled and all
    devices will be forcibly removed.
    """
    try:
        ip = ip_lib.IPWrapper(namespace=namespace)

        if force:
            kill_dhcp(conf, namespace)
            # NOTE: The dhcp driver will remove the namespace if it is empty,
            # so a second check is required here.
            if ip.netns.exists(namespace):
                try:
                    kill_listen_processes(namespace)
                except PidsInNamespaceException:
                    # This is unlikely since, at this point, we have SIGKILLed
                    # all remaining processes but if there are still some, log
                    # the error and continue with the cleanup
                    LOG.error('Not all processes were killed in %s',
                              namespace)
                for device in ip.get_devices():
                    unplug_device(device)

        ip.garbage_collect_namespace()
    except Exception:
        LOG.exception('Error, unable to destroy namespace: %s', namespace)


def cleanup_network_namespaces(conf):
    # Identify namespaces that are candidates for deletion.
    candidates = [ns for ns in ip_lib.list_network_namespaces()
                  if eligible_for_deletion(conf, ns, conf.force)]

    if candidates:
        time.sleep(2)

        for namespace in candidates:
            destroy_namespace(conf, namespace, conf.force)


def main():
    """Main method for cleaning up network namespaces.

    This method will make two passes checking for namespaces to delete. The
    process will identify candidates, sleep, and call garbage collect. The
    garbage collection will re-verify that the namespace meets the criteria
    for deletion (i.e. it is empty). The period of sleep and the second pass
    allow time for the namespace state to settle, so that the check prior to
    deletion will re-confirm the namespace is empty.

    The utility is designed to clean up after the forced or unexpected
    termination of Neutron agents.

    The --force flag should only be used as part of the cleanup of a
    devstack installation as it will blindly purge namespaces and their
    devices. This option also kills any lingering DHCP instances.
    """
    conf = setup_conf()
    conf()
    config.setup_logging()
    agent_config.setup_privsep()
    cleanup_network_namespaces(conf)
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.219044 neutron-16.0.0.0b2.dev214/neutron/cmd/ovn/0000755000175000017500000000000000000000000020202 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/cmd/ovn/__init__.py0000644000175000017500000000000000000000000022301 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/cmd/ovn/migration_mtu.py0000644000175000017500000001031500000000000023432 0ustar00coreycorey00000000000000# Copyright 2018 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
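# Why 8 bytes: after an ML2/OVS to ML2/OVN migration, the VXLAN tenant
# networks are served by Geneve tunnels, and the Geneve encapsulation used
# by OVN needs 8 more bytes of overhead than VXLAN. A worked example with
# common defaults (the physical MTU and VXLAN overhead figures here are
# illustrative, not taken from this module):
#
#     1500 (physical) - 50 (VXLAN overhead)  = 1450  network MTU today
#     1450 - GENEVE_TO_VXLAN_OVERHEAD (= 8)  = 1442  network MTU after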
import os
import sys

from openstack import connection

# TODO(dalvarez): support also GRE
GENEVE_TO_VXLAN_OVERHEAD = 8


def get_connection():
    user_domain_id = os.environ.get('OS_USER_DOMAIN_ID', 'default')
    project_domain_id = os.environ.get('OS_PROJECT_DOMAIN_ID', 'default')
    conn = connection.Connection(auth_url=os.environ['OS_AUTH_URL'],
                                 project_name=os.environ['OS_PROJECT_NAME'],
                                 username=os.environ['OS_USERNAME'],
                                 password=os.environ['OS_PASSWORD'],
                                 user_domain_id=user_domain_id,
                                 project_domain_id=project_domain_id)
    return conn


def verify_network_mtu():
    print("Verifying the tenant network MTUs")
    conn = get_connection()
    success = True
    for network in conn.network.networks():
        if network.provider_physical_network is None and (
                network.provider_network_type == 'vxlan') and (
                'adapted_mtu' not in network.tags):
            print("adapted_mtu tag is not set for the network "
                  "[" + str(network.name) + "]")
            success = False
    if success:
        print("All the networks are set to the expected MTU value")
    else:
        print("Some tenant networks need to have their MTU updated to a "
              "lower value.")
    return success


def update_network_mtu():
    print("Updating the tenant network MTUs")
    conn = get_connection()
    for network in conn.network.networks():
        try:
            if network.provider_physical_network is None and (
                    network.provider_network_type == 'vxlan') and (
                    'adapted_mtu' not in network.tags):
                print("Updating the MTU and the 'adapted_mtu' tag "
                      "of the network - " + str(network.name))
                new_tags = list(network.tags)
                new_tags.append('adapted_mtu')
                conn.network.update_network(
                    network,
                    mtu=int(network.mtu) - GENEVE_TO_VXLAN_OVERHEAD)
                conn.network.set_tags(network, new_tags)
        except Exception as e:
            print("Exception occurred while updating the MTU: " + str(e))
            return False
    return True


def print_usage():
    print('Invalid options:')
    print('Usage: %s <update/verify> mtu' % sys.argv[0])


def main():
    """Tool for updating the network MTUs before migration.

    This lowers the MTU of the pre-migration VXLAN and GRE networks. The
    tool ignores non-VXLAN/GRE networks, so if you use VLAN for tenant
    networks it is expected that this step does nothing.

    This step will go network by network reducing the MTU, tagging the
    networks which have already been handled with 'adapted_mtu'.

    Every time a network is updated, all the existing L3/DHCP agents
    connected to such network will update the MTU of their internal legs,
    and instances will start fetching the new MTU as the DHCP T1 timer
    expires. As explained before, instances not obeying the DHCP T1
    parameter will need to be restarted, and instances with static IP
    assignment will need to be manually updated.
    """
    if len(sys.argv) < 3:
        print_usage()
        sys.exit(1)

    retval = 1
    if sys.argv[1] == "update" and sys.argv[2] == "mtu":
        if update_network_mtu():
            retval = 0
    elif sys.argv[1] == "verify" and sys.argv[2] == "mtu":
        if verify_network_mtu():
            retval = 0
    else:
        print_usage()
    sys.exit(retval)


if __name__ == "__main__":
    main()
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/cmd/ovn/neutron_ovn_db_sync_util.py0000644000175000017500000001765600000000000025675 0ustar00coreycorey00000000000000# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from neutron_lib.agent import topics from neutron_lib.plugins import directory from oslo_config import cfg from oslo_db import options as db_options from oslo_log import log as logging from neutron.conf.agent import securitygroups_rpc from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf from neutron import manager from neutron import opts as neutron_options from neutron.plugins.ml2.drivers.ovn.mech_driver import mech_driver from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import impl_idl_ovn from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_db_sync from neutron.plugins.ml2 import plugin as ml2_plugin LOG = logging.getLogger(__name__) class Ml2Plugin(ml2_plugin.Ml2Plugin): def _setup_dhcp(self): pass def _start_rpc_notifiers(self): # Override the notifier so that when calling the ML2 plugin to create # resources, it doesn't crash trying to notify subscribers. self.notifier = AgentNotifierApi(topics.AGENT) class OVNMechanismDriver(mech_driver.OVNMechanismDriver): def subscribe(self): pass def post_fork_initialize(self, resource, event, trigger, **kwargs): pass @property def ovn_client(self): return self._ovn_client # Since we are not using the ovn mechanism driver while syncing, # we override the post and pre commit methods so that original ones are # not called. def create_port_precommit(self, context): pass def create_port_postcommit(self, context): port = context.current self.ovn_client.create_port(context, port) def update_port_precommit(self, context): pass def update_port_postcommit(self, context): port = context.current original_port = context.original self.ovn_client.update_port(context, port, original_port) def delete_port_precommit(self, context): pass def delete_port_postcommit(self, context): port = copy.deepcopy(context.current) port['network'] = context.network.current # FIXME(lucasagomes): PortContext does not have a session, therefore # we need to use the _plugin_context attribute. self.ovn_client.delete_port(context._plugin_context, port['id'], port_object=port) class AgentNotifierApi(object): """Default Agent Notifier class for ovn-db-sync-util. This class implements empty methods so that when creating resources in the core plugin, the original ones don't get called and don't interfere with the syncing process. 
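    In other words, it is a null object: every notification hook exists
    with the expected signature, and every one of them deliberately does
    nothing.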
""" def __init__(self, topic): self.topic = topic self.topic_network_delete = topics.get_topic_name(topic, topics.NETWORK, topics.DELETE) self.topic_port_update = topics.get_topic_name(topic, topics.PORT, topics.UPDATE) self.topic_port_delete = topics.get_topic_name(topic, topics.PORT, topics.DELETE) self.topic_network_update = topics.get_topic_name(topic, topics.NETWORK, topics.UPDATE) def network_delete(self, context, network_id): pass def port_update(self, context, port, network_type, segmentation_id, physical_network): pass def port_delete(self, context, port_id): pass def network_update(self, context, network): pass def security_groups_provider_updated(self, context, devices_to_udpate=None): pass def setup_conf(): conf = cfg.CONF ml2_group, ml2_opts = neutron_options.list_ml2_conf_opts()[0] cfg.CONF.register_cli_opts(ml2_opts, ml2_group) cfg.CONF.register_cli_opts(securitygroups_rpc.security_group_opts, 'SECURITYGROUP') ovn_group, ovn_opts = ovn_conf.list_opts()[0] cfg.CONF.register_cli_opts(ovn_opts, group=ovn_group) db_group, neutron_db_opts = db_options.list_opts()[0] cfg.CONF.register_cli_opts(neutron_db_opts, db_group) return conf def main(): """Main method for syncing neutron networks and ports with ovn nb db. This script provides a utility for syncing the OVN Northbound Database with the Neutron database. This script is used for the migration from ML2/OVS to ML2/OVN. """ conf = setup_conf() # if no config file is passed or no configuration options are passed # then load configuration from /etc/neutron/neutron.conf try: conf(project='neutron') except TypeError: LOG.error('Error parsing the configuration values. Please verify.') return logging.setup(conf, 'neutron_ovn_db_sync_util') LOG.info('Started Neutron OVN db sync') mode = ovn_conf.get_ovn_neutron_sync_mode() if mode not in [ovn_db_sync.SYNC_MODE_LOG, ovn_db_sync.SYNC_MODE_REPAIR]: LOG.error( 'Invalid sync mode : ["%s"]. Should be "log" or "repair"', mode) return # Validate and modify core plugin and ML2 mechanism drivers for syncing. 
    if (cfg.CONF.core_plugin.endswith('.Ml2Plugin') or
            cfg.CONF.core_plugin == 'ml2'):
        cfg.CONF.core_plugin = (
            'neutron.cmd.ovn.neutron_ovn_db_sync_util.Ml2Plugin')
        if not cfg.CONF.ml2.mechanism_drivers:
            LOG.error('Please use --config-file to specify the neutron and '
                      'ml2 configuration files.')
            return
        if 'ovn' not in cfg.CONF.ml2.mechanism_drivers:
            LOG.error('No "ovn" mechanism driver found: "%s".',
                      cfg.CONF.ml2.mechanism_drivers)
            return
        cfg.CONF.set_override('mechanism_drivers', ['ovn-sync'], 'ml2')
        conf.service_plugins = [
            'neutron.services.ovn_l3.plugin.OVNL3RouterPlugin']
    else:
        LOG.error('Invalid core plugin: "%s".', cfg.CONF.core_plugin)
        return

    try:
        conn = impl_idl_ovn.get_connection(impl_idl_ovn.OvsdbNbOvnIdl)
        ovn_api = impl_idl_ovn.OvsdbNbOvnIdl(conn)
    except RuntimeError:
        LOG.error('Invalid --ovn-ovn_nb_connection parameter provided.')
        return

    try:
        sb_conn = impl_idl_ovn.get_connection(impl_idl_ovn.OvsdbSbOvnIdl)
        ovn_sb_api = impl_idl_ovn.OvsdbSbOvnIdl(sb_conn)
    except RuntimeError:
        LOG.error('Invalid --ovn-ovn_sb_connection parameter provided.')
        return

    manager.init()
    core_plugin = directory.get_plugin()
    ovn_driver = core_plugin.mechanism_manager.mech_drivers['ovn-sync'].obj
    ovn_driver._nb_ovn = ovn_api
    ovn_driver._sb_ovn = ovn_sb_api

    synchronizer = ovn_db_sync.OvnNbSynchronizer(
        core_plugin, ovn_api, ovn_sb_api, mode, ovn_driver)

    LOG.info('Sync for Northbound db started with mode: %s', mode)
    synchronizer.do_sync()
    LOG.info('Sync completed for Northbound db')

    sb_synchronizer = ovn_db_sync.OvnSbSynchronizer(
        core_plugin, ovn_sb_api, ovn_driver)

    LOG.info('Sync for Southbound db started with mode: %s', mode)
    sb_synchronizer.do_sync()
    LOG.info('Sync completed for Southbound db')
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/cmd/ovs_cleanup.py0000644000175000017500000000514200000000000022272 0ustar00coreycorey00000000000000# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_log import log as logging

from neutron.agent.common import ovs_lib
from neutron.common import config
from neutron.conf.agent import cmd
from neutron.conf.agent import common as agent_config
from neutron.conf.agent.l3 import config as l3_config
from neutron.conf.plugins.ml2.drivers import ovs_conf
from neutron.conf import service as service_config

LOG = logging.getLogger(__name__)

# Default ovsdb_timeout value for this script.
# It allows cleaning bridges even with thousands of ports.
CLEANUP_OVSDB_TIMEOUT = 600


def setup_conf():
    """Setup the cfg for the clean up utility.

    Use separate setup_conf for the utility because there are many options
    from the main config that do not apply during clean-up.
""" conf = cfg.CONF cmd.register_cmd_opts(cmd.ovs_opts, conf) l3_config.register_l3_agent_config_opts(l3_config.OPTS, conf) agent_config.register_interface_driver_opts_helper(conf) agent_config.register_interface_opts() service_config.register_service_opts(service_config.RPC_EXTRA_OPTS, conf) ovs_conf.register_ovs_agent_opts(conf) conf.set_default("ovsdb_timeout", CLEANUP_OVSDB_TIMEOUT, "OVS") return conf def main(): """Main method for cleaning up OVS bridges. The utility cleans up the integration bridges used by Neutron. """ conf = setup_conf() conf() config.setup_logging() do_main(conf) def do_main(conf): configuration_bridges = set([conf.OVS.integration_bridge]) ovs = ovs_lib.BaseOVS() ovs_bridges = set(ovs.get_bridges()) available_configuration_bridges = configuration_bridges & ovs_bridges if conf.ovs_all_ports: bridges = ovs_bridges else: bridges = available_configuration_bridges for bridge in bridges: LOG.info("Cleaning bridge: %s", bridge) ovs.ovsdb.ovs_cleanup(bridge, conf.ovs_all_ports).execute(check_error=True) LOG.info("OVS cleanup completed successfully") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/cmd/pd_notify.py0000644000175000017500000000250700000000000021751 0ustar00coreycorey00000000000000# Copyright (c) 2015 Cisco Systems. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import signal import sys from neutron_lib.utils import file as file_utils def main(): """Expected arguments: sys.argv[1] - The add/update/delete operation performed by the PD agent sys.argv[2] - The file where the new prefix should be written sys.argv[3] - The process ID of the L3 agent to be notified of this change """ operation = sys.argv[1] prefix_fname = sys.argv[2] agent_pid = sys.argv[3] prefix = os.getenv('PREFIX1', "::") if operation in ["add", "update"]: file_utils.replace_file(prefix_fname, "%s/64" % prefix) elif operation == "delete": file_utils.replace_file(prefix_fname, "::/64") os.kill(int(agent_pid), signal.SIGUSR1) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/cmd/runtime_checks.py0000644000175000017500000000243500000000000022761 0ustar00coreycorey00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from neutron.agent.linux import utils as agent_utils LOG = logging.getLogger(__name__) # NOTE: Runtime checks are strongly discouraged in favor of sanity checks # which would be run at system setup time. Please consider writing a # sanity check instead. def dhcp_release6_supported(): try: cmd = ['dhcp_release6', '--help'] env = {'LC_ALL': 'C'} agent_utils.execute(cmd, addl_env=env) except (OSError, RuntimeError, IndexError, ValueError) as e: LOG.debug("Exception while checking dhcp_release6. " "Exception: %s", e) return False return True ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.219044 neutron-16.0.0.0b2.dev214/neutron/cmd/sanity/0000755000175000017500000000000000000000000020707 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/cmd/sanity/__init__.py0000644000175000017500000000000000000000000023006 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/cmd/sanity/checks.py0000644000175000017500000004617600000000000022537 0ustar00coreycorey00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
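# Most probes in this module share one pattern: create a throwaway,
# randomly named resource (bridge, port, namespace, keepalived instance),
# observe whether the operation succeeds, then clean the resource up.
# ovs_vxlan_supported() below, for instance, adds a VXLAN tunnel port to a
# temporary bridge and simply compares the returned ofport against
# ovs_lib.INVALID_OFPORT.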
import distutils import re import shutil import tempfile import netaddr from neutron_lib import constants as n_consts from neutron_lib import exceptions from oslo_config import cfg from oslo_log import log as logging from oslo_utils import uuidutils from oslo_utils import versionutils from neutron.agent.common import ovs_lib from neutron.agent.l3 import ha_router from neutron.agent.l3 import namespaces from neutron.agent.linux import external_process from neutron.agent.linux import ip_lib from neutron.agent.linux import ip_link_support from neutron.agent.linux import keepalived from neutron.agent.linux import utils as agent_utils from neutron.cmd import runtime_checks from neutron.common import utils as common_utils from neutron.plugins.ml2.drivers.openvswitch.agent.common \ import constants as ovs_const LOG = logging.getLogger(__name__) MINIMUM_DNSMASQ_VERSION = '2.67' DNSMASQ_VERSION_DHCP_RELEASE6 = '2.76' DNSMASQ_VERSION_HOST_ADDR6_LIST = '2.81' DIRECT_PORT_QOS_MIN_OVS_VERSION = '2.11' MINIMUM_DIBBLER_VERSION = '1.0.1' CONNTRACK_GRE_MODULE = 'nf_conntrack_proto_gre' def ovs_vxlan_supported(from_ip='192.0.2.1', to_ip='192.0.2.2'): name = common_utils.get_rand_device_name(prefix='vxlantest-') with ovs_lib.OVSBridge(name) as br: port = br.add_tunnel_port(from_ip, to_ip, n_consts.TYPE_VXLAN) return port != ovs_lib.INVALID_OFPORT def ovs_geneve_supported(from_ip='192.0.2.3', to_ip='192.0.2.4'): name = common_utils.get_rand_device_name(prefix='genevetest-') with ovs_lib.OVSBridge(name) as br: port = br.add_tunnel_port(from_ip, to_ip, n_consts.TYPE_GENEVE) return port != ovs_lib.INVALID_OFPORT def iproute2_vxlan_supported(): ip = ip_lib.IPWrapper() name = common_utils.get_rand_device_name(prefix='vxlantest-') port = ip.add_vxlan(name, 3000) ip.del_veth(name) return name == port.name def patch_supported(): name, peer_name, patch_name = common_utils.get_related_rand_device_names( ['patchtest-', 'peertest0-', 'peertest1-']) with ovs_lib.OVSBridge(name) as br: port = br.add_patch_port(patch_name, peer_name) return port != ovs_lib.INVALID_OFPORT def nova_notify_supported(): try: import neutron.notifiers.nova # noqa since unused return True except ImportError: return False def ofctl_arg_supported(cmd, **kwargs): """Verify if ovs-ofctl binary supports cmd with **kwargs. :param cmd: ovs-ofctl command to use for test. :param **kwargs: arguments to test with the command. :returns: a boolean if the supplied arguments are supported. """ br_name = common_utils.get_rand_device_name(prefix='br-test-') with ovs_lib.OVSBridge(br_name) as test_br: full_args = ["ovs-ofctl", cmd, test_br.br_name, ovs_lib._build_flow_expr_str(kwargs, cmd.split('-')[0], False)] try: agent_utils.execute(full_args, run_as_root=True) except RuntimeError as e: LOG.debug("Exception while checking supported feature via " "command %s. 
Exception: %s", full_args, e) return False except Exception: LOG.exception("Unexpected exception while checking supported" " feature via command: %s", full_args) return False else: return True def arp_responder_supported(): mac = netaddr.EUI('dead:1234:beef', dialect=netaddr.mac_unix) ip = netaddr.IPAddress('240.0.0.1') actions = ovs_const.ARP_RESPONDER_ACTIONS % {'mac': mac, 'ip': ip} return ofctl_arg_supported(cmd='add-flow', table=21, priority=1, proto='arp', dl_vlan=42, nw_dst='%s' % ip, actions=actions) def arp_header_match_supported(): return ofctl_arg_supported(cmd='add-flow', table=24, priority=1, proto='arp', arp_op='0x2', arp_spa='1.1.1.1', actions="NORMAL") def icmpv6_header_match_supported(): return ofctl_arg_supported(cmd='add-flow', table=ovs_const.ARP_SPOOF_TABLE, priority=1, dl_type=n_consts.ETHERTYPE_IPV6, nw_proto=n_consts.PROTO_NUM_IPV6_ICMP, icmp_type=n_consts.ICMPV6_TYPE_NA, nd_target='fdf8:f53b:82e4::10', actions="NORMAL") def _vf_management_support(required_caps): is_supported = True try: vf_section = ip_link_support.IpLinkSupport.get_vf_mgmt_section() for cap in required_caps: if not ip_link_support.IpLinkSupport.vf_mgmt_capability_supported( vf_section, cap): is_supported = False LOG.debug("ip link command does not support " "vf capability '%(cap)s'", {'cap': cap}) except ip_link_support.UnsupportedIpLinkCommand: LOG.exception("Unexpected exception while checking supported " "ip link command") return False return is_supported def vf_management_supported(): required_caps = ( ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_STATE, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_SPOOFCHK, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE) return _vf_management_support(required_caps) def vf_extended_management_supported(): required_caps = ( ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_STATE, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_SPOOFCHK, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_MIN_TX_RATE, ) return _vf_management_support(required_caps) def netns_read_requires_helper(): nsname = "netnsreadtest-" + uuidutils.generate_uuid() ip_lib.create_network_namespace(nsname) try: # read without root_helper. if exists, not required. exists = ip_lib.network_namespace_exists(nsname) finally: ip_lib.delete_network_namespace(nsname) return not exists def get_minimal_dnsmasq_version_supported(): return MINIMUM_DNSMASQ_VERSION def get_dnsmasq_version_with_dhcp_release6(): return DNSMASQ_VERSION_DHCP_RELEASE6 def get_dnsmasq_version_with_host_addr6_list(): return DNSMASQ_VERSION_HOST_ADDR6_LIST def get_ovs_version_for_qos_direct_port_support(): return DIRECT_PORT_QOS_MIN_OVS_VERSION def dnsmasq_local_service_supported(): cmd = ['dnsmasq', '--test', '--local-service'] env = {'LC_ALL': 'C'} obj, cmd = agent_utils.create_process(cmd, addl_env=env) _stdout, _stderr = obj.communicate() returncode = obj.returncode if returncode == 127: LOG.debug("Exception while checking dnsmasq version. 
" "dnsmasq: No such file or directory") return False elif returncode == 1: return False return True def dnsmasq_version_supported(): try: cmd = ['dnsmasq', '--version'] env = {'LC_ALL': 'C'} out = agent_utils.execute(cmd, addl_env=env) m = re.search(r"version (\d+\.\d+)", out) ver = distutils.version.StrictVersion(m.group(1) if m else '0.0') if ver < distutils.version.StrictVersion(MINIMUM_DNSMASQ_VERSION): return False if (cfg.CONF.dnsmasq_enable_addr6_list is True and ver < distutils.version.StrictVersion( DNSMASQ_VERSION_HOST_ADDR6_LIST)): LOG.warning('Support for multiple IPv6 addresses in host ' 'entries was introduced in dnsmasq version ' '%(required)s. Found dnsmasq version %(current)s, ' 'which does not support this feature. Unless support ' 'for multiple IPv6 addresses was backported to the ' 'running build of dnsmasq, the configuration option ' 'dnsmasq_enable_addr6_list should be set to False.', {'required': DNSMASQ_VERSION_HOST_ADDR6_LIST, 'current': ver}) except (OSError, RuntimeError, IndexError, ValueError) as e: LOG.debug("Exception while checking minimal dnsmasq version. " "Exception: %s", e) return False return True def ovs_qos_direct_port_supported(): try: cmd = ['ovs-vsctl', '-V'] out = agent_utils.execute(cmd) matched_line = re.search(r"ovs-vsctl.*", out) matched_version = re.search(r"(\d+\.\d+)", matched_line.group(0)) ver = versionutils.convert_version_to_tuple(matched_version.group(1) if matched_version else '0.0') minver = versionutils.convert_version_to_tuple( DIRECT_PORT_QOS_MIN_OVS_VERSION) if ver < minver: return False except (OSError, RuntimeError, ValueError) as e: LOG.debug("Exception while checking minimal ovs version " "required for supporting direct ports QoS rules. " "Exception: %s", e) return False return True def dhcp_release6_supported(): return runtime_checks.dhcp_release6_supported() def bridge_firewalling_enabled(): for proto in ('arp', 'ip', 'ip6'): knob = 'net.bridge.bridge-nf-call-%stables' % proto cmd = ['sysctl', '-b', knob] try: out = agent_utils.execute(cmd) except (OSError, RuntimeError, IndexError, ValueError) as e: LOG.debug("Exception while extracting %(knob)s. " "Exception: %(e)s", {'knob': knob, 'e': e}) return False if out == '0': return False return True class KeepalivedIPv6Test(object): def __init__(self, ha_port, gw_port, gw_vip, default_gw): self.ha_port = ha_port self.gw_port = gw_port self.gw_vip = gw_vip self.default_gw = default_gw self.manager = None self.config = None self.config_path = None self.nsname = "keepalivedtest-" + uuidutils.generate_uuid() self.pm = None self.orig_interval = cfg.CONF.AGENT.check_child_processes_interval def configure(self): config = keepalived.KeepalivedConf() instance1 = keepalived.KeepalivedInstance('MASTER', self.ha_port, 1, ['169.254.192.0/18'], advert_int=5) instance1.track_interfaces.append(self.ha_port) # Configure keepalived with an IPv6 address (gw_vip) on gw_port. vip_addr1 = keepalived.KeepalivedVipAddress(self.gw_vip, self.gw_port) instance1.vips.append(vip_addr1) # Configure keepalived with an IPv6 default route on gw_port. gateway_route = keepalived.KeepalivedVirtualRoute(n_consts.IPv6_ANY, self.default_gw, self.gw_port) instance1.virtual_routes.gateway_routes = [gateway_route] config.add_instance(instance1) self.config = config def start_keepalived_process(self): # Disable process monitoring for Keepalived process. 
cfg.CONF.set_override('check_child_processes_interval', 0, 'AGENT') self.pm = external_process.ProcessMonitor(cfg.CONF, 'router') # Create a temp directory to store keepalived configuration. self.config_path = tempfile.mkdtemp() # Instantiate keepalived manager with the IPv6 configuration. self.manager = keepalived.KeepalivedManager( 'router1', self.config, namespace=self.nsname, process_monitor=self.pm, conf_path=self.config_path) self.manager.spawn() def verify_ipv6_address_assignment(self, gw_dev): process = self.manager.get_process() common_utils.wait_until_true(lambda: process.active) def _gw_vip_assigned(): iface_ip = gw_dev.addr.list(ip_version=6, scope='global') if iface_ip: return self.gw_vip == iface_ip[0]['cidr'] common_utils.wait_until_true(_gw_vip_assigned) def __enter__(self): ip_lib.create_network_namespace(self.nsname) return self def __exit__(self, exc_type, exc_value, exc_tb): if self.pm: self.pm.stop() if self.manager: self.manager.disable() if self.config_path: shutil.rmtree(self.config_path, ignore_errors=True) ip_lib.delete_network_namespace(self.nsname) cfg.CONF.set_override('check_child_processes_interval', self.orig_interval, 'AGENT') def keepalived_ipv6_supported(): """Check if keepalived supports IPv6 functionality. Validation is done as follows. 1. Create a namespace. 2. Create OVS bridge with two ports (ha_port and gw_port) 3. Move the ovs ports to the namespace. 4. Spawn keepalived process inside the namespace with IPv6 configuration. 5. Verify if IPv6 address is assigned to gw_port. 6. Verify if IPv6 default route is configured by keepalived. """ br_name, ha_port, gw_port = common_utils.get_related_rand_device_names( ['ka-test-', ha_router.HA_DEV_PREFIX, namespaces.INTERNAL_DEV_PREFIX]) gw_vip = 'fdf8:f53b:82e4::10/64' expected_default_gw = 'fe80:f816::1' with ovs_lib.OVSBridge(br_name) as br: with KeepalivedIPv6Test(ha_port, gw_port, gw_vip, expected_default_gw) as ka: br.add_port(ha_port, ('type', 'internal')) br.add_port(gw_port, ('type', 'internal')) ha_dev = ip_lib.IPDevice(ha_port) gw_dev = ip_lib.IPDevice(gw_port) ha_dev.link.set_netns(ka.nsname) gw_dev.link.set_netns(ka.nsname) ha_dev.link.set_up() gw_dev.link.set_up() ha_dev.addr.add('169.254.192.8/18') ka.configure() ka.start_keepalived_process() ka.verify_ipv6_address_assignment(gw_dev) default_gw = gw_dev.route.get_gateway(ip_version=6) if default_gw: default_gw = default_gw['via'] return expected_default_gw == default_gw def ovsdb_native_supported(): # Running the test should ensure we are configured for OVSDB native try: ovs = ovs_lib.BaseOVS() ovs.get_bridges() return True except ImportError as ex: LOG.error("Failed to import required modules. Ensure that the " "python-openvswitch package is installed. Error: %s", ex) except Exception: LOG.exception("Unexpected exception occurred.") return False def ovs_conntrack_supported(): br_name = common_utils.get_rand_device_name(prefix="ovs-test-") with ovs_lib.OVSBridge(br_name) as br: try: br.add_protocols(*["OpenFlow%d" % i for i in range(10, 15)]) except RuntimeError as e: LOG.debug("Exception while checking ovs conntrack support: %s", e) return False return ofctl_arg_supported(cmd='add-flow', ct_state='+trk', actions='drop') def ebtables_supported(): try: cmd = ['ebtables', '--version'] agent_utils.execute(cmd) return True except (OSError, RuntimeError, IndexError, ValueError) as e: LOG.debug("Exception while checking for installed ebtables. 
" "Exception: %s", e) return False def ipset_supported(): try: cmd = ['ipset', '--version'] agent_utils.execute(cmd) return True except (OSError, RuntimeError, IndexError, ValueError) as e: LOG.debug("Exception while checking for installed ipset. " "Exception: %s", e) return False def ip6tables_supported(): try: cmd = ['ip6tables', '--version'] agent_utils.execute(cmd) return True except (OSError, RuntimeError, IndexError, ValueError) as e: LOG.debug("Exception while checking for installed ip6tables. " "Exception: %s", e) return False def conntrack_supported(): try: cmd = ['conntrack', '--version'] agent_utils.execute(cmd) return True except (OSError, RuntimeError, IndexError, ValueError) as e: LOG.debug("Exception while checking for installed conntrack. " "Exception: %s", e) return False def get_minimal_dibbler_version_supported(): return MINIMUM_DIBBLER_VERSION def dibbler_version_supported(): try: cmd = ['dibbler-client', 'help'] out = agent_utils.execute(cmd) return '-w' in out except (OSError, RuntimeError, IndexError, ValueError) as e: LOG.debug("Exception while checking minimal dibbler version. " "Exception: %s", e) return False def _fix_ip_nonlocal_bind_root_value(original_value): current_value = ip_lib.get_ip_nonlocal_bind(namespace=None) if current_value != original_value: ip_lib.set_ip_nonlocal_bind(value=original_value, namespace=None) def ip_nonlocal_bind(): nsname1 = "ipnonlocalbind1-" + uuidutils.generate_uuid() nsname2 = "ipnonlocalbind2-" + uuidutils.generate_uuid() ip_lib.create_network_namespace(nsname1) try: ip_lib.create_network_namespace(nsname2) try: original_value = ip_lib.get_ip_nonlocal_bind(namespace=None) try: ip_lib.set_ip_nonlocal_bind(value=0, namespace=nsname1) ip_lib.set_ip_nonlocal_bind(value=1, namespace=nsname2) ns1_value = ip_lib.get_ip_nonlocal_bind(namespace=nsname1) finally: _fix_ip_nonlocal_bind_root_value(original_value) except RuntimeError as e: LOG.debug("Exception while checking ip_nonlocal_bind. " "Exception: %s", e) return False finally: ip_lib.delete_network_namespace(nsname2) finally: ip_lib.delete_network_namespace(nsname1) return ns1_value == 0 def gre_conntrack_supported(): cmd = ['modinfo', CONNTRACK_GRE_MODULE] try: return agent_utils.execute(cmd, log_fail_as_error=False) except exceptions.ProcessExecutionError: return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/cmd/sanity_check.py0000644000175000017500000004226500000000000022427 0ustar00coreycorey00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sys from oslo_config import cfg from oslo_log import log as logging from neutron._i18n import _ from neutron.agent import dhcp_agent from neutron.cmd.sanity import checks from neutron.common import config from neutron.conf.agent import securitygroups_rpc from neutron.conf.db import l3_hamode_db from neutron.conf.plugins.ml2 import config as ml2_conf from neutron.conf.plugins.ml2.drivers import linuxbridge as lb_conf from neutron.conf.plugins.ml2.drivers import ovs_conf LOG = logging.getLogger(__name__) def setup_conf(): ovs_conf.register_ovs_agent_opts(cfg.CONF) lb_conf.register_linuxbridge_opts(cfg.CONF) ml2_conf.register_ml2_plugin_opts(cfg.CONF) securitygroups_rpc.register_securitygroups_opts(cfg.CONF) dhcp_agent.register_options(cfg.CONF) l3_hamode_db.register_db_l3_hamode_opts(cfg.CONF) class BoolOptCallback(cfg.BoolOpt): def __init__(self, name, callback, **kwargs): if 'default' not in kwargs: kwargs['default'] = False self.callback = callback super(BoolOptCallback, self).__init__(name, **kwargs) def check_ovs_vxlan(): result = checks.ovs_vxlan_supported() if not result: LOG.error('Check for Open vSwitch VXLAN support failed. ' 'Please ensure that the version of openvswitch ' 'being used has VXLAN support.') return result def check_ovs_geneve(): result = checks.ovs_geneve_supported() if not result: LOG.error('Check for Open vSwitch Geneve support failed. ' 'Please ensure that the version of openvswitch ' 'and kernel being used has Geneve support.') return result def check_iproute2_vxlan(): result = checks.iproute2_vxlan_supported() if not result: LOG.error('Check for iproute2 VXLAN support failed. Please ensure ' 'that the iproute2 has VXLAN support.') return result def check_ovs_patch(): result = checks.patch_supported() if not result: LOG.error('Check for Open vSwitch patch port support failed. ' 'Please ensure that the version of openvswitch ' 'being used has patch port support or disable features ' 'requiring patch ports (gre/vxlan, etc.).') return result def check_read_netns(): required = checks.netns_read_requires_helper() if not required and cfg.CONF.AGENT.use_helper_for_ns_read: LOG.warning("The user that is executing neutron can read the " "namespaces without using the root_helper. Disable " "the use_helper_for_ns_read option to avoid a " "performance impact.") # Don't fail because nothing is actually broken. Just not optimal. result = True elif required and not cfg.CONF.AGENT.use_helper_for_ns_read: LOG.error("The user that is executing neutron does not have " "permissions to read the namespaces. Enable the " "use_helper_for_ns_read configuration option.") result = False else: # everything is configured appropriately result = True return result # NOTE(ihrachyshka): since the minimal version is currently capped due to # missing hwaddr matching in dnsmasq < 2.67, a better version of the check # would actually start dnsmasq server and issue a DHCP request using a IPv6 # DHCP client. def check_dnsmasq_version(): result = checks.dnsmasq_version_supported() if not result: LOG.error('The installed version of dnsmasq is too old. ' 'Please update to at least version %s.', checks.get_minimal_dnsmasq_version_supported()) return result def check_ovs_qos_direct_ports_supported(): result = checks.ovs_qos_direct_port_supported() if not result: LOG.error('The installed version of OVS does not support ' 'QoS rules for direct ports. 
' 'Please update to version %s or newer.', checks.get_ovs_version_for_qos_direct_port_support()) return result def check_dnsmasq_local_service_supported(): result = checks.dnsmasq_local_service_supported() if not result: LOG.error('The installed version of dnsmasq is too old. ' 'Please update to a version supporting the ' '--local-service option.') return result def check_keepalived_ipv6_support(): result = checks.keepalived_ipv6_supported() if not result: LOG.error('The installed version of keepalived does not support ' 'IPv6. Please update to at least version 1.2.10 for ' 'IPv6 support.') return result def check_dibbler_version(): result = checks.dibbler_version_supported() if not result: LOG.error('The installed version of dibbler-client is too old. ' 'Please update to at least version %s.', checks.get_minimal_dibbler_version_supported()) return result def check_nova_notify(): result = checks.nova_notify_supported() if not result: LOG.error('Nova notifications are enabled, but novaclient is not ' 'installed. Either disable nova notifications or ' 'install python-novaclient.') return result def check_arp_responder(): result = checks.arp_responder_supported() if not result: LOG.error('Check for Open vSwitch ARP responder support failed. ' 'Please ensure that the version of openvswitch ' 'being used has ARP flows support.') return result def check_arp_header_match(): result = checks.arp_header_match_supported() if not result: LOG.error('Check for Open vSwitch support of ARP header matching ' 'failed. ARP spoofing suppression will not work. A ' 'newer version of OVS is required.') return result def check_icmpv6_header_match(): result = checks.icmpv6_header_match_supported() if not result: LOG.error('Check for Open vSwitch support of ICMPv6 header ' 'matching failed. ICMPv6 Neighbor Advt spoofing (part ' 'of arp spoofing) suppression will not work. A newer ' 'version of OVS is required.') return result def check_vf_management(): result = checks.vf_management_supported() if not result: LOG.error('Check for VF management support failed. ' 'Please ensure that the version of ip link ' 'being used has VF support.') return result def check_vf_extended_management(): result = checks.vf_extended_management_supported() if not result: LOG.error('Check for VF extended management support failed. ' 'Please ensure that the version of ip link ' 'being used has VF extended support: version ' '"iproute2-ss140804", git tag "v3.16.0"') return result def check_ovsdb_native(): result = checks.ovsdb_native_supported() if not result: LOG.error('Check for native OVSDB support failed.') return result def check_ovs_conntrack(): result = checks.ovs_conntrack_supported() if not result: LOG.error('Check for Open vSwitch support of conntrack support ' 'failed. OVS/CT firewall will not work. A newer ' 'version of OVS (2.5+) and linux kernel (4.3+) are ' 'required. See ' 'https://github.com/openvswitch/ovs/blob/master/FAQ.md ' 'for more information.') return result def check_gre_conntrack(): result = checks.gre_conntrack_supported() if not result: LOG.warning('Kernel module %s is not loaded. GRE tunnels from ' 'VM to VM will not work with OVS firewall driver.', checks.CONNTRACK_GRE_MODULE) return result def check_ebtables(): result = checks.ebtables_supported() if not result: LOG.error('Cannot run ebtables. Please ensure that it ' 'is installed.') return result def check_ipset(): result = checks.ipset_supported() if not result: LOG.error('Cannot run ipset. 
Please ensure that it ' 'is installed.') return result def check_ip6tables(): result = checks.ip6tables_supported() if not result: LOG.error('Cannot run ip6tables. Please ensure that it ' 'is installed.') return result def check_conntrack(): result = checks.conntrack_supported() if not result: LOG.error('Cannot run conntrack. Please ensure that it ' 'is installed.') return result def check_dhcp_release6(): result = checks.dhcp_release6_supported() if not result: LOG.error('No dhcp_release6 tool detected. The installed version ' 'of dnsmasq does not support releasing IPv6 leases. ' 'Please update to at least version %s if you need this ' 'feature. If you do not use IPv6 stateful subnets you ' 'can continue to use this version of dnsmasq, as ' 'other IPv6 address assignment mechanisms besides ' 'stateful DHCPv6 should continue to work without ' 'the dhcp_release6 utility. ' 'Current version of dnsmasq is ok if other checks ' 'pass.', checks.get_dnsmasq_version_with_dhcp_release6()) return result def check_bridge_firewalling_enabled(): result = checks.bridge_firewalling_enabled() if not result: LOG.error('Bridge firewalling is not enabled. It may be the case ' 'that bridge and/or br_netfilter kernel modules are not ' 'loaded. Alternatively, corresponding sysctl settings ' 'may be overridden to disable it by default.') return result def check_ip_nonlocal_bind(): result = checks.ip_nonlocal_bind() if not result: LOG.error('This kernel does not isolate ip_nonlocal_bind kernel ' 'option in namespaces. Please update to kernel ' 'version > 3.19.') return result # Define CLI opts to test specific features, with a callback for the test OPTS = [ BoolOptCallback('ovs_vxlan', check_ovs_vxlan, default=False, help=_('Check for OVS vxlan support')), BoolOptCallback('ovs_geneve', check_ovs_geneve, default=False, help=_('Check for OVS Geneve support')), BoolOptCallback('iproute2_vxlan', check_iproute2_vxlan, default=False, help=_('Check for iproute2 vxlan support')), BoolOptCallback('ovs_patch', check_ovs_patch, default=False, help=_('Check for patch port support')), BoolOptCallback('nova_notify', check_nova_notify, help=_('Check for nova notification support')), BoolOptCallback('arp_responder', check_arp_responder, help=_('Check for ARP responder support')), BoolOptCallback('arp_header_match', check_arp_header_match, help=_('Check for ARP header match support')), BoolOptCallback('icmpv6_header_match', check_icmpv6_header_match, help=_('Check for ICMPv6 header match support')), BoolOptCallback('vf_management', check_vf_management, help=_('Check for VF management support')), BoolOptCallback('vf_extended_management', check_vf_extended_management, help=_('Check for VF extended management support')), BoolOptCallback('read_netns', check_read_netns, help=_('Check netns permission settings')), BoolOptCallback('dnsmasq_local_service_supported', check_dnsmasq_local_service_supported, help=_('Check for local-service support in dnsmasq')), BoolOptCallback('ovs_qos_direct_port_supported', check_ovs_qos_direct_ports_supported, help=_('Check if the ovs supports QoS for direct ports')), BoolOptCallback('dnsmasq_version', check_dnsmasq_version, help=_('Check minimal dnsmasq version'), deprecated_for_removal=True, deprecated_since='Pike'), BoolOptCallback('ovsdb_native', check_ovsdb_native, help=_('Check ovsdb native interface support')), BoolOptCallback('ovs_conntrack', check_ovs_conntrack, help=_('Check ovs conntrack support')), BoolOptCallback('gre_conntrack', check_gre_conntrack, help=_('Check if conntrack for gre 
tunnels traffic is ' 'supported')), BoolOptCallback('ebtables_installed', check_ebtables, help=_('Check ebtables installation')), BoolOptCallback('keepalived_ipv6_support', check_keepalived_ipv6_support, help=_('Check keepalived IPv6 support')), BoolOptCallback('dibbler_version', check_dibbler_version, help=_('Check minimal dibbler version'), deprecated_for_removal=True, deprecated_since='Pike'), BoolOptCallback('ipset_installed', check_ipset, help=_('Check ipset installation')), BoolOptCallback('ip6tables_installed', check_ip6tables, help=_('Check ip6tables installation')), BoolOptCallback('conntrack_installed', check_conntrack, help=_('Check conntrack installation')), BoolOptCallback('dhcp_release6', check_dhcp_release6, help=_('Check dhcp_release6 installation')), BoolOptCallback('bridge_firewalling', check_bridge_firewalling_enabled, help=_('Check bridge firewalling'), default=False), BoolOptCallback('ip_nonlocal_bind', check_ip_nonlocal_bind, help=_('Check ip_nonlocal_bind kernel option works with ' 'network namespaces.'), default=False), ] def enable_tests_from_config(): """If a test can depend on configuration, use this function to set the appropriate CLI option to enable that test. It will then be possible to run all necessary tests, just by passing in the appropriate configs. """ cfg.CONF.set_default('vf_management', True) cfg.CONF.set_default('arp_header_match', True) cfg.CONF.set_default('icmpv6_header_match', True) if 'vxlan' in cfg.CONF.AGENT.tunnel_types: cfg.CONF.set_default('ovs_vxlan', True) if 'geneve' in cfg.CONF.AGENT.tunnel_types: cfg.CONF.set_default('ovs_geneve', True) if ('vxlan' in cfg.CONF.ml2.type_drivers or cfg.CONF.VXLAN.enable_vxlan): cfg.CONF.set_default('iproute2_vxlan', True) if cfg.CONF.AGENT.tunnel_types: cfg.CONF.set_default('ovs_patch', True) if not cfg.CONF.OVS.use_veth_interconnection: cfg.CONF.set_default('ovs_patch', True) if (cfg.CONF.notify_nova_on_port_status_changes or cfg.CONF.notify_nova_on_port_data_changes): cfg.CONF.set_default('nova_notify', True) if cfg.CONF.AGENT.arp_responder: cfg.CONF.set_default('arp_responder', True) if not cfg.CONF.AGENT.use_helper_for_ns_read: cfg.CONF.set_default('read_netns', True) if cfg.CONF.dhcp_driver == 'neutron.agent.linux.dhcp.Dnsmasq': cfg.CONF.set_default('dnsmasq_local_service_supported', True) cfg.CONF.set_default('dnsmasq_version', True) if cfg.CONF.l3_ha: cfg.CONF.set_default('keepalived_ipv6_support', True) cfg.CONF.set_default('ip_nonlocal_bind', True) if cfg.CONF.SECURITYGROUP.enable_ipset: cfg.CONF.set_default('ipset_installed', True) if cfg.CONF.SECURITYGROUP.enable_security_group: cfg.CONF.set_default('ip6tables_installed', True) if ('sriovnicswitch' in cfg.CONF.ml2.mechanism_drivers and 'qos' in cfg.CONF.ml2.extension_drivers): cfg.CONF.set_default('vf_extended_management', True) if cfg.CONF.SECURITYGROUP.firewall_driver in ( 'iptables', 'iptables_hybrid', ('neutron.agent.linux.iptables_firewall.' 'IptablesFirewallDriver'), ('neutron.agent.linux.iptables_firewall.' 
'OVSHybridIptablesFirewallDriver'), ): cfg.CONF.set_default('bridge_firewalling', True) def all_tests_passed(): return all(opt.callback() for opt in OPTS if cfg.CONF.get(opt.name)) def main(): setup_conf() cfg.CONF.register_cli_opts(OPTS) cfg.CONF.set_override('use_stderr', True) config.setup_logging() config.init(sys.argv[1:], default_config_files=[]) if cfg.CONF.config_file: enable_tests_from_config() return 0 if all_tests_passed() else 1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/cmd/status.py0000644000175000017500000000615600000000000021305 0ustar00coreycorey00000000000000# Copyright 2018 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.utils import runtime from oslo_config import cfg from oslo_db import options as db_options from oslo_log import log as logging from oslo_upgradecheck import upgradecheck from neutron.conf import common as neutron_conf_base from neutron.conf import service as neutron_conf_service CHECKS_ENTRYPOINTS = 'neutron.status.upgrade.checks' LOG = logging.getLogger(__name__) def load_checks(): checks = [] ns_plugin = runtime.NamespacedPlugins(CHECKS_ENTRYPOINTS) # TODO(slaweq): stop using the private attribute of runtime.NamespacedPlugins # class once it provides a better way to access extensions for module_name, module in ns_plugin._extensions.items(): try: project_checks_class = module.entry_point.load() project_checks = project_checks_class().get_checks() if project_checks: checks += project_checks except Exception as e: LOG.exception("Checks class %(entrypoint)s failed to load. " "Error: %(err)s", {'entrypoint': module_name, 'err': e}) continue return tuple(checks) def setup_conf(conf=cfg.CONF): """Set up the cfg for the status check utility. Use a separate setup_conf for the utility because there are many options from the main config that do not apply during checks. """ neutron_conf_base.register_core_common_config_opts(conf) neutron_conf_service.register_service_opts( neutron_conf_service.SERVICE_OPTS, cfg.CONF) db_options.set_defaults(conf) return conf class Checker(upgradecheck.UpgradeCommands): """Various upgrade checks should be added as separate methods in this class and added to the _upgrade_checks tuple. Check methods here must not rely on the neutron object model since they should be able to run against both N and N-1 releases. Any queries to the database should be done directly through the SQLAlchemy query language, like the database schema migrations. """ # The format of the check functions is to return an # oslo_upgradecheck.upgradecheck.Result # object with the appropriate # oslo_upgradecheck.upgradecheck.Code and details set. # If the check hits warnings or failures then those should be stored # in the returned Result's "details" attribute. The # summary will be rolled up at the end of the check() method. 
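# A minimal sketch of a check method following the contract described
# above (hypothetical example, not an actual check in this class):
#
#     @staticmethod
#     def noop_check(checker):
#         return upgradecheck.Result(
#             upgradecheck.Code.SUCCESS, 'Nothing to verify')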
_upgrade_checks = load_checks() def main(): conf = setup_conf() return upgradecheck.main( conf, project='neutron', upgrade_command=Checker()) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.219044 neutron-16.0.0.0b2.dev214/neutron/cmd/upgrade_checks/0000755000175000017500000000000000000000000022347 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/cmd/upgrade_checks/__init__.py0000644000175000017500000000000000000000000024446 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/cmd/upgrade_checks/base.py0000644000175000017500000000215100000000000023632 0ustar00coreycorey00000000000000# Copyright 2018 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six @six.add_metaclass(abc.ABCMeta) class BaseChecks(object): """Base class providing upgrade checks. Stadium projects which want to provide their own upgrade checks to the neutron-status CLI tool should inherit from this class. Each check method has to accept the neutron.cmd.status.Checker class as an argument because all checks will be run in the context of this class. """ @abc.abstractmethod def get_checks(self): """Get a tuple of check names and check methods to run.""" pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/cmd/upgrade_checks/checks.py0000644000175000017500000002330400000000000024163 0ustar00coreycorey00000000000000# Copyright 2018 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
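# Stadium projects plug their own checks into the neutron-status CLI
# (run as "neutron-status upgrade check") through the
# 'neutron.status.upgrade.checks' entry point and a BaseChecks subclass.
# A minimal, hypothetical sketch (names are illustrative only):
#
#     # setup.cfg of the stadium project:
#     # [entry_points]
#     # neutron.status.upgrade.checks =
#     #     myproject = myproject.upgrade_checks:MyProjectChecks
#
#     class MyProjectChecks(base.BaseChecks):
#         def get_checks(self):
#             return [(_("My project check"), self.my_project_check)]
#
#         @staticmethod
#         def my_project_check(checker):
#             return upgradecheck.Result(upgradecheck.Code.SUCCESS,
#                                        _("All good"))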
from neutron_lib import constants from neutron_lib import context from neutron_lib.db import model_query from oslo_config import cfg from oslo_serialization import jsonutils from oslo_upgradecheck import upgradecheck from neutron._i18n import _ from neutron.cmd.upgrade_checks import base from neutron.db.models import agent as agent_model from neutron.db import models_v2 OVN_ALEMBIC_TABLE_NAME = "ovn_alembic_version" LAST_NETWORKING_OVN_EXPAND_HEAD = "e55d09277410" LAST_NETWORKING_OVN_CONTRACT_HEAD = "1d271ead4eb6" def get_agents(agt_type): """Get agent information from Database :param agt_type: agent type, one of constants.AGENT_TYPE_* :return: list of database query results """ filters = {'agent_type': [agt_type]} ctx = context.get_admin_context() query = model_query.get_collection_query(ctx, agent_model.Agent, filters=filters) return query.all() def get_l3_agents(): return get_agents(constants.AGENT_TYPE_L3) def get_nic_switch_agents(): return get_agents(constants.AGENT_TYPE_NIC_SWITCH) def get_networks(): ctx = context.get_admin_context() query = model_query.get_collection_query(ctx, models_v2.Network) return query.all() def table_exists(table_name): ctx = context.get_admin_context() tables = [t[0] for t in ctx.session.execute("SHOW TABLES;")] return table_name in tables def get_ovn_db_revisions(): ctx = context.get_admin_context() return [row[0] for row in ctx.session.execute( "SELECT version_num from %s;" % OVN_ALEMBIC_TABLE_NAME)] # nosec class CoreChecks(base.BaseChecks): def get_checks(self): return [ (_("Gateway external network"), self.gateway_external_network_check), (_("External network bridge"), self.external_network_bridge_check), (_("Worker counts configured"), self.worker_count_check), (_("Networking-ovn database revision"), self.ovn_db_revision_check), (_("NIC Switch agent check kernel"), self.nic_switch_agent_min_kernel_check) ] @staticmethod def worker_count_check(checker): if cfg.CONF.api_workers and cfg.CONF.rpc_workers: return upgradecheck.Result( upgradecheck.Code.SUCCESS, _("Number of workers already " "defined in config")) else: return upgradecheck.Result( upgradecheck.Code.WARNING, _("The default number of workers " "has changed. Please see release notes for the new values, " "but it is strongly encouraged for deployers to manually " "set the values for api_workers and rpc_workers.")) @staticmethod def external_network_bridge_check(checker): if not cfg.CONF.database.connection: return upgradecheck.Result( upgradecheck.Code.WARNING, _("Database connection string is not set. Check of usage of " "'external_network_bridge' config option in L3 agents " "can't be done")) agents_with_external_bridge = [] for agent in get_l3_agents(): config_string = agent.get('configurations') if not config_string: continue config = jsonutils.loads(config_string) if config.get("external_network_bridge"): agents_with_external_bridge.append(agent.get("host")) if agents_with_external_bridge: return upgradecheck.Result( upgradecheck.Code.WARNING, _("L3 agents on hosts %s are still using " "'external_network_bridge' config option to provide " "gateway connectivity. This option is now removed. 
" "Migration of routers from those L3 agents will be " "required to connect them to external network through " "integration bridge.") % agents_with_external_bridge) else: return upgradecheck.Result( upgradecheck.Code.SUCCESS, _("L3 agents are using integration bridge to connect external " "gateways")) @staticmethod def gateway_external_network_check(checker): if not cfg.CONF.database.connection: return upgradecheck.Result( upgradecheck.Code.WARNING, _("Database connection string is not set. Check of usage of " "'gateway_external_network_id' config option in L3 agents " "can't be done")) agents_with_gateway_external_net = [] for agent in get_l3_agents(): config_string = agent.get('configurations') if not config_string: continue config = jsonutils.loads(config_string) if config.get("gateway_external_network_id"): agents_with_gateway_external_net.append(agent.get("host")) if agents_with_gateway_external_net: agents_list = ", ".join(agents_with_gateway_external_net) return upgradecheck.Result( upgradecheck.Code.WARNING, _("L3 agents on hosts %s are still using " "'gateway_external_network_id' config option to configure " "external network used as gateway for routers. " "This option is now removed and routers on those hosts can " "use multiple external networks as gateways.") % agents_list) else: return upgradecheck.Result( upgradecheck.Code.SUCCESS, _("L3 agents can use multiple networks as external gateways.")) @staticmethod def network_mtu_check(checker): if not cfg.CONF.database.connection: return upgradecheck.Result( upgradecheck.Code.WARNING, _("Database connection string is not set. Check of 'mtu' in " "networks can't be done")) networks_with_empty_mtu_attr = [] for network in get_networks(): mtu = network.get('mtu', None) if not mtu: networks_with_empty_mtu_attr.append(network.get("id")) if networks_with_empty_mtu_attr: networks_list = ", ".join(networks_with_empty_mtu_attr) return upgradecheck.Result( upgradecheck.Code.WARNING, _("The 'mtu' attribute of networks %s are not set " "This attribute can't be null now.") % networks_list) else: return upgradecheck.Result( upgradecheck.Code.SUCCESS, _("The 'mtu' attribute of all networks are set.")) @staticmethod def ovn_db_revision_check(checker): if not cfg.CONF.database.connection: return upgradecheck.Result( upgradecheck.Code.WARNING, _("Database connection string is not set. Check of " "networking-ovn database revision can't be done.")) if not table_exists(OVN_ALEMBIC_TABLE_NAME): return upgradecheck.Result( upgradecheck.Code.SUCCESS, _("Networking-ovn alembic version table don't exists in " "the database yet.")) revisions = get_ovn_db_revisions() if (LAST_NETWORKING_OVN_EXPAND_HEAD not in revisions or LAST_NETWORKING_OVN_CONTRACT_HEAD not in revisions): return upgradecheck.Result( upgradecheck.Code.FAILURE, _("Networking-ovn database tables are not up to date. " "Please firts update networking-ovn to the latest version " "from Train release.")) return upgradecheck.Result( upgradecheck.Code.SUCCESS, _("Networking-ovn database tables are up to date.")) @staticmethod def nic_switch_agent_min_kernel_check(checker): # TODO(adrianc): This was introduced in U release, consider removing # in 1-2 cycles. # Background: Issue with old kernel is appernet in CentOS 7 and older. # U release is the first release that moves from CentOS-7 to CentOS-8, # this was added as a "heads-up" for operators to make sure min kernel # requirement is fullfiled. 
if not cfg.CONF.database.connection: return upgradecheck.Result( upgradecheck.Code.WARNING, _("Database connection string is not set. " "The NIC Switch agent check can't be done.")) agents = get_nic_switch_agents() if len(agents): hosts = ','.join([agent.get("host") for agent in agents]) return upgradecheck.Result( upgradecheck.Code.WARNING, _("NIC Switch agents detected on hosts %s, please ensure the " "hosts run with a kernel version 3.13 or newer.") % hosts) else: return upgradecheck.Result( upgradecheck.Code.SUCCESS, _("No NIC Switch agents detected.")) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.219044 neutron-16.0.0.0b2.dev214/neutron/common/0000755000175000017500000000000000000000000020125 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/common/__init__.py0000644000175000017500000000000000000000000022224 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/common/_constants.py0000644000175000017500000000466000000000000022660 0ustar00coreycorey00000000000000# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from neutron_lib import constants # NOTE(boden): This module contains constants common to neutron only. # Any constants used outside of neutron should go into neutron-lib. # Security group protocols that support ports SG_PORT_PROTO_NUMS = [ constants.PROTO_NUM_DCCP, constants.PROTO_NUM_SCTP, constants.PROTO_NUM_TCP, constants.PROTO_NUM_UDP, constants.PROTO_NUM_UDPLITE ] SG_PORT_PROTO_NAMES = [ constants.PROTO_NAME_DCCP, constants.PROTO_NAME_SCTP, constants.PROTO_NAME_TCP, constants.PROTO_NAME_UDP, constants.PROTO_NAME_UDPLITE ] # iptables protocols that only support --dport and --sport using -m multiport IPTABLES_MULTIPORT_ONLY_PROTOCOLS = [ constants.PROTO_NAME_UDPLITE ] # Legacy IPv6 ICMP protocol list IPV6_ICMP_LEGACY_PROTO_LIST = [constants.PROTO_NAME_ICMP, constants.PROTO_NAME_IPV6_ICMP_LEGACY] # Number of resources for neutron agent side functions to deal # with large sets. # This value is not tied to any special condition; it is just a human # countable, scalable number. [1] gives us the method used to test the # scale issue. We have tested the values 1000, 500, 200 and 100; with 100, # ovs-agent has a lower timeout probability. According to the test results, # a step size of 100 can indeed cost about 10% more time than 500/1000, but # that extra time is a necessary sacrifice for the restart success rate. # [1] http://paste.openstack.org/show/745685/ AGENT_RES_PROCESSING_STEP = 100 # Number of resources for neutron to divide the large RPC # call data sets. RPC_RES_PROCESSING_STEP = 20 # iptables version that supports the --random-fully option. 
# Do not move this constant to neutron-lib, since it is temporary. IPTABLES_RANDOM_FULLY_VERSION = '1.6.2' # Segmentation ID pool; DB select limit to improve the performance. IDPOOL_SELECT_SIZE = 100 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/common/_deprecate.py0000644000175000017500000001470600000000000022602 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Provide a deprecation method for globals. NOTE: This module may be a candidate for adoption by debtcollector. """ import inspect import sys import debtcollector from neutron._i18n import _ class _MovedGlobals(object): """Override a module to deprecate moved globals. This class is used when globals (attributes of a module) need to be marked as deprecated. It can be used in either or both of two ways: 1. By specifying a default new module, all accesses to a global in the source module will emit a warning if the global does not exist in the source module and it does exist in the new module. This way is intended to be used when many globals are moved from one module to another. 2. By explicitly deprecating individual globals with the _moved_global() function; see below. This class must be called from the last line in a module, as follows: ``_deprecate._MovedGlobals(default_new_module)`` or ``_deprecate._MovedGlobals()`` Args: :param default_new_module: The default new location for moved globals :type default_new_module: module or None Attributes: :ivar _mg__my_globals: The current vars() of the source module :type _mg__my_globals: dict :ivar _mg__default_new_mod: The default location for moved globals :type _mg__default_new_mod: module or None :ivar _mg__old_ref: The original reference to the source module :type _mg__old_ref: module :cvar _mg__moves: Moves (and renames) not involving default_new_module :type _mg__moves: dict NOTE: An instance of _MovedGlobals overrides the module it is called from, so instance and class variables appear in the module namespace. To prevent collisions with existing globals, the instance and class variable names here are prefixed with ``_mg__``. """ # Here we store individual moves and renames. This is a dict where # key = (old_module, old_name) # value = (new_module, new_name) # If new_module is the same as old_module then it is a rename in place. _mg__moves = {} def __init__(self, default_new_module=None): # To avoid infinite recursion at inspect.getsourcelines() below we # must initialize self._mg__my_globals early here. 
self._mg__my_globals = {} self._mg__default_new_mod = default_new_module caller_frame = inspect.stack()[1][0] caller_line = inspect.getframeinfo(caller_frame).lineno source_module = inspect.getmodule(caller_frame) src_mod_last_line = len(inspect.getsourcelines(source_module)[0]) if caller_line < src_mod_last_line: raise SystemExit(_("_MovedGlobals() not called from last " "line in %s") % source_module.__file__) self._mg__my_globals = vars(source_module) # When we return from here we override the sys.modules[] entry # for the source module with this instance. We must keep a # reference to the original module to prevent it from being # garbage collected. self._mg__old_ref = source_module sys.modules[source_module.__name__] = self def __getattr__(self, name): value = self._mg__my_globals.get(name) if not name.startswith("__") and not inspect.ismodule(value): old_module = self._mg__old_ref specified_move = self._mg__moves.get((old_module, name)) if specified_move: new_module, new_name = specified_move else: new_module, new_name = self._mg__default_new_mod, name if new_module and new_name in vars(new_module): old_location = '%s.%s' % (old_module.__name__, name) new_location = '%s.%s' % (new_module.__name__, new_name) changed = 'renamed' if old_module == new_module else 'moved' debtcollector.deprecate( old_location, message='%s to %s' % (changed, new_location), stacklevel=4) return vars(new_module)[new_name] try: return self._mg__my_globals[name] except KeyError: raise AttributeError( _("'module' object has no attribute '%s'") % name) def __setattr__(self, name, val): if name.startswith('_mg__'): return super(_MovedGlobals, self).__setattr__(name, val) self._mg__my_globals[name] = val def __delattr__(self, name): if name not in self._mg__my_globals: raise AttributeError( _("'module' object has no attribute '%s'") % name) self._mg__my_globals.pop(name) def _moved_global(old_name, new_module=None, new_name=None): """Deprecate a single attribute in a module. This function is used to move an attribute to a module that differs from _mg__default_new_mod in _MovedGlobals. It also handles renames. NOTE: This function has no effect if _MovedGlobals() is not called at the end of the module containing the attribute. [TODO(HenryG): Figure out a way of asserting on this.] :param old_name: The name of the attribute that was moved/renamed. :type old_name: str :param new_module: The new module where the attribute is now. :type new_module: module :param new_name: The new name of the attribute. :type new_name: str """ if not (new_module or new_name): raise AssertionError(_("'new_module' and 'new_name' " "must not be both None")) if isinstance(new_module, _MovedGlobals): # The new module has been shimmed, get the original new_module = new_module._mg__old_ref old_module = inspect.getmodule(inspect.stack()[1][0]) # caller's module new_module = new_module or old_module new_name = new_name or old_name _MovedGlobals._mg__moves[(old_module, old_name)] = (new_module, new_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/common/cache_utils.py0000644000175000017500000001053200000000000022763 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import functools from neutron_lib.utils import helpers from oslo_cache import core as cache from oslo_config import cfg from oslo_log import log as logging from oslo_utils import reflection from neutron._i18n import _ LOG = logging.getLogger(__name__) def register_oslo_configs(conf): cache.configure(conf) def get_cache(conf): """Used to get cache client""" if conf.cache.enabled: return _get_cache_region(conf) else: return False def _get_cache_region(conf): region = cache.create_region() cache.configure_cache_region(conf, region) return region def _get_memory_cache_region(expiration_time=5): conf = cfg.ConfigOpts() register_oslo_configs(conf) cache_conf_dict = { 'enabled': True, 'backend': 'oslo_cache.dict', 'expiration_time': expiration_time, } for k, v in cache_conf_dict.items(): conf.set_override(k, v, group='cache') return _get_cache_region(conf) class cache_method_results(object): """This decorator is intended for object methods only.""" def __init__(self, func): self.func = func functools.update_wrapper(self, func) self._first_call = True self._not_cached = cache.NO_VALUE def _get_from_cache(self, target_self, *args, skip_cache=False, **kwargs): target_self_cls_name = reflection.get_class_name(target_self, fully_qualified=False) func_name = "%(module)s.%(class)s.%(func_name)s" % { 'module': target_self.__module__, 'class': target_self_cls_name, 'func_name': self.func.__name__, } key = (func_name,) + args if kwargs: key += helpers.dict2tuple(kwargs) # oslo.cache expects a string or a buffer key = str(key) if not skip_cache: try: item = target_self._cache.get(key) except TypeError: LOG.debug("Method %(func_name)s cannot be cached due to " "unhashable parameters: args: %(args)s, kwargs: " "%(kwargs)s", {'func_name': func_name, 'args': args, 'kwargs': kwargs}) return self.func(target_self, *args, **kwargs) else: LOG.debug('Skipping getting result from cache for %s.', func_name) item = self._not_cached if item is self._not_cached: item = self.func(target_self, *args, **kwargs) target_self._cache.set(key, item) return item def __call__(self, target_self, *args, **kwargs): target_self_cls_name = reflection.get_class_name(target_self, fully_qualified=False) if not hasattr(target_self, '_cache'): raise NotImplementedError( _("Instance of class %(module)s.%(class)s must contain _cache " "attribute") % { 'module': target_self.__module__, 'class': target_self_cls_name}) if not target_self._cache: if self._first_call: LOG.debug("Instance of class %(module)s.%(class)s doesn't " "contain attribute _cache therefore results " "cannot be cached for %(func_name)s.", {'module': target_self.__module__, 'class': target_self_cls_name, 'func_name': self.func.__name__}) self._first_call = False return self.func(target_self, *args, **kwargs) return self._get_from_cache(target_self, *args, **kwargs) def __get__(self, obj, objtype): return functools.partial(self.__call__, obj) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/common/config.py0000644000175000017500000001226600000000000021753 
0ustar00coreycorey00000000000000# Copyright 2011 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Routines for configuring Neutron """ import sys from keystoneauth1 import loading as ks_loading from neutron_lib.api import validators from neutron_lib import rpc as n_rpc from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_middleware import cors from oslo_service import wsgi from neutron._i18n import _ from neutron.conf import common as common_config from neutron import policy from neutron import version LOG = logging.getLogger(__name__) # Jam here any extra log level default you care about. This helps keep # Neutron logs lean. EXTRA_LOG_LEVEL_DEFAULTS = [ 'OFPHandler=INFO', 'OfctlService=INFO', 'os_ken.base.app_manager=INFO', 'os_ken.controller.controller=INFO', ] # Register the configuration options common_config.register_core_common_config_opts() # Ensure that the control exchange is set correctly oslo_messaging.set_transport_defaults(control_exchange='neutron') NOVA_CONF_SECTION = 'nova' ks_loading.register_auth_conf_options(cfg.CONF, NOVA_CONF_SECTION) ks_loading.register_session_conf_options(cfg.CONF, NOVA_CONF_SECTION) # Register the nova configuration options common_config.register_nova_opts() ks_loading.register_auth_conf_options(cfg.CONF, common_config.PLACEMENT_CONF_SECTION) ks_loading.register_session_conf_options(cfg.CONF, common_config.PLACEMENT_CONF_SECTION) # Register the placement configuration options common_config.register_placement_opts() logging.register_options(cfg.CONF) # Register the ironic configuration options ks_loading.register_auth_conf_options(cfg.CONF, common_config.IRONIC_CONF_SECTION) ks_loading.register_session_conf_options(cfg.CONF, common_config.IRONIC_CONF_SECTION) ks_loading.register_adapter_conf_options(cfg.CONF, common_config.IRONIC_CONF_SECTION) common_config.register_ironic_opts() def init(args, default_config_files=None, **kwargs): cfg.CONF(args=args, project='neutron', version='%%(prog)s %s' % version.version_info.release_string(), default_config_files=default_config_files, **kwargs) n_rpc.init(cfg.CONF) # Validate that the base_mac is of the correct format msg = validators.validate_regex(cfg.CONF.base_mac, validators.MAC_PATTERN) if msg: msg = _("Base MAC: %s") % msg raise Exception(msg) def setup_logging(): """Sets up the logging options for a log with supplied name.""" product_name = "neutron" # We use the oslo.log default log levels and add only the extra levels # that Neutron needs. logging.set_defaults(default_log_levels=logging.get_default_log_levels() + EXTRA_LOG_LEVEL_DEFAULTS) logging.setup(cfg.CONF, product_name) LOG.info("Logging enabled!") LOG.info("%(prog)s version %(version)s", {'prog': sys.argv[0], 'version': version.version_info.release_string()}) LOG.debug("command line: %s", " ".join(sys.argv)) def reset_service(): # Reset worker in case SIGHUP is called. # Note that this is called only in case a service is running in # daemon mode. 
setup_logging() set_config_defaults() policy.refresh() def load_paste_app(app_name): """Builds and returns a WSGI app from a paste config file. :param app_name: Name of the application to load """ loader = wsgi.Loader(cfg.CONF) # Log the values of registered opts if cfg.CONF.debug: cfg.CONF.log_opt_values(LOG, logging.DEBUG) app = loader.load_app(app_name) return app def set_config_defaults(): """This method updates all configuration default values.""" set_cors_middleware_defaults() def set_cors_middleware_defaults(): """Update default configuration options for oslo.middleware.""" cors.set_defaults( allow_headers=['X-Auth-Token', 'X-Identity-Status', 'X-Roles', 'X-Service-Catalog', 'X-User-Id', 'X-Tenant-Id', 'X-OpenStack-Request-ID'], expose_headers=['X-Auth-Token', 'X-Subject-Token', 'X-Service-Token', 'X-OpenStack-Request-ID', 'OpenStack-Volume-microversion'], allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/common/coordination.py0000644000175000017500000000570100000000000023172 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Coordination and locking utilities.""" import inspect import decorator from oslo_concurrency import lockutils from oslo_log import log from oslo_utils import timeutils import six LOG = log.getLogger(__name__) def synchronized(lock_name): """Synchronization decorator. :param str lock_name: Lock name. Decorating a method like so:: @synchronized('mylock') def foo(self, *args): ... ensures that only one process will execute the foo method at a time. Different methods can share the same lock:: @synchronized('mylock') def foo(self, *args): ... @synchronized('mylock') def bar(self, *args): ... This way only one of either foo or bar can be executing at a time. Lock name can be formatted using Python format string syntax:: @synchronized('{f_name}-{resource.id}-{snap[name]}') def foo(self, resource, snap): ... Available field names are: decorated function parameters and `f_name` as a decorated function name. 
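For example, decorating a method foo(self, resource) with @synchronized('foo-{resource.id}') serializes calls per resource: concurrent calls that receive the same resource id contend for one lock, while calls on different resources proceed in parallel.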
""" @decorator.decorator def _synchronized(f, *a, **k): if six.PY2: # pylint: disable=deprecated-method call_args = inspect.getcallargs(f, *a, **k) else: sig = inspect.signature(f).bind(*a, **k) sig.apply_defaults() call_args = sig.arguments call_args['f_name'] = f.__name__ lock_format_name = lock_name.format(**call_args) t1 = timeutils.now() t2 = None try: with lockutils.lock(lock_format_name): t2 = timeutils.now() LOG.debug('Lock "%(name)s" acquired by "%(function)s" :: ' 'waited %(wait_secs)0.3fs', {'name': lock_format_name, 'function': f.__name__, 'wait_secs': (t2 - t1)}) return f(*a, **k) finally: t3 = timeutils.now() if t2 is None: held_secs = "N/A" else: held_secs = "%0.3fs" % (t3 - t2) LOG.debug('Lock "%(name)s" released by "%(function)s" :: held ' '%(held_secs)s', {'name': lock_format_name, 'function': f.__name__, 'held_secs': held_secs}) return _synchronized ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/common/eventlet_utils.py0000644000175000017500000000265100000000000023551 0ustar00coreycorey00000000000000# Copyright (c) 2015 Cloudbase Solutions. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import eventlet from oslo_utils import importutils def monkey_patch(): # NOTE(slaweq): to workaround issue with import cycles in # eventlet < 0.22.0; # This issue is fixed in eventlet with patch # https://github.com/eventlet/eventlet/commit/b756447bab51046dfc6f1e0e299cc997ab343701 # For details please check https://bugs.launchpad.net/neutron/+bug/1745013 hub = eventlet.hubs.get_hub() hub.is_available = lambda: True if os.name != 'nt': eventlet.monkey_patch() p_c_e = importutils.import_module('pyroute2.config.asyncio') p_c_e.asyncio_config() else: # eventlet monkey patching the os module causes subprocess.Popen to # fail on Windows when using pipes due to missing non-blocking IO # support. eventlet.monkey_patch(os=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/common/ipv6_utils.py0000644000175000017500000000361100000000000022604 0ustar00coreycorey00000000000000# Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ IPv6-related utilities and helper functions. 
""" import netaddr from neutron_lib import constants as const from oslo_log import log from oslo_utils import netutils LOG = log.getLogger(__name__) def is_auto_address_subnet(subnet): """Check if subnet is an auto address subnet.""" modes = [const.IPV6_SLAAC, const.DHCPV6_STATELESS] return (subnet['ipv6_address_mode'] in modes or subnet['ipv6_ra_mode'] in modes) def is_eui64_address(ip_address): """Check if ip address is EUI64.""" ip = netaddr.IPAddress(ip_address) # '0xfffe' addition is used to build EUI-64 from MAC (RFC4291) # Look for it in the middle of the EUI-64 part of address return ip.version == 6 and not ((ip & 0xffff000000) ^ 0xfffe000000) def is_ipv6_pd_enabled(subnet): """Returns True if the subnetpool_id of the given subnet is equal to constants.IPV6_PD_POOL_ID """ return subnet.get('subnetpool_id') == const.IPV6_PD_POOL_ID def valid_ipv6_url(host, port): """Given a host and a port returns a valid URL RFC2732 https://tools.ietf.org/html/rfc2732 square brackets always required in ipv6 URI. """ if netutils.is_valid_ipv6(host): uri = '[%s]:%s' % (host, port) else: uri = '%s:%s' % (host, port) return uri ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.223044 neutron-16.0.0.0b2.dev214/neutron/common/ovn/0000755000175000017500000000000000000000000020727 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/common/ovn/__init__.py0000644000175000017500000000000000000000000023026 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/common/ovn/acl.py0000644000175000017500000004272500000000000022052 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import netaddr from neutron_lib import constants as const from neutron_lib import exceptions as n_exceptions from oslo_config import cfg from neutron._i18n import _ from neutron.common.ovn import constants as ovn_const from neutron.common.ovn import utils # Convert the protocol number from integer to strings because that's # how Neutron will pass it to us PROTOCOL_NAME_TO_NUM_MAP = {k: str(v) for k, v in const.IP_PROTOCOL_MAP.items()} # Create a map from protocol numbers to names PROTOCOL_NUM_TO_NAME_MAP = {v: k for k, v in PROTOCOL_NAME_TO_NUM_MAP.items()} # Group of transport protocols supported TRANSPORT_PROTOCOLS = (const.PROTO_NAME_TCP, const.PROTO_NAME_UDP, const.PROTO_NAME_SCTP, PROTOCOL_NAME_TO_NUM_MAP[const.PROTO_NAME_TCP], PROTOCOL_NAME_TO_NUM_MAP[const.PROTO_NAME_UDP], PROTOCOL_NAME_TO_NUM_MAP[const.PROTO_NAME_SCTP]) # Group of versions of the ICMP protocol supported ICMP_PROTOCOLS = (const.PROTO_NAME_ICMP, const.PROTO_NAME_IPV6_ICMP, const.PROTO_NAME_IPV6_ICMP_LEGACY, PROTOCOL_NAME_TO_NUM_MAP[const.PROTO_NAME_ICMP], PROTOCOL_NAME_TO_NUM_MAP[const.PROTO_NAME_IPV6_ICMP], PROTOCOL_NAME_TO_NUM_MAP[const.PROTO_NAME_IPV6_ICMP_LEGACY]) class ProtocolNotSupported(n_exceptions.NeutronException): message = _('The protocol "%(protocol)s" is not supported. Valid ' 'protocols are: %(valid_protocols)s; or protocol ' 'numbers ranging from 0 to 255.') def is_sg_enabled(): return cfg.CONF.SECURITYGROUP.enable_security_group def acl_direction(r, port=None, port_group=None): if r['direction'] == const.INGRESS_DIRECTION: portdir = 'outport' else: portdir = 'inport' if port: return '%s == "%s"' % (portdir, port['id']) return '%s == @%s' % (portdir, port_group) def acl_ethertype(r): match = '' ip_version = None icmp = None if r['ethertype'] == const.IPv4: match = ' && ip4' ip_version = 'ip4' icmp = 'icmp4' elif r['ethertype'] == const.IPv6: match = ' && ip6' ip_version = 'ip6' icmp = 'icmp6' return match, ip_version, icmp def acl_remote_ip_prefix(r, ip_version): if not r['remote_ip_prefix']: return '' src_or_dst = 'src' if r['direction'] == const.INGRESS_DIRECTION else 'dst' return ' && %s.%s == %s' % (ip_version, src_or_dst, r['remote_ip_prefix']) def _get_protocol_number(protocol): if protocol is None: return try: protocol = int(protocol) if 0 <= protocol <= 255: return str(protocol) except (ValueError, TypeError): protocol = PROTOCOL_NAME_TO_NUM_MAP.get(protocol) if protocol is not None: return protocol raise ProtocolNotSupported( protocol=protocol, valid_protocols=', '.join(PROTOCOL_NAME_TO_NUM_MAP)) def acl_protocol_and_ports(r, icmp): match = '' protocol = _get_protocol_number(r.get('protocol')) if protocol is None: return match min_port = r.get('port_range_min') max_port = r.get('port_range_max') if protocol in TRANSPORT_PROTOCOLS: protocol = PROTOCOL_NUM_TO_NAME_MAP[protocol] match += ' && %s' % protocol if min_port is not None and min_port == max_port: match += ' && %s.dst == %d' % (protocol, min_port) else: if min_port is not None: match += ' && %s.dst >= %d' % (protocol, min_port) if max_port is not None: match += ' && %s.dst <= %d' % (protocol, max_port) elif protocol in ICMP_PROTOCOLS: protocol = icmp match += ' && %s' % protocol if min_port is not None: match += ' && %s.type == %d' % (protocol, min_port) if max_port is not None: match += ' && %s.code == %d' % (protocol, max_port) else: match += ' && ip.proto == %s' % protocol return match def add_acls_for_drop_port_group(pg_name): acl_list = [] for direction, p in (('from-lport', 'inport'), ('to-lport', 'outport')): acl = 
{"port_group": pg_name, "priority": ovn_const.ACL_PRIORITY_DROP, "action": ovn_const.ACL_ACTION_DROP, "log": False, "name": [], "severity": [], "direction": direction, "match": '%s == @%s && ip' % (p, pg_name)} acl_list.append(acl) return acl_list def drop_all_ip_traffic_for_port(port): acl_list = [] for direction, p in (('from-lport', 'inport'), ('to-lport', 'outport')): lswitch = utils.ovn_name(port['network_id']) lport = port['id'] acl = {"lswitch": lswitch, "lport": lport, "priority": ovn_const.ACL_PRIORITY_DROP, "action": ovn_const.ACL_ACTION_DROP, "log": False, "name": [], "severity": [], "direction": direction, "match": '%s == "%s" && ip' % (p, port['id']), "external_ids": {'neutron:lport': port['id']}} acl_list.append(acl) return acl_list def add_sg_rule_acl_for_port(port, r, match): dir_map = {const.INGRESS_DIRECTION: 'to-lport', const.EGRESS_DIRECTION: 'from-lport'} acl = {"lswitch": utils.ovn_name(port['network_id']), "lport": port['id'], "priority": ovn_const.ACL_PRIORITY_ALLOW, "action": ovn_const.ACL_ACTION_ALLOW_RELATED, "log": False, "name": [], "severity": [], "direction": dir_map[r['direction']], "match": match, "external_ids": {'neutron:lport': port['id'], ovn_const.OVN_SG_RULE_EXT_ID_KEY: r['id']}} return acl def add_sg_rule_acl_for_port_group(port_group, r, match): dir_map = {const.INGRESS_DIRECTION: 'to-lport', const.EGRESS_DIRECTION: 'from-lport'} acl = {"port_group": port_group, "priority": ovn_const.ACL_PRIORITY_ALLOW, "action": ovn_const.ACL_ACTION_ALLOW_RELATED, "log": False, "name": [], "severity": [], "direction": dir_map[r['direction']], "match": match, ovn_const.OVN_SG_RULE_EXT_ID_KEY: r['id']} return acl def add_acl_dhcp(port, subnet, ovn_dhcp=True): # Allow DHCP requests for OVN native DHCP service, while responses are # allowed in ovn-northd. # Allow both DHCP requests and responses to pass for other DHCP services. 
# We do this even if DHCP isn't enabled for the subnet acl_list = [] if not ovn_dhcp: acl = {"lswitch": utils.ovn_name(port['network_id']), "lport": port['id'], "priority": ovn_const.ACL_PRIORITY_ALLOW, "action": ovn_const.ACL_ACTION_ALLOW, "log": False, "name": [], "severity": [], "direction": 'to-lport', "match": ('outport == "%s" && ip4 && ip4.src == %s && ' 'udp && udp.src == 67 && udp.dst == 68' ) % (port['id'], subnet['cidr']), "external_ids": {'neutron:lport': port['id']}} acl_list.append(acl) acl = {"lswitch": utils.ovn_name(port['network_id']), "lport": port['id'], "priority": ovn_const.ACL_PRIORITY_ALLOW, "action": ovn_const.ACL_ACTION_ALLOW, "log": False, "name": [], "severity": [], "direction": 'from-lport', "match": ('inport == "%s" && ip4 && ' 'ip4.dst == {255.255.255.255, %s} && ' 'udp && udp.src == 68 && udp.dst == 67' ) % (port['id'], subnet['cidr']), "external_ids": {'neutron:lport': port['id']}} acl_list.append(acl) return acl_list def _get_subnet_from_cache(plugin, admin_context, subnet_cache, subnet_id): if subnet_id in subnet_cache: return subnet_cache[subnet_id] else: subnet = plugin.get_subnet(admin_context, subnet_id) if subnet: subnet_cache[subnet_id] = subnet return subnet def _get_sg_ports_from_cache(plugin, admin_context, sg_ports_cache, sg_id): if sg_id in sg_ports_cache: return sg_ports_cache[sg_id] else: filters = {'security_group_id': [sg_id]} sg_ports = plugin._get_port_security_group_bindings( admin_context, filters) if sg_ports: sg_ports_cache[sg_id] = sg_ports return sg_ports def _get_sg_from_cache(plugin, admin_context, sg_cache, sg_id): if sg_id in sg_cache: return sg_cache[sg_id] else: sg = plugin.get_security_group(admin_context, sg_id) if sg: sg_cache[sg_id] = sg return sg def acl_remote_group_id(r, ip_version, ovn=None): if not r['remote_group_id']: return '' src_or_dst = 'src' if r['direction'] == const.INGRESS_DIRECTION else 'dst' if (ovn and ovn.is_port_groups_supported()): addrset_name = utils.ovn_pg_addrset_name(r['remote_group_id'], ip_version) else: addrset_name = utils.ovn_addrset_name(r['remote_group_id'], ip_version) return ' && %s.%s == $%s' % (ip_version, src_or_dst, addrset_name) def _add_sg_rule_acl_for_port(port, r): # Update the match based on which direction this rule is for (ingress # or egress). match = acl_direction(r, port) # Update the match for IPv4 vs IPv6. ip_match, ip_version, icmp = acl_ethertype(r) match += ip_match # Update the match if an IPv4 or IPv6 prefix was specified. match += acl_remote_ip_prefix(r, ip_version) # Update the match if remote group id was specified. match += acl_remote_group_id(r, ip_version) # Update the match for the protocol (tcp, udp, icmp) and port/type # range if specified. match += acl_protocol_and_ports(r, icmp) # Finally, create the ACL entry for the direction specified. return add_sg_rule_acl_for_port(port, r, match) def _add_sg_rule_acl_for_port_group(port_group, r, ovn): # Update the match based on which direction this rule is for (ingress # or egress). match = acl_direction(r, port_group=port_group) # Update the match for IPv4 vs IPv6. ip_match, ip_version, icmp = acl_ethertype(r) match += ip_match # Update the match if an IPv4 or IPv6 prefix was specified. match += acl_remote_ip_prefix(r, ip_version) # Update the match if remote group id was specified. match += acl_remote_group_id(r, ip_version, ovn) # Update the match for the protocol (tcp, udp, icmp) and port/type # range if specified. 
match += acl_protocol_and_ports(r, icmp) # Finally, create the ACL entry for the direction specified. return add_sg_rule_acl_for_port_group(port_group, r, match) def _acl_columns_name_severity_supported(nb_idl): columns = list(nb_idl._tables['ACL'].columns) return ('name' in columns) and ('severity' in columns) def add_acls_for_sg_port_group(ovn, security_group, txn): for r in security_group['security_group_rules']: acl = _add_sg_rule_acl_for_port_group( utils.ovn_port_group_name(security_group['id']), r, ovn) txn.add(ovn.pg_acl_add(**acl)) def update_acls_for_security_group(plugin, admin_context, ovn, security_group_id, security_group_rule, sg_ports_cache=None, is_add_acl=True): # Skip ACLs if security groups aren't enabled if not is_sg_enabled(): return # Check if ACL log name and severity supported or not keep_name_severity = _acl_columns_name_severity_supported(ovn) # If we're using a Port Group for this SG, just update it. # Otherwise, keep the old behavior. if (ovn.is_port_groups_supported() and not ovn.get_address_set(security_group_id)): acl = _add_sg_rule_acl_for_port_group( utils.ovn_port_group_name(security_group_id), security_group_rule, ovn) # Remove ACL log name and severity if not supported if is_add_acl: if not keep_name_severity: acl.pop('name') acl.pop('severity') ovn.pg_acl_add(**acl).execute(check_error=True) else: ovn.pg_acl_del(acl['port_group'], acl['direction'], acl['priority'], acl['match']).execute( check_error=True) return # Get the security group ports. sg_ports_cache = sg_ports_cache or {} sg_ports = _get_sg_ports_from_cache(plugin, admin_context, sg_ports_cache, security_group_id) # ACLs associated with a security group may span logical switches sg_port_ids = [binding['port_id'] for binding in sg_ports] sg_port_ids = list(set(sg_port_ids)) port_list = plugin.get_ports(admin_context, filters={'id': sg_port_ids}) if not port_list: return acl_new_values_dict = {} update_port_list = [] # NOTE(lizk): We can directly locate the affected acl records, # so no need to compare new acl values with existing acl objects. for port in port_list: # Skip trusted port if utils.is_lsp_trusted(port): continue update_port_list.append(port) acl = _add_sg_rule_acl_for_port(port, security_group_rule) # Remove lport and lswitch since we don't need them acl.pop('lport') acl.pop('lswitch') # Remove ACL log name and severity if not supported, if not keep_name_severity: acl.pop('name') acl.pop('severity') acl_new_values_dict[port['id']] = acl if not update_port_list: return lswitch_names = {p['network_id'] for p in update_port_list} ovn.update_acls(list(lswitch_names), iter(update_port_list), acl_new_values_dict, need_compare=False, is_add_acl=is_add_acl).execute(check_error=True) def add_acls(plugin, admin_context, port, sg_cache, subnet_cache, ovn): acl_list = [] # Skip ACLs if security groups aren't enabled if not is_sg_enabled(): return acl_list sec_groups = utils.get_lsp_security_groups(port) if not sec_groups: # If it is a trusted port or port security is disabled, allow all # traffic. So don't add any ACLs. if utils.is_lsp_trusted(port) or not utils.is_port_security_enabled( port): return acl_list # if port security is enabled, drop all traffic. return drop_all_ip_traffic_for_port(port) # Drop all IP traffic to and from the logical port by default. acl_list += drop_all_ip_traffic_for_port(port) # Add DHCP ACLs. 
port_subnet_ids = set() for ip in port['fixed_ips']: if netaddr.IPNetwork(ip['ip_address']).version != 4: continue subnet = _get_subnet_from_cache(plugin, admin_context, subnet_cache, ip['subnet_id']) # Ignore duplicate DHCP ACLs for the subnet. if subnet['id'] not in port_subnet_ids: acl_list += add_acl_dhcp(port, subnet, True) port_subnet_ids.add(subnet['id']) # We create an ACL entry for each rule on each security group applied # to this port. for sg_id in sec_groups: sg = _get_sg_from_cache(plugin, admin_context, sg_cache, sg_id) for r in sg['security_group_rules']: acl = _add_sg_rule_acl_for_port(port, r) acl_list.append(acl) # Remove ACL log name and severity if not supported, if not _acl_columns_name_severity_supported(ovn): for acl in acl_list: acl.pop('name') acl.pop('severity') return acl_list def acl_port_ips(port): # Skip ACLs if security groups aren't enabled if not is_sg_enabled(): return {'ip4': [], 'ip6': []} ip_list = [x['ip_address'] for x in port.get('fixed_ips', [])] ip_list.extend(utils.get_allowed_address_pairs_ip_addresses(port)) return utils.sort_ips_by_version(ip_list) def filter_acl_dict(acl, extra_fields=None): if extra_fields is None: extra_fields = [] extra_fields.extend(ovn_const.ACL_EXPECTED_COLUMNS_NBDB) return {k: acl[k] for k in extra_fields} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/common/ovn/constants.py0000644000175000017500000001757100000000000023330 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import re from neutron_lib.api.definitions import portbindings from neutron_lib import constants as const import six # TODO(lucasagomes): Remove OVN_SG_NAME_EXT_ID_KEY in the Rocky release OVN_SG_NAME_EXT_ID_KEY = 'neutron:security_group_name' OVN_SG_EXT_ID_KEY = 'neutron:security_group_id' OVN_SG_RULE_EXT_ID_KEY = 'neutron:security_group_rule_id' OVN_ML2_MECH_DRIVER_NAME = 'ovn' OVN_NETWORK_NAME_EXT_ID_KEY = 'neutron:network_name' OVN_NETWORK_MTU_EXT_ID_KEY = 'neutron:mtu' OVN_PORT_NAME_EXT_ID_KEY = 'neutron:port_name' OVN_PORT_FIP_EXT_ID_KEY = 'neutron:port_fip' OVN_ROUTER_NAME_EXT_ID_KEY = 'neutron:router_name' OVN_ROUTER_IS_EXT_GW = 'neutron:is_ext_gw' OVN_GW_PORT_EXT_ID_KEY = 'neutron:gw_port_id' OVN_SUBNET_EXT_ID_KEY = 'neutron:subnet_id' OVN_SUBNET_EXT_IDS_KEY = 'neutron:subnet_ids' OVN_PHYSNET_EXT_ID_KEY = 'neutron:provnet-physical-network' OVN_NETTYPE_EXT_ID_KEY = 'neutron:provnet-network-type' OVN_SEGID_EXT_ID_KEY = 'neutron:provnet-segmentation-id' OVN_PROJID_EXT_ID_KEY = 'neutron:project_id' OVN_DEVID_EXT_ID_KEY = 'neutron:device_id' OVN_CIDRS_EXT_ID_KEY = 'neutron:cidrs' OVN_FIP_EXT_ID_KEY = 'neutron:fip_id' OVN_FIP_PORT_EXT_ID_KEY = 'neutron:fip_port_id' OVN_FIP_EXT_MAC_KEY = 'neutron:fip_external_mac' OVN_REV_NUM_EXT_ID_KEY = 'neutron:revision_number' OVN_QOS_POLICY_EXT_ID_KEY = 'neutron:qos_policy_id' OVN_SG_IDS_EXT_ID_KEY = 'neutron:security_group_ids' OVN_DEVICE_OWNER_EXT_ID_KEY = 'neutron:device_owner' OVN_LIVENESS_CHECK_EXT_ID_KEY = 'neutron:liveness_check_at' METADATA_LIVENESS_CHECK_EXT_ID_KEY = 'neutron:metadata_liveness_check_at' OVN_PORT_BINDING_PROFILE = portbindings.PROFILE OVN_PORT_BINDING_PROFILE_PARAMS = [{'parent_name': six.string_types, 'tag': six.integer_types}, {'vtep-physical-switch': six.string_types, 'vtep-logical-switch': six.string_types}] MIGRATING_ATTR = 'migrating_to' OVN_ROUTER_PORT_OPTION_KEYS = ['router-port', 'nat-addresses'] OVN_GATEWAY_CHASSIS_KEY = 'redirect-chassis' OVN_CHASSIS_REDIRECT = 'chassisredirect' OVN_GATEWAY_NAT_ADDRESSES_KEY = 'nat-addresses' OVN_DROP_PORT_GROUP_NAME = 'neutron_pg_drop' OVN_ROUTER_PORT_GW_MTU_OPTION = 'gateway_mtu' OVN_PROVNET_PORT_NAME_PREFIX = 'provnet-' # Agent extension constants OVN_AGENT_DESC_KEY = 'neutron:description' OVN_AGENT_METADATA_SB_CFG_KEY = 'neutron:ovn-metadata-sb-cfg' OVN_AGENT_METADATA_DESC_KEY = 'neutron:description-metadata' OVN_AGENT_METADATA_ID_KEY = 'neutron:ovn-metadata-id' OVN_CONTROLLER_AGENT = 'OVN Controller agent' OVN_CONTROLLER_GW_AGENT = 'OVN Controller Gateway agent' OVN_METADATA_AGENT = 'OVN Metadata agent' # OVN ACLs have priorities. The highest priority ACL that matches is the one # that takes effect. Our choice of priority numbers is arbitrary, but it # leaves room above and below the ACLs we create. We only need two priorities. # The first is for all the things we allow. The second is for dropping traffic # by default. ACL_PRIORITY_ALLOW = 1002 ACL_PRIORITY_DROP = 1001 ACL_ACTION_DROP = 'drop' ACL_ACTION_ALLOW_RELATED = 'allow-related' ACL_ACTION_ALLOW = 'allow' # When a OVN L3 gateway is created, it needs to be bound to a chassis. In # case a chassis is not found OVN_GATEWAY_INVALID_CHASSIS will be set in # the options column of the Logical Router. This value is used to detect # unhosted router gateways to schedule. 
OVN_GATEWAY_INVALID_CHASSIS = 'neutron-ovn-invalid-chassis' SUPPORTED_DHCP_OPTS = { 4: ['netmask', 'router', 'dns-server', 'log-server', 'lpr-server', 'swap-server', 'ip-forward-enable', 'policy-filter', 'default-ttl', 'mtu', 'router-discovery', 'router-solicitation', 'arp-timeout', 'ethernet-encap', 'tcp-ttl', 'tcp-keepalive', 'nis-server', 'ntp-server', 'tftp-server'], 6: ['server-id', 'dns-server', 'domain-search']} DHCPV6_STATELESS_OPT = 'dhcpv6_stateless' # When setting global DHCP options, these options will be ignored # as they are required for basic network functions and will be # set by Neutron. GLOBAL_DHCP_OPTS_BLACKLIST = { 4: ['server_id', 'lease_time', 'mtu', 'router', 'server_mac', 'dns_server', 'classless_static_route'], 6: ['dhcpv6_stateless', 'dns_server', 'server_id']} CHASSIS_DATAPATH_NETDEV = 'netdev' CHASSIS_IFACE_DPDKVHOSTUSER = 'dpdkvhostuser' OVN_IPV6_ADDRESS_MODES = { const.IPV6_SLAAC: const.IPV6_SLAAC, const.DHCPV6_STATEFUL: const.DHCPV6_STATEFUL.replace('-', '_'), const.DHCPV6_STATELESS: const.DHCPV6_STATELESS.replace('-', '_') } DB_MAX_RETRIES = 60 DB_INITIAL_RETRY_INTERVAL = 0.5 DB_MAX_RETRY_INTERVAL = 1 TXN_COMMITTED = 'committed' INITIAL_REV_NUM = -1 ACL_EXPECTED_COLUMNS_NBDB = ( 'external_ids', 'direction', 'log', 'priority', 'name', 'action', 'severity', 'match') # Resource types TYPE_NETWORKS = 'networks' TYPE_PORTS = 'ports' TYPE_SECURITY_GROUP_RULES = 'security_group_rules' TYPE_ROUTERS = 'routers' TYPE_ROUTER_PORTS = 'router_ports' TYPE_SECURITY_GROUPS = 'security_groups' TYPE_FLOATINGIPS = 'floatingips' TYPE_SUBNETS = 'subnets' _TYPES_PRIORITY_ORDER = ( TYPE_NETWORKS, TYPE_SECURITY_GROUPS, TYPE_SUBNETS, TYPE_ROUTERS, TYPE_PORTS, TYPE_ROUTER_PORTS, TYPE_FLOATINGIPS, TYPE_SECURITY_GROUP_RULES) # The order in which the resources should be created or updated by the # maintenance task: Root ones first and leafs at the end. MAINTENANCE_CREATE_UPDATE_TYPE_ORDER = { t: n for n, t in enumerate(_TYPES_PRIORITY_ORDER, 1)} # The order in which the resources should be deleted by the maintenance # task: Leaf ones first and roots at the end. MAINTENANCE_DELETE_TYPE_ORDER = { t: n for n, t in enumerate(reversed(_TYPES_PRIORITY_ORDER), 1)} # The addresses field to set in the logical switch port which has a # peer router port (connecting to the logical router). 
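# (Illustrative) in practice this means a router-interface
# Logical_Switch_Port is created with addresses=["router"] rather than
# an explicit "<mac> <ip>" list, letting ovn-northd derive the addresses
# from the peer Logical_Router_Port.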
DEFAULT_ADDR_FOR_LSP_WITH_PEER = 'router' # FIP ACTIONS FIP_ACTION_ASSOCIATE = 'fip_associate' FIP_ACTION_DISASSOCIATE = 'fip_disassociate' # Loadbalancer constants LRP_PREFIX = "lrp-" RE_PORT_FROM_GWC = re.compile(r'(%s)([\w-]+)_([\w-]+)' % LRP_PREFIX) LB_VIP_PORT_PREFIX = "ovn-lb-vip-" LB_EXT_IDS_LS_REFS_KEY = 'ls_refs' LB_EXT_IDS_LR_REF_KEY = 'lr_ref' LB_EXT_IDS_POOL_PREFIX = 'pool_' LB_EXT_IDS_LISTENER_PREFIX = 'listener_' LB_EXT_IDS_MEMBER_PREFIX = 'member_' LB_EXT_IDS_VIP_KEY = 'neutron:vip' LB_EXT_IDS_VIP_FIP_KEY = 'neutron:vip_fip' LB_EXT_IDS_VIP_PORT_ID_KEY = 'neutron:vip_port_id' # Hash Ring constants HASH_RING_NODES_TIMEOUT = 60 HASH_RING_TOUCH_INTERVAL = 30 HASH_RING_CACHE_TIMEOUT = 30 HASH_RING_ML2_GROUP = 'mechanism_driver' # Maximum chassis count where a gateway port can be hosted MAX_GW_CHASSIS = 5 UNKNOWN_ADDR = 'unknown' PORT_CAP_SWITCHDEV = 'switchdev' # TODO(lucasagomes): Create constants for other LSP types LSP_TYPE_LOCALNET = 'localnet' LSP_TYPE_VIRTUAL = 'virtual' LSP_TYPE_EXTERNAL = 'external' LSP_OPTIONS_VIRTUAL_PARENTS_KEY = 'virtual-parents' LSP_OPTIONS_VIRTUAL_IP_KEY = 'virtual-ip' HA_CHASSIS_GROUP_DEFAULT_NAME = 'default_ha_chassis_group' HA_CHASSIS_GROUP_HIGHEST_PRIORITY = 32767 # TODO(lucasagomes): Move this to neutron-lib later. # Metadata constants METADATA_DEFAULT_PREFIX = 16 METADATA_DEFAULT_IP = '169.254.169.254' METADATA_DEFAULT_CIDR = '%s/%d' % (METADATA_DEFAULT_IP, METADATA_DEFAULT_PREFIX) METADATA_PORT = 80 # OVN igmp options MCAST_SNOOP = 'mcast_snoop' MCAST_FLOOD_UNREGISTERED = 'mcast_flood_unregistered' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/common/ovn/exceptions.py0000644000175000017500000000262000000000000023462 0ustar00coreycorey00000000000000# Copyright 2019 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions as n_exc from neutron._i18n import _ class RevisionConflict(n_exc.NeutronException): message = _('OVN revision number for %(resource_id)s (type: ' '%(resource_type)s) is equal or higher than the given ' 'resource. Skipping update') class UnknownResourceType(n_exc.NeutronException): message = _('Uknown resource type: %(resource_type)s') class StandardAttributeIDNotFound(n_exc.NeutronException): message = _('Standard attribute ID not found for %(resource_uuid)s') class HashRingIsEmpty(n_exc.NeutronException): message = _('Hash Ring returned empty when hashing "%(key)s". ' 'This should never happen in a normal situation, please ' 'check the status of your cluster') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/common/ovn/extensions.py0000644000175000017500000000321700000000000023503 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(russellb) This remains in its own file (vs constants.py) because we want # to be able to easily import it and export the info without any dependencies # on external imports. # NOTE(russellb) If you update these lists, please also update # doc/source/features.rst and the current release note. ML2_SUPPORTED_API_EXTENSIONS_OVN_L3 = [ 'router', 'extraroute', 'ext-gw-mode', 'fip-port-details', 'pagination', 'sorting', 'project-id', 'dns-integration', ] ML2_SUPPORTED_API_EXTENSIONS = [ 'address-scope', 'agent', 'allowed-address-pairs', 'auto-allocated-topology', 'availability_zone', 'binding', 'default-subnetpools', 'external-net', 'extra_dhcp_opt', 'multi-provider', 'net-mtu', 'network_availability_zone', 'network-ip-availability', 'port-security', 'provider', 'quotas', 'rbac-policies', 'standard-attr-revisions', 'security-group', 'standard-attr-description', 'subnet_allocation', 'standard-attr-tag', 'standard-attr-timestamp', 'trunk', 'quota_details', ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/common/ovn/hash_ring_manager.py0000644000175000017500000000714000000000000024737 0ustar00coreycorey00000000000000# Copyright 2019 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from oslo_log import log from oslo_utils import timeutils import six from tooz import hashring from neutron.common.ovn import constants from neutron.common.ovn import exceptions from neutron.db import ovn_hash_ring_db as db_hash_ring from neutron_lib import context LOG = log.getLogger(__name__) class HashRingManager(object): def __init__(self, group_name): self._hash_ring = None self._last_time_loaded = None self._cache_startup_timeout = True self._group = group_name self.admin_ctx = context.get_admin_context() @property def _wait_startup_before_caching(self): # NOTE(lucasagomes): Some events are processed at the service's # startup time and since many services may be started concurrently # we do not want to use a cached hash ring at that point. This # method checks if the created_at and updated_at columns from the # nodes in the ring from this host is equal, and if so it means # that the service just started. # If the startup timeout already expired, there's no reason to # keep reading from the DB. 
At this point this will always # return False if not self._cache_startup_timeout: return False nodes = db_hash_ring.get_active_nodes( self.admin_ctx, constants.HASH_RING_CACHE_TIMEOUT, self._group, from_host=True) dont_cache = nodes and nodes[0].created_at == nodes[0].updated_at if not dont_cache: self._cache_startup_timeout = False return dont_cache def _load_hash_ring(self, refresh=False): cache_timeout = timeutils.utcnow() - datetime.timedelta( seconds=constants.HASH_RING_CACHE_TIMEOUT) # Refresh the cache if: # - Refreshed is forced (refresh=True) # - Service just started (_wait_startup_before_caching) # - Hash Ring is not yet instantiated # - Cache has timed out if (refresh or self._wait_startup_before_caching or self._hash_ring is None or not self._hash_ring.nodes or cache_timeout >= self._last_time_loaded): nodes = db_hash_ring.get_active_nodes( self.admin_ctx, constants.HASH_RING_NODES_TIMEOUT, self._group) self._hash_ring = hashring.HashRing({node.node_uuid for node in nodes}) self._last_time_loaded = timeutils.utcnow() def refresh(self): self._load_hash_ring(refresh=True) def get_node(self, key): self._load_hash_ring() # tooz expects a byte string for the hash if isinstance(key, six.string_types): key = key.encode('utf-8') try: # We need to pop the value from the set. If empty, # KeyError is raised return self._hash_ring[key].pop() except KeyError: raise exceptions.HashRingIsEmpty(key=key) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/common/ovn/utils.py0000644000175000017500000004104100000000000022441 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import inspect import os import re import netaddr from neutron_lib.api.definitions import external_net from neutron_lib.api.definitions import extra_dhcp_opt as edo_ext from neutron_lib.api.definitions import l3 from neutron_lib.api.definitions import port_security as psec from neutron_lib.api.definitions import portbindings from neutron_lib.api import validators from neutron_lib import constants as const from neutron_lib import context as n_context from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from neutron_lib.utils import net as n_utils from oslo_utils import netutils from oslo_utils import strutils from ovs.db import idl from ovsdbapp.backend.ovs_idl import connection from ovsdbapp.backend.ovs_idl import idlutils from neutron._i18n import _ from neutron.common.ovn import constants from neutron.common.ovn import exceptions as ovn_exc DNS_RESOLVER_FILE = "/etc/resolv.conf" AddrPairsDiff = collections.namedtuple( 'AddrPairsDiff', ['added', 'removed', 'changed']) def ovn_name(id): # The name of the OVN entry will be neutron- # This is due to the fact that the OVN application checks if the name # is a UUID. If so then there will be no matches. # We prefix the UUID to enable us to use the Neutron UUID when # updating, deleting etc. 
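# (Illustrative) ovn_name('<uuid>') -> 'neutron-<uuid>', e.g. the
# Logical_Switch backing a Neutron network is named after the network
# UUID with this prefix.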
return 'neutron-%s' % id def ovn_lrouter_port_name(id): # The name of the OVN lrouter port entry will be lrp- # This is to distinguish with the name of the connected lswitch patch port, # which is named with neutron port uuid, so that OVS patch ports are # generated properly. The pairing patch port names will be: # - patch-lrp--to- # - patch--to-lrp- # lrp stands for Logical Router Port return constants.LRP_PREFIX + '%s' % id def ovn_provnet_port_name(network_id): # The name of OVN lswitch provider network port entry will be # provnet-. The port is created for network having # provider:physical_network attribute. return constants.OVN_PROVNET_PORT_NAME_PREFIX + '%s' % network_id def ovn_vhu_sockpath(sock_dir, port_id): # Frame the socket path of a virtio socket return os.path.join( sock_dir, # this parameter will become the virtio port name, # so it should not exceed IFNAMSIZ(16). (const.VHOST_USER_DEVICE_PREFIX + port_id)[:14]) def ovn_addrset_name(sg_id, ip_version): # The name of the address set for the given security group id and ip # version. The format is: # as-- # with all '-' replaced with '_'. This replacement is necessary # because OVN doesn't support '-' in an address set name. return ('as-%s-%s' % (ip_version, sg_id)).replace('-', '_') def ovn_pg_addrset_name(sg_id, ip_version): # The name of the address set for the given security group id modelled as a # Port Group and ip version. The format is: # pg-- # with all '-' replaced with '_'. This replacement is necessary # because OVN doesn't support '-' in an address set name. return ('pg-%s-%s' % (sg_id, ip_version)).replace('-', '_') def ovn_port_group_name(sg_id): # The name of the port group for the given security group id. # The format is: pg-. return ('pg-%s' % sg_id).replace('-', '_') def is_network_device_port(port): return port.get('device_owner', '').startswith( const.DEVICE_OWNER_PREFIXES) def get_lsp_dhcp_opts(port, ip_version): # Get dhcp options from Neutron port, for setting DHCP_Options row # in OVN. lsp_dhcp_disabled = False lsp_dhcp_opts = {} if is_network_device_port(port): lsp_dhcp_disabled = True else: for edo in port.get(edo_ext.EXTRADHCPOPTS, []): if edo['ip_version'] != ip_version: continue if edo['opt_name'] == 'dhcp_disabled' and ( edo['opt_value'] in ['True', 'true']): # OVN native DHCP is disabled on this port lsp_dhcp_disabled = True # Make sure return value behavior not depends on the order and # content of the extra DHCP options for the port lsp_dhcp_opts.clear() break if edo['opt_name'] not in ( constants.SUPPORTED_DHCP_OPTS[ip_version]): continue opt = edo['opt_name'].replace('-', '_') lsp_dhcp_opts[opt] = edo['opt_value'] return (lsp_dhcp_disabled, lsp_dhcp_opts) def is_lsp_trusted(port): return n_utils.is_port_trusted(port) if port.get('device_owner') else False def is_lsp_ignored(port): # Since the floating IP port is not bound to any chassis, packets from vm # destined to floating IP will be dropped. To overcome this, we do not # create/update floating IP port in OVN. return port.get('device_owner') in [const.DEVICE_OWNER_FLOATINGIP] def get_lsp_security_groups(port, skip_trusted_port=True): # In other agent link OVS, skipping trusted port is processed in security # groups RPC. We haven't that step, so we do it here. 
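# (Illustrative) a trusted, network-owned port (e.g. a router interface)
# therefore yields [], while an ordinary VM port yields its
# 'security_groups' list unchanged.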
return [] if (skip_trusted_port and is_lsp_trusted(port) ) else port.get('security_groups', []) def is_snat_enabled(router): return router.get(l3.EXTERNAL_GW_INFO, {}).get('enable_snat', True) def is_port_security_enabled(port): return port.get(psec.PORTSECURITY) def validate_and_get_data_from_binding_profile(port): if (constants.OVN_PORT_BINDING_PROFILE not in port or not validators.is_attr_set( port[constants.OVN_PORT_BINDING_PROFILE])): return {} param_set = {} param_dict = {} for param_set in constants.OVN_PORT_BINDING_PROFILE_PARAMS: param_keys = param_set.keys() for param_key in param_keys: try: param_dict[param_key] = (port[ constants.OVN_PORT_BINDING_PROFILE][param_key]) except KeyError: pass if len(param_dict) == 0: continue if len(param_dict) != len(param_keys): msg = _('Invalid binding:profile. %s are all ' 'required.') % param_keys raise n_exc.InvalidInput(error_message=msg) if (len(port[constants.OVN_PORT_BINDING_PROFILE]) != len( param_keys)): msg = _('Invalid binding:profile. too many parameters') raise n_exc.InvalidInput(error_message=msg) break if not param_dict: return {} for param_key, param_type in param_set.items(): if param_type is None: continue param_value = param_dict[param_key] if not isinstance(param_value, param_type): msg = _('Invalid binding:profile. %(key)s %(value)s ' 'value invalid type') % {'key': param_key, 'value': param_value} raise n_exc.InvalidInput(error_message=msg) # Make sure we can successfully look up the port indicated by # parent_name. Just let it raise the right exception if there is a # problem. if 'parent_name' in param_set: plugin = directory.get_plugin() plugin.get_port(n_context.get_admin_context(), param_dict['parent_name']) if 'tag' in param_set: tag = int(param_dict['tag']) if tag < 0 or tag > 4095: msg = _('Invalid binding:profile. tag "%s" must be ' 'an integer between 0 and 4095, inclusive') % tag raise n_exc.InvalidInput(error_message=msg) return param_dict def is_dhcp_options_ignored(subnet): # Don't insert DHCP_Options entry for v6 subnet with 'SLAAC' as # 'ipv6_address_mode', since DHCPv6 shouldn't work for this mode. return (subnet['ip_version'] == const.IP_VERSION_6 and subnet.get('ipv6_address_mode') == const.IPV6_SLAAC) def get_ovn_ipv6_address_mode(address_mode): return constants.OVN_IPV6_ADDRESS_MODES[address_mode] def get_revision_number(resource, resource_type): """Get the resource's revision number based on its type.""" if resource_type in (constants.TYPE_NETWORKS, constants.TYPE_PORTS, constants.TYPE_SECURITY_GROUP_RULES, constants.TYPE_ROUTERS, constants.TYPE_ROUTER_PORTS, constants.TYPE_SECURITY_GROUPS, constants.TYPE_FLOATINGIPS, constants.TYPE_SUBNETS): return resource['revision_number'] else: raise ovn_exc.UnknownResourceType(resource_type=resource_type) def remove_macs_from_lsp_addresses(addresses): """Remove the mac addreses from the Logical_Switch_Port addresses column. :param addresses: The list of addresses from the Logical_Switch_Port. Example: ["80:fa:5b:06:72:b7 158.36.44.22", "ff:ff:ff:ff:ff:ff 10.0.0.2"] :returns: A list of IP addesses (v4 and v6) """ ip_list = [] for addr in addresses: ip_list.extend([x for x in addr.split() if (netutils.is_valid_ipv4(x) or netutils.is_valid_ipv6(x))]) return ip_list def get_allowed_address_pairs_ip_addresses(port): """Return a list of IP addresses from port's allowed_address_pairs. 
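For example, a port whose allowed_address_pairs contains
{'mac_address': 'fa:16:3e:00:00:01', 'ip_address': '10.0.0.100'}
yields ['10.0.0.100']; entries without an 'ip_address' key are skipped.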
:param port: A neutron port :returns: A list of IP addesses (v4 and v6) """ return [x['ip_address'] for x in port.get('allowed_address_pairs', []) if 'ip_address' in x] def get_allowed_address_pairs_ip_addresses_from_ovn_port(ovn_port): """Return a list of IP addresses from ovn port. Return a list of IP addresses equivalent of Neutron's port allowed_address_pairs column using the data in the OVN port. :param ovn_port: A OVN port :returns: A list of IP addesses (v4 and v6) """ addresses = remove_macs_from_lsp_addresses(ovn_port.addresses) port_security = remove_macs_from_lsp_addresses(ovn_port.port_security) return [x for x in port_security if x not in addresses] def get_ovn_port_security_groups(ovn_port, skip_trusted_port=True): info = {'security_groups': ovn_port.external_ids.get( constants.OVN_SG_IDS_EXT_ID_KEY, '').split(), 'device_owner': ovn_port.external_ids.get( constants.OVN_DEVICE_OWNER_EXT_ID_KEY, '')} return get_lsp_security_groups(info, skip_trusted_port=skip_trusted_port) def get_ovn_port_addresses(ovn_port): addresses = remove_macs_from_lsp_addresses(ovn_port.addresses) port_security = remove_macs_from_lsp_addresses(ovn_port.port_security) return list(set(addresses + port_security)) def sort_ips_by_version(addresses): ip_map = {'ip4': [], 'ip6': []} for addr in addresses: ip_version = netaddr.IPNetwork(addr).version ip_map['ip%d' % ip_version].append(addr) return ip_map def is_lsp_router_port(port): return port.get('device_owner') in [const.DEVICE_OWNER_ROUTER_INTF, const.DEVICE_OWNER_ROUTER_GW] def get_lrouter_ext_gw_static_route(ovn_router): # TODO(lucasagomes): Remove the try...except block after OVS 2.8.2 # is tagged. try: return [route for route in getattr(ovn_router, 'static_routes', []) if strutils.bool_from_string(getattr( route, 'external_ids', {}).get( constants.OVN_ROUTER_IS_EXT_GW, 'false'))] except KeyError: pass def get_lrouter_snats(ovn_router): return [n for n in getattr(ovn_router, 'nat', []) if n.type == 'snat'] def get_lrouter_non_gw_routes(ovn_router): routes = [] # TODO(lucasagomes): Remove the try...except block after OVS 2.8.2 # is tagged. 
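# (Illustrative) each route kept here surfaces as a plain dict such as
# {'destination': '192.168.10.0/24', 'nexthop': '10.0.0.1'}; routes
# whose external_ids flag them as the external gateway route are
# skipped.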
try: for route in getattr(ovn_router, 'static_routes', []): external_ids = getattr(route, 'external_ids', {}) if strutils.bool_from_string( external_ids.get(constants.OVN_ROUTER_IS_EXT_GW, 'false')): continue routes.append({'destination': route.ip_prefix, 'nexthop': route.nexthop}) except KeyError: pass return routes def is_ovn_l3(l3_plugin): return hasattr(l3_plugin, '_ovn_client_inst') def get_system_dns_resolvers(resolver_file=DNS_RESOLVER_FILE): resolvers = [] if not os.path.exists(resolver_file): return resolvers with open(resolver_file, 'r') as rconf: for line in rconf.readlines(): if not line.startswith('nameserver'): continue line = line.split('nameserver')[1].strip() ipv4 = re.search(r'^(?:[0-9]{1,3}\.){3}[0-9]{1,3}', line) if ipv4: resolvers.append(ipv4.group(0)) return resolvers def get_port_subnet_ids(port): fixed_ips = [ip for ip in port['fixed_ips']] return [f['subnet_id'] for f in fixed_ips] def get_ovsdb_connection(connection_string, schema, timeout, tables=None): helper = idlutils.get_schema_helper(connection_string, schema) if tables: for table in tables: helper.register_table(table) else: helper.register_all() return connection.Connection(idl.Idl(connection_string, helper), timeout) def get_method_class(method): if not inspect.ismethod(method): return return method.__self__.__class__ def ovn_metadata_name(id_): """Return the OVN metadata name based on an id.""" return 'metadata-%s' % id_ def is_gateway_chassis_invalid(chassis_name, gw_chassis, physnet, chassis_physnets): """Check if gateway chassis is invalid @param chassis_name: gateway chassis name @type chassis_name: string @param gw_chassis: List of gateway chassis in the system @type gw_chassis: [] @param physnet: physical network associated to chassis_name @type physnet: string @param chassis_physnets: Dictionary linking chassis with their physnets @type chassis_physnets: {} @return Boolean """ if chassis_name == constants.OVN_GATEWAY_INVALID_CHASSIS: return True elif chassis_name not in chassis_physnets: return True elif physnet and physnet not in chassis_physnets.get(chassis_name): return True elif gw_chassis and chassis_name not in gw_chassis: return True return False def is_provider_network(network): return external_net.EXTERNAL in network def is_neutron_dhcp_agent_port(port): """Check if the given DHCP port belongs to Neutron DHCP agents The DHCP ports with the device_id equals to 'reserved_dhcp_port' or starting with the word 'dhcp' belongs to the Neutron DHCP agents. 
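(Agent-created ports carry device ids of the form
'dhcp<host_uuid>-<network_id>' -- see get_dhcp_agent_device_id() in
neutron.common.utils -- which is why the 'dhcp' prefix test works.)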
""" return (port['device_owner'] == const.DEVICE_OWNER_DHCP and (port['device_id'] == const.DEVICE_ID_RESERVED_DHCP_PORT or port['device_id'].startswith('dhcp'))) def compute_address_pairs_diff(ovn_port, neutron_port): """Compute the differences in the allowed_address_pairs field.""" ovn_ap = get_allowed_address_pairs_ip_addresses_from_ovn_port( ovn_port) neutron_ap = get_allowed_address_pairs_ip_addresses(neutron_port) added = set(neutron_ap) - set(ovn_ap) removed = set(ovn_ap) - set(neutron_ap) return AddrPairsDiff(added, removed, changed=any(added or removed)) def is_gateway_chassis(chassis): """Check if the given chassis is a gateway chassis""" external_ids = getattr(chassis, 'external_ids', {}) return ('enable-chassis-as-gw' in external_ids.get( 'ovn-cms-options', '').split(',')) def get_port_capabilities(port): """Return a list of port's capabilities""" return port.get(portbindings.PROFILE, {}).get('capabilities', []) def get_port_id_from_gwc_row(row): """Return a port_id from gwc row The Gateway_Chassis row stores router port_id in the row name attribute: -_ :param row: A Gateway_Chassis table row. :returns: String containing router port_id. """ return constants.RE_PORT_FROM_GWC.search(row.name).group(2) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/common/profiler.py0000644000175000017500000000361600000000000022327 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import context from oslo_config import cfg from oslo_log import log as logging import osprofiler.initializer from osprofiler import opts as profiler_opts import osprofiler.web CONF = cfg.CONF profiler_opts.set_defaults(CONF) LOG = logging.getLogger(__name__) def setup(name, host='0.0.0.0'): # nosec """Setup OSprofiler notifier and enable profiling. :param name: name of the service, that will be profiled :param host: host (either host name or host address) the service will be running on. By default host will be set to 0.0.0.0, but more specified host name / address usage is highly recommended. """ if CONF.profiler.enabled: osprofiler.initializer.init_from_conf( conf=CONF, context=context.get_admin_context().to_dict(), project="neutron", service=name, host=host ) LOG.info("OSProfiler is enabled.\n" "Traces provided from the profiler " "can only be subscribed to using the same HMAC keys that " "are configured in Neutron's configuration file " "under the [profiler] section.\n To disable OSprofiler " "set in /etc/neutron/neutron.conf:\n" "[profiler]\n" "enabled=false") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/common/test_lib.py0000644000175000017500000000405300000000000022306 0ustar00coreycorey00000000000000# Copyright (c) 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Colorizer Code is borrowed from Twisted: # Copyright (c) 2001-2010 Twisted Matrix Laboratories. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # describes parameters used by different unit/functional tests # a plugin-specific testing mechanism should import this dictionary # and override the values in it if needed (e.g., run_tests.py in # neutron/plugins/openvswitch/ ) test_config = {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/common/utils.py0000644000175000017500000010571400000000000021647 0ustar00coreycorey00000000000000# Copyright 2011, VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Borrowed from nova code base, more utilities will be added/borrowed as and # when needed. 
"""Utilities and helper functions.""" import datetime import functools import importlib import os import os.path import random import re import signal import socket import sys import threading import time import uuid import eventlet from eventlet.green import subprocess import netaddr from neutron_lib import constants as n_const from neutron_lib.db import api as db_api from neutron_lib import exceptions as n_exc from neutron_lib.services.trunk import constants as trunk_constants from neutron_lib.utils import helpers from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import timeutils from oslo_utils import uuidutils from osprofiler import profiler import pkg_resources import neutron from neutron._i18n import _ from neutron.api import api_common TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" LOG = logging.getLogger(__name__) DEFAULT_THROTTLER_VALUE = 2 _SEPARATOR_REGEX = re.compile(r'[/\\]+') class WaitTimeout(Exception): """Default exception coming from wait_until_true() function.""" class TimerTimeout(n_exc.NeutronException): message = _('Timer timeout expired after %(timeout)s second(s).') class LockWithTimer(object): def __init__(self, threshold): self._threshold = threshold self.timestamp = 0 self._lock = threading.Lock() def acquire(self): return self._lock.acquire(False) def release(self): return self._lock.release() def time_to_wait(self): return self.timestamp - time.time() + self._threshold # REVISIT(jlibosva): Some parts of throttler may be similar to what # neutron.notifiers.batch_notifier.BatchNotifier does. They # could be refactored and unified. def throttler(threshold=DEFAULT_THROTTLER_VALUE): """Throttle number of calls to a function to only once per 'threshold'. """ def decorator(f): lock_with_timer = LockWithTimer(threshold) @functools.wraps(f) def wrapper(*args, **kwargs): if lock_with_timer.acquire(): try: fname = f.__name__ time_to_wait = lock_with_timer.time_to_wait() if time_to_wait > 0: LOG.debug("Call of function %s scheduled, sleeping " "%.1f seconds", fname, time_to_wait) # Decorated function has been called recently, wait. eventlet.sleep(time_to_wait) lock_with_timer.timestamp = time.time() finally: lock_with_timer.release() LOG.debug("Calling throttled function %s", fname) return f(*args, **kwargs) return wrapper return decorator def _subprocess_setup(): # Python installs a SIGPIPE handler by default. This is usually not what # non-Python subprocesses expect. signal.signal(signal.SIGPIPE, signal.SIG_DFL) def subprocess_popen(args, stdin=None, stdout=None, stderr=None, shell=False, env=None, preexec_fn=_subprocess_setup, close_fds=True): return subprocess.Popen(args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr, preexec_fn=preexec_fn, close_fds=close_fds, env=env) def get_first_host_ip(net, ip_version): return str(netaddr.IPAddress(net.first + 1, ip_version)) def log_opt_values(log): cfg.CONF.log_opt_values(log, logging.DEBUG) def get_dhcp_agent_device_id(network_id, host): # Split host so as to always use only the hostname and # not the domain name. This will guarantee consistency # whether a local hostname or an fqdn is passed in. 
local_hostname = host.split('.')[0] host_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, str(local_hostname)) return 'dhcp%s-%s' % (host_uuid, network_id) class exception_logger(object): """Wrap a function and log raised exception :param logger: the logger to log the exception default is LOG.exception :returns: origin value if no exception raised; re-raise the exception if any occurred """ def __init__(self, logger=None): self.logger = logger def __call__(self, func): if self.logger is None: LOG = logging.getLogger(func.__module__) self.logger = LOG.exception def call(*args, **kwargs): try: return func(*args, **kwargs) except Exception as e: with excutils.save_and_reraise_exception(): self.logger(e) return call def get_other_dvr_serviced_device_owners(host_dvr_for_dhcp=True): """Return device_owner names for ports that should be serviced by DVR This doesn't return DEVICE_OWNER_COMPUTE_PREFIX since it is a prefix, not a complete device_owner name, so should be handled separately (see is_dvr_serviced() below) """ device_owners = [n_const.DEVICE_OWNER_LOADBALANCER, n_const.DEVICE_OWNER_LOADBALANCERV2, trunk_constants.TRUNK_SUBPORT_OWNER] if host_dvr_for_dhcp: device_owners.append(n_const.DEVICE_OWNER_DHCP) return device_owners def get_dvr_allowed_address_pair_device_owners(): """Return device_owner names for allowed_addr_pair ports serviced by DVR This just returns the device owners that are used by the allowed_address_pair ports. Right now only the device_owners shown below are used by the allowed_address_pair ports. Later if other device owners are used for allowed_address_pairs those device_owners should be added to the list below. """ # TODO(Swami): Convert these methods to constants. # Add the constants variable to the neutron-lib return [n_const.DEVICE_OWNER_LOADBALANCER, n_const.DEVICE_OWNER_LOADBALANCERV2] def is_dvr_serviced(device_owner): """Check if the port need to be serviced by DVR Helper function to check the device owners of the ports in the compute and service node to make sure if they are required for DVR or any service directly or indirectly associated with DVR. """ return (device_owner.startswith(n_const.DEVICE_OWNER_COMPUTE_PREFIX) or device_owner in get_other_dvr_serviced_device_owners()) def is_fip_serviced(device_owner): """Check if the port can be assigned a floating IP Helper function to check the device owner of a port can be assigned a floating IP. """ return device_owner != n_const.DEVICE_OWNER_DHCP def ip_to_cidr(ip, prefix=None): """Convert an ip with no prefix to cidr notation :param ip: An ipv4 or ipv6 address. Convertable to netaddr.IPNetwork. :param prefix: Optional prefix. If None, the default 32 will be used for ipv4 and 128 for ipv6. """ net = netaddr.IPNetwork(ip) if prefix is not None: # Can't pass ip and prefix separately. Must concatenate strings. net = netaddr.IPNetwork(str(net.ip) + '/' + str(prefix)) return str(net) def cidr_to_ip(ip_cidr): """Strip the cidr notation from an ip cidr or ip :param ip_cidr: An ipv4 or ipv6 address, with or without cidr notation """ net = netaddr.IPNetwork(ip_cidr) return str(net.ip) def cidr_mask(ip_cidr): """Returns the subnet mask length from a cidr :param ip_cidr: An ipv4 or ipv6 cidr mask length """ return netaddr.IPNetwork(ip_cidr).netmask.netmask_bits() def fixed_ip_cidrs(fixed_ips): """Create a list of a port's fixed IPs in cidr notation. 
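For example, [{'ip_address': '10.0.0.4', 'prefixlen': 24}] yields
['10.0.0.4/24']; when 'prefixlen' is absent, ip_to_cidr() defaults to
/32 for IPv4 and /128 for IPv6.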
:param fixed_ips: A neutron port's fixed_ips dictionary """ return [ip_to_cidr(fixed_ip['ip_address'], fixed_ip.get('prefixlen')) for fixed_ip in fixed_ips] def is_cidr_host(cidr): """Determines if the cidr passed in represents a single host network :param cidr: Either an ipv4 or ipv6 cidr. :returns: True if the cidr is /32 for ipv4 or /128 for ipv6. :raises ValueError: raises if cidr does not contain a '/'. This disallows plain IP addresses specifically to avoid ambiguity. """ if '/' not in str(cidr): raise ValueError(_("cidr doesn't contain a '/'")) net = netaddr.IPNetwork(cidr) if net.version == 4: return net.prefixlen == n_const.IPv4_BITS return net.prefixlen == n_const.IPv6_BITS def cidr_mask_length(cidr): """Returns the mask length of a cidr :param cidr: (string) either an ipv4 or ipv6 cidr or a host IP. :returns: (int) mask length of a cidr; in case of host IP, the mask length will be 32 (IPv4) or 128 (IPv6) """ return netaddr.IPNetwork(cidr).netmask.netmask_bits() def cidr_broadcast_address(cidr): """Returns the broadcast address of a cidr :param cidr: (string, netaddr.IPNetwork, netaddr.IPAddress) either an ipv4 or ipv6 cidr or a host IP. :returns: (string) broadcast address of the cidr, None if the cidr has no broadcast domain """ broadcast = netaddr.IPNetwork(cidr).broadcast if broadcast: return str(broadcast) def get_ip_version(ip_or_cidr): return netaddr.IPNetwork(ip_or_cidr).version def ip_version_from_int(ip_version_int): if ip_version_int == 4: return n_const.IPv4 if ip_version_int == 6: return n_const.IPv6 raise ValueError(_('Illegal IP version number')) def get_network_length(ip_version): """Returns the network length depeding on the IP version""" return (n_const.IPv4_BITS if ip_version == n_const.IP_VERSION_4 else n_const.IPv6_BITS) def get_socket_address_family(ip_version): """Returns the address family depending on the IP version""" return (int(socket.AF_INET if ip_version == n_const.IP_VERSION_4 else socket.AF_INET6)) def is_version_greater_equal(version1, version2): """Returns True if version1 is greater or equal than version2 else False""" return (pkg_resources.parse_version(version1) >= pkg_resources.parse_version(version2)) class DelayedStringRenderer(object): """Takes a callable and its args and calls when __str__ is called Useful for when an argument to a logging statement is expensive to create. This will prevent the callable from being called if it's never converted to a string. """ def __init__(self, function, *args, **kwargs): self.function = function self.args = args self.kwargs = kwargs def __str__(self): return str(self.function(*self.args, **self.kwargs)) def _hex_format(port, mask=0): def hex_str(num): return format(num, '#06x') if mask > 0: return "%s/%s" % (hex_str(port), hex_str(0xffff & ~mask)) return hex_str(port) def _gen_rules_port_min(port_min, top_bit): """Generate rules for port_min Encode a port range range(port_min, (port_min | (top_bit - 1)) + 1) into a set of bit value/masks. """ # Processing starts with setting up mask and top_bit variables to their # maximum. Top_bit has the form (1000000) with '1' pointing to the register # being processed, while mask has the form (0111111) with '1' showing # possible range to be covered. # With each rule generation cycle, mask and top_bit are bit shifted to the # right. When top_bit reaches 0 it means that last register was processed. # Let port_min be n bits long, top_bit = 1 << k, 0<=k<=n-1. # Each cycle step checks the following conditions: # 1). 
port & mask == 0 # This means that remaining bits k..1 are equal to '0' and can be # covered by a single port/mask rule. # If condition 1 doesn't fit, then both top_bit and mask are bit # shifted to the right and condition 2 is checked: # 2). port & top_bit == 0 # This means that kth port bit is equal to '0'. By setting it to '1' # and masking other (k-1) bits all ports in range # [P, P + 2^(k-1)-1] are guaranteed to be covered. # Let p_k be equal to port first (n-k) bits with rest set to 0. # Then P = p_k | top_bit. # Correctness proof: # The remaining range to be encoded in a cycle is calculated as follows: # R = [port_min, port_min | mask]. # If condition 1 holds, then a rule that covers R is generated and the job # is done. # If condition 2 holds, then the rule emitted will cover 2^(k-1) values # from the range. Remaining range R will shrink by 2^(k-1). # If condition 2 doesn't hold, then even after top_bit/mask shift in next # iteration the value of R won't change. # Full cycle example for range [40, 64): # port=0101000, top_bit=1000000, k=6 # * step 1, k=6, R=[40, 63] # top_bit=1000000, mask=0111111 -> condition 1 doesn't hold, shifting # mask/top_bit # top_bit=0100000, mask=0011111 -> condition 2 doesn't hold # * step 2, k=5, R=[40, 63] # top_bit=0100000, mask=0011111 -> condition 1 doesn't hold, shifting # mask/top_bit # top_bit=0010000, mask=0001111 -> condition 2 holds -> 011xxxx or # 0x0030/fff0 # * step 3, k=4, R=[40, 47] # top_bit=0010000, mask=0001111 -> condition 1 doesn't hold, shifting # mask/top_bit # top_bit=0001000, mask=0000111 -> condition 2 doesn't hold # * step 4, k=3, R=[40, 47] # top_bit=0001000, mask=0000111 -> condition 1 holds -> 0101xxx or # 0x0028/fff8 # rules=[0x0030/fff0, 0x0028/fff8] rules = [] mask = top_bit - 1 while True: if (port_min & mask) == 0: # greedy matched a streak of '0' in port_min rules.append(_hex_format(port_min, mask)) break top_bit >>= 1 mask >>= 1 if (port_min & top_bit) == 0: # matched next '0' in port_min to substitute for '1' in resulting # rule rules.append(_hex_format(port_min & ~mask | top_bit, mask)) return rules def _gen_rules_port_max(port_max, top_bit): """Generate rules for port_max Encode a port range range(port_max & ~(top_bit - 1), port_max + 1) into a set of bit value/masks. """ # Processing starts with setting up mask and top_bit variables to their # maximum. Top_bit has the form (1000000) with '1' pointing to the register # being processed, while mask has the form (0111111) with '1' showing # possible range to be covered. # With each rule generation cycle, mask and top_bit are bit shifted to the # right. When top_bit reaches 0 it means that last register was processed. # Let port_max be n bits long, top_bit = 1 << k, 0<=k<=n-1. # Each cycle step checks the following conditions: # 1). port & mask == mask # This means that remaining bits k..1 are equal to '1' and can be # covered by a single port/mask rule. # If condition 1 doesn't fit, then both top_bit and mask are bit # shifted to the right and condition 2 is checked: # 2). port & top_bit == top_bit # This means that kth port bit is equal to '1'. By setting it to '0' # and masking other (k-1) bits all ports in range # [P, P + 2^(k-1)-1] are guaranteed to be covered. # Let p_k be equal to port first (n-k) bits with rest set to 0. # Then P = p_k | ~top_bit. # Correctness proof: # The remaining range to be encoded in a cycle is calculated as follows: # R = [port_max & ~mask, port_max]. # If condition 1 holds, then a rule that covers R is generated and the job # is done. 
# If condition 2 holds, then the rule emitted will cover 2^(k-1) values # from the range. Remaining range R will shrink by 2^(k-1). # If condition 2 doesn't hold, then even after top_bit/mask shift in next # iteration the value of R won't change. # Full cycle example for range [64, 105]: # port=1101001, top_bit=1000000, k=6 # * step 1, k=6, R=[64, 105] # top_bit=1000000, mask=0111111 -> condition 1 doesn't hold, shifting # mask/top_bit # top_bit=0100000, mask=0011111 -> condition 2 holds -> 10xxxxx or # 0x0040/ffe0 # * step 2, k=5, R=[96, 105] # top_bit=0100000, mask=0011111 -> condition 1 doesn't hold, shifting # mask/top_bit # top_bit=0010000, mask=0001111 -> condition 2 doesn't hold # * step 3, k=4, R=[96, 105] # top_bit=0010000, mask=0001111 -> condition 1 doesn't hold, shifting # mask/top_bit # top_bit=0001000, mask=0000111 -> condition 2 holds -> 1100xxx or # 0x0060/fff8 # * step 4, k=3, R=[104, 105] # top_bit=0001000, mask=0000111 -> condition 1 doesn't hold, shifting # mask/top_bit # top_bit=0000100, mask=0000011 -> condition 2 doesn't hold # * step 5, k=2, R=[104, 105] # top_bit=0000100, mask=0000011 -> condition 1 doesn't hold, shifting # mask/top_bit # top_bit=0000010, mask=0000001 -> condition 2 doesn't hold # * step 6, k=1, R=[104, 105] # top_bit=0000010, mask=0000001 -> condition 1 holds -> 1101001 or # 0x0068 # rules=[0x0040/ffe0, 0x0060/fff8, 0x0068] rules = [] mask = top_bit - 1 while True: if (port_max & mask) == mask: # greedy matched a streak of '1' in port_max rules.append(_hex_format(port_max & ~mask, mask)) break top_bit >>= 1 mask >>= 1 if (port_max & top_bit) == top_bit: # matched next '1' in port_max to substitute for '0' in resulting # rule rules.append(_hex_format(port_max & ~mask & ~top_bit, mask)) return rules def port_rule_masking(port_min, port_max): """Translate a range [port_min, port_max] into a set of bitwise matches. Each match has the form 'port/mask'. The port and mask are 16-bit numbers written in hexadecimal prefixed by 0x. Each 1-bit in mask requires that the corresponding bit in port must match. Each 0-bit in mask causes the corresponding bit to be ignored. """ # Let binary representation of port_min and port_max be n bits long and # have first m bits in common, 0 <= m <= n. # If remaining (n - m) bits of given ports define 2^(n-m) values, then # [port_min, port_max] range is covered by a single rule. # For example: # n = 6 # port_min = 16 (binary 010000) # port_max = 23 (binary 010111) # Ports have m=3 bits in common with the remaining (n-m)=3 bits # covering range [0, 2^3), which equals to a single 010xxx rule. The algo # will return [0x0010/fff8]. # Else [port_min, port_max] range will be split into 2: range [port_min, T) # and [T, port_max]. Let p_m be the common part of port_min and port_max # with other (n-m) bits set to 0. Then T = p_m | 1 << (n-m-1). # For example: # n = 7 # port_min = 40 (binary 0101000) # port_max = 105 (binary 1101001) # Ports have m=0 bits in common, p_m=000000. Then T=1000000 and the # initial range [40, 105] is divided into [40, 64) and [64, 105]. # Each of the ranges will be processed separately, then the generated rules # will be merged. # Check port_max >= port_min. 
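# (Illustrative) for the running example above, port_rule_masking(40, 105)
# therefore yields the five rules derived in the two helpers:
# 0x0030/fff0 and 0x0028/fff8 covering [40, 63], then 0x0040/ffe0,
# 0x0060/fff8 and 0x0068 covering [64, 105] (notation as in the helper
# comments).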
if port_max < port_min: raise ValueError(_("'port_max' is smaller than 'port_min'")) bitdiff = port_min ^ port_max if bitdiff == 0: # port_min == port_max return [_hex_format(port_min)] # for python3.x, bit_length could be used here top_bit = 1 while top_bit <= bitdiff: top_bit <<= 1 if (port_min & (top_bit - 1) == 0 and port_max & (top_bit - 1) == top_bit - 1): # special case, range of 2^k ports is covered return [_hex_format(port_min, top_bit - 1)] top_bit >>= 1 rules = [] rules.extend(_gen_rules_port_min(port_min, top_bit)) rules.extend(_gen_rules_port_max(port_max, top_bit)) return rules def create_object_with_dependency(creator, dep_getter, dep_creator, dep_id_attr, dep_deleter): """Creates an object that binds to a dependency while handling races. creator is a function that expected to take the result of either dep_getter or dep_creator. The result of dep_getter and dep_creator must have an attribute of dep_id_attr be used to determine if the dependency changed during object creation. dep_deleter will be called with a the result of dep_creator if the creator function fails due to a non-dependency reason or the retries are exceeded. dep_getter should return None if the dependency does not exist. dep_creator can raise a DBDuplicateEntry to indicate that a concurrent create of the dependency occurred and the process will restart to get the concurrently created one. This function will return both the created object and the dependency it used/created. This function protects against all of the cases where the dependency can be concurrently removed by catching exceptions and restarting the process of creating the dependency if one no longer exists. It will give up after neutron_lib.db.api.MAX_RETRIES and raise the exception it encounters after that. """ result, dependency, dep_id, made_locally = None, None, None, False for attempts in range(1, db_api.MAX_RETRIES + 1): # we go to max + 1 here so the exception handlers can raise their # errors at the end try: dependency = dep_getter() if not dependency: dependency = dep_creator() made_locally = True dep_id = getattr(dependency, dep_id_attr) except db_exc.DBDuplicateEntry: # dependency was concurrently created. with excutils.save_and_reraise_exception() as ctx: if attempts < db_api.MAX_RETRIES: # sleep for a random time between 0 and 1 second to # make sure a concurrent worker doesn't retry again # at exactly the same time time.sleep(random.uniform(0, 1)) ctx.reraise = False continue try: result = creator(dependency) break except Exception: with excutils.save_and_reraise_exception() as ctx: # check if dependency we tried to use was removed during # object creation if attempts < db_api.MAX_RETRIES: dependency = dep_getter() if not dependency or dep_id != getattr(dependency, dep_id_attr): ctx.reraise = False continue # we have exceeded retries or have encountered a non-dependency # related failure so we try to clean up the dependency if we # created it before re-raising if made_locally and dependency: try: dep_deleter(dependency) except Exception: LOG.exception("Failed cleaning up dependency %s", dep_id) return result, dependency def transaction_guard(f): """Ensures that the context passed in is not in a transaction. Various Neutron methods modifying resources have assumptions that they will not be called inside of a transaction because they perform operations that expect all data to be committed to the database (e.g. ML2 postcommit calls) and/or they have side effects on external systems. 
So calling them in a transaction can lead to consistency errors on failures since the side effect will not be reverted on a DB rollback. If you receive this error, you must alter your code to handle the fact that the thing you are calling can have side effects so using transactions to undo on failures is not possible. """ @functools.wraps(f) def inner(self, context, *args, **kwargs): # FIXME(kevinbenton): get rid of all uses of this flag if (context.session.is_active and getattr(context, 'GUARD_TRANSACTION', True)): raise RuntimeError(_("Method %s cannot be called within a " "transaction.") % f) return f(self, context, *args, **kwargs) return inner def wait_until_true(predicate, timeout=60, sleep=1, exception=None): """Wait until callable predicate is evaluated as True :param predicate: Callable deciding whether waiting should continue. Best practice is to instantiate predicate with functools.partial() :param timeout: Timeout in seconds how long should function wait. :param sleep: Polling interval for results in seconds. :param exception: Exception instance to raise on timeout. If None is passed (default) then WaitTimeout exception is raised. """ try: with eventlet.Timeout(timeout): while not predicate(): eventlet.sleep(sleep) except eventlet.Timeout: if exception is not None: # pylint: disable=raising-bad-type raise exception raise WaitTimeout(_("Timed out after %d seconds") % timeout) class classproperty(object): def __init__(self, f): self.func = f def __get__(self, obj, owner): return self.func(owner) _NO_ARGS_MARKER = object() def attach_exc_details(e, msg, args=_NO_ARGS_MARKER): e._error_context_msg = msg e._error_context_args = args def extract_exc_details(e): for attr in ('_error_context_msg', '_error_context_args'): if not hasattr(e, attr): return u'No details.' details = e._error_context_msg args = e._error_context_args if args is _NO_ARGS_MARKER: return details return details % args def import_modules_recursively(topdir): '''Import and return all modules below the topdir directory.''' topdir = _SEPARATOR_REGEX.sub('/', topdir) modules = [] for root, dirs, files in os.walk(topdir): for file_ in files: if file_[-3:] != '.py': continue module = file_[:-3] if module == '__init__': continue import_base = _SEPARATOR_REGEX.sub('.', root) # NOTE(ihrachys): in Python3, or when we are not located in the # directory containing neutron code, __file__ is absolute, so we # should truncate it to exclude PYTHONPATH prefix prefixlen = len(os.path.dirname(neutron.__file__)) import_base = 'neutron' + import_base[prefixlen:] module = '.'.join([import_base, module]) if module not in sys.modules: importlib.import_module(module) modules.append(module) return modules def get_rand_name(max_length=None, prefix='test'): """Return a random string. The string will start with 'prefix' and will be exactly 'max_length'. If 'max_length' is None, then exactly 8 random characters, each hexadecimal, will be added. In case len(prefix) <= len(max_length), ValueError will be raised to indicate the problem. 
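(That is, the prefix must be strictly shorter than 'max_length'; e.g.
get_rand_name(12, 'test') returns 'test' plus 8 random hexadecimal
characters, such as 'test3f9a21cb'.)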
""" return get_related_rand_names([prefix], max_length)[0] def get_rand_device_name(prefix='test'): return get_rand_name( max_length=n_const.DEVICE_NAME_MAX_LEN, prefix=prefix) def get_related_rand_names(prefixes, max_length=None): """Returns a list of the prefixes with the same random characters appended :param prefixes: A list of prefix strings :param max_length: The maximum length of each returned string :returns: A list with each prefix appended with the same random characters """ if max_length: length = max_length - max(len(p) for p in prefixes) if length <= 0: raise ValueError( _("'max_length' must be longer than all prefixes")) else: length = 8 rndchrs = helpers.get_random_string(length) return [p + rndchrs for p in prefixes] def get_related_rand_device_names(prefixes): return get_related_rand_names(prefixes, max_length=n_const.DEVICE_NAME_MAX_LEN) def bytes_to_bits(value): return value * 8 def bits_to_kilobits(value, base): # NOTE(slaweq): round up that even 1 bit will give 1 kbit as a result return int((value + (base - 1)) / base) def disable_extension_by_service_plugin(core_plugin, service_plugin): if ('filter-validation' in core_plugin.supported_extension_aliases and not api_common.is_filter_validation_supported(service_plugin)): core_plugin.supported_extension_aliases.remove('filter-validation') LOG.info('Disable filter validation extension by service plugin ' '%s.', service_plugin.__class__.__name__) def get_port_fixed_ips_set(port): return set([ip["ip_address"] for ip in port.get("fixed_ips", [])]) def port_ip_changed(new_port, original_port): if not new_port or not original_port: return False # Quantity is not same, so it is changed. if (len(new_port.get("fixed_ips", [])) != len(original_port.get("fixed_ips", []))): return True # IPs can be placed in any order, so use python set to verify the # fixed IP addresses. if (get_port_fixed_ips_set(new_port) != get_port_fixed_ips_set(original_port)): return True return False def validate_rp_bandwidth(rp_bandwidths, device_names): """Validate resource provider bandwidths against device names. :param rp_bandwidths: Dict containing resource provider bandwidths, in the form: {'phy1': {'ingress': 100, 'egress': 100}} :param device_names: A set of the device names given in bridge_mappings in case of ovs-agent or in physical_device_mappings in case of sriov-agent :raises ValueError: In case of the devices (keys) in the rp_bandwidths dict are not in the device_names set. """ for dev_name in rp_bandwidths: if dev_name not in device_names: raise ValueError(_( "Invalid resource_provider_bandwidths: " "Device name %(dev_name)s is missing from " "device mappings") % {'dev_name': dev_name}) class Timer(object): """Timer context manager class This class creates a context that: - Triggers a timeout exception if the timeout is set. - Returns the time elapsed since the context was initialized. - Returns the time spent in the context once it's closed. The timeout exception can be suppressed; when the time expires, the context finishes without rising TimerTimeout. NOTE(ralonsoh): this class, when a timeout is defined, cannot be used in other than the main thread. When a timeout is defined, an alarm signal is set. Only the main thread is allowed to set a signal handler and the signal handlers are always executed in this main thread [1]. 
[1] https://docs.python.org/3/library/signal.html#signals-and-threads """ def __init__(self, timeout=None, raise_exception=True): self.start = self.delta = None self._timeout = int(timeout) if timeout else None self._timeout_flag = False self._raise_exception = raise_exception def _timeout_handler(self, *_): self._timeout_flag = True if self._raise_exception: raise TimerTimeout(timeout=self._timeout) self.__exit__() def __enter__(self): self.start = datetime.datetime.now() if self._timeout: signal.signal(signal.SIGALRM, self._timeout_handler) signal.alarm(self._timeout) return self def __exit__(self, *_): if self._timeout: signal.alarm(0) self.delta = datetime.datetime.now() - self.start def __getattr__(self, item): return getattr(self.delta, item) def __iter__(self): self._raise_exception = False return self.__enter__() def next(self): # pragma: no cover # NOTE(ralonsoh): Python 2 support. if not self._timeout_flag: return datetime.datetime.now() raise StopIteration() def __next__(self): # pragma: no cover # NOTE(ralonsoh): Python 3 support. return self.next() def __del__(self): signal.alarm(0) @property def delta_time_sec(self): return (datetime.datetime.now() - self.start).total_seconds() def collect_profiler_info(): p = profiler.get() if p: return { "hmac_key": p.hmac_key, "base_id": p.get_base_id(), "parent_id": p.get_id(), } def spawn(func, *args, **kwargs): """As eventlet.spawn() but with osprofiler initialized in the new threads osprofiler stores the profiler instance in thread local storage, therefore in new threads (including eventlet threads) osprofiler comes uninitialized by default. This spawn() is a stand-in replacement for eventlet.spawn() but we re-initialize osprofiler in threads spawn()-ed. """ profiler_info = collect_profiler_info() @functools.wraps(func) def wrapper(*args, **kwargs): if profiler_info: profiler.init(**profiler_info) return func(*args, **kwargs) return eventlet.spawn(wrapper, *args, **kwargs) def spawn_n(func, *args, **kwargs): """See spawn() above""" profiler_info = collect_profiler_info() @functools.wraps(func) def wrapper(*args, **kwargs): if profiler_info: profiler.init(**profiler_info) return func(*args, **kwargs) return eventlet.spawn_n(wrapper, *args, **kwargs) def timecost(f): call_id = uuidutils.generate_uuid() message_base = ("Time-cost: call %(call_id)s function %(fname)s ") % { "call_id": call_id, "fname": f.__name__} end_message = (message_base + "took %(seconds).3fs seconds to run") @timeutils.time_it(LOG, message=end_message, min_duration=None) def wrapper(*args, **kwargs): LOG.debug(message_base + "start") ret = f(*args, **kwargs) return ret return wrapper ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.223044 neutron-16.0.0.0b2.dev214/neutron/conf/0000755000175000017500000000000000000000000017562 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/__init__.py0000644000175000017500000000000000000000000021661 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.223044 neutron-16.0.0.0b2.dev214/neutron/conf/agent/0000755000175000017500000000000000000000000020660 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 
neutron-16.0.0.0b2.dev214/neutron/conf/agent/__init__.py0000644000175000017500000000000000000000000022757 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/agent/agent_extensions_manager.py0000644000175000017500000000155500000000000026307 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ AGENT_EXT_MANAGER_OPTS = [ cfg.ListOpt('extensions', default=[], help=_('Extensions list to use')), ] def register_agent_ext_manager_opts(cfg=cfg.CONF): cfg.register_opts(AGENT_EXT_MANAGER_OPTS, 'agent') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/agent/cmd.py0000644000175000017500000000330000000000000021771 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ ip_opts = [ cfg.BoolOpt('allsets', default=False, help=_('Destroy all IPsets.')), cfg.BoolOpt('force', default=False, help=_('Destroy IPsets even if there is an iptables ' 'reference.')), cfg.StrOpt('prefix', default='N', # ipset_manager.NET_PREFIX help=_('String prefix used to match IPset names.')), ] netns_opts = [ cfg.BoolOpt('force', default=False, help=_('Delete the namespace by removing all devices.')), cfg.StrOpt('agent-type', choices=['dhcp', 'l3'], help=_('Cleanup resources of a specific agent type only.')), ] ovs_opts = [ cfg.BoolOpt('ovs_all_ports', default=False, help=_('True to delete all ports on all the OpenvSwitch ' 'bridges. False to delete ports created by ' 'Neutron and Nova on the integration bridge.')) ] def register_cmd_opts(opts, cfg=cfg.CONF): cfg.register_cli_opts(opts) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/agent/common.py0000644000175000017500000002234400000000000022527 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import shlex from oslo_config import cfg from oslo_privsep import priv_context from neutron._i18n import _ from neutron.common import config EXTERNAL_PROCESS_OPTS = [ cfg.StrOpt('external_pids', default='$state_path/external/pids', help=_('Location to store child pid files')), ] PD_OPTS = [ cfg.StrOpt('pd_dhcp_driver', default='dibbler', help=_('Service to handle DHCPv6 Prefix delegation.')), ] PD_DRIVER_OPTS = [ cfg.StrOpt('pd_confs', default='$state_path/pd', help=_('Location to store IPv6 PD files.')), cfg.StrOpt('vendor_pen', default='8888', help=_("A decimal value as Vendor's Registered Private " "Enterprise Number as required by RFC3315 DUID-EN.")), ] INTERFACE_OPTS = [ cfg.StrOpt('ovs_integration_bridge', default='br-int', deprecated_for_removal=True, deprecated_reason='This variable is a duplicate of ' 'OVS.integration_bridge. To be removed in W.', help=_('Name of Open vSwitch bridge to use')), cfg.BoolOpt('ovs_use_veth', default=False, help=_("Uses veth for an OVS interface or not. " "Support kernels with limited namespace support " "(e.g. RHEL 6.5) and rate limiting on router's gateway " "port so long as ovs_use_veth is set to " "True.")), ] RA_OPTS = [ cfg.StrOpt('ra_confs', default='$state_path/ra', help=_('Location to store IPv6 RA config files')), cfg.IntOpt('min_rtr_adv_interval', default=30, help=_('MinRtrAdvInterval setting for radvd.conf')), cfg.IntOpt('max_rtr_adv_interval', default=100, help=_('MaxRtrAdvInterval setting for radvd.conf')), ] ROOT_HELPER_OPTS = [ cfg.StrOpt('root_helper', default='sudo', help=_("Root helper application. " "Use 'sudo neutron-rootwrap /etc/neutron/rootwrap.conf' " "to use the real root filter facility. Change to 'sudo' " "to skip the filtering and just run the command " "directly.")), cfg.BoolOpt('use_helper_for_ns_read', default=True, help=_("Use the root helper when listing the namespaces on a " "system. This may not be required depending on the " "security configuration. If the root helper is " "not required, set this to False for a performance " "improvement.")), # We can't just use root_helper=sudo neutron-rootwrap-daemon $cfg because # it isn't appropriate for long-lived processes spawned with create_process # Having a bool use_rootwrap_daemon option precludes specifying the # rootwrap daemon command, which may be necessary for Xen? cfg.StrOpt('root_helper_daemon', help=_(""" Root helper daemon application to use when possible. Use 'sudo neutron-rootwrap-daemon /etc/neutron/rootwrap.conf' to run rootwrap in "daemon mode" which has been reported to improve performance at scale. For more information on running rootwrap in "daemon mode", see: https://docs.openstack.org/oslo.rootwrap/latest/user/usage.html#daemon-mode For the agent which needs to execute commands in Dom0 in the hypervisor of XenServer, this option should be set to 'xenapi_root_helper', so that it will keep a XenAPI session to pass commands to Dom0. 
""")), ] AGENT_STATE_OPTS = [ cfg.FloatOpt('report_interval', default=30, help=_('Seconds between nodes reporting state to server; ' 'should be less than agent_down_time, best if it ' 'is half or less than agent_down_time.')), cfg.BoolOpt('log_agent_heartbeats', default=False, help=_('Log agent heartbeats')), ] INTERFACE_DRIVER_OPTS = [ cfg.StrOpt('interface_driver', help=_("The driver used to manage the virtual interface.")), ] IPTABLES_OPTS = [ cfg.BoolOpt('comment_iptables_rules', default=True, help=_("Add comments to iptables rules. " "Set to false to disallow the addition of comments to " "generated iptables rules that describe each rule's " "purpose. System must support the iptables comments " "module for addition of comments.")), cfg.BoolOpt('debug_iptables_rules', default=False, help=_("Duplicate every iptables difference calculation to " "ensure the format being generated matches the format " "of iptables-save. This option should not be turned " "on for production systems because it imposes a " "performance penalty.")), ] PROCESS_MONITOR_OPTS = [ cfg.StrOpt('check_child_processes_action', default='respawn', choices=['respawn', 'exit'], help=_('Action to be executed when a child process dies')), cfg.IntOpt('check_child_processes_interval', default=60, help=_('Interval between checks of child process liveness ' '(seconds), use 0 to disable')), cfg.StrOpt('kill_scripts_path', default='/etc/neutron/kill_scripts/', help=_('Location of scripts used to kill external processes. ' 'Names of scripts here must follow the pattern: ' '"-kill" where is name of ' 'the process which should be killed using this script. ' 'For example, kill script for dnsmasq process should be ' 'named "dnsmasq-kill". ' 'If path is set to None, then default "kill" command ' 'will be used to stop processes.')), ] AVAILABILITY_ZONE_OPTS = [ # The default AZ name "nova" is selected to match the default # AZ name in Nova and Cinder. 
cfg.StrOpt('availability_zone', max_length=255, default='nova', help=_("Availability zone of this node")), ] def get_log_args(conf, log_file_name, **kwargs): cmd_args = [] if conf.debug: cmd_args.append('--debug') if (conf.log_dir or conf.log_file): cmd_args.append('--log-file=%s' % log_file_name) log_dir = None if conf.log_dir and conf.log_file: log_dir = os.path.dirname( os.path.join(conf.log_dir, conf.log_file)) elif conf.log_dir: log_dir = conf.log_dir elif conf.log_file: log_dir = os.path.dirname(conf.log_file) if log_dir: cmd_args.append('--log-dir=%s' % log_dir) else: if conf.use_syslog: cmd_args.append('--use-syslog') if conf.syslog_log_facility: cmd_args.append( '--syslog-log-facility=%s' % conf.syslog_log_facility) return cmd_args def register_external_process_opts(cfg=cfg.CONF): cfg.register_opts(EXTERNAL_PROCESS_OPTS) def register_pd_opts(cfg=cfg.CONF): cfg.register_opts(PD_OPTS) def register_pddriver_opts(cfg=cfg.CONF): cfg.register_opts(PD_DRIVER_OPTS) def register_interface_opts(cfg=cfg.CONF): cfg.register_opts(INTERFACE_OPTS) def register_ra_opts(cfg=cfg.CONF): cfg.register_opts(RA_OPTS) def register_root_helper(conf=cfg.CONF): conf.register_opts(ROOT_HELPER_OPTS, 'AGENT') def register_agent_state_opts_helper(conf): conf.register_opts(AGENT_STATE_OPTS, 'AGENT') def register_interface_driver_opts_helper(conf): conf.register_opts(INTERFACE_DRIVER_OPTS) def register_iptables_opts(conf): conf.register_opts(IPTABLES_OPTS, 'AGENT') def register_process_monitor_opts(conf): conf.register_opts(PROCESS_MONITOR_OPTS, 'AGENT') def register_availability_zone_opts_helper(conf): conf.register_opts(AVAILABILITY_ZONE_OPTS, 'AGENT') def get_root_helper(conf): return conf.AGENT.root_helper def setup_conf(): bind_opts = [ cfg.StrOpt('state_path', default='/var/lib/neutron', help=_("Where to store Neutron state files. " "This directory must be writable by the agent.")), ] conf = cfg.ConfigOpts() conf.register_opts(bind_opts) return conf # add a logging setup method here for convenience setup_logging = config.setup_logging def setup_privsep(): priv_context.init(root_helper=shlex.split(get_root_helper(cfg.CONF))) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.231044 neutron-16.0.0.0b2.dev214/neutron/conf/agent/database/0000755000175000017500000000000000000000000022424 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/agent/database/__init__.py0000644000175000017500000000000000000000000024523 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/agent/database/agents_db.py0000644000175000017500000000463400000000000024733 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
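# NOTE: A hedged sketch of how an agent combines the register_* helpers
# defined above at startup; the module path neutron.conf.agent.common
# follows this tree's layout, and the values printed are the defaults
# shown above, not recommendations.
from oslo_config import cfg

from neutron.conf.agent import common as agent_config

agent_config.register_root_helper(cfg.CONF)              # [AGENT] group
agent_config.register_agent_state_opts_helper(cfg.CONF)  # [AGENT] group
agent_config.register_iptables_opts(cfg.CONF)            # [AGENT] group

cfg.CONF([])  # parse; a real agent passes --config-file arguments here
print(agent_config.get_root_helper(cfg.CONF))  # -> CONF.AGENT.root_helper
print(cfg.CONF.AGENT.report_interval)          # 30.0 unless overridden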
from oslo_config import cfg from neutron._i18n import _ AGENT_OPTS = [ cfg.IntOpt('agent_down_time', default=75, help=_("Seconds to regard the agent as down; should be at " "least twice report_interval, to be sure the " "agent is down for good.")), cfg.StrOpt('dhcp_load_type', default='networks', choices=['networks', 'subnets', 'ports'], help=_('Representing the resource type whose load is being ' 'reported by the agent. This can be "networks", ' '"subnets" or "ports". ' 'When specified (default is networks), the server will ' 'extract the particular load sent as part of its agent ' 'configuration object from the agent report state, ' 'which is the number of resources being consumed, at ' 'every report_interval. ' 'dhcp_load_type can be used in combination with ' 'network_scheduler_driver = ' 'neutron.scheduler.dhcp_agent_scheduler.WeightScheduler. ' 'When the network_scheduler_driver is WeightScheduler, ' 'dhcp_load_type can be configured to represent the ' 'choice for the resource being balanced. ' 'Example: dhcp_load_type=networks')), cfg.BoolOpt('enable_new_agents', default=True, help=_("Agent starts with admin_state_up=False when " "enable_new_agents=False. In that case, the user's " "resources will not be scheduled automatically to the " "agent until the admin changes admin_state_up to True.")), ] def register_db_agents_opts(conf=cfg.CONF): conf.register_opts(AGENT_OPTS) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/agent/database/agentschedulers_db.py0000644000175000017500000000444100000000000026626 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ AGENTS_SCHEDULER_OPTS = [ cfg.StrOpt('network_scheduler_driver', default='neutron.scheduler.' 'dhcp_agent_scheduler.WeightScheduler', help=_('Driver to use for scheduling network to DHCP agent')), cfg.BoolOpt('network_auto_schedule', default=True, help=_('Allow auto scheduling networks to DHCP agent.')), cfg.BoolOpt('allow_automatic_dhcp_failover', default=True, help=_('Automatically remove networks from offline DHCP ' 'agents.')), cfg.IntOpt('dhcp_agents_per_network', default=1, min=1, help=_('Number of DHCP agents scheduled to host a tenant ' 'network. If this number is greater than 1, the ' 'scheduler automatically assigns multiple DHCP agents ' 'for a given tenant network, providing high ' 'availability for DHCP service.')), cfg.BoolOpt('enable_services_on_agents_with_admin_state_down', default=False, help=_('Enable services on an agent with admin_state_up ' 'False. If this option is False, when admin_state_up ' 'of an agent is turned False, services on it will be ' 'disabled. Agents with admin_state_up False are not ' 'selected for automatic scheduling regardless of this ' 'option.
But manual scheduling to such agents is ' 'available if this option is True.')), ] def register_db_agentschedulers_opts(conf=cfg.CONF): conf.register_opts(AGENTS_SCHEDULER_OPTS) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/agent/dhcp.py0000644000175000017500000001517600000000000022162 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ DHCP_AGENT_OPTS = [ cfg.IntOpt('resync_interval', default=5, help=_("The DHCP agent will resync its state with Neutron to " "recover from any transient notification or RPC errors. " "The interval is maximum number of seconds between " "attempts. The resync can be done more often based on " "the events triggered.")), cfg.IntOpt('resync_throttle', default=1, help=_("Throttle the number of resync state events between the " "local DHCP state and Neutron to only once per " "'resync_throttle' seconds. The value of throttle " "introduces a minimum interval between resync state " "events. Otherwise the resync may end up in a " "busy-loop. The value must be less than " "resync_interval.")), cfg.StrOpt('dhcp_driver', default='neutron.agent.linux.dhcp.Dnsmasq', help=_("The driver used to manage the DHCP server.")), cfg.BoolOpt('enable_isolated_metadata', default=False, help=_("The DHCP server can assist with providing metadata " "support on isolated networks. Setting this value to " "True will cause the DHCP server to append specific " "host routes to the DHCP request. The metadata service " "will only be activated when the subnet does not " "contain any router port. The guest instance must be " "configured to request host routes via DHCP (Option " "121). This option doesn't have any effect when " "force_metadata is set to True.")), cfg.BoolOpt('force_metadata', default=False, help=_("In some cases the Neutron router is not present to " "provide the metadata IP but the DHCP server can be " "used to provide this info. Setting this value will " "force the DHCP server to append specific host routes " "to the DHCP request. If this option is set, then the " "metadata service will be activated for all the " "networks.")), cfg.BoolOpt('enable_metadata_network', default=False, help=_("Allows for serving metadata requests coming from a " "dedicated metadata access network whose CIDR is " "169.254.169.254/16 (or larger prefix), and is " "connected to a Neutron router from which the VMs send " "metadata:1 request. In this case DHCP Option 121 will " "not be injected in VMs, as they will be able to reach " "169.254.169.254 through a router. This option " "requires enable_isolated_metadata = True.")), cfg.IntOpt('num_sync_threads', default=4, help=_('Number of threads to use during sync process. 
' 'Should not exceed connection pool size configured on ' 'server.')), cfg.IntOpt('bulk_reload_interval', default=0, min=0, help=_('Time to sleep between reloading the DHCP allocations. ' 'This will only be invoked if the value is not 0. ' 'If a network has N updates in X seconds then ' 'we will reload once with the port changes in the X ' 'seconds and not N times.')), ] DHCP_OPTS = [ cfg.StrOpt('dhcp_confs', default='$state_path/dhcp', help=_('Location to store DHCP server config files.')), ] DNSMASQ_OPTS = [ cfg.StrOpt('dnsmasq_config_file', default='', help=_('Override the default dnsmasq settings ' 'with this file.')), cfg.ListOpt('dnsmasq_dns_servers', default=[], help=_('Comma-separated list of the DNS servers which will be ' 'used as forwarders.')), cfg.StrOpt('dnsmasq_base_log_dir', help=_("Base log dir for dnsmasq logging. " "The log contains DHCP and DNS log information and " "is useful for debugging issues with either DHCP or " "DNS. If this section is null, disable dnsmasq log.")), cfg.BoolOpt('dnsmasq_local_resolv', default=False, help=_("Enables the dnsmasq service to provide name " "resolution for instances via DNS resolvers on the " "host running the DHCP agent. Effectively removes the " "'--no-resolv' option from the dnsmasq process " "arguments. Adding custom DNS resolvers to the " "'dnsmasq_dns_servers' option disables this feature.")), cfg.IntOpt( 'dnsmasq_lease_max', default=(2 ** 24), help=_('Limit number of leases to prevent a denial-of-service.')), cfg.BoolOpt('dhcp_broadcast_reply', default=False, help=_("Use broadcast in DHCP replies.")), cfg.IntOpt('dhcp_renewal_time', default=0, help=_("DHCP renewal time T1 (in seconds). If set to 0, it " "will default to half of the lease time.")), cfg.IntOpt('dhcp_rebinding_time', default=0, help=_("DHCP rebinding time T2 (in seconds). If set to 0, it " "will default to 7/8 of the lease time.")), cfg.BoolOpt('dnsmasq_enable_addr6_list', default=False, help=_("Enable dhcp-host entry with list of addresses when " "port has multiple IPv6 addresses in the same subnet.")) ] def register_agent_dhcp_opts(cfg=cfg.CONF): cfg.register_opts(DHCP_AGENT_OPTS) cfg.register_opts(DHCP_OPTS) cfg.register_opts(DNSMASQ_OPTS) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/agent/l2_ext_fdb_population.py0000644000175000017500000000243500000000000025520 0ustar00coreycorey00000000000000# Copyright (c) 2016 Mellanox Technologies, Ltd # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
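# NOTE: A short sketch of consuming the DHCP agent options registered
# above; the module path neutron.conf.agent.dhcp follows this tree's
# layout and all three opt lists land in the DEFAULT group.
from oslo_config import cfg

from neutron.conf.agent import dhcp as dhcp_config

dhcp_config.register_agent_dhcp_opts(cfg.CONF)
cfg.CONF([])  # no CLI arguments; the defaults shown above apply
print(cfg.CONF.dhcp_driver)        # 'neutron.agent.linux.dhcp.Dnsmasq'
print(cfg.CONF.dnsmasq_lease_max)  # 16777216, i.e. 2 ** 24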
from oslo_config import cfg from neutron._i18n import _ # if shared_physical_device_mappings is not configured KeyError will be thrown fdb_population_opt = [ cfg.ListOpt('shared_physical_device_mappings', default=[], help=_("Comma-separated list of " ": tuples mapping " "physical network names to the agent's node-specific " "shared physical network device between " "SR-IOV and OVS or SR-IOV and linux bridge")) ] def register_fdb_population_opts(cfg=cfg.CONF): cfg.register_opts(fdb_population_opt, 'FDB') ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.231044 neutron-16.0.0.0b2.dev214/neutron/conf/agent/l3/0000755000175000017500000000000000000000000021176 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/agent/l3/__init__.py0000644000175000017500000000000000000000000023275 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/agent/l3/config.py0000644000175000017500000001440200000000000023016 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants from oslo_config import cfg from neutron._i18n import _ OPTS = [ cfg.StrOpt('agent_mode', default=constants.L3_AGENT_MODE_LEGACY, choices=(constants.L3_AGENT_MODE_DVR, constants.L3_AGENT_MODE_DVR_SNAT, constants.L3_AGENT_MODE_LEGACY, constants.L3_AGENT_MODE_DVR_NO_EXTERNAL), help=_("The working mode for the agent. Allowed modes are: " "'legacy' - this preserves the existing behavior " "where the L3 agent is deployed on a centralized " "networking node to provide L3 services like DNAT, " "and SNAT. Use this mode if you do not want to " "adopt DVR. 'dvr' - this mode enables DVR " "functionality and must be used for an L3 agent " "that runs on a compute host. 'dvr_snat' - this " "enables centralized SNAT support in conjunction " "with DVR. This mode must be used for an L3 agent " "running on a centralized node (or in single-host " "deployments, e.g. devstack). " "'dvr_no_external' - this mode enables only East/West " "DVR routing functionality for a L3 agent that runs on " "a compute host, the North/South functionality such " "as DNAT and SNAT will be provided by the centralized " "network node that is running in 'dvr_snat' mode. " "This mode should be used when there is no " "external network connectivity on the compute host.")), cfg.PortOpt('metadata_port', default=9697, help=_("TCP Port used by Neutron metadata namespace proxy.")), cfg.BoolOpt('handle_internal_only_routers', default=True, help=_("Indicates that this L3 agent should also handle " "routers that do not have an external network gateway " "configured. 
This option should be True only for a " "single agent in a Neutron deployment, and may be " "False for all agents if all routers must have an " "external network gateway.")), cfg.StrOpt('ipv6_gateway', default='', help=_("With IPv6, the network used for the external gateway " "does not need to have an associated subnet, since the " "automatically assigned link-local address (LLA) can " "be used. However, an IPv6 gateway address is needed " "for use as the next-hop for the default route. " "If no IPv6 gateway address is configured here, " "(and only then) the neutron router will be configured " "to get its default route from router advertisements " "(RAs) from the upstream router; in which case the " "upstream router must also be configured to send " "these RAs. " "The ipv6_gateway, when configured, should be the LLA " "of the interface on the upstream router. If a " "next-hop using a global unique address (GUA) is " "desired, it needs to be done via a subnet allocated " "to the network and not through this parameter. ")), cfg.StrOpt('prefix_delegation_driver', default='dibbler', help=_('Driver used for ipv6 prefix delegation. This needs to ' 'be an entry point defined in the ' 'neutron.agent.linux.pd_drivers namespace. See ' 'setup.cfg for entry points included with the neutron ' 'source.')), cfg.BoolOpt('enable_metadata_proxy', default=True, help=_("Allow running metadata proxy.")), cfg.StrOpt('metadata_access_mark', default='0x1', help=_('Iptables mangle mark used to mark metadata valid ' 'requests. This mark will be masked with 0xffff so ' 'that only the lower 16 bits will be used.')), cfg.StrOpt('external_ingress_mark', default='0x2', help=_('Iptables mangle mark used to mark ingress from ' 'external network. This mark will be masked with ' '0xffff so that only the lower 16 bits will be used.')), cfg.StrOpt('radvd_user', default='', help=_('The username passed to radvd, used to drop root ' 'privileges and change user ID to username and group ID ' 'to the primary group of username. If no user specified ' '(by default), the user executing the L3 agent will be ' 'passed. If "root" specified, because radvd is spawned ' 'as root, no "username" parameter will be passed.')), cfg.BoolOpt('cleanup_on_shutdown', default=False, help=_('Delete all routers on L3 agent shutdown. For L3 HA ' 'routers it includes a shutdown of keepalived and ' 'the state change monitor. NOTE: Setting to True ' 'could affect the data plane when stopping or ' 'restarting the L3 agent.')), ] def register_l3_agent_config_opts(opts, cfg=cfg.CONF): cfg.register_opts(opts) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/agent/l3/ha.py0000644000175000017500000000510600000000000022142 0ustar00coreycorey00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
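# NOTE: A sketch showing that register_l3_agent_config_opts(), unlike most
# register_* helpers in this package, takes the opt list explicitly; the
# module path neutron.conf.agent.l3.config follows this tree's layout.
from oslo_config import cfg

from neutron.conf.agent.l3 import config as l3_config

l3_config.register_l3_agent_config_opts(l3_config.OPTS, cfg.CONF)
cfg.CONF([])
print(cfg.CONF.agent_mode)     # 'legacy' (constants.L3_AGENT_MODE_LEGACY)
print(cfg.CONF.metadata_port)  # 9697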
from neutron_lib.utils import host from oslo_config import cfg from neutron._i18n import _ from neutron.agent.linux import keepalived OPTS = [ cfg.StrOpt('ha_confs_path', default='$state_path/ha_confs', help=_('Location to store keepalived config files')), cfg.StrOpt('ha_vrrp_auth_type', default='PASS', choices=keepalived.VALID_AUTH_TYPES, help=_('VRRP authentication type')), cfg.StrOpt('ha_vrrp_auth_password', help=_('VRRP authentication password'), secret=True), cfg.IntOpt('ha_vrrp_advert_int', default=2, help=_('The advertisement interval in seconds')), cfg.IntOpt('ha_keepalived_state_change_server_threads', default=(1 + host.cpu_count()) // 2, sample_default='(1 + ) / 2', min=1, help=_('Number of concurrent threads for ' 'keepalived server connection requests. ' 'More threads create a higher CPU load ' 'on the agent node.')), cfg.IntOpt('ha_vrrp_health_check_interval', default=0, help=_('The VRRP health check interval in seconds. Values > 0 ' 'enable VRRP health checks. Setting it to 0 disables ' 'VRRP health checks. Recommended value is 5. ' 'This will cause pings to be sent to the gateway ' 'IP address(es) - requires ICMP_ECHO_REQUEST ' 'to be enabled on the gateway. ' 'If gateway fails, all routers will be reported ' 'as master, and master election will be repeated ' 'in round-robin fashion, until one of the router ' 'restore the gateway connection.')), ] def register_l3_agent_ha_opts(cfg=cfg.CONF): cfg.register_opts(OPTS) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/agent/l3/keepalived.py0000644000175000017500000000327600000000000023671 0ustar00coreycorey00000000000000# Copyright (c) 2015 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ CLI_OPTS = [ cfg.StrOpt('router_id', help=_('ID of the router')), cfg.StrOpt('namespace', help=_('Namespace of the router')), cfg.StrOpt('conf_dir', help=_('Path to the router directory')), cfg.StrOpt('monitor_interface', help=_('Interface to monitor')), cfg.StrOpt('monitor_cidr', help=_('CIDR to monitor')), cfg.StrOpt('pid_file', help=_('Path to PID file for this process')), cfg.StrOpt('user', help=_('User (uid or name) running this process ' 'after its initialization')), cfg.StrOpt('group', help=_('Group (gid or name) running this process ' 'after its initialization')) ] OPTS = [ cfg.StrOpt('metadata_proxy_socket', default='$state_path/metadata_proxy', help=_('Location of Metadata Proxy UNIX domain ' 'socket')) ] def register_cli_l3_agent_keepalived_opts(conf=cfg.CONF): conf.register_cli_opts(CLI_OPTS) def register_l3_agent_keepalived_opts(conf=cfg.CONF): conf.register_opts(OPTS) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/agent/linux.py0000644000175000017500000000165200000000000022375 0ustar00coreycorey00000000000000# Copyright 2017 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron._i18n import _ from oslo_config import cfg IP_LIB_OPTS_LINUX = [ cfg.BoolOpt('ip_lib_force_root', default=False, help=_('Force ip_lib calls to use the root helper')), ] def register_iplib_opts(cfg=cfg.CONF): cfg.register_opts(IP_LIB_OPTS_LINUX) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.231044 neutron-16.0.0.0b2.dev214/neutron/conf/agent/metadata/0000755000175000017500000000000000000000000022440 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/agent/metadata/__init__.py0000644000175000017500000000000000000000000024537 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/agent/metadata/config.py0000644000175000017500000001104200000000000024255 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.utils import host from oslo_config import cfg from neutron._i18n import _ DEDUCE_MODE = 'deduce' USER_MODE = 'user' GROUP_MODE = 'group' ALL_MODE = 'all' SOCKET_MODES = (DEDUCE_MODE, USER_MODE, GROUP_MODE, ALL_MODE) SHARED_OPTS = [ cfg.StrOpt('metadata_proxy_socket', default='$state_path/metadata_proxy', help=_('Location for Metadata Proxy UNIX domain socket.')), cfg.StrOpt('metadata_proxy_user', default='', help=_("User (uid or name) running metadata proxy after " "its initialization (if empty: agent effective " "user).")), cfg.StrOpt('metadata_proxy_group', default='', help=_("Group (gid or name) running metadata proxy after " "its initialization (if empty: agent effective " "group).")) ] METADATA_PROXY_HANDLER_OPTS = [ cfg.StrOpt('auth_ca_cert', help=_("Certificate Authority public key (CA cert) " "file for ssl")), cfg.HostAddressOpt('nova_metadata_host', default='127.0.0.1', help=_("IP address or DNS name of Nova metadata " "server.")), cfg.PortOpt('nova_metadata_port', default=8775, help=_("TCP Port used by Nova metadata server.")), cfg.StrOpt('metadata_proxy_shared_secret', default='', help=_('When proxying metadata requests, Neutron signs the ' 'Instance-ID header with a shared secret to prevent ' 'spoofing. You may select any string for a secret, ' 'but it must match here and in the configuration used ' 'by the Nova Metadata Server. 
NOTE: Nova uses the same ' 'config key, but in [neutron] section.'), secret=True), cfg.StrOpt('nova_metadata_protocol', default='http', choices=['http', 'https'], help=_("Protocol to access nova metadata, http or https")), cfg.BoolOpt('nova_metadata_insecure', default=False, help=_("Allow to perform insecure SSL (https) requests to " "nova metadata")), cfg.StrOpt('nova_client_cert', default='', help=_("Client certificate for nova metadata api server.")), cfg.StrOpt('nova_client_priv_key', default='', help=_("Private key of client certificate.")) ] UNIX_DOMAIN_METADATA_PROXY_OPTS = [ cfg.StrOpt('metadata_proxy_socket_mode', default=DEDUCE_MODE, choices=SOCKET_MODES, help=_("Metadata Proxy UNIX domain socket mode, 4 values " "allowed: " "'deduce': deduce mode from metadata_proxy_user/group " "values, " "'user': set metadata proxy socket mode to 0o644, to " "use when metadata_proxy_user is agent effective user " "or root, " "'group': set metadata proxy socket mode to 0o664, to " "use when metadata_proxy_group is agent effective " "group or root, " "'all': set metadata proxy socket mode to 0o666, to use " "otherwise.")), cfg.IntOpt('metadata_workers', default=host.cpu_count() // 2, sample_default=' / 2', help=_('Number of separate worker processes for metadata ' 'server (defaults to half of the number of CPUs)')), cfg.IntOpt('metadata_backlog', default=4096, help=_('Number of backlog requests to configure the ' 'metadata server socket with')) ] def register_meta_conf_opts(opts, cfg=cfg.CONF): cfg.register_opts(opts) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.231044 neutron-16.0.0.0b2.dev214/neutron/conf/agent/ovn/0000755000175000017500000000000000000000000021462 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/agent/ovn/__init__.py0000644000175000017500000000000000000000000023561 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.231044 neutron-16.0.0.0b2.dev214/neutron/conf/agent/ovn/metadata/0000755000175000017500000000000000000000000023242 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/agent/ovn/metadata/__init__.py0000644000175000017500000000000000000000000025341 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/agent/ovn/metadata/config.py0000644000175000017500000000350400000000000025063 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
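# NOTE: A hedged sketch of registering the metadata-proxy option groups
# defined above; which lists a service registers depends on its role
# (METADATA_PROXY_HANDLER_OPTS are only needed by the proxy handler).
from oslo_config import cfg

from neutron.conf.agent.metadata import config as meta_conf

meta_conf.register_meta_conf_opts(meta_conf.SHARED_OPTS, cfg.CONF)
meta_conf.register_meta_conf_opts(meta_conf.METADATA_PROXY_HANDLER_OPTS,
                                  cfg.CONF)
cfg.CONF([])
print(cfg.CONF.nova_metadata_host)  # '127.0.0.1'
print(cfg.CONF.nova_metadata_port)  # 8775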
import itertools import shlex from neutron.conf.agent.metadata import config as meta_conf from oslo_config import cfg from oslo_privsep import priv_context from neutron._i18n import _ OVS_OPTS = [ cfg.StrOpt('ovsdb_connection', default='unix:/usr/local/var/run/openvswitch/db.sock', help=_('The connection string for the native OVSDB backend.\n' 'Use tcp:IP:PORT for TCP connection.\n' 'Use unix:FILE for unix domain socket connection.')), cfg.IntOpt('ovsdb_connection_timeout', default=180, help=_('Timeout in seconds for the OVSDB ' 'connection transaction')) ] def register_meta_conf_opts(opts, cfg=cfg.CONF, group=None): cfg.register_opts(opts, group=group) def list_metadata_agent_opts(): return [ ('DEFAULT', itertools.chain( meta_conf.SHARED_OPTS, meta_conf.METADATA_PROXY_HANDLER_OPTS, meta_conf.UNIX_DOMAIN_METADATA_PROXY_OPTS) ), ('ovs', OVS_OPTS) ] def get_root_helper(conf): return conf.AGENT.root_helper def setup_privsep(): priv_context.init(root_helper=shlex.split(get_root_helper(cfg.CONF))) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/agent/ovs_conf.py0000644000175000017500000000435300000000000023053 0ustar00coreycorey00000000000000# Copyright 2011 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ # Default timeout for ovsdb commands DEFAULT_OVSDB_TIMEOUT = 10 OPTS = [ cfg.IntOpt('ovsdb_timeout', default=DEFAULT_OVSDB_TIMEOUT, help=_('Timeout in seconds for ovsdb commands. ' 'If the timeout expires, ovsdb commands will fail with ' 'ALARMCLOCK error.')), cfg.IntOpt('bridge_mac_table_size', default=50000, help=_('The maximum number of MAC addresses to learn on ' 'a bridge managed by the Neutron OVS agent. Values ' 'outside a reasonable range (10 to 1,000,000) might be ' 'overridden by Open vSwitch according to the ' 'documentation.')), cfg.BoolOpt('igmp_snooping_enable', default=False, help=_('Enable IGMP snooping for integration bridge. If this ' 'option is set to True, support for Internet Group ' 'Management Protocol (IGMP) is enabled in integration ' 'bridge. ' 'Setting this option to True will also enable Open ' 'vSwitch mcast-snooping-disable-flood-unregistered ' 'flag. This option will disable flooding of ' 'unregistered multicast packets to all ports. ' 'The switch will send unregistered multicast packets ' 'only to ports connected to multicast routers.')), ] def register_ovs_agent_opts(cfg=cfg.CONF): cfg.register_opts(OPTS, 'OVS') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/agent/ovsdb_api.py0000644000175000017500000000366400000000000023211 0ustar00coreycorey00000000000000# Copyright 2017 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron._i18n import _ from oslo_config import cfg API_OPTS = [ cfg.StrOpt('ovsdb_connection', default='tcp:127.0.0.1:6640', help=_('The connection string for the OVSDB backend. ' 'Will be used for all ovsdb commands and ' 'by ovsdb-client when monitoring' )), cfg.StrOpt('ssl_key_file', help=_('The SSL private key file to use when interacting with ' 'OVSDB. Required when using an "ssl:" prefixed ' 'ovsdb_connection' )), cfg.StrOpt('ssl_cert_file', help=_('The SSL certificate file to use when interacting ' 'with OVSDB. Required when using an "ssl:" prefixed ' 'ovsdb_connection' )), cfg.StrOpt('ssl_ca_cert_file', help=_('The Certificate Authority (CA) certificate to use ' 'when interacting with OVSDB. Required when using an ' '"ssl:" prefixed ovsdb_connection' )), cfg.BoolOpt('ovsdb_debug', default=False, help=_('Enable OVSDB debug logs')), ] def register_ovsdb_api_opts(cfg=cfg.CONF): cfg.register_opts(API_OPTS, 'OVS') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/agent/securitygroups_rpc.py0000644000175000017500000000333700000000000025213 0ustar00coreycorey00000000000000# Copyright 2012, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_config import cfg from neutron._i18n import _ security_group_opts = [ cfg.StrOpt( 'firewall_driver', help=_('Driver for security groups firewall in the L2 agent')), cfg.BoolOpt( 'enable_security_group', default=True, help=_( 'Controls whether the neutron security group API is enabled ' 'in the server. It should be false when using no security ' 'groups or using the nova security group API.')), cfg.BoolOpt( 'enable_ipset', default=True, help=_('Use ipset to speed-up the iptables based security groups. ' 'Enabling ipset support requires that ipset is installed on L2 ' 'agent node.')), cfg.ListOpt( 'permitted_ethertypes', default=[], help=_('Comma-separated list of ethertypes to be permitted, in ' 'hexadecimal (starting with "0x"). For example, "0x4008" ' 'to permit InfiniBand.')) ] def register_securitygroups_opts(cfg=cfg.CONF): cfg.register_opts(security_group_opts, 'SECURITYGROUP') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/agent/windows.py0000644000175000017500000000122600000000000022725 0ustar00coreycorey00000000000000# Copyright 2017 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. IP_LIB_OPTS_WINDOWS = [] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/agent/xenapi_conf.py0000644000175000017500000000237400000000000023531 0ustar00coreycorey00000000000000# Copyright 2016 Citrix Systems. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ XENAPI_CONF_SECTION = 'xenapi' XENAPI_OPTS = [ cfg.StrOpt('connection_url', help=_("URL for connection to XenServer/Xen Cloud Platform.")), cfg.StrOpt('connection_username', help=_("Username for connection to XenServer/Xen Cloud " "Platform.")), cfg.StrOpt('connection_password', help=_("Password for connection to XenServer/Xen Cloud " "Platform."), secret=True) ] def register_xenapi_opts(cfg=cfg.CONF): cfg.register_opts(XENAPI_OPTS, group=XENAPI_CONF_SECTION) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/common.py0000644000175000017500000002414200000000000021427 0ustar00coreycorey00000000000000# Copyright 2011 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants from neutron_lib.utils import net from oslo_config import cfg from oslo_service import wsgi from neutron._i18n import _ core_opts = [ cfg.HostAddressOpt('bind_host', default='0.0.0.0', help=_("The host IP to bind to.")), cfg.PortOpt('bind_port', default=9696, help=_("The port to bind to")), cfg.StrOpt('api_extensions_path', default="", help=_("The path for API extensions. " "Note that this can be a colon-separated list of paths. " "For example: api_extensions_path = " "extensions:/path/to/more/exts:/even/more/exts. 
" "The __path__ of neutron.extensions is appended to " "this, so if your extensions are in there you don't " "need to specify them here.")), cfg.StrOpt('auth_strategy', default='keystone', help=_("The type of authentication to use")), cfg.StrOpt('core_plugin', help=_("The core plugin Neutron will use")), cfg.ListOpt('service_plugins', default=[], help=_("The service plugins Neutron will use")), cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00", help=_("The base MAC address Neutron will use for VIFs. " "The first 3 octets will remain unchanged. If the 4th " "octet is not 00, it will also be used. The others " "will be randomly generated.")), cfg.BoolOpt('allow_bulk', default=True, help=_("Allow the usage of the bulk API")), cfg.StrOpt('pagination_max_limit', default="-1", help=_("The maximum number of items returned in a single " "response, value was 'infinite' or negative integer " "means no limit")), cfg.ListOpt('default_availability_zones', default=[], help=_("Default value of availability zone hints. The " "availability zone aware schedulers use this when " "the resources availability_zone_hints is empty. " "Multiple availability zones can be specified by a " "comma separated string. This value can be empty. " "In this case, even if availability_zone_hints for " "a resource is empty, availability zone is " "considered for high availability while scheduling " "the resource.")), cfg.IntOpt('max_dns_nameservers', default=5, help=_("Maximum number of DNS nameservers per subnet")), cfg.IntOpt('max_subnet_host_routes', default=20, help=_("Maximum number of host routes per subnet")), cfg.BoolOpt('ipv6_pd_enabled', default=False, help=_("Enables IPv6 Prefix Delegation for automatic subnet " "CIDR allocation. " "Set to True to enable IPv6 Prefix Delegation for " "subnet allocation in a PD-capable environment. Users " "making subnet creation requests for IPv6 subnets " "without providing a CIDR or subnetpool ID will be " "given a CIDR via the Prefix Delegation mechanism. " "Note that enabling PD will override the behavior of " "the default IPv6 subnetpool.")), cfg.IntOpt('dhcp_lease_duration', default=86400, help=_("DHCP lease duration (in seconds). Use -1 to tell " "dnsmasq to use infinite lease times.")), cfg.StrOpt('dns_domain', default='openstacklocal', help=_('Domain to use for building the hostnames')), cfg.StrOpt('external_dns_driver', help=_('Driver for external DNS integration.')), cfg.BoolOpt('dhcp_agent_notification', default=True, help=_("Allow sending resource operation" " notification to DHCP agent")), cfg.BoolOpt('allow_overlapping_ips', default=False, help=_("Allow overlapping IP support in Neutron. " "Attention: the following parameter MUST be set to " "False if Neutron is being used in conjunction with " "Nova security groups.")), cfg.HostAddressOpt('host', default=net.get_hostname(), sample_default='example.domain', help=_("Hostname to be used by the Neutron server, " "agents and services running on this machine. " "All the agents and services running on this " "machine must use the same host value.")), cfg.StrOpt("network_link_prefix", help=_("This string is prepended to the normal URL that is " "returned in links to the OpenStack Network API. 
If it " "is empty (the default), the URLs are returned " "unchanged.")), cfg.BoolOpt('notify_nova_on_port_status_changes', default=True, help=_("Send notification to nova when port status changes")), cfg.BoolOpt('notify_nova_on_port_data_changes', default=True, help=_("Send notification to nova when port data (fixed_ips/" "floatingip) changes so nova can update its cache.")), cfg.IntOpt('send_events_interval', default=2, help=_('Number of seconds between sending events to nova if ' 'there are any events to send.')), cfg.StrOpt('setproctitle', default='on', help=_("Set process name to match child worker role. " "Available options are: 'off' - retains the previous " "behavior; 'on' - renames processes to " "'neutron-server: role (original string)'; " "'brief' - renames the same as 'on', but without the " "original string, such as 'neutron-server: role'.")), cfg.StrOpt('ipam_driver', default='internal', help=_("Neutron IPAM (IP address management) driver to use. " "By default, the reference implementation of the " "Neutron IPAM driver is used.")), cfg.BoolOpt('vlan_transparent', default=False, help=_('If True, then allow plugins that support it to ' 'create VLAN transparent networks.')), cfg.BoolOpt('filter_validation', default=True, help=_('If True, then allow plugins to decide ' 'whether to perform validations on filter parameters. ' 'Filter validation is enabled if this config ' 'is turned on and it is supported by all plugins')), cfg.IntOpt('global_physnet_mtu', default=constants.DEFAULT_NETWORK_MTU, deprecated_name='segment_mtu', deprecated_group='ml2', help=_('MTU of the underlying physical network. Neutron uses ' 'this value to calculate MTU for all virtual network ' 'components. For flat and VLAN networks, neutron uses ' 'this value without modification. For overlay networks ' 'such as VXLAN, neutron automatically subtracts the ' 'overlay protocol overhead from this value. Defaults ' 'to 1500, the standard value for Ethernet.')) ] core_cli_opts = [ cfg.StrOpt('state_path', default='/var/lib/neutron', help=_("Where to store Neutron state files. " "This directory must be writable by the agent.")), ] def register_core_common_config_opts(cfg=cfg.CONF): cfg.register_opts(core_opts) cfg.register_cli_opts(core_cli_opts) wsgi.register_opts(cfg) NOVA_CONF_SECTION = 'nova' nova_opts = [ cfg.StrOpt('region_name', help=_('Name of nova region to use. Useful if keystone manages' ' more than one region.')), cfg.StrOpt('endpoint_type', default='public', choices=['public', 'admin', 'internal'], help=_('Type of the nova endpoint to use. This endpoint will' ' be looked up in the keystone catalog and should be' ' one of public, internal or admin.')), ] def register_nova_opts(cfg=cfg.CONF): cfg.register_opts(nova_opts, group=NOVA_CONF_SECTION) PLACEMENT_CONF_SECTION = 'placement' placement_opts = [ cfg.StrOpt('region_name', help=_('Name of placement region to use. Useful if keystone ' 'manages more than one region.')), cfg.StrOpt('endpoint_type', default='public', choices=['public', 'admin', 'internal'], help=_('Type of the placement endpoint to use. This endpoint ' 'will be looked up in the keystone catalog and should ' 'be one of public, internal or admin.')), ] def register_placement_opts(cfg=cfg.CONF): cfg.register_opts(placement_opts, group=PLACEMENT_CONF_SECTION) IRONIC_CONF_SECTION = 'ironic' ironic_opts = [ cfg.BoolOpt('enable_notifications', default=False, help=_("Send notification events to ironic. 
(For example on " "relevant port status changes.)")), ] def register_ironic_opts(cfg=cfg.CONF): cfg.register_opts(ironic_opts, group=IRONIC_CONF_SECTION) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.235044 neutron-16.0.0.0b2.dev214/neutron/conf/db/0000755000175000017500000000000000000000000020147 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/db/__init__.py0000644000175000017500000000000000000000000022246 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/db/dvr_mac_db.py0000644000175000017500000000265200000000000022606 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ DVR_MAC_ADDRESS_OPTS = [ cfg.StrOpt('dvr_base_mac', default="fa:16:3f:00:00:00", help=_("The base mac address used for unique " "DVR instances by Neutron. The first 3 octets will " "remain unchanged. If the 4th octet is not 00, it will " "also be used. The others will be randomly generated. " "The 'dvr_base_mac' *must* be different from " "'base_mac' to avoid mixing them up with MAC's " "allocated for tenant ports. A 4 octet example would be " "dvr_base_mac = fa:16:3f:4f:00:00. The default is 3 " "octet")), ] def register_db_dvr_mac_opts(conf=cfg.CONF): conf.register_opts(DVR_MAC_ADDRESS_OPTS) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/db/extraroute_db.py0000644000175000017500000000163600000000000023376 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
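# NOTE: A minimal sketch of registering the server-side option groups
# defined above (core options plus the [nova] and [placement] sections);
# the module path neutron.conf.common follows this tree's layout.
from oslo_config import cfg

from neutron.conf import common as common_config

common_config.register_core_common_config_opts(cfg.CONF)
common_config.register_nova_opts(cfg.CONF)
common_config.register_placement_opts(cfg.CONF)
cfg.CONF([])
print(cfg.CONF.bind_port)           # 9696
print(cfg.CONF.nova.endpoint_type)  # 'public'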
from oslo_config import cfg from neutron._i18n import _ EXTRA_ROUTE_OPTS = [ # TODO(nati): use quota framework when it supports quota for attributes cfg.IntOpt('max_routes', default=30, help=_("Maximum number of routes per router")), ] def register_db_extraroute_opts(conf=cfg.CONF): conf.register_opts(EXTRA_ROUTE_OPTS) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/db/l3_agentschedulers_db.py0000644000175000017500000000251700000000000024751 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ L3_AGENTS_SCHEDULER_OPTS = [ cfg.StrOpt('router_scheduler_driver', default='neutron.scheduler.l3_agent_scheduler.' 'LeastRoutersScheduler', help=_('Driver to use for scheduling ' 'a router to a default L3 agent')), cfg.BoolOpt('router_auto_schedule', default=True, help=_('Allow auto scheduling of routers to L3 agent.')), cfg.BoolOpt('allow_automatic_l3agent_failover', default=False, help=_('Automatically reschedule routers from offline L3 ' 'agents to online L3 agents.')), ] def register_db_l3agentschedulers_opts(conf=cfg.CONF): conf.register_opts(L3_AGENTS_SCHEDULER_OPTS) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/db/l3_dvr_db.py0000644000175000017500000000300000000000000022350 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ ROUTER_DISTRIBUTED_OPTS = [ cfg.BoolOpt('router_distributed', default=False, help=_("System-wide flag to determine the type of router " "that tenants can create. Only admin can override.")), cfg.BoolOpt('enable_dvr', default=True, help=_("Determine if setup is configured for DVR. If False, " "DVR API extension will be disabled.")), cfg.BoolOpt('host_dvr_for_dhcp', default=True, help=_("Flag to determine if hosting a DVR local router to " "the DHCP agent is desired. 
If False, any L3 function " "supported by the DHCP agent instance, such as DNS, " "will not be possible.")), ] def register_db_l3_dvr_opts(conf=cfg.CONF): conf.register_opts(ROUTER_DISTRIBUTED_OPTS) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/db/l3_gwmode_db.py0000644000175000017500000000164300000000000023052 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ L3GWMODE_OPTS = [ cfg.BoolOpt('enable_snat_by_default', default=True, help=_('Define the default value of enable_snat if not ' 'provided in external_gateway_info.')) ] def register_db_l3_gwmode_opts(conf=cfg.CONF): conf.register_opts(L3GWMODE_OPTS) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/db/l3_hamode_db.py0000644000175000017500000000357500000000000023031 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as n_const from oslo_config import cfg from neutron._i18n import _ L3_HA_OPTS = [ cfg.BoolOpt('l3_ha', default=False, help=_('Enable HA mode for virtual routers.')), cfg.IntOpt('max_l3_agents_per_router', default=3, help=_("Maximum number of L3 agents which an HA router will be " "scheduled on. If it is set to 0 then the router will " "be scheduled on every agent.")), cfg.StrOpt('l3_ha_net_cidr', default=n_const.L3_HA_NET_CIDR, help=_('Subnet used for the l3 HA admin network.')), cfg.StrOpt('l3_ha_network_type', default='', help=_("The network type to use when creating the HA network " "for an HA router. By default or if empty, the first " "'tenant_network_types' is used. This is helpful when " "the VRRP traffic should use a specific network which " "is not the default one.")), cfg.StrOpt('l3_ha_network_physical_name', default='', help=_("The physical network name with which the HA network " "can be created.")) ] def register_db_l3_hamode_opts(conf=cfg.CONF): conf.register_opts(L3_HA_OPTS) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/db/migration_cli.py0000644000175000017500000000314700000000000023346 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg import pkg_resources from neutron._i18n import _ MIGRATION_ENTRYPOINTS = 'neutron.db.alembic_migrations' migration_entrypoints = { entrypoint.name: entrypoint for entrypoint in pkg_resources.iter_entry_points(MIGRATION_ENTRYPOINTS) } INSTALLED_SUBPROJECTS = [project_ for project_ in migration_entrypoints] CORE_OPTS = [ cfg.StrOpt('subproject', choices=INSTALLED_SUBPROJECTS, help=(_("The subproject to execute the command against. " "Can be one of: '%s'.") % "', '".join(INSTALLED_SUBPROJECTS))) ] DB_OPTS = [ cfg.StrOpt('connection', default='', secret=True, help=_('URL to database')), cfg.StrOpt('engine', default='', help=_('Database engine for which script will be generated ' 'when using offline migration.')), ] def register_db_cli_opts(conf): conf.register_cli_opts(CORE_OPTS) conf.register_cli_opts(DB_OPTS, 'database') ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.235044 neutron-16.0.0.0b2.dev214/neutron/conf/extensions/0000755000175000017500000000000000000000000021761 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/extensions/__init__.py0000644000175000017500000000000000000000000024060 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/extensions/allowedaddresspairs.py0000644000175000017500000000177500000000000026401 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ allowed_address_pair_opts = [ # TODO(limao): use quota framework when it support quota for attributes cfg.IntOpt('max_allowed_address_pair', default=10, help=_("Maximum number of allowed address pairs")), ] def register_allowed_address_pair_opts(cfg=cfg.CONF): cfg.register_opts(allowed_address_pair_opts) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/extensions/conntrack_helper.py0000644000175000017500000000372000000000000025656 0ustar00coreycorey00000000000000# Copyright (c) 2019 Red Hat, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as n_const from oslo_config import cfg from neutron._i18n import _ conntrack_helper_opts = [ cfg.ListOpt('allowed_conntrack_helpers', default=[ {'amanda': n_const.PROTO_NAME_TCP}, {'ftp': n_const.PROTO_NAME_TCP}, {'h323': n_const.PROTO_NAME_UDP}, {'h323': n_const.PROTO_NAME_TCP}, {'irc': n_const.PROTO_NAME_TCP}, {'netbios-ns': n_const.PROTO_NAME_UDP}, {'pptp': n_const.PROTO_NAME_TCP}, {'sane': n_const.PROTO_NAME_TCP}, {'sip': n_const.PROTO_NAME_UDP}, {'sip': n_const.PROTO_NAME_TCP}, {'snmp': n_const.PROTO_NAME_UDP}, {'tftp': n_const.PROTO_NAME_UDP} ], item_type=cfg.types.Dict(), sample_default=[ {'tftp': 'udp'}, {'ftp': 'tcp'}, {'sip': 'tcp'}, {'sip': 'udp'} ], help=_('Defines the allowed conntrack helpers, and ' 'conntrack helper module protocol constraints.') ) ] def register_conntrack_helper_opts(cfg=cfg.CONF): cfg.register_opts(conntrack_helper_opts) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.235044 neutron-16.0.0.0b2.dev214/neutron/conf/plugins/0000755000175000017500000000000000000000000021243 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/plugins/__init__.py0000644000175000017500000000000000000000000023342 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.235044 neutron-16.0.0.0b2.dev214/neutron/conf/plugins/ml2/0000755000175000017500000000000000000000000021735 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/plugins/ml2/__init__.py0000644000175000017500000000000000000000000024034 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/plugins/ml2/config.py0000644000175000017500000000722100000000000023556 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ ml2_opts = [ cfg.ListOpt('type_drivers', default=['local', 'flat', 'vlan', 'gre', 'vxlan', 'geneve'], help=_("List of network type driver entrypoints to be loaded " "from the neutron.ml2.type_drivers namespace.")), cfg.ListOpt('tenant_network_types', default=['local'], help=_("Ordered list of network_types to allocate as tenant " "networks. 
The default value 'local' is useful for " "single-box testing but provides no connectivity " "between hosts.")), cfg.ListOpt('mechanism_drivers', default=[], help=_("An ordered list of networking mechanism driver " "entrypoints to be loaded from the " "neutron.ml2.mechanism_drivers namespace.")), cfg.ListOpt('extension_drivers', default=[], help=_("An ordered list of extension driver " "entrypoints to be loaded from the " "neutron.ml2.extension_drivers namespace. " "For example: extension_drivers = port_security,qos")), cfg.IntOpt('path_mtu', default=0, help=_('Maximum size of an IP packet (MTU) that can traverse ' 'the underlying physical network infrastructure without ' 'fragmentation when using an overlay/tunnel protocol. ' 'This option allows specifying a physical network MTU ' 'value that differs from the default global_physnet_mtu ' 'value.')), cfg.ListOpt('physical_network_mtus', default=[], help=_("A list of mappings of physical networks to MTU " "values. The format of the mapping is " "<physnet>:<mtu val>. This mapping allows " "specifying a physical network MTU value that " "differs from the default global_physnet_mtu value.")), cfg.StrOpt('external_network_type', help=_("Default network type for external networks when no " "provider attributes are specified. By default it is " "None, which means that if provider attributes are not " "specified while creating external networks then they " "will have the same type as tenant networks. Allowed " "values for external_network_type config option depend " "on the network type values configured in type_drivers " "config option.")), cfg.IntOpt('overlay_ip_version', default=4, help=_("IP version of all overlay (tunnel) network endpoints. " "Use a value of 4 for IPv4 or 6 for IPv6.")) ] def register_ml2_plugin_opts(cfg=cfg.CONF): cfg.register_opts(ml2_opts, "ml2") ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.235044 neutron-16.0.0.0b2.dev214/neutron/conf/plugins/ml2/drivers/0000755000175000017500000000000000000000000023413 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/plugins/ml2/drivers/__init__.py0000644000175000017500000000000000000000000025512 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/plugins/ml2/drivers/agent.py0000644000175000017500000000324000000000000025062 0ustar00coreycorey00000000000000# Copyright (c) 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ agent_opts = [ cfg.IntOpt('polling_interval', default=2, help=_("The number of seconds the agent will wait between " "polling for local device changes.")), cfg.IntOpt('quitting_rpc_timeout', default=10, help=_("Set new timeout in seconds for new rpc calls after " "agent receives SIGTERM. 
If the value is set to 0, the rpc " "timeout won't be changed")), cfg.IntOpt('dscp', min=0, max=63, help=_("The DSCP value to use for outer headers during tunnel " "encapsulation.")), cfg.BoolOpt('dscp_inherit', default=False, help=_("If set to True, the DSCP value of tunnel " "interfaces is overwritten and set to inherit. " "The DSCP value of the inner header is then " "copied to the outer header.")), ] def register_agent_opts(cfg=cfg.CONF): cfg.register_opts(agent_opts, "AGENT") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/plugins/ml2/drivers/driver_type.py0000644000175000017500000000723500000000000026330 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as p_const from oslo_config import cfg from neutron._i18n import _ gre_opts = [ cfg.ListOpt('tunnel_id_ranges', default=[], help=_("Comma-separated list of <tun_min>:<tun_max> tuples " "enumerating ranges of GRE tunnel IDs that are " "available for tenant network allocation")) ] flat_opts = [ cfg.ListOpt('flat_networks', default='*', help=_("List of physical_network names with which flat " "networks can be created. Use default '*' to allow " "flat networks with arbitrary physical_network names. " "Use an empty list to disable flat networks.")) ] geneve_opts = [ cfg.ListOpt('vni_ranges', default=[], help=_("Comma-separated list of <vni_min>:<vni_max> tuples " "enumerating ranges of Geneve VNI IDs that are " "available for tenant network allocation")), cfg.IntOpt('max_header_size', default=p_const.GENEVE_ENCAP_MIN_OVERHEAD, help=_("Geneve encapsulation header size is dynamic, so this " "value is used to calculate the maximum MTU " "for the driver. " "The default size for this field is 30, which is the " "size of the Geneve header without any additional " "option headers.")), ] vxlan_opts = [ cfg.ListOpt('vni_ranges', default=[], help=_("Comma-separated list of <vni_min>:<vni_max> tuples " "enumerating ranges of VXLAN VNI IDs that are " "available for tenant network allocation")), cfg.StrOpt('vxlan_group', help=_("Multicast group for VXLAN. When configured, will " "enable sending all broadcast traffic to this multicast " "group. 
When left unconfigured, will disable multicast " "VXLAN mode.")), ] vlan_opts = [ cfg.ListOpt('network_vlan_ranges', default=[], help=_("List of <physical_network>:<vlan_min>:<vlan_max> or " "<physical_network> specifying physical_network names " "usable for VLAN provider and tenant networks, as " "well as ranges of VLAN tags on each available for " "allocation to tenant networks.")) ] def register_ml2_drivers_gre_opts(cfg=cfg.CONF): cfg.register_opts(gre_opts, "ml2_type_gre") def register_ml2_drivers_flat_opts(cfg=cfg.CONF): cfg.register_opts(flat_opts, "ml2_type_flat") def register_ml2_drivers_geneve_opts(cfg=cfg.CONF): cfg.register_opts(geneve_opts, "ml2_type_geneve") def register_ml2_drivers_vxlan_opts(cfg=cfg.CONF): cfg.register_opts(vxlan_opts, "ml2_type_vxlan") def register_ml2_drivers_vlan_opts(cfg=cfg.CONF): cfg.register_opts(vlan_opts, "ml2_type_vlan") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/plugins/ml2/drivers/linuxbridge.py0000644000175000017500000001366400000000000026303 0ustar00coreycorey00000000000000# Copyright 2012 Cisco Systems, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ DEFAULT_BRIDGE_MAPPINGS = [] DEFAULT_INTERFACE_MAPPINGS = [] DEFAULT_VXLAN_GROUP = '224.0.0.1' DEFAULT_KERNEL_HZ_VALUE = 250 # [Hz] DEFAULT_TC_TBF_LATENCY = 50 # [ms] vxlan_opts = [ cfg.BoolOpt('enable_vxlan', default=True, help=_("Enable VXLAN on the agent. Can be enabled when " "agent is managed by ml2 plugin using linuxbridge " "mechanism driver")), cfg.IntOpt('ttl', help=_("TTL for vxlan interface protocol packets.")), cfg.IntOpt('tos', deprecated_for_removal=True, help=_("TOS for vxlan interface protocol packets. This option " "is deprecated in favor of the dscp option in the AGENT " "section and will be removed in a future release. " "To convert the TOS value to DSCP, divide by 4.")), cfg.StrOpt('vxlan_group', default=DEFAULT_VXLAN_GROUP, help=_("Multicast group(s) for vxlan interface. A range of " "group addresses may be specified by using CIDR " "notation. Specifying a range allows different VNIs to " "use different group addresses, reducing or eliminating " "spurious broadcast traffic to the tunnel endpoints. " "To reserve a unique group for each possible " "(24-bit) VNI, use a /8 such as 239.0.0.0/8. This " "setting must be the same on all the agents.")), cfg.IPOpt('local_ip', help=_("IP address of local overlay (tunnel) network endpoint. " "Use either an IPv4 or IPv6 address that resides on one " "of the host network interfaces. 
The IP version of this " "value must match the value of the 'overlay_ip_version' " "option in the ML2 plug-in configuration file on the " "neutron server node(s).")), cfg.PortOpt('udp_srcport_min', default=0, help=_("The minimum of the UDP source port range used for " "VXLAN communication.")), cfg.PortOpt('udp_srcport_max', default=0, help=_("The maximum of the UDP source port range used for " "VXLAN communication.")), cfg.PortOpt('udp_dstport', help=_("The UDP port used for VXLAN communication. By " "default, the Linux kernel doesn't use the IANA " "assigned standard value, so if you want to use it, " "this option must be set to 4789. It is not set by " "default because of backward compatibility.")), cfg.BoolOpt('l2_population', default=False, help=_("Extension to use alongside ml2 plugin's l2population " "mechanism driver. It enables the plugin to populate " "VXLAN forwarding table.")), cfg.BoolOpt('arp_responder', default=False, help=_("Enable local ARP responder which provides local " "responses instead of performing ARP broadcast into " "the overlay. Enabling local ARP responder is not " "fully compatible with the allowed-address-pairs " "extension.") ), cfg.ListOpt('multicast_ranges', default=[], help=_("Optional comma-separated list of " "<multicast address>:<vni_min>:<vni_max> triples " "describing how to assign a multicast address to " "VXLAN according to its VNI ID.")), ] bridge_opts = [ cfg.ListOpt('physical_interface_mappings', default=DEFAULT_INTERFACE_MAPPINGS, help=_("Comma-separated list of " "<physical_network>:<physical_interface> tuples " "mapping physical network names to the agent's " "node-specific physical network interfaces to be used " "for flat and VLAN networks. All physical networks " "listed in network_vlan_ranges on the server should " "have mappings to appropriate interfaces on each " "agent.")), cfg.ListOpt('bridge_mappings', default=DEFAULT_BRIDGE_MAPPINGS, help=_("List of <physical_network>:<physical_bridge>")), ] qos_options = [ cfg.IntOpt('kernel_hz', default=DEFAULT_KERNEL_HZ_VALUE, help=_("Value of host kernel tick rate (hz) for calculating " "minimum burst value in bandwidth limit rules for " "a port with QoS. See kernel configuration file for " "HZ value and tc-tbf manual for more information.")), cfg.IntOpt('tbf_latency', default=DEFAULT_TC_TBF_LATENCY, help=_("Value of latency (ms) for calculating size of queue " "for a port with QoS. See tc-tbf manual for more " "information.")) ] def register_linuxbridge_opts(cfg=cfg.CONF): cfg.register_opts(vxlan_opts, "VXLAN") cfg.register_opts(bridge_opts, "LINUX_BRIDGE") cfg.register_opts(qos_options, "QOS") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/plugins/ml2/drivers/macvtap.py0000644000175000017500000000267200000000000025427 0ustar00coreycorey00000000000000# Copyright (c) 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
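# NOTE(editor): Illustrative only, not part of neutron -- a tiny stand-alone
# parser for the <physical_network>:<physical_interface> pairs accepted by
# the *_interface_mappings options above (linuxbridge, and macvtap below);
# neutron itself uses its own shared helpers for this.
def _sketch_parse_interface_mappings(pairs):
    """Turn ['physnet1:eth1', ...] into {'physnet1': 'eth1', ...}."""
    mappings = {}
    for pair in pairs:
        physnet, _, interface = pair.partition(':')
        if not physnet or not interface:
            raise ValueError("Invalid mapping: %r" % pair)
        if physnet in mappings:
            raise ValueError("Duplicate physical network: %r" % physnet)
        mappings[physnet] = interface
    return mappings

assert _sketch_parse_interface_mappings(['physnet1:eth1']) == {
    'physnet1': 'eth1'}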
from oslo_config import cfg from neutron._i18n import _ DEFAULT_INTERFACE_MAPPINGS = [] macvtap_opts = [ cfg.ListOpt('physical_interface_mappings', default=DEFAULT_INTERFACE_MAPPINGS, help=_("Comma-separated list of " "<physical_network>:<physical_interface> tuples " "mapping physical network names to the agent's " "node-specific physical network interfaces to be used " "for flat and VLAN networks. All physical networks " "listed in network_vlan_ranges on the server should " "have mappings to appropriate interfaces on each " "agent.")), ] def register_macvtap_opts(cfg=cfg.CONF): cfg.register_opts(macvtap_opts, "macvtap") ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.235044 neutron-16.0.0.0b2.dev214/neutron/conf/plugins/ml2/drivers/mech_sriov/0000755000175000017500000000000000000000000025551 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/plugins/ml2/drivers/mech_sriov/__init__.py0000644000175000017500000000000000000000000027650 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/plugins/ml2/drivers/mech_sriov/agent_common.py0000644000175000017500000001065100000000000030574 0ustar00coreycorey00000000000000# Copyright 2014 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from neutron._i18n import _ DEFAULT_DEVICE_MAPPINGS = [] DEFAULT_EXCLUDE_DEVICES = [] sriov_nic_opts = [ cfg.ListOpt('physical_device_mappings', default=DEFAULT_DEVICE_MAPPINGS, help=_("Comma-separated list of " "<physical_network>:<network_device> tuples mapping " "physical network names to the agent's node-specific " "physical network device interfaces of SR-IOV physical " "function to be used for VLAN networks. All physical " "networks listed in network_vlan_ranges on the server " "should have mappings to appropriate interfaces on " "each agent.")), cfg.ListOpt('exclude_devices', default=DEFAULT_EXCLUDE_DEVICES, help=_("Comma-separated list of " "<network_device>:<vfs_to_exclude> tuples, mapping " "network_device to the agent's node-specific list of " "virtual functions that should not be used for virtual " "networking. vfs_to_exclude is a semicolon-separated " "list of virtual functions to exclude from " "network_device. The network_device in the mapping " "should appear in the physical_device_mappings " "list.")), cfg.ListOpt('resource_provider_bandwidths', default=[], help=_("Comma-separated list of " "<network_device>:<egress_bw>:<ingress_bw> tuples, " "showing the available bandwidth for the given device " "in the given direction. The direction is meant from " "VM perspective. Bandwidth is measured in kilobits per " "second (kbps). The device must appear in " "physical_device_mappings as the value. But not all " "devices in physical_device_mappings must be listed " "here. For a device not listed here we neither create " "a resource provider in placement nor report " "inventories against. 
An omitted direction means we do " "not report an inventory for the corresponding " "class.")), cfg.DictOpt('resource_provider_hypervisors', default={}, help=_("Mapping of network devices to hypervisors: " "<network_device>:<hypervisor>,... " "hypervisor name is used to locate the parent of the " "resource provider tree. Only needs to be set in the " "rare case when the hypervisor name is different from " "the DEFAULT.host config option value as known by the " "nova-compute managing that hypervisor.")), cfg.DictOpt('resource_provider_inventory_defaults', default={'allocation_ratio': 1.0, 'min_unit': 1, 'step_size': 1, 'reserved': 0}, help=_("Key:value pairs to specify defaults used " "while reporting resource provider inventories. " "Possible keys with their types: " "allocation_ratio:float, " "max_unit:int, min_unit:int, " "reserved:int, step_size:int, " "See also: " "https://docs.openstack.org/api-ref/placement/" "#update-resource-provider-inventories")), ] def register_agent_sriov_nic_opts(cfg=cfg.CONF): cfg.register_opts(sriov_nic_opts, 'SRIOV_NIC') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/plugins/ml2/drivers/mech_sriov/mech_sriov_conf.py0000644000175000017500000000262700000000000031275 0ustar00coreycorey00000000000000# Copyright (c) 2018 Ericsson # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ sriov_driver_opts = [ cfg.ListOpt('vnic_type_blacklist', default=[], help=_("Comma-separated list of VNIC types for which support " "is administratively prohibited by the mechanism " "driver. Please note that the supported vnic_types " "depend on your network interface card, on the kernel " "version of your operating system, and on other " "factors. " "In case of sriov mechanism driver the valid " "VNIC types are direct, macvtap and direct-physical.")), ] def register_sriov_mech_driver_opts(cfg=cfg.CONF): cfg.register_opts(sriov_driver_opts, "SRIOV_DRIVER") ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.235044 neutron-16.0.0.0b2.dev214/neutron/conf/plugins/ml2/drivers/openvswitch/0000755000175000017500000000000000000000000025764 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/plugins/ml2/drivers/openvswitch/__init__.py0000644000175000017500000000000000000000000030063 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/plugins/ml2/drivers/openvswitch/mech_ovs_conf.py0000644000175000017500000000347000000000000031152 0ustar00coreycorey00000000000000# Copyright (c) 2018 Ericsson # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ ovs_driver_opts = [ cfg.ListOpt('vnic_type_blacklist', default=[], help=_("Comma-separated list of VNIC types for which support " "is administratively prohibited by the mechanism " "driver. Please note that the supported vnic_types " "depend on your network interface card, on the kernel " "version of your operating system, and on other " "factors, like OVS version. In case of ovs mechanism " "driver the valid vnic types are normal and direct. " "Note that direct is supported only from kernel 4.8, " "and from ovs 2.8.0. Binding a DIRECT (SR-IOV) port " "allows offloading the OVS flows to the SR-IOV NIC " "using tc. This enables hardware offload via tc and " "lets the OpenFlow control plane manage the VF through " "its representor net-device.")), ] def register_ovs_mech_driver_opts(cfg=cfg.CONF): cfg.register_opts(ovs_driver_opts, "OVS_DRIVER") ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.235044 neutron-16.0.0.0b2.dev214/neutron/conf/plugins/ml2/drivers/ovn/0000755000175000017500000000000000000000000024215 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/plugins/ml2/drivers/ovn/__init__.py0000644000175000017500000000000000000000000026314 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/plugins/ml2/drivers/ovn/ovn_conf.py0000644000175000017500000003000100000000000026370 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import portbindings from oslo_config import cfg from oslo_log import log as logging from ovsdbapp.backend.ovs_idl import vlog from neutron._i18n import _ from neutron.conf.agent import ovs_conf LOG = logging.getLogger(__name__) EXTRA_LOG_LEVEL_DEFAULTS = [ ] VLOG_LEVELS = {'CRITICAL': vlog.CRITICAL, 'ERROR': vlog.ERROR, 'WARNING': vlog.WARN, 'INFO': vlog.INFO, 'DEBUG': vlog.DEBUG} ovn_opts = [ cfg.StrOpt('ovn_nb_connection', default='tcp:127.0.0.1:6641', help=_('The connection string for the OVN_Northbound OVSDB.\n' 'Use tcp:IP:PORT for TCP connection.\n' 'Use ssl:IP:PORT for SSL connection. 
The ' 'ovn_nb_private_key, ovn_nb_certificate and ' 'ovn_nb_ca_cert are mandatory.\n' 'Use unix:FILE for unix domain socket connection.')), cfg.StrOpt('ovn_nb_private_key', default='', help=_('The PEM file with private key for SSL connection to ' 'OVN-NB-DB')), cfg.StrOpt('ovn_nb_certificate', default='', help=_('The PEM file with certificate that certifies the ' 'private key specified in ovn_nb_private_key')), cfg.StrOpt('ovn_nb_ca_cert', default='', help=_('The PEM file with CA certificate that OVN should use to' ' verify certificates presented to it by SSL peers')), cfg.StrOpt('ovn_sb_connection', default='tcp:127.0.0.1:6642', help=_('The connection string for the OVN_Southbound OVSDB.\n' 'Use tcp:IP:PORT for TCP connection.\n' 'Use ssl:IP:PORT for SSL connection. The ' 'ovn_sb_private_key, ovn_sb_certificate and ' 'ovn_sb_ca_cert are mandatory.\n' 'Use unix:FILE for unix domain socket connection.')), cfg.StrOpt('ovn_sb_private_key', default='', help=_('The PEM file with private key for SSL connection to ' 'OVN-SB-DB')), cfg.StrOpt('ovn_sb_certificate', default='', help=_('The PEM file with certificate that certifies the ' 'private key specified in ovn_sb_private_key')), cfg.StrOpt('ovn_sb_ca_cert', default='', help=_('The PEM file with CA certificate that OVN should use to' ' verify certificates presented to it by SSL peers')), cfg.IntOpt('ovsdb_connection_timeout', default=180, help=_('Timeout in seconds for the OVSDB ' 'connection transaction')), cfg.IntOpt('ovsdb_retry_max_interval', default=180, help=_('Max interval in seconds between ' 'each retry to get the OVN NB and SB IDLs')), cfg.IntOpt('ovsdb_probe_interval', min=0, default=60000, help=_('The probe interval for the OVSDB session, in ' 'milliseconds. If this is zero, it disables the ' 'connection keepalive feature. If non-zero the value ' 'will be forced to at least 1000 milliseconds. Defaults ' 'to 60 seconds.')), cfg.StrOpt('neutron_sync_mode', default='log', choices=('off', 'log', 'repair'), help=_('The synchronization mode of OVN_Northbound OVSDB ' 'with Neutron DB.\n' 'off - synchronization is off \n' 'log - during neutron-server startup, ' 'check to see if OVN is in sync with ' 'the Neutron database. ' ' Log warnings for any inconsistencies found so' ' that an admin can investigate \n' 'repair - during neutron-server startup, automatically' ' create resources found in Neutron but not in OVN.' ' Also remove resources from OVN' ' that are no longer in Neutron.')), cfg.BoolOpt('ovn_l3_mode', default=True, deprecated_for_removal=True, deprecated_reason="This option is no longer used. Native L3 " "support in OVN is always used.", help=_('Whether to use OVN native L3 support. Do not change ' 'the value for existing deployments that contain ' 'routers.')), cfg.StrOpt("ovn_l3_scheduler", default='leastloaded', choices=('leastloaded', 'chance'), help=_('The OVN L3 Scheduler type used to schedule router ' 'gateway ports on hypervisors/chassis. \n' 'leastloaded - chassis with fewest gateway ports ' 'selected \n' 'chance - chassis randomly selected')), cfg.BoolOpt('enable_distributed_floating_ip', default=False, help=_('Enable distributed floating IP support.\n' 'If True, the NAT action for floating IPs will be done ' 'locally and not in the centralized gateway. This ' 'saves the path to the external network. This requires ' 'the user to configure the physical network map ' '(i.e. 
ovn-bridge-mappings) on each compute node.')), cfg.StrOpt("vif_type", deprecated_for_removal=True, deprecated_reason="The port VIF type is now determined based " "on the OVN chassis information when the " "port is bound to a host.", default=portbindings.VIF_TYPE_OVS, help=_("Type of VIF to be used for ports. Valid values are " "(%(ovs)s, %(dpdk)s) default %(ovs)s") % { "ovs": portbindings.VIF_TYPE_OVS, "dpdk": portbindings.VIF_TYPE_VHOST_USER}, choices=[portbindings.VIF_TYPE_OVS, portbindings.VIF_TYPE_VHOST_USER]), cfg.StrOpt("vhost_sock_dir", default="/var/run/openvswitch", help=_("The directory in which vhost virtio socket " "is created by all the vswitch daemons")), cfg.IntOpt('dhcp_default_lease_time', default=(12 * 60 * 60), help=_('Default lease time (in seconds) to use with ' 'OVN\'s native DHCP service.')), cfg.StrOpt("ovsdb_log_level", default="INFO", choices=list(VLOG_LEVELS.keys()), help=_("The log level used for OVSDB")), cfg.BoolOpt('ovn_metadata_enabled', default=False, help=_('Whether to use metadata service.')), cfg.ListOpt('dns_servers', default=[], help=_("Comma-separated list of the DNS servers which will be " "used as forwarders if a subnet's dns_nameservers " "field is empty. If both the subnet's dns_nameservers " "and this option are empty, then the DNS resolvers on " "the host running the neutron server will be used.")), cfg.DictOpt('ovn_dhcp4_global_options', default={}, help=_("Dictionary of global DHCPv4 options which will be " "automatically set on each subnet upon creation and " "on all existing subnets when Neutron starts.\n" "An empty value for a DHCP option will cause that " "option to be unset globally.\n" "EXAMPLES:\n" "- ntp_server:1.2.3.4,wpad:1.2.3.5 - Set ntp_server " "and wpad\n" "- ntp_server:,wpad:1.2.3.5 - Unset ntp_server and " "set wpad\n" "See the ovn-nb(5) man page for available options.")), cfg.DictOpt('ovn_dhcp6_global_options', default={}, help=_("Dictionary of global DHCPv6 options which will be " "automatically set on each subnet upon creation and " "on all existing subnets when Neutron starts.\n" "An empty value for a DHCP option will cause that " "option to be unset globally.\n" "EXAMPLES:\n" "- ntp_server:1.2.3.4,wpad:1.2.3.5 - Set ntp_server " "and wpad\n" "- ntp_server:,wpad:1.2.3.5 - Unset ntp_server and " "set wpad\n" "See the ovn-nb(5) man page for available options.")), cfg.BoolOpt('ovn_emit_need_to_frag', default=False, help=_('Configure OVN to emit "need to frag" packets in ' 'case of MTU mismatch.\n' 'Before enabling this configuration make sure that ' 'it is supported by the host kernel (version >= 5.2) ' 'or by checking the output of the following command: \n' 'ovs-appctl -t ovs-vswitchd dpif/show-dp-features ' 'br-int | grep "Check pkt length action".')), ] cfg.CONF.register_opts(ovn_opts, group='ovn') ovs_conf.register_ovs_agent_opts() def list_opts(): return [ ('ovn', ovn_opts), ('ovs', ovs_conf.OPTS) ] def get_ovn_nb_connection(): return cfg.CONF.ovn.ovn_nb_connection def get_ovn_nb_private_key(): return cfg.CONF.ovn.ovn_nb_private_key def get_ovn_nb_certificate(): return cfg.CONF.ovn.ovn_nb_certificate def get_ovn_nb_ca_cert(): return cfg.CONF.ovn.ovn_nb_ca_cert def get_ovn_sb_connection(): return cfg.CONF.ovn.ovn_sb_connection def get_ovn_sb_private_key(): return cfg.CONF.ovn.ovn_sb_private_key def get_ovn_sb_certificate(): return cfg.CONF.ovn.ovn_sb_certificate def get_ovn_sb_ca_cert(): return cfg.CONF.ovn.ovn_sb_ca_cert def get_ovn_ovsdb_timeout(): return cfg.CONF.ovn.ovsdb_connection_timeout def 
get_ovn_ovsdb_retry_max_interval(): return cfg.CONF.ovn.ovsdb_retry_max_interval def get_ovn_ovsdb_probe_interval(): return cfg.CONF.ovn.ovsdb_probe_interval def get_ovn_neutron_sync_mode(): return cfg.CONF.ovn.neutron_sync_mode def is_ovn_l3(): return cfg.CONF.ovn.ovn_l3_mode def get_ovn_l3_scheduler(): return cfg.CONF.ovn.ovn_l3_scheduler def is_ovn_distributed_floating_ip(): return cfg.CONF.ovn.enable_distributed_floating_ip def get_ovn_vhost_sock_dir(): return cfg.CONF.ovn.vhost_sock_dir def get_ovn_dhcp_default_lease_time(): return cfg.CONF.ovn.dhcp_default_lease_time def get_ovn_ovsdb_log_level(): return VLOG_LEVELS[cfg.CONF.ovn.ovsdb_log_level] def is_ovn_metadata_enabled(): return cfg.CONF.ovn.ovn_metadata_enabled def get_dns_servers(): return cfg.CONF.ovn.dns_servers def get_global_dhcpv4_opts(): return cfg.CONF.ovn.ovn_dhcp4_global_options def get_global_dhcpv6_opts(): return cfg.CONF.ovn.ovn_dhcp6_global_options def is_ovn_emit_need_to_frag_enabled(): return cfg.CONF.ovn.ovn_emit_need_to_frag def is_igmp_snooping_enabled(): return cfg.CONF.OVS.igmp_snooping_enable ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/plugins/ml2/drivers/ovs_conf.py0000644000175000017500000002451600000000000025605 0ustar00coreycorey00000000000000# Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as n_const from oslo_config import cfg from neutron._i18n import _ from neutron.plugins.ml2.drivers.openvswitch.agent.common \ import constants DEFAULT_BRIDGE_MAPPINGS = [] DEFAULT_TUNNEL_TYPES = [] ovs_opts = [ cfg.StrOpt('integration_bridge', default='br-int', deprecated_name='ovs_integration_bridge', help=_("Integration bridge to use. " "Do not change this parameter unless you have a good " "reason to. This is the name of the OVS integration " "bridge. There is one per hypervisor. The integration " "bridge acts as a virtual 'patch bay'. All VM VIFs are " "attached to this bridge and then 'patched' according " "to their network connectivity.")), cfg.StrOpt('tunnel_bridge', default='br-tun', help=_("Tunnel bridge to use.")), cfg.StrOpt('int_peer_patch_port', default='patch-tun', help=_("Peer patch port in integration bridge for tunnel " "bridge.")), cfg.StrOpt('tun_peer_patch_port', default='patch-int', help=_("Peer patch port in tunnel bridge for integration " "bridge.")), cfg.IPOpt('local_ip', help=_("IP address of local overlay (tunnel) network endpoint. " "Use either an IPv4 or IPv6 address that resides on one " "of the host network interfaces. The IP version of this " "value must match the value of the 'overlay_ip_version' " "option in the ML2 plug-in configuration file on the " "neutron server node(s).")), cfg.ListOpt('bridge_mappings', default=DEFAULT_BRIDGE_MAPPINGS, help=_("Comma-separated list of <physical_network>:<bridge> " "tuples mapping physical network names to the agent's " "node-specific Open vSwitch bridge names to be used " "for flat and VLAN networks. 
The length of bridge " "names should be no more than 11. Each bridge must " "exist, and should have a physical network interface " "configured as a port. All physical networks " "configured on the server should have mappings to " "appropriate bridges on each agent. " "Note: If you remove a bridge from this " "mapping, make sure to disconnect it from the " "integration bridge as it won't be managed by the " "agent anymore.")), cfg.ListOpt('resource_provider_bandwidths', default=[], help=_("Comma-separated list of " "<bridge>:<egress_bw>:<ingress_bw> tuples, showing " "the available bandwidth for the given bridge in the " "given direction. The direction is meant from VM " "perspective. Bandwidth is measured in kilobits per " "second (kbps). The bridge must appear in " "bridge_mappings as the value. But not all bridges in " "bridge_mappings must be listed here. For a bridge not " "listed here we neither create a resource provider in " "placement nor report inventories against. An omitted " "direction means we do not report an inventory for the " "corresponding class.")), cfg.DictOpt('resource_provider_hypervisors', default={}, help=_("Mapping of bridges to hypervisors: " "<bridge>:<hypervisor>,... " "hypervisor name is used to locate the parent of the " "resource provider tree. Only needs to be set in the " "rare case when the hypervisor name is different from " "the DEFAULT.host config option value as known by the " "nova-compute managing that hypervisor.")), cfg.DictOpt('resource_provider_inventory_defaults', default={'allocation_ratio': 1.0, 'min_unit': 1, 'step_size': 1, 'reserved': 0}, help=_("Key:value pairs to specify defaults used " "while reporting resource provider inventories. " "Possible keys with their types: " "allocation_ratio:float, " "max_unit:int, min_unit:int, " "reserved:int, step_size:int, " "See also: " "https://docs.openstack.org/api-ref/placement/" "#update-resource-provider-inventories")), cfg.BoolOpt('use_veth_interconnection', default=False, help=_("Use veths instead of patch ports to interconnect the " "integration bridge to physical networks. " "Supports kernels without Open vSwitch patch port " "support as long as it is set to True.")), cfg.StrOpt('datapath_type', default=constants.OVS_DATAPATH_SYSTEM, choices=[constants.OVS_DATAPATH_SYSTEM, constants.OVS_DATAPATH_NETDEV], help=_("OVS datapath to use. 'system' is the default value and " "corresponds to the kernel datapath. To enable the " "userspace datapath set this value to 'netdev'.")), cfg.StrOpt('vhostuser_socket_dir', default=constants.VHOST_USER_SOCKET_DIR, help=_("OVS vhost-user socket directory.")), cfg.IPOpt('of_listen_address', default='127.0.0.1', help=_("Address to listen on for OpenFlow connections.")), cfg.PortOpt('of_listen_port', default=6633, help=_("Port to listen on for OpenFlow connections.")), cfg.IntOpt('of_connect_timeout', default=300, help=_("Timeout in seconds to wait for " "the local switch connecting to the controller.")), cfg.IntOpt('of_request_timeout', default=300, help=_("Timeout in seconds to wait for a single " "OpenFlow request.")), cfg.IntOpt('of_inactivity_probe', default=10, help=_("The inactivity_probe interval in seconds for the local " "switch connection to the controller. 
" "A value of 0 disables inactivity probes.")), ] agent_opts = [ cfg.BoolOpt('minimize_polling', default=True, help=_("Minimize polling by monitoring ovsdb for interface " "changes.")), cfg.IntOpt('ovsdb_monitor_respawn_interval', default=constants.DEFAULT_OVSDBMON_RESPAWN, help=_("The number of seconds to wait before respawning the " "ovsdb monitor after losing communication with it.")), cfg.ListOpt('tunnel_types', default=DEFAULT_TUNNEL_TYPES, help=_("Network types supported by the agent " "(gre, vxlan and/or geneve).")), cfg.PortOpt('vxlan_udp_port', default=n_const.VXLAN_UDP_PORT, help=_("The UDP port to use for VXLAN tunnels.")), cfg.IntOpt('veth_mtu', default=9000, help=_("MTU size of veth interfaces")), cfg.BoolOpt('l2_population', default=False, help=_("Use ML2 l2population mechanism driver to learn " "remote MAC and IPs and improve tunnel scalability.")), cfg.BoolOpt('arp_responder', default=False, help=_("Enable local ARP responder if it is supported. " "Requires OVS 2.1 and ML2 l2population driver. " "Allows the switch (when supporting an overlay) " "to respond to an ARP request locally without " "performing a costly ARP broadcast into the overlay. " "NOTE: If enable_distributed_routing is set to True " "then arp_responder will automatically be set to True " "in the agent, regardless of the setting in the config " "file.")), cfg.BoolOpt('dont_fragment', default=True, help=_("Set or un-set the don't fragment (DF) bit on " "outgoing IP packet carrying GRE/VXLAN tunnel.")), cfg.BoolOpt('enable_distributed_routing', default=False, help=_("Make the l2 agent run in DVR mode.")), cfg.BoolOpt('drop_flows_on_start', default=False, help=_("Reset flow table on start. Setting this to True will " "cause brief traffic interruption.")), cfg.BoolOpt('tunnel_csum', default=False, help=_("Set or un-set the tunnel header checksum on " "outgoing IP packet carrying GRE/VXLAN tunnel.")), cfg.BoolOpt('baremetal_smartnic', default=False, help=_("Enable the agent to process Smart NIC ports.")), cfg.BoolOpt('explicitly_egress_direct', default=False, help=_("When set to True, the accepted egress unicast " "traffic will not use action NORMAL. The accepted " "egress packets will be taken care of in the final " "egress tables direct output flows for unicast " "traffic.")), ] def register_ovs_agent_opts(cfg=cfg.CONF): cfg.register_opts(ovs_opts, "OVS") cfg.register_opts(agent_opts, "AGENT") def register_ovs_opts(cfg=cfg.CONF): cfg.register_opts(ovs_opts, "OVS") ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.239044 neutron-16.0.0.0b2.dev214/neutron/conf/policies/0000755000175000017500000000000000000000000021371 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/policies/__init__.py0000644000175000017500000000567300000000000023515 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import inspect import itertools import six from neutron.conf.policies import address_scope from neutron.conf.policies import agent from neutron.conf.policies import auto_allocated_topology from neutron.conf.policies import availability_zone from neutron.conf.policies import base from neutron.conf.policies import flavor from neutron.conf.policies import floatingip from neutron.conf.policies import floatingip_pools from neutron.conf.policies import floatingip_port_forwarding from neutron.conf.policies import l3_conntrack_helper from neutron.conf.policies import logging from neutron.conf.policies import metering from neutron.conf.policies import network from neutron.conf.policies import network_ip_availability from neutron.conf.policies import network_segment_range from neutron.conf.policies import port from neutron.conf.policies import qos from neutron.conf.policies import rbac from neutron.conf.policies import router from neutron.conf.policies import security_group from neutron.conf.policies import segment from neutron.conf.policies import service_type from neutron.conf.policies import subnet from neutron.conf.policies import subnetpool from neutron.conf.policies import trunk def list_rules(): return itertools.chain( base.list_rules(), address_scope.list_rules(), agent.list_rules(), auto_allocated_topology.list_rules(), availability_zone.list_rules(), flavor.list_rules(), floatingip.list_rules(), floatingip_pools.list_rules(), floatingip_port_forwarding.list_rules(), l3_conntrack_helper.list_rules(), logging.list_rules(), metering.list_rules(), network.list_rules(), network_ip_availability.list_rules(), network_segment_range.list_rules(), port.list_rules(), qos.list_rules(), rbac.list_rules(), router.list_rules(), security_group.list_rules(), segment.list_rules(), service_type.list_rules(), subnet.list_rules(), subnetpool.list_rules(), trunk.list_rules(), ) def reload_default_policies(): for name, module in globals().items(): if (inspect.ismodule(module) and module.__name__.startswith(__package__)): # NOTE: pylint checks function args wrongly. # pylint: disable=too-many-function-args six.moves.reload_module(module) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/policies/address_scope.py0000644000175000017500000000517700000000000024573 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
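# NOTE(editor): Tiny sanity sketch for list_rules() above; illustrative
# only, not part of neutron. The chained defaults must have unique names,
# since oslo.policy registers them all in a single namespace.
from neutron.conf import policies as _sketch_policies

_sketch_names = [_rule.name for _rule in _sketch_policies.list_rules()]
assert len(_sketch_names) == len(set(_sketch_names)), "duplicate rule names"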
from oslo_policy import policy from neutron.conf.policies import base COLLECTION_PATH = '/address-scopes' RESOURCE_PATH = '/address-scopes/{id}' rules = [ policy.RuleDefault( 'shared_address_scopes', 'field:address_scopes:shared=True', 'Definition of a shared address scope' ), policy.DocumentedRuleDefault( 'create_address_scope', base.RULE_ANY, 'Create an address scope', [ { 'method': 'POST', 'path': COLLECTION_PATH, }, ] ), policy.DocumentedRuleDefault( 'create_address_scope:shared', base.RULE_ADMIN_ONLY, 'Create a shared address scope', [ { 'method': 'POST', 'path': COLLECTION_PATH, }, ] ), policy.DocumentedRuleDefault( 'get_address_scope', base.policy_or(base.RULE_ADMIN_OR_OWNER, 'rule:shared_address_scopes'), 'Get an address scope', [ { 'method': 'GET', 'path': COLLECTION_PATH, }, { 'method': 'GET', 'path': RESOURCE_PATH, }, ] ), policy.DocumentedRuleDefault( 'update_address_scope', base.RULE_ADMIN_OR_OWNER, 'Update an address scope', [ { 'method': 'PUT', 'path': RESOURCE_PATH, }, ] ), policy.DocumentedRuleDefault( 'update_address_scope:shared', base.RULE_ADMIN_ONLY, 'Update ``shared`` attribute of an address scope', [ { 'method': 'PUT', 'path': RESOURCE_PATH, }, ] ), policy.DocumentedRuleDefault( 'delete_address_scope', base.RULE_ADMIN_OR_OWNER, 'Delete an address scope', [ { 'method': 'DELETE', 'path': RESOURCE_PATH, }, ] ), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/policies/agent.py0000644000175000017500000000742300000000000023047 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
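# NOTE(editor): Sketch of how the address-scope defaults above are consumed;
# illustrative only. base.list_rules() is registered as well because the
# DocumentedRuleDefaults reference aliases such as 'rule:admin_only' that
# the base module defines. A later enforcer.enforce('get_address_scope',
# target, creds) call would then fall back to these registered defaults.
from oslo_config import cfg as _sketch_cfg
from oslo_policy import policy as _sketch_policy
from neutron.conf.policies import address_scope as _sketch_address_scope
from neutron.conf.policies import base as _sketch_base

_sketch_conf = _sketch_cfg.ConfigOpts()
_sketch_conf([])
_sketch_enforcer = _sketch_policy.Enforcer(_sketch_conf)
_sketch_enforcer.register_defaults(_sketch_base.list_rules())
_sketch_enforcer.register_defaults(_sketch_address_scope.list_rules())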
# File: neutron-16.0.0.0b2.dev214/neutron/conf/policies/agent.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from neutron.conf.policies import base

COLLECTION_PATH = '/agents'
RESOURCE_PATH = '/agents/{id}'


rules = [
    policy.DocumentedRuleDefault(
        'get_agent',
        base.RULE_ADMIN_ONLY,
        'Get an agent',
        [{'method': 'GET', 'path': COLLECTION_PATH},
         {'method': 'GET', 'path': RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'update_agent',
        base.RULE_ADMIN_ONLY,
        'Update an agent',
        [{'method': 'PUT', 'path': RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'delete_agent',
        base.RULE_ADMIN_ONLY,
        'Delete an agent',
        [{'method': 'DELETE', 'path': RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'create_dhcp-network',
        base.RULE_ADMIN_ONLY,
        'Add a network to a DHCP agent',
        [{'method': 'POST', 'path': '/agents/{agent_id}/dhcp-networks'}]),
    policy.DocumentedRuleDefault(
        'get_dhcp-networks',
        base.RULE_ADMIN_ONLY,
        'List networks on a DHCP agent',
        [{'method': 'GET', 'path': '/agents/{agent_id}/dhcp-networks'}]),
    policy.DocumentedRuleDefault(
        'delete_dhcp-network',
        base.RULE_ADMIN_ONLY,
        'Remove a network from a DHCP agent',
        [{'method': 'DELETE',
          'path': '/agents/{agent_id}/dhcp-networks/{network_id}'}]),
    policy.DocumentedRuleDefault(
        'create_l3-router',
        base.RULE_ADMIN_ONLY,
        'Add a router to an L3 agent',
        [{'method': 'POST', 'path': '/agents/{agent_id}/l3-routers'}]),
    policy.DocumentedRuleDefault(
        'get_l3-routers',
        base.RULE_ADMIN_ONLY,
        'List routers on an L3 agent',
        [{'method': 'GET', 'path': '/agents/{agent_id}/l3-routers'}]),
    policy.DocumentedRuleDefault(
        'delete_l3-router',
        base.RULE_ADMIN_ONLY,
        'Remove a router from an L3 agent',
        [{'method': 'DELETE',
          'path': '/agents/{agent_id}/l3-routers/{router_id}'}]),
    policy.DocumentedRuleDefault(
        'get_dhcp-agents',
        base.RULE_ADMIN_ONLY,
        'List DHCP agents hosting a network',
        [{'method': 'GET', 'path': '/networks/{network_id}/dhcp-agents'}]),
    policy.DocumentedRuleDefault(
        'get_l3-agents',
        base.RULE_ADMIN_ONLY,
        'List L3 agents hosting a router',
        [{'method': 'GET', 'path': '/routers/{router_id}/l3-agents'}]),
]


def list_rules():
    return rules
# File: neutron-16.0.0.0b2.dev214/neutron/conf/policies/auto_allocated_topology.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from neutron.conf.policies import base

RESOURCE_PATH = '/auto-allocated-topology/{project_id}'


rules = [
    policy.DocumentedRuleDefault(
        'get_auto_allocated_topology',
        base.RULE_ADMIN_OR_OWNER,
        "Get a project's auto-allocated topology",
        [{'method': 'GET', 'path': RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'delete_auto_allocated_topology',
        base.RULE_ADMIN_OR_OWNER,
        "Delete a project's auto-allocated topology",
        [{'method': 'DELETE', 'path': RESOURCE_PATH}]),
]


def list_rules():
    return rules


# File: neutron-16.0.0.0b2.dev214/neutron/conf/policies/availability_zone.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from neutron.conf.policies import base


rules = [
    policy.DocumentedRuleDefault(
        'get_availability_zone',
        base.RULE_ANY,
        'List availability zones',
        [{'method': 'GET', 'path': '/availability_zones'}]),
]


def list_rules():
    return rules


# File: neutron-16.0.0.0b2.dev214/neutron/conf/policies/base.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy


def policy_and(*args):
    return ' and '.join(args)


def policy_or(*args):
    return ' or '.join(args)


# TODO(amotoki): Define these in neutron-lib once it is clear what constants
# are required by stadium and 3rd-party projects. As of now, the following
# are candidates.
RULE_ADMIN_OR_OWNER = 'rule:admin_or_owner'
RULE_ADMIN_ONLY = 'rule:admin_only'
RULE_ANY = 'rule:regular_user'
RULE_ADVSVC = 'rule:context_is_advsvc'
RULE_ADMIN_OR_NET_OWNER = 'rule:admin_or_network_owner'
RULE_ADMIN_OR_NET_OWNER_OR_ADVSVC = policy_or(RULE_ADMIN_OR_NET_OWNER,
                                              RULE_ADVSVC)
RULE_ADMIN_OR_PARENT_OWNER = 'rule:admin_or_ext_parent_owner'


rules = [
    policy.RuleDefault(
        'context_is_admin',
        'role:admin',
        description='Rule for cloud admin access'),
    policy.RuleDefault(
        'owner',
        'tenant_id:%(tenant_id)s',
        description='Rule for resource owner access'),
    policy.RuleDefault(
        'admin_or_owner',
        policy_or('rule:context_is_admin', 'rule:owner'),
        description='Rule for admin or owner access'),
    policy.RuleDefault(
        'context_is_advsvc',
        'role:advsvc',
        description='Rule for advsvc role access'),
    policy.RuleDefault(
        'admin_or_network_owner',
        policy_or('rule:context_is_admin',
                  'tenant_id:%(network:tenant_id)s'),
        description='Rule for admin or network owner access'),
    policy.RuleDefault(
        'admin_owner_or_network_owner',
        policy_or('rule:owner', RULE_ADMIN_OR_NET_OWNER),
        description=('Rule for resource owner, '
                     'admin or network owner access')),
    policy.RuleDefault(
        'admin_only',
        'rule:context_is_admin',
        description='Rule for admin-only access'),
    policy.RuleDefault(
        'regular_user',
        '',
        description='Rule for regular user access'),
    # TODO(amotoki): Should be renamed to shared_network? It seems clearer.
    policy.RuleDefault(
        'shared',
        'field:networks:shared=True',
        description='Rule of shared network'),
    policy.RuleDefault(
        'default',
        RULE_ADMIN_OR_OWNER,
        description='Default access rule'),
    policy.RuleDefault(
        'admin_or_ext_parent_owner',
        policy_or('rule:context_is_admin',
                  'tenant_id:%(ext_parent:tenant_id)s'),
        description='Rule for common parent owner check'),
]


def list_rules():
    return rules
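
# Illustrative sketch (not part of the tree): policy_and/policy_or above are
# plain string helpers, so composing rules is just check-string
# concatenation; nothing is parsed until oslo.policy loads the rules.
if __name__ == '__main__':
    # Prints: rule:context_is_admin or rule:owner
    print(policy_or('rule:context_is_admin', 'rule:owner'))
    # Prints: rule:admin_only and (not field:rbac_policy:target_tenant=*)
    print(policy_and('rule:admin_only',
                     '(not field:rbac_policy:target_tenant=*)'))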
# File: neutron-16.0.0.0b2.dev214/neutron/conf/policies/flavor.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from neutron.conf.policies import base

FLAVOR_COLLECTION_PATH = '/flavors'
FLAVOR_RESOURCE_PATH = '/flavors/{id}'
PROFILE_COLLECTION_PATH = '/service_profiles'
PROFILE_RESOURCE_PATH = '/service_profiles/{id}'
ASSOC_COLLECTION_PATH = '/flavors/{flavor_id}/service_profiles'
ASSOC_RESOURCE_PATH = '/flavors/{flavor_id}/service_profiles/{profile_id}'


rules = [
    policy.DocumentedRuleDefault(
        'create_flavor',
        base.RULE_ADMIN_ONLY,
        'Create a flavor',
        [{'method': 'POST', 'path': FLAVOR_COLLECTION_PATH}]),
    policy.DocumentedRuleDefault(
        'get_flavor',
        base.RULE_ANY,
        'Get a flavor',
        [{'method': 'GET', 'path': FLAVOR_COLLECTION_PATH},
         {'method': 'GET', 'path': FLAVOR_RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'update_flavor',
        base.RULE_ADMIN_ONLY,
        'Update a flavor',
        [{'method': 'PUT', 'path': FLAVOR_RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'delete_flavor',
        base.RULE_ADMIN_ONLY,
        'Delete a flavor',
        [{'method': 'DELETE', 'path': FLAVOR_RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'create_service_profile',
        base.RULE_ADMIN_ONLY,
        'Create a service profile',
        [{'method': 'POST', 'path': PROFILE_COLLECTION_PATH}]),
    policy.DocumentedRuleDefault(
        'get_service_profile',
        base.RULE_ADMIN_ONLY,
        'Get a service profile',
        [{'method': 'GET', 'path': PROFILE_COLLECTION_PATH},
         {'method': 'GET', 'path': PROFILE_RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'update_service_profile',
        base.RULE_ADMIN_ONLY,
        'Update a service profile',
        [{'method': 'PUT', 'path': PROFILE_RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'delete_service_profile',
        base.RULE_ADMIN_ONLY,
        'Delete a service profile',
        [{'method': 'DELETE', 'path': PROFILE_RESOURCE_PATH}]),
    policy.RuleDefault(
        'get_flavor_service_profile',
        base.RULE_ANY,
        ('Get a flavor associated with a given service profile. '
         'There is no corresponding GET operation in the API currently. '
         'This rule is currently referred to only in the DELETE '
         'of flavor_service_profile.')),
    policy.DocumentedRuleDefault(
        'create_flavor_service_profile',
        base.RULE_ADMIN_ONLY,
        'Associate a flavor with a service profile',
        [{'method': 'POST', 'path': ASSOC_COLLECTION_PATH}]),
    policy.DocumentedRuleDefault(
        'delete_flavor_service_profile',
        base.RULE_ADMIN_ONLY,
        'Disassociate a flavor from a service profile',
        [{'method': 'DELETE', 'path': ASSOC_RESOURCE_PATH}]),
]


def list_rules():
    return rules
# File: neutron-16.0.0.0b2.dev214/neutron/conf/policies/floatingip.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from neutron.conf.policies import base

COLLECTION_PATH = '/floatingips'
RESOURCE_PATH = '/floatingips/{id}'


rules = [
    policy.DocumentedRuleDefault(
        'create_floatingip',
        base.RULE_ANY,
        'Create a floating IP',
        [{'method': 'POST', 'path': COLLECTION_PATH}]),
    policy.DocumentedRuleDefault(
        'create_floatingip:floating_ip_address',
        base.RULE_ADMIN_ONLY,
        'Create a floating IP with a specific IP address',
        [{'method': 'POST', 'path': COLLECTION_PATH}]),
    policy.DocumentedRuleDefault(
        'get_floatingip',
        base.RULE_ADMIN_OR_OWNER,
        'Get a floating IP',
        [{'method': 'GET', 'path': COLLECTION_PATH},
         {'method': 'GET', 'path': RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'update_floatingip',
        base.RULE_ADMIN_OR_OWNER,
        'Update a floating IP',
        [{'method': 'PUT', 'path': RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'delete_floatingip',
        base.RULE_ADMIN_OR_OWNER,
        'Delete a floating IP',
        [{'method': 'DELETE', 'path': RESOURCE_PATH}]),
]


def list_rules():
    return rules


# File: neutron-16.0.0.0b2.dev214/neutron/conf/policies/floatingip_pools.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from neutron.conf.policies import base


rules = [
    policy.DocumentedRuleDefault(
        'get_floatingip_pool',
        base.RULE_ANY,
        'Get floating IP pools',
        [{'method': 'GET', 'path': '/floatingip_pools'}]),
]


def list_rules():
    return rules
# File: neutron-16.0.0.0b2.dev214/neutron/conf/policies/floatingip_port_forwarding.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from neutron.conf.policies import base

COLLECTION_PATH = '/floatingips/{floatingip_id}/port_forwardings'
RESOURCE_PATH = ('/floatingips/{floatingip_id}'
                 '/port_forwardings/{port_forwarding_id}')


rules = [
    policy.DocumentedRuleDefault(
        'create_floatingip_port_forwarding',
        base.RULE_ADMIN_OR_PARENT_OWNER,
        'Create a floating IP port forwarding',
        [{'method': 'POST', 'path': COLLECTION_PATH}]),
    policy.DocumentedRuleDefault(
        'get_floatingip_port_forwarding',
        base.RULE_ADMIN_OR_PARENT_OWNER,
        'Get a floating IP port forwarding',
        [{'method': 'GET', 'path': COLLECTION_PATH},
         {'method': 'GET', 'path': RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'update_floatingip_port_forwarding',
        base.RULE_ADMIN_OR_PARENT_OWNER,
        'Update a floating IP port forwarding',
        [{'method': 'PUT', 'path': RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'delete_floatingip_port_forwarding',
        base.RULE_ADMIN_OR_PARENT_OWNER,
        'Delete a floating IP port forwarding',
        [{'method': 'DELETE', 'path': RESOURCE_PATH}]),
]


def list_rules():
    return rules


# File: neutron-16.0.0.0b2.dev214/neutron/conf/policies/l3_conntrack_helper.py

# Copyright (c) 2019 Red Hat, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from neutron.conf.policies import base

COLLECTION_PATH = '/routers/{router_id}/conntrack_helpers'
RESOURCE_PATH = ('/routers/{router_id}'
                 '/conntrack_helpers/{conntrack_helper_id}')


rules = [
    policy.DocumentedRuleDefault(
        'create_router_conntrack_helper',
        base.RULE_ADMIN_OR_PARENT_OWNER,
        'Create a router conntrack helper',
        [{'method': 'POST', 'path': COLLECTION_PATH}]),
    policy.DocumentedRuleDefault(
        'get_router_conntrack_helper',
        base.RULE_ADMIN_OR_PARENT_OWNER,
        'Get a router conntrack helper',
        [{'method': 'GET', 'path': COLLECTION_PATH},
         {'method': 'GET', 'path': RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'update_router_conntrack_helper',
        base.RULE_ADMIN_OR_PARENT_OWNER,
        'Update a router conntrack helper',
        [{'method': 'PUT', 'path': RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'delete_router_conntrack_helper',
        base.RULE_ADMIN_OR_PARENT_OWNER,
        'Delete a router conntrack helper',
        [{'method': 'DELETE', 'path': RESOURCE_PATH}]),
]


def list_rules():
    return rules
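
# Illustrative sketch (not part of the tree): RULE_ADMIN_OR_PARENT_OWNER used
# above resolves to 'rule:admin_or_ext_parent_owner', i.e.
# 'rule:context_is_admin or tenant_id:%(ext_parent:tenant_id)s', so the call
# passes when the target carries the parent router's tenant under the
# 'ext_parent:tenant_id' key. The creds/target shapes are assumptions.
if __name__ == '__main__':
    from oslo_config import cfg
    from oslo_policy import policy as oslo_policy

    enforcer = oslo_policy.Enforcer(cfg.CONF)
    enforcer.register_defaults(base.list_rules() + list_rules())
    creds = {'user_id': 'user1', 'tenant_id': 'tenant1', 'roles': []}
    target = {'ext_parent:tenant_id': 'tenant1'}
    # True: the caller owns the parent router.
    print(enforcer.enforce('create_router_conntrack_helper', target, creds))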
# File: neutron-16.0.0.0b2.dev214/neutron/conf/policies/logging.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from neutron.conf.policies import base

COLLECTION_PATH = '/log/logs'
RESOURCE_PATH = '/log/logs/{id}'


rules = [
    policy.DocumentedRuleDefault(
        'get_loggable_resource',
        base.RULE_ADMIN_ONLY,
        'Get loggable resources',
        [{'method': 'GET', 'path': '/log/loggable-resources'}]),
    policy.DocumentedRuleDefault(
        'create_log',
        base.RULE_ADMIN_ONLY,
        'Create a network log',
        [{'method': 'POST', 'path': COLLECTION_PATH}]),
    policy.DocumentedRuleDefault(
        'get_log',
        base.RULE_ADMIN_ONLY,
        'Get a network log',
        [{'method': 'GET', 'path': COLLECTION_PATH},
         {'method': 'GET', 'path': RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'update_log',
        base.RULE_ADMIN_ONLY,
        'Update a network log',
        [{'method': 'PUT', 'path': RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'delete_log',
        base.RULE_ADMIN_ONLY,
        'Delete a network log',
        [{'method': 'DELETE', 'path': RESOURCE_PATH}]),
]


def list_rules():
    return rules
# File: neutron-16.0.0.0b2.dev214/neutron/conf/policies/metering.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from neutron.conf.policies import base

LABEL_COLLECTION_PATH = '/metering/metering-labels'
LABEL_RESOURCE_PATH = '/metering/metering-labels/{id}'
RULE_COLLECTION_PATH = '/metering/metering-label-rules'
RULE_RESOURCE_PATH = '/metering/metering-label-rules/{id}'


rules = [
    policy.DocumentedRuleDefault(
        'create_metering_label',
        base.RULE_ADMIN_ONLY,
        'Create a metering label',
        [{'method': 'POST', 'path': LABEL_COLLECTION_PATH}]),
    policy.DocumentedRuleDefault(
        'get_metering_label',
        base.RULE_ADMIN_ONLY,
        'Get a metering label',
        [{'method': 'GET', 'path': LABEL_COLLECTION_PATH},
         {'method': 'GET', 'path': LABEL_RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'delete_metering_label',
        base.RULE_ADMIN_ONLY,
        'Delete a metering label',
        [{'method': 'DELETE', 'path': LABEL_RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'create_metering_label_rule',
        base.RULE_ADMIN_ONLY,
        'Create a metering label rule',
        [{'method': 'POST', 'path': RULE_COLLECTION_PATH}]),
    policy.DocumentedRuleDefault(
        'get_metering_label_rule',
        base.RULE_ADMIN_ONLY,
        'Get a metering label rule',
        [{'method': 'GET', 'path': RULE_COLLECTION_PATH},
         {'method': 'GET', 'path': RULE_RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'delete_metering_label_rule',
        base.RULE_ADMIN_ONLY,
        'Delete a metering label rule',
        [{'method': 'DELETE', 'path': RULE_RESOURCE_PATH}]),
]


def list_rules():
    return rules
# File: neutron-16.0.0.0b2.dev214/neutron/conf/policies/network.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from neutron.conf.policies import base

COLLECTION_PATH = '/networks'
RESOURCE_PATH = '/networks/{id}'

ACTION_POST = [
    {'method': 'POST', 'path': COLLECTION_PATH},
]
ACTION_PUT = [
    {'method': 'PUT', 'path': RESOURCE_PATH},
]
ACTION_DELETE = [
    {'method': 'DELETE', 'path': RESOURCE_PATH},
]
ACTION_GET = [
    {'method': 'GET', 'path': COLLECTION_PATH},
    {'method': 'GET', 'path': RESOURCE_PATH},
]


rules = [
    policy.RuleDefault(
        'external',
        'field:networks:router:external=True',
        'Definition of an external network'),
    policy.DocumentedRuleDefault(
        'create_network',
        base.RULE_ANY,
        'Create a network',
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_network:shared',
        base.RULE_ADMIN_ONLY,
        'Create a shared network',
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_network:router:external',
        base.RULE_ADMIN_ONLY,
        'Create an external network',
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_network:is_default',
        base.RULE_ADMIN_ONLY,
        'Specify ``is_default`` attribute when creating a network',
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_network:port_security_enabled',
        base.RULE_ANY,
        'Specify ``port_security_enabled`` attribute when creating a network',
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_network:segments',
        base.RULE_ADMIN_ONLY,
        'Specify ``segments`` attribute when creating a network',
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_network:provider:network_type',
        base.RULE_ADMIN_ONLY,
        'Specify ``provider:network_type`` when creating a network',
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_network:provider:physical_network',
        base.RULE_ADMIN_ONLY,
        'Specify ``provider:physical_network`` when creating a network',
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_network:provider:segmentation_id',
        base.RULE_ADMIN_ONLY,
        'Specify ``provider:segmentation_id`` when creating a network',
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'get_network',
        base.policy_or(base.RULE_ADMIN_OR_OWNER,
                       'rule:shared',
                       'rule:external',
                       base.RULE_ADVSVC),
        'Get a network',
        ACTION_GET),
    policy.DocumentedRuleDefault(
        'get_network:router:external',
        base.RULE_ANY,
        'Get ``router:external`` attribute of a network',
        ACTION_GET),
    policy.DocumentedRuleDefault(
        'get_network:segments',
        base.RULE_ADMIN_ONLY,
        'Get ``segments`` attribute of a network',
        ACTION_GET),
    policy.DocumentedRuleDefault(
        'get_network:provider:network_type',
        base.RULE_ADMIN_ONLY,
        'Get ``provider:network_type`` attribute of a network',
        ACTION_GET),
    policy.DocumentedRuleDefault(
        'get_network:provider:physical_network',
        base.RULE_ADMIN_ONLY,
        'Get ``provider:physical_network`` attribute of a network',
        ACTION_GET),
    policy.DocumentedRuleDefault(
        'get_network:provider:segmentation_id',
        base.RULE_ADMIN_ONLY,
        'Get ``provider:segmentation_id`` attribute of a network',
        ACTION_GET),
    policy.DocumentedRuleDefault(
        'update_network',
        base.RULE_ADMIN_OR_OWNER,
        'Update a network',
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_network:segments',
        base.RULE_ADMIN_ONLY,
        'Update ``segments`` attribute of a network',
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_network:shared',
        base.RULE_ADMIN_ONLY,
        'Update ``shared`` attribute of a network',
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_network:provider:network_type',
        base.RULE_ADMIN_ONLY,
        'Update ``provider:network_type`` attribute of a network',
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_network:provider:physical_network',
        base.RULE_ADMIN_ONLY,
        'Update ``provider:physical_network`` attribute of a network',
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_network:provider:segmentation_id',
        base.RULE_ADMIN_ONLY,
        'Update ``provider:segmentation_id`` attribute of a network',
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_network:router:external',
        base.RULE_ADMIN_ONLY,
        'Update ``router:external`` attribute of a network',
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_network:is_default',
        base.RULE_ADMIN_ONLY,
        'Update ``is_default`` attribute of a network',
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_network:port_security_enabled',
        base.RULE_ADMIN_OR_OWNER,
        'Update ``port_security_enabled`` attribute of a network',
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'delete_network',
        base.RULE_ADMIN_OR_OWNER,
        'Delete a network',
        ACTION_DELETE),
]


def list_rules():
    return rules


# File: neutron-16.0.0.0b2.dev214/neutron/conf/policies/network_ip_availability.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from neutron.conf.policies import base


rules = [
    policy.DocumentedRuleDefault(
        'get_network_ip_availability',
        base.RULE_ADMIN_ONLY,
        'Get network IP availability',
        [{'method': 'GET', 'path': '/network-ip-availabilities'},
         {'method': 'GET',
          'path': '/network-ip-availabilities/{network_id}'}]),
]


def list_rules():
    return rules
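
# Illustrative sketch (not part of the tree): neutron scopes attribute-level
# policies with an 'action:attribute' (or 'action:attr:subattr') name, as in
# 'create_network:provider:network_type' in the network module above. A
# caller allowed to create a network can still be denied an admin-only
# attribute. The creds/target shapes are assumptions.
if __name__ == '__main__':
    from oslo_config import cfg
    from oslo_policy import policy as oslo_policy

    from neutron.conf.policies import base as base_policies
    from neutron.conf.policies import network as network_policies

    enforcer = oslo_policy.Enforcer(cfg.CONF)
    enforcer.register_defaults(base_policies.list_rules() +
                               network_policies.list_rules())
    creds = {'user_id': 'user1', 'tenant_id': 'tenant1', 'roles': []}
    print(enforcer.enforce('create_network', {}, creds))  # True
    print(enforcer.enforce('create_network:provider:network_type',
                           {}, creds))  # False: rule:admin_only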
# File: neutron-16.0.0.0b2.dev214/neutron/conf/policies/network_segment_range.py

# Copyright (c) 2019 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from neutron.conf.policies import base

COLLECTION_PATH = '/network_segment_ranges'
RESOURCE_PATH = '/network_segment_ranges/{id}'


rules = [
    policy.DocumentedRuleDefault(
        'create_network_segment_range',
        base.RULE_ADMIN_ONLY,
        'Create a network segment range',
        [{'method': 'POST', 'path': COLLECTION_PATH}]),
    policy.DocumentedRuleDefault(
        'get_network_segment_range',
        base.RULE_ADMIN_ONLY,
        'Get a network segment range',
        [{'method': 'GET', 'path': COLLECTION_PATH},
         {'method': 'GET', 'path': RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'update_network_segment_range',
        base.RULE_ADMIN_ONLY,
        'Update a network segment range',
        [{'method': 'PUT', 'path': RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'delete_network_segment_range',
        base.RULE_ADMIN_ONLY,
        'Delete a network segment range',
        [{'method': 'DELETE', 'path': RESOURCE_PATH}]),
]


def list_rules():
    return rules


# File: neutron-16.0.0.0b2.dev214/neutron/conf/policies/port.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from neutron.conf.policies import base

COLLECTION_PATH = '/ports'
RESOURCE_PATH = '/ports/{id}'

ACTION_POST = [
    {'method': 'POST', 'path': COLLECTION_PATH},
]
ACTION_PUT = [
    {'method': 'PUT', 'path': RESOURCE_PATH},
]
ACTION_DELETE = [
    {'method': 'DELETE', 'path': RESOURCE_PATH},
]
ACTION_GET = [
    {'method': 'GET', 'path': COLLECTION_PATH},
    {'method': 'GET', 'path': RESOURCE_PATH},
]


rules = [
    policy.RuleDefault(
        'network_device',
        'field:port:device_owner=~^network:',
        'Definition of port with network device_owner'),
    policy.RuleDefault(
        'admin_or_data_plane_int',
        base.policy_or('rule:context_is_admin',
                       'role:data_plane_integrator'),
        'Rule for data plane integration'),
    policy.DocumentedRuleDefault(
        'create_port',
        base.RULE_ANY,
        'Create a port',
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_port:device_owner',
        base.policy_or('not rule:network_device',
                       base.RULE_ADVSVC,
                       base.RULE_ADMIN_OR_NET_OWNER),
        'Specify ``device_owner`` attribute when creating a port',
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_port:mac_address',
        base.policy_or(base.RULE_ADVSVC, base.RULE_ADMIN_OR_NET_OWNER),
        'Specify ``mac_address`` attribute when creating a port',
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_port:fixed_ips',
        base.policy_or(base.RULE_ADVSVC,
                       base.RULE_ADMIN_OR_NET_OWNER,
                       'rule:shared'),
        'Specify ``fixed_ips`` information when creating a port',
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_port:fixed_ips:ip_address',
        base.policy_or(base.RULE_ADVSVC, base.RULE_ADMIN_OR_NET_OWNER),
        'Specify IP address in ``fixed_ips`` when creating a port',
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_port:fixed_ips:subnet_id',
        base.policy_or(base.RULE_ADVSVC,
                       base.RULE_ADMIN_OR_NET_OWNER,
                       'rule:shared'),
        'Specify subnet ID in ``fixed_ips`` when creating a port',
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_port:port_security_enabled',
        base.policy_or(base.RULE_ADVSVC, base.RULE_ADMIN_OR_NET_OWNER),
        'Specify ``port_security_enabled`` attribute when creating a port',
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_port:binding:host_id',
        base.RULE_ADMIN_ONLY,
        'Specify ``binding:host_id`` attribute when creating a port',
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_port:binding:profile',
        base.RULE_ADMIN_ONLY,
        'Specify ``binding:profile`` attribute when creating a port',
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_port:binding:vnic_type',
        base.RULE_ANY,
        'Specify ``binding:vnic_type`` attribute when creating a port',
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_port:allowed_address_pairs',
        base.RULE_ADMIN_OR_NET_OWNER,
        'Specify ``allowed_address_pairs`` attribute when creating a port',
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_port:allowed_address_pairs:mac_address',
        base.RULE_ADMIN_OR_NET_OWNER,
        ('Specify ``mac_address`` of ``allowed_address_pairs`` '
         'attribute when creating a port'),
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_port:allowed_address_pairs:ip_address',
        base.RULE_ADMIN_OR_NET_OWNER,
        ('Specify ``ip_address`` of ``allowed_address_pairs`` '
         'attribute when creating a port'),
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'get_port',
        base.policy_or(base.RULE_ADVSVC,
                       'rule:admin_owner_or_network_owner'),
        'Get a port',
        ACTION_GET),
    policy.DocumentedRuleDefault(
        'get_port:binding:vif_type',
        base.RULE_ADMIN_ONLY,
        'Get ``binding:vif_type`` attribute of a port',
        ACTION_GET),
    policy.DocumentedRuleDefault(
        'get_port:binding:vif_details',
        base.RULE_ADMIN_ONLY,
        'Get ``binding:vif_details`` attribute of a port',
        ACTION_GET),
    policy.DocumentedRuleDefault(
        'get_port:binding:host_id',
        base.RULE_ADMIN_ONLY,
        'Get ``binding:host_id`` attribute of a port',
        ACTION_GET),
    policy.DocumentedRuleDefault(
        'get_port:binding:profile',
        base.RULE_ADMIN_ONLY,
        'Get ``binding:profile`` attribute of a port',
        ACTION_GET),
    policy.DocumentedRuleDefault(
        'get_port:resource_request',
        base.RULE_ADMIN_ONLY,
        'Get ``resource_request`` attribute of a port',
        ACTION_GET),
    # TODO(amotoki): Add get_port:binding:vnic_type
    # TODO(amotoki): Add get_port:binding:data_plane_status
    policy.DocumentedRuleDefault(
        'update_port',
        base.policy_or(base.RULE_ADMIN_OR_OWNER, base.RULE_ADVSVC),
        'Update a port',
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_port:device_owner',
        base.policy_or('not rule:network_device',
                       base.RULE_ADVSVC,
                       base.RULE_ADMIN_OR_NET_OWNER),
        'Update ``device_owner`` attribute of a port',
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_port:mac_address',
        base.policy_or(base.RULE_ADMIN_ONLY, base.RULE_ADVSVC),
        'Update ``mac_address`` attribute of a port',
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_port:fixed_ips',
        base.policy_or(base.RULE_ADVSVC, base.RULE_ADMIN_OR_NET_OWNER),
        'Specify ``fixed_ips`` information when updating a port',
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_port:fixed_ips:ip_address',
        base.policy_or(base.RULE_ADVSVC, base.RULE_ADMIN_OR_NET_OWNER),
        ('Specify IP address in ``fixed_ips`` information when '
         'updating a port'),
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_port:fixed_ips:subnet_id',
        base.policy_or(base.RULE_ADVSVC,
                       base.RULE_ADMIN_OR_NET_OWNER,
                       'rule:shared'),
        ('Specify subnet ID in ``fixed_ips`` information when '
         'updating a port'),
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_port:port_security_enabled',
        base.policy_or(base.RULE_ADVSVC, base.RULE_ADMIN_OR_NET_OWNER),
        'Update ``port_security_enabled`` attribute of a port',
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_port:binding:host_id',
        base.RULE_ADMIN_ONLY,
        'Update ``binding:host_id`` attribute of a port',
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_port:binding:profile',
        base.RULE_ADMIN_ONLY,
        'Update ``binding:profile`` attribute of a port',
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_port:binding:vnic_type',
        base.policy_or(base.RULE_ADMIN_OR_OWNER, base.RULE_ADVSVC),
        'Update ``binding:vnic_type`` attribute of a port',
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_port:allowed_address_pairs',
        base.RULE_ADMIN_OR_NET_OWNER,
        'Update ``allowed_address_pairs`` attribute of a port',
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_port:allowed_address_pairs:mac_address',
        base.RULE_ADMIN_OR_NET_OWNER,
        ('Update ``mac_address`` of ``allowed_address_pairs`` '
         'attribute of a port'),
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_port:allowed_address_pairs:ip_address',
        base.RULE_ADMIN_OR_NET_OWNER,
        ('Update ``ip_address`` of ``allowed_address_pairs`` '
         'attribute of a port'),
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_port:data_plane_status',
        'rule:admin_or_data_plane_int',
        'Update ``data_plane_status`` attribute of a port',
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'delete_port',
        base.policy_or(base.RULE_ADVSVC,
                       'rule:admin_owner_or_network_owner'),
        'Delete a port',
        ACTION_DELETE),
]


def list_rules():
    return rules
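
# Illustrative sketch (not part of the tree): get_port above ORs
# rule:context_is_advsvc with rule:admin_owner_or_network_owner, so the owner
# of the port's network can read a port it does not own, provided the target
# carries 'network:tenant_id'. The creds/target shapes are assumptions.
if __name__ == '__main__':
    from oslo_config import cfg
    from oslo_policy import policy as oslo_policy

    enforcer = oslo_policy.Enforcer(cfg.CONF)
    enforcer.register_defaults(base.list_rules() + list_rules())
    creds = {'user_id': 'user1', 'tenant_id': 'net-owner', 'roles': []}
    target = {'tenant_id': 'someone-else', 'network:tenant_id': 'net-owner'}
    print(enforcer.enforce('get_port', target, creds))  # True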
# File: neutron-16.0.0.0b2.dev214/neutron/conf/policies/qos.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from neutron.conf.policies import base


rules = [
    policy.DocumentedRuleDefault(
        'get_policy',
        base.RULE_ANY,
        'Get QoS policies',
        [{'method': 'GET', 'path': '/qos/policies'},
         {'method': 'GET', 'path': '/qos/policies/{id}'}]),
    policy.DocumentedRuleDefault(
        'create_policy',
        base.RULE_ADMIN_ONLY,
        'Create a QoS policy',
        [{'method': 'POST', 'path': '/qos/policies'}]),
    policy.DocumentedRuleDefault(
        'update_policy',
        base.RULE_ADMIN_ONLY,
        'Update a QoS policy',
        [{'method': 'PUT', 'path': '/qos/policies/{id}'}]),
    policy.DocumentedRuleDefault(
        'delete_policy',
        base.RULE_ADMIN_ONLY,
        'Delete a QoS policy',
        [{'method': 'DELETE', 'path': '/qos/policies/{id}'}]),
    policy.DocumentedRuleDefault(
        'get_rule_type',
        base.RULE_ANY,
        'Get available QoS rule types',
        [{'method': 'GET', 'path': '/qos/rule-types'},
         {'method': 'GET', 'path': '/qos/rule-types/{rule_type}'}]),
    policy.DocumentedRuleDefault(
        'get_policy_bandwidth_limit_rule',
        base.RULE_ANY,
        'Get a QoS bandwidth limit rule',
        [{'method': 'GET',
          'path': '/qos/policies/{policy_id}/bandwidth_limit_rules'},
         {'method': 'GET',
          'path': ('/qos/policies/{policy_id}/'
                   'bandwidth_limit_rules/{rule_id}')}]),
    policy.DocumentedRuleDefault(
        'create_policy_bandwidth_limit_rule',
        base.RULE_ADMIN_ONLY,
        'Create a QoS bandwidth limit rule',
        [{'method': 'POST',
          'path': '/qos/policies/{policy_id}/bandwidth_limit_rules'}]),
    policy.DocumentedRuleDefault(
        'update_policy_bandwidth_limit_rule',
        base.RULE_ADMIN_ONLY,
        'Update a QoS bandwidth limit rule',
        [{'method': 'PUT',
          'path': ('/qos/policies/{policy_id}/'
                   'bandwidth_limit_rules/{rule_id}')}]),
    policy.DocumentedRuleDefault(
        'delete_policy_bandwidth_limit_rule',
        base.RULE_ADMIN_ONLY,
        'Delete a QoS bandwidth limit rule',
        [{'method': 'DELETE',
          'path': ('/qos/policies/{policy_id}/'
                   'bandwidth_limit_rules/{rule_id}')}]),
    policy.DocumentedRuleDefault(
        'get_policy_dscp_marking_rule',
        base.RULE_ANY,
        'Get a QoS DSCP marking rule',
        [{'method': 'GET',
          'path': '/qos/policies/{policy_id}/dscp_marking_rules'},
         {'method': 'GET',
          'path': ('/qos/policies/{policy_id}/'
                   'dscp_marking_rules/{rule_id}')}]),
    policy.DocumentedRuleDefault(
        'create_policy_dscp_marking_rule',
        base.RULE_ADMIN_ONLY,
        'Create a QoS DSCP marking rule',
        [{'method': 'POST',
          'path': '/qos/policies/{policy_id}/dscp_marking_rules'}]),
    policy.DocumentedRuleDefault(
        'update_policy_dscp_marking_rule',
        base.RULE_ADMIN_ONLY,
        'Update a QoS DSCP marking rule',
        [{'method': 'PUT',
          'path': ('/qos/policies/{policy_id}/'
                   'dscp_marking_rules/{rule_id}')}]),
    policy.DocumentedRuleDefault(
        'delete_policy_dscp_marking_rule',
        base.RULE_ADMIN_ONLY,
        'Delete a QoS DSCP marking rule',
        [{'method': 'DELETE',
          'path': ('/qos/policies/{policy_id}/'
                   'dscp_marking_rules/{rule_id}')}]),
    policy.DocumentedRuleDefault(
        'get_policy_minimum_bandwidth_rule',
        base.RULE_ANY,
        'Get a QoS minimum bandwidth rule',
        [{'method': 'GET',
          'path': '/qos/policies/{policy_id}/minimum_bandwidth_rules'},
         {'method': 'GET',
          'path': ('/qos/policies/{policy_id}/'
                   'minimum_bandwidth_rules/{rule_id}')}]),
    policy.DocumentedRuleDefault(
        'create_policy_minimum_bandwidth_rule',
        base.RULE_ADMIN_ONLY,
        'Create a QoS minimum bandwidth rule',
        [{'method': 'POST',
          'path': '/qos/policies/{policy_id}/minimum_bandwidth_rules'}]),
    policy.DocumentedRuleDefault(
        'update_policy_minimum_bandwidth_rule',
        base.RULE_ADMIN_ONLY,
        'Update a QoS minimum bandwidth rule',
        [{'method': 'PUT',
          'path': ('/qos/policies/{policy_id}/'
                   'minimum_bandwidth_rules/{rule_id}')}]),
    policy.DocumentedRuleDefault(
        'delete_policy_minimum_bandwidth_rule',
        base.RULE_ADMIN_ONLY,
        'Delete a QoS minimum bandwidth rule',
        [{'method': 'DELETE',
          'path': ('/qos/policies/{policy_id}/'
                   'minimum_bandwidth_rules/{rule_id}')}]),
    policy.DocumentedRuleDefault(
        'get_alias_bandwidth_limit_rule',
        'rule:get_policy_bandwidth_limit_rule',
        'Get a QoS bandwidth limit rule through alias',
        [{'method': 'GET',
          'path': '/qos/alias_bandwidth_limit_rules/{rule_id}/'}]),
    policy.DocumentedRuleDefault(
        'update_alias_bandwidth_limit_rule',
        'rule:update_policy_bandwidth_limit_rule',
        'Update a QoS bandwidth limit rule through alias',
        [{'method': 'PUT',
          'path': '/qos/alias_bandwidth_limit_rules/{rule_id}/'}]),
    policy.DocumentedRuleDefault(
        'delete_alias_bandwidth_limit_rule',
        'rule:delete_policy_bandwidth_limit_rule',
        'Delete a QoS bandwidth limit rule through alias',
        [{'method': 'DELETE',
          'path': '/qos/alias_bandwidth_limit_rules/{rule_id}/'}]),
    policy.DocumentedRuleDefault(
        'get_alias_dscp_marking_rule',
        'rule:get_policy_dscp_marking_rule',
        'Get a QoS DSCP marking rule through alias',
        [{'method': 'GET',
          'path': '/qos/alias_dscp_marking_rules/{rule_id}/'}]),
    policy.DocumentedRuleDefault(
        'update_alias_dscp_marking_rule',
        'rule:update_policy_dscp_marking_rule',
        'Update a QoS DSCP marking rule through alias',
        [{'method': 'PUT',
          'path': '/qos/alias_dscp_marking_rules/{rule_id}/'}]),
    policy.DocumentedRuleDefault(
        'delete_alias_dscp_marking_rule',
        'rule:delete_policy_dscp_marking_rule',
        'Delete a QoS DSCP marking rule through alias',
        [{'method': 'DELETE',
          'path': '/qos/alias_dscp_marking_rules/{rule_id}/'}]),
    policy.DocumentedRuleDefault(
        'get_alias_minimum_bandwidth_rule',
        'rule:get_policy_minimum_bandwidth_rule',
        'Get a QoS minimum bandwidth rule through alias',
        [{'method': 'GET',
          'path': '/qos/alias_minimum_bandwidth_rules/{rule_id}/'}]),
    policy.DocumentedRuleDefault(
        'update_alias_minimum_bandwidth_rule',
        'rule:update_policy_minimum_bandwidth_rule',
        'Update a QoS minimum bandwidth rule through alias',
        [{'method': 'PUT',
          'path': '/qos/alias_minimum_bandwidth_rules/{rule_id}/'}]),
    policy.DocumentedRuleDefault(
        'delete_alias_minimum_bandwidth_rule',
        'rule:delete_policy_minimum_bandwidth_rule',
        'Delete a QoS minimum bandwidth rule through alias',
        [{'method': 'DELETE',
          'path': '/qos/alias_minimum_bandwidth_rules/{rule_id}/'}]),
]


def list_rules():
    return rules
# File: neutron-16.0.0.0b2.dev214/neutron/conf/policies/rbac.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from neutron.conf.policies import base

COLLECTION_PATH = '/rbac-policies'
RESOURCE_PATH = '/rbac-policies/{id}'


rules = [
    policy.RuleDefault(
        'restrict_wildcard',
        base.policy_or('(not field:rbac_policy:target_tenant=*)',
                       base.RULE_ADMIN_ONLY),
        'Definition of a wildcard target_tenant'),
    policy.DocumentedRuleDefault(
        'create_rbac_policy',
        base.RULE_ANY,
        'Create an RBAC policy',
        [{'method': 'POST', 'path': COLLECTION_PATH}]),
    policy.DocumentedRuleDefault(
        'create_rbac_policy:target_tenant',
        'rule:restrict_wildcard',
        'Specify ``target_tenant`` when creating an RBAC policy',
        [{'method': 'POST', 'path': COLLECTION_PATH}]),
    policy.DocumentedRuleDefault(
        'update_rbac_policy',
        base.RULE_ADMIN_OR_OWNER,
        'Update an RBAC policy',
        [{'method': 'PUT', 'path': RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'update_rbac_policy:target_tenant',
        base.policy_and('rule:restrict_wildcard',
                        base.RULE_ADMIN_OR_OWNER),
        'Update ``target_tenant`` attribute of an RBAC policy',
        [{'method': 'PUT', 'path': RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'get_rbac_policy',
        base.RULE_ADMIN_OR_OWNER,
        'Get an RBAC policy',
        [{'method': 'GET', 'path': COLLECTION_PATH},
         {'method': 'GET', 'path': RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'delete_rbac_policy',
        base.RULE_ADMIN_OR_OWNER,
        'Delete an RBAC policy',
        [{'method': 'DELETE', 'path': RESOURCE_PATH}]),
]


def list_rules():
    return rules
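
# Illustrative sketch (not part of the tree): 'restrict_wildcard' above
# combines a negated field check with rule:admin_only, so only an admin may
# point an RBAC policy at the '*' wildcard tenant. The 'field:' check is a
# neutron-specific extension registered by neutron's policy engine, so the
# composed strings are printed here rather than evaluated.
if __name__ == '__main__':
    # Prints: (not field:rbac_policy:target_tenant=*) or rule:admin_only
    print(base.policy_or('(not field:rbac_policy:target_tenant=*)',
                         base.RULE_ADMIN_ONLY))
    # Prints: rule:restrict_wildcard and rule:admin_or_owner
    print(base.policy_and('rule:restrict_wildcard',
                          base.RULE_ADMIN_OR_OWNER))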
# File: neutron-16.0.0.0b2.dev214/neutron/conf/policies/router.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from neutron.conf.policies import base

COLLECTION_PATH = '/routers'
RESOURCE_PATH = '/routers/{id}'

ACTION_POST = [
    {'method': 'POST', 'path': COLLECTION_PATH},
]
ACTION_PUT = [
    {'method': 'PUT', 'path': RESOURCE_PATH},
]
ACTION_DELETE = [
    {'method': 'DELETE', 'path': RESOURCE_PATH},
]
ACTION_GET = [
    {'method': 'GET', 'path': COLLECTION_PATH},
    {'method': 'GET', 'path': RESOURCE_PATH},
]


rules = [
    policy.DocumentedRuleDefault(
        'create_router',
        base.RULE_ANY,
        'Create a router',
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_router:distributed',
        base.RULE_ADMIN_ONLY,
        'Specify ``distributed`` attribute when creating a router',
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_router:ha',
        base.RULE_ADMIN_ONLY,
        'Specify ``ha`` attribute when creating a router',
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_router:external_gateway_info',
        base.RULE_ADMIN_OR_OWNER,
        'Specify ``external_gateway_info`` information when creating a router',
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_router:external_gateway_info:network_id',
        base.RULE_ADMIN_OR_OWNER,
        ('Specify ``network_id`` in ``external_gateway_info`` information '
         'when creating a router'),
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_router:external_gateway_info:enable_snat',
        base.RULE_ADMIN_ONLY,
        ('Specify ``enable_snat`` in ``external_gateway_info`` information '
         'when creating a router'),
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_router:external_gateway_info:external_fixed_ips',
        base.RULE_ADMIN_ONLY,
        ('Specify ``external_fixed_ips`` in ``external_gateway_info`` '
         'information when creating a router'),
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'get_router',
        base.RULE_ADMIN_OR_OWNER,
        'Get a router',
        ACTION_GET),
    policy.DocumentedRuleDefault(
        'get_router:distributed',
        base.RULE_ADMIN_ONLY,
        'Get ``distributed`` attribute of a router',
        ACTION_GET),
    policy.DocumentedRuleDefault(
        'get_router:ha',
        base.RULE_ADMIN_ONLY,
        'Get ``ha`` attribute of a router',
        ACTION_GET),
    policy.DocumentedRuleDefault(
        'update_router',
        base.RULE_ADMIN_OR_OWNER,
        'Update a router',
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_router:distributed',
        base.RULE_ADMIN_ONLY,
        'Update ``distributed`` attribute of a router',
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_router:ha',
        base.RULE_ADMIN_ONLY,
        'Update ``ha`` attribute of a router',
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_router:external_gateway_info',
        base.RULE_ADMIN_OR_OWNER,
        'Update ``external_gateway_info`` information of a router',
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_router:external_gateway_info:network_id',
        base.RULE_ADMIN_OR_OWNER,
        ('Update ``network_id`` attribute of ``external_gateway_info`` '
         'information of a router'),
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_router:external_gateway_info:enable_snat',
        base.RULE_ADMIN_ONLY,
        ('Update ``enable_snat`` attribute of ``external_gateway_info`` '
         'information of a router'),
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_router:external_gateway_info:external_fixed_ips',
        base.RULE_ADMIN_ONLY,
        ('Update ``external_fixed_ips`` attribute of '
         '``external_gateway_info`` information of a router'),
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'delete_router',
        base.RULE_ADMIN_OR_OWNER,
        'Delete a router',
        ACTION_DELETE),
    policy.DocumentedRuleDefault(
        'add_router_interface',
        base.RULE_ADMIN_OR_OWNER,
        'Add an interface to a router',
        [{'method': 'PUT', 'path': '/routers/{id}/add_router_interface'}]),
    policy.DocumentedRuleDefault(
        'remove_router_interface',
        base.RULE_ADMIN_OR_OWNER,
        'Remove an interface from a router',
        [{'method': 'PUT',
          'path': '/routers/{id}/remove_router_interface'}]),
]


def list_rules():
    return rules


# File: neutron-16.0.0.0b2.dev214/neutron/conf/policies/security_group.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from neutron.conf.policies import base

SG_COLLECTION_PATH = '/security-groups'
SG_RESOURCE_PATH = '/security-groups/{id}'
RULE_COLLECTION_PATH = '/security-group-rules'
RULE_RESOURCE_PATH = '/security-group-rules/{id}'

RULE_ADMIN_OR_SG_OWNER = 'rule:admin_or_sg_owner'
RULE_ADMIN_OWNER_OR_SG_OWNER = 'rule:admin_owner_or_sg_owner'


rules = [
    policy.RuleDefault(
        'admin_or_sg_owner',
        base.policy_or('rule:context_is_admin',
                       'tenant_id:%(security_group:tenant_id)s'),
        description='Rule for admin or security group owner access'),
    policy.RuleDefault(
        'admin_owner_or_sg_owner',
        base.policy_or('rule:owner', RULE_ADMIN_OR_SG_OWNER),
        description=('Rule for resource owner, '
                     'admin or security group owner access')),
    # TODO(amotoki): Is admin_or_owner the right rule here? Would an empty
    # string make more sense for create_security_group?
    policy.DocumentedRuleDefault(
        'create_security_group',
        base.RULE_ADMIN_OR_OWNER,
        'Create a security group',
        [{'method': 'POST', 'path': SG_COLLECTION_PATH}]),
    policy.DocumentedRuleDefault(
        'get_security_group',
        base.RULE_ANY,
        'Get a security group',
        [{'method': 'GET', 'path': SG_COLLECTION_PATH},
         {'method': 'GET', 'path': SG_RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'update_security_group',
        base.RULE_ADMIN_OR_OWNER,
        'Update a security group',
        [{'method': 'PUT', 'path': SG_RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'delete_security_group',
        base.RULE_ADMIN_OR_OWNER,
        'Delete a security group',
        [{'method': 'DELETE', 'path': SG_RESOURCE_PATH}]),
    # TODO(amotoki): Is admin_or_owner the right rule here? Would an empty
    # string make more sense for create_security_group_rule?
    policy.DocumentedRuleDefault(
        'create_security_group_rule',
        base.RULE_ADMIN_OR_OWNER,
        'Create a security group rule',
        [{'method': 'POST', 'path': RULE_COLLECTION_PATH}]),
    policy.DocumentedRuleDefault(
        'get_security_group_rule',
        RULE_ADMIN_OWNER_OR_SG_OWNER,
        'Get a security group rule',
        [{'method': 'GET', 'path': RULE_COLLECTION_PATH},
         {'method': 'GET', 'path': RULE_RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'delete_security_group_rule',
        base.RULE_ADMIN_OR_OWNER,
        'Delete a security group rule',
        [{'method': 'DELETE', 'path': RULE_RESOURCE_PATH}]),
]


def list_rules():
    return rules


# File: neutron-16.0.0.0b2.dev214/neutron/conf/policies/segment.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from neutron.conf.policies import base

COLLECTION_PATH = '/segments'
RESOURCE_PATH = '/segments/{id}'


rules = [
    policy.DocumentedRuleDefault(
        'create_segment',
        base.RULE_ADMIN_ONLY,
        'Create a segment',
        [{'method': 'POST', 'path': COLLECTION_PATH}]),
    policy.DocumentedRuleDefault(
        'get_segment',
        base.RULE_ADMIN_ONLY,
        'Get a segment',
        [{'method': 'GET', 'path': COLLECTION_PATH},
         {'method': 'GET', 'path': RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'update_segment',
        base.RULE_ADMIN_ONLY,
        'Update a segment',
        [{'method': 'PUT', 'path': RESOURCE_PATH}]),
    policy.DocumentedRuleDefault(
        'delete_segment',
        base.RULE_ADMIN_ONLY,
        'Delete a segment',
        [{'method': 'DELETE', 'path': RESOURCE_PATH}]),
]


def list_rules():
    return rules
# File: neutron-16.0.0.0b2.dev214/neutron/conf/policies/service_type.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from neutron.conf.policies import base


rules = [
    policy.DocumentedRuleDefault(
        'get_service_provider',
        base.RULE_ANY,
        'Get service providers',
        [{'method': 'GET', 'path': '/service-providers'}]),
]


def list_rules():
    return rules


# File: neutron-16.0.0.0b2.dev214/neutron/conf/policies/subnet.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_policy import policy

from neutron.conf.policies import base

COLLECTION_PATH = '/subnets'
RESOURCE_PATH = '/subnets/{id}'

ACTION_POST = [
    {'method': 'POST', 'path': COLLECTION_PATH},
]
ACTION_PUT = [
    {'method': 'PUT', 'path': RESOURCE_PATH},
]
ACTION_DELETE = [
    {'method': 'DELETE', 'path': RESOURCE_PATH},
]
ACTION_GET = [
    {'method': 'GET', 'path': COLLECTION_PATH},
    {'method': 'GET', 'path': RESOURCE_PATH},
]


rules = [
    policy.DocumentedRuleDefault(
        'create_subnet',
        base.RULE_ADMIN_OR_NET_OWNER,
        'Create a subnet',
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_subnet:segment_id',
        base.RULE_ADMIN_ONLY,
        'Specify ``segment_id`` attribute when creating a subnet',
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'create_subnet:service_types',
        base.RULE_ADMIN_ONLY,
        'Specify ``service_types`` attribute when creating a subnet',
        ACTION_POST),
    policy.DocumentedRuleDefault(
        'get_subnet',
        base.policy_or(base.RULE_ADMIN_OR_OWNER, 'rule:shared'),
        'Get a subnet',
        ACTION_GET),
    policy.DocumentedRuleDefault(
        'get_subnet:segment_id',
        base.RULE_ADMIN_ONLY,
        'Get ``segment_id`` attribute of a subnet',
        ACTION_GET),
    policy.DocumentedRuleDefault(
        'update_subnet',
        base.RULE_ADMIN_OR_NET_OWNER,
        'Update a subnet',
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_subnet:segment_id',
        base.RULE_ADMIN_ONLY,
        'Update ``segment_id`` attribute of a subnet',
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'update_subnet:service_types',
        base.RULE_ADMIN_ONLY,
        'Update ``service_types`` attribute of a subnet',
        ACTION_PUT),
    policy.DocumentedRuleDefault(
        'delete_subnet',
        base.RULE_ADMIN_OR_NET_OWNER,
        'Delete a subnet',
        ACTION_DELETE),
]


def list_rules():
    return rules
from oslo_policy import policy from neutron.conf.policies import base COLLECTION_PATH = '/subnetpools' RESOURCE_PATH = '/subnetpools/{id}' ONBOARD_PATH = '/subnetpools/{id}/onboard_network_subnets' ADD_PREFIXES_PATH = '/subnetpools/{id}/add_prefixes' REMOVE_PREFIXES_PATH = '/subnetpools/{id}/remove_prefixes' rules = [ policy.RuleDefault( 'shared_subnetpools', 'field:subnetpools:shared=True', 'Definition of a shared subnetpool' ), policy.DocumentedRuleDefault( 'create_subnetpool', base.RULE_ANY, 'Create a subnetpool', [ { 'method': 'POST', 'path': COLLECTION_PATH, }, ] ), policy.DocumentedRuleDefault( 'create_subnetpool:shared', base.RULE_ADMIN_ONLY, 'Create a shared subnetpool', [ { 'method': 'POST', 'path': COLLECTION_PATH, }, ] ), policy.DocumentedRuleDefault( 'create_subnetpool:is_default', base.RULE_ADMIN_ONLY, 'Specify ``is_default`` attribute when creating a subnetpool', [ { 'method': 'POST', 'path': COLLECTION_PATH, }, ] ), policy.DocumentedRuleDefault( 'get_subnetpool', base.policy_or(base.RULE_ADMIN_OR_OWNER, 'rule:shared_subnetpools'), 'Get a subnetpool', [ { 'method': 'GET', 'path': COLLECTION_PATH, }, { 'method': 'GET', 'path': RESOURCE_PATH, }, ] ), policy.DocumentedRuleDefault( 'update_subnetpool', base.RULE_ADMIN_OR_OWNER, 'Update a subnetpool', [ { 'method': 'PUT', 'path': RESOURCE_PATH, }, ] ), policy.DocumentedRuleDefault( 'update_subnetpool:is_default', base.RULE_ADMIN_ONLY, 'Update ``is_default`` attribute of a subnetpool', [ { 'method': 'PUT', 'path': RESOURCE_PATH, }, ] ), policy.DocumentedRuleDefault( 'delete_subnetpool', base.RULE_ADMIN_OR_OWNER, 'Delete a subnetpool', [ { 'method': 'DELETE', 'path': RESOURCE_PATH, }, ] ), policy.DocumentedRuleDefault( 'onboard_network_subnets', base.RULE_ADMIN_OR_OWNER, 'Onboard existing subnet into a subnetpool', [ { 'method': 'PUT', 'path': ONBOARD_PATH, }, ] ), policy.DocumentedRuleDefault( 'add_prefixes', base.RULE_ADMIN_OR_OWNER, 'Add prefixes to a subnetpool', [ { 'method': 'PUT', 'path': ADD_PREFIXES_PATH, }, ] ), policy.DocumentedRuleDefault( 'remove_prefixes', base.RULE_ADMIN_OR_OWNER, 'Remove unallocated prefixes from a subnetpool', [ { 'method': 'PUT', 'path': REMOVE_PREFIXES_PATH, }, ] ), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/policies/trunk.py0000644000175000017500000000510700000000000023111 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
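# Illustrative sketch: every policies module ends with the same list_rules()
# hook, which feeds neutron's oslo.policy entry point (and, through it, the
# oslopolicy-sample-generator tool). The defaults can also be walked
# programmatically, e.g. to audit which HTTP operations each rule guards
# (name/check_str/operations are oslo.policy RuleDefault attributes):
from neutron.conf.policies import subnetpool

for rule in subnetpool.list_rules():
    ops = getattr(rule, 'operations', [])  # plain RuleDefaults carry no operations
    print(rule.name, rule.check_str, [(o['method'], o['path']) for o in ops])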
from oslo_policy import policy from neutron.conf.policies import base COLLECTION_PATH = '/trunks' RESOURCE_PATH = '/trunks/{id}' rules = [ policy.DocumentedRuleDefault( 'create_trunk', base.RULE_ANY, 'Create a trunk', [ { 'method': 'POST', 'path': COLLECTION_PATH, }, ] ), policy.DocumentedRuleDefault( 'get_trunk', base.RULE_ADMIN_OR_OWNER, 'Get a trunk', [ { 'method': 'GET', 'path': COLLECTION_PATH, }, { 'method': 'GET', 'path': RESOURCE_PATH, }, ] ), policy.DocumentedRuleDefault( 'update_trunk', base.RULE_ADMIN_OR_OWNER, 'Update a trunk', [ { 'method': 'PUT', 'path': RESOURCE_PATH, }, ] ), policy.DocumentedRuleDefault( 'delete_trunk', base.RULE_ADMIN_OR_OWNER, 'Delete a trunk', [ { 'method': 'DELETE', 'path': RESOURCE_PATH, }, ] ), policy.DocumentedRuleDefault( 'get_subports', base.RULE_ANY, 'List subports attached to a trunk', [ { 'method': 'GET', 'path': '/trunks/{id}/get_subports', }, ] ), policy.DocumentedRuleDefault( 'add_subports', base.RULE_ADMIN_OR_OWNER, 'Add subports to a trunk', [ { 'method': 'PUT', 'path': '/trunks/{id}/add_subports', }, ] ), policy.DocumentedRuleDefault( 'remove_subports', base.RULE_ADMIN_OR_OWNER, 'Delete subports from a trunk', [ { 'method': 'PUT', 'path': '/trunks/{id}/remove_subports', }, ] ), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/profiling.py0000644000175000017500000000211400000000000022123 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ PROFILER_OPTS = [ cfg.BoolOpt('enable_code_profiling', default=False, help=_('Enable code execution profiling with cProfile. ' 'Profiling data are logged at DEBUG level.')), cfg.IntOpt('code_profiling_calls_to_log', default=50, help=_('Number of calls from the cProfile report to log')), ] def register_profiling_opts(conf=cfg.CONF): conf.register_opts(PROFILER_OPTS) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/quota.py0000644000175000017500000000741500000000000021274 0ustar00coreycorey00000000000000# Copyright 2016 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
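# Illustrative sketch of registering and reading the profiling options
# defined above; parsing an empty argv is an assumption for standalone use
# (the neutron server normally parses its own config files).
from oslo_config import cfg

from neutron.conf import profiling

profiling.register_profiling_opts()
cfg.CONF([], project='neutron')
if cfg.CONF.enable_code_profiling:  # False unless enabled in neutron.conf
    print('logging the top %d profiled calls'
          % cfg.CONF.code_profiling_calls_to_log)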
from oslo_config import cfg from neutron._i18n import _ QUOTA_DB_MODULE = 'neutron.db.quota.driver' QUOTA_DB_DRIVER = '%s.DbQuotaDriver' % QUOTA_DB_MODULE QUOTA_CONF_DRIVER = 'neutron.quota.ConfDriver' QUOTAS_CFG_GROUP = 'QUOTAS' DEFAULT_QUOTA = -1 DEFAULT_QUOTA_NETWORK = 100 DEFAULT_QUOTA_SUBNET = 100 DEFAULT_QUOTA_PORT = 500 DEFAULT_QUOTA_SG = 10 DEFAULT_QUOTA_SG_RULE = 100 DEFAULT_QUOTA_ROUTER = 10 DEFAULT_QUOTA_FIP = 50 DEFAULT_QUOTA_RBAC = 10 # quota_opts from neutron/quota/__init__.py # renamed quota_opts to core_quota_opts core_quota_opts = [ cfg.IntOpt('default_quota', default=DEFAULT_QUOTA, help=_('Default number of resources allowed per tenant. ' 'A negative value means unlimited.')), cfg.IntOpt('quota_network', default=DEFAULT_QUOTA_NETWORK, help=_('Number of networks allowed per tenant. ' 'A negative value means unlimited.')), cfg.IntOpt('quota_subnet', default=DEFAULT_QUOTA_SUBNET, help=_('Number of subnets allowed per tenant. ' 'A negative value means unlimited.')), cfg.IntOpt('quota_port', default=DEFAULT_QUOTA_PORT, help=_('Number of ports allowed per tenant. ' 'A negative value means unlimited.')), cfg.StrOpt('quota_driver', default=QUOTA_DB_DRIVER, help=_('Default driver to use for quota checks.')), cfg.BoolOpt('track_quota_usage', default=True, help=_('Keep track of the current resource ' 'quota usage in the database. Plugins which do not ' 'leverage the neutron database should set this flag ' 'to False.')), ] # security_group_quota_opts from neutron/extensions/securitygroup.py security_group_quota_opts = [ cfg.IntOpt('quota_security_group', default=DEFAULT_QUOTA_SG, help=_('Number of security groups allowed per tenant. ' 'A negative value means unlimited.')), cfg.IntOpt('quota_security_group_rule', default=DEFAULT_QUOTA_SG_RULE, help=_('Number of security group rules allowed per tenant. ' 'A negative value means unlimited.')), ] # l3_quota_opts from neutron/extensions/l3.py l3_quota_opts = [ cfg.IntOpt('quota_router', default=DEFAULT_QUOTA_ROUTER, help=_('Number of routers allowed per tenant. ' 'A negative value means unlimited.')), cfg.IntOpt('quota_floatingip', default=DEFAULT_QUOTA_FIP, help=_('Number of floating IPs allowed per tenant. ' 'A negative value means unlimited.')), ] # rbac_quota_opts from neutron/extensions/rbac.py rbac_quota_opts = [ cfg.IntOpt('quota_rbac_policy', default=DEFAULT_QUOTA_RBAC, deprecated_name='quota_rbac_entry', help=_('Default number of RBAC entries allowed per tenant. ' 'A negative value means unlimited.')) ] def register_quota_opts(opts, cfg=cfg.CONF): cfg.register_opts(opts, QUOTAS_CFG_GROUP) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/service.py0000644000175000017500000000407000000000000021575 0ustar00coreycorey00000000000000# Copyright 2011 VMware, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
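# Illustrative sketch of consuming the quota option groups above: they all
# register under the [QUOTAS] section (QUOTAS_CFG_GROUP), and -1
# (DEFAULT_QUOTA) means unlimited. The empty argv is an assumption.
from oslo_config import cfg

from neutron.conf import quota

quota.register_quota_opts(quota.core_quota_opts)
quota.register_quota_opts(quota.l3_quota_opts)
cfg.CONF([], project='neutron')
print(cfg.CONF.QUOTAS.quota_network)  # -> 100 by default
print(cfg.CONF.QUOTAS.quota_router)   # -> 10 by default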
from oslo_config import cfg from neutron._i18n import _ SERVICE_OPTS = [ cfg.IntOpt('periodic_interval', default=40, help=_('Seconds between running periodic tasks.')), cfg.IntOpt('api_workers', help=_('Number of separate API worker processes for service. ' 'If not specified, the default is equal to the number ' 'of CPUs available for best performance, capped by ' 'potential RAM usage.')), cfg.IntOpt('rpc_workers', help=_('Number of RPC worker processes for service. ' 'If not specified, the default is equal to half the ' 'number of API workers.')), cfg.IntOpt('rpc_state_report_workers', default=1, help=_('Number of RPC worker processes dedicated to state ' 'reports queue.')), cfg.IntOpt('periodic_fuzzy_delay', default=5, help=_('Range of seconds to randomly delay when starting the ' 'periodic task scheduler to reduce stampeding. ' '(Disable by setting to 0)')), ] RPC_EXTRA_OPTS = [ cfg.IntOpt('rpc_response_max_timeout', default=600, help=_('Maximum seconds to wait for a response from an RPC ' 'call.')), ] def register_service_opts(opts, conf=cfg.CONF): conf.register_opts(opts) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.239044 neutron-16.0.0.0b2.dev214/neutron/conf/services/0000755000175000017500000000000000000000000021405 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/services/__init__.py0000644000175000017500000000000000000000000023504 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/services/extdns_designate_driver.py0000644000175000017500000000553700000000000026674 0ustar00coreycorey00000000000000# Copyright (c) 2016 IBM # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneauth1 import loading from oslo_config import cfg from neutron._i18n import _ designate_opts = [ cfg.StrOpt('url', help=_('URL for connecting to designate')), cfg.StrOpt('admin_username', help=_('Username for connecting to designate in admin ' 'context')), cfg.StrOpt('admin_password', help=_('Password for connecting to designate in admin ' 'context'), secret=True), cfg.StrOpt('admin_tenant_id', help=_('Tenant id for connecting to designate in admin ' 'context')), cfg.StrOpt('admin_tenant_name', help=_('Tenant name for connecting to designate in admin ' 'context')), cfg.StrOpt('admin_auth_url', help=_('Authorization URL for connecting to designate in admin ' 'context')), cfg.BoolOpt('allow_reverse_dns_lookup', default=True, help=_('Allow the creation of PTR records')), cfg.IntOpt( 'ipv4_ptr_zone_prefix_size', default=24, help=_('Number of bits in an ipv4 PTR zone that will be considered ' 'network prefix. It has to align to byte boundary. Minimum ' 'value is 8. Maximum value is 24. 
As a consequence, range ' 'of values is 8, 16 and 24')), cfg.IntOpt( 'ipv6_ptr_zone_prefix_size', default=120, help=_('Number of bits in an ipv6 PTR zone that will be considered ' 'network prefix. It has to align to nibble boundary. Minimum ' 'value is 4. Maximum value is 124. As a consequence, range ' 'of values is 4, 8, 12, 16,..., 124')), cfg.StrOpt('ptr_zone_email', default='', help=_('The email address to be used when creating PTR zones. ' 'If not specified, the email address will be ' 'admin@<dns_domain>')), ] def register_designate_opts(CONF=cfg.CONF): CONF.register_opts(designate_opts, 'designate') loading.register_auth_conf_options(CONF, 'designate') loading.register_session_conf_options( conf=CONF, group='designate', deprecated_opts={'cafile': [cfg.DeprecatedOpt('ca_cert')]}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/services/logging.py0000644000175000017500000000226700000000000023414 0ustar00coreycorey00000000000000# Copyright 2017 Fujitsu Limited # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_config import cfg from neutron._i18n import _ log_driver_opts = [ cfg.IntOpt( 'rate_limit', default=100, min=100, help=_('Maximum number of packets logged per second.')), cfg.IntOpt( 'burst_limit', default=25, min=25, help=_('Maximum number of packets per rate_limit.')), cfg.StrOpt( 'local_output_log_base', help=_('Output logfile path on agent side, default syslog file.')), ] def register_log_driver_opts(cfg=cfg.CONF): cfg.register_opts(log_driver_opts, 'network_log') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/services/metering_agent.py0000644000175000017500000000224700000000000024754 0ustar00coreycorey00000000000000# Copyright (C) 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ metering_agent_opts = [ cfg.StrOpt('driver', default='neutron.services.metering.drivers.noop.' 
'noop_driver.NoopMeteringDriver', help=_("Metering driver")), cfg.IntOpt('measure_interval', default=30, help=_("Interval between two metering measures")), cfg.IntOpt('report_interval', default=300, help=_("Interval between two metering reports")), ] def register_metering_agent_opts(cfg=cfg.CONF): cfg.register_opts(metering_agent_opts) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/services/provider_configuration.py0000644000175000017500000000207100000000000026540 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron._i18n import _ serviceprovider_opts = [ cfg.MultiStrOpt('service_provider', default=[], help=_('Defines providers for advanced services ' 'using the format: ' '<service_type>:<name>:<driver>[:default]')) ] def register_service_provider_opts(cfg=cfg.CONF): cfg.register_opts(serviceprovider_opts, 'service_providers') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/conf/wsgi.py0000644000175000017500000000235600000000000021113 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
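# Illustrative sketch: the service_provider entries registered above are
# colon-separated strings of the form <service_type>:<name>:<driver>[:default].
# Neutron's real parser (neutron.services.provider_configuration) performs
# more validation; this minimal version and the example value are assumptions.
def parse_service_provider(line):
    parts = line.split(':')
    if len(parts) not in (3, 4) or (len(parts) == 4 and parts[3] != 'default'):
        raise ValueError('invalid service_provider: %s' % line)
    return {'service_type': parts[0], 'name': parts[1],
            'driver': parts[2], 'default': len(parts) == 4}

example = 'L3_ROUTER_NAT:myrouter:some.module.Driver:default'  # hypothetical
assert parse_service_provider(example)['default'] is True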
from oslo_config import cfg from oslo_service import wsgi from neutron._i18n import _ socket_opts = [ cfg.IntOpt('backlog', default=4096, help=_("Number of backlog requests to configure " "the socket with")), cfg.IntOpt('retry_until_window', default=30, help=_("Number of seconds to keep retrying to listen")), cfg.BoolOpt('use_ssl', default=False, help=_('Enable SSL on the API server')), ] def register_socket_opts(cfg=cfg.CONF): cfg.register_opts(socket_opts) wsgi.register_opts(cfg) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2430441 neutron-16.0.0.0b2.dev214/neutron/core_extensions/0000755000175000017500000000000000000000000022044 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/core_extensions/__init__.py0000644000175000017500000000000000000000000024143 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/core_extensions/base.py0000644000175000017500000000323100000000000023327 0ustar00coreycorey00000000000000# Copyright (c) 2015 Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six NETWORK = 'network' PORT = 'port' EVENT_CREATE = 'create' EVENT_UPDATE = 'update' CORE_RESOURCES = [NETWORK, PORT] @six.add_metaclass(abc.ABCMeta) class CoreResourceExtension(object): @abc.abstractmethod def process_fields(self, context, resource_type, event_type, requested_resource, actual_resource): """Process extension fields. :param context: neutron api request context :param resource_type: core resource type (one of CORE_RESOURCES) :param event_type: kind of event triggering this action (update, create) :param requested_resource: resource dict that contains extension fields :param actual_resource: actual resource dict known to plugin """ @abc.abstractmethod def extract_fields(self, resource_type, resource): """Extract extension fields. :param resource_type: core resource type (one of CORE_RESOURCES) :param resource: resource dict that contains extension fields """ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/core_extensions/qos.py0000644000175000017500000001165700000000000023232 0ustar00coreycorey00000000000000# Copyright (c) 2015 Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import api as db_api from neutron_lib.exceptions import qos as qos_exc from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from neutron_lib.services.qos import constants as qos_consts from neutron.core_extensions import base from neutron.objects.qos import policy as policy_object class QosCoreResourceExtension(base.CoreResourceExtension): @property def plugin_loaded(self): if not hasattr(self, '_plugin_loaded'): self._plugin_loaded = ( plugin_constants.QOS in directory.get_plugins()) return self._plugin_loaded def _check_policy_change_permission(self, context, old_policy): """An existing policy can be modified only if one of the following is true: the policy's tenant is the context's tenant the policy is shared with the tenant Using is_accessible expresses these conditions. """ if not (policy_object.QosPolicy.is_accessible(context, old_policy)): raise qos_exc.PolicyRemoveAuthorizationError( policy_id=old_policy.id) def _update_port_policy(self, context, port, port_changes): old_policy = policy_object.QosPolicy.get_port_policy( context.elevated(), port['id']) if old_policy: self._check_policy_change_permission(context, old_policy) old_policy.detach_port(port['id']) qos_policy_id = port_changes.get(qos_consts.QOS_POLICY_ID) if qos_policy_id is not None: policy = policy_object.QosPolicy.get_policy_obj( context, qos_policy_id) policy.attach_port(port['id']) port[qos_consts.QOS_POLICY_ID] = qos_policy_id def _create_network_policy(self, context, network, network_changes): qos_policy_id = network_changes.get(qos_consts.QOS_POLICY_ID) if not qos_policy_id: policy_obj = policy_object.QosPolicyDefault.get_object( context, project_id=network['project_id']) if policy_obj is not None: qos_policy_id = policy_obj.qos_policy_id if qos_policy_id is not None: policy = policy_object.QosPolicy.get_policy_obj( context, qos_policy_id) policy.attach_network(network['id']) network[qos_consts.QOS_POLICY_ID] = qos_policy_id def _update_network_policy(self, context, network, network_changes): old_policy = policy_object.QosPolicy.get_network_policy( context.elevated(), network['id']) if old_policy: self._check_policy_change_permission(context, old_policy) old_policy.detach_network(network['id']) qos_policy_id = network_changes.get(qos_consts.QOS_POLICY_ID) if qos_policy_id is not None: policy = policy_object.QosPolicy.get_policy_obj( context, qos_policy_id) policy.attach_network(network['id']) network[qos_consts.QOS_POLICY_ID] = qos_policy_id def _exec(self, method_name, context, kwargs): with db_api.CONTEXT_WRITER.using(context): return getattr(self, method_name)(context=context, **kwargs) def process_fields(self, context, resource_type, event_type, requested_resource, actual_resource): if (qos_consts.QOS_POLICY_ID in requested_resource and self.plugin_loaded): method_name = ('_%(event)s_%(resource)s_policy' % {'event': event_type, 'resource': resource_type}) self._exec(method_name, context, {resource_type: actual_resource, "%s_changes" % resource_type: requested_resource}) def extract_fields(self, resource_type, resource): if not self.plugin_loaded: return {} binding = resource['qos_policy_binding'] qos_policy_id = binding['policy_id'] if binding else None retval = {qos_consts.QOS_POLICY_ID: qos_policy_id} if resource_type == base.PORT: network_binding = resource.get('qos_network_policy_binding') qos_net_policy_id = 
(network_binding['policy_id'] if network_binding else None) retval[qos_consts.QOS_NETWORK_POLICY_ID] = qos_net_policy_id return retval ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2550442 neutron-16.0.0.0b2.dev214/neutron/db/0000755000175000017500000000000000000000000017222 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/__init__.py0000644000175000017500000000000000000000000021321 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/_utils.py0000644000175000017500000000566700000000000021111 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NOTE: This module shall not be used by external projects. It will be moved to neutron-lib in due course, and then it can be used from there. """ import contextlib from neutron_lib.db import api as db_api from neutron_lib.db import utils as db_utils from oslo_log import log as logging from oslo_utils import excutils LOG = logging.getLogger(__name__) @contextlib.contextmanager def _noop_context_manager(): yield def safe_creation(context, create_fn, delete_fn, create_bindings, transaction=True): '''This function wraps logic of object creation in safe atomic way. In case of exception, object is deleted. More information when this method could be used can be found in developer guide - Effective Neutron: Database interaction section. https://docs.openstack.org/neutron/latest/contributor/effective_neutron.html :param context: context :param create_fn: function without arguments that is called to create object and returns this object. :param delete_fn: function that is called to delete an object. It is called with object's id field as an argument. :param create_bindings: function that is called to create bindings for an object. It is called with object's id field as an argument. :param transaction: if true the whole operation will be wrapped in a transaction. if false, no transaction will be used. ''' cm = (db_api.CONTEXT_WRITER.using(context) if transaction else _noop_context_manager()) with cm: obj = create_fn() try: value = create_bindings(obj['id']) except Exception: with excutils.save_and_reraise_exception(): try: delete_fn(obj['id']) except Exception as e: LOG.error("Cannot clean up created object %(obj)s. 
" "Exception: %(exc)s", {'obj': obj['id'], 'exc': e}) return obj, value def model_query(context, model): query = context.session.query(model) # define basic filter condition for model query query_filter = None if db_utils.model_query_scope_is_project(context, model): query_filter = (model.tenant_id == context.tenant_id) if query_filter is not None: query = query.filter(query_filter) return query ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/address_scope_db.py0000644000175000017500000001356600000000000023072 0ustar00coreycorey00000000000000# Copyright (c) 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import address_scope as apidef from neutron_lib.api.definitions import network as net_def from neutron_lib import constants from neutron_lib.db import api as db_api from neutron_lib.db import resource_extend from neutron_lib.db import utils as db_utils from neutron_lib.exceptions import address_scope as api_err from oslo_utils import uuidutils from neutron._i18n import _ from neutron.extensions import address_scope as ext_address_scope from neutron.objects import address_scope as obj_addr_scope from neutron.objects import base as base_obj from neutron.objects import subnetpool as subnetpool_obj @resource_extend.has_resource_extenders class AddressScopeDbMixin(ext_address_scope.AddressScopePluginBase): """Mixin class to add address scope to db_base_plugin_v2.""" __native_bulk_support = True @staticmethod def _make_address_scope_dict(address_scope, fields=None): res = address_scope.to_dict() return db_utils.resource_fields(res, fields) def _get_address_scope(self, context, id): obj = obj_addr_scope.AddressScope.get_object(context, id=id) if obj is None: raise api_err.AddressScopeNotFound(address_scope_id=id) return obj def is_address_scope_owned_by_tenant(self, context, id): """Check if address scope id is owned by the tenant or not. AddressScopeNotFound is raised if the - address scope id doesn't exist or - if the (unshared) address scope id is not owned by this tenant. @return Returns true if the user is admin or tenant is owner Returns false if the address scope id is shared and not owned by the tenant. 
""" address_scope = self._get_address_scope(context, id) return context.is_admin or ( address_scope.tenant_id == context.tenant_id) def get_ip_version_for_address_scope(self, context, id): address_scope = self._get_address_scope(context, id) return address_scope.ip_version def create_address_scope(self, context, address_scope): """Create an address scope.""" a_s = address_scope['address_scope'] address_scope_id = a_s.get('id') or uuidutils.generate_uuid() pool_args = {'project_id': a_s['tenant_id'], 'id': address_scope_id, 'name': a_s['name'], 'shared': a_s['shared'], 'ip_version': a_s['ip_version']} address_scope = obj_addr_scope.AddressScope(context, **pool_args) address_scope.create() return self._make_address_scope_dict(address_scope) def update_address_scope(self, context, id, address_scope): a_s = address_scope['address_scope'] address_scope = self._get_address_scope(context, id) if address_scope.shared and not a_s.get('shared', True): reason = _("Shared address scope can't be unshared") raise api_err.AddressScopeUpdateError( address_scope_id=id, reason=reason) address_scope.update_fields(a_s) address_scope.update() return self._make_address_scope_dict(address_scope) def get_address_scope(self, context, id, fields=None): address_scope = self._get_address_scope(context, id) return self._make_address_scope_dict(address_scope, fields) def get_address_scopes(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pager = base_obj.Pager(sorts, limit, page_reverse, marker) address_scopes = obj_addr_scope.AddressScope.get_objects( context, _pager=pager, **filters) return [ self._make_address_scope_dict(addr_scope, fields) for addr_scope in address_scopes ] def get_address_scopes_count(self, context, filters=None): return obj_addr_scope.AddressScope.count(context, **filters) def delete_address_scope(self, context, id): with db_api.CONTEXT_WRITER.using(context): if subnetpool_obj.SubnetPool.get_objects(context, address_scope_id=id): raise api_err.AddressScopeInUse(address_scope_id=id) address_scope = self._get_address_scope(context, id) address_scope.delete() @staticmethod @resource_extend.extends([net_def.COLLECTION_NAME]) def _extend_network_dict_address_scope(network_res, network_db): network_res[apidef.IPV4_ADDRESS_SCOPE] = None network_res[apidef.IPV6_ADDRESS_SCOPE] = None subnetpools = {subnet.subnetpool for subnet in network_db.subnets if subnet.subnetpool} for subnetpool in subnetpools: # A network will be constrained to only one subnetpool per address # family. Retrieve the address scope of subnetpools as the address # scopes of network. as_id = subnetpool[apidef.ADDRESS_SCOPE_ID] if subnetpool['ip_version'] == constants.IP_VERSION_4: network_res[apidef.IPV4_ADDRESS_SCOPE] = as_id if subnetpool['ip_version'] == constants.IP_VERSION_6: network_res[apidef.IPV6_ADDRESS_SCOPE] = as_id return network_res ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/agents_db.py0000644000175000017500000006102400000000000021525 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime from eventlet import greenthread from neutron_lib.agent import constants as agent_consts from neutron_lib.api import converters from neutron_lib.api.definitions import agent as agent_apidef from neutron_lib.api import extensions from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib import context from neutron_lib.db import api as db_api from neutron_lib.db import model_query from neutron_lib.db import utils as db_utils from neutron_lib.exceptions import agent as agent_exc from neutron_lib.exceptions import availability_zone as az_exc from neutron_lib.plugins import directory from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_serialization import jsonutils from oslo_utils import importutils from oslo_utils import timeutils import six from neutron.agent.common import utils from neutron.api.rpc.callbacks import version_manager from neutron.conf.agent.database import agents_db from neutron.db.models import agent as agent_model from neutron.extensions import _availability_zone_filter_lib as azfil_ext from neutron.extensions import agent as ext_agent from neutron.extensions import availability_zone as az_ext from neutron.objects import agent as agent_obj LOG = logging.getLogger(__name__) agents_db.register_db_agents_opts() # this is the ratio from agent_down_time to the time we use to consider # the agents down for considering their resource versions in the # version_manager callback DOWNTIME_VERSIONS_RATIO = 2 RESOURCE_AGENT_TYPE_MAP = { 'network': constants.AGENT_TYPE_DHCP, 'router': constants.AGENT_TYPE_L3, } AZ_ATTRIBUTE_MAP = { 'name': { 'agent_key': 'availability_zone', 'convert_to': lambda x: x, }, 'resource': { 'agent_key': 'agent_type', 'convert_to': lambda x: RESOURCE_AGENT_TYPE_MAP.get(x, x), } } def get_availability_zones_by_agent_type(context, agent_type, availability_zones): """Get list of availability zones based on agent type""" agents = agent_obj.Agent._get_agents_by_availability_zones_and_agent_type( context, agent_type=agent_type, availability_zones=availability_zones) return set(agent.availability_zone for agent in agents) class AgentAvailabilityZoneMixin(az_ext.AvailabilityZonePluginBase): """Mixin class to add availability_zone extension to AgentDbMixin.""" _is_az_filter_supported = None @property def is_az_filter_supported(self): supported = self._is_az_filter_supported if supported is None: supported = False for plugin in directory.get_plugins().values(): if extensions.is_extension_supported(plugin, azfil_ext.ALIAS): supported = True break self._is_az_filter_supported = supported return self._is_az_filter_supported def _list_availability_zones(self, context, filters=None): result = {} filters = filters or {} if self._is_az_filter_supported or self.is_az_filter_supported: filters = self._adjust_az_filters(filters) agents = agent_obj.Agent.get_objects(context, **filters) for agent in agents: if not agent.availability_zone: continue if agent.agent_type == constants.AGENT_TYPE_DHCP: 
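# Only DHCP and L3 agents map to an availability-zone aware API resource
# ('network' and 'router' respectively, mirroring RESOURCE_AGENT_TYPE_MAP
# above); agents of any other type fall through to 'continue' and are skipped.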
resource = 'network' elif agent.agent_type == constants.AGENT_TYPE_L3: resource = 'router' else: continue key = (agent.availability_zone, resource) value = agent.admin_state_up or result.get(key, False) result[key] = 'available' if value else 'unavailable' return result def _adjust_az_filters(self, filters): # The intersect of sets gets us applicable filter keys (others ignored) common_keys = six.viewkeys(filters) & six.viewkeys(AZ_ATTRIBUTE_MAP) for key in common_keys: filter_key = AZ_ATTRIBUTE_MAP[key]['agent_key'] filter_vals = filters.pop(key) if filter_vals: filter_vals = [AZ_ATTRIBUTE_MAP[key]['convert_to'](v) for v in filter_vals] filters.setdefault(filter_key, []) filters[filter_key] += filter_vals return filters @db_api.retry_if_session_inactive() def get_availability_zones(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """Return a list of availability zones.""" if self._is_az_filter_supported or self.is_az_filter_supported: filter_states = filters.pop('state', []) # NOTE(hichihara): 'tenant_id' is dummy for policy check. # it is not visible via API. return [{'state': v, 'name': k[0], 'resource': k[1], 'tenant_id': context.tenant_id} for k, v in self._list_availability_zones( context, filters).items() if not filter_states or v in filter_states] else: # NOTE(hichihara): 'tenant_id' is dummy for policy check. # it is not visible via API. return [{'state': v, 'name': k[0], 'resource': k[1], 'tenant_id': context.tenant_id} for k, v in self._list_availability_zones( context, filters).items()] @db_api.retry_if_session_inactive() def validate_availability_zones(self, context, resource_type, availability_zones): """Verify that the availability zones exist.""" if not availability_zones: return if resource_type == 'network': agent_type = constants.AGENT_TYPE_DHCP elif resource_type == 'router': agent_type = constants.AGENT_TYPE_L3 else: return azs = get_availability_zones_by_agent_type( context, agent_type, availability_zones) diff = set(availability_zones) - set(azs) if diff: raise az_exc.AvailabilityZoneNotFound(availability_zone=diff.pop()) class AgentDbMixin(ext_agent.AgentPluginBase, AgentAvailabilityZoneMixin): """Mixin class to add agent extension to db_base_plugin_v2.""" def _get_agent(self, context, id): agent = agent_obj.Agent.get_object(context, id=id) if not agent: raise agent_exc.AgentNotFound(id=id) return agent @db_api.retry_if_session_inactive() def get_enabled_agent_on_host(self, context, agent_type, host): """Return agent of agent_type for the specified host.""" agent = agent_obj.Agent.get_object(context, agent_type=agent_type, host=host, admin_state_up=True) if not agent: LOG.debug('No enabled %(agent_type)s agent on host ' '%(host)s', {'agent_type': agent_type, 'host': host}) return if utils.is_agent_down(agent.heartbeat_timestamp): LOG.warning('%(agent_type)s agent %(agent_id)s is not active', {'agent_type': agent_type, 'agent_id': agent.id}) return agent @staticmethod def is_agent_considered_for_versions(agent_dict): return not timeutils.is_older_than(agent_dict['heartbeat_timestamp'], cfg.CONF.agent_down_time * DOWNTIME_VERSIONS_RATIO) def get_configuration_dict(self, agent_db): return self._get_dict(agent_db, 'configurations') def _get_dict(self, agent_db, dict_name, ignore_missing=False): json_value = None try: json_value = getattr(agent_db, dict_name) # TODO(tuanvu): after all agent_db is converted to agent_obj, # we no longer need this. 
# Without this check, some unit tests will fail # because some of json_values are dict already if not isinstance(json_value, dict): conf = jsonutils.loads(json_value) else: conf = json_value except Exception: if json_value or not ignore_missing: msg = ('Dictionary %(dict_name)s for agent %(agent_type)s ' 'on host %(host)s is invalid.') LOG.warning(msg, {'dict_name': dict_name, 'agent_type': agent_db.agent_type, 'host': agent_db.host}) conf = {} return conf def _get_agent_load(self, agent): configs = agent.get('configurations', {}) load_type = None load = 0 if(agent['agent_type'] == constants.AGENT_TYPE_DHCP): load_type = cfg.CONF.dhcp_load_type if load_type: load = int(configs.get(load_type, 0)) return load def _make_agent_dict(self, agent, fields=None): attr = agent_apidef.RESOURCE_ATTRIBUTE_MAP.get( agent_apidef.COLLECTION_NAME) res = dict((k, agent[k]) for k in attr if k not in ['alive', 'configurations']) res['alive'] = not utils.is_agent_down( res['heartbeat_timestamp'] ) res['configurations'] = self._get_dict(agent, 'configurations') res['resource_versions'] = self._get_dict(agent, 'resource_versions', ignore_missing=True) res['availability_zone'] = agent['availability_zone'] res['resources_synced'] = agent['resources_synced'] return db_utils.resource_fields(res, fields) @db_api.retry_if_session_inactive() def delete_agent(self, context, id): agent = self._get_agent(context, id) registry.publish(resources.AGENT, events.BEFORE_DELETE, self, payload=events.DBEventPayload( context, states=(agent,), resource_id=id)) agent.delete() @db_api.retry_if_session_inactive() def update_agent(self, context, id, agent): agent_data = agent['agent'] with db_api.CONTEXT_WRITER.using(context): agent = self._get_agent(context, id) agent.update_fields(agent_data) agent.update() return self._make_agent_dict(agent) @db_api.retry_if_session_inactive() def get_agents_db(self, context, filters=None): # TODO(annp): keep this method for backward compatibility, # will need to clean it up later query = model_query.get_collection_query(context, agent_model.Agent, filters=filters) return query.all() @db_api.retry_if_session_inactive() def get_agent_objects(self, context, filters=None): filters = filters or {} return agent_obj.Agent.get_objects(context, **filters) @db_api.retry_if_session_inactive() def get_agents(self, context, filters=None, fields=None): filters = filters or {} alive = filters and filters.pop('alive', None) agents = agent_obj.Agent.get_objects(context, **filters) if alive: alive = converters.convert_to_boolean(alive[0]) agents = [agent for agent in agents if agent.is_active == alive] return [self._make_agent_dict(agent, fields=fields) for agent in agents] @db_api.retry_db_errors def agent_health_check(self): """Scan agents and log if some are considered dead.""" agents = self.get_agents(context.get_admin_context(), filters={'admin_state_up': [True]}) dead_agents = [agent for agent in agents if not agent['alive']] if dead_agents: data = '%20s %20s %s\n' % ('Type', 'Last heartbeat', "host") data += '\n'.join(['%20s %20s %s' % (agent['agent_type'], agent['heartbeat_timestamp'], agent['host']) for agent in dead_agents]) LOG.warning("Agent healthcheck: found %(count)s dead agents " "out of %(total)s:\n%(data)s", {'count': len(dead_agents), 'total': len(agents), 'data': data}) else: LOG.debug("Agent healthcheck: found %s active agents", len(agents)) def _get_agent_by_type_and_host(self, context, agent_type, host): agent_objs = agent_obj.Agent.get_objects(context, agent_type=agent_type, host=host) if not 
agent_objs: raise agent_exc.AgentNotFoundByTypeHost(agent_type=agent_type, host=host) if len(agent_objs) > 1: raise agent_exc.MultipleAgentFoundByTypeHost( agent_type=agent_type, host=host) return agent_objs[0] @db_api.retry_if_session_inactive() def get_agent(self, context, id, fields=None): agent = self._get_agent(context, id) return self._make_agent_dict(agent, fields) @db_api.retry_if_session_inactive() def filter_hosts_with_network_access( self, context, network_id, candidate_hosts): """Filter hosts with access to network_id. This method returns a subset of candidate_hosts with the ones with network access to network_id. A plugin can overload this method to define its own host network_id based filter. """ return candidate_hosts def _log_heartbeat(self, state, agent_db, agent_conf, agent_timestamp): if agent_conf.get('log_agent_heartbeats'): delta = timeutils.utcnow() - agent_db.heartbeat_timestamp LOG.info("Heartbeat received from %(type)s agent on " "host %(host)s, uuid %(uuid)s after %(delta)s, sent at " "%(agent_timestamp)s", {'type': agent_db.agent_type, 'host': agent_db.host, 'uuid': state.get('uuid'), 'delta': delta, 'agent_timestamp': agent_timestamp}) @db_api.retry_if_session_inactive() def create_or_update_agent(self, context, agent_state, agent_timestamp=None): """Registers new agent in the database or updates existing. Returns tuple of agent status and state. Status is from server point of view: alive, new or revived. It could be used by agent to do some sync with the server if needed. """ status = agent_consts.AGENT_ALIVE with db_api.CONTEXT_WRITER.using(context): res_keys = ['agent_type', 'binary', 'host', 'topic'] res = dict((k, agent_state[k]) for k in res_keys) if 'availability_zone' in agent_state: res['availability_zone'] = agent_state['availability_zone'] configurations_dict = agent_state.get('configurations', {}) res['configurations'] = jsonutils.dumps(configurations_dict) resource_versions_dict = agent_state.get('resource_versions') if resource_versions_dict: res['resource_versions'] = jsonutils.dumps( resource_versions_dict) res['load'] = self._get_agent_load(agent_state) current_time = timeutils.utcnow() try: agent = self._get_agent_by_type_and_host( context, agent_state['agent_type'], agent_state['host']) agent_state_orig = copy.deepcopy(agent_state) if not agent.is_active: status = agent_consts.AGENT_REVIVED if 'resource_versions' not in agent_state: # updating agent_state with resource_versions taken # from db so that # _update_local_agent_resource_versions() will call # version_manager and bring it up to date agent_state['resource_versions'] = self._get_dict( agent, 'resource_versions', ignore_missing=True) res['heartbeat_timestamp'] = current_time if agent_state.get('start_flag'): res['started_at'] = current_time greenthread.sleep(0) self._log_heartbeat(agent_state, agent, configurations_dict, agent_timestamp) agent.update_fields(res) agent.update() event_type = events.AFTER_UPDATE except agent_exc.AgentNotFoundByTypeHost: agent_state_orig = None greenthread.sleep(0) res['created_at'] = current_time res['started_at'] = current_time res['heartbeat_timestamp'] = current_time res['admin_state_up'] = cfg.CONF.enable_new_agents agent = agent_obj.Agent(context=context, **res) greenthread.sleep(0) agent.create() event_type = events.AFTER_CREATE self._log_heartbeat(agent_state, agent, configurations_dict, agent_timestamp) status = agent_consts.AGENT_NEW greenthread.sleep(0) agent_state['agent_status'] = status agent_state['admin_state_up'] = agent.admin_state_up 
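# At this point the agent row is persisted and its status resolved; the
# publish below notifies registry subscribers with both the original state
# and the desired state so they can react to new or revived agents.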
registry.publish(resources.AGENT, event_type, self, payload=events.DBEventPayload( context=context, metadata={ 'host': agent_state['host'], 'plugin': self, 'status': status }, states=(agent_state_orig, ), desired_state=agent_state, resource_id=agent.id )) return status, agent_state def _get_agents_considered_for_versions(self): up_agents = self.get_agents(context.get_admin_context(), filters={'admin_state_up': [True]}) return filter(self.is_agent_considered_for_versions, up_agents) def get_agents_resource_versions(self, tracker): """Get the known agent resource versions and update the tracker. This function looks up into the database and updates every agent resource versions. This method is called from version_manager when the cached information has passed TTL. :param tracker: receives a version_manager.ResourceConsumerTracker """ for agent in self._get_agents_considered_for_versions(): resource_versions = agent.get('resource_versions', {}) consumer = version_manager.AgentConsumer( agent_type=agent['agent_type'], host=agent['host']) tracker.set_versions(consumer, resource_versions) class AgentExtRpcCallback(object): """Processes the rpc report in plugin implementations. This class implements the server side of an rpc interface. The client side can be found in neutron.agent.rpc.PluginReportStateAPI. For more information on changing rpc interfaces, see doc/source/contributor/internals/rpc_api.rst. API version history: 1.0 - Initial version. 1.1 - report_state now returns agent state. 1.2 - add method has_alive_neutron_server. """ target = oslo_messaging.Target(version='1.2', namespace=constants.RPC_NAMESPACE_STATE) START_TIME = timeutils.utcnow() def __init__(self, plugin=None): super(AgentExtRpcCallback, self).__init__() self.plugin = plugin # TODO(ajo): fix the resources circular dependency issue by dynamically # registering object types in the RPC callbacks api resources_rpc = importutils.import_module( 'neutron.api.rpc.handlers.resources_rpc') # Initialize RPC api directed to other neutron-servers self.server_versions_rpc = resources_rpc.ResourcesPushToServersRpcApi() def has_alive_neutron_server(self, context, **kwargs): return True @db_api.retry_if_session_inactive() def report_state(self, context, **kwargs): """Report state from agent to server. Returns - agent's status: AGENT_NEW, AGENT_REVIVED, AGENT_ALIVE """ time = kwargs['time'] time = timeutils.parse_strtime(time) agent_state = kwargs['agent_state']['agent_state'] self._check_clock_sync_on_agent_start(agent_state, time) if self.START_TIME > time: time_agent = datetime.datetime.isoformat(time) time_server = datetime.datetime.isoformat(self.START_TIME) log_dict = {'agent_time': time_agent, 'server_time': time_server} LOG.debug("Stale message received with timestamp: %(agent_time)s. 
" "Skipping processing because it's older than the " "server start timestamp: %(server_time)s", log_dict) return if not self.plugin: self.plugin = directory.get_plugin() agent_status, agent_state = self.plugin.create_or_update_agent( context, agent_state, time) self._update_local_agent_resource_versions(context, agent_state) return agent_status def _update_local_agent_resource_versions(self, context, agent_state): resource_versions_dict = agent_state.get('resource_versions') if not resource_versions_dict: return version_manager.update_versions( version_manager.AgentConsumer(agent_type=agent_state['agent_type'], host=agent_state['host']), resource_versions_dict) # report other neutron-servers about this quickly self.server_versions_rpc.report_agent_resource_versions( context, agent_state['agent_type'], agent_state['host'], resource_versions_dict) def _check_clock_sync_on_agent_start(self, agent_state, agent_time): """Checks if the server and the agent times are in sync. Method checks if the agent time is in sync with the server time on start up. Ignores it, on subsequent re-connects. """ if agent_state.get('start_flag'): time_server_now = timeutils.utcnow() diff = abs(timeutils.delta_seconds(time_server_now, agent_time)) if diff > cfg.CONF.agent_down_time: agent_name = agent_state['agent_type'] time_agent = datetime.datetime.isoformat(agent_time) host = agent_state['host'] log_dict = {'host': host, 'agent_name': agent_name, 'agent_time': time_agent, 'threshold': cfg.CONF.agent_down_time, 'serv_time': (datetime.datetime.isoformat (time_server_now)), 'diff': diff} LOG.error("Message received from the host: %(host)s " "during the registration of %(agent_name)s has " "a timestamp: %(agent_time)s. This differs from " "the current server timestamp: %(serv_time)s by " "%(diff)s seconds, which is more than the " "threshold agent down " "time: %(threshold)s.", log_dict) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/agentschedulers_db.py0000644000175000017500000005554000000000000023432 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import random import time from neutron_lib import constants from neutron_lib import context as ncontext from neutron_lib.db import api as db_api from neutron_lib import exceptions as n_exc from neutron_lib.exceptions import agent as agent_exc from neutron_lib.exceptions import dhcpagentscheduler as das_exc from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_utils import timeutils from sqlalchemy.orm import exc from neutron.agent.common import utils as agent_utils from neutron.common import utils from neutron.conf.agent.database import agentschedulers_db from neutron.db import agents_db from neutron.db.availability_zone import network as network_az from neutron.extensions import dhcpagentscheduler from neutron.objects import network from neutron import worker as neutron_worker LOG = logging.getLogger(__name__) agentschedulers_db.register_db_agentschedulers_opts() class AgentSchedulerDbMixin(agents_db.AgentDbMixin): """Common class for agent scheduler mixins.""" # agent notifiers to handle agent update operations; # should be updated by plugins; agent_notifiers = { constants.AGENT_TYPE_DHCP: None, constants.AGENT_TYPE_L3: None, } @staticmethod def is_eligible_agent(active, agent): if active is None: # filtering by activeness is disabled, all agents are eligible return True else: # note(rpodolyaka): original behaviour is saved here: if active # filter is set, only agents which are 'up' # (i.e. have a recent heartbeat timestamp) # are eligible, even if active is False if agent_utils.is_agent_down(agent['heartbeat_timestamp']): LOG.warning('Agent %(agent)s is down. Type: %(type)s, host: ' '%(host)s, heartbeat: %(heartbeat)s', {'agent': agent['id'], 'type': agent['agent_type'], 'host': agent['host'], 'heartbeat': agent['heartbeat_timestamp']}) return False return True def update_agent(self, context, id, agent): original_agent = self.get_agent(context, id) result = super(AgentSchedulerDbMixin, self).update_agent( context, id, agent) agent_data = agent['agent'] agent_notifier = self.agent_notifiers.get(original_agent['agent_type']) if (agent_notifier and 'admin_state_up' in agent_data and original_agent['admin_state_up'] != agent_data['admin_state_up']): agent_notifier.agent_updated(context, agent_data['admin_state_up'], original_agent['host']) return result def add_agent_status_check_worker(self, function): # TODO(enikanorov): make interval configurable rather than computed interval = max(cfg.CONF.agent_down_time // 2, 1) # add random initial delay to allow agents to check in after the # neutron server first starts. random to offset multiple servers initial_delay = random.randint(interval, interval * 2) check_worker = neutron_worker.PeriodicWorker(function, interval, initial_delay) self.add_worker(check_worker) def agent_dead_limit_seconds(self): return cfg.CONF.agent_down_time * 2 def wait_down_agents(self, agent_type, agent_dead_limit): """Gives chance for agents to send a heartbeat.""" # check for an abrupt clock change since last check. if a change is # detected, sleep for a while to let the agents check in. tdelta = timeutils.utcnow() - getattr(self, '_clock_jump_canary', timeutils.utcnow()) if tdelta.total_seconds() > cfg.CONF.agent_down_time: LOG.warning("Time since last %s agent reschedule check has " "exceeded the interval between checks. 
Waiting " "before check to allow agents to send a heartbeat " "in case there was a clock adjustment.", agent_type) time.sleep(agent_dead_limit) self._clock_jump_canary = timeutils.utcnow() def get_cutoff_time(self, agent_dead_limit): cutoff = timeutils.utcnow() - datetime.timedelta( seconds=agent_dead_limit) return cutoff def reschedule_resources_from_down_agents(self, agent_type, get_down_bindings, agent_id_attr, resource_id_attr, resource_name, reschedule_resource, rescheduling_failed): """Reschedule resources from down neutron agents if admin state is up. """ agent_dead_limit = self.agent_dead_limit_seconds() self.wait_down_agents(agent_type, agent_dead_limit) context = ncontext.get_admin_context() try: down_bindings = get_down_bindings(context, agent_dead_limit) agents_back_online = set() for binding in down_bindings: binding_agent_id = getattr(binding, agent_id_attr) binding_resource_id = getattr(binding, resource_id_attr) if binding_agent_id in agents_back_online: continue else: # we need new context to make sure we use different DB # transaction - otherwise we may fetch same agent record # each time due to REPEATABLE_READ isolation level context = ncontext.get_admin_context() agent = self._get_agent(context, binding_agent_id) if agent.is_active: agents_back_online.add(binding_agent_id) continue LOG.warning( "Rescheduling %(resource_name)s %(resource)s from agent " "%(agent)s because the agent did not report to the server " "in the last %(dead_time)s seconds.", {'resource_name': resource_name, 'resource': binding_resource_id, 'agent': binding_agent_id, 'dead_time': agent_dead_limit}) try: reschedule_resource(context, binding_resource_id) except (rescheduling_failed, oslo_messaging.RemoteError): # Catch individual rescheduling errors here # so one broken one doesn't stop the iteration. LOG.exception("Failed to reschedule %(resource_name)s " "%(resource)s", {'resource_name': resource_name, 'resource': binding_resource_id}) except Exception: # we want to be thorough and catch whatever is raised # to avoid loop abortion LOG.exception("Exception encountered during %(resource_name)s " "rescheduling.", {'resource_name': resource_name}) class DhcpAgentSchedulerDbMixin(dhcpagentscheduler .DhcpAgentSchedulerPluginBase, AgentSchedulerDbMixin): """Mixin class to add DHCP agent scheduler extension to db_base_plugin_v2. """ network_scheduler = None def add_periodic_dhcp_agent_status_check(self): if not cfg.CONF.allow_automatic_dhcp_failover: LOG.info("Skipping periodic DHCP agent status check because " "automatic network rescheduling is disabled.") return self.add_agent_status_check_worker( self.remove_networks_from_down_agents ) def is_eligible_agent(self, context, active, agent): # eligible agent is active or starting up return (AgentSchedulerDbMixin.is_eligible_agent(active, agent) or self.agent_starting_up(context, agent)) def agent_starting_up(self, context, agent): """Check if agent was just started. Method returns True if agent is in its 'starting up' period. Return value depends on amount of networks assigned to the agent. It doesn't look at latest heartbeat timestamp as it is assumed that this method is called for agents that are considered dead. """ agent_dead_limit = datetime.timedelta( seconds=self.agent_dead_limit_seconds()) network_count = network.NetworkDhcpAgentBinding.count( context, dhcp_agent_id=agent['id']) # amount of networks assigned to agent affect amount of time we give # it so startup. 
Tests show that it's more or less safe to assume # that the DHCP agent processes each network in less than 2 seconds. # So, give it this additional time for each of the networks. additional_time = datetime.timedelta(seconds=2 * network_count) LOG.debug("Checking if agent starts up and giving it additional %s", additional_time) agent_expected_up = (agent['started_at'] + agent_dead_limit + additional_time) return agent_expected_up > timeutils.utcnow() def _schedule_network(self, context, network_id, dhcp_notifier): LOG.info("Scheduling unhosted network %s", network_id) try: # TODO(enikanorov): have to issue a redundant db query # to satisfy the scheduling interface network = self.get_network(context, network_id) agents = self.schedule_network(context, network) if not agents: LOG.info("Failed to schedule network %s, " "no eligible agents or it might " "already be scheduled by another server", network_id) return if not dhcp_notifier: return for agent in agents: LOG.info("Adding network %(net)s to agent " "%(agent)s on host %(host)s", {'net': network_id, 'agent': agent.id, 'host': agent.host}) dhcp_notifier.network_added_to_agent( context, network_id, agent.host) except Exception: # catch any exception during scheduling so that if # _schedule_network is invoked in a loop it can # continue in any case LOG.exception("Failed to schedule network %s", network_id) def _filter_bindings(self, context, bindings): """Skip bindings for which the agent is dead, but starting up.""" # to save a few db calls: store already checked agents in a dict # id -> is_agent_starting_up checked_agents = {} for binding in bindings: try: agent_id = binding.db_obj.dhcp_agent['id'] if agent_id not in checked_agents: if self.agent_starting_up(context, binding.db_obj.dhcp_agent): # When an agent starts and has many networks to process, # it may fail to send state reports in the defined # interval. The server will consider it dead and try to # remove networks from it. checked_agents[agent_id] = True LOG.debug("Agent %s is starting up, skipping", agent_id) else: checked_agents[agent_id] = False if not checked_agents[agent_id]: yield binding except exc.ObjectDeletedError: # we're not within a transaction, so the object can be lost # because the underlying row is removed; just ignore this issue LOG.debug("binding was removed concurrently, skipping it") def remove_networks_from_down_agents(self): """Remove networks from down DHCP agents if admin state is up. Reschedule them if configured to do so. """ agent_dead_limit = self.agent_dead_limit_seconds() self.wait_down_agents('DHCP', agent_dead_limit) cutoff = self.get_cutoff_time(agent_dead_limit) context = ncontext.get_admin_context() try: down_bindings = network.NetworkDhcpAgentBinding.get_down_bindings( context, cutoff) dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP) dead_bindings = [b for b in self._filter_bindings(context, down_bindings)] agents = self.get_agent_objects( context, {'agent_type': [constants.AGENT_TYPE_DHCP]}) if not agents: # No agents configured so nothing to do.
return active_agents = [agent for agent in agents if self.is_eligible_agent(context, True, agent)] if not active_agents: LOG.warning("No DHCP agents available, " "skipping rescheduling") return for binding in dead_bindings: LOG.warning("Removing network %(network)s from agent " "%(agent)s because the agent did not report " "to the server in the last %(dead_time)s " "seconds.", {'network': binding.network_id, 'agent': binding.dhcp_agent_id, 'dead_time': agent_dead_limit}) # save binding object to avoid ObjectDeletedError # in case binding is concurrently deleted from the DB saved_binding = {'net': binding.network_id, 'agent': binding.dhcp_agent_id} try: # do not notify agent if it considered dead # so when it is restarted it won't see network delete # notifications on its queue self.remove_network_from_dhcp_agent(context, binding.dhcp_agent_id, binding.network_id, notify=False) except das_exc.NetworkNotHostedByDhcpAgent: # measures against concurrent operation LOG.debug("Network %(net)s already removed from DHCP " "agent %(agent)s", saved_binding) # still continue and allow concurrent scheduling attempt except Exception: LOG.exception("Unexpected exception occurred while " "removing network %(net)s from agent " "%(agent)s", saved_binding) if cfg.CONF.network_auto_schedule: self._schedule_network( context, saved_binding['net'], dhcp_notifier) except Exception: # we want to be thorough and catch whatever is raised # to avoid loop abortion LOG.exception("Exception encountered during network " "rescheduling") def get_dhcp_agents_hosting_networks( self, context, network_ids, active=None, admin_state_up=None, hosts=None): if not network_ids: return [] # get all the NDAB objects, which will also fetch (from DB) # the related dhcp_agent objects because of the synthetic field bindings = network.NetworkDhcpAgentBinding.get_objects( context, network_id=network_ids) # get the already fetched dhcp_agent objects agent_objs = [binding.db_obj.dhcp_agent for binding in bindings] # filter the dhcp_agent objects on admin_state_up if admin_state_up is not None: agent_objs = [agent for agent in agent_objs if agent.admin_state_up == admin_state_up] # filter the dhcp_agent objects on hosts if hosts: agent_objs = [agent for agent in agent_objs if agent.host in hosts] # finally filter if the agents are eligible return [agent for agent in agent_objs if self.is_eligible_agent(context, active, agent)] def add_network_to_dhcp_agent(self, context, id, network_id): self._get_network(context, network_id) with db_api.CONTEXT_WRITER.using(context): agent_db = self._get_agent(context, id) if (agent_db['agent_type'] != constants.AGENT_TYPE_DHCP or not services_available(agent_db['admin_state_up'])): raise das_exc.InvalidDHCPAgent(id=id) dhcp_agents = self.get_dhcp_agents_hosting_networks( context, [network_id]) for dhcp_agent in dhcp_agents: if id == dhcp_agent.id: raise das_exc.NetworkHostedByDHCPAgent( network_id=network_id, agent_id=id) self.network_scheduler.resource_filter.bind( context, [agent_db], network_id, force_scheduling=True) dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP) if dhcp_notifier: dhcp_notifier.network_added_to_agent( context, network_id, agent_db.host) def remove_network_from_dhcp_agent(self, context, id, network_id, notify=True): agent = self._get_agent(context, id) binding_obj = network.NetworkDhcpAgentBinding.get_object( context, network_id=network_id, dhcp_agent_id=id) if not binding_obj: raise das_exc.NetworkNotHostedByDhcpAgent( network_id=network_id, agent_id=id) # reserve 
the port, so the ip is reused on a subsequent add device_id = utils.get_dhcp_agent_device_id(network_id, agent['host']) filters = dict(device_id=[device_id]) ports = self.get_ports(context, filters=filters) # NOTE(kevinbenton): there should only ever be one port per # DHCP agent per network so we don't have to worry about one # update_port passing and another failing for port in ports: port['device_id'] = constants.DEVICE_ID_RESERVED_DHCP_PORT try: self.update_port(context, port['id'], dict(port=port)) except n_exc.PortNotFound: LOG.debug("DHCP port %s has been deleted concurrently", port['id']) binding_obj.delete() if not notify: return dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP) if dhcp_notifier: dhcp_notifier.network_removed_from_agent( context, network_id, agent.host) def list_networks_on_dhcp_agent(self, context, id): objs = network.NetworkDhcpAgentBinding.get_objects(context, dhcp_agent_id=id) net_ids = [item.network_id for item in objs] if net_ids: return {'networks': self.get_networks(context, filters={'id': net_ids})} else: # Exception will be thrown if the requested agent does not exist. self._get_agent(context, id) return {'networks': []} def list_active_networks_on_active_dhcp_agent(self, context, host): try: agent = self._get_agent_by_type_and_host( context, constants.AGENT_TYPE_DHCP, host) except agent_exc.AgentNotFoundByTypeHost: LOG.debug("DHCP Agent not found on host %s", host) return [] if not services_available(agent.admin_state_up): return [] query = network.NetworkDhcpAgentBinding.get_objects( context, dhcp_agent_id=agent.id) net_ids = [item.network_id for item in query] if net_ids: return self.get_networks( context, filters={'id': net_ids, 'admin_state_up': [True]} ) else: return [] def list_dhcp_agents_hosting_network(self, context, network_id): dhcp_agents = self.get_dhcp_agents_hosting_networks( context, [network_id]) agent_ids = [dhcp_agent.id for dhcp_agent in dhcp_agents] if agent_ids: return { 'agents': self.get_agents(context, filters={'id': agent_ids})} else: return {'agents': []} def schedule_network(self, context, created_network): if self.network_scheduler and cfg.CONF.network_auto_schedule: return self.network_scheduler.schedule( self, context, created_network) def auto_schedule_networks(self, context, host): if self.network_scheduler: self.network_scheduler.auto_schedule_networks(self, context, host) class AZDhcpAgentSchedulerDbMixin(DhcpAgentSchedulerDbMixin, network_az.NetworkAvailabilityZoneMixin): """Mixin class to add availability_zone supported DHCP agent scheduler.""" def get_network_availability_zones(self, network): zones = {agent.availability_zone for agent in network.dhcp_agents} return list(zones) # helper functions for readability. 
def services_available(admin_state_up): if cfg.CONF.enable_services_on_agents_with_admin_state_down: # Services are available regardless admin_state_up return True return admin_state_up def get_admin_state_up_filter(): if cfg.CONF.enable_services_on_agents_with_admin_state_down: # Avoid filtering on admin_state_up at all return None # Filters on admin_state_up is True return True ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2550442 neutron-16.0.0.0b2.dev214/neutron/db/allowed_address_pairs/0000755000175000017500000000000000000000000023554 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/allowed_address_pairs/__init__.py0000644000175000017500000000000000000000000025653 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/allowedaddresspairs_db.py0000644000175000017500000001465200000000000024305 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron_lib.api.definitions import allowedaddresspairs as addr_apidef from neutron_lib.api.definitions import port as port_def from neutron_lib.api import validators from neutron_lib.db import api as db_api from neutron_lib.db import resource_extend from neutron_lib.db import utils as db_utils from neutron_lib.exceptions import allowedaddresspairs as addr_exc from neutron_lib.objects import exceptions from neutron_lib.utils import net as net_utils from neutron.objects.port.extensions import (allowedaddresspairs as obj_addr_pair) @resource_extend.has_resource_extenders class AllowedAddressPairsMixin(object): """Mixin class for allowed address pairs.""" def _process_create_allowed_address_pairs(self, context, port, allowed_address_pairs): if not validators.is_attr_set(allowed_address_pairs): return [] try: with db_api.CONTEXT_WRITER.using(context): for address_pair in allowed_address_pairs: # use port.mac_address if no mac address in address pair if 'mac_address' not in address_pair: address_pair['mac_address'] = port['mac_address'] # retain string format as passed through API mac_address = net_utils.AuthenticEUI( address_pair['mac_address']) ip_address = net_utils.AuthenticIPNetwork( address_pair['ip_address']) pair_obj = obj_addr_pair.AllowedAddressPair( context, port_id=port['id'], mac_address=mac_address, ip_address=ip_address) pair_obj.create() except exceptions.NeutronDbObjectDuplicateEntry: raise addr_exc.DuplicateAddressPairInRequest( mac_address=address_pair['mac_address'], ip_address=address_pair['ip_address']) return allowed_address_pairs def get_allowed_address_pairs(self, context, port_id): pairs = obj_addr_pair.AllowedAddressPair.get_objects( context, port_id=port_id) return [self._make_allowed_address_pairs_dict(pair.db_obj) for pair in pairs] @staticmethod 
@resource_extend.extends([port_def.COLLECTION_NAME]) def _extend_port_dict_allowed_address_pairs(port_res, port_db): # If port_db is provided, allowed address pairs will be accessed via # sqlalchemy models. As they're loaded together with ports this # will not cause an extra query. allowed_address_pairs = [ AllowedAddressPairsMixin._make_allowed_address_pairs_dict( address_pair) for address_pair in port_db.allowed_address_pairs] port_res[addr_apidef.ADDRESS_PAIRS] = allowed_address_pairs return port_res def _delete_allowed_address_pairs(self, context, id): obj_addr_pair.AllowedAddressPair.delete_objects( context, port_id=id) @staticmethod def _make_allowed_address_pairs_dict(allowed_address_pairs, fields=None): res = {'mac_address': allowed_address_pairs['mac_address'], 'ip_address': allowed_address_pairs['ip_address']} return db_utils.resource_fields(res, fields) def _has_address_pairs(self, port): return (validators.is_attr_set( port['port'][addr_apidef.ADDRESS_PAIRS]) and port['port'][addr_apidef.ADDRESS_PAIRS] != []) def _check_update_has_allowed_address_pairs(self, port): """Determine if request has an allowed address pair. Return True if the port parameter has a non-empty 'allowed_address_pairs' attribute. Otherwise returns False. """ return (addr_apidef.ADDRESS_PAIRS in port['port'] and self._has_address_pairs(port)) def _check_update_deletes_allowed_address_pairs(self, port): """Determine if request deletes address pair. Return True if port has an allowed address pair and its value is either [] or not is_attr_set, otherwise return False """ return (addr_apidef.ADDRESS_PAIRS in port['port'] and not self._has_address_pairs(port)) def is_address_pairs_attribute_updated(self, port, update_attrs): """Check if the address pairs attribute is being updated. Returns True if there is an update. This can be used to decide if a port update notification should be sent to agents or third party controllers. """ new_pairs = update_attrs.get(addr_apidef.ADDRESS_PAIRS) if new_pairs is None: return False old_pairs = port.get(addr_apidef.ADDRESS_PAIRS) # Missing or unchanged address pairs in attributes mean no update return new_pairs != old_pairs def update_address_pairs_on_port(self, context, port_id, port, original_port, updated_port): """Update allowed address pairs on port. Returns True if an update notification is required. Notification is not done here because other changes on the port may need notification. This method is expected to be called within a transaction. 
""" new_pairs = port['port'].get(addr_apidef.ADDRESS_PAIRS) if self.is_address_pairs_attribute_updated(original_port, port['port']): updated_port[addr_apidef.ADDRESS_PAIRS] = new_pairs self._delete_allowed_address_pairs(context, port_id) self._process_create_allowed_address_pairs( context, updated_port, new_pairs) return True return False ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2550442 neutron-16.0.0.0b2.dev214/neutron/db/availability_zone/0000755000175000017500000000000000000000000022727 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/availability_zone/__init__.py0000644000175000017500000000000000000000000025026 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/availability_zone/network.py0000644000175000017500000000273600000000000025002 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import availability_zone as az_def from neutron_lib.api.definitions import network as net_def from neutron_lib.api.validators import availability_zone as az_validator from neutron_lib.db import resource_extend from neutron_lib.plugins import directory from neutron.extensions import network_availability_zone as net_az @resource_extend.has_resource_extenders class NetworkAvailabilityZoneMixin(net_az.NetworkAvailabilityZonePluginBase): """Mixin class to enable network's availability zone attributes.""" @staticmethod @resource_extend.extends([net_def.COLLECTION_NAME]) def _extend_availability_zone(net_res, net_db): net_res[az_def.AZ_HINTS] = az_validator.convert_az_string_to_list( net_db[az_def.AZ_HINTS]) plugin = directory.get_plugin() net_res[az_def.COLLECTION_NAME] = ( plugin.get_network_availability_zones(net_db)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/availability_zone/router.py0000644000175000017500000000416000000000000024622 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.api.definitions import availability_zone as az_def from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.api import extensions from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib.db import resource_extend from neutron_lib.plugins import constants from neutron_lib.plugins import directory from neutron.db import l3_attrs_db @resource_extend.has_resource_extenders @registry.has_registry_receivers class RouterAvailabilityZoneMixin(l3_attrs_db.ExtraAttributesMixin): """Mixin class to enable router's availability zone attributes.""" @staticmethod @resource_extend.extends([l3_apidef.ROUTERS]) def _add_az_to_response(router_res, router_db): l3_plugin = directory.get_plugin(constants.L3) if not extensions.is_extension_supported( l3_plugin, 'router_availability_zone'): return router_res['availability_zones'] = ( l3_plugin.get_router_availability_zones(router_db)) @registry.receives(resources.ROUTER, [events.PRECOMMIT_CREATE]) def _process_az_request(self, resource, event, trigger, context, router, router_db, **kwargs): if az_def.AZ_HINTS in router: self.validate_availability_zones(context, 'router', router[az_def.AZ_HINTS]) self.set_extra_attr_value(context, router_db, az_def.AZ_HINTS, router[az_def.AZ_HINTS]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/data_plane_status_db.py0000644000175000017500000000406500000000000023741 0ustar00coreycorey00000000000000# Copyright (c) 2017 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
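# RouterAvailabilityZoneMixin above subscribes to PRECOMMIT_CREATE router
# events through the callback registry, so AZ hints are validated and
# persisted inside the creation transaction. Below is a stripped-down
# sketch of that subscribe/publish flow; the _example_* names are
# illustrative assumptions, not neutron_lib's registry API.
from collections import defaultdict

_example_receivers = defaultdict(list)

def _example_subscribe(resource, event, callback):
    """Register callback for the given (resource, event) pair."""
    _example_receivers[(resource, event)].append(callback)

def _example_publish(resource, event, **payload):
    """Invoke every callback subscribed to (resource, event)."""
    for callback in _example_receivers[(resource, event)]:
        callback(resource, event, **payload)

# Usage sketch:
# _example_subscribe('router', 'precommit_create',
#                    lambda res, ev, **kw: kw['router'].setdefault(
#                        'availability_zone_hints', []))
# _example_publish('router', 'precommit_create', router={})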
from neutron_lib.api.definitions import data_plane_status as dps_lib from neutron.objects.port.extensions import data_plane_status as dps_obj class DataPlaneStatusMixin(object): """Mixin class to add data plane status to a port""" def _process_create_port_data_plane_status(self, context, data, res): obj = dps_obj.PortDataPlaneStatus( context, port_id=res['id'], data_plane_status=data[dps_lib.DATA_PLANE_STATUS]) obj.create() res[dps_lib.DATA_PLANE_STATUS] = data[dps_lib.DATA_PLANE_STATUS] def _process_update_port_data_plane_status(self, context, data, res): if dps_lib.DATA_PLANE_STATUS not in data: return obj = dps_obj.PortDataPlaneStatus.get_object(context, port_id=res['id']) if obj: obj.data_plane_status = data[dps_lib.DATA_PLANE_STATUS] obj.update() res[dps_lib.DATA_PLANE_STATUS] = data[dps_lib.DATA_PLANE_STATUS] else: self._process_create_port_data_plane_status(context, data, res) @staticmethod def _extend_port_data_plane_status(port_res, port_db): port_res[dps_lib.DATA_PLANE_STATUS] = None if port_db.get(dps_lib.DATA_PLANE_STATUS): port_res[dps_lib.DATA_PLANE_STATUS] = ( port_db[dps_lib.DATA_PLANE_STATUS].data_plane_status) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/db_base_plugin_common.py0000644000175000017500000003575100000000000024114 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
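# DataPlaneStatusMixin above follows a create-or-update pattern for the
# per-port extension row: update the row when it already exists, otherwise
# fall back to the create path. Below is an in-memory sketch of that same
# control flow; the _Example* names and the dict-backed "table" are
# illustrative assumptions, not Neutron objects.
class _ExamplePortStatusRow(object):
    def __init__(self, port_id, status):
        self.port_id = port_id
        self.status = status

_example_rows = {}  # port_id -> _ExamplePortStatusRow

def _example_process_update(port_id, data):
    """Upsert the data plane status for port_id; None if not in request."""
    if 'data_plane_status' not in data:
        return None
    row = _example_rows.get(port_id)
    if row:  # update path: the extension row already exists
        row.status = data['data_plane_status']
    else:    # create path: first write for this port
        row = _ExamplePortStatusRow(port_id, data['data_plane_status'])
        _example_rows[port_id] = row
    return row.status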
import functools import netaddr import six from neutron_lib.api.definitions import network as net_def from neutron_lib.api.definitions import port as port_def from neutron_lib.api.definitions import subnet as subnet_def from neutron_lib.api.definitions import subnetpool as subnetpool_def from neutron_lib.api import validators from neutron_lib import constants from neutron_lib.db import api as db_api from neutron_lib.db import model_query from neutron_lib.db import resource_extend from neutron_lib.db import utils as db_utils from neutron_lib import exceptions from neutron_lib.utils import net from oslo_config import cfg from oslo_log import log as logging from sqlalchemy.orm import exc from neutron.db import models_v2 from neutron.objects import base as base_obj from neutron.objects import ports as port_obj from neutron.objects import subnet as subnet_obj from neutron.objects import subnetpool as subnetpool_obj LOG = logging.getLogger(__name__) def convert_result_to_dict(f): @functools.wraps(f) def inner(*args, **kwargs): result = f(*args, **kwargs) if result is None: return None elif isinstance(result, list): return [r.to_dict() for r in result] else: return result.to_dict() return inner def filter_fields(f): @functools.wraps(f) def inner_filter(*args, **kwargs): result = f(*args, **kwargs) fields = kwargs.get('fields') if not fields: try: pos = f.__code__.co_varnames.index('fields') fields = args[pos] except (IndexError, ValueError): return result do_filter = lambda d: {k: v for k, v in d.items() if k in fields} if isinstance(result, list): return [do_filter(obj) for obj in result] else: return do_filter(result) return inner_filter def make_result_with_fields(f): @functools.wraps(f) def inner(*args, **kwargs): fields = kwargs.get('fields') result = f(*args, **kwargs) if fields is None: return result elif isinstance(result, list): return [db_utils.resource_fields(r, fields) for r in result] else: return db_utils.resource_fields(result, fields) return inner class DbBasePluginCommon(object): """Stores getters and helper methods for db_base_plugin_v2 All private getters and simple helpers like _make_*_dict were moved from db_base_plugin_v2. More complicated logic and public methods left in db_base_plugin_v2. Main purpose of this class is to make getters accessible for Ipam backends. 
""" @staticmethod def _generate_macs(mac_count=1): mac_maker = net.random_mac_generator(cfg.CONF.base_mac.split(':')) return [six.next(mac_maker) for x in range(mac_count)] @db_api.CONTEXT_READER def _is_mac_in_use(self, context, network_id, mac_address): return port_obj.Port.objects_exist(context, network_id=network_id, mac_address=mac_address) @staticmethod def _delete_ip_allocation(context, network_id, subnet_id, ip_address): # Delete the IP address from the IPAllocate table LOG.debug("Delete allocated IP %(ip_address)s " "(%(network_id)s/%(subnet_id)s)", {'ip_address': ip_address, 'network_id': network_id, 'subnet_id': subnet_id}) port_obj.IPAllocation.delete_objects( context, network_id=network_id, ip_address=ip_address, subnet_id=subnet_id) @staticmethod @db_api.CONTEXT_WRITER def _store_ip_allocation(context, ip_address, network_id, subnet_id, port_id): LOG.debug("Allocated IP %(ip_address)s " "(%(network_id)s/%(subnet_id)s/%(port_id)s)", {'ip_address': ip_address, 'network_id': network_id, 'subnet_id': subnet_id, 'port_id': port_id}) allocated = port_obj.IPAllocation( context, network_id=network_id, port_id=port_id, ip_address=ip_address, subnet_id=subnet_id) # NOTE(lujinluo): Add IPAllocations obj to the port fixed_ips # in Port OVO integration, i.e. the same way we did in # Ib32509d974c8654131112234bcf19d6eae8f7cca allocated.create() def _make_subnet_dict(self, subnet, fields=None, context=None): res = {'id': subnet['id'], 'name': subnet['name'], 'tenant_id': subnet['tenant_id'], 'network_id': subnet['network_id'], 'ip_version': subnet['ip_version'], 'subnetpool_id': subnet['subnetpool_id'], 'enable_dhcp': subnet['enable_dhcp'], 'ipv6_ra_mode': subnet['ipv6_ra_mode'], 'ipv6_address_mode': subnet['ipv6_address_mode'], } res['gateway_ip'] = str( subnet['gateway_ip']) if subnet['gateway_ip'] else None # TODO(korzen) this method can get subnet as DB object or Subnet OVO, # so temporary workaround will be to fill in the fields in separate # ways. 
After converting all code pieces to use Subnet OVO, the latter # 'else' can be deleted if isinstance(subnet, subnet_obj.Subnet): res['cidr'] = str(subnet.cidr) res['allocation_pools'] = [{'start': str(pool.start), 'end': str(pool.end)} for pool in subnet.allocation_pools] res['host_routes'] = [{'destination': str(route.destination), 'nexthop': str(route.nexthop)} for route in subnet.host_routes] res['dns_nameservers'] = [str(dns.address) for dns in subnet.dns_nameservers] res['shared'] = subnet.shared # Call auxiliary extend functions, if any resource_extend.apply_funcs(subnet_def.COLLECTION_NAME, res, subnet.db_obj) else: res['cidr'] = subnet['cidr'] res['allocation_pools'] = [{'start': pool['first_ip'], 'end': pool['last_ip']} for pool in subnet['allocation_pools']] res['host_routes'] = [{'destination': route['destination'], 'nexthop': route['nexthop']} for route in subnet['routes']] res['dns_nameservers'] = [dns['address'] for dns in subnet['dns_nameservers']] # The shared attribute for a subnet is the same # as its parent network res['shared'] = self._is_network_shared(context, subnet.rbac_entries) # Call auxiliary extend functions, if any resource_extend.apply_funcs(subnet_def.COLLECTION_NAME, res, subnet) return db_utils.resource_fields(res, fields) def _make_subnetpool_dict(self, subnetpool, fields=None): default_prefixlen = str(subnetpool['default_prefixlen']) min_prefixlen = str(subnetpool['min_prefixlen']) max_prefixlen = str(subnetpool['max_prefixlen']) res = {'id': subnetpool['id'], 'name': subnetpool['name'], 'tenant_id': subnetpool['tenant_id'], 'default_prefixlen': default_prefixlen, 'min_prefixlen': min_prefixlen, 'max_prefixlen': max_prefixlen, 'is_default': subnetpool['is_default'], 'shared': subnetpool['shared'], 'prefixes': [str(prefix.cidr) for prefix in subnetpool['prefixes']], 'ip_version': subnetpool['ip_version'], 'default_quota': subnetpool['default_quota'], 'address_scope_id': subnetpool['address_scope_id']} resource_extend.apply_funcs( subnetpool_def.COLLECTION_NAME, res, subnetpool.db_obj) return db_utils.resource_fields(res, fields) def _make_port_dict(self, port, fields=None, process_extensions=True, with_fixed_ips=True): mac = port["mac_address"] if isinstance(mac, netaddr.EUI): mac.dialect = netaddr.mac_unix_expanded res = {"id": port["id"], 'name': port['name'], "network_id": port["network_id"], 'tenant_id': port['tenant_id'], "mac_address": str(mac), "admin_state_up": port["admin_state_up"], "status": port["status"], "device_id": port["device_id"], "device_owner": port["device_owner"]} if with_fixed_ips: res["fixed_ips"] = [ {'subnet_id': ip["subnet_id"], 'ip_address': str( ip["ip_address"])} for ip in port["fixed_ips"]] # Call auxiliary extend functions, if any if process_extensions: port_data = port if isinstance(port, port_obj.Port): port_data = port.db_obj resource_extend.apply_funcs( port_def.COLLECTION_NAME, res, port_data) return db_utils.resource_fields(res, fields) def _get_network(self, context, id): try: network = model_query.get_by_id(context, models_v2.Network, id) except exc.NoResultFound: raise exceptions.NetworkNotFound(net_id=id) return network def _get_subnet(self, context, id): # TODO(slaweq): remove this method when all will be switched to use OVO # objects only try: subnet = model_query.get_by_id(context, models_v2.Subnet, id) except exc.NoResultFound: raise exceptions.SubnetNotFound(subnet_id=id) return subnet def _get_subnet_object(self, context, id): subnet = subnet_obj.Subnet.get_object(context, id=id) if not subnet: raise 
exceptions.SubnetNotFound(subnet_id=id) return subnet def _get_subnetpool(self, context, id): subnetpool = subnetpool_obj.SubnetPool.get_object( context, id=id) if not subnetpool: raise exceptions.SubnetPoolNotFound(subnetpool_id=id) return subnetpool def _get_port(self, context, id): try: port = model_query.get_by_id(context, models_v2.Port, id) except exc.NoResultFound: raise exceptions.PortNotFound(port_id=id) return port def _get_route_by_subnet(self, context, subnet_id): return subnet_obj.Route.get_objects(context, subnet_id=subnet_id) def _get_router_gw_ports_by_network(self, context, network_id): return port_obj.Port.get_objects( context, network_id=network_id, device_owner=constants.DEVICE_OWNER_ROUTER_GW) @db_api.CONTEXT_READER def _get_subnets_by_network(self, context, network_id): return subnet_obj.Subnet.get_objects(context, network_id=network_id) @db_api.CONTEXT_READER def _get_subnets_by_subnetpool(self, context, subnetpool_id): return subnet_obj.Subnet.get_objects(context, subnetpool_id=subnetpool_id) def _get_subnets(self, context, filters=None, sorts=None, limit=None, marker=None, page_reverse=False): pager = base_obj.Pager(sorts, limit, page_reverse, marker) filters = filters or {} # turn the CIDRs into a proper subnets if filters.get('cidr'): filters.update( {'cidr': [netaddr.IPNetwork(x).cidr for x in filters['cidr']]}) return subnet_obj.Subnet.get_objects(context, _pager=pager, validate_filters=False, **filters) def _make_network_dict(self, network, fields=None, process_extensions=True, context=None): res = {'id': network['id'], 'name': network['name'], 'tenant_id': network['tenant_id'], 'admin_state_up': network['admin_state_up'], 'mtu': network.get('mtu', constants.DEFAULT_NETWORK_MTU), 'status': network['status'], 'subnets': [subnet['id'] for subnet in network['subnets']]} res['shared'] = self._is_network_shared(context, network.rbac_entries) # Call auxiliary extend functions, if any if process_extensions: resource_extend.apply_funcs(net_def.COLLECTION_NAME, res, network) return db_utils.resource_fields(res, fields) def _is_network_shared(self, context, rbac_entries): # The shared attribute for a network now reflects if the network # is shared to the calling tenant via an RBAC entry. 
matches = ('*',) + ((context.tenant_id,) if context else ()) for entry in rbac_entries: if (entry.action == 'access_as_shared' and entry.target_tenant in matches): return True return False def _make_subnet_args(self, detail, subnet, subnetpool_id): args = {'project_id': detail.tenant_id, 'id': detail.subnet_id, 'name': subnet['name'], 'network_id': subnet['network_id'], 'ip_version': subnet['ip_version'], 'cidr': detail.subnet_cidr, 'subnetpool_id': subnetpool_id, 'enable_dhcp': subnet['enable_dhcp'], 'gateway_ip': detail.gateway_ip, 'description': subnet.get('description')} if subnet['ip_version'] == 6 and subnet['enable_dhcp']: if validators.is_attr_set(subnet['ipv6_ra_mode']): args['ipv6_ra_mode'] = subnet['ipv6_ra_mode'] if validators.is_attr_set(subnet['ipv6_address_mode']): args['ipv6_address_mode'] = subnet['ipv6_address_mode'] return args def _make_fixed_ip_dict(self, ips): # Excludes from dict all keys except subnet_id and ip_address return [{'subnet_id': ip["subnet_id"], 'ip_address': ip["ip_address"]} for ip in ips] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/db_base_plugin_v2.py0000644000175000017500000023534700000000000023156 0ustar00coreycorey00000000000000# Copyright (c) 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
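# _is_network_shared above treats a network as shared to the caller when
# any 'access_as_shared' RBAC entry targets either the wildcard '*' or the
# calling tenant. Below is a self-contained sketch of that matching rule
# over plain dicts; the _example_* name and data shapes are illustrative
# assumptions, not Neutron's RBAC models.
def _example_is_shared(rbac_entries, tenant_id=None):
    matches = ('*',) + ((tenant_id,) if tenant_id else ())
    return any(entry['action'] == 'access_as_shared' and
               entry['target_tenant'] in matches
               for entry in rbac_entries)

# Usage sketch: a wildcard entry shares the network with everyone.
# _example_is_shared(
#     [{'action': 'access_as_shared', 'target_tenant': '*'}])  # True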
import functools import netaddr from neutron_lib.api.definitions import ip_allocation as ipalloc_apidef from neutron_lib.api.definitions import port as port_def from neutron_lib.api.definitions import portbindings as portbindings_def from neutron_lib.api.definitions import subnetpool as subnetpool_def from neutron_lib.api import validators from neutron_lib.callbacks import events from neutron_lib.callbacks import exceptions from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib import context as ctx from neutron_lib.db import api as db_api from neutron_lib.db import model_query from neutron_lib.db import resource_extend from neutron_lib.db import utils as ndb_utils from neutron_lib import exceptions as exc from neutron_lib.exceptions import address_scope as addr_scope_exc from neutron_lib.exceptions import l3 as l3_exc from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from oslo_config import cfg from oslo_db import exception as os_db_exc from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import uuidutils from sqlalchemy import exc as sql_exc from sqlalchemy import func from sqlalchemy import not_ from neutron._i18n import _ from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api from neutron.common import ipv6_utils from neutron.common import utils from neutron.db import db_base_plugin_common from neutron.db import ipam_pluggable_backend from neutron.db import models_v2 from neutron.db import rbac_db_mixin as rbac_mixin from neutron.db import standardattrdescription_db as stattr_db from neutron.extensions import subnetpool_prefix_ops from neutron import ipam from neutron.ipam import exceptions as ipam_exc from neutron.ipam import subnet_alloc from neutron import neutron_plugin_base_v2 from neutron.objects import address_scope as address_scope_obj from neutron.objects import base as base_obj from neutron.objects import network as network_obj from neutron.objects import ports as port_obj from neutron.objects import subnet as subnet_obj from neutron.objects import subnetpool as subnetpool_obj LOG = logging.getLogger(__name__) # Ports with the following 'device_owner' values will not prevent # network deletion. If delete_network() finds that all ports on a # network have these owners, it will explicitly delete each port # and allow network deletion to continue. Similarly, if delete_subnet() # finds out that all existing IP Allocations are associated with ports # with these owners, it will allow subnet deletion to proceed with the # IP allocations being cleaned up by cascade. 
AUTO_DELETE_PORT_OWNERS = [constants.DEVICE_OWNER_DHCP] def _check_subnet_not_used(context, subnet_id): try: registry.publish( resources.SUBNET, events.BEFORE_DELETE, None, payload=events.DBEventPayload(context, resource_id=subnet_id)) except exceptions.CallbackFailure as e: raise exc.SubnetInUse(subnet_id=subnet_id, reason=e) def _update_subnetpool_dict(orig_pool, new_pool): updated = dict((k, v) for k, v in orig_pool.to_dict().items() if k not in orig_pool.synthetic_fields or k == 'shared') new_pool = new_pool.copy() new_prefixes = new_pool.pop('prefixes', constants.ATTR_NOT_SPECIFIED) for k, v in new_pool.items(): if k not in orig_pool.fields_no_update: updated[k] = v if new_prefixes is not constants.ATTR_NOT_SPECIFIED: orig_ip_set = netaddr.IPSet(orig_pool.prefixes) new_ip_set = netaddr.IPSet(new_prefixes) if not orig_ip_set.issubset(new_ip_set): msg = _("Existing prefixes must be " "a subset of the new prefixes") raise exc.IllegalSubnetPoolPrefixUpdate(msg=msg) new_ip_set.compact() updated['prefixes'] = [str(prefix.cidr) for prefix in new_ip_set.iter_cidrs()] else: updated['prefixes'] = [str(prefix) for prefix in orig_pool.prefixes] return updated def _port_filter_hook(context, original_model, conditions): # Apply the port filter only in non-admin and non-advsvc context if ndb_utils.model_query_scope_is_project(context, original_model): conditions |= (models_v2.Port.network_id.in_( context.session.query(models_v2.Network.id). filter(context.project_id == models_v2.Network.project_id). subquery())) return conditions @registry.has_registry_receivers class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, neutron_plugin_base_v2.NeutronPluginBaseV2, rbac_mixin.RbacPluginMixin, stattr_db.StandardAttrDescriptionMixin): """V2 Neutron plugin interface implementation using SQLAlchemy models. Whenever a non-read call happens the plugin will call an event handler class method (e.g., network_created()). The result is that this class can be sub-classed by other classes that add custom behaviors on certain events. """ # This attribute specifies whether the plugin supports or not # bulk/pagination/sorting operations. Name mangling is used in # order to ensure it is qualified by class __native_bulk_support = True __native_pagination_support = True __native_sorting_support = True # This attribute specifies whether the plugin supports or not # filter validations. Name mangling is used in # order to ensure it is qualified by class __filter_validation_support = False def has_native_datastore(self): return True def __new__(cls, *args, **kwargs): model_query.register_hook( models_v2.Port, "port", query_hook=None, filter_hook=_port_filter_hook, result_filters=None) return super(NeutronDbPluginV2, cls).__new__(cls, *args, **kwargs) def __init__(self): self.set_ipam_backend() if (cfg.CONF.notify_nova_on_port_status_changes or cfg.CONF.notify_nova_on_port_data_changes): # Import nova conditionally to support the use case of Neutron # being used outside of an OpenStack context. from neutron.notifiers import nova self.nova_notifier = nova.Notifier.get_instance() # NOTE(arosen) These event listeners are here to hook into when # port status changes and notify nova about their change. 
if cfg.CONF.notify_nova_on_port_status_changes: db_api.sqla_listen(models_v2.Port, 'after_insert', self.nova_notifier.send_port_status) db_api.sqla_listen(models_v2.Port, 'after_update', self.nova_notifier.send_port_status) db_api.sqla_listen( models_v2.Port.status, 'set', self.nova_notifier.record_port_status_changed) if cfg.CONF.ironic.enable_notifications: # Import ironic notifier conditionally from neutron.notifiers import ironic self.ironic_notifier = ironic.Notifier.get_instance() @registry.receives(resources.RBAC_POLICY, [events.BEFORE_CREATE, events.BEFORE_UPDATE, events.BEFORE_DELETE]) def validate_network_rbac_policy_change(self, resource, event, trigger, payload=None): return self._validate_network_rbac_policy_change( resource, event, trigger, payload.context, payload) @db_api.retry_if_session_inactive() def _validate_network_rbac_policy_change(self, resource, event, trigger, context, payload): """Validates network RBAC policy changes. On creation, verify that the creator is an admin or that it owns the network it is sharing. On update and delete, make sure the tenant losing access does not have resources that depend on that access. """ object_type = payload.metadata.get('object_type') policy = (payload.request_body if event == events.BEFORE_CREATE else payload.latest_state) if object_type != 'network' or policy['action'] != 'access_as_shared': # we only care about shared network policies return # The object a policy targets cannot be changed so we can look # at the original network for the update event as well. net = self._get_network(context, policy['object_id']) if event in (events.BEFORE_CREATE, events.BEFORE_UPDATE): # we still have to verify that the caller owns the network because # _get_network will succeed on a shared network if not context.is_admin and net['tenant_id'] != context.tenant_id: msg = _("Only admins can manipulate policies on networks " "they do not own") raise exc.InvalidInput(error_message=msg) tenant_to_check = None self_sharing = policy['target_tenant'] == net['tenant_id'] if self_sharing: return if event == events.BEFORE_UPDATE: new_tenant = payload.request_body['target_tenant'] if policy['target_tenant'] != new_tenant: tenant_to_check = policy['target_tenant'] if event == events.BEFORE_DELETE: tenant_to_check = policy['target_tenant'] if tenant_to_check: self.ensure_no_tenant_ports_on_network(net['id'], net['tenant_id'], tenant_to_check) def ensure_no_tenant_ports_on_network(self, network_id, net_tenant_id, tenant_id): ctx_admin = ctx.get_admin_context() ports = model_query.query_with_hooks(ctx_admin, models_v2.Port).filter( models_v2.Port.network_id == network_id) if tenant_id == '*': # for the wildcard we need to get all of the rbac entries to # see if any allow the remaining ports on the network. 
# any port with another RBAC entry covering it or one belonging to # the same tenant as the network owner is ok other_rbac_objs = network_obj.NetworkRBAC.get_objects( ctx_admin, object_id=network_id, action='access_as_shared') allowed_tenants = [rbac['target_tenant'] for rbac in other_rbac_objs if rbac.target_tenant != tenant_id] allowed_tenants.append(net_tenant_id) ports = ports.filter( ~models_v2.Port.tenant_id.in_(allowed_tenants)) else: # if there is a wildcard rule, we can return early because it # allows any ports if network_obj.NetworkRBAC.get_object( ctx_admin, object_id=network_id, action='access_as_shared', target_tenant='*'): return ports = ports.filter(models_v2.Port.tenant_id == tenant_id) if ports.count(): raise exc.InvalidSharedSetting(network=network_id) def set_ipam_backend(self): self.ipam = ipam_pluggable_backend.IpamPluggableBackend() def _validate_host_route(self, route, ip_version): try: netaddr.IPNetwork(route['destination']) netaddr.IPAddress(route['nexthop']) except netaddr.core.AddrFormatError: err_msg = _("Invalid route: %s") % route raise exc.InvalidInput(error_message=err_msg) except ValueError: # netaddr.IPAddress would raise this err_msg = _("Invalid route: %s") % route raise exc.InvalidInput(error_message=err_msg) self._validate_ip_version(ip_version, route['nexthop'], 'nexthop') self._validate_ip_version(ip_version, route['destination'], 'destination') def _validate_shared_update(self, context, id, original, updated): # The only case that needs to be validated is when 'shared' # goes from True to False if updated['shared'] == original.shared or updated['shared']: return ports = model_query.query_with_hooks( context, models_v2.Port).filter(models_v2.Port.network_id == id) ports = ports.filter(not_(models_v2.Port.device_owner.startswith( constants.DEVICE_OWNER_NETWORK_PREFIX))) subnets = subnet_obj.Subnet.get_objects(context, network_id=id) tenant_ids = set([port['tenant_id'] for port in ports] + [subnet['tenant_id'] for subnet in subnets]) # raise if multiple tenants found or if the only tenant found # is not the owner of the network if (len(tenant_ids) > 1 or len(tenant_ids) == 1 and original.tenant_id not in tenant_ids): self._validate_projects_have_access_to_network( original, tenant_ids) def _validate_projects_have_access_to_network(self, network, project_ids): ctx_admin = ctx.get_admin_context() other_rbac_objs = network_obj.NetworkRBAC.get_objects( ctx_admin, object_id=network.id, action='access_as_shared') allowed_projects = {rbac['target_tenant'] for rbac in other_rbac_objs if rbac.target_tenant != '*'} allowed_projects.add(network.project_id) if project_ids - allowed_projects: raise exc.InvalidSharedSetting(network=network.name) def _validate_ipv6_attributes(self, subnet, cur_subnet): if cur_subnet: self._validate_ipv6_update_dhcp(subnet, cur_subnet) return ra_mode_set = validators.is_attr_set(subnet.get('ipv6_ra_mode')) address_mode_set = validators.is_attr_set( subnet.get('ipv6_address_mode')) self._validate_ipv6_dhcp(ra_mode_set, address_mode_set, subnet['enable_dhcp']) if ra_mode_set and address_mode_set: self._validate_ipv6_combination(subnet['ipv6_ra_mode'], subnet['ipv6_address_mode']) if address_mode_set or ra_mode_set: self._validate_eui64_applicable(subnet) def _validate_eui64_applicable(self, subnet): # Per RFC 4862, section 5.5.3, prefix length and interface # id together should be equal to 128. Currently neutron supports # EUI64 interface id only, thus limiting the prefix # length to be 64 only. 
if ipv6_utils.is_auto_address_subnet(subnet): if netaddr.IPNetwork(subnet['cidr']).prefixlen != 64: msg = _('Invalid CIDR %s for IPv6 address mode. ' 'OpenStack uses the EUI-64 address format, ' 'which requires the prefix to be /64') raise exc.InvalidInput( error_message=(msg % subnet['cidr'])) def _validate_ipv6_combination(self, ra_mode, address_mode): if ra_mode != address_mode: msg = _("ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode " "set to '%(addr_mode)s' is not valid. " "If both attributes are set, they must be the same value" ) % {'ra_mode': ra_mode, 'addr_mode': address_mode} raise exc.InvalidInput(error_message=msg) def _validate_ipv6_dhcp(self, ra_mode_set, address_mode_set, enable_dhcp): if (ra_mode_set or address_mode_set) and not enable_dhcp: msg = _("ipv6_ra_mode or ipv6_address_mode cannot be set when " "enable_dhcp is set to False") raise exc.InvalidInput(error_message=msg) def _validate_ipv6_update_dhcp(self, subnet, cur_subnet): if ('enable_dhcp' in subnet and not subnet['enable_dhcp']): msg = _("Cannot disable enable_dhcp with " "ipv6 attributes set") ra_mode_set = validators.is_attr_set(subnet.get('ipv6_ra_mode')) address_mode_set = validators.is_attr_set( subnet.get('ipv6_address_mode')) if ra_mode_set or address_mode_set: raise exc.InvalidInput(error_message=msg) old_ra_mode_set = validators.is_attr_set( cur_subnet.get('ipv6_ra_mode')) old_address_mode_set = validators.is_attr_set( cur_subnet.get('ipv6_address_mode')) if old_ra_mode_set or old_address_mode_set: raise exc.InvalidInput(error_message=msg) def _create_bulk(self, resource, context, request_items): objects = [] collection = "%ss" % resource items = request_items[collection] try: with db_api.CONTEXT_WRITER.using(context): for item in items: obj_creator = getattr(self, 'create_%s' % resource) objects.append(obj_creator(context, item)) except Exception: with excutils.save_and_reraise_exception(): LOG.error("An exception occurred while creating " "the %(resource)s:%(item)s", {'resource': resource, 'item': item}) return objects @db_api.retry_if_session_inactive() def create_network_bulk(self, context, networks): return self._create_bulk('network', context, networks) @db_api.retry_if_session_inactive() def create_network(self, context, network): """Handle creation of a single network.""" net_db = self.create_network_db(context, network) return self._make_network_dict(net_db, process_extensions=False, context=context) def create_network_db(self, context, network): # single request processing n = network['network'] with db_api.CONTEXT_WRITER.using(context): args = {'tenant_id': n['tenant_id'], 'id': n.get('id') or uuidutils.generate_uuid(), 'name': n['name'], 'mtu': n.get('mtu', constants.DEFAULT_NETWORK_MTU), 'admin_state_up': n['admin_state_up'], 'status': n.get('status', constants.NET_STATUS_ACTIVE), 'description': n.get('description')} network = models_v2.Network(**args) context.session.add(network) if n['shared']: np_rbac_args = {'project_id': network.project_id, 'object_id': network.id, 'action': 'access_as_shared', 'target_tenant': '*'} np_rbac_obj = network_obj.NetworkRBAC(context, **np_rbac_args) np_rbac_obj.create() return network @db_api.retry_if_session_inactive() def update_network(self, context, id, network): n = network['network'] with db_api.CONTEXT_WRITER.using(context): network = self._get_network(context, id) # validate 'shared' parameter if 'shared' in n: entry = None for item in network.rbac_entries: if (item.action == 'access_as_shared' and item.target_tenant == '*'): entry = item 
break setattr(network, 'shared', bool(entry)) self._validate_shared_update(context, id, network, n) update_shared = n.pop('shared') if update_shared and not entry: np_rbac_args = {'project_id': network.project_id, 'object_id': network.id, 'action': 'access_as_shared', 'target_tenant': '*'} np_rbac_obj = network_obj.NetworkRBAC(context, **np_rbac_args) np_rbac_obj.create() elif not update_shared and entry: network_obj.NetworkRBAC.delete_objects( context, object_id=network.id, action='access_as_shared', target_tenant='*') # TODO(ihrachys) Below can be removed when we make sqlalchemy # event listeners in neutron_lib/db/api.py to refresh expired # attributes. # # First trigger expiration of rbac_entries. context.session.flush() # Then fetch state for _make_network_dict use outside session # context. getattr(network, 'rbac_entries') # The filter call removes attributes from the body received from # the API that are logically tied to network resources but are # stored in other database tables handled by extensions network.update( ndb_utils.filter_non_model_columns(n, models_v2.Network)) return self._make_network_dict(network, context=context) def _ensure_network_not_in_use(self, context, net_id): non_auto_ports = context.session.query( models_v2.Port.id).filter_by(network_id=net_id).filter( ~models_v2.Port.device_owner.in_(AUTO_DELETE_PORT_OWNERS)) if non_auto_ports.count(): raise exc.NetworkInUse(net_id=net_id) @db_api.retry_if_session_inactive() def delete_network(self, context, id): registry.publish(resources.NETWORK, events.BEFORE_DELETE, self, payload=events.DBEventPayload( context, resource_id=id)) self._ensure_network_not_in_use(context, id) with db_api.CONTEXT_READER.using(context): auto_delete_port_ids = [p.id for p in context.session.query( models_v2.Port.id).filter_by(network_id=id).filter( models_v2.Port.device_owner.in_(AUTO_DELETE_PORT_OWNERS))] for port_id in auto_delete_port_ids: try: self.delete_port(context.elevated(), port_id) except exc.PortNotFound: # Don't raise if something else concurrently deleted the port LOG.debug("Ignoring PortNotFound when deleting port '%s'. " "The port has already been deleted.", port_id) # clean up subnets subnets = self._get_subnets_by_network(context, id) with db_api.exc_to_retry(os_db_exc.DBReferenceError): # retry reference errors so we can check the port type and # cleanup if a network-owned port snuck in without failing for subnet in subnets: self._delete_subnet(context, subnet) with db_api.CONTEXT_WRITER.using(context): network_db = self._get_network(context, id) network = self._make_network_dict(network_db, context=context) registry.notify(resources.NETWORK, events.PRECOMMIT_DELETE, self, context=context, network_id=id) # We expire network_db here because precommit deletion # might have left the relationship stale, for example, # if we deleted a segment. 
context.session.expire(network_db) network_db = self._get_network(context, id) context.session.delete(network_db) registry.notify(resources.NETWORK, events.AFTER_DELETE, self, context=context, network=network) @db_api.retry_if_session_inactive() def get_network(self, context, id, fields=None): network = self._get_network(context, id) return self._make_network_dict(network, fields, context=context) @db_api.retry_if_session_inactive() def _get_networks(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): marker_obj = ndb_utils.get_marker_obj(self, context, 'network', limit, marker) return model_query.get_collection( context, models_v2.Network, # if caller needs postprocessing, it should implement it explicitly dict_func=None, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) @db_api.retry_if_session_inactive() def get_networks(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): make_network_dict = functools.partial(self._make_network_dict, context=context) return [ make_network_dict(net, fields) for net in self._get_networks( context, filters=filters, fields=fields, sorts=sorts, limit=limit, marker=marker, page_reverse=page_reverse) ] @db_api.retry_if_session_inactive() def get_networks_count(self, context, filters=None): return model_query.get_collection_count(context, models_v2.Network, filters=filters) @db_api.retry_if_session_inactive() def create_subnet_bulk(self, context, subnets): return self._create_bulk('subnet', context, subnets) def _validate_ip_version(self, ip_version, addr, name): """Check IP field of a subnet match specified ip version.""" ip = netaddr.IPNetwork(addr) if ip.version != ip_version: data = {'name': name, 'addr': addr, 'ip_version': ip_version} msg = _("%(name)s '%(addr)s' does not match " "the ip_version '%(ip_version)s'") % data raise exc.InvalidInput(error_message=msg) def _validate_subnet(self, context, s, cur_subnet=None): """Validate a subnet spec.""" # This method will validate attributes which may change during # create_subnet() and update_subnet(). # The method requires the subnet spec 's' has 'ip_version' field. # If 's' dict does not have 'ip_version' field in an API call # (e.g., update_subnet()), you need to set 'ip_version' field # before calling this method. ip_ver = s['ip_version'] if validators.is_attr_set(s.get('cidr')): self._validate_ip_version(ip_ver, s['cidr'], 'cidr') # TODO(watanabe.isao): After we found a way to avoid the re-sync # from the agent side, this restriction could be removed. 
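# Illustrative aside, not part of the original module: the prefix-length
# rule checked below, restated as a standalone helper (the helper name is
# a stand-in). A /31 or /32 IPv4 subnet (or /127-/128 IPv6) leaves no
# room for DHCP to serve addresses.
import netaddr


def _dhcp_compatible(cidr):
    net = netaddr.IPNetwork(cidr)
    limit = 30 if net.version == 4 else 126
    return net.prefixlen <= limit


assert _dhcp_compatible('10.0.0.0/24')
assert not _dhcp_compatible('10.0.0.0/31')
assert not _dhcp_compatible('2001:db8::/127')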
if cur_subnet: dhcp_was_enabled = cur_subnet.enable_dhcp else: dhcp_was_enabled = False if s.get('enable_dhcp') and not dhcp_was_enabled: subnet_prefixlen = netaddr.IPNetwork(s['cidr']).prefixlen error_message = _("Subnet has a prefix length that is " "incompatible with DHCP service enabled") if ((ip_ver == 4 and subnet_prefixlen > 30) or (ip_ver == 6 and subnet_prefixlen > 126)): raise exc.InvalidInput(error_message=error_message) net = netaddr.IPNetwork(s['cidr']) if net.is_multicast(): error_message = _("Multicast IP subnet is not supported " "if enable_dhcp is True") raise exc.InvalidInput(error_message=error_message) elif net.is_loopback(): error_message = _("Loopback IP subnet is not supported " "if enable_dhcp is True") raise exc.InvalidInput(error_message=error_message) elif ip_ver == constants.IP_VERSION_4 and net.first == 0: error_message = _("First IP '0.0.0.0' of network is not " "supported if enable_dhcp is True.") raise exc.InvalidInput(error_message=error_message) if validators.is_attr_set(s.get('gateway_ip')): self._validate_ip_version(ip_ver, s['gateway_ip'], 'gateway_ip') is_gateway_not_valid = ( ipam.utils.check_gateway_invalid_in_subnet( s['cidr'], s['gateway_ip'])) if is_gateway_not_valid: error_message = _("Gateway is not valid on subnet") raise exc.InvalidInput(error_message=error_message) # Ensure the gateway IP is not assigned to any port # skip this check in case of create (s parameter won't have id) # NOTE(salv-orlando): There is slight chance of a race, when # a subnet-update and a router-interface-add operation are # executed concurrently if cur_subnet and not ipv6_utils.is_ipv6_pd_enabled(s): with db_api.CONTEXT_READER.using(context): # TODO(electrocucaracha): Look a solution for Join in OVO ipal = models_v2.IPAllocation alloc_qry = context.session.query(ipal.port_id) alloc_qry = alloc_qry.join("port", "routerport") gateway_ip = str(cur_subnet['gateway_ip']) allocated = alloc_qry.filter( ipal.ip_address == gateway_ip, ipal.subnet_id == cur_subnet['id']).first() if allocated and allocated.port_id: raise exc.GatewayIpInUse( ip_address=gateway_ip, port_id=allocated.port_id) if validators.is_attr_set(s.get('dns_nameservers')): if len(s['dns_nameservers']) > cfg.CONF.max_dns_nameservers: raise exc.DNSNameServersExhausted( subnet_id=s.get('id', _('new subnet')), quota=cfg.CONF.max_dns_nameservers) for dns in s['dns_nameservers']: try: netaddr.IPAddress(dns) except Exception: raise exc.InvalidInput( error_message=(_("Error parsing dns address %s") % dns)) self._validate_ip_version(ip_ver, dns, 'dns_nameserver') if validators.is_attr_set(s.get('host_routes')): if len(s['host_routes']) > cfg.CONF.max_subnet_host_routes: raise exc.HostRoutesExhausted( subnet_id=s.get('id', _('new subnet')), quota=cfg.CONF.max_subnet_host_routes) # check if the routes are all valid for rt in s['host_routes']: self._validate_host_route(rt, ip_ver) if ip_ver == 4: if validators.is_attr_set(s.get('ipv6_ra_mode')): raise exc.InvalidInput( error_message=(_("ipv6_ra_mode is not valid when " "ip_version is 4"))) if validators.is_attr_set(s.get('ipv6_address_mode')): raise exc.InvalidInput( error_message=(_("ipv6_address_mode is not valid when " "ip_version is 4"))) if ip_ver == 6: self._validate_ipv6_attributes(s, cur_subnet) def _validate_subnet_for_pd(self, subnet): """Validates that subnet parameters are correct for IPv6 PD""" if (subnet.get('ip_version') != constants.IP_VERSION_6): reason = _("Prefix Delegation can only be used with IPv6 " "subnets.") raise 
exc.BadRequest(resource='subnets', msg=reason) mode_list = [constants.IPV6_SLAAC, constants.DHCPV6_STATELESS] ra_mode = subnet.get('ipv6_ra_mode') if ra_mode not in mode_list: reason = _("IPv6 RA Mode must be SLAAC or Stateless for " "Prefix Delegation.") raise exc.BadRequest(resource='subnets', msg=reason) address_mode = subnet.get('ipv6_address_mode') if address_mode not in mode_list: reason = _("IPv6 Address Mode must be SLAAC or Stateless for " "Prefix Delegation.") raise exc.BadRequest(resource='subnets', msg=reason) def _update_router_gw_ports(self, context, network, subnet): l3plugin = directory.get_plugin(plugin_constants.L3) if l3plugin: gw_ports = self._get_router_gw_ports_by_network(context, network['id']) router_ids = [p.device_id for p in gw_ports] for id in router_ids: try: self._update_router_gw_port(context, id, network, subnet) except l3_exc.RouterNotFound: LOG.debug("Router %(id)s was concurrently deleted while " "updating GW port for subnet %(s)s", {'id': id, 's': subnet}) def _update_router_gw_port(self, context, router_id, network, subnet): l3plugin = directory.get_plugin(plugin_constants.L3) ctx_admin = context.elevated() ext_subnets_dict = {s['id']: s for s in network['subnets']} router = l3plugin.get_router(ctx_admin, router_id) external_gateway_info = router['external_gateway_info'] # Get all stateful (i.e. non-SLAAC/DHCPv6-stateless) fixed ips fips = [f for f in external_gateway_info['external_fixed_ips'] if not ipv6_utils.is_auto_address_subnet( ext_subnets_dict[f['subnet_id']])] num_fips = len(fips) # Don't add the fixed IP to the port if it already # has a stateful fixed IP of the same IP version if num_fips > 1: return if num_fips == 1 and netaddr.IPAddress( fips[0]['ip_address']).version == subnet['ip_version']: return external_gateway_info['external_fixed_ips'].append( {'subnet_id': subnet['id']}) info = {'router': {'external_gateway_info': external_gateway_info}} l3plugin.update_router(context, router_id, info) @db_api.retry_if_session_inactive() def _create_subnet_postcommit(self, context, result, network=None, ipam_subnet=None): if not network: network = self._get_network(context, result['network_id']) if not ipam_subnet: ipam_subnet = self.ipam.get_subnet(context, result['id']) if hasattr(network, 'external') and network.external: self._update_router_gw_ports(context, network, result) # If this subnet supports auto-addressing, then update any # internal ports on the network with addresses for this subnet. 
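# Illustrative aside, not part of the original module: why auto-address
# (SLAAC/DHCPv6-stateless) subnets must be a /64. The interface
# identifier is derived from the MAC via modified EUI-64 and fills
# exactly the low 64 bits; the values below are examples.
import netaddr

mac = netaddr.EUI('fa:16:3e:aa:bb:cc')
print(mac.ipv6_link_local())  # fe80::f816:3eff:feaa:bbcc
assert netaddr.IPNetwork('2001:db8::/64').prefixlen == 64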
if ipv6_utils.is_auto_address_subnet(result): updated_ports = self.ipam.add_auto_addrs_on_network_ports( context, result, ipam_subnet) for port_id in updated_ports: port_info = {'port': {'id': port_id}} try: self.update_port(context, port_id, port_info) except exc.PortNotFound: LOG.debug("Port %(p)s concurrently deleted while adding " "address for new subnet %(s)s.", {'p': port_id, 's': result}) def _get_subnetpool_id(self, context, subnet): """Return the subnetpool id for this request :param subnet: The subnet dict from the request """ use_default_subnetpool = subnet.get('use_default_subnetpool') if use_default_subnetpool == constants.ATTR_NOT_SPECIFIED: use_default_subnetpool = False subnetpool_id = subnet.get('subnetpool_id') if subnetpool_id == constants.ATTR_NOT_SPECIFIED: subnetpool_id = None if use_default_subnetpool and subnetpool_id: msg = _('subnetpool_id and use_default_subnetpool cannot both be ' 'specified') raise exc.BadRequest(resource='subnets', msg=msg) if subnetpool_id: return subnetpool_id if not use_default_subnetpool: return cidr = subnet.get('cidr') if validators.is_attr_set(cidr): ip_version = netaddr.IPNetwork(cidr).version else: ip_version = subnet.get('ip_version') if not validators.is_attr_set(ip_version): msg = _('ip_version must be specified in the absence of ' 'cidr and subnetpool_id') raise exc.BadRequest(resource='subnets', msg=msg) if ip_version == 6 and cfg.CONF.ipv6_pd_enabled: return constants.IPV6_PD_POOL_ID subnetpool = self.get_default_subnetpool(context, ip_version) if subnetpool: return subnetpool['id'] msg = _('No default subnetpool found for IPv%s') % ip_version raise exc.BadRequest(resource='subnets', msg=msg) @db_api.retry_if_session_inactive() def create_subnet(self, context, subnet): result, net, ipam_sub = self._create_subnet_precommit(context, subnet) self._create_subnet_postcommit(context, result, net, ipam_sub) return result def _create_subnet_precommit(self, context, subnet): """Creates subnet in DB, returns result, network, and ipam_subnet.""" s = subnet['subnet'] cidr = s.get('cidr', constants.ATTR_NOT_SPECIFIED) prefixlen = s.get('prefixlen', constants.ATTR_NOT_SPECIFIED) has_cidr = validators.is_attr_set(cidr) has_prefixlen = validators.is_attr_set(prefixlen) if has_cidr and has_prefixlen: msg = _('cidr and prefixlen must not be supplied together') raise exc.BadRequest(resource='subnets', msg=msg) if has_cidr: # turn the CIDR into a proper subnet net = netaddr.IPNetwork(s['cidr']) subnet['subnet']['cidr'] = '%s/%s' % (net.network, net.prefixlen) subnetpool_id = self._get_subnetpool_id(context, s) if not subnetpool_id and not has_cidr: msg = _('a subnetpool must be specified in the absence of a cidr') raise exc.BadRequest(resource='subnets', msg=msg) if subnetpool_id: self.ipam.validate_pools_with_subnetpool(s) if subnetpool_id == constants.IPV6_PD_POOL_ID: if has_cidr: # We do not currently support requesting a specific # cidr with IPv6 prefix delegation. Set the subnetpool_id # to None and allow the request to continue as normal. 
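# Illustrative aside, not part of the original module: the CIDR
# canonicalisation performed earlier in _create_subnet_precommit; a host
# address with a mask is rewritten to its network address.
import netaddr

net = netaddr.IPNetwork('192.168.1.17/24')
assert '%s/%s' % (net.network, net.prefixlen) == '192.168.1.0/24'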
subnetpool_id = None self._validate_subnet(context, s) else: prefix = constants.PROVISIONAL_IPV6_PD_PREFIX subnet['subnet']['cidr'] = prefix self._validate_subnet_for_pd(s) else: if not has_cidr: msg = _('A cidr must be specified in the absence of a ' 'subnet pool') raise exc.BadRequest(resource='subnets', msg=msg) self._validate_subnet(context, s) with db_api.CONTEXT_WRITER.using(context): network = self._get_network(context, subnet['subnet']['network_id']) subnet, ipam_subnet = self.ipam.allocate_subnet(context, network, subnet['subnet'], subnetpool_id) # TODO(ihrachys): make sqlalchemy refresh expired relationships getattr(network, 'subnets') result = self._make_subnet_dict(subnet, context=context) return result, network, ipam_subnet def _update_allocation_pools(self, subnet): """Gets new allocation pools and formats them correctly""" allocation_pools = self.ipam.generate_pools(subnet['cidr'], subnet['gateway_ip']) return [{'start': str(netaddr.IPAddress(p.first, subnet['ip_version'])), 'end': str(netaddr.IPAddress(p.last, subnet['ip_version']))} for p in allocation_pools] @db_api.retry_if_session_inactive() def update_subnet(self, context, id, subnet): """Update the subnet with new info. The change however will not be realized until the client renew the dns lease or we support gratuitous DHCP offers """ result, orig = self._update_subnet_precommit(context, id, subnet) return self._update_subnet_postcommit(context, orig, result) def _update_subnet_precommit(self, context, id, subnet): """All subnet update operations safe to enclose in a transaction. :param context: neutron api request context :param id: subnet id :param subnet: API request dictionary """ s = subnet['subnet'] new_cidr = s.get('cidr') subnet_obj = self._get_subnet_object(context, id) orig = self._make_subnet_dict(subnet_obj, fields=None, context=context) # Fill 'ip_version' and 'allocation_pools' fields with the current # value since _validate_subnet() expects subnet spec has 'ip_version' # and 'allocation_pools' fields. s['ip_version'] = subnet_obj.ip_version s['cidr'] = subnet_obj.cidr s['id'] = subnet_obj.id s['project_id'] = subnet_obj.project_id s['tenant_id'] = subnet_obj.project_id s['subnetpool_id'] = subnet_obj.subnetpool_id # Fill 'network_id' field with the current value since this is expected # by _validate_segment() in ipam_pluggable_backend. 
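# Illustrative aside, not part of the original module: the pool
# formatting produced by _update_allocation_pools above, shown with a
# concrete netaddr range.
import netaddr

pool = netaddr.IPRange('10.0.0.2', '10.0.0.254')
assert {'start': str(netaddr.IPAddress(pool.first, 4)),
        'end': str(netaddr.IPAddress(pool.last, 4))} == \
    {'start': '10.0.0.2', 'end': '10.0.0.254'}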
s['network_id'] = subnet_obj.network_id self._validate_subnet(context, s, cur_subnet=subnet_obj) db_pools = [netaddr.IPRange(p.start, p.end) for p in subnet_obj.allocation_pools] if new_cidr and ipv6_utils.is_ipv6_pd_enabled(s): # This is an ipv6 prefix delegation-enabled subnet being given an # updated cidr by the process_prefix_update RPC s['cidr'] = netaddr.IPNetwork(new_cidr, s['ip_version']) # Update gateway_ip and allocation pools based on new cidr s['gateway_ip'] = utils.get_first_host_ip( s['cidr'], s['ip_version']) s['allocation_pools'] = self._update_allocation_pools(s) range_pools = None if s.get('allocation_pools') is not None: # Convert allocation pools to IPRange to simplify future checks range_pools = self.ipam.pools_to_ip_range(s['allocation_pools']) self.ipam.validate_allocation_pools(range_pools, s['cidr']) s['allocation_pools'] = range_pools # If either gateway_ip or allocation_pools were specified subnet_gateway = (subnet_obj.gateway_ip if subnet_obj.gateway_ip else None) gateway_ip = s.get('gateway_ip', subnet_gateway) gateway_ip_changed = gateway_ip != subnet_gateway if gateway_ip_changed or s.get('allocation_pools') is not None: pools = range_pools if range_pools is not None else db_pools if gateway_ip: self.ipam.validate_gw_out_of_pools(gateway_ip, pools) kwargs = {'context': context, 'original_subnet': orig, 'request': s} registry.notify(resources.SUBNET, events.BEFORE_UPDATE, self, **kwargs) with db_api.CONTEXT_WRITER.using(context): subnet, changes = self.ipam.update_db_subnet(context, id, s, db_pools) return self._make_subnet_dict(subnet, context=context), orig @property def l3_rpc_notifier(self): if not hasattr(self, '_l3_rpc_notifier'): self._l3_rpc_notifier = l3_rpc_agent_api.L3AgentNotifyAPI() return self._l3_rpc_notifier @l3_rpc_notifier.setter def l3_rpc_notifier(self, value): self._l3_rpc_notifier = value def _update_subnet_postcommit(self, context, orig, result): """Subnet update operations that happen after transaction completes. :param context: neutron api request context :param orig: subnet dictionary representing state before update :param result: subnet dictionary representing state after update """ update_ports_needed = (result['cidr'] != orig['cidr'] and ipv6_utils.is_ipv6_pd_enabled(result)) if update_ports_needed: # Find ports that have not yet been updated # with an IP address by Prefix Delegation, and update them filters = {'fixed_ips': {'subnet_id': [result['id']]}} ports = self.get_ports(context, filters=filters) routers = [] for port in ports: for ip in port['fixed_ips']: if ip['subnet_id'] == result['id']: if (port['device_owner'] in constants.ROUTER_INTERFACE_OWNERS): routers.append(port['device_id']) ip['ip_address'] = result['gateway_ip'] else: # We remove ip_address and pass only PD subnet_id # in port's fixed_ip for port_update. Later, IPAM # drivers will allocate eui64 address with new # prefix when they find PD subnet_id in port's # fixed_ip. 
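# Illustrative aside, not part of the original module: the fixed_ips
# rewrite performed just below for a prefix-delegation subnet; the dict
# shape is a simplified stand-in for the API payload.
fixed_ip = {'subnet_id': 'pd-subnet-id', 'ip_address': '2001:db8::1'}
fixed_ip.pop('ip_address', None)  # leave only the subnet reference
assert fixed_ip == {'subnet_id': 'pd-subnet-id'}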
ip.pop('ip_address', None) self.update_port(context, port['id'], {'port': port}) # Send router_update to l3_agent if routers: self.l3_rpc_notifier.routers_updated(context, routers) kwargs = {'context': context, 'subnet': result, 'original_subnet': orig} registry.notify(resources.SUBNET, events.AFTER_UPDATE, self, **kwargs) return result @db_api.CONTEXT_READER def _subnet_get_user_allocation(self, context, subnet_id): """Check if there are any user ports on subnet and return first.""" return port_obj.IPAllocation.get_alloc_by_subnet_id( context, subnet_id, AUTO_DELETE_PORT_OWNERS) @db_api.CONTEXT_READER def _subnet_check_ip_allocations_internal_router_ports(self, context, subnet_id): # Do not delete the subnet if IP allocations for internal # router ports still exist allocs = port_obj.IPAllocation.get_alloc_by_subnet_id( context, subnet_id, constants.ROUTER_INTERFACE_OWNERS, False) if allocs: LOG.debug("Subnet %s still has internal router ports, " "cannot delete", subnet_id) raise exc.SubnetInUse(subnet_id=subnet_id) @db_api.retry_if_session_inactive() def _remove_subnet_from_port(self, context, sub_id, port_id, auto_subnet): try: fixed = [f for f in self.get_port(context, port_id)['fixed_ips'] if f['subnet_id'] != sub_id] if auto_subnet: # special flag to avoid re-allocation on auto subnets fixed.append({'subnet_id': sub_id, 'delete_subnet': True}) data = {port_def.RESOURCE_NAME: {'fixed_ips': fixed}} self.update_port(context, port_id, data) except exc.PortNotFound: # port is gone return except exc.SubnetNotFound as e: # another subnet in the fixed ips was concurrently removed. retry raise os_db_exc.RetryRequest(e) def _ensure_no_user_ports_on_subnet(self, context, id): alloc = self._subnet_get_user_allocation(context, id) if alloc: LOG.info("Found port (%(port_id)s, %(ip)s) having IP " "allocation on subnet " "%(subnet)s, cannot delete", {'ip': alloc.ip_address, 'port_id': alloc.port_id, 'subnet': id}) raise exc.SubnetInUse(subnet_id=id) @db_api.retry_if_session_inactive() def _remove_subnet_ip_allocations_from_ports(self, context, subnet): # Do not allow a subnet to be deleted if a router is attached to it self._subnet_check_ip_allocations_internal_router_ports( context, subnet.id) is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet) if not is_auto_addr_subnet: # we only automatically remove IP addresses from user ports if # the IPs come from auto allocation subnets. self._ensure_no_user_ports_on_subnet(context, subnet.id) net_allocs = (context.session.query(models_v2.IPAllocation.port_id). 
filter_by(subnet_id=subnet.id)) port_ids_on_net = [ipal.port_id for ipal in net_allocs] for port_id in port_ids_on_net: self._remove_subnet_from_port(context, subnet.id, port_id, auto_subnet=is_auto_addr_subnet) @db_api.retry_if_session_inactive() def delete_subnet(self, context, id): LOG.debug("Deleting subnet %s", id) # Make sure the subnet isn't used by other resources _check_subnet_not_used(context, id) subnet = self._get_subnet_object(context, id) registry.publish(resources.SUBNET, events.PRECOMMIT_DELETE_ASSOCIATIONS, self, payload=events.DBEventPayload(context, resource_id=subnet.id)) self._remove_subnet_ip_allocations_from_ports(context, subnet) self._delete_subnet(context, subnet) def _delete_subnet(self, context, subnet): with db_api.exc_to_retry(sql_exc.IntegrityError), \ db_api.CONTEXT_WRITER.using(context): registry.notify(resources.SUBNET, events.PRECOMMIT_DELETE, self, context=context, subnet_id=subnet.id) subnet.delete() # Delete related ipam subnet manually, # since there is no FK relationship self.ipam.delete_subnet(context, subnet.id) registry.notify(resources.SUBNET, events.AFTER_DELETE, self, context=context, subnet=subnet.to_dict()) @db_api.retry_if_session_inactive() def get_subnet(self, context, id, fields=None): subnet_obj = self._get_subnet_object(context, id) return self._make_subnet_dict(subnet_obj, fields, context=context) @db_api.retry_if_session_inactive() def get_subnets(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): subnet_objs = self._get_subnets(context, filters, sorts, limit, marker, page_reverse) return [ self._make_subnet_dict(subnet_object, fields, context) for subnet_object in subnet_objs ] @db_api.retry_if_session_inactive() def get_subnets_count(self, context, filters=None): filters = filters or {} return subnet_obj.Subnet.count(context, validate_filters=False, **filters) @db_api.retry_if_session_inactive() def get_subnets_by_network(self, context, network_id): return [self._make_subnet_dict(subnet_obj) for subnet_obj in self._get_subnets_by_network(context, network_id)] def _validate_address_scope_id(self, context, address_scope_id, subnetpool_id, sp_prefixes, ip_version): """Validate the address scope before associating. Subnetpool can associate with an address scope if - the tenant user is the owner of both the subnetpool and address scope - the user is associating the subnetpool with a shared address scope - there is no prefix conflict with the existing subnetpools associated with the address scope. 
- the address family of the subnetpool and address scope are the same """ if not validators.is_attr_set(address_scope_id): return address_scope = self._get_address_scope(context, address_scope_id) is_accessible = ( address_scope_obj.AddressScope.is_accessible( context, address_scope ) ) if not is_accessible: raise exc.IllegalSubnetPoolAssociationToAddressScope( subnetpool_id=subnetpool_id, address_scope_id=address_scope_id) as_ip_version = self.get_ip_version_for_address_scope(context, address_scope_id) if ip_version != as_ip_version: raise exc.IllegalSubnetPoolIpVersionAssociationToAddressScope( subnetpool_id=subnetpool_id, address_scope_id=address_scope_id, ip_version=as_ip_version) self._check_subnetpool_address_scope_network_affinity( context, subnetpool_id, ip_version) subnetpools = subnetpool_obj.SubnetPool.get_objects( context, address_scope_id=address_scope_id) new_set = netaddr.IPSet(sp_prefixes) for sp in subnetpools: if sp.id == subnetpool_id: continue sp_set = netaddr.IPSet(sp.prefixes) if sp_set.intersection(new_set): raise exc.AddressScopePrefixConflict() def _check_subnetpool_address_scope_network_affinity(self, context, subnetpool_id, ip_version): """Check whether updating a subnet pool's address scope is allowed. - Identify the subnets that would be re-scoped - Identify the networks that would be affected by re-scoping - Find all subnets associated with the affected networks - Perform set difference (all - to_be_rescoped) - If the set difference yields non-zero result size, re-scoping the subnet pool will leave subnets in different address scopes and result in address scope / network affinity violations so raise an exception to block the operation. """ # TODO(tidwellr) potentially lots of subnets here, optimize this code subnets_to_rescope = self._get_subnets_by_subnetpool(context, subnetpool_id) rescoped_subnet_ids = set() affected_source_network_ids = set() for subnet in subnets_to_rescope: rescoped_subnet_ids.add(subnet.id) affected_source_network_ids.add(subnet.network_id) all_network_subnets = subnet_obj.Subnet.get_objects( context, network_id=affected_source_network_ids, ip_version=ip_version) all_affected_subnet_ids = set( [subnet.id for subnet in all_network_subnets]) # Use set difference to identify the subnets that would be # violating address scope affinity constraints if the subnet # pool's address scope was changed. violations = all_affected_subnet_ids.difference(rescoped_subnet_ids) if violations: raise addr_scope_exc.NetworkAddressScopeAffinityError() def _check_subnetpool_update_allowed(self, context, subnetpool_id, address_scope_id): """Check if the subnetpool can be updated or not. If the subnetpool is associated to a shared address scope not owned by the tenant, then the subnetpool cannot be updated. """ if not self.is_address_scope_owned_by_tenant(context, address_scope_id): msg = _("subnetpool %(subnetpool_id)s cannot be updated when" " associated with shared address scope " "%(address_scope_id)s") % { 'subnetpool_id': subnetpool_id, 'address_scope_id': address_scope_id} raise exc.IllegalSubnetPoolUpdate(reason=msg) def _check_default_subnetpool_exists(self, context, ip_version): """Check if a default already exists for the given IP version. There can only be one default subnetpool for each IP family. Raise an InvalidInput error if a default has already been set. """ if self.get_default_subnetpool(context, ip_version): msg = _("A default subnetpool for this IP family has already " "been set. 
Only one default may exist per IP family")
            raise exc.InvalidInput(error_message=msg)

    @db_api.retry_if_session_inactive()
    def create_subnetpool(self, context, subnetpool):
        sp = subnetpool['subnetpool']
        sp_reader = subnet_alloc.SubnetPoolReader(sp)
        if sp_reader.is_default:
            self._check_default_subnetpool_exists(context,
                                                  sp_reader.ip_version)
        self._validate_address_scope_id(context, sp_reader.address_scope_id,
                                        sp_reader.id, sp_reader.prefixes,
                                        sp_reader.ip_version)
        pool_args = {'project_id': sp['tenant_id'],
                     'id': sp_reader.id,
                     'name': sp_reader.name,
                     'ip_version': sp_reader.ip_version,
                     'default_prefixlen': sp_reader.default_prefixlen,
                     'min_prefixlen': sp_reader.min_prefixlen,
                     'max_prefixlen': sp_reader.max_prefixlen,
                     'is_default': sp_reader.is_default,
                     'shared': sp_reader.shared,
                     'default_quota': sp_reader.default_quota,
                     'address_scope_id': sp_reader.address_scope_id,
                     'description': sp_reader.description,
                     'prefixes': sp_reader.prefixes}
        subnetpool = subnetpool_obj.SubnetPool(context, **pool_args)
        subnetpool.create()
        return self._make_subnetpool_dict(subnetpool)

    @db_api.retry_if_session_inactive()
    def update_subnetpool(self, context, id, subnetpool):
        new_sp = subnetpool['subnetpool']
        with db_api.CONTEXT_WRITER.using(context):
            orig_sp = self._get_subnetpool(context, id=id)
            updated = _update_subnetpool_dict(orig_sp, new_sp)
            reader = subnet_alloc.SubnetPoolReader(updated)
            if reader.is_default and not orig_sp.is_default:
                self._check_default_subnetpool_exists(context,
                                                      reader.ip_version)
            if orig_sp.address_scope_id:
                self._check_subnetpool_update_allowed(
                    context, id, orig_sp.address_scope_id)
            self._validate_address_scope_id(context, reader.address_scope_id,
                                            id, reader.prefixes,
                                            reader.ip_version)
            address_scope_changed = (
                orig_sp.address_scope_id != reader.address_scope_id)
            orig_sp.update_fields(reader.subnetpool)
            orig_sp.update()
        if address_scope_changed:
            # Notify about the update of subnetpool's address scope
            registry.publish(resources.SUBNETPOOL_ADDRESS_SCOPE,
                             events.AFTER_UPDATE,
                             self.update_subnetpool,
                             payload=events.DBEventPayload(
                                 context, resource_id=id))
        for key in ['min_prefixlen', 'max_prefixlen', 'default_prefixlen']:
            updated[key] = str(updated[key])
        resource_extend.apply_funcs(subnetpool_def.COLLECTION_NAME,
                                    updated, orig_sp.db_obj)
        return updated

    @db_api.retry_if_session_inactive()
    def get_subnetpool(self, context, id, fields=None):
        subnetpool = self._get_subnetpool(context, id)
        return self._make_subnetpool_dict(subnetpool, fields)

    @db_api.retry_if_session_inactive()
    def get_subnetpools(self, context, filters=None, fields=None,
                        sorts=None, limit=None, marker=None,
                        page_reverse=False):
        pager = base_obj.Pager(sorts, limit, page_reverse, marker)
        filters = filters or {}
        subnetpools = subnetpool_obj.SubnetPool.get_objects(
            context, _pager=pager, validate_filters=False, **filters)
        return [
            self._make_subnetpool_dict(pool, fields)
            for pool in subnetpools
        ]

    @db_api.retry_if_session_inactive()
    def get_default_subnetpool(self, context, ip_version):
        """Retrieve the default subnetpool for the given IP version."""
        filters = {'is_default': True, 'ip_version': ip_version}
        subnetpool = self.get_subnetpools(context, filters=filters)
        if subnetpool:
            return subnetpool[0]

    @db_api.retry_if_session_inactive()
    def delete_subnetpool(self, context, id):
        with db_api.CONTEXT_WRITER.using(context):
            subnetpool = self._get_subnetpool(context, id=id)
            if subnet_obj.Subnet.objects_exist(context, subnetpool_id=id):
                reason = _("Subnet pool has existing allocations")
                raise exc.SubnetPoolDeleteError(reason=reason)
            subnetpool.delete()
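# Illustrative aside, not part of the original module: the netaddr.IPSet
# arithmetic used by the prefix-handling methods below (onboarding and
# add/remove_prefixes) merges adjacent CIDRs automatically.
import netaddr

pool = netaddr.IPSet(['10.0.0.0/24', '10.0.1.0/24'])
pool.compact()
assert [str(c) for c in pool.iter_cidrs()] == ['10.0.0.0/23']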
    @db_api.retry_if_session_inactive()
    def onboard_network_subnets(self, context, subnetpool_id, network_info):
        network_id = network_info.get('network_id')
        if not validators.is_attr_set(network_id):
            msg = _("network_id must be specified.")
            raise exc.InvalidInput(error_message=msg)
        if not network_obj.Network.objects_exist(context, id=network_id):
            raise exc.NetworkNotFound(net_id=network_id)
        subnetpool = subnetpool_obj.SubnetPool.get_object(
            context, id=subnetpool_id)
        if not subnetpool:
            raise exc.SubnetPoolNotFound(subnetpool_id=subnetpool_id)
        subnets_to_onboard = subnet_obj.Subnet.get_objects(
            context, network_id=network_id,
            ip_version=subnetpool.ip_version)
        self._onboard_network_subnets(context, subnets_to_onboard,
                                      subnetpool)
        if subnetpool.address_scope_id:
            # Notify all affected routers of any address scope changes
            registry.publish(resources.SUBNETPOOL_ADDRESS_SCOPE,
                             events.AFTER_UPDATE,
                             self.onboard_network_subnets,
                             payload=events.DBEventPayload(
                                 context, resource_id=subnetpool_id))
        onboard_info = []
        for subnet in subnets_to_onboard:
            onboard_info.append({'id': subnet.id, 'cidr': subnet.cidr})
        return onboard_info

    def _onboard_network_subnets(self, context, subnets_to_onboard,
                                 subnetpool):
        allocated_prefix_set = netaddr.IPSet(
            [x.cidr for x in subnet_obj.Subnet.get_objects(
                context, subnetpool_id=subnetpool.id)])
        prefixes_to_add = []
        for subnet in subnets_to_onboard:
            to_onboard_ipset = netaddr.IPSet([subnet.cidr])
            if to_onboard_ipset & allocated_prefix_set:
                args = {'subnet_id': subnet.id,
                        'cidr': subnet.cidr,
                        'subnetpool_id': subnetpool.id}
                msg = _('Onboarding subnet %(subnet_id)s: %(cidr)s conflicts '
                        'with allocated prefixes in subnet pool '
                        '%(subnetpool_id)s') % args
                raise exc.IllegalSubnetPoolUpdate(reason=msg)
            prefixes_to_add.append(subnet.cidr)

        with db_api.CONTEXT_WRITER.using(context):
            new_sp_prefixes = subnetpool.prefixes + prefixes_to_add
            sp_update_req = {'subnetpool': {'prefixes': new_sp_prefixes}}
            self.update_subnetpool(context, subnetpool.id, sp_update_req)
            for subnet in subnets_to_onboard:
                subnet.subnetpool_id = subnetpool.id
                subnet.update()

    def _check_mac_addr_update(self, context, port, new_mac, device_owner):
        if (device_owner and
                device_owner.startswith(
                    constants.DEVICE_OWNER_NETWORK_PREFIX)):
            raise exc.UnsupportedPortDeviceOwner(
                op=_("mac address update"), port_id=port['id'],
                device_owner=device_owner)

    def _create_db_port_obj(self, context, port_data):
        mac_address = port_data.pop('mac_address', None)
        if mac_address:
            if self._is_mac_in_use(context, port_data['network_id'],
                                   mac_address):
                raise exc.MacAddressInUse(net_id=port_data['network_id'],
                                          mac=mac_address)
        else:
            mac_address = self._generate_macs()[0]
        db_port = models_v2.Port(mac_address=mac_address, **port_data)
        context.session.add(db_port)
        return db_port

    @db_api.retry_if_session_inactive()
    def create_port(self, context, port):
        db_port = self.create_port_db(context, port)
        return self._make_port_dict(db_port, process_extensions=False)

    @db_api.retry_if_session_inactive()
    def create_port_bulk(self, context, ports):
        return self._create_bulk('port', context, ports)

    def create_port_db(self, context, port):
        p = port['port']
        port_id = p.get('id') or uuidutils.generate_uuid()
        network_id = p['network_id']
        if p.get('device_owner'):
            self._enforce_device_owner_not_router_intf_or_device_id(
                context, p.get('device_owner'), p.get('device_id'),
                p['tenant_id'])
        port_data = dict(tenant_id=p['tenant_id'],
                         name=p['name'],
                         id=port_id,
                         network_id=network_id,
                         admin_state_up=p['admin_state_up'],
                         status=p.get('status',
constants.PORT_STATUS_ACTIVE), device_id=p['device_id'], device_owner=p['device_owner'], description=p.get('description')) if p.get('mac_address') is not constants.ATTR_NOT_SPECIFIED: port_data['mac_address'] = p.get('mac_address') with db_api.CONTEXT_WRITER.using(context): # Ensure that the network exists. self._get_network(context, network_id) # Create the port db_port = self._create_db_port_obj(context, port_data) p['mac_address'] = db_port['mac_address'] try: self.ipam.allocate_ips_for_port_and_store( context, port, port_id) db_port['ip_allocation'] = (ipalloc_apidef. IP_ALLOCATION_IMMEDIATE) except ipam_exc.DeferIpam: db_port['ip_allocation'] = (ipalloc_apidef. IP_ALLOCATION_DEFERRED) fixed_ips = p['fixed_ips'] if validators.is_attr_set(fixed_ips) and not fixed_ips: # [] was passed explicitly as fixed_ips. An unaddressed port. db_port['ip_allocation'] = ipalloc_apidef.IP_ALLOCATION_NONE return db_port def _validate_port_for_update(self, context, db_port, new_port, new_mac): changed_owner = 'device_owner' in new_port current_owner = (new_port.get('device_owner') or db_port['device_owner']) changed_device_id = new_port.get('device_id') != db_port['device_id'] current_device_id = new_port.get('device_id') or db_port['device_id'] if current_owner and changed_device_id or changed_owner: self._enforce_device_owner_not_router_intf_or_device_id( context, current_owner, current_device_id, db_port['tenant_id']) if new_mac and new_mac != db_port['mac_address']: self._check_mac_addr_update(context, db_port, new_mac, current_owner) @db_api.retry_if_session_inactive() def update_port(self, context, id, port): new_port = port['port'] with db_api.CONTEXT_WRITER.using(context): db_port = self._get_port(context, id) new_mac = new_port.get('mac_address') self._validate_port_for_update(context, db_port, new_port, new_mac) # Note: _make_port_dict is called here to load extension data # (specifically host binding). The IPAM plugin is separate from # the core plugin, so extensions are not loaded. # # The IPAM code could cheat and get it directly from db_port but it # would have to know about the implementation (remember ml2 has its # own port binding schema that differs from the generic one) # # This code could extract just the port binding host here and pass # that in. The problem is that db_base_plugin_common shouldn't # know anything about port binding. This compromise sends IPAM a # port_dict with all of the extension data loaded. try: self.ipam.update_port( context, old_port_db=db_port, old_port=self._make_port_dict(db_port), new_port=new_port) except ipam_exc.IpAddressAllocationNotFound as e: # If a port update and a subnet delete interleave, there is a # chance that the IPAM update operation raises this exception. # Rather than throwing that up to the user under some sort of # conflict, bubble up a retry instead that should bring things # back to sanity. 
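# Illustrative aside, not part of the original module: how the
# RetryRequest raised just below is consumed. oslo.db's wrap_db_retry
# re-invokes the decorated function when it sees RetryRequest; the
# decorator arguments and function here are illustrative.
from oslo_db import api as oslo_db_api
from oslo_db import exception as os_db_exc

attempts = {'count': 0}


@oslo_db_api.wrap_db_retry(max_retries=3)
def flaky_update():
    attempts['count'] += 1
    if attempts['count'] < 2:
        raise os_db_exc.RetryRequest(ValueError('transient conflict'))
    return 'ok'


assert flaky_update() == 'ok'  # first call is retried once, then succeeds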
raise os_db_exc.RetryRequest(e) except ipam_exc.IPAddressChangeNotAllowed as e: raise exc.BadRequest(resource='ports', msg=e) return self._make_port_dict(db_port) @db_api.retry_if_session_inactive() def delete_port(self, context, id): with db_api.CONTEXT_WRITER.using(context): self.ipam.delete_port(context, id) def delete_ports_by_device_id(self, context, device_id, network_id=None): with db_api.CONTEXT_READER.using(context): query = (context.session.query(models_v2.Port.id) .enable_eagerloads(False) .filter(models_v2.Port.device_id == device_id)) if network_id: query = query.filter(models_v2.Port.network_id == network_id) port_ids = [p[0] for p in query] for port_id in port_ids: try: self.delete_port(context, port_id) except exc.PortNotFound: # Don't raise if something else concurrently deleted the port LOG.debug("Ignoring PortNotFound when deleting port '%s'. " "The port has already been deleted.", port_id) @db_api.retry_if_session_inactive() @db_api.CONTEXT_READER def get_port(self, context, id, fields=None): port = self._get_port(context, id) return self._make_port_dict(port, fields) def _get_ports_query(self, context, filters=None, *args, **kwargs): Port = models_v2.Port IPAllocation = models_v2.IPAllocation limit = kwargs.pop('limit', None) filters = filters or {} fixed_ips = filters.pop('fixed_ips', {}) mac_address = filters.pop('mac_address', {}) vif_type = filters.pop(portbindings_def.VIF_TYPE, None) query = model_query.get_collection_query(context, Port, filters=filters, *args, **kwargs) ip_addresses = fixed_ips.get('ip_address') subnet_ids = fixed_ips.get('subnet_id') if vif_type is not None: query = query.filter(Port.port_bindings.any(vif_type=vif_type)) if mac_address: lowered_macs = [x.lower() for x in mac_address] query = query.filter(func.lower(Port.mac_address).in_( lowered_macs)) if ip_addresses: query = query.filter( Port.fixed_ips.any(IPAllocation.ip_address.in_(ip_addresses))) if subnet_ids: query = query.filter( Port.fixed_ips.any(IPAllocation.subnet_id.in_(subnet_ids))) if limit: query = query.limit(limit) return query @db_api.retry_if_session_inactive() def get_ports(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): marker_obj = ndb_utils.get_marker_obj(self, context, 'port', limit, marker) query = self._get_ports_query(context, filters=filters, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) items = [self._make_port_dict(c, fields) for c in query] if limit and page_reverse: items.reverse() return items @db_api.retry_if_session_inactive() def get_ports_count(self, context, filters=None): return self._get_ports_query(context, filters).count() def _enforce_device_owner_not_router_intf_or_device_id(self, context, device_owner, device_id, tenant_id): """Prevent tenants from replacing the device id of router ports with a router uuid belonging to another tenant. """ if device_owner not in constants.ROUTER_INTERFACE_OWNERS: return if not context.is_admin: # check to make sure device_id does not match another tenants # router. if device_id: if hasattr(self, 'get_router'): try: ctx_admin = context.elevated() router = self.get_router(ctx_admin, device_id) except l3_exc.RouterNotFound: return else: l3plugin = directory.get_plugin(plugin_constants.L3) if l3plugin: try: ctx_admin = context.elevated() router = l3plugin.get_router(ctx_admin, device_id) except l3_exc.RouterNotFound: return else: # raise as extension doesn't support L3 anyways. 
raise exc.DeviceIDNotOwnedByTenant(
                            device_id=device_id)
                if tenant_id != router['tenant_id']:
                    raise exc.DeviceIDNotOwnedByTenant(device_id=device_id)

    @db_api.retry_if_session_inactive()
    def add_prefixes(self, context, subnetpool_id, body):
        prefixes = subnetpool_prefix_ops.get_operation_request_body(body)
        with db_api.CONTEXT_WRITER.using(context):
            subnetpool = subnetpool_obj.SubnetPool.get_object(
                context, id=subnetpool_id)
            if not subnetpool:
                raise exc.SubnetPoolNotFound(subnetpool_id=subnetpool_id)
            if len(prefixes) == 0:
                # No prefixes were included in the request, simply return
                return {'prefixes': subnetpool.prefixes}
            new_sp_prefixes = subnetpool.prefixes + prefixes
            sp_update_req = {'subnetpool': {'prefixes': new_sp_prefixes}}
            sp = self.update_subnetpool(context, subnetpool_id,
                                        sp_update_req)
            return {'prefixes': sp['prefixes']}

    @db_api.retry_if_session_inactive()
    def remove_prefixes(self, context, subnetpool_id, body):
        prefixes = subnetpool_prefix_ops.get_operation_request_body(body)
        with db_api.CONTEXT_WRITER.using(context):
            subnetpool = subnetpool_obj.SubnetPool.get_object(
                context, id=subnetpool_id)
            if not subnetpool:
                raise exc.SubnetPoolNotFound(subnetpool_id=subnetpool_id)
            if len(prefixes) == 0:
                # No prefixes were included in the request, simply return
                return {'prefixes': subnetpool.prefixes}
            all_prefix_set = netaddr.IPSet(subnetpool.prefixes)
            removal_prefix_set = netaddr.IPSet(prefixes)
            if all_prefix_set.isdisjoint(removal_prefix_set):
                # The prefixes requested for removal are not in the prefix
                # list, making this a no-op, so simply return.
                return {'prefixes': subnetpool.prefixes}
            subnets = subnet_obj.Subnet.get_objects(
                context, subnetpool_id=subnetpool_id)
            allocated_prefix_set = netaddr.IPSet([x.cidr for x in subnets])
            if not allocated_prefix_set.isdisjoint(removal_prefix_set):
                # One or more of the prefixes requested for removal have
                # been allocated by a real subnet, raise an exception to
                # indicate this.
                msg = _("One or more of the prefixes to be removed are in "
                        "use by a subnet.")
                raise exc.IllegalSubnetPoolPrefixUpdate(msg=msg)
            new_prefixes = all_prefix_set.difference(removal_prefix_set)
            new_prefixes.compact()
            subnetpool.prefixes = [str(x) for x in new_prefixes.iter_cidrs()]
            subnetpool.update()
            return {'prefixes': subnetpool.prefixes}


# ---- neutron-16.0.0.0b2.dev214/neutron/db/dns_db.py ----

# Copyright (c) 2016 IBM
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
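# Illustrative aside, not part of the original file: the minimal driver
# surface DNSDbMixin (below) relies on. Only the two method names and
# signatures mirror the calls made in this module; the class and the
# values are stand-ins.
class _ExampleDNSDriver(object):
    def create_record_set(self, context, dns_domain, dns_name, records):
        print('publish %s.%s -> %s' % (dns_name, dns_domain, records))

    def delete_record_set(self, context, dns_domain, dns_name, records):
        print('remove %s.%s -> %s' % (dns_name, dns_domain, records))


_ExampleDNSDriver().create_record_set(
    None, 'example.org.', 'vm1', ['198.51.100.7'])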
from neutron_lib.api.definitions import dns as dns_apidef from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.api import extensions from neutron_lib.api import validators from neutron_lib.db import resource_extend from neutron_lib import exceptions as n_exc from neutron_lib.exceptions import dns as dns_exc from oslo_config import cfg from oslo_log import log as logging from neutron._i18n import _ from neutron.objects import floatingip as fip_obj from neutron.objects import network from neutron.objects import ports as port_obj from neutron.services.externaldns import driver LOG = logging.getLogger(__name__) class DNSActionsData(object): def __init__(self, current_dns_name=None, current_dns_domain=None, previous_dns_name=None, previous_dns_domain=None): self.current_dns_name = current_dns_name self.current_dns_domain = current_dns_domain self.previous_dns_name = previous_dns_name self.previous_dns_domain = previous_dns_domain @resource_extend.has_resource_extenders class DNSDbMixin(object): """Mixin class to add DNS methods to db_base_plugin_v2.""" _dns_driver = None @property def dns_driver(self): if self._dns_driver: return self._dns_driver if not cfg.CONF.external_dns_driver: return try: self._dns_driver = driver.ExternalDNSService.get_instance() LOG.debug("External DNS driver loaded: %s", cfg.CONF.external_dns_driver) return self._dns_driver except ImportError: LOG.exception("ImportError exception occurred while loading " "the external DNS service driver") raise dns_exc.ExternalDNSDriverNotFound( driver=cfg.CONF.external_dns_driver) @staticmethod @resource_extend.extends([l3_apidef.FLOATINGIPS]) def _extend_floatingip_dict_dns(floatingip_res, floatingip_db): floatingip_res['dns_domain'] = '' floatingip_res['dns_name'] = '' if floatingip_db.dns: floatingip_res['dns_domain'] = floatingip_db.dns['dns_domain'] floatingip_res['dns_name'] = floatingip_db.dns['dns_name'] return floatingip_res def _process_dns_floatingip_create_precommit(self, context, floatingip_data, req_data): # expects to be called within a plugin's session dns_domain = req_data.get(dns_apidef.DNSDOMAIN) if not validators.is_attr_set(dns_domain): return if not self.dns_driver: return dns_name = req_data[dns_apidef.DNSNAME] self._validate_floatingip_dns(dns_name, dns_domain) current_dns_name, current_dns_domain = ( self._get_requested_state_for_external_dns_service_create( context, floatingip_data, req_data)) dns_actions_data = None if current_dns_name and current_dns_domain: fip_obj.FloatingIPDNS( context, floatingip_id=floatingip_data['id'], dns_name=req_data[dns_apidef.DNSNAME], dns_domain=req_data[dns_apidef.DNSDOMAIN], published_dns_name=current_dns_name, published_dns_domain=current_dns_domain).create() dns_actions_data = DNSActionsData( current_dns_name=current_dns_name, current_dns_domain=current_dns_domain) floatingip_data['dns_name'] = dns_name floatingip_data['dns_domain'] = dns_domain return dns_actions_data def _process_dns_floatingip_create_postcommit(self, context, floatingip_data, dns_actions_data): if not dns_actions_data: return self._add_ips_to_external_dns_service( context, dns_actions_data.current_dns_domain, dns_actions_data.current_dns_name, [floatingip_data['floating_ip_address']]) def _process_dns_floatingip_update_precommit(self, context, floatingip_data): # expects to be called within a plugin's session if not extensions.is_extension_supported( self._core_plugin, dns_apidef.ALIAS): return if not self.dns_driver: return dns_data_db = fip_obj.FloatingIPDNS.get_object( context, 
floatingip_id=floatingip_data['id']) if dns_data_db and dns_data_db['dns_name']: # dns_name and dns_domain assigned for floating ip. It doesn't # matter whether they are defined for internal port return current_dns_name, current_dns_domain = ( self._get_requested_state_for_external_dns_service_update( context, floatingip_data)) if dns_data_db: if (dns_data_db['published_dns_name'] != current_dns_name or dns_data_db['published_dns_domain'] != current_dns_domain): dns_actions_data = DNSActionsData( previous_dns_name=dns_data_db['published_dns_name'], previous_dns_domain=dns_data_db['published_dns_domain']) if current_dns_name and current_dns_domain: dns_data_db['published_dns_name'] = current_dns_name dns_data_db['published_dns_domain'] = current_dns_domain dns_actions_data.current_dns_name = current_dns_name dns_actions_data.current_dns_domain = current_dns_domain else: dns_data_db.delete() return dns_actions_data else: return if current_dns_name and current_dns_domain: fip_obj.FloatingIPDNS( context, floatingip_id=floatingip_data['id'], dns_name='', dns_domain='', published_dns_name=current_dns_name, published_dns_domain=current_dns_domain).create() return DNSActionsData(current_dns_name=current_dns_name, current_dns_domain=current_dns_domain) def _process_dns_floatingip_update_postcommit(self, context, floatingip_data, dns_actions_data): if not dns_actions_data: return if dns_actions_data.previous_dns_name: self._delete_floatingip_from_external_dns_service( context, dns_actions_data.previous_dns_domain, dns_actions_data.previous_dns_name, [floatingip_data['floating_ip_address']]) if dns_actions_data.current_dns_name: self._add_ips_to_external_dns_service( context, dns_actions_data.current_dns_domain, dns_actions_data.current_dns_name, [floatingip_data['floating_ip_address']]) def _process_dns_floatingip_delete(self, context, floatingip_data): if not extensions.is_extension_supported( self._core_plugin, dns_apidef.ALIAS): return dns_data_db = fip_obj.FloatingIPDNS.get_object( context, floatingip_id=floatingip_data['id']) if dns_data_db: self._delete_floatingip_from_external_dns_service( context, dns_data_db['published_dns_domain'], dns_data_db['published_dns_name'], [floatingip_data['floating_ip_address']]) def _validate_floatingip_dns(self, dns_name, dns_domain): if dns_domain and not dns_name: msg = _("dns_domain cannot be specified without a dns_name") raise n_exc.BadRequest(resource='floatingip', msg=msg) if dns_name and not dns_domain: msg = _("dns_name cannot be specified without a dns_domain") raise n_exc.BadRequest(resource='floatingip', msg=msg) def _get_internal_port_dns_data(self, context, floatingip_data): port_dns = port_obj.PortDNS.get_object( context, port_id=floatingip_data['port_id']) if not (port_dns and port_dns['dns_name']): return None, None net_dns = network.NetworkDNSDomain.get_net_dns_from_port( context=context, port_id=floatingip_data['port_id']) if not net_dns: return port_dns['dns_name'], None return port_dns['dns_name'], net_dns['dns_domain'] def _delete_floatingip_from_external_dns_service(self, context, dns_domain, dns_name, records): ips = [str(r) for r in records] try: self.dns_driver.delete_record_set(context, dns_domain, dns_name, ips) except (dns_exc.DNSDomainNotFound, dns_exc.DuplicateRecordSet) as e: LOG.exception("Error deleting Floating IP data from external " "DNS service. Name: '%(name)s'. Domain: " "'%(domain)s'. IP addresses '%(ips)s'. 
DNS " "service driver message '%(message)s'", {"name": dns_name, "domain": dns_domain, "message": e.msg, "ips": ', '.join(ips)}) def _get_requested_state_for_external_dns_service_create(self, context, floatingip_data, req_data): fip_dns_name = req_data[dns_apidef.DNSNAME] if fip_dns_name: return fip_dns_name, req_data[dns_apidef.DNSDOMAIN] if floatingip_data['port_id']: return self._get_internal_port_dns_data(context, floatingip_data) return None, None def _get_requested_state_for_external_dns_service_update(self, context, floatingip_data): if floatingip_data['port_id']: return self._get_internal_port_dns_data(context, floatingip_data) return None, None def _add_ips_to_external_dns_service(self, context, dns_domain, dns_name, records): ips = [str(r) for r in records] try: self.dns_driver.create_record_set(context, dns_domain, dns_name, ips) except (dns_exc.DNSDomainNotFound, dns_exc.DuplicateRecordSet) as e: LOG.exception("Error publishing floating IP data in external " "DNS service. Name: '%(name)s'. Domain: " "'%(domain)s'. DNS service driver message " "'%(message)s'", {"name": dns_name, "domain": dns_domain, "message": e.msg}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/dvr_mac_db.py0000644000175000017500000002121400000000000021654 0ustar00coreycorey00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import netaddr from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib.db import api as db_api from neutron_lib import exceptions as n_exc from neutron_lib.exceptions import dvr as dvr_exc from neutron_lib.objects import exceptions from neutron_lib.plugins import directory from neutron_lib.utils import net from oslo_config import cfg from oslo_log import helpers as log_helpers from oslo_log import log as logging from sqlalchemy import or_ from neutron.common import utils from neutron.conf.db import dvr_mac_db from neutron.conf.db import l3_dvr_db from neutron.db import models_v2 from neutron.extensions import dvr as ext_dvr from neutron.objects import router from neutron.plugins.ml2 import models as ml2_models LOG = logging.getLogger(__name__) dvr_mac_db.register_db_dvr_mac_opts() l3_dvr_db.register_db_l3_dvr_opts() def get_ports_query_by_subnet_and_ip(context, subnet, ip_addresses=None): query = context.session.query(models_v2.Port) query = query.join(models_v2.IPAllocation) query = query.filter( models_v2.Port.id == models_v2.IPAllocation.port_id, models_v2.IPAllocation.subnet_id == subnet) if ip_addresses: query = query.filter( models_v2.IPAllocation.ip_address.in_(ip_addresses)) return query @registry.has_registry_receivers class DVRDbMixin(ext_dvr.DVRMacAddressPluginBase): """Mixin class to add dvr mac address to db_plugin_base_v2.""" @property def plugin(self): try: if self._plugin is not None: return self._plugin except AttributeError: pass self._plugin = directory.get_plugin() return self._plugin @staticmethod @db_api.retry_if_session_inactive() def _db_delete_mac_associated_with_agent(context, agent): host = agent['host'] plugin = directory.get_plugin() if [a for a in plugin.get_agents(context, filters={'host': [host]}) if a['id'] != agent['id']]: # there are still agents on this host, don't mess with the mac # entry until they are all deleted. 
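# Illustrative aside, not part of the original module: the helper used
# further below by _create_dvr_mac_address_retry. Zero octets in the
# base MAC are randomised; the base shown is the documented default
# for dvr_base_mac.
from neutron_lib.utils import net as net_utils

base_mac = 'fa:16:3f:00:00:00'.split(':')
print(net_utils.get_random_mac(base_mac))  # e.g. fa:16:3f:5d:12:a9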
return if not router.DVRMacAddress.delete_objects(context, host=host): return # notify remaining agents so they cleanup flows dvr_macs = plugin.get_dvr_mac_address_list(context) plugin.notifier.dvr_mac_address_update(context, dvr_macs) @staticmethod @registry.receives(resources.AGENT, [events.BEFORE_DELETE]) def _delete_mac_associated_with_agent(resource, event, trigger, payload=None): DVRDbMixin._db_delete_mac_associated_with_agent( payload.context, payload.latest_state) @db_api.CONTEXT_READER def _get_dvr_mac_address_by_host(self, context, host): dvr_obj = router.DVRMacAddress.get_object(context, host=host) if not dvr_obj: raise dvr_exc.DVRMacAddressNotFound(host=host) return self._make_dvr_mac_address_dict(dvr_obj) @utils.transaction_guard @db_api.retry_if_session_inactive() def _create_dvr_mac_address_retry(self, context, host, base_mac): with db_api.CONTEXT_WRITER.using(context): mac_address = net.get_random_mac(base_mac) dvr_mac_binding = router.DVRMacAddress( context, host=host, mac_address=netaddr.EUI(mac_address)) dvr_mac_binding.create() LOG.debug("Generated DVR mac for host %(host)s " "is %(mac_address)s", {'host': host, 'mac_address': mac_address}) dvr_macs = self.get_dvr_mac_address_list(context) # TODO(vivek): improve scalability of this fanout by # sending a single mac address rather than the entire set self.notifier.dvr_mac_address_update(context, dvr_macs) return self._make_dvr_mac_address_dict(dvr_mac_binding) def _create_dvr_mac_address(self, context, host): """Create DVR mac address for a given host.""" base_mac = cfg.CONF.dvr_base_mac.split(':') try: return self._create_dvr_mac_address_retry(context, host, base_mac) except exceptions.NeutronDbObjectDuplicateEntry: LOG.error("MAC generation error after %s attempts", db_api.MAX_RETRIES) raise n_exc.HostMacAddressGenerationFailure(host=host) @db_api.CONTEXT_READER def get_dvr_mac_address_list(self, context): return [ dvr_mac.to_dict() for dvr_mac in router.DVRMacAddress.get_objects(context) ] def get_dvr_mac_address_by_host(self, context, host): """Determine the MAC for the DVR port associated to host.""" if not host: return try: return self._get_dvr_mac_address_by_host(context, host) except dvr_exc.DVRMacAddressNotFound: return self._create_dvr_mac_address(context, host) def _make_dvr_mac_address_dict(self, dvr_mac_entry, fields=None): return {'host': dvr_mac_entry['host'], 'mac_address': str(dvr_mac_entry['mac_address'])} @log_helpers.log_method_call @db_api.retry_if_session_inactive() def get_ports_on_host_by_subnet(self, context, host, subnet): """Returns DVR serviced ports on a given subnet in the input host This method returns ports that need to be serviced by DVR. 
        :param context: rpc request context
        :param host: host id to match and extract ports of interest
        :param subnet: subnet id to match and extract ports of interest
        :returns: list -- Ports on the given subnet in the input host
        """
        host_dvr_for_dhcp = cfg.CONF.host_dvr_for_dhcp
        query = context.session.query(models_v2.Port)
        query = query.join(ml2_models.PortBinding)
        query = query.join(models_v2.IPAllocation)
        query = query.filter(
            models_v2.Port.id == ml2_models.PortBinding.port_id,
            models_v2.Port.id == models_v2.IPAllocation.port_id,
            ml2_models.PortBinding.host == host,
            models_v2.IPAllocation.subnet_id == subnet)
        owner_filter = or_(
            models_v2.Port.device_owner.startswith(
                constants.DEVICE_OWNER_COMPUTE_PREFIX),
            models_v2.Port.device_owner.in_(
                utils.get_other_dvr_serviced_device_owners(
                    host_dvr_for_dhcp)))
        ports_query = query.filter(owner_filter)
        ports = [
            self.plugin._make_port_dict(port, process_extensions=False,
                                        with_fixed_ips=False)
            for port in ports_query.all()
        ]
        LOG.debug("Returning list of dvr serviced ports on host %(host)s"
                  " for subnet %(subnet)s ports %(ports)s",
                  {'host': host, 'subnet': subnet, 'ports': ports})
        return ports

    @log_helpers.log_method_call
    @db_api.retry_if_session_inactive()
    def get_subnet_for_dvr(self, context, subnet, fixed_ips=None):
        if fixed_ips:
            subnet_data = fixed_ips[0]['subnet_id']
        else:
            subnet_data = subnet
        try:
            subnet_info = self.plugin.get_subnet(context, subnet_data)
        except n_exc.SubnetNotFound:
            return {}
        else:
            # retrieve the gateway port on this subnet
            if fixed_ips:
                ip_address = fixed_ips[0]['ip_address']
            else:
                ip_address = subnet_info['gateway_ip']
            query = get_ports_query_by_subnet_and_ip(
                context, subnet, [ip_address])
            internal_gateway_ports = query.all()
            if not internal_gateway_ports:
                LOG.error("Could not retrieve gateway port "
                          "for subnet %s", subnet_info)
                return {}
            internal_port = internal_gateway_ports[0]
            subnet_info['gateway_mac'] = internal_port['mac_address']
            return subnet_info


# ---- neutron-16.0.0.0b2.dev214/neutron/db/external_net_db.py ----

# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib.api.definitions import network as net_def from neutron_lib.api import validators from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib.db import model_query from neutron_lib.db import resource_extend from neutron_lib.db import utils as db_utils from neutron_lib import exceptions as n_exc from neutron_lib.exceptions import external_net as extnet_exc from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from sqlalchemy.sql import expression as expr from neutron._i18n import _ from neutron.db import models_v2 from neutron.extensions import rbac as rbac_ext from neutron.objects import network as net_obj from neutron.objects import router as l3_obj def _network_filter_hook(context, original_model, conditions): if conditions is not None and not hasattr(conditions, '__iter__'): conditions = (conditions, ) # Apply the external network filter only in non-admin and non-advsvc # context if db_utils.model_query_scope_is_project(context, original_model): # the table will already be joined to the rbac entries for the # shared check so we don't need to worry about ensuring that rbac_model = original_model.rbac_entries.property.mapper.class_ tenant_allowed = ( (rbac_model.action == 'access_as_external') & (rbac_model.target_tenant == context.tenant_id) | (rbac_model.target_tenant == '*')) conditions = expr.or_(tenant_allowed, *conditions) return conditions def _network_result_filter_hook(query, filters): vals = filters and filters.get(extnet_apidef.EXTERNAL, []) if not vals: return query if vals[0]: return query.filter(models_v2.Network.external.has()) return query.filter(~models_v2.Network.external.has()) @resource_extend.has_resource_extenders @registry.has_registry_receivers class External_net_db_mixin(object): """Mixin class to add external network methods to db_base_plugin_v2.""" def __new__(cls, *args, **kwargs): model_query.register_hook( models_v2.Network, "external_net", query_hook=None, filter_hook=_network_filter_hook, result_filters=_network_result_filter_hook) return super(External_net_db_mixin, cls).__new__(cls, *args, **kwargs) def _network_is_external(self, context, net_id): return net_obj.ExternalNetwork.objects_exist( context, network_id=net_id) @staticmethod @resource_extend.extends([net_def.COLLECTION_NAME]) def _extend_network_dict_l3(network_res, network_db): # Comparing with None for converting uuid into bool network_res[extnet_apidef.EXTERNAL] = network_db.external is not None return network_res def _process_l3_create(self, context, net_data, req_data): external = req_data.get(extnet_apidef.EXTERNAL) external_set = validators.is_attr_set(external) if not external_set: return if external: net_obj.ExternalNetwork( context, network_id=net_data['id']).create() net_rbac_args = {'project_id': net_data['tenant_id'], 'object_id': net_data['id'], 'action': 'access_as_external', 'target_tenant': '*'} net_obj.NetworkRBAC(context, **net_rbac_args).create() net_data[extnet_apidef.EXTERNAL] = external def _process_l3_update(self, context, net_data, req_data, allow_all=True): new_value = req_data.get(extnet_apidef.EXTERNAL) net_id = net_data['id'] if not validators.is_attr_set(new_value): return if net_data.get(extnet_apidef.EXTERNAL) == new_value: return if new_value: net_obj.ExternalNetwork( context, network_id=net_id).create() 
            net_data[extnet_apidef.EXTERNAL] = True
            if allow_all:
                net_rbac_args = {'project_id': net_data['tenant_id'],
                                 'object_id': net_id,
                                 'action': 'access_as_external',
                                 'target_tenant': '*'}
                net_obj.NetworkRBAC(context, **net_rbac_args).create()
        else:
            # must make sure we do not have any external gateway ports
            # (and thus, possible floating IPs) on this network before
            # allowing it to be updated to external=False
            if context.session.query(models_v2.Port.id).filter_by(
                    device_owner=constants.DEVICE_OWNER_ROUTER_GW,
                    network_id=net_data['id']).first():
                raise extnet_exc.ExternalNetworkInUse(net_id=net_id)
            net_obj.ExternalNetwork.delete_objects(
                context, network_id=net_id)
            net_obj.NetworkRBAC.delete_objects(
                context, object_id=net_id, action='access_as_external')
            net_data[extnet_apidef.EXTERNAL] = False

    def _process_l3_delete(self, context, network_id):
        l3plugin = directory.get_plugin(plugin_constants.L3)
        if l3plugin:
            l3plugin.delete_disassociated_floatingips(context, network_id)

    @registry.receives(resources.RBAC_POLICY, [events.BEFORE_CREATE])
    def _process_ext_policy_create(self, resource, event, trigger,
                                   payload=None):
        object_type = payload.metadata.get('object_type')
        policy = payload.request_body
        context = payload.context
        if (object_type != 'network' or
                policy['action'] != 'access_as_external'):
            return
        net = self.get_network(context, policy['object_id'])
        if not context.is_admin and net['tenant_id'] != context.tenant_id:
            msg = _("Only admins can manipulate policies on networks they "
                    "do not own")
            raise n_exc.InvalidInput(error_message=msg)
        if not self._network_is_external(context, policy['object_id']):
            # we automatically convert the network into an external network
            self._process_l3_update(context, net,
                                    {extnet_apidef.EXTERNAL: True},
                                    allow_all=False)

    @registry.receives(resources.RBAC_POLICY, [events.AFTER_DELETE])
    def _process_ext_policy_delete(self, resource, event, trigger,
                                   payload=None):
        object_type = payload.metadata.get('object_type')
        policy = payload.latest_state
        context = payload.context
        if (object_type != 'network' or
                policy['action'] != 'access_as_external'):
            return
        # If the network still has RBAC policies, we should not
        # update the external attribute.
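# A pure-Python analogue of the "tenant_allowed" expression built by
# _network_filter_hook earlier in this module, under hypothetical data. Note
# the operator precedence mirrored from the SQL expression: a wildcard '*'
# entry grants visibility on its own, while a project-specific entry must also
# match the 'access_as_external' action.
def tenant_allowed(rbac_entries, tenant_id):
    return any((entry['action'] == 'access_as_external' and
                entry['target_tenant'] == tenant_id) or
               entry['target_tenant'] == '*'
               for entry in rbac_entries)


entries = [{'action': 'access_as_external', 'target_tenant': '*'}]
assert tenant_allowed(entries, 'any-project-id')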
if net_obj.NetworkRBAC.count(context, object_id=policy['object_id'], action='access_as_external'): return net = self.get_network(context, policy['object_id']) self._process_l3_update(context, net, {extnet_apidef.EXTERNAL: False}) @registry.receives(resources.RBAC_POLICY, (events.BEFORE_UPDATE, events.BEFORE_DELETE)) def _validate_ext_not_in_use_by_tenant(self, resource, event, trigger, payload=None): object_type = payload.metadata.get('object_type') policy = payload.latest_state context = payload.context if (object_type != 'network' or policy['action'] != 'access_as_external'): return new_project = None if event == events.BEFORE_UPDATE: new_project = payload.request_body['target_tenant'] if new_project == policy['target_tenant']: # nothing to validate if the tenant didn't change return gw_ports = context.session.query(models_v2.Port.id).filter_by( device_owner=constants.DEVICE_OWNER_ROUTER_GW, network_id=policy['object_id']) gw_ports = [gw_port[0] for gw_port in gw_ports] if policy['target_tenant'] != '*': filters = { 'gw_port_id': gw_ports, 'project_id': policy['target_tenant'] } # if there is a wildcard entry we can safely proceed without the # router lookup because they will have access either way if net_obj.NetworkRBAC.count( context, object_id=policy['object_id'], action='access_as_external', target_tenant='*'): return router_exist = l3_obj.Router.objects_exist(context, **filters) else: # deleting the wildcard is okay as long as the tenants with # attached routers have their own entries and the network is # not the default external network. if net_obj.ExternalNetwork.objects_exist( context, network_id=policy['object_id'], is_default=True): msg = _("Default external networks must be shared to " "everyone.") raise rbac_ext.RbacPolicyInUse(object_id=policy['object_id'], details=msg) projects = net_obj.NetworkRBAC.get_projects( context, object_id=policy['object_id'], action='access_as_external') projects_with_entries = [project for project in projects if project != '*'] if new_project: projects_with_entries.append(new_project) router_exist = l3_obj.Router.check_routers_not_owned_by_projects( context, gw_ports, projects_with_entries) if router_exist: msg = _("There are routers attached to this network that " "depend on this policy for access.") raise rbac_ext.RbacPolicyInUse(object_id=policy['object_id'], details=msg) @registry.receives(resources.NETWORK, [events.BEFORE_DELETE]) def _before_network_delete_handler(self, resource, event, trigger, payload=None): self._process_l3_delete(payload.context, payload.resource_id) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2590442 neutron-16.0.0.0b2.dev214/neutron/db/extra_dhcp_opt/0000755000175000017500000000000000000000000022225 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/extra_dhcp_opt/__init__.py0000644000175000017500000000000000000000000024324 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/extra_dhcp_opt/models.py0000644000175000017500000000370100000000000024063 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm from neutron.db import models_v2 class ExtraDhcpOpt(model_base.BASEV2, model_base.HasId): """Represent a generic concept of extra options associated to a port. Each port may have none to many dhcp opts associated to it that can define specifically different or extra options to DHCP clients. These will be written to the /opts files, and each option's tag will be referenced in the /host file. """ port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), nullable=False) opt_name = sa.Column(sa.String(64), nullable=False) opt_value = sa.Column(sa.String(255), nullable=False) ip_version = sa.Column(sa.Integer, server_default='4', nullable=False) __table_args__ = (sa.UniqueConstraint( 'port_id', 'opt_name', 'ip_version', name='uniq_extradhcpopts0portid0optname0ipversion'), model_base.BASEV2.__table_args__,) # Add a relationship to the Port model in order to instruct SQLAlchemy to # eagerly load extra_dhcp_opts bindings ports = orm.relationship( models_v2.Port, load_on_pending=True, backref=orm.backref("dhcp_opts", lazy='subquery', cascade='delete')) revises_on_change = ('ports', ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/extradhcpopt_db.py0000644000175000017500000001364100000000000022753 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import extra_dhcp_opt as edo_ext from neutron_lib.api.definitions import port as port_def from neutron_lib.db import api as db_api from neutron_lib.db import resource_extend from neutron.objects.port.extensions import extra_dhcp_opt as obj_extra_dhcp from neutron.objects import ports as port_obj @resource_extend.has_resource_extenders class ExtraDhcpOptMixin(object): """Mixin class to add extra options to the DHCP opts file and associate them to a port. 
""" def _is_valid_opt_value(self, opt_name, opt_value): # If the dhcp opt is blank-able, it shouldn't be saved to the DB in # case that the value is None if opt_name in edo_ext.VALID_BLANK_EXTRA_DHCP_OPTS: return opt_value is not None # Otherwise, it shouldn't be saved to the DB in case that the value # is None or empty return bool(opt_value) def _process_port_create_extra_dhcp_opts(self, context, port, extra_dhcp_opts): if not extra_dhcp_opts: return port with db_api.CONTEXT_WRITER.using(context): for dopt in extra_dhcp_opts: if self._is_valid_opt_value(dopt['opt_name'], dopt['opt_value']): ip_version = dopt.get('ip_version', 4) extra_dhcp_obj = obj_extra_dhcp.ExtraDhcpOpt( context, port_id=port['id'], opt_name=dopt['opt_name'], opt_value=dopt['opt_value'], ip_version=ip_version) extra_dhcp_obj.create() return self._extend_port_extra_dhcp_opts_dict(context, port) def _extend_port_extra_dhcp_opts_dict(self, context, port): port[edo_ext.EXTRADHCPOPTS] = self._get_port_extra_dhcp_opts_binding( context, port['id']) def _get_port_extra_dhcp_opts_binding(self, context, port_id): opts = obj_extra_dhcp.ExtraDhcpOpt.get_objects( context, port_id=port_id) # TODO(mhickey): When port serilization is available then # the object list should be returned instead return [{'opt_name': r.opt_name, 'opt_value': r.opt_value, 'ip_version': r.ip_version} for r in opts] def _update_extra_dhcp_opts_on_port(self, context, id, port, updated_port=None): # It is not necessary to update in a transaction, because # its called from within one from ovs_neutron_plugin. dopts = port['port'].get(edo_ext.EXTRADHCPOPTS) if dopts: opts = obj_extra_dhcp.ExtraDhcpOpt.get_objects( context, port_id=id) # if there are currently no dhcp_options associated to # this port, Then just insert the new ones and be done. with db_api.CONTEXT_WRITER.using(context): for upd_rec in dopts: for opt in opts: if (opt['opt_name'] == upd_rec['opt_name'] and opt['ip_version'] == upd_rec.get( 'ip_version', 4)): # to handle deleting of a opt from the port. if upd_rec['opt_value'] is None: opt.delete() else: if (self._is_valid_opt_value( opt['opt_name'], upd_rec['opt_value']) and opt['opt_value'] != upd_rec['opt_value']): opt['opt_value'] = upd_rec['opt_value'] opt.update() break else: if self._is_valid_opt_value( upd_rec['opt_name'], upd_rec['opt_value']): ip_version = upd_rec.get('ip_version', 4) extra_dhcp_obj = obj_extra_dhcp.ExtraDhcpOpt( context, port_id=id, opt_name=upd_rec['opt_name'], opt_value=upd_rec['opt_value'], ip_version=ip_version) extra_dhcp_obj.create() if updated_port: edolist = self._get_port_extra_dhcp_opts_binding(context, id) updated_port[edo_ext.EXTRADHCPOPTS] = edolist return bool(dopts) @staticmethod @resource_extend.extends([port_def.COLLECTION_NAME]) def _extend_port_dict_extra_dhcp_opt(res, port): if isinstance(port, port_obj.Port): port_dhcp_options = port.get('dhcp_options') else: port_dhcp_options = port.dhcp_opts res[edo_ext.EXTRADHCPOPTS] = [{'opt_name': dho.opt_name, 'opt_value': dho.opt_value, 'ip_version': dho.ip_version} for dho in port_dhcp_options] return res ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/extraroute_db.py0000644000175000017500000002341700000000000022452 0ustar00coreycorey00000000000000# Copyright 2013, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import netaddr from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib.db import api as db_api from neutron_lib.db import resource_extend from neutron_lib.exceptions import extraroute as xroute_exc from neutron_lib.utils import helpers from neutron_lib.utils import net as net_utils from oslo_config import cfg from oslo_log import log as logging from neutron._i18n import _ from neutron.conf.db import extraroute_db from neutron.db import l3_db from neutron.objects import router as l3_obj LOG = logging.getLogger(__name__) extraroute_db.register_db_extraroute_opts() @resource_extend.has_resource_extenders class ExtraRoute_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin): """Mixin class to support extra route configuration on router.""" @staticmethod @resource_extend.extends([l3_apidef.ROUTERS]) def _extend_router_dict_extraroute(router_res, router_db): router_res['routes'] = (ExtraRoute_dbonly_mixin. _make_extra_route_list( router_db['route_list'] )) def update_router(self, context, id, router): r = router['router'] if 'routes' in r: with db_api.CONTEXT_WRITER.using(context): # check if route exists and have permission to access router_db = self._get_router(context, id) old_router = self._make_router_dict(router_db) routes_added, routes_removed = self._update_extra_routes( context, router_db, r['routes']) router_data = copy.deepcopy(r) router_data['routes_added'] = routes_added router_data['routes_removed'] = routes_removed registry.publish(resources.ROUTER, events.PRECOMMIT_UPDATE, self, payload=events.DBEventPayload( context, request_body=router_data, states=(old_router,), resource_id=id, desired_state=router_db)) return super(ExtraRoute_dbonly_mixin, self).update_router( context, id, router) def _validate_routes_nexthop(self, cidrs, ips, routes, nexthop): # Note(nati): Nexthop should be connected, # so we need to check # nexthop belongs to one of cidrs of the router ports if not netaddr.all_matching_cidrs(nexthop, cidrs): raise xroute_exc.InvalidRoutes( routes=routes, reason=_('the nexthop is not connected with router')) # Note(nati) nexthop should not be same as fixed_ips if nexthop in ips: raise xroute_exc.InvalidRoutes( routes=routes, reason=_('the nexthop is used by router')) def _validate_routes(self, context, router_id, routes): if len(routes) > cfg.CONF.max_routes: raise xroute_exc.RoutesExhausted( router_id=router_id, quota=cfg.CONF.max_routes) context = context.elevated() filters = {'device_id': [router_id]} ports = self._core_plugin.get_ports(context, filters) cidrs = [] ips = [] for port in ports: for ip in port['fixed_ips']: cidrs.append(self._core_plugin.get_subnet( context, ip['subnet_id'])['cidr']) ips.append(ip['ip_address']) for route in routes: self._validate_routes_nexthop( cidrs, ips, routes, route['nexthop']) def _update_extra_routes(self, context, router, routes): self._validate_routes(context, router['id'], routes) old_routes = self._get_extra_routes_by_router_id(context, router['id']) added, removed = 
helpers.diff_list_of_dict(old_routes, routes) LOG.debug('Added routes are %s', added) for route in added: l3_obj.RouterRoute( context, router_id=router['id'], destination=net_utils.AuthenticIPNetwork(route['destination']), nexthop=netaddr.IPAddress(route['nexthop'])).create() LOG.debug('Removed routes are %s', removed) for route in removed: l3_obj.RouterRoute.get_object( context, router_id=router['id'], destination=route['destination'], nexthop=route['nexthop']).delete() return added, removed @staticmethod def _make_extra_route_list(extra_routes): # NOTE(yamamoto): the extra_routes argument is either object or db row return [{'destination': str(route['destination']), 'nexthop': str(route['nexthop'])} for route in extra_routes] def _get_extra_routes_by_router_id(self, context, id): router_objs = l3_obj.RouterRoute.get_objects(context, router_id=id) return self._make_extra_route_list(router_objs) def _confirm_router_interface_not_in_use(self, context, router_id, subnet_id): super(ExtraRoute_dbonly_mixin, self)._confirm_router_interface_not_in_use( context, router_id, subnet_id) subnet = self._core_plugin.get_subnet(context, subnet_id) subnet_cidr = netaddr.IPNetwork(subnet['cidr']) extra_routes = self._get_extra_routes_by_router_id(context, router_id) for route in extra_routes: if netaddr.all_matching_cidrs(route['nexthop'], [subnet_cidr]): raise xroute_exc.RouterInterfaceInUseByRoute( router_id=router_id, subnet_id=subnet_id) @staticmethod def _add_extra_routes(old, add): """Add two lists of extra routes. Exact duplicates (both destination and nexthop) in old and add are merged into one item. Same destinations with different nexthops are accepted and all of them are returned. Overlapping destinations are accepted and all of them are returned. """ routes_dict = {} # its values are sets of nexthops for r in old + add: dst = r['destination'] nexthop = r['nexthop'] if dst not in routes_dict: routes_dict[dst] = set() routes_dict[dst].add(nexthop) routes_list = [] for dst, nexthops in routes_dict.items(): for nexthop in nexthops: routes_list.append({'destination': dst, 'nexthop': nexthop}) return routes_list @staticmethod def _remove_extra_routes(old, remove): """Remove the 2nd list of extra routes from the first. Since we care about the end state if an extra route to be removed is already missing from old, that's not an error, but accepted. """ routes_dict = {} # its values are sets of nexthops for r in old: dst = r['destination'] nexthop = r['nexthop'] if dst not in routes_dict: routes_dict[dst] = set() routes_dict[dst].add(nexthop) for r in remove: dst = r['destination'] nexthop = r['nexthop'] if dst in routes_dict: routes_dict[dst].discard(nexthop) routes_list = [] for dst, nexthops in routes_dict.items(): for nexthop in nexthops: routes_list.append({'destination': dst, 'nexthop': nexthop}) return routes_list @db_api.retry_if_session_inactive() def add_extraroutes(self, context, router_id, body=None): # NOTE(bence romsics): The input validation is delayed until # update_router() validates the whole set of routes. Until then # do not trust 'routes'. 
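# A standalone re-statement of the merge semantics documented by
# _add_extra_routes above: exact duplicates collapse into one entry, while the
# same destination may keep several distinct nexthops. Route values here are
# made up for illustration.
def merge_routes(old, add):
    by_dst = {}
    for route in old + add:
        by_dst.setdefault(route['destination'], set()).add(route['nexthop'])
    return [{'destination': dst, 'nexthop': hop}
            for dst, hops in by_dst.items() for hop in hops]


old = [{'destination': '10.0.0.0/24', 'nexthop': '192.168.1.1'}]
add = [{'destination': '10.0.0.0/24', 'nexthop': '192.168.1.1'},  # duplicate
       {'destination': '10.0.0.0/24', 'nexthop': '192.168.1.2'}]  # new nexthop
assert len(merge_routes(old, add)) == 2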
routes = body['router']['routes'] with db_api.CONTEXT_WRITER.using(context): old_routes = self._get_extra_routes_by_router_id( context, router_id) router = self.update_router( context, router_id, {'router': {'routes': self._add_extra_routes(old_routes, routes)}}) return {'router': router} @db_api.retry_if_session_inactive() def remove_extraroutes(self, context, router_id, body=None): # NOTE(bence romsics): The input validation is delayed until # update_router() validates the whole set of routes. Until then # do not trust 'routes'. routes = body['router']['routes'] with db_api.CONTEXT_WRITER.using(context): old_routes = self._get_extra_routes_by_router_id( context, router_id) router = self.update_router( context, router_id, {'router': {'routes': self._remove_extra_routes(old_routes, routes)}}) return {'router': router} class ExtraRoute_db_mixin(ExtraRoute_dbonly_mixin, l3_db.L3_NAT_db_mixin): """Mixin class to support extra route configuration on router with rpc.""" pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/flavors_db.py0000644000175000017500000002530300000000000021720 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import api as db_api from neutron_lib.db import utils as db_utils from neutron_lib.exceptions import flavors as flav_exc from oslo_db import exception as db_exc from oslo_log import log as logging from neutron.db import servicetype_db as sdb from neutron.objects import base as base_obj from neutron.objects import flavor as obj_flavor LOG = logging.getLogger(__name__) class FlavorsDbMixin(object): """Class to support flavors and service profiles.""" def _get_flavor(self, context, flavor_id): flavor = obj_flavor.Flavor.get_object(context, id=flavor_id) if not flavor: raise flav_exc.FlavorNotFound(flavor_id=flavor_id) return flavor def _get_service_profile(self, context, sp_id): service_profile = obj_flavor.ServiceProfile.get_object( context, id=sp_id) if not service_profile: raise flav_exc.ServiceProfileNotFound(sp_id=sp_id) return service_profile @staticmethod def _make_flavor_dict(flavor_obj, fields=None): res = {'id': flavor_obj['id'], 'name': flavor_obj['name'], 'description': flavor_obj['description'], 'service_type': flavor_obj['service_type'], 'enabled': flavor_obj['enabled'], 'service_profiles': list(flavor_obj['service_profile_ids'])} return db_utils.resource_fields(res, fields) @staticmethod def _make_service_profile_dict(sp_obj, fields=None): res = {'id': sp_obj['id'], 'description': sp_obj['description'], 'driver': sp_obj['driver'], 'enabled': sp_obj['enabled'], 'metainfo': sp_obj['metainfo'], 'flavors': list(sp_obj['flavor_ids'])} return db_utils.resource_fields(res, fields) def _ensure_flavor_not_in_use(self, context, flavor_id): """Checks that flavor is not associated with service instance.""" # Future TODO(enikanorov): check that there is no binding to # instances. 
Shall address in future upon getting the right # flavor supported driver # NOTE(kevinbenton): sqlalchemy utils has a cool dependent # objects function we can use to quickly query all tables # that have a foreign key ref to flavors. Or we could replace # the call to this with callback events. pass def _ensure_service_profile_not_in_use(self, context, sp_id): """Ensures no current bindings to flavors exist.""" if obj_flavor.FlavorServiceProfileBinding.objects_exist( context, service_profile_id=sp_id): raise flav_exc.ServiceProfileInUse(sp_id=sp_id) def _validate_driver(self, context, driver): """Confirms a non-empty driver is a valid provider.""" service_type_manager = sdb.ServiceTypeManager.get_instance() providers = service_type_manager.get_service_providers( context, filters={'driver': driver}) if not providers: raise flav_exc.ServiceProfileDriverNotFound(driver=driver) def create_flavor(self, context, flavor): fl = flavor['flavor'] obj = obj_flavor.Flavor( context, name=fl['name'], description=fl['description'], service_type=fl['service_type'], enabled=fl['enabled']) obj.create() return self._make_flavor_dict(obj) def update_flavor(self, context, flavor_id, flavor): with db_api.CONTEXT_WRITER.using(context): self._ensure_flavor_not_in_use(context, flavor_id) fl_obj = self._get_flavor(context, flavor_id) fl_obj.update_fields(flavor['flavor']) fl_obj.update() return self._make_flavor_dict(fl_obj) def get_flavor(self, context, flavor_id, fields=None): fl = self._get_flavor(context, flavor_id) return self._make_flavor_dict(fl, fields) def delete_flavor(self, context, flavor_id): # NOTE(kevinbenton): we need to fix _ensure_flavor_not_in_use, # but the fix is non-trivial since multiple services can use # flavors so for now we just capture the foreign key violation # to detect if it's in use. 
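# The _make_flavor_dict/_make_service_profile_dict helpers above funnel
# through db_utils.resource_fields to trim the response to the caller's
# requested fields; a minimal stand-in for that trimming behaviour, with a
# made-up flavor dict:
def resource_fields(resource, fields):
    if not fields:
        return resource
    return {key: value for key, value in resource.items() if key in fields}


flavor = {'id': 'f1', 'name': 'gold', 'description': '', 'service_type': 'L3',
          'enabled': True, 'service_profiles': []}
assert resource_fields(flavor, ['id', 'name']) == {'id': 'f1', 'name': 'gold'}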
try: with db_api.CONTEXT_WRITER.using(context): self._ensure_flavor_not_in_use(context, flavor_id) self._get_flavor(context, flavor_id).delete() except db_exc.DBReferenceError: raise flav_exc.FlavorInUse(flavor_id=flavor_id) def get_flavors(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pager = base_obj.Pager(sorts, limit, page_reverse, marker) filters = filters or {} flavor_objs = obj_flavor.Flavor.get_objects(context, _pager=pager, **filters) return [self._make_flavor_dict(flavor_object, fields) for flavor_object in flavor_objs] def create_flavor_service_profile(self, context, service_profile, flavor_id): sp = service_profile['service_profile'] with db_api.CONTEXT_WRITER.using(context): if obj_flavor.FlavorServiceProfileBinding.objects_exist( context, service_profile_id=sp['id'], flavor_id=flavor_id): raise flav_exc.FlavorServiceProfileBindingExists( sp_id=sp['id'], fl_id=flavor_id) obj_flavor.FlavorServiceProfileBinding( context, service_profile_id=sp['id'], flavor_id=flavor_id).create() fl_obj = self._get_flavor(context, flavor_id) return self._make_flavor_dict(fl_obj) def delete_flavor_service_profile(self, context, service_profile_id, flavor_id): if (obj_flavor.FlavorServiceProfileBinding.delete_objects( context, service_profile_id=service_profile_id, flavor_id=flavor_id) == 0): raise flav_exc.FlavorServiceProfileBindingNotFound( sp_id=service_profile_id, fl_id=flavor_id) @staticmethod def get_flavor_service_profile(context, service_profile_id, flavor_id, fields=None): if not obj_flavor.FlavorServiceProfileBinding.objects_exist( context, service_profile_id=service_profile_id, flavor_id=flavor_id): raise flav_exc.FlavorServiceProfileBindingNotFound( sp_id=service_profile_id, fl_id=flavor_id) res = {'service_profile_id': service_profile_id, 'flavor_id': flavor_id} return db_utils.resource_fields(res, fields) def create_service_profile(self, context, service_profile): sp = service_profile['service_profile'] if sp['driver']: self._validate_driver(context, sp['driver']) else: if not sp['metainfo']: raise flav_exc.ServiceProfileEmpty() obj = obj_flavor.ServiceProfile( context, description=sp['description'], driver=sp['driver'], enabled=sp['enabled'], metainfo=sp['metainfo']) obj.create() return self._make_service_profile_dict(obj) def update_service_profile(self, context, service_profile_id, service_profile): sp = service_profile['service_profile'] if sp.get('driver'): self._validate_driver(context, sp['driver']) with db_api.CONTEXT_WRITER.using(context): self._ensure_service_profile_not_in_use(context, service_profile_id) sp_obj = self._get_service_profile(context, service_profile_id) sp_obj.update_fields(sp) sp_obj.update() return self._make_service_profile_dict(sp_obj) def get_service_profile(self, context, sp_id, fields=None): sp_db = self._get_service_profile(context, sp_id) return self._make_service_profile_dict(sp_db, fields) def delete_service_profile(self, context, sp_id): with db_api.CONTEXT_WRITER.using(context): self._ensure_service_profile_not_in_use(context, sp_id) self._get_service_profile(context, sp_id).delete() def get_service_profiles(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pager = base_obj.Pager(sorts, limit, page_reverse, marker) filters = filters or {} sp_objs = obj_flavor.ServiceProfile.get_objects(context, _pager=pager, **filters) return [self._make_service_profile_dict(sp_obj, fields) for sp_obj in sp_objs] def get_flavor_next_provider(self, context, 
flavor_id, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """From flavor, choose service profile and find provider for driver.""" objs = obj_flavor.FlavorServiceProfileBinding.get_objects( context, flavor_id=flavor_id) if not objs: raise flav_exc.FlavorServiceProfileBindingNotFound( sp_id='', fl_id=flavor_id) # Get the service profile from the first binding # TODO(jwarendt) Should become a scheduling framework instead sp_obj = self._get_service_profile(context, objs[0].service_profile_id) if not sp_obj.enabled: raise flav_exc.ServiceProfileDisabled() LOG.debug("Found driver %s.", sp_obj.driver) service_type_manager = sdb.ServiceTypeManager.get_instance() providers = service_type_manager.get_service_providers( context, filters={'driver': sp_obj.driver}) if not providers: raise flav_exc.ServiceProfileDriverNotFound( driver=sp_obj.driver) LOG.debug("Found providers %s.", providers) res = {'driver': sp_obj.driver, 'provider': providers[0].get('name')} return [db_utils.resource_fields(res, fields)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/ipam_backend_mixin.py0000644000175000017500000010212100000000000023372 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import copy import itertools import netaddr from neutron_lib.api.definitions import ip_allocation as ipalloc_apidef from neutron_lib.api.definitions import portbindings from neutron_lib.api import validators from neutron_lib import constants as const from neutron_lib.db import api as db_api from neutron_lib.db import utils as db_utils from neutron_lib import exceptions as exc from neutron_lib.exceptions import address_scope as addr_scope_exc from neutron_lib.utils import net as net_utils from oslo_config import cfg from oslo_log import log as logging from sqlalchemy.orm import exc as orm_exc from neutron._i18n import _ from neutron.common import ipv6_utils from neutron.db import db_base_plugin_common from neutron.db import models_v2 from neutron.extensions import segment from neutron.ipam import exceptions as ipam_exceptions from neutron.ipam import utils as ipam_utils from neutron.objects import address_scope as addr_scope_obj from neutron.objects import network as network_obj from neutron.objects import subnet as subnet_obj from neutron.services.segments import exceptions as segment_exc LOG = logging.getLogger(__name__) class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon): """Contains IPAM specific code which is common for both backends. 
""" # Tracks changes in ip allocation for port using namedtuple Changes = collections.namedtuple('Changes', 'add original remove') @staticmethod def _gateway_ip_str(subnet, cidr_net): if subnet.get('gateway_ip') is const.ATTR_NOT_SPECIFIED: if subnet.get('ip_version') == const.IP_VERSION_6: gateway_ip = netaddr.IPNetwork(cidr_net).network pd_net = netaddr.IPNetwork(const.PROVISIONAL_IPV6_PD_PREFIX) if gateway_ip == pd_net.network: return else: gateway_ip = netaddr.IPNetwork(cidr_net).network + 1 return str(gateway_ip) return subnet.get('gateway_ip') @staticmethod def pools_to_ip_range(ip_pools): ip_range_pools = [] for ip_pool in ip_pools: try: ip_range_pools.append(netaddr.IPRange(ip_pool['start'], ip_pool['end'])) except netaddr.AddrFormatError: LOG.info("Found invalid IP address in pool: " "%(start)s - %(end)s:", {'start': ip_pool['start'], 'end': ip_pool['end']}) raise exc.InvalidAllocationPool(pool=ip_pool) return ip_range_pools def delete_subnet(self, context, subnet_id): pass def validate_pools_with_subnetpool(self, subnet): """Verifies that allocation pools are set correctly Allocation pools can be set for specific subnet request only """ has_allocpool = validators.is_attr_set(subnet['allocation_pools']) is_any_subnetpool_request = not validators.is_attr_set(subnet['cidr']) if is_any_subnetpool_request and has_allocpool: reason = _("allocation_pools allowed only " "for specific subnet requests.") raise exc.BadRequest(resource='subnets', msg=reason) def _validate_ip_version_with_subnetpool(self, subnet, subnetpool): """Validates ip version for subnet_pool and requested subnet""" ip_version = subnet.get('ip_version') has_ip_version = validators.is_attr_set(ip_version) if has_ip_version and ip_version != subnetpool.ip_version: args = {'req_ver': str(subnet['ip_version']), 'pool_ver': str(subnetpool.ip_version)} reason = _("Cannot allocate IPv%(req_ver)s subnet from " "IPv%(pool_ver)s subnet pool") % args raise exc.BadRequest(resource='subnets', msg=reason) def _update_db_port(self, context, db_port, new_port, network_id, new_mac): # Remove all attributes in new_port which are not in the port DB model # and then update the port if (new_mac and new_mac != db_port.mac_address and self._is_mac_in_use(context, network_id, new_mac)): raise exc.MacAddressInUse(net_id=network_id, mac=new_mac) db_port.update(db_utils.filter_non_model_columns(new_port, models_v2.Port)) def _update_subnet_host_routes(self, context, id, s): def _combine(ht): return "{}_{}".format(ht['destination'], ht['nexthop']) old_route_list = self._get_route_by_subnet(context, id) new_route_set = set([_combine(route) for route in s['host_routes']]) old_route_set = set([_combine(route) for route in old_route_list]) for route_str in old_route_set - new_route_set: for route in old_route_list: if _combine(route) == route_str: route.delete() for route_str in new_route_set - old_route_set: route = subnet_obj.Route( context, destination=net_utils.AuthenticIPNetwork( route_str.partition("_")[0]), nexthop=netaddr.IPAddress(route_str.partition("_")[2]), subnet_id=id) route.create() # Gather host routes for result new_routes = [] for route_str in new_route_set: new_routes.append( {'destination': route_str.partition("_")[0], 'nexthop': route_str.partition("_")[2]}) del s["host_routes"] return new_routes def _update_subnet_dns_nameservers(self, context, id, s): new_dns_addr_list = s["dns_nameservers"] # NOTE(changzhi) delete all dns nameservers from db # when update subnet's DNS nameservers. 
And store new # nameservers with order one by one. subnet_obj.DNSNameServer.delete_objects(context, subnet_id=id) for order, server in enumerate(new_dns_addr_list): dns = subnet_obj.DNSNameServer(context, address=server, order=order, subnet_id=id) dns.create() del s["dns_nameservers"] return new_dns_addr_list @db_api.CONTEXT_WRITER def _update_subnet_allocation_pools(self, context, subnet_id, s): subnet_obj.IPAllocationPool.delete_objects(context, subnet_id=subnet_id) pools = [(netaddr.IPAddress(p.first, p.version).format(), netaddr.IPAddress(p.last, p.version).format()) for p in s['allocation_pools']] for p in pools: subnet_obj.IPAllocationPool(context, start=p[0], end=p[1], subnet_id=subnet_id).create() # Gather new pools for result result_pools = [{'start': p[0], 'end': p[1]} for p in pools] del s['allocation_pools'] return result_pools def _update_subnet_service_types(self, context, subnet_id, s): subnet_obj.SubnetServiceType.delete_objects(context, subnet_id=subnet_id) updated_types = s.pop('service_types') for service_type in updated_types: new_type = subnet_obj.SubnetServiceType(context, subnet_id=subnet_id, service_type=service_type) new_type.create() return updated_types def update_db_subnet(self, context, subnet_id, s, oldpools): changes = {} if "dns_nameservers" in s: changes['dns_nameservers'] = ( self._update_subnet_dns_nameservers(context, subnet_id, s)) if "host_routes" in s: changes['host_routes'] = self._update_subnet_host_routes( context, subnet_id, s) if "allocation_pools" in s: changes['allocation_pools'] = ( self._update_subnet_allocation_pools(context, subnet_id, s)) if "service_types" in s: changes['service_types'] = ( self._update_subnet_service_types(context, subnet_id, s)) subnet_obj = self._get_subnet_object(context, subnet_id) subnet_obj.update_fields(s) subnet_obj.update() return subnet_obj, changes def _validate_subnet_cidr(self, context, network, new_subnet_cidr): """Validate the CIDR for a subnet. Verifies the specified CIDR does not overlap with the ones defined for the other subnets specified for this network, or with any other CIDR if overlapping IPs are disabled. Does not apply to subnets with temporary IPv6 Prefix Delegation CIDRs (::/64). """ new_subnet_ipset = netaddr.IPSet([new_subnet_cidr]) # Disallow subnets with prefix length 0 as they will lead to # dnsmasq failures (see bug 1362651). # This is not a discrimination against /0 subnets. # A /0 subnet is conceptually possible but hardly a practical # scenario for neutron's use cases. 
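# _update_subnet_host_routes above diffs old and new routes through a combined
# "destination_nexthop" string key; the set arithmetic it relies on, sketched
# standalone with made-up routes:
def combine(route):
    return '{}_{}'.format(route['destination'], route['nexthop'])


old = [{'destination': '10.1.0.0/24', 'nexthop': '10.0.0.1'}]
new = [{'destination': '10.2.0.0/24', 'nexthop': '10.0.0.1'}]
old_set = {combine(route) for route in old}
new_set = {combine(route) for route in new}
assert old_set - new_set == {'10.1.0.0/24_10.0.0.1'}   # routes to delete
assert new_set - old_set == {'10.2.0.0/24_10.0.0.1'}   # routes to create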
for cidr in new_subnet_ipset.iter_cidrs(): if cidr.prefixlen == 0: err_msg = _("0 is not allowed as CIDR prefix length") raise exc.InvalidInput(error_message=err_msg) if cfg.CONF.allow_overlapping_ips: subnet_list = network.subnets else: subnet_list = self._get_subnets(context) for subnet in subnet_list: if ((netaddr.IPSet([subnet.cidr]) & new_subnet_ipset) and str(subnet.cidr) != const.PROVISIONAL_IPV6_PD_PREFIX): # don't give out details of the overlapping subnet err_msg = ("Requested subnet with cidr: %(cidr)s for " "network: %(network_id)s overlaps with another " "subnet" % {'cidr': new_subnet_cidr, 'network_id': network.id}) LOG.info("Validation for CIDR: %(new_cidr)s failed - " "overlaps with subnet %(subnet_id)s " "(CIDR: %(cidr)s)", {'new_cidr': new_subnet_cidr, 'subnet_id': subnet.id, 'cidr': subnet.cidr}) raise exc.InvalidInput(error_message=err_msg) def _validate_network_subnetpools(self, network, subnet_ip_version, new_subnetpool, network_scope): """Validate all subnets on the given network have been allocated from the same subnet pool as new_subnetpool if no address scope is used. If address scopes are used, validate that all subnets on the given network participate in the same address scope. """ # 'new_subnetpool' might just be the Prefix Delegation ID ipv6_pd_subnetpool = new_subnetpool == const.IPV6_PD_POOL_ID # Check address scope affinities if network_scope: if (ipv6_pd_subnetpool or new_subnetpool and new_subnetpool.address_scope_id != network_scope.id): raise addr_scope_exc.NetworkAddressScopeAffinityError() # Checks for situations where address scopes aren't involved for subnet in network.subnets: if ipv6_pd_subnetpool: # Check the prefix delegation case. Since there is no # subnetpool object, we just check against the PD ID. if (subnet.ip_version == const.IP_VERSION_6 and subnet.subnetpool_id != const.IPV6_PD_POOL_ID): raise exc.NetworkSubnetPoolAffinityError() else: if new_subnetpool: # In this case we have the new subnetpool object, so # we can check the ID and IP version. if (subnet.subnetpool_id != new_subnetpool.id and subnet.ip_version == new_subnetpool.ip_version and not network_scope): raise exc.NetworkSubnetPoolAffinityError() else: if (subnet.subnetpool_id and subnet.ip_version == subnet_ip_version): raise exc.NetworkSubnetPoolAffinityError() def validate_allocation_pools(self, ip_pools, subnet_cidr): """Validate IP allocation pools. Verify start and end address for each allocation pool are valid, ie: constituted by valid and appropriately ordered IP addresses. Also, verify pools do not overlap among themselves. Finally, verify that each range fall within the subnet's CIDR. 
""" subnet = netaddr.IPNetwork(subnet_cidr) subnet_first_ip = netaddr.IPAddress(subnet.first + 1) # last address is broadcast in v4 subnet_last_ip = netaddr.IPAddress(subnet.last - (subnet.version == 4)) LOG.debug("Performing IP validity checks on allocation pools") ip_sets = [] for ip_pool in ip_pools: start_ip = netaddr.IPAddress(ip_pool.first, ip_pool.version) end_ip = netaddr.IPAddress(ip_pool.last, ip_pool.version) if (start_ip.version != subnet.version or end_ip.version != subnet.version): LOG.info("Specified IP addresses do not match " "the subnet IP version") raise exc.InvalidAllocationPool(pool=ip_pool) if start_ip < subnet_first_ip or end_ip > subnet_last_ip: LOG.info("Found pool larger than subnet " "CIDR:%(start)s - %(end)s", {'start': start_ip, 'end': end_ip}) raise exc.OutOfBoundsAllocationPool( pool=ip_pool, subnet_cidr=subnet_cidr) # Valid allocation pool # Create an IPSet for it for easily verifying overlaps ip_sets.append(netaddr.IPSet(ip_pool.cidrs())) LOG.debug("Checking for overlaps among allocation pools " "and gateway ip") ip_ranges = ip_pools[:] # Use integer cursors as an efficient way for implementing # comparison and avoiding comparing the same pair twice for l_cursor in range(len(ip_sets)): for r_cursor in range(l_cursor + 1, len(ip_sets)): if ip_sets[l_cursor] & ip_sets[r_cursor]: l_range = ip_ranges[l_cursor] r_range = ip_ranges[r_cursor] LOG.info("Found overlapping ranges: %(l_range)s and " "%(r_range)s", {'l_range': l_range, 'r_range': r_range}) raise exc.OverlappingAllocationPools( pool_1=l_range, pool_2=r_range, subnet_cidr=subnet_cidr) def _validate_segment(self, context, network_id, segment_id, action=None, old_segment_id=None): segments = subnet_obj.Subnet.get_values( context, 'segment_id', network_id=network_id) associated_segments = set(segments) if None in associated_segments and len(associated_segments) > 1: raise segment_exc.SubnetsNotAllAssociatedWithSegments( network_id=network_id) if action == 'update' and old_segment_id != segment_id: # Check the current state of segments and subnets on the network # before allowing migration from non-routed to routed network. if len(segments) > 1: raise segment_exc.SubnetsNotAllAssociatedWithSegments( network_id=network_id) if (None not in associated_segments and segment_id not in associated_segments): raise segment_exc.SubnetSegmentAssociationChangeNotAllowed() if network_obj.NetworkSegment.count( context, network_id=network_id) > 1: raise segment_exc.NoUpdateSubnetWhenMultipleSegmentsOnNetwork( network_id=network_id) if segment_id: segment = network_obj.NetworkSegment.get_object(context, id=segment_id) if segment.network_id != network_id: raise segment_exc.NetworkIdsDontMatch( subnet_network=network_id, segment_id=segment_id) if segment.is_dynamic: raise segment_exc.SubnetCantAssociateToDynamicSegment() def _get_subnet_for_fixed_ip(self, context, fixed, subnets): # Subnets are all the subnets belonging to the same network. 
if not subnets: msg = _('IP allocation requires subnets for network') raise exc.InvalidInput(error_message=msg) if 'subnet_id' in fixed: def get_matching_subnet(): for subnet in subnets: if subnet['id'] == fixed['subnet_id']: return subnet subnet = get_matching_subnet() if not subnet: subnet_obj = self._get_subnet_object(context, fixed['subnet_id']) msg = (_("Failed to create port on network %(network_id)s" ", because fixed_ips included invalid subnet " "%(subnet_id)s") % {'network_id': subnet_obj.network_id, 'subnet_id': fixed['subnet_id']}) raise exc.InvalidInput(error_message=msg) # Ensure that the IP is valid on the subnet if ('ip_address' in fixed and not ipam_utils.check_subnet_ip(subnet['cidr'], fixed['ip_address'], fixed['device_owner'])): raise exc.InvalidIpForSubnet(ip_address=fixed['ip_address']) return subnet if 'ip_address' not in fixed: msg = _('IP allocation requires subnet_id or ip_address') raise exc.InvalidInput(error_message=msg) for subnet in subnets: if ipam_utils.check_subnet_ip(subnet['cidr'], fixed['ip_address'], fixed['device_owner']): return subnet raise exc.InvalidIpForNetwork(ip_address=fixed['ip_address']) def generate_pools(self, cidr, gateway_ip): return ipam_utils.generate_pools(cidr, gateway_ip) def _prepare_allocation_pools(self, allocation_pools, cidr, gateway_ip): """Returns allocation pools represented as list of IPRanges""" if not validators.is_attr_set(allocation_pools): return self.generate_pools(cidr, gateway_ip) ip_range_pools = self.pools_to_ip_range(allocation_pools) self.validate_allocation_pools(ip_range_pools, cidr) if gateway_ip: self.validate_gw_out_of_pools(gateway_ip, ip_range_pools) return ip_range_pools def validate_gw_out_of_pools(self, gateway_ip, pools): for pool_range in pools: if netaddr.IPAddress(gateway_ip) in pool_range: raise exc.GatewayConflictWithAllocationPools( pool=pool_range, ip_address=gateway_ip) def _is_ip_required_by_subnet(self, context, subnet_id, device_owner): # For ports that are not router ports, retain any automatic # (non-optional, e.g. IPv6 SLAAC) addresses. # NOTE: Need to check the SNAT ports for DVR routers here since # they consume an IP. if device_owner in const.ROUTER_INTERFACE_OWNERS_SNAT: return True subnet_obj = self._get_subnet_object(context, subnet_id) return not (ipv6_utils.is_auto_address_subnet(subnet_obj) and not ipv6_utils.is_ipv6_pd_enabled(subnet_obj)) def _get_changed_ips_for_port(self, context, original_ips, new_ips, device_owner): """Calculate changes in IPs for the port.""" # Collect auto addressed subnet ids that has to be removed on update delete_subnet_ids = set(ip['subnet_id'] for ip in new_ips if ip.get('delete_subnet')) ips = [ip for ip in new_ips if ip.get('subnet_id') not in delete_subnet_ids] add_ips, prev_ips, remove_candidates = [], [], [] # Consider fixed_ips that specify a specific address first to see if # they already existed in original_ips or are completely new. orig_by_ip = {ip['ip_address']: ip for ip in original_ips} for ip in ips: if 'ip_address' not in ip: continue original = orig_by_ip.pop(ip['ip_address'], None) if original: prev_ips.append(original) else: add_ips.append(ip) # Consider fixed_ips that don't specify ip_address. Try to match them # up with originals to see if they can be reused. Create a new map of # the remaining, unmatched originals for this step. 
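# validate_gw_out_of_pools above leans on netaddr's range membership test; a
# standalone sketch with a gateway that deliberately falls inside the pool, so
# the conflict branch would fire:
import netaddr

gateway_ip = netaddr.IPAddress('192.168.1.1')
pool = netaddr.IPRange('192.168.1.1', '192.168.1.50')
# In the mixin this condition raises GatewayConflictWithAllocationPools.
assert gateway_ip in pool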
orig_by_subnet = collections.defaultdict(list) for ip in orig_by_ip.values(): orig_by_subnet[ip['subnet_id']].append(ip) for ip in ips: if 'ip_address' in ip: continue orig = orig_by_subnet.get(ip['subnet_id']) if not orig: add_ips.append(ip) continue # Try to match this new request up with an existing IP orig_ip = orig.pop() if ipv6_utils.is_eui64_address(orig_ip['ip_address']): # In case of EUI64 address, the prefix may have changed so # we want to make sure IPAM gets a chance to re-allocate # it. This is safe in general because EUI-64 addresses # always come out the same given the prefix doesn't change. add_ips.append(ip) remove_candidates.append(orig_ip) else: # Reuse the existing address on this subnet. prev_ips.append(orig_ip) # Iterate through any unclaimed original ips (orig_by_subnet) *and* the # remove_candidates with this compound chain. maybe_remove = itertools.chain( itertools.chain.from_iterable(orig_by_subnet.values()), remove_candidates) # Mark ip for removing if it is not found in new_ips # and subnet requires ip to be set manually. # For auto addressed subnet leave ip unchanged # unless it is explicitly marked for delete. remove_ips = [] for ip in maybe_remove: subnet_id = ip['subnet_id'] ip_required = self._is_ip_required_by_subnet(context, subnet_id, device_owner) if ip_required or subnet_id in delete_subnet_ids: remove_ips.append(ip) else: prev_ips.append(ip) return self.Changes(add=add_ips, original=prev_ips, remove=remove_ips) def delete_port(self, context, port_id): query = (context.session.query(models_v2.Port). enable_eagerloads(False).filter_by(id=port_id)) # Use of the ORM mapper is needed for ensuring appropriate resource # tracking; otherwise SQL Alchemy events won't be triggered. # For more info check 'caveats' in doc/source/devref/quota.rst try: context.session.delete(query.first()) except orm_exc.UnmappedInstanceError: LOG.debug("Port %s was not found and therefore no delete " "operation was performed", port_id) def _save_subnet(self, context, network, subnet_args, dns_nameservers, host_routes, subnet_request): network_scope = addr_scope_obj.AddressScope.get_network_address_scope( context, network.id, subnet_args['ip_version']) # 'subnetpool' is not necessarily an object subnetpool = subnet_args.get('subnetpool_id') if subnetpool and subnetpool != const.IPV6_PD_POOL_ID: subnetpool = self._get_subnetpool(context, subnetpool) self._validate_subnet_cidr(context, network, subnet_args['cidr']) self._validate_network_subnetpools(network, subnet_args['ip_version'], subnetpool, network_scope) service_types = subnet_args.pop('service_types', []) segment_id = subnet_args.get('segment_id') if segment_id: # TODO(slaweq): integrate check if segment exists in # self._validate_segment() method segment = network_obj.NetworkSegment.get_object(context, id=segment_id) if not segment: raise segment_exc.SegmentNotFound(segment_id=segment_id) subnet = subnet_obj.Subnet(context, **subnet_args) subnet.create() # TODO(slaweq): when check is segment exists will be integrated in # self._validate_segment() method, it should be moved to be done before # subnet object is created self._validate_segment(context, network['id'], segment_id) # NOTE(changzhi) Store DNS nameservers with order into DB one # by one when create subnet with DNS nameservers if validators.is_attr_set(dns_nameservers): for order, server in enumerate(dns_nameservers): dns = subnet_obj.DNSNameServer(context, address=server, order=order, subnet_id=subnet.id) dns.create() if validators.is_attr_set(host_routes): for rt in 
host_routes: route = subnet_obj.Route( context, subnet_id=subnet.id, destination=net_utils.AuthenticIPNetwork( rt['destination']), nexthop=netaddr.IPAddress(rt['nexthop'])) route.create() if validators.is_attr_set(service_types): for service_type in service_types: service_type_obj = subnet_obj.SubnetServiceType( context, subnet_id=subnet.id, service_type=service_type) service_type_obj.create() self.save_allocation_pools(context, subnet, subnet_request.allocation_pools) return subnet_obj.Subnet.get_object(context, id=subnet.id) def _classify_subnets(self, context, subnets): """Split into v4, v6 stateless and v6 stateful subnets""" v4, v6_stateful, v6_stateless = [], [], [] for subnet in subnets: if subnet['ip_version'] == 4: v4.append(subnet) elif not ipv6_utils.is_auto_address_subnet(subnet): v6_stateful.append(subnet) else: v6_stateless.append(subnet) return v4, v6_stateful, v6_stateless def _update_ips_for_pd_subnet(self, context, subnets, fixed_ips, mac_address=None): fixed_ip_list = [] subnet_set = {fixed['subnet_id'] for fixed in fixed_ips if 'subnet_id' in fixed} pd_subnets = [s for s in subnets if (s['id'] in subnet_set and ipv6_utils.is_ipv6_pd_enabled(s))] for subnet in pd_subnets: # Already checked subnet validity in _get_subnet_for_fixed_ip if mac_address: fixed_ip_list.append({'subnet_id': subnet['id'], 'subnet_cidr': subnet['cidr'], 'eui64_address': True, 'mac': mac_address}) else: fixed_ip_list.append({'subnet_id': subnet['id']}) return fixed_ip_list def _ipam_get_subnets(self, context, network_id, host, service_type=None, fixed_configured=False, fixed_ips=None): """Return eligible subnets If no eligible subnets are found, determine why and potentially raise an appropriate error. """ subnets = subnet_obj.Subnet.find_candidate_subnets( context, network_id, host, service_type, fixed_configured, fixed_ips) if subnets: subnet_dicts = [self._make_subnet_dict(subnet, context=context) for subnet in subnets] # Give priority to subnets with service_types return sorted( subnet_dicts, key=lambda subnet: not subnet.get('service_types')) if subnet_obj.Subnet.network_has_no_subnet( context, network_id, host, service_type): return [] raise ipam_exceptions.IpAddressGenerationFailureNoMatchingSubnet( network_id=network_id, service_type=service_type) def _make_subnet_args(self, detail, subnet, subnetpool_id): args = super(IpamBackendMixin, self)._make_subnet_args( detail, subnet, subnetpool_id) if validators.is_attr_set(subnet.get(segment.SEGMENT_ID)): args['segment_id'] = subnet[segment.SEGMENT_ID] if validators.is_attr_set(subnet.get('service_types')): args['service_types'] = subnet['service_types'] return args def update_port(self, context, old_port_db, old_port, new_port): """Update the port IPs Updates the port's IPs based on any new fixed_ips passed in or if deferred IP allocation is in effect because allocation requires host binding information that wasn't provided until port update. :param old_port_db: The port database record :param old_port: A port dict created by calling _make_port_dict. This must be called before calling this method in order to load data from extensions, specifically host binding. :param new_port: The new port data passed through the API. 
""" old_host = old_port.get(portbindings.HOST_ID) new_host = new_port.get(portbindings.HOST_ID) host = new_host if validators.is_attr_set(new_host) else old_host changes = self.update_port_with_ips(context, host, old_port_db, new_port, new_port.get('mac_address')) fixed_ips_requested = validators.is_attr_set(new_port.get('fixed_ips')) old_ips = old_port.get('fixed_ips') deferred_ip_allocation = ( old_port.get('ip_allocation') == ipalloc_apidef.IP_ALLOCATION_DEFERRED and host and not old_host and not old_ips and not fixed_ips_requested) if not deferred_ip_allocation: # Check that any existing IPs are valid on the new segment new_host_requested = host and host != old_host if old_ips and new_host_requested and not fixed_ips_requested: valid_subnets = self._ipam_get_subnets( context, old_port['network_id'], host, service_type=old_port.get('device_owner')) valid_subnet_ids = {s['id'] for s in valid_subnets} for fixed_ip in old_ips: if fixed_ip['subnet_id'] not in valid_subnet_ids: raise segment_exc.HostNotCompatibleWithFixedIps( host=host, port_id=old_port['id']) return changes # Allocate as if this were the port create. port_copy = copy.deepcopy(old_port) port_copy['fixed_ips'] = const.ATTR_NOT_SPECIFIED port_copy.update(new_port) context.session.expire(old_port_db, ['fixed_ips']) ips = self.allocate_ips_for_port_and_store( context, {'port': port_copy}, port_copy['id']) getattr(old_port_db, 'fixed_ips') # refresh relationship before return return self.Changes(add=ips, original=[], remove=[]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/ipam_pluggable_backend.py0000644000175000017500000007251600000000000024226 0ustar00coreycorey00000000000000# Copyright (c) 2015 Infoblox Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import netaddr from neutron_lib.api.definitions import portbindings from neutron_lib import constants from neutron_lib.db import api as db_api from neutron_lib import exceptions as n_exc from neutron_lib.objects import utils as obj_utils from neutron_lib.plugins import constants as plugin_consts from neutron_lib.plugins import directory from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_utils import excutils from neutron.common import ipv6_utils from neutron.db import ipam_backend_mixin from neutron.ipam import driver from neutron.ipam import exceptions as ipam_exc from neutron.objects import ports as port_obj from neutron.objects import subnet as obj_subnet LOG = logging.getLogger(__name__) def get_ip_update_not_allowed_device_owner_list(): l3plugin = directory.get_plugin(plugin_consts.L3) # The following list is for IPAM to prevent direct update of port # IP address. Currently it only has some L3 related types. # L2 plugin can add the same list here, but for now it is not required. 
return getattr(l3plugin, 'IP_UPDATE_NOT_ALLOWED_LIST', []) def is_neutron_built_in_router(context, router_id): l3plugin = directory.get_plugin(plugin_consts.L3) return bool(l3plugin and l3plugin.router_supports_scheduling(context, router_id)) class IpamPluggableBackend(ipam_backend_mixin.IpamBackendMixin): def _get_failed_ips(self, all_ips, success_ips): ips_list = (ip_dict['ip_address'] for ip_dict in success_ips) return (ip_dict['ip_address'] for ip_dict in all_ips if ip_dict['ip_address'] not in ips_list) def _safe_rollback(self, func, *args, **kwargs): """Call rollback actions, catching all exceptions. All exceptions are caught and logged here to avoid masking the original exception that triggered the rollback action. """ try: func(*args, **kwargs) except Exception as e: LOG.warning("Revert failed with: %s", e) def _ipam_deallocate_ips(self, context, ipam_driver, port, ips, revert_on_fail=True): """Deallocate a set of IPs over IPAM. If deallocation of any single IP fails, this tries to re-allocate the already deallocated IP addresses with a fixed IP request. """ deallocated = [] try: for ip in ips: try: ipam_subnet = ipam_driver.get_subnet(ip['subnet_id']) ipam_subnet.deallocate(ip['ip_address']) deallocated.append(ip) except n_exc.SubnetNotFound: LOG.debug("Subnet was not found on ip deallocation: %s", ip) except Exception: with excutils.save_and_reraise_exception(): if not ipam_driver.needs_rollback(): return LOG.debug("An exception occurred during IP deallocation.") if revert_on_fail and deallocated: LOG.debug("Reverting deallocation") # In case of deadlock the allocate fails with a db error # and rewrites the original exception, preventing the db_retry # wrappers from restarting the entire api request. self._safe_rollback(self._ipam_allocate_ips, context, ipam_driver, port, deallocated, revert_on_fail=False) elif not revert_on_fail and ips: addresses = ', '.join(self._get_failed_ips(ips, deallocated)) LOG.error("IP deallocation failed on " "external system for %s", addresses) return deallocated def _ipam_allocate_ips(self, context, ipam_driver, port, ips, revert_on_fail=True): """Allocate a set of IPs over IPAM. If allocation of any single IP fails, this tries to deallocate all previously allocated IP addresses. """ allocated = [] factory = ipam_driver.get_address_request_factory() # we need to start with entries that asked for a specific IP in case # those IPs happen to be next in the line for allocation for ones that # didn't ask for a specific IP ips.sort(key=lambda x: 'ip_address' not in x) try: for ip in ips: # By default IP info is a dict, used to allocate a single ip # from a single subnet. # IP info can be a list, used to allocate a single ip from # multiple subnets ip_list = [ip] if isinstance(ip, dict) else ip subnets = [ip_dict['subnet_id'] for ip_dict in ip_list] try: ip_request = factory.get_request(context, port, ip_list[0]) ipam_allocator = ipam_driver.get_allocator(subnets) ip_address, subnet_id = ipam_allocator.allocate(ip_request) except ipam_exc.IpAddressGenerationFailureAllSubnets: raise n_exc.IpAddressGenerationFailure( net_id=port['network_id']) allocated.append({'ip_address': ip_address, 'subnet_id': subnet_id}) except Exception: with excutils.save_and_reraise_exception(): if not ipam_driver.needs_rollback(): return LOG.debug("An exception occurred during IP allocation.") if revert_on_fail and allocated: LOG.debug("Reverting allocation") # In case of deadlock the deallocation fails with a db error # and rewrites the original exception, preventing the db_retry # wrappers from restarting the entire api request.
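# Worked example (illustrative) of the sort key used in
# _ipam_allocate_ips() above: 'ip_address' not in x is False for entries
# that pinned a specific address and True otherwise, and False sorts
# first, so pinned requests are served before unpinned ones can grab the
# same address.
reqs = [{'subnet_id': 'b'}, {'subnet_id': 'a', 'ip_address': '10.0.0.5'}]
reqs.sort(key=lambda x: 'ip_address' not in x)
assert reqs[0].get('ip_address') == '10.0.0.5'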
self._safe_rollback(self._ipam_deallocate_ips, context, ipam_driver, port, allocated, revert_on_fail=False) elif not revert_on_fail and ips: addresses = ', '.join(self._get_failed_ips(ips, allocated)) LOG.error("IP allocation failed on " "external system for %s", addresses) return allocated def _ipam_update_allocation_pools(self, context, ipam_driver, subnet): factory = ipam_driver.get_subnet_request_factory() subnet_request = factory.get_request(context, subnet, None) ipam_driver.update_subnet(subnet_request) def delete_subnet(self, context, subnet_id): ipam_driver = driver.Pool.get_instance(None, context) ipam_driver.remove_subnet(subnet_id) def get_subnet(self, context, subnet_id): ipam_driver = driver.Pool.get_instance(None, context) return ipam_driver.get_subnet(subnet_id) def allocate_ips_for_port_and_store(self, context, port, port_id): # Make a copy of port dict to prevent changing # incoming dict by adding 'id' to it. # Deepcopy doesn't work correctly in this case, because copy of # ATTR_NOT_SPECIFIED object happens. Address of copied object doesn't # match original object, so 'is' check fails # TODO(njohnston): Different behavior is required depending on whether # a Port object is used or not; once conversion to OVO is complete only # the first 'if' will be needed if isinstance(port, port_obj.Port): port_copy = {"port": self._make_port_dict( port, process_extensions=False)} elif 'port' in port: port_copy = {'port': port['port'].copy()} else: port_copy = {'port': port.copy()} port_copy['port']['id'] = port_id network_id = port_copy['port']['network_id'] ips = [] try: ips = self._allocate_ips_for_port(context, port_copy) for ip in ips: ip_address = ip['ip_address'] subnet_id = ip['subnet_id'] IpamPluggableBackend._store_ip_allocation( context, ip_address, network_id, subnet_id, port_id) return ips except Exception: with excutils.save_and_reraise_exception(): if ips: ipam_driver = driver.Pool.get_instance(None, context) if not ipam_driver.needs_rollback(): return LOG.debug("An exception occurred during port creation. " "Reverting IP allocation") self._safe_rollback(self._ipam_deallocate_ips, context, ipam_driver, port_copy['port'], ips, revert_on_fail=False) def _allocate_ips_for_port(self, context, port): """Allocate IP addresses for the port. IPAM version. If port['fixed_ips'] is set to 'ATTR_NOT_SPECIFIED', allocate IP addresses for the port. If port['fixed_ips'] contains an IP address or a subnet_id then allocate an IP address accordingly. 
""" p = port['port'] fixed_configured = p['fixed_ips'] is not constants.ATTR_NOT_SPECIFIED fixed_ips = p['fixed_ips'] if fixed_configured else [] subnets = self._ipam_get_subnets(context, network_id=p['network_id'], host=p.get(portbindings.HOST_ID), service_type=p.get('device_owner'), fixed_configured=fixed_configured, fixed_ips=fixed_ips) v4, v6_stateful, v6_stateless = self._classify_subnets( context, subnets) if fixed_configured: ips = self._test_fixed_ips_for_port(context, p["network_id"], p['fixed_ips'], p['device_owner'], subnets) else: ips = [] version_subnets = [v4, v6_stateful] for subnets in version_subnets: if subnets: ips.append([{'subnet_id': s['id']} for s in subnets]) ips.extend(self._get_auto_address_ips(v6_stateless, p)) ipam_driver = driver.Pool.get_instance(None, context) return self._ipam_allocate_ips(context, ipam_driver, p, ips) def _get_auto_address_ips(self, v6_stateless_subnets, port, exclude_subnet_ids=None): exclude_subnet_ids = exclude_subnet_ids or [] ips = [] is_router_port = ( port['device_owner'] in constants.ROUTER_INTERFACE_OWNERS_SNAT) if not is_router_port: for subnet in v6_stateless_subnets: if subnet['id'] not in exclude_subnet_ids: # IP addresses for IPv6 SLAAC and DHCPv6-stateless subnets # are implicitly included. ips.append({'subnet_id': subnet['id'], 'subnet_cidr': subnet['cidr'], 'eui64_address': True, 'mac': port['mac_address']}) return ips def _test_fixed_ips_for_port(self, context, network_id, fixed_ips, device_owner, subnets): """Test fixed IPs for port. Check that configured subnets are valid prior to allocating any IPs. Include the subnet_id in the result if only an IP address is configured. :raises: InvalidInput, IpAddressInUse, InvalidIpForNetwork, InvalidIpForSubnet """ fixed_ip_list = [] for fixed in fixed_ips: fixed['device_owner'] = device_owner subnet = self._get_subnet_for_fixed_ip(context, fixed, subnets) is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet) if ('ip_address' in fixed and subnet['cidr'] != constants.PROVISIONAL_IPV6_PD_PREFIX): if (is_auto_addr_subnet and device_owner not in constants.ROUTER_INTERFACE_OWNERS): raise ipam_exc.AllocationOnAutoAddressSubnet( ip=fixed['ip_address'], subnet_id=subnet['id']) fixed_ip_list.append({'subnet_id': subnet['id'], 'ip_address': fixed['ip_address']}) else: # A scan for auto-address subnets on the network is done # separately so that all such subnets (not just those # listed explicitly here by subnet ID) are associated # with the port. if (device_owner in constants.ROUTER_INTERFACE_OWNERS_SNAT or not is_auto_addr_subnet): fixed_ip_list.append({'subnet_id': subnet['id']}) return fixed_ip_list def _check_ip_changed_by_version(self, context, ip_list, version): for ip in ip_list: ip_address = ip.get('ip_address') subnet_id = ip.get('subnet_id') if ip_address: ip_addr = netaddr.IPAddress(ip_address) if ip_addr.version == version: return True elif subnet_id: subnet = obj_subnet.Subnet.get_object(context, id=subnet_id) if subnet and subnet.ip_version == version: return True return False def _update_ips_for_port(self, context, port, host, original_ips, new_ips, mac): """Add or remove IPs from the port. 
IPAM version""" added = [] removed = [] changes = self._get_changed_ips_for_port( context, original_ips, new_ips, port['device_owner']) not_allowed_list = get_ip_update_not_allowed_device_owner_list() if (port['device_owner'] in not_allowed_list and is_neutron_built_in_router(context, port['device_id'])): ip_v4_changed = self._check_ip_changed_by_version( context, changes.remove + changes.add, constants.IP_VERSION_4) if ip_v4_changed: raise ipam_exc.IPAddressChangeNotAllowed(port_id=port['id']) try: subnets = self._ipam_get_subnets( context, network_id=port['network_id'], host=host, service_type=port.get('device_owner'), fixed_configured=True, fixed_ips=changes.add + changes.original) except ipam_exc.DeferIpam: subnets = [] # Check if the IP's to add are OK to_add = self._test_fixed_ips_for_port( context, port['network_id'], changes.add, port['device_owner'], subnets) if port['device_owner'] not in constants.ROUTER_INTERFACE_OWNERS: to_add += self._update_ips_for_pd_subnet( context, subnets, changes.add, mac) ipam_driver = driver.Pool.get_instance(None, context) if changes.remove: removed = self._ipam_deallocate_ips(context, ipam_driver, port, changes.remove) v6_stateless = self._classify_subnets( context, subnets)[2] handled_subnet_ids = [ip['subnet_id'] for ip in to_add + changes.original + changes.remove] to_add.extend(self._get_auto_address_ips( v6_stateless, port, handled_subnet_ids)) if to_add: added = self._ipam_allocate_ips(context, ipam_driver, port, to_add) return self.Changes(add=added, original=changes.original, remove=removed) @db_api.CONTEXT_WRITER def save_allocation_pools(self, context, subnet, allocation_pools): for pool in allocation_pools: first_ip = str(netaddr.IPAddress(pool.first, pool.version)) last_ip = str(netaddr.IPAddress(pool.last, pool.version)) obj_subnet.IPAllocationPool( context, subnet_id=subnet['id'], start=first_ip, end=last_ip).create() def update_port_with_ips(self, context, host, db_port, new_port, new_mac): changes = self.Changes(add=[], original=[], remove=[]) auto_assign_subnets = [] if new_mac: original = self._make_port_dict(db_port, process_extensions=False) if original.get('mac_address') != new_mac: original_ips = original.get('fixed_ips', []) # NOTE(hjensas): Only set the default for 'fixed_ips' in # new_port if the original port or new_port actually have IPs. # Setting the default to [] breaks deferred IP allocation. # See Bug: https://bugs.launchpad.net/neutron/+bug/1811905 if original_ips or new_port.get('fixed_ips'): new_ips = new_port.setdefault('fixed_ips', original_ips) new_ips_subnets = [new_ip['subnet_id'] for new_ip in new_ips] for orig_ip in original_ips: if ipv6_utils.is_eui64_address(orig_ip.get('ip_address')): subnet_to_delete = {} subnet_to_delete['subnet_id'] = orig_ip['subnet_id'] subnet_to_delete['delete_subnet'] = True auto_assign_subnets.append(subnet_to_delete) try: i = new_ips_subnets.index(orig_ip['subnet_id']) new_ips[i] = subnet_to_delete except ValueError: new_ips.append(subnet_to_delete) if 'fixed_ips' in new_port: original = self._make_port_dict(db_port, process_extensions=False) changes = self._update_ips_for_port(context, db_port, host, original["fixed_ips"], new_port['fixed_ips'], new_mac) try: # Expire the fixed_ips of db_port in current transaction, because # it will be changed in the following operation and the latest # data is expected. 
context.session.expire(db_port, ['fixed_ips']) # Check if the IPs need to be updated network_id = db_port['network_id'] for ip in changes.remove: self._delete_ip_allocation(context, network_id, ip['subnet_id'], ip['ip_address']) for ip in changes.add: self._store_ip_allocation( context, ip['ip_address'], network_id, ip['subnet_id'], db_port.id) self._update_db_port(context, db_port, new_port, network_id, new_mac) if auto_assign_subnets: port_copy = copy.deepcopy(original) port_copy.update(new_port) port_copy['fixed_ips'] = auto_assign_subnets self.allocate_ips_for_port_and_store( context, {'port': port_copy}, port_copy['id']) getattr(db_port, 'fixed_ips') # refresh relationship before return except Exception: with excutils.save_and_reraise_exception(): if 'fixed_ips' in new_port: ipam_driver = driver.Pool.get_instance(None, context) if not ipam_driver.needs_rollback(): return LOG.debug("An exception occurred during port update.") if changes.add: LOG.debug("Reverting IP allocation.") self._safe_rollback(self._ipam_deallocate_ips, context, ipam_driver, db_port, changes.add, revert_on_fail=False) if changes.remove: LOG.debug("Reverting IP deallocation.") self._safe_rollback(self._ipam_allocate_ips, context, ipam_driver, db_port, changes.remove, revert_on_fail=False) return changes def delete_port(self, context, id): # Get fixed_ips list before port deletion port = self._get_port(context, id) ipam_driver = driver.Pool.get_instance(None, context) super(IpamPluggableBackend, self).delete_port(context, id) # Deallocating ips via IPAM after port is deleted locally. # So no need to do rollback actions on remote server # in case of fail to delete port locally self._ipam_deallocate_ips(context, ipam_driver, port, port['fixed_ips']) def update_db_subnet(self, context, id, s, old_pools): subnet = obj_subnet.Subnet.get_object(context, id=id) old_segment_id = subnet.segment_id if subnet else None if 'segment_id' in s: self._validate_segment( context, s['network_id'], s['segment_id'], action='update', old_segment_id=old_segment_id) # 'allocation_pools' is removed from 's' in # _update_subnet_allocation_pools (ipam_backend_mixin), # so create unchanged copy for ipam driver subnet_copy = copy.deepcopy(s) subnet, changes = super(IpamPluggableBackend, self).update_db_subnet( context, id, s, old_pools) ipam_driver = driver.Pool.get_instance(None, context) # Set old allocation pools if no new pools are provided by user. # Passing old pools allows to call ipam driver on each subnet update # even if allocation pools are not changed. So custom ipam drivers # are able to track other fields changes on subnet update. if 'allocation_pools' not in subnet_copy: subnet_copy['allocation_pools'] = old_pools self._ipam_update_allocation_pools(context, ipam_driver, subnet_copy) return subnet, changes def add_auto_addrs_on_network_ports(self, context, subnet, ipam_subnet): """For an auto-address subnet, add addrs for ports on the net.""" # TODO(ataraday): switched for writer when flush_on_subtransaction # will be available for neutron with context.session.begin(subtransactions=True): network_id = subnet['network_id'] ports = port_obj.Port.get_objects( context, network_id=network_id, device_owner=obj_utils.NotIn( constants.ROUTER_INTERFACE_OWNERS_SNAT)) updated_ports = [] ipam_driver = driver.Pool.get_instance(None, context) factory = ipam_driver.get_address_request_factory() for port in ports: # Find candidate subnets based on host_id and existing # fixed_ips. This will filter subnets on other segments. 
Only # allocate if this subnet is a valid candidate. p = self._make_port_dict(port) fixed_configured = (p['fixed_ips'] is not constants.ATTR_NOT_SPECIFIED) subnet_candidates = obj_subnet.Subnet.find_candidate_subnets( context, network_id, p.get(portbindings.HOST_ID), p.get('device_owner'), fixed_configured, p.get('fixed_ips')) if subnet['id'] not in [s['id'] for s in subnet_candidates]: continue ip = {'subnet_id': subnet['id'], 'subnet_cidr': subnet['cidr'], 'eui64_address': True, 'mac': port.mac_address} ip_request = factory.get_request(context, port, ip) try: ip_address = ipam_subnet.allocate(ip_request) allocated = port_obj.IPAllocation( context, network_id=network_id, port_id=port.id, ip_address=ip_address, subnet_id=subnet['id']) # Do the insertion of each IP allocation entry within # the context of a nested transaction, so that the entry # is rolled back independently of other entries whenever # the corresponding port has been deleted; since OVO # already opens a nested transaction, we don't need to do # it explicitly here. allocated.create() updated_ports.append(port.id) except db_exc.DBReferenceError: LOG.debug("Port %s was deleted while updating it with an " "IPv6 auto-address. Ignoring.", port.id) LOG.debug("Reverting IP allocation for %s", ip_address) # Do not fail if reverting allocation was unsuccessful try: ipam_subnet.deallocate(ip_address) except Exception: LOG.debug("Reverting IP allocation failed for %s", ip_address) except ipam_exc.IpAddressAlreadyAllocated: LOG.debug("Port %s got IPv6 auto-address in a concurrent " "create or update port request. Ignoring.", port.id) return updated_ports def allocate_subnet(self, context, network, subnet, subnetpool_id): subnetpool = None if subnetpool_id and not subnetpool_id == constants.IPV6_PD_POOL_ID: subnetpool = self._get_subnetpool(context, id=subnetpool_id) self._validate_ip_version_with_subnetpool(subnet, subnetpool) # gateway_ip and allocation pools should be validated or generated # only for specific request if subnet['cidr'] is not constants.ATTR_NOT_SPECIFIED: subnet['gateway_ip'] = self._gateway_ip_str(subnet, subnet['cidr']) subnet['allocation_pools'] = self._prepare_allocation_pools( subnet['allocation_pools'], subnet['cidr'], subnet['gateway_ip']) ipam_driver = driver.Pool.get_instance(subnetpool, context) subnet_factory = ipam_driver.get_subnet_request_factory() subnet_request = subnet_factory.get_request(context, subnet, subnetpool) ipam_subnet = ipam_driver.allocate_subnet(subnet_request) # get updated details with actually allocated subnet subnet_request = ipam_subnet.get_details() try: subnet = self._save_subnet(context, network, self._make_subnet_args( subnet_request, subnet, subnetpool_id), subnet['dns_nameservers'], subnet['host_routes'], subnet_request) obj_subnet.NetworkSubnetLock.lock_subnet(context, network.id, subnet.id) except Exception: # Note(pbondar): Third-party ipam servers can't rely # on transaction rollback, so explicit rollback call needed. # IPAM part rolled back in exception handling # and subnet part is rolled back by transaction rollback. with excutils.save_and_reraise_exception(): if not ipam_driver.needs_rollback(): return LOG.debug("An exception occurred during subnet creation. 
" "Reverting subnet allocation.") self._safe_rollback(self.delete_subnet, context, subnet_request.subnet_id) return subnet, ipam_subnet ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/l3_agentschedulers_db.py0000644000175000017500000005673700000000000024041 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api import extensions from neutron_lib import constants from neutron_lib.db import api as db_api from neutron_lib.exceptions import agent as agent_exc from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging import oslo_messaging from neutron.agent.common import utils as agent_utils from neutron.conf.db import l3_agentschedulers_db from neutron.db import agentschedulers_db from neutron.db.models import l3agent as rb_model from neutron.extensions import l3agentscheduler from neutron.extensions import router_availability_zone as router_az from neutron.objects import agent as ag_obj from neutron.objects import base as base_obj from neutron.objects import l3agent as rb_obj from neutron.objects import router as l3_objs LOG = logging.getLogger(__name__) l3_agentschedulers_db.register_db_l3agentschedulers_opts() class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase, agentschedulers_db.AgentSchedulerDbMixin): """Mixin class to add l3 agent scheduler extension to plugins using the l3 agent for routing. """ router_scheduler = None def add_periodic_l3_agent_status_check(self): if not cfg.CONF.allow_automatic_l3agent_failover: LOG.info("Skipping period L3 agent status check because " "automatic router rescheduling is disabled.") return self.add_agent_status_check_worker( self.reschedule_routers_from_down_agents) def reschedule_routers_from_down_agents(self): """Reschedule routers from down l3 agents if admin state is up.""" self.reschedule_resources_from_down_agents( agent_type='L3', get_down_bindings=self.get_down_router_bindings, agent_id_attr='l3_agent_id', resource_id_attr='router_id', resource_name='router', reschedule_resource=self.reschedule_router, rescheduling_failed=l3agentscheduler.RouterReschedulingFailed) def get_down_router_bindings(self, context, agent_dead_limit): cutoff = self.get_cutoff_time(agent_dead_limit) return rb_obj.RouterL3AgentBinding.get_down_router_bindings( context, cutoff) def _get_agent_mode(self, agent_db): agent_conf = self.get_configuration_dict(agent_db) return agent_conf.get(constants.L3_AGENT_MODE, constants.L3_AGENT_MODE_LEGACY) def validate_agent_router_combination(self, context, agent, router): """Validate if the router can be correctly assigned to the agent. :raises: RouterL3AgentMismatch if attempting to assign DVR router to legacy agent. 
:raises: InvalidL3Agent if attempting to assign router to an unsuitable agent (disabled, type != L3, incompatible configuration) :raises: DVRL3CannotAssignToDvrAgent if attempting to assign a router to an agent in 'dvr' mode. """ if agent['agent_type'] != constants.AGENT_TYPE_L3: raise l3agentscheduler.InvalidL3Agent(id=agent['id']) agent_mode = self._get_agent_mode(agent) if agent_mode in [constants.L3_AGENT_MODE_DVR, constants.L3_AGENT_MODE_DVR_NO_EXTERNAL]: raise l3agentscheduler.DVRL3CannotAssignToDvrAgent() if (agent_mode == constants.L3_AGENT_MODE_LEGACY and router.get('distributed')): raise l3agentscheduler.RouterL3AgentMismatch( router_id=router['id'], agent_id=agent['id']) is_suitable_agent = ( agentschedulers_db.services_available(agent['admin_state_up']) and self.get_l3_agent_candidates(context, router, [agent], ignore_admin_state=True)) if not is_suitable_agent: raise l3agentscheduler.InvalidL3Agent(id=agent['id']) def check_agent_router_scheduling_needed(self, context, agent, router): """Check if the router scheduling is needed. :raises: RouterHostedByL3Agent if router is already assigned to a different agent. :returns: True if scheduling is needed, otherwise False """ router_id = router['id'] agent_id = agent['id'] bindings = rb_obj.RouterL3AgentBinding.get_objects(context, router_id=router_id) if not bindings: return True for binding in bindings: if binding.l3_agent_id == agent_id: # router already bound to the agent we need return False if router.get('ha'): return True # legacy router case: router is already bound to some agent raise l3agentscheduler.RouterHostedByL3Agent( router_id=router_id, agent_id=bindings[0].l3_agent_id) def create_router_to_agent_binding(self, context, agent, router): """Create router to agent binding.""" router_id = router['id'] agent_id = agent['id'] if self.router_scheduler: plugin = directory.get_plugin(plugin_constants.L3) try: if router.get('ha'): self.router_scheduler.create_ha_port_and_bind( plugin, context, router['id'], router['tenant_id'], agent, is_manual_scheduling=True) else: self.router_scheduler.bind_router( plugin, context, router_id, agent.id) except db_exc.DBError: raise l3agentscheduler.RouterSchedulingFailed( router_id=router_id, agent_id=agent_id) def add_router_to_l3_agent(self, context, agent_id, router_id): """Add a l3 agent to host a router.""" if not self.router_supports_scheduling(context, router_id): raise l3agentscheduler.RouterDoesntSupportScheduling( router_id=router_id) with db_api.CONTEXT_WRITER.using(context): router = self.get_router(context, router_id) agent = self._get_agent(context, agent_id) self.validate_agent_router_combination(context, agent, router) if not self.check_agent_router_scheduling_needed( context, agent, router): return self.create_router_to_agent_binding(context, agent, router) l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3) if l3_notifier: l3_notifier.router_added_to_agent( context, [router_id], agent.host) def _check_router_retain_needed(self, context, router, host): """Check whether a router needs to be retained on a host. Check whether there are DVR serviceable ports owned by the host of an l3 agent. If so, then the routers should be retained. 
""" if not host or not router.get('distributed'): return False plugin = directory.get_plugin(plugin_constants.L3) subnet_ids = plugin.get_subnet_ids_on_router(context, router['id']) return plugin._check_dvr_serviceable_ports_on_host(context, host, subnet_ids) def remove_router_from_l3_agent(self, context, agent_id, router_id): """Remove the router from l3 agent. After removal, the router will be non-hosted until there is update which leads to re-schedule or be added to another agent manually. """ agent = self._get_agent(context, agent_id) agent_mode = self._get_agent_mode(agent) if agent_mode in [constants.L3_AGENT_MODE_DVR, constants.L3_AGENT_MODE_DVR_NO_EXTERNAL]: raise l3agentscheduler.DVRL3CannotRemoveFromDvrAgent() self._unbind_router(context, router_id, agent_id) router = self.get_router(context, router_id) if router.get('ha'): plugin = directory.get_plugin(plugin_constants.L3) plugin.delete_ha_interfaces_on_host(context, router_id, agent.host) l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3) if not l3_notifier: return # NOTE(Swami): Need to verify if there are DVR serviceable # ports owned by this agent. If owned by this agent, then # the routers should be retained. This flag will be used # to check if there are valid routers in this agent. retain_router = self._check_router_retain_needed(context, router, agent.host) if retain_router: l3_notifier.routers_updated_on_host( context, [router_id], agent.host) else: l3_notifier.router_removed_from_agent( context, router_id, agent.host) def _unbind_router(self, context, router_id, agent_id): rb_obj.RouterL3AgentBinding.delete_objects( context, router_id=router_id, l3_agent_id=agent_id) def _unschedule_router(self, context, router_id, agents_ids): with db_api.CONTEXT_WRITER.using(context): for agent_id in agents_ids: self._unbind_router(context, router_id, agent_id) def reschedule_router(self, context, router_id, candidates=None): """Reschedule router to (a) new l3 agent(s) Remove the router from the agent(s) currently hosting it and schedule it again """ cur_agents = self.list_l3_agents_hosting_router( context, router_id)['agents'] with db_api.CONTEXT_WRITER.using(context): cur_agents_ids = [agent['id'] for agent in cur_agents] self._unschedule_router(context, router_id, cur_agents_ids) self.schedule_router(context, router_id, candidates=candidates) new_agents = self.list_l3_agents_hosting_router( context, router_id)['agents'] if not new_agents: raise l3agentscheduler.RouterReschedulingFailed( router_id=router_id) self._notify_agents_router_rescheduled(context, router_id, cur_agents, new_agents) def _notify_agents_router_rescheduled(self, context, router_id, old_agents, new_agents): l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3) if not l3_notifier: return old_hosts = [agent['host'] for agent in old_agents] new_hosts = [agent['host'] for agent in new_agents] router = self.get_router(context, router_id) for host in set(old_hosts) - set(new_hosts): retain_router = self._check_router_retain_needed( context, router, host) if retain_router: l3_notifier.routers_updated_on_host( context, [router_id], host) else: l3_notifier.router_removed_from_agent( context, router_id, host) for agent in new_agents: try: l3_notifier.router_added_to_agent( context, [router_id], agent['host']) except oslo_messaging.MessagingException: self._unbind_router(context, router_id, agent['id']) raise l3agentscheduler.RouterReschedulingFailed( router_id=router_id) def list_routers_on_l3_agent(self, context, agent_id): binding_objs = 
rb_obj.RouterL3AgentBinding.get_objects( context, l3_agent_id=agent_id) router_ids = [item.router_id for item in binding_objs] if router_ids: return {'routers': self.get_routers(context, filters={'id': router_ids})} else: # Exception will be thrown if the requested agent does not exist. self._get_agent(context, agent_id) return {'routers': []} def _get_active_l3_agent_routers_sync_data(self, context, host, agent, router_ids): if extensions.is_extension_supported( self, constants.L3_HA_MODE_EXT_ALIAS): return self.get_ha_sync_data_for_host(context, host, agent, router_ids=router_ids, active=True) return self.get_sync_data(context, router_ids=router_ids, active=True) def list_router_ids_on_host(self, context, host, router_ids=None, with_dvr=True): try: agent = self._get_agent_by_type_and_host( context, constants.AGENT_TYPE_L3, host) except agent_exc.AgentNotFoundByTypeHost: return [] if not agentschedulers_db.services_available(agent.admin_state_up): return [] return self._get_router_ids_for_agent(context, agent, router_ids, with_dvr) def get_host_ha_router_count(self, context, host): router_ids = self.list_router_ids_on_host(context, host, with_dvr=False) up_routers = l3_objs.Router.get_objects(context, id=router_ids, admin_state_up=True) return len(l3_objs.RouterExtraAttributes.get_objects( context, router_id=[obj.id for obj in up_routers], ha=True)) def _get_router_ids_for_agent(self, context, agent, router_ids, with_dvr=True): """Get IDs of routers that the agent should host Overridden for DVR to handle agents in 'dvr' mode which have no explicit bindings with routers """ filters = {'l3_agent_id': agent.id} if router_ids: filters['router_id'] = router_ids bindings = rb_obj.RouterL3AgentBinding.get_objects(context, **filters) return [item.router_id for item in bindings] def list_active_sync_routers_on_active_l3_agent( self, context, host, router_ids): agent = self._get_agent_by_type_and_host( context, constants.AGENT_TYPE_L3, host) if not agentschedulers_db.services_available(agent.admin_state_up): LOG.info("Agent has its services disabled. Returning " "no active routers. Agent: %s", agent) return [] scheduled_router_ids = self._get_router_ids_for_agent( context, agent, router_ids) diff = set(router_ids or []) - set(scheduled_router_ids or []) if diff: LOG.debug("Agent requested router IDs not scheduled to it. " "Scheduled: %(sched)s. Unscheduled: %(diff)s. 
" "Agent: %(agent)s.", {'sched': scheduled_router_ids, 'diff': diff, 'agent': agent}) if scheduled_router_ids: return self._get_active_l3_agent_routers_sync_data( context, host, agent, scheduled_router_ids) return [] def get_l3_agents_hosting_routers(self, context, router_ids, admin_state_up=None, active=None): if not router_ids: return [] record_objs = rb_obj.RouterL3AgentBinding.get_objects( context, router_id=router_ids) if admin_state_up is not None: l3_agents = ag_obj.Agent.get_objects( context, id=[obj.l3_agent_id for obj in record_objs], admin_state_up=admin_state_up) else: l3_agents = [ ag_obj.Agent.get_object(context, id=obj.l3_agent_id) for obj in record_objs ] if active is not None: l3_agents = [l3_agent for l3_agent in l3_agents if not agent_utils.is_agent_down( l3_agent['heartbeat_timestamp'])] return l3_agents def _get_l3_agents_hosting_routers(self, context, router_ids): if not router_ids: return [] return ( rb_obj.RouterL3AgentBinding.get_l3_agents_by_router_ids( context, router_ids)) def list_l3_agents_hosting_router(self, context, router_id): with db_api.CONTEXT_READER.using(context): agents = self._get_l3_agents_hosting_routers( context, [router_id]) return {'agents': [self._make_agent_dict(agent) for agent in agents]} def get_routers_l3_agents_count(self, context): """Return a map between routers and agent counts for all routers.""" # TODO(sshank): This portion needs Router OVO integration when it is # merged. l3_model_list = l3_objs.RouterExtraAttributes.get_router_agents_count( context) return [(self._make_router_dict(router_model), agent_count if agent_count else 0) for router_model, agent_count in l3_model_list] def get_l3_agents(self, context, active=None, filters=None): agent_filters = {'agent_type': constants.AGENT_TYPE_L3} if active is not None: agent_filters['admin_state_up'] = active config_filters = [] if filters: for key, value in filters.items(): column = ag_obj.Agent.fields.get(key, None) if column: if not value: return [] agent_modes = filters.pop('agent_modes', []) if agent_modes: config_filters = set('\"agent_mode\": \"%s\"' % agent_mode for agent_mode in agent_modes) agent_filters.update(filters) agent_objs = [] if config_filters: for conf_filter in config_filters: agent_objs.extend(ag_obj.Agent.get_objects_by_agent_mode( context, conf_filter, **agent_filters)) else: agent_objs = ag_obj.Agent.get_objects(context, **agent_filters) return [l3_agent for l3_agent in agent_objs if agentschedulers_db.AgentSchedulerDbMixin.is_eligible_agent( active, l3_agent)] def get_l3_agent_candidates(self, context, sync_router, l3_agents, ignore_admin_state=False): """Get the valid l3 agents for the router from a list of l3_agents. It will not return agents in 'dvr' mode or in 'dvr_no_external' mode for a dvr router as dvr routers are not explicitly scheduled to l3 agents on compute nodes """ candidates = [] is_router_distributed = sync_router.get('distributed', False) for l3_agent in l3_agents: if not ignore_admin_state and not l3_agent.admin_state_up: # ignore_admin_state True comes from manual scheduling # where admin_state_up judgement is already done. 
continue agent_conf = self.get_configuration_dict(l3_agent) agent_mode = agent_conf.get(constants.L3_AGENT_MODE, constants.L3_AGENT_MODE_LEGACY) if (agent_mode == constants.L3_AGENT_MODE_DVR or agent_mode == constants.L3_AGENT_MODE_DVR_NO_EXTERNAL or (agent_mode == constants.L3_AGENT_MODE_LEGACY and is_router_distributed)): continue router_id = agent_conf.get('router_id', None) if router_id and router_id != sync_router['id']: continue handle_internal_only_routers = agent_conf.get( 'handle_internal_only_routers', True) ex_net_id = (sync_router['external_gateway_info'] or {}).get( 'network_id') if not ex_net_id and not handle_internal_only_routers: continue candidates.append(l3_agent) return candidates def auto_schedule_routers(self, context, host): if self.router_scheduler: self.router_scheduler.auto_schedule_routers(self, context, host) def schedule_router(self, context, router, candidates=None): if self.router_scheduler: return self.router_scheduler.schedule( self, context, router, candidates=candidates) def schedule_routers(self, context, routers): """Schedule the routers to l3 agents.""" for router in routers: self.schedule_router(context, router, candidates=None) def get_l3_agent_with_min_routers(self, context, agent_ids): if not agent_ids: return None agents = ag_obj.Agent.get_l3_agent_with_min_routers( context, agent_ids) return agents def get_hosts_to_notify(self, context, router_id): """Returns all hosts to send notification about router update""" state = agentschedulers_db.get_admin_state_up_filter() agents = self.get_l3_agents_hosting_routers( context, [router_id], admin_state_up=state, active=True) return [a.host for a in agents] def get_vacant_binding_index(self, context, router_id, is_manual_scheduling=False): """Return a vacant binding_index to use and whether or not it exists. Each RouterL3AgentBinding has a binding_index which is unique per router_id, and when creating a single binding we require to find a 'vacant' binding_index which isn't yet used - for example if we have bindings with indices 1 and 3, then clearly binding_index == 2 is free. :returns: binding_index. """ num_agents = self.get_number_of_agents_for_scheduling(context) pager = base_obj.Pager(sorts=[('binding_index', True)]) bindings = rb_obj.RouterL3AgentBinding.get_objects( context, _pager=pager, router_id=router_id) binding_indices = [b.binding_index for b in bindings] all_indicies = set(range(rb_model.LOWEST_BINDING_INDEX, num_agents + 1)) open_slots = sorted(list(all_indicies - set(binding_indices))) if open_slots: return open_slots[0] # Last chance: if this is a manual scheduling, we're gonna allow # creation of a binding_index even if it will exceed # max_l3_agents_per_router. if is_manual_scheduling: return max(all_indicies) + 1 return -1 class AZL3AgentSchedulerDbMixin(L3AgentSchedulerDbMixin, router_az.RouterAvailabilityZonePluginBase): """Mixin class to add availability_zone supported l3 agent scheduler.""" def get_router_availability_zones(self, router): return list({agent.availability_zone for agent in router.l3_agents}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/l3_attrs_db.py0000644000175000017500000000567400000000000022010 0ustar00coreycorey00000000000000# Copyright (c) 2014 OpenStack Foundation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.api.validators import availability_zone as az_validator from neutron_lib.db import resource_extend from oslo_config import cfg from neutron._i18n import _ from neutron.db.models import l3_attrs def get_attr_info(): """Returns api visible attr names and their default values.""" return {'distributed': {'default': cfg.CONF.router_distributed}, 'ha': {'default': cfg.CONF.l3_ha}, 'ha_vr_id': {'default': 0}, 'availability_zone_hints': { 'default': '[]', 'transform_to_db': az_validator.convert_az_list_to_string, 'transform_from_db': az_validator.convert_az_string_to_list} } @resource_extend.has_resource_extenders class ExtraAttributesMixin(object): """Mixin class to enable router's extra attributes.""" @staticmethod @resource_extend.extends([l3_apidef.ROUTERS]) def _extend_extra_router_dict(router_res, router_db): extra_attrs = router_db['extra_attributes'] or {} for name, info in get_attr_info().items(): from_db = info.get('transform_from_db', lambda x: x) router_res[name] = from_db(extra_attrs.get(name, info['default'])) def _ensure_extra_attr_model(self, context, router_db): if not router_db['extra_attributes']: kwargs = {k: v['default'] for k, v in get_attr_info().items()} kwargs['router_id'] = router_db['id'] new = l3_attrs.RouterExtraAttributes(**kwargs) context.session.add(new) router_db['extra_attributes'] = new def set_extra_attr_value(self, context, router_db, key, value): if not context.session.is_active: raise RuntimeError(_("set_extra_attr_value cannot be called " "out of a transaction.")) # set a single value explicitly if key in get_attr_info(): info = get_attr_info()[key] to_db = info.get('transform_to_db', lambda x: x) self._ensure_extra_attr_model(context, router_db) router_db['extra_attributes'].update({key: to_db(value)}) return raise RuntimeError(_("Tried to set a key '%s' that doesn't exist " "in the extra attributes table.") % key) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/l3_db.py0000644000175000017500000027620500000000000020573 0ustar00coreycorey00000000000000# Copyright 2012 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
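# Illustrative stand-ins (hypothetical) for the transform hooks wired up
# in get_attr_info() in l3_attrs_db.py above: availability_zone_hints is
# stored as a string column but exposed as a list, so values are
# converted on the way into and out of the DB (the real converters are
# az_validator.convert_az_list_to_string/convert_az_string_to_list):
import json

def _az_list_to_string(az_list):
    return json.dumps(az_list)

def _az_string_to_list(value):
    return json.loads(value) if value else []

assert _az_string_to_list(_az_list_to_string(['az1', 'az2'])) == ['az1', 'az2']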
import functools import random import netaddr from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.api import extensions from neutron_lib.api import validators from neutron_lib.callbacks import events from neutron_lib.callbacks import exceptions from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib import context as n_ctx from neutron_lib.db import api as db_api from neutron_lib.db import model_query from neutron_lib.db import resource_extend from neutron_lib.db import utils as lib_db_utils from neutron_lib import exceptions as n_exc from neutron_lib.exceptions import l3 as l3_exc from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from neutron_lib.plugins import utils as plugin_utils from neutron_lib import rpc as n_rpc from neutron_lib.services import base as base_services from oslo_log import log as logging from oslo_utils import uuidutils from sqlalchemy import orm from sqlalchemy.orm import exc from neutron._i18n import _ from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api from neutron.common import ipv6_utils from neutron.common import utils from neutron.db import _utils as db_utils from neutron.db.models import l3 as l3_models from neutron.db import models_v2 from neutron.db import standardattrdescription_db as st_attr from neutron.extensions import l3 from neutron.extensions import qos_fip from neutron.objects import base as base_obj from neutron.objects import port_forwarding from neutron.objects import ports as port_obj from neutron.objects import router as l3_obj from neutron import worker as neutron_worker LOG = logging.getLogger(__name__) DEVICE_OWNER_HA_REPLICATED_INT = constants.DEVICE_OWNER_HA_REPLICATED_INT DEVICE_OWNER_ROUTER_INTF = constants.DEVICE_OWNER_ROUTER_INTF DEVICE_OWNER_ROUTER_GW = constants.DEVICE_OWNER_ROUTER_GW DEVICE_OWNER_FLOATINGIP = constants.DEVICE_OWNER_FLOATINGIP EXTERNAL_GW_INFO = l3_apidef.EXTERNAL_GW_INFO # Maps API field to DB column # API parameter name and Database column names may differ. # Useful to keep the filtering between API and Database. API_TO_DB_COLUMN_MAP = {'port_id': 'fixed_port_id'} CORE_ROUTER_ATTRS = ('id', 'name', 'tenant_id', 'admin_state_up', 'status') FIP_ASSOC_MSG = ('Floating IP %(fip_id)s %(assoc)s. 
External IP: %(ext_ip)s, ' 'port: %(port_id)s.') @registry.has_registry_receivers class L3_NAT_dbonly_mixin(l3.RouterPluginBase, base_services.WorkerBase, st_attr.StandardAttrDescriptionMixin): """Mixin class to add L3/NAT router methods to db_base_plugin_v2.""" router_device_owners = ( DEVICE_OWNER_HA_REPLICATED_INT, DEVICE_OWNER_ROUTER_INTF, DEVICE_OWNER_ROUTER_GW, DEVICE_OWNER_FLOATINGIP ) _dns_integration = None _fip_qos = None def __new__(cls, *args, **kwargs): inst = super(L3_NAT_dbonly_mixin, cls).__new__(cls, *args, **kwargs) inst._start_janitor() return inst @staticmethod @registry.receives(resources.PORT, [events.BEFORE_DELETE]) def _prevent_l3_port_delete_callback(resource, event, trigger, payload=None): l3plugin = directory.get_plugin(plugin_constants.L3) if l3plugin and payload.metadata['port_check']: l3plugin.prevent_l3_port_deletion( payload.context, payload.resource_id) @property def _is_dns_integration_supported(self): if self._dns_integration is None: self._dns_integration = ( extensions.is_extension_supported( self._core_plugin, 'dns-integration') or extensions.is_extension_supported( self._core_plugin, 'dns-domain-ports')) return self._dns_integration @property def _is_fip_qos_supported(self): if self._fip_qos is None: # Check L3 service plugin self._fip_qos = extensions.is_extension_supported( self, qos_fip.FIP_QOS_ALIAS) return self._fip_qos @property def _core_plugin(self): return directory.get_plugin() def _start_janitor(self): """Starts the periodic job that cleans up broken complex resources. This job will look for things like floating IP ports without an associated floating IP and delete them 5 minutes after detection. """ interval = 60 * 5 # only every 5 minutes. cleanups should be rare initial_delay = random.randint(0, interval) # splay multiple servers janitor = neutron_worker.PeriodicWorker(self._clean_garbage, interval, initial_delay) self.add_worker(janitor) def _clean_garbage(self): if not hasattr(self, '_candidate_broken_fip_ports'): self._candidate_broken_fip_ports = set() context = n_ctx.get_admin_context() candidates = self._get_dead_floating_port_candidates(context) # just because a port is in 'candidates' doesn't necessarily mean # it's broken, we could have just caught it before it was updated. # We confirm by waiting until the next call of this function to see # if it persists. 
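# Illustrative model of the two-pass confirmation implemented just below
# in _clean_garbage(): a port id must be reported as a candidate by two
# consecutive scans before it is cleaned up, so ports merely caught
# mid-update are skipped.
def _demo_two_pass(scans):
    remembered, cleaned = set(), []
    for candidates in scans:
        to_cleanup = candidates & remembered
        remembered = candidates - to_cleanup
        cleaned.append(sorted(to_cleanup))
    return cleaned

# 'b' recovers after the first scan; 'a' persists and gets cleaned up.
assert _demo_two_pass([{'a', 'b'}, {'a'}]) == [[], ['a']]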
to_cleanup = candidates & self._candidate_broken_fip_ports self._candidate_broken_fip_ports = candidates - to_cleanup for port_id in to_cleanup: # ensure it wasn't just a failure to update device_id before we # delete it try: self._fix_or_kill_floating_port(context, port_id) except Exception: LOG.exception("Error cleaning up floating IP port: %s", port_id) def _fix_or_kill_floating_port(self, context, port_id): pager = base_obj.Pager(limit=1) fips = l3_obj.FloatingIP.get_objects( context, _pager=pager, floating_port_id=port_id) if fips: LOG.warning("Found incorrect device_id on floating port " "%(pid)s, correcting to %(fip)s.", {'pid': port_id, 'fip': fips[0].id}) self._core_plugin.update_port( context, port_id, {'port': {'device_id': fips[0].id}}) else: LOG.warning("Found floating IP port %s without floating IP, " "deleting.", port_id) self._core_plugin.delete_port( context, port_id, l3_port_check=False) registry.notify(resources.FLOATING_IP, events.AFTER_DELETE, self, context=context, **fips[0]) def _get_dead_floating_port_candidates(self, context): filters = {'device_id': ['PENDING'], 'device_owner': [DEVICE_OWNER_FLOATINGIP]} return {p['id'] for p in self._core_plugin.get_ports(context, filters)} def _get_router(self, context, router_id): try: router = model_query.get_by_id( context, l3_models.Router, router_id) except exc.NoResultFound: raise l3_exc.RouterNotFound(router_id=router_id) return router def _make_router_dict(self, router, fields=None, process_extensions=True): res = dict((key, router[key]) for key in CORE_ROUTER_ATTRS) if router['gw_port_id']: ext_gw_info = { 'network_id': router.gw_port['network_id'], 'external_fixed_ips': [{'subnet_id': ip["subnet_id"], 'ip_address': ip["ip_address"]} for ip in router.gw_port['fixed_ips']]} else: ext_gw_info = None res.update({ EXTERNAL_GW_INFO: ext_gw_info, 'gw_port_id': router['gw_port_id'], }) # NOTE(salv-orlando): The following assumes this mixin is used in a # class inheriting from CommonDbMixin, which is true for all existing # plugins. 
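# Illustrative: the external gateway block assembled by
# _make_router_dict() above has this shape for a router with a gateway
# port (the ids and address below are made-up example values):
example_ext_gw_info = {
    'network_id': 'ext-net-id',
    'external_fixed_ips': [
        {'subnet_id': 'ext-subnet-id', 'ip_address': '203.0.113.5'},
    ],
}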
if process_extensions: resource_extend.apply_funcs(l3_apidef.ROUTERS, res, router) return lib_db_utils.resource_fields(res, fields) def _create_router_db(self, context, router, tenant_id): """Create the DB object.""" router.setdefault('id', uuidutils.generate_uuid()) router['tenant_id'] = tenant_id registry.notify(resources.ROUTER, events.BEFORE_CREATE, self, context=context, router=router) with context.session.begin(subtransactions=True): # pre-generate id so it will be available when # configuring external gw port router_db = l3_models.Router( id=router['id'], tenant_id=router['tenant_id'], name=router['name'], admin_state_up=router['admin_state_up'], status=constants.ACTIVE, description=router.get('description')) context.session.add(router_db) registry.notify(resources.ROUTER, events.PRECOMMIT_CREATE, self, context=context, router=router, router_id=router['id'], router_db=router_db) return router_db def _update_gw_for_create_router(self, context, gw_info, router_id): if gw_info: router_db = self._get_router(context, router_id) self._update_router_gw_info(context, router_id, gw_info, router=router_db) @db_api.retry_if_session_inactive() def create_router(self, context, router): r = router['router'] gw_info = r.pop(EXTERNAL_GW_INFO, None) create = functools.partial(self._create_router_db, context, r, r['tenant_id']) delete = functools.partial(self.delete_router, context) update_gw = functools.partial(self._update_gw_for_create_router, context, gw_info) router_db, _unused = db_utils.safe_creation(context, create, delete, update_gw, transaction=False) new_router = self._make_router_dict(router_db) registry.notify(resources.ROUTER, events.AFTER_CREATE, self, context=context, router_id=router_db.id, router=new_router, request_attrs=r, router_db=router_db) return new_router def _update_router_db(self, context, router_id, data): """Update the DB object.""" with db_api.CONTEXT_WRITER.using(context): router_db = self._get_router(context, router_id) old_router = self._make_router_dict(router_db) if data: router_db.update(data) registry.publish(resources.ROUTER, events.PRECOMMIT_UPDATE, self, payload=events.DBEventPayload( context, request_body=data, states=(old_router,), resource_id=router_id, desired_state=router_db)) return router_db @db_api.retry_if_session_inactive() def update_router(self, context, id, router): r = router['router'] gw_info = r.pop(EXTERNAL_GW_INFO, constants.ATTR_NOT_SPECIFIED) original = self.get_router(context, id) if gw_info != constants.ATTR_NOT_SPECIFIED: # Update the gateway outside of the DB update since it involves L2 # calls that don't make sense to rollback and may cause deadlocks # in a transaction. 
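# Simplified sketch (not the real helper) of the safe-creation pattern
# used by create_router() above via db_utils.safe_creation: the DB object
# is created first, the follow-up step (gateway wiring) runs outside the
# transaction, and the object is deleted by hand if that step fails.
def _safe_creation_sketch(create, delete, post_create):
    obj = create()
    try:
        post_create()
    except Exception:
        delete(obj['id'])
        raise
    return obj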
self._update_router_gw_info(context, id, gw_info) router_db = self._update_router_db(context, id, r) updated = self._make_router_dict(router_db) registry.notify(resources.ROUTER, events.AFTER_UPDATE, self, context=context, router_id=id, old_router=original, router=updated, request_attrs=r, router_db=router_db) return updated def _create_router_gw_port(self, context, router, network_id, ext_ips): # Port has no 'tenant-id', as it is hidden from user port_data = {'tenant_id': '', # intentionally not set 'network_id': network_id, 'fixed_ips': ext_ips or constants.ATTR_NOT_SPECIFIED, 'device_id': router['id'], 'device_owner': DEVICE_OWNER_ROUTER_GW, 'admin_state_up': True, 'name': ''} gw_port = plugin_utils.create_port( self._core_plugin, context.elevated(), {'port': port_data}) if not gw_port['fixed_ips']: LOG.debug('No IPs available for external network %s', network_id) with plugin_utils.delete_port_on_error( self._core_plugin, context.elevated(), gw_port['id']): with context.session.begin(subtransactions=True): router.gw_port = self._core_plugin._get_port( context.elevated(), gw_port['id']) router_port = l3_obj.RouterPort( context, router_id=router.id, port_id=gw_port['id'], port_type=DEVICE_OWNER_ROUTER_GW ) context.session.add(router) router_port.create() def _validate_gw_info(self, context, gw_port, info, ext_ips): network_id = info['network_id'] if info else None if network_id: network_db = self._core_plugin._get_network(context, network_id) if not network_db.external: msg = _("Network %s is not an external network") % network_id raise n_exc.BadRequest(resource='router', msg=msg) if ext_ips: subnets = self._core_plugin.get_subnets_by_network(context, network_id) for s in subnets: if not s['gateway_ip']: continue for ext_ip in ext_ips: if ext_ip.get('ip_address') == s['gateway_ip']: msg = _("External IP %s is the same as the " "gateway IP") % ext_ip.get('ip_address') raise n_exc.BadRequest(resource='router', msg=msg) return network_id # NOTE(yamamoto): This method is an override point for plugins # inheriting this class. Do not optimize this out. 
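# Illustrative demo of the clash rejected by _validate_gw_info() above: a
# requested external fixed IP may not equal the gateway IP of any subnet
# on the external network (example values):
subnets = [{'gateway_ip': '198.51.100.1'}, {'gateway_ip': None}]
ext_ips = [{'ip_address': '198.51.100.1'}]
clash = any(ext_ip.get('ip_address') == s['gateway_ip']
            for s in subnets if s['gateway_ip']
            for ext_ip in ext_ips)
assert clash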
def router_gw_port_has_floating_ips(self, context, router_id): """Return True if the router's gateway port is serving floating IPs.""" return bool(self.get_floatingips_count(context, {'router_id': [router_id]})) def _delete_current_gw_port(self, context, router_id, router, new_network_id): """Delete gw port if attached to an old network.""" port_requires_deletion = ( router.gw_port and router.gw_port['network_id'] != new_network_id) if not port_requires_deletion: return admin_ctx = context.elevated() old_network_id = router.gw_port['network_id'] if self.router_gw_port_has_floating_ips(admin_ctx, router_id): raise l3_exc.RouterExternalGatewayInUseByFloatingIp( router_id=router_id, net_id=router.gw_port['network_id']) gw_ips = [x['ip_address'] for x in router.gw_port['fixed_ips']] gw_port_id = router.gw_port['id'] self._delete_router_gw_port_db(context, router) self._core_plugin.delete_port( admin_ctx, gw_port_id, l3_port_check=False) with context.session.begin(subtransactions=True): context.session.refresh(router) # TODO(boden): normalize metadata metadata = {'network_id': old_network_id, 'new_network_id': new_network_id, 'gateway_ips': gw_ips} registry.publish(resources.ROUTER_GATEWAY, events.AFTER_DELETE, self, payload=events.DBEventPayload( context, states=(router,), metadata=metadata, resource_id=router_id)) def _delete_router_gw_port_db(self, context, router): with context.session.begin(subtransactions=True): router.gw_port = None if router not in context.session: context.session.add(router) try: registry.publish(resources.ROUTER_GATEWAY, events.BEFORE_DELETE, self, payload=events.DBEventPayload( context, states=(router,), resource_id=router.id)) except exceptions.CallbackFailure as e: # NOTE(armax): preserve old check's behavior if len(e.errors) == 1: raise e.errors[0].error raise l3_exc.RouterInUse(router_id=router.id, reason=e) def _create_gw_port(self, context, router_id, router, new_network_id, ext_ips): new_valid_gw_port_attachment = ( new_network_id and (not router.gw_port or router.gw_port['network_id'] != new_network_id)) if new_valid_gw_port_attachment: subnets = self._core_plugin.get_subnets_by_network(context, new_network_id) try: registry.publish( resources.ROUTER_GATEWAY, events.BEFORE_CREATE, self, payload=events.DBEventPayload( context, request_body=router, metadata={ 'network_id': new_network_id, 'subnets': subnets}, resource_id=router_id)) except exceptions.CallbackFailure as e: # raise the underlying exception raise e.errors[0].error self._check_for_dup_router_subnets(context, router, new_network_id, subnets, include_gateway=True) self._create_router_gw_port(context, router, new_network_id, ext_ips) gw_ips = [x['ip_address'] for x in router.gw_port['fixed_ips']] registry.publish(resources.ROUTER_GATEWAY, events.AFTER_CREATE, self._create_gw_port, payload=events.DBEventPayload( context, states=(router,), metadata={'gateway_ips': gw_ips, 'network_id': new_network_id}, resource_id=router_id)) def _update_current_gw_port(self, context, router_id, router, ext_ips): self._core_plugin.update_port(context.elevated(), router.gw_port['id'], {'port': {'fixed_ips': ext_ips}}) context.session.expire(router.gw_port) def _update_router_gw_info(self, context, router_id, info, router=None): # TODO(salvatore-orlando): guarantee atomic behavior also across # operations that span beyond the model classes handled by this # class (e.g.: delete_port) router = router or self._get_router(context, router_id) gw_port = router.gw_port ext_ips = info.get('external_fixed_ips') if info else [] 
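# Hypothetical helper summarizing the branch taken just below in
# _update_router_gw_info(): the existing gateway port is updated in place
# only when the network is unchanged and only the fixed IPs differ;
# otherwise control falls through to the delete-then-create path (each
# side of which is itself conditional).
def _gw_action(has_gw_port, ext_ip_change, same_network):
    if has_gw_port and ext_ip_change and same_network:
        return 'update-in-place'
    return 'delete-then-create'

assert _gw_action(True, True, True) == 'update-in-place'
assert _gw_action(True, True, False) == 'delete-then-create'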
ext_ip_change = self._check_for_external_ip_change( context, gw_port, ext_ips) network_id = self._validate_gw_info(context, gw_port, info, ext_ips) if gw_port and ext_ip_change and gw_port['network_id'] == network_id: self._update_current_gw_port(context, router_id, router, ext_ips) else: self._delete_current_gw_port(context, router_id, router, network_id) self._create_gw_port(context, router_id, router, network_id, ext_ips) def _check_for_external_ip_change(self, context, gw_port, ext_ips): # determine if new external IPs differ from the existing fixed_ips if not ext_ips: # no external_fixed_ips were included return False if not gw_port: return True subnet_ids = set(ip['subnet_id'] for ip in gw_port['fixed_ips']) new_subnet_ids = set(f['subnet_id'] for f in ext_ips if f.get('subnet_id')) subnet_change = not new_subnet_ids == subnet_ids if subnet_change: return True ip_addresses = set(ip['ip_address'] for ip in gw_port['fixed_ips']) new_ip_addresses = set(f['ip_address'] for f in ext_ips if f.get('ip_address')) ip_address_change = not ip_addresses == new_ip_addresses return ip_address_change def _ensure_router_not_in_use(self, context, router_id): """Ensure that no internal network interface is attached to the router. """ router = self._get_router(context, router_id) device_owner = self._get_device_owner(context, router) if any(rp.port_type == device_owner for rp in router.attached_ports): raise l3_exc.RouterInUse(router_id=router_id) return router @db_api.retry_if_session_inactive() def delete_router(self, context, id): registry.publish(resources.ROUTER, events.BEFORE_DELETE, self, payload=events.DBEventPayload( context, resource_id=id)) # TODO(nati) Refactor here when we have router insertion model router = self._ensure_router_not_in_use(context, id) original = self._make_router_dict(router) self._delete_current_gw_port(context, id, router, None) with context.session.begin(subtransactions=True): context.session.refresh(router) router_ports = router.attached_ports for rp in router_ports: self._core_plugin.delete_port(context.elevated(), rp.port.id, l3_port_check=False) with context.session.begin(subtransactions=True): context.session.refresh(router) registry.notify(resources.ROUTER, events.PRECOMMIT_DELETE, self, context=context, router_db=router, router_id=id) # we bump the revision even though we are about to delete to throw # staledataerror if something snuck in with a new interface router.bump_revision() context.session.flush() context.session.delete(router) registry.notify(resources.ROUTER, events.AFTER_DELETE, self, context=context, router_id=id, original=original) @db_api.retry_if_session_inactive() def get_router(self, context, id, fields=None): router = self._get_router(context, id) return self._make_router_dict(router, fields) @db_api.retry_if_session_inactive() def get_routers(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): marker_obj = lib_db_utils.get_marker_obj( self, context, 'router', limit, marker) return model_query.get_collection(context, l3_models.Router, self._make_router_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) @db_api.retry_if_session_inactive() def get_routers_count(self, context, filters=None): return model_query.get_collection_count(context, l3_models.Router, filters=filters) def _check_for_dup_router_subnets(self, context, router, network_id, new_subnets, include_gateway=False): # It's possible these ports are on the same network, but # 
different subnets. new_subnet_ids = {s['id'] for s in new_subnets} router_subnets = [] for p in (rp.port for rp in router.attached_ports): for ip in p['fixed_ips']: if ip['subnet_id'] in new_subnet_ids: msg = (_("Router already has a port on subnet %s") % ip['subnet_id']) raise n_exc.BadRequest(resource='router', msg=msg) gw_owner = (p.get('device_owner') == DEVICE_OWNER_ROUTER_GW) if include_gateway == gw_owner: router_subnets.append(ip['subnet_id']) # Ignore temporary Prefix Delegation CIDRs new_subnets = [s for s in new_subnets if s['cidr'] != constants.PROVISIONAL_IPV6_PD_PREFIX] id_filter = {'id': router_subnets} subnets = self._core_plugin.get_subnets(context.elevated(), filters=id_filter) for sub in subnets: cidr = sub['cidr'] ipnet = netaddr.IPNetwork(cidr) for s in new_subnets: new_cidr = s['cidr'] new_ipnet = netaddr.IPNetwork(new_cidr) match1 = netaddr.all_matching_cidrs(new_ipnet, [cidr]) match2 = netaddr.all_matching_cidrs(ipnet, [new_cidr]) if match1 or match2: data = {'subnet_cidr': new_cidr, 'subnet_id': s['id'], 'cidr': cidr, 'sub_id': sub['id']} msg = (_("Cidr %(subnet_cidr)s of subnet " "%(subnet_id)s overlaps with cidr %(cidr)s " "of subnet %(sub_id)s") % data) raise n_exc.BadRequest(resource='router', msg=msg) def _get_device_owner(self, context, router=None): """Get device_owner for the specified router.""" # NOTE(armando-migliaccio): in the base case this is invariant return DEVICE_OWNER_ROUTER_INTF def _validate_interface_info(self, interface_info, for_removal=False): port_id_specified = interface_info and 'port_id' in interface_info subnet_id_specified = interface_info and 'subnet_id' in interface_info if not (port_id_specified or subnet_id_specified): msg = _("Either subnet_id or port_id must be specified") raise n_exc.BadRequest(resource='router', msg=msg) for key in ('port_id', 'subnet_id'): if key not in interface_info: continue err = validators.validate_uuid(interface_info[key]) if err: raise n_exc.BadRequest(resource='router', msg=err) if not for_removal: if port_id_specified and subnet_id_specified: msg = _("Cannot specify both subnet-id and port-id") raise n_exc.BadRequest(resource='router', msg=msg) return port_id_specified, subnet_id_specified def _check_router_port(self, context, port_id, device_id): """Check that a port is available for an attachment to a router :param context: The context of the request. :param port_id: The port to be attached. :param device_id: This method will check that device_id corresponds to the device_id of the port. It raises PortInUse exception if it doesn't. :returns: The port description returned by the core plugin. :raises: PortInUse if the device_id is not the same as the port's one. :raises: BadRequest if the port has no fixed IP. 
""" port = self._core_plugin.get_port(context, port_id) if port['device_id'] != device_id: raise n_exc.PortInUse(net_id=port['network_id'], port_id=port['id'], device_id=port['device_id']) if not port['fixed_ips']: msg = _('Router port must have at least one fixed IP') raise n_exc.BadRequest(resource='router', msg=msg) return port def _validate_port_in_range_or_admin(self, context, subnets, port): if context.is_admin: return subnets_by_id = {} for s in subnets: addr_set = netaddr.IPSet() for range in s['allocation_pools']: addr_set.add(netaddr.IPRange(netaddr.IPAddress(range['start']), netaddr.IPAddress(range['end']))) subnets_by_id[s['id']] = (addr_set, s['project_id'],) for subnet_id, ip in [(fix_ip['subnet_id'], fix_ip['ip_address'],) for fix_ip in port['fixed_ips']]: if (ip not in subnets_by_id[subnet_id][0] and context.project_id != subnets_by_id[subnet_id][1]): msg = (_('Cannot add interface to router because specified ' 'port %(port)s has an IP address out of the ' 'allocation pool of subnet %(subnet)s, which is not ' 'owned by the project making the request') % {'port': port['id'], 'subnet': subnet_id}) raise n_exc.BadRequest(resource='router', msg=msg) def _validate_router_port_info(self, context, router, port_id): with db_api.autonested_transaction(context.session): # check again within transaction to mitigate race port = self._check_router_port(context, port_id, router.id) # Only allow one router port with IPv6 subnets per network id if self._port_has_ipv6_address(port): for existing_port in (rp.port for rp in router.attached_ports): if (existing_port['network_id'] == port['network_id'] and self._port_has_ipv6_address(existing_port)): msg = _("Cannot have multiple router ports with the " "same network id if both contain IPv6 " "subnets. Existing port %(p)s has IPv6 " "subnet(s) and network id %(nid)s") raise n_exc.BadRequest(resource='router', msg=msg % { 'p': existing_port['id'], 'nid': existing_port['network_id']}) fixed_ips = [ip for ip in port['fixed_ips']] subnets = [] for fixed_ip in fixed_ips: subnet = self._core_plugin.get_subnet(context, fixed_ip['subnet_id']) subnets.append(subnet) if subnets: self._check_for_dup_router_subnets(context, router, port['network_id'], subnets) # Keep the restriction against multiple IPv4 subnets if len([s for s in subnets if s['ip_version'] == 4]) > 1: msg = _("Cannot have multiple " "IPv4 subnets on router port") raise n_exc.BadRequest(resource='router', msg=msg) self._validate_port_in_range_or_admin(context, subnets, port) return port, subnets def _notify_attaching_interface(self, context, router_db, port, interface_info): """Notify third party code that an interface is being attached to a router :param context: The context of the request. :param router_db: The router db object having an interface attached. :param port: The port object being attached to the router. :param interface_info: The requested interface attachment info passed to add_router_interface. :raises: RouterInterfaceAttachmentConflict if a third party code prevent the port to be attach to the router. 
""" try: registry.notify(resources.ROUTER_INTERFACE, events.BEFORE_CREATE, self, context=context, router_db=router_db, port=port, interface_info=interface_info, router_id=router_db.id, network_id=port['network_id']) except exceptions.CallbackFailure as e: # raise the underlying exception reason = (_('cannot perform router interface attachment ' 'due to %(reason)s') % {'reason': e}) raise l3_exc.RouterInterfaceAttachmentConflict(reason=reason) def _add_interface_by_port(self, context, router, port_id, owner): # Update owner before actual process in order to avoid the # case where a port might get attached to a router without the # owner successfully updating due to an unavailable backend. self._core_plugin.update_port( context, port_id, {'port': {'device_id': router.id, 'device_owner': owner}}) return self._validate_router_port_info(context, router, port_id) def _port_has_ipv6_address(self, port): for fixed_ip in port['fixed_ips']: if netaddr.IPNetwork(fixed_ip['ip_address']).version == 6: return True def _find_ipv6_router_port_by_network(self, context, router, net_id): router_dev_owner = self._get_device_owner(context, router) for port in router.attached_ports: p = port['port'] if p['device_owner'] != router_dev_owner: # we don't want any special purpose internal ports continue if p['network_id'] == net_id and self._port_has_ipv6_address(p): return port def _add_interface_by_subnet(self, context, router, subnet_id, owner): subnet = self._core_plugin.get_subnet(context, subnet_id) if not subnet['gateway_ip']: msg = _('Subnet for router interface must have a gateway IP') raise n_exc.BadRequest(resource='router', msg=msg) if subnet['project_id'] != context.project_id and not context.is_admin: msg = (_('Cannot add interface to router because subnet %s is not ' 'owned by project making the request') % subnet_id) raise n_exc.BadRequest(resource='router', msg=msg) if (subnet['ip_version'] == 6 and subnet['ipv6_ra_mode'] is None and subnet['ipv6_address_mode'] is not None): msg = (_('IPv6 subnet %s configured to receive RAs from an ' 'external router cannot be added to Neutron Router.') % subnet['id']) raise n_exc.BadRequest(resource='router', msg=msg) self._check_for_dup_router_subnets(context, router, subnet['network_id'], [subnet]) fixed_ip = {'ip_address': subnet['gateway_ip'], 'subnet_id': subnet['id']} if (subnet['ip_version'] == 6 and not ipv6_utils.is_ipv6_pd_enabled(subnet)): # Add new prefix to an existing ipv6 port with the same network id # if one exists port = self._find_ipv6_router_port_by_network(context, router, subnet['network_id']) if port: fixed_ips = list(map(dict, port['port']['fixed_ips'])) fixed_ips.append(fixed_ip) return (self._core_plugin.update_port( context, port['port_id'], {'port': {'fixed_ips': fixed_ips}}), [subnet], False) port_data = {'tenant_id': router.tenant_id, 'network_id': subnet['network_id'], 'fixed_ips': [fixed_ip], 'admin_state_up': True, 'device_id': router.id, 'device_owner': owner, 'name': ''} return plugin_utils.create_port( self._core_plugin, context, {'port': port_data}), [subnet], True @staticmethod def _make_router_interface_info( router_id, tenant_id, port_id, network_id, subnet_id, subnet_ids): return { 'id': router_id, 'tenant_id': tenant_id, 'port_id': port_id, 'network_id': network_id, 'subnet_id': subnet_id, # deprecated by IPv6 multi-prefix 'subnet_ids': subnet_ids } @db_api.retry_if_session_inactive() def add_router_interface(self, context, router_id, interface_info=None): router = self._get_router(context, router_id) add_by_port, 
add_by_sub = self._validate_interface_info(interface_info) device_owner = self._get_device_owner(context, router_id) # This should be True unless adding an IPv6 prefix to an existing port new_router_intf = True cleanup_port = False if add_by_port: port_id = interface_info['port_id'] port = self._check_router_port(context, port_id, '') revert_value = {'device_id': '', 'device_owner': port['device_owner']} with plugin_utils.update_port_on_error( self._core_plugin, context, port_id, revert_value): port, subnets = self._add_interface_by_port( context, router, port_id, device_owner) # add_by_subnet is not used here, because the validation logic of # _validate_interface_info ensures that either of add_by_* is True. else: port, subnets, new_router_intf = self._add_interface_by_subnet( context, router, interface_info['subnet_id'], device_owner) cleanup_port = new_router_intf # only cleanup port we created revert_value = {'device_id': '', 'device_owner': port['device_owner']} if cleanup_port: mgr = plugin_utils.delete_port_on_error( self._core_plugin, context, port['id']) else: mgr = plugin_utils.update_port_on_error( self._core_plugin, context, port['id'], revert_value) if new_router_intf: with mgr: self._notify_attaching_interface(context, router_db=router, port=port, interface_info=interface_info) self._add_router_port( context, port['id'], router.id, device_owner) gw_ips = [] gw_network_id = None if router.gw_port: gw_network_id = router.gw_port.network_id gw_ips = [x['ip_address'] for x in router.gw_port.fixed_ips] registry.notify(resources.ROUTER_INTERFACE, events.AFTER_CREATE, self, context=context, network_id=gw_network_id, gateway_ips=gw_ips, cidrs=[x['cidr'] for x in subnets], subnets=subnets, port_id=port['id'], router_id=router_id, port=port, new_interface=new_router_intf, interface_info=interface_info) with context.session.begin(subtransactions=True): context.session.refresh(router) return self._make_router_interface_info( router.id, port['tenant_id'], port['id'], port['network_id'], subnets[-1]['id'], [subnet['id'] for subnet in subnets]) @db_api.retry_if_session_inactive() def _add_router_port(self, context, port_id, router_id, device_owner): l3_obj.RouterPort( context, port_id=port_id, router_id=router_id, port_type=device_owner ).create() # Update owner after actual process again in order to # make sure the records in routerports table and ports # table are consistent. 
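        # Illustrative end state after this method (identifiers
        # hypothetical): the routerports row
        #   (router_id='<router-uuid>', port_id='<port-uuid>',
        #    port_type='network:router_interface')
        # and the port's device_id/device_owner describe the same
        # attachment, which is what the update below enforces.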
self._core_plugin.update_port( context, port_id, {'port': {'device_id': router_id, 'device_owner': device_owner}}) def _check_router_interface_not_in_use(self, router_id, subnet_id): context = n_ctx.get_admin_context() subnet = self._core_plugin.get_subnet(context, subnet_id) subnet_cidr = netaddr.IPNetwork(subnet['cidr']) fip_objs = l3_obj.FloatingIP.get_objects(context, router_id=router_id) pf_plugin = directory.get_plugin(plugin_constants.PORTFORWARDING) if pf_plugin: fip_ids = [fip_obj.id for fip_obj in fip_objs] pf_objs = port_forwarding.PortForwarding.get_objects( context, floatingip_id=fip_ids) for pf_obj in pf_objs: if (pf_obj.internal_ip_address and pf_obj.internal_ip_address in subnet_cidr): raise l3_exc.RouterInterfaceInUseByFloatingIP( router_id=router_id, subnet_id=subnet_id) for fip_obj in fip_objs: if (fip_obj.fixed_ip_address and fip_obj.fixed_ip_address in subnet_cidr): raise l3_exc.RouterInterfaceInUseByFloatingIP( router_id=router_id, subnet_id=subnet_id) def _confirm_router_interface_not_in_use(self, context, router_id, subnet_id): try: registry.publish( resources.ROUTER_INTERFACE, events.BEFORE_DELETE, self, payload=events.DBEventPayload( context, metadata={'subnet_id': subnet_id}, resource_id=router_id)) except exceptions.CallbackFailure as e: # NOTE(armax): preserve old check's behavior if len(e.errors) == 1: raise e.errors[0].error raise l3_exc.RouterInUse(router_id=router_id, reason=e) self._check_router_interface_not_in_use(router_id, subnet_id) def _remove_interface_by_port(self, context, router_id, port_id, subnet_id, owner): ports = port_obj.Port.get_ports_by_router_and_port( context, router_id, owner, port_id) if len(ports) < 1: raise l3_exc.RouterInterfaceNotFound( router_id=router_id, port_id=port_id) port = ports[0] port_subnet_ids = [fixed_ip['subnet_id'] for fixed_ip in port['fixed_ips']] if subnet_id and subnet_id not in port_subnet_ids: raise n_exc.SubnetMismatchForPort( port_id=port_id, subnet_id=subnet_id) subnets = [self._core_plugin.get_subnet(context, port_subnet_id) for port_subnet_id in port_subnet_ids] for port_subnet_id in port_subnet_ids: self._confirm_router_interface_not_in_use( context, router_id, port_subnet_id) self._core_plugin.delete_port(context, port['id'], l3_port_check=False) return port, subnets def _remove_interface_by_subnet(self, context, router_id, subnet_id, owner): self._confirm_router_interface_not_in_use( context, router_id, subnet_id) subnet = self._core_plugin.get_subnet(context, subnet_id) ports = port_obj.Port.get_ports_by_router_and_network( context, router_id, owner, subnet['network_id']) for p in ports: try: p = self._core_plugin.get_port(context, p.id) except n_exc.PortNotFound: continue port_subnets = [fip['subnet_id'] for fip in p['fixed_ips']] if subnet_id in port_subnets and len(port_subnets) > 1: # multiple prefix port - delete prefix from port fixed_ips = [dict(fip) for fip in p['fixed_ips'] if fip['subnet_id'] != subnet_id] self._core_plugin.update_port( context, p['id'], {'port': {'fixed_ips': fixed_ips}}) return (p, [subnet]) elif subnet_id in port_subnets: # only one subnet on port - delete the port self._core_plugin.delete_port(context, p['id'], l3_port_check=False) return (p, [subnet]) raise l3_exc.RouterInterfaceNotFoundForSubnet( router_id=router_id, subnet_id=subnet_id) @db_api.retry_if_session_inactive() def remove_router_interface(self, context, router_id, interface_info): remove_by_port, _ = self._validate_interface_info(interface_info, for_removal=True) port_id = interface_info.get('port_id') 
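        # remove_router_interface accepts either identifier (values
        # illustrative):
        #   {'port_id': '<port-uuid>'}     -> remove by port
        #   {'subnet_id': '<subnet-uuid>'} -> remove by subnet
        # _validate_interface_info(for_removal=True) above has already
        # checked that at least one of the two is present and is a UUID.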
subnet_id = interface_info.get('subnet_id') device_owner = self._get_device_owner(context, router_id) if remove_by_port: port, subnets = self._remove_interface_by_port(context, router_id, port_id, subnet_id, device_owner) else: port, subnets = self._remove_interface_by_subnet( context, router_id, subnet_id, device_owner) gw_network_id = None gw_ips = [] router = self._get_router(context, router_id) if router.gw_port: gw_network_id = router.gw_port.network_id gw_ips = [x['ip_address'] for x in router.gw_port.fixed_ips] registry.notify(resources.ROUTER_INTERFACE, events.AFTER_DELETE, self, context=context, cidrs=[x['cidr'] for x in subnets], network_id=gw_network_id, gateway_ips=gw_ips, port=port, router_id=router_id, interface_info=interface_info) with context.session.begin(subtransactions=True): context.session.refresh(router) return self._make_router_interface_info(router_id, port['tenant_id'], port['id'], port['network_id'], subnets[0]['id'], [subnet['id'] for subnet in subnets]) def _get_floatingip(self, context, id): floatingip = l3_obj.FloatingIP.get_object(context, id=id) if not floatingip: raise l3_exc.FloatingIPNotFound(floatingip_id=id) return floatingip def _make_floatingip_dict(self, floatingip, fields=None, process_extensions=True): floating_ip_address = (str(floatingip.floating_ip_address) if floatingip.floating_ip_address else None) fixed_ip_address = (str(floatingip.fixed_ip_address) if floatingip.fixed_ip_address else None) res = {'id': floatingip.id, 'tenant_id': floatingip.project_id, 'floating_ip_address': floating_ip_address, 'floating_network_id': floatingip.floating_network_id, 'router_id': floatingip.router_id, 'port_id': floatingip.fixed_port_id, 'fixed_ip_address': fixed_ip_address, 'status': floatingip.status} # NOTE(mlavalle): The following assumes this mixin is used in a # class inheriting from CommonDbMixin, which is true for all existing # plugins. # TODO(lujinluo): Change floatingip.db_obj to floatingip once all # codes are migrated to use Floating IP OVO object. if process_extensions: resource_extend.apply_funcs( l3_apidef.FLOATINGIPS, res, floatingip.db_obj) return lib_db_utils.resource_fields(res, fields) def _get_router_for_floatingip(self, context, internal_port, internal_subnet_id, external_network_id): subnet = self._core_plugin.get_subnet(context, internal_subnet_id) return self.get_router_for_floatingip( context, internal_port, subnet, external_network_id) # NOTE(yamamoto): This method is an override point for plugins # inheriting this class. Do not optimize this out. def get_router_for_floatingip(self, context, internal_port, internal_subnet, external_network_id): """Find a router to handle the floating-ip association. :param internal_port: The port for the fixed-ip. :param internal_subnet: The subnet for the fixed-ip. :param external_network_id: The external network for floating-ip. :raises: ExternalGatewayForFloatingIPNotFound if no suitable router is found. """ # Find routers(with router_id and interface address) that # connect given internal subnet and the external network. # Among them, if the router's interface address matches # with subnet's gateway-ip, return that router. # Otherwise return the first router. 
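        # Rough SQL equivalent of the query assembled below (a sketch only,
        # with table and column names abridged):
        #   SELECT DISTINCT rp.router_id, ip.ip_address
        #   FROM routerports rp
        #   JOIN ports p ON p.id = rp.port_id
        #   JOIN ipallocations ip ON ip.port_id = p.id
        #   JOIN ports gw ON gw.device_id = rp.router_id
        #   WHERE p.network_id = :internal_net
        #     AND rp.port_type IN (:router_interface_owners)
        #     AND ip.subnet_id = :internal_subnet
        #     AND gw.network_id = :external_net
        #     AND gw.device_owner = 'network:router_gateway';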
RouterPort = l3_models.RouterPort gw_port = orm.aliased(models_v2.Port, name="gw_port") # TODO(lujinluo): Need IPAllocation and Port object routerport_qry = context.session.query( RouterPort.router_id, models_v2.IPAllocation.ip_address).join( RouterPort.port, models_v2.Port.fixed_ips).filter( models_v2.Port.network_id == internal_port['network_id'], RouterPort.port_type.in_(constants.ROUTER_INTERFACE_OWNERS), models_v2.IPAllocation.subnet_id == internal_subnet['id'] ).join(gw_port, gw_port.device_id == RouterPort.router_id).filter( gw_port.network_id == external_network_id, gw_port.device_owner == DEVICE_OWNER_ROUTER_GW ).distinct() first_router_id = None for router_id, interface_ip in routerport_qry: if interface_ip == internal_subnet['gateway_ip']: return router_id if not first_router_id: first_router_id = router_id if first_router_id: return first_router_id raise l3_exc.ExternalGatewayForFloatingIPNotFound( subnet_id=internal_subnet['id'], external_network_id=external_network_id, port_id=internal_port['id']) def _port_ipv4_fixed_ips(self, port): return [ip for ip in port['fixed_ips'] if netaddr.IPAddress(ip['ip_address']).version == 4] def _internal_fip_assoc_data(self, context, fip, tenant_id): """Retrieve internal port data for floating IP. Retrieve information concerning the internal port where the floating IP should be associated to. """ internal_port = self._core_plugin.get_port(context, fip['port_id']) if internal_port['tenant_id'] != tenant_id and not context.is_admin: port_id = fip['port_id'] msg = (_('Cannot process floating IP association with ' 'Port %s, since that port is owned by a ' 'different tenant') % port_id) raise n_exc.BadRequest(resource='floatingip', msg=msg) internal_subnet_id = None if not utils.is_fip_serviced(internal_port.get('device_owner')): msg = _('Port %(id)s is unable to be assigned a floating IP') raise n_exc.BadRequest(resource='floatingip', msg=msg) if fip.get('fixed_ip_address'): internal_ip_address = fip['fixed_ip_address'] if netaddr.IPAddress(internal_ip_address).version != 4: msg = (_('Cannot process floating IP association with %s, ' 'since that is not an IPv4 address') % internal_ip_address) raise n_exc.BadRequest(resource='floatingip', msg=msg) for ip in internal_port['fixed_ips']: if ip['ip_address'] == internal_ip_address: internal_subnet_id = ip['subnet_id'] if not internal_subnet_id: msg = (_('Port %(id)s does not have fixed ip %(address)s') % {'id': internal_port['id'], 'address': internal_ip_address}) raise n_exc.BadRequest(resource='floatingip', msg=msg) else: ipv4_fixed_ips = self._port_ipv4_fixed_ips(internal_port) if not ipv4_fixed_ips: msg = (_('Cannot add floating IP to port %s that has ' 'no fixed IPv4 addresses') % internal_port['id']) raise n_exc.BadRequest(resource='floatingip', msg=msg) if len(ipv4_fixed_ips) > 1: msg = (_('Port %s has multiple fixed IPv4 addresses. Must ' 'provide a specific IPv4 address when assigning a ' 'floating IP') % internal_port['id']) raise n_exc.BadRequest(resource='floatingip', msg=msg) internal_ip_address = ipv4_fixed_ips[0]['ip_address'] internal_subnet_id = ipv4_fixed_ips[0]['subnet_id'] return internal_port, internal_subnet_id, internal_ip_address def _get_assoc_data(self, context, fip, floatingip_obj): """Determine/extract data associated with the internal port. When a floating IP is associated with an internal port, we need to extract/determine some data associated with the internal port, including the internal_ip_address, and router_id. 
The confirmation of the internal port whether owned by the tenant who owns the floating IP will be confirmed by _get_router_for_floatingip. """ (internal_port, internal_subnet_id, internal_ip_address) = self._internal_fip_assoc_data( context, fip, floatingip_obj.project_id) router_id = self._get_router_for_floatingip( context, internal_port, internal_subnet_id, floatingip_obj.floating_network_id) if self.is_router_distributed(context, router_id): if not plugin_utils.can_port_be_bound_to_virtual_bridge( internal_port): msg = _('Port VNIC type is not valid to associate a FIP in ' 'DVR mode') raise n_exc.BadRequest(resource='floatingip', msg=msg) return (fip['port_id'], internal_ip_address, router_id) def _check_and_get_fip_assoc(self, context, fip, floatingip_obj): port_id = internal_ip_address = router_id = None if fip.get('fixed_ip_address') and not fip.get('port_id'): msg = _("fixed_ip_address cannot be specified without a port_id") raise n_exc.BadRequest(resource='floatingip', msg=msg) if fip.get('port_id'): port_id, internal_ip_address, router_id = self._get_assoc_data( context, fip, floatingip_obj) if port_id == floatingip_obj.fixed_port_id: # Floating IP association is not changed. return port_id, internal_ip_address, router_id fip_exists = l3_obj.FloatingIP.objects_exist( context, fixed_port_id=fip['port_id'], floating_network_id=floatingip_obj.floating_network_id, fixed_ip_address=netaddr.IPAddress(internal_ip_address)) if fip_exists: floating_ip_address = (str(floatingip_obj.floating_ip_address) if floatingip_obj.floating_ip_address else None) raise l3_exc.FloatingIPPortAlreadyAssociated( port_id=fip['port_id'], fip_id=floatingip_obj.id, floating_ip_address=floating_ip_address, fixed_ip=internal_ip_address, net_id=floatingip_obj.floating_network_id) if fip and 'port_id' not in fip and floatingip_obj.fixed_port_id: # NOTE(liuyulong): without the fix of bug #1610045 here could # also let floating IP can be dissociated with an empty # updating dict. fip['port_id'] = floatingip_obj.fixed_port_id port_id, internal_ip_address, router_id = self._get_assoc_data( context, fip, floatingip_obj) # Condition for floating IP with binding port forwarding if not floatingip_obj.fixed_port_id and floatingip_obj.router_id: router_id = floatingip_obj.router_id # After all upper conditions, if updating API dict is submitted with # {'port_id': null}, then the floating IP cloud also be dissociated. 
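        # Summary of the possible return values (identifiers illustrative):
        #   associate:     ('<port-uuid>', '10.0.0.5', '<router-uuid>')
        #   disassociate:  (None, None, None)
        #   no fixed port but bound to port forwardings:
        #                  (None, None, '<router-uuid>')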
return port_id, internal_ip_address, router_id def _update_fip_assoc(self, context, fip, floatingip_obj): previous_router_id = floatingip_obj.router_id port_id, internal_ip_address, router_id = ( self._check_and_get_fip_assoc(context, fip, floatingip_obj)) association_event = None if floatingip_obj.fixed_port_id != port_id: association_event = bool(port_id) floatingip_obj.fixed_ip_address = ( netaddr.IPAddress(internal_ip_address) if internal_ip_address else None) floatingip_obj.fixed_port_id = port_id floatingip_obj.router_id = router_id floatingip_obj.last_known_router_id = previous_router_id if 'description' in fip: floatingip_obj.description = fip['description'] floating_ip_address = (str(floatingip_obj.floating_ip_address) if floatingip_obj.floating_ip_address else None) return {'fixed_ip_address': internal_ip_address, 'fixed_port_id': port_id, 'router_id': router_id, 'last_known_router_id': previous_router_id, 'floating_ip_address': floating_ip_address, 'floating_network_id': floatingip_obj.floating_network_id, 'floating_ip_id': floatingip_obj.id, 'context': context, 'association_event': association_event} def _is_ipv4_network(self, context, net_id): net = self._core_plugin._get_network(context, net_id) return any(s.ip_version == 4 for s in net.subnets) def _create_floatingip(self, context, floatingip, initial_status=constants.FLOATINGIP_STATUS_ACTIVE): try: registry.publish(resources.FLOATING_IP, events.BEFORE_CREATE, self, payload=events.DBEventPayload( context, request_body=floatingip)) except exceptions.CallbackFailure as e: # raise the underlying exception raise e.errors[0].error fip = floatingip['floatingip'] fip_id = uuidutils.generate_uuid() f_net_id = fip['floating_network_id'] if not self._core_plugin._network_is_external(context, f_net_id): msg = _("Network %s is not a valid external network") % f_net_id raise n_exc.BadRequest(resource='floatingip', msg=msg) if not self._is_ipv4_network(context, f_net_id): msg = _("Network %s does not contain any IPv4 subnet") % f_net_id raise n_exc.BadRequest(resource='floatingip', msg=msg) # This external port is never exposed to the tenant. # it is used purely for internal system and admin use when # managing floating IPs. port = {'tenant_id': '', # tenant intentionally not set 'network_id': f_net_id, 'admin_state_up': True, 'device_id': 'PENDING', 'device_owner': DEVICE_OWNER_FLOATINGIP, 'status': constants.PORT_STATUS_NOTAPPLICABLE, 'name': ''} # Both subnet_id and floating_ip_address are accepted, if # floating_ip_address is not in the subnet, # InvalidIpForSubnet exception will be raised. 
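        # Illustrative create request handled here (values hypothetical):
        #   {'floatingip': {'floating_network_id': '<ext-net-uuid>',
        #                   'subnet_id': '<ext-subnet-uuid>',
        #                   'floating_ip_address': '203.0.113.20'}}
        # Either subnet_id or floating_ip_address may be omitted; when both
        # are given, the address must fall within the named subnet.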
fixed_ip = {} if validators.is_attr_set(fip.get('subnet_id')): fixed_ip['subnet_id'] = fip['subnet_id'] if validators.is_attr_set(fip.get('floating_ip_address')): fixed_ip['ip_address'] = fip['floating_ip_address'] if fixed_ip: port['fixed_ips'] = [fixed_ip] # 'status' in port dict could not be updated by default, use # check_allow_post to stop the verification of system external_port = plugin_utils.create_port( self._core_plugin, context.elevated(), {'port': port}, check_allow_post=False) with plugin_utils.delete_port_on_error( self._core_plugin, context.elevated(), external_port['id']),\ context.session.begin(subtransactions=True): # Ensure IPv4 addresses are allocated on external port external_ipv4_ips = self._port_ipv4_fixed_ips(external_port) if not external_ipv4_ips: raise n_exc.ExternalIpAddressExhausted(net_id=f_net_id) floating_fixed_ip = external_ipv4_ips[0] floating_ip_address = floating_fixed_ip['ip_address'] floatingip_obj = l3_obj.FloatingIP( context, id=fip_id, project_id=fip['tenant_id'], status=initial_status, floating_network_id=fip['floating_network_id'], floating_ip_address=floating_ip_address, floating_port_id=external_port['id'], description=fip.get('description')) # Update association with internal port # and define external IP address assoc_result = self._update_fip_assoc(context, fip, floatingip_obj) floatingip_obj.create() floatingip_dict = self._make_floatingip_dict( floatingip_obj, process_extensions=False) if self._is_dns_integration_supported: dns_data = self._process_dns_floatingip_create_precommit( context, floatingip_dict, fip) if self._is_fip_qos_supported: self._process_extra_fip_qos_create(context, fip_id, fip) floatingip_obj = l3_obj.FloatingIP.get_object( context, id=floatingip_obj.id) floatingip_db = floatingip_obj.db_obj registry.notify(resources.FLOATING_IP, events.PRECOMMIT_CREATE, self, context=context, floatingip=fip, floatingip_id=fip_id, floatingip_db=floatingip_db) self._core_plugin.update_port( context.elevated(), external_port['id'], {'port': {'device_id': fip_id, 'project_id': fip['tenant_id']}}) registry.notify(resources.FLOATING_IP, events.AFTER_UPDATE, self._update_fip_assoc, **assoc_result) if assoc_result['association_event']: LOG.info(FIP_ASSOC_MSG, {'fip_id': assoc_result['floating_ip_id'], 'ext_ip': assoc_result['floating_ip_address'], 'port_id': assoc_result['fixed_port_id'], 'assoc': 'associated'}) if self._is_dns_integration_supported: self._process_dns_floatingip_create_postcommit(context, floatingip_dict, dns_data) # TODO(lujinluo): Change floatingip_db to floatingip_obj once all # codes are migrated to use Floating IP OVO object. 
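        # resource_extend.apply_funcs runs the extension hooks registered
        # for the FLOATINGIPS resource, letting loaded extensions (e.g.
        # QoS, DNS) graft their fields onto the response dict before it is
        # returned to the API layer.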
resource_extend.apply_funcs(l3_apidef.FLOATINGIPS, floatingip_dict, floatingip_db) return floatingip_dict @db_api.retry_if_session_inactive() def create_floatingip(self, context, floatingip, initial_status=constants.FLOATINGIP_STATUS_ACTIVE): return self._create_floatingip(context, floatingip, initial_status) def _update_floatingip(self, context, id, floatingip): try: registry.publish(resources.FLOATING_IP, events.BEFORE_UPDATE, self, payload=events.DBEventPayload( context, request_body=floatingip, resource_id=id)) except exceptions.CallbackFailure as e: # raise the underlying exception raise e.errors[0].error fip = floatingip['floatingip'] with context.session.begin(subtransactions=True): floatingip_obj = self._get_floatingip(context, id) old_floatingip = self._make_floatingip_dict(floatingip_obj) old_fixed_port_id = floatingip_obj.fixed_port_id assoc_result = self._update_fip_assoc(context, fip, floatingip_obj) floatingip_obj.update() floatingip_dict = self._make_floatingip_dict(floatingip_obj) if self._is_dns_integration_supported: dns_data = self._process_dns_floatingip_update_precommit( context, floatingip_dict) if self._is_fip_qos_supported: self._process_extra_fip_qos_update(context, floatingip_obj, fip, old_floatingip) floatingip_obj = l3_obj.FloatingIP.get_object( context, id=floatingip_obj.id) floatingip_db = floatingip_obj.db_obj registry.notify(resources.FLOATING_IP, events.PRECOMMIT_UPDATE, self, floatingip=floatingip, floatingip_db=floatingip_db, old_floatingip=old_floatingip, **assoc_result) registry.notify(resources.FLOATING_IP, events.AFTER_UPDATE, self._update_fip_assoc, **assoc_result) if assoc_result['association_event'] is not None: port_id = old_fixed_port_id or assoc_result['fixed_port_id'] assoc = ('associated' if assoc_result['association_event'] else 'disassociated') LOG.info(FIP_ASSOC_MSG, {'fip_id': assoc_result['floating_ip_id'], 'ext_ip': assoc_result['floating_ip_address'], 'port_id': port_id, 'assoc': assoc}) if self._is_dns_integration_supported: self._process_dns_floatingip_update_postcommit(context, floatingip_dict, dns_data) # TODO(lujinluo): Change floatingip_db to floatingip_obj once all # codes are migrated to use Floating IP OVO object. 
resource_extend.apply_funcs(l3_apidef.FLOATINGIPS, floatingip_dict, floatingip_db) return old_floatingip, floatingip_dict def _floatingips_to_router_ids(self, floatingips): return list(set([floatingip['router_id'] for floatingip in floatingips if floatingip['router_id']])) @db_api.retry_if_session_inactive() def update_floatingip(self, context, id, floatingip): _old_floatingip, floatingip = self._update_floatingip( context, id, floatingip) return floatingip @db_api.retry_if_session_inactive() def update_floatingip_status(self, context, floatingip_id, status): """Update operational status for floating IP in neutron DB.""" return l3_obj.FloatingIP.update_object( context, {'status': status}, id=floatingip_id) @registry.receives(resources.PORT, [events.PRECOMMIT_DELETE]) def _precommit_delete_port_callback( self, resource, event, trigger, **kwargs): if (kwargs['port']['device_owner'] == constants.DEVICE_OWNER_FLOATINGIP): registry.notify(resources.FLOATING_IP, events.PRECOMMIT_DELETE, self, **kwargs) def _delete_floatingip(self, context, id): floatingip = self._get_floatingip(context, id) floatingip_dict = self._make_floatingip_dict(floatingip) if self._is_dns_integration_supported: self._process_dns_floatingip_delete(context, floatingip_dict) # Foreign key cascade will take care of the removal of the # floating IP record once the port is deleted. We can't start # a transaction first to remove it ourselves because the delete_port # method will yield in its post-commit activities. self._core_plugin.delete_port(context.elevated(), floatingip.floating_port_id, l3_port_check=False) registry.notify(resources.FLOATING_IP, events.AFTER_DELETE, self, context=context, **floatingip_dict) return floatingip_dict @db_api.retry_if_session_inactive() def delete_floatingip(self, context, id): self._delete_floatingip(context, id) @db_api.retry_if_session_inactive() def get_floatingip(self, context, id, fields=None): floatingip = self._get_floatingip(context, id) return self._make_floatingip_dict(floatingip, fields) @db_api.retry_if_session_inactive() def get_floatingips(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pager = base_obj.Pager(sorts, limit, page_reverse, marker) filters = filters or {} for key, val in API_TO_DB_COLUMN_MAP.items(): if key in filters: filters[val] = filters.pop(key) floatingip_objs = l3_obj.FloatingIP.get_objects( context, _pager=pager, validate_filters=False, **filters) floatingip_dicts = [ self._make_floatingip_dict(floatingip_obj, fields) for floatingip_obj in floatingip_objs ] return floatingip_dicts @db_api.retry_if_session_inactive() def delete_disassociated_floatingips(self, context, network_id): fip_objs = l3_obj.FloatingIP.get_objects( context, floating_network_id=network_id, router_id=None, fixed_port_id=None) for fip in fip_objs: self.delete_floatingip(context, fip.id) @db_api.retry_if_session_inactive() def get_floatingips_count(self, context, filters=None): filters = filters or {} return l3_obj.FloatingIP.count(context, **filters) def _router_exists(self, context, router_id): try: self.get_router(context.elevated(), router_id) return True except l3_exc.RouterNotFound: return False def prevent_l3_port_deletion(self, context, port_id): """Checks to make sure a port is allowed to be deleted. Raises an exception if this is not the case. 
This should be called by any plugin when the API requests the deletion of a port, since some ports for L3 are not intended to be deleted directly via a DELETE to /ports, but rather via other API calls that perform the proper deletion checks. """ try: port = self._core_plugin.get_port(context, port_id) except n_exc.PortNotFound: # non-existent ports don't need to be protected from deletion return if port['device_owner'] not in self.router_device_owners: return # Raise port in use only if the port has IP addresses # Otherwise it's a stale port that can be removed fixed_ips = port['fixed_ips'] if not fixed_ips: LOG.debug("Port %(port_id)s has owner %(port_owner)s, but " "no IP address, so it can be deleted", {'port_id': port['id'], 'port_owner': port['device_owner']}) return # NOTE(kevinbenton): we also check to make sure that the # router still exists. It's possible for HA router interfaces # to remain after the router is deleted if they encounter an # error during deletion. # Elevated context in case router is owned by another tenant if port['device_owner'] == DEVICE_OWNER_FLOATINGIP: if not l3_obj.FloatingIP.objects_exist( context, id=port['device_id']): LOG.debug("Floating IP %(f_id)s corresponding to port " "%(port_id)s no longer exists, allowing deletion.", {'f_id': port['device_id'], 'port_id': port['id']}) return elif not self._router_exists(context, port['device_id']): LOG.debug("Router %(router_id)s corresponding to port " "%(port_id)s no longer exists, allowing deletion.", {'router_id': port['device_id'], 'port_id': port['id']}) return reason = _('has device owner %s') % port['device_owner'] raise n_exc.ServicePortInUse(port_id=port['id'], reason=reason) @db_api.retry_if_session_inactive() def disassociate_floatingips(self, context, port_id, do_notify=True): """Disassociate all floating IPs linked to specific port. @param port_id: ID of the port to disassociate floating IPs. @param do_notify: whether we should notify routers right away. This parameter is ignored. 
@return: set of router-ids that require notification updates """ with context.session.begin(subtransactions=True): floating_ip_objs = l3_obj.FloatingIP.get_objects( context, fixed_port_id=port_id) router_ids = {fip.router_id for fip in floating_ip_objs} old_fips = {fip.id: fip.to_dict() for fip in floating_ip_objs} values = {'fixed_port_id': None, 'fixed_ip_address': None, 'router_id': None} l3_obj.FloatingIP.update_objects( context, values, fixed_port_id=port_id) for fip in floating_ip_objs: registry.notify(resources.FLOATING_IP, events.PRECOMMIT_UPDATE, self, context=context, floatingip={l3_apidef.FLOATINGIP: values}, floatingip_db=fip, old_floatingip=old_fips[fip.id], router_ids=router_ids) for fip in floating_ip_objs: assoc_result = { 'fixed_ip_address': None, 'fixed_port_id': None, 'router_id': None, 'floating_ip_address': fip.floating_ip_address, 'floating_network_id': fip.floating_network_id, 'floating_ip_id': fip.id, 'context': context, 'router_ids': router_ids, 'association_event': False, } # Process DNS record removal after committing the transaction if self._is_dns_integration_supported: self._process_dns_floatingip_delete(context, fip.to_dict()) registry.notify(resources.FLOATING_IP, events.AFTER_UPDATE, self, **assoc_result) for fip in old_fips.values(): LOG.info(FIP_ASSOC_MSG, {'fip_id': fip['id'], 'ext_ip': fip['floating_ip_address'], 'port_id': fip['fixed_port_id'], 'assoc': 'disassociated'}) return router_ids def _get_floatingips_by_port_id(self, context, port_id): """Helper function to retrieve the fips associated with a port_id.""" return l3_obj.FloatingIP.get_objects(context, fixed_port_id=port_id) def _build_routers_list(self, context, routers, gw_ports): """Subclasses can override this to add extra gateway info""" return routers def _make_router_dict_with_gw_port(self, router, fields): result = self._make_router_dict(router, fields) if router.get('gw_port'): result['gw_port'] = self._core_plugin._make_port_dict( router['gw_port']) return result def _get_sync_routers(self, context, router_ids=None, active=None): """Query routers and their gw ports for l3 agent. Query routers with the router_ids. The gateway ports, if any, will be queried too. l3 agent has an option to deal with only one router id. In addition, when we need to notify the agent the data about only one router (when modification of router, its interfaces, gw_port and floatingips), we will have router_ids. @param router_ids: the list of router ids which we want to query. if it is None, all of routers will be queried. @return: a list of dicted routers with dicted gw_port populated if any """ filters = {'id': router_ids} if router_ids else {} if active is not None: filters['admin_state_up'] = [active] router_dicts = model_query.get_collection( context, l3_models.Router, self._make_router_dict_with_gw_port, filters=filters) if not router_dicts: return [] gw_ports = dict((r['gw_port']['id'], r['gw_port']) for r in router_dicts if r.get('gw_port')) return self._build_routers_list(context, router_dicts, gw_ports) def _make_floatingip_dict_with_scope(self, floatingip_obj, scope_id): d = self._make_floatingip_dict(floatingip_obj) d['fixed_ip_address_scope'] = scope_id return d def _get_sync_floating_ips(self, context, router_ids): """Query floating_ips that relate to list of router_ids with scope. This is different than the regular get_floatingips in that it finds the address scope of the fixed IP. The router needs to know this to distinguish it from other scopes. 
There are a few redirections to go through to discover the address scope from the floating ip. """ if not router_ids: return [] return [ self._make_floatingip_dict_with_scope(*scoped_fip) for scoped_fip in l3_obj.FloatingIP.get_scoped_floating_ips( context, router_ids) ] def _get_sync_interfaces(self, context, router_ids, device_owners=None): """Query router interfaces that relate to list of router_ids.""" device_owners = device_owners or [DEVICE_OWNER_ROUTER_INTF, DEVICE_OWNER_HA_REPLICATED_INT] if not router_ids: return [] # TODO(lujinluo): Need Port as synthetic field objs = l3_obj.RouterPort.get_objects( context, router_id=router_ids, port_type=list(device_owners)) interfaces = [self._core_plugin._make_port_dict(rp.db_obj.port) for rp in objs] return interfaces @staticmethod def _each_port_having_fixed_ips(ports): for port in ports or []: fixed_ips = port.get('fixed_ips', []) if not fixed_ips: # Skip ports without IPs, which can occur if a subnet # attached to a router is deleted LOG.info("Skipping port %s as no IP is configure on " "it", port['id']) continue yield port def _get_subnets_by_network_list(self, context, network_ids): if not network_ids: return {} query = context.session.query(models_v2.Subnet, models_v2.SubnetPool.address_scope_id) query = query.outerjoin( models_v2.SubnetPool, models_v2.Subnet.subnetpool_id == models_v2.SubnetPool.id) query = query.filter(models_v2.Subnet.network_id.in_(network_ids)) fields = ['id', 'cidr', 'gateway_ip', 'dns_nameservers', 'network_id', 'ipv6_ra_mode', 'subnetpool_id'] def make_subnet_dict_with_scope(row): subnet_db, address_scope_id = row subnet = self._core_plugin._make_subnet_dict( subnet_db, fields, context=context) subnet['address_scope_id'] = address_scope_id return subnet subnets_by_network = dict((id, []) for id in network_ids) for subnet in (make_subnet_dict_with_scope(row) for row in query): subnets_by_network[subnet['network_id']].append(subnet) return subnets_by_network def _get_mtus_by_network_list(self, context, network_ids): if not network_ids: return {} filters = {'id': network_ids} fields = ['id', 'mtu'] networks = self._core_plugin.get_networks(context, filters=filters, fields=fields) mtus_by_network = dict((network['id'], network.get('mtu', 0)) for network in networks) return mtus_by_network def _populate_mtu_and_subnets_for_ports(self, context, ports): """Populate ports with subnets. These ports already have fixed_ips populated. """ network_ids = [p['network_id'] for p in self._each_port_having_fixed_ips(ports)] mtus_by_network = self._get_mtus_by_network_list(context, network_ids) subnets_by_network = self._get_subnets_by_network_list( context, network_ids) for port in self._each_port_having_fixed_ips(ports): port['subnets'] = [] port['extra_subnets'] = [] port['address_scopes'] = {constants.IP_VERSION_4: None, constants.IP_VERSION_6: None} scopes = {} for subnet in subnets_by_network[port['network_id']]: scope = subnet['address_scope_id'] cidr = netaddr.IPNetwork(subnet['cidr']) scopes[cidr.version] = scope # If this subnet is used by the port (has a matching entry # in the port's fixed_ips), then add this subnet to the # port's subnets list, and populate the fixed_ips entry # entry with the subnet's prefix length. 
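        # Resulting per-port shape produced by this loop (values
        # illustrative):
        #   port['subnets']        -> subnets the port has fixed IPs on
        #   port['extra_subnets']  -> other subnets on the same network
        #   port['address_scopes'] -> {4: <scope or None>, 6: <scope or None>}
        #   port['fixed_ips'][n]['prefixlen'] -> prefix length of the subnet
        #   port['mtu']            -> network MTU (0 when unknown)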
subnet_info = {'id': subnet['id'], 'cidr': subnet['cidr'], 'gateway_ip': subnet['gateway_ip'], 'dns_nameservers': subnet['dns_nameservers'], 'ipv6_ra_mode': subnet['ipv6_ra_mode'], 'subnetpool_id': subnet['subnetpool_id']} for fixed_ip in port['fixed_ips']: if fixed_ip['subnet_id'] == subnet['id']: port['subnets'].append(subnet_info) prefixlen = cidr.prefixlen fixed_ip['prefixlen'] = prefixlen break else: # This subnet is not used by the port. port['extra_subnets'].append(subnet_info) port['address_scopes'].update(scopes) port['mtu'] = mtus_by_network.get(port['network_id'], 0) def _process_floating_ips(self, context, routers_dict, floating_ips): for floating_ip in floating_ips: router = routers_dict.get(floating_ip['router_id']) if router: router_floatingips = router.get(constants.FLOATINGIP_KEY, []) router_floatingips.append(floating_ip) router[constants.FLOATINGIP_KEY] = router_floatingips def _process_interfaces(self, routers_dict, interfaces): for interface in interfaces: router = routers_dict.get(interface['device_id']) if router: router_interfaces = router.get(constants.INTERFACE_KEY, []) router_interfaces.append(interface) router[constants.INTERFACE_KEY] = router_interfaces def _get_router_info_list(self, context, router_ids=None, active=None, device_owners=None): """Query routers and their related floating_ips, interfaces.""" with context.session.begin(subtransactions=True): routers = self._get_sync_routers(context, router_ids=router_ids, active=active) router_ids = [router['id'] for router in routers] interfaces = self._get_sync_interfaces( context, router_ids, device_owners) floating_ips = self._get_sync_floating_ips(context, router_ids) return (routers, interfaces, floating_ips) def get_sync_data(self, context, router_ids=None, active=None): routers, interfaces, floating_ips = self._get_router_info_list( context, router_ids=router_ids, active=active) ports_to_populate = [router['gw_port'] for router in routers if router.get('gw_port')] + interfaces self._populate_mtu_and_subnets_for_ports(context, ports_to_populate) routers_dict = dict((router['id'], router) for router in routers) self._process_floating_ips(context, routers_dict, floating_ips) self._process_interfaces(routers_dict, interfaces) return list(routers_dict.values()) def is_router_distributed(self, context, router_id): """Returns if a router is distributed or not If DVR extension is not enabled, no router will be distributed. This function is overridden in L3_NAT_with_dvr_db_mixin in case the DVR extension is loaded. 
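        For example (hypothetical request), with the DVR extension loaded
        a router created with {'router': {'distributed': True}} would make
        the overriding implementation return True; this base version
        always returns False.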
""" return False @registry.has_registry_receivers class L3RpcNotifierMixin(object): """Mixin class to add rpc notifier attribute to db_base_plugin_v2.""" @staticmethod @registry.receives(resources.PORT, [events.AFTER_DELETE]) def _notify_routers_callback(resource, event, trigger, **kwargs): context = kwargs['context'] router_ids = kwargs['router_ids'] l3plugin = directory.get_plugin(plugin_constants.L3) if l3plugin: l3plugin.notify_routers_updated(context, router_ids) else: LOG.debug('%s not configured', plugin_constants.L3) @staticmethod @registry.receives(resources.SUBNET, [events.AFTER_UPDATE]) def _notify_subnet_gateway_ip_update(resource, event, trigger, **kwargs): l3plugin = directory.get_plugin(plugin_constants.L3) if not l3plugin: return context = kwargs['context'] orig = kwargs['original_subnet'] updated = kwargs['subnet'] if orig['gateway_ip'] == updated['gateway_ip']: return network_id = updated['network_id'] subnet_id = updated['id'] query = context.session.query(models_v2.Port.device_id).filter_by( network_id=network_id, device_owner=DEVICE_OWNER_ROUTER_GW) query = query.join(models_v2.Port.fixed_ips).filter( models_v2.IPAllocation.subnet_id == subnet_id) router_ids = set(port.device_id for port in query) for router_id in router_ids: l3plugin.notify_router_updated(context, router_id) @staticmethod @registry.receives(resources.PORT, [events.AFTER_UPDATE]) def _notify_gateway_port_ip_changed(resource, event, trigger, **kwargs): l3plugin = directory.get_plugin(plugin_constants.L3) if not l3plugin: return new_port = kwargs.get('port') original_port = kwargs.get('original_port') if original_port['device_owner'] != constants.DEVICE_OWNER_ROUTER_GW: return if utils.port_ip_changed(new_port, original_port): l3plugin.notify_router_updated(kwargs['context'], new_port['device_id']) @staticmethod @registry.receives(resources.SUBNETPOOL_ADDRESS_SCOPE, [events.AFTER_UPDATE]) def _notify_subnetpool_address_scope_update(resource, event, trigger, payload=None): context = payload.context subnetpool_id = payload.resource_id router_ids = l3_obj.RouterPort.get_router_ids_by_subnetpool( context, subnetpool_id) l3plugin = directory.get_plugin(plugin_constants.L3) if l3plugin: l3plugin.notify_routers_updated(context, router_ids) else: LOG.debug('%s not configured', plugin_constants.L3) @property def l3_rpc_notifier(self): if not hasattr(self, '_l3_rpc_notifier'): self._l3_rpc_notifier = l3_rpc_agent_api.L3AgentNotifyAPI() return self._l3_rpc_notifier @l3_rpc_notifier.setter def l3_rpc_notifier(self, value): self._l3_rpc_notifier = value def notify_router_updated(self, context, router_id, operation=None): if router_id: self.l3_rpc_notifier.routers_updated( context, [router_id], operation) def notify_routers_updated(self, context, router_ids, operation=None, data=None): if router_ids: self.l3_rpc_notifier.routers_updated( context, router_ids, operation, data) def notify_router_deleted(self, context, router_id): self.l3_rpc_notifier.router_deleted(context, router_id) class L3_NAT_db_mixin(L3_NAT_dbonly_mixin, L3RpcNotifierMixin): """Mixin class to add rpc notifier methods to db_base_plugin_v2.""" def create_router(self, context, router): router_dict = super(L3_NAT_db_mixin, self).create_router(context, router) if router_dict.get('external_gateway_info'): self.notify_router_updated(context, router_dict['id'], None) return router_dict def update_router(self, context, id, router): router_dict = super(L3_NAT_db_mixin, self).update_router(context, id, router) self.notify_router_updated(context, 
router_dict['id'], None)
        return router_dict

    def delete_router(self, context, id):
        super(L3_NAT_db_mixin, self).delete_router(context, id)
        self.notify_router_deleted(context, id)

    def notify_router_interface_action(
            self, context, router_interface_info, action):
        l3_method = '%s_router_interface' % action
        super(L3_NAT_db_mixin, self).notify_routers_updated(
            context, [router_interface_info['id']], l3_method,
            {'subnet_id': router_interface_info['subnet_id']})

        mapping = {'add': 'create', 'remove': 'delete'}
        notifier = n_rpc.get_notifier('network')
        router_event = 'router.interface.%s' % mapping[action]
        notifier.info(context, router_event,
                      {'router_interface': router_interface_info})

    def add_router_interface(self, context, router_id, interface_info=None):
        router_interface_info = super(
            L3_NAT_db_mixin, self).add_router_interface(
                context, router_id, interface_info)
        self.notify_router_interface_action(
            context, router_interface_info, 'add')
        return router_interface_info

    def remove_router_interface(self, context, router_id, interface_info):
        router_interface_info = super(
            L3_NAT_db_mixin, self).remove_router_interface(
                context, router_id, interface_info)
        self.notify_router_interface_action(
            context, router_interface_info, 'remove')
        return router_interface_info

    def create_floatingip(self, context, floatingip,
                          initial_status=constants.FLOATINGIP_STATUS_ACTIVE):
        floatingip_dict = super(L3_NAT_db_mixin, self).create_floatingip(
            context, floatingip, initial_status)
        router_id = floatingip_dict['router_id']
        self.notify_router_updated(context, router_id, 'create_floatingip')
        return floatingip_dict

    def update_floatingip(self, context, id, floatingip):
        old_floatingip, floatingip = self._update_floatingip(
            context, id, floatingip)
        router_ids = self._floatingips_to_router_ids(
            [old_floatingip, floatingip])
        super(L3_NAT_db_mixin, self).notify_routers_updated(
            context, router_ids, 'update_floatingip', {})
        return floatingip

    def delete_floatingip(self, context, id):
        floating_ip = self._delete_floatingip(context, id)
        self.notify_router_updated(context, floating_ip['router_id'],
                                   'delete_floatingip')

    def disassociate_floatingips(self, context, port_id, do_notify=True):
        """Disassociate all floating IPs linked to specific port.

        @param port_id: ID of the port to disassociate floating IPs.
        @param do_notify: whether we should notify routers right away.
        @return: set of router-ids that require notification updates
                 if do_notify is False, otherwise None.
        """
        router_ids = super(L3_NAT_db_mixin, self).disassociate_floatingips(
            context, port_id, do_notify)
        if do_notify:
            self.notify_routers_updated(context, router_ids)
            # since caller assumes that we handled notifications on its
            # behalf, return nothing
            return

        return router_ids

    def notify_routers_updated(self, context, router_ids):
        super(L3_NAT_db_mixin, self).notify_routers_updated(
            context, list(router_ids), 'disassociate_floatingips', {})

    def _migrate_router_ports(
            self, context, router_db, old_owner, new_owner):
        """Update the model to support the dvr case of a router."""
        for rp in router_db.attached_ports:
            if rp.port_type == old_owner:
                rp.port_type = new_owner
                rp.port.device_owner = new_owner

neutron-16.0.0.0b2.dev214/neutron/db/l3_dvr_db.py

# Copyright (c) 2014 OpenStack Foundation. All rights reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.api.definitions import portbindings from neutron_lib.api.definitions import portbindings_extended from neutron_lib.api import validators from neutron_lib.callbacks import events from neutron_lib.callbacks import exceptions from neutron_lib.callbacks import priority_group from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as const from neutron_lib.db import api as db_api from neutron_lib import exceptions as n_exc from neutron_lib.exceptions import agent as agent_exc from neutron_lib.exceptions import l3 as l3_exc from neutron_lib.objects import exceptions as o_exc from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from neutron_lib.plugins import utils as plugin_utils from oslo_config import cfg from oslo_log import helpers as log_helper from oslo_log import log as logging from oslo_utils import excutils import six from neutron._i18n import _ from neutron.api import extensions from neutron.common import utils as n_utils from neutron.conf.db import l3_dvr_db from neutron.db import dvr_mac_db from neutron.db import l3_attrs_db from neutron.db import l3_db from neutron.db.models import allowed_address_pair as aap_models from neutron.db import models_v2 from neutron.extensions import _admin_state_down_before_update_lib from neutron.ipam import utils as ipam_utils from neutron.objects import agent as ag_obj from neutron.objects import l3agent as rb_obj from neutron.objects import router as l3_obj LOG = logging.getLogger(__name__) l3_dvr_db.register_db_l3_dvr_opts() _IS_ADMIN_STATE_DOWN_NECESSARY = None def is_admin_state_down_necessary(): global _IS_ADMIN_STATE_DOWN_NECESSARY if _IS_ADMIN_STATE_DOWN_NECESSARY is None: _IS_ADMIN_STATE_DOWN_NECESSARY = \ _admin_state_down_before_update_lib.ALIAS in (extensions. PluginAwareExtensionManager.get_instance().extensions) return _IS_ADMIN_STATE_DOWN_NECESSARY # TODO(slaweq): this should be moved to neutron_lib.plugins.utils module def is_port_bound(port): active_binding = plugin_utils.get_port_binding_by_status_and_host( port.get("port_bindings", []), const.ACTIVE) if not active_binding: LOG.warning("Binding for port %s was not found.", port) return False return active_binding[portbindings_extended.VIF_TYPE] not in [ portbindings.VIF_TYPE_UNBOUND, portbindings.VIF_TYPE_BINDING_FAILED] @registry.has_registry_receivers class DVRResourceOperationHandler(object): """Contains callbacks for DVR operations. This can be implemented as a mixin or can be intantiated as a stand-alone object. Either way, it will subscribe itself to the relevant L3 events and use the plugin directory to find the L3 plugin to make calls to it as necessary. 
""" related_dvr_router_hosts = {} related_dvr_router_routers = {} @property def l3plugin(self): return directory.get_plugin(plugin_constants.L3) @registry.receives(resources.ROUTER, [events.PRECOMMIT_CREATE], priority_group.PRIORITY_ROUTER_EXTENDED_ATTRIBUTE) def _set_distributed_flag(self, resource, event, trigger, context, router, router_db, **kwargs): """Event handler to set distributed flag on creation.""" dist = is_distributed_router(router) router['distributed'] = dist self.l3plugin.set_extra_attr_value(context, router_db, 'distributed', dist) def _validate_router_migration(self, context, router_db, router_res, old_router=None): """Allow transition only when admin_state_up=False""" admin_state_down_extension_loaded = is_admin_state_down_necessary() # to preserve extant API behavior, only check the distributed attribute # of old_router when the "router-admin-state-down-before-update" shim # API extension is loaded. Don't bother checking if old_router is # "None" if old_router and admin_state_down_extension_loaded: original_distributed_state = old_router.get('distributed') else: original_distributed_state = router_db.extra_attributes.distributed requested_distributed_state = router_res.get('distributed', None) distributed_changed = ( requested_distributed_state is not None and requested_distributed_state != original_distributed_state) if not distributed_changed: return False # to preserve old API behavior, only check old_router if shim API # extension has been loaded if admin_state_down_extension_loaded and old_router: # if one OR both routers is still up, the *collective* # "admin_state_up" should be True and we should throw the # BadRequest exception below. admin_state_up = (old_router.get('admin_state_up') or router_db.get('admin_state_up')) else: admin_state_up = router_db.get('admin_state_up') if admin_state_up: msg = _("Cannot change the 'distributed' attribute of active " "routers. Please set router admin_state_up to False " "prior to upgrade") raise n_exc.BadRequest(resource='router', msg=msg) # Notify advanced services of the imminent state transition # for the router. 
try: kwargs = {'context': context, 'router': router_db} registry.notify( resources.ROUTER, events.BEFORE_UPDATE, self, **kwargs) except exceptions.CallbackFailure as e: # NOTE(armax): preserve old check's behavior if len(e.errors) == 1: raise e.errors[0].error raise l3_exc.RouterInUse(router_id=router_db['id'], reason=e) return True @registry.receives(resources.ROUTER, [events.PRECOMMIT_UPDATE], priority_group.PRIORITY_ROUTER_EXTENDED_ATTRIBUTE) def _handle_distributed_migration(self, resource, event, trigger, payload=None): """Event handler for router update migration to distributed.""" if not self._validate_router_migration( payload.context, payload.desired_state, payload.request_body, payload.states[0]): return migrating_to_distributed = ( not payload.desired_state.extra_attributes.distributed and payload.request_body.get('distributed') is True) if migrating_to_distributed: if payload.states[0]['ha']: old_owner = const.DEVICE_OWNER_HA_REPLICATED_INT else: old_owner = const.DEVICE_OWNER_ROUTER_INTF self.l3plugin._migrate_router_ports( payload.context, payload.desired_state, old_owner=old_owner, new_owner=const.DEVICE_OWNER_DVR_INTERFACE) else: if payload.request_body.get('ha'): new_owner = const.DEVICE_OWNER_HA_REPLICATED_INT else: new_owner = const.DEVICE_OWNER_ROUTER_INTF self.l3plugin._migrate_router_ports( payload.context, payload.desired_state, old_owner=const.DEVICE_OWNER_DVR_INTERFACE, new_owner=new_owner) cur_agents = self.l3plugin.list_l3_agents_hosting_router( payload.context, payload.resource_id)['agents'] for agent in cur_agents: self.l3plugin._unbind_router( payload.context, payload.resource_id, agent['id']) self.l3plugin.set_extra_attr_value( payload.context, payload.desired_state, 'distributed', migrating_to_distributed) @registry.receives(resources.ROUTER, [events.AFTER_UPDATE], priority_group.PRIORITY_ROUTER_EXTENDED_ATTRIBUTE) def _delete_distributed_port_bindings_after_change(self, resource, event, trigger, context, router_id, router, request_attrs, router_db, **kwargs): old_router = kwargs['old_router'] if (old_router and old_router['distributed'] and not router['distributed']): self._core_plugin.delete_distributed_port_bindings_by_router_id( context.elevated(), router_db['id']) @registry.receives(resources.ROUTER, [events.AFTER_UPDATE], priority_group.PRIORITY_ROUTER_EXTENDED_ATTRIBUTE) def _delete_snat_interfaces_after_change(self, resource, event, trigger, context, router_id, router, request_attrs, router_db, **kwargs): if (router.get(l3_apidef.EXTERNAL_GW_INFO) and not router['distributed']): old_router = kwargs['old_router'] if old_router and old_router['distributed']: self.delete_csnat_router_interface_ports( context.elevated(), router_db) @registry.receives(resources.ROUTER, [events.AFTER_CREATE, events.AFTER_UPDATE], priority_group.PRIORITY_ROUTER_EXTENDED_ATTRIBUTE) def _create_snat_interfaces_after_change(self, resource, event, trigger, context, router_id, router, request_attrs, router_db, **kwargs): if (not router.get(l3_apidef.EXTERNAL_GW_INFO) or not router['distributed']): # we don't care if it's not distributed or not attached to an # external network return if event == events.AFTER_UPDATE: # after an update, we check to see if it was a migration or a # gateway attachment old_router = kwargs['old_router'] do_create = (not old_router['distributed'] or not old_router.get(l3_apidef.EXTERNAL_GW_INFO)) if not do_create: return if not self._create_snat_intf_ports_if_not_exists(context.elevated(), router_db): LOG.debug("SNAT interface ports not created: %s", 
router_db['id']) return router_db def _get_snat_interface_ports_for_router(self, context, router_id): """Return all existing snat_router_interface ports.""" objs = l3_obj.RouterPort.get_objects( context, router_id=router_id, port_type=const.DEVICE_OWNER_ROUTER_SNAT) # TODO(lujinluo): Need Port as synthetic field ports = [self.l3plugin._core_plugin._make_port_dict(rp.db_obj.port) for rp in objs] return ports def _add_csnat_router_interface_port( self, context, router, network_id, subnets, do_pop=True): """Add SNAT interface to the specified router and subnet.""" port_data = {'tenant_id': '', 'network_id': network_id, 'fixed_ips': subnets, 'device_id': router.id, 'device_owner': const.DEVICE_OWNER_ROUTER_SNAT, 'admin_state_up': True, 'name': ''} snat_port = plugin_utils.create_port( self._core_plugin, context, {'port': port_data}) if not snat_port: msg = _("Unable to create the SNAT Interface Port") raise n_exc.BadRequest(resource='router', msg=msg) with plugin_utils.delete_port_on_error(self.l3plugin._core_plugin, context.elevated(), snat_port['id']): l3_obj.RouterPort( context, port_id=snat_port['id'], router_id=router.id, port_type=const.DEVICE_OWNER_ROUTER_SNAT ).create() if do_pop: return self.l3plugin._populate_mtu_and_subnets_for_ports( context, [snat_port]) return snat_port def _create_snat_intf_ports_if_not_exists(self, context, router): """Function to return the snat interface port list. This function will return the snat interface port list if it exists. If the ports do not exist, it will create new ports and then return the list. """ port_list = self._get_snat_interface_ports_for_router( context, router.id) if port_list: self._populate_mtu_and_subnets_for_ports(context, port_list) return port_list int_ports = ( rp.port for rp in router.attached_ports if rp.port_type == const.DEVICE_OWNER_DVR_INTERFACE ) LOG.info('SNAT interface port list does not exist, creating one') v6_subnets = [] network = None for intf in int_ports: if intf.fixed_ips: # Passing the subnet for the port to make sure the IPs # are assigned on the right subnet if multiple subnets # exist for fixed_ip in intf['fixed_ips']: ip_version = n_utils.get_ip_version( fixed_ip.get('ip_address')) if ip_version == const.IP_VERSION_4: snat_port = self._add_csnat_router_interface_port( context, router, intf['network_id'], [{'subnet_id': fixed_ip['subnet_id']}], do_pop=False) port_list.append(snat_port) else: v6_subnets.append( {"subnet_id": fixed_ip['subnet_id']}) network = intf['network_id'] if v6_subnets: snat_port = self._add_csnat_router_interface_port( context, router, network, v6_subnets, do_pop=False) port_list.append(snat_port) if port_list: self.l3plugin._populate_mtu_and_subnets_for_ports( context, port_list) return port_list @registry.receives(resources.ROUTER_GATEWAY, [events.AFTER_DELETE]) def _delete_dvr_internal_ports(self, event, trigger, resource, payload=None): """GW port AFTER_DELETE event handler to cleanup DVR ports. This event is emitted when a router gateway port is being deleted, so go ahead and delete the csnat ports and the floatingip agent gateway port associated with the dvr router. """ if not is_distributed_router(payload.latest_state): return if not payload.metadata.get('new_network_id'): self.delete_csnat_router_interface_ports( payload.context.elevated(), payload.latest_state) network_id = payload.metadata.get('network_id') # NOTE(Swami): Delete the Floatingip agent gateway port # on all hosts when it is the last gateway port in the # given external network. 
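# Editorial quick reference (summary comment, not original source): DVR
# distinguishes its ports by dedicated device_owner values:
#     const.DEVICE_OWNER_ROUTER_GW      -> router external gateway port
#     const.DEVICE_OWNER_ROUTER_SNAT    -> centralized csnat interface
#     const.DEVICE_OWNER_AGENT_GW       -> per-host FIP agent gateway
#     const.DEVICE_OWNER_DVR_INTERFACE  -> distributed router interface
# The query below counts the router gateway ports remaining on the
# external network; only when none are left are the FIP agent gateway
# ports deleted and the fip namespaces torn down.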
filters = {'network_id': [network_id], 'device_owner': [const.DEVICE_OWNER_ROUTER_GW]} ext_net_gw_ports = self._core_plugin.get_ports( payload.context.elevated(), filters) if not ext_net_gw_ports: self.delete_floatingip_agent_gateway_port( payload.context.elevated(), None, network_id) # Send the information to all the L3 Agent hosts # to clean up the fip namespace as it is no longer required. self.l3plugin.l3_rpc_notifier.delete_fipnamespace_for_ext_net( payload.context, network_id) def _delete_fip_agent_port(self, context, network_id, host_id): try: l3_agent_db = self._get_agent_by_type_and_host( context, const.AGENT_TYPE_L3, host_id) except agent_exc.AgentNotFoundByTypeHost: LOG.warning("%(ag)s agent not found for the given host: %(host)s", {'ag': const.AGENT_TYPE_L3, 'host': host_id}) return try: l3_obj.DvrFipGatewayPortAgentBinding( context, network_id=network_id, agent_id=l3_agent_db['id']).delete() except n_exc.ObjectNotFound: pass def delete_floatingip_agent_gateway_port(self, context, host_id, ext_net_id): """Function to delete FIP gateway port with given ext_net_id.""" # delete any fip agent gw port device_filter = {'device_owner': [const.DEVICE_OWNER_AGENT_GW], 'network_id': [ext_net_id]} ports = self._core_plugin.get_ports(context, filters=device_filter) for p in ports: if not host_id or p[portbindings.HOST_ID] == host_id: self._core_plugin.ipam.delete_port(context, p['id']) self._delete_fip_agent_port( context, ext_net_id, p[portbindings.HOST_ID]) if host_id: return def _get_ports_for_allowed_address_pair_ip(self, context, network_id, fixed_ip): """Return all active ports associated with the allowed_addr_pair ip.""" query = context.session.query( models_v2.Port).filter( models_v2.Port.id == aap_models.AllowedAddressPair.port_id, aap_models.AllowedAddressPair.ip_address == fixed_ip, models_v2.Port.network_id == network_id, models_v2.Port.admin_state_up == True) # noqa return query.all() @registry.receives(resources.FLOATING_IP, [events.AFTER_UPDATE]) def _create_dvr_floating_gw_port(self, resource, event, trigger, context, router_id, fixed_port_id, floating_ip_id, floating_network_id, fixed_ip_address, association_event, **kwargs): """Create floating agent gw port for DVR. Floating IP Agent gateway port will be created when a floatingIP association happens. """ if association_event and router_id: admin_ctx = context.elevated() router_dict = self.get_router(admin_ctx, router_id) # Check if distributed router and then create the # FloatingIP agent gateway port if router_dict.get('distributed'): hostid = self._get_dvr_service_port_hostid(context, fixed_port_id) if hostid: # FIXME (Swami): This FIP Agent Gateway port should be # created only once and there should not be a duplicate # for the same host. Until we find a good solution for # augmenting multiple server requests we should use the # existing flow. fip_agent_port = ( self.create_fip_agent_gw_port_if_not_exists( admin_ctx, floating_network_id, hostid)) LOG.debug("FIP Agent gateway port: %s", fip_agent_port) else: # If not hostid, check if the fixed ip provided has to # deal with allowed_address_pairs for a given service # port. Get the port_dict, inherit the service port host # and device owner (if it does not exist). port = self._core_plugin.get_port( admin_ctx, fixed_port_id) allowed_device_owners = ( n_utils.get_dvr_allowed_address_pair_device_owners()) # NOTE: We just need to deal with ports that do not # have a device_owner and ports that are owned by the # dvr service ports except for the compute port and # dhcp port. 
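# Editorial example (hypothetical values): with a keepalived/VRRP pair,
# two service ports on host-a and host-b may both list 10.0.0.100 in
# their allowed_address_pairs while the floating IP is attached to an
# unbound port that owns 10.0.0.100. There is then no binding host to
# inherit directly, so the branch below resolves the active service
# ports carrying that address instead.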
if (port['device_owner'] == "" or port['device_owner'] in allowed_device_owners): addr_pair_active_service_port_list = ( self._get_ports_for_allowed_address_pair_ip( admin_ctx, port['network_id'], fixed_ip_address)) if not addr_pair_active_service_port_list: return @registry.receives(resources.ROUTER_INTERFACE, [events.BEFORE_CREATE]) @db_api.retry_if_session_inactive() def _add_csnat_on_interface_create(self, resource, event, trigger, context, router_db, port, **kwargs): """Event handler to for csnat port creation on interface creation.""" if not router_db.extra_attributes.distributed or not router_db.gw_port: return admin_context = context.elevated() self._add_csnat_router_interface_port( admin_context, router_db, port['network_id'], [{'subnet_id': port['fixed_ips'][-1]['subnet_id']}]) @registry.receives(resources.ROUTER_INTERFACE, [events.AFTER_CREATE]) @db_api.retry_if_session_inactive() def _update_snat_v6_addrs_after_intf_update(self, resource, event, trigger, context, subnets, port, router_id, new_interface, **kwargs): if new_interface: # _add_csnat_on_interface_create handler deals with new ports return # if not a new interface, the interface was added to a new subnet, # which is the first in this list subnet = subnets[0] if not subnet or subnet['ip_version'] != 6: return # NOTE: For IPv6 additional subnets added to the same # network we need to update the CSNAT port with respective # IPv6 subnet # Add new prefix to an existing ipv6 csnat port with the # same network id if one exists admin_ctx = context.elevated() router = self.l3plugin._get_router(admin_ctx, router_id) cs_port = self._find_v6_router_port_by_network_and_device_owner( router, subnet['network_id'], const.DEVICE_OWNER_ROUTER_SNAT) if not cs_port: return new_fixed_ip = {'subnet_id': subnet['id']} fixed_ips = list(cs_port['fixed_ips']) fixed_ips.append(new_fixed_ip) try: updated_port = self._core_plugin.update_port( admin_ctx, cs_port['id'], {'port': {'fixed_ips': fixed_ips}}) except Exception: with excutils.save_and_reraise_exception(): # we need to try to undo the updated router # interface from above so it's not out of sync # with the csnat port. # TODO(kevinbenton): switch to taskflow to manage # these rollbacks. @db_api.retry_db_errors def revert(): # TODO(kevinbenton): even though we get the # port each time, there is a potential race # where we update the port with stale IPs if # another interface operation is occurring at # the same time. This can be fixed in the # future with a compare-and-swap style update # using the revision number of the port. p = self._core_plugin.get_port(admin_ctx, port['id']) rollback_fixed_ips = [ip for ip in p['fixed_ips'] if ip['subnet_id'] != subnet['id']] upd = {'port': {'fixed_ips': rollback_fixed_ips}} self._core_plugin.update_port(admin_ctx, port['id'], upd) try: revert() except Exception: LOG.exception("Failed to revert change " "to router port %s.", port['id']) LOG.debug("CSNAT port updated for IPv6 subnet: %s", updated_port) def _find_v6_router_port_by_network_and_device_owner(self, router, net_id, device_owner): for port in router.attached_ports: p = port['port'] if (p['network_id'] == net_id and p['device_owner'] == device_owner and self.l3plugin._port_has_ipv6_address(p)): return self.l3plugin._core_plugin._make_port_dict(p) def _check_for_multiprefix_csnat_port_and_update(self, context, router, network_id, subnet_id): """Checks if the csnat port contains multiple ipv6 prefixes. 
If the csnat port contains multiple ipv6 prefixes for the given network when a router interface is deleted, make sure we don't delete the port when a single subnet is deleted and just update it with the right fixed_ip. This function returns true if it is a multiprefix port. """ if router.gw_port: # If router has a gateway port, check if it has IPV6 subnet cs_port = ( self._find_v6_router_port_by_network_and_device_owner( router, network_id, const.DEVICE_OWNER_ROUTER_SNAT)) if cs_port: fixed_ips = ( [fixedip for fixedip in cs_port['fixed_ips'] if fixedip['subnet_id'] != subnet_id]) if len(fixed_ips) == len(cs_port['fixed_ips']): # The subnet being detached from router is not part of # ipv6 router port. No need to update the multiprefix. return False if fixed_ips: # multiple prefix port - delete prefix from port self.l3plugin._core_plugin.update_port( context.elevated(), cs_port['id'], {'port': {'fixed_ips': fixed_ips}}) return True return False @registry.receives(resources.ROUTER_INTERFACE, [events.BEFORE_DELETE]) def _cache_related_dvr_routers_info_before_interface_removal( self, resource, event, trigger, payload=None): router_id = payload.resource_id subnet_id = payload.metadata.get("subnet_id") context = payload.context router = self.l3plugin._get_router(context, router_id) if not router.extra_attributes.distributed: return cache_key = (router_id, subnet_id) existing_hosts = self.related_dvr_router_hosts.pop(cache_key, set()) other_hosts = set(self._get_other_dvr_hosts(context, router_id)) self.related_dvr_router_hosts[cache_key] = existing_hosts | other_hosts existing_routers = self.related_dvr_router_routers.pop(cache_key, set()) other_routers = set(self._get_other_dvr_router_ids_connected_router( context, router_id)) self.related_dvr_router_routers[cache_key] = ( existing_routers | other_routers) @registry.receives(resources.ROUTER_INTERFACE, [events.AFTER_DELETE]) @db_api.retry_if_session_inactive() def _cleanup_after_interface_removal(self, resource, event, trigger, context, port, interface_info, router_id, **kwargs): """Handler to cleanup distributed resources after intf removal.""" router = self.l3plugin._get_router(context, router_id) if not router.extra_attributes.distributed: return # we calculate which hosts to notify by checking the hosts for # the removed port's subnets and then subtract out any hosts still # hosting the router for the remaining interfaces router_hosts_for_removed = self.l3plugin._get_dvr_hosts_for_subnets( context, subnet_ids={ip['subnet_id'] for ip in port['fixed_ips']}) router_hosts_after = self.l3plugin._get_dvr_hosts_for_router( context, router_id) removed_hosts = set(router_hosts_for_removed) - set(router_hosts_after) if removed_hosts: # Get hosts where this router is placed as "related" to other dvr # routers and don't remove it from such hosts related_hosts = self._get_other_dvr_hosts(context, router_id) agents = self.l3plugin.get_l3_agents( context, filters={'host': removed_hosts}) bindings = rb_obj.RouterL3AgentBinding.get_objects( context, router_id=router_id) snat_binding = bindings.pop() if bindings else None connected_dvr_routers = set( self.l3plugin._get_other_dvr_router_ids_connected_router( context, router_id)) for agent in agents: is_this_snat_agent = ( snat_binding and snat_binding.l3_agent_id == agent['id']) if (not is_this_snat_agent and agent['host'] not in related_hosts): self.l3plugin.l3_rpc_notifier.router_removed_from_agent( context, router_id, agent['host']) for connected_router_id in connected_dvr_routers: connected_router_hosts = 
set( self.l3plugin._get_dvr_hosts_for_router( context, connected_router_id)) connected_router_hosts |= set( self._get_other_dvr_hosts( context, connected_router_id)) if agent['host'] not in connected_router_hosts: self.l3plugin.l3_rpc_notifier.\ router_removed_from_agent( context, connected_router_id, agent['host']) # if subnet_id not in interface_info, request was to remove by port sub_id = (interface_info.get('subnet_id') or port['fixed_ips'][0]['subnet_id']) self._cleanup_related_hosts_after_interface_removal( context, router_id, sub_id) self._cleanup_related_routers_after_interface_removal( context, router_id, sub_id) is_multiple_prefix_csport = ( self._check_for_multiprefix_csnat_port_and_update( context, router, port['network_id'], sub_id)) if not is_multiple_prefix_csport: # Single prefix port - go ahead and delete the port self.delete_csnat_router_interface_ports( context.elevated(), router, subnet_id=sub_id) def _cleanup_related_hosts_after_interface_removal( self, context, router_id, subnet_id): router_hosts = self.l3plugin._get_dvr_hosts_for_router( context, router_id) cache_key = (router_id, subnet_id) related_dvr_router_hosts_before = self.related_dvr_router_hosts.pop( cache_key, set()) related_dvr_router_hosts_after = set(self._get_other_dvr_hosts( context, router_id)) related_dvr_router_hosts_before -= set(router_hosts) related_removed_hosts = ( related_dvr_router_hosts_before - related_dvr_router_hosts_after) if related_removed_hosts: agents = self.l3plugin.get_l3_agents( context, filters={'host': related_removed_hosts}) bindings = rb_obj.RouterL3AgentBinding.get_objects( context, router_id=router_id) snat_binding = bindings.pop() if bindings else None for agent in agents: is_this_snat_agent = ( snat_binding and snat_binding.l3_agent_id == agent['id']) if not is_this_snat_agent: self.l3plugin.l3_rpc_notifier.router_removed_from_agent( context, router_id, agent['host']) def _cleanup_related_routers_after_interface_removal( self, context, router_id, subnet_id): router_hosts = self.l3plugin._get_dvr_hosts_for_router( context, router_id) cache_key = (router_id, subnet_id) related_dvr_routers_before = self.related_dvr_router_routers.pop( cache_key, set()) related_dvr_routers_after = set( self._get_other_dvr_router_ids_connected_router( context, router_id)) related_routers_to_remove = ( related_dvr_routers_before - related_dvr_routers_after) for related_router in related_routers_to_remove: related_router_hosts = self.l3plugin._get_dvr_hosts_for_router( context, related_router) hosts_to_remove = set(router_hosts) - set(related_router_hosts) for host in hosts_to_remove: self.l3plugin.l3_rpc_notifier.router_removed_from_agent( context, related_router, host) def delete_csnat_router_interface_ports(self, context, router, subnet_id=None): # Each csnat router interface port is associated # with a subnet, so we need to pass the subnet id to # delete the right ports. 
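# Editorial usage sketch (values hypothetical): removing a single subnet
# from a distributed router should delete only the csnat port carrying
# that subnet, e.g.
#
#     self.delete_csnat_router_interface_ports(
#         context.elevated(), router_db, subnet_id=subnet['id'])
#
# whereas subnet_id=None (as used on gateway removal above) clears every
# csnat port owned by the router.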
filters = {'device_owner': [const.DEVICE_OWNER_ROUTER_SNAT], 'device_id': [router.id]} c_snat_ports = self.l3plugin._core_plugin.get_ports( context, filters=filters ) for p in c_snat_ports: if subnet_id is None or not p['fixed_ips']: if not p['fixed_ips']: LOG.info("CSNAT port has no IPs: %s", p) self.l3plugin._core_plugin.delete_port(context, p['id'], l3_port_check=False) else: if p['fixed_ips'][0]['subnet_id'] == subnet_id: LOG.debug("Subnet matches: %s", subnet_id) self.l3plugin._core_plugin.delete_port(context, p['id'], l3_port_check=False) class _DVRAgentInterfaceMixin(object): """Contains calls made by the DVR scheduler and RPC interface. Must be instantiated as a mixin with the L3 plugin. """ def _get_snat_sync_interfaces(self, context, router_ids): """Query router interfaces that relate to list of router_ids.""" if not router_ids: return [] objs = l3_obj.RouterPort.get_objects( context, router_id=router_ids, port_type=const.DEVICE_OWNER_ROUTER_SNAT) interfaces = collections.defaultdict(list) for rp in objs: # TODO(lujinluo): Need Port as synthetic field interfaces[rp.router_id].append( self._core_plugin._make_port_dict(rp.db_obj.port)) LOG.debug("Return the SNAT ports: %s", interfaces) return interfaces def _get_gateway_port_host(self, context, router, gw_ports): binding_objs = rb_obj.RouterL3AgentBinding.get_objects( context, router_id=[router['id']]) if not binding_objs: return gw_port_id = router['gw_port_id'] # Collect gw ports only if available if gw_port_id and gw_ports.get(gw_port_id): l3_agent = ag_obj.Agent.get_object(context, id=binding_objs[0].l3_agent_id) return l3_agent.host def _build_routers_list(self, context, routers, gw_ports): # Perform a single query up front for all routers routers = super(_DVRAgentInterfaceMixin, self)._build_routers_list( context, routers, gw_ports) for router in routers: gw_port_host = self._get_gateway_port_host( context, router, gw_ports) LOG.debug("Set router %s gateway port host: %s", router['id'], gw_port_host) router['gw_port_host'] = gw_port_host return routers def _process_routers(self, context, routers, agent): routers_dict = {} snat_intfs_by_router_id = self._get_snat_sync_interfaces( context, [r['id'] for r in routers]) fip_agent_gw_ports = None LOG.debug("FIP Agent: %s ", agent.id) for router in routers: routers_dict[router['id']] = router if router['gw_port_id']: snat_router_intfs = snat_intfs_by_router_id[router['id']] LOG.debug("SNAT ports returned: %s ", snat_router_intfs) router[const.SNAT_ROUTER_INTF_KEY] = snat_router_intfs if not fip_agent_gw_ports: fip_agent_gw_ports = self._get_fip_agent_gw_ports( context, agent.id) LOG.debug("FIP Agent ports: %s", fip_agent_gw_ports) router[const.FLOATINGIP_AGENT_INTF_KEY] = ( fip_agent_gw_ports) return routers_dict @staticmethod def _get_floating_ip_host(floating_ip): """Function to return floating IP host binding state By default, floating IPs are not bound to any host. Instead of using the empty string to denote this use a constant. 
""" return floating_ip.get('host', const.FLOATING_IP_HOST_UNBOUND) def _process_floating_ips_dvr(self, context, routers_dict, floating_ips, host, agent): LOG.debug("FIP Agent : %s ", agent.id) for floating_ip in floating_ips: router = routers_dict.get(floating_ip['router_id']) if router: if router['distributed']: if self._skip_floating_ip_for_mismatched_agent_or_host( floating_ip, agent, host): continue router_floatingips = router.get(const.FLOATINGIP_KEY, []) router_floatingips.append(floating_ip) router[const.FLOATINGIP_KEY] = router_floatingips def _skip_floating_ip_for_mismatched_agent_or_host(self, floating_ip, agent, host): """Function to check if floating IP processing can be skipped.""" fip_host = self._get_floating_ip_host(floating_ip) # Skip if it is unbound if fip_host == const.FLOATING_IP_HOST_UNBOUND: return True # Skip if the given agent is in the wrong mode - SNAT bound # requires DVR_SNAT agent. agent_mode = self._get_agent_mode(agent) if (agent_mode in [const.L3_AGENT_MODE_LEGACY, const.L3_AGENT_MODE_DVR] and floating_ip.get(const.DVR_SNAT_BOUND)): return True # Skip if it is bound, but not to the given host fip_dest_host = floating_ip.get('dest_host') if (fip_host != const.FLOATING_IP_HOST_NEEDS_BINDING and fip_host != host and fip_dest_host is None): return True # not being skipped, log host LOG.debug("Floating IP host: %s", fip_host) return False def _get_fip_agent_gw_ports(self, context, fip_agent_id): """Return list of floating agent gateway ports for the agent.""" if not fip_agent_id: return [] filters = {'device_id': [fip_agent_id], 'device_owner': [const.DEVICE_OWNER_AGENT_GW]} ports = self._core_plugin.get_ports(context.elevated(), filters) LOG.debug("Return the FIP ports: %s ", ports) return ports @log_helper.log_method_call def _get_dvr_sync_data(self, context, host, agent, router_ids=None, active=None): routers, interfaces, floating_ips = self._get_router_info_list( context, router_ids=router_ids, active=active, device_owners=const.ROUTER_INTERFACE_OWNERS) dvr_router_ids = set(router['id'] for router in routers if is_distributed_router(router)) floating_ip_port_ids = [fip['port_id'] for fip in floating_ips if fip['router_id'] in dvr_router_ids] if floating_ip_port_ids: port_filter = {'id': floating_ip_port_ids} ports = self._core_plugin.get_ports(context, port_filter) port_dict = {} for port in ports: # Make sure that we check for cases were the port # might be in a pre-live migration state or also # check for the portbinding profile 'migrating_to' # key for the host. port_profile = port.get(portbindings.PROFILE) port_in_migration = ( port_profile and port_profile.get('migrating_to') == host) # All unbound ports with floatingip irrespective of # the device owner should be included as valid ports # and updated. if port_in_migration or self._is_unbound_port(port): port_dict.update({port['id']: port}) continue port_host = port[portbindings.HOST_ID] if port_host: l3_agent_on_host = self.get_l3_agents( context, filters={'host': [port_host]}) l3_agent_mode = '' if len(l3_agent_on_host): l3_agent_mode = self._get_agent_mode( l3_agent_on_host[0]) requesting_agent_mode = self._get_agent_mode(agent) # Consider the ports where the portbinding host and # request host match. if port_host == host: # Check for agent type before adding the port_dict. # For VMs that are hosted on the dvr_no_external # agent and if the request is coming from the same # agent on re-syncs then we need to add the appropriate # port['agent'] before updating the dict. 
if (l3_agent_mode == ( const.L3_AGENT_MODE_DVR_NO_EXTERNAL) and requesting_agent_mode == ( const.L3_AGENT_MODE_DVR_NO_EXTERNAL)): port['agent'] = ( const.L3_AGENT_MODE_DVR_NO_EXTERNAL) port_dict.update({port['id']: port}) # Consider the ports where the portbinding host and # request host do not match. else: # If the agent requesting is dvr_snat but # the portbinding host resides in dvr_no_external # agent then include the port. if (l3_agent_mode == ( const.L3_AGENT_MODE_DVR_NO_EXTERNAL) and requesting_agent_mode == ( const.L3_AGENT_MODE_DVR_SNAT)): port['agent'] = ( const.L3_AGENT_MODE_DVR_NO_EXTERNAL) port_dict.update({port['id']: port}) # Add the port binding host to the floatingip dictionary for fip in floating_ips: # Assume no host binding required fip['host'] = const.FLOATING_IP_HOST_UNBOUND vm_port = port_dict.get(fip['port_id'], None) if vm_port: # Default value if host port-binding required fip['host'] = const.FLOATING_IP_HOST_NEEDS_BINDING port_host = vm_port[portbindings.HOST_ID] if port_host: fip['dest_host'] = ( self._get_dvr_migrating_service_port_hostid( context, fip['port_id'], port=vm_port)) vm_port_agent_mode = vm_port.get('agent', None) if (vm_port_agent_mode != const.L3_AGENT_MODE_DVR_NO_EXTERNAL): # For floatingip configured on ports that do not # reside on a 'dvr_no_external' agent, add the # fip host binding, else it will be created # in the 'dvr_snat' agent. fip['host'] = port_host # Handle the case where there is no host binding # for the private ports that are associated with # floating ip. if fip['host'] == const.FLOATING_IP_HOST_NEEDS_BINDING: fip[const.DVR_SNAT_BOUND] = True routers_dict = self._process_routers(context, routers, agent) self._process_floating_ips_dvr(context, routers_dict, floating_ips, host, agent) ports_to_populate = [] for router in routers_dict.values(): if router.get('gw_port'): ports_to_populate.append(router['gw_port']) if router.get(const.FLOATINGIP_AGENT_INTF_KEY): ports_to_populate += router[const.FLOATINGIP_AGENT_INTF_KEY] if router.get(const.SNAT_ROUTER_INTF_KEY): ports_to_populate += router[const.SNAT_ROUTER_INTF_KEY] ports_to_populate += interfaces self._populate_mtu_and_subnets_for_ports(context, ports_to_populate) self._process_interfaces(routers_dict, interfaces) return list(routers_dict.values()) def _is_unbound_port(self, port): """Check for port-bindings irrespective of device_owner.""" return not port[portbindings.HOST_ID] def _get_dvr_service_port_hostid(self, context, port_id, port=None): """Returns the portbinding host_id for dvr service port.""" port_db = port or self._core_plugin.get_port(context, port_id) return port_db[portbindings.HOST_ID] or None def _get_dvr_migrating_service_port_hostid(self, context, port_id, port=None): """Returns the migrating host_id from the migrating profile.""" port_db = port or self._core_plugin.get_port(context, port_id) port_profile = port_db.get(portbindings.PROFILE) port_dest_host = None if port_profile: port_dest_host = port_profile.get('migrating_to') return port_dest_host def _get_agent_gw_ports_exist_for_network( self, context, network_id, host, agent_id): """Return agent gw port if it exists, or None otherwise.""" if not network_id: LOG.debug("Network not specified") return filters = { 'network_id': [network_id], 'device_id': [agent_id], 'device_owner': [const.DEVICE_OWNER_AGENT_GW] } ports = self._core_plugin.get_ports(context, filters) if ports: return ports[0] def check_for_fip_and_create_agent_gw_port_on_host_if_not_exists( self, context, port, host): """Create fip agent_gw_port 
on host if not exists""" fips = self._get_floatingips_by_port_id(context, port['id']) if not fips: return fip = fips[0] network_id = fip.get('floating_network_id') self.create_fip_agent_gw_port_if_not_exists( context.elevated(), network_id, host) def create_fip_agent_gw_port_if_not_exists(self, context, network_id, host): """Function to return the FIP Agent GW port. This function will create a FIP Agent GW port if required. If the port already exists, it will return the existing port and will not create a new one. """ try: l3_agent_db = self._get_agent_by_type_and_host( context, const.AGENT_TYPE_L3, host) except agent_exc.AgentNotFoundByTypeHost: LOG.warning("%(ag)s agent not found for the given host: %(host)s", {'ag': const.AGENT_TYPE_L3, 'host': host}) return if not l3_agent_db: return l3_agent_mode = self._get_agent_mode(l3_agent_db) if l3_agent_mode == const.L3_AGENT_MODE_DVR_NO_EXTERNAL: return LOG.debug("Agent ID exists: %s", l3_agent_db['id']) agent_port = self._get_agent_gw_ports_exist_for_network( context, network_id, host, l3_agent_db['id']) if not agent_port: LOG.info("Floating IP Agent Gateway port for network %(network)s " "does not exist on host %(host)s. Creating one.", {'network': network_id, 'host': host}) fip_agent_port_obj = l3_obj.DvrFipGatewayPortAgentBinding( context, network_id=network_id, agent_id=l3_agent_db['id'] ) try: fip_agent_port_obj.create() except o_exc.NeutronDbObjectDuplicateEntry: LOG.debug("Floating IP Agent Gateway port for network " "%(network)s already exists on host %(host)s. " "Probably it was just created by another worker.", {'network': network_id, 'host': host}) agent_port = self._get_agent_gw_ports_exist_for_network( context, network_id, host, l3_agent_db['id']) LOG.debug("Floating IP Agent Gateway port %(gw)s found " "for the destination host: %(dest_host)s", {'gw': agent_port, 'dest_host': host}) else: port_data = {'tenant_id': '', 'network_id': network_id, 'device_id': l3_agent_db['id'], 'device_owner': const.DEVICE_OWNER_AGENT_GW, portbindings.HOST_ID: host, 'admin_state_up': True, 'name': ''} agent_port = plugin_utils.create_port( self._core_plugin, context, {'port': port_data}) if not agent_port: fip_agent_port_obj.delete() msg = _("Unable to create Floating IP Agent Gateway port") raise n_exc.BadRequest(resource='router', msg=msg) LOG.debug("Floating IP Agent Gateway port %(gw)s created " "for the destination host: %(dest_host)s", {'gw': agent_port, 'dest_host': host}) self._populate_mtu_and_subnets_for_ports(context, [agent_port]) return agent_port def _get_subnet_id_for_given_fixed_ip(self, context, fixed_ip, port_dict): """Returns the subnet_id that matches the fixedip on a network.""" filters = {'network_id': [port_dict['network_id']]} subnets = self._core_plugin.get_subnets(context, filters) for subnet in subnets: if ipam_utils.check_subnet_ip(subnet['cidr'], fixed_ip): return subnet['id'] class L3_NAT_with_dvr_db_mixin(_DVRAgentInterfaceMixin, DVRResourceOperationHandler, l3_attrs_db.ExtraAttributesMixin, l3_db.L3_NAT_db_mixin): """Mixin class to enable DVR support.""" router_device_owners = ( l3_db.L3_NAT_db_mixin.router_device_owners + (const.DEVICE_OWNER_DVR_INTERFACE, const.DEVICE_OWNER_ROUTER_SNAT, const.DEVICE_OWNER_AGENT_GW)) def _get_device_owner(self, context, router=None): """Get device_owner for the specified router.""" router_is_uuid = isinstance(router, six.string_types) if router_is_uuid: router = self._get_router(context, router) if is_distributed_router(router): return const.DEVICE_OWNER_DVR_INTERFACE return 
super(L3_NAT_with_dvr_db_mixin, self)._get_device_owner(context, router) @db_api.retry_if_session_inactive() def create_floatingip(self, context, floatingip, initial_status=const.FLOATINGIP_STATUS_ACTIVE): floating_ip = self._create_floatingip( context, floatingip, initial_status) self._notify_floating_ip_change(context, floating_ip) return floating_ip def get_dvr_agent_on_host(self, context, fip_host): agent_filters = {'host': [fip_host]} return self.get_l3_agents(context, filters=agent_filters) def _notify_floating_ip_change(self, context, floating_ip): router_id = floating_ip['router_id'] fixed_port_id = floating_ip['port_id'] # we need to notify agents only in case Floating IP is associated if not router_id or not fixed_port_id: return try: # using admin context as router may belong to admin tenant router = self._get_router(context.elevated(), router_id) except l3_exc.RouterNotFound: LOG.warning("Router %s was not found. " "Skipping agent notification.", router_id) return if is_distributed_router(router): host = self._get_dvr_service_port_hostid(context, fixed_port_id) dest_host = self._get_dvr_migrating_service_port_hostid( context, fixed_port_id) if host is not None: l3_agent_on_host = self.get_dvr_agent_on_host( context, host) if not l3_agent_on_host: LOG.warning("No valid L3 agent found for the given host: " "%s", host) return agent_mode = self._get_agent_mode(l3_agent_on_host[0]) if agent_mode == const.L3_AGENT_MODE_DVR_NO_EXTERNAL: # If the agent hosting the fixed port is in # 'dvr_no_external' mode, then set the host to None, # since we would be centralizing the floatingip for # those fixed_ports. host = None if host is not None: self.l3_rpc_notifier.routers_updated_on_host( context, [router_id], host) if dest_host and dest_host != host: self.l3_rpc_notifier.routers_updated_on_host( context, [router_id], dest_host) else: centralized_agent_list = self.list_l3_agents_hosting_router( context.elevated(), router_id)['agents'] for agent in centralized_agent_list: self.l3_rpc_notifier.routers_updated_on_host( context, [router_id], agent['host']) else: self.notify_router_updated(context, router_id) @db_api.retry_if_session_inactive() def update_floatingip(self, context, id, floatingip): old_floatingip, floatingip = self._update_floatingip( context, id, floatingip) self._notify_floating_ip_change(context, old_floatingip) if (floatingip['router_id'] != old_floatingip['router_id'] or floatingip['port_id'] != old_floatingip['port_id']): self._notify_floating_ip_change(context, floatingip) return floatingip @db_api.retry_if_session_inactive() def delete_floatingip(self, context, id): floating_ip = self._delete_floatingip(context, id) self._notify_floating_ip_change(context, floating_ip) @db_api.retry_if_session_inactive() def is_router_distributed(self, context, router_id): if router_id: return is_distributed_router( self.get_router(context.elevated(), router_id)) return False def get_ports_under_dvr_connected_subnet(self, context, subnet_id): query = dvr_mac_db.get_ports_query_by_subnet_and_ip(context, subnet_id) ports = [p for p in query.all() if is_port_bound(p)] return [ self.l3plugin._core_plugin._make_port_dict( port, process_extensions=False) for port in ports ] def is_distributed_router(router): """Return True if router to be handled is distributed.""" # See if router is a request body if isinstance(router, dict): requested_router_type = router.get('distributed') # If not, see if router DB or OVO object contains Extra Attributes elif router.extra_attributes: requested_router_type = 
router.extra_attributes.distributed else: requested_router_type = None if validators.is_attr_set(requested_router_type): return requested_router_type return cfg.CONF.router_distributed ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/l3_dvr_ha_scheduler_db.py0000644000175000017500000000367600000000000024154 0ustar00coreycorey00000000000000# Copyright (c) 2016 Hewlett Packard Enterprise Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import neutron.db.l3_dvrscheduler_db as l3agent_dvr_sch_db import neutron.db.l3_hascheduler_db as l3_ha_sch_db class L3_DVR_HA_scheduler_db_mixin(l3agent_dvr_sch_db.L3_DVRsch_db_mixin, l3_ha_sch_db.L3_HA_scheduler_db_mixin): def get_dvr_routers_to_remove(self, context, port_id, get_related_hosts_info=True): """Returns info about which routers should be removed In case dvr serviceable port was deleted we need to check if any dvr routers should be removed from l3 agent on port's host """ remove_router_info = super( L3_DVR_HA_scheduler_db_mixin, self).get_dvr_routers_to_remove( context, port_id, get_related_hosts_info) # Process the router information which was returned to make # sure we don't delete routers which have dvrhs snat bindings. processed_remove_router_info = [] for router_info in remove_router_info: router_id = router_info['router_id'] agent_id = router_info['agent_id'] if not self._check_router_agent_ha_binding( context, router_id, agent_id): processed_remove_router_info.append(router_info) return processed_remove_router_info ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/l3_dvrscheduler_db.py0000644000175000017500000007033600000000000023342 0ustar00coreycorey00000000000000# (c) Copyright 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
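# Editorial overview (summary comment, not part of the original module):
# scheduling here is driven by port lifecycle events rather than by
# explicit API calls. An illustrative flow, with hypothetical values:
#
#     VM port created on host "compute-1" in subnet S
#         -> _notify_l3_agent_new_port (PORT AFTER_CREATE callback)
#         -> dvr_handle_new_service_port(context, port)
#         -> l3_rpc_notifier.routers_updated_on_host(
#                context, <routers with an interface on S>, "compute-1")
#
# after which the L3 agent on compute-1 syncs and wires the DVR router
# locally. The subscribe() function at the end of this module registers
# these handlers.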
from neutron_lib.api.definitions import portbindings from neutron_lib.api import extensions from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as n_const from neutron_lib.exceptions import l3 as l3_exc from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from oslo_config import cfg from oslo_log import helpers as log_helpers from oslo_log import log as logging from sqlalchemy import or_ from neutron.common import utils as n_utils from neutron.conf.db import l3_dvr_db as l3_dvr_db_conf from neutron.db import agentschedulers_db from neutron.db import l3_agentschedulers_db as l3agent_sch_db from neutron.db import l3_dvr_db from neutron.db.models import l3 as l3_models from neutron.db import models_v2 from neutron.objects import l3agent as rb_obj from neutron.plugins.ml2 import db as ml2_db from neutron.plugins.ml2 import models as ml2_models LOG = logging.getLogger(__name__) l3_dvr_db_conf.register_db_l3_dvr_opts() class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin): """Mixin class for L3 DVR scheduler. DVR currently supports the following use cases: - East/West (E/W) traffic between VMs: this is handled in a distributed manner across Compute Nodes without a centralized element. This includes E/W traffic between VMs on the same Compute Node. - North/South traffic for Floating IPs (FIP N/S): this is supported on the distributed routers on Compute Nodes when there is external network connectivity and on centralized nodes when the port is not bound or when the agent is configured as 'dvr_no_external'. - North/South traffic for SNAT (SNAT N/S): this is supported via a centralized element that handles the SNAT traffic. To support these use cases, DVR routers rely on an L3 agent that runs on a central node (also known as Network Node or Service Node), as well as L3 agents that run individually on each Compute Node of an OpenStack cloud. Each L3 agent creates namespaces to route traffic according to the use cases outlined above. The mechanism adopted for creating and managing these namespaces is via (Router, Agent) binding and Scheduling in general. The main difference between distributed routers and centralized ones is that in the distributed case, multiple bindings will exist, one for each of the agents participating in the routed topology for the specific router. These bindings are created in the following circumstances: - A subnet is added to a router via router-interface-add, and that subnet has running VMs deployed in it. A binding will be created between the router and any L3 agent whose Compute Node is hosting the VM(s). - An external gateway is set to a router via router-gateway-set. A binding will be created between the router and the L3 agent running centrally on the Network Node. Therefore, any time a router operation occurs (create, update or delete), scheduling will determine whether the router needs to be associated to an L3 agent, just like a regular centralized router, with the difference that, in the distributed case, the bindings required are established based on the state of the router and the Compute Nodes. """ def dvr_handle_new_service_port(self, context, port, dest_host=None, unbound_migrate=False, router_id=None): """Handle new dvr service port creation. When a new dvr service port is created, this function will schedule a dvr router to a new compute node if needed and notify the l3 agent on that node. 
The 'dest_host' will provide the destination host of the port in case of service port migration. If an unbound port migrates and becomes a bound port, send notification to the snat_agents and to the bound host. """ port_host = dest_host or port[portbindings.HOST_ID] l3_agent_on_host = (self.get_l3_agents( context, filters={'host': [port_host]}) or [None])[0] if not l3_agent_on_host: return if dest_host and router_id is not None: # Make sure we create the floatingip agent gateway port # for the destination node if fip is associated with this # fixed port l3plugin = directory.get_plugin(plugin_constants.L3) router = l3plugin._get_router(context, router_id) if l3_dvr_db.is_distributed_router(router): (l3plugin. check_for_fip_and_create_agent_gw_port_on_host_if_not_exists( context, port, dest_host)) else: LOG.debug("Port-in-Migration: Floating IP has a non-" "distributed router %(router_id)s", {'router_id': router_id}) subnet_ids = [ip['subnet_id'] for ip in port['fixed_ips']] router_ids = self.get_dvr_routers_by_subnet_ids(context, subnet_ids) if not router_ids: return agent_port_host_match = False if unbound_migrate: # This might be a case where it is migrating from unbound # to a bound port. # In that case, forward the notification to the # snat_nodes hosting the routers. # Make a call here to notify the snat nodes. snat_agent_list = self.get_dvr_snat_agent_list(context) for agent in snat_agent_list: LOG.debug('DVR: Handle new unbound migration port, ' 'host %(host)s, router_ids %(router_ids)s', {'host': agent.host, 'router_ids': router_ids}) self.l3_rpc_notifier.routers_updated_on_host( context, router_ids, agent.host) if agent.host == port_host: agent_port_host_match = True if not agent_port_host_match: connected_router_ids = set(router_ids) for router_id in router_ids: connected_router_ids.update( self._get_other_dvr_router_ids_connected_router( context, router_id)) LOG.debug('DVR: Handle new service port, host %(host)s, ' 'router ids %(router_ids)s', {'host': port_host, 'router_ids': connected_router_ids}) self.l3_rpc_notifier.routers_updated_on_host( context, connected_router_ids, port_host) def get_dvr_snat_agent_list(self, context): agent_filters = {'agent_modes': [n_const.L3_AGENT_MODE_DVR_SNAT]} state = agentschedulers_db.get_admin_state_up_filter() return self.get_l3_agents(context, active=state, filters=agent_filters) def get_dvr_routers_by_subnet_ids(self, context, subnet_ids): """Gets the dvr routers on vmport subnets.""" if not subnet_ids: return set() router_ids = set() filter_sub = {'fixed_ips': {'subnet_id': subnet_ids}, 'device_owner': [n_const.DEVICE_OWNER_DVR_INTERFACE]} subnet_ports = self._core_plugin.get_ports( context, filters=filter_sub) for subnet_port in subnet_ports: router_ids.add(subnet_port['device_id']) return router_ids def get_subnet_ids_on_router(self, context, router_id, keep_gateway_port=True): """Return subnet IDs for interfaces attached to the given router.""" subnet_ids = set() filter_rtr = {'device_id': [router_id]} int_ports = self._core_plugin.get_ports(context, filters=filter_rtr) for int_port in int_ports: if (not keep_gateway_port and int_port['device_owner'] == n_const.DEVICE_OWNER_ROUTER_GW): continue int_ips = int_port['fixed_ips'] if int_ips: int_subnet = int_ips[0]['subnet_id'] subnet_ids.add(int_subnet) else: LOG.debug('DVR: Could not find a subnet id ' 'for router %s', router_id) return subnet_ids def get_dvr_routers_to_remove(self, context, deleted_port, get_related_hosts_info=True): """Returns info about which routers should be 
removed In case a dvr serviceable port was deleted, we need to check if any dvr routers should be removed from the l3 agent on the port's host """ if not n_utils.is_dvr_serviced(deleted_port['device_owner']): return [] admin_context = context.elevated() port_host = deleted_port[portbindings.HOST_ID] subnet_ids = [ip['subnet_id'] for ip in deleted_port['fixed_ips']] router_ids = self.get_dvr_routers_by_subnet_ids(admin_context, subnet_ids) if not router_ids: LOG.debug('No DVR routers for this DVR port %(port)s ' 'on host %(host)s', {'port': deleted_port['id'], 'host': port_host}) return [] agent = self._get_agent_by_type_and_host( context, n_const.AGENT_TYPE_L3, port_host) removed_router_info = [] # NOTE(Swami): If the host has any serviceable ports, # we should not remove the router namespace of the # port nor the connected routers' namespaces. # After all serviceable ports on the host for the # connected routers are deleted, we can remove # the router namespace. host_has_serviceable_port = False for router_id in router_ids: if rb_obj.RouterL3AgentBinding.objects_exist(context, router_id=router_id, l3_agent_id=agent.id): # not removing from the agent hosting SNAT for the router continue if self._check_for_rtr_serviceable_ports( admin_context, router_id, port_host): # once we find a serviceable port there is no need to # check further host_has_serviceable_port = True break self._unbind_dvr_port_before_delete(context, router_id, port_host) info = {'router_id': router_id, 'host': port_host, 'agent_id': str(agent.id)} removed_router_info.append(info) # Now collect the connected router info as well to remove # it from the agent, only if there is not a serviceable port. if not host_has_serviceable_port: related_router_ids = set() for router_id in router_ids: connected_dvr_router_ids = set( self._get_other_dvr_router_ids_connected_router( context, router_id)) related_router_ids |= connected_dvr_router_ids related_router_ids = [r_id for r_id in related_router_ids if r_id not in list(router_ids)] for router_id in related_router_ids: if self._check_for_rtr_serviceable_ports( admin_context, router_id, port_host): # once we find a serviceable port there is no need to # check further host_has_serviceable_port = True break self._unbind_dvr_port_before_delete(context, router_id, port_host) info = {'router_id': router_id, 'host': port_host, 'agent_id': str(agent.id)} removed_router_info.append(info) LOG.debug("Router info to be deleted: %s", removed_router_info) return removed_router_info def _check_for_rtr_serviceable_ports( self, admin_context, router_id, port_host): subnet_ids = self.get_subnet_ids_on_router(admin_context, router_id) return self._check_dvr_serviceable_ports_on_host( admin_context, port_host, subnet_ids) def _unbind_dvr_port_before_delete( self, context, router_id, port_host): filter_rtr = {'device_id': [router_id], 'device_owner': [n_const.DEVICE_OWNER_DVR_INTERFACE]} int_ports = self._core_plugin.get_ports( context.elevated(), filters=filter_rtr) for port in int_ports: dvr_binding = (ml2_db. 
get_distributed_port_binding_by_host( context, port['id'], port_host)) if dvr_binding: # unbind this port from router dvr_binding['router_id'] = None dvr_binding.update(dvr_binding) def _get_active_l3_agent_routers_sync_data(self, context, host, agent, router_ids): if extensions.is_extension_supported( self, n_const.L3_HA_MODE_EXT_ALIAS): return self.get_ha_sync_data_for_host(context, host, agent, router_ids=router_ids, active=True) return self._get_dvr_sync_data(context, host, agent, router_ids=router_ids, active=True) def get_hosts_to_notify(self, context, router_id): """Returns all hosts to send notification about router update""" hosts = super(L3_DVRsch_db_mixin, self).get_hosts_to_notify( context, router_id) router = self.get_router(context.elevated(), router_id) if router.get('distributed', False): dvr_hosts = self._get_dvr_hosts_for_router(context, router_id) dvr_hosts = set(dvr_hosts) - set(hosts) dvr_hosts |= self._get_other_dvr_hosts(context, router_id) state = agentschedulers_db.get_admin_state_up_filter() agents = self.get_l3_agents(context, active=state, filters={'host': dvr_hosts}) hosts += [a.host for a in agents] return hosts def _get_dvr_hosts_for_router(self, context, router_id): """Get a list of hosts where specified DVR router should be hosted It will first get IDs of all subnets connected to the router and then get a set of hosts where all dvr serviceable ports on those subnets are bound """ subnet_ids = self.get_subnet_ids_on_router(context, router_id) hosts = self._get_dvr_hosts_for_subnets(context, subnet_ids) LOG.debug('Hosts for router %s: %s', router_id, hosts) return hosts def _get_other_dvr_hosts(self, context, router_id): """Get a list of hosts where specified DVR router should be hosted It will search DVR hosts based on other dvr routers connected to the router. 
""" dvr_hosts = set() connected_dvr_routers = ( self._get_other_dvr_router_ids_connected_router( context, router_id)) for dvr_router in connected_dvr_routers: dvr_hosts |= set( self._get_dvr_hosts_for_router(context, dvr_router)) LOG.debug('Hosts for other DVR routers connected to router ' '%(router_id)s: %(dvr_hosts)s', {'router_id': router_id, 'dvr_hosts': dvr_hosts}) return dvr_hosts def _get_dvr_hosts_for_subnets(self, context, subnet_ids): """Get a list of hosts with DVR servicable ports on subnet_ids.""" host_dvr_dhcp = cfg.CONF.host_dvr_for_dhcp Binding = ml2_models.PortBinding Port = models_v2.Port IPAllocation = models_v2.IPAllocation query = context.session.query(Binding.host).distinct() query = query.join(Binding.port) query = query.join(Port.fixed_ips) query = query.filter(IPAllocation.subnet_id.in_(subnet_ids)) owner_filter = or_( Port.device_owner.startswith(n_const.DEVICE_OWNER_COMPUTE_PREFIX), Port.device_owner.in_( n_utils.get_other_dvr_serviced_device_owners(host_dvr_dhcp))) query = query.filter(owner_filter) hosts = [item[0] for item in query if item[0] != ''] return hosts def _get_dvr_subnet_ids_on_host_query(self, context, host): host_dvr_dhcp = cfg.CONF.host_dvr_for_dhcp query = context.session.query( models_v2.IPAllocation.subnet_id).distinct() query = query.join(models_v2.IPAllocation.port) query = query.join(models_v2.Port.port_bindings) query = query.filter(ml2_models.PortBinding.host == host) owner_filter = or_( models_v2.Port.device_owner.startswith( n_const.DEVICE_OWNER_COMPUTE_PREFIX), models_v2.Port.device_owner.in_( n_utils.get_other_dvr_serviced_device_owners(host_dvr_dhcp))) query = query.filter(owner_filter) return query def _get_dvr_router_ids_for_host(self, context, host): subnet_ids_on_host_query = self._get_dvr_subnet_ids_on_host_query( context, host) query = context.session.query(models_v2.Port.device_id).distinct() query = query.filter( models_v2.Port.device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE) query = query.join(models_v2.Port.fixed_ips) query = query.filter( models_v2.IPAllocation.subnet_id.in_(subnet_ids_on_host_query)) router_ids = [item[0] for item in query] LOG.debug('DVR routers on host %s: %s', host, router_ids) return router_ids def _get_other_dvr_router_ids_connected_router(self, context, router_id): # TODO(slaweq): move this method to RouterPort OVO object subnet_ids = self.get_subnet_ids_on_router(context, router_id) RouterPort = l3_models.RouterPort query = context.elevated().session.query(RouterPort.router_id) query = query.join(models_v2.Port) query = query.join( models_v2.Subnet, models_v2.Subnet.network_id == models_v2.Port.network_id) query = query.filter( models_v2.Subnet.id.in_(subnet_ids), RouterPort.port_type == n_const.DEVICE_OWNER_DVR_INTERFACE ).distinct() query = query.filter(RouterPort.router_id != router_id) return [item[0] for item in query] def _get_router_ids_for_agent(self, context, agent_db, router_ids, with_dvr=True): result_set = set(super(L3_DVRsch_db_mixin, self)._get_router_ids_for_agent( context, agent_db, router_ids, with_dvr)) if not with_dvr: return result_set LOG.debug("Routers %(router_ids)s bound to L3 agent in host %(host)s", {'router_ids': result_set, 'host': agent_db['host']}) router_ids = set(router_ids or []) if router_ids and result_set == router_ids: # no need for extra dvr checks if requested routers are # explicitly scheduled to the agent return list(result_set) # dvr routers are not explicitly scheduled to agents on hosts with # dvr serviceable ports, so need special handling if 
(self._get_agent_mode(agent_db) in [n_const.L3_AGENT_MODE_DVR, n_const.L3_AGENT_MODE_DVR_NO_EXTERNAL, n_const.L3_AGENT_MODE_DVR_SNAT]): dvr_routers = self._get_dvr_router_ids_for_host(context, agent_db['host']) if not router_ids: result_set |= set(dvr_routers) else: for router_id in (router_ids - result_set): subnet_ids = self.get_subnet_ids_on_router( context, router_id, keep_gateway_port=False) if (subnet_ids and self._check_dvr_serviceable_ports_on_host( context, agent_db['host'], list(subnet_ids))): result_set.add(router_id) LOG.debug("Routers %(router_ids)s are scheduled or have " "serviceable ports in host %(host)s", {'router_ids': result_set, 'host': agent_db['host']}) for router_id in router_ids: result_set |= set( self._get_other_dvr_router_ids_connected_router( context, router_id)) LOG.debug("Router IDs %(router_ids)s for agent in host %(host)s", {'router_ids': result_set, 'host': agent_db['host']}) return list(result_set) @log_helpers.log_method_call def _check_dvr_serviceable_ports_on_host(self, context, host, subnet_ids): """Check for existence of dvr serviceable ports on host :param context: request context :param host: host to look ports on :param subnet_ids: IDs of subnets to look ports on :return: return True if dvr serviceable port exists on host, otherwise return False """ # db query will return ports for all subnets if subnet_ids is empty, # so need to check first if not subnet_ids: return False # The port binding profile filter for host performs a "contains" # operation. This produces a LIKE expression targeting a sub-string # match: column LIKE '%' || || '%'. # Add quotes to force an exact match of the host name in the port # binding profile dictionary. profile_host = "\"%s\"" % host Binding = ml2_models.PortBinding IPAllocation = models_v2.IPAllocation Port = models_v2.Port host_dvr_dhcp = cfg.CONF.host_dvr_for_dhcp query = context.session.query(Binding) query = query.join(Binding.port) query = query.join(Port.fixed_ips) query = query.filter( IPAllocation.subnet_id.in_(subnet_ids)) query = query.filter( ml2_models.PortBinding.status == n_const.ACTIVE) device_filter = or_( models_v2.Port.device_owner.startswith( n_const.DEVICE_OWNER_COMPUTE_PREFIX), models_v2.Port.device_owner.in_( n_utils.get_other_dvr_serviced_device_owners(host_dvr_dhcp))) query = query.filter(device_filter) host_filter = or_( ml2_models.PortBinding.host == host, ml2_models.PortBinding.profile.contains(profile_host)) query = query.filter(host_filter) return query.first() is not None def _notify_l3_agent_new_port(resource, event, trigger, **kwargs): LOG.debug('Received %(resource)s %(event)s', { 'resource': resource, 'event': event}) port = kwargs.get('port') if not port: return if n_utils.is_dvr_serviced(port['device_owner']): l3plugin = directory.get_plugin(plugin_constants.L3) context = kwargs['context'] l3plugin.dvr_handle_new_service_port(context, port) def _notify_port_delete(event, resource, trigger, **kwargs): context = kwargs['context'] port = kwargs['port'] get_related_hosts_info = kwargs.get("get_related_hosts_info", True) l3plugin = directory.get_plugin(plugin_constants.L3) removed_routers = l3plugin.get_dvr_routers_to_remove( context, port, get_related_hosts_info) for info in removed_routers: l3plugin.l3_rpc_notifier.router_removed_from_agent( context, info['router_id'], info['host']) def _notify_l3_agent_port_update(resource, event, trigger, **kwargs): new_port = kwargs.get('port') original_port = kwargs.get('original_port') is_fixed_ips_changed = n_utils.port_ip_changed(new_port, 
original_port) if (original_port['device_owner'] in [n_const.DEVICE_OWNER_HA_REPLICATED_INT, n_const.DEVICE_OWNER_ROUTER_SNAT, n_const.DEVICE_OWNER_ROUTER_GW] and not is_fixed_ips_changed): return if new_port and original_port: l3plugin = directory.get_plugin(plugin_constants.L3) context = kwargs['context'] new_port_host = new_port.get(portbindings.HOST_ID) original_port_host = original_port.get(portbindings.HOST_ID) is_new_port_binding_changed = ( new_port_host and new_port_host != original_port_host) is_bound_port_moved = ( original_port_host and original_port_host != new_port_host) fip_router_id = None dest_host = None new_port_profile = new_port.get(portbindings.PROFILE) if new_port_profile: dest_host = new_port_profile.get('migrating_to') if is_new_port_binding_changed or is_bound_port_moved or dest_host: fips = l3plugin._get_floatingips_by_port_id( context, port_id=original_port['id']) fip = fips[0] if fips else None if fip: fip_router_id = fip['router_id'] if is_bound_port_moved: removed_routers = l3plugin.get_dvr_routers_to_remove( context, original_port, get_related_hosts_info=False) if removed_routers: removed_router_args = { 'context': context, 'port': original_port, 'removed_routers': removed_routers, 'get_related_hosts_info': False, } _notify_port_delete( event, resource, trigger, **removed_router_args) def _should_notify_on_fip_update(): if not fip_router_id: return False for info in removed_routers: if info['router_id'] == fip_router_id: return False try: router = l3plugin._get_router(context, fip_router_id) except l3_exc.RouterNotFound: return False return l3_dvr_db.is_distributed_router(router) if _should_notify_on_fip_update(): l3plugin.l3_rpc_notifier.routers_updated_on_host( context, [fip_router_id], original_port[portbindings.HOST_ID]) # If dest_host is set, then the port profile has changed # and this port is in migration. The call below will # pre-create the router on the new host # If the original_port is None, then it is a migration # from unbound to bound. if (is_new_port_binding_changed or dest_host): if (not original_port[portbindings.HOST_ID] and not original_port['device_owner']): l3plugin.dvr_handle_new_service_port(context, new_port, unbound_migrate=True) else: l3plugin.dvr_handle_new_service_port( context, new_port, dest_host=dest_host, router_id=fip_router_id) return def subscribe(): registry.subscribe( _notify_l3_agent_port_update, resources.PORT, events.AFTER_UPDATE) registry.subscribe( _notify_l3_agent_new_port, resources.PORT, events.AFTER_CREATE) registry.subscribe( _notify_port_delete, resources.PORT, events.AFTER_DELETE) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/l3_fip_pools_db.py0000644000175000017500000000612200000000000022632 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
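# A minimal standalone sketch of the fields-filtering contract that the
# mixin below delegates to lib_db_utils.resource_fields: given a full
# resource dict and an optional list of field names, return only the
# requested fields. The helper name and sample values here are
# illustrative stand-ins, not neutron_lib's actual implementation.
def _resource_fields_sketch(resource, fields):
    if not fields:
        return resource
    return {key: value for key, value in resource.items() if key in fields}


# Example: trimming a pool dict the way get_floatingip_pools() does when
# the API request asks for specific fields only.
_SKETCH_POOL = {'subnet_id': 'subnet-1', 'subnet_name': 'public-v4',
                'network_id': 'net-1', 'cidr': '203.0.113.0/24'}
assert _resource_fields_sketch(_SKETCH_POOL, ['cidr']) == {
    'cidr': '203.0.113.0/24'}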
from neutron_lib.api.definitions import fip64 from neutron_lib.api import extensions from neutron_lib import constants as lib_const from neutron_lib.db import utils as lib_db_utils from neutron_lib.plugins import directory from neutron.extensions import floatingip_pools as fip_pools_ext from neutron.objects import base as base_obj from neutron.objects import network as net_obj from neutron.objects import subnet as subnet_obj class FloatingIPPoolsDbMixin(object): """Class to support floating IP pool.""" _is_v6_supported = None @staticmethod def _make_floatingip_pool_dict(context, subnet, fields=None): res = {'subnet_id': subnet.id, 'subnet_name': subnet.name, 'tenant_id': context.tenant_id, 'network_id': subnet.network_id, 'cidr': str(subnet.cidr)} return lib_db_utils.resource_fields(res, fields) def get_floatingip_pools(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """Return information for available floating IP pools""" pager = base_obj.Pager(sorts, limit, page_reverse, marker) net_ids = [n.network_id for n in net_obj.ExternalNetwork.get_objects(context)] # NOTE(hongbin): Use elevated context to make sure we have enough # permission to retrieve subnets that are not in current tenant # but belongs to external networks shared with current tenant. admin_context = context.elevated() subnet_objs = subnet_obj.Subnet.get_objects(admin_context, _pager=pager, network_id=net_ids) return [self._make_floatingip_pool_dict(context, obj, fields) for obj in subnet_objs if (obj.ip_version == lib_const.IP_VERSION_4 or self.is_v6_supported)] @property def is_v6_supported(self): supported = self._is_v6_supported if supported is None: supported = False for plugin in directory.get_plugins().values(): if extensions.is_extension_supported(plugin, fip64.ALIAS): supported = True break self._is_v6_supported = supported return supported class FloatingIPPoolsMixin(FloatingIPPoolsDbMixin, fip_pools_ext.FloatingIPPoolPluginBase): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/l3_fip_port_details.py0000644000175000017500000000272000000000000023522 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
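# A standalone sketch of the resource-extension step implemented below:
# when a floating IP has an associated fixed port, a 'port_details'
# sub-dict is grafted onto the API response dict, otherwise the key is
# set to None. The sample port dict is a hypothetical stand-in for the
# DB object's fixed_port relationship, trimmed to two keys for brevity.
def _extend_fip_sketch(fip_res, fixed_port):
    if fixed_port:
        fip_res['port_details'] = {'name': fixed_port['name'],
                                   'status': fixed_port['status']}
    else:
        fip_res['port_details'] = None
    return fip_res


assert _extend_fip_sketch({}, None) == {'port_details': None}
assert _extend_fip_sketch(
    {}, {'name': 'vm-port', 'status': 'ACTIVE'}
)['port_details']['status'] == 'ACTIVE'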
# from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.db import resource_extend def _make_port_details_dict(port): return {'name': port['name'], 'network_id': port['network_id'], 'mac_address': port['mac_address'], 'admin_state_up': port['admin_state_up'], 'status': port['status'], 'device_id': port['device_id'], 'device_owner': port['device_owner']} @resource_extend.has_resource_extenders class Fip_port_details_db_mixin(object): """Mixin class to enable floating IP's port_details attributes.""" @staticmethod @resource_extend.extends([l3_apidef.FLOATINGIPS]) def _extend_fip_dict_device_id(fip_res, fip_db): if fip_db.fixed_port: fip_res['port_details'] = _make_port_details_dict( fip_db.fixed_port) else: fip_res['port_details'] = None return fip_res ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/l3_fip_qos.py0000644000175000017500000000512100000000000021631 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.db import resource_extend from neutron_lib.services.qos import constants as qos_consts from neutron.objects.qos import policy as policy_object @resource_extend.has_resource_extenders class FloatingQoSDbMixin(object): """Mixin class to enable floating IP's QoS extra attributes.""" @staticmethod @resource_extend.extends([l3_apidef.FLOATINGIPS]) def _extend_extra_fip_dict(fip_res, fip_db): if fip_db.get('qos_policy_binding'): fip_res[qos_consts.QOS_POLICY_ID] = ( fip_db.qos_policy_binding.policy_id) else: fip_res[qos_consts.QOS_POLICY_ID] = None return fip_res def _create_fip_qos_db(self, context, fip_id, policy_id): policy = policy_object.QosPolicy.get_policy_obj(context, policy_id) policy.attach_floatingip(fip_id) def _delete_fip_qos_db(self, context, fip_id, policy_id): policy = policy_object.QosPolicy.get_policy_obj(context, policy_id) policy.detach_floatingip(fip_id) def _process_extra_fip_qos_create(self, context, fip_id, fip): qos_policy_id = fip.get(qos_consts.QOS_POLICY_ID) if not qos_policy_id: return self._create_fip_qos_db(context, fip_id, qos_policy_id) def _process_extra_fip_qos_update( self, context, floatingip_obj, fip, old_floatingip): if qos_consts.QOS_POLICY_ID not in fip: # No qos_policy_id in API input, do nothing return new_qos_policy_id = fip.get(qos_consts.QOS_POLICY_ID) old_qos_policy_id = old_floatingip.get(qos_consts.QOS_POLICY_ID) if old_qos_policy_id == new_qos_policy_id: return if old_qos_policy_id: self._delete_fip_qos_db(context, floatingip_obj['id'], old_qos_policy_id) if not new_qos_policy_id: return self._create_fip_qos_db( context, floatingip_obj['id'], new_qos_policy_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/l3_gateway_ip_qos.py0000644000175000017500000001202700000000000023207 0ustar00coreycorey00000000000000# Copyright 2018 OpenStack Foundation # 
Copyright 2017 Letv Cloud Computing # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.api.definitions import qos_gateway_ip from neutron_lib.api import extensions from neutron_lib.db import resource_extend from neutron_lib.services.qos import constants as qos_consts from oslo_log import log as logging from neutron.db import l3_db from neutron.db import l3_gwmode_db from neutron.objects.qos import policy as policy_object LOG = logging.getLogger(__name__) @resource_extend.has_resource_extenders class L3_gw_ip_qos_dbonly_mixin(l3_gwmode_db.L3_NAT_dbonly_mixin): """Mixin class to add router gateway IP's QoS extra attributes.""" _gw_ip_qos = None @staticmethod @resource_extend.extends([l3_apidef.ROUTERS]) def _extend_router_dict_gw_qos(router_res, router_db): if router_db.gw_port_id and router_db.get('qos_policy_binding'): policy_id = router_db.qos_policy_binding.policy_id router_res[l3_apidef.EXTERNAL_GW_INFO].update( {qos_consts.QOS_POLICY_ID: policy_id}) @property def _is_gw_ip_qos_supported(self): if self._gw_ip_qos is None: # Check L3 service plugin self._gw_ip_qos = extensions.is_extension_supported( self, qos_gateway_ip.ALIAS) return self._gw_ip_qos def _create_gw_ip_qos_db(self, context, router_id, policy_id): policy = policy_object.QosPolicy.get_policy_obj(context, policy_id) policy.attach_router(router_id) def _delete_gw_ip_qos_db(self, context, router_id, policy_id): policy = policy_object.QosPolicy.get_policy_obj(context, policy_id) policy.detach_router(router_id) def _update_router_gw_info(self, context, router_id, info, router=None): # Calls superclass, pass router db object for avoiding re-loading router = super(L3_gw_ip_qos_dbonly_mixin, self)._update_router_gw_info( context, router_id, info, router) if self._is_gw_ip_qos_supported and router.gw_port: self._update_router_gw_qos_policy(context, router_id, info, router) return router def _get_router_gateway_policy_binding(self, context, router_id): router = self._get_router(context, router_id) return router.qos_policy_binding def _update_router_gw_qos_policy(self, context, router_id, info, router): if not info or qos_consts.QOS_POLICY_ID not in info: # An explicit 'None' for `qos_policy_id` indicates to clear # the router gateway IP policy. So if info does not have # the key `qos_policy_id`, we cannot decide what behavior # is expected and simply return here. 
return new_qos_policy_id = info[qos_consts.QOS_POLICY_ID] if router.qos_policy_binding: old_qos_policy_id = router.qos_policy_binding.policy_id if old_qos_policy_id == new_qos_policy_id: return if old_qos_policy_id: self._delete_gw_ip_qos_db(context, router_id, old_qos_policy_id) with context.session.begin(subtransactions=True): context.session.refresh(router) if new_qos_policy_id: self._create_gw_ip_qos_db( context, router_id, new_qos_policy_id) def _build_routers_list(self, context, routers, gw_ports): routers = super(L3_gw_ip_qos_dbonly_mixin, self)._build_routers_list( context, routers, gw_ports) if not self._is_gw_ip_qos_supported: return routers for rtr in routers: gw_port_id = rtr['gw_port_id'] # Collect gw ports only if available if gw_port_id and gw_ports.get(gw_port_id): rtr['gw_port'] = gw_ports[gw_port_id] router_gateway_policy_binding = ( self._get_router_gateway_policy_binding( context, rtr['id'])) qos_policy_id = None if router_gateway_policy_binding: qos_policy_id = router_gateway_policy_binding.policy_id rtr['gw_port'][qos_consts.QOS_POLICY_ID] = qos_policy_id return routers class L3_gw_ip_qos_db_mixin(L3_gw_ip_qos_dbonly_mixin, l3_db.L3_NAT_db_mixin): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/l3_gwmode_db.py0000644000175000017500000001002300000000000022115 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
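# A standalone sketch of the enable_snat resolution performed by
# _get_enable_snat() below: an explicit value in the request body wins,
# otherwise the deployment-wide default applies.
# _SKETCH_DEFAULT_ENABLE_SNAT is a hypothetical stand-in for
# cfg.CONF.enable_snat_by_default.
_SKETCH_DEFAULT_ENABLE_SNAT = True


def _get_enable_snat_sketch(info):
    if info and 'enable_snat' in info:
        return info['enable_snat']
    # if enable_snat is not specified then use the default value
    return _SKETCH_DEFAULT_ENABLE_SNAT


assert _get_enable_snat_sketch({'enable_snat': False}) is False
assert _get_enable_snat_sketch(None) is _SKETCH_DEFAULT_ENABLE_SNAT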
# from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib.db import resource_extend from oslo_config import cfg import sqlalchemy as sa from sqlalchemy import sql from neutron.conf.db import l3_gwmode_db from neutron.db import l3_db from neutron.db.models import l3 as l3_models l3_gwmode_db.register_db_l3_gwmode_opts() # Modify the Router Data Model adding the enable_snat attribute setattr(l3_models.Router, 'enable_snat', sa.Column(sa.Boolean, default=True, server_default=sql.true(), nullable=False)) @resource_extend.has_resource_extenders class L3_NAT_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin): """Mixin class to add configurable gateway modes.""" @staticmethod @resource_extend.extends([l3_apidef.ROUTERS]) def _extend_router_dict_gw_mode(router_res, router_db): if router_db.gw_port_id: nw_id = router_db.gw_port['network_id'] router_res[l3_apidef.EXTERNAL_GW_INFO].update({ 'network_id': nw_id, 'enable_snat': router_db.enable_snat, 'external_fixed_ips': [ {'subnet_id': ip["subnet_id"], 'ip_address': ip["ip_address"]} for ip in router_db.gw_port['fixed_ips'] ] }) def _update_router_gw_info(self, context, router_id, info, router=None): # Load the router only if necessary if not router: router = self._get_router(context, router_id) with context.session.begin(subtransactions=True): old_router = self._make_router_dict(router) router.enable_snat = self._get_enable_snat(info) router_body = {l3_apidef.ROUTER: {l3_apidef.EXTERNAL_GW_INFO: info}} registry.publish(resources.ROUTER, events.PRECOMMIT_UPDATE, self, payload=events.DBEventPayload( context, request_body=router_body, states=(old_router,), resource_id=router_id, desired_state=router)) # Calls superclass, pass router db object for avoiding re-loading super(L3_NAT_dbonly_mixin, self)._update_router_gw_info( context, router_id, info, router=router) # Returning the router might come back useful if this # method is overridden in child classes return router @staticmethod def _get_enable_snat(info): if info and 'enable_snat' in info: return info['enable_snat'] # if enable_snat is not specified then use the default value return cfg.CONF.enable_snat_by_default def _build_routers_list(self, context, routers, gw_ports): routers = super(L3_NAT_dbonly_mixin, self)._build_routers_list( context, routers, gw_ports) for rtr in routers: gw_port_id = rtr['gw_port_id'] # Collect gw ports only if available if gw_port_id and gw_ports.get(gw_port_id): rtr['gw_port'] = gw_ports[gw_port_id] # Add enable_snat key rtr['enable_snat'] = rtr[ l3_apidef.EXTERNAL_GW_INFO]['enable_snat'] return routers class L3_NAT_db_mixin(L3_NAT_dbonly_mixin, l3_db.L3_NAT_db_mixin): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/l3_hamode_db.py0000644000175000017500000011022100000000000022071 0ustar00coreycorey00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # import functools import random import netaddr from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.api.definitions import l3_ext_ha_mode as l3_ext_ha_apidef from neutron_lib.api.definitions import port as port_def from neutron_lib.api.definitions import portbindings from neutron_lib.api.definitions import provider_net as providernet from neutron_lib.api import extensions from neutron_lib.api import validators from neutron_lib.callbacks import events from neutron_lib.callbacks import priority_group from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib.db import api as db_api from neutron_lib import exceptions as n_exc from neutron_lib.exceptions import l3 as l3_exc from neutron_lib.exceptions import l3_ext_ha_mode as l3ha_exc from neutron_lib.objects import exceptions as obj_base from neutron_lib.plugins import utils as p_utils from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils import six import sqlalchemy as sa from sqlalchemy import exc as sql_exc from sqlalchemy import orm from neutron._i18n import _ from neutron.common import utils as n_utils from neutron.conf.db import l3_hamode_db from neutron.db import _utils as db_utils from neutron.db.availability_zone import router as router_az_db from neutron.db import l3_dvr_db from neutron.objects import base from neutron.objects import l3_hamode from neutron.objects import router as l3_obj VR_ID_RANGE = set(range(1, 255)) MAX_ALLOCATION_TRIES = 10 UNLIMITED_AGENTS_PER_ROUTER = 0 LOG = logging.getLogger(__name__) l3_hamode_db.register_db_l3_hamode_opts() @registry.has_registry_receivers class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin, router_az_db.RouterAvailabilityZoneMixin): """Mixin class to add high availability capability to routers.""" def _verify_configuration(self): self.ha_cidr = cfg.CONF.l3_ha_net_cidr try: net = netaddr.IPNetwork(self.ha_cidr) except netaddr.AddrFormatError: raise l3ha_exc.HANetworkCIDRNotValid(cidr=self.ha_cidr) if ('/' not in self.ha_cidr or net.network != net.ip): raise l3ha_exc.HANetworkCIDRNotValid(cidr=self.ha_cidr) self._check_num_agents_per_router() def _check_num_agents_per_router(self): max_agents = cfg.CONF.max_l3_agents_per_router if max_agents != UNLIMITED_AGENTS_PER_ROUTER and max_agents < 1: raise l3ha_exc.HAMaximumAgentsNumberNotValid(max_agents=max_agents) def __new__(cls, *args, **kwargs): inst = super(L3_HA_NAT_db_mixin, cls).__new__(cls, *args, **kwargs) inst._verify_configuration() return inst def get_ha_network(self, context, tenant_id): pager = base.Pager(limit=1) results = l3_hamode.L3HARouterNetwork.get_objects( context, _pager=pager, project_id=tenant_id) return results.pop() if results else None def _get_allocated_vr_id(self, context, network_id): vr_id_objs = l3_hamode.L3HARouterVRIdAllocation.get_objects( context, network_id=network_id) allocated_vr_ids = set(a.vr_id for a in vr_id_objs) - set([0]) return allocated_vr_ids def _get_vr_id(self, context, network_id): allocated_vr_ids = self._get_allocated_vr_id(context, network_id) available_vr_ids = VR_ID_RANGE - allocated_vr_ids if not available_vr_ids: return None return random.choice(list(available_vr_ids)) @db_api.retry_if_session_inactive() def _ensure_vr_id(self, context, router_db, 
ha_network): router_id = router_db.id network_id = ha_network.network_id # TODO(kevinbenton): let decorator handle duplicate retry # like in review.opendev.org/#/c/367179/1/neutron/db/l3_hamode_db.py for count in range(MAX_ALLOCATION_TRIES): try: # NOTE(kevinbenton): we disallow subtransactions because the # retry logic will bust any parent transactions with db_api.CONTEXT_WRITER.using(context): if router_db.extra_attributes.ha_vr_id: LOG.debug( "Router %(router_id)s has already been " "allocated a ha_vr_id %(ha_vr_id)d!", {'router_id': router_id, 'ha_vr_id': router_db.extra_attributes.ha_vr_id}) return old_router = self._make_router_dict(router_db) vr_id = self._get_vr_id(context, network_id) if vr_id is None: raise l3ha_exc.NoVRIDAvailable(router_id=router_id) allocation = l3_hamode.L3HARouterVRIdAllocation( context, network_id=network_id, vr_id=vr_id) allocation.create() router_db.extra_attributes.ha_vr_id = allocation.vr_id LOG.debug( "Router %(router_id)s has been allocated a ha_vr_id " "%(ha_vr_id)d.", {'router_id': router_id, 'ha_vr_id': allocation.vr_id}) router_body = {l3_apidef.ROUTER: {l3_ext_ha_apidef.HA_INFO: True, 'ha_vr_id': allocation.vr_id}} registry.publish(resources.ROUTER, events.PRECOMMIT_UPDATE, self, payload=events.DBEventPayload( context, request_body=router_body, states=(old_router,), resource_id=router_id, desired_state=router_db)) return allocation.vr_id except obj_base.NeutronDbObjectDuplicateEntry: LOG.info("Attempt %(count)s to allocate a VRID in the " "network %(network)s for the router %(router)s", {'count': count, 'network': network_id, 'router': router_id}) raise l3ha_exc.MaxVRIDAllocationTriesReached( network_id=network_id, router_id=router_id, max_tries=MAX_ALLOCATION_TRIES) @db_api.retry_if_session_inactive() def _delete_vr_id_allocation(self, context, ha_network, vr_id): l3_hamode.L3HARouterVRIdAllocation.delete_objects( context, network_id=ha_network.network_id, vr_id=vr_id) def _create_ha_subnet(self, context, network_id, tenant_id): args = {'network_id': network_id, 'tenant_id': '', 'name': constants.HA_SUBNET_NAME % tenant_id, 'ip_version': 4, 'cidr': cfg.CONF.l3_ha_net_cidr, 'enable_dhcp': False, 'gateway_ip': None} return p_utils.create_subnet(self._core_plugin, context, {'subnet': args}) def _create_ha_network_tenant_binding(self, context, tenant_id, network_id): ha_network = l3_hamode.L3HARouterNetwork( context, project_id=tenant_id, network_id=network_id) ha_network.create() # we need to check if someone else just inserted at exactly the # same time as us because there is no constrain in L3HARouterNetwork # that prevents multiple networks per tenant if l3_hamode.L3HARouterNetwork.count( context, project_id=tenant_id) > 1: # we need to throw an error so our network is deleted # and the process is started over where the existing # network will be selected. 
raise db_exc.DBDuplicateEntry(columns=['tenant_id']) return ha_network def _add_ha_network_settings(self, network): if cfg.CONF.l3_ha_network_type: network[providernet.NETWORK_TYPE] = cfg.CONF.l3_ha_network_type if cfg.CONF.l3_ha_network_physical_name: network[providernet.PHYSICAL_NETWORK] = ( cfg.CONF.l3_ha_network_physical_name) def _create_ha_network(self, context, tenant_id): admin_ctx = context.elevated() args = {'network': {'name': constants.HA_NETWORK_NAME % tenant_id, 'tenant_id': '', 'shared': False, 'admin_state_up': True}} self._add_ha_network_settings(args['network']) creation = functools.partial(p_utils.create_network, self._core_plugin, admin_ctx, args) content = functools.partial(self._create_ha_network_tenant_binding, admin_ctx, tenant_id) deletion = functools.partial(self._core_plugin.delete_network, admin_ctx) network, ha_network = db_utils.safe_creation( context, creation, deletion, content, transaction=False) try: self._create_ha_subnet(admin_ctx, network['id'], tenant_id) except Exception: with excutils.save_and_reraise_exception(): self._core_plugin.delete_network(admin_ctx, network['id']) return ha_network def get_number_of_agents_for_scheduling(self, context): """Return number of agents on which the router will be scheduled.""" num_agents = len( self.get_l3_agents( context, active=True, filters={'agent_modes': [constants.L3_AGENT_MODE_LEGACY, constants.L3_AGENT_MODE_DVR_SNAT]})) max_agents = cfg.CONF.max_l3_agents_per_router if max_agents: if max_agents > num_agents: LOG.info("Number of active agents lower than " "max_l3_agents_per_router. L3 agents " "available: %s", num_agents) else: num_agents = max_agents return num_agents @db_api.retry_if_session_inactive() def _create_ha_port_binding(self, context, router_id, port_id): try: l3_obj.RouterPort( context, port_id=port_id, router_id=router_id, port_type=constants.DEVICE_OWNER_ROUTER_HA_INTF).create() portbinding = l3_hamode.L3HARouterAgentPortBinding( context, port_id=port_id, router_id=router_id) portbinding.create() return portbinding except db_exc.DBReferenceError as e: with excutils.save_and_reraise_exception() as ctxt: if isinstance(e.inner_exception, sql_exc.IntegrityError): ctxt.reraise = False LOG.debug( 'Failed to create HA router agent PortBinding, ' 'Router %s has already been removed ' 'by concurrent operation', router_id) raise l3_exc.RouterNotFound(router_id=router_id) def add_ha_port(self, context, router_id, network_id, tenant_id): # NOTE(kevinbenton): we have to block any ongoing transactions because # our exception handling will try to delete the port using the normal # core plugin API. If this function is called inside of a transaction # the exception will mangle the state, cause the delete call to fail, # and end up relying on the DB rollback to remove the port instead of # proper delete_port call. 
if context.session.is_active: raise RuntimeError(_('add_ha_port cannot be called inside of a ' 'transaction.')) args = {'tenant_id': '', 'network_id': network_id, 'admin_state_up': True, 'device_id': router_id, 'device_owner': constants.DEVICE_OWNER_ROUTER_HA_INTF, 'name': constants.HA_PORT_NAME % tenant_id} creation = functools.partial(p_utils.create_port, self._core_plugin, context, {'port': args}) content = functools.partial(self._create_ha_port_binding, context, router_id) deletion = functools.partial(self._core_plugin.delete_port, context, l3_port_check=False) port, binding = db_utils.safe_creation(context, creation, deletion, content, transaction=False) # _create_ha_port_binding returns the binding object now and # to populate agent relation db_obj is used. return binding.db_obj def _delete_ha_interfaces(self, context, router_id): admin_ctx = context.elevated() device_filter = {'device_id': [router_id], 'device_owner': [constants.DEVICE_OWNER_ROUTER_HA_INTF]} ports = self._core_plugin.get_ports(admin_ctx, filters=device_filter) for port in ports: self._core_plugin.delete_port(admin_ctx, port['id'], l3_port_check=False) def delete_ha_interfaces_on_host(self, context, router_id, host): admin_ctx = context.elevated() port_ids = (binding.port_id for binding in self.get_ha_router_port_bindings(admin_ctx, [router_id], host)) for port_id in port_ids: self._core_plugin.delete_port(admin_ctx, port_id, l3_port_check=False) def _notify_router_updated(self, context, router_id): self.l3_rpc_notifier.routers_updated( context, [router_id], shuffle_agents=True) @classmethod def _is_ha(cls, router): ha = router.get('ha') if not validators.is_attr_set(ha): ha = cfg.CONF.l3_ha return ha def _get_device_owner(self, context, router=None): """Get device_owner for the specified router.""" router_is_uuid = isinstance(router, six.string_types) if router_is_uuid: router = self._get_router(context, router) if (is_ha_router(router) and not l3_dvr_db.is_distributed_router(router)): return constants.DEVICE_OWNER_HA_REPLICATED_INT return super(L3_HA_NAT_db_mixin, self)._get_device_owner(context, router) @n_utils.transaction_guard def _ensure_vr_id_and_network(self, context, router_db): """Attach vr_id to router while tolerating network deletes.""" creator = functools.partial(self._ensure_vr_id, context, router_db) dep_getter = functools.partial(self.get_ha_network, context, router_db.tenant_id) dep_creator = functools.partial(self._create_ha_network, context, router_db.tenant_id) dep_deleter = functools.partial(self._delete_ha_network, context) dep_id_attr = 'network_id' return n_utils.create_object_with_dependency( creator, dep_getter, dep_creator, dep_id_attr, dep_deleter)[1] @registry.receives(resources.ROUTER, [events.BEFORE_CREATE], priority_group.PRIORITY_ROUTER_EXTENDED_ATTRIBUTE) @db_api.retry_if_session_inactive() def _before_router_create(self, resource, event, trigger, context, router, **kwargs): """Event handler to create HA resources before router creation.""" if not self._is_ha(router): return # ensure the HA network exists before we start router creation so # we can provide meaningful errors back to the user if no network # can be allocated if not self.get_ha_network(context, router['tenant_id']): self._create_ha_network(context, router['tenant_id']) @registry.receives(resources.ROUTER, [events.PRECOMMIT_CREATE], priority_group.PRIORITY_ROUTER_EXTENDED_ATTRIBUTE) def _precommit_router_create(self, resource, event, trigger, context, router, router_db, **kwargs): """Event handler to set ha flag and 
status on creation.""" is_ha = self._is_ha(router) router['ha'] = is_ha self.set_extra_attr_value(context, router_db, 'ha', is_ha) if not is_ha: return # This will throw an exception if there aren't enough agents to # handle this HA router self.get_number_of_agents_for_scheduling(context) ha_net = self.get_ha_network(context, router['tenant_id']) if not ha_net: # net was deleted, throw a retry to start over to create another raise db_exc.RetryRequest( l3ha_exc.HANetworkConcurrentDeletion( tenant_id=router['tenant_id'])) @registry.receives(resources.ROUTER, [events.AFTER_CREATE], priority_group.PRIORITY_ROUTER_EXTENDED_ATTRIBUTE) def _after_router_create(self, resource, event, trigger, context, router_id, router, router_db, **kwargs): if not router['ha']: return try: self.schedule_router(context, router_id) router['ha_vr_id'] = router_db.extra_attributes.ha_vr_id self._notify_router_updated(context, router_id) except Exception as e: with excutils.save_and_reraise_exception() as ctx: if isinstance(e, l3ha_exc.NoVRIDAvailable): ctx.reraise = False LOG.warning("No more VRIDs for router: %s", e) else: LOG.exception("Failed to schedule HA router %s.", router_id) router['status'] = self._update_router_db( context, router_id, {'status': constants.ERROR})['status'] @registry.receives(resources.ROUTER, [events.PRECOMMIT_UPDATE], priority_group.PRIORITY_ROUTER_EXTENDED_ATTRIBUTE) def _validate_migration(self, resource, event, trigger, payload=None): """Event handler on precommit update to validate migration.""" original_ha_state = payload.states[0]['ha'] requested_ha_state = payload.request_body.get('ha') ha_changed = (requested_ha_state is not None and requested_ha_state != original_ha_state) if not ha_changed: return if payload.desired_state.admin_state_up: msg = _('Cannot change HA attribute of active routers. Please ' 'set router admin_state_up to False prior to upgrade') raise n_exc.BadRequest(resource='router', msg=msg) if requested_ha_state: # This will throw HANotEnoughAvailableAgents if there aren't # enough l3 agents to handle this router. self.get_number_of_agents_for_scheduling(payload.context) old_owner = constants.DEVICE_OWNER_ROUTER_INTF new_owner = constants.DEVICE_OWNER_HA_REPLICATED_INT else: old_owner = constants.DEVICE_OWNER_HA_REPLICATED_INT new_owner = constants.DEVICE_OWNER_ROUTER_INTF ha_network = self.get_ha_network(payload.context, payload.desired_state.tenant_id) self._delete_vr_id_allocation( payload.context, ha_network, payload.desired_state.extra_attributes.ha_vr_id) payload.desired_state.extra_attributes.ha_vr_id = None if (payload.request_body.get('distributed') or payload.states[0]['distributed']): self.set_extra_attr_value(payload.context, payload.desired_state, 'ha', requested_ha_state) return self._migrate_router_ports( payload.context, payload.desired_state, old_owner=old_owner, new_owner=new_owner) self.set_extra_attr_value( payload.context, payload.desired_state, 'ha', requested_ha_state) @registry.receives(resources.ROUTER, [events.AFTER_UPDATE], priority_group.PRIORITY_ROUTER_EXTENDED_ATTRIBUTE) def _reconfigure_ha_resources(self, resource, event, trigger, context, router_id, old_router, router, router_db, **kwargs): """Event handler to react to changes after HA flag has been updated.""" ha_changed = old_router['ha'] != router['ha'] if not ha_changed: return requested_ha_state = router['ha'] # The HA attribute has changed. First unbind the router from agents # to force a proper re-scheduling to agents. 
# TODO(jschwarz): This will have to be more selective to get HA + DVR # working (Only unbind from dvr_snat nodes). self._unbind_ha_router(context, router_id) if not requested_ha_state: self._delete_ha_interfaces(context, router_db.id) # always attempt to cleanup the network as the router is # deleted. the core plugin will stop us if its in use ha_network = self.get_ha_network(context, router_db.tenant_id) if ha_network: self.safe_delete_ha_network(context, ha_network, router_db.tenant_id) self.schedule_router(context, router_id) self._notify_router_updated(context, router_db.id) def _delete_ha_network(self, context, net): admin_ctx = context.elevated() self._core_plugin.delete_network(admin_ctx, net.network_id) def safe_delete_ha_network(self, context, ha_network, tenant_id): try: # reference the attr inside the try block before we attempt # to delete the network and potentially invalidate the # relationship net_id = ha_network.network_id self._delete_ha_network(context, ha_network) except (n_exc.NetworkNotFound, orm.exc.ObjectDeletedError): LOG.debug( "HA network for tenant %s was already deleted.", tenant_id) except sa.exc.InvalidRequestError: LOG.info("HA network %s can not be deleted.", net_id) except n_exc.NetworkInUse: # network is still in use, this is normal so we don't # log anything pass else: LOG.info("HA network %(network)s was deleted as " "no HA routers are present in tenant " "%(tenant)s.", {'network': net_id, 'tenant': tenant_id}) @registry.receives(resources.ROUTER, [events.PRECOMMIT_DELETE], priority_group.PRIORITY_ROUTER_EXTENDED_ATTRIBUTE) def _release_router_vr_id(self, resource, event, trigger, context, router_db, **kwargs): """Event handler for removal of VRID during router delete.""" if router_db.extra_attributes.ha: ha_network = self.get_ha_network(context, router_db.tenant_id) if ha_network: self._delete_vr_id_allocation( context, ha_network, router_db.extra_attributes.ha_vr_id) @registry.receives(resources.ROUTER, [events.AFTER_DELETE], priority_group.PRIORITY_ROUTER_EXTENDED_ATTRIBUTE) @db_api.retry_if_session_inactive() def _cleanup_ha_network(self, resource, event, trigger, context, router_id, original, **kwargs): """Event handler to attempt HA network deletion after router delete.""" if not original['ha']: return ha_network = self.get_ha_network(context, original['tenant_id']) if not ha_network: return # always attempt to cleanup the network as the router is # deleted. the core plugin will stop us if its in use self.safe_delete_ha_network(context, ha_network, original['tenant_id']) def _unbind_ha_router(self, context, router_id): for agent in self.get_l3_agents_hosting_routers(context, [router_id]): self.remove_router_from_l3_agent(context, agent['id'], router_id) def get_ha_router_port_bindings(self, context, router_ids, host=None): if not router_ids: return [] return ( l3_hamode.L3HARouterAgentPortBinding.get_l3ha_filter_host_router( context, router_ids, host)) @staticmethod def _check_router_agent_ha_binding(context, router_id, agent_id): return l3_hamode.L3HARouterAgentPortBinding.objects_exist( context, router_id=router_id, l3_agent_id=agent_id) def _get_bindings_and_update_router_state_for_dead_agents(self, context, router_id): """Return bindings. In case if dead agents were detected update router states on this agent. 
""" with db_api.CONTEXT_WRITER.using(context): bindings = self.get_ha_router_port_bindings(context, [router_id]) router_active_agents_dead = [] router_standby_agents_dead = [] # List agents where router is active and agent is dead # and agents where router is standby and agent is dead for binding in bindings: if not (binding.agent.is_active and binding.agent.admin_state_up): if binding.state == constants.HA_ROUTER_STATE_ACTIVE: router_active_agents_dead.append(binding.agent) elif binding.state == constants.HA_ROUTER_STATE_STANDBY: router_standby_agents_dead.append(binding.agent) if router_active_agents_dead: # Just check if all l3_agents are down # then assuming some communication issue if (len(router_active_agents_dead) + len(router_standby_agents_dead) == len(bindings)): # Make router status as unknown because # agent communication may be issue but router # may still be active. We do not know the # exact status of router. state = constants.HA_ROUTER_STATE_UNKNOWN else: # Make router status as standby on all dead agents # as some other agents are alive , router can become # active on them after some time state = constants.HA_ROUTER_STATE_STANDBY for dead_agent in router_active_agents_dead: self.update_routers_states(context, {router_id: state}, dead_agent.host) if router_active_agents_dead: return self.get_ha_router_port_bindings(context, [router_id]) return bindings def get_l3_bindings_hosting_router_with_ha_states( self, context, router_id): """Return a list of [(agent, ha_state), ...].""" bindings = self._get_bindings_and_update_router_state_for_dead_agents( context, router_id) return [(binding.agent, binding.state) for binding in bindings if binding.agent is not None] def get_active_host_for_ha_router(self, context, router_id): bindings = self.get_l3_bindings_hosting_router_with_ha_states( context, router_id) # TODO(amuller): In case we have two or more actives, this method # needs to return the last agent to become active. This requires # timestamps for state changes. Otherwise, if a host goes down # and another takes over, we'll have two actives. In this case, # if an interface is added to a router, its binding might be wrong # and l2pop would not work correctly. return next( (agent.host for agent, state in bindings if state == constants.HA_ROUTER_STATE_ACTIVE), None) @log_helpers.log_method_call def _process_sync_ha_data(self, context, routers, host, is_any_dvr_agent): routers_dict = dict((router['id'], router) for router in routers) bindings = self.get_ha_router_port_bindings(context, routers_dict.keys(), host) for binding in bindings: port = binding.port if not port: # Filter the HA router has no ha port here LOG.info("HA router %s is missing HA router port " "bindings. Skipping it.", binding.router_id) routers_dict.pop(binding.router_id) continue port_dict = self._core_plugin._make_port_dict(port) router = routers_dict.get(binding.router_id) router[constants.HA_INTERFACE_KEY] = port_dict router[constants.HA_ROUTER_STATE_KEY] = binding.state interfaces = [] for router in routers_dict.values(): interface = router.get(constants.HA_INTERFACE_KEY) if interface: interfaces.append(interface) self._populate_mtu_and_subnets_for_ports(context, interfaces) # If this is a DVR+HA router, then we want to always return it even # though it's missing the '_ha_interface' key. The agent will have # to figure out what kind of router setup is needed. 
return [r for r in list(routers_dict.values()) if (is_any_dvr_agent or not r.get('ha') or r.get(constants.HA_INTERFACE_KEY))] @log_helpers.log_method_call def get_ha_sync_data_for_host(self, context, host, agent, router_ids=None, active=None): agent_mode = self._get_agent_mode(agent) dvr_agent_mode = ( agent_mode in [constants.L3_AGENT_MODE_DVR_SNAT, constants.L3_AGENT_MODE_DVR, constants.L3_AGENT_MODE_DVR_NO_EXTERNAL]) if (dvr_agent_mode and extensions.is_extension_supported( self, constants.L3_DISTRIBUTED_EXT_ALIAS)): # DVR has to be handled differently sync_data = self._get_dvr_sync_data(context, host, agent, router_ids, active) else: sync_data = super(L3_HA_NAT_db_mixin, self).get_sync_data( context, router_ids, active) return self._process_sync_ha_data( context, sync_data, host, dvr_agent_mode) @classmethod def _set_router_states(cls, context, bindings, states): for binding in bindings: try: # NOTE(ralonsoh): to be migrated to the new facade. with context.session.begin(subtransactions=True): binding.state = states[binding.router_id] except (orm.exc.StaleDataError, orm.exc.ObjectDeletedError): # Take concurrently deleted routers in to account pass @db_api.retry_if_session_inactive() def update_routers_states(self, context, states, host): """Receive dict of router ID to state and update them all.""" bindings = self.get_ha_router_port_bindings( context, router_ids=states.keys(), host=host) self._set_router_states(context, bindings, states) self._update_router_port_bindings(context, states, host) def _update_router_port_bindings(self, context, states, host): admin_ctx = context.elevated() device_filter = {'device_id': list(states.keys()), 'device_owner': [constants.DEVICE_OWNER_HA_REPLICATED_INT, constants.DEVICE_OWNER_ROUTER_SNAT, constants.DEVICE_OWNER_ROUTER_GW]} ports = self._core_plugin.get_ports(admin_ctx, filters=device_filter) active_ports = ( port for port in ports if states[port['device_id']] == constants.HA_ROUTER_STATE_ACTIVE) for port in active_ports: try: self._core_plugin.update_port( admin_ctx, port['id'], {port_def.RESOURCE_NAME: {portbindings.HOST_ID: host}}) except (orm.exc.StaleDataError, orm.exc.ObjectDeletedError, n_exc.PortNotFound): # Take concurrently deleted interfaces in to account pass def _get_gateway_port_host(self, context, router, gw_ports): if not router.get('ha'): return super(L3_HA_NAT_db_mixin, self)._get_gateway_port_host( context, router, gw_ports) gw_port_id = router['gw_port_id'] gateway_port = gw_ports.get(gw_port_id) if not gw_port_id or not gateway_port: return gateway_port_status = gateway_port['status'] gateway_port_binding_host = gateway_port[portbindings.HOST_ID] admin_ctx = context.elevated() router_id = router['id'] ha_bindings = self.get_l3_bindings_hosting_router_with_ha_states( admin_ctx, router_id) LOG.debug("HA router %(router_id)s gateway port %(gw_port_id)s " "binding host: %(host)s, status: %(status)s", {"router_id": router_id, "gw_port_id": gateway_port['id'], "host": gateway_port_binding_host, "status": gateway_port_status}) for ha_binding_agent, ha_binding_state in ha_bindings: if ha_binding_state != constants.HA_ROUTER_STATE_ACTIVE: continue # For create router gateway, the gateway port may not be ACTIVE # yet, so we return 'master' host directly. if gateway_port_status != constants.PORT_STATUS_ACTIVE: return ha_binding_agent.host # Do not let the original 'master' (current is backup) host, # override the gateway port binding host. 
if (gateway_port_status == constants.PORT_STATUS_ACTIVE and ha_binding_agent.host == gateway_port_binding_host): return ha_binding_agent.host LOG.debug("No gateway port host retrieved. HA router %(router_id)s " "gateway port %(gw_port_id)s " "binding host: %(host)s, status: %(status)s, " "router HA bindings: %(ha_bindings)s", {"router_id": router_id, "gw_port_id": gateway_port['id'], "host": gateway_port_binding_host, "status": gateway_port_status, "ha_bindings": ha_bindings}) def is_ha_router(router): """Return True if router to be handled is ha.""" try: # See if router is a DB object first requested_router_type = router.extra_attributes.ha except AttributeError: # if not, try to see if it is a request body requested_router_type = router.get('ha') if validators.is_attr_set(requested_router_type): return requested_router_type return cfg.CONF.l3_ha def is_ha_router_port(context, device_owner, router_id): if device_owner == constants.DEVICE_OWNER_HA_REPLICATED_INT: return True elif device_owner == constants.DEVICE_OWNER_ROUTER_SNAT: return l3_obj.RouterExtraAttributes.objects_exist( context, router_id=router_id, ha=True) else: return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/l3_hascheduler_db.py0000644000175000017500000000605000000000000023127 0ustar00coreycorey00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
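# A standalone sketch of the predicate used by
# _notify_l3_agent_ha_port_update() below to decide whether L3 agents
# must be notified: the updated port has to be an HA router interface
# that just transitioned to ACTIVE on a bound host. The device_owner
# and status strings are inlined here in place of the neutron_lib
# constants; plain dicts stand in for port dicts.
def _ha_port_needs_notify_sketch(new_port, original_port, host):
    return bool(
        new_port and original_port and host and
        new_port.get('device_owner') == 'network:router_ha_interface' and
        new_port.get('status') == 'ACTIVE' and
        original_port.get('status') != new_port.get('status'))


assert _ha_port_needs_notify_sketch(
    {'device_owner': 'network:router_ha_interface', 'status': 'ACTIVE'},
    {'status': 'DOWN'}, 'network-node-1')
assert not _ha_port_needs_notify_sketch(
    {'device_owner': 'compute:nova', 'status': 'ACTIVE'},
    {'status': 'DOWN'}, 'network-node-1')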
from neutron_lib.api.definitions import portbindings from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib.db import api as db_api from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from neutron.db import l3_agentschedulers_db as l3_sch_db from neutron.objects import agent as ag_obj class L3_HA_scheduler_db_mixin(l3_sch_db.AZL3AgentSchedulerDbMixin): def get_l3_agents_ordered_by_num_routers(self, context, agent_ids): if not agent_ids: return [] return ag_obj.Agent.get_l3_agents_ordered_by_num_routers( context, agent_ids) def _get_agents_dict_for_router(self, agents_and_states): agents = [] for agent, ha_state in agents_and_states: l3_agent_dict = self._make_agent_dict(agent) l3_agent_dict['ha_state'] = ha_state agents.append(l3_agent_dict) return {'agents': agents} def list_l3_agents_hosting_router(self, context, router_id): with db_api.CONTEXT_WRITER.using(context): router_db = self._get_router(context, router_id) if router_db.extra_attributes.ha: agents = self.get_l3_bindings_hosting_router_with_ha_states( context, router_id) else: agents = self._get_l3_agents_hosting_routers( context, [router_id]) agents = [(agent, None) for agent in agents] return self._get_agents_dict_for_router(agents) def _notify_l3_agent_ha_port_update(resource, event, trigger, **kwargs): new_port = kwargs.get('port') original_port = kwargs.get('original_port') context = kwargs.get('context') host = new_port.get(portbindings.HOST_ID) if new_port and original_port and host: new_device_owner = new_port.get('device_owner', '') if (new_device_owner == constants.DEVICE_OWNER_ROUTER_HA_INTF and new_port['status'] == constants.PORT_STATUS_ACTIVE and original_port['status'] != new_port['status']): l3plugin = directory.get_plugin(plugin_constants.L3) l3plugin.l3_rpc_notifier.routers_updated_on_host( context, [new_port['device_id']], host) def subscribe(): registry.subscribe( _notify_l3_agent_ha_port_update, resources.PORT, events.AFTER_UPDATE) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2590442 neutron-16.0.0.0b2.dev214/neutron/db/metering/0000755000175000017500000000000000000000000021034 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/metering/__init__.py0000644000175000017500000000000000000000000023133 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/metering/metering_db.py0000644000175000017500000002342000000000000023666 0ustar00coreycorey00000000000000# Copyright (C) 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
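# A standalone sketch of the CIDR overlap test that _validate_cidr()
# below performs with netaddr: a new remote_ip_prefix is rejected when
# its IPSet intersects the IPSet built from the prefixes already stored
# for the same label, direction and excluded flag. Sample prefixes are
# illustrative only.
import netaddr as _sketch_netaddr


def _rule_overlaps_sketch(existing_cidrs, new_cidr):
    return bool(_sketch_netaddr.IPSet(existing_cidrs) &
                _sketch_netaddr.IPSet([new_cidr]))


assert _rule_overlaps_sketch(['10.0.0.0/24'], '10.0.0.128/25')
assert not _rule_overlaps_sketch(['10.0.0.0/24'], '10.0.1.0/24')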
import netaddr from neutron_lib import constants from neutron_lib.db import api as db_api from neutron_lib.db import utils as db_utils from neutron_lib.exceptions import metering as metering_exc from oslo_db import exception as db_exc from oslo_utils import uuidutils from neutron.api.rpc.agentnotifiers import metering_rpc_agent_api from neutron.db import l3_dvr_db from neutron.extensions import metering from neutron.objects import base as base_obj from neutron.objects import metering as metering_objs from neutron.objects import router as l3_obj class MeteringDbMixin(metering.MeteringPluginBase): def __init__(self): self.meter_rpc = metering_rpc_agent_api.MeteringAgentNotifyAPI() @staticmethod def _make_metering_label_dict(metering_label, fields=None): res = {'id': metering_label['id'], 'name': metering_label['name'], 'description': metering_label['description'], 'shared': metering_label['shared'], 'tenant_id': metering_label['tenant_id']} return db_utils.resource_fields(res, fields) def create_metering_label(self, context, metering_label): m = metering_label['metering_label'] metering_obj = metering_objs.MeteringLabel( context, id=uuidutils.generate_uuid(), description=m['description'], project_id=m['tenant_id'], name=m['name'], shared=m['shared']) metering_obj.create() return self._make_metering_label_dict(metering_obj) def _get_metering_label(self, context, label_id): metering_label = metering_objs.MeteringLabel.get_object(context, id=label_id) if not metering_label: raise metering_exc.MeteringLabelNotFound(label_id=label_id) return metering_label def delete_metering_label(self, context, label_id): deleted = metering_objs.MeteringLabel.delete_objects( context, id=label_id) if not deleted: raise metering_exc.MeteringLabelNotFound(label_id=label_id) def get_metering_label(self, context, label_id, fields=None): return self._make_metering_label_dict( self._get_metering_label(context, label_id), fields) def get_metering_labels(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): filters = filters or {} pager = base_obj.Pager(sorts, limit, page_reverse, marker) metering_labels = metering_objs.MeteringLabel.get_objects(context, _pager=pager, **filters) return [self._make_metering_label_dict(ml) for ml in metering_labels] @staticmethod def _make_metering_label_rule_dict(metering_label_rule, fields=None): res = {'id': metering_label_rule['id'], 'metering_label_id': metering_label_rule['metering_label_id'], 'direction': metering_label_rule['direction'], 'remote_ip_prefix': metering_label_rule['remote_ip_prefix'], 'excluded': metering_label_rule['excluded']} return db_utils.resource_fields(res, fields) def get_metering_label_rules(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): filters = filters or {} pager = base_obj.Pager(sorts, limit, page_reverse, marker) metering_label_rules = metering_objs.MeteringLabelRule.get_objects( context, _pager=pager, **filters) return [self._make_metering_label_rule_dict(mlr) for mlr in metering_label_rules] def _get_metering_label_rule(self, context, rule_id): metering_label_rule = metering_objs.MeteringLabelRule.get_object( context, id=rule_id) if not metering_label_rule: raise metering_exc.MeteringLabelRuleNotFound(rule_id=rule_id) return metering_label_rule def get_metering_label_rule(self, context, rule_id, fields=None): return self._make_metering_label_rule_dict( self._get_metering_label_rule(context, rule_id), fields) def _validate_cidr(self, context, label_id, 
remote_ip_prefix, direction, excluded): r_ips = self.get_metering_label_rules(context, filters={'metering_label_id': [label_id], 'direction': [direction], 'excluded': [excluded]}, fields=['remote_ip_prefix']) cidrs = [r['remote_ip_prefix'] for r in r_ips] new_cidr_ipset = netaddr.IPSet([remote_ip_prefix]) if (netaddr.IPSet(cidrs) & new_cidr_ipset): raise metering_exc.MeteringLabelRuleOverlaps( remote_ip_prefix=remote_ip_prefix) def create_metering_label_rule(self, context, metering_label_rule): m = metering_label_rule['metering_label_rule'] try: with db_api.CONTEXT_WRITER.using(context): label_id = m['metering_label_id'] ip_prefix = m['remote_ip_prefix'] direction = m['direction'] excluded = m['excluded'] self._validate_cidr(context, label_id, ip_prefix, direction, excluded) rule = metering_objs.MeteringLabelRule( context, id=uuidutils.generate_uuid(), metering_label_id=label_id, direction=direction, excluded=m['excluded'], remote_ip_prefix=netaddr.IPNetwork(ip_prefix)) rule.create() except db_exc.DBReferenceError: raise metering_exc.MeteringLabelNotFound(label_id=label_id) return self._make_metering_label_rule_dict(rule) def delete_metering_label_rule(self, context, rule_id): with db_api.CONTEXT_WRITER.using(context): rule = self._get_metering_label_rule(context, rule_id) rule.delete() return self._make_metering_label_rule_dict(rule) def _get_metering_rules_dict(self, metering_label): rules = [] for rule in metering_label.rules: rule_dict = self._make_metering_label_rule_dict(rule) rules.append(rule_dict) return rules def _make_router_dict(self, router): distributed = l3_dvr_db.is_distributed_router(router) res = {'id': router['id'], 'name': router['name'], 'tenant_id': router['tenant_id'], 'admin_state_up': router['admin_state_up'], 'status': router['status'], 'gw_port_id': router['gw_port_id'], 'distributed': distributed, constants.METERING_LABEL_KEY: []} return res def _process_sync_metering_data(self, context, labels): routers = None routers_dict = {} for label in labels: if label.shared: if not routers: routers = l3_obj.Router.get_objects(context) else: filters = { 'id': [router.id for router in label.db_obj.routers]} routers = l3_obj.Router.get_objects(context, **filters) for router in routers: if not router['admin_state_up']: continue router_dict = routers_dict.get( router['id'], self._make_router_dict(router)) rules = self._get_metering_rules_dict(label) data = {'id': label['id'], 'rules': rules} router_dict[constants.METERING_LABEL_KEY].append(data) routers_dict[router['id']] = router_dict return list(routers_dict.values()) def get_sync_data_for_rule(self, context, rule): label = metering_objs.MeteringLabel.get_object( context, id=rule['metering_label_id']) if label.shared: routers = l3_obj.Router.get_objects(context) else: filters = {'id': [router.id for router in label.db_obj.routers]} routers = l3_obj.Router.get_objects(context, **filters) routers_dict = {} for router in routers: router_dict = routers_dict.get(router['id'], self._make_router_dict(router)) data = {'id': label['id'], 'rule': rule} router_dict[constants.METERING_LABEL_KEY].append(data) routers_dict[router['id']] = router_dict return list(routers_dict.values()) def get_sync_data_metering(self, context, label_id=None): filters = {'id': [label_id]} if label_id else {} labels = metering_objs.MeteringLabel.get_objects( context, **filters) return self._process_sync_metering_data(context, labels) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 
neutron-16.0.0.0b2.dev214/neutron/db/metering/metering_rpc.py0000644000175000017500000000413700000000000024071 0ustar00coreycorey00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api import extensions from neutron_lib import constants as consts from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from oslo_log import log as logging import oslo_messaging LOG = logging.getLogger(__name__) class MeteringRpcCallbacks(object): target = oslo_messaging.Target(version='1.0') def __init__(self, meter_plugin): self.meter_plugin = meter_plugin def get_sync_data_metering(self, context, **kwargs): l3_plugin = directory.get_plugin(plugin_constants.L3) if not l3_plugin: return metering_data = self.meter_plugin.get_sync_data_metering(context) host = kwargs.get('host') if not extensions.is_extension_supported( l3_plugin, consts.L3_AGENT_SCHEDULER_EXT_ALIAS) or not host: return metering_data else: agents = l3_plugin.get_l3_agents(context, filters={'host': [host]}) if not agents: LOG.error('Unable to find agent on host %s.', host) return router_ids = [] for agent in agents: routers = l3_plugin.list_routers_on_l3_agent(context, agent.id) router_ids += [router['id'] for router in routers['routers']] if not router_ids: return else: return [ router for router in metering_data if router['id'] in router_ids ] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2590442 neutron-16.0.0.0b2.dev214/neutron/db/migration/0000755000175000017500000000000000000000000021213 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/README0000644000175000017500000000021500000000000022071 0ustar00coreycorey00000000000000See doc/source/contributor/alembic_migrations.rst Rendered at https://docs.openstack.org/neutron/latest/contributor/alembic_migrations.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/__init__.py0000644000175000017500000002260200000000000023326 0ustar00coreycorey00000000000000# Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
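# --- Editor's illustration (not part of the original tree) -----------------
# The host scoping in MeteringRpcCallbacks.get_sync_data_metering above
# reduces to a pure function: given the full metering payload and the ids of
# the routers scheduled to the calling agent's host, keep only those routers.
# This mirrors the final list comprehension in the RPC handler.
def _demo_filter_metering_data(metering_data, router_ids):
    # metering_data: list of router dicts; router_ids: ids hosted on the
    # requesting agent's host.
    return [router for router in metering_data if router['id'] in router_ids]
# ---------------------------------------------------------------------------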
import contextlib import functools from alembic import context from alembic import op import sqlalchemy as sa from sqlalchemy.engine import reflection from neutron._i18n import _ # Neutron milestones for upgrade aliases LIBERTY = 'liberty' MITAKA = 'mitaka' NEWTON = 'newton' OCATA = 'ocata' PIKE = 'pike' QUEENS = 'queens' ROCKY = 'rocky' STEIN = 'stein' TRAIN = 'train' USSURI = 'ussuri' NEUTRON_MILESTONES = [ # earlier milestones were not tagged LIBERTY, MITAKA, NEWTON, OCATA, PIKE, QUEENS, ROCKY, STEIN, TRAIN, # Do not add the milestone until the end of the release ] def skip_if_offline(func): """Decorator for skipping migrations in offline mode.""" @functools.wraps(func) def decorator(*args, **kwargs): if context.is_offline_mode(): return return func(*args, **kwargs) return decorator def raise_if_offline(func): """Decorator for raising if a function is called in offline mode.""" @functools.wraps(func) def decorator(*args, **kwargs): if context.is_offline_mode(): raise RuntimeError(_("%s cannot be called while in offline mode") % func.__name__) return func(*args, **kwargs) return decorator @raise_if_offline def schema_has_table(table_name): """Check whether the specified table exists in the current schema. This method cannot be executed in offline mode. """ bind = op.get_bind() insp = sa.engine.reflection.Inspector.from_engine(bind) return table_name in insp.get_table_names() @raise_if_offline def schema_has_column(table_name, column_name): """Check whether the specified column exists in the current schema. This method cannot be executed in offline mode. """ bind = op.get_bind() insp = sa.engine.reflection.Inspector.from_engine(bind) # first check that the table exists if not schema_has_table(table_name): return # check whether column_name exists in table columns return column_name in [column['name'] for column in insp.get_columns(table_name)] @raise_if_offline def alter_column_if_exists(table_name, column_name, **kwargs): """Alter a column only if it exists in the schema.""" if schema_has_column(table_name, column_name): op.alter_column(table_name, column_name, **kwargs) @raise_if_offline def drop_table_if_exists(table_name): if schema_has_table(table_name): op.drop_table(table_name) @raise_if_offline def rename_table_if_exists(old_table_name, new_table_name): if schema_has_table(old_table_name): op.rename_table(old_table_name, new_table_name) def alter_enum_add_value(table, column, enum, nullable, server_default=None): '''If we need to expand Enum values for some column - for PostgreSQL this can be done with ALTER TYPE function. For MySQL, it can be done with ordinary alembic alter_column function. :param table:table name :param column: column name :param enum: sqlalchemy Enum with updated values :param nullable: existing nullable for column. 
:param server_default: existing or new server_default for the column ''' bind = op.get_bind() engine = bind.engine if engine.name == 'postgresql': values = {'name': enum.name, 'values': ", ".join("'" + i + "'" for i in enum.enums), 'column': column, 'table': table, 'server_default': server_default} if server_default is not None: op.execute("ALTER TABLE %(table)s ALTER COLUMN %(column)s" " DROP DEFAULT" % values) op.execute("ALTER TYPE %(name)s rename to old_%(name)s" % values) op.execute("CREATE TYPE %(name)s AS enum (%(values)s)" % values) op.execute("ALTER TABLE %(table)s ALTER COLUMN %(column)s TYPE " "%(name)s USING %(column)s::text::%(name)s " % values) if server_default is not None: op.execute("ALTER TABLE %(table)s ALTER COLUMN %(column)s" " SET DEFAULT '%(server_default)s'" % values) op.execute("DROP TYPE old_%(name)s" % values) else: op.alter_column(table, column, type_=enum, existing_nullable=nullable, server_default=server_default) def alter_enum(table, column, enum_type, nullable, server_default=None, do_drop=True, do_rename=True, do_create=True): """Alter a enum type column. Set the do_xx parameters only when the modified enum type is used by multiple columns. Else don't provide these parameters. :param server_default: existing or new server_default for the column :param do_drop: set to False when modified column is not the last one use this enum :param do_rename: set to False when modified column is not the first one use this enum :param do_create: set to False when modified column is not the first one use this enum """ bind = op.get_bind() engine = bind.engine if engine.name == 'postgresql': values = {'table': table, 'column': column, 'name': enum_type.name} if do_rename: op.execute("ALTER TYPE %(name)s RENAME TO old_%(name)s" % values) if do_create: enum_type.create(bind, checkfirst=False) op.execute("ALTER TABLE %(table)s RENAME COLUMN %(column)s TO " "old_%(column)s" % values) op.add_column(table, sa.Column(column, enum_type, nullable=nullable, server_default=server_default)) op.execute("UPDATE %(table)s SET %(column)s = " # nosec "old_%(column)s::text::%(name)s" % values) op.execute("ALTER TABLE %(table)s DROP COLUMN old_%(column)s" % values) if do_drop: op.execute("DROP TYPE old_%(name)s" % values) else: op.alter_column(table, column, type_=enum_type, existing_nullable=nullable, server_default=server_default) def create_table_if_not_exist_psql(table_name, values): if op.get_bind().engine.dialect.server_version_info < (9, 1, 0): op.execute("CREATE LANGUAGE plpgsql") op.execute("CREATE OR REPLACE FUNCTION execute(TEXT) RETURNS VOID AS $$" "BEGIN EXECUTE $1; END;" "$$ LANGUAGE plpgsql STRICT;") op.execute("CREATE OR REPLACE FUNCTION table_exist(TEXT) RETURNS bool as " "$$ SELECT exists(select 1 from pg_class where relname=$1);" "$$ language sql STRICT;") op.execute("SELECT execute($$CREATE TABLE %(name)s %(columns)s $$) " "WHERE NOT table_exist(%(name)r);" % {'name': table_name, 'columns': values}) def get_unique_constraints_map(table): inspector = reflection.Inspector.from_engine(op.get_bind()) return { tuple(sorted(cons['column_names'])): cons['name'] for cons in inspector.get_unique_constraints(table) } def remove_fk_unique_constraints(table, foreign_keys): unique_constraints_map = get_unique_constraints_map(table) for fk in foreign_keys: constraint_name = unique_constraints_map.get( tuple(sorted(fk['constrained_columns']))) if constraint_name: op.drop_constraint( constraint_name=constraint_name, table_name=table, type_="unique" ) def remove_foreign_keys(table, 
foreign_keys): for fk in foreign_keys: op.drop_constraint( constraint_name=fk['name'], table_name=table, type_='foreignkey' ) def create_foreign_keys(table, foreign_keys): for fk in foreign_keys: op.create_foreign_key( constraint_name=fk['name'], source_table=table, referent_table=fk['referred_table'], local_cols=fk['constrained_columns'], remote_cols=fk['referred_columns'], ondelete=fk['options'].get('ondelete') ) @contextlib.contextmanager def remove_fks_from_table(table, remove_unique_constraints=False): try: inspector = reflection.Inspector.from_engine(op.get_bind()) foreign_keys = inspector.get_foreign_keys(table) remove_foreign_keys(table, foreign_keys) if remove_unique_constraints: remove_fk_unique_constraints(table, foreign_keys) yield finally: create_foreign_keys(table, foreign_keys) def pk_on_alembic_version_table(): inspector = reflection.Inspector.from_engine(op.get_bind()) pk = inspector.get_pk_constraint('alembic_version') if not pk['constrained_columns']: op.create_primary_key(op.f('pk_alembic_version'), 'alembic_version', ['version_num']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic.ini0000644000175000017500000000177500000000000023322 0ustar00coreycorey00000000000000# A generic, single database configuration. [alembic] # path to migration scripts script_location = %(here)s/alembic_migrations # template used to generate migration files # file_template = %%(rev)s_%%(slug)s # set to 'true' to run the environment during # the 'revision' command, regardless of autogenerate # revision_environment = false # default to an empty string because the Neutron migration cli will # extract the correct value and set it programmatically before alembic is fully # invoked. sqlalchemy.url = # Logging configuration [loggers] keys = root,sqlalchemy,alembic [handlers] keys = console [formatters] keys = generic [logger_root] level = WARN handlers = console qualname = [logger_sqlalchemy] level = WARN handlers = qualname = sqlalchemy.engine [logger_alembic] level = INFO handlers = qualname = alembic [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatter_generic] format = %(levelname)-5.5s [%(name)s] %(message)s datefmt = %H:%M:%S ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2630444 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/0000755000175000017500000000000000000000000025043 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/__init__.py0000644000175000017500000000000000000000000027142 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/agent_init_ops.py0000644000175000017500000000357600000000000030432 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Initial operations for agent management extension # This module only manages the 'agents' table. Binding tables are created # in the modules for relevant resources from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'agents', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('agent_type', sa.String(length=255), nullable=False), sa.Column('binary', sa.String(length=255), nullable=False), sa.Column('topic', sa.String(length=255), nullable=False), sa.Column('host', sa.String(length=255), nullable=False), sa.Column('admin_state_up', sa.Boolean(), nullable=False, server_default=sa.sql.true()), sa.Column('created_at', sa.DateTime(), nullable=False), sa.Column('started_at', sa.DateTime(), nullable=False), sa.Column('heartbeat_timestamp', sa.DateTime(), nullable=False), sa.Column('description', sa.String(length=255), nullable=True), sa.Column('configurations', sa.String(length=4095), nullable=False), sa.Column('load', sa.Integer(), server_default='0', nullable=False), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('agent_type', 'host', name='uniq_agents0agent_type0host')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/brocade_init_ops.py0000644000175000017500000000511200000000000030717 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
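# --- Editor's illustration (not part of the original tree) -----------------
# The 'agents' schema created by agent_init_ops.upgrade() above, expressed
# (abridged to a few columns) as a plain SQLAlchemy Table, e.g. for ad-hoc
# inspection in a test database. The unique constraint is what guarantees at
# most one agent row per (agent_type, host) pair.
import sqlalchemy as _demo_sa

_demo_metadata = _demo_sa.MetaData()
_demo_agents = _demo_sa.Table(
    'agents', _demo_metadata,
    _demo_sa.Column('id', _demo_sa.String(36), primary_key=True),
    _demo_sa.Column('agent_type', _demo_sa.String(255), nullable=False),
    _demo_sa.Column('host', _demo_sa.String(255), nullable=False),
    _demo_sa.UniqueConstraint('agent_type', 'host',
                              name='uniq_agents0agent_type0host'))
# _demo_metadata.create_all(engine) would emit an equivalent CREATE TABLE.
# ---------------------------------------------------------------------------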
#
# Initial operations for the Brocade plugin

from alembic import op
import sqlalchemy as sa


def upgrade():
    op.create_table(
        'brocadenetworks',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('vlan', sa.String(length=10), nullable=True),
        sa.PrimaryKeyConstraint('id'))
    op.create_table(
        'brocadeports',
        sa.Column('port_id', sa.String(length=36), nullable=False,
                  server_default=''),
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.Column('admin_state_up', sa.Boolean(), nullable=False),
        sa.Column('physical_interface', sa.String(length=36), nullable=True),
        sa.Column('vlan_id', sa.String(length=36), nullable=True),
        sa.Column('tenant_id', sa.String(length=36), nullable=True),
        sa.ForeignKeyConstraint(['network_id'], ['brocadenetworks.id'], ),
        sa.PrimaryKeyConstraint('port_id'))
    op.create_table(
        'ml2_brocadenetworks',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('vlan', sa.String(length=10), nullable=True),
        sa.Column('segment_id', sa.String(length=36), nullable=True),
        sa.Column('network_type', sa.String(length=10), nullable=True),
        sa.Column('tenant_id', sa.String(length=255), nullable=True,
                  index=True),
        sa.PrimaryKeyConstraint('id'))
    op.create_table(
        'ml2_brocadeports',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.Column('admin_state_up', sa.Boolean(), nullable=False),
        sa.Column('physical_interface', sa.String(length=36), nullable=True),
        sa.Column('vlan_id', sa.String(length=36), nullable=True),
        sa.Column('tenant_id', sa.String(length=255), nullable=True,
                  index=True),
        sa.PrimaryKeyConstraint('id'),
        sa.ForeignKeyConstraint(['network_id'], ['ml2_brocadenetworks.id']))
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/cisco_init_ops.py0000644000175000017500000003667100000000000030432 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# # Initial schema operations for cisco plugin from alembic import op import sqlalchemy as sa segment_type = sa.Enum('vlan', 'overlay', 'trunk', 'multi-segment', name='segment_type') profile_type = sa.Enum('network', 'policy', name='profile_type') network_profile_type = sa.Enum('vlan', 'vxlan', name='network_profile_type') def upgrade(): op.create_table( 'cisco_policy_profiles', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table( 'cisco_network_profiles', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('segment_type', segment_type, nullable=False), sa.Column('sub_type', sa.String(length=255), nullable=True), sa.Column('segment_range', sa.String(length=255), nullable=True), sa.Column('multicast_ip_index', sa.Integer(), nullable=True, server_default='0'), sa.Column('multicast_ip_range', sa.String(length=255), nullable=True), sa.Column('physical_network', sa.String(length=255), nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table( 'cisco_n1kv_vxlan_allocations', sa.Column('vxlan_id', sa.Integer(), autoincrement=False, nullable=False), sa.Column('allocated', sa.Boolean(), nullable=False, server_default=sa.sql.false()), sa.Column('network_profile_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['network_profile_id'], ['cisco_network_profiles.id'], ondelete='CASCADE', name='cisco_n1kv_vxlan_allocations_ibfk_1'), sa.PrimaryKeyConstraint('vxlan_id')) op.create_table( 'cisco_n1kv_vlan_allocations', sa.Column('physical_network', sa.String(length=64), nullable=False), sa.Column('vlan_id', sa.Integer(), autoincrement=False, nullable=False), sa.Column('allocated', sa.Boolean(), autoincrement=False, nullable=False, server_default=sa.sql.false()), sa.Column('network_profile_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('physical_network', 'vlan_id'), sa.ForeignKeyConstraint(['network_profile_id'], ['cisco_network_profiles.id'], ondelete='CASCADE', name='cisco_n1kv_vlan_allocations_ibfk_1')) op.create_table( 'cisco_credentials', sa.Column('credential_id', sa.String(length=255), nullable=True), sa.Column('credential_name', sa.String(length=255), nullable=False), sa.Column('user_name', sa.String(length=255), nullable=True), sa.Column('password', sa.String(length=255), nullable=True), sa.Column('type', sa.String(length=255), nullable=True), sa.PrimaryKeyConstraint('credential_name')) op.create_table( 'cisco_qos_policies', sa.Column('qos_id', sa.String(length=255), nullable=True), sa.Column('tenant_id', sa.String(length=255), nullable=False), sa.Column('qos_name', sa.String(length=255), nullable=False), sa.Column('qos_desc', sa.String(length=255), nullable=True), sa.PrimaryKeyConstraint('tenant_id', 'qos_name')) op.create_table( 'cisco_n1kv_profile_bindings', sa.Column('profile_type', profile_type, nullable=True), sa.Column('tenant_id', sa.String(length=36), nullable=False, server_default='TENANT_ID_NOT_SET'), sa.Column('profile_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('tenant_id', 'profile_id')) op.create_table( 'cisco_n1kv_vmnetworks', sa.Column('name', sa.String(length=80), nullable=False), sa.Column('profile_id', sa.String(length=36), nullable=True), sa.Column('network_id', sa.String(length=36), nullable=True), sa.Column('port_count', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['profile_id'], ['cisco_policy_profiles.id'], ), 
sa.PrimaryKeyConstraint('name')) op.create_table( 'cisco_n1kv_trunk_segments', sa.Column('trunk_segment_id', sa.String(length=36), nullable=False), sa.Column('segment_id', sa.String(length=36), nullable=False), sa.Column('dot1qtag', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['trunk_segment_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('trunk_segment_id', 'segment_id', 'dot1qtag')) op.create_table( 'cisco_provider_networks', sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('network_type', sa.String(length=255), nullable=False), sa.Column('segmentation_id', sa.Integer(), nullable=False), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id')) op.create_table( 'cisco_n1kv_multi_segments', sa.Column('multi_segment_id', sa.String(length=36), nullable=False), sa.Column('segment1_id', sa.String(length=36), nullable=False), sa.Column('segment2_id', sa.String(length=36), nullable=False), sa.Column('encap_profile_name', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['multi_segment_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('multi_segment_id', 'segment1_id', 'segment2_id')) op.create_table( 'cisco_n1kv_network_bindings', sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('network_type', sa.String(length=32), nullable=False), sa.Column('physical_network', sa.String(length=64), nullable=True), sa.Column('segmentation_id', sa.Integer(), nullable=True), sa.Column('multicast_ip', sa.String(length=32), nullable=True), sa.Column('profile_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['profile_id'], ['cisco_network_profiles.id']), sa.PrimaryKeyConstraint('network_id')) op.create_table( 'cisco_n1kv_port_bindings', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('profile_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['profile_id'], ['cisco_policy_profiles.id']), sa.PrimaryKeyConstraint('port_id')) op.create_table( 'cisco_csr_identifier_map', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('ipsec_site_conn_id', sa.String(length=36), primary_key=True), sa.Column('csr_tunnel_id', sa.Integer(), nullable=False), sa.Column('csr_ike_policy_id', sa.Integer(), nullable=False), sa.Column('csr_ipsec_policy_id', sa.Integer(), nullable=False), sa.ForeignKeyConstraint(['ipsec_site_conn_id'], ['ipsec_site_connections.id'], ondelete='CASCADE') ) op.create_table( 'cisco_ml2_apic_host_links', sa.Column('host', sa.String(length=255), nullable=False), sa.Column('ifname', sa.String(length=64), nullable=False), sa.Column('ifmac', sa.String(length=32), nullable=True), sa.Column('swid', sa.String(length=32), nullable=False), sa.Column('module', sa.String(length=32), nullable=False), sa.Column('port', sa.String(length=32), nullable=False), sa.PrimaryKeyConstraint('host', 'ifname')) op.create_table( 'cisco_ml2_apic_names', sa.Column('neutron_id', sa.String(length=36), nullable=False), sa.Column('neutron_type', sa.String(length=32), nullable=False), sa.Column('apic_name', sa.String(length=255), nullable=False), sa.PrimaryKeyConstraint('neutron_id', 'neutron_type')) op.create_table( 'cisco_ml2_apic_contracts', sa.Column('tenant_id', sa.String(length=255), index=True), sa.Column('router_id', sa.String(length=36), 
nullable=False), sa.ForeignKeyConstraint(['router_id'], ['routers.id']), sa.PrimaryKeyConstraint('router_id')) op.create_table( 'cisco_hosting_devices', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('complementary_id', sa.String(length=36), nullable=True), sa.Column('device_id', sa.String(length=255), nullable=True), sa.Column('admin_state_up', sa.Boolean(), nullable=False), sa.Column('management_port_id', sa.String(length=36), nullable=True), sa.Column('protocol_port', sa.Integer(), nullable=True), sa.Column('cfg_agent_id', sa.String(length=36), nullable=True), sa.Column('created_at', sa.DateTime(), nullable=False), sa.Column('status', sa.String(length=16), nullable=True), sa.ForeignKeyConstraint(['cfg_agent_id'], ['agents.id'], ), sa.ForeignKeyConstraint(['management_port_id'], ['ports.id'], ondelete='SET NULL'), sa.PrimaryKeyConstraint('id') ) op.create_table( 'cisco_port_mappings', sa.Column('logical_resource_id', sa.String(length=36), nullable=False), sa.Column('logical_port_id', sa.String(length=36), nullable=False), sa.Column('port_type', sa.String(length=32), nullable=True), sa.Column('network_type', sa.String(length=32), nullable=True), sa.Column('hosting_port_id', sa.String(length=36), nullable=True), sa.Column('segmentation_id', sa.Integer(), autoincrement=False, nullable=True), sa.ForeignKeyConstraint(['hosting_port_id'], ['ports.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['logical_port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('logical_resource_id', 'logical_port_id') ) op.create_table( 'cisco_router_mappings', sa.Column('router_id', sa.String(length=36), nullable=False), sa.Column('auto_schedule', sa.Boolean(), nullable=False), sa.Column('hosting_device_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['hosting_device_id'], ['cisco_hosting_devices.id'], ondelete='SET NULL'), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('router_id') ) op.create_table( 'cisco_ml2_n1kv_policy_profiles', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=False), sa.Column('vsm_ip', sa.String(length=16), nullable=False), sa.PrimaryKeyConstraint('id', 'vsm_ip'), ) op.create_table( 'cisco_ml2_n1kv_network_profiles', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=False), sa.Column('segment_type', network_profile_type, nullable=False), sa.Column('segment_range', sa.String(length=255), nullable=True), sa.Column('multicast_ip_index', sa.Integer(), nullable=True), sa.Column('multicast_ip_range', sa.String(length=255), nullable=True), sa.Column('sub_type', sa.String(length=255), nullable=True), sa.Column('physical_network', sa.String(length=255), nullable=True), sa.PrimaryKeyConstraint('id'), ) op.create_table( 'cisco_ml2_n1kv_port_bindings', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('profile_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('port_id'), ) op.create_table( 'cisco_ml2_n1kv_network_bindings', sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('network_type', sa.String(length=32), nullable=False), sa.Column('segmentation_id', sa.Integer(), autoincrement=False), sa.Column('profile_id', sa.String(length=36), nullable=False), 
sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['profile_id'], ['cisco_ml2_n1kv_network_profiles.id']), sa.PrimaryKeyConstraint('network_id') ) op.create_table( 'cisco_ml2_n1kv_vxlan_allocations', sa.Column('vxlan_id', sa.Integer(), autoincrement=False, nullable=False), sa.Column('allocated', sa.Boolean(), nullable=False), sa.Column('network_profile_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['network_profile_id'], ['cisco_ml2_n1kv_network_profiles.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('vxlan_id') ) op.create_table( 'cisco_ml2_n1kv_vlan_allocations', sa.Column('physical_network', sa.String(length=64), nullable=False), sa.Column('vlan_id', sa.Integer(), autoincrement=False, nullable=False), sa.Column('allocated', sa.Boolean(), autoincrement=False, nullable=False), sa.Column('network_profile_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['network_profile_id'], ['cisco_ml2_n1kv_network_profiles.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('physical_network', 'vlan_id') ) op.create_table( 'cisco_ml2_n1kv_profile_bindings', sa.Column('profile_type', profile_type, nullable=True), sa.Column('tenant_id', sa.String(length=36), nullable=False, server_default='tenant_id_not_set'), sa.Column('profile_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('tenant_id', 'profile_id') ) op.create_table( 'ml2_ucsm_port_profiles', sa.Column('vlan_id', sa.Integer(), nullable=False), sa.Column('profile_id', sa.String(length=64), nullable=False), sa.Column('created_on_ucs', sa.Boolean(), nullable=False), sa.PrimaryKeyConstraint('vlan_id') ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/core_init_ops.py0000644000175000017500000001554400000000000030262 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
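# --- Editor's illustration (not part of the original tree) -----------------
# The Cisco tables above use both foreign-key deletion behaviours found in
# these migrations: with ondelete='CASCADE' the child row disappears with its
# parent, while ondelete='SET NULL' nulls the referencing column instead (so
# that column must be nullable, as with
# cisco_hosting_devices.management_port_id). Both in one hypothetical table:
import sqlalchemy as _demo_sa


def _demo_child_table(metadata):
    # 'parents' is a hypothetical referenced table used only for the sketch.
    return _demo_sa.Table(
        'demo_children', metadata,
        _demo_sa.Column('id', _demo_sa.String(36), primary_key=True),
        # Row is deleted together with its parent:
        _demo_sa.Column('owner_id', _demo_sa.String(36),
                        _demo_sa.ForeignKey('parents.id',
                                            ondelete='CASCADE')),
        # Column is nulled when the parent goes away, hence nullable:
        _demo_sa.Column('helper_id', _demo_sa.String(36),
                        _demo_sa.ForeignKey('parents.id',
                                            ondelete='SET NULL'),
                        nullable=True))
# ---------------------------------------------------------------------------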
# # Initial operations for core resources from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'networks', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('status', sa.String(length=16), nullable=True), sa.Column('admin_state_up', sa.Boolean(), nullable=True), sa.Column('shared', sa.Boolean(), nullable=True), sa.Column('mtu', sa.Integer(), nullable=True), sa.Column('vlan_transparent', sa.Boolean(), nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table( 'ports', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('mac_address', sa.String(length=32), nullable=False), sa.Column('admin_state_up', sa.Boolean(), nullable=False), sa.Column('status', sa.String(length=16), nullable=False), sa.Column('device_id', sa.String(length=255), nullable=False), sa.Column('device_owner', sa.String(length=255), nullable=False), sa.ForeignKeyConstraint(['network_id'], ['networks.id']), sa.UniqueConstraint('network_id', 'mac_address', name='uniq_ports0network_id0mac_address'), sa.PrimaryKeyConstraint('id'), sa.Index(op.f('ix_ports_network_id_device_owner'), 'network_id', 'device_owner'), sa.Index(op.f('ix_ports_network_id_mac_address'), 'network_id', 'mac_address')) op.create_table( 'subnets', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('network_id', sa.String(length=36), nullable=True), sa.Column('ip_version', sa.Integer(), nullable=False), sa.Column('cidr', sa.String(length=64), nullable=False), sa.Column('gateway_ip', sa.String(length=64), nullable=True), sa.Column('enable_dhcp', sa.Boolean(), nullable=True), sa.Column('shared', sa.Boolean(), nullable=True), sa.Column('ipv6_ra_mode', sa.Enum('slaac', 'dhcpv6-stateful', 'dhcpv6-stateless', name='ipv6_ra_modes'), nullable=True), sa.Column('ipv6_address_mode', sa.Enum('slaac', 'dhcpv6-stateful', 'dhcpv6-stateless', name='ipv6_address_modes'), nullable=True), sa.Column('subnetpool_id', sa.String(length=36), nullable=True, index=True), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ), sa.PrimaryKeyConstraint('id')) op.create_table( 'dnsnameservers', sa.Column('address', sa.String(length=128), nullable=False), sa.Column('subnet_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('address', 'subnet_id')) op.create_table( 'ipallocationpools', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('subnet_id', sa.String(length=36), nullable=True), sa.Column('first_ip', sa.String(length=64), nullable=False), sa.Column('last_ip', sa.String(length=64), nullable=False), sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id')) op.create_table( 'subnetroutes', sa.Column('destination', sa.String(length=64), nullable=False), sa.Column('nexthop', sa.String(length=64), nullable=False), sa.Column('subnet_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('destination', 'nexthop', 'subnet_id')) 
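    # Editor's note: the 'ipallocations' table created next keys rows by
    # (ip_address, subnet_id, network_id) rather than by port, and leaves
    # port_id nullable, so an allocation row can exist independently of a
    # port binding; deleting the port, subnet or network removes the
    # allocation through the three ondelete='CASCADE' foreign keys.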
op.create_table( 'ipallocations', sa.Column('port_id', sa.String(length=36), nullable=True), sa.Column('ip_address', sa.String(length=64), nullable=False), sa.Column('subnet_id', sa.String(length=36), nullable=False), sa.Column('network_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('ip_address', 'subnet_id', 'network_id')) op.create_table( 'ipavailabilityranges', sa.Column('allocation_pool_id', sa.String(length=36), nullable=False), sa.Column('first_ip', sa.String(length=64), nullable=False), sa.Column('last_ip', sa.String(length=64), nullable=False), sa.ForeignKeyConstraint(['allocation_pool_id'], ['ipallocationpools.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('allocation_pool_id', 'first_ip', 'last_ip'), sa.UniqueConstraint( 'first_ip', 'allocation_pool_id', name='uniq_ipavailabilityranges0first_ip0allocation_pool_id'), sa.UniqueConstraint( 'last_ip', 'allocation_pool_id', name='uniq_ipavailabilityranges0last_ip0allocation_pool_id')) op.create_table( 'networkdhcpagentbindings', sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('dhcp_agent_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['dhcp_agent_id'], ['agents.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id', 'dhcp_agent_id')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/dvr_init_opts.py0000644000175000017500000000507300000000000030305 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
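# --- Editor's illustration (not part of the original tree) -----------------
# networkdhcpagentbindings above is a bare association table between
# networks and agents. Assuming Table objects reflected from the schema just
# created (the parameter names here are hypothetical), listing the DHCP
# agents serving one network is a single join, written in the SQLAlchemy
# 1.3-era Core style this tree targets:
import sqlalchemy as _demo_sa


def _demo_dhcp_agents_for_network(agents, bindings, network_id):
    # agents / bindings: sqlalchemy.Table objects for 'agents' and
    # 'networkdhcpagentbindings'.
    return _demo_sa.select([agents.c.host]).select_from(
        agents.join(bindings, bindings.c.dhcp_agent_id == agents.c.id)
    ).where(bindings.c.network_id == network_id)
# ---------------------------------------------------------------------------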
# # Initial operations for dvr from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'dvr_host_macs', sa.Column('host', sa.String(length=255), nullable=False), sa.Column('mac_address', sa.String(length=32), nullable=False, unique=True), sa.PrimaryKeyConstraint('host') ) op.create_table( 'ml2_dvr_port_bindings', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('host', sa.String(length=255), nullable=False), sa.Column('router_id', sa.String(length=36), nullable=True), sa.Column('vif_type', sa.String(length=64), nullable=False), sa.Column('vif_details', sa.String(length=4095), nullable=False, server_default=''), sa.Column('vnic_type', sa.String(length=64), nullable=False, server_default='normal'), sa.Column('profile', sa.String(length=4095), nullable=False, server_default=''), sa.Column(u'status', sa.String(16), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('port_id', 'host') ) op.create_table( 'csnat_l3_agent_bindings', sa.Column('router_id', sa.String(length=36), nullable=False), sa.Column('l3_agent_id', sa.String(length=36), nullable=False), sa.Column('host_id', sa.String(length=255), nullable=True), sa.Column('csnat_gw_port_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['l3_agent_id'], ['agents.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['csnat_gw_port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('router_id', 'l3_agent_id') ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/env.py0000644000175000017500000000713600000000000026214 0ustar00coreycorey00000000000000# Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from alembic import context from neutron_lib.db import model_base from oslo_config import cfg import sqlalchemy as sa from sqlalchemy import event # noqa from neutron.db.migration.alembic_migrations import external from neutron.db.migration import autogen from neutron.db.migration.connection import DBConnection from neutron.db.migration.models import head # noqa try: # NOTE(mriedem): This is to register the DB2 alembic code which # is an optional runtime dependency. from ibm_db_alembic.ibm_db import IbmDbImpl # noqa # pylint: disable=unused-import except ImportError: pass MYSQL_ENGINE = None # this is the Alembic Config object, which provides # access to the values within the .ini file in use. 
config = context.config neutron_config = config.neutron_config # set the target for 'autogenerate' support target_metadata = model_base.BASEV2.metadata def set_mysql_engine(): try: mysql_engine = neutron_config.command.mysql_engine except cfg.NoSuchOptError: mysql_engine = None global MYSQL_ENGINE MYSQL_ENGINE = (mysql_engine or model_base.BASEV2.__table_args__['mysql_engine']) def include_object(object_, name, type_, reflected, compare_to): if type_ == 'table' and name in external.TABLES: return False elif type_ == 'index' and reflected and name.startswith("idx_autoinc_"): # skip indexes created by SQLAlchemy autoincrement=True # on composite PK integer columns return False else: return True def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with either a URL or an Engine. Calls to context.execute() here emit the given string to the script output. """ set_mysql_engine() kwargs = dict() if neutron_config.database.connection: kwargs['url'] = neutron_config.database.connection else: kwargs['dialect_name'] = neutron_config.database.engine kwargs['include_object'] = include_object context.configure(**kwargs) with context.begin_transaction(): context.run_migrations() @event.listens_for(sa.Table, 'after_parent_attach') def set_storage_engine(target, parent): if MYSQL_ENGINE: target.kwargs['mysql_engine'] = MYSQL_ENGINE def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ set_mysql_engine() connection = config.attributes.get('connection') with DBConnection(neutron_config.database.connection, connection) as conn: context.configure( connection=conn, target_metadata=target_metadata, include_object=include_object, process_revision_directives=autogen.process_revision_directives ) with context.begin_transaction(): context.run_migrations() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/external.py0000644000175000017500000001020300000000000027233 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # These tables are in the neutron database, but their models have moved # to separate repositories. We skip the migration checks for these tables. VPNAAS_TABLES = ['vpnservices', 'ipsecpolicies', 'ipsecpeercidrs', 'ipsec_site_connections', 'cisco_csr_identifier_map', 'ikepolicies'] # Neutron-lbaas is retired, but we need to keep this for the models until # we decide to remove the tables. 
LBAAS_TABLES = ['vips', 'sessionpersistences', 'pools', 'healthmonitors', 'poolstatisticss', 'members', 'poolloadbalanceragentbindings', 'poolmonitorassociations'] FWAAS_TABLES = ['firewall_rules', 'firewalls', 'firewall_policies'] # Arista ML2 driver Models moved to openstack/networking-arista REPO_ARISTA_TABLES = [ 'arista_provisioned_nets', 'arista_provisioned_vms', 'arista_provisioned_tenants', ] # BGP models in openstack/neutron-dynamic-routing REPO_NEUTRON_DYNAMIC_ROUTING_TABLES = [ 'bgp_speakers', 'bgp_peers', 'bgp_speaker_network_bindings', 'bgp_speaker_peer_bindings', 'bgp_speaker_dragent_bindings', ] # Models moved to openstack/networking-cisco REPO_CISCO_TABLES = [ 'cisco_ml2_apic_contracts', 'cisco_ml2_apic_names', 'cisco_ml2_apic_host_links', 'cisco_ml2_n1kv_policy_profiles', 'cisco_ml2_n1kv_network_profiles', 'cisco_ml2_n1kv_port_bindings', 'cisco_ml2_n1kv_network_bindings', 'cisco_ml2_n1kv_vxlan_allocations', 'cisco_ml2_n1kv_vlan_allocations', 'cisco_ml2_n1kv_profile_bindings', 'cisco_ml2_nexusport_bindings', 'cisco_ml2_nexus_nve', 'ml2_nexus_vxlan_allocations', 'ml2_nexus_vxlan_mcast_groups', 'ml2_ucsm_port_profiles', 'cisco_hosting_devices', 'cisco_port_mappings', 'cisco_router_mappings', ] # VMware-NSX models moved to openstack/vmware-nsx REPO_VMWARE_TABLES = [ 'tz_network_bindings', 'neutron_nsx_network_mappings', 'neutron_nsx_security_group_mappings', 'neutron_nsx_port_mappings', 'neutron_nsx_router_mappings', 'multi_provider_networks', 'networkconnections', 'networkgatewaydevicereferences', 'networkgatewaydevices', 'networkgateways', 'maclearningstates', 'qosqueues', 'portqueuemappings', 'networkqueuemappings', 'lsn_port', 'lsn', 'nsxv_router_bindings', 'nsxv_edge_vnic_bindings', 'nsxv_edge_dhcp_static_bindings', 'nsxv_internal_networks', 'nsxv_internal_edges', 'nsxv_security_group_section_mappings', 'nsxv_rule_mappings', 'nsxv_port_vnic_mappings', 'nsxv_router_ext_attributes', 'nsxv_tz_network_bindings', 'nsxv_port_index_mappings', 'nsxv_firewall_rule_bindings', 'nsxv_spoofguard_policy_network_mappings', 'nsxv_vdr_dhcp_bindings', 'vcns_router_bindings', ] # Brocade models are in openstack/networking-brocade REPO_BROCADE_TABLES = [ 'brocadenetworks', 'brocadeports', 'ml2_brocadenetworks', 'ml2_brocadeports', ] # BigSwitch models are in openstack/networking-bigswitch REPO_BIGSWITCH_TABLES = [ 'consistencyhashes', 'routerrules', 'nexthops', ] # Nuage models are in github.com/nuagenetworks/nuage-openstack-neutron REPO_NUAGE_TABLES = [ 'nuage_net_partitions', 'nuage_net_partition_router_mapping', 'nuage_provider_net_bindings', 'nuage_subnet_l2dom_mapping', ] TABLES = (FWAAS_TABLES + LBAAS_TABLES + VPNAAS_TABLES + REPO_ARISTA_TABLES + REPO_NEUTRON_DYNAMIC_ROUTING_TABLES + REPO_CISCO_TABLES + REPO_VMWARE_TABLES + REPO_BROCADE_TABLES + REPO_BIGSWITCH_TABLES + REPO_NUAGE_TABLES) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/firewall_init_ops.py0000644000175000017500000000672000000000000031133 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Initial schema operations for firewall service plugin from alembic import op import sqlalchemy as sa action_types = sa.Enum('allow', 'deny', name='firewallrules_action') def upgrade(): op.create_table( 'firewall_policies', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=1024), nullable=True), sa.Column('shared', sa.Boolean(), nullable=True), sa.Column('audited', sa.Boolean(), nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table( 'firewalls', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=1024), nullable=True), sa.Column('shared', sa.Boolean(), nullable=True), sa.Column('admin_state_up', sa.Boolean(), nullable=True), sa.Column('status', sa.String(length=16), nullable=True), sa.Column('firewall_policy_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['firewall_policy_id'], ['firewall_policies.id'], name='firewalls_ibfk_1'), sa.PrimaryKeyConstraint('id')) op.create_table( 'firewall_rules', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=1024), nullable=True), sa.Column('firewall_policy_id', sa.String(length=36), nullable=True), sa.Column('shared', sa.Boolean(), nullable=True), sa.Column('protocol', sa.String(length=40), nullable=True), sa.Column('ip_version', sa.Integer(), nullable=False), sa.Column('source_ip_address', sa.String(length=46), nullable=True), sa.Column('destination_ip_address', sa.String(length=46), nullable=True), sa.Column('source_port_range_min', sa.Integer(), nullable=True), sa.Column('source_port_range_max', sa.Integer(), nullable=True), sa.Column('destination_port_range_min', sa.Integer(), nullable=True), sa.Column('destination_port_range_max', sa.Integer(), nullable=True), sa.Column('action', action_types, nullable=True), sa.Column('enabled', sa.Boolean(), nullable=True), sa.Column('position', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['firewall_policy_id'], ['firewall_policies.id'], name='firewall_rules_ibfk_1'), sa.PrimaryKeyConstraint('id')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/l3_init_ops.py0000644000175000017500000001553500000000000027650 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Initial operations for l3 extension from alembic import op import sqlalchemy as sa l3_ha_states = sa.Enum('active', 'standby', name='l3_ha_states') def create_routerroutes(): op.create_table( 'routerroutes', sa.Column('destination', sa.String(length=64), nullable=False), sa.Column('nexthop', sa.String(length=64), nullable=False), sa.Column('router_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('destination', 'nexthop', 'router_id')) def upgrade(): op.create_table( 'externalnetworks', sa.Column('network_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id')) op.create_table( 'routers', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('status', sa.String(length=16), nullable=True), sa.Column('admin_state_up', sa.Boolean(), nullable=True), sa.Column('gw_port_id', sa.String(length=36), nullable=True), sa.Column('enable_snat', sa.Boolean(), nullable=False, server_default=sa.sql.true()), sa.ForeignKeyConstraint(['gw_port_id'], ['ports.id'], ), sa.PrimaryKeyConstraint('id')) op.create_table( 'floatingips', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('floating_ip_address', sa.String(length=64), nullable=False), sa.Column('floating_network_id', sa.String(length=36), nullable=False), sa.Column('floating_port_id', sa.String(length=36), nullable=False), sa.Column('fixed_port_id', sa.String(length=36), nullable=True), sa.Column('fixed_ip_address', sa.String(length=64), nullable=True), sa.Column('router_id', sa.String(length=36), nullable=True), sa.Column('last_known_router_id', sa.String(length=36), nullable=True), sa.Column('status', sa.String(length=16), nullable=True), sa.ForeignKeyConstraint(['fixed_port_id'], ['ports.id'], ), sa.ForeignKeyConstraint(['floating_port_id'], ['ports.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ), sa.PrimaryKeyConstraint('id')) create_routerroutes() op.create_table( 'routerl3agentbindings', sa.Column('router_id', sa.String(length=36), nullable=True), sa.Column('l3_agent_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['l3_agent_id'], ['agents.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('router_id', 'l3_agent_id')) op.create_table( 'router_extra_attributes', sa.Column('router_id', sa.String(length=36), nullable=False), sa.Column('distributed', sa.Boolean(), nullable=False, server_default=sa.sql.false()), sa.Column('service_router', sa.Boolean(), nullable=False, server_default=sa.sql.false()), sa.Column('ha', sa.Boolean(), nullable=False, server_default=sa.sql.false()), sa.Column('ha_vr_id', sa.Integer()), sa.ForeignKeyConstraint( ['router_id'], ['routers.id'], ondelete='CASCADE'), 
sa.PrimaryKeyConstraint('router_id') ) op.create_table('ha_router_agent_port_bindings', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('router_id', sa.String(length=36), nullable=False), sa.Column('l3_agent_id', sa.String(length=36), nullable=True), sa.Column('state', l3_ha_states, server_default='standby'), sa.PrimaryKeyConstraint('port_id'), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['l3_agent_id'], ['agents.id'], ondelete='CASCADE')) op.create_table('ha_router_networks', sa.Column('tenant_id', sa.String(length=255), nullable=False, primary_key=True), sa.Column('network_id', sa.String(length=36), nullable=False, primary_key=True), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE')) op.create_table('ha_router_vrid_allocations', sa.Column('network_id', sa.String(length=36), nullable=False, primary_key=True), sa.Column('vr_id', sa.Integer(), nullable=False, primary_key=True), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE')) op.create_table( 'routerports', sa.Column('router_id', sa.String(length=36), nullable=False), sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('port_type', sa.String(length=255)), sa.PrimaryKeyConstraint('router_id', 'port_id'), sa.ForeignKeyConstraint( ['router_id'], ['routers.id'], ondelete='CASCADE' ), sa.ForeignKeyConstraint( ['port_id'], ['ports.id'], ondelete='CASCADE' ), ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/lb_init_ops.py0000644000175000017500000000274000000000000027721 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
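# NOTE: 'network_states' and 'network_bindings' below are legacy Linux Bridge plugin tables; the Liberty contract migration 5498d17be016 ('Drop legacy OVS and LB plugin tables', further down) removes them again.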
# # Initial operations for the linuxbridge plugin from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'network_states', sa.Column('physical_network', sa.String(length=64), nullable=False), sa.Column('vlan_id', sa.Integer(), autoincrement=False, nullable=False), sa.Column('allocated', sa.Boolean(), nullable=False), sa.PrimaryKeyConstraint('physical_network', 'vlan_id')) op.create_table( 'network_bindings', sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('physical_network', sa.String(length=64), nullable=True), sa.Column('vlan_id', sa.Integer(), nullable=False), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/loadbalancer_init_ops.py0000644000175000017500000001561100000000000031734 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Initial schema operations for the load balancer service plugin from alembic import op import sqlalchemy as sa protocols = sa.Enum('HTTP', 'HTTPS', 'TCP', name='lb_protocols') session_persistence_type = sa.Enum('SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE', name='sesssionpersistences_type') lb_methods = sa.Enum('ROUND_ROBIN', 'LEAST_CONNECTIONS', 'SOURCE_IP', name='pools_lb_method') health_monitor_type = sa.Enum('PING', 'TCP', 'HTTP', 'HTTPS', name='healthmontiors_type') def upgrade(): op.create_table( 'healthmonitors', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('type', health_monitor_type, nullable=False), sa.Column('delay', sa.Integer(), nullable=False), sa.Column('timeout', sa.Integer(), nullable=False), sa.Column('max_retries', sa.Integer(), nullable=False), sa.Column('http_method', sa.String(length=16), nullable=True), sa.Column('url_path', sa.String(length=255), nullable=True), sa.Column('expected_codes', sa.String(length=64), nullable=True), sa.Column('admin_state_up', sa.Boolean(), nullable=False), sa.PrimaryKeyConstraint('id')) op.create_table( 'vips', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('status', sa.String(length=16), nullable=False), sa.Column('status_description', sa.String(length=255), nullable=True), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=255), nullable=True), sa.Column('port_id', sa.String(length=36), nullable=True), sa.Column('protocol_port', sa.Integer(), nullable=False), sa.Column('protocol', protocols, nullable=False), sa.Column('pool_id', sa.String(length=36), nullable=False), sa.Column('admin_state_up', sa.Boolean(), nullable=False), sa.Column('connection_limit', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ), 
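# A VIP fronts exactly one pool; the unique constraint on pool_id just below enforces the one-to-one vip <-> pool mapping.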
sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('pool_id')) op.create_table( 'pools', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('status', sa.String(length=16), nullable=False), sa.Column('status_description', sa.String(length=255), nullable=True), sa.Column('vip_id', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=255), nullable=True), sa.Column('subnet_id', sa.String(length=36), nullable=False), sa.Column('protocol', protocols, nullable=False), sa.Column('lb_method', lb_methods, nullable=False), sa.Column('admin_state_up', sa.Boolean(), nullable=False), sa.ForeignKeyConstraint(['vip_id'], ['vips.id'], ), sa.PrimaryKeyConstraint('id')) op.create_table( 'sessionpersistences', sa.Column('vip_id', sa.String(length=36), nullable=False), sa.Column('type', session_persistence_type, nullable=False), sa.Column('cookie_name', sa.String(length=1024), nullable=True), sa.ForeignKeyConstraint(['vip_id'], ['vips.id'], ), sa.PrimaryKeyConstraint('vip_id')) op.create_table( 'poolloadbalanceragentbindings', sa.Column('pool_id', sa.String(length=36), nullable=False), sa.Column('agent_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['pool_id'], ['pools.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['agent_id'], ['agents.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('pool_id')) op.create_table( 'members', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('status', sa.String(length=16), nullable=False), sa.Column('status_description', sa.String(length=255), nullable=True), sa.Column('pool_id', sa.String(length=36), nullable=False), sa.Column('address', sa.String(length=64), nullable=False), sa.Column('protocol_port', sa.Integer(), nullable=False), sa.Column('weight', sa.Integer(), nullable=False), sa.Column('admin_state_up', sa.Boolean(), nullable=False), sa.ForeignKeyConstraint(['pool_id'], ['pools.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('pool_id', 'address', 'protocol_port', name='uniq_member0pool_id0address0port')) op.create_table( 'poolmonitorassociations', sa.Column('status', sa.String(length=16), nullable=False), sa.Column('status_description', sa.String(length=255), nullable=True), sa.Column('pool_id', sa.String(length=36), nullable=False), sa.Column('monitor_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['pool_id'], ['pools.id'], ), sa.ForeignKeyConstraint(['monitor_id'], ['healthmonitors.id'], ), sa.PrimaryKeyConstraint('pool_id', 'monitor_id')) op.create_table( 'poolstatisticss', sa.Column('pool_id', sa.String(length=36), nullable=False), sa.Column('bytes_in', sa.BigInteger(), nullable=False), sa.Column('bytes_out', sa.BigInteger(), nullable=False), sa.Column('active_connections', sa.BigInteger(), nullable=False), sa.Column('total_connections', sa.BigInteger(), nullable=False), sa.ForeignKeyConstraint(['pool_id'], ['pools.id'], ), sa.PrimaryKeyConstraint('pool_id')) op.create_table( u'embrane_pool_port', sa.Column(u'pool_id', sa.String(length=36), nullable=False), sa.Column(u'port_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['pool_id'], [u'pools.id'], name=u'embrane_pool_port_ibfk_1'), sa.ForeignKeyConstraint(['port_id'], [u'ports.id'], name=u'embrane_pool_port_ibfk_2'), sa.PrimaryKeyConstraint(u'pool_id')) 
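# NOTE: this module is not an Alembic revision by itself; the consolidated 'kilo' baseline revision (kilo_initial.py, below) imports it and calls upgrade(). The oddly spelled identifiers above ('sesssionpersistences_type', 'healthmontiors_type', 'poolstatisticss') look like historical typos but are kept verbatim so that freshly generated schemas keep matching existing databases.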
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/metering_init_ops.py0000644000175000017500000000374500000000000031144 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Initial operations for the metering service plugin from alembic import op import sqlalchemy as sa direction = sa.Enum('ingress', 'egress', name='meteringlabels_direction') def create_meteringlabels(): op.create_table( 'meteringlabels', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=1024), nullable=True), sa.Column('shared', sa.Boolean(), server_default=sa.sql.false(), nullable=True), sa.PrimaryKeyConstraint('id')) def upgrade(): create_meteringlabels() op.create_table( 'meteringlabelrules', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('direction', direction, nullable=True), sa.Column('remote_ip_prefix', sa.String(length=64), nullable=True), sa.Column('metering_label_id', sa.String(length=36), nullable=False), sa.Column('excluded', sa.Boolean(), nullable=True, server_default=sa.sql.false()), sa.ForeignKeyConstraint(['metering_label_id'], ['meteringlabels.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/ml2_init_ops.py0000644000175000017500000001762300000000000030024 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# # Initial operations for ML2 plugin and drivers from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'ml2_vlan_allocations', sa.Column('physical_network', sa.String(length=64), nullable=False), sa.Column('vlan_id', sa.Integer(), autoincrement=False, nullable=False), sa.Column('allocated', sa.Boolean(), nullable=False), sa.PrimaryKeyConstraint('physical_network', 'vlan_id'), sa.Index(op.f('ix_ml2_vlan_allocations_physical_network_allocated'), 'physical_network', 'allocated')) op.create_table( 'ml2_vxlan_endpoints', sa.Column('ip_address', sa.String(length=64), nullable=False), sa.Column('udp_port', sa.Integer(), autoincrement=False, nullable=False), sa.Column('host', sa.String(length=255), nullable=True), sa.UniqueConstraint('host', name='unique_ml2_vxlan_endpoints0host'), sa.PrimaryKeyConstraint('ip_address')) op.create_table( 'ml2_gre_endpoints', sa.Column('ip_address', sa.String(length=64), nullable=False), sa.Column('host', sa.String(length=255), nullable=True), sa.UniqueConstraint('host', name='unique_ml2_gre_endpoints0host'), sa.PrimaryKeyConstraint('ip_address')) op.create_table( 'ml2_vxlan_allocations', sa.Column('vxlan_vni', sa.Integer(), autoincrement=False, nullable=False), sa.Column('allocated', sa.Boolean(), nullable=False, server_default=sa.sql.false(), index=True), sa.PrimaryKeyConstraint('vxlan_vni')) op.create_table( 'ml2_gre_allocations', sa.Column('gre_id', sa.Integer(), autoincrement=False, nullable=False), sa.Column('allocated', sa.Boolean(), nullable=False, server_default=sa.sql.false(), index=True), sa.PrimaryKeyConstraint('gre_id')) op.create_table( 'ml2_flat_allocations', sa.Column('physical_network', sa.String(length=64), nullable=False), sa.PrimaryKeyConstraint('physical_network')) op.create_table( 'ml2_network_segments', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('network_type', sa.String(length=32), nullable=False), sa.Column('physical_network', sa.String(length=64), nullable=True), sa.Column('segmentation_id', sa.Integer(), nullable=True), sa.Column('is_dynamic', sa.Boolean(), nullable=False, server_default=sa.sql.false()), sa.Column('segment_index', sa.Integer(), nullable=False, server_default='0'), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id')) op.create_table( 'ml2_port_bindings', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('host', sa.String(length=255), nullable=False, server_default=''), sa.Column('vif_type', sa.String(length=64), nullable=False), sa.Column('vnic_type', sa.String(length=64), nullable=False, server_default='normal'), sa.Column('profile', sa.String(length=4095), nullable=False, server_default=''), sa.Column('vif_details', sa.String(length=4095), nullable=False, server_default=''), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('port_id')) op.create_table( 'ml2_port_binding_levels', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('host', sa.String(length=255), nullable=False), sa.Column('level', sa.Integer(), autoincrement=False, nullable=False), sa.Column('driver', sa.String(length=64), nullable=True), sa.Column('segment_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['segment_id'], ['ml2_network_segments.id'], ondelete='SET NULL'), sa.PrimaryKeyConstraint('port_id', 'host', 'level') 
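# one row per (port, host, level): hierarchical port binding records the driver and the bound segment chosen at each level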
) op.create_table( 'cisco_ml2_nexusport_bindings', sa.Column('binding_id', sa.Integer(), nullable=False), sa.Column('port_id', sa.String(length=255), nullable=True), sa.Column('vlan_id', sa.Integer(), autoincrement=False, nullable=False), sa.Column('switch_ip', sa.String(length=255), nullable=True), sa.Column('instance_id', sa.String(length=255), nullable=True), sa.Column('vni', sa.Integer(), nullable=True), sa.Column('is_provider_vlan', sa.Boolean(), nullable=False, server_default=sa.sql.false()), sa.PrimaryKeyConstraint('binding_id'), ) op.create_table( 'arista_provisioned_nets', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('network_id', sa.String(length=36), nullable=True), sa.Column('segmentation_id', sa.Integer(), autoincrement=False, nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table( 'arista_provisioned_vms', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('vm_id', sa.String(length=255), nullable=True), sa.Column('host_id', sa.String(length=255), nullable=True), sa.Column('port_id', sa.String(length=36), nullable=True), sa.Column('network_id', sa.String(length=36), nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table( 'arista_provisioned_tenants', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('id')) op.create_table( 'ml2_nexus_vxlan_allocations', sa.Column('vxlan_vni', sa.Integer(), nullable=False, autoincrement=False), sa.Column('allocated', sa.Boolean(), nullable=False, server_default=sa.sql.false()), sa.PrimaryKeyConstraint('vxlan_vni') ) op.create_table( 'ml2_nexus_vxlan_mcast_groups', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('mcast_group', sa.String(length=64), nullable=False), sa.Column('associated_vni', sa.Integer(), nullable=False), sa.PrimaryKeyConstraint('id'), sa.ForeignKeyConstraint(['associated_vni'], ['ml2_nexus_vxlan_allocations.vxlan_vni'], ondelete='CASCADE') ) op.create_table( 'cisco_ml2_nexus_nve', sa.Column('vni', sa.Integer(), nullable=False), sa.Column('switch_ip', sa.String(length=255), nullable=True), sa.Column('device_id', sa.String(length=255), nullable=True), sa.Column('mcast_group', sa.String(length=255), nullable=True), sa.PrimaryKeyConstraint('vni', 'switch_ip', 'device_id')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/nec_init_ops.py0000644000175000017500000001044700000000000030074 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# # Initial operations for NEC plugin from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'ofcportmappings', sa.Column('ofc_id', sa.String(length=255), nullable=False), sa.Column('neutron_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('neutron_id'), sa.UniqueConstraint('ofc_id')) op.create_table( 'ofcroutermappings', sa.Column('ofc_id', sa.String(length=255), nullable=False), sa.Column('neutron_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('neutron_id'), sa.UniqueConstraint('ofc_id')) op.create_table( 'routerproviders', sa.Column('provider', sa.String(length=255), nullable=True), sa.Column('router_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('router_id')) op.create_table( 'ofctenantmappings', sa.Column('ofc_id', sa.String(length=255), nullable=False), sa.Column('neutron_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('neutron_id'), sa.UniqueConstraint('ofc_id')) op.create_table( 'ofcfiltermappings', sa.Column('ofc_id', sa.String(length=255), nullable=False), sa.Column('neutron_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('neutron_id'), sa.UniqueConstraint('ofc_id')) op.create_table( 'ofcnetworkmappings', sa.Column('ofc_id', sa.String(length=255), nullable=False), sa.Column('neutron_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('neutron_id'), sa.UniqueConstraint('ofc_id')) op.create_table( 'packetfilters', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('priority', sa.Integer(), nullable=False), sa.Column('action', sa.String(length=16), nullable=False), sa.Column('in_port', sa.String(length=36), nullable=True), sa.Column('src_mac', sa.String(length=32), nullable=False), sa.Column('dst_mac', sa.String(length=32), nullable=False), sa.Column('eth_type', sa.Integer(), nullable=False), sa.Column('src_cidr', sa.String(length=64), nullable=False), sa.Column('dst_cidr', sa.String(length=64), nullable=False), sa.Column('protocol', sa.String(length=16), nullable=False), sa.Column('src_port', sa.Integer(), nullable=False), sa.Column('dst_port', sa.Integer(), nullable=False), sa.Column('admin_state_up', sa.Boolean(), nullable=False), sa.Column('status', sa.String(length=16), nullable=False), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['in_port'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id')) op.create_table( 'portinfos', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('datapath_id', sa.String(length=36), nullable=False), sa.Column('port_no', sa.Integer(), nullable=False), sa.Column('vlan_id', sa.Integer(), nullable=False), sa.Column('mac', sa.String(length=32), nullable=False), sa.ForeignKeyConstraint(['id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/nsxv_initial_opts.py0000644000175000017500000001542500000000000031200 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this 
file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa appliance_sizes_enum = sa.Enum('compact', 'large', 'xlarge', 'quadlarge', name='nsxv_router_bindings_appliance_size') edge_types_enum = sa.Enum('service', 'vdr', name='nsxv_router_bindings_edge_type') internal_network_purpose_enum = sa.Enum('inter_edge_net', name='nsxv_internal_networks_purpose') internal_edge_purpose_enum = sa.Enum('inter_edge_net', name='nsxv_internal_edges_purpose') tz_binding_type_enum = sa.Enum('flat', 'vlan', 'portgroup', name='nsxv_tz_network_bindings_binding_type') router_types_enum = sa.Enum('shared', 'exclusive', name='nsxv_router_type') def upgrade(): op.create_table( 'nsxv_router_bindings', sa.Column('status', sa.String(length=16), nullable=False), sa.Column('status_description', sa.String(length=255), nullable=True), sa.Column('router_id', sa.String(length=36), nullable=False), sa.Column('edge_id', sa.String(length=36), nullable=True), sa.Column('lswitch_id', sa.String(length=36), nullable=True), sa.Column('appliance_size', appliance_sizes_enum, nullable=True), sa.Column('edge_type', edge_types_enum, nullable=True), sa.PrimaryKeyConstraint('router_id')) op.create_table( 'nsxv_internal_networks', sa.Column('network_purpose', internal_network_purpose_enum, nullable=False), sa.Column('network_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_purpose')) op.create_table( 'nsxv_internal_edges', sa.Column('ext_ip_address', sa.String(length=64), nullable=False), sa.Column('router_id', sa.String(length=36), nullable=True), sa.Column('purpose', internal_edge_purpose_enum, nullable=True), sa.PrimaryKeyConstraint('ext_ip_address')) op.create_table( 'nsxv_firewall_rule_bindings', sa.Column('rule_id', sa.String(length=36), nullable=False), sa.Column('edge_id', sa.String(length=36), nullable=False), sa.Column('rule_vse_id', sa.String(length=36), nullable=True), sa.PrimaryKeyConstraint('rule_id', 'edge_id')) op.create_table( 'nsxv_edge_dhcp_static_bindings', sa.Column('edge_id', sa.String(length=36), nullable=False), sa.Column('mac_address', sa.String(length=32), nullable=False), sa.Column('binding_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('edge_id', 'mac_address')) op.create_table( 'nsxv_edge_vnic_bindings', sa.Column('edge_id', sa.String(length=36), nullable=False), sa.Column('vnic_index', sa.Integer(), nullable=False), sa.Column('tunnel_index', sa.Integer(), nullable=False), sa.Column('network_id', sa.String(length=36), nullable=True), sa.PrimaryKeyConstraint('edge_id', 'vnic_index', 'tunnel_index')) op.create_table( 'nsxv_spoofguard_policy_network_mappings', sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('policy_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id')) op.create_table( 'nsxv_security_group_section_mappings', sa.Column('neutron_id', sa.String(length=36), nullable=False), sa.Column('ip_section_id', 
sa.String(length=100), nullable=True), sa.ForeignKeyConstraint(['neutron_id'], ['securitygroups.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('neutron_id')) op.create_table( 'nsxv_tz_network_bindings', sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('binding_type', tz_binding_type_enum, nullable=False), sa.Column('phy_uuid', sa.String(length=36), nullable=True), sa.Column('vlan_id', sa.Integer(), autoincrement=False, nullable=True), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id', 'binding_type', 'phy_uuid', 'vlan_id')) op.create_table( 'nsxv_port_vnic_mappings', sa.Column('neutron_id', sa.String(length=36), nullable=False), sa.Column('nsx_id', sa.String(length=42), nullable=False), sa.ForeignKeyConstraint(['neutron_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('neutron_id', 'nsx_id')) op.create_table( 'nsxv_port_index_mappings', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('device_id', sa.String(length=255), nullable=False), sa.Column('index', sa.Integer(), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('port_id'), sa.UniqueConstraint('device_id', 'index')) op.create_table( 'nsxv_rule_mappings', sa.Column('neutron_id', sa.String(length=36), nullable=False), sa.Column('nsx_rule_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['neutron_id'], ['securitygrouprules.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('neutron_id', 'nsx_rule_id')) op.create_table( 'nsxv_router_ext_attributes', sa.Column('router_id', sa.String(length=36), nullable=False), sa.Column('distributed', sa.Boolean(), nullable=False), sa.Column('router_type', router_types_enum, default='exclusive', nullable=False), sa.Column('service_router', sa.Boolean(), nullable=False), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('router_id')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/nuage_init_opts.py0000644000175000017500000000667000000000000030615 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# # Initial operations for Nuage plugin from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'nuage_net_partitions', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=64), nullable=True), sa.Column('l3dom_tmplt_id', sa.String(length=36), nullable=True), sa.Column('l2dom_tmplt_id', sa.String(length=36), nullable=True), sa.Column('isolated_zone', sa.String(length=64), nullable=True), sa.Column('shared_zone', sa.String(length=64), nullable=True), sa.PrimaryKeyConstraint('id'), ) op.create_table( 'nuage_subnet_l2dom_mapping', sa.Column('subnet_id', sa.String(length=36), nullable=False), sa.Column('net_partition_id', sa.String(length=36), nullable=True), sa.Column('nuage_subnet_id', sa.String(length=36), nullable=True, unique=True), sa.Column('nuage_l2dom_tmplt_id', sa.String(length=36), nullable=True), sa.Column('nuage_user_id', sa.String(length=36), nullable=True), sa.Column('nuage_group_id', sa.String(length=36), nullable=True), sa.Column('nuage_managed_subnet', sa.Boolean(), nullable=True), sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['net_partition_id'], ['nuage_net_partitions.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('subnet_id'), ) op.create_table( 'nuage_net_partition_router_mapping', sa.Column('net_partition_id', sa.String(length=36), nullable=False), sa.Column('router_id', sa.String(length=36), nullable=False), sa.Column('nuage_router_id', sa.String(length=36), nullable=True, unique=True), sa.Column('nuage_rtr_rd', sa.String(length=36), nullable=True), sa.Column('nuage_rtr_rt', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['net_partition_id'], ['nuage_net_partitions.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('net_partition_id', 'router_id'), ) op.create_table( 'nuage_provider_net_bindings', sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('network_type', sa.String(length=32), nullable=False), sa.Column('physical_network', sa.String(length=64), nullable=False), sa.Column('vlan_id', sa.Integer(), nullable=False), sa.ForeignKeyConstraint( ['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id') ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/other_extensions_init_ops.py0000644000175000017500000001043500000000000032724 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# # Initial operations for extensions: # allowedaddresspairs # extradhcpopts # portbindings # quotas # routedserviceinsertion # servicetype from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'providerresourceassociations', sa.Column('provider_name', sa.String(length=255), nullable=False), sa.Column('resource_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('provider_name', 'resource_id'), sa.UniqueConstraint('resource_id')) op.create_table( 'quotas', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('resource', sa.String(length=255), nullable=True), sa.Column('limit', sa.Integer(), nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table( 'allowedaddresspairs', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('mac_address', sa.String(length=32), nullable=False), sa.Column('ip_address', sa.String(length=64), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('port_id', 'mac_address', 'ip_address')) op.create_table( 'portbindingports', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('host', sa.String(length=255), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('port_id')) op.create_table( 'extradhcpopts', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('opt_name', sa.String(length=64), nullable=False), sa.Column('opt_value', sa.String(length=255), nullable=False), sa.Column('ip_version', sa.Integer(), server_default='4', nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint( 'port_id', 'opt_name', 'ip_version', name='uniq_extradhcpopts0portid0optname0ipversion')) op.create_table('subnetpools', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('ip_version', sa.Integer(), nullable=False), sa.Column('default_prefixlen', sa.Integer(), nullable=False), sa.Column('min_prefixlen', sa.Integer(), nullable=False), sa.Column('max_prefixlen', sa.Integer(), nullable=False), sa.Column('shared', sa.Boolean(), nullable=False), sa.Column('default_quota', sa.Integer(), nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table('subnetpoolprefixes', sa.Column('cidr', sa.String(length=64), nullable=False), sa.Column('subnetpool_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['subnetpool_id'], ['subnetpools.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('cidr', 'subnetpool_id')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/other_plugins_init_ops.py0000644000175000017500000000473200000000000032211 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Initial operations for plugins: # bigswitch # metaplugin from alembic import op import sqlalchemy as sa def upgrade(): # metaplugin op.create_table( 'networkflavors', sa.Column('flavor', sa.String(length=255), nullable=True), sa.Column('network_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id')) op.create_table( 'routerflavors', sa.Column('flavor', sa.String(length=255), nullable=True), sa.Column('router_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('router_id')) # big switch op.create_table( 'routerrules', sa.Column('id', sa.Integer(), nullable=False), sa.Column('source', sa.String(length=64), nullable=False), sa.Column('destination', sa.String(length=64), nullable=False), sa.Column('action', sa.String(length=10), nullable=False), sa.Column('router_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id')) op.create_table( 'nexthops', sa.Column('rule_id', sa.Integer(), nullable=False), sa.Column('nexthop', sa.String(length=64), nullable=False), sa.ForeignKeyConstraint(['rule_id'], ['routerrules.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('rule_id', 'nexthop')) op.create_table( 'consistencyhashes', sa.Column('hash_id', sa.String(255), primary_key=True), sa.Column('hash', sa.String(255), nullable=False) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/ovs_init_ops.py0000644000175000017500000000414200000000000030131 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
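# NOTE: the legacy OVS plugin tables created below are also dropped again by the Liberty contract migration 5498d17be016 ('Drop legacy OVS and LB plugin tables', further down).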
# # Initial operations for the OVS plugin from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'ovs_tunnel_endpoints', sa.Column('ip_address', sa.String(length=64), nullable=False), sa.Column('id', sa.Integer(), nullable=False), sa.PrimaryKeyConstraint('ip_address'), sa.UniqueConstraint('id', name='uniq_ovs_tunnel_endpoints0id')) op.create_table( 'ovs_tunnel_allocations', sa.Column('tunnel_id', sa.Integer(), autoincrement=False, nullable=False), sa.Column('allocated', sa.Boolean(), nullable=False), sa.PrimaryKeyConstraint('tunnel_id')) op.create_table( 'ovs_vlan_allocations', sa.Column('physical_network', sa.String(length=64), nullable=False), sa.Column('vlan_id', sa.Integer(), autoincrement=False, nullable=False), sa.Column('allocated', sa.Boolean(), nullable=False), sa.PrimaryKeyConstraint('physical_network', 'vlan_id')) op.create_table( 'ovs_network_bindings', sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('network_type', sa.String(length=32), nullable=False), sa.Column('physical_network', sa.String(length=64), nullable=True), sa.Column('segmentation_id', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/portsec_init_ops.py0000644000175000017500000000265400000000000031007 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Initial operations for the port security extension from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'networksecuritybindings', sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('port_security_enabled', sa.Boolean(), nullable=False), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id')) op.create_table( 'portsecuritybindings', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('port_security_enabled', sa.Boolean(), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('port_id')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/script.py.mako0000644000175000017500000000202700000000000027650 0ustar00coreycorey00000000000000# Copyright ${create_date.year} OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa ${imports if imports else ""} """${message} Revision ID: ${up_revision} Revises: ${down_revision} Create Date: ${create_date} """ # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} % if branch_labels: branch_labels = ${repr(branch_labels)} % endif def upgrade(): ${upgrades if upgrades else "pass"} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/secgroup_init_ops.py0000644000175000017500000000612700000000000031156 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Initial operations for security group extension from alembic import op import sqlalchemy as sa rule_direction_enum = sa.Enum('ingress', 'egress', name='securitygrouprules_direction') def upgrade(): op.create_table( 'securitygroups', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=255), nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table( 'securitygrouprules', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('security_group_id', sa.String(length=36), nullable=False), sa.Column('remote_group_id', sa.String(length=36), nullable=True), sa.Column('direction', rule_direction_enum, nullable=True), sa.Column('ethertype', sa.String(length=40), nullable=True), sa.Column('protocol', sa.String(length=40), nullable=True), sa.Column('port_range_min', sa.Integer(), nullable=True), sa.Column('port_range_max', sa.Integer(), nullable=True), sa.Column('remote_ip_prefix', sa.String(length=255), nullable=True), sa.ForeignKeyConstraint(['security_group_id'], ['securitygroups.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['remote_group_id'], ['securitygroups.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id')) op.create_table( 'securitygroupportbindings', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('security_group_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['security_group_id'], ['securitygroups.id']), sa.PrimaryKeyConstraint('port_id', 'security_group_id')) op.create_table( 'default_security_group', sa.Column('tenant_id', sa.String(length=255), nullable=False), 
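# tenant_id alone is the primary key (see below), so each tenant has at most one default security group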
sa.Column('security_group_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('tenant_id'), sa.ForeignKeyConstraint(['security_group_id'], ['securitygroups.id'], ondelete="CASCADE")) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2670443 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/0000755000175000017500000000000000000000000026713 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/CONTRACT_HEAD0000644000175000017500000000001500000000000030630 0ustar00coreycorey000000000000005c85685d616d ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/EXPAND_HEAD0000644000175000017500000000001500000000000030372 0ustar00coreycorey00000000000000e88badaa9591 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/README0000644000175000017500000000024300000000000027572 0ustar00coreycorey00000000000000This directory contains the migration scripts for the Neutron project. Please see the README in neutron/db/migration on how to use and generate new migrations. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/kilo_initial.py0000644000175000017500000000554500000000000031745 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
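# NOTE: 'kilo' is the consolidated baseline revision: down_revision is None and upgrade() simply replays the per-plugin *_init_ops helpers defined above (typically applied via 'neutron-db-manage upgrade heads').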
# from neutron.db import migration from neutron.db.migration.alembic_migrations import agent_init_ops from neutron.db.migration.alembic_migrations import brocade_init_ops from neutron.db.migration.alembic_migrations import cisco_init_ops from neutron.db.migration.alembic_migrations import core_init_ops from neutron.db.migration.alembic_migrations import dvr_init_opts from neutron.db.migration.alembic_migrations import firewall_init_ops from neutron.db.migration.alembic_migrations import l3_init_ops from neutron.db.migration.alembic_migrations import lb_init_ops from neutron.db.migration.alembic_migrations import loadbalancer_init_ops from neutron.db.migration.alembic_migrations import metering_init_ops from neutron.db.migration.alembic_migrations import ml2_init_ops from neutron.db.migration.alembic_migrations import nec_init_ops from neutron.db.migration.alembic_migrations import nsxv_initial_opts from neutron.db.migration.alembic_migrations import nuage_init_opts from neutron.db.migration.alembic_migrations import other_extensions_init_ops from neutron.db.migration.alembic_migrations import other_plugins_init_ops from neutron.db.migration.alembic_migrations import ovs_init_ops from neutron.db.migration.alembic_migrations import portsec_init_ops from neutron.db.migration.alembic_migrations import secgroup_init_ops from neutron.db.migration.alembic_migrations import vmware_init_ops from neutron.db.migration.alembic_migrations import vpn_init_ops """kilo_initial Revision ID: kilo Revises: None """ # revision identifiers, used by Alembic. revision = 'kilo' down_revision = None def upgrade(): migration.pk_on_alembic_version_table() agent_init_ops.upgrade() core_init_ops.upgrade() l3_init_ops.upgrade() secgroup_init_ops.upgrade() portsec_init_ops.upgrade() other_extensions_init_ops.upgrade() lb_init_ops.upgrade() ovs_init_ops.upgrade() ml2_init_ops.upgrade() dvr_init_opts.upgrade() firewall_init_ops.upgrade() loadbalancer_init_ops.upgrade() vpn_init_ops.upgrade() metering_init_ops.upgrade() brocade_init_ops.upgrade() cisco_init_ops.upgrade() nec_init_ops.upgrade() other_plugins_init_ops.upgrade() vmware_init_ops.upgrade() nuage_init_opts.upgrade() nsxv_initial_opts.upgrade() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982290.9670417 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/0000755000175000017500000000000000000000000030365 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2670443 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/contract/0000755000175000017500000000000000000000000032202 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000024600000000000011457 xustar0000000000000000144 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/contract/11926bcfe72d_add_geneve_ml2_type_driver.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/contract/11926bcf0000644000175000017500000000361100000000000033263 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """add geneve ml2 type driver Revision ID: 11926bcfe72d Revises: 2e5352a0ad4d Create Date: 2015-08-27 19:56:16.356522 """ # revision identifiers, used by Alembic. revision = '11926bcfe72d' down_revision = '2e5352a0ad4d' def contract_creation_exceptions(): """These elements were created by mistake in the contract branch.""" return { sa.Table: ['ml2_geneve_allocations', 'ml2_geneve_endpoints'], sa.Index: ['ml2_geneve_allocations'] } def upgrade(): op.create_table( 'ml2_geneve_allocations', sa.Column('geneve_vni', sa.Integer(), autoincrement=False, nullable=False), sa.Column('allocated', sa.Boolean(), server_default=sa.sql.false(), nullable=False), sa.PrimaryKeyConstraint('geneve_vni'), ) op.create_index(op.f('ix_ml2_geneve_allocations_allocated'), 'ml2_geneve_allocations', ['allocated'], unique=False) op.create_table( 'ml2_geneve_endpoints', sa.Column('ip_address', sa.String(length=64), nullable=False), sa.Column('host', sa.String(length=255), nullable=True), sa.PrimaryKeyConstraint('ip_address'), sa.UniqueConstraint('host', name='unique_ml2_geneve_endpoints0host'), ) ././@PaxHeader0000000000000000000000000000023600000000000011456 xustar0000000000000000136 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/contract/2a16083502f3_metaplugin_removal.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/contract/2a1608350000644000175000017500000000165600000000000033126 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op """Metaplugin removal Revision ID: 2a16083502f3 Revises: 5498d17be016 Create Date: 2015-06-16 09:11:10.488566 """ # revision identifiers, used by Alembic. revision = '2a16083502f3' down_revision = '5498d17be016' def upgrade(): op.drop_table('networkflavors') op.drop_table('routerflavors') ././@PaxHeader0000000000000000000000000000024400000000000011455 xustar0000000000000000142 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/contract/2e5352a0ad4d_add_missing_foreign_keys.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/contract/2e5352a00000644000175000017500000000244400000000000033177 0ustar00coreycorey00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op from sqlalchemy.engine import reflection from neutron.db import migration """Add missing foreign keys Revision ID: 2e5352a0ad4d Revises: 2a16083502f3 Create Date: 2015-08-20 12:43:09.110427 """ # revision identifiers, used by Alembic. revision = '2e5352a0ad4d' down_revision = '2a16083502f3' TABLE_NAME = 'flavorserviceprofilebindings' def upgrade(): inspector = reflection.Inspector.from_engine(op.get_bind()) fk_constraints = inspector.get_foreign_keys(TABLE_NAME) for fk in fk_constraints: fk['options']['ondelete'] = 'CASCADE' migration.remove_foreign_keys(TABLE_NAME, fk_constraints) migration.create_foreign_keys(TABLE_NAME, fk_constraints) ././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/contract/30018084ec99_initial.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/contract/300180840000644000175000017500000000157700000000000033046 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron.db.migration import cli """Initial no-op Liberty contract rule. Revision ID: 30018084ec99 Revises: None Create Date: 2015-06-22 00:00:00.000000 """ # revision identifiers, used by Alembic. revision = '30018084ec99' down_revision = 'kilo' branch_labels = (cli.CONTRACT_BRANCH,) def upgrade(): pass ././@PaxHeader0000000000000000000000000000025000000000000011452 xustar0000000000000000146 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/contract/4af11ca47297_drop_cisco_monolithic_tables.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/contract/4af11ca40000644000175000017500000000307400000000000033335 0ustar00coreycorey00000000000000# Copyright 2015 Cisco Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
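# NOTE: this contract revision drops the tables of the old monolithic Cisco plugins; the neutron_milestone marker below lets neutron-db-manage tie this contract revision to the Liberty release.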
# from alembic import op from neutron.db import migration """Drop cisco monolithic tables Revision ID: 4af11ca47297 Revises: 11926bcfe72d Create Date: 2015-08-13 08:01:19.709839 """ # revision identifiers, used by Alembic. revision = '4af11ca47297' down_revision = '11926bcfe72d' # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.LIBERTY] def upgrade(): op.drop_table('cisco_n1kv_port_bindings') op.drop_table('cisco_n1kv_network_bindings') op.drop_table('cisco_n1kv_multi_segments') op.drop_table('cisco_provider_networks') op.drop_table('cisco_n1kv_trunk_segments') op.drop_table('cisco_n1kv_vmnetworks') op.drop_table('cisco_n1kv_profile_bindings') op.drop_table('cisco_qos_policies') op.drop_table('cisco_credentials') op.drop_table('cisco_n1kv_vlan_allocations') op.drop_table('cisco_n1kv_vxlan_allocations') op.drop_table('cisco_network_profiles') op.drop_table('cisco_policy_profiles') ././@PaxHeader0000000000000000000000000000022700000000000011456 xustar0000000000000000129 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/contract/4ffceebfada_rbac_network.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/contract/4ffceebf0000644000175000017500000000467400000000000033604 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op from oslo_utils import uuidutils import sqlalchemy as sa """network_rbac Revision ID: 4ffceebfada Revises: 30018084ec99 Create Date: 2015-06-14 13:12:04.012457 """ # revision identifiers, used by Alembic. revision = '4ffceebfada' down_revision = '30018084ec99' depends_on = ('8675309a5c4f',) # A simple model of the networks table with only the fields needed for # the migration. network = sa.Table('networks', sa.MetaData(), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=255)), sa.Column('shared', sa.Boolean(), nullable=False)) networkrbacs = sa.Table( 'networkrbacs', sa.MetaData(), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('object_id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('target_tenant', sa.String(length=255), nullable=False), sa.Column('action', sa.String(length=255), nullable=False)) def upgrade(): op.bulk_insert(networkrbacs, get_values()) op.drop_column('networks', 'shared') # the shared column on subnets was just an internal representation of the # shared status of the network it was related to. This is now handled by # other logic so we just drop it. 
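# NOTE: after this migration, shared access survives only as RBAC entries.
# As an illustration (not data from the original migration), a network that
# had shared=True ends up represented in networkrbacs roughly as:
#   {'object_id': <network.id>, 'tenant_id': <network.tenant_id>,
#    'target_tenant': '*', 'action': 'access_as_shared'}
# get_values() below builds exactly these rows from the old column.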
op.drop_column('subnets', 'shared') def get_values(): session = sa.orm.Session(bind=op.get_bind()) values = [] for row in session.query(network).filter(network.c.shared).all(): values.append({'id': uuidutils.generate_uuid(), 'object_id': row[0], 'tenant_id': row[1], 'target_tenant': '*', 'action': 'access_as_shared'}) # this commit appears to be necessary to allow further operations session.commit() return values ././@PaxHeader0000000000000000000000000000024200000000000011453 xustar0000000000000000140 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/contract/5498d17be016_drop_legacy_ovs_and_lb.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/contract/5498d17b0000644000175000017500000000215300000000000033215 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op """Drop legacy OVS and LB plugin tables Revision ID: 5498d17be016 Revises: 4ffceebfada Create Date: 2015-06-25 14:08:30.984419 """ # revision identifiers, used by Alembic. revision = '5498d17be016' down_revision = '4ffceebfada' def upgrade(): op.drop_table('ovs_network_bindings') op.drop_table('ovs_vlan_allocations') op.drop_table('network_bindings') op.drop_table('ovs_tunnel_allocations') op.drop_table('network_states') op.drop_table('ovs_tunnel_endpoints') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2710443 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/expand/0000755000175000017500000000000000000000000031644 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000025500000000000011457 xustar0000000000000000151 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/expand/1b4c6e320f79_address_scope_support_in_subnetpool.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/expand/1b4c6e320f0000644000175000017500000000211600000000000033146 0ustar00coreycorey00000000000000# Copyright 2015 Huawei Technologies India Pvt. Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """address scope support in subnetpool Revision ID: 1b4c6e320f79 Revises: 1c844d1677f7 Create Date: 2015-07-03 09:48:39.491058 """ # revision identifiers, used by Alembic. 
revision = '1b4c6e320f79' down_revision = '1c844d1677f7' def upgrade(): op.add_column('subnetpools', sa.Column('address_scope_id', sa.String(length=36), nullable=True)) ././@PaxHeader0000000000000000000000000000023700000000000011457 xustar0000000000000000137 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/expand/1c844d1677f7_dns_nameservers_order.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/expand/1c844d16770000644000175000017500000000204000000000000033017 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """add order to dnsnameservers Revision ID: 1c844d1677f7 Revises: 26c371498592 Create Date: 2015-07-21 22:59:03.383850 """ # revision identifiers, used by Alembic. revision = '1c844d1677f7' down_revision = '26c371498592' def upgrade(): op.add_column('dnsnameservers', sa.Column('order', sa.Integer(), server_default='0', nullable=False)) ././@PaxHeader0000000000000000000000000000023100000000000011451 xustar0000000000000000131 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/expand/26c371498592_subnetpool_hash.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/expand/26c37149850000644000175000017500000000176600000000000032760 0ustar00coreycorey00000000000000# Copyright (c) 2015 Thales Services SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """subnetpool hash Revision ID: 26c371498592 Revises: 45f955889773 Create Date: 2015-06-02 21:18:19.942076 """ # revision identifiers, used by Alembic. revision = '26c371498592' down_revision = '45f955889773' def upgrade(): op.add_column( 'subnetpools', sa.Column('hash', sa.String(36), nullable=False, server_default='')) ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/expand/31337ec0ffee_flavors.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/expand/31337ec0ff0000644000175000017500000000412100000000000033151 0ustar00coreycorey00000000000000# Copyright 2014-2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """Flavor framework Revision ID: 313373c0ffee Revises: 52c5312f6baf Create Date: 2014-07-17 03:00:00.00 """ # revision identifiers, used by Alembic. revision = '313373c0ffee' down_revision = '52c5312f6baf' def upgrade(): op.create_table( 'flavors', sa.Column('id', sa.String(36)), sa.Column('name', sa.String(255)), sa.Column('description', sa.String(1024)), sa.Column('enabled', sa.Boolean, nullable=False, server_default=sa.sql.true()), sa.Column('service_type', sa.String(36), nullable=True), sa.PrimaryKeyConstraint('id') ) op.create_table( 'serviceprofiles', sa.Column('id', sa.String(36)), sa.Column('description', sa.String(1024)), sa.Column('driver', sa.String(1024), nullable=False), sa.Column('enabled', sa.Boolean, nullable=False, server_default=sa.sql.true()), sa.Column('metainfo', sa.String(4096)), sa.PrimaryKeyConstraint('id') ) op.create_table( 'flavorserviceprofilebindings', sa.Column('service_profile_id', sa.String(36), nullable=False), sa.Column('flavor_id', sa.String(36), nullable=False), sa.ForeignKeyConstraint(['service_profile_id'], ['serviceprofiles.id']), sa.ForeignKeyConstraint(['flavor_id'], ['flavors.id']), sa.PrimaryKeyConstraint('service_profile_id', 'flavor_id') ) ././@PaxHeader0000000000000000000000000000023600000000000011456 xustar0000000000000000136 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/expand/34af2b5c5a59_add_dns_name_to_port.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/expand/34af2b5c5a0000644000175000017500000000231600000000000033230 0ustar00coreycorey00000000000000# Copyright 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op from neutron_lib.db import constants import sqlalchemy as sa from neutron.db import migration """Add dns_name to Port Revision ID: 34af2b5c5a59 Revises: 9859ac9c136 Create Date: 2015-08-23 00:22:47.618593 """ # revision identifiers, used by Alembic. 
revision = '34af2b5c5a59' down_revision = '9859ac9c136' # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.LIBERTY] def upgrade(): op.add_column('ports', sa.Column('dns_name', sa.String(length=constants.FQDN_FIELD_SIZE), nullable=True)) ././@PaxHeader0000000000000000000000000000023300000000000011453 xustar0000000000000000133 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/expand/354db87e3225_nsxv_vdr_metadata.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/expand/354db87e320000644000175000017500000000277100000000000033110 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa from neutron.db.migration import cli """nsxv_vdr_metadata.py Revision ID: 354db87e3225 Revises: kilo Create Date: 2015-04-19 14:59:15.102609 """ # revision identifiers, used by Alembic. revision = '354db87e3225' down_revision = 'kilo' branch_labels = (cli.EXPAND_BRANCH,) def upgrade(): op.create_table( 'nsxv_vdr_dhcp_bindings', sa.Column('vdr_router_id', sa.String(length=36), nullable=False), sa.Column('dhcp_router_id', sa.String(length=36), nullable=False), sa.Column('dhcp_edge_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('vdr_router_id'), sa.UniqueConstraint( 'dhcp_router_id', name='unique_nsxv_vdr_dhcp_bindings0dhcp_router_id'), sa.UniqueConstraint( 'dhcp_edge_id', name='unique_nsxv_vdr_dhcp_bindings0dhcp_edge_id')) ././@PaxHeader0000000000000000000000000000022500000000000011454 xustar0000000000000000127 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/expand/45f955889773_quota_usage.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/expand/45f95588970000644000175000017500000000273000000000000032772 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa from sqlalchemy import sql """quota_usage Revision ID: 45f955889773 Revises: 8675309a5c4f Create Date: 2015-04-17 08:09:37.611546 """ # revision identifiers, used by Alembic. 
revision = '45f955889773' down_revision = '8675309a5c4f' def upgrade(): op.create_table( 'quotausages', sa.Column('tenant_id', sa.String(length=255), nullable=False, primary_key=True, index=True), sa.Column('resource', sa.String(length=255), nullable=False, primary_key=True, index=True), sa.Column('dirty', sa.Boolean(), nullable=False, server_default=sql.false()), sa.Column('in_use', sa.Integer(), nullable=False, server_default='0'), sa.Column('reserved', sa.Integer(), nullable=False, server_default='0')) ././@PaxHeader0000000000000000000000000000023000000000000011450 xustar0000000000000000130 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/expand/48153cb5f051_qos_db_changes.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/expand/48153cb5f00000644000175000017500000000461000000000000033074 0ustar00coreycorey00000000000000# Copyright 2015 Huawei Technologies India Pvt Ltd, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """qos db changes Revision ID: 48153cb5f051 Revises: 1b4c6e320f79 Create Date: 2015-06-24 17:03:34.965101 """ # revision identifiers, used by Alembic. revision = '48153cb5f051' down_revision = '1b4c6e320f79' def upgrade(): op.create_table( 'qos_policies', sa.Column('id', sa.String(length=36), primary_key=True), sa.Column('name', sa.String(length=255)), sa.Column('description', sa.String(length=255)), sa.Column('shared', sa.Boolean(), nullable=False), sa.Column('tenant_id', sa.String(length=255), index=True)) op.create_table( 'qos_network_policy_bindings', sa.Column('policy_id', sa.String(length=36), sa.ForeignKey('qos_policies.id', ondelete='CASCADE'), nullable=False), sa.Column('network_id', sa.String(length=36), sa.ForeignKey('networks.id', ondelete='CASCADE'), nullable=False, unique=True)) op.create_table( 'qos_port_policy_bindings', sa.Column('policy_id', sa.String(length=36), sa.ForeignKey('qos_policies.id', ondelete='CASCADE'), nullable=False), sa.Column('port_id', sa.String(length=36), sa.ForeignKey('ports.id', ondelete='CASCADE'), nullable=False, unique=True)) op.create_table( 'qos_bandwidth_limit_rules', sa.Column('id', sa.String(length=36), primary_key=True), sa.Column('qos_policy_id', sa.String(length=36), sa.ForeignKey('qos_policies.id', ondelete='CASCADE'), nullable=False, unique=True), sa.Column('max_kbps', sa.Integer()), sa.Column('max_burst_kbps', sa.Integer())) ././@PaxHeader0000000000000000000000000000023000000000000011450 xustar0000000000000000130 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/expand/52c5312f6baf_address_scopes.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/expand/52c5312f6b0000644000175000017500000000226600000000000033077 0ustar00coreycorey00000000000000# Copyright (c) 2015 Red Hat, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """Initial operations in support of address scopes """ # revision identifiers, used by Alembic. revision = '52c5312f6baf' down_revision = '599c6a226151' def upgrade(): op.create_table( 'address_scopes', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=False), sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('shared', sa.Boolean(), nullable=False), sa.PrimaryKeyConstraint('id')) ././@PaxHeader0000000000000000000000000000022700000000000011456 xustar0000000000000000129 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/expand/599c6a226151_neutrodb_ipam.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/expand/599c6a22610000644000175000017500000000551100000000000033024 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """neutrodb_ipam Revision ID: 599c6a226151 Revises: 354db87e3225 Create Date: 2015-03-08 18:12:08.962378 """ # revision identifiers, used by Alembic. 
revision = '599c6a226151' down_revision = '354db87e3225' def upgrade(): op.create_table( 'ipamsubnets', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('neutron_subnet_id', sa.String(length=36), nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table( 'ipamallocations', sa.Column('ip_address', sa.String(length=64), nullable=False), sa.Column('status', sa.String(length=36), nullable=True), sa.Column('ipam_subnet_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['ipam_subnet_id'], ['ipamsubnets.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('ip_address', 'ipam_subnet_id')) op.create_table( 'ipamallocationpools', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('ipam_subnet_id', sa.String(length=36), nullable=False), sa.Column('first_ip', sa.String(length=64), nullable=False), sa.Column('last_ip', sa.String(length=64), nullable=False), sa.ForeignKeyConstraint(['ipam_subnet_id'], ['ipamsubnets.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id')) op.create_table( 'ipamavailabilityranges', sa.Column('allocation_pool_id', sa.String(length=36), nullable=False), sa.Column('first_ip', sa.String(length=64), nullable=False), sa.Column('last_ip', sa.String(length=64), nullable=False), sa.ForeignKeyConstraint(['allocation_pool_id'], ['ipamallocationpools.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('allocation_pool_id', 'first_ip', 'last_ip'), sa.Index('ix_ipamavailabilityranges_first_ip_allocation_pool_id', 'first_ip', 'allocation_pool_id'), sa.Index('ix_ipamavailabilityranges_last_ip_allocation_pool_id', 'last_ip', 'allocation_pool_id')) ././@PaxHeader0000000000000000000000000000022600000000000011455 xustar0000000000000000128 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/expand/8675309a5c4f_rbac_network.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/expand/8675309a5c0000644000175000017500000000316100000000000033026 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """network_rbac Revision ID: 8675309a5c4f Revises: 313373c0ffee Create Date: 2015-06-14 13:12:04.012457 """ # revision identifiers, used by Alembic. 
revision = '8675309a5c4f' down_revision = '313373c0ffee' def upgrade(): op.create_table( 'networkrbacs', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('object_id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('target_tenant', sa.String(length=255), nullable=False), sa.Column('action', sa.String(length=255), nullable=False), sa.ForeignKeyConstraint(['object_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint( 'action', 'object_id', 'target_tenant', name='uniq_networkrbacs0tenant_target0object_id0action')) ././@PaxHeader0000000000000000000000000000023300000000000011453 xustar0000000000000000133 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/expand/9859ac9c136_quota_reservations.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/liberty/expand/9859ac9c130000644000175000017500000000311500000000000033111 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """quota_reservations Revision ID: 9859ac9c136 Revises: 48153cb5f051 Create Date: 2015-03-11 06:40:56.775075 """ # revision identifiers, used by Alembic. 
revision = '9859ac9c136' down_revision = '48153cb5f051' def upgrade(): op.create_table( 'reservations', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('expiration', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table( 'resourcedeltas', sa.Column('resource', sa.String(length=255), nullable=False), sa.Column('reservation_id', sa.String(length=36), nullable=False), sa.Column('amount', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['reservation_id'], ['reservations.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('resource', 'reservation_id')) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982290.9670417 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/0000755000175000017500000000000000000000000030161 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2710443 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/contract/0000755000175000017500000000000000000000000031776 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000024000000000000011451 xustar0000000000000000138 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/contract/1b294093239c_remove_embrane_plugin.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/contract/1b29409320000644000175000017500000000155500000000000033006 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op """Drop embrane plugin table Revision ID: 1b294093239c Revises: 4af11ca47297 Create Date: 2015-10-09 14:07:59.968597 """ # revision identifiers, used by Alembic. revision = '1b294093239c' down_revision = '4af11ca47297' def upgrade(): op.drop_table('embrane_pool_port') ././@PaxHeader0000000000000000000000000000024400000000000011455 xustar0000000000000000142 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/contract/2b4c2465d44b_dvr_sheduling_refactoring.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/contract/2b4c2465d0000644000175000017500000000534200000000000033144 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
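# transfer_snat_bindings() below folds csnat_l3_agent_bindings into
# routerl3agentbindings. As a rough SQL sketch of the data movement
# (illustrative only; the migration itself goes through SQLAlchemy):
#   DELETE FROM routerl3agentbindings
#       WHERE router_id IN (SELECT router_id
#                           FROM router_extra_attributes
#                           WHERE distributed);
#   INSERT INTO routerl3agentbindings (router_id, l3_agent_id)
#       SELECT router_id, l3_agent_id FROM csnat_l3_agent_bindings;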
# from alembic import op import sqlalchemy as sa """DVR scheduling refactoring Revision ID: 2b4c2465d44b Revises: 8a6d8bdae39 Create Date: 2015-12-23 07:39:49.062767 """ # revision identifiers, used by Alembic. revision = '2b4c2465d44b' down_revision = '8a6d8bdae39' ROUTER_ATTR_TABLE = 'router_extra_attributes' ROUTER_BINDING_TABLE = 'routerl3agentbindings' CSNAT_BINDING_TABLE = 'csnat_l3_agent_bindings' def upgrade(): transfer_snat_bindings() op.drop_table(CSNAT_BINDING_TABLE) def transfer_snat_bindings(): router_attr_table = sa.Table(ROUTER_ATTR_TABLE, sa.MetaData(), sa.Column('router_id', sa.String(36)), sa.Column('distributed', sa.Boolean),) csnat_binding = sa.Table(CSNAT_BINDING_TABLE, sa.MetaData(), sa.Column('router_id', sa.String(36)), sa.Column('l3_agent_id', sa.String(36))) router_binding = sa.Table(ROUTER_BINDING_TABLE, sa.MetaData(), sa.Column('router_id', sa.String(36)), sa.Column('l3_agent_id', sa.String(36))) session = sa.orm.Session(bind=op.get_bind()) with session.begin(subtransactions=True): # first delete all bindings for dvr routers from # routerl3agentbindings, as these might be bindings with l3 agents # on compute nodes for router_attr in session.query( router_attr_table).filter(router_attr_table.c.distributed): session.execute(router_binding.delete( router_binding.c.router_id == router_attr.router_id)) # now routerl3agentbindings will only contain bindings for the snat # portion of the router for csnat_binding in session.query(csnat_binding): session.execute( router_binding.insert().values( router_id=csnat_binding.router_id, l3_agent_id=csnat_binding.l3_agent_id)) # this commit is necessary to allow further operations session.commit() ././@PaxHeader0000000000000000000000000000022700000000000011456 xustar0000000000000000129 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/contract/4ffceebfcdc_standard_desc.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/contract/4ffceebfc0000644000175000017500000000422500000000000033533 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa from neutron.db import migration """standard_desc Revision ID: 4ffceebfcdc Revises: 5ffceebfada Create Date: 2016-02-10 23:12:04.012457 """ # revision identifiers, used by Alembic. revision = '4ffceebfcdc' down_revision = '5ffceebfada' depends_on = ('0e66c5227a8a',) neutron_milestone = [migration.MITAKA] # A simple model of the security groups table with only the fields needed for # the migration.
securitygroups = sa.Table('securitygroups', sa.MetaData(), sa.Column('standard_attr_id', sa.BigInteger(), nullable=False), sa.Column('description', sa.String(length=255))) standardattr = sa.Table( 'standardattributes', sa.MetaData(), sa.Column('id', sa.BigInteger(), primary_key=True, autoincrement=True), sa.Column('description', sa.String(length=255))) def upgrade(): migrate_values() op.drop_column('securitygroups', 'description') def migrate_values(): session = sa.orm.Session(bind=op.get_bind()) values = [] for row in session.query(securitygroups): values.append({'id': row[0], 'description': row[1]}) with session.begin(subtransactions=True): for value in values: session.execute( standardattr.update().values( description=value['description']).where( standardattr.c.id == value['id'])) # this commit appears to be necessary to allow further operations session.commit() ././@PaxHeader0000000000000000000000000000023700000000000011457 xustar0000000000000000137 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/contract/5ffceebfada_rbac_network_external.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/contract/5ffceebfa0000644000175000017500000000461000000000000033530 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op from oslo_utils import uuidutils import sqlalchemy as sa """network_rbac_external Revision ID: 5ffceebfada Revises: c6c112992c9 Create Date: 2015-06-14 13:12:04.012457 """ # revision identifiers, used by Alembic. revision = '5ffceebfada' down_revision = 'c6c112992c9' depends_on = () # A simple model of the external network table with only the fields needed for # the migration. 
external = sa.Table('externalnetworks', sa.MetaData(), sa.Column('network_id', sa.String(length=36), nullable=False)) network = sa.Table('networks', sa.MetaData(), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=255))) networkrbacs = sa.Table( 'networkrbacs', sa.MetaData(), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('object_id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('target_tenant', sa.String(length=255), nullable=False), sa.Column('action', sa.String(length=255), nullable=False)) def upgrade(): op.bulk_insert(networkrbacs, get_values()) def get_values(): session = sa.orm.Session(bind=op.get_bind()) values = [] net_to_tenant_id = {} for row in session.query(network).all(): net_to_tenant_id[row[0]] = row[1] for row in session.query(external).all(): values.append({'id': uuidutils.generate_uuid(), 'object_id': row[0], 'tenant_id': net_to_tenant_id[row[0]], 'target_tenant': '*', 'action': 'access_as_external'}) # this commit appears to be necessary to allow further operations session.commit() return values ././@PaxHeader0000000000000000000000000000025100000000000011453 xustar0000000000000000147 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/contract/8a6d8bdae39_migrate_neutron_resources_table.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/contract/8a6d8bdae0000644000175000017500000000643100000000000033373 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """standardattributes migration Revision ID: 8a6d8bdae39 Revises: 1b294093239c Create Date: 2015-09-10 03:12:04.012457 """ # revision identifiers, used by Alembic. revision = '8a6d8bdae39' down_revision = '1b294093239c' depends_on = ('32e5974ada25',) # basic model of the tables with required field for migration TABLES = ('ports', 'networks', 'subnets', 'subnetpools', 'securitygroups', 'floatingips', 'routers', 'securitygrouprules') TABLE_MODELS = [ (table, sa.Table(table, sa.MetaData(), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('standard_attr_id', sa.BigInteger(), nullable=True))) for table in TABLES ] standardattrs = sa.Table( 'standardattributes', sa.MetaData(), sa.Column('id', sa.BigInteger(), primary_key=True, autoincrement=True), sa.Column('resource_type', sa.String(length=255), nullable=False)) def upgrade(): generate_records_for_existing() for table, model in TABLE_MODELS: # add constraint(s) now that everything is populated on that table. # note that some MariaDB versions will *not* allow the ALTER to # NOT NULL on a column that has an FK constraint, so we set NOT NULL # first, then the FK constraint. 
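# In rough SQL terms, the two operations below amount to (an illustrative
# sketch only, not taken from the migration itself):
#   ALTER TABLE <table> MODIFY standard_attr_id BIGINT NOT NULL;
#   ALTER TABLE <table> ADD FOREIGN KEY (standard_attr_id)
#       REFERENCES standardattributes (id) ON DELETE CASCADE;
# Running the NOT NULL change before adding the FK sidesteps the MariaDB
# restriction described above.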
op.alter_column(table, 'standard_attr_id', nullable=False, existing_type=sa.BigInteger(), existing_nullable=True, existing_server_default=False) op.create_foreign_key( constraint_name=None, source_table=table, referent_table='standardattributes', local_cols=['standard_attr_id'], remote_cols=['id'], ondelete='CASCADE') op.create_unique_constraint( constraint_name='uniq_%s0standard_attr_id' % table, table_name=table, columns=['standard_attr_id']) def generate_records_for_existing(): session = sa.orm.Session(bind=op.get_bind()) values = [] with session.begin(subtransactions=True): for table, model in TABLE_MODELS: for row in session.query(model): # NOTE(kevinbenton): without this disabled, pylint complains # about a missing 'dml' argument. # pylint: disable=no-value-for-parameter res = session.execute( standardattrs.insert().values(resource_type=table)) session.execute( model.update().values( standard_attr_id=res.inserted_primary_key[0]).where( model.c.id == row[0])) # this commit is necessary to allow further operations session.commit() return values ././@PaxHeader0000000000000000000000000000023100000000000011451 xustar0000000000000000131 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/contract/c6c112992c9_rbac_qos_policy.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/contract/c6c1129920000644000175000017500000000437400000000000033074 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op from oslo_utils import uuidutils import sqlalchemy as sa from neutron.db import rbac_db_models """rbac_qos_policy Revision ID: c6c112992c9 Revises: e3278ee65050 Create Date: 2015-11-25 18:45:03.831359 """ # revision identifiers, used by Alembic. revision = 'c6c112992c9' down_revision = 'e3278ee65050' depends_on = ('15e43b934f81',) qos_rbacs = sa.Table( 'qospolicyrbacs', sa.MetaData(), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('target_tenant', sa.String(length=255), nullable=False), sa.Column('action', sa.String(length=255), nullable=False), sa.Column('object_id', sa.String(length=36), nullable=False)) # A simple model of the qos_policies table with only the fields needed for # the migration.
qos_policy = sa.Table('qos_policies', sa.MetaData(), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=255)), sa.Column('shared', sa.Boolean(), nullable=False)) def upgrade(): op.bulk_insert(qos_rbacs, get_values()) op.drop_column('qos_policies', 'shared') def get_values(): session = sa.orm.Session(bind=op.get_bind()) values = [] for row in session.query(qos_policy).filter(qos_policy.c.shared).all(): values.append({'id': uuidutils.generate_uuid(), 'object_id': row[0], 'tenant_id': row[1], 'target_tenant': '*', 'action': rbac_db_models.ACCESS_SHARED}) session.commit() return values ././@PaxHeader0000000000000000000000000000024100000000000011452 xustar0000000000000000139 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/contract/e3278ee65050_drop_nec_plugin_tables.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/contract/e3278ee650000644000175000017500000000222400000000000033156 0ustar00coreycorey00000000000000# Copyright 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op """Drop NEC plugin tables Revision ID: e3278ee65050 Revises: 2b4c2465d44b Create Date: 2016-02-15 18:50:56.870043 """ # revision identifiers, used by Alembic. revision = 'e3278ee65050' down_revision = '2b4c2465d44b' def upgrade(): op.drop_table('ofcnetworkmappings') op.drop_table('ofcportmappings') op.drop_table('ofcroutermappings') op.drop_table('ofcfiltermappings') op.drop_table('ofctenantmappings') op.drop_table('portinfos') op.drop_table('routerproviders') op.drop_table('packetfilters') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2750444 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/expand/0000755000175000017500000000000000000000000031440 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000024200000000000011453 xustar0000000000000000140 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/expand/0e66c5227a8a_add_desc_to_standard_attr.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/expand/0e66c5227a80000644000175000017500000000212000000000000032752 0ustar00coreycorey00000000000000# Copyright 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from alembic import op import sqlalchemy as sa from neutron.db import migration """Add desc to standard attr table Revision ID: 0e66c5227a8a Revises: 3894bccad37f Create Date: 2016-02-02 10:50:34.238563 """ # revision identifiers, used by Alembic. revision = '0e66c5227a8a' down_revision = '3894bccad37f' neutron_milestone = [migration.MITAKA] def upgrade(): op.add_column('standardattributes', sa.Column('description', sa.String(length=255), nullable=True)) ././@PaxHeader0000000000000000000000000000024500000000000011456 xustar0000000000000000143 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/expand/13cfb89f881a_add_is_default_to_subnetpool.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/expand/13cfb89f8810000644000175000017500000000216500000000000033055 0ustar00coreycorey00000000000000# Copyright 2015 Cisco Systems # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa from sqlalchemy import sql """add is_default to subnetpool Revision ID: 13cfb89f881a Revises: 59cb5b6cf4d Create Date: 2015-09-30 15:58:31.170153 """ # revision identifiers, used by Alembic. revision = '13cfb89f881a' down_revision = '59cb5b6cf4d' def upgrade(): op.add_column('subnetpools', sa.Column('is_default', sa.Boolean(), server_default=sql.false(), nullable=False)) ././@PaxHeader0000000000000000000000000000023300000000000011453 xustar0000000000000000133 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/expand/15be73214821_add_bgp_model_data.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/expand/15be73214820000644000175000017500000000730500000000000032677 0ustar00coreycorey00000000000000# Copyright 2016 Hewlett Packard Enterprise Development Company LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """add dynamic routing model data Revision ID: 15be73214821 Create Date: 2015-07-29 13:16:08.604175 """ # revision identifiers, used by Alembic. 
revision = '15be73214821' down_revision = '19f26505c74f' def upgrade(): op.create_table( 'bgp_speakers', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=False), sa.Column('local_as', sa.Integer, nullable=False, autoincrement=False), sa.Column('ip_version', sa.Integer, nullable=False, autoincrement=False), sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('advertise_floating_ip_host_routes', sa.Boolean(), nullable=False), sa.Column('advertise_tenant_networks', sa.Boolean(), nullable=False), sa.PrimaryKeyConstraint('id') ) op.create_table( 'bgp_peers', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=False), sa.Column('auth_type', sa.String(length=16), nullable=False), sa.Column('password', sa.String(length=255), nullable=True), sa.Column('peer_ip', sa.String(length=64), nullable=False), sa.Column('remote_as', sa.Integer, nullable=False, autoincrement=False), sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.PrimaryKeyConstraint('id') ) op.create_table( 'bgp_speaker_network_bindings', sa.Column('bgp_speaker_id', sa.String(length=36), nullable=False), sa.Column('network_id', sa.String(length=36), nullable=True), sa.Column('ip_version', sa.Integer, nullable=False, autoincrement=False), sa.ForeignKeyConstraint(['bgp_speaker_id'], ['bgp_speakers.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id', 'bgp_speaker_id', 'ip_version') ) op.create_table( 'bgp_speaker_peer_bindings', sa.Column('bgp_speaker_id', sa.String(length=36), nullable=False), sa.Column('bgp_peer_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['bgp_speaker_id'], ['bgp_speakers.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['bgp_peer_id'], ['bgp_peers.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('bgp_speaker_id', 'bgp_peer_id') ) ././@PaxHeader0000000000000000000000000000023000000000000011450 xustar0000000000000000130 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/expand/15e43b934f81_rbac_qos_policy.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/expand/15e43b934f80000644000175000017500000000366200000000000032773 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """rbac_qos_policy Revision ID: 15e43b934f81 Revises: b4caf27aae4 Create Date: 2015-11-25 18:45:03.819115 """ # revision identifiers, used by Alembic.
revision = '15e43b934f81' down_revision = 'b4caf27aae4' def upgrade(): op.create_table('qospolicyrbacs', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('target_tenant', sa.String(length=255), nullable=False), sa.Column('action', sa.String(length=255), nullable=False), sa.Column('object_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['object_id'], ['qos_policies.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('target_tenant', 'object_id', 'action')) op.create_index(op.f('ix_qospolicyrbacs_tenant_id'), 'qospolicyrbacs', ['tenant_id'], unique=False) ././@PaxHeader0000000000000000000000000000024000000000000011451 xustar0000000000000000138 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/expand/19f26505c74f_auto_allocated_topology.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/expand/19f26505c740000644000175000017500000000320500000000000032702 0ustar00coreycorey00000000000000# Copyright 2015-2016 Hewlett Packard Enterprise Development Company, LP # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa from sqlalchemy import sql """ Auto Allocated Topology - aka Get-Me-A-Network Revision ID: 19f26505c74f Revises: 1df244e556f5 Create Date: 2015-11-20 11:27:53.419742 """ # revision identifiers, used by Alembic. revision = '19f26505c74f' down_revision = '1df244e556f5' def upgrade(): op.create_table( 'auto_allocated_topologies', sa.Column('tenant_id', sa.String(length=255), primary_key=True), sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('router_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ondelete='SET NULL'), ) op.add_column('externalnetworks', sa.Column('is_default', sa.Boolean(), nullable=False, server_default=sql.false())) ././@PaxHeader0000000000000000000000000000026100000000000011454 xustar0000000000000000155 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/expand/1df244e556f5_add_unique_ha_router_agent_port_bindings.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/expand/1df244e556f0000644000175000017500000000462300000000000033047 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op from neutron_lib import exceptions import sqlalchemy as sa from neutron._i18n import _ """add_unique_ha_router_agent_port_bindings Revision ID: 1df244e556f5 Revises: 659bf3d90664 Create Date: 2015-10-02 18:06:01.696742 """ # revision identifiers, used by Alembic. revision = '1df244e556f5' down_revision = '659bf3d90664' UNIQUE_NAME = 'uniq_ha_router_agent_port_bindings0port_id0l3_agent_id' TABLE_NAME = 'ha_router_agent_port_bindings' ha_router_agent_port_bindings = sa.Table( 'ha_router_agent_port_bindings', sa.MetaData(), sa.Column('port_id', sa.String(36)), sa.Column('router_id', sa.String(36)), sa.Column('l3_agent_id', sa.String(36))) class DuplicateL3HARouterAgentPortBinding(exceptions.Conflict): message = _("Duplicate L3HARouterAgentPortBinding is created for " "router(s) %(router)s. Database cannot be upgraded. Please, " "remove all duplicates before upgrading the database.") def upgrade(): op.create_unique_constraint(UNIQUE_NAME, TABLE_NAME, ['router_id', 'l3_agent_id']) def check_sanity(connection): res = get_duplicate_l3_ha_port_bindings(connection) if res: raise DuplicateL3HARouterAgentPortBinding(router=", ".join(res)) def get_duplicate_l3_ha_port_bindings(connection): insp = sa.engine.reflection.Inspector.from_engine(connection) if 'ha_router_agent_port_bindings' not in insp.get_table_names(): return {} session = sa.orm.Session(bind=connection.connect()) query = (session.query(ha_router_agent_port_bindings.c.router_id) .group_by(ha_router_agent_port_bindings.c.router_id, ha_router_agent_port_bindings.c.l3_agent_id) .having(sa.func.count() > 1)).all() return [q[0] for q in query] ././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/expand/2f9e956e7532_tag_support.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/expand/2f9e956e7530000644000175000017500000000223200000000000032777 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """tag support Revision ID: 2f9e956e7532 Revises: 31ed664953e6 Create Date: 2016-01-21 08:11:49.604182 """ # revision identifiers, used by Alembic.
neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/expand/2f9e956e7532_tag_support.py

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from alembic import op
import sqlalchemy as sa

"""tag support

Revision ID: 2f9e956e7532
Revises: 31ed664953e6
Create Date: 2016-01-21 08:11:49.604182

"""

# revision identifiers, used by Alembic.
revision = '2f9e956e7532'
down_revision = '31ed664953e6'


def upgrade():
    op.create_table(
        'tags',
        sa.Column('standard_attr_id', sa.BigInteger(),
                  sa.ForeignKey('standardattributes.id', ondelete='CASCADE'),
                  nullable=False, primary_key=True),
        sa.Column('tag', sa.String(length=60), nullable=False,
                  primary_key=True)
    )

neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/expand/31ed664953e6_add_resource_versions_row_to_agent_table.py

# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from alembic import op
import sqlalchemy as sa

"""Add resource_versions row to agent table

Revision ID: 31ed664953e6
Revises: 15e43b934f81
Create Date: 2016-01-15 13:41:30.016915

"""

# revision identifiers, used by Alembic.
revision = '31ed664953e6'
down_revision = '15e43b934f81'


def upgrade():
    op.add_column('agents',
                  sa.Column('resource_versions', sa.String(length=8191)))

neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/expand/32e5974ada25_add_neutron_resources_table.py

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from alembic import op
import sqlalchemy as sa

"""Add standard attribute table

Revision ID: 32e5974ada25
Revises: 13cfb89f881a
Create Date: 2015-09-10 00:22:47.618593

"""

# revision identifiers, used by Alembic.
revision = '32e5974ada25'
down_revision = '13cfb89f881a'

TABLES = ('ports', 'networks', 'subnets', 'subnetpools', 'securitygroups',
          'floatingips', 'routers', 'securitygrouprules')


def upgrade():
    op.create_table(
        'standardattributes',
        sa.Column('id', sa.BigInteger(), autoincrement=True),
        sa.Column('resource_type', sa.String(length=255), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
    for table in TABLES:
        op.add_column(table, sa.Column('standard_attr_id', sa.BigInteger(),
                                       nullable=True))

neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/expand/3894bccad37f_add_timestamp_to_base_resources.py

# Copyright 2015 HuaWei Technologies.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from alembic import op
import sqlalchemy as sa

"""add_timestamp_to_base_resources

Revision ID: 3894bccad37f
Revises: 2f9e956e7532
Create Date: 2016-03-01 04:19:58.852612

"""

# revision identifiers, used by Alembic.
revision = '3894bccad37f'
down_revision = '2f9e956e7532'


def upgrade():
    for column_name in ['created_at', 'updated_at']:
        op.add_column(
            'standardattributes',
            sa.Column(column_name, sa.DateTime(), nullable=True)
        )

neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/expand/59cb5b6cf4d_availability_zone.py

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from alembic import op
import sqlalchemy as sa

"""Add availability zone

Revision ID: 59cb5b6cf4d
Revises: 34af2b5c5a59
Create Date: 2015-01-20 14:38:47.156574

"""

# revision identifiers, used by Alembic.
revision = '59cb5b6cf4d'
down_revision = '34af2b5c5a59'


def upgrade():
    op.add_column('agents',
                  sa.Column('availability_zone', sa.String(length=255)))

neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/expand/659bf3d90664_add_attributes_to_support_external_dns_integration.py

# Copyright 2016 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from alembic import op
from neutron_lib.db import constants
import sqlalchemy as sa

"""Add tables and attributes to support external DNS integration

Revision ID: 659bf3d90664
Revises: c3a73f615e4
Create Date: 2015-09-11 00:22:47.618593

"""

# revision identifiers, used by Alembic.
revision = '659bf3d90664'
down_revision = 'c3a73f615e4'


def upgrade():
    op.create_table('networkdnsdomains',
                    sa.Column('network_id', sa.String(length=36),
                              nullable=False, index=True),
                    sa.Column('dns_domain',
                              sa.String(length=constants.FQDN_FIELD_SIZE),
                              nullable=False),
                    sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
                                            ondelete='CASCADE'),
                    sa.PrimaryKeyConstraint('network_id'))

    op.create_table('floatingipdnses',
                    sa.Column('floatingip_id', sa.String(length=36),
                              nullable=False, index=True),
                    sa.Column('dns_name',
                              sa.String(length=constants.FQDN_FIELD_SIZE),
                              nullable=False),
                    sa.Column('dns_domain',
                              sa.String(length=constants.FQDN_FIELD_SIZE),
                              nullable=False),
                    sa.Column('published_dns_name',
                              sa.String(length=constants.FQDN_FIELD_SIZE),
                              nullable=False),
                    sa.Column('published_dns_domain',
                              sa.String(length=constants.FQDN_FIELD_SIZE),
                              nullable=False),
                    sa.ForeignKeyConstraint(['floatingip_id'],
                                            ['floatingips.id'],
                                            ondelete='CASCADE'),
                    sa.PrimaryKeyConstraint('floatingip_id'))

    op.create_table('portdnses',
                    sa.Column('port_id', sa.String(length=36),
                              nullable=False, index=True),
                    sa.Column('current_dns_name',
                              sa.String(length=constants.FQDN_FIELD_SIZE),
                              nullable=False),
                    sa.Column('current_dns_domain',
                              sa.String(length=constants.FQDN_FIELD_SIZE),
                              nullable=False),
                    sa.Column('previous_dns_name',
                              sa.String(length=constants.FQDN_FIELD_SIZE),
                              nullable=False),
                    sa.Column('previous_dns_domain',
                              sa.String(length=constants.FQDN_FIELD_SIZE),
                              nullable=False),
                    sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
                                            ondelete='CASCADE'),
                    sa.PrimaryKeyConstraint('port_id'))

neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/expand/b4caf27aae4_add_bgp_dragent_model_data.py

# Copyright 2016 Huawei Technologies India Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from alembic import op
import sqlalchemy as sa

"""add_bgp_dragent_model_data

Revision ID: b4caf27aae4
Revises: 15be73214821
Create Date: 2015-08-20 17:05:31.038704

"""

# revision identifiers, used by Alembic.
revision = 'b4caf27aae4'
down_revision = '15be73214821'


def upgrade():
    op.create_table(
        'bgp_speaker_dragent_bindings',
        sa.Column('agent_id', sa.String(length=36), primary_key=True),
        sa.Column('bgp_speaker_id', sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(['agent_id'], ['agents.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['bgp_speaker_id'], ['bgp_speakers.id'],
                                ondelete='CASCADE'),
    )

neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/expand/c3a73f615e4_add_ip_version_to_address_scope.py

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from alembic import op
import sqlalchemy as sa

"""Add ip_version to AddressScope

Revision ID: c3a73f615e4
Revises: dce3ec7a25c9
Create Date: 2015-10-08 17:34:32.231256

"""

# revision identifiers, used by Alembic.
revision = 'c3a73f615e4'
down_revision = 'dce3ec7a25c9'


def upgrade():
    op.add_column('address_scopes',
                  sa.Column('ip_version', sa.Integer(), nullable=False))

neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/expand/dce3ec7a25c9_router_az.py

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from alembic import op
import sqlalchemy as sa

"""Add router availability zone

Revision ID: dce3ec7a25c9
Revises: ec7fcfbf72ee
Create Date: 2015-09-17 09:36:17.468901

"""

# revision identifiers, used by Alembic.
revision = 'dce3ec7a25c9'
down_revision = 'ec7fcfbf72ee'


def upgrade():
    op.add_column('router_extra_attributes',
                  sa.Column('availability_zone_hints',
                            sa.String(length=255)))

neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/mitaka/expand/ec7fcfbf72ee_network_az.py

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from alembic import op
import sqlalchemy as sa

"""Add network availability zone

Revision ID: ec7fcfbf72ee
Revises: 32e5974ada25
Create Date: 2015-09-17 09:21:51.257579

"""

# revision identifiers, used by Alembic.
revision = 'ec7fcfbf72ee'
down_revision = '32e5974ada25'


def upgrade():
    op.add_column('networks',
                  sa.Column('availability_zone_hints',
                            sa.String(length=255)))

neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/contract/2e0d7a8a1586_add_binding_index_to_routerl3agentbinding.py

# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from collections import defaultdict

from alembic import op
import sqlalchemy as sa

"""Add binding index to RouterL3AgentBinding

Revision ID: 2e0d7a8a1586
Revises: 97c25b0d2353
Create Date: 2016-09-01 14:01:57.263289

"""

# revision identifiers, used by Alembic.
revision = '2e0d7a8a1586'
down_revision = '97c25b0d2353'

ROUTER_L3_AGENT_BINDING = 'routerl3agentbindings'


def contract_creation_exceptions():
    """Add a new binding_index to ensure that no over-creation of the
    bindings is possible.
    """
    return {
        sa.Column: ['%s.binding_index' % ROUTER_L3_AGENT_BINDING]
    }


def upgrade():
    op.add_column(ROUTER_L3_AGENT_BINDING,
                  sa.Column('binding_index', sa.Integer(), nullable=False,
                            server_default='1'))

    bindings_table = sa.Table(
        ROUTER_L3_AGENT_BINDING,
        sa.MetaData(),
        sa.Column('router_id', sa.String(36)),
        sa.Column('l3_agent_id', sa.String(36)),
        sa.Column('binding_index', sa.Integer, nullable=False,
                  server_default='1'),
    )

    routers_to_bindings = defaultdict(list)
    session = sa.orm.Session(bind=op.get_bind())
    with session.begin(subtransactions=True):
        for result in session.query(bindings_table):
            routers_to_bindings[result.router_id].append(result)

        for bindings in routers_to_bindings.values():
            for index, result in enumerate(bindings):
                session.execute(bindings_table.update().values(
                    binding_index=index + 1).where(
                    bindings_table.c.router_id == result.router_id).where(
                    bindings_table.c.l3_agent_id == result.l3_agent_id))
    session.commit()

    op.create_unique_constraint(
        'uniq_router_l3_agent_binding0router_id0binding_index0',
        ROUTER_L3_AGENT_BINDING,
        ['router_id', 'binding_index'])
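Stripped of the session plumbing, the backfill above is a grouped enumeration: bindings are bucketed per router and numbered consecutively from 1 so the new unique constraint can hold. A minimal sketch of that renumbering logic with illustrative data (no database involved; names are hypothetical):

from collections import defaultdict

bindings = [('r1', 'agent-a'), ('r2', 'agent-a'), ('r1', 'agent-b')]

# Bucket bindings per router, preserving encounter order.
routers_to_bindings = defaultdict(list)
for router_id, agent_id in bindings:
    routers_to_bindings[router_id].append(agent_id)

# Number each router's bindings from 1, as the UPDATE loop above does.
binding_index = {}
for router_id, agents in routers_to_bindings.items():
    for index, agent_id in enumerate(agents, start=1):
        binding_index[(router_id, agent_id)] = index

# binding_index == {('r1', 'agent-a'): 1, ('r2', 'agent-a'): 1,
#                   ('r1', 'agent-b'): 2}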
neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/contract/3b935b28e7a0_migrate_to_pluggable_ipam.py

# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from alembic import op
from neutron_lib import constants as const
from oslo_utils import uuidutils
import sqlalchemy as sa

"""migrate to pluggable ipam """

# revision identifiers, used by Alembic.
revision = '3b935b28e7a0'
down_revision = 'a8b517cff8ab'

# Simple models of the tables with only the fields needed for the migration.
neutron_subnet = sa.Table('subnets', sa.MetaData(),
                          sa.Column('id', sa.String(length=36),
                                    nullable=False))

ipam_subnet = sa.Table('ipamsubnets', sa.MetaData(),
                       sa.Column('id', sa.String(length=36),
                                 nullable=False),
                       sa.Column('neutron_subnet_id', sa.String(length=36),
                                 nullable=True))

ip_allocation_pool = sa.Table('ipallocationpools', sa.MetaData(),
                              sa.Column('id', sa.String(length=36),
                                        nullable=False),
                              sa.Column('subnet_id', sa.String(length=36),
                                        sa.ForeignKey('subnets.id',
                                                      ondelete="CASCADE"),
                                        nullable=False),
                              sa.Column('first_ip', sa.String(length=64),
                                        nullable=False),
                              sa.Column('last_ip', sa.String(length=64),
                                        nullable=False))

ipam_allocation_pool = sa.Table('ipamallocationpools', sa.MetaData(),
                                sa.Column('id', sa.String(length=36),
                                          nullable=False),
                                sa.Column('ipam_subnet_id',
                                          sa.String(length=36),
                                          sa.ForeignKey('ipamsubnets.id',
                                                        ondelete="CASCADE"),
                                          nullable=False),
                                sa.Column('first_ip', sa.String(length=64),
                                          nullable=False),
                                sa.Column('last_ip', sa.String(length=64),
                                          nullable=False))

ip_allocation = sa.Table('ipallocations', sa.MetaData(),
                         sa.Column('ip_address', sa.String(length=64),
                                   nullable=False),
                         sa.Column('subnet_id', sa.String(length=36),
                                   sa.ForeignKey('subnets.id',
                                                 ondelete="CASCADE")))

ipam_allocation = sa.Table('ipamallocations', sa.MetaData(),
                           sa.Column('ip_address', sa.String(length=64),
                                     nullable=False, primary_key=True),
                           sa.Column('ipam_subnet_id', sa.String(length=36),
                                     sa.ForeignKey('subnets.id',
                                                   ondelete="CASCADE"),
                                     primary_key=True),
                           sa.Column('status', sa.String(length=36)))


def upgrade():
    """Migrate data to pluggable ipam reference driver.

    Tables 'subnets', 'ipallocationpools' and 'ipallocations' are API
    exposed and always contain up-to-date data independently of the ipam
    driver in use, so they can be used as a reliable source of data.

    This migration cleans up the tables for the reference ipam driver and
    rebuilds them from the API exposed tables, so it works correctly for
    both types of users:

    - Those who used the built-in ipam implementation: their ipam data is
      migrated to the reference ipam driver tables, and the reference ipam
      driver becomes the default driver.
    - Those who switched to the reference ipam before Newton: the existing
      reference ipam driver tables are cleaned up and all ipam data is
      regenerated from the API exposed tables.

    All existing subnets and ports are still usable after the upgrade.
    """
""" session = sa.orm.Session(bind=op.get_bind()) # Make sure destination tables are clean session.execute(ipam_subnet.delete()) session.execute(ipam_allocation_pool.delete()) session.execute(ipam_allocation.delete()) map_neutron_id_to_ipam = {} subnet_values = [] for subnet_id, in session.query(neutron_subnet): ipam_id = uuidutils.generate_uuid() map_neutron_id_to_ipam[subnet_id] = ipam_id subnet_values.append(dict( id=ipam_id, neutron_subnet_id=subnet_id)) op.bulk_insert(ipam_subnet, subnet_values) ipam_pool_values = [] pools = session.query(ip_allocation_pool) for pool in pools: new_pool_id = uuidutils.generate_uuid() ipam_pool_values.append(dict( id=new_pool_id, ipam_subnet_id=map_neutron_id_to_ipam[pool.subnet_id], first_ip=pool.first_ip, last_ip=pool.last_ip)) op.bulk_insert(ipam_allocation_pool, ipam_pool_values) ipam_allocation_values = [] for ip_alloc in session.query(ip_allocation): ipam_allocation_values.append(dict( ip_address=ip_alloc.ip_address, status=const.IPAM_ALLOCATION_STATUS_ALLOCATED, ipam_subnet_id=map_neutron_id_to_ipam[ip_alloc.subnet_id])) op.bulk_insert(ipam_allocation, ipam_allocation_values) session.commit() ././@PaxHeader0000000000000000000000000000024700000000000011460 xustar0000000000000000145 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/contract/4bcd4df1f426_rename_ml2_dvr_port_bindings.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/contract/4bcd4df1f0000644000175000017500000000204500000000000033427 0ustar00coreycorey00000000000000# Copyright 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op """Rename ml2_dvr_port_bindings Revision ID: 4bcd4df1f426 Revises: 8fd3918ef6f4 Create Date: 2016-06-02 14:06:04.112998 """ # revision identifiers, used by Alembic. revision = '4bcd4df1f426' down_revision = '8fd3918ef6f4' OLD_REFERRED_TABLE_NAME = 'ml2_dvr_port_bindings' NEW_REFERRED_TABLE_NAME = 'ml2_distributed_port_bindings' def upgrade(): op.rename_table(OLD_REFERRED_TABLE_NAME, NEW_REFERRED_TABLE_NAME) ././@PaxHeader0000000000000000000000000000024500000000000011456 xustar0000000000000000143 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/contract/5c85685d616d_remove_availability_ranges.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/contract/5c85685d60000644000175000017500000000162700000000000033154 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/contract/4bcd4df1f426_rename_ml2_dvr_port_bindings.py

# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from alembic import op

"""Rename ml2_dvr_port_bindings

Revision ID: 4bcd4df1f426
Revises: 8fd3918ef6f4
Create Date: 2016-06-02 14:06:04.112998

"""

# revision identifiers, used by Alembic.
revision = '4bcd4df1f426'
down_revision = '8fd3918ef6f4'

OLD_REFERRED_TABLE_NAME = 'ml2_dvr_port_bindings'
NEW_REFERRED_TABLE_NAME = 'ml2_distributed_port_bindings'


def upgrade():
    op.rename_table(OLD_REFERRED_TABLE_NAME, NEW_REFERRED_TABLE_NAME)

neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/contract/5c85685d616d_remove_availability_ranges.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from alembic import op

from neutron.db import migration

"""Remove availability ranges."""

revision = '5c85685d616d'
down_revision = '2e0d7a8a1586'

# milestone identifier, used by neutron-db-manage
neutron_milestone = [migration.NEWTON]


def upgrade():
    op.drop_table('ipavailabilityranges')
    op.drop_table('ipamavailabilityranges')

neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/contract/7bbb25278f53_device_owner_ha_replicate_int.py

# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from alembic import op
from neutron_lib import constants
import sqlalchemy as sa

"""device_owner_ha_replicate_int

Revision ID: 7bbb25278f53
Revises: 4ffceebfcdc
Create Date: 2016-03-22 10:00:43.245503

"""

# revision identifiers, used by Alembic.
revision = '7bbb25278f53'
down_revision = '4ffceebfcdc'

ROUTER_ATTR_TABLE = 'router_extra_attributes'
ROUTER_PORTS_TABLE = 'routerports'
PORTS_TABLE = 'ports'


def upgrade():
    update_device_owner_ha_replicated_interface()


def update_device_owner_ha_replicated_interface():
    router_attr_table = sa.Table(ROUTER_ATTR_TABLE,
                                 sa.MetaData(),
                                 sa.Column('router_id', sa.String(36)),
                                 sa.Column('ha', sa.Boolean),)

    routerports = sa.Table(ROUTER_PORTS_TABLE,
                           sa.MetaData(),
                           sa.Column('router_id', sa.String(36)),
                           sa.Column('port_type', sa.String(255)))

    ports = sa.Table(PORTS_TABLE,
                     sa.MetaData(),
                     sa.Column('device_owner', sa.String(255)),
                     sa.Column('device_id', sa.String(255)))

    session = sa.orm.Session(bind=op.get_bind())
    with session.begin(subtransactions=True):
        for router_attr in session.query(
                router_attr_table).filter(router_attr_table.c.ha):
            session.execute(routerports.update().values(
                port_type=constants.DEVICE_OWNER_HA_REPLICATED_INT).where(
                routerports.c.router_id == router_attr.router_id).where(
                routerports.c.port_type ==
                constants.DEVICE_OWNER_ROUTER_INTF))
            session.execute(ports.update().values(
                device_owner=constants.DEVICE_OWNER_HA_REPLICATED_INT).where(
                ports.c.device_id == router_attr.router_id).where(
                ports.c.device_owner ==
                constants.DEVICE_OWNER_ROUTER_INTF))
    session.commit()
neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/contract/7d9d8eeec6ad_rename_tenant_to_project.py

# Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from alembic import op
import sqlalchemy as sa

"""rename tenant to project

Revision ID: 7d9d8eeec6ad
Create Date: 2016-06-29 19:42:17.862721

"""

# revision identifiers, used by Alembic.
revision = '7d9d8eeec6ad'
down_revision = 'a84ccf28f06a'
depends_on = ('5abc0278ca73',)

_INSPECTOR = None


def get_inspector():
    """Reuse inspector."""
    global _INSPECTOR
    if _INSPECTOR:
        return _INSPECTOR
    else:
        bind = op.get_bind()
        _INSPECTOR = sa.engine.reflection.Inspector.from_engine(bind)
    return _INSPECTOR


def get_tables():
    """Return a hardcoded list of tables that have a ``tenant_id`` column.

    The DB head can change, so to avoid problems when the models are
    updated later, return a hardcoded list of tables that is up to date
    as of this writing. Output retrieved by using:

    >>> metadata = head.get_metadata()
    >>> all_tables = metadata.sorted_tables
    >>> tenant_tables = []
    >>> for table in all_tables:
    ...     for column in table.columns:
    ...         if column.name == 'tenant_id':
    ...             tenant_tables.append((table, column))

    """
    tables = [
        'address_scopes',
        'floatingips',
        'meteringlabels',
        'networkrbacs',
        'networks',
        'ports',
        'qos_policies',
        'qospolicyrbacs',
        'quotas',
        'reservations',
        'routers',
        'securitygrouprules',
        'securitygroups',
        'subnetpools',
        'subnets',
        'trunks',
        'auto_allocated_topologies',
        'default_security_group',
        'ha_router_networks',
        'quotausages',
    ]
    return tables


def get_columns(table):
    """Return the list of columns for a given table."""
    inspector = get_inspector()
    return inspector.get_columns(table)


def get_data():
    """Return a combined list of tuples: [(table, column)].

    The list is built from the retrieved tables where a column named
    ``tenant_id`` exists.
    """
    output = []
    tables = get_tables()
    for table in tables:
        columns = get_columns(table)
        for column in columns:
            if column['name'] == 'tenant_id':
                output.append((table, column))
    return output


def alter_column(table, column):
    old_name = 'tenant_id'
    new_name = 'project_id'
    op.alter_column(
        table_name=table,
        column_name=old_name,
        new_column_name=new_name,
        existing_type=column['type'],
        existing_nullable=column['nullable']
    )


def recreate_index(index, table_name):
    old_name = index['name']
    new_name = old_name.replace('tenant', 'project')
    op.drop_index(op.f(old_name), table_name)
    op.create_index(new_name, table_name, ['project_id'])


def upgrade():
    inspector = get_inspector()
    data = get_data()
    for table, column in data:
        alter_column(table, column)
        indexes = inspector.get_indexes(table)
        for index in indexes:
            if 'tenant_id' in index['name']:
                recreate_index(index, table)


def contract_creation_exceptions():
    """Special migration for the blueprint to support Keystone V3.

    We drop all tenant_id columns and create project_id columns instead.
""" return { sa.Column: ['.'.join([table, 'project_id']) for table in get_tables()], sa.Index: get_tables() } ././@PaxHeader0000000000000000000000000000024600000000000011457 xustar0000000000000000144 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/contract/89ab9a816d70_rename_ml2_network_segments.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/contract/89ab9a8160000644000175000017500000000343400000000000033225 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from alembic import op from sqlalchemy.engine import reflection """Rename ml2_network_segments table Revision ID: 89ab9a816d70 Revises: 7bbb25278f53 Create Date: 2016-03-22 00:22:47.618593 """ # revision identifiers, used by Alembic. revision = '89ab9a816d70' down_revision = '7bbb25278f53' TABLE_NAME = 'ml2_port_binding_levels' OLD_REFERRED_TABLE_NAME = 'ml2_network_segments' NEW_REFERRED_TABLE_NAME = 'networksegments' def upgrade(): fk_name = delete_foreign_key_constraint() op.rename_table(OLD_REFERRED_TABLE_NAME, NEW_REFERRED_TABLE_NAME) op.create_foreign_key( constraint_name=fk_name, source_table=TABLE_NAME, referent_table=NEW_REFERRED_TABLE_NAME, local_cols=['segment_id'], remote_cols=['id'], ondelete="SET NULL" ) def delete_foreign_key_constraint(): inspector = reflection.Inspector.from_engine(op.get_bind()) fk_constraints = inspector.get_foreign_keys(TABLE_NAME) for fk in fk_constraints: if fk['referred_table'] == OLD_REFERRED_TABLE_NAME: op.drop_constraint( constraint_name=fk['name'], table_name=TABLE_NAME, type_='foreignkey' ) return fk['name'] ././@PaxHeader0000000000000000000000000000024300000000000011454 xustar0000000000000000141 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/contract/8fd3918ef6f4_add_segment_host_mapping.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/contract/8fd3918ef0000644000175000017500000000345500000000000033315 0ustar00coreycorey00000000000000# Copyright 2016 IBM # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """Add segment_host_mapping table. Revision ID: 8fd3918ef6f4 Revises: c879c5e1ee90 Create Date: 2016-02-25 00:22:47.618593 """ # revision identifiers, used by Alembic. 
neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/contract/8fd3918ef6f4_add_segment_host_mapping.py

# Copyright 2016 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from alembic import op
import sqlalchemy as sa

"""Add segment_host_mapping table.

Revision ID: 8fd3918ef6f4
Revises: c879c5e1ee90
Create Date: 2016-02-25 00:22:47.618593

"""

# revision identifiers, used by Alembic.
revision = '8fd3918ef6f4'
down_revision = 'c879c5e1ee90'


def upgrade():
    op.create_table('segmenthostmappings',
                    sa.Column('segment_id', sa.String(length=36),
                              index=True, nullable=False),
                    sa.Column('host', sa.String(255), index=True,
                              nullable=False),
                    sa.PrimaryKeyConstraint('segment_id', 'host'),
                    sa.ForeignKeyConstraint(['segment_id'],
                                            ['networksegments.id'],
                                            ondelete='CASCADE'))


def contract_creation_exceptions():
    """Return create exceptions.

    These elements depend on the networksegments table, which was renamed
    in the contract branch.
    """
    return {
        sa.Table: ['segmenthostmappings'],
        sa.Index: ['segmenthostmappings']
    }

neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/contract/97c25b0d2353_add_name_desc.py

# Copyright 2016 NEC Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from alembic import op
import sqlalchemy as sa

"""Add Name and Description to the networksegments table """

# revision identifiers, used by Alembic.
revision = '97c25b0d2353'
down_revision = 'b12a3ef66e62'
depends_on = ('89ab9a816d70',)

# As this script depends on another migration which was a contract script,
# the following column addition (which should have been in an expand phase)
# is also submitted in the contract phase. For information about the expand
# and contract scripts and how depends_on works, please refer to the Neutron
# database migration documentation.
TBL = 'networksegments'

TBL_MODEL = sa.Table(TBL, sa.MetaData(),
                     sa.Column('id', sa.String(length=36), nullable=False),
                     sa.Column('standard_attr_id', sa.BigInteger(),
                               nullable=True))

standardattrs = sa.Table(
    'standardattributes', sa.MetaData(),
    sa.Column('id', sa.BigInteger(), primary_key=True, autoincrement=True),
    sa.Column('resource_type', sa.String(length=255), nullable=False))


def update_existing_records():
    session = sa.orm.Session(bind=op.get_bind())
    values = []
    with session.begin(subtransactions=True):
        for row in session.query(TBL_MODEL):
            # NOTE from kevinbenton: without this disabled, pylint complains
            # about a missing 'dml' argument.
            # pylint: disable=no-value-for-parameter
            res = session.execute(
                standardattrs.insert().values(resource_type=TBL)
            )
            session.execute(
                TBL_MODEL.update().values(
                    standard_attr_id=res.inserted_primary_key[0]).where(
                    TBL_MODEL.c.id == row[0])
            )
    # this commit is necessary to allow further operations
    session.commit()
    return values


def upgrade():
    op.add_column(TBL, sa.Column('standard_attr_id', sa.BigInteger(),
                                 nullable=True))
    op.add_column(TBL, sa.Column('name', sa.String(255), nullable=True))
    update_existing_records()
    op.alter_column(TBL, 'standard_attr_id', nullable=False,
                    existing_type=sa.BigInteger(), existing_nullable=True,
                    existing_server_default=False)
    # add the constraint now that everything is populated on that table
    op.create_foreign_key(
        constraint_name=None, source_table=TBL,
        referent_table='standardattributes',
        local_cols=['standard_attr_id'], remote_cols=['id'],
        ondelete='CASCADE')
    op.create_unique_constraint(
        constraint_name='uniq_%s0standard_attr_id' % TBL,
        table_name=TBL,
        columns=['standard_attr_id'])


def contract_creation_exceptions():
    """Return create exceptions.

    These elements depend on the networksegments table and are therefore
    added in the contract branch.
    """
    return {
        sa.Column: ['networksegments.name',
                    'networksegments.standard_attr_id'],
    }
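update_existing_records() above follows the usual standard-attribute backfill pattern: insert one standardattributes row per existing record, then write the generated primary key back onto the record. A minimal, self-contained sketch of that linkage step against in-memory SQLite, with table shapes reduced to the relevant columns and the 1.x-style SQLAlchemy API used by the surrounding code (table and data here are illustrative):

import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
meta = sa.MetaData()
std = sa.Table('standardattributes', meta,
               sa.Column('id', sa.Integer, primary_key=True),
               sa.Column('resource_type', sa.String(255)))
seg = sa.Table('segments', meta,
               sa.Column('id', sa.String(36), primary_key=True),
               sa.Column('standard_attr_id', sa.Integer))
meta.create_all(engine)

with engine.connect() as conn:
    conn.execute(seg.insert(), [{'id': 'seg-1'}, {'id': 'seg-2'}])
    for (seg_id,) in conn.execute(sa.select([seg.c.id])).fetchall():
        # One standardattributes row per record...
        res = conn.execute(std.insert().values(resource_type='segments'))
        # ...and the generated key written back onto the record.
        conn.execute(seg.update()
                     .values(standard_attr_id=res.inserted_primary_key[0])
                     .where(seg.c.id == seg_id))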
neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/contract/a84ccf28f06a_migrate_dns_name_from_port.py

# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from alembic import op
from neutron_lib.db import constants
import sqlalchemy as sa

"""migrate dns name from port"""

# revision identifiers, used by Alembic.
revision = 'a84ccf28f06a'
down_revision = 'b67e765a3524'
depends_on = ('a963b38d82f4',)

ports = sa.Table(
    'ports', sa.MetaData(),
    sa.Column('id', sa.String(length=36), nullable=False),
    sa.Column('dns_name', sa.String(length=constants.FQDN_FIELD_SIZE),
              nullable=True))

portdnses = sa.Table('portdnses', sa.MetaData(),
                     sa.Column('port_id', sa.String(36),
                               sa.ForeignKey('ports.id',
                                             ondelete="CASCADE"),
                               primary_key=True, index=True),
                     sa.Column('dns_name', sa.String(length=255),
                               nullable=False),
                     sa.Column('current_dns_name', sa.String(255),
                               nullable=False),
                     sa.Column('current_dns_domain', sa.String(255),
                               nullable=False),
                     sa.Column('previous_dns_name', sa.String(255),
                               nullable=False),
                     sa.Column('previous_dns_domain', sa.String(255),
                               nullable=False))


def migrate_records_for_existing():
    session = sa.orm.Session(bind=op.get_bind())
    with session.begin(subtransactions=True):
        for row in session.query(ports):
            if row[1]:
                res = session.execute(portdnses.update().values(
                    dns_name=row[1]).where(portdnses.c.port_id == row[0]))
                if res.rowcount == 0:
                    session.execute(portdnses.insert().values(
                        port_id=row[0], current_dns_name='',
                        current_dns_domain='', previous_dns_name='',
                        previous_dns_domain='', dns_name=row[1]))
    session.commit()


def upgrade():
    migrate_records_for_existing()
    op.drop_column('ports', 'dns_name')
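migrate_records_for_existing() above relies on the UPDATE's rowcount to decide between updating an existing portdnses row and inserting a new one before the source column is dropped. A minimal sketch of that update-then-insert fallback against an illustrative in-memory table (names are hypothetical, 1.x-style API as above):

import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
meta = sa.MetaData()
t = sa.Table('portdnses', meta,
             sa.Column('port_id', sa.String(36), primary_key=True),
             sa.Column('dns_name', sa.String(255)))
meta.create_all(engine)

def set_dns_name(conn, port_id, dns_name):
    # Try the update first; fall back to an insert when no row matched.
    res = conn.execute(t.update().values(dns_name=dns_name)
                       .where(t.c.port_id == port_id))
    if res.rowcount == 0:
        conn.execute(t.insert().values(port_id=port_id, dns_name=dns_name))

with engine.connect() as conn:
    set_dns_name(conn, 'p1', 'host-a')   # no match -> inserts
    set_dns_name(conn, 'p1', 'host-b')   # match -> updates in place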
neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/contract/a8b517cff8ab_add_routerport_bindings_for_ha.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from alembic import op
from neutron_lib import constants
import sqlalchemy as sa

"""Add routerport bindings for L3 HA

Revision ID: a8b517cff8ab
Revises: 7d9d8eeec6ad
Create Date: 2016-07-18 14:31:45.725516

"""

# revision identifiers, used by Alembic.
revision = 'a8b517cff8ab'
down_revision = '7d9d8eeec6ad'

HA_AGENT_BINDINGS = 'ha_router_agent_port_bindings'
ROUTER_PORTS = 'routerports'


def upgrade():
    ha_bindings = sa.Table(
        HA_AGENT_BINDINGS,
        sa.MetaData(),
        sa.Column('port_id', sa.String(36)),
        sa.Column('router_id', sa.String(36)),
        sa.Column('l3_agent_id', sa.String(36)),
        sa.Column('state', sa.Enum(constants.HA_ROUTER_STATE_ACTIVE,
                                   constants.HA_ROUTER_STATE_STANDBY,
                                   name='l3_ha_states'))
    )
    router_ports = sa.Table(ROUTER_PORTS,
                            sa.MetaData(),
                            sa.Column('router_id', sa.String(36)),
                            sa.Column('port_id', sa.String(36)),
                            sa.Column('port_type', sa.String(255)))
    session = sa.orm.Session(bind=op.get_bind())
    with session.begin(subtransactions=True):
        router_port_tuples = set()
        for ha_bind in session.query(ha_bindings):
            router_port_tuples.add((ha_bind.router_id, ha_bind.port_id))
        # we have to remove any from the bulk insert that may already exist
        # as a result of Ifd3e007aaf2a2ed8123275aa3a9f540838e3c003 being
        # back-ported
        for router_port in session.query(router_ports).filter(
                router_ports.c.port_type ==
                constants.DEVICE_OWNER_ROUTER_HA_INTF):
            router_port_tuples.discard((router_port.router_id,
                                        router_port.port_id))
        new_records = [dict(router_id=router_id, port_id=port_id,
                            port_type=constants.DEVICE_OWNER_ROUTER_HA_INTF)
                       for router_id, port_id in router_port_tuples]
        op.bulk_insert(router_ports, new_records)
    session.commit()
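The script above collects (router_id, port_id) pairs in a set and discards any that already exist, so the subsequent bulk insert cannot create duplicates. The core of it is ordinary set arithmetic, reduced here to plain Python with illustrative data:

ha_bindings = {('r1', 'p1'), ('r1', 'p2'), ('r2', 'p3')}
already_present = {('r1', 'p2')}  # e.g. created by the back-ported fix

# discard() in a loop over existing rows is equivalent to set difference.
to_insert = ha_bindings - already_present
# to_insert == {('r1', 'p1'), ('r2', 'p3')}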
neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/contract/b12a3ef66e62_add_standardattr_to_qos_policies.py

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from alembic import op
import sqlalchemy as sa

"""add standardattr to qos policies

Revision ID: b12a3ef66e62
Revises: 3b935b28e7a0
Create Date: 2016-08-18 14:10:30.021055

"""

# revision identifiers, used by Alembic.
revision = 'b12a3ef66e62'
down_revision = '3b935b28e7a0'
depends_on = ('67daae611b6e',)

# basic model of the tables with the fields required for the migration
TABLE = 'qos_policies'

TABLE_MODEL = sa.Table(TABLE, sa.MetaData(),
                       sa.Column('id', sa.String(length=36),
                                 nullable=False),
                       sa.Column('description', sa.String(length=255),
                                 nullable=True),
                       sa.Column('standard_attr_id', sa.BigInteger(),
                                 nullable=True))

standardattrs = sa.Table(
    'standardattributes', sa.MetaData(),
    sa.Column('id', sa.BigInteger(), primary_key=True, autoincrement=True),
    sa.Column('resource_type', sa.String(length=255), nullable=False),
    sa.Column('description', sa.String(length=255), nullable=True))


def upgrade():
    generate_records_for_existing()
    # add the constraint now that everything is populated on that table
    op.alter_column(TABLE, 'standard_attr_id', nullable=False,
                    existing_type=sa.BigInteger(), existing_nullable=True,
                    existing_server_default=False)
    op.create_unique_constraint(
        constraint_name='uniq_%s0standard_attr_id' % TABLE,
        table_name=TABLE,
        columns=['standard_attr_id'])
    op.drop_column(TABLE, 'description')
    op.create_foreign_key(
        constraint_name=None, source_table=TABLE,
        referent_table='standardattributes',
        local_cols=['standard_attr_id'], remote_cols=['id'],
        ondelete='CASCADE')


def generate_records_for_existing():
    session = sa.orm.Session(bind=op.get_bind())
    values = []
    with session.begin(subtransactions=True):
        for row in session.query(TABLE_MODEL):
            # NOTE(kevinbenton): without this disabled, pylint complains
            # about a missing 'dml' argument.
            # pylint: disable=no-value-for-parameter
            res = session.execute(
                standardattrs.insert().values(resource_type=TABLE,
                                              description=row[1])
            )
            session.execute(
                TABLE_MODEL.update().values(
                    standard_attr_id=res.inserted_primary_key[0]).where(
                    TABLE_MODEL.c.id == row[0])
            )
    # this commit is necessary to allow further operations
    session.commit()
    return values

neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/contract/b67e765a3524_remove_mtu_column_from_networks.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from alembic import op

"""Remove mtu column from networks.

Revision ID: b67e765a3524
Revises: 4bcd4df1f426
Create Date: 2016-07-17 02:07:36.625196

"""

# revision identifiers, used by Alembic.
revision = 'b67e765a3524'
down_revision = '4bcd4df1f426'


def upgrade():
    op.drop_column('networks', 'mtu')

neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/contract/c879c5e1ee90_add_segment_id_to_subnet.py

# Copyright (c) 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from alembic import op
import sqlalchemy as sa

"""Add segment_id to subnet

Revision ID: c879c5e1ee90
Revises: 89ab9a816d70
Create Date: 2016-02-25 00:22:47.618593

"""

# revision identifiers, used by Alembic.
revision = 'c879c5e1ee90'
down_revision = '89ab9a816d70'


def upgrade():
    op.add_column('subnets',
                  sa.Column('segment_id', sa.String(length=36),
                            nullable=True))
    op.create_foreign_key(
        None, 'subnets', 'networksegments', ['segment_id'], ['id'])


def contract_creation_exceptions():
    """The networksegments table was renamed in the contract branch.

    Because the column being added has a foreign key dependency on a column
    in a table that was renamed in the contract branch, this column must
    also be added in the contract branch.
    """
    return {
        sa.Column: ['subnets.segment_id']
    }

neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/expand/030a959ceafa_uniq_routerports0port_id.py

# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from alembic import op
from neutron_lib import exceptions
import sqlalchemy as sa

from neutron._i18n import _

"""uniq_routerports0port_id

Revision ID: 030a959ceafa
Revises: 3d0e74aa7d37
Create Date: 2016-06-21 11:33:13.043879

"""

# revision identifiers, used by Alembic.
revision = '030a959ceafa'
down_revision = '3d0e74aa7d37'

routerports = sa.Table(
    'routerports', sa.MetaData(),
    sa.Column('router_id', sa.String(36)),
    sa.Column('port_id', sa.String(36)),
    sa.Column('port_type', sa.String(255)))


class DuplicatePortRecordinRouterPortdatabase(exceptions.Conflict):
    message = _("Duplicate port(s) %(port_id)s records exist in routerports "
                "database. Database cannot be upgraded. Please remove all "
                "duplicated records before upgrading the database.")


def upgrade():
    op.create_unique_constraint(
        'uniq_routerports0port_id', 'routerports', ['port_id'])


def check_sanity(connection):
    res = get_duplicate_port_records_in_routerport_database(connection)
    if res:
        raise DuplicatePortRecordinRouterPortdatabase(port_id=",".join(res))


def get_duplicate_port_records_in_routerport_database(connection):
    insp = sa.engine.reflection.Inspector.from_engine(connection)
    if 'routerports' not in insp.get_table_names():
        return []
    session = sa.orm.Session(bind=connection.connect())
    query = (session.query(routerports.c.port_id)
             .group_by(routerports.c.port_id)
             .having(sa.func.count() > 1)).all()
    return [q[0] for q in query]

neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/expand/0f5bef0f87d4_add_qos_minimum_bandwidth_rules.py

# Copyright 2016 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from alembic import op
from neutron_lib import constants
import sqlalchemy as sa

"""add_qos_minimum_bandwidth_rules

Revision ID: 0f5bef0f87d4
Revises: a5648cfeeadf
Create Date: 2016-07-29 14:33:37.243487

"""

# revision identifiers, used by Alembic.
revision = '0f5bef0f87d4'
down_revision = 'a5648cfeeadf'


def upgrade():
    op.create_table(
        'qos_minimum_bandwidth_rules',
        sa.Column('id', sa.String(length=36), primary_key=True),
        sa.Column('qos_policy_id', sa.String(length=36),
                  sa.ForeignKey('qos_policies.id', ondelete='CASCADE'),
                  nullable=False, index=True),
        sa.Column('min_kbps', sa.Integer()),
        sa.Column('direction', sa.Enum(constants.EGRESS_DIRECTION,
                                       constants.INGRESS_DIRECTION,
                                       name='directions'),
                  nullable=False,
                  server_default=constants.EGRESS_DIRECTION),
        sa.UniqueConstraint(
            'qos_policy_id', 'direction',
            name='qos_minimum_bandwidth_rules0qos_policy_id0direction')
    )

neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/expand/30107ab6a3ee_provisioning_blocks.py

# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from alembic import op
import sqlalchemy as sa

"""provisioning_blocks.py

Revision ID: 30107ab6a3ee
Revises: d3435b514502
Create Date: 2016-04-15 05:59:59.000001

"""

# revision identifiers, used by Alembic.
revision = '30107ab6a3ee'
down_revision = 'd3435b514502'


def upgrade():
    op.create_table(
        'provisioningblocks',
        sa.Column('standard_attr_id', sa.BigInteger(),
                  sa.ForeignKey('standardattributes.id',
                                ondelete='CASCADE'),
                  nullable=False, primary_key=True),
        sa.Column('entity', sa.String(length=255), nullable=False,
                  primary_key=True),
    )

neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/expand/3d0e74aa7d37_add_flavor_id_to_routers.py

# Copyright 2016 Mirantis
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from alembic import op
import sqlalchemy as sa

"""Add flavor_id to Router

Revision ID: 3d0e74aa7d37
Revises: a963b38d82f4
Create Date: 2016-05-05 00:22:47.618593

"""

# revision identifiers, used by Alembic.
revision = '3d0e74aa7d37' down_revision = 'a963b38d82f4' def upgrade(): op.add_column('routers', sa.Column('flavor_id', sa.String(length=36), sa.ForeignKey('flavors.id'), nullable=True)) ././@PaxHeader0000000000000000000000000000023500000000000011455 xustar0000000000000000135 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/expand/45f8dd33480b_qos_dscp_db_addition.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/expand/45f8dd334800000644000175000017500000000233100000000000033026 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """qos dscp db addition Revision ID: 45f8dd33480b Revises: 0e66c5227a8a Create Date: 2015-12-03 07:16:24.742290 """ # revision identifiers, used by Alembic. revision = '45f8dd33480b' down_revision = '0e66c5227a8a' def upgrade(): op.create_table( 'qos_dscp_marking_rules', sa.Column('id', sa.String(length=36), primary_key=True), sa.Column('qos_policy_id', sa.String(length=36), sa.ForeignKey('qos_policies.id', ondelete='CASCADE'), nullable=False, unique=True), sa.Column('dscp_mark', sa.Integer())) ././@PaxHeader0000000000000000000000000000024600000000000011457 xustar0000000000000000144 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/expand/5abc0278ca73_add_support_for_vlan_trunking.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/expand/5abc0278ca70000644000175000017500000000473200000000000033163 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
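# Editor's sketch (an assumption, not part of the original migrations): when
# a later data migration needs to touch rows of a table created here,
# neutron re-declares it as a lightweight sa.Table instead of importing
# models -- the same pattern used for 'routerports' and 'floatingips'
# nearby. For the qos_dscp_marking_rules table created by 45f8dd33480b
# above, that would look like:
#
#     dscp_rules = sa.Table(
#         'qos_dscp_marking_rules', sa.MetaData(),
#         sa.Column('id', sa.String(36)),
#         sa.Column('qos_policy_id', sa.String(36)),
#         sa.Column('dscp_mark', sa.Integer()))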
# from alembic import op import sqlalchemy as sa from sqlalchemy import sql """Add support for VLAN trunking""" revision = '5abc0278ca73' down_revision = '45f8dd33480b' def upgrade(): op.create_table( 'trunks', sa.Column('admin_state_up', sa.Boolean(), nullable=False, server_default=sql.true()), sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('status', sa.String(length=16), nullable=False, server_default='ACTIVE'), sa.Column('standard_attr_id', sa.BigInteger(), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['standard_attr_id'], ['standardattributes.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('port_id'), sa.UniqueConstraint('standard_attr_id') ) op.create_table( 'subports', sa.Column('port_id', sa.String(length=36)), sa.Column('trunk_id', sa.String(length=36), nullable=False), sa.Column('segmentation_type', sa.String(length=32), nullable=False), sa.Column('segmentation_id', sa.Integer(), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['trunk_id'], ['trunks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('port_id'), sa.UniqueConstraint( 'trunk_id', 'segmentation_type', 'segmentation_id', name='uniq_subport0trunk_id0segmentation_type0segmentation_id') ) ././@PaxHeader0000000000000000000000000000024200000000000011453 xustar0000000000000000140 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/expand/5cd92597d11d_add_ip_allocation_to_port.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/expand/5cd92597d110000644000175000017500000000204700000000000033033 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa from neutron.db import migration """Add ip_allocation to port """ # revision identifiers, used by Alembic. revision = '5cd92597d11d' down_revision = '6b461a21bcfc' # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.NEWTON] def upgrade(): op.add_column('ports', sa.Column('ip_allocation', sa.String(length=16), nullable=True)) ././@PaxHeader0000000000000000000000000000025200000000000011454 xustar0000000000000000148 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/expand/67daae611b6e_add_standard_attr_to_qos_policies.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/expand/67daae611b60000644000175000017500000000172200000000000033160 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """add standardattr to qos policies Revision ID: 67daae611b6e Revises: 0f5bef0f87d4 Create Date: 2016-08-18 14:10:30.021015 """ revision = '67daae611b6e' down_revision = '0f5bef0f87d4' TABLE = 'qos_policies' def upgrade(): op.add_column(TABLE, sa.Column('standard_attr_id', sa.BigInteger(), nullable=True)) ././@PaxHeader0000000000000000000000000000025300000000000011455 xustar0000000000000000149 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/expand/6b461a21bcfc_uniq_floatingips0floating_network_.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/expand/6b461a21bcf0000644000175000017500000000471100000000000033153 0ustar00coreycorey00000000000000# Copyright 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op from neutron_lib import exceptions import sqlalchemy as sa from neutron._i18n import _ """uniq_floatingips0floating_network_id0fixed_port_id0fixed_ip_addr Revision ID: 6b461a21bcfc Revises: 67daae611b6e Create Date: 2016-06-03 16:00:38.273324 """ # revision identifiers, used by Alembic. revision = '6b461a21bcfc' down_revision = '67daae611b6e' floatingips = sa.Table( 'floatingips', sa.MetaData(), sa.Column('floating_network_id', sa.String(36)), sa.Column('fixed_port_id', sa.String(36)), sa.Column('fixed_ip_address', sa.String(64))) class DuplicateFloatingIPforOneFixedIP(exceptions.Conflict): message = _("Duplicate Floating IPs were created for fixed IP " "address(es) %(fixed_ip_address)s. The database cannot " "be upgraded. 
Please remove all duplicate Floating " "IPs before upgrading the database.") def upgrade(): op.create_unique_constraint( 'uniq_floatingips0floatingnetworkid0fixedportid0fixedipaddress', 'floatingips', ['floating_network_id', 'fixed_port_id', 'fixed_ip_address']) def check_sanity(connection): res = get_duplicate_floating_ip_for_one_fixed_ip(connection) if res: raise DuplicateFloatingIPforOneFixedIP(fixed_ip_address=",".join(res)) def get_duplicate_floating_ip_for_one_fixed_ip(connection): insp = sa.engine.reflection.Inspector.from_engine(connection) if 'floatingips' not in insp.get_table_names(): return [] session = sa.orm.Session(bind=connection.connect()) query = (session.query(floatingips.c.fixed_ip_address) .group_by(floatingips.c.floating_network_id, floatingips.c.fixed_port_id, floatingips.c.fixed_ip_address) .having(sa.func.count() > 1)).all() return [q[0] for q in query if q[0] is not None] ././@PaxHeader0000000000000000000000000000024100000000000011452 xustar0000000000000000139 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/expand/a5648cfeeadf_add_subnet_service_types.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/expand/a5648cfeead0000644000175000017500000000241300000000000033326 0ustar00coreycorey00000000000000# Copyright 2016 Hewlett Packard Enterprise Development Company, LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """Add support for Subnet Service Types Revision ID: a5648cfeeadf Revises: 030a959ceafa Create Date: 2016-03-15 18:00:00.190173 """ # revision identifiers, used by Alembic. revision = 'a5648cfeeadf' down_revision = '030a959ceafa' def upgrade(): op.create_table( 'subnet_service_types', sa.Column('subnet_id', sa.String(length=36)), sa.Column('service_type', sa.String(length=255)), sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('subnet_id', 'service_type') ) ././@PaxHeader0000000000000000000000000000024200000000000011453 xustar0000000000000000140 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/expand/a963b38d82f4_add_dns_name_to_portdnses.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/expand/a963b38d82f0000644000175000017500000000163200000000000033114 0ustar00coreycorey00000000000000# Copyright 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
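# Editor's sketch: the duplicate probe in 6b461a21bcfc above groups on the
# full (floating_network_id, fixed_port_id, fixed_ip_address) tuple, not on
# the address alone. The equivalent plain SQL, for operators checking a
# database by hand (an illustration, not part of any migration):
#
#     SELECT fixed_ip_address
#     FROM floatingips
#     GROUP BY floating_network_id, fixed_port_id, fixed_ip_address
#     HAVING COUNT(*) > 1;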
# from alembic import op import sqlalchemy as sa """add dns name to portdnses""" # revision identifiers, used by Alembic. revision = 'a963b38d82f4' down_revision = 'c415aab1c048' def upgrade(): op.add_column('portdnses', sa.Column('dns_name', sa.String(length=255), nullable=False)) ././@PaxHeader0000000000000000000000000000023500000000000011455 xustar0000000000000000135 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/expand/c415aab1c048_add_revisions_column.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/expand/c415aab1c040000644000175000017500000000203500000000000033137 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """add revisions table Revision ID: c415aab1c048 Revises: 30107ab6a3ee Create Date: 2016-04-11 03:16:24.742290 """ # revision identifiers, used by Alembic. revision = 'c415aab1c048' down_revision = '30107ab6a3ee' def upgrade(): op.add_column( 'standardattributes', sa.Column('revision_number', sa.BigInteger(), nullable=False, server_default='0')) ././@PaxHeader0000000000000000000000000000024400000000000011455 xustar0000000000000000142 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/expand/d3435b514502_add_device_id_index_to_port.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/newton/expand/d3435b514500000644000175000017500000000170000000000000032730 0ustar00coreycorey00000000000000# Copyright 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op """Add device_id index to Port Revision ID: d3435b514502 Revises: 5abc0278ca73 Create Date: 2016-04-25 22:13:16.676761 """ # revision identifiers, used by Alembic. 
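# Editor's note: c415aab1c048 above can declare revision_number NOT NULL on
# a populated table only because it ships server_default='0', letting the
# database backfill existing rows in one DDL statement. Without a server
# default, the usual three-step dance (table and column names here are
# hypothetical) is:
#
#     op.add_column('t', sa.Column('n', sa.Integer(), nullable=True))
#     op.execute("UPDATE t SET n = 0 WHERE n IS NULL")
#     op.alter_column('t', 'n', nullable=False, existing_type=sa.Integer())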
revision = 'd3435b514502' down_revision = '5abc0278ca73' def upgrade(): op.create_index('ix_ports_device_id', 'ports', ['device_id'], unique=False) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982290.9670417 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ocata/0000755000175000017500000000000000000000000030002 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2790444 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ocata/expand/0000755000175000017500000000000000000000000031261 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000023400000000000011454 xustar0000000000000000134 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ocata/expand/929c968efe70_add_pk_version_table.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ocata/expand/929c968efe700000644000175000017500000000163700000000000032777 0ustar00coreycorey00000000000000# Copyright 2017 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron.db import migration """add_pk_version_table Revision ID: 929c968efe70 Revises: 5cd92597d11d Create Date: 2017-01-12 07:17:33.677770 """ # revision identifiers, used by Alembic. revision = '929c968efe70' down_revision = '5cd92597d11d' def upgrade(): migration.pk_on_alembic_version_table() ././@PaxHeader0000000000000000000000000000024000000000000011451 xustar0000000000000000138 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ocata/expand/a9c43481023c_extend_ml2_port_bindings.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ocata/expand/a9c43481023c0000644000175000017500000000461000000000000032655 0ustar00coreycorey00000000000000# Copyright 2016 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op from neutron_lib import constants import sqlalchemy as sa from sqlalchemy.engine.reflection import Inspector as insp from neutron.db import migration """extend_pk_with_host_and_add_status_to_ml2_port_binding Revision ID: a9c43481023c Revises: 929c968efe70 Create Date: 2016-11-22 11:48:43.479552 """ # revision identifiers, used by Alembic. 
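# Editor's sketch: the upgrade() below must discover the current primary key
# before it can replace it. The reflection half in isolation (a minimal
# illustration; the bind comes from the running Alembic context):
#
#     from sqlalchemy.engine.reflection import Inspector
#
#     inspector = Inspector.from_engine(op.get_bind())
#     pk = inspector.get_pk_constraint('ml2_port_bindings')
#     # pk['name'] and pk['constrained_columns'] feed drop_constraint()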
revision = 'a9c43481023c' down_revision = '929c968efe70' MYSQL_ENGINE = 'mysql' ML2_PORT_BINDING = 'ml2_port_bindings' neutron_milestone = [migration.OCATA] def upgrade(): bind = op.get_bind() engine = bind.engine op.add_column(ML2_PORT_BINDING, sa.Column('status', sa.String(length=16), nullable=False, server_default=constants.ACTIVE)) if (engine.name == MYSQL_ENGINE): op.execute("ALTER TABLE ml2_port_bindings DROP PRIMARY KEY," "ADD PRIMARY KEY(port_id, host);") else: inspector = insp.from_engine(bind) pk_constraint = inspector.get_pk_constraint(ML2_PORT_BINDING) op.drop_constraint(pk_constraint.get('name'), ML2_PORT_BINDING, type_='primary') op.create_primary_key(op.f('pk_ml2_port_bindings'), ML2_PORT_BINDING, ['port_id', 'host']) def expand_drop_exceptions(): """Drop and extend the ML2 port bindings key constraint Drop the existing primary key constraint and then extend it to include host in the primary key, so the same port can have multiple bindings. This is needed so the drop can run in an expand migration and still pass test_branches. Recreating the primary key in expand is safe because the change is backward compatible. """ return { sa.Constraint: ['ml2_port_bindings_pkey'] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982290.9670417 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/pike/0000755000175000017500000000000000000000000027643 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2790444 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/pike/expand/0000755000175000017500000000000000000000000031122 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000025700000000000011461 xustar0000000000000000153 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/pike/expand/2b42d90729da_qos_add_direction_to_bw_limit_rule_table.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/pike/expand/2b42d90729da_0000644000175000017500000000531000000000000032740 0ustar00coreycorey00000000000000# Copyright 2017 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op from neutron_lib import constants import sqlalchemy as sa from neutron.db import migration """qos add direction to bw_limit_rule table Revision ID: 2b42d90729da Revises: 804a3c76314c Create Date: 2017-04-03 20:56:00.169599 """ # revision identifiers, used by Alembic. 
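# Editor's note: MySQL names every primary key 'PRIMARY', so a9c43481023c
# above sidesteps name reflection there and issues one raw ALTER TABLE that
# drops and re-adds the key in a single statement. On other backends the
# portable Alembic pair is used, as in the file itself:
#
#     op.drop_constraint(pk_name, 'ml2_port_bindings', type_='primary')
#     op.create_primary_key(op.f('pk_ml2_port_bindings'),
#                           'ml2_port_bindings', ['port_id', 'host'])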
revision = '2b42d90729da' down_revision = '804a3c76314c' policies_table_name = "qos_policies" bw_limit_table_name = "qos_bandwidth_limit_rules" direction_enum = sa.Enum( constants.EGRESS_DIRECTION, constants.INGRESS_DIRECTION, name="directions" ) def upgrade(): if op.get_context().bind.dialect.name == 'postgresql': direction_enum.create(op.get_bind(), checkfirst=True) with migration.remove_fks_from_table(bw_limit_table_name, remove_unique_constraints=True): op.add_column(bw_limit_table_name, sa.Column("direction", direction_enum, server_default=constants.EGRESS_DIRECTION, nullable=False)) op.create_unique_constraint( op.f('qos_bandwidth_rules0qos_policy_id0direction'), bw_limit_table_name, ['qos_policy_id', 'direction']) def expand_drop_exceptions(): """Drop and replace the QoS policy foreign key constraint Drop the existing QoS policy foreign key and unique constraints, then replace them with a new unique constraint on the pair (qos_policy_id, direction). As constraint names differ between MySQL and PGSQL, both variants must be added to the drop exceptions. """ # TODO(slaweq): replace hardcoded constraint names with names taken directly # from the database model after bug # https://bugs.launchpad.net/neutron/+bug/1685352 is closed return { sa.ForeignKeyConstraint: [ "qos_bandwidth_limit_rules_ibfk_1", # MySQL name "qos_bandwidth_limit_rules_qos_policy_id_fkey" # PGSQL name ], sa.UniqueConstraint: [ "qos_policy_id", # MySQL name "qos_bandwidth_limit_rules_qos_policy_id_key" # PGSQL name ] } ././@PaxHeader0000000000000000000000000000024200000000000011453 xustar0000000000000000140 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/pike/expand/349b6fd605a6_add_dns_domain_to_portdnses.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/pike/expand/349b6fd605a6_0000644000175000017500000000221200000000000032746 0ustar00coreycorey00000000000000# Copyright 2017 IBM # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op from neutron_lib.db import constants import sqlalchemy as sa """Add dns_domain to portdnses Revision ID: 349b6fd605a6 Revises: c8c222d42aa9 Create Date: 2017-04-15 00:22:47.618593 """ # revision identifiers, used by Alembic. 
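# Editor's note: adding the 'direction' column in 2b42d90729da above
# requires temporarily dropping the foreign key (and the old single-column
# unique constraint) that reference qos_policy_id, which is what the
# context manager handles. The general shape of the pattern (table and
# column here are placeholders):
#
#     with migration.remove_fks_from_table('some_table',
#                                          remove_unique_constraints=True):
#         op.add_column('some_table', sa.Column('direction', ...))
#     # dropped foreign keys are recreated automatically on exit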
revision = '349b6fd605a6' down_revision = 'c8c222d42aa9' def upgrade(): op.add_column('portdnses', sa.Column('dns_domain', sa.String(length=constants.FQDN_FIELD_SIZE), nullable=False, server_default='')) ././@PaxHeader0000000000000000000000000000024500000000000011456 xustar0000000000000000143 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/pike/expand/62c781cb6192_add_qos_policies_default_table.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/pike/expand/62c781cb6192_0000644000175000017500000000244700000000000032674 0ustar00coreycorey00000000000000# Copyright 2017 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """add is default to qos policies Revision ID: 62c781cb6192 Revises: 2b42d90729da Create Date: 2017-02-07 13:28:35.894357 """ # revision identifiers, used by Alembic. revision = '62c781cb6192' down_revision = '2b42d90729da' def upgrade(): op.create_table( 'qos_policies_default', sa.Column('qos_policy_id', sa.String(length=36), sa.ForeignKey('qos_policies.id', ondelete='CASCADE'), nullable=False), sa.Column('project_id', sa.String(length=255), nullable=False, index=True, primary_key=True), ) ././@PaxHeader0000000000000000000000000000023300000000000011453 xustar0000000000000000133 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/pike/expand/7d32f979895f_add_mtu_for_networks.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/pike/expand/7d32f979895f_0000644000175000017500000000223500000000000032720 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa from neutron.db import migration """add mtu for networks Revision ID: 7d32f979895f Revises: c8c222d42aa9 Create Date: 2017-07-13 19:25:29.204547 """ # revision identifiers, used by Alembic. 
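# Editor's note: in qos_policies_default (62c781cb6192 above) project_id is
# the sole primary key, so the schema itself guarantees at most one default
# QoS policy per project, and ondelete='CASCADE' clears the mapping when the
# policy is deleted. A hypothetical lookup:
#
#     SELECT qos_policy_id FROM qos_policies_default
#     WHERE project_id = :project_id;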
revision = '7d32f979895f' down_revision = '349b6fd605a6' # require the migration rule that dropped the mtu column in the past depends_on = ('b67e765a3524',) neutron_milestone = [migration.PIKE] def upgrade(): op.add_column('networks', sa.Column('mtu', sa.Integer(), nullable=True)) ././@PaxHeader0000000000000000000000000000024400000000000011455 xustar0000000000000000142 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/pike/expand/804a3c76314c_add_data_plane_status_to_port.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/pike/expand/804a3c76314c_0000644000175000017500000000243400000000000032661 0ustar00coreycorey00000000000000# Copyright 2017 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """Add data_plane_status to Port Revision ID: 804a3c76314c Revises: a9c43481023c Create Date: 2017-01-17 13:51:45.737987 """ # revision identifiers, used by Alembic. revision = '804a3c76314c' down_revision = 'a9c43481023c' def upgrade(): op.create_table('portdataplanestatuses', sa.Column('port_id', sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True, index=True), sa.Column('data_plane_status', sa.String(length=16), nullable=True)) ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/pike/expand/c8c222d42aa9_logging_api.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/pike/expand/c8c222d42aa9_0000644000175000017500000000415100000000000033015 0ustar00coreycorey00000000000000# Copyright 2017 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa from neutron_lib.db import constants as db_const """logging api Revision ID: c8c222d42aa9 Revises: 62c781cb6192 Create Date: 2017-05-30 11:51:08.173604 """ # revision identifiers, used by Alembic. 
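# Editor's note: 7d32f979895f above pairs down_revision (its linear parent
# in the expand branch) with depends_on, which does not reorder the branch
# but tells Alembic that the named contract-side revision must already be
# applied before this one runs. Side by side:
#
#     down_revision = '349b6fd605a6'    # parent within this branch
#     depends_on = ('b67e765a3524',)    # cross-branch prerequisite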
revision = 'c8c222d42aa9' down_revision = '62c781cb6192' def upgrade(): op.create_table( 'logs', sa.Column('project_id', sa.String(length=db_const.PROJECT_ID_FIELD_SIZE), nullable=True, index=True), sa.Column('id', sa.String(length=db_const.UUID_FIELD_SIZE), nullable=False), sa.Column('standard_attr_id', sa.BigInteger(), nullable=False), sa.Column('name', sa.String(length=db_const.NAME_FIELD_SIZE), nullable=True), sa.Column('resource_type', sa.String(length=36), nullable=False), sa.Column('resource_id', sa.String(length=db_const.UUID_FIELD_SIZE), nullable=True, index=True), sa.Column('target_id', sa.String(length=db_const.UUID_FIELD_SIZE), nullable=True, index=True), sa.Column('event', sa.String(length=255), nullable=False), sa.Column('enabled', sa.Boolean(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.ForeignKeyConstraint(['standard_attr_id'], ['standardattributes.id'], ondelete='CASCADE'), sa.UniqueConstraint('standard_attr_id')) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982290.9710417 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/queens/0000755000175000017500000000000000000000000030213 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2790444 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/queens/expand/0000755000175000017500000000000000000000000031472 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/queens/expand/594422d373ee_fip_qos.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/queens/expand/594422d373e0000644000175000017500000000271500000000000032741 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa from neutron_lib.db import constants as db_const from neutron.db import migration """fip qos Revision ID: 594422d373ee Revises: 7d32f979895f Create Date: 2016-04-26 17:16:10.323756 """ # revision identifiers, used by Alembic. 
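# Editor's note: the logs table above uses neutron's standard-attributes
# pattern: a NOT NULL, unique standard_attr_id foreign key onto
# standardattributes carries created_at/updated_at/description/revision
# bookkeeping for the resource. The recurring trio (compare 'trunks' and
# 'network_segment_ranges' elsewhere in this tree):
#
#     sa.Column('standard_attr_id', sa.BigInteger(), nullable=False),
#     sa.ForeignKeyConstraint(['standard_attr_id'],
#                             ['standardattributes.id'], ondelete='CASCADE'),
#     sa.UniqueConstraint('standard_attr_id')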
revision = '594422d373ee' down_revision = '7d32f979895f' # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.QUEENS] def upgrade(): op.create_table( 'qos_fip_policy_bindings', sa.Column('policy_id', sa.String(length=db_const.UUID_FIELD_SIZE), sa.ForeignKey('qos_policies.id', ondelete='CASCADE'), nullable=False), sa.Column('fip_id', sa.String(length=db_const.UUID_FIELD_SIZE), sa.ForeignKey('floatingips.id', ondelete='CASCADE'), nullable=False, unique=True)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982290.9710417 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/rocky/0000755000175000017500000000000000000000000030042 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2790444 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/rocky/expand/0000755000175000017500000000000000000000000031321 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000023300000000000011453 xustar0000000000000000133 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/rocky/expand/61663558142c_add_ha_router_state.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/rocky/expand/61663558142c0000644000175000017500000000240700000000000032570 0ustar00coreycorey00000000000000# Copyright 2017 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron_lib import constants import sqlalchemy as sa from neutron.db import migration """Add unknown state to HA router Revision ID: 61663558142c Revises: 594422d373ee Create Date: 2017-05-18 14:31:45.725516 """ revision = '61663558142c' down_revision = '594422d373ee' ha_port_bindings_table_name = "ha_router_agent_port_bindings" new_enum = sa.Enum( constants.HA_ROUTER_STATE_ACTIVE, constants.HA_ROUTER_STATE_STANDBY, constants.HA_ROUTER_STATE_UNKNOWN, name='l3_ha_states' ) def upgrade(): migration.alter_enum_add_value(ha_port_bindings_table_name, 'state', new_enum, True, server_default='standby') ././@PaxHeader0000000000000000000000000000022700000000000011456 xustar0000000000000000129 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/rocky/expand/867d39095bf4_port_forwarding.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/rocky/expand/867d39095bf40000644000175000017500000000427400000000000032751 0ustar00coreycorey00000000000000# Copyright 2018 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """port forwarding Revision ID: 867d39095bf4 Revises: 61663558142c Create Date: 2018-01-15 01:52:31.308888 """ from alembic import op import sqlalchemy as sa from neutron_lib.db import constants # revision identifiers, used by Alembic. revision = '867d39095bf4' down_revision = '61663558142c' def upgrade(): op.create_table( 'portforwardings', sa.Column('id', sa.String(length=constants.UUID_FIELD_SIZE), nullable=False), sa.Column('floatingip_id', sa.String(length=constants.UUID_FIELD_SIZE), nullable=False), sa.Column('external_port', sa.Integer(), nullable=False), sa.Column('internal_neutron_port_id', sa.String(length=constants.UUID_FIELD_SIZE), nullable=False), sa.Column('protocol', sa.String(length=40), nullable=False), sa.Column('socket', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['floatingip_id'], ['floatingips.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['internal_neutron_port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('floatingip_id', 'external_port', name='uniq_port_forwardings0floatingip_id0' 'external_port'), sa.UniqueConstraint('internal_neutron_port_id', 'socket', name='uniq_port_forwardings0' 'internal_neutron_port_id0socket') ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982290.9710417 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/stein/0000755000175000017500000000000000000000000030035 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2830443 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/stein/expand/0000755000175000017500000000000000000000000031314 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000023500000000000011455 xustar0000000000000000135 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/stein/expand/0ff9e3881597_network_segment_range.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/stein/expand/0ff9e38815970000644000175000017500000000376300000000000032752 0ustar00coreycorey00000000000000# Copyright 2019 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """network segment ranges Revision ID: 0ff9e3881597 Revises: fb0167bd9639 Create Date: 2019-02-27 14:40:15.492884 """ # revision identifiers, used by Alembic. 
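# Editor's note: the two unique constraints created for portforwardings in
# 867d39095bf4 above deliberately omit the protocol column; d72db3e25539,
# later in this branch, drops and recreates them with protocol included so
# one external port can be forwarded once per protocol:
#
#     (floatingip_id, external_port)              # 867d39095bf4
#     (floatingip_id, external_port, protocol)    # d72db3e25539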
revision = '0ff9e3881597' down_revision = 'fb0167bd9639' network_segment_range_network_type = sa.Enum( 'vlan', 'vxlan', 'gre', 'geneve', name='network_segment_range_network_type') def upgrade(): op.create_table( 'network_segment_ranges', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('default', sa.Boolean(), nullable=False), sa.Column('shared', sa.Boolean(), nullable=False), sa.Column('project_id', sa.String(length=255), nullable=True), sa.Column('network_type', network_segment_range_network_type, nullable=False), sa.Column('physical_network', sa.String(length=64), nullable=True), sa.Column('minimum', sa.Integer(), nullable=True), sa.Column('maximum', sa.Integer(), nullable=True), sa.Column('standard_attr_id', sa.BigInteger(), nullable=False), sa.ForeignKeyConstraint(['standard_attr_id'], ['standardattributes.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('standard_attr_id') ) ././@PaxHeader0000000000000000000000000000023500000000000011455 xustar0000000000000000135 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/stein/expand/195176fb410d_router_gateway_ip_qos.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/stein/expand/195176fb410d0000644000175000017500000000275200000000000032722 0ustar00coreycorey00000000000000# Copyright 2018 OpenStack Foundation # Copyright 2017 Letv Cloud Computing # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """router gateway IP QoS Revision ID: 195176fb410d Revises: cada2437bf41 Create Date: 2016-04-28 12:38:09.872706 """ from alembic import op import sqlalchemy as sa from neutron_lib.db import constants as db_const # revision identifiers, used by Alembic. revision = '195176fb410d' down_revision = 'cada2437bf41' def upgrade(): op.create_table( 'qos_router_gw_policy_bindings', sa.Column('policy_id', sa.String(length=db_const.UUID_FIELD_SIZE), sa.ForeignKey('qos_policies.id', ondelete='CASCADE'), nullable=False, primary_key=True), sa.Column('router_id', sa.String(length=db_const.UUID_FIELD_SIZE), sa.ForeignKey('routers.id', ondelete='CASCADE'), nullable=False, unique=True, primary_key=True)) ././@PaxHeader0000000000000000000000000000024600000000000011457 xustar0000000000000000144 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/stein/expand/9bfad3f1e780_support_shared_security_groups.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/stein/expand/9bfad3f1e7800000644000175000017500000000360600000000000033147 0ustar00coreycorey00000000000000# Copyright 2019 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa from neutron.db import migration """support shared security groups Revision ID: 9bfad3f1e780 Revises: 0ff9e3881597 Create Date: 2019-02-05 15:24:45.011378 """ # revision identifiers, used by Alembic. revision = '9bfad3f1e780' down_revision = '0ff9e3881597' # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.STEIN] def upgrade(): op.create_table( 'securitygrouprbacs', sa.Column('project_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('target_tenant', sa.String(length=255), nullable=False), sa.Column('action', sa.String(length=255), nullable=False), sa.Column('object_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['object_id'], ['securitygroups.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('target_tenant', 'object_id', 'action', name='uniq_securitygrouprbacs0' 'target_tenant0object_id0action') ) op.create_index(op.f('ix_securitygrouprbacs_project_id'), 'securitygrouprbacs', ['project_id'], unique=False) ././@PaxHeader0000000000000000000000000000025300000000000011455 xustar0000000000000000149 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/stein/expand/cada2437bf41_add_propagate_uplink_status_to_port.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/stein/expand/cada2437bf410000644000175000017500000000254500000000000033132 0ustar00coreycorey00000000000000# Copyright 2018 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """add propagate_uplink_status to port Revision ID: cada2437bf41 Revises: d72db3e25539 Create Date: 2018-11-29 19:25:12.197590 """ # revision identifiers, used by Alembic. 
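# Editor's note: 9bfad3f1e780 above builds the project_id index with an
# explicit op.create_index() call; op.f() marks the name as fully rendered
# so Alembic's naming conventions do not rewrite it. The inline equivalent
# (shown for contrast only) would have been:
#
#     sa.Column('project_id', sa.String(length=255), nullable=True,
#               index=True)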
revision = 'cada2437bf41' down_revision = 'd72db3e25539' def upgrade(): op.create_table('portuplinkstatuspropagation', sa.Column('port_id', sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True, index=True), sa.Column('propagate_uplink_status', sa.Boolean(), nullable=False, server_default=sa.sql.false())) ././@PaxHeader0000000000000000000000000000024300000000000011454 xustar0000000000000000141 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/stein/expand/d72db3e25539_modify_uniq_port_forwarding.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/stein/expand/d72db3e255390000644000175000017500000000525500000000000033010 0ustar00coreycorey00000000000000# Copyright 2018 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """modify uniq port forwarding Revision ID: d72db3e25539 Revises: 867d39095bf4 Create Date: 2018-10-12 19:51:11.981394 """ from alembic import op import sqlalchemy as sa from sqlalchemy.engine import reflection from neutron.db import migration # revision identifiers, used by Alembic. revision = 'd72db3e25539' down_revision = '867d39095bf4' TABLE_NAME = 'portforwardings' def upgrade(): inspector = reflection.Inspector.from_engine(op.get_bind()) foreign_keys = inspector.get_foreign_keys(TABLE_NAME) migration.remove_foreign_keys(TABLE_NAME, foreign_keys) unique_constraints = inspector.get_unique_constraints(TABLE_NAME) for constraint in unique_constraints: op.drop_constraint( constraint_name=constraint['name'], table_name=TABLE_NAME, type_="unique" ) op.create_unique_constraint( constraint_name=('uniq_port_forwardings0floatingip_id0' 'external_port0protocol'), table_name=TABLE_NAME, columns=['floatingip_id', 'external_port', 'protocol'] ) op.create_unique_constraint( constraint_name=('uniq_port_forwardings0internal_neutron_port_id0' 'socket0protocol'), table_name=TABLE_NAME, columns=['internal_neutron_port_id', 'socket', 'protocol'] ) migration.create_foreign_keys(TABLE_NAME, foreign_keys) def expand_drop_exceptions(): """Drop and replace the unique constraints for table portforwardings Drop the existing portforwardings foreign key and unique constraints, then replace them with new unique constraints that include the ``protocol`` column. This is needed so the drops can run in an expand migration and still pass test_branches. 
""" return { sa.Constraint: [ "portforwardings_ibfk_1", "portforwardings_ibfk_2", "uniq_port_forwardings0floatingip_id0external_port", "uniq_port_forwardings0internal_neutron_port_id0socket", "portforwardings_floatingip_id_fkey", "portforwardings_internal_neutron_port_id_fkey", ] } ././@PaxHeader0000000000000000000000000000023600000000000011456 xustar0000000000000000136 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/stein/expand/fb0167bd9639_agent_resources_synced.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/stein/expand/fb0167bd96390000644000175000017500000000176300000000000033014 0ustar00coreycorey00000000000000# Copyright 2019 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """agent_resources_synced Revision ID: fb0167bd9639 Revises: 195176fb410d Create Date: 2019-01-04 12:34:44.563725 """ # revision identifiers, used by Alembic. revision = 'fb0167bd9639' down_revision = '195176fb410d' def upgrade(): op.add_column( 'agents', sa.Column('resources_synced', sa.Boolean(), server_default=None)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982290.9710417 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/train/0000755000175000017500000000000000000000000030030 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2830443 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/train/expand/0000755000175000017500000000000000000000000031307 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000023000000000000011450 xustar0000000000000000130 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/train/expand/63fd95af7dcd_conntrack_helper.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/train/expand/63fd95af7dcd0000644000175000017500000000323600000000000033227 0ustar00coreycorey00000000000000# Copyright (c) 2019 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op from neutron_lib.db import constants as db_const import sqlalchemy as sa """conntrack helper Revision ID: 63fd95af7dcd Revises: 9bfad3f1e780 Create Date: 2019-03-26 15:37:20.996070 """ # revision identifiers, used by Alembic. 
revision = '63fd95af7dcd' down_revision = '9bfad3f1e780' def upgrade(): op.create_table( 'conntrack_helpers', sa.Column('id', sa.String(length=db_const.UUID_FIELD_SIZE), nullable=False, primary_key=True), sa.Column('router_id', sa.String(length=db_const.UUID_FIELD_SIZE), nullable=False), sa.Column('protocol', sa.String(length=40), nullable=False), sa.Column('port', sa.Integer(), nullable=False), sa.Column('helper', sa.String(length=64), nullable=False), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ondelete='CASCADE'), sa.UniqueConstraint( 'router_id', 'protocol', 'port', 'helper', name='uniq_conntrack_helpers0router_id0protocol0port0helper') ) ././@PaxHeader0000000000000000000000000000023700000000000011457 xustar0000000000000000137 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/train/expand/c613d0b82681_subnet_force_network_id.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/train/expand/c613d0b826810000644000175000017500000000215600000000000032711 0ustar00coreycorey00000000000000# Copyright 2019 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa from neutron.db import migration """subnet force network id Revision ID: c613d0b82681 Revises: 63fd95af7dcd Create Date: 2019-08-19 11:15:14.443244 """ # revision identifiers, used by Alembic. revision = 'c613d0b82681' down_revision = '63fd95af7dcd' # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.TRAIN] def upgrade(): op.alter_column('subnets', 'network_id', nullable=False, existing_type=sa.String(36)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982290.9710417 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ussuri/0000755000175000017500000000000000000000000030245 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2830443 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ussuri/expand/0000755000175000017500000000000000000000000031524 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000024400000000000011455 xustar0000000000000000142 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ussuri/expand/18a7e90ae768_add_security_group_stateful.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ussuri/expand/18a7e90ae760000644000175000017500000000212600000000000033131 0ustar00coreycorey00000000000000# Copyright 2018 NOKIA # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """add security group stateful Revision ID: 18a7e90ae768 Revises: 2217c4222de6 Create Date: 2018-04-26 14:44:52.635576 """ # revision identifiers, used by Alembic. revision = '18a7e90ae768' down_revision = '2217c4222de6' def upgrade(): op.add_column('securitygroups', sa.Column('stateful', sa.Boolean(), server_default=sa.sql.true(), nullable=False)) ././@PaxHeader0000000000000000000000000000025700000000000011461 xustar0000000000000000153 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ussuri/expand/2217c4222de6_add_dvr_fip_gateway_port_network_table.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ussuri/expand/2217c4222de0000644000175000017500000000242000000000000033026 0ustar00coreycorey00000000000000# Copyright 2020 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """add dvr FIP gateway port network table Revision ID: 2217c4222de6 Revises: Ibac91d24da2 Create Date: 2020-01-13 01:47:11.649472 """ # revision identifiers, used by Alembic. revision = '2217c4222de6' down_revision = 'Ibac91d24da2' def upgrade(): op.create_table( 'dvr_fip_gateway_port_network', sa.Column('network_id', sa.String(length=36), sa.ForeignKey('networks.id', ondelete='CASCADE'), primary_key=True), sa.Column('agent_id', sa.String(length=36), sa.ForeignKey('agents.id', ondelete='CASCADE'), primary_key=True) ) ././@PaxHeader0000000000000000000000000000025400000000000011456 xustar0000000000000000150 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ussuri/expand/263d454a9655_add_dns_publish_fixed_ip_to_subnets.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ussuri/expand/263d454a9650000644000175000017500000000322700000000000032773 0ustar00coreycorey00000000000000# Copyright 2019 x-ion GmbH # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
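# Editor's note: 18a7e90ae768 above adds securitygroups.stateful with
# server_default=sa.sql.true(), so every pre-existing security group keeps
# stateful behaviour across the upgrade; only groups explicitly updated
# afterwards can become stateless. A hypothetical audit query:
#
#     SELECT COUNT(*) FROM securitygroups WHERE stateful = false;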
# from alembic import op from neutron_lib.db import constants as db_const import sqlalchemy as sa """Add table and relations for subnet dns_publish_fixed_ip attribute Revision ID: 263d454a9655 Revises: a010322604bc Create Date: 2019-05-24 10:00:00.000000 """ # revision identifiers, used by Alembic. revision = '263d454a9655' down_revision = 'a010322604bc' def upgrade(): op.create_table('subnet_dns_publish_fixed_ips', sa.Column('subnet_id', sa.String(length=db_const.UUID_FIELD_SIZE), nullable=False, index=True), sa.Column('dns_publish_fixed_ip', sa.Boolean(), nullable=False, server_default=sa.sql.false()), sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('subnet_id')) ././@PaxHeader0000000000000000000000000000023700000000000011457 xustar0000000000000000137 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ussuri/expand/86274d77933e_change_mtu_to_not_null.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ussuri/expand/86274d779330000644000175000017500000000323600000000000032726 0ustar00coreycorey00000000000000# Copyright 2019 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op from neutron_lib import constants import sqlalchemy as sa """change_mtu_to_not_null Revision ID: 86274d77933e Revises: c3e9d13c4367 Create Date: 2019-08-30 15:52:30.015146 """ # revision identifiers, used by Alembic. revision = '86274d77933e' down_revision = 'c3e9d13c4367' networks = sa.Table( 'networks', sa.MetaData(), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('mtu', sa.Integer(), nullable=True)) def upgrade_existing_records(): session = sa.orm.Session(bind=op.get_bind()) with session.begin(subtransactions=True): for row in session.query(networks): if row[1] is None: session.execute(networks.update().values( mtu=constants.DEFAULT_NETWORK_MTU).where( networks.c.id == row[0])) session.commit() def upgrade(): upgrade_existing_records() op.alter_column('networks', 'mtu', nullable=False, server_default=str(constants.DEFAULT_NETWORK_MTU), existing_type=sa.INTEGER()) ././@PaxHeader0000000000000000000000000000024400000000000011455 xustar0000000000000000142 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ussuri/expand/Ibac91d24da2_port_forwarding_description.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ussuri/expand/Ibac91d24da0000644000175000017500000000522300000000000033300 0ustar00coreycorey00000000000000# Copyright 2019 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
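# The migration in this file retrofits port forwardings onto Neutron's
# standardattributes table, the mechanism that gives a resource its
# description and other standard attributes. It follows the usual
# three-step retrofit: add standard_attr_id as a nullable column,
# backfill one standardattributes row per existing record, then alter
# the column to NOT NULL and add the FK and unique constraint. Once
# applied, each rule joins to its attributes roughly like this
# (illustrative SQL):
#
#     SELECT pf.id, std.description
#     FROM portforwardings pf
#     JOIN standardattributes std ON std.id = pf.standard_attr_id;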
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import sqlalchemy as sa from alembic import op """port forwarding rule description Revision ID: Ibac91d24da2 Revises: 263d454a9655 Create Date: 2019-07-13 10:00:00.000000 """ # revision identifiers, used by Alembic. revision = 'Ibac91d24da2' down_revision = '263d454a9655' TABLE_NAME = 'portforwardings' TABLE_MODEL = sa.Table(TABLE_NAME, sa.MetaData(), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('standard_attr_id', sa.BigInteger(), nullable=True)) STDATTRS_TABLE = sa.Table( 'standardattributes', sa.MetaData(), sa.Column('id', sa.BigInteger(), primary_key=True, autoincrement=True), sa.Column('resource_type', sa.String(length=255), nullable=False)) def update_existing_records(): session = sa.orm.Session(bind=op.get_bind()) with session.begin(subtransactions=True): for row in session.query(TABLE_MODEL): res = session.execute( STDATTRS_TABLE.insert().values(resource_type=TABLE_NAME) ) session.execute( TABLE_MODEL.update().values( standard_attr_id=res.inserted_primary_key[0]).where( TABLE_MODEL.c.id == row[0]) ) session.commit() def upgrade(): op.add_column(TABLE_NAME, sa.Column('standard_attr_id', sa.BigInteger(), nullable=True)) update_existing_records() op.alter_column(TABLE_NAME, 'standard_attr_id', nullable=False, existing_type=sa.BigInteger(), existing_nullable=True, existing_server_default=False) op.create_foreign_key( constraint_name=None, source_table=TABLE_NAME, referent_table='standardattributes', local_cols=['standard_attr_id'], remote_cols=['id'], ondelete='CASCADE') op.create_unique_constraint( constraint_name='uniq_%s0standard_attr_id' % TABLE_NAME, table_name=TABLE_NAME, columns=['standard_attr_id']) ././@PaxHeader0000000000000000000000000000024300000000000011454 xustar0000000000000000141 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ussuri/expand/a010322604bc_network_subnet_update_lock.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ussuri/expand/a010322604b0000644000175000017500000000222600000000000032735 0ustar00coreycorey00000000000000# Copyright 2019 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa """network subnet update lock Revision ID: a010322604bc Revises: f4b9654dd40c Create Date: 2019-11-20 18:05:00.812058 """ # revision identifiers, used by Alembic. 
revision = 'a010322604bc' down_revision = 'f4b9654dd40c' def upgrade(): op.create_table( 'network_subnet_lock', sa.Column('network_id', sa.String(length=36), sa.ForeignKey('networks.id', ondelete='CASCADE'), primary_key=True), sa.Column('subnet_id', sa.String(length=36)) ) ././@PaxHeader0000000000000000000000000000023600000000000011456 xustar0000000000000000136 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ussuri/expand/c3e9d13c4367_add_binding_index_to_.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ussuri/expand/c3e9d13c4360000644000175000017500000000442000000000000033122 0ustar00coreycorey00000000000000# Copyright 2019 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from collections import defaultdict from alembic import op import sqlalchemy as sa """Add binding index to NetworkDhcpAgentBindings Revision ID: c3e9d13c4367 Revises: c613d0b82681 Create Date: 2019-08-20 18:42:39.647676 """ # revision identifiers, used by Alembic. revision = 'c3e9d13c4367' down_revision = 'c613d0b82681' NETWORK_DHCP_AGENT_BINDING = 'networkdhcpagentbindings' def upgrade(): op.add_column(NETWORK_DHCP_AGENT_BINDING, sa.Column('binding_index', sa.Integer(), nullable=False, server_default='1')) bindings_table = sa.Table( NETWORK_DHCP_AGENT_BINDING, sa.MetaData(), sa.Column('network_id', sa.String(36)), sa.Column('dhcp_agent_id', sa.String(36)), sa.Column('binding_index', sa.Integer, nullable=False, server_default='1'), ) networks_to_bindings = defaultdict(list) session = sa.orm.Session(bind=op.get_bind()) with session.begin(subtransactions=True): for result in session.query(bindings_table): networks_to_bindings[result.network_id].append(result) for bindings in networks_to_bindings.values(): for index, result in enumerate(bindings): session.execute(bindings_table.update().values( binding_index=index + 1).where( bindings_table.c.network_id == result.network_id).where( bindings_table.c.dhcp_agent_id == result.dhcp_agent_id)) session.commit() op.create_unique_constraint( 'uniq_network_dhcp_agent_binding0network_id0binding_index0', NETWORK_DHCP_AGENT_BINDING, ['network_id', 'binding_index']) ././@PaxHeader0000000000000000000000000000025300000000000011455 xustar0000000000000000149 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ussuri/expand/e4e236b0e1ff_add_rbac_support_for_address_scope.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ussuri/expand/e4e236b0e1f0000644000175000017500000000574300000000000033206 0ustar00coreycorey00000000000000# Copyright 2020 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op from oslo_utils import uuidutils import sqlalchemy as sa from sqlalchemy import sql """add_rbac_support_for_address_scope Revision ID: e4e236b0e1ff Revises: 18a7e90ae768 Create Date: 2020-03-12 11:24:07.435031 """ # revision identifiers, used by Alembic. revision = 'e4e236b0e1ff' down_revision = '18a7e90ae768' depends_on = ('7d9d8eeec6ad',) def upgrade(): address_scope_rbacs = op.create_table( 'addressscoperbacs', sa.MetaData(), sa.Column('project_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('target_tenant', sa.String(length=255), nullable=False), sa.Column('action', sa.String(length=255), nullable=False), sa.Column('object_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['object_id'], ['address_scopes.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('target_tenant', 'object_id', 'action', name='uniq_address_scopes_rbacs0' 'target_tenant0object_id0action') ) op.alter_column('address_scopes', 'shared', server_default=sql.false()) op.bulk_insert(address_scope_rbacs, get_rbac_policies_for_shared_address_scopes()) op.create_index(op.f('ix_addressscoperbacs_project_id'), 'addressscoperbacs', ['project_id'], unique=False) def get_rbac_policies_for_shared_address_scopes(): # A simple model of the address_scopes table with only the fields needed # for the migration. address_scope = sa.Table( 'address_scopes', sa.MetaData(), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('project_id', sa.String(length=255)), sa.Column('shared', sa.Boolean(), nullable=False) ) session = sa.orm.Session(bind=op.get_bind()) shared_address_scopes = session.query(address_scope).filter( address_scope.c.shared).all() values = [] for row in shared_address_scopes: values.append({'id': uuidutils.generate_uuid(), 'object_id': row[0], 'project_id': row[1], 'target_tenant': '*', 'action': 'access_as_shared'}) # this commit appears to be necessary to allow further operations session.commit() return values ././@PaxHeader0000000000000000000000000000025000000000000011452 xustar0000000000000000146 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ussuri/expand/e88badaa9591_add_rbac_support_for_subnetpool.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ussuri/expand/e88badaa9590000644000175000017500000000557000000000000033302 0ustar00coreycorey00000000000000# Copyright 2020 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
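# Like the address-scope migration above, the subnetpool migration below
# declares depends_on = ('7d9d8eeec6ad',). depends_on is Alembic's way
# of requiring that another revision (here, one from a different branch)
# be applied before this one, without chaining it as a down_revision.
# A minimal sketch of the construct (illustrative revision ids):
#
#     revision = 'aaaaaaaaaaaa'        # this script
#     down_revision = 'bbbbbbbbbbbb'   # direct ancestor in this branch
#     depends_on = ('cccccccccccc',)   # must already be applied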
# from alembic import op from oslo_utils import uuidutils import sqlalchemy as sa from sqlalchemy import sql """add rbac support for subnetpool Revision ID: e88badaa9591 Revises: e4e236b0e1ff Create Date: 2020-02-10 12:30:30.060646 """ # revision identifiers, used by Alembic. revision = 'e88badaa9591' down_revision = 'e4e236b0e1ff' depends_on = ('7d9d8eeec6ad',) def upgrade(): subnetpool_rbacs = op.create_table( 'subnetpoolrbacs', sa.MetaData(), sa.Column('project_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('target_tenant', sa.String(length=255), nullable=False), sa.Column('action', sa.String(length=255), nullable=False), sa.Column('object_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['object_id'], ['subnetpools.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('target_tenant', 'object_id', 'action', name='uniq_subnetpools_rbacs0' 'target_tenant0object_id0action') ) op.alter_column('subnetpools', 'shared', server_default=sql.false()) op.bulk_insert( subnetpool_rbacs, get_rbac_policies_for_shared_subnetpools() ) op.create_index(op.f('ix_subnetpoolrbacs_project_id'), 'subnetpoolrbacs', ['project_id'], unique=False) def get_rbac_policies_for_shared_subnetpools(): # A simple model of the subnetpools table with only the fields needed for # the migration. subnetpool = sa.Table( 'subnetpools', sa.MetaData(), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('project_id', sa.String(length=255)), sa.Column('shared', sa.Boolean(), nullable=False) ) session = sa.orm.Session(bind=op.get_bind()) values = [] for row in session.query(subnetpool).filter(subnetpool.c.shared).all(): values.append({'id': uuidutils.generate_uuid(), 'object_id': row[0], 'project_id': row[1], 'target_tenant': '*', 'action': 'access_as_shared'}) # this commit appears to be necessary to allow further operations session.commit() return values ././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ussuri/expand/f4b9654dd40c_ovn_backend.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/versions/ussuri/expand/f4b9654dd400000644000175000017500000000535000000000000033131 0ustar00coreycorey00000000000000# Copyright 2019 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from alembic import op import sqlalchemy as sa from sqlalchemy.engine import reflection """ovn backend Revision ID: f4b9654dd40c Revises: 86274d77933e Create Date: 2019-11-25 13:09:31.367837 """ # revision identifiers, used by Alembic. 
revision = 'f4b9654dd40c' down_revision = '86274d77933e' OVN_REVISION_NUMBERS = 'ovn_revision_numbers' OVN_HASH_RING = 'ovn_hash_ring' def upgrade(): inspector = reflection.Inspector.from_engine(op.get_bind()) table_names = inspector.get_table_names() if OVN_REVISION_NUMBERS in table_names and OVN_HASH_RING in table_names: op.alter_column(OVN_REVISION_NUMBERS, 'revision_number', nullable=False, server_default='0', existing_type=sa.BIGINT(), existing_nullable=False) return op.create_table( OVN_REVISION_NUMBERS, sa.Column('standard_attr_id', sa.BigInteger, nullable=True), sa.Column('resource_uuid', sa.String(36), nullable=False, index=True), sa.Column('resource_type', sa.String(36), nullable=False, index=True), sa.Column('revision_number', sa.BigInteger, nullable=False, server_default='0'), sa.Column('created_at', sa.DateTime, nullable=False, default=sa.func.now()), sa.Column('updated_at', sa.TIMESTAMP, default=sa.func.now(), onupdate=sa.func.now(), nullable=True), sa.ForeignKeyConstraint( ['standard_attr_id'], ['standardattributes.id'], ondelete='SET NULL'), sa.PrimaryKeyConstraint('resource_uuid', 'resource_type') ) op.create_table( OVN_HASH_RING, sa.Column('node_uuid', sa.String(36), nullable=False, index=True), sa.Column('group_name', sa.String(length=256), nullable=False, index=True), sa.Column('hostname', sa.String(length=256), nullable=False), sa.Column('created_at', sa.DateTime, nullable=False, default=sa.func.now()), sa.Column('updated_at', sa.DateTime, nullable=False, default=sa.func.now()), sa.PrimaryKeyConstraint('node_uuid', 'group_name'), ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/vmware_init_ops.py0000644000175000017500000002220000000000000030616 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
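# The ovn backend migration above keeps itself idempotent by reflecting
# the database schema before creating any tables. The same reflection
# API works against any engine; a minimal self-contained sketch, using
# an in-memory SQLite engine purely for illustration:
import sqlalchemy as _sa_demo
from sqlalchemy.engine import reflection as _reflection_demo

_demo_engine = _sa_demo.create_engine('sqlite://')
_demo_inspector = _reflection_demo.Inspector.from_engine(_demo_engine)
# A brand-new database reports no tables, so the create path would run.
assert _demo_inspector.get_table_names() == []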
# # Initial schema operations for VMware plugins from alembic import op import sqlalchemy as sa net_binding_type = sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext', name='tz_network_bindings_binding_type') l2gw_segmentation_type = sa.Enum('flat', 'vlan', name='networkconnections_segmentation_type') qos_marking = sa.Enum('untrusted', 'trusted', name='qosqueues_qos_marking') def upgrade(): op.create_table( 'tz_network_bindings', sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('binding_type', net_binding_type, nullable=False), sa.Column('phy_uuid', sa.String(length=36), nullable=True), sa.Column('vlan_id', sa.Integer(), autoincrement=False, nullable=True), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id', 'binding_type', 'phy_uuid', 'vlan_id')) op.create_table( 'multi_provider_networks', sa.Column('network_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id')) op.create_table( 'vcns_router_bindings', sa.Column('status', sa.String(length=16), nullable=False), sa.Column('status_description', sa.String(length=255), nullable=True), sa.Column('router_id', sa.String(length=36), nullable=False), sa.Column('edge_id', sa.String(length=16), nullable=True), sa.Column('lswitch_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('router_id')) op.create_table( 'networkgateways', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('tenant_id', sa.String(length=36), nullable=True), sa.Column('default', sa.Boolean(), nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table( 'networkconnections', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('network_gateway_id', sa.String(length=36), nullable=True), sa.Column('network_id', sa.String(length=36), nullable=True), sa.Column('segmentation_type', l2gw_segmentation_type, nullable=True), sa.Column('segmentation_id', sa.Integer(), nullable=True), sa.Column('port_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['network_gateway_id'], ['networkgateways.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('port_id'), sa.UniqueConstraint('network_gateway_id', 'segmentation_type', 'segmentation_id')) op.create_table( 'qosqueues', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('default', sa.Boolean(), nullable=True, server_default=sa.sql.false()), sa.Column('min', sa.Integer(), nullable=False), sa.Column('max', sa.Integer(), nullable=True), sa.Column('qos_marking', qos_marking, nullable=True), sa.Column('dscp', sa.Integer(), nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table( 'networkqueuemappings', sa.Column('network_id', sa.String(length=36), nullable=False), sa.Column('queue_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['queue_id'], ['qosqueues.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id')) op.create_table( 'portqueuemappings', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('queue_id', 
sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['queue_id'], ['qosqueues.id'], ), sa.PrimaryKeyConstraint('port_id', 'queue_id')) op.create_table( 'maclearningstates', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('mac_learning_enabled', sa.Boolean(), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('port_id')) op.create_table('neutron_nsx_port_mappings', sa.Column('neutron_id', sa.String(length=36), nullable=False), sa.Column('nsx_port_id', sa.String(length=36), nullable=False), sa.Column('nsx_switch_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['neutron_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('neutron_id')) op.create_table( 'lsn', sa.Column('net_id', sa.String(length=36), nullable=False), sa.Column('lsn_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('lsn_id')) op.create_table( 'lsn_port', sa.Column('lsn_port_id', sa.String(length=36), nullable=False), sa.Column('lsn_id', sa.String(length=36), nullable=False), sa.Column('sub_id', sa.String(length=36), nullable=False, unique=True), sa.Column('mac_addr', sa.String(length=32), nullable=False, unique=True), sa.ForeignKeyConstraint(['lsn_id'], ['lsn.lsn_id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('lsn_port_id')) op.create_table( 'neutron_nsx_network_mappings', sa.Column('neutron_id', sa.String(length=36), nullable=False), sa.Column('nsx_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['neutron_id'], ['networks.id'], ondelete='CASCADE'), # There might be multiple switches for a neutron network sa.PrimaryKeyConstraint('neutron_id', 'nsx_id'), ) op.create_table( 'neutron_nsx_router_mappings', sa.Column('neutron_id', sa.String(length=36), nullable=False), sa.Column('nsx_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['neutron_id'], ['routers.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('neutron_id'), ) op.create_table( 'neutron_nsx_security_group_mappings', sa.Column('neutron_id', sa.String(length=36), nullable=False), sa.Column('nsx_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['neutron_id'], ['securitygroups.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('neutron_id', 'nsx_id')) op.create_table( 'networkgatewaydevicereferences', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('network_gateway_id', sa.String(length=36), nullable=True), sa.Column('interface_name', sa.String(length=64), nullable=True), sa.ForeignKeyConstraint(['network_gateway_id'], ['networkgateways.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id', 'network_gateway_id', 'interface_name')) op.create_table( 'networkgatewaydevices', sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('nsx_id', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('connector_type', sa.String(length=10), nullable=True), sa.Column('connector_ip', sa.String(length=64), nullable=True), sa.Column('status', sa.String(length=16), nullable=True), sa.PrimaryKeyConstraint('id')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/alembic_migrations/vpn_init_ops.py0000644000175000017500000001425200000000000030130 
0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Initial schema operations for IPSEC VPN service plugin from alembic import op import sqlalchemy as sa auth_algorithms = sa.Enum('sha1', name='vpn_auth_algorithms') encryption_algorithms = sa.Enum('3des', 'aes-128', 'aes-256', 'aes-192', name='vpn_encrypt_algorithms') encapsulation_modes = sa.Enum('tunnel', 'transport', name='ipsec_encapsulations') lifetime_unit_types = sa.Enum('seconds', 'kilobytes', name='vpn_lifetime_units') transform_protocols = sa.Enum('esp', 'ah', 'ah-esp', name='ipsec_transform_protocols') pfs_types = sa.Enum('group2', 'group5', 'group14', name='vpn_pfs') phase1_negotiation_modes = sa.Enum('main', name='ike_phase1_mode') ike_versions = sa.Enum('v1', 'v2', name='ike_versions') initiator_types = sa.Enum('bi-directional', 'response-only', name='vpn_initiators') dpd_actions = sa.Enum('hold', 'clear', 'restart', 'disabled', 'restart-by-peer', name='vpn_dpd_actions') def upgrade(): op.create_table( 'ipsecpolicies', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=255), nullable=True), sa.Column('transform_protocol', transform_protocols, nullable=False), sa.Column('auth_algorithm', auth_algorithms, nullable=False), sa.Column('encryption_algorithm', encryption_algorithms, nullable=False), sa.Column('encapsulation_mode', encapsulation_modes, nullable=False), sa.Column('lifetime_units', lifetime_unit_types, nullable=False), sa.Column('lifetime_value', sa.Integer(), nullable=False), sa.Column('pfs', pfs_types, nullable=False), sa.PrimaryKeyConstraint('id')) op.create_table( 'ikepolicies', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=255), nullable=True), sa.Column('auth_algorithm', auth_algorithms, nullable=False), sa.Column('encryption_algorithm', encryption_algorithms, nullable=False), sa.Column('phase1_negotiation_mode', phase1_negotiation_modes, nullable=False), sa.Column('lifetime_units', lifetime_unit_types, nullable=False), sa.Column('lifetime_value', sa.Integer(), nullable=False), sa.Column('ike_version', ike_versions, nullable=False), sa.Column('pfs', pfs_types, nullable=False), sa.PrimaryKeyConstraint('id')) op.create_table( 'vpnservices', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=255), nullable=True), sa.Column('status', sa.String(length=16), nullable=False), sa.Column('admin_state_up', sa.Boolean(), nullable=False), sa.Column('subnet_id', sa.String(length=36), nullable=False), sa.Column('router_id', sa.String(length=36), nullable=False), 
sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], ), sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ), sa.PrimaryKeyConstraint('id')) op.create_table( 'ipsec_site_connections', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=255), nullable=True), sa.Column('peer_address', sa.String(length=255), nullable=False), sa.Column('peer_id', sa.String(length=255), nullable=False), sa.Column('route_mode', sa.String(length=8), nullable=False), sa.Column('mtu', sa.Integer(), nullable=False), sa.Column('initiator', initiator_types, nullable=False), sa.Column('auth_mode', sa.String(length=16), nullable=False), sa.Column('psk', sa.String(length=255), nullable=False), sa.Column('dpd_action', dpd_actions, nullable=False), sa.Column('dpd_interval', sa.Integer(), nullable=False), sa.Column('dpd_timeout', sa.Integer(), nullable=False), sa.Column('status', sa.String(length=16), nullable=False), sa.Column('admin_state_up', sa.Boolean(), nullable=False), sa.Column('vpnservice_id', sa.String(length=36), nullable=False), sa.Column('ipsecpolicy_id', sa.String(length=36), nullable=False), sa.Column('ikepolicy_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['vpnservice_id'], ['vpnservices.id'], ), sa.ForeignKeyConstraint(['ipsecpolicy_id'], ['ipsecpolicies.id'], ), sa.ForeignKeyConstraint(['ikepolicy_id'], ['ikepolicies.id'], ), sa.PrimaryKeyConstraint('id')) op.create_table( 'ipsecpeercidrs', sa.Column('cidr', sa.String(length=32), nullable=False), sa.Column('ipsec_site_connection_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['ipsec_site_connection_id'], ['ipsec_site_connections.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('cidr', 'ipsec_site_connection_id')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/autogen.py0000644000175000017500000000756200000000000023241 0ustar00coreycorey00000000000000# Copyright (c) 2015 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from alembic.operations import ops from alembic.util import Dispatcher from alembic.util import rev_id as new_rev_id from neutron._i18n import _ from neutron.db.migration import cli _ec_dispatcher = Dispatcher() def process_revision_directives(context, revision, directives): directives[:] = [ directive for directive in _assign_directives(context, directives) ] def _assign_directives(context, directives, phase=None): for directive in directives: decider = _ec_dispatcher.dispatch(directive) if phase is None: phases = cli.MIGRATION_BRANCHES else: phases = (phase,) for phase in phases: decided = decider(context, directive, phase) if decided: yield decided @_ec_dispatcher.dispatch_for(ops.MigrationScript) def _migration_script_ops(context, directive, phase): """Generate a new ops.MigrationScript() for a given phase. E.g. 
given an ops.MigrationScript() directive from a vanilla autogenerate and an expand/contract phase name, produce a new ops.MigrationScript() which contains only those sub-directives appropriate to "expand" or "contract". Also ensure that the branch directory exists and that the correct branch labels/depends_on/head revision are set up. """ version_path = cli._get_version_branch_path( context.config, release=cli.CURRENT_RELEASE, branch=phase) autogen_kwargs = {} cli._check_bootstrap_new_branch(phase, version_path, autogen_kwargs) op = ops.MigrationScript( new_rev_id(), ops.UpgradeOps(ops=[ d for d in _assign_directives( context, directive.upgrade_ops.ops, phase) ]), ops.DowngradeOps(ops=[]), message=directive.message, **autogen_kwargs ) if not op.upgrade_ops.is_empty(): return op @_ec_dispatcher.dispatch_for(ops.AddConstraintOp) @_ec_dispatcher.dispatch_for(ops.CreateIndexOp) @_ec_dispatcher.dispatch_for(ops.CreateTableOp) @_ec_dispatcher.dispatch_for(ops.AddColumnOp) def _expands(context, directive, phase): if phase == 'expand': return directive else: return None @_ec_dispatcher.dispatch_for(ops.DropConstraintOp) @_ec_dispatcher.dispatch_for(ops.DropIndexOp) @_ec_dispatcher.dispatch_for(ops.DropTableOp) @_ec_dispatcher.dispatch_for(ops.DropColumnOp) def _contracts(context, directive, phase): if phase == 'contract': return directive else: return None @_ec_dispatcher.dispatch_for(ops.AlterColumnOp) def _alter_column(context, directive, phase): is_expand = phase == 'expand' if is_expand and ( directive.modify_nullable is True ): return directive elif not is_expand and ( directive.modify_nullable is False ): return directive else: raise NotImplementedError( _("Don't know if operation is an expand or " "contract at the moment: %s") % directive) @_ec_dispatcher.dispatch_for(ops.ModifyTableOps) def _modify_table_ops(context, directive, phase): op = ops.ModifyTableOps( directive.table_name, ops=[ d for d in _assign_directives(context, directive.ops, phase) ], schema=directive.schema) if not op.is_empty(): return op ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/cli.py0000644000175000017500000005576100000000000022352 0ustar00coreycorey00000000000000# Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
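# This module backs the neutron-db-manage utility. Typical invocations
# of the commands wired up below (illustrative):
#
#     neutron-db-manage upgrade --expand    # online-safe schema additions
#     neutron-db-manage upgrade --contract  # requires servers to be down
#     neutron-db-manage revision -m "add foo" --autogenerate
#     neutron-db-manage has_offline_migrations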
import copy from logging import config as logging_config import os from alembic import command as alembic_command from alembic import config as alembic_config from alembic import environment from alembic import migration as alembic_migration from alembic import script as alembic_script from alembic import util as alembic_util from oslo_config import cfg from oslo_utils import fileutils from oslo_utils import importutils import six from neutron._i18n import _ from neutron.conf.db import migration_cli from neutron.db import migration from neutron.db.migration.connection import DBConnection HEAD_FILENAME = 'HEAD' HEADS_FILENAME = 'HEADS' CONTRACT_HEAD_FILENAME = 'CONTRACT_HEAD' EXPAND_HEAD_FILENAME = 'EXPAND_HEAD' CURRENT_RELEASE = migration.USSURI RELEASES = ( migration.LIBERTY, migration.MITAKA, migration.NEWTON, migration.OCATA, migration.PIKE, migration.QUEENS, migration.ROCKY, migration.STEIN, migration.TRAIN, migration.USSURI, ) EXPAND_BRANCH = 'expand' CONTRACT_BRANCH = 'contract' MIGRATION_BRANCHES = (EXPAND_BRANCH, CONTRACT_BRANCH) neutron_alembic_ini = os.path.join(os.path.dirname(__file__), 'alembic.ini') CONF = cfg.ConfigOpts() migration_cli.register_db_cli_opts(CONF) log_error = alembic_util.err log_warning = alembic_util.warn log_info = alembic_util.msg def do_alembic_command(config, cmd, revision=None, desc=None, **kwargs): args = [] if revision: args.append(revision) project = config.get_main_option('neutron_project') if desc: log_info(_('Running %(cmd)s (%(desc)s) for %(project)s ...') % {'cmd': cmd, 'desc': desc, 'project': project}) else: log_info(_('Running %(cmd)s for %(project)s ...') % {'cmd': cmd, 'project': project}) try: getattr(alembic_command, cmd)(config, *args, **kwargs) except alembic_util.CommandError as e: log_error(six.text_type(e)) log_info(_('OK')) def _get_alembic_entrypoint(project): if project not in migration_cli.migration_entrypoints: log_error(_('Sub-project %s not installed.') % project) return migration_cli.migration_entrypoints[project] def do_generic_show(config, cmd): kwargs = {'verbose': CONF.command.verbose} do_alembic_command(config, cmd, **kwargs) def do_check_migration(config, cmd): do_alembic_command(config, 'branches') validate_revisions(config) validate_head_files(config) def add_alembic_subparser(sub, cmd): return sub.add_parser(cmd, help=getattr(alembic_command, cmd).__doc__) def add_branch_options(parser): group = parser.add_mutually_exclusive_group() group.add_argument('--expand', action='store_true') group.add_argument('--contract', action='store_true') return group def _find_milestone_revisions(config, milestone, branch=None): """Return the revision(s) for a given milestone.""" script = alembic_script.ScriptDirectory.from_config(config) return [ (m.revision, label) for m in _get_revisions(script) for label in (m.branch_labels or [None]) if milestone in getattr(m.module, 'neutron_milestone', []) and (branch is None or branch in m.branch_labels) ] def do_upgrade(config, cmd): branch = None if ((CONF.command.revision or CONF.command.delta) and (CONF.command.expand or CONF.command.contract)): raise SystemExit(_( 'Phase upgrade options do not accept revision specification')) if CONF.command.expand: branch = EXPAND_BRANCH revision = _get_branch_head(EXPAND_BRANCH) elif CONF.command.contract: branch = CONTRACT_BRANCH revision = _get_branch_head(CONTRACT_BRANCH) elif not CONF.command.revision and not CONF.command.delta: raise SystemExit(_('You must provide a revision or relative delta')) else: revision = CONF.command.revision or '' if '-' 
in revision: raise SystemExit(_('Negative relative revision (downgrade) not ' 'supported')) delta = CONF.command.delta if delta: if '+' in revision: raise SystemExit(_('Use either --delta or relative revision, ' 'not both')) if delta < 0: raise SystemExit(_('Negative delta (downgrade) not supported')) revision = '%s+%d' % (revision, delta) # leave branchless 'head' revision request backward compatible by # applying all heads in all available branches. if revision == 'head': revision = 'heads' if revision in migration.NEUTRON_MILESTONES: expand_revisions = _find_milestone_revisions(config, revision, EXPAND_BRANCH) contract_revisions = _find_milestone_revisions(config, revision, CONTRACT_BRANCH) # Expand revisions must be run before contract revisions revisions = expand_revisions + contract_revisions else: revisions = [(revision, branch)] for revision, branch in revisions: if not CONF.command.sql: run_sanity_checks(config, revision) do_alembic_command(config, cmd, revision=revision, desc=branch, sql=CONF.command.sql) def no_downgrade(config, cmd): raise SystemExit(_("Downgrade no longer supported")) def do_stamp(config, cmd): do_alembic_command(config, cmd, revision=CONF.command.revision, sql=CONF.command.sql) def _get_branch_head(branch): '''Get the latest @head specification for a branch.''' return '%s@head' % branch def _check_bootstrap_new_branch(branch, version_path, addn_kwargs): addn_kwargs['version_path'] = version_path addn_kwargs['head'] = _get_branch_head(branch) if not os.path.exists(version_path): # Bootstrap initial directory structure fileutils.ensure_tree(version_path, mode=0o755) def do_revision(config, cmd): kwargs = { 'message': CONF.command.message, 'autogenerate': CONF.command.autogenerate, 'sql': CONF.command.sql, } branches = [] if CONF.command.expand: kwargs['head'] = 'expand@head' branches.append(EXPAND_BRANCH) elif CONF.command.contract: kwargs['head'] = 'contract@head' branches.append(CONTRACT_BRANCH) else: branches = MIGRATION_BRANCHES if not CONF.command.autogenerate: for branch in branches: args = copy.copy(kwargs) version_path = _get_version_branch_path( config, release=CURRENT_RELEASE, branch=branch) _check_bootstrap_new_branch(branch, version_path, args) do_alembic_command(config, cmd, **args) else: # autogeneration code will take care of enforcing proper directories do_alembic_command(config, cmd, **kwargs) update_head_files(config) def _get_release_labels(labels): result = set() for label in labels: # release labels were introduced Liberty for a short time and dropped # in that same release cycle result.add('%s_%s' % (migration.LIBERTY, label)) return result def _compare_labels(revision, expected_labels): # validate that the script has expected labels only bad_labels = revision.branch_labels - expected_labels if bad_labels: # NOTE(ihrachyshka): this hack is temporary to accommodate those # projects that already initialized their branches with liberty_* # labels. Let's notify them about the deprecation for now and drop it # later. bad_labels_with_release = (revision.branch_labels - _get_release_labels(expected_labels)) if not bad_labels_with_release: log_warning( _('Release aware branch labels (%s) are deprecated. 
' 'Please switch to expand@ and contract@ ' 'labels.') % bad_labels) return script_name = os.path.basename(revision.path) log_error( _('Unexpected label for script %(script_name)s: %(labels)s') % {'script_name': script_name, 'labels': bad_labels} ) def _validate_single_revision_labels(script_dir, revision, label=None): expected_labels = set() if label is not None: expected_labels.add(label) _compare_labels(revision, expected_labels) # if it's not the root element of the branch, expect the parent of the # script to have the same label if revision.down_revision is not None: down_revision = script_dir.get_revision(revision.down_revision) _compare_labels(down_revision, expected_labels) def _validate_revision(script_dir, revision): for branch in MIGRATION_BRANCHES: if branch in revision.path: _validate_single_revision_labels( script_dir, revision, label=branch) return # validate script from branchless part of migration rules _validate_single_revision_labels(script_dir, revision) def validate_revisions(config): script_dir = alembic_script.ScriptDirectory.from_config(config) revisions = _get_revisions(script_dir) for revision in revisions: _validate_revision(script_dir, revision) branchpoints = _get_branch_points(script_dir) if len(branchpoints) > 1: branchpoints = ', '.join(p.revision for p in branchpoints) log_error( _('Unexpected number of alembic branch points: %(branchpoints)s') % {'branchpoints': branchpoints} ) def _get_revisions(script): return list(script.walk_revisions(base='base', head='heads')) def _get_branch_points(script): branchpoints = [] for revision in _get_revisions(script): if revision.is_branch_point: branchpoints.append(revision) return branchpoints def _get_heads_map(config): script = alembic_script.ScriptDirectory.from_config(config) heads = script.get_heads() head_map = {} for head in heads: if CONTRACT_BRANCH in script.get_revision(head).branch_labels: head_map[CONTRACT_BRANCH] = head else: head_map[EXPAND_BRANCH] = head return head_map def _check_head(branch_name, head_file, head): try: with open(head_file) as file_: observed_head = file_.read().strip() except IOError: pass else: if observed_head != head: log_error( _('%(branch)s HEAD file does not match migration timeline ' 'head, expected: %(head)s') % {'branch': branch_name.title(), 'head': head}) def validate_head_files(config): '''Check that HEAD files contain the latest head for the branch.''' contract_head = _get_contract_head_file_path(config) expand_head = _get_expand_head_file_path(config) if not os.path.exists(contract_head) or not os.path.exists(expand_head): log_warning(_("Repository does not contain HEAD files for " "contract and expand branches.")) return head_map = _get_heads_map(config) _check_head(CONTRACT_BRANCH, contract_head, head_map[CONTRACT_BRANCH]) _check_head(EXPAND_BRANCH, expand_head, head_map[EXPAND_BRANCH]) def update_head_files(config): '''Update HEAD files with the latest branch heads.''' head_map = _get_heads_map(config) contract_head = _get_contract_head_file_path(config) expand_head = _get_expand_head_file_path(config) with open(contract_head, 'w+') as f: f.write(head_map[CONTRACT_BRANCH] + '\n') with open(expand_head, 'w+') as f: f.write(head_map[EXPAND_BRANCH] + '\n') old_head_file = _get_head_file_path(config) old_heads_file = _get_heads_file_path(config) for file_ in (old_head_file, old_heads_file): fileutils.delete_if_exists(file_) def _get_current_database_heads(config): with DBConnection(config.neutron_config.database.connection) as conn: opts = { 'version_table': 
get_alembic_version_table(config) } context = alembic_migration.MigrationContext.configure( conn, opts=opts) return context.get_current_heads() def has_offline_migrations(config, cmd): heads_map = _get_heads_map(config) if heads_map[CONTRACT_BRANCH] not in _get_current_database_heads(config): # If there is at least one contract revision not applied to database, # it means we should shut down all neutron-server instances before # proceeding with upgrade. project = config.get_main_option('neutron_project') log_info(_('Need to apply migrations from %(project)s ' 'contract branch. This will require all Neutron ' 'server instances to be shutdown before ' 'proceeding with the upgrade.') % {"project": project}) return True return False def add_command_parsers(subparsers): for name in ['current', 'history', 'branches', 'heads']: parser = add_alembic_subparser(subparsers, name) parser.set_defaults(func=do_generic_show) parser.add_argument('--verbose', action='store_true', help='Display more verbose output for the ' 'specified command') help_text = (getattr(alembic_command, 'branches').__doc__ + ' and validate head file') parser = subparsers.add_parser('check_migration', help=help_text) parser.set_defaults(func=do_check_migration) parser = add_alembic_subparser(subparsers, 'upgrade') parser.add_argument('--delta', type=int) parser.add_argument('--sql', action='store_true') parser.add_argument('revision', nargs='?') parser.add_argument('--mysql-engine', default='', help='Change MySQL storage engine of current ' 'existing tables') add_branch_options(parser) parser.set_defaults(func=do_upgrade) parser = subparsers.add_parser('downgrade', help="(No longer supported)") parser.add_argument('None', nargs='?', help="Downgrade not supported") parser.set_defaults(func=no_downgrade) parser = add_alembic_subparser(subparsers, 'stamp') parser.add_argument('--sql', action='store_true') parser.add_argument('revision') parser.set_defaults(func=do_stamp) parser = add_alembic_subparser(subparsers, 'revision') parser.add_argument('-m', '--message') parser.add_argument('--sql', action='store_true') group = add_branch_options(parser) group.add_argument('--autogenerate', action='store_true') parser.set_defaults(func=do_revision) parser = subparsers.add_parser( 'has_offline_migrations', help='Determine whether there are pending migration scripts that ' 'require full shutdown for all services that directly access ' 'database.') parser.set_defaults(func=has_offline_migrations) command_opt = cfg.SubCommandOpt('command', title='Command', help=_('Available commands'), handler=add_command_parsers) CONF.register_cli_opt(command_opt) def _get_project_base(config): '''Return the base python namespace name for a project.''' script_location = config.get_main_option('script_location') return script_location.split(':')[0].split('.')[0] def _get_package_root_dir(config): root_module = importutils.try_import(_get_project_base(config)) if not root_module: project = config.get_main_option('neutron_project') log_error(_("Failed to locate source for %s.") % project) # The root_module.__file__ property is a path like # '/opt/stack/networking-foo/networking_foo/__init__.py' # We return just # '/opt/stack/networking-foo' return os.path.dirname(os.path.dirname(root_module.__file__)) def _get_root_versions_dir(config): '''Return root directory that contains all migration rules.''' root_dir = _get_package_root_dir(config) script_location = config.get_main_option('script_location') # Script location is something like: # 
'project_base.db.migration:alembic_migrations' # Convert it to: # 'project_base/db/migration/alembic_migrations/versions' part1, part2 = script_location.split(':') parts = part1.split('.') + part2.split('.') + ['versions'] # Return the absolute path to the versions dir return os.path.join(root_dir, *parts) def _get_head_file_path(config): '''Return the path of the file that contains single head.''' return os.path.join( _get_root_versions_dir(config), HEAD_FILENAME) def _get_heads_file_path(config): '''Get heads file path Return the path of the file that was once used to maintain the list of latest heads. ''' return os.path.join( _get_root_versions_dir(config), HEADS_FILENAME) def _get_contract_head_file_path(config): '''Return the path of the file that is used to maintain contract head''' return os.path.join( _get_root_versions_dir(config), CONTRACT_HEAD_FILENAME) def _get_expand_head_file_path(config): '''Return the path of the file that is used to maintain expand head''' return os.path.join( _get_root_versions_dir(config), EXPAND_HEAD_FILENAME) def _get_version_branch_path(config, release=None, branch=None): version_path = _get_root_versions_dir(config) if branch and release: return os.path.join(version_path, release, branch) return version_path def _set_version_locations(config): '''Make alembic see all revisions in all migration branches.''' split_branches = False version_paths = [_get_version_branch_path(config)] for release in RELEASES: for branch in MIGRATION_BRANCHES: version_path = _get_version_branch_path(config, release, branch) if split_branches or os.path.exists(version_path): split_branches = True version_paths.append(version_path) config.set_main_option('version_locations', ' '.join(version_paths)) def _get_installed_entrypoint(subproject): '''Get the entrypoint for the subproject, which must be installed.''' if subproject not in migration_cli.migration_entrypoints: log_error(_('Package %s not installed') % subproject) return migration_cli.migration_entrypoints[subproject] def _get_subproject_script_location(subproject): '''Get the script location for the installed subproject.''' entrypoint = _get_installed_entrypoint(subproject) return ':'.join([entrypoint.module_name, entrypoint.attrs[0]]) def _get_subproject_base(subproject): '''Get the import base name for the installed subproject.''' entrypoint = _get_installed_entrypoint(subproject) return entrypoint.module_name.split('.')[0] def get_alembic_version_table(config): script_dir = alembic_script.ScriptDirectory.from_config(config) alembic_version_table = [None] def alembic_version_table_from_env(rev, context): alembic_version_table[0] = context.version_table return [] with environment.EnvironmentContext(config, script_dir, fn=alembic_version_table_from_env): script_dir.run_env() return alembic_version_table[0] def get_alembic_configs(): '''Return a list of alembic configs, one per project. ''' # Get the script locations for the specified or installed projects. 
# Which projects to get script locations for is determined by the CLI # options as follows: # --subproject P # only subproject P (where P can be neutron) # (none specified) # neutron and all installed subprojects script_locations = {} if CONF.subproject: script_location = _get_subproject_script_location(CONF.subproject) script_locations[CONF.subproject] = script_location else: for subproject in migration_cli.migration_entrypoints: script_locations[subproject] = _get_subproject_script_location( subproject) # Return a list of alembic configs from the projects in the # script_locations dict. If neutron is in the list it is first. configs = [] project_seq = sorted(script_locations.keys()) # Core neutron must be the first project if there is more than one if len(project_seq) > 1 and 'neutron' in project_seq: project_seq.insert(0, project_seq.pop(project_seq.index('neutron'))) for project in project_seq: config = alembic_config.Config(neutron_alembic_ini) config.set_main_option('neutron_project', project) script_location = script_locations[project] config.set_main_option('script_location', script_location) _set_version_locations(config) config.neutron_config = CONF configs.append(config) return configs def get_neutron_config(): # Neutron's alembic config is always the first one return get_alembic_configs()[0] def run_sanity_checks(config, revision): script_dir = alembic_script.ScriptDirectory.from_config(config) def check_sanity(rev, context): # TODO(ihrachyshka): here we use internal API for alembic; we may need # alembic to expose implicit_base= argument into public # iterate_revisions() call for script in script_dir.revision_map.iterate_revisions( revision, rev, implicit_base=True): if hasattr(script.module, 'check_sanity'): script.module.check_sanity(context.connection) return [] with environment.EnvironmentContext(config, script_dir, fn=check_sanity, starting_rev=None, destination_rev=revision): script_dir.run_env() def get_engine_config(): return [obj for obj in migration_cli.DB_OPTS if obj.name == 'engine'] def main(): # Interpret the config file for Python logging. # This line sets up loggers basically. logging_config.fileConfig(neutron_alembic_ini) CONF(project='neutron') return_val = False for config in get_alembic_configs(): # TODO(gongysh) enable logging return_val |= bool(CONF.command.func(config, CONF.command.name)) if CONF.command.name == 'has_offline_migrations' and not return_val: log_info(_('No offline migrations pending.')) return return_val ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/connection.py0000644000175000017500000000273600000000000023734 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db.sqlalchemy import session class DBConnection(object): """Context manager class which handles a DB connection. An existing connection can be passed as a parameter. When nested block is complete the new connection will be closed. 
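Example (illustrative; ``url`` is a database connection URL):

    with DBConnection(url) as connection:
        connection.execute('SELECT 1')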
This class is not thread safe. """ def __init__(self, connection_url, connection=None): self.connection = connection self.connection_url = connection_url self.new_engine = False def __enter__(self): self.new_engine = self.connection is None if self.new_engine: self.engine = session.create_engine(self.connection_url) self.connection = self.engine.connect() return self.connection def __exit__(self, type, value, traceback): if self.new_engine: try: self.connection.close() finally: self.engine.dispose() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2830443 neutron-16.0.0.0b2.dev214/neutron/db/migration/models/0000755000175000017500000000000000000000000022476 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/models/__init__.py0000644000175000017500000000000000000000000024575 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/migration/models/head.py0000644000175000017500000000346600000000000023762 0ustar00coreycorey00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ The module provides all database models at current HEAD. Its purpose is to create comparable metadata with current database schema. Based on this comparison database can be healed with healing migration. 
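For example (illustrative; ``engine`` is assumed to be a SQLAlchemy
engine bound to the database under comparison):

    from alembic.autogenerate import compare_metadata
    from alembic.migration import MigrationContext

    context = MigrationContext.configure(engine.connect())
    diff = compare_metadata(context, get_metadata())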
""" import os.path from neutron_lib.db import model_base from neutron.common import utils from neutron.db import agentschedulers_db # noqa from neutron.db.extra_dhcp_opt import models as edo_models # noqa from neutron.db import l3_dvrscheduler_db # noqa from neutron.db import l3_gwmode_db # noqa from neutron.db import models from neutron.db import models_v2 # noqa from neutron.db.port_security import models as ps_models # noqa from neutron.db.qos import models as qos_models # noqa from neutron.db.quota import models as quota_models # noqa from neutron.db import rbac_db_models # noqa from neutron.ipam.drivers.neutrondb_ipam import db_models # noqa from neutron.plugins.ml2 import models as ml2_models # noqa from neutron.services.auto_allocate import models as aa_models # noqa from neutron.services.trunk import models as trunk_models # noqa utils.import_modules_recursively(os.path.dirname(models.__file__)) def get_metadata(): return model_base.BASEV2.metadata ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2910445 neutron-16.0.0.0b2.dev214/neutron/db/models/0000755000175000017500000000000000000000000020505 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/README0000644000175000017500000000052200000000000021364 0ustar00coreycorey00000000000000This directory is designed to contain all SQLAlchemy models shipped with core Neutron. * The expected directory structure is flat, except for the ML2 plugins. All ML2 plugin models should fall under the plugins subdirectory (i.e. plugins/ml2/gre_allocation). * Module names should use singular forms for nouns (port.py, not ports.py). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/__init__.py0000644000175000017500000000000000000000000022604 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/address_scope.py0000644000175000017500000000326500000000000023703 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.db import constants as db_const from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import sql from neutron.db import rbac_db_models class AddressScope(model_base.BASEV2, model_base.HasId, model_base.HasProject): """Represents a neutron address scope.""" __tablename__ = "address_scopes" name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE), nullable=False) # TODO(imalinovskiy): drop this field when contract migrations will be # allowed again # NOTE(imalinovskiy): this field cannot be removed from model due to # functional test test_models_sync, trailing underscore is required to # prevent conflicts with RBAC code shared_ = sa.Column("shared", sa.Boolean, nullable=False, server_default=sql.false()) ip_version = sa.Column(sa.Integer(), nullable=False) rbac_entries = sa.orm.relationship(rbac_db_models.AddressScopeRBAC, backref='address_scopes', lazy='subquery', cascade='all, delete, delete-orphan') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/agent.py0000644000175000017500000000536400000000000022165 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import constants as db_const from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import sql from neutron.agent.common import utils class Agent(model_base.BASEV2, model_base.HasId): """Represents agents running in neutron deployments.""" __table_args__ = ( sa.UniqueConstraint('agent_type', 'host', name='uniq_agents0agent_type0host'), model_base.BASEV2.__table_args__ ) # L3 agent, DHCP agent, OVS agent, LinuxBridge agent_type = sa.Column(sa.String(255), nullable=False) binary = sa.Column(sa.String(255), nullable=False) # TOPIC is a fanout exchange topic topic = sa.Column(sa.String(255), nullable=False) # TOPIC.host is a target topic host = sa.Column(sa.String(255), nullable=False) availability_zone = sa.Column(sa.String(255)) admin_state_up = sa.Column(sa.Boolean, default=True, server_default=sql.true(), nullable=False) # the time when first report came from agents created_at = sa.Column(sa.DateTime, nullable=False) # the time when first report came after agents start started_at = sa.Column(sa.DateTime, nullable=False) # updated when agents report heartbeat_timestamp = sa.Column(sa.DateTime, nullable=False) # description is note for admin user description = sa.Column(sa.String(db_const.DESCRIPTION_FIELD_SIZE)) # configurations: a json dict string, I think 4095 is enough configurations = sa.Column(sa.String(4095), nullable=False) # resource_versions: json dict, 8191 allows for ~256 resource versions # assuming ~32byte length "'name': 'ver'," # the whole row limit is 65535 bytes in mysql resource_versions = sa.Column(sa.String(8191)) # load - number of resources hosted by the agent load = sa.Column(sa.Integer, server_default='0', nullable=False) # resources_synced: nullable boolean, success of last sync to Placement resources_synced = sa.Column( 
sa.Boolean, default=None, server_default=None, nullable=True) @property def is_active(self): return not utils.is_agent_down(self.heartbeat_timestamp) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/allowed_address_pair.py0000644000175000017500000000237400000000000025234 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm from neutron.db import models_v2 class AllowedAddressPair(model_base.BASEV2): port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True) mac_address = sa.Column(sa.String(32), nullable=False, primary_key=True) ip_address = sa.Column(sa.String(64), nullable=False, primary_key=True) port = orm.relationship( models_v2.Port, load_on_pending=True, backref=orm.backref("allowed_address_pairs", lazy="subquery", cascade="delete")) revises_on_change = ('port', ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/conntrack_helper.py0000644000175000017500000000336200000000000024404 0ustar00coreycorey00000000000000# Copyright (c) 2019 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm from neutron.db.models import l3 from neutron_lib.db import constants as db_const class ConntrackHelper(model_base.BASEV2, model_base.HasId): __tablename__ = 'conntrack_helpers' router_id = sa.Column(sa.String(db_const.UUID_FIELD_SIZE), sa.ForeignKey('routers.id', ondelete="CASCADE"), nullable=False) protocol = sa.Column(sa.String(40), nullable=False) port = sa.Column(sa.Integer, nullable=False) helper = sa.Column(sa.String(64), nullable=False) __table_args__ = ( sa.UniqueConstraint( router_id, protocol, port, helper, name='uniq_conntrack_helpers0router_id0protocol0port0helper'), ) router = orm.relationship(l3.Router, load_on_pending=True, backref=orm.backref("conntrack_helpers", lazy='subquery', uselist=True, cascade='delete')) revises_on_change = ('router', ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/data_plane_status.py0000644000175000017500000000247700000000000024564 0ustar00coreycorey00000000000000# Copyright (c) 2017 NEC Corporation. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm from neutron.db import models_v2 class PortDataPlaneStatus(model_base.BASEV2): __tablename__ = 'portdataplanestatuses' port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True, index=True) data_plane_status = sa.Column(sa.String(16), nullable=True) port = orm.relationship( models_v2.Port, load_on_pending=True, backref=orm.backref("data_plane_status", lazy='joined', uselist=False, cascade='delete')) revises_on_change = ('port', ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/dns.py0000644000175000017500000001235300000000000021647 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
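# Example (editorial sketch, not part of the neutron tree): because the
# 'data_plane_status' backref above is uselist=False, the association row
# reads as a plain attribute of Port.  `session` is an assumed open
# SQLAlchemy session bound to a neutron database.
from neutron.db import models_v2

def get_data_plane_status(session, port_id):
    """Return the port's data plane status string, or None if unset."""
    port = session.query(models_v2.Port).get(port_id)
    if port is None or port.data_plane_status is None:
        return None
    return port.data_plane_status.data_plane_status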
from neutron_lib.db import constants from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy import sql from neutron.db.models import l3 as l3_models from neutron.db import models_v2 class NetworkDNSDomain(model_base.BASEV2): network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete="CASCADE"), primary_key=True, index=True) dns_domain = sa.Column(sa.String(255), nullable=False) # Add a relationship to the Network model in order to instruct # SQLAlchemy to eagerly load this association network = orm.relationship(models_v2.Network, load_on_pending=True, backref=orm.backref("dns_domain", lazy='joined', uselist=False, cascade='delete')) revises_on_change = ('network', ) class FloatingIPDNS(model_base.BASEV2): __tablename__ = 'floatingipdnses' floatingip_id = sa.Column(sa.String(36), sa.ForeignKey('floatingips.id', ondelete="CASCADE"), primary_key=True, index=True) dns_name = sa.Column(sa.String(255), nullable=False) dns_domain = sa.Column(sa.String(255), nullable=False) published_dns_name = sa.Column(sa.String(255), nullable=False) published_dns_domain = sa.Column(sa.String(255), nullable=False) # Add a relationship to the FloatingIP model in order to instruct # SQLAlchemy to eagerly load this association floatingip = orm.relationship(l3_models.FloatingIP, load_on_pending=True, backref=orm.backref("dns", lazy='joined', uselist=False, cascade='delete')) revises_on_change = ('floatingip', ) class PortDNS(model_base.BASEV2): __tablename__ = 'portdnses' port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True, index=True) current_dns_name = sa.Column(sa.String(255), nullable=False) current_dns_domain = sa.Column(sa.String(255), nullable=False) previous_dns_name = sa.Column(sa.String(255), nullable=False) previous_dns_domain = sa.Column(sa.String(255), nullable=False) dns_name = sa.Column(sa.String(255), nullable=False) dns_domain = sa.Column(sa.String(constants.FQDN_FIELD_SIZE), nullable=False, server_default='') # Add a relationship to the Port model in order to instruct # SQLAlchemy to eagerly load this association port = orm.relationship(models_v2.Port, load_on_pending=True, backref=orm.backref("dns", lazy='joined', uselist=False, cascade='delete')) revises_on_change = ('port', ) class SubnetDNSPublishFixedIP(model_base.BASEV2): __tablename__ = "subnet_dns_publish_fixed_ips" subnet_id = sa.Column(sa.String(constants.UUID_FIELD_SIZE), sa.ForeignKey('subnets.id', ondelete="CASCADE"), primary_key=True, index=True) dns_publish_fixed_ip = sa.Column(sa.Boolean(), nullable=False, server_default=sql.false()) # Add a relationship to the Subnet model in order to instruct # SQLAlchemy to eagerly load this association subnet = orm.relationship(models_v2.Subnet, load_on_pending=True, backref=orm.backref("dns_publish_fixed_ip", lazy='joined', uselist=False, cascade='delete')) revises_on_change = ('subnet', ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/dvr.py0000644000175000017500000000326600000000000021661 0ustar00coreycorey00000000000000# Copyright 2016 Hewlett-Packard Development Company, L.P. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import model_base import sqlalchemy as sa class DistributedVirtualRouterMacAddress(model_base.BASEV2): """Represents a v2 neutron distributed virtual router mac address.""" __tablename__ = 'dvr_host_macs' host = sa.Column(sa.String(255), primary_key=True, nullable=False) mac_address = sa.Column(sa.String(32), nullable=False, unique=True) class DvrFipGatewayPortAgentBinding(model_base.BASEV2): """Represents a binding of agent's FIP gateway port and L3 agent. Each L3 DVR agent can only have one FIP gateway port per network. This table represents this constraint. """ __tablename__ = 'dvr_fip_gateway_port_network' network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id", ondelete="CASCADE"), primary_key=True) agent_id = sa.Column(sa.String(36), sa.ForeignKey("agents.id", ondelete="CASCADE"), primary_key=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/external_net.py0000644000175000017500000000275700000000000023562 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy import sql from neutron.db import models_v2 class ExternalNetwork(model_base.BASEV2): network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete="CASCADE"), primary_key=True) # introduced by auto-allocated-topology extension is_default = sa.Column(sa.Boolean(), nullable=False, server_default=sql.false()) # Add a relationship to the Network model in order to instruct # SQLAlchemy to eagerly load this association network = orm.relationship( models_v2.Network, load_on_pending=True, backref=orm.backref("external", lazy='joined', uselist=False, cascade='delete')) revises_on_change = ('network', ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/flavor.py0000644000175000017500000000451400000000000022354 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import constants as db_const from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm class Flavor(model_base.BASEV2, model_base.HasId): name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE)) description = sa.Column(sa.String(db_const.LONG_DESCRIPTION_FIELD_SIZE)) enabled = sa.Column(sa.Boolean, nullable=False, default=True, server_default=sa.sql.true()) # Make it True for multi-type flavors service_type = sa.Column(sa.String(36), nullable=True) class ServiceProfile(model_base.BASEV2, model_base.HasId): description = sa.Column(sa.String(db_const.LONG_DESCRIPTION_FIELD_SIZE)) driver = sa.Column(sa.String(1024), nullable=False) enabled = sa.Column(sa.Boolean, nullable=False, default=True, server_default=sa.sql.true()) metainfo = sa.Column(sa.String(4096)) class FlavorServiceProfileBinding(model_base.BASEV2): flavor_id = sa.Column(sa.String(36), sa.ForeignKey("flavors.id", ondelete="CASCADE"), nullable=False, primary_key=True) flavor = orm.relationship(Flavor, backref=orm.backref( "service_profiles", lazy='subquery', cascade="all, delete-orphan")) service_profile_id = sa.Column(sa.String(36), sa.ForeignKey("serviceprofiles.id", ondelete="CASCADE"), nullable=False, primary_key=True) service_profile = orm.relationship(ServiceProfile, backref="flavors") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/l3.py0000644000175000017500000001305100000000000021375 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.db import constants as db_const from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm from neutron.db.models import l3agent as rb_model from neutron.db import models_v2 from neutron.db import standard_attr class RouterPort(model_base.BASEV2): router_id = sa.Column( sa.String(36), sa.ForeignKey('routers.id', ondelete="CASCADE"), primary_key=True) port_id = sa.Column( sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True, unique=True) revises_on_change = ('router', ) # The port_type attribute is redundant as the port table already specifies # it in DEVICE_OWNER.However, this redundancy enables more efficient # queries on router ports, and also prevents potential error-prone # conditions which might originate from users altering the DEVICE_OWNER # property of router ports. 
port_type = sa.Column(sa.String(db_const.DEVICE_OWNER_FIELD_SIZE)) port = orm.relationship( models_v2.Port, backref=orm.backref('routerport', uselist=False, cascade="all,delete"), lazy='joined') class Router(standard_attr.HasStandardAttributes, model_base.BASEV2, model_base.HasId, model_base.HasProject): """Represents a v2 neutron router.""" name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE)) status = sa.Column(sa.String(16)) admin_state_up = sa.Column(sa.Boolean) gw_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id')) gw_port = orm.relationship(models_v2.Port, lazy='joined') flavor_id = sa.Column(sa.String(36), sa.ForeignKey("flavors.id"), nullable=True) attached_ports = orm.relationship( RouterPort, backref=orm.backref('router', load_on_pending=True), lazy='subquery') l3_agents = orm.relationship( 'Agent', lazy='subquery', viewonly=True, secondary=rb_model.RouterL3AgentBinding.__table__) api_collections = [l3_apidef.ROUTERS] collection_resource_map = {l3_apidef.ROUTERS: l3_apidef.ROUTER} tag_support = True class FloatingIP(standard_attr.HasStandardAttributes, model_base.BASEV2, model_base.HasId, model_base.HasProject): """Represents a floating IP address. This IP address may or may not be allocated to a tenant, and may or may not be associated with an internal port/ip address/router. """ floating_ip_address = sa.Column(sa.String(64), nullable=False) floating_network_id = sa.Column(sa.String(36), nullable=False) floating_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), nullable=False) # The ORM-level "delete" cascade relationship between port and floating_ip # is required for causing the in-Python event "after_delete" that needs for # proper quota management in case when cascade removal of the floating_ip # happens after removal of the floating_port port = orm.relationship(models_v2.Port, backref=orm.backref('floating_ips', cascade='all,delete-orphan'), foreign_keys='FloatingIP.floating_port_id') fixed_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id')) fixed_port = orm.relationship(models_v2.Port, foreign_keys='FloatingIP.fixed_port_id', lazy='joined') fixed_ip_address = sa.Column(sa.String(64)) router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id')) # Additional attribute for keeping track of the router where the floating # ip was associated in order to be able to ensure consistency even if an # asynchronous backend is unavailable when the floating IP is disassociated last_known_router_id = sa.Column(sa.String(36)) status = sa.Column(sa.String(16)) router = orm.relationship(Router, backref='floating_ips') __table_args__ = ( sa.UniqueConstraint( floating_network_id, fixed_port_id, fixed_ip_address, name=('uniq_floatingips0floatingnetworkid' '0fixedportid0fixedipaddress')), model_base.BASEV2.__table_args__,) api_collections = [l3_apidef.FLOATINGIPS] collection_resource_map = {l3_apidef.FLOATINGIPS: l3_apidef.FLOATINGIP} tag_support = True class RouterRoute(model_base.BASEV2, models_v2.Route): router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id', ondelete="CASCADE"), primary_key=True) router = orm.relationship(Router, load_on_pending=True, backref=orm.backref("route_list", lazy='subquery', cascade='delete')) revises_on_change = ('router', ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/l3_attrs.py0000644000175000017500000000416500000000000022620 0ustar00coreycorey00000000000000# Licensed under the Apache 
License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm class RouterExtraAttributes(model_base.BASEV2): """Additional attributes for a Virtual Router.""" # NOTE(armando-migliaccio): this model can be a good place to # add extension attributes to a Router model. Each case needs # to be individually examined, however 'distributed' and other # simple ones fit the pattern well. __tablename__ = "router_extra_attributes" router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id', ondelete="CASCADE"), primary_key=True) # Whether the router is a legacy (centralized) or a distributed one distributed = sa.Column(sa.Boolean, default=False, server_default=sa.sql.false(), nullable=False) # Whether the router is to be considered a 'service' router service_router = sa.Column(sa.Boolean, default=False, server_default=sa.sql.false(), nullable=False) ha = sa.Column(sa.Boolean, default=False, server_default=sa.sql.false(), nullable=False) ha_vr_id = sa.Column(sa.Integer()) # Availability Zone support availability_zone_hints = sa.Column(sa.String(255)) router = orm.relationship( 'Router', load_on_pending=True, backref=orm.backref("extra_attributes", lazy='joined', uselist=False, cascade='delete')) revises_on_change = ('router', ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/l3agent.py0000644000175000017500000000305200000000000022414 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
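# Example (editorial sketch, not part of the neutron tree): the
# 'extra_attributes' backref above is lazy='joined' and uselist=False, so the
# flags load together with the router row.  `session` is an assumed open
# SQLAlchemy session.
from neutron.db.models import l3

def is_distributed(session, router_id):
    """Return True if the router is DVR; routers without a row count as False."""
    router = session.query(l3.Router).get(router_id)
    extra = router.extra_attributes if router else None
    return bool(extra.distributed) if extra else False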
from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm from neutron.db.models import agent as agent_model LOWEST_BINDING_INDEX = 1 class RouterL3AgentBinding(model_base.BASEV2): """Represents binding between neutron routers and L3 agents.""" __table_args__ = ( sa.UniqueConstraint( 'router_id', 'binding_index', name='uniq_router_l3_agent_binding0router_id0binding_index0'), model_base.BASEV2.__table_args__ ) router_id = sa.Column(sa.String(36), sa.ForeignKey("routers.id", ondelete='CASCADE'), primary_key=True) l3_agent = orm.relation(agent_model.Agent) l3_agent_id = sa.Column(sa.String(36), sa.ForeignKey("agents.id", ondelete='CASCADE'), primary_key=True) binding_index = sa.Column(sa.Integer, nullable=False, server_default=str(LOWEST_BINDING_INDEX)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/l3ha.py0000644000175000017500000000640200000000000021710 0ustar00coreycorey00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron_lib import constants as n_const from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm from neutron.db.models import agent as agent_model from neutron.db import models_v2 class L3HARouterAgentPortBinding(model_base.BASEV2): """Represent agent binding state of a HA router port. A HA Router has one HA port per agent on which it is spawned. This binding table stores which port is used for a HA router by a L3 agent. """ __tablename__ = 'ha_router_agent_port_bindings' __table_args__ = ( sa.UniqueConstraint( 'router_id', 'l3_agent_id', name='uniq_ha_router_agent_port_bindings0port_id0l3_agent_id'), model_base.BASEV2.__table_args__ ) port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete='CASCADE'), nullable=False, primary_key=True) port = orm.relationship(models_v2.Port) router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id', ondelete='CASCADE'), nullable=False) l3_agent_id = sa.Column(sa.String(36), sa.ForeignKey("agents.id", ondelete='CASCADE')) agent = orm.relationship(agent_model.Agent) state = sa.Column(sa.Enum(n_const.HA_ROUTER_STATE_ACTIVE, n_const.HA_ROUTER_STATE_STANDBY, n_const.HA_ROUTER_STATE_UNKNOWN, name='l3_ha_states'), default=n_const.HA_ROUTER_STATE_STANDBY, server_default=n_const.HA_ROUTER_STATE_STANDBY) class L3HARouterNetwork(model_base.BASEV2, model_base.HasProjectPrimaryKey): """Host HA network for a tenant. One HA Network is used per tenant, all HA router ports are created on this network. """ __tablename__ = 'ha_router_networks' network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete="CASCADE"), nullable=False, primary_key=True) network = orm.relationship(models_v2.Network) class L3HARouterVRIdAllocation(model_base.BASEV2): """VRID allocation per HA network. Keep a track of the VRID allocations per HA network. 
""" __tablename__ = 'ha_router_vrid_allocations' network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete="CASCADE"), nullable=False, primary_key=True) vr_id = sa.Column(sa.Integer(), nullable=False, primary_key=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/loggingapi.py0000644000175000017500000000267100000000000023205 0ustar00coreycorey00000000000000# Copyright (c) 2017 Fujitsu Limited # All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import constants as db_const from neutron_lib.db import model_base import sqlalchemy as sa from neutron.db import standard_attr class Log(standard_attr.HasStandardAttributes, model_base.BASEV2, model_base.HasId, model_base.HasProject): """Represents neutron logging resource database""" __tablename__ = 'logs' name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE)) resource_type = sa.Column(sa.String(36), nullable=False) resource_id = sa.Column(sa.String(db_const.UUID_FIELD_SIZE), nullable=True, index=True) event = sa.Column(sa.String(255), nullable=False) target_id = sa.Column(sa.String(db_const.UUID_FIELD_SIZE), nullable=True, index=True) enabled = sa.Column(sa.Boolean()) api_collections = ['logs'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/metering.py0000644000175000017500000000371100000000000022673 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.db import constants as db_const from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy import sql from neutron.db.models import l3 as l3_models class MeteringLabelRule(model_base.BASEV2, model_base.HasId): direction = sa.Column(sa.Enum('ingress', 'egress', name='meteringlabels_direction')) remote_ip_prefix = sa.Column(sa.String(64)) metering_label_id = sa.Column(sa.String(36), sa.ForeignKey("meteringlabels.id", ondelete="CASCADE"), nullable=False) excluded = sa.Column(sa.Boolean, default=False, server_default=sql.false()) class MeteringLabel(model_base.BASEV2, model_base.HasId, model_base.HasProject): name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE)) description = sa.Column(sa.String(db_const.LONG_DESCRIPTION_FIELD_SIZE)) rules = orm.relationship(MeteringLabelRule, backref="label", cascade="delete", lazy="subquery") routers = orm.relationship( l3_models.Router, primaryjoin="MeteringLabel.tenant_id==Router.tenant_id", foreign_keys='MeteringLabel.tenant_id', lazy='subquery', uselist=True) shared = sa.Column(sa.Boolean, default=False, server_default=sql.false()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/network_segment_range.py0000644000175000017500000000624100000000000025451 0ustar00coreycorey00000000000000# Copyright (c) 2019 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import network_segment_range as range_apidef from neutron_lib import constants from neutron_lib.db import constants as db_const from neutron_lib.db import model_base import sqlalchemy as sa from neutron.db import standard_attr class NetworkSegmentRange(standard_attr.HasStandardAttributes, model_base.BASEV2, model_base.HasId, model_base.HasProject): """Represents network segment range data.""" __tablename__ = 'network_segment_ranges' # user-defined network segment range name name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE), nullable=True) # defines whether the network segment range is loaded from host config # files and used as the default range when there is no other available default = sa.Column(sa.Boolean, default=False, nullable=False) # defines whether multiple tenants can use this network segment range shared = sa.Column(sa.Boolean, default=True, nullable=False) # the project_id is the subject that the policy will affect. this may # also be a wildcard '*' to indicate all tenants or it may be a role if # neutron gets better integration with keystone project_id = sa.Column(sa.String(db_const.PROJECT_ID_FIELD_SIZE), nullable=True) # network segment range network type network_type = sa.Column(sa.Enum( constants.TYPE_VLAN, constants.TYPE_VXLAN, constants.TYPE_GRE, constants.TYPE_GENEVE, name='network_segment_range_network_type'), nullable=False) # network segment range physical network, only applicable for VLAN. 
physical_network = sa.Column(sa.String(64)) # minimum segmentation id value minimum = sa.Column(sa.Integer) # maximum segmentation id value maximum = sa.Column(sa.Integer) api_collections = [range_apidef.COLLECTION_NAME] collection_resource_map = { range_apidef.COLLECTION_NAME: range_apidef.RESOURCE_NAME} tag_support = True def __init__(self, *args, **kwargs): super(NetworkSegmentRange, self).__init__(*args, **kwargs) self.project_id = None if self.shared else kwargs['project_id'] is_vlan = self.network_type == constants.TYPE_VLAN self.physical_network = kwargs['physical_network'] if is_vlan else None def __repr__(self): return "<NetworkSegmentRange(id=%s, name=%s, shared=%s, project_id=%s, network_type=%s, physical_network=%s, minimum=%s, maximum=%s)>" % ( self.id, self.name, str(self.shared), self.project_id, self.network_type, self.physical_network, self.minimum, self.maximum) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/ovn.py0000644000175000017500000000472200000000000021666 0ustar00coreycorey00000000000000# Copyright 2019 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy.dialects import sqlite class OVNRevisionNumbers(model_base.BASEV2): __tablename__ = 'ovn_revision_numbers' standard_attr_id = sa.Column( sa.BigInteger().with_variant(sa.Integer(), 'sqlite'), sa.ForeignKey('standardattributes.id', ondelete='SET NULL'), nullable=True) resource_uuid = sa.Column(sa.String(36), nullable=False, index=True) resource_type = sa.Column(sa.String(36), nullable=False, index=True) revision_number = sa.Column( sa.BigInteger().with_variant(sa.Integer(), 'sqlite'), server_default='0', default=0, nullable=False) created_at = sa.Column( sa.DateTime().with_variant( sqlite.DATETIME(truncate_microseconds=True), 'sqlite'), default=sa.func.now(), nullable=False) updated_at = sa.Column(sa.TIMESTAMP, default=sa.func.now(), onupdate=sa.func.now(), nullable=True) __table_args__ = ( sa.PrimaryKeyConstraint( resource_uuid, resource_type, name='ovn_revision_numbers0resource_uuid0resource_type'), model_base.BASEV2.__table_args__ ) class OVNHashRing(model_base.BASEV2): __tablename__ = 'ovn_hash_ring' node_uuid = sa.Column(sa.String(36), nullable=False, index=True) group_name = sa.Column(sa.String(256), nullable=False, index=True) hostname = sa.Column(sa.String(256), nullable=False) created_at = sa.Column(sa.DateTime(), default=sa.func.now(), nullable=False) updated_at = sa.Column(sa.DateTime(), default=sa.func.now(), nullable=False) __table_args__ = ( sa.PrimaryKeyConstraint( node_uuid, group_name, name='ovn_hash_ring0node_uuid0group_name'), model_base.BASEV2.__table_args__ ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2910445 neutron-16.0.0.0b2.dev214/neutron/db/models/plugins/0000755000175000017500000000000000000000000022166 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/neutron/db/models/plugins/__init__.py0000644000175000017500000000000000000000000024265 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2910445 neutron-16.0.0.0b2.dev214/neutron/db/models/plugins/ml2/0000755000175000017500000000000000000000000022660 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/plugins/ml2/__init__.py0000644000175000017500000000000000000000000024757 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/plugins/ml2/flatallocation.py0000644000175000017500000000200700000000000026225 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import model_base import sqlalchemy as sa class FlatAllocation(model_base.BASEV2): """Represent persistent allocation state of a physical network. If a record exists for a physical network, then that physical network has been allocated as a flat network. """ __tablename__ = 'ml2_flat_allocations' physical_network = sa.Column(sa.String(64), nullable=False, primary_key=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/plugins/ml2/geneveallocation.py0000644000175000017500000000333400000000000026554 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
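# Example (editorial sketch, not part of the neutron tree): FlatAllocation
# above is a pure existence table -- a row means the physical network is
# taken by a flat network, so the check is a primary-key lookup.  `session`
# is an assumed open SQLAlchemy session.
from neutron.db.models.plugins.ml2 import flatallocation

def flat_network_allocated(session, physical_network):
    """Return True if the physical network is already used as flat."""
    return session.query(
        flatallocation.FlatAllocation).get(physical_network) is not None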
import sqlalchemy as sa from sqlalchemy import sql from neutron_lib.db import model_base class GeneveAllocation(model_base.BASEV2): __tablename__ = 'ml2_geneve_allocations' geneve_vni = sa.Column(sa.Integer, nullable=False, primary_key=True, autoincrement=False) allocated = sa.Column(sa.Boolean, nullable=False, default=False, server_default=sql.false(), index=True) @classmethod def get_segmentation_id(cls): return cls.geneve_vni @property def segmentation_id(self): return self.geneve_vni @staticmethod def primary_keys(): return {'geneve_vni'} class GeneveEndpoints(model_base.BASEV2): """Represents tunnel endpoint in RPC mode.""" __tablename__ = 'ml2_geneve_endpoints' __table_args__ = ( sa.UniqueConstraint('host', name='unique_ml2_geneve_endpoints0host'), model_base.BASEV2.__table_args__ ) ip_address = sa.Column(sa.String(64), primary_key=True) host = sa.Column(sa.String(255), nullable=True) def __repr__(self): return "<GeneveTunnelEndpoint(%s)>" % self.ip_address ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/plugins/ml2/gre_allocation_endpoints.py0000644000175000017500000000337000000000000030302 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import sql class GreAllocation(model_base.BASEV2): __tablename__ = 'ml2_gre_allocations' gre_id = sa.Column(sa.Integer, nullable=False, primary_key=True, autoincrement=False) allocated = sa.Column(sa.Boolean, nullable=False, default=False, server_default=sql.false(), index=True) @classmethod def get_segmentation_id(cls): return cls.gre_id @property def segmentation_id(self): return self.gre_id @staticmethod def primary_keys(): return {'gre_id'} class GreEndpoints(model_base.BASEV2): """Represents tunnel endpoint in RPC mode.""" __tablename__ = 'ml2_gre_endpoints' __table_args__ = ( sa.UniqueConstraint('host', name='unique_ml2_gre_endpoints0host'), model_base.BASEV2.__table_args__ ) ip_address = sa.Column(sa.String(64), primary_key=True) host = sa.Column(sa.String(255), nullable=True) def __repr__(self): return "<GreTunnelEndpoint(%s)>" % self.ip_address ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/plugins/ml2/vlanallocation.py0000644000175000017500000000370500000000000026245 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import model_base import sqlalchemy as sa class VlanAllocation(model_base.BASEV2): """Represent allocation state of a vlan_id on a physical network. If allocated is False, the vlan_id on the physical_network is available for allocation to a tenant network. If allocated is True, the vlan_id on the physical_network is in use, either as a tenant or provider network. When an allocation is released, if the vlan_id for the physical_network is inside the pool described by VlanTypeDriver.network_vlan_ranges, then allocated is set to False. If it is outside the pool, the record is deleted. """ __tablename__ = 'ml2_vlan_allocations' __table_args__ = ( sa.Index('ix_ml2_vlan_allocations_physical_network_allocated', 'physical_network', 'allocated'), model_base.BASEV2.__table_args__,) physical_network = sa.Column(sa.String(64), nullable=False, primary_key=True) vlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True, autoincrement=False) allocated = sa.Column(sa.Boolean, nullable=False) @classmethod def get_segmentation_id(cls): return cls.vlan_id @property def segmentation_id(self): return self.vlan_id @staticmethod def primary_keys(): return {'vlan_id', 'physical_network'} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/plugins/ml2/vxlanallocation.py0000644000175000017500000000351000000000000026427 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import sql class VxlanAllocation(model_base.BASEV2): __tablename__ = 'ml2_vxlan_allocations' vxlan_vni = sa.Column(sa.Integer, nullable=False, primary_key=True, autoincrement=False) allocated = sa.Column(sa.Boolean, nullable=False, default=False, server_default=sql.false(), index=True) @classmethod def get_segmentation_id(cls): return cls.vxlan_vni @property def segmentation_id(self): return self.vxlan_vni @staticmethod def primary_keys(): return {'vxlan_vni'} class VxlanEndpoints(model_base.BASEV2): """Represents tunnel endpoint in RPC mode.""" __tablename__ = 'ml2_vxlan_endpoints' __table_args__ = ( sa.UniqueConstraint('host', name='unique_ml2_vxlan_endpoints0host'), model_base.BASEV2.__table_args__ ) ip_address = sa.Column(sa.String(64), primary_key=True) udp_port = sa.Column(sa.Integer, nullable=False) host = sa.Column(sa.String(255), nullable=True) def __repr__(self): return "<VxlanTunnelEndpoint(%s)>" % self.ip_address ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/port_forwarding.py0000644000175000017500000000517600000000000024276 0ustar00coreycorey00000000000000# Copyright 2018 Openstack Foundation # All Rights Reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm from neutron.db.models import l3 from neutron.db import models_v2 from neutron.db import standard_attr from neutron_lib.api.definitions import fip_pf_description as apidef from neutron_lib.db import constants as db_const class PortForwarding(standard_attr.HasStandardAttributes, model_base.BASEV2, model_base.HasId): __table_args__ = ( sa.UniqueConstraint('floatingip_id', 'external_port', 'protocol', name='uniq_port_forwardings0floatingip_id0' 'external_port0protocol'), sa.UniqueConstraint('internal_neutron_port_id', 'socket', 'protocol', name='uniq_port_forwardings0' 'internal_neutron_port_id0socket0' 'protocol') ) floatingip_id = sa.Column(sa.String(db_const.UUID_FIELD_SIZE), sa.ForeignKey('floatingips.id', ondelete="CASCADE"), nullable=False) external_port = sa.Column(sa.Integer, nullable=False) internal_neutron_port_id = sa.Column( sa.String(db_const.UUID_FIELD_SIZE), sa.ForeignKey('ports.id', ondelete="CASCADE"), nullable=False) protocol = sa.Column(sa.String(40), nullable=False) socket = sa.Column(sa.String(36), nullable=False) port = orm.relationship( models_v2.Port, load_on_pending=True, backref=orm.backref("port_forwardings", lazy='subquery', uselist=True, cascade='delete') ) floating_ip = orm.relationship( l3.FloatingIP, load_on_pending=True, backref=orm.backref("port_forwardings", lazy='subquery', uselist=True, cascade='delete') ) revises_on_change = ('floating_ip', 'port',) api_collections = [apidef.ALIAS] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/portbinding.py0000644000175000017500000000234700000000000023404 0ustar00coreycorey00000000000000# Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
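# Example (editorial sketch, not part of the neutron tree): the two unique
# constraints above guarantee that an external (floating IP, port, protocol)
# triple and an internal (port, socket, protocol) triple each map to at most
# one forwarding.  A lookup helper, with `session` an assumed open SQLAlchemy
# session:
from neutron.db.models import port_forwarding as pf_models

def list_port_forwardings(session, floatingip_id):
    """Return all forwarding rows configured on a floating IP."""
    return session.query(pf_models.PortForwarding).filter_by(
        floatingip_id=floatingip_id).all()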
from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm from neutron.db import models_v2 class PortBindingPort(model_base.BASEV2): port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True) host = sa.Column(sa.String(255), nullable=False) port = orm.relationship( models_v2.Port, load_on_pending=True, backref=orm.backref("portbinding", lazy='joined', uselist=False, cascade='delete')) revises_on_change = ('port', ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/provisioning_block.py0000644000175000017500000000222700000000000024762 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import model_base import sqlalchemy as sa from neutron.db import standard_attr class ProvisioningBlock(model_base.BASEV2): # the standard attr id of the thing we want to block standard_attr_id = ( sa.Column(sa.BigInteger().with_variant(sa.Integer(), 'sqlite'), sa.ForeignKey(standard_attr.StandardAttribute.id, ondelete="CASCADE"), primary_key=True)) # the entity that wants to block the status change (e.g. L2 Agent) entity = sa.Column(sa.String(255), nullable=False, primary_key=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/securitygroup.py0000644000175000017500000001117000000000000024003 0ustar00coreycorey00000000000000# Copyright 2012 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
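# Example (editorial sketch, not part of the neutron tree): ProvisioningBlock
# rows above act like a countdown latch -- an object's status transition is
# held back until every interested entity has deleted its row.  A check
# helper, with `session` an assumed open SQLAlchemy session:
from neutron.db.models import provisioning_block as pb_models

def is_blocked(session, standard_attr_id):
    """Return True while any entity still blocks the object's provisioning."""
    return session.query(pb_models.ProvisioningBlock).filter_by(
        standard_attr_id=standard_attr_id).count() > 0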
from neutron_lib.db import constants as db_const from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy import sql from neutron.db import models_v2 from neutron.db import rbac_db_models from neutron.db import standard_attr from neutron.extensions import securitygroup as sg class SecurityGroup(standard_attr.HasStandardAttributes, model_base.BASEV2, model_base.HasId, model_base.HasProject): """Represents a v2 neutron security group.""" name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE)) stateful = sa.Column(sa.Boolean, default=True, server_default=sql.true(), nullable=False) rbac_entries = sa.orm.relationship(rbac_db_models.SecurityGroupRBAC, backref='security_group', lazy='subquery', cascade='all, delete, delete-orphan') api_collections = [sg.SECURITYGROUPS] collection_resource_map = {sg.SECURITYGROUPS: 'security_group'} tag_support = True class DefaultSecurityGroup(model_base.BASEV2, model_base.HasProjectPrimaryKey): __tablename__ = 'default_security_group' security_group_id = sa.Column(sa.String(36), sa.ForeignKey("securitygroups.id", ondelete="CASCADE"), nullable=False) security_group = orm.relationship( SecurityGroup, lazy='joined', backref=orm.backref('default_security_group', cascade='all,delete'), primaryjoin="SecurityGroup.id==DefaultSecurityGroup.security_group_id", ) class SecurityGroupPortBinding(model_base.BASEV2): """Represents binding between neutron ports and security profiles.""" port_id = sa.Column(sa.String(36), sa.ForeignKey("ports.id", ondelete='CASCADE'), primary_key=True) security_group_id = sa.Column(sa.String(36), sa.ForeignKey("securitygroups.id"), primary_key=True) revises_on_change = ('ports', ) # Add a relationship to the Port model in order to instruct SQLAlchemy to # eagerly load security group bindings ports = orm.relationship( models_v2.Port, load_on_pending=True, backref=orm.backref("security_groups", lazy='joined', cascade='delete')) class SecurityGroupRule(standard_attr.HasStandardAttributes, model_base.BASEV2, model_base.HasId, model_base.HasProject): """Represents a v2 neutron security group rule.""" security_group_id = sa.Column(sa.String(36), sa.ForeignKey("securitygroups.id", ondelete="CASCADE"), nullable=False) remote_group_id = sa.Column(sa.String(36), sa.ForeignKey("securitygroups.id", ondelete="CASCADE"), nullable=True) revises_on_change = ('security_group', ) direction = sa.Column(sa.Enum('ingress', 'egress', name='securitygrouprules_direction')) ethertype = sa.Column(sa.String(40)) protocol = sa.Column(sa.String(40)) port_range_min = sa.Column(sa.Integer) port_range_max = sa.Column(sa.Integer) remote_ip_prefix = sa.Column(sa.String(255)) security_group = orm.relationship( SecurityGroup, load_on_pending=True, backref=orm.backref('rules', cascade='all,delete', lazy='dynamic'), primaryjoin="SecurityGroup.id==SecurityGroupRule.security_group_id") source_group = orm.relationship( SecurityGroup, backref=orm.backref('source_rules', cascade='all,delete'), primaryjoin="SecurityGroup.id==SecurityGroupRule.remote_group_id") api_collections = [sg.SECURITYGROUPRULES] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/segment.py0000644000175000017500000000623300000000000022525 0ustar00coreycorey00000000000000# Copyright 2016 Hewlett Packard Enterprise Development, LP # # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import constants as db_const from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm from neutron.db import models_v2 from neutron.db import standard_attr from neutron.extensions import segment # Some standalone plugins need a DB table to store provider # network information. Initially there was no such table, # but in Mitaka the ML2 NetworkSegment table was promoted here. class NetworkSegment(standard_attr.HasStandardAttributes, model_base.BASEV2, model_base.HasId): """Represent persistent state of a network segment. A network segment is a portion of a neutron network with a specific physical realization. A neutron network can consist of one or more segments. """ network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete="CASCADE"), nullable=False) network_type = sa.Column(sa.String(32), nullable=False) physical_network = sa.Column(sa.String(64)) segmentation_id = sa.Column(sa.Integer) is_dynamic = sa.Column(sa.Boolean, default=False, nullable=False, server_default=sa.sql.false()) segment_index = sa.Column(sa.Integer, nullable=False, server_default='0') name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE), nullable=True) network = orm.relationship(models_v2.Network, backref=orm.backref("segments", lazy='subquery', cascade='delete')) api_collections = [segment.SEGMENTS] class SegmentHostMapping(model_base.BASEV2): segment_id = sa.Column(sa.String(36), sa.ForeignKey('networksegments.id', ondelete="CASCADE"), primary_key=True, index=True, nullable=False) host = sa.Column(sa.String(255), primary_key=True, index=True, nullable=False) # Add a relationship to the NetworkSegment model in order to instruct # SQLAlchemy to eagerly load this association network_segment = orm.relationship( NetworkSegment, load_on_pending=True, backref=orm.backref("segment_host_mapping", lazy='subquery', cascade='delete')) revises_on_change = ('network_segment', ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/models/servicetype.py0000644000175000017500000000212300000000000023417 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
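The SegmentHostMapping table above pairs segments with the hosts that can reach them; joining it back to NetworkSegment answers "which hosts can reach this network". A sketch, assuming a configured session (the helper name is hypothetical):

from neutron.db.models.segment import NetworkSegment, SegmentHostMapping

def hosts_for_network(session, network_id):
    # Hosts that can reach at least one segment of the given network.
    query = (session.query(SegmentHostMapping.host)
             .join(NetworkSegment,
                   NetworkSegment.id == SegmentHostMapping.segment_id)
             .filter(NetworkSegment.network_id == network_id)
             .distinct())
    return [row.host for row in query]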
from neutron_lib.db import constants as db_const
from neutron_lib.db import model_base
import sqlalchemy as sa


class ProviderResourceAssociation(model_base.BASEV2):
    provider_name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE),
                              nullable=False, primary_key=True)
    # should be manually deleted on resource deletion
    resource_id = sa.Column(sa.String(36), nullable=False, primary_key=True,
                            unique=True)


neutron-16.0.0.0b2.dev214/neutron/db/models/subnet_service_type.py

# Copyright 2016 Hewlett Packard Enterprise Development Company, LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib.db import constants as db_const
from neutron_lib.db import model_base
import sqlalchemy as sa
from sqlalchemy import orm

from neutron.db import models_v2


class SubnetServiceType(model_base.BASEV2):
    """Subnet Service Types table"""

    __tablename__ = "subnet_service_types"

    subnet_id = sa.Column(sa.String(36),
                          sa.ForeignKey('subnets.id', ondelete="CASCADE"))
    # Service types must be valid device owners, therefore share max length
    service_type = sa.Column(sa.String(
        length=db_const.DEVICE_OWNER_FIELD_SIZE))
    subnet = orm.relationship(models_v2.Subnet, load_on_pending=True,
                              backref=orm.backref('service_types',
                                                  lazy='subquery',
                                                  cascade='all, delete-orphan',
                                                  uselist=True))
    __table_args__ = (
        sa.PrimaryKeyConstraint('subnet_id', 'service_type'),
        model_base.BASEV2.__table_args__
    )
    revises_on_change = ('subnet', )


neutron-16.0.0.0b2.dev214/neutron/db/models/tag.py

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
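Since service types above share the device-owner field size, a lookup for "subnets on this network that serve this device owner" is a straight join. A sketch under the same session assumption (helper name hypothetical):

from neutron.db import models_v2
from neutron.db.models.subnet_service_type import SubnetServiceType

def subnet_ids_for_service_type(session, network_id, device_owner):
    query = (session.query(models_v2.Subnet.id)
             .join(SubnetServiceType,
                   SubnetServiceType.subnet_id == models_v2.Subnet.id)
             .filter(models_v2.Subnet.network_id == network_id,
                     SubnetServiceType.service_type == device_owner))
    return [row.id for row in query]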
#

from neutron_lib.db import model_base
import sqlalchemy as sa
from sqlalchemy import orm

from neutron.db import standard_attr


class Tag(model_base.BASEV2):
    standard_attr_id = sa.Column(
        sa.BigInteger().with_variant(sa.Integer(), 'sqlite'),
        sa.ForeignKey(standard_attr.StandardAttribute.id,
                      ondelete="CASCADE"),
        nullable=False, primary_key=True)
    tag = sa.Column(sa.String(60), nullable=False, primary_key=True)
    standard_attr = orm.relationship(
        'StandardAttribute', load_on_pending=True,
        backref=orm.backref('tags', lazy='subquery', viewonly=True))
    revises_on_change = ('standard_attr', )


neutron-16.0.0.0b2.dev214/neutron/db/models/uplink_status_propagation.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib.db import model_base
import sqlalchemy as sa
from sqlalchemy import orm

from neutron.db import models_v2


class PortUplinkStatusPropagation(model_base.BASEV2):
    __tablename__ = 'portuplinkstatuspropagation'

    port_id = sa.Column(sa.String(36),
                        sa.ForeignKey('ports.id', ondelete="CASCADE"),
                        primary_key=True, index=True)
    propagate_uplink_status = sa.Column(sa.Boolean(), nullable=False,
                                        server_default=sa.sql.false())
    port = orm.relationship(
        models_v2.Port, load_on_pending=True,
        backref=orm.backref("propagate_uplink_status", lazy='joined',
                            uselist=False, cascade='delete'))
    revises_on_change = ('port', )


neutron-16.0.0.0b2.dev214/neutron/db/models_v2.py

# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
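Tags above attach to the shared standardattributes row rather than to each resource table, so one join per resource type finds tagged objects. A sketch for ports, assuming a configured session (helper name hypothetical):

from neutron.db import models_v2
from neutron.db.models.tag import Tag

def ports_with_tag(session, tag_value):
    # Port gets standard_attr_id from the HasStandardAttributes mixin, so
    # the join works the same way for any standard-attr resource.
    return (session.query(models_v2.Port)
            .join(Tag,
                  Tag.standard_attr_id == models_v2.Port.standard_attr_id)
            .filter(Tag.tag == tag_value)
            .all())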
from neutron_lib.api.definitions import network as net_def from neutron_lib.api.definitions import port as port_def from neutron_lib.api.definitions import subnet as subnet_def from neutron_lib.api.definitions import subnetpool as subnetpool_def from neutron_lib import constants from neutron_lib.db import constants as db_const from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy import sql from neutron.db.network_dhcp_agent_binding import models as ndab_model from neutron.db import rbac_db_models from neutron.db import standard_attr class IPAllocationPool(model_base.BASEV2, model_base.HasId): """Representation of an allocation pool in a Neutron subnet.""" subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id', ondelete="CASCADE"), nullable=True) first_ip = sa.Column(sa.String(64), nullable=False) last_ip = sa.Column(sa.String(64), nullable=False) def __repr__(self): return "%s - %s" % (self.first_ip, self.last_ip) class IPAllocation(model_base.BASEV2): """Internal representation of allocated IP addresses in a Neutron subnet. """ port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), nullable=True) ip_address = sa.Column(sa.String(64), nullable=False, primary_key=True) subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id', ondelete="CASCADE"), nullable=False, primary_key=True) network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id", ondelete="CASCADE"), nullable=False, primary_key=True) revises_on_change = ('port', ) class Route(object): """mixin of a route.""" destination = sa.Column(sa.String(64), nullable=False, primary_key=True) nexthop = sa.Column(sa.String(64), nullable=False, primary_key=True) class SubnetRoute(model_base.BASEV2, Route): subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id', ondelete="CASCADE"), primary_key=True) class Port(standard_attr.HasStandardAttributes, model_base.BASEV2, model_base.HasId, model_base.HasProject): """Represents a port on a Neutron v2 network.""" name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE)) network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id"), nullable=False) fixed_ips = orm.relationship(IPAllocation, backref=orm.backref('port', load_on_pending=True), lazy='subquery', cascade='all, delete-orphan', order_by=(IPAllocation.ip_address, IPAllocation.subnet_id)) mac_address = sa.Column(sa.String(32), nullable=False) admin_state_up = sa.Column(sa.Boolean(), nullable=False) status = sa.Column(sa.String(16), nullable=False) device_id = sa.Column(sa.String(db_const.DEVICE_ID_FIELD_SIZE), nullable=False) device_owner = sa.Column(sa.String(db_const.DEVICE_OWNER_FIELD_SIZE), nullable=False) ip_allocation = sa.Column(sa.String(16)) __table_args__ = ( sa.Index( 'ix_ports_network_id_mac_address', 'network_id', 'mac_address'), sa.Index( 'ix_ports_network_id_device_owner', 'network_id', 'device_owner'), sa.Index('ix_ports_device_id', 'device_id'), sa.UniqueConstraint( network_id, mac_address, name='uniq_ports0network_id0mac_address'), model_base.BASEV2.__table_args__ ) api_collections = [port_def.COLLECTION_NAME] collection_resource_map = {port_def.COLLECTION_NAME: port_def.RESOURCE_NAME} tag_support = True def __init__(self, id=None, tenant_id=None, project_id=None, name=None, network_id=None, mac_address=None, admin_state_up=None, status=None, device_id=None, device_owner=None, fixed_ips=None, **kwargs): super(Port, self).__init__(**kwargs) self.id = id self.project_id = project_id or tenant_id self.name = name 
self.network_id = network_id self.mac_address = mac_address self.admin_state_up = admin_state_up self.device_owner = device_owner self.device_id = device_id # Since this is a relationship only set it if one is passed in. if fixed_ips: self.fixed_ips = fixed_ips # NOTE(arosen): status must be set last as an event is triggered on! self.status = status class DNSNameServer(model_base.BASEV2): """Internal representation of a DNS nameserver.""" address = sa.Column(sa.String(128), nullable=False, primary_key=True) subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id', ondelete="CASCADE"), primary_key=True) order = sa.Column(sa.Integer, nullable=False, server_default='0') class Subnet(standard_attr.HasStandardAttributes, model_base.BASEV2, model_base.HasId, model_base.HasProject): """Represents a neutron subnet. When a subnet is created the first and last entries will be created. These are used for the IP allocation. """ name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE)) network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id'), nullable=False) # Added by the segments service plugin segment_id = sa.Column(sa.String(36), sa.ForeignKey('networksegments.id')) subnetpool_id = sa.Column(sa.String(36), index=True) # NOTE: Explicitly specify join conditions for the relationship because # subnetpool_id in subnet might be 'prefix_delegation' when the IPv6 Prefix # Delegation is enabled subnetpool = orm.relationship( 'SubnetPool', lazy='joined', foreign_keys='Subnet.subnetpool_id', primaryjoin='Subnet.subnetpool_id==SubnetPool.id') ip_version = sa.Column(sa.Integer, nullable=False) cidr = sa.Column(sa.String(64), nullable=False) gateway_ip = sa.Column(sa.String(64)) network_standard_attr = orm.relationship( 'StandardAttribute', lazy='subquery', viewonly=True, secondary='networks', uselist=False, load_on_pending=True) revises_on_change = ('network_standard_attr', ) allocation_pools = orm.relationship(IPAllocationPool, backref='subnet', lazy="subquery", cascade='delete') enable_dhcp = sa.Column(sa.Boolean()) dns_nameservers = orm.relationship(DNSNameServer, backref='subnet', cascade='all, delete, delete-orphan', order_by=DNSNameServer.order, lazy='subquery') routes = orm.relationship(SubnetRoute, backref='subnet', cascade='all, delete, delete-orphan', lazy='subquery') ipv6_ra_mode = sa.Column(sa.Enum(constants.IPV6_SLAAC, constants.DHCPV6_STATEFUL, constants.DHCPV6_STATELESS, name='ipv6_ra_modes'), nullable=True) ipv6_address_mode = sa.Column(sa.Enum(constants.IPV6_SLAAC, constants.DHCPV6_STATEFUL, constants.DHCPV6_STATELESS, name='ipv6_address_modes'), nullable=True) # subnets don't have their own rbac_entries, they just inherit from # the network rbac entries rbac_entries = orm.relationship( rbac_db_models.NetworkRBAC, lazy='subquery', uselist=True, foreign_keys='Subnet.network_id', primaryjoin='Subnet.network_id==NetworkRBAC.object_id') api_collections = [subnet_def.COLLECTION_NAME] collection_resource_map = {subnet_def.COLLECTION_NAME: subnet_def.RESOURCE_NAME} tag_support = True class SubnetPoolPrefix(model_base.BASEV2): """Represents a neutron subnet pool prefix """ __tablename__ = 'subnetpoolprefixes' cidr = sa.Column(sa.String(64), nullable=False, primary_key=True) subnetpool_id = sa.Column(sa.String(36), sa.ForeignKey('subnetpools.id', ondelete='CASCADE'), nullable=False, primary_key=True) class SubnetPool(standard_attr.HasStandardAttributes, model_base.BASEV2, model_base.HasId, model_base.HasProject): """Represents a neutron subnet pool. 
""" name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE)) ip_version = sa.Column(sa.Integer, nullable=False) default_prefixlen = sa.Column(sa.Integer, nullable=False) min_prefixlen = sa.Column(sa.Integer, nullable=False) max_prefixlen = sa.Column(sa.Integer, nullable=False) # TODO(imalinovskiy): drop this field when contract migrations will be # allowed again # NOTE(imalinovskiy): this field cannot be removed from model due to # functional test test_models_sync, trailing underscore is required to # prevent conflicts with RBAC code shared_ = sa.Column("shared", sa.Boolean, nullable=False, server_default=sql.false()) is_default = sa.Column(sa.Boolean, nullable=False, server_default=sql.false()) default_quota = sa.Column(sa.Integer, nullable=True) hash = sa.Column(sa.String(36), nullable=False, server_default='') address_scope_id = sa.Column(sa.String(36), nullable=True) prefixes = orm.relationship(SubnetPoolPrefix, backref='subnetpools', cascade='all, delete, delete-orphan', lazy='subquery') rbac_entries = sa.orm.relationship(rbac_db_models.SubnetPoolRBAC, backref='subnetpools', lazy='subquery', cascade='all, delete, delete-orphan') api_collections = [subnetpool_def.COLLECTION_NAME] collection_resource_map = {subnetpool_def.COLLECTION_NAME: subnetpool_def.RESOURCE_NAME} tag_support = True class Network(standard_attr.HasStandardAttributes, model_base.BASEV2, model_base.HasId, model_base.HasProject): """Represents a v2 neutron network.""" name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE)) subnets = orm.relationship( Subnet, lazy="subquery") status = sa.Column(sa.String(16)) admin_state_up = sa.Column(sa.Boolean) vlan_transparent = sa.Column(sa.Boolean, nullable=True) rbac_entries = orm.relationship(rbac_db_models.NetworkRBAC, backref=orm.backref('network', load_on_pending=True), lazy='subquery', cascade='all, delete, delete-orphan') availability_zone_hints = sa.Column(sa.String(255)) mtu = sa.Column(sa.Integer, nullable=False, default=constants.DEFAULT_NETWORK_MTU, server_default=str(constants.DEFAULT_NETWORK_MTU)) dhcp_agents = orm.relationship( 'Agent', lazy='subquery', viewonly=True, secondary=ndab_model.NetworkDhcpAgentBinding.__table__) api_collections = [net_def.COLLECTION_NAME] collection_resource_map = {net_def.COLLECTION_NAME: net_def.RESOURCE_NAME} tag_support = True class NetworkSubnetLock(model_base.BASEV2): """Auxiliary table to lock each network subnet updates. This table is used to synchronize the subnet creation per network. If several requests to create subnets on a network are processed at the same time (even in different servers), this database lock will prevent the creation of several subnets with overlapping CIDRs by updating the network register in the table each time a subnet is created. 
""" __tablename__ = 'network_subnet_lock' network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete='CASCADE'), primary_key=True) subnet_id = sa.Column(sa.String(36)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2910445 neutron-16.0.0.0b2.dev214/neutron/db/network_dhcp_agent_binding/0000755000175000017500000000000000000000000024561 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/network_dhcp_agent_binding/__init__.py0000644000175000017500000000000000000000000026660 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/network_dhcp_agent_binding/models.py0000644000175000017500000000317600000000000026425 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm from neutron.db.models import agent as agent_model LOWEST_BINDING_INDEX = 1 class NetworkDhcpAgentBinding(model_base.BASEV2): """Represents binding between neutron networks and DHCP agents.""" __table_args__ = ( sa.UniqueConstraint( 'network_id', 'binding_index', name='uniq_network_dhcp_agent_binding0network_id0binding_index0'), model_base.BASEV2.__table_args__ ) network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id", ondelete='CASCADE'), primary_key=True) dhcp_agent = orm.relation(agent_model.Agent, lazy='subquery') dhcp_agent_id = sa.Column(sa.String(36), sa.ForeignKey("agents.id", ondelete='CASCADE'), primary_key=True) binding_index = sa.Column(sa.Integer, nullable=False, server_default=str(LOWEST_BINDING_INDEX)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/network_ip_availability_db.py0000644000175000017500000001635400000000000025165 0ustar00coreycorey00000000000000# Copyright 2016 GoDaddy. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import netaddr from neutron_lib.db import api as db_api import six from sqlalchemy import func import neutron.db.models_v2 as mod NETWORK_ID = 'network_id' NETWORK_NAME = 'network_name' SUBNET_ID = 'subnet_id' SUBNET_NAME = 'subnet_name' SUPPORTED_FILTERS = { NETWORK_ID: mod.Network.id, NETWORK_NAME: mod.Network.name, 'tenant_id': mod.Network.tenant_id, 'project_id': mod.Network.project_id, 'ip_version': mod.Subnet.ip_version, } SUPPORTED_FILTER_KEYS = six.viewkeys(SUPPORTED_FILTERS) class IpAvailabilityMixin(object): """Mixin class to query for IP availability.""" # Columns common to all queries common_columns = [ mod.Network.id.label(NETWORK_ID), mod.Subnet.id.label(SUBNET_ID), mod.Subnet.cidr, mod.Subnet.ip_version ] # Columns for the network/subnet and used_ip counts network_used_ips_columns = list(common_columns) network_used_ips_columns.append(mod.Network.name.label(NETWORK_NAME)) network_used_ips_columns.append(mod.Network.tenant_id) network_used_ips_columns.append(mod.Subnet.name.label(SUBNET_NAME)) # Aggregate query computed column network_used_ips_computed_columns = [ func.count(mod.IPAllocation.subnet_id).label('used_ips')] # Columns for total_ips query total_ips_columns = list(common_columns) total_ips_columns.append(mod.IPAllocationPool.first_ip) total_ips_columns.append(mod.IPAllocationPool.last_ip) @classmethod def get_network_ip_availabilities(cls, context, filters=None): """Get IP availability stats on a per subnet basis. Returns a list of network summaries which internally contains a list of subnet summaries. The used_ip and total_ip counts are returned at both levels. """ # Fetch total_ips by subnet subnet_total_ips_dict = cls._generate_subnet_total_ips_dict(context, filters) # Query network/subnet data along with used IP counts record_and_count_query = cls._build_network_used_ip_query(context, filters) # Assemble results result_dict = {} for row in record_and_count_query: cls._add_result(row, result_dict, subnet_total_ips_dict.get(row.subnet_id, 0)) # Convert result back into the list it expects net_ip_availabilities = list(six.viewvalues(result_dict)) return net_ip_availabilities @classmethod @db_api.CONTEXT_READER def _build_network_used_ip_query(cls, context, filters): # Generate a query to gather network/subnet/used_ips. 
# Ensure query is tolerant of missing child table data (outerjoins) # Process these outerjoin columns assuming their values may be None query = context.session.query() query = query.add_columns(*cls.network_used_ips_columns) query = query.add_columns(*cls.network_used_ips_computed_columns) query = query.outerjoin(mod.Subnet, mod.Network.id == mod.Subnet.network_id) query = query.outerjoin(mod.IPAllocation, mod.Subnet.id == mod.IPAllocation.subnet_id) query = query.group_by(*cls.network_used_ips_columns) return cls._adjust_query_for_filters(query, filters) @classmethod @db_api.CONTEXT_READER def _build_total_ips_query(cls, context, filters): query = context.session.query() query = query.add_columns(*cls.total_ips_columns) query = query.outerjoin(mod.Subnet, mod.Network.id == mod.Subnet.network_id) query = query.outerjoin( mod.IPAllocationPool, mod.Subnet.id == mod.IPAllocationPool.subnet_id) return cls._adjust_query_for_filters(query, filters) @classmethod def _generate_subnet_total_ips_dict(cls, context, filters): """Generates a dict whose key=subnet_id, value=total_ips in subnet""" # Query to get total_ips counts total_ips_query = cls._build_total_ips_query(context, filters) subnet_totals_dict = {} for row in total_ips_query: # Skip networks without subnets if not row.subnet_id: continue # Add IPAllocationPool data if row.last_ip: pool_total = netaddr.IPRange( netaddr.IPAddress(row.first_ip), netaddr.IPAddress(row.last_ip)).size cur_total = subnet_totals_dict.get(row.subnet_id, 0) subnet_totals_dict[row.subnet_id] = cur_total + pool_total else: subnet_totals_dict[row.subnet_id] = netaddr.IPNetwork( row.cidr, version=row.ip_version).size return subnet_totals_dict @classmethod def _adjust_query_for_filters(cls, query, filters): # The intersect of sets gets us applicable filter keys (others ignored) common_keys = six.viewkeys(filters) & SUPPORTED_FILTER_KEYS for key in common_keys: filter_vals = filters[key] if filter_vals: query = query.filter(SUPPORTED_FILTERS[key].in_(filter_vals)) return query @classmethod def _add_result(cls, db_row, result_dict, subnet_total_ips): # Find network in results. Create and add if missing if db_row.network_id in result_dict: network = result_dict[db_row.network_id] else: network = {NETWORK_ID: db_row.network_id, NETWORK_NAME: db_row.network_name, 'tenant_id': db_row.tenant_id, 'project_id': db_row.tenant_id, 'subnet_ip_availability': [], 'used_ips': 0, 'total_ips': 0} result_dict[db_row.network_id] = network # Only add subnet data if outerjoin rows have it if db_row.subnet_id: cls._add_subnet_data_to_net(db_row, network, subnet_total_ips) @classmethod def _add_subnet_data_to_net(cls, db_row, network_dict, subnet_total_ips): subnet = { SUBNET_ID: db_row.subnet_id, 'ip_version': db_row.ip_version, 'cidr': db_row.cidr, SUBNET_NAME: db_row.subnet_name, 'used_ips': db_row.used_ips if db_row.used_ips else 0, 'total_ips': subnet_total_ips } # Attach subnet result and rollup subnet sums into the parent network_dict['subnet_ip_availability'].append(subnet) network_dict['total_ips'] += subnet['total_ips'] network_dict['used_ips'] += subnet['used_ips'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/ovn_hash_ring_db.py0000644000175000017500000000467000000000000023074 0ustar00coreycorey00000000000000# Copyright 2019 Red Hat, Inc. # All Rights Reserved. 
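The total_ips arithmetic in _generate_subnet_total_ips_dict above reduces to two netaddr calls: a pool's size when allocation pools exist, and the whole CIDR's size otherwise. A self-contained, runnable illustration:

import netaddr

def pool_total(first_ip, last_ip):
    # Size of one allocation pool, matching how the mixin sums total_ips.
    return netaddr.IPRange(netaddr.IPAddress(first_ip),
                           netaddr.IPAddress(last_ip)).size

print(pool_total('10.0.0.2', '10.0.0.254'))   # 253 addresses in the pool
print(netaddr.IPNetwork('10.0.0.0/24').size)  # 256, the no-pool fallback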
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from neutron_lib.db import api as db_api from oslo_config import cfg from oslo_utils import timeutils from oslo_utils import uuidutils from neutron.db.models import ovn as ovn_models CONF = cfg.CONF # NOTE(ralonsoh): this was migrated from networking-ovn to neutron and should # be refactored to be integrated in a OVO. def add_node(context, group_name, node_uuid=None): if node_uuid is None: node_uuid = uuidutils.generate_uuid() with db_api.CONTEXT_WRITER.using(context): context.session.add(ovn_models.OVNHashRing( node_uuid=node_uuid, hostname=CONF.host, group_name=group_name)) return node_uuid def remove_nodes_from_host(context, group_name): with db_api.CONTEXT_WRITER.using(context): context.session.query(ovn_models.OVNHashRing).filter( ovn_models.OVNHashRing.hostname == CONF.host, ovn_models.OVNHashRing.group_name == group_name).delete() def _touch(context, **filter_args): with db_api.CONTEXT_WRITER.using(context): context.session.query(ovn_models.OVNHashRing).filter_by( **filter_args).update({'updated_at': timeutils.utcnow()}) def touch_nodes_from_host(context, group_name): _touch(context, hostname=CONF.host, group_name=group_name) def touch_node(context, node_uuid): _touch(context, node_uuid=node_uuid) def get_active_nodes(context, interval, group_name, from_host=False): limit = timeutils.utcnow() - datetime.timedelta(seconds=interval) with db_api.CONTEXT_READER.using(context): query = context.session.query(ovn_models.OVNHashRing).filter( ovn_models.OVNHashRing.updated_at >= limit, ovn_models.OVNHashRing.group_name == group_name) if from_host: query = query.filter_by(hostname=CONF.host) return query.all() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/ovn_revision_numbers_db.py0000644000175000017500000002224500000000000024521 0ustar00coreycorey00000000000000# Copyright 2017 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
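How the hash-ring helpers above fit together, as a hedged sketch: register a node, heartbeat it periodically, and ask for the peers seen within a liveness window. The context acquisition is elided and the 60-second interval is arbitrary; only the function signatures come from the module itself:

from neutron.db.ovn_hash_ring_db import (add_node, get_active_nodes,
                                         touch_node)

# 'context' is an admin neutron context obtained elsewhere; CONF.host is
# set by the running service.
node_uuid = add_node(context, group_name='mechanism_driver')
touch_node(context, node_uuid)  # periodic heartbeat refreshes updated_at
peers = get_active_nodes(context, interval=60,
                         group_name='mechanism_driver')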
import datetime

from neutron_lib.db import api as db_api
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
import sqlalchemy as sa
from sqlalchemy.orm import exc

from neutron.common.ovn import utils as ovn_utils
from neutron.db.models import l3  # noqa
from neutron.db.models import ovn as ovn_models
from neutron.db.models import securitygroup  # noqa
from neutron.db import models_v2  # noqa
from neutron.db import standard_attr

LOG = log.getLogger(__name__)
CONF = cfg.CONF

STD_ATTR_MAP = standard_attr.get_standard_attr_resource_model_map()

# NOTE(ralonsoh): to be moved to neutron-lib
TYPE_NETWORKS = 'networks'
TYPE_PORTS = 'ports'
TYPE_SECURITY_GROUP_RULES = 'security_group_rules'
TYPE_ROUTERS = 'routers'
TYPE_ROUTER_PORTS = 'router_ports'
TYPE_SECURITY_GROUPS = 'security_groups'
TYPE_FLOATINGIPS = 'floatingips'
TYPE_SUBNETS = 'subnets'

_TYPES_PRIORITY_ORDER = (
    TYPE_NETWORKS,
    TYPE_SECURITY_GROUPS,
    TYPE_SUBNETS,
    TYPE_ROUTERS,
    TYPE_PORTS,
    TYPE_ROUTER_PORTS,
    TYPE_FLOATINGIPS,
    TYPE_SECURITY_GROUP_RULES)

# The order in which the resources should be created or updated by the
# maintenance task: Root ones first and leafs at the end.
MAINTENANCE_CREATE_UPDATE_TYPE_ORDER = {
    t: n for n, t in enumerate(_TYPES_PRIORITY_ORDER, 1)}

# The order in which the resources should be deleted by the maintenance
# task: Leaf ones first and roots at the end.
MAINTENANCE_DELETE_TYPE_ORDER = {
    t: n for n, t in enumerate(reversed(_TYPES_PRIORITY_ORDER), 1)}

INITIAL_REV_NUM = -1

# Time (in seconds) used to identify if an entry is new before considering
# it an inconsistency
INCONSISTENCIES_OLDER_THAN = 60

# 1:2 mapping for OVN, neutron router ports are simple ports, but
# for OVN we handle LSP & LRP objects
if STD_ATTR_MAP:
    STD_ATTR_MAP[TYPE_ROUTER_PORTS] = STD_ATTR_MAP[TYPE_PORTS]


# NOTE(ralonsoh): to be moved to neutron-lib
class StandardAttributeIDNotFound(n_exc.NeutronException):
    message = 'Standard attribute ID not found for %(resource_uuid)s'


# NOTE(ralonsoh): to be moved to neutron-lib
class UnknownResourceType(n_exc.NeutronException):
    message = 'Unknown resource type: %(resource_type)s'


def _get_standard_attr_id(context, resource_uuid, resource_type):
    try:
        row = context.session.query(STD_ATTR_MAP[resource_type]).filter_by(
            id=resource_uuid).one()
        return row.standard_attr_id
    except exc.NoResultFound:
        raise StandardAttributeIDNotFound(resource_uuid=resource_uuid)


@db_api.retry_if_session_inactive()
def create_initial_revision(context, resource_uuid, resource_type,
                            revision_number=INITIAL_REV_NUM,
                            may_exist=False):
    LOG.debug('create_initial_revision uuid=%s, type=%s, rev=%s',
              resource_uuid, resource_type, revision_number)
    db_func = context.session.merge if may_exist else context.session.add
    with context.session.begin(subtransactions=True):
        std_attr_id = _get_standard_attr_id(
            context, resource_uuid, resource_type)
        row = ovn_models.OVNRevisionNumbers(
            resource_uuid=resource_uuid, resource_type=resource_type,
            standard_attr_id=std_attr_id, revision_number=revision_number)
        db_func(row)


@db_api.retry_if_session_inactive()
def delete_revision(context, resource_uuid, resource_type):
    LOG.debug('delete_revision(%s)', resource_uuid)
    with context.session.begin(subtransactions=True):
        row = context.session.query(ovn_models.OVNRevisionNumbers).filter_by(
            resource_uuid=resource_uuid,
            resource_type=resource_type).one_or_none()
        if row:
            context.session.delete(row)


def _ensure_revision_row_exist(context, resource, resource_type):
    """Ensure the revision row
exists. Ensure the revision row exist before we try to bump its revision number. This method is part of the migration plan to deal with resources that have been created prior to the database sync work getting merged. """ # TODO(lucasagomes): As the docstring says, this method was created to # deal with objects that already existed before the sync work. I believe # that we can remove this method after few development cycles. Or, # if we decide to make a migration script as well. with context.session.begin(subtransactions=True): if not context.session.query(ovn_models.OVNRevisionNumbers).filter_by( resource_uuid=resource['id'], resource_type=resource_type).one_or_none(): LOG.warning( 'No revision row found for %(res_uuid)s (type: ' '%(res_type)s) when bumping the revision number. ' 'Creating one.', {'res_uuid': resource['id'], 'res_type': resource_type}) create_initial_revision(context, resource['id'], resource_type) @db_api.retry_if_session_inactive() def get_revision_row(context, resource_uuid): try: with context.session.begin(subtransactions=True): return context.session.query( ovn_models.OVNRevisionNumbers).filter_by( resource_uuid=resource_uuid).one() except exc.NoResultFound: pass @db_api.retry_if_session_inactive() def bump_revision(context, resource, resource_type): revision_number = ovn_utils.get_revision_number(resource, resource_type) with context.session.begin(subtransactions=True): _ensure_revision_row_exist(context, resource, resource_type) std_attr_id = _get_standard_attr_id( context, resource['id'], resource_type) row = context.session.merge(ovn_models.OVNRevisionNumbers( standard_attr_id=std_attr_id, resource_uuid=resource['id'], resource_type=resource_type)) if revision_number < row.revision_number: LOG.debug( 'Skip bumping the revision number for %(res_uuid)s (type: ' '%(res_type)s) to %(rev_num)d. A higher version is already ' 'registered in the database (%(new_rev)d)', {'res_type': resource_type, 'res_uuid': resource['id'], 'rev_num': revision_number, 'new_rev': row.revision_number}) return row.revision_number = revision_number context.session.merge(row) LOG.info('Successfully bumped revision number for resource ' '%(res_uuid)s (type: %(res_type)s) to %(rev_num)d', {'res_uuid': resource['id'], 'res_type': resource_type, 'rev_num': revision_number}) def get_inconsistent_resources(context): """Get a list of inconsistent resources. :returns: A list of objects which the revision number from the ovn_revision_number and standardattributes tables differs. """ sort_order = sa.case(value=ovn_models.OVNRevisionNumbers.resource_type, whens=MAINTENANCE_CREATE_UPDATE_TYPE_ORDER) time_ = (timeutils.utcnow() - datetime.timedelta(seconds=INCONSISTENCIES_OLDER_THAN)) with context.session.begin(subtransactions=True): query = context.session.query(ovn_models.OVNRevisionNumbers).join( standard_attr.StandardAttribute, ovn_models.OVNRevisionNumbers.standard_attr_id == standard_attr.StandardAttribute.id) # Filter out new entries query = query.filter( standard_attr.StandardAttribute.created_at < time_) # Filter for resources which revision_number differs query = query.filter( ovn_models.OVNRevisionNumbers.revision_number != standard_attr.StandardAttribute.revision_number) return query.order_by(sort_order).all() def get_deleted_resources(context): """Get a list of resources that failed to be deleted in OVN. Get a list of resources that have been deleted from neutron but not in OVN. 
Once a resource is deleted in Neutron the ``standard_attr_id`` foreign key in the ovn_revision_numbers table will be set to NULL. Upon successfully deleting the resource in OVN the entry in the ovn_revision_number should also be deleted but if something fails the entry will be kept and returned in this list so the maintenance thread can later fix it. """ sort_order = sa.case(value=ovn_models.OVNRevisionNumbers.resource_type, whens=MAINTENANCE_DELETE_TYPE_ORDER) with context.session.begin(subtransactions=True): return context.session.query(ovn_models.OVNRevisionNumbers).filter_by( standard_attr_id=None).order_by(sort_order).all() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2910445 neutron-16.0.0.0b2.dev214/neutron/db/port_security/0000755000175000017500000000000000000000000022135 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/port_security/__init__.py0000644000175000017500000000000000000000000024234 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/port_security/models.py0000644000175000017500000000374700000000000024005 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm from neutron.db import models_v2 class PortSecurityBinding(model_base.BASEV2): port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True) port_security_enabled = sa.Column(sa.Boolean(), nullable=False) # Add a relationship to the Port model in order to be to able to # instruct SQLAlchemy to eagerly load port security binding port = orm.relationship( models_v2.Port, load_on_pending=True, backref=orm.backref("port_security", uselist=False, cascade='delete', lazy='joined')) revises_on_change = ('port',) class NetworkSecurityBinding(model_base.BASEV2): network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete="CASCADE"), primary_key=True) port_security_enabled = sa.Column(sa.Boolean(), nullable=False) # Add a relationship to the Port model in order to be able to instruct # SQLAlchemy to eagerly load default port security setting for ports # on this network network = orm.relationship( models_v2.Network, load_on_pending=True, backref=orm.backref("port_security", uselist=False, cascade='delete', lazy='joined')) revises_on_change = ('network',) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/portbindings_base.py0000644000175000017500000000304600000000000023273 0ustar00coreycorey00000000000000# Copyright 2013 UnitedStack Inc. # All Rights Reserved. 
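The two binding tables above implement a port-overrides-network precedence for port security. A hypothetical read path, assuming a configured session:

from neutron.db.port_security.models import (NetworkSecurityBinding,
                                             PortSecurityBinding)

def effective_port_security(session, port_id, network_id):
    # The port-level binding wins when present.
    port_row = session.query(PortSecurityBinding).get(port_id)
    if port_row is not None:
        return port_row.port_security_enabled
    net_row = session.query(NetworkSecurityBinding).get(network_id)
    # Resources created before the extension may have no row at either
    # level; the extension's API default is enabled.
    return net_row.port_security_enabled if net_row is not None else True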
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import port as port_def from neutron_lib.db import resource_extend from neutron_lib.plugins import directory @resource_extend.has_resource_extenders class PortBindingBaseMixin(object): # Initialized by core plugin or ml2 mechanism driver(s) base_binding_dict = None def _process_portbindings_create_and_update(self, context, port_data, port): self.extend_port_dict_binding(port, None) def extend_port_dict_binding(self, port_res, port_db): if self.base_binding_dict: port_res.update(self.base_binding_dict) @staticmethod @resource_extend.extends([port_def.COLLECTION_NAME]) def _extend_port_dict_binding(port_res, port_db): plugin = directory.get_plugin() if not isinstance(plugin, PortBindingBaseMixin): return plugin.extend_port_dict_binding(port_res, port_db) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/portbindings_db.py0000644000175000017500000001051000000000000022740 0ustar00coreycorey00000000000000# Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
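What PortBindingBaseMixin.extend_port_dict_binding above actually does is a dict merge driven by base_binding_dict. A toy subclass makes the behavior visible; the vif-type value is illustrative, not a claim about any real plugin:

from neutron.db.portbindings_base import PortBindingBaseMixin

class FakePlugin(PortBindingBaseMixin):
    base_binding_dict = {'binding:vif_type': 'ovs'}  # illustrative value

port_res = {'id': 'port-1'}
FakePlugin().extend_port_dict_binding(port_res, None)
# port_res now also carries {'binding:vif_type': 'ovs'}
assert port_res['binding:vif_type'] == 'ovs'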
from neutron_lib.api.definitions import port as port_def from neutron_lib.api.definitions import portbindings from neutron_lib.api import validators from neutron_lib.db import api as db_api from neutron_lib.db import model_query from neutron_lib.db import resource_extend from neutron_lib.plugins import directory from neutron.db.models import portbinding as pmodels from neutron.db import models_v2 from neutron.db import portbindings_base def _port_model_hook(context, original_model, query): query = query.outerjoin( pmodels.PortBindingPort, (original_model.id == pmodels.PortBindingPort.port_id)) return query def _port_result_filter_hook(query, filters): values = filters and filters.get(portbindings.HOST_ID, []) if not values: return query query = query.filter(pmodels.PortBindingPort.host.in_(values)) return query @resource_extend.has_resource_extenders class PortBindingMixin(portbindings_base.PortBindingBaseMixin): def __new__(cls, *args, **kwargs): model_query.register_hook( models_v2.Port, "portbindings_port", query_hook=_port_model_hook, filter_hook=None, result_filters=_port_result_filter_hook) return super(PortBindingMixin, cls).__new__(cls, *args, **kwargs) def _process_portbindings_create_and_update(self, context, port_data, port): binding_profile = port.get(portbindings.PROFILE) binding_profile_set = validators.is_attr_set(binding_profile) if not binding_profile_set and binding_profile is not None: del port[portbindings.PROFILE] binding_vnic = port.get(portbindings.VNIC_TYPE) binding_vnic_set = validators.is_attr_set(binding_vnic) if not binding_vnic_set and binding_vnic is not None: del port[portbindings.VNIC_TYPE] # REVISIT(irenab) Add support for vnic_type for plugins that # can handle more than one type. # Currently implemented for ML2 plugin that does not use # PortBindingMixin. host = port_data.get(portbindings.HOST_ID) host_set = validators.is_attr_set(host) with db_api.CONTEXT_WRITER.using(context): bind_port = context.session.query( pmodels.PortBindingPort).filter_by(port_id=port['id']).first() if host_set: if not bind_port: context.session.add( pmodels.PortBindingPort(port_id=port['id'], host=host)) else: bind_port.host = host else: host = bind_port.host if bind_port else None self._extend_port_dict_binding_host(port, host) def get_port_host(self, context, port_id): with db_api.CONTEXT_READER.using(context): bind_port = ( context.session.query(pmodels.PortBindingPort.host). filter_by(port_id=port_id). first() ) return bind_port.host if bind_port else None def _extend_port_dict_binding_host(self, port_res, host): super(PortBindingMixin, self).extend_port_dict_binding( port_res, None) port_res[portbindings.HOST_ID] = host def extend_port_dict_binding(self, port_res, port_db): host = port_db.portbinding.host if port_db.portbinding else None self._extend_port_dict_binding_host(port_res, host) @staticmethod @resource_extend.extends([port_def.COLLECTION_NAME]) def _extend_port_dict_binding(port_res, port_db): plugin = directory.get_plugin() if not isinstance(plugin, PortBindingMixin): return plugin.extend_port_dict_binding(port_res, port_db) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/portsecurity_db.py0000644000175000017500000000545600000000000023027 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
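The query-hook and result-filter-hook pair registered above is what makes filtering ports by binding host work; spelled out as one standalone query it amounts to the following sketch (same configured-session assumption, helper name hypothetical):

from neutron.db.models import portbinding as pmodels
from neutron.db import models_v2

def ports_on_host(session, host):
    # The outerjoin mirrors _port_model_hook; the host filter mirrors
    # _port_result_filter_hook.
    return (session.query(models_v2.Port)
            .outerjoin(pmodels.PortBindingPort,
                       models_v2.Port.id == pmodels.PortBindingPort.port_id)
            .filter(pmodels.PortBindingPort.host == host)
            .all())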
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import network as net_def from neutron_lib.api.definitions import port as port_def from neutron_lib.api.definitions import port_security as psec from neutron_lib.api import validators from neutron_lib.db import resource_extend from neutron_lib.plugins import directory from neutron_lib.utils import net from neutron.db import portsecurity_db_common @resource_extend.has_resource_extenders class PortSecurityDbMixin(portsecurity_db_common.PortSecurityDbCommon): @staticmethod @resource_extend.extends([net_def.COLLECTION_NAME, port_def.COLLECTION_NAME]) def _extend_port_security_dict(response_data, db_data): plugin = directory.get_plugin() if ('port-security' in getattr(plugin, 'supported_extension_aliases', [])): super(PortSecurityDbMixin, plugin)._extend_port_security_dict( response_data, db_data) def _determine_port_security_and_has_ip(self, context, port): """Returns a tuple of booleans (port_security_enabled, has_ip). Port_security is the value associated with the port if one is present otherwise the value associated with the network is returned. has_ip is if the port is associated with an ip or not. """ has_ip = self._ip_on_port(port) # we don't apply security groups for dhcp, router if port.get('device_owner') and net.is_port_trusted(port): return (False, has_ip) if validators.is_attr_set(port.get(psec.PORTSECURITY)): port_security_enabled = port[psec.PORTSECURITY] # If port has an ip and security_groups are passed in # conveniently set port_security_enabled to true this way # user doesn't also have to pass in port_security_enabled=True # when creating ports. elif has_ip and validators.is_attr_set(port.get('security_groups')): port_security_enabled = True else: port_security_enabled = self._get_network_security_binding( context, port['network_id']) return (port_security_enabled, has_ip) def _ip_on_port(self, port): return bool(port.get('fixed_ips')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/portsecurity_db_common.py0000644000175000017500000001016500000000000024370 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
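A condensed restatement of the precedence implemented by _determine_port_security_and_has_ip above, with the attribute names simplified relative to the psec constants; this is a sketch, not the shipped code path:

def decide_port_security(port, network_default, port_is_trusted):
    has_ip = bool(port.get('fixed_ips'))
    if port.get('device_owner') and port_is_trusted:
        return False, has_ip          # dhcp/router ports skip filtering
    if 'port_security_enabled' in port:
        return port['port_security_enabled'], has_ip
    if has_ip and port.get('security_groups'):
        return True, has_ip           # implied by passing security groups
    return network_default, has_ip    # fall back to the network binding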
from neutron_lib.api.definitions import port_security as psec from neutron_lib.db import utils as db_utils from neutron.objects import network from neutron.objects.port.extensions import port_security as p_ps class PortSecurityDbCommon(object): """Mixin class to add port security.""" @staticmethod def _extend_port_security_dict(response_data, db_data): if db_data.get('port_security') is None: response_data[psec.PORTSECURITY] = psec.DEFAULT_PORT_SECURITY else: response_data[psec.PORTSECURITY] = ( db_data['port_security'][psec.PORTSECURITY]) def _process_port_security_create(self, context, obj_cls, res_name, req, res): obj = obj_cls( context, id=res['id'], port_security_enabled=req[psec.PORTSECURITY] ) obj.create() res[psec.PORTSECURITY] = req[psec.PORTSECURITY] return self._make_port_security_dict(obj, res_name) def _process_port_port_security_create(self, context, port_req, port_res): self._process_port_security_create( context, p_ps.PortSecurity, 'port', port_req, port_res) def _process_network_port_security_create(self, context, network_req, network_res): self._process_port_security_create( context, network.NetworkPortSecurity, 'network', network_req, network_res) def _get_security_binding(self, context, obj_cls, res_id): obj = obj_cls.get_object(context, id=res_id) # NOTE(ihrachys) the resource may have been created before port # security extension was enabled; return default value return obj.port_security_enabled if obj else psec.DEFAULT_PORT_SECURITY def _get_network_security_binding(self, context, network_id): return self._get_security_binding( context, network.NetworkPortSecurity, network_id) def _get_port_security_binding(self, context, port_id): return self._get_security_binding(context, p_ps.PortSecurity, port_id) def _process_port_port_security_update(self, context, port_req, port_res): self._process_port_security_update( context, p_ps.PortSecurity, 'port', port_req, port_res) def _process_network_port_security_update(self, context, network_req, network_res): self._process_port_security_update( context, network.NetworkPortSecurity, 'network', network_req, network_res) def _process_port_security_update(self, context, obj_cls, res_name, req, res): if psec.PORTSECURITY not in req: return port_security_enabled = req[psec.PORTSECURITY] obj = obj_cls.get_object(context, id=res['id']) if obj: obj.port_security_enabled = port_security_enabled obj.update() res[psec.PORTSECURITY] = port_security_enabled else: # NOTE(ihrachys) the resource may have been created before port # security extension was enabled; create the binding model self._process_port_security_create( context, obj_cls, res_name, req, res) @staticmethod def _make_port_security_dict(res, res_name, fields=None): res_ = {'%s_id' % res_name: res.id, psec.PORTSECURITY: res.port_security_enabled} return db_utils.resource_fields(res_, fields) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/provisioning_blocks.py0000644000175000017500000002005700000000000023663 0ustar00coreycorey00000000000000# Copyright 2016 Mirantis, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
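The recurring "resource predates the extension" branch in PortSecurityDbCommon above boils down to one default rule; DEFAULT_PORT_SECURITY is True in the port-security API definition. A tiny sketch:

def binding_or_default(obj, default=True):
    # obj is the OVO returned by obj_cls.get_object(); None means the
    # resource was created before the port-security extension existed.
    return obj.port_security_enabled if obj is not None else default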
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib.db import api as db_api from oslo_log import log as logging from neutron._i18n import _ from neutron.db import models_v2 from neutron.objects import provisioning_blocks as pb_obj LOG = logging.getLogger(__name__) PROVISIONING_COMPLETE = 'provisioning_complete' # identifiers for the various entities that participate in provisioning DHCP_ENTITY = 'DHCP' L2_AGENT_ENTITY = 'L2' # TODO(sshank): Change to object later on when complete integration of Port # OVO is complete. Currently 'extend_port_dict' in ext_test fails when changed # to OVO here. _RESOURCE_TO_MODEL_MAP = {resources.PORT: models_v2.Port} def add_model_for_resource(resource, model): """Adds a mapping between a callback resource and a DB model.""" _RESOURCE_TO_MODEL_MAP[resource] = model @db_api.retry_if_session_inactive() def add_provisioning_component(context, object_id, object_type, entity): """Adds a provisioning block by an entity to a given object. Adds a provisioning block to the DB for object_id with an identifier of the entity that is doing the provisioning. While an object has these provisioning blocks present, this module will not emit any callback events indicating that provisioning has completed. Any logic that depends on multiple disjoint components may use these blocks and subscribe to the PROVISIONING_COMPLETE event to know when all components have completed. :param context: neutron api request context :param object_id: ID of object that has been provisioned :param object_type: callback resource type of the object :param entity: The entity that has provisioned the object """ log_dict = {'entity': entity, 'oid': object_id, 'otype': object_type} # we get an object's ID, so we need to convert that into a standard attr id standard_attr_id = _get_standard_attr_id(context, object_id, object_type) if not standard_attr_id: return if pb_obj.ProvisioningBlock.objects_exist( context, standard_attr_id=standard_attr_id, entity=entity): # an entry could be leftover from a previous transition that hasn't # yet been provisioned. (e.g. multiple updates in a short period) LOG.debug("Ignored duplicate provisioning block setup for %(otype)s " "%(oid)s by entity %(entity)s.", log_dict) return pb_obj.ProvisioningBlock( context, standard_attr_id=standard_attr_id, entity=entity).create() LOG.debug("Transition to ACTIVE for %(otype)s object %(oid)s " "will not be triggered until provisioned by entity %(entity)s.", log_dict) @db_api.retry_if_session_inactive() def remove_provisioning_component(context, object_id, object_type, entity, standard_attr_id=None): """Remove a provisioning block for an object without triggering a callback. Removes a provisioning block without triggering a callback. A user of this module should call this when a block is no longer correct. If the block has been satisfied, the 'provisioning_complete' method should be called. 
:param context: neutron api request context :param object_id: ID of object that has been provisioned :param object_type: callback resource type of the object :param entity: The entity that has provisioned the object :param standard_attr_id: Optional ID to pass to the function to avoid the extra DB lookup to translate the object_id into the standard_attr_id. :return: boolean indicating whether or not a record was deleted """ standard_attr_id = standard_attr_id or _get_standard_attr_id( context, object_id, object_type) if not standard_attr_id: return False if pb_obj.ProvisioningBlock.delete_objects( context, standard_attr_id=standard_attr_id, entity=entity): return True else: return False @db_api.retry_if_session_inactive() def provisioning_complete(context, object_id, object_type, entity): """Mark that the provisioning for object_id has been completed by entity. Marks that an entity has finished provisioning an object. If there are no remaining provisioning components, a callback will be triggered indicating that provisioning has been completed for the object. Subscribers to this callback must be idempotent because it may be called multiple times in high availability deployments. :param context: neutron api request context :param object_id: ID of object that has been provisioned :param object_type: callback resource type of the object :param entity: The entity that has provisioned the object """ log_dict = {'oid': object_id, 'entity': entity, 'otype': object_type} # this can't be called in a transaction to avoid REPEATABLE READ # tricking us into thinking there are remaining provisioning components if context.session.is_active: raise RuntimeError(_("Must not be called in a transaction")) standard_attr_id = _get_standard_attr_id(context, object_id, object_type) if not standard_attr_id: return if remove_provisioning_component(context, object_id, object_type, entity, standard_attr_id): LOG.debug("Provisioning for %(otype)s %(oid)s completed by entity " "%(entity)s.", log_dict) # now with that committed, check if any records are left. if None, emit # an event that provisioning is complete. if not pb_obj.ProvisioningBlock.objects_exist( context, standard_attr_id=standard_attr_id): LOG.debug("Provisioning complete for %(otype)s %(oid)s triggered by " "entity %(entity)s.", log_dict) registry.publish(object_type, PROVISIONING_COMPLETE, 'neutron.db.provisioning_blocks', payload=events.DBEventPayload( context, resource_id=object_id)) @db_api.retry_if_session_inactive() def is_object_blocked(context, object_id, object_type): """Return boolean indicating if object has a provisioning block. :param context: neutron api request context :param object_id: ID of object that has been provisioned :param object_type: callback resource type of the object """ standard_attr_id = _get_standard_attr_id(context, object_id, object_type) if not standard_attr_id: # object doesn't exist so it has no blocks return False return pb_obj.ProvisioningBlock.objects_exist( context, standard_attr_id=standard_attr_id) def _get_standard_attr_id(context, object_id, object_type): model = _RESOURCE_TO_MODEL_MAP.get(object_type) if not model: raise RuntimeError(_("Could not find model for %s. If you are " "adding provisioning blocks for a new resource " "you must call add_model_for_resource during " "initialization for your type.") % object_type) obj = (context.session.query(model.standard_attr_id). enable_eagerloads(False). 
filter_by(id=object_id).first()) if not obj: # concurrent delete LOG.debug("Could not find standard attr ID for object %s.", object_id) return return obj.standard_attr_id ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2910445 neutron-16.0.0.0b2.dev214/neutron/db/qos/0000755000175000017500000000000000000000000020024 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/qos/__init__.py0000644000175000017500000000000000000000000022123 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/qos/models.py0000644000175000017500000002046500000000000021670 0ustar00coreycorey00000000000000# Copyright 2015 Huawei Technologies India Pvt Ltd, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants from neutron_lib.db import constants as db_const from neutron_lib.db import model_base import sqlalchemy as sa from neutron.db.models import l3 from neutron.db import models_v2 from neutron.db import rbac_db_models from neutron.db import standard_attr class QosPolicy(standard_attr.HasStandardAttributes, model_base.BASEV2, model_base.HasId, model_base.HasProject): __tablename__ = 'qos_policies' name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE)) rbac_entries = sa.orm.relationship(rbac_db_models.QosPolicyRBAC, backref='qos_policy', lazy='subquery', cascade='all, delete, delete-orphan') api_collections = ['policies'] collection_resource_map = {'policies': 'policy'} tag_support = True class QosNetworkPolicyBinding(model_base.BASEV2): __tablename__ = 'qos_network_policy_bindings' policy_id = sa.Column(sa.String(36), sa.ForeignKey('qos_policies.id', ondelete='CASCADE'), nullable=False, primary_key=True) network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete='CASCADE'), nullable=False, unique=True, primary_key=True) revises_on_change = ('network', ) network = sa.orm.relationship( models_v2.Network, load_on_pending=True, backref=sa.orm.backref("qos_policy_binding", uselist=False, cascade='delete', lazy='joined')) port = sa.orm.relationship( models_v2.Port, primaryjoin='QosNetworkPolicyBinding.network_id == Port.network_id', foreign_keys=network_id, backref=sa.orm.backref('qos_network_policy_binding', uselist=False, viewonly=True, lazy='joined')) class QosFIPPolicyBinding(model_base.BASEV2): __tablename__ = 'qos_fip_policy_bindings' policy_id = sa.Column(sa.String(db_const.UUID_FIELD_SIZE), sa.ForeignKey('qos_policies.id', ondelete='CASCADE'), nullable=False, primary_key=True) fip_id = sa.Column(sa.String(db_const.UUID_FIELD_SIZE), sa.ForeignKey('floatingips.id', ondelete='CASCADE'), nullable=False, unique=True, primary_key=True) revises_on_change = ('floatingip', ) floatingip = sa.orm.relationship( l3.FloatingIP, load_on_pending=True, 
backref=sa.orm.backref("qos_policy_binding", uselist=False, cascade='delete', lazy='joined')) class QosRouterGatewayIPPolicyBinding(model_base.BASEV2): __tablename__ = 'qos_router_gw_policy_bindings' policy_id = sa.Column(sa.String(db_const.UUID_FIELD_SIZE), sa.ForeignKey('qos_policies.id', ondelete='CASCADE'), nullable=False, primary_key=True) router_id = sa.Column(sa.String(db_const.UUID_FIELD_SIZE), sa.ForeignKey('routers.id', ondelete='CASCADE'), nullable=False, unique=True, primary_key=True) revises_on_change = ('router', ) router = sa.orm.relationship( l3.Router, load_on_pending=True, backref=sa.orm.backref("qos_policy_binding", uselist=False, cascade='delete', lazy='joined')) class QosPortPolicyBinding(model_base.BASEV2): __tablename__ = 'qos_port_policy_bindings' policy_id = sa.Column(sa.String(36), sa.ForeignKey('qos_policies.id', ondelete='CASCADE'), nullable=False, primary_key=True) port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete='CASCADE'), nullable=False, unique=True, primary_key=True) revises_on_change = ('port', ) port = sa.orm.relationship( models_v2.Port, load_on_pending=True, backref=sa.orm.backref("qos_policy_binding", uselist=False, cascade='delete', lazy='joined')) class QosPolicyDefault(model_base.BASEV2, model_base.HasProjectPrimaryKeyIndex): __tablename__ = 'qos_policies_default' qos_policy_id = sa.Column(sa.String(36), sa.ForeignKey('qos_policies.id', ondelete='CASCADE'), nullable=False) revises_on_change = ('qos_policy',) qos_policy = sa.orm.relationship(QosPolicy, load_on_pending=True) class QosBandwidthLimitRule(model_base.HasId, model_base.BASEV2): __tablename__ = 'qos_bandwidth_limit_rules' qos_policy_id = sa.Column(sa.String(36), sa.ForeignKey('qos_policies.id', ondelete='CASCADE'), nullable=False) max_kbps = sa.Column(sa.Integer) max_burst_kbps = sa.Column(sa.Integer) revises_on_change = ('qos_policy', ) qos_policy = sa.orm.relationship(QosPolicy, load_on_pending=True) direction = sa.Column(sa.Enum(constants.EGRESS_DIRECTION, constants.INGRESS_DIRECTION, name="directions"), default=constants.EGRESS_DIRECTION, server_default=constants.EGRESS_DIRECTION, nullable=False) __table_args__ = ( sa.UniqueConstraint( qos_policy_id, direction, name="qos_bandwidth_rules0qos_policy_id0direction"), model_base.BASEV2.__table_args__ ) class QosDscpMarkingRule(model_base.HasId, model_base.BASEV2): __tablename__ = 'qos_dscp_marking_rules' qos_policy_id = sa.Column(sa.String(36), sa.ForeignKey('qos_policies.id', ondelete='CASCADE'), nullable=False, unique=True) dscp_mark = sa.Column(sa.Integer) revises_on_change = ('qos_policy', ) qos_policy = sa.orm.relationship(QosPolicy, load_on_pending=True) class QosMinimumBandwidthRule(model_base.HasId, model_base.BASEV2): __tablename__ = 'qos_minimum_bandwidth_rules' qos_policy_id = sa.Column(sa.String(36), sa.ForeignKey('qos_policies.id', ondelete='CASCADE'), nullable=False, index=True) min_kbps = sa.Column(sa.Integer) direction = sa.Column(sa.Enum(constants.EGRESS_DIRECTION, constants.INGRESS_DIRECTION, name='directions'), nullable=False, server_default=constants.EGRESS_DIRECTION) revises_on_change = ('qos_policy', ) qos_policy = sa.orm.relationship(QosPolicy, load_on_pending=True) __table_args__ = ( sa.UniqueConstraint( qos_policy_id, direction, name='qos_minimum_bandwidth_rules0qos_policy_id0direction'), model_base.BASEV2.__table_args__ ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2950444 
neutron-16.0.0.0b2.dev214/neutron/db/quota/0000755000175000017500000000000000000000000020353 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/quota/__init__.py0000644000175000017500000000000000000000000022452 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/quota/api.py0000644000175000017500000002201500000000000021476 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import datetime from neutron_lib.db import api as db_api from neutron.objects import quota as quota_obj # Wrapper for utcnow - needed for mocking it in unit tests def utcnow(): return datetime.datetime.utcnow() class QuotaUsageInfo(collections.namedtuple( 'QuotaUsageInfo', ['resource', 'tenant_id', 'used', 'dirty'])): """Information about resource quota usage.""" class ReservationInfo(collections.namedtuple( 'ReservationInfo', ['reservation_id', 'tenant_id', 'expiration', 'deltas'])): """Information about a resource reservation.""" @db_api.retry_if_session_inactive() def get_quota_usage_by_resource_and_tenant(context, resource, tenant_id): """Return usage info for a given resource and tenant. :param context: Request context :param resource: Name of the resource :param tenant_id: Tenant identifier :returns: a QuotaUsageInfo instance """ result = quota_obj.QuotaUsage.get_object_dirty_protected( context, resource=resource, project_id=tenant_id) if not result: return return QuotaUsageInfo(result.resource, result.project_id, result.in_use, result.dirty) @db_api.retry_if_session_inactive() def get_quota_usage_by_resource(context, resource): objs = quota_obj.QuotaUsage.get_objects(context, resource=resource) return [QuotaUsageInfo(item.resource, item.project_id, item.in_use, item.dirty) for item in objs] @db_api.retry_if_session_inactive() def get_quota_usage_by_tenant_id(context, tenant_id): objs = quota_obj.QuotaUsage.get_objects(context, project_id=tenant_id) return [QuotaUsageInfo(item.resource, tenant_id, item.in_use, item.dirty) for item in objs] @db_api.retry_if_session_inactive() def set_quota_usage(context, resource, tenant_id, in_use=None, delta=False): """Set resource quota usage. 
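The usage row for the resource and tenant is created on first use. When ``delta`` is True the value of ``in_use`` is added to the current usage instead of replacing it.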
:param context: instance of neutron context with db session :param resource: name of the resource for which usage is being set :param tenant_id: identifier of the tenant for which quota usage is being set :param in_use: integer specifying the new quantity of used resources, or a delta to apply to the current usage :param delta: Specifies whether in_use is an absolute number or a delta (defaults to False) """ with db_api.CONTEXT_WRITER.using(context): usage_data = quota_obj.QuotaUsage.get_object( context, resource=resource, project_id=tenant_id) if not usage_data: # Must create entry usage_data = quota_obj.QuotaUsage( context, resource=resource, project_id=tenant_id) usage_data.create() # Perform explicit comparison with None as 0 is a valid value if in_use is not None: if delta: in_use = usage_data.in_use + in_use usage_data.in_use = in_use # After an explicit update the dirty bit should always be reset usage_data.dirty = False usage_data.update() return QuotaUsageInfo(usage_data.resource, usage_data.project_id, usage_data.in_use, usage_data.dirty) @db_api.retry_if_session_inactive() @db_api.CONTEXT_WRITER def set_quota_usage_dirty(context, resource, tenant_id, dirty=True): """Set quota usage dirty bit for a given resource and tenant. :param resource: a resource for which quota usage is tracked :param tenant_id: tenant identifier :param dirty: the desired value for the dirty bit (defaults to True) :returns: 1 if the quota usage data were updated, 0 otherwise. """ obj = quota_obj.QuotaUsage.get_object( context, resource=resource, project_id=tenant_id) if obj: obj.dirty = dirty obj.update() return 1 return 0 @db_api.retry_if_session_inactive() @db_api.CONTEXT_WRITER def set_resources_quota_usage_dirty(context, resources, tenant_id, dirty=True): """Set quota usage dirty bit for a given tenant and multiple resources. :param resources: list of resources for which the dirty bit is going to be set :param tenant_id: tenant identifier :param dirty: the desired value for the dirty bit (defaults to True) :returns: the number of records for which the bit was actually set. """ filters = {'project_id': tenant_id} if resources: filters['resource'] = resources objs = quota_obj.QuotaUsage.get_objects(context, **filters) for obj in objs: obj.dirty = dirty obj.update() return len(objs) @db_api.retry_if_session_inactive() @db_api.CONTEXT_WRITER def set_all_quota_usage_dirty(context, resource, dirty=True): """Set the dirty bit on quota usage for all tenants. :param resource: the resource for which the dirty bit should be set :returns: the number of tenants for which the dirty bit was actually updated """ # TODO(manjeets) consider squashing this method with # set_resources_quota_usage_dirty objs = quota_obj.QuotaUsage.get_objects(context, resource=resource) for obj in objs: obj.dirty = dirty obj.update() return len(objs) @db_api.retry_if_session_inactive() def create_reservation(context, tenant_id, deltas, expiration=None): # This method is usually called from within another transaction.
# Consider using begin_nested expiration = expiration or (utcnow() + datetime.timedelta(0, 120)) delta_objs = [] for (resource, delta) in deltas.items(): delta_objs.append(quota_obj.ResourceDelta( context, resource=resource, amount=delta)) reserv_obj = quota_obj.Reservation( context, project_id=tenant_id, expiration=expiration, resource_deltas=delta_objs) reserv_obj.create() return ReservationInfo(reserv_obj['id'], reserv_obj['project_id'], reserv_obj['expiration'], dict((delta.resource, delta.amount) for delta in reserv_obj.resource_deltas)) @db_api.retry_if_session_inactive() def get_reservation(context, reservation_id): reserv_obj = quota_obj.Reservation.get_object(context, id=reservation_id) if not reserv_obj: return return ReservationInfo(reserv_obj['id'], reserv_obj['project_id'], reserv_obj['expiration'], dict((delta.resource, delta.amount) for delta in reserv_obj.resource_deltas)) @db_api.retry_if_session_inactive() @db_api.CONTEXT_WRITER def remove_reservation(context, reservation_id, set_dirty=False): reservation = quota_obj.Reservation.get_object(context, id=reservation_id) if not reservation: # TODO(salv-orlando): Raise here and then handle the exception? return tenant_id = reservation.project_id resources = [delta.resource for delta in reservation.resource_deltas] reservation.delete() if set_dirty: # quota_usage for all resources involved in this reservation must # be marked as dirty set_resources_quota_usage_dirty(context, resources, tenant_id) return 1 @db_api.retry_if_session_inactive() def get_reservations_for_resources(context, tenant_id, resources, expired=False): """Retrieve total amount of reservations for specified resources. :param context: Neutron context with db session :param tenant_id: Tenant identifier :param resources: Resources for which reserved amounts should be fetched :param expired: False to fetch active reservations, True to fetch expired reservations (defaults to False) :returns: a dictionary mapping resources to their corresponding deltas """ # NOTE(manjeets) we are using utcnow() here because it # can be mocked easily, whereas datetime is a built-in type and # mock.patch does not allow mocking built-in types. return quota_obj.Reservation.get_total_reservations_map( context, utcnow(), tenant_id, resources, expired) @db_api.retry_if_session_inactive() @db_api.CONTEXT_WRITER def remove_expired_reservations(context, tenant_id=None): return quota_obj.Reservation.delete_expired(context, utcnow(), tenant_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/quota/driver.py0000644000175000017500000003422600000000000022227 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
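# The snippet below is an illustrative usage sketch of the reservation API
# from neutron.db.quota.api above; it is not part of the original module.
# `context` and `tenant_id` are placeholders for values a plugin would
# supply, and the 'port' resource name is only an example.

from neutron.db.quota import api as quota_api


def _create_port_with_reservation(context, tenant_id):
    # Reserve headroom for one port; without an explicit expiration the
    # reservation lapses after roughly 120 seconds.
    reservation = quota_api.create_reservation(context, tenant_id,
                                               {'port': 1})
    try:
        pass  # create the port here
    except Exception:
        # On failure, drop the reservation and mark usage dirty so the
        # counters are resynchronized on the next quota check.
        quota_api.remove_reservation(context, reservation.reservation_id,
                                     set_dirty=True)
        raise
    # On success the resource exists and usage is already marked dirty,
    # so remove the reservation without setting the dirty bit again.
    quota_api.remove_reservation(context, reservation.reservation_id,
                                 set_dirty=False)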
from neutron_lib.api import attributes from neutron_lib.db import api as db_api from neutron_lib import exceptions from neutron_lib.plugins import constants from neutron_lib.plugins import directory from oslo_log import log from neutron.db.quota import api as quota_api from neutron.objects import quota as quota_obj from neutron.quota import resource as res LOG = log.getLogger(__name__) class DbQuotaDriver(object): """Driver to perform necessary checks to enforce quotas and obtain quota information. The default driver utilizes the local database. """ @staticmethod def get_default_quotas(context, resources, tenant_id): """Given a list of resources, retrieve the default quotas set for a tenant. :param context: The request context, for access checks. :param resources: A dictionary of the registered resource keys. :param tenant_id: The ID of the tenant to return default quotas for. :return: dict from resource name to dict of name and limit """ # Currently the tenant_id parameter is unused, since all tenants # share the same default values. This may change in the future so # we include tenant_id to remain backwards compatible. return dict((key, resource.default) for key, resource in resources.items()) @staticmethod @db_api.retry_if_session_inactive() def get_tenant_quotas(context, resources, tenant_id): """Given a list of resources, retrieve the quotas for the given tenant. If no limits are found for the specified tenant, the operation returns the default limits. :param context: The request context, for access checks. :param resources: A dictionary of the registered resource keys. :param tenant_id: The ID of the tenant to return quotas for. :return: dict from resource name to dict of name and limit """ # init with defaults tenant_quota = dict((key, resource.default) for key, resource in resources.items()) # update with tenant specific limits quota_objs = quota_obj.Quota.get_objects(context, project_id=tenant_id) for item in quota_objs: tenant_quota[item['resource']] = item['limit'] return tenant_quota @staticmethod @db_api.retry_if_session_inactive() def get_detailed_tenant_quotas(context, resources, tenant_id): """Given a list of resources and a specific tenant, retrieve the detailed quotas (limit, used, reserved). :param context: The request context, for access checks. :param resources: A dictionary of the registered resource keys. :param tenant_id: The ID of the tenant to return quotas for. :return dict: mapping each resource name to its corresponding limit, used and reserved values. Reserved currently returns a default value of 0 """ res_reserve_info = quota_api.get_reservations_for_resources( context, tenant_id, resources.keys()) tenant_quota_ext = {} for key, resource in resources.items(): if isinstance(resource, res.TrackedResource): used = resource.count_used(context, tenant_id, resync_usage=False) else: # NOTE(ihrachys) .count won't use the plugin we pass, but we # pass it regardless to keep the quota driver API intact plugins = directory.get_plugins() plugin = plugins.get(key, plugins[constants.CORE]) used = resource.count(context, plugin, tenant_id) tenant_quota_ext[key] = { 'limit': resource.default, 'used': used, 'reserved': res_reserve_info.get(key, 0), } # update with specific tenant limits quota_objs = quota_obj.Quota.get_objects(context, project_id=tenant_id) for item in quota_objs: tenant_quota_ext[item['resource']]['limit'] = item['limit'] return tenant_quota_ext @staticmethod @db_api.retry_if_session_inactive() def delete_tenant_quota(context, tenant_id): """Delete the quota entries for a given tenant_id.
After deletion, this tenant will use default quota values in conf. Raise a "not found" error if the quota for the given tenant was never defined. """ if quota_obj.Quota.delete_objects(context, project_id=tenant_id) < 1: # No record deleted means the quota was not found raise exceptions.TenantQuotaNotFound(tenant_id=tenant_id) @staticmethod @db_api.retry_if_session_inactive() def get_all_quotas(context, resources): """Given a list of resources, retrieve the quotas for all tenants. :param context: The request context, for access checks. :param resources: A dictionary of the registered resource keys. :return: a list of dicts, one per tenant, each containing a tenant_id entry plus one resourcekey: limit entry per resource """ tenant_default = dict((key, resource.default) for key, resource in resources.items()) all_tenant_quotas = {} for quota in quota_obj.Quota.get_objects(context): tenant_id = quota['project_id'] # avoid setdefault() because we only want to copy when actually # required tenant_quota = all_tenant_quotas.get(tenant_id) if tenant_quota is None: tenant_quota = tenant_default.copy() tenant_quota['tenant_id'] = tenant_id attributes.populate_project_info(tenant_quota) all_tenant_quotas[tenant_id] = tenant_quota tenant_quota[quota['resource']] = quota['limit'] # Convert values to a list because callers expect an indexable # iterable; python3's dict_values does not support indexing return list(all_tenant_quotas.values()) @staticmethod @db_api.retry_if_session_inactive() def update_quota_limit(context, tenant_id, resource, limit): tenant_quotas = quota_obj.Quota.get_objects( context, project_id=tenant_id, resource=resource) if tenant_quotas: tenant_quotas[0].limit = limit tenant_quotas[0].update() else: quota_obj.Quota(context, project_id=tenant_id, resource=resource, limit=limit).create() def _get_quotas(self, context, tenant_id, resources): """Retrieves the quotas for specific resources. A helper method which retrieves the quotas for the specific resources identified by keys, and which apply to the current context. :param context: The request context, for access checks. :param tenant_id: the tenant_id to check quota. :param resources: A dictionary of the registered resources. """ # Grab and return the quotas (without usages) quotas = DbQuotaDriver.get_tenant_quotas( context, resources, tenant_id) return dict((k, v) for k, v in quotas.items()) def _handle_expired_reservations(self, context, tenant_id): LOG.debug("Deleting expired reservations for tenant:%s", tenant_id) # Delete expired reservations (we don't want them to accrue # in the database) quota_api.remove_expired_reservations( context, tenant_id=tenant_id) @db_api.retry_if_session_inactive() def make_reservation(self, context, tenant_id, resources, deltas, plugin): # Lock current reservation table # NOTE(salv-orlando): This routine uses DB write locks. # These locks are acquired by the count() method invoked on resources. # Please put your shotguns aside. # A non-locking algorithm for handling reservations is feasible, however # it would require two database writes even in cases when there are no # concurrent reservations. # For this reason it might be advisable to handle contention using # this kind of locks and paying the cost of a write set certification # failure when a MySQL Galera cluster is employed. Also, this class of # locks should be ok to use once support for sending "hotspot" writes # to a single node becomes available.
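# In short: for each requested resource the headroom is computed as # limit - (current usage - expired reservation deltas), and the # reservation is stored only if every requested delta fits within its # headroom; otherwise OverQuota is raised.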
requested_resources = deltas.keys() with db_api.CONTEXT_WRITER.using(context): # get_tenant_quotas needs as input a dictionary mapping resource # names to BaseResource instances so that the default quota can be # retrieved current_limits = self.get_tenant_quotas( context, resources, tenant_id) unlimited_resources = set([resource for (resource, limit) in current_limits.items() if limit < 0]) # Do not even bother counting resources and calculating headroom # for resources with unlimited quota LOG.debug("Resources %s have unlimited quota limit. It is not " "required to calculate headroom ", ",".join(unlimited_resources)) requested_resources = (set(requested_resources) - unlimited_resources) # Gather current usage information # TODO(salv-orlando): calling count() for every resource triggers # multiple queries on quota usage. This should be improved, however # this is not an urgent matter as the REST API currently only # allows allocation of one resource at a time # NOTE: pass plugin too for compatibility with CountableResource # instances current_usages = dict( (resource, resources[resource].count( context, plugin, tenant_id, resync_usage=False)) for resource in requested_resources) # Adjust for expired reservations. Apparently it is cheaper than # querying every time for active reservations and counting overall # quantity of resources reserved expired_deltas = quota_api.get_reservations_for_resources( context, tenant_id, requested_resources, expired=True) # Verify that the request can be accepted with current limits resources_over_limit = [] for resource in requested_resources: expired_reservations = expired_deltas.get(resource, 0) total_usage = current_usages[resource] - expired_reservations res_headroom = current_limits[resource] - total_usage LOG.debug(("Attempting to reserve %(delta)d items for " "resource %(resource)s. Total usage: %(total)d; " "quota limit: %(limit)d; headroom:%(headroom)d"), {'resource': resource, 'delta': deltas[resource], 'total': total_usage, 'limit': current_limits[resource], 'headroom': res_headroom}) if res_headroom < deltas[resource]: resources_over_limit.append(resource) if expired_reservations: self._handle_expired_reservations(context, tenant_id) if resources_over_limit: raise exceptions.OverQuota(overs=sorted(resources_over_limit)) # Success, store the reservation # TODO(salv-orlando): Make expiration time configurable return quota_api.create_reservation( context, tenant_id, deltas) def commit_reservation(self, context, reservation_id): # Do not mark resource usage as dirty. If a reservation is committed, # then the relevant resources have been created. Usage data for these # resources has therefore already been marked dirty. quota_api.remove_reservation(context, reservation_id, set_dirty=False) def cancel_reservation(self, context, reservation_id): # Mark resource usage as dirty so that, next time, both the resources # actually used and those reserved will be recalculated quota_api.remove_reservation(context, reservation_id, set_dirty=True) def limit_check(self, context, tenant_id, resources, values): """Check simple quota limits. For limits--those quotas for which there is no usage synchronization function--this method checks that a set of proposed values are permitted by the limit restriction. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns nothing. :param context: The request context, for access checks.
:param tenant_id: The tenant_id to check the quota. :param resources: A dictionary of the registered resources. :param values: A dictionary of the values to check against the quota. """ # Ensure no value is less than zero unders = [key for key, val in values.items() if val < 0] if unders: raise exceptions.InvalidQuotaValue(unders=sorted(unders)) # Get the applicable quotas quotas = self._get_quotas(context, tenant_id, resources) # Check the quotas and construct a list of the resources that # would be put over limit by the desired values overs = [key for key, val in values.items() if 0 <= quotas[key] < val] if overs: raise exceptions.OverQuota(overs=sorted(overs)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/quota/models.py0000644000175000017500000000457600000000000022224 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy import sql class ResourceDelta(model_base.BASEV2): resource = sa.Column(sa.String(255), primary_key=True) reservation_id = sa.Column(sa.String(36), sa.ForeignKey('reservations.id', ondelete='CASCADE'), primary_key=True, nullable=False) # Requested amount of resource amount = sa.Column(sa.Integer) class Reservation(model_base.BASEV2, model_base.HasId, model_base.HasProjectNoIndex): expiration = sa.Column(sa.DateTime()) resource_deltas = orm.relationship(ResourceDelta, backref='reservation', lazy="joined", cascade='all, delete-orphan') class Quota(model_base.BASEV2, model_base.HasId, model_base.HasProject): """Represent a single quota override for a tenant. If there is no row for a given tenant id and resource, then the default for the deployment is used. """ resource = sa.Column(sa.String(255)) limit = sa.Column(sa.Integer) class QuotaUsage(model_base.BASEV2, model_base.HasProjectPrimaryKeyIndex): """Represents the current usage for a given resource.""" resource = sa.Column(sa.String(255), nullable=False, primary_key=True, index=True) dirty = sa.Column(sa.Boolean, nullable=False, server_default=sql.false()) in_use = sa.Column(sa.Integer, nullable=False, server_default="0") reserved = sa.Column(sa.Integer, nullable=False, server_default="0") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/rbac_db_mixin.py0000644000175000017500000001526000000000000022360 0ustar00coreycorey00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.callbacks import events from neutron_lib.callbacks import exceptions as c_exc from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib.db import api as db_api from neutron_lib.db import utils as db_utils from neutron_lib import exceptions as n_exc from neutron_lib.objects import exceptions as o_exc from neutron.extensions import rbac as ext_rbac from neutron.objects import base as base_obj from neutron.objects import rbac as rbac_obj class RbacPluginMixin(object): """Plugin mixin that implements the RBAC DB operations.""" object_type_cache = {} supported_extension_aliases = ['rbac-policies'] @db_api.retry_if_session_inactive() def create_rbac_policy(self, context, rbac_policy): e = rbac_policy['rbac_policy'] try: registry.publish(resources.RBAC_POLICY, events.BEFORE_CREATE, self, payload=events.DBEventPayload( context, request_body=e, metadata={'object_type': e['object_type']})) except c_exc.CallbackFailure as e: raise n_exc.InvalidInput(error_message=e) rbac_class = ( rbac_obj.RBACBaseObject.get_type_class_map()[e['object_type']]) try: rbac_args = {'project_id': e['project_id'], 'object_id': e['object_id'], 'action': e['action'], 'target_tenant': e['target_tenant']} _rbac_obj = rbac_class(context, **rbac_args) _rbac_obj.create() except o_exc.NeutronDbObjectDuplicateEntry: raise ext_rbac.DuplicateRbacPolicy() return self._make_rbac_policy_dict(_rbac_obj) @staticmethod def _make_rbac_policy_dict(entry, fields=None): res = {f: entry[f] for f in ('id', 'project_id', 'target_tenant', 'action', 'object_id')} res['object_type'] = entry.db_model.object_type return db_utils.resource_fields(res, fields) @db_api.retry_if_session_inactive() def update_rbac_policy(self, context, id, rbac_policy): pol = rbac_policy['rbac_policy'] entry = self._get_rbac_policy(context, id) object_type = entry.db_model.object_type try: registry.publish(resources.RBAC_POLICY, events.BEFORE_UPDATE, self, payload=events.DBEventPayload( context, request_body=pol, states=(entry,), resource_id=id, metadata={'object_type': object_type})) except c_exc.CallbackFailure as ex: raise ext_rbac.RbacPolicyInUse(object_id=entry.object_id, details=ex) entry.update_fields(pol) entry.update() return self._make_rbac_policy_dict(entry) @db_api.retry_if_session_inactive() def delete_rbac_policy(self, context, id): entry = self._get_rbac_policy(context, id) object_type = entry.db_model.object_type try: registry.publish(resources.RBAC_POLICY, events.BEFORE_DELETE, self, payload=events.DBEventPayload( context, states=(entry,), resource_id=id, metadata={'object_type': object_type})) except c_exc.CallbackFailure as ex: raise ext_rbac.RbacPolicyInUse(object_id=entry.object_id, details=ex) # make a dict copy because deleting the entry will nullify its # object_id link to network entry_dict = entry.to_dict() entry.delete() registry.publish(resources.RBAC_POLICY, events.AFTER_DELETE, self, payload=events.DBEventPayload( context, states=(entry_dict,), resource_id=id, metadata={'object_type': object_type})) self.object_type_cache.pop(id, None) def _get_rbac_policy(self, context, id): 
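"""Return the RBAC policy object for the given id, raising RbacPolicyNotFound if no RBAC table contains it."""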
object_type = self._get_object_type(context, id) rbac_class = rbac_obj.RBACBaseObject.get_type_class_map()[object_type] _rbac_obj = rbac_class.get_object(context, id=id) if not _rbac_obj: raise ext_rbac.RbacPolicyNotFound(id=id, object_type=object_type) return _rbac_obj @db_api.retry_if_session_inactive() def get_rbac_policy(self, context, id, fields=None): return self._make_rbac_policy_dict( self._get_rbac_policy(context, id), fields=fields) @db_api.retry_if_session_inactive() def get_rbac_policies(self, context, filters=None, fields=None, sorts=None, limit=None, page_reverse=False): pager = base_obj.Pager(sorts, limit, page_reverse) filters = filters or {} object_types = filters.pop('object_type', None) rbac_classes_to_query = [ o for t, o in rbac_obj.RBACBaseObject.get_type_class_map().items() if not object_types or t in object_types] rbac_objs = [] for rbac_class in rbac_classes_to_query: rbac_objs += rbac_class.get_objects(context, _pager=pager, **filters) return [self._make_rbac_policy_dict(_rbac_obj, fields) for _rbac_obj in rbac_objs] def _get_object_type(self, context, entry_id): """Scans all RBAC tables for an ID to figure out the type. This will be an expensive operation as the number of RBAC tables grows. The result is cached since object types cannot be updated for a policy. """ if entry_id in self.object_type_cache: return self.object_type_cache[entry_id] for otype, rbac_class in \ rbac_obj.RBACBaseObject.get_type_class_map().items(): if rbac_class.count(context, id=entry_id): self.object_type_cache[entry_id] = otype return otype raise ext_rbac.RbacPolicyNotFound(id=entry_id, object_type='unknown') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/rbac_db_models.py0000644000175000017500000001073200000000000022516 0ustar00coreycorey00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from neutron_lib.db import constants as db_const from neutron_lib.db import model_base from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory import sqlalchemy as sa from sqlalchemy.ext import declarative from sqlalchemy.orm import validates from neutron._i18n import _ ACCESS_SHARED = 'access_as_shared' ACCESS_EXTERNAL = 'access_as_external' class InvalidActionForType(n_exc.InvalidInput): message = _("Invalid action '%(action)s' for object type " "'%(object_type)s'. Valid actions: %(valid_actions)s") class RBACColumns(model_base.HasId, model_base.HasProject): """Mixin that object-specific RBAC tables should inherit. All RBAC tables should inherit directly from this one because the RBAC code uses the __subclasses__() method to discover the RBAC types. """ # the target_tenant is the subject that the policy will affect. 
This may # also be a wildcard '*' to indicate all tenants or it may be a role if # neutron gets better integration with keystone target_tenant = sa.Column(sa.String(db_const.PROJECT_ID_FIELD_SIZE), nullable=False) action = sa.Column(sa.String(255), nullable=False) @abc.abstractproperty def object_type(self): # this determines the name that users will use in the API # to reference the type. Sub-classes should set their own pass @declarative.declared_attr def __table_args__(cls): return ( sa.UniqueConstraint('target_tenant', 'object_id', 'action'), model_base.BASEV2.__table_args__ ) @validates('action') def _validate_action(self, key, action): if action not in self.get_valid_actions(): raise InvalidActionForType( action=action, object_type=self.object_type, valid_actions=self.get_valid_actions()) return action @staticmethod @abc.abstractmethod def get_valid_actions(): # each object table needs to override this to return an iterable # with the valid actions for its rbac entries pass def get_type_model_map(): return {table.object_type: table for table in RBACColumns.__subclasses__()} def _object_id_column(foreign_key): return sa.Column(sa.String(36), sa.ForeignKey(foreign_key, ondelete="CASCADE"), nullable=False) class NetworkRBAC(RBACColumns, model_base.BASEV2): """RBAC table for networks.""" object_id = _object_id_column('networks.id') object_type = 'network' revises_on_change = ('network', ) @staticmethod def get_valid_actions(): actions = (ACCESS_SHARED,) pl = directory.get_plugin() if 'external-net' in pl.supported_extension_aliases: actions += (ACCESS_EXTERNAL,) return actions class QosPolicyRBAC(RBACColumns, model_base.BASEV2): """RBAC table for qos policies.""" object_id = _object_id_column('qos_policies.id') object_type = 'qos_policy' @staticmethod def get_valid_actions(): return (ACCESS_SHARED,) class SecurityGroupRBAC(RBACColumns, model_base.BASEV2): """RBAC table for security groups.""" object_id = _object_id_column('securitygroups.id') object_type = 'security_group' @staticmethod def get_valid_actions(): return (ACCESS_SHARED,) class AddressScopeRBAC(RBACColumns, model_base.BASEV2): """RBAC table for address_scope.""" object_id = _object_id_column('address_scopes.id') object_type = 'address_scope' @staticmethod def get_valid_actions(): return (ACCESS_SHARED,) class SubnetPoolRBAC(RBACColumns, model_base.BASEV2): """RBAC table for subnetpool.""" object_id = _object_id_column('subnetpools.id') object_type = 'subnetpool' @staticmethod def get_valid_actions(): return (ACCESS_SHARED,) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/securitygroups_db.py0000644000175000017500000012562200000000000023360 0ustar00coreycorey00000000000000# Copyright 2012 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
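# Illustrative sketch (hypothetical, not part of neutron): adding a new RBAC
# type to the models in rbac_db_models above only requires a new subclass,
# because get_type_model_map() discovers types via
# RBACColumns.__subclasses__(). The 'my_objects' table and 'my_object' type
# name are invented for the example.
#
# from neutron_lib.db import model_base
# from neutron.db import rbac_db_models
#
# class MyObjectRBAC(rbac_db_models.RBACColumns, model_base.BASEV2):
#     """RBAC table for my_objects."""
#
#     object_id = rbac_db_models._object_id_column('my_objects.id')
#     object_type = 'my_object'
#
#     @staticmethod
#     def get_valid_actions():
#         return (rbac_db_models.ACCESS_SHARED,)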
import netaddr from neutron_lib.api.definitions import port as port_def from neutron_lib.api import validators from neutron_lib.callbacks import events from neutron_lib.callbacks import exceptions from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib import context as context_lib from neutron_lib.db import api as db_api from neutron_lib.db import model_query from neutron_lib.db import resource_extend from neutron_lib.db import utils as db_utils from neutron_lib import exceptions as n_exc from neutron_lib.utils import helpers from neutron_lib.utils import net from oslo_log import log as logging from oslo_utils import uuidutils import six from sqlalchemy.orm import scoped_session from neutron._i18n import _ from neutron.common import _constants as const from neutron.db.models import securitygroup as sg_models from neutron.db import rbac_db_mixin as rbac_mixin from neutron.extensions import securitygroup as ext_sg from neutron.objects import base as base_obj from neutron.objects import ports as port_obj from neutron.objects import securitygroup as sg_obj from neutron import quota LOG = logging.getLogger(__name__) @resource_extend.has_resource_extenders @registry.has_registry_receivers class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase, rbac_mixin.RbacPluginMixin): """Mixin class to add security groups to db_base_plugin_v2.""" __native_bulk_support = True def create_security_group_bulk(self, context, security_groups): return self._create_bulk('security_group', context, security_groups) def _registry_notify(self, res, event, id=None, exc_cls=None, **kwargs): # NOTE(armax): a callback exception here will prevent the request # from being processed. This is a hook point for backend's validation; # we raise to propagate the reason for the failure. try: if 'payload' in kwargs: # TODO(boden): remove shim once all callbacks use payloads registry.publish(res, event, self, payload=kwargs['payload']) else: registry.notify(res, event, self, **kwargs) except exceptions.CallbackFailure as e: if exc_cls: reason = (_('cannot perform %(event)s due to %(reason)s') % {'event': event, 'reason': e}) raise exc_cls(reason=reason, id=id) @db_api.retry_if_session_inactive() def create_security_group(self, context, security_group, default_sg=False): """Create security group. If default_sg is true, this is the default security group for the given tenant, created only if one does not already exist.
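Creating the default group also creates, for each supported ethertype, an ingress rule allowing traffic from members of the group itself and an unrestricted egress rule, all within the same transaction.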
""" s = security_group['security_group'] kwargs = { 'context': context, 'security_group': s, 'is_default': default_sg, } self._registry_notify(resources.SECURITY_GROUP, events.BEFORE_CREATE, exc_cls=ext_sg.SecurityGroupConflict, payload=events.DBEventPayload( context, metadata={'is_default': default_sg}, request_body=security_group, desired_state=s)) tenant_id = s['tenant_id'] stateful = s.get('stateful', True) if not default_sg: self._ensure_default_security_group(context, tenant_id) else: existing_def_sg_id = self._get_default_sg_id(context, tenant_id) if existing_def_sg_id is not None: # default already exists, return it return self.get_security_group(context, existing_def_sg_id) with db_api.CONTEXT_WRITER.using(context): sg = sg_obj.SecurityGroup( context, id=s.get('id') or uuidutils.generate_uuid(), description=s['description'], project_id=tenant_id, name=s['name'], is_default=default_sg, stateful=stateful) sg.create() delta = len(ext_sg.sg_supported_ethertypes) delta = delta * 2 if default_sg else delta reservation = quota.QUOTAS.make_reservation( context, tenant_id, {'security_group_rule': delta}, self) for ethertype in ext_sg.sg_supported_ethertypes: if default_sg: # Allow intercommunication ingress_rule = sg_obj.SecurityGroupRule( context, id=uuidutils.generate_uuid(), project_id=tenant_id, security_group_id=sg.id, direction='ingress', ethertype=ethertype, remote_group_id=sg.id) ingress_rule.create() sg.rules.append(ingress_rule) egress_rule = sg_obj.SecurityGroupRule( context, id=uuidutils.generate_uuid(), project_id=tenant_id, security_group_id=sg.id, direction='egress', ethertype=ethertype) egress_rule.create() sg.rules.append(egress_rule) sg.obj_reset_changes(['rules']) quota.QUOTAS.commit_reservation(context, reservation.reservation_id) # fetch sg from db to load the sg rules with sg model. sg = sg_obj.SecurityGroup.get_object(context, id=sg.id) secgroup_dict = self._make_security_group_dict(sg) kwargs['security_group'] = secgroup_dict self._registry_notify(resources.SECURITY_GROUP, events.PRECOMMIT_CREATE, exc_cls=ext_sg.SecurityGroupConflict, **kwargs) registry.notify(resources.SECURITY_GROUP, events.AFTER_CREATE, self, **kwargs) return secgroup_dict @db_api.retry_if_session_inactive() def get_security_groups(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False, default_sg=False): # If default_sg is True do not call _ensure_default_security_group() # so this can be done recursively. Context.tenant_id is checked # because all the unit tests do not explicitly set the context on # GETS. TODO(arosen) context handling can probably be improved here. 
filters = filters or {} if not default_sg and context.tenant_id: tenant_id = filters.get('tenant_id') if tenant_id: tenant_id = tenant_id[0] else: tenant_id = context.tenant_id self._ensure_default_security_group(context, tenant_id) pager = base_obj.Pager( sorts=sorts, limit=limit, marker=marker, page_reverse=page_reverse) sg_objs = sg_obj.SecurityGroup.get_objects( context, _pager=pager, validate_filters=False, fields=fields, **filters) return [self._make_security_group_dict(obj, fields) for obj in sg_objs] @db_api.retry_if_session_inactive() def get_security_groups_count(self, context, filters=None): filters = filters or {} return sg_obj.SecurityGroup.count( context, validate_filters=False, **filters) @db_api.retry_if_session_inactive() def get_security_group(self, context, id, fields=None, tenant_id=None): """Tenant id is given to handle the case when creating a security group rule on behalf of another use. """ if tenant_id: tmp_context_tenant_id = context.tenant_id context.tenant_id = tenant_id try: with db_api.CONTEXT_READER.using(context): ret = self._make_security_group_dict(self._get_security_group( context, id, fields=fields), fields) if (fields is None or len(fields) == 0 or 'security_group_rules' in fields): rules = self.get_security_group_rules( context_lib.get_admin_context(), {'security_group_id': [id]}) ret['security_group_rules'] = rules finally: if tenant_id: context.tenant_id = tmp_context_tenant_id return ret def _get_security_group(self, context, id, fields=None): sg = sg_obj.SecurityGroup.get_object(context, fields=fields, id=id) if sg is None: raise ext_sg.SecurityGroupNotFound(id=id) return sg def _check_security_group(self, context, id, tenant_id=None): if tenant_id: tmp_context_tenant_id = context.tenant_id context.tenant_id = tenant_id try: if not sg_obj.SecurityGroup.objects_exist(context, id=id): raise ext_sg.SecurityGroupNotFound(id=id) finally: if tenant_id: context.tenant_id = tmp_context_tenant_id @db_api.retry_if_session_inactive() def delete_security_group(self, context, id): filters = {'security_group_id': [id]} with db_api.CONTEXT_READER.using(context): ports = self._get_port_security_group_bindings(context, filters) if ports: raise ext_sg.SecurityGroupInUse(id=id) # confirm security group exists sg = self._get_security_group(context, id, fields=['id', 'name']) if sg['name'] == 'default' and not context.is_admin: raise ext_sg.SecurityGroupCannotRemoveDefault() kwargs = { 'context': context, 'security_group_id': id, 'security_group': sg, } self._registry_notify(resources.SECURITY_GROUP, events.BEFORE_DELETE, exc_cls=ext_sg.SecurityGroupInUse, id=id, payload=events.DBEventPayload( context, states=(sg,), resource_id=id)) with db_api.CONTEXT_WRITER.using(context): # pass security_group_rule_ids to ensure # consistency with deleted rules # get security_group_bindings and security_group one more time # so that they will be attached for session where sg will be # deleted ports = self._get_port_security_group_bindings(context, filters) sg = self._get_security_group(context, id) kwargs['security_group_rule_ids'] = [r['id'] for r in sg.rules] kwargs['security_group'] = self._make_security_group_dict(sg) self._registry_notify(resources.SECURITY_GROUP, events.PRECOMMIT_DELETE, exc_cls=ext_sg.SecurityGroupInUse, id=id, **kwargs) sg.delete() kwargs.pop('security_group') registry.notify(resources.SECURITY_GROUP, events.AFTER_DELETE, self, **kwargs) @db_api.retry_if_session_inactive() def update_security_group(self, context, id, security_group): s = 
security_group['security_group'] if 'stateful' in s: with db_api.CONTEXT_READER.using(context): sg = self._get_security_group(context, id) if s['stateful'] != sg['stateful']: filters = {'security_group_id': [id]} ports = self._get_port_security_group_bindings(context, filters) if ports: raise ext_sg.SecurityGroupInUse(id=id) kwargs = { 'context': context, 'security_group_id': id, 'security_group': s, } self._registry_notify(resources.SECURITY_GROUP, events.BEFORE_UPDATE, exc_cls=ext_sg.SecurityGroupConflict, **kwargs) with db_api.CONTEXT_WRITER.using(context): sg = self._get_security_group(context, id) if sg.name == 'default' and 'name' in s: raise ext_sg.SecurityGroupCannotUpdateDefault() sg_dict = self._make_security_group_dict(sg) kwargs['original_security_group'] = sg_dict sg.update_fields(s) sg.update() sg_dict = self._make_security_group_dict(sg) kwargs['security_group'] = sg_dict self._registry_notify( resources.SECURITY_GROUP, events.PRECOMMIT_UPDATE, exc_cls=ext_sg.SecurityGroupConflict, payload=events.DBEventPayload( context, request_body=s, states=(kwargs['original_security_group'],), resource_id=id, desired_state=sg_dict)) registry.notify(resources.SECURITY_GROUP, events.AFTER_UPDATE, self, **kwargs) return sg_dict def _make_security_group_dict(self, security_group, fields=None): res = {'id': security_group['id'], 'name': security_group['name'], 'stateful': security_group['stateful'], 'tenant_id': security_group['tenant_id'], 'description': security_group['description']} if security_group.rules: res['security_group_rules'] = [ self._make_security_group_rule_dict(r.db_obj) for r in security_group.rules ] else: res['security_group_rules'] = [] resource_extend.apply_funcs(ext_sg.SECURITYGROUPS, res, security_group.db_obj) return db_utils.resource_fields(res, fields) @staticmethod def _make_security_group_binding_dict(security_group, fields=None): res = {'port_id': security_group['port_id'], 'security_group_id': security_group['security_group_id']} return db_utils.resource_fields(res, fields) @db_api.retry_if_session_inactive() def _create_port_security_group_binding(self, context, port_id, security_group_id): with db_api.CONTEXT_WRITER.using(context): db = sg_models.SecurityGroupPortBinding( port_id=port_id, security_group_id=security_group_id) context.session.add(db) def _get_port_security_group_bindings(self, context, filters=None, fields=None): return model_query.get_collection( context, sg_models.SecurityGroupPortBinding, self._make_security_group_binding_dict, filters=filters, fields=fields) @db_api.retry_if_session_inactive() def _delete_port_security_group_bindings(self, context, port_id): with db_api.CONTEXT_WRITER.using(context): query = model_query.query_with_hooks( context, sg_models.SecurityGroupPortBinding) bindings = query.filter( sg_models.SecurityGroupPortBinding.port_id == port_id) for binding in bindings: context.session.delete(binding) @db_api.retry_if_session_inactive() def create_security_group_rule_bulk(self, context, security_group_rules): return self._create_bulk('security_group_rule', context, security_group_rules) @db_api.retry_if_session_inactive() def create_security_group_rule_bulk_native(self, context, security_group_rules): rules = security_group_rules['security_group_rules'] scoped_session(context.session) security_group_id = self._validate_security_group_rules( context, security_group_rules) with db_api.CONTEXT_WRITER.using(context): self._check_for_duplicate_rules(context, security_group_id, rules) ret = [] for rule_dict in rules: res_rule_dict = 
self._create_security_group_rule( context, rule_dict, validate=False) ret.append(res_rule_dict) for rdict in ret: registry.notify( resources.SECURITY_GROUP_RULE, events.AFTER_CREATE, self, context=context, security_group_rule=rdict) return ret @db_api.retry_if_session_inactive() def create_security_group_rule(self, context, security_group_rule): res = self._create_security_group_rule(context, security_group_rule) registry.notify( resources.SECURITY_GROUP_RULE, events.AFTER_CREATE, self, context=context, security_group_rule=res) return res def _create_security_group_rule(self, context, security_group_rule, validate=True): if validate: sg_id = self._validate_security_group_rule(context, security_group_rule) rule_dict = security_group_rule['security_group_rule'] remote_ip_prefix = rule_dict.get('remote_ip_prefix') if remote_ip_prefix: remote_ip_prefix = net.AuthenticIPNetwork(remote_ip_prefix) protocol = rule_dict.get('protocol') if protocol: # object expects strings only protocol = six.text_type(protocol) args = { 'id': (rule_dict.get('id') or uuidutils.generate_uuid()), 'project_id': rule_dict['tenant_id'], 'security_group_id': rule_dict['security_group_id'], 'direction': rule_dict['direction'], 'remote_group_id': rule_dict.get('remote_group_id'), 'ethertype': rule_dict['ethertype'], 'protocol': protocol, 'remote_ip_prefix': remote_ip_prefix, 'description': rule_dict.get('description'), } port_range_min = self._safe_int(rule_dict['port_range_min']) if port_range_min is not None: args['port_range_min'] = port_range_min port_range_max = self._safe_int(rule_dict['port_range_max']) if port_range_max is not None: args['port_range_max'] = port_range_max kwargs = { 'context': context, 'security_group_rule': args } self._registry_notify(resources.SECURITY_GROUP_RULE, events.BEFORE_CREATE, exc_cls=ext_sg.SecurityGroupConflict, **kwargs) with db_api.CONTEXT_WRITER.using(context): if validate: self._check_for_duplicate_rules(context, sg_id, [security_group_rule]) sg_rule = sg_obj.SecurityGroupRule(context, **args) sg_rule.create() # fetch sg_rule from db to load the sg rules with sg model # otherwise a DetachedInstanceError can occur for model extensions sg_rule = sg_obj.SecurityGroupRule.get_object(context, id=sg_rule.id) res_rule_dict = self._make_security_group_rule_dict(sg_rule.db_obj) kwargs['security_group_rule'] = res_rule_dict self._registry_notify( resources.SECURITY_GROUP_RULE, events.PRECOMMIT_CREATE, exc_cls=ext_sg.SecurityGroupConflict, **kwargs) return res_rule_dict def _get_ip_proto_number(self, protocol): if protocol is None: return # According to bug 1381379, protocol is always set to string to avoid # problems with comparing int and string in PostgreSQL. Here this # string is converted to int to give an opportunity to use it as # before. 
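# The lookup below first canonicalizes legacy name aliases via # IP_PROTOCOL_NAME_ALIASES, then maps known protocol names to numbers # through IP_PROTOCOL_MAP; plain numeric strings fall through to int() # unchanged.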
if protocol in constants.IP_PROTOCOL_NAME_ALIASES: protocol = constants.IP_PROTOCOL_NAME_ALIASES[protocol] return int(constants.IP_PROTOCOL_MAP.get(protocol, protocol)) def _get_ip_proto_name_and_num(self, protocol, ethertype=None): if protocol is None: return protocol = str(protocol) # Force all legacy IPv6 ICMP protocol names to be 'ipv6-icmp', and # protocol number 1 to be 58 if ethertype == constants.IPv6: if protocol in const.IPV6_ICMP_LEGACY_PROTO_LIST: protocol = constants.PROTO_NAME_IPV6_ICMP elif protocol == str(constants.PROTO_NUM_ICMP): protocol = str(constants.PROTO_NUM_IPV6_ICMP) if protocol in constants.IP_PROTOCOL_MAP: return [protocol, str(constants.IP_PROTOCOL_MAP.get(protocol))] elif protocol in constants.IP_PROTOCOL_NUM_TO_NAME_MAP: return [constants.IP_PROTOCOL_NUM_TO_NAME_MAP.get(protocol), protocol] return [protocol, protocol] def _safe_int(self, port_range): if port_range is None: return try: return int(port_range) except (ValueError, TypeError): msg = "port range must be an integer" raise n_exc.InvalidInput(error_message=msg) def _validate_port_range(self, rule): """Check that port_range is valid.""" if rule['port_range_min'] is None and rule['port_range_max'] is None: return if not rule['protocol']: raise ext_sg.SecurityGroupProtocolRequiredWithPorts() ip_proto = self._get_ip_proto_number(rule['protocol']) # Not all firewall_driver support all these protocols, # but being strict here doesn't hurt. if (ip_proto in const.SG_PORT_PROTO_NUMS or ip_proto in const.SG_PORT_PROTO_NAMES): if rule['port_range_min'] == 0 or rule['port_range_max'] == 0: raise ext_sg.SecurityGroupInvalidPortValue(port=0) elif (rule['port_range_min'] is not None and rule['port_range_max'] is not None and rule['port_range_min'] <= rule['port_range_max']): # When min/max are the same it is just a single port pass else: raise ext_sg.SecurityGroupInvalidPortRange() elif ip_proto in [constants.PROTO_NUM_ICMP, constants.PROTO_NUM_IPV6_ICMP]: for attr, field in [('port_range_min', 'type'), ('port_range_max', 'code')]: if rule[attr] is not None and not (0 <= rule[attr] <= 255): raise ext_sg.SecurityGroupInvalidIcmpValue( field=field, attr=attr, value=rule[attr]) if (rule['port_range_min'] is None and rule['port_range_max'] is not None): raise ext_sg.SecurityGroupMissingIcmpType( value=rule['port_range_max']) else: # Only the protocols above support ports, raise otherwise. if (rule['port_range_min'] is not None or rule['port_range_max'] is not None): port_protocols = ( ', '.join(s.upper() for s in const.SG_PORT_PROTO_NAMES)) raise ext_sg.SecurityGroupInvalidProtocolForPort( protocol=ip_proto, valid_port_protocols=port_protocols) def _make_canonical_port_range(self, rule): if (rule['port_range_min'] == constants.PORT_RANGE_MIN and rule['port_range_max'] == constants.PORT_RANGE_MAX): LOG.info('Project %(project)s added a security group rule ' 'specifying the entire port range (%(min)s - ' '%(max)s). 
It was automatically converted to not ' 'have a range to better optimize it for the backend ' 'security group implementation(s).', {'project': rule['tenant_id'], 'min': rule['port_range_min'], 'max': rule['port_range_max']}) rule['port_range_min'] = rule['port_range_max'] = None def _validate_ethertype_and_protocol(self, rule): """Check if given ethertype and protocol are valid or not""" if rule['protocol'] in [constants.PROTO_NAME_IPV6_ENCAP, constants.PROTO_NAME_IPV6_FRAG, constants.PROTO_NAME_IPV6_ICMP, constants.PROTO_NAME_IPV6_ICMP_LEGACY, constants.PROTO_NAME_IPV6_NONXT, constants.PROTO_NAME_IPV6_OPTS, constants.PROTO_NAME_IPV6_ROUTE, str(constants.PROTO_NUM_IPV6_ENCAP), str(constants.PROTO_NUM_IPV6_FRAG), str(constants.PROTO_NUM_IPV6_ICMP), str(constants.PROTO_NUM_IPV6_NONXT), str(constants.PROTO_NUM_IPV6_OPTS), str(constants.PROTO_NUM_IPV6_ROUTE)]: if rule['ethertype'] == constants.IPv4: raise ext_sg.SecurityGroupEthertypeConflictWithProtocol( ethertype=rule['ethertype'], protocol=rule['protocol']) def _validate_single_tenant_and_group(self, security_group_rules): """Check that all rules belong to the same security group and tenant """ sg_groups = set() tenants = set() for rule_dict in security_group_rules['security_group_rules']: rule = rule_dict['security_group_rule'] sg_groups.add(rule['security_group_id']) if len(sg_groups) > 1: raise ext_sg.SecurityGroupNotSingleGroupRules() tenants.add(rule['tenant_id']) if len(tenants) > 1: raise ext_sg.SecurityGroupRulesNotSingleTenant() return sg_groups.pop() def _make_canonical_ipv6_icmp_protocol(self, rule): if rule.get('ethertype') == constants.IPv6: if rule.get('protocol') in const.IPV6_ICMP_LEGACY_PROTO_LIST: LOG.info('Project %(project)s added a security group rule ' 'with legacy IPv6 ICMP protocol name %(protocol)s, ' '%(new_protocol)s should be used instead. It was ' 'automatically converted.', {'project': rule['tenant_id'], 'protocol': rule['protocol'], 'new_protocol': constants.PROTO_NAME_IPV6_ICMP}) rule['protocol'] = constants.PROTO_NAME_IPV6_ICMP elif rule.get('protocol') == str(constants.PROTO_NUM_ICMP): LOG.info('Project %(project)s added a security group rule ' 'with legacy IPv6 ICMP protocol number %(protocol)s, ' '%(new_protocol)s should be used instead. It was ' 'automatically converted.', {'project': rule['tenant_id'], 'protocol': rule['protocol'], 'new_protocol': str(constants.PROTO_NUM_IPV6_ICMP)}) rule['protocol'] = str(constants.PROTO_NUM_IPV6_ICMP) def _validate_security_group_rule(self, context, security_group_rule): rule = security_group_rule['security_group_rule'] self._make_canonical_ipv6_icmp_protocol(rule) self._make_canonical_port_range(rule) self._validate_port_range(rule) self._validate_ip_prefix(rule) self._validate_ethertype_and_protocol(rule) if rule['remote_ip_prefix'] and rule['remote_group_id']: raise ext_sg.SecurityGroupRemoteGroupAndRemoteIpPrefix() remote_group_id = rule['remote_group_id'] # Check that remote_group_id exists for tenant if remote_group_id: self._check_security_group(context, remote_group_id, tenant_id=rule['tenant_id']) security_group_id = rule['security_group_id'] # Confirm that the tenant has permission # to add rules to this security group. 
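# (If the group does not exist or is not visible to the tenant,
# _check_security_group raises SecurityGroupNotFound, so an invalid
# rule is rejected before any DB write happens.)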
self._check_security_group(context, security_group_id, tenant_id=rule['tenant_id']) return security_group_id @staticmethod def _validate_sgs_for_port(security_groups): if not security_groups: return if not len(set(sg.stateful for sg in security_groups)) == 1: msg = ("Cannot apply both stateful and stateless security " "groups on the same port at the same time") raise ext_sg.SecurityGroupConflict(reason=msg) def _validate_security_group_rules(self, context, security_group_rules): sg_id = self._validate_single_tenant_and_group(security_group_rules) for rule in security_group_rules['security_group_rules']: self._validate_security_group_rule(context, rule) return sg_id def _make_security_group_rule_dict(self, security_group_rule, fields=None): res = {'id': security_group_rule['id'], 'tenant_id': security_group_rule['tenant_id'], 'security_group_id': security_group_rule['security_group_id'], 'ethertype': security_group_rule['ethertype'], 'direction': security_group_rule['direction'], 'protocol': security_group_rule['protocol'], 'port_range_min': security_group_rule['port_range_min'], 'port_range_max': security_group_rule['port_range_max'], 'remote_ip_prefix': security_group_rule['remote_ip_prefix'], 'remote_group_id': security_group_rule['remote_group_id']} resource_extend.apply_funcs(ext_sg.SECURITYGROUPRULES, res, security_group_rule) return db_utils.resource_fields(res, fields) def _rule_to_key(self, rule): def _normalize_rule_value(key, value): # This string is used as a placeholder for str(None), but shorter. none_char = '+' if key == 'remote_ip_prefix': all_address = ['0.0.0.0/0', '::/0', None] if value in all_address: return none_char elif value is None: return none_char elif key == 'protocol': return str(self._get_ip_proto_name_and_num( value, ethertype=rule.get('ethertype'))) return str(value) comparison_keys = [ 'direction', 'ethertype', 'port_range_max', 'port_range_min', 'protocol', 'remote_group_id', 'remote_ip_prefix', 'security_group_id' ] return '_'.join([_normalize_rule_value(x, rule.get(x)) for x in comparison_keys]) def _check_for_duplicate_rules(self, context, security_group_id, new_security_group_rules): # First up, check for any duplicates in the new rules. new_rules_set = set() for i in new_security_group_rules: rule_key = self._rule_to_key(i['security_group_rule']) if rule_key in new_rules_set: raise ext_sg.DuplicateSecurityGroupRuleInPost(rule=i) new_rules_set.add(rule_key) # Now, let's make sure none of the new rules conflict with # existing rules; note that we do *not* store the db rules # in the set, as we assume they were already checked, # when added. sg = self.get_security_group(context, security_group_id) if sg: for i in sg['security_group_rules']: rule_key = self._rule_to_key(i) if rule_key in new_rules_set: raise ext_sg.SecurityGroupRuleExists(rule_id=i.get('id')) def _validate_ip_prefix(self, rule): """Check that a valid cidr was specified as remote_ip_prefix No need to check that it is in fact an IP address as this is already validated by attribute validators. Check that rule ethertype is consistent with remote_ip_prefix ip type. Add mask to ip_prefix if absent (192.168.1.10 -> 192.168.1.10/32). 
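For example, a rule with ethertype 'IPv4' and remote_ip_prefix
'fe80::/64' raises SecurityGroupRuleParameterConflict, while a bare
address such as '192.168.1.10' is normalized to '192.168.1.10/32'.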
""" input_prefix = rule['remote_ip_prefix'] if input_prefix: addr = netaddr.IPNetwork(input_prefix) # set input_prefix to always include the netmask: rule['remote_ip_prefix'] = str(addr) # check consistency of ethertype with addr version if rule['ethertype'] != "IPv%d" % (addr.version): raise ext_sg.SecurityGroupRuleParameterConflict( ethertype=rule['ethertype'], cidr=input_prefix) @db_api.retry_if_session_inactive() def get_security_group_rules(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): filters = filters or {} pager = base_obj.Pager( sorts=sorts, marker=marker, limit=limit, page_reverse=page_reverse) project_id = filters.get('project_id') or filters.get('tenant_id') if project_id: project_id = project_id[0] else: project_id = context.project_id if project_id: self._ensure_default_security_group(context, project_id) if not filters and context.project_id and not context.is_admin: rule_ids = sg_obj.SecurityGroupRule.get_security_group_rule_ids( context.project_id) filters = {'id': rule_ids} # NOTE(slaweq): use admin context here to be able to get all rules # which fits filters' criteria. Later in policy engine rules will be # filtered and only those which are allowed according to policy will # be returned rule_objs = sg_obj.SecurityGroupRule.get_objects( context_lib.get_admin_context(), _pager=pager, validate_filters=False, **filters ) return [ self._make_security_group_rule_dict(obj.db_obj, fields) for obj in rule_objs ] @db_api.retry_if_session_inactive() def get_security_group_rule(self, context, id, fields=None): # NOTE(slaweq): use admin context here to be able to get all rules # which fits filters' criteria. Later in policy engine rules will be # filtered and only those which are allowed according to policy will # be returned security_group_rule = self._get_security_group_rule( context_lib.get_admin_context(), id) return self._make_security_group_rule_dict( security_group_rule.db_obj, fields) def _get_security_group_rule(self, context, id): sgr = sg_obj.SecurityGroupRule.get_object(context, id=id) if sgr is None: raise ext_sg.SecurityGroupRuleNotFound(id=id) return sgr @db_api.retry_if_session_inactive() def delete_security_group_rule(self, context, id): kwargs = { 'context': context, 'security_group_rule_id': id } self._registry_notify(resources.SECURITY_GROUP_RULE, events.BEFORE_DELETE, id=id, exc_cls=ext_sg.SecurityGroupRuleInUse, **kwargs) with db_api.CONTEXT_WRITER.using(context): sgr = self._get_security_group_rule(context, id) kwargs['security_group_id'] = sgr['security_group_id'] self._registry_notify(resources.SECURITY_GROUP_RULE, events.PRECOMMIT_DELETE, exc_cls=ext_sg.SecurityGroupRuleInUse, id=id, **kwargs) sgr.delete() registry.notify( resources.SECURITY_GROUP_RULE, events.AFTER_DELETE, self, **kwargs) @staticmethod @resource_extend.extends([port_def.COLLECTION_NAME]) def _extend_port_dict_security_group(port_res, port_db): # Security group bindings will be retrieved from the SQLAlchemy # model. As they're loaded eagerly with ports because of the # joined load they will not cause an extra query. 
if isinstance(port_db, port_obj.Port): port_res[ext_sg.SECURITYGROUPS] = port_db.security_group_ids else: security_group_ids = [sec_group_mapping['security_group_id'] for sec_group_mapping in port_db.security_groups] port_res[ext_sg.SECURITYGROUPS] = security_group_ids return port_res def _process_port_create_security_group(self, context, port, security_groups): self._validate_sgs_for_port(security_groups) if validators.is_attr_set(security_groups): for sg in security_groups: self._create_port_security_group_binding(context, port['id'], sg.id) # Convert to list as a set might be passed here and # this has to be serialized port[ext_sg.SECURITYGROUPS] = ([sg.id for sg in security_groups] if security_groups else []) def _get_default_sg_id(self, context, tenant_id): default_group = sg_obj.DefaultSecurityGroup.get_object( context, project_id=tenant_id, ) if default_group: return default_group.security_group_id @registry.receives(resources.PORT, [events.BEFORE_CREATE, events.BEFORE_UPDATE]) @registry.receives(resources.NETWORK, [events.BEFORE_CREATE]) def _ensure_default_security_group_handler(self, resource, event, trigger, context, **kwargs): if event == events.BEFORE_UPDATE: tenant_id = kwargs['original_' + resource]['tenant_id'] else: tenant_id = kwargs[resource]['tenant_id'] if tenant_id: self._ensure_default_security_group(context, tenant_id) def _ensure_default_security_group(self, context, tenant_id): """Create a default security group if one doesn't exist. :returns: the default security group id for given tenant. """ default_group_id = self._get_default_sg_id(context, tenant_id) if default_group_id: return default_group_id security_group = { 'security_group': {'name': 'default', 'tenant_id': tenant_id, 'description': _('Default security group')} } return self.create_security_group(context, security_group, default_sg=True)['id'] def _get_security_groups_on_port(self, context, port): """Check that all security groups on port belong to tenant. 
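Trusted ports (device owners handled by the network itself, e.g.
DHCP or router interfaces) skip the check entirely.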
:returns: all security groups on port belonging to tenant) """ port = port['port'] if not validators.is_attr_set(port.get(ext_sg.SECURITYGROUPS)): return if port.get('device_owner') and net.is_port_trusted(port): return port_sg = port.get(ext_sg.SECURITYGROUPS, []) tenant_id = port.get('tenant_id') sg_objs = sg_obj.SecurityGroup.get_objects(context, id=port_sg) valid_groups = set( g.id for g in sg_objs if (not tenant_id or g.tenant_id == tenant_id or sg_obj.SecurityGroup.is_shared_with_tenant( context, g.id, tenant_id)) ) requested_groups = set(port_sg) port_sg_missing = requested_groups - valid_groups if port_sg_missing: raise ext_sg.SecurityGroupNotFound(id=', '.join(port_sg_missing)) return sg_objs def _ensure_default_security_group_on_port(self, context, port): # we don't apply security groups for dhcp, router port = port['port'] if port.get('device_owner') and net.is_port_trusted(port): return port_sg = port.get(ext_sg.SECURITYGROUPS) if port_sg is None or not validators.is_attr_set(port_sg): port_project = port.get('tenant_id') default_sg = self._ensure_default_security_group(context, port_project) port[ext_sg.SECURITYGROUPS] = [default_sg] def _check_update_deletes_security_groups(self, port): """Return True if port has as a security group and it's value is either [] or not is_attr_set, otherwise return False """ if (ext_sg.SECURITYGROUPS in port['port'] and not (validators.is_attr_set( port['port'][ext_sg.SECURITYGROUPS]) and port['port'][ext_sg.SECURITYGROUPS] != [])): return True return False def _check_update_has_security_groups(self, port): """Return True if port has security_groups attribute set and its not empty, or False otherwise. This method is called both for port create and port update. """ if (ext_sg.SECURITYGROUPS in port['port'] and (validators.is_attr_set(port['port'][ext_sg.SECURITYGROUPS]) and port['port'][ext_sg.SECURITYGROUPS] != [])): return True return False def update_security_group_on_port(self, context, id, port, original_port, updated_port): """Update security groups on port. This method returns a flag which indicates request notification is required and does not perform notification itself. It is because another changes for the port may require notification. """ need_notify = False port_updates = port['port'] if (ext_sg.SECURITYGROUPS in port_updates and not helpers.compare_elements( original_port.get(ext_sg.SECURITYGROUPS), port_updates[ext_sg.SECURITYGROUPS])): # delete the port binding and read it with the new rules sgs = self._get_security_groups_on_port(context, port) port_updates[ext_sg.SECURITYGROUPS] = [sg.id for sg in sgs] self._delete_port_security_group_bindings(context, id) self._process_port_create_security_group( context, updated_port, sgs) need_notify = True else: updated_port[ext_sg.SECURITYGROUPS] = ( original_port[ext_sg.SECURITYGROUPS]) return need_notify ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/securitygroups_rpc_base.py0000644000175000017500000004670200000000000024552 0ustar00coreycorey00000000000000# Copyright 2012, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as const from neutron_lib.db import api as db_api from neutron_lib.utils import helpers from neutron._i18n import _ from neutron.db.models import allowed_address_pair as aap_models from neutron.db.models import securitygroup as sg_models from neutron.db import models_v2 from neutron.db import securitygroups_db as sg_db from neutron.extensions import securitygroup as ext_sg from neutron.objects import securitygroup as sg_obj DIRECTION_IP_PREFIX = {'ingress': 'source_ip_prefix', 'egress': 'dest_ip_prefix'} DHCP_RULE_PORT = {4: (67, 68, const.IPv4), 6: (547, 546, const.IPv6)} @registry.has_registry_receivers class SecurityGroupServerNotifierRpcMixin(sg_db.SecurityGroupDbMixin): """Mixin class to add agent-based security group implementation.""" @registry.receives(resources.PORT, [events.AFTER_CREATE, events.AFTER_UPDATE, events.AFTER_DELETE]) def notify_sg_on_port_change(self, resource, event, trigger, context, port, *args, **kwargs): """Trigger notification to other SG members on port changes.""" if event == events.AFTER_UPDATE: original_port = kwargs.get('original_port') self.check_and_notify_security_group_member_changed( context, original_port, port) else: self.notify_security_groups_member_updated(context, port) def create_security_group_rule(self, context, security_group_rule): rule = super(SecurityGroupServerNotifierRpcMixin, self).create_security_group_rule(context, security_group_rule) sgids = [rule['security_group_id']] self.notifier.security_groups_rule_updated(context, sgids) return rule def create_security_group_rule_bulk(self, context, security_group_rules): rules = super(SecurityGroupServerNotifierRpcMixin, self).create_security_group_rule_bulk_native( context, security_group_rules) sgids = set([r['security_group_id'] for r in rules]) self.notifier.security_groups_rule_updated(context, list(sgids)) return rules def delete_security_group_rule(self, context, sgrid): rule = self.get_security_group_rule(context, sgrid) super(SecurityGroupServerNotifierRpcMixin, self).delete_security_group_rule(context, sgrid) self.notifier.security_groups_rule_updated(context, [rule['security_group_id']]) def check_and_notify_security_group_member_changed( self, context, original_port, updated_port): sg_change = not helpers.compare_elements( original_port.get(ext_sg.SECURITYGROUPS), updated_port.get(ext_sg.SECURITYGROUPS)) if sg_change: self.notify_security_groups_member_updated_bulk( context, [original_port, updated_port]) elif original_port['fixed_ips'] != updated_port['fixed_ips']: self.notify_security_groups_member_updated(context, updated_port) def is_security_group_member_updated(self, context, original_port, updated_port): """Check security group member updated or not. This method returns a flag which indicates request notification is required and does not perform notification itself. It is because another changes for the port may require notification. 
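A port is treated as changed when its fixed IPs, MAC address or
security group membership differ between the original and updated
versions.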
""" need_notify = False if (original_port['fixed_ips'] != updated_port['fixed_ips'] or original_port['mac_address'] != updated_port['mac_address'] or not helpers.compare_elements( original_port.get(ext_sg.SECURITYGROUPS), updated_port.get(ext_sg.SECURITYGROUPS))): need_notify = True return need_notify def notify_security_groups_member_updated_bulk(self, context, ports): """Notify update event of security group members for ports. The agent setups the iptables rule to allow ingress packet from the dhcp server (as a part of provider rules), so we need to notify an update of dhcp server ip address to the plugin agent. """ sec_groups = set() for port in ports: # NOTE (Swami): ROUTER_INTERFACE_OWNERS check is required # since it includes the legacy router interface device owners # and DVR router interface device owners. if (port['device_owner'] not in [const.DEVICE_OWNER_DHCP, const.ROUTER_INTERFACE_OWNERS]): sec_groups |= set(port.get(ext_sg.SECURITYGROUPS)) if sec_groups: self.notifier.security_groups_member_updated( context, list(sec_groups)) def notify_security_groups_member_updated(self, context, port): self.notify_security_groups_member_updated_bulk(context, [port]) class SecurityGroupInfoAPIMixin(object): """API for retrieving security group info for SG agent code.""" def get_port_from_device(self, context, device): """Get port dict from device name on an agent. Subclass must provide this method or get_ports_from_devices. :param device: device name which identifies a port on the agent side. What is specified in "device" depends on a plugin agent implementation. For example, it is a port ID in OVS agent and netdev name in Linux Bridge agent. :return: port dict returned by DB plugin get_port(). In addition, it must contain the following fields in the port dict returned. - device - security_groups - security_group_rules, - security_group_source_groups - fixed_ips """ raise NotImplementedError(_("%s must implement get_port_from_device " "or get_ports_from_devices.") % self.__class__.__name__) def get_ports_from_devices(self, context, devices): """Bulk method of get_port_from_device. Subclasses may override this to provide better performance for DB queries, backend calls, etc. 
""" return [self.get_port_from_device(context, device) for device in devices] def security_group_info_for_ports(self, context, ports): sg_info = {'devices': ports, 'security_groups': {}, 'sg_member_ips': {}} rules_in_db = self._select_rules_for_ports(context, ports) remote_security_group_info = {} for (port_id, rule_in_db) in rules_in_db: remote_gid = rule_in_db.get('remote_group_id') security_group_id = rule_in_db.get('security_group_id') ethertype = rule_in_db['ethertype'] if ('security_group_source_groups' not in sg_info['devices'][port_id]): sg_info['devices'][port_id][ 'security_group_source_groups'] = [] if remote_gid: if (remote_gid not in sg_info['devices'][port_id][ 'security_group_source_groups']): sg_info['devices'][port_id][ 'security_group_source_groups'].append(remote_gid) if remote_gid not in remote_security_group_info: remote_security_group_info[remote_gid] = {} if ethertype not in remote_security_group_info[remote_gid]: # this set will be serialized into a list by rpc code remote_security_group_info[remote_gid][ethertype] = set() direction = rule_in_db['direction'] stateful = self._is_security_group_stateful(context, security_group_id) rule_dict = { 'direction': direction, 'ethertype': ethertype, 'stateful': stateful} for key in ('protocol', 'port_range_min', 'port_range_max', 'remote_ip_prefix', 'remote_group_id'): if rule_in_db.get(key) is not None: if key == 'remote_ip_prefix': direction_ip_prefix = DIRECTION_IP_PREFIX[direction] rule_dict[direction_ip_prefix] = rule_in_db[key] continue rule_dict[key] = rule_in_db[key] if security_group_id not in sg_info['security_groups']: sg_info['security_groups'][security_group_id] = [] if rule_dict not in sg_info['security_groups'][security_group_id]: sg_info['security_groups'][security_group_id].append( rule_dict) # Update the security groups info if they don't have any rules sg_ids = self._select_sg_ids_for_ports(context, ports) for (sg_id, ) in sg_ids: if sg_id not in sg_info['security_groups']: sg_info['security_groups'][sg_id] = [] sg_info['sg_member_ips'] = remote_security_group_info # the provider rules do not belong to any security group, so these # rules still reside in sg_info['devices'] [port_id] self._apply_provider_rule(context, sg_info['devices']) return self._get_security_group_member_ips(context, sg_info) def _get_security_group_member_ips(self, context, sg_info): ips = self._select_ips_for_remote_group( context, sg_info['sg_member_ips'].keys()) for sg_id, member_ips in ips.items(): for ip in member_ips: ethertype = 'IPv%d' % netaddr.IPNetwork(ip).version if ethertype in sg_info['sg_member_ips'][sg_id]: sg_info['sg_member_ips'][sg_id][ethertype].add(ip) return sg_info def _select_remote_group_ids(self, ports): remote_group_ids = [] for port in ports.values(): for rule in port.get('security_group_rules'): remote_group_id = rule.get('remote_group_id') if remote_group_id: remote_group_ids.append(remote_group_id) return remote_group_ids def _convert_remote_group_id_to_ip_prefix(self, context, ports): remote_group_ids = self._select_remote_group_ids(ports) ips = self._select_ips_for_remote_group(context, remote_group_ids) for port in ports.values(): updated_rule = [] for rule in port.get('security_group_rules'): remote_group_id = rule.get('remote_group_id') direction = rule.get('direction') direction_ip_prefix = DIRECTION_IP_PREFIX[direction] if not remote_group_id: updated_rule.append(rule) continue port['security_group_source_groups'].append(remote_group_id) base_rule = rule for ip in ips[remote_group_id]: if ip in 
port.get('fixed_ips', []): continue ip_rule = base_rule.copy() version = netaddr.IPNetwork(ip).version ethertype = 'IPv%s' % version if base_rule['ethertype'] != ethertype: continue ip_rule[direction_ip_prefix] = str( netaddr.IPNetwork(ip).cidr) updated_rule.append(ip_rule) port['security_group_rules'] = updated_rule return ports def _add_ingress_dhcp_rule(self, port): for ip_version in (4, 6): # only allow DHCP servers to talk to the appropriate IP address # to avoid getting leases that don't match the Neutron IPs prefix = '32' if ip_version == 4 else '128' dests = ['%s/%s' % (ip, prefix) for ip in port['fixed_ips'] if netaddr.IPNetwork(ip).version == ip_version] if ip_version == 4: # v4 dhcp servers can also talk to broadcast dests.append('255.255.255.255/32') elif ip_version == 6: # v6 dhcp responses can target link-local addresses dests.append('fe80::/64') source_port, dest_port, ethertype = DHCP_RULE_PORT[ip_version] for dest in dests: dhcp_rule = {'direction': 'ingress', 'ethertype': ethertype, 'protocol': 'udp', 'port_range_min': dest_port, 'port_range_max': dest_port, 'source_port_range_min': source_port, 'source_port_range_max': source_port, 'dest_ip_prefix': dest} port['security_group_rules'].append(dhcp_rule) def _add_ingress_ra_rule(self, port): has_v6 = [ip for ip in port['fixed_ips'] if netaddr.IPNetwork(ip).version == 6] if not has_v6: return ra_rule = {'direction': 'ingress', 'ethertype': const.IPv6, 'protocol': const.PROTO_NAME_IPV6_ICMP, 'source_port_range_min': const.ICMPV6_TYPE_RA} port['security_group_rules'].append(ra_rule) def _apply_provider_rule(self, context, ports): for port in ports.values(): self._add_ingress_ra_rule(port) self._add_ingress_dhcp_rule(port) def security_group_rules_for_ports(self, context, ports): rules_in_db = self._select_rules_for_ports(context, ports) for (port_id, rule_in_db) in rules_in_db: port = ports[port_id] direction = rule_in_db['direction'] rule_dict = { 'security_group_id': rule_in_db['security_group_id'], 'direction': direction, 'ethertype': rule_in_db['ethertype'], } for key in ('protocol', 'port_range_min', 'port_range_max', 'remote_ip_prefix', 'remote_group_id'): if rule_in_db.get(key) is not None: if key == 'remote_ip_prefix': direction_ip_prefix = DIRECTION_IP_PREFIX[direction] rule_dict[direction_ip_prefix] = rule_in_db[key] continue rule_dict[key] = rule_in_db[key] port['security_group_rules'].append(rule_dict) self._apply_provider_rule(context, ports) return self._convert_remote_group_id_to_ip_prefix(context, ports) def _select_ips_for_remote_group(self, context, remote_group_ids): """Get all IP addresses (including allowed addr pairs) for each sg. Return dict of lists of IPs keyed by group_id. """ raise NotImplementedError() def _select_rules_for_ports(self, context, ports): """Get all security group rules associated with a list of ports. Return list of tuples of (port_id, sg_rule) """ raise NotImplementedError() def _select_sg_ids_for_ports(self, context, ports): """Return security group IDs for a list of ports. Return list of tuples with a single element of sg_id. """ raise NotImplementedError() def _is_security_group_stateful(self, context, sg_id): """Return whether the security group is stateful or not. Return True if the security group associated with the given ID is stateful, else False. 
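The base implementation assumes stateful and always returns True;
SecurityGroupServerRpcMixin overrides this with a DB lookup of the
security group's 'stateful' flag.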
""" return True class SecurityGroupServerRpcMixin(SecurityGroupInfoAPIMixin, SecurityGroupServerNotifierRpcMixin): """Server-side RPC mixin using DB for SG notifications and responses.""" @db_api.retry_if_session_inactive() def _select_sg_ids_for_ports(self, context, ports): if not ports: return [] sg_binding_port = sg_models.SecurityGroupPortBinding.port_id sg_binding_sgid = sg_models.SecurityGroupPortBinding.security_group_id query = context.session.query(sg_binding_sgid) query = query.filter(sg_binding_port.in_(ports.keys())) return query.all() @db_api.retry_if_session_inactive() def _select_rules_for_ports(self, context, ports): if not ports: return [] sg_binding_port = sg_models.SecurityGroupPortBinding.port_id sg_binding_sgid = sg_models.SecurityGroupPortBinding.security_group_id sgr_sgid = sg_models.SecurityGroupRule.security_group_id query = context.session.query(sg_binding_port, sg_models.SecurityGroupRule) query = query.join(sg_models.SecurityGroupRule, sgr_sgid == sg_binding_sgid) query = query.filter(sg_binding_port.in_(ports.keys())) return query.all() @db_api.retry_if_session_inactive() def _select_ips_for_remote_group(self, context, remote_group_ids): ips_by_group = {} if not remote_group_ids: return ips_by_group for remote_group_id in remote_group_ids: ips_by_group[remote_group_id] = set() ip_port = models_v2.IPAllocation.port_id sg_binding_port = sg_models.SecurityGroupPortBinding.port_id sg_binding_sgid = sg_models.SecurityGroupPortBinding.security_group_id # Join the security group binding table directly to the IP allocation # table instead of via the Port table skip an unnecessary intermediary query = context.session.query(sg_binding_sgid, models_v2.IPAllocation.ip_address, aap_models.AllowedAddressPair.ip_address) query = query.join(models_v2.IPAllocation, ip_port == sg_binding_port) # Outerjoin because address pairs may be null and we still want the # IP for the port. query = query.outerjoin( aap_models.AllowedAddressPair, sg_binding_port == aap_models.AllowedAddressPair.port_id) query = query.filter(sg_binding_sgid.in_(remote_group_ids)) # Each allowed address pair IP record for a port beyond the 1st # will have a duplicate regular IP in the query response since # the relationship is 1-to-many. Dedup with a set for security_group_id, ip_address, allowed_addr_ip in query: ips_by_group[security_group_id].add(ip_address) if allowed_addr_ip: ips_by_group[security_group_id].add(allowed_addr_ip) return ips_by_group @db_api.retry_if_session_inactive() def _is_security_group_stateful(self, context, sg_id): return sg_obj.SecurityGroup.get_sg_by_id(context, sg_id).stateful ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/segments_db.py0000644000175000017500000001712500000000000022074 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.api.definitions import segment as segment_def from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib.db import api as db_api from neutron_lib.plugins.ml2 import api as ml2_api from oslo_log import log as logging from oslo_utils import uuidutils from neutron.objects import base as base_obj from neutron.objects import network as network_obj from neutron.services.segments import exceptions as segments_exceptions LOG = logging.getLogger(__name__) NETWORK_TYPE = segment_def.NETWORK_TYPE PHYSICAL_NETWORK = segment_def.PHYSICAL_NETWORK SEGMENTATION_ID = segment_def.SEGMENTATION_ID NETWORK_ID = 'network_id' def _make_segment_dict(obj): """Make a segment dictionary out of an object.""" return {'id': obj.id, NETWORK_TYPE: obj.network_type, PHYSICAL_NETWORK: obj.physical_network, SEGMENTATION_ID: obj.segmentation_id, NETWORK_ID: obj.network_id} def add_network_segment(context, network_id, segment, segment_index=0, is_dynamic=False): with db_api.CONTEXT_WRITER.using(context): netseg_obj = network_obj.NetworkSegment( context, id=uuidutils.generate_uuid(), network_id=network_id, network_type=segment.get(NETWORK_TYPE), physical_network=segment.get(PHYSICAL_NETWORK), segmentation_id=segment.get(SEGMENTATION_ID), segment_index=segment_index, is_dynamic=is_dynamic) netseg_obj.create() registry.notify(resources.SEGMENT, events.PRECOMMIT_CREATE, trigger=add_network_segment, context=context, segment=netseg_obj) segment['id'] = netseg_obj.id LOG.info("Added segment %(id)s of type %(network_type)s for network " "%(network_id)s", {'id': netseg_obj.id, 'network_type': netseg_obj.network_type, 'network_id': netseg_obj.network_id}) def update_network_segment(context, segment_id, segmentation_id): with db_api.CONTEXT_WRITER.using(context): netseg_obj = network_obj.NetworkSegment.get_object(context, id=segment_id) if not netseg_obj: raise segments_exceptions.SegmentNotFound(segment_id=segment_id) netseg_obj[ml2_api.SEGMENTATION_ID] = segmentation_id netseg_obj.update() LOG.info("Updated segment %(id)s, segmentation_id: %(segmentation_id)s)", {'id': segment_id, 'segmentation_id': segmentation_id}) def get_network_segments(context, network_id, filter_dynamic=False): return get_networks_segments( context, [network_id], filter_dynamic)[network_id] def get_networks_segments(context, network_ids, filter_dynamic=False): if not network_ids: return {} with db_api.CONTEXT_READER.using(context): filters = { 'network_id': network_ids, } if filter_dynamic is not None: filters['is_dynamic'] = filter_dynamic objs = network_obj.NetworkSegment.get_objects(context, **filters) result = {net_id: [] for net_id in network_ids} for record in objs: result[record.network_id].append(_make_segment_dict(record)) return result def get_segment_by_id(context, segment_id): with db_api.CONTEXT_READER.using(context): net_obj = network_obj.NetworkSegment.get_object(context, id=segment_id) if net_obj: return _make_segment_dict(net_obj) def get_dynamic_segment(context, network_id, physical_network=None, segmentation_id=None): """Return a dynamic segment for the filters provided if one exists.""" with db_api.CONTEXT_READER.using(context): filters = { 'network_id': network_id, 'is_dynamic': True, } if physical_network: filters['physical_network'] = physical_network if segmentation_id: filters['segmentation_id'] = segmentation_id pager = base_obj.Pager(limit=1) objs = network_obj.NetworkSegment.get_objects( context, _pager=pager, **filters) if objs: 
return _make_segment_dict(objs[0]) else: LOG.debug("No dynamic segment found for " "Network:%(network_id)s, " "Physical network:%(physnet)s, " "segmentation_id:%(segmentation_id)s", {'network_id': network_id, 'physnet': physical_network, 'segmentation_id': segmentation_id}) def delete_network_segment(context, segment_id): """Release a dynamic segment for the params provided if one exists.""" with db_api.CONTEXT_WRITER.using(context): network_obj.NetworkSegment.delete_objects(context, id=segment_id) def network_segments_exist_in_range(context, network_type, physical_network, segment_range=None): """Check whether one or more network segments exist in a range.""" with db_api.CONTEXT_READER.using(context): filters = { 'network_type': network_type, 'physical_network': physical_network, } segment_objs = network_obj.NetworkSegment.get_objects( context, **filters) if segment_range: minimum_id = segment_range['minimum'] maximum_id = segment_range['maximum'] segment_objs = [ segment for segment in segment_objs if minimum_id <= segment.segmentation_id <= maximum_id] return len(segment_objs) > 0 def min_max_actual_segments_in_range(context, network_type, physical_network, segment_range=None): """Return the minimum and maximum segmentation IDs used in a network segment range """ with db_api.CONTEXT_READER.using(context): filters = { 'network_type': network_type, 'physical_network': physical_network, } pager = base_obj.Pager() # (NOTE) True means ASC, False is DESC pager.sorts = [('segmentation_id', True)] segment_objs = network_obj.NetworkSegment.get_objects( context, _pager=pager, **filters) if segment_range: minimum_id = segment_range['minimum'] maximum_id = segment_range['maximum'] segment_objs = [ segment for segment in segment_objs if minimum_id <= segment.segmentation_id <= maximum_id] if segment_objs: return (segment_objs[0].segmentation_id, segment_objs[-1].segmentation_id) else: LOG.debug("No existing segment found for " "Network type:%(network_type)s, " "Physical network:%(physical_network)s", {'network_type': network_type, 'physical_network': physical_network}) return None, None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/servicetype_db.py0000644000175000017500000001057700000000000022615 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
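# Illustrative usage sketch; the service type name and the provider
# configuration object are placeholders and would normally come from
# the service_providers options in neutron.conf:
#
#     manager = ServiceTypeManager.get_instance()
#     manager.add_provider_configuration('L3_ROUTER_NAT', provider_conf)
#     providers = manager.get_service_providers(
#         context, filters={'service_type': ['L3_ROUTER_NAT']})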
from itertools import chain from oslo_log import log as logging from neutron.objects import servicetype as servicetype_obj from neutron.services import provider_configuration as pconf LOG = logging.getLogger(__name__) class ServiceTypeManager(object): """Manage service type objects in Neutron.""" _instance = None @classmethod def get_instance(cls): if cls._instance is None: cls._instance = cls() return cls._instance def __init__(self): self.config = {} def add_provider_configuration(self, service_type, configuration): """Add or update the provider configuration for the service type.""" LOG.debug('Adding provider configuration for service %s', service_type) self.config.update({service_type: configuration}) def get_service_providers(self, context, filters=None, fields=None): if filters and 'service_type' in filters: return list(chain.from_iterable( self.config[svc_type].get_service_providers(filters, fields) for svc_type in filters['service_type'] if svc_type in self.config) ) return list( chain.from_iterable( self.config[p].get_service_providers(filters, fields) for p in self.config) ) def get_default_service_provider(self, context, service_type): """Return the default provider for a given service type.""" filters = {'service_type': [service_type], 'default': [True]} providers = self.get_service_providers(context, filters=filters) # By construction we expect at most a single item in provider if not providers: raise pconf.DefaultServiceProviderNotFound( service_type=service_type ) return providers[0] def get_provider_names_by_resource_ids(self, context, resource_ids): objs = servicetype_obj.ProviderResourceAssociation.get_objects( context, resource_id=resource_ids) return {rec.resource_id: rec.provider_name for rec in objs} def add_resource_association(self, context, service_type, provider_name, resource_id, expire_session=True): r = self.get_service_providers(context, filters={'service_type': [service_type], 'name': [provider_name]}) if not r: raise pconf.ServiceProviderNotFound(provider=provider_name, service_type=service_type) # we don't actually need service type for association. # resource_id is unique and belongs to specific service # which knows its type servicetype_obj.ProviderResourceAssociation( context, provider_name=provider_name, resource_id=resource_id).create() # NOTE(blogan): the ProviderResourceAssociation relationship will not # be populated if a resource was created before this. The expire_all # will force the session to go retrieve the new data when that # resource will be read again. It has been suggested that we can # crawl through everything in the mapper to find the resource with # the ID that matches resource_id and expire that one, but we can # just start with this. # NOTE(ralonsoh): to be removed once the new engine facade is fully # implanted in Neutron. if expire_session: context.session.expire_all() def del_resource_associations(self, context, resource_ids): if not resource_ids: return servicetype_obj.ProviderResourceAssociation.delete_objects( context, resource_id=resource_ids) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/standard_attr.py0000644000175000017500000000165700000000000022437 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from neutron_lib.db import standard_attr # TODO(boden): remove shims when all consumers use lib for this module StandardAttribute = standard_attr.StandardAttribute HasStandardAttributes = standard_attr.HasStandardAttributes get_standard_attr_resource_model_map = ( standard_attr.get_standard_attr_resource_model_map) get_tag_resource_parent_map = standard_attr.get_tag_resource_parent_map ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/standardattrdescription_db.py0000644000175000017500000000214500000000000025202 0ustar00coreycorey00000000000000# All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import resource_extend from neutron.db import standard_attr @resource_extend.has_resource_extenders class StandardAttrDescriptionMixin(object): supported_extension_aliases = ['standard-attr-description'] @staticmethod @resource_extend.extends( list(standard_attr.get_standard_attr_resource_model_map())) def _extend_standard_attr_description(res, db_object): if not hasattr(db_object, 'description'): return res['description'] = db_object.description ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/subnet_service_type_mixin.py0000644000175000017500000000233700000000000025066 0ustar00coreycorey00000000000000# Copyright 2016 Hewlett Packard Enterprise Development Company, LP # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
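# The mixin below adds a read-only 'service_types' list to subnet API
# responses, e.g. (illustrative value only):
#
#     {'id': ..., 'cidr': '192.0.2.0/24',
#      'service_types': ['network:floatingip_agent_gateway']}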
from neutron_lib.api.definitions import subnet as subnet_def from neutron_lib.db import resource_extend @resource_extend.has_resource_extenders class SubnetServiceTypeMixin(object): """Mixin class to extend subnet with service type attribute""" @staticmethod @resource_extend.extends([subnet_def.COLLECTION_NAME]) def _extend_subnet_service_types(subnet_res, subnet_db): subnet_res['service_types'] = [service_type['service_type'] for service_type in subnet_db.service_types] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/uplink_status_propagation_db.py0000644000175000017500000000256100000000000025555 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import uplink_status_propagation as usp from neutron.objects.port.extensions import uplink_status_propagation as \ usp_obj class UplinkStatusPropagationMixin(object): """Mixin class to add uplink propagation to a port""" def _process_create_port(self, context, data, res): obj = usp_obj.PortUplinkStatusPropagation( context, port_id=res['id'], propagate_uplink_status=data[usp.PROPAGATE_UPLINK_STATUS]) obj.create() res[usp.PROPAGATE_UPLINK_STATUS] = data[usp.PROPAGATE_UPLINK_STATUS] @staticmethod def _extend_port_dict(port_res, port_db): usp_db = port_db.get(usp.PROPAGATE_UPLINK_STATUS) port_res[usp.PROPAGATE_UPLINK_STATUS] = ( usp_db.propagate_uplink_status if usp_db else False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/db/vlantransparent_db.py0000644000175000017500000000232500000000000023465 0ustar00coreycorey00000000000000# Copyright (c) 2015 Cisco Systems, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
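# The mixin below copies the network's vlan_transparent DB flag into
# the 'vlan_transparent' field of network API responses; no other
# state is involved.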
from neutron_lib.api.definitions import network as net_def from neutron_lib.api.definitions import vlantransparent as vlan_apidef from neutron_lib.db import resource_extend @resource_extend.has_resource_extenders class Vlantransparent_db_mixin(object): """Mixin class to add vlan transparent methods to db_base_plugin_v2.""" @staticmethod @resource_extend.extends([net_def.COLLECTION_NAME]) def _extend_network_dict_vlan_transparent(network_res, network_db): network_res[vlan_apidef.VLANTRANSPARENT] = ( network_db.vlan_transparent) return network_res ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.2950444 neutron-16.0.0.0b2.dev214/neutron/debug/0000755000175000017500000000000000000000000017723 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/debug/README0000644000175000017500000000271200000000000020605 0ustar00coreycorey00000000000000Debug Helper Script for Neutron - Configure export NEUTRON_TEST_CONFIG_FILE=/etc/neutron/debug.ini or export NEUTRON_TEST_CONFIG_FILE=/etc/neutron/l3_agent.ini you can also specify config file by --config-file option - Usage neutron-debug commands probe-create Create probe port - create port and interface, then plug it in. This commands returns a port id of a probe port. A probe port is a port which is used to test. The port id is probe id. We can have multiple probe probes in a network, in order to check connectivity between ports. neutron-debug probe-exec probe_id_1 'nc -l 192.168.100.3 22' neutron-debug probe-exec probe_id_2 'nc -vz 192.168.100.4 22' Note: You should use a user and a tenant who has permission to modify network and subnet if you want to probe. For example, you need to be admin user if you want to probe external network. probe-delete Delete probe - delete port then uplug probe-exec 'command' Exec commands on the namespace of the probe `probe-exec ` 'interactive command' Exec interactive command (eg, ssh) probe-list List probes probe-clear Clear All probes ping-all --id --timeout 1 (optional) ping-all is all-in-one command to ping all fixed ip's in all network or a specified network. In the command probe is automatically created if needed. neutron-debug extends the shell of neutronclient, so you can use all the commands of neutron ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/debug/__init__.py0000644000175000017500000000000000000000000022022 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/debug/commands.py0000644000175000017500000001046500000000000022104 0ustar00coreycorey00000000000000# Copyright 2012, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
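# A typical probe session with the commands defined below (IDs are
# placeholders; see the README above):
#
#     neutron-debug probe-create <network_id>
#     neutron-debug probe-exec <probe_id> 'nc -l 192.168.100.3 22'
#     neutron-debug probe-delete <probe_id>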
from cliff import lister from neutronclient.common import utils from neutronclient.neutron import v2_0 as client from neutronclient.neutron.v2_0 import port from neutron._i18n import _ class ProbeCommand(client.NeutronCommand): def get_debug_agent(self): return self.app.debug_agent class CreateProbe(ProbeCommand): """Create probe port and interface, then plug it in.""" def get_parser(self, prog_name): parser = super(CreateProbe, self).get_parser(prog_name) parser.add_argument( 'id', metavar='network_id', help=_('ID of network to probe')) parser.add_argument( '--device-owner', default='network', choices=['network', 'compute'], help=_('Owner type of the device: network/compute')) return parser def take_action(self, parsed_args): debug_agent = self.get_debug_agent() probe_port = debug_agent.create_probe(parsed_args.id, parsed_args.device_owner) self.log.info(_('Probe created : %s '), probe_port.id) class DeleteProbe(ProbeCommand): """Delete probe - delete port then uplug.""" def get_parser(self, prog_name): parser = super(DeleteProbe, self).get_parser(prog_name) parser.add_argument( 'id', metavar='port_id', help=_('ID of probe port to delete')) return parser def take_action(self, parsed_args): debug_agent = self.get_debug_agent() debug_agent.delete_probe(parsed_args.id) self.log.info(_('Probe %s deleted'), parsed_args.id) class ListProbe(ProbeCommand, lister.Lister): """List probes.""" _formatters = {'fixed_ips': port._format_fixed_ips, } def take_action(self, parsed_args): debug_agent = self.get_debug_agent() info = debug_agent.list_probes() columns = sorted(info[0].keys()) if info else [] return (columns, (utils.get_item_properties( s, columns, formatters=self._formatters, ) for s in info), ) class ClearProbe(ProbeCommand): """Clear All probes.""" def take_action(self, parsed_args): debug_agent = self.get_debug_agent() cleared_probes_count = debug_agent.clear_probes() self.log.info('%d probe(s) deleted', cleared_probes_count) class ExecProbe(ProbeCommand): """Exec commands on the namespace of the probe.""" def get_parser(self, prog_name): parser = super(ExecProbe, self).get_parser(prog_name) parser.add_argument( 'id', metavar='port_id', help=_('ID of probe port to execute command')) parser.add_argument( 'command', metavar='command', nargs='?', default=None, help=_('Command to execute')) return parser def take_action(self, parsed_args): debug_agent = self.get_debug_agent() result = debug_agent.exec_command(parsed_args.id, parsed_args.command) self.app.stdout.write(result + '\n') class PingAll(ProbeCommand): """Ping all fixed_ip.""" def get_parser(self, prog_name): parser = super(PingAll, self).get_parser(prog_name) parser.add_argument( '--timeout', metavar='', default=10, help=_('Ping timeout')) parser.add_argument( '--id', metavar='network_id', default=None, help=_('ID of network')) return parser def take_action(self, parsed_args): debug_agent = self.get_debug_agent() result = debug_agent.ping_all(parsed_args.id, timeout=parsed_args.timeout) self.app.stdout.write(result + '\n') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/debug/debug_agent.py0000644000175000017500000001545400000000000022552 0ustar00coreycorey00000000000000# Copyright 2012, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import shlex import socket import netaddr from neutron_lib.api.definitions import portbindings from neutron_lib import constants from oslo_log import log as logging from neutron.agent.linux import dhcp from neutron.agent.linux import ip_lib LOG = logging.getLogger(__name__) DEVICE_OWNER_NETWORK_PROBE = constants.DEVICE_OWNER_NETWORK_PREFIX + 'probe' DEVICE_OWNER_COMPUTE_PROBE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'probe' class NeutronDebugAgent(object): def __init__(self, conf, client, driver): self.conf = conf self.client = client self.driver = driver def _get_namespace(self, port): return "qprobe-%s" % port.id def create_probe(self, network_id, device_owner='network'): network = self._get_network(network_id) port = self._create_port(network, device_owner) interface_name = self.driver.get_device_name(port) namespace = self._get_namespace(port) if ip_lib.device_exists(interface_name, namespace=namespace): LOG.debug('Reusing existing device: %s.', interface_name) else: self.driver.plug(network.id, port.id, interface_name, port.mac_address, namespace=namespace) ip_cidrs = [] for fixed_ip in port.fixed_ips: subnet = fixed_ip.subnet net = netaddr.IPNetwork(subnet.cidr) ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen) ip_cidrs.append(ip_cidr) self.driver.init_l3(interface_name, ip_cidrs, namespace=namespace) return port def _get_subnet(self, subnet_id): subnet_dict = self.client.show_subnet(subnet_id)['subnet'] return dhcp.DictModel(subnet_dict) def _get_network(self, network_id): network_dict = self.client.show_network(network_id)['network'] network = dhcp.DictModel(network_dict) # pylint: disable=assigning-non-slot network.external = network_dict.get('router:external') obj_subnet = [self._get_subnet(s_id) for s_id in network.subnets] network.subnets = obj_subnet return network def clear_probes(self): """Returns number of deleted probes""" ports = self.client.list_ports( device_id=socket.gethostname(), device_owner=[DEVICE_OWNER_NETWORK_PROBE, DEVICE_OWNER_COMPUTE_PROBE]) info = ports['ports'] for port in info: self.delete_probe(port['id']) return len(info) def delete_probe(self, port_id): port = dhcp.DictModel(self.client.show_port(port_id)['port']) namespace = self._get_namespace(port) if ip_lib.network_namespace_exists(namespace): self.driver.unplug(self.driver.get_device_name(port), namespace=namespace) try: ip_lib.delete_network_namespace(namespace) except Exception: LOG.warning('Failed to delete namespace %s', namespace) else: self.driver.unplug(self.driver.get_device_name(port)) self.client.delete_port(port.id) def list_probes(self): ports = self.client.list_ports( device_owner=[DEVICE_OWNER_NETWORK_PROBE, DEVICE_OWNER_COMPUTE_PROBE]) info = ports['ports'] for port in info: port['device_name'] = self.driver.get_device_name( dhcp.DictModel(port)) return info def exec_command(self, port_id, command=None): port = dhcp.DictModel(self.client.show_port(port_id)['port']) ip = ip_lib.IPWrapper() namespace = self._get_namespace(port) if not command: return "sudo ip netns exec %s" % self._get_namespace(port) namespace = ip.ensure_namespace(namespace) return 
namespace.netns.execute(shlex.split(command)) def ensure_probe(self, network_id): ports = self.client.list_ports(network_id=network_id, device_id=socket.gethostname(), device_owner=DEVICE_OWNER_NETWORK_PROBE) info = ports.get('ports', []) if info: return dhcp.DictModel(info[0]) else: return self.create_probe(network_id) def ping_all(self, network_id=None, timeout=1): if network_id: ports = self.client.list_ports(network_id=network_id)['ports'] else: ports = self.client.list_ports()['ports'] result = "" for port in ports: probe = self.ensure_probe(port['network_id']) if port['device_owner'] == DEVICE_OWNER_NETWORK_PROBE: continue for fixed_ip in port['fixed_ips']: address = fixed_ip['ip_address'] subnet = self._get_subnet(fixed_ip['subnet_id']) if subnet.ip_version == 4: ping_command = 'ping' else: ping_command = 'ping6' result += self.exec_command(probe.id, '%s -c 1 -w %s %s' % (ping_command, timeout, address)) return result def _create_port(self, network, device_owner): host = self.conf.host body = {'port': {'admin_state_up': True, 'network_id': network.id, 'device_id': '%s' % socket.gethostname(), 'device_owner': '%s:probe' % device_owner, 'tenant_id': network.tenant_id, portbindings.HOST_ID: host, 'fixed_ips': [dict(subnet_id=s.id) for s in network.subnets]}} port_dict = self.client.create_port(body)['port'] port = dhcp.DictModel(port_dict) # pylint: disable=assigning-non-slot port.network = network for fixed_ip in port.fixed_ips: fixed_ip.subnet = self._get_subnet(fixed_ip.subnet_id) return port ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/debug/shell.py0000644000175000017500000000677400000000000021422 0ustar00coreycorey00000000000000# Copyright 2012, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
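# NOTE(editor): minimal, self-contained sketch, not part of the original
# tree. ping_all() above issues one ICMP echo per fixed IP, choosing ping
# or ping6 from the subnet's IP version. The command construction it uses
# can be rendered on its own with just netaddr (already a neutron
# dependency); the addresses below are documentation placeholders.
import netaddr

def build_ping_command(address, timeout=1):
    # Mirror the version check ping_all() performs via the subnet object.
    version = netaddr.IPAddress(address).version
    ping_command = 'ping' if version == 4 else 'ping6'
    return '%s -c 1 -w %s %s' % (ping_command, timeout, address)

assert build_ping_command('192.0.2.10') == 'ping -c 1 -w 1 192.0.2.10'
assert build_ping_command('2001:db8::1') == 'ping6 -c 1 -w 1 2001:db8::1'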
import sys from oslo_config import cfg from oslo_utils import importutils from neutron._i18n import _ from neutron.agent.common import utils from neutron.conf.agent import common as config from neutron.conf.plugins.ml2.drivers import ovs_conf from neutron.debug import debug_agent from neutronclient.common import exceptions as exc from neutronclient import shell COMMAND_V2 = { 'probe-create': importutils.import_class( 'neutron.debug.commands.CreateProbe'), 'probe-delete': importutils.import_class( 'neutron.debug.commands.DeleteProbe'), 'probe-list': importutils.import_class( 'neutron.debug.commands.ListProbe'), 'probe-clear': importutils.import_class( 'neutron.debug.commands.ClearProbe'), 'probe-exec': importutils.import_class( 'neutron.debug.commands.ExecProbe'), 'ping-all': importutils.import_class( 'neutron.debug.commands.PingAll'), # TODO(nati) ping, netcat, nmap, bench } COMMANDS = {'2.0': COMMAND_V2} class NeutronDebugShell(shell.NeutronShell): def __init__(self, api_version): super(NeutronDebugShell, self).__init__(api_version) for k, v in COMMANDS[api_version].items(): self.command_manager.add_command(k, v) def build_option_parser(self, description, version): parser = super(NeutronDebugShell, self).build_option_parser( description, version) default = ( shell.env('NEUTRON_TEST_CONFIG_FILE') or shell.env('QUANTUM_TEST_CONFIG_FILE') ) parser.add_argument( '--config-file', default=default, help=_('Config file for interface driver ' '(You may also use l3_agent.ini)')) return parser def initialize_app(self, argv): super(NeutronDebugShell, self).initialize_app(argv) if not self.options.config_file: raise exc.CommandError( _("You must provide a config file for the interface driver -" " either --config-file or env[NEUTRON_TEST_CONFIG_FILE]")) client = self.client_manager.neutron config.register_interface_opts() config.register_interface_driver_opts_helper(cfg.CONF) ovs_conf.register_ovs_opts(cfg.CONF) cfg.CONF(['--config-file', self.options.config_file]) config.setup_logging() driver = utils.load_interface_driver(cfg.CONF) self.debug_agent = debug_agent.NeutronDebugAgent(cfg.CONF, client, driver) self.log.warning('This tool is deprecated and will be removed ' 'in the future to be replaced with a more ' 'powerful troubleshooting toolkit.') def main(argv=None): return NeutronDebugShell(shell.NEUTRON_API_VERSION).run( argv or sys.argv[1:]) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3110447 neutron-16.0.0.0b2.dev214/neutron/extensions/0000755000175000017500000000000000000000000021034 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/__init__.py0000644000175000017500000000000000000000000023133 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/_admin_state_down_before_update_lib.py0000644000175000017500000000244300000000000030601 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ TODO(mattw4): This module should be deleted once neutron-lib containing https://review.openstack.org/#/c/634509/ change is released. """ from neutron_lib.api.definitions import l3 as l3_apidef ALIAS = 'router-admin-state-down-before-update' IS_SHIM_EXTENSION = True IS_STANDARD_ATTR_EXTENSION = False NAME = "Enforce Router's Admin State Down Before Update Extension" DESCRIPTION = ('Ensure that the admin state of a router is down ' '(admin_state_up=False) before updating the distributed ' 'attribute') UPDATED_TIMESTAMP = '2019-04-08 13:30:00' RESOURCE_ATTRIBUTE_MAP = {} SUB_RESOURCE_ATTRIBUTE_MAP = {} ACTION_MAP = {} REQUIRED_EXTENSIONS = [l3_apidef.ALIAS] OPTIONAL_EXTENSIONS = [] ACTION_STATUS = {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/_availability_zone_filter_lib.py0000644000175000017500000000225100000000000027445 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ TODO(hongbin): This module should be deleted once neutron-lib containing https://review.opendev.org/#/c/577545/ change is released. """ from neutron_lib.api.definitions import availability_zone as az ALIAS = 'availability_zone_filter' IS_SHIM_EXTENSION = True IS_STANDARD_ATTR_EXTENSION = False NAME = 'Availability Zone Filter Extension' DESCRIPTION = 'Add filter parameters to AvailabilityZone resource' UPDATED_TIMESTAMP = '2018-06-22T10:00:00-00:00' RESOURCE_ATTRIBUTE_MAP = {} SUB_RESOURCE_ATTRIBUTE_MAP = {} ACTION_MAP = {} REQUIRED_EXTENSIONS = [ az.ALIAS ] OPTIONAL_EXTENSIONS = [] ACTION_STATUS = {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/_filter_validation_lib.py0000644000175000017500000000206500000000000026075 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This module should be deleted once neutron-lib containing https://review.opendev.org/#/c/580190/ change is released. 
""" ALIAS = 'filter-validation' IS_SHIM_EXTENSION = True IS_STANDARD_ATTR_EXTENSION = False NAME = 'Filter parameters validation' DESCRIPTION = 'Provides validation on filter parameters.' UPDATED_TIMESTAMP = '2018-03-21T10:00:00-00:00' RESOURCE_ATTRIBUTE_MAP = {} SUB_RESOURCE_ATTRIBUTE_MAP = {} ACTION_MAP = {} REQUIRED_EXTENSIONS = [] OPTIONAL_EXTENSIONS = [] ACTION_STATUS = {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/_standard_attr_segment_lib.py0000644000175000017500000000212100000000000026743 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ TODO(hongbin): This module should be deleted once neutron-lib containing https://review.opendev.org/#/c/562331/ change is released. """ ALIAS = 'standard-attr-segment' IS_SHIM_EXTENSION = True IS_STANDARD_ATTR_EXTENSION = True NAME = 'Standard Attribute Segment Extension' DESCRIPTION = 'Add standard attributes to Segment resource' UPDATED_TIMESTAMP = '2018-04-09T10:00:00-00:00' RESOURCE_ATTRIBUTE_MAP = {} SUB_RESOURCE_ATTRIBUTE_MAP = {} ACTION_MAP = {} REQUIRED_EXTENSIONS = [] OPTIONAL_EXTENSIONS = [] ACTION_STATUS = {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/address_scope.py0000644000175000017500000000477700000000000024243 0ustar00coreycorey00000000000000# Copyright (c) 2015 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc from neutron_lib.api.definitions import address_scope as apidef from neutron_lib.api import extensions as api_extensions from neutron_lib.plugins import directory import six from neutron.api import extensions from neutron.api.v2 import base class Address_scope(api_extensions.APIExtensionDescriptor): """Extension class supporting Address Scopes.""" api_definition = apidef @classmethod def get_resources(cls): """Returns Ext Resources.""" plugin = directory.get_plugin() collection_name = apidef.COLLECTION_NAME.replace('_', '-') params = apidef.RESOURCE_ATTRIBUTE_MAP.get( apidef.COLLECTION_NAME, dict()) controller = base.create_resource(collection_name, apidef.RESOURCE_NAME, plugin, params, allow_bulk=True, allow_pagination=True, allow_sorting=True) ex = extensions.ResourceExtension(collection_name, controller, attr_map=params) return [ex] @six.add_metaclass(abc.ABCMeta) class AddressScopePluginBase(object): @abc.abstractmethod def create_address_scope(self, context, address_scope): pass @abc.abstractmethod def update_address_scope(self, context, id, address_scope): pass @abc.abstractmethod def get_address_scope(self, context, id, fields=None): pass @abc.abstractmethod def get_address_scopes(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass @abc.abstractmethod def delete_address_scope(self, context, id): pass def get_address_scopes_count(self, context, filters=None): raise NotImplementedError() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/admin_state_down_before_update.py0000644000175000017500000000143000000000000027607 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.extensions import _admin_state_down_before_update_lib as apidef from neutron_lib.api import extensions class Admin_state_down_before_update(extensions.APIExtensionDescriptor): api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/agent.py0000644000175000017500000000532400000000000022510 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
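# NOTE(editor): generic illustration, not from the tree. Plugin ABCs like
# AddressScopePluginBase above rely on six.add_metaclass(abc.ABCMeta) so
# that instantiating a subclass which misses an @abc.abstractmethod member
# fails at construction time rather than at call time.
import abc
import six

@six.add_metaclass(abc.ABCMeta)
class _Base(object):
    @abc.abstractmethod
    def get_things(self, context):
        pass

class _Impl(_Base):
    def get_things(self, context):
        return []

try:
    _Base()  # abstract method unimplemented -> TypeError
    raise AssertionError('expected TypeError')
except TypeError:
    pass
assert _Impl().get_things(None) == []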
import abc from neutron_lib.api.definitions import agent as apidef from neutron_lib.api import extensions as api_extensions from neutron_lib import exceptions from neutron_lib.plugins import directory import six from neutron.api import extensions from neutron.api.v2 import base class Agent(api_extensions.APIExtensionDescriptor): """Agent management extension.""" api_definition = apidef @classmethod def get_resources(cls): """Returns Ext Resources.""" plugin = directory.get_plugin() params = apidef.RESOURCE_ATTRIBUTE_MAP.get(apidef.COLLECTION_NAME) controller = base.create_resource(apidef.COLLECTION_NAME, apidef.RESOURCE_NAME, plugin, params) ex = extensions.ResourceExtension(apidef.COLLECTION_NAME, controller) return [ex] @six.add_metaclass(abc.ABCMeta) class AgentPluginBase(object): """REST API to operate the Agent. All methods must be called in an admin context. """ def create_agent(self, context, agent): """Create agent. This operation is not allowed via the REST API. @raise exceptions.BadRequest: """ raise exceptions.BadRequest() @abc.abstractmethod def delete_agent(self, context, id): """Delete agent. Agents register themselves when reporting state. If an agent does not report its state for a long time (for example, because it is dead), an admin can remove it. Agents must be disabled before being removed. """ pass @abc.abstractmethod def update_agent(self, context, agent): """Disable or enable the agent. The description can also be updated. Some agents, such as plugins and services, cannot be disabled; an error code should be reported in this case. @raise exceptions.BadRequest: """ pass @abc.abstractmethod def get_agents(self, context, filters=None, fields=None): pass @abc.abstractmethod def get_agent(self, context, id, fields=None): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/agent_resources_synced.py0000644000175000017500000000142100000000000026141 0ustar00coreycorey00000000000000# Copyright (c) 2019 Ericsson # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import agent_resources_synced as apidef from neutron_lib.api import extensions class Agent_resources_synced(extensions.APIExtensionDescriptor): api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/allowedaddresspairs.py0000644000175000017500000000177100000000000025450 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import allowedaddresspairs as addr_apidef from neutron_lib.api import extensions from neutron.conf.extensions import allowedaddresspairs as addr_pair addr_pair.register_allowed_address_pair_opts() class Allowedaddresspairs(extensions.APIExtensionDescriptor): """Extension class supporting allowed address pairs.""" api_definition = addr_apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/auto_allocated_topology.py0000644000175000017500000000300500000000000026320 0ustar00coreycorey00000000000000# Copyright 2015-2016 Hewlett Packard Enterprise Development Company, LP # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import auto_allocated_topology from neutron_lib.api import extensions as api_extensions from neutron_lib.plugins import directory from neutron.api import extensions from neutron.api.v2 import base class Auto_allocated_topology(api_extensions.APIExtensionDescriptor): api_definition = auto_allocated_topology @classmethod def get_resources(cls): params = auto_allocated_topology.RESOURCE_ATTRIBUTE_MAP.get( auto_allocated_topology.COLLECTION_NAME, dict()) controller = base.create_resource( auto_allocated_topology.COLLECTION_NAME, auto_allocated_topology.ALIAS, directory.get_plugin(auto_allocated_topology.ALIAS), params, allow_bulk=False) return [extensions.ResourceExtension( auto_allocated_topology.ALIAS, controller)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/availability_zone.py0000644000175000017500000000372200000000000025117 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
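# NOTE(editor): hedged sketch with a hypothetical option name, not a real
# neutron knob. Allowedaddresspairs above registers its configuration at
# import time via a register_*_opts() helper; the underlying oslo.config
# pattern it wraps is just this.
from oslo_config import cfg

_example_opts = [
    cfg.IntOpt('example_max_pairs', default=10,
               help='Illustrative option; the name is invented.'),
]

def register_example_opts(conf=cfg.CONF):
    # Registration makes the option readable and overridable by config files.
    conf.register_opts(_example_opts)

register_example_opts()
assert cfg.CONF.example_max_pairs == 10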
import abc from neutron_lib.api.definitions import availability_zone as az_def from neutron_lib.api import extensions as api_extensions from neutron_lib.plugins import directory import six from neutron.api import extensions from neutron.api.v2 import base class Availability_zone(api_extensions.APIExtensionDescriptor): """Availability zone extension.""" api_definition = az_def @classmethod def get_resources(cls): """Returns Ext Resources.""" plugin = directory.get_plugin() params = az_def.RESOURCE_ATTRIBUTE_MAP.get(az_def.COLLECTION_NAME) controller = base.create_resource(az_def.COLLECTION_NAME, az_def.RESOURCE_NAME, plugin, params) ex = extensions.ResourceExtension(az_def.COLLECTION_NAME, controller) return [ex] @six.add_metaclass(abc.ABCMeta) class AvailabilityZonePluginBase(object): """REST API to operate the Availability Zone.""" @abc.abstractmethod def get_availability_zones(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """Return availability zones which a resource belongs to""" @abc.abstractmethod def validate_availability_zones(self, context, resource_type, availability_zones): """Verify that the availability zones exist.""" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/availability_zone_filter.py0000644000175000017500000000141400000000000026460 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.extensions import _availability_zone_filter_lib as apidef from neutron_lib.api import extensions class Availability_zone_filter(extensions.APIExtensionDescriptor): api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/data_plane_status.py0000644000175000017500000000150100000000000025076 0ustar00coreycorey00000000000000# Copyright (c) 2017 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.api.definitions import data_plane_status from neutron_lib.api import extensions class Data_plane_status(extensions.APIExtensionDescriptor): api_definition = data_plane_status ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/default_subnetpools.py0000644000175000017500000000150400000000000025467 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import default_subnetpools as api_def from neutron_lib.api import extensions class Default_subnetpools(extensions.APIExtensionDescriptor): """Extension class supporting default subnetpools.""" api_definition = api_def ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/dhcpagentscheduler.py0000644000175000017500000001112400000000000025241 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc from neutron_lib.api.definitions import agent as agent_apidef from neutron_lib.api.definitions import dhcpagentscheduler as apidef from neutron_lib.api.definitions import network as net_apidef from neutron_lib.api import extensions as api_extensions from neutron_lib.api import faults from neutron_lib.plugins import directory from neutron_lib import rpc as n_rpc import six from neutron.api import extensions from neutron.api.v2 import resource from neutron import policy from neutron import wsgi class NetworkSchedulerController(wsgi.Controller): def index(self, request, **kwargs): plugin = directory.get_plugin() policy.enforce(request.context, "get_%s" % apidef.DHCP_NETS, {}) return plugin.list_networks_on_dhcp_agent( request.context, kwargs['agent_id']) def create(self, request, body, **kwargs): plugin = directory.get_plugin() policy.enforce(request.context, "create_%s" % apidef.DHCP_NET, {}) agent_id = kwargs['agent_id'] network_id = body['network_id'] result = plugin.add_network_to_dhcp_agent(request.context, agent_id, network_id) notify(request.context, 'dhcp_agent.network.add', network_id, agent_id) return result def delete(self, request, id, **kwargs): plugin = directory.get_plugin() policy.enforce(request.context, "delete_%s" % apidef.DHCP_NET, {}) agent_id = kwargs['agent_id'] result = plugin.remove_network_from_dhcp_agent(request.context, agent_id, id) notify(request.context, 'dhcp_agent.network.remove', id, agent_id) return result class DhcpAgentsHostingNetworkController(wsgi.Controller): def index(self, request, **kwargs): plugin = directory.get_plugin() policy.enforce(request.context, "get_%s" % apidef.DHCP_AGENTS, {}) return plugin.list_dhcp_agents_hosting_network( request.context, kwargs['network_id']) class Dhcpagentscheduler(api_extensions.APIExtensionDescriptor): """Extension class supporting the DHCP agent scheduler.""" api_definition = apidef @classmethod def get_resources(cls): """Returns Ext Resources.""" exts = [] parent = dict(member_name=agent_apidef.RESOURCE_NAME, collection_name=agent_apidef.COLLECTION_NAME) controller = resource.Resource(NetworkSchedulerController(), faults.FAULT_MAP) exts.append(extensions.ResourceExtension( apidef.DHCP_NETS, controller, parent)) parent = dict(member_name=net_apidef.RESOURCE_NAME, collection_name=net_apidef.COLLECTION_NAME) controller = resource.Resource(DhcpAgentsHostingNetworkController(), faults.FAULT_MAP) exts.append(extensions.ResourceExtension( apidef.DHCP_AGENTS, controller, parent)) return exts @six.add_metaclass(abc.ABCMeta) class DhcpAgentSchedulerPluginBase(object): """REST API to operate the DHCP agent scheduler. All methods must be called in an admin context. """ @abc.abstractmethod def add_network_to_dhcp_agent(self, context, id, network_id): pass @abc.abstractmethod def remove_network_from_dhcp_agent(self, context, id, network_id): pass @abc.abstractmethod def list_networks_on_dhcp_agent(self, context, id): pass @abc.abstractmethod def list_dhcp_agents_hosting_network(self, context, network_id): pass def notify(context, action, network_id, agent_id): info = {'id': agent_id, 'network_id': network_id} notifier = n_rpc.get_notifier('network') notifier.info(context, action, {'agent': info}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/dns.py0000644000175000017500000000153400000000000022175 0ustar00coreycorey00000000000000# Copyright (c) 2015 Rackspace # All Rights Reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import dns as dns_apidef from neutron_lib.api import extensions class Dns(extensions.APIExtensionDescriptor): """Extension class supporting DNS Integration.""" api_definition = dns_apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/dns_domain_ports.py0000644000175000017500000000157000000000000024753 0ustar00coreycorey00000000000000# Copyright (c) 2017 IBM # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import dns_domain_ports as apidef from neutron_lib.api import extensions class Dns_domain_ports(extensions.APIExtensionDescriptor): """Extension class supporting dns_domain attribute for ports.""" api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/dvr.py0000644000175000017500000000220200000000000022175 0ustar00coreycorey00000000000000# Copyright (c) 2014 OpenStack Foundation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
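# NOTE(editor): illustrative sketch only, not part of the tree. notify()
# in dhcpagentscheduler.py further above emits an oslo notification whose
# payload is a plain nested dict keyed by 'agent'; reconstructing just the
# payload shape (the IDs are placeholders):
def _build_agent_payload(agent_id, network_id):
    info = {'id': agent_id, 'network_id': network_id}
    return {'agent': info}

assert _build_agent_payload('agent-1', 'net-1') == {
    'agent': {'id': 'agent-1', 'network_id': 'net-1'}}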
import abc from neutron_lib.api.definitions import dvr as apidef from neutron_lib.api import extensions import six class Dvr(extensions.APIExtensionDescriptor): """Extension class supporting distributed virtual router.""" api_definition = apidef @six.add_metaclass(abc.ABCMeta) class DVRMacAddressPluginBase(object): @abc.abstractmethod def get_dvr_mac_address_list(self, context): pass @abc.abstractmethod def get_dvr_mac_address_by_host(self, context, host): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/empty_string_filtering.py0000644000175000017500000000141400000000000026175 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import empty_string_filtering as apidef from neutron_lib.api import extensions class Empty_string_filtering(extensions.APIExtensionDescriptor): api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/expose_l3_conntrack_helper.py0000644000175000017500000000152000000000000026706 0ustar00coreycorey00000000000000# Copyright (c) 2019 Red Hat, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import expose_l3_conntrack_helper as apidef from neutron_lib.api import extensions class Expose_l3_conntrack_helper(extensions.APIExtensionDescriptor): api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/expose_port_forwarding_in_fip.py0000644000175000017500000000140100000000000027517 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.api.definitions import expose_port_forwarding_in_fip as apiref from neutron_lib.api import extensions class Expose_port_forwarding_in_fip(extensions.APIExtensionDescriptor): api_definition = apiref ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/external_net.py0000644000175000017500000000151200000000000024075 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib.api import extensions class External_net(extensions.APIExtensionDescriptor): api_definition = extnet_apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/extra_dhcp_opt.py0000644000175000017500000000141100000000000024406 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from neutron_lib.api.definitions import extra_dhcp_opt from neutron_lib.api import extensions class Extra_dhcp_opt(extensions.APIExtensionDescriptor): api_definition = extra_dhcp_opt ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/extraroute.py0000644000175000017500000000147100000000000023613 0ustar00coreycorey00000000000000# Copyright 2013, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.api.definitions import extraroute as apidef from neutron_lib.api import extensions class Extraroute(extensions.APIExtensionDescriptor): api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/extraroute_atomic.py0000644000175000017500000000243600000000000025151 0ustar00coreycorey00000000000000# Copyright 2019 Ericsson Software Technology # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import extraroute_atomic from neutron_lib.api import extensions as api_extensions from neutron_lib.plugins import constants from neutron.api.v2 import resource_helper class Extraroute_atomic(api_extensions.APIExtensionDescriptor): api_definition = extraroute_atomic @classmethod def get_resources(cls): plural_mappings = resource_helper.build_plural_mappings( {}, extraroute_atomic.RESOURCE_ATTRIBUTE_MAP) return resource_helper.build_resource_info( plural_mappings, extraroute_atomic.RESOURCE_ATTRIBUTE_MAP, constants.L3, action_map=extraroute_atomic.ACTION_MAP) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/filter_validation.py0000644000175000017500000000231200000000000025103 0ustar00coreycorey00000000000000# Copyright (c) 2017 Huawei Technology, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api import extensions from oslo_config import cfg from oslo_log import log as logging from neutron.extensions import _filter_validation_lib as apidef LOG = logging.getLogger(__name__) def _disable_extension_by_config(aliases): if not cfg.CONF.filter_validation: if 'filter-validation' in aliases: aliases.remove('filter-validation') LOG.info('Disabled filter validation extension.') class Filter_validation(extensions.APIExtensionDescriptor): """Extension class supporting filter validation.""" api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/fip_pf_description.py0000644000175000017500000000135400000000000025257 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import fip_pf_description as apidef from neutron_lib.api import extensions class Fip_pf_description(extensions.APIExtensionDescriptor): api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/fip_port_details.py0000644000175000017500000000147700000000000024746 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import fip_port_details as apidef from neutron_lib.api import extensions class Fip_port_details(extensions.APIExtensionDescriptor): """Extension class adding port_details to Floating IP.""" api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/flavors.py0000644000175000017500000000450500000000000023066 0ustar00coreycorey00000000000000# All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import flavors as apidef from neutron_lib.api import extensions as api_extensions from neutron_lib.plugins import constants from neutron_lib.plugins import directory from neutron.api import extensions from neutron.api.v2 import base from neutron.api.v2 import resource_helper class Flavors(api_extensions.APIExtensionDescriptor): api_definition = apidef @classmethod def get_resources(cls): """Returns Ext Resources.""" plural_mappings = resource_helper.build_plural_mappings( {}, apidef.RESOURCE_ATTRIBUTE_MAP) resources = resource_helper.build_resource_info( plural_mappings, apidef.RESOURCE_ATTRIBUTE_MAP, constants.FLAVORS) plugin = directory.get_plugin(constants.FLAVORS) for collection_name in apidef.SUB_RESOURCE_ATTRIBUTE_MAP: # Special handling needed for sub-resources with 'y' ending # (e.g. 
proxies -> proxy) resource_name = collection_name[:-1] parent = apidef.SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get( 'parent') params = apidef.SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get( 'parameters') controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=True, parent=parent) resource = extensions.ResourceExtension( collection_name, controller, parent, path_prefix=apidef.API_PREFIX, attr_map=params) resources.append(resource) return resources ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/floating_ip_port_forwarding.py0000644000175000017500000001001600000000000027165 0ustar00coreycorey00000000000000# Copyright (c) 2018 OpenStack Foundation # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import itertools from neutron_lib.api.definitions import floating_ip_port_forwarding as apidef from neutron_lib.api import extensions as api_extensions from neutron_lib.plugins import constants as plugin_consts from neutron_lib.plugins import directory from neutron_lib.services import base as service_base import six from neutron.api import extensions from neutron.api.v2 import base from neutron.api.v2 import resource_helper class Floating_ip_port_forwarding(api_extensions.APIExtensionDescriptor): """Floating IP Port Forwarding API extension.""" api_definition = apidef @classmethod def get_plugin_interface(cls): return PortForwardingPluginBase @classmethod def get_resources(cls): """Returns Ext Resources.""" special_mappings = {'floatingips': 'floatingip'} plural_mappings = resource_helper.build_plural_mappings( special_mappings, itertools.chain( apidef.RESOURCE_ATTRIBUTE_MAP, apidef.SUB_RESOURCE_ATTRIBUTE_MAP)) resources = resource_helper.build_resource_info( plural_mappings, apidef.RESOURCE_ATTRIBUTE_MAP, plugin_consts.PORTFORWARDING, translate_name=True, allow_bulk=True) plugin = directory.get_plugin(plugin_consts.PORTFORWARDING) parent = apidef.SUB_RESOURCE_ATTRIBUTE_MAP[ apidef.COLLECTION_NAME].get('parent') params = apidef.SUB_RESOURCE_ATTRIBUTE_MAP[apidef.COLLECTION_NAME].get( 'parameters') controller = base.create_resource(apidef.COLLECTION_NAME, apidef.RESOURCE_NAME, plugin, params, allow_bulk=True, parent=parent, allow_pagination=True, allow_sorting=True) resource = extensions.ResourceExtension( apidef.COLLECTION_NAME, controller, parent, attr_map=params) resources.append(resource) return resources @six.add_metaclass(abc.ABCMeta) class PortForwardingPluginBase(service_base.ServicePluginBase): path_prefix = apidef.API_PREFIX @classmethod def get_plugin_type(cls): return plugin_consts.PORTFORWARDING def get_plugin_description(self): return "Port Forwarding Service Plugin" @abc.abstractmethod def create_floatingip_port_forwarding(self, context, floatingip_id, port_forwarding): pass @abc.abstractmethod def update_floatingip_port_forwarding(self, context, id, floatingip_id, port_forwarding): pass @abc.abstractmethod def 
get_floatingip_port_forwarding(self, context, id, floatingip_id, fields=None): pass @abc.abstractmethod def get_floatingip_port_forwardings(self, context, floatingip_id=None, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass @abc.abstractmethod def delete_floatingip_port_forwarding(self, context, id, floatingip_id): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/floatingip_pools.py0000644000175000017500000000363000000000000024760 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import itertools from neutron_lib.api.definitions import floatingip_pools as apidef from neutron_lib.api import extensions as api_extensions from neutron_lib.plugins import constants import six from neutron.api.v2 import resource_helper class Floatingip_pools(api_extensions.APIExtensionDescriptor): """Neutron floating IP pool api extension.""" api_definition = apidef @classmethod def get_resources(cls): """Returns Ext Resources.""" plural_mappings = resource_helper.build_plural_mappings( {}, itertools.chain(apidef.RESOURCE_ATTRIBUTE_MAP)) resources = resource_helper.build_resource_info( plural_mappings, apidef.RESOURCE_ATTRIBUTE_MAP, constants.L3, translate_name=True, allow_bulk=True) return resources @six.add_metaclass(abc.ABCMeta) class FloatingIPPoolPluginBase(object): @abc.abstractmethod def get_floatingip_pools(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """List all floating ip pools.""" pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/ip_allocation.py0000644000175000017500000000162200000000000024224 0ustar00coreycorey00000000000000# Copyright (c) 2016 Hewlett Packard Enterprise Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
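# NOTE(editor): simplified re-implementation for illustration, not the real
# helper. The get_resources() methods above derive singular resource names
# from plural collection keys via resource_helper.build_plural_mappings();
# the behaviour relied on is roughly: special-case overrides win, otherwise
# strip the trailing 's'. The map contents below are invented.
def build_plural_mappings_sketch(special_mappings, resource_map):
    mappings = {}
    for plural in resource_map:
        mappings[plural] = special_mappings.get(plural, plural[:-1])
    return mappings

m = build_plural_mappings_sketch(
    {'floatingips': 'floatingip'},
    {'floatingips': {}, 'port_forwardings': {}})
assert m == {'floatingips': 'floatingip',
             'port_forwardings': 'port_forwarding'}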
from neutron_lib.api.definitions import ip_allocation as apidef from neutron_lib.api import extensions class Ip_allocation(extensions.APIExtensionDescriptor): """Extension indicates when ports use deferred or no IP allocation.""" api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/ip_substring_port_filtering.py0000644000175000017500000000163600000000000027233 0ustar00coreycorey00000000000000# Copyright (c) 2017 Huawei Technology, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import ip_substring_port_filtering as apidef from neutron_lib.api import extensions class Ip_substring_port_filtering(extensions.APIExtensionDescriptor): """Extension class supporting IP substring port filtering.""" api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/l2_adjacency.py0000644000175000017500000000216500000000000023730 0ustar00coreycorey00000000000000# Copyright (c) 2016 NEC Technologies Ltd. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import l2_adjacency as apidef from neutron_lib.api import extensions class L2_adjacency(extensions.APIExtensionDescriptor): """Extension class supporting L2 Adjacency for Routed Networks The following class is used by neutron's extension framework to provide metadata related to the L2 Adjacency for Neutron Routed Network, exposing the same to clients. No new resources have been defined by this extension. """ api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/l3.py0000644000175000017500000000570400000000000021732 0ustar00coreycorey00000000000000# Copyright 2012 VMware, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import abc from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.api import extensions from neutron_lib.plugins import constants import six from neutron.api.v2 import resource_helper from neutron.conf import quota # Register the configuration options quota.register_quota_opts(quota.l3_quota_opts) class L3(extensions.APIExtensionDescriptor): api_definition = l3_apidef @classmethod def get_resources(cls): """Returns Ext Resources.""" plural_mappings = resource_helper.build_plural_mappings( {}, l3_apidef.RESOURCE_ATTRIBUTE_MAP) return resource_helper.build_resource_info( plural_mappings, l3_apidef.RESOURCE_ATTRIBUTE_MAP, constants.L3, action_map=l3_apidef.ACTION_MAP, register_quota=True) @six.add_metaclass(abc.ABCMeta) class RouterPluginBase(object): @abc.abstractmethod def create_router(self, context, router): pass @abc.abstractmethod def update_router(self, context, id, router): pass @abc.abstractmethod def get_router(self, context, id, fields=None): pass @abc.abstractmethod def delete_router(self, context, id): pass @abc.abstractmethod def get_routers(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass @abc.abstractmethod def add_router_interface(self, context, router_id, interface_info=None): pass @abc.abstractmethod def remove_router_interface(self, context, router_id, interface_info): pass @abc.abstractmethod def create_floatingip(self, context, floatingip): pass @abc.abstractmethod def update_floatingip(self, context, id, floatingip): pass @abc.abstractmethod def get_floatingip(self, context, id, fields=None): pass @abc.abstractmethod def delete_floatingip(self, context, id): pass @abc.abstractmethod def get_floatingips(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass def get_routers_count(self, context, filters=None): raise NotImplementedError() def get_floatingips_count(self, context, filters=None): raise NotImplementedError() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/l3_conntrack_helper.py0000644000175000017500000001001000000000000025315 0ustar00coreycorey00000000000000# Copyright (c) 2019 Red Hat, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
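# NOTE(editor): generic illustration, not from the tree. RouterPluginBase
# above deliberately gives get_routers_count() and get_floatingips_count()
# concrete bodies that raise NotImplementedError, so only the CRUD hooks
# are hard requirements; a caller can feature-test like this.
class _CountlessRouters(object):
    def get_routers_count(self, context, filters=None):
        raise NotImplementedError()

try:
    _CountlessRouters().get_routers_count(None)
    supports_count = True
except NotImplementedError:
    supports_count = False
assert supports_count is False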
import abc import itertools from neutron_lib.api.definitions import l3_conntrack_helper as apidef from neutron_lib.api import extensions as api_extensions from neutron_lib.plugins import constants as plugin_consts from neutron_lib.plugins import directory from neutron_lib.services import base as service_base import six from neutron.api import extensions from neutron.api.v2 import base from neutron.api.v2 import resource_helper from neutron.conf.extensions import conntrack_helper as cth_conf cth_conf.register_conntrack_helper_opts() class L3_conntrack_helper(api_extensions.APIExtensionDescriptor): """Router conntrack helpers API extension.""" api_definition = apidef @classmethod def get_plugin_interface(cls): return ConntrackHelperPluginBase @classmethod def get_resources(cls): """Returns Ext Resources.""" special_mappings = {'routers': 'router'} plural_mappings = resource_helper.build_plural_mappings( special_mappings, itertools.chain( apidef.RESOURCE_ATTRIBUTE_MAP, apidef.SUB_RESOURCE_ATTRIBUTE_MAP)) resources = resource_helper.build_resource_info( plural_mappings, apidef.RESOURCE_ATTRIBUTE_MAP, plugin_consts.CONNTRACKHELPER, translate_name=True, allow_bulk=True) plugin = directory.get_plugin(plugin_consts.CONNTRACKHELPER) parent = apidef.SUB_RESOURCE_ATTRIBUTE_MAP[ apidef.COLLECTION_NAME].get('parent') params = apidef.SUB_RESOURCE_ATTRIBUTE_MAP[apidef.COLLECTION_NAME].get( 'parameters') controller = base.create_resource(apidef.COLLECTION_NAME, apidef.RESOURCE_NAME, plugin, params, allow_bulk=True, parent=parent, allow_pagination=True, allow_sorting=True) resource = extensions.ResourceExtension( apidef.COLLECTION_NAME, controller, parent, attr_map=params) resources.append(resource) return resources @six.add_metaclass(abc.ABCMeta) class ConntrackHelperPluginBase(service_base.ServicePluginBase): path_prefix = apidef.API_PREFIX @classmethod def get_plugin_type(cls): return plugin_consts.CONNTRACKHELPER def get_plugin_description(self): return "Conntrack Helper Service Plugin" @abc.abstractmethod def create_router_conntrack_helper(self, context, router_id, conntrack_helper): pass @abc.abstractmethod def update_router_conntrack_helper(self, context, id, router_id, conntrack_helper): pass @abc.abstractmethod def get_router_conntrack_helper(self, context, id, router_id, fields=None): pass @abc.abstractmethod def get_router_conntrack_helpers(self, context, router_id=None, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass @abc.abstractmethod def delete_router_conntrack_helper(self, context, id, router_id): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/l3_ext_gw_mode.py0000644000175000017500000000146400000000000024312 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
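# [Editor's note - illustrative sketch, not part of the source tree.]
# get_resources() above chains the top-level and sub-resource attribute
# maps and derives singular member names from plural collection names,
# with special_mappings overriding the default rule. The exact helper
# lives in neutron.api.v2.resource_helper; a toy approximation of the
# derivation it performs:
import itertools as _itertools

def _toy_plural_mappings(special_mappings, collections):
    # Default: strip the trailing 's'; special cases (e.g. the QoS
    # extension's 'policies' -> 'policy') come from special_mappings.
    return {plural: special_mappings.get(plural, plural[:-1])
            for plural in collections}

assert _toy_plural_mappings(
    {'routers': 'router'},
    _itertools.chain(['routers'], ['conntrack_helpers'])
) == {'routers': 'router', 'conntrack_helpers': 'conntrack_helper'}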
from neutron_lib.api.definitions import l3_ext_gw_mode as apidef from neutron_lib.api import extensions class L3_ext_gw_mode(extensions.APIExtensionDescriptor): api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/l3_ext_ha_mode.py0000644000175000017500000000153700000000000024266 0ustar00coreycorey00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import l3_ext_ha_mode as apidef from neutron_lib.api import extensions class L3_ext_ha_mode(extensions.APIExtensionDescriptor): """Extension class supporting virtual router in HA mode.""" api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/l3_flavors.py0000644000175000017500000000143000000000000023456 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron_lib.api.definitions import l3_flavors as apidef from neutron_lib.api import extensions class L3_flavors(extensions.APIExtensionDescriptor): """Extension class supporting flavors for routers.""" api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/l3_port_ip_change_not_allowed.py0000644000175000017500000000143200000000000027354 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
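# [Editor's note - illustrative sketch, not part of the source tree.]
# Most files in this directory are now two-liners: the extension class
# only points at an API definition shipped by neutron_lib, and
# APIExtensionDescriptor derives get_name(), get_alias(), and so on
# from that module's NAME/ALIAS/... constants. A toy stand-in for the
# mechanism (the real one lives in neutron_lib.api.extensions; values
# below are hypothetical):
class _ToyApiDef(object):
    NAME = 'L3 flavors'
    ALIAS = 'l3-flavors'

class _ToyDescriptor(object):
    api_definition = None

    @classmethod
    def get_name(cls):
        return cls.api_definition.NAME

    @classmethod
    def get_alias(cls):
        return cls.api_definition.ALIAS

class _ToyL3Flavors(_ToyDescriptor):
    api_definition = _ToyApiDef

assert _ToyL3Flavors.get_alias() == 'l3-flavors'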
from neutron_lib.api.definitions import l3_port_ip_change_not_allowed as apidef from neutron_lib.api import extensions class L3_port_ip_change_not_allowed(extensions.APIExtensionDescriptor): api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/l3agentscheduler.py0000644000175000017500000001603000000000000024642 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from neutron_lib.api import extensions as api_extensions from neutron_lib.api import faults from neutron_lib import constants from neutron_lib import exceptions from neutron_lib.exceptions import agent as agent_exc from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from neutron_lib import rpc as n_rpc from oslo_log import log as logging import six import webob.exc from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import resource from neutron import policy from neutron import wsgi LOG = logging.getLogger(__name__) L3_ROUTER = 'l3-router' L3_ROUTERS = L3_ROUTER + 's' L3_AGENT = 'l3-agent' L3_AGENTS = L3_AGENT + 's' class RouterSchedulerController(wsgi.Controller): def get_plugin(self): plugin = directory.get_plugin(plugin_constants.L3) if not plugin: LOG.error('No plugin for L3 routing registered to handle ' 'router scheduling') msg = _('The resource could not be found.') raise webob.exc.HTTPNotFound(msg) return plugin def index(self, request, **kwargs): plugin = self.get_plugin() policy.enforce(request.context, "get_%s" % L3_ROUTERS, {}) return plugin.list_routers_on_l3_agent( request.context, kwargs['agent_id']) def create(self, request, body, **kwargs): plugin = self.get_plugin() policy.enforce(request.context, "create_%s" % L3_ROUTER, {}) agent_id = kwargs['agent_id'] router_id = body['router_id'] result = plugin.add_router_to_l3_agent(request.context, agent_id, router_id) notify(request.context, 'l3_agent.router.add', router_id, agent_id) return result def delete(self, request, id, **kwargs): plugin = self.get_plugin() policy.enforce(request.context, "delete_%s" % L3_ROUTER, {}) agent_id = kwargs['agent_id'] result = plugin.remove_router_from_l3_agent(request.context, agent_id, id) notify(request.context, 'l3_agent.router.remove', id, agent_id) return result class L3AgentsHostingRouterController(wsgi.Controller): def get_plugin(self): plugin = directory.get_plugin(plugin_constants.L3) if not plugin: LOG.error('No plugin for L3 routing registered to handle ' 'router scheduling') msg = _('The resource could not be found.') raise webob.exc.HTTPNotFound(msg) return plugin def index(self, request, **kwargs): plugin = self.get_plugin() policy.enforce(request.context, "get_%s" % L3_AGENTS, {}) return plugin.list_l3_agents_hosting_router( request.context, kwargs['router_id']) class L3agentscheduler(api_extensions.ExtensionDescriptor): 
"""Extension class supporting l3 agent scheduler. """ @classmethod def get_name(cls): return "L3 Agent Scheduler" @classmethod def get_alias(cls): return constants.L3_AGENT_SCHEDULER_EXT_ALIAS @classmethod def get_description(cls): return "Schedule routers among l3 agents" @classmethod def get_updated(cls): return "2013-02-07T10:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" exts = [] parent = dict(member_name="agent", collection_name="agents") controller = resource.Resource(RouterSchedulerController(), faults.FAULT_MAP) exts.append(extensions.ResourceExtension( L3_ROUTERS, controller, parent)) parent = dict(member_name="router", collection_name="routers") controller = resource.Resource(L3AgentsHostingRouterController(), faults.FAULT_MAP) exts.append(extensions.ResourceExtension( L3_AGENTS, controller, parent)) return exts def get_extended_resources(self, version): return {} class InvalidL3Agent(agent_exc.AgentNotFound): message = _("Agent %(id)s is not a L3 Agent or has been disabled") class RouterHostedByL3Agent(exceptions.Conflict): message = _("The router %(router_id)s has been already hosted " "by the L3 Agent %(agent_id)s.") class RouterSchedulingFailed(exceptions.Conflict): message = _("Failed scheduling router %(router_id)s to " "the L3 Agent %(agent_id)s.") class RouterReschedulingFailed(exceptions.Conflict): message = _("Failed rescheduling router %(router_id)s: " "no eligible l3 agent found.") class RouterL3AgentMismatch(exceptions.Conflict): message = _("Cannot host distributed router %(router_id)s " "on legacy L3 agent %(agent_id)s.") class DVRL3CannotAssignToDvrAgent(exceptions.Conflict): message = _("Not allowed to manually assign a router to an " "agent in 'dvr' mode.") class DVRL3CannotRemoveFromDvrAgent(exceptions.Conflict): message = _("Not allowed to manually remove a router from " "an agent in 'dvr' mode.") class RouterDoesntSupportScheduling(exceptions.Conflict): message = _("Router %(router_id)s does not support agent scheduling.") @six.add_metaclass(abc.ABCMeta) class L3AgentSchedulerPluginBase(object): """REST API to operate the l3 agent scheduler. All of method must be in an admin context. """ @abc.abstractmethod def add_router_to_l3_agent(self, context, id, router_id): pass @abc.abstractmethod def remove_router_from_l3_agent(self, context, id, router_id): pass @abc.abstractmethod def list_routers_on_l3_agent(self, context, id): pass @abc.abstractmethod def list_l3_agents_hosting_router(self, context, router_id): pass def router_supports_scheduling(self, context, router_id): """Override this method to conditionally schedule routers.""" return True def notify(context, action, router_id, agent_id): info = {'id': agent_id, 'router_id': router_id} notifier = n_rpc.get_notifier('router') notifier.info(context, action, {'agent': info}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/logging.py0000644000175000017500000000542400000000000023041 0ustar00coreycorey00000000000000# Copyright (c) 2017 Fujitsu Limited # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import itertools from neutron_lib.api.definitions import logging as apidef from neutron_lib.api import extensions as api_extensions from neutron_lib.plugins import constants as plugin_const from neutron_lib.services import base as service_base import six from neutron.api.v2 import resource_helper class Logging(api_extensions.APIExtensionDescriptor): """Neutron logging api extension.""" api_definition = apidef @classmethod def get_plugin_interface(cls): return LoggingPluginBase @classmethod def get_resources(cls): """Returns Ext Resources.""" plural_mappings = resource_helper.build_plural_mappings( {}, itertools.chain(apidef.RESOURCE_ATTRIBUTE_MAP)) resources = resource_helper.build_resource_info( plural_mappings, apidef.RESOURCE_ATTRIBUTE_MAP, plugin_const.LOG_API, translate_name=True, allow_bulk=True) return resources @six.add_metaclass(abc.ABCMeta) class LoggingPluginBase(service_base.ServicePluginBase): path_prefix = apidef.API_PREFIX def get_plugin_description(self): return "Logging API Service Plugin" @classmethod def get_plugin_type(cls): return plugin_const.LOG_API @abc.abstractmethod def get_logs(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass @abc.abstractmethod def get_log(self, context, log_id, fields=None): pass @abc.abstractmethod def create_log(self, context, log): pass @abc.abstractmethod def update_log(self, context, log_id, log): pass @abc.abstractmethod def delete_log(self, context, log_id): pass @abc.abstractmethod def get_loggable_resources(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/metering.py0000644000175000017500000000672000000000000023225 0ustar00coreycorey00000000000000# Copyright (C) 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from neutron_lib.api.definitions import metering as metering_apidef from neutron_lib.api import extensions from neutron_lib.plugins import constants from neutron_lib.services import base as service_base import six from neutron.api.v2 import resource_helper class Metering(extensions.APIExtensionDescriptor): api_definition = metering_apidef @classmethod def get_plugin_interface(cls): return MeteringPluginBase @classmethod def get_resources(cls): """Returns Ext Resources.""" plural_mappings = resource_helper.build_plural_mappings( {}, metering_apidef.RESOURCE_ATTRIBUTE_MAP) # PCM: Metering sets pagination and sorting to True. 
Do we have cfg # entries for these so they can be read? Otherwise, they must be passed in. return resource_helper.build_resource_info( plural_mappings, metering_apidef.RESOURCE_ATTRIBUTE_MAP, constants.METERING, translate_name=True, allow_bulk=True) @six.add_metaclass(abc.ABCMeta) class MeteringPluginBase(service_base.ServicePluginBase): def get_plugin_description(self): return constants.METERING @classmethod def get_plugin_type(cls): return constants.METERING @abc.abstractmethod def create_metering_label(self, context, metering_label): """Create a metering label.""" pass def update_metering_label(self, context, id, metering_label): """Update a metering label.""" raise NotImplementedError() @abc.abstractmethod def delete_metering_label(self, context, label_id): """Delete a metering label.""" pass @abc.abstractmethod def get_metering_label(self, context, label_id, fields=None): """Get a metering label.""" pass @abc.abstractmethod def get_metering_labels(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """List all metering labels.""" pass @abc.abstractmethod def create_metering_label_rule(self, context, metering_label_rule): """Create a metering label rule.""" pass def update_metering_label_rule(self, context, id, metering_label_rule): """Update a metering label rule.""" raise NotImplementedError() @abc.abstractmethod def get_metering_label_rule(self, context, rule_id, fields=None): """Get a metering label rule.""" pass @abc.abstractmethod def delete_metering_label_rule(self, context, rule_id): """Delete a metering label rule.""" pass @abc.abstractmethod def get_metering_label_rules(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """List all metering label rules.""" pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/multiprovidernet.py0000644000175000017500000000245200000000000025025 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import multiprovidernet as apidef from neutron_lib.api import extensions class Multiprovidernet(extensions.APIExtensionDescriptor): """Extension class supporting multiple provider networks. This class is used by neutron's extension framework to make metadata about the multiple provider network extension available to clients. No new resources are defined by this extension. Instead, the existing network resource's request and response messages are extended with a 'segments' attribute. With admin rights, network dictionaries returned will also include the 'segments' attribute. 
""" api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/netmtu.py0000644000175000017500000000151500000000000022724 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import network_mtu as apidef from neutron_lib.api import extensions class Netmtu(extensions.APIExtensionDescriptor): """Extension class supporting network MTU.""" api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/netmtu_writable.py0000644000175000017500000000147700000000000024624 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import network_mtu_writable as apidef from neutron_lib.api import extensions class Netmtu_writable(extensions.APIExtensionDescriptor): """Extension class supporting writable network MTU.""" api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/network_availability_zone.py0000644000175000017500000000204600000000000026666 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import abc from neutron_lib.api.definitions import network_availability_zone as apidef from neutron_lib.api import extensions import six class Network_availability_zone(extensions.APIExtensionDescriptor): """Network availability zone extension.""" api_definition = apidef @six.add_metaclass(abc.ABCMeta) class NetworkAvailabilityZonePluginBase(object): @abc.abstractmethod def get_network_availability_zones(self, network): """Return availability zones which a network belongs to""" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/network_ip_availability.py0000644000175000017500000000317100000000000026323 0ustar00coreycorey00000000000000# Copyright 2016 GoDaddy. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from neutron_lib.api.definitions import network_ip_availability as apidef from neutron_lib.api import extensions as api_extensions import neutron.api.extensions as extensions import neutron.api.v2.base as base import neutron.services.network_ip_availability.plugin as plugin class Network_ip_availability(api_extensions.APIExtensionDescriptor): """Extension class supporting network ip availability information.""" api_definition = apidef @classmethod def get_resources(cls): """Returns Extended Resource for service type management.""" resource_attributes = apidef.RESOURCE_ATTRIBUTE_MAP[ apidef.RESOURCE_PLURAL] controller = base.create_resource( apidef.RESOURCE_PLURAL, apidef.RESOURCE_NAME, plugin.NetworkIPAvailabilityPlugin.get_instance(), resource_attributes) return [extensions.ResourceExtension(apidef.COLLECTION_NAME, controller, attr_map=resource_attributes)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/network_segment_range.py0000644000175000017500000001522100000000000025776 0ustar00coreycorey00000000000000# Copyright (c) 2019 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
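# [Editor's note - illustrative sketch, not part of the source tree.]
# A minimal concrete implementation of the
# NetworkAvailabilityZonePluginBase interface above; the fixture data
# and zone names are hypothetical:
class _ToyAZPlugin(object):
    _zones_by_network = {'net-1': ['az-east', 'az-west']}  # fake data

    def get_network_availability_zones(self, network):
        # 'network' is a dict-like resource; answer which zones it spans.
        return self._zones_by_network.get(network['id'], [])

assert _ToyAZPlugin().get_network_availability_zones(
    {'id': 'net-1'}) == ['az-east', 'az-west']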
import abc from neutron_lib.api.definitions import network_segment_range as apidef from neutron_lib.api import extensions as api_extensions from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from neutron_lib.services import base as service_base from oslo_log import log as logging import six from neutron.api import extensions from neutron.api.v2 import base LOG = logging.getLogger(__name__) class Network_segment_range(api_extensions.APIExtensionDescriptor): """Extension class supporting Network segment ranges. This class is used by neutron's extension framework to make metadata about the network segment range extension available to clients. With admin rights, one will be able to create, update, read and delete the values. """ api_definition = apidef @classmethod def get_resources(cls): """Returns extension resources""" plugin = directory.get_plugin(plugin_constants.NETWORK_SEGMENT_RANGE) collection_name = apidef.COLLECTION_NAME.replace('_', '-') params = apidef.RESOURCE_ATTRIBUTE_MAP.get(apidef.COLLECTION_NAME, dict()) controller = base.create_resource(collection_name, apidef.RESOURCE_NAME, plugin, params, allow_bulk=True, allow_pagination=True, allow_sorting=True) ex = extensions.ResourceExtension(collection_name, controller, attr_map=params) return [ex] @classmethod def get_plugin_interface(cls): return NetworkSegmentRangePluginBase @six.add_metaclass(abc.ABCMeta) class NetworkSegmentRangePluginBase(service_base.ServicePluginBase): """REST API to manage network segment ranges. All methods must be called in an admin context. """ @classmethod def get_plugin_type(cls): return plugin_constants.NETWORK_SEGMENT_RANGE def get_plugin_description(self): return "Adds network segment ranges to Neutron resources" @abc.abstractmethod def create_network_segment_range(self, context, network_segment_range): """Create a network segment range. Create a network segment range, which represents the range of L2 segments for tenant network allocation. :param context: neutron api request context :param network_segment_range: dictionary describing the network segment range, with keys as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron_lib/api/definitions/network_segment_range.py`. """ pass @abc.abstractmethod def delete_network_segment_range(self, context, id): """Delete a network segment range. :param context: neutron api request context :param id: UUID representing the network segment range to delete. """ pass @abc.abstractmethod def update_network_segment_range(self, context, id, network_segment_range): """Update values of a network segment range. :param context: neutron api request context :param id: UUID representing the network segment range to update. :param network_segment_range: dictionary with keys indicating fields to update. Valid keys are those that have a value of True for 'allow_put' as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron_lib/api/definitions/network_segment_range.py`. """ pass @abc.abstractmethod def get_network_segment_ranges(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """Retrieve a list of network segment ranges. The contents of the list depend on the filters. :param context: neutron api request context :param filters: a dictionary with keys that are valid keys for a network segment range as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron_lib/api/definitions/ network_segment_range.py`. 
Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. :param fields: a list of strings that are valid keys in a network segment range dictionary as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron_lib/api/definitions/ network_segment_range.py`. Only these fields will be returned. :param sorts: A list of (key, direction) tuples. direction: True == ASC, False == DESC :param limit: maximum number of items to return :param marker: the last item of the previous page; when used, returns next results after the marker resource. :param page_reverse: True if sort direction is reversed. """ pass @abc.abstractmethod def get_network_segment_range(self, context, id, fields=None): """Retrieve a network segment range. :param context: neutron api request context :param id: UUID representing the network segment range to fetch. :param fields: a list of strings that are valid keys in a network segment range dictionary as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron_lib/api/definitions/ network_segment_range.py`. Only these fields will be returned. """ pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/pagination.py0000644000175000017500000000171000000000000023536 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from neutron_lib.api.definitions import pagination as apidef from neutron_lib.api import extensions as api_extensions from neutron.api import extensions class Pagination(api_extensions.APIExtensionDescriptor): """Fake extension that indicates that pagination is enabled.""" api_definition = apidef extensions.register_custom_supported_check( apidef.ALIAS, lambda: True, plugin_agnostic=True ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/port_mac_address_regenerate.py0000644000175000017500000000153200000000000027121 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
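# [Editor's note - illustrative sketch, not part of the source tree.]
# The 'filters' contract described in the docstrings above (each key
# maps to an iterable of acceptable values, and a returned resource
# must match one value per key) can be expressed in a few lines:
def _matches_filters(resource, filters):
    return all(resource.get(key) in values
               for key, values in (filters or {}).items())

_toy_ranges = [{'name': 'r1', 'network_type': 'vlan'},    # made-up rows
               {'name': 'r2', 'network_type': 'vxlan'}]
assert [r['name'] for r in _toy_ranges
        if _matches_filters(r, {'network_type': ['vlan']})] == ['r1']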
from neutron_lib.api.definitions import port_mac_address_regenerate from neutron_lib.api import extensions as api_extensions class Port_mac_address_regenerate(api_extensions.APIExtensionDescriptor): """Extension to support port MAC address regeneration""" api_definition = port_mac_address_regenerate ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/port_resource_request.py0000644000175000017500000000150500000000000026052 0ustar00coreycorey00000000000000# Copyright (c) 2018 Ericsson # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import port_resource_request from neutron_lib.api import extensions as api_extensions class Port_resource_request(api_extensions.APIExtensionDescriptor): api_definition = port_resource_request ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/portbindings.py0000644000175000017500000000210300000000000024104 0ustar00coreycorey00000000000000# Copyright (c) 2012 OpenStack Foundation. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import portbindings from neutron_lib.api import extensions class Portbindings(extensions.APIExtensionDescriptor): """Extension class supporting port bindings. This class is used by neutron's extension framework to make metadata about the port bindings available to external applications. With admin rights one will be able to update and read the values. """ api_definition = portbindings ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/portbindings_extended.py0000644000175000017500000000457600000000000026004 0ustar00coreycorey00000000000000# All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
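# [Editor's note - illustrative sketch, not part of the source tree.]
# The portbindings extension above adds binding:* attributes to the
# port resource for external applications. A made-up admin-view
# fragment of a bound port (values are hypothetical):
_example_bound_port = {
    'id': '00000000-0000-0000-0000-000000000000',
    'binding:host_id': 'compute-1',
    'binding:vnic_type': 'normal',
    'binding:vif_type': 'ovs',
}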
from neutron_lib.api.definitions import portbindings_extended as pbe_ext from neutron_lib.api import extensions as api_extensions from neutron_lib.plugins import directory from neutron.api import extensions from neutron.api.v2 import base EXT_ALIAS = pbe_ext.ALIAS class Portbindings_extended(api_extensions.ExtensionDescriptor): """Extension class supporting port bindings. This class is used by neutron's extension framework to make metadata about the port bindings available to external applications. With admin rights one will be able to update and read the values. """ @classmethod def get_name(cls): return pbe_ext.NAME @classmethod def get_alias(cls): return pbe_ext.ALIAS @classmethod def get_description(cls): return pbe_ext.DESCRIPTION @classmethod def get_updated(cls): return pbe_ext.UPDATED_TIMESTAMP @classmethod def get_resources(cls): plugin = directory.get_plugin() params = pbe_ext.SUB_RESOURCE_ATTRIBUTE_MAP[ pbe_ext.COLLECTION_NAME]['parameters'] parent = pbe_ext.SUB_RESOURCE_ATTRIBUTE_MAP[ pbe_ext.COLLECTION_NAME]['parent'] controller = base.create_resource( pbe_ext.COLLECTION_NAME, pbe_ext.RESOURCE_NAME, plugin, params, member_actions=pbe_ext.ACTION_MAP[pbe_ext.RESOURCE_NAME], parent=parent, allow_pagination=True, allow_sorting=True, ) exts = [ extensions.ResourceExtension( pbe_ext.COLLECTION_NAME, controller, parent, member_actions=pbe_ext.ACTION_MAP[pbe_ext.RESOURCE_NAME], attr_map=params, ), ] return exts ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/portsecurity.py0000644000175000017500000000154000000000000024162 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import port_security from neutron_lib.api import extensions class Portsecurity(extensions.APIExtensionDescriptor): """Extension class supporting port security.""" api_definition = port_security ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/project_id.py0000644000175000017500000000205300000000000023530 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
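# [Editor's note - illustrative sketch, not part of the source tree.]
# get_resources() in Portbindings_extended above reads its controller
# wiring out of a SUB_RESOURCE_ATTRIBUTE_MAP keyed by collection name.
# The general shape of such a map, with toy values:
_TOY_SUB_RESOURCE_MAP = {
    'bindings': {
        'parent': {'collection_name': 'ports', 'member_name': 'port'},
        'parameters': {'host': {'allow_post': True, 'allow_put': False,
                                'is_visible': True}},
    },
}
_toy_params = _TOY_SUB_RESOURCE_MAP['bindings']['parameters']
_toy_parent = _TOY_SUB_RESOURCE_MAP['bindings']['parent']
assert _toy_parent['member_name'] == 'port'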
from neutron_lib.api.definitions import project_id as apidef from neutron_lib.api import extensions as api_extensions from neutron.api import extensions class Project_id(api_extensions.APIExtensionDescriptor): """Extension that indicates that project_id is enabled. This extension indicates that the Keystone V3 'project_id' field is supported in the API. """ api_definition = apidef extensions.register_custom_supported_check( apidef.ALIAS, lambda: True, plugin_agnostic=True ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/providernet.py0000644000175000017500000000243100000000000023747 0ustar00coreycorey00000000000000# Copyright (c) 2012 OpenStack Foundation. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import provider_net from neutron_lib.api import extensions class Providernet(extensions.APIExtensionDescriptor): """Extension class supporting provider networks. This class is used by neutron's extension framework to make metadata about the provider network extension available to clients. No new resources are defined by this extension. Instead, the existing network resource's request and response messages are extended with attributes in the provider namespace. With admin rights, network dictionaries returned will also include provider attributes. """ api_definition = provider_net ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/qos.py0000644000175000017500000002316200000000000022214 0ustar00coreycorey00000000000000# Copyright (c) 2015 Red Hat Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
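# [Editor's note - illustrative sketch, not part of the source tree.]
# Shim extensions such as project_id and pagination above register a
# custom "supported" check that simply returns True. A toy version of
# what such a registry could look like (the real implementation lives
# in neutron.api.extensions and is not reproduced here):
_toy_checks = {}

def _toy_register(alias, check, plugin_agnostic=False):
    _toy_checks[alias] = check

def _toy_is_supported(alias):
    check = _toy_checks.get(alias)
    return bool(check and check())

_toy_register('project-id', lambda: True, plugin_agnostic=True)
assert _toy_is_supported('project-id')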
import abc import itertools import re from neutron_lib.api.definitions import qos as apidef from neutron_lib.api import extensions as api_extensions from neutron_lib.plugins import constants from neutron_lib.plugins import directory from neutron_lib.services import base as service_base import six from neutron.api import extensions from neutron.api.v2 import base from neutron.api.v2 import resource_helper from neutron.objects.qos import rule as rule_object class Qos(api_extensions.APIExtensionDescriptor): """Quality of Service API extension.""" api_definition = apidef @classmethod def get_plugin_interface(cls): return QoSPluginBase @classmethod def get_resources(cls): """Returns Ext Resources.""" special_mappings = {'policies': 'policy'} plural_mappings = resource_helper.build_plural_mappings( special_mappings, itertools.chain( apidef.RESOURCE_ATTRIBUTE_MAP, apidef.SUB_RESOURCE_ATTRIBUTE_MAP)) resources = resource_helper.build_resource_info( plural_mappings, apidef.RESOURCE_ATTRIBUTE_MAP, constants.QOS, translate_name=True, allow_bulk=True) plugin = directory.get_plugin(constants.QOS) for collection_name in apidef.SUB_RESOURCE_ATTRIBUTE_MAP: resource_name = collection_name[:-1] parent = apidef.SUB_RESOURCE_ATTRIBUTE_MAP[ collection_name].get('parent') params = apidef.SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get( 'parameters') controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=True, parent=parent, allow_pagination=True, allow_sorting=True) resource = extensions.ResourceExtension( collection_name, controller, parent, path_prefix=apidef.API_PREFIX, attr_map=params) resources.append(resource) return resources @six.add_metaclass(abc.ABCMeta) class QoSPluginBase(service_base.ServicePluginBase): path_prefix = apidef.API_PREFIX # The rule object type to use for each incoming rule-related request. rule_objects = {'bandwidth_limit': rule_object.QosBandwidthLimitRule, 'dscp_marking': rule_object.QosDscpMarkingRule, 'minimum_bandwidth': rule_object.QosMinimumBandwidthRule} # Patterns used to call method proxies for all policy-rule-specific # method calls (see __getattr__ docstring, below). qos_rule_method_patterns = [ re.compile( r"^((create|update|delete)_policy_(?P<rule_type>.*)_rule)$"), re.compile( r"^(get_policy_(?P<rule_type>.*)_(rules|rule))$"), # The following entry handles rule alias calls re.compile( r"^((update|delete|get)_alias_(?P<rule_type>.*)_rule)$"), ] def __getattr__(self, attrib): """Implement method proxies for all policy-rule-specific requests. For a given request type (such as to update a rule), a single method will handle requests for all rule types. For example, the update_policy_rule method will handle requests for both update_policy_dscp_marking_rule and update_policy_bandwidth_limit_rule. In the case of rule alias calls, the update_rule method will handle requests for both update_dscp_marking_rule and update_bandwidth_limit_rule. :param attrib: the requested method; in the normal case, this will be, for example, "update_policy_dscp_marking_rule" :type attrib: str """ # Find and call the proxy method that implements the requested one. for pattern in self.qos_rule_method_patterns: res = re.match(pattern, attrib) if res: rule_type = res.group('rule_type') if rule_type in self.rule_objects: # Remove the rule_type value (plus underscore) from attrib # in order to get the proxy method name. So, for instance, # from "delete_policy_dscp_marking_rule" we'll get # "delete_policy_rule". 
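# [Editor's note, added] Likewise for alias calls: "update_alias_bandwidth_limit_rule" first drops "bandwidth_limit_" to yield "update_alias_rule", and the second replace below strips "alias_" to give the "update_rule" proxy.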
proxy_method = attrib.replace(rule_type + '_', '') proxy_method = proxy_method.replace('alias_', '') rule_cls = self.rule_objects[rule_type] return self._call_proxy_method(proxy_method, rule_cls) # If we got here, then either attrib matched no pattern or the # rule_type embedded in attrib wasn't in self.rule_objects. raise AttributeError(attrib) def _call_proxy_method(self, method_name, rule_cls): """Call proxy method. We need to add the rule_cls, obtained from the self.rule_objects dictionary, to the incoming args. The context is passed to proxy method as first argument; the remaining args will follow rule_cls. Some of the incoming method calls have the policy rule name as one of the keys in the kwargs. For instance, the incoming kwargs for the create_policy_bandwidth_limit_rule take this form: { 'bandwidth_limit_rule': { u'bandwidth_limit_rule': { 'max_burst_kbps': 0, u'max_kbps': u'100', 'tenant_id': u'a8a31c9434ff431cb789c809777505ec'} }, 'policy_id': u'46985da5-9684-402e-b0d7-b7adac909c3a' } We need to generalize this structure for all rule types so will (effectively) rename the rule-specific keyword (e.g., in the above, the first occurrence of 'bandwidth_limit_rule') to be 'rule_data'. :param method_name: the name of the method to call :type method_name: str :param rule_cls: the rule class, which is sent as an argument to the proxy method :type rule_cls: a class from the rule_object (qos.objects.rule) module """ def _make_call(method_name, rule_cls, *args, **kwargs): context = args[0] args_list = list(args[1:]) params = kwargs rule_data_name = rule_cls.rule_type + "_rule" alias_rule_data_name = 'alias_' + rule_data_name if rule_data_name in params: params['rule_data'] = params.pop(rule_data_name) elif alias_rule_data_name in params: params['rule_data'] = params.pop(alias_rule_data_name) return getattr(self, method_name)( context, rule_cls, *args_list, **params ) return lambda *args, **kwargs: _make_call( method_name, rule_cls, *args, **kwargs) def get_plugin_description(self): return "QoS Service Plugin for ports and networks" @classmethod def get_plugin_type(cls): return constants.QOS @abc.abstractmethod def get_rule_type(self, context, rule_type_name, fields=None): pass @abc.abstractmethod def get_rule_types(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass @abc.abstractmethod def create_policy(self, context, policy): pass @abc.abstractmethod def update_policy(self, context, policy_id, policy): pass @abc.abstractmethod def delete_policy(self, context, policy_id): pass @abc.abstractmethod def get_policy(self, context, policy_id, fields=None): pass @abc.abstractmethod def get_policies(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass @abc.abstractmethod def create_policy_rule(self, context, rule_cls, policy_id, rule_data): pass @abc.abstractmethod def update_policy_rule(self, context, rule_cls, rule_id, policy_id, rule_data): pass @abc.abstractmethod def delete_policy_rule(self, context, rule_cls, rule_id, policy_id): pass @abc.abstractmethod def get_policy_rule(self, context, rule_cls, rule_id, policy_id, fields=None): pass @abc.abstractmethod def get_policy_rules(self, context, rule_cls, policy_id, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass @abc.abstractmethod def update_rule(self, context, rule_cls, rule_id, rule_data): pass @abc.abstractmethod def delete_rule(self, context, rule_cls, rule_id): pass @abc.abstractmethod def 
get_rule(self, context, rule_cls, rule_id, fields=None): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/qos_bw_limit_direction.py0000644000175000017500000000447400000000000026147 0ustar00coreycorey00000000000000# Copyright (c) 2017 OVH SAS # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import qos as qos_apidef from neutron_lib.api import extensions as api_extensions from neutron_lib import constants as common_constants # The name of the extension. NAME = "Direction for QoS bandwidth limit rule" # The alias of the extension. ALIAS = "qos-bw-limit-direction" # The description of the extension. DESCRIPTION = ("Allow to configure QoS bandwidth limit rule with specific " "direction: ingress or egress") # The list of required extensions. REQUIRED_EXTENSIONS = [qos_apidef.ALIAS] # The list of optional extensions. OPTIONAL_EXTENSIONS = None # The resource attribute map for the extension. SUB_RESOURCE_ATTRIBUTE_MAP = { qos_apidef.BANDWIDTH_LIMIT_RULES: { 'parameters': { 'direction': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': common_constants.EGRESS_DIRECTION, 'validate': { 'type:values': common_constants.VALID_DIRECTIONS}}} } } class Qos_bw_limit_direction(api_extensions.ExtensionDescriptor): @classmethod def get_name(cls): return NAME @classmethod def get_alias(cls): return ALIAS @classmethod def get_description(cls): return DESCRIPTION @classmethod def get_updated(cls): return "2017-04-10T10:00:00-00:00" def get_required_extensions(self): return REQUIRED_EXTENSIONS or [] def get_optional_extensions(self): return OPTIONAL_EXTENSIONS or [] def get_extended_resources(self, version): if version == "2.0": return SUB_RESOURCE_ATTRIBUTE_MAP else: return {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/qos_bw_minimum_ingress.py0000644000175000017500000000151000000000000026162 0ustar00coreycorey00000000000000# Copyright (c) 2018 Ericsson # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
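# [Editor's note - illustrative sketch, not part of the source tree.]
# How the QoSPluginBase method patterns above pick the rule type out of
# a proxied call name, runnable standalone:
import re as _re

_toy_pat = _re.compile(
    r"^((create|update|delete)_policy_(?P<rule_type>.*)_rule)$")
_toy_m = _toy_pat.match('update_policy_bandwidth_limit_rule')
assert _toy_m and _toy_m.group('rule_type') == 'bandwidth_limit'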
from neutron_lib.api.definitions import qos_bw_minimum_ingress from neutron_lib.api import extensions as api_extensions class Qos_bw_minimum_ingress(api_extensions.APIExtensionDescriptor): api_definition = qos_bw_minimum_ingress ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/qos_default.py0000644000175000017500000000147000000000000023716 0ustar00coreycorey00000000000000# Copyright (c) 2017 Intel Corporation. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import qos_default as apidef from neutron_lib.api import extensions class Qos_default(extensions.APIExtensionDescriptor): api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/qos_fip.py0000644000175000017500000000321500000000000023047 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import l3 from neutron_lib.api import extensions from neutron_lib.services.qos import constants as qos_consts FIP_QOS_ALIAS = "qos-fip" EXTENDED_ATTRIBUTES_2_0 = { l3.FLOATINGIPS: { qos_consts.QOS_POLICY_ID: { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': None, 'validate': {'type:uuid_or_none': None}} } } class Qos_fip(extensions.ExtensionDescriptor): """Extension class supporting floating IP QoS in all routers.""" @classmethod def get_name(cls): return "Floating IP QoS" @classmethod def get_alias(cls): return FIP_QOS_ALIAS @classmethod def get_description(cls): return "The floating IP Quality of Service extension" @classmethod def get_updated(cls): return "2017-07-20T00:00:00-00:00" def get_required_extensions(self): return ["router", "qos"] def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/qos_gateway_ip.py0000644000175000017500000000162100000000000024421 0ustar00coreycorey00000000000000# Copyright 2018 OpenStack Foundation # Copyright 2017 Letv Cloud Computing # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import qos_gateway_ip as apidef from neutron_lib.api import extensions class Qos_gateway_ip(extensions.APIExtensionDescriptor): """Extension class supporting gateway IP rate limit in all routers.""" api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/qos_port_network_policy.py0000644000175000017500000000152000000000000026402 0ustar00coreycorey00000000000000# Copyright (c) 2020 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import qos_port_network_policy from neutron_lib.api import extensions as api_extensions class Qos_port_network_policy(api_extensions.APIExtensionDescriptor): api_definition = qos_port_network_policy ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/qos_rule_type_details.py0000644000175000017500000000152700000000000026012 0ustar00coreycorey00000000000000# Copyright (c) 2017 OVH SAS # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import qos_rule_type_details as apidef from neutron_lib.api import extensions as api_extensions class Qos_rule_type_details(api_extensions.APIExtensionDescriptor): api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/qos_rules_alias.py0000644000175000017500000000262000000000000024573 0ustar00coreycorey00000000000000# Copyright (c) 2018 Huawei Technology, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import qos_rules_alias as apidef from neutron_lib.api import extensions as api_extensions from neutron_lib.plugins import constants from neutron.api.v2 import resource_helper class Qos_rules_alias(api_extensions.APIExtensionDescriptor): """Extension class supporting QoS rules alias API resources.""" api_definition = apidef @classmethod def get_resources(cls): """Returns Ext Resources.""" plural_mappings = resource_helper.build_plural_mappings( {}, apidef.RESOURCE_ATTRIBUTE_MAP) return resource_helper.build_resource_info( plural_mappings, apidef.RESOURCE_ATTRIBUTE_MAP, constants.QOS, translate_name=True, allow_bulk=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/quotasv2.py0000644000175000017500000001376000000000000023201 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
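# The Qos_rules_alias extension above builds its REST plumbing through
# neutron.api.v2.resource_helper instead of hand-rolling controllers. A
# minimal, hedged sketch of that pattern follows; the 'widgets' map is
# hypothetical and only illustrates the expected shapes:

from neutron.api.v2 import resource_helper as _rh_example

_EXAMPLE_ATTR_MAP = {
    'widgets': {
        'id': {'allow_post': False, 'allow_put': False, 'is_visible': True},
    },
}

# build_plural_mappings() derives the singular form from each collection
# key ('widgets' -> 'widget'); its first argument overrides irregular
# plurals. build_resource_info() would then turn the same attribute map
# into ResourceExtension objects, as get_resources() does above.
_example_plurals = _rh_example.build_plural_mappings({}, _EXAMPLE_ATTR_MAP)
assert _example_plurals == {'widgets': 'widget'}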
from neutron_lib.api import converters from neutron_lib.api import extensions as api_extensions from neutron_lib.api import faults from neutron_lib.db import constants as const from neutron_lib import exceptions from neutron_lib.plugins import directory from oslo_config import cfg from oslo_utils import importutils import webob from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import base from neutron.api.v2 import resource from neutron import quota from neutron.quota import resource_registry from neutron import wsgi DEFAULT_QUOTAS_ACTION = 'default' RESOURCE_NAME = 'quota' RESOURCE_COLLECTION = RESOURCE_NAME + "s" QUOTAS = quota.QUOTAS DB_QUOTA_DRIVER = 'neutron.db.quota.driver.DbQuotaDriver' EXTENDED_ATTRIBUTES_2_0 = { RESOURCE_COLLECTION: {} } class QuotaSetsController(wsgi.Controller): def __init__(self, plugin): self._resource_name = RESOURCE_NAME self._plugin = plugin self._driver = importutils.import_class( cfg.CONF.QUOTAS.quota_driver ) self._update_extended_attributes = True def _update_attributes(self): for quota_resource in resource_registry.get_all_resources().keys(): attr_dict = EXTENDED_ATTRIBUTES_2_0[RESOURCE_COLLECTION] attr_dict[quota_resource] = { 'allow_post': False, 'allow_put': True, 'convert_to': converters.convert_to_int, 'validate': {'type:range': [-1, const.DB_INTEGER_MAX_VALUE]}, 'is_visible': True} self._update_extended_attributes = False def _get_quotas(self, request, tenant_id): return self._driver.get_tenant_quotas( request.context, resource_registry.get_all_resources(), tenant_id) def default(self, request, id): if id != request.context.tenant_id: self._check_admin(request.context, reason=_("Only admin is authorized " "to access quotas for another tenant")) return {self._resource_name: self._driver.get_default_quotas( context=request.context, resources=resource_registry.get_all_resources(), tenant_id=id)} def create(self, request, body=None): msg = _('POST requests are not supported on this resource.') raise webob.exc.HTTPNotImplemented(msg) def index(self, request): context = request.context self._check_admin(context) return {self._resource_name + "s": self._driver.get_all_quotas( context, resource_registry.get_all_resources())} def tenant(self, request): """Retrieve the tenant info in context.""" context = request.context if not context.tenant_id: raise exceptions.QuotaMissingTenant() return {'tenant': {'tenant_id': context.tenant_id}} def show(self, request, id): if id != request.context.tenant_id: self._check_admin(request.context, reason=_("Only admin is authorized " "to access quotas for another tenant")) return {self._resource_name: self._get_quotas(request, id)} def _check_admin(self, context, reason=_("Only admin can view or configure quota")): if not context.is_admin: raise exceptions.AdminRequired(reason=reason) def delete(self, request, id): self._check_admin(request.context) self._driver.delete_tenant_quota(request.context, id) def update(self, request, id, body=None): self._check_admin(request.context) if self._update_extended_attributes: self._update_attributes() body = base.Controller.prepare_request_body( request.context, body, False, self._resource_name, EXTENDED_ATTRIBUTES_2_0[RESOURCE_COLLECTION]) for key, value in body[self._resource_name].items(): self._driver.update_quota_limit(request.context, id, key, value) return {self._resource_name: self._get_quotas(request, id)} class Quotasv2(api_extensions.ExtensionDescriptor): """Quotas management support.""" extensions.register_custom_supported_check( 
RESOURCE_COLLECTION, lambda: True, plugin_agnostic=True) @classmethod def get_name(cls): return "Quota management support" @classmethod def get_alias(cls): return RESOURCE_COLLECTION @classmethod def get_description(cls): description = 'Expose functions for quotas management' if cfg.CONF.QUOTAS.quota_driver == DB_QUOTA_DRIVER: description += ' per tenant' return description @classmethod def get_updated(cls): return "2012-07-29T10:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" controller = resource.Resource( QuotaSetsController(directory.get_plugin()), faults=faults.FAULT_MAP) return [extensions.ResourceExtension( Quotasv2.get_alias(), controller, member_actions={DEFAULT_QUOTAS_ACTION: 'GET'}, collection_actions={'tenant': 'GET'})] def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/quotasv2_detail.py0000644000175000017500000000635600000000000024526 0ustar00coreycorey00000000000000# Copyright 2017 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api import extensions as api_extensions from neutron_lib.api import faults from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from oslo_config import cfg from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import resource from neutron.extensions import quotasv2 from neutron.quota import resource_registry DETAIL_QUOTAS_ACTION = 'details' RESOURCE_NAME = 'quota' ALIAS = RESOURCE_NAME + '_' + DETAIL_QUOTAS_ACTION QUOTA_DRIVER = cfg.CONF.QUOTAS.quota_driver RESOURCE_COLLECTION = RESOURCE_NAME + "s" DB_QUOTA_DRIVER = 'neutron.db.quota.driver.DbQuotaDriver' EXTENDED_ATTRIBUTES_2_0 = { RESOURCE_COLLECTION: {} } class DetailQuotaSetsController(quotasv2.QuotaSetsController): def _get_detailed_quotas(self, request, tenant_id): return self._driver.get_detailed_tenant_quotas( request.context, resource_registry.get_all_resources(), tenant_id) def details(self, request, id): if id != request.context.project_id: # Check if admin if not request.context.is_admin: reason = _("Only admin is authorized to access quotas for" " another tenant") raise n_exc.AdminRequired(reason=reason) return {self._resource_name: self._get_detailed_quotas(request, id)} class Quotasv2_detail(api_extensions.ExtensionDescriptor): """Quota details management support.""" # Ensure new extension is not loaded with old conf driver. 
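    # register_custom_supported_check() below gates whether the
    # 'quota_details' alias is advertised at all: its callable returns True
    # only when the configured quota driver is the reference DbQuotaDriver,
    # since the controller above relies on the driver exposing
    # get_detailed_tenant_quotas().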
extensions.register_custom_supported_check( ALIAS, lambda: QUOTA_DRIVER == DB_QUOTA_DRIVER, plugin_agnostic=True) @classmethod def get_name(cls): return "Quota details management support" @classmethod def get_alias(cls): return ALIAS @classmethod def get_description(cls): return 'Expose functions for quotas usage statistics per project' @classmethod def get_updated(cls): return "2017-02-10T10:00:00-00:00" @classmethod def get_resources(cls): """Returns Extension Resources.""" controller = resource.Resource( DetailQuotaSetsController(directory.get_plugin()), faults=faults.FAULT_MAP) return [extensions.ResourceExtension( RESOURCE_COLLECTION, controller, member_actions={'details': 'GET'}, collection_actions={'tenant': 'GET'})] def get_extended_resources(self, version): return EXTENDED_ATTRIBUTES_2_0 if version == "2.0" else {} def get_required_extensions(self): return ["quotas"] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/rbac.py0000644000175000017500000001221200000000000022313 0ustar00coreycorey00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api import extensions as api_extensions from neutron_lib.db import constants as db_const from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import base from neutron.conf import quota from neutron.db import rbac_db_models from neutron.quota import resource_registry class RbacPolicyNotFound(n_exc.NotFound): message = _("RBAC policy of type %(object_type)s with ID %(id)s not found") class RbacPolicyInUse(n_exc.Conflict): message = _("RBAC policy on object %(object_id)s cannot be removed " "because other objects depend on it.\nDetails: %(details)s") class DuplicateRbacPolicy(n_exc.Conflict): message = _("An RBAC policy already exists with those values.") class RbacPolicyInitError(n_exc.PolicyInitError): message = _("Failed to create RBAC policy on object %(object_id)s " "because %(reason)s.") def convert_valid_object_type(otype): normalized = otype.strip().lower() if normalized in rbac_db_models.get_type_model_map(): return normalized msg = _("'%s' is not a valid RBAC object type") % otype raise n_exc.InvalidInput(error_message=msg) RESOURCE_NAME = 'rbac_policy' RESOURCE_COLLECTION = 'rbac_policies' RESOURCE_ATTRIBUTE_MAP = { RESOURCE_COLLECTION: { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True, 'is_filter': True}, 'object_type': {'allow_post': True, 'allow_put': False, 'convert_to': convert_valid_object_type, 'is_visible': True, 'default': None, 'is_filter': True, 'enforce_policy': True}, 'object_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'enforce_policy': True, 'is_filter': True}, 'target_tenant': {'allow_post': True, 'allow_put': 
True, 'validate': { 'type:string': db_const.PROJECT_ID_FIELD_SIZE}, 'is_visible': True, 'enforce_policy': True, 'is_filter': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': { 'type:string': db_const.PROJECT_ID_FIELD_SIZE}, 'required_by_policy': True, 'is_visible': True, 'is_filter': True}, 'action': {'allow_post': True, 'allow_put': False, # action depends on type so validation has to occur in # the extension 'validate': { 'type:string': db_const.DESCRIPTION_FIELD_SIZE}, # we set enforce_policy so operators can define policies # that restrict actions 'is_visible': True, 'enforce_policy': True, 'is_filter': True} } } # Register the configuration options quota.register_quota_opts(quota.rbac_quota_opts) class Rbac(api_extensions.ExtensionDescriptor): """RBAC policy support.""" @classmethod def get_name(cls): return "RBAC Policies" @classmethod def get_alias(cls): return 'rbac-policies' @classmethod def get_description(cls): return ("Allows creation and modification of policies that control " "tenant access to resources.") @classmethod def get_updated(cls): return "2015-06-17T12:15:12-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" plugin = directory.get_plugin() params = RESOURCE_ATTRIBUTE_MAP['rbac_policies'] collection_name = 'rbac-policies' resource_name = 'rbac_policy' resource_registry.register_resource_by_name(resource_name) controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=True, allow_pagination=False, allow_sorting=True) return [extensions.ResourceExtension(collection_name, controller, attr_map=params)] def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP return {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/rbac_address_scope.py0000644000175000017500000000160200000000000025212 0ustar00coreycorey00000000000000# Copyright (c) 2020 Cloudification GmbH. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import rbac_address_scope from neutron_lib.api import extensions class Rbac_address_scope(extensions.APIExtensionDescriptor): """Extension class supporting address scope RBAC.""" api_definition = rbac_address_scope ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/rbac_security_groups.py0000644000175000017500000000160100000000000025641 0ustar00coreycorey00000000000000# Copyright (c) 2019 Salesforce. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import rbac_security_groups from neutron_lib.api import extensions class Rbac_security_groups(extensions.APIExtensionDescriptor): """Extension class supporting security groups RBAC.""" api_definition = rbac_security_groups ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/rbac_subnetpool.py0000644000175000017500000000156600000000000024577 0ustar00coreycorey00000000000000# Copyright (c) 2020 Cloudification GmbH. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import rbac_subnetpool from neutron_lib.api import extensions class Rbac_subnetpool(extensions.APIExtensionDescriptor): """Extension class supporting subnetpool RBAC.""" api_definition = rbac_subnetpool ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/revisionifmatch.py0000644000175000017500000000151100000000000024576 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from neutron_lib.api.definitions import revisionifmatch as apidef from neutron_lib.api import extensions as api_extensions class Revisionifmatch(api_extensions.APIExtensionDescriptor): """Indicate that If-Match constraints on revision_number are supported.""" api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/revisions.py0000644000175000017500000000301500000000000023426 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api import extensions from neutron.extensions import stdattrs_common REVISION = 'revision_number' REVISION_BODY = { REVISION: {'allow_post': False, 'allow_put': False, 'is_visible': True, 'default': None, 'is_filter': True}, } class Revisions(extensions.ExtensionDescriptor): """Extension to expose revision number of standard attr resources.""" @classmethod def get_name(cls): return "Resource revision numbers" @classmethod def get_alias(cls): return "standard-attr-revisions" @classmethod def get_description(cls): return ("This extension will display the revision number of neutron " "resources.") @classmethod def get_updated(cls): return "2016-04-11T10:00:00-00:00" def get_extended_resources(self, version): if version != "2.0": return {} return stdattrs_common.stdattrs_extended_resources(REVISION_BODY) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/router_availability_zone.py0000644000175000017500000000204000000000000026507 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc from neutron_lib.api.definitions import router_availability_zone as apidef from neutron_lib.api import extensions import six class Router_availability_zone(extensions.APIExtensionDescriptor): """Router availability zone extension.""" api_definition = apidef @six.add_metaclass(abc.ABCMeta) class RouterAvailabilityZonePluginBase(object): @abc.abstractmethod def get_router_availability_zones(self, router): """Return availability zones which a router belongs to.""" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/routerservicetype.py0000644000175000017500000000156300000000000025216 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
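# RouterAvailabilityZonePluginBase above is an abstract mix-in: an L3 plugin
# that advertises the router availability zone extension must implement
# get_router_availability_zones(). A minimal sketch, using a hypothetical
# plugin class that is not part of this tree:

from neutron.extensions import router_availability_zone as _raz_example


class _ExampleAZPlugin(_raz_example.RouterAvailabilityZonePluginBase):
    def get_router_availability_zones(self, router):
        # A real plugin derives this from the L3 agents hosting the router;
        # this sketch just echoes whatever the router dict already carries.
        return router.get('availability_zones', [])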
from neutron_lib.api.definitions import routerservicetype as apidef from neutron_lib.api import extensions class Routerservicetype(extensions.APIExtensionDescriptor): """Extension class supporting router service type.""" api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/security_groups_port_filtering.py0000644000175000017500000000165500000000000027772 0ustar00coreycorey00000000000000# Copyright (c) 2018 Nokia. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import security_groups_port_filtering from neutron_lib.api import extensions class Security_groups_port_filtering(extensions.APIExtensionDescriptor): """Extension class supporting filtering port depend on security group.""" api_definition = security_groups_port_filtering ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/securitygroup.py0000644000175000017500000003457300000000000024346 0ustar00coreycorey00000000000000# Copyright (c) 2012 OpenStack Foundation. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import netaddr from neutron_lib.api import converters from neutron_lib.api import extensions as api_extensions from neutron_lib.api import validators from neutron_lib import constants as const from neutron_lib.db import constants as db_const from neutron_lib import exceptions from neutron_lib.plugins import directory from oslo_utils import netutils import six from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import base from neutron.conf import quota from neutron.extensions import standardattrdescription as stdattr_ext from neutron.quota import resource_registry # Security group Exceptions class SecurityGroupInvalidPortRange(exceptions.InvalidInput): message = _("For TCP/UDP protocols, port_range_min must be " "<= port_range_max") class SecurityGroupInvalidProtocolForPort(exceptions.InvalidInput): message = _("Ports cannot be specified for protocol %(protocol)s. " "Ports are only supported for %(valid_port_protocols)s.") class SecurityGroupInvalidPortValue(exceptions.InvalidInput): message = _("Invalid value for port %(port)s") class SecurityGroupInvalidIcmpValue(exceptions.InvalidInput): message = _("Invalid value for ICMP %(field)s (%(attr)s) " "%(value)s. 
It must be 0 to 255.") class SecurityGroupEthertypeConflictWithProtocol(exceptions.InvalidInput): message = _("Invalid ethertype %(ethertype)s for protocol " "%(protocol)s.") class SecurityGroupMissingIcmpType(exceptions.InvalidInput): message = _("ICMP code (port-range-max) %(value)s is provided" " but ICMP type (port-range-min) is missing.") class SecurityGroupInUse(exceptions.InUse): message = _("Security Group %(id)s %(reason)s.") def __init__(self, **kwargs): if 'reason' not in kwargs: kwargs['reason'] = _("in use") super(SecurityGroupInUse, self).__init__(**kwargs) class SecurityGroupCannotRemoveDefault(exceptions.InUse): message = _("Insufficient rights for removing default security group.") class SecurityGroupCannotUpdateDefault(exceptions.InUse): message = _("Updating default security group not allowed.") class SecurityGroupDefaultAlreadyExists(exceptions.InUse): message = _("Default security group already exists.") class SecurityGroupRuleInvalidProtocol(exceptions.InvalidInput): message = _("Security group rule protocol %(protocol)s not supported. " "Only protocol values %(values)s and integer representations " "[0 to 255] are supported.") class SecurityGroupRulesNotSingleTenant(exceptions.InvalidInput): message = _("Multiple tenant_ids in bulk security group rule create" " not allowed") class SecurityGroupRemoteGroupAndRemoteIpPrefix(exceptions.InvalidInput): message = _("Only remote_ip_prefix or remote_group_id may " "be provided.") class SecurityGroupProtocolRequiredWithPorts(exceptions.InvalidInput): message = _("Must also specify protocol if port range is given.") class SecurityGroupNotSingleGroupRules(exceptions.InvalidInput): message = _("Only allowed to update rules for " "one security profile at a time") class SecurityGroupNotFound(exceptions.NotFound): message = _("Security group %(id)s does not exist") class SecurityGroupRuleNotFound(exceptions.NotFound): message = _("Security group rule %(id)s does not exist") class DuplicateSecurityGroupRuleInPost(exceptions.InUse): message = _("Duplicate Security Group Rule in POST.") class SecurityGroupRuleExists(exceptions.InUse): message = _("Security group rule already exists. Rule id is %(rule_id)s.") class SecurityGroupRuleInUse(exceptions.InUse): message = _("Security Group Rule %(id)s %(reason)s.") def __init__(self, **kwargs): if 'reason' not in kwargs: kwargs['reason'] = _("in use") super(SecurityGroupRuleInUse, self).__init__(**kwargs) class SecurityGroupRuleParameterConflict(exceptions.InvalidInput): message = _("Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s") class SecurityGroupConflict(exceptions.Conflict): message = _("Error %(reason)s while attempting the operation.") class SecurityGroupRuleInvalidEtherType(exceptions.InvalidInput): message = _("Security group rule for ethertype '%(ethertype)s' not " "supported. Allowed values are %(values)s.") def convert_protocol(value): if value is None: return try: val = int(value) if 0 <= val <= 255: # Set value of protocol number to string due to bug 1381379, # PostgreSQL fails when it tries to compare integer with string, # that exists in db. 
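            # Note: it is the original input, not the normalized int(),
            # that is stringified, so a value such as '06' is returned
            # verbatim once it passes the 0-255 range check.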
return str(value) raise SecurityGroupRuleInvalidProtocol( protocol=value, values=sg_supported_protocols) except (ValueError, TypeError): if value.lower() in sg_supported_protocols: return value.lower() raise SecurityGroupRuleInvalidProtocol( protocol=value, values=sg_supported_protocols) except AttributeError: raise SecurityGroupRuleInvalidProtocol( protocol=value, values=sg_supported_protocols) def convert_ethertype_to_case_insensitive(value): if isinstance(value, six.string_types): for ethertype in sg_supported_ethertypes: if ethertype.lower() == value.lower(): return ethertype raise SecurityGroupRuleInvalidEtherType( ethertype=value, values=sg_supported_ethertypes) def convert_validate_port_value(port): if port is None: return port if netutils.is_valid_port(port): return int(port) else: raise SecurityGroupInvalidPortValue(port=port) def convert_ip_prefix_to_cidr(ip_prefix): if not ip_prefix: return try: cidr = netaddr.IPNetwork(ip_prefix) return str(cidr) except (ValueError, TypeError, netaddr.AddrFormatError): raise exceptions.InvalidCIDR(input=ip_prefix) def _validate_name_not_default(data, max_len=db_const.NAME_FIELD_SIZE): msg = validators.validate_string(data, max_len) if msg: return msg if data.lower() == "default": raise SecurityGroupDefaultAlreadyExists() validators.add_validator('name_not_default', _validate_name_not_default) sg_supported_protocols = ([None] + list(const.IP_PROTOCOL_MAP.keys())) sg_supported_ethertypes = ['IPv4', 'IPv6'] SECURITYGROUPS = 'security_groups' SECURITYGROUPRULES = 'security_group_rules' # Attribute Map RESOURCE_ATTRIBUTE_MAP = { SECURITYGROUPS: { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'is_filter': True, 'is_sort_key': True, 'primary_key': True}, 'name': {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'is_filter': True, 'is_sort_key': True, 'validate': { 'type:name_not_default': db_const.NAME_FIELD_SIZE}}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'is_sort_key': True, 'validate': { 'type:string': db_const.PROJECT_ID_FIELD_SIZE}, 'is_visible': True, 'is_filter': True}, SECURITYGROUPRULES: {'allow_post': False, 'allow_put': False, 'is_visible': True}, }, SECURITYGROUPRULES: { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'is_filter': True, 'is_sort_key': True, 'primary_key': True}, 'security_group_id': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'required_by_policy': True, 'is_sort_key': True, 'is_filter': True}, 'remote_group_id': {'allow_post': True, 'allow_put': False, 'default': None, 'is_visible': True, 'is_sort_key': True, 'is_filter': True}, 'direction': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'is_filter': True, 'is_sort_key': True, 'validate': {'type:values': ['ingress', 'egress']}}, 'protocol': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': None, 'is_sort_key': True, 'is_filter': True, 'convert_to': convert_protocol}, 'port_range_min': {'allow_post': True, 'allow_put': False, 'convert_to': convert_validate_port_value, 'default': None, 'is_visible': True, 'is_sort_key': True, 'is_filter': True}, 'port_range_max': {'allow_post': True, 'allow_put': False, 'convert_to': convert_validate_port_value, 'default': None, 'is_visible': True, 'is_sort_key': True, 'is_filter': True}, 'ethertype': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': 'IPv4', 'is_filter': True, 'is_sort_key': True, 
'convert_to': convert_ethertype_to_case_insensitive, 'validate': {'type:values': sg_supported_ethertypes}}, 'remote_ip_prefix': {'allow_post': True, 'allow_put': False, 'default': None, 'is_visible': True, 'is_sort_key': True, 'is_filter': True, 'convert_to': convert_ip_prefix_to_cidr}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'is_sort_key': True, 'validate': { 'type:string': db_const.PROJECT_ID_FIELD_SIZE}, 'is_visible': True, 'is_filter': True}, } } EXTENDED_ATTRIBUTES_2_0 = { 'ports': {SECURITYGROUPS: {'allow_post': True, 'allow_put': True, 'is_visible': True, 'is_filter': True, 'convert_to': converters.convert_none_to_empty_list, 'validate': {'type:uuid_list': None}, 'default': const.ATTR_NOT_SPECIFIED}}} # Register the configuration options quota.register_quota_opts(quota.security_group_quota_opts) class Securitygroup(api_extensions.ExtensionDescriptor): """Security group extension.""" @classmethod def get_name(cls): return "security-group" @classmethod def get_alias(cls): return "security-group" @classmethod def get_description(cls): return "The security groups extension." @classmethod def get_updated(cls): return "2012-10-05T10:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" exts = [] plugin = directory.get_plugin() for resource_name in ['security_group', 'security_group_rule']: collection_name = resource_name.replace('_', '-') + "s" params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + "s", dict()) resource_registry.register_resource_by_name(resource_name) controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=True, allow_pagination=True, allow_sorting=True) ex = extensions.ResourceExtension(collection_name, controller, attr_map=params) exts.append(ex) return exts def update_attributes_map(self, attributes): super(Securitygroup, self).update_attributes_map( attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) def get_extended_resources(self, version): if version == "2.0": return dict(list(EXTENDED_ATTRIBUTES_2_0.items()) + list(RESOURCE_ATTRIBUTE_MAP.items())) else: return {} def get_required_extensions(self): return [stdattr_ext.Standardattrdescription.get_alias()] @six.add_metaclass(abc.ABCMeta) class SecurityGroupPluginBase(object): @abc.abstractmethod def create_security_group(self, context, security_group): pass @abc.abstractmethod def update_security_group(self, context, id, security_group): pass @abc.abstractmethod def delete_security_group(self, context, id): pass @abc.abstractmethod def get_security_groups(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass @abc.abstractmethod def get_security_group(self, context, id, fields=None): pass @abc.abstractmethod def create_security_group_rule(self, context, security_group_rule): pass @abc.abstractmethod def delete_security_group_rule(self, context, id): pass @abc.abstractmethod def get_security_group_rules(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass @abc.abstractmethod def get_security_group_rule(self, context, id, fields=None): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/segment.py0000644000175000017500000002320300000000000023050 0ustar00coreycorey00000000000000# Copyright (c) 2016 Hewlett Packard Enterprise Development Company, L.P. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from neutron_lib.api import converters from neutron_lib.api.definitions import provider_net as providernet from neutron_lib.api.definitions import subnet as subnet_def from neutron_lib.api import extensions as api_extensions from neutron_lib import constants from neutron_lib.db import constants as db_const from neutron_lib.plugins import directory import six from neutron.api import extensions from neutron.api.v2 import base from neutron.extensions import _standard_attr_segment_lib as stdattrseg_apidef from neutron.extensions import standardattrdescription as ext_stddesc SEGMENT = 'segment' SEGMENTS = '%ss' % SEGMENT SEGMENT_ID = 'segment_id' NETWORK_TYPE = 'network_type' PHYSICAL_NETWORK = 'physical_network' SEGMENTATION_ID = 'segmentation_id' NAME_LEN = db_const.NAME_FIELD_SIZE DESC_LEN = db_const.DESCRIPTION_FIELD_SIZE # Attribute Map RESOURCE_ATTRIBUTE_MAP = { SEGMENTS: { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_filter': True, 'is_sort_key': True, 'is_visible': True, 'primary_key': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': db_const.PROJECT_ID_FIELD_SIZE}, 'is_sort_key': True, 'is_visible': False}, 'network_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_filter': True, 'is_sort_key': True, 'is_visible': True}, PHYSICAL_NETWORK: {'allow_post': True, 'allow_put': False, 'default': constants.ATTR_NOT_SPECIFIED, 'validate': {'type:string': providernet.PHYSICAL_NETWORK_MAX_LEN}, 'is_filter': True, 'is_sort_key': True, 'is_visible': True}, NETWORK_TYPE: {'allow_post': True, 'allow_put': False, 'validate': {'type:string': providernet.NETWORK_TYPE_MAX_LEN}, 'is_filter': True, 'is_sort_key': True, 'is_visible': True}, SEGMENTATION_ID: {'allow_post': True, 'allow_put': False, 'default': constants.ATTR_NOT_SPECIFIED, 'convert_to': converters.convert_to_int, 'is_sort_key': True, 'is_visible': True}, 'name': {'allow_post': True, 'allow_put': True, 'default': constants.ATTR_NOT_SPECIFIED, 'validate': {'type:string_or_none': NAME_LEN}, 'is_filter': True, 'is_sort_key': True, 'is_visible': True} }, subnet_def.COLLECTION_NAME: { SEGMENT_ID: {'allow_post': True, 'allow_put': False, 'default': None, 'validate': {'type:uuid_or_none': None}, 'is_filter': True, 'is_visible': True, }, }, } class Segment(api_extensions.ExtensionDescriptor): """Extension class supporting Segments.""" @classmethod def get_name(cls): return "Segment" @classmethod def get_alias(cls): return "segment" @classmethod def get_description(cls): return "Segments extension." 
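    # Note: RESOURCE_ATTRIBUTE_MAP above does double duty: it defines the
    # new top-level 'segments' collection and also extends the existing
    # subnet resource with a 'segment_id' attribute that may be set on
    # create (allow_post) but not changed afterwards (no allow_put).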
@classmethod def get_updated(cls): return "2016-02-24T17:00:00-00:00" @classmethod def get_resources(cls): """Returns Extended Resource for service type management.""" resource_attributes = RESOURCE_ATTRIBUTE_MAP[SEGMENTS] controller = base.create_resource( SEGMENTS, SEGMENT, directory.get_plugin(SEGMENTS), resource_attributes, allow_pagination=True, allow_sorting=True) return [extensions.ResourceExtension(SEGMENTS, controller, attr_map=resource_attributes)] def update_attributes_map(self, attributes): super(Segment, self).update_attributes_map( attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} def get_required_extensions(self): return [ext_stddesc.Standardattrdescription.get_alias(), stdattrseg_apidef.ALIAS] @six.add_metaclass(abc.ABCMeta) class SegmentPluginBase(object): @abc.abstractmethod def create_segment(self, context, segment): """Create a segment. Create a segment, which represents an L2 segment of a network. :param context: neutron api request context :param segment: dictionary describing the segment, with keys as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/extensions/segment.py`. All keys will be populated. """ @abc.abstractmethod def update_segment(self, context, uuid, segment): """Update values of a segment. :param context: neutron api request context :param uuid: UUID representing the segment to update. :param segment: dictionary with keys indicating fields to update. valid keys are those that have a value of True for 'allow_put' as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/extensions/segment.py`. """ @abc.abstractmethod def get_segment(self, context, uuid, fields=None): """Retrieve a segment. :param context: neutron api request context :param uuid: UUID representing the segment to fetch. :param fields: a list of strings that are valid keys in a segment dictionary as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/extensions/segment.py`. Only these fields will be returned. """ @abc.abstractmethod def get_segments(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """Retrieve a list of segments. The contents of the list depends on the identity of the user making the request (as indicated by the context) as well as any filters. :param context: neutron api request context :param filters: a dictionary with keys that are valid keys for a segment as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/extensions/segment.py`. Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. :param fields: a list of strings that are valid keys in a segment dictionary as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/extensions/segment.py`. Only these fields will be returned. """ @abc.abstractmethod def delete_segment(self, context, uuid): """Delete a segment. :param context: neutron api request context :param uuid: UUID representing the segment to delete. """ @abc.abstractmethod def get_segments_count(self, context, filters=None): """Return the number of segments. The result depends on the identity of the user making the request (as indicated by the context) as well as any filters. 
:param context: neutron api request context :param filters: a dictionary with keys that are valid keys for a segment as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/extensions/segment.py`. Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. """ def get_plugin_description(self): return "Network Segments" @classmethod def get_plugin_type(cls): return SEGMENTS ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/segments_peer_subnet_host_routes.py0000644000175000017500000000146000000000000030265 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import segments_peer_subnet_host_routes from neutron_lib.api import extensions class Segments_peer_subnet_host_routes(extensions.APIExtensionDescriptor): api_definition = segments_peer_subnet_host_routes ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/servicetype.py0000644000175000017500000000305400000000000023752 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
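# The SegmentPluginBase docstrings above spell out the filter semantics of
# get_segments(): each filter key maps to an iterable of values matched
# exactly, OR-ed within a key and AND-ed across keys. A hedged usage
# sketch, assuming a deployment with the segments service plugin loaded:

from neutron_lib.plugins import directory as _directory_example


def _example_list_vlan_segments(context):
    # 'segments' is the plugin type the Segment extension registers under.
    plugin = _directory_example.get_plugin('segments')
    return plugin.get_segments(
        context,
        filters={'network_type': ['vlan']},
        fields=['id', 'physical_network', 'segmentation_id'])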
from neutron_lib.api.definitions import servicetype as apidef from neutron_lib.api import extensions as api_extensions from neutron.api import extensions from neutron.api.v2 import base from neutron.db import servicetype_db class Servicetype(api_extensions.APIExtensionDescriptor): api_definition = apidef @classmethod def get_resources(cls): """Returns Extended Resource for service type management.""" attr_map = apidef.RESOURCE_ATTRIBUTE_MAP[apidef.COLLECTION_NAME] collection_name = apidef.COLLECTION_NAME.replace('_', '-') controller = base.create_resource( collection_name, apidef.RESOURCE_NAME, servicetype_db.ServiceTypeManager.get_instance(), attr_map) return [extensions.ResourceExtension(collection_name, controller, attr_map=attr_map)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/sorting.py0000644000175000017500000000167700000000000023106 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from neutron_lib.api.definitions import sorting as apidef from neutron_lib.api import extensions as api_extensions from neutron.api import extensions class Sorting(api_extensions.APIExtensionDescriptor): """Fake extension that indicates that sorting is enabled.""" extensions.register_custom_supported_check( apidef.ALIAS, lambda: True, plugin_agnostic=True ) api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/standard_attr_segment.py0000644000175000017500000000140600000000000025763 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.extensions import _standard_attr_segment_lib as apidef from neutron_lib.api import extensions class Standard_attr_segment(extensions.APIExtensionDescriptor): api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/standardattrdescription.py0000644000175000017500000000331000000000000026342 0ustar00coreycorey00000000000000# Copyright 2016 OpenStack Foundation # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api import extensions from neutron_lib.db import constants as db_const from neutron.extensions import stdattrs_common DESCRIPTION_BODY = { 'description': {'allow_post': True, 'allow_put': True, 'validate': { 'type:string': db_const.DESCRIPTION_FIELD_SIZE}, 'is_visible': True, 'default': '', 'is_filter': True} } class Standardattrdescription(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "standard-attr-description" @classmethod def get_alias(cls): return "standard-attr-description" @classmethod def get_description(cls): return "Extension to add descriptions to standard attributes" @classmethod def get_updated(cls): return "2016-02-10T10:00:00-00:00" def get_optional_extensions(self): return ['security-group', 'router'] def get_extended_resources(self, version): if version != "2.0": return {} return stdattrs_common.stdattrs_extended_resources(DESCRIPTION_BODY) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/stateful_security_group.py0000644000175000017500000000161000000000000026376 0ustar00coreycorey00000000000000# Copyright (c) 2018 Nokia. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import stateful_security_group from neutron_lib.api import extensions class Stateful_security_group(extensions.APIExtensionDescriptor): """Extension class supporting stateful security group.""" api_definition = stateful_security_group ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/stdattrs_common.py0000644000175000017500000000217600000000000024634 0ustar00coreycorey00000000000000# Copyright (c) 2018 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
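# Standardattrdescription above attaches one shared attribute body to every
# resource backed by the standard-attributes table, via
# stdattrs_common.stdattrs_extended_resources() (defined just below). A
# small illustration of the resulting shape; the resource names here are
# hypothetical stand-ins for whatever the standard_attr model map yields:

_EXAMPLE_BODY = {'description': {'allow_post': True, 'allow_put': True,
                                 'is_visible': True, 'default': ''}}

# Top-level resources get the attribute body directly, while sub-resources
# get it wrapped under a 'parameters' key:
_example_extended = {
    'networks': _EXAMPLE_BODY,                           # resource
    'routers': _EXAMPLE_BODY,                            # resource
    'some_subresource': {'parameters': _EXAMPLE_BODY},   # sub-resource
}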
import itertools from neutron.db import standard_attr def stdattrs_extended_resources(attributes): r_map = standard_attr.get_standard_attr_resource_model_map( include_resources=True, include_sub_resources=False) sr_map = standard_attr.get_standard_attr_resource_model_map( include_resources=False, include_sub_resources=True) return dict(itertools.chain( {r: attributes for r in r_map}.items(), {sr: {'parameters': attributes} for sr in sr_map}.items() )) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/subnet_dns_publish_fixed_ip.py0000644000175000017500000000154700000000000027156 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import subnet_dns_publish_fixed_ip as apidef from neutron_lib.api import extensions class Subnet_dns_publish_fixed_ip(extensions.APIExtensionDescriptor): """Extension class supporting dns_publish_fixed_ip attribute for subnet.""" api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/subnet_onboard.py0000644000175000017500000000270400000000000024415 0ustar00coreycorey00000000000000# (c) Copyright 2017 Hewlett Packard Enterprise Development LP # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import subnet_onboard as subnet_onboard_def from neutron_lib.api.definitions import subnetpool as subnetpool_def from neutron_lib.api import extensions from neutron.api.v2 import resource_helper class Subnet_onboard(extensions.APIExtensionDescriptor): """API extension for subnet onboard.""" api_definition = subnet_onboard_def @classmethod def get_resources(cls): """Returns Ext Resources.""" plural_mappings = resource_helper.build_plural_mappings( {}, subnetpool_def.RESOURCE_ATTRIBUTE_MAP) return resource_helper.build_resource_info( plural_mappings, subnetpool_def.RESOURCE_ATTRIBUTE_MAP, None, action_map=subnet_onboard_def.ACTION_MAP, register_quota=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/subnet_segmentid_writable.py0000644000175000017500000000152400000000000026640 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import subnet_segmentid_writable as apidef from neutron_lib.api import extensions class Subnet_segmentid_writable(extensions.APIExtensionDescriptor): """Extension class supporting writable subnet segment_id.""" api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/subnet_service_types.py0000644000175000017500000000150300000000000025651 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import subnet_service_types as apidef from neutron_lib.api import extensions class Subnet_service_types(extensions.APIExtensionDescriptor): """Extension class supporting subnet service types.""" api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/subnetallocation.py0000644000175000017500000000255300000000000024761 0ustar00coreycorey00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api import extensions from neutron_lib import constants class Subnetallocation(extensions.ExtensionDescriptor): """Extension class supporting subnet allocation.""" @classmethod def get_name(cls): return "Subnet Allocation" @classmethod def get_alias(cls): return constants.SUBNET_ALLOCATION_EXT_ALIAS @classmethod def get_description(cls): return "Enables allocation of subnets from a subnet pool" @classmethod def get_updated(cls): return "2015-03-30T10:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" return [] def get_extended_resources(self, version): return {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/subnetpool_prefix_ops.py0000644000175000017500000000357000000000000026043 0ustar00coreycorey00000000000000# (c) Copyright 2019 SUSE LLC # # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import subnetpool as subnetpool_def from neutron_lib.api.definitions import subnetpool_prefix_ops \ as subnetpool_prefix_ops_def from neutron_lib.api import extensions import webob.exc from neutron._i18n import _ from neutron.api.v2 import resource_helper def get_operation_request_body(body): if not isinstance(body, dict): msg = _('Request body contains invalid data') raise webob.exc.HTTPBadRequest(msg) prefixes = body.get('prefixes') if not prefixes or not isinstance(prefixes, list): msg = _('Request body contains invalid data') raise webob.exc.HTTPBadRequest(msg) return prefixes class Subnetpool_prefix_ops(extensions.APIExtensionDescriptor): """API extension for subnet onboard.""" api_definition = subnetpool_prefix_ops_def @classmethod def get_resources(cls): """Returns Ext Resources.""" plural_mappings = resource_helper.build_plural_mappings( {}, subnetpool_def.RESOURCE_ATTRIBUTE_MAP) return resource_helper.build_resource_info( plural_mappings, subnetpool_def.RESOURCE_ATTRIBUTE_MAP, None, action_map=subnetpool_prefix_ops_def.ACTION_MAP, register_quota=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/tag_ports_during_bulk_creation.py0000644000175000017500000000162600000000000027666 0ustar00coreycorey00000000000000# Copyright (c) 2019 Verizon Media # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import \ tag_ports_during_bulk_creation as apidef from neutron_lib.api import extensions class Tag_ports_during_bulk_creation(extensions.APIExtensionDescriptor): """Extension to tag ports during bulk creation.""" api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/tagging.py0000644000175000017500000002241300000000000023030 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import abc import copy from neutron_lib.api.definitions import port from neutron_lib.api import extensions as api_extensions from neutron_lib.api import faults from neutron_lib.api import validators from neutron_lib import exceptions from neutron_lib.plugins import directory from neutron_lib import rpc as n_rpc from neutron_lib.services import base as service_base import six import webob.exc from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import resource as api_resource from neutron.db import standard_attr TAG = 'tag' TAGS = TAG + 's' TAGS_ANY = TAGS + '-any' NOT_TAGS = 'not-' + TAGS NOT_TAGS_ANY = NOT_TAGS + '-any' MAX_TAG_LEN = 60 TAG_PLUGIN_TYPE = 'TAG' TAG_SUPPORTED_RESOURCES = standard_attr.get_tag_resource_parent_map() TAG_ATTRIBUTE_MAP = { TAGS: {'allow_post': False, 'allow_put': False, 'is_visible': True, 'is_filter': True}, TAGS_ANY: {'allow_post': False, 'allow_put': False, 'is_visible': False, 'is_filter': True}, NOT_TAGS: {'allow_post': False, 'allow_put': False, 'is_visible': False, 'is_filter': True}, NOT_TAGS_ANY: {'allow_post': False, 'allow_put': False, 'is_visible': False, 'is_filter': True}, } TAG_ATTRIBUTE_MAP_PORTS = copy.deepcopy(TAG_ATTRIBUTE_MAP) TAG_ATTRIBUTE_MAP_PORTS[TAGS] = { 'allow_post': True, 'allow_put': False, 'validate': {'type:list_of_unique_strings': MAX_TAG_LEN}, 'default': [], 'is_visible': True, 'is_filter': True } class TagResourceNotFound(exceptions.NotFound): message = _("Resource %(resource)s %(resource_id)s could not be found.") class TagNotFound(exceptions.NotFound): message = _("Tag %(tag)s could not be found.") def validate_tag(tag): msg = validators.validate_string(tag, MAX_TAG_LEN) if msg: raise exceptions.InvalidInput(error_message=msg) def validate_tags(body): if not isinstance(body, dict) or 'tags' not in body: raise exceptions.InvalidInput(error_message=_("Invalid tags body")) msg = validators.validate_list_of_unique_strings(body['tags'], MAX_TAG_LEN) if msg: raise exceptions.InvalidInput(error_message=msg) def notify_tag_action(context, action, parent, parent_id, tags=None): notifier = n_rpc.get_notifier('network') tag_event = 'tag.%s' % action # TODO(hichihara): Add 'updated_at' into payload payload = {'parent_resource': parent, 'parent_resource_id': parent_id} if tags is not None: payload['tags'] = tags notifier.info(context, tag_event, payload) class TaggingController(object): def __init__(self): self.plugin = directory.get_plugin(TAG_PLUGIN_TYPE) self.supported_resources = TAG_SUPPORTED_RESOURCES def _get_parent_resource_and_id(self, kwargs): for key in kwargs: for resource in self.supported_resources: if key == self.supported_resources[resource] + '_id': return resource, kwargs[key] return None, None def index(self, request, **kwargs): # GET /v2.0/networks/{network_id}/tags parent, parent_id = self._get_parent_resource_and_id(kwargs) return self.plugin.get_tags(request.context, parent, parent_id) def show(self, request, id, **kwargs): # GET /v2.0/networks/{network_id}/tags/{tag} # id == tag validate_tag(id) parent, parent_id = self._get_parent_resource_and_id(kwargs) return self.plugin.get_tag(request.context, parent, parent_id, id) def create(self, request, **kwargs): # not supported # POST /v2.0/networks/{network_id}/tags raise webob.exc.HTTPNotFound("not supported") def update(self, request, id, **kwargs): # PUT /v2.0/networks/{network_id}/tags/{tag} # id == tag validate_tag(id) parent, 
parent_id = self._get_parent_resource_and_id(kwargs) notify_tag_action(request.context, 'create.start', parent, parent_id, [id]) result = self.plugin.update_tag(request.context, parent, parent_id, id) notify_tag_action(request.context, 'create.end', parent, parent_id, [id]) return result def update_all(self, request, body, **kwargs): # PUT /v2.0/networks/{network_id}/tags # body: {"tags": ["aaa", "bbb"]} validate_tags(body) parent, parent_id = self._get_parent_resource_and_id(kwargs) notify_tag_action(request.context, 'update.start', parent, parent_id, body['tags']) result = self.plugin.update_tags(request.context, parent, parent_id, body) notify_tag_action(request.context, 'update.end', parent, parent_id, body['tags']) return result def delete(self, request, id, **kwargs): # DELETE /v2.0/networks/{network_id}/tags/{tag} # id == tag validate_tag(id) parent, parent_id = self._get_parent_resource_and_id(kwargs) notify_tag_action(request.context, 'delete.start', parent, parent_id, [id]) result = self.plugin.delete_tag(request.context, parent, parent_id, id) notify_tag_action(request.context, 'delete.end', parent, parent_id, [id]) return result def delete_all(self, request, **kwargs): # DELETE /v2.0/networks/{network_id}/tags parent, parent_id = self._get_parent_resource_and_id(kwargs) notify_tag_action(request.context, 'delete_all.start', parent, parent_id) result = self.plugin.delete_tags(request.context, parent, parent_id) notify_tag_action(request.context, 'delete_all.end', parent, parent_id) return result class Tagging(api_extensions.ExtensionDescriptor): """Extension class supporting tags.""" @classmethod def get_name(cls): return ("Tag support for resources with standard attribute: %s" % ', '.join(TAG_SUPPORTED_RESOURCES.values())) @classmethod def get_alias(cls): return "standard-attr-tag" @classmethod def get_description(cls): return "Enables to set tag on resources with standard attribute." 
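# Illustrative route summary (exposition only, consolidating the per-method
# comments in TaggingController above; "networks" stands in for any parent
# resource listed in TAG_SUPPORTED_RESOURCES):
#
#     GET    /v2.0/networks/{network_id}/tags        -> index
#     GET    /v2.0/networks/{network_id}/tags/{tag}  -> show
#     PUT    /v2.0/networks/{network_id}/tags/{tag}  -> update
#     PUT    /v2.0/networks/{network_id}/tags        -> update_all
#     DELETE /v2.0/networks/{network_id}/tags/{tag}  -> delete
#     DELETE /v2.0/networks/{network_id}/tags        -> delete_all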
@classmethod def get_updated(cls): return "2017-01-01T00:00:00-00:00" def get_required_extensions(self): return [] @classmethod def get_resources(cls): """Returns Ext Resources.""" exts = [] action_status = {'index': 200, 'show': 204, 'update': 201, 'update_all': 200, 'delete': 204, 'delete_all': 204} controller = api_resource.Resource(TaggingController(), faults.FAULT_MAP, action_status=action_status) collection_methods = {"delete_all": "DELETE", "update_all": "PUT"} exts = [] for collection_name, member_name in TAG_SUPPORTED_RESOURCES.items(): if 'security_group' in collection_name: collection_name = collection_name.replace('_', '-') parent = {'member_name': member_name, 'collection_name': collection_name} exts.append(extensions.ResourceExtension( TAGS, controller, parent, collection_methods=collection_methods)) return exts def get_extended_resources(self, version): if version != "2.0": return {} EXTENDED_ATTRIBUTES_2_0 = {} for collection_name in TAG_SUPPORTED_RESOURCES: if collection_name == port.COLLECTION_NAME: EXTENDED_ATTRIBUTES_2_0[collection_name] = ( TAG_ATTRIBUTE_MAP_PORTS) else: EXTENDED_ATTRIBUTES_2_0[collection_name] = TAG_ATTRIBUTE_MAP return EXTENDED_ATTRIBUTES_2_0 @six.add_metaclass(abc.ABCMeta) class TagPluginBase(service_base.ServicePluginBase): """REST API to operate the Tag.""" def get_plugin_description(self): return "Tag support" @classmethod def get_plugin_type(cls): return TAG_PLUGIN_TYPE @abc.abstractmethod def get_tags(self, context, resource, resource_id): pass @abc.abstractmethod def get_tag(self, context, resource, resource_id, tag): pass @abc.abstractmethod def update_tags(self, context, resource, resource_id, body): pass @abc.abstractmethod def update_tag(self, context, resource, resource_id, tag): pass @abc.abstractmethod def delete_tags(self, context, resource, resource_id): pass @abc.abstractmethod def delete_tag(self, context, resource, resource_id, tag): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/timestamp.py0000644000175000017500000000347400000000000023421 0ustar00coreycorey00000000000000# Copyright 2015 HuaWei Technologies. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api import extensions from neutron.extensions import stdattrs_common # Attribute Map CREATED = 'created_at' UPDATED = 'updated_at' TIMESTAMP_BODY = { CREATED: {'allow_post': False, 'allow_put': False, 'is_visible': True, 'default': None }, UPDATED: {'allow_post': False, 'allow_put': False, 'is_visible': True, 'default': None }, } class Timestamp(extensions.ExtensionDescriptor): """Extension class supporting timestamp. This class is used by neutron's extension framework for adding timestamp to neutron core resources. 
""" @classmethod def get_name(cls): return "Resource timestamps" @classmethod def get_alias(cls): return "standard-attr-timestamp" @classmethod def get_description(cls): return ("Adds created_at and updated_at fields to all Neutron " "resources that have Neutron standard attributes.") @classmethod def get_updated(cls): return "2016-09-12T10:00:00-00:00" def get_extended_resources(self, version): if version != "2.0": return {} return stdattrs_common.stdattrs_extended_resources(TIMESTAMP_BODY) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/trunk.py0000644000175000017500000000240100000000000022546 0ustar00coreycorey00000000000000# Copyright (c) 2016 ZTE Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import trunk from neutron_lib.api import extensions from neutron.api.v2 import resource_helper class Trunk(extensions.APIExtensionDescriptor): """Trunk API extension.""" api_definition = trunk @classmethod def get_resources(cls): """Returns Ext Resources.""" plural_mappings = resource_helper.build_plural_mappings( {}, trunk.RESOURCE_ATTRIBUTE_MAP) return resource_helper.build_resource_info( plural_mappings, trunk.RESOURCE_ATTRIBUTE_MAP, trunk.ALIAS, action_map=trunk.ACTION_MAP, register_quota=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/trunk_details.py0000644000175000017500000000211300000000000024253 0ustar00coreycorey00000000000000# All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import trunk_details from neutron_lib.api import extensions # NOTE(armax): because of the API machinery, this extension must be on # its own. This aims at providing subport information for ports that # are parent in a trunk so that consumers of the Neutron API, like Nova # can efficiently access trunk information for things like metadata or # config-drive configuration. 
class Trunk_details(extensions.APIExtensionDescriptor): api_definition = trunk_details ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/uplink_status_propagation.py0000644000175000017500000000142200000000000026715 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import uplink_status_propagation as apidef from neutron_lib.api import extensions class Uplink_status_propagation(extensions.APIExtensionDescriptor): api_definition = apidef ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/extensions/vlantransparent.py0000644000175000017500000000230700000000000024632 0ustar00coreycorey00000000000000# Copyright (c) 2015 Cisco Systems, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import vlantransparent as apidef from neutron_lib.api import extensions from oslo_config import cfg from oslo_log import log as logging LOG = logging.getLogger(__name__) def _disable_extension_by_config(aliases): if not cfg.CONF.vlan_transparent: if 'vlan-transparent' in aliases: aliases.remove('vlan-transparent') LOG.info('Disabled vlantransparent extension.') class Vlantransparent(extensions.APIExtensionDescriptor): """Extension class supporting vlan transparent networks.""" api_definition = apidef ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3150446 neutron-16.0.0.0b2.dev214/neutron/hacking/0000755000175000017500000000000000000000000020241 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/hacking/__init__.py0000644000175000017500000000000000000000000022340 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/hacking/checks.py0000644000175000017500000002227500000000000022063 0ustar00coreycorey00000000000000# Copyright (c) 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import re from neutron_lib.hacking import checks def flake8ext(f): """Decorator to indicate flake8 extension. This is borrowed from hacking.core.flake8ext(), but at now it is used only for unit tests to know which are neutron flake8 extensions. """ f.name = __name__ return f # Guidelines for writing new hacking checks # # - Use only for Neutron specific tests. OpenStack general tests # should be submitted to the common 'hacking' module. # - Pick numbers in the range N3xx. Find the current test with # the highest allocated number and then pick the next value. # - Keep the test method code in the source file ordered based # on the N3xx value. # - List the new rule in the top level HACKING.rst file # - Add test cases for each new rule to # neutron/tests/unit/hacking/test_checks.py filter_match = re.compile(r".*filter\(lambda ") tests_imports_dot = re.compile(r"\bimport[\s]+neutron.tests\b") tests_imports_from1 = re.compile(r"\bfrom[\s]+neutron.tests\b") tests_imports_from2 = re.compile(r"\bfrom[\s]+neutron[\s]+import[\s]+tests\b") @flake8ext def check_assert_called_once_with(logical_line, filename): """N322 - Try to detect unintended calls of nonexistent mock methods like: assertCalledOnceWith assert_has_called called_once_with """ if 'neutron/tests/' in filename: if '.assert_called_once_with(' in logical_line: return uncased_line = logical_line.lower().replace('_', '') check_calls = ['.assertcalledoncewith', '.calledoncewith'] if any(x for x in check_calls if x in uncased_line): msg = ("N322: Possible use of no-op mock method. " "please use assert_called_once_with.") yield (0, msg) if '.asserthascalled' in uncased_line: msg = ("N322: Possible use of no-op mock method. " "please use assert_has_calls.") yield (0, msg) @flake8ext def check_asserttruefalse(logical_line, filename): """N328 - Don't use assertEqual(True/False, observed).""" if 'neutron/tests/' in filename: if re.search(r"assertEqual\(\s*True,[^,]*(,[^,]*)?", logical_line): msg = ("N328: Use assertTrue(observed) instead of " "assertEqual(True, observed)") yield (0, msg) if re.search(r"assertEqual\([^,]*,\s*True(,[^,]*)?", logical_line): msg = ("N328: Use assertTrue(observed) instead of " "assertEqual(True, observed)") yield (0, msg) if re.search(r"assertEqual\(\s*False,[^,]*(,[^,]*)?", logical_line): msg = ("N328: Use assertFalse(observed) instead of " "assertEqual(False, observed)") yield (0, msg) if re.search(r"assertEqual\([^,]*,\s*False(,[^,]*)?", logical_line): msg = ("N328: Use assertFalse(observed) instead of " "assertEqual(False, observed)") yield (0, msg) @flake8ext def check_assertempty(logical_line, filename): """N330 - Enforce using assertEqual parameter ordering in case of empty objects. """ if 'neutron/tests/' in filename: msg = ("N330: Use assertEqual(*empty*, observed) instead of " "assertEqual(observed, *empty*). 
*empty* contains " "{}, [], (), set(), '', \"\"") empties = r"(\[\s*\]|\{\s*\}|\(\s*\)|set\(\s*\)|'\s*'|\"\s*\")" reg = r"assertEqual\(([^,]*,\s*)+?%s\)\s*$" % empties if re.search(reg, logical_line): yield (0, msg) @flake8ext def check_assertisinstance(logical_line, filename): """N331 - Enforce using assertIsInstance.""" if 'neutron/tests/' in filename: if re.search(r"assertTrue\(\s*isinstance\(\s*[^,]*,\s*[^,]*\)\)", logical_line): msg = ("N331: Use assertIsInstance(observed, type) instead " "of assertTrue(isinstance(observed, type))") yield (0, msg) @flake8ext def check_assertequal_for_httpcode(logical_line, filename): """N332 - Enforce correct oredering for httpcode in assertEqual.""" msg = ("N332: Use assertEqual(expected_http_code, observed_http_code) " "instead of assertEqual(observed_http_code, expected_http_code)") if 'neutron/tests/' in filename: if re.search(r"assertEqual\(\s*[^,]*,[^,]*HTTP[^\.]*\.code\s*\)", logical_line): yield (0, msg) @flake8ext def check_oslo_i18n_wrapper(logical_line, filename, noqa): """N340 - Check for neutron.i18n usage. Okay(neutron/foo/bar.py): from neutron._i18n import _ Okay(neutron_fwaas/foo/bar.py): from neutron_fwaas._i18n import _ N340(neutron/foo/bar.py): from neutron.i18n import _ N340(neutron_fwaas/foo/bar.py): from neutron_fwaas.i18n import _ N340(neutron_fwaas/foo/bar.py): from neutron.i18n import _ N340(neutron_fwaas/foo/bar.py): from neutron._i18n import _ Okay(neutron/foo/bar.py): from neutron.i18n import _ # noqa """ if noqa: return split_line = logical_line.split() modulename = os.path.normpath(filename).split('/')[0] bad_i18n_module = '%s.i18n' % modulename if (len(split_line) > 1 and split_line[0] in ('import', 'from')): if (split_line[1] == bad_i18n_module or modulename != 'neutron' and split_line[1] in ('neutron.i18n', 'neutron._i18n')): msg = ("N340: %(found)s is found. Use %(module)s._i18n instead." % {'found': split_line[1], 'module': modulename}) yield (0, msg) @flake8ext def check_builtins_gettext(logical_line, tokens, filename, lines, noqa): """N341 - Check usage of builtins gettext _(). Okay(neutron/foo.py): from neutron._i18n import _\n_('foo') N341(neutron/foo.py): _('foo') Okay(neutron/_i18n.py): _('foo') Okay(neutron/i18n.py): _('foo') Okay(neutron/foo.py): _('foo') # noqa """ if noqa: return modulename = os.path.normpath(filename).split('/')[0] if '%s/tests' % modulename in filename: return if os.path.basename(filename) in ('i18n.py', '_i18n.py'): return token_values = [t[1] for t in tokens] i18n_wrapper = '%s._i18n' % modulename if '_' in token_values: i18n_import_line_found = False for line in lines: split_line = [elm.rstrip(',') for elm in line.split()] if (len(split_line) > 1 and split_line[0] == 'from' and split_line[1] == i18n_wrapper and '_' in split_line): i18n_import_line_found = True break if not i18n_import_line_found: msg = ("N341: _ from python builtins module is used. " "Use _ from %s instead." 
% i18n_wrapper) yield (0, msg) @flake8ext def check_no_imports_from_tests(logical_line, filename, noqa): """N343 Production code must not import from neutron.tests.* """ msg = ("N343 Production code must not import from neutron.tests.*") if noqa: return if 'neutron/tests/' in filename: return for regex in tests_imports_dot, tests_imports_from1, tests_imports_from2: if re.match(regex, logical_line): yield(0, msg) @flake8ext def check_python3_no_filter(logical_line): """N344 - Use list comprehension instead of filter(lambda).""" msg = ("N344: Use list comprehension instead of " "filter(lambda obj: test(obj), data) on python3.") if filter_match.match(logical_line): yield(0, msg) # TODO(boden): rehome this check to neutron-lib @flake8ext def check_no_sqlalchemy_event_import(logical_line, filename, noqa): """N346 - Use neutron_lib.db.api.sqla_listen rather than sqlalchemy.""" if noqa: return is_import = (logical_line.startswith('import') or logical_line.startswith('from')) if not is_import: return for kw in ('sqlalchemy', 'event'): if kw not in logical_line: return yield (0, "N346: Register sqlalchemy events through " "neutron_lib.db.api.sqla_listen so they can be cleaned up " "between unit tests") def factory(register): checks.factory(register) register(check_assert_called_once_with) register(check_asserttruefalse) register(check_assertempty) register(check_assertisinstance) register(check_assertequal_for_httpcode) register(check_oslo_i18n_wrapper) register(check_builtins_gettext) register(check_no_imports_from_tests) register(check_python3_no_filter) register(check_no_sqlalchemy_event_import) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3150446 neutron-16.0.0.0b2.dev214/neutron/ipam/0000755000175000017500000000000000000000000017563 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/ipam/__init__.py0000644000175000017500000000000000000000000021662 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/ipam/driver.py0000644000175000017500000001502500000000000021433 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_config import cfg import six from neutron.ipam import requests as ipam_req from neutron import manager @six.add_metaclass(abc.ABCMeta) class Pool(object): """Interface definition for an IPAM driver. There should be an instance of the driver for every subnet pool. """ def __init__(self, subnetpool, context): """Initialize pool :param subnetpool: SubnetPool of the address space to use. :type subnetpool: dict """ self._subnetpool = subnetpool self._context = context @classmethod def get_instance(cls, subnet_pool, context): """Returns an instance of the configured IPAM driver :param subnet_pool: Subnet pool of the address space to use. 
:type subnet_pool: dict :returns: An instance of Driver for the given subnet pool """ ipam_driver_name = cfg.CONF.ipam_driver mgr = manager.NeutronManager driver_class = mgr.load_class_for_provider('neutron.ipam_drivers', ipam_driver_name) return driver_class(subnet_pool, context) @abc.abstractmethod def allocate_subnet(self, request): """Allocates a subnet based on the subnet request :param request: Describes the allocation requested. :type request: An instance of a sub-class of SubnetRequest :returns: An instance of Subnet :raises: RequestNotSupported, IPAMAlreadyAllocated """ @abc.abstractmethod def get_subnet(self, subnet_id): """Gets the matching subnet if it has been allocated :param subnet_id: the subnet identifier :type subnet_id: str uuid :returns: An instance of IPAM Subnet :raises: IPAMAllocationNotFound """ @abc.abstractmethod def update_subnet(self, request): """Updates an already allocated subnet This is used to notify the external IPAM system of updates to a subnet. :param request: Update the subnet to match this request :type request: An instance of a sub-class of SpecificSubnetRequest :returns: An instance of IPAM Subnet :raises: RequestNotSupported, IPAMAllocationNotFound """ @abc.abstractmethod def remove_subnet(self, subnet_id): """Removes an allocation The initial reference implementation will probably do nothing. :param subnet_id: the subnet identifier :type subnet_id: str uuid :raises: IPAMAllocationNotFound """ def get_subnet_request_factory(self): """Returns default SubnetRequestFactory Can be overridden on driver level to return custom factory """ return ipam_req.SubnetRequestFactory def get_address_request_factory(self): """Returns default AddressRequestFactory Can be overridden on driver level to return custom factory """ return ipam_req.AddressRequestFactory @abc.abstractmethod def get_allocator(self, subnet_ids): """Gets an allocator for subnets passed in :param subnet_ids: ids for subnets from which the IP can be allocated :returns: An instance of IPAM SubnetGroup :raises: TODO(Carl) What sort of errors do we need to plan for? """ def needs_rollback(self): """Whether driver needs an explicit rollback when operations fail. A driver that (de)allocates resources in the same DB transaction passed to it by Neutron will not want explicit rollback. A truly external IPAM system would need to return True for sure. The default is True since all drivers were assumed to be designed to need it from the start. :returns: True if driver needs to be called on rollback """ return True @six.add_metaclass(abc.ABCMeta) class Subnet(object): """Interface definition for an IPAM subnet A subnet would typically be associated with a network but may not be. It could represent a dynamically routed IP address space in which case the normal network and broadcast addresses would be useable. It should always be a routable block of addresses and representable in CIDR notation. """ @abc.abstractmethod def allocate(self, address_request): """Allocates an IP address based on the request passed in :param address_request: Specifies what to allocate. :type address_request: An instance of a subclass of AddressRequest :returns: A netaddr.IPAddress :raises: AddressNotAvailable, AddressOutsideAllocationPool, AddressOutsideSubnet """ @abc.abstractmethod def deallocate(self, address): """Returns a previously allocated address to the pool :param address: The address to give back. :type address: A netaddr.IPAddress or convertible to one. 
:returns: None :raises: IPAMAllocationNotFound """ @abc.abstractmethod def get_details(self): """Returns the details of the subnet :returns: An instance of SpecificSubnetRequest with the subnet detail. """ @six.add_metaclass(abc.ABCMeta) class SubnetGroup(object): """Interface definition for a filtered group of IPAM Subnets Allocates from a group of semantically equivalent subnets. The list of candidate subnets *may* be ordered by preference but all of the subnets must be suitable for fulfilling the request. For example, all of them must be associated with the network we're trying to allocate an address for. """ @abc.abstractmethod def allocate(self, address_request): """Allocates an IP address based on the request passed in :param address_request: Specifies what to allocate. :type address_request: An instance of a subclass of AddressRequest :returns: A netaddr.IPAddress, subnet_id tuple :raises: AddressNotAvailable, AddressOutsideAllocationPool, AddressOutsideSubnet, IpAddressGenerationFailureAllSubnets """ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3150446 neutron-16.0.0.0b2.dev214/neutron/ipam/drivers/0000755000175000017500000000000000000000000021241 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/ipam/drivers/__init__.py0000644000175000017500000000000000000000000023340 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3150446 neutron-16.0.0.0b2.dev214/neutron/ipam/drivers/neutrondb_ipam/0000755000175000017500000000000000000000000024247 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/ipam/drivers/neutrondb_ipam/__init__.py0000644000175000017500000000000000000000000026346 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/ipam/drivers/neutrondb_ipam/db_api.py0000644000175000017500000001232500000000000026042 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
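# Illustrative sketch (not part of the driver): the typical lifecycle of the
# IpamSubnetManager defined below. `ctx` is assumed to be a neutron request
# context, and `ipam_subnet_id` / `neutron_subnet_id` are placeholder UUIDs
# for this example only.
#
#     manager = IpamSubnetManager(ipam_subnet_id, neutron_subnet_id)
#     manager.create(ctx)                         # persist the IPAM subnet
#     manager.create_pool(ctx, '10.0.0.2', '10.0.0.254')
#     manager.create_allocation(ctx, '10.0.0.5')  # mark an address as in use
#     manager.delete_allocation(ctx, '10.0.0.5')  # release it again
#     IpamSubnetManager.delete(ctx, neutron_subnet_id)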
from neutron_lib import constants as const from oslo_utils import uuidutils from neutron.objects import ipam as ipam_objs # Database operations for Neutron's DB-backed IPAM driver class IpamSubnetManager(object): @classmethod def load_by_neutron_subnet_id(cls, context, neutron_subnet_id): objs = ipam_objs.IpamSubnet.get_objects( context, neutron_subnet_id=neutron_subnet_id) return objs.pop() if objs else None def __init__(self, ipam_subnet_id, neutron_subnet_id): self._ipam_subnet_id = ipam_subnet_id self._neutron_subnet_id = neutron_subnet_id @property def neutron_id(self): return self._neutron_subnet_id def create(self, context): """Create database models for an IPAM subnet. This method creates a subnet resource for the IPAM driver and associates it with its neutron identifier, if specified. :param context: neutron api request context :returns: the idenfier of created IPAM subnet """ if not self._ipam_subnet_id: self._ipam_subnet_id = uuidutils.generate_uuid() ipam_objs.IpamSubnet( context, id=self._ipam_subnet_id, neutron_subnet_id=self._neutron_subnet_id).create() return self._ipam_subnet_id @classmethod def delete(cls, context, neutron_subnet_id): """Delete IPAM subnet. IPAM subnet no longer has foreign key to neutron subnet, so need to perform delete manually :param context: neutron api request context :param neutron_subnet_id: neutron subnet id associated with ipam subnet """ return ipam_objs.IpamSubnet.delete_objects( context, neutron_subnet_id=neutron_subnet_id) def create_pool(self, context, pool_start, pool_end): """Create an allocation pool for the subnet. This method does not perform any validation on parameters; it simply persist data on the database. :param pool_start: string expressing the start of the pool :param pool_end: string expressing the end of the pool :return: the newly created pool object. """ ip_pool_obj = ipam_objs.IpamAllocationPool( context, ipam_subnet_id=self._ipam_subnet_id, first_ip=pool_start, last_ip=pool_end) ip_pool_obj.create() return ip_pool_obj def delete_allocation_pools(self, context): """Remove all allocation pools for the current subnet. :param context: neutron api request context """ ipam_objs.IpamAllocationPool.delete_objects( context, ipam_subnet_id=self._ipam_subnet_id) def list_pools(self, context): """Return pools for the current subnet.""" return ipam_objs.IpamAllocationPool.get_objects( context, ipam_subnet_id=self._ipam_subnet_id) def check_unique_allocation(self, context, ip_address): """Validate that the IP address on the subnet is not in use.""" return not ipam_objs.IpamAllocation.objects_exist( context, ipam_subnet_id=self._ipam_subnet_id, status=const.IPAM_ALLOCATION_STATUS_ALLOCATED, ip_address=ip_address) def list_allocations(self, context, status=const.IPAM_ALLOCATION_STATUS_ALLOCATED): """Return current allocations for the subnet. :param context: neutron api request context :param status: IP allocation status :returns: a list of IpamAllocation OVO objects """ return ipam_objs.IpamAllocation.get_objects( context, ipam_subnet_id=self._ipam_subnet_id, status=status) def create_allocation(self, context, ip_address, status=const.IPAM_ALLOCATION_STATUS_ALLOCATED): """Create an IP allocation entry. 
:param context: neutron api request context :param ip_address: the IP address to allocate :param status: IP allocation status """ ipam_objs.IpamAllocation( context, ip_address=ip_address, status=status, ipam_subnet_id=self._ipam_subnet_id).create() def delete_allocation(self, context, ip_address): """Remove an IP allocation for this subnet. :param context: neutron api request context :param ip_address: IP address for which the allocation entry should be removed. :returns: number of deleted allocation entries. """ return ipam_objs.IpamAllocation.delete_objects( context, ipam_subnet_id=self._ipam_subnet_id, ip_address=ip_address) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/ipam/drivers/neutrondb_ipam/db_models.py0000644000175000017500000000601700000000000026555 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm as sa_orm # Database models used by the neutron DB IPAM driver # NOTE(salv-orlando): The following data model creates redundancy with # models_v2.IPAllocationPool. This level of data redundancy could be tolerated # considering that the following model is specific to the IPAM driver logic. # It therefore represents an internal representation of a subnet allocation # pool and can therefore change in the future, where as # models_v2.IPAllocationPool is the representation of IP allocation pools in # the management layer and therefore its evolution is subject to APIs backward # compatibility policies class IpamAllocationPool(model_base.BASEV2, model_base.HasId): """Representation of an allocation pool in a Neutron subnet.""" ipam_subnet_id = sa.Column(sa.String(36), sa.ForeignKey('ipamsubnets.id', ondelete="CASCADE"), nullable=False) first_ip = sa.Column(sa.String(64), nullable=False) last_ip = sa.Column(sa.String(64), nullable=False) def __repr__(self): return "%s - %s" % (self.first_ip, self.last_ip) class IpamSubnet(model_base.BASEV2, model_base.HasId): """Association between IPAM entities and neutron subnets. For subnet data persistency - such as cidr and gateway IP, the IPAM driver relies on Neutron's subnet model as source of truth to limit data redundancy. """ neutron_subnet_id = sa.Column(sa.String(36), nullable=True) allocation_pools = sa_orm.relationship(IpamAllocationPool, backref='subnet', lazy="joined", cascade='delete') class IpamAllocation(model_base.BASEV2): """Model class for IP Allocation requests. """ ip_address = sa.Column(sa.String(64), nullable=False, primary_key=True) status = sa.Column(sa.String(36)) # The subnet identifier is redundant but come handy for looking up # IP addresses to remove. 
ipam_subnet_id = sa.Column(sa.String(36), sa.ForeignKey('ipamsubnets.id', ondelete="CASCADE"), primary_key=True, nullable=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/ipam/drivers/neutrondb_ipam/driver.py0000644000175000017500000004136000000000000026120 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools import random import netaddr from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from oslo_db import exception as db_exc from oslo_log import log from oslo_utils import uuidutils from neutron._i18n import _ from neutron.ipam import driver as ipam_base from neutron.ipam.drivers.neutrondb_ipam import db_api as ipam_db_api from neutron.ipam import exceptions as ipam_exc from neutron.ipam import requests as ipam_req from neutron.ipam import subnet_alloc from neutron.ipam import utils as ipam_utils LOG = log.getLogger(__name__) MAX_WIN = 1000 MULTIPLIER = 100 MAX_WIN_MULTI = MAX_WIN * MULTIPLIER class NeutronDbSubnet(ipam_base.Subnet): """Manage IP addresses for Neutron DB IPAM driver. This class implements the strategy for IP address allocation and deallocation for the Neutron DB IPAM driver. """ @classmethod def create_allocation_pools(cls, subnet_manager, context, pools, cidr): for pool in pools: # IPv6 addresses that start '::1', '::2', etc cause IP version # ambiguity when converted to integers by pool.first and pool.last. # Infer the IP version from the subnet cidr. ip_version = cidr.version subnet_manager.create_pool( context, netaddr.IPAddress(pool.first, ip_version).format(), netaddr.IPAddress(pool.last, ip_version).format()) @classmethod def create_from_subnet_request(cls, subnet_request, ctx): ipam_subnet_id = uuidutils.generate_uuid() subnet_manager = ipam_db_api.IpamSubnetManager( ipam_subnet_id, subnet_request.subnet_id) # Create subnet resource subnet_manager.create(ctx) # If allocation pools are not specified, define them around # the subnet's gateway IP if not subnet_request.allocation_pools: pools = ipam_utils.generate_pools(subnet_request.subnet_cidr, subnet_request.gateway_ip) else: pools = subnet_request.allocation_pools # Create IPAM allocation pools cls.create_allocation_pools(subnet_manager, ctx, pools, subnet_request.subnet_cidr) return cls(ipam_subnet_id, ctx, cidr=subnet_request.subnet_cidr, allocation_pools=pools, gateway_ip=subnet_request.gateway_ip, tenant_id=subnet_request.tenant_id, subnet_id=subnet_request.subnet_id) @classmethod def load(cls, neutron_subnet_id, ctx): """Load an IPAM subnet from the database given its neutron ID. :param neutron_subnet_id: neutron subnet identifier. 
""" ipam_subnet = ipam_db_api.IpamSubnetManager.load_by_neutron_subnet_id( ctx, neutron_subnet_id) if not ipam_subnet: LOG.error("IPAM subnet referenced to " "Neutron subnet %s does not exist", neutron_subnet_id) raise n_exc.SubnetNotFound(subnet_id=neutron_subnet_id) pools = [] for pool in ipam_subnet.allocation_pools: pools.append(netaddr.IPRange(pool['first_ip'], pool['last_ip'])) neutron_subnet_obj = cls._fetch_subnet(ctx, neutron_subnet_id) return cls(ipam_subnet['id'], ctx, cidr=neutron_subnet_obj.cidr, allocation_pools=pools, gateway_ip=neutron_subnet_obj.gateway_ip, tenant_id=neutron_subnet_obj.tenant_id, subnet_id=neutron_subnet_id) @classmethod def _fetch_subnet(cls, context, id): plugin = directory.get_plugin() return plugin._get_subnet_object(context, id) def __init__(self, internal_id, ctx, cidr=None, allocation_pools=None, gateway_ip=None, tenant_id=None, subnet_id=None): # NOTE: In theory it could have been possible to grant the IPAM # driver direct access to the database. While this is possible, # it would have led to duplicate code and/or non-trivial # refactorings in neutron.db.db_base_plugin_v2. # This is because in the Neutron V2 plugin logic DB management is # encapsulated within the plugin. self._cidr = cidr self._pools = allocation_pools self._gateway_ip = gateway_ip self._tenant_id = tenant_id self._subnet_id = subnet_id self.subnet_manager = ipam_db_api.IpamSubnetManager(internal_id, self._subnet_id) self._context = ctx def _verify_ip(self, context, ip_address): """Verify whether IP address can be allocated on subnet. :param context: neutron api request context :param ip_address: String representing the IP address to verify :raises: InvalidInput, IpAddressAlreadyAllocated """ # Ensure that the IP's are unique if not self.subnet_manager.check_unique_allocation(context, ip_address): raise ipam_exc.IpAddressAlreadyAllocated( subnet_id=self.subnet_manager.neutron_id, ip=ip_address) # Ensure that the IP is valid on the subnet if not ipam_utils.check_subnet_ip(self._cidr, ip_address): raise ipam_exc.InvalidIpForSubnet( subnet_id=self.subnet_manager.neutron_id, ip=ip_address) def _generate_ip(self, context, prefer_next=False): """Generate an IP address from the set of available addresses.""" return self._generate_ips(context, prefer_next)[0] def _generate_ips(self, context, prefer_next=False, num_addresses=1): """Generate a set of IPs from the set of available addresses.""" allocated_ips = [] requested_num_addresses = num_addresses allocations = self.subnet_manager.list_allocations(context) # It is better not to use 'netaddr.IPSet.add', # because _compact_single_network in 'IPSet.add' # is quite time consuming. ip_allocations = netaddr.IPSet( [netaddr.IPAddress(allocation.ip_address) for allocation in allocations]) for ip_pool in self.subnet_manager.list_pools(context): ip_set = netaddr.IPSet() ip_set.add(netaddr.IPRange(ip_pool.first_ip, ip_pool.last_ip)) av_set = ip_set.difference(ip_allocations) if av_set.size == 0: continue if av_set.size < requested_num_addresses: # All addresses of the address pool are allocated # for the first time and the remaining addresses # will be allocated in the next address pools. allocated_num_addresses = av_set.size else: # All expected addresses can be assigned in this loop. 
allocated_num_addresses = requested_num_addresses if prefer_next: allocated_ip_pool = list(itertools.islice( av_set, allocated_num_addresses)) allocated_ips.extend([str(allocated_ip) for allocated_ip in allocated_ip_pool]) requested_num_addresses -= allocated_num_addresses if requested_num_addresses: # More addresses need to be allocated in the next loop. continue return allocated_ips window = min(av_set.size, MAX_WIN) # NOTE(gryf): If there is more than one address, make the window # bigger, so that are chances to fulfill demanded amount of IPs. if allocated_num_addresses > 1: window = min(av_set.size, allocated_num_addresses * MULTIPLIER, MAX_WIN_MULTI) if window < allocated_num_addresses: continue else: # Maximize randomness by using the random module's built in # sampling function av_ips = list(itertools.islice(av_set, 0, window)) allocated_ip_pool = random.sample(av_ips, allocated_num_addresses) allocated_ips.extend([str(allocated_ip) for allocated_ip in allocated_ip_pool]) requested_num_addresses -= allocated_num_addresses if requested_num_addresses: # More addresses need to be allocated in the next loop. continue return allocated_ips raise ipam_exc.IpAddressGenerationFailure( subnet_id=self.subnet_manager.neutron_id) def allocate(self, address_request): # NOTE(pbondar): Ipam driver is always called in context of already # running transaction, which is started on create_port or upper level. # To be able to do rollback/retry actions correctly ipam driver # should not create new nested transaction blocks. # NOTE(salv-orlando): It would probably better to have a simpler # model for address requests and just check whether there is a # specific IP address specified in address_request if isinstance(address_request, ipam_req.SpecificAddressRequest): # This handles both specific and automatic address requests # Check availability of requested IP ip_address = str(address_request.address) self._verify_ip(self._context, ip_address) else: prefer_next = isinstance(address_request, ipam_req.PreferNextAddressRequest) ip_address = self._generate_ip(self._context, prefer_next) # Create IP allocation request object # The only defined status at this stage is 'ALLOCATED'. # More states will be available in the future - e.g.: RECYCLABLE try: # TODO(ataraday): revisit this after objects switched to # new enginefacade with self._context.session.begin(subtransactions=True): # NOTE(kevinbenton): we use a subtransaction to force # a flush here so we can capture DBReferenceErrors due # to concurrent subnet deletions. (galera would deadlock # later on final commit) self.subnet_manager.create_allocation(self._context, ip_address) except db_exc.DBReferenceError: raise n_exc.SubnetNotFound( subnet_id=self.subnet_manager.neutron_id) return ip_address def bulk_allocate(self, address_request): # The signature of this function differs from allocate only in that it # returns a list of addresses, as opposed to a single address. 
if not isinstance(address_request, ipam_req.BulkAddressRequest): return [self.allocate(address_request)] num_addrs = address_request.num_addresses allocated_ip_pool = self._generate_ips(self._context, False, num_addrs) # Create IP allocation request objects try: with self._context.session.begin(subtransactions=True): for ip_address in allocated_ip_pool: self.subnet_manager.create_allocation(self._context, ip_address) except db_exc.DBReferenceError: raise n_exc.SubnetNotFound( subnet_id=self.subnet_manager.neutron_id) return allocated_ip_pool def deallocate(self, address): # This is almost a no-op because the Neutron DB IPAM driver does not # delete IPAllocation objects at every deallocation. The only # operation it performs is to delete an IPRequest entry. count = self.subnet_manager.delete_allocation( self._context, address) # count can hardly be greater than 1, but it can be 0... if not count: raise ipam_exc.IpAddressAllocationNotFound( subnet_id=self.subnet_manager.neutron_id, ip_address=address) def _no_pool_changes(self, context, pools): """Check if pool updates in db are required.""" db_pools = self.subnet_manager.list_pools(context) iprange_pools = [netaddr.IPRange(pool.first_ip, pool.last_ip) for pool in db_pools] return pools == iprange_pools def update_allocation_pools(self, pools, cidr): # Pools have already been validated in the subnet request object which # was sent to the subnet pool driver. Further validation should not be # required. if self._no_pool_changes(self._context, pools): return self.subnet_manager.delete_allocation_pools(self._context) self.create_allocation_pools(self.subnet_manager, self._context, pools, cidr) self._pools = pools def get_details(self): """Return subnet data as a SpecificSubnetRequest""" return ipam_req.SpecificSubnetRequest( self._tenant_id, self.subnet_manager.neutron_id, self._cidr, self._gateway_ip, self._pools) class NeutronDbPool(subnet_alloc.SubnetAllocator): """Subnet pools backed by Neutron Database. As this driver does not implement yet the subnet pool concept, most operations are either trivial or no-ops. """ def get_subnet(self, subnet_id): """Retrieve an IPAM subnet. :param subnet_id: Neutron subnet identifier :returns: a NeutronDbSubnet instance """ return NeutronDbSubnet.load(subnet_id, self._context) def allocate_subnet(self, subnet_request): """Create an IPAMSubnet object for the provided cidr. This method does not actually do any operation in the driver, given its simplified nature. :param cidr: subnet's CIDR :returns: a NeutronDbSubnet instance """ if self._subnetpool: subnet = super(NeutronDbPool, self).allocate_subnet(subnet_request) subnet_request = subnet.get_details() # SubnetRequest must be an instance of SpecificSubnet if not isinstance(subnet_request, ipam_req.SpecificSubnetRequest): raise ipam_exc.InvalidSubnetRequestType( subnet_type=type(subnet_request)) return NeutronDbSubnet.create_from_subnet_request(subnet_request, self._context) def update_subnet(self, subnet_request): """Update subnet info the in the IPAM driver. The only update subnet information the driver needs to be aware of are allocation pools. 
""" if not subnet_request.subnet_id: raise ipam_exc.InvalidSubnetRequest( reason=_("An identifier must be specified when updating " "a subnet")) if subnet_request.allocation_pools is None: LOG.debug("Update subnet request for subnet %s did not specify " "new allocation pools, there is nothing to do", subnet_request.subnet_id) return subnet = NeutronDbSubnet.load(subnet_request.subnet_id, self._context) cidr = netaddr.IPNetwork(subnet._cidr) subnet.update_allocation_pools(subnet_request.allocation_pools, cidr) return subnet def remove_subnet(self, subnet_id): """Remove data structures for a given subnet. IPAM-related data has no foreign key relationships to neutron subnet, so removing ipam subnet manually """ count = ipam_db_api.IpamSubnetManager.delete(self._context, subnet_id) if count < 1: LOG.error("IPAM subnet referenced to " "Neutron subnet %s does not exist", subnet_id) raise n_exc.SubnetNotFound(subnet_id=subnet_id) def needs_rollback(self): return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/ipam/exceptions.py0000644000175000017500000000605300000000000022322 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib import exceptions from neutron._i18n import _ class InvalidSubnetRequestType(exceptions.BadRequest): message = _("Cannot handle subnet of type %(subnet_type)s") class AddressCalculationFailure(exceptions.NeutronException): message = _("Unable to calculate %(address_type)s address because of:" "%(reason)s") class InvalidAddressType(exceptions.NeutronException): message = _("Unknown address type %(address_type)s") class IpAddressAllocationNotFound(exceptions.NeutronException): message = _("Unable to find IP address %(ip_address)s on subnet " "%(subnet_id)s") class IpAddressAlreadyAllocated(exceptions.Conflict): message = _("IP address %(ip)s already allocated in subnet %(subnet_id)s") class InvalidIpForSubnet(exceptions.BadRequest): message = _("IP address %(ip)s does not belong to subnet %(subnet_id)s") class InvalidAddressRequest(exceptions.BadRequest): message = _("The address allocation request could not be satisfied " "because: %(reason)s") class InvalidSubnetRequest(exceptions.BadRequest): message = _("The subnet request could not be satisfied because: " "%(reason)s") class IPAddressChangeNotAllowed(exceptions.BadRequest): message = _("IP updates for port %(port_id)s are not allowed") class AllocationOnAutoAddressSubnet(exceptions.InvalidInput): message = _("IPv6 address %(ip)s cannot be directly " "assigned to a port on subnet %(subnet_id)s as the " "subnet is configured for automatic addresses") class IpAddressGenerationFailure(exceptions.Conflict): message = _("No more IP addresses available for subnet %(subnet_id)s.") class IpAddressGenerationFailureAllSubnets(IpAddressGenerationFailure): message = _("No more IP addresses available.") class IpAddressGenerationFailureNoMatchingSubnet(IpAddressGenerationFailure): message = _("No valid service subnet for the given device owner, " "network %(network_id)s, service type %(service_type)s.") class IPAllocationFailed(exceptions.NeutronException): message = _("IP allocation failed. Try again later.") class IpamValueInvalid(exceptions.Conflict): def __init__(self, message=None): self.message = message super(IpamValueInvalid, self).__init__() class DeferIpam(exceptions.NeutronException): message = _("Exception used to signal that IP allocation is deferred") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/ipam/requests.py0000644000175000017500000002762500000000000022024 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
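# Illustrative sketch (assumption, exposition only): constructing the two
# main subnet request flavours defined below. `tenant_id` and `subnet_id`
# are placeholder UUIDs.
#
#     import netaddr
#     from neutron_lib import constants
#
#     pools = [netaddr.IPRange('192.168.1.10', '192.168.1.100')]
#     specific = SpecificSubnetRequest(
#         tenant_id, subnet_id, '192.168.1.0/24',
#         gateway_ip='192.168.1.1', allocation_pools=pools)
#     wildcard = AnySubnetRequest(
#         tenant_id, subnet_id, constants.IPv4, prefixlen=24)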
import abc import netaddr from neutron_lib.api import validators from neutron_lib import constants from oslo_utils import netutils from oslo_utils import uuidutils import six from neutron._i18n import _ from neutron.common import utils as common_utils from neutron.ipam import exceptions as ipam_exc @six.add_metaclass(abc.ABCMeta) class SubnetPool(object): """Represents a pool of IPs available inside an address scope.""" @six.add_metaclass(abc.ABCMeta) class SubnetRequest(object): """Carries the data needed to make a subnet request The data validated and carried by an instance of this class is the data that is common to any type of request. This class shouldn't be instantiated on its own. Rather, a subclass of this class should be used. """ def __init__(self, tenant_id, subnet_id, gateway_ip=None, allocation_pools=None): """Initialize and validate :param tenant_id: The tenant id who will own the subnet :type tenant_id: str uuid :param subnet_id: Neutron's subnet ID :type subnet_id: str uuid :param gateway_ip: An IP to reserve for the subnet gateway. :type gateway_ip: None or convertible to netaddr.IPAddress :param allocation_pools: The pool from which IPAM should allocate addresses. The allocator *may* allow allocating addresses outside of this range if specifically requested. :type allocation_pools: A list of netaddr.IPRange. None if not specified. """ self._tenant_id = tenant_id self._subnet_id = subnet_id self._gateway_ip = None self._allocation_pools = None if gateway_ip is not None: self._gateway_ip = netaddr.IPAddress(gateway_ip) if allocation_pools is not None: allocation_pools = sorted(allocation_pools) previous = None for pool in allocation_pools: if not isinstance(pool, netaddr.ip.IPRange): raise TypeError(_("Ranges must be netaddr.IPRange")) if previous and pool.first <= previous.last: raise ValueError(_("Ranges must not overlap")) previous = pool if 1 < len(allocation_pools): # Checks that all the ranges are in the same IP version. # IPRange sorts first by ip version so we can get by with just # checking the first and the last range having sorted them # above. first_version = allocation_pools[0].version last_version = allocation_pools[-1].version if first_version != last_version: raise ValueError(_("Ranges must be in the same IP " "version")) self._allocation_pools = allocation_pools if self.gateway_ip and self.allocation_pools: if self.gateway_ip.version != self.allocation_pools[0].version: raise ValueError(_("Gateway IP version inconsistent with " "allocation pool version")) @property def tenant_id(self): return self._tenant_id @property def subnet_id(self): return self._subnet_id @property def gateway_ip(self): return self._gateway_ip @property def allocation_pools(self): return self._allocation_pools def _validate_with_subnet(self, subnet_cidr): if self.allocation_pools: if subnet_cidr.version != self.allocation_pools[0].version: raise ipam_exc.IpamValueInvalid(_( "allocation_pools use the wrong ip version")) for pool in self.allocation_pools: if pool not in subnet_cidr: raise ipam_exc.IpamValueInvalid(_( "allocation_pools are not in the subnet")) class AnySubnetRequest(SubnetRequest): """A template for allocating an unspecified subnet from IPAM Support for this type of request in a driver is optional. For example, the initial reference implementation will not support this. The API has no way of creating a subnet without a specific address until subnet-allocation is implemented. 
""" WILDCARDS = {constants.IPv4: '0.0.0.0', constants.IPv6: '::'} def __init__(self, tenant_id, subnet_id, version, prefixlen, gateway_ip=None, allocation_pools=None): """Initialize AnySubnetRequest :param version: Either constants.IPv4 or constants.IPv6 :param prefixlen: The prefix len requested. Must be within the min and max allowed. :type prefixlen: int """ super(AnySubnetRequest, self).__init__( tenant_id=tenant_id, subnet_id=subnet_id, gateway_ip=gateway_ip, allocation_pools=allocation_pools) net = netaddr.IPNetwork(self.WILDCARDS[version] + '/' + str(prefixlen)) self._validate_with_subnet(net) self._prefixlen = prefixlen @property def prefixlen(self): return self._prefixlen class SpecificSubnetRequest(SubnetRequest): """A template for allocating a specified subnet from IPAM The initial reference implementation will probably just allow any allocation, even overlapping ones. This can be expanded on by future blueprints. """ def __init__(self, tenant_id, subnet_id, subnet_cidr, gateway_ip=None, allocation_pools=None): """Initialize SpecificSubnetRequest :param subnet: The subnet requested. Can be IPv4 or IPv6. However, when IPAM tries to fulfill this request, the IP version must match the version of the address scope being used. :type subnet: netaddr.IPNetwork or convertible to one """ super(SpecificSubnetRequest, self).__init__( tenant_id=tenant_id, subnet_id=subnet_id, gateway_ip=gateway_ip, allocation_pools=allocation_pools) self._subnet_cidr = netaddr.IPNetwork(subnet_cidr) self._validate_with_subnet(self._subnet_cidr) @property def subnet_cidr(self): return self._subnet_cidr @property def prefixlen(self): return self._subnet_cidr.prefixlen @six.add_metaclass(abc.ABCMeta) class AddressRequest(object): """Abstract base class for address requests""" class SpecificAddressRequest(AddressRequest): """For requesting a specified address from IPAM""" def __init__(self, address): """Initialize SpecificAddressRequest :param address: The address being requested :type address: A netaddr.IPAddress or convertible to one. """ super(SpecificAddressRequest, self).__init__() self._address = netaddr.IPAddress(address) @property def address(self): return self._address class BulkAddressRequest(AddressRequest): """For requesting a batch of available addresses from IPAM""" def __init__(self, num_addresses): """Initialize BulkAddressRequest :param num_addresses: The quantity of IP addresses being requested :type num_addresses: int """ super(BulkAddressRequest, self).__init__() self._num_addresses = num_addresses @property def num_addresses(self): return self._num_addresses class AnyAddressRequest(AddressRequest): """Used to request any available address from the pool.""" class PreferNextAddressRequest(AnyAddressRequest): """Used to request next available IP address from the pool.""" class AutomaticAddressRequest(SpecificAddressRequest): """Used to create auto generated addresses, such as EUI64""" EUI64 = 'eui64' def _generate_eui64_address(self, **kwargs): if set(kwargs) != set(['prefix', 'mac']): raise ipam_exc.AddressCalculationFailure( address_type='eui-64', reason=_('must provide exactly 2 arguments - cidr and MAC')) prefix = kwargs['prefix'] mac_address = kwargs['mac'] return netutils.get_ipv6_addr_by_EUI64(prefix, mac_address) _address_generators = {EUI64: _generate_eui64_address} def __init__(self, address_type=EUI64, **kwargs): """Initialize AutomaticAddressRequest This constructor builds an automatic IP address. Parameter needed for generating it can be passed as optional keyword arguments. 
:param address_type: the type of address to generate. It could be an eui-64 address, a random IPv6 address, or an IPv4 link-local address. For the Kilo release only eui-64 addresses will be supported. """ address_generator = self._address_generators.get(address_type) if not address_generator: raise ipam_exc.InvalidAddressType(address_type=address_type) address = address_generator(self, **kwargs) super(AutomaticAddressRequest, self).__init__(address)
class RouterGatewayAddressRequest(AddressRequest): """Used to request allocating the special router gateway address."""
class AddressRequestFactory(object): """Builds a request using IP info Additional parameters (port and context) are not used in the default implementation, but are intended for use in subclasses provided by specific IPAM drivers. """ @classmethod def get_request(cls, context, port, ip_dict): """Build an AddressRequest from the given IP info :param context: context (not used here, but can be used in sub-classes) :param port: port dict (not used here, but can be used in sub-classes) :param ip_dict: dict that can contain 'ip_address', 'mac' and 'subnet_cidr' keys. The request to generate is selected depending on which of these keys are present. :return: the prepared AddressRequest (specific or any) """ if ip_dict.get('ip_address'): return SpecificAddressRequest(ip_dict['ip_address']) elif ip_dict.get('eui64_address'): return AutomaticAddressRequest(prefix=ip_dict['subnet_cidr'], mac=ip_dict['mac']) elif port['device_owner'] == constants.DEVICE_OWNER_DHCP: # preserve previous behavior of DHCP ports choosing start of pool return PreferNextAddressRequest() else: return AnyAddressRequest()
class SubnetRequestFactory(object): """Builds a request using subnet info""" @classmethod def get_request(cls, context, subnet, subnetpool): cidr = subnet.get('cidr') subnet_id = subnet.get('id', uuidutils.generate_uuid()) is_any_subnetpool_request = not validators.is_attr_set(cidr) if is_any_subnetpool_request: prefixlen = subnet['prefixlen'] if not validators.is_attr_set(prefixlen): prefixlen = int(subnetpool['default_prefixlen']) return AnySubnetRequest( subnet['tenant_id'], subnet_id, common_utils.ip_version_from_int(subnetpool['ip_version']), prefixlen) else: return SpecificSubnetRequest(subnet['tenant_id'], subnet_id, cidr, subnet.get('gateway_ip'), subnet.get('allocation_pools'))
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/ipam/subnet_alloc.py0000644000175000017500000004476700000000000022621 0ustar00coreycorey00000000000000# Copyright (c) 2015 Hewlett-Packard Co. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
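# NOTE(editor): Hedged usage sketch, not part of the original module. It
# shows how AddressRequestFactory.get_request() (requests.py, above)
# dispatches on the contents of ip_dict; the port dict and values are
# illustrative only.
#
#     from neutron.ipam import requests as ipam_req
#
#     port = {'device_owner': 'network:dhcp'}
#     # An explicit IP wins: returns a SpecificAddressRequest.
#     ipam_req.AddressRequestFactory.get_request(
#         None, port, {'ip_address': '192.0.2.10'})
#     # No IP and a DHCP port owner: returns a PreferNextAddressRequest.
#     ipam_req.AddressRequestFactory.get_request(None, port, {})
#     # No IP and any other owner: returns an AnyAddressRequest.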
import math import operator import netaddr from neutron_lib import constants from neutron_lib.db import api as db_api from neutron_lib import exceptions from oslo_db import exception as db_exc from oslo_utils import uuidutils from neutron._i18n import _ from neutron.db import models_v2 from neutron.ipam import driver from neutron.ipam import exceptions as ipam_exc from neutron.ipam import requests as ipam_req from neutron.ipam import utils as ipam_utils
class SubnetAllocator(driver.Pool): """Class for handling allocation of subnet prefixes from a subnet pool. This class leverages the pluggable IPAM interface where possible to make merging into the IPAM framework easier in future cycles. """ def __init__(self, subnetpool, context): super(SubnetAllocator, self).__init__(subnetpool, context) self._sp_helper = SubnetPoolHelper()
def _lock_subnetpool(self): """Lock the subnetpool's associated row. This method prevents two subnets from being allocated concurrently in the same subnetpool; it is required to ensure non-overlapping cidrs within a subnetpool. """ with db_api.CONTEXT_READER.using(self._context): current_hash = ( self._context.session.query(models_v2.SubnetPool.hash) .filter_by(id=self._subnetpool['id']).scalar()) if current_hash is None: # NOTE(cbrandily): subnetpool has been deleted raise exceptions.SubnetPoolNotFound( subnetpool_id=self._subnetpool['id']) new_hash = uuidutils.generate_uuid() # NOTE(cbrandily): the update prevents two concurrent subnet allocations # from succeeding: at most one transaction will succeed; the others will # be rolled back and caught in neutron.db.v2.base with db_api.CONTEXT_WRITER.using(self._context): query = ( self._context.session.query(models_v2.SubnetPool).filter_by( id=self._subnetpool['id'], hash=current_hash)) count = query.update({'hash': new_hash}) if not count: raise db_exc.RetryRequest(exceptions.SubnetPoolInUse( subnet_pool_id=self._subnetpool['id']))
def _get_allocated_cidrs(self): with db_api.CONTEXT_READER.using(self._context): query = self._context.session.query(models_v2.Subnet.cidr) subnets = query.filter_by(subnetpool_id=self._subnetpool['id']) return (x.cidr for x in subnets)
def _get_available_prefix_list(self): prefixes = (x.cidr for x in self._subnetpool.prefixes) allocations = self._get_allocated_cidrs() prefix_set = netaddr.IPSet(iterable=prefixes) allocation_set = netaddr.IPSet(iterable=allocations) available_set = prefix_set.difference(allocation_set) available_set.compact() return sorted(available_set.iter_cidrs(), key=operator.attrgetter('prefixlen'), reverse=True)
def _num_quota_units_in_prefixlen(self, prefixlen, quota_unit): return math.pow(2, quota_unit - prefixlen)
def _allocations_used_by_tenant(self, quota_unit): subnetpool_id = self._subnetpool['id'] tenant_id = self._subnetpool['tenant_id'] with db_api.CONTEXT_READER.using(self._context): qry = self._context.session.query(models_v2.Subnet.cidr) allocations = qry.filter_by(subnetpool_id=subnetpool_id, tenant_id=tenant_id) value = 0 for allocation in allocations: prefixlen = netaddr.IPNetwork(allocation.cidr).prefixlen value += self._num_quota_units_in_prefixlen(prefixlen, quota_unit) return value
def _check_subnetpool_tenant_quota(self, tenant_id, prefixlen): quota_unit = self._sp_helper.ip_version_subnetpool_quota_unit( self._subnetpool['ip_version']) quota = self._subnetpool.get('default_quota') if quota: used = self._allocations_used_by_tenant(quota_unit) requested_units = self._num_quota_units_in_prefixlen(prefixlen, quota_unit) if used + requested_units > quota: raise exceptions.SubnetPoolQuotaExceeded()
def _allocate_any_subnet(self, request): with db_api.CONTEXT_WRITER.using(self._context): self._lock_subnetpool() self._check_subnetpool_tenant_quota(request.tenant_id, request.prefixlen) prefix_pool = self._get_available_prefix_list() for prefix in prefix_pool: if request.prefixlen >= prefix.prefixlen: subnet = next(prefix.subnet(request.prefixlen)) gateway_ip = request.gateway_ip if not gateway_ip: gateway_ip = subnet.network + 1 pools = ipam_utils.generate_pools(subnet.cidr, gateway_ip) return IpamSubnet(request.tenant_id, request.subnet_id, subnet.cidr, gateway_ip=gateway_ip, allocation_pools=pools) msg = _("Insufficient prefix space to allocate subnet size /%s") raise exceptions.SubnetAllocationError( reason=msg % str(request.prefixlen))
def _allocate_specific_subnet(self, request): with db_api.CONTEXT_WRITER.using(self._context): self._lock_subnetpool() self._check_subnetpool_tenant_quota(request.tenant_id, request.prefixlen) cidr = request.subnet_cidr available = self._get_available_prefix_list() matched = netaddr.all_matching_cidrs(cidr, available) if len(matched) == 1 and matched[0].prefixlen <= cidr.prefixlen: return IpamSubnet(request.tenant_id, request.subnet_id, cidr, gateway_ip=request.gateway_ip, allocation_pools=request.allocation_pools) msg = _("Cannot allocate requested subnet from the available " "set of prefixes") raise exceptions.SubnetAllocationError(reason=msg)
def allocate_subnet(self, request): max_prefixlen = int(self._subnetpool['max_prefixlen']) min_prefixlen = int(self._subnetpool['min_prefixlen']) if request.prefixlen > max_prefixlen: raise exceptions.MaxPrefixSubnetAllocationError( prefixlen=request.prefixlen, max_prefixlen=max_prefixlen) if request.prefixlen < min_prefixlen: raise exceptions.MinPrefixSubnetAllocationError( prefixlen=request.prefixlen, min_prefixlen=min_prefixlen) if isinstance(request, ipam_req.AnySubnetRequest): return self._allocate_any_subnet(request) elif isinstance(request, ipam_req.SpecificSubnetRequest): return self._allocate_specific_subnet(request) else: msg = _("Unsupported request type") raise exceptions.SubnetAllocationError(reason=msg)
def get_subnet(self, subnet_id): raise NotImplementedError() def update_subnet(self, request): raise NotImplementedError() def remove_subnet(self, subnet_id): raise NotImplementedError() def get_allocator(self, subnet_ids): return IpamSubnetGroup(self, subnet_ids)
class IpamSubnet(driver.Subnet): def __init__(self, tenant_id, subnet_id, cidr, gateway_ip=None, allocation_pools=None): self._req = ipam_req.SpecificSubnetRequest( tenant_id, subnet_id, cidr, gateway_ip=gateway_ip, allocation_pools=allocation_pools) def allocate(self, address_request): raise NotImplementedError() def deallocate(self, address): raise NotImplementedError() def get_details(self): return self._req
class IpamSubnetGroup(driver.SubnetGroup): def __init__(self, driver, subnet_ids): self._driver = driver self._subnet_ids = subnet_ids def allocate(self, address_request): '''Originally, the Neutron pluggable IPAM backend would ask the driver to try to allocate an IP from each subnet in turn. This implementation preserves that behavior so that existing drivers work as they did before, while giving them the opportunity to optimize it by overriding the implementation.
''' for subnet_id in self._subnet_ids: try: ipam_subnet = self._driver.get_subnet(subnet_id) return ipam_subnet.allocate(address_request), subnet_id except ipam_exc.IpAddressGenerationFailure: continue raise ipam_exc.IpAddressGenerationFailureAllSubnets() class SubnetPoolReader(object): '''Class to assist with reading a subnetpool, loading defaults, and inferring IP version from prefix list. Provides a common way of reading a stored model or a create request with default table attributes. ''' MIN_PREFIX_TYPE = 'min' MAX_PREFIX_TYPE = 'max' DEFAULT_PREFIX_TYPE = 'default' _sp_helper = None def __init__(self, subnetpool): self._read_prefix_info(subnetpool) self._sp_helper = SubnetPoolHelper() self._read_id(subnetpool) self._read_prefix_bounds(subnetpool) self._read_attrs(subnetpool, ['tenant_id', 'name', 'is_default', 'shared']) self.description = subnetpool.get('description') self._read_address_scope(subnetpool) self.subnetpool = {'id': self.id, 'name': self.name, 'project_id': self.tenant_id, 'prefixes': self.prefixes, 'min_prefix': self.min_prefix, 'min_prefixlen': self.min_prefixlen, 'max_prefix': self.max_prefix, 'max_prefixlen': self.max_prefixlen, 'default_prefix': self.default_prefix, 'default_prefixlen': self.default_prefixlen, 'default_quota': self.default_quota, 'address_scope_id': self.address_scope_id, 'is_default': self.is_default, 'shared': self.shared, 'description': self.description} def _read_attrs(self, subnetpool, keys): for key in keys: setattr(self, key, subnetpool[key]) def _ip_version_from_cidr(self, cidr): return netaddr.IPNetwork(cidr).version def _prefixlen_from_cidr(self, cidr): return netaddr.IPNetwork(cidr).prefixlen def _read_id(self, subnetpool): id = subnetpool.get('id', constants.ATTR_NOT_SPECIFIED) if id is constants.ATTR_NOT_SPECIFIED: id = uuidutils.generate_uuid() self.id = id def _read_prefix_bounds(self, subnetpool): ip_version = self.ip_version default_min = self._sp_helper.default_min_prefixlen(ip_version) default_max = self._sp_helper.default_max_prefixlen(ip_version) self._read_prefix_bound(self.MIN_PREFIX_TYPE, subnetpool, default_min) self._read_prefix_bound(self.MAX_PREFIX_TYPE, subnetpool, default_max) self._read_prefix_bound(self.DEFAULT_PREFIX_TYPE, subnetpool, self.min_prefixlen) self._sp_helper.validate_min_prefixlen(self.min_prefixlen, self.max_prefixlen) self._sp_helper.validate_max_prefixlen(self.max_prefixlen, ip_version) self._sp_helper.validate_default_prefixlen(self.min_prefixlen, self.max_prefixlen, self.default_prefixlen) def _read_prefix_bound(self, type, subnetpool, default_bound=None): prefixlen_attr = type + '_prefixlen' prefix_attr = type + '_prefix' prefixlen = subnetpool.get(prefixlen_attr, constants.ATTR_NOT_SPECIFIED) wildcard = self._sp_helper.wildcard(self.ip_version) if prefixlen is constants.ATTR_NOT_SPECIFIED and default_bound: prefixlen = default_bound if prefixlen is not constants.ATTR_NOT_SPECIFIED: prefix_cidr = '/'.join((wildcard, str(prefixlen))) setattr(self, prefix_attr, prefix_cidr) setattr(self, prefixlen_attr, prefixlen) def _read_prefix_info(self, subnetpool): prefix_list = subnetpool['prefixes'] if not prefix_list: raise exceptions.EmptySubnetPoolPrefixList() ip_version = None for prefix in prefix_list: if not ip_version: ip_version = netaddr.IPNetwork(prefix).version elif netaddr.IPNetwork(prefix).version != ip_version: raise exceptions.PrefixVersionMismatch() self.default_quota = subnetpool.get('default_quota') if self.default_quota is constants.ATTR_NOT_SPECIFIED: self.default_quota = None 
self.ip_version = ip_version self.prefixes = self._compact_subnetpool_prefix_list(prefix_list) def _read_address_scope(self, subnetpool): address_scope_id = subnetpool.get('address_scope_id', constants.ATTR_NOT_SPECIFIED) if address_scope_id is constants.ATTR_NOT_SPECIFIED: address_scope_id = None self.address_scope_id = address_scope_id def _compact_subnetpool_prefix_list(self, prefix_list): """Compact any overlapping prefixes in prefix_list and return the result """ ip_set = netaddr.IPSet() for prefix in prefix_list: ip_set.add(netaddr.IPNetwork(prefix)) ip_set.compact() return [x.cidr for x in ip_set.iter_cidrs()] class SubnetPoolHelper(object): _PREFIX_VERSION_INFO = {4: {'max_prefixlen': constants.IPv4_BITS, 'wildcard': '0.0.0.0', 'default_min_prefixlen': 8, # IPv4 quota measured in units of /32 'quota_units': 32}, 6: {'max_prefixlen': constants.IPv6_BITS, 'wildcard': '::', 'default_min_prefixlen': 64, # IPv6 quota measured in units of /64 'quota_units': 64}} def validate_min_prefixlen(self, min_prefixlen, max_prefixlen): if min_prefixlen < 0: raise exceptions.UnsupportedMinSubnetPoolPrefix( prefix=min_prefixlen, version=4) if min_prefixlen > max_prefixlen: raise exceptions.IllegalSubnetPoolPrefixBounds( prefix_type='min_prefixlen', prefixlen=min_prefixlen, base_prefix_type='max_prefixlen', base_prefixlen=max_prefixlen) def validate_max_prefixlen(self, prefixlen, ip_version): max = self._PREFIX_VERSION_INFO[ip_version]['max_prefixlen'] if prefixlen > max: raise exceptions.IllegalSubnetPoolPrefixBounds( prefix_type='max_prefixlen', prefixlen=prefixlen, base_prefix_type='ip_version_max', base_prefixlen=max) def validate_default_prefixlen(self, min_prefixlen, max_prefixlen, default_prefixlen): if default_prefixlen < min_prefixlen: raise exceptions.IllegalSubnetPoolPrefixBounds( prefix_type='default_prefixlen', prefixlen=default_prefixlen, base_prefix_type='min_prefixlen', base_prefixlen=min_prefixlen) if default_prefixlen > max_prefixlen: raise exceptions.IllegalSubnetPoolPrefixBounds( prefix_type='default_prefixlen', prefixlen=default_prefixlen, base_prefix_type='max_prefixlen', base_prefixlen=max_prefixlen) def wildcard(self, ip_version): return self._PREFIX_VERSION_INFO[ip_version]['wildcard'] def default_max_prefixlen(self, ip_version): return self._PREFIX_VERSION_INFO[ip_version]['max_prefixlen'] def default_min_prefixlen(self, ip_version): return self._PREFIX_VERSION_INFO[ip_version]['default_min_prefixlen'] def ip_version_subnetpool_quota_unit(self, ip_version): return self._PREFIX_VERSION_INFO[ip_version]['quota_units'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/ipam/utils.py0000644000175000017500000000555300000000000021305 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
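# NOTE(editor): Worked example, not part of the original module. It restates
# the quota arithmetic used by SubnetAllocator (subnet_alloc.py, above): an
# IPv4 subnetpool measures quota in units of /32 and an IPv6 pool in units
# of /64, so a prefix of length p consumes 2 ** (quota_unit - p) units.
#
#     import math
#     assert math.pow(2, 32 - 24) == 256.0  # an IPv4 /24 consumes 256 units
#     assert math.pow(2, 64 - 60) == 16.0   # an IPv6 /60 consumes 16 /64s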
import netaddr from neutron_lib import constants
def check_subnet_ip(cidr, ip_address, port_owner=''): """Validate that the IP address is on the subnet.""" ip = netaddr.IPAddress(ip_address) net = netaddr.IPNetwork(cidr) # Check that the IP is valid on the subnet. In IPv4 this cannot be the # network or the broadcast address if net.version == constants.IP_VERSION_6: # NOTE(njohnston): In some cases the code cannot know the owner of the # port. In these cases port_owner should be an empty string, and we pass # it through here. return ((port_owner in (constants.ROUTER_PORT_OWNERS + ('', )) or ip != net.network) and ip in net) else: return ip != net.network and ip != net.broadcast and ip in net
def check_gateway_invalid_in_subnet(cidr, gateway): """Check whether the gw IP address is invalid on the subnet.""" ip = netaddr.IPAddress(gateway) net = netaddr.IPNetwork(cidr) # Check whether the gateway IP is invalid on the subnet. # If the gateway is in the subnet, it cannot be the # network address or (in IPv4 only) the broadcast address. # If the gateway is outside the subnet, there is no way to # check, since we don't have the gateway's subnet cidr. return (ip in net and (net.version == constants.IP_VERSION_4 and ip in (net.network, net[-1])))
def generate_pools(cidr, gateway_ip): """Create IP allocation pools for a specified subnet The Neutron API defines a subnet's allocation pools as a list of netaddr.IPRange objects defining the pool ranges. """ # Auto allocate the pool around gateway_ip net = netaddr.IPNetwork(cidr) ip_version = net.version first = netaddr.IPAddress(net.first, ip_version) last = netaddr.IPAddress(net.last, ip_version) if first == last: # handle single address subnet case return [netaddr.IPRange(first, last)] first_ip = first + 1 # last address is broadcast in v4 last_ip = last - (ip_version == 4) if first_ip >= last_ip: # /31 lands here return [] ipset = netaddr.IPSet(netaddr.IPRange(first_ip, last_ip)) if gateway_ip: ipset.remove(netaddr.IPAddress(gateway_ip, ip_version)) return list(ipset.iter_ipranges())
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982290.9750416 neutron-16.0.0.0b2.dev214/neutron/locale/0000755000175000017500000000000000000000000020074 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982290.9710417 neutron-16.0.0.0b2.dev214/neutron/locale/de/0000755000175000017500000000000000000000000020464 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3150446 neutron-16.0.0.0b2.dev214/neutron/locale/de/LC_MESSAGES/0000755000175000017500000000000000000000000022251 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/locale/de/LC_MESSAGES/neutron.po0000644000175000017500000031203300000000000024305 0ustar00coreycorey00000000000000# Translations template for neutron. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the neutron project. # # Translators: # Ettore Atalan , 2014 # Andreas Jaeger , 2016. #zanata # Frank Kloeker , 2016. #zanata # Andreas Jaeger , 2019.
#zanata msgid "" msgstr "" "Project-Id-Version: neutron VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2019-12-20 15:01+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2019-09-26 12:47+0000\n" "Last-Translator: Andreas Jaeger \n" "Language: de\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: German\n" #, python-format msgid "" "\n" "Command: %(cmd)s\n" "Exit code: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" msgstr "" "\n" "Befehl: %(cmd)s\n" "Beendigungscode: %(code)s\n" "Standardeingabe: %(stdin)s\n" "Standardausgabe: %(stdout)s\n" "Standardfehler: %(stderr)s" #, python-format msgid "" "%(branch)s HEAD file does not match migration timeline head, expected: " "%(head)s" msgstr "" "%(branch)s-HEAD-Datei stimmt nicht mit Migrationszeitplan für HEAD überein. " "Erwartet: %(head)s" #, python-format msgid "" "%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' " "and '%(desc)s'" msgstr "" "%(invalid_dirs)s ist ein ungültiger Wert für 'sort_dirs'; gültige Werte sind " "'%(asc)s' und '%(desc)s'" #, python-format msgid "%(key)s prohibited for %(tunnel)s provider network" msgstr "%(key)s untersagt für %(tunnel)s-Anbieter-Netz" #, python-format msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" msgstr "" "%(name)s '%(addr)s' stimmt nicht mit 'ip_version' '%(ip_version)s' überein" #, python-format msgid "%s cannot be called while in offline mode" msgstr "%s kann nicht im Offlinemodus aufgerufen werden" #, python-format msgid "%s is invalid attribute for sort_keys" msgstr "%s ist ein ungültiges Attribut für 'sort_keys'" #, python-format msgid "%s must implement get_port_from_device or get_ports_from_devices." msgstr "" "%s muss get_port_from_device oder get_ports_from_devices implementieren." #, python-format msgid "%s prohibited for VLAN provider network" msgstr "%s untersagt für VLAN-Provider-Netz" #, python-format msgid "%s prohibited for flat provider network" msgstr "%s untersagt für einfaches Anbieternetzwerk" #, python-format msgid "%s prohibited for local provider network" msgstr "%s untersagt für lokales Anbieternetzwerk" #, python-format msgid "'%s' is not a valid RBAC object type" msgstr "'%s' ist kein gültiger RBAC-Objekttyp" #, python-format msgid "'%s' is not supported for filtering" msgstr "'%s' wird für die Filterung nicht unterstützt" #, python-format msgid "'module' object has no attribute '%s'" msgstr "Das 'module'-Objekt hat kein Attribut '%s'." msgid "'port_max' is smaller than 'port_min'" msgstr "'port_max' ist kleiner als 'port_min'" msgid "0 is not allowed as CIDR prefix length" msgstr "0 ist als Länge für CIDR-Präfix nicht zulässig" msgid "A cidr must be specified in the absence of a subnet pool" msgstr "Ein cidr muss angegeben werden, wenn kein Teilnetzpool vorhanden ist" msgid "" "A decimal value as Vendor's Registered Private Enterprise Number as required " "by RFC3315 DUID-EN." msgstr "" "Ein Dezimalwert als Registered Private Enterprise Number des Anbieters, wie " "es durch RFC3315 DUID-EN gefordert wird." #, python-format msgid "A default external network already exists: %(net_id)s." msgstr "Es ist bereits ein externes Standardnetz vorhanden: %(net_id)s." msgid "" "A default subnetpool for this IP family has already been set. 
Only one " "default may exist per IP family" msgstr "" "Es wurde bereits ein Standardsubnetzpool für diese IP-Familie definiert. Pro " "IP-Familie darf nur ein Standardpool vorhanden sein. " msgid "" "A list of mappings of physical networks to MTU values. The format of the " "mapping is :. This mapping allows specifying a physical " "network MTU value that differs from the default global_physnet_mtu value." msgstr "" "Eine Liste der Zuordnungen von physischen Netzen zu MTU-Werten. Das Format " "der Zuordnung ist :. Diese Zuordnung lässt die Angabe " "eines physischen Netz-MTU-Werts zu, der sich vom Standardwert für " "global_physnet_mtu unterscheidet." msgid "A metering driver must be specified" msgstr "Ein Messungstreiber muss angegeben sein" msgid "Access to this resource was denied." msgstr "Zugriff auf diese Ressource wurde verweigert." msgid "Action to be executed when a child process dies" msgstr "" "Aktion, die ausgeführt werden soll, wenn ein untergeordneter Prozess " "abgebrochen wird" msgid "" "Add comments to iptables rules. Set to false to disallow the addition of " "comments to generated iptables rules that describe each rule's purpose. " "System must support the iptables comments module for addition of comments." msgstr "" "Fügen Sie Kommentare zu iptables-Regeln hinzu. Setzen Sie den Wert auf " "'false', um das Hinzufügen von Kommentaren zu generierten iptables-Regeln, " "die den Zweck der einzelnen Regeln beschreiben, zu unterbinden. Das System " "muss das Modul für iptables-Kommentare zum Hinzufügen von Kommentaren " "unterstützen. " msgid "Address not present on interface" msgstr "Adresse an der Schnittstelle nicht vorhanden." msgid "Adds test attributes to core resources." msgstr "Fügt Testattribute zu Kernressourcen hinzu." #, python-format msgid "Agent %(id)s is not a L3 Agent or has been disabled" msgstr "Agent %(id)s ist kein L3-Agent oder wurde inaktiviert" msgid "" "Agent starts with admin_state_up=False when enable_new_agents=False. In the " "case, user's resources will not be scheduled automatically to the agent " "until admin changes admin_state_up to True." msgstr "" "Agent startet mit admin_state_up=False, wenn enable_new_agents=False. In " "diesem Fall werden die Ressourcen eines Benutzers nur dann automatisch für " "den Agenten geplant, wenn der Administrator admin_state_up auf True festlegt." #, python-format msgid "Agent updated: %(payload)s" msgstr "Agent aktualisiert: %(payload)s" msgid "Allow auto scheduling networks to DHCP agent." msgstr "Automatische Netzzuordnung zum DHCP-Agenten zulassen." msgid "Allow auto scheduling of routers to L3 agent." msgstr "Automatische Routerzuordnung zum L3-Agenten zulassen." msgid "" "Allow overlapping IP support in Neutron. Attention: the following parameter " "MUST be set to False if Neutron is being used in conjunction with Nova " "security groups." msgstr "" "Überschneidung bei IP-Support in Neutron zulassen. Achtung: Die folgenden " "Parameter müssen auf 'False' gesetzt werden, wenn Neutron zusammen mit Nova-" "Sicherheitsgruppen verwendet wird." msgid "Allow running metadata proxy." msgstr "Aktiven Metadaten-Proxy zulassen." 
msgid "Allow sending resource operation notification to DHCP agent" msgstr "" "Senden von Benachrichtigungen zu Ressourcenoperationen an den DHCP-Agenten " "zulassen" msgid "Allow the creation of PTR records" msgstr "Erstellen von PTR-Datensätzen zulassen" msgid "Allow the usage of the bulk API" msgstr "Nutzung der Massenzuweisungs-API zulassen" msgid "Allow to perform insecure SSL (https) requests to nova metadata" msgstr "" "Durchführung von unsicheren SSL-Anforderungen (HTTPS) an Nova-Metadaten" msgid "" "Allows for serving metadata requests coming from a dedicated metadata access " "network whose CIDR is 169.254.169.254/16 (or larger prefix), and is " "connected to a Neutron router from which the VMs send metadata:1 request. In " "this case DHCP Option 121 will not be injected in VMs, as they will be able " "to reach 169.254.169.254 through a router. This option requires " "enable_isolated_metadata = True." msgstr "" "Ermöglicht die Bereitstellung von Metadatenanforderungen aus einem " "dedizierten Metadatenzugriffsnetz mit der CIDR 169.254.169.254/16 (oder " "einem längeren Präfix), das mit einem Neutron-Router verbunden ist, über den " "die VMs Anforderungen vom Typ metadata:1 senden. In diesem Fall wird die " "DHCP-Option 121 nicht in die VMs injiziert, da sie 169.254.169.254 über " "einen Router erreichen können. Diese Option setzt die Einstellung " "'enable_isolated_metadata = True' voraus." msgid "An RBAC policy already exists with those values." msgstr "Es ist bereits eine RBAC-Richtlinie mit diesen Werten vorhanden." msgid "An identifier must be specified when updating a subnet" msgstr "" "Bei der Aktualisierung eines Subnetzes muss ein Bezeichner angegeben werden." msgid "" "An ordered list of extension driver entrypoints to be loaded from the " "neutron.ml2.extension_drivers namespace. For example: extension_drivers = " "port_security,qos" msgstr "" "Sortierte Liste von Eingangspunkten für Erweiterungstreiber, die aus dem " "Namensraum neutron.ml2.extension_drivers geladen werden sollen. Beispiel: " "extension_drivers = port_security,qos" msgid "" "An ordered list of networking mechanism driver entrypoints to be loaded from " "the neutron.ml2.mechanism_drivers namespace." msgstr "" "Sortierte Liste der Eingangspunkte für Netzmechanismustreiber die aus dem " "Namensbereich neutron.ml2.mechanism_drivers geladen werden." msgid "An unknown error has occurred. Please try your request again." msgstr "" "Ein unbekannter Fehler ist aufgetreten. Stellen Sie Ihre Anforderung erneut." msgid "Async process didn't respawn" msgstr "Der asynchrone Prozess hat keinen erneuten Prozess erstellt." msgid "Authorization URL for connecting to designate in admin context" msgstr "" "Autorisierungs-URL zum Herstellen einer Verbindung zu Designate im " "Administratorkontext." msgid "Automatically remove networks from offline DHCP agents." msgstr "Netze automatisch von DHCP-Agenten, die offline sind, entfernen." msgid "" "Automatically reschedule routers from offline L3 agents to online L3 agents." msgstr "" "Automatische Neuterminierung für Router von Offline-L3-Agenten zu Online-L3-" "Agenten." msgid "Availability zone of this node" msgstr "Verfügbarkeitszone dieses Knotens" msgid "Available commands" msgstr "Verfügbare Befehle" #, python-format msgid "Base MAC: %s" msgstr "Basis-MAC-Adresse: %s" msgid "" "Base log dir for dnsmasq logging. The log contains DHCP and DNS log " "information and is useful for debugging issues with either DHCP or DNS. If " "this section is null, disable dnsmasq log." 
msgstr "" "Basisprotokollverzeichnis für dnsmasq-Protokollierung. Das Protokoll enthält " "DHCP- und DNS-Protokollinformationen und ist für das Debugging von Problemen " "mit DHCP oder DNS nützlich. Wenn dieser Abschnitt null ist, dann " "deaktivieren Sie das dnsmasq-Protokoll." msgid "Body contains invalid data" msgstr "Hauptteil enthält ungültige Daten" msgid "Bulk operation not supported" msgstr "Massenoperation nicht unterstützt" msgid "CIDR to monitor" msgstr "Zu überwachendes CIDR" #, python-format msgid "Callback for %(resource_type)s not found" msgstr "Callback nach %(resource_type)s nicht gefunden" #, python-format msgid "Callback for %(resource_type)s returned wrong resource type" msgstr "" "Callback nach %(resource_type)s hat den falschen Ressourcentyp zurückgegeben" #, python-format msgid "Cannot add floating IP to port %s that has no fixed IPv4 addresses" msgstr "" "Zu Port %s, der keine statischen IPv4-Adressen besitzt, kann keine " "dynamische IP hinzugefügt werden" #, python-format msgid "Cannot add multiple callbacks for %(resource_type)s" msgstr "" "Es können nicht mehrere Callbacks nach %(resource_type)s hinzugefügt werden" #, python-format msgid "Cannot allocate IPv%(req_ver)s subnet from IPv%(pool_ver)s subnet pool" msgstr "" "IPv%(req_ver)s-Teilnetz kann nicht aus IPv%(pool_ver)s-Teilnetzpool " "zugeordnet werden" msgid "Cannot allocate requested subnet from the available set of prefixes" msgstr "" "Das angeforderte Teilnetz kann nicht aus der verfügbaren Gruppe mit Präfixen " "zugeordnet werden" msgid "Cannot disable enable_dhcp with ipv6 attributes set" msgstr "" "enable_dhcp kann nicht inaktiviert werden, wenn ipv6-Attribute gesetzt sind" #, python-format msgid "Cannot handle subnet of type %(subnet_type)s" msgstr "Teilnetz des Typs %(subnet_type)s kann nicht behandelt werden" msgid "Cannot have multiple IPv4 subnets on router port" msgstr "Mehrere IPv4-Subnetze an Router-Port nicht möglich" #, python-format msgid "" "Cannot have multiple router ports with the same network id if both contain " "IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s" msgstr "" "Mehrere Routerports können nicht dieselbe Netz-ID verwenden, wenn beide IPv6-" "Teilnetze enthalten. Der vorhandene Port %(p)s verfügt über das IPv6-" "Teilnetz und die Netz-ID %(nid)s" #, python-format msgid "" "Cannot host distributed router %(router_id)s on legacy L3 agent %(agent_id)s." msgstr "" "Der verteilte Router %(router_id)s kann am traditionellen L3-Agenten " "%(agent_id)s nicht gehostet werden." 
msgid "Cannot specify both subnet-id and port-id" msgstr "Angabe sowohl von Teilnetz-ID als auch von Port-ID nicht möglich" msgid "Cannot understand JSON" msgstr "Kann JSON nicht verstehen" #, python-format msgid "Cannot update read-only attribute %s" msgstr "Schreibgeschütztes Attribut %s kann nicht aktualisiert werden" msgid "Certificate Authority public key (CA cert) file for ssl" msgstr "Öffentliche Schlüsseldatei der Zertifizierungsstelle für SSL" msgid "Check ebtables installation" msgstr "Installation von ebtables überprüfen" msgid "Check for ARP header match support" msgstr "Auf Unterstützung des Vergleichs von ARP-Headern überprüfen" msgid "Check for ARP responder support" msgstr "Überprüfen Sie, ob ARP-Responder unterstützt werden" msgid "Check for ICMPv6 header match support" msgstr "Auf Unterstützung des Vergleichs von ICMPv6-Headern überprüfen" msgid "Check for OVS Geneve support" msgstr "Auf OVS-Geneve-Unterstützung überprüfen" msgid "Check for OVS vxlan support" msgstr "Überprüfen Sie, ob OVS-VXLAN-Unterstützung vorliegt" msgid "Check for VF management support" msgstr "Überprüfen Sie, ob VF-Management unterstützt wird" msgid "Check for iproute2 vxlan support" msgstr "Überprüfen Sie, ob iproute2-VXLAN-Unterstützung vorliegt" msgid "Check for nova notification support" msgstr "Überprüfen Sie, ob Nova-Benachrichtigungen unterstützt werden" msgid "Check for patch port support" msgstr "Überprüfen Sie, ob Patch-Ports unterstützt werden" msgid "Check ip6tables installation" msgstr "Überprüfen Sie die ip6tables-Installation." msgid "Check ipset installation" msgstr "Überprüfen Sie die ipset-Installation." msgid "Check keepalived IPv6 support" msgstr "IPv6-Unterstützung von keepalived überprüfen" msgid "Check minimal dibbler version" msgstr "Mindestversion von dibbler überprüfen" msgid "Check minimal dnsmasq version" msgstr "Überprüfen Sie die Mindestversion für dnsmasq" msgid "Check netns permission settings" msgstr "Überprüfen Sie die netns-Berechtigungseinstellungen" msgid "Check ovs conntrack support" msgstr "Überprüfen Sie, ob OVS-Conntrack-Unterstützung vorhanden ist." msgid "Check ovsdb native interface support" msgstr "Unterstützung für native ovsdb-Schnittstelle überprüfen" #, python-format msgid "" "Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of " "subnet %(sub_id)s" msgstr "" "Überschneidungen zwischen CIDR %(subnet_cidr)s von Teilnetz %(subnet_id)s " "und CIDR %(cidr)s von Teilnetz %(sub_id)s" msgid "Cleanup resources of a specific agent type only." msgstr "Bereinigen Sie nur Ressourcen mit einem bestimmten Agententyp." msgid "Client certificate for nova metadata api server." msgstr "Clientzertifikat zu API-Server für Nova-Metadaten." msgid "" "Comma-separated list of : tuples, mapping " "network_device to the agent's node-specific list of virtual functions that " "should not be used for virtual networking. vfs_to_exclude is a semicolon-" "separated list of virtual functions to exclude from network_device. The " "network_device in the mapping should appear in the physical_device_mappings " "list." msgstr "" "Liste mit durch Kommas voneinander getrennten Tupeln vom Typ " ":, in der eine Netzeinheit der " "knotenspezifischen Liste virtueller Funktionen des Agenten zugeordnet wird, " "die nicht für den virtuellen Netzbetrieb verwendet werden sollen. Bei " "'Auszuschließende_VFS' handelt es sich um eine durch Semikolons getrennte " "Liste virtueller Funktionen, die aus der Netzeinheit auszuschließen sind. 
" "Die Netzeinheit in der Zuordnung sollte in der Liste " "'physical_device_mappings' angezeigt werden." msgid "" "Comma-separated list of : tuples mapping physical " "network names to the agent's node-specific Open vSwitch bridge names to be " "used for flat and VLAN networks. The length of bridge names should be no " "more than 11. Each bridge must exist, and should have a physical network " "interface configured as a port. All physical networks configured on the " "server should have mappings to appropriate bridges on each agent. Note: If " "you remove a bridge from this mapping, make sure to disconnect it from the " "integration bridge as it won't be managed by the agent anymore." msgstr "" "Liste mit durch Kommas voneinander getrennten Tuplen vom Typ " ":, in der die physischen Netznamen den " "knotenspezifischen Open vSwitch-Brückennamen des Agenten zugeordnet sind, " "die für einfache und VLAN-Netze verwendet werden sollen. Die Länge der " "Brückennamen darf 11 Zeichen nicht überschreiten. Jede Brücke muss vorhanden " "sein und eine als Port konfigurierte physische Netzschnittstelle haben. Alle " "auf dem Server konfigurierten physischen Netze müssen Zuordnungen zu den " "entsprechenden Brücken in jedem Agenten haben. Hinweis: Wenn Sie aus dieser " "Zuordnung eine Brücke entfernen, stellen Sie sicher, dass Sie die Verbindung " "der Brücke zur Integrationsbrücke unterbrechen, da sie nicht mehr vom " "Agenten verwaltet wird." msgid "" "Comma-separated list of : tuples mapping " "physical network names to the agent's node-specific physical network device " "interfaces of SR-IOV physical function to be used for VLAN networks. All " "physical networks listed in network_vlan_ranges on the server should have " "mappings to appropriate interfaces on each agent." msgstr "" "Liste mit durch Kommas voneinander getrennten Tupeln vom Typ " ":, in der die Namen physischer Netze den " "knotenspezifischen Netzeinheitenschnittstellen des Agenten mit der " "physischen SR-IOV-Funktion zugeordnet wird, die für VLAN-Netze verwendet " "werden soll. Alle physischen Netze, die in 'network_vlan_ranges' auf dem " "Server aufgeführt sind, sollten entsprechenden Schnittstellen in jedem " "Agenten zugeordnet werden." msgid "" "Comma-separated list of : tuples " "mapping physical network names to the agent's node-specific physical network " "interfaces to be used for flat and VLAN networks. All physical networks " "listed in network_vlan_ranges on the server should have mappings to " "appropriate interfaces on each agent." msgstr "" "Liste mit durch Kommas voneinander getrennten Tupeln vom Typ " ":, in der die physischen Netznamen den " "knotenspezifischen, physischen Netzschnittstellen des Agenten zugeordnet " "sind, die für einfache und VLNA-Netze verwendet werden sollen. Alle " "physischen Netze, die über die Eigenschaft 'network_vlan_ranges' auf dem " "Server aufgelistet sind, müssen Zuordnungen zu den entsprechenden " "Schnittstellen in jedem Agenten haben." 
msgid "" "Comma-separated list of : tuples enumerating ranges of GRE " "tunnel IDs that are available for tenant network allocation" msgstr "" "Durch Kommas getrennte Liste von : Tupeln, die Bereiche " "von GRE-Tunnel-IDs aufzählen, die für eine Nutzernetzzuordnung verfügbar sind" msgid "" "Comma-separated list of : tuples enumerating ranges of " "Geneve VNI IDs that are available for tenant network allocation" msgstr "" "Durch Kommas getrennte Liste mit den :-Tupeln, die die " "Bereiche der Geneve-VNI-IDs aufzählen, die für eine Nutzernetzzuordnung " "verfügbar sind" msgid "" "Comma-separated list of : tuples enumerating ranges of " "VXLAN VNI IDs that are available for tenant network allocation" msgstr "" "Durch Kommas getrennte Liste von : Tupeln, die Bereiche " "von VXLAN-VNI-IDs aufzählen, die für eine Nutzernetzzuordnung verfügbar sind" msgid "" "Comma-separated list of the DNS servers which will be used as forwarders." msgstr "" "Durch Kommas getrennte Liste der DNS-Server, die künftig als " "Weiterleitungsserver verwendet werden." msgid "Command to execute" msgstr "Auszuführender Befehl" msgid "Config file for interface driver (You may also use l3_agent.ini)" msgstr "" "Konfigurationsdatei für Schnittstellentreiber (Sie können auch 'l3_agent." "ini' verwenden)" #, python-format msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s" msgstr "Kollidierender Wert bei Ethernet-Typ %(ethertype)s für CIDR %(cidr)s" msgid "" "Controls whether the neutron security group API is enabled in the server. It " "should be false when using no security groups or using the nova security " "group API." msgstr "" "Steuert, ob die Neutron-Sicherheitsgruppen-API im Server aktiviert ist. " "Sollte 'false' sein, wenn keine Sicherheitsgruppen verwendet werden oder " "wenn die Nova-Sicherheitsgruppen-API verwendet wird." #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" msgstr "" "Keine Bindung an %(host)s:%(port)s möglich nach Versuch über %(time)d " "Sekunden" msgid "Could not deserialize data" msgstr "Daten konnten nicht deserialisiert werden" msgid "" "DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite " "lease times." msgstr "" "DHCP-Leasedauer (in Sekunden). Verwenden Sie -1, damit dnsmasq unbegrenzte " "Leasedauern verwendet." msgid "" "DVR deployments for VXLAN/GRE/Geneve underlays require L2-pop to be enabled, " "in both the Agent and Server side." msgstr "" "DVR-Implementierungen für VXLAN/GRE/Geneve-Underlays erfordern die " "Aktivierung von L2-pop sowohl auf der Agenten- als auch auf der Serverseite." msgid "" "Database engine for which script will be generated when using offline " "migration." msgstr "" "Datenbankengine, für die bei Verwendung der Offline-Migration ein Script " "generiert wird." msgid "Default driver to use for quota checks." msgstr "Standardtreiber zur Verwendung für Kontingentprüfungen." msgid "Default external networks must be shared to everyone." msgstr "Externe Standardnetze müssen für alle freigegeben werden." msgid "" "Default network type for external networks when no provider attributes are " "specified. By default it is None, which means that if provider attributes " "are not specified while creating external networks then they will have the " "same type as tenant networks. Allowed values for external_network_type " "config option depend on the network type values configured in type_drivers " "config option." 
msgstr "" "Standardnetztyp für externe Netze, wenn keine Anbieterattribut angegeben " "wurden. Der Standardwert None bedeutet, dass, wenn keine Anbieterattribut " "beim Erstellen von externen Netzen angegeben werden, derselbe Typ wie bei " "Nutzernetzen verwendet wird. Die zulässigen Werte für die " "Konfigurationsoption external_network_type hängen von den konfigurierten " "Netztypwerten in der Konfigurationsoption type_drivers ab." msgid "" "Default number of RBAC entries allowed per tenant. A negative value means " "unlimited." msgstr "" "Standardanzahl an zulässigen RBAC-Einträgen pro Nutzer. Ein negativer Wert " "bedeutet unbegrenzt." msgid "" "Default number of resource allowed per tenant. A negative value means " "unlimited." msgstr "" "Standardanzahl an zulässigen Ressourcen pro Nutzer. Ein negativer Wert " "bedeutet unbegrenzt." msgid "Default security group" msgstr "Standardsicherheitsgruppe" msgid "Default security group already exists." msgstr "Standardsicherheitsgruppe ist bereits vorhanden." msgid "" "Default value of availability zone hints. The availability zone aware " "schedulers use this when the resources availability_zone_hints is empty. " "Multiple availability zones can be specified by a comma separated string. " "This value can be empty. In this case, even if availability_zone_hints for a " "resource is empty, availability zone is considered for high availability " "while scheduling the resource." msgstr "" "Der Standardwert für die Eigenschaft 'availability_zone_hints'. Die mit " "Verfügbarkeitszonen kompatiblen Scheduler verwenden diesen Wert, wenn der " "Wert für 'availability_zone_hints' der Ressourcen leer ist. Mehrere " "Verfügbarkeitszonen können als Zeichenfolge, durch Kommas getrennt, " "angegeben werden. Dieser Wert kann leer sein. In diesem Fall wird die " "Verfügbarkeitszone bei der Ressourcenplanung als hoch verfügbar betrachtet, " "auch dann, wenn die Eigenschaft 'availability_zone_hints' für eine Ressource " "leer ist. " msgid "" "Define the default value of enable_snat if not provided in " "external_gateway_info." msgstr "" "Definieren Sie den Standardwert von enable_snat, falls in " "external_gateway_info nichts angegeben ist." msgid "" "Defines providers for advanced services using the format: :" ":[:default]" msgstr "" "Definiert Provider für erweiterte Services mit dem folgenden Format: " "::[:default]" msgid "Delete the namespace by removing all devices." msgstr "Löschen Sie den Namensbereich durch Entfernen aller Geräte." #, python-format msgid "Deleting port %s" msgstr "Port %s wird gelöscht" #, python-format msgid "Deployment error: %(reason)s." msgstr "Implementierungsfehler: %(reason)s." msgid "Destroy IPsets even if there is an iptables reference." msgstr "IPsets löschen, auch wenn eine iptables-Referenz vorhanden ist." msgid "Destroy all IPsets." msgstr "Alle IPsets löschen." #, python-format msgid "Device %(dev_name)s in mapping: %(mapping)s not unique" msgstr "Einheit %(dev_name)s in Zuordnung %(mapping)s nicht eindeutig" msgid "Device not found" msgstr "Einheit nicht gefunden" msgid "Domain to use for building the hostnames" msgstr "Für das Erstellen von Hostnamen zu verwendende Domäne" msgid "Downgrade no longer supported" msgstr "Herabstufung wird nicht mehr unterstützt" #, python-format msgid "Driver %s is not unique across providers" msgstr "Treiber %s ist für Anbieter nicht eindeutig" msgid "Driver for external DNS integration." msgstr "Treiber für externe DNS-Integration." 
msgid "Driver for security groups firewall in the L2 agent" msgstr "Treiber für Sicherheitsgruppen-Firewall im L2-Agenten" msgid "Driver to use for scheduling network to DHCP agent" msgstr "Zu verwendender Treiber bei Netzzuordnung zum DHCP-Agenten" msgid "Driver to use for scheduling router to a default L3 agent" msgstr "Zu verwendender Treiber bei Routerzuordnung zum Standard-L3-Agenten" msgid "" "Driver used for ipv6 prefix delegation. This needs to be an entry point " "defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg for " "entry points included with the neutron source." msgstr "" "Treiber, der für die IPv6-Präfixdelegierung verwendet wird. Dies muss ein " "Einstiegspunkt sein, der im Namensbereich neutron.agent.linux.pd_drivers " "definiert ist. In setup.cfg finden Sie die Einstiegspunkte, die in der " "Neutron-Quelle enthalten sind." #, python-format msgid "" "Duplicate L3HARouterAgentPortBinding is created for router(s) %(router)s. " "Database cannot be upgraded. Please, remove all duplicates before upgrading " "the database." msgstr "" "Doppelte L3HARouterAgentPortBinding wird für Router %(router)s erstellt. Es " "konnte kein Aktualisierung für die Datenbank durchgeführt werden. Entfernen " "Sie alle Duplikate, bevor Sie die Aktualisierung der Datenbank durchführen." msgid "Duplicate Security Group Rule in POST." msgstr "Doppelte Sicherheitsgruppenregel in POST." msgid "Duplicate address detected" msgstr "Doppelte Adresse erkannt." #, python-format msgid "ERROR: %s" msgstr "FEHLER: %s" msgid "" "ERROR: Unable to find configuration file via the default search paths (~/." "neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!" msgstr "" "FEHLER: Konfigurationsdatei kann über die Standardsuchpfade (~/.neutron/, " "~/, /etc/neutron/, /etc/) und über die Option '--config-file' nicht gefunden " "werden!" msgid "" "Either one of parameter network_id or router_id must be passed to _get_ports " "method." msgstr "" "Einer der Parameter network_id und router_id muss an die Methode _get_ports " "übergeben werden." msgid "Either subnet_id or port_id must be specified" msgstr "Entweder 'subnet_id' oder 'port_id' muss angegeben sein" msgid "Enable HA mode for virtual routers." msgstr "Hochverfügbarkeitsmodus für virtuelle Router aktivieren." msgid "Enable SSL on the API server" msgstr "SSL auf dem API-Server aktivieren" msgid "" "Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 " "plugin using linuxbridge mechanism driver" msgstr "" "VXLAN auf dem Agenten aktivieren. Kann aktiviert werden, wenn der Agent vom " "ml2-Plug-in mithilfe eines Linuxbridge-Mechanismus-Treibers verwaltet wird" msgid "" "Enable local ARP responder which provides local responses instead of " "performing ARP broadcast into the overlay. Enabling local ARP responder is " "not fully compatible with the allowed-address-pairs extension." msgstr "" "Aktivieren Sie den lokalen ARP-Responder, der lokale Antworten bereitstellt " "anstatt ARP-Broadcasts im Overlay durchzuführen. Die Aktivierung eines " "lokalen ARP-Responders ist nicht vollständig kompatibel mit der Erweiterung " "zulässiger Adresspaare." msgid "" "Enable services on an agent with admin_state_up False. If this option is " "False, when admin_state_up of an agent is turned False, services on it will " "be disabled. Agents with admin_state_up False are not selected for automatic " "scheduling regardless of this option. But manual scheduling to such agents " "is available if this option is True." 
msgstr "" "Aktivieren Sie Services auf einem Agenten mit admin_state_up False. Wenn " "diese Option 'False' lautet und wenn admin_state_up eines Agenten auf " "'False' gesetzt wird, werden die Dienste darauf deaktiviert. Agenten mit " "admin_state_up False werden, unabhängig von dieser Option, nicht für die " "automatische Planung ausgewählt. Die manuelle Planung ist für solche Agenten " "jedoch verfügbar, wenn diese Option auf 'True' gesetzt ist." msgid "" "Enables IPv6 Prefix Delegation for automatic subnet CIDR allocation. Set to " "True to enable IPv6 Prefix Delegation for subnet allocation in a PD-capable " "environment. Users making subnet creation requests for IPv6 subnets without " "providing a CIDR or subnetpool ID will be given a CIDR via the Prefix " "Delegation mechanism. Note that enabling PD will override the behavior of " "the default IPv6 subnetpool." msgstr "" "Aktiviert die Delegierung von IPv6-Präfixen für die automatische Subnetz-" "CIDR-Zuordnung. Setzen Sie den Wert auf 'True', um die Delegierung von IPv6-" "Präfixen für die Subnetzzuordnung in einer für die Präfixdelegierung " "geeigneten Umgebung zu aktivieren. Benutzer, die " "Subnetzerstellunganforderungen für IPv6-Subnetze ohne Angabe einer CIDR oder " "Subnetzpool-ID stellen, erhalten eine CIDR über den " "Präfixdelegierungsmechanismus. Beachten Sie, dass die Aktivierung der " "Präfixdelegierung das Verhalten für den IPv6-Standardsubnetzpool außer Kraft " "setzt." msgid "" "Enables the dnsmasq service to provide name resolution for instances via DNS " "resolvers on the host running the DHCP agent. Effectively removes the '--no-" "resolv' option from the dnsmasq process arguments. Adding custom DNS " "resolvers to the 'dnsmasq_dns_servers' option disables this feature." msgstr "" "Aktiviert den dnsmasq-Dienst zur Bereitstellung von Namensauflösungen für " "Instanzen mithilfe DNS-Resolvern auf dem Host, auf dem der DHCP-Agent " "ausgeführt wird. Entfernt die Option '--no-resolv' aus den dnsmasq-" "Verarbeitungsargumenten. Dieses Feature wird deaktiviert, wenn angepasste " "DNS-Resolver zur Option 'dnsmasq_dns_servers' hinzugefügt werden." #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "Fehler %(reason)s beim Ausführen der Operation." #, python-format msgid "Error parsing dns address %s" msgstr "Fehler bei Auswertung der DNS-Adresse %s" #, python-format msgid "Error while reading %s" msgstr "Fehler beim Lesen von %s" #, python-format msgid "" "Exceeded %s second limit waiting for address to leave the tentative state." msgstr "" "Der Grenzwert von %s Sekunde(n) wurde überschritten, als darauf gewartet " "wurde, dass sich der vorläufige Status der Adresse ändert." msgid "Existing prefixes must be a subset of the new prefixes" msgstr "Vorhandene Präfixe müssen eine Untergruppe der neuen Präfixe sein" #, python-format msgid "" "Exit code: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: " "%(stderr)s" msgstr "" "Exit-Code: %(returncode)d; Standardeingabe: %(stdin)s; Standardausgabe: " "%(stdout)s; Standardfehler: %(stderr)s" #, python-format msgid "Extension %(driver)s failed." msgstr "Erweiterungs-%(driver)s fehlgeschlagen." #, python-format msgid "" "Extension driver %(driver)s required for service plugin %(service_plugin)s " "not found." msgstr "" "Der Erweiterungstreiber %(driver)s, der für das Dienste-Plugin " "%(service_plugin)s erforderlich ist, wurde nicht gefunden." msgid "" "Extension to use alongside ml2 plugin's l2population mechanism driver. 
It " "enables the plugin to populate VXLAN forwarding table." msgstr "" "Erweiterung zur Verwendung mit dem l2population-Mechanismus-Treiber des ml2-" "Plug-ins. Sie ermöglicht dem Plug-in das Belegen der VXLAN-" "Weiterleitungstabelle." #, python-format msgid "Extension with alias %s does not exist" msgstr "Erweiterung mit Alias %s ist nicht vorhanden" msgid "Extensions list to use" msgstr "Zur verwendende Liste der Erweiterungen" #, python-format msgid "External IP %s is the same as the gateway IP" msgstr "Externe IP %s entspricht der Gateway-IP" #, python-format msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found." msgstr "" "Fehler bei Neuterminierung von Router %(router_id)s: kein auswählbarer L3-" "Agent gefunden." #, python-format msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s." msgstr "" "Zuordnung des Routers %(router_id)s zum L3-Agenten %(agent_id)s ist " "fehlgeschlagen." #, python-format msgid "" "Failed to create port on network %(network_id)s, because fixed_ips included " "invalid subnet %(subnet_id)s" msgstr "" "Port auf Netz %(network_id)s wurde nicht erstellt, da 'fixed_ips' ungültiges " "Teilnetz %(subnet_id)s enthielt" #, python-format msgid "Failed to locate source for %s." msgstr "Quelle für %s nicht gefunden." msgid "Failed to remove supplemental groups" msgstr "Fehler beim Entfernen zusätzlicher Gruppen" #, python-format msgid "Failed to set gid %s" msgstr "Fehler beim Festlegen von GID %s" #, python-format msgid "Failed to set uid %s" msgstr "Fehler beim Festlegen von Benutzer-ID %s" #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "Fehler bei der Konfiguration eines %(type)s-Tunnel-Ports auf %(ip)s" msgid "Failure applying iptables rules" msgstr "Fehler beim Anwenden von iptables-Regeln." #, python-format msgid "Failure waiting for address %(address)s to become ready: %(reason)s" msgstr "" "Fehler beim Warten darauf, dass Adresse %(address)s bereit ist: %(reason)s" msgid "Flat provider networks are disabled" msgstr "Einfache Anbieternetzwerke sind deaktiviert." msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "Für TCP/UDP-Protokolle muss 'port_range_min' '<= port_range_max' sein" msgid "Force ip_lib calls to use the root helper" msgstr "ip_lib-Aufrufe erzwingen, um Roothilfeprogramm zu verwenden" msgid "Gateway IP version inconsistent with allocation pool version" msgstr "" "Die Version der Gateway-IP stimmt nicht mit der Version des Zuordnungspools " "überein." msgid "Gateway is not valid on subnet" msgstr "Gateway ist auf Teilnetz nicht gültig" msgid "" "Group (gid or name) running metadata proxy after its initialization (if " "empty: agent effective group)." msgstr "" "Gruppe (Gruppen-ID oder Name), die Metadaten-Proxy nach der Initialisierung " "ausführt (falls leer: Agent-ausführende Gruppe)." msgid "Group (gid or name) running this process after its initialization" msgstr "" "Gruppe (Gruppen-ID oder Name), die diesen Prozess nach der Initialisierung " "ausführt" msgid "" "Hostname to be used by the Neutron server, agents and services running on " "this machine. All the agents and services running on this machine must use " "the same host value." msgstr "" "Hostname, der vom Neutron-Server, von Agenten und Services auf dieser " "Maschine verwendet werden soll. Alle auf dieser Maschine ausgeführten " "Agenten und Services müssen denselben Hostwert verwenden." 
#, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" "min) is missing." msgstr "" "ICMP-Code (port-range-max) %(value)s ist angegeben, aber ICMP-Typ (port-" "range-min) fehlt." msgid "ID of network" msgstr "Netz-ID" msgid "ID of network to probe" msgstr "ID von Netz das überprüft werden soll" msgid "ID of probe port to delete" msgstr "ID von Überprüfungsport der gelöscht werden soll" msgid "ID of probe port to execute command" msgstr "ID von Überprüfungsport zum Ausführen des Befehls" msgid "ID of the router" msgstr "ID des Routers" #, python-format msgid "IP address %(ip)s already allocated in subnet %(subnet_id)s" msgstr "IP-Adresse %(ip)s bereits in Teilnetz %(subnet_id)s zugeordnet" #, python-format msgid "IP address %(ip)s does not belong to subnet %(subnet_id)s" msgstr "IP-Adresse %(ip)s gehört nicht zu Teilnetz %(subnet_id)s" msgid "IP allocation failed. Try again later." msgstr "IP-Zuordnung fehlgeschlagen. Versuchen Sie es später noch einmal." msgid "IP allocation requires subnet_id or ip_address" msgstr "'subnet_id' oder 'ip_address' für IP-Zuordnung erforderlich" #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables rules:\n" "%s" msgstr "" "IPTablesManager.apply hat den folgenden Satz an iptables-Regeln nicht " "angewendet:\n" "%s" msgid "IPv6 Address Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "" "Für Präfixdelegierung muss der IPv6-Adressmodus SLAAC oder Stateless sein." msgid "IPv6 RA Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "" "Für Präfixdelegierung muss der IPv6-RA-Modus SLAAC oder Stateless sein." #, python-format msgid "" "IPv6 address %(ip)s cannot be directly assigned to a port on subnet " "%(subnet_id)s as the subnet is configured for automatic addresses" msgstr "" "IPv6-Adresse %(ip)s kann nicht direkt einem Port im Teilnetz %(subnet_id)s " "zugeordnet werden, da das Teilnetz für automatische Adressen konfiguriert " "wurde" #, python-format msgid "" "IPv6 subnet %s configured to receive RAs from an external router cannot be " "added to Neutron Router." msgstr "" "IPv6-Teilnetz %s, das für den Empfang von RAs von einem externen Router " "konfiguriert ist, kann nicht zum Neutron-Router hinzugefügt werden." msgid "" "If True, then allow plugins that support it to create VLAN transparent " "networks." msgstr "" "Bei 'True' sollen Plugins, die dies unterstützen, VLAN-transparente Netze " "erstellen dürfen." msgid "Illegal IP version number" msgstr "Illegale IP-Versionsnummer" msgid "" "In some cases the Neutron router is not present to provide the metadata IP " "but the DHCP server can be used to provide this info. Setting this value " "will force the DHCP server to append specific host routes to the DHCP " "request. If this option is set, then the metadata service will be activated " "for all the networks." msgstr "" "In einigen Fällen ist kein Neutron-Router vorhanden, um die Metadaten-IP " "bereitzustellen. Der DHCP-Server kann jedoch für die Bereitstellung dieser " "Informationen verwendet werden. Setzen dieses Werts bewirkt, dass der DHCP-" "Server bestimmte Hostrouten an die DHCP-Anforderung anhängt. Bei Aktivierung " "dieser Option wird der Metadatendienst für alle Netze aktiviert. " msgid "" "Indicates that this L3 agent should also handle routers that do not have an " "external network gateway configured. 
This option should be True only for a " "single agent in a Neutron deployment, and may be False for all agents if all " "routers must have an external network gateway." msgstr "" "Gibt an, dass dieser L3-Agent auch Router ohne ein konfiguriertes externes " "Netzgateway verarbeiten soll. Diese Option sollte nur für einen einzelnen " "Agenten in einer Neutron-Implementierung auf 'True' gesetzt werden und kann " "für alle Agenten auf 'False' gesetzt werden, wenn alle Router ein externes " "Netzgateway erfordern." #, python-format msgid "Instance of class %(module)s.%(class)s must contain _cache attribute" msgstr "" "Die Instanz der Klasse %(module)s.%(class)s muss das Attribut '_cache' " "enthalten." #, python-format msgid "Insufficient prefix space to allocate subnet size /%s" msgstr "" "Unzureichender Präfixspeicherplatz für die Zuordnung von Teilnetzgröße /%s" msgid "Insufficient rights for removing default security group." msgstr "" "Berechtigungen sind für das Entfernen der Standardsicherheitsgruppe nicht " "ausreichend." msgid "" "Integration bridge to use. Do not change this parameter unless you have a " "good reason to. This is the name of the OVS integration bridge. There is one " "per hypervisor. The integration bridge acts as a virtual 'patch bay'. All VM " "VIFs are attached to this bridge and then 'patched' according to their " "network connectivity." msgstr "" "Zu verwendende Integrationsbrücke. Ändern Sie diesen Parameter nur, wenn Sie " "gute Gründe dafür haben. Dies ist der Name der OVS-Integrationsbrücke. Es " "gibt eine pro Hypervisor. Die Integrationsbrücke agiert als virtuelle Patch-" "Bay. Alle VM-VIFs werden an diese Brücke angehängt und anschließend " "entsprechend ihrer Netzkonnektivität gepatcht." msgid "Interface to monitor" msgstr "Zu überwachende Schnittstelle" msgid "" "Interval between checks of child process liveness (seconds), use 0 to disable" msgstr "" "Intervall zwischen Überprüfungen der Aktivität von untergeordneten Prozessen " "(Sekunden), verwenden Sie zum Deaktivieren '0'" msgid "Interval between two metering measures" msgstr "Intervall zwischen zwei Messungen" msgid "Interval between two metering reports" msgstr "Intervall zwischen zwei Messungsberichten" #, python-format msgid "Invalid Device %(dev_name)s: %(reason)s" msgstr "Ungültige Einheit %(dev_name)s: %(reason)s" #, python-format msgid "" "Invalid action '%(action)s' for object type '%(object_type)s'. Valid " "actions: %(valid_actions)s" msgstr "" "Ungültige Aktion '%(action)s' für Objekttyp '%(object_type)s'. Gültige " "Aktionen: %(valid_actions)s" #, python-format msgid "" "Invalid authentication type: %(auth_type)s, valid types are: " "%(valid_auth_types)s" msgstr "" "Ungültiger Authentifizierungstyp: %(auth_type)s, gültige Typen sind: " "%(valid_auth_types)s" #, python-format msgid "Invalid direction '%s'" msgstr "Ungültige Richtung '%s'" #, python-format msgid "Invalid ethertype %(ethertype)s for protocol %(protocol)s." msgstr "Ungültiger Ethernet-Typ %(ethertype)s für Protokoll %(protocol)s."
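For the isolated-network metadata case described above, the injected host route points the well-known metadata IP 169.254.169.254 at the DHCP port via a classless static route (DHCP option 121). A hypothetical helper illustrating the 'destination,next-hop' shape of such an entry (the helper name and string format are assumptions):

    METADATA_CIDR = '169.254.169.254/32'

    def metadata_host_route(dhcp_port_ip):
        # Classless static route (DHCP option 121): destination,next-hop.
        return '%s,%s' % (METADATA_CIDR, dhcp_port_ip)

    print(metadata_host_route('192.0.2.2'))  # 169.254.169.254/32,192.0.2.2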
#, python-format msgid "Invalid format: %s" msgstr "Ungültiges Format: %s" #, python-format msgid "Invalid group/gid: '%s'" msgstr "Ungültige Gruppe/Gruppen ID: '%s'" #, python-format msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s" msgstr "" "Ungültiger Instanzstatus: %(state)s, gültige Status sind: %(valid_states)s" #, python-format msgid "Invalid mapping: '%s'" msgstr "Ungültige Zuordnung: '%s'" #, python-format msgid "Invalid pci slot %(pci_slot)s" msgstr "Ungültiger PCI-Steckplatz %(pci_slot)s" #, python-format msgid "Invalid provider format. Last part should be 'default' or empty: %s" msgstr "" "Ungültiges Anbieterformat. Letzter Teil sollte 'default' oder leer sein: %s" #, python-format msgid "Invalid resource type %(resource_type)s" msgstr "Ungültiger Ressourcentyp %(resource_type)s" #, python-format msgid "Invalid route: %s" msgstr "Ungültige Route: %s" msgid "Invalid service provider format" msgstr "Ungültiges Diensteanbieterformat" #, python-format msgid "Invalid user/uid: '%s'" msgstr "Ungültiger Benutzer/Benutzer ID: '%s'" #, python-format msgid "" "Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255." msgstr "" "Ungültiger Wert für ICMP %(field)s (%(attr)s) %(value)s. Er muss zwischen 0 " "und 255 liegen." #, python-format msgid "Invalid value for port %(port)s" msgstr "Ungültiger Wert für Port %(port)s" msgid "" "Iptables mangle mark used to mark ingress from external network. This mark " "will be masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "Iptables-Mangling-Markierung zum Markieren des Eingangs vom externen Netz. " "Diese Markierung wird mit 0xffff maskiert, sodass nur die unteren 16 Bits " "verwendet werden." msgid "" "Iptables mangle mark used to mark metadata valid requests. This mark will be " "masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "Iptables-Mangling-Markierung zum Markieren von Metadaten gültiger " "Anforderungen. Diese Markierung wird mit 0xffff maskiert, sodass nur die " "unteren 16 Bits verwendet werden." msgid "" "Keep in track in the database of current resource quota usage. Plugins which " "do not leverage the neutron database should set this flag to False." msgstr "" "Überwachen der aktuellen Kontingentnutzung in der Datenbank. Bei Plugins, " "die die Neutron-Datenbank nicht verwenden, sollte dieses Flag auf False " "festgelegt werden" msgid "Keepalived didn't respawn" msgstr "Keepalived wurde nicht generiert" msgid "Keepalived didn't spawn" msgstr "Keepalived hat keinen Prozess erstellt." #, python-format msgid "" "Kernel HZ value %(value)s is not valid. This value must be greater than 0." msgstr "" "Der Kernel-HZ-Wert %(value)s ist nicht gültig. Dieser Wert muss größer als 0 " "sein." msgid "L3 agent failure to setup NAT for floating IPs" msgstr "L3-Agentenfehler bei der NAT-Konfiguration für Floating IPs." msgid "L3 agent failure to setup floating IPs" msgstr "L3-Agentenfehler bei der Konfiguration von Floating IPs." #, python-format msgid "Limit must be an integer 0 or greater and not '%s'" msgstr "Limit muss eine Ganzzahl 0 oder größer sein und nicht '%s'" msgid "Limit number of leases to prevent a denial-of-service." msgstr "Anzahl von Leases begrenzen, um eine Dienstverweigerung zu verhindern." 
msgid "List of :" msgstr "Liste mit den Elementen :" msgid "" "List of :: or " "specifying physical_network names usable for VLAN provider and tenant " "networks, as well as ranges of VLAN tags on each available for allocation to " "tenant networks." msgstr "" "Liste mit :: oder , " "die physical_network-Namen angeben, die für VLAN-Provider- und Nutzer-Netze " "verwendet werden können, wie auch als Bereiche von VLAN-Tags für jedes " "verfügbare Netz für die Zuordnung zu Nutzernetzen." msgid "" "List of network type driver entrypoints to be loaded from the neutron.ml2." "type_drivers namespace." msgstr "" "Liste der Netztypentreibereingangspunkte, die aus dem Namensbereich neutron." "ml2.type_drivers geladen werden." msgid "" "List of physical_network names with which flat networks can be created. Use " "default '*' to allow flat networks with arbitrary physical_network names. " "Use an empty list to disable flat networks." msgstr "" "Liste von physical_network-Namen, mit denen einfache Netze erstellt werden " "können. Verwenden Sie den Standardwert '*', um einfache Netze mit beliebigen " "physical_network-Namen zuzulassen. Verwenden Sie eine leere Liste, um " "einfache Netze zu inaktivieren." msgid "Location for Metadata Proxy UNIX domain socket." msgstr "Position für UNIX-Domänensocket von Metadaten-Proxy." msgid "Location of Metadata Proxy UNIX domain socket" msgstr "Position von UNIX-Domänensocket von Metadatenproxy" msgid "Location to store DHCP server config files." msgstr "Position zum Speichern von Konfigurationsdateien des DHCP-Servers." msgid "Location to store IPv6 PD files." msgstr "Position zum Speichern von IPv6-PD-Dateien." msgid "Location to store IPv6 RA config files" msgstr "Position zum Speichern von IPv6-RA-Konfigurationsdateien" msgid "Location to store child pid files" msgstr "Position zum Speichern von untergeordneten PID-Dateien" msgid "Location to store keepalived config files" msgstr "Position zum Speichern von keepalived-Konfigurationsdateien" msgid "Log agent heartbeats" msgstr "Überwachungssignale von Agenten protokollieren" msgid "" "MTU of the underlying physical network. Neutron uses this value to calculate " "MTU for all virtual network components. For flat and VLAN networks, neutron " "uses this value without modification. For overlay networks such as VXLAN, " "neutron automatically subtracts the overlay protocol overhead from this " "value. Defaults to 1500, the standard value for Ethernet." msgstr "" "MTU des zugrunde liegenden physischen Netzes. Neutron verwendet diesen Wert, " "um MTU für alle virtuellen Netzkomponenten zu berechnen. Bei einfachen und " "bei VLAN-Netzen verwendet Neutron diesen Wert ohne Modifikation. Bei Overlay-" "Netzen, wie z. B. VXLAN, zieht Neutron den Overhead des Overlay-Protokolls " "automatisch von diesem Wert ab. Nimmt standardmäßig den Wert 1500 an, dem " "Standardwert für Ethernet." msgid "MTU size of veth interfaces" msgstr "MTU-Größe von Veth-Schnittstellen" msgid "Make the l2 agent run in DVR mode." msgstr "L2-Agent im DVR-Modus ausführen." msgid "Malformed request body" msgstr "Fehlerhafter Anforderungshauptteil" msgid "MaxRtrAdvInterval setting for radvd.conf" msgstr "MaxRtrAdvInterval-Einstellung für radvd.conf" msgid "Maximum number of DNS nameservers per subnet" msgstr "Maximale Anzahl an DNS-Namensservern pro Subnetz" msgid "" "Maximum number of L3 agents which a HA router will be scheduled on. If it is " "set to 0 then the router will be scheduled on every agent." 
msgstr "" "Maximale Anzahl an L3-Agenten, für die ein HA-Router geplant wird. Bei " "Angabe von 0 wird der Router für jeden Agenten geplant." msgid "Maximum number of allowed address pairs" msgstr "Maximale Anzahl an zulässigen Adresspaaren" msgid "Maximum number of host routes per subnet" msgstr "Maximale Anzahl an Hostroutes pro Subnetz" msgid "Maximum number of routes per router" msgstr "Maximale Anzahl an Routen pro Router" msgid "" "Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': deduce " "mode from metadata_proxy_user/group values, 'user': set metadata proxy " "socket mode to 0o644, to use when metadata_proxy_user is agent effective " "user or root, 'group': set metadata proxy socket mode to 0o664, to use when " "metadata_proxy_group is agent effective group or root, 'all': set metadata " "proxy socket mode to 0o666, to use otherwise." msgstr "" "Modus von UNIX-Domänensocket für Metadaten-Proxy, 4 Werte zulässig: " "'deduce': Modus aus Werten von metadata_proxy_user/group ableiten, 'user': " "Modus von Metadaten-Proxy-Socket auf 0o644 festlegen, zur Verwendung, wenn " "metadata_proxy_user Agent-ausführender Benutzer oder Root ist, 'group': " "Modus von Metadaten-Proxy-Socket auf 0o664 festlegen, zur Verwendung, wenn " "metadata_proxy_group Agent-ausführende Gruppe oder Root ist, 'all': Modus " "von Metadaten-Proxy-Socket auf 0o666 festlegen, zur anderweitigen Verwendung." msgid "Metering driver" msgstr "Messungstreiber" #, python-format msgid "Method %s cannot be called within a transaction." msgstr "" "Die Methode %s kann nicht aus einer Transaktion heraus aufgerufen werden." msgid "MinRtrAdvInterval setting for radvd.conf" msgstr "MinRtrAdvInterval-Einstellung für radvd.conf" msgid "Minimize polling by monitoring ovsdb for interface changes." msgstr "" "Abfrage minimieren durch Überwachung von ovsdb auf Schnittstellenänderungen." #, python-format msgid "Missing key in mapping: '%s'" msgstr "Fehlender Schlüssel in Zuordnung: '%s'" msgid "" "Multicast group for VXLAN. When configured, will enable sending all " "broadcast traffic to this multicast group. When left unconfigured, will " "disable multicast VXLAN mode." msgstr "" "Multicastgruppe für VXLAN. Wenn sie konfiguriert ist, kann der gesamte " "Broadcastverkehr an diese Multicastgruppe gesendet werden. Ohne " "Konfiguration ist der Multicast-VXLAN-Modus inaktiviert." msgid "" "Multicast group(s) for vxlan interface. A range of group addresses may be " "specified by using CIDR notation. Specifying a range allows different VNIs " "to use different group addresses, reducing or eliminating spurious broadcast " "traffic to the tunnel endpoints. To reserve a unique group for each possible " "(24-bit) VNI, use a /8 such as 239.0.0.0/8. This setting must be the same on " "all the agents." msgstr "" "Multicastgruppe(n) für VXLAN-Schnittstelle. Ein Gruppenadressbereich, der " "mit der CIDR-Notation angegeben werden kann. Durch die Angabe eines Bereichs " "können unterschiedliche VNIs verschiedene Gruppenadressen verwenden und so " "fehlerhaften Broadcastverkehr an Tunnelendpunkt senkden oder entfernen. Wenn " "Sie eine eindeutige Gruppe für jede mögliche VNI (24 Bit) reservieren " "möchten, verwenden Sie die Einstellung /8, wie z. B. 239.0.0.0/8. Diese " "Einstellung muss für alle Agenten gleich sein." 
#, python-format msgid "Multiple default providers for service %s" msgstr "Mehrere Standardanbieter für Dienst %s" #, python-format msgid "Multiple providers specified for service %s" msgstr "Mehrere Anbieter angegeben für Dienst %s" msgid "Multiple tenant_ids in bulk security group rule create not allowed" msgstr "" "Mehrere 'tenant_ids' bei Erstellung von Sicherheitsgruppenregel für " "Massenerstellung nicht zulässig" msgid "Must also specify protocol if port range is given." msgstr "" "Bei angegebenem Portbereich muss ebenfalls ein Protokoll angegeben werden." msgid "Must specify one or more actions on flow addition or modification" msgstr "" "Angabe von einer oder mehreren Aktionen für Ablaufhinzufügung oder Änderung " "erforderlich" msgid "Name of Open vSwitch bridge to use" msgstr "Name der zu verwendenden Open vSwitch-Brücke" msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "" "Name der zu verwendenden Nova-Region. Nützlich, wenn Keystone mehrere " "Regionen verwaltet. " msgid "" "Name of placement region to use. Useful if keystone manages more than one " "region." msgstr "" "Name der zu verwendenden Placement-Region. Nützlich, wenn Keystone mehrere " "Regionen verwaltet. " msgid "Namespace of the router" msgstr "Namensbereich des Routers" msgid "Native pagination depend on native sorting" msgstr "Die native Paginierung ist von der nativen Sortierung abhängig" #, python-format msgid "" "Need to apply migrations from %(project)s contract branch. This will require " "all Neutron server instances to be shutdown before proceeding with the " "upgrade." msgstr "" "Es müssen Migrationen aus dem %(project)s-Contract-Branch angewendet werden. " "Hierfür müssen alle Neutron-Serverinstanzen heruntergefahren werden, bevor " "die Aktualisierung fortgesetzt werden kann. " msgid "Negative delta (downgrade) not supported" msgstr "Negatives Delta (Herabstufung) nicht unterstützt" msgid "Negative relative revision (downgrade) not supported" msgstr "Negative relative Revision (Herabstufung) nicht unterstützt" #, python-format msgid "Network %s does not contain any IPv4 subnet" msgstr "Netz %s enthält kein IPv4-Teilnetz" #, python-format msgid "Network %s is not a valid external network" msgstr "Netz %s ist kein gültiges externes Netz" #, python-format msgid "Network %s is not an external network" msgstr "Netz %s ist kein externes Netz" #, python-format msgid "" "Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges " "%(excluded_ranges)s was not found." msgstr "" "Netz der Größe %(size)s, aus IP-Bereich %(parent_range)s ausschließlich der " "IP-Bereiche %(excluded_ranges)s wurde nicht gefunden." #, python-format msgid "Network type value '%s' not supported" msgstr "Netztypwert '%s' wird nicht unterstützt" msgid "Network type value needed by the ML2 plugin" msgstr "Netztypwert für ML2-Plug-in erforderlich" msgid "Neutron core_plugin not configured!" msgstr "Neutron-'core_plugin' nicht konfiguriert!" msgid "No default router:external network" msgstr "Kein router:external-Standardnetz" #, python-format msgid "No default subnetpool found for IPv%s" msgstr "Kein Standardsubnetzpool für IPv%s gefunden." msgid "No default subnetpools defined" msgstr "Es wurden keine Subnetzpools definiert." #, python-format msgid "No more IP addresses available for subnet %(subnet_id)s." msgstr "Keine weiteren IP-Adressen für Teilnetz %(subnet_id)s verfügbar." msgid "No more IP addresses available." msgstr "Keine weiteren IP-Adressen verfügbar." 
msgid "No offline migrations pending." msgstr "Keine Offline-Migrationen anstehend." #, python-format msgid "No shared key in %s fields" msgstr "Kein gemeinsam genutzter Schlüssel in %s-Feldern" msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "" "Es ist nicht zulässig, einem Agenten im Modus 'dvr' manuell einen Router " "zuzuordnen." msgid "Not allowed to manually remove a router from an agent in 'dvr' mode." msgstr "" "Es ist nicht zulässig, einen Router aus einem Agenten im Modus 'dvr' manuell " "zu entfernen." msgid "" "Number of DHCP agents scheduled to host a tenant network. If this number is " "greater than 1, the scheduler automatically assigns multiple DHCP agents for " "a given tenant network, providing high availability for DHCP service." msgstr "" "Anzahl der DHCP-Agenten, die zum Hosten eines Mandatennetzwerkes geplant " "werden. Wenn diese Zahl größer als 1 ist, weist der Scheduler automatisch " "mehrere DHCP-Agenten für ein angegebenes Nutzernetz zu, wodurch " "Hochverfügbarkeit für den DHCP-Service erreicht wird." msgid "Number of RPC worker processes dedicated to state reports queue." msgstr "" "Anzahl der RPC-Worker-Prozesse, die der Statusberichtswarteschlange " "zugewiesen ist." msgid "Number of backlog requests to configure the metadata server socket with" msgstr "" "Anzahl der Rückstandanforderungen, mit denen der Metadatenserver-Socket " "konfiguriert werden soll" msgid "Number of backlog requests to configure the socket with" msgstr "" "Anzahl der Rückstandanforderungen, mit denen der Socket konfiguriert werden " "soll" msgid "" "Number of bits in an ipv4 PTR zone that will be considered network prefix. " "It has to align to byte boundary. Minimum value is 8. Maximum value is 24. " "As a consequence, range of values is 8, 16 and 24" msgstr "" "Anzahl von Bits in einer ipv4-PTR-Zone, die als Netzpräfix betrachtet wird. " "Es muss an der Bytegrenze ausgerichtet werden. Der Mindestwert ist 8. Der " "maximal zulässige Wert ist 24. Daraus ergibt sich ein Wertebereich von 8, 16 " "und 24. " msgid "" "Number of bits in an ipv6 PTR zone that will be considered network prefix. " "It has to align to nyble boundary. Minimum value is 4. Maximum value is 124. " "As a consequence, range of values is 4, 8, 12, 16,..., 124" msgstr "" "Anzahl von Bits in einer ipv6-PTR-Zone, die als Netzpräfix betrachtet wird. " "Es muss an der nyble-Grenze ausgerichtet werden. Der Mindestwert ist 4. Der " "maximal zulässige Wert ist 124. Daraus ergibt sich ein Wertebereich von 4, " "8, 12, 16,..., 124." msgid "" "Number of floating IPs allowed per tenant. A negative value means unlimited." msgstr "" "Anzahl an zulässigen dynamischen IPs pro Nutzer. Ein negativer Wert bedeutet " "unbegrenzt." msgid "" "Number of networks allowed per tenant. A negative value means unlimited." msgstr "" "Anzahl an zulässigen Netzen pro Nutzer. Ein negativer Wert bedeutet " "unbegrenzt." msgid "Number of ports allowed per tenant. A negative value means unlimited." msgstr "" "Anzahl an zulässigen Ports pro Nutzer. Ein negativer Wert bedeutet " "unbegrenzt." msgid "Number of routers allowed per tenant. A negative value means unlimited." msgstr "" "Anzahl an zulässigen Routern pro Nutzer. Ein negativer Wert bedeutet " "unbegrenzt." msgid "" "Number of seconds between sending events to nova if there are any events to " "send." msgstr "" "Anzahl der Sekunden zwischen dem Senden von Ereignissen an Nova, wenn " "Ereignisse zum Senden vorhanden sind. 
" msgid "Number of seconds to keep retrying to listen" msgstr "" "Anzahl der Sekunden, in denen wiederholt versucht wird, empfangsbereit zu " "sein" msgid "" "Number of security groups allowed per tenant. A negative value means " "unlimited." msgstr "" "Anzahl an zulässigen Sicherheitsgruppen pro Nutzer. Ein negativer Wert " "bedeutet unbegrenzt." msgid "" "Number of security rules allowed per tenant. A negative value means " "unlimited." msgstr "" "Anzahl an zulässigen Sicherheitsregeln pro Nutzer. Ein negativer Wert " "bedeutet unbegrenzt." msgid "" "Number of separate worker processes for metadata server (defaults to half of " "the number of CPUs)" msgstr "" "Anzahl der separaten Worker-Prozesse für Metadatenserver (wird standardmäßig " "auf die Hälfte der Anzahl der CPUs festgelegt)" msgid "Number of subnets allowed per tenant, A negative value means unlimited." msgstr "" "Anzahl an zulässigen Teilnetzen pro Nutzer. Ein negativer Wert bedeutet " "unbegrenzt." msgid "" "Number of threads to use during sync process. Should not exceed connection " "pool size configured on server." msgstr "" "Die Anzahl der während des Synchronisationsprozesses zu verwendenden " "Threads. Die Größe des auf dem Server konfigurierten Verbindungspools darf " "nicht überschritten werden." msgid "OK" msgstr "OK" msgid "" "OVS datapath to use. 'system' is the default value and corresponds to the " "kernel datapath. To enable the userspace datapath set this value to 'netdev'." msgstr "" "Zu verwendender OVS-Datenpfad. 'system' ist der Standardwert und entspricht " "dem Kernel-Datenpfad. Setzen Sie diesen Wert auf 'netdev', wenn Sie den " "Benutzerbereichsdatenpfad aktivieren möchten." msgid "OVS vhost-user socket directory." msgstr "OVS-vhost-user-Socketverzeichnis." #, python-format msgid "Object '%s' contains invalid data" msgstr "Objekt '%s' enthält ungültige Daten" msgid "Only admin can view or configure quota" msgstr "Nur Admins können Kontingente anzeigen oder konfigurieren" msgid "Only admin is authorized to access quotas for another tenant" msgstr "" "Nur Administratoren sind dazu berechtigt, auf Kontingente für andere Nutzer " "zuzugreifen" msgid "Only admins can manipulate policies on objects they do not own" msgstr "" "Nur Administratoren können Richtlinien an Objekten bearbeiten, deren Eigner " "sie nicht sind." msgid "Only allowed to update rules for one security profile at a time" msgstr "" "Aktualisierung von Regeln nicht für mehrere Sicherheitsprofile gleichzeitig " "zulässig" msgid "Only remote_ip_prefix or remote_group_id may be provided." msgstr "Nur Angabe von 'remote_ip_prefix' oder 'remote_group_id' ist zulässig." #, python-format msgid "Operation not supported on device %(dev_name)s" msgstr "Operation auf Einheit %(dev_name)s nicht unterstützt" msgid "" "Ordered list of network_types to allocate as tenant networks. The default " "value 'local' is useful for single-box testing but provides no connectivity " "between hosts." msgstr "" "Sortierte Liste der network_types für die Zuordnung als Mandantennetze. Der " "Standardwert 'local' ist hilfreich für Einzeltests, bietet jedoch keine " "Konnektivität zwischen Hosts." msgid "Override the default dnsmasq settings with this file." msgstr "Standard-'dnsmasq'-Einstellungen mit dieser Datei außer Kraft setzen." msgid "Owner type of the device: network/compute" msgstr "Eigentümertyp des Geräts: Netz/Rechenknoten" msgid "POST requests are not supported on this resource." msgstr "POST-Anforderungen werden auf dieser Ressource nicht unterstützt." 
#, python-format msgid "Package %s not installed" msgstr "Paket %s nicht installiert" #, python-format msgid "Parsing bridge_mappings failed: %s." msgstr "Analysieren von 'bridge_mappings' fehlgeschlagen: %s." msgid "Password for connecting to designate in admin context" msgstr "" "Kennwort zum Herstellen einer Verbindung zu Designate im " "Administratorkontext." msgid "Path to PID file for this process" msgstr "Pfad zur PID-Datei für diesen Prozess" msgid "Path to the router directory" msgstr "Pfad zum Routerverzeichnis" msgid "Peer patch port in integration bridge for tunnel bridge." msgstr "Peer-Patch-Port in Integrationsbrücke für Tunnelbrücke." msgid "Peer patch port in tunnel bridge for integration bridge." msgstr "Peer-Patch-Port in Tunnelbrücke für Integrationsbrücke." msgid "Phase upgrade options do not accept revision specification" msgstr "Phasenupgradeoptionen akzeptieren keine Revisionsspezifikation" msgid "Ping timeout" msgstr "Ping-Zeitlimitüberschreitung" #, python-format msgid "Port %(id)s does not have fixed ip %(address)s" msgstr "Port %(id)s verfügt nicht über statische IP-Adresse %(address)s" #, python-format msgid "Port %(port_id)s is not managed by this agent." msgstr "Der Port %(port_id)s wird nicht von diesem Agenten verwaltet." #, python-format msgid "Port %s does not exist" msgstr "Der Port %s ist nicht vorhanden." #, python-format msgid "" "Port %s has multiple fixed IPv4 addresses. Must provide a specific IPv4 " "address when assigning a floating IP" msgstr "" "Port %s besitzt mehrere statische IPv4-Adressen. Es muss eine bestimmte IPv4-" "Adresse angegeben werden, wenn eine dynamische IP zugewiesen wird" msgid "Prefix Delegation can only be used with IPv6 subnets." msgstr "Präfixdelegierung kann nur bei IPv6-Teilnetzen verwendet werden." msgid "Private key of client certificate." msgstr "Privater Schlüssel für Clientzertifikat." #, python-format msgid "Probe %s deleted" msgstr "Stichprobe %s gelöscht" #, python-format msgid "Probe created : %s " msgstr "Stichprobe erstellt: %s " msgid "Process is already started" msgstr "Prozess wurde bereits gestartet" msgid "Process is not running." msgstr "Prozess läuft nicht." msgid "Protocol to access nova metadata, http or https" msgstr "Protokoll für den Zugriff auf Nova-Metadaten, HTTP oder HTTPS" #, python-format msgid "Provider name %(name)s is limited by %(len)s characters" msgstr "Der Providername %(name)s ist auf %(len)s Zeichen begrenzt." #, python-format msgid "RBAC policy of type %(object_type)s with ID %(id)s not found" msgstr "RBAC-Richtlinie des Typs %(object_type)s mit ID %(id)s nicht gefunden" #, python-format msgid "" "RBAC policy on object %(object_id)s cannot be removed because other objects " "depend on it.\n" "Details: %(details)s" msgstr "" "RBAC-Richtlinie für Objekt %(object_id)s kann nicht entfernt werden, da " "weitere Objekte von ihr abhängen.\n" "Details: %(details)s" msgid "" "Range of seconds to randomly delay when starting the periodic task scheduler " "to reduce stampeding. (Disable by setting to 0)" msgstr "" "Dauer in Sekunden, für die zufallsgeneriert beim Starten des Schedulers für " "regelmäßige Tasks gewartet werden soll, um die Belastung zu reduzieren. " "(Inaktivierung durch Festlegen auf 0)" msgid "Ranges must be in the same IP version" msgstr "Bereiche müssen dieselbe IP-Version haben." msgid "Ranges must be netaddr.IPRange" msgstr "Bereiche müssen 'netaddr.IPRange' sein." msgid "Ranges must not overlap" msgstr "Bereiche dürfen nicht überlappen." 
#, python-format msgid "" "Release aware branch labels (%s) are deprecated. Please switch to expand@ " "and contract@ labels." msgstr "" "Versionssensitive Zweigbezeichnungen (%s) werden nicht weiter unterstützt. " "Wechseln Sie zu expand@- und contract@-Bezeichnungen." msgid "Remote metadata server experienced an internal server error." msgstr "Interner Serverfehler bei fernem Metadatenserver." msgid "" "Repository does not contain HEAD files for contract and expand branches." msgstr "" "Das Repository enthält keine HEAD-Dateien für Contract- und Expand-Branches. " msgid "" "Representing the resource type whose load is being reported by the agent. " "This can be \"networks\", \"subnets\" or \"ports\". When specified (Default " "is networks), the server will extract particular load sent as part of its " "agent configuration object from the agent report state, which is the number " "of resources being consumed, at every report_interval.dhcp_load_type can be " "used in combination with network_scheduler_driver = neutron.scheduler." "dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is " "WeightScheduler, dhcp_load_type can be configured to represent the choice " "for the resource being balanced. Example: dhcp_load_type=networks" msgstr "" "Darstellung des Ressourcentyps, zu dessen Arbeitslast vom Agenten Bericht " "erstattet wird. Dies kann \"networks\", \"subnets\" oder \"ports\" sein. Bei " "Angabe (Standardwert ist 'networks') extrahiert der Server bei jedem " "report_interval eine bestimmte Arbeitslast, die als Teil des " "Agentenkonfigurationsobjekts vom Agentenberichtsstatus, der der Anzahl der " "konsumierten Ressourcen entspricht, gesendet wird. dhcp_load_type kann in " "Verbindung mit network_scheduler_driver = neutron.scheduler." "dhcp_agent_scheduler.WeightScheduler verwendet werden. Wenn der " "network_scheduler_driver WeightScheduler ist, kann dhcp_load_type so " "konfiguriert werden, dass die Auswahl für die Ressource mit Lastausgleich " "dargestellt wird. Beispiel: dhcp_load_type=networks" msgid "Request Failed: internal server error while processing your request." msgstr "" "Anforderung fehlgeschlagen: interner Serverfehler bei Verarbeitung Ihrer " "Anforderung." msgid "" "Reset flow table on start. Setting this to True will cause brief traffic " "interruption." msgstr "" "Zurücksetzen der Ablauftabelle beim Start. Bei der Einstellung True erfolgt " "eine kurze Unterbrechung des Datenverkehrs." #, python-format msgid "Resource %(resource)s %(resource_id)s could not be found." msgstr "Ressource %(resource)s %(resource_id)s konnte nicht gefunden werden." #, python-format msgid "Resource %(resource_id)s of type %(resource_type)s not found" msgstr "Ressource %(resource_id)s des Typs %(resource_type)s nicht gefunden" #, python-format msgid "" "Resource '%(resource_id)s' is already associated with provider " "'%(provider)s' for service type '%(service_type)s'" msgstr "" "Ressource '%(resource_id)s' ist bereits Anbieter '%(provider)s' für " "Dienstetyp '%(service_type)s' zugeordnet" msgid "Resource body required" msgstr "Ressourcen-Nachrichtentext erforderlich" msgid "Resource not found." msgstr "Ressource nicht gefunden." msgid "Resources required" msgstr "Ressourcen erforderlich" msgid "" "Root helper application. Use 'sudo neutron-rootwrap /etc/neutron/rootwrap." "conf' to use the real root filter facility. Change to 'sudo' to skip the " "filtering and just run the command directly." msgstr "" "Roothilfeprogramm. 
Verwenden Sie 'sudo neutron-rootwrap /etc/neutron/rootwrap." "conf', um die echte Rootfilterfunktion zu verwenden. Wechseln Sie zu " "'sudo', um das Filtern zu überspringen und den Befehl direkt auszuführen. " msgid "Root permissions are required to drop privileges." msgstr "Rootberechtigungen sind zum Abgeben von Berechtigungen erforderlich." #, python-format msgid "Router already has a port on subnet %s" msgstr "Router verfügt bereits über einen Port auf Teilnetz %s" msgid "Router port must have at least one fixed IP" msgstr "Der Router-Port muss mindestens eine feste IP-Adresse haben." #, python-format msgid "Running %(cmd)s (%(desc)s) for %(project)s ..." msgstr "Ausführen von %(cmd)s (%(desc)s) für %(project)s ..." #, python-format msgid "Running %(cmd)s for %(project)s ..." msgstr "Ausführen von %(cmd)s für %(project)s ..." msgid "" "Seconds between nodes reporting state to server; should be less than " "agent_down_time, best if it is half or less than agent_down_time." msgstr "" "Sekunden zwischen Status-Berichten von Knoten an Server; sollte geringer " "sein als agent_down_time; am besten sollte es die Hälfte oder weniger von " "agent_down_time betragen." msgid "Seconds between running periodic tasks." msgstr "Sekunden zwischen Ausführungen regelmäßig wiederkehrender Tasks." msgid "" "Seconds to regard the agent is down; should be at least twice " "report_interval, to be sure the agent is down for good." msgstr "" "Sekunden bis zur Annahme, dass der Agent inaktiv ist; sollte mindestens " "doppelt so hoch sein wie report_interval, damit sichergestellt ist, dass der " "Agent wirklich inaktiv ist." #, python-format msgid "Security Group %(id)s %(reason)s." msgstr "Sicherheitsgruppe %(id)s %(reason)s." #, python-format msgid "Security Group Rule %(id)s %(reason)s." msgstr "Sicherheitsgruppenregel %(id)s %(reason)s." #, python-format msgid "Security group %(id)s does not exist" msgstr "Sicherheitsgruppe %(id)s ist nicht vorhanden" #, python-format msgid "Security group rule %(id)s does not exist" msgstr "Sicherheitsgruppenregel %(id)s ist nicht vorhanden" #, python-format msgid "Security group rule already exists. Rule id is %(rule_id)s." msgstr "" "Die Sicherheitsgruppenregel ist bereits vorhanden. Die Regel-ID ist " "%(rule_id)s." #, python-format msgid "" "Security group rule for ethertype '%(ethertype)s' not supported. Allowed " "values are %(values)s." msgstr "" "Sicherheitsgruppenregel für Ethernet-Typ '%(ethertype)s' wird nicht " "unterstützt. Zulässige Werte: %(values)s." #, python-format msgid "" "Security group rule protocol %(protocol)s not supported. Only protocol " "values %(values)s and integer representations [0 to 255] are supported." msgstr "" "Regelprotokoll %(protocol)s für Sicherheitsgruppe nicht unterstützt. Nur " "Protokollwerte %(values)s und ganzzahlige Darstellungen [0 bis 255] werden " "unterstützt." msgid "" "Send notification to nova when port data (fixed_ips/floatingip) changes so " "nova can update its cache." msgstr "" "Benachrichtigung an Nova senden, wenn sich die Portdaten (fixed_ips/" "floatingip) ändern, damit Nova den Zwischenspeicher aktualisieren kann. 
" msgid "Send notification to nova when port status changes" msgstr "Benachrichtigung an Nova senden, wenn sich der Portstatus ändert" #, python-format msgid "" "Service provider '%(provider)s' could not be found for service type " "%(service_type)s" msgstr "" "Diensteanbieter '%(provider)s' konnte nicht für Dienstetyp %(service_type)s " "gefunden werden " msgid "Service to handle DHCPv6 Prefix delegation." msgstr "Service zum Behandeln der DHCPv6-Präfixdelegierung." #, python-format msgid "Service type %(service_type)s does not have a default service provider" msgstr "Dienstetyp %(service_type)s weist keinen Standard-Diensteanbieter auf" msgid "" "Set new timeout in seconds for new rpc calls after agent receives SIGTERM. " "If value is set to 0, rpc timeout won't be changed" msgstr "" "Neues Zeitlimit in Sekunden für neue RPC-Aufrufe festlegen, nachdem Agent " "SIGTERM empfängt. Wenn der Wert auf 0 gesetzt ist, wird das RPC-Zeitlimit " "nicht geändert" msgid "" "Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "" "DF-Bit (Don't Fragment) auf GRE/VXLAN-Tunnel für abgehende IP-Pakete " "festlegen oder die Festlegung aufheben." msgid "Shared address scope can't be unshared" msgstr "" "Freigabe des gemeinsam genutzten Adressbereichs kann nicht aufgehoben werden" msgid "String prefix used to match IPset names." msgstr "Zeichenfolgepräfix zum Abgleichen von IPset-Namen." #, python-format msgid "Sub-project %s not installed." msgstr "Unterprojekt %s nicht installiert." msgid "Subnet for router interface must have a gateway IP" msgstr "" "Teilnetz für Routerschnittstelle muss über eine Gateway-IP-Adresse verfügen" msgid "Subnet pool has existing allocations" msgstr "Der Teilnetzpool verfügt über vorhandene Zuordnungen" msgid "Subnet used for the l3 HA admin network." msgstr "" "Teilnetz, das für das L3-Verwaltungsnetz für hohe Verfügbarkeit verwendet " "wird." msgid "" "System-wide flag to determine the type of router that tenants can create. " "Only admin can override." msgstr "" "Systemweites Flag zum Bestimmen des Routertyps, den Nutzer erstellen können. " "Kann nur vom Administrator überschrieben werden." msgid "TCP Port used by Neutron metadata namespace proxy." msgstr "Von Neutron-Metadaten-Namensbereichsproxy verwendeter TCP-Port." msgid "TCP Port used by Nova metadata server." msgstr "Von Nova-Metadatenserver verwendeter TCP-Port." msgid "TTL for vxlan interface protocol packets." msgstr "TTL für VXLAN-Schnittstellenprotokollpakete." #, python-format msgid "Tag %(tag)s could not be found." msgstr "Schlagwort %(tag)s konnte nicht gefunden werden." #, python-format msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" msgstr "Nutzer %(tenant_id)s darf %(resource)s auf diesem Netz nicht erstellen" msgid "Tenant id for connecting to designate in admin context" msgstr "" "Mandanten-ID zum Herstellen einer Verbindung zu Designate im " "Administratorkontext." msgid "Tenant name for connecting to designate in admin context" msgstr "" "Mandantenname zum Herstellen einer Verbindung zu Designate im " "Administratorkontext." msgid "" "The DHCP server can assist with providing metadata support on isolated " "networks. Setting this value to True will cause the DHCP server to append " "specific host routes to the DHCP request. The metadata service will only be " "activated when the subnet does not contain any router port. The guest " "instance must be configured to request host routes via DHCP (Option 121). 
" "This option doesn't have any effect when force_metadata is set to True." msgstr "" "Der DHCP-Server kann zur Bereitstellung von Metadatenunterstützung für " "isolierte Netze beitragen. Wenn Sie diesen Wert auf 'True' setzen, hängt der " "DHCP-Server bestimmte Hostrouten an die DHCP-Anforderung an. Der " "Metadatendienst wird nur aktiviet, wenn das Subnetz keinen Router-Port " "enthält. Die Gastinstanz muss so konfiguriert sein, dass Hostrouten über " "DHCP (Option 121) angefordert werden. Diese Option ist wirkungslos, wenn " "'force_metadata' auf 'True' gesetzt wird." msgid "The UDP port to use for VXLAN tunnels." msgstr "UDP-Port für VXLAN-Tunnel." #, python-format msgid "" "The address allocation request could not be satisfied because: %(reason)s" msgstr "" "Die Adresszuordnungsanforderung konnte nicht erfüllt werden: %(reason)s" msgid "The advertisement interval in seconds" msgstr "Ankündigungsintervall in Sekunden" msgid "" "The base MAC address Neutron will use for VIFs. The first 3 octets will " "remain unchanged. If the 4th octet is not 00, it will also be used. The " "others will be randomly generated." msgstr "" "Die MAC-Basisadresse, die Neutron für VIFs verwendet. Die ersten drei " "Oktetts bleiben unverändert. Wenn das vierte Oktett nicht 00 ist, wird es " "ebenfalls verwendet. Die anderen werden zufällig generiert. " msgid "" "The base mac address used for unique DVR instances by Neutron. The first 3 " "octets will remain unchanged. If the 4th octet is not 00, it will also be " "used. The others will be randomly generated. The 'dvr_base_mac' *must* be " "different from 'base_mac' to avoid mixing them up with MAC's allocated for " "tenant ports. A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00. " "The default is 3 octet" msgstr "" "Die MAC-Basisadresse, die durch Neutron für eindeutige DVR-Instanzen " "verwendet wird. Die ersten 3 Oktetts bleiben unverändert. Wenn das 4. Oktett " "nicht 00 ist, wird es ebenfalls verwendet. Die anderen werden zufällig " "generiert. Die 'dvr_base_mac' *muss* sich von 'base_mac' unterscheiden, um " "eine Vermischung mit zugeordneten MACs für Nutzerports zu vermeiden. " "Beispiel mit 4 Oktetts: dvr_base_mac = fa:16:3f:4f:00:00. Standardmäßig " "werden 3 Oktetts verwendet" msgid "The core plugin Neutron will use" msgstr "Core-Plugin, das Neutron verwenden wird" msgid "The driver used to manage the DHCP server." msgstr "Der für die Verwaltung des DHCP-Servers verwendete Treiber." msgid "The driver used to manage the virtual interface." msgstr "" "Der für die Verwaltung der virtuellen Schnittstelle verwendete Treiber." msgid "" "The email address to be used when creating PTR zones. If not specified, the " "email address will be admin@" msgstr "" "Die beim Erstellen PTR-Zoenen zu verwendende E-Mail-Adresse. Ohne Angabe " "einer E-Mail-Adresse wird die E-Mail-Adresse admin@ verwendet." msgid "The host IP to bind to." msgstr "Die Host-IP, an die gebunden werden soll." msgid "" "The maximum number of items returned in a single response, value was " "'infinite' or negative integer means no limit" msgstr "" "Maximale Anzahl an in einer einzelnen Antwort zurückgegebenen Elementen. Der " "Wert 'infinite' oder eine negative Ganzzahl bedeuten, dass es keine " "Begrenzung gibt." msgid "" "The network type to use when creating the HA network for an HA router. By " "default or if empty, the first 'tenant_network_types' is used. This is " "helpful when the VRRP traffic should use a specific network which is not the " "default one." 
msgstr "" "Der Netztyp, der beim Erstellen des HA-Netzes für einen HA-Router verwendet " "werden soll. Standardmäßig oder bei fehlender Angabe wird das erste " "'tenant_network_types' verwendet. Dies ist hilfreich, wenn der VRRP-" "Datenverkehr ein bestimmtes Netz verwenden soll, das nicht das Standardnetz " "ist." msgid "" "The number of seconds the agent will wait between polling for local device " "changes." msgstr "" "Die Anzahl an Sekunden, die der Agent zwischen Abfragen lokaler " "Geräteänderungen wartet." msgid "" "The number of seconds to wait before respawning the ovsdb monitor after " "losing communication with it." msgstr "" "Die Anzahl an Sekunden, die gewartet werden soll, bevor die ovsdb-" "Überwachung nach einer Kommunikationsunterbrechung erneut generiert wird." msgid "The number of sort_keys and sort_dirs must be same" msgstr "Die Anzahl an 'sort_keys' und 'sort_dirs' muss gleich sein" msgid "" "The path for API extensions. Note that this can be a colon-separated list of " "paths. For example: api_extensions_path = extensions:/path/to/more/exts:/" "even/more/exts. The __path__ of neutron.extensions is appended to this, so " "if your extensions are in there you don't need to specify them here." msgstr "" "Der Pfad für API-Erweiterungen. Beachten Sie, dass dies eine durch Punkte " "getrennte Liste von Pfaden sein kann. Beispiel: api_extensions_path = " "extensions:/path/to/more/exts:/even/more/exts. An diesen Pfad wird '__path__ " "of neutron.extensions' angehängt, sodass Sie Ihre Erweiterungen hier nicht " "mehr angeben müssen, wenn Sie dort bereits angegeben wurden." msgid "The physical network name with which the HA network can be created." msgstr "" "Der Name des physischen Netzes, mit dem das HA-Netz erstellt werden kann." #, python-format msgid "The port '%s' was deleted" msgstr "Port '%s' wurde gelöscht" msgid "The port to bind to" msgstr "Der Port an den gebunden werden soll" #, python-format msgid "The requested content type %s is invalid." msgstr "Der angeforderte Inhaltstyp %s ist ungültig." msgid "The resource could not be found." msgstr "Die Ressource konnte nicht gefunden werden." #, python-format msgid "" "The router %(router_id)s has been already hosted by the L3 Agent " "%(agent_id)s." msgstr "" "Der Router %(router_id)s wurde bereits vom L3-Agenten %(agent_id)s gehostet." msgid "" "The server has either erred or is incapable of performing the requested " "operation." msgstr "" "Auf dem Server ist entweder ein Fehler aufgetreten oder der Server kann die " "angeforderte Operation nicht ausführen." msgid "The service plugins Neutron will use" msgstr "Service-Plugins, die Neutron verwenden wird" #, python-format msgid "The subnet request could not be satisfied because: %(reason)s" msgstr "Die Teilnetzanforderung konnte nicht erfüllt werden: %(reason)s" #, python-format msgid "The subproject to execute the command against. Can be one of: '%s'." msgstr "" "Das Unterprojekt für das der Befehl ausgeführt werden soll. Mögliche Werte: " "'%s'." msgid "The type of authentication to use" msgstr "Der zu verwendende Authentifizierungtyp" msgid "" "There are routers attached to this network that depend on this policy for " "access." msgstr "" "Diesem Netz sind Router zugeordnet, die für den Zugriff von dieser " "Richtlinie abhängig sind." msgid "" "Timeout in seconds for ovsdb commands. If the timeout expires, ovsdb " "commands will fail with ALARMCLOCK error." msgstr "" "Zeitlimit in Sekunden für ovsdb-Befehle. 
Wenn das Zeitlimit abgelaufen ist, " "schlagen ovsdb-Befehle mit einem Fehler vom Typ ALARMCLOCK fehl." msgid "Timeout in seconds to wait for a single OpenFlow request." msgstr "" "Zeitlimit in Sekunden für die Wartezeit auf eine einzelne OpenFlow-" "Anforderung." msgid "" "Timeout in seconds to wait for the local switch connecting the controller." msgstr "" "Zeitlimit in Sekunden für die Wartezeit, in der der lokale Switch die " "Verbindung mit dem Controller hergestellt haben muss." msgid "" "True to delete all ports on all the OpenvSwitch bridges. False to delete " "ports created by Neutron on integration and external network bridges." msgstr "" "'True' zum Löschen aller Ports auf allen OpenvSwitch-Brücken. 'False' zum " "Löschen von Ports, die von Neutron auf Integrationsbrücken und externen " "Netzbrücken erstellt wurden." msgid "Tunnel IP value needed by the ML2 plugin" msgstr "Tunnel-IP-Wert für ML2-Plug-in erforderlich" msgid "Tunnel bridge to use." msgstr "Zu verwendende Tunnelbrücke." msgid "" "Type of the nova endpoint to use. This endpoint will be looked up in the " "keystone catalog and should be one of public, internal or admin." msgstr "" "Typ des zu verwendenden Nova-Endpunkts. Dieser Endpunkt wird im Keystone-" "Katalog gesucht und muss vom Typ 'public', 'internal' oder 'admin' sein." msgid "" "Type of the placement endpoint to use. This endpoint will be looked up in " "the keystone catalog and should be one of public, internal or admin." msgstr "" "Typ des zu verwendenden Placement-Endpunkts. Dieser Endpunkt wird im " "Keystone-Katalog gesucht und muss vom Typ 'public', 'internal' oder 'admin' " "sein." msgid "URL for connecting to designate" msgstr "URL zum Herstellen einer Verbindung zu Designate" msgid "URL to database" msgstr "URL zur Datenbank" #, python-format msgid "Unable to access %s" msgstr "Kein Zugriff auf %s möglich" #, python-format msgid "Unable to calculate %(address_type)s address because of:%(reason)s" msgstr "Fehler beim Berechnen der %(address_type)s-Adresse. Grund: %(reason)s" #, python-format msgid "Unable to convert value in %s" msgstr "Wert in %s kann nicht konvertiert werden" msgid "Unable to create the SNAT Interface Port" msgstr "SNAT-Schnittstellenport kann nicht erstellt werden" #, python-format msgid "Unable to determine mac address for %s" msgstr "MAC-Adresse für %s kann nicht bestimmt werden" #, python-format msgid "Unable to find '%s' in request body" msgstr "'%s' kann in Anforderungshauptteil nicht gefunden werden" #, python-format msgid "Unable to find IP address %(ip_address)s on subnet %(subnet_id)s" msgstr "" "IP-Adresse %(ip_address)s auf Teilnetz %(subnet_id)s wurde nicht gefunden" #, python-format msgid "Unable to find resource name in %s" msgstr "Ressourcenname kann nicht in %s gefunden werden" #, python-format msgid "" "Unable to identify a target field from:%s. Match should be in the form " "%%(<field_name>)s" msgstr "" "Zielfeld kann nicht aus %s identifiziert werden. Übereinstimmung sollte im " "Format %%(<field_name>)s vorliegen" msgid "Unable to provide external connectivity" msgstr "Externe Konnektivität kann nicht bereitgestellt werden." msgid "Unable to provide tenant private network" msgstr "Das private Mandantennetz kann nicht bereitgestellt werden."
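The 'target field' message above refers to policy matches written as %%(<field_name>)s, restored here from the extraction-stripped placeholder. A hedged sketch of extracting the field name from such a match string (illustrative only, not the policy engine's actual code):

    import re

    def target_field(match):
        # e.g. "tenant_id:%(tenant_id)s" -> "tenant_id"
        found = re.search(r'%\((\w+)\)s', match)
        if found is None:
            raise ValueError('Unable to identify a target field from:%s.' % match)
        return found.group(1)

    print(target_field('tenant_id:%(tenant_id)s'))  # tenant_id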
#, python-format msgid "" "Unable to verify match:%(match)s as the parent resource: %(res)s was not " "found" msgstr "" "Übereinstimmung %(match)s kann nicht als übergeordnete Ressource bestätigt " "werden: %(res)s wurde nicht gefunden" #, python-format msgid "Unexpected label for script %(script_name)s: %(labels)s" msgstr "Nicht erwartete Bezeichnung für Script %(script_name)s: %(labels)s" #, python-format msgid "Unexpected number of alembic branch points: %(branchpoints)s" msgstr "Unerwartete Anzahl an Alembic-Verzweigungspunkten: %(branchpoints)s" #, python-format msgid "Unexpected response code: %s" msgstr "Unerwarteter Antwortcode: %s" #, python-format msgid "Unexpected response: %s" msgstr "Unerwartete Antwort: %s" #, python-format msgid "Unit name '%(unit)s' is not valid." msgstr "Einheitenname '%(unit)s' ist nicht gültig." #, python-format msgid "Unknown address type %(address_type)s" msgstr "Unbekannter Adresstyp %(address_type)s" #, python-format msgid "Unknown attribute '%s'." msgstr "Unbekanntes Attribut '%s'." #, python-format msgid "Unknown chain: %r" msgstr "Unbekannte Kette: %r" #, python-format msgid "Unknown network type %(network_type)s." msgstr "Unerwarteter Netztyp %(network_type)s." msgid "Unmapped error" msgstr "Nicht zugeordneter Fehler" msgid "Unrecognized action" msgstr "Nicht erkannte Aktion" msgid "Unrecognized field" msgstr "Nicht erkanntes Feld" msgid "Unsupported Content-Type" msgstr "Nicht unterstützter Inhaltstyp" #, python-format msgid "Unsupported network type %(net_type)s." msgstr "Nicht unterstützter Netztyp %(net_type)s." msgid "Unsupported request type" msgstr "Nicht unterstützter Anforderungstyp" msgid "Updating default security group not allowed." msgstr "Aktualisieren von Standardsicherheitsgruppe nicht zulässig." msgid "" "Use ML2 l2population mechanism driver to learn remote MAC and IPs and " "improve tunnel scalability." msgstr "" "ML2-l2population-Mechanismus-Treiber verwenden, um ferne MAC- und IP-" "Adressen abzurufen und die Tunnelskalierbarkeit zu verbessern." msgid "Use broadcast in DHCP replies." msgstr "Verwenden Sie Broadcast in DHCP-Antworten." msgid "Use either --delta or relative revision, not both" msgstr "" "Verwenden Sie entweder --delta oder relative Revision, nicht beides gemeinsam" msgid "" "Use ipset to speed-up the iptables based security groups. Enabling ipset " "support requires that ipset is installed on L2 agent node." msgstr "" "Verwenden Sie ipset, um die Geschwindigkeit der auf iptables basierenden " "Sicherheitsgruppen zu verbessern. Für die Aktivierung der ipset-" "Unterstützung ist es erforderlich, dass ipset auf einem L2-Agentenknoten " "installiert ist." msgid "" "Use the root helper when listing the namespaces on a system. This may not be " "required depending on the security configuration. If the root helper is not " "required, set this to False for a performance improvement." msgstr "" "Verwenden Sie das Roothilfeprogramm beim Auflisten von Namensbereichen in " "einem System. Dies ist möglicherweise je nach Sicherheitskonfiguration nicht " "erforderlich. Wenn das Roothilfeprogramm nicht erforderlich ist, setzen Sie " "es zugunsten einer Leistungsverbesserung auf 'False'." msgid "" "Use veths instead of patch ports to interconnect the integration bridge to " "physical networks. Support kernel without Open vSwitch patch port support so " "long as it is set to True." msgstr "" "Verwenden Sie virtuelles Ethernet anstelle von Patch-Ports, um die " "Integrationsbrücke mit physischen Netzen zu verbinden. 
Kernels ohne Patch-" "Port-Unterstützung durch Open vSwitch werden unterstützt, vorausgesetzt der " "Wert ist auf 'True' gesetzt." msgid "" "User (uid or name) running metadata proxy after its initialization (if " "empty: agent effective user)." msgstr "" "Benutzer (Benutzer-ID oder Name), der Metadaten-Proxy nach der " "Initialisierung ausführt (falls leer: Agent-ausführender Benutzer)." msgid "User (uid or name) running this process after its initialization" msgstr "" "Benutzer (Benutzer-ID oder Name), der diesen Prozess nach der " "Initialisierung ausführt" msgid "Username for connecting to designate in admin context" msgstr "" "Benutzername zum Herstellen einer Verbindung zu Designate im " "Administratorkontext." msgid "VRRP authentication password" msgstr "VRRP-Authentifizierungskennwort" msgid "VRRP authentication type" msgstr "VRRP-Authentifizierungstyp" msgid "" "Value of host kernel tick rate (hz) for calculating minimum burst value in " "bandwidth limit rules for a port with QoS. See kernel configuration file for " "HZ value and tc-tbf manual for more information." msgstr "" "Der Wert der Host-Kernel-Aktualisierungsrate (hz) für die Berechnung des " "Mindest-Burst-Werts für Bandbreitengrenzwertregeln für einen Port mit QoS. " "Informationen zum HZ-Wert finden Sie in der Kernel-Konfigurationsdatei und " "im tc-tbf-Handbuch." msgid "" "Value of latency (ms) for calculating size of queue for a port with QoS. See " "tc-tbf manual for more information." msgstr "" "Wert der Latenzzeit (ms) für die Berechnung der Warteschlangengröße für " "einen Port mit QoS. Weitere Informationen finden Sie im tc-tbf-Handbuch." msgid "" "When proxying metadata requests, Neutron signs the Instance-ID header with a " "shared secret to prevent spoofing. You may select any string for a secret, " "but it must match here and in the configuration used by the Nova Metadata " "Server. NOTE: Nova uses the same config key, but in [neutron] section." msgstr "" "Beim Proxy-Vorgang von Metadatenanforderungen unterzeichnet Neutron den " "Instanz-ID-Header mit einem geheimen Schlüssel für gemeinsame Nutzung, um " "Spoofing zu verhindern. Sie können für einen geheimen Schlüssel eine " "beliebige Zeichenfolge auswählen. Sie muss jedoch hier und in der vom Nova-" "Metadatenserver verwendeten Konfiguration identisch sein. Hinweis: Nova " "verwendet denselben Konfigurationsschlüssel, allerdings im Abschnitt " "[neutron]." msgid "" "Where to store Neutron state files. This directory must be writable by the " "agent." msgstr "" "Position zum Speichern von Neutron-Statusdateien. Dieses Verzeichnis muss " "für den Agenten beschreibbar sein." msgid "" "With IPv6, the network used for the external gateway does not need to have " "an associated subnet, since the automatically assigned link-local address " "(LLA) can be used. However, an IPv6 gateway address is needed for use as the " "next-hop for the default route. If no IPv6 gateway address is configured " "here, (and only then) the neutron router will be configured to get its " "default route from router advertisements (RAs) from the upstream router; in " "which case the upstream router must also be configured to send these RAs. " "The ipv6_gateway, when configured, should be the LLA of the interface on the " "upstream router. If a next-hop using a global unique address (GUA) is " "desired, it needs to be done via a subnet allocated to the network and not " "through this parameter. 
" msgstr "" "Mit IPv6 benötigt das Netz, das für das externe Gateway verwendet wird, kein " "zugehöriges Teilnetz, da die automatisch zugewiesene LLA (Link-Local " "Address) verwendet werden kann. Eine IPv6-Gateway-Adresse ist jedoch für die " "Verwendung als Next-Hop für die Standardroute erforderlich. Ist hier keine " "IPv6-Gateway-Adresse konfiguriert (und nur dann), wird der Neutron-Router so " "konfiguriert, dass er die Standardroute von RAs (Router Advertisements) vom " "vorgeschalteten Router erhält; in diesem Fall muss der vorgeschaltete Router " "ebenfalls zum Senden dieser RAs konfiguriert sein. Wenn das ipv6_gateway " "konfiguriert ist, sollte es die LLA der Schnittstelle auf dem " "vorgeschalteten Router sein. Wenn ein Next-Hop benötigt wird, der eine GUA " "(Global Unique Address) verwendet, muss dies über ein Teilnetz geschehen, " "das dem Netz zugeordnet ist, nicht über diesen Parameter. " msgid "You must implement __call__" msgstr "Sie müssen '__call__' implementieren" msgid "" "You must provide a config file for bridge - either --config-file or " "env[NEUTRON_TEST_CONFIG_FILE]" msgstr "" "Sie müssen eine Konfigurationsdatei für die Brücke angeben: entweder '--" "config-file' oder env[NEUTRON_TEST_CONFIG_FILE]" msgid "You must provide a revision or relative delta" msgstr "Sie müssen eine Überarbeitung oder ein relatives Delta bereitstellen" msgid "a subnetpool must be specified in the absence of a cidr" msgstr "Ein Subnetzpool muss angegeben werden, wenn cidr nicht angegeben ist." msgid "add_ha_port cannot be called inside of a transaction." msgstr "" "'add_ha_port' kann nicht aus einer Transaktion heraus aufgerufenn werden." msgid "allocation_pools allowed only for specific subnet requests." msgstr "" "allocation_pools sind nur für bestimmte Teilnetzanforderungen zulässig." msgid "allocation_pools are not in the subnet" msgstr "allocation_pools sind nicht im Subnetz." msgid "allocation_pools use the wrong ip version" msgstr "allocation_pools verwenden die falsche IP-Version." msgid "already a synthetic attribute" msgstr "Ist bereits ein synthetisches Attribut" msgid "binding:profile value too large" msgstr "Bindung: Profilwert zu groß" #, python-format msgid "cannot perform %(event)s due to %(reason)s" msgstr "Ausführen von %(event)s nicht möglich. Ursache: %(reason)s" msgid "cidr and prefixlen must not be supplied together" msgstr "cidr und prefixlen dürfen nicht gemeinsam angegeben werden" msgid "dns_domain cannot be specified without a dns_name" msgstr "'dns_domain' kann nicht ohne 'dns_name' angegeben werden." msgid "dns_name cannot be specified without a dns_domain" msgstr "'dns_name' kann nicht ohne 'dns_domain' angegeben werden." 
msgid "fixed_ip_address cannot be specified without a port_id" msgstr "'fixed_ip_address' kann nicht ohne 'port_id' angegeben werden" #, python-format msgid "has device owner %s" msgstr "hat Geräteeigentümer %s" msgid "in use" msgstr "im Gebrauch" #, python-format msgid "ip command failed on device %(dev_name)s: %(reason)s" msgstr "IP-Befehl fehlgeschlagen auf Einheit %(dev_name)s: %(reason)s" #, python-format msgid "ip link capability %(capability)s is not supported" msgstr "IP-Link-Fähigkeit %(capability)s wird nicht unterstützt" #, python-format msgid "ip link command is not supported: %(reason)s" msgstr "IP-Link-Befehl wird nicht unterstützt: %(reason)s" msgid "ip_version must be specified in the absence of cidr and subnetpool_id" msgstr "" "ip_version muss angegeben werden, wenn cidr und subnetpool_id nicht " "angegeben sind" msgid "ipv6_address_mode is not valid when ip_version is 4" msgstr "ipv6_address_mode ist nicht gültig, wenn ip_version 4 ist" msgid "ipv6_ra_mode is not valid when ip_version is 4" msgstr "ipv6_ra_mode ist nicht gültig, wenn ip_version 4 ist" #, python-format msgid "" "ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " "'%(addr_mode)s' is not valid. If both attributes are set, they must be the " "same value" msgstr "" "ipv6_ra_mode kann nicht auf '%(ra_mode)s' gesetzt sein, wenn " "ipv6_address_mode auf '%(addr_mode)s' gesetzt ist. Sind beide Attribute " "gesetzt, müssen sie denselben Wert aufweisen" msgid "mac address update" msgstr "MAC-Adressaktualisierung" msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "Es müssen exakt 2 Argumente angegeben werden: cidr und MAC." msgid "network_type required" msgstr "network_type erforderlich" #, python-format msgid "network_type value '%s' not supported" msgstr "network_type-Wert '%s' wird nicht unterstützt" msgid "new subnet" msgstr "Neues Teilnetz" #, python-format msgid "physical_network '%s' unknown for flat provider network" msgstr "physical_network '%s' unbekannt für einfaches Anbieternetzwerk" msgid "physical_network required for flat provider network" msgstr "physical_network erforderlich für einfaches Anbieternetzwerk" #, python-format msgid "provider:physical_network specified for %s network" msgstr "'provider:physical_network' für %s-Netz angegeben" msgid "respawn_interval must be >= 0 if provided." msgstr "respawn_interval muss >= 0 sein, falls angegeben." #, python-format msgid "segmentation_id out of range (%(min)s through %(max)s)" msgstr "" "'segmentation_id' außerhalb des gültigen Bereichs (%(min)s bis %(max)s)" msgid "segmentation_id requires physical_network for VLAN provider network" msgstr "segmentation_id erfordert physical_network für VLAN-Provider-Netz" msgid "shared attribute switching to synthetic" msgstr "Gemeinsam genutztes Attribut wird in synthetisches Attribut geändert." #, python-format msgid "" "subnetpool %(subnetpool_id)s cannot be updated when associated with shared " "address scope %(address_scope_id)s" msgstr "" "Teilnetzpool %(subnetpool_id)s kann nach Zuordnung eines gemeinsam genutzten " "Adressbereichs %(address_scope_id)s nicht aktualisiert werden" msgid "subnetpool_id and use_default_subnetpool cannot both be specified" msgstr "" "Es können nicht subnetpool_id und use_default_subnetpool gleichzeitig " "festgelegt werden." 
msgid "the nexthop is not connected with router" msgstr "Der nächste Hop ist nicht mit dem Router verbunden" msgid "the nexthop is used by router" msgstr "Der nächste Hop wird vom Router verwendet" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982290.9710417 neutron-16.0.0.0b2.dev214/neutron/locale/es/0000755000175000017500000000000000000000000020503 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3150446 neutron-16.0.0.0b2.dev214/neutron/locale/es/LC_MESSAGES/0000755000175000017500000000000000000000000022270 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/locale/es/LC_MESSAGES/neutron.po0000644000175000017500000027553700000000000024345 0ustar00coreycorey00000000000000# Translations template for neutron. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the neutron project. # # Translators: # Victoria Martínez de la Cruz , 2013 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: neutron VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2019-12-20 15:01+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 05:53+0000\n" "Last-Translator: Copied by Zanata \n" "Language: es\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Spanish\n" #, python-format msgid "" "\n" "Command: %(cmd)s\n" "Exit code: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" msgstr "" "\n" "Mandato: %(cmd)s\n" "Código de salida: %(code)s\n" "Stdin: %(stdin)s\n" "Salida estándar: %(stdout)s\n" "Error estándar: %(stderr)s" #, python-format msgid "" "%(branch)s HEAD file does not match migration timeline head, expected: " "%(head)s" msgstr "" "El archivo HEAD de %(branch)s no coincide con el head de la línea de tiempo " "de migración, se esperaba: %(head)s" #, python-format msgid "" "%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' " "and '%(desc)s'" msgstr "" "%(invalid_dirs)s es un valor no válido para sort_dirs, los valores válidos " "son '%(asc)s' y '%(desc)s'" #, python-format msgid "%(key)s prohibited for %(tunnel)s provider network" msgstr "%(key)s prohibido para red de proveedor %(tunnel)s" #, python-format msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" msgstr "%(name)s '%(addr)s' no coincide con la versión de IP '%(ip_version)s'" #, python-format msgid "%s cannot be called while in offline mode" msgstr "%s no puede invocarse en la modalidad fuera de línea" #, python-format msgid "%s is invalid attribute for sort_keys" msgstr "%s es un atributo no válido para sort_keys" #, python-format msgid "%s must implement get_port_from_device or get_ports_from_devices." msgstr "%s debe implementar get_port_from_device o get_ports_from_devices." 
#, python-format msgid "%s prohibited for VLAN provider network" msgstr "%s prohibido para la red de proveedor VLAN" #, python-format msgid "%s prohibited for flat provider network" msgstr "%s prohibido para la red de proveedor simple" #, python-format msgid "%s prohibited for local provider network" msgstr "%s prohibido para la red de proveedor local" #, python-format msgid "'%s' is not a valid RBAC object type" msgstr "'%s' no es un tipo de objeto RBAC válido" #, python-format msgid "'%s' is not supported for filtering" msgstr "'%s' no está admitido para filtrar" #, python-format msgid "'module' object has no attribute '%s'" msgstr "El objeto 'module' no tiene ningún atributo '%s'" msgid "'port_max' is smaller than 'port_min'" msgstr "'port_max' es menor que 'port_min'" msgid "0 is not allowed as CIDR prefix length" msgstr "0 no está permitido como longitud del prefijo de CIDR" msgid "A cidr must be specified in the absence of a subnet pool" msgstr "Debe especificarse un cidr en ausencia de una agrupación de subred" msgid "" "A decimal value as Vendor's Registered Private Enterprise Number as required " "by RFC3315 DUID-EN." msgstr "" "Valor decimal como número de empresa privada registrada del proveedor según " "lo exigido en RFC3315 DUID-EN." #, python-format msgid "A default external network already exists: %(net_id)s." msgstr "Ya existe una red externa predeterminada: %(net_id)s." msgid "" "A default subnetpool for this IP family has already been set. Only one " "default may exist per IP family" msgstr "" "Ya se ha definido una agrupación de subredes predeterminada para esta " "familia de IP. Solo puede haber un valor predeterminado por cada familia de " "IP" msgid "A metering driver must be specified" msgstr "Se debe especificar un controlador de medición" msgid "Access to this resource was denied." msgstr "Se ha denegado el acceso a este recurso." msgid "Action to be executed when a child process dies" msgstr "Acción para ejecutar cuando termina un proceso secundario" msgid "" "Add comments to iptables rules. Set to false to disallow the addition of " "comments to generated iptables rules that describe each rule's purpose. " "System must support the iptables comments module for addition of comments." msgstr "" "Añadir comentarios a reglas de iptables. Establézcalo en False para no " "permitir la adición, en las reglas de iptables generadas, de comentarios " "para describir el propósito de cada regla. El sistema debe admitir el módulo " "de comentarios de iptables para que se puedan añadir comentarios." msgid "Address not present on interface" msgstr "La dirección no está presente en la interfaz" msgid "Adds test attributes to core resources." msgstr "Añade atributos de prueba a recursos de núcleo." #, python-format msgid "Agent %(id)s is not a L3 Agent or has been disabled" msgstr "El agente %(id)s no es un agente L3 válido o se ha inhabilitado" msgid "" "Agent starts with admin_state_up=False when enable_new_agents=False. In the " "case, user's resources will not be scheduled automatically to the agent " "until admin changes admin_state_up to True." msgstr "" "El agente se inicia con admin_state_up=False cuando enable_new_agents=False. " "En ese caso, los recursos del usuario no se planificarán automáticamente en " "el agente hasque que admin cambie admin_state_up a True." #, python-format msgid "Agent updated: %(payload)s" msgstr "El agente se ha actualizado: %(payload)s" msgid "Allow auto scheduling networks to DHCP agent." 
msgstr "Permita la planificación automática de redes para el agente DHCP." msgid "Allow auto scheduling of routers to L3 agent." msgstr "Permitir auto programación de enrutadores al agente L3." msgid "" "Allow overlapping IP support in Neutron. Attention: the following parameter " "MUST be set to False if Neutron is being used in conjunction with Nova " "security groups." msgstr "" "Permitir soporte de solapamiento de IP en Neutron. Atención: el siguiente " "parámetro se DEBE definir a False si se utiliza Neutron conjuntamente con " "los grupos de seguridad de Nova." msgid "Allow running metadata proxy." msgstr "Permitir ejecutar el proxy de metadatos." msgid "Allow sending resource operation notification to DHCP agent" msgstr "" "Notificación de la operación de permitir el envío de recurso al agente DHCP" msgid "Allow the creation of PTR records" msgstr "Permitir la creación de registros PTR" msgid "Allow the usage of the bulk API" msgstr "Permitir el uso de la API masiva" msgid "Allow to perform insecure SSL (https) requests to nova metadata" msgstr "" "Permitir ejecutar solicitudes SSL (https) no seguras en los metadatos de Nova" msgid "" "Allows for serving metadata requests coming from a dedicated metadata access " "network whose CIDR is 169.254.169.254/16 (or larger prefix), and is " "connected to a Neutron router from which the VMs send metadata:1 request. In " "this case DHCP Option 121 will not be injected in VMs, as they will be able " "to reach 169.254.169.254 through a router. This option requires " "enable_isolated_metadata = True." msgstr "" "Permite atender solicitudes de metadatos procedentes de una red de acceso a " "metadatos dedicada cuyo CIDR es 169.254.169.254/16 (o un prefijo más largo) " "y está conectada a un direccionador Neutron desde el cual las VM envían la " "solicitud metadata:1. En este caso, no se inyectará la opción DHCP 121 en " "las VM, porqué podrán alcanzar 169.254.169.254 mediante un direccionador. " "Esta opción requiere enable_isolated_metadata = True." msgid "An RBAC policy already exists with those values." msgstr "Ya existe una política RBAC con esos valores." msgid "An identifier must be specified when updating a subnet" msgstr "Se debe especificar un identificador al actualizar una subred" msgid "" "An ordered list of extension driver entrypoints to be loaded from the " "neutron.ml2.extension_drivers namespace. For example: extension_drivers = " "port_security,qos" msgstr "" "Lista de puntos de entrada del controlador de extensión para cargar desde el " "espacio de nombres neutron.ml2.extension_drivers. Por ejemplo: " "extension_drivers = port_security,qos" msgid "" "An ordered list of networking mechanism driver entrypoints to be loaded from " "the neutron.ml2.mechanism_drivers namespace." msgstr "" "Una lista ordenada de puntos de entrada de controlador de mecanismo de red a " "cargar desde el espacio de nombres neutron.ml2.mechanism_drivers." msgid "An unknown error has occurred. Please try your request again." msgstr "Se ha producido un error desconocido. Intente la solicitud otra vez." msgid "Async process didn't respawn" msgstr "El proceso de asignación no se ha vuelto a generar" msgid "Authorization URL for connecting to designate in admin context" msgstr "" "URL de autorización para establecer conexión con el designado en el contexto " "de administración" msgid "Automatically remove networks from offline DHCP agents." msgstr "Eliminar automáticamente las redes de los agentes DHCP fuera de línea." 
msgid "" "Automatically reschedule routers from offline L3 agents to online L3 agents." msgstr "" "Volver a planificar automáticamente los direccionadores de los agentes L3 " "fuera de línea a los agentes L3 en línea." msgid "Availability zone of this node" msgstr "Zona de disponibilidad de este nodo" msgid "Available commands" msgstr "Mandatos disponibles" #, python-format msgid "Base MAC: %s" msgstr "MAC base: %s" msgid "" "Base log dir for dnsmasq logging. The log contains DHCP and DNS log " "information and is useful for debugging issues with either DHCP or DNS. If " "this section is null, disable dnsmasq log." msgstr "" "Directorio de registro base para registro dnsmasq. El registro contiene " "información de registro DHCP y DNS y es útil para problemas de depuración " "con DHCP o DNS. Si esta sección es nula, inhabilite el registro dnsmasq." msgid "Body contains invalid data" msgstr "El cuerpo contiene datos no válidos" msgid "Bulk operation not supported" msgstr "No se soporta operación masiva" msgid "CIDR to monitor" msgstr "CIDR a supervisar" #, python-format msgid "Callback for %(resource_type)s not found" msgstr "No se ha encontrado la devolución de llamada para %(resource_type)s" #, python-format msgid "Callback for %(resource_type)s returned wrong resource type" msgstr "" "La devolución de llamada para %(resource_type)s ha devuelto el tipo de " "recurso equivocado" #, python-format msgid "Cannot add floating IP to port %s that has no fixed IPv4 addresses" msgstr "" "No se puede añadir una IP flotante al puerto %s que no tiene direcciones " "IPv4 fijas" #, python-format msgid "Cannot add multiple callbacks for %(resource_type)s" msgstr "" "No se pueden añadir varias devoluciones de llamada para %(resource_type)s" #, python-format msgid "Cannot allocate IPv%(req_ver)s subnet from IPv%(pool_ver)s subnet pool" msgstr "" "No se puede asignar una subred IPv%(req_ver)s de la agrupación de subred IPv" "%(pool_ver)s" msgid "Cannot allocate requested subnet from the available set of prefixes" msgstr "" "No se puede asignar la subred solicitada a partir del conjunto disponible de " "prefijos" msgid "Cannot disable enable_dhcp with ipv6 attributes set" msgstr "No se puede inhabilitar enable_dhcp con atributos ipv6 establecidos" #, python-format msgid "Cannot handle subnet of type %(subnet_type)s" msgstr "No se puede manejar la subred de tipo %(subnet_type)s" msgid "Cannot have multiple IPv4 subnets on router port" msgstr "No puede tener varias subredes IPv4 en el puerto del direccionador" #, python-format msgid "" "Cannot have multiple router ports with the same network id if both contain " "IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s" msgstr "" "No puede tener varios puertos de direccionador con el mismo ID de red si " "amboscontienen subredes IPv6. El puerto existente %(p)s tiene subredes IPv6 " "y un id de red %(nid)s" #, python-format msgid "" "Cannot host distributed router %(router_id)s on legacy L3 agent %(agent_id)s." msgstr "" "No se puede alojar el direccionador distribuido %(router_id)s en el agente " "L3 heredado %(agent_id)s." 
msgid "Cannot specify both subnet-id and port-id" msgstr "No se puede especificar el ID de subred y el ID de puerto" msgid "Cannot understand JSON" msgstr "No se puede entender JSON" #, python-format msgid "Cannot update read-only attribute %s" msgstr "No se puede actualizar el atributo de sólo lectura %s" msgid "Certificate Authority public key (CA cert) file for ssl" msgstr "" "Archivo de clave pública de entidad emisora de certificados (cert CA) para " "ssl" msgid "Check ebtables installation" msgstr "Compruebe la instalación de ebtables" msgid "Check for ARP header match support" msgstr "Comprobar el soporte de coincidencia de cabeceras ARP" msgid "Check for ARP responder support" msgstr "Comprobar el soporte de encuestado de ARP" msgid "Check for ICMPv6 header match support" msgstr "Comprobar el soporte de coincidencia de cabeceras ICMPv6" msgid "Check for OVS Geneve support" msgstr "Comprobar el soporte de OVS Geneve" msgid "Check for OVS vxlan support" msgstr "Comprobar el soporte vxlan OVS" msgid "Check for VF management support" msgstr "Comprobar el soporte de gestión VF" msgid "Check for iproute2 vxlan support" msgstr "Comprobar el soporte vxlan iproute2" msgid "Check for nova notification support" msgstr "Comprobar el soporte de notificación nova" msgid "Check for patch port support" msgstr "Comprobar el soporte de puerto de parche" msgid "Check ip6tables installation" msgstr "Compruebe la instalación de ip6tables" msgid "Check ipset installation" msgstr "Compruebe la instalación de ipset" msgid "Check keepalived IPv6 support" msgstr "Compruebe el soporte keepalived IPv6" msgid "Check minimal dibbler version" msgstr "Compruebe la versión mínima de dibbler" msgid "Check minimal dnsmasq version" msgstr "Comprobar la versión mínima de dnsmasq" msgid "Check netns permission settings" msgstr "Comprobar los valores de permiso netns" msgid "Check ovs conntrack support" msgstr "Compruebe el soporte para ovs conntrack" msgid "Check ovsdb native interface support" msgstr "Comprobar el soporte de interfaz nativa ovsdb" #, python-format msgid "" "Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of " "subnet %(sub_id)s" msgstr "" "El Cidr %(subnet_cidr)s de la subred %(subnet_id)s se solapa con el cidr " "%(cidr)s de la subred %(sub_id)s" msgid "Cleanup resources of a specific agent type only." msgstr "Limpiar solo los recursos de un tipo de agente específico." msgid "Client certificate for nova metadata api server." msgstr "Certificado de cliente para el servidor de la API de metadatos de Nova" msgid "" "Comma-separated list of : tuples, mapping " "network_device to the agent's node-specific list of virtual functions that " "should not be used for virtual networking. vfs_to_exclude is a semicolon-" "separated list of virtual functions to exclude from network_device. The " "network_device in the mapping should appear in the physical_device_mappings " "list." msgstr "" "Lista separada por comas de tuplas : que " "correlaciona network_device con la lista específica de nodo del agente de " "funciones virtuales que no deben utilizarse para las redes virtuales. " "vfs_to_exclude es una lista separada por punto y coma de funciones virtuales " "a excluir de network_device. El dispositivo de red (network_device) de la " "correlación debe aparecer en la lista physical_device_mappings." 
msgid "" "Comma-separated list of : tuples mapping " "physical network names to the agent's node-specific physical network device " "interfaces of SR-IOV physical function to be used for VLAN networks. All " "physical networks listed in network_vlan_ranges on the server should have " "mappings to appropriate interfaces on each agent." msgstr "" "Lista separada por comas de tuplas de : " "que correlaciona nombres de red física con las interfaces de dispositivo de " "red física específica de nodo del agente de la función física SR-IOV que se " "va a utilizar para las redes VLAN. Todas las redes físicas listadas en " "network_vlan_ranges en el servidor deben tener correlaciones con las " "interfaces adecuadas en cada agente." msgid "" "Comma-separated list of : tuples " "mapping physical network names to the agent's node-specific physical network " "interfaces to be used for flat and VLAN networks. All physical networks " "listed in network_vlan_ranges on the server should have mappings to " "appropriate interfaces on each agent." msgstr "" "Lista separada por comas de tuplas : " "para correlacionar nombres de red física con las interfaces físicas " "específicas de nodo del agente a utilizar para redes simples y VLAN. Todas " "las redes físicas que se enumeran en network_vlan_ranges en el servidor " "deberían tener correlaciones a las interfaces adecuadas de cada agente." msgid "" "Comma-separated list of : tuples enumerating ranges of GRE " "tunnel IDs that are available for tenant network allocation" msgstr "" "La lista separada por comas de conjuntos de variables : " "enumera los rangos de Los ID de túnel GRE que están disponibles para la " "asignación de red de arrendatario" msgid "" "Comma-separated list of : tuples enumerating ranges of " "Geneve VNI IDs that are available for tenant network allocation" msgstr "" "Lista separada por comas de conjuntos de variables : que " "enumeran los rangos de ID de Geneve VNI disponibles para la asignación de " "red de arrendatario" msgid "" "Comma-separated list of : tuples enumerating ranges of " "VXLAN VNI IDs that are available for tenant network allocation" msgstr "" "Lista separada por comas de conjuntos de variables : que " "enumeran los rangos de ID de VXLAN VNI que están disponibles para la " "asignación de red de arrendatario" msgid "" "Comma-separated list of the DNS servers which will be used as forwarders." msgstr "" "Lista separada por comas de los servidores DNS que se utilizarán como " "reenviadores." msgid "Command to execute" msgstr "Mandato a ejecutar" msgid "Config file for interface driver (You may also use l3_agent.ini)" msgstr "" "Archivo de configuración para controlador de interfaz (También puede " "utilizar l3_agent.ini)" #, python-format msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s" msgstr "Valor ethertype %(ethertype)s en conflicto para CIDR %(cidr)s" msgid "" "Controls whether the neutron security group API is enabled in the server. It " "should be false when using no security groups or using the nova security " "group API." msgstr "" "Controla si la API de grupo de seguridad neutron está habilitada en el " "servidor. Debe ser false cuando no hay grupos de seguridad o se utiliza la " "API de grupo de seguridad nova." 
#, python-format msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" msgstr "" "No se puede enlazar a %(host)s:%(port)s después de intentar por %(time)d " "segundos" msgid "Could not deserialize data" msgstr "No se han podido deserializar los datos" msgid "" "DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite " "lease times." msgstr "" "Duración de concesión de DHCP (en segundos). Utilice -1 para indicar a " "dnsmasq que utilice tiempos de concesión infinitos." msgid "" "DVR deployments for VXLAN/GRE/Geneve underlays require L2-pop to be enabled, " "in both the Agent and Server side." msgstr "" "Los despliegues de DVR para las subcapas VXLAN/GRE/Geneve requieren que L2-" "pop esté habilitado, el lado del agente y del servidor." msgid "" "Database engine for which script will be generated when using offline " "migration." msgstr "" "Motor de base de datos para el cual se generará el script cuando se utilice " "la migración fuera de línea." msgid "Default external networks must be shared to everyone." msgstr "" "Las redes externas predeterminadas se deben compartir con todo el mundo." msgid "" "Default network type for external networks when no provider attributes are " "specified. By default it is None, which means that if provider attributes " "are not specified while creating external networks then they will have the " "same type as tenant networks. Allowed values for external_network_type " "config option depend on the network type values configured in type_drivers " "config option." msgstr "" "Tipo de red predeterminado para redes externas si se especifican atributos " "sin proveedor . De manera predeterminada, es Ninguno, que significa que si " "no se especifican atributos de proveedor al crear redes externas, tendrán el " "mismo tipo que las redes de arrendatario. Los valores permitidos de la " "opción de configuración external_network_type dependen de los valores de " "tipo de red configurados en la opción de configuración de type_drivers." msgid "" "Default number of RBAC entries allowed per tenant. A negative value means " "unlimited." msgstr "" "Número predeterminado de entradas RBAC permitidas por arrendatario. Un valor " "negativo significa ilimitados." msgid "" "Default number of resource allowed per tenant. A negative value means " "unlimited." msgstr "" "Número predeterminado de recursos permitidos por arrendatario. Un valor " "negativo significa ilimitados." msgid "Default security group" msgstr "Grupo de seguridad predeterminado" msgid "Default security group already exists." msgstr "El grupo de seguridad predeterminado ya existe." msgid "" "Default value of availability zone hints. The availability zone aware " "schedulers use this when the resources availability_zone_hints is empty. " "Multiple availability zones can be specified by a comma separated string. " "This value can be empty. In this case, even if availability_zone_hints for a " "resource is empty, availability zone is considered for high availability " "while scheduling the resource." msgstr "" "Valor predeteminado de las sugerencias de zonas de disponibilidad. Los " "planificadores conocedores de zonas de disponibilidad utilizan esto cuando " "availability_zone_hints de los recursos está vacío. Se pueden especificar " "múltiples zonas de disponibilidad en una cadena separada por comas. Este " "valor puede estar vacío. 
En este caso, aunque availability_zone_hints de un " "recurso esté vacío, se tiene en cuenta la zona de disponibilidad para la " "alta disponibilidad al planificar el recurso." msgid "" "Define the default value of enable_snat if not provided in " "external_gateway_info." msgstr "" "Defina el valor predeterminado de enable_snat si no se proporciona en " "external_gateway_info." msgid "" "Defines providers for advanced services using the format: <service_type>:" "<name>:<driver>[:default]" msgstr "" "Define proveedores para servicios avanzados con el formato: <service_type>:" "<name>:<driver>[:default]" msgid "Delete the namespace by removing all devices." msgstr "Suprimir el espacio de nombres eliminando todos los dispositivos." #, python-format msgid "Deleting port %s" msgstr "Suprimiendo el puerto %s" #, python-format msgid "Deployment error: %(reason)s." msgstr "Error de despliegue: %(reason)s." msgid "Destroy IPsets even if there is an iptables reference." msgstr "Destruir IPsets aunque haya una referencia iptables." msgid "Destroy all IPsets." msgstr "Destruir todos los IPsets." #, python-format msgid "Device %(dev_name)s in mapping: %(mapping)s not unique" msgstr "" "El dispositivo %(dev_name)s en la correlación: %(mapping)s no es exclusivo" msgid "Device not found" msgstr "No se ha encontrado el dispositivo" msgid "Domain to use for building the hostnames" msgstr "Dominio a utilizar para crear los nombres de host" msgid "Downgrade no longer supported" msgstr "La degradación ya no está soportada" #, python-format msgid "Driver %s is not unique across providers" msgstr "El controlador %s no es único entre los proveedores" msgid "Driver for external DNS integration." msgstr "Controlador para la integración externa de DNS." msgid "Driver for security groups firewall in the L2 agent" msgstr "Controlador para el cortafuegos de grupos de seguridad en el agente L2" msgid "Driver to use for scheduling network to DHCP agent" msgstr "" "Controlador que utilizar para la planificación de la red para el agente DHCP" msgid "Driver to use for scheduling router to a default L3 agent" msgstr "" "Controlador que utilizar para la planificación del direccionador para un " "agente L3 predeterminado" msgid "" "Driver used for ipv6 prefix delegation. This needs to be an entry point " "defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg for " "entry points included with the neutron source." msgstr "" "Controlador utilizado para la delegación de prefijos ipv6. Debe ser un punto " "de entrada definido en el espacio de nombres neutron.agent.linux.pd_drivers. " "Consulte setup.cfg para ver los puntos de entrada incluidos con el origen de " "neutron." #, python-format msgid "" "Duplicate L3HARouterAgentPortBinding is created for router(s) %(router)s. " "Database cannot be upgraded. Please, remove all duplicates before upgrading " "the database." msgstr "" "Se ha creado un L3HARouterAgentPortBinding duplicado para los " "direccionadores %(router)s. No se puede actualizar la base de datos. Elimine " "todos los duplicados antes de actualizar la base de datos." msgid "Duplicate Security Group Rule in POST." msgstr "Regla de grupo de seguridad duplicada en POST." msgid "Duplicate address detected" msgstr "Se ha detectado una dirección duplicada" #, python-format msgid "ERROR: %s" msgstr "ERROR: %s" msgid "" "ERROR: Unable to find configuration file via the default search paths (~/." "neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!" 
msgstr "" "ERROR: no se ha podido encontrar el archivo de configuración por medio de " "las rutas de búsqueda predeterminada (~/.neutron/, ~/, /etc/neutron/, /etc/) " "¡y la opción '--config-file'!" msgid "" "Either one of parameter network_id or router_id must be passed to _get_ports " "method." msgstr "Debe pasarse un parámetro network_id o router_id al método _get_ports." msgid "Either subnet_id or port_id must be specified" msgstr "Se debe especificar el ID de subred o el ID de puerto" msgid "Enable HA mode for virtual routers." msgstr "Habilitar modo HA para direccionadores virtuales." msgid "Enable SSL on the API server" msgstr "Habilitar SSL en el servidor API" msgid "" "Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 " "plugin using linuxbridge mechanism driver" msgstr "" "Habilitar VXLAN en el agente. Se puede habilitar cuando el agente es " "gestionado por ml2 plugin usando controlador de mecanismo linuxbridge" msgid "" "Enable services on an agent with admin_state_up False. If this option is " "False, when admin_state_up of an agent is turned False, services on it will " "be disabled. Agents with admin_state_up False are not selected for automatic " "scheduling regardless of this option. But manual scheduling to such agents " "is available if this option is True." msgstr "" "Habilite servicios en un agente con admin_state_up False. Si esta opción es " "False, cuando el valor admin_state_up de un agente se convierte en False, " "los servicios en élse inhabilitarán. Los agentes con admin_state_up False no " "se seleccionan para laplanificación automática independientemente de esta " "opción. No obstante, la planificación manual paraestos agentes está " "disponible si esta opción es True." msgid "" "Enables IPv6 Prefix Delegation for automatic subnet CIDR allocation. Set to " "True to enable IPv6 Prefix Delegation for subnet allocation in a PD-capable " "environment. Users making subnet creation requests for IPv6 subnets without " "providing a CIDR or subnetpool ID will be given a CIDR via the Prefix " "Delegation mechanism. Note that enabling PD will override the behavior of " "the default IPv6 subnetpool." msgstr "" "Habilita la delegación de prefijo IPv6 para la asignación automática de CIDR " "de subred. Establézcalo en True para habilitar la delegación de prefijo IPv6 " "para la asignación de subred en un entorno que admita PD (Prefix " "Delegation). A los usuarios que realicen solicitudes de creación de subred " "para subredes IPv6 sin proporcionar un CIDR o un ID de agrupación de subred " "se les dará un CIDR mediante el mecanismo de delegación de prefijos. Tenga " "en cuenta que al habilitar PD se sustituirá el comportamiento de la " "agrupación de subred IPv6 predeterminada." msgid "" "Enables the dnsmasq service to provide name resolution for instances via DNS " "resolvers on the host running the DHCP agent. Effectively removes the '--no-" "resolv' option from the dnsmasq process arguments. Adding custom DNS " "resolvers to the 'dnsmasq_dns_servers' option disables this feature." msgstr "" "Permite al servicio dnsmasq proporcionar resolución de nombres para " "instancias mediante resolvedores DNS en el host donde se ejecuta el agente " "DHCP. Elimina la opción '--no-resolv' de los argumentos del proceso dnsmasq. " "Si se añaden resolvedores DNS personalizados a la opción " "'dnsmasq_dns_servers' se deshabilita esta característica." #, python-format msgid "Error %(reason)s while attempting the operation." 
msgstr "Error %(reason)s al intentar realizar la operación." #, python-format msgid "Error parsing dns address %s" msgstr "Error al analizar la dirección dns %s" #, python-format msgid "Error while reading %s" msgstr "Error al leer %s " #, python-format msgid "" "Exceeded %s second limit waiting for address to leave the tentative state." msgstr "" "Se ha superado el límite de %s segundos esperando que la dirección abandone " "el estado de tentativa." msgid "Existing prefixes must be a subset of the new prefixes" msgstr "Los prefijos existentes deben ser una subred de los prefijos nuevos" #, python-format msgid "" "Exit code: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: " "%(stderr)s" msgstr "" "Código de salida: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; " "Stderr: %(stderr)s" #, python-format msgid "Extension %(driver)s failed." msgstr "Error en la extenxión %(driver)s." #, python-format msgid "" "Extension driver %(driver)s required for service plugin %(service_plugin)s " "not found." msgstr "" "No se ha encontrado el controlador de extensión %(driver)s necesario para el " "plugin de servicio %(service_plugin)s." msgid "" "Extension to use alongside ml2 plugin's l2population mechanism driver. It " "enables the plugin to populate VXLAN forwarding table." msgstr "" "Extensión para usar unto con el controlador de mecanismo l2population del " "plug-in ml2. Este habilita el plugin para completar la tabla de reenvío " "VXLAN." #, python-format msgid "Extension with alias %s does not exist" msgstr "La ampliación con el alias %s no existe" msgid "Extensions list to use" msgstr "Lista de extensiones que se va a utilizar" #, python-format msgid "External IP %s is the same as the gateway IP" msgstr "El IP externo %s es el mismo que el IP de pasarela" #, python-format msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found." msgstr "" "No se ha podido volver a programar el direccionador %(router_id)s: no se ha " "encontrado ningún agente l3 elegible." #, python-format msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s." msgstr "" "Se ha encontrado un error la planificación del direccionador %(router_id)s " "para el agente L3 %(agent_id)s." #, python-format msgid "" "Failed to create port on network %(network_id)s, because fixed_ips included " "invalid subnet %(subnet_id)s" msgstr "" "No se ha podido Se ha encontrado un error al crear un puerto en la red " "%(network_id)s, porque fixed_ips incluía una subred no válida %(subnet_id)s" #, python-format msgid "Failed to locate source for %s." msgstr "No se ha podido buscar el origen de %s." 
msgid "Failed to remove supplemental groups" msgstr "No se han podido eliminar los grupos suplementarios" #, python-format msgid "Failed to set gid %s" msgstr "No se ha podido establecer el gid %s" #, python-format msgid "Failed to set uid %s" msgstr "No se ha podido establecer el uid %s" #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "Ha fallado al configurar %(type)s el puerto de túnel a %(ip)s" msgid "Failure applying iptables rules" msgstr "Error al aplicar las reglas de iptables" #, python-format msgid "Failure waiting for address %(address)s to become ready: %(reason)s" msgstr "" "Error al esperar que la dirección %(address)s pase a estar lista: %(reason)s" msgid "Flat provider networks are disabled" msgstr "Las redes de proveedor simples están deshabilitadas" msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "Para los protocolos TCP/UDP, port_range_min debe ser <= port_range_max" msgid "Force ip_lib calls to use the root helper" msgstr "Forzar llamadas ip_lib para usar el ayudante raíz" msgid "Gateway IP version inconsistent with allocation pool version" msgstr "" "La versión de la IP de pasarela no es consistente con la versión de la " "agrupación de asignación" msgid "Gateway is not valid on subnet" msgstr "La pasarela no es válida en la subred" msgid "" "Group (gid or name) running metadata proxy after its initialization (if " "empty: agent effective group)." msgstr "" "Grupo (gid o nombre) que ejecuta el proxy de metadatos después de su " "inicialización (si está vacío: grupo efectivo del agente)." msgid "Group (gid or name) running this process after its initialization" msgstr "" "Grupo (gid o nombre) que ejecuta este proceso después de su inicialización" msgid "" "Hostname to be used by the Neutron server, agents and services running on " "this machine. All the agents and services running on this machine must use " "the same host value." msgstr "" "Nombre de host a utilizar por los agentes, servicios y el servidor de " "Neutron que se ejecutan en esta máquina. Todos los agentes y servicios que " "se ejecutan en esta máquina deben utilizar el mismo valor de host." #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" "min) is missing." msgstr "" "Se proporciona el código ICMP (port-range-max) %(value)s, pero falta el tipo " "ICMP (port-range-min)." msgid "ID of network" msgstr "ID de red" msgid "ID of network to probe" msgstr "ID de red a analizar" msgid "ID of probe port to delete" msgstr "ID de puerto de analizador a suprimir" msgid "ID of probe port to execute command" msgstr "ID de puerto de analizador para ejecutar mandato" msgid "ID of the router" msgstr "ID del direccionador" #, python-format msgid "IP address %(ip)s already allocated in subnet %(subnet_id)s" msgstr "La dirección IP %(ip)s ya está asignada en la subred %(subnet_id)s" #, python-format msgid "IP address %(ip)s does not belong to subnet %(subnet_id)s" msgstr "La dirección IP %(ip)s no pertenece a la subred %(subnet_id)s" msgid "IP allocation failed. Try again later." msgstr "Ha fallado la asignación de IP Inténtelo de nuevo más tarde." 
msgid "IP allocation requires subnet_id or ip_address" msgstr "La asignación de IP necesita subnet_id o ip_address" #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables rules:\n" "%s" msgstr "" "IPTablesManager.apply no ha podido aplicar el siguiente conjunto de reglas " "de iptables:\n" "%s" msgid "IPv6 Address Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "" "La modalidad de dirección IPv6 debe ser SLAAC o sin estado para la " "delegación de prefijos." msgid "IPv6 RA Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "" "La modalidad de IPv6 RA debe ser SLAAC o sin estado para la delegación de " "prefijos." #, python-format msgid "" "IPv6 address %(ip)s cannot be directly assigned to a port on subnet " "%(subnet_id)s as the subnet is configured for automatic addresses" msgstr "" "La dirección IPv6 %(ip)s no se puede asignar directamente a un puerto en la " "subred %(subnet_id)s ya que la subred está configurada para direcciones " "automáticas" #, python-format msgid "" "IPv6 subnet %s configured to receive RAs from an external router cannot be " "added to Neutron Router." msgstr "" "La subred IPv6 %s configurada para recibir RA de un direccionador externo no " "se puede añadir al direccionador de Neutron." msgid "" "If True, then allow plugins that support it to create VLAN transparent " "networks." msgstr "" "Si es True, permite a los plug-in que la soportan crear redes VLAN " "transparentes." msgid "Illegal IP version number" msgstr "Número de versión IP no permitido" msgid "" "In some cases the Neutron router is not present to provide the metadata IP " "but the DHCP server can be used to provide this info. Setting this value " "will force the DHCP server to append specific host routes to the DHCP " "request. If this option is set, then the metadata service will be activated " "for all the networks." msgstr "" "En algunos casos, el direccionador Neutron no está presente para " "proporcionar el IP de los metadatos, pero se puede utilizar el servidor " "DHCP para proporcionar esta información. Si se define este valor, se forzará " "al servidor DHCP a añadir rutas de host específicas a la solicitud DHCP. Si " "no se define esta opción, el servicio de metadatos estará activado para " "todas las redes." msgid "" "Indicates that this L3 agent should also handle routers that do not have an " "external network gateway configured. This option should be True only for a " "single agent in a Neutron deployment, and may be False for all agents if all " "routers must have an external network gateway." msgstr "" "Indica que este agente L3 también debería manejar direccionadores que no " "tengan una pasarela de red externa configurada. Esta opción solo puede ser " "True para un único agente en un despliegue Neutron, y puede ser False para " "todos los agentes si todos los direccionadores tienen que tener una pasarela " "de red externa." #, python-format msgid "Instance of class %(module)s.%(class)s must contain _cache attribute" msgstr "" "La intancia de clase %(module)s.%(class)s debe contener el atributo _cache" #, python-format msgid "Insufficient prefix space to allocate subnet size /%s" msgstr "Espacio de prefijo insuficiente para asignar el tamaño de subred %s" msgid "Insufficient rights for removing default security group." msgstr "" "No hay derechos suficientes para eliminar el grupo de seguridad " "predeterminado." msgid "" "Integration bridge to use. Do not change this parameter unless you have a " "good reason to. 
This is the name of the OVS integration bridge. There is one " "per hypervisor. The integration bridge acts as a virtual 'patch bay'. All VM " "VIFs are attached to this bridge and then 'patched' according to their " "network connectivity." msgstr "" "Puente de integración a utilizar. No cambie este parámetro a menos que tenga " "un buen motivo para hacerlo. Es el nombre del puente de integración OVS. Hay " "uno por cada hipervisor. El puente de integración actúa como 'bahía de " "parche' virtual. Todas las VIF de la VM se conectan a ese puente y después " "se 'parchean' según su conectividad de red." msgid "Interface to monitor" msgstr "Interfaz a supervisar" msgid "" "Interval between checks of child process liveness (seconds), use 0 to disable" msgstr "" "Intervalo entre comprobaciones de vida de procesos secundarios (segundos), " "utilice 0 para inhabilitarlo" msgid "Interval between two metering measures" msgstr "Intervalo entre dos medidas de medición" msgid "Interval between two metering reports" msgstr "Intervalo entre dos informes de medición" #, python-format msgid "Invalid Device %(dev_name)s: %(reason)s" msgstr "Dispositivo no válido %(dev_name)s: %(reason)s" #, python-format msgid "" "Invalid action '%(action)s' for object type '%(object_type)s'. Valid " "actions: %(valid_actions)s" msgstr "" "Acción no válida '%(action)s' para el tipo de objeto '%(object_type)s'. " "Acciones válidas: %(valid_actions)s" #, python-format msgid "" "Invalid authentication type: %(auth_type)s, valid types are: " "%(valid_auth_types)s" msgstr "" "Tipo de autenticación no válida: %(auth_type)s, los tipos válidos son: " "%(valid_auth_types)s" #, python-format msgid "Invalid ethertype %(ethertype)s for protocol %(protocol)s." msgstr "Ethertype no válido %(ethertype)s para el protocolo %(protocol)s." #, python-format msgid "Invalid format: %s" msgstr "Formato no válido: %s" #, python-format msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s" msgstr "" "Estado de instancia no válido: %(state)s, los estados válidos son: " "%(valid_states)s" #, python-format msgid "Invalid mapping: '%s'" msgstr "Correlación no válida: '%s'" #, python-format msgid "Invalid pci slot %(pci_slot)s" msgstr "Ranura pci no válida %(pci_slot)s" #, python-format msgid "Invalid provider format. Last part should be 'default' or empty: %s" msgstr "" "Formato de proveedor no válido. La última parte debe ser 'default' o " "estar vacía: %s" #, python-format msgid "Invalid resource type %(resource_type)s" msgstr "Tipo de recurso %(resource_type)s no válido" #, python-format msgid "Invalid route: %s" msgstr "Ruta no válida: %s" msgid "Invalid service provider format" msgstr "Formato de proveedor de servicio no válido" #, python-format msgid "" "Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255." msgstr "" "Valor no válido para ICMP %(field)s (%(attr)s) %(value)s. Debe ser 0 a 255." #, python-format msgid "Invalid value for port %(port)s" msgstr "Valor no válido para el puerto %(port)s" msgid "" "Iptables mangle mark used to mark ingress from external network. This mark " "will be masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "Marca de corte de iptables utilizada para marcar la entrada desde la red " "externa. Esta marca se enmascara con 0xffff de modo que sólo se utilizarán " "los 16 bits inferiores." msgid "" "Iptables mangle mark used to mark metadata valid requests. This mark will be " "masked with 0xffff so that only the lower 16 bits will be used." 
msgstr "" "Marca de corte de iptables utilizada para marcar solicitudes válidas de " "metadatos. Esta marca se enmascara con 0xffff de modo que sólo se utilizarán " "los 16 bits inferiores." msgid "Keepalived didn't respawn" msgstr "Keepalived no se ha vuelto a generar" msgid "Keepalived didn't spawn" msgstr "Keepalived no se ha generado" #, python-format msgid "" "Kernel HZ value %(value)s is not valid. This value must be greater than 0." msgstr "El valor de Kernel HZ %(value)s no es válido. Debe ser mayor que 0." msgid "L3 agent failure to setup NAT for floating IPs" msgstr "El agente L3 no ha podido configurar NAT para las IP flotantes" msgid "L3 agent failure to setup floating IPs" msgstr "El agente L3 no ha podido configurar las IP flotantes" msgid "Limit number of leases to prevent a denial-of-service." msgstr "Límite de número de alquileres para evitar denegación de servicio." msgid "List of :" msgstr "Lista de :" msgid "" "List of :: or " "specifying physical_network names usable for VLAN provider and tenant " "networks, as well as ranges of VLAN tags on each available for allocation to " "tenant networks." msgstr "" "Lista de :: o especificar " "nombres de physical_network utilizables para proveedor de VLAN y " "arrendatario redes, así como rangos de etiquetas VLAN en cada uno disponible " "para asignación para las redes de arrendatarios." msgid "" "List of network type driver entrypoints to be loaded from the neutron.ml2." "type_drivers namespace." msgstr "" "Lista de puntos de entrada del controlador de tipo de red a cargar desde el " "espacio de nombres neutron.ml2.type_drivers." msgid "" "List of physical_network names with which flat networks can be created. Use " "default '*' to allow flat networks with arbitrary physical_network names. " "Use an empty list to disable flat networks." msgstr "" "Lista de nombres de physical_network con los cuales se pueden crear redes " "simples. Utillice el valor predeterminado '*' para permitir redes simples " "con nombres de physical_network arbitrarios. Utillice una lista vacía para " "deshabilitar las redes simples." msgid "Location for Metadata Proxy UNIX domain socket." msgstr "Ubicación para socket de dominio UNIX de proxy de metadatos." msgid "Location of Metadata Proxy UNIX domain socket" msgstr "Ubicación de socket de dominio UNIX de proxy de metadatos" msgid "Location to store DHCP server config files." msgstr "" "Ubicación donde almacenar los archivos de configuración de servidor DHCP." msgid "Location to store IPv6 PD files." msgstr "Ubicación donde se almacenan los archivos PD de IPv6." msgid "Location to store IPv6 RA config files" msgstr "Ubicación para almacenar archivos de configuración de IPv6 RA" msgid "Location to store child pid files" msgstr "Ubicación para almacenar archivos pid hijos" msgid "Log agent heartbeats" msgstr "Registrar señales de supervisión de agente" msgid "" "MTU of the underlying physical network. Neutron uses this value to calculate " "MTU for all virtual network components. For flat and VLAN networks, neutron " "uses this value without modification. For overlay networks such as VXLAN, " "neutron automatically subtracts the overlay protocol overhead from this " "value. Defaults to 1500, the standard value for Ethernet." msgstr "" "MTU de la red física subyacente. Neutron utiliza este valor para calcular el " "MTU de todos los componentes de la red virtual. Para redes planas y VLAN, " "neutron utiliza este valor sin modificar. 
Para redes superpuestas tales como " "VXLAN, neutron sustrae automáticamente de este valor la sobrecarga del " "protocolo de superposición. El valor predeterminado es 1500, que es el valor " "estándar para Ethernet." msgid "MTU size of veth interfaces" msgstr "Tamaño de MTU de las interfaces veth" msgid "Make the l2 agent run in DVR mode." msgstr "Hacer que el agente l2 se ejecute en modalidad DVR." msgid "Malformed request body" msgstr "Cuerpo de solicitud formado incorrectamente" msgid "MaxRtrAdvInterval setting for radvd.conf" msgstr "Parámetro MaxRtrAdvInterval para radvd.conf" msgid "Maximum number of DNS nameservers per subnet" msgstr "Número máximo de servidores de nombres DNS por subred" msgid "" "Maximum number of L3 agents which a HA router will be scheduled on. If it is " "set to 0 then the router will be scheduled on every agent." msgstr "" "Número máximo de agentes L3 en los que se planificará un direccionador HA. " "Si está definido a 0, el direccionador se planificará en todos los agentes." msgid "Maximum number of allowed address pairs" msgstr "Número máximo de pares de direcciones permitidos" msgid "Maximum number of host routes per subnet" msgstr "Número máximo de rutas de host por subred" msgid "Maximum number of routes per router" msgstr "Número máximo de rutas por direccionador" msgid "" "Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': deduce " "mode from metadata_proxy_user/group values, 'user': set metadata proxy " "socket mode to 0o644, to use when metadata_proxy_user is agent effective " "user or root, 'group': set metadata proxy socket mode to 0o664, to use when " "metadata_proxy_group is agent effective group or root, 'all': set metadata " "proxy socket mode to 0o666, to use otherwise." msgstr "" "Modalidad de socket de dominio UNIX de proxy de metadatos, 4 valores " "permitidos: 'deduce': deducir la modalidad de los valores " "metadata_proxy_user/group; 'user': establecer la modalidad de socket de " "proxy de metadatos en 0o644 y utilizarse cuando metadata_proxy_user es " "usuario efectivo de agente o raíz; 'group': establecer la modalidad de " "socket de proxy de metadatos en 0o664 y utilizarse cuando " "metadata_proxy_group es grupo efectivo de agente o raíz, 'all': establecer " "la modalidad de socket de proxy de metadatos en 0o666 y, de lo contrario, " "utilizarse." msgid "Metering driver" msgstr "Controlador de medición" msgid "MinRtrAdvInterval setting for radvd.conf" msgstr "Parámetro MinRtrAdvInterval para radvd.conf" msgid "Minimize polling by monitoring ovsdb for interface changes." msgstr "Minimizar sondeo supervisando ovsdb para cambios de interfaz." #, python-format msgid "Missing key in mapping: '%s'" msgstr "Falta clave en correlación: '%s'" msgid "" "Multicast group for VXLAN. When configured, will enable sending all " "broadcast traffic to this multicast group. When left unconfigured, will " "disable multicast VXLAN mode." msgstr "" "Grupo de multidifusión para VXLAN. Cuando esté configurado, permitirá enviar " "todo el tráfico de difusión a este grupo de multidifusión. Si se deja sin " "configurar, se deshabilitará el modo de multidifusión VXLAN." msgid "" "Multicast group(s) for vxlan interface. A range of group addresses may be " "specified by using CIDR notation. Specifying a range allows different VNIs " "to use different group addresses, reducing or eliminating spurious broadcast " "traffic to the tunnel endpoints. To reserve a unique group for each possible " "(24-bit) VNI, use a /8 such as 239.0.0.0/8.
This setting must be the same on " "all the agents." msgstr "" "Grupo(s) de multidifusión para la interfaz vxlan. Se puede especificar un rango " "de direcciones de grupo utilizando la notación CIDR. Especificar un rango " "permite que diferentes VNI utilicen diferentes direcciones de grupos, " "reduciendo o eliminando tráfico de difusión espurio a los puntos finales del " "túnel. Para reservar un grupo exclusivo para cada posible VNI (de 24 bits), " "utilice un /8, como por ejemplo 239.0.0.0/8. Este ajuste debe ser el mismo " "en todos los agentes." #, python-format msgid "Multiple default providers for service %s" msgstr "Múltiples proveedores predeterminados para servicio %s" #, python-format msgid "Multiple providers specified for service %s" msgstr "Múltiples proveedores especificados para servicio %s" msgid "Multiple tenant_ids in bulk security group rule create not allowed" msgstr "" "No se permiten varios Id de arrendatario en creación de regla de grupo de " "seguridad masiva" msgid "Must also specify protocol if port range is given." msgstr "" "Se debe especificar también el protocolo si se indica el rango de puertos." msgid "Must specify one or more actions on flow addition or modification" msgstr "" "Debe especificar una o más acciones en la adición o modificación de flujo" msgid "Name of Open vSwitch bridge to use" msgstr "Nombre de puente de Open vSwitch a utilizar" msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "" "Nombre de región de nova a utilizar. Es útil si keystone gestiona más de una " "región." msgid "Namespace of the router" msgstr "Espacio de nombres del direccionador" msgid "Native pagination depend on native sorting" msgstr "La paginación nativa depende de la ordenación nativa" #, python-format msgid "" "Need to apply migrations from %(project)s contract branch. This will require " "all Neutron server instances to be shutdown before proceeding with the " "upgrade." msgstr "" "Es necesario aplicar migraciones desde la bifurcación de contracción de " "%(project)s. Esto requerirá apagar todas las instancias de servidor de " "Neutron antes de proceder con la actualización." msgid "Negative delta (downgrade) not supported" msgstr "El delta negativo (degradación) no está soportado" msgid "Negative relative revision (downgrade) not supported" msgstr "La revisión relativa negativa (degradación) no está soportada" #, python-format msgid "Network %s does not contain any IPv4 subnet" msgstr "La red %s no contiene ninguna subred IPv4" #, python-format msgid "Network %s is not a valid external network" msgstr "La red %s no es una red externa válida" #, python-format msgid "Network %s is not an external network" msgstr "La red %s no es una red externa" #, python-format msgid "" "Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges " "%(excluded_ranges)s was not found." msgstr "" "No se ha encontrado la red de tamaño %(size)s, de rango de IP " "%(parent_range)s, excluyendo los rangos %(excluded_ranges)s." #, python-format msgid "Network type value '%s' not supported" msgstr "No hay soporte para el valor de tipo de red '%s'" msgid "Network type value needed by the ML2 plugin" msgstr "El plugin ML2 necesita el valor de tipo de red" msgid "Neutron core_plugin not configured!" msgstr "¡Neutron core_plugin no está configurado!"
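# Illustrative note, not part of the upstream catalog: the core_plugin
# message above refers to an option set in neutron.conf. A minimal sketch,
# assuming the stock ML2/router setup (values are examples, not defaults):
#   [DEFAULT]
#   core_plugin = ml2
#   service_plugins = router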
msgid "No default router:external network" msgstr "No hay ninguna red router:external predeterminada" #, python-format msgid "No default subnetpool found for IPv%s" msgstr "" "No se ha encontrado ninguna agrupación de subredes predeterminada para IPv%s" msgid "No default subnetpools defined" msgstr "No se han definido agrupaciones de subredes predeterminadas" #, python-format msgid "No more IP addresses available for subnet %(subnet_id)s." msgstr "No hay más direcciones IP disponibles en la subred %(subnet_id)s." msgid "No offline migrations pending." msgstr "No hay migraciones fuera de línea pendientes." #, python-format msgid "No shared key in %s fields" msgstr "No hay ninguna clave compartida en los campos de %s" msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "" "No está permitido asignar manualmente un direccionador a un agente en modo " "'dvr'." msgid "Not allowed to manually remove a router from an agent in 'dvr' mode." msgstr "" "No está permitido eliminar manualmente un direccionador de un agente en modo " "'dvr'." msgid "" "Number of DHCP agents scheduled to host a tenant network. If this number is " "greater than 1, the scheduler automatically assigns multiple DHCP agents for " "a given tenant network, providing high availability for DHCP service." msgstr "" "Número de agentes DHCP programados para alojar una red de arrendatarios. Si " "este número es mayor que 1, el planificador asigna automáticamente varios " "agentes DHCP de una red de arrendatarios dada y proporciona alta " "disponibilidad para el servicio DHCP." msgid "Number of backlog requests to configure the metadata server socket with" msgstr "" "Número de solicitudes de retraso para configurar el socket de servidor de " "metadatos con" msgid "Number of backlog requests to configure the socket with" msgstr "" "Número de solicitudes de registro de reserva para configurar el socket con" msgid "" "Number of bits in an ipv4 PTR zone that will be considered network prefix. " "It has to align to byte boundary. Minimum value is 8. Maximum value is 24. " "As a consequence, range of values is 8, 16 and 24" msgstr "" "Número de bits en una zona PTR ipv4 que se considerarán prefijo de red. " "Tiene que estar alineado con el límite de bytes. El valor mínimo es 8. El " "valor máximo es 24. En consecuencia, el rango de valores es 8, 16 y 24." msgid "" "Number of bits in an ipv6 PTR zone that will be considered network prefix. " "It has to align to nyble boundary. Minimum value is 4. Maximum value is 124. " "As a consequence, range of values is 4, 8, 12, 16,..., 124" msgstr "" "Número de bits en una zona PTR ipv6 que se considerarán prefijo de red. " "Tiene que estar alineado con el límite de nyble. El valor mínimo es 4. El " "valor máximo es 124. En consecuencia, el rango de valores es 4, 8, 12, " "16,..., 124." msgid "" "Number of floating IPs allowed per tenant. A negative value means unlimited." msgstr "" "Número de IP flotantes permitidas por arrendatario. Un valor negativo " "significa ilimitados." msgid "" "Number of networks allowed per tenant. A negative value means unlimited." msgstr "" "Número de redes permitidas por arrendatario. Un valor negativo significa " "ilimitado." msgid "Number of ports allowed per tenant. A negative value means unlimited." msgstr "" "Número de puertos permitidos por arrendatario. Un valor negativo significa " "ilimitado." msgid "Number of routers allowed per tenant. A negative value means unlimited." 
msgstr "" "Número de direccionadores permitidos por arrendatario. Un valor negativo " "significa ilimitado." msgid "" "Number of seconds between sending events to nova if there are any events to " "send." msgstr "" "Número de segundos entre en el envío de sucesos a nova si hay sucesos a " "enviar." msgid "Number of seconds to keep retrying to listen" msgstr "Número de segundos en seguir intentando escuchar" msgid "" "Number of security groups allowed per tenant. A negative value means " "unlimited." msgstr "" "Número de grupos de seguridad permitidos por arrendatario. Un valor negativo " "significa ilimitados." msgid "" "Number of security rules allowed per tenant. A negative value means " "unlimited." msgstr "" "Número de reglas de seguridad permitidas por arrendatario. Un valor negativo " "significa ilimitados." msgid "" "Number of separate worker processes for metadata server (defaults to half of " "the number of CPUs)" msgstr "" "Número de procesos de trabajador independientes para servidor de metadatos " "(por omisión es la mitad del número de CPU)" msgid "Number of subnets allowed per tenant, A negative value means unlimited." msgstr "" "Número de subredes permitidas por arrendatario. Un valor negativo significa " "ilimitado." msgid "" "Number of threads to use during sync process. Should not exceed connection " "pool size configured on server." msgstr "" "Número de hebras a usar durante el proceso de sincronización. No debería " "exceder el tamaño de la agrupación de conexión configurado en el servidor." msgid "OK" msgstr "OK" msgid "" "OVS datapath to use. 'system' is the default value and corresponds to the " "kernel datapath. To enable the userspace datapath set this value to 'netdev'." msgstr "" "Base de datos OVS a utilizar. 'system' es el valor predeterminado y " "corresponde a la vía de acceso a datos del kernel. Para habilitar la vía de " "acceso a datos del espacio de usuario, defina este valor a 'netdev'." msgid "OVS vhost-user socket directory." msgstr "Directorio del socket de vhost-user de OVS" msgid "Only admin can view or configure quota" msgstr "Solo los administradores pueden ver o configurar cuotas" msgid "Only admin is authorized to access quotas for another tenant" msgstr "" "Sólo está autorizado el administrador para acceder a cuotas para otro " "arrendatario" msgid "Only admins can manipulate policies on objects they do not own" msgstr "" "Sólo los admininstradores pueden manipular políticas en objetos de los que " "no son propietarios" msgid "Only allowed to update rules for one security profile at a time" msgstr "Solo se permite actualizar reglas para un perfil de seguridad a la vez" msgid "Only remote_ip_prefix or remote_group_id may be provided." msgstr "Solo se puede proporcionar remote_ip_prefix o remote_group_id." #, python-format msgid "Operation not supported on device %(dev_name)s" msgstr "Operación no admitida en el dispositivo %(dev_name)s" msgid "" "Ordered list of network_types to allocate as tenant networks. The default " "value 'local' is useful for single-box testing but provides no connectivity " "between hosts." msgstr "" "Lista ordenada de network_types para asignar como redes de arrendatarios. El " "valor predeterminado 'local' es útil para pruebas en un solo recuadro, pero " "no proporciona ninguna conectividad entre hosts." msgid "Override the default dnsmasq settings with this file." msgstr "" "Alterar temporalmente los valores dnsmasq predeterminados con este archivo." 
msgid "Owner type of the device: network/compute" msgstr "Tipo de propietario del dispositivo: red/cálculo" msgid "POST requests are not supported on this resource." msgstr "Las solicitudes de POST no son admitidas en este recurso." #, python-format msgid "Package %s not installed" msgstr "El paquete %s no se ha instalado" #, python-format msgid "Parsing bridge_mappings failed: %s." msgstr "El análisis de bridge_mappings ha fallado: %s." msgid "Password for connecting to designate in admin context" msgstr "" "Contraseña para establecer conexión con el designado en el contexto de " "administración" msgid "Path to PID file for this process" msgstr "Vía de acceso al archivo de PID para este proceso" msgid "Path to the router directory" msgstr "Vía de acceso al directorio del direccionador" msgid "Peer patch port in integration bridge for tunnel bridge." msgstr "" "Puerto de parche de igual en puente de integración para puente de túnel." msgid "Peer patch port in tunnel bridge for integration bridge." msgstr "" "Puerto de parche de igual en puente de túnel para puente de integración." msgid "Phase upgrade options do not accept revision specification" msgstr "" "Las opciones de actualización de fase no aceptan la especificación de " "revisión" msgid "Ping timeout" msgstr "Tiempo de espera de ping" #, python-format msgid "Port %(id)s does not have fixed ip %(address)s" msgstr "El puerto %(id)s no tiene una IP fija %(address)s" #, python-format msgid "Port %s does not exist" msgstr "El puerto %s no existe" #, python-format msgid "" "Port %s has multiple fixed IPv4 addresses. Must provide a specific IPv4 " "address when assigning a floating IP" msgstr "" "El puerto %s tiene varias direcciones IPv4 fijas. Debe proporcionar una " "dirección IPv4 específica al asignar una IP flotante" msgid "Prefix Delegation can only be used with IPv6 subnets." msgstr "La delegación de prefijos sólo se puede utilizar con subredes IPv6." msgid "Private key of client certificate." msgstr "Clave privada del certificado de cliente." #, python-format msgid "Probe %s deleted" msgstr "Se ha suprimido el analizador %s" #, python-format msgid "Probe created : %s " msgstr "Se ha creado analizador: %s " msgid "Process is already started" msgstr "El proceso ya se ha iniciado" msgid "Process is not running." msgstr "El proceso no se está ejecutando." msgid "Protocol to access nova metadata, http or https" msgstr "El protocolo para acceder a los metadatos de Nova, http o https" #, python-format msgid "Provider name %(name)s is limited by %(len)s characters" msgstr "El nombre de proveedor %(name)s está limitado a %(len)s caracteres" #, python-format msgid "RBAC policy of type %(object_type)s with ID %(id)s not found" msgstr "" "No se ha encontrado la política RBAC de tipo %(object_type)s con el ID %(id)s" #, python-format msgid "" "RBAC policy on object %(object_id)s cannot be removed because other objects " "depend on it.\n" "Details: %(details)s" msgstr "" "No se puede eliminar la política RBAC en el objeto %(object_id)s porque " "otros objetos dependen de ella.\n" "Detlles: %(details)s" msgid "" "Range of seconds to randomly delay when starting the periodic task scheduler " "to reduce stampeding. (Disable by setting to 0)" msgstr "" "Rango de segundos para retrasar aleatoriamente al iniciar la tarea periódica " "programador para reducir avalanchas. (Inhabilitar al establecer en 0)" msgid "Ranges must be in the same IP version" msgstr "Los rangos deben ser de la misma versión de IP." 
msgid "Ranges must be netaddr.IPRange" msgstr "Los rangos deben ser netaddr.IPRange" msgid "Ranges must not overlap" msgstr "Los rangos no se pueden solapar." #, python-format msgid "" "Release aware branch labels (%s) are deprecated. Please switch to expand@ " "and contract@ labels." msgstr "" "Las etiquetas de rama para el release (%s) están en desusuo. Cambie a las " "etiquetas expand@ y contract@." msgid "Remote metadata server experienced an internal server error." msgstr "" "El servidor de metadatos remoto ha experimentado un error de servidor " "interno. " msgid "" "Repository does not contain HEAD files for contract and expand branches." msgstr "" "El repositorio no contiene archivos HEAD para bifurcaciones de contratación " "y ampliación." msgid "" "Representing the resource type whose load is being reported by the agent. " "This can be \"networks\", \"subnets\" or \"ports\". When specified (Default " "is networks), the server will extract particular load sent as part of its " "agent configuration object from the agent report state, which is the number " "of resources being consumed, at every report_interval.dhcp_load_type can be " "used in combination with network_scheduler_driver = neutron.scheduler." "dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is " "WeightScheduler, dhcp_load_type can be configured to represent the choice " "for the resource being balanced. Example: dhcp_load_type=networks" msgstr "" "Representando el tipo de recurso cuya carga está notificando el agente. " "Puede ser \"networks\", \"subnets\" o \"ports\". Cuando se especifica (el " "valor predeterminado es redes), el servidor extraerá la carga particular " "enviada como parte del objeto de configuración de agentes desde el estado de " "informe del agente, que es el número de recursos que se está consumiendo, en " "cada report_interval.dhcp_load_type que puede utilizarse junto con " "network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler." "WeightScheduler. Cuando network_scheduler_driver es WeightScheduler, " "dhcp_load_type se puede configurar para representar la opción para el " "recurso que se está equilibrando. Ejemplo: dhcp_load_type=networks" msgid "Request Failed: internal server error while processing your request." msgstr "" "Ha fallado la solicitar: error interno de servidor al procesar la solicitud." msgid "" "Reset flow table on start. Setting this to True will cause brief traffic " "interruption." msgstr "" "Restablecer tabla de flujo al iniciar. Si se establece en True, se producirá " "una interrupción breve del tráfico ." #, python-format msgid "Resource %(resource)s %(resource_id)s could not be found." msgstr "No se ha podido encontrar el recurso %(resource)s %(resource_id)s." #, python-format msgid "Resource %(resource_id)s of type %(resource_type)s not found" msgstr "" "No se ha encontrado el recurso %(resource_id)s de tipo %(resource_type)s" #, python-format msgid "" "Resource '%(resource_id)s' is already associated with provider " "'%(provider)s' for service type '%(service_type)s'" msgstr "" "El recurso '%(resource_id)s' ya está asociado con el proveedor " "'%(provider)s' para el tipo de servicio '%(service_type)s'" msgid "Resource body required" msgstr "Se necesita cuerpo de recurso" msgid "Resource not found." msgstr "Recurso no encontrado." msgid "Resources required" msgstr "Recursos necesarios " msgid "" "Root helper application. Use 'sudo neutron-rootwrap /etc/neutron/rootwrap." "conf' to use the real root filter facility. 
Change to 'sudo' to skip the " "filtering and just run the command directly." msgstr "" "Aplicación de ayudante raíz. Utilice 'sudo neutron-rootwrap /etc/neutron/" "rootwrap.conf' para utilizar el recurso de filtro raíz real. Cambie " "a 'sudo' para omitir el filtrado y ejecutar directamente el mandato." msgid "Root permissions are required to drop privileges." msgstr "Se necesitan permisos de root para descartar privilegios." #, python-format msgid "Router already has a port on subnet %s" msgstr "El direccionador ya tiene un puerto en la subred %s" msgid "Router port must have at least one fixed IP" msgstr "El puerto del direccionador debe tener al menos una IP fija" #, python-format msgid "Running %(cmd)s (%(desc)s) for %(project)s ..." msgstr "Ejecutando %(cmd)s (%(desc)s) para %(project)s ..." #, python-format msgid "Running %(cmd)s for %(project)s ..." msgstr "Ejecutando %(cmd)s para %(project)s ..." msgid "" "Seconds between nodes reporting state to server; should be less than " "agent_down_time, best if it is half or less than agent_down_time." msgstr "" "Segundos entre nodos que informan del estado al servidor; debe ser menor que " "agent_down_time, mejor si es la mitad o menos que agent_down_time." msgid "" "Seconds to regard the agent is down; should be at least twice " "report_interval, to be sure the agent is down for good." msgstr "" "Segundos para considerar que el agente está inactivo; debe ser como mínimo " "el doble de report_interval, para asegurarse de que el agente está inactivo " "definitivamente." #, python-format msgid "Security Group %(id)s %(reason)s." msgstr "Grupo de seguridad %(id)s %(reason)s." #, python-format msgid "Security Group Rule %(id)s %(reason)s." msgstr "Regla de grupo de seguridad %(id)s %(reason)s." #, python-format msgid "Security group %(id)s does not exist" msgstr "El grupo de seguridad %(id)s no existe" #, python-format msgid "Security group rule %(id)s does not exist" msgstr "La regla de grupo de seguridad %(id)s no existe" #, python-format msgid "Security group rule already exists. Rule id is %(rule_id)s." msgstr "" "La regla de grupo de seguridad ya existe. El id de regla es %(rule_id)s." #, python-format msgid "" "Security group rule for ethertype '%(ethertype)s' not supported. Allowed " "values are %(values)s." msgstr "" "No se admite la regla de grupo de seguridad para ethertype '%(ethertype)s'. " "Los valores permitidos son %(values)s." #, python-format msgid "" "Security group rule protocol %(protocol)s not supported. Only protocol " "values %(values)s and integer representations [0 to 255] are supported." msgstr "" "El protocolo de la regla del grupo de seguridad %(protocol)s no es admitido. " "Solo se admiten valores de protocolo %(values)s y representaciones de " "enteros [de 0 a 255]." msgid "" "Send notification to nova when port data (fixed_ips/floatingip) changes so " "nova can update its cache." msgstr "" "Envíe notificación a nova cuando cambien los datos de puerto (fixed_ips/" "floatingip) para que nova pueda actualizar la memoria caché." msgid "Send notification to nova when port status changes" msgstr "Envíe notificación a nova cuando cambie el estado de puerto" #, python-format msgid "" "Service provider '%(provider)s' could not be found for service type " "%(service_type)s" msgstr "" "El proveedor de servicio '%(provider)s' no se ha podido encontrar para el " "tipo de servicio %(service_type)s" msgid "Service to handle DHCPv6 Prefix delegation." msgstr "Servicio que gestiona la delegación de prefijos DHCPv6."
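# Illustrative note, not part of the upstream catalog: the two nova
# notification messages above map to flags in neutron.conf; assuming the
# usual option names, a sketch with both notifications enabled:
#   [DEFAULT]
#   notify_nova_on_port_status_changes = true
#   notify_nova_on_port_data_changes = true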
#, python-format msgid "Service type %(service_type)s does not have a default service provider" msgstr "" "El tipo de servicio %(service_type)s no tiene un proveedor de servicio " "predeterminado" msgid "" "Set new timeout in seconds for new rpc calls after agent receives SIGTERM. " "If value is set to 0, rpc timeout won't be changed" msgstr "" "Establecer el nuevo tiempo de espera en segundos para nuevas llamadas rpc " "después de que el agente reciba SIGTERM. Si el valor se establece en 0, no " "se modificará el tiempo de espera de rpc" msgid "" "Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "" "Establecer o anular el establecimiento del bit DF (don't fragment) en el " "paquete de IP saliente que lleva el túnel GRE/VXLAN." msgid "Shared address scope can't be unshared" msgstr "No se puede dejar de compartir el ámbito de la dirección compartida" msgid "String prefix used to match IPset names." msgstr "Prefijo de serie utilizado para coincidir con nombres IPset." #, python-format msgid "Sub-project %s not installed." msgstr "El subproyecto %s no se ha instalado." msgid "Subnet for router interface must have a gateway IP" msgstr "" "La subred para la interfaz de direccionador debe tener una IP de pasarela" msgid "Subnet pool has existing allocations" msgstr "La agrupación de subred tiene asignaciones existentes" msgid "Subnet used for the l3 HA admin network." msgstr "Subred utilizada para la red de administración de alta disponibilidad (HA) de l3." msgid "" "System-wide flag to determine the type of router that tenants can create. " "Only admin can override." msgstr "" "Distintivo en todo el sistema para determinar el tipo de direccionador que " "pueden crear los arrendatarios. Sólo el administrador puede alterarlo " "temporalmente." msgid "TCP Port used by Neutron metadata namespace proxy." msgstr "" "Puerto TCP usado por el proxy de espacio de nombres de metadatos de Neutron." msgid "TCP Port used by Nova metadata server." msgstr "Puerto TCP utilizado por el servidor de metadatos de Nova." msgid "TTL for vxlan interface protocol packets." msgstr "TTL para paquetes de protocolo de interfaz vxlan." #, python-format msgid "Tag %(tag)s could not be found." msgstr "No se ha podido encontrar la etiqueta %(tag)s." #, python-format msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" msgstr "" "El arrendatario %(tenant_id)s no está autorizado a crear %(resource)s en " "esta red" msgid "Tenant id for connecting to designate in admin context" msgstr "" "ID de inquilino para conectar con designate en el contexto de " "administración" msgid "Tenant name for connecting to designate in admin context" msgstr "" "Nombre de inquilino para conectar con designate en el contexto " "de administración" msgid "" "The DHCP server can assist with providing metadata support on isolated " "networks. Setting this value to True will cause the DHCP server to append " "specific host routes to the DHCP request. The metadata service will only be " "activated when the subnet does not contain any router port. The guest " "instance must be configured to request host routes via DHCP (Option 121). " "This option doesn't have any effect when force_metadata is set to True." msgstr "" "El servidor DHCP puede ayudar a proporcionar soporte para metadatos en " "redes aisladas. Si se define este valor a True, provocará que el servidor " "DHCP añada rutas específicas de host a la solicitud DHCP.
El servicio de " "metadatos sólo se activará cuando la subred no contenga ningún puerto de " "direccionador. La instancia de invitado debe estar configurada para " "solicitar rutas de host vía DHCP (Opción 121). Esta opción no tiene ningún " "efecto cuando force_metadata está definido en True." msgid "The UDP port to use for VXLAN tunnels." msgstr "El puerto UDP a usar para los túneles VXLAN." #, python-format msgid "" "The address allocation request could not be satisfied because: %(reason)s" msgstr "" "No se ha podido satisfacer la solicitud de asignación de dirección porque: " "%(reason)s" msgid "The advertisement interval in seconds" msgstr "Intervalo de anuncio en segundos" msgid "" "The base MAC address Neutron will use for VIFs. The first 3 octets will " "remain unchanged. If the 4th octet is not 00, it will also be used. The " "others will be randomly generated." msgstr "" "Dirección MAC base que Neutron utiliza para las VIF. Los 3 primeros octetos " "permanecerán sin cambios. Si el cuarto octeto no es 00, también se " "utilizará. Los otros se generan aleatoriamente. " msgid "" "The base mac address used for unique DVR instances by Neutron. The first 3 " "octets will remain unchanged. If the 4th octet is not 00, it will also be " "used. The others will be randomly generated. The 'dvr_base_mac' *must* be " "different from 'base_mac' to avoid mixing them up with MAC's allocated for " "tenant ports. A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00. " "The default is 3 octet" msgstr "" "Dirección mac base que Neutron utiliza para las instancias DVR exclusivas. " "Los 3 primeros octetos permanecerán sin cambios. Si el cuarto octeto no es " "00, también se utilizará. Los otros se generan aleatoriamente. " "'dvr_base_mac' *debe* ser diferente de 'base_mac' para evitar que se mezclen " "con MAC asignado para los puertos de arrendatarios. Un ejemplo de 4 octetos " "sería dvr_base_mac = fa:16:3f:4f:00:00. El valor predeterminado es 3 octetos." msgid "The core plugin Neutron will use" msgstr "El core plugin que utilizará Neutron" msgid "The driver used to manage the DHCP server." msgstr "El controlador utilizado para gestionar el servidor DHCP." msgid "The driver used to manage the virtual interface." msgstr "El controlador utilizado para gestionar la interfaz virtual." msgid "" "The email address to be used when creating PTR zones. If not specified, the " "email address will be admin@<dns_domain>" msgstr "" "La dirección de correo electrónico a utilizar al crear zonas PTR. Si no se " "especifica, la dirección de correo electrónico será admin@<dns_domain>" msgid "" "The maximum number of items returned in a single response, value was " "'infinite' or negative integer means no limit" msgstr "" "El número máximo de elementos devueltos en una única respuesta, el valor " "'infinite' o un entero negativo significa que no hay límite" msgid "" "The network type to use when creating the HA network for an HA router. By " "default or if empty, the first 'tenant_network_types' is used. This is " "helpful when the VRRP traffic should use a specific network which is not the " "default one." msgstr "" "Tipo de red que se debe utilizar al crear la red de alta disponibilidad para " "un direccionador HA. De manera predeterminada o si está vacío, se utiliza el " "primero de 'tenant_network_types'. Esto es útil cuando el tráfico VRRP debe " "utilizar una red específica que no sea el valor predeterminado." msgid "" "The number of seconds the agent will wait between polling for local device " "changes."
msgstr "" "El número de segundos que el agente esperará entre sondeos de cambios de " "dispositivo local." msgid "" "The number of seconds to wait before respawning the ovsdb monitor after " "losing communication with it." msgstr "" "Número de segundos a esperar antes de volver a generar el supervisor ovsdb " "después de perder la comunicación con él." msgid "The number of sort_keys and sort_dirs must be same" msgstr "El número de sort_keys y sort_dirs debe ser igual" msgid "" "The path for API extensions. Note that this can be a colon-separated list of " "paths. For example: api_extensions_path = extensions:/path/to/more/exts:/" "even/more/exts. The __path__ of neutron.extensions is appended to this, so " "if your extensions are in there you don't need to specify them here." msgstr "" "La vía de acceso para ampliaciones de API. Observe que puede ser una lista " "de vías de acceso separadas por punto y coma. Por ejemplo: " "api_extensions_path = extensions:/path/to/more/exts:/even/more/exts. Además, " "se añade __path__ of neutron.extensions, de forma que si sus extensiones " "están ahí no es necesario especificarlas aquí." msgid "The physical network name with which the HA network can be created." msgstr "Nombre de la red física con la que se puede crear la red HA." #, python-format msgid "The port '%s' was deleted" msgstr "El puerto '%s' se ha suprimido" msgid "The port to bind to" msgstr "El puerto al que enlazar" #, python-format msgid "The requested content type %s is invalid." msgstr "El tipo de contenido solicitado %s no es válido." msgid "The resource could not be found." msgstr "El recurso no se ha podido encontrar." #, python-format msgid "" "The router %(router_id)s has been already hosted by the L3 Agent " "%(agent_id)s." msgstr "" "El direccionador %(router_id)s ya está alojado por el agente L3 %(agent_id)s." msgid "" "The server has either erred or is incapable of performing the requested " "operation." msgstr "" "El servidor tiene un error o no puede ejecutar la operación solicitada." msgid "The service plugins Neutron will use" msgstr "Los plug-ins de servicio que utilizará Neutron" #, python-format msgid "The subnet request could not be satisfied because: %(reason)s" msgstr "No se ha podido satisfacer la solicitud de subred porque: %(reason)s" #, python-format msgid "The subproject to execute the command against. Can be one of: '%s'." msgstr "Subproyecto con el que ejecutar el mandato. Puede ser uno de: '%s'." msgid "The type of authentication to use" msgstr "El tipo de autenticación a utilizar" msgid "" "There are routers attached to this network that depend on this policy for " "access." msgstr "" "Hay direccionadores conectados a esta red que dependen de esta política para " "su acceso." msgid "" "True to delete all ports on all the OpenvSwitch bridges. False to delete " "ports created by Neutron on integration and external network bridges." msgstr "" "Verdadero para suprimir todos los puertos en todos los puentes OpenvSwitch. " "Falso para suprimir puertos creados por Neutron por los puentes de red " "externos y de integración." msgid "Tunnel IP value needed by the ML2 plugin" msgstr "El plugin ML2 necesita el valor de IP de túnel" msgid "Tunnel bridge to use." msgstr "Puente de túnel para utilizar." msgid "" "Type of the nova endpoint to use. This endpoint will be looked up in the " "keystone catalog and should be one of public, internal or admin." msgstr "" "Tipo de punto final de nova a utilizar. 
Este punto final se consultará en el " "catálogo de keystone y debe ser uno de los siguientes: público, interno o " "administrativo." msgid "URL for connecting to designate" msgstr "URL para conectar con designate" msgid "URL to database" msgstr "URL de la base de datos" #, python-format msgid "Unable to access %s" msgstr "No se puede acceder a %s" #, python-format msgid "Unable to calculate %(address_type)s address because of:%(reason)s" msgstr "" "No se puede calcular la dirección %(address_type)s debido a: %(reason)s" #, python-format msgid "Unable to convert value in %s" msgstr "No se puede convertir el valor en %s" msgid "Unable to create the SNAT Interface Port" msgstr "No se puede crear el puerto de interfaz SNAT" #, python-format msgid "Unable to determine mac address for %s" msgstr "No se ha podido determinar la dirección mac para %s" #, python-format msgid "Unable to find '%s' in request body" msgstr "No se puede encontrar '%s' en el cuerpo de la solicitud" #, python-format msgid "Unable to find IP address %(ip_address)s on subnet %(subnet_id)s" msgstr "" "No se ha encontrado la dirección IP %(ip_address)s en la subred %(subnet_id)s" #, python-format msgid "Unable to find resource name in %s" msgstr "No se ha podido encontrar el nombre del recurso en %s" #, python-format msgid "" "Unable to identify a target field from:%s. Match should be in the form " "%%(<field_name>)s" msgstr "" "No se ha podido identificar un campo destino desde: %s. La coincidencia debe " "tener la forma %%(<field_name>)s" msgid "Unable to provide external connectivity" msgstr "No se puede proporcionar conectividad externa" msgid "Unable to provide tenant private network" msgstr "No se puede proporcionar una red privada de inquilino" #, python-format msgid "" "Unable to verify match:%(match)s as the parent resource: %(res)s was not " "found" msgstr "" "No se ha podido verificar la coincidencia:%(match)s como recurso primario: " "%(res)s no se ha encontrado" #, python-format msgid "Unexpected label for script %(script_name)s: %(labels)s" msgstr "Etiqueta inesperada para el script %(script_name)s: %(labels)s" #, python-format msgid "Unexpected number of alembic branch points: %(branchpoints)s" msgstr "Número inesperado de puntos de bifurcación de alembic: %(branchpoints)s" #, python-format msgid "Unexpected response code: %s" msgstr "Código de respuesta inesperado: %s" #, python-format msgid "Unexpected response: %s" msgstr "Respuesta inesperada: %s" #, python-format msgid "Unit name '%(unit)s' is not valid." msgstr "El nombre de unidad '%(unit)s' no es válido." #, python-format msgid "Unknown address type %(address_type)s" msgstr "Tipo de dirección desconocido %(address_type)s" #, python-format msgid "Unknown attribute '%s'." msgstr "Atributo desconocido '%s'." #, python-format msgid "Unknown chain: %r" msgstr "Cadena desconocida: %r" #, python-format msgid "Unknown network type %(network_type)s." msgstr "Tipo de red desconocido %(network_type)s." msgid "Unmapped error" msgstr "Error no correlacionado" msgid "Unrecognized action" msgstr "Acción no reconocida" msgid "Unrecognized field" msgstr "Campo no reconocido" msgid "Unsupported Content-Type" msgstr "Tipo de contenido no soportado" #, python-format msgid "Unsupported network type %(net_type)s." msgstr "Tipo de red no soportado %(net_type)s." msgid "Unsupported request type" msgstr "Tipo de solicitud no soportado" msgid "Updating default security group not allowed." msgstr "Actualización del grupo de seguridad predeterminado no permitida."
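# Illustrative note, not part of the upstream catalog: the designate
# connection strings in this section (URL, username, password, tenant) are
# grouped, to the best of our knowledge, under a [designate] section of
# neutron.conf; section and option names assumed, values are placeholders:
#   [designate]
#   url = http://127.0.0.1:9001/v2
#   admin_username = neutron
#   admin_password = secret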
msgid "" "Use ML2 l2population mechanism driver to learn remote MAC and IPs and " "improve tunnel scalability." msgstr "" "Use el controlador del mecanismo ML2 l2population para aprender el uso " "remoto MAC e IPs y mejorar la escalabilidad del túnel." msgid "Use broadcast in DHCP replies." msgstr "Utilizar la difusión en respuestas DHCP." msgid "Use either --delta or relative revision, not both" msgstr "Utilice --delta o la revisión relativa, pero no ambas" msgid "" "Use ipset to speed-up the iptables based security groups. Enabling ipset " "support requires that ipset is installed on L2 agent node." msgstr "" "Utilice ipset para agilizar los grupos de seguridad basados en iptables. " "Para habilitar el soporte para ipset es necesario que ipset esté instalado " "en el nodo agente L2." msgid "" "Use the root helper when listing the namespaces on a system. This may not be " "required depending on the security configuration. If the root helper is not " "required, set this to False for a performance improvement." msgstr "" "Utilice el ayudante raíz para listar los espacios de nombres en un sistema. " "Esto puede no ser necesario dependiendo de la configuración de seguridad. Si " "el ayudante raíz no es necesario, establézcalo en False para mejorar el " "rendimiento." msgid "" "Use veths instead of patch ports to interconnect the integration bridge to " "physical networks. Support kernel without Open vSwitch patch port support so " "long as it is set to True." msgstr "" "Utilice veths en lugar de puertos de parche para interconectar el puente de " "integración con las redes físicas. Se admite kernel sin soporte de puertos " "de parche Open vSwitch siempre y cuando esté definido a True." msgid "" "User (uid or name) running metadata proxy after its initialization (if " "empty: agent effective user)." msgstr "" "Usuario (uid o nombre) que ejecuta el proxy de metadatos después de su " "inicialización (si está vacío: usuario efectivo del agente)." msgid "User (uid or name) running this process after its initialization" msgstr "" "Usuario (uid o nombre) que ejecuta este proceso después de su inicialización" msgid "Username for connecting to designate in admin context" msgstr "" "Nombre de usuario para establecer conexión con el designado en el contexto " "de administración" msgid "VRRP authentication password" msgstr "Contraseña de autenticación de VRRP" msgid "VRRP authentication type" msgstr "Tipo de autenticación VRRP" msgid "" "Value of host kernel tick rate (hz) for calculating minimum burst value in " "bandwidth limit rules for a port with QoS. See kernel configuration file for " "HZ value and tc-tbf manual for more information." msgstr "" "Valor del tickrate del kernel host para calcular el valor mínimo de ráfaga " "en las reglas de limitación de ancho de banda para un puerto con QoS, " "Consulte en el archivo de configuración el valor de HZ y consulte el manual " "de tc-tbf para obtener más información." msgid "" "Value of latency (ms) for calculating size of queue for a port with QoS. See " "tc-tbf manual for more information." msgstr "" "Valor de latencia (ms) para calcular el tamaño de la cola para un puerto con " "QoS. Consulte el manual de tc-tbf para obtener más información." msgid "" "When proxying metadata requests, Neutron signs the Instance-ID header with a " "shared secret to prevent spoofing. You may select any string for a secret, " "but it must match here and in the configuration used by the Nova Metadata " "Server. 
NOTE: Nova uses the same config key, but in [neutron] section." msgstr "" "Cuando se envían solicitudes de metadatos por proxy, Neutron firma la " "cabecera ID de instancia con un secreto compartido para evitar la " "suplantación de identidad. Puede seleccionar cualquier cadena como secreto, " "pero debe coincidir con la que se haya utilizado en la configuración del " "servidor de metadatos de Nova. NOTA: Nova utiliza la misma clave de " "configuración, pero en la sección [neutron]." msgid "" "Where to store Neutron state files. This directory must be writable by the " "agent." msgstr "" "Dónde almacenar archivos de estado de Neutron. El agente debe tener permiso " "de escritura en este directorio." msgid "" "With IPv6, the network used for the external gateway does not need to have " "an associated subnet, since the automatically assigned link-local address " "(LLA) can be used. However, an IPv6 gateway address is needed for use as the " "next-hop for the default route. If no IPv6 gateway address is configured " "here, (and only then) the neutron router will be configured to get its " "default route from router advertisements (RAs) from the upstream router; in " "which case the upstream router must also be configured to send these RAs. " "The ipv6_gateway, when configured, should be the LLA of the interface on the " "upstream router. If a next-hop using a global unique address (GUA) is " "desired, it needs to be done via a subnet allocated to the network and not " "through this parameter. " msgstr "" "Con IPv6, la red utilizada para la pasarela externa no necesita tener una " "subred asociada, ya que puede utilizarse la dirección de enlace local (LLA) " "asignada automáticamente. No obstante, se necesita una dirección de pasarela " "IPv6 para utilizarla como siguiente salto para la ruta predeterminada. Si no " "se configura aquí ninguna dirección de pasarela IPv6, (y sólo entonces) el " "direccionador de Neutron se configurará para obtener su ruta predeterminada " "de los avisos de direccionador (RA) del direccionador en sentido ascendente; " "en este caso, el direccionador en sentido ascendente también debe " "configurarse para enviar estos RA. El parámetro ipv6_gateway, cuando se " "configura, debe ser la LLA de la interfaz en el direccionador en sentido " "ascendente. Si se desea un siguiente salto que utilice una dirección " "exclusiva global (GUA), debe hacerse mediante una subred asignada a la red, " "no mediante este parámetro." msgid "You must implement __call__" msgstr "Debe implementar __call__" msgid "" "You must provide a config file for bridge - either --config-file or " "env[NEUTRON_TEST_CONFIG_FILE]" msgstr "" "Debe proporcionar un archivo config para puente, ya sea --config-file o " "env[NEUTRON_TEST_CONFIG_FILE]" msgid "You must provide a revision or relative delta" msgstr "Debe proporcionar una revisión o delta relativa" msgid "a subnetpool must be specified in the absence of a cidr" msgstr "se debe especificar una agrupación de subredes si no hay un cidr" msgid "add_ha_port cannot be called inside of a transaction." msgstr "no se puede invocar a add_ha_port dentro de una transacción." msgid "allocation_pools allowed only for specific subnet requests." msgstr "" "allocation_pools sólo se permite para solicitudes de subred específicas."
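# Illustrative note, not part of the upstream catalog: per the ipv6_gateway
# help text above, the value should be the link-local address (LLA) of the
# upstream router's interface; a hedged l3_agent.ini sketch with a
# placeholder LLA:
#   [DEFAULT]
#   ipv6_gateway = fe80::1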
msgid "allocation_pools are not in the subnet" msgstr "allocation_pools no están en la subred" msgid "allocation_pools use the wrong ip version" msgstr "allocation_pools utiliza la versión de IP incorrecta" msgid "already a synthetic attribute" msgstr "ya es un atributo sintético" msgid "binding:profile value too large" msgstr "Valor de binding:profile demasiado grande" #, python-format msgid "cannot perform %(event)s due to %(reason)s" msgstr "no se puede llevar a cabo %(event)s debido a %(reason)s" msgid "cidr and prefixlen must not be supplied together" msgstr "cidr y prefixlen no pueden proporcionarse conjuntamente" msgid "dns_domain cannot be specified without a dns_name" msgstr "No se puede especificar dns_domain sin un dns_name" msgid "dns_name cannot be specified without a dns_domain" msgstr "No se puede especificar dns_name sin un dns_domain" msgid "fixed_ip_address cannot be specified without a port_id" msgstr "fixed_ip_address no se puede especificar sin un port_id" #, python-format msgid "has device owner %s" msgstr "tiene el propietario de dispositivo %s" msgid "in use" msgstr "en uso" #, python-format msgid "ip command failed on device %(dev_name)s: %(reason)s" msgstr "El mandato ip ha fallado en el dispositivo %(dev_name)s: %(reason)s" #, python-format msgid "ip link capability %(capability)s is not supported" msgstr "No hay soporte para la función de ip link %(capability)s" #, python-format msgid "ip link command is not supported: %(reason)s" msgstr "No hay soporte para el mandato ip link: %(reason)s" msgid "ip_version must be specified in the absence of cidr and subnetpool_id" msgstr "ip_version debe especificarse en ausencia de cidr y subnetpool_id" msgid "ipv6_address_mode is not valid when ip_version is 4" msgstr "ipv6_address_mode no es válido cuando ip_version es 4" msgid "ipv6_ra_mode is not valid when ip_version is 4" msgstr "ipv6_ra_mode no es válido cuando ip_version es 4" #, python-format msgid "" "ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " "'%(addr_mode)s' is not valid. If both attributes are set, they must be the " "same value" msgstr "" "ipv6_ra_mode establecido en '%(ra_mode)s' con ipv6_address_mode establecido " "en '%(addr_mode)s' no es válido. Si se establecen ambos atributos, deben " "tener el mismo valor" msgid "mac address update" msgstr "Actualización de la dirección MAC" msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "debe dar exactamente 2 argumentos: cidr y MAC" msgid "network_type required" msgstr "network_type requerido" #, python-format msgid "network_type value '%s' not supported" msgstr "valor network_type '%s' no admitido" msgid "new subnet" msgstr "nueva subred" #, python-format msgid "physical_network '%s' unknown for flat provider network" msgstr "physical_network '%s' desconocida para la red de proveedor simple" msgid "physical_network required for flat provider network" msgstr "se requiere physical_network para la red de proveedor simple" #, python-format msgid "provider:physical_network specified for %s network" msgstr "proveedor:physical_network especificado para la red %s" msgid "respawn_interval must be >= 0 if provided." msgstr "respawn_interval debe ser >= 0 si se proporciona." 
#, python-format msgid "segmentation_id out of range (%(min)s through %(max)s)" msgstr "segmentation_id fuera de rango (%(min)s a %(max)s)" msgid "segmentation_id requires physical_network for VLAN provider network" msgstr "" "segmentation_id requiere physical_network para la red de proveedor VLAN" msgid "shared attribute switching to synthetic" msgstr "atributo compartido cambiando a sintético" #, python-format msgid "" "subnetpool %(subnetpool_id)s cannot be updated when associated with shared " "address scope %(address_scope_id)s" msgstr "" "La agrupación de subred %(subnetpool_id)s no se puede actualizar cuando está " "asociada con el ámbito de dirección compartida %(address_scope_id)s" msgid "subnetpool_id and use_default_subnetpool cannot both be specified" msgstr "" "No se puede especificar a la vez subnetpool_id y use_default_subnetpool" msgid "the nexthop is not connected with router" msgstr "el siguiente salto no está conectado con el direccionador" msgid "the nexthop is used by router" msgstr "el siguiente salto lo está utilizando el direccionador" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982290.9710417 neutron-16.0.0.0b2.dev214/neutron/locale/fr/0000755000175000017500000000000000000000000020503 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3150446 neutron-16.0.0.0b2.dev214/neutron/locale/fr/LC_MESSAGES/0000755000175000017500000000000000000000000022270 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/locale/fr/LC_MESSAGES/neutron.po0000644000175000017500000030070400000000000024326 0ustar00coreycorey00000000000000# Translations template for neutron. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the neutron project. # # Translators: # François Bureau, 2013 # Maxime COQUEREL , 2014-2015 # Patrice LACHANCE , 2013 # Patrice LACHANCE , 2013 # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: neutron VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2019-12-20 15:01+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 05:53+0000\n" "Last-Translator: Copied by Zanata \n" "Language: fr\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: French\n" #, python-format msgid "" "\n" "Command: %(cmd)s\n" "Exit code: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" msgstr "" "\n" "Commande : %(cmd)s\n" "Code de sortie : %(code)s\n" "Stdin : %(stdin)s\n" "Stdout : %(stdout)s\n" "Stderr : %(stderr)s" #, python-format msgid "" "%(branch)s HEAD file does not match migration timeline head, expected: " "%(head)s" msgstr "" "Le fichier HEAD %(branch)s ne correspond pas à l'élément Head de calendrier " "de migration, attendu : %(head)s" #, python-format msgid "" "%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' " "and '%(desc)s'" msgstr "" "%(invalid_dirs)s est une valeur non valide pour sort_dirs ; les valeurs " "valides sont '%(asc)s' et '%(desc)s'" #, python-format msgid "%(key)s prohibited for %(tunnel)s provider network" msgstr "%(key)s interdit pour le réseau de fournisseur %(tunnel)s" #, python-format msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" msgstr "%(name)s '%(addr)s' ne correspond pas à ip_version '%(ip_version)s'" #, python-format msgid "%s cannot be called while in offline mode" msgstr "%s ne peut pas être appelé en mode hors ligne" #, python-format msgid "%s is invalid attribute for sort_keys" msgstr "%s est un attribut non valide pour sort_keys" #, python-format msgid "%s must implement get_port_from_device or get_ports_from_devices." msgstr "%s doit implémenter get_port_from_device ou get_ports_from_devices." #, python-format msgid "%s prohibited for VLAN provider network" msgstr "%s interdit pour le réseau de fournisseurs de réseau local virtuel" #, python-format msgid "%s prohibited for flat provider network" msgstr "%s interdit pour le réseau de fournisseurs non hiérarchique" #, python-format msgid "%s prohibited for local provider network" msgstr "%s interdit pour le réseau de fournisseurs local" #, python-format msgid "'%s' is not a valid RBAC object type" msgstr "'%s' n'est pas un type d'objet RBAC valide" #, python-format msgid "'%s' is not supported for filtering" msgstr "'%s' n'est pas pris en charge pour le filtrage " #, python-format msgid "'module' object has no attribute '%s'" msgstr "L'objet 'module' n'a pas d'attribut '%s'" msgid "'port_max' is smaller than 'port_min'" msgstr "'port_max' est plus petit que 'port_min'" msgid "0 is not allowed as CIDR prefix length" msgstr "La longueur 0 n'est pas autorisée pour le préfixe CIDR" msgid "A cidr must be specified in the absence of a subnet pool" msgstr "" "Une valeur cidr doit être indiquée si aucun pool de sous-réseau n'est défini" msgid "" "A decimal value as Vendor's Registered Private Enterprise Number as required " "by RFC3315 DUID-EN." msgstr "" "Valeur décimale telle que Numéro d'entreprise privé enregistré du " "fournisseur comme requis par RFC3315 DUID-EN." #, python-format msgid "A default external network already exists: %(net_id)s." msgstr "Un réseau externe par défaut existe déjà : %(net_id)s." msgid "" "A default subnetpool for this IP family has already been set. 
Only one " "default may exist per IP family" msgstr "" "Un pool de sous-réseau par défaut pour cette famille IP est déjà défini. Il " "ne peut y avoir qu'un seul pool par défaut par famille IP." msgid "A metering driver must be specified" msgstr "Un pilote de mesure doit être spécifié." msgid "Access to this resource was denied." msgstr "L'accès a cette ressource était refusé" msgid "Action to be executed when a child process dies" msgstr "Action à exécuter quand un processus enfant meurt" msgid "" "Add comments to iptables rules. Set to false to disallow the addition of " "comments to generated iptables rules that describe each rule's purpose. " "System must support the iptables comments module for addition of comments." msgstr "" "Ajouter des commentaires aux règles iptables. Définir avec la valeur false " "pour interdire l'ajout de commentaires aux règles iptables générées qui " "décrivent l'objectif de chaque règle. Le système doit prendre en charge le " "module de commentaires iptables pour l'ajout de commentaires." msgid "Address not present on interface" msgstr "Une adresse n'est pas présente sur l'interface" msgid "Adds test attributes to core resources." msgstr "Ajoute les attributs de test aux ressources principales." #, python-format msgid "Agent %(id)s is not a L3 Agent or has been disabled" msgstr "L'agent %(id)s n'est pas un agent L3 ou a été désactivé" msgid "" "Agent starts with admin_state_up=False when enable_new_agents=False. In the " "case, user's resources will not be scheduled automatically to the agent " "until admin changes admin_state_up to True." msgstr "" "L'agent démarre avec admin_state_up=False si enable_new_agents=False. Dans " "ce cas, les ressources de l'utilisateur ne sont pas planifiées " "automatiquement pour l'agent sauf si l'administrateur affecte la valeur True " "à admin_state_up. " #, python-format msgid "Agent updated: %(payload)s" msgstr "Mise à jour de l'agent: %(payload)s" msgid "Allow auto scheduling networks to DHCP agent." msgstr "Autorise la planification automatique des réseaux de l'agent DHCP." msgid "Allow auto scheduling of routers to L3 agent." msgstr "Autorise la planification automatique des routeurs vers l'agent L3." msgid "" "Allow overlapping IP support in Neutron. Attention: the following parameter " "MUST be set to False if Neutron is being used in conjunction with Nova " "security groups." msgstr "" "Autoriser la prise en charge IP du chevauchement dans Neutron. Attention : " "le paramètre ci-après DOIT être défini sur False si Neutron est utilisé " "conjointement avec des groupes de sécurité Nova." msgid "Allow running metadata proxy." msgstr "Autorisez le proxy de métadonnées en cours d'exécution." msgid "Allow sending resource operation notification to DHCP agent" msgstr "" "Autoriser l'envoi de notifications d'opérations de ressources à l'agent DHCP" msgid "Allow the creation of PTR records" msgstr "Autoriser la création d'enregistrements PTR" msgid "Allow the usage of the bulk API" msgstr "Autoriser l'utilisation de l'API de traitement en bloc" msgid "Allow to perform insecure SSL (https) requests to nova metadata" msgstr "" "Permet d'effectuer des requêtes (https) non sécurisées aux métadonnées de " "nova" msgid "" "Allows for serving metadata requests coming from a dedicated metadata access " "network whose CIDR is 169.254.169.254/16 (or larger prefix), and is " "connected to a Neutron router from which the VMs send metadata:1 request. 
In " "this case DHCP Option 121 will not be injected in VMs, as they will be able " "to reach 169.254.169.254 through a router. This option requires " "enable_isolated_metadata = True." msgstr "" "Permet le traitement des demandes de métadonnées en provenance d'un réseau " "d'accès de métadonnées dédié dont le CIDR est 169.254.169.254/16 (ou un " "préfixe plus long), et qui est connecté à un routeur Neutron depuis lequel " "les machines virtuelles envoient une demande metadata:1. Dans ce cas, DHCP " "Option 121 n'est pas injecté dans les machines virtuelles, car celles-ci " "pourront accéder à l'adresse 169.254.169.254 via un routeur. Cette option " "requiert enable_isolated_metadata = True." msgid "An RBAC policy already exists with those values." msgstr "Une stratégie RBAC existe déjà avec ces valeurs." msgid "An identifier must be specified when updating a subnet" msgstr "" "Un identificateur doit être spécifié lors de la mise à jour d'un sous-réseau" msgid "" "An ordered list of extension driver entrypoints to be loaded from the " "neutron.ml2.extension_drivers namespace. For example: extension_drivers = " "port_security,qos" msgstr "" "An ordered list of extension driver entrypoints to be loaded from the " "neutron.ml2.extension_drivers namespace. For example: extension_drivers = " "port_security,qos" msgid "" "An ordered list of networking mechanism driver entrypoints to be loaded from " "the neutron.ml2.mechanism_drivers namespace." msgstr "" "Liste ordonnée de points d'entrée de pilote de mécanisme à charger à partir " "de l'espace de nom neutron.ml2.mechanism_drivers." msgid "An unknown error has occurred. Please try your request again." msgstr "Une erreur inconnue s'est produite. Renouvelez votre demande." msgid "Async process didn't respawn" msgstr "Le processus Async n'a pas été relancé" msgid "Authorization URL for connecting to designate in admin context" msgstr "" "URL d'autorisation pour la connexion au réseau désigné dans un contexte admin" msgid "Automatically remove networks from offline DHCP agents." msgstr "Supprime automatiquement les réseaux des agents DHCP hors ligne." msgid "" "Automatically reschedule routers from offline L3 agents to online L3 agents." msgstr "" "Replanifier automatiquement les routeurs pour qu'ils passent d'agents L3 " "hors connexion aux agents L3 connectés." msgid "Availability zone of this node" msgstr "Zone de disponibilité du noeud" msgid "Available commands" msgstr "Commandes disponibles" #, python-format msgid "Base MAC: %s" msgstr "MAC de base : %s" msgid "" "Base log dir for dnsmasq logging. The log contains DHCP and DNS log " "information and is useful for debugging issues with either DHCP or DNS. If " "this section is null, disable dnsmasq log." msgstr "" "Répertoire de journaux de base pour la consignation dnsmasq. Le journal " "contient des informations de journal DHCP et DNS et s'avère utile pour " "déboguer les problèmes liés à DHCP ou DNS. Si cette section est NULL, " "désactivez la consignation dnsmasq. 
" msgid "Body contains invalid data" msgstr "Le corps contient des données non valides" msgid "Bulk operation not supported" msgstr "Opération globale non prise en charge" msgid "CIDR to monitor" msgstr "CIDR à surveiller" #, python-format msgid "Callback for %(resource_type)s not found" msgstr "Rappel pour %(resource_type)s introuvable" #, python-format msgid "Callback for %(resource_type)s returned wrong resource type" msgstr "" "Le rappel pour %(resource_type)s a renvoyé un type de ressource incorrect" #, python-format msgid "Cannot add floating IP to port %s that has no fixed IPv4 addresses" msgstr "" "Impossible d'ajouter une adresse IP flottante au port %s qui n'a pas " "d'adresse IPv4 fixe" #, python-format msgid "Cannot add multiple callbacks for %(resource_type)s" msgstr "Impossible d'ajouter plusieurs rappels pour %(resource_type)s" #, python-format msgid "Cannot allocate IPv%(req_ver)s subnet from IPv%(pool_ver)s subnet pool" msgstr "" "Impossible d'allouer le sous-réseau IPv%(req_ver)s à partir du pool de sous-" "réseau IPv%(pool_ver)s" msgid "Cannot allocate requested subnet from the available set of prefixes" msgstr "" "Impossible d'allouer le sous-réseau demandé à partir de l'ensemble de " "préfixes disponibles" msgid "Cannot disable enable_dhcp with ipv6 attributes set" msgstr "Impossible de désactiver enable_dhcp avec des attributs ipv6 définis" #, python-format msgid "Cannot handle subnet of type %(subnet_type)s" msgstr "Impossible de traiter le sous-réseau de type %(subnet_type)s" msgid "Cannot have multiple IPv4 subnets on router port" msgstr "Impossible d'avoir plusieurs sous-réseaux IPv4 sur le port de routeur" #, python-format msgid "" "Cannot have multiple router ports with the same network id if both contain " "IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s" msgstr "" "Impossible d'avoir plusieurs ports de routeur avec le même ID réseau s'ils " "contiennent tous des sous-réseaux IPv6. Le port %(p)s existant a un ou " "plusieurs sous-réseaux IPv6 et l'ID réseau %(nid)s" #, python-format msgid "" "Cannot host distributed router %(router_id)s on legacy L3 agent %(agent_id)s." msgstr "" "Impossible d'héberger un routeur distribué %(router_id)s sur l'agent L3 " "existant %(agent_id)s." 
msgid "Cannot specify both subnet-id and port-id" msgstr "Impossible de spécifier l'ID sous-réseau et l'ID port" msgid "Cannot understand JSON" msgstr "Impossible de comprendre JSON" #, python-format msgid "Cannot update read-only attribute %s" msgstr "Impossible de mettre à jour l'attribut en lecture seule %s" msgid "Certificate Authority public key (CA cert) file for ssl" msgstr "" "Fichier de clés publiques de l'autorité de certification (CA cert) pour SSL" msgid "Check ebtables installation" msgstr "Vérifier l'installation ebtables " msgid "Check for ARP header match support" msgstr "Vérifier le support de correspondance d'en-tête ARP" msgid "Check for ARP responder support" msgstr "Vérifier le support de programme de réponse ARP" msgid "Check for ICMPv6 header match support" msgstr "Vérifier le support de correspondance d'en-tête ICMPv6" msgid "Check for OVS Geneve support" msgstr "Vérifier le support OVS Geneve" msgid "Check for OVS vxlan support" msgstr "Vérifier le support OVS vxlan" msgid "Check for VF management support" msgstr "Vérifier le support de gestion VF" msgid "Check for iproute2 vxlan support" msgstr "Vérifier le support iproute2 vxlan" msgid "Check for nova notification support" msgstr "Vérifier le support de notification de Nova" msgid "Check for patch port support" msgstr "Vérifier le support de port de correctif" msgid "Check ip6tables installation" msgstr "Consultez l'installation ip6tables" msgid "Check ipset installation" msgstr "Vérifier l'installation ipset" msgid "Check keepalived IPv6 support" msgstr "Vérifier le support de keepalived IPv6" msgid "Check minimal dibbler version" msgstr "Vérifier la version minimale de dibbler" msgid "Check minimal dnsmasq version" msgstr "Vérifier la version minimale de dnsmasq" msgid "Check netns permission settings" msgstr "Vérifier les autorisations netns" msgid "Check ovs conntrack support" msgstr "Consultez le support ovs conntrack" msgid "Check ovsdb native interface support" msgstr "Consulter le support d'interface native ovsdb" #, python-format msgid "" "Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of " "subnet %(sub_id)s" msgstr "" "Le routage CIDR %(subnet_cidr)s du sous-réseau %(subnet_id)s chevauche le " "routage CIDR %(cidr)s du sous-réseau %(sub_id)s" msgid "Cleanup resources of a specific agent type only." msgstr "Ressources de nettoyage d'un type d'agent spécifique uniquement." msgid "Client certificate for nova metadata api server." msgstr "Certificat client pour le serveur d'API des métadonnées nova." msgid "" "Comma-separated list of : tuples, mapping " "network_device to the agent's node-specific list of virtual functions that " "should not be used for virtual networking. vfs_to_exclude is a semicolon-" "separated list of virtual functions to exclude from network_device. The " "network_device in the mapping should appear in the physical_device_mappings " "list." msgstr "" "Liste des uplets :, séparés par des " "virgules, qui mappent network_device à la liste de fonctions spécifique à un " "nœud d'agent des fonctions virtuelles qui ne doivent pas être utilisées pour " "une mise en réseau virtuelle. vfs_to_exclude est une liste de fonctions " "virtuelles, séparées par des virgules, à exclure de network_device. " "network_device dans le mappage doit figurer dans la liste " "physical_device_mappings." 
msgid "" "Comma-separated list of : tuples mapping " "physical network names to the agent's node-specific physical network device " "interfaces of SR-IOV physical function to be used for VLAN networks. All " "physical networks listed in network_vlan_ranges on the server should have " "mappings to appropriate interfaces on each agent." msgstr "" "Liste des uplets : séparés par des " "virgules, qui mappent les noms de réseau physique aux interfaces d'unité " "réseau physiques spécifiques à un nœud d'agent de la fonction physique SR-" "IOV à utiliser pour les réseaux VLAN. Tous les réseaux physiques répertoriés " "dans network_vlan_ranges sur le serveur doivent avoir des mappages aux " "interfaces appropriées sur chaque agent." msgid "" "Comma-separated list of : tuples " "mapping physical network names to the agent's node-specific physical network " "interfaces to be used for flat and VLAN networks. All physical networks " "listed in network_vlan_ranges on the server should have mappings to " "appropriate interfaces on each agent." msgstr "" "Liste des uplets : séparés par des " "virgules qui mappent les noms de réseau physique aux interfaces de réseau " "physique spécifiques à un nœud d'agent à utiliser pour les réseaux " "centralisés et VLAN. Tous les réseaux physiques répertoriés dans " "network_vlan_ranges sur le serveur doivent avoir des mappages aux interfaces " "appropriées sur chaque agent." msgid "" "Comma-separated list of : tuples enumerating ranges of GRE " "tunnel IDs that are available for tenant network allocation" msgstr "" "Liste d'uplets : séparés par des virgules énumérant des " "plages d'ID GRE disponibles pour l'allocation de réseau locataire" msgid "" "Comma-separated list of : tuples enumerating ranges of " "Geneve VNI IDs that are available for tenant network allocation" msgstr "" "Liste d'uplets : séparés par des virgules énumérant des " "plages d'ID VNI Geneve disponibles pour l'allocation de réseau locataire" msgid "" "Comma-separated list of : tuples enumerating ranges of " "VXLAN VNI IDs that are available for tenant network allocation" msgstr "" "Liste d'uplets : séparés par des virgules énumérant des " "plages d'ID VNI VXLAN disponibles pour l'allocation de réseau locataire" msgid "" "Comma-separated list of the DNS servers which will be used as forwarders." msgstr "" "Liste séparée par des virgules des serveurs DNS qui seront utilisés comme " "réexpéditeurs." msgid "Command to execute" msgstr "Commande à exécuter" msgid "Config file for interface driver (You may also use l3_agent.ini)" msgstr "" "Fichier de configuration du pilote d'interface (vous pouvez aussi utiliser " "l3_agent.ini)" #, python-format msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s" msgstr "Valeur en conflit ethertype %(ethertype)s pour le CIDR %(cidr)s" msgid "" "Controls whether the neutron security group API is enabled in the server. It " "should be false when using no security groups or using the nova security " "group API." msgstr "" "Indique si l'API de groupe de sécurité neutron est activée sur le serveur. " "Elle doit être false si aucun groupe de sécurité n'est utilisé ou en cas " "d'utilisation de l'API du groupe de sécurité neutron." #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" msgstr "" "Echec lors de la liaison à %(host)s:%(port)s après attente de %(time)d " "secondes" msgid "Could not deserialize data" msgstr "Impossible de désérialiser des données" msgid "" "DHCP lease duration (in seconds). 
Use -1 to tell dnsmasq to use infinite " "lease times." msgstr "" "Durée de bail DHCP (en secondes). Utiliser -1 pour ordonner dnsmasq pour " "utiliser des durées de bail illimitées." msgid "" "DVR deployments for VXLAN/GRE/Geneve underlays require L2-pop to be enabled, " "in both the Agent and Server side." msgstr "" "Les déploiements DVR pour les sous-couches VXLAN/GRE/Geneve nécessitent que " "L2-pop soit activé, à la fois côté agent et côté serveur." msgid "" "Database engine for which script will be generated when using offline " "migration." msgstr "" "Moteur de base de données pour lequel le script va être généré lors de " "l'utilisation d'une migration hors ligne." msgid "Default external networks must be shared to everyone." msgstr "Les réseaux externes par défaut doivent être partagés par tous." msgid "" "Default network type for external networks when no provider attributes are " "specified. By default it is None, which means that if provider attributes " "are not specified while creating external networks then they will have the " "same type as tenant networks. Allowed values for external_network_type " "config option depend on the network type values configured in type_drivers " "config option." msgstr "" "Type de réseau par défaut pour des réseaux externes lorsqu'aucun attribut de " "fournisseur n'est spécifié. La valeur par défaut None signifie que si des " "attributs de fournisseur ne sont pas spécifiés lors de la création de " "réseaux externes, ces derniers prennent le même type que les réseaux " "locataires. Les valeurs autorisées pour l'option de config " "external_network_type dépendent des valeurs de type de réseau configurées " "dans l'option de config type_drivers. " msgid "" "Default number of RBAC entries allowed per tenant. A negative value means " "unlimited." msgstr "" "Nombre par défaut d'entrées RBAC autorisées par locataire. Une valeur " "négative signifie que le nombre est illimité." msgid "" "Default number of resource allowed per tenant. A negative value means " "unlimited." msgstr "" "Nombre de ressources par défaut autorisées par le locataire. Une valeur " "négative signifie illimité." msgid "Default security group" msgstr "Groupe de sécurité par défaut" msgid "Default security group already exists." msgstr "Le groupe de sécurité par défaut existe déjà." msgid "" "Default value of availability zone hints. The availability zone aware " "schedulers use this when the resources availability_zone_hints is empty. " "Multiple availability zones can be specified by a comma separated string. " "This value can be empty. In this case, even if availability_zone_hints for a " "resource is empty, availability zone is considered for high availability " "while scheduling the resource." msgstr "" "Valeur par défaut des suggestions de zone de disponibilité. Les " "planificateurs de zone de disponibilité utilisent cette valeur lorsque le " "paramètre resources availability_zone_hints est à blanc. Plusieurs zones de " "disponibilité peuvent être indiquées en les séparant par une virgule. Cette " "valeur peut être vide. Dans ce cas, même si le paramètre " "availability_zone_hints d'une ressource est à blanc, la zone de " "disponibilité est prise en compte pour la haute disponibilité lors de la " "planification de la ressource." msgid "" "Define the default value of enable_snat if not provided in " "external_gateway_info." msgstr "" "Définissez la valeur par défaut de enable_snat si elle n'est pas indiquée " "dans external_gateway_info." 
msgid "" "Defines providers for advanced services using the format: :" ":[:default]" msgstr "" "Définit des fournisseurs pour les services avancés utilisant le format : " "::[:default]" msgid "Delete the namespace by removing all devices." msgstr "Supprimez l'espace de nom en supprimant toutes les unités." #, python-format msgid "Deleting port %s" msgstr "Suppression du port %s" #, python-format msgid "Deployment error: %(reason)s." msgstr "Erreur de déploiement : %(reason)s." msgid "Destroy IPsets even if there is an iptables reference." msgstr "Détruire les IPsets même s'il y a une référence iptables." msgid "Destroy all IPsets." msgstr "Destruction de tous les IPsets." #, python-format msgid "Device %(dev_name)s in mapping: %(mapping)s not unique" msgstr "Périphérique %(dev_name)s non unique dans le mappage '%(mapping)s'" msgid "Device not found" msgstr "Equipement non trouvé" msgid "Domain to use for building the hostnames" msgstr "Domaine à utiliser pour générer les noms d'hôte" msgid "Downgrade no longer supported" msgstr "La rétromigration n'est plus prise en charge" #, python-format msgid "Driver %s is not unique across providers" msgstr "Le pilote %s n'est pas unique entre les fournisseurs" msgid "Driver for external DNS integration." msgstr "Pilote pour intégration DNS externe." msgid "Driver for security groups firewall in the L2 agent" msgstr "" "Pilote pour le pare-feu de groupes de sécurité dans l'agent de niveau 2" msgid "Driver to use for scheduling network to DHCP agent" msgstr "Pilote à utiliser pour la planification du réseau de l'agent DHCP" msgid "Driver to use for scheduling router to a default L3 agent" msgstr "" "Pilote à utiliser pour la planification du routeur de l'agent L3 par défaut" msgid "" "Driver used for ipv6 prefix delegation. This needs to be an entry point " "defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg for " "entry points included with the neutron source." msgstr "" "Pilote utilisé pour la délégation de préfixe ipv6. Il doit s'agir d'un point " "d'entrée défini dans l'espace de nom neutron.agent.linux.pd_drivers. Voir " "setup.cfg pour connaître les points d'entrée inclus avec la source de " "neutron. " #, python-format msgid "" "Duplicate L3HARouterAgentPortBinding is created for router(s) %(router)s. " "Database cannot be upgraded. Please, remove all duplicates before upgrading " "the database." msgstr "" "Un élément L3HARouterAgentPortBinding en double est créé pour le ou les " "routeurs %(router)s. La base de données ne peut pas être mise à niveau. " "Retirez tous les éléments en double avant de mettre à niveau la base de " "données." msgid "Duplicate Security Group Rule in POST." msgstr "" "Règle de groupe de sécurité en double dans l'autotest à la mise sous tension." msgid "Duplicate address detected" msgstr "Adresse en double détectée" #, python-format msgid "ERROR: %s" msgstr "ERREUR : %s" msgid "" "ERROR: Unable to find configuration file via the default search paths (~/." "neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!" msgstr "" "ERREUR : Impossible de trouver le fichier de configuration via les chemins " "de recherche par défaut (~/.neutron/, ~/, /etc/neutron/, /etc/) et l'option " "'--config-file' !" msgid "" "Either one of parameter network_id or router_id must be passed to _get_ports " "method." msgstr "" "Le paramètre network_id ou le paramètre router_id doit être passé à la " "méthode _get_ports." 
msgid "Either subnet_id or port_id must be specified" msgstr "L'ID sous-réseau ou l'ID port doit être spécifié." msgid "Enable HA mode for virtual routers." msgstr "Activer le mode haute disponibilité pour les routeurs virtuels." msgid "Enable SSL on the API server" msgstr "Active SSL sur le serveur API" msgid "" "Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 " "plugin using linuxbridge mechanism driver" msgstr "" "Activer VXLAN sur l'agent. Il peut être activé lorsque l'agent est géré par " "le plug-in ml2 utilisant le pilote de mécanisme linuxbridge" msgid "" "Enable services on an agent with admin_state_up False. If this option is " "False, when admin_state_up of an agent is turned False, services on it will " "be disabled. Agents with admin_state_up False are not selected for automatic " "scheduling regardless of this option. But manual scheduling to such agents " "is available if this option is True." msgstr "" "Activer les services sur un agent ayant admin_state_up avec une valeur " "False. Si cette option est False, lorsque admin_state_up d'un agent se voit " "attribuer la valeur False, les services qui y sont associés seront " "automatiquement désactivés. Les agents ayant admin_state_up avec la valeur " "False ne sont pas sélectionnés pour la planification automatique, quelle que " "soit la valeur de cette option. Toutefois, il est possible de procéder à une " "planification manuelle pour ces agents si cette option a pour valeur True." msgid "" "Enables IPv6 Prefix Delegation for automatic subnet CIDR allocation. Set to " "True to enable IPv6 Prefix Delegation for subnet allocation in a PD-capable " "environment. Users making subnet creation requests for IPv6 subnets without " "providing a CIDR or subnetpool ID will be given a CIDR via the Prefix " "Delegation mechanism. Note that enabling PD will override the behavior of " "the default IPv6 subnetpool." msgstr "" "Active la délégation de préfixe IPv6 pour l'allocation CIDR de sous-réseau " "automatique. Définissez ce paramètre sur True pour activer la délégation de " "préfixe IPv6 pour l'allocation de sous-réseau dans un environnement " "compatible PD. Les utilisateurs effectuant des demandes de création de sous-" "réseau pour des sous-réseaux IPv6 sans indiquer de CIDR ou d'ID de pool de " "sous-réseau se verront affecter un CIDR via le mécanisme de délégation de " "préfixe. Notez que l'activation de PD se substitue au comportement du pool " "de sous-réseau IPv6 par défaut." msgid "" "Enables the dnsmasq service to provide name resolution for instances via DNS " "resolvers on the host running the DHCP agent. Effectively removes the '--no-" "resolv' option from the dnsmasq process arguments. Adding custom DNS " "resolvers to the 'dnsmasq_dns_servers' option disables this feature." msgstr "" "Permet au service dnsmasq de fournir la résolution de nom pour les instances " "via des programmes de résolution DNS sur l'hôte exécutant l'agent DHCP. " "Supprime l'option '--no-resolv' des arguments de processus dnsmasq. L'ajout " "de programmes de résolution DNS personnalisés à l'option " "'dnsmasq_dns_servers' désactive cette fonction." #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "Erreur %(reason)s lors de la tentative d'exécution de l'opération." 
#, python-format msgid "Error parsing dns address %s" msgstr "Erreur lors de l'analyse syntaxique de l'adresse DNS %s" #, python-format msgid "Error while reading %s" msgstr "Erreur lors de la lecture de %s" #, python-format msgid "" "Exceeded %s second limit waiting for address to leave the tentative state." msgstr "" "Limite dépassée de %s secondes, en attente adresse pour sortie de l'état de " "tentative." msgid "Existing prefixes must be a subset of the new prefixes" msgstr "" "Les préfixes existants doivent être un sous-réseau des nouveaux préfixes" #, python-format msgid "" "Exit code: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: " "%(stderr)s" msgstr "" "Code de sortie: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; " "Stderr: %(stderr)s" #, python-format msgid "Extension %(driver)s failed." msgstr "Echec de l'extension %(driver)s. " #, python-format msgid "" "Extension driver %(driver)s required for service plugin %(service_plugin)s " "not found." msgstr "" "Le pilote d'extension %(driver)s requis pour le plugin de service " "%(service_plugin)s est introuvable." msgid "" "Extension to use alongside ml2 plugin's l2population mechanism driver. It " "enables the plugin to populate VXLAN forwarding table." msgstr "" "Extension à utiliser avec le pilote de mécanisme l2population du plug-in " "ml2. Elle permet au plug-in de remplir la table de réacheminement VXLAN." #, python-format msgid "Extension with alias %s does not exist" msgstr "L'extension avec l'alias %s n'existe pas" msgid "Extensions list to use" msgstr "Liste d'extensions à utiliser." #, python-format msgid "External IP %s is the same as the gateway IP" msgstr "L'adresse IP externe %s est identique à l'adresse IP de passerelle" #, python-format msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found." msgstr "" "Echec de la replanification du routeur %(router_id)s : aucun agent l3 " "éligible trouvé." #, python-format msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s." msgstr "" "Echec de planification du routeur %(router_id)s vers l'agent L3 %(agent_id)s." #, python-format msgid "" "Failed to create port on network %(network_id)s, because fixed_ips included " "invalid subnet %(subnet_id)s" msgstr "" "Echec de la création de port sur le réseau %(network_id)s car les adresses " "IP fixes incluent le sous-réseau non valide %(subnet_id)s " #, python-format msgid "Failed to locate source for %s." msgstr "Échec pour localiser la source de %s." 
msgid "Failed to remove supplemental groups" msgstr "Echec de la suppression des groupes supplémentaires" #, python-format msgid "Failed to set gid %s" msgstr "Echec de la définition du GID %s" #, python-format msgid "Failed to set uid %s" msgstr "Echec de la définition de l'UID %s" #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "Echec de la configuration du port de tunnel %(type)s sur %(ip)s" msgid "Failure applying iptables rules" msgstr "Échec lors de la mise à jour des règles iptables" #, python-format msgid "Failure waiting for address %(address)s to become ready: %(reason)s" msgstr "" "Echec lors de l'attente du passage de l'adresse %(address)s à l'état prêt : " "%(reason)s" msgid "Flat provider networks are disabled" msgstr "Les réseaux de fournisseurs centralisés sont désactivés" msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "Pour les protocole TCP/UDP, port_range_min doit être <= port_range_max" msgid "Force ip_lib calls to use the root helper" msgstr "Forcez les appels ip_lib à utiliser Root Helper" msgid "Gateway IP version inconsistent with allocation pool version" msgstr "" "Version IP de passerelle non cohérente avec la version de pool d'allocation" msgid "Gateway is not valid on subnet" msgstr "La passerelle n'est pas valide sur le sous-réseau." msgid "" "Group (gid or name) running metadata proxy after its initialization (if " "empty: agent effective group)." msgstr "" "Groupe (UID ou nom) exécutant le proxy de métadonnées après son " "initialisation (si vide : groupe effectif de l'agent)." msgid "Group (gid or name) running this process after its initialization" msgstr "Groupe (UID ou nom) exécutant ce processus après son initialisation" msgid "" "Hostname to be used by the Neutron server, agents and services running on " "this machine. All the agents and services running on this machine must use " "the same host value." msgstr "" "Nom d'hôte qui doit être utilisé par le serveur, les agents et les services " "Neutron qui s'exécutent sur cette machine. Tous les agents et services qui " "s'exécutent sur cette machine doivent utiliser la même valeur d'hôte." #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" "min) is missing." msgstr "" "Le code ICMP (port-range-max) %(value)s est fourni mais le type ICMP (port-" "range-min) est manquant." msgid "ID of network" msgstr "ID du réseau" msgid "ID of network to probe" msgstr "ID du réseau à sonder" msgid "ID of probe port to delete" msgstr "ID du port sonde à supprimer" msgid "ID of probe port to execute command" msgstr "ID du port sonde pour exécuter la commande" msgid "ID of the router" msgstr "Identifiant du routeur" #, python-format msgid "IP address %(ip)s already allocated in subnet %(subnet_id)s" msgstr "L'adresse IP %(ip)s est déjà dans le sous-réseaux %(subnet_id)s" #, python-format msgid "IP address %(ip)s does not belong to subnet %(subnet_id)s" msgstr "L'adresse IP %(ip)s n'appartient pas au sous-réseau %(subnet_id)s" msgid "IP allocation failed. Try again later." msgstr "Échec de l'allocation IP. Réessayez ultérieurement." 
msgid "IP allocation requires subnet_id or ip_address" msgstr "L'allocation d'adresse IP requiert subnet_id ou ip_address" #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables rules:\n" "%s" msgstr "" "IPTablesManager.apply n'a pas pu appliquer l'ensemble d'iptables suivant " "iptables :\n" "%s" msgid "IPv6 Address Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "" "Le mode Adresse IPv6 doit être SLAAC ou Sans état pour la délégation de " "préfixe. " msgid "IPv6 RA Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "" "Le mode RA IPv6 doit être SLAAC ou Sans état pour la délégation de préfixe. " #, python-format msgid "" "IPv6 address %(ip)s cannot be directly assigned to a port on subnet " "%(subnet_id)s as the subnet is configured for automatic addresses" msgstr "" "L'adresse IPv6 %(ip)s ne peut pas être directement affectée à un port du " "sous-réseau%(subnet_id)s car celui-ci est configuré pour l'obtention " "automatique d'adresses " #, python-format msgid "" "IPv6 subnet %s configured to receive RAs from an external router cannot be " "added to Neutron Router." msgstr "" "Le sous-réseau IPv6 %s configuré pour recevoir les avertissements (RA) d'un " "routeur externe ne peut pas être ajouté au routeur Neutron." msgid "" "If True, then allow plugins that support it to create VLAN transparent " "networks." msgstr "" "Si True, autorisez les plug-in qui les prennent en charge pour créer les " "réseaux VLAN transparents." msgid "Illegal IP version number" msgstr "Numéro de version IP illégal" msgid "" "In some cases the Neutron router is not present to provide the metadata IP " "but the DHCP server can be used to provide this info. Setting this value " "will force the DHCP server to append specific host routes to the DHCP " "request. If this option is set, then the metadata service will be activated " "for all the networks." msgstr "" "Dans certains cas, le routeur Neutron n'est pas présent pour fournir l'IP de " "métadonnées mais le serveur DHCP peut être utilisé pour fournir ces " "informations. Lorsque cette valeur est définie, cela force le serveur DHCP à " "ajouter des routes hôtes spécifiques à la demande DHCP. Lorsque cette option " "est définie, le service de métadonnées est activé pour tous les réseaux." msgid "" "Indicates that this L3 agent should also handle routers that do not have an " "external network gateway configured. This option should be True only for a " "single agent in a Neutron deployment, and may be False for all agents if all " "routers must have an external network gateway." msgstr "" "Indique que cet agent L3 doit aussi traiter les routeurs pour lesquels " "aucune passerelle de réseau externe n'est configurée. Cette option doit être " "définie sur True uniquement pour un seul agent dans un déploiement Neutron ; " "elle peut être définie sur False pour tous les agents si tous les routeurs " "doivent avoir une passerelle de réseau externe." #, python-format msgid "Instance of class %(module)s.%(class)s must contain _cache attribute" msgstr "" "L'instance de la classe %(module)s.%(class)s doit contenir l'attribut _cache." #, python-format msgid "Insufficient prefix space to allocate subnet size /%s" msgstr "" "Espace préfixe insuffisant pour l'allocation de la taille de sous-réseau /%s" msgid "Insufficient rights for removing default security group." msgstr "Droits insuffisants pour retirer le groupe de sécurité par défaut." msgid "" "Integration bridge to use. 
Do not change this parameter unless you have a " "good reason to. This is the name of the OVS integration bridge. There is one " "per hypervisor. The integration bridge acts as a virtual 'patch bay'. All VM " "VIFs are attached to this bridge and then 'patched' according to their " "network connectivity." msgstr "" "Pont d'intégration à utiliser. Ne modifiez pas ce paramètre à moins d'avoir " "une bonne raison pour cela. Il s'agit du nom du pont d'intégration OVS. Il y " "en a un par hyperviseur. Le pont d'intégration fait office de 'baie " "corrective' virtuelle. Tous les VIF de machine virtuelle sont connectés à ce " "pont puis 'corrigés' d'après leur connectivité réseau." msgid "Interface to monitor" msgstr "Interface à surveiller" msgid "" "Interval between checks of child process liveness (seconds), use 0 to disable" msgstr "" "Intervalle entre les vérifications de l'activité du processus enfant (en " "secondes). Utilisez 0 pour désactiver" msgid "Interval between two metering measures" msgstr "Intervalle entre deux mesures" msgid "Interval between two metering reports" msgstr "Intervalle entre deux rapports de mesures" #, python-format msgid "Invalid Device %(dev_name)s: %(reason)s" msgstr "Périphérique non valide %(dev_name)s : %(reason)s" #, python-format msgid "" "Invalid action '%(action)s' for object type '%(object_type)s'. Valid " "actions: %(valid_actions)s" msgstr "" "Action non valide %(action)s' pour le type d'objet %(object_type)s'. Actions " "valides : %(valid_actions)s" #, python-format msgid "" "Invalid authentication type: %(auth_type)s, valid types are: " "%(valid_auth_types)s" msgstr "" "Type d'authentification non valide : %(auth_type)s, les types valides sont : " "%(valid_auth_types)s" #, python-format msgid "Invalid ethertype %(ethertype)s for protocol %(protocol)s." msgstr "ethertype %(ethertype)s non valide pour le protocole %(protocol)s." #, python-format msgid "Invalid format: %s" msgstr "Format non valide : %s" #, python-format msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s" msgstr "" "Etat d'instance non valide : %(state)s, les états valides sont : " "%(valid_states)s" #, python-format msgid "Invalid mapping: '%s'" msgstr "Mappage non valide : '%s'" #, python-format msgid "Invalid pci slot %(pci_slot)s" msgstr "Port pci invalide %(pci_slot)s" #, python-format msgid "Invalid provider format. Last part should be 'default' or empty: %s" msgstr "" "Format de fournisseur non valide. La dernière partie doit être 'default' ou " "vide : %s" #, python-format msgid "Invalid resource type %(resource_type)s" msgstr "Ressource type %(resource_type)s non valide" #, python-format msgid "Invalid route: %s" msgstr "Chemin non valide : %s" msgid "Invalid service provider format" msgstr "Format de fournisseur de service non valide" #, python-format msgid "" "Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255." msgstr "" "Valeur non valide pour ICMP %(field)s (%(attr)s) %(value)s. Elle doit être " "comprise entre 0 et 255." #, python-format msgid "Invalid value for port %(port)s" msgstr "Valeur non valide pour le port %(port)s" msgid "" "Iptables mangle mark used to mark ingress from external network. This mark " "will be masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "Marque Mangle Iptables utilisée pour marquer les entrées du réseau externe. " "Cette marque sera masquée avec 0xffff de sorte que seuls les 16 bits les " "plus bas soient utilisés. 
" msgid "" "Iptables mangle mark used to mark metadata valid requests. This mark will be " "masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "Marque Mangle Iptables utilisée pour marquer les demandes valides de " "métadonnées. Cette marque sera masquée avec 0xffff de sorte que seuls les 16 " "bits les plus bas soient utilisés. " msgid "Keepalived didn't respawn" msgstr "Keepalived n'a pas été relancée" msgid "Keepalived didn't spawn" msgstr "Keepalived n'a pas été lancé" #, python-format msgid "" "Kernel HZ value %(value)s is not valid. This value must be greater than 0." msgstr "" "La valeur HZ du noyau %(value)s n'est pas valide. Cette valeur doit être " "supérieure à 0." msgid "L3 agent failure to setup NAT for floating IPs" msgstr "L'agent L3 n'a pas pu configurer NAT pour les IP flottantes" msgid "L3 agent failure to setup floating IPs" msgstr "L'agent L3 n'a pas pu configurer les IP flottantes" msgid "Limit number of leases to prevent a denial-of-service." msgstr "Limiter le nombre de baux pour éviter un déni de service." msgid "List of :" msgstr "Liste de :" msgid "" "List of :: or " "specifying physical_network names usable for VLAN provider and tenant " "networks, as well as ranges of VLAN tags on each available for allocation to " "tenant networks." msgstr "" "Liste de :: ou " "indiquant des noms physical_network utilisables pour les réseaux de " "fournisseurs VLAN et de locataires, ainsi que les plages d'étiquettes VLAN " "disponibles dans le cadre de l'allocation aux réseaux locataires." msgid "" "List of network type driver entrypoints to be loaded from the neutron.ml2." "type_drivers namespace." msgstr "" "Liste des points d'entrées du pilote de type de réseau à charger à partir de " "l'espace de nom neutron.ml2.type_drivers." msgid "" "List of physical_network names with which flat networks can be created. Use " "default '*' to allow flat networks with arbitrary physical_network names. " "Use an empty list to disable flat networks." msgstr "" "Liste de noms de réseau physique qui peuvent être utilisés pour créer des " "réseaux centralisés. Utilisez par défaut '*' pour autoriser les réseaux " "centralisés avec des noms de réseau physique arbitraires. Utilisez une " "liste vide pour désactiver les réseaux centralisés." msgid "Location for Metadata Proxy UNIX domain socket." msgstr "Emplacement du socket de domaine UNIX du proxy de métadonnées." msgid "Location of Metadata Proxy UNIX domain socket" msgstr "Emplacement du socket de domaine UNIX du proxy de métadonnées" msgid "Location to store DHCP server config files." msgstr "Emplacement de stockage des fichiers de configuration du serveur DHCP." msgid "Location to store IPv6 PD files." msgstr "Emplacement pour stocker les fichiers IPv6 PD" msgid "Location to store IPv6 RA config files" msgstr "Emplacement de stockage des fichiers de configuration IPv6 RA" msgid "Location to store child pid files" msgstr "Emplacement de stockage des fichiers de PID enfant" msgid "Log agent heartbeats" msgstr "Consigner les pulsations d'agent" msgid "" "MTU of the underlying physical network. Neutron uses this value to calculate " "MTU for all virtual network components. For flat and VLAN networks, neutron " "uses this value without modification. For overlay networks such as VXLAN, " "neutron automatically subtracts the overlay protocol overhead from this " "value. Defaults to 1500, the standard value for Ethernet." msgstr "" "MTU du réseau physique sous-jacent. 
Neutron utilise cette valeur pour " "calculer la valeur MTU de tous les composants réseau virtuels. Pour les " "réseaux centralisés et VLAN, Neutron utilise cette valeur sans la modifier. " "Pour les réseaux superposés, tels que VXLAN, Neutron soustrait " "automatiquement la surcharge du protocole de superposition de cette valeur. " "La valeur par défaut est 1500, valeur standard pour Ethernet." msgid "MTU size of veth interfaces" msgstr "Taille de MTU des interfaces veth" msgid "Make the l2 agent run in DVR mode." msgstr "Exécuter l'agent l2 dans le mode DVR." msgid "Malformed request body" msgstr "Format de corps de demande incorrect" msgid "MaxRtrAdvInterval setting for radvd.conf" msgstr "Paramètre MaxRtrAdvInterval pour radvd.conf" msgid "Maximum number of DNS nameservers per subnet" msgstr "Nombre maximum de serveurs de noms DNS par sous-réseau" msgid "" "Maximum number of L3 agents which a HA router will be scheduled on. If it is " "set to 0 then the router will be scheduled on every agent." msgstr "" "Nombre maximum d'agents L3 sur lesquels un routeur HA sera planifié. Si ce " "paramètre est défini sur 0, le routeur sera planifié sur chaque agent." msgid "Maximum number of allowed address pairs" msgstr "Nombre maximal de paires d'adresses autorisé" msgid "Maximum number of host routes per subnet" msgstr "Nombre maximal de routes hôte par sous-réseau" msgid "Maximum number of routes per router" msgstr "Nombre maximum de routes par routeur" msgid "" "Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': deduce " "mode from metadata_proxy_user/group values, 'user': set metadata proxy " "socket mode to 0o644, to use when metadata_proxy_user is agent effective " "user or root, 'group': set metadata proxy socket mode to 0o664, to use when " "metadata_proxy_group is agent effective group or root, 'all': set metadata " "proxy socket mode to 0o666, to use otherwise." msgstr "" "Mode du socket de domaine UNIX de proxy de métadonnées, 4 valeurs " "autorisées : 'deduce' : mode de déduction à partir des valeurs de " "metadata_proxy_user/group, 'user' : mode du socket de proxy de métadonnées " "défini sur 0o644, à utiliser lorsque metadata_proxy_user correspond à " "l'utilisateur ou la racine effectif de l'agent, 'group' : mode du socket de " "proxy de métadonnées défini sur 0o664,à utiliser lorsque " "metadata_proxy_group correspond au groupe ou à la racine effectif de " "l'agent, 'all' : mode du socket de proxy de métadonnées défini sur 0o666, à " "utiliser dans les autres cas." msgid "Metering driver" msgstr "Pilote de décompte" msgid "MinRtrAdvInterval setting for radvd.conf" msgstr "Paramètre MinRtrAdvInterval pour radvd.conf" msgid "Minimize polling by monitoring ovsdb for interface changes." msgstr "" "Réduire au minimum l'interrogation en surveillant les changements " "d'interface de l'ovsdb." #, python-format msgid "Missing key in mapping: '%s'" msgstr "Clé manquante dans le mappage : '%s'" msgid "" "Multicast group for VXLAN. When configured, will enable sending all " "broadcast traffic to this multicast group. When left unconfigured, will " "disable multicast VXLAN mode." msgstr "" "Groupe de multidiffusion pour VXLAN. Lorsque ce paramètre est configuré, il " "permet l'envoi de tout le trafic de diffusion vers ce groupe de " "multidiffusion. Dans le cas contraire, il désactive le mode VXLAN de " "multidiffusion." msgid "" "Multicast group(s) for vxlan interface. A range of group addresses may be " "specified by using CIDR notation. 
Specifying a range allows different VNIs " "to use different group addresses, reducing or eliminating spurious broadcast " "traffic to the tunnel endpoints. To reserve a unique group for each possible " "(24-bit) VNI, use a /8 such as 239.0.0.0/8. This setting must be the same on " "all the agents." msgstr "" "Groupe(s) de multidiffusion pour l'interface vxlan. Une plage d'adresses de " "groupe peut être spécifiée en utilisant la notation CIDR. Si une plage est " "indiquée, différents VNI peuvent utiliser différentes adresses de groupe, ce " "qui réduit ou élimine le trafic de multidiffusion fallacieux vers les nœuds " "finaux de tunnel. Pour réserver un groupe unique pour chaque VNI possible " "(24 bits), utilisez /8, par exemple 239.0.0.0/8. Ce paramètre doit être " "identique sur tous les agents." #, python-format msgid "Multiple default providers for service %s" msgstr "Fournisseurs multiples par défaut pour le service %s" #, python-format msgid "Multiple providers specified for service %s" msgstr "Fournisseurs multiples indiqués pour le service %s" msgid "Multiple tenant_ids in bulk security group rule create not allowed" msgstr "" "L'existence de plusieurs ID titulaire n'est pas autorisée lors de la " "création du règle de groupe de sécurité en bloc." msgid "Must also specify protocol if port range is given." msgstr "" "Un protocole doit aussi être précisé si une plage de ports est fournie." msgid "Must specify one or more actions on flow addition or modification" msgstr "" "Doit indiquer une ou plusieurs actions sur l'ajout ou la modification de flux" msgid "Name of Open vSwitch bridge to use" msgstr "Nom du pont Open vSwitch à utiliser" msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "" "Nom de la région nova à utiliser. Utile si keystone gère plusieurs régions." msgid "Namespace of the router" msgstr "Espace de nom du routeur" msgid "Native pagination depend on native sorting" msgstr "La mise en page native dépend du tri natif" #, python-format msgid "" "Need to apply migrations from %(project)s contract branch. This will require " "all Neutron server instances to be shutdown before proceeding with the " "upgrade." msgstr "" "La migrations doit être appliquée depuis la branche contract %(project)s. " "Cela va impliquer l'arrêt de toutes les instances de serveur Neutron avant " "la mise à niveau." msgid "Negative delta (downgrade) not supported" msgstr "Delta négatif (rétromigration) non pris en charge" msgid "Negative relative revision (downgrade) not supported" msgstr "Révision relative négative (rétromigration) non prise en charge" #, python-format msgid "Network %s does not contain any IPv4 subnet" msgstr "Le réseau %s ne contient pas de sous-réseau IPv4 " #, python-format msgid "Network %s is not a valid external network" msgstr "Le réseau %s n'est pas un réseau externe valide." #, python-format msgid "Network %s is not an external network" msgstr "Réseau %s n'est pas un réseau externe" #, python-format msgid "" "Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges " "%(excluded_ranges)s was not found." msgstr "" "Le réseau de taille %(size)s, de plage IP %(parent_range)s (hors plages IP " "%(excluded_ranges)s) est introuvable." #, python-format msgid "Network type value '%s' not supported" msgstr "Valeur de type de réseau '%s' non prise en charge" msgid "Network type value needed by the ML2 plugin" msgstr "Valeur de type de réseau requise par le plug-in ML2" msgid "Neutron core_plugin not configured!" 
msgstr "Neutron core_plugin n'est pas configuré ! " msgid "No default router:external network" msgstr "Aucun réseau router:external par défaut" #, python-format msgid "No default subnetpool found for IPv%s" msgstr "Aucun pool de sous-réseau par défaut trouvé pour IPv%s" msgid "No default subnetpools defined" msgstr "Aucun pool de sous-réseau défini" #, python-format msgid "No more IP addresses available for subnet %(subnet_id)s." msgstr "" "Pas d'autres adresses IP disponibles pour le sous-réseau %(subnet_id)s." msgid "No offline migrations pending." msgstr "Aucune migration hors ligne en attente." #, python-format msgid "No shared key in %s fields" msgstr "Aucune clé partagée dans les champs %s" msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "" "Non autorisé à affecter manuellement un routeur à un agent en mode 'dvr'." msgid "Not allowed to manually remove a router from an agent in 'dvr' mode." msgstr "" "Non autorisé à retirer manuellement un routeur d'un agent en mode 'dvr'." msgid "" "Number of DHCP agents scheduled to host a tenant network. If this number is " "greater than 1, the scheduler automatically assigns multiple DHCP agents for " "a given tenant network, providing high availability for DHCP service." msgstr "" "Nombre d'agents DHCP planifiés pour héberger un réseau titulaire. Si ce " "nombre est supérieur à 1, le planificateur affecte automatiquement plusieurs " "agents DHCP pour un réseau titulaire donné, ce qui fournit de la haute " "disponibilité au service DHCP. " msgid "Number of backlog requests to configure the metadata server socket with" msgstr "" "Nombre de demandes en attente avec lequel configurer le socket du serveur de " "métadonnées" msgid "Number of backlog requests to configure the socket with" msgstr "Nombre de demandes en attente avec lequel configurer le socket" msgid "" "Number of bits in an ipv4 PTR zone that will be considered network prefix. " "It has to align to byte boundary. Minimum value is 8. Maximum value is 24. " "As a consequence, range of values is 8, 16 and 24" msgstr "" "Nombre de bits dans une zone PTR ipv4 qui fera office de préfixe réseau. " "Doit s'aligner sur la frontière de bit. La valeur minimum est 8. La valeur " "maximum est 24. Par conséquent, la plage de valeurs est 8, 16 et 24" msgid "" "Number of bits in an ipv6 PTR zone that will be considered network prefix. " "It has to align to nyble boundary. Minimum value is 4. Maximum value is 124. " "As a consequence, range of values is 4, 8, 12, 16,..., 124" msgstr "" "Nombre de bits dans une zone PTR ipv6 qui fera office de préfixe réseau. " "Doit s'aligner sur la frontière nyble. La valeur minimum est 4. La valeur " "maximum est 124. Par conséquent, la plage de valeurs est 4, 8, 12, 16,..., " "124" msgid "" "Number of floating IPs allowed per tenant. A negative value means unlimited." msgstr "" "Nombre d'adresses IP flottantes autorisées par locataire. Une valeur " "négative signifie illimité." msgid "" "Number of networks allowed per tenant. A negative value means unlimited." msgstr "" "Nombre de réseaux autorisés par le locataire. Une valeur négative signifie " "illimité" msgid "Number of ports allowed per tenant. A negative value means unlimited." msgstr "" "Nombre de ports autorisés par le locataire. Une valeur négative signifie " "illimité" msgid "Number of routers allowed per tenant. A negative value means unlimited." msgstr "" "Nombre de routeurs autorisés par locataire. 
Une valeur négative signifie " "illimité" msgid "" "Number of seconds between sending events to nova if there are any events to " "send." msgstr "" "Nombre de secondes entre deux envois d'événements à nova s'il y a des " "événements à envoyer." msgid "Number of seconds to keep retrying to listen" msgstr "Nombre de secondes a attendre avant d'essayer d'écouter à nouveau" msgid "" "Number of security groups allowed per tenant. A negative value means " "unlimited." msgstr "" "Nombre de groupes de sécurité autorisés par locataire. Une valeur négative " "signifie illimité." msgid "" "Number of security rules allowed per tenant. A negative value means " "unlimited." msgstr "" "Nombre de règles de sécurité autorisées par locataire. Une valeur négative " "signifie illimité." msgid "" "Number of separate worker processes for metadata server (defaults to half of " "the number of CPUs)" msgstr "" "Nombre de processus de traitement séparés pour le serveur de métadonnées " "(par défaut, la moitié du nombre d'unités centrales)" msgid "Number of subnets allowed per tenant, A negative value means unlimited." msgstr "" "Nombre de sous-réseaux autorisés par le locataire. Une valeur négative " "signifie illimité" msgid "" "Number of threads to use during sync process. Should not exceed connection " "pool size configured on server." msgstr "" "Nombres d'unités d'exécution à utiliser durant le processus de " "synchronisation. Ce nombre ne doit pas être supérieur à la taille de pool de " "connexion configurée sur le serveur." msgid "OK" msgstr "OK" msgid "" "OVS datapath to use. 'system' is the default value and corresponds to the " "kernel datapath. To enable the userspace datapath set this value to 'netdev'." msgstr "" "Chemin de données OVS à utiliser. 'system' est la valeur par défaut et elle " "correspond au chemin de données du noyau. Pour activer le chemin de données " "de l'espace utilisateur, définissez cette valeur sur 'netdev'." msgid "OVS vhost-user socket directory." msgstr "Répertoire de socket OVS vhost-user." msgid "Only admin can view or configure quota" msgstr "Seul l'administrateur peut afficher ou configurer des quotas" msgid "Only admin is authorized to access quotas for another tenant" msgstr "" "Seul l'administrateur est autorisé à accéder aux quotas d'un autre locataire" msgid "Only admins can manipulate policies on objects they do not own" msgstr "" "Seuls les administrateurs peuvent gérer des stratégies sur des objets qui ne " "leur appartiennent pas" msgid "Only allowed to update rules for one security profile at a time" msgstr "" "Les règles peuvent être mises à jour pour un seul profil de sécurité à la " "fois." msgid "Only remote_ip_prefix or remote_group_id may be provided." msgstr "Seul remote_ip_prefix ou remote_group_id peut être fourni." #, python-format msgid "Operation not supported on device %(dev_name)s" msgstr "Opération non prise en charge sur le périphérique %(dev_name)s" msgid "" "Ordered list of network_types to allocate as tenant networks. The default " "value 'local' is useful for single-box testing but provides no connectivity " "between hosts." msgstr "" "Liste triée des éléments network_types à allouer en tant que réseaux " "locataires. La valeur par défaut 'local' est utile pour les tests single-box " "mais elle ne fournit aucune connectivité entre les hôtes." msgid "Override the default dnsmasq settings with this file." msgstr "Remplacez les paramètres dnsmasq par défaut par ce fichier." 
msgid "Owner type of the device: network/compute" msgstr "Type de propriétaire de l'unité : réseau/ordinateur" msgid "POST requests are not supported on this resource." msgstr "Les requêtes POST ne sont pas prises en charge sur cette ressource." #, python-format msgid "Package %s not installed" msgstr "Le package %s n'est pas installé" #, python-format msgid "Parsing bridge_mappings failed: %s." msgstr "Echec de l'analyse syntaxique bridge_mappings : %s." msgid "Password for connecting to designate in admin context" msgstr "" "Mot de passe pour la connexion au réseau désigné dans un contexte admin" msgid "Path to PID file for this process" msgstr "Chemin d'accès au fichier PID pour ce processus" msgid "Path to the router directory" msgstr "Chemin d'accès au répertoire du routeur" msgid "Peer patch port in integration bridge for tunnel bridge." msgstr "" "Port correctif homologue dans le pont d'intégration pour le pont de tunnel." msgid "Peer patch port in tunnel bridge for integration bridge." msgstr "" "Port correctif homologue dans le pont d'intégration tunnel pour le pont " "d'intégration." msgid "Phase upgrade options do not accept revision specification" msgstr "" "Les options de mise à niveau de phase n'acceptent pas la spécification de " "révision" msgid "Ping timeout" msgstr "Délai d'expiration de la commande ping" #, python-format msgid "Port %(id)s does not have fixed ip %(address)s" msgstr "Le port %(id)s ne dispose pas de l'adresse IP fixe %(address)s." #, python-format msgid "Port %s does not exist" msgstr "Le port %s est inexistant." #, python-format msgid "" "Port %s has multiple fixed IPv4 addresses. Must provide a specific IPv4 " "address when assigning a floating IP" msgstr "" "Le port %s comporte plusieurs adresses IPv4 fixes. Une adresse IPv4 " "spécifique doit être fournie lors de l'affectation d'une adresse IP " "flottante." msgid "Prefix Delegation can only be used with IPv6 subnets." msgstr "" "La délégation de préfixe peut uniquement être utilisée avec des sous-réseaux " "IPv6. " msgid "Private key of client certificate." msgstr "Clé privée pour le certificat client." #, python-format msgid "Probe %s deleted" msgstr "Sonde %s supprimée" #, python-format msgid "Probe created : %s " msgstr "Sonde créée : %s " msgid "Process is already started" msgstr "Le processus est déjà démarré" msgid "Process is not running." msgstr "Le processus n'est pas en fonctionnement." msgid "Protocol to access nova metadata, http or https" msgstr "Protocole d'accès aux métadonnées de nova, HTTP ou https" #, python-format msgid "Provider name %(name)s is limited by %(len)s characters" msgstr "Le nom de fournisseur %(name)s est limité à %(len)s caractères" #, python-format msgid "RBAC policy of type %(object_type)s with ID %(id)s not found" msgstr "" "La stratégie RBAC de type %(object_type)s avec l'ID %(id)s est introuvable" #, python-format msgid "" "RBAC policy on object %(object_id)s cannot be removed because other objects " "depend on it.\n" "Details: %(details)s" msgstr "" "La stratégie RBAC sur l'objet %(object_id)s ne peut pas être retirée car " "d'autres objets en dépendent.\n" "Détails : %(details)s" msgid "" "Range of seconds to randomly delay when starting the periodic task scheduler " "to reduce stampeding. (Disable by setting to 0)" msgstr "" "Intervalle en secondes de retard au hasard lors du démarrage du " "planificateur de tâches périodiques de manière à réduire les encombrements " "(définissez ce chiffre sur 0 pour désactiver cette fonction)." 
msgid "Ranges must be in the same IP version" msgstr "Les plages doivent être dans la même version IP" msgid "Ranges must be netaddr.IPRange" msgstr "Les plages doivent être netaddr.IPRange" msgid "Ranges must not overlap" msgstr "Les plages ne doivent pas se chevaucher" #, python-format msgid "" "Release aware branch labels (%s) are deprecated. Please switch to expand@ " "and contract@ labels." msgstr "" "Les libellés de branche orientés édition (%s) sont obsolètes. Passez aux " "libellés expand@ et contract@. " msgid "Remote metadata server experienced an internal server error." msgstr "" "Le serveur de métadonnées distant a subi une erreur de serveur interne." msgid "" "Repository does not contain HEAD files for contract and expand branches." msgstr "" "Le référentiel ne contient pas les fichiers HEAD pour les branches contract " "et expand." msgid "" "Representing the resource type whose load is being reported by the agent. " "This can be \"networks\", \"subnets\" or \"ports\". When specified (Default " "is networks), the server will extract particular load sent as part of its " "agent configuration object from the agent report state, which is the number " "of resources being consumed, at every report_interval.dhcp_load_type can be " "used in combination with network_scheduler_driver = neutron.scheduler." "dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is " "WeightScheduler, dhcp_load_type can be configured to represent the choice " "for the resource being balanced. Example: dhcp_load_type=networks" msgstr "" "Représentation du type de ressource dont la charge est signalée par l'agent. " "Il peut s'agir de \"réseaux\", \"sous-réseaux\" ou \"ports\". Lorsqu'il est " "spécifié (la valeur par défaut est réseaux), le serveur extrait la charge " "particulière envoyée en tant que composant de son objet de configuration " "d'agent depuis l'état de rapport d'agent, qui correspond au nombre de " "ressources consommées, à chaque intervalle report_interval.dhcp_load_type, " "et pouvant être utilisées en combinaison avec network_scheduler_driver = " "neutron.scheduler.dhcp_agent_scheduler.WeightScheduler Lorsque " "network_scheduler_driver est WeightScheduler, dhcp_load_type peut être " "configuré pour représenter le choix pour la ressource équilibrée. Exemple : " "dhcp_load_type=networks" msgid "Request Failed: internal server error while processing your request." msgstr "" "Echec de la demande : erreur de serveur interne lors du traitement de votre " "demande." msgid "" "Reset flow table on start. Setting this to True will cause brief traffic " "interruption." msgstr "" "Réinitialiser la table de flux au démarrage. Affecter la valeur True à ce " "paramètre entraîne une courte interruption du trafic." #, python-format msgid "Resource %(resource)s %(resource_id)s could not be found." msgstr "La ressource %(resource)s %(resource_id)s est introuvable." #, python-format msgid "Resource %(resource_id)s of type %(resource_type)s not found" msgstr "Ressource %(resource_id)s de type %(resource_type)s non trouvée." #, python-format msgid "" "Resource '%(resource_id)s' is already associated with provider " "'%(provider)s' for service type '%(service_type)s'" msgstr "" "La ressource '%(resource_id)s' est déjà associée au fournisseur " "'%(provider)s' pour le type de service '%(service_type)s'" msgid "Resource body required" msgstr "Corps de ressource obligatoire" msgid "Resource not found." msgstr "Ressource non trouvé." 
msgid "Resources required" msgstr "Ressources obligatoires" msgid "" "Root helper application. Use 'sudo neutron-rootwrap /etc/neutron/rootwrap." "conf' to use the real root filter facility. Change to 'sudo' to skip the " "filtering and just run the command directly." msgstr "" "Application d'assistant racine. Utilisez 'sudo neutron-rootwrap /etc/neutron/" "rootwrap.conf' pour utiliser la véritable fonction de filtre racine. " "Remplacez par 'sudo' pour ignorer le filtrage et exécuter simplement la " "commande directement." msgid "Root permissions are required to drop privileges." msgstr "Les droits root sont obligatoires pour supprimer des privilèges." #, python-format msgid "Router already has a port on subnet %s" msgstr "Le routeur dispose déjà d'un port sur le sous-réseau %s." msgid "Router port must have at least one fixed IP" msgstr "Le port de routeur doit avoir au moins une IP fixe" #, python-format msgid "Running %(cmd)s (%(desc)s) for %(project)s ..." msgstr "Exécution de %(cmd)s (%(desc)s) pour %(project)s... " #, python-format msgid "Running %(cmd)s for %(project)s ..." msgstr "Exécution de %(cmd)s pour %(project)s... " msgid "" "Seconds between nodes reporting state to server; should be less than " "agent_down_time, best if it is half or less than agent_down_time." msgstr "" "Secondes entre les noeuds signalant l'état au serveur ; cette valeur doit " "être inférieure à agent_down_time, et au mieux, inférieure ou égale à la " "moitié de agent_down_time." msgid "" "Seconds to regard the agent is down; should be at least twice " "report_interval, to be sure the agent is down for good." msgstr "" "Nombre de secondes avant de considérer que l'agent est arrêté ; cette valeur " "doit être au moins le double de report_interval, pour s'assurer que l'agent " "est effectivement arrêté." #, python-format msgid "Security Group %(id)s %(reason)s." msgstr "Groupe de sécurité %(id)s %(reason)s." #, python-format msgid "Security Group Rule %(id)s %(reason)s." msgstr "Règle de groupe de sécurité %(id)s %(reason)s." #, python-format msgid "Security group %(id)s does not exist" msgstr "Le groupe de sécurité %(id)s n'existe pas." #, python-format msgid "Security group rule %(id)s does not exist" msgstr "La règle de groupe de sécurité %(id)s n'existe pas." #, python-format msgid "Security group rule already exists. Rule id is %(rule_id)s." msgstr "" "Une règle de groupe de sécurité existe déjà. L'ID règle est %(rule_id)s." #, python-format msgid "" "Security group rule for ethertype '%(ethertype)s' not supported. Allowed " "values are %(values)s." msgstr "" "Règle de groupe de sécurité pour ethertype '%(ethertype)s' non prise en " "charge. Les valeurs autorisées sont %(values)s." #, python-format msgid "" "Security group rule protocol %(protocol)s not supported. Only protocol " "values %(values)s and integer representations [0 to 255] are supported." msgstr "" "Le protocole %(protocol)s de la règle du groupe de sécurité n'est pas pris " "en charge. Seules les valeurs de protocole Les valeurs %(values)s et les " "représentations sous forme d'entier [0 à 255] sont prises en charge." msgid "" "Send notification to nova when port data (fixed_ips/floatingip) changes so " "nova can update its cache." msgstr "" "Envoyer une notification à nova lors de la modification des données de port " "(fixed_ips/floatingip) pour que nova puisse mettre à jour son cache." 
msgid "Send notification to nova when port status changes" msgstr "" "Envoyer une notification à nova lors de la modification du statut de port" #, python-format msgid "" "Service provider '%(provider)s' could not be found for service type " "%(service_type)s" msgstr "" "Fournisseur de services '%(provider)s' introuvable pour le type de service " "%(service_type)s" msgid "Service to handle DHCPv6 Prefix delegation." msgstr "Service de traitement de la délégation de préfixe DHCPv6. " #, python-format msgid "Service type %(service_type)s does not have a default service provider" msgstr "" "Le type de service %(service_type)s ne possède pas de fournisseur de " "services par défaut" msgid "" "Set new timeout in seconds for new rpc calls after agent receives SIGTERM. " "If value is set to 0, rpc timeout won't be changed" msgstr "" "Redéfinir le délai d'attente (en secondes) des nouveaux appels RPC observé " "une fois que l'agent a reçu SIGTERM. Si la valeur est définie sur 0, le " "délai d'attente RPC reste inchangé" msgid "" "Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "" "Définissez ou annulez la définition du bit de fragment sur le paquet IP " "sortant véhiculant le tunnel GRE/VXLAN." msgid "Shared address scope can't be unshared" msgstr "Impossible d'annuler le partage d'une portée d'adresse partagée" msgid "String prefix used to match IPset names." msgstr "Préfixe de chaîne utilisé pour correspondre aux noms IPset." #, python-format msgid "Sub-project %s not installed." msgstr "Le sous projet %s n'est pas installé." msgid "Subnet for router interface must have a gateway IP" msgstr "" "Le sous-réseau de l'interface de routeur doit avoir une adresse IP " "passerelle." msgid "Subnet pool has existing allocations" msgstr "Le pool de sous-réseau dispose d'allocations existantes" msgid "Subnet used for the l3 HA admin network." msgstr "" "Sous-réseau utilisé pour le réseau administrateur haute disponibilité L3." msgid "" "System-wide flag to determine the type of router that tenants can create. " "Only admin can override." msgstr "" "Indicateur système pour déterminer le type de router que les locataires " "peuvent créer. Seul l'administrateur peut outrepasser cela" msgid "TCP Port used by Neutron metadata namespace proxy." msgstr "Port TCP utilisé par le proxy d'espace de nom de métadonnées Neutron" msgid "TCP Port used by Nova metadata server." msgstr "Port TCP utilisé par le serveur de métadonnées Nova" msgid "TTL for vxlan interface protocol packets." msgstr "Durée de vie pour les paquets du protocole d'interface vxlan." #, python-format msgid "Tag %(tag)s could not be found." msgstr "Tag %(tag)s introuvable." #, python-format msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" msgstr "" "Titulaire %(tenant_id)s non autorisé à créer %(resource)s sur ce réseau" msgid "Tenant id for connecting to designate in admin context" msgstr "" "ID locataire pour la connexion au réseau désigné dans un contexte admin" msgid "Tenant name for connecting to designate in admin context" msgstr "" "Nom de locataire pour la connexion au réseau désigné dans un contexte admin" msgid "" "The DHCP server can assist with providing metadata support on isolated " "networks. Setting this value to True will cause the DHCP server to append " "specific host routes to the DHCP request. The metadata service will only be " "activated when the subnet does not contain any router port. 
The guest " "instance must be configured to request host routes via DHCP (Option 121). " "This option doesn't have any effect when force_metadata is set to True." msgstr "" "Le serveur DHCP peut contribuer à fournir un support de métadonnées sur des " "réseaux isolés. Si cette valeur est définie sur True, le serveur DHCP ajoute " "des routes hôtes spécifiques à la demande DHCP. Le service de métadonnées " "n'est activé que lorsque le sous-réseau ne contient aucun port de routeur. " "L'instance invitée doit être configurée pour la demande de routes hôtes via " "DHCP (Option 121). Cette option n'a aucun effet lorsque force_metadata est " "défini sur True." msgid "The UDP port to use for VXLAN tunnels." msgstr "Port UDP a utiliser pour les tunnels VXLAN." #, python-format msgid "" "The address allocation request could not be satisfied because: %(reason)s" msgstr "" "Impossible de répondre à la demande d'allocation d'adresse. Motif : " "%(reason)s" msgid "The advertisement interval in seconds" msgstr "Intervalle de publication en secondes" msgid "" "The base MAC address Neutron will use for VIFs. The first 3 octets will " "remain unchanged. If the 4th octet is not 00, it will also be used. The " "others will be randomly generated." msgstr "" "Adresse MAC de base que Neutron va utiliser pour les VIF. Les 3 premiers " "octets demeurent inchangés. Si le 4e octet est différent de 00, il sera " "également utilisé. Les autres seront générés de manière aléatoire." msgid "" "The base mac address used for unique DVR instances by Neutron. The first 3 " "octets will remain unchanged. If the 4th octet is not 00, it will also be " "used. The others will be randomly generated. The 'dvr_base_mac' *must* be " "different from 'base_mac' to avoid mixing them up with MAC's allocated for " "tenant ports. A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00. " "The default is 3 octet" msgstr "" "Adresse MAC de base utilisée pour les instances DVR uniques par Neutron. Les " "3 premiers octets restent inchangés. Si le 4ème octet est différent de 00, " "il sera également utilisé. Les autres seront générés de manière aléatoire. " "L'adresse 'dvr_base_mac' *doit* être différente de l'adresse 'base_mac' pour " "éviter de les confondre avec les adresses MAC allouées pour les ports " "titulaires. 3 octets sont utilisés par défaut. " msgid "The core plugin Neutron will use" msgstr "Le core plugin de Neutron va etre utiliser" msgid "The driver used to manage the DHCP server." msgstr "Pilote utilisé pour gérer le serveur DHCP" msgid "The driver used to manage the virtual interface." msgstr "Pilote utilisé pour gérer l'interface virtuelle" msgid "" "The email address to be used when creating PTR zones. If not specified, the " "email address will be admin@" msgstr "" "Adresse e-mail à utiliser lors de la création de zones PTR. Si elle n'est " "pas indiquée, il s'agira de l'adresse admin@" msgid "" "The maximum number of items returned in a single response, value was " "'infinite' or negative integer means no limit" msgstr "" "Nombre maximal d'éléments renvoyés dans une seule réponse, valeur définie " "sur 'infinite' ou sur un entier négatif qui signifie illimité" msgid "" "The network type to use when creating the HA network for an HA router. By " "default or if empty, the first 'tenant_network_types' is used. This is " "helpful when the VRRP traffic should use a specific network which is not the " "default one." 
msgstr "" "Type de réseau à utiliser lors de la création du réseau haute disponibilité " "pour un routeur haute disponibilité. Par défaut ou si cette zone est vide, " "le premier élément 'tenant_network_types' est utilisé. Cela s'avère utile " "lorsque le trafic VRRP doit utiliser un réseau spécifique différent de celui " "défini par défaut. " msgid "" "The number of seconds the agent will wait between polling for local device " "changes." msgstr "" "Temps en secondes pendant lequel l'agent attendra les interrogations sur les " "modifications de l'unité locale." msgid "" "The number of seconds to wait before respawning the ovsdb monitor after " "losing communication with it." msgstr "" "Le nombre de secondes d'attente avant de régénérer le moniteur ovsdb après " "avoir perdu la communication avec ce dernier." msgid "The number of sort_keys and sort_dirs must be same" msgstr "" "Le nombre de clés de tri (sort_keys) et de répertoires de tri (sort_dirs) " "doit être identique" msgid "" "The path for API extensions. Note that this can be a colon-separated list of " "paths. For example: api_extensions_path = extensions:/path/to/more/exts:/" "even/more/exts. The __path__ of neutron.extensions is appended to this, so " "if your extensions are in there you don't need to specify them here." msgstr "" "Chemin des extensions API. Notez qu'il peut s'agir d'une liste de chemins " "séparés par des virgules. Par exemple : api_extensions_path = extensions:/" "path/to/more/exts:/even/more/exts. Le __chemin__ de neutron.extensions lui " "est ajouté, de sorte que si vos extensions figurent dans ce chemin, vous " "n'avez pas besoin de les indiquer ici." msgid "The physical network name with which the HA network can be created." msgstr "" "Nom de réseau physique avec lequel le réseau haute disponibilité peut être " "créé. " #, python-format msgid "The port '%s' was deleted" msgstr "Le port '%s' a été supprimé" msgid "The port to bind to" msgstr "Port à connecter" #, python-format msgid "The requested content type %s is invalid." msgstr "Le type de contenu %s de la requete est invalide." msgid "The resource could not be found." msgstr "La ressource est introuvable." #, python-format msgid "" "The router %(router_id)s has been already hosted by the L3 Agent " "%(agent_id)s." msgstr "Le routeur %(router_id)s est déjà hébergé par l'agent L3 %(agent_id)s." msgid "" "The server has either erred or is incapable of performing the requested " "operation." msgstr "" "Le serveur a perdu la connexion ou est incapable d'effectuer l'opération " "demandée." msgid "The service plugins Neutron will use" msgstr "Plug-in de service utilisés ultérieurement par Neutron" #, python-format msgid "The subnet request could not be satisfied because: %(reason)s" msgstr "Impossible de répondre à la demande de sous-réseau. Motif : %(reason)s" #, python-format msgid "The subproject to execute the command against. Can be one of: '%s'." msgstr "" "Sous-projet sur lequel la commande doit être exécutée. Valeurs possibles : " "'%s'." msgid "The type of authentication to use" msgstr "Type d'authentification à utiliser" msgid "" "There are routers attached to this network that depend on this policy for " "access." msgstr "" "Certains routeurs connectés à ce réseau dépendent de cette stratégie pour " "l'accès." msgid "" "True to delete all ports on all the OpenvSwitch bridges. False to delete " "ports created by Neutron on integration and external network bridges." 
msgstr "" "La valeur est vraie pour la suppression de tous les ports sur tous les ponts " "OpenvSwitch. Elle est fausse pour la suppression des ports créés par Neutron " "lors de l'intégration et des ponts de réseau externes." msgid "Tunnel IP value needed by the ML2 plugin" msgstr "Valeur IP de tunnel requise par le plug-in ML2" msgid "Tunnel bridge to use." msgstr "Pont de tunnel à utiliser." msgid "" "Type of the nova endpoint to use. This endpoint will be looked up in the " "keystone catalog and should be one of public, internal or admin." msgstr "" "Type de nœud final Nova à utiliser. Ce nœud final sera recherché dans le " "catalogue Keystone et il doit être de type public, interne ou admin." msgid "URL for connecting to designate" msgstr "URL pour la connexion au réseau désigné" msgid "URL to database" msgstr "URL de la base de données" #, python-format msgid "Unable to access %s" msgstr "Impossible d'accéder à %s" #, python-format msgid "Unable to calculate %(address_type)s address because of:%(reason)s" msgstr "Impossible de calculer l'adresse %(address_type)s. Motif : %(reason)s" #, python-format msgid "Unable to convert value in %s" msgstr "Impossible de convertir la valeur en %s" msgid "Unable to create the SNAT Interface Port" msgstr "Impossible de créer l'interface du port SNAT" #, python-format msgid "Unable to determine mac address for %s" msgstr "Impossible de déterminer l'adresse mac pour %s" #, python-format msgid "Unable to find '%s' in request body" msgstr "Impossible de trouver '%s' dans la corps de demande" #, python-format msgid "Unable to find IP address %(ip_address)s on subnet %(subnet_id)s" msgstr "" "Impossible de trouver l'adresse IP %(ip_address)s dans le sous réseau " "%(subnet_id)s" #, python-format msgid "Unable to find resource name in %s" msgstr "Impossible de trouver le nom de la ressource dans %s" #, python-format msgid "" "Unable to identify a target field from:%s. Match should be in the form " "%%()s" msgstr "" "Impossible d'identifier une zone cible à partir de : %s. La correspondance " "doit être au format %%()s" msgid "Unable to provide external connectivity" msgstr "Impossible de fournir une connectivité externe" msgid "Unable to provide tenant private network" msgstr "Impossible de fournir un réseau privé locataire" #, python-format msgid "" "Unable to verify match:%(match)s as the parent resource: %(res)s was not " "found" msgstr "" "Impossible de vérifier la correspondance %(match)s comme ressource parent : " "%(res)s n'a pas été trouvée" #, python-format msgid "Unexpected label for script %(script_name)s: %(labels)s" msgstr "Libellé inattendu pour le script %(script_name)s : %(labels)s" #, python-format msgid "Unexpected number of alembic branch points: %(branchpoints)s" msgstr "Nombre de points de branche alembic inattendu : %(branchpoints)s" #, python-format msgid "Unexpected response code: %s" msgstr "Code de réponse inattendu : %s" #, python-format msgid "Unexpected response: %s" msgstr "Réponse inattendue : %s" #, python-format msgid "Unit name '%(unit)s' is not valid." msgstr "Le nom d'unité '%(unit)s' n'est pas valide." #, python-format msgid "Unknown address type %(address_type)s" msgstr "Type d'adresse inconnu %(address_type)s" #, python-format msgid "Unknown attribute '%s'." msgstr "Attribut inconnu '%s'." #, python-format msgid "Unknown chain: %r" msgstr "Chaîne inconnue : %r" #, python-format msgid "Unknown network type %(network_type)s." msgstr "Type de réseau inconnu %(network_type)s." 
msgid "Unmapped error" msgstr "Erreur de non-correspondance" msgid "Unrecognized action" msgstr "Action inconnu" msgid "Unrecognized field" msgstr "Champ non reconnu" msgid "Unsupported Content-Type" msgstr "Type de contenu non pris en charge" #, python-format msgid "Unsupported network type %(net_type)s." msgstr "Le type de réseau %(net_type)s n'est pas pris en charge." msgid "Unsupported request type" msgstr "Type de demande non pris en charge" msgid "Updating default security group not allowed." msgstr "Mise à jour du groupe de sécurité par défaut non autorisée" msgid "" "Use ML2 l2population mechanism driver to learn remote MAC and IPs and " "improve tunnel scalability." msgstr "" "Utilisez le pilote de mécanisme l2population ML2 pour connaître les adresses " "MAC et IP et pour améliorer l'évolutivité du tunnel." msgid "Use broadcast in DHCP replies." msgstr "Utilisez la diffusion dans les réponses DHCP." msgid "Use either --delta or relative revision, not both" msgstr "" "Utiliser soit un --delta, soit une révision relative, mais pas les deux" msgid "" "Use ipset to speed-up the iptables based security groups. Enabling ipset " "support requires that ipset is installed on L2 agent node." msgstr "" "Utiliser ipset pour accélérer les groupes de sécurité basés sur iptables. " "L'activation du support ipset nécessite l'installation d'ipset sur le noeud " "d'agent L2." msgid "" "Use the root helper when listing the namespaces on a system. This may not be " "required depending on the security configuration. If the root helper is not " "required, set this to False for a performance improvement." msgstr "" "Utilisez l'assistant racine lors de l'affichage de la liste des espaces de " "noms sur un système. Cette opération n'est peut-être pas obligatoire selon " "la configuration de sécurité. Si l'assistant racine n'est pas requis, " "définissez cette option sur False afin d'améliorer les performances." msgid "" "Use veths instead of patch ports to interconnect the integration bridge to " "physical networks. Support kernel without Open vSwitch patch port support so " "long as it is set to True." msgstr "" "Utilisez veths au lieu de ports de correctif pour interconnecter le pont " "d'intégration avec des réseaux physiques. Le noyau sans port de correctif " "Open vSwitch est pris en charge si le paramètre est défini sur True." msgid "" "User (uid or name) running metadata proxy after its initialization (if " "empty: agent effective user)." msgstr "" "Utilisateur (UID ou nom) exécutant le proxy de métadonnées après son " "initialisation (si vide : utilisateur effectif de l'agent)." msgid "User (uid or name) running this process after its initialization" msgstr "Utilisateur (UID ou nom) exécutant ce process après son initialisation" msgid "Username for connecting to designate in admin context" msgstr "" "Nom d'utilisateur pour la connexion au réseau désigné dans un contexte admin" msgid "VRRP authentication password" msgstr "Mot de passe pour l'authentification VRRP" msgid "VRRP authentication type" msgstr "Type d'authentification VRRP" msgid "" "Value of host kernel tick rate (hz) for calculating minimum burst value in " "bandwidth limit rules for a port with QoS. See kernel configuration file for " "HZ value and tc-tbf manual for more information." msgstr "" "Valeur du rythme de noyau hôte (hz) pour le calcul de la valeur de rafale " "minimum dans les règles de limite de bande passante pour un port avec QoS. 
" "Consultez le fichier de configuration du noyau pour la valeur HZ et le " "manuel tc-tbf pour plus d'informations." msgid "" "Value of latency (ms) for calculating size of queue for a port with QoS. See " "tc-tbf manual for more information." msgstr "" "Valeur de latence (ms) pour le calcul de la taille de file d'attente d'un " "port avec QoS. Consultez le manuel tc-tbf pour plus d'informations." msgid "" "When proxying metadata requests, Neutron signs the Instance-ID header with a " "shared secret to prevent spoofing. You may select any string for a secret, " "but it must match here and in the configuration used by the Nova Metadata " "Server. NOTE: Nova uses the same config key, but in [neutron] section." msgstr "" "Lors de la mise en cache des demandes de métadonnées, Neutron signe l'en-" "tête Instance-ID à l'aide d'un secret partagé afin d'éviter toute " "usurpation. Vous pouvez choisir une chaîne comme secret, mais elle doit être " "identique ici et dans la configuration utilisée par le serveur de " "métadonnées Nova. REMARQUE : Nova utilise la même clé de configuration, mais " "dans la section [neutron]." msgid "" "Where to store Neutron state files. This directory must be writable by the " "agent." msgstr "" "Où stocker des fichiers d'état de Neutron. Ce répertoire doit être " "accessible en écriture par l'agent." msgid "" "With IPv6, the network used for the external gateway does not need to have " "an associated subnet, since the automatically assigned link-local address " "(LLA) can be used. However, an IPv6 gateway address is needed for use as the " "next-hop for the default route. If no IPv6 gateway address is configured " "here, (and only then) the neutron router will be configured to get its " "default route from router advertisements (RAs) from the upstream router; in " "which case the upstream router must also be configured to send these RAs. " "The ipv6_gateway, when configured, should be the LLA of the interface on the " "upstream router. If a next-hop using a global unique address (GUA) is " "desired, it needs to be done via a subnet allocated to the network and not " "through this parameter. " msgstr "" "Avec IPv6, le réseau utilisé pour la passerelle externe ne doit pas " "obligatoirement disposer d'un sous-réseau associé, étant donné que l'adresse " "link-local (LLA) automatiquement affectée peut être utilisée. En revanche, " "une adresse de passerelle IPv6 est nécessaire pour pouvoir faire un saut sur " "le chemin par défaut. Si aucune adresse de passerelle IPv6 n'estconfigurée " "dans ce cas, le routeur Neutron sera configuré pour obtenir son chemin par " "défaut (et uniquement dans ce but) à partir des annonces du routeur en " "amont ; dans cette situation, le routeur en amont doit être également " "configuré pour envoyer lesdites annonces. ipv6_gateway, lorsqu'il est " "configuré, doit constituer la LLA de l'interface du routeur en amont. Si un " "saut utilisantune adresse unique globale (GUA) est souhaité, il doit être " "effectué via un sous-réseau attribué au réseau, et non pas par " "l'intermédiaire de ce paramètre. " msgid "You must implement __call__" msgstr "Vous devez implémenter __call__" msgid "" "You must provide a config file for bridge - either --config-file or " "env[NEUTRON_TEST_CONFIG_FILE]" msgstr "" "Vous devez fournir un fichier de configuration pour le pont --config-file ou " "env[NEUTRON_TEST_CONFIG_FILE]" msgid "You must provide a revision or relative delta" msgstr "Vous devez fournir une révision ou un delta relatif." 
msgid "a subnetpool must be specified in the absence of a cidr" msgstr "Un pool de sous-réseau doit être spécifié en l'absence d'un cidr" msgid "add_ha_port cannot be called inside of a transaction." msgstr "" "Le paramètre add_ha_port ne peut pas être appelé à l'intérieur d'une " "transaction." msgid "allocation_pools allowed only for specific subnet requests." msgstr "" "allocation_pools autorisé uniquement pour les requêtes de sous-réseau " "spécifiques." msgid "allocation_pools are not in the subnet" msgstr "allocation_pools ne figurent pas dans le sous-réseau" msgid "allocation_pools use the wrong ip version" msgstr "allocation_pools utilise une version IP erronée" msgid "already a synthetic attribute" msgstr "déjà un attribut synthétique" msgid "binding:profile value too large" msgstr "Valeur de liaison:profil excessive" #, python-format msgid "cannot perform %(event)s due to %(reason)s" msgstr "Impossible d'exécuter %(event)s en raison de %(reason)s" msgid "cidr and prefixlen must not be supplied together" msgstr "cidr et prefixlen ne doivent pas être fournis ensemble" msgid "dns_domain cannot be specified without a dns_name" msgstr "dns_domain ne peut pas être spécifié sans dns_name" msgid "dns_name cannot be specified without a dns_domain" msgstr "dns_name ne peut pas être spécifié sans dns_domain" msgid "fixed_ip_address cannot be specified without a port_id" msgstr "Impossible de spécifier une adresse IP fixe sans ID port" #, python-format msgid "has device owner %s" msgstr "a le propriétaire de terminal %s" msgid "in use" msgstr "utilisé" #, python-format msgid "ip command failed on device %(dev_name)s: %(reason)s" msgstr "Echec de la commande sur le périphérique %(dev_name)s : %(reason)s" #, python-format msgid "ip link capability %(capability)s is not supported" msgstr "Fonctionnalité de liaison IP %(capability)s non prise en charge" #, python-format msgid "ip link command is not supported: %(reason)s" msgstr "Commande link IP non prise en charge : %(reason)s" msgid "ip_version must be specified in the absence of cidr and subnetpool_id" msgstr "" "ip_version doit être indiqué si cidr et subnetpool_id ne sont pas définis" msgid "ipv6_address_mode is not valid when ip_version is 4" msgstr "ipv6_address_mode est non valide quand ip_version est 4" msgid "ipv6_ra_mode is not valid when ip_version is 4" msgstr "ipv6_ra_mode est non valide quand ip_version est 4" #, python-format msgid "" "ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " "'%(addr_mode)s' is not valid. If both attributes are set, they must be the " "same value" msgstr "" "ipv6_ra_mode défini sur '%(ra_mode)s' avec ipv6_address_mode défini sur " "'%(addr_mode)s' n'est pas correct. 
Si les deux attributs sont définis, ils " "doivent avoir la même valeur" msgid "mac address update" msgstr "Mise à jour d'adresse MAC" msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "doit fournir exactement 2 arguments - cidr et MAC" msgid "network_type required" msgstr "network_type requis" #, python-format msgid "network_type value '%s' not supported" msgstr "Valeur network_type '%s' non prise en charge" msgid "new subnet" msgstr "nouveau sous-réseau" #, python-format msgid "physical_network '%s' unknown for flat provider network" msgstr "" "physical_network '%s' inconnu pour le réseau de fournisseur flat" msgid "physical_network required for flat provider network" msgstr "" "physical_network obligatoire pour le réseau de fournisseur flat" #, python-format msgid "provider:physical_network specified for %s network" msgstr "provider:physical_network spécifié pour le réseau %s" msgid "respawn_interval must be >= 0 if provided." msgstr "respawn_interval doit être >= 0 si fourni." #, python-format msgid "segmentation_id out of range (%(min)s through %(max)s)" msgstr "segmentation_id hors plage (%(min)s à %(max)s)" msgid "segmentation_id requires physical_network for VLAN provider network" msgstr "" "segmentation_id requiert physical_network pour le réseau de fournisseur VLAN" msgid "shared attribute switching to synthetic" msgstr "commutation d'attribut partagé vers attribut synthétique" #, python-format msgid "" "subnetpool %(subnetpool_id)s cannot be updated when associated with shared " "address scope %(address_scope_id)s" msgstr "" "Le pool de sous-réseau %(subnetpool_id)s ne peut pas être mis à jour s'il " "est associé avec la portée d'adresse partagée %(address_scope_id)s" msgid "subnetpool_id and use_default_subnetpool cannot both be specified" msgstr "" "Les paramètres subnetpool_id et use_default_subnetpool ne peuvent pas être " "tous deux spécifiés" msgid "the nexthop is not connected with router" msgstr "nexthop n'est pas connecté au routeur" msgid "the nexthop is used by router" msgstr "nexthop est utilisé par le routeur" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982290.9710417 neutron-16.0.0.0b2.dev214/neutron/locale/it/0000755000175000017500000000000000000000000020510 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3150446 neutron-16.0.0.0b2.dev214/neutron/locale/it/LC_MESSAGES/0000755000175000017500000000000000000000000022275 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/locale/it/LC_MESSAGES/neutron.po0000644000175000017500000027147100000000000024333 0ustar00coreycorey00000000000000# Translations template for neutron. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the neutron project. # # Translators: # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: neutron VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2019-12-20 15:01+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 05:53+0000\n" "Last-Translator: Copied by Zanata \n" "Language: it\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Italian\n" #, python-format msgid "" "\n" "Command: %(cmd)s\n" "Exit code: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" msgstr "" "\n" "Comando: %(cmd)s\n" "Codice di uscita: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" #, python-format msgid "" "%(branch)s HEAD file does not match migration timeline head, expected: " "%(head)s" msgstr "" "Il file HEAD %(branch)s non corrisponde all'head di durata della migrazione, " "previsto: %(head)s" #, python-format msgid "" "%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' " "and '%(desc)s'" msgstr "" "%(invalid_dirs)s non è un valore valido per sort_dirs, il valore valido è " "'%(asc)s' e '%(desc)s'" #, python-format msgid "%(key)s prohibited for %(tunnel)s provider network" msgstr "%(key)s non consentito per la rete del provider %(tunnel)s" #, python-format msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" msgstr "%(name)s '%(addr)s' non corrisponde alla ip_version '%(ip_version)s'" #, python-format msgid "%s cannot be called while in offline mode" msgstr "%s Impossibile chiamare durante la modalità offline" #, python-format msgid "%s is invalid attribute for sort_keys" msgstr "%s è un attributo non valido per sort_keys" #, python-format msgid "%s must implement get_port_from_device or get_ports_from_devices." msgstr "%s deve implementare get_port_from_device o get_ports_from_devices." #, python-format msgid "%s prohibited for VLAN provider network" msgstr "%s vietato per la rete del provider VLAN" #, python-format msgid "%s prohibited for flat provider network" msgstr "%s vietato per rete flat del provider" #, python-format msgid "%s prohibited for local provider network" msgstr "%s è vietato per la rete del provider locale" #, python-format msgid "'%s' is not a valid RBAC object type" msgstr "'%s' non è un tipo di oggetto valido" #, python-format msgid "'%s' is not supported for filtering" msgstr "'%s' non è supportata per il filtro" #, python-format msgid "'module' object has no attribute '%s'" msgstr "L'oggetto 'module' non ha un attributo '%s'" msgid "'port_max' is smaller than 'port_min'" msgstr "'port_max' è più piccolo di 'port_min'" msgid "0 is not allowed as CIDR prefix length" msgstr "0 non è consentito come lunghezza del prefisso CIDR" msgid "A cidr must be specified in the absence of a subnet pool" msgstr "È necessario specificare un cidr in assenza di un pool di sottoreti" msgid "" "A decimal value as Vendor's Registered Private Enterprise Number as required " "by RFC3315 DUID-EN." msgstr "" "Un valore decimale come il numero dell'azienda privata registrato dal " "fornitore come richiesto da RFC3315 DUID-EN." #, python-format msgid "A default external network already exists: %(net_id)s." msgstr "Una rete esterna predefinita esiste già: %(net_id)s." msgid "" "A default subnetpool for this IP family has already been set. 
Only one " "default may exist per IP family" msgstr "" "Un pool di sottorete predefinito per questa famiglia IP è già stato " "impostato. Solo un valore predefinito può esistere per famiglia IP" msgid "A metering driver must be specified" msgstr "Specificare un driver di misurazione" msgid "Access to this resource was denied." msgstr "L'accesso a questa risorsa è stato negato." msgid "Action to be executed when a child process dies" msgstr "Azione da eseguire quando termina un processo child" msgid "" "Add comments to iptables rules. Set to false to disallow the addition of " "comments to generated iptables rules that describe each rule's purpose. " "System must support the iptables comments module for addition of comments." msgstr "" "Aggiungere commenti alle regole iptables. Impostare su false per non " "consentire l'aggiunta di commenti alle regole iptables generate che " "descrivono lo scopo di ciascun ruolo. Il sistema deve supportare il modulo " "di commenti iptables per l'aggiunta di commenti." msgid "Address not present on interface" msgstr "Indirizzo non presente sull'interfaccia" msgid "Adds test attributes to core resources." msgstr "Aggiunge gli attributi di test alle risorse principali." #, python-format msgid "Agent %(id)s is not a L3 Agent or has been disabled" msgstr "L'agent %(id)s non è un agent L3 oppure è stato disabilitato" msgid "" "Agent starts with admin_state_up=False when enable_new_agents=False. In the " "case, user's resources will not be scheduled automatically to the agent " "until admin changes admin_state_up to True." msgstr "" "L'agent inizia con admin_state_up=False quando enable_new_agents=False. In " "tal caso, le risorse dell'utente non saranno pianificate automaticamente per " "l'agent finché l'admin non modifica admin_state_up in True." #, python-format msgid "Agent updated: %(payload)s" msgstr "Agent aggiornato: %(payload)s" msgid "Allow auto scheduling networks to DHCP agent." msgstr "Consenti pianificazione automatica delle reti nell'agent DHCP." msgid "Allow auto scheduling of routers to L3 agent." msgstr "Consenti pianificazione automatica dei router nell'agent L3." msgid "" "Allow overlapping IP support in Neutron. Attention: the following parameter " "MUST be set to False if Neutron is being used in conjunction with Nova " "security groups." msgstr "" "Consentire la sovrapposizione del supporto IP in Neutron. Attenzione: il " "seguente parametro DEVE essere impostato su False se Neutron viene " "utilizzato insieme ai gruppi di sicurezza Nova." msgid "Allow running metadata proxy." msgstr "Consenti l'esecuzione del proxy di metadati." msgid "Allow sending resource operation notification to DHCP agent" msgstr "Consenti notifica operazione di invio risorse all'agent DHCP" msgid "Allow the creation of PTR records" msgstr "Consenti la creazione di record PTR" msgid "Allow the usage of the bulk API" msgstr "Consenti l'utilizzo dell'API bulk" msgid "Allow to perform insecure SSL (https) requests to nova metadata" msgstr "" "Consentire l'esecuzione di richieste SSL (https) non protette sui metadati " "nova" msgid "" "Allows for serving metadata requests coming from a dedicated metadata access " "network whose CIDR is 169.254.169.254/16 (or larger prefix), and is " "connected to a Neutron router from which the VMs send metadata:1 request. In " "this case DHCP Option 121 will not be injected in VMs, as they will be able " "to reach 169.254.169.254 through a router. This option requires " "enable_isolated_metadata = True." 
msgstr "" "Consente di servire le richieste di metadati da una rete di accesso ai " "metadati dedicata il cui CIDR è 169.254.169.254/16 (o un prefisso più " "esteso) ed è connessa a un router Neutron da cui le VM inviano i metadati:1 " "richiesta. In questo caso, l'opzione DHCP 121 non sarà inserita nelle VM in " "quanto non saranno in grado di raggiungere 169.254.169.254 tramite un " "router. Questa opzione richiede enable_isolated_metadata = True." msgid "An RBAC policy already exists with those values." msgstr "Una politica RBAC esiste già con questi valori." msgid "An identifier must be specified when updating a subnet" msgstr "" "Un identificativo deve essere specificato durante l'aggiornamento di una " "sottorete" msgid "" "An ordered list of extension driver entrypoints to be loaded from the " "neutron.ml2.extension_drivers namespace. For example: extension_drivers = " "port_security,qos" msgstr "" "Un elenco ordinato di punti di ingresso del driver di estensione da caricare " "dallo spazio dei nomi neutron.ml2.extension_drivers. Ad esempio: " "extension_drivers = port_security,qos" msgid "" "An ordered list of networking mechanism driver entrypoints to be loaded from " "the neutron.ml2.mechanism_drivers namespace." msgstr "" "Un elenco ordinato dei punti di ingresso del driver del meccanismo di rete " "da caricare dallo spazio dei nomi neutron.ml2.mechanism_drivers." msgid "An unknown error has occurred. Please try your request again." msgstr "Si è verificato un errore sconosciuto. Ritentare la richiesta." msgid "Async process didn't respawn" msgstr "Il processo async non ha eseguito la nuova generazione" msgid "Authorization URL for connecting to designate in admin context" msgstr "URL autorizzazione per la connessione da designare nel contesto admin" msgid "Automatically remove networks from offline DHCP agents." msgstr "Rimuove automaticamente le reti dagli agent DHCP offline." msgid "" "Automatically reschedule routers from offline L3 agents to online L3 agents." msgstr "" "Ripianifica automaticamente i router dagli agent L3 offline agli agent L3 " "online." msgid "Availability zone of this node" msgstr "Zona di disponibilità di questo nodo" msgid "Available commands" msgstr "Comandi disponibili" #, python-format msgid "Base MAC: %s" msgstr "MAC base: %s" msgid "" "Base log dir for dnsmasq logging. The log contains DHCP and DNS log " "information and is useful for debugging issues with either DHCP or DNS. If " "this section is null, disable dnsmasq log." msgstr "" "Directory log di base per la registrazione dnsmasq. Il log contiene info di " "log DHCP e DNS ed è utile per il debug dei problemi con DHCP o DNS. Se " "questa sezione è null, disabilitare il log dnsmasq." 
msgid "Body contains invalid data" msgstr "Il corpo contiene dati non validi" msgid "Bulk operation not supported" msgstr "Operazione massiccia non supportata" msgid "CIDR to monitor" msgstr "CIDR da monitorare" #, python-format msgid "Callback for %(resource_type)s not found" msgstr "Callback per %(resource_type)s non trovata" #, python-format msgid "Callback for %(resource_type)s returned wrong resource type" msgstr "La callback per %(resource_type)s ha restituito un tipo risorsa errato" #, python-format msgid "Cannot add floating IP to port %s that has no fixed IPv4 addresses" msgstr "" "Impossibile aggiungere l'IP mobile alla porta %s che non ha indirizzi IPv4 " "fissi" #, python-format msgid "Cannot add multiple callbacks for %(resource_type)s" msgstr "Impossibile aggiungere più callback per %(resource_type)s" #, python-format msgid "Cannot allocate IPv%(req_ver)s subnet from IPv%(pool_ver)s subnet pool" msgstr "" "Impossibile assegnare la sottorete IPv%(req_ver)s dal pool di sottoreti IPv" "%(pool_ver)s" msgid "Cannot allocate requested subnet from the available set of prefixes" msgstr "" "Impossibile assegnare la sottorete richiesta dall'insieme di prefissi " "disponibili" msgid "Cannot disable enable_dhcp with ipv6 attributes set" msgstr "Impossibile disabilitare enable_dhcp con gli attributi ipv6 impostati" #, python-format msgid "Cannot handle subnet of type %(subnet_type)s" msgstr "Impossibile gestire la sottorete di tipo %(subnet_type)s" msgid "Cannot have multiple IPv4 subnets on router port" msgstr "Impossibile avere più sottoreti IPv4 sulla porta del router" #, python-format msgid "" "Cannot have multiple router ports with the same network id if both contain " "IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s" msgstr "" "Impossibile avere più porte router con lo stesso ID di rete se entrambe " "contengono sottoreti IPv6. La porta esistente %(p)s ha sottoreti IPv6 e ID " "di rete %(nid)s" #, python-format msgid "" "Cannot host distributed router %(router_id)s on legacy L3 agent %(agent_id)s." msgstr "" "Impossibile ospitare il router distribuito %(router_id)s sull'agent legacy " "L3 %(agent_id)s." 
msgid "Cannot specify both subnet-id and port-id" msgstr "Impossibile specificare entrambi subnet_id e port_id" msgid "Cannot understand JSON" msgstr "Impossibile riconoscere JSON" #, python-format msgid "Cannot update read-only attribute %s" msgstr "Impossibile aggiornare l'attributo di sola lettura %s" msgid "Certificate Authority public key (CA cert) file for ssl" msgstr "File di chiave pubblica Certificate Authority (CA cert) per ssl" msgid "Check ebtables installation" msgstr "Controlla installazione di ebtables" msgid "Check for ARP header match support" msgstr "Verifica il supporto di corrispondenza intestazione ARP" msgid "Check for ARP responder support" msgstr "Verifica il supporto responder ARP" msgid "Check for ICMPv6 header match support" msgstr "Verifica il supporto di corrispondenza intestazione ICMPv6" msgid "Check for OVS Geneve support" msgstr "Verifica il supporto OVS Geneve" msgid "Check for OVS vxlan support" msgstr "Verifica il supporto OVS vxlan" msgid "Check for VF management support" msgstr "Verifica il supporto di gestione VF management" msgid "Check for iproute2 vxlan support" msgstr "Verifica il supporto iproute2 vxlan" msgid "Check for nova notification support" msgstr "Verifica il supporto di notifica nova" msgid "Check for patch port support" msgstr "Verifica il supporto porta patch" msgid "Check ip6tables installation" msgstr "Controlla installazione di ip6tables" msgid "Check ipset installation" msgstr "Controlla installazione di ipset" msgid "Check keepalived IPv6 support" msgstr "Controlla supporto IPv6 con keepalive" msgid "Check minimal dibbler version" msgstr "Controlla versione dibbler minima" msgid "Check minimal dnsmasq version" msgstr "Verifica versione dnsmasq minima" msgid "Check netns permission settings" msgstr "Verifica le impostazioni di autorizzazione netns" msgid "Check ovs conntrack support" msgstr "Verifica il supporto OVS conntrack" msgid "Check ovsdb native interface support" msgstr "Verifica supporto interfaccia nativa ovsdb" #, python-format msgid "" "Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of " "subnet %(sub_id)s" msgstr "" "Cidr %(subnet_cidr)s della sottorete %(subnet_id)s si sovrappone con il cidr " "%(cidr)s della sottorete %(sub_id)s" msgid "Cleanup resources of a specific agent type only." msgstr "Ripulire solo le risorse di un tipo di agent specifico." msgid "Client certificate for nova metadata api server." msgstr "Certificato client per il server api dei metadati nova" msgid "" "Comma-separated list of : tuples, mapping " "network_device to the agent's node-specific list of virtual functions that " "should not be used for virtual networking. vfs_to_exclude is a semicolon-" "separated list of virtual functions to exclude from network_device. The " "network_device in the mapping should appear in the physical_device_mappings " "list." msgstr "" "Elenco di tuple : che associano " "network_device all'elenco specifico del nodo dell'agent delle funzioni " "virtuali che non devono essere utilizzate per la rete virtuale. " "vfs_to_exclude è un elenco separato da punto e virgola delle funzioni " "virtuali da escludere da network_device. Il network_device nell'associazione " "deve essere presente nell'elenco physical_device_mappings." msgid "" "Comma-separated list of : tuples mapping " "physical network names to the agent's node-specific physical network device " "interfaces of SR-IOV physical function to be used for VLAN networks. 
All " "physical networks listed in network_vlan_ranges on the server should have " "mappings to appropriate interfaces on each agent." msgstr "" "Elenco di tuple : separate di virgole che " "associano i nomi della rete fisica alle interfacce del dispositivo di rete " "fisico specifico del nodo dell'agent della funzione fisica SR-IOV da " "utilizzare per le reti VLAN. Tutte le reti fisiche elencate in " "network_vlan_ranges sul server devono avere associazioni alle interfacce " "appropriate su ogni agent." msgid "" "Comma-separated list of : tuples " "mapping physical network names to the agent's node-specific physical network " "interfaces to be used for flat and VLAN networks. All physical networks " "listed in network_vlan_ranges on the server should have mappings to " "appropriate interfaces on each agent." msgstr "" "Elenco di tuple : che associano i nomi " "della rete fisica all'interfaccia di rete fisica specifica del nodo " "dell'agent da utilizzare per le reti VLAN. Tutte le reti fisiche elencate in " "network_vlan_ranges sul server devono avere associazioni alle interfacce " "appropriate su ogni agent" msgid "" "Comma-separated list of : tuples enumerating ranges of GRE " "tunnel IDs that are available for tenant network allocation" msgstr "" "Elenco separato da virgole di intervalli di enumerazione tuple :" " ID tunnel GRE disponibili per l'assegnazione di rete tenant" msgid "" "Comma-separated list of : tuples enumerating ranges of " "Geneve VNI IDs that are available for tenant network allocation" msgstr "" "Elenco separato da virgole di intervalli di enumerazione tuple :" " di ID VNI Geneve disponibili per l'assegnazione della rete titolare" msgid "" "Comma-separated list of : tuples enumerating ranges of " "VXLAN VNI IDs that are available for tenant network allocation" msgstr "" "Elenco separato da virgole di intervalli di enumerazione tuple :" " di VXLAN VNI ID disponibili per l'assegnazione della rete tenant" msgid "" "Comma-separated list of the DNS servers which will be used as forwarders." msgstr "" "Elenco separato da virgole dei server DNS che verranno utilizzati come " "server di inoltro." msgid "Command to execute" msgstr "Comando da eseguire" msgid "Config file for interface driver (You may also use l3_agent.ini)" msgstr "" "File di configurazione per il driver di interfaccia (È possibile utilizzare " "anche l3_agent.ini)" #, python-format msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s" msgstr "Valore ethertype %(ethertype)s in conflitto per CIDR %(cidr)s" msgid "" "Controls whether the neutron security group API is enabled in the server. It " "should be false when using no security groups or using the nova security " "group API." msgstr "" "Controlla se l'API del gruppo di sicurezza neutron è abilitata sul server. " "Dovrebbe essere impostata su false quando non si utilizzano gruppi di " "sicurezza o si utilizza l'API del gruppo di sicurezza nova." #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" msgstr "" "Impossibile effettuare il bind a %(host)s:%(port)s dopo aver provato per " "%(time)d secondi" msgid "Could not deserialize data" msgstr "Impossibile deserializzare i dati" msgid "" "DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite " "lease times." msgstr "" "Durata rilascio DHCP (in secondi). Utilizzare -1 per informare dnsmasq di " "utilizzare infinite volte il rilascio." 
msgid "" "DVR deployments for VXLAN/GRE/Geneve underlays require L2-pop to be enabled, " "in both the Agent and Server side." msgstr "" "Le distribuzioni DVR per VXLAN/GRE/Geneve sottostanti richiedono che sia " "abilitato L2-pop, sia sul lato agent che server." msgid "" "Database engine for which script will be generated when using offline " "migration." msgstr "" "Motore di database per cui verrà generato lo script quando si utilizza la " "migrazione offline." msgid "Default external networks must be shared to everyone." msgstr "Le reti esterne predefinite devono essere condivise con chiunque." msgid "" "Default network type for external networks when no provider attributes are " "specified. By default it is None, which means that if provider attributes " "are not specified while creating external networks then they will have the " "same type as tenant networks. Allowed values for external_network_type " "config option depend on the network type values configured in type_drivers " "config option." msgstr "" "Il tipo di rete predefinito per le reti esterne quando non si specificano " "attributi provider. Per impostazione predefinita è Nessuno, che indica che " "se gli attributi provider non sono stati specificati durante la creazione di " "reti esterne, avranno lo stesso tipo delle reti titolari. I valori " "consentiti per l'opzione config external_network_type dipendono dai valori " "del tipo di rete configurati nell'opzione config type_drivers." msgid "" "Default number of RBAC entries allowed per tenant. A negative value means " "unlimited." msgstr "" "Numero predefinito di voci RBAC consentite per titolare. Un valore negativo " "indica un numero illimitato." msgid "" "Default number of resource allowed per tenant. A negative value means " "unlimited." msgstr "" "Numero predefinito di risorse consentite per tenant. Un valore negativo " "indica un numero illimitato." msgid "Default security group" msgstr "Gruppo di sicurezza predefinito" msgid "Default security group already exists." msgstr "Il gruppo di sicurezza predefinito già esiste." msgid "" "Default value of availability zone hints. The availability zone aware " "schedulers use this when the resources availability_zone_hints is empty. " "Multiple availability zones can be specified by a comma separated string. " "This value can be empty. In this case, even if availability_zone_hints for a " "resource is empty, availability zone is considered for high availability " "while scheduling the resource." msgstr "" "Il valore predefinito dei suggerimenti per la zona di disponibilità. Gli " "scheduler che riconoscono la zona di disponibilità utilizzano questo valore " "quando le risorse availability_zone_hints sono vuote. Più zone di " "disponibilità possono essere specificate mediante una stringa separata da " "virgole. Questo valore non può essere vuoto. In questo caso, anche se " "availability_zone_hints per una risorsa è vuoto, la zona di disponibilità " "viene considerata per l'elevata disponibilità durante la pianificazione " "della risorsa." msgid "" "Define the default value of enable_snat if not provided in " "external_gateway_info." msgstr "" "Definire il valore predefinito di enable_snat se non fornito in " "external_gateway_info." msgid "" "Defines providers for advanced services using the format: :" ":[:default]" msgstr "" "Definisce i provider per i servizi avanzati utilizzando il formato: " "::[:default]" msgid "Delete the namespace by removing all devices." 
msgstr "Elimina lo spazio dei nomi rimuovendo tutti i dispositivi." #, python-format msgid "Deleting port %s" msgstr "Eliminazione della porta %s" #, python-format msgid "Deployment error: %(reason)s." msgstr "Errore di distribuzione: %(reason)s." msgid "Destroy IPsets even if there is an iptables reference." msgstr "Distruggere gli IPset anche se c'è un riferimento iptables." msgid "Destroy all IPsets." msgstr "Distruggere tutti gli IPset." #, python-format msgid "Device %(dev_name)s in mapping: %(mapping)s not unique" msgstr "Dispositivo %(dev_name)s nell'associazione: %(mapping)s non univoco" msgid "Device not found" msgstr "Dispositivo non trovato" msgid "Domain to use for building the hostnames" msgstr "Dominio da utilizzare per creare i nomi host" msgid "Downgrade no longer supported" msgstr "Riduzione non più supportata" #, python-format msgid "Driver %s is not unique across providers" msgstr "Il driver %s non è univoco tra i provider" msgid "Driver for external DNS integration." msgstr "Driver per l'integrazione DNS esterna." msgid "Driver for security groups firewall in the L2 agent" msgstr "Driver per il firewall dei gruppi di sicurezza nell'agent L2" msgid "Driver to use for scheduling network to DHCP agent" msgstr "Driver da utilizzare per la pianificazione della rete nell'agent DHCP" msgid "Driver to use for scheduling router to a default L3 agent" msgstr "" "Driver da utilizzare per la pianificazione del router nell'agent L3 " "predefinito" msgid "" "Driver used for ipv6 prefix delegation. This needs to be an entry point " "defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg for " "entry points included with the neutron source." msgstr "" "Il driver utilizzato per la delega prefisso ipv6. Deve essere un punto di " "immissione definito nello spazio dei nomi neutron.agent.linux.pd_drivers. " "Consultare setup.cfg per i punti di immissione inclusi con l'origine neutron." #, python-format msgid "" "Duplicate L3HARouterAgentPortBinding is created for router(s) %(router)s. " "Database cannot be upgraded. Please, remove all duplicates before upgrading " "the database." msgstr "" "L3HARouterAgentPortBinding duplicato viene creato per i router %(router)s. " "Il database non può essere aggiornato. Rimuovere tutti i duplicati prima di " "aggiornare il database." msgid "Duplicate Security Group Rule in POST." msgstr "Regola del gruppo di sicurezza duplicata in POST." msgid "Duplicate address detected" msgstr "Rilevato indirizzo duplicato" #, python-format msgid "ERROR: %s" msgstr "ERRORE: %s" msgid "" "ERROR: Unable to find configuration file via the default search paths (~/." "neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!" msgstr "" "ERRORE: Impossibile trovare il file di configurazione utilizzando i percorsi " "di ricerca predefiniti (~/.neutron/, ~/, /etc/neutron/, /etc/) e l'opzione " "'--config-file'!" msgid "" "Either one of parameter network_id or router_id must be passed to _get_ports " "method." msgstr "" "Uno dei parametri network_id o router_id deve essere passato al metodo " "_get_ports." msgid "Either subnet_id or port_id must be specified" msgstr "È necessario specificare subnet_id o port_id" msgid "Enable HA mode for virtual routers." msgstr "Abilitare la modalità HA per i router virtuali." msgid "Enable SSL on the API server" msgstr "Abilitazione di SSL sul server API" msgid "" "Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 " "plugin using linuxbridge mechanism driver" msgstr "" "Abilitare VXLAN sull'agent. 
Può essere abilitata quando l'agent è gestito " "dal plugin ml2 utilizzando il driver del meccanismo linuxbridge" msgid "" "Enable services on an agent with admin_state_up False. If this option is " "False, when admin_state_up of an agent is turned False, services on it will " "be disabled. Agents with admin_state_up False are not selected for automatic " "scheduling regardless of this option. But manual scheduling to such agents " "is available if this option is True." msgstr "" "Abilitare i servizi sull'agent con admin_state_up False. Se questa opzione è " "False, quando admin_state_up di un agent è su False, verranno disabilitati i " "servizi su tale agent. Gli agent con admin_state_up False non vengono " "selezionati per la pianificazione automatica indipendentemente da questa " "opzione. Ma è disponibile la pianificazione manuale di tali agent se questa " "opzione è impostata su True." msgid "" "Enables IPv6 Prefix Delegation for automatic subnet CIDR allocation. Set to " "True to enable IPv6 Prefix Delegation for subnet allocation in a PD-capable " "environment. Users making subnet creation requests for IPv6 subnets without " "providing a CIDR or subnetpool ID will be given a CIDR via the Prefix " "Delegation mechanism. Note that enabling PD will override the behavior of " "the default IPv6 subnetpool." msgstr "" "Abilita la delegazione del prefisso IPv6 per l'allocazione CIDR della " "sottorete automatica. Impostare su True per abilitare la delegazione del " "prefisso IPv6 per l'allocazione della sottorete in un ambiente con capacità " "PD. Gli utenti che effettuano richieste di creazione di una sottorete per le " "sottoreti IPv6 senza fornire un CIDR o un ID pool di sottorete riceveranno " "un CIDR mediante il meccanismo di delegazione del prefisso. L'abilitazione " "di PD sostituirà il comportamento del pool di sottorete IPv6 predefinito." msgid "" "Enables the dnsmasq service to provide name resolution for instances via DNS " "resolvers on the host running the DHCP agent. Effectively removes the '--no-" "resolv' option from the dnsmasq process arguments. Adding custom DNS " "resolvers to the 'dnsmasq_dns_servers' option disables this feature." msgstr "" "Abilita il servizio dnsmasq a fornire la risoluzione dei nomi per le istanze " "mediante i resolver DNS sull'host che esegue l'agent DHCP. In effetti rimuove " "l'opzione '--no-resolv' dagli argomenti del processo dnsmasq. L'aggiunta di " "risolver DNS personalizzati all'opzione 'dnsmasq_dns_servers' disabilita " "questa funzione." #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "Errore %(reason)s durante l'operazione." #, python-format msgid "Error parsing dns address %s" msgstr "Errore durante l'analisi dell'indirizzo dns %s" #, python-format msgid "Error while reading %s" msgstr "Errore durante la lettura di %s" #, python-format msgid "" "Exceeded %s second limit waiting for address to leave the tentative state." msgstr "" "Superato il limite di %s secondi in attesa che l'indirizzo lasci lo stato " "tentative." msgid "Existing prefixes must be a subset of the new prefixes" msgstr "I prefissi esistenti devono essere un sottoinsieme dei nuovi prefissi" #, python-format msgid "" "Exit code: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: " "%(stderr)s" msgstr "" "Codice uscita: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: " "%(stderr)s" #, python-format msgid "Extension %(driver)s failed." msgstr "Estensione %(driver)s non riuscita." 
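# NOTE (editor): a minimal dhcp_agent.ini sketch of the dnsmasq resolver
# behaviour described above; 'dnsmasq_dns_servers' is quoted from the entry
# itself, the addresses and file location are illustrative assumptions:
#   [DEFAULT]
#   # forward instance DNS queries to explicit resolvers; setting this
#   # disables the host-resolver behaviour the entry describes
#   dnsmasq_dns_servers = 8.8.8.8,8.8.4.4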
#, python-format msgid "" "Extension driver %(driver)s required for service plugin %(service_plugin)s " "not found." msgstr "" "Driver di estensione %(driver)s richiesto per il plugin di servizio " "%(service_plugin)s non trovato." msgid "" "Extension to use alongside ml2 plugin's l2population mechanism driver. It " "enables the plugin to populate VXLAN forwarding table." msgstr "" "Estensione per utilizzare insieme del driver del meccanismo l2population del " "plugin m12. Essa abilita il plugin per popolare la tabella di inoltro VXLAN." #, python-format msgid "Extension with alias %s does not exist" msgstr "L'estensione con alias %s non esiste" msgid "Extensions list to use" msgstr "Elenco estensioni da utilizzare" #, python-format msgid "External IP %s is the same as the gateway IP" msgstr "L'IP esterno %s è uguale all'IP gateway" #, python-format msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found." msgstr "" "Impossibile ripianificare il router %(router_id)s: non è stato trovato " "nessun agent L3 adatto." #, python-format msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s." msgstr "" "Impossibile pianificare il router %(router_id)s per l'agent L3 %(agent_id)s." #, python-format msgid "" "Failed to create port on network %(network_id)s, because fixed_ips included " "invalid subnet %(subnet_id)s" msgstr "" "Impossibile creare la porta nella rete %(network_id)s perché fixed_ips ha " "incluso una sottorete %(subnet_id)s non valida" #, python-format msgid "Failed to locate source for %s." msgstr "Impossibile individuare l'origine per %s." msgid "Failed to remove supplemental groups" msgstr "Impossibile rimuovere i gruppi supplementari" #, python-format msgid "Failed to set gid %s" msgstr "Impossibile impostare il gid %s" #, python-format msgid "Failed to set uid %s" msgstr "Impossibile impostare l'uid %s" #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "Impossibile impostare la porta tunnel %(type)s su %(ip)s" msgid "Failure applying iptables rules" msgstr "Errore nell'applicazione di regole iptables" #, python-format msgid "Failure waiting for address %(address)s to become ready: %(reason)s" msgstr "" "Errore durante l'attesa della disponibilità dell'indirizzo %(address)s: " "%(reason)s" msgid "Flat provider networks are disabled" msgstr "Le reti flat del provider sono disabilitate" msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "Per i protocolli TCP/UDP, port_range_min deve essere <= port_range_max" msgid "Force ip_lib calls to use the root helper" msgstr "Forzare le chiamate ip_lib ad utilizzare root helper" msgid "Gateway IP version inconsistent with allocation pool version" msgstr "Versione IP gateway incoerente con la versione del pool di allocazione" msgid "Gateway is not valid on subnet" msgstr "Il gateway non è valido sulla sottorete" msgid "" "Group (gid or name) running metadata proxy after its initialization (if " "empty: agent effective group)." msgstr "" "Gruppo (gid o nome) che esegue il proxy di metadati dopo la relativa " "inizializzazione (se vuoto: gruppo operativo dell'agent)." msgid "Group (gid or name) running this process after its initialization" msgstr "" "Gruppo (gid o name) che esegue questo processo dopo la relativa " "inizializzazione" msgid "" "Hostname to be used by the Neutron server, agents and services running on " "this machine. All the agents and services running on this machine must use " "the same host value." 
msgstr "" "Il nome host da utilizzare dal server Neutron, gli agent e servizi in " "esecuzione su questa macchina. Tutti gli agent ed i servizi in esecuzione su " "questa macchina devono utilizzare lo stesso valore host." #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" "min) is missing." msgstr "" "Il codice ICMP (port-range-max) %(value)s è stato fornito, ma il tipo ICMP " "(port-range-min) manca." msgid "ID of network" msgstr "ID della rete" msgid "ID of network to probe" msgstr "ID di rete per probe" msgid "ID of probe port to delete" msgstr "ID della porta probe da eliminare" msgid "ID of probe port to execute command" msgstr "ID della porta probe per eseguire il comando" msgid "ID of the router" msgstr "ID del router" #, python-format msgid "IP address %(ip)s already allocated in subnet %(subnet_id)s" msgstr "Indirizzo IP %(ip)s già assegnato nella sottorete %(subnet_id)s" #, python-format msgid "IP address %(ip)s does not belong to subnet %(subnet_id)s" msgstr "L'indirizzo IP %(ip)s non appartiene alla sottorete %(subnet_id)s" msgid "IP allocation failed. Try again later." msgstr "Allocazione IP non riuscita. Provare successivamente." msgid "IP allocation requires subnet_id or ip_address" msgstr "L'assegnazione IP richiede subnet_id o ip_address" #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables rules:\n" "%s" msgstr "" "IPTablesManager.apply non è riuscito ad applicare la seguente serie di " "regole iptables:\n" "%s" msgid "IPv6 Address Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "" "La modalità indirizzo IPv6 deve essere SLAAC o stateless per la delega " "prefisso." msgid "IPv6 RA Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "" "La modalità RA IPv6 deve essere SLAAC o stateless per la delega prefisso." #, python-format msgid "" "IPv6 address %(ip)s cannot be directly assigned to a port on subnet " "%(subnet_id)s as the subnet is configured for automatic addresses" msgstr "" "L'indirizzo IPv6 %(ip)s non può essere assegnato direttamente ad una porta " "sulla sottorete %(subnet_id)s perché la sottorete è configurata per gli " "indirizzi automatici" #, python-format msgid "" "IPv6 subnet %s configured to receive RAs from an external router cannot be " "added to Neutron Router." msgstr "" "La sottorete IPv6 %s configurata per ricevere RA da un router esterno non " "può essere aggiunta a Neutron Router." msgid "" "If True, then allow plugins that support it to create VLAN transparent " "networks." msgstr "" "Se True, consentire ai plugin che lo supportano di creare reti VLAN " "trasparenti." msgid "Illegal IP version number" msgstr "Numero della versione IP non valido" msgid "" "In some cases the Neutron router is not present to provide the metadata IP " "but the DHCP server can be used to provide this info. Setting this value " "will force the DHCP server to append specific host routes to the DHCP " "request. If this option is set, then the metadata service will be activated " "for all the networks." msgstr "" "In alcuni casi il router Neutron non è presente per fornire l'IP dei " "metadati ma il server DHCP può essere utilizzato per fornire queste " "informazioni. L'impostazione di questo valore su True farà in modo che il " "server DHCP aggiunga instradamenti host specifici alla richiesta DHCP. Se " "questa opzione è impostata, il servizio di metadati verrà attivato per tutte " "le reti." 
msgid "" "Indicates that this L3 agent should also handle routers that do not have an " "external network gateway configured. This option should be True only for a " "single agent in a Neutron deployment, and may be False for all agents if all " "routers must have an external network gateway." msgstr "" "Indica che questo agent L3 deve anche gestire i router che non hanno un " "gateway di rete esterna configurato. Questa opzione deve essere True solo " "per un singolo agent di una distribuzione Neutron e può essere False per " "tutti gli agent se tutti i router devono avere un gateway di rete esterna." #, python-format msgid "Instance of class %(module)s.%(class)s must contain _cache attribute" msgstr "" "L'istanza di classe %(module)s.%(class)s deve contenere l'attributo _cache" #, python-format msgid "Insufficient prefix space to allocate subnet size /%s" msgstr "" "Spazio prefisso insufficiente per assegnare la dimensione della sottorete /%s" msgid "Insufficient rights for removing default security group." msgstr "" "Diritti non sufficienti per rimuovere il gruppo di sicurezza predefinito." msgid "" "Integration bridge to use. Do not change this parameter unless you have a " "good reason to. This is the name of the OVS integration bridge. There is one " "per hypervisor. The integration bridge acts as a virtual 'patch bay'. All VM " "VIFs are attached to this bridge and then 'patched' according to their " "network connectivity." msgstr "" "Il bridge di integrazione da utilizzare. Non modificare questo parametro a " "meno che non si abbia una buona ragione per farlo. Questo è il nome del " "bridge di integrazione OVS. Esiste un bridge per ciascun hypervisor. Il " "bridge di integrazione agisce come un 'patch bay' virtuale. Tutti i VIF VM " "sono collegati a questo bridge e quindi 'corretti' in base alla rispettiva " "connettività di rete." msgid "Interface to monitor" msgstr "Interfaccia da monitorare" msgid "" "Interval between checks of child process liveness (seconds), use 0 to disable" msgstr "" "Intervallo tra i controlli dell'attività del processo child (secondi), " "utilizzare 0 per disabilitare" msgid "Interval between two metering measures" msgstr "Intervallo tra due misure" msgid "Interval between two metering reports" msgstr "Intervallo tra due report di misurazione" #, python-format msgid "Invalid Device %(dev_name)s: %(reason)s" msgstr "Dispositivo non valido %(dev_name)s: %(reason)s" #, python-format msgid "" "Invalid action '%(action)s' for object type '%(object_type)s'. Valid " "actions: %(valid_actions)s" msgstr "" "Azione non valida '%(action)s' per tipo di oggetto '%(object_type)s'. Azioni " "valide: %(valid_actions)s" #, python-format msgid "" "Invalid authentication type: %(auth_type)s, valid types are: " "%(valid_auth_types)s" msgstr "" "Tipo di autenticazione non valido: %(auth_type)s, i tipi validi sono: " "%(valid_auth_types)s" #, python-format msgid "Invalid ethertype %(ethertype)s for protocol %(protocol)s." msgstr "ethertype %(ethertype)s non valido per il protocollo %(protocol)s." 
#, python-format msgid "Invalid format: %s" msgstr "Formato non valido: %s" #, python-format msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s" msgstr "" "Stato istanza non valido: %(state)s, gli stati validi sono: %(valid_states)s" #, python-format msgid "Invalid mapping: '%s'" msgstr "Associazione non valida: '%s'" #, python-format msgid "Invalid pci slot %(pci_slot)s" msgstr "pci slot non valido %(pci_slot)s" #, python-format msgid "Invalid provider format. Last part should be 'default' or empty: %s" msgstr "" "Formato del provider non valido. L'ultima parte deve essere 'default' o " "vuota: %s" #, python-format msgid "Invalid resource type %(resource_type)s" msgstr "Tipo di risorsa non valido %(resource_type)s" #, python-format msgid "Invalid route: %s" msgstr "Route invalido: %s" msgid "Invalid service provider format" msgstr "Formato del provider del servizio non valido" #, python-format msgid "" "Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255." msgstr "" "Valore non valido per ICMP %(field)s (%(attr)s) %(value)s. Deve essere " "compreso tra 0 e 255." #, python-format msgid "Invalid value for port %(port)s" msgstr "Valore invalido per la porta %(port)s" msgid "" "Iptables mangle mark used to mark ingress from external network. This mark " "will be masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "Indicatore mangle iptables per contrassegnare l'ingresso dalla rete esterna. " "Tale indicatoreverrà mascherato con 0xffff in modo che verranno utilizzati " "solo i 16 bit inferiori. " msgid "" "Iptables mangle mark used to mark metadata valid requests. This mark will be " "masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "Indicatore mangle iptables per contrassegnare le richieste valide di " "metadati. Tale indicatore verràmascherato con 0xffff in modo che verranno " "utilizzati solo i 16 bit inferiori. " msgid "Keepalived didn't respawn" msgstr "Keepalived non ha eseguito la nuova generazione" msgid "Keepalived didn't spawn" msgstr "Keepalived non ha eseguito la generazione" #, python-format msgid "" "Kernel HZ value %(value)s is not valid. This value must be greater than 0." msgstr "" "Il valore Kernel HZ %(value)s non è valido. Questo valore deve essere " "maggiore di 0." msgid "L3 agent failure to setup NAT for floating IPs" msgstr "Errore dell'agent L3 durante la configurazione di NAT per IP mobili" msgid "L3 agent failure to setup floating IPs" msgstr "Errore dell'agent L3 durante la configurazione di IP mobili" msgid "Limit number of leases to prevent a denial-of-service." msgstr "Limitare il numero di lease per evitare un denial-of-service." msgid "List of :" msgstr "Elenco di :" msgid "" "List of :: or " "specifying physical_network names usable for VLAN provider and tenant " "networks, as well as ranges of VLAN tags on each available for allocation to " "tenant networks." msgstr "" "Elenco di :: o che " "specificano nomi physical_network utilizzabili per le reti tenant e provider " "VLAN, come anche gli intervalli di tag VLAN su ciascuno disponibile per " "l'assegnazione alle reti tenant." msgid "" "List of network type driver entrypoints to be loaded from the neutron.ml2." "type_drivers namespace." msgstr "" "Elenco dei punti di ingresso del driver del tipo di rete da caricare dallo " "spazio dei nomi neutron.ml2.type_drivers." msgid "" "List of physical_network names with which flat networks can be created. 
Use " "default '*' to allow flat networks with arbitrary physical_network names. " "Use an empty list to disable flat networks." msgstr "" "Elenco di nomi physical_network con cui possono essere create reti flat. " "Utilizzare il valore '*' predefinito per consentire reti flat con nomi " "physical_network arbitrari. Utilizzare un elenco vuoto per disabilitare le " "reti flat." msgid "Location for Metadata Proxy UNIX domain socket." msgstr "Ubicazione per il socket del dominio UNIX del proxy di metadati." msgid "Location of Metadata Proxy UNIX domain socket" msgstr "Ubicazione del socket del dominio UNIX del proxy di metadati" msgid "Location to store DHCP server config files." msgstr "Ubicazione per archiviare i file di configurazione del server DHCP." msgid "Location to store IPv6 PD files." msgstr "Ubicazione per archiviare i file PD IPv6." msgid "Location to store IPv6 RA config files" msgstr "Ubicazione per memorizzare i file di configurazione IPv6 RA" msgid "Location to store child pid files" msgstr "Ubicazione per archiviare i file pid dell'elemento child" msgid "Log agent heartbeats" msgstr "Registra gli heartbeat dell'agent" msgid "" "MTU of the underlying physical network. Neutron uses this value to calculate " "MTU for all virtual network components. For flat and VLAN networks, neutron " "uses this value without modification. For overlay networks such as VXLAN, " "neutron automatically subtracts the overlay protocol overhead from this " "value. Defaults to 1500, the standard value for Ethernet." msgstr "" "MTU della rete fisica sottostante. Neutron utilizza questo valore per " "calcolare la MTU per tutti i componenti della rete virtuale. Per le reti " "flat e VLAN, neutron utilizza questo valore senza modifica. Per le reti di " "sovrapposizione come VXLAN, neutron sottrae automaticamente l'overhead del " "protocollo di sovrapposizione da questo valore. Il valore predefinito è " "impostato su 1500, il valore standard per Ethernet." msgid "MTU size of veth interfaces" msgstr "Dimensione MTU delle interfacce veth" msgid "Make the l2 agent run in DVR mode." msgstr "Eseguire l'agent L2 in modalità DVR." msgid "Malformed request body" msgstr "Corpo richiesta non corretto" msgid "MaxRtrAdvInterval setting for radvd.conf" msgstr "Impostazione MaxRtrAdvInterval per radvd.conf" msgid "Maximum number of DNS nameservers per subnet" msgstr "Numero massimo di server dei nomi DNS per la sottorete" msgid "" "Maximum number of L3 agents which a HA router will be scheduled on. If it is " "set to 0 then the router will be scheduled on every agent." msgstr "" "Numero massimo di agent L3 su cui verrà pianificato un router HA. Se è " "impostato su 0, il router verrà pianificato su ciascun agent." msgid "Maximum number of allowed address pairs" msgstr "Numero massimo di coppie di indirizzi consentito" msgid "Maximum number of host routes per subnet" msgstr "Numero massimo di route host per la sottorete" msgid "Maximum number of routes per router" msgstr "Numero massimo di instradamenti per router" msgid "" "Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': deduce " "mode from metadata_proxy_user/group values, 'user': set metadata proxy " "socket mode to 0o644, to use when metadata_proxy_user is agent effective " "user or root, 'group': set metadata proxy socket mode to 0o664, to use when " "metadata_proxy_group is agent effective group or root, 'all': set metadata " "proxy socket mode to 0o666, to use otherwise." 
msgstr "" "Modalità socket del dominio UNIX del proxy di metadati, 4 valori consentiti: " "'deduce': modalità deduzione da valori metadata_proxy_user/group, 'user': " "impostare modalità socket proxy metadati su 0o644, da usare quando " "metadata_proxy_user è l'utente effettivo agent o root, 'group': impostare " "modalità socket proxy metadati su 0o664, da usare quando " "metadata_proxy_group è gruppo effettivo agent o root, 'all': impostare " "modalità socket proxy metadati su 0o666, per usare altrimenti." msgid "Metering driver" msgstr "Driver di misurazione" msgid "MinRtrAdvInterval setting for radvd.conf" msgstr "Impostazione MinRtrAdvInterval per radvd.conf" msgid "Minimize polling by monitoring ovsdb for interface changes." msgstr "" "Ridurre al minimo il polling controllando ovsdb per le modifiche " "all'interfaccia." #, python-format msgid "Missing key in mapping: '%s'" msgstr "Chiave mancante nell'associazione: '%s'" msgid "" "Multicast group for VXLAN. When configured, will enable sending all " "broadcast traffic to this multicast group. When left unconfigured, will " "disable multicast VXLAN mode." msgstr "" "Gruppo multicast per VXLAN. Quando configurato, abilita l'invio di tutto il " "traffico broadcast a questo gruppo multicast. Quando non configurato, " "disabilita la modalità multicast VXLAN." msgid "" "Multicast group(s) for vxlan interface. A range of group addresses may be " "specified by using CIDR notation. Specifying a range allows different VNIs " "to use different group addresses, reducing or eliminating spurious broadcast " "traffic to the tunnel endpoints. To reserve a unique group for each possible " "(24-bit) VNI, use a /8 such as 239.0.0.0/8. This setting must be the same on " "all the agents." msgstr "" "Gruppo multicast per l'interfaccia vxlan. Un intervallo di indirizzi di " "gruppo può essere specificato utilizzando la notazione CIDR. La definizione " "di un intervallo consente a VNI diversi di utilizzare indirizzi di gruppo " "diversi, riducendo o eliminando il traffico di broadcast spurio agli " "endpoint del tunnel. Per riservare un gruppo univoco per ciascun VNI " "possibile (24-bit), utilizzare /8, ad esempio, 239.0.0.0/8. Questa " "impostazione deve essere la stessa su tutti gli agent." #, python-format msgid "Multiple default providers for service %s" msgstr "Più provider predefiniti per il servizio %s" #, python-format msgid "Multiple providers specified for service %s" msgstr "Più provider specificati per il servizio %s" msgid "Multiple tenant_ids in bulk security group rule create not allowed" msgstr "" "La creazione in massa di più tenant_id nella regola del gruppo di sicurezza " "non è consentita" msgid "Must also specify protocol if port range is given." msgstr "" "È necessario anche specificare il protocollo se è fornito l'intervallo di " "porta." msgid "Must specify one or more actions on flow addition or modification" msgstr "" "È necessario specificare una o più azioni nell'aggiunta o modifica del flusso" msgid "Name of Open vSwitch bridge to use" msgstr "Nome del bridge Open vSwitch da utilizzare" msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "" "Nome della regione nova da utilizzare. Utile nel caso in cui keystone " "gestisce più di una regione." 
msgid "Namespace of the router" msgstr "Spazio dei nomi del router" msgid "Native pagination depend on native sorting" msgstr "La paginazione nativa deipende dall'ordinamento nativo" #, python-format msgid "" "Need to apply migrations from %(project)s contract branch. This will require " "all Neutron server instances to be shutdown before proceeding with the " "upgrade." msgstr "" "È necessario applicare le migrazioni dal ramo di contratto %(project)s. " "Prima di procedere con l'aggiornamento è necessario che tutte le istanze del " "server Neutron vengano chiuse." msgid "Negative delta (downgrade) not supported" msgstr "Delta negativo (riduzione) non supportato" msgid "Negative relative revision (downgrade) not supported" msgstr "Revisione relativa negativa (riduzione) non suportata" #, python-format msgid "Network %s does not contain any IPv4 subnet" msgstr "La rete %s non contiene alcuna sottorete IPv4" #, python-format msgid "Network %s is not a valid external network" msgstr "La rete %s non è una rete esterna valida" #, python-format msgid "Network %s is not an external network" msgstr "La rete %s non è una rete esterna" #, python-format msgid "" "Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges " "%(excluded_ranges)s was not found." msgstr "" "Rete di dimensione %(size)s, dall'intervallo IP %(parent_range)s esclusi gli " "intervalli IP %(excluded_ranges)s non trovata." #, python-format msgid "Network type value '%s' not supported" msgstr "Valore del tipo di rete '%s' non supportato" msgid "Network type value needed by the ML2 plugin" msgstr "Valore Tipo di rete richiesto dal plugin ML2" msgid "Neutron core_plugin not configured!" msgstr "Neutron core_plugin non configurato!" msgid "No default router:external network" msgstr "Nessuna rete router:external predefinita" #, python-format msgid "No default subnetpool found for IPv%s" msgstr "Nessun pool di sottorete predefinito trovato per IPv%s" msgid "No default subnetpools defined" msgstr "Nessun pool di sottorete predefinito definito" #, python-format msgid "No more IP addresses available for subnet %(subnet_id)s." msgstr "Indirizzi IP non più disponibili per la sottorete %(subnet_id)s." msgid "No offline migrations pending." msgstr "Nessuna migrazione offline in sospeso." #, python-format msgid "No shared key in %s fields" msgstr "Nessuna chiave condivisa in %s campi" msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "" "Attualmente non è consentito assegnare manualmente un router ad un agent in " "modalità 'dvr'." msgid "Not allowed to manually remove a router from an agent in 'dvr' mode." msgstr "" "Attualmente non è consentito rimuovere manualmente un router da un agent in " "modalità 'dvr'." msgid "" "Number of DHCP agents scheduled to host a tenant network. If this number is " "greater than 1, the scheduler automatically assigns multiple DHCP agents for " "a given tenant network, providing high availability for DHCP service." msgstr "" "Numero di agent DHCP pianificati per ospitare una rete titolare. Se questo " "numero è maggiore di 1, lo scheduler assegna automaticamente più agent DHCP " "per una data rete titolare, fornendo l'alta disponibilità per il servizio " "DHCP." 
msgid "Number of backlog requests to configure the metadata server socket with" msgstr "" "Numero di richieste di backlog con cui configurare il socket server dei " "metadati" msgid "Number of backlog requests to configure the socket with" msgstr "Numero di richieste di backlog per configurare il socket con" msgid "" "Number of bits in an ipv4 PTR zone that will be considered network prefix. " "It has to align to byte boundary. Minimum value is 8. Maximum value is 24. " "As a consequence, range of values is 8, 16 and 24" msgstr "" "Il numero di bit in una zona PTR ipv4 che verrà considerato prefisso di " "rete. Deve allinearsi al limite di byte. Il valore minimo è 8. Il valore " "massimo è 24. Di conseguenza, l'intervallo di valori è 8, 16 e 24" msgid "" "Number of bits in an ipv6 PTR zone that will be considered network prefix. " "It has to align to nyble boundary. Minimum value is 4. Maximum value is 124. " "As a consequence, range of values is 4, 8, 12, 16,..., 124" msgstr "" "Il numero di bit in una zona PTR ipv6 che verrà considerato prefisso di " "rete. Deve allinearsi al limite nyble. Il valore minimo è 4. Il valore " "massimo è 124. Di conseguenza, l'intervallo di valori è 4, 8, 12, 16, ...., " "124" msgid "" "Number of floating IPs allowed per tenant. A negative value means unlimited." msgstr "" "Numero di IP mobili consentiti per tenant. Un valore negativo indica un " "numero illimitato." msgid "" "Number of networks allowed per tenant. A negative value means unlimited." msgstr "" "Numero di reti consentite per tenant. Un valore negativo indica un numero " "illimitato." msgid "Number of ports allowed per tenant. A negative value means unlimited." msgstr "" "Numero di porte consentite per tenant. Un valore negativo indica un numero " "illimitato." msgid "Number of routers allowed per tenant. A negative value means unlimited." msgstr "" "Numero di router consentiti per tenant. Un valore negativo indica un numero " "illimitato." msgid "" "Number of seconds between sending events to nova if there are any events to " "send." msgstr "" "Numero di secondi tra l'invio di eventi a nova se vi sono eventuali eventi " "da inviare." msgid "Number of seconds to keep retrying to listen" msgstr "Numero di secondi per trattenere i nuovi tentativi di ascolto" msgid "" "Number of security groups allowed per tenant. A negative value means " "unlimited." msgstr "" "Numero di gruppi di sicurezza consentiti per tenant. Un valore negativo " "indica un numero illimitato." msgid "" "Number of security rules allowed per tenant. A negative value means " "unlimited." msgstr "" "Numero di regole di sicurezza consentite per tenant. Un valore negativo " "indica un numero illimitato." msgid "" "Number of separate worker processes for metadata server (defaults to half of " "the number of CPUs)" msgstr "" "Numero di processi worker separati per server di metadati (il valore " "predefinito è metà del numero di CPU)" msgid "Number of subnets allowed per tenant, A negative value means unlimited." msgstr "" "Numero di sottoreti consentite per tenant. Un valore negativo indica un " "numero illimitato." msgid "" "Number of threads to use during sync process. Should not exceed connection " "pool size configured on server." msgstr "" "Numero di thread da utilizzare durante il processo di sincronizzazione. Non " "deve superare la dimensione del pool di connessione configurata sul server." msgid "OK" msgstr "OK" msgid "" "OVS datapath to use. 'system' is the default value and corresponds to the " "kernel datapath. 
To enable the userspace datapath set this value to 'netdev'." msgstr "" "Datapath OVS da utilizzare. 'system' è il valore predefinito e corrisponde " "al datapath del kernel. Per abilitare il datapath dello spazio utente, " "impostare questo valore su 'netdev'." msgid "OVS vhost-user socket directory." msgstr "Directory socket vhost-user OVS." msgid "Only admin can view or configure quota" msgstr "Solo l'admin può visualizzare o configurare una quota" msgid "Only admin is authorized to access quotas for another tenant" msgstr "Solo l'admin è autorizzato ad accedere alle quote per un altro tenant" msgid "Only admins can manipulate policies on objects they do not own" msgstr "" "Solo gli admin possono gestire le politiche su oggetti che non possiedono" msgid "Only allowed to update rules for one security profile at a time" msgstr "" "È consentito aggiornare le regole solo per un profilo di sicurezza alla volta" msgid "Only remote_ip_prefix or remote_group_id may be provided." msgstr "È possibile fornire solo remote_ip_prefix o remote_group_id." #, python-format msgid "Operation not supported on device %(dev_name)s" msgstr "Operazione non supportata sul dispositivo %(dev_name)s" msgid "" "Ordered list of network_types to allocate as tenant networks. The default " "value 'local' is useful for single-box testing but provides no connectivity " "between hosts." msgstr "" "Elenco ordinato di network_types da assegnare come reti tenant. Il valore " "predefinito 'local' è utile per la verifica single-box ma non fornisce " "alcuna connettività tra host." msgid "Override the default dnsmasq settings with this file." msgstr "" "Sostituire le impostazioni dnsmasq predefinite utilizzando questo file." msgid "Owner type of the device: network/compute" msgstr "Tipo di proprietario del dispositivo: network/compute" msgid "POST requests are not supported on this resource." msgstr "Le richieste POST non sono supportate su questa risorsa." #, python-format msgid "Package %s not installed" msgstr "Pacchetto %s non installato" #, python-format msgid "Parsing bridge_mappings failed: %s." msgstr "Analisi bridge_mappings non riuscita: %s." msgid "Password for connecting to designate in admin context" msgstr "Password per la connessione a designate nel contesto admin" msgid "Path to PID file for this process" msgstr "Percorso per il file PID per questo processo" msgid "Path to the router directory" msgstr "Percorso per la directory del router" msgid "Peer patch port in integration bridge for tunnel bridge." msgstr "Porta patch peer nel bridge di integrazione per il bridge tunnel." msgid "Peer patch port in tunnel bridge for integration bridge." msgstr "Porta patch peer nel bridge tunnel per il bridge di integrazione." msgid "Phase upgrade options do not accept revision specification" msgstr "" "Le opzioni di aggiornamento fase non accettano la specifica di revisione" msgid "Ping timeout" msgstr "Timeout di ping" #, python-format msgid "Port %(id)s does not have fixed ip %(address)s" msgstr "La porta %(id)s non dispone di un ip fisso %(address)s" #, python-format msgid "Port %s does not exist" msgstr "La porta %s non esiste" #, python-format msgid "" "Port %s has multiple fixed IPv4 addresses. Must provide a specific IPv4 " "address when assigning a floating IP" msgstr "" "La porta %s dispone di più indirizzi IPv4 fissi. È necessario fornirne uno " "specifico durante l'assegnazione di un IP mobile" msgid "Prefix Delegation can only be used with IPv6 subnets."
msgstr "La delega prefisso può essere utilizzata solo con sottoreti IPv6." msgid "Private key of client certificate." msgstr "Chiave privata del certificato client." #, python-format msgid "Probe %s deleted" msgstr "Probe %s eliminato" #, python-format msgid "Probe created : %s " msgstr "Probe creato : %s " msgid "Process is already started" msgstr "Processo già avviato" msgid "Process is not running." msgstr "Il processo non è in esecuzione." msgid "Protocol to access nova metadata, http or https" msgstr "Protocollo per accedere ai metadati nova, http o https" #, python-format msgid "Provider name %(name)s is limited by %(len)s characters" msgstr "Il nome del provider %(name)s è limitato a %(len)s caratteri" #, python-format msgid "RBAC policy of type %(object_type)s with ID %(id)s not found" msgstr "Politica RBAC di tipo %(object_type)s con ID %(id)s non trovata" #, python-format msgid "" "RBAC policy on object %(object_id)s cannot be removed because other objects " "depend on it.\n" "Details: %(details)s" msgstr "" "La politica RBAC sull'oggetto %(object_id)s non può essere rimossa perché " "altri oggetti dipendono da essa.\n" "Dettagli: %(details)s" msgid "" "Range of seconds to randomly delay when starting the periodic task scheduler " "to reduce stampeding. (Disable by setting to 0)" msgstr "" "Intervallo di secondi per ritardare casualmente l'avvio di attività " "periodiche programma di pianificazione per ridurre la modifica data/ora. " "(Disabilitare impostando questa opzione a 0)" msgid "Ranges must be in the same IP version" msgstr "Gli intervalli devono essere nella stessa versione IP" msgid "Ranges must be netaddr.IPRange" msgstr "Gli intervalli devono essere netaddr.IPRange" msgid "Ranges must not overlap" msgstr "Gli intervalli non devono sovrapporsi" #, python-format msgid "" "Release aware branch labels (%s) are deprecated. Please switch to expand@ " "and contract@ labels." msgstr "" "Le etichette ramo che riconoscono la release (%s) sono sconsigliate. Passare " "alle etichette expand@ e contract@." msgid "Remote metadata server experienced an internal server error." msgstr "Il server di metadati remoto ha rilevato un errore di server interno." msgid "" "Repository does not contain HEAD files for contract and expand branches." msgstr "" "Il repository non contiene i file HEAD per i rami di contratto ed espansione." msgid "" "Representing the resource type whose load is being reported by the agent. " "This can be \"networks\", \"subnets\" or \"ports\". When specified (Default " "is networks), the server will extract particular load sent as part of its " "agent configuration object from the agent report state, which is the number " "of resources being consumed, at every report_interval.dhcp_load_type can be " "used in combination with network_scheduler_driver = neutron.scheduler." "dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is " "WeightScheduler, dhcp_load_type can be configured to represent the choice " "for the resource being balanced. Example: dhcp_load_type=networks" msgstr "" "Rappresentazione del tipo di risorsa il cui carico è segnalato dall'agent. " "Può essere \"networks\", \"subnets\" o \"ports\". Quando specificato " "(L'impostazione predefinita è networks), il server estrarrà il carico " "particolare inviato come parte del relativo oggetto di configurazione agent " "dallo stato del report agent, il quale rappresenta il numero di risorse " "utilizzate, ad ogni report_interval. 
dhcp_load_type può essere utilizzato in " "combinazione con network_scheduler_driver = neutron.scheduler." "dhcp_agent_scheduler.WeightScheduler Quando network_scheduler_driver è " "WeightScheduler, dhcp_load_type può essere configurato per rappresentare la " "scelta per la risorsa in fase di bilanciamento. Esempio: " "dhcp_load_type=networks" msgid "Request Failed: internal server error while processing your request." msgstr "" "Richiesta non riuscita: errore interno del server durante l'elaborazione " "della richiesta." msgid "" "Reset flow table on start. Setting this to True will cause brief traffic " "interruption." msgstr "" "Reimpostare la tabella dei flussi all'avvio. Impostandolo su True si provoca " "una breve interruzione del traffico." #, python-format msgid "Resource %(resource)s %(resource_id)s could not be found." msgstr "Impossibile trovare la risorsa %(resource)s %(resource_id)s." #, python-format msgid "Resource %(resource_id)s of type %(resource_type)s not found" msgstr "Risorsa %(resource_id)s di tipo %(resource_type)s non trovata" #, python-format msgid "" "Resource '%(resource_id)s' is already associated with provider " "'%(provider)s' for service type '%(service_type)s'" msgstr "" "La risorsa '%(resource_id)s' è già associata al provider '%(provider)s' per " "il tipo di servizio '%(service_type)s'" msgid "Resource body required" msgstr "Corpo risorsa richiesto" msgid "Resource not found." msgstr "Risorsa non trovata." msgid "Resources required" msgstr "Risorse richieste" msgid "" "Root helper application. Use 'sudo neutron-rootwrap /etc/neutron/rootwrap." "conf' to use the real root filter facility. Change to 'sudo' to skip the " "filtering and just run the command directly." msgstr "" "Applicazione root helper. Utilizzare 'sudo neutron-rootwrap /etc/neutron/" "rootwrap.conf' per utilizzare la funzione di filtro root reale. Passare a " "'sudo' per ignorare il filtro e semplicemente eseguire il comando " "direttamente." msgid "Root permissions are required to drop privileges." msgstr "Per rilasciare i privilegi sono necessarie le autorizzazioni root." #, python-format msgid "Router already has a port on subnet %s" msgstr "Il router dispone già di una porta sulla sottorete %s" msgid "Router port must have at least one fixed IP" msgstr "La porta del router deve avere almeno un IP fisso" #, python-format msgid "Running %(cmd)s (%(desc)s) for %(project)s ..." msgstr "Esecuzione di %(cmd)s (%(desc)s) per %(project)s ..." #, python-format msgid "Running %(cmd)s for %(project)s ..." msgstr "Esecuzione di %(cmd)s per %(project)s ..." msgid "" "Seconds between nodes reporting state to server; should be less than " "agent_down_time, best if it is half or less than agent_down_time." msgstr "" "Secondi tra lo stato riportato dai nodi al server; deve essere inferiore ad " "agent_down_time, è preferibile che sia la metà o meno di agent_down_time." msgid "" "Seconds to regard the agent is down; should be at least twice " "report_interval, to be sure the agent is down for good." msgstr "" "Secondi per considerare che l'agent è inattivo; deve essere almeno il doppio " "di report_interval, per essere sicuri che l'agent sia definitivamente " "inattivo." #, python-format msgid "Security Group %(id)s %(reason)s." msgstr "Gruppo di sicurezza %(id)s %(reason)s." #, python-format msgid "Security Group Rule %(id)s %(reason)s." msgstr "Regola gruppo di sicurezza %(id)s %(reason)s."
#, python-format msgid "Security group %(id)s does not exist" msgstr "Il gruppo di sicurezza %(id)s non esiste" #, python-format msgid "Security group rule %(id)s does not exist" msgstr "La regola del gruppo di sicurezza %(id)s non esiste" #, python-format msgid "Security group rule already exists. Rule id is %(rule_id)s." msgstr "" "La regola del gruppo di sicurezza già esiste. L'ID regola è %(rule_id)s." #, python-format msgid "" "Security group rule for ethertype '%(ethertype)s' not supported. Allowed " "values are %(values)s." msgstr "" "Regola del gruppo di sicurezza per ethertype '%(ethertype)s' non supportata. " "I valori consentiti sono %(values)s." #, python-format msgid "" "Security group rule protocol %(protocol)s not supported. Only protocol " "values %(values)s and integer representations [0 to 255] are supported." msgstr "" "Il protocollo della regole del gruppo di sicurezza %(protocol)s non è " "supportato. Solo i valori del protocollo %(values)s e le rappresentazioni " "numeri interi [0-255] sono supportati." msgid "" "Send notification to nova when port data (fixed_ips/floatingip) changes so " "nova can update its cache." msgstr "" "Invia una notifica a nova quando i dati porta (fixed_ips/floatingip) vengono " "modificati e in tal modo nova può aggiornare la propria cache." msgid "Send notification to nova when port status changes" msgstr "Invia una notifica a nova quando lo stato della porta cambia" #, python-format msgid "" "Service provider '%(provider)s' could not be found for service type " "%(service_type)s" msgstr "" "Provider del servizio '%(provider)s' non trovato per il tipo di servizio " "%(service_type)s" msgid "Service to handle DHCPv6 Prefix delegation." msgstr "Il servizio per gestire la delega prefisso DHCPv6." #, python-format msgid "Service type %(service_type)s does not have a default service provider" msgstr "" "Il tipo del servizio %(service_type)s non ha un provider del servizio " "predefinito" msgid "" "Set new timeout in seconds for new rpc calls after agent receives SIGTERM. " "If value is set to 0, rpc timeout won't be changed" msgstr "" "Impostare il nuovo timeout in secondi per le nuove chiamate rpc dopo che " "l'agent riceve SIGTERM. Se il valore è impostato su 0, il timeout rpc non " "verrà modificato" msgid "" "Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "" "Impostare o annullare l'impostazione del bit del frammento non DF sul " "pacchetto IP in uscita che trasporta il tunnel GRE/VXLAN." msgid "Shared address scope can't be unshared" msgstr "Impossibile annullare la condivisione di un ambito indirizzo condiviso" msgid "String prefix used to match IPset names." msgstr "Prefisso stringa utilizzato per la corrispondenza con i nomi IPset." #, python-format msgid "Sub-project %s not installed." msgstr "Sottoprogetto %s non installato." msgid "Subnet for router interface must have a gateway IP" msgstr "La sottorete per l'interfaccia del router deve avere un IP gateway" msgid "Subnet pool has existing allocations" msgstr "Il pool di sottoreti ha assegnazioni esistenti" msgid "Subnet used for the l3 HA admin network." msgstr "Sottorete utilizzata per la rete admin HA L3" msgid "" "System-wide flag to determine the type of router that tenants can create. " "Only admin can override." msgstr "" "L'indicatore lato sistema per determinare il tipo di router che i tenant " "possono creare. Solo l'Admin può sovrascrivere." msgid "TCP Port used by Neutron metadata namespace proxy." 
msgstr "Porta TCP utilizzata dal proxy spazio dei nomi dei metadati Neutron." msgid "TCP Port used by Nova metadata server." msgstr "Porta TCP utilizzata dal server di metadati Nova." msgid "TTL for vxlan interface protocol packets." msgstr "Pacchetti del protocollo dell'interfaccia TTL per vxlan." #, python-format msgid "Tag %(tag)s could not be found." msgstr "Impossibile trovare il tag %(tag)s." #, python-format msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" msgstr "" "Tenant %(tenant_id)s non consentito per creare %(resource)s su questa rete" msgid "Tenant id for connecting to designate in admin context" msgstr "ID tenant per la connessione da designare nel contesto admin" msgid "Tenant name for connecting to designate in admin context" msgstr "Nome tenant per la connessione da designare nel contesto admin" msgid "" "The DHCP server can assist with providing metadata support on isolated " "networks. Setting this value to True will cause the DHCP server to append " "specific host routes to the DHCP request. The metadata service will only be " "activated when the subnet does not contain any router port. The guest " "instance must be configured to request host routes via DHCP (Option 121). " "This option doesn't have any effect when force_metadata is set to True." msgstr "" "Il server DHCP può fornire il supporto di metadati nelle reti isolate. " "L'impostazione di questo valore su True farà in modo che il server DHCP " "aggiunga instradamenti host specifici alla richiesta DHCP. Il servizio di " "metadati verrà attivato solo quando la sottorete non contiene porte del " "router. L'istanza guest deve essere configurata per richiedere gli " "instradamenti host mediante DHCP (Opzione 121). Questa opzione non ha alcun " "effetto quando force_metadata è impostato su True." msgid "The UDP port to use for VXLAN tunnels." msgstr "La porta UDP da utilizzare per i tunnel VXLAN." #, python-format msgid "" "The address allocation request could not be satisfied because: %(reason)s" msgstr "" "La richiesta di assegnazione dell'indirizzo non può essere soddisfatta " "perché: %(reason)s" msgid "The advertisement interval in seconds" msgstr "L'intervallo di annuncio in secondi" msgid "" "The base MAC address Neutron will use for VIFs. The first 3 octets will " "remain unchanged. If the 4th octet is not 00, it will also be used. The " "others will be randomly generated." msgstr "" "L'indirizzo MAC di base utilizzato da Neutron per i VIF. I primi 3 ottetti " "rimangono inalterati. Se il quarto ottetto non è 00, potrà anche essere " "utilizzato. Gli altri vengono generati casualmente. " msgid "" "The base mac address used for unique DVR instances by Neutron. The first 3 " "octets will remain unchanged. If the 4th octet is not 00, it will also be " "used. The others will be randomly generated. The 'dvr_base_mac' *must* be " "different from 'base_mac' to avoid mixing them up with MAC's allocated for " "tenant ports. A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00. " "The default is 3 octet" msgstr "" "L'indirizzo mac di base utilizzato per istanze DVR univoche da Neutron. I " "primi 3 ottetti rimangono inalterati. Se il quarto ottetto non è 00, potrà " "anche essere utilizzato. Gli altri vengono generati casualmente. " "'dvr_base_mac' *deve* essere diverso da 'base_mac' per evitare la confusione " "con i MAC assegnati per le porte titolari. Un esempio di 4 ottetti è " "dvr_base_mac = fa:16:3f:4f:00:00. 
Il valore predefinito è 3 ottetti" msgid "The core plugin Neutron will use" msgstr "Il plugin principale che Neutron utilizzerà" msgid "The driver used to manage the DHCP server." msgstr "Il driver utilizzato per gestire il server DHCP." msgid "The driver used to manage the virtual interface." msgstr "Il driver utilizzato per gestire l'interfaccia virtuale." msgid "" "The email address to be used when creating PTR zones. If not specified, the " "email address will be admin@" msgstr "" "L'indirizzo email da utilizzare durante la creazione di zone PTR. Se non " "specificato, l'indirizzo email sarà admin@" msgid "" "The maximum number of items returned in a single response, value was " "'infinite' or negative integer means no limit" msgstr "" "Il numero massimo di elementi restituiti in una singola risposta, il valore " "era 'infinite' oppure un numero intero negativo che indica nessun limite" msgid "" "The network type to use when creating the HA network for an HA router. By " "default or if empty, the first 'tenant_network_types' is used. This is " "helpful when the VRRP traffic should use a specific network which is not the " "default one." msgstr "" "Il tipo di rete da utilizzare quando si crea la rete HA per un router HA. " "Per impostazione predefinita o se vuoto, è utilizzato il primo " "'tenant_network_types'. Ciò è utile quando il traffico VRRP deve utilizzare " "una rete specifica che non è quella predefinita." msgid "" "The number of seconds the agent will wait between polling for local device " "changes." msgstr "" "Il numero di secondi che l'agent attenderà tra i polling per le modifiche " "del dispositivo locale." msgid "" "The number of seconds to wait before respawning the ovsdb monitor after " "losing communication with it." msgstr "" "Il numero di secondi di attesa prima di generare nuovamente il monitor ovsdb " "dopo la perdita di comunicazione." msgid "The number of sort_keys and sort_dirs must be same" msgstr "Il numero di sort_keys e sort_dirs deve essere uguale" msgid "" "The path for API extensions. Note that this can be a colon-separated list of " "paths. For example: api_extensions_path = extensions:/path/to/more/exts:/" "even/more/exts. The __path__ of neutron.extensions is appended to this, so " "if your extensions are in there you don't need to specify them here." msgstr "" "Il percorso per le estensioni API. Può essere un elenco di percorsi separato " "dai due punti. Ad esempio: api_extensions_path = extensions:/path/to/more/" "exts:/even/more/exts. Il __path__ di neutron.extensions viene aggiunto a " "tale percorso, per cui, se le estensioni si trovano nel percorso non è " "necessario specificarle." msgid "The physical network name with which the HA network can be created." msgstr "Il nome della rete fisica con cui può essere creata la rete HA." #, python-format msgid "The port '%s' was deleted" msgstr "La porta '%s' è stata eliminata" msgid "The port to bind to" msgstr "La porta a cui collegarsi" #, python-format msgid "The requested content type %s is invalid." msgstr "Il tipo di contenuto richiesto %s non è valido." msgid "The resource could not be found." msgstr "Impossibile trovare la risorsa." #, python-format msgid "" "The router %(router_id)s has been already hosted by the L3 Agent " "%(agent_id)s." msgstr "" "Il router %(router_id)s è già ospitato dall'agent L3 %(agent_id)s." msgid "" "The server has either erred or is incapable of performing the requested " "operation."
msgstr "" "Il server è in errore o non è capace di eseguire l'operazione richiesta." msgid "The service plugins Neutron will use" msgstr "Il plugin del servizio che Neutron utilizzerà" #, python-format msgid "The subnet request could not be satisfied because: %(reason)s" msgstr "" "La richiesta della sottorete non può essere soddisfatta perché: %(reason)s" #, python-format msgid "The subproject to execute the command against. Can be one of: '%s'." msgstr "Il sottoprogetto su cui eseguire il comando. Può essere uno di: '%s'." msgid "The type of authentication to use" msgstr "Il tipo di autenticazione da utilizzare" msgid "" "There are routers attached to this network that depend on this policy for " "access." msgstr "" "Sono presenti router collegati a questa rete che dipendono da questa " "politica per l'accesso." msgid "" "True to delete all ports on all the OpenvSwitch bridges. False to delete " "ports created by Neutron on integration and external network bridges." msgstr "" "True per eliminare tutte le porte su tutti i bridge OpenvSwitch. False per " "eliminare le porte create da Neutron nell'integrazione e i bridge di reti " "esterne." msgid "Tunnel IP value needed by the ML2 plugin" msgstr "Valore IP tunnel IP richiesto dal plugin ML2" msgid "Tunnel bridge to use." msgstr "Bridge del tunnel da utilizzare." msgid "" "Type of the nova endpoint to use. This endpoint will be looked up in the " "keystone catalog and should be one of public, internal or admin." msgstr "" "Tipo di endpoint nova da utilizzare. Questo endpoint verrà ricercato nel " "catalogo keystone e deve essere public, internal o admin." msgid "URL for connecting to designate" msgstr "URL per la connessione da designare" msgid "URL to database" msgstr "URL per il database" #, python-format msgid "Unable to access %s" msgstr "Impossibile accedere a %s" #, python-format msgid "Unable to calculate %(address_type)s address because of:%(reason)s" msgstr "" "Impossibile calcolare l'indirizzo %(address_type)s a causa di: %(reason)s" #, python-format msgid "Unable to convert value in %s" msgstr "Impossibile convertire il valore in %s" msgid "Unable to create the SNAT Interface Port" msgstr "Impossibile creare la porta dell'interfaccia SNAT" #, python-format msgid "Unable to determine mac address for %s" msgstr "Impossibile determinare l'indirizzo mac per %s" #, python-format msgid "Unable to find '%s' in request body" msgstr "Impossibile trovare '%s' nel corpo della richiesta" #, python-format msgid "Unable to find IP address %(ip_address)s on subnet %(subnet_id)s" msgstr "" "Impossibile trovare l'indirizzo IP %(ip_address)s nella sottorete " "%(subnet_id)s" #, python-format msgid "Unable to find resource name in %s" msgstr "Impossibile trovare il nome risorsa in %s" #, python-format msgid "" "Unable to identify a target field from:%s. Match should be in the form " "%%()s" msgstr "" "Impossibile identificare un campo di destinazione da:%s. 
La corrispondenza " "deve essere presente nel modulo %%()s" msgid "Unable to provide external connectivity" msgstr "Impossibile fornire la connettività esterna" msgid "Unable to provide tenant private network" msgstr "Impossibile fornire la rete privata del tenant" #, python-format msgid "" "Unable to verify match:%(match)s as the parent resource: %(res)s was not " "found" msgstr "" "Impossibile verificare la corrispondenza:%(match)s come risorsa parent: " "%(res)s non è stata trovata" #, python-format msgid "Unexpected label for script %(script_name)s: %(labels)s" msgstr "Etichetta imprevista per lo script %(script_name)s: %(labels)s" #, python-format msgid "Unexpected number of alembic branch points: %(branchpoints)s" msgstr "Numero non previsto di punti di ramo alembic: %(branchpoints)s" #, python-format msgid "Unexpected response code: %s" msgstr "Imprevisto codice di risposta: %s" #, python-format msgid "Unexpected response: %s" msgstr "Risposta imprevista: %s" #, python-format msgid "Unit name '%(unit)s' is not valid." msgstr "Il nome unità '%(unit)s' non è valido." #, python-format msgid "Unknown address type %(address_type)s" msgstr "Tipo di indirizzo sconosciuto %(address_type)s" #, python-format msgid "Unknown attribute '%s'." msgstr "Attributo sconosciuto '%s'." #, python-format msgid "Unknown chain: %r" msgstr "Catena sconosciuta: %r" #, python-format msgid "Unknown network type %(network_type)s." msgstr "Tipo di rete %(network_type)s sconosciuto." msgid "Unmapped error" msgstr "Errore non associato" msgid "Unrecognized action" msgstr "Azione non riconosciuta" msgid "Unrecognized field" msgstr "Campo non riconosciuto" msgid "Unsupported Content-Type" msgstr "Tipo-contenuto non supportato" #, python-format msgid "Unsupported network type %(net_type)s." msgstr "Tipo di rete non supportato %(net_type)s." msgid "Unsupported request type" msgstr "Tipo di richiesta non supportato" msgid "Updating default security group not allowed." msgstr "L'aggiornamento del gruppo di sicurezza predefinito non è consentito." msgid "" "Use ML2 l2population mechanism driver to learn remote MAC and IPs and " "improve tunnel scalability." msgstr "" "utilizzare il driver del meccanismo ML2 l2population per conoscere MAC e IP " "remoti e migliorare la scalabilità del tunnel." msgid "Use broadcast in DHCP replies." msgstr "Utilizzare broadcast nelle risposte DHCP." msgid "Use either --delta or relative revision, not both" msgstr "Utilizzare --revisione delta o relativa, non entrambe" msgid "" "Use ipset to speed-up the iptables based security groups. Enabling ipset " "support requires that ipset is installed on L2 agent node." msgstr "" "Utilizzare ipset per velocizzare i gruppi di sicurezza basati su iptable. " "L'abilitazione del supporto ipset richiede che ipset sia installato sul nodo " "dell'agent L2." msgid "" "Use the root helper when listing the namespaces on a system. This may not be " "required depending on the security configuration. If the root helper is not " "required, set this to False for a performance improvement." msgstr "" "Utilizzare il root helper per visualizzare gli spazi dei nomi in un sistema " "operativo. Ciò potrebbe non essere richiesto in base alla configurazione di " "sicurezza. Se il root helper non è richiesto, impostare su False per un " "miglioramento delle prestazioni." msgid "" "Use veths instead of patch ports to interconnect the integration bridge to " "physical networks. Support kernel without Open vSwitch patch port support so " "long as it is set to True." 
msgstr "" "Utilizzare veths invece delle porte patch per interconnettere il bridge di " "integrazione alle reti fisiche. Supporta kernel senza supporto per porta " "patch Open vSwitch se impostato su True." msgid "" "User (uid or name) running metadata proxy after its initialization (if " "empty: agent effective user)." msgstr "" "Utente (uid o nome) che esegue il proxy di metadati dopo la relativa " "inizializzazione (se vuoto: utente operativo dell'agent)." msgid "User (uid or name) running this process after its initialization" msgstr "" "Utente (uid o name) che esegue questo processo dopo la relativa " "inizializzazione" msgid "Username for connecting to designate in admin context" msgstr "Nome utente per la connessione da designare nel contesto admin" msgid "VRRP authentication password" msgstr "Password di autenticazione VRRP" msgid "VRRP authentication type" msgstr "Tipo di autenticazione VRRP" msgid "" "Value of host kernel tick rate (hz) for calculating minimum burst value in " "bandwidth limit rules for a port with QoS. See kernel configuration file for " "HZ value and tc-tbf manual for more information." msgstr "" "Il valore della velocità di tick (hz) del kernel host per il calcolo del " "valore burst minimo nelle regole del limite di larghezza di banda per una " "porta con QoS. Vedere il file di configurazione kernel per il valore HZ e il " "manuale tc-tbf per ulteriori informazioni." msgid "" "Value of latency (ms) for calculating size of queue for a port with QoS. See " "tc-tbf manual for more information." msgstr "" "Il valore di latenza (ms) per il calcolo della dimensione della coda per una " "porta con QoS. Per ulteriori informazioni, vedere il manuale tc-tbf." msgid "" "When proxying metadata requests, Neutron signs the Instance-ID header with a " "shared secret to prevent spoofing. You may select any string for a secret, " "but it must match here and in the configuration used by the Nova Metadata " "Server. NOTE: Nova uses the same config key, but in [neutron] section." msgstr "" "Quando si trasferiscono richieste di metadati, Neutron firma l'intestazione " "Instance-ID con un segreto condiviso per evitare lo spoofing. È possibile " "selezionare una qualsiasi stringa per un segreto ma deve corrispondere qui e " "nella configurazione utilizzata da Nova Metadata Server. NOTA: Nova utilizza " "la stessa chiave di configurazione, ma nella sezione [neutron]." msgid "" "Where to store Neutron state files. This directory must be writable by the " "agent." msgstr "" "Dove memorizzare i file di stato Neutron. Questa directory deve essere " "scrivibile dall'agent." msgid "" "With IPv6, the network used for the external gateway does not need to have " "an associated subnet, since the automatically assigned link-local address " "(LLA) can be used. However, an IPv6 gateway address is needed for use as the " "next-hop for the default route. If no IPv6 gateway address is configured " "here, (and only then) the neutron router will be configured to get its " "default route from router advertisements (RAs) from the upstream router; in " "which case the upstream router must also be configured to send these RAs. " "The ipv6_gateway, when configured, should be the LLA of the interface on the " "upstream router. If a next-hop using a global unique address (GUA) is " "desired, it needs to be done via a subnet allocated to the network and not " "through this parameter. 
" msgstr "" "Con IPv6, non è necessario che la rete utilizzata per il gateway esterno " "disponga di una sottorete associata, poiché verrà utilizzato il LLA (link-" "local address) assegnato automaticamente. Tuttavia, è necessario un " "indirizzo gateway IPv6 per l'utilizzo come successivo hop per " "l'instradamento predefinito. Se qui non è configuratonessun indirizzo " "gateway Ipv6 (e solo poi) verrà configurato il router Neutron per ottenere " "il relativo instradamento predefinito da RA (Router Advertisement) dal " "router upstream; in tal caso il router upstream deve essere anche " "configuratoper inviare questi RA. Ipv6_gateway, quando configurato, " "deveessere il LLA dell'interfaccia sul router upstream. Se si desidera un " "hop successivo che utilizzi un GUA (Global Uunique Address) è necessario " "ottenerlo mediante una sottorete assegnata alla rete e non attraverso questo " "parametro." msgid "You must implement __call__" msgstr "È necessario implementare __call__" msgid "" "You must provide a config file for bridge - either --config-file or " "env[NEUTRON_TEST_CONFIG_FILE]" msgstr "" "È necessario fornire un file di configurazione per il bridge - --config-file " "o env[NEUTRON_TEST_CONFIG_FILE]" msgid "You must provide a revision or relative delta" msgstr "È necessario fornire una revisione o delta relativo" msgid "a subnetpool must be specified in the absence of a cidr" msgstr "un pool di sottorete deve essere specificato in assenza di un cidr" msgid "add_ha_port cannot be called inside of a transaction." msgstr "add_ha_port non può essere richiamato all'interno di una transazione." msgid "allocation_pools allowed only for specific subnet requests." msgstr "" "allocation_pools consentita solo per specifiche richieste della sottorete." msgid "allocation_pools are not in the subnet" msgstr "allocation_pools non presenti nella sottorete" msgid "allocation_pools use the wrong ip version" msgstr "allocation_pools utilizzano la versione IP errata" msgid "already a synthetic attribute" msgstr "è già presente un attributo synthetic" msgid "binding:profile value too large" msgstr "valore binding:profile troppo esteso" #, python-format msgid "cannot perform %(event)s due to %(reason)s" msgstr "impossibile esegure %(event)s a causa di %(reason)s" msgid "cidr and prefixlen must not be supplied together" msgstr "non devono essere forniti insieme cidr e prefixlen" msgid "dns_domain cannot be specified without a dns_name" msgstr "dns_domain non può essere specificato senza un dns_name" msgid "dns_name cannot be specified without a dns_domain" msgstr "dns_name non può essere specificato senza un dns_domain" msgid "fixed_ip_address cannot be specified without a port_id" msgstr "Impossibile specificare un fixed_ip_address senza un porta_id" #, python-format msgid "has device owner %s" msgstr "ha il proprietario del dispositivo %s" msgid "in use" msgstr "in uso" #, python-format msgid "ip command failed on device %(dev_name)s: %(reason)s" msgstr "comando ip non riuscito sul dispositivo %(dev_name)s: %(reason)s" #, python-format msgid "ip link capability %(capability)s is not supported" msgstr "La funzione ip link %(capability)s non è supportata" #, python-format msgid "ip link command is not supported: %(reason)s" msgstr "Il comando ip link non è supportato: %(reason)s" msgid "ip_version must be specified in the absence of cidr and subnetpool_id" msgstr "è necessario specificare ip_version in assenza di cidr e subnetpool_id" msgid "ipv6_address_mode is not valid when ip_version is 4" msgstr 
"ipv6_address_mode non è valida quando ip_version è 4" msgid "ipv6_ra_mode is not valid when ip_version is 4" msgstr "ipv6_ra_mode non è valida quando ip_version è 4" #, python-format msgid "" "ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " "'%(addr_mode)s' is not valid. If both attributes are set, they must be the " "same value" msgstr "" "ipv6_ra_mode impostato su '%(ra_mode)s' con ipv6_address_mode impostato su " "'%(addr_mode)s' non è valido. Se sono impostati entrambi gli attributi, essi " "devono avere lo stesso valore" msgid "mac address update" msgstr "aggiornamento indirizzo mac" msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "è necessario fornire esattamente 2 argomenti - cidr e MAC" msgid "network_type required" msgstr "network_type obbligatorio" #, python-format msgid "network_type value '%s' not supported" msgstr "Valore network_type '%s' non supportato" msgid "new subnet" msgstr "nuova sottorete" #, python-format msgid "physical_network '%s' unknown for flat provider network" msgstr "physical_network '%s' sconosciuta per rete flat del provider" msgid "physical_network required for flat provider network" msgstr "physical_network richiesta per rete flat del provider" #, python-format msgid "provider:physical_network specified for %s network" msgstr "provider:physical_network specificata per la rete %s" msgid "respawn_interval must be >= 0 if provided." msgstr "respawn_interval deve essere >= 0 se fornito." #, python-format msgid "segmentation_id out of range (%(min)s through %(max)s)" msgstr "segmentation_id fuori dall'intervallo (da %(min)s a %(max)s)" msgid "segmentation_id requires physical_network for VLAN provider network" msgstr "" "segmentation_id richiede physical_network per la rete del provider VLAN" msgid "shared attribute switching to synthetic" msgstr "passaggio dell'attributo condiviso su synthetic" #, python-format msgid "" "subnetpool %(subnetpool_id)s cannot be updated when associated with shared " "address scope %(address_scope_id)s" msgstr "" "Il pool di sottorete %(subnetpool_id)s non può essere aggiornato quando " "associato all'ambito indirizzo condiviso %(address_scope_id)s" msgid "subnetpool_id and use_default_subnetpool cannot both be specified" msgstr "" "subnetpool_id e use_default_subnetpool non possono essere entrambi " "specificati" msgid "the nexthop is not connected with router" msgstr "l'hop successivo non è connesso al router" msgid "the nexthop is used by router" msgstr "l'hop successivo è utilizzato dal router" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982290.9710417 neutron-16.0.0.0b2.dev214/neutron/locale/ja/0000755000175000017500000000000000000000000020466 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3150446 neutron-16.0.0.0b2.dev214/neutron/locale/ja/LC_MESSAGES/0000755000175000017500000000000000000000000022253 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/locale/ja/LC_MESSAGES/neutron.po0000644000175000017500000032460000000000000024312 0ustar00coreycorey00000000000000# Translations template for neutron. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the neutron project. 
# # Translators: # Akihiro Motoki , 2013 # Akihiro Motoki , 2013 # Sasuke(Kyohei MORIYAMA) <>, 2015 # NachiUeno , 2013 # NachiUeno , 2013 # Sasuke(Kyohei MORIYAMA) <>, 2015 # Tomoyuki KATO , 2013 # Akihiro Motoki , 2016. #zanata # Andreas Jaeger , 2016. #zanata # 笹原 昌美 , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: neutron VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2019-12-20 15:01+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-16 09:21+0000\n" "Last-Translator: 笹原 昌美 \n" "Language: ja\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Japanese\n" #, python-format msgid "" "\n" "Command: %(cmd)s\n" "Exit code: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" msgstr "" "\n" "コマンド: %(cmd)s\n" "終了コード: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" #, python-format msgid "" "%(branch)s HEAD file does not match migration timeline head, expected: " "%(head)s" msgstr "" "%(branch)s の HEAD ファイルが予期される移行のタイムラインヘッドと合致しませ" "ん: %(head)s" #, python-format msgid "" "%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' " "and '%(desc)s'" msgstr "" "%(invalid_dirs)s は sort_dirs には無効な値です。有効な値は '%(asc)s' および " "'%(desc)s' です" #, python-format msgid "%(key)s prohibited for %(tunnel)s provider network" msgstr "%(key)s は %(tunnel)s プロバイダーネットワークで禁止されています" #, python-format msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" msgstr "%(name)s '%(addr)s' が ip_version '%(ip_version)s' と一致しません" #, python-format msgid "%s cannot be called while in offline mode" msgstr "オフラインモードでは、%s を呼び出せません" #, python-format msgid "%s is invalid attribute for sort_keys" msgstr "%sは、sort_keys には無効な属性です" #, python-format msgid "%s must implement get_port_from_device or get_ports_from_devices." msgstr "" "%s は get_port_from_device または get_ports_from_devices を実装していなければ" "なりません。" #, python-format msgid "%s prohibited for VLAN provider network" msgstr "%s は VLAN プロバイダーネットワークで禁止されています" #, python-format msgid "%s prohibited for flat provider network" msgstr "%s は flat プロバイダーネットワークで禁止されています" #, python-format msgid "%s prohibited for local provider network" msgstr "%s は local プロバイダーネットワークで禁止されています" #, python-format msgid "'%s' is not a valid RBAC object type" msgstr "'%s' は RBAC の有効なオブジェクトタイプではありません" #, python-format msgid "'%s' is not supported for filtering" msgstr "'%s' はフィルタリングではサポートされません" #, python-format msgid "'module' object has no attribute '%s'" msgstr "'module' オブジェクトに属性 '%s' がありません" msgid "'port_max' is smaller than 'port_min'" msgstr "'port_max' が 'port_min' よりも小さくなっています" msgid "0 is not allowed as CIDR prefix length" msgstr "0 は CIDR プレフィックス長として許可されていません" msgid "A cidr must be specified in the absence of a subnet pool" msgstr "サブネットプールがない場合、cidr の指定は必須です" msgid "" "A decimal value as Vendor's Registered Private Enterprise Number as required " "by RFC3315 DUID-EN." msgstr "RFC3315 DUID-EN が必要とするベンダーが登録した私企業番号の 10 進数値。" #, python-format msgid "A default external network already exists: %(net_id)s." msgstr "デフォルトの外部ネットワークが既に存在します: %(net_id)s" msgid "" "A default subnetpool for this IP family has already been set. 
Only one " "default may exist per IP family" msgstr "" "この IP ファミリーに対するデフォルトのサブネットプールが既に設定されていま" "す。IP ファミリーごとにデフォルトは 1 つしか設定できません。" msgid "A metering driver must be specified" msgstr "計測ドライバーを指定する必要があります" msgid "Access to this resource was denied." msgstr "このリソースへのアクセスは拒否されました。" msgid "Action to be executed when a child process dies" msgstr "子プロセスが異常終了したときに実行されるアクション" msgid "" "Add comments to iptables rules. Set to false to disallow the addition of " "comments to generated iptables rules that describe each rule's purpose. " "System must support the iptables comments module for addition of comments." msgstr "" "iptables ルールにコメントを追加します。この値を False に設定すると、生成され" "る iptalbes ルールにルールの目的を説明するコメントを追加しなくなります。シス" "テムでは、コメントの追加用に iptables コメントモジュールがサポートされている" "必要があります。" msgid "Address not present on interface" msgstr "インターフェース上に存在しないアドレス" msgid "Adds test attributes to core resources." msgstr "テスト属性をコアリソースに追加します。" #, python-format msgid "Agent %(id)s is not a L3 Agent or has been disabled" msgstr "" "エージェント %(id)s は、L3 エージェントでないか、使用不可になっています" msgid "" "Agent starts with admin_state_up=False when enable_new_agents=False. In the " "case, user's resources will not be scheduled automatically to the agent " "until admin changes admin_state_up to True." msgstr "" "enable_new_agents=False の場合、エージェントは admin_state_up=False の状態で" "処理を開始します。この場合、 管理者が admin_state_up を True に変更するまで、" "ユーザーのリソースが自動的にエージェントにスケジュール設定されることはありま" "せん。" #, python-format msgid "Agent updated: %(payload)s" msgstr "エージェントが更新されました: %(payload)s" msgid "Allow auto scheduling networks to DHCP agent." msgstr "DHCP エージェントに対するネットワークの自動スケジューリングを許可" msgid "Allow auto scheduling of routers to L3 agent." msgstr "" "L3 エージェントに対するルーターの自動スケジューリングを許可してください。" msgid "" "Allow overlapping IP support in Neutron. Attention: the following parameter " "MUST be set to False if Neutron is being used in conjunction with Nova " "security groups." msgstr "" "Neutron で重複する IP のサポートを許容します。注意: Nova のセキュリティーグ" "ループとともに Neutron を使用する場合は、以下のパラメーターを必ず False に設" "定する必要があります。" msgid "Allow running metadata proxy." msgstr "メタデータプロキシーの実行を許可します。" msgid "Allow sending resource operation notification to DHCP agent" msgstr "DHCP エージェントへのリソース操作通知の送信の許可" msgid "Allow the creation of PTR records" msgstr "PTR レコードの作成の許可" msgid "Allow the usage of the bulk API" msgstr "Bulk API の使用を許可" msgid "Allow to perform insecure SSL (https) requests to nova metadata" msgstr "" "Nova メタデータに対する非セキュアな SSL (https) 要求を実行することを許可しま" "す" msgid "" "Allows for serving metadata requests coming from a dedicated metadata access " "network whose CIDR is 169.254.169.254/16 (or larger prefix), and is " "connected to a Neutron router from which the VMs send metadata:1 request. In " "this case DHCP Option 121 will not be injected in VMs, as they will be able " "to reach 169.254.169.254 through a router. This option requires " "enable_isolated_metadata = True." msgstr "" "CIDR が 169.254.169.254/16 (またはこれより大きなプレフィックス) で VM が " "metadata:1 要求を送信する Neuron ルーターに接続している、特定のメタデータアク" "セスネットワークから出されるメタデータ要求に対応します。この場合、DHCP オプ" "ション 121 はルーターを経由して 169.254.169.254 に到達できるため、VM に挿入さ" "れません。このオプションを設定するには enable_isolated_metadata = True と設定" "する必要があります。" msgid "An RBAC policy already exists with those values." msgstr "これらの値に関して RBAC ポリシーが既に存在します。" msgid "An identifier must be specified when updating a subnet" msgstr "サブネットを更新する際には ID を指定する必要があります" msgid "" "An ordered list of extension driver entrypoints to be loaded from the " "neutron.ml2.extension_drivers namespace. 
For example: extension_drivers = " "port_security,qos" msgstr "" "neutron.ml2.extension_drivers の名前空間からロードされる拡張ドライバーのエン" "トリーポイントを一定の順序に並べたリスト。例: extension_drivers = " "port_security,qos" msgid "" "An ordered list of networking mechanism driver entrypoints to be loaded from " "the neutron.ml2.mechanism_drivers namespace." msgstr "" "neutron.ml2.mechanism_drivers 名前空間からロードされるネットワーキングメカニ" "ズムドライバーのエントリーポイントの順序付きリスト。" msgid "An unknown error has occurred. Please try your request again." msgstr "不明なエラーが発生しました。要求を再試行してください。" msgid "Async process didn't respawn" msgstr "非同期プロセスが再生成されませんでした" msgid "Authorization URL for connecting to designate in admin context" msgstr "管理者のコンテキストにおける designate への接続用認証 URL" msgid "Automatically remove networks from offline DHCP agents." msgstr "ネットワークをオフライン DHCP エージェントから自動的に削除します。" msgid "" "Automatically reschedule routers from offline L3 agents to online L3 agents." msgstr "" "ルーターのスケジュールをオフライン L3 エージェントからオンライン L3 エージェ" "ントに自動的に変更します。" msgid "Availability zone of this node" msgstr "このノードのアベイラビリティーゾーン" msgid "Available commands" msgstr "使用可能なコマンド" #, python-format msgid "Base MAC: %s" msgstr "ベース MAC: %s" msgid "" "Base log dir for dnsmasq logging. The log contains DHCP and DNS log " "information and is useful for debugging issues with either DHCP or DNS. If " "this section is null, disable dnsmasq log." msgstr "" "dnsmasq のログを保存する基本となるログディレクトリー。ログには DHCP と DNS の" "ログ情報が含まれ、DHCP または DNS のデバッグを行うために役立ちます。このセク" "ションに何の値も設定しない場合は、dnsmasq ログを無効化します。" msgid "Body contains invalid data" msgstr "本文に無効なデータが含まれています" msgid "Bulk operation not supported" msgstr "バルク操作はサポートされていません" msgid "CIDR to monitor" msgstr "モニター対象の CIDR" #, python-format msgid "Callback for %(resource_type)s not found" msgstr "%(resource_type)s のコールバックが見つかりません" #, python-format msgid "Callback for %(resource_type)s returned wrong resource type" msgstr "%(resource_type)s のコールバックが間違ったリソースタイプを返しました" #, python-format msgid "Cannot add floating IP to port %s that has no fixed IPv4 addresses" msgstr "" "固定の IPv4 アドレスを持たないポート %s に Floating IP を追加することはできま" "せん" #, python-format msgid "Cannot add multiple callbacks for %(resource_type)s" msgstr "%(resource_type)s に複数のコールバックを追加することはできません" #, python-format msgid "Cannot allocate IPv%(req_ver)s subnet from IPv%(pool_ver)s subnet pool" msgstr "" "IPv%(pool_ver)s サブネットプールから IPv%(req_ver)s サブネットを割り当てるこ" "とはできません" msgid "Cannot allocate requested subnet from the available set of prefixes" msgstr "" "要求されたサブネットを使用可能なプレフィックスのセットから割り振ることができ" "ません" msgid "Cannot disable enable_dhcp with ipv6 attributes set" msgstr "ipv6 属性が設定された状態で enable_dhcp を無効にすることはできません" #, python-format msgid "Cannot handle subnet of type %(subnet_type)s" msgstr "タイプ %(subnet_type)s のサブネットを処理できません" msgid "Cannot have multiple IPv4 subnets on router port" msgstr "ルーターポートには複数の IPv4 サブネットは設定できません" #, python-format msgid "" "Cannot have multiple router ports with the same network id if both contain " "IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s" msgstr "" "同じネットワーク ID を持つ複数のルーターポートのいずれにも IPv6 サブネットが" "含まれる場合、これらのポートは使用できません。既存のポート %(p)s には IPv6 サ" "ブネットがあり、ネットワーク ID は %(nid)s です" #, python-format msgid "" "Cannot host distributed router %(router_id)s on legacy L3 agent %(agent_id)s."
msgstr "" "レガシーの L3 エージェント %(agent_id)s で分散ルーター %(router_id)s をホスト" "できません。" msgid "Cannot specify both subnet-id and port-id" msgstr "subnet-id と port-id の両方を指定することはできません" msgid "Cannot understand JSON" msgstr "JSON を解釈できません" #, python-format msgid "Cannot update read-only attribute %s" msgstr "読み取り専用属性 %s を更新できません" msgid "Certificate Authority public key (CA cert) file for ssl" msgstr "ssl の認証局公開鍵 (CA cert) ファイル" msgid "Check ebtables installation" msgstr "ebtables のインストールの検査" msgid "Check for ARP header match support" msgstr "ARP ヘッダーマッチのサポートの検査" msgid "Check for ARP responder support" msgstr "ARP 応答側サポートの検査" msgid "Check for ICMPv6 header match support" msgstr "ICMPv6 ヘッダーマッチのサポートの検査" msgid "Check for OVS Geneve support" msgstr "OVS Geneve サポートの検査" msgid "Check for OVS vxlan support" msgstr "OVS vxlan サポートの検査" msgid "Check for VF management support" msgstr "VF 管理サポートの検査" msgid "Check for iproute2 vxlan support" msgstr "iproute2 vxlan サポートの検査" msgid "Check for nova notification support" msgstr "Nova 通知サポートの検査" msgid "Check for patch port support" msgstr "パッチポートのサポートの検査" msgid "Check ip6tables installation" msgstr "ip6tables のインストールの検査" msgid "Check ipset installation" msgstr "ipset のインストールの検査" msgid "Check keepalived IPv6 support" msgstr "Keepalived の IPv6 サポートの検査" msgid "Check minimal dibbler version" msgstr "Dibbler の最小バージョンの検査" msgid "Check minimal dnsmasq version" msgstr "最小 dnsmasq バージョンの検査" msgid "Check netns permission settings" msgstr "netns 許可設定の検査" msgid "Check ovs conntrack support" msgstr "ovs の conntrack サポートの検査" msgid "Check ovsdb native interface support" msgstr "ovsdb ネイティブインターフェースのサポートの検査" #, python-format msgid "" "Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of " "subnet %(sub_id)s" msgstr "" "サブネット %(subnet_id)s の CIDR %(subnet_cidr)s がサブネット %(sub_id)s の " "CIDR %(cidr)s とオーバーラップしています" msgid "Cleanup resources of a specific agent type only." msgstr "特定のエージェントタイプのみのリソースをクリーンアップします。" msgid "Client certificate for nova metadata api server." msgstr "Nova メタデータ API サーバー用のクライアント証明書。" msgid "" "Comma-separated list of : tuples, mapping " "network_device to the agent's node-specific list of virtual functions that " "should not be used for virtual networking. vfs_to_exclude is a semicolon-" "separated list of virtual functions to exclude from network_device. The " "network_device in the mapping should appear in the physical_device_mappings " "list." msgstr "" "コンマで区切られた のリスト: タプルは、仮想" "ネットワーキングに使用してはならない仮想機能のエージェントのノード固有のリス" "トに network_device をマッピングします。vfs_to_exclude は、セミコロンで区切ら" "れた network_device から除外される仮想機能のリストです。マッピングに含まれる " "network_device は、physical_device_mappings リストに表示されます。" msgid "" "Comma-separated list of : tuples mapping " "physical network names to the agent's node-specific physical network device " "interfaces of SR-IOV physical function to be used for VLAN networks. All " "physical networks listed in network_vlan_ranges on the server should have " "mappings to appropriate interfaces on each agent." msgstr "" "コンマで区切られた のリスト: 物理ネットワーク名をエージェ" "ントのノード固有の物理ネットワークのデバイスインターフェース (SR-IOV の物理機" "能を持つ) にマッピングする タプルは、VLAN ネットワークで使用" "されます。サーバー上の network_vlan_ranges にリストされるすべての物理ネット" "ワークは、各エージェントの適切なインターフェースにマッピングされる必要があり" "ます。" msgid "" "Comma-separated list of : tuples " "mapping physical network names to the agent's node-specific physical network " "interfaces to be used for flat and VLAN networks. All physical networks " "listed in network_vlan_ranges on the server should have mappings to " "appropriate interfaces on each agent." 
msgstr "" "コンマで区切られた のリスト: 物理ネットワーク名をエージェ" "ントのノード固有の物理ネットワークのインターフェースにマッピングする " " タプルは、フラットネットワークと VLAN ネットワークで使用" "されます。サーバー上の network_vlan_ranges にリストされるすべての物理ネット" "ワークは、各エージェントの適切なインターフェースとマッピングされる必要があり" "ます。" msgid "" "Comma-separated list of : tuples enumerating ranges of GRE " "tunnel IDs that are available for tenant network allocation" msgstr "" "テナントネットワークの割り当てに使用可能な GRE トンネル ID の範囲を列挙する " ": タプルのコンマ区切りリスト" msgid "" "Comma-separated list of : tuples enumerating ranges of " "Geneve VNI IDs that are available for tenant network allocation" msgstr "" "テナントネットワークの割り当てに使用可能な Geneve VNI ID の範囲をエミュレート" "する、コンマで区切った : タプルのリスト。" msgid "" "Comma-separated list of : tuples enumerating ranges of " "VXLAN VNI IDs that are available for tenant network allocation" msgstr "" "テナントネットワークの割り当てに使用可能な VXLAN VNI ID の範囲を列挙する " ": タプルのコンマ区切りリスト" msgid "" "Comma-separated list of the DNS servers which will be used as forwarders." msgstr "フォワーダーとして使用される DNS サーバーのカンマ区切りのリスト。" msgid "Command to execute" msgstr "実行するコマンド" msgid "Config file for interface driver (You may also use l3_agent.ini)" msgstr "" "インターフェースドライバーの構成ファイル (l3_agent.ini を使用することもできま" "す)" #, python-format msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s" msgstr "CIDR %(cidr)s とイーサネットタイプ値 %(ethertype)s が競合しています" msgid "" "Controls whether the neutron security group API is enabled in the server. It " "should be false when using no security groups or using the nova security " "group API." msgstr "" "Neutron セキュリティーグループ API をサーバーで有効化するかどうかを制御しま" "す。セキュリティーグループを使用しない場合、または Nova セキュリティーグルー" "プ API を使用する場合には、False にする必要があります。" #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" msgstr "" "%(time)d 秒間試行しましたが %(host)s:%(port)s にバインドできませんでした" msgid "Could not deserialize data" msgstr "シリアライズされたデータを復元することができませんでした" msgid "" "DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite " "lease times." msgstr "" "DHCP リース期間 (秒)。dnsmasq に無制限のリース時間の使用を指示するには、-1 を" "使用します。" msgid "" "DVR deployments for VXLAN/GRE/Geneve underlays require L2-pop to be enabled, " "in both the Agent and Server side." msgstr "" "VXLAN、GRE、Geneve のアンダーレイの DVR 実装環境では、エージェント側とサー" "バー側で L2-pop を有効化する必要があります。" msgid "" "Database engine for which script will be generated when using offline " "migration." msgstr "オフラインで移行を行う際にスクリプトが生成されるデータベースエンジン。" msgid "Default external networks must be shared to everyone." msgstr "デフォルトの外部ネットワークは全メンバーに共有する必要があります" msgid "" "Default network type for external networks when no provider attributes are " "specified. By default it is None, which means that if provider attributes " "are not specified while creating external networks then they will have the " "same type as tenant networks. Allowed values for external_network_type " "config option depend on the network type values configured in type_drivers " "config option." msgstr "" "プロバイダーの属性が指定されない場合の外部ネットワーク用のデフォルトのネット" "ワークタイプ。デフォルト値は None です。これは、外部ネットワークの作成時にプ" "ロバイダーの属性が指定されない場合に、テナントネットワークと同じネットワーク" "タイプを使用することを意味します。external_network_type の設定オプションとし" "て許容される値は、type_drivers の設定オプションで設定されたネットワークタイプ" "値によって決まります。" msgid "" "Default number of RBAC entries allowed per tenant. A negative value means " "unlimited." msgstr "" "テナントごとの RBAC 項目のデフォルト数。負の値がある場合、制限が設定されてい" "ないことを指します。" msgid "" "Default number of resource allowed per tenant. A negative value means " "unlimited." msgstr "" "テナント当たりに許可されるリソースのデフォルト数。負の値は無制限を意味しま" "す。" msgid "Default security group" msgstr "デフォルトセキュリティグループ" msgid "Default security group already exists." 
msgstr "デフォルトのセキュリティーグループが既に存在します。" msgid "" "Default value of availability zone hints. The availability zone aware " "schedulers use this when the resources availability_zone_hints is empty. " "Multiple availability zones can be specified by a comma separated string. " "This value can be empty. In this case, even if availability_zone_hints for a " "resource is empty, availability zone is considered for high availability " "while scheduling the resource." msgstr "" "アベイラビリティーゾーンのヒントのデフォルト値。リソースの " "availability_zone_hints が空の場合、アベイラビリティーゾーンを参照するスケ" "ジューラーがこの値を使用します。コンマで区切られた文字列によって複数のアベイ" "ラビリティーゾーンを指定できます。この値は空である場合があります。その場合、" "リソースの availability_zone_hints が空であっても、リソースのスケジューリング" "を行う際に、高可用性を実現するようアベイラビリティーゾーンの検討が行われま" "す。" msgid "" "Define the default value of enable_snat if not provided in " "external_gateway_info." msgstr "" "enable_snat のデフォルト値が external_gateway_info で提供されていない場合は、" "定義してください。" msgid "" "Defines providers for advanced services using the format: :" ":[:default]" msgstr "" "次のフォーマットを使用して拡張サービスのプロバイダーが定義されます: " "::[:default]" msgid "Delete the namespace by removing all devices." msgstr "すべてのデバイスを削除して、名前空間を削除します。" #, python-format msgid "Deleting port %s" msgstr "ポート %s を削除しています" #, python-format msgid "Deployment error: %(reason)s." msgstr "デプロイメントエラー: %(reason)s" msgid "Destroy IPsets even if there is an iptables reference." msgstr "iptables の参照がある場合でも IPset を破棄します。" msgid "Destroy all IPsets." msgstr "すべての IPset を破棄します。" #, python-format msgid "Device %(dev_name)s in mapping: %(mapping)s not unique" msgstr "マッピング: %(mapping)s 内のデバイス %(dev_name)s が一意ではありません" msgid "Device not found" msgstr "デバイスが見つかりません" msgid "Domain to use for building the hostnames" msgstr "ホスト名の作成に使用するドメイン" msgid "Downgrade no longer supported" msgstr "ダウングレードは現在ではサポートされていません" #, python-format msgid "Driver %s is not unique across providers" msgstr "ドライバー %s はプロバイダー全体で固有ではありません" msgid "Driver for external DNS integration." msgstr "外部 DNS の連携のためのドライバー。" msgid "Driver for security groups firewall in the L2 agent" msgstr "L2 エージェントのセキュリティーグループのファイアウォールのドライバー" msgid "Driver to use for scheduling network to DHCP agent" msgstr "" "DHCP エージェントに対するネットワークのスケジューリングに使用するドライバー" msgid "Driver to use for scheduling router to a default L3 agent" msgstr "" "デフォルトの L3 エージェントに対するルーターのスケジューリングに使用するドラ" "イバー" msgid "" "Driver used for ipv6 prefix delegation. This needs to be an entry point " "defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg for " "entry points included with the neutron source." msgstr "" "ipv6 のプレフィックスデリゲーションを行うためのドライバー。neutron.agent." "linux.pd_drivers の名前空間で定義したエントリーポイントである必要があります。" "neutron のソースに含まれるエントリーポイントについては、setup.cfg を参照して" "ください。" #, python-format msgid "" "Duplicate L3HARouterAgentPortBinding is created for router(s) %(router)s. " "Database cannot be upgraded. Please, remove all duplicates before upgrading " "the database." msgstr "" "ルーター %(router)s に関して、重複する L3HARouterAgentPortBinding が作成され" "ます。データベースはアップグレードできません。データベースをアップグレードす" "る前にすべての重複を削除してください。" msgid "Duplicate Security Group Rule in POST." msgstr "POST に重複するセキュリティーグループルールがあります。" msgid "Duplicate address detected" msgstr "検出された重複アドレス" #, python-format msgid "ERROR: %s" msgstr "エラー: %s" msgid "" "ERROR: Unable to find configuration file via the default search paths (~/." "neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!" 
msgstr "" "エラー: デフォルトの検索パス (~/.neutron/, ~/, /etc/neutron/, /etc/) および " "'--config-file' オプションを使用して、構成ファイルが見つかりません。" msgid "" "Either one of parameter network_id or router_id must be passed to _get_ports " "method." msgstr "" "パラメーター network_id または router_id のいずれかを _get_ports メソッドに渡" "す必要があります。" msgid "Either subnet_id or port_id must be specified" msgstr "subnet_id または port_id のいずれかを指定する必要があります" msgid "Enable HA mode for virtual routers." msgstr "仮想ルーターのために HA モードを有効化します。" msgid "Enable SSL on the API server" msgstr "API サーバー上で SSL を有効にします" msgid "" "Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 " "plugin using linuxbridge mechanism driver" msgstr "" "エージェントで VXLAN を有効にしてください。linuxbridge メカニズムドライバーを" "使用してエージェントが ml2 プラグインによって管理されているときに、VXLAN を有" "効にできます" msgid "" "Enable services on an agent with admin_state_up False. If this option is " "False, when admin_state_up of an agent is turned False, services on it will " "be disabled. Agents with admin_state_up False are not selected for automatic " "scheduling regardless of this option. But manual scheduling to such agents " "is available if this option is True." msgstr "" "admin_state_up が False のエージェントでサービスを有効化します。このオプショ" "ンが False の場合、エージェントの admin_state_up が False に変更されると、そ" "のエージェントでのサービスは無効化されます。admin_state_up が False のエー" "ジェントは、このオプションとは無関係に、自動スケジューリング用には選択されま" "せん。ただし、このオプションが True の場合、このようなエージェントに対しては" "手動スケジューリングが使用できます。" msgid "" "Enables IPv6 Prefix Delegation for automatic subnet CIDR allocation. Set to " "True to enable IPv6 Prefix Delegation for subnet allocation in a PD-capable " "environment. Users making subnet creation requests for IPv6 subnets without " "providing a CIDR or subnetpool ID will be given a CIDR via the Prefix " "Delegation mechanism. Note that enabling PD will override the behavior of " "the default IPv6 subnetpool." msgstr "" "自動的にサブネットの CIDR を割り当てるために IPv6 のプレフィックスデリゲー" "ションを有効化します。True に設定した場合、PD に対応した環境でのサブネットの" "割り当てのために、IPv6 のプレフィックスデリゲーションを有効化します。CIDR ま" "たはサブネットプールの ID を指定せずに IPv6 のサブネットの作成要求を行うユー" "ザーには、プレフィックスデリゲーションのメカニズム経由で CIDR が提供されま" "す。PD を有効化するとデフォルトの IPv6 サブネットプールの挙動がオーバーライド" "されることに注意してください。" msgid "" "Enables the dnsmasq service to provide name resolution for instances via DNS " "resolvers on the host running the DHCP agent. Effectively removes the '--no-" "resolv' option from the dnsmasq process arguments. Adding custom DNS " "resolvers to the 'dnsmasq_dns_servers' option disables this feature." msgstr "" "dnsmasq サービスを有効化すると、DHCP エージェントを実行するホスト上で DNS リ" "ゾルバー経由でインスタンスの名前解決を行うことができます。dnsmasq プロセスの" "引数から '--no-resolv' オプションを効果的に削除します。'dnsmasq_dns_servers' " "オプションにカスタムの DNS リゾルバーを追加すると、この機能を無効化できます。" #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "操作の試行中に発生したエラー %(reason)s。" #, python-format msgid "Error parsing dns address %s" msgstr "DNS アドレス %s の解析中にエラーが発生しました" #, python-format msgid "Error while reading %s" msgstr "%s の読み取り中にエラーが発生しました" #, python-format msgid "" "Exceeded %s second limit waiting for address to leave the tentative state." msgstr "" "アドレスが一時的な状態を終了するまでの待機時間の上限の %s 秒を超過しました。" msgid "Existing prefixes must be a subset of the new prefixes" msgstr "" "既存のプレフィックスは新規プレフィックスのサブセットでなければなりません" #, python-format msgid "" "Exit code: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: " "%(stderr)s" msgstr "" "終了コード: %(returncode)d、Stdin: %(stdin)s、Stdout: %(stdout)s、Stderr: " "%(stderr)s" #, python-format msgid "Extension %(driver)s failed." 
msgstr "拡張 %(driver)s が失敗しました。" #, python-format msgid "" "Extension driver %(driver)s required for service plugin %(service_plugin)s " "not found." msgstr "" "サービスプラグイン %(service_plugin)s に必要な拡張ドライバー %(driver)s が見" "つかりませんでした。" msgid "" "Extension to use alongside ml2 plugin's l2population mechanism driver. It " "enables the plugin to populate VXLAN forwarding table." msgstr "" "ml2 プラグインの l2population メカニズムドライバーとともに使用する拡張機能。" "これにより、このプラグインは VXLAN 転送テーブルにデータを追加できるようになり" "ます。" #, python-format msgid "Extension with alias %s does not exist" msgstr "エイリアス %s を持つ拡張は存在しません" msgid "Extensions list to use" msgstr "使用する拡張機能リスト" #, python-format msgid "External IP %s is the same as the gateway IP" msgstr "外部 IP %s はゲートウェイ IP と同一です" #, python-format msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found." msgstr "" "ルーター %(router_id)s のスケジュール変更に失敗しました: 適格な L3 エージェン" "トが見つかりません。" #, python-format msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s." msgstr "" "L3 エージェント %(agent_id)s に対するルーター %(router_id)s のスケジューリン" "グに失敗しました。" #, python-format msgid "" "Failed to create port on network %(network_id)s, because fixed_ips included " "invalid subnet %(subnet_id)s" msgstr "" "fixed_ips が無効なサブネット %(subnet_id)s に含まれていたため、ネットワーク " "%(network_id)s でポートを作成できませんでした" #, python-format msgid "Failed to locate source for %s." msgstr "%s のソースの特定に失敗しました。" msgid "Failed to remove supplemental groups" msgstr "補足グループの削除に失敗しました" #, python-format msgid "Failed to set gid %s" msgstr "gid %s の設定に失敗しました。" #, python-format msgid "Failed to set uid %s" msgstr "uid %s の設定に失敗しました" #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "%(ip)s に対する %(type)s トンネルポートをセットアップできませんでした" msgid "Failure applying iptables rules" msgstr "iptables ルール適用の失敗" #, python-format msgid "Failure waiting for address %(address)s to become ready: %(reason)s" msgstr "アドレス %(address)s の準備ができるまでの待機の失敗: %(reason)s" msgid "Flat provider networks are disabled" msgstr "flat プロバイダーネットワークが無効化されています" msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "" "TCP/UDP プロトコルの場合、port_range_min は port_range_max 以下でなければなり" "ません" msgid "Force ip_lib calls to use the root helper" msgstr "ip_lib 呼び出しでルートヘルパーを強制的に使用します" msgid "Gateway IP version inconsistent with allocation pool version" msgstr "" "ゲートウェイの IP バージョンは割り当てプールのバージョンと一致する必要があり" "ます" msgid "Gateway is not valid on subnet" msgstr "ゲートウェイがサブネット上で無効です" msgid "" "Group (gid or name) running metadata proxy after its initialization (if " "empty: agent effective group)." msgstr "" "初期化後にメタデータプロキシーを実行しているグループ (gid または名前) (空の場" "合: エージェント有効グループ)。" msgid "Group (gid or name) running this process after its initialization" msgstr "初期化後にこのプロセスを実行するグループ (gid または名前)" msgid "" "Hostname to be used by the Neutron server, agents and services running on " "this machine. All the agents and services running on this machine must use " "the same host value." msgstr "" "このマシン上で稼働する Neutron のサーバー、エージェント、サービスが使用するホ" "スト名。このマシン上で稼働するすべてのエージェントとサービスは同じホスト値を" "使用する必要があります。" #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" "min) is missing." 
msgstr "" "ICMP コード (port-range-max) %(value)s が指定されましたが、ICMP タイプ (port-" "range-min) がありません。" msgid "ID of network" msgstr "ネットワークの ID" msgid "ID of network to probe" msgstr "プローブするネットワークの ID" msgid "ID of probe port to delete" msgstr "削除するプローブポートの ID" msgid "ID of probe port to execute command" msgstr "コマンドを実行するプローブ ポートの ID" msgid "ID of the router" msgstr "ルーターの ID" #, python-format msgid "IP address %(ip)s already allocated in subnet %(subnet_id)s" msgstr "" "IP アドレス %(ip)s が既にサブネット %(subnet_id)s に割り当てられています" #, python-format msgid "IP address %(ip)s does not belong to subnet %(subnet_id)s" msgstr "IP アドレス %(ip)s がサブネット %(subnet_id)s に属していません" msgid "IP allocation failed. Try again later." msgstr "IP の割り当てが失敗しました。後で再び割り当ててください。" msgid "IP allocation requires subnet_id or ip_address" msgstr "IP を割り当てるには subnet_id または ip_address が必要です" #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables rules:\n" "%s" msgstr "" "IPTablesManager.apply が、次の一連の iptables 規則の適用に失敗しました: \n" "%s" msgid "IPv6 Address Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "" "プレフィックスデリゲーションを行うには、IPv6 アドレスモードは SLAAC または " "Stateless である必要があります。" msgid "IPv6 RA Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "" "プレフィックスデリゲーションを行うには、IPv6 RA モードは SLAAC または " "Stateless である必要があります。" #, python-format msgid "" "IPv6 address %(ip)s cannot be directly assigned to a port on subnet " "%(subnet_id)s as the subnet is configured for automatic addresses" msgstr "" "サブネット %(subnet_id)s は自動アドレス用に設定されているため、IPv6 アドレス " "%(ip)s をこのサブネット上のポートに直接割り当てることはできません" #, python-format msgid "" "IPv6 subnet %s configured to receive RAs from an external router cannot be " "added to Neutron Router." msgstr "" "外部ルーターから RA を受け取るように構成された IPv6 サブネット %s をNeutron " "ルーターに追加することはできません。" msgid "" "If True, then allow plugins that support it to create VLAN transparent " "networks." msgstr "" "True の場合、IPAM ドライバーをサポートするプラグインに VLAN トランスペアレン" "トネットワークの作成を許可します。" msgid "Illegal IP version number" msgstr "IP バージョン番号が正しくありません" msgid "" "In some cases the Neutron router is not present to provide the metadata IP " "but the DHCP server can be used to provide this info. Setting this value " "will force the DHCP server to append specific host routes to the DHCP " "request. If this option is set, then the metadata service will be activated " "for all the networks." msgstr "" "Neutron ルーターが存在せず、メタデータ IP を提供できない場合に、DHCP サーバー" "を使用してこの情報を提供することができます。この値を設定すると、DHCP サーバー" "は DHCP 要求に対して特定のホストの経路を追加します。このオプションを設定する" "と、すべてのネットワークに対してメタデータサービスが有効化されます。" msgid "" "Indicates that this L3 agent should also handle routers that do not have an " "external network gateway configured. This option should be True only for a " "single agent in a Neutron deployment, and may be False for all agents if all " "routers must have an external network gateway." msgstr "" "この L3 エージェントが 外部ネットワーク用のゲートウェイを設定していなルーター" "にも対応する必要があることを示しています。すべてのルーターが外部ネットワーク" "用のゲートウェイを持つ必要がある場合、Neutron の実装環境の 1 つのエージェント" "に対してのみ、このオプションを True に設定する必要があり、すべてのエージェン" "トに対しては通常、このオプションを False を設定します。" #, python-format msgid "Instance of class %(module)s.%(class)s must contain _cache attribute" msgstr "" "クラス %(module)s のインスタンス。%(class)s は _cache 属性を含む必要がありま" "す" #, python-format msgid "Insufficient prefix space to allocate subnet size /%s" msgstr "サブネットサイズ /%s を割り振るためのプレフィックス空間が不十分です" msgid "Insufficient rights for removing default security group." msgstr "デフォルトのセキュリティーグループを削除するための権限が不十分です。" msgid "" "Integration bridge to use. 
Do not change this parameter unless you have a " "good reason to. This is the name of the OVS integration bridge. There is one " "per hypervisor. The integration bridge acts as a virtual 'patch bay'. All VM " "VIFs are attached to this bridge and then 'patched' according to their " "network connectivity." msgstr "" "使用する統合ブリッジ。適切な理由がない限り、このパラメーターを変更しないでく" "ださい。これは OVS の統合ブリッジの名前となります。ハイパーバイザーごとに 1 " "つのブリッジが存在します。この統合ブリッジは仮想の「パッチベイ」として機能し" "ます。すべての VM の VIF はこのブリッジに接続し、ネットワーク接続に基づいて" "パッチが適用されます。" msgid "Interface to monitor" msgstr "モニター対象のインターフェース" msgid "" "Interval between checks of child process liveness (seconds), use 0 to disable" msgstr "子プロセスの動作状況を確認する間隔 (秒)、無効にするには 0 を指定します" msgid "Interval between two metering measures" msgstr "2 つの計測間の間隔" msgid "Interval between two metering reports" msgstr "2 つの計測レポート間の間隔" #, python-format msgid "Invalid Device %(dev_name)s: %(reason)s" msgstr "無効なデバイス %(dev_name)s: %(reason)s" #, python-format msgid "" "Invalid action '%(action)s' for object type '%(object_type)s'. Valid " "actions: %(valid_actions)s" msgstr "" "オブジェクトタイプ '%(object_type)s' の無効なアクション '%(action)s'。有効な" "アクション: %(valid_actions)s" #, python-format msgid "" "Invalid authentication type: %(auth_type)s, valid types are: " "%(valid_auth_types)s" msgstr "" "認証タイプ %(auth_type)s は無効です。有効なタイプは %(valid_auth_types)s です" #, python-format msgid "Invalid ethertype %(ethertype)s for protocol %(protocol)s." msgstr "プロトコル %(protocol)s に関する無効なイーサタイプ %(ethertype)s。" #, python-format msgid "Invalid format: %s" msgstr "無効な形式: %s" #, python-format msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s" msgstr "" "インスタンス状態 %(state)s は無効です。有効な状態は %(valid_states)s です" #, python-format msgid "Invalid mapping: '%s'" msgstr "無効なマッピング: '%s'" #, python-format msgid "Invalid pci slot %(pci_slot)s" msgstr "無効な PCI スロット %(pci_slot)s" #, python-format msgid "Invalid provider format. Last part should be 'default' or empty: %s" msgstr "" "プロバイダーの指定形式が無効です。最後の部分は 'default' または空にしてくださ" "い: %s" #, python-format msgid "Invalid resource type %(resource_type)s" msgstr "無効なリソースタイプ %(resource_type)s" #, python-format msgid "Invalid route: %s" msgstr "無効な経路: %s" msgid "Invalid service provider format" msgstr "サービスプロバイダーの指定形式が無効です" #, python-format msgid "" "Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255." msgstr "" "ICMP %(field)s (%(attr)s) の値 %(value)s は無効です。これは 0 から 255 までで" "なければなりません。" #, python-format msgid "Invalid value for port %(port)s" msgstr "ポート %(port)s の無効値" msgid "" "Iptables mangle mark used to mark ingress from external network. This mark " "will be masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "外部ネットワークからの進入をマークするために使用される iptables の mangle " "マーク。このマークは 0xffff でマスクされるため、下位 16 ビットのみが使用され" "ます。" msgid "" "Iptables mangle mark used to mark metadata valid requests. This mark will be " "masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "メタデータの有効な要求をマークするために使用される iptables の mangle マー" "ク。このマークは 0xffff でマスクされるため、下位 16 ビットのみが使用されま" "す。" msgid "Keepalived didn't respawn" msgstr "Keepalived が再生成されませんでした" msgid "Keepalived didn't spawn" msgstr "Keepalived が生成されませんでした" #, python-format msgid "" "Kernel HZ value %(value)s is not valid. This value must be greater than 0."
msgstr "" "カーネル HZ の値 %(value)s が無効です。0 より大きい値にしなければなりません。" msgid "L3 agent failure to setup NAT for floating IPs" msgstr "L3 エージェントによる Floating IP への NAT のセットアップの失敗" msgid "L3 agent failure to setup floating IPs" msgstr "L3 エージェントによる Floating IP のセットアップの失敗" msgid "Limit number of leases to prevent a denial-of-service." msgstr "Denial-of-Service を防ぐためにリースの数を制限してください。" msgid "List of :" msgstr " のリスト: " msgid "" "List of :: or " "specifying physical_network names usable for VLAN provider and tenant " "networks, as well as ranges of VLAN tags on each available for allocation to " "tenant networks." msgstr "" ":: または のリスト。" "このリストには、VLAN プロバイダー/テナントネットワークに使用できる " "physical_network 名が指定されるだけでなく、テナントネットワークに割り振ること" "ができる各物理ネットワークの VLAN タグの範囲も指定されます。" msgid "" "List of network type driver entrypoints to be loaded from the neutron.ml2." "type_drivers namespace." msgstr "" "neutron.ml2.type_drivers 名前空間からロードするネットワークタイプドライバーの" "エントリーポイントのリスト。" msgid "" "List of physical_network names with which flat networks can be created. Use " "default '*' to allow flat networks with arbitrary physical_network names. " "Use an empty list to disable flat networks." msgstr "" "フラットネットワークの作成が可能な physical_network 名のリスト。デフォルト値" "の '*' を使用すると、任意の physical_network 名を持つフラットネットワークを作" "成できます。空のリストを使用すると、フラットネットワークが無効化されます。" msgid "Location for Metadata Proxy UNIX domain socket." msgstr "メタデータプロキシー UNIX ドメインソケットのロケーション。" msgid "Location of Metadata Proxy UNIX domain socket" msgstr "メタデータプロキシーの UNIX ドメインソケットの場所" msgid "Location to store DHCP server config files." msgstr "DHCP サーバーの構成ファイルを保存するロケーション。" msgid "Location to store IPv6 PD files." msgstr "IPv6 PD ファイルを保存するロケーション。" msgid "Location to store IPv6 RA config files" msgstr "IPv6 RA 設定ファイルを保存する場所" msgid "Location to store child pid files" msgstr "子プロセスの PID ファイルを保持する場所" msgid "Log agent heartbeats" msgstr "エージェントのハートビートを記録します" msgid "" "MTU of the underlying physical network. Neutron uses this value to calculate " "MTU for all virtual network components. For flat and VLAN networks, neutron " "uses this value without modification. For overlay networks such as VXLAN, " "neutron automatically subtracts the overlay protocol overhead from this " "value. Defaults to 1500, the standard value for Ethernet." msgstr "" "基盤となる物理ネットワークの MTU。Neutron はこの値を使用して、すべての仮想" "ネットワークのコンポーネントの MTU を計算します。フラットネットワークおよび " "VLAN ネットワークの場合は、Neutron はこの値を変更することなく使用します。" "VXLAN のようなオーバーレイネットワークの場合は、Neutron はこの値からオーバー" "レイプロトコルオーバーヘッドの値を自動的に減算します。デフォルト値は " "Ethernet の標準値である 1500 です。" msgid "MTU size of veth interfaces" msgstr "veth インターフェースの MTU サイズ" msgid "Make the l2 agent run in DVR mode." msgstr "L2 エージェントを DVR モードで実行します。" msgid "Malformed request body" msgstr "誤った形式のリクエスト本文" msgid "MaxRtrAdvInterval setting for radvd.conf" msgstr "radvd.conf の MaxRtrAdvInterval 設定" msgid "Maximum number of DNS nameservers per subnet" msgstr "サブネットごとの DNS ネームサーバーの最大数" msgid "" "Maximum number of L3 agents which a HA router will be scheduled on. If it is " "set to 0 then the router will be scheduled on every agent." 
msgstr "" "HA ルーターがスケジュール設定される L3 エージェントの最大数。この値を 0 に設" "定すると、ルーターはすべてのエージェントに対してスケジュール設定されます。" msgid "Maximum number of allowed address pairs" msgstr "許可されたアドレスペアの最大数" msgid "Maximum number of host routes per subnet" msgstr "サブネットごとのホスト経路の最大数" msgid "Maximum number of routes per router" msgstr "ルーターごとに設定可能な経路の最大数" msgid "" "Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': deduce " "mode from metadata_proxy_user/group values, 'user': set metadata proxy " "socket mode to 0o644, to use when metadata_proxy_user is agent effective " "user or root, 'group': set metadata proxy socket mode to 0o664, to use when " "metadata_proxy_group is agent effective group or root, 'all': set metadata " "proxy socket mode to 0o666, to use otherwise." msgstr "" "メタデータプロキシーの UNIX ドメインのソケットモードでは 4 つの値を使用できま" "す。'deduce' は metadata_proxy_user/group の値からモードを推測します。'user' " "はメタデータプロキシーのソケットモードを 0o644 に設定します " "(metadata_proxy_user がエージェントの有効なユーザーまたはルートである場合に使" "用)。'group' はメタデータプロキシーのソケットモードを 0o664 に設定します " "(metadata_proxy_group がエージェントの有効なグループまたはルートである場合に" "使用)。'all' はメタデータプロキシーのソケットモードを 0o666 に設定します (そ" "の他の場合に使用)。" msgid "Metering driver" msgstr "計測ドライバー" msgid "MinRtrAdvInterval setting for radvd.conf" msgstr " radvd.conf の MinRtrAdvInterval 設定" msgid "Minimize polling by monitoring ovsdb for interface changes." msgstr "" "インターフェース変更の検出に関して ovsdb をモニターすることでポーリングが最小" "化されます。" #, python-format msgid "Missing key in mapping: '%s'" msgstr "マッピングにキーがありません: '%s'" msgid "" "Multicast group for VXLAN. When configured, will enable sending all " "broadcast traffic to this multicast group. When left unconfigured, will " "disable multicast VXLAN mode." msgstr "" "VXLAN のマルチキャストグループ。設定されると、このマルチキャストグループにす" "べてのブロードキャストトラフィックを送信できます。設定されないままにすると、" "マルチキャスト VXLAN モードを無効化します。" msgid "" "Multicast group(s) for vxlan interface. A range of group addresses may be " "specified by using CIDR notation. Specifying a range allows different VNIs " "to use different group addresses, reducing or eliminating spurious broadcast " "traffic to the tunnel endpoints. To reserve a unique group for each possible " "(24-bit) VNI, use a /8 such as 239.0.0.0/8. This setting must be the same on " "all the agents." msgstr "" "vxlan インターフェースのマルチキャストグループ。CIDR 表記を使用することで、一" "定の範囲のグループアドレスを指定できます。範囲を指定するとさまざまな VNI がさ" "まざまなグループアドレスを使用できるため、トンネルのエンドポイントへの不適切" "なブロードキャストトラフィックを削減または排除できます。使用する可能性のある" "各 (24 ビットの) VNI に独自のグループを予約するには、 /8 (239.0.0.0/8 など) " "を使用します。この設定はすべてのエージェントで同じである必要があります。" #, python-format msgid "Multiple default providers for service %s" msgstr "サービス %s のデフォルトのプロバイダーが複数あります" #, python-format msgid "Multiple providers specified for service %s" msgstr "複数のプロバイダーがサービス %s に対して指定されました" msgid "Multiple tenant_ids in bulk security group rule create not allowed" msgstr "" "バルクセキュリティーグループルールの作成で複数の tenant_id は許可されません" msgid "Must also specify protocol if port range is given." msgstr "" "ポートの範囲が提供されている場合は、プロトコルも指定する必要があります。" msgid "Must specify one or more actions on flow addition or modification" msgstr "フローの追加または変更について、1 つ以上のアクションを指定してください" msgid "Name of Open vSwitch bridge to use" msgstr "使用する Open vSwitch ブリッジの名前" msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "" "使用する nova リージョンの名前。Keystone で複数のリージョンを管理する場合に役" "立ちます。" msgid "Namespace of the router" msgstr "ルーターの名前空間" msgid "Native pagination depend on native sorting" msgstr "ネイティブページ編集はネイティブソートに依存します" #, python-format msgid "" "Need to apply migrations from %(project)s contract branch. 
This will require " "all Neutron server instances to be shutdown before proceeding with the " "upgrade." msgstr "" "%(project)s の収縮枝からの移行を適用する必要があります。アップグレードを行う" "前に、すべての Neutron サーバーのインスタンスをシャットダウンする必要がありま" "す。" msgid "Negative delta (downgrade) not supported" msgstr "負のデルタ (ダウングレード) はサポートされていません" msgid "Negative relative revision (downgrade) not supported" msgstr "負の相対的な変更 (ダウングレード) はサポートされていません" #, python-format msgid "Network %s does not contain any IPv4 subnet" msgstr "ネットワーク %s には IPv4 サブネットが含まれません" #, python-format msgid "Network %s is not a valid external network" msgstr "ネットワーク %s は有効な外部ネットワークではありません" #, python-format msgid "Network %s is not an external network" msgstr "ネットワーク %s は外部ネットワークではありません" #, python-format msgid "" "Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges " "%(excluded_ranges)s was not found." msgstr "" "IP 範囲 %(parent_range)s (IP 範囲 %(excluded_ranges)s を除く) からのサイズ " "%(size)s のネットワークが見つかりませんでした。" #, python-format msgid "Network type value '%s' not supported" msgstr "ネットワークタイプ値 '%s' はサポートされていません" msgid "Network type value needed by the ML2 plugin" msgstr "ネットワークタイプ値が ML2 プラグインに必要です" msgid "Neutron core_plugin not configured!" msgstr "Neutron の core_plugin が設定されていません。" msgid "No default router:external network" msgstr "デフォルトの router:external ネットワークがありません" #, python-format msgid "No default subnetpool found for IPv%s" msgstr "IPv%s に関するデフォルトのサブネットプールが見つかりません" msgid "No default subnetpools defined" msgstr "定義されたデフォルトのサブネットプールがありません" #, python-format msgid "No more IP addresses available for subnet %(subnet_id)s." msgstr "" "サブネット %(subnet_id)s ではこれ以上使用可能な IP アドレスがありません。" msgid "No offline migrations pending." msgstr "オフラインで実行中の移行はありません。" #, python-format msgid "No shared key in %s fields" msgstr "%s フィールドに共有鍵が存在しません" msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "" "'dvr' モードのエージェントへの手動でのルーター割り当ては許可されません。" msgid "Not allowed to manually remove a router from an agent in 'dvr' mode." msgstr "'dvr' モードのエージェントからの手動でのルーター削除は許可されません。" msgid "" "Number of DHCP agents scheduled to host a tenant network. If this number is " "greater than 1, the scheduler automatically assigns multiple DHCP agents for " "a given tenant network, providing high availability for DHCP service." msgstr "" "テナントネットワークをホストするようにスケジュール設定された DHCP エージェン" "トの数。この数が 1 より大きい場合、スケジューラーが自動的に特定のテナントネッ" "トワークに複数の DHCP エージェントを割り当てるため、DHCP サービスの高可用性が" "実現します。" msgid "Number of backlog requests to configure the metadata server socket with" msgstr "メタデータサーバーソケットの構成に使用されるバックログ要求の数" msgid "Number of backlog requests to configure the socket with" msgstr "ソケットに設定するリクエストのバックログ数" msgid "" "Number of bits in an ipv4 PTR zone that will be considered network prefix. " "It has to align to byte boundary. Minimum value is 8. Maximum value is 24. " "As a consequence, range of values is 8, 16 and 24" msgstr "" "ネットワークのプレフィックスとみなされる Ipv4 PTR ゾーン内のビット数。バイト" "境界と同じ桁数である必要があります。最小値は 8 で、最大値は 24 です。そのた" "め、使用できる値は 8、16、および 24 です。" msgid "" "Number of bits in an ipv6 PTR zone that will be considered network prefix. " "It has to align to nyble boundary. Minimum value is 4. Maximum value is 124. " "As a consequence, range of values is 4, 8, 12, 16,..., 124" msgstr "" "ネットワークのプレフィックスとみなされる Ipv6 PTR ゾーン内のビット数。nyble " "境界と同じ桁数である必要があります。最小値は 4 で、最大値は 124 です。そのた" "め、使用できる値は 4、 8、12、16、... 124 です。" msgid "" "Number of floating IPs allowed per tenant. A negative value means unlimited." 
msgstr "" "テナント当たりに許可される Floating IP 数。負の値は無制限を意味します。 " msgid "" "Number of networks allowed per tenant. A negative value means unlimited." msgstr "テナント当たりに許可されるネットワーク数。負の値は無制限を意味します。" msgid "Number of ports allowed per tenant. A negative value means unlimited." msgstr "テナント当たりに許可されるポート数。負の値は無制限を意味します。" msgid "Number of routers allowed per tenant. A negative value means unlimited." msgstr "テナント当たりに許可されるルーター数。負の値は無制限を意味します。" msgid "" "Number of seconds between sending events to nova if there are any events to " "send." msgstr "送信するイベントがある場合の nova へのイベント送信間の秒数。" msgid "Number of seconds to keep retrying to listen" msgstr "リッスンを試行し続ける秒数" msgid "" "Number of security groups allowed per tenant. A negative value means " "unlimited." msgstr "" "テナント当たりに許可されるセキュリティーグループ数。負の値は無制限を意味しま" "す。 " msgid "" "Number of security rules allowed per tenant. A negative value means " "unlimited." msgstr "" "テナント当たりに許可されるセキュリティールール数。負の値は無制限を意味しま" "す。 " msgid "" "Number of separate worker processes for metadata server (defaults to half of " "the number of CPUs)" msgstr "" "メタデータサーバーの個別のワーカープロセスの数 (デフォルト値は CPU 数の半数)" msgid "Number of subnets allowed per tenant, A negative value means unlimited." msgstr "テナント当たりに許可されるサブネット数。負の値は無制限を意味します。" msgid "" "Number of threads to use during sync process. Should not exceed connection " "pool size configured on server." msgstr "" "同期プロセス中に使用するスレッドの数。サーバーで設定した接続プールサイズの値" "を超えてはなりません。" msgid "OK" msgstr "OK" msgid "" "OVS datapath to use. 'system' is the default value and corresponds to the " "kernel datapath. To enable the userspace datapath set this value to 'netdev'." msgstr "" "使用する OVS データパス。デフォルト値は 'system' であり、これはカーネルのデー" "タパスに該当します。ユーザースペースのデータパスを有効化するには、この値を " "'netdev' に設定します。" msgid "OVS vhost-user socket directory." msgstr "OVS の vhost-user ソケットディレクトリー。" msgid "Only admin can view or configure quota" msgstr "admin のみが割り当て量を表示または構成できます" msgid "Only admin is authorized to access quotas for another tenant" msgstr "別のテナントの割り当て量へのアクセスが許可されているのは管理者のみです" msgid "Only admins can manipulate policies on objects they do not own" msgstr "" "自分がオーナーではないオブジェクトのポリシーを操作できるのは管理者に限られま" "す" msgid "Only allowed to update rules for one security profile at a time" msgstr "一度に 1 つのセキュリティープロファイルのルールのみを更新できます" msgid "Only remote_ip_prefix or remote_group_id may be provided." msgstr "remote_ip_prefix または remote_group_id のみを指定できます。" #, python-format msgid "Operation not supported on device %(dev_name)s" msgstr "デバイス %(dev_name)s でサポートされない処理" msgid "" "Ordered list of network_types to allocate as tenant networks. The default " "value 'local' is useful for single-box testing but provides no connectivity " "between hosts." msgstr "" "テナントネットワークとして割り当てる network_types を一定の順序に並べたリス" "ト。デフォルト値の 'local' はシングルボックステストに役立つものの、ホスト間の" "接続は提供しません。" msgid "Override the default dnsmasq settings with this file." msgstr "" "このファイルを使用して、デフォルトの dnsmasq 設定をオーバーライドします。" msgid "Owner type of the device: network/compute" msgstr "デバイスの所有者タイプ: network/compute" msgid "POST requests are not supported on this resource." msgstr "POST 要求は、このリソースではサポートされていません。" #, python-format msgid "Package %s not installed" msgstr "パッケージ %s はインストールされていません" #, python-format msgid "Parsing bridge_mappings failed: %s." 
msgstr "bridge_mappings の解析に失敗しました: %s。" msgid "Password for connecting to designate in admin context" msgstr "管理者のコンテキストにおける designate への接続用パスワード" msgid "Path to PID file for this process" msgstr "このプロセスの PID ファイルのパス" msgid "Path to the router directory" msgstr "ルーターディレクトリーのパス" msgid "Peer patch port in integration bridge for tunnel bridge." msgstr "トンネルブリッジの統合ブリッジ内のピアパッチポート。" msgid "Peer patch port in tunnel bridge for integration bridge." msgstr "統合ブリッジのトンネルブリッジ内のピアパッチポート。" msgid "Phase upgrade options do not accept revision specification" msgstr "" "フェーズのアップグレードオプションでは、変更の指定を行うことはできません" msgid "Ping timeout" msgstr "ping タイムアウト" #, python-format msgid "Port %(id)s does not have fixed ip %(address)s" msgstr "ポート %(id)s に Fixed IP %(address)s がありません" #, python-format msgid "Port %s does not exist" msgstr "ポート %s は存在しません" #, python-format msgid "" "Port %s has multiple fixed IPv4 addresses. Must provide a specific IPv4 " "address when assigning a floating IP" msgstr "" "ポート %s には複数の固定 IPv4 アドレスがあります。Floating IP を割り当てる際" "には、特定の IPv4 アドレスを提供する必要があります" msgid "Prefix Delegation can only be used with IPv6 subnets." msgstr "" "プレフィックスデリゲーションが使用できるのは IPv6 のサブネットに限られます。" msgid "Private key of client certificate." msgstr "クライアント証明書の秘密鍵。" #, python-format msgid "Probe %s deleted" msgstr "プローブ %s が削除されました" #, python-format msgid "Probe created : %s " msgstr "作成されたプローブ: %s " msgid "Process is already started" msgstr "プロセスが既に実行されています" msgid "Process is not running." msgstr "プロセスが実行されていません" msgid "Protocol to access nova metadata, http or https" msgstr "Nova メタデータ、http、または https にアクセスするためのプロトコル" #, python-format msgid "Provider name %(name)s is limited by %(len)s characters" msgstr "プロバイダー名 %(name)s の制限は %(len)s 文字までです。" #, python-format msgid "RBAC policy of type %(object_type)s with ID %(id)s not found" msgstr "" "ID %(id)s を持つタイプ %(object_type)s の RBAC ポリシーが見つかりません" #, python-format msgid "" "RBAC policy on object %(object_id)s cannot be removed because other objects " "depend on it.\n" "Details: %(details)s" msgstr "" "他のオブジェクトがこの RBAC ポリシーに依存しているため、オブジェクト " "%(object_id)s に対する RBAC ポリシーを削除できません。\n" "詳細: %(details)s" msgid "" "Range of seconds to randomly delay when starting the periodic task scheduler " "to reduce stampeding. (Disable by setting to 0)" msgstr "" "集中状態を緩和するため、定期タスクスケジューラーの開始時に挿入するランダムな" "遅延時間 (秒) の範囲。(無効にするには 0 に設定)" msgid "Ranges must be in the same IP version" msgstr "範囲は同じ IP バージョンである必要があります" msgid "Ranges must be netaddr.IPRange" msgstr "範囲は netaddr.IPRange である必要があります" msgid "Ranges must not overlap" msgstr "範囲は重複することはできません" #, python-format msgid "" "Release aware branch labels (%s) are deprecated. Please switch to expand@ " "and contract@ labels." msgstr "" "リリースを認識する分岐ラベル (%s) は提供を終了しています。expand@ ラベルと " "contract@ ラベルに変更してください。" msgid "Remote metadata server experienced an internal server error." msgstr "リモートメタデータサーバーで内部サーバーエラーが発生しました。" msgid "" "Repository does not contain HEAD files for contract and expand branches." msgstr "リポジトリーに縮小分岐と拡張分岐の HEAD ファイルが含まれていません。" msgid "" "Representing the resource type whose load is being reported by the agent. " "This can be \"networks\", \"subnets\" or \"ports\". When specified (Default " "is networks), the server will extract particular load sent as part of its " "agent configuration object from the agent report state, which is the number " "of resources being consumed, at every report_interval.dhcp_load_type can be " "used in combination with network_scheduler_driver = neutron.scheduler." 
"dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is " "WeightScheduler, dhcp_load_type can be configured to represent the choice " "for the resource being balanced. Example: dhcp_load_type=networks" msgstr "" "負荷がエージェントによって報告されているリソースタイプを表します。このタイプ" "には、\"networks\"、\"subnets\"、または \"ports\" があります。指定した場合 " "(デフォルトは networks)、サーバーは、エージェントレポート状態 " "(report_interval ごとに消費されるリソース数) からそのエージェント構成オブジェ" "クトの一部として送信された特定の負荷を抽出します。dhcp_load_type は " "network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler." "WeightScheduler と組み合わせて使用できます。network_scheduler_driver が " "WeightScheduler の場合、dhcp_load_type は平衡を取るリソースの選択肢を表すよう" "に構成することができます。例: dhcp_load_type=networks" msgid "Request Failed: internal server error while processing your request." msgstr "要求が失敗しました。要求の処理中に内部サーバーエラーが発生しました。" msgid "" "Reset flow table on start. Setting this to True will cause brief traffic " "interruption." msgstr "" "起動時にフローテーブルをリセットします。この値を True に設定すると、一時的に" "トラフィックが中断します。" #, python-format msgid "Resource %(resource)s %(resource_id)s could not be found." msgstr "リソース %(resource)s %(resource_id)s が見つかりませんでした。" #, python-format msgid "Resource %(resource_id)s of type %(resource_type)s not found" msgstr "タイプ %(resource_type)s のリソース %(resource_id)s は見つかりません" #, python-format msgid "" "Resource '%(resource_id)s' is already associated with provider " "'%(provider)s' for service type '%(service_type)s'" msgstr "" "リソース '%(resource_id)s' は既にサービスタイプ '%(service_type)s' のプロバイ" "ダー '%(provider)s' に関連付けられています" msgid "Resource body required" msgstr "リソース本文が必要です" msgid "Resource not found." msgstr "リソースが見つかりません。" msgid "Resources required" msgstr "リソースが必要です" msgid "" "Root helper application. Use 'sudo neutron-rootwrap /etc/neutron/rootwrap." "conf' to use the real root filter facility. Change to 'sudo' to skip the " "filtering and just run the command directly." msgstr "" "ルートヘルパーのアプリケーション。実際のルートフィルターの機能を使用するため" "には、'sudo neutron-rootwrap /etc/neutron/rootwrap.conf' を使用します。フィル" "タリングをスキップして、直接コマンドを実行するには、'sudo' を使用します。" msgid "Root permissions are required to drop privileges." msgstr "特権を除去するにはルート許可が必要です。" #, python-format msgid "Router already has a port on subnet %s" msgstr "ルーターに、既にサブネット %s 上のポートがあります" msgid "Router port must have at least one fixed IP" msgstr "ルーターポートには 1 つ以上の Fixed IP を設定する必要があります" #, python-format msgid "Running %(cmd)s (%(desc)s) for %(project)s ..." msgstr "%(project)s に対して %(cmd)s (%(desc)s) を実行中です..." #, python-format msgid "Running %(cmd)s for %(project)s ..." msgstr "%(project)s に対して %(cmd)s を実行中です..." msgid "" "Seconds between nodes reporting state to server; should be less than " "agent_down_time, best if it is half or less than agent_down_time." msgstr "" "ノード状態をサーバーに報告する間隔 (秒)。agent_down_time 未満である必要があ" "ります。agent_down_time の半分以下であれば最適です。" msgid "" "Seconds to regard the agent is down; should be at least twice " "report_interval, to be sure the agent is down for good." msgstr "" "エージェントがダウンしていると見なすまでの時間 (秒)。エージェントが完全にダウ" "ンしていることを確実にするには、この値を少なくとも report_interval の 2 倍に" "してください。" #, python-format msgid "Security Group %(id)s %(reason)s." msgstr "セキュリティーグループ %(id)s %(reason)s。" #, python-format msgid "Security Group Rule %(id)s %(reason)s." msgstr "セキュリティーグループルール %(id)s %(reason)s。" #, python-format msgid "Security group %(id)s does not exist" msgstr "セキュリティーグループ %(id)s は存在しません" #, python-format msgid "Security group rule %(id)s does not exist" msgstr "セキュリティーグループルール %(id)s は存在しません" #, python-format msgid "Security group rule already exists. Rule id is %(rule_id)s." 
msgstr "" "セキュリティーグループルールが既に存在しています。ルール ID は %(rule_id)s で" "す。" #, python-format msgid "" "Security group rule for ethertype '%(ethertype)s' not supported. Allowed " "values are %(values)s." msgstr "" "イーサネットタイプ '%(ethertype)s' に関するセキュリティーグループルールが対応" "していません。許容される値は %(values)s です。" #, python-format msgid "" "Security group rule protocol %(protocol)s not supported. Only protocol " "values %(values)s and integer representations [0 to 255] are supported." msgstr "" "セキュリティーグループルールのプロトコル %(protocol)s が対応していません。使" "用できるのは、プロトコルの値 %(values)s と整数値 [0 から 255 まで] のみです。" msgid "" "Send notification to nova when port data (fixed_ips/floatingip) changes so " "nova can update its cache." msgstr "" "nova がそのキャッシュを更新できるように、ポートデータ (fixed_ips/floatingip) " "が変更されたときに通知を nova に送信します。" msgid "Send notification to nova when port status changes" msgstr "ポート状態の変更時の nova への通知送信" #, python-format msgid "" "Service provider '%(provider)s' could not be found for service type " "%(service_type)s" msgstr "" "サービスタイプ %(service_type)s のサービスプロバイダー '%(provider)s' は見つ" "かりませんでした" msgid "Service to handle DHCPv6 Prefix delegation." msgstr "DHCPv6 のプレフィックスデリゲーションを処理するサービス。" #, python-format msgid "Service type %(service_type)s does not have a default service provider" msgstr "" "サービスタイプ %(service_type)s にはデフォルトのサービスプロバイダーがありま" "せん" msgid "" "Set new timeout in seconds for new rpc calls after agent receives SIGTERM. " "If value is set to 0, rpc timeout won't be changed" msgstr "" "エージェントによる SIGTERM 受信後の新規 rpc 呼び出しの新規タイムアウト(秒) を" "設定します。値を 0 に設定すると、rpc タイムアウトは変更されません" msgid "" "Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "" "GRE/VXLAN トンネルを構成した発信 IP パケットで、フラグメント禁止 (DF) ビット" "を設定または設定解除します。" msgid "Shared address scope can't be unshared" msgstr "共有アドレススコープは共有を解除できません" msgid "String prefix used to match IPset names." msgstr "IPset 名とのマッチングを行うために使用する文字列のプレフィックス。" #, python-format msgid "Sub-project %s not installed." msgstr "サブプロジェクト %s はインストールされていません" msgid "Subnet for router interface must have a gateway IP" msgstr "ルーターインターフェースのサブネットにはゲートウェイ IP が必要です" msgid "Subnet pool has existing allocations" msgstr "サブネットプールに既存の割り当てがあります" msgid "Subnet used for the l3 HA admin network." msgstr "l3 HA 管理ネットワークに使用されるサブネット。" msgid "" "System-wide flag to determine the type of router that tenants can create. " "Only admin can override." msgstr "" "テナントで作成可能なルーターのタイプを判別するためのシステム全体のフラグ。管" "理者のみがオーバーライドできます。" msgid "TCP Port used by Neutron metadata namespace proxy." msgstr "Neutron メタデータ名前空間プロキシーが使用する TCP Port" msgid "TCP Port used by Nova metadata server." msgstr "Nova メタデータサーバーによって使用される TCP ポート。" msgid "TTL for vxlan interface protocol packets." msgstr "vxlan インターフェースプロトコルパケットの TTL。" #, python-format msgid "Tag %(tag)s could not be found." msgstr "タグ %(tag)s が見つかりませんでした。" #, python-format msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" msgstr "" "テナント %(tenant_id)s は、このネットワークでの %(resource)s の作成を許可され" "ていません" msgid "Tenant id for connecting to designate in admin context" msgstr "管理者のコンテキストにおける designate への接続用テナント ID" msgid "Tenant name for connecting to designate in admin context" msgstr "管理者のコンテキストにおける designate への接続用テナント名" msgid "" "The DHCP server can assist with providing metadata support on isolated " "networks. Setting this value to True will cause the DHCP server to append " "specific host routes to the DHCP request. The metadata service will only be " "activated when the subnet does not contain any router port. 
The guest " "instance must be configured to request host routes via DHCP (Option 121). " "This option doesn't have any effect when force_metadata is set to True." msgstr "" "DHCP サーバーは孤立したネットワークに対してメタデータサポートを提供することが" "できます。この値を True に設定すると、DHCP サーバーは DHCP 要求に対する特定の" "ホストへの経路を追加できます。このメタデータサービスが有効化されるのは、サブ" "ネットにルーターのポートが含まれない場合に限られます。ゲストインスタンスに" "は、DHCP (オプション 121) 経由でホストの経路を要求するよう設定を行う必要があ" "ります。force_metadata を True に設定する場合、このオプションは機能しません。" msgid "The UDP port to use for VXLAN tunnels." msgstr "VXLAN トンネルで使用する UDP ポート。" #, python-format msgid "" "The address allocation request could not be satisfied because: %(reason)s" msgstr "%(reason)s のため、アドレスの割り当て要求に対応できませんでした" msgid "The advertisement interval in seconds" msgstr "通知間隔 (秒)" msgid "" "The base MAC address Neutron will use for VIFs. The first 3 octets will " "remain unchanged. If the 4th octet is not 00, it will also be used. The " "others will be randomly generated." msgstr "" "Neutron が VIF 用に使用する基本の MAC アドレス。最初の 3 つのオクテットは変更" "しません。4 つ目のオクテットが 00 の場合、これも使用できます。その他のオク" "テットはランダムに生成されます。" msgid "" "The base mac address used for unique DVR instances by Neutron. The first 3 " "octets will remain unchanged. If the 4th octet is not 00, it will also be " "used. The others will be randomly generated. The 'dvr_base_mac' *must* be " "different from 'base_mac' to avoid mixing them up with MAC's allocated for " "tenant ports. A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00. " "The default is 3 octet" msgstr "" "Neutron によって独自の DVR インスタンスに使用される基本の MAC アドレス。最初" "の 3 つのオクテットは変更されません。4 つ目のオクテットが 00 でない場合は、" "このオクテットも使用できます。その他のオクテットはランダムに生成されます。 テ" "ナントポートに割り当てられた MAC アドレスとの混同を避けるために、 " "'dvr_base_mac' は 'base_mac' とは違う値である必要があります。4 つ目のオクテッ" "トの例としては、dvr_base_mac = fa:16:3f:4f:00:00 があります。デフォルトは、3 " "つ目のオクテットです。" msgid "The core plugin Neutron will use" msgstr "Neutron が使用するコアプラグイン" msgid "The driver used to manage the DHCP server." msgstr "DHCP サーバーの管理に使用されるドライバー。" msgid "The driver used to manage the virtual interface." msgstr "仮想インターフェースの管理に使用されるドライバー。" msgid "" "The email address to be used when creating PTR zones. If not specified, the " "email address will be admin@" msgstr "" "PTR ゾーンの作成時に使用する E メールアドレス。指定しない場合、E メールアドレ" "スは admin@ になります。" msgid "" "The maximum number of items returned in a single response, value was " "'infinite' or negative integer means no limit" msgstr "" "1 回の応答で最大数の項目が返されました。値は 'infinite' または (無制限を意味" "する) 負の整数でした" msgid "" "The network type to use when creating the HA network for an HA router. By " "default or if empty, the first 'tenant_network_types' is used. This is " "helpful when the VRRP traffic should use a specific network which is not the " "default one." msgstr "" "HA ルーター用に HA ネットワークを作成する際に使用するネットワークタイプ。デ" "フォルトまたはこの値が空の場合、最初の 'tenant_network_types' が使用されま" "す。VRRP トラフィックがデフォルトではない特定のネットワークを使用しなければな" "らない場合には、この設定が役立ちます。" msgid "" "The number of seconds the agent will wait between polling for local device " "changes." msgstr "" "ローカルデバイスの変更のポーリング間にエージェントが待機する間隔 (秒)。" msgid "" "The number of seconds to wait before respawning the ovsdb monitor after " "losing communication with it." msgstr "" "ovsdb モニターとの通信が途絶えた後で ovsdb モニターを再作成する前に待機する時" "間 (秒)" msgid "The number of sort_keys and sort_dirs must be same" msgstr "sort_keys と sort_dirs の数は同じでなければなりません" msgid "" "The path for API extensions. Note that this can be a colon-separated list of " "paths. For example: api_extensions_path = extensions:/path/to/more/exts:/" "even/more/exts. 
The __path__ of neutron.extensions is appended to this, so " "if your extensions are in there you don't need to specify them here." msgstr "" "API 拡張機能のパス。このパスはコロンで区切られたパスのリストであることに注意" "してください。例としては、api_extensions_path = extensions:/path/to/more/" "exts:/even/more/exts があります。このパスには neutron.extensions の __path__ " "が付いているため、拡張機能がこのパスにある場合、ここで指定する必要はありませ" "ん。" msgid "The physical network name with which the HA network can be created." msgstr "HA ネットワークを作成可能な物理ネットワーク名。" #, python-format msgid "The port '%s' was deleted" msgstr "ポート '%s' が削除されました" msgid "The port to bind to" msgstr "バインド先のポート" #, python-format msgid "The requested content type %s is invalid." msgstr "要求されたコンテンツタイプ %s は無効です。" msgid "The resource could not be found." msgstr "リソースが見つかりませんでした。" #, python-format msgid "" "The router %(router_id)s has been already hosted by the L3 Agent " "%(agent_id)s." msgstr "" "ルーター %(router_id)s は、既に L3 エージェント %(agent_id)s によってホストさ" "れています。" msgid "" "The server has either erred or is incapable of performing the requested " "operation." msgstr "" "サーバーに誤りがあるか、または要求された操作を実行することができません。" msgid "The service plugins Neutron will use" msgstr "Neutron が使用するサービスプラグイン" #, python-format msgid "The subnet request could not be satisfied because: %(reason)s" msgstr "%(reason)s のため、サブネットの要求に対応できませんでした" #, python-format msgid "The subproject to execute the command against. Can be one of: '%s'." msgstr "" "コマンドの実行の対象となるサブプロジェクト。'%s' のうちのいずれかにすることができ" "ます。" msgid "The type of authentication to use" msgstr "使用する認証のタイプ" msgid "" "There are routers attached to this network that depend on this policy for " "access." msgstr "" "このネットワークにはルーターが存在し、ルーターはアクセスの際にこのポリシーを" "使用します。" msgid "" "True to delete all ports on all the OpenvSwitch bridges. False to delete " "ports created by Neutron on integration and external network bridges." msgstr "" "すべての OpenvSwitch ブリッジですべてのポートを削除する場合は True。統合およ" "び外部ネットワークブリッジで Neutron によって作成されたポートを削除する場合" "は False。" msgid "Tunnel IP value needed by the ML2 plugin" msgstr "トンネル IP 値が ML2 プラグインに必要です" msgid "Tunnel bridge to use." msgstr "使用するトンネルブリッジ。" msgid "" "Type of the nova endpoint to use. This endpoint will be looked up in the " "keystone catalog and should be one of public, internal or admin." msgstr "" "使用する nova のエンドポイントのタイプ。このエンドポイントは Keystone のカタ" "ログで参照され、public、internal、または admin のいずれかである必要がありま" "す。" msgid "URL for connecting to designate" msgstr "designate への接続用 URL" msgid "URL to database" msgstr "データベースへの URL" #, python-format msgid "Unable to access %s" msgstr "%s にアクセスできません" #, python-format msgid "Unable to calculate %(address_type)s address because of:%(reason)s" msgstr "%(reason)s のため %(address_type)s アドレスを計算できません" #, python-format msgid "Unable to convert value in %s" msgstr "%s で値を変換できません" msgid "Unable to create the SNAT Interface Port" msgstr "SNAT インターフェースポートの作成ができません" #, python-format msgid "Unable to determine mac address for %s" msgstr "%s の MAC アドレスを決定できません" #, python-format msgid "Unable to find '%s' in request body" msgstr "要求本体で '%s' が見つかりません" #, python-format msgid "Unable to find IP address %(ip_address)s on subnet %(subnet_id)s" msgstr "" "サブネット %(subnet_id)s 上で IP アドレス %(ip_address)s が見つかりません" #, python-format msgid "Unable to find resource name in %s" msgstr "%s にリソース名を見つけることはできません" #, python-format msgid "" "Unable to identify a target field from:%s. 
Match should be in the form " "%%()s" msgstr "" "%s からターゲットフィールドを特定できません。一致の形式は %%()s " "でなければなりません" msgid "Unable to provide external connectivity" msgstr "外部接続を提供できません" msgid "Unable to provide tenant private network" msgstr "テナントのプライベートネットワークを提供できません" #, python-format msgid "" "Unable to verify match:%(match)s as the parent resource: %(res)s was not " "found" msgstr "" "match:%(match)s を親リソースとして検査できません: %(res)s が見つかりませんで" "した" #, python-format msgid "Unexpected label for script %(script_name)s: %(labels)s" msgstr "スクリプト %(script_name)s に関する予期しないラベル: %(labels)s" #, python-format msgid "Unexpected number of alembic branch points: %(branchpoints)s" msgstr "alembic の分岐点の予期しない数: %(branchpoints)s" #, python-format msgid "Unexpected response code: %s" msgstr "予期しない応答コード: %s" #, python-format msgid "Unexpected response: %s" msgstr "予期しない応答: %s" #, python-format msgid "Unit name '%(unit)s' is not valid." msgstr "ユニット名 '%(unit)s' が無効です。" #, python-format msgid "Unknown address type %(address_type)s" msgstr "不明なアドレスタイプ %(address_type)s" #, python-format msgid "Unknown attribute '%s'." msgstr "属性 '%s' が不明です。" #, python-format msgid "Unknown chain: %r" msgstr "不明なチェーン: %r" #, python-format msgid "Unknown network type %(network_type)s." msgstr "不明なネットワークタイプ %(network_type)s。" msgid "Unmapped error" msgstr "マップ解除エラー" msgid "Unrecognized action" msgstr "認識されないアクション" msgid "Unrecognized field" msgstr "認識されないフィールド" msgid "Unsupported Content-Type" msgstr "サポートされない Content-Type" #, python-format msgid "Unsupported network type %(net_type)s." msgstr "サポートされないネットワークタイプ %(net_type)s" msgid "Unsupported request type" msgstr "サポートされない要求タイプです" msgid "Updating default security group not allowed." msgstr "デフォルトのセキュリティーグループの更新は許可されません。" msgid "" "Use ML2 l2population mechanism driver to learn remote MAC and IPs and " "improve tunnel scalability." msgstr "" "リモート MAC および IP を認識してトンネルのスケーラビリティーを向上させるに" "は、ML2 l2population メカニズムドライバーを使用してください。" msgid "Use broadcast in DHCP replies." msgstr "DHCP の応答で ブロードキャストを使用します。" msgid "Use either --delta or relative revision, not both" msgstr "--delta と相対的な変更の両方ではなく、どちらか一方を使用してください" msgid "" "Use ipset to speed-up the iptables based security groups. Enabling ipset " "support requires that ipset is installed on L2 agent node." msgstr "" "iptables ベースのセキュリティーグループの速度を向上させるために、ipset を使用" "します。ipset へのサポートを有効にするには、L2 のエージェントノードに ipset " "をインストールする必要があります。" msgid "" "Use the root helper when listing the namespaces on a system. This may not be " "required depending on the security configuration. If the root helper is not " "required, set this to False for a performance improvement." msgstr "" "システム上の名前空間を一覧表示する際にはルートヘルパーを使用します。セキュリ" "ティー設定によっては、有効にする必要がない場合があります。ルートヘルパーが必" "要ない場合は、パフォーマンスを高めるためにこの値を False に設定します。" msgid "" "Use veths instead of patch ports to interconnect the integration bridge to " "physical networks. Support kernel without Open vSwitch patch port support so " "long as it is set to True." msgstr "" "patch port の代わりに veths を使用して、統合ブリッジを物理ネットワークに相互" "接続します。Open vSwitch の patch port のサポートが True に設定されている限" "り、当該サポートが存在しない場合でもカーネルをサポートします。" msgid "" "User (uid or name) running metadata proxy after its initialization (if " "empty: agent effective user)." 
msgstr "" "初期化後にメタデータプロキシーを実行しているユーザー (uid または名前) (空の場" "合: エージェント有効ユーザー)。" msgid "User (uid or name) running this process after its initialization" msgstr "初期化後にこのプロセスを実行するユーザー (uid または名前)" msgid "Username for connecting to designate in admin context" msgstr "管理者のコンテキストにおける designate への接続用ユーザー名" msgid "VRRP authentication password" msgstr "VRRP 認証パスワード" msgid "VRRP authentication type" msgstr "VRRP 認証タイプ" msgid "" "Value of host kernel tick rate (hz) for calculating minimum burst value in " "bandwidth limit rules for a port with QoS. See kernel configuration file for " "HZ value and tc-tbf manual for more information." msgstr "" "QoS を設定したポートについて、帯域幅の制限ルールに基づいて最小バースト値を計" "算するためのホストのカーネルのチックレート値 (Hz)。詳細情報については、Hz 値" "のカーネル設定ファイルと tc-tbf マニュアルを参照してください。" msgid "" "Value of latency (ms) for calculating size of queue for a port with QoS. See " "tc-tbf manual for more information." msgstr "" "QoS を設定したポートについて、キューのサイズを検索するたためのレイテンシー値 " "(ミリ秒)。詳細情報については、tc-tbf マニュアルを参照してください。" msgid "" "When proxying metadata requests, Neutron signs the Instance-ID header with a " "shared secret to prevent spoofing. You may select any string for a secret, " "but it must match here and in the configuration used by the Nova Metadata " "Server. NOTE: Nova uses the same config key, but in [neutron] section." msgstr "" "メタデータ要求のプロキシーを実行する際に、Neutron はスプーフィングを防止する" "ために共有秘密鍵を使用して インスタンス ID ヘッダーに署名します。秘密鍵として" "任意の文字列を選択できるものの、その値はここと Nova Metadata Server が使用す" "る設定で一致する必要があります。注意: Nova は同じ設定鍵を使用するものの、その" "値は [neutron] セクションにあります。" msgid "" "Where to store Neutron state files. This directory must be writable by the " "agent." msgstr "" "Neutron 状態ファイルの保管場所。このディレクトリーは、エージェントが書き込み" "を行える場所でなければなりません。" msgid "" "With IPv6, the network used for the external gateway does not need to have " "an associated subnet, since the automatically assigned link-local address " "(LLA) can be used. However, an IPv6 gateway address is needed for use as the " "next-hop for the default route. If no IPv6 gateway address is configured " "here, (and only then) the neutron router will be configured to get its " "default route from router advertisements (RAs) from the upstream router; in " "which case the upstream router must also be configured to send these RAs. " "The ipv6_gateway, when configured, should be the LLA of the interface on the " "upstream router. If a next-hop using a global unique address (GUA) is " "desired, it needs to be done via a subnet allocated to the network and not " "through this parameter. " msgstr "" "IPv6 では、自動的に割り当てられたリンクローカルアドレス (LLA) を使用できるた" "め、外部ゲートウェイに使用するネットワークにはサブネットを関連付ける必要はあ" "りません。ただし、IPv6 ゲートウェイアドレスはデフォルト経路のネクストホップと" "して使用するために必要です。IPv6 ゲートウェイアドレスをここで構成しない場合に" "のみ、上流ルーターのルーター広告 (RA) からデフォルト経路を取得するように " "Neutron ルーターが構成されます。この場合、これらの RA を送信するように上流" "ルーターを構成することも必要です。ipv6_gateway を構成する場合、これは上流ルー" "ター上のインターフェースの LLA でなければなりません。グローバルユニークアドレ" "ス (GUA) を使用したネクストホップが必要な場合は、このパラメーターを使用するの" "ではなく、ネットワークに割り振られたサブネットを介してこれを行う必要がありま" "す。" msgid "You must implement __call__" msgstr "__call__ を実装する必要があります" msgid "" "You must provide a config file for bridge - either --config-file or " "env[NEUTRON_TEST_CONFIG_FILE]" msgstr "" "ブリッジの構成ファイルとして --config-file または " "env[NEUTRON_TEST_CONFIG_FILE] のいずれかを指定する必要があります" msgid "You must provide a revision or relative delta" msgstr "変更または相対デルタを指定する必要があります" msgid "a subnetpool must be specified in the absence of a cidr" msgstr "cidr がない場合、サブネットプールの指定は必須です" msgid "add_ha_port cannot be called inside of a transaction." 
msgstr "トランザクション内に add_ha_port を呼び出すことはできません。" msgid "allocation_pools allowed only for specific subnet requests." msgstr "allocation_pools は特定のサブネット要求にのみ許可されます。" msgid "allocation_pools are not in the subnet" msgstr "allocation_pools がサブネット内に存在しません" msgid "allocation_pools use the wrong ip version" msgstr "allocation_pools が間違ったIP バージョンを使用しています" msgid "already a synthetic attribute" msgstr "既に synthetic 属性を使用" msgid "binding:profile value too large" msgstr "binding:profile 値が大きすぎます" #, python-format msgid "cannot perform %(event)s due to %(reason)s" msgstr "%(reason)s のため %(event)s を実行できません" msgid "cidr and prefixlen must not be supplied together" msgstr "cidr と prefixlen を同時に指定してはなりません" msgid "dns_domain cannot be specified without a dns_name" msgstr "dns_domain は dns_name なしでは指定できません" msgid "dns_name cannot be specified without a dns_domain" msgstr "dns_name は dns_domain なしでは指定できません" msgid "fixed_ip_address cannot be specified without a port_id" msgstr "fixed_ip_address は、port_id なしでは指定できません" #, python-format msgid "has device owner %s" msgstr "デバイス所有者 %s" msgid "in use" msgstr "使用されています" #, python-format msgid "ip command failed on device %(dev_name)s: %(reason)s" msgstr "ip コマンドがデバイス %(dev_name)s で失敗しました: %(reason)s" #, python-format msgid "ip link capability %(capability)s is not supported" msgstr "ip リンク機能 %(capability)s はサポートされていません" #, python-format msgid "ip link command is not supported: %(reason)s" msgstr "ip リンクコマンドはサポートされていません: %(reason)s" msgid "ip_version must be specified in the absence of cidr and subnetpool_id" msgstr "cidr および subnetpool_id がない場合、ip_version の指定は必須です" msgid "ipv6_address_mode is not valid when ip_version is 4" msgstr "ip_version が 4 の場合、ipv6_address_mode は無効です" msgid "ipv6_ra_mode is not valid when ip_version is 4" msgstr "ip_version が 4 の場合、ipv6_ra_mode は無効です" #, python-format msgid "" "ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " "'%(addr_mode)s' is not valid. If both attributes are set, they must be the " "same value" msgstr "" "ipv6_ra_mode が '%(ra_mode)s' に、ipv6_address_mode が '%(addr_mode)s' に設定" "されていますが、これは無効です。両方の属性を設定する場合、これらは同じ値でな" "ければなりません" msgid "mac address update" msgstr "mac アドレス更新" msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "必ず 2 つの引数 (cidr および MAC) を提供する必要があります" msgid "network_type required" msgstr "network_type が必要です" #, python-format msgid "network_type value '%s' not supported" msgstr "network_type 値 '%s' はサポートされていません" msgid "new subnet" msgstr "新規サブネット" #, python-format msgid "physical_network '%s' unknown for flat provider network" msgstr "flat プロバイダーネットワークの physical_network '%s' が不明です" msgid "physical_network required for flat provider network" msgstr "flat プロバイダーネットワークには physical_network が必要です" #, python-format msgid "provider:physical_network specified for %s network" msgstr "%s ネットワークに provider:physical_network が指定されました" msgid "respawn_interval must be >= 0 if provided." 
msgstr "respawn_interval は、指定する場合は 0 以上にする必要があります。" #, python-format msgid "segmentation_id out of range (%(min)s through %(max)s)" msgstr "segmentation_id が範囲 (%(min)s から %(max)s) 外です" msgid "segmentation_id requires physical_network for VLAN provider network" msgstr "" "segmentation_id には、VLAN プロバイダーネットワークの physical_network が必要" "です" msgid "shared attribute switching to synthetic" msgstr "共有属性を synthetic に変更します" #, python-format msgid "" "subnetpool %(subnetpool_id)s cannot be updated when associated with shared " "address scope %(address_scope_id)s" msgstr "" "サブネットプール %(subnetpool_id)s が共有アドレススコープ " "%(address_scope_id)s と関連付けられている場合は、サブネットプールを更新するこ" "とはできません" msgid "subnetpool_id and use_default_subnetpool cannot both be specified" msgstr "" "subnetpool_id と use_default_subnetpool の両方を指定することはできません" msgid "the nexthop is not connected with router" msgstr "ルーターによってネクストホップが接続されていません" msgid "the nexthop is used by router" msgstr "ネクストホップがルーターによって使用されています" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982290.9710417 neutron-16.0.0.0b2.dev214/neutron/locale/ko_KR/0000755000175000017500000000000000000000000021101 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3150446 neutron-16.0.0.0b2.dev214/neutron/locale/ko_KR/LC_MESSAGES/0000755000175000017500000000000000000000000022666 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/locale/ko_KR/LC_MESSAGES/neutron.po0000644000175000017500000030047100000000000024725 0ustar00coreycorey00000000000000# Translations template for neutron. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the neutron project. # # Translators: # Seong-ho Cho , 2013 # Seong-ho Cho , 2013 # Sungjin Kang , 2013 # Sungjin Kang , 2013 # Sungjin Kang , 2013 # Andreas Jaeger , 2016. #zanata # HYUNGBAI PARK , 2016. #zanata # jtjang , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: neutron VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2019-12-20 15:01+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-08-17 10:52+0000\n" "Last-Translator: HYUNGBAI PARK \n" "Language: ko_KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Korean (South Korea)\n" #, python-format msgid "" "\n" "Command: %(cmd)s\n" "Exit code: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" msgstr "" "\n" "명령: %(cmd)s\n" "종료 코드: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" #, python-format msgid "" "%(branch)s HEAD file does not match migration timeline head, expected: " "%(head)s" msgstr "" "%(branch)s HEAD 파일이 마이그레이션 타임라인 헤드와 일치하지 않음, 예상값: " "%(head)s" #, python-format msgid "" "%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' " "and '%(desc)s'" msgstr "" "%(invalid_dirs)s은(는) sort_dirs에 대해 올바르지 않은 값이며, 올바른 값은 " "'%(asc)s' 및 '%(desc)s'입니다. 
" #, python-format msgid "%(key)s prohibited for %(tunnel)s provider network" msgstr "%(tunnel)s 제공자 네트워크에 대해 %(key)s이(가) 금지됨" #, python-format msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" msgstr "" "%(name)s '%(addr)s'이(가) ip_version '%(ip_version)s'과(와) 일치하지 않음" #, python-format msgid "%s cannot be called while in offline mode" msgstr "%s은(는) 오프라인 모드 중 호출할 수 없습니다. " #, python-format msgid "%s is invalid attribute for sort_keys" msgstr "%s이(는) sort_keys에 대해 올바르지 않은 속성입니다. " #, python-format msgid "%s must implement get_port_from_device or get_ports_from_devices." msgstr "" "%s은(는) get_port_from_device 또는 get_ports_from_devices를 구현해야 합니다." #, python-format msgid "%s prohibited for VLAN provider network" msgstr "VLAN 제공자 네트워크에 대해 %s이(가) 금지됨" #, python-format msgid "%s prohibited for flat provider network" msgstr "플랫 제공자 네트워크에 대해 %s이(가) 금지됨" #, python-format msgid "%s prohibited for local provider network" msgstr "로컬 제공자 네트워크에 대해 %s이(가) 금지됨" #, python-format msgid "'%s' is not a valid RBAC object type" msgstr "'%s'은(는) 올바른 RBAC 오브젝트 유형이 아님" #, python-format msgid "'%s' is not supported for filtering" msgstr "'%s'은(는) 필터링을 위해 지원되지 않음" #, python-format msgid "'module' object has no attribute '%s'" msgstr "'module' 오브젝트에 '%s' 속성이 없음" msgid "'port_max' is smaller than 'port_min'" msgstr "'port_max'가 'port_min'보다 작음" msgid "0 is not allowed as CIDR prefix length" msgstr "0은 CIDR 접두부 길이로 허용되지 않음" msgid "A cidr must be specified in the absence of a subnet pool" msgstr "서브넷 풀이 없는 경우 cidr을 지정해야 함" msgid "" "A decimal value as Vendor's Registered Private Enterprise Number as required " "by RFC3315 DUID-EN." msgstr "" "RFC3315 DUID-EN에서 요구하는 대로 벤더의 등록된 개인용 엔터프라이즈 번호로서" "의 10진수 값입니다. " #, python-format msgid "A default external network already exists: %(net_id)s." msgstr "기본 외부 네트워크가 이미 있음: %(net_id)s." msgid "" "A default subnetpool for this IP family has already been set. Only one " "default may exist per IP family" msgstr "" "이 IP 제품군의 기본 subnetpool이 이미 설정되었습니다. IP 제품군당 기본값은 하" "나만 있을 수 있습니다." msgid "A metering driver must be specified" msgstr "측정 드라이버를 지정해야 함" msgid "Access to this resource was denied." msgstr "이 자원에 대한 액세스가 거부되었습니다." msgid "Action to be executed when a child process dies" msgstr "하위 프로세스가 정지될 때 조치가 실행됨" msgid "" "Add comments to iptables rules. Set to false to disallow the addition of " "comments to generated iptables rules that describe each rule's purpose. " "System must support the iptables comments module for addition of comments." msgstr "" "iptables 규칙에 주석을 추가하십시오. false로 설정하면 각 규칙의 용도를 설명하" "는 생성된 iptables 규칙에 주석을 추가할 수 없습니다. 시스템에서 주석을 추가하" "기 위한 iptables 주석 모듈을 지원해야 합니다." msgid "Address not present on interface" msgstr "인터페이스에 주소가 없음" msgid "Adds test attributes to core resources." msgstr "코어 자원에 테스트 속성을 추가합니다." #, python-format msgid "Agent %(id)s is not a L3 Agent or has been disabled" msgstr "%(id)s 에이전트가 L3 에이전트가 아니거나 사용 안함 상태임" msgid "" "Agent starts with admin_state_up=False when enable_new_agents=False. In the " "case, user's resources will not be scheduled automatically to the agent " "until admin changes admin_state_up to True." msgstr "" "enable_new_agents=False인 경우 에이전트는 admin_state_up=False로 시작합니다. " "이 경우 사용자의 자원은 관리자가 admin_state_up을 True로 변경할 때까지 에이전" "트에 대해 자동으로 스케줄되지 않습니다. " #, python-format msgid "Agent updated: %(payload)s" msgstr "업데이트된 에이전트: %(payload)s" msgid "Allow auto scheduling networks to DHCP agent." msgstr "DHCP 에이전트에 대한 네트워크 자동 스케줄링을 허용합니다. " msgid "Allow auto scheduling of routers to L3 agent." 
msgstr "L3 에이전트에 대한 라우터 자동 스케줄링을 허용합니다." msgid "" "Allow overlapping IP support in Neutron. Attention: the following parameter " "MUST be set to False if Neutron is being used in conjunction with Nova " "security groups." msgstr "" "Neutron에서 중복 IP 지원을 허용합니다. 주의: Neutron을 Nova 보안 그룹과 함께 " "사용하는 경우 다음 매개변수를 False로 설정해야 합니다." msgid "Allow running metadata proxy." msgstr "메타데이터 프록시 실행을 허용합니다." msgid "Allow sending resource operation notification to DHCP agent" msgstr "DHCP 에이전트에 자원 조작 알림 전송 허용" msgid "Allow the creation of PTR records" msgstr "PTR 레코드의 작성 허용" msgid "Allow the usage of the bulk API" msgstr "벌크 API 사용 허용" msgid "Allow to perform insecure SSL (https) requests to nova metadata" msgstr "nova 메타데이터에 대한 비보안 SSL(https) 요청 수행 허용" msgid "" "Allows for serving metadata requests coming from a dedicated metadata access " "network whose CIDR is 169.254.169.254/16 (or larger prefix), and is " "connected to a Neutron router from which the VMs send metadata:1 request. In " "this case DHCP Option 121 will not be injected in VMs, as they will be able " "to reach 169.254.169.254 through a router. This option requires " "enable_isolated_metadata = True." msgstr "" "CIDR이 169.254.169.254/16(또는 더 큰 접두사)이며 VM에서 metadata:1 요청을 보" "내는 Neutron 라우터에 연결된 전용 메타데이터 액세스 네트워크에서 전송되는 메" "타데이터 요청에 대해 서비스를 제공할 수 있습니다. 이 경우 라우터를 통해 " "169.254.169.254에 연결할 수 있으므로 DHCP 옵션 121은 VM에 삽입되지 않습니다. " "이 옵션에는 enable_isolated_metadata = True가 필요합니다." msgid "An RBAC policy already exists with those values." msgstr "해당 값의 RBAC 정책이 이미 있습니다." msgid "An identifier must be specified when updating a subnet" msgstr "서브넷을 업데이트할 때 ID를 지정해야 함" msgid "" "An ordered list of extension driver entrypoints to be loaded from the " "neutron.ml2.extension_drivers namespace. For example: extension_drivers = " "port_security,qos" msgstr "" "neutron.ml2.mechanism_drivers 네임스페이스로부터 로드할 확장 드라이버 엔드포" "인트의 정렬된 목록입니다. 예: extension_drivers = port_security,qos" msgid "" "An ordered list of networking mechanism driver entrypoints to be loaded from " "the neutron.ml2.mechanism_drivers namespace." msgstr "" "neutron.ml2.mechanism_drivers 네임스페이스로부터 로드할 네트워킹 메커니즘 드" "라이버 시작점의 정렬된 목록입니다." msgid "An unknown error has occurred. Please try your request again." msgstr "알 수 없는 오류가 발생했습니다. 요청을 다시 시도하십시오. " msgid "Async process didn't respawn" msgstr "비동기 프로세스가 다시 파생되지 않음" msgid "Authorization URL for connecting to designate in admin context" msgstr "관리 컨텍스트에서 지정하기 위해 연결할 인증 URL" msgid "Automatically remove networks from offline DHCP agents." msgstr "오프라인 DHCP 에이전트에서 네트워크를 자동으로 제거합니다." msgid "" "Automatically reschedule routers from offline L3 agents to online L3 agents." msgstr "" "오프라인 L3 에이전트부터 온라인 L3 에이전트까지 라우트를 자동으로 다시 스케줄" "합니다." msgid "Availability zone of this node" msgstr "이 노드의 가용 구역" msgid "Available commands" msgstr "사용 가능한 명령" #, python-format msgid "Base MAC: %s" msgstr "기본 MAC: %s" msgid "" "Base log dir for dnsmasq logging. The log contains DHCP and DNS log " "information and is useful for debugging issues with either DHCP or DNS. If " "this section is null, disable dnsmasq log." msgstr "" "dnsmasq 로깅을 위한 기본 로그 디렉토리입니다. 이 로그는 DHCP 및 DNS 로그 정보" "를 포함하고 있으며 DHCP 또는 DNS에 대한 문제를 디버깅하는 데 유용합니다. 이 " "섹션이 널인 경우에는 dnsmasq 로그를 사용 안함으로 설정하십시오. 
" msgid "Body contains invalid data" msgstr "본문에 올바르지 않은 데이터가 포함되어 있음" msgid "Bulk operation not supported" msgstr "벌크 오퍼레이션은 지원되지 않음" msgid "CIDR to monitor" msgstr "모니터할 CIDR" #, python-format msgid "Callback for %(resource_type)s not found" msgstr "%(resource_type)s에 대한 콜백을 찾을 수 없음" #, python-format msgid "Callback for %(resource_type)s returned wrong resource type" msgstr "%(resource_type)s에 대한 콜백에서 잘못된 자원 유형을 리턴함" #, python-format msgid "Cannot add floating IP to port %s that has no fixed IPv4 addresses" msgstr "고정 IPv4 주소가 없는 포트 %s에 부동 IP를 추가할 수 없음" #, python-format msgid "Cannot add multiple callbacks for %(resource_type)s" msgstr "%(resource_type)s에 대한 다중 콜백을 추가할 수 없음" #, python-format msgid "Cannot allocate IPv%(req_ver)s subnet from IPv%(pool_ver)s subnet pool" msgstr "IPv%(pool_ver)s 서브넷 풀에서 IPv%(req_ver)s 서브넷을 할당할 수 없음" msgid "Cannot allocate requested subnet from the available set of prefixes" msgstr "사용 가능한 접두부 세트에서 요청한 서브넷을 할당할 수 없음" msgid "Cannot disable enable_dhcp with ipv6 attributes set" msgstr "ipv6 속성이 설정된 enable_dhcp를 사용할 수 없음" #, python-format msgid "Cannot handle subnet of type %(subnet_type)s" msgstr "%(subnet_type)s 유형의 서브넷을 처리할 수 없음" msgid "Cannot have multiple IPv4 subnets on router port" msgstr "라우터 포트에 IPv4 서브넷이 여러 개일 수 없음" #, python-format msgid "" "Cannot have multiple router ports with the same network id if both contain " "IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s" msgstr "" "모두 IPv6 서브넷이 있는 경우 같은 네트워크 ID를 사용하는 라우터 포트를 여러 " "개 사용할 수 없음. 기존 포트 %(p)s에 IPv6 서브넷 및 네트워크 ID %(nid)s이" "(가) 있음." #, python-format msgid "" "Cannot host distributed router %(router_id)s on legacy L3 agent %(agent_id)s." msgstr "" "레거시 L3 에이전트 %(agent_id)s에서 분산된 라우터 %(router_id)s을(를) 호스팅" "할 수 업습니다." msgid "Cannot specify both subnet-id and port-id" msgstr "subnet-id와 port-id를 둘 다 지정할 수 없음" msgid "Cannot understand JSON" msgstr "JSON을 이해할 수 없음" #, python-format msgid "Cannot update read-only attribute %s" msgstr "읽기 전용 속성 %s을(를) 업데이트할 수 없음" msgid "Certificate Authority public key (CA cert) file for ssl" msgstr "ssl용 인증 기관 공개 키(CA cert) 파일 " msgid "Check ebtables installation" msgstr "ebtables 설치 확인" msgid "Check for ARP header match support" msgstr "ARP 헤더 일치 지원 확인" msgid "Check for ARP responder support" msgstr "ARP 응답기 지원 확인" msgid "Check for ICMPv6 header match support" msgstr "ICMPv6 헤더 일치 지원 확인" msgid "Check for OVS Geneve support" msgstr "OVS Geneve 지원 확인" msgid "Check for OVS vxlan support" msgstr "OVS vxlan 지원 확인" msgid "Check for VF management support" msgstr "VF 관리 지원 확인" msgid "Check for iproute2 vxlan support" msgstr "iproute2 vxlan 지원 확인" msgid "Check for nova notification support" msgstr "nova 알림 지원 확인" msgid "Check for patch port support" msgstr "패치 포트 지원 확인" msgid "Check ip6tables installation" msgstr "ip6tables 설치 확인" msgid "Check ipset installation" msgstr "ipset 설치 확인" msgid "Check keepalived IPv6 support" msgstr "keepalived IPv6 지원 확인" msgid "Check minimal dibbler version" msgstr "최소 dibbler 버전 확인" msgid "Check minimal dnsmasq version" msgstr "최소 dnsmasq 버전 확인" msgid "Check netns permission settings" msgstr "netns 권한 설정 확인" msgid "Check ovs conntrack support" msgstr "ovs conntrack 지원 확인" msgid "Check ovsdb native interface support" msgstr "ovsdb 네이티브 인터페이스 지원 확인" #, python-format msgid "" "Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of " "subnet %(sub_id)s" msgstr "" "서브넷 %(subnet_id)s의 cidr %(subnet_cidr)s이(가) 서브넷 %(sub_id)s의 cidr " "%(cidr)s과(와) 겹침" msgid "Cleanup resources of a specific agent type 
only." msgstr "특정 에이전트 유형의 자원만 정리합니다." msgid "Client certificate for nova metadata api server." msgstr "nova 메타데이터 api 서버에 대한 클라이언트 인증서입니다." msgid "" "Comma-separated list of : tuples, mapping " "network_device to the agent's node-specific list of virtual functions that " "should not be used for virtual networking. vfs_to_exclude is a semicolon-" "separated list of virtual functions to exclude from network_device. The " "network_device in the mapping should appear in the physical_device_mappings " "list." msgstr "" "가상 네트워킹에 사용해서는 안되는 가상 함수의 에이전트 노드별 목록에 " "network_device를 맵핑하는 쉼표로 구분된 : 튜" "플 목록입니다. vfs_to_exclude는 network_device에서 제외시킬 세미콜론으로 구분" "된 가상 함수 목록입니다. 맵핑에 사용된 network_device는 " "physical_device_mappings 목록에 표시되어야 합니다." msgid "" "Comma-separated list of : tuples mapping " "physical network names to the agent's node-specific physical network device " "interfaces of SR-IOV physical function to be used for VLAN networks. All " "physical networks listed in network_vlan_ranges on the server should have " "mappings to appropriate interfaces on each agent." msgstr "" "VLAN 네트워크에 사용할 SR-IOV 실제 기능의 에이전트 노드별 실제 네트워크 디바" "이스 인터페이스에 실제 네트워크 이름을 맵핑하는 쉼표로 구분된 " ": 튜플 목록입니다. 서버의 " "network_vlan_ranges에 나열된 모든 실제 네트워크는 각 에이전트의 해당 인터페이" "스에 대한 맵핑이 있어야 합니다." msgid "" "Comma-separated list of : tuples " "mapping physical network names to the agent's node-specific physical network " "interfaces to be used for flat and VLAN networks. All physical networks " "listed in network_vlan_ranges on the server should have mappings to " "appropriate interfaces on each agent." msgstr "" "플랫 및 VLAN 네트워크에 사용할 에이전트 노드별 실제 네트워크 인터페이스에 실" "제 네트워크 이름을 맵핑하는 쉼표로 구분된 :" " 튜플 목록입니다. 서버의 network_vlan_ranges에 나열된 모" "든 실제 네트워크는 각 에이전트의 해당 인터페이스에 대한 맵핑이 있어야 합니다." msgid "" "Comma-separated list of : tuples enumerating ranges of GRE " "tunnel IDs that are available for tenant network allocation" msgstr "" "테넌트 네트워크 할당에 사용 가능한 GRE 터널 ID의 범위를 열거한 :" " 튜플을 쉼표로 구분한 목록입니다." msgid "" "Comma-separated list of : tuples enumerating ranges of " "Geneve VNI IDs that are available for tenant network allocation" msgstr "" "테넌트 네트워크 할당에 사용 가능한 Geneve VNI ID의 범위를 열거하는 :" " 튜플의 쉼표로 구분된 목록입니다. " msgid "" "Comma-separated list of : tuples enumerating ranges of " "VXLAN VNI IDs that are available for tenant network allocation" msgstr "" "테넌트 네트워크 할당에 사용 가능한 VXLAN VNI ID의 범위를 열거한 :" " 튜플의 쉼표로 구분된 목록입니다. " msgid "" "Comma-separated list of the DNS servers which will be used as forwarders." msgstr "쉼표로 분리된 DNS 서버의 목록이며 전달자로 사용됩니다." msgid "Command to execute" msgstr "실행할 명령" msgid "Config file for interface driver (You may also use l3_agent.ini)" msgstr "인터페이스 드라이버에 대한 구성 파일(l3_agent.ini도 사용할 수 있음)" #, python-format msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s" msgstr "CIDR %(cidr)s에 대한 충돌하는 값 ethertype %(ethertype)s" msgid "" "Controls whether the neutron security group API is enabled in the server. It " "should be false when using no security groups or using the nova security " "group API." msgstr "" "서버에서 neutron 보안 그룹 API가 사용되는지 여부를 제어합니다.보안 그룹을 사" "용하지 않거나 nova 보안 그룹 API를 사용할 때는 false이어야 합니다." #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" msgstr "%(time)d후 시도한 다음 %(host)s:%(port)s에 바인딩할 수 없습니다" msgid "Could not deserialize data" msgstr "데이터를 직렬화 해제할 수 없음" msgid "" "DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite " "lease times." msgstr "" "DHCP 리스 기간(초)입니다. dnsmasq에 무한 리스 시간을 사용하도록 지시하려면 -1" "을 사용하십시오." 
msgid "" "DVR deployments for VXLAN/GRE/Geneve underlays require L2-pop to be enabled, " "in both the Agent and Server side." msgstr "" "VXLAN/GRE/Geneve 기초를 위한 DVR 배치를 수행하려면 에이전트 측과 서버 측 모두" "에서 L2-pop을 사용으로 설정해야 합니다. " msgid "" "Database engine for which script will be generated when using offline " "migration." msgstr "" "오프라인 마이그레이션을 사용할 때 스크립트가 생성될 데이터베이스 엔진입니다." msgid "Default external networks must be shared to everyone." msgstr "기본 외부 네트워크를 모든 사용자와 공유해야 합니다." msgid "" "Default network type for external networks when no provider attributes are " "specified. By default it is None, which means that if provider attributes " "are not specified while creating external networks then they will have the " "same type as tenant networks. Allowed values for external_network_type " "config option depend on the network type values configured in type_drivers " "config option." msgstr "" "제공자 속성이 지정되지 않은 경우 외부 네트워크의 기본 네트워크 유형입니다. 기" "본적으로 이 유형은 None이며 이는 외부 네트워크를 작성하는 중에 제공자 속성이 " "지정되지 않은 경우 해당 제공자 속성은 테넌트 네트워크와 동일한 유형을 가진다" "는 것을 의미합니다. external_network_type 구성 옵션에 대해 허용되는 값은 " "type_drivers 구성 옵션에서 구성된 네트워크 유형 값에 따라 다릅니다. " msgid "" "Default number of RBAC entries allowed per tenant. A negative value means " "unlimited." msgstr "" "테넌트당 허용되는 기본 RBAC 항목 수입니다. 음수 값은 무제한을 의미합니다. " msgid "" "Default number of resource allowed per tenant. A negative value means " "unlimited." msgstr "테넌트당 허용되는 기본 자원 수입니다. 음수 값은 무제한을 의미합니다." msgid "Default security group" msgstr "기본 보안 그룹" msgid "Default security group already exists." msgstr "기본 보안 그룹이 이미 존재합니다. " msgid "" "Default value of availability zone hints. The availability zone aware " "schedulers use this when the resources availability_zone_hints is empty. " "Multiple availability zones can be specified by a comma separated string. " "This value can be empty. In this case, even if availability_zone_hints for a " "resource is empty, availability zone is considered for high availability " "while scheduling the resource." msgstr "" "가용 구역 힌트의 기본값입니다. 리소스 availability_zone_hints가 비어 있으면 " "가용 구역 인식 스케줄러에서 이 값을 사용합니다. 쉼표로 구분된 문자열을 사용하" "여 여러 가용 구역을 지정할 수 있습니다. 이 값은 비어있을 수 있습니다. 이 경" "우 자원의 availability_zone_hints가 비어 있어도 자원을 스케줄링하는 동안 고가" "용성을 위해 가용 구역을 고려합니다." msgid "" "Define the default value of enable_snat if not provided in " "external_gateway_info." msgstr "" "external_gateway_info에 제공되지 않은 경우 enable_snat의 기본값을 정의하십시" "오. " msgid "" "Defines providers for advanced services using the format: :" ":[:default]" msgstr "" "다음 형식을 사용하여 고급 서비스에 대한 제공자 정의: ::" "[:default]" msgid "Delete the namespace by removing all devices." msgstr "모든 디바이스를 제거하여 네임스페이스를 삭제하십시오. " #, python-format msgid "Deleting port %s" msgstr "포트 %s 삭제 중" #, python-format msgid "Deployment error: %(reason)s." msgstr "배포 오류: %(reason)s." msgid "Destroy IPsets even if there is an iptables reference." msgstr "iptables 참조가 있는 경우에도 IPset를 영구 삭제하십시오. " msgid "Destroy all IPsets." msgstr "모든 IPset를 영구 삭제하십시오. " #, python-format msgid "Device %(dev_name)s in mapping: %(mapping)s not unique" msgstr "%(mapping)s 맵핑의 %(dev_name)s 디바이스가 고유하지 않음" msgid "Device not found" msgstr "디바이스를 찾을 수 없음" msgid "Domain to use for building the hostnames" msgstr "호스트 이름 빌드에 사용할 도메인" msgid "Downgrade no longer supported" msgstr "다운그레이드는 현재 지원하지 않음" #, python-format msgid "Driver %s is not unique across providers" msgstr "%s 드라이버가 제공자에서 고유하지 않음" msgid "Driver for external DNS integration." msgstr "외부 DNS 통합을 위한 드라이버." 
msgid "Driver for security groups firewall in the L2 agent" msgstr "L2 에이전트의 보안 그룹 방화벽에 대한 드라이버" msgid "Driver to use for scheduling network to DHCP agent" msgstr "DHCP 에이전트에 대한 네트워크 스케줄링에 사용할 드라이버" msgid "Driver to use for scheduling router to a default L3 agent" msgstr "기본 L3 에이전트에 대한 라우터 스케줄링에 사용할 드라이버" msgid "" "Driver used for ipv6 prefix delegation. This needs to be an entry point " "defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg for " "entry points included with the neutron source." msgstr "" "ipv6 접두부 위임에 사용되는 드라이버입니다. 이는 neutron.agent.linux." "pd_drivers 네임스페이스에서 정의된 시작점이어야 합니다. neutron 소스와 함께 " "포함된 시작점은 setup.cfg를 참조하십시오. " #, python-format msgid "" "Duplicate L3HARouterAgentPortBinding is created for router(s) %(router)s. " "Database cannot be upgraded. Please, remove all duplicates before upgrading " "the database." msgstr "" "라우터 %(router)s의 중복 L3HARouterAgentPortBinding이 작성되었습니다. 데이터" "베이스를 업그레이드할 수 없습니다. 데이터베이스를 업그레이드하기 전에 모든 중" "복을 제거하십시오." msgid "Duplicate Security Group Rule in POST." msgstr "POST에 중복 보안 그룹 규칙이 있습니다. " msgid "Duplicate address detected" msgstr "중복 주소 발견" #, python-format msgid "ERROR: %s" msgstr "오류: %s" msgid "" "ERROR: Unable to find configuration file via the default search paths (~/." "neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!" msgstr "" "오류: 기본 검색 경로(~/.quantum/, ~/, /etc/quantum/, /etc/) 및 '--config-" "file' (~/.neutron/, ~/, /etc/neutron/, /etc/) 및 '--config-file' 옵션!" msgid "" "Either one of parameter network_id or router_id must be passed to _get_ports " "method." msgstr "" "매개변수 network_id 및 router_id 중 하나를 _get_ports 메소드에 전달해야 합니" "다." msgid "Either subnet_id or port_id must be specified" msgstr "subnet_id 또는 port_id 중 하나를 지정해야 함" msgid "Enable HA mode for virtual routers." msgstr "가상 라우터에 대해 HA 모드를 사용합니다." msgid "Enable SSL on the API server" msgstr "API 서버에서 SSL 연결 활성화" msgid "" "Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 " "plugin using linuxbridge mechanism driver" msgstr "" "에이전트에서 VXLAN을 사용 가능하게 설정하십시오. linuxbridge 메커니즘 드라이" "버를 사용하여 ml2 플러그인이 에이전트를 관리할 경우 사용할 수 있습니다." msgid "" "Enable services on an agent with admin_state_up False. If this option is " "False, when admin_state_up of an agent is turned False, services on it will " "be disabled. Agents with admin_state_up False are not selected for automatic " "scheduling regardless of this option. But manual scheduling to such agents " "is available if this option is True." msgstr "" "admin_state_up False인 에이전트의 서비스 사용. 이 옵션이 False이면 에이전트" "의 admin_state_up이 False가 될 때 해당 서비스가 사용 안함으로 설정됩니다. " "admin_state_up False인 에이전트는 이 옵션과 관계 없이 자동 스케줄링에 사용하" "도록 선택하지 않습니다. 그러나 이 옵션이 True이면 이러한 에이전트에 수동 스케" "줄링을 사용할 수 있습니다." msgid "" "Enables IPv6 Prefix Delegation for automatic subnet CIDR allocation. Set to " "True to enable IPv6 Prefix Delegation for subnet allocation in a PD-capable " "environment. Users making subnet creation requests for IPv6 subnets without " "providing a CIDR or subnetpool ID will be given a CIDR via the Prefix " "Delegation mechanism. Note that enabling PD will override the behavior of " "the default IPv6 subnetpool." msgstr "" "지동 서브넷 CIDR 할당을 위해 IPV6 접두어 위임을 사용합니다. PD 가능 환경에서 " "서브넷 할당을 위해 IPv6 접두어 위임을 사용하려면 True로 설정합니다. 사용자가 " "CIDR 또는 subnetpool ID를 제공하지 않고 IPv6 서브넷의 서브넷 작성을 요청하면 " "접두어 위임 메커니즘을 통해 CIDR이 제공됩니다. PD를 사용하면 기본 IPv6 " "subnetpool의 동작이 재정의됩니다." msgid "" "Enables the dnsmasq service to provide name resolution for instances via DNS " "resolvers on the host running the DHCP agent. 
Effectively removes the '--no-" "resolv' option from the dnsmasq process arguments. Adding custom DNS " "resolvers to the 'dnsmasq_dns_servers' option disables this feature." msgstr "" "DHCP 에이전트를 실행 중인 호스트에서 DNS 분석기를 통해 인스턴스의 이름 분석" "을 제공하는 dnsmasq 서비스를 사용합니다. dnsmasq 프로세스 인수에서 '--no-" "resolv' 옵션을 효과적으로 제거합니다. 사용자 정의 DNS 분석기를 " "'dnsmasq_dns_servers' 옵션에 추가하면 이 기능이 사용되지 않습니다." #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "조작 시도 중 오류 %(reason)s이(가) 발생했습니다. " #, python-format msgid "Error parsing dns address %s" msgstr "DNS 주소 %s 구문 분석 오류" #, python-format msgid "Error while reading %s" msgstr "%s을(를) 읽는 중에 오류 발생" #, python-format msgid "" "Exceeded %s second limit waiting for address to leave the tentative state." msgstr "" "주소가 임시 상태를 벗어날 때까지 대기하는 동안 %s초 제한이 초과되었습니다." msgid "Existing prefixes must be a subset of the new prefixes" msgstr "기존 접두부는 새 접두부의 서브세트여야 함" #, python-format msgid "" "Exit code: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: " "%(stderr)s" msgstr "" "종료 코드: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: " "%(stderr)s" #, python-format msgid "Extension %(driver)s failed." msgstr "확장 %(driver)s이(가) 실패했습니다. " #, python-format msgid "" "Extension driver %(driver)s required for service plugin %(service_plugin)s " "not found." msgstr "" "서비스 플러그인 %(service_plugin)s에 필요한 확장 드라이버 %(driver)s을(를) 찾" "을 수 없음" msgid "" "Extension to use alongside ml2 plugin's l2population mechanism driver. It " "enables the plugin to populate VXLAN forwarding table." msgstr "" "ml2 플러그인의 l2population 메커니즘 드라이버와 함께 사용할 확장기능. 이를 통" "해 플러그인이 VXLAN 전달 테이블을 채울 수 있습니다." #, python-format msgid "Extension with alias %s does not exist" msgstr "별명이 %s인 확장이 존재하지 않음" msgid "Extensions list to use" msgstr "사용할 확장 목록" #, python-format msgid "External IP %s is the same as the gateway IP" msgstr "외부 IP %s이(가) 게이트웨이 IP와 같음" #, python-format msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found." msgstr "" "%(router_id)s 라우터를 다시 스케줄하지 못함: 적합한 l3 에이전트를 찾을 수 없" "습니다." #, python-format msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s." msgstr "" "L3 에이전트 %(agent_id)s에 대한 %(router_id)s 라우터를 스케줄링하지 못했습니" "다. " #, python-format msgid "" "Failed to create port on network %(network_id)s, because fixed_ips included " "invalid subnet %(subnet_id)s" msgstr "" "fixed_ips에 올바르지 않은 서브넷 %(subnet_id)s이(가) 포함되어 있어서 네트워" "크 %(network_id)s에서 포트를 작성하지 못했습니다. " #, python-format msgid "Failed to locate source for %s." msgstr "%s에 대한 소스를 찾지 못했습니다. 
" msgid "Failed to remove supplemental groups" msgstr "보조 그룹을 제거하지 못함" #, python-format msgid "Failed to set gid %s" msgstr "gid %s을(를) 설정하지 못함" #, python-format msgid "Failed to set uid %s" msgstr "uid %s을(를) 설정하지 못함" #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "%(type)s 터널 포트를 %(ip)s(으)로 설정하지 못함" msgid "Failure applying iptables rules" msgstr "iptables 규칙 적용 실패" #, python-format msgid "Failure waiting for address %(address)s to become ready: %(reason)s" msgstr "주소 %(address)s이(가) 준비될 때까지 기다리는 데 실패함: %(reason)s" msgid "Flat provider networks are disabled" msgstr "플랫 제공자 네트워크가 사용되지 않음" msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "TCP/UDP 프로토콜의 경우 port_range_min은 port_range_max 이하여야 함" msgid "Force ip_lib calls to use the root helper" msgstr "루트 헬퍼를 사용하는 ip_lib 호출을 강제합니다" msgid "Gateway IP version inconsistent with allocation pool version" msgstr "게이트웨이 IP 버전이 할당 풀 버전과 일치하지 않음" msgid "Gateway is not valid on subnet" msgstr "게이트웨이가 서브넷에서 올바르지 않음" msgid "" "Group (gid or name) running metadata proxy after its initialization (if " "empty: agent effective group)." msgstr "" "초기화 후에 메타데이터 프록시를 실행하는 그룹(gid 또는 이름)(비어 있는 경우: " "에이전트 유효 그룹)." msgid "Group (gid or name) running this process after its initialization" msgstr "초기화 이후 이 프로세스를 실행하는 그룹(gid 또는 이름)" msgid "" "Hostname to be used by the Neutron server, agents and services running on " "this machine. All the agents and services running on this machine must use " "the same host value." msgstr "" "이 시스템에서 실행 중인 neutron 서버, 에이전트 및 서비스에서 사용할 호스트 이" "름입니다. 이 시스템에서 실행 중인 모든 에이전트 및 서비스는 같은 호스트 값" "을 사용해야 합니다." #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" "min) is missing." msgstr "" "ICMP 코드 (port-range-max) %(value)s이(가) 제공되지만 ICMP 유형(port-range-" "min)이 누락되었습니다." msgid "ID of network" msgstr "네트워크의 ID" msgid "ID of network to probe" msgstr "프로브할 네트워크의 ID" msgid "ID of probe port to delete" msgstr "삭제할 프로브 포트의 ID" msgid "ID of probe port to execute command" msgstr "명령을 실행할 프로브 포트의 ID" msgid "ID of the router" msgstr "라우터의 ID" #, python-format msgid "IP address %(ip)s already allocated in subnet %(subnet_id)s" msgstr "IP 주소 %(ip)s이(가) 이미 서브넷 %(subnet_id)s에서 할당되어 있음" #, python-format msgid "IP address %(ip)s does not belong to subnet %(subnet_id)s" msgstr "IP 주소 %(ip)s이(가) 서브넷 %(subnet_id)s에 속하지 않음" msgid "IP allocation failed. Try again later." msgstr "IP 할당에 실패했습니다. 나중에 다시 시도하십시오." msgid "IP allocation requires subnet_id or ip_address" msgstr "IP 할당은 subnet_id 또는 ip_address가 필요함" msgid "IP allocation requires subnets for network" msgstr "IP 할당은 네트워크의 서브넷을 필요로 합니다." #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables rules:\n" "%s" msgstr "" "IPTablesManager.apply가 다음 iptables 규칙 세트를 적용하지 못함:\n" "%s" msgid "IPv6 Address Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "IPv6 주소 모드는 접두부 위임에 대해 Stateless 또는 SLAAC여야 합니다. " msgid "IPv6 RA Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "IPv6 RA 모드는 접두부 위임에 대해 Stateless 또는 SLAAC여야 합니다. " #, python-format msgid "" "IPv6 address %(ip)s cannot be directly assigned to a port on subnet " "%(subnet_id)s as the subnet is configured for automatic addresses" msgstr "" "서브넷이 자동 주소에 대해 구성되어 있기 때문에 IPv6 주소 %(ip)s을(를) 서브넷 " "%(subnet_id)s의 포트에 직접 지정할 수 없음" #, python-format msgid "" "IPv6 subnet %s configured to receive RAs from an external router cannot be " "added to Neutron Router." 
msgstr "" "외부 라우터에서 RA를 수신하도록 구성된 IPv6 서브넷 %s을(를) Neutron 라우터에 " "추가할 수 없습니다." msgid "" "If True, then allow plugins that support it to create VLAN transparent " "networks." msgstr "" "True인 경우 이를 지원하는 플러그인을 사용하여 VLAN 투명 네트워크를 작성할 수 " "있습니다." msgid "Illegal IP version number" msgstr "올바르지 않은 IP 버전 번호" msgid "" "In some cases the Neutron router is not present to provide the metadata IP " "but the DHCP server can be used to provide this info. Setting this value " "will force the DHCP server to append specific host routes to the DHCP " "request. If this option is set, then the metadata service will be activated " "for all the networks." msgstr "" "경우에 따라 메타데이터 IP를 제공하는 Neutron 라우터는 없지만 DHCP 서버를 사용" "하여 이 정보를 제공할 수 있습니다. 이 값을 설정하면 DHCP 서버가 특정 호스트 " "경로를 DHCP 요청에 강제로 추가합니다. 이 옵션이 설정되면 모든 네트워크의 메타" "데이터 서비스가 활성화됩니다." msgid "" "Indicates that this L3 agent should also handle routers that do not have an " "external network gateway configured. This option should be True only for a " "single agent in a Neutron deployment, and may be False for all agents if all " "routers must have an external network gateway." msgstr "" "이 L3 에이전트에서 외부 네트워크 게이트웨이가 구성되지 않은 라우터도 처리해" "야 함을 나타냅니다. 이 옵션은 Neutron 배포의 단일 에이전트에만 True여야 하" "며, 라우터에 외부 네트워크 게이트웨이가 있어야 하는 경우에는 모든 에이전트에 " "False일 수 있습니다." #, python-format msgid "Instance of class %(module)s.%(class)s must contain _cache attribute" msgstr "" "클래스 %(module)s.%(class)s의 인스턴스에 _cache 속성이 포함되어야 합니다." #, python-format msgid "Insufficient prefix space to allocate subnet size /%s" msgstr "접두부 공간이 부족하여 서브넷 크기 /%s을(를) 할당할 수 없음" msgid "Insufficient rights for removing default security group." msgstr "기본 보안 그룹을 제거할 수 있는 권한이 없습니다." msgid "" "Integration bridge to use. Do not change this parameter unless you have a " "good reason to. This is the name of the OVS integration bridge. There is one " "per hypervisor. The integration bridge acts as a virtual 'patch bay'. All VM " "VIFs are attached to this bridge and then 'patched' according to their " "network connectivity." msgstr "" "사용할 통합 브릿지입니다. 합당한 이유가 없으면 이 매개변수를 변경하지 마십시" "오. 이 매개변수는 OVS 통합 브릿지의 이름입니다. 하이퍼바이저당 한 개가 있습니" "다. 통합 브릿지는 가상 '패치 베이'의 역할을 수행합니다. 모든 VM VIF가 이 브릿" "지에 연결된 다음 네트워크 연결성에 따라 \"패치\"됩니다." msgid "Interface to monitor" msgstr "모니터할 인터페이스" msgid "" "Interval between checks of child process liveness (seconds), use 0 to disable" msgstr "" "하위 프로세스 활동 확인 간격(초), 사용 안함으로 설정하려면 0을 지정하십시오." msgid "Interval between two metering measures" msgstr "2개의 측정 조치 간의 간격" msgid "Interval between two metering reports" msgstr "2개의 측정 보고서 간의 간격" #, python-format msgid "Invalid Device %(dev_name)s: %(reason)s" msgstr "올바르지 않은 디바이스 %(dev_name)s: %(reason)s" #, python-format msgid "" "Invalid action '%(action)s' for object type '%(object_type)s'. Valid " "actions: %(valid_actions)s" msgstr "" "오브젝트 유형 '%(object_type)s'에 대한 조치 '%(action)s'이(가) 올바르지 않습" "니다. 올바른 조치: %(valid_actions)s" #, python-format msgid "" "Invalid authentication type: %(auth_type)s, valid types are: " "%(valid_auth_types)s" msgstr "" "올바르지 않은 인증 유형임: %(auth_type)s, 올바른 유형은 다음과 같음: " "%(valid_auth_types)s" #, python-format msgid "Invalid ethertype %(ethertype)s for protocol %(protocol)s." msgstr "" "프로토콜 %(protocol)s의 ethertype %(ethertype)s이(가) 올바르지 않습니다." 
#, python-format msgid "Invalid format: %s" msgstr "올바르지 않은 형식: %s" #, python-format msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s" msgstr "" "올바르지 않은 인스턴스 상태: %(state)s, 올바른 상태는 %(valid_states)s임" #, python-format msgid "Invalid mapping: '%s'" msgstr "올바르지 않은 맵핑: '%s'" #, python-format msgid "Invalid pci slot %(pci_slot)s" msgstr "올바르지 않은 pci 슬롯 %(pci_slot)s" #, python-format msgid "Invalid provider format. Last part should be 'default' or empty: %s" msgstr "" "올바르지 않은 제공자 형식. 마지막 부분이 '기본값'이거나 비어 있어야 함: %s" #, python-format msgid "Invalid resource type %(resource_type)s" msgstr "올바르지 않은 자원 유형 %(resource_type)s" #, python-format msgid "Invalid route: %s" msgstr "올바르지 않은 라우트: %s" msgid "Invalid service provider format" msgstr "올바르지 않은 서비스 제공자 형식" #, python-format msgid "" "Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255." msgstr "" "ICMP %(field)s (%(attr)s) %(value)s의 값이 올바르지 않음. 이 값은 0에서 255 " "사이여야 합니다. " #, python-format msgid "Invalid value for port %(port)s" msgstr "%(port)s 포트에 대한 올바르지 않은 값" msgid "" "Iptables mangle mark used to mark ingress from external network. This mark " "will be masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "외부 네트워크의 입구를 표시하는 데 사용되는 Iptables mangle 표시입니다. 이 표" "시는 하위 16비트만 사용되도록 0xffff로 마스크됩니다. " msgid "" "Iptables mangle mark used to mark metadata valid requests. This mark will be " "masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "메타데이터 올바른 요청을 표시하는 데 사용되는 Iptables mangle 표시입니다. 이 " "표시는 하위 16비트만 사용되도록 0xffff로 마스크됩니다. " msgid "Keepalived didn't respawn" msgstr "유휴되면 다시 파생되지 않음" msgid "Keepalived didn't spawn" msgstr "활성 유지(keepalive)가 파생되지 않음" #, python-format msgid "" "Kernel HZ value %(value)s is not valid. This value must be greater than 0." msgstr "" "커널 HZ 값 %(value)s이(가) 올바르지 않습니다. 이 값은 0보다 커야 합니다." msgid "L3 agent failure to setup NAT for floating IPs" msgstr "L3 에이전트에서 Floating IP의 NAT 설정 실패" msgid "L3 agent failure to setup floating IPs" msgstr "L3 에이전트에서 Floating IP 설정 실패" msgid "Limit number of leases to prevent a denial-of-service." msgstr "서비스 거부(DoS)를 막기 위해 리스 수를 제한합니다." msgid "List of :" msgstr ":의 목록" msgid "" "List of :: or " "specifying physical_network names usable for VLAN provider and tenant " "networks, as well as ranges of VLAN tags on each available for allocation to " "tenant networks." msgstr "" "테넌트 네트워크에 대한 할당에 사용할 수 있는 각 VLAN 태그의 범위 및VLAN 제공" "자와 테넌트 네트워크에 사용할 수 있는 실제 네트워크 이름을 지정하는 " ":: 또는 의 목록입니" "다." msgid "" "List of network type driver entrypoints to be loaded from the neutron.ml2." "type_drivers namespace." msgstr "" "neutron.ml2.type_drivers 네임스페이스에서 로드할네트워크 유형 드라이버 시작점" "의 목록입니다. " msgid "" "List of physical_network names with which flat networks can be created. Use " "default '*' to allow flat networks with arbitrary physical_network names. " "Use an empty list to disable flat networks." msgstr "" "플랫 네트워크를 작성할 수 있는 실제 네트워크 이름의 목록입니다. 플랫 네트워크" "에 임의의 physical_network 이름을 사용하려면 기본값 '*'를 사용하십시오. 빈 목" "록을 사용하여 플랫 네트워크를 비활성화합니다." msgid "Location for Metadata Proxy UNIX domain socket." msgstr "메타데이터 프록시 UNIX 도메인 소켓의 위치입니다." msgid "Location of Metadata Proxy UNIX domain socket" msgstr "메타데이터 프록시 UNIX 도메인 소켓의 위치" msgid "Location to store DHCP server config files." msgstr "DHCP 서버 구성 파일을 저장할 위치." msgid "Location to store IPv6 PD files." msgstr "IPv6 PD 파일을 저장할 위치입니다. 
" msgid "Location to store IPv6 RA config files" msgstr "IPv6 RA 구성 파일을 저장할 위치" msgid "Location to store child pid files" msgstr "하위 pid 파일을 저장할 위치" msgid "Log agent heartbeats" msgstr "로그 에이전트 하트비트" msgid "" "MTU of the underlying physical network. Neutron uses this value to calculate " "MTU for all virtual network components. For flat and VLAN networks, neutron " "uses this value without modification. For overlay networks such as VXLAN, " "neutron automatically subtracts the overlay protocol overhead from this " "value. Defaults to 1500, the standard value for Ethernet." msgstr "" "기본 물리 네트워크의 MTU입니다. Neutron에서는 이 값을 사용하여 모든 가상 네트" "워크 구성 요소의 MTU를 계산합니다. 일반 및 VLAN 네트워크의 경우 neutron에서" "는 이 값을 수정하지 않고 사용합니다. VXLAN과 같은 오버레이 네트워크의 경우 " "neutron이 이 값에서 오버레이 프로토콜 오버헤드를 자동으로 제거합니다. 이더넷" "의 표준 값인 1500으로 기본값이 지정됩니다." msgid "MTU size of veth interfaces" msgstr "veth 인터페이스의 MTU 크기" msgid "Make the l2 agent run in DVR mode." msgstr "l2 에이전트를 DVR 모드에서 실행하십시오." msgid "Malformed request body" msgstr "형식이 틀린 요청 본문" msgid "MaxRtrAdvInterval setting for radvd.conf" msgstr "radvd.conf의 MaxRtrAdvInterval 설정" msgid "Maximum number of DNS nameservers per subnet" msgstr "서브넷당 최대 DNS 네임스페이스 수" msgid "" "Maximum number of L3 agents which a HA router will be scheduled on. If it is " "set to 0 then the router will be scheduled on every agent." msgstr "" "HA 라우터가 스케줄될 최대 L3 에이전트 수입니다. 이 수가 0으로 설정되면 라우터" "가 모든 에이전트에서 스케줄됩니다." msgid "Maximum number of allowed address pairs" msgstr "허용되는 주소 쌍 최대 수" msgid "Maximum number of host routes per subnet" msgstr "서브넷당 호스트 라우트의 최대 수" msgid "Maximum number of routes per router" msgstr "라우터당 최대 경로 수" msgid "" "Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': deduce " "mode from metadata_proxy_user/group values, 'user': set metadata proxy " "socket mode to 0o644, to use when metadata_proxy_user is agent effective " "user or root, 'group': set metadata proxy socket mode to 0o664, to use when " "metadata_proxy_group is agent effective group or root, 'all': set metadata " "proxy socket mode to 0o666, to use otherwise." msgstr "" "메타데이터 프록시 UNIX 도메인 소켓 모드, 4개의 값이 허용됨: 'deduce': " "metadata_proxy_user/group 값의 추론 모드, 'user': 메타데이터 프록시 소켓 모드" "를 0o644로 설정, metadata_proxy_user가 에이전트 유효 사용자 또는 루트인 경우 " "사용, 'group': 메타데이터 프록시 소켓 모드를 0o664로 설정, " "metadata_proxy_group이 에이전트 유효 그룹 또는 루트인 경우 사용, 'all': 메타" "데이터 프록시 소켓 모드를 0o666으로 설정, 기타 경우에 사용" msgid "Metering driver" msgstr "측정 드라이버" msgid "MinRtrAdvInterval setting for radvd.conf" msgstr "radvd.conf의 MinRtrAdvInterval 설정" msgid "Minimize polling by monitoring ovsdb for interface changes." msgstr "인터페이스 변경사항에 대한 ovsdb를 모니터링하여 폴링을 최소화합니다." #, python-format msgid "Missing key in mapping: '%s'" msgstr "맵핑에서 키 누락: '%s'" msgid "" "Multicast group for VXLAN. When configured, will enable sending all " "broadcast traffic to this multicast group. When left unconfigured, will " "disable multicast VXLAN mode." msgstr "" "VXLAN의 멀티캐스트 그룹입니다. 이 그룹이 구성되면 모든 브로드캐스트 트래픽을 " "이 멀티캐스트 그룹에 보낼 수 있습니다. 구성되지 않은 상태로 두면 멀티캐스트 " "VXLAN 모드가 사용되지 않습니다." msgid "" "Multicast group(s) for vxlan interface. A range of group addresses may be " "specified by using CIDR notation. Specifying a range allows different VNIs " "to use different group addresses, reducing or eliminating spurious broadcast " "traffic to the tunnel endpoints. To reserve a unique group for each possible " "(24-bit) VNI, use a /8 such as 239.0.0.0/8. This setting must be the same on " "all the agents." msgstr "" "vxlan 인터페이스의 멀티캐스트 그룹입니다. 그룹 주소의 범위는 CIDR 표기법을 사" "용하여 지정할 수 있습니다. 
범위를 지정하면 여러 다른 VNI에서 여러 다른 그룹 " "주소를 사용할 수 있으므로, 터널 엔드포인트에 대한 의사 브로드캐스트 트래픽이 " "감소하거나 제거됩니다. 가능한 각 (24비트) VNI의 고유 그룹을 예약하려면 /8을 " "사용하십시오(예: 239.0.0.0/8). 이 설정은 모든 에이전트에서 같아야 합니다." #, python-format msgid "Multiple default providers for service %s" msgstr "%s 서비스에 대한 다중 기본 제공자 " #, python-format msgid "Multiple providers specified for service %s" msgstr "%s 서비스에 대해 다중 제공자가 지정됨" msgid "Multiple tenant_ids in bulk security group rule create not allowed" msgstr "벌크 보안 그룹 규칙 작성의 다중 tenant_id는 허용되지 않음" msgid "Must also specify protocol if port range is given." msgstr "포트 범위가 제공되는 경우 프로토콜도 지정해야 합니다. " msgid "Must specify one or more actions on flow addition or modification" msgstr "플로우 추가 또는 수정 시 하나 이상의 조치를 지정해야 함" msgid "Name of Open vSwitch bridge to use" msgstr "사용할 Open vSwitch 브릿지의 이름" msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "" "사용할 nova 리젼의 이름입니다. 키스톤이 둘 이상의 리젼을 관리할 경우 유용합니" "다." msgid "Namespace of the router" msgstr "라우터의 네임스페이스" msgid "Native pagination depend on native sorting" msgstr "네이티브 페이지 번호 매기기는 네이티브 정렬에 따라 다름" #, python-format msgid "" "Need to apply migrations from %(project)s contract branch. This will require " "all Neutron server instances to be shutdown before proceeding with the " "upgrade." msgstr "" "%(project)s 계약 분기에서 마이그레이션을 적용해야 합니다. 업그레이드를 계속하" "기 전에 모든 Neutron 서버 인스턴스를 종료해야 합니다." msgid "Negative delta (downgrade) not supported" msgstr "음수의 델타(다운그레이드)는 지원하지 않음" msgid "Negative relative revision (downgrade) not supported" msgstr "음수의 상대적 개정판(다운그레이드)은 지원하지 않음" #, python-format msgid "Network %s does not contain any IPv4 subnet" msgstr "네트워크 %s에 IPv4 서브넷이 포함되어 있지 않음" #, python-format msgid "Network %s is not a valid external network" msgstr "%s 네트워크가 올바른 외부 네트워크가 아님" #, python-format msgid "Network %s is not an external network" msgstr "%s 네트워크가 외부 네트워크가 아님" #, python-format msgid "" "Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges " "%(excluded_ranges)s was not found." msgstr "" "IP 범위가 %(parent_range)s이고 크기가 %(size)s인(IP 범위 %(excluded_ranges)s " "제외) 네트워크를 발견하지 못했습니다." #, python-format msgid "Network type value '%s' not supported" msgstr "네트워크 유형 값 '%s'이(가) 지원되지 않음" msgid "Network type value needed by the ML2 plugin" msgstr "ML2 플러그인에 네트워크 유형 값이 필요함" msgid "Neutron core_plugin not configured!" msgstr "Neutron core_plugin이 구성되지 않았습니다" msgid "No default router:external network" msgstr "기본 router:external 네트워크가 없음" #, python-format msgid "No default subnetpool found for IPv%s" msgstr "IPv%s의 기본 subnetpool이 없음" msgid "No default subnetpools defined" msgstr "기본 subnetpools가 정의되지 않음" #, python-format msgid "No more IP addresses available for subnet %(subnet_id)s." msgstr "서브넷 %(subnet_id)s에 대해 사용 가능한 IP 주소가 더 이상 없습니다. " msgid "No more IP addresses available." msgstr "사용 가능한 IP 주소가 더 이상 없습니다." msgid "No offline migrations pending." msgstr "보류 중인 오프라인 마이그레이션이 없습니다." #, python-format msgid "No shared key in %s fields" msgstr "%s 필드의 공유 키가 없음" msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "'dvr' 모드에서 수동으로 에이전트에 라우터를 지정할 수 없습니다." msgid "Not allowed to manually remove a router from an agent in 'dvr' mode." msgstr "'dvr' 모드에서 수동으로 에이전트에서 라우터를 제거할 수 없습니다." msgid "" "Number of DHCP agents scheduled to host a tenant network. If this number is " "greater than 1, the scheduler automatically assigns multiple DHCP agents for " "a given tenant network, providing high availability for DHCP service." msgstr "" "테넌트 네트워크를 호스팅하기 위해 스케줄된 DHCP 에이전트의 수입니다. 
이 숫자" "가 1보다 크면 스케줄러는 지정된 테넌트 네트워크에 대해 다중 DHCP 에이전트를 " "자동으로 지정하여 DHCP 서비스에 대한 고가용성을 제공합니다. " msgid "Number of backlog requests to configure the metadata server socket with" msgstr "메타데이터 서버 소켓을 구성하기 위한 백로그 요청 수" msgid "Number of backlog requests to configure the socket with" msgstr "소켓을 설정하려는 백로그 요청 횟수" msgid "" "Number of bits in an ipv4 PTR zone that will be considered network prefix. " "It has to align to byte boundary. Minimum value is 8. Maximum value is 24. " "As a consequence, range of values is 8, 16 and 24" msgstr "" "네트워크 접두어로 고려될 ipv4 PTR 구역의 비트 수입니다. 바이트 경계에 맞게 정" "렬해야 합니다. 최소값은 8이고 최대값은 24입니다. 결과적으로 값의 범위는 8, " "16 및 24입니다." msgid "" "Number of bits in an ipv6 PTR zone that will be considered network prefix. " "It has to align to nyble boundary. Minimum value is 4. Maximum value is 124. " "As a consequence, range of values is 4, 8, 12, 16,..., 124" msgstr "" "네트워크 접두어로 고려될 ipv6 PTR 구역의 비트 수입니다. nyble 경계에 맞게 정" "렬해야 합니다. 최소값은 4이고 최대값은 124입니다. 결과적으로 값의 범위는 4, " "8, 12, 16,..., 124입니다." msgid "" "Number of floating IPs allowed per tenant. A negative value means unlimited." msgstr "테넌트당 허용된 부동 IP 수입니다. 음수 값은 무제한을 의미합니다." msgid "" "Number of networks allowed per tenant. A negative value means unlimited." msgstr "테넌트당 허용되는 네트워크 수입니다. 음수 값은 무제한을 의미합니다." msgid "Number of ports allowed per tenant. A negative value means unlimited." msgstr "테넌트당 허용되는 포트 수입니다. 음수 값은 무제한을 의미합니다." msgid "Number of routers allowed per tenant. A negative value means unlimited." msgstr "테넌트당 허용된 라우터 수입니다. 음수 값은 무제한을 의미합니다." msgid "" "Number of seconds between sending events to nova if there are any events to " "send." msgstr "보낼 이벤트가 있는 경우 nova에 전송하는 이벤트 간의 시간(초)입니다." msgid "Number of seconds to keep retrying to listen" msgstr "수신 대기 재시도를 계속할 시간(초)" msgid "" "Number of security groups allowed per tenant. A negative value means " "unlimited." msgstr "테넌트당 허용된 보안 그룹 수입니다. 음수 값은 무제한을 의미합니다." msgid "" "Number of security rules allowed per tenant. A negative value means " "unlimited." msgstr "테넌트당 허용된 보안 규칙 수입니다. 음수 값은 무제한을 의미합니다." msgid "" "Number of separate worker processes for metadata server (defaults to half of " "the number of CPUs)" msgstr "" "메타데이터 서버에 대한 별도의 작업자 프로세스 수(기본값은 CPU 수의 절반으로 " "지정됨)" msgid "Number of subnets allowed per tenant, A negative value means unlimited." msgstr "테넌트당 허용되는 서브넷 수입니다. 음수 값은 무제한을 의미합니다." msgid "" "Number of threads to use during sync process. Should not exceed connection " "pool size configured on server." msgstr "" "동기화 프로세스 중에 사용할 스레드 수입니다. 서버에 구성된 연결 풀 크기를 초" "과하지 않아야 합니다." msgid "OK" msgstr "OK" msgid "" "OVS datapath to use. 'system' is the default value and corresponds to the " "kernel datapath. To enable the userspace datapath set this value to 'netdev'." msgstr "" "사용할 OVS 데이터 경로입니다. 'system'은 기본값이며 커널 데이터 경로에 해당합" "니다. 사용자 공간 데이터 경로를 사용하려면 이 값을 'netdev'로 설정하십시오." msgid "OVS vhost-user socket directory." msgstr "OVS vhost-user 소켓 디렉토리." msgid "Only admin can view or configure quota" msgstr "관리자만이 할당량을 보거나 구성할 수 있습니다. " msgid "Only admin is authorized to access quotas for another tenant" msgstr "관리자만 다른 테넌트의 할당량에 액세스할 수 있는 권한이 있음" msgid "Only admins can manipulate policies on objects they do not own" msgstr "소유하지 않은 오브젝트의 정책은 관리자만 조작할 수 있음" msgid "Only allowed to update rules for one security profile at a time" msgstr "한 번에 하나의 보안 프로파일에 대한 규칙만 업데이트하도록 허용됨" msgid "Only remote_ip_prefix or remote_group_id may be provided." msgstr "remote_ip_prefix 또는 remote_group_id만이 제공될 수 있습니다. 
" #, python-format msgid "Operation not supported on device %(dev_name)s" msgstr "디바이스 %(dev_name)s에 대한 조작이 지원되지 않음" msgid "" "Ordered list of network_types to allocate as tenant networks. The default " "value 'local' is useful for single-box testing but provides no connectivity " "between hosts." msgstr "" "테넌트 네트워크로 할당할 network_types의 정렬된 목록입니다. 기본값 'local'은 " "단일 상자 테스트에 유용하지만 호스트 간 연결을 제공하지 않습니다." msgid "Override the default dnsmasq settings with this file." msgstr "기본 dnsmasq 설정을 이 파일로 대체합니다." msgid "Owner type of the device: network/compute" msgstr "디바이스의 소유자 유형: network/compute" msgid "POST requests are not supported on this resource." msgstr "이 자원에서 POST 요청이 지원되지 않습니다." #, python-format msgid "Package %s not installed" msgstr "패키지 %s이(가) 설치되지 않음" #, python-format msgid "Parsing bridge_mappings failed: %s." msgstr "bridge_mappings 구문 분석 실패: %s." msgid "Password for connecting to designate in admin context" msgstr "관리 컨텍스트에서 지정하기 위해 연결할 암호" msgid "Path to PID file for this process" msgstr "이 프로세스에 대한 PID 파일의 경로" msgid "Path to the router directory" msgstr "라우터 디렉토리의 경로" msgid "Peer patch port in integration bridge for tunnel bridge." msgstr "터널 브릿지에 대한 통합 브릿지에 있는 피어 패치 포트입니다." msgid "Peer patch port in tunnel bridge for integration bridge." msgstr "통합 브릿지에 대한 터널 브릿지에 있는 피어 패치 포트입니다." msgid "Phase upgrade options do not accept revision specification" msgstr "단계 업그레이드 옵션이 개정 스펙을 승인하지 않음" msgid "Ping timeout" msgstr "Ping 제한시간 초과" #, python-format msgid "Port %(id)s does not have fixed ip %(address)s" msgstr "%(id)s 포트가 고정 IP %(address)s을(를) 갖지 않음" #, python-format msgid "Port %s does not exist" msgstr "%s 포트가 없음" #, python-format msgid "" "Port %s has multiple fixed IPv4 addresses. Must provide a specific IPv4 " "address when assigning a floating IP" msgstr "" "포트 %s에 다중 고정 IPv4 주소가 있습니다. 부동 IP를 지정하는 경우에는 특정 " "IPv4 주소를 제공해야 합니다. " msgid "Prefix Delegation can only be used with IPv6 subnets." msgstr "접두부 위임은 IPv6 서브넷에만 사용할 수 있습니다. " msgid "Private key of client certificate." msgstr "클라이언트 인증서의 개인 키입니다." #, python-format msgid "Probe %s deleted" msgstr "%s 프로브가 삭제되었음" #, python-format msgid "Probe created : %s " msgstr "프로브 작성: %s " msgid "Process is already started" msgstr "프로세스가 이미 시작됨" msgid "Process is not running." msgstr "프로세스가 실행 중이지 않습니다." msgid "Protocol to access nova metadata, http or https" msgstr "nova 메타데이터에 액세스하기 위한 프로토콜, http 또는 https" #, python-format msgid "Provider name %(name)s is limited by %(len)s characters" msgstr "제공자 이름 %(name)s은(는) %(len)s자로 제한됨" #, python-format msgid "RBAC policy of type %(object_type)s with ID %(id)s not found" msgstr "ID가 %(id)s인 %(object_type)s 유형의 RBAC 정책을 찾을 수 없음" #, python-format msgid "" "RBAC policy on object %(object_id)s cannot be removed because other objects " "depend on it.\n" "Details: %(details)s" msgstr "" "다른 오브젝트가 의존하고 있기 때문에 오브젝트 %(object_id)s에 대한 RBAC 정책" "을 제거할 수 없습니다. \n" "세부사항: %(details)s" msgid "" "Range of seconds to randomly delay when starting the periodic task scheduler " "to reduce stampeding. (Disable by setting to 0)" msgstr "" "몰리지 않도록 주기적 태스크 스케줄러를 시작할 때 무작위로 지연할 시간의 범위" "(초)입니다. (0으로 설정하여 사용 안함) " msgid "Ranges must be in the same IP version" msgstr "범위가 동일한 IP 버전에 있어야 함" msgid "Ranges must be netaddr.IPRange" msgstr "범위는 netaddr.IPRange여야 함" msgid "Ranges must not overlap" msgstr "범위는 중첩되지 않아야 함" #, python-format msgid "" "Release aware branch labels (%s) are deprecated. Please switch to expand@ " "and contract@ labels." msgstr "" "분기 레이블(%s)은 더 이상 사용되지 않습니다. expand@ 및 contract@ 레이블로 전" "환하십시오. 
" msgid "Remote metadata server experienced an internal server error." msgstr "원격 메타데이터 서버에서 내부 서버 오류가 발생했습니다. " msgid "" "Repository does not contain HEAD files for contract and expand branches." msgstr "저장소에 계약 및 확장 분기의 HEAD 파일이 포함되지 않습니다." msgid "" "Representing the resource type whose load is being reported by the agent. " "This can be \"networks\", \"subnets\" or \"ports\". When specified (Default " "is networks), the server will extract particular load sent as part of its " "agent configuration object from the agent report state, which is the number " "of resources being consumed, at every report_interval.dhcp_load_type can be " "used in combination with network_scheduler_driver = neutron.scheduler." "dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is " "WeightScheduler, dhcp_load_type can be configured to represent the choice " "for the resource being balanced. Example: dhcp_load_type=networks" msgstr "" "에이전트에서 로드를 보고하는 자원 유형을 나타냅니다. 이는 \"네트워크\", \"서" "브넷\" 또는 \"포트\"입니다. 이를 지정하는 경우 (기본값은 네트워크임) 서버는 " "network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler." "WeightScheduler와의 조합에서 report_interval.dhcp_load_type을 사용할 수 있을 " "때마다 에이전트 보고 상태에서 에이전트 구성 오브젝트의 일부로 보낸 특정 로드" "를 추출하는데, 이는 이용 중인 자원 수입니다. network_scheduler_driver가 " "WeightScheduler인 경우 dhcp_load_type을 구성하여 밸런스 조정 중인 자원에 대" "한 선택을 표시할 수 있습니다. 예: dhcp_load_type=networks" msgid "Request Failed: internal server error while processing your request." msgstr "요청 실패: 요청을 처리하는 중에 내부 서버 오류가 발생했습니다. " msgid "" "Reset flow table on start. Setting this to True will cause brief traffic " "interruption." msgstr "" "시작 시 플로우 테이블을 재설정하십시오. 이를 True로 설정하면 짧은 트래픽 인터" "럽트가 발생합니다. " #, python-format msgid "Resource %(resource)s %(resource_id)s could not be found." msgstr "자원 %(resource)s %(resource_id)s을(를) 찾을 수 없습니다." #, python-format msgid "Resource %(resource_id)s of type %(resource_type)s not found" msgstr "유형 %(resource_type)s의 자원 %(resource_id)s을(를) 찾을 수 없음" #, python-format msgid "" "Resource '%(resource_id)s' is already associated with provider " "'%(provider)s' for service type '%(service_type)s'" msgstr "" "'%(resource_id)s' 자원이 이미 '%(service_type)s' 서비스 유형에 대한 " "'%(provider)s' 제공자와 연관되어 있음" msgid "Resource body required" msgstr "자원 본문 필수" msgid "Resource not found." msgstr "자원을 찾을 수 없습니다." msgid "Resources required" msgstr "자원 필수" msgid "" "Root helper application. Use 'sudo neutron-rootwrap /etc/neutron/rootwrap." "conf' to use the real root filter facility. Change to 'sudo' to skip the " "filtering and just run the command directly." msgstr "" "루트 헬퍼 애플리케이션. 'sudo neutron-rootwrap /etc/neutron/rootwrap.conf'를 " "사용하여 실제 루트 필터 기능을 사용합니다. 'sudo'로 변경하여 필터링을 건너뛰" "고 명령을 직접 실행하기만 하면 됩니다." msgid "Root permissions are required to drop privileges." msgstr "권한을 삭제하려면 루트 권한이 필요합니다." #, python-format msgid "Router already has a port on subnet %s" msgstr "라우터가 이미 %s 서브넷에 포트를 갖고 있음" msgid "Router port must have at least one fixed IP" msgstr "라우터 포트에는 하나 이상의 Fixed IP가 있어야 함" #, python-format msgid "Running %(cmd)s (%(desc)s) for %(project)s ..." msgstr "%(project)s에 대한 %(cmd)s(%(desc)s) 실행 중..." #, python-format msgid "Running %(cmd)s for %(project)s ..." msgstr "%(project)s에 대한 %(cmd)s 실행 중..." msgid "" "Seconds between nodes reporting state to server; should be less than " "agent_down_time, best if it is half or less than agent_down_time." msgstr "" "서버에 대한 상태를 보고하는 노드 사이의 시간(초)이며 agent_down_time보다 짧아" "야 하며 절반이거나 agent_down_time보다 짧은 경우 최적입니다." 
msgid "" "Seconds to regard the agent is down; should be at least twice " "report_interval, to be sure the agent is down for good." msgstr "" "에이전트가 작동 중지되었다고 간주되는 시간(초)이며 에이전트가 계속 작동 중지 " "상태인지 확인할 수 있도록 report_interval의 두 배 이상이어야 합니다." #, python-format msgid "Security Group %(id)s %(reason)s." msgstr "보안 그룹 %(id)s %(reason)s입니다. " #, python-format msgid "Security Group Rule %(id)s %(reason)s." msgstr "보안 그룹 규칙 %(id)s %(reason)s." #, python-format msgid "Security group %(id)s does not exist" msgstr "%(id)s 보안 그룹이 존재하지 않음" #, python-format msgid "Security group rule %(id)s does not exist" msgstr "보안 그룹 규칙 %(id)s이(가) 존재하지 않음" #, python-format msgid "Security group rule already exists. Rule id is %(rule_id)s." msgstr "보안 그룹 규칙이 이미 있습니다. 규칙 ID는 %(rule_id)s입니다." #, python-format msgid "" "Security group rule for ethertype '%(ethertype)s' not supported. Allowed " "values are %(values)s." msgstr "" "ethertype '%(ethertype)s'의 보안 그룹 규칙이 지원되지 않습니다. 허용된 값은 " "%(values)s입니다." #, python-format msgid "" "Security group rule protocol %(protocol)s not supported. Only protocol " "values %(values)s and integer representations [0 to 255] are supported." msgstr "" "보안 그룹 규칙 프로토콜 %(protocol)s이(가) 지원되지 않습니다. 프로토콜 값 " "%(values)s 및 정수 표시 [0 - 255]만 지원됩니다. " msgid "" "Send notification to nova when port data (fixed_ips/floatingip) changes so " "nova can update its cache." msgstr "" "포트 데이터(fixed_ips/floatingip)가 변경되면 알림을 nova에 보냅니다. 이에 따" "라 nova는 해당 캐시를 업데이트할 수 있습니다." msgid "Send notification to nova when port status changes" msgstr "포트 상태가 변경되면 알림을 nova에 보냅니다." #, python-format msgid "" "Service provider '%(provider)s' could not be found for service type " "%(service_type)s" msgstr "" "서비스 유형에 대한 '%(provider)s' 서비스 제공자를 찾을 수 없음: " "%(service_type)s" msgid "Service to handle DHCPv6 Prefix delegation." msgstr "DHCPv6 접두부 위임을 처리할 서비스입니다. " #, python-format msgid "Service type %(service_type)s does not have a default service provider" msgstr "%(service_type)s 서비스 유형에 기본 서비스 제공자가 없음" msgid "" "Set new timeout in seconds for new rpc calls after agent receives SIGTERM. " "If value is set to 0, rpc timeout won't be changed" msgstr "" "에이전트에서 SIGTERM을 수신한 후에 새 rpc 호출에 대한 새 제한시간(초)을 설정" "합니다. 값을 0으로 설정하면 rpc 제한시간이 변경되지 않습니다." msgid "" "Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "" "GRE/VXLAN 터널을 전송하는 발신 IP 패킷에 DF(Don't Fragment) 비트를 설정하거" "나 설정 해제하십시오." msgid "Shared address scope can't be unshared" msgstr "공유 주소 범위는 공유 해제할 수 없음" msgid "String prefix used to match IPset names." msgstr "IPset 이름을 일치시키는 데 사용되는 문자열 접두부입니다. " #, python-format msgid "Sub-project %s not installed." msgstr "하위 프로젝트 %s이(가) 설치되지 않았습니다. " msgid "Subnet for router interface must have a gateway IP" msgstr "라우터 인터페이스에 대한 서브넷은 게이트웨이 IP를 가져야 함" msgid "Subnet pool has existing allocations" msgstr "서브넷 풀에 기존 할당이 있음" msgid "Subnet used for the l3 HA admin network." msgstr "l3 HA 관리 네트워크에 사용된 서브넷입니다." msgid "" "System-wide flag to determine the type of router that tenants can create. " "Only admin can override." msgstr "" "테넌트가 작성할 수 있는 라우터 유형을 판별하는 시스템 범위 플래그입니다. 관리" "자만 대체할 수 있습니다." msgid "TCP Port used by Neutron metadata namespace proxy." msgstr "Neutron 메타데이터 네임스페이스 프록시가 사용하는 TCP 포트입니다. " msgid "TCP Port used by Nova metadata server." msgstr "Nova 메타데이터 서버가 사용한 TCP 포트입니다. " msgid "TTL for vxlan interface protocol packets." msgstr "vxlan 인터페이스 프로토콜 패킷용 TTL." #, python-format msgid "Tag %(tag)s could not be found." msgstr "%(tag)s 태그를 찾을 수 없습니다." 
#, python-format msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" msgstr "" "%(tenant_id)s 테넌트는 이 네트워크에 %(resource)s을(를) 작성하도록 허용되지 " "않음" msgid "Tenant id for connecting to designate in admin context" msgstr "관리 컨텍스트에서 지정하기 위해 연결할 테넌트 ID" msgid "Tenant name for connecting to designate in admin context" msgstr "관리 컨텍스트에서 지정하기 위해 연결할 테넌트 이름" msgid "" "The DHCP server can assist with providing metadata support on isolated " "networks. Setting this value to True will cause the DHCP server to append " "specific host routes to the DHCP request. The metadata service will only be " "activated when the subnet does not contain any router port. The guest " "instance must be configured to request host routes via DHCP (Option 121). " "This option doesn't have any effect when force_metadata is set to True." msgstr "" "DHCP 서버는 격리된 네트워크에서 메타데이터 지원을 제공하도록 지원할 수 있습니" "다. 이 값을 True로 설정하면 DHCP 서버가 특정 호스트 경로를 DHCP 요청에 추가합" "니다. 메타데이터 서비스는 서브넷에 라우터 포트가 포함되지 않은 경우에만 활성" "화됩니다. DHCP를 통해 호스트 경로를 요청하려면 게스트 인스턴스가 구성되어야 " "합니다(옵션 121). 이 옵션은 force_metadata가 True로 설정된 경우 적용되지 않습" "니다." msgid "The UDP port to use for VXLAN tunnels." msgstr "VXLAN 터널에 사용하는 UDP 포트" #, python-format msgid "" "The address allocation request could not be satisfied because: %(reason)s" msgstr "다음 원인으로 인해 주소 할당 요청을 충족할 수 없음: %(reason)s" msgid "The advertisement interval in seconds" msgstr "광고 간격(초)" msgid "" "The base MAC address Neutron will use for VIFs. The first 3 octets will " "remain unchanged. If the 4th octet is not 00, it will also be used. The " "others will be randomly generated." msgstr "" "VIF에기본 MAC 주소 Neutron을 사용합니다. 처음 세 개의 옥텟은 변경되지 않은 상" "태로 남습니다. 네 번째 옥텟이 00이 아니면 이 옥텟도 사용됩니다. 다른 옥텟은 " "임의로 생성됩니다." msgid "" "The base mac address used for unique DVR instances by Neutron. The first 3 " "octets will remain unchanged. If the 4th octet is not 00, it will also be " "used. The others will be randomly generated. The 'dvr_base_mac' *must* be " "different from 'base_mac' to avoid mixing them up with MAC's allocated for " "tenant ports. A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00. " "The default is 3 octet" msgstr "" "Neutron에 의해 고유 DVR 인스턴스에 사용되는 기본 mac 주소입니다. 처음 세 개 " "옥텟은 변경되지 않고 남아 있습니다. 네 번째 옥텟이 00이 아니면 이 옥텟도 사용" "됩니다. 다른 옥텟은 무작위로 생성됩니다. 테넌트 포트에 대해 할당된 MAC과의 혼" "합을 방지하기 위해 'dvr_base_mac'은 'base_mac'과 달라야 *합니다*. 4 옥텟 예제" "는 dvr_base_mac = fa:16:3f:4f:00:00입니다. 기본값은 3 옥텟입니다. " msgid "The core plugin Neutron will use" msgstr "Neutron이 사용할 코어 플러그인" msgid "The driver used to manage the DHCP server." msgstr "DHCP 서버를 관리하는 데 사용되는 드라이버입니다. " msgid "The driver used to manage the virtual interface." msgstr "가상 인터페이스를 관리하는 데 사용되는 드라이버입니다. " msgid "" "The email address to be used when creating PTR zones. If not specified, the " "email address will be admin@" msgstr "" "PTR 구역을 작성할 때 사용할 이메일 주소입니다. 지정되지 않은 경우 이메일 주소" "는 admin@입니다." msgid "" "The maximum number of items returned in a single response, value was " "'infinite' or negative integer means no limit" msgstr "" "단일 응답으로 최대 항목 수가 리턴되었습니다. 값이 'infinite' 또는 음수인 경" "우 제한이 없다는 의미입니다. " msgid "" "The network type to use when creating the HA network for an HA router. By " "default or if empty, the first 'tenant_network_types' is used. This is " "helpful when the VRRP traffic should use a specific network which is not the " "default one." msgstr "" "HA 라우터에 대한 HA 네트워크 작성 시 사용할 네트워크 유형입니다. 기본적으로 " "또는 비어 있는 경우 첫 번째 'tenant_network_types'가 사용됩니다. 이는 VRRP 트" "래픽이 기본값이 아닌 특정 네트워크를 사용해야 하는 경우에 유용합니다. 
" msgid "" "The number of seconds the agent will wait between polling for local device " "changes." msgstr "에이전트가 로컬 디바이스 변경을 폴링하는 사이에 대기하는 시간(초). " msgid "" "The number of seconds to wait before respawning the ovsdb monitor after " "losing communication with it." msgstr "" "통신이 유실된 후에 ovsdb 모니터를 재파생하기 전에 대기할 시간(초)입니다." msgid "The number of sort_keys and sort_dirs must be same" msgstr "sort_keys 및 sort_dirs의 수가 같아야 함" msgid "" "The path for API extensions. Note that this can be a colon-separated list of " "paths. For example: api_extensions_path = extensions:/path/to/more/exts:/" "even/more/exts. The __path__ of neutron.extensions is appended to this, so " "if your extensions are in there you don't need to specify them here." msgstr "" "API 확장의 경로입니다. 이 경로는 콜론으로 구분된 경로 목록일 수 있습니다. " "예: api_extensions_path = extensions:/path/to/more/exts:/even/more/exts. " "neutron.extensions의 __path__가 이 경로에 추가되므로, 해당 위치에 확장이 있으" "면 여기에 지정하지 않아도 됩니다." msgid "The physical network name with which the HA network can be created." msgstr "HA 네트워크를 작성하는 데 사용할 수 있는 실제 네트워크 이름입니다. " #, python-format msgid "The port '%s' was deleted" msgstr "포트 '%s'이(가) 삭제됨" msgid "The port to bind to" msgstr "바인드할 포트" #, python-format msgid "The requested content type %s is invalid." msgstr "요청한 컨텐츠 유형 %s이(가) 올바르지 않습니다." msgid "The resource could not be found." msgstr "자원을 찾을 수 없습니다. " #, python-format msgid "" "The router %(router_id)s has been already hosted by the L3 Agent " "%(agent_id)s." msgstr "" "L3 에이전트 %(agent_id)s에서 %(router_id)s 라우터를 이미 호스트하고 있습니" "다. " msgid "" "The server has either erred or is incapable of performing the requested " "operation." msgstr "서버에 오류가 있거나 서버가 요청된 조작을 수행할 수 없습니다." msgid "The service plugins Neutron will use" msgstr "Neutron이 사용할 서비스 플러그인" #, python-format msgid "The subnet request could not be satisfied because: %(reason)s" msgstr "다음 이유로 인해 서브넷 요청을 충족할 수 없음: %(reason)s" #, python-format msgid "The subproject to execute the command against. Can be one of: '%s'." msgstr "명령을 실행할 하위 프로젝트입니다. 다음 중 하나가 될 수 있음: '%s'." msgid "The type of authentication to use" msgstr "사용할 인증 유형" msgid "" "There are routers attached to this network that depend on this policy for " "access." msgstr "" "이 네트워크에 연결된 라우터가 있으며, 해당 라우터에는 액세스를 위해 이 정책" "이 필요합니다." msgid "" "True to delete all ports on all the OpenvSwitch bridges. False to delete " "ports created by Neutron on integration and external network bridges." msgstr "" "모든 OpenvSwitch 브릿지의 모든 포트를 삭제하려면 true입니다. 통합 및 외부 네" "트워크 브릿지에 Neutron이 작성한 포트를 삭제하려면 false입니다. " msgid "Tunnel IP value needed by the ML2 plugin" msgstr "ML2 플러그인에 터널 IP 값이 필요함" msgid "Tunnel bridge to use." msgstr "사용할 터널 브릿지입니다." msgid "" "Type of the nova endpoint to use. This endpoint will be looked up in the " "keystone catalog and should be one of public, internal or admin." msgstr "" "사용할 nova 엔드포인트의 유형입니다. 이 엔드포인트는 keystone 카탈로그에서 검" "색하며 공용, 내부 또는 관리 중 하나여야 합니다." 
msgid "URL for connecting to designate" msgstr "지정하기 위해 연결할 URL" msgid "URL to database" msgstr "데이터베이스에 대한 URL" #, python-format msgid "Unable to access %s" msgstr "%s에 액세스할 수 없음" #, python-format msgid "Unable to calculate %(address_type)s address because of:%(reason)s" msgstr "다음 원인으로 인해 %(address_type)s 주소를 계산할 수 없음: %(reason)s" #, python-format msgid "Unable to convert value in %s" msgstr "%s의 값을 변환할 수 없음" msgid "Unable to create the SNAT Interface Port" msgstr "SNAT 인터페이스 포트를 작성할 수 없음" #, python-format msgid "Unable to determine mac address for %s" msgstr "%s의 맥 주소를 확인할 수 없습니다" #, python-format msgid "Unable to find '%s' in request body" msgstr "요청 본문에서 '%s'을(를) 찾을 수 없음" #, python-format msgid "Unable to find IP address %(ip_address)s on subnet %(subnet_id)s" msgstr "서브넷 %(subnet_id)s에서 IP 주소 %(ip_address)s을(를) 찾을 수 없음" #, python-format msgid "Unable to find resource name in %s" msgstr "%s에서 자원 이름을 찾을 수 없음" #, python-format msgid "" "Unable to identify a target field from:%s. Match should be in the form " "%%()s" msgstr "" "%s에서 대상 필드를 식별할 수 없음. 일치가 다음 양식이어야 함." "%%()s" msgid "Unable to provide external connectivity" msgstr "외부 연결을 제공할 수 없음" msgid "Unable to provide tenant private network" msgstr "테넌트 개인 네트워크를 제공할 수 없음" #, python-format msgid "" "Unable to verify match:%(match)s as the parent resource: %(res)s was not " "found" msgstr "" "상위 소스로서 일치 %(match)s을(를) 확인할 수 없음. %(res)s을(를) 찾을 수 없음" #, python-format msgid "Unexpected label for script %(script_name)s: %(labels)s" msgstr "스크립트 %(script_name)s에 대해 예상치 않은 레이블: %(labels)s" #, python-format msgid "Unexpected number of alembic branch points: %(branchpoints)s" msgstr "예상치 못한 수의 변형 장치(alembic) 분기점: %(branchpoints)s" #, python-format msgid "Unexpected response code: %s" msgstr "예기치 않은 응답 코드: %s" #, python-format msgid "Unexpected response: %s" msgstr "예상치 않은 응답: %s" #, python-format msgid "Unit name '%(unit)s' is not valid." msgstr "단위 이름 '%(unit)s'이(가) 올바르지 않습니다." #, python-format msgid "Unknown address type %(address_type)s" msgstr "알 수 없는 주소 유형 %(address_type)s" #, python-format msgid "Unknown attribute '%s'." msgstr "알 수 없는 속성 '%s'입니다." #, python-format msgid "Unknown chain: %r" msgstr "알 수 없는 체인: %r" #, python-format msgid "Unknown network type %(network_type)s." msgstr "알 수 없는 네트워크 유형 %(network_type)s." msgid "Unmapped error" msgstr "맵핑되지 않은 오류" msgid "Unrecognized action" msgstr "인식되지 않는 조치" msgid "Unrecognized field" msgstr "인식되지 않는 필드" msgid "Unsupported Content-Type" msgstr "지원되지 않는 Content-Type" #, python-format msgid "Unsupported network type %(net_type)s." msgstr "지원되지 않는 네트워크 유형 %(net_type)s입니다." msgid "Unsupported request type" msgstr "지원되지 않는 요청 유형" msgid "Updating default security group not allowed." msgstr "기본 보안 그룹 업데이트가 허용되지 않습니다. " msgid "" "Use ML2 l2population mechanism driver to learn remote MAC and IPs and " "improve tunnel scalability." msgstr "" "원격 MAC 및 IP를 학습하고 터널 확장성을 개선하려면 ML2 l2population 메커니즘 " "드라이버를 사용하십시오." msgid "Use broadcast in DHCP replies." msgstr "DHCP 복제본에서 브로드캐스팅을 사용하십시오." msgid "Use either --delta or relative revision, not both" msgstr "--델타 또는 상대적 개정판 중 하나 사용" msgid "" "Use ipset to speed-up the iptables based security groups. Enabling ipset " "support requires that ipset is installed on L2 agent node." msgstr "" "ipset을 사용하여 iptables 기반 보안 그룹의 속도를 높입니다. ipset 지원을 사용" "하려면 ipset이 L2 에이전트 노드에 설치되어야 합니다." msgid "" "Use the root helper when listing the namespaces on a system. This may not be " "required depending on the security configuration. 
If the root helper is not " "required, set this to False for a performance improvement." msgstr "" "시스템에 네임스페이스를 나열할 때 루트 헬퍼를 사용하십시오. 보안 구성에 따라 " "이 작업은 필요하지 않을 수 있습니다. 루트 헬퍼가 필요하지 않으면 성능이 향상" "되도록 False로 설정하십시오." msgid "" "Use veths instead of patch ports to interconnect the integration bridge to " "physical networks. Support kernel without Open vSwitch patch port support so " "long as it is set to True." msgstr "" "패치 포트 대신 veth를 사용하여 통합 브릿지와 실제 네트워크를 상호연결하십시오. " "True로 설정된 경우에 한해 Open vSwitch 패치 포트가 없는 커널이 지원됩니다." msgid "" "User (uid or name) running metadata proxy after its initialization (if " "empty: agent effective user)." msgstr "" "초기화 후에 메타데이터 프록시를 실행하는 사용자(uid 또는 이름)(비어 있는 경" "우: 에이전트 유효 사용자)." msgid "User (uid or name) running this process after its initialization" msgstr "초기화 이후 이 프로세스를 실행하는 사용자(uid 또는 이름)" msgid "Username for connecting to designate in admin context" msgstr "관리 컨텍스트에서 Designate에 연결하기 위한 사용자 이름" msgid "VRRP authentication password" msgstr "VRRP 인증 비밀번호" msgid "VRRP authentication type" msgstr "VRRP 인증 유형" msgid "" "Value of host kernel tick rate (hz) for calculating minimum burst value in " "bandwidth limit rules for a port with QoS. See kernel configuration file for " "HZ value and tc-tbf manual for more information." msgstr "" "QoS가 있는 포트의 대역폭 한계 규칙에서 최소 버스트 값을 계산하기 위한 호스트 " "커널 틱(tick) 속도(hz) 값입니다. HZ 값은 커널 구성 파일을 참조하고, 자세한 내" "용은 tc-tbf 매뉴얼을 참조하십시오." msgid "" "Value of latency (ms) for calculating size of queue for a port with QoS. See " "tc-tbf manual for more information." msgstr "" "QoS가 있는 포트의 큐 크기를 계산하기 위한 지연 시간 값(ms)입니다. 자세한 정보" "는 tc-tbf 매뉴얼을 참조하십시오." msgid "" "When proxying metadata requests, Neutron signs the Instance-ID header with a " "shared secret to prevent spoofing. You may select any string for a secret, " "but it must match here and in the configuration used by the Nova Metadata " "Server. NOTE: Nova uses the same config key, but in [neutron] section." msgstr "" "메타데이터 요청의 프록시 역할을 수행할 때 Neutron이 위조를 방지하기 위해 공" "유 시크릿으로 Instance-ID 헤더에 서명합니다. 시크릿으로 임의의 문자열을 선택" "할 수 있지만 여기에 있는 문자열 및 Nova Metadata Server에서 사용하는 구성과 " "일치해야 합니다. 참고: Nova에서는 [neutron] 섹션에 있는 동일한 구성 키를 사용" "합니다." msgid "" "Where to store Neutron state files. This directory must be writable by the " "agent." msgstr "" "Neutron 상태 파일을 저장할 위치. 에이전트가 이 디렉토리에 쓸 수 있어야 합니" "다." msgid "" "With IPv6, the network used for the external gateway does not need to have " "an associated subnet, since the automatically assigned link-local address " "(LLA) can be used. However, an IPv6 gateway address is needed for use as the " "next-hop for the default route. If no IPv6 gateway address is configured " "here, (and only then) the neutron router will be configured to get its " "default route from router advertisements (RAs) from the upstream router; in " "which case the upstream router must also be configured to send these RAs. " "The ipv6_gateway, when configured, should be the LLA of the interface on the " "upstream router. If a next-hop using a global unique address (GUA) is " "desired, it needs to be done via a subnet allocated to the network and not " "through this parameter. " msgstr "" "IPv6를 사용하면 자동으로 지정되는 링크 로컬 주소(LLA)를 사용할 수 있으므로 외" "부 게이트웨이에 사용한 네트워크에 연관 서브넷이 필요하지 않습니다. 그러나 기" "본 라우트의 다음 홉으로 사용할 IPv6 게이트웨이 주소가 필요합니다. 여기서 " "IPv6 게이트웨이 주소를 구성하지 않으면(또한 이 경우에만)상위 라우터의 " "RA(Router Advertisement)에서 해당 기본 라우트를 가져오도록 Neutron 라우터를 " "구성할 수 있습니다. 이 경우 이러한 RA를 보내도록 상위 라우터를 구성할 수도 있" "습니다. ipv6_gateway를 구성한 경우, 이 게이트웨이가 상위 라우터의 인터페이스" "에 대한 LLA여야 합니다. 
글로벌 고유 주소(GUA)를 사용하는 다음 홉이 필요한 경" "우, 이 매개변수가 아닌 네트워크에 할당된 서브넷을 통해 수행해야 합니다. " msgid "You must implement __call__" msgstr "__call__을 구현해야 합니다. " msgid "" "You must provide a config file for bridge - either --config-file or " "env[NEUTRON_TEST_CONFIG_FILE]" msgstr "" "브릿지에 대한 구성 파일(--config-file 또는 env[NEUTRON_TEST_CONFIG_FILE])을 " "제공해야 합니다" msgid "You must provide a revision or relative delta" msgstr "개정판 또는 상대적 델타를 제공해야 함" msgid "a subnetpool must be specified in the absence of a cidr" msgstr "cidr이 없는 경우 subnetpool을 지정해야 함" msgid "add_ha_port cannot be called inside of a transaction." msgstr "트랜잭션 내에서 add_ha_port를 호출할 수 없습니다." msgid "allocation_pools allowed only for specific subnet requests." msgstr "allocation_pools는 특정 서브넷 요청에만 사용할 수 있습니다." msgid "allocation_pools are not in the subnet" msgstr "allocation_pools가 서브넷에 없음" msgid "allocation_pools use the wrong ip version" msgstr "allocation_pools에서 잘못된 ip 버전을 사용함" msgid "already a synthetic attribute" msgstr "이미 합성 속성임" msgid "binding:profile value too large" msgstr "binding:profile 값이 너무 김" #, python-format msgid "cannot perform %(event)s due to %(reason)s" msgstr "%(reason)s(으)로 인해 %(event)s을(를) 수행할 수 없음" msgid "cidr and prefixlen must not be supplied together" msgstr "cidr 및 prefixlen을 함께 입력하지 않아야 함" msgid "dns_domain cannot be specified without a dns_name" msgstr "dns_name 없이 dns_domain을 지정할 수 없음" msgid "dns_name cannot be specified without a dns_domain" msgstr "dns_domain 없이 dns_name을 지정할 수 없음" msgid "fixed_ip_address cannot be specified without a port_id" msgstr "fixed_ip_address는 port_id 없이 지정할 수 없음" #, python-format msgid "has device owner %s" msgstr "디바이스 소유자 %s이(가) 있음" msgid "in use" msgstr "사용 중" #, python-format msgid "ip command failed on device %(dev_name)s: %(reason)s" msgstr "%(dev_name)s 디바이스에 대한 ip 명령 실패: %(reason)s" #, python-format msgid "ip link capability %(capability)s is not supported" msgstr "ip 링크 기능 %(capability)s이(가) 지원되지 않음" #, python-format msgid "ip link command is not supported: %(reason)s" msgstr "ip 링크 명령이 지원되지 않음: %(reason)s" msgid "ip_version must be specified in the absence of cidr and subnetpool_id" msgstr "cidr 및 subnetpool_id가 없는 경우 ip_version을 지정해야 함" msgid "ipv6_address_mode is not valid when ip_version is 4" msgstr "ip_version이 4인 경우 ipv6_address_mode가 올바르지 않음" msgid "ipv6_ra_mode is not valid when ip_version is 4" msgstr "ip_version이 4인 경우 ipv6_ra_mode가 올바르지 않음" #, python-format msgid "" "ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " "'%(addr_mode)s' is not valid. If both attributes are set, they must be the " "same value" msgstr "" "ipv6_ra_mode가 '%(ra_mode)s'(으)로 설정되고 ipv6_address_mode가 " "'%(addr_mode)s'(으)로 설정된 경우 올바르지 않습니다. 두 속성을 모두 설정하는 " "경우 동일한 값이어야 합니다." msgid "mac address update" msgstr "mac 주소 업데이트" msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "정확히 두 개의 인수 - cidr 및 MAC를 제공해야 함" msgid "network_type required" msgstr "network_type이 필요함" #, python-format msgid "network_type value '%s' not supported" msgstr "network_type에서 '%s' 값을 지원하지 않습니다" msgid "new subnet" msgstr "새 서브넷" #, python-format msgid "physical_network '%s' unknown for flat provider network" msgstr "플랫 제공자 네트워크에 대해 실제 네트워크 '%s'을(를) 알 수 없음. " msgid "physical_network required for flat provider network" msgstr "플랫 제공자 네트워크에 실제 네트워크 필요" #, python-format msgid "provider:physical_network specified for %s network" msgstr "%s 네트워크에 대해 지정된 provider:physical_network 입니다" msgid "respawn_interval must be >= 0 if provided." msgstr "respawn_interval은 >= 0이어야 합니다(제공된 경우)."
#, python-format msgid "segmentation_id out of range (%(min)s through %(max)s)" msgstr "segmentation_id가 범위(%(min)s - %(max)s)를 벗어남" msgid "segmentation_id requires physical_network for VLAN provider network" msgstr "segmentation_id는 VLAN 제공자 네트워크의 physical_network가 필요함" msgid "shared attribute switching to synthetic" msgstr "공유 속성을 합성으로 전환" #, python-format msgid "" "subnetpool %(subnetpool_id)s cannot be updated when associated with shared " "address scope %(address_scope_id)s" msgstr "" "공유 주소 범위 %(address_scope_id)s과(와) 연관된 경우 서브넷 풀 " "%(subnetpool_id)s을(를) 업데이트할 수 없음" msgid "subnetpool_id and use_default_subnetpool cannot both be specified" msgstr "subnetpool_id 및 use_default_subnetpool을 모두 지정할 수 없음" msgid "the nexthop is not connected with router" msgstr "nexthop이 라우터와 연결되지 않음" msgid "the nexthop is used by router" msgstr "라우터가 nexthop을 사용함" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982290.9710417 neutron-16.0.0.0b2.dev214/neutron/locale/pt_BR/0000755000175000017500000000000000000000000021102 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3190448 neutron-16.0.0.0b2.dev214/neutron/locale/pt_BR/LC_MESSAGES/0000755000175000017500000000000000000000000022667 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/locale/pt_BR/LC_MESSAGES/neutron.po0000644000175000017500000027131200000000000024727 0ustar00coreycorey00000000000000# Translations template for neutron. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the neutron project. # # Translators: # Gabriel Wainer, 2013 # Andreas Jaeger , 2016. #zanata # Fernando Pimenta , 2017. 
#zanata msgid "" msgstr "" "Project-Id-Version: neutron VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2019-12-20 15:01+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2017-03-28 07:43+0000\n" "Last-Translator: Copied by Zanata \n" "Language: pt_BR\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Portuguese (Brazil)\n" #, python-format msgid "" "\n" "Command: %(cmd)s\n" "Exit code: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" msgstr "" "\n" "Comando: %(cmd)s\n" "Código de saída: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" #, python-format msgid "" "%(branch)s HEAD file does not match migration timeline head, expected: " "%(head)s" msgstr "" "O arquivo HEAD %(branch)s não corresponde ao cabeçalho da linha de tempo de " "migração, esperado: %(head)s" #, python-format msgid "" "%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' " "and '%(desc)s'" msgstr "" "%(invalid_dirs)s é um valor inválido para sort_dirs, o valor válido é " "'%(asc)s' e '%(desc)s'" #, python-format msgid "%(key)s prohibited for %(tunnel)s provider network" msgstr "%(key)s proibida para rede de provedor %(tunnel)s" #, python-format msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" msgstr "%(name)s '%(addr)s' não corresponde à ip_version '%(ip_version)s'" #, python-format msgid "%s cannot be called while in offline mode" msgstr "%s não pode ser chamado durante o modo offline" #, python-format msgid "%s is invalid attribute for sort_keys" msgstr "%s é um atributo inválido para sort_keys" #, python-format msgid "%s must implement get_port_from_device or get_ports_from_devices." msgstr "%s deve implementar get_port_from_device ou get_ports_from_devices." #, python-format msgid "%s prohibited for VLAN provider network" msgstr "%s proibido para rede de provedor VLAN" #, python-format msgid "%s prohibited for flat provider network" msgstr "%s proibido para rede de provedor flat" #, python-format msgid "%s prohibited for local provider network" msgstr "%s proibido para rede de provedor local" #, python-format msgid "'%s' is not a valid RBAC object type" msgstr "'%s' não é um tipo de objeto RBAC válido" #, python-format msgid "'%s' is not supported for filtering" msgstr "'%s' não é suportado para filtragem" #, python-format msgid "'module' object has no attribute '%s'" msgstr "O objeto 'módulo' não possui nenhum atributo '%s'" msgid "'port_max' is smaller than 'port_min'" msgstr "'port_max' é menor que 'port_min'" msgid "0 is not allowed as CIDR prefix length" msgstr "0 não é permitido como um comprimento do prefixo CIDR" msgid "A cidr must be specified in the absence of a subnet pool" msgstr "Um cidr deve ser especificado na ausência de um conjunto de sub-rede" msgid "" "A decimal value as Vendor's Registered Private Enterprise Number as required " "by RFC3315 DUID-EN." msgstr "" "Um valor decimal, como o Número de Empresa Privada Registrada do Fornecedor, " "conforme requerido pelo RFC3315 DUID-EN." #, python-format msgid "A default external network already exists: %(net_id)s." msgstr "Uma rede externa padrão já existe: %(net_id)s." msgid "" "A default subnetpool for this IP family has already been set. 
Only one " "default may exist per IP family" msgstr "" "Um conjunto de sub-redes padrão para essa família de IP já foi configurado. " "Apenas um padrão pode existir por família de IP." msgid "A metering driver must be specified" msgstr "Um driver de medição deve ser especificado" msgid "Access to this resource was denied." msgstr "Acesso à este recurso foi negado." msgid "Action to be executed when a child process dies" msgstr "Ação a ser executada quando um processo-filho morre" msgid "" "Add comments to iptables rules. Set to false to disallow the addition of " "comments to generated iptables rules that describe each rule's purpose. " "System must support the iptables comments module for addition of comments." msgstr "" "Adicionar comentários a regras do iptables. Configure como falso para " "desabilitar a adição de comentários para tabelas de regras geradas que " "descrevam o propósito de cada regra. O sistema deve suportar o módulo de " "comentários do iptables para adicionar comentários." msgid "Address not present on interface" msgstr "Endereço não está presente na interface" msgid "Adds test attributes to core resources." msgstr "Inclui atributos de teste aos recursos principais." #, python-format msgid "Agent %(id)s is not a L3 Agent or has been disabled" msgstr "O agente %(id)s não é um agente L3 ou foi desativado" msgid "" "Agent starts with admin_state_up=False when enable_new_agents=False. In the " "case, user's resources will not be scheduled automatically to the agent " "until admin changes admin_state_up to True." msgstr "" "Agente inicia com admin_state_up=False quando enable_new_agents=False. No " "caso, os recursos do usuário não serão planejados automaticamente para o " "agente até que o administrador mude admin_state_up para True." #, python-format msgid "Agent updated: %(payload)s" msgstr "Agente atualizado:%(payload)s" msgid "Allow auto scheduling networks to DHCP agent." msgstr "Permitir o planejamento automático de redes para o agente DHCP." msgid "Allow auto scheduling of routers to L3 agent." msgstr "Permitir planejamento automático de roteadores para agente L3." msgid "" "Allow overlapping IP support in Neutron. Attention: the following parameter " "MUST be set to False if Neutron is being used in conjunction with Nova " "security groups." msgstr "" "Permitir sobreposição de suporte IP no Neutron. Atenção: o parâmetro a " "seguir DEVERÁ ser configurado para False se o Neutron estiver sendo usado em " "conjunto com os grupos de segurança do Nova." msgid "Allow running metadata proxy." msgstr "Permite executar proxy de metadados." msgid "Allow sending resource operation notification to DHCP agent" msgstr "" "Permitir envio de notificação de operação de recurso para o agente DHCP" msgid "Allow the creation of PTR records" msgstr "Permitir a criação de registros PTR" msgid "Allow the usage of the bulk API" msgstr "Permitir o uso da API em massa" msgid "Allow to perform insecure SSL (https) requests to nova metadata" msgstr "" "Permita executar solicitações (https) de SSL inseguras para metadados nova" msgid "" "Allows for serving metadata requests coming from a dedicated metadata access " "network whose CIDR is 169.254.169.254/16 (or larger prefix), and is " "connected to a Neutron router from which the VMs send metadata:1 request. In " "this case DHCP Option 121 will not be injected in VMs, as they will be able " "to reach 169.254.169.254 through a router. This option requires " "enable_isolated_metadata = True." 
msgstr "" "Permite entregar solicitações de metadados provenientes de uma rede de " "acesso de metadados dedicada cujo CIDR é 169.254.169.254/16 (ou um prefixo " "maior), e é conectado a um roteador Neutron a partir do qual as MVs enviam a " "solicitação metadata:1. Nesse caso, a Opção 121 do DHCP não será injetada " "nas MVs já que elas poderão acessar 169.254.169.254 por meio de um roteador. " "Essa opção requer enable_isolated_metadata = True." msgid "An RBAC policy already exists with those values." msgstr "Uma política RBAC já existe com esses valores." msgid "An identifier must be specified when updating a subnet" msgstr "Um identificador deve ser especificado ao atualizar uma sub-rede" msgid "" "An ordered list of extension driver entrypoints to be loaded from the " "neutron.ml2.extension_drivers namespace. For example: extension_drivers = " "port_security,qos" msgstr "" "Uma lista ordenada de pontos de entrada do driver de extensão a serem " "carregados a partir do namespace neutron.ml2.extension_drivers. Por exemplo: " "extension_drivers = port_security,qos" msgid "" "An ordered list of networking mechanism driver entrypoints to be loaded from " "the neutron.ml2.mechanism_drivers namespace." msgstr "" "Lista ordenada de pontos de entrada do driver de mecanismo de rede que será " "carregada do namespace neutron.ml2.mechanism_drivers." msgid "An unknown error has occurred. Please try your request again." msgstr "Ocorreu um erro desconhecido. Tente a solicitação novamente." msgid "Async process didn't respawn" msgstr "O processo assíncrono não sofreu spawn novamente" msgid "Authorization URL for connecting to designate in admin context" msgstr "" "URL de autorização para conexão ao Designate no contexto de admnistrador" msgid "Automatically remove networks from offline DHCP agents." msgstr "Remover automaticamente as redes de agentes DHCP offline." msgid "" "Automatically reschedule routers from offline L3 agents to online L3 agents." msgstr "" "Reagende roteadores automaticamente de agentes L3 offline para agentes L3 " "online." msgid "Availability zone of this node" msgstr "Zona de disponibilidade deste nó" msgid "Available commands" msgstr "Comandos disponíveis" #, python-format msgid "Base MAC: %s" msgstr "MAC Base: %s" msgid "" "Base log dir for dnsmasq logging. The log contains DHCP and DNS log " "information and is useful for debugging issues with either DHCP or DNS. If " "this section is null, disable dnsmasq log." msgstr "" "Diretório de log base para criação de log dnsmasq. O log contém informações " "de log DHCP e DNS e é útil para depurar problemas com DHCP ou DNS. Se esta " "seção for nula, desative o log dnsmasq." 
msgid "Body contains invalid data" msgstr "O corpo contém dados inválidos" msgid "Bulk operation not supported" msgstr "Operação em massa não suportada" msgid "CIDR to monitor" msgstr "CIDR para monitorar" #, python-format msgid "Callback for %(resource_type)s not found" msgstr "Retorno de chamada para %(resource_type)s não localizado" #, python-format msgid "Callback for %(resource_type)s returned wrong resource type" msgstr "" "Retorno de chamada para %(resource_type)s retornou tipo de recurso errado" #, python-format msgid "Cannot add floating IP to port %s that has no fixed IPv4 addresses" msgstr "" "Não é possível incluir IP flutuante na porta %s que não tem endereços IPv4 " "fixos" #, python-format msgid "Cannot add multiple callbacks for %(resource_type)s" msgstr "" "Não é possível incluir vários retornos de chamada para %(resource_type)s" #, python-format msgid "Cannot allocate IPv%(req_ver)s subnet from IPv%(pool_ver)s subnet pool" msgstr "" "Não é possível alocar a sub-rede IPv%(req_ver)s a partir do conjunto de sub-" "rede IPv%(pool_ver)s" msgid "Cannot allocate requested subnet from the available set of prefixes" msgstr "" "Não é possível alocar a sub-rede solicitada a partir do conjunto disponível " "de prefixos" msgid "Cannot disable enable_dhcp with ipv6 attributes set" msgstr "Não é possível desativar enable_dhcp com conjunto de atributos ipv6" #, python-format msgid "Cannot handle subnet of type %(subnet_type)s" msgstr "Não é possível manipular a sub-rede do tipo %(subnet_type)s" msgid "Cannot have multiple IPv4 subnets on router port" msgstr "Não é possível ter diversas sub-redes IPV4 na porta do roteador" #, python-format msgid "" "Cannot have multiple router ports with the same network id if both contain " "IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s" msgstr "" "Não é possível ter várias portas de roteador com o mesmo ID de rede se ambas " "contiverem sub-redes IPv6. A porta existente %(p)s possui sub-rede(s) IPv6 e " "o ID de rede %(nid)s" #, python-format msgid "" "Cannot host distributed router %(router_id)s on legacy L3 agent %(agent_id)s." msgstr "" "Não é possível hospedar o roteador distribuído %(router_id)s no agente L3 " "legacy %(agent_id)s." 
msgid "Cannot specify both subnet-id and port-id" msgstr "Não é possível especificar subnet-id e port-id" msgid "Cannot understand JSON" msgstr "Não é possível entender JSON" #, python-format msgid "Cannot update read-only attribute %s" msgstr "Não é possível atualizar o atributo de leitura %s" msgid "Certificate Authority public key (CA cert) file for ssl" msgstr "" "Arquivo de chave pública da autoridade de certificação (certificado CA) para " "ssl" msgid "Check ebtables installation" msgstr "Verificar instalação de ebtables" msgid "Check for ARP header match support" msgstr "Verificar suporte de correspondência de cabeçalho ARP" msgid "Check for ARP responder support" msgstr "Verifique se há suporte respondente para ARP" msgid "Check for ICMPv6 header match support" msgstr "Verificar suporte de correspondência de cabeçalho ICMPv6" msgid "Check for OVS Geneve support" msgstr "Verificar suporte a OVS Geneve" msgid "Check for OVS vxlan support" msgstr "Verifique o suporte do vxlan do OVS" msgid "Check for VF management support" msgstr "Verifique o suporte de gerenciamento de VF" msgid "Check for iproute2 vxlan support" msgstr "Verifique o suporte do vxlan do iproute2" msgid "Check for nova notification support" msgstr "Verifique suporte para nova notificação" msgid "Check for patch port support" msgstr "Verifique o suporte para a porta de correção" msgid "Check ip6tables installation" msgstr "Verificar instalação do ip6tables" msgid "Check ipset installation" msgstr "Verificar instalação do ipset" msgid "Check keepalived IPv6 support" msgstr "Verificar suporte a keepalived IPv6" msgid "Check minimal dibbler version" msgstr "Verificar versão do dibbler mínima" msgid "Check minimal dnsmasq version" msgstr "Verifique a versão dnsmasq mínima" msgid "Check netns permission settings" msgstr "Verifique as configurações de permissão netns" msgid "Check ovs conntrack support" msgstr "Verificar suporte conntrack do OVS" msgid "Check ovsdb native interface support" msgstr "Verifique o suporte da interface nativa ovsdb" #, python-format msgid "" "Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of " "subnet %(sub_id)s" msgstr "" "O cidr %(subnet_cidr)s de sub-rede %(subnet_id)s se sobrepõe com o cidr " "%(cidr)s da sub-rede %(sub_id)s" msgid "Cleanup resources of a specific agent type only." msgstr "Limpar recursos somente de um tipo de agente específico." msgid "Client certificate for nova metadata api server." msgstr "Certificado do cliente para o servidor da API de metadados nova." msgid "" "Comma-separated list of : tuples, mapping " "network_device to the agent's node-specific list of virtual functions that " "should not be used for virtual networking. vfs_to_exclude is a semicolon-" "separated list of virtual functions to exclude from network_device. The " "network_device in the mapping should appear in the physical_device_mappings " "list." msgstr "" "Lista separada por vírgulas de tuplas :, " "mapeando network_device para a lista específica do nó do agente de funções " "virtuais que não devem ser usadas para rede virtual. vfs_to_exclude é uma " "lista separada por ponto-e-vírgula de funções virtuais para excluir do " "network_device. O network_device no mapeamento deve aparecer na lista " "physical_device_mappings." msgid "" "Comma-separated list of : tuples mapping " "physical network names to the agent's node-specific physical network device " "interfaces of SR-IOV physical function to be used for VLAN networks. 
All " "physical networks listed in network_vlan_ranges on the server should have " "mappings to appropriate interfaces on each agent." msgstr "" "Lista separada por vírgula de : tuplas que " "mapeiam nomes de rede física para as interfaces de dispositivo de rede " "física específicas do nó do agente da função física SR-IOV a serem usadas " "para redes VLAN. Todas as redes físicas listadas em network_vlan_ranges no " "servidor devem ter mapeamentos para as interfaces apropriadas em cada agente." msgid "" "Comma-separated list of : tuples " "mapping physical network names to the agent's node-specific physical network " "interfaces to be used for flat and VLAN networks. All physical networks " "listed in network_vlan_ranges on the server should have mappings to " "appropriate interfaces on each agent." msgstr "" "Lista separada por vírgulas de : " "tuplas que mapeiam nomes de rede física para interfaces de rede física " "específicas do nó do agente a serem usadas para redes simples e de VLAN. " "Todas as redes físicas listadas em network_vlan_ranges no servidor devem ter " "mapeamentos para as interfaces apropriadas em cada agente." msgid "" "Comma-separated list of : tuples enumerating ranges of GRE " "tunnel IDs that are available for tenant network allocation" msgstr "" "Lista separada por vírgula de tuplas : enumerando as " "faixas de IDs de túnel GRE que estão disponíveis para alocação de redes de " "tenant" msgid "" "Comma-separated list of : tuples enumerating ranges of " "Geneve VNI IDs that are available for tenant network allocation" msgstr "" "Lista separada por vírgula de tuplas : enumerando " "intervalos de IDs Geneve VNI que estão disponíveis para alocação de rede " "locatária" msgid "" "Comma-separated list of : tuples enumerating ranges of " "VXLAN VNI IDs that are available for tenant network allocation" msgstr "" "Lista de valores separados por vírgula de tuplas : " "enumerando faixas de VXLAN VNI IDs que estão disponíveis para alocação de " "redes de tenant" msgid "" "Comma-separated list of the DNS servers which will be used as forwarders." msgstr "" "Lista separada por vírgula dos servidores DNS que será utilizada como " "encaminhadores." msgid "Command to execute" msgstr "Comando a ser executado" msgid "Config file for interface driver (You may also use l3_agent.ini)" msgstr "" "Arquivo de configuração para driver de interface (também é possível usar " "l3_agent.ini)" #, python-format msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s" msgstr "O valor conflitante ethertype %(ethertype)s para CIDR %(cidr)s" msgid "" "Controls whether the neutron security group API is enabled in the server. It " "should be false when using no security groups or using the nova security " "group API." msgstr "" "Controla se a API do grupo de segurança neutron está ativada no servidor. " "Ele deve ser false quando não usa nenhum grupo de segurança ou usa a API do " "grupo de segurança nova." #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" msgstr "" "Não foi possível associar-se à %(host)s:%(port)s após tentar por %(time)d " "segundos" msgid "Could not deserialize data" msgstr "Não foi possível desserializar dados" msgid "" "DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite " "lease times." msgstr "" "Duração de lease de DHCP (em segundos). Use -1 para dizer ao dnsmasq para " "usar lease infinitas vezes." 
msgid "" "DVR deployments for VXLAN/GRE/Geneve underlays require L2-pop to be enabled, " "in both the Agent and Server side." msgstr "" "Implementações de DVR para bases VXLAN/GRE/Geneve requerem que L2-pop esteja " "ativado, no lado do Agente e do Servidor." msgid "" "Database engine for which script will be generated when using offline " "migration." msgstr "" "Mecanismo de bancos de dados para o qual o script será gerado ao usar a " "migração off-line." msgid "Default external networks must be shared to everyone." msgstr "Redes externas padrão devem ser compartilhadas para todos." msgid "" "Default network type for external networks when no provider attributes are " "specified. By default it is None, which means that if provider attributes " "are not specified while creating external networks then they will have the " "same type as tenant networks. Allowed values for external_network_type " "config option depend on the network type values configured in type_drivers " "config option." msgstr "" "Tipo de rede padrão para redes externas quando nenhum atributo de provedor é " "especificado. Por padrão, é Nenhum, o que significa que se os atributos de " "provedor não forem especificados durante a criação de redes externas, eles " "terão o mesmo tipo que as redes locatárias. Os valores permitidos para a " "opção de configuração external_network_type dependem dos valores de tipo de " "rede configurados em type_drivers CapturePostTypes." msgid "" "Default number of RBAC entries allowed per tenant. A negative value means " "unlimited." msgstr "" "Número padrão de entradas RBAC permitido por locatário. Um valor negativo " "significa ilimitado." msgid "" "Default number of resource allowed per tenant. A negative value means " "unlimited." msgstr "" "Número padrão de recurso permitido por locatário. Um valor negativo " "significa ilimitado." msgid "Default security group" msgstr "Grupo de segurança padrão" msgid "Default security group already exists." msgstr "O grupo de segurança padrão já existe." msgid "" "Default value of availability zone hints. The availability zone aware " "schedulers use this when the resources availability_zone_hints is empty. " "Multiple availability zones can be specified by a comma separated string. " "This value can be empty. In this case, even if availability_zone_hints for a " "resource is empty, availability zone is considered for high availability " "while scheduling the resource." msgstr "" "Valor padrão das dicas de zona de disponibilidade. A zona de disponibilidade " "reconhece que os planejadores utilizam esse valor quando " "availability_zone_hints de recursos estiver vazio. Diversas zonas de " "disponibilidades podem ser especificadas por uma sequência separada por " "vírgulas. Esse valor pode ser vazio. Nesse caso, mesmo que " "availability_zone_hints de um recurso esteja vazio, a zona de " "disponibilidade será considerada para alta disponibilidade ao planejar o " "recurso." msgid "" "Define the default value of enable_snat if not provided in " "external_gateway_info." msgstr "" "Defina o valor padrão de enable_snat se não fornecido em " "external_gateway_info." msgid "" "Defines providers for advanced services using the format: :" ":[:default]" msgstr "" "Define provedores para serviços avançados usando o formato::" ":[:default]" msgid "Delete the namespace by removing all devices." msgstr "Excluir o namespace removendo todos os dispositivos." 
#, python-format msgid "Deleting port %s" msgstr "Excluindo porta %s" #, python-format msgid "Deployment error: %(reason)s." msgstr "Erro de implementação: %(reason)s" msgid "Destroy IPsets even if there is an iptables reference." msgstr "Destrua os IPsets mesmo se houver uma referência de iptables." msgid "Destroy all IPsets." msgstr "Destrua todos os IPsets." #, python-format msgid "Device %(dev_name)s in mapping: %(mapping)s not unique" msgstr "Dispositivo %(dev_name)s no mapeamento: %(mapping)s não exclusivo" msgid "Device not found" msgstr "Dispositivo não localizado." msgid "Domain to use for building the hostnames" msgstr "Domínio a ser usado para construir os nomes dos hosts" msgid "Downgrade no longer supported" msgstr "O downgrade não é mais suportado" #, python-format msgid "Driver %s is not unique across providers" msgstr "Driver %s não é único em todos provedores" msgid "Driver for external DNS integration." msgstr "O driver para intgração do DNS externa." msgid "Driver for security groups firewall in the L2 agent" msgstr "Driver para firewall para grupos de segurança no agente L2" msgid "Driver to use for scheduling network to DHCP agent" msgstr "Driver a ser usado para planejar a rede para o agente DHCP" msgid "Driver to use for scheduling router to a default L3 agent" msgstr "Driver a ser usado para planejar o roteador para um agente L3 padrão" msgid "" "Driver used for ipv6 prefix delegation. This needs to be an entry point " "defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg for " "entry points included with the neutron source." msgstr "" "Driver usado para delegação de prefixo ipv6. Este precisa ser um ponto de " "entrada definido no namespace neutron.agent.linux.pd_drivers. Consulte setup." "cfg para pontos de entrada incluídos com a origem neutron." #, python-format msgid "" "Duplicate L3HARouterAgentPortBinding is created for router(s) %(router)s. " "Database cannot be upgraded. Please, remove all duplicates before upgrading " "the database." msgstr "" "L3HARouterAgentPortBinding duplicado é criado para um ou mais roteadores " "%(router)s. O banco de dados não pode ser atualizado. Remova todas as " "duplicatas antes de fazer upgrade do banco de dados." msgid "Duplicate Security Group Rule in POST." msgstr "Regra do Grupo de Segurança Duplicada no Autoteste Inicial." msgid "Duplicate address detected" msgstr "Endereço duplicado detectado" #, python-format msgid "ERROR: %s" msgstr "ERRO: %s" msgid "" "ERROR: Unable to find configuration file via the default search paths (~/." "neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!" msgstr "" "ERRO: Não é possível localizar o arquivo de configuração através dos " "caminhos de procura padrão (~/.neutron/, ~/, /etc/neutron/, /etc/) e a opção " "'--config-file'!" msgid "" "Either one of parameter network_id or router_id must be passed to _get_ports " "method." msgstr "" "Um dos parâmetros network_id ou router_id deve ser transmitido para o método " "_get_ports." msgid "Either subnet_id or port_id must be specified" msgstr "subnet_id ou port_id deve ser especificado" msgid "Enable HA mode for virtual routers." msgstr "Ative o modo de alta disponibilidade para roteadores virtuais." msgid "Enable SSL on the API server" msgstr "Habilite SSL no servidor de API" msgid "" "Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 " "plugin using linuxbridge mechanism driver" msgstr "" "Ative o VXLAN no agente. 
Pode ser ativado quando o agente é gerenciado pelo " "plug-in ml2 usando o driver do mecanismo linuxbridge" msgid "" "Enable services on an agent with admin_state_up False. If this option is " "False, when admin_state_up of an agent is turned False, services on it will " "be disabled. Agents with admin_state_up False are not selected for automatic " "scheduling regardless of this option. But manual scheduling to such agents " "is available if this option is True." msgstr "" "Ativar os serviços em um agente com admin_state_up False. Se essa opção for " "False, quando admin_state_up de um agente tornar-se False, os serviços nele " "serão desativados. Os agentes com admin_state_up False não são selecionados " "para planejamento automático, independentemente dessa opção. Mas o " "planejamento manual para tais agentes estará disponível se essa opção for " "True." msgid "" "Enables IPv6 Prefix Delegation for automatic subnet CIDR allocation. Set to " "True to enable IPv6 Prefix Delegation for subnet allocation in a PD-capable " "environment. Users making subnet creation requests for IPv6 subnets without " "providing a CIDR or subnetpool ID will be given a CIDR via the Prefix " "Delegation mechanism. Note that enabling PD will override the behavior of " "the default IPv6 subnetpool." msgstr "" "Ativa o IPv6 Prefix Delegation para alocação automática de CIDR de sub-rede. " "Configure para True para ativar o IPv6 Prefix Delegation para alocação de " "sub-rede em um ambiente apto para PD. Os usuários que fazem solicitações de " "criação de sub-rede para sub-redes IPv6 sem fornecer um CIDR ou um ID de " "conjunto de sub-redes receberão um CIDR por meio do mecanismo Prefix " "Delegation. Observe que a ativação do PD substitui o comportamento do " "conjunto de sub-redes IPv6 padrão. " msgid "" "Enables the dnsmasq service to provide name resolution for instances via DNS " "resolvers on the host running the DHCP agent. Effectively removes the '--no-" "resolv' option from the dnsmasq process arguments. Adding custom DNS " "resolvers to the 'dnsmasq_dns_servers' option disables this feature." msgstr "" "Permite que o serviço dnsmasq forneça resolução de nome para instâncias por " "meio dos resolvedores de DNS no host que executa o agente DHCP. Remove " "efetivamente a opção '--no-resolv' dos argumentos do processo dnsmasq. A " "inclusão dos resolvedores de DNS customizados na opção " "'dnsmasq_dns_servers' desativa esse recurso." #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "Erro %(reason)s ao tentar a operação." #, python-format msgid "Error parsing dns address %s" msgstr "Erro ao analisar endereço dns %s" #, python-format msgid "Error while reading %s" msgstr "Erro ao ler %s" #, python-format msgid "" "Exceeded %s second limit waiting for address to leave the tentative state." msgstr "" "Excedido limite de %s segundos ao aguardar o endereço sair do estado de " "tentativa." msgid "Existing prefixes must be a subset of the new prefixes" msgstr "Prefixos existentes devem ser um subconjunto dos novos prefixos" #, python-format msgid "" "Exit code: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: " "%(stderr)s" msgstr "" "Código de saída: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; " "Stderr: %(stderr)s" #, python-format msgid "Extension %(driver)s failed." msgstr "Extensão %(driver)s com falha." #, python-format msgid "" "Extension driver %(driver)s required for service plugin %(service_plugin)s " "not found." 
msgstr "" "Driver da extensão %(driver)s necessário para o plug-in de serviço " "%(service_plugin)s não localizado." msgid "" "Extension to use alongside ml2 plugin's l2population mechanism driver. It " "enables the plugin to populate VXLAN forwarding table." msgstr "" "Extensão a ser usada ao lado do driver do mecanismo l2population do plug-in " "ml2. Ela permite que o plug-in preencha a tabela de encaminhamento de VXLAN." #, python-format msgid "Extension with alias %s does not exist" msgstr "A extensão com %s não existe" msgid "Extensions list to use" msgstr "Lista de extensões a serem usadas" #, python-format msgid "External IP %s is the same as the gateway IP" msgstr "O IP externo %s é o mesmo que o IP de gateway" #, python-format msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found." msgstr "" "Falha ao reagendar o roteador %(router_id)s: nenhum agente l3 elegível " "encontrado." #, python-format msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s." msgstr "" "Falha ao planejar o roteador %(router_id)s para o Agente L3 %(agent_id)s." #, python-format msgid "" "Failed to create port on network %(network_id)s, because fixed_ips included " "invalid subnet %(subnet_id)s" msgstr "" "Falha ao criar a porta na rede %(network_id)s, porque fixed_ips incluía uma " "sub-rede inválida %(subnet_id)s" #, python-format msgid "Failed to locate source for %s." msgstr "Falha ao localizar origem para %s." msgid "Failed to remove supplemental groups" msgstr "Falha ao remover grupos suplementares" #, python-format msgid "Failed to set gid %s" msgstr "Falha ao configurar gid %s" #, python-format msgid "Failed to set uid %s" msgstr "Falha ao configurar uid %s" #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "Falha ao setar porta do túnel %(type)s para %(ip)s" msgid "Failure applying iptables rules" msgstr "Falha ao aplicar regras do iptables" #, python-format msgid "Failure waiting for address %(address)s to become ready: %(reason)s" msgstr "Falha ao aguardar o endereço %(address)s ficar pronto: %(reason)s" msgid "Flat provider networks are disabled" msgstr "Redes de provedor simples são desativadas." msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "Para protocolos TCP/UDP, port_range_min deve ser <= port_range_max" msgid "Force ip_lib calls to use the root helper" msgstr "Força chamadas ip_lib para utilizar o ajudante raiz" msgid "Gateway IP version inconsistent with allocation pool version" msgstr "" "Versão de IP do gateway inconsistente com a versão do conjunto de alocações." msgid "Gateway is not valid on subnet" msgstr "O gateway não é válido na sub-rede" msgid "" "Group (gid or name) running metadata proxy after its initialization (if " "empty: agent effective group)." msgstr "" "Grupo (gid ou nome) executando proxy de metadados após sua inicialização (se " "vazio: grupo efetivo do agente)." msgid "Group (gid or name) running this process after its initialization" msgstr "Grupo (gid ou nome) executando esse processo após sua inicialização" msgid "" "Hostname to be used by the Neutron server, agents and services running on " "this machine. All the agents and services running on this machine must use " "the same host value." msgstr "" "O nome do host a ser usado pelo servidor, agentes e serviços do Neutron em " "execução nesta máquina. Todos os agentes e serviços em execução nesta " "máquina devem usar o mesmo valor do host." 
#, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" "min) is missing." msgstr "" "O código do ICMP (port-range-max) %(value)s é fornecido, mas o tipo do ICMP " "(port-range-min) está ausente." msgid "ID of network" msgstr "ID da rede" msgid "ID of network to probe" msgstr "ID da rede para análise" msgid "ID of probe port to delete" msgstr "ID da porta da análise a ser excluída" msgid "ID of probe port to execute command" msgstr "ID da porta da análise para executar comando" msgid "ID of the router" msgstr "ID do roteador" #, python-format msgid "IP address %(ip)s already allocated in subnet %(subnet_id)s" msgstr "Endereço IP %(ip)s já alocado na sub-rede %(subnet_id)s" #, python-format msgid "IP address %(ip)s does not belong to subnet %(subnet_id)s" msgstr "O endereço IP %(ip)s não pertence à sub-rede %(subnet_id)s" msgid "IP allocation failed. Try again later." msgstr "A alocação de IP falhou. Tente novamente mais tarde" msgid "IP allocation requires subnet_id or ip_address" msgstr "A alocação de IP requer subnet_id ou ip_address" #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables rules:\n" "%s" msgstr "" "IPTablesManager.apply falhou ao aplicar o seguinte conjunto de regras de " "tabelas de IP: \n" "%s" msgid "IPv6 Address Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "" "O Modo de endereço IPv6 deve ser SLAAC ou Stateless para delegação de " "prefixo." msgid "IPv6 RA Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "O modo IPv6 RA deve ser SLAAC ou Stateless para delegação de prefixo." #, python-format msgid "" "IPv6 address %(ip)s cannot be directly assigned to a port on subnet " "%(subnet_id)s as the subnet is configured for automatic addresses" msgstr "" "O endereço IPv6 %(ip)s não pode ser designado diretamente a uma porta na sub-" "rede %(subnet_id)s porque a sub-rede está configurada para endereços " "automáticos" #, python-format msgid "" "IPv6 subnet %s configured to receive RAs from an external router cannot be " "added to Neutron Router." msgstr "" "A sub-rede IPv6 %s configurada para receber RAs de um roteador externo não " "pode ser incluída ao Neutron Router." msgid "" "If True, then allow plugins that support it to create VLAN transparent " "networks." msgstr "" "Se True, então permita que plug-ins que suportam-no criem redes " "transparentes da VLAN." msgid "Illegal IP version number" msgstr "Número de versão de IP ilegal" msgid "" "In some cases the Neutron router is not present to provide the metadata IP " "but the DHCP server can be used to provide this info. Setting this value " "will force the DHCP server to append specific host routes to the DHCP " "request. If this option is set, then the metadata service will be activated " "for all the networks." msgstr "" "Em alguns casos, o roteador Neutron não está presente para fornecer o IP de " "metadados, mas o servidor DHCP poderá ser usado para fornecer essas " "informações. A configuração desse valor forçará o servidor DHCP a anexar " "rotas de host específicas à solicitação DHCP. Se essa opção for configurada, " "o serviço de metadados será ativado para todas as redes." msgid "" "Indicates that this L3 agent should also handle routers that do not have an " "external network gateway configured. This option should be True only for a " "single agent in a Neutron deployment, and may be False for all agents if all " "routers must have an external network gateway." 
msgstr "" "Indica que esse agente L3 também deve manipular roteadores que não possuírem " "um gateway de rede externo configurado. Essa opção deverá ser True somente " "para um agente único em uma implementação Neutron, e poderá ser False para " "todos os agentes se todos os roteadores tiverem um gateway de rede externo. " #, python-format msgid "Instance of class %(module)s.%(class)s must contain _cache attribute" msgstr "" "A instância da classe %(module)s.%(class)s deve conter o atributo _cache" #, python-format msgid "Insufficient prefix space to allocate subnet size /%s" msgstr "Espaço de prefixo insuficiente para alocar o tamanho da sub-rede /%s" msgid "Insufficient rights for removing default security group." msgstr "Direitos insuficientes para remover o grupo de segurança padrão." msgid "" "Integration bridge to use. Do not change this parameter unless you have a " "good reason to. This is the name of the OVS integration bridge. There is one " "per hypervisor. The integration bridge acts as a virtual 'patch bay'. All VM " "VIFs are attached to this bridge and then 'patched' according to their " "network connectivity." msgstr "" "Ponte de integração a ser utilizada. Não altere esse parâmetro, a menos que " "haja uma boa razão para isso. Esse é o nome da ponte de integração do OVS. " "Há uma por hypervisor. A ponte de integração atua como um 'compartimento de " "correção' virtual. Todos os VIFs da MV são conectados a essa ponte e, em " "seguida, 'corrigidos' de acordo com sua conectividade de rede." msgid "Interface to monitor" msgstr "Interface para monitorar" msgid "" "Interval between checks of child process liveness (seconds), use 0 to disable" msgstr "" "Intervalo entre verificações de um processo-filho em tempo real (segundos), " "use 0 para desativar" msgid "Interval between two metering measures" msgstr "Intervalo entre duas medidas de medição" msgid "Interval between two metering reports" msgstr "Intervalo entre dois relatórios de medição" #, python-format msgid "Invalid Device %(dev_name)s: %(reason)s" msgstr "Dispositivo Inválido %(dev_name)s:%(reason)s" #, python-format msgid "" "Invalid action '%(action)s' for object type '%(object_type)s'. Valid " "actions: %(valid_actions)s" msgstr "" "Ação inválida '%(action)s' para o tipo de objeto '%(object_type)s'. Ações " "válidas: %(valid_actions)s" #, python-format msgid "" "Invalid authentication type: %(auth_type)s, valid types are: " "%(valid_auth_types)s" msgstr "" "Tipo de autenticação inválido: %(auth_type)s, os tipos válidos são: " "%(valid_auth_types)s" #, python-format msgid "Invalid ethertype %(ethertype)s for protocol %(protocol)s." msgstr "Ethertype %(ethertype)s inválido para o protocolo %(protocol)s." #, python-format msgid "Invalid format: %s" msgstr "Formato inválido: %s" #, python-format msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s" msgstr "" "Estado da instância inválido: %(state)s, os estados válidos são: " "%(valid_states)s" #, python-format msgid "Invalid mapping: '%s'" msgstr "Mapeamento inválido: '%s'" #, python-format msgid "Invalid pci slot %(pci_slot)s" msgstr "Slot pci inválido %(pci_slot)s" #, python-format msgid "Invalid provider format. Last part should be 'default' or empty: %s" msgstr "" "Formato de provedor inválido. 
Última parte deve ser 'default' ou vazia: %s" #, python-format msgid "Invalid resource type %(resource_type)s" msgstr "Tipo de recurso inválido %(resource_type)s" #, python-format msgid "Invalid route: %s" msgstr "Rota inválida: %s" msgid "Invalid service provider format" msgstr "Formato inválido de provedor de serviço" #, python-format msgid "" "Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255." msgstr "" "Valor inválido para ICMP %(field)s (%(attr)s) %(value)s. Deve ser de 0 a 255." #, python-format msgid "Invalid value for port %(port)s" msgstr "Valor inválido para a porta %(port)s" msgid "" "Iptables mangle mark used to mark ingress from external network. This mark " "will be masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "Marca de tratamento de Iptables usada para marcar ingresso de rede externa. " "Essa marca será mascarada com 0xffff, de modo que apenas os 16 bits " "inferiores serão usados." msgid "" "Iptables mangle mark used to mark metadata valid requests. This mark will be " "masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "Marca de tratamento de Iptables usada para marcar solicitações válidas de " "metadados. Essa marca será mascarada com 0xffff, de modo que apenas os 16 " "bits inferiores serão usados." msgid "Keepalived didn't respawn" msgstr "Keepalived não sofreu spawn novamente" msgid "Keepalived didn't spawn" msgstr "Keepalived não sofreu spawn" #, python-format msgid "" "Kernel HZ value %(value)s is not valid. This value must be greater than 0." msgstr "" "Valor do Kernel HZ %(value)s não é válido. Esse valor deve ser maior que 0." msgid "L3 agent failure to setup NAT for floating IPs" msgstr "Falha do agente L3 ao configurar o NAT para IPs flutuantes" msgid "L3 agent failure to setup floating IPs" msgstr "Falha do agente L3 ao configurar IPs flutuantes" msgid "Limit number of leases to prevent a denial-of-service." msgstr "Limitar o número de concessões para impedir uma negação de serviço." msgid "List of <tun_min>:<tun_max>" msgstr "Lista de <tun_min>:<tun_max>" msgid "" "List of <physical_network>:<vlan_min>:<vlan_max> or <physical_network> " "specifying physical_network names usable for VLAN provider and tenant " "networks, as well as ranges of VLAN tags on each available for allocation to " "tenant networks." msgstr "" "Lista de <physical_network>:<vlan_min>:<vlan_max> ou <physical_network> " "especificando nomes physical_network utilizáveis para provedores VLAN e " "redes de tenant, bem como faixas de tags de VLAN em cada um disponível para " "alocação pelas redes de tenant." msgid "" "List of network type driver entrypoints to be loaded from the neutron.ml2." "type_drivers namespace." msgstr "" "Lista de pontos de entrada do driver de tipo de rede que serão carregados do " "namespace neutron.ml2.type_drivers." msgid "" "List of physical_network names with which flat networks can be created. Use " "default '*' to allow flat networks with arbitrary physical_network names. " "Use an empty list to disable flat networks." msgstr "" "Lista de nomes physical_network com os quais redes simples podem ser " "criadas. Utilize o padrão '*' para permitir redes simples com nomes " "physical_network arbitrários. Use uma lista vazia para desativar redes " "simples." msgid "Location for Metadata Proxy UNIX domain socket." msgstr "Local para soquete de domínio UNIX de Proxy de Metadados." msgid "Location of Metadata Proxy UNIX domain socket" msgstr "Local de soquete de domínio UNIX de Proxy de Metadados" msgid "Location to store DHCP server config files." 
msgstr "Local para armazenar arquivos de configuração do servidor DHCP" msgid "Location to store IPv6 PD files." msgstr "Local para armazenar arquivos IPv6 PD." msgid "Location to store IPv6 RA config files" msgstr "Local para armazenar arquivos de configuração RA IPv6" msgid "Location to store child pid files" msgstr "Local para armazenar arquivos pid filhos" msgid "Log agent heartbeats" msgstr "Registrar pulsações do agente" msgid "" "MTU of the underlying physical network. Neutron uses this value to calculate " "MTU for all virtual network components. For flat and VLAN networks, neutron " "uses this value without modification. For overlay networks such as VXLAN, " "neutron automatically subtracts the overlay protocol overhead from this " "value. Defaults to 1500, the standard value for Ethernet." msgstr "" "O MTU da rede física subjacente. O Neutron usa esse valor para calcular o " "MTU de todos os componentes de rede virtual. Para redes simples e VLAN, o " "Neutron usa esse valor sem modificação. Para redes sobrepostas, como VXLAN, " "o Neutron subtrai automaticamente a sobrecarga de protocolo sobreposta desse " "valor. Padronizado para 1500, o valor padrão para Ethernet." msgid "MTU size of veth interfaces" msgstr "Tamanho MTU de interfaces vEth" msgid "Make the l2 agent run in DVR mode." msgstr "Faça com que o agente l2 seja executado no modo DVR." msgid "Malformed request body" msgstr "Corpo da solicitação malformado" msgid "MaxRtrAdvInterval setting for radvd.conf" msgstr "Configuração de MaxRtrAdvInterval para o radvd.conf" msgid "Maximum number of DNS nameservers per subnet" msgstr "Número máximo de servidores de nomes DNS por sub-rede" msgid "" "Maximum number of L3 agents which a HA router will be scheduled on. If it is " "set to 0 then the router will be scheduled on every agent." msgstr "" "O número máximo de agentes L3 em que um roteador de HA será planejado. Se " "configurado para 0, o roteador será planejado em cada agente." msgid "Maximum number of allowed address pairs" msgstr "Número máximo de pares de endereço permitido" msgid "Maximum number of host routes per subnet" msgstr "Número máximo de rotas do host por sub-rede" msgid "Maximum number of routes per router" msgstr "Número máximo de rotas por roteador" msgid "" "Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': deduce " "mode from metadata_proxy_user/group values, 'user': set metadata proxy " "socket mode to 0o644, to use when metadata_proxy_user is agent effective " "user or root, 'group': set metadata proxy socket mode to 0o664, to use when " "metadata_proxy_group is agent effective group or root, 'all': set metadata " "proxy socket mode to 0o666, to use otherwise." msgstr "" "Modo de soquete de domínio UNIX de proxy de metadados, 4 valores permitidos: " "'deduce': deduzir modo de valores de metadata_proxy_user/group, 'user': " "definir modo de soquete de proxy de metadados para 0o644, para uso quando " "metadata_proxy_user for usuário ou raiz de agente efetivo, 'group': definir " "modo de soquete de proxy de metadados para 0o664, para uso quando " "metadata_proxy_group for grupo ou raiz de agente efetivo, 'all': definir " "modo de soquete de proxy de metadados para 0o666, para uso de outra forma." msgid "Metering driver" msgstr "Driver de medição" msgid "MinRtrAdvInterval setting for radvd.conf" msgstr "Configuração de MinRtrAdvInterval para o radvd.conf" msgid "Minimize polling by monitoring ovsdb for interface changes." 
msgstr "Minimizar pesquisa monitorando ovsdb para alterações da interface." #, python-format msgid "Missing key in mapping: '%s'" msgstr "Chave ausente no mapeamento: '%s'" msgid "" "Multicast group for VXLAN. When configured, will enable sending all " "broadcast traffic to this multicast group. When left unconfigured, will " "disable multicast VXLAN mode." msgstr "" "O grupo multicast para VXLAN. Quando configurado, permitirá o envio de todo " "o tráfego de transmissão para esse grupo multicast. Quando desconfigurado, " "desativa o modo VXLAN multicast." msgid "" "Multicast group(s) for vxlan interface. A range of group addresses may be " "specified by using CIDR notation. Specifying a range allows different VNIs " "to use different group addresses, reducing or eliminating spurious broadcast " "traffic to the tunnel endpoints. To reserve a unique group for each possible " "(24-bit) VNI, use a /8 such as 239.0.0.0/8. This setting must be the same on " "all the agents." msgstr "" "Um ou mais grupos multicast para a interface VXLAN. Um intervalo de " "endereços de grupo pode ser especificado usando a notação CIDR. Especificar " "um intervalo permite que diferentes VNIs utilizem diferentes endereços de " "grupo, reduzindo ou eliminando tráfego de transmissão falso para os " "terminais do túnel. Para reservar um grupo exclusivo para cada VNI (24 bits) " "possível, use um /8 como 239.0.0.0/8. Essa configuração deve ser a mesma em " "todos os agentes." #, python-format msgid "Multiple default providers for service %s" msgstr "Mútliplos provedores padrão para o serviço %s" #, python-format msgid "Multiple providers specified for service %s" msgstr "Mútliplos provedores especificados para o serviço %s" msgid "Multiple tenant_ids in bulk security group rule create not allowed" msgstr "" "Vários tenant_ids na criação da regra do grupo de segurança em massa não " "permitido" msgid "Must also specify protocol if port range is given." msgstr "" "Deve-se também especificar o protocolo se o intervalo de portas for " "fornecido." msgid "Must specify one or more actions on flow addition or modification" msgstr "Deve especificar uma ou mais ações na adição ou modificação do fluxo" msgid "Name of Open vSwitch bridge to use" msgstr "Nome da ponte Open vSwitch a ser usado" msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "" "Nome da região do nova para utilização. Útil se keystone gerencia mais de " "uma região." msgid "Namespace of the router" msgstr "Namespace do roteador" msgid "Native pagination depend on native sorting" msgstr "A paginação nativa depende da classificação nativa" #, python-format msgid "" "Need to apply migrations from %(project)s contract branch. This will require " "all Neutron server instances to be shutdown before proceeding with the " "upgrade." msgstr "" "É necessário aplicar migrações a partir da ramificação de contrato " "%(project)s. Isso requer que todas as instâncias do servidor Neutron sejam " "encerradas antes de continuar com o upgrade." 
msgid "Negative delta (downgrade) not supported" msgstr "Delta negativo (downgrade) não suportado" msgid "Negative relative revision (downgrade) not supported" msgstr "Revisão relativa negativa (downgrade) não suportada" #, python-format msgid "Network %s does not contain any IPv4 subnet" msgstr "A rede %s não contém nenhuma sub-rede IPv4" #, python-format msgid "Network %s is not a valid external network" msgstr "A rede %s não é uma rede externa válida" #, python-format msgid "Network %s is not an external network" msgstr "A rede %s não é uma rede externa" #, python-format msgid "" "Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges " "%(excluded_ranges)s was not found." msgstr "" "Rede de tamanho %(size)s, do intervalo de IP %(parent_range)s, excluindo " "intervalos de IP %(excluded_ranges)s não foi localizada." #, python-format msgid "Network type value '%s' not supported" msgstr "Valor do tipo de rede '%s' não suportado" msgid "Network type value needed by the ML2 plugin" msgstr "Valor de tipo de rede necessário pelo plug-in ML2" msgid "Neutron core_plugin not configured!" msgstr "Neutron core_plugin não configurado!" msgid "No default router:external network" msgstr "Nenhuma rede router:external padrão" #, python-format msgid "No default subnetpool found for IPv%s" msgstr "Nenhum conjunto de sub-redes padrão localizado para IPv%s" msgid "No default subnetpools defined" msgstr "Nenhum conjunto de sub-redes padrão definido" #, python-format msgid "No more IP addresses available for subnet %(subnet_id)s." msgstr "Nenhum outro endereço IP disponível para a sub-rede %(subnet_id)s." msgid "No offline migrations pending." msgstr "Nenhuma migração off-line pendente." #, python-format msgid "No shared key in %s fields" msgstr "Nenhuma chave compartilhada nos campos %s" msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "" "Não é permitido designar manualmente um roteador para um agente no modo " "'dvr'." msgid "Not allowed to manually remove a router from an agent in 'dvr' mode." msgstr "" "Não é permitido remover manualmente um roteador de um agente no modo 'dvr'." msgid "" "Number of DHCP agents scheduled to host a tenant network. If this number is " "greater than 1, the scheduler automatically assigns multiple DHCP agents for " "a given tenant network, providing high availability for DHCP service." msgstr "" "Número de agentes DHCP planejados para hospedar uma rede de um locatário. Se " "esse número for maior que 1, o planejador designará automaticamente vários " "agentes DHCP para uma determinada rede locatária, fornecendo alta " "disponibilidade para o serviço DHCP." msgid "Number of backlog requests to configure the metadata server socket with" msgstr "" "Número de solicitações de lista não processada para configurar o soquete do " "servidor de metadados com" msgid "Number of backlog requests to configure the socket with" msgstr "Número de requisições de backlog para configurar no socket" msgid "" "Number of bits in an ipv4 PTR zone that will be considered network prefix. " "It has to align to byte boundary. Minimum value is 8. Maximum value is 24. " "As a consequence, range of values is 8, 16 and 24" msgstr "" "Número de bits em uma zona PTR IPV4 que será considerada como um prefixo de " "rede. Ele deve estar alinhado ao limite de byte. O valor mínimo é 8. O valor " "máximo é 24. Consequentemente, um intervalo de valores é 8, 16 e 24." msgid "" "Number of bits in an ipv6 PTR zone that will be considered network prefix. 
" "It has to align to nyble boundary. Minimum value is 4. Maximum value is 124. " "As a consequence, range of values is 4, 8, 12, 16,..., 124" msgstr "" "Número de bits em uma zona PTR IPV6 que será considerada como um prefixo de " "rede. Ele deve estar alinhado ao limite nyble. O valor mínimo é 4. O valor " "máximo é 124. Consequentemente, um intervalo de valores é 4, 8, 12, " "16, ...., 24." msgid "" "Number of floating IPs allowed per tenant. A negative value means unlimited." msgstr "" "Número de IPs flutuantes permitido por locatário. Um valor negativo " "significa ilimitado." msgid "" "Number of networks allowed per tenant. A negative value means unlimited." msgstr "" "Número de redes permitidas por locatário. Um valor negativo significa " "ilimitado." msgid "Number of ports allowed per tenant. A negative value means unlimited." msgstr "" "Número de portas permitidas por locatário. Um valor negativo significa " "ilimitado." msgid "Number of routers allowed per tenant. A negative value means unlimited." msgstr "" "Número de roteadores permitidos por locatário. Um valor negativo significa " "ilimitado." msgid "" "Number of seconds between sending events to nova if there are any events to " "send." msgstr "" "Número de segundos entre o envio de eventos para nova se houver qualquer " "evento a enviar." msgid "Number of seconds to keep retrying to listen" msgstr "Número de segundos para continuar tentando escutar" msgid "" "Number of security groups allowed per tenant. A negative value means " "unlimited." msgstr "" "Número de grupos de segurança permitidos por locatário. Um valor negativo " "significa ilimitado." msgid "" "Number of security rules allowed per tenant. A negative value means " "unlimited." msgstr "" "Número de regras de segurança permitidas por locatário. Um valor negativo " "significa ilimitado." msgid "" "Number of separate worker processes for metadata server (defaults to half of " "the number of CPUs)" msgstr "" "Número de processos do trabalhador separados para o servidor de metadados " "(padrão para metade do número de CPUs)" msgid "Number of subnets allowed per tenant, A negative value means unlimited." msgstr "" "Número de sub-redes permitidas por locatário. Um valor negativo significa " "ilimitado." msgid "" "Number of threads to use during sync process. Should not exceed connection " "pool size configured on server." msgstr "" "Número de threads utilizadas durante o processo de sincronização. Não devem " "exceder o limite do pool de conexões configurado no servidor." msgid "OK" msgstr "OK" msgid "" "OVS datapath to use. 'system' is the default value and corresponds to the " "kernel datapath. To enable the userspace datapath set this value to 'netdev'." msgstr "" "O caminho de dados do OVS a ser utilizado. 'system' é o valor padrão e " "corresponde ao caminho de dados do kernel. Para ativar o caminho de dados do " "espaço do usuário, configure esse valor para 'netdev'." msgid "OVS vhost-user socket directory." msgstr "O diretório de soquete do usuário vhost do OVS." msgid "Only admin can view or configure quota" msgstr "Somente admin pode visualizar ou configurar cota" msgid "Only admin is authorized to access quotas for another tenant" msgstr "" "Somente o administrador está autorizado a acessar as cotas para outro " "locatário" msgid "Only admins can manipulate policies on objects they do not own" msgstr "" "Apenas administradores podem manipular políticas em objetos que não " "pertencem a eles." 
msgid "Only allowed to update rules for one security profile at a time" msgstr "Permitido apenas atualizar regras para um perfil de segurança por vez" msgid "Only remote_ip_prefix or remote_group_id may be provided." msgstr "Apenas remote_ip_prefix ou remote_group_id pode ser fornecido." #, python-format msgid "Operation not supported on device %(dev_name)s" msgstr "Operação não suportada no dispositivo %(dev_name)s" msgid "" "Ordered list of network_types to allocate as tenant networks. The default " "value 'local' is useful for single-box testing but provides no connectivity " "between hosts." msgstr "" "Lista ordenada de network_types a serem alocados como redes locatárias. O " "valor padrão 'local' é útil para teste single-box, mas não fornece nenhuma " "conectividade entre os hosts." msgid "Override the default dnsmasq settings with this file." msgstr "Sobrescreva as configurações padrão de dnsmasq com este arquivo." msgid "Owner type of the device: network/compute" msgstr "Tipo de proprietário do dispositivo: rede/cálculo" msgid "POST requests are not supported on this resource." msgstr "Requisições POST não são suportadas neste recurso." #, python-format msgid "Package %s not installed" msgstr "Pacote %s não instalado" #, python-format msgid "Parsing bridge_mappings failed: %s." msgstr "Falha na análise de bridge_mappings: %s." msgid "Password for connecting to designate in admin context" msgstr "Senha para conexão ao Designate no contexto de admnistrador" msgid "Path to PID file for this process" msgstr "Caminho para o arquivo PID para este processo" msgid "Path to the router directory" msgstr "Caminho para o diretório do roteador" msgid "Peer patch port in integration bridge for tunnel bridge." msgstr "" "Porta de correção do peer na ponte de integração para a ponte do túnel." msgid "Peer patch port in tunnel bridge for integration bridge." msgstr "" "Porta da correção do peer na ponte do túnel para a ponte de integração." msgid "Phase upgrade options do not accept revision specification" msgstr "Opções de upgrade de fase não aceitam especificação de revisão" msgid "Ping timeout" msgstr "Tempo Limite de Ping" #, python-format msgid "Port %(id)s does not have fixed ip %(address)s" msgstr "A porta %(id)s não possui IP fixo %(address)s" #, python-format msgid "Port %s does not exist" msgstr "A porta %s não existe" #, python-format msgid "" "Port %s has multiple fixed IPv4 addresses. Must provide a specific IPv4 " "address when assigning a floating IP" msgstr "" "A porta %s tem vários endereços IPv4 fixos. Deve-se fornecer um endereço " "IPv4 específico ao designar um IP flutuante" msgid "Prefix Delegation can only be used with IPv6 subnets." msgstr "A delegação de prefixo só pode ser usada com sub-redes IPv6." msgid "Private key of client certificate." msgstr "Chave privada de certificado do cliente." #, python-format msgid "Probe %s deleted" msgstr "Análise %s excluída" #, python-format msgid "Probe created : %s " msgstr "Análise criada: %s " msgid "Process is already started" msgstr "O processo já está iniciado" msgid "Process is not running." msgstr "O processo não está em execução." 
msgid "Protocol to access nova metadata, http or https" msgstr "Protocolo para acessar os metadados de nova, http ou https" #, python-format msgid "Provider name %(name)s is limited by %(len)s characters" msgstr "O nome do provedor %(name)s é limitado a %(len)s caracteres" #, python-format msgid "RBAC policy of type %(object_type)s with ID %(id)s not found" msgstr "Política RBAC do tipo %(object_type)s com o ID %(id)s não localizada" #, python-format msgid "" "RBAC policy on object %(object_id)s cannot be removed because other objects " "depend on it.\n" "Details: %(details)s" msgstr "" "A política RBAC no objeto %(object_id)s não pode ser removida porque outros " "objetos dependem dela.\n" "Detalhes: %(details)s" msgid "" "Range of seconds to randomly delay when starting the periodic task scheduler " "to reduce stampeding. (Disable by setting to 0)" msgstr "" "Intervalo de segundos para atrasar aleatoriamente quando iniciar o " "planejador de tarefas periódicas para reduzir registro de data e hora. " "(Desativar configurando como 0)" msgid "Ranges must be in the same IP version" msgstr "Os intervalos devem estar na mesma versão do IP" msgid "Ranges must be netaddr.IPRange" msgstr "Os intervalos devem ser netaddr.IPRange" msgid "Ranges must not overlap" msgstr "Os intervalos não devem se sobrepor" #, python-format msgid "" "Release aware branch labels (%s) are deprecated. Please switch to expand@ " "and contract@ labels." msgstr "" "Rótulos de ramificações cientes da Liberação (%s) foram descontinuados. " "Alterne para os rótulos expand@ e contract@." msgid "Remote metadata server experienced an internal server error." msgstr "" "O servidor de metadados remoto experimentou um erro de servidor interno." msgid "" "Repository does not contain HEAD files for contract and expand branches." msgstr "" "O repositório não contém arquivos HEAD para ramificações de contrato e de " "expansão." msgid "" "Representing the resource type whose load is being reported by the agent. " "This can be \"networks\", \"subnets\" or \"ports\". When specified (Default " "is networks), the server will extract particular load sent as part of its " "agent configuration object from the agent report state, which is the number " "of resources being consumed, at every report_interval.dhcp_load_type can be " "used in combination with network_scheduler_driver = neutron.scheduler." "dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is " "WeightScheduler, dhcp_load_type can be configured to represent the choice " "for the resource being balanced. Example: dhcp_load_type=networks" msgstr "" "Representando o tipo de recurso cujo carregamento está sendo relatado pelo " "agente. Isso pode ser \"redes\", \"sub-redes\" ou \"portas\". Quando " "especificado (o padrão é redes), o servidor irá extrair carregamento " "particular enviado como parte do seu objeto de configuração do agente do " "relatório de estado do agente, que é o número de recursos sendo consumido, " "em cada report_interval.dhcp_load_type pode ser usado em combinação com " "network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler." "WeightScheduler Quando o network_scheduler_driver é WeightScheduler, " "dhcp_load_type pode ser configurado para representar a opção para o recurso " "que está sendo balanceado. Exemplo: dhcp_load_type=networks" msgid "Request Failed: internal server error while processing your request." msgstr "" "Falha de solicitação: erro do servidor interno ao processar sua solicitação." 
msgid "" "Reset flow table on start. Setting this to True will cause brief traffic " "interruption." msgstr "" "Reconfigure a tabela de fluxo ao iniciar. Configurar isso como True causará " "uma breve interrupção do tráfego." #, python-format msgid "Resource %(resource)s %(resource_id)s could not be found." msgstr "O recurso %(resource)s %(resource_id)s não pôde ser localizado." #, python-format msgid "Resource %(resource_id)s of type %(resource_type)s not found" msgstr "Recurso %(resource_id)s do tipo %(resource_type)s não localizado" #, python-format msgid "" "Resource '%(resource_id)s' is already associated with provider " "'%(provider)s' for service type '%(service_type)s'" msgstr "" "Recurso '%(resource_id)s' já está associado com o provedor '%(provider)s' " "para o tipo de serviço '%(service_type)s'" msgid "Resource body required" msgstr "Corpo do recurso necessário" msgid "Resource not found." msgstr "Recurso não encontrado." msgid "Resources required" msgstr "Recursos necessários" msgid "" "Root helper application. Use 'sudo neutron-rootwrap /etc/neutron/rootwrap." "conf' to use the real root filter facility. Change to 'sudo' to skip the " "filtering and just run the command directly." msgstr "" "Aplicação ajudante de Root. Use 'sudo neutron-rootwrap /etc/neutron/rootwrap." "conf' para usar a habilidade the filtragem de root real. Modifique para " "'sudo' para pular a filtragem e apenas executar o comando diretamente." msgid "Root permissions are required to drop privileges." msgstr "As permissões de raiz são necessárias para descartar privilégios." #, python-format msgid "Router already has a port on subnet %s" msgstr "O roteador já possui uma porta na sub-rede %s" msgid "Router port must have at least one fixed IP" msgstr "A porta do Roteador deve ter pelo menos um IP fixo" #, python-format msgid "Running %(cmd)s (%(desc)s) for %(project)s ..." msgstr "Executando %(cmd)s (%(desc)s) para %(project)s..." #, python-format msgid "Running %(cmd)s for %(project)s ..." msgstr "Executando %(cmd)s para %(project)s..." msgid "" "Seconds between nodes reporting state to server; should be less than " "agent_down_time, best if it is half or less than agent_down_time." msgstr "" "Segundos entre os nós que relatam o estado para o servidor; deve ser menor " "que agent_down_time, melhor se for metade ou menos do que agent_down_time." msgid "" "Seconds to regard the agent is down; should be at least twice " "report_interval, to be sure the agent is down for good." msgstr "" "Segundos para considerar que o agente está inativo; deve ser no mínimo duas " "vezes report_interval, para ter certeza de que o agente está inativo." #, python-format msgid "Security Group %(id)s %(reason)s." msgstr "Grupo de segurança %(id)s %(reason)s." #, python-format msgid "Security Group Rule %(id)s %(reason)s." msgstr "Regra de grupo de segurança %(id)s %(reason)s." #, python-format msgid "Security group %(id)s does not exist" msgstr "O grupo de segurança %(id)s não existe" #, python-format msgid "Security group rule %(id)s does not exist" msgstr "A regra do grupo de segurança %(id)s não existe" #, python-format msgid "Security group rule already exists. Rule id is %(rule_id)s." msgstr "A regra do grupo de segurança já existe. ID de regra é %(rule_id)s." #, python-format msgid "" "Security group rule for ethertype '%(ethertype)s' not supported. Allowed " "values are %(values)s." msgstr "" "A regra do grupo de segurança para ethertype '%(ethertype)s' não é " "suportada. Os valores permitidos são %(values)s." 
#, python-format msgid "" "Security group rule protocol %(protocol)s not supported. Only protocol " "values %(values)s and integer representations [0 to 255] are supported." msgstr "" "Protocolo de regra do grupo de segurança %(protocol)s não suportado. Apenas " "valores valores %(values)s e representações de número inteiro [0 a 255] são " "suportados." msgid "" "Send notification to nova when port data (fixed_ips/floatingip) changes so " "nova can update its cache." msgstr "" "Enviar notificação para nova quando dados da porta (fixed_ips/floatingip) " "muda de modo que nova possa atualizar seu cache." msgid "Send notification to nova when port status changes" msgstr "Enviar notificação para nova quando o status da porta muda" #, python-format msgid "" "Service provider '%(provider)s' could not be found for service type " "%(service_type)s" msgstr "" "Provedor de serviço '%(provider)s' não pôde ser encontrado para o tipo de " "serviço %(service_type)s" msgid "Service to handle DHCPv6 Prefix delegation." msgstr "Serviço para manipular a delegação de Prefixo DHCPv6." #, python-format msgid "Service type %(service_type)s does not have a default service provider" msgstr "" "Tipo de serviço %(service_type)s não possui um provedor de serviço padrão" msgid "" "Set new timeout in seconds for new rpc calls after agent receives SIGTERM. " "If value is set to 0, rpc timeout won't be changed" msgstr "" "Configure novo tempo limite em segundos para novas chamadas rpc depois que o " "agente receber SIGTERM. Se o valor for configurado como 0, o tempo limite de " "rpc não será alterado" msgid "" "Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "" "Configure ou desconfigure o bit don't fragment (DF) no pacote IP de saída " "que transporta o túnel GRE/VXLAN." msgid "Shared address scope can't be unshared" msgstr "O escopo de endereço compartilhado não pode ser descompartilhado" msgid "String prefix used to match IPset names." msgstr "Prefixo de sequência usado para corresponder nomes de IPset." #, python-format msgid "Sub-project %s not installed." msgstr "O subprojeto %s não está instalado." msgid "Subnet for router interface must have a gateway IP" msgstr "A sub-rede para a interface do roteador deve ter um IP de gateway" msgid "Subnet pool has existing allocations" msgstr "O conjunto de sub-rede possui alocações existentes" msgid "Subnet used for the l3 HA admin network." msgstr "Sub-rede usada para a rede administrativa de alta disponibilidade l3." msgid "" "System-wide flag to determine the type of router that tenants can create. " "Only admin can override." msgstr "" "Sinalizador do Sistema Inteiro para determinar o tipo de roteador que " "locatários podem criar. Somente administrador pode substituir." msgid "TCP Port used by Neutron metadata namespace proxy." msgstr "Porta TCP usada pelo proxy de namespace de metadados Neutron." msgid "TCP Port used by Nova metadata server." msgstr "Porta TCP usada pelo servidor de metadados Nova." msgid "TTL for vxlan interface protocol packets." msgstr "TTL para pacotes de protocolo da interface vxlan." #, python-format msgid "Tag %(tag)s could not be found." msgstr "A tag %(tag)s não pôde ser localizada." 
#, python-format msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" msgstr "" "Arrendatário %(tenant_id)s não permitido para criar %(resource)s nesta rede" msgid "Tenant id for connecting to designate in admin context" msgstr "ID de locatário para conexão ao Designate no contexto de admnistrador" msgid "Tenant name for connecting to designate in admin context" msgstr "" "Nome do locatário para conexão ao Designate no contexto de admnistrador" msgid "" "The DHCP server can assist with providing metadata support on isolated " "networks. Setting this value to True will cause the DHCP server to append " "specific host routes to the DHCP request. The metadata service will only be " "activated when the subnet does not contain any router port. The guest " "instance must be configured to request host routes via DHCP (Option 121). " "This option doesn't have any effect when force_metadata is set to True." msgstr "" "O servidor DHCP pode ajudar a fornecer suporte de metadados em redes " "isoladas. Configurar esse valor para True fará com que o servidor DHCP anexe " "rotas de host específicas à solicitação DHCP. O serviço de metadados será " "ativado somente quando a sub-rede não contiver nenhuma porta do roteador. A " "instância convidada deve ser configurada para solicitar rotas de host por " "meio de DHCP (Opção 121). Essa opção não tem efeito algum quando " "force_metadata estiver configurado para True." msgid "The UDP port to use for VXLAN tunnels." msgstr "A porta UDP utilizada para túneis VXLAN." #, python-format msgid "" "The address allocation request could not be satisfied because: %(reason)s" msgstr "" "A solicitação de alocação de endereço não pôde ser satisfeita porque: " "%(reason)s" msgid "The advertisement interval in seconds" msgstr "O intervalo de propaganda em segundos" msgid "" "The base MAC address Neutron will use for VIFs. The first 3 octets will " "remain unchanged. If the 4th octet is not 00, it will also be used. The " "others will be randomly generated." msgstr "" "O endereço MAC de base que o Neutron usará para VIFs. Os 3 primeiros octetos " "permanecerão inalterados. Se o 4º octeto não for 00, ele também será " "utilizado, Os outros serão gerados aleatoriamente." msgid "" "The base mac address used for unique DVR instances by Neutron. The first 3 " "octets will remain unchanged. If the 4th octet is not 00, it will also be " "used. The others will be randomly generated. The 'dvr_base_mac' *must* be " "different from 'base_mac' to avoid mixing them up with MAC's allocated for " "tenant ports. A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00. " "The default is 3 octet" msgstr "" "O endereço mac base usado para instâncias DVR exclusivas pelo Neutron. Os " "primeiros 3 octetos permanecerão inalterados. Se o quarto octeto não for 00, " "ele também será usado. Os outros serão aleatoriamente gerados. O " "'dvr_base_mac' *deve* ser diferente de 'base_mac' para evitar misturá-los " "com os do MAC alocados para portas locatárias. Um exemplo de 4 octetos seria " "dvr_base_mac = fa:16:3f:4f:00:00. O padrão é 3 octetos" msgid "The core plugin Neutron will use" msgstr "O plug-in principal que o Neutron irá utilizar." msgid "The driver used to manage the DHCP server." msgstr "O driver usado para gerenciar o servidor DHCP." msgid "The driver used to manage the virtual interface." msgstr "Driver usado para gerenciar a interface virtual." msgid "" "The email address to be used when creating PTR zones. 
If not specified, the " "email address will be admin@<dns_domain>" msgstr "" "O endereço de e-mail a ser usado ao criar zonas PTR. Se não especificado, o " "endereço de e-mail será admin@<dns_domain>" msgid "" "The maximum number of items returned in a single response, value was " "'infinite' or negative integer means no limit" msgstr "" "O número máximo de itens retornados em uma única resposta, o valor era " "'infinito' ou um número inteiro negativo significa que não há limite" msgid "" "The network type to use when creating the HA network for an HA router. By " "default or if empty, the first 'tenant_network_types' is used. This is " "helpful when the VRRP traffic should use a specific network which is not the " "default one." msgstr "" "O tipo de rede a ser usado ao criar a rede HA para um roteador HA. Por " "padrão ou se em branco, o primeiro 'tenant_network_types' será usado. Isso é " "útil quando o tráfego VRRP deve usar uma rede específica que não é a padrão." msgid "" "The number of seconds the agent will wait between polling for local device " "changes." msgstr "" "O número de segundos que o agente aguardará entre as pesquisas para mudanças " "do dispositivo local." msgid "" "The number of seconds to wait before respawning the ovsdb monitor after " "losing communication with it." msgstr "" "O número de segundos a aguardar antes de reiniciar o monitor ovsdb após " "perder comunicação com ele." msgid "The number of sort_keys and sort_dirs must be same" msgstr "Os números de sort_keys e sort_dirs devem ser os mesmos" msgid "" "The path for API extensions. Note that this can be a colon-separated list of " "paths. For example: api_extensions_path = extensions:/path/to/more/exts:/" "even/more/exts. The __path__ of neutron.extensions is appended to this, so " "if your extensions are in there you don't need to specify them here." msgstr "" "O caminho para extensões da API. Note que isso pode ser uma lista separada " "por dois-pontos de caminhos. Por exemplo: api_extensions_path = extensions:/path/" "to/more/exts:/even/more/exts. O __path__ de neutron.extensions é anexado a " "isso, de modo que se suas extensões estiverem lá, não será necessário " "especificá-las aqui." msgid "The physical network name with which the HA network can be created." msgstr "O nome da rede física com o qual a rede HA pode ser criada." #, python-format msgid "The port '%s' was deleted" msgstr "A porta '%s' foi excluída" msgid "The port to bind to" msgstr "A porta à qual se vincular" #, python-format msgid "The requested content type %s is invalid." msgstr "O tipo de conteúdo requisitado %s é inválido." msgid "The resource could not be found." msgstr "O recurso não pôde ser encontrado." #, python-format msgid "" "The router %(router_id)s has been already hosted by the L3 Agent " "%(agent_id)s." msgstr "O roteador %(router_id)s já foi hospedado pelo Agente L3 %(agent_id)s." msgid "" "The server has either erred or is incapable of performing the requested " "operation." msgstr "" "O servidor encontrou um erro ou é incapaz de executar a operação solicitada." msgid "The service plugins Neutron will use" msgstr "Os plugins de serviço que o Neutron irá utilizar" #, python-format msgid "The subnet request could not be satisfied because: %(reason)s" msgstr "A solicitação de sub-rede não pôde ser satisfeita porque: %(reason)s" #, python-format msgid "The subproject to execute the command against. Can be one of: '%s'." msgstr "" "O subprojeto com relação ao qual executar o comando. Pode ser um de: '%s'."
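# Illustrative sketch, not part of the source catalog: the base_mac/dvr_base_mac
# options described above as they might appear in neutron.conf. The dvr_base_mac
# value is the example given in the option text; the base_mac value shown is an
# assumption.
#   [DEFAULT]
#   base_mac = fa:16:3e:00:00:00
#   dvr_base_mac = fa:16:3f:4f:00:00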
msgid "The type of authentication to use" msgstr "O tipo de autenticação a ser usado" msgid "" "There are routers attached to this network that depend on this policy for " "access." msgstr "" "Há roteadores conectados a essa rede que dependem dessa política para acesso." msgid "" "True to delete all ports on all the OpenvSwitch bridges. False to delete " "ports created by Neutron on integration and external network bridges." msgstr "" "True para excluir todas as portas em todas as pontes OpenvSwitch. False para " "excluir portas criadas pelo Neutron na integração e pontes de rede externa." msgid "Tunnel IP value needed by the ML2 plugin" msgstr "Valor do IP do túnel necessário pelo plug-in ML2" msgid "Tunnel bridge to use." msgstr "Ponte do túnel a ser utilizada." msgid "" "Type of the nova endpoint to use. This endpoint will be looked up in the " "keystone catalog and should be one of public, internal or admin." msgstr "" "O tipo do terminal Nova a ser utilizado. Esse terminal será bloqueado no " "catálogo de keystone e deverá ser público, interno ou de administração." msgid "URL for connecting to designate" msgstr "URL para conexão ao Designate" msgid "URL to database" msgstr "URL para banco de dados" #, python-format msgid "Unable to access %s" msgstr "Não é possível acessar %s" #, python-format msgid "Unable to calculate %(address_type)s address because of:%(reason)s" msgstr "" "Não é possível calcular o endereço %(address_type)s devido a: %(reason)s" #, python-format msgid "Unable to convert value in %s" msgstr "Não é possível converter valor em %s" msgid "Unable to create the SNAT Interface Port" msgstr "Não é possível criar a Porta da Interface SNAT" #, python-format msgid "Unable to determine mac address for %s" msgstr "Não foi possível determinar o endereço MAC para %s" #, python-format msgid "Unable to find '%s' in request body" msgstr "Não foi possível localizar '%s' no corpo da solicitação" #, python-format msgid "Unable to find IP address %(ip_address)s on subnet %(subnet_id)s" msgstr "" "Não é possível localizar endereço IP %(ip_address)s na sub-rede %(subnet_id)s" #, python-format msgid "Unable to find resource name in %s" msgstr "Não foi possível encontrar nome de recurso em %s" #, python-format msgid "" "Unable to identify a target field from:%s. Match should be in the form " "%%()s" msgstr "" "Não é possível identificar um campo de destino de: %s. A correspondência " "deve estar no formato %%()s" msgid "Unable to provide external connectivity" msgstr "Não é possível fornecer conectividade externa" msgid "Unable to provide tenant private network" msgstr "Não é possível fornecer rede privada do locatário." #, python-format msgid "" "Unable to verify match:%(match)s as the parent resource: %(res)s was not " "found" msgstr "" "Não foi possível verificar resultados:%(match)s pois o recurso pai: %(res)s " "não foi encontrado" #, python-format msgid "Unexpected label for script %(script_name)s: %(labels)s" msgstr "Rótulo inesperado para o script %(script_name)s: %(labels)s" #, python-format msgid "Unexpected number of alembic branch points: %(branchpoints)s" msgstr "Número inesperado de pontos de ramificação alembic: %(branchpoints)s" #, python-format msgid "Unexpected response code: %s" msgstr "Código de resposta inesperado: %s" #, python-format msgid "Unexpected response: %s" msgstr "Resposta inesperada: %s" #, python-format msgid "Unit name '%(unit)s' is not valid." msgstr "O nome da unidade '%(unit)s' não é válido." 
#, python-format msgid "Unknown address type %(address_type)s" msgstr "Tipo de endereço desconhecido %(address_type)s" #, python-format msgid "Unknown attribute '%s'." msgstr "Atributo desconhecido '%s'." #, python-format msgid "Unknown chain: %r" msgstr "Cadeia desconhecida: %r" #, python-format msgid "Unknown network type %(network_type)s." msgstr "Tipo de rede desconhecido %(network_type)s." msgid "Unmapped error" msgstr "Erro não mapeado" msgid "Unrecognized action" msgstr "Ação não reconhecida" msgid "Unrecognized field" msgstr "Campo não reconhecido" msgid "Unsupported Content-Type" msgstr "Tipo de Conteúdo Não Suportado" #, python-format msgid "Unsupported network type %(net_type)s." msgstr "Tipo de rede não suportado %(net_type)s." msgid "Unsupported request type" msgstr "Tipo de solicitação não suportado" msgid "Updating default security group not allowed." msgstr "Não permitido atualizar o grupo de segurança padrão." msgid "" "Use ML2 l2population mechanism driver to learn remote MAC and IPs and " "improve tunnel scalability." msgstr "" "Utilize o driver de mecanismo de população ML2 l2 para aprender sobre MAC e " "IPs remotos e melhorar a escalabilidade do túnel." msgid "Use broadcast in DHCP replies." msgstr "Usar broadcast em respostas DHCP." msgid "Use either --delta or relative revision, not both" msgstr "Use --delta ou revisão relativa, não ambos" msgid "" "Use ipset to speed-up the iptables based security groups. Enabling ipset " "support requires that ipset is installed on L2 agent node." msgstr "" "Utilize ipset para acelerar os grupos de segurança baseados em iptables. " "Para habilitar o suporte a ipset é necessário que o ipset esteja instalado " "no nó do agente L2." msgid "" "Use the root helper when listing the namespaces on a system. This may not be " "required depending on the security configuration. If the root helper is not " "required, set this to False for a performance improvement." msgstr "" "Use o ajudante de Root quando listar os namespaces no sistema. Isso pode não " "ser necessário dependendo das configurações de segurança. Se o ajudante de " "root não for necessário, configure isto para falso para melhorar a " "performance." msgid "" "Use veths instead of patch ports to interconnect the integration bridge to " "physical networks. Support kernel without Open vSwitch patch port support so " "long as it is set to True." msgstr "" "Use veths em vez de portas de correção para interconectar a ponte de " "integração a redes físicas. Suporta kernel sem o suporte da porta de " "correção Open vSwitch desde que configurado como True." msgid "" "User (uid or name) running metadata proxy after its initialization (if " "empty: agent effective user)." msgstr "" "Usuário (uid ou nome) executando proxy de metadados após sua inicialização " "(se vazio: usuário efetivo do agente)." msgid "User (uid or name) running this process after its initialization" msgstr "Usuário (uid ou nome) executando esse processo após sua inicialização" msgid "Username for connecting to designate in admin context" msgstr "Nome de usuário para conexão ao Designate no contexto de admnistrador" msgid "VRRP authentication password" msgstr "Senha de autenticação do VRRP" msgid "VRRP authentication type" msgstr "Tipo de autenticação do VRRP" msgid "" "Value of host kernel tick rate (hz) for calculating minimum burst value in " "bandwidth limit rules for a port with QoS. See kernel configuration file for " "HZ value and tc-tbf manual for more information." 
msgstr "" "Valor da taxa tick (hz) do kernel do host para calcular o valor de burst " "mínimo nas regras de limite de largura da banda para uma porta com QoS. " "Consulte o arquivo de configuração do kernel para obter o valor de HZ e o " "manual tc-tbf para obter mais informações." msgid "" "Value of latency (ms) for calculating size of queue for a port with QoS. See " "tc-tbf manual for more information." msgstr "" "Valor de latência (ms) para calcular o tamanho da fila de uma porta com QoS. " "Consulte o manual tc-tbf para obter mais informações." msgid "" "When proxying metadata requests, Neutron signs the Instance-ID header with a " "shared secret to prevent spoofing. You may select any string for a secret, " "but it must match here and in the configuration used by the Nova Metadata " "Server. NOTE: Nova uses the same config key, but in [neutron] section." msgstr "" "Ao configurar o proxy de solicitações de metadados, o Neutron designa o " "cabeçalho Instance-ID com um segredo compartilhado para evitar spoofing. É " "possível selecionar qualquer sequência de um segredo, mas ela deverá " "corresponder aqui e na configurada usada pelo Nova Metadata Server. NOTA: O " "Nova usa a mesma chave de configuração, mas na seção [neutro]." msgid "" "Where to store Neutron state files. This directory must be writable by the " "agent." msgstr "" "Onde armazenar arquivos de estado Neutron. O agente deve ter permissão de " "escrita neste diretório." msgid "" "With IPv6, the network used for the external gateway does not need to have " "an associated subnet, since the automatically assigned link-local address " "(LLA) can be used. However, an IPv6 gateway address is needed for use as the " "next-hop for the default route. If no IPv6 gateway address is configured " "here, (and only then) the neutron router will be configured to get its " "default route from router advertisements (RAs) from the upstream router; in " "which case the upstream router must also be configured to send these RAs. " "The ipv6_gateway, when configured, should be the LLA of the interface on the " "upstream router. If a next-hop using a global unique address (GUA) is " "desired, it needs to be done via a subnet allocated to the network and not " "through this parameter. " msgstr "" "Com IPv6, a rede usada para o gateway externo não precisa ter um sub-rede " "associada, pois o Link-local Address (LLA) designado automaticamente pode " "ser usado. No entanto, um endereço do gateway IPv6 é necessário para ser " "usado como o próximo hop para a rota padrão. Se nenhum endereço do gateway " "IPv6 for configurado aqui, (somente então) o roteador neutron será " "configurado para obter sua rota padrão de router advertisements (RAs) do " "roteador de envio de dados; em cujo caso o roteador de envio de dados também " "deve ser configurado para enviar esses RAs. O ipv6_gateway, quando " "configurado, deve ser o LLA da interface no roteador de envio de dados. Se " "um próximo hop usando um global unique address (GUA) for desejado, isso " "precisará ser feito por meio de uma sub-rede alocada para a rede e não por " "meio desse parâmetro. 
" msgid "You must implement __call__" msgstr "Você deve implementar __call__" msgid "" "You must provide a config file for bridge - either --config-file or " "env[NEUTRON_TEST_CONFIG_FILE]" msgstr "" "Você deve fornecer um arquivo de configuração para a ponte - --config-file " "ou env[NEUTRON_TEST_CONFIG_FILE]" msgid "You must provide a revision or relative delta" msgstr "Você deve fornecer uma revisão ou um delta relativo" msgid "a subnetpool must be specified in the absence of a cidr" msgstr "Um conjunto de sub-redes deve ser especificado na ausência de um CIDR" msgid "add_ha_port cannot be called inside of a transaction." msgstr "add_ha_port não pode ser chamado dentro de uma transação." msgid "allocation_pools allowed only for specific subnet requests." msgstr "" "allocation_pools permitido somente para solicitações de sub-rede específicas." msgid "allocation_pools are not in the subnet" msgstr "allocation_pools não estão na sub-rede" msgid "allocation_pools use the wrong ip version" msgstr "allocation_pools usam versão de IP errada" msgid "already a synthetic attribute" msgstr "já é um atributo sintético" msgid "binding:profile value too large" msgstr "ligação: valor de perfil muito grande" #, python-format msgid "cannot perform %(event)s due to %(reason)s" msgstr "Não é possível executar %(event)s devido a %(reason)s" msgid "cidr and prefixlen must not be supplied together" msgstr "cidr e prefixlen não devem ser fornecidos juntos" msgid "dns_domain cannot be specified without a dns_name" msgstr "dns_domain não pode ser especificado sem um dns_name" msgid "dns_name cannot be specified without a dns_domain" msgstr "dns_name não pode ser especificado sem um dns_domain" msgid "fixed_ip_address cannot be specified without a port_id" msgstr "fixed_ip_address não pode ser especificado sem um port_id" #, python-format msgid "has device owner %s" msgstr "possui o proprietário do dispositivo %s" msgid "in use" msgstr "em uso" #, python-format msgid "ip command failed on device %(dev_name)s: %(reason)s" msgstr "comando ip falhou no dispositivo %(dev_name)s:%(reason)s" #, python-format msgid "ip link capability %(capability)s is not supported" msgstr "a capacidade %(capability)s de link de IP não é suportada" #, python-format msgid "ip link command is not supported: %(reason)s" msgstr "o comando de link do IP não é suportado: %(reason)s" msgid "ip_version must be specified in the absence of cidr and subnetpool_id" msgstr "" "ip_version deve ser especificado na ausência de cidr e de subnetpool_id" msgid "ipv6_address_mode is not valid when ip_version is 4" msgstr "ipv6_address_mode não é válido quando ip_version for 4" msgid "ipv6_ra_mode is not valid when ip_version is 4" msgstr "ipv6_ra_mode não será válido quando ip_version for 4" #, python-format msgid "" "ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " "'%(addr_mode)s' is not valid. If both attributes are set, they must be the " "same value" msgstr "" "ipv6_ra_mode configurado para '%(ra_mode)s' com ipv6_address_mode " "configurado para '%(addr_mode)s' não é válido. 
Se ambos os atributos forem " "configurados, eles devem ter o mesmo valor" msgid "mac address update" msgstr "atualização do endereço mac" msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "Deve-se fornecer exatamente 2 argumentos - cidr e MAC" msgid "network_type required" msgstr "network_type necessário" #, python-format msgid "network_type value '%s' not supported" msgstr "Valor de network_type '%s' não suportado" msgid "new subnet" msgstr "nova sub-rede" #, python-format msgid "physical_network '%s' unknown for flat provider network" msgstr "physical_network '%s' desconhecida para rede de provedor flat" msgid "physical_network required for flat provider network" msgstr "physical_network requerida para rede de provedor flat" #, python-format msgid "provider:physical_network specified for %s network" msgstr "provider:physical_network especificado para a rede %s" msgid "respawn_interval must be >= 0 if provided." msgstr "respawn_interval deve ser >= 0 se fornecido." #, python-format msgid "segmentation_id out of range (%(min)s through %(max)s)" msgstr "segmentation_id fora da faixa (%(min)s até %(max)s)" msgid "segmentation_id requires physical_network for VLAN provider network" msgstr "segmentation_id requer physical_network para rede de provedor VLAN" msgid "shared attribute switching to synthetic" msgstr "atributo compartilhado alternando para sintético" #, python-format msgid "" "subnetpool %(subnetpool_id)s cannot be updated when associated with shared " "address scope %(address_scope_id)s" msgstr "" "O conjunto de sub-rede %(subnetpool_id)s não pode ser atualizado quando " "associado ao escopo de endereço compartilhado %(address_scope_id)s" msgid "subnetpool_id and use_default_subnetpool cannot both be specified" msgstr "" "subnetpool_id e use_default_subnetpool não podem ser especificados juntos" msgid "the nexthop is not connected with router" msgstr "o nexthop não está conectado com o roteador" msgid "the nexthop is used by router" msgstr "o nexthop é usado pelo roteador" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982290.9710417 neutron-16.0.0.0b2.dev214/neutron/locale/ru/0000755000175000017500000000000000000000000020522 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3190448 neutron-16.0.0.0b2.dev214/neutron/locale/ru/LC_MESSAGES/0000755000175000017500000000000000000000000022307 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/locale/ru/LC_MESSAGES/neutron.po0000644000175000017500000035550600000000000024347 0ustar00coreycorey00000000000000# Translations template for neutron. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the neutron project. # # Translators: # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: neutron VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2019-12-20 15:01+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 05:54+0000\n" "Last-Translator: Copied by Zanata \n" "Language: ru\n" "Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n" "%100>=11 && n%100<=14)? 
2 : 3);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Russian\n" #, python-format msgid "" "\n" "Command: %(cmd)s\n" "Exit code: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" msgstr "" "\n" "Команда: %(cmd)s\n" "Код выхода: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" #, python-format msgid "" "%(branch)s HEAD file does not match migration timeline head, expected: " "%(head)s" msgstr "" "Файл HEAD %(branch)s отличается от ожидаемого для графика миграции: %(head)s" #, python-format msgid "" "%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' " "and '%(desc)s'" msgstr "" "Значение %(invalid_dirs)s недопустимо для sort_dirs; допустимое значение: " "'%(asc)s' и '%(desc)s'" #, python-format msgid "%(key)s prohibited for %(tunnel)s provider network" msgstr "%(key)s запрещен для сети поставщика %(tunnel)s" #, python-format msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" msgstr "%(name)s '%(addr)s' не соответствует версии IP '%(ip_version)s'" #, python-format msgid "%s cannot be called while in offline mode" msgstr "%s нельзя вызывать в режиме без подключения" #, python-format msgid "%s is invalid attribute for sort_keys" msgstr "Атрибут %s недопустим для sort_keys" #, python-format msgid "%s must implement get_port_from_device or get_ports_from_devices." msgstr "%s должен реализовать get_port_from_device или get_ports_from_devices." #, python-format msgid "%s prohibited for VLAN provider network" msgstr "%s запрещено для сети VLAN провайдера" #, python-format msgid "%s prohibited for flat provider network" msgstr "%s запрещено для одноуровневой сети провайдера" #, python-format msgid "%s prohibited for local provider network" msgstr "%s запрещено для локальной сети провайдера" #, python-format msgid "'%s' is not a valid RBAC object type" msgstr "'%s' не является допустимым типом объекта RBAC" #, python-format msgid "'%s' is not supported for filtering" msgstr "'%s' не поддерживает фильтрацию" #, python-format msgid "'module' object has no attribute '%s'" msgstr "Объект 'module' не содержит атрибута '%s'" msgid "'port_max' is smaller than 'port_min'" msgstr "'port_max' меньше чем 'port_min'" msgid "0 is not allowed as CIDR prefix length" msgstr "Нулевое значение запрещено в качестве длины префикса CIDR" msgid "A cidr must be specified in the absence of a subnet pool" msgstr "Значение cidr должно быть указано при отсутствии пула подсетей" msgid "" "A decimal value as Vendor's Registered Private Enterprise Number as required " "by RFC3315 DUID-EN." msgstr "" "Десятичное значение в качестве зарегистрированного номера частного " "предприятия производителя в соответствии с RFC3315 DUID-EN." #, python-format msgid "A default external network already exists: %(net_id)s." msgstr "Внешняя сеть по умолчанию уже существует: %(net_id)s." msgid "" "A default subnetpool for this IP family has already been set. Only one " "default may exist per IP family" msgstr "" "Пул подсетей по умолчанию уже задан для этой версии IP. Для версии IP может " "быть задан только один пул по умолчанию" msgid "A metering driver must be specified" msgstr "Необходимо указать драйвер измерений" msgid "Access to this resource was denied." msgstr "Доступ к этому ресурсу запрещен." msgid "Action to be executed when a child process dies" msgstr "Действие, выполняемое при завершении дочернего процесса" msgid "" "Add comments to iptables rules. 
Set to false to disallow the addition of " "comments to generated iptables rules that describe each rule's purpose. " "System must support the iptables comments module for addition of comments." msgstr "" "Добавить комментарии в правила iptables. Укажите значение false, чтобы не " "добавлять комментарии в сгенерированные правила iptables с описанием каждого " "правила. Добавление комментариев возможно, только если система поддерживает " "модуль комментариев iptables." msgid "Address not present on interface" msgstr "Адрес не задан для интерфейса" msgid "Adds test attributes to core resources." msgstr "Добавляет атрибуты теста в базовые ресурсы." #, python-format msgid "Agent %(id)s is not a L3 Agent or has been disabled" msgstr "Агент %(id)s выключен или не является агентом L3" msgid "" "Agent starts with admin_state_up=False when enable_new_agents=False. In the " "case, user's resources will not be scheduled automatically to the agent " "until admin changes admin_state_up to True." msgstr "" "Агент запускается с параметром admin_state_up=False, если включен параметр " "enable_new_agents=False. В этом случае пользовательские ресурсы не будут " "автоматически запланированы в агенте, пока администратор не изменит " "значение параметра admin_state_up на True." #, python-format msgid "Agent updated: %(payload)s" msgstr "Агент обновлен: %(payload)s" msgid "Allow auto scheduling networks to DHCP agent." msgstr "Разрешить автоматическое планирование сетей для агента DHCP." msgid "Allow auto scheduling of routers to L3 agent." msgstr "Разрешить автоматическое планирование маршрутизаторов для агента L3." msgid "" "Allow overlapping IP support in Neutron. Attention: the following parameter " "MUST be set to False if Neutron is being used in conjunction with Nova " "security groups." msgstr "" "Разрешить поддержку перекрывающихся IP-адресов в Neutron. Внимание: " "следующий параметр ДОЛЖЕН быть задан равным False, если Neutron используется " "совместно с группами защиты Nova." msgid "Allow running metadata proxy." msgstr "Разрешить выполнение прокси метаданных." msgid "Allow sending resource operation notification to DHCP agent" msgstr "Разрешить отправку уведомления об операции ресурса агенту DHCP" msgid "Allow the creation of PTR records" msgstr "Разрешить создание записей PTR" msgid "Allow the usage of the bulk API" msgstr "Разрешить использование Bulk API" msgid "Allow to perform insecure SSL (https) requests to nova metadata" msgstr "Разрешить незащищенные запросы SSL (https) метаданных nova" msgid "" "Allows for serving metadata requests coming from a dedicated metadata access " "network whose CIDR is 169.254.169.254/16 (or larger prefix), and is " "connected to a Neutron router from which the VMs send metadata:1 request. In " "this case DHCP Option 121 will not be injected in VMs, as they will be able " "to reach 169.254.169.254 through a router. This option requires " "enable_isolated_metadata = True." msgstr "" "Разрешает обслуживать запросы метаданных, исходящие из выделенной сети для " "метаданных с CIDR 169.254.169.254/16 (или подсети), подключенной к " "маршрутизатору Neutron, из которой VM отправляют запрос metadata:1. В этом " "случае DHCP Option 121 не будет добавляться в VM, так как они обращаются к " "сети 169.254.169.254 через маршрутизатор. Для этой опции необходимо указать " "enable_isolated_metadata = True." msgid "An RBAC policy already exists with those values." msgstr "Стратегия RBAC с такими параметрами уже существует." 
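# Illustrative sketch, not part of the source catalog: the isolated-metadata
# options described above as they might appear in dhcp_agent.ini. The
# enable_isolated_metadata setting comes from the option text; force_metadata
# as a companion option name is an assumption.
#   [DEFAULT]
#   enable_isolated_metadata = True
#   force_metadata = False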
msgid "An identifier must be specified when updating a subnet" msgstr "При обновлении подсети необходимо указать идентификатор" msgid "" "An ordered list of extension driver entrypoints to be loaded from the " "neutron.ml2.extension_drivers namespace. For example: extension_drivers = " "port_security,qos" msgstr "" "Упорядоченный список конечных точек драйверов расширения, загружаемых из " "пространства имен neutron.ml2.extension_drivers. Пример: extension_drivers = " "port_security,qos" msgid "" "An ordered list of networking mechanism driver entrypoints to be loaded from " "the neutron.ml2.mechanism_drivers namespace." msgstr "" "Упорядоченный список конечных точек драйверов механизмов создания сетей, " "загружаемых из пространства имен neutron.ml2.mechanism_drivers." msgid "An unknown error has occurred. Please try your request again." msgstr "" "Произошла неизвестная ошибка. Пожалуйста, попытайтесь повторить ваш запрос." msgid "Async process didn't respawn" msgstr "Асинхронный процесс не перезапустился" msgid "Authorization URL for connecting to designate in admin context" msgstr "" "URL авторизации для подключения к назначенному объекту в административном " "контексте" msgid "Automatically remove networks from offline DHCP agents." msgstr "Автоматически удалять сети из отключенных агентов DHCP." msgid "" "Automatically reschedule routers from offline L3 agents to online L3 agents." msgstr "" "Автоматически перепланировать маршрутизаторы с отключенных агентов L3 на " "включенные агенты L3 ." msgid "Availability zone of this node" msgstr "Зона доступности узла." msgid "Available commands" msgstr "Доступные команды" #, python-format msgid "Base MAC: %s" msgstr "Базовый MAC: %s" msgid "" "Base log dir for dnsmasq logging. The log contains DHCP and DNS log " "information and is useful for debugging issues with either DHCP or DNS. If " "this section is null, disable dnsmasq log." msgstr "" "Базовый каталог для файлов протокола dnsmasq. Протокол содержит сведения " "DHCP и DNS и используется для отладки ошибок в DHCP или DNS. Если этот " "раздел пуст, протокол dnsmasq отключен." 
msgid "Body contains invalid data" msgstr "В теле содержатся недопустимые данные" msgid "Bulk operation not supported" msgstr "Групповая операция не поддерживается" msgid "CIDR to monitor" msgstr "CIDR для монитора" #, python-format msgid "Callback for %(resource_type)s not found" msgstr "Обратный вызов для %(resource_type)s не найден" #, python-format msgid "Callback for %(resource_type)s returned wrong resource type" msgstr "Обратный вызов %(resource_type)s вернул неправильный тип ресурса" #, python-format msgid "Cannot add floating IP to port %s that has no fixed IPv4 addresses" msgstr "" "Не удается добавить нефиксированный IP-адрес в порт %s, содержащий " "фиксированные адреса IPv4" #, python-format msgid "Cannot add multiple callbacks for %(resource_type)s" msgstr "Не удалось добавить несколько обратных вызовов для %(resource_type)s" #, python-format msgid "Cannot allocate IPv%(req_ver)s subnet from IPv%(pool_ver)s subnet pool" msgstr "" "Не удалось связать подсеть IPv%(req_ver)s из пула подсетей IPv%(pool_ver)s" msgid "Cannot allocate requested subnet from the available set of prefixes" msgstr "Невозможно выделить запрошенную подсеть из доступного набора префиксов" msgid "Cannot disable enable_dhcp with ipv6 attributes set" msgstr "Невозможно отключить enable_dhcp, если заданы атрибуты ipv6" #, python-format msgid "Cannot handle subnet of type %(subnet_type)s" msgstr "Не удается обработать подсеть с типом %(subnet_type)s" msgid "Cannot have multiple IPv4 subnets on router port" msgstr "С портом маршрутизатора не может быть связано несколько подсетей IPv4" #, python-format msgid "" "Cannot have multiple router ports with the same network id if both contain " "IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s" msgstr "" "Невозможно иметь несколько портов маршрутизатора с одинаковыми ИД сети, если " "обасодержат подсети IPv6. Существующий порт %(p)s имеет ИД сети и подсетей " "IPv6 %(nid)s" #, python-format msgid "" "Cannot host distributed router %(router_id)s on legacy L3 agent %(agent_id)s." msgstr "" "Распределенный маршрутизатор %(router_id)s не может работать на устаревшем " "агенте L3 %(agent_id)s." 
msgid "Cannot specify both subnet-id and port-id" msgstr "subnet-id и port-id нельзя указывать одновременно" msgid "Cannot understand JSON" msgstr "Невозможно распознать JSON" #, python-format msgid "Cannot update read-only attribute %s" msgstr "Невозможно обновить атрибут %s, доступный только для чтения" msgid "Certificate Authority public key (CA cert) file for ssl" msgstr "Файл общего ключа CA (CA cert) для ssl" msgid "Check ebtables installation" msgstr "Проверить установку ebtables" msgid "Check for ARP header match support" msgstr "Проверить наличия поддержки сопоставления заголовка ARP" msgid "Check for ARP responder support" msgstr "Проверка наличия поддержки промежуточного клиента ARP" msgid "Check for ICMPv6 header match support" msgstr "Проверить наличия поддержки сопоставления заголовка ICMPv6" msgid "Check for OVS Geneve support" msgstr "Проверить на наличие поддержки Geneve OVS" msgid "Check for OVS vxlan support" msgstr "Проверить на наличие поддержки OVS vxlan" msgid "Check for VF management support" msgstr "Проверить наличия поддержки управления VF" msgid "Check for iproute2 vxlan support" msgstr "Проверка наличия поддержки iproute2 vxlan" msgid "Check for nova notification support" msgstr "Проверка наличия поддержки уведомлений nova" msgid "Check for patch port support" msgstr "Проверка наличия поддержки портов исправлений" msgid "Check ip6tables installation" msgstr "Проверить установку ip6tables" msgid "Check ipset installation" msgstr "Проверить установку ipset" msgid "Check keepalived IPv6 support" msgstr "Проверить наличие поддержки IPv6 в конфигурации keepalived" msgid "Check minimal dibbler version" msgstr "Проверить минимальную версию dibbler" msgid "Check minimal dnsmasq version" msgstr "Проверить минимальную версию dnsmasq" msgid "Check netns permission settings" msgstr "Проверить параметры прав доступа netns" msgid "Check ovs conntrack support" msgstr "Проверить поддержку conntrack в ovs" msgid "Check ovsdb native interface support" msgstr "Проверить поддержку собственного интерфейса ovsdb" #, python-format msgid "" "Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of " "subnet %(sub_id)s" msgstr "" "Cidr %(subnet_cidr)s подсети %(subnet_id)s перекрывается с cidr %(cidr)s " "подсети %(sub_id)s" msgid "Cleanup resources of a specific agent type only." msgstr "Очистить ресурсы только для заданного типа агента." msgid "Client certificate for nova metadata api server." msgstr "Сертификат клиента для сервера API метаданных nova." msgid "" "Comma-separated list of : tuples, mapping " "network_device to the agent's node-specific list of virtual functions that " "should not be used for virtual networking. vfs_to_exclude is a semicolon-" "separated list of virtual functions to exclude from network_device. The " "network_device in the mapping should appear in the physical_device_mappings " "list." msgstr "" "Список записей, разделенных запятыми, вида <сетевое-устройство>:<исключаемые-" "vfs>, связывающих сетевое устройство со списком виртуальных функций узла " "агента, которые не должны использоваться для виртуальных сетей. исключаемые-" "vfs - это список виртуальных функций, исключаемых для сетевого устройства, " "разделенный точкой с запятой. Связанное сетевое устройство должно входить в " "список physical_device_mappings." msgid "" "Comma-separated list of : tuples mapping " "physical network names to the agent's node-specific physical network device " "interfaces of SR-IOV physical function to be used for VLAN networks. 
All " "physical networks listed in network_vlan_ranges on the server should have " "mappings to appropriate interfaces on each agent." msgstr "" "Список записей, разделенных запятыми, вида <физическая-сеть>:<сетевое-" "устройство>, связывающих имена физических сетей с физическим сетевым " "устройством узла агента физической функции SR-IOV для сетей VLAN. Все " "физические сети, перечисленные в network_vlan_ranges на сервере, должны " "иметь связи с соответствующими интерфейсами на каждом агенте." msgid "" "Comma-separated list of : tuples " "mapping physical network names to the agent's node-specific physical network " "interfaces to be used for flat and VLAN networks. All physical networks " "listed in network_vlan_ranges on the server should have mappings to " "appropriate interfaces on each agent." msgstr "" "Список записей, разделенных запятыми, вида <физическая-сеть>:<физический-" "интерфейс>, связывающих имена физических сетей с физическими сетевыми " "интерфейсами узла агента для одноуровневых сетей и сетей VLAN. Все " "физические сети, перечисленные в network_vlan_ranges на сервере, должны " "иметь связи с соответствующими интерфейсами на каждом агенте." msgid "" "Comma-separated list of : tuples enumerating ranges of GRE " "tunnel IDs that are available for tenant network allocation" msgstr "" "Разделенный запятой список кортежей :, в котором " "перечислены диапазоны ИД туннелей GRE, доступные для выделения сети " "арендатора" msgid "" "Comma-separated list of : tuples enumerating ranges of " "Geneve VNI IDs that are available for tenant network allocation" msgstr "" "Разделенный запятой список кортежей :, в котором " "перечислены ИД VNI Geneve, доступные для выделения сети арендатора" msgid "" "Comma-separated list of : tuples enumerating ranges of " "VXLAN VNI IDs that are available for tenant network allocation" msgstr "" "Разделенный запятой список кортежей :, в котором " "перечислены идентификаторы VNI VXLAN, доступные для выделения сети арендатора" msgid "" "Comma-separated list of the DNS servers which will be used as forwarders." msgstr "" "Разделенный запятыми список серверов DNS, которые будут использоваться для " "пересылки." msgid "Command to execute" msgstr "Выполняемая команда" msgid "Config file for interface driver (You may also use l3_agent.ini)" msgstr "" "Файл конфигурации для драйвера интерфейса (Можно также использовать l3_agent." "ini)" #, python-format msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s" msgstr "Конфликтующее значение ethertype %(ethertype)s для CIDR %(cidr)s" msgid "" "Controls whether the neutron security group API is enabled in the server. It " "should be false when using no security groups or using the nova security " "group API." msgstr "" "Контролирует, включен ли API групп защиты neutron на сервере. Значение " "должно быть false, когда группы защиты не используются или используется API " "групп защиты nova." #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" msgstr "" "Не удалось подключиться к порту %(host)s:%(port)s по истечении %(time)d " "секунд" msgid "Could not deserialize data" msgstr "Не удалось десериализовать данные" msgid "" "DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite " "lease times." msgstr "" "Продолжительность выделения адреса DHCP (в секундах). Укажите -1, чтобы " "dnsmasq использовала бесконечное время выделения." 
msgid "" "DVR deployments for VXLAN/GRE/Geneve underlays require L2-pop to be enabled, " "in both the Agent and Server side." msgstr "" "При развертывании DVR для основных функций VXLAN/GRE/Geneve включить L2-pop " "как на стороне агента, так и на стороне сервера." msgid "" "Database engine for which script will be generated when using offline " "migration." msgstr "" "Служба базы данных, для которой будет создан сценарий при использовании " "миграции с отключением." msgid "Default external networks must be shared to everyone." msgstr "Внешняя сеть по умолчанию должна быть общедоступной." msgid "" "Default network type for external networks when no provider attributes are " "specified. By default it is None, which means that if provider attributes " "are not specified while creating external networks then they will have the " "same type as tenant networks. Allowed values for external_network_type " "config option depend on the network type values configured in type_drivers " "config option." msgstr "" "Тип по умолчанию для внешних сетей, если не указаны атрибуты провайдера. " "Значение по умолчанию Нет, означающее, что если во время создания внешних " "сетей не были указаны атрибуты провайдера, то их тип будет совпадать с типом " "сетей арендатора. Разрешенные значения для опции конфигурации " "external_network_type зависят от значений типа сети, настроенных в опции " "конфигурации type_drivers." msgid "" "Default number of RBAC entries allowed per tenant. A negative value means " "unlimited." msgstr "" "Количество записей RBAC по умолчанию на одного арендатора. Отрицательное " "значение - без ограничений." msgid "" "Default number of resource allowed per tenant. A negative value means " "unlimited." msgstr "" "Количество ресурсов по умолчанию на одного арендатора. Отрицательное " "значение - не ограничено." msgid "Default security group" msgstr "Группа защиты по умолчанию" msgid "Default security group already exists." msgstr "Группа защиты по умолчанию уже существует." msgid "" "Default value of availability zone hints. The availability zone aware " "schedulers use this when the resources availability_zone_hints is empty. " "Multiple availability zones can be specified by a comma separated string. " "This value can be empty. In this case, even if availability_zone_hints for a " "resource is empty, availability zone is considered for high availability " "while scheduling the resource." msgstr "" "Значение по умолчанию параметра availability_zone_hints. Планировщики, " "учитывающие зону доступности, используют этот параметр, когда параметр " "availability_zone_hints для ресурсов пустой. Несколько зон доступности " "разделяются запятыми. Значение может быть пустым. В этом случае, даже если " "параметр availability_zone_hints для ресурса пустой, зона доступности " "считается пригодной для функций высокой готовности при планировании ресурса." msgid "" "Define the default value of enable_snat if not provided in " "external_gateway_info." msgstr "" "Определить значение по умолчанию enable_snat, если оно не указано в " "external_gateway_info." msgid "" "Defines providers for advanced services using the format: :" ":[:default]" msgstr "" "Определяет поставщиков для расширенных служб в формате: :" ":[:default]" msgid "Delete the namespace by removing all devices." msgstr "Удалите пространство имен, удалив все устройства." #, python-format msgid "Deleting port %s" msgstr "Удаление порта %s" #, python-format msgid "Deployment error: %(reason)s." msgstr "Ошибка развертывания: %(reason)s." 
msgid "Destroy IPsets even if there is an iptables reference." msgstr "Уничтожить IPset даже при наличии ссылок iptables." msgid "Destroy all IPsets." msgstr "Уничтожить все IPset." #, python-format msgid "Device %(dev_name)s in mapping: %(mapping)s not unique" msgstr "Устройство %(dev_name)s в карте связей %(mapping)s неуникально" msgid "Device not found" msgstr "Устройство не найдено" msgid "Domain to use for building the hostnames" msgstr "Домен, используемый для компоновки имен хостов" msgid "Downgrade no longer supported" msgstr "Понижение больше не поддерживается" #, python-format msgid "Driver %s is not unique across providers" msgstr "Драйвер %s не является уникальным среди поставщиков" msgid "Driver for external DNS integration." msgstr "Драйвер для интеграции с внешним DNS." msgid "Driver for security groups firewall in the L2 agent" msgstr "Драйвер для брандмауэра групп защиты в агенте L2" msgid "Driver to use for scheduling network to DHCP agent" msgstr "Драйвер, используемый для планирования сети для агента DHCP" msgid "Driver to use for scheduling router to a default L3 agent" msgstr "" "Драйвер, используемый для планирования маршрутизатора для агента L3 по " "умолчанию" msgid "" "Driver used for ipv6 prefix delegation. This needs to be an entry point " "defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg for " "entry points included with the neutron source." msgstr "" "Драйвер для делегирования префикса ipv6. Должен быть точкой входа, " "определенной в пространстве имен neutron.agent.linux.pd_drivers. См. файл " "setup.cfg на наличие точек входа, включенных в исходный код neutron." #, python-format msgid "" "Duplicate L3HARouterAgentPortBinding is created for router(s) %(router)s. " "Database cannot be upgraded. Please, remove all duplicates before upgrading " "the database." msgstr "" "Обнаружен повторяющийся L3HARouterAgentPortBinding для маршрутизаторов " "%(router)s. Обновление базы данных невозможно. Перед обновлением базы данных " "устраните все повторы." msgid "Duplicate Security Group Rule in POST." msgstr "Совпадающие правила группы защиты в POST." msgid "Duplicate address detected" msgstr "Повторяющийся адрес" #, python-format msgid "ERROR: %s" msgstr "Ошибка: %s" msgid "" "ERROR: Unable to find configuration file via the default search paths (~/." "neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!" msgstr "" "ОШИБКА: Не удалось найти файл конфигурации с использованием путей поиска по " "умолчанию (~/.neutron/, ~/, /etc/neutron/, /etc/) и опция '--config-file'!" msgid "" "Either one of parameter network_id or router_id must be passed to _get_ports " "method." msgstr "" "Либо один из параметров network_id, либо router_id должен быть передан в " "метод _get_ports." msgid "Either subnet_id or port_id must be specified" msgstr "Необходимо указать или subnet_id, или port_id" msgid "Enable HA mode for virtual routers." msgstr "Включить режим высокой готовности для виртуальных маршрутизаторов." msgid "Enable SSL on the API server" msgstr "Разрешить применение SSL на сервере API" msgid "" "Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 " "plugin using linuxbridge mechanism driver" msgstr "" "Активируйте VXLAN на агенте. Активация возможна, если агентом управляет " "модуль ml2, использующий драйвер механизма linuxbridge" msgid "" "Enable services on an agent with admin_state_up False. If this option is " "False, when admin_state_up of an agent is turned False, services on it will " "be disabled. 
Agents with admin_state_up False are not selected for automatic " "scheduling regardless of this option. But manual scheduling to such agents " "is available if this option is True." msgstr "" "Включить службы на агенте с admin_state_up False. Если эта опция равна " "False, когда admin_state_up агента устанавливается False, службы на нем " "будут выключены. Агенты с admin_state_up False не выбраны для " "автоматического планирования независимо от этой опции. Но ручное " "планирование для таких агентов доступно, если опция равна True." msgid "" "Enables IPv6 Prefix Delegation for automatic subnet CIDR allocation. Set to " "True to enable IPv6 Prefix Delegation for subnet allocation in a PD-capable " "environment. Users making subnet creation requests for IPv6 subnets without " "providing a CIDR or subnetpool ID will be given a CIDR via the Prefix " "Delegation mechanism. Note that enabling PD will override the behavior of " "the default IPv6 subnetpool." msgstr "" "Разрешает делегирование префикса IPv6 при автоматическом выделении CIDR " "подсети. Присвойте параметру значение True, чтобы включить делегирование " "префикса IPv6 при выделении сети в среде с поддержкой делегирования " "префикса. Пользователи, запрашивающие создание подсети IPv6 без указания " "CIDR или ИД пула подсетей, получают CIDR посредством делегирования префикса. " "Учтите, что включение делегирования префикса переопределяет стандартное " "поведение пула подсетей IPv6." msgid "" "Enables the dnsmasq service to provide name resolution for instances via DNS " "resolvers on the host running the DHCP agent. Effectively removes the '--no-" "resolv' option from the dnsmasq process arguments. Adding custom DNS " "resolvers to the 'dnsmasq_dns_servers' option disables this feature." msgstr "" "Включает службу dnsmasq для обработки запросов DNS на хосте, на котором " "работает агент DHCP. Аннулирует действие опции '--no-resolv' в аргументах " "процесса dnsmasq. Эта функция выключается, если в опцию " "'dnsmasq_dns_servers' добавляются пользовательские обработчики запросов DNS." #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "Ошибка %(reason)s во время выполнения операции." #, python-format msgid "Error parsing dns address %s" msgstr "Ошибка при анализе адреса dns %s" #, python-format msgid "Error while reading %s" msgstr "Ошибка при чтении %s" #, python-format msgid "" "Exceeded %s second limit waiting for address to leave the tentative state." msgstr "" "Превышено время ожидания выхода адреса из временного состояния ( %s секунд)" msgid "Existing prefixes must be a subset of the new prefixes" msgstr "Существующие префиксы должны быть подмножеством новых префиксов" #, python-format msgid "" "Exit code: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: " "%(stderr)s" msgstr "" "Код возврата: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: " "%(stderr)s" #, python-format msgid "Extension %(driver)s failed." msgstr "Сбой расширения %(driver)s." #, python-format msgid "" "Extension driver %(driver)s required for service plugin %(service_plugin)s " "not found." msgstr "" "Не найден драйвер расширения %(driver)s, необходимый для модуля службы " "%(service_plugin)s." msgid "" "Extension to use alongside ml2 plugin's l2population mechanism driver. It " "enables the plugin to populate VXLAN forwarding table." msgstr "" "Расширение для использования наряду с драйвером механизма l2population " "модуля ml2. Оно обеспечивает заполнение модулем таблицы пересылки VXLAN." 
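# Illustrative sketch, not part of the source catalog: enabling VXLAN together
# with the l2population extension described above in a linuxbridge agent
# config. The [vxlan] section and option names are assumptions.
#   [vxlan]
#   enable_vxlan = True
#   l2_population = True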
#, python-format msgid "Extension with alias %s does not exist" msgstr "Расширение с псевдонимом %s не существует" msgid "Extensions list to use" msgstr "Список используемых расширений" #, python-format msgid "External IP %s is the same as the gateway IP" msgstr "Внешний IP-адрес %s совпадает с IP-адресом шлюза" #, python-format msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found." msgstr "" "Не удалось перепланировать маршрутизатор %(router_id)s: не найден допустимый " "агент L3." #, python-format msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s." msgstr "" "Не удалось запланировать маршрутизатор %(router_id)s для агента L3 " "%(agent_id)s." #, python-format msgid "" "Failed to create port on network %(network_id)s, because fixed_ips included " "invalid subnet %(subnet_id)s" msgstr "" "Не удалось создать порт в сети %(network_id)s, так как fixed_ips содержат " "недопустимую подсеть %(subnet_id)s" #, python-format msgid "Failed to locate source for %s." msgstr "Не удалось найти источник для %s." msgid "Failed to remove supplemental groups" msgstr "Не удалось удалить дополнительные группы" #, python-format msgid "Failed to set gid %s" msgstr "Не удалось получить gid %s" #, python-format msgid "Failed to set uid %s" msgstr "Не удалось задать uid %s" #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "Не удалось настроить порт туннеля %(type)s на %(ip)s" msgid "Failure applying iptables rules" msgstr "Не удалось применить правила iptables" #, python-format msgid "Failure waiting for address %(address)s to become ready: %(reason)s" msgstr "Сбой ожидания готовности адреса %(address)s: %(reason)s" msgid "Flat provider networks are disabled" msgstr "Одноуровневые сети выключены" msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "" "Для протоколов TCP/UDP значение port_range_min должно быть <= port_range_max" msgid "Force ip_lib calls to use the root helper" msgstr "" "Использовать в вызовах ip_lib вспомогательную программу для получения прав " "доступа root" msgid "Gateway IP version inconsistent with allocation pool version" msgstr "Версия IP шлюза несовместима с версией для пула выделения адресов" msgid "Gateway is not valid on subnet" msgstr "Шлюз недопустим в подсети" msgid "" "Group (gid or name) running metadata proxy after its initialization (if " "empty: agent effective group)." msgstr "" "Группа (gid или имя) использует proxy метаданных после инициализации (если " "пустое, используется группа агента). " msgid "Group (gid or name) running this process after its initialization" msgstr "Группа (gid или имя) запускает этот процесс после инициализации" msgid "" "Hostname to be used by the Neutron server, agents and services running on " "this machine. All the agents and services running on this machine must use " "the same host value." msgstr "" "Имя хоста для использования сервером Neutron, агентами и службами, " "запущенными в этой системе. Все агенты и службы, запущенные в этой системе, " "должны использовать одно и то же значение хоста." #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" "min) is missing." msgstr "" "Код ICMP (port-range-max) %(value)s указан, но тип ICMP (port-range-min) " "отсутствует." 
msgid "ID of network" msgstr "ИД сети" msgid "ID of network to probe" msgstr "ИД сети для тестирования" msgid "ID of probe port to delete" msgstr "ИД удаляемого тестового порта" msgid "ID of probe port to execute command" msgstr "ИД тестового порта для выполнения команды" msgid "ID of the router" msgstr "ИД маршрутизатора" #, python-format msgid "IP address %(ip)s already allocated in subnet %(subnet_id)s" msgstr "IP-адрес %(ip)s уже выделен в подсети %(subnet_id)s" #, python-format msgid "IP address %(ip)s does not belong to subnet %(subnet_id)s" msgstr "IP-адрес %(ip)s не принадлежит подсети %(subnet_id)s" msgid "IP allocation failed. Try again later." msgstr "Не удалось выделить IP-адрес. Повторите попытку позже." msgid "IP allocation requires subnet_id or ip_address" msgstr "Для выделения IP-адреса требуется subnet_id или ip_address" #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables rules:\n" "%s" msgstr "" "Функции IPTablesManager.apply не удалось применить следующий набор правил " "iptables :\n" "%s" msgid "IPv6 Address Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "" "Допустимое значение режима адресов IPv6 для делегирования префикса: SLAAC " "или без сохранения состояния." msgid "IPv6 RA Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "" "Допустимое значение режима RA IPv6 для делегирования префикса: SLAAC или без " "сохранения состояния." #, python-format msgid "" "IPv6 address %(ip)s cannot be directly assigned to a port on subnet " "%(subnet_id)s as the subnet is configured for automatic addresses" msgstr "" "Адрес IPv6 %(ip)s нельзя напрямую связывать с портом в подсети " "%(subnet_id)s, так как подсеть настроена для автоматического выделения " "адресов" #, python-format msgid "" "IPv6 subnet %s configured to receive RAs from an external router cannot be " "added to Neutron Router." msgstr "" "Подсеть IPv6 %s, настроенная для приема RA из внешнего маршрутизатора, не " "может быть добавлена в маршрутизатор Neutron." msgid "" "If True, then allow plugins that support it to create VLAN transparent " "networks." msgstr "" "Если True, разрешаются модули, поддерживающие создание прозрачных сетей VLAN." msgid "Illegal IP version number" msgstr "Запрещенный номер версии IP" msgid "" "In some cases the Neutron router is not present to provide the metadata IP " "but the DHCP server can be used to provide this info. Setting this value " "will force the DHCP server to append specific host routes to the DHCP " "request. If this option is set, then the metadata service will be activated " "for all the networks." msgstr "" "В некоторых ситуациях машрутизатор Neutron отсутствует и не предоставляет " "метаданные для IP, но эту задачу решает сервер DHCP. Если задан этот " "параметр, то сервер DHCP будет добавлять маршруты к хостам в запрос DHCP. С " "этим параметром служба метаданных будет активирована для всех сетей." msgid "" "Indicates that this L3 agent should also handle routers that do not have an " "external network gateway configured. This option should be True only for a " "single agent in a Neutron deployment, and may be False for all agents if all " "routers must have an external network gateway." msgstr "" "Указывает, что агент L3 должен также обрабатывать маршрутизаторы, для " "которых не настроен шлюз во внешнюю сеть. Опции следует присвоить значение " "True, если в развертывании Neutron участвует только один агент. 
Опции можно " "присвоить значение False для всех агентов, если все маршрутизаторы должны " "иметь шлюз во внешнюю сеть." #, python-format msgid "Instance of class %(module)s.%(class)s must contain _cache attribute" msgstr "" "Экземпляр класса %(module)s.%(class)s должен содержать атрибут _cache" #, python-format msgid "Insufficient prefix space to allocate subnet size /%s" msgstr "Недостаточно пространства префиксов для выделения подсети размером /%s" msgid "Insufficient rights for removing default security group." msgstr "" "Отсутствуют требуемые права доступа для удаления группы защиты по умолчанию." msgid "" "Integration bridge to use. Do not change this parameter unless you have a " "good reason to. This is the name of the OVS integration bridge. There is one " "per hypervisor. The integration bridge acts as a virtual 'patch bay'. All VM " "VIFs are attached to this bridge and then 'patched' according to their " "network connectivity." msgstr "" "Используемый мост интеграции. Не изменяйте этот параметр без серьезных " "причин. Это имя моста интеграции OVS. Для каждого гипервизора предусмотрен " "один мост. Мост интеграции работает как виртуальная коммутационная панель. " "Все виртуальные интерфейсы VM подключаются к этому мосту и затем " "коммутируются согласно топологии сети." msgid "Interface to monitor" msgstr "Интерфейс для мониторинга" msgid "" "Interval between checks of child process liveness (seconds), use 0 to disable" msgstr "" "Интервал между проверками работы дочернего процесса (в секундах), 0 для " "отключения" msgid "Interval between two metering measures" msgstr "Интервал между двумя показателями измерений" msgid "Interval between two metering reports" msgstr "Интервал между двумя отчетами измерений" #, python-format msgid "Invalid Device %(dev_name)s: %(reason)s" msgstr "Недопустимое устройство %(dev_name)s: %(reason)s" #, python-format msgid "" "Invalid action '%(action)s' for object type '%(object_type)s'. Valid " "actions: %(valid_actions)s" msgstr "" "Недопустимое действие '%(action)s' для типа объекта '%(object_type)s'. " "Допустимые действия: %(valid_actions)s" #, python-format msgid "" "Invalid authentication type: %(auth_type)s, valid types are: " "%(valid_auth_types)s" msgstr "" "Недопустимый тип идентификации: %(auth_type)s. Допустимые типы: " "%(valid_auth_types)s" #, python-format msgid "Invalid ethertype %(ethertype)s for protocol %(protocol)s." msgstr "Недопустимый ethertype %(ethertype)s для протокола %(protocol)s." #, python-format msgid "Invalid format: %s" msgstr "Неправильный формат: %s" #, python-format msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s" msgstr "" "Недопустимое состояние экземпляра: %(state)s. Допустимые состояния: " "%(valid_states)s" #, python-format msgid "Invalid mapping: '%s'" msgstr "Недопустимое отображение: '%s'" #, python-format msgid "Invalid pci slot %(pci_slot)s" msgstr "Недопустимый разъем pci %(pci_slot)s" #, python-format msgid "Invalid provider format. Last part should be 'default' or empty: %s" msgstr "" "Недопустимый формат поставщика. Последняя часть должна иметь вид 'default' " "или быть пустой: %s" #, python-format msgid "Invalid resource type %(resource_type)s" msgstr "Недопустимый тип ресурса %(resource_type)s" #, python-format msgid "Invalid route: %s" msgstr "Недопустимый маршрут: %s" msgid "Invalid service provider format" msgstr "Недопустимый формат поставщика службы" #, python-format msgid "" "Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255." 
msgstr "" "Недопустимое значение для ICMP %(field)s (%(attr)s) %(value)s. Значение " "должно лежать в диапазоне от 0 до 255." #, python-format msgid "Invalid value for port %(port)s" msgstr "Недопустимое значение для порта %(port)s" msgid "" "Iptables mangle mark used to mark ingress from external network. This mark " "will be masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "Пометка mangle в iptables применяется для пометки входа из внешней сети. Эта " "пометка будет применяться с маской 0xffff для использования только младших " "16 бит." msgid "" "Iptables mangle mark used to mark metadata valid requests. This mark will be " "masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "Пометка mangle в Iptables, используемая для пометки допустимых запросов " "метаданных. Эта пометка будет применяться с маской 0xffff для использования " "только младших 16 бит." msgid "Keepalived didn't respawn" msgstr "Демон keepalived не выполнил повторное порождение" msgid "Keepalived didn't spawn" msgstr "Демон keepalived не запустился" #, python-format msgid "" "Kernel HZ value %(value)s is not valid. This value must be greater than 0." msgstr "" "Недопустимое значение HZ ядра %(value)s. Значение должно быть больше 0." msgid "L3 agent failure to setup NAT for floating IPs" msgstr "Ошибка агента L3 при настройке NAT для нефиксированных IP" msgid "L3 agent failure to setup floating IPs" msgstr "Ошибка агента L3 при настройке нефиксированных IP" msgid "Limit number of leases to prevent a denial-of-service." msgstr "Ограничить число выделений во избежание отказа в обслуживании." msgid "List of :" msgstr "Список <физическая-сеть>:<физический-мост>" msgid "" "List of :: or " "specifying physical_network names usable for VLAN provider and tenant " "networks, as well as ranges of VLAN tags on each available for allocation to " "tenant networks." msgstr "" "Список :: или , " "содержащий имена физических сетей, которые могут использоваться для сетей " "VLAN провайдера и арендатора, а также диапазоны тегов VLAN для каждой сети, " "доступной для выделения арендаторам." msgid "" "List of network type driver entrypoints to be loaded from the neutron.ml2." "type_drivers namespace." msgstr "" "Список конечных точек драйвера типа сети, загружаемых из пространства имен " "neutron.ml2.type_drivers." msgid "" "List of physical_network names with which flat networks can be created. Use " "default '*' to allow flat networks with arbitrary physical_network names. " "Use an empty list to disable flat networks." msgstr "" "Список имен физических сетей, с которыми можно создавать одноуровневые сети. " "Для создания одноуровневых сетей с произвольными именами физических сетей " "используйте символ *. Пустой список запрещает создание одноуровневых сетей." msgid "Location for Metadata Proxy UNIX domain socket." msgstr "Расположение сокета домена UNIX прокси метаданных. " msgid "Location of Metadata Proxy UNIX domain socket" msgstr "Расположение сокета домена UNIX прокси метаданных" msgid "Location to store DHCP server config files." msgstr "Расположение для хранения файлов конфигурации сервера DHCP." msgid "Location to store IPv6 PD files." msgstr "Расположение для хранения файлов PD IPv6." 
msgid "Location to store IPv6 RA config files" msgstr "Расположение для хранения файлов конфигурации RA IPv6" msgid "Location to store child pid files" msgstr "Расположение для хранения дочерних файлов pid" msgid "Log agent heartbeats" msgstr "Вести протокол периодических сигналов агента" msgid "" "MTU of the underlying physical network. Neutron uses this value to calculate " "MTU for all virtual network components. For flat and VLAN networks, neutron " "uses this value without modification. For overlay networks such as VXLAN, " "neutron automatically subtracts the overlay protocol overhead from this " "value. Defaults to 1500, the standard value for Ethernet." msgstr "" "MTU физической среды. Neutron использует это значение для расчета MTU для " "всех компонентов виртуальной сети. Для простых сетей и VLAN Neutron не " "изменяет это значение. Для сетей с перекрытием, таких как VXLAN, Neutron " "автоматически вычитает байты, необходимые для протокола перекрытия, из этого " "значения. Значение по умолчанию: 1500, стандартное для Ethernet." msgid "MTU size of veth interfaces" msgstr "Размер MTU интерфейсов veth" msgid "Make the l2 agent run in DVR mode." msgstr "Создать агент L2, выполняемый в режиме DVR." msgid "Malformed request body" msgstr "Неправильное тело запроса" msgid "MaxRtrAdvInterval setting for radvd.conf" msgstr "Параметр MaxRtrAdvInterval для radvd.conf" msgid "Maximum number of DNS nameservers per subnet" msgstr "Максимальное количество серверов DNS для подсети" msgid "" "Maximum number of L3 agents which a HA router will be scheduled on. If it is " "set to 0 then the router will be scheduled on every agent." msgstr "" "Максимальное число агентов L3, где будет запланирован маршрутизатор высокой " "готовности. Если параметр равен 0, то маршрутизатор будет запланирован на " "каждом агенте." msgid "Maximum number of allowed address pairs" msgstr "Максимальное число разрешенных пар адресов" msgid "Maximum number of host routes per subnet" msgstr "Максимальное количество маршрутов хоста на подсеть" msgid "Maximum number of routes per router" msgstr "Максимальное количество маршрутов на маршрутизатор" msgid "" "Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': deduce " "mode from metadata_proxy_user/group values, 'user': set metadata proxy " "socket mode to 0o644, to use when metadata_proxy_user is agent effective " "user or root, 'group': set metadata proxy socket mode to 0o664, to use when " "metadata_proxy_group is agent effective group or root, 'all': set metadata " "proxy socket mode to 0o666, to use otherwise." msgstr "" "Режим сокета домена UNIX Proxy метаданных, допускается 4 значения: 'deduce': " "получать режим из значений metadata_proxy_user/group, 'user': присвоить " "режиму сокета proxy метаданных значение 0o644 для применения в случае, если " "значением metadata_proxy_user является пользователь root или эффективный " "пользователь агента, 'group': присвоить режиму сокета proxy метаданных " "значение 0o664 для применения в случае, если значением metadata_proxy_group " "является root или эффективная группа агента, 'all': присвоить режиму сокета " "proxy метаданных значение 0o666 для использования в остальных случаях." msgid "Metering driver" msgstr "Драйвер измерения" msgid "MinRtrAdvInterval setting for radvd.conf" msgstr "Параметр MinRtrAdvInterval для radvd.conf" msgid "Minimize polling by monitoring ovsdb for interface changes." msgstr "" "Минимизировать опрос путем мониторинга ovsdb на предмет изменений интерфейса." 
#, python-format msgid "Missing key in mapping: '%s'" msgstr "Отсутствует ключ в отображении: '%s'" msgid "" "Multicast group for VXLAN. When configured, will enable sending all " "broadcast traffic to this multicast group. When left unconfigured, will " "disable multicast VXLAN mode." msgstr "" "Многоадресная группа для VXLAN. Если она настроена, то весь " "широковещательный трафик направляется в эту группу. Если она не задана, то " "режим многоадресной передачи VXLAN выключен." msgid "" "Multicast group(s) for vxlan interface. A range of group addresses may be " "specified by using CIDR notation. Specifying a range allows different VNIs " "to use different group addresses, reducing or eliminating spurious broadcast " "traffic to the tunnel endpoints. To reserve a unique group for each possible " "(24-bit) VNI, use a /8 such as 239.0.0.0/8. This setting must be the same on " "all the agents." msgstr "" "Многоадресные группы для интерфейса vxlan. Диапазон адресов группы можно " "указать в нотации CIDR. Если указан диапазон, то различные VNI смогут " "использовать разные адреса группы, что снижает или даже исключает " "интенсивный широковещательный трафик для конечных точек туннеля. Для того " "чтобы зарезервировать уникальную группу для каждого возможного VNI (24 " "бита), используйте формат /8, например, 239.0.0.0/8. Этот параметр должен " "быть одинаковым во всех агентах." #, python-format msgid "Multiple default providers for service %s" msgstr "Несколько поставщиков по умолчанию для службы %s" #, python-format msgid "Multiple providers specified for service %s" msgstr "Несколько поставщиков задано для службы %s" msgid "Multiple tenant_ids in bulk security group rule create not allowed" msgstr "" "Групповая операция создания нескольких tenant_ids в правиле группы защиты не " "разрешена" msgid "Must also specify protocol if port range is given." msgstr "При указании диапазона портов необходимо задать протокол." msgid "Must specify one or more actions on flow addition or modification" msgstr "" "Необходимо указать одно или несколько действий добавления или изменения " "потока" msgid "Name of Open vSwitch bridge to use" msgstr "Имя используемого моста Open vSwitch" msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "" "Имя используемого региона nova. Необходимо, если keystone управляет " "несколькими регионами." msgid "Namespace of the router" msgstr "Пространство имен маршрутизатора" msgid "Native pagination depend on native sorting" msgstr "Внутреннее разбиение на страницы зависит от внутренней сортировки" #, python-format msgid "" "Need to apply migrations from %(project)s contract branch. This will require " "all Neutron server instances to be shutdown before proceeding with the " "upgrade." msgstr "" "Требуется применить миграцию из ветви contract %(project)s. При этом " "необходимо выключить все серверы Neutron перед началом обновления." 
msgid "Negative delta (downgrade) not supported" msgstr "Отрицательная дельта (понижение) не поддерживается" msgid "Negative relative revision (downgrade) not supported" msgstr "Отрицательная относительная ревизия (понижение) не поддерживается" #, python-format msgid "Network %s does not contain any IPv4 subnet" msgstr "Сеть %s не содержит подсетей IPv4" #, python-format msgid "Network %s is not a valid external network" msgstr "Сеть %s не является допустимой внешней сетью" #, python-format msgid "Network %s is not an external network" msgstr "Сеть %s не является внешней" #, python-format msgid "" "Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges " "%(excluded_ranges)s was not found." msgstr "" "Сеть размера %(size)s из диапазона IP-адресов %(parent_range)s, кроме " "диапазонов IP-адресов %(excluded_ranges)s, не найдена." #, python-format msgid "Network type value '%s' not supported" msgstr "Значение типа сети '%s' не поддерживается" msgid "Network type value needed by the ML2 plugin" msgstr "Для модуля ML2 требуется значение типа сети" msgid "Neutron core_plugin not configured!" msgstr "Не настроен core_plugin Neutron!" msgid "No default router:external network" msgstr "Не задан маршрут по умолчанию во внешнюю сеть" #, python-format msgid "No default subnetpool found for IPv%s" msgstr "Не найден пул подсетей по умолчанию для IPv%s" msgid "No default subnetpools defined" msgstr "Не определены пулы подсетей по умолчанию" #, python-format msgid "No more IP addresses available for subnet %(subnet_id)s." msgstr "В подсети %(subnet_id)s больше нет доступных IP-адресов." msgid "No offline migrations pending." msgstr "Нет ожидающих миграций с выключением." #, python-format msgid "No shared key in %s fields" msgstr "Нет общего ключа в полях %s" msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "Невозможно вручную присвоить маршрутизатор агенту в режиме 'dvr'." msgid "Not allowed to manually remove a router from an agent in 'dvr' mode." msgstr "Невозможно вручную удалить маршрутизатор из агента в режиме 'dvr'." msgid "" "Number of DHCP agents scheduled to host a tenant network. If this number is " "greater than 1, the scheduler automatically assigns multiple DHCP agents for " "a given tenant network, providing high availability for DHCP service." msgstr "" "Число агентов DHCP, запланированных для управления сети арендатора. Если это " "значение больше 1, планировщик автоматически присваивает несколько агентов " "DHCP для заданной сети арендатора, обеспечивая высокую готовность службы " "DHCP." msgid "Number of backlog requests to configure the metadata server socket with" msgstr "" "Количество непереданных запросов для настройки сокета сервера метаданных" msgid "Number of backlog requests to configure the socket with" msgstr "Количество непереданных запросов для настройки сокета" msgid "" "Number of bits in an ipv4 PTR zone that will be considered network prefix. " "It has to align to byte boundary. Minimum value is 8. Maximum value is 24. " "As a consequence, range of values is 8, 16 and 24" msgstr "" "Число разрядов в зоне PTR ipv4, которые будут обрабатываться как префикс " "сети. Должно быть выравнено на границу байта. Минимальное значение: 8. " "Максимальное значение: 24. Допустимые значения: 8, 16 и 24" msgid "" "Number of bits in an ipv6 PTR zone that will be considered network prefix. " "It has to align to nyble boundary. Minimum value is 4. Maximum value is 124. 
" "As a consequence, range of values is 4, 8, 12, 16,..., 124" msgstr "" "Число разрядов в зоне PTR ipv6, которые будут обрабатываться как префикс " "сети. Должно быть выравнено на границу полубайта. Минимальное значение: 4. " "Максимальное значение: 124. Допустимые значения: 4, 8, 12, 16,..., 124" msgid "" "Number of floating IPs allowed per tenant. A negative value means unlimited." msgstr "" "Количество нефиксированных IP-адресов на одного арендатора. Отрицательное " "значение - не ограничено." msgid "" "Number of networks allowed per tenant. A negative value means unlimited." msgstr "" "Число разрешенных сетей на одного арендатора. Отрицательное значение " "означает отсутствие ограничений." msgid "Number of ports allowed per tenant. A negative value means unlimited." msgstr "" "Количество портов на одного арендатора. Отрицательное значение - не " "ограничено." msgid "Number of routers allowed per tenant. A negative value means unlimited." msgstr "" "Количество маршрутизаторов на одного арендатора. Отрицательное значение - не " "ограничено." msgid "" "Number of seconds between sending events to nova if there are any events to " "send." msgstr "" "Интервал, в секундах, между отправкой событий nova, если имеются события, " "требующие отправки." msgid "Number of seconds to keep retrying to listen" msgstr "Интервал (в секундах) для продолжения попыток приема" msgid "" "Number of security groups allowed per tenant. A negative value means " "unlimited." msgstr "" "Количество групп защиты на одного арендатора. Отрицательное значение - не " "ограничено." msgid "" "Number of security rules allowed per tenant. A negative value means " "unlimited." msgstr "" "Количество правил защиты на одного арендатора. Отрицательное значение - не " "ограничено." msgid "" "Number of separate worker processes for metadata server (defaults to half of " "the number of CPUs)" msgstr "" "Количество отдельных процессов обработчика для сервера метаданных (значение " "по умолчанию: половина от количества процессоров)" msgid "Number of subnets allowed per tenant, A negative value means unlimited." msgstr "" "Количество подсетей на одного арендатора. Отрицательное значение - не " "ограничено." msgid "" "Number of threads to use during sync process. Should not exceed connection " "pool size configured on server." msgstr "" "Число нитей, используемых в процессе синхронизации. Оно не должно превышать " "размер пула соединений, настроенный на сервере." msgid "OK" msgstr "OK" msgid "" "OVS datapath to use. 'system' is the default value and corresponds to the " "kernel datapath. To enable the userspace datapath set this value to 'netdev'." msgstr "" "Путь к данным OVS. Значение по умолчанию 'system' соответствует пути к " "данным, задаваемому ядром. Для того чтобы использовать пользовательский путь " "к данным, укажите значение netdev'." msgid "OVS vhost-user socket directory." msgstr "Каталог сокетов пользователя для виртуальных хостов OVS." msgid "Only admin can view or configure quota" msgstr "Только администратор может просматривать и настраивать квоту" msgid "Only admin is authorized to access quotas for another tenant" msgstr "Только администратор имеет доступ к квотам других арендаторов" msgid "Only admins can manipulate policies on objects they do not own" msgstr "" "Только администраторы могут управлять стратегиями объектов, владельцами " "которых они не являются." 
msgid "Only allowed to update rules for one security profile at a time" msgstr "" "Разрешено обновлять правила одновременно только для одного профайла защиты" msgid "Only remote_ip_prefix or remote_group_id may be provided." msgstr "Можно задать только remote_ip_prefix или remote_group_id." #, python-format msgid "Operation not supported on device %(dev_name)s" msgstr "Операция не поддерживается в устройстве %(dev_name)s" msgid "" "Ordered list of network_types to allocate as tenant networks. The default " "value 'local' is useful for single-box testing but provides no connectivity " "between hosts." msgstr "" "Упорядоченный список типов сетей для выделения в качестве сетей арендатора. " "Значение по умолчанию 'local' полезно для тестирования автономной системы, " "но не позволяет связать хосты." msgid "Override the default dnsmasq settings with this file." msgstr "" "Переопределите параметры по умолчанию для dnsmasq с помощью этого файла." msgid "Owner type of the device: network/compute" msgstr "Тип владельца устройства: network/compute" msgid "POST requests are not supported on this resource." msgstr "Запросы POST не поддерживаются этим ресурсом." #, python-format msgid "Package %s not installed" msgstr "Пакет %s не установлен" #, python-format msgid "Parsing bridge_mappings failed: %s." msgstr "Синтаксический анализ bridge_mappings не выполнен: %s." msgid "Password for connecting to designate in admin context" msgstr "" "Пароль для подключения к назначенному объекту в административном контексте" msgid "Path to PID file for this process" msgstr "Путь к файлу PID для этого процесса" msgid "Path to the router directory" msgstr "Путь к каталогу маршрутизатора" msgid "Peer patch port in integration bridge for tunnel bridge." msgstr "Равноправный порт исправлений в мосте интеграции для моста туннеля." msgid "Peer patch port in tunnel bridge for integration bridge." msgstr "Равноправный порт исправлений в мосте туннеля для моста интеграции." msgid "Phase upgrade options do not accept revision specification" msgstr "Опции обновления фазы не принимают спецификацию ревизии" msgid "Ping timeout" msgstr "Тайм-аут проверки связи" #, python-format msgid "Port %(id)s does not have fixed ip %(address)s" msgstr "Порт %(id)s не имеет фиксированного IP-адреса %(address)s" #, python-format msgid "Port %s does not exist" msgstr "Порт %s не существует" #, python-format msgid "" "Port %s has multiple fixed IPv4 addresses. Must provide a specific IPv4 " "address when assigning a floating IP" msgstr "" "Порт %s содержит несколько фиксированных адресов IPv4. При назначении " "нефиксированного IP-адреса необходимо указать конкретный адрес IPv4" msgid "Prefix Delegation can only be used with IPv6 subnets." msgstr "Делегирование префикса можно использовать только в подсетях IPv6." msgid "Private key of client certificate." msgstr "Личный ключ сертификата клиента." #, python-format msgid "Probe %s deleted" msgstr "Тест %s удален" #, python-format msgid "Probe created : %s " msgstr "Создан тест %s " msgid "Process is already started" msgstr "Процесс уже запущен" msgid "Process is not running." msgstr "Процесс не запущен." 
msgid "Protocol to access nova metadata, http or https" msgstr "Протокол для доступа к метаданным nova (http или https)" #, python-format msgid "Provider name %(name)s is limited by %(len)s characters" msgstr "Имя поставщика %(name)s, не более %(len)s символов" #, python-format msgid "RBAC policy of type %(object_type)s with ID %(id)s not found" msgstr "Не найдена стратегия RBAC с типом %(object_type)s и ИД %(id)s" #, python-format msgid "" "RBAC policy on object %(object_id)s cannot be removed because other objects " "depend on it.\n" "Details: %(details)s" msgstr "" "Не удается удалить стратегию RBAC для объекта %(object_id)s, так как от нее " "зависят другие объекты.\n" "Сведения: %(details)s" msgid "" "Range of seconds to randomly delay when starting the periodic task scheduler " "to reduce stampeding. (Disable by setting to 0)" msgstr "" "Диапазон случайных задержек (в секундах) при запуске планировщика " "периодических задач во избежание взрывного запуска. (Для выключения задайте " "0)" msgid "Ranges must be in the same IP version" msgstr "Версия IP для диапазонов должна совпадать" msgid "Ranges must be netaddr.IPRange" msgstr "Формат диапазонов: netaddr.IPRange" msgid "Ranges must not overlap" msgstr "Диапазоны не должны перекрываться" #, python-format msgid "" "Release aware branch labels (%s) are deprecated. Please switch to expand@ " "and contract@ labels." msgstr "" "Метки ветви информации о выпуске (%s) устарели. Перейдите на использование " "меток expand@ и contract@." msgid "Remote metadata server experienced an internal server error." msgstr "Внутренняя ошибка удаленного сервера метаданных." msgid "" "Repository does not contain HEAD files for contract and expand branches." msgstr "Хранилище не содержит файлы HEAD для ветвей contract и expand." msgid "" "Representing the resource type whose load is being reported by the agent. " "This can be \"networks\", \"subnets\" or \"ports\". When specified (Default " "is networks), the server will extract particular load sent as part of its " "agent configuration object from the agent report state, which is the number " "of resources being consumed, at every report_interval.dhcp_load_type can be " "used in combination with network_scheduler_driver = neutron.scheduler." "dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is " "WeightScheduler, dhcp_load_type can be configured to represent the choice " "for the resource being balanced. Example: dhcp_load_type=networks" msgstr "" "Представление типа ресурса, о чьей загрузке сообщает агент. Это может быть " "\"networks\", \"subnets\" или \"ports\". Когда указано (по умолчанию " "networks), сервер извлекает определенную загрузку, отправленную как часть " "его объекта конфигурации агента из состояния отчета агента, который содержит " "количество потребленных ресурсов за каждый интервал report_interval. " "dhcp_load_type можно использовать в сочетании с network_scheduler_driver = " "neutron.scheduler.dhcp_agent_scheduler.WeightScheduler Когда " "network_scheduler_driver - WeightScheduler, dhcp_load_type можно настроить " "для представления выбора балансируемого ресурса. Пример: " "dhcp_load_type=networks" msgid "Request Failed: internal server error while processing your request." msgstr "" "Запрос не выполнен: при обработке запроса произошла внутренняя ошибка " "сервера." msgid "" "Reset flow table on start. Setting this to True will cause brief traffic " "interruption." msgstr "" "Выполнить сброс таблицы потоков при старте. 
При значении True вызовет " "кратковременное прерывание трафика." #, python-format msgid "Resource %(resource)s %(resource_id)s could not be found." msgstr "Ресурс %(resource)s %(resource_id)s не найден." #, python-format msgid "Resource %(resource_id)s of type %(resource_type)s not found" msgstr "Ресурс %(resource_id)s с типом %(resource_type)s не найден" #, python-format msgid "" "Resource '%(resource_id)s' is already associated with provider " "'%(provider)s' for service type '%(service_type)s'" msgstr "" "Ресурс '%(resource_id)s' уже связан с поставщиком '%(provider)s' для типа " "службы '%(service_type)s'" msgid "Resource body required" msgstr "Требуется тело ресурса" msgid "Resource not found." msgstr "Ресурс не найден." msgid "Resources required" msgstr "Требуются ресурсы" msgid "" "Root helper application. Use 'sudo neutron-rootwrap /etc/neutron/rootwrap." "conf' to use the real root filter facility. Change to 'sudo' to skip the " "filtering and just run the command directly." msgstr "" "Вспомогательное приложение для получения прав root. Команда 'sudo neutron-" "rootwrap /etc/neutron/rootwrap.conf' вызывает утилиту фильтрации с правами " "root. Укажите 'sudo', чтобы пропустить фильтрацию и выполнить команду " "непосредственно." msgid "Root permissions are required to drop privileges." msgstr "Для сброса прав доступа требуются права доступа пользователя Root." #, python-format msgid "Router already has a port on subnet %s" msgstr "У маршрутизатора уже есть порт в подсети %s" msgid "Router port must have at least one fixed IP" msgstr "Порт маршрутизатора должен иметь хотя бы один фиксированный IP-адрес" #, python-format msgid "Running %(cmd)s (%(desc)s) for %(project)s ..." msgstr "Выполняется %(cmd)s (%(desc)s) для %(project)s ..." #, python-format msgid "Running %(cmd)s for %(project)s ..." msgstr "Выполняется %(cmd)s для %(project)s ..." msgid "" "Seconds between nodes reporting state to server; should be less than " "agent_down_time, best if it is half or less than agent_down_time." msgstr "" "Интервал отправки сообщений о состоянии узлов на сервер (в секундах). " "Значение должно быть меньше, чем agent_down_time, оптимально - не больше " "половины значения agent_down_time." msgid "" "Seconds to regard the agent is down; should be at least twice " "report_interval, to be sure the agent is down for good." msgstr "" "Интервал (в секундах), в течение которого агент считается выключенным; " "должен по меньшей мере вдвое превышать значение report_interval, чтобы " "гарантировать, что агент действительно выключен." #, python-format msgid "Security Group %(id)s %(reason)s." msgstr "Группа защиты %(id)s %(reason)s." #, python-format msgid "Security Group Rule %(id)s %(reason)s." msgstr "Правило группы защиты %(id)s %(reason)s." #, python-format msgid "Security group %(id)s does not exist" msgstr "Группа защиты %(id)s не существует" #, python-format msgid "Security group rule %(id)s does not exist" msgstr "Правило группы защиты %(id)s не существует" #, python-format msgid "Security group rule already exists. Rule id is %(rule_id)s." msgstr "Правило группы защиты уже существует. ИД правила: %(rule_id)s." #, python-format msgid "" "Security group rule for ethertype '%(ethertype)s' not supported. Allowed " "values are %(values)s." msgstr "" "Правило группы защиты для типа '%(ethertype)s' не поддерживается. Допустимые " "значения: %(values)s." #, python-format msgid "" "Security group rule protocol %(protocol)s not supported. 
Only protocol " "values %(values)s and integer representations [0 to 255] are supported." msgstr "" "Протокол правил группы защиты %(protocol)s не поддерживается. Поддерживаются " "значения %(values)s и целочисленные представления [0 - 255]." msgid "" "Send notification to nova when port data (fixed_ips/floatingip) changes so " "nova can update its cache." msgstr "" "Отправить уведомление nova в случае изменения данных порта (fixed_ips/" "floatingip), чтобы обеспечить обновление кэша nova." msgid "Send notification to nova when port status changes" msgstr "Отправить уведомление nova в случае изменения состояния порта" #, python-format msgid "" "Service provider '%(provider)s' could not be found for service type " "%(service_type)s" msgstr "" "Поставщик службы '%(provider)s' не найден для типа службы %(service_type)s" msgid "Service to handle DHCPv6 Prefix delegation." msgstr "Служба для обработки делегирования префикса DHCPv6." #, python-format msgid "Service type %(service_type)s does not have a default service provider" msgstr "Тип службы %(service_type)s не содержит поставщика службы по умолчанию" msgid "" "Set new timeout in seconds for new rpc calls after agent receives SIGTERM. " "If value is set to 0, rpc timeout won't be changed" msgstr "" "Задать новый тайм-аут (в секундах) для новых вызовов rpc после получения " "агентом сигнала SIGTERM. При значении 0 тайм-аут rpc не может быть изменен" msgid "" "Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "" "Установка/сброс бита Не разбивать на фрагменты (DF) в исходящем пакете IP, " "несущем туннель GRE/VXLAN." msgid "Shared address scope can't be unshared" msgstr "Для общей адресной области нельзя отменить совместное использование" msgid "String prefix used to match IPset names." msgstr "Префикс строки для сопоставления имен IPset." #, python-format msgid "Sub-project %s not installed." msgstr "Подпроект %s не установлен." msgid "Subnet for router interface must have a gateway IP" msgstr "" "Маска подсети для интерфейса маршрутизатора должна иметь IP-адрес шлюза" msgid "Subnet pool has existing allocations" msgstr "Пул подсетей имеет существующие выделения" msgid "Subnet used for the l3 HA admin network." msgstr "" "Подсеть, используемая для сети администрирования высокой готовности L3." msgid "" "System-wide flag to determine the type of router that tenants can create. " "Only admin can override." msgstr "" "Общесистемный флаг для определения типа маршрутизаторов, которые арендаторы " "могут создавать. Может быть переопределен только администратором." msgid "TCP Port used by Neutron metadata namespace proxy." msgstr "Порт TCP, применяемый прокси пространства имен метаданных." msgid "TCP Port used by Nova metadata server." msgstr "Порт TCP, используемый сервером метаданных Nova." msgid "TTL for vxlan interface protocol packets." msgstr "TTL для пакетов протокола интерфейса vxlan." #, python-format msgid "Tag %(tag)s could not be found." msgstr "Тег %(tag)s не найден." 
#, python-format msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" msgstr "" "Арендатору %(tenant_id)s не разрешено создание ресурса %(resource)s в этой " "сети" msgid "Tenant id for connecting to designate in admin context" msgstr "" "ИД арендатора для подключения к назначенному объекту в административном " "контексте" msgid "Tenant name for connecting to designate in admin context" msgstr "" "Имя арендатора для подключения к назначенному объекту в административном " "контексте" msgid "" "The DHCP server can assist with providing metadata support on isolated " "networks. Setting this value to True will cause the DHCP server to append " "specific host routes to the DHCP request. The metadata service will only be " "activated when the subnet does not contain any router port. The guest " "instance must be configured to request host routes via DHCP (Option 121). " "This option doesn't have any effect when force_metadata is set to True." msgstr "" "Сервер DHCP может помогать в получении метаданных в изолированных сетях. " "Если параметру присвоено значение True, то сервер DHCP будет добавлять " "маршруты к хостам в запрос DHCP. Служба метаданных активируется, только " "когда подсеть не содержит портов маршрутизатора. Гостевой экземпляр должен " "быть настроен для запросов маршрутов к хостам через DHCP (Option 121). Этот " "параметр ни на что не влияет, если force_metadata задан равным True." msgid "The UDP port to use for VXLAN tunnels." msgstr "Порт UDP, применяемый для туннелей VXLAN." #, python-format msgid "" "The address allocation request could not be satisfied because: %(reason)s" msgstr "Не удается выполнить запрос на выделение адреса, причина: %(reason)s" msgid "The advertisement interval in seconds" msgstr "Интервал объявления в секундах" msgid "" "The base MAC address Neutron will use for VIFs. The first 3 octets will " "remain unchanged. If the 4th octet is not 00, it will also be used. The " "others will be randomly generated." msgstr "" "Базовый mac-адрес, используемый в Neutron для VIF. Первые 3 октета не будут " "изменены. Если 4-й октет не равен 00, он тоже будет использоваться. " "Остальные будут созданы случайным образом." msgid "" "The base mac address used for unique DVR instances by Neutron. The first 3 " "octets will remain unchanged. If the 4th octet is not 00, it will also be " "used. The others will be randomly generated. The 'dvr_base_mac' *must* be " "different from 'base_mac' to avoid mixing them up with MAC's allocated for " "tenant ports. A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00. " "The default is 3 octet" msgstr "" "Базовый mac-адрес, используемый в Neutron для уникальных экземпляров DVR. " "Первые 3 октета не будут изменены. Если 4-й октет не равен 00, он тоже будет " "использоваться. Остальные будут созданы случайным образом. Параметр " "'dvr_base_mac' *должен* отличаться от 'base_mac' для предотвращения " "смешивания их с MAC-адресами, выделенными для портов арендатора. Пример 4 " "октетов: dvr_base_mac = fa:16:3f:4f:00:00. ПО умолчанию используется 3 октета" msgid "The core plugin Neutron will use" msgstr "Будет использоваться базовый модуль Neutron" msgid "The driver used to manage the DHCP server." msgstr "драйвер, используемый для управления сервером DHCP." msgid "The driver used to manage the virtual interface." msgstr "Драйвер, используемый для управления виртуальным интерфейсом." msgid "" "The email address to be used when creating PTR zones. 
If not specified, the " "email address will be admin@<dns_domain>" msgstr "" "Адрес электронной почты для создания зон PTR. Если не указан, будет " "использоваться admin@<dns_domain>" msgid "" "The maximum number of items returned in a single response, value was " "'infinite' or negative integer means no limit" msgstr "" "Максимальное количество элементов, возвращаемых в одном ответе; значение " "'infinite' или отрицательное целое означает отсутствие ограничения" msgid "" "The network type to use when creating the HA network for an HA router. By " "default or if empty, the first 'tenant_network_types' is used. This is " "helpful when the VRRP traffic should use a specific network which is not the " "default one." msgstr "" "Тип сети при создании сети HA для маршрутизатора HA. По умолчанию (или при " "пустом значении) используется первое значение 'tenant_network_types'. Такой " "подход помогает, если поток данных VRRP должен использовать сеть, не " "являющуюся стандартной." msgid "" "The number of seconds the agent will wait between polling for local device " "changes." msgstr "" "Интервал (в секундах) опроса агентом локальных устройств на предмет наличия " "изменений." msgid "" "The number of seconds to wait before respawning the ovsdb monitor after " "losing communication with it." msgstr "" "Время ожидания, в секундах, повторного порождения монитора ovsdb после " "потери соединения с ним." msgid "The number of sort_keys and sort_dirs must be same" msgstr "Количество sort_keys и sort_dirs должно быть одинаковым" msgid "" "The path for API extensions. Note that this can be a colon-separated list of " "paths. For example: api_extensions_path = extensions:/path/to/more/exts:/" "even/more/exts. The __path__ of neutron.extensions is appended to this, so " "if your extensions are in there you don't need to specify them here." msgstr "" "Путь для расширений API. Пути разделяются двоеточием. Пример: " "api_extensions_path = extensions:/path/to/more/exts:/even/more/exts. " "__path__ для расширений neutron добавляется автоматически, и если расширения " "содержатся там, их не требуется указывать здесь." msgid "The physical network name with which the HA network can be created." msgstr "Имя физической сети для создания сети HA." #, python-format msgid "The port '%s' was deleted" msgstr "Порт '%s' был удален" msgid "The port to bind to" msgstr "Порт для привязки" #, python-format msgid "The requested content type %s is invalid." msgstr "Запрашиваемый тип содержимого %s является недопустимым." msgid "The resource could not be found." msgstr "Ресурс не найден." #, python-format msgid "" "The router %(router_id)s has been already hosted by the L3 Agent " "%(agent_id)s." msgstr "Маршрутизатор %(router_id)s уже был размещен агентом L3 %(agent_id)s." msgid "" "The server has either erred or is incapable of performing the requested " "operation." msgstr "" "На сервере возникла ошибка, или он не поддерживает выполнение запрошенной " "операции." msgid "The service plugins Neutron will use" msgstr "Модули служб, которые будет использовать Neutron" #, python-format msgid "The subnet request could not be satisfied because: %(reason)s" msgstr "Запрос подсети не удается выполнить, причина: %(reason)s" #, python-format msgid "The subproject to execute the command against. Can be one of: '%s'." msgstr "Подпроект для выполнения команды. Допустимые значения: '%s'." msgid "The type of authentication to use" msgstr "Применяемый тип идентификации" msgid "" "There are routers attached to this network that depend on this policy for " "access." 
msgstr "" "К сети подключены маршрутизаторы, доступ к которым зависит от этой стратегии." msgid "" "True to delete all ports on all the OpenvSwitch bridges. False to delete " "ports created by Neutron on integration and external network bridges." msgstr "" "True - удалить все порты для всех мостов OpenvSwitch. False - удалить порты, " "созданные Neutron для мостов интеграции и внешних сетей." msgid "Tunnel IP value needed by the ML2 plugin" msgstr "Для модуля ML2 требуется значение IP-адреса туннеля" msgid "Tunnel bridge to use." msgstr "Используемый мост туннеля." msgid "" "Type of the nova endpoint to use. This endpoint will be looked up in the " "keystone catalog and should be one of public, internal or admin." msgstr "" "Тип используемой конечной точки nova. Поиск конечной точки выполняется в " "каталоге keystone, конечная точка может быть общедоступной, внутренней или " "административной." msgid "URL for connecting to designate" msgstr "URL для подключения к назначенному объекту" msgid "URL to database" msgstr "URL базы данных" #, python-format msgid "Unable to access %s" msgstr "Ошибка доступа к %s" #, python-format msgid "Unable to calculate %(address_type)s address because of:%(reason)s" msgstr "Не удалось вычислить адрес %(address_type)s, причина:%(reason)s" #, python-format msgid "Unable to convert value in %s" msgstr "Невозможно преобразовать значение в %s" msgid "Unable to create the SNAT Interface Port" msgstr "Не удалось создать порт интерфейса SNAT" #, python-format msgid "Unable to determine mac address for %s" msgstr "Невозможно определить mac-адрес для %s" #, python-format msgid "Unable to find '%s' in request body" msgstr "Отсутствует '%s' в теле запроса" #, python-format msgid "Unable to find IP address %(ip_address)s on subnet %(subnet_id)s" msgstr "Не удалось найти IP-адрес %(ip_address)s в подсети %(subnet_id)s" #, python-format msgid "Unable to find resource name in %s" msgstr "В %s не найдено имя ресурса" #, python-format msgid "" "Unable to identify a target field from:%s. Match should be in the form " "%%()s" msgstr "" "Невозможно идентифицировать целевое поле из %s. Совпадение должно быть в " "форме %%()s" msgid "Unable to provide external connectivity" msgstr "Не удалось предоставить связь со внешней сетью" msgid "Unable to provide tenant private network" msgstr "Не удалось предоставить частную сеть арендатора" #, python-format msgid "" "Unable to verify match:%(match)s as the parent resource: %(res)s was not " "found" msgstr "" "Невозможно проверить совпадение %(match)s, так как родительский ресурс " "%(res)s не найдено" #, python-format msgid "Unexpected label for script %(script_name)s: %(labels)s" msgstr "Непредвиденная метка для сценария %(script_name)s: %(labels)s" #, python-format msgid "Unexpected number of alembic branch points: %(branchpoints)s" msgstr "Непредвиденное число переходных точек ветвления: %(branchpoints)s" #, python-format msgid "Unexpected response code: %s" msgstr "Непредвиденный код ответа: %s" #, python-format msgid "Unexpected response: %s" msgstr "Непредвиденный ответ: %s" #, python-format msgid "Unit name '%(unit)s' is not valid." msgstr "Недопустимое имя модуля '%(unit)s'." #, python-format msgid "Unknown address type %(address_type)s" msgstr "Неизвестный тип адреса %(address_type)s" #, python-format msgid "Unknown attribute '%s'." msgstr "Неизвестный атрибут '%s'." #, python-format msgid "Unknown chain: %r" msgstr "Неизвестная цепочка: %r" #, python-format msgid "Unknown network type %(network_type)s." 
msgstr "Неизвестный тип сети %(network_type)s." msgid "Unmapped error" msgstr "Ошибка без преобразования" msgid "Unrecognized action" msgstr "Неизвестное действие" msgid "Unrecognized field" msgstr "Неизвестное поле" msgid "Unsupported Content-Type" msgstr "Не поддерживаемый тип содержимого" #, python-format msgid "Unsupported network type %(net_type)s." msgstr "Неподдерживаемый тип сети %(net_type)s." msgid "Unsupported request type" msgstr "Неподдерживаемый тип запроса" msgid "Updating default security group not allowed." msgstr "Обновление группы защиты по умолчанию не разрешено." msgid "" "Use ML2 l2population mechanism driver to learn remote MAC and IPs and " "improve tunnel scalability." msgstr "" "Использовать драйвер механизма ML2 l2population для определения удаленных " "MAC- и IP-адресов и улучшения масштабируемости туннеля." msgid "Use broadcast in DHCP replies." msgstr "Использовать широковещательные пакеты в ответах DHCP." msgid "Use either --delta or relative revision, not both" msgstr "Используйте или --delta, или относительную ревизию, но не оба" msgid "" "Use ipset to speed-up the iptables based security groups. Enabling ipset " "support requires that ipset is installed on L2 agent node." msgstr "" "Использовать ipset для ускорения обработки групп защиты на основе iptables. " "Поддержка ipset требует, чтобы ipset был установлен в узле агента L2." msgid "" "Use the root helper when listing the namespaces on a system. This may not be " "required depending on the security configuration. If the root helper is not " "required, set this to False for a performance improvement." msgstr "" "Использовать вспомогательное приложение для получения прав root для чтения " "пространств имен в системе. Это может не потребоваться при соответствующим " "образом настроенной конфигурации защиты. Если вспомогательное приложение для " "получения прав root не используется, присвойте параметру значение false для " "повышения производительности." msgid "" "Use veths instead of patch ports to interconnect the integration bridge to " "physical networks. Support kernel without Open vSwitch patch port support so " "long as it is set to True." msgstr "" "Использовать интерфейсы veth вместо коммутационных портов для связи моста " "интеграции с физическими мостами. Если параметр равен True, то может " "использоваться ядро без поддержки коммутационных портов Open vSwitch." msgid "" "User (uid or name) running metadata proxy after its initialization (if " "empty: agent effective user)." msgstr "" "Пользователь (uid или имя) использует proxy метаданных после инициализации " "(если пустое, используется пользователь агента). " msgid "User (uid or name) running this process after its initialization" msgstr "Пользователь (uid или имя) запускает этот процесс после инициализации" msgid "Username for connecting to designate in admin context" msgstr "" "Имя пользователя для подключения к назначенному объекту в административном " "контексте" msgid "VRRP authentication password" msgstr "Пароль идентификации VRRP" msgid "VRRP authentication type" msgstr "Тип идентификации VRRP" msgid "" "Value of host kernel tick rate (hz) for calculating minimum burst value in " "bandwidth limit rules for a port with QoS. See kernel configuration file for " "HZ value and tc-tbf manual for more information." msgstr "" "Частота отсчетов времени в ядре (в Гц) для вычисления минимального значения " "пакета в правилах ограничения пропускной способности канала для порта с QoS. 
" "За дополнительной информацией обратитесь к описанию параметра ядра HZ и " "руководству по tc-tbf." msgid "" "Value of latency (ms) for calculating size of queue for a port with QoS. See " "tc-tbf manual for more information." msgstr "" "Задержка в миллисекундах для вычисления размера очереди для порта с QoS. За " "дополнительной информацией обратитесь к руководству по tc-tbf." msgid "" "When proxying metadata requests, Neutron signs the Instance-ID header with a " "shared secret to prevent spoofing. You may select any string for a secret, " "but it must match here and in the configuration used by the Nova Metadata " "Server. NOTE: Nova uses the same config key, but in [neutron] section." msgstr "" "При проксировании запросов метаданных Neutron подписывает заголовок Instance-" "ID общим секретным ключом для предотвращения подмены. Ключом может быть " "любая строка, но она должна совпадать с указанной в конфигурации для сервера " "метаданных Nova. Примечание: Nova использует тот же ключ конфигурации, но в " "разделе [neutron]." msgid "" "Where to store Neutron state files. This directory must be writable by the " "agent." msgstr "" "Расположение хранения файлов состояния Neutron. Этот каталог должен быть " "доступен для записи агентом." msgid "" "With IPv6, the network used for the external gateway does not need to have " "an associated subnet, since the automatically assigned link-local address " "(LLA) can be used. However, an IPv6 gateway address is needed for use as the " "next-hop for the default route. If no IPv6 gateway address is configured " "here, (and only then) the neutron router will be configured to get its " "default route from router advertisements (RAs) from the upstream router; in " "which case the upstream router must also be configured to send these RAs. " "The ipv6_gateway, when configured, should be the LLA of the interface on the " "upstream router. If a next-hop using a global unique address (GUA) is " "desired, it needs to be done via a subnet allocated to the network and not " "through this parameter. " msgstr "" "При использовании IPv6 применяемой для внешнего шлюза сети не обязательно " "иметь связанную подсеть, так как может быть использован автоматически " "назначаемый адрес link-local (LLA). Однако, адрес шлюза IPv6 необходим в " "качестве следующего узла для маршрута по умолчанию. Если адрес шлюза IPv6 не " "указан здесь, (и только в этом случае) будет настроен маршрутизатор Neutron " "для получения маршрута по умолчанию из объявлений маршрутизатора (RA) от " "маршрутизатора выше по течению. В этом случае маршрутизатор выше по течению " "должен быть также настроен для отправки этих RA. Когда указано значение " "ipv6_gateway, оно должно указывать на LLA интерфейса маршрутизатора выше по " "течению. Если следующий узел, использующийглобальный уникальный адрес (GUA), " "является предпочитаем, это необходимо обеспечить посредством подсети, " "выделенной для сети, а не с помощью этого параметра. 
" msgid "You must implement __call__" msgstr "Отсутствует реализация __call__" msgid "" "You must provide a config file for bridge - either --config-file or " "env[NEUTRON_TEST_CONFIG_FILE]" msgstr "" "Необходимо задать файл конфигурации для моста, или --config-file, или " "env[NEUTRON_TEST_CONFIG_FILE]" msgid "You must provide a revision or relative delta" msgstr "Необходимо указать ревизию или относительную дельта" msgid "a subnetpool must be specified in the absence of a cidr" msgstr "пул подсетей должен быть указан в отсутствие cidr" msgid "add_ha_port cannot be called inside of a transaction." msgstr "add_ha_port нельзя вызывать внутри транзакции." msgid "allocation_pools allowed only for specific subnet requests." msgstr "allocation_pools разрешено только для определенных запросов подсетей. " msgid "allocation_pools are not in the subnet" msgstr "allocation_pools не принадлежит подсети" msgid "allocation_pools use the wrong ip version" msgstr "Недопустимая версия IP для allocation_pools" msgid "already a synthetic attribute" msgstr "атрибут уже является синтетическим" msgid "binding:profile value too large" msgstr "Слишком большое значение binding:profile" #, python-format msgid "cannot perform %(event)s due to %(reason)s" msgstr "не удается выполнить %(event)s, причина: %(reason)s" msgid "cidr and prefixlen must not be supplied together" msgstr "cidr и prefixlen не должны быть указаны вместе" msgid "dns_domain cannot be specified without a dns_name" msgstr "dns_domain должен указываться вместе с dns_name" msgid "dns_name cannot be specified without a dns_domain" msgstr "dns_name должен указываться вместе с dns_domain" msgid "fixed_ip_address cannot be specified without a port_id" msgstr "fixed_ip_address должен указываться вместе с port_id" #, python-format msgid "has device owner %s" msgstr "имеет владельца устройства %s" msgid "in use" msgstr "Используется" #, python-format msgid "ip command failed on device %(dev_name)s: %(reason)s" msgstr "Не удалось выполнить команду ip на устройстве %(dev_name)s: %(reason)s" #, python-format msgid "ip link capability %(capability)s is not supported" msgstr "Функция ip link %(capability)s не поддерживается" #, python-format msgid "ip link command is not supported: %(reason)s" msgstr "Команда ip link не поддерживается: %(reason)s" msgid "ip_version must be specified in the absence of cidr and subnetpool_id" msgstr "ip_version должно быть указано при отсутствии cidr and subnetpool_id" msgid "ipv6_address_mode is not valid when ip_version is 4" msgstr "ipv6_address_mode недопустим, когда ip_version - 4" msgid "ipv6_ra_mode is not valid when ip_version is 4" msgstr "ipv6_ra_mode недопустим, когда ip_version - 4" #, python-format msgid "" "ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " "'%(addr_mode)s' is not valid. If both attributes are set, they must be the " "same value" msgstr "" "Для ipv6_ra_mode задано значение '%(ra_mode)s', а значение " "ipv6_address_mode, заданное как '%(addr_mode)s', является недопустимым. 
Если " "указаны оба атрибута, их значения должны совпадать" msgid "mac address update" msgstr "Обновление mac-адреса" msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "Необходимо задать ровно 2 аргумента - cidr и MAC" msgid "network_type required" msgstr "Требуется network_type" #, python-format msgid "network_type value '%s' not supported" msgstr "Для network_type не поддерживается значение '%s'" msgid "new subnet" msgstr "новая подсеть" #, python-format msgid "physical_network '%s' unknown for flat provider network" msgstr "Неизвестная физическая сеть '%s' для одноуровневой сети провайдера" msgid "physical_network required for flat provider network" msgstr "Для одноуровневой сети провайдера требуется physical_network" #, python-format msgid "provider:physical_network specified for %s network" msgstr "Для сети %s указан provider:physical_network" msgid "respawn_interval must be >= 0 if provided." msgstr "Значение respawn_interval, если оно указано, должно быть >= 0." #, python-format msgid "segmentation_id out of range (%(min)s through %(max)s)" msgstr "segmentation_id вне диапазона (%(min)s - %(max)s)" msgid "segmentation_id requires physical_network for VLAN provider network" msgstr "" "Для segmentation_id требуется physical_network для сети VLAN провайдера" msgid "shared attribute switching to synthetic" msgstr "общий атрибут изменен на синтетический" #, python-format msgid "" "subnetpool %(subnetpool_id)s cannot be updated when associated with shared " "address scope %(address_scope_id)s" msgstr "" "Пул подсетей %(subnetpool_id)s нельзя изменять, если он связан с " "общедоступной адресной областью %(address_scope_id)s" msgid "subnetpool_id and use_default_subnetpool cannot both be specified" msgstr "" "Нельзя одновременно задавать значения subnetpool_id и use_default_subnetpool" msgid "the nexthop is not connected with router" msgstr "следующий узел не соединен с маршрутизатором" msgid "the nexthop is used by router" msgstr "следующий узел используется маршрутизатором" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982290.9750416 neutron-16.0.0.0b2.dev214/neutron/locale/zh_CN/0000755000175000017500000000000000000000000021075 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3190448 neutron-16.0.0.0b2.dev214/neutron/locale/zh_CN/LC_MESSAGES/0000755000175000017500000000000000000000000022662 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/locale/zh_CN/LC_MESSAGES/neutron.po0000644000175000017500000025050000000000000024716 0ustar00coreycorey00000000000000# Translations template for neutron. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the neutron project. # # Translators: # Allerson Yao, 2015 # Zhong Chaoliang , 2013 # lyndon zhang , 2014 # MA QIANG , 2014 # MA QIANG , 2014 # Zhong Chaoliang , 2013 # 汪军 , 2015 # 陈展奇 , 2013-2014 # 颜海峰 , 2014 # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: neutron VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2019-12-20 15:01+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 05:55+0000\n" "Last-Translator: Copied by Zanata \n" "Language: zh_CN\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Chinese (China)\n" #, python-format msgid "" "\n" "Command: %(cmd)s\n" "Exit code: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" msgstr "" "\n" "命令:%(cmd)s\n" "退出代码:%(code)s\n" "标准输入:%(stdin)s\n" "标准输出:%(stdout)s\n" "标准错误:%(stderr)s" #, python-format msgid "" "%(branch)s HEAD file does not match migration timeline head, expected: " "%(head)s" msgstr "%(branch)s HEAD 文件与迁移时间线头不匹配,需要:%(head)s" #, python-format msgid "" "%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' " "and '%(desc)s'" msgstr "" "%(invalid_dirs)s 对于 sort_dirs 是无效值,有效值是“%(asc)s”和“%(desc)s”" #, python-format msgid "%(key)s prohibited for %(tunnel)s provider network" msgstr "对于 %(tunnel)s 提供程序网络,已禁止 %(key)s" #, python-format msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" msgstr "%(name)s“%(addr)s”与 ip_version“%(ip_version)s”不匹配" #, python-format msgid "%s cannot be called while in offline mode" msgstr "在 %s 处于脱机方式时,无法对其进行调用" #, python-format msgid "%s is invalid attribute for sort_keys" msgstr "%s 对于 sort_keys 是无效属性" #, python-format msgid "%s must implement get_port_from_device or get_ports_from_devices." msgstr "%s 必须实现 get_port_from_device 或 get_ports_from_devices。" #, python-format msgid "%s prohibited for VLAN provider network" msgstr "VLAN提供者网络中禁止%s" #, python-format msgid "%s prohibited for flat provider network" msgstr "在平面供应商网络中禁止%s" #, python-format msgid "%s prohibited for local provider network" msgstr "在本地供应商网络中禁止%s" #, python-format msgid "'%s' is not a valid RBAC object type" msgstr "“%s”不是有效的 RBAC 对象类型" #, python-format msgid "'%s' is not supported for filtering" msgstr "“%s”不支持进行过滤" #, python-format msgid "'module' object has no attribute '%s'" msgstr "“module”对象没有属性“%s”" msgid "'port_max' is smaller than 'port_min'" msgstr "“port_max”小于“port_min”" msgid "0 is not allowed as CIDR prefix length" msgstr "0不允许作为CIDR前缀长度" msgid "A cidr must be specified in the absence of a subnet pool" msgstr "在缺少子网池的情况下,必须指定 cidr" msgid "" "A decimal value as Vendor's Registered Private Enterprise Number as required " "by RFC3315 DUID-EN." msgstr "作为 RFC3315 DUID-EN 所需要的供应商的已注册私营企业号的十进制值。" #, python-format msgid "A default external network already exists: %(net_id)s." msgstr "缺省外部网络已存在:%(net_id)s。" msgid "" "A default subnetpool for this IP family has already been set. Only one " "default may exist per IP family" msgstr "已对此 IP 系列设置缺省子网池。对于每个 IP 系列,只能有一个缺省子网池。" msgid "A metering driver must be specified" msgstr "必须指定测量驱动程序" msgid "Access to this resource was denied." msgstr "访问该资源被拒绝。" msgid "Action to be executed when a child process dies" msgstr "当子进程终止时要执行的操作" msgid "" "Add comments to iptables rules. Set to false to disallow the addition of " "comments to generated iptables rules that describe each rule's purpose. " "System must support the iptables comments module for addition of comments." 
msgstr "" "向 iptable 规则添加注释。设置为 false 以禁止向描述规则用途的已生成 iptable 添" "加注释。系统必须支持 iptable 注释模块以添加注释。" msgid "Address not present on interface" msgstr "接口上没有地址" msgid "Adds test attributes to core resources." msgstr "将测试属性添加至核心资源。" #, python-format msgid "Agent %(id)s is not a L3 Agent or has been disabled" msgstr "代理 %(id)s 不是 L3 代理或已禁用" msgid "" "Agent starts with admin_state_up=False when enable_new_agents=False. In the " "case, user's resources will not be scheduled automatically to the agent " "until admin changes admin_state_up to True." msgstr "" "当 enable_new_agents=False 时,代理将使用 admin_state_up=False 启动。在这种情" "况下,将不会自动对代理调度用户的资源,直到管理员将 admin_state_up 更改为 " "True 为止。" #, python-format msgid "Agent updated: %(payload)s" msgstr "进程更新: %(payload)s" msgid "Allow auto scheduling networks to DHCP agent." msgstr "允许自动对 DHCP 代理调度网络。" msgid "Allow auto scheduling of routers to L3 agent." msgstr "允许自动对 L3 代理调度路由器。" msgid "" "Allow overlapping IP support in Neutron. Attention: the following parameter " "MUST be set to False if Neutron is being used in conjunction with Nova " "security groups." msgstr "" "允许在 Neutron 中重叠 IP 支持。注意:如果将 Neutron 与 Nova 安全组配合使用," "那么以下参数必须设置为 False。" msgid "Allow running metadata proxy." msgstr "允许运行 metadata代理" msgid "Allow sending resource operation notification to DHCP agent" msgstr "允许将资源操作通知发送至 DHCP 代理" msgid "Allow the creation of PTR records" msgstr "允许创建 PTR 记录" msgid "Allow the usage of the bulk API" msgstr "允许使用成批 API" msgid "Allow to perform insecure SSL (https) requests to nova metadata" msgstr "允许对 nova 元数据执行非安全 SSL (HTTPS) 请求" msgid "" "Allows for serving metadata requests coming from a dedicated metadata access " "network whose CIDR is 169.254.169.254/16 (or larger prefix), and is " "connected to a Neutron router from which the VMs send metadata:1 request. In " "this case DHCP Option 121 will not be injected in VMs, as they will be able " "to reach 169.254.169.254 through a router. This option requires " "enable_isolated_metadata = True." msgstr "" "允许处理来自专用元数据访问网络的元数据请求,此网络的 CIDR 为 " "169.254.169.254/16(或更大前缀),并且连接至 VM 从其发送元数据(1 个请求)的 " "Neutron 路由器。在此情况下,不会在 VM 中插入 DHCP 选项 121,因为它们能够通过" "路由器到达 169.254.169.254。此选项要求 enable_isolated_metadata = True。" msgid "An RBAC policy already exists with those values." msgstr "已存在带有这些值的 RBAC 策略。" msgid "An identifier must be specified when updating a subnet" msgstr "更新子网时,必须指定标识" msgid "" "An ordered list of extension driver entrypoints to be loaded from the " "neutron.ml2.extension_drivers namespace. For example: extension_drivers = " "port_security,qos" msgstr "" "要从 neutron.ml2.extension_drivers 名称空间装入的扩展驱动程序入口点的有序列" "表。例如:extension_drivers = port_security,qos" msgid "" "An ordered list of networking mechanism driver entrypoints to be loaded from " "the neutron.ml2.mechanism_drivers namespace." msgstr "" "要从 neutron.ml2.mechanism_drivers 名称空间装入的联网机制驱动程序入口点的已排" "序列表。" msgid "An unknown error has occurred. Please try your request again." msgstr "发生未知错误。请再次尝试您的请求。" msgid "Async process didn't respawn" msgstr "同步进程未重新衍生" msgid "Authorization URL for connecting to designate in admin context" msgstr "管理员上下文中要指定的连接授权 URL" msgid "Automatically remove networks from offline DHCP agents." msgstr "自动从脱机 DHCP 代理移除网络。" msgid "" "Automatically reschedule routers from offline L3 agents to online L3 agents." 
msgstr "将路由器从脱机 L3 代理自动重新安排至联机 L3 代理程序。" msgid "Availability zone of this node" msgstr "此节点的可用区域" msgid "Available commands" msgstr "可用的命令" #, python-format msgid "Base MAC: %s" msgstr "基本 MAC:%s" msgid "" "Base log dir for dnsmasq logging. The log contains DHCP and DNS log " "information and is useful for debugging issues with either DHCP or DNS. If " "this section is null, disable dnsmasq log." msgstr "" "用于 dnsmasq 日志记录的基本日志目录。日志包含 DHCP 和 DNS 日志信息,对于调试 " "DHCP 或 DNS 存在的问题很有用。如果此部分为空,请禁用 dnsmasq 日志。" msgid "Body contains invalid data" msgstr "主体中包含无效数据" msgid "Bulk operation not supported" msgstr "成批操作不受支持" msgid "CIDR to monitor" msgstr "要监视的 CIDR" #, python-format msgid "Callback for %(resource_type)s not found" msgstr "找不到针对 %(resource_type)s 的回调" #, python-format msgid "Callback for %(resource_type)s returned wrong resource type" msgstr "针对 %(resource_type)s 的回调返回了错误的资源类型" #, python-format msgid "Cannot add floating IP to port %s that has no fixed IPv4 addresses" msgstr "无法将浮动 IP 添加至没有固定 IPv4 地址的端口 %s" #, python-format msgid "Cannot add multiple callbacks for %(resource_type)s" msgstr "无法添加针对 %(resource_type)s 的多个回调" #, python-format msgid "Cannot allocate IPv%(req_ver)s subnet from IPv%(pool_ver)s subnet pool" msgstr "无法从 IPv%(pool_ver)s 子网池分配 IPv%(req_ver)s 子网" msgid "Cannot allocate requested subnet from the available set of prefixes" msgstr "无法从可用的一组前缀分配所请求的子网" msgid "Cannot disable enable_dhcp with ipv6 attributes set" msgstr "在设置了 ipv6 属性的情况下,无法禁用 enable_dhcp" #, python-format msgid "Cannot handle subnet of type %(subnet_type)s" msgstr "无法处理类型为 %(subnet_type)s 的子网" msgid "Cannot have multiple IPv4 subnets on router port" msgstr "路由器端口上不能有多个 IPv4 子网" #, python-format msgid "" "Cannot have multiple router ports with the same network id if both contain " "IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s" msgstr "" "不得存在多个具有相同网络标识的路由器端口(如果它们都包含 IPv6 子网)。现有端" "口 %(p)s 具有 IPv6 子网和网络标识 %(nid)s" #, python-format msgid "" "Cannot host distributed router %(router_id)s on legacy L3 agent %(agent_id)s." 
msgstr "无法在传统 L3 代理程序 %(agent_id)s 上托管分布式路由器 %(router_id)s。" msgid "Cannot specify both subnet-id and port-id" msgstr "无法同时指定 subnet-id 和 port-id" msgid "Cannot understand JSON" msgstr "无法理解 JSON" #, python-format msgid "Cannot update read-only attribute %s" msgstr "无法更新只读属性 %s" msgid "Certificate Authority public key (CA cert) file for ssl" msgstr "用于 SSL 的认证中心公用密钥(CA 证书)文件" msgid "Check ebtables installation" msgstr "检查 ebtables 安装" msgid "Check for ARP header match support" msgstr "检查 ARP 头匹配支持" msgid "Check for ARP responder support" msgstr "检查 ARP 响应程序支持" msgid "Check for ICMPv6 header match support" msgstr "检查 ICMPv6 头匹配支持" msgid "Check for OVS Geneve support" msgstr "检查 OVS Geneve 支持" msgid "Check for OVS vxlan support" msgstr "检查OVS vxlan支持" msgid "Check for VF management support" msgstr "检查 VF 管理支持" msgid "Check for iproute2 vxlan support" msgstr "检查 iproute2 vxlan 支持" msgid "Check for nova notification support" msgstr "检查 nova 通知支持" msgid "Check for patch port support" msgstr "检查补丁端口支持" msgid "Check ip6tables installation" msgstr "检查 ip6tables 安装" msgid "Check ipset installation" msgstr "检查 ipset 安装" msgid "Check keepalived IPv6 support" msgstr "检查保持活动的 IPv6 支持" msgid "Check minimal dibbler version" msgstr "检查最低点播器版本" msgid "Check minimal dnsmasq version" msgstr "检查最小 dnsmasq 版本" msgid "Check netns permission settings" msgstr "检查 netns 许可权设置" msgid "Check ovs conntrack support" msgstr "检查 ovs conntrack 支持" msgid "Check ovsdb native interface support" msgstr "检查 ovsdb 本机接口支持" #, python-format msgid "" "Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of " "subnet %(sub_id)s" msgstr "" "子网 %(subnet_id)s 的 cidr %(subnet_cidr)s 与子网 %(sub_id)s 的 cidr " "%(cidr)s 重叠" msgid "Cleanup resources of a specific agent type only." msgstr "仅清除特定代理程序类型的资源。" msgid "Client certificate for nova metadata api server." msgstr "nova 元数据 API 服务器的客户机证书。" msgid "" "Comma-separated list of : tuples, mapping " "network_device to the agent's node-specific list of virtual functions that " "should not be used for virtual networking. vfs_to_exclude is a semicolon-" "separated list of virtual functions to exclude from network_device. The " "network_device in the mapping should appear in the physical_device_mappings " "list." msgstr "" ": 元组的逗号分隔列表,这些元组将 " "network_device 映射至代理程序的特定于节点的不应用于虚拟联网的功能列表。" "vfs_to_exclude 是要从 network_device 中排除的虚拟功能的分号分隔列表。映射中" "的 network_vlan_ranges 应出现在 physical_device_mappings 列表中。" msgid "" "Comma-separated list of : tuples mapping " "physical network names to the agent's node-specific physical network device " "interfaces of SR-IOV physical function to be used for VLAN networks. All " "physical networks listed in network_vlan_ranges on the server should have " "mappings to appropriate interfaces on each agent." msgstr "" " : 元组的逗号分隔列表,这些元组将物理网络名" "称映射至代理程序的 SR-IOV 物理功能的特定于节点的物理网络设备接口(将用于 " "VLAN 网络)。服务器上的 network_vlan_ranges 中列示的所有物理网络在每个代理程" "序上应具有指向相应接口的映射。" msgid "" "Comma-separated list of : tuples " "mapping physical network names to the agent's node-specific physical network " "interfaces to be used for flat and VLAN networks. All physical networks " "listed in network_vlan_ranges on the server should have mappings to " "appropriate interfaces on each agent." 
msgstr "" ": 元组的逗号分隔列表,这些元组将物理网" "络名称映射至代理程序的特定于节点的物理网络接口(将用于平面网络和 VLAN 网" "络)。服务器上的 network_vlan_ranges 中列示的所有物理网络在每个代理程序上应具" "有指向相应接口的映射。" msgid "" "Comma-separated list of : tuples enumerating ranges of GRE " "tunnel IDs that are available for tenant network allocation" msgstr "" "可用于租户网络分配的 GRE 隧道标识的 : 元组枚举范围的逗号分" "隔列表" msgid "" "Comma-separated list of : tuples enumerating ranges of " "Geneve VNI IDs that are available for tenant network allocation" msgstr "" "枚举可用于租户网络分配的 Geneve VNI 标识的范围的: 元组的逗" "号分隔列表" msgid "" "Comma-separated list of : tuples enumerating ranges of " "VXLAN VNI IDs that are available for tenant network allocation" msgstr "" "可用于租户网络分配的 VXLAN VNI 标识的 : 元组枚举范围的逗号" "分隔列表" msgid "" "Comma-separated list of the DNS servers which will be used as forwarders." msgstr "将用作转发器的 DNS 服务器的逗号分隔列表。" msgid "Command to execute" msgstr "要执行的命令" msgid "Config file for interface driver (You may also use l3_agent.ini)" msgstr "用于接口驱动程序的配置文件(还可使用 l3_agent.ini)" #, python-format msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s" msgstr "CIDR %(cidr)s 具有冲突值 ethertype %(ethertype)s " msgid "" "Controls whether the neutron security group API is enabled in the server. It " "should be false when using no security groups or using the nova security " "group API." msgstr "" "控制是否在服务器中启用了 neutron 安全组 API。未使用安全组或使用 nova安全组 " "API 时,它应该为 false。" #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" msgstr "在尝试%(time)d 秒之后不能绑定 %(host)s:%(port)s " msgid "Could not deserialize data" msgstr "未能对数据进行反序列化" msgid "" "DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite " "lease times." msgstr "DHCP 租赁持续时间(以秒计)。使用 -1 告诉 dnsmasq 使用无限租赁时间。" msgid "" "DVR deployments for VXLAN/GRE/Geneve underlays require L2-pop to be enabled, " "in both the Agent and Server side." msgstr "VXLAN/GRE/Geneve 底层的 DVR 部署需要在代理端和服务器端都启用 L2-pop。" msgid "" "Database engine for which script will be generated when using offline " "migration." msgstr "使用脱机迁移时将对其生成脚本的数据库引擎。" msgid "Default external networks must be shared to everyone." msgstr "缺省外部网络必须共享给每个人。" msgid "" "Default network type for external networks when no provider attributes are " "specified. By default it is None, which means that if provider attributes " "are not specified while creating external networks then they will have the " "same type as tenant networks. Allowed values for external_network_type " "config option depend on the network type values configured in type_drivers " "config option." msgstr "" "在未指定提供者属性时,外部网络的缺省网络类型。缺省情况下,它为“无”,这意味着" "如果在创建外部网络时未指定提供者属性,那么它们将与租户网络具有相同类型。" "external_network_type 配置选项的允许值取决于在 type_drivers 配置选项中所配置" "的网络类型值。" msgid "" "Default number of RBAC entries allowed per tenant. A negative value means " "unlimited." msgstr "每个租户允许的缺省 RBAC 条目数。负值表示无限制。" msgid "" "Default number of resource allowed per tenant. A negative value means " "unlimited." msgstr "每个租户允许的缺省资源数。负值表示无限。" msgid "Default security group" msgstr "缺省安全组" msgid "Default security group already exists." msgstr "缺省安全组已存在。" msgid "" "Default value of availability zone hints. The availability zone aware " "schedulers use this when the resources availability_zone_hints is empty. " "Multiple availability zones can be specified by a comma separated string. " "This value can be empty. In this case, even if availability_zone_hints for a " "resource is empty, availability zone is considered for high availability " "while scheduling the resource." 
msgstr "" "可用区域提示的缺省值。如果资源的 availability_zone_hints 为空,那么可用区域感" "知调度程序使用此项。可通过逗号分隔的字符串来指定多个可用区域。此值可为空。在" "此情况下,即使资源的 availability_zone_hints 为空,那么调度该资源时,可用区域" "仍被视为具备高可用性。" msgid "" "Define the default value of enable_snat if not provided in " "external_gateway_info." msgstr "" "如果未提供 enable_snat 的值,请在 external_gateway_info 中定义 enable_snat 的" "缺省值。" msgid "" "Defines providers for advanced services using the format: :" ":[:default]" msgstr "" "会使用以下格式为高级服务定义提供程序:::[:" "default]" msgid "Delete the namespace by removing all devices." msgstr "请通过除去所有设备来删除名称空间。" #, python-format msgid "Deleting port %s" msgstr "正在删除端口 %s" #, python-format msgid "Deployment error: %(reason)s." msgstr "部署错误:%(reason)s。" msgid "Destroy IPsets even if there is an iptables reference." msgstr "即使存在 iptables 引用,也会破坏 IPset。" msgid "Destroy all IPsets." msgstr "破坏所有 IPset。" #, python-format msgid "Device %(dev_name)s in mapping: %(mapping)s not unique" msgstr "映射 %(mapping)s 中的设备 %(dev_name)s 并非唯一" msgid "Device not found" msgstr "找不到设备" msgid "Domain to use for building the hostnames" msgstr "要用于构建主机名的域" msgid "Downgrade no longer supported" msgstr "降级不再支持" #, python-format msgid "Driver %s is not unique across providers" msgstr "驱动程序 %s 在提供程序中不唯一" msgid "Driver for external DNS integration." msgstr "外部 DNS 集成的驱动程序。" msgid "Driver for security groups firewall in the L2 agent" msgstr "L2 代理程序中的安全组防火墙的驱动程序" msgid "Driver to use for scheduling network to DHCP agent" msgstr "要用于对 DHCP 代理调度网络的驱动程序" msgid "Driver to use for scheduling router to a default L3 agent" msgstr "要用于对缺省 L3 代理调度路由器的驱动程序" msgid "" "Driver used for ipv6 prefix delegation. This needs to be an entry point " "defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg for " "entry points included with the neutron source." msgstr "" "用于 IPv6 前缀授权的驱动程序。这需要是 neutron.agent.linux.pd_drivers 名称空" "间中定义的入口点。请参阅 setup.cfg 以了解 Neutron 源随附的入口点。" #, python-format msgid "" "Duplicate L3HARouterAgentPortBinding is created for router(s) %(router)s. " "Database cannot be upgraded. Please, remove all duplicates before upgrading " "the database." msgstr "" "对路由器 %(router)s 创建了重复 L3HARouterAgentPortBinding。无法升级数据库。请" "移除所有重复项,然后升级数据库。" msgid "Duplicate Security Group Rule in POST." msgstr "POST 中的安全组规则重复。" msgid "Duplicate address detected" msgstr "检测到重复地址。" #, python-format msgid "ERROR: %s" msgstr "错误:%s" msgid "" "ERROR: Unable to find configuration file via the default search paths (~/." "neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!" msgstr "" "错误:无法通过缺省搜索路径(~/.neutron/、~/、/etc/neutron/ 和 /etc/)以及“--" "config-file”选项找到配置文件!" msgid "" "Either one of parameter network_id or router_id must be passed to _get_ports " "method." msgstr "参数 network_id 或 router_id 的其中之一必须传递至_get_ports 方法。" msgid "Either subnet_id or port_id must be specified" msgstr "必须指定 subnet_id 或 port_id" msgid "Enable HA mode for virtual routers." msgstr "为虚拟路由器启用HA模式。" msgid "Enable SSL on the API server" msgstr "在API 服务器上打开SSL" msgid "" "Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 " "plugin using linuxbridge mechanism driver" msgstr "" "请在代理上启用 VXLAN。可在通过使用 linuxbridge 机制驱动程序由 ml2 插件管理代" "理时启用" msgid "" "Enable services on an agent with admin_state_up False. If this option is " "False, when admin_state_up of an agent is turned False, services on it will " "be disabled. Agents with admin_state_up False are not selected for automatic " "scheduling regardless of this option. 
But manual scheduling to such agents " "is available if this option is True." msgstr "" "在 admin_state_up 为 False 的代理上启用服务。如果此选项为 False,那么当代理" "的 admin_state_up 变为 False 时,将禁用该代理上的服务。无论此选项如何,都不会" "选择 admin_state_up 为 False 的代理进行自动调度。但是,如果此选项为 True,那" "么可以手动调度这样的代理。" msgid "" "Enables IPv6 Prefix Delegation for automatic subnet CIDR allocation. Set to " "True to enable IPv6 Prefix Delegation for subnet allocation in a PD-capable " "environment. Users making subnet creation requests for IPv6 subnets without " "providing a CIDR or subnetpool ID will be given a CIDR via the Prefix " "Delegation mechanism. Note that enabling PD will override the behavior of " "the default IPv6 subnetpool." msgstr "" "对自动子网 CIDR 分配启用 IPv6 前缀授权。设置为 True 将在支持 PD 的环境中对子" "网分配启用 IPv6 前缀授权。如果用户对 IPv6 子网发出创建子网请求但未提供 CIDR " "或子网池标识,那么系统将通过前缀授权机制为该用户提供 CIDR。请注意,启用 PD 将" "覆盖缺省 IPv6 子网池的行为。" msgid "" "Enables the dnsmasq service to provide name resolution for instances via DNS " "resolvers on the host running the DHCP agent. Effectively removes the '--no-" "resolv' option from the dnsmasq process arguments. Adding custom DNS " "resolvers to the 'dnsmasq_dns_servers' option disables this feature." msgstr "" "启用 dnsmasq 服务以在运行 DHCP 代理程序的主机上通过 DNS 解析器提供实例的名称" "解析。实际上会从 dnsmasq 进程自变量中移除“--no-resolv”选项。将定制 DNS 解析器" "添加至“dnsmasq_dns_servers”选项会禁用此功能部件。" #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "尝试执行该操作时发生错误 %(reason)s。" #, python-format msgid "Error parsing dns address %s" msgstr "解析 dns 地址 %s 时出错" #, python-format msgid "Error while reading %s" msgstr "读取 %s 时出错" #, python-format msgid "" "Exceeded %s second limit waiting for address to leave the tentative state." msgstr "等待地址脱离暂定状态的时间超过了 %s 秒的限制。" msgid "Existing prefixes must be a subset of the new prefixes" msgstr "现有前缀必须是新前缀的子集" #, python-format msgid "" "Exit code: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: " "%(stderr)s" msgstr "" "退出代码:%(returncode)d;Stdin:%(stdin)s;Stdout:%(stdout)s;Stderr:" "%(stderr)s" #, python-format msgid "Extension %(driver)s failed." msgstr "扩展 %(driver)s 失败。" #, python-format msgid "" "Extension driver %(driver)s required for service plugin %(service_plugin)s " "not found." msgstr "找不到服务插件 %(service_plugin)s 所需的扩展驱动程序 %(driver)s。" msgid "" "Extension to use alongside ml2 plugin's l2population mechanism driver. It " "enables the plugin to populate VXLAN forwarding table." msgstr "" "要与 ml2 插件的 l2population 机制驱动程序一起使用的扩展。它使该插件能够填充 " "VXLAN 转发表。" #, python-format msgid "Extension with alias %s does not exist" msgstr "具有别名 %s 的扩展不存在" msgid "Extensions list to use" msgstr "要使用的扩展列表" #, python-format msgid "External IP %s is the same as the gateway IP" msgstr "外部 IP %s 和网关 IP 相同" #, python-format msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found." msgstr "重新安排路由器 %(router_id)s 失败:找不到合格 L3 代理。" #, python-format msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s." msgstr "将路由器 %(router_id)s 调度到 L3 代理 %(agent_id)s 失败。" #, python-format msgid "" "Failed to create port on network %(network_id)s, because fixed_ips included " "invalid subnet %(subnet_id)s" msgstr "" "未能在网络 %(network_id)s 上创建端口,因为 fixed_ips 包括了无效子网 " "%(subnet_id)s" #, python-format msgid "Failed to locate source for %s."
msgstr "未能找到 %s 的源。" msgid "Failed to remove supplemental groups" msgstr "未能移除补充组" #, python-format msgid "Failed to set gid %s" msgstr "设置gid %s 失败" #, python-format msgid "Failed to set uid %s" msgstr "设置uid %s 失败" #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "未能将 %(type)s 隧道端口设置为 %(ip)s" msgid "Failure applying iptables rules" msgstr "应用 iptable 规则时失败" #, python-format msgid "Failure waiting for address %(address)s to become ready: %(reason)s" msgstr "等待地址 %(address)s 变得就绪时发生故障:%(reason)s" msgid "Flat provider networks are disabled" msgstr "平面供应商网络被禁用" msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "对于 TCP/UDP 协议,port_range_min 必须小于等于 port_range_max" msgid "Force ip_lib calls to use the root helper" msgstr "强制ip_lib呼叫使用root helper" msgid "Gateway IP version inconsistent with allocation pool version" msgstr "网关 IP 版本与分配池版本不一致" msgid "Gateway is not valid on subnet" msgstr "网关在子网上无效" msgid "" "Group (gid or name) running metadata proxy after its initialization (if " "empty: agent effective group)." msgstr "" "在元数据代理的初始化之后,运行该代理的组(gid 或名称),(如果此组为空,那么" "这是代理有效组)。" msgid "Group (gid or name) running this process after its initialization" msgstr "在此进程的初始化之后,运行此进程的组(gid 或名称)" msgid "" "Hostname to be used by the Neutron server, agents and services running on " "this machine. All the agents and services running on this machine must use " "the same host value." msgstr "" "Neutron 服务器以及此机器上运行的代理程序和服务要使用的主机名。此机器上运行的" "所有代理程序和服务必须使用同一主机值。" #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" "min) is missing." msgstr "" "已提供 ICMP 代码 (port-range-max) %(value)s,但缺少 ICMP 类型 (port-range-" "min)。" msgid "ID of network" msgstr "网络的标识" msgid "ID of network to probe" msgstr "要探测的网络的标识" msgid "ID of probe port to delete" msgstr "要删除的探测器端口的标识" msgid "ID of probe port to execute command" msgstr "用于执行命令的探测器端口的标识" msgid "ID of the router" msgstr "路由器ID" #, python-format msgid "IP address %(ip)s already allocated in subnet %(subnet_id)s" msgstr "在子网 %(subnet_id)s 中已分配 IP 地址 %(ip)s" #, python-format msgid "IP address %(ip)s does not belong to subnet %(subnet_id)s" msgstr "IP 地址 %(ip)s 不属于子网 %(subnet_id)s" msgid "IP allocation failed. Try again later." msgstr "IP 分配失败。请稍后重试。" msgid "IP allocation requires subnet_id or ip_address" msgstr "IP 分配需要 subnet_id 或 ip_address" #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables rules:\n" "%s" msgstr "" "IPTablesManager.apply 无法应用以下 iptables规则集:\n" "%s" msgid "IPv6 Address Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "对于前缀授权,IPv6 寻址方式必须为 SLAAC 或者“无状态”。" msgid "IPv6 RA Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "对于前缀授权,IPv6 RA 方式必须为 SLAAC 或者“无状态”。" #, python-format msgid "" "IPv6 address %(ip)s cannot be directly assigned to a port on subnet " "%(subnet_id)s as the subnet is configured for automatic addresses" msgstr "" "无法直接将 IPv6 地址 %(ip)s 分配给子网%(subnet_id)s,因为针对自动地址配置了该" "子网" #, python-format msgid "" "IPv6 subnet %s configured to receive RAs from an external router cannot be " "added to Neutron Router." msgstr "" "无法将已配置为从外部路由器接收 RA 的 IPv6 子网 %s 添加至 Neutron 路由器。" msgid "" "If True, then allow plugins that support it to create VLAN transparent " "networks." 
msgstr "如果为 True,那么允许那些支持它的插件创建 VLAN 透明网络。" msgid "Illegal IP version number" msgstr "IP 版本号不合法" msgid "" "In some cases the Neutron router is not present to provide the metadata IP " "but the DHCP server can be used to provide this info. Setting this value " "will force the DHCP server to append specific host routes to the DHCP " "request. If this option is set, then the metadata service will be activated " "for all the networks." msgstr "" "在某些情况下,没有 Neutron 路由器可提供元数据 IP,但 DHCP 服务器可用于提供此" "信息。设置此值将强制 DHCP 服务器将特定主机路由追加至 DHCP 请求。如果设置了此" "选项,那么将对所有网络激活此元数据服务。" msgid "" "Indicates that this L3 agent should also handle routers that do not have an " "external network gateway configured. This option should be True only for a " "single agent in a Neutron deployment, and may be False for all agents if all " "routers must have an external network gateway." msgstr "" "指示此 L3 代理程序还应处理未配置外部网络网关的路由器。此选项只有在用于 " "Neutron 部署中的单个代理程序时才应为 True,在所有路由器必须具有外部网络网关的" "情况下用于所有代理程序时可为 False。" #, python-format msgid "Instance of class %(module)s.%(class)s must contain _cache attribute" msgstr "类 %(module)s.%(class)s 的实例必须包含 _cache 属性" #, python-format msgid "Insufficient prefix space to allocate subnet size /%s" msgstr "没有足够的前缀空间来分配子网大小 /%s" msgid "Insufficient rights for removing default security group." msgstr "权利不足,无法移除缺省安全组。" msgid "" "Integration bridge to use. Do not change this parameter unless you have a " "good reason to. This is the name of the OVS integration bridge. There is one " "per hypervisor. The integration bridge acts as a virtual 'patch bay'. All VM " "VIFs are attached to this bridge and then 'patched' according to their " "network connectivity." msgstr "" "要使用的集成网桥。不要更改此参数,除非您有正当原因。这是 OVS 集成网桥的名称。" "每个 hypervisor 都有一个集成网桥。此集成网桥充当虚拟“接线架”。所有 VM VIF 附" "加至此网桥,然后根据其网络连接进行“接线”。" msgid "Interface to monitor" msgstr "要监视的接口" msgid "" "Interval between checks of child process liveness (seconds), use 0 to disable" msgstr "子进程活性检查之间的时间间隔(秒),使用 0 来进行禁用" msgid "Interval between two metering measures" msgstr "在采取两种测量措施之间的时间间隔" msgid "Interval between two metering reports" msgstr "在生成两个测量报告之间的时间间隔" #, python-format msgid "Invalid Device %(dev_name)s: %(reason)s" msgstr "无效设备 %(dev_name)s:%(reason)s" #, python-format msgid "" "Invalid action '%(action)s' for object type '%(object_type)s'. Valid " "actions: %(valid_actions)s" msgstr "" "针对对象类型“%(object_type)s”的操作“%(action)s”无效。有效操作为:" "%(valid_actions)s" #, python-format msgid "" "Invalid authentication type: %(auth_type)s, valid types are: " "%(valid_auth_types)s" msgstr "认证类型 %(auth_type)s 无效,以下是有效类型:%(valid_auth_types)s" #, python-format msgid "Invalid ethertype %(ethertype)s for protocol %(protocol)s." msgstr "ethertype %(ethertype)s 对协议 %(protocol)s 无效。" #, python-format msgid "Invalid format: %s" msgstr "格式无效:%s" #, python-format msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s" msgstr "实例状态 %(state)s 无效,以下是有效状态:%(valid_states)s" #, python-format msgid "Invalid mapping: '%s'" msgstr "映射无效:“%s”" #, python-format msgid "Invalid pci slot %(pci_slot)s" msgstr "无效 PCI 插槽 %(pci_slot)s" #, python-format msgid "Invalid provider format. Last part should be 'default' or empty: %s" msgstr "提供程序格式无效。最后部分应该为“default”或空:%s" #, python-format msgid "Invalid resource type %(resource_type)s" msgstr "无效资源类型 %(resource_type)s" #, python-format msgid "Invalid route: %s" msgstr "路由无效:%s" msgid "Invalid service provider format" msgstr "服务提供程序格式无效" #, python-format msgid "" "Invalid value for ICMP %(field)s (%(attr)s) %(value)s. 
It must be 0 to 255." msgstr "ICMP %(field)s (%(attr)s) 的值 %(value)s 无效。它必须为 0 到 255。" #, python-format msgid "Invalid value for port %(port)s" msgstr "端口 %(port)s 的值无效" msgid "" "Iptables mangle mark used to mark ingress from external network. This mark " "will be masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "用来标记外部网络中的入口的 Iptables mangle 标记。将使用 0xffff 来屏蔽此标记," "以便将仅使用低位的 16 位。" msgid "" "Iptables mangle mark used to mark metadata valid requests. This mark will be " "masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "用来标记元数据有效请求的 Iptables mangle 标记。将使用 0xffff 来屏蔽此标记,以" "便将仅使用低位的 16 位。" msgid "Keepalived didn't respawn" msgstr "Keepalived 未重新衍生" msgid "Keepalived didn't spawn" msgstr "Keepalived 未衍生" #, python-format msgid "" "Kernel HZ value %(value)s is not valid. This value must be greater than 0." msgstr "内核 HZ 值 %(value)s 无效。此值必须大于 0。" msgid "L3 agent failure to setup NAT for floating IPs" msgstr "L3 代理程序无法为浮动 IP 设置 NAT" msgid "L3 agent failure to setup floating IPs" msgstr "L3 代理程序无法设置浮动 IP" msgid "Limit number of leases to prevent a denial-of-service." msgstr "请对租赁数进行限制,以防止拒绝服务。" msgid "List of :" msgstr ": 的列表" msgid "" "List of <physical_network>:<vlan_min>:<vlan_max> or <physical_network> " "specifying physical_network names usable for VLAN provider and tenant " "networks, as well as ranges of VLAN tags on each available for allocation to " "tenant networks." msgstr "" "<physical_network>:<vlan_min>:<vlan_max> 或 <physical_network> 的列表,用于指" "定可用于 VLAN 提供者网络和租户网络的 physical_network 名称,以及每个物理网络" "上可分配给租户网络的 VLAN 标记范围。" msgid "" "List of network type driver entrypoints to be loaded from the neutron.ml2." "type_drivers namespace." msgstr "" "要从 neutron.ml2.type_drivers 名称空间装入的网络类型驱动程序入口点的列表。" msgid "" "List of physical_network names with which flat networks can be created. Use " "default '*' to allow flat networks with arbitrary physical_network names. " "Use an empty list to disable flat networks." msgstr "" "可通过其创建平面网络的 physical_network 名称的列表。使用缺省值“*”将允许平面网" "络使用任意 physical_network 名称。使用空列表将禁用平面网络。" msgid "Location for Metadata Proxy UNIX domain socket." msgstr "元数据代理 UNIX 域套接字的位置。" msgid "Location of Metadata Proxy UNIX domain socket" msgstr "元数据代理 UNIX 域套接字的位置" msgid "Location to store DHCP server config files." msgstr "用于存储 DHCP 服务器配置文件的位置。" msgid "Location to store IPv6 PD files." msgstr "用来存储 IPv6 PD 文件的位置。" msgid "Location to store IPv6 RA config files" msgstr "用于存储 IPv6 RA 配置文件的位置" msgid "Location to store child pid files" msgstr "用于存储子 pid 文件的位置" msgid "Log agent heartbeats" msgstr "记录代理程序脉动信号" msgid "" "MTU of the underlying physical network. Neutron uses this value to calculate " "MTU for all virtual network components. For flat and VLAN networks, neutron " "uses this value without modification. For overlay networks such as VXLAN, " "neutron automatically subtracts the overlay protocol overhead from this " "value. Defaults to 1500, the standard value for Ethernet." msgstr "" "底层物理网络的 MTU。Neutron 使用此值计算所有虚拟网络组件的 MTU。对于平面网络" "和 VLAN 网络,neutron 使用此值而不做修改。对于 VXLAN 之类的覆盖网络,neutron " "自动从此值减去覆盖协议开销。缺省为 1500(这是以太网的标准值)。" msgid "MTU size of veth interfaces" msgstr "veth 接口的 MTU 大小" msgid "Make the l2 agent run in DVR mode." msgstr "使 l2 代理在 DVR 方式下运行。" msgid "Malformed request body" msgstr "请求主体的格式不正确" msgid "MaxRtrAdvInterval setting for radvd.conf" msgstr "radvd.conf 的 MaxRtrAdvInterval 设置" msgid "Maximum number of DNS nameservers per subnet" msgstr "每个子网的 DNS 名称服务器的最大数目" msgid "" "Maximum number of L3 agents which a HA router will be scheduled on. If it is " "set to 0 then the router will be scheduled on every agent."
msgstr "" "将对其调度 HA 路由器的 L3 代理程序的最大数目。如果设置为 0,那么将对每个代理" "程序调度该路由器。" msgid "Maximum number of allowed address pairs" msgstr "允许的最大地址对数" msgid "Maximum number of host routes per subnet" msgstr "每个子网的最大主机路由数" msgid "Maximum number of routes per router" msgstr "每个路由器的最大路由数目" msgid "" "Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': deduce " "mode from metadata_proxy_user/group values, 'user': set metadata proxy " "socket mode to 0o644, to use when metadata_proxy_user is agent effective " "user or root, 'group': set metadata proxy socket mode to 0o664, to use when " "metadata_proxy_group is agent effective group or root, 'all': set metadata " "proxy socket mode to 0o666, to use otherwise." msgstr "" "元数据代理 UNIX 域套接字方式,允许使用以下 4 个值:“deduce”:根据 " "metadata_proxy_user/group 值进行推论的推论方式;“user”:将元数据代理套接字方" "式设置为 0o644,以在 metadata_proxy_user 为代理有效用户或者 root 用户时使" "用;“group”:将元数据代理套接字方式设置为 0o664,以在 metadata_proxy_group 为" "代理有效组或者 root 用户时使用;“all”:将元数据代理套接字方式设置为 0o666,以" "在其他情况下使用。" msgid "Metering driver" msgstr "测量驱动程序" msgid "MinRtrAdvInterval setting for radvd.conf" msgstr "radvd.conf 的 MinRtrAdvInterval 设置" msgid "Minimize polling by monitoring ovsdb for interface changes." msgstr "请通过监视 ovsdb 以获取接口更改来最大程度地减少轮询。" #, python-format msgid "Missing key in mapping: '%s'" msgstr "映射中缺少键:“%s”" msgid "" "Multicast group for VXLAN. When configured, will enable sending all " "broadcast traffic to this multicast group. When left unconfigured, will " "disable multicast VXLAN mode." msgstr "" "VXLAN 的多点广播组。如果配置了此项,那么系统允许将所有广播流量发送至此多点广" "播组。如果保留为未配置,那么系统将禁用多点广播 VXLAN 方式。" msgid "" "Multicast group(s) for vxlan interface. A range of group addresses may be " "specified by using CIDR notation. Specifying a range allows different VNIs " "to use different group addresses, reducing or eliminating spurious broadcast " "traffic to the tunnel endpoints. To reserve a unique group for each possible " "(24-bit) VNI, use a /8 such as 239.0.0.0/8. This setting must be the same on " "all the agents." msgstr "" "VXLAN 接口的多点广播组。必须使用 CIDR 注释指定一定范围的组地址。指定范围允许" "不同 VNI 使用不同组地址,从而减少或消除发送至通道端点的伪造广播流量。为对每个" "可能的(24 位)VNI 保留唯一组,请使用 /8,例如,239.0.0.0/8。此设置在所有代理" "程序上必须相同。" #, python-format msgid "Multiple default providers for service %s" msgstr "对于服务 %s,存在多个缺省提供程序" #, python-format msgid "Multiple providers specified for service %s" msgstr "对于服务 %s,已指定多个提供程序" msgid "Multiple tenant_ids in bulk security group rule create not allowed" msgstr "不允许在成批安全组规则创建中使用多个 tenant_id" msgid "Must also specify protocol if port range is given." msgstr "如果指定了端口范围,那么还必须指定协议。" msgid "Must specify one or more actions on flow addition or modification" msgstr "必须在添加或删除流时指定一个或多个操作" msgid "Name of Open vSwitch bridge to use" msgstr "要使用的已打开 vSwitch 网桥的名称" msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "要使用的 nova 区域的名称。如果 keystone 管理多个区域,那么这很有用。" msgid "Namespace of the router" msgstr "路由器名字空间" msgid "Native pagination depend on native sorting" msgstr "本机分页依赖于本机排序" #, python-format msgid "" "Need to apply migrations from %(project)s contract branch. This will require " "all Neutron server instances to be shutdown before proceeding with the " "upgrade." 
msgstr "" "需要从 %(project)s 合同分支应用迁移。这要求所有 Neutron 服务器实例关闭,然后" "继续升级。" msgid "Negative delta (downgrade) not supported" msgstr "不支持为负数的增量修订版(降级)" msgid "Negative relative revision (downgrade) not supported" msgstr "不支持为负数的相关修订版(降级)" #, python-format msgid "Network %s does not contain any IPv4 subnet" msgstr "网络 %s 中不包含任何 IPv4 子网" #, python-format msgid "Network %s is not a valid external network" msgstr "网络 %s 是无效外部网络" #, python-format msgid "Network %s is not an external network" msgstr "网络 %s 不是外部网络" #, python-format msgid "" "Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges " "%(excluded_ranges)s was not found." msgstr "" "从 IP 范围 %(parent_range)s(排除 IP 范围%(excluded_ranges)s)中找不到大小为 " "%(size)s 的网络。" #, python-format msgid "Network type value '%s' not supported" msgstr "网络类型值“%s”不受支持" msgid "Network type value needed by the ML2 plugin" msgstr "ML2 插件需要网络类型值" msgid "Neutron core_plugin not configured!" msgstr "未配置 Neutron core_plugin!" msgid "No default router:external network" msgstr "没有缺省路由器:外部网络" #, python-format msgid "No default subnetpool found for IPv%s" msgstr "找不到对应 IPv%s 的缺省子网池" msgid "No default subnetpools defined" msgstr "未定义缺省子网池" #, python-format msgid "No more IP addresses available for subnet %(subnet_id)s." msgstr "没有更多 IP 地址可用于子网 %(subnet_id)s。" msgid "No offline migrations pending." msgstr "没有脱机迁移处于暂挂状态。" #, python-format msgid "No shared key in %s fields" msgstr "%s 字段中没有共享键" msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "不允许以“dvr”方式将路由器手动分配给代理程序。" msgid "Not allowed to manually remove a router from an agent in 'dvr' mode." msgstr "不允许以“dvr”方式从代理程序手动移除路由器。" msgid "" "Number of DHCP agents scheduled to host a tenant network. If this number is " "greater than 1, the scheduler automatically assigns multiple DHCP agents for " "a given tenant network, providing high availability for DHCP service." msgstr "" "已调度的主管租户网络的 DHCP 代理数。如果此数目大于 1,那么调度程序会自动为所" "给定的租户网络分配多个 DHCP 代理,从而为 DHCP 服务提供高可用性。" msgid "Number of backlog requests to configure the metadata server socket with" msgstr "关于配置元数据服务器套接字的储备请求数" msgid "Number of backlog requests to configure the socket with" msgstr "积压许多配置socket的请求" msgid "" "Number of bits in an ipv4 PTR zone that will be considered network prefix. " "It has to align to byte boundary. Minimum value is 8. Maximum value is 24. " "As a consequence, range of values is 8, 16 and 24" msgstr "" "ipv4 PTR 区域中将被视为网络前缀的位数。它必须符合字节限制。最小值为 8。最大值" "为 24。因此,有效值包括:8、16 和 24。" msgid "" "Number of bits in an ipv6 PTR zone that will be considered network prefix. " "It has to align to nyble boundary. Minimum value is 4. Maximum value is 124. " "As a consequence, range of values is 4, 8, 12, 16,..., 124" msgstr "" "ipv6 PTR 区域中将被视为网络前缀的位数。它必须符合字节限制。最小值为 4。最大值" "为 124。因此,有效值包括:4,8,12,16,...,124。" msgid "" "Number of floating IPs allowed per tenant. A negative value means unlimited." msgstr "每个租户允许的浮动 IP 数。负值表示无限。" msgid "" "Number of networks allowed per tenant. A negative value means unlimited." msgstr "每个租户允许的网络数。负值表示无限。" msgid "Number of ports allowed per tenant. A negative value means unlimited." msgstr "每个租户允许的端口数。负值表示无限。" msgid "Number of routers allowed per tenant. A negative value means unlimited." msgstr "每个租户允许的路由器数。负值表示无限。" msgid "" "Number of seconds between sending events to nova if there are any events to " "send." msgstr "前后两次将事件发送至 nova 的间隔秒数(如果有事件要发送)。" msgid "Number of seconds to keep retrying to listen" msgstr "若干秒保持重试监听" msgid "" "Number of security groups allowed per tenant. 
A negative value means " "unlimited." msgstr "每个租户允许的安全组数。负值表示无限。" msgid "" "Number of security rules allowed per tenant. A negative value means " "unlimited." msgstr "每个租户允许的安全性规则数。负值表示无限。" msgid "" "Number of separate worker processes for metadata server (defaults to half of " "the number of CPUs)" msgstr "元数据服务器的单独工作程序进程数(缺省设置为 CPU 数目的一半)" msgid "Number of subnets allowed per tenant, A negative value means unlimited." msgstr "每个租户允许的子网数。负值表示无限。" msgid "" "Number of threads to use during sync process. Should not exceed connection " "pool size configured on server." msgstr "同步过程中要使用的线程数。不应超过在服务器上配置的连接池大小。" msgid "OK" msgstr "确定" msgid "" "OVS datapath to use. 'system' is the default value and corresponds to the " "kernel datapath. To enable the userspace datapath set this value to 'netdev'." msgstr "" "要使用的 OVS 数据路径。“system”是缺省值,对应内核数据路径,要启用用户空间数据" "路径,请将此值设置为“netdev”。" msgid "OVS vhost-user socket directory." msgstr "OVS vhost-user 套接字目录。" msgid "Only admin can view or configure quota" msgstr "只有管理员才能查看或配置配额" msgid "Only admin is authorized to access quotas for another tenant" msgstr "只有管理员才有权访问另一租户的配额" msgid "Only admins can manipulate policies on objects they do not own" msgstr "只有管理员才能处理针对并非他们所有的对象的策略" msgid "Only allowed to update rules for one security profile at a time" msgstr "一次仅允许为一个安全概要文件更新规则" msgid "Only remote_ip_prefix or remote_group_id may be provided." msgstr "只能提供 remote_ip_prefix 或 remote_group_id。" #, python-format msgid "Operation not supported on device %(dev_name)s" msgstr "操作在设备 %(dev_name)s 上不受支持" msgid "" "Ordered list of network_types to allocate as tenant networks. The default " "value 'local' is useful for single-box testing but provides no connectivity " "between hosts." msgstr "" "要作为租户网络分配的 network_type 的有序列表。缺省值“local”对单框测试很有用," "但不会在主机之间提供连接。" msgid "Override the default dnsmasq settings with this file." msgstr "使用此文件覆盖缺省 dnsmasq 设置。" msgid "Owner type of the device: network/compute" msgstr "设备的所有者类型:网络/计算" msgid "POST requests are not supported on this resource." msgstr "POST 请求在此资源上不受支持。" #, python-format msgid "Package %s not installed" msgstr "未安装软件包 %s" #, python-format msgid "Parsing bridge_mappings failed: %s." msgstr "解析 bridge_mappings 失败:%s。" msgid "Password for connecting to designate in admin context" msgstr "用于在管理上下文中连接 designate 的密码" msgid "Path to PID file for this process" msgstr "此进程的 PID 文件的路径" msgid "Path to the router directory" msgstr "路由器目录的路径" msgid "Peer patch port in integration bridge for tunnel bridge." msgstr "集成网桥中的同级补丁端口(对于隧道网桥)。" msgid "Peer patch port in tunnel bridge for integration bridge." msgstr "隧道网桥中的同级补丁端口(对于集成网桥)。" msgid "Phase upgrade options do not accept revision specification" msgstr "阶段升级选项不接受修订规范" msgid "Ping timeout" msgstr "Ping 超时" #, python-format msgid "Port %(id)s does not have fixed ip %(address)s" msgstr "端口 %(id)s 没有固定 ip %(address)s" #, python-format msgid "Port %s does not exist" msgstr "端口 %s 不存在" #, python-format msgid "" "Port %s has multiple fixed IPv4 addresses. Must provide a specific IPv4 " "address when assigning a floating IP" msgstr "" "端口 %s 具有多个固定 IPv4 地址。当分配浮动 IP 时,必须提供特定 IPv4 地址" msgid "Prefix Delegation can only be used with IPv6 subnets." msgstr "前缀授权只能用于 IPv6 子网。" msgid "Private key of client certificate." msgstr "客户机证书的专用密钥。" #, python-format msgid "Probe %s deleted" msgstr "已删除探测器 %s" #, python-format msgid "Probe created : %s " msgstr "已创建探测器:%s " msgid "Process is already started" msgstr "进程已经启动" msgid "Process is not running." 
msgstr "进程未运行" msgid "Protocol to access nova metadata, http or https" msgstr "用于访问 nova 元数据的协议(HTTP 或 HTTPS)" #, python-format msgid "Provider name %(name)s is limited by %(len)s characters" msgstr "供应商名称 %(name)s 被限制为不超过 %(len)s 个字符" #, python-format msgid "RBAC policy of type %(object_type)s with ID %(id)s not found" msgstr "找不到标识为 %(id)s 的 %(object_type)s 类型的 RBAC 策略" #, python-format msgid "" "RBAC policy on object %(object_id)s cannot be removed because other objects " "depend on it.\n" "Details: %(details)s" msgstr "" "无法移除对象 %(object_id)s 的 RBAC 策略,因为其他对象依赖于它。\n" "详细信息:%(details)s" msgid "" "Range of seconds to randomly delay when starting the periodic task scheduler " "to reduce stampeding. (Disable by setting to 0)" msgstr "" "当启动定期任务调度程序以减少拥堵时要随机延迟的秒数范围.(通过设置为 0 来禁用)" msgid "Ranges must be in the same IP version" msgstr "范围必须为同一 IP 版本" msgid "Ranges must be netaddr.IPRange" msgstr "范围必须为 netaddr.IPRange" msgid "Ranges must not overlap" msgstr "范围不能重叠" #, python-format msgid "" "Release aware branch labels (%s) are deprecated. Please switch to expand@ " "and contract@ labels." msgstr "" "建议不要使用对发行版敏感的分支标签 (%s)。请切换到 expand@ 和 contract@ 标签。" msgid "Remote metadata server experienced an internal server error." msgstr "远程元数据服务器遇到内部服务器错误。" msgid "" "Repository does not contain HEAD files for contract and expand branches." msgstr "存储库未包含用于合同分支和扩展分支的 HEAD 文件。" msgid "" "Representing the resource type whose load is being reported by the agent. " "This can be \"networks\", \"subnets\" or \"ports\". When specified (Default " "is networks), the server will extract particular load sent as part of its " "agent configuration object from the agent report state, which is the number " "of resources being consumed, at every report_interval.dhcp_load_type can be " "used in combination with network_scheduler_driver = neutron.scheduler." "dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is " "WeightScheduler, dhcp_load_type can be configured to represent the choice " "for the resource being balanced. Example: dhcp_load_type=networks" msgstr "" "表示其负载要由代理报告的资源类型。这可以是“网络”、“子网”或“端口”。如果已指定" "(缺省值为“网络”),那么服务器将根据代理报告状态抽取特定负载(作为其代理配置" "对象的一部分发送),这是在每个 report_interval 要消耗的资源数。" "dhcp_load_type 可与 network_scheduler_driver = neutron.scheduler." "dhcp_agent_scheduler.WeightScheduler 一起使用。当 network_scheduler_driver " "为 WeightScheduler 时,dhcp_load_type 可配置为表示针对要均衡的资源的选择。示" "例:dhcp_load_type=networks" msgid "Request Failed: internal server error while processing your request." msgstr "请求失败:在处理请求时,发生内部服务器错误。" msgid "" "Reset flow table on start. Setting this to True will cause brief traffic " "interruption." msgstr "在启动时重置流表。将此项设置为 True 将导致短暂的通信中断。" #, python-format msgid "Resource %(resource)s %(resource_id)s could not be found." msgstr "找不到资源 %(resource)s %(resource_id)s。" #, python-format msgid "Resource %(resource_id)s of type %(resource_type)s not found" msgstr "找不到类型为 %(resource_type)s 的资源 %(resource_id)s" #, python-format msgid "" "Resource '%(resource_id)s' is already associated with provider " "'%(provider)s' for service type '%(service_type)s'" msgstr "" "对于服务类型“%(service_type)s”,资源“%(resource_id)s”已经与提供程" "序“%(provider)s”关联" msgid "Resource body required" msgstr "需要资源主体" msgid "Resource not found." msgstr "找不到资源。" msgid "Resources required" msgstr "需要资源" msgid "" "Root helper application. Use 'sudo neutron-rootwrap /etc/neutron/rootwrap." "conf' to use the real root filter facility. Change to 'sudo' to skip the " "filtering and just run the command directly." 
msgstr "" "root helper 应用程序。使用“sudo neutron-rootwrap /etc/neutron/rootwrap." "conf”以使用真实根过滤工具。更改为“sudo”将跳过过滤并且仅直接运行该命令。" msgid "Root permissions are required to drop privileges." msgstr "删除特权需要 root 用户许可权。" #, python-format msgid "Router already has a port on subnet %s" msgstr "路由器已在子网 %s 上具有端口" msgid "Router port must have at least one fixed IP" msgstr "路由器端口必须具有至少一个固定 IP" #, python-format msgid "Running %(cmd)s (%(desc)s) for %(project)s ..." msgstr "正在对 %(project)s 运行 %(cmd)s (%(desc)s)..." #, python-format msgid "Running %(cmd)s for %(project)s ..." msgstr "正在对 %(project)s 运行 %(cmd)s..." msgid "" "Seconds between nodes reporting state to server; should be less than " "agent_down_time, best if it is half or less than agent_down_time." msgstr "" "节点向服务器报告状态的间隔秒数;应该小于 agent_down_time,最好小于 " "agent_down_time 或是它的一半。" msgid "" "Seconds to regard the agent is down; should be at least twice " "report_interval, to be sure the agent is down for good." msgstr "" "认为代理已关闭的秒数;应该至少为 report_interval 的两倍,以确保代理已正常关" "闭。" #, python-format msgid "Security Group %(id)s %(reason)s." msgstr "安全组 %(id)s %(reason)s。" #, python-format msgid "Security Group Rule %(id)s %(reason)s." msgstr "安全组规则 %(id)s %(reason)s。" #, python-format msgid "Security group %(id)s does not exist" msgstr "安全组 %(id)s 不存在" #, python-format msgid "Security group rule %(id)s does not exist" msgstr "安全组规则 %(id)s 不存在" #, python-format msgid "Security group rule already exists. Rule id is %(rule_id)s." msgstr "安全组规则已存在,规则标识为 %(rule_id)s。" #, python-format msgid "" "Security group rule for ethertype '%(ethertype)s' not supported. Allowed " "values are %(values)s." msgstr "" "ethertype “%(ethertype)s” 的安全组规则不受支持。允许的值为 %(values)s。" #, python-format msgid "" "Security group rule protocol %(protocol)s not supported. Only protocol " "values %(values)s and integer representations [0 to 255] are supported." msgstr "" "安全组规则协议 %(protocol)s 不受支持。只有协议值 %(values)s 和整数表示 [0 到 " "255] 受支持。" msgid "" "Send notification to nova when port data (fixed_ips/floatingip) changes so " "nova can update its cache." msgstr "" "当端口数据(固定 IP/floatingip)更改时,将通知发送至 nova,以便 nova 可更新其" "高速缓存。" msgid "Send notification to nova when port status changes" msgstr "当端口状态更改时,将通知发送至 nova" #, python-format msgid "" "Service provider '%(provider)s' could not be found for service type " "%(service_type)s" msgstr "对于以下服务类型,找不到服务提供程序“%(provider)s”:%(service_type)s" msgid "Service to handle DHCPv6 Prefix delegation." msgstr "用来处理 DHCPv6 前缀授权的服务。" #, python-format msgid "Service type %(service_type)s does not have a default service provider" msgstr "服务类型 %(service_type)s 没有缺省服务提供程序" msgid "" "Set new timeout in seconds for new rpc calls after agent receives SIGTERM. " "If value is set to 0, rpc timeout won't be changed" msgstr "" "在代理接收到 SIGTERM 之后,为新的 RPC 调用设置新超时(以秒计)。如果值设置为 " "0,那么 RPC 超时将不更改" msgid "" "Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "在承载 GRE/VXLAN 隧道的出局 IP 包上设置或取消设置不分段 (DF) 位。" msgid "Shared address scope can't be unshared" msgstr "无法取消共享已共享的地址范围" msgid "String prefix used to match IPset names." msgstr "用来匹配 IPset 名称的字符串前缀。" #, python-format msgid "Sub-project %s not installed." msgstr "未安装子项目 %s。" msgid "Subnet for router interface must have a gateway IP" msgstr "路由器接口的子网必须具有网关 IP" msgid "Subnet pool has existing allocations" msgstr "子网池具有现有分配" msgid "Subnet used for the l3 HA admin network." msgstr "用于 l3 HA 管理网络的子网。" msgid "" "System-wide flag to determine the type of router that tenants can create. 
" "Only admin can override." msgstr "系统范围标记,用于确定租户可创建的路由器类型。仅管理员可以覆盖。" msgid "TCP Port used by Neutron metadata namespace proxy." msgstr "TCP 端口已由 Neutron 元数据名称空间代理使用。" msgid "TCP Port used by Nova metadata server." msgstr "Nova 元数据服务器使用的 TCP 端口。" msgid "TTL for vxlan interface protocol packets." msgstr "用于 vxlan 接口协议包的 TTL。" #, python-format msgid "Tag %(tag)s could not be found." msgstr "找不到标记 %(tag)s。" #, python-format msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" msgstr "不允许租户 %(tenant_id)s 在此网络上创建 %(resource)s" msgid "Tenant id for connecting to designate in admin context" msgstr "管理员上下文中要指定的连接租户标识" msgid "Tenant name for connecting to designate in admin context" msgstr "管理员上下文中要指定的连接租户名" msgid "" "The DHCP server can assist with providing metadata support on isolated " "networks. Setting this value to True will cause the DHCP server to append " "specific host routes to the DHCP request. The metadata service will only be " "activated when the subnet does not contain any router port. The guest " "instance must be configured to request host routes via DHCP (Option 121). " "This option doesn't have any effect when force_metadata is set to True." msgstr "" "DHCP 服务器可帮助在隔离网络上提供元数据支持。将此值设置为 True 将导致 DHCP 服" "务器将特定主机路由追加至 DHCP 请求。仅当子网未包含任何路由器端口时,才会激活" "元数据服务。访客实例必须配置为通过 DHCP 请求主机路由(选项 121)。如果 " "force_metadata 设置为 True,那么此选项没有任何效果。" msgid "The UDP port to use for VXLAN tunnels." msgstr "UDP端口用于VXLAN隧道" #, python-format msgid "" "The address allocation request could not be satisfied because: %(reason)s" msgstr "未能满足地址分配请求,原因:%(reason)s" msgid "The advertisement interval in seconds" msgstr "通告间隔(秒)" msgid "" "The base MAC address Neutron will use for VIFs. The first 3 octets will " "remain unchanged. If the 4th octet is not 00, it will also be used. The " "others will be randomly generated." msgstr "" "基本 MAC 地址 Neutron 将用于 VIF。前 3 个八位元将保持不变。如果第 4 个八位元" "并非 00,那么也将使用该八位元。将随机生成其他八位元。" msgid "" "The base mac address used for unique DVR instances by Neutron. The first 3 " "octets will remain unchanged. If the 4th octet is not 00, it will also be " "used. The others will be randomly generated. The 'dvr_base_mac' *must* be " "different from 'base_mac' to avoid mixing them up with MAC's allocated for " "tenant ports. A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00. " "The default is 3 octet" msgstr "" "由 Neutron 用于唯一 DVR 实例的基本 MAC 地址。前三个八位元将保持不变。如果第四" "个八位元不为 00,那么也将使用该八位元。将随机生成其他八位元。“dvr_base_mac”必" "须不同于“base_mac”,以避免将它们与为租户端口分配的 MAC 混合使用。以下是一个具" "有 4 个八位元的示例:dvr_base_mac = fa:16:3f:4f:00:00。缺省值为 3 个八位元" msgid "The core plugin Neutron will use" msgstr "Neutron 将使用的核心插件" msgid "The driver used to manage the DHCP server." msgstr "用于管理 DHCP 服务器的驱动程序。" msgid "The driver used to manage the virtual interface." msgstr "用于管理虚拟接口的驱动程序。" msgid "" "The email address to be used when creating PTR zones. If not specified, the " "email address will be admin@" msgstr "" "创建 PTR 区域时要使用的电子邮件地址。如果未指定,那么电子邮件地址将为 " "admin@" msgid "" "The maximum number of items returned in a single response, value was " "'infinite' or negative integer means no limit" msgstr "在单个响应中返回的最大项数,值为“无限”或负整数表示无限制" msgid "" "The network type to use when creating the HA network for an HA router. By " "default or if empty, the first 'tenant_network_types' is used. This is " "helpful when the VRRP traffic should use a specific network which is not the " "default one." 
msgstr "" "为 HA 路由器创建 HA 网络时要使用的网络类型。缺省情况下,或者如果网络类型为" "空,那么将使用第一个“tenant_network_types”。这在 VRRP 流量应使用特定网络(该" "网络不是缺省网络)时很有帮助。" msgid "" "The number of seconds the agent will wait between polling for local device " "changes." msgstr "在轮询本地设备更改之间,代理将等待的秒数。" msgid "" "The number of seconds to wait before respawning the ovsdb monitor after " "losing communication with it." msgstr "在与 ovsdb 监视器失去通信联系之后重新衍生该监视器之前要等待的秒数。" msgid "The number of sort_keys and sort_dirs must be same" msgstr "sort_keys 的数字与 sort_dirs 的数字必须相同" msgid "" "The path for API extensions. Note that this can be a colon-separated list of " "paths. For example: api_extensions_path = extensions:/path/to/more/exts:/" "even/more/exts. The __path__ of neutron.extensions is appended to this, so " "if your extensions are in there you don't need to specify them here." msgstr "" "API 扩展的路径。请注意,它可能是路径的冒号分隔列表。例如:" "api_extensions_path = extensions:/path/to/more/exts:/even/more/exts。neutron." "extensions 的 __path__ 将追加至此项之后,所以,如果扩展位于该处,那么不需要在" "此处指定它们。" msgid "The physical network name with which the HA network can be created." msgstr "可以用来创建 HA 网络的物理网络名称。" #, python-format msgid "The port '%s' was deleted" msgstr "已删除端口“%s”" msgid "The port to bind to" msgstr "端口要绑定至" #, python-format msgid "The requested content type %s is invalid." msgstr "请求的内容类型%s非法。" msgid "The resource could not be found." msgstr "找不到该资源。" #, python-format msgid "" "The router %(router_id)s has been already hosted by the L3 Agent " "%(agent_id)s." msgstr "路由器 %(router_id)s 已由 L3 代理 %(agent_id)s 主管。" msgid "" "The server has either erred or is incapable of performing the requested " "operation." msgstr "服务器已出错或无法执行所请求操作。" msgid "The service plugins Neutron will use" msgstr "Neutron 将使用的服务插件" #, python-format msgid "The subnet request could not be satisfied because: %(reason)s" msgstr "未能满足子网请求,原因:%(reason)s" #, python-format msgid "The subproject to execute the command against. Can be one of: '%s'." msgstr "要对其执行命令的子项目。可以是“%s”的其中一项。" msgid "The type of authentication to use" msgstr "要使用的认证的类型" msgid "" "There are routers attached to this network that depend on this policy for " "access." msgstr "根据此策略,有一些路由器附加至此网络以用于访问。" msgid "" "True to delete all ports on all the OpenvSwitch bridges. False to delete " "ports created by Neutron on integration and external network bridges." msgstr "" "True 表示删除所有 OpenvSwitch 网桥上的所有端口。False 表示删除集成和外部网络" "网桥上由 Neutron 创建的端口。" msgid "Tunnel IP value needed by the ML2 plugin" msgstr "ML2 插件需要隧道 IP 值" msgid "Tunnel bridge to use." msgstr "要使用的隧道网桥。" msgid "" "Type of the nova endpoint to use. This endpoint will be looked up in the " "keystone catalog and should be one of public, internal or admin." 
msgstr "" "要使用的 nova 端点的类型。系统将在 keystone 目录中查找此端点,值应该为 " "public、internal 或 admin 的其中之一。" msgid "URL for connecting to designate" msgstr "要指定的连接 URL" msgid "URL to database" msgstr "指向数据库的 URL" #, python-format msgid "Unable to access %s" msgstr "无法访问 %s" #, python-format msgid "Unable to calculate %(address_type)s address because of:%(reason)s" msgstr "无法计算 %(address_type)s 地址,原因:%(reason)s" #, python-format msgid "Unable to convert value in %s" msgstr "无法转换 %s 中的值" msgid "Unable to create the SNAT Interface Port" msgstr "无法创建 SNAT 接口端口" #, python-format msgid "Unable to determine mac address for %s" msgstr "无法为 %s 确定网卡地址" #, python-format msgid "Unable to find '%s' in request body" msgstr "在请求主体中找不到“%s”" #, python-format msgid "Unable to find IP address %(ip_address)s on subnet %(subnet_id)s" msgstr "在子网 %(subnet_id)s 上找不到 IP 地址 %(ip_address)s" #, python-format msgid "Unable to find resource name in %s" msgstr "在%s中找不到源的名称" #, python-format msgid "" "Unable to identify a target field from:%s. Match should be in the form " "%%()s" msgstr "无法从:%s中匹配目标域. 匹配应该使用以下形式%%(域名)s" msgid "Unable to provide external connectivity" msgstr "无法提供外部连接" msgid "Unable to provide tenant private network" msgstr "无法提供租户专用网络" #, python-format msgid "" "Unable to verify match:%(match)s as the parent resource: %(res)s was not " "found" msgstr "无法验证该匹配%(match)s为父资源:未找到%(res)s" #, python-format msgid "Unexpected label for script %(script_name)s: %(labels)s" msgstr "脚本 %(script_name)s 的意外标签:%(labels)s" #, python-format msgid "Unexpected number of alembic branch points: %(branchpoints)s" msgstr "净化器分支点的数目异常:%(branchpoints)s" #, python-format msgid "Unexpected response code: %s" msgstr "意外响应代码:%s" #, python-format msgid "Unexpected response: %s" msgstr "意外响应:%s" #, python-format msgid "Unit name '%(unit)s' is not valid." msgstr "单元名称“%(unit)s”无效。" #, python-format msgid "Unknown address type %(address_type)s" msgstr "未知地址类型 %(address_type)s" #, python-format msgid "Unknown attribute '%s'." msgstr "属性“%s”未知。" #, python-format msgid "Unknown chain: %r" msgstr "链未知:%r" #, python-format msgid "Unknown network type %(network_type)s." msgstr "未知网络类型 %(network_type)s。" msgid "Unmapped error" msgstr "已取消映射错误" msgid "Unrecognized action" msgstr "无法识别动作" msgid "Unrecognized field" msgstr "无法识别字段" msgid "Unsupported Content-Type" msgstr "Content-Type 不受支持" #, python-format msgid "Unsupported network type %(net_type)s." msgstr "网络类型 %(net_type)s 不受支持。" msgid "Unsupported request type" msgstr "未支持请求类型" msgid "Updating default security group not allowed." msgstr "正在更新的默认安全组内容不合法" msgid "" "Use ML2 l2population mechanism driver to learn remote MAC and IPs and " "improve tunnel scalability." msgstr "" "请使用 ML2 l2population 机制驱动程序以了解远程 MAC 和 IP 并提高隧道可伸缩性。" msgid "Use broadcast in DHCP replies." msgstr "在 DHCP 应答中使用广播。" msgid "Use either --delta or relative revision, not both" msgstr "请使用 --delta 或者相关修订版,但是不能同时指定这两者" msgid "" "Use ipset to speed-up the iptables based security groups. Enabling ipset " "support requires that ipset is installed on L2 agent node." msgstr "" "使用 ipset 以加速基于安全组的 iptable。启用 ipset 支持要求该 ipset 安装在 L2 " "代理程序节点上。" msgid "" "Use the root helper when listing the namespaces on a system. This may not be " "required depending on the security configuration. If the root helper is not " "required, set this to False for a performance improvement." 
msgstr "" "列示系统上的名称空间时使用 root helper。根据安全配置,这可能不是必需的。如果 " "root helper 不是必需的,请将其设置为 False 以改进性能。" msgid "" "Use veths instead of patch ports to interconnect the integration bridge to " "physical networks. Support kernel without Open vSwitch patch port support so " "long as it is set to True." msgstr "" "使用 veth 而不是接线端口以使集成网桥与物理网络互连。设置为 True 时支持不具备 " "Open vSwitch 接线端口支持的内核。" msgid "" "User (uid or name) running metadata proxy after its initialization (if " "empty: agent effective user)." msgstr "" "在元数据代理的初始化之后,运行该代理的用户(uid 或名称),(如果此用户为空," "那么这是代理有效用户)。" msgid "User (uid or name) running this process after its initialization" msgstr "在此进程的初始化之后,运行此进程的用户(uid 或名称)" msgid "Username for connecting to designate in admin context" msgstr "管理员上下文中要指定的连接用户名" msgid "VRRP authentication password" msgstr "VRRP认证密码" msgid "VRRP authentication type" msgstr "VRRP认证类型" msgid "" "Value of host kernel tick rate (hz) for calculating minimum burst value in " "bandwidth limit rules for a port with QoS. See kernel configuration file for " "HZ value and tc-tbf manual for more information." msgstr "" "主机内核节拍率的值 (hz),用于计算带有 QoS 的端口的带宽限制规则中的最小脉冲" "值。请参阅内核配置文件以获取 HZ 值,并参阅 tc-tbf 手册以获取更多信息。" msgid "" "Value of latency (ms) for calculating size of queue for a port with QoS. See " "tc-tbf manual for more information." msgstr "" "延迟值 (ms),用于计算带有 QoS 的端口的队列的大小。请参阅 tc-tbf 手册以了解更" "多信息。" msgid "" "When proxying metadata requests, Neutron signs the Instance-ID header with a " "shared secret to prevent spoofing. You may select any string for a secret, " "but it must match here and in the configuration used by the Nova Metadata " "Server. NOTE: Nova uses the same config key, but in [neutron] section." msgstr "" "代理元数据请求时,Neutron 会使用共享密钥签署 Instance-ID 头以避免电子诈骗。可" "选择任何字符串作为密钥,但此处的密钥必须与 Nova Metadata Server 使用的配置中" "的密钥相匹配。注意:Nova 使用同一配置密钥,但在 [neutron] 节中。" msgid "" "Where to store Neutron state files. This directory must be writable by the " "agent." msgstr "用于存储 Neutron 状态文件的位置。此目录对于代理必须为可写。" msgid "" "With IPv6, the network used for the external gateway does not need to have " "an associated subnet, since the automatically assigned link-local address " "(LLA) can be used. However, an IPv6 gateway address is needed for use as the " "next-hop for the default route. If no IPv6 gateway address is configured " "here, (and only then) the neutron router will be configured to get its " "default route from router advertisements (RAs) from the upstream router; in " "which case the upstream router must also be configured to send these RAs. " "The ipv6_gateway, when configured, should be the LLA of the interface on the " "upstream router. If a next-hop using a global unique address (GUA) is " "desired, it needs to be done via a subnet allocated to the network and not " "through this parameter. 
" msgstr "" "对于 IPv6,用于外部网关的网络不需要具备相关联的子网,因为可以使用自动指定的链" "路本地地址 (LLA)。但是,需要 IPv6 网关地址用作缺省路由的下一个路由器。如果此" "处未配置 IPv6 网关地址,那么将配置 Neutron 路由器,以从上游的路由器中获取路由" "器广告(RA)中的缺省路由;在这种情况下,还必须配置上游路由器以发送这些 RA。配" "置了 ipv6_gateway 时,ipv6_gateway 应为上游路由器上的接口的 LLA。如果需要下一" "个使用全局唯一地址 (GUA) 的路由器,那么它需要通过分配给该网络的子网来完成,而" "不是通过此参数来完成。" msgid "You must implement __call__" msgstr "必须实现 __call__" msgid "" "You must provide a config file for bridge - either --config-file or " "env[NEUTRON_TEST_CONFIG_FILE]" msgstr "必须为网桥提供配置文件 - --config-file 或env[NEUTRON_TEST_CONFIG_FILE]" msgid "You must provide a revision or relative delta" msgstr "必须提供修订或相对变化量" msgid "a subnetpool must be specified in the absence of a cidr" msgstr "缺少 CIDR 时必须指定子网池" msgid "add_ha_port cannot be called inside of a transaction." msgstr "不能在事务内部调用 add_ha_port。" msgid "allocation_pools allowed only for specific subnet requests." msgstr "仅允许将 allocation_pools 用于特定子网请求。" msgid "allocation_pools are not in the subnet" msgstr "allocation_pools 不在子网内" msgid "allocation_pools use the wrong ip version" msgstr "allocation_pools 使用错误的 IP 版本" msgid "already a synthetic attribute" msgstr "已是综合属性" msgid "binding:profile value too large" msgstr "binding:profile 值太大" #, python-format msgid "cannot perform %(event)s due to %(reason)s" msgstr "无法执行 %(event)s,因为 %(reason)s" msgid "cidr and prefixlen must not be supplied together" msgstr "不得同时指定 cidr 和 prefixlen" msgid "dns_domain cannot be specified without a dns_name" msgstr "不能在没有 dns_name 的情况下指定 dns_domain" msgid "dns_name cannot be specified without a dns_domain" msgstr "不能在没有 dns_domain 的情况下指定 dns_name" msgid "fixed_ip_address cannot be specified without a port_id" msgstr "在没有 port_id 的情况下,无法指定 fixed_ip_address" #, python-format msgid "has device owner %s" msgstr "具有设备所有者 %s" msgid "in use" msgstr "正在使用" #, python-format msgid "ip command failed on device %(dev_name)s: %(reason)s" msgstr "对设备 %(dev_name)s 执行 IP 命令失败:%(reason)s" #, python-format msgid "ip link capability %(capability)s is not supported" msgstr "IP 链接功能 %(capability)s 不受支持" #, python-format msgid "ip link command is not supported: %(reason)s" msgstr "ip 链路命令未支持: %(reason)s" msgid "ip_version must be specified in the absence of cidr and subnetpool_id" msgstr "在缺少 cidr 和 subnetpool_id 的情况下,必须指定 ip_version" msgid "ipv6_address_mode is not valid when ip_version is 4" msgstr "ip_version 为 4 时,ipv6_address_mode 无效" msgid "ipv6_ra_mode is not valid when ip_version is 4" msgstr "ip_version 为 4 时,ipv6_ra_mode 无效" #, python-format msgid "" "ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " "'%(addr_mode)s' is not valid. If both attributes are set, they must be the " "same value" msgstr "" "设置为“%(ra_mode)s”的 ipv6_ra_mode(在 ipv6_address_mode 设置" "为“%(addr_mode)s”的情况下)无效。如果设置了这两个属性,那么它们必须为同一个值" msgid "mac address update" msgstr "MAC 地址更新" msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "必须提供正好 2 个自变量:cidr 和 MAC" msgid "network_type required" msgstr "需要 network_type" #, python-format msgid "network_type value '%s' not supported" msgstr "不支持的网络类型值 '%s'" msgid "new subnet" msgstr "新子网" #, python-format msgid "physical_network '%s' unknown for flat provider network" msgstr "平面供应商网络的物理网络 '%s'为未知状态" msgid "physical_network required for flat provider network" msgstr "平面供应商网络需要的物理网络" #, python-format msgid "provider:physical_network specified for %s network" msgstr "提供程序:已为%s 网络指定 physical_network" msgid "respawn_interval must be >= 0 if provided." 
msgstr "respawn_interval 必须不小于 0(如果已提供此项)。" #, python-format msgid "segmentation_id out of range (%(min)s through %(max)s)" msgstr "segmentation_id 超出范围,从(%(min)s 到 %(max)s)" msgid "segmentation_id requires physical_network for VLAN provider network" msgstr "segmentation_id 需要 VLAN 提供程序网络的 physical_network" msgid "shared attribute switching to synthetic" msgstr "共享属性正切换为综合属性" #, python-format msgid "" "subnetpool %(subnetpool_id)s cannot be updated when associated with shared " "address scope %(address_scope_id)s" msgstr "" "当子网池 %(subnetpool_id)s 与共享地址范围 %(address_scope_id)s 相关联时,将无" "法更新该子网池" msgid "subnetpool_id and use_default_subnetpool cannot both be specified" msgstr "不能同时指定 subnetpool_id 和 use_default_subnetpool" msgid "the nexthop is not connected with router" msgstr "下一中继段未与路由器连接" msgid "the nexthop is used by router" msgstr "路由器已使用下一中继段" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982290.9750416 neutron-16.0.0.0b2.dev214/neutron/locale/zh_TW/0000755000175000017500000000000000000000000021127 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3190448 neutron-16.0.0.0b2.dev214/neutron/locale/zh_TW/LC_MESSAGES/0000755000175000017500000000000000000000000022714 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/locale/zh_TW/LC_MESSAGES/neutron.po0000644000175000017500000025165700000000000024766 0ustar00coreycorey00000000000000# Translations template for neutron. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the neutron project. # # Translators: # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: neutron VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2019-12-20 15:01+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 05:56+0000\n" "Last-Translator: Copied by Zanata \n" "Language: zh_TW\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Chinese (Taiwan)\n" #, python-format msgid "" "\n" "Command: %(cmd)s\n" "Exit code: %(code)s\n" "Stdin: %(stdin)s\n" "Stdout: %(stdout)s\n" "Stderr: %(stderr)s" msgstr "" "\n" "指令:%(cmd)s\n" "結束碼:%(code)s\n" "標準輸入:%(stdin)s\n" "標準輸出:%(stdout)s\n" "標準錯誤:%(stderr)s" #, python-format msgid "" "%(branch)s HEAD file does not match migration timeline head, expected: " "%(head)s" msgstr "%(branch)s HEAD 檔與移轉時間表表頭不符,預期為:%(head)s" #, python-format msgid "" "%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s' " "and '%(desc)s'" msgstr "" "對於 sort_dir 來說,%(invalid_dirs)s 是無效值,有效值為 '%(asc)s' 及 " "'%(desc)s'" #, python-format msgid "%(key)s prohibited for %(tunnel)s provider network" msgstr "%(tunnel)s 提供者網路已禁止 %(key)s" #, python-format msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" msgstr "%(name)s '%(addr)s' 與 ip_version '%(ip_version)s' 不符" #, python-format msgid "%s cannot be called while in offline mode" msgstr "當 %s 處於離線模式時,無法對其進行呼叫" #, python-format msgid "%s is invalid attribute for sort_keys" msgstr "對於 sort_key 來說,%s 是無效的屬性" #, python-format msgid "%s must implement get_port_from_device or get_ports_from_devices." 
msgstr "%s 必須實作 get_port_from_device 或 get_ports_from_devices。" #, python-format msgid "%s prohibited for VLAN provider network" msgstr "VLAN 提供者網路已禁止 %s" #, python-format msgid "%s prohibited for flat provider network" msgstr "平面提供者網路已禁止 %s" #, python-format msgid "%s prohibited for local provider network" msgstr "本端提供者網路已禁止 %s" #, python-format msgid "'%s' is not a valid RBAC object type" msgstr "'%s' 不是有效的 RBAC 物件類型" #, python-format msgid "'%s' is not supported for filtering" msgstr "'%s' 不支援過濾" #, python-format msgid "'module' object has no attribute '%s'" msgstr "'module' 物件不含屬性 '%s'" msgid "'port_max' is smaller than 'port_min'" msgstr "'port_max' 小於 'port_min'" msgid "0 is not allowed as CIDR prefix length" msgstr "不接受 0 作為 CIDR 字首長度" msgid "A cidr must be specified in the absence of a subnet pool" msgstr "如果未指定子網路儲存區,則必須指定 cidr" msgid "" "A decimal value as Vendor's Registered Private Enterprise Number as required " "by RFC3315 DUID-EN." msgstr "十進位值,依 RFC3315 DUID-EN 的需要,作為供應商的已登錄專用企業號碼。" #, python-format msgid "A default external network already exists: %(net_id)s." msgstr "預設外部網路已經存在:%(net_id)s。" msgid "" "A default subnetpool for this IP family has already been set. Only one " "default may exist per IP family" msgstr "已經設定了此 IP 系列的預設子網路儲存區。每個 IP 系列只能存在一個預設值" msgid "A metering driver must be specified" msgstr "必須指定計量驅動程式" msgid "Access to this resource was denied." msgstr "拒絕存取此資源。" msgid "Action to be executed when a child process dies" msgstr "子程序當掉時要執行的動作" msgid "" "Add comments to iptables rules. Set to false to disallow the addition of " "comments to generated iptables rules that describe each rule's purpose. " "System must support the iptables comments module for addition of comments." msgstr "" "將註解新增至 iptables 規則。設為 false 可禁止向所產生用來說明每一個規則之目的" "的 iptables 規則新增註解。系統必須支援 iptables 註解模組才能新增註解。" msgid "Address not present on interface" msgstr "位址未呈現在介面上" msgid "Adds test attributes to core resources." msgstr "將測試屬性新增至核心資源。" #, python-format msgid "Agent %(id)s is not a L3 Agent or has been disabled" msgstr "代理程式 %(id)s 不是 L3 代理程式或者已停用" msgid "" "Agent starts with admin_state_up=False when enable_new_agents=False. In the " "case, user's resources will not be scheduled automatically to the agent " "until admin changes admin_state_up to True." msgstr "" "當 enable_new_agents=False 時,代理程式從 admin_state_up=False 開始。在這種情" "況下,不會將使用者的資源自動排程到代理程式,直到管理者將 admin_state_up 變更" "為 True 為止。" #, python-format msgid "Agent updated: %(payload)s" msgstr "已更新代理程式:%(payload)s" msgid "Allow auto scheduling networks to DHCP agent." msgstr "容許自動將網路排程到 DHCP 代理程式。" msgid "Allow auto scheduling of routers to L3 agent." msgstr "容許自動將路由器排定到 L3 代理程式。" msgid "" "Allow overlapping IP support in Neutron. Attention: the following parameter " "MUST be set to False if Neutron is being used in conjunction with Nova " "security groups." msgstr "" "容許 Neutron 中的重疊 IP 支援。注意:如果將 Neutron 與 Nova 安全性群組一起使" "用,則必須將下列參數設為 False。" msgid "Allow running metadata proxy." 
msgstr "容許執行 meta 資料 Proxy。" msgid "Allow sending resource operation notification to DHCP agent" msgstr "容許將資源作業通知傳送給 DHCP 代理程式" msgid "Allow the creation of PTR records" msgstr "容許建立 PTR 記錄" msgid "Allow the usage of the bulk API" msgstr "容許使用主體 API" msgid "Allow to perform insecure SSL (https) requests to nova metadata" msgstr "容許對 Nova meta 資料執行不安全的 SSL (HTTPS) 要求" msgid "" "Allows for serving metadata requests coming from a dedicated metadata access " "network whose CIDR is 169.254.169.254/16 (or larger prefix), and is " "connected to a Neutron router from which the VMs send metadata:1 request. In " "this case DHCP Option 121 will not be injected in VMs, as they will be able " "to reach 169.254.169.254 through a router. This option requires " "enable_isolated_metadata = True." msgstr "" "容許負責處理來自專用 meta 資料存取網路的 meta 資料要求,該網路的 CIDR 是 " "169.254.169.254/16(或更大字首)並且已連接至 Neutron 路由器(VM 從此 Neutron " "路由器傳送 metadata:1 要求)。在這種情況下,DHCP 選項 121 將不注入 VM,因為它" "們能夠透過路由器呼叫到 169.254.169.254。這個選項需要 " "enable_isolated_metadata = True。" msgid "An RBAC policy already exists with those values." msgstr "包含那些值的 RBAC 原則已經存在。" msgid "An identifier must be specified when updating a subnet" msgstr "更新子網路時,必須提供 ID" msgid "" "An ordered list of extension driver entrypoints to be loaded from the " "neutron.ml2.extension_drivers namespace. For example: extension_drivers = " "port_security,qos" msgstr "" "要從 neutron.ml2.extension_drivers 名稱空間載入之延伸驅動程式進入點的有序清" "單。例如:extension_drivers = port_security,qos" msgid "" "An ordered list of networking mechanism driver entrypoints to be loaded from " "the neutron.ml2.mechanism_drivers namespace." msgstr "" "要從 neutron.ml2.mechanism_drivers 名稱空間載入的網路機制驅動程式進入點有序清" "單。" msgid "An unknown error has occurred. Please try your request again." msgstr "發生不明錯誤。請重試要求。" msgid "Async process didn't respawn" msgstr "同步程序未再次大量產生" msgid "Authorization URL for connecting to designate in admin context" msgstr "用於連接以在管理環境定義中指定的授權 URL" msgid "Automatically remove networks from offline DHCP agents." msgstr "從離線 DHCP 代理程式自動移除網路。" msgid "" "Automatically reschedule routers from offline L3 agents to online L3 agents." msgstr "自動將路由器從離線 L3 代理程式重新排定至線上 L3代理程式。" msgid "Availability zone of this node" msgstr "此節點的可用性區域。" msgid "Available commands" msgstr "可用的指令" #, python-format msgid "Base MAC: %s" msgstr "基本 MAC:%s" msgid "" "Base log dir for dnsmasq logging. The log contains DHCP and DNS log " "information and is useful for debugging issues with either DHCP or DNS. If " "this section is null, disable dnsmasq log." 
msgstr "" "用於 dnsmasq 記載的基本日誌目錄。日誌包含 DHCP 及 DNS 日誌資訊,並且在對 " "DHCP 或 DNS 方面的問題進行除錯時很有用。如果此區段是空值,則將停用 dnsmasq 日" "誌。" msgid "Body contains invalid data" msgstr "內文包含無效資料" msgid "Bulk operation not supported" msgstr "不支援主體作業" msgid "CIDR to monitor" msgstr "要監視的 CIDR" #, python-format msgid "Callback for %(resource_type)s not found" msgstr "找不到 %(resource_type)s 的回呼" #, python-format msgid "Callback for %(resource_type)s returned wrong resource type" msgstr "%(resource_type)s 的回呼傳回了錯誤的資源類型" #, python-format msgid "Cannot add floating IP to port %s that has no fixed IPv4 addresses" msgstr "無法將浮動 IP 新增至沒有固定 IPv4 位址的埠 %s" #, python-format msgid "Cannot add multiple callbacks for %(resource_type)s" msgstr "無法新增 %(resource_type)s 的多重回呼" #, python-format msgid "Cannot allocate IPv%(req_ver)s subnet from IPv%(pool_ver)s subnet pool" msgstr "無法從 IPv%(pool_ver)s 子網路儲存區配置 IPv%(req_ver)s 子網路" msgid "Cannot allocate requested subnet from the available set of prefixes" msgstr "無法配置可用字首集中的所要求子網路" msgid "Cannot disable enable_dhcp with ipv6 attributes set" msgstr "在設定了 ipv6 屬性的情況下,無法停用 enable_dhcp" #, python-format msgid "Cannot handle subnet of type %(subnet_type)s" msgstr "無法處理類型為 %(subnet_type)s 的子網路" msgid "Cannot have multiple IPv4 subnets on router port" msgstr "路由器埠上不能具有多個 IPv4 子網路" #, python-format msgid "" "Cannot have multiple router ports with the same network id if both contain " "IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network id %(nid)s" msgstr "" "不能具有多個包含相同網路 ID 的路由器埠(如果兩者都包含 IPv6 子網路)。現有埠 " "%(p)s 具有 IPv6 子網路和網路 ID %(nid)s" #, python-format msgid "" "Cannot host distributed router %(router_id)s on legacy L3 agent %(agent_id)s." msgstr "無法在舊式 L3 代理程式 %(agent_id)s 上管理分散式路由器 %(router_id)s。" msgid "Cannot specify both subnet-id and port-id" msgstr "無法同時指定 subnet-id 及 port-id" msgid "Cannot understand JSON" msgstr "無法理解 JSON" #, python-format msgid "Cannot update read-only attribute %s" msgstr "無法更新唯讀屬性 %s" msgid "Certificate Authority public key (CA cert) file for ssl" msgstr "用於 SSL 的「憑證管理中心」公開金鑰(CA 憑證)檔案" msgid "Check ebtables installation" msgstr "檢查 ebtables 安裝" msgid "Check for ARP header match support" msgstr "檢查 ARP 標頭符合支援" msgid "Check for ARP responder support" msgstr "檢查 ARP 回應者支援" msgid "Check for ICMPv6 header match support" msgstr "檢查 ICMPv6 標頭符合支援" msgid "Check for OVS Geneve support" msgstr "檢查 OVS Geneve 支援" msgid "Check for OVS vxlan support" msgstr "檢查 OVS vxlan 支援" msgid "Check for VF management support" msgstr "檢查 VF 管理支援" msgid "Check for iproute2 vxlan support" msgstr "檢查 iproute2 vxlan 支援" msgid "Check for nova notification support" msgstr "檢查 Nova 通知支援" msgid "Check for patch port support" msgstr "檢查修補程式埠支援" msgid "Check ip6tables installation" msgstr "檢查 ip6tables 安裝" msgid "Check ipset installation" msgstr "檢查 ipset 安裝" msgid "Check keepalived IPv6 support" msgstr "檢查 keepalived IPv6 支援" msgid "Check minimal dibbler version" msgstr "檢查 dibbler 版本下限" msgid "Check minimal dnsmasq version" msgstr "檢查最低 dnsmasq 版本" msgid "Check netns permission settings" msgstr "檢查 netns 權限設定" msgid "Check ovs conntrack support" msgstr "檢查 ovs conntrack 支援" msgid "Check ovsdb native interface support" msgstr "檢查 OVSDB 原生介面支援" #, python-format msgid "" "Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s of " "subnet %(sub_id)s" msgstr "" "子網路 %(subnet_id)s 的 CIDR %(subnet_cidr)s 與子網路 %(sub_id)s 的 CIDR " "%(cidr)s 重疊" msgid "Cleanup resources of a specific agent type only." 
msgstr "只清除特定代理程式類型的資源。" msgid "Client certificate for nova metadata api server." msgstr "Nova meta 資料 API 伺服器的用戶端憑證。" msgid "" "Comma-separated list of : tuples, mapping " "network_device to the agent's node-specific list of virtual functions that " "should not be used for virtual networking. vfs_to_exclude is a semicolon-" "separated list of virtual functions to exclude from network_device. The " "network_device in the mapping should appear in the physical_device_mappings " "list." msgstr "" "network_device 與不應用於虛擬網路之虛擬函數的代理程式節點專用清單的 " ": 值組對映清單(使用逗點區隔)。" "vfs_to_exclude 是要從 network_device 中排除之虛擬函數的分號區隔清單。對映中" "的 network_device 應該出現在 physical_device_mappings 清單中。" msgid "" "Comma-separated list of : tuples mapping " "physical network names to the agent's node-specific physical network device " "interfaces of SR-IOV physical function to be used for VLAN networks. All " "physical networks listed in network_vlan_ranges on the server should have " "mappings to appropriate interfaces on each agent." msgstr "" "實體網路名稱與要用於 VLAN 網路之 SR-IOV 實體功能的代理程式節點專用實體網路裝" "置介面的: 值組對映清單(使用逗點區隔)。列" "在伺服器上network_vlan_ranges 中的所有實體網路都應該具有與每個代理程式上適當" "介面的對映。" msgid "" "Comma-separated list of : tuples " "mapping physical network names to the agent's node-specific physical network " "interfaces to be used for flat and VLAN networks. All physical networks " "listed in network_vlan_ranges on the server should have mappings to " "appropriate interfaces on each agent." msgstr "" "實體網路名稱與要用於平面網路及 VLAN 網路之代理程式節點專用實體網路介面的" ": 值組對映清單(使用逗點區隔)。列在伺" "服器上network_vlan_ranges 中的所有實體網路都應該具有與每個代理程式上適當介面" "的對映。" msgid "" "Comma-separated list of : tuples enumerating ranges of GRE " "tunnel IDs that are available for tenant network allocation" msgstr "" ": 值組的逗點區隔清單,用於列舉可用於承租人網路配置的 GRE 通" "道 ID 範圍" msgid "" "Comma-separated list of : tuples enumerating ranges of " "Geneve VNI IDs that are available for tenant network allocation" msgstr "" ": 值組的逗點區隔清單,用於列舉可用於承租人網路配置的 " "Geneve VNI ID 範圍" msgid "" "Comma-separated list of : tuples enumerating ranges of " "VXLAN VNI IDs that are available for tenant network allocation" msgstr "" ": 值組的逗點區隔清單,用於列舉可用於承租人網路配置的 VXLAN " "VNI ID 範圍" msgid "" "Comma-separated list of the DNS servers which will be used as forwarders." msgstr "將用來作為轉遞程式的 DNS 伺服器逗點區隔清單。" msgid "Command to execute" msgstr "要執行的指令" msgid "Config file for interface driver (You may also use l3_agent.ini)" msgstr "介面驅動程式的配置檔(您也可使用 l3_agent.ini)" #, python-format msgid "Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s" msgstr "CIDR %(cidr)s 的乙太網路類型 %(ethertype)s 值有衝突" msgid "" "Controls whether the neutron security group API is enabled in the server. It " "should be false when using no security groups or using the nova security " "group API." msgstr "" "控制是否在伺服器中啟用 Neutron 安全群組 API。當不使用安全群組時或者使用 Nova " "安全群組 API 時,它應該是 false。" #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" msgstr "嘗試 %(time)d 秒後仍無法連結至 %(host)s:%(port)s" msgid "Could not deserialize data" msgstr "無法解除序列化資料" msgid "" "DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite " "lease times." msgstr "" "DHCP 租賃期限(以秒為單位)。使用 -1 可告知 dnsmasq 使用無限的租賃時間。" msgid "" "DVR deployments for VXLAN/GRE/Geneve underlays require L2-pop to be enabled, " "in both the Agent and Server side." msgstr "" "VXLAN/GRE/Geneve 基礎的 DVR 部署需要同時在代理程式端及伺服器端啟用 L2-pop。" msgid "" "Database engine for which script will be generated when using offline " "migration." 
msgstr "使用離線移轉時,將對其產生 Script 的資料庫引擎。" msgid "Default external networks must be shared to everyone." msgstr "必須將預設外部網路與所有使用者共用。" msgid "" "Default network type for external networks when no provider attributes are " "specified. By default it is None, which means that if provider attributes " "are not specified while creating external networks then they will have the " "same type as tenant networks. Allowed values for external_network_type " "config option depend on the network type values configured in type_drivers " "config option." msgstr "" "在未指定提供者屬性時,外部網路的預設網路類型。依預設,它是「無」,這表示如果" "在建立外部網路時未指定提供者屬性,則它們將相同的類型作為承租人網路。" "external_network_type 配置選項所接受的值視 type_drivers 配置選項中配置的網路" "類型值而定。" msgid "" "Default number of RBAC entries allowed per tenant. A negative value means " "unlimited." msgstr "每個承租人所容許的 RBAC 項目數目。負數值表示無限制。" msgid "" "Default number of resource allowed per tenant. A negative value means " "unlimited." msgstr "每個承租人所容許的預設資源數目。負數值表示無限制。" msgid "Default security group" msgstr "預設安全群組" msgid "Default security group already exists." msgstr "預設安全群組已存在。" msgid "" "Default value of availability zone hints. The availability zone aware " "schedulers use this when the resources availability_zone_hints is empty. " "Multiple availability zones can be specified by a comma separated string. " "This value can be empty. In this case, even if availability_zone_hints for a " "resource is empty, availability zone is considered for high availability " "while scheduling the resource." msgstr "" "可用性區域提示的預設值。當資源 availability_zone_hints 為空時,可用性區域知道" "排程器使用此項。可以透過逗點區隔的字串來指定多個可用性區域。此值可以是空的。" "在這種情況下,即使資源 availability_zone_hints 為空,也會在排程資源時將可用性" "區域視為高可用性。" msgid "" "Define the default value of enable_snat if not provided in " "external_gateway_info." msgstr "" "定義 enable_snat 的預設值(如果未在external_gateway_info 中提供的話)。" msgid "" "Defines providers for advanced services using the format: :" ":[:default]" msgstr "" "使用下列格式,給進階服務定義提供者:::[:default]" msgid "Delete the namespace by removing all devices." msgstr "透過移除所有裝置來刪除名稱空間。" #, python-format msgid "Deleting port %s" msgstr "正在刪除埠 %s" #, python-format msgid "Deployment error: %(reason)s." msgstr "部署錯誤:%(reason)s。" msgid "Destroy IPsets even if there is an iptables reference." msgstr "即使有 iptables 參照,也毀損 IPset。" msgid "Destroy all IPsets." msgstr "毀損所有 IPset。" #, python-format msgid "Device %(dev_name)s in mapping: %(mapping)s not unique" msgstr "對映 %(mapping)s 中的裝置 %(dev_name)s 不是唯一的" msgid "Device not found" msgstr "找不到裝置" msgid "Domain to use for building the hostnames" msgstr "用於建置主機名稱的網域" msgid "Downgrade no longer supported" msgstr "不再支援降級" #, python-format msgid "Driver %s is not unique across providers" msgstr "驅動程式 %s 在提供者之間不是唯一的" msgid "Driver for external DNS integration." msgstr "用於外部 DNS 整合的驅動程式。" msgid "Driver for security groups firewall in the L2 agent" msgstr "L2 代理程式中安全群組防火牆的驅動程式" msgid "Driver to use for scheduling network to DHCP agent" msgstr "用於將網路排程到 DHCP 代理程式的驅動程式" msgid "Driver to use for scheduling router to a default L3 agent" msgstr "用於將路由器排程到預設 L3 代理程式的驅動程式" msgid "" "Driver used for ipv6 prefix delegation. This needs to be an entry point " "defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg for " "entry points included with the neutron source." msgstr "" "用於 IPv6 字首委派的驅動程式。這需要是 neutron.agent.linux.pd_drivers 中定義" "的一個進入點。請參閱 setup.cfg,以取得Neutron 來源隨附的進入點。" #, python-format msgid "" "Duplicate L3HARouterAgentPortBinding is created for router(s) %(router)s. " "Database cannot be upgraded. 
Please, remove all duplicates before upgrading " "the database." msgstr "" "為路由器 %(router)s 建立了重複的 L3HARouterAgentPortBinding。無法升級資料庫。" "請先移除所有重複項目,然後再升級資料庫。" msgid "Duplicate Security Group Rule in POST." msgstr "POST 中的安全群組規則重複。" msgid "Duplicate address detected" msgstr "偵測到重複位址" #, python-format msgid "ERROR: %s" msgstr "錯誤:%s" msgid "" "ERROR: Unable to find configuration file via the default search paths (~/." "neutron/, ~/, /etc/neutron/, /etc/) and the '--config-file' option!" msgstr "" "錯誤:無法透過預設搜尋路徑(~/.neutron/、~/、/etc/neutron/及 /etc/)與 '--" "config-file' 選項來找到配置檔!" msgid "" "Either one of parameter network_id or router_id must be passed to _get_ports " "method." msgstr "必須將 network_id 或 router_id 中的一個參數傳遞至_get_ports 方法。" msgid "Either subnet_id or port_id must be specified" msgstr "必須指定 subnet_id 或 port_id" msgid "Enable HA mode for virtual routers." msgstr "啟用虛擬路由器的 HA 模式。" msgid "Enable SSL on the API server" msgstr "在 API 伺服器上啟用 SSL" msgid "" "Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 " "plugin using linuxbridge mechanism driver" msgstr "" "在代理程式上啟用 VXLAN。代理程式是由 ML2 外掛程式(使用 LinuxBridge 機制驅動" "程式)管理時,可以啟用 VXLAN" msgid "" "Enable services on an agent with admin_state_up False. If this option is " "False, when admin_state_up of an agent is turned False, services on it will " "be disabled. Agents with admin_state_up False are not selected for automatic " "scheduling regardless of this option. But manual scheduling to such agents " "is available if this option is True." msgstr "" "對 admin_state_up 為 False 的代理程式啟用服務。如果此選項為 False,則當代理程" "式的 admin_state_up 變為 False 時,將停用其上的服務。無論此選項為何,都不會選" "取 admin_state_up 為 False的代理程式以進行自動排程。但如果此選項為 True,則可" "以使用此類代理程式的手動排程。" msgid "" "Enables IPv6 Prefix Delegation for automatic subnet CIDR allocation. Set to " "True to enable IPv6 Prefix Delegation for subnet allocation in a PD-capable " "environment. Users making subnet creation requests for IPv6 subnets without " "providing a CIDR or subnetpool ID will be given a CIDR via the Prefix " "Delegation mechanism. Note that enabling PD will override the behavior of " "the default IPv6 subnetpool." msgstr "" "針對自動子網路 CIDR 配置啟用 IPv6 字首委派。設為 True 可針對支援 PD 之環境中" "的子網路配置,啟用 IPv6 字首委派。將透過字首委派機制向針對 IPv6 子網路發出子" "網路建立要求但卻未提供 CIDR 或子網路儲存區 ID 的使用者,提供 CIDR。請注意,啟" "用 PD 將置換預設 IPv6 子網路儲存區的行為。" msgid "" "Enables the dnsmasq service to provide name resolution for instances via DNS " "resolvers on the host running the DHCP agent. Effectively removes the '--no-" "resolv' option from the dnsmasq process arguments. Adding custom DNS " "resolvers to the 'dnsmasq_dns_servers' option disables this feature." msgstr "" "容許 dnsmasq 服務透過執行 DHCP 代理程式的主機上的 DNS 解析器,為實例提供名稱" "解析。從 dnsmasq 程序引數中有效地移除 '--no-resolv' 選項。將自訂 DNS 解析器新" "增至 'dnsmasq_dns_servers' 選項會停用此功能。" #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "嘗試執行作業時發生錯誤 %(reason)s。" #, python-format msgid "Error parsing dns address %s" msgstr "剖析 DNS 位址 %s 時發生錯誤" #, python-format msgid "Error while reading %s" msgstr "讀取 %s 時發生錯誤" #, python-format msgid "" "Exceeded %s second limit waiting for address to leave the tentative state." msgstr "等待位址離開暫訂狀態時,已超過 %s 秒限制。" msgid "Existing prefixes must be a subset of the new prefixes" msgstr "現有字首必須是新字首的子集" #, python-format msgid "" "Exit code: %(returncode)d; Stdin: %(stdin)s; Stdout: %(stdout)s; Stderr: " "%(stderr)s" msgstr "" "結束碼:%(returncode)d;標準輸入:%(stdin)s;標準輸出:%(stdout)s,標準錯誤:" "%(stderr)s" #, python-format msgid "Extension %(driver)s failed." 
msgstr "延伸 %(driver)s 失敗。" #, python-format msgid "" "Extension driver %(driver)s required for service plugin %(service_plugin)s " "not found." msgstr "找不到服務外掛程式 %(service_plugin)s 所需的延伸驅動程式 %(driver)s。" msgid "" "Extension to use alongside ml2 plugin's l2population mechanism driver. It " "enables the plugin to populate VXLAN forwarding table." msgstr "" "與 ML2 外掛程式的 l2population 機制驅動程式一起使用的延伸。它支援該外掛程式將" "資料移入 VXLAN 轉遞表格。" #, python-format msgid "Extension with alias %s does not exist" msgstr "別名為 %s 的延伸不存在" msgid "Extensions list to use" msgstr "要使用的延伸清單" #, python-format msgid "External IP %s is the same as the gateway IP" msgstr "外部 IP %s 與閘道 IP 相同" #, python-format msgid "Failed rescheduling router %(router_id)s: no eligible l3 agent found." msgstr "無法重新排定路由器 %(router_id)s:找不到適用的 L3 代理程式。" #, python-format msgid "Failed scheduling router %(router_id)s to the L3 Agent %(agent_id)s." msgstr "無法將路由器 %(router_id)s 排程到 L3 代理程式 %(agent_id)s。" #, python-format msgid "" "Failed to create port on network %(network_id)s, because fixed_ips included " "invalid subnet %(subnet_id)s" msgstr "" "無法在網路 %(network_id)s 上建立埠,因為 fixed_ips 包含無效的子網路 " "%(subnet_id)s" #, python-format msgid "Failed to locate source for %s." msgstr "找不到 %s 的來源。" msgid "Failed to remove supplemental groups" msgstr "無法移除增補群組" #, python-format msgid "Failed to set gid %s" msgstr "無法設定 GID %s" #, python-format msgid "Failed to set uid %s" msgstr "無法設定 UID %s" #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "無法將 %(type)s 通道埠設為 %(ip)s" msgid "Failure applying iptables rules" msgstr "套用 iptables 規則時失敗" #, python-format msgid "Failure waiting for address %(address)s to become ready: %(reason)s" msgstr "等待位址 %(address)s 變成備妥時失敗:%(reason)s" msgid "Flat provider networks are disabled" msgstr "已停用平面提供程序網路" msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "對於 TCP/UDP 通訊協定,port_range_min 必須 <= port_range_max" msgid "Force ip_lib calls to use the root helper" msgstr "強制 ip_lib 呼叫使用根說明程式" msgid "Gateway IP version inconsistent with allocation pool version" msgstr "閘道 IP 版本與配置儲存區版本不一致" msgid "Gateway is not valid on subnet" msgstr "閘道在子網路上無效" msgid "" "Group (gid or name) running metadata proxy after its initialization (if " "empty: agent effective group)." msgstr "" "在 meta 資料 Proxy 起始設定之後執行該 Proxy 的群組(GID 或名稱)(如果為空:" "則為代理程式有效群組)。" msgid "Group (gid or name) running this process after its initialization" msgstr "在此程序起始設定之後執行此程序的群組(GID 或名稱)" msgid "" "Hostname to be used by the Neutron server, agents and services running on " "this machine. All the agents and services running on this machine must use " "the same host value." msgstr "" "在此機器上執行之 Neutron 伺服器、代理程式及服務要使用的主機名稱。在此機器上執" "行的所有代理程式及服務都必須使用相同的主機值。" #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" "min) is missing." msgstr "" "提供了 ICMP 代碼 (port-range-max) %(value)s,但遺漏了 ICMP 類型(port-range-" "min)。" msgid "ID of network" msgstr "網路的 ID" msgid "ID of network to probe" msgstr "要探測的網路 ID" msgid "ID of probe port to delete" msgstr "要刪除的探針埠 ID" msgid "ID of probe port to execute command" msgstr "要執行指令的探針埠 ID" msgid "ID of the router" msgstr "路由器 ID" #, python-format msgid "IP address %(ip)s already allocated in subnet %(subnet_id)s" msgstr "IP 位址 %(ip)s 已經在子網路 %(subnet_id)s 中得到配置" #, python-format msgid "IP address %(ip)s does not belong to subnet %(subnet_id)s" msgstr "IP 位址 %(ip)s 不屬於子網路 %(subnet_id)s" msgid "IP allocation failed. Try again later." 
msgstr "IP 配置失敗。請稍後再試。" msgid "IP allocation requires subnet_id or ip_address" msgstr "IP 配置需要 subnet_id 或 ip_address" #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables rules:\n" "%s" msgstr "" "IPTablesManager.apply 無法套用下列 iptables 規則集:\n" "%s" msgid "IPv6 Address Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "針對字首委派,IPv6 位址模式必須是 SLAAC 或 Stateless。" msgid "IPv6 RA Mode must be SLAAC or Stateless for Prefix Delegation." msgstr "針對字首委派,IPv6 RA 模式必須是 SLAAC 或 Stateless。" #, python-format msgid "" "IPv6 address %(ip)s cannot be directly assigned to a port on subnet " "%(subnet_id)s as the subnet is configured for automatic addresses" msgstr "" "無法直接將 IPv6 位址 %(ip)s 指派給子網路%(subnet_id)s 上的埠,因為該子網路配" "置為用於自動位址" #, python-format msgid "" "IPv6 subnet %s configured to receive RAs from an external router cannot be " "added to Neutron Router." msgstr "" "無法將配置為從外部路由器接收 RA 的 IPv6 子網路 %s 新增至 Neutron 路由器。" msgid "" "If True, then allow plugins that support it to create VLAN transparent " "networks." msgstr "如果為 True,則容許支援它的外掛程式建立 VLAN 透通網路。" msgid "Illegal IP version number" msgstr "無效的 IP 版本號碼" msgid "" "In some cases the Neutron router is not present to provide the metadata IP " "but the DHCP server can be used to provide this info. Setting this value " "will force the DHCP server to append specific host routes to the DHCP " "request. If this option is set, then the metadata service will be activated " "for all the networks." msgstr "" "在部分情況下,Neutron 路由器未呈現以提供 meta 資料 IP,但 DHCP 伺服器可用於提" "供此資訊。設定此值會強制讓 DHCP 伺服器將特定的主機路線附加至 DHCP 要求。如果" "設定此選項,則將對所有網路啟動 meta 資料服務。" msgid "" "Indicates that this L3 agent should also handle routers that do not have an " "external network gateway configured. This option should be True only for a " "single agent in a Neutron deployment, and may be False for all agents if all " "routers must have an external network gateway." msgstr "" "指示此 L3 代理程式還應該處理尚未配置外部網路閘道的路由器。針對 Neutron 部署中" "的單個代理程式,這個選項只應該為 True;如果所有路由器都必須具有外部網路閘道," "則針對所有路由器,這個選項應該為 False。" #, python-format msgid "Instance of class %(module)s.%(class)s must contain _cache attribute" msgstr "類別 %(module)s 的實例。%(class)s 必須包含 _cache 屬性" #, python-format msgid "Insufficient prefix space to allocate subnet size /%s" msgstr "字首空間不足,無法配置子網路大小 /%s" msgid "Insufficient rights for removing default security group." msgstr "權限不足,無法移除預設安全群組。" msgid "" "Integration bridge to use. Do not change this parameter unless you have a " "good reason to. This is the name of the OVS integration bridge. There is one " "per hypervisor. The integration bridge acts as a virtual 'patch bay'. All VM " "VIFs are attached to this bridge and then 'patched' according to their " "network connectivity." msgstr "" "要使用的整合橋接器。除非您有適當的原因,否則請勿變更此參數。這是 OVS 整合橋接" "器的名稱。每個 Hypervisor 有一個整合橋接器。整合橋接器充當虛擬的「修補程式機" "架」。所有 VM VIF 都已連接至此橋接器,然後根據其網路連線功能進行「修補」。" msgid "Interface to monitor" msgstr "要監視的介面" msgid "" "Interval between checks of child process liveness (seconds), use 0 to disable" msgstr "子程序存活檢查之間的間隔(秒),使用 0 以停用" msgid "Interval between two metering measures" msgstr "兩次計量測量之間的間隔" msgid "Interval between two metering reports" msgstr "兩次計量報告之間的間隔" #, python-format msgid "Invalid Device %(dev_name)s: %(reason)s" msgstr "無效的裝置 %(dev_name)s:%(reason)s" #, python-format msgid "" "Invalid action '%(action)s' for object type '%(object_type)s'. 
Valid " "actions: %(valid_actions)s" msgstr "" "針對物件類型 '%(object_type)s' 的動作 '%(action)s' 無效。有效動作:" "%(valid_actions)s" #, python-format msgid "" "Invalid authentication type: %(auth_type)s, valid types are: " "%(valid_auth_types)s" msgstr "無效的鑑別類型:%(auth_type)s,有效的類型為:%(valid_auth_types)s" #, python-format msgid "Invalid ethertype %(ethertype)s for protocol %(protocol)s." msgstr "通訊協定 %(protocol)s 的乙太網路類型 %(ethertype)s 無效。" #, python-format msgid "Invalid format: %s" msgstr "無效的格式:%s" #, python-format msgid "Invalid instance state: %(state)s, valid states are: %(valid_states)s" msgstr "無效的實例狀態:%(state)s,有效的狀態為:%(valid_states)s" #, python-format msgid "Invalid mapping: '%s'" msgstr "無效的對映:'%s'" #, python-format msgid "Invalid pci slot %(pci_slot)s" msgstr "無效的 PCI 插槽 %(pci_slot)s" #, python-format msgid "Invalid provider format. Last part should be 'default' or empty: %s" msgstr "無效的提供者格式。最後一個部分應該是 'default' 或空白:%s" #, python-format msgid "Invalid resource type %(resource_type)s" msgstr "資源類型 %(resource_type)s 無效" #, python-format msgid "Invalid route: %s" msgstr "無效的路徑:%s" msgid "Invalid service provider format" msgstr "無效的服務提供者格式" #, python-format msgid "" "Invalid value for ICMP %(field)s (%(attr)s) %(value)s. It must be 0 to 255." msgstr "" "ICMP %(field)s (%(attr)s) 的值 %(value)s 無效。該值必須在 0 到255 之間。" #, python-format msgid "Invalid value for port %(port)s" msgstr "埠 %(port)s 的值無效" msgid "" "Iptables mangle mark used to mark ingress from external network. This mark " "will be masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "用於標示外部網路中入口的 iptables 破壞標記。此標記將以 0xffff 進行遮罩,以便" "只使用較低的 16 位元。" msgid "" "Iptables mangle mark used to mark metadata valid requests. This mark will be " "masked with 0xffff so that only the lower 16 bits will be used." msgstr "" "用於標示 meta 資料有效要求的 iptables 破壞標記。此標記將以 0xffff 進行遮罩," "以便只使用較低的 16 位元。" msgid "Keepalived didn't respawn" msgstr "Keepalived 未再次大量產生" msgid "Keepalived didn't spawn" msgstr "Keepalived 未大量產生" #, python-format msgid "" "Kernel HZ value %(value)s is not valid. This value must be greater than 0." msgstr "核心 HZ 值 %(value)s 無效。此值必須大於 0。" msgid "L3 agent failure to setup NAT for floating IPs" msgstr "L3 代理程式無法針對浮動 IP 設定 NAT" msgid "L3 agent failure to setup floating IPs" msgstr "L3 代理程式無法設定浮動 IP" msgid "Limit number of leases to prevent a denial-of-service." msgstr "限制租賃次數以防止阻斷服務攻擊。" msgid "List of :" msgstr ": 的清單" msgid "" "List of :: or " "specifying physical_network names usable for VLAN provider and tenant " "networks, as well as ranges of VLAN tags on each available for allocation to " "tenant networks." msgstr "" ":: 的清單,指定可" "用於 VLAN 提供者及承租人網路的 physical_network 名稱,以及在每個可用於配置給" "承租人網路的 physical_network 上指定 VLAN標籤範圍。" msgid "" "List of network type driver entrypoints to be loaded from the neutron.ml2." "type_drivers namespace." msgstr "" "要從 neutron.ml2.type_drivers 名稱空間中載入的網路類型驅動程式進入點清單。" msgid "" "List of physical_network names with which flat networks can be created. Use " "default '*' to allow flat networks with arbitrary physical_network names. " "Use an empty list to disable flat networks." msgstr "" "可用來建立平面網路的 physical_network 名稱清單。使用預設值 '*' 可容許含有任" "意 physical_network 名稱的平面網路。使用空白清單可停用平面網路。" msgid "Location for Metadata Proxy UNIX domain socket." msgstr "meta 資料 Proxy UNIX 網域 Socket 的位置" msgid "Location of Metadata Proxy UNIX domain socket" msgstr "meta 資料 Proxy UNIX 網域 Socket 的位置" msgid "Location to store DHCP server config files." msgstr "DHCP 伺服器配置檔的儲存位置。" msgid "Location to store IPv6 PD files." 
msgstr "用於儲存 IPv6 PD 檔的位置。" msgid "Location to store IPv6 RA config files" msgstr "用於儲存 IPv6 RA 配置檔的位置" msgid "Location to store child pid files" msgstr "子項 PID 檔案的儲存位置" msgid "Log agent heartbeats" msgstr "日誌代理程式活動訊號" msgid "" "MTU of the underlying physical network. Neutron uses this value to calculate " "MTU for all virtual network components. For flat and VLAN networks, neutron " "uses this value without modification. For overlay networks such as VXLAN, " "neutron automatically subtracts the overlay protocol overhead from this " "value. Defaults to 1500, the standard value for Ethernet." msgstr "" "基礎實體網路的 MTU。Neutron 使用此值來計算所有虛擬網路元件的 MTU。若為平面網" "路和 VLAN 網路,Neutron 使用此值,而不進行修改。若為套版網路(例如 VXLAN)," "Neutron 會自動從此值中扣除套版通訊協定額外負擔。預設值為 1500,這是乙太網路的" "標準值。" msgid "MTU size of veth interfaces" msgstr "veth 介面的 MTU 大小" msgid "Make the l2 agent run in DVR mode." msgstr "讓 L2 代理程式在 DVR 模式下執行。" msgid "Malformed request body" msgstr "要求內文的格式不正確" msgid "MaxRtrAdvInterval setting for radvd.conf" msgstr "radvd.conf 的 MaxRtrAdvInterval 設定" msgid "Maximum number of DNS nameservers per subnet" msgstr "每個子網路的 DNS 名稱伺服器數目上限" msgid "" "Maximum number of L3 agents which a HA router will be scheduled on. If it is " "set to 0 then the router will be scheduled on every agent." msgstr "" "將在其中排程 HA 路由器的 L3 代理程式數目上限。如果將其設為 0,則將在每一個代" "理程式上排程該路由器。" msgid "Maximum number of allowed address pairs" msgstr "所容許的位址配對數目上限" msgid "Maximum number of host routes per subnet" msgstr "每個子網路的主機路徑數目上限" msgid "Maximum number of routes per router" msgstr "每個路由器的路徑數目上限" msgid "" "Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': deduce " "mode from metadata_proxy_user/group values, 'user': set metadata proxy " "socket mode to 0o644, to use when metadata_proxy_user is agent effective " "user or root, 'group': set metadata proxy socket mode to 0o664, to use when " "metadata_proxy_group is agent effective group or root, 'all': set metadata " "proxy socket mode to 0o666, to use otherwise." msgstr "" "meta 資料 Proxy UNIX 網域 Socket 模式,容許下列四個值:'deduce':來自 " "metadata_proxy_user/group 值的 deduce 模式;'user':將 meta 資料 ProxySocket " "模式設定為 0o644,以在 metadata_proxy_user 是代理程式有效使用者或 root 使用者" "時使用;'group':將 meta 資料 Proxy Socket 模式設定為 0o664,以在 " "metadata_proxy_group 是有效群組或 root 使用者時使用;'all':將 meta 資料" "Proxy Socket 模式設定為 0o666,以在其他情況下使用。" msgid "Metering driver" msgstr "計量驅動程式" msgid "MinRtrAdvInterval setting for radvd.conf" msgstr "radvd.conf 的 MinRtrAdvInterval 設定" msgid "Minimize polling by monitoring ovsdb for interface changes." msgstr "透過監視 OVSDB 是否有介面變更,將輪詢減至最少。" #, python-format msgid "Missing key in mapping: '%s'" msgstr "對映中遺漏了索引鍵:'%s'" msgid "" "Multicast group for VXLAN. When configured, will enable sending all " "broadcast traffic to this multicast group. When left unconfigured, will " "disable multicast VXLAN mode." msgstr "" "VXLAN 的多重播送群組。當已配置時,容許將所有廣播資料流量傳送至此多重播送群" "組。當保持未配置時,將停用多重播送 VXLAN 模式。" msgid "" "Multicast group(s) for vxlan interface. A range of group addresses may be " "specified by using CIDR notation. Specifying a range allows different VNIs " "to use different group addresses, reducing or eliminating spurious broadcast " "traffic to the tunnel endpoints. To reserve a unique group for each possible " "(24-bit) VNI, use a /8 such as 239.0.0.0/8. This setting must be the same on " "all the agents." 
msgstr "" "VXLAN 介面的多重播送群組。可以使用 CIDR 表示法來指定群組位址的範圍。指定一個" "範圍會容許不同的 VNI 使用不同的群組位址,以減少或刪除傳送至通道端點的虛假廣播" "資料流量。如果要為每一個可能的(24 位元)VNI 保留一個唯一群組,請使用 /8(例" "如 239.0.0.0/8)。在所有代理程式上,此設定必須相同。" #, python-format msgid "Multiple default providers for service %s" msgstr "服務 %s 的多個預設提供者" #, python-format msgid "Multiple providers specified for service %s" msgstr "給服務 %s 指定了多個提供者" msgid "Multiple tenant_ids in bulk security group rule create not allowed" msgstr "不容許主體安全群組規則建立作業中存在多個 tenant_id" msgid "Must also specify protocol if port range is given." msgstr "如果給定了埠範圍,則也必須指定通訊協定。" msgid "Must specify one or more actions on flow addition or modification" msgstr "必須對流程新增作業或修改作業指定一個以上的動作" msgid "Name of Open vSwitch bridge to use" msgstr "要使用的 Open vSwitch 橋接器名稱" msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "要使用的 Nova 區域名稱。如果 Keystone 管理多個區域,則很有用。" msgid "Namespace of the router" msgstr "路由器名稱空間" msgid "Native pagination depend on native sorting" msgstr "原生分頁相依於原生排序" #, python-format msgid "" "Need to apply migrations from %(project)s contract branch. This will require " "all Neutron server instances to be shutdown before proceeding with the " "upgrade." msgstr "" "需要套用來自 %(project)s 合約分支的移轉。這將需要先關閉所有 Neutron 伺服器實" "例,然後再繼續升級。" msgid "Negative delta (downgrade) not supported" msgstr "不支援負數差異(降級)" msgid "Negative relative revision (downgrade) not supported" msgstr "不支援負面的相對修訂(降級)" #, python-format msgid "Network %s does not contain any IPv4 subnet" msgstr "網路 %s 不包含任何 IPv4 子網路" #, python-format msgid "Network %s is not a valid external network" msgstr "網路 %s 不是有效的外部網路" #, python-format msgid "Network %s is not an external network" msgstr "網路 %s 不是外部網路" #, python-format msgid "" "Network of size %(size)s, from IP range %(parent_range)s excluding IP ranges " "%(excluded_ranges)s was not found." msgstr "" "在 IP 範圍 %(parent_range)s(排除 IP 範圍 %(excluded_ranges)s)中找不到大小" "為 %(size)s 的網路。" #, python-format msgid "Network type value '%s' not supported" msgstr "不支援網路類型值 '%s'" msgid "Network type value needed by the ML2 plugin" msgstr "ML2 外掛程式需要的網路類型值" msgid "Neutron core_plugin not configured!" msgstr "未配置 Neutron core_plugin!" msgid "No default router:external network" msgstr "沒有預設 router:external 網路" #, python-format msgid "No default subnetpool found for IPv%s" msgstr "找不到 IPv%s 的預設子網路儲存區" msgid "No default subnetpools defined" msgstr "未定義預設子網路儲存區" #, python-format msgid "No more IP addresses available for subnet %(subnet_id)s." msgstr "沒有其他 IP 位址可用於子網路 %(subnet_id)s。" msgid "No offline migrations pending." msgstr "沒有擱置中的離線移轉。" #, python-format msgid "No shared key in %s fields" msgstr "%s 欄位中沒有共用金鑰" msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "不容許將路由器手動指派給處於 'dvr' 模式的代理程式。" msgid "Not allowed to manually remove a router from an agent in 'dvr' mode." msgstr "不容許從處於 'dvr' 模式的代理程式中手動移除路由器。" msgid "" "Number of DHCP agents scheduled to host a tenant network. If this number is " "greater than 1, the scheduler automatically assigns multiple DHCP agents for " "a given tenant network, providing high availability for DHCP service." msgstr "" "已排程管理承租人網路的 DHCP 代理程式數目。如果此數目大於 1,則排程器會自動為" "給定的承租人網路指派多個 DHCP 代理程式,為 DHCP 服務提供高可用性。" msgid "Number of backlog requests to configure the metadata server socket with" msgstr "要配置給 meta 資料伺服器 Socket 的待辦事項要求數目" msgid "Number of backlog requests to configure the socket with" msgstr "要配置給 Socket 的待辦事項要求數目" msgid "" "Number of bits in an ipv4 PTR zone that will be considered network prefix. 
" "It has to align to byte boundary. Minimum value is 8. Maximum value is 24. " "As a consequence, range of values is 8, 16 and 24" msgstr "" "將被視為網路字首之 IPv4 PTR 區域中的位元數目。它必須與位元組界限對齊。下限值" "為 8。上限值為 24。因此,值的範圍是 8、16 和 24" msgid "" "Number of bits in an ipv6 PTR zone that will be considered network prefix. " "It has to align to nyble boundary. Minimum value is 4. Maximum value is 124. " "As a consequence, range of values is 4, 8, 12, 16,..., 124" msgstr "" "將被視為網路字首之 IPv6 PTR 區域中的位元數目。它必須與 nyble 界限對齊。下限值" "為 4。上限值為 124。因此,值的範圍是 4、8、12、16、...、124" msgid "" "Number of floating IPs allowed per tenant. A negative value means unlimited." msgstr "每個承租人所容許的浮動 IP 數目。負數值表示無限制。" msgid "" "Number of networks allowed per tenant. A negative value means unlimited." msgstr "每個承租人所容許的網路數目。負數值表示無限制。" msgid "Number of ports allowed per tenant. A negative value means unlimited." msgstr "每個承租人所容許的埠數目。負數值表示無限制。" msgid "Number of routers allowed per tenant. A negative value means unlimited." msgstr "每個承租人所容許的路由器數目。負數值表示無限制。" msgid "" "Number of seconds between sending events to nova if there are any events to " "send." msgstr "兩次將事件傳送至 Nova 之間的秒數(如果有任何事件要傳送)。" msgid "Number of seconds to keep retrying to listen" msgstr "不斷重試接聽的秒數" msgid "" "Number of security groups allowed per tenant. A negative value means " "unlimited." msgstr "每個承租人所容許的安全群組數目。負數值表示無限制。" msgid "" "Number of security rules allowed per tenant. A negative value means " "unlimited." msgstr "每個承租人所容許的安全規則數目。負數值表示無限制。" msgid "" "Number of separate worker processes for metadata server (defaults to half of " "the number of CPUs)" msgstr "meta 資料伺服器的獨立工作者處理程序數目(預設為CPU 數目的一半)" msgid "Number of subnets allowed per tenant, A negative value means unlimited." msgstr "每個承租人所容許的子網路數目。負數值表示無限制。" msgid "" "Number of threads to use during sync process. Should not exceed connection " "pool size configured on server." msgstr "" "執行同步程序期間要使用的執行緒數目。不應超過伺服器上配置的連線儲存區大小。" msgid "OK" msgstr "確定" msgid "" "OVS datapath to use. 'system' is the default value and corresponds to the " "kernel datapath. To enable the userspace datapath set this value to 'netdev'." msgstr "" "要使用的 OVS 資料路徑。'system' 是預設值,且與核心資料路徑對應。如果要啟用使" "用者空間資料路徑,請將此值設為 'netdev'。" msgid "OVS vhost-user socket directory." msgstr "OVS vhost 使用者 Socket 目錄。" msgid "Only admin can view or configure quota" msgstr "只有管理者才能檢視或配置配額" msgid "Only admin is authorized to access quotas for another tenant" msgstr "只有管理者才獲授權來存取另一個承租人的配額" msgid "Only admins can manipulate policies on objects they do not own" msgstr "只有管理者才可以操作不歸他們擁有之物件上的原則" msgid "Only allowed to update rules for one security profile at a time" msgstr "一次只容許更新一個安全設定檔的規則" msgid "Only remote_ip_prefix or remote_group_id may be provided." msgstr "只能提供 remote_ip_prefix 或 remote_group_id。" #, python-format msgid "Operation not supported on device %(dev_name)s" msgstr "作業在裝置 %(dev_name)s 上不受支援" msgid "" "Ordered list of network_types to allocate as tenant networks. The default " "value 'local' is useful for single-box testing but provides no connectivity " "between hosts." msgstr "" "要配置作為租戶網路的 network_type 有序清單。預設值 'local' 對單框測試很有用," "但卻不提供主機之間的連線功能。" msgid "Override the default dnsmasq settings with this file." msgstr "使用此檔案來置換預設 dnsmasq 設定。" msgid "Owner type of the device: network/compute" msgstr "裝置的擁有者類型:網路/計算" msgid "POST requests are not supported on this resource." msgstr "此資源上不支援 POST 要求。" #, python-format msgid "Package %s not installed" msgstr "未安裝套件 %s" #, python-format msgid "Parsing bridge_mappings failed: %s." 
msgstr "剖析 bridge_mappings 時失敗:%s。" msgid "Password for connecting to designate in admin context" msgstr "用於連接以在管理環境定義中指定的密碼" msgid "Path to PID file for this process" msgstr "用於此程序的 PID 檔案路徑" msgid "Path to the router directory" msgstr "路由器目錄的路徑" msgid "Peer patch port in integration bridge for tunnel bridge." msgstr "整合橋接器中用於通道橋接器的同層級修補程式埠。" msgid "Peer patch port in tunnel bridge for integration bridge." msgstr "通道橋接器中用於整合橋接器的同層級修補程式埠。" msgid "Phase upgrade options do not accept revision specification" msgstr "階段升級選項不接受修訂規格" msgid "Ping timeout" msgstr "連通測試逾時值" #, python-format msgid "Port %(id)s does not have fixed ip %(address)s" msgstr "埠 %(id)s 沒有固定 IP %(address)s" #, python-format msgid "Port %s does not exist" msgstr "埠 %s 不存在" #, python-format msgid "" "Port %s has multiple fixed IPv4 addresses. Must provide a specific IPv4 " "address when assigning a floating IP" msgstr "埠 %s 具有多個固定 IPv4 位址。指派浮動 IP 時,必須提供特定的 IPv4 位址" msgid "Prefix Delegation can only be used with IPv6 subnets." msgstr "字首委派只能與 IPv6 子網路搭配使用。" msgid "Private key of client certificate." msgstr "用戶端憑證的私密金鑰。" #, python-format msgid "Probe %s deleted" msgstr "已刪除探針 %s" #, python-format msgid "Probe created : %s " msgstr "已建立探針:%s " msgid "Process is already started" msgstr "程序已啟動" msgid "Process is not running." msgstr "程序不在執行中。" msgid "Protocol to access nova metadata, http or https" msgstr "用於存取 Nova meta 資料的通訊協定:HTTP 或 HTTPS" #, python-format msgid "Provider name %(name)s is limited by %(len)s characters" msgstr "提供者名稱 %(name)s 最多只能包含 %(len)s 個字元" #, python-format msgid "RBAC policy of type %(object_type)s with ID %(id)s not found" msgstr "找不到 ID 為 %(id)s 且類型為 %(object_type)s 的 RBAC 原則" #, python-format msgid "" "RBAC policy on object %(object_id)s cannot be removed because other objects " "depend on it.\n" "Details: %(details)s" msgstr "" "無法移除物件 %(object_id)s 上的 RBAC 原則,因為其他物件相依於該原則。\n" "詳細資料:%(details)s" msgid "" "Range of seconds to randomly delay when starting the periodic task scheduler " "to reduce stampeding. (Disable by setting to 0)" msgstr "" "啟動定期作業排定器以減少大混亂的隨機延遲秒數範圍。(如果要停用,則設為 0)" msgid "Ranges must be in the same IP version" msgstr "範圍必須位於相同的 IP 版本中" msgid "Ranges must be netaddr.IPRange" msgstr "範圍必須是 netaddr.IPRange" msgid "Ranges must not overlap" msgstr "範圍不得重疊" #, python-format msgid "" "Release aware branch labels (%s) are deprecated. Please switch to expand@ " "and contract@ labels." msgstr "版本相關分支標籤 (%s) 已遭到淘汰。請切換至 expand@ 和 contract@ 標籤。" msgid "Remote metadata server experienced an internal server error." msgstr "遠端 meta 資料伺服器發生內部伺服器錯誤。" msgid "" "Repository does not contain HEAD files for contract and expand branches." msgstr "儲存庫不含合約及延伸分支的 HEAD 檔。" msgid "" "Representing the resource type whose load is being reported by the agent. " "This can be \"networks\", \"subnets\" or \"ports\". When specified (Default " "is networks), the server will extract particular load sent as part of its " "agent configuration object from the agent report state, which is the number " "of resources being consumed, at every report_interval.dhcp_load_type can be " "used in combination with network_scheduler_driver = neutron.scheduler." "dhcp_agent_scheduler.WeightScheduler When the network_scheduler_driver is " "WeightScheduler, dhcp_load_type can be configured to represent the choice " "for the resource being balanced. Example: dhcp_load_type=networks" msgstr "" "代表將由代理程式報告其負載的資源類型。它可以為「網路」、「子網路」或「埠」。" "指定時(預設值為網路),伺服器將從代理程式報告狀態(為所耗用的資源數目)擷取" "作為其代理程式配置物件一部分傳送的特定負載,擷取間隔為 report_interval." 
"dhcp_load_type 可以與network_scheduler_driver = neutron.scheduler." "dhcp_agent_scheduler.WeightScheduler 組合使用。當 network_scheduler_driver " "為 WeightScheduler 時,可以將 dhcp_load_type配置為代表您選擇要進行平衡的資" "源。範例:dhcp_load_type=網路" msgid "Request Failed: internal server error while processing your request." msgstr "要求失敗:處理要求時發生內部伺服器錯誤。" msgid "" "Reset flow table on start. Setting this to True will cause brief traffic " "interruption." msgstr "" "在啟動時重設流程表格。如果將此項設定為 True,則將導致簡短的資料流量岔斷。" #, python-format msgid "Resource %(resource)s %(resource_id)s could not be found." msgstr "找不到資源 %(resource)s %(resource_id)s。" #, python-format msgid "Resource %(resource_id)s of type %(resource_type)s not found" msgstr "找不到類型為 %(resource_type)s 的資源 %(resource_id)s" #, python-format msgid "" "Resource '%(resource_id)s' is already associated with provider " "'%(provider)s' for service type '%(service_type)s'" msgstr "" "資源 '%(resource_id)s' 已與服務類型 '%(service_type)s' 的提供者 " "'%(provider)s' 產生關聯" msgid "Resource body required" msgstr "需要資源主體" msgid "Resource not found." msgstr "找不到資源。" msgid "Resources required" msgstr "需要資源" msgid "" "Root helper application. Use 'sudo neutron-rootwrap /etc/neutron/rootwrap." "conf' to use the real root filter facility. Change to 'sudo' to skip the " "filtering and just run the command directly." msgstr "" "根說明程式應用程式。利用 'sudo neutron-rootwrap /etc/neutron/rootwrap.conf' " "來使用實際的根過濾器工具。變更為 'sudo' 以跳過過濾並且僅直接執行該指令。" msgid "Root permissions are required to drop privileges." msgstr "需要 root 權限才能捨棄專用權。" #, python-format msgid "Router already has a port on subnet %s" msgstr "路由器在子網路 %s 上已經有埠" msgid "Router port must have at least one fixed IP" msgstr "路由器埠必須具有至少一個固定 IP" #, python-format msgid "Running %(cmd)s (%(desc)s) for %(project)s ..." msgstr "正在對 %(project)s 執行 %(cmd)s (%(desc)s)..." #, python-format msgid "Running %(cmd)s for %(project)s ..." msgstr "正在對 %(project)s 執行 %(cmd)s..." msgid "" "Seconds between nodes reporting state to server; should be less than " "agent_down_time, best if it is half or less than agent_down_time." msgstr "" "兩個節點將狀態報告給伺服器的間隔秒數;應該小於 agent_down_time;如果是 " "agent_down_time 的一半或者小於 agent_down_time,則最佳。" msgid "" "Seconds to regard the agent is down; should be at least twice " "report_interval, to be sure the agent is down for good." msgstr "" "將代理程式視為已關閉的秒數;應該至少是report_interval 的兩倍,以確保代理程式" "已永久關閉。" #, python-format msgid "Security Group %(id)s %(reason)s." msgstr "安全群組 %(id)s %(reason)s。" #, python-format msgid "Security Group Rule %(id)s %(reason)s." msgstr "安全群組規則 %(id)s %(reason)s。" #, python-format msgid "Security group %(id)s does not exist" msgstr "安全群組 %(id)s 不存在" #, python-format msgid "Security group rule %(id)s does not exist" msgstr "安全群組規則 %(id)s 不存在" #, python-format msgid "Security group rule already exists. Rule id is %(rule_id)s." msgstr "安全群組規則已經存在。規則 ID 為 %(rule_id)s。" #, python-format msgid "" "Security group rule for ethertype '%(ethertype)s' not supported. Allowed " "values are %(values)s." msgstr "" "不支援乙太網路類型 '%(ethertype)s' 的安全群組規則。容許的值為 %(values)s。" #, python-format msgid "" "Security group rule protocol %(protocol)s not supported. Only protocol " "values %(values)s and integer representations [0 to 255] are supported." msgstr "" "不支援安全群組規則通訊協定 %(protocol)s。僅支援通訊協定值 %(values)s 和整數表" "示法 [0 到 255]。" msgid "" "Send notification to nova when port data (fixed_ips/floatingip) changes so " "nova can update its cache." 
msgstr "" "埠資料 (fixed_ips/floatingip) 變更時,將通知傳送至 Nova,以便 Nova 可以更新其" "快取。" msgid "Send notification to nova when port status changes" msgstr "埠狀態變更時,將通知傳送至 Nova" #, python-format msgid "" "Service provider '%(provider)s' could not be found for service type " "%(service_type)s" msgstr "找不到服務類型 %(service_type)s 的服務提供者 '%(provider)s'" msgid "Service to handle DHCPv6 Prefix delegation." msgstr "用於處理 DHCPv6 字首委派的服務。" #, python-format msgid "Service type %(service_type)s does not have a default service provider" msgstr "服務類型 %(service_type)s 不具有預設服務提供者" msgid "" "Set new timeout in seconds for new rpc calls after agent receives SIGTERM. " "If value is set to 0, rpc timeout won't be changed" msgstr "" "在代理程式接收 SIGTERM 之後為新 RPC 呼叫設定新逾時(以秒為單位)。如果值設定" "為 0,RPC 逾時將不會變更" msgid "" "Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying GRE/" "VXLAN tunnel." msgstr "" "在帶有 GRE/VXLAN 通道的送出 IP 封包上,設定或取消設定「不劃分片段 (DF)」位" "元。" msgid "Shared address scope can't be unshared" msgstr "無法將已共用的位址範圍取消共用" msgid "String prefix used to match IPset names." msgstr "用來符合 IPset 名稱的字串字首。" #, python-format msgid "Sub-project %s not installed." msgstr "未安裝子專案 %s。" msgid "Subnet for router interface must have a gateway IP" msgstr "路由器介面的子網路必須具有閘道 IP" msgid "Subnet pool has existing allocations" msgstr "子網路儲存區具有現有的配置" msgid "Subnet used for the l3 HA admin network." msgstr "用於 l3 HA 管理網路的子網路。" msgid "" "System-wide flag to determine the type of router that tenants can create. " "Only admin can override." msgstr "此系統層面旗標用來決定承租人可以建立的路由器類型。只有管理者才能置換。" msgid "TCP Port used by Neutron metadata namespace proxy." msgstr "Neutron meta 資料名稱空間 Proxy 所使用的 TCP 埠。" msgid "TCP Port used by Nova metadata server." msgstr "Nova meta 資料伺服器所使用的 TCP 埠。" msgid "TTL for vxlan interface protocol packets." msgstr "VXLAN 介面通訊協定封包的 TTL。" #, python-format msgid "Tag %(tag)s could not be found." msgstr "找不到標記 %(tag)s。" #, python-format msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" msgstr "Tenant %(tenant_id)s 不可在此網路上建立 %(resource)s" msgid "Tenant id for connecting to designate in admin context" msgstr "用於連接以在管理環境定義中指定的租戶 ID" msgid "Tenant name for connecting to designate in admin context" msgstr "用於連接以在管理環境定義中指定的租戶名稱" msgid "" "The DHCP server can assist with providing metadata support on isolated " "networks. Setting this value to True will cause the DHCP server to append " "specific host routes to the DHCP request. The metadata service will only be " "activated when the subnet does not contain any router port. The guest " "instance must be configured to request host routes via DHCP (Option 121). " "This option doesn't have any effect when force_metadata is set to True." msgstr "" "DHCP 伺服器可透過在隔離網路上提供 meta 資料支援進行協助。將此值設為 True 會導" "致 DHCP 伺服器將特定的主機路線附加至 DHCP 要求。僅當子網路不包含任何路由器埠" "時,才啟動 meta 資料服務。訪客實例必須配置成透過 DHCP 來要求主機路線(選項 " "121)。將 force_metadata 設為 True 時,這個選項沒有任何效果。" msgid "The UDP port to use for VXLAN tunnels." msgstr "要用於 VXLAN 通道的 UDP 埠。" #, python-format msgid "" "The address allocation request could not be satisfied because: %(reason)s" msgstr "無法滿足位址配置要求,原因:%(reason)s" msgid "The advertisement interval in seconds" msgstr "廣告間隔(以秒為單位)" msgid "" "The base MAC address Neutron will use for VIFs. The first 3 octets will " "remain unchanged. If the 4th octet is not 00, it will also be used. The " "others will be randomly generated." msgstr "" "Neutron 將用於 VIF 的基本 MAC 位址。前 3 個八位元組將保持不變。如果第 4 個八" "位元組不是 00,則也將使用該八位元組。其他各項將隨機產生。" msgid "" "The base mac address used for unique DVR instances by Neutron. 
The first 3 " "octets will remain unchanged. If the 4th octet is not 00, it will also be " "used. The others will be randomly generated. The 'dvr_base_mac' *must* be " "different from 'base_mac' to avoid mixing them up with MAC's allocated for " "tenant ports. A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00. " "The default is 3 octet" msgstr "" "Neutron 用於唯一 DVR 實例的基本 MAC 位址。前 3 個八位元組將保持不變。如果第 " "4 個八位元組不是 00,則也將使用該八位元組。其他個項將隨機產" "生。'dvr_base_mac' 必須與 'base_mac' 不同,以避免將它們與對承租人埠配置的 " "MAC 混合。4 個八位元組範例如下:dvr_base_mac = fa:16:3f:4f:00:00。預設值為 3 " "個八位元組" msgid "The core plugin Neutron will use" msgstr "Neutron 將使用的核心外掛程式" msgid "The driver used to manage the DHCP server." msgstr "用於管理 DHCP 伺服器的驅動程式。" msgid "The driver used to manage the virtual interface." msgstr "用於管理虛擬介面的驅動程式。" msgid "" "The email address to be used when creating PTR zones. If not specified, the " "email address will be admin@" msgstr "" "建立 PTR 區域時,要使用的電子郵件位址。如果未指定,則電子郵件位址將是 " "admin@" msgid "" "The maximum number of items returned in a single response, value was " "'infinite' or negative integer means no limit" msgstr "在單一回應中傳回的項目數上限,值為 'infinite' 或負整數時表示無限制" msgid "" "The network type to use when creating the HA network for an HA router. By " "default or if empty, the first 'tenant_network_types' is used. This is " "helpful when the VRRP traffic should use a specific network which is not the " "default one." msgstr "" "為 HA 路由器建立 HA 網路時要使用的網路類型。依預設或者在空白的情況下,將使用" "第一個 'tenant_network_types'。當 VRRP 資料流量應該使用的特定網路不是預設網路" "時,這很有用。" msgid "" "The number of seconds the agent will wait between polling for local device " "changes." msgstr "輪詢本端裝置變更之間代理程式將等待的秒數。" msgid "" "The number of seconds to wait before respawning the ovsdb monitor after " "losing communication with it." msgstr "與 OVSDB 監視器的通訊中斷後重新大量產生OVSDB 監視器之前等待的秒數。" msgid "The number of sort_keys and sort_dirs must be same" msgstr "sort_key 數目及 sort_dir 數目必須相同" msgid "" "The path for API extensions. Note that this can be a colon-separated list of " "paths. For example: api_extensions_path = extensions:/path/to/more/exts:/" "even/more/exts. The __path__ of neutron.extensions is appended to this, so " "if your extensions are in there you don't need to specify them here." msgstr "" "API 延伸的路徑。請注意,這可以是分號區隔的路徑清單。例如:" "api_extensions_path = extensions:/path/to/more/exts:/even/more/exts。已將 " "neutron.extensions 的 __path__ 附加到此項,所以如果您的延伸在這裡,則不需要在" "這裡指定它們。" msgid "The physical network name with which the HA network can be created." msgstr "建立 HA 網路時可以使用的實體網路名稱。" #, python-format msgid "The port '%s' was deleted" msgstr "已刪除埠 '%s'" msgid "The port to bind to" msgstr "要連結至的埠" #, python-format msgid "The requested content type %s is invalid." msgstr "所要求的內容類型 %s 無效。" msgid "The resource could not be found." msgstr "找不到資源。" #, python-format msgid "" "The router %(router_id)s has been already hosted by the L3 Agent " "%(agent_id)s." msgstr "路由器 %(router_id)s 已經由 L3 代理程式 %(agent_id)s 管理。" msgid "" "The server has either erred or is incapable of performing the requested " "operation." msgstr "伺服器發生錯誤,或者無法執行所要求的作業。" msgid "The service plugins Neutron will use" msgstr "Neutron 將使用的服務外掛程式" #, python-format msgid "The subnet request could not be satisfied because: %(reason)s" msgstr "無法滿足子網路要求,原因:%(reason)s" #, python-format msgid "The subproject to execute the command against. Can be one of: '%s'." 
msgstr "要對其執行指令的子專案。可以是下列其中一個:'%s'。" msgid "The type of authentication to use" msgstr "要使用的鑑別類型" msgid "" "There are routers attached to this network that depend on this policy for " "access." msgstr "有依賴於此存取原則的路由器已連接至此網路。" msgid "" "True to delete all ports on all the OpenvSwitch bridges. False to delete " "ports created by Neutron on integration and external network bridges." msgstr "" "如果為 True,則刪除所有 OpenvSwitch 橋接器上的所有埠。如果為 False,則刪除" "Neutron 在整合及外部網路橋接器上建立的埠。" msgid "Tunnel IP value needed by the ML2 plugin" msgstr "ML2 外掛程式需要的通道 IP 值" msgid "Tunnel bridge to use." msgstr "要使用的通道橋接器。" msgid "" "Type of the nova endpoint to use. This endpoint will be looked up in the " "keystone catalog and should be one of public, internal or admin." msgstr "" "要使用之 Nova 端點的類型。此端點將在 Keystone 型錄予以查閱,並且應該是共用、" "內部或管理的其中一個。" msgid "URL for connecting to designate" msgstr "用於連接以指定的 URL" msgid "URL to database" msgstr "資料庫 URL" #, python-format msgid "Unable to access %s" msgstr "無法存取 %s" #, python-format msgid "Unable to calculate %(address_type)s address because of:%(reason)s" msgstr "無法計算 %(address_type)s 位址,原因:%(reason)s" #, python-format msgid "Unable to convert value in %s" msgstr "無法轉換 %s 中的值" msgid "Unable to create the SNAT Interface Port" msgstr "無法建立「SNAT 介面埠」" #, python-format msgid "Unable to determine mac address for %s" msgstr "無法判定 %s 的 MAC 位址" #, python-format msgid "Unable to find '%s' in request body" msgstr "在要求內文中找不到 '%s'" #, python-format msgid "Unable to find IP address %(ip_address)s on subnet %(subnet_id)s" msgstr "在子網路 %(subnet_id)s 上找不到 IP 位址 %(ip_address)s" #, python-format msgid "Unable to find resource name in %s" msgstr "在 %s 中找不到資源名稱" #, python-format msgid "" "Unable to identify a target field from:%s. Match should be in the form " "%%()s" msgstr "無法識別來自 %s 的目標欄位。相符項的格式應該為%%()s" msgid "Unable to provide external connectivity" msgstr "無法提供外部連線功能" msgid "Unable to provide tenant private network" msgstr "無法提供租戶專用網路" #, python-format msgid "" "Unable to verify match:%(match)s as the parent resource: %(res)s was not " "found" msgstr "無法驗證相符項 %(match)s,因為找不到母項資源 %(res)s" #, python-format msgid "Unexpected label for script %(script_name)s: %(labels)s" msgstr "Script %(script_name)s 的標籤不符合預期:%(labels)s" #, python-format msgid "Unexpected number of alembic branch points: %(branchpoints)s" msgstr "alembic 分支點數目不符合預期:%(branchpoints)s" #, python-format msgid "Unexpected response code: %s" msgstr "非預期的回應碼:%s" #, python-format msgid "Unexpected response: %s" msgstr "非預期的回應:%s" #, python-format msgid "Unit name '%(unit)s' is not valid." msgstr "單元名稱 '%(unit)s' 無效。" #, python-format msgid "Unknown address type %(address_type)s" msgstr "不明的位址類型 %(address_type)s" #, python-format msgid "Unknown attribute '%s'." msgstr "不明屬性 '%s'。" #, python-format msgid "Unknown chain: %r" msgstr "不明鏈:%r" #, python-format msgid "Unknown network type %(network_type)s." msgstr "不明的網路類型 %(network_type)s。" msgid "Unmapped error" msgstr "「未對映」錯誤" msgid "Unrecognized action" msgstr "無法辨識的動作" msgid "Unrecognized field" msgstr "無法辨識的欄位" msgid "Unsupported Content-Type" msgstr "不支援的內容類型" #, python-format msgid "Unsupported network type %(net_type)s." msgstr "不支援網路類型 %(net_type)s。" msgid "Unsupported request type" msgstr "不受支援的要求類型" msgid "Updating default security group not allowed." msgstr "不容許更新預設安全群組。" msgid "" "Use ML2 l2population mechanism driver to learn remote MAC and IPs and " "improve tunnel scalability." 
msgstr "" "使用 ML2 l2population 機制驅動程式,來瞭解遠端 MAC 及 IP 位址,並提升通道可調" "整性。" msgid "Use broadcast in DHCP replies." msgstr "在 DHCP 回覆中使用廣播。" msgid "Use either --delta or relative revision, not both" msgstr "使用 --delta 或相對修訂,但不要同時使用兩者" msgid "" "Use ipset to speed-up the iptables based security groups. Enabling ipset " "support requires that ipset is installed on L2 agent node." msgstr "" "使用 ipset 來加速 iptables 型安全群組。啟用 ipset 支援需要在 L2 代理程式節點" "上安裝 ipset。" msgid "" "Use the root helper when listing the namespaces on a system. This may not be " "required depending on the security configuration. If the root helper is not " "required, set this to False for a performance improvement." msgstr "" "列出系統上的名稱空間時,請使用根說明程式。視安全性配置而定,可能不需要這樣" "做。如果不需要根說明程式,請將此參數設為 False 以改進效能。" msgid "" "Use veths instead of patch ports to interconnect the integration bridge to " "physical networks. Support kernel without Open vSwitch patch port support so " "long as it is set to True." msgstr "" "使用 veths(而不使用修補程式埠)將整合橋接器交互連接至實體網路。只要將其設為 " "True,即可支援不含 Open vSwitch 修補程式埠支援的核心。" msgid "" "User (uid or name) running metadata proxy after its initialization (if " "empty: agent effective user)." msgstr "" "在 meta 資料 Proxy 起始設定之後執行該 Proxy 的使用者(UID 或名稱)(如果為" "空:則為代理程式有效使用者)。" msgid "User (uid or name) running this process after its initialization" msgstr "在此程序起始設定之後執行此程序的使用者(UID 或名稱)" msgid "Username for connecting to designate in admin context" msgstr "用於連接以在管理環境定義中指定的使用者名稱" msgid "VRRP authentication password" msgstr "VRRP 鑑別密碼" msgid "VRRP authentication type" msgstr "VRRP 鑑別類型" msgid "" "Value of host kernel tick rate (hz) for calculating minimum burst value in " "bandwidth limit rules for a port with QoS. See kernel configuration file for " "HZ value and tc-tbf manual for more information." msgstr "" "主機核心記號率 (hz) 的值,用來透過服務品質計算埠的頻寬限制規則中的激增值下" "限。如需相關資訊,請參閱 HZ 值的核心配置檔和 tc-tbf 手冊。" msgid "" "Value of latency (ms) for calculating size of queue for a port with QoS. See " "tc-tbf manual for more information." msgstr "" "透過服務品質來計算埠的佇列大小時的延遲值(毫秒)。如需相關資訊,請參閱 tc-" "tbf 手冊。" msgid "" "When proxying metadata requests, Neutron signs the Instance-ID header with a " "shared secret to prevent spoofing. You may select any string for a secret, " "but it must match here and in the configuration used by the Nova Metadata " "Server. NOTE: Nova uses the same config key, but in [neutron] section." msgstr "" "對 meta 資料要求執行 Proxy 動作時,Neutron 會使用共用密碼來簽署 Instance-ID " "標頭,以防止盜用。您可以選取任何字串用作密碼,但該字串在此處以及在 Nova meta " "資料伺服器使用的配置中必須相符。附註:Nova 使用相同的配置金鑰,但卻在 " "[neutron] 區段中。" msgid "" "Where to store Neutron state files. This directory must be writable by the " "agent." msgstr "Neutron 狀態檔的儲存位置。此目錄必須可以由代理程式寫入。" msgid "" "With IPv6, the network used for the external gateway does not need to have " "an associated subnet, since the automatically assigned link-local address " "(LLA) can be used. However, an IPv6 gateway address is needed for use as the " "next-hop for the default route. If no IPv6 gateway address is configured " "here, (and only then) the neutron router will be configured to get its " "default route from router advertisements (RAs) from the upstream router; in " "which case the upstream router must also be configured to send these RAs. " "The ipv6_gateway, when configured, should be the LLA of the interface on the " "upstream router. If a next-hop using a global unique address (GUA) is " "desired, it needs to be done via a subnet allocated to the network and not " "through this parameter. 
" msgstr "" "如果使用 IPv6,則用於外部閘道的網路不需要具有相關聯的子網路,因為可以使用自動" "指派的鏈結本端位址 (LLA)。但是,IPv6 閘道位址需要用作預設路由的下一個中繼站。" "如果未在這裡配置 IPv6 閘道位址,(且僅當那時)則將會配置 Neutron 路由器以從上" "游路由器的路由器通告 (RA) 中取得其預設路由;在該情況下,也必須配置上游路由器" "以傳送這些 RA。ipv6_gateway(如果已配置)應為上游路由器介面的 LLA。如果需要使" "用廣域唯一位址 (GUA) 的下一個中繼站,則需要透過配置給網路的子網路來執行此配" "置,而不是透過此參數。" msgid "You must implement __call__" msgstr "必須實作 __call__" msgid "" "You must provide a config file for bridge - either --config-file or " "env[NEUTRON_TEST_CONFIG_FILE]" msgstr "必須為橋接器提供配置檔:--config-file,或env[NEUTRON_TEST_CONFIG_FILE]" msgid "You must provide a revision or relative delta" msgstr "必須提供修訂或相對差異" msgid "a subnetpool must be specified in the absence of a cidr" msgstr "如果未指定 CIDR,則必須指定子網路儲存區" msgid "add_ha_port cannot be called inside of a transaction." msgstr "無法在交易內呼叫 add_ha_port。" msgid "allocation_pools allowed only for specific subnet requests." msgstr "僅容許用於特定子網路要求的 allocation_pools。" msgid "allocation_pools are not in the subnet" msgstr "allocation_pools 不在子網路中" msgid "allocation_pools use the wrong ip version" msgstr "allocation_pools 使用了錯誤的 IP 版本" msgid "already a synthetic attribute" msgstr "已經是合成屬性" msgid "binding:profile value too large" msgstr "binding:profile 值太大" #, python-format msgid "cannot perform %(event)s due to %(reason)s" msgstr "無法執行 %(event)s,原因:%(reason)s" msgid "cidr and prefixlen must not be supplied together" msgstr "不得同時提供 cidr 和 prefixlen" msgid "dns_domain cannot be specified without a dns_name" msgstr "如果未指定 dns_name,則無法指定 dns_domain" msgid "dns_name cannot be specified without a dns_domain" msgstr "如果未指定 dns_domain,則無法指定 dns_name" msgid "fixed_ip_address cannot be specified without a port_id" msgstr "如果未指定 port_id,則無法指定 fixed_ip_address" #, python-format msgid "has device owner %s" msgstr "具有裝置擁有者 %s" msgid "in use" msgstr "使用中" #, python-format msgid "ip command failed on device %(dev_name)s: %(reason)s" msgstr "對裝置 %(dev_name)s 執行的 IP 指令失敗:%(reason)s" #, python-format msgid "ip link capability %(capability)s is not supported" msgstr "不支援 ip link 功能 %(capability)s" #, python-format msgid "ip link command is not supported: %(reason)s" msgstr "不支援 ip link 指令:%(reason)s" msgid "ip_version must be specified in the absence of cidr and subnetpool_id" msgstr "如果未指定 cidr 和 subnetpool_id,則必須指定 ip_version" msgid "ipv6_address_mode is not valid when ip_version is 4" msgstr "當 ip_version 是 4 時,ipv6_address_mode 無效" msgid "ipv6_ra_mode is not valid when ip_version is 4" msgstr "當 ip_version 是 4 時,ipv6_ra_mode 無效" #, python-format msgid "" "ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " "'%(addr_mode)s' is not valid. If both attributes are set, they must be the " "same value" msgstr "" "如果在 ipv6_address_mode 設為 '%(addr_mode)s' 時將 ipv6_ra_mode 設" "為'%(ra_mode)s',則無效。如果兩個屬性同時設定,則它們的值必須相同" msgid "mac address update" msgstr "MAC 位址更新" msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "必須提供 2 個確切引數 - cidr 和 MAC" msgid "network_type required" msgstr "需要 network_type" #, python-format msgid "network_type value '%s' not supported" msgstr "不支援 network_type 值 '%s'" msgid "new subnet" msgstr "新子網路" #, python-format msgid "physical_network '%s' unknown for flat provider network" msgstr "平面提供者網路的 physical_network '%s' 不明" msgid "physical_network required for flat provider network" msgstr "平面提供者網路所需的 physical_network" #, python-format msgid "provider:physical_network specified for %s network" msgstr "為 %s 網路指定了 provider:physical_network" msgid "respawn_interval must be >= 0 if provided." 
msgstr "如果提供的話,則 respawn_interval 必須 >= 0。" #, python-format msgid "segmentation_id out of range (%(min)s through %(max)s)" msgstr "segmentation_id 超出範圍(%(min)s 到 %(max)s)" msgid "segmentation_id requires physical_network for VLAN provider network" msgstr "segmentation_id 需要 VLAN 提供者網路的 physical_network" msgid "shared attribute switching to synthetic" msgstr "共用屬性正在切換至合成屬性" #, python-format msgid "" "subnetpool %(subnetpool_id)s cannot be updated when associated with shared " "address scope %(address_scope_id)s" msgstr "" "子網路儲存區 %(subnetpool_id)s 在下列情況下無法更新:與共用位址範圍 " "%(address_scope_id)s 相關聯時" msgid "subnetpool_id and use_default_subnetpool cannot both be specified" msgstr "不能同時指定 subnetpool_id 和 use_default_subnetpool" msgid "the nexthop is not connected with router" msgstr "下一個中繼站未與路由器連接" msgid "the nexthop is used by router" msgstr "路由器已使用下一個中繼站" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/manager.py0000644000175000017500000002600400000000000020623 0ustar00coreycorey00000000000000# Copyright 2011 VMware, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections import defaultdict from neutron_lib.plugins import constants as lib_const from neutron_lib.plugins import directory from neutron_lib.utils import runtime from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_service import periodic_task from oslo_utils import excutils from osprofiler import profiler import six from neutron._i18n import _ from neutron.common import utils from neutron.plugins.common import constants LOG = logging.getLogger(__name__) CORE_PLUGINS_NAMESPACE = 'neutron.core_plugins' class ManagerMeta(profiler.TracedMeta, type(periodic_task.PeriodicTasks)): pass @six.add_metaclass(ManagerMeta) class Manager(periodic_task.PeriodicTasks): __trace_args__ = {"name": "rpc"} # Set RPC API version to 1.0 by default. target = oslo_messaging.Target(version='1.0') def __init__(self, host=None): if not host: host = cfg.CONF.host self.host = host conf = getattr(self, "conf", cfg.CONF) super(Manager, self).__init__(conf) def periodic_tasks(self, context, raise_on_error=False): self.run_periodic_tasks(context, raise_on_error=raise_on_error) def init_host(self): """Handle initialization if this is a standalone service. Child classes should override this method. """ pass def after_start(self): """Handler post initialization stuff. Child classes can override this method. """ pass def stop(self): """Handle stop. Child classes can override this method. """ pass def validate_pre_plugin_load(): """Checks if the configuration variables are valid. If the configuration is invalid then the method will return an error message. If all is OK then it will return None. """ if cfg.CONF.core_plugin is None: msg = _('Neutron core_plugin not configured!') return msg @six.add_metaclass(profiler.TracedMeta) class NeutronManager(object): """Neutron's Manager class. 
Neutron's Manager class is responsible for parsing a config file and instantiating the correct plugin that concretely implements neutron_plugin_base class. """ # TODO(armax): use of the singleton pattern for this class is vestigial, # and it is mainly relied on by the unit tests. It is safer to get rid # of it once the entire codebase (neutron + subprojects) has switched # entirely to using the plugins directory. _instance = None __trace_args__ = {"name": "rpc"} def __init__(self, options=None, config_file=None): # Store instances of already loaded plugins to avoid instantiate same # plugin more than once self._loaded_plugins = {} # If no options have been provided, create an empty dict if not options: options = {} msg = validate_pre_plugin_load() if msg: LOG.critical(msg) raise Exception(msg) # NOTE(jkoelker) Testing for the subclass with the __subclasshook__ # breaks tach monitoring. It has been removed # intentionally to allow v2 plugins to be monitored # for performance metrics. plugin_provider = cfg.CONF.core_plugin LOG.info("Loading core plugin: %s", plugin_provider) # NOTE(armax): keep hold of the actual plugin object plugin = self._get_plugin_instance(CORE_PLUGINS_NAMESPACE, plugin_provider) directory.add_plugin(lib_const.CORE, plugin) # load services from the core plugin first self._load_services_from_core_plugin(plugin) self._load_service_plugins() # Used by pecan WSGI self.resource_plugin_mappings = {} self.resource_controller_mappings = {} self.path_prefix_resource_mappings = defaultdict(list) @staticmethod def load_class_for_provider(namespace, plugin_provider): """Loads plugin using alias or class name :param namespace: namespace where alias is defined :param plugin_provider: plugin alias or class name :returns: plugin that is loaded :raises ImportError: if fails to load plugin """ try: return runtime.load_class_by_alias_or_classname(namespace, plugin_provider) except ImportError: with excutils.save_and_reraise_exception(): LOG.error("Plugin '%s' not found.", plugin_provider) def _get_plugin_class(self, namespace, plugin_provider): return self.load_class_for_provider(namespace, plugin_provider) def _get_plugin_instance(self, namespace, plugin_provider): plugin_class = self._get_plugin_class(namespace, plugin_provider) plugin_inst = self._loaded_plugins.get(plugin_class) if not plugin_inst: plugin_inst = plugin_class() self._loaded_plugins[plugin_class] = plugin_inst return plugin_inst def _load_services_from_core_plugin(self, plugin): """Puts core plugin in service_plugins for supported services.""" LOG.debug("Loading services supported by the core plugin") # supported service types are derived from supported extensions for ext_alias in getattr(plugin, "supported_extension_aliases", []): if ext_alias in constants.EXT_TO_SERVICE_MAPPING: service_type = constants.EXT_TO_SERVICE_MAPPING[ext_alias] directory.add_plugin(service_type, plugin) LOG.info("Service %s is supported by the core plugin", service_type) def _get_default_service_plugins(self): """Get default service plugins to be loaded.""" core_plugin = directory.get_plugin() if core_plugin.has_native_datastore(): return constants.DEFAULT_SERVICE_PLUGINS.keys() else: return [] def _load_service_plugins(self): """Loads service plugins. Starts from the core plugin and checks if it supports advanced services then loads classes provided in configuration. 
""" plugin_providers = cfg.CONF.service_plugins plugin_providers.extend(self._get_default_service_plugins()) LOG.debug("Loading service plugins: %s", plugin_providers) for provider in plugin_providers: if provider == '': continue LOG.info("Loading Plugin: %s", provider) plugin_class = self._get_plugin_class( 'neutron.service_plugins', provider) required_plugins = getattr( plugin_class, "required_service_plugins", []) for req_plugin in required_plugins: LOG.info("Loading service plugin %s, it is required by %s", req_plugin, provider) self._create_and_add_service_plugin(req_plugin) # NOTE(liuyulong): adding one plugin multiple times does not have # bad effect for it. Since all the plugin has its own specific # unique name. self._create_and_add_service_plugin(provider) def _create_and_add_service_plugin(self, provider): plugin_inst = self._get_plugin_instance('neutron.service_plugins', provider) plugin_type = plugin_inst.get_plugin_type() directory.add_plugin(plugin_type, plugin_inst) # search for possible agent notifiers declared in service plugin # (needed by agent management extension) plugin = directory.get_plugin() if (hasattr(plugin, 'agent_notifiers') and hasattr(plugin_inst, 'agent_notifiers')): plugin.agent_notifiers.update(plugin_inst.agent_notifiers) # disable incompatible extensions in core plugin if any utils.disable_extension_by_service_plugin(plugin, plugin_inst) LOG.debug("Successfully loaded %(type)s plugin. " "Description: %(desc)s", {"type": plugin_type, "desc": plugin_inst.get_plugin_description()}) @classmethod @runtime.synchronized("manager") def _create_instance(cls): if not cls.has_instance(): cls._instance = cls() @classmethod def has_instance(cls): return cls._instance is not None @classmethod def clear_instance(cls): cls._instance = None @classmethod def get_instance(cls): # double checked locking if not cls.has_instance(): cls._create_instance() return cls._instance @classmethod def set_plugin_for_resource(cls, resource, plugin): cls.get_instance().resource_plugin_mappings[resource] = plugin @classmethod def get_plugin_for_resource(cls, resource): return cls.get_instance().resource_plugin_mappings.get(resource) @classmethod def set_controller_for_resource(cls, resource, controller): cls.get_instance().resource_controller_mappings[resource] = controller @classmethod def get_controller_for_resource(cls, resource): resource = resource.replace('_', '-') res_ctrl_mappings = cls.get_instance().resource_controller_mappings # If no controller is found for resource, try replacing dashes with # underscores return res_ctrl_mappings.get( resource, res_ctrl_mappings.get(resource.replace('-', '_'))) # TODO(blogan): This isn't used by anything else other than tests and # probably should be removed @classmethod def get_service_plugin_by_path_prefix(cls, path_prefix): service_plugins = directory.get_unique_plugins() for service_plugin in service_plugins: plugin_path_prefix = getattr(service_plugin, 'path_prefix', None) if plugin_path_prefix and plugin_path_prefix == path_prefix: return service_plugin @classmethod def add_resource_for_path_prefix(cls, resource, path_prefix): resources = cls.get_instance().path_prefix_resource_mappings[ path_prefix].append(resource) return resources @classmethod def get_resources_for_path_prefix(cls, path_prefix): return cls.get_instance().path_prefix_resource_mappings[path_prefix] def init(): """Call to load the plugins (core+services) machinery.""" if not directory.is_loaded(): NeutronManager.get_instance() 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/neutron_plugin_base_v2.py0000644000175000017500000004124700000000000023670 0ustar00coreycorey00000000000000# Copyright 2011 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ v2 Neutron Plug-in API specification. :class:`NeutronPluginBaseV2` provides the definition of minimum set of methods that needs to be implemented by a v2 Neutron Plug-in. """ import abc from neutron_lib.services import base as base_services import six @six.add_metaclass(abc.ABCMeta) class NeutronPluginBaseV2(base_services.WorkerBase): @abc.abstractmethod def create_subnet(self, context, subnet): """Create a subnet. Create a subnet, which represents a range of IP addresses that can be allocated to devices :param context: neutron api request context :param subnet: dictionary describing the subnet, with keys as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. All keys will be populated. """ pass @abc.abstractmethod def update_subnet(self, context, id, subnet): """Update values of a subnet. :param context: neutron api request context :param id: UUID representing the subnet to update. :param subnet: dictionary with keys indicating fields to update. valid keys are those that have a value of True for 'allow_put' as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. """ pass @abc.abstractmethod def get_subnet(self, context, id, fields=None): """Retrieve a subnet. :param context: neutron api request context :param id: UUID representing the subnet to fetch. :param fields: a list of strings that are valid keys in a subnet dictionary as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. Only these fields will be returned. """ pass @abc.abstractmethod def get_subnets(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """Retrieve a list of subnets. The contents of the list depends on the identity of the user making the request (as indicated by the context) as well as any filters. :param context: neutron api request context :param filters: a dictionary with keys that are valid keys for a subnet as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. :param fields: a list of strings that are valid keys in a subnet dictionary as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. Only these fields will be returned. """ pass def get_subnets_count(self, context, filters=None): """Return the number of subnets. 
The result depends on the identity of the user making the request (as indicated by the context) as well as any filters. :param context: neutron api request context :param filters: a dictionary with keys that are valid keys for a network as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. .. note:: this method is optional, as it was not part of the originally defined plugin API. """ raise NotImplementedError() @abc.abstractmethod def delete_subnet(self, context, id): """Delete a subnet. :param context: neutron api request context :param id: UUID representing the subnet to delete. """ pass def create_subnetpool(self, context, subnetpool): """Create a subnet pool. :param context: neutron api request context :param subnetpool: Dictionary representing the subnetpool to create. """ raise NotImplementedError() def update_subnetpool(self, context, id, subnetpool): """Update a subnet pool. :param context: neutron api request context :param subnetpool: Dictionary representing the subnetpool attributes to update. """ raise NotImplementedError() def get_subnetpool(self, context, id, fields=None): """Show a subnet pool. :param context: neutron api request context :param id: The UUID of the subnetpool to show. """ raise NotImplementedError() def get_subnetpools(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """Retrieve list of subnet pools.""" raise NotImplementedError() def delete_subnetpool(self, context, id): """Delete a subnet pool. :param context: neutron api request context :param id: The UUID of the subnet pool to delete. """ raise NotImplementedError() @abc.abstractmethod def create_network(self, context, network): """Create a network. Create a network, which represents an L2 network segment which can have a set of subnets and ports associated with it. :param context: neutron api request context :param network: dictionary describing the network, with keys as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. All keys will be populated. """ pass @abc.abstractmethod def update_network(self, context, id, network): """Update values of a network. :param context: neutron api request context :param id: UUID representing the network to update. :param network: dictionary with keys indicating fields to update. valid keys are those that have a value of True for 'allow_put' as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. """ pass @abc.abstractmethod def get_network(self, context, id, fields=None): """Retrieve a network. :param context: neutron api request context :param id: UUID representing the network to fetch. :param fields: a list of strings that are valid keys in a network dictionary as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. Only these fields will be returned. """ pass @abc.abstractmethod def get_networks(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """Retrieve a list of networks. The contents of the list depends on the identity of the user making the request (as indicated by the context) as well as any filters. 
:param context: neutron api request context :param filters: a dictionary with keys that are valid keys for a network as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. :param fields: a list of strings that are valid keys in a network dictionary as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. Only these fields will be returned. """ pass def get_networks_count(self, context, filters=None): """Return the number of networks. The result depends on the identity of the user making the request (as indicated by the context) as well as any filters. :param context: neutron api request context :param filters: a dictionary with keys that are valid keys for a network as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. NOTE: this method is optional, as it was not part of the originally defined plugin API. """ raise NotImplementedError() @abc.abstractmethod def delete_network(self, context, id): """Delete a network. :param context: neutron api request context :param id: UUID representing the network to delete. """ pass @abc.abstractmethod def create_port(self, context, port): """Create a port. Create a port, which is a connection point of a device (e.g., a VM NIC) to attach to a L2 neutron network. :param context: neutron api request context :param port: dictionary describing the port, with keys as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. All keys will be populated. """ pass @abc.abstractmethod def update_port(self, context, id, port): """Update values of a port. :param context: neutron api request context :param id: UUID representing the port to update. :param port: dictionary with keys indicating fields to update. valid keys are those that have a value of True for 'allow_put' as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. """ pass @abc.abstractmethod def get_port(self, context, id, fields=None): """Retrieve a port. :param context: neutron api request context :param id: UUID representing the port to fetch. :param fields: a list of strings that are valid keys in a port dictionary as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. Only these fields will be returned. """ pass @abc.abstractmethod def get_ports(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """Retrieve a list of ports. The contents of the list depends on the identity of the user making the request (as indicated by the context) as well as any filters. :param context: neutron api request context :param filters: a dictionary with keys that are valid keys for a port as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. 
:param fields: a list of strings that are valid keys in a port dictionary as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. Only these fields will be returned. """ pass def get_ports_count(self, context, filters=None): """Return the number of ports. The result depends on the identity of the user making the request (as indicated by the context) as well as any filters. :param context: neutron api request context :param filters: a dictionary with keys that are valid keys for a network as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object in :file:`neutron/api/v2/attributes.py`. Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. .. note:: this method is optional, as it was not part of the originally defined plugin API. """ raise NotImplementedError() @abc.abstractmethod def delete_port(self, context, id): """Delete a port. :param context: neutron api request context :param id: UUID representing the port to delete. """ pass def start_rpc_listeners(self): """Start the RPC listeners. Most plugins start RPC listeners implicitly on initialization. In order to support multiple process RPC, the plugin needs to expose control over when this is started. .. note:: this method is optional, as it was not part of the originally defined plugin API. """ raise NotImplementedError() def start_rpc_state_reports_listener(self): """Start the RPC listeners consuming state reports queue. This optional method creates rpc consumer for REPORTS queue only. .. note:: this method is optional, as it was not part of the originally defined plugin API. """ raise NotImplementedError() def rpc_workers_supported(self): """Return whether the plugin supports multiple RPC workers. A plugin that supports multiple RPC workers should override the start_rpc_listeners method to ensure that this method returns True and that start_rpc_listeners is called at the appropriate time. Alternately, a plugin can override this method to customize detection of support for multiple rpc workers .. note:: this method is optional, as it was not part of the originally defined plugin API. """ return (self.__class__.start_rpc_listeners != NeutronPluginBaseV2.start_rpc_listeners) def rpc_state_report_workers_supported(self): """Return whether the plugin supports state report RPC workers. .. note:: this method is optional, as it was not part of the originally defined plugin API. """ return (self.__class__.start_rpc_state_reports_listener != NeutronPluginBaseV2.start_rpc_state_reports_listener) def has_native_datastore(self): """Return True if the plugin uses Neutron's native datastore. .. note:: plugins like ML2 should override this method and return True. 
""" return False ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3190448 neutron-16.0.0.0b2.dev214/neutron/notifiers/0000755000175000017500000000000000000000000020637 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/notifiers/__init__.py0000644000175000017500000000000000000000000022736 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/notifiers/batch_notifier.py0000644000175000017500000000525500000000000024200 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import threading import eventlet from neutron.common import utils class BatchNotifier(object): def __init__(self, batch_interval, callback): self._pending_events = eventlet.Queue() self.callback = callback self.batch_interval = batch_interval self._mutex = threading.Lock() def queue_event(self, event): """Called to queue sending an event with the next batch of events. Sending events individually, as they occur, has been problematic as it can result in a flood of sends. Previously, there was a loopingcall thread that would send batched events on a periodic interval. However, maintaining a persistent thread in the loopingcall was also problematic. This replaces the loopingcall with a mechanism that creates a short-lived thread on demand whenever an event is queued. That thread will check if the lock is released, send all queued events and then sleep for 'batch_interval' seconds. If at the end of this sleep time, other threads have added new events to the event queue, the same thread will process them. At the same time, other threads will be able to add new events to the queue and will spawn new "synced_send" threads to process them. But if the mutex is locked, the spawned thread will end immediately. :param event: the event that occurred. """ if not event: return self._pending_events.put(event) def synced_send(): if not self._mutex.locked(): with self._mutex: while not self._pending_events.empty(): self._notify() # sleeping after send while holding the lock allows # subsequent events to batch up eventlet.sleep(self.batch_interval) utils.spawn_n(synced_send) def _notify(self): batched_events = [] while not self._pending_events.empty(): batched_events.append(self._pending_events.get()) self.callback(batched_events) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/notifiers/ironic.py0000644000175000017500000001427200000000000022502 0ustar00coreycorey00000000000000# Copyright (c) 2019 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneauth1 import loading as ks_loading from neutron_lib.api.definitions import port as port_def from neutron_lib.api.definitions import portbindings as portbindings_def from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as n_const from openstack import connection from openstack import exceptions as os_exc from oslo_config import cfg from oslo_log import log as logging from neutron.notifiers import batch_notifier LOG = logging.getLogger(__name__) BAREMETAL_EVENT_TYPE = 'network' IRONIC_API_VERSION = 'latest' IRONIC_SESSION = None IRONIC_CONF_SECTION = 'ironic' IRONIC_CLIENT_VERSION = 1 @registry.has_registry_receivers class Notifier(object): _instance = None @classmethod def get_instance(cls): if cls._instance is None: cls._instance = cls() return cls._instance def __init__(self): self.batch_notifier = batch_notifier.BatchNotifier( cfg.CONF.send_events_interval, self.send_events) self.irclient = self._get_ironic_client() def _get_session(self, group): auth = ks_loading.load_auth_from_conf_options(cfg.CONF, group) session = ks_loading.load_session_from_conf_options( cfg.CONF, group, auth=auth) return session def _get_ironic_client(self): """Get Ironic client instance.""" global IRONIC_SESSION if not IRONIC_SESSION: IRONIC_SESSION = self._get_session(IRONIC_CONF_SECTION) return connection.Connection( session=IRONIC_SESSION, oslo_conf=cfg.CONF).baremetal def send_events(self, batched_events): try: response = self.irclient.post('/events', json={'events': batched_events}, microversion='1.54') os_exc.raise_from_response(response) except Exception as e: LOG.exception('Error encountered posting the event to ' 'ironic. {error}'.format(error=e)) @registry.receives(resources.PORT, [events.AFTER_UPDATE]) def process_port_update_event(self, resource, event, trigger, original_port=None, port=None, **kwargs): # We only want to notify about baremetal ports. if not (port[portbindings_def.VNIC_TYPE] == portbindings_def.VNIC_BAREMETAL): # TODO(TheJulia): Add the smartnic flag at some point... return original_port_status = original_port['status'] current_port_status = port['status'] port_event = None if (original_port_status == n_const.PORT_STATUS_ACTIVE and current_port_status in [n_const.PORT_STATUS_DOWN, n_const.PORT_STATUS_ERROR]): port_event = 'unbind_port' elif (original_port_status == n_const.PORT_STATUS_DOWN and current_port_status in [n_const.PORT_STATUS_ACTIVE, n_const.PORT_STATUS_ERROR]): port_event = 'bind_port' LOG.debug('Queuing event for {event_type} for port {port} ' 'for status {status}.'.format(event_type=port_event, port=port['id'], status=current_port_status)) if port_event: notify_event = { 'event': '.'.join([BAREMETAL_EVENT_TYPE, port_event]), 'port_id': port['id'], 'mac_address': port[port_def.PORT_MAC_ADDRESS], 'status': current_port_status, 'device_id': port['device_id'], 'binding:host_id': port[portbindings_def.HOST_ID], 'binding:vnic_type': port[portbindings_def.VNIC_TYPE] } # Filter keys with empty string as value. 
In case a type UUID field # or similar is not set the API won't accept empty string. self.batch_notifier.queue_event( {k: v for k, v in notify_event.items() if v != ''}) @registry.receives(resources.PORT, [events.AFTER_DELETE]) def process_port_delete_event(self, resource, event, trigger, original_port=None, port=None, **kwargs): # We only want to notify about baremetal ports. if not (port[portbindings_def.VNIC_TYPE] == portbindings_def.VNIC_BAREMETAL): # TODO(TheJulia): Add the smartnic flag at some point... return port_event = 'delete_port' LOG.debug('Queuing event for {event_type} for port {port} ' 'for status {status}.'.format(event_type=port_event, port=port['id'], status='DELETED')) notify_event = { 'event': '.'.join([BAREMETAL_EVENT_TYPE, port_event]), 'port_id': port['id'], 'mac_address': port[port_def.PORT_MAC_ADDRESS], 'status': 'DELETED', 'device_id': port['device_id'], 'binding:host_id': port[portbindings_def.HOST_ID], 'binding:vnic_type': port[portbindings_def.VNIC_TYPE] } # Filter keys with empty string as value. In case a type UUID field # or similar is not set the API won't accept empty string. self.batch_notifier.queue_event( {k: v for k, v in notify_event.items() if v != ''}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/notifiers/nova.py0000644000175000017500000002544000000000000022161 0ustar00coreycorey00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
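# A hedged, self-contained usage sketch of the BatchNotifier defined in
# batch_notifier.py above, which both ironic.py and the nova notifier below
# instantiate with cfg.CONF.send_events_interval. The 2-second interval and
# the print callback here are illustrative assumptions only.
#
# from neutron.notifiers import batch_notifier
#
# def _send(batched_events):
#     # In the real notifiers this is a single HTTP request carrying the
#     # whole batch; printing stands in for that call.
#     print('delivering %d event(s)' % len(batched_events))
#
# notifier = batch_notifier.BatchNotifier(2, _send)
# notifier.queue_event({'name': 'network-changed', 'tag': 'port-1'})
# notifier.queue_event({'name': 'network-changed', 'tag': 'port-2'})
# # queue_event() spawns a short-lived thread that drains the queue, so both
# # events normally arrive in one _send() call within ~2 seconds.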
from keystoneauth1 import loading as ks_loading from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib import context from neutron_lib import exceptions as exc from neutron_lib.plugins import directory from novaclient import api_versions from novaclient import client as nova_client from novaclient import exceptions as nova_exceptions from oslo_config import cfg from oslo_context import context as common_context from oslo_log import log as logging from oslo_utils import uuidutils from sqlalchemy.orm import attributes as sql_attr from neutron.notifiers import batch_notifier LOG = logging.getLogger(__name__) VIF_UNPLUGGED = 'network-vif-unplugged' VIF_PLUGGED = 'network-vif-plugged' VIF_DELETED = 'network-vif-deleted' NEUTRON_NOVA_EVENT_STATUS_MAP = {constants.PORT_STATUS_ACTIVE: 'completed', constants.PORT_STATUS_ERROR: 'failed', constants.PORT_STATUS_DOWN: 'completed'} NOVA_API_VERSION = "2.1" @registry.has_registry_receivers class Notifier(object): _instance = None @classmethod def get_instance(cls): if cls._instance is None: cls._instance = cls() return cls._instance def __init__(self): auth = ks_loading.load_auth_from_conf_options(cfg.CONF, 'nova') self.session = ks_loading.load_session_from_conf_options( cfg.CONF, 'nova', auth=auth) self.extensions = [ ext for ext in nova_client.discover_extensions(NOVA_API_VERSION) if ext.name == "server_external_events"] self.batch_notifier = batch_notifier.BatchNotifier( cfg.CONF.send_events_interval, self.send_events) def _get_nova_client(self): global_id = common_context.generate_request_id() return nova_client.Client( api_versions.APIVersion(NOVA_API_VERSION), session=self.session, region_name=cfg.CONF.nova.region_name, endpoint_type=cfg.CONF.nova.endpoint_type, extensions=self.extensions, global_request_id=global_id) def _is_compute_port(self, port): try: if (port['device_id'] and uuidutils.is_uuid_like(port['device_id']) and port['device_owner'].startswith( constants.DEVICE_OWNER_COMPUTE_PREFIX)): return True except (KeyError, AttributeError): pass return False def _get_network_changed_event(self, port): return {'name': 'network-changed', 'server_uuid': port['device_id'], 'tag': port['id']} def _get_port_delete_event(self, port): return {'server_uuid': port['device_id'], 'name': VIF_DELETED, 'tag': port['id']} @registry.receives(resources.PORT, [events.BEFORE_RESPONSE]) @registry.receives(resources.FLOATING_IP, [events.BEFORE_RESPONSE]) def _send_nova_notification(self, resource, event, trigger, payload=None): self.send_network_change(payload.action, payload.states[0], payload.latest_state) def send_network_change(self, action, original_obj, returned_obj): """Called when a network change is made that nova cares about. :param action: the event that occurred. :param original_obj: the previous value of resource before action. :param returned_obj: the body returned to client as result of action. """ if not cfg.CONF.notify_nova_on_port_data_changes: return # When neutron re-assigns floating ip from an original instance # port to a new instance port without disassociate it first, an # event should be sent for original instance, that will make nova # know original instance's info, and update database for it. 
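        # Two events end up queued in that case: a synthetic disassociation
        # event built from 'disassociate_returned_obj' for the original
        # instance, followed by the regular event for the new association.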
if (action == 'update_floatingip' and returned_obj['floatingip'].get('port_id') and original_obj.get('port_id')): disassociate_returned_obj = {'floatingip': {'port_id': None}} event = self.create_port_changed_event(action, original_obj, disassociate_returned_obj) self.batch_notifier.queue_event(event) event = self.create_port_changed_event(action, original_obj, returned_obj) self.batch_notifier.queue_event(event) def create_port_changed_event(self, action, original_obj, returned_obj): port = None if action in ['update_port', 'delete_port']: port = returned_obj['port'] elif action in ['update_floatingip', 'create_floatingip', 'delete_floatingip']: # NOTE(arosen) if we are associating a floatingip the # port_id is in the returned_obj. Otherwise on disassociate # it's in the original_object port_id = (returned_obj['floatingip'].get('port_id') or original_obj.get('port_id')) if port_id is None: return ctx = context.get_admin_context() try: port = directory.get_plugin().get_port(ctx, port_id) except exc.PortNotFound: LOG.debug("Port %s was deleted, no need to send any " "notification", port_id) return if port and self._is_compute_port(port): if action == 'delete_port': return self._get_port_delete_event(port) else: return self._get_network_changed_event(port) def _can_notify(self, port): if not port.id: LOG.warning("Port ID not set! Nova will not be notified of " "port status change.") return False # If there is no device_id set there is nothing we can do here. if not port.device_id: LOG.debug("device_id is not set on port %s yet.", port.id) return False # We only want to notify about nova ports. if not self._is_compute_port(port): return False return True def record_port_status_changed(self, port, current_port_status, previous_port_status, initiator): """Determine if nova needs to be notified due to port status change. """ # clear out previous _notify_event port._notify_event = None if not self._can_notify(port): return # We notify nova when a vif is unplugged which only occurs when # the status goes from ACTIVE to DOWN. if (previous_port_status == constants.PORT_STATUS_ACTIVE and current_port_status == constants.PORT_STATUS_DOWN): event_name = VIF_UNPLUGGED # We only notify nova when a vif is plugged which only occurs # when the status goes from: # NO_VALUE/DOWN/BUILD -> ACTIVE/ERROR. elif (previous_port_status in [sql_attr.NO_VALUE, constants.PORT_STATUS_DOWN, constants.PORT_STATUS_BUILD] and current_port_status in [constants.PORT_STATUS_ACTIVE, constants.PORT_STATUS_ERROR]): event_name = VIF_PLUGGED # All the remaining state transitions are of no interest to nova else: LOG.debug("Ignoring state change previous_port_status: " "%(pre_status)s current_port_status: %(cur_status)s" " port_id %(id)s", {'pre_status': previous_port_status, 'cur_status': current_port_status, 'id': port.id}) return port._notify_event = ( {'server_uuid': port.device_id, 'name': event_name, 'status': NEUTRON_NOVA_EVENT_STATUS_MAP.get(current_port_status), 'tag': port.id}) def send_port_status(self, mapper, connection, port): event = getattr(port, "_notify_event", None) self.batch_notifier.queue_event(event) port._notify_event = None def notify_port_active_direct(self, port): """Notify nova about active port Used when port was wired on the host other than port's current host according to port binding. This happens during live migration. In this case ml2 plugin skips port status update but we still we need to notify nova. 
""" if not self._can_notify(port): return port._notify_event = ( {'server_uuid': port.device_id, 'name': VIF_PLUGGED, 'status': 'completed', 'tag': port.id}) self.send_port_status(None, None, port) def send_events(self, batched_events): LOG.debug("Sending events: %s", batched_events) novaclient = self._get_nova_client() try: response = novaclient.server_external_events.create( batched_events) except nova_exceptions.NotFound: LOG.debug("Nova returned NotFound for event: %s", batched_events) except Exception: LOG.exception("Failed to notify nova on events: %s", batched_events) else: if not isinstance(response, list): LOG.error("Error response returned from nova: %s", response) return response_error = False for event in response: try: code = event['code'] except KeyError: response_error = True continue if code != 200: LOG.warning("Nova event: %s returned with failed " "status", event) else: LOG.info("Nova event response: %s", event) if response_error: LOG.error("Error response returned from nova: %s", response) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3230448 neutron-16.0.0.0b2.dev214/neutron/objects/0000755000175000017500000000000000000000000020266 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/README.rst0000644000175000017500000000115200000000000021754 0ustar00coreycorey00000000000000=============== Neutron Objects =============== Directory ========= This directory is designed to contain all modules which have objects definitions shipped with core Neutron. The files and directories located inside of this directory should follow the guidelines below. Structure --------- The Neutron objects tree should have the following structure: * The expected directory structure is flat, except for the ML2 plugins. All ML2 plugin objects should fall under the plugins subdirectory (i.e. plugins/ml2/gre_allocation). * Module names should use singular forms for nouns (network.py, not networks.py). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/__init__.py0000644000175000017500000000145300000000000022402 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import sys def register_objects(): # local import to avoid circular import failure from neutron.common import utils dirn = os.path.dirname(sys.modules[__name__].__file__) utils.import_modules_recursively(dirn) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/address_scope.py0000644000175000017500000000504300000000000023460 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.objects import common_types from oslo_versionedobjects import fields as obj_fields from neutron.db.models import address_scope as models from neutron.db import models_v2 from neutron.db import rbac_db_models from neutron.objects import base from neutron.objects import rbac from neutron.objects import rbac_db from neutron.objects import subnetpool @base.NeutronObjectRegistry.register class AddressScopeRBAC(rbac.RBACBaseObject): # Version 1.0: Initial version VERSION = '1.0' db_model = rbac_db_models.AddressScopeRBAC @base.NeutronObjectRegistry.register class AddressScope(rbac_db.NeutronRbacObject): # Version 1.0: Initial version # Version 1.1: Add RBAC support VERSION = '1.1' # required by RbacNeutronMetaclass rbac_db_cls = AddressScopeRBAC db_model = models.AddressScope fields = { 'id': common_types.UUIDField(), 'project_id': obj_fields.StringField(nullable=True), 'name': obj_fields.StringField(), 'shared': obj_fields.BooleanField(), 'ip_version': common_types.IPVersionEnumField(), } @classmethod def get_network_address_scope(cls, context, network_id, ip_version): query = context.session.query(cls.db_model) query = query.join( models_v2.SubnetPool, models_v2.SubnetPool.address_scope_id == cls.db_model.id) query = query.filter( cls.db_model.ip_version == ip_version, models_v2.Subnet.subnetpool_id == models_v2.SubnetPool.id, models_v2.Subnet.network_id == network_id) scope_model_obj = query.one_or_none() if scope_model_obj: return cls._load_object(context, scope_model_obj) return None @classmethod def get_bound_tenant_ids(cls, context, obj_id): snp_objs = subnetpool.SubnetPool.get_objects( context, address_scope_id=obj_id ) return {snp.project_id for snp in snp_objs} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/agent.py0000644000175000017500000001710500000000000021742 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
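As a hedged illustration of the versioned-object query API shared by the modules in this directory (the admin context and the ip_version filter below are example values, not prescribed usage):

from neutron_lib import context as n_context

from neutron.objects import address_scope

ctx = n_context.get_admin_context()
# get_objects() comes from the NeutronDbObject base class; any declared
# field (here ip_version) can be passed as an equality filter.
for scope in address_scope.AddressScope.get_objects(ctx, ip_version=4):
    print(scope.id, scope.name, scope.shared)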
from neutron_lib import constants as const from neutron_lib.objects import common_types from neutron_lib.objects import utils as obj_utils from oslo_utils import versionutils from oslo_versionedobjects import fields as obj_fields from sqlalchemy import func from neutron.agent.common import utils from neutron.db.models import agent as agent_model from neutron.db.models import l3agent as rb_model from neutron.db.models import l3ha as l3ha_model from neutron.db import models_v2 from neutron.objects import base @base.NeutronObjectRegistry.register class Agent(base.NeutronDbObject): # Version 1.0: Initial version # Version 1.1: Added resources_synced VERSION = '1.1' db_model = agent_model.Agent fields = { 'id': common_types.UUIDField(), 'agent_type': obj_fields.StringField(), 'binary': obj_fields.StringField(), 'topic': obj_fields.StringField(), 'host': obj_fields.StringField(), 'availability_zone': obj_fields.StringField(nullable=True), 'admin_state_up': obj_fields.BooleanField(default=True), 'started_at': obj_fields.DateTimeField(tzinfo_aware=False), 'created_at': obj_fields.DateTimeField(tzinfo_aware=False), 'heartbeat_timestamp': obj_fields.DateTimeField(tzinfo_aware=False), 'description': obj_fields.StringField(nullable=True), 'configurations': common_types.DictOfMiscValuesField(), 'resource_versions': common_types.DictOfMiscValuesField(nullable=True), 'load': obj_fields.IntegerField(default=0), 'resources_synced': obj_fields.BooleanField(nullable=True), } @classmethod def modify_fields_to_db(cls, fields): result = super(Agent, cls).modify_fields_to_db(fields) if ('configurations' in result and not isinstance(result['configurations'], obj_utils.StringMatchingFilterObj)): # dump configuration into string, set '' if empty '{}' result['configurations'] = ( cls.filter_to_json_str(result['configurations'], default='')) if ('resource_versions' in result and not isinstance(result['resource_versions'], obj_utils.StringMatchingFilterObj)): # dump resource version into string, set None if empty '{}' or None result['resource_versions'] = ( cls.filter_to_json_str(result['resource_versions'])) return result @classmethod def modify_fields_from_db(cls, db_obj): fields = super(Agent, cls).modify_fields_from_db(db_obj) if 'configurations' in fields: # load string from DB, set {} if configuration is '' fields['configurations'] = ( cls.load_json_from_str(fields['configurations'], default={})) if 'resource_versions' in fields: # load string from DB, set None if resource_version is None or '' fields['resource_versions'] = ( cls.load_json_from_str(fields['resource_versions'])) return fields def obj_make_compatible(self, primitive, target_version): super(Agent, self).obj_make_compatible(primitive, target_version) _target_version = versionutils.convert_version_to_tuple(target_version) if _target_version < (1, 1): primitive.pop('resources_synced', None) @property def is_active(self): return not utils.is_agent_down(self.heartbeat_timestamp) # TODO(ihrachys) reuse query builder from # get_l3_agents_ordered_by_num_routers @classmethod def get_l3_agent_with_min_routers(cls, context, agent_ids): """Return l3 agent with the least number of routers.""" with cls.db_context_reader(context): query = context.session.query( agent_model.Agent, func.count( rb_model.RouterL3AgentBinding.router_id ).label('count')).outerjoin( rb_model.RouterL3AgentBinding).group_by( agent_model.Agent, rb_model.RouterL3AgentBinding .l3_agent_id).order_by('count') res = query.filter(agent_model.Agent.id.in_(agent_ids)).first() agent_obj = 
cls._load_object(context, res[0]) return agent_obj @classmethod def get_l3_agents_ordered_by_num_routers(cls, context, agent_ids): with cls.db_context_reader(context): query = (context.session.query(agent_model.Agent, func.count( rb_model.RouterL3AgentBinding.router_id) .label('count')). outerjoin(rb_model.RouterL3AgentBinding). group_by(agent_model.Agent). filter(agent_model.Agent.id.in_(agent_ids)). order_by('count')) agents = [cls._load_object(context, record[0]) for record in query] return agents @classmethod def get_ha_agents(cls, context, network_id=None, router_id=None): if not (network_id or router_id): return [] query = context.session.query(agent_model.Agent.host) query = query.join(l3ha_model.L3HARouterAgentPortBinding, l3ha_model.L3HARouterAgentPortBinding.l3_agent_id == agent_model.Agent.id) if router_id: query = query.filter( l3ha_model.L3HARouterAgentPortBinding.router_id == router_id).all() elif network_id: query = query.join(models_v2.Port, models_v2.Port.device_id == l3ha_model.L3HARouterAgentPortBinding.router_id) query = query.filter(models_v2.Port.network_id == network_id, models_v2.Port.status == const.PORT_STATUS_ACTIVE, models_v2.Port.device_owner.in_( (const.DEVICE_OWNER_HA_REPLICATED_INT, const.DEVICE_OWNER_ROUTER_SNAT))).all() # L3HARouterAgentPortBinding will have l3 agent ids of hosting agents. # But we need l2 agent(for tunneling ip) while creating FDB entries. hosts = [host[0] for host in query] agents = cls.get_objects(context, host=hosts) return agents @classmethod def _get_agents_by_availability_zones_and_agent_type( cls, context, agent_type, availability_zones): query = context.session.query( agent_model.Agent).filter_by( agent_type=agent_type).group_by( agent_model.Agent.availability_zone) query = query.filter( agent_model.Agent.availability_zone.in_(availability_zones)).all() agents = [cls._load_object(context, record) for record in query] return agents @classmethod def get_objects_by_agent_mode(cls, context, agent_mode=None, **kwargs): mode_filter = obj_utils.StringContains(agent_mode) return cls.get_objects(context, configurations=mode_filter, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/auto_allocate.py0000644000175000017500000000237500000000000023463 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
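# NOTE: illustrative sketch, not part of the original module. Because
# AutoAllocatedTopology (below) declares primary_keys = ['project_id'],
# a row is looked up per project rather than by 'id'; 'network_id' and
# 'router_id' are in fields_no_update, so they can only be set on create().
# The context and project id are hypothetical.
#
#     topo = AutoAllocatedTopology.get_object(context, project_id='proj-1')
#     if topo:
#         print(topo.network_id, topo.router_id)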
from neutron_lib.objects import common_types from oslo_versionedobjects import fields as obj_fields from neutron.objects import base from neutron.services.auto_allocate import models @base.NeutronObjectRegistry.register class AutoAllocatedTopology(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = models.AutoAllocatedTopology primary_keys = ['project_id'] fields = { 'project_id': obj_fields.StringField(), 'network_id': common_types.UUIDField(), 'router_id': common_types.UUIDField(nullable=True), } fields_no_update = ['network_id', 'router_id'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/base.py0000644000175000017500000011107200000000000021554 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import collections import copy import functools import itertools from neutron_lib.db import api as db_api from neutron_lib import exceptions as n_exc from neutron_lib.objects import exceptions as o_exc from neutron_lib.objects.extensions import standardattributes from oslo_db import exception as obj_exc from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import utils as db_utils from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import versionutils from oslo_versionedobjects import base as obj_base from oslo_versionedobjects import exception as obj_exception from oslo_versionedobjects import fields as obj_fields import six from sqlalchemy import orm from neutron._i18n import _ from neutron.db import standard_attr from neutron.objects.db import api as obj_db_api LOG = logging.getLogger(__name__) _NO_DB_MODEL = object() def get_object_class_by_model(model): for obj_class in NeutronObjectRegistry.obj_classes().values(): obj_class = obj_class[0] if getattr(obj_class, 'db_model', _NO_DB_MODEL) is model: return obj_class raise o_exc.NeutronDbObjectNotFoundByModel(model=model.__name__) def register_filter_hook_on_model(model, filter_name): obj_class = get_object_class_by_model(model) obj_class.add_extra_filter_name(filter_name) class LazyQueryIterator(six.Iterator): def __init__(self, obj_class, lazy_query): self.obj_class = obj_class self.context = None self.query = lazy_query def __iter__(self): self.results = self.query.all() self.i = 0 return self def __next__(self): if self.i >= len(self.results): raise StopIteration() item = self.obj_class._load_object(self.context, self.results[self.i]) self.i += 1 return item class Pager(object): '''Pager class This class represents a pager object. It is consumed by get_objects to specify sorting and pagination criteria. ''' def __init__(self, sorts=None, limit=None, page_reverse=None, marker=None): '''Initialize :param sorts: A list of (key, direction) tuples. direction: True == ASC, False == DESC :param limit: maximum number of items to return :param page_reverse: True if sort direction is reversed. 
:param marker: the last item of the previous page; when used, returns next results after the marker resource. ''' self.sorts = sorts self.limit = limit self.page_reverse = page_reverse self.marker = marker def to_kwargs(self, context, obj_cls): res = { attr: getattr(self, attr) for attr in ('sorts', 'limit', 'page_reverse') if getattr(self, attr) is not None } if self.marker and self.limit: res['marker_obj'] = obj_db_api.get_object( obj_cls, context, id=self.marker) return res def __str__(self): return str(self.__dict__) def __eq__(self, other): return self.__dict__ == other.__dict__ class NeutronObjectRegistry(obj_base.VersionedObjectRegistry): _registry = None def __new__(cls, *args, **kwargs): # TODO(slaweq): this should be moved back to oslo.versionedobjects # lib as soon as bug https://bugs.launchpad.net/neutron/+bug/1731948 # will be fixed and OVO's registry class will support defining custom # registries for objects. # NOTE(slaweq): it is overridden method # oslo_versionedobjects.base.VersionedObjectRegistry.__new__ # We need to overwrite it to use separate registry for Neutron's # objects. # This is necessary to avoid clash in naming objects between Neutron # and e.g. os-vif (for example Route or Subnet objects are used in # both) if not NeutronObjectRegistry._registry: NeutronObjectRegistry._registry = object.__new__( NeutronObjectRegistry, *args, **kwargs) NeutronObjectRegistry._registry._obj_classes = \ collections.defaultdict(list) self = object.__new__(cls, *args, **kwargs) self._obj_classes = ( NeutronObjectRegistry._registry._obj_classes) return self @six.add_metaclass(abc.ABCMeta) class NeutronObject(obj_base.VersionedObject, obj_base.VersionedObjectDictCompat, obj_base.ComparableVersionedObject): synthetic_fields = [] extra_filter_names = set() # To use lazy queries for child objects, you must set the ORM # relationship in the db model to 'dynamic'. By default, all # children are eager loaded. 
lazy_fields = set() def __init__(self, context=None, **kwargs): super(NeutronObject, self).__init__(context, **kwargs) self._load_synthetic_fields = True self.obj_set_defaults() def _synthetic_fields_items(self): for field in self.synthetic_fields: if field in self: yield field, getattr(self, field) def to_dict(self): dict_ = {} # not using obj_to_primitive because it skips extra fields for name, value in self.items(): # we have to check if item is in fields because obj_extra_fields # is included in self.items() if name in self.fields and name not in self.synthetic_fields: value = self.fields[name].to_primitive(self, name, value) if name == 'tenant_id': if ('project_id' in self.fields and not self.obj_attr_is_set('project_id')): continue dict_[name] = value for field_name, value in self._synthetic_fields_items(): field = self.fields[field_name] if isinstance(field, obj_fields.ListOfObjectsField): dict_[field_name] = [obj.to_dict() for obj in value] elif isinstance(field, obj_fields.ObjectField): dict_[field_name] = ( dict_[field_name].to_dict() if value else None) else: dict_[field_name] = field.to_primitive(self, field_name, value) return dict_ @classmethod def is_synthetic(cls, field): return field in cls.synthetic_fields @classmethod def is_object_field(cls, field): return (isinstance(cls.fields[field], obj_fields.ListOfObjectsField) or isinstance(cls.fields[field], obj_fields.ObjectField)) @classmethod def obj_class_from_name(cls, objname, objver): """Returns a class from the registry based on a name and version.""" # NOTE(slaweq): it is override method # oslo_versionedobjects.base.VersionedObject.obj_class_from_name # We need to override it to use Neutron's objects registry class # (NeutronObjectRegistry) instead of original VersionedObjectRegistry # class from oslo_versionedobjects # This is necessary to avoid clash in naming objects between Neutron # and e.g. os-vif (for example Route or Subnet objects are used in # both) if objname not in NeutronObjectRegistry.obj_classes(): LOG.error('Unable to instantiate unregistered object type ' '%(objtype)s', dict(objtype=objname)) raise obj_exception.UnsupportedObjectError(objtype=objname) # NOTE(comstud): If there's not an exact match, return the highest # compatible version. The objects stored in the class are sorted # such that highest version is first, so only set compatible_match # once below. compatible_match = None for objclass in NeutronObjectRegistry.obj_classes()[objname]: if objclass.VERSION == objver: return objclass if (not compatible_match and versionutils.is_compatible(objver, objclass.VERSION)): compatible_match = objclass if compatible_match: return compatible_match # As mentioned above, latest version is always first in the list. latest_ver = ( NeutronObjectRegistry.obj_classes()[objname][0].VERSION) raise obj_exception.IncompatibleObjectVersion(objname=objname, objver=objver, supported=latest_ver) @classmethod def clean_obj_from_primitive(cls, primitive, context=None): obj = cls.obj_from_primitive(primitive, context) obj.obj_reset_changes() return obj @classmethod def get_object(cls, context, fields=None, **kwargs): raise NotImplementedError() @classmethod def add_extra_filter_name(cls, filter_name): """Register filter passed from API layer. :param filter_name: Name of the filter passed in the URL Filter names are validated in validate_filters() method which by default allows filters based on fields' names. Extensions can create new filter names. Such names must be registered to particular object with this method. 
""" cls.extra_filter_names.add(filter_name) @classmethod def validate_filters(cls, **kwargs): bad_filters = {key for key in kwargs if key not in cls.fields or cls.is_synthetic(key)} bad_filters.difference_update(cls.extra_filter_names) if bad_filters: bad_filters = ', '.join(bad_filters) msg = _("'%s' is not supported for filtering") % bad_filters raise n_exc.InvalidInput(error_message=msg) @classmethod @abc.abstractmethod def get_objects(cls, context, _pager=None, validate_filters=True, fields=None, **kwargs): raise NotImplementedError() @classmethod def get_values(cls, context, field, validate_filters=True, **kwargs): raise NotImplementedError() @classmethod def _update_objects(cls, objects, values): if not isinstance(objects, collections.Sequence): objects = (objects, ) for obj in objects: for k, v in values.items(): setattr(obj, k, v) obj.update() return len(objects) @classmethod def update_object(cls, context, values, validate_filters=True, **kwargs): obj = cls.get_object( context, validate_filters=validate_filters, **kwargs) if obj: cls._update_objects(obj, values) return obj @classmethod def update_objects(cls, context, values, validate_filters=True, **kwargs): objs = cls.get_objects( context, validate_filters=validate_filters, **kwargs) return cls._update_objects(objs, values) @classmethod def delete_objects(cls, context, validate_filters=True, **kwargs): objs = cls.get_objects( context, validate_filters=validate_filters, **kwargs) for obj in objs: obj.delete() return len(objs) def create(self): raise NotImplementedError() def update(self): raise NotImplementedError() def delete(self): raise NotImplementedError() @classmethod def count(cls, context, validate_filters=True, **kwargs): '''Count the number of objects matching filtering criteria.''' return len( cls.get_objects( context, validate_filters=validate_filters, **kwargs)) def _guarantee_rw_subtransaction(func): @functools.wraps(func) def decorator(self, *args, **kwargs): with self.db_context_writer(self.obj_context): return func(self, *args, **kwargs) return decorator class DeclarativeObject(abc.ABCMeta): def __init__(cls, name, bases, dct): super(DeclarativeObject, cls).__init__(name, bases, dct) if 'project_id' in cls.fields: obj_extra_fields_set = set(cls.obj_extra_fields) obj_extra_fields_set.add('tenant_id') cls.obj_extra_fields = list(obj_extra_fields_set) setattr(cls, 'tenant_id', property(lambda x: x.get('project_id', None))) fields_no_update_set = set(cls.fields_no_update) for base in itertools.chain([cls], bases): keys_set = set() if hasattr(base, 'primary_keys'): keys_set.update(base.primary_keys) if hasattr(base, 'obj_extra_fields'): keys_set.update(base.obj_extra_fields) for key in keys_set: if key in cls.fields or key in cls.obj_extra_fields: fields_no_update_set.add(key) cls.fields_no_update = list(fields_no_update_set) model = getattr(cls, 'db_model', None) if model: # generate unique_keys from the model if not getattr(cls, 'unique_keys', None): cls.unique_keys = [] obj_field_names = set(cls.fields.keys()) model_to_obj_translation = { v: k for (k, v) in cls.fields_need_translation.items()} keys = db_utils.get_unique_keys(model) or [] for model_unique_key in keys: obj_unique_key = [model_to_obj_translation.get(key, key) for key in model_unique_key] if obj_field_names.issuperset(obj_unique_key): cls.unique_keys.append(obj_unique_key) cls.create = _guarantee_rw_subtransaction(cls.create) cls.update = _guarantee_rw_subtransaction(cls.update) if (hasattr(cls, 'has_standard_attributes') and 
cls.has_standard_attributes()): setattr(cls, 'standard_attr_id', property(lambda x: x.db_obj.standard_attr_id if x.db_obj else None)) standardattributes.add_standard_attributes(cls) standardattributes.add_tag_filter_names(cls) # Instantiate extra filters per class cls.extra_filter_names = set(cls.extra_filter_names) # add tenant_id filter for objects that have project_id if 'project_id' in cls.fields and 'tenant_id' not in cls.fields: cls.extra_filter_names.add('tenant_id') invalid_fields = [f for f in cls.synthetic_fields if f not in cls.fields] if invalid_fields: raise o_exc.NeutronObjectValidatorException(fields=invalid_fields) @six.add_metaclass(DeclarativeObject) class NeutronDbObject(NeutronObject): # should be overridden for all persistent objects db_model = None # should be overridden for all rbac aware objects rbac_db_cls = None # whether to use new engine facade for the object new_facade = False primary_keys = ['id'] # 'unique_keys' is a list of unique keys that can be used with get_object # instead of 'primary_keys' (e.g. [['key1'], ['key2a', 'key2b']]). # By default 'unique_keys' will be inherited from the 'db_model' unique_keys = [] # this is a dict to store the association between the foreign key and the # corresponding key in the main table for a synthetic field of a specific # class, e.g. port extension has 'port_id' as foreign key, that is # associated with the key 'id' of the table Port for the synthetic # field of class Port. So foreign_keys = {'Port': {'port_id': 'id'}}. # The assumption is the association is the same for all object fields. # E.g. all the port extension will use 'port_id' as key. foreign_keys = {} fields_no_update = [] # dict with name mapping: {'field_name_in_object': 'field_name_in_db'} # It can be used also as DB relationship mapping to synthetic fields name. # It is needed to load synthetic fields with one SQL query using side # loaded entities. # Examples: {'synthetic_field_name': 'relationship_name_in_model'} # {'field_name_in_object': 'field_name_in_db'} fields_need_translation = {} # obj_extra_fields defines properties that are not part of the model # but we want to expose them for easier usage of the object. # Handling of obj_extra_fields is in oslo.versionedobjects. # The extra fields can be accessed as read only property and are exposed # in to_dict() # obj_extra_fields = [] def __init__(self, *args, **kwargs): super(NeutronDbObject, self).__init__(*args, **kwargs) self._captured_db_model = None @property def db_obj(self): '''Return a database model that persists object data.''' return self._captured_db_model def _set_lazy_contexts(self, fields, context): for field in self.lazy_fields.intersection(fields): if isinstance(fields[field], LazyQueryIterator): fields[field].context = context def from_db_object(self, db_obj): fields = self.modify_fields_from_db(db_obj) if self.lazy_fields: self._set_lazy_contexts(fields, self.obj_context) for field in self.fields: if field in fields and not self.is_synthetic(field): setattr(self, field, fields[field]) if self._load_synthetic_fields: self.load_synthetic_db_fields(db_obj) self._captured_db_model = db_obj self.obj_reset_changes() @classmethod def has_standard_attributes(cls): return bool(cls.db_model and issubclass(cls.db_model, standard_attr.HasStandardAttributes)) @classmethod def modify_fields_to_db(cls, fields): """Modify the fields before data is inserted into DB. This method enables to modify the fields and its content before data is inserted into DB. 
It uses the fields_need_translation dict with the structure:
        {
            'field_name_in_object': 'field_name_in_db'
        }

        :param fields: dict of fields from NeutronDbObject
        :return: modified dict of fields
        """
        for k, v in fields.items():
            if isinstance(v, LazyQueryIterator):
                fields[k] = list(v)
        result = copy.deepcopy(dict(fields))
        for field, field_db in cls.fields_need_translation.items():
            if field in result:
                result[field_db] = result.pop(field)
        return result

    @classmethod
    def _get_lazy_iterator(cls, field, appender_query):
        if field not in cls.lazy_fields:
            raise KeyError(_('Field %s is not a lazy query field') % field)
        n_obj_classes = NeutronObjectRegistry.obj_classes()
        n_obj = n_obj_classes.get(cls.fields[field].objname)
        return LazyQueryIterator(n_obj[0], appender_query)

    @classmethod
    def modify_fields_from_db(cls, db_obj):
        """Modify the fields after the data has been fetched from the DB.

        It uses the fields_need_translation dict with the structure:
        {
            'field_name_in_object': 'field_name_in_db'
        }

        :param db_obj: model fetched from the database
        :return: modified dict of DB values
        """
        # db models can have declarative proxies that are not exposed into
        # db.keys() so we must fetch data based on object fields definition
        potential_fields = (list(cls.fields.keys()) +
                            list(cls.fields_need_translation.values()))
        result = {field: db_obj[field] for field in potential_fields
                  if db_obj.get(field) is not None}
        for field, field_db in cls.fields_need_translation.items():
            if field_db in result:
                result[field] = result.pop(field_db)
        for k, v in result.items():
            # don't allow sqlalchemy lists to propagate outside
            if isinstance(v, orm.collections.InstrumentedList):
                result[k] = list(v)
            if isinstance(v, orm.dynamic.AppenderQuery):
                result[k] = cls._get_lazy_iterator(k, v)
        return result

    @classmethod
    def _load_object(cls, context, db_obj, fields=None):
        obj = cls(context)

        if fields is not None and len(fields) != 0:
            if len(set(fields).intersection(set(cls.synthetic_fields))) == 0:
                obj._load_synthetic_fields = False

        obj.from_db_object(db_obj)
        return obj

    def obj_load_attr(self, attrname):
        """Set None for nullable fields that have an unknown value.

        If a model attribute is not present in the database, the value
        stored under the ``attrname`` field is unknown. In such cases, if
        the ``attrname`` field is nullable, set it to None.
        """
        try:
            is_attr_nullable = self.fields[attrname].nullable
        except KeyError:
            return super(NeutronDbObject, self).obj_load_attr(attrname)

        if is_attr_nullable:
            self[attrname] = None

    # TODO(ihrachys) remove once we switch plugin code to enginefacade
    @staticmethod
    def _use_db_facade(context):
        try:
            enginefacade._transaction_ctx_for_context(context)
        except obj_exc.NoEngineContextEstablished:
            return False
        return True

    @classmethod
    def db_context_writer(cls, context):
        """Return a read-write session activation decorator."""
        if cls.new_facade or cls._use_db_facade(context):
            return db_api.CONTEXT_WRITER.using(context)
        return db_api.autonested_transaction(context.session)

    @classmethod
    def db_context_reader(cls, context):
        """Return a read-only session activation decorator."""
        if cls.new_facade or cls._use_db_facade(context):
            return db_api.CONTEXT_READER.using(context)
        return db_api.autonested_transaction(context.session)

    @classmethod
    def get_object(cls, context, fields=None, **kwargs):
        """Fetch a single object

        Return the first matching result converted to a versioned object,
        or None if no row matches.

        :param context:
        :param fields: indicate which fields the caller is interested in
                       using. Note that currently this is limited to avoid
                       loading synthetic fields when possible, and does not
                       affect db queries. Default is None, which is the same
                       as []. Example: ['id', 'name']
        :param kwargs: multiple keys defined by key=value pairs
        :return: single object of NeutronDbObject class or None
        """
        lookup_keys = set(kwargs.keys())
        all_keys = itertools.chain([cls.primary_keys], cls.unique_keys)
        if not any(lookup_keys.issuperset(keys) for keys in all_keys):
            missing_keys = set(cls.primary_keys).difference(lookup_keys)
            raise o_exc.NeutronPrimaryKeyMissing(object_class=cls,
                                                 missing_keys=missing_keys)

        with cls.db_context_reader(context):
            db_obj = obj_db_api.get_object(
                cls, context, **cls.modify_fields_to_db(kwargs))
            if db_obj:
                return cls._load_object(context, db_obj, fields=fields)

    @classmethod
    def get_objects(cls, context, _pager=None, validate_filters=True,
                    fields=None, **kwargs):
        """Fetch a list of objects

        Fetch all results from the DB and convert them to versioned objects.

        :param context:
        :param _pager: a Pager object representing advanced sorting/pagination
                       criteria
        :param validate_filters: Raises an error in case of passing an unknown
                                 filter
        :param fields: indicate which fields the caller is interested in
                       using. Note that currently this is limited to avoid
                       loading synthetic fields when possible, and does not
                       affect db queries. Default is None, which is the same
                       as []. Example: ['id', 'name']
        :param kwargs: multiple keys defined by key=value pairs
        :return: list of objects of NeutronDbObject class or empty list
        """
        if validate_filters:
            cls.validate_filters(**kwargs)
        with cls.db_context_reader(context):
            db_objs = obj_db_api.get_objects(
                cls, context, _pager=_pager,
                **cls.modify_fields_to_db(kwargs))
            return [cls._load_object(context, db_obj, fields=fields)
                    for db_obj in db_objs]

    @classmethod
    def get_values(cls, context, field, validate_filters=True, **kwargs):
        """Fetch a list of values of a specific object's field

        Fetch a specific column from the DB.

        :param context:
        :param field: a specific field of the object
        :param validate_filters: Raises an error in case of passing an unknown
                                 filter
        :param kwargs: multiple keys defined by key=value pairs
        :return: list of values of the requested field, or an empty list
        """
        cls._validate_field(field)
        db_field = cls.fields_need_translation.get(field, field)
        if validate_filters:
            cls.validate_filters(**kwargs)
        with cls.db_context_reader(context):
            db_values = obj_db_api.get_values(
                cls, context, db_field, **cls.modify_fields_to_db(kwargs))
        obj = cls(context)
        values = []
        for db_value in db_values:
            value = cls.modify_fields_from_db({db_field: db_value}).get(field)
            value = cls.fields[field].coerce(obj, field, value)
            values.append(value)
        return values

    @classmethod
    def _validate_field(cls, field):
        if field not in cls.fields or cls.is_synthetic(field):
            msg = _("Get value of field '%(field)s' is not supported by "
                    "object '%(object)s'.") % {'field': field, 'object': cls}
            raise n_exc.InvalidInput(error_message=msg)

    @classmethod
    def update_object(cls, context, values, validate_filters=True, **kwargs):
        """Update an object that matches the filtering criteria in the DB.
:param context: :param values: multiple keys to update in matching objects :param validate_filters: Raises an error in case of passing an unknown filter :param kwargs: multiple keys defined by key=value pairs :return: The updated version of the object """ if validate_filters: cls.validate_filters(**kwargs) # if we have standard attributes, we will need to fetch records to # update revision numbers db_obj = None if cls.has_standard_attributes(): return super(NeutronDbObject, cls).update_object( context, values, validate_filters=False, **kwargs) else: with cls.db_context_writer(context): db_obj = obj_db_api.update_object( cls, context, cls.modify_fields_to_db(values), **cls.modify_fields_to_db(kwargs)) return cls._load_object(context, db_obj) @classmethod def update_objects(cls, context, values, validate_filters=True, **kwargs): """Update objects that match filtering criteria from DB. :param context: :param values: multiple keys to update in matching objects :param validate_filters: Raises an error in case of passing an unknown filter :param kwargs: multiple keys defined by key=value pairs :return: Number of entries updated """ if validate_filters: cls.validate_filters(**kwargs) with cls.db_context_writer(context): # if we have standard attributes, we will need to fetch records to # update revision numbers if cls.has_standard_attributes(): return super(NeutronDbObject, cls).update_objects( context, values, validate_filters=False, **kwargs) return obj_db_api.update_objects( cls, context, cls.modify_fields_to_db(values), **cls.modify_fields_to_db(kwargs)) @classmethod def delete_objects(cls, context, validate_filters=True, **kwargs): """Delete objects that match filtering criteria from DB. :param context: :param validate_filters: Raises an error in case of passing an unknown filter :param kwargs: multiple keys defined by key=value pairs :return: Number of entries deleted """ if validate_filters: cls.validate_filters(**kwargs) with cls.db_context_writer(context): return obj_db_api.delete_objects( cls, context, **cls.modify_fields_to_db(kwargs)) @classmethod def is_accessible(cls, context, db_obj): return (context.is_admin or context.tenant_id == db_obj.tenant_id) @staticmethod def filter_to_str(value): if isinstance(value, list): return [str(val) for val in value] return str(value) @staticmethod def filter_to_json_str(value, default=None): def _dict_to_json(v): return ( jsonutils.dumps( collections.OrderedDict( sorted(v.items(), key=lambda t: t[0]) ) ) if v else default ) if isinstance(value, list): return [_dict_to_json(val) for val in value] v = _dict_to_json(value) return v @staticmethod def load_json_from_str(field, default=None): value = field or default if value: value = jsonutils.loads(value) return value def _get_changed_persistent_fields(self): fields = self.obj_get_changes() for field in self.synthetic_fields: if field in fields: del fields[field] return fields def _validate_changed_fields(self, fields): fields = fields.copy() forbidden_updates = set(self.fields_no_update) & set(fields.keys()) if forbidden_updates: raise o_exc.NeutronObjectUpdateForbidden(fields=forbidden_updates) return fields def load_synthetic_db_fields(self, db_obj=None): """Load synthetic DB fields Load the synthetic fields that are stored in a different table from the main object. This method doesn't take care of loading synthetic fields that aren't stored in the DB, e.g. 'shared' in RBAC policy. 
""" clsname = self.__class__.__name__ # TODO(rossella_s) Find a way to handle ObjectFields with # subclasses=True for field in self.synthetic_fields: try: field_def = self.fields[field] objclasses = NeutronObjectRegistry.obj_classes( ).get(field_def.objname) except AttributeError: # NOTE(rossella_s) this is probably because this field is not # an ObjectField continue if not objclasses: # NOTE(rossella_s) some synthetic fields are not handled by # this method, for example the ones that have subclasses, see # QosRule continue objclass = objclasses[0] foreign_keys = objclass.foreign_keys.get(clsname) if not foreign_keys: raise o_exc.NeutronSyntheticFieldsForeignKeysNotFound( parent=clsname, child=objclass.__name__) if len(foreign_keys.keys()) > 1: raise o_exc.NeutronSyntheticFieldMultipleForeignKeys( field=field) synthetic_field_db_name = ( self.fields_need_translation.get(field, field)) # synth_db_objs can be list, empty list or None, that is why # we need 'is not None', because [] is valid case for 'True' if isinstance(field_def, obj_fields.ListOfObjectsField): synth_db_objs = (db_obj.get(synthetic_field_db_name, None) if db_obj else None) if synth_db_objs is not None: synth_objs = [objclass._load_object(self.obj_context, obj) for obj in synth_db_objs] else: synth_objs = objclass.get_objects( self.obj_context, **{ k: getattr(self, v) if v in self else db_obj.get(v) for k, v in foreign_keys.items()}) setattr(self, field, synth_objs) else: synth_db_obj = (db_obj.get(synthetic_field_db_name, None) if db_obj else None) if synth_db_obj: synth_db_obj = objclass._load_object(self.obj_context, synth_db_obj) setattr(self, field, synth_db_obj) self.obj_reset_changes([field]) def create(self): fields = self._get_changed_persistent_fields() with self.db_context_writer(self.obj_context): try: db_obj = obj_db_api.create_object( self, self.obj_context, self.modify_fields_to_db(fields)) except obj_exc.DBDuplicateEntry as db_exc: raise o_exc.NeutronDbObjectDuplicateEntry( object_class=self.__class__, db_exception=db_exc) self.from_db_object(db_obj) def _get_composite_keys(self): keys = {} for key in self.primary_keys: keys[key] = getattr(self, key) return keys def update_fields(self, obj_data, reset_changes=False): """Updates fields of an object that are not forbidden to be updated. :param obj_data: the full set of object data :type obj_data: dict :param reset_changes: indicates whether the object's current set of changed fields should be cleared :type reset_changes: boolean :returns: None """ if reset_changes: self.obj_reset_changes() for k, v in obj_data.items(): if k not in self.fields_no_update: setattr(self, k, v) def update(self): updates = self._get_changed_persistent_fields() updates = self._validate_changed_fields(updates) with self.db_context_writer(self.obj_context): db_obj = obj_db_api.update_object( self, self.obj_context, self.modify_fields_to_db(updates), **self.modify_fields_to_db( self._get_composite_keys())) self.from_db_object(db_obj) def delete(self): obj_db_api.delete_object(self, self.obj_context, **self.modify_fields_to_db( self._get_composite_keys())) self._captured_db_model = None @classmethod def count(cls, context, validate_filters=True, **kwargs): """Count the number of objects matching filtering criteria. 
:param context: :param validate_filters: Raises an error in case of passing an unknown filter :param kwargs: multiple keys defined by key=value pairs :return: number of matching objects """ if validate_filters: cls.validate_filters(**kwargs) return obj_db_api.count( cls, context, **cls.modify_fields_to_db(kwargs) ) @classmethod def objects_exist(cls, context, validate_filters=True, **kwargs): """Check if objects are present in DB. :param context: :param validate_filters: Raises an error in case of passing an unknown filter :param kwargs: multiple keys defined by key=value pairs :return: boolean. True if object is present. """ if validate_filters: cls.validate_filters(**kwargs) # Succeed if at least a single object matches; no need to fetch more return bool(obj_db_api.count( cls, context, **cls.modify_fields_to_db(kwargs)) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/conntrack_helper.py0000644000175000017500000000244200000000000024163 0ustar00coreycorey00000000000000# Copyright (c) 2019 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.objects import common_types from oslo_versionedobjects import fields as obj_fields from neutron.db.models import conntrack_helper as models from neutron.objects import base @base.NeutronObjectRegistry.register class ConntrackHelper(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = models.ConntrackHelper fields = { 'id': common_types.UUIDField(), 'router_id': common_types.UUIDField(), 'protocol': common_types.IpProtocolEnumField(), 'port': common_types.PortRangeField(), 'helper': obj_fields.StringField(), } primary_keys = ['id'] foreign_keys = {'Routers': {'router_id': 'id'}} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3230448 neutron-16.0.0.0b2.dev214/neutron/objects/db/0000755000175000017500000000000000000000000020653 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/db/__init__.py0000644000175000017500000000000000000000000022752 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/db/api.py0000644000175000017500000001066100000000000022002 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # TODO(ihrachys): cover the module with functional tests targeting supported # backends from neutron_lib.db import model_query from neutron_lib import exceptions as n_exc from neutron_lib.objects import utils as obj_utils from oslo_utils import uuidutils # Common database operation implementations def _get_filter_query(obj_cls, context, **kwargs): with obj_cls.db_context_reader(context): filters = _kwargs_to_filters(**kwargs) query = model_query.get_collection_query( context, obj_cls.db_model, filters) return query def get_object(obj_cls, context, **kwargs): return _get_filter_query(obj_cls, context, **kwargs).first() def count(obj_cls, context, **kwargs): return _get_filter_query(obj_cls, context, **kwargs).count() def _kwargs_to_filters(**kwargs): retain_classes = (list, set, obj_utils.FilterObj) return {k: v if isinstance(v, retain_classes) else [v] for k, v in kwargs.items()} def get_objects(obj_cls, context, _pager=None, **kwargs): with obj_cls.db_context_reader(context): filters = _kwargs_to_filters(**kwargs) return model_query.get_collection( context, obj_cls.db_model, dict_func=None, # return all the data filters=filters, **(_pager.to_kwargs(context, obj_cls) if _pager else {})) def get_values(obj_cls, context, field, **kwargs): with obj_cls.db_context_reader(context): filters = _kwargs_to_filters(**kwargs) return model_query.get_values( context, obj_cls.db_model, field, filters=filters) def create_object(obj_cls, context, values, populate_id=True): with obj_cls.db_context_writer(context): if (populate_id and 'id' not in values and hasattr(obj_cls.db_model, 'id')): values['id'] = uuidutils.generate_uuid() db_obj = obj_cls.db_model(**values) context.session.add(db_obj) return db_obj def _safe_get_object(obj_cls, context, **kwargs): db_obj = get_object(obj_cls, context, **kwargs) if db_obj is None: key = ", ".join(['%s=%s' % (key, value) for (key, value) in kwargs.items()]) raise n_exc.ObjectNotFound( id="%s(%s)" % (obj_cls.db_model.__name__, key)) return db_obj def update_object(obj_cls, context, values, **kwargs): with obj_cls.db_context_writer(context): db_obj = _safe_get_object(obj_cls, context, **kwargs) db_obj.update(values) db_obj.save(session=context.session) return db_obj def delete_object(obj_cls, context, **kwargs): with obj_cls.db_context_writer(context): db_obj = _safe_get_object(obj_cls, context, **kwargs) context.session.delete(db_obj) def update_objects(obj_cls, context, values, **kwargs): '''Update matching objects, if any. Return number of updated objects. This function does not raise exceptions if nothing matches. :param obj_cls: Object class :param values: values to update in matching objects :param kwargs: multiple filters defined by key=value pairs :return: Number of entries updated ''' with obj_cls.db_context_writer(context): if not values: return count(obj_cls, context, **kwargs) q = _get_filter_query(obj_cls, context, **kwargs) return q.update(values, synchronize_session=False) def delete_objects(obj_cls, context, **kwargs): '''Delete matching objects, if any. Return number of deleted objects. This function does not raise exceptions if nothing matches. 
:param obj_cls: Object class :param kwargs: multiple filters defined by key=value pairs :return: Number of entries deleted ''' with obj_cls.db_context_writer(context): db_objs = get_objects(obj_cls, context, **kwargs) for db_obj in db_objs: context.session.delete(db_obj) return len(db_objs) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3230448 neutron-16.0.0.0b2.dev214/neutron/objects/extensions/0000755000175000017500000000000000000000000022465 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/extensions/__init__.py0000644000175000017500000000000000000000000024564 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/extensions/port_security.py0000644000175000017500000000206200000000000025752 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import port_security from neutron_lib.objects import common_types from oslo_versionedobjects import fields as obj_fields from neutron.objects import base class _PortSecurity(base.NeutronDbObject): fields = { 'id': common_types.UUIDField(), 'port_security_enabled': obj_fields.BooleanField( default=port_security.DEFAULT_PORT_SECURITY), } foreign_keys = { 'Port': {'id': 'id'}, 'Network': {'id': 'id'}, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/flavor.py0000644000175000017500000000615700000000000022142 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
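# NOTE: illustrative sketch, not part of the original module. ServiceProfile
# and Flavor (below) expose their many-to-many binding through the synthetic
# 'flavor_ids'/'service_profile_ids' sets, which are filled in by
# from_db_object(); to change the association, create or delete a
# FlavorServiceProfileBinding rather than assigning to the set. The
# identifiers below are hypothetical.
#
#     binding = FlavorServiceProfileBinding(
#         context, flavor_id=flavor_id, service_profile_id=profile_id)
#     binding.create()
#     profile = ServiceProfile.get_object(context, id=profile_id)
#     assert flavor_id in profile.flavor_ids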
from neutron_lib.objects import common_types from oslo_versionedobjects import fields as obj_fields from neutron.db.models import flavor as models from neutron.objects import base @base.NeutronObjectRegistry.register class FlavorServiceProfileBinding(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = models.FlavorServiceProfileBinding primary_keys = ['flavor_id', 'service_profile_id'] fields = { 'flavor_id': common_types.UUIDField(), 'service_profile_id': common_types.UUIDField(), } @base.NeutronObjectRegistry.register class ServiceProfile(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = models.ServiceProfile synthetic_fields = ['flavor_ids'] fields = { 'id': common_types.UUIDField(), 'description': obj_fields.StringField(nullable=True), 'driver': obj_fields.StringField(), 'enabled': obj_fields.BooleanField(default=True), 'metainfo': obj_fields.StringField(nullable=True), 'flavor_ids': common_types.SetOfUUIDsField(nullable=True, default=None) } def from_db_object(self, db_obj): super(ServiceProfile, self).from_db_object(db_obj) if db_obj.get('flavors', []): self.flavor_ids = { fl.flavor_id for fl in db_obj.flavors } else: self.flavor_ids = set() self.obj_reset_changes(['flavor_ids']) @base.NeutronObjectRegistry.register class Flavor(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = models.Flavor synthetic_fields = ['service_profile_ids'] fields = { 'id': common_types.UUIDField(), 'name': obj_fields.StringField(nullable=True), 'description': obj_fields.StringField(nullable=True), 'enabled': obj_fields.BooleanField(default=True), 'service_type': obj_fields.StringField(nullable=True), 'service_profile_ids': common_types.SetOfUUIDsField(nullable=True, default=None) } def from_db_object(self, db_obj): super(Flavor, self).from_db_object(db_obj) if db_obj.get('service_profiles', []): self.service_profile_ids = { sp.service_profile_id for sp in db_obj.service_profiles } else: self.service_profile_ids = set() self.obj_reset_changes(['service_profile_ids']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/floatingip.py0000644000175000017500000000244200000000000022776 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
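# NOTE: illustrative sketch, not part of the original module. FloatingIPDNS
# (below) uses 'floatingip_id' as its primary key, so a record is fetched by
# the owning floating IP rather than by a separate 'id'. The UUID below is
# hypothetical.
#
#     fip_dns = FloatingIPDNS.get_object(context, floatingip_id=fip_id)
#     if fip_dns:
#         fqdn = '%s.%s' % (fip_dns.published_dns_name,
#                           fip_dns.published_dns_domain)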
from neutron_lib.objects import common_types from neutron.db.models import dns as models from neutron.objects import base @base.NeutronObjectRegistry.register class FloatingIPDNS(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = models.FloatingIPDNS primary_keys = ['floatingip_id'] foreign_keys = {'FloatingIP': {'floatingip_id': 'id'}} fields = { 'floatingip_id': common_types.UUIDField(), 'dns_name': common_types.DomainNameField(), 'dns_domain': common_types.DomainNameField(), 'published_dns_name': common_types.DomainNameField(), 'published_dns_domain': common_types.DomainNameField(), } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/ipam.py0000644000175000017500000000663100000000000021574 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from neutron_lib.objects import common_types from oslo_versionedobjects import fields as obj_fields from neutron.ipam.drivers.neutrondb_ipam import db_models from neutron.objects import base @base.NeutronObjectRegistry.register class IpamAllocationPool(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = db_models.IpamAllocationPool foreign_keys = {'IpamSubnet': {'ipam_subnet_id': 'id'}} fields = { 'id': common_types.UUIDField(), 'ipam_subnet_id': common_types.UUIDField(), 'first_ip': obj_fields.IPAddressField(), 'last_ip': obj_fields.IPAddressField(), } fields_no_update = ['ipam_subnet_id'] @classmethod def modify_fields_from_db(cls, db_obj): result = super(IpamAllocationPool, cls).modify_fields_from_db(db_obj) if 'first_ip' in result: result['first_ip'] = netaddr.IPAddress(result['first_ip']) if 'last_ip' in result: result['last_ip'] = netaddr.IPAddress(result['last_ip']) return result @classmethod def modify_fields_to_db(cls, fields): result = super(IpamAllocationPool, cls).modify_fields_to_db(fields) if 'first_ip' in result: result['first_ip'] = cls.filter_to_str(result['first_ip']) if 'last_ip' in result: result['last_ip'] = cls.filter_to_str(result['last_ip']) return result @base.NeutronObjectRegistry.register class IpamAllocation(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = db_models.IpamAllocation primary_keys = ['ip_address', 'ipam_subnet_id'] fields = { 'ip_address': obj_fields.IPAddressField(), 'status': common_types.IpamAllocationStatusEnumField(nullable=True), 'ipam_subnet_id': common_types.UUIDField() } @classmethod def modify_fields_from_db(cls, db_obj): result = super(IpamAllocation, cls).modify_fields_from_db(db_obj) if 'ip_address' in result: result['ip_address'] = netaddr.IPAddress(result['ip_address']) return result @classmethod def modify_fields_to_db(cls, fields): result = super(IpamAllocation, cls).modify_fields_to_db(fields) if 'ip_address' in result: result['ip_address'] = cls.filter_to_str(result['ip_address']) return result 
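# NOTE: illustrative sketch, not part of the original module. It shows the
# string <-> netaddr.IPAddress conversion performed by the two hooks above;
# the addresses below are hypothetical.
#
#     to_db = IpamAllocation.modify_fields_to_db(
#         {'ip_address': netaddr.IPAddress('192.0.2.10')})
#     assert to_db['ip_address'] == '192.0.2.10'
#     from_db = IpamAllocation.modify_fields_from_db(
#         {'ip_address': '192.0.2.10'})
#     assert from_db['ip_address'] == netaddr.IPAddress('192.0.2.10')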
@base.NeutronObjectRegistry.register class IpamSubnet(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = db_models.IpamSubnet fields = { 'id': common_types.UUIDField(), 'neutron_subnet_id': common_types.UUIDField(nullable=True), 'allocation_pools': obj_fields.ListOfObjectsField( 'IpamAllocationPool') } synthetic_fields = ['allocation_pools'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/l3_hamode.py0000644000175000017500000000477000000000000022503 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants from neutron_lib.objects import common_types from oslo_versionedobjects import fields as obj_fields from neutron.db.models import agent as agent_model from neutron.db.models import l3ha from neutron.objects import base @base.NeutronObjectRegistry.register class L3HARouterAgentPortBinding(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = l3ha.L3HARouterAgentPortBinding fields = { 'port_id': common_types.UUIDField(), 'router_id': common_types.UUIDField(), 'l3_agent_id': common_types.UUIDField(nullable=True), 'state': common_types.HARouterEnumField( default=constants.HA_ROUTER_STATE_STANDBY), } primary_keys = ['port_id'] fields_no_update = ['router_id', 'port_id', 'l3_agent_id'] @classmethod def get_l3ha_filter_host_router(cls, context, router_ids, host): query = context.session.query(l3ha.L3HARouterAgentPortBinding) if host: query = query.join(agent_model.Agent).filter( agent_model.Agent.host == host) query = query.filter( l3ha.L3HARouterAgentPortBinding.router_id.in_(router_ids)) return query.all() @base.NeutronObjectRegistry.register class L3HARouterNetwork(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = l3ha.L3HARouterNetwork fields = { 'network_id': common_types.UUIDField(), 'project_id': obj_fields.StringField(), } primary_keys = ['network_id', 'project_id'] @base.NeutronObjectRegistry.register class L3HARouterVRIdAllocation(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = l3ha.L3HARouterVRIdAllocation fields = { 'network_id': common_types.UUIDField(), 'vr_id': obj_fields.IntegerField() } primary_keys = ['network_id', 'vr_id'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/l3agent.py0000644000175000017500000000513500000000000022201 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.objects import common_types from oslo_versionedobjects import fields as obj_fields import sqlalchemy as sa from sqlalchemy.orm import joinedload from sqlalchemy import sql from neutron.db.models import agent as agent_model from neutron.db.models import l3_attrs from neutron.db.models import l3agent from neutron.objects import base @base.NeutronObjectRegistry.register class RouterL3AgentBinding(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = l3agent.RouterL3AgentBinding primary_keys = ['router_id', 'l3_agent_id'] fields = { 'router_id': common_types.UUIDField(), 'l3_agent_id': common_types.UUIDField(), 'binding_index': obj_fields.IntegerField( default=l3agent.LOWEST_BINDING_INDEX), } # TODO(ihrachys) return OVO objects not models # TODO(ihrachys) move under Agent object class @classmethod def get_l3_agents_by_router_ids(cls, context, router_ids): query = context.session.query(l3agent.RouterL3AgentBinding) query = query.options(joinedload('l3_agent')).filter( l3agent.RouterL3AgentBinding.router_id.in_(router_ids)) return [db_obj.l3_agent for db_obj in query.all()] @classmethod def get_down_router_bindings(cls, context, cutoff): query = (context.session.query( l3agent.RouterL3AgentBinding). join(agent_model.Agent). filter(agent_model.Agent.heartbeat_timestamp < cutoff, agent_model.Agent.admin_state_up).outerjoin( l3_attrs.RouterExtraAttributes, l3_attrs.RouterExtraAttributes.router_id == l3agent.RouterL3AgentBinding.router_id).filter( sa.or_( l3_attrs.RouterExtraAttributes.ha == sql.false(), l3_attrs.RouterExtraAttributes.ha == sql.null()))) bindings = [cls._load_object(context, db_obj) for db_obj in query.all()] return bindings ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3230448 neutron-16.0.0.0b2.dev214/neutron/objects/logapi/0000755000175000017500000000000000000000000021541 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/logapi/__init__.py0000644000175000017500000000000000000000000023640 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/logapi/logging_resource.py0000644000175000017500000000327000000000000025452 0ustar00coreycorey00000000000000# Copyright (c) 2017 Fujitsu Limited # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
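# NOTE: illustrative sketch, not part of the original module. For the Log
# object (below), everything identifying the logged resource is listed in
# fields_no_update, so after create() only mutable attributes such as
# 'enabled' may be changed. The values below are hypothetical.
#
#     log = Log(context, project_id='proj-1',
#               resource_type='security_group', enabled=True)
#     log.create()       # 'event' defaults to log_const.ALL_EVENT
#     log.enabled = False
#     log.update()       # fine: 'enabled' is not in fields_no_update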
from neutron_lib.objects import common_types from neutron_lib.objects.logapi import event_types from neutron_lib.services.logapi import constants as log_const from oslo_versionedobjects import fields as obj_fields from neutron.db.models import loggingapi as log_db from neutron.objects import base @base.NeutronObjectRegistry.register class Log(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = log_db.Log fields = { 'id': common_types.UUIDField(), 'project_id': obj_fields.StringField(nullable=True), 'name': obj_fields.StringField(nullable=True), 'resource_type': obj_fields.StringField(), 'resource_id': common_types.UUIDField(nullable=True, default=None), 'target_id': common_types.UUIDField(nullable=True, default=None), 'event': event_types.SecurityEventField(default=log_const.ALL_EVENT), 'enabled': obj_fields.BooleanField(default=True), } fields_no_update = ['project_id', 'resource_type', 'resource_id', 'target_id', 'event'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/metering.py0000644000175000017500000000523600000000000022460 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
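# NOTE: illustrative sketch, not part of the original module. It shows how
# MeteringLabelRule (below) serializes 'remote_ip_prefix' to the string form
# stored in the DB; the CIDR below is hypothetical.
#
#     to_db = MeteringLabelRule.modify_fields_to_db(
#         {'remote_ip_prefix': net_utils.AuthenticIPNetwork('10.0.0.0/24')})
#     assert to_db['remote_ip_prefix'] == '10.0.0.0/24'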
from neutron_lib.objects import common_types from neutron_lib.utils import net as net_utils from oslo_versionedobjects import fields as obj_fields from neutron.db.models import metering as metering_models from neutron.objects import base @base.NeutronObjectRegistry.register class MeteringLabelRule(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = metering_models.MeteringLabelRule foreign_keys = {'MeteringLabel': {'metering_label_id': 'id'}} fields = { 'id': common_types.UUIDField(), 'direction': common_types.FlowDirectionEnumField(nullable=True), 'remote_ip_prefix': common_types.IPNetworkField(nullable=True), 'metering_label_id': common_types.UUIDField(), 'excluded': obj_fields.BooleanField(default=False), } fields_no_update = ['metering_label_id'] @classmethod def modify_fields_from_db(cls, db_obj): result = super(MeteringLabelRule, cls).modify_fields_from_db(db_obj) if 'remote_ip_prefix' in result: result['remote_ip_prefix'] = net_utils.AuthenticIPNetwork( result['remote_ip_prefix']) return result @classmethod def modify_fields_to_db(cls, fields): result = super(MeteringLabelRule, cls).modify_fields_to_db(fields) if 'remote_ip_prefix' in result: result['remote_ip_prefix'] = cls.filter_to_str( result['remote_ip_prefix']) return result @base.NeutronObjectRegistry.register class MeteringLabel(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = metering_models.MeteringLabel synthetic_fields = ['rules'] fields = { 'id': common_types.UUIDField(), 'project_id': obj_fields.StringField(nullable=True), 'name': obj_fields.StringField(), 'description': obj_fields.StringField(), 'rules': obj_fields.ListOfObjectsField('MeteringLabelRule', nullable=True), 'shared': obj_fields.BooleanField(default=False), } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/network.py0000644000175000017500000003324000000000000022333 0ustar00coreycorey00000000000000# Copyright (c) 2016 OpenStack Foundation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
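The modify_fields_from_db()/modify_fields_to_db() pair above is the standard OVO idiom for columns stored as plain strings but exposed as richer types; a standalone illustration of the round trip:

import netaddr
from neutron_lib.utils import net as net_utils

# DB -> OVO: the stored string gains full IPNetwork semantics while
# str() keeps the exact textual form that was read from the column.
prefix = net_utils.AuthenticIPNetwork('10.0.0.0/24')
assert isinstance(prefix, netaddr.IPNetwork)
assert prefix.prefixlen == 24
assert str(prefix) == '10.0.0.0/24'
# OVO -> DB: modify_fields_to_db() applies filter_to_str(), reducing the
# field back to that same string, so the round trip is lossless.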
from neutron_lib.api.definitions import availability_zone as az_def from neutron_lib.api.validators import availability_zone as az_validator from neutron_lib import constants from neutron_lib.objects import common_types from oslo_utils import versionutils from oslo_versionedobjects import exception from oslo_versionedobjects import fields as obj_fields import sqlalchemy as sa from neutron.db.models import dns as dns_models from neutron.db.models import external_net as ext_net_model from neutron.db.models import segment as segment_model from neutron.db import models_v2 from neutron.db.network_dhcp_agent_binding import models as ndab_models from neutron.db.port_security import models as ps_models from neutron.db import rbac_db_models from neutron.objects import agent as agent_obj from neutron.objects import base from neutron.objects.extensions import port_security as base_ps from neutron.objects.qos import binding from neutron.objects import rbac from neutron.objects import rbac_db @base.NeutronObjectRegistry.register class NetworkRBAC(rbac.RBACBaseObject): # Version 1.0: Initial version # Version 1.1: Added 'id' and 'project_id' # Version 1.2: Inherit from rbac.RBACBaseObject; changed 'object_id' from # StringField to UUIDField VERSION = '1.2' db_model = rbac_db_models.NetworkRBAC def obj_make_compatible(self, primitive, target_version): _target_version = versionutils.convert_version_to_tuple(target_version) if _target_version < (1, 1): standard_fields = ['id', 'project_id'] for f in standard_fields: primitive.pop(f, None) @classmethod def get_projects(cls, context, object_id=None, action=None, target_tenant=None): clauses = [] if object_id: clauses.append(rbac_db_models.NetworkRBAC.object_id == object_id) if action: clauses.append(rbac_db_models.NetworkRBAC.action == action) if target_tenant: clauses.append(rbac_db_models.NetworkRBAC.target_tenant == target_tenant) query = context.session.query(rbac_db_models.NetworkRBAC.target_tenant) if clauses: query = query.filter(sa.and_(*clauses)) return [data[0] for data in query] @base.NeutronObjectRegistry.register class NetworkDhcpAgentBinding(base.NeutronDbObject): # Version 1.0: Initial version # Version 1.1: Added 'binding_index' VERSION = '1.1' db_model = ndab_models.NetworkDhcpAgentBinding primary_keys = ['network_id', 'dhcp_agent_id'] fields = { 'network_id': common_types.UUIDField(), 'dhcp_agent_id': common_types.UUIDField(), 'binding_index': obj_fields.IntegerField(), } # NOTE(ndahiwade): The join was implemented this way as get_objects # currently doesn't support operators like '<' or '>' @classmethod def get_down_bindings(cls, context, cutoff): agent_objs = agent_obj.Agent.get_objects(context) dhcp_agent_ids = [obj.id for obj in agent_objs if obj.heartbeat_timestamp < cutoff] return cls.get_objects(context, dhcp_agent_id=dhcp_agent_ids) @base.NeutronObjectRegistry.register class NetworkSegment(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = segment_model.NetworkSegment fields = { 'id': common_types.UUIDField(), 'network_id': common_types.UUIDField(), 'name': obj_fields.StringField(nullable=True), 'network_type': obj_fields.StringField(), 'physical_network': obj_fields.StringField(nullable=True), 'segmentation_id': obj_fields.IntegerField(nullable=True), 'is_dynamic': obj_fields.BooleanField(default=False), 'segment_index': obj_fields.IntegerField(default=0), 'hosts': obj_fields.ListOfStringsField(nullable=True) } synthetic_fields = ['hosts'] fields_no_update = ['network_id'] foreign_keys = { 'Network': 
{'network_id': 'id'}, 'PortBindingLevel': {'id': 'segment_id'}, } def create(self): fields = self.obj_get_changes() with self.db_context_writer(self.obj_context): hosts = self.hosts if hosts is None: hosts = [] super(NetworkSegment, self).create() if 'hosts' in fields: self._attach_hosts(hosts) def update(self): fields = self.obj_get_changes() with self.db_context_writer(self.obj_context): super(NetworkSegment, self).update() if 'hosts' in fields: self._attach_hosts(fields['hosts']) def _attach_hosts(self, hosts): SegmentHostMapping.delete_objects( self.obj_context, segment_id=self.id, ) if hosts: for host in hosts: SegmentHostMapping( self.obj_context, segment_id=self.id, host=host).create() self.hosts = hosts self.obj_reset_changes(['hosts']) def obj_load_attr(self, attrname): if attrname == 'hosts': return self._load_hosts() super(NetworkSegment, self).obj_load_attr(attrname) def _load_hosts(self, db_obj=None): if db_obj: hosts = db_obj.get('segment_host_mapping', []) else: hosts = SegmentHostMapping.get_objects(self.obj_context, segment_id=self.id) self.hosts = [host['host'] for host in hosts] self.obj_reset_changes(['hosts']) def from_db_object(self, db_obj): super(NetworkSegment, self).from_db_object(db_obj) self._load_hosts(db_obj) @classmethod def get_objects(cls, context, _pager=None, **kwargs): if not _pager: _pager = base.Pager() if not _pager.sorts: # (NOTE) True means ASC, False is DESC _pager.sorts = [ (field, True) for field in ('network_id', 'segment_index') ] return super(NetworkSegment, cls).get_objects(context, _pager, **kwargs) @base.NeutronObjectRegistry.register class NetworkPortSecurity(base_ps._PortSecurity): # Version 1.0: Initial version VERSION = "1.0" db_model = ps_models.NetworkSecurityBinding fields_need_translation = {'id': 'network_id'} @base.NeutronObjectRegistry.register class ExternalNetwork(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = ext_net_model.ExternalNetwork foreign_keys = {'Network': {'network_id': 'id'}} primary_keys = ['network_id'] fields = { 'network_id': common_types.UUIDField(), 'is_default': obj_fields.BooleanField(default=False), } @base.NeutronObjectRegistry.register class Network(rbac_db.NeutronRbacObject): # Version 1.0: Initial version # Version 1.1: Changed 'mtu' to be not nullable VERSION = '1.1' rbac_db_cls = NetworkRBAC db_model = models_v2.Network fields = { 'id': common_types.UUIDField(), 'project_id': obj_fields.StringField(nullable=True), 'name': obj_fields.StringField(nullable=True), 'status': obj_fields.StringField(nullable=True), 'admin_state_up': obj_fields.BooleanField(nullable=True), 'vlan_transparent': obj_fields.BooleanField(nullable=True), # TODO(ihrachys): consider converting to a field of stricter type 'availability_zone_hints': obj_fields.ListOfStringsField( nullable=True), 'shared': obj_fields.BooleanField(default=False), 'mtu': obj_fields.IntegerField(default=constants.DEFAULT_NETWORK_MTU), # TODO(ihrachys): consider exposing availability zones # TODO(ihrachys): consider converting to boolean 'security': obj_fields.ObjectField( 'NetworkPortSecurity', nullable=True), 'segments': obj_fields.ListOfObjectsField( 'NetworkSegment', nullable=True), 'dns_domain': common_types.DomainNameField(nullable=True), 'qos_policy_id': common_types.UUIDField(nullable=True, default=None), # TODO(ihrachys): add support for tags, probably through a base class # since it's a feature that will probably later be added for other # resources too # TODO(ihrachys): expose external network attributes } 
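# None of the four entries below is a column on the networksegments table
# row itself; they are realized from related rows (segment host mappings)
# and loaded in from_db_object()/create()/update() further down.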
synthetic_fields = [ 'dns_domain', 'qos_policy_id', 'security', 'segments', ] fields_need_translation = { 'security': 'port_security', } def create(self): fields = self.obj_get_changes() with self.db_context_writer(self.obj_context): dns_domain = self.dns_domain qos_policy_id = self.qos_policy_id super(Network, self).create() if 'dns_domain' in fields: self._set_dns_domain(dns_domain) if 'qos_policy_id' in fields: self._attach_qos_policy(qos_policy_id) def update(self): fields = self.obj_get_changes() with self.db_context_writer(self.obj_context): super(Network, self).update() if 'dns_domain' in fields: self._set_dns_domain(fields['dns_domain']) if 'qos_policy_id' in fields: self._attach_qos_policy(fields['qos_policy_id']) def _attach_qos_policy(self, qos_policy_id): binding.QosPolicyNetworkBinding.delete_objects( self.obj_context, network_id=self.id) if qos_policy_id: net_binding_obj = binding.QosPolicyNetworkBinding( self.obj_context, policy_id=qos_policy_id, network_id=self.id) net_binding_obj.create() self.qos_policy_id = qos_policy_id self.obj_reset_changes(['qos_policy_id']) def _set_dns_domain(self, dns_domain): NetworkDNSDomain.delete_objects(self.obj_context, network_id=self.id) if dns_domain: NetworkDNSDomain(self.obj_context, network_id=self.id, dns_domain=dns_domain).create() self.dns_domain = dns_domain self.obj_reset_changes(['dns_domain']) @classmethod def modify_fields_from_db(cls, db_obj): result = super(Network, cls).modify_fields_from_db(db_obj) if az_def.AZ_HINTS in result: result[az_def.AZ_HINTS] = ( az_validator.convert_az_string_to_list( result[az_def.AZ_HINTS])) return result @classmethod def modify_fields_to_db(cls, fields): result = super(Network, cls).modify_fields_to_db(fields) if az_def.AZ_HINTS in result: result[az_def.AZ_HINTS] = ( az_validator.convert_az_list_to_string( result[az_def.AZ_HINTS])) return result def from_db_object(self, *objs): super(Network, self).from_db_object(*objs) for db_obj in objs: # extract domain name if db_obj.get('dns_domain'): self.dns_domain = ( db_obj.dns_domain.dns_domain ) else: self.dns_domain = None self.obj_reset_changes(['dns_domain']) # extract qos policy binding if db_obj.get('qos_policy_binding'): self.qos_policy_id = ( db_obj.qos_policy_binding.policy_id ) else: self.qos_policy_id = None self.obj_reset_changes(['qos_policy_id']) @classmethod def get_bound_tenant_ids(cls, context, policy_id): # TODO(ihrachys): provide actual implementation return set() def obj_make_compatible(self, primitive, target_version): _target_version = versionutils.convert_version_to_tuple(target_version) if _target_version >= (1, 1): if primitive['mtu'] is None: # mtu will not be nullable after raise exception.IncompatibleObjectVersion( objver=target_version, objname=self.__class__.__name__) @base.NeutronObjectRegistry.register class SegmentHostMapping(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = segment_model.SegmentHostMapping fields = { 'segment_id': common_types.UUIDField(), 'host': obj_fields.StringField(), } primary_keys = ['segment_id', 'host'] @base.NeutronObjectRegistry.register class NetworkDNSDomain(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = dns_models.NetworkDNSDomain primary_keys = ['network_id'] fields = { 'network_id': common_types.UUIDField(), 'dns_domain': common_types.DomainNameField(), } @classmethod def get_net_dns_from_port(cls, context, port_id): net_dns = context.session.query(cls.db_model).join( models_v2.Port, cls.db_model.network_id == 
models_v2.Port.network_id).filter_by( id=port_id).one_or_none() if net_dns is None: return None return super(NetworkDNSDomain, cls)._load_object(context, net_dns) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/network_segment_range.py0000644000175000017500000002421200000000000025230 0ustar00coreycorey00000000000000# Copyright (c) 2019 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import itertools from neutron_lib import constants from neutron_lib.db import utils as db_utils from neutron_lib import exceptions as n_exc from neutron_lib.objects import common_types from oslo_versionedobjects import fields as obj_fields from sqlalchemy import and_ from sqlalchemy import not_ from sqlalchemy import or_ from sqlalchemy import sql from neutron._i18n import _ from neutron.common import _constants as common_constants from neutron.db.models import network_segment_range as range_model from neutron.db.models.plugins.ml2 import geneveallocation as \ geneve_alloc_model from neutron.db.models.plugins.ml2 import gre_allocation_endpoints as \ gre_alloc_model from neutron.db.models.plugins.ml2 import vlanallocation as vlan_alloc_model from neutron.db.models.plugins.ml2 import vxlanallocation as vxlan_alloc_model from neutron.db.models import segment as segments_model from neutron.db import models_v2 from neutron.objects import base models_map = { constants.TYPE_VLAN: vlan_alloc_model.VlanAllocation, constants.TYPE_VXLAN: vxlan_alloc_model.VxlanAllocation, constants.TYPE_GRE: gre_alloc_model.GreAllocation, constants.TYPE_GENEVE: geneve_alloc_model.GeneveAllocation } @base.NeutronObjectRegistry.register class NetworkSegmentRange(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = range_model.NetworkSegmentRange primary_keys = ['id'] fields = { 'id': common_types.UUIDField(), 'name': obj_fields.StringField(nullable=True), 'default': obj_fields.BooleanField(nullable=False), 'shared': obj_fields.BooleanField(nullable=False), 'project_id': obj_fields.StringField(nullable=True), 'network_type': common_types.NetworkSegmentRangeNetworkTypeEnumField( nullable=False), 'physical_network': obj_fields.StringField(nullable=True), 'minimum': obj_fields.IntegerField(nullable=True), 'maximum': obj_fields.IntegerField(nullable=True) } def to_dict(self, fields=None): _dict = super(NetworkSegmentRange, self).to_dict() # extend the network segment range dict with `available` and `used` # fields _dict.update({'available': self._get_available_allocation()}) _dict.update({'used': self._get_used_allocation_mapping()}) # TODO(kailun): For tag mechanism. This will be removed in bug/1704137 try: _dict['tags'] = [t.tag for t in self.db_obj.standard_attr.tags] except AttributeError: # AttributeError can be raised when accessing self.db_obj # or self.db_obj.standard_attr pass # NOTE(ralonsoh): this workaround should be removed once the migration # from "tenant_id" to "project_id" is finished.
_dict = db_utils.resource_fields(_dict, fields) _dict.pop('tenant_id', None) return _dict def _check_shared_project_id(self, action): if self.shared is False and not self.project_id: raise n_exc.ObjectActionError( action=action, reason='if NetworkSegmentRange is not shared, it must have a ' 'project_id') def create(self): self._check_shared_project_id('create') super(NetworkSegmentRange, self).create() def update(self): self._check_shared_project_id('update') super(NetworkSegmentRange, self).update() def _get_allocation_model_details(self): model = models_map.get(self.network_type) if model is not None: alloc_segmentation_id = model.get_segmentation_id() else: msg = (_("network_type '%s' unknown for getting allocation " "information") % self.network_type) raise n_exc.InvalidInput(error_message=msg) allocated = model.allocated return model, alloc_segmentation_id, allocated def _get_available_allocation(self): with self.db_context_reader(self.obj_context): model, alloc_segmentation_id, allocated = ( self._get_allocation_model_details()) query = self.obj_context.session.query(alloc_segmentation_id) query = query.filter(and_( alloc_segmentation_id >= self.minimum, alloc_segmentation_id <= self.maximum), not_(allocated)) if self.network_type == constants.TYPE_VLAN: alloc_available = query.filter( model.physical_network == self.physical_network).all() else: alloc_available = query.all() return [segmentation_id for (segmentation_id,) in alloc_available] def _get_used_allocation_mapping(self): with self.db_context_reader(self.obj_context): query = self.obj_context.session.query( segments_model.NetworkSegment.segmentation_id, models_v2.Network.project_id) alloc_used = (query.filter(and_( segments_model.NetworkSegment.network_type == self.network_type, segments_model.NetworkSegment.physical_network == self.physical_network, segments_model.NetworkSegment.segmentation_id >= self.minimum, segments_model.NetworkSegment.segmentation_id <= self.maximum)) .filter( segments_model.NetworkSegment.network_id == models_v2.Network.id)).all() return {segmentation_id: project_id for segmentation_id, project_id in alloc_used} @classmethod def _build_query_segments(cls, context, model, network_type, **filters): columns = set(dict(model.__table__.columns)) model_filters = dict((k, filters[k]) for k in columns & set(filters.keys())) query = (context.session.query(model) .filter_by(allocated=False, **model_filters).distinct()) _and = and_( cls.db_model.network_type == network_type, model.physical_network == cls.db_model.physical_network if network_type == constants.TYPE_VLAN else sql.expression.true()) return query.join(range_model.NetworkSegmentRange, _and) @classmethod def get_segments_for_project(cls, context, model, network_type, model_segmentation_id, **filters): _filters = copy.deepcopy(filters) project_id = _filters.pop('project_id', None) if not project_id: return [] with cls.db_context_reader(context): query = cls._build_query_segments(context, model, network_type, **_filters) query = query.filter(and_( model_segmentation_id >= cls.db_model.minimum, model_segmentation_id <= cls.db_model.maximum, cls.db_model.project_id == project_id)) return query.limit(common_constants.IDPOOL_SELECT_SIZE).all() @classmethod def get_segments_shared(cls, context, model, network_type, model_segmentation_id, **filters): _filters = copy.deepcopy(filters) project_id = _filters.pop('project_id', None) with cls.db_context_reader(context): # Retrieve default segment ID range. 
default_range = context.session.query(cls.db_model).filter( and_(cls.db_model.network_type == network_type, cls.db_model.default == sql.expression.true())) if network_type == constants.TYPE_VLAN: default_range = default_range.filter( cls.db_model.physical_network == _filters['physical_network']) segment_ids = set(range(default_range.all()[0].minimum, default_range.all()[0].maximum + 1)) # Retrieve other project segment ID ranges (not own project, not # default range). other_project_ranges = context.session.query(cls.db_model).filter( and_(cls.db_model.project_id != project_id, cls.db_model.project_id.isnot(None), cls.db_model.network_type == network_type)) if network_type == constants.TYPE_VLAN: other_project_ranges = other_project_ranges.filter( cls.db_model.physical_network == _filters['physical_network']) for other_project_range in other_project_ranges.all(): _set = set(range(other_project_range.minimum, other_project_range.maximum + 1)) segment_ids.difference_update(_set) # NOTE(ralonsoh): https://stackoverflow.com/questions/4628333/ # converting-a-list-of-integers-into-range-in-python segment_ranges = [ [t[0][1], t[-1][1]] for t in (tuple(g[1]) for g in itertools.groupby( enumerate(segment_ids), key=lambda enum_seg: enum_seg[1] - enum_seg[0]))] # Retrieve all segments belonging to the default range except those # assigned to other projects. query = cls._build_query_segments(context, model, network_type, **_filters) clauses = [and_(model_segmentation_id >= range[0], model_segmentation_id <= range[1]) for range in segment_ranges] query = query.filter(or_(*clauses)) return query.limit(common_constants.IDPOOL_SELECT_SIZE).all() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3270447 neutron-16.0.0.0b2.dev214/neutron/objects/plugins/0000755000175000017500000000000000000000000021747 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/plugins/__init__.py0000644000175000017500000000000000000000000024046 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3270447 neutron-16.0.0.0b2.dev214/neutron/objects/plugins/ml2/0000755000175000017500000000000000000000000022441 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/plugins/ml2/__init__.py0000644000175000017500000000000000000000000024540 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/plugins/ml2/base.py0000644000175000017500000000520600000000000023730 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
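The itertools.groupby() idiom referenced in the NOTE(ralonsoh) link above compresses the surviving segment IDs into inclusive [min, max] ranges: within a run of consecutive IDs, the difference between each ID and its enumeration index is constant, so it serves as the grouping key. A standalone sketch (sorted() is added here for clarity, since sets do not guarantee iteration order):

import itertools


def to_ranges(segment_ids):
    # Each run of consecutive IDs shares one (id - index) key, so every
    # groupby() group corresponds to exactly one inclusive range.
    return [
        [run[0][1], run[-1][1]]
        for run in (list(g) for _, g in itertools.groupby(
            enumerate(sorted(segment_ids)),
            key=lambda enum_seg: enum_seg[1] - enum_seg[0]))
    ]


assert to_ranges({10, 11, 12, 40, 41, 99}) == [[10, 12], [40, 41], [99, 99]]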
import abc import netaddr from neutron.common import _constants as common_constants from neutron.objects import base class EndpointBase(base.NeutronDbObject): primary_keys = ['ip_address'] @classmethod def modify_fields_from_db(cls, db_obj): result = super(EndpointBase, cls).modify_fields_from_db(db_obj) if 'ip_address' in result: result['ip_address'] = netaddr.IPAddress(result['ip_address']) return result @classmethod def modify_fields_to_db(cls, fields): result = super(EndpointBase, cls).modify_fields_to_db(fields) if 'ip_address' in fields: result['ip_address'] = cls.filter_to_str(result['ip_address']) return result class SegmentAllocation(object, metaclass=abc.ABCMeta): @classmethod def get_unallocated_segments(cls, context, **filters): with cls.db_context_reader(context): columns = set(dict(cls.db_model.__table__.columns)) model_filters = dict((k, filters[k]) for k in columns & set(filters.keys())) query = context.session.query(cls.db_model).filter_by( allocated=False, **model_filters) return query.limit(common_constants.IDPOOL_SELECT_SIZE).all() @classmethod def allocate(cls, context, **segment): with cls.db_context_writer(context): return context.session.query(cls.db_model).filter_by( allocated=False, **segment).update({'allocated': True}) @classmethod def deallocate(cls, context, **segment): with cls.db_context_writer(context): return context.session.query(cls.db_model).filter_by( allocated=True, **segment).update({'allocated': False}) @classmethod def update_primary_keys(cls, _dict, segmentation_id=None, **kwargs): _dict[cls.primary_keys[0]] = segmentation_id @abc.abstractmethod def get_segmentation_id(self): pass @property def segmentation_id(self): return self.db_obj.segmentation_id ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/plugins/ml2/flatallocation.py0000644000175000017500000000211600000000000026007 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as n_const from oslo_versionedobjects import fields as obj_fields from neutron.db.models.plugins.ml2 import flatallocation from neutron.objects import base @base.NeutronObjectRegistry.register class FlatAllocation(base.NeutronDbObject): # Version 1.0: Initial Version VERSION = '1.0' db_model = flatallocation.FlatAllocation fields = { 'physical_network': obj_fields.StringField() } primary_keys = ['physical_network'] network_type = n_const.TYPE_FLAT ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/plugins/ml2/geneveallocation.py0000644000175000017500000000325400000000000026336 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as n_const from oslo_versionedobjects import fields as obj_fields from neutron.db.models.plugins.ml2 import geneveallocation from neutron.objects import base from neutron.objects.plugins.ml2 import base as ml2_base @base.NeutronObjectRegistry.register class GeneveAllocation(base.NeutronDbObject, ml2_base.SegmentAllocation): # Version 1.0: Initial version VERSION = '1.0' db_model = geneveallocation.GeneveAllocation primary_keys = ['geneve_vni'] fields = { 'geneve_vni': obj_fields.IntegerField(), 'allocated': obj_fields.BooleanField(default=False), } network_type = n_const.TYPE_GENEVE @classmethod def get_segmentation_id(cls): return cls.db_model.get_segmentation_id() @base.NeutronObjectRegistry.register class GeneveEndpoint(ml2_base.EndpointBase): # Version 1.0: Initial version VERSION = '1.0' db_model = geneveallocation.GeneveEndpoints fields = { 'ip_address': obj_fields.IPAddressField(), 'host': obj_fields.StringField(nullable=True), } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/plugins/ml2/greallocation.py0000644000175000017500000000323200000000000025636 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as n_const from oslo_versionedobjects import fields as obj_fields from neutron.db.models.plugins.ml2 import gre_allocation_endpoints as gre_model from neutron.objects import base from neutron.objects.plugins.ml2 import base as ml2_base @base.NeutronObjectRegistry.register class GreAllocation(base.NeutronDbObject, ml2_base.SegmentAllocation): # Version 1.0: Initial version VERSION = '1.0' db_model = gre_model.GreAllocation primary_keys = ['gre_id'] fields = { 'gre_id': obj_fields.IntegerField(), 'allocated': obj_fields.BooleanField(default=False) } network_type = n_const.TYPE_GRE @classmethod def get_segmentation_id(cls): return cls.db_model.get_segmentation_id() @base.NeutronObjectRegistry.register class GreEndpoint(ml2_base.EndpointBase): # Version 1.0: Initial version VERSION = '1.0' db_model = gre_model.GreEndpoints fields = { 'ip_address': obj_fields.IPAddressField(), 'host': obj_fields.StringField(nullable=True) } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/plugins/ml2/vlanallocation.py0000644000175000017500000000511000000000000026016 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as n_const from neutron_lib.objects import common_types from oslo_versionedobjects import fields as obj_fields from neutron.db.models.plugins.ml2 import vlanallocation as vlan_alloc_model from neutron.objects import base from neutron.objects.plugins.ml2 import base as ml2_base @base.NeutronObjectRegistry.register class VlanAllocation(base.NeutronDbObject, ml2_base.SegmentAllocation): # Version 1.0: Initial version VERSION = '1.0' db_model = vlan_alloc_model.VlanAllocation fields = { 'physical_network': obj_fields.StringField(), 'vlan_id': common_types.VlanIdRangeField(), 'allocated': obj_fields.BooleanField(), } primary_keys = ['physical_network', 'vlan_id'] network_type = n_const.TYPE_VLAN @staticmethod def get_physical_networks(context): query = context.session.query(VlanAllocation.db_model.physical_network) query = query.group_by(VlanAllocation.db_model.physical_network) physnets = query.all() return {physnet.physical_network for physnet in physnets} @staticmethod def delete_physical_networks(context, physical_networks): column = VlanAllocation.db_model.physical_network context.session.query(VlanAllocation.db_model).filter( column.in_(physical_networks)).delete(synchronize_session=False) @staticmethod def bulk_create(ctx, physical_network, vlan_ids): ctx.session.bulk_insert_mappings( vlan_alloc_model.VlanAllocation, [{'physical_network': physical_network, 'allocated': False, 'vlan_id': vlan_id} for vlan_id in vlan_ids]) @classmethod def update_primary_keys(cls, _dict, segmentation_id=None, physical_network=None): _dict['physical_network'] = physical_network _dict['vlan_id'] = segmentation_id @classmethod def get_segmentation_id(cls): return cls.db_model.get_segmentation_id() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/plugins/ml2/vxlanallocation.py0000644000175000017500000000341100000000000026210 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
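A sketch of how a type driver might combine the VlanAllocation helpers above; the physnet name and VLAN range are illustrative:

from neutron_lib import context as n_context

from neutron.objects.plugins.ml2 import vlanallocation as vlan_obj

ctx = n_context.get_admin_context()

# Seed the pool for one physical network with a single multi-row INSERT.
vlan_obj.VlanAllocation.bulk_create(ctx, 'physnet1', range(100, 200))

# allocate() flips allocated=False -> True and returns the number of rows
# it touched: 0 means the VLAN was already taken (or never seeded).
if vlan_obj.VlanAllocation.allocate(
        ctx, physical_network='physnet1', vlan_id=100):
    print('vlan 100 on physnet1 reserved')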
from neutron_lib import constants as n_const from neutron_lib.objects import common_types from oslo_versionedobjects import fields as obj_fields from neutron.db.models.plugins.ml2 import vxlanallocation as vxlan_model from neutron.objects import base from neutron.objects.plugins.ml2 import base as ml2_base @base.NeutronObjectRegistry.register class VxlanAllocation(base.NeutronDbObject, ml2_base.SegmentAllocation): # Version 1.0: Initial version VERSION = '1.0' db_model = vxlan_model.VxlanAllocation primary_keys = ['vxlan_vni'] fields = { 'vxlan_vni': obj_fields.IntegerField(), 'allocated': obj_fields.BooleanField(default=False), } network_type = n_const.TYPE_VXLAN @classmethod def get_segmentation_id(cls): return cls.db_model.get_segmentation_id() @base.NeutronObjectRegistry.register class VxlanEndpoint(ml2_base.EndpointBase): # Version 1.0: Initial version VERSION = '1.0' db_model = vxlan_model.VxlanEndpoints fields = { 'ip_address': obj_fields.IPAddressField(), 'udp_port': common_types.PortRangeField(), 'host': obj_fields.StringField(nullable=True), } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3270447 neutron-16.0.0.0b2.dev214/neutron/objects/port/0000755000175000017500000000000000000000000021252 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/port/__init__.py0000644000175000017500000000000000000000000023351 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3270447 neutron-16.0.0.0b2.dev214/neutron/objects/port/extensions/0000755000175000017500000000000000000000000023451 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/port/extensions/__init__.py0000644000175000017500000000000000000000000025550 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/port/extensions/allowedaddresspairs.py0000644000175000017500000000447500000000000030071 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
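Drawing from the VNI pool defined above follows the same pattern for every SegmentAllocation subclass; a sketch (the random choice mirrors how allocators commonly spread contention, and is an illustration rather than this tree's own logic):

import random

from neutron_lib import context as n_context

from neutron.objects.plugins.ml2 import vxlanallocation as vxlan_obj


def pick_free_vni(ctx):
    # get_unallocated_segments() returns at most IDPOOL_SELECT_SIZE free
    # rows; choosing one at random reduces races between concurrent
    # allocators targeting the same row.
    candidates = vxlan_obj.VxlanAllocation.get_unallocated_segments(ctx)
    if not candidates:
        return None
    vni = random.choice(candidates).vxlan_vni
    if vxlan_obj.VxlanAllocation.allocate(ctx, vxlan_vni=vni):
        return vni
    return None  # lost the race; the caller may simply retry


vni = pick_free_vni(n_context.get_admin_context())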
from neutron_lib.objects import common_types from neutron_lib.utils import net as net_utils from neutron.db.models import allowed_address_pair as models from neutron.objects import base @base.NeutronObjectRegistry.register class AllowedAddressPair(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = models.AllowedAddressPair primary_keys = ['port_id', 'mac_address', 'ip_address'] fields = { 'port_id': common_types.UUIDField(), 'mac_address': common_types.MACAddressField(), 'ip_address': common_types.IPNetworkField(), } foreign_keys = { 'Port': {'port_id': 'id'}, } # TODO(mhickey): get rid of it once we switch the db model to using # custom types. @classmethod def modify_fields_to_db(cls, fields): result = super(AllowedAddressPair, cls).modify_fields_to_db(fields) if 'ip_address' in result: result['ip_address'] = cls.filter_to_str(result['ip_address']) if 'mac_address' in result: result['mac_address'] = cls.filter_to_str(result['mac_address']) return result # TODO(mhickey): get rid of it once we switch the db model to using # custom types. @classmethod def modify_fields_from_db(cls, db_obj): fields = super(AllowedAddressPair, cls).modify_fields_from_db(db_obj) if 'ip_address' in fields: # retain string format as stored in the database fields['ip_address'] = net_utils.AuthenticIPNetwork( fields['ip_address']) if 'mac_address' in fields: # retain string format as stored in the database fields['mac_address'] = net_utils.AuthenticEUI( fields['mac_address']) return fields ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/port/extensions/data_plane_status.py0000644000175000017500000000230500000000000027516 0ustar00coreycorey00000000000000# Copyright (c) 2017 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.objects import common_types from oslo_versionedobjects import fields as obj_fields from neutron.db.models import data_plane_status as db_models from neutron.objects import base @base.NeutronObjectRegistry.register class PortDataPlaneStatus(base.NeutronDbObject): # Version 1.0: Initial version VERSION = "1.0" db_model = db_models.PortDataPlaneStatus primary_keys = ['port_id'] fields = { 'port_id': common_types.UUIDField(), 'data_plane_status': obj_fields.StringField(), } foreign_keys = {'Port': {'port_id': 'id'}} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/port/extensions/extra_dhcp_opt.py0000644000175000017500000000237500000000000027035 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.objects import common_types from oslo_versionedobjects import fields as obj_fields from neutron.db.extra_dhcp_opt import models from neutron.objects import base @base.NeutronObjectRegistry.register class ExtraDhcpOpt(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = models.ExtraDhcpOpt fields = { 'id': common_types.UUIDField(), 'port_id': common_types.UUIDField(), 'opt_name': obj_fields.StringField(), 'opt_value': obj_fields.StringField(), 'ip_version': obj_fields.IntegerField(), } fields_no_update = ['port_id'] foreign_keys = { 'Port': {'port_id': 'id'}, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/port/extensions/port_security.py0000644000175000017500000000166000000000000026741 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.db.port_security import models from neutron.objects import base from neutron.objects.extensions import port_security as base_ps @base.NeutronObjectRegistry.register class PortSecurity(base_ps._PortSecurity): # Version 1.0: Initial version VERSION = "1.0" fields_need_translation = {'id': 'port_id'} db_model = models.PortSecurityBinding ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/port/extensions/uplink_status_propagation.py0000644000175000017500000000226300000000000031336 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
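A creation sketch for the ExtraDhcpOpt object above; port_id stands in for an existing port's UUID and the PXE option values are illustrative:

from neutron_lib import context as n_context
from oslo_utils import uuidutils

from neutron.objects.port.extensions import extra_dhcp_opt as edo_obj

ctx = n_context.get_admin_context()
port_id = 'an-existing-port-uuid'  # placeholder
opt = edo_obj.ExtraDhcpOpt(ctx,
                           id=uuidutils.generate_uuid(),
                           port_id=port_id,
                           opt_name='bootfile-name',
                           opt_value='pxelinux.0',
                           ip_version=4)
opt.create()

# The foreign_keys entry above makes the per-port lookup natural:
opts = edo_obj.ExtraDhcpOpt.get_objects(ctx, port_id=port_id)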
from neutron_lib.objects import common_types from oslo_versionedobjects import fields as obj_fields from neutron.db.models import uplink_status_propagation as db_models from neutron.objects import base @base.NeutronObjectRegistry.register class PortUplinkStatusPropagation(base.NeutronDbObject): # Version 1.0: Initial version VERSION = "1.0" db_model = db_models.PortUplinkStatusPropagation primary_keys = ['port_id'] fields = { 'port_id': common_types.UUIDField(), 'propagate_uplink_status': obj_fields.BooleanField(default=False), } foreign_keys = {'Port': {'port_id': 'id'}} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/port_forwarding.py0000644000175000017500000001371600000000000024056 0ustar00coreycorey00000000000000# Copyright (c) 2018 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools import netaddr from neutron_lib.objects import common_types from neutron.db.models import l3 from neutron.db.models import port_forwarding as models from neutron.objects import base from neutron.objects import router from neutron_lib import constants as lib_const from oslo_utils import versionutils from oslo_versionedobjects import fields as obj_fields FIELDS_NOT_SUPPORT_FILTER = ['internal_ip_address', 'internal_port'] @base.NeutronObjectRegistry.register class PortForwarding(base.NeutronDbObject): # Version 1.0: Initial version # Version 1.1: Change unique constraint # Version 1.2: Add "description" field VERSION = '1.2' db_model = models.PortForwarding primary_keys = ['id'] foreign_keys = {'FloatingIP': {'floatingip_id': 'id'}, 'Port': {'internal_port_id': 'id'}} # NOTE: the 'socket': 'socket' entry may look odd: this object does not # define a field named "socket", but the db model does. The object # instead defines "internal_ip_address" and "internal_port", from which # the "socket" column value is constructed; see # neutron/objects/base.py#n468 for why it works this way. If 'socket' # were not listed in fields_need_translation, the OVO base class would # skip that column when loading from the db.
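# Concretely: a row whose 'socket' column holds '10.0.0.3:8080' surfaces
# on the object as internal_ip_address=10.0.0.3 and internal_port=8080,
# and modify_fields_to_db() below joins them back into the single column.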
fields_need_translation = { 'socket': 'socket', 'internal_port_id': 'internal_neutron_port_id' } fields = { 'id': common_types.UUIDField(), 'floatingip_id': common_types.UUIDField(nullable=False), 'external_port': common_types.PortRangeField(nullable=False), 'protocol': common_types.IpProtocolEnumField(nullable=False), 'internal_port_id': common_types.UUIDField(nullable=False), 'internal_ip_address': obj_fields.IPV4AddressField(), 'internal_port': common_types.PortRangeField(nullable=False), 'floating_ip_address': obj_fields.IPV4AddressField(), 'router_id': common_types.UUIDField(), 'description': obj_fields.StringField() } comparison_ignored_fields = ['revision_number', 'updated_at', 'created_at'] synthetic_fields = ['floating_ip_address', 'router_id'] fields_no_update = { 'id', 'floatingip_id' } def __eq__(self, other): for attr in self.fields: # Some fields are inherited from standard attributes and are # irrelevant while comparing two PortForwarding. if attr in self.comparison_ignored_fields: continue if getattr(self, attr) != getattr(other, attr): return False return True def obj_load_attr(self, attrname): if attrname in ['floating_ip_address', 'router_id']: return self._load_attr_from_fip(attrname) super(PortForwarding, self).obj_load_attr(attrname) def _load_attr_from_fip(self, attrname): # get all necessary info from fip obj fip_obj = router.FloatingIP.get_object( self.obj_context, id=self.floatingip_id) value = getattr(fip_obj, attrname) setattr(self, attrname, value) self.obj_reset_changes([attrname]) def from_db_object(self, db_obj): super(PortForwarding, self).from_db_object(db_obj) self._load_attr_from_fip(attrname='router_id') self._load_attr_from_fip(attrname='floating_ip_address') def obj_make_compatible(self, primitive, target_version): _target_version = versionutils.convert_version_to_tuple(target_version) if _target_version < (1, 2): primitive.pop('description', None) @classmethod def modify_fields_from_db(cls, db_obj): result = super(PortForwarding, cls).modify_fields_from_db(db_obj) if 'socket' in result: groups = result['socket'].split(":") result['internal_ip_address'] = netaddr.IPAddress( groups[0], version=lib_const.IP_VERSION_4) result['internal_port'] = int(groups[1]) del result['socket'] return result @classmethod def modify_fields_to_db(cls, fields): result = super(PortForwarding, cls).modify_fields_to_db(fields) if 'internal_ip_address' in result and 'internal_port' in result: result['socket'] = str( result['internal_ip_address']) + ":" + str( result['internal_port']) del result['internal_ip_address'] del result['internal_port'] return result @classmethod def get_port_forwarding_obj_by_routers(cls, context, router_ids): query = context.session.query(cls.db_model, l3.FloatingIP) query = query.join(l3.FloatingIP, cls.db_model.floatingip_id == l3.FloatingIP.id) query = query.filter(l3.FloatingIP.router_id.in_(router_ids)) return cls._unique_port_forwarding_iterator(query) @classmethod def _unique_port_forwarding_iterator(cls, query): q = query.order_by(l3.FloatingIP.router_id) keyfunc = lambda row: row[1] group_iterator = itertools.groupby(q, keyfunc) for key, value in group_iterator: for row in value: yield (row[1]['router_id'], row[1]['floating_ip_address'], row[0]['id'], row[1]['id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/ports.py0000644000175000017500000005747400000000000022020 0ustar00coreycorey00000000000000# All Rights Reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from neutron_lib import constants from neutron_lib.objects import common_types from neutron_lib.utils import net as net_utils from oslo_log import log as logging from oslo_utils import versionutils from oslo_versionedobjects import fields as obj_fields from neutron.db.models import dns as dns_models from neutron.db.models import l3 from neutron.db.models import securitygroup as sg_models from neutron.db import models_v2 from neutron.objects import base from neutron.objects.db import api as obj_db_api from neutron.objects.qos import binding from neutron.plugins.ml2 import models as ml2_models LOG = logging.getLogger(__name__) class PortBindingBase(base.NeutronDbObject): foreign_keys = { 'Port': {'port_id': 'id'}, } @classmethod def modify_fields_to_db(cls, fields): result = super(PortBindingBase, cls).modify_fields_to_db(fields) for field in ['profile', 'vif_details']: if field in result: # dump field into string, set '' if empty '{}' or None result[field] = ( cls.filter_to_json_str(result[field], default='')) return result @classmethod def modify_fields_from_db(cls, db_obj): fields = super(PortBindingBase, cls).modify_fields_from_db(db_obj) if 'vif_details' in fields: # load string from DB into dict, set None if vif_details is '' fields['vif_details'] = ( cls.load_json_from_str(fields['vif_details'])) if 'profile' in fields: # load string from DB into dict, set {} if profile is '' fields['profile'] = ( cls.load_json_from_str(fields['profile'], default={})) return fields @base.NeutronObjectRegistry.register class PortBinding(PortBindingBase): # Version 1.0: Initial version VERSION = '1.0' db_model = ml2_models.PortBinding fields = { 'port_id': common_types.UUIDField(), 'host': obj_fields.StringField(), 'profile': common_types.DictOfMiscValuesField(), 'vif_type': obj_fields.StringField(), 'vif_details': common_types.DictOfMiscValuesField(nullable=True), 'vnic_type': obj_fields.StringField(), 'status': common_types.PortBindingStatusEnumField( default=constants.ACTIVE), } primary_keys = ['port_id', 'host'] @base.NeutronObjectRegistry.register class DistributedPortBinding(PortBindingBase): # Version 1.0: Initial version VERSION = '1.0' db_model = ml2_models.DistributedPortBinding fields = { 'port_id': common_types.UUIDField(), 'host': obj_fields.StringField(), 'profile': common_types.DictOfMiscValuesField(), 'vif_type': obj_fields.StringField(), 'vif_details': common_types.DictOfMiscValuesField(nullable=True), 'vnic_type': obj_fields.StringField(), # NOTE(ihrachys): Fields below are specific to this type of binding. 
In # the future, we could think of converging different types of bindings # into a single field 'status': obj_fields.StringField(), 'router_id': obj_fields.StringField(nullable=True), } primary_keys = ['host', 'port_id'] @base.NeutronObjectRegistry.register class PortBindingLevel(base.NeutronDbObject): # Version 1.0: Initial version # Version 1.1: Added segment_id VERSION = '1.1' db_model = ml2_models.PortBindingLevel primary_keys = ['port_id', 'host', 'level'] fields = { 'port_id': common_types.UUIDField(), 'host': obj_fields.StringField(), 'level': obj_fields.IntegerField(), 'driver': obj_fields.StringField(nullable=True), 'segment': obj_fields.ObjectField( 'NetworkSegment', nullable=True ), # arguably redundant but allows us to define foreign key for 'segment' # synthetic field inside NetworkSegment definition 'segment_id': common_types.UUIDField(nullable=True), } synthetic_fields = ['segment'] foreign_keys = { 'Port': {'port_id': 'id'}, } @classmethod def get_objects(cls, context, _pager=None, validate_filters=True, **kwargs): if not _pager: _pager = base.Pager() if not _pager.sorts: # (NOTE) True means ASC, False is DESC _pager.sorts = [('port_id', True), ('level', True)] return super(PortBindingLevel, cls).get_objects( context, _pager, validate_filters, **kwargs) def obj_make_compatible(self, primitive, target_version): _target_version = versionutils.convert_version_to_tuple(target_version) if _target_version < (1, 1): primitive.pop('segment_id', None) @base.NeutronObjectRegistry.register class IPAllocation(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = models_v2.IPAllocation fields = { 'port_id': common_types.UUIDField(nullable=True), 'subnet_id': common_types.UUIDField(), 'network_id': common_types.UUIDField(), 'ip_address': obj_fields.IPAddressField(), } fields_no_update = fields.keys() primary_keys = ['subnet_id', 'network_id', 'ip_address'] foreign_keys = { 'Port': {'port_id': 'id'}, } # TODO(rossella_s): get rid of it once we switch the db model to using # custom types. @classmethod def modify_fields_to_db(cls, fields): result = super(IPAllocation, cls).modify_fields_to_db(fields) if 'ip_address' in result: result['ip_address'] = cls.filter_to_str(result['ip_address']) return result # TODO(rossella_s): get rid of it once we switch the db model to using # custom types. @classmethod def modify_fields_from_db(cls, db_obj): fields = super(IPAllocation, cls).modify_fields_from_db(db_obj) if 'ip_address' in fields: fields['ip_address'] = netaddr.IPAddress(fields['ip_address']) return fields @classmethod def get_alloc_by_subnet_id(cls, context, subnet_id, device_owner, exclude=True): # need to join with ports table as IPAllocation's port # is not joined eagerly and thus producing query which yields # incorrect results if exclude: alloc_db = (context.session.query(models_v2.IPAllocation). filter_by(subnet_id=subnet_id).join(models_v2.Port). filter(~models_v2.Port.device_owner. in_(device_owner)).first()) else: alloc_db = (context.session.query(models_v2.IPAllocation). filter_by(subnet_id=subnet_id).join(models_v2.Port). filter(models_v2.Port.device_owner. 
in_(device_owner)).first()) if exclude and alloc_db: return super(IPAllocation, cls)._load_object(context, alloc_db) if alloc_db: return True @base.NeutronObjectRegistry.register class PortDNS(base.NeutronDbObject): # Version 1.0: Initial version # Version 1.1: Add dns_domain attribute VERSION = '1.1' db_model = dns_models.PortDNS primary_keys = ['port_id'] foreign_keys = { 'Port': {'port_id': 'id'}, } fields = { 'port_id': common_types.UUIDField(), 'current_dns_name': common_types.DomainNameField(), 'current_dns_domain': common_types.DomainNameField(), 'previous_dns_name': common_types.DomainNameField(), 'previous_dns_domain': common_types.DomainNameField(), 'dns_name': common_types.DomainNameField(), 'dns_domain': common_types.DomainNameField(), } def obj_make_compatible(self, primitive, target_version): _target_version = versionutils.convert_version_to_tuple(target_version) if _target_version < (1, 1): primitive.pop('dns_domain', None) @base.NeutronObjectRegistry.register class SecurityGroupPortBinding(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = sg_models.SecurityGroupPortBinding fields = { 'port_id': common_types.UUIDField(), 'security_group_id': common_types.UUIDField(), } primary_keys = ['port_id', 'security_group_id'] @base.NeutronObjectRegistry.register class Port(base.NeutronDbObject): # Version 1.0: Initial version # Version 1.1: Add data_plane_status field # Version 1.2: Added segment_id to binding_levels # Version 1.3: distributed_binding -> distributed_bindings # Version 1.4: Attribute binding becomes ListOfObjectsField # Version 1.5: Added qos_network_policy_id field VERSION = '1.5' db_model = models_v2.Port fields = { 'id': common_types.UUIDField(), 'project_id': obj_fields.StringField(nullable=True), 'name': obj_fields.StringField(nullable=True), 'network_id': common_types.UUIDField(), 'mac_address': common_types.MACAddressField(), 'admin_state_up': obj_fields.BooleanField(), 'device_id': obj_fields.StringField(), 'device_owner': obj_fields.StringField(), 'status': obj_fields.StringField(), 'allowed_address_pairs': obj_fields.ListOfObjectsField( 'AllowedAddressPair', nullable=True ), 'bindings': obj_fields.ListOfObjectsField( 'PortBinding', nullable=True ), 'data_plane_status': obj_fields.ObjectField( 'PortDataPlaneStatus', nullable=True ), 'dhcp_options': obj_fields.ListOfObjectsField( 'ExtraDhcpOpt', nullable=True ), 'distributed_bindings': obj_fields.ListOfObjectsField( 'DistributedPortBinding', nullable=True ), 'dns': obj_fields.ObjectField('PortDNS', nullable=True), 'fixed_ips': obj_fields.ListOfObjectsField( 'IPAllocation', nullable=True ), # TODO(ihrachys): consider converting to boolean 'security': obj_fields.ObjectField( 'PortSecurity', nullable=True ), 'security_group_ids': common_types.SetOfUUIDsField( nullable=True, # TODO(ihrachys): how do we safely pass a mutable default? 
default=None, ), 'qos_policy_id': common_types.UUIDField(nullable=True, default=None), 'qos_network_policy_id': common_types.UUIDField(nullable=True, default=None), 'binding_levels': obj_fields.ListOfObjectsField( 'PortBindingLevel', nullable=True ), # TODO(ihrachys): consider adding a 'dns_assignment' fully synthetic # field in later object iterations } extra_filter_names = {'security_group_ids'} fields_no_update = ['project_id', 'network_id'] synthetic_fields = [ 'allowed_address_pairs', 'bindings', 'binding_levels', 'data_plane_status', 'dhcp_options', 'distributed_bindings', 'dns', 'fixed_ips', 'qos_policy_id', 'qos_network_policy_id', 'security', 'security_group_ids', ] fields_need_translation = { 'bindings': 'port_bindings', 'dhcp_options': 'dhcp_opts', 'distributed_bindings': 'distributed_port_binding', 'security': 'port_security', } def create(self): fields = self.obj_get_changes() with self.db_context_writer(self.obj_context): sg_ids = self.security_group_ids if sg_ids is None: sg_ids = set() qos_policy_id = self.qos_policy_id super(Port, self).create() if 'security_group_ids' in fields: self._attach_security_groups(sg_ids) if 'qos_policy_id' in fields: self._attach_qos_policy(qos_policy_id) def update(self): fields = self.obj_get_changes() with self.db_context_writer(self.obj_context): super(Port, self).update() if 'security_group_ids' in fields: self._attach_security_groups(fields['security_group_ids']) if 'qos_policy_id' in fields: self._attach_qos_policy(fields['qos_policy_id']) def _attach_qos_policy(self, qos_policy_id): binding.QosPolicyPortBinding.delete_objects( self.obj_context, port_id=self.id) if qos_policy_id: port_binding_obj = binding.QosPolicyPortBinding( self.obj_context, policy_id=qos_policy_id, port_id=self.id) port_binding_obj.create() self.qos_policy_id = qos_policy_id self.obj_reset_changes(['qos_policy_id']) def _attach_security_groups(self, sg_ids): # TODO(ihrachys): consider introducing an (internal) object for the # binding to decouple database operations a bit more obj_db_api.delete_objects( SecurityGroupPortBinding, self.obj_context, port_id=self.id) if sg_ids: for sg_id in sg_ids: self._attach_security_group(sg_id) self.security_group_ids = sg_ids self.obj_reset_changes(['security_group_ids']) def _attach_security_group(self, sg_id): obj_db_api.create_object( SecurityGroupPortBinding, self.obj_context, {'port_id': self.id, 'security_group_id': sg_id} ) @classmethod def get_objects(cls, context, _pager=None, validate_filters=True, security_group_ids=None, **kwargs): if security_group_ids: ports_with_sg = cls.get_ports_ids_by_security_groups( context, security_group_ids) port_ids = kwargs.get("id", []) if port_ids: kwargs['id'] = list(set(port_ids) & set(ports_with_sg)) else: kwargs['id'] = ports_with_sg return super(Port, cls).get_objects(context, _pager, validate_filters, **kwargs) @classmethod def get_port_ids_filter_by_segment_id(cls, context, segment_id): query = context.session.query(models_v2.Port.id) query = query.join( ml2_models.PortBindingLevel, ml2_models.PortBindingLevel.port_id == models_v2.Port.id) query = query.filter( ml2_models.PortBindingLevel.segment_id == segment_id) return [p.id for p in query] @classmethod def modify_fields_to_db(cls, fields): result = super(Port, cls).modify_fields_to_db(fields) # TODO(rossella_s): get rid of it once we switch the db model to using # custom types. 
        if 'mac_address' in result:
            result['mac_address'] = cls.filter_to_str(result['mac_address'])
        # convert None to []
        if 'distributed_port_binding' in result:
            result['distributed_port_binding'] = (
                result['distributed_port_binding'] or []
            )
        return result

    @classmethod
    def modify_fields_from_db(cls, db_obj):
        fields = super(Port, cls).modify_fields_from_db(db_obj)
        # TODO(rossella_s): get rid of it once we switch the db model to using
        # custom types.
        if 'mac_address' in fields:
            fields['mac_address'] = net_utils.AuthenticEUI(
                fields['mac_address'])
        distributed_port_binding = fields.get('distributed_bindings')
        if distributed_port_binding:
            # TODO(ihrachys) support multiple bindings
            fields['distributed_bindings'] = fields['distributed_bindings'][0]
        else:
            fields['distributed_bindings'] = []
        return fields

    def from_db_object(self, db_obj):
        super(Port, self).from_db_object(db_obj)
        # extract security group bindings
        if db_obj.get('security_groups', []):
            self.security_group_ids = {
                sg.security_group_id
                for sg in db_obj.security_groups
            }
        else:
            self.security_group_ids = set()
        fields_to_change = ['security_group_ids']
        # extract qos policy binding
        if db_obj.get('qos_policy_binding'):
            self.qos_policy_id = db_obj.qos_policy_binding.policy_id
            fields_to_change.append('qos_policy_id')
        if db_obj.get('qos_network_policy_binding'):
            self.qos_network_policy_id = (
                db_obj.qos_network_policy_binding.policy_id)
            # reset the object field just set, not the db relationship name
            fields_to_change.append('qos_network_policy_id')
        self.obj_reset_changes(fields_to_change)

    def obj_make_compatible(self, primitive, target_version):
        _target_version = versionutils.convert_version_to_tuple(target_version)
        if _target_version < (1, 1):
            primitive.pop('data_plane_status', None)
        if _target_version < (1, 2):
            binding_levels = primitive.get('binding_levels', [])
            for lvl in binding_levels:
                lvl['versioned_object.version'] = '1.0'
                lvl['versioned_object.data'].pop('segment_id', None)
        if _target_version < (1, 3):
            bindings = primitive.pop('distributed_bindings', [])
            primitive['distributed_binding'] = (bindings[0]
                                                if bindings else None)
        if _target_version < (1, 4):
            # In version 1.4 we add support for multiple port bindings.
            # Previous versions only support one port binding.
            # The following lines look for the active port binding, which is
            # the only one needed in previous versions
            if 'bindings' in primitive:
                original_bindings = primitive.pop('bindings')
                primitive['binding'] = None
                for a_binding in original_bindings:
                    if (a_binding['versioned_object.data']['status'] ==
                            constants.ACTIVE):
                        primitive['binding'] = a_binding
                        break
        if _target_version < (1, 5):
            primitive.pop('qos_network_policy_id', None)

    @classmethod
    def get_ports_by_router_and_network(cls, context, router_id, owner,
                                        network_id):
        """Returns port objects filtering by router ID, owner and network ID"""
        rports_filter = (models_v2.Port.network_id == network_id, )
        router_filter = (models_v2.Port.network_id == network_id, )
        return cls._get_ports_by_router(context, router_id, owner,
                                        rports_filter, router_filter)

    @classmethod
    def get_ports_by_router_and_port(cls, context, router_id, owner, port_id):
        """Returns port objects filtering by router ID, owner and port ID"""
        rports_filter = (l3.RouterPort.port_id == port_id, )
        router_filter = (models_v2.Port.id == port_id, )
        return cls._get_ports_by_router(context, router_id, owner,
                                        rports_filter, router_filter)

    @classmethod
    def _get_ports_by_router(cls, context, router_id, owner, rports_filter,
                             router_filter):
        """Returns port objects filtering by router id and owner

        The method will receive extra filters depending on the caller (filter
        by network or filter by port).

        The ports are retrieved using:
        - The RouterPort records. Each time a port is assigned to a router, a
          new RouterPort record is added to the DB.
        - The port owner and device_id information.

        Both searches should return the same result. If not, a warning
        message is logged and the port list to be returned is completed with
        the missing ones.
        """
        rports_filter += (l3.RouterPort.router_id == router_id,
                          l3.RouterPort.port_type == owner)
        router_filter += (models_v2.Port.device_id == router_id,
                          models_v2.Port.device_owner == owner)
        ports = context.session.query(models_v2.Port).join(
            l3.RouterPort).filter(*rports_filter)
        ports_rports = [cls._load_object(context, db_obj)
                        for db_obj in ports.all()]
        ports = context.session.query(models_v2.Port).filter(*router_filter)
        ports_router = [cls._load_object(context, db_obj)
                        for db_obj in ports.all()]
        ports_rports_ids = {p.id for p in ports_rports}
        ports_router_ids = {p.id for p in ports_router}
        missing_port_ids = ports_router_ids - ports_rports_ids
        if missing_port_ids:
            LOG.warning('The following ports, assigned to router '
                        '%(router_id)s, do not have a "routerport" record: '
                        '%(port_ids)s', {'router_id': router_id,
                                         'port_ids': missing_port_ids})
            port_objs = [p for p in ports_router if p.id in missing_port_ids]
            ports_rports += port_objs
        return ports_rports

    @classmethod
    def get_ports_ids_by_security_groups(cls, context, security_group_ids,
                                         excluded_device_owners=None):
        query = context.session.query(sg_models.SecurityGroupPortBinding)
        query = query.filter(
            sg_models.SecurityGroupPortBinding.security_group_id.in_(
                security_group_ids))
        if excluded_device_owners:
            query = query.join(models_v2.Port)
            query = query.filter(
                ~models_v2.Port.device_owner.in_(excluded_device_owners))
        return [port_binding['port_id'] for port_binding in query.all()]

    @classmethod
    def get_ports_by_binding_type_and_host(cls, context, binding_type, host):
        query = context.session.query(models_v2.Port).join(
            ml2_models.PortBinding)
        query = query.filter(
            ml2_models.PortBinding.vif_type == binding_type,
            ml2_models.PortBinding.host == host)
        return [cls._load_object(context, db_obj) for db_obj in query.all()]
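    # Usage sketch for the host-based lookups above and below (illustrative
    # only; ``ctx`` and the literal values are placeholders rather than part
    # of any API contract):
    #
    #     ports = Port.get_ports_by_binding_type_and_host(
    #         ctx, binding_type='ovs', host='compute-1')
    #     ports = Port.get_ports_by_vnic_type_and_host(
    #         ctx, vnic_type='normal', host='compute-1')
    #
    # Both helpers return fully loaded Port objects via _load_object().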
    @classmethod
    def get_ports_by_vnic_type_and_host(
            cls, context, vnic_type, host):
        query = context.session.query(models_v2.Port).join(
            ml2_models.PortBinding)
        query = query.filter(
            ml2_models.PortBinding.vnic_type == vnic_type,
            ml2_models.PortBinding.host == host)
        return [cls._load_object(context, db_obj) for db_obj in query.all()]

    @classmethod
    def check_network_ports_by_binding_types(
            cls, context, network_id, binding_types, negative_search=False):
        """Check whether the network has ports with given binding_types.

        :param context: neutron api request context
        :param network_id: ID of network to check
        :param binding_types: list of binding types to look for
        :param negative_search: if set to true, ports with binding_type other
                                than "binding_types" will be counted
        :return: True if any port is found, False otherwise
        """
        query = context.session.query(models_v2.Port).join(
            ml2_models.PortBinding)
        query = query.filter(models_v2.Port.network_id == network_id)
        if negative_search:
            query = query.filter(
                ml2_models.PortBinding.vif_type.notin_(binding_types))
        else:
            query = query.filter(
                ml2_models.PortBinding.vif_type.in_(binding_types))
        return bool(query.count())
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/neutron/objects/provisioning_blocks.py0000644000175000017500000000206600000000000024727 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_versionedobjects import fields as obj_fields

from neutron.db.models import provisioning_block as pb_model
from neutron.objects import base


@base.NeutronObjectRegistry.register
class ProvisioningBlock(base.NeutronDbObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = pb_model.ProvisioningBlock

    fields = {
        'standard_attr_id': obj_fields.IntegerField(),
        'entity': obj_fields.StringField()
    }

    primary_keys = ['standard_attr_id', 'entity']
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3270447
neutron-16.0.0.0b2.dev214/neutron/objects/qos/0000755000175000017500000000000000000000000021070 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/neutron/objects/qos/__init__.py0000644000175000017500000000000000000000000023167 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/neutron/objects/qos/binding.py0000644000175000017500000000451600000000000023062 0ustar00coreycorey00000000000000# Copyright 2017 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.objects import common_types from neutron.db.qos import models as qos_db_model from neutron.objects import base @base.NeutronObjectRegistry.register class QosPolicyPortBinding(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = qos_db_model.QosPortPolicyBinding fields = { 'policy_id': common_types.UUIDField(), 'port_id': common_types.UUIDField() } primary_keys = ['port_id'] fields_no_update = ['policy_id', 'port_id'] @base.NeutronObjectRegistry.register class QosPolicyNetworkBinding(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = qos_db_model.QosNetworkPolicyBinding fields = { 'policy_id': common_types.UUIDField(), 'network_id': common_types.UUIDField() } primary_keys = ['network_id'] fields_no_update = ['policy_id', 'network_id'] @base.NeutronObjectRegistry.register class QosPolicyFloatingIPBinding(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = qos_db_model.QosFIPPolicyBinding fields = { 'policy_id': common_types.UUIDField(), 'fip_id': common_types.UUIDField() } primary_keys = ['policy_id', 'fip_id'] fields_no_update = ['policy_id', 'fip_id'] @base.NeutronObjectRegistry.register class QosPolicyRouterGatewayIPBinding(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = qos_db_model.QosRouterGatewayIPPolicyBinding fields = { 'policy_id': common_types.UUIDField(), 'router_id': common_types.UUIDField() } primary_keys = ['policy_id', 'router_id'] fields_no_update = ['policy_id', 'router_id'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/qos/policy.py0000644000175000017500000003736400000000000022756 0ustar00coreycorey00000000000000# Copyright 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
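# A minimal lifecycle sketch for the QosPolicy object defined below
# (illustrative only; ``ctx`` is assumed to be an admin request context and
# the *_id values are placeholder UUIDs):
#
#     policy = QosPolicy(ctx, id=policy_id, project_id=project_id,
#                        name='gold', shared=False)
#     policy.create()              # persists the policy, then loads 'rules'
#     policy.attach_port(port_id)  # adds a QosPolicyPortBinding row
#     policy.get_bound_ports()     # -> [port_id]
#     policy.detach_port(port_id)
#     policy.delete()              # raises QosPolicyInUse while still bound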
import itertools from neutron_lib.exceptions import qos as qos_exc from neutron_lib.objects import common_types from oslo_db import exception as db_exc from oslo_utils import versionutils from oslo_versionedobjects import exception from oslo_versionedobjects import fields as obj_fields from neutron.db.models import l3 from neutron.db import models_v2 from neutron.db.qos import models as qos_db_model from neutron.db import rbac_db_models from neutron.objects import base as base_db from neutron.objects.db import api as obj_db_api from neutron.objects.qos import binding from neutron.objects.qos import rule as rule_obj_impl from neutron.objects import rbac from neutron.objects import rbac_db @base_db.NeutronObjectRegistry.register class QosPolicyRBAC(rbac.RBACBaseObject): # Version 1.0: Initial version # Version 1.1: Inherit from rbac_db.RBACBaseObject; added 'id' and # 'project_id'; changed 'object_id' from StringField to # UUIDField VERSION = '1.1' db_model = rbac_db_models.QosPolicyRBAC def obj_make_compatible(self, primitive, target_version): _target_version = versionutils.convert_version_to_tuple(target_version) if _target_version < (1, 1): standard_fields = ['id', 'project_id'] for f in standard_fields: primitive.pop(f) @base_db.NeutronObjectRegistry.register class QosPolicy(rbac_db.NeutronRbacObject): # Version 1.0: Initial version # Version 1.1: QosDscpMarkingRule introduced # Version 1.2: Added QosMinimumBandwidthRule # Version 1.3: Added standard attributes (created_at, revision, etc) # Version 1.4: Changed tenant_id to project_id # Version 1.5: Direction for bandwidth limit rule added # Version 1.6: Added "is_default" field # Version 1.7: Added floating IP bindings # Version 1.8: Added router gateway QoS policy bindings VERSION = '1.8' # required by RbacNeutronMetaclass rbac_db_cls = QosPolicyRBAC db_model = qos_db_model.QosPolicy fields = { 'id': common_types.UUIDField(), 'project_id': obj_fields.StringField(), 'name': obj_fields.StringField(), 'shared': obj_fields.BooleanField(default=False), 'rules': obj_fields.ListOfObjectsField('QosRule', subclasses=True), 'is_default': obj_fields.BooleanField(default=False), } fields_no_update = ['id', 'project_id'] synthetic_fields = ['rules', 'is_default'] extra_filter_names = {'is_default'} binding_models = {'port': binding.QosPolicyPortBinding, 'network': binding.QosPolicyNetworkBinding, 'fip': binding.QosPolicyFloatingIPBinding, 'router': binding.QosPolicyRouterGatewayIPBinding} def obj_load_attr(self, attrname): if attrname == 'rules': return self._reload_rules() elif attrname == 'is_default': return self._reload_is_default() return super(QosPolicy, self).obj_load_attr(attrname) def _reload_rules(self): rules = rule_obj_impl.get_rules(self, self.obj_context, self.id) setattr(self, 'rules', rules) self.obj_reset_changes(['rules']) def _reload_is_default(self): if self.get_default() == self.id: setattr(self, 'is_default', True) else: setattr(self, 'is_default', False) self.obj_reset_changes(['is_default']) def get_rule_by_id(self, rule_id): """Return rule specified by rule_id. @raise QosRuleNotFound: if there is no such rule in the policy. """ for rule in self.rules: if rule_id == rule.id: return rule raise qos_exc.QosRuleNotFound(policy_id=self.id, rule_id=rule_id) # TODO(hichihara): For tag mechanism. 
This will be removed in bug/1704137 def to_dict(self): _dict = super(QosPolicy, self).to_dict() try: _dict['tags'] = [t.tag for t in self.db_obj.standard_attr.tags] except AttributeError: # AttrtibuteError can be raised when accessing self.db_obj # or self.db_obj.standard_attr pass return _dict @classmethod def get_policy_obj(cls, context, policy_id): """Fetch a QoS policy. :param context: neutron api request context :type context: neutron.context.Context :param policy_id: the id of the QosPolicy to fetch :type policy_id: str uuid :returns: a QosPolicy object :raises: n_exc.QosPolicyNotFound """ obj = cls.get_object(context, id=policy_id) if obj is None: raise qos_exc.QosPolicyNotFound(policy_id=policy_id) return obj @classmethod def get_object(cls, context, **kwargs): policy_obj = super(QosPolicy, cls).get_object(context, **kwargs) if not policy_obj: return policy_obj.obj_load_attr('rules') policy_obj.obj_load_attr('is_default') return policy_obj @classmethod def get_objects(cls, context, _pager=None, validate_filters=True, **kwargs): objs = super(QosPolicy, cls).get_objects(context, _pager, validate_filters, **kwargs) result = [] for obj in objs: obj.obj_load_attr('rules') obj.obj_load_attr('is_default') result.append(obj) return result @classmethod def _get_object_policy(cls, context, binding_cls, **kwargs): with cls.db_context_reader(context): binding_db_obj = obj_db_api.get_object(binding_cls, context, **kwargs) if binding_db_obj: return cls.get_object(context, id=binding_db_obj['policy_id']) @classmethod def get_network_policy(cls, context, network_id): return cls._get_object_policy(context, binding.QosPolicyNetworkBinding, network_id=network_id) @classmethod def get_port_policy(cls, context, port_id): return cls._get_object_policy(context, binding.QosPolicyPortBinding, port_id=port_id) @classmethod def get_fip_policy(cls, context, fip_id): return cls._get_object_policy( context, binding.QosPolicyFloatingIPBinding, fip_id=fip_id) @classmethod def get_router_policy(cls, context, router_id): return cls._get_object_policy( context, binding.QosPolicyRouterGatewayIPBinding, router_id=router_id) # TODO(QoS): Consider extending base to trigger registered methods for us def create(self): with self.db_context_writer(self.obj_context): super(QosPolicy, self).create() if self.is_default: self.set_default() self.obj_load_attr('rules') def update(self): with self.db_context_writer(self.obj_context): if 'is_default' in self.obj_what_changed(): if self.is_default: self.set_default() else: self.unset_default() super(QosPolicy, self).update() def delete(self): with self.db_context_writer(self.obj_context): for object_type, obj_class in self.binding_models.items(): pager = base_db.Pager(limit=1) binding_obj = obj_class.get_objects(self.obj_context, policy_id=self.id, _pager=pager) if binding_obj: raise qos_exc.QosPolicyInUse( policy_id=self.id, object_type=object_type, object_id=binding_obj[0]['%s_id' % object_type]) super(QosPolicy, self).delete() def attach_network(self, network_id): network_binding = {'policy_id': self.id, 'network_id': network_id} network_binding_obj = binding.QosPolicyNetworkBinding( self.obj_context, **network_binding) try: network_binding_obj.create() except db_exc.DBReferenceError as e: raise qos_exc.NetworkQosBindingError(policy_id=self.id, net_id=network_id, db_error=e) def attach_port(self, port_id): port_binding_obj = binding.QosPolicyPortBinding( self.obj_context, policy_id=self.id, port_id=port_id) try: port_binding_obj.create() except db_exc.DBReferenceError as e: raise 
qos_exc.PortQosBindingError(policy_id=self.id, port_id=port_id, db_error=e) def attach_floatingip(self, fip_id): fip_binding_obj = binding.QosPolicyFloatingIPBinding( self.obj_context, policy_id=self.id, fip_id=fip_id) try: fip_binding_obj.create() except db_exc.DBReferenceError as e: raise qos_exc.FloatingIPQosBindingError(policy_id=self.id, fip_id=fip_id, db_error=e) def attach_router(self, router_id): router_binding_obj = binding.QosPolicyRouterGatewayIPBinding( self.obj_context, policy_id=self.id, router_id=router_id) try: router_binding_obj.create() except db_exc.DBReferenceError as e: raise qos_exc.RouterQosBindingError(policy_id=self.id, router_id=router_id, db_error=e) def detach_network(self, network_id): deleted = binding.QosPolicyNetworkBinding.delete_objects( self.obj_context, network_id=network_id) if not deleted: raise qos_exc.NetworkQosBindingNotFound(net_id=network_id, policy_id=self.id) def detach_port(self, port_id): deleted = binding.QosPolicyPortBinding.delete_objects(self.obj_context, port_id=port_id) if not deleted: raise qos_exc.PortQosBindingNotFound(port_id=port_id, policy_id=self.id) def detach_floatingip(self, fip_id): deleted = binding.QosPolicyFloatingIPBinding.delete_objects( self.obj_context, fip_id=fip_id) if not deleted: raise qos_exc.FloatingIPQosBindingNotFound(fip_id=fip_id, policy_id=self.id) def detach_router(self, router_id): deleted = binding.QosPolicyRouterGatewayIPBinding.delete_objects( self.obj_context, router_id=router_id) if not deleted: raise qos_exc.RouterQosBindingNotFound(router_id=router_id, policy_id=self.id) def set_default(self): if not self.get_default(): qos_default_policy = QosPolicyDefault(self.obj_context, qos_policy_id=self.id, project_id=self.project_id) qos_default_policy.create() elif self.get_default() != self.id: raise qos_exc.QoSPolicyDefaultAlreadyExists( project_id=self.project_id) def unset_default(self): if self.get_default() == self.id: qos_default_policy = QosPolicyDefault.get_object( self.obj_context, project_id=self.project_id) qos_default_policy.delete() def get_default(self): qos_default_policy = QosPolicyDefault.get_object( self.obj_context, project_id=self.project_id) if qos_default_policy: return qos_default_policy.qos_policy_id def get_bound_networks(self): return [ nb.network_id for nb in binding.QosPolicyNetworkBinding.get_objects( self.obj_context, policy_id=self.id) ] def get_bound_ports(self): return [ pb.port_id for pb in binding.QosPolicyPortBinding.get_objects( self.obj_context, policy_id=self.id) ] def get_bound_floatingips(self): return [ fb.fip_id for fb in binding.QosPolicyFloatingIPBinding.get_objects( self.obj_context, policy_id=self.id) ] def get_bound_routers(self): return [ rb.router_id for rb in binding.QosPolicyRouterGatewayIPBinding.get_objects( self.obj_context, policy_id=self.id) ] @classmethod def _get_bound_tenant_ids(cls, session, binding_db, bound_db, binding_db_id_column, policy_id): return list(itertools.chain.from_iterable( session.query(bound_db.tenant_id).join( binding_db, bound_db.id == binding_db_id_column).filter( binding_db.policy_id == policy_id).all())) @classmethod def get_bound_tenant_ids(cls, context, policy_id): """Implements RbacNeutronObject.get_bound_tenant_ids. :returns: set -- a set of tenants' ids dependent on QosPolicy. 
""" net = models_v2.Network qosnet = qos_db_model.QosNetworkPolicyBinding port = models_v2.Port qosport = qos_db_model.QosPortPolicyBinding fip = l3.FloatingIP qosfip = qos_db_model.QosFIPPolicyBinding router = l3.Router qosrouter = qos_db_model.QosRouterGatewayIPPolicyBinding bound_tenants = [] with cls.db_context_reader(context): bound_tenants.extend(cls._get_bound_tenant_ids( context.session, qosnet, net, qosnet.network_id, policy_id)) bound_tenants.extend( cls._get_bound_tenant_ids(context.session, qosport, port, qosport.port_id, policy_id)) bound_tenants.extend( cls._get_bound_tenant_ids(context.session, qosfip, fip, qosfip.fip_id, policy_id)) bound_tenants.extend( cls._get_bound_tenant_ids(context.session, qosrouter, router, qosrouter.router_id, policy_id)) return set(bound_tenants) def obj_make_compatible(self, primitive, target_version): _target_version = versionutils.convert_version_to_tuple(target_version) if _target_version < (1, 8): raise exception.IncompatibleObjectVersion( objver=target_version, objname=self.__class__.__name__) @base_db.NeutronObjectRegistry.register class QosPolicyDefault(base_db.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = qos_db_model.QosPolicyDefault fields = { 'qos_policy_id': common_types.UUIDField(), 'project_id': obj_fields.StringField(), } primary_keys = ['project_id'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/qos/qos_policy_validator.py0000644000175000017500000000546600000000000025703 0ustar00coreycorey00000000000000# Copyright (c) 2017-18 NEC Technologies India Pvt Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.exceptions import qos as qos_exc from neutron_lib.services.qos import constants as qos_consts def check_bandwidth_rule_conflict(policy, rule_data): """Implementation of the QoS Rule checker. This function checks if the new rule to be associated with the policy doesn't conflict with the existing rules. Raises an exception if conflict is identified. """ for rule in policy.rules: if rule.rule_type == qos_consts.RULE_TYPE_DSCP_MARKING: # Skip checks if Rule is DSCP continue elif rule.rule_type == qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH: if "max_kbps" in rule_data and ( int(rule.min_kbps) > int(rule_data["max_kbps"])): raise qos_exc.QoSRuleParameterConflict( rule_value=rule_data["max_kbps"], policy_id=policy["id"], existing_rule=rule.rule_type, existing_value=rule.min_kbps) elif rule.rule_type == qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: if "min_kbps" in rule_data and ( int(rule.max_kbps) < int(rule_data["min_kbps"])): raise qos_exc.QoSRuleParameterConflict( rule_value=rule_data["min_kbps"], policy_id=policy["id"], existing_rule=rule.rule_type, existing_value=rule.max_kbps) def check_rules_conflict(policy, rule_obj): """Implementation of the QoS Policy rules conflicts. This function checks if the new rule to be associated with policy doesn't have any duplicate rule already in policy. 
    Raises an exception if conflict is identified.
    """
    for rule in policy.rules:
        # NOTE(slaweq): we don't want to raise exception when compared rules
        # have got same id as it means that it is probably exactly the same
        # rule so there is no conflict
        if rule.id == getattr(rule_obj, "id", None):
            continue
        if rule.duplicates(rule_obj):
            raise qos_exc.QoSRulesConflict(
                new_rule_type=rule_obj.rule_type,
                rule_id=rule.id,
                policy_id=policy.id)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/neutron/objects/qos/rule.py0000644000175000017500000001362700000000000022420 0ustar00coreycorey00000000000000# Copyright 2015 Huawei Technologies India Pvt Ltd, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc
import sys

from neutron_lib import constants
from neutron_lib.objects import common_types
from neutron_lib.services.qos import constants as qos_consts
from neutron_lib.utils import helpers
from oslo_utils import versionutils
from oslo_versionedobjects import exception
from oslo_versionedobjects import fields as obj_fields
import six

from neutron.db.qos import models as qos_db_model
from neutron.objects import base

DSCP_MARK = 'dscp_mark'


def get_rules(obj_cls, context, qos_policy_id):
    all_rules = []
    with obj_cls.db_context_reader(context):
        for rule_type in qos_consts.VALID_RULE_TYPES:
            rule_cls_name = 'Qos%sRule' % helpers.camelize(rule_type)
            rule_cls = getattr(sys.modules[__name__], rule_cls_name)
            rules = rule_cls.get_objects(context, qos_policy_id=qos_policy_id)
            all_rules.extend(rules)
    return all_rules


@six.add_metaclass(abc.ABCMeta)
class QosRule(base.NeutronDbObject):
    # Version 1.0: Initial version, only BandwidthLimitRule
    #         1.1: Added DscpMarkingRule
    #         1.2: Added QosMinimumBandwidthRule
    #         1.3: Added direction for BandwidthLimitRule
    #
    # NOTE(mangelajo): versions need to be handled from the top QosRule object
    #                  because it's the only reference QosPolicy can make
    #                  to them via obj_relationships version map
    VERSION = '1.3'

    fields = {
        'id': common_types.UUIDField(),
        'qos_policy_id': common_types.UUIDField()
    }

    fields_no_update = ['id', 'qos_policy_id']

    # should be redefined in subclasses
    rule_type = None

    duplicates_compare_fields = ()

    def duplicates(self, other_rule):
        """Return True if the rules duplicate each other.

        Two rules are duplicates when they have the same values for all
        fields listed in 'duplicates_compare_fields'. If a subclass does not
        define any fields there, only the rule types are compared.
        """
        if self.rule_type != other_rule.rule_type:
            return False

        if self.duplicates_compare_fields:
            for field in self.duplicates_compare_fields:
                if getattr(self, field) != getattr(other_rule, field):
                    return False
        return True

    def to_dict(self):
        dict_ = super(QosRule, self).to_dict()
        dict_['type'] = self.rule_type
        return dict_

    def should_apply_to_port(self, port):
        """Check whether a rule can be applied to a specific port.
This function has the logic to decide whether a rule should be applied to a port or not, depending on the source of the policy (network, or port). Eventually rules could override this method, or we could make it abstract to allow different rule behaviour. """ is_port_policy = self.qos_policy_id == port[qos_consts.QOS_POLICY_ID] is_network_policy_only = port[qos_consts.QOS_POLICY_ID] is None is_network_device_port = any(port['device_owner'].startswith(prefix) for prefix in constants.DEVICE_OWNER_PREFIXES) # NOTE(miouge): Network QoS policies should apply to ext routers ports: # - DEVICE_OWNER_AGENT_GW for DVR routers # - DEVICE_OWNER_ROUTER_GW for normal neutron routers is_router_gw = any(port['device_owner'].startswith(prefix) for prefix in [constants.DEVICE_OWNER_AGENT_GW, constants.DEVICE_OWNER_ROUTER_GW]) # NOTE(ralonsoh): return True if: # - Is a port QoS policy (not a network QoS policy) # - Is not an internal network device (e.g. router) and is a network # QoS policy and there is no port QoS policy return (is_port_policy or ((is_router_gw or not is_network_device_port) and is_network_policy_only)) def obj_make_compatible(self, primitive, target_version): _target_version = versionutils.convert_version_to_tuple(target_version) if _target_version < (1, 3): raise exception.IncompatibleObjectVersion( objver=target_version, objtype=self.__class__.__name__) @base.NeutronObjectRegistry.register class QosBandwidthLimitRule(QosRule): db_model = qos_db_model.QosBandwidthLimitRule fields = { 'max_kbps': obj_fields.IntegerField(nullable=True), 'max_burst_kbps': obj_fields.IntegerField(nullable=True), 'direction': common_types.FlowDirectionEnumField( default=constants.EGRESS_DIRECTION) } duplicates_compare_fields = ['direction'] rule_type = qos_consts.RULE_TYPE_BANDWIDTH_LIMIT @base.NeutronObjectRegistry.register class QosDscpMarkingRule(QosRule): db_model = qos_db_model.QosDscpMarkingRule fields = { DSCP_MARK: common_types.DscpMarkField(), } rule_type = qos_consts.RULE_TYPE_DSCP_MARKING @base.NeutronObjectRegistry.register class QosMinimumBandwidthRule(QosRule): db_model = qos_db_model.QosMinimumBandwidthRule fields = { 'min_kbps': obj_fields.IntegerField(nullable=True), 'direction': common_types.FlowDirectionEnumField(), } duplicates_compare_fields = ['direction'] rule_type = qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/qos/rule_type.py0000644000175000017500000000677700000000000023473 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
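# The objects below expose the QoS rule types supported by the loaded
# drivers without touching the database. An illustrative sketch (the exact
# result depends on the configured QoS plugin):
#
#     QosRuleType.get_objects()
#     # -> e.g. [QosRuleType(type='bandwidth_limit'), ...]
#     QosRuleType.get_object('bandwidth_limit').drivers
#     # -> [QosRuleTypeDriver(name=..., supported_parameters=[...]), ...]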
from neutron_lib.objects import common_types from neutron_lib.plugins import constants from neutron_lib.plugins import directory from neutron_lib.services.qos import constants as qos_consts from oslo_utils import versionutils from oslo_versionedobjects import exception from oslo_versionedobjects import fields as obj_fields from neutron.objects import base class RuleTypeField(obj_fields.BaseEnumField): def __init__(self, **kwargs): self.AUTO_TYPE = obj_fields.Enum( valid_values=qos_consts.VALID_RULE_TYPES) super(RuleTypeField, self).__init__(**kwargs) @base.NeutronObjectRegistry.register class QosRuleType(base.NeutronObject): # Version 1.0: Initial version # Version 1.1: Added QosDscpMarkingRule # Version 1.2: Added QosMinimumBandwidthRule # Version 1.3: Added drivers field VERSION = '1.3' fields = { 'type': RuleTypeField(), 'drivers': obj_fields.ListOfObjectsField( 'QosRuleTypeDriver', nullable=True) } synthetic_fields = ['drivers'] # we don't receive context because we don't need db access at all @classmethod def get_object(cls, rule_type_name, **kwargs): plugin = directory.get_plugin(alias=constants.QOS) drivers = plugin.supported_rule_type_details(rule_type_name) drivers_obj = [QosRuleTypeDriver( name=driver['name'], supported_parameters=driver['supported_parameters']) for driver in drivers] return cls(type=rule_type_name, drivers=drivers_obj) # we don't receive context because we don't need db access at all @classmethod def get_objects(cls, validate_filters=True, **kwargs): if validate_filters: cls.validate_filters(**kwargs) rule_types = ( directory.get_plugin(alias=constants.QOS).supported_rule_types) # TODO(ihrachys): apply filters to returned result return [cls(type=type_) for type_ in rule_types] # we don't receive context because we don't need db access at all @classmethod def get_values(cls, field, **kwargs): return [getattr(obj, field) for obj in cls.get_objects(**kwargs)] def obj_make_compatible(self, primitive, target_version): _target_version = versionutils.convert_version_to_tuple(target_version) if _target_version < (1, 3): raise exception.IncompatibleObjectVersion( objver=target_version, objtype=self.__class__.__name__) @base.NeutronObjectRegistry.register class QosRuleTypeDriver(base.NeutronObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'name': obj_fields.StringField(), 'supported_parameters': common_types.ListOfDictOfMiscValuesField() } def to_dict(self): return { 'name': self.name, 'supported_parameters': self.supported_parameters} @classmethod def get_objects(cls, context, **kwargs): raise NotImplementedError() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/quota.py0000644000175000017500000001215000000000000021770 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
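# A Reservation is created together with its per-resource deltas; the
# create() method below persists both in a single writer transaction.
# Illustrative sketch (``ctx`` and the other values are placeholders):
#
#     delta = ResourceDelta(ctx, resource='port', amount=2)
#     resv = Reservation(ctx, id=resv_id, project_id=project_id,
#                        expiration=expiration, resource_deltas=[delta])
#     resv.create()   # sets delta.reservation_id = resv.id and saves it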
from neutron_lib.objects import common_types from oslo_versionedobjects import fields as obj_fields import sqlalchemy as sa from sqlalchemy import sql from neutron.db.quota import models from neutron.objects import base @base.NeutronObjectRegistry.register class ResourceDelta(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = models.ResourceDelta primary_keys = ['resource', 'reservation_id'] foreign_keys = {'Reservation': {'reservation_id': 'id'}} fields = { 'resource': obj_fields.StringField(), 'reservation_id': common_types.UUIDField(), 'amount': obj_fields.IntegerField(nullable=True), } @base.NeutronObjectRegistry.register class Reservation(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = models.Reservation fields = { 'id': common_types.UUIDField(), 'project_id': obj_fields.StringField(nullable=True), 'expiration': obj_fields.DateTimeField(tzinfo_aware=False, nullable=True), 'resource_deltas': obj_fields.ListOfObjectsField( ResourceDelta.__name__, nullable=True), } synthetic_fields = ['resource_deltas'] def create(self): deltas = self.resource_deltas with self.db_context_writer(self.obj_context): super(Reservation, self).create() if deltas: for delta in deltas: delta.reservation_id = self.id delta.create() self.resource_deltas.append(delta) self.obj_reset_changes(['resource_deltas']) @classmethod def delete_expired(cls, context, now, project_id): resv_query = context.session.query(models.Reservation) if project_id: project_expr = (models.Reservation.project_id == project_id) else: project_expr = sql.true() # TODO(manjeets) Fetch and delete objects using # object/db/api.py once comparison operations are # supported resv_query = resv_query.filter(sa.and_( project_expr, models.Reservation.expiration < now)) return resv_query.delete() @classmethod def get_total_reservations_map(cls, context, now, project_id, resources, expired): if not resources: return resv_query = context.session.query( models.ResourceDelta.resource, models.Reservation.expiration, sql.func.sum(models.ResourceDelta.amount)).join( models.Reservation) if expired: exp_expr = (models.Reservation.expiration < now) else: exp_expr = (models.Reservation.expiration >= now) resv_query = resv_query.filter(sa.and_( models.Reservation.project_id == project_id, models.ResourceDelta.resource.in_(resources), exp_expr)).group_by( models.ResourceDelta.resource, models.Reservation.expiration) return dict((resource, total_reserved) for (resource, exp, total_reserved) in resv_query) @base.NeutronObjectRegistry.register class Quota(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = models.Quota fields = { 'id': common_types.UUIDField(), 'project_id': obj_fields.StringField(nullable=True), 'resource': obj_fields.StringField(nullable=True), 'limit': obj_fields.IntegerField(nullable=True), } @base.NeutronObjectRegistry.register class QuotaUsage(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = models.QuotaUsage primary_keys = ['resource', 'project_id'] fields = { 'resource': obj_fields.StringField(), 'project_id': obj_fields.StringField(), 'dirty': obj_fields.BooleanField(default=False), 'in_use': obj_fields.IntegerField(default=0), 'reserved': obj_fields.IntegerField(default=0), } @classmethod def get_object_dirty_protected(cls, context, **kwargs): query = context.session.query(cls.db_model) query = query.filter_by(**cls.modify_fields_to_db(kwargs)) # NOTE(manjeets) as lock mode was just for protecting dirty bits # an update 
on dirty will prevent the race. query.filter_by(dirty=True).update({'dirty': True}) res = query.first() if res: return cls._load_object(context, res) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/rbac.py0000644000175000017500000000373100000000000021553 0ustar00coreycorey00000000000000# Copyright 2018 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from neutron_lib.objects import common_types from oslo_versionedobjects import fields as obj_fields from six import add_metaclass from sqlalchemy import and_ from neutron.objects import base @add_metaclass(abc.ABCMeta) class RBACBaseObject(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'id': common_types.UUIDField(), 'project_id': obj_fields.StringField(), 'object_id': common_types.UUIDField(), 'target_tenant': obj_fields.StringField(), 'action': obj_fields.StringField(), } fields_no_update = ['id', 'project_id', 'object_id'] @classmethod def get_projects(cls, context, object_id=None, action=None, target_tenant=None): clauses = [] if object_id: clauses.append(cls.db_model.object_id == object_id) if action: clauses.append(cls.db_model.action == action) if target_tenant: clauses.append(cls.db_model.target_tenant == target_tenant) query = context.session.query(cls.db_model.target_tenant) if clauses: query = query.filter(and_(*clauses)) return [data[0] for data in query] @classmethod def get_type_class_map(cls): return {klass.db_model.object_type: klass for klass in cls.__subclasses__()} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/rbac_db.py0000644000175000017500000003721100000000000022220 0ustar00coreycorey00000000000000# Copyright 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
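# The machinery in this module turns a plain NeutronDbObject into an
# RBAC-aware one. A minimal sketch of how a class opts in (all names here
# are hypothetical placeholders, not real neutron objects):
#
#     class WidgetRBAC(rbac.RBACBaseObject):
#         db_model = widget_rbac_model        # hypothetical db model
#
#     class Widget(rbac_db.NeutronRbacObject):
#         VERSION = '1.0'
#         rbac_db_cls = WidgetRBAC
#         db_model = widget_model             # hypothetical db model
#         fields = {
#             'id': common_types.UUIDField(),
#             'project_id': obj_fields.StringField(),
#             'shared': obj_fields.BooleanField(default=False),
#         }
#
# The metaclass verifies that 'shared' is declared and 'rbac_db_cls' is set,
# turns 'shared' into a synthetic field, wraps create()/update()/to_dict()
# with the *_hook helpers defined below, and subscribes the class to the
# RBAC_POLICY BEFORE_CREATE/BEFORE_UPDATE/BEFORE_DELETE callbacks.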
import abc import itertools from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import exceptions from six import add_metaclass from six import with_metaclass from sqlalchemy import and_ from neutron._i18n import _ from neutron.db import _utils as db_utils from neutron.db import rbac_db_mixin from neutron.db import rbac_db_models as models from neutron.extensions import rbac as ext_rbac from neutron.objects import base from neutron.objects.db import api as obj_db_api @add_metaclass(abc.ABCMeta) class RbacNeutronDbObjectMixin(rbac_db_mixin.RbacPluginMixin, base.NeutronDbObject): rbac_db_cls = None @classmethod @abc.abstractmethod def get_bound_tenant_ids(cls, context, obj_id): """Returns ids of all tenants depending on this db object. Has to be implemented by classes using RbacNeutronMetaclass. The tenants are the ones that need the sharing or 'visibility' of the object to them. E.g: for QosPolicy that would be the tenants using the Networks and Ports with the shared QosPolicy applied to them. :returns: set -- a set of tenants' ids dependent on this object. """ @staticmethod def is_network_shared(context, rbac_entries): # NOTE(korzen) this method is copied from db_base_plugin_common. # The shared attribute for a network now reflects if the network # is shared to the calling tenant via an RBAC entry. matches = ('*',) + ((context.tenant_id,) if context else ()) for entry in rbac_entries: if (entry.action == models.ACCESS_SHARED and entry.target_tenant in matches): return True return False @staticmethod def get_shared_with_tenant(context, rbac_db_cls, obj_id, tenant_id): # NOTE(korzen) This method enables to query within already started # session rbac_db_model = rbac_db_cls.db_model return (db_utils.model_query(context, rbac_db_model).filter( and_(rbac_db_model.object_id == obj_id, rbac_db_model.action == models.ACCESS_SHARED, rbac_db_model.target_tenant.in_( ['*', tenant_id]))).count() != 0) @classmethod def is_shared_with_tenant(cls, context, obj_id, tenant_id): ctx = context.elevated() with cls.db_context_reader(ctx): return cls.get_shared_with_tenant(ctx, cls.rbac_db_cls, obj_id, tenant_id) @classmethod def is_accessible(cls, context, db_obj): return (super( RbacNeutronDbObjectMixin, cls).is_accessible(context, db_obj) or cls.is_shared_with_tenant(context, db_obj.id, context.tenant_id)) @classmethod def _get_db_obj_rbac_entries(cls, context, rbac_obj_id, rbac_action): rbac_db_model = cls.rbac_db_cls.db_model return db_utils.model_query(context, rbac_db_model).filter( and_(rbac_db_model.object_id == rbac_obj_id, rbac_db_model.action == rbac_action)) @classmethod def _get_tenants_with_shared_access_to_db_obj(cls, context, obj_id): rbac_db_model = cls.rbac_db_cls.db_model return set(itertools.chain.from_iterable(context.session.query( rbac_db_model.target_tenant).filter( and_(rbac_db_model.object_id == obj_id, rbac_db_model.action == models.ACCESS_SHARED, rbac_db_model.target_tenant != '*')))) @classmethod def _validate_rbac_policy_delete(cls, context, obj_id, target_tenant): ctx_admin = context.elevated() rb_model = cls.rbac_db_cls.db_model bound_tenant_ids = cls.get_bound_tenant_ids(ctx_admin, obj_id) db_obj_sharing_entries = cls._get_db_obj_rbac_entries( ctx_admin, obj_id, models.ACCESS_SHARED) def raise_policy_in_use(): raise ext_rbac.RbacPolicyInUse( object_id=obj_id, details='tenant_id={}'.format(target_tenant)) if target_tenant != '*': # if there is a wildcard rule, we can return early because 
it # shares the object globally wildcard_sharing_entries = db_obj_sharing_entries.filter( rb_model.target_tenant == '*') if wildcard_sharing_entries.count(): return if target_tenant in bound_tenant_ids: raise_policy_in_use() return # for the wildcard we need to query all of the rbac entries to # see if any allow the object sharing other_target_tenants = cls._get_tenants_with_shared_access_to_db_obj( ctx_admin, obj_id) if not bound_tenant_ids.issubset(other_target_tenants): raise_policy_in_use() @classmethod def validate_rbac_policy_delete(cls, resource, event, trigger, payload=None): """Callback to handle RBAC_POLICY, BEFORE_DELETE callback. :raises: RbacPolicyInUse -- in case the policy is in use. """ context = payload.context policy = payload.latest_state if policy['action'] != models.ACCESS_SHARED: return target_tenant = policy['target_tenant'] db_obj = obj_db_api.get_object( cls, context.elevated(), id=policy['object_id']) if db_obj.tenant_id == target_tenant: return cls._validate_rbac_policy_delete(context=context, obj_id=policy['object_id'], target_tenant=target_tenant) @classmethod def validate_rbac_policy_create(cls, resource, event, trigger, payload=None): """Callback to handle RBAC_POLICY, BEFORE_CREATE callback. """ pass @classmethod def validate_rbac_policy_update(cls, resource, event, trigger, payload=None): """Callback to handle RBAC_POLICY, BEFORE_UPDATE callback. :raises: RbacPolicyInUse -- in case the update is forbidden. """ policy = payload.latest_state prev_tenant = policy['target_tenant'] new_tenant = payload.request_body['target_tenant'] if prev_tenant == new_tenant: return if new_tenant != '*': return cls.validate_rbac_policy_delete( resource, event, trigger, payload=payload) @classmethod def validate_rbac_policy_change(cls, resource, event, trigger, payload=None): """Callback to validate changes. This is the dispatching function for create, update and delete callbacks. On creation and update, verify that the creator is an admin or owns the resource being shared. """ object_type = payload.metadata.get('object_type') context = payload.context policy = (payload.request_body if event == events.BEFORE_CREATE else payload.latest_state) # TODO(hdaniel): As this code was shamelessly stolen from # NeutronDbPluginV2.validate_network_rbac_policy_change(), those pieces # should be synced and contain the same bugs, until Network RBAC logic # (hopefully) melded with this one. 
if object_type != cls.rbac_db_cls.db_model.object_type: return db_obj = obj_db_api.get_object( cls, context.elevated(), id=policy['object_id']) if event in (events.BEFORE_CREATE, events.BEFORE_UPDATE): if (not context.is_admin and db_obj['tenant_id'] != context.tenant_id): msg = _("Only admins can manipulate policies on objects " "they do not own") raise exceptions.InvalidInput(error_message=msg) callback_map = {events.BEFORE_CREATE: cls.validate_rbac_policy_create, events.BEFORE_UPDATE: cls.validate_rbac_policy_update, events.BEFORE_DELETE: cls.validate_rbac_policy_delete} if event in callback_map: return callback_map[event](resource, event, trigger, payload=payload) def attach_rbac(self, obj_id, project_id, target_tenant='*'): obj_type = self.rbac_db_cls.db_model.object_type rbac_policy = {'rbac_policy': {'object_id': obj_id, 'target_tenant': target_tenant, 'project_id': project_id, 'object_type': obj_type, 'action': models.ACCESS_SHARED}} return self.create_rbac_policy(self.obj_context, rbac_policy) def update_shared(self, is_shared_new, obj_id): admin_context = self.obj_context.elevated() shared_prev = obj_db_api.get_object(self.rbac_db_cls, admin_context, object_id=obj_id, target_tenant='*', action=models.ACCESS_SHARED) is_shared_prev = bool(shared_prev) if is_shared_prev == is_shared_new: return # 'shared' goes False -> True if not is_shared_prev and is_shared_new: self.attach_rbac(obj_id, self.obj_context.tenant_id) return # 'shared' goes True -> False is actually an attempt to delete # rbac rule for sharing obj_id with target_tenant = '*' self._validate_rbac_policy_delete(self.obj_context, obj_id, '*') return self.obj_context.session.delete(shared_prev) def from_db_object(self, db_obj): self._load_shared(db_obj) super(RbacNeutronDbObjectMixin, self).from_db_object(db_obj) def obj_load_attr(self, attrname): if attrname == 'shared': return self._load_shared() super(RbacNeutronDbObjectMixin, self).obj_load_attr(attrname) def _load_shared(self, db_obj=None): # Do not override 'shared' attribute on create() or update() if 'shared' in self.obj_get_changes(): return if db_obj: # NOTE(korzen) db_obj is passed when object is loaded from DB rbac_entries = db_obj.get('rbac_entries') or {} shared = self.is_network_shared(self.obj_context, rbac_entries) else: # NOTE(korzen) this case is used when object was # instantiated and without DB interaction (get_object(s), update, # create), it should be rare case to load 'shared' by that method shared = self.get_shared_with_tenant( self.obj_context.elevated(), self.rbac_db_cls, self.id, self.project_id ) setattr(self, 'shared', shared) self.obj_reset_changes(['shared']) def _update_post(self, obj_changes): if "shared" in obj_changes: self.update_shared(self.shared, self.id) def _update_hook(self, update_orig): with self.db_context_writer(self.obj_context): # NOTE(slaweq): copy of object changes is required to pass it later to # _update_post method because update() will reset all those changes obj_changes = self.obj_get_changes() update_orig(self) _update_post(self, obj_changes) self._load_shared(db_obj=self.db_obj) def _create_post(self): if self.shared: self.attach_rbac(self.id, self.project_id) def _create_hook(self, orig_create): with self.db_context_writer(self.obj_context): orig_create(self) _create_post(self) self._load_shared(db_obj=self.db_obj) def _to_dict_hook(self, to_dict_orig): dct = to_dict_orig(self) if self.obj_context: dct['shared'] = self.is_shared_with_tenant(self.obj_context, self.id, self.obj_context.tenant_id) else: # most OVO 
objects on an agent will not have a context set on the # object because they will be generated from obj_from_primitive. dct['shared'] = False return dct class RbacNeutronMetaclass(type): """Adds support for RBAC in NeutronDbObjects. Injects code for CRUD operations and modifies existing ops to do so. """ @classmethod def _get_attribute(cls, attribute_name, bases): for b in bases: attribute = getattr(b, attribute_name, None) if attribute: return attribute @classmethod def get_attribute(cls, attribute_name, bases, dct): return (dct.get(attribute_name, None) or cls._get_attribute(attribute_name, bases)) @classmethod def update_synthetic_fields(cls, bases, dct): if not dct.get('synthetic_fields', None): synthetic_attr = cls.get_attribute('synthetic_fields', bases, dct) dct['synthetic_fields'] = synthetic_attr or [] if 'shared' in dct['synthetic_fields']: raise exceptions.ObjectActionError( action=_('shared attribute switching to synthetic'), reason=_('already a synthetic attribute')) dct['synthetic_fields'].append('shared') @staticmethod def subscribe_to_rbac_events(class_instance): for e in (events.BEFORE_CREATE, events.BEFORE_UPDATE, events.BEFORE_DELETE): registry.subscribe(class_instance.validate_rbac_policy_change, resources.RBAC_POLICY, e) @staticmethod def validate_existing_attrs(cls_name, dct): if 'shared' not in dct['fields']: raise KeyError(_('No shared key in %s fields') % cls_name) if 'rbac_db_cls' not in dct: raise AttributeError(_('rbac_db_cls not found in %s') % cls_name) @staticmethod def get_replaced_method(orig_method, new_method): def func(self): return new_method(self, orig_method) return func @classmethod def replace_class_methods_with_hooks(cls, bases, dct): methods_replacement_map = {'create': _create_hook, 'update': _update_hook, 'to_dict': _to_dict_hook} for orig_method_name, new_method in methods_replacement_map.items(): orig_method = cls.get_attribute(orig_method_name, bases, dct) hook_method = cls.get_replaced_method(orig_method, new_method) dct[orig_method_name] = hook_method def __new__(cls, name, bases, dct): cls.validate_existing_attrs(name, dct) cls.update_synthetic_fields(bases, dct) cls.replace_class_methods_with_hooks(bases, dct) klass = type(name, (RbacNeutronDbObjectMixin,) + bases, dct) klass.add_extra_filter_name('shared') cls.subscribe_to_rbac_events(klass) return klass NeutronRbacObject = with_metaclass(RbacNeutronMetaclass, base.NeutronDbObject) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/router.py0000644000175000017500000002717000000000000022167 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
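# The RouterRoute object below round-trips its address fields between DB
# strings and netaddr types. An illustrative sketch of the conversion done
# by modify_fields_from_db()/modify_fields_to_db():
#
#     db row:  destination='10.0.0.0/24', nexthop='10.0.0.1'
#     object:  destination=net_utils.AuthenticIPNetwork('10.0.0.0/24'),
#              nexthop=netaddr.IPAddress('10.0.0.1')
#
# filter_to_str() converts the values back to plain strings before they are
# written to the database.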
import itertools import netaddr from neutron_lib.api.definitions import availability_zone as az_def from neutron_lib.api.validators import availability_zone as az_validator from neutron_lib import constants as n_const from neutron_lib.objects import common_types from neutron_lib.utils import net as net_utils from oslo_versionedobjects import fields as obj_fields import six from sqlalchemy import func from neutron.db.models import dvr as dvr_models from neutron.db.models import l3 from neutron.db.models import l3_attrs from neutron.db.models import l3agent as rb_model from neutron.db import models_v2 from neutron.objects import base @base.NeutronObjectRegistry.register class RouterRoute(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = l3.RouterRoute fields = { 'router_id': common_types.UUIDField(), 'destination': common_types.IPNetworkField(), 'nexthop': obj_fields.IPAddressField() } primary_keys = ['router_id', 'destination', 'nexthop'] foreign_keys = {'Router': {'router_id': 'id'}} @classmethod def modify_fields_from_db(cls, db_obj): result = super(RouterRoute, cls).modify_fields_from_db(db_obj) if 'destination' in result: result['destination'] = net_utils.AuthenticIPNetwork( result['destination']) if 'nexthop' in result: result['nexthop'] = netaddr.IPAddress(result['nexthop']) return result @classmethod def modify_fields_to_db(cls, fields): result = super(RouterRoute, cls).modify_fields_to_db(fields) if 'destination' in result: result['destination'] = cls.filter_to_str(result['destination']) if 'nexthop' in result: result['nexthop'] = cls.filter_to_str(result['nexthop']) return result @base.NeutronObjectRegistry.register class RouterExtraAttributes(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = l3_attrs.RouterExtraAttributes fields = { 'router_id': common_types.UUIDField(), 'distributed': obj_fields.BooleanField(default=False), 'service_router': obj_fields.BooleanField(default=False), 'ha': obj_fields.BooleanField(default=False), 'ha_vr_id': obj_fields.IntegerField(nullable=True), 'availability_zone_hints': obj_fields.ListOfStringsField(nullable=True) } primary_keys = ['router_id'] foreign_keys = {'Router': {'router_id': 'id'}} @classmethod def modify_fields_from_db(cls, db_obj): result = super(RouterExtraAttributes, cls).modify_fields_from_db( db_obj) if az_def.AZ_HINTS in result: result[az_def.AZ_HINTS] = ( az_validator.convert_az_string_to_list( result[az_def.AZ_HINTS])) return result @classmethod def modify_fields_to_db(cls, fields): result = super(RouterExtraAttributes, cls).modify_fields_to_db(fields) if az_def.AZ_HINTS in result: result[az_def.AZ_HINTS] = ( az_validator.convert_az_list_to_string( result[az_def.AZ_HINTS])) return result @classmethod def get_router_agents_count(cls, context): # TODO(sshank): This is pulled out from l3_agentschedulers_db.py # until a way to handle joins is figured out. binding_model = rb_model.RouterL3AgentBinding sub_query = (context.session.query( binding_model.router_id, func.count(binding_model.router_id).label('count')). join(l3_attrs.RouterExtraAttributes, binding_model.router_id == l3_attrs.RouterExtraAttributes.router_id). join(l3.Router). group_by(binding_model.router_id).subquery()) query = (context.session.query(l3.Router, sub_query.c.count). 
outerjoin(sub_query)) return [(router, agent_count) for router, agent_count in query] @base.NeutronObjectRegistry.register class RouterPort(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = l3.RouterPort primary_keys = ['router_id', 'port_id'] foreign_keys = {'Router': {'router_id': 'id'}, 'Port': {'port_id': 'id'}} fields = { 'router_id': common_types.UUIDField(), 'port_id': common_types.UUIDField(), 'port_type': obj_fields.StringField(nullable=True), } @classmethod def get_router_ids_by_subnetpool(cls, context, subnetpool_id): query = context.session.query(l3.RouterPort.router_id) query = query.join(models_v2.Port) query = query.join( models_v2.Subnet, models_v2.Subnet.network_id == models_v2.Port.network_id) query = query.filter( models_v2.Subnet.subnetpool_id == subnetpool_id, l3.RouterPort.port_type.in_(n_const.ROUTER_PORT_OWNERS)) query = query.distinct() return [r[0] for r in query] @base.NeutronObjectRegistry.register class DVRMacAddress(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = dvr_models.DistributedVirtualRouterMacAddress primary_keys = ['host'] fields = { 'host': obj_fields.StringField(), 'mac_address': common_types.MACAddressField() } @classmethod def modify_fields_from_db(cls, db_obj): fields = super(DVRMacAddress, cls).modify_fields_from_db(db_obj) if 'mac_address' in fields: # NOTE(tonytan4ever): Here uses AuthenticEUI to retain the format # passed from API. fields['mac_address'] = net_utils.AuthenticEUI( fields['mac_address']) return fields @classmethod def modify_fields_to_db(cls, fields): result = super(DVRMacAddress, cls).modify_fields_to_db(fields) if 'mac_address' in fields: result['mac_address'] = cls.filter_to_str(result['mac_address']) return result @base.NeutronObjectRegistry.register class Router(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = l3.Router fields = { 'id': common_types.UUIDField(), 'project_id': obj_fields.StringField(nullable=True), 'name': obj_fields.StringField(nullable=True), 'status': common_types.RouterStatusEnumField(nullable=True), 'admin_state_up': obj_fields.BooleanField(nullable=True), 'gw_port_id': common_types.UUIDField(nullable=True), 'enable_snat': obj_fields.BooleanField(default=True), 'flavor_id': common_types.UUIDField(nullable=True), 'extra_attributes': obj_fields.ObjectField( 'RouterExtraAttributes', nullable=True), } synthetic_fields = ['extra_attributes'] fields_no_update = ['project_id'] @classmethod def check_routers_not_owned_by_projects(cls, context, gw_ports, projects): """This method is to check whether routers that aren't owned by existing projects or not """ # TODO(hungpv) We may want to implement NOT semantic in get_object(s) query = context.session.query(l3.Router).filter( l3.Router.gw_port_id.in_(gw_ports)) query = query.filter( ~l3.Router.project_id.in_(projects)) return bool(query.count()) @base.NeutronObjectRegistry.register class FloatingIP(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = l3.FloatingIP fields = { 'id': common_types.UUIDField(), 'project_id': obj_fields.StringField(nullable=True), 'floating_ip_address': obj_fields.IPAddressField(), 'floating_network_id': common_types.UUIDField(), 'floating_port_id': common_types.UUIDField(), 'fixed_port_id': common_types.UUIDField(nullable=True), 'fixed_ip_address': obj_fields.IPAddressField(nullable=True), 'router_id': common_types.UUIDField(nullable=True), 'last_known_router_id': common_types.UUIDField(nullable=True), 'status': 
common_types.FloatingIPStatusEnumField(nullable=True), 'dns': obj_fields.ObjectField('FloatingIPDNS', nullable=True), } fields_no_update = ['project_id', 'floating_ip_address', 'floating_network_id', 'floating_port_id'] synthetic_fields = ['dns'] @classmethod def modify_fields_from_db(cls, db_obj): result = super(FloatingIP, cls).modify_fields_from_db(db_obj) if 'fixed_ip_address' in result: result['fixed_ip_address'] = netaddr.IPAddress( result['fixed_ip_address']) if 'floating_ip_address' in result: result['floating_ip_address'] = netaddr.IPAddress( result['floating_ip_address']) return result @classmethod def modify_fields_to_db(cls, fields): result = super(FloatingIP, cls).modify_fields_to_db(fields) if 'fixed_ip_address' in result: if result['fixed_ip_address'] is not None: result['fixed_ip_address'] = cls.filter_to_str( result['fixed_ip_address']) if 'floating_ip_address' in result: result['floating_ip_address'] = cls.filter_to_str( result['floating_ip_address']) return result @classmethod def get_scoped_floating_ips(cls, context, router_ids): query = context.session.query(l3.FloatingIP, models_v2.SubnetPool.address_scope_id) query = query.join( models_v2.Port, l3.FloatingIP.fixed_port_id == models_v2.Port.id) # Outer join of Subnet can cause each ip to have more than one row. query = query.outerjoin( models_v2.Subnet, models_v2.Subnet.network_id == models_v2.Port.network_id) query = query.filter(models_v2.Subnet.ip_version == 4) query = query.outerjoin( models_v2.SubnetPool, models_v2.Subnet.subnetpool_id == models_v2.SubnetPool.id) # Filter out on router_ids query = query.filter(l3.FloatingIP.router_id.in_(router_ids)) return cls._unique_floatingip_iterator(context, query) @classmethod def _unique_floatingip_iterator(cls, context, query): """Iterates over only one row per floating ip. Ignores others.""" # Group rows by fip id. They must be sorted by same. q = query.order_by(l3.FloatingIP.id) keyfunc = lambda row: row[0]['id'] group_iterator = itertools.groupby(q, keyfunc) # Just hit the first row of each group for key, value in group_iterator: row = [r for r in six.next(value)] yield (cls._load_object(context, row[0]), row[1]) @base.NeutronObjectRegistry.register class DvrFipGatewayPortAgentBinding(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = dvr_models.DvrFipGatewayPortAgentBinding new_facade = True primary_keys = ['network_id', 'agent_id'] fields = { 'network_id': common_types.UUIDField(), 'agent_id': common_types.UUIDField(), } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/securitygroup.py0000644000175000017500000001544000000000000023570 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
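# NOTE(editor): a self-contained sketch of the "first row per group" idiom
# used by FloatingIP._unique_floatingip_iterator above. Plain tuples stand
# in for the (FloatingIP, address_scope_id) query rows; as in the real
# method, the rows must already be sorted by the grouping key.
import itertools

rows = [
    ({'id': 'fip-1'}, 'scope-a'),
    ({'id': 'fip-1'}, 'scope-b'),  # duplicate row produced by an outer join
    ({'id': 'fip-2'}, 'scope-a'),
]


def unique_by_fip(sorted_rows):
    keyfunc = lambda row: row[0]['id']
    for _key, group in itertools.groupby(sorted_rows, keyfunc):
        # Yield only the first row of each group; ignore the rest.
        yield next(group)


assert [row[0]['id'] for row in unique_by_fip(rows)] == ['fip-1', 'fip-2']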
from neutron_lib import context as context_lib from neutron_lib.objects import common_types from neutron_lib.utils import net as net_utils from oslo_utils import versionutils from oslo_versionedobjects import fields as obj_fields from sqlalchemy import or_ from neutron.db.models import securitygroup as sg_models from neutron.db import rbac_db_models from neutron.objects import base from neutron.objects import ports from neutron.objects import rbac from neutron.objects import rbac_db @base.NeutronObjectRegistry.register class SecurityGroupRBAC(rbac.RBACBaseObject): # Version 1.0: Initial version VERSION = '1.0' db_model = rbac_db_models.SecurityGroupRBAC @base.NeutronObjectRegistry.register class SecurityGroup(rbac_db.NeutronRbacObject): # Version 1.0: Initial version # Version 1.1: Add RBAC support # Version 1.2: Added stateful support VERSION = '1.2' # required by RbacNeutronMetaclass rbac_db_cls = SecurityGroupRBAC db_model = sg_models.SecurityGroup fields = { 'id': common_types.UUIDField(), 'name': obj_fields.StringField(nullable=True), 'project_id': obj_fields.StringField(nullable=True), 'shared': obj_fields.BooleanField(default=False), 'stateful': obj_fields.BooleanField(default=True), 'is_default': obj_fields.BooleanField(default=False), 'rules': obj_fields.ListOfObjectsField( 'SecurityGroupRule', nullable=True ), # NOTE(ihrachys): we don't include source_rules that is present in the # model until we realize it's actually needed } fields_no_update = ['project_id', 'is_default'] synthetic_fields = ['is_default', 'rules'] extra_filter_names = {'is_default'} lazy_fields = set(['rules']) def create(self): # save is_default before super() resets it to False is_default = self.is_default with self.db_context_writer(self.obj_context): super(SecurityGroup, self).create() if is_default: default_group = DefaultSecurityGroup( self.obj_context, project_id=self.project_id, security_group_id=self.id) default_group.create() self.is_default = True self.obj_reset_changes(['is_default']) def from_db_object(self, db_obj): super(SecurityGroup, self).from_db_object(db_obj) if self._load_synthetic_fields: setattr(self, 'is_default', bool(db_obj.get('default_security_group'))) self.obj_reset_changes(['is_default']) @classmethod def get_sg_by_id(cls, context, sg_id): return super(SecurityGroup, cls).get_object(context, id=sg_id) def obj_make_compatible(self, primitive, target_version): _target_version = versionutils.convert_version_to_tuple(target_version) if _target_version < (1, 1): primitive.pop('shared') if _target_version < (1, 2): primitive.pop('stateful') @classmethod def get_bound_tenant_ids(cls, context, obj_id): port_objs = ports.Port.get_objects(context, security_group_ids=[obj_id]) return {port.tenant_id for port in port_objs} @base.NeutronObjectRegistry.register class DefaultSecurityGroup(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = sg_models.DefaultSecurityGroup fields = { 'project_id': obj_fields.StringField(), 'security_group_id': common_types.UUIDField(), } fields_no_update = ['security_group_id'] primary_keys = ['project_id'] @base.NeutronObjectRegistry.register class SecurityGroupRule(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = sg_models.SecurityGroupRule fields = { 'id': common_types.UUIDField(), 'project_id': obj_fields.StringField(nullable=True), 'security_group_id': common_types.UUIDField(), 'remote_group_id': common_types.UUIDField(nullable=True), 'direction': common_types.FlowDirectionEnumField(nullable=True), 
'ethertype': common_types.EtherTypeEnumField(nullable=True), 'protocol': common_types.IpProtocolEnumField(nullable=True), 'port_range_min': common_types.PortRangeWith0Field(nullable=True), 'port_range_max': common_types.PortRangeWith0Field(nullable=True), 'remote_ip_prefix': common_types.IPNetworkField(nullable=True), } foreign_keys = {'SecurityGroup': {'security_group_id': 'id'}} fields_no_update = ['project_id', 'security_group_id', 'remote_group_id'] # TODO(sayalilunkad): get rid of it once we switch the db model to using # custom types. @classmethod def modify_fields_to_db(cls, fields): result = super(SecurityGroupRule, cls).modify_fields_to_db(fields) remote_ip_prefix = result.get('remote_ip_prefix') if remote_ip_prefix: result['remote_ip_prefix'] = cls.filter_to_str(remote_ip_prefix) return result # TODO(sayalilunkad): get rid of it once we switch the db model to using # custom types. @classmethod def modify_fields_from_db(cls, db_obj): fields = super(SecurityGroupRule, cls).modify_fields_from_db(db_obj) if 'remote_ip_prefix' in fields: fields['remote_ip_prefix'] = ( net_utils.AuthenticIPNetwork(fields['remote_ip_prefix'])) return fields @classmethod def get_security_group_rule_ids(cls, project_id): """Retrieve all SG rules related to this project_id This method returns the SG rule IDs that meet these conditions: - The rule belongs to this project_id - The rule belongs to a security group that belongs to the project_id """ context = context_lib.get_admin_context() query = context.session.query(cls.db_model.id) query = query.join( SecurityGroup.db_model, cls.db_model.security_group_id == SecurityGroup.db_model.id) clauses = or_(SecurityGroup.db_model.project_id == project_id, cls.db_model.project_id == project_id) rule_ids = query.filter(clauses).all() return [rule_id[0] for rule_id in rule_ids] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/servicetype.py0000644000175000017500000000226100000000000023203 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
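# NOTE(editor): a minimal sketch of the version-downgrade pattern used by
# SecurityGroup.obj_make_compatible above. Per that class's version history,
# 'shared' arrived in 1.1 and 'stateful' in 1.2, so both are stripped from
# the serialized primitive when targeting an older consumer. The standalone
# function below is illustrative, not Neutron code.
from oslo_utils import versionutils


def make_compatible(primitive, target_version):
    _target = versionutils.convert_version_to_tuple(target_version)
    if _target < (1, 1):
        primitive.pop('shared', None)
    if _target < (1, 2):
        primitive.pop('stateful', None)
    return primitive


prim = {'id': 'sg-1', 'shared': False, 'stateful': True}
assert make_compatible(dict(prim), '1.0') == {'id': 'sg-1'}
assert make_compatible(dict(prim), '1.1') == {'id': 'sg-1', 'shared': False}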
from neutron_lib.objects import common_types
from oslo_versionedobjects import fields as obj_fields

from neutron.db.models import servicetype as models
from neutron.objects import base


@base.NeutronObjectRegistry.register
class ProviderResourceAssociation(base.NeutronDbObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = models.ProviderResourceAssociation

    primary_keys = ['provider_name', 'resource_id']

    fields = {
        'provider_name': obj_fields.StringField(),
        'resource_id': common_types.UUIDField(),
    }
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/stdattrs.py0000644000175000017500000000226200000000000022512 0ustar00coreycorey00000000000000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib.objects.extensions import standardattributes as stdattr_obj
from oslo_versionedobjects import fields as obj_fields

from neutron.db import standard_attr
from neutron.objects import base


# TODO(ihrachys): add unit tests for the object
@base.NeutronObjectRegistry.register
class StandardAttribute(base.NeutronDbObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    new_facade = True

    db_model = standard_attr.StandardAttribute

    fields = {
        'id': obj_fields.IntegerField(),
        'resource_type': obj_fields.StringField(),
    }
    fields.update(stdattr_obj.STANDARD_ATTRIBUTES)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/subnet.py0000644000175000017500000005142400000000000022146 0ustar00coreycorey00000000000000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
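# NOTE(editor): a small sketch of the shared-field-map idiom used by
# StandardAttribute above, where a common dict of field definitions is
# merged into a class's own 'fields' with dict.update(). COMMON_FIELDS and
# ExampleObject below are illustrative, not Neutron definitions.
from oslo_versionedobjects import fields as obj_fields

COMMON_FIELDS = {
    'created_at': obj_fields.DateTimeField(nullable=True),
    'updated_at': obj_fields.DateTimeField(nullable=True),
}


class ExampleObject(object):
    fields = {
        'id': obj_fields.IntegerField(),
    }
    # Merge the shared definitions, mirroring
    # fields.update(stdattr_obj.STANDARD_ATTRIBUTES) above.
    fields.update(COMMON_FIELDS)


assert set(ExampleObject.fields) == {'id', 'created_at', 'updated_at'}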
import netaddr from neutron_lib.api import validators from neutron_lib import constants as const from neutron_lib.db import model_query from neutron_lib.objects import common_types from neutron_lib.utils import net as net_utils from oslo_utils import versionutils from oslo_versionedobjects import fields as obj_fields from sqlalchemy import and_, or_ from neutron.db.models import dns as dns_models from neutron.db.models import segment as segment_model from neutron.db.models import subnet_service_type from neutron.db import models_v2 from neutron.ipam import exceptions as ipam_exceptions from neutron.objects import base from neutron.objects import network from neutron.objects import rbac_db from neutron.services.segments import exceptions as segment_exc @base.NeutronObjectRegistry.register class DNSNameServer(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = models_v2.DNSNameServer primary_keys = ['address', 'subnet_id'] foreign_keys = {'Subnet': {'subnet_id': 'id'}} fields = { 'address': obj_fields.StringField(), 'subnet_id': common_types.UUIDField(), 'order': obj_fields.IntegerField() } @classmethod def get_objects(cls, context, _pager=None, validate_filters=True, **kwargs): """Fetch DNSNameServer objects with default sort by 'order' field. """ if not _pager: _pager = base.Pager() if not _pager.sorts: # (NOTE) True means ASC, False is DESC _pager.sorts = [('order', True)] return super(DNSNameServer, cls).get_objects(context, _pager, validate_filters, **kwargs) @base.NeutronObjectRegistry.register class Route(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' new_facade = True db_model = models_v2.SubnetRoute primary_keys = ['destination', 'nexthop', 'subnet_id'] foreign_keys = {'Subnet': {'subnet_id': 'id'}} fields = { 'subnet_id': common_types.UUIDField(), 'destination': common_types.IPNetworkField(), 'nexthop': obj_fields.IPAddressField() } @classmethod def modify_fields_from_db(cls, db_obj): # TODO(korzen) remove this method when IP and CIDR decorator ready result = super(Route, cls).modify_fields_from_db(db_obj) if 'destination' in result: result['destination'] = net_utils.AuthenticIPNetwork( result['destination']) if 'nexthop' in result: result['nexthop'] = netaddr.IPAddress(result['nexthop']) return result @classmethod def modify_fields_to_db(cls, fields): # TODO(korzen) remove this method when IP and CIDR decorator ready result = super(Route, cls).modify_fields_to_db(fields) if 'destination' in result: result['destination'] = cls.filter_to_str(result['destination']) if 'nexthop' in fields: result['nexthop'] = cls.filter_to_str(result['nexthop']) return result @base.NeutronObjectRegistry.register class IPAllocationPool(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = models_v2.IPAllocationPool foreign_keys = {'Subnet': {'subnet_id': 'id'}} fields_need_translation = { 'start': 'first_ip', 'end': 'last_ip' } fields = { 'id': common_types.UUIDField(), 'subnet_id': common_types.UUIDField(), 'start': obj_fields.IPAddressField(), 'end': obj_fields.IPAddressField() } fields_no_update = ['subnet_id'] @classmethod def modify_fields_from_db(cls, db_obj): # TODO(korzen) remove this method when IP and CIDR decorator ready result = super(IPAllocationPool, cls).modify_fields_from_db(db_obj) if 'start' in result: result['start'] = netaddr.IPAddress(result['start']) if 'end' in result: result['end'] = netaddr.IPAddress(result['end']) return result @classmethod def modify_fields_to_db(cls, fields): # TODO(korzen) remove 
this method when IP and CIDR decorator ready result = super(IPAllocationPool, cls).modify_fields_to_db(fields) if 'first_ip' in result: result['first_ip'] = cls.filter_to_str(result['first_ip']) if 'last_ip' in result: result['last_ip'] = cls.filter_to_str(result['last_ip']) return result @base.NeutronObjectRegistry.register class SubnetServiceType(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = subnet_service_type.SubnetServiceType foreign_keys = {'Subnet': {'subnet_id': 'id'}} primary_keys = ['subnet_id', 'service_type'] fields = { 'subnet_id': common_types.UUIDField(), 'service_type': obj_fields.StringField() } @classmethod def query_filter_service_subnets(cls, query, service_type): # TODO(tuanvu): find OVO-like solution for handling "join queries" Subnet = models_v2.Subnet ServiceType = subnet_service_type.SubnetServiceType query = query.add_entity(ServiceType) query = query.outerjoin(ServiceType) query = query.filter(or_( ServiceType.service_type.is_(None), ServiceType.service_type == service_type, # Allow DHCP ports to be created on subnets of any # service type when DHCP is enabled on the subnet. and_(Subnet.enable_dhcp.is_(True), service_type == const.DEVICE_OWNER_DHCP))) return query.from_self(Subnet) # RBAC metaclass is not applied here because 'shared' attribute of Subnet # is dependent on Network 'shared' state, and in Subnet object # it can be read-only. The necessary changes are applied manually: # - defined 'shared' attribute in 'fields' # - added 'shared' to synthetic_fields # - registered extra_filter_name for 'shared' attribute # - added loading shared attribute based on network 'rbac_entries' @base.NeutronObjectRegistry.register class Subnet(base.NeutronDbObject): # Version 1.0: Initial version # Version 1.1: Add dns_publish_fixed_ip field VERSION = '1.1' db_model = models_v2.Subnet new_facade = True fields = { 'id': common_types.UUIDField(), 'project_id': obj_fields.StringField(nullable=True), 'name': obj_fields.StringField(nullable=True), 'network_id': common_types.UUIDField(), 'segment_id': common_types.UUIDField(nullable=True), # NOTE: subnetpool_id can be 'prefix_delegation' string # when the IPv6 Prefix Delegation is enabled 'subnetpool_id': obj_fields.StringField(nullable=True), 'ip_version': common_types.IPVersionEnumField(), 'cidr': common_types.IPNetworkField(), 'gateway_ip': obj_fields.IPAddressField(nullable=True), 'allocation_pools': obj_fields.ListOfObjectsField('IPAllocationPool', nullable=True), 'enable_dhcp': obj_fields.BooleanField(nullable=True), 'shared': obj_fields.BooleanField(nullable=True), 'dns_nameservers': obj_fields.ListOfObjectsField('DNSNameServer', nullable=True), 'dns_publish_fixed_ip': obj_fields.BooleanField(nullable=True), 'host_routes': obj_fields.ListOfObjectsField('Route', nullable=True), 'ipv6_ra_mode': common_types.IPV6ModeEnumField(nullable=True), 'ipv6_address_mode': common_types.IPV6ModeEnumField(nullable=True), 'service_types': obj_fields.ListOfStringsField(nullable=True) } synthetic_fields = ['allocation_pools', 'dns_nameservers', 'dns_publish_fixed_ip', 'host_routes', 'service_types', 'shared'] foreign_keys = {'Network': {'network_id': 'id'}} fields_no_update = ['project_id', 'network_id'] fields_need_translation = { 'host_routes': 'routes' } def __init__(self, context=None, **kwargs): super(Subnet, self).__init__(context, **kwargs) self.add_extra_filter_name('shared') def obj_load_attr(self, attrname): if attrname == 'dns_publish_fixed_ip': return self._load_dns_publish_fixed_ip() if attrname 
== 'shared': return self._load_shared() if attrname == 'service_types': return self._load_service_types() super(Subnet, self).obj_load_attr(attrname) def _load_dns_publish_fixed_ip(self, db_obj=None): if db_obj: object_data = db_obj.get('dns_publish_fixed_ip', None) else: object_data = SubnetDNSPublishFixedIP.get_objects( self.obj_context, subnet_id=self.id) dns_publish_fixed_ip = False if object_data: dns_publish_fixed_ip = object_data.get( 'dns_publish_fixed_ip') setattr(self, 'dns_publish_fixed_ip', dns_publish_fixed_ip) self.obj_reset_changes(['dns_publish_fixed_ip']) def _load_shared(self, db_obj=None): if db_obj: # NOTE(korzen) db_obj is passed when Subnet object is loaded # from DB rbac_entries = db_obj.get('rbac_entries') or {} shared = (rbac_db.RbacNeutronDbObjectMixin. is_network_shared(self.obj_context, rbac_entries)) else: # NOTE(korzen) this case is used when Subnet object was # instantiated and without DB interaction (get_object(s), update, # create), it should be rare case to load 'shared' by that method shared = (rbac_db.RbacNeutronDbObjectMixin. get_shared_with_tenant(self.obj_context.elevated(), network.NetworkRBAC, self.network_id, self.project_id)) setattr(self, 'shared', shared) self.obj_reset_changes(['shared']) def _load_service_types(self, db_obj=None): if db_obj: service_types = db_obj.get('service_types', []) else: service_types = SubnetServiceType.get_objects(self.obj_context, subnet_id=self.id) self.service_types = [service_type['service_type'] for service_type in service_types] self.obj_reset_changes(['service_types']) def from_db_object(self, db_obj): super(Subnet, self).from_db_object(db_obj) self._load_dns_publish_fixed_ip(db_obj) self._load_shared(db_obj) self._load_service_types(db_obj) @classmethod def modify_fields_from_db(cls, db_obj): # TODO(korzen) remove this method when IP and CIDR decorator ready result = super(Subnet, cls).modify_fields_from_db(db_obj) if 'cidr' in result: result['cidr'] = net_utils.AuthenticIPNetwork(result['cidr']) if 'gateway_ip' in result and result['gateway_ip'] is not None: result['gateway_ip'] = netaddr.IPAddress(result['gateway_ip']) return result @classmethod def modify_fields_to_db(cls, fields): # TODO(korzen) remove this method when IP and CIDR decorator ready result = super(Subnet, cls).modify_fields_to_db(fields) if 'cidr' in result: result['cidr'] = cls.filter_to_str(result['cidr']) if 'gateway_ip' in result and result['gateway_ip'] is not None: result['gateway_ip'] = cls.filter_to_str(result['gateway_ip']) return result @classmethod def find_candidate_subnets(cls, context, network_id, host, service_type, fixed_configured, fixed_ips): """Find canditate subnets for the network, host, and service_type""" query = cls.query_subnets_on_network(context, network_id) query = SubnetServiceType.query_filter_service_subnets( query, service_type) # Select candidate subnets and return them if not cls.is_host_set(host): if fixed_configured: # If fixed_ips in request and host is not known all subnets on # the network are candidates. Host/Segment will be validated # on port update with binding:host_id set. Allocation _cannot_ # be deferred as requested fixed_ips would then be lost. return cls._query_filter_by_fixed_ips_segment( query, fixed_ips).all() # If the host isn't known, we can't allocate on a routed network. # So, exclude any subnets attached to segments. return cls._query_exclude_subnets_on_segments(query).all() # The host is known. 
Consider both routed and non-routed networks results = cls._query_filter_by_segment_host_mapping(query, host).all() # For now, we're using a simplifying assumption that a host will only # touch one segment in a given routed network. Raise exception # otherwise. This restriction may be relaxed as use cases for multiple # mappings are understood. segment_ids = {subnet.segment_id for subnet, mapping in results if mapping} if 1 < len(segment_ids): raise segment_exc.HostConnectedToMultipleSegments( host=host, network_id=network_id) return [subnet for subnet, _mapping in results] @classmethod def _query_filter_by_fixed_ips_segment(cls, query, fixed_ips): """Excludes subnets not on the same segment as fixed_ips :raises: FixedIpsSubnetsNotOnSameSegment """ segment_ids = [] for fixed_ip in fixed_ips: subnet = None if 'subnet_id' in fixed_ip: try: subnet = query.filter( cls.db_model.id == fixed_ip['subnet_id']).all()[0] except IndexError: # NOTE(hjensas): The subnet is invalid for the network, # return all subnets. This will be detected in following # IPAM code and some exception will be raised. return query elif 'ip_address' in fixed_ip: ip = netaddr.IPNetwork(fixed_ip['ip_address']) for s in query.all(): if ip in netaddr.IPNetwork(s.cidr): subnet = s break if not subnet: # NOTE(hjensas): The ip address is invalid, return all # subnets. This will be detected in following IPAM code # and some exception will be raised. return query if subnet and subnet.segment_id not in segment_ids: segment_ids.append(subnet.segment_id) if 1 < len(segment_ids): raise segment_exc.FixedIpsSubnetsNotOnSameSegment() segment_id = None if not segment_ids else segment_ids[0] return query.filter(cls.db_model.segment_id == segment_id) @classmethod def _query_filter_by_segment_host_mapping(cls, query, host): # TODO(tuanvu): find OVO-like solution for handling "join queries" and # write unit test for this function """Excludes subnets on segments not reachable by the host The query gets two kinds of subnets: those that are on segments that the host can reach and those that are not on segments at all (assumed reachable by all hosts). Hence, subnets on segments that the host *cannot* reach are excluded. """ SegmentHostMapping = segment_model.SegmentHostMapping # A host has been provided. Consider these two scenarios # 1. Not a routed network: subnets are not on segments # 2. Is a routed network: only subnets on segments mapped to host # The following join query returns results for either. The two are # guaranteed to be mutually exclusive when subnets are created. query = query.add_entity(SegmentHostMapping) query = query.outerjoin( SegmentHostMapping, and_(cls.db_model.segment_id == SegmentHostMapping.segment_id, SegmentHostMapping.host == host)) # Essentially "segment_id IS NULL XNOR host IS NULL" query = query.filter(or_(and_(cls.db_model.segment_id.isnot(None), SegmentHostMapping.host.isnot(None)), and_(cls.db_model.segment_id.is_(None), SegmentHostMapping.host.is_(None)))) return query @classmethod def query_subnets_on_network(cls, context, network_id): query = model_query.get_collection_query(context, cls.db_model) return query.filter(cls.db_model.network_id == network_id) @classmethod def _query_exclude_subnets_on_segments(cls, query): """Excludes all subnets associated with segments For the case where the host is not known, we don't consider any subnets that are on segments. But, we still consider subnets that are not associated with any segment (i.e. for non-routed networks). 
""" return query.filter(cls.db_model.segment_id.is_(None)) @classmethod def is_host_set(cls, host): """Utility to tell if the host is set in the port binding""" # This seems redundant, but its not. Host is unset if its None, '', # or ATTR_NOT_SPECIFIED due to differences in host binding # implementations. return host and validators.is_attr_set(host) @classmethod def network_has_no_subnet(cls, context, network_id, host, service_type): # Determine why we found no subnets to raise the right error query = cls.query_subnets_on_network(context, network_id) if cls.is_host_set(host): # Empty because host isn't mapped to a segment with a subnet? s_query = query.filter(cls.db_model.segment_id.isnot(None)) if s_query.limit(1).count() != 0: # It is a routed network but no subnets found for host raise segment_exc.HostNotConnectedToAnySegment( host=host, network_id=network_id) if not query.limit(1).count(): # Network has *no* subnets of any kind. This isn't an error. return True # Does filtering ineligible service subnets makes the list empty? query = SubnetServiceType.query_filter_service_subnets( query, service_type) if query.limit(1).count(): # No, must be a deferred IP port because there are matching # subnets. Happens on routed networks when host isn't known. raise ipam_exceptions.DeferIpam() return False def obj_make_compatible(self, primitive, target_version): _target_version = versionutils.convert_version_to_tuple(target_version) if _target_version < (1, 1): # version 1.1 adds "dns_publish_fixed_ip" primitive.pop('dns_publish_fixed_ip', None) @base.NeutronObjectRegistry.register class NetworkSubnetLock(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = models_v2.NetworkSubnetLock new_facade = True primary_keys = ['network_id'] fields = { 'network_id': common_types.UUIDField(), 'subnet_id': common_types.UUIDField(nullable=True) } @classmethod def lock_subnet(cls, context, network_id, subnet_id): subnet_lock = super(NetworkSubnetLock, cls).get_object( context, network_id=network_id) if subnet_lock: subnet_lock.subnet_id = subnet_id subnet_lock.update() else: subnet_lock = NetworkSubnetLock(context, network_id=network_id, subnet_id=subnet_id) subnet_lock.create() @base.NeutronObjectRegistry.register class SubnetDNSPublishFixedIP(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = dns_models.SubnetDNSPublishFixedIP primary_keys = ['subnet_id'] fields = { 'subnet_id': common_types.UUIDField(), 'dns_publish_fixed_ip': obj_fields.BooleanField() } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/subnetpool.py0000644000175000017500000001426300000000000023040 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import netaddr from neutron_lib.db import model_query from neutron_lib.objects import common_types from oslo_versionedobjects import fields as obj_fields import sqlalchemy as sa from neutron._i18n import _ from neutron.db import models_v2 as models from neutron.db import rbac_db_models from neutron.extensions import rbac as ext_rbac from neutron.objects import base from neutron.objects.db import api as obj_db_api from neutron.objects import rbac from neutron.objects import rbac_db from neutron.objects import subnet @base.NeutronObjectRegistry.register class SubnetPoolRBAC(rbac.RBACBaseObject): # Version 1.0: Initial version VERSION = '1.0' db_model = rbac_db_models.SubnetPoolRBAC @base.NeutronObjectRegistry.register class SubnetPool(rbac_db.NeutronRbacObject): # Version 1.0: Initial version # Version 1.1: Add RBAC support VERSION = '1.1' # required by RbacNeutronMetaclass rbac_db_cls = SubnetPoolRBAC db_model = models.SubnetPool fields = { 'id': common_types.UUIDField(), 'project_id': obj_fields.StringField(nullable=True), 'name': obj_fields.StringField(nullable=True), 'ip_version': common_types.IPVersionEnumField(), 'default_prefixlen': common_types.IPNetworkPrefixLenField(), 'min_prefixlen': common_types.IPNetworkPrefixLenField(), 'max_prefixlen': common_types.IPNetworkPrefixLenField(), 'shared': obj_fields.BooleanField(), 'is_default': obj_fields.BooleanField(), 'default_quota': obj_fields.IntegerField(nullable=True), 'hash': obj_fields.StringField(nullable=True), 'address_scope_id': common_types.UUIDField(nullable=True), 'prefixes': common_types.ListOfIPNetworksField(nullable=True) } fields_no_update = ['id', 'project_id'] synthetic_fields = ['prefixes'] def from_db_object(self, db_obj): super(SubnetPool, self).from_db_object(db_obj) self.prefixes = [] self.prefixes = [ prefix.cidr for prefix in db_obj.get('prefixes', []) ] self.obj_reset_changes(['prefixes']) def _attach_prefixes(self, prefixes): SubnetPoolPrefix.delete_objects(self.obj_context, subnetpool_id=self.id) for prefix in prefixes: SubnetPoolPrefix(self.obj_context, subnetpool_id=self.id, cidr=prefix).create() self.prefixes = prefixes self.obj_reset_changes(['prefixes']) # TODO(ihrachys): Consider extending base to trigger registered methods def create(self): fields = self.obj_get_changes() with self.db_context_writer(self.obj_context): prefixes = self.prefixes super(SubnetPool, self).create() if 'prefixes' in fields: self._attach_prefixes(prefixes) # TODO(ihrachys): Consider extending base to trigger registered methods def update(self): fields = self.obj_get_changes() with self.db_context_writer(self.obj_context): super(SubnetPool, self).update() if 'prefixes' in fields: self._attach_prefixes(fields['prefixes']) @classmethod def get_bound_tenant_ids(cls, context, obj_id): sn_objs = subnet.Subnet.get_objects(context, subnetpool_id=obj_id) return {snp.project_id for snp in sn_objs} @classmethod def validate_rbac_policy_create(cls, resource, event, trigger, payload=None): context = payload.context policy = payload.request_body db_obj = obj_db_api.get_object( cls, context.elevated(), id=policy['object_id']) if not db_obj["address_scope_id"]: # Nothing to validate return rbac_as_model = rbac_db_models.AddressScopeRBAC # Ensure that target project has access to AS shared_to_target_project_or_to_all = ( sa.and_( rbac_as_model.target_tenant.in_( ["*", policy['target_tenant']] ), rbac_as_model.object_id == db_obj["address_scope_id"] ) ) matching_policies = model_query.query_with_hooks( context, rbac_db_models.AddressScopeRBAC 
).filter(shared_to_target_project_or_to_all).count() if matching_policies == 0: raise ext_rbac.RbacPolicyInitError( object_id=policy['object_id'], reason=_("target project doesn't have access to " "associated address scope.")) @base.NeutronObjectRegistry.register class SubnetPoolPrefix(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = models.SubnetPoolPrefix fields = { 'subnetpool_id': common_types.UUIDField(), 'cidr': common_types.IPNetworkField(), } primary_keys = ['subnetpool_id', 'cidr'] # TODO(ihrachys): get rid of it once we switch the db model to using CIDR # custom type @classmethod def modify_fields_to_db(cls, fields): result = super(SubnetPoolPrefix, cls).modify_fields_to_db(fields) if 'cidr' in result: result['cidr'] = cls.filter_to_str(result['cidr']) return result # TODO(ihrachys): get rid of it once we switch the db model to using CIDR # custom type @classmethod def modify_fields_from_db(cls, db_obj): fields = super(SubnetPoolPrefix, cls).modify_fields_from_db(db_obj) if 'cidr' in fields: fields['cidr'] = netaddr.IPNetwork(fields['cidr']) return fields ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/tag.py0000644000175000017500000001044300000000000021415 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools from neutron_lib.db import model_query from sqlalchemy.orm import aliased from oslo_versionedobjects import fields as obj_fields from neutron.db.models import tag as tag_model from neutron.db import standard_attr from neutron.objects import base # Taggable resources resource_model_map = standard_attr.get_standard_attr_resource_model_map() _filter_methods = [] # prevent GC of our partial functions def _get_tag_list(tag_strings): tags = set() for tag_str in tag_strings: tags |= set(tag_str.split(',')) return list(tags) def _apply_tag_filters(model, query, filters): """Apply tag filters There are four types of filter: `tags` -- One or more strings that will be used to filter results in an AND expression: T1 AND T2 `tags-any` -- One or more strings that will be used to filter results in an OR expression: T1 OR T2 `not-tags` -- One or more strings that will be used to filter results in a NOT AND expression: NOT (T1 AND T2) `not-tags-any` -- One or more strings that will be used to filter results in a NOT OR expression: NOT (T1 OR T2) Note: tag values can be specified comma separated string. for example, 'GET /v2.0/networks?tags-any=red,blue' is equivalent to 'GET /v2.0/networks?tags-any=red&tags-any=blue' it means 'red' or 'blue'. 
""" if 'tags' in filters: tags = _get_tag_list(filters.pop('tags')) first_tag = tags.pop(0) query = query.join( tag_model.Tag, model.standard_attr_id == tag_model.Tag.standard_attr_id) query = query.filter(tag_model.Tag.tag == first_tag) for tag in tags: tag_alias = aliased(tag_model.Tag) query = query.join( tag_alias, model.standard_attr_id == tag_alias.standard_attr_id) query = query.filter(tag_alias.tag == tag) if 'tags-any' in filters: tags = _get_tag_list(filters.pop('tags-any')) query = query.join( tag_model.Tag, model.standard_attr_id == tag_model.Tag.standard_attr_id) query = query.filter(tag_model.Tag.tag.in_(tags)) if 'not-tags' in filters: tags = _get_tag_list(filters.pop('not-tags')) first_tag = tags.pop(0) subq = query.session.query(tag_model.Tag.standard_attr_id) subq = subq.filter(tag_model.Tag.tag == first_tag) for tag in tags: tag_alias = aliased(tag_model.Tag) subq = subq.join( tag_alias, tag_model.Tag.standard_attr_id == tag_alias.standard_attr_id) subq = subq.filter(tag_alias.tag == tag) query = query.filter(~model.standard_attr_id.in_(subq)) if 'not-tags-any' in filters: tags = _get_tag_list(filters.pop('not-tags-any')) subq = query.session.query(tag_model.Tag.standard_attr_id) subq = subq.filter(tag_model.Tag.tag.in_(tags)) query = query.filter(~model.standard_attr_id.in_(subq)) return query def register_tag_hooks(): for model in resource_model_map.values(): method = functools.partial(_apply_tag_filters, model) _filter_methods.append(method) model_query.register_hook(model, "tag", query_hook=None, filter_hook=None, result_filters=method) @base.NeutronObjectRegistry.register class Tag(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = tag_model.Tag fields = { 'tag': obj_fields.StringField(), 'standard_attr_id': obj_fields.IntegerField() } primary_keys = ['tag', 'standard_attr_id'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/objects/trunk.py0000644000175000017500000001236100000000000022006 0ustar00coreycorey00000000000000# Copyright (c) 2016 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib import exceptions as n_exc from neutron_lib.objects import common_types from neutron_lib.objects import exceptions as o_exc from oslo_db import exception as o_db_exc from oslo_utils import versionutils from oslo_versionedobjects import fields as obj_fields from neutron.objects import base from neutron.services.trunk import exceptions as t_exc from neutron.services.trunk import models @base.NeutronObjectRegistry.register class SubPort(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = models.SubPort new_facade = True primary_keys = ['port_id'] foreign_keys = {'Trunk': {'trunk_id': 'id'}} fields = { 'port_id': common_types.UUIDField(), 'trunk_id': common_types.UUIDField(), 'segmentation_type': obj_fields.StringField(), 'segmentation_id': obj_fields.IntegerField(), } fields_no_update = ['segmentation_type', 'segmentation_id', 'trunk_id'] def to_dict(self): _dict = super(SubPort, self).to_dict() # trunk_id is redundant in the subport dict. _dict.pop('trunk_id') return _dict def create(self): try: with self.db_context_writer(self.obj_context): super(SubPort, self).create() except o_db_exc.DBReferenceError as ex: if ex.key_table is None: # NOTE(ivc): 'key_table' is provided by 'oslo.db' [1] # only for a limited set of database backends (i.e. # MySQL and PostgreSQL). Other database backends # (including SQLite) would have 'key_table' set to None. # We emulate the 'key_table' support for such database # backends. # # [1] https://github.com/openstack/oslo.db/blob/3fadd5a # /oslo_db/sqlalchemy/exc_filters.py#L190-L203 self.obj_context.session.rollback() with self.db_context_reader(self.obj_context): if not Trunk.get_object(self.obj_context, id=self.trunk_id): ex.key_table = Trunk.db_model.__tablename__ if ex.key_table == Trunk.db_model.__tablename__: raise t_exc.TrunkNotFound(trunk_id=self.trunk_id) raise n_exc.PortNotFound(port_id=self.port_id) except o_exc.NeutronDbObjectDuplicateEntry: raise t_exc.DuplicateSubPort( segmentation_type=self.segmentation_type, segmentation_id=self.segmentation_id, trunk_id=self.trunk_id) @base.NeutronObjectRegistry.register class Trunk(base.NeutronDbObject): # Version 1.0: Initial version # Version 1.1: Changed tenant_id to project_id VERSION = '1.1' db_model = models.Trunk new_facade = True fields = { 'admin_state_up': obj_fields.BooleanField(), 'id': common_types.UUIDField(), 'project_id': obj_fields.StringField(), 'name': obj_fields.StringField(), 'port_id': common_types.UUIDField(), 'status': obj_fields.StringField(), 'sub_ports': obj_fields.ListOfObjectsField(SubPort.__name__), } fields_no_update = ['project_id', 'port_id'] synthetic_fields = ['sub_ports'] def create(self): with self.db_context_writer(self.obj_context): sub_ports = [] if self.obj_attr_is_set('sub_ports'): sub_ports = self.sub_ports try: super(Trunk, self).create() except o_db_exc.DBReferenceError: raise n_exc.PortNotFound(port_id=self.port_id) if sub_ports: for sub_port in sub_ports: sub_port.trunk_id = self.id sub_port.create() self.sub_ports.append(sub_port) self.obj_reset_changes(['sub_ports']) def update(self, **kwargs): self.update_fields(kwargs) super(Trunk, self).update() # TODO(hichihara): For tag mechanism. 
This will be removed in bug/1704137 def to_dict(self): _dict = super(Trunk, self).to_dict() try: _dict['tags'] = [t.tag for t in self.db_obj.standard_attr.tags] except AttributeError: # AttrtibuteError can be raised when accessing self.db_obj # or self.db_obj.standard_attr pass return _dict def obj_make_compatible(self, primitive, target_version): _target_version = versionutils.convert_version_to_tuple(target_version) if _target_version < (1, 1): primitive['tenant_id'] = primitive.pop('project_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/opts.py0000644000175000017500000002644400000000000020206 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import itertools import operator from keystoneauth1 import loading as ks_loading from oslo_config import cfg import neutron.agent.agent_extensions_manager import neutron.agent.securitygroups_rpc import neutron.common.cache_utils import neutron.conf.agent.agent_extensions_manager import neutron.conf.agent.common import neutron.conf.agent.database.agents_db import neutron.conf.agent.database.agentschedulers_db import neutron.conf.agent.dhcp import neutron.conf.agent.l3.config import neutron.conf.agent.l3.ha import neutron.conf.agent.linux import neutron.conf.agent.metadata.config as meta_conf import neutron.conf.agent.ovs_conf import neutron.conf.agent.ovsdb_api import neutron.conf.agent.xenapi_conf import neutron.conf.common import neutron.conf.db.dvr_mac_db import neutron.conf.db.extraroute_db import neutron.conf.db.l3_agentschedulers_db import neutron.conf.db.l3_dvr_db import neutron.conf.db.l3_gwmode_db import neutron.conf.db.l3_hamode_db import neutron.conf.extensions.allowedaddresspairs import neutron.conf.extensions.conntrack_helper import neutron.conf.plugins.ml2.config import neutron.conf.plugins.ml2.drivers.agent import neutron.conf.plugins.ml2.drivers.driver_type import neutron.conf.plugins.ml2.drivers.linuxbridge import neutron.conf.plugins.ml2.drivers.macvtap import neutron.conf.plugins.ml2.drivers.mech_sriov.agent_common import neutron.conf.plugins.ml2.drivers.mech_sriov.mech_sriov_conf import neutron.conf.plugins.ml2.drivers.openvswitch.mech_ovs_conf import neutron.conf.plugins.ml2.drivers.ovs_conf import neutron.conf.quota import neutron.conf.service import neutron.conf.services.logging import neutron.conf.services.metering_agent import neutron.conf.wsgi import neutron.db.migration.cli import neutron.extensions.l3 import neutron.extensions.securitygroup import neutron.plugins.ml2.drivers.mech_sriov.agent.common.config import neutron.wsgi NOVA_GROUP = 'nova' IRONIC_GROUP = 'ironic' CONF = cfg.CONF deprecations = {'nova.cafile': [cfg.DeprecatedOpt('ca_certificates_file', group=NOVA_GROUP)], 'nova.insecure': [cfg.DeprecatedOpt('api_insecure', group=NOVA_GROUP)], 'nova.timeout': [cfg.DeprecatedOpt('url_timeout', group=NOVA_GROUP)]} _nova_options = ks_loading.register_session_conf_options( CONF, NOVA_GROUP, 
    deprecated_opts=deprecations)

_ironic_options = ks_loading.register_session_conf_options(
    CONF, IRONIC_GROUP)


def list_agent_opts():
    return [
        ('agent',
         itertools.chain(
             neutron.conf.agent.common.ROOT_HELPER_OPTS,
             neutron.conf.agent.common.AGENT_STATE_OPTS,
             neutron.conf.agent.common.IPTABLES_OPTS,
             neutron.conf.agent.common.PROCESS_MONITOR_OPTS,
             neutron.conf.agent.common.AVAILABILITY_ZONE_OPTS)
         ),
        ('DEFAULT',
         itertools.chain(
             neutron.conf.agent.common.INTERFACE_DRIVER_OPTS,
             neutron.conf.agent.metadata.config.SHARED_OPTS)
         )
    ]


def list_extension_opts():
    return [
        ('DEFAULT',
         itertools.chain(
             neutron.conf.extensions.allowedaddresspairs
             .allowed_address_pair_opts,
             neutron.conf.extensions.conntrack_helper.conntrack_helper_opts)
         ),
        ('quotas',
         itertools.chain(
             neutron.conf.quota.l3_quota_opts,
             neutron.conf.quota.security_group_quota_opts)
         )
    ]


def list_db_opts():
    return [
        ('DEFAULT',
         itertools.chain(
             neutron.conf.agent.database.agents_db.AGENT_OPTS,
             neutron.conf.db.extraroute_db.EXTRA_ROUTE_OPTS,
             neutron.conf.db.l3_gwmode_db.L3GWMODE_OPTS,
             neutron.conf.agent.database.agentschedulers_db
             .AGENTS_SCHEDULER_OPTS,
             neutron.conf.db.dvr_mac_db.DVR_MAC_ADDRESS_OPTS,
             neutron.conf.db.l3_dvr_db.ROUTER_DISTRIBUTED_OPTS,
             neutron.conf.db.l3_agentschedulers_db.L3_AGENTS_SCHEDULER_OPTS,
             neutron.conf.db.l3_hamode_db.L3_HA_OPTS)
         ),
        ('database', neutron.db.migration.cli.get_engine_config())
    ]


def list_opts():
    return [
        ('DEFAULT',
         itertools.chain(
             neutron.conf.common.core_cli_opts,
             neutron.conf.common.core_opts,
             neutron.conf.wsgi.socket_opts,
             neutron.conf.service.SERVICE_OPTS,
             neutron.conf.service.RPC_EXTRA_OPTS)
         ),
        (neutron.conf.common.NOVA_CONF_SECTION,
         itertools.chain(
             neutron.conf.common.nova_opts)
         ),
        (neutron.conf.common.IRONIC_CONF_SECTION,
         itertools.chain(
             neutron.conf.common.ironic_opts)
         ),
        ('quotas', neutron.conf.quota.core_quota_opts)
    ]


def list_base_agent_opts():
    return [
        ('DEFAULT',
         itertools.chain(
             neutron.conf.agent.common.INTERFACE_OPTS,
             neutron.conf.agent.common.INTERFACE_DRIVER_OPTS,
             neutron.conf.service.RPC_EXTRA_OPTS)
         ),
        ('agent', neutron.conf.agent.common.AGENT_STATE_OPTS),
        ('ovs',
         itertools.chain(
             neutron.conf.agent.ovsdb_api.API_OPTS,
             neutron.conf.agent.ovs_conf.OPTS)
         ),
    ]


def list_az_agent_opts():
    return [
        ('agent', neutron.conf.agent.common.AVAILABILITY_ZONE_OPTS),
    ]


def list_dhcp_agent_opts():
    return [
        ('DEFAULT',
         itertools.chain(
             neutron.conf.agent.dhcp.DHCP_AGENT_OPTS,
             neutron.conf.agent.dhcp.DHCP_OPTS,
             neutron.conf.agent.dhcp.DNSMASQ_OPTS)
         )
    ]


def list_linux_bridge_opts():
    return [
        ('linux_bridge',
         neutron.conf.plugins.ml2.drivers.linuxbridge.bridge_opts),
        ('vxlan',
         neutron.conf.plugins.ml2.drivers.linuxbridge.vxlan_opts),
        ('agent',
         itertools.chain(
             neutron.conf.plugins.ml2.drivers.agent.agent_opts,
             neutron.conf.agent.agent_extensions_manager.
             AGENT_EXT_MANAGER_OPTS)
         ),
        ('securitygroup',
         neutron.conf.agent.securitygroups_rpc.security_group_opts),
        ('network_log',
         neutron.conf.services.logging.log_driver_opts)
    ]


def list_l3_agent_opts():
    return [
        ('DEFAULT',
         itertools.chain(
             neutron.conf.agent.l3.config.OPTS,
             neutron.conf.service.SERVICE_OPTS,
             neutron.conf.agent.l3.ha.OPTS,
             neutron.conf.agent.common.PD_DRIVER_OPTS,
             neutron.conf.agent.common.RA_OPTS)
         ),
        ('agent',
         neutron.conf.agent.agent_extensions_manager.AGENT_EXT_MANAGER_OPTS),
        ('network_log',
         neutron.conf.services.logging.log_driver_opts)
    ]


def list_macvtap_opts():
    return [
        ('macvtap',
         neutron.conf.plugins.ml2.drivers.macvtap.macvtap_opts),
        ('agent',
         neutron.conf.plugins.ml2.drivers.agent.agent_opts),
        ('securitygroup',
         neutron.conf.agent.securitygroups_rpc.security_group_opts)
    ]


def list_metadata_agent_opts():
    return [
        ('DEFAULT',
         itertools.chain(
             meta_conf.SHARED_OPTS,
             meta_conf.METADATA_PROXY_HANDLER_OPTS,
             meta_conf.UNIX_DOMAIN_METADATA_PROXY_OPTS)
         ),
        ('agent', neutron.conf.agent.common.AGENT_STATE_OPTS)
    ]


def list_metering_agent_opts():
    return [
        ('DEFAULT',
         neutron.conf.services.metering_agent.metering_agent_opts),
    ]


def list_ml2_conf_opts():
    return [
        ('ml2',
         neutron.conf.plugins.ml2.config.ml2_opts),
        ('ml2_type_flat',
         neutron.conf.plugins.ml2.drivers.driver_type.flat_opts),
        ('ml2_type_vlan',
         neutron.conf.plugins.ml2.drivers.driver_type.vlan_opts),
        ('ml2_type_gre',
         neutron.conf.plugins.ml2.drivers.driver_type.gre_opts),
        ('ml2_type_vxlan',
         neutron.conf.plugins.ml2.drivers.driver_type.vxlan_opts),
        ('ml2_type_geneve',
         neutron.conf.plugins.ml2.drivers.driver_type.geneve_opts),
        ('securitygroup',
         neutron.conf.agent.securitygroups_rpc.security_group_opts),
        ('ovs_driver',
         neutron.conf.plugins.ml2.drivers.openvswitch.mech_ovs_conf.
         ovs_driver_opts),
        ('sriov_driver',
         neutron.conf.plugins.ml2.drivers.mech_sriov.mech_sriov_conf.
         sriov_driver_opts)
    ]


def list_ovs_opts():
    return [
        ('DEFAULT',
         itertools.chain(
             neutron.conf.service.RPC_EXTRA_OPTS)
         ),
        ('ovs',
         itertools.chain(
             neutron.conf.plugins.ml2.drivers.ovs_conf.ovs_opts,
             neutron.conf.agent.ovsdb_api.API_OPTS)
         ),
        ('agent',
         itertools.chain(
             neutron.conf.plugins.ml2.drivers.ovs_conf.agent_opts,
             neutron.conf.agent.agent_extensions_manager.
             AGENT_EXT_MANAGER_OPTS)
         ),
        ('securitygroup',
         neutron.conf.agent.securitygroups_rpc.security_group_opts),
        ('network_log',
         neutron.conf.services.logging.log_driver_opts)
    ]


def list_sriov_agent_opts():
    return [
        ('sriov_nic',
         neutron.conf.plugins.ml2.drivers.mech_sriov.agent_common.
         sriov_nic_opts),
        ('agent',
         neutron.conf.agent.agent_extensions_manager.AGENT_EXT_MANAGER_OPTS)
    ]


def list_auth_opts():
    opt_list = copy.deepcopy(_nova_options)
    opt_list.insert(0, ks_loading.get_auth_common_conf_options()[0])
    # NOTE(mhickey): There are a lot of auth plugins, we just generate
    # the config options for a few common ones
    plugins = ['password', 'v2password', 'v3password']
    for name in plugins:
        for plugin_option in ks_loading.get_auth_plugin_conf_options(name):
            if all(option.name != plugin_option.name
                   for option in opt_list):
                opt_list.append(plugin_option)
    opt_list.sort(key=operator.attrgetter('name'))
    return [(NOVA_GROUP, opt_list)]


def list_ironic_auth_opts():
    opt_list = copy.deepcopy(_ironic_options)
    opt_list.insert(0, ks_loading.get_auth_common_conf_options()[0])
    # NOTE(mhickey): There are a lot of auth plugins, we just generate
    # the config options for a few common ones
    plugins = ['password', 'v2password', 'v3password']
    for name in plugins:
        for plugin_option in ks_loading.get_auth_plugin_conf_options(name):
            if all(option.name != plugin_option.name
                   for option in opt_list):
                opt_list.append(plugin_option)
    opt_list.sort(key=operator.attrgetter('name'))
    return [(IRONIC_GROUP, opt_list)]


def list_xenapi_opts():
    return [
        ('xenapi',
         neutron.conf.agent.xenapi_conf.XENAPI_OPTS)
    ]

neutron-16.0.0.0b2.dev214/neutron/pecan_wsgi/
neutron-16.0.0.0b2.dev214/neutron/pecan_wsgi/__init__.py
neutron-16.0.0.0b2.dev214/neutron/pecan_wsgi/app.py
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pecan

from neutron.pecan_wsgi.controllers import root
from neutron.pecan_wsgi import hooks
from neutron.pecan_wsgi import startup


def versions_factory(global_config, **local_config):
    return pecan.make_app(root.RootController())


def v2_factory(global_config, **local_config):
    # Processing order: as a request comes in, hooks with a lower priority
    # are called before those with a higher one; the response from the
    # controller is then passed back from higher-priority hooks to lower
    # ones.
    app_hooks = [
        hooks.UserFilterHook(),  # priority 90
        hooks.ContextHook(),  # priority 95
        hooks.ExceptionTranslationHook(),  # priority 100
        hooks.BodyValidationHook(),  # priority 120
        hooks.OwnershipValidationHook(),  # priority 125
        hooks.QuotaEnforcementHook(),  # priority 130
        hooks.NotifierHook(),  # priority 135
        hooks.QueryParametersHook(),  # priority 139
        hooks.PolicyHook(),  # priority 140
    ]
    app = pecan.make_app(root.V2Controller(),
                         debug=False,
                         force_canonical=False,
                         hooks=app_hooks,
                         guess_content_type_from_ext=True)
    startup.initialize_all()
    return app

neutron-16.0.0.0b2.dev214/neutron/pecan_wsgi/constants.py
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

ACTION_MAP = {'POST': 'create', 'PUT': 'update', 'GET': 'get',
              'DELETE': 'delete'}

neutron-16.0.0.0b2.dev214/neutron/pecan_wsgi/controllers/
neutron-16.0.0.0b2.dev214/neutron/pecan_wsgi/controllers/__init__.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.pecan_wsgi.controllers import quota
from neutron.pecan_wsgi.controllers import resource

CollectionsController = resource.CollectionsController
QuotasController = quota.QuotasController

neutron-16.0.0.0b2.dev214/neutron/pecan_wsgi/controllers/extensions.py
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
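# Routing sketch (added for orientation; the URIs are assumptions based on
# the v2 layout, not part of the original file):
#
#   GET /v2.0/extensions          -> ExtensionsController.index()
#   GET /v2.0/extensions/<alias>  -> ExtensionsController._lookup() hands off
#                                    to ExtensionController(alias).index()
#
# Any other verb on these URIs deliberately answers 404 rather than 405, to
# stay compatible with the legacy extensions controller (see the NOTEs
# below).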
import pecan

from neutron._i18n import _
from neutron.api import extensions
from neutron.pecan_wsgi.controllers import utils


class ExtensionsController(object):

    @utils.expose()
    def _lookup(self, alias, *remainder):
        return ExtensionController(alias), remainder

    @utils.expose(generic=True)
    def index(self):
        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
        exts = [extensions.ExtensionController._translate(ext)
                for ext in ext_mgr.extensions.values()]
        return {'extensions': exts}

    @utils.when(index, method='POST')
    @utils.when(index, method='PUT')
    @utils.when(index, method='DELETE')
    @utils.when(index, method='HEAD')
    @utils.when(index, method='PATCH')
    def not_supported(self):
        # NOTE(blogan): Normally we'd return 405 but the legacy extensions
        # controller returned 404.
        pecan.abort(404)


class ExtensionController(object):

    def __init__(self, alias):
        self.alias = alias

    @utils.expose(generic=True)
    def index(self):
        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
        ext = ext_mgr.extensions.get(self.alias, None)
        if not ext:
            pecan.abort(
                404, detail=_("Extension with alias %s "
                              "does not exist") % self.alias)
        return {'extension': extensions.ExtensionController._translate(ext)}

    @utils.when(index, method='POST')
    @utils.when(index, method='PUT')
    @utils.when(index, method='DELETE')
    @utils.when(index, method='HEAD')
    @utils.when(index, method='PATCH')
    def not_supported(self):
        # NOTE(blogan): Normally we'd return 405 but the legacy extensions
        # controller returned 404.
        pecan.abort(404)

neutron-16.0.0.0b2.dev214/neutron/pecan_wsgi/controllers/quota.py
# Copyright (c) 2015 Taturiello Consulting, Meh.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
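# Behaviour sketch (assumed URIs, for orientation only; not part of the
# original file):
#
#   GET    /v2.0/quotas              -> QuotasController.index()  (admin only)
#   GET    /v2.0/quotas/<tenant_id>  -> QuotaController.index()
#   PUT    /v2.0/quotas/<tenant_id>  -> QuotaController.put()
#   DELETE /v2.0/quotas/<tenant_id>  -> QuotaController.delete()
#
# plus the magic GET /v2.0/quotas/tenant 'whoami' behaviour documented in
# get_tenant_quotas() below.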
from neutron_lib.api import attributes
from neutron_lib.api import converters
from neutron_lib.db import constants as db_const
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_utils import importutils
import pecan
from pecan import request

from neutron._i18n import _
from neutron.pecan_wsgi.controllers import utils
from neutron.quota import resource_registry

RESOURCE_NAME = "quota"
TENANT_ID_ATTR = {'tenant_id':
                  {'allow_post': False,
                   'allow_put': False,
                   'required_by_policy': True,
                   'validate': {
                       'type:string': db_const.PROJECT_ID_FIELD_SIZE},
                   'is_visible': True}}


class QuotasController(utils.NeutronPecanController):

    def __init__(self):
        self._driver = importutils.import_class(
            cfg.CONF.QUOTAS.quota_driver
        )
        super(QuotasController, self).__init__(
            "%ss" % RESOURCE_NAME, RESOURCE_NAME)

    def _check_admin(self, context,
                     reason=_("Only admin can view or configure quota")):
        if not context.is_admin:
            raise n_exc.AdminRequired(reason=reason)

    @utils.expose()
    def _lookup(self, tenant_id, *remainder):
        return QuotaController(self._driver, tenant_id), remainder

    @utils.expose(generic=True)
    def index(self):
        neutron_context = request.context.get('neutron_context')
        # FIXME(salv-orlando): There shouldn't be any need to do this explicit
        # check. However some behaviours from the "old" extension have
        # been temporarily carried over here
        self._check_admin(neutron_context)
        # TODO(salv-orlando): proper plurals management
        return {self.collection:
                self._driver.get_all_quotas(
                    neutron_context,
                    resource_registry.get_all_resources())}

    @utils.when(index, method='POST')
    @utils.when(index, method='PUT')
    @utils.when(index, method='DELETE')
    def not_supported(self):
        pecan.abort(405)


class QuotaController(utils.NeutronPecanController):

    def __init__(self, _driver, tenant_id):
        self._driver = _driver
        self._tenant_id = tenant_id
        super(QuotaController, self).__init__(
            "%ss" % RESOURCE_NAME, RESOURCE_NAME)

        # Ensure limits for all registered resources are returned
        attr_dict = attributes.RESOURCES[self.collection]
        for quota_resource in resource_registry.get_all_resources().keys():
            attr_dict[quota_resource] = {
                'allow_post': False,
                'allow_put': True,
                'convert_to': converters.convert_to_int,
                'validate': {
                    'type:range': [-1, db_const.DB_INTEGER_MAX_VALUE]},
                'is_visible': True}
        # The quota resource must always declare a tenant_id attribute,
        # otherwise the attribute will be stripped off when generating the
        # response
        attr_dict.update(TENANT_ID_ATTR)

    @utils.expose(generic=True)
    def index(self):
        return get_tenant_quotas(self._tenant_id, self._driver)

    @utils.when(index, method='PUT')
    def put(self, *args, **kwargs):
        neutron_context = request.context.get('neutron_context')
        # For put requests there's always going to be a single element
        quota_data = request.context['resources'][0]
        for key, value in quota_data.items():
            self._driver.update_quota_limit(
                neutron_context, self._tenant_id, key, value)
        return get_tenant_quotas(self._tenant_id, self._driver)

    @utils.when_delete(index)
    def delete(self):
        neutron_context = request.context.get('neutron_context')
        self._driver.delete_tenant_quota(neutron_context,
                                         self._tenant_id)

    @utils.when(index, method='POST')
    def not_supported(self):
        pecan.abort(405)


def get_tenant_quotas(tenant_id, driver=None):
    if not driver:
        driver = importutils.import_class(cfg.CONF.QUOTAS.quota_driver)
    neutron_context = request.context.get('neutron_context')
    if tenant_id == 'tenant':
        # NOTE(salv-orlando): Read the following before the code in order
        # to avoid puking.
        # There is a weird undocumented behaviour of the Neutron quota API
        # as 'tenant' is used as an API action to return the identifier
        # of the tenant in the request context. This is used exclusively
        # for interaction with python-neutronclient and is a possibly
        # unnecessary 'whoami' API endpoint. Pending resolution of this
        # API issue, this controller will just special-case the magic string
        # 'tenant' (and only that string) and return the response expected
        # by python-neutronclient
        return {'tenant': {'tenant_id': neutron_context.tenant_id}}
    tenant_quotas = driver.get_tenant_quotas(
        neutron_context, resource_registry.get_all_resources(), tenant_id)
    tenant_quotas['tenant_id'] = tenant_id
    return {RESOURCE_NAME: tenant_quotas}

neutron-16.0.0.0b2.dev214/neutron/pecan_wsgi/controllers/resource.py
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging
import pecan
from pecan import request
import webob

from neutron._i18n import _
from neutron import manager
from neutron.pecan_wsgi.controllers import utils

LOG = logging.getLogger(__name__)


class ItemController(utils.NeutronPecanController):

    def __init__(self, resource, item, plugin=None, resource_info=None,
                 parent_resource=None, member_actions=None):
        super(ItemController, self).__init__(None, resource, plugin=plugin,
                                             resource_info=resource_info,
                                             parent_resource=parent_resource,
                                             member_actions=member_actions)
        self.item = item

    @utils.expose(generic=True)
    def index(self, *args, **kwargs):
        return self.get(*args, **kwargs)

    def get(self, *args, **kwargs):
        neutron_context = request.context['neutron_context']
        getter_args = [neutron_context, self.item]
        # NOTE(tonytan4ever): This implicitly forces the getter method to
        # take parent_id as its last argument, which makes future
        # refactoring easier
        if 'parent_id' in request.context:
            getter_args.append(request.context['parent_id'])
        fields = request.context['query_params'].get('fields')
        return {self.resource: self.plugin_shower(*getter_args,
                                                  fields=fields)}

    @utils.when(index, method='HEAD')
    @utils.when(index, method='POST')
    @utils.when(index, method='PATCH')
    def not_supported(self):
        pecan.abort(405)

    @utils.when(index, method='PUT')
    def put(self, *args, **kwargs):
        neutron_context = request.context['neutron_context']
        if "resources" not in request.context:
            msg = (_("Unable to find '%s' in request body") %
                   request.context['resource'])
            raise webob.exc.HTTPBadRequest(msg)
        resources = request.context['resources']
        # Bulk update is not supported, 'resources' always contains a single
        # element
        data = {self.resource: resources[0]}
        updater_args = [neutron_context, self.item]
        if 'parent_id' in request.context:
            updater_args.append(request.context['parent_id'])
        updater_args.append(data)
        return {self.resource: self.plugin_updater(*updater_args)}

    @utils.when_delete(index)
    def delete(self):
        if request.body:
            msg = _("Request body is not supported in DELETE.")
            raise webob.exc.HTTPBadRequest(msg)
        neutron_context = request.context['neutron_context']
        deleter_args = [neutron_context, self.item]
        if 'parent_id' in request.context:
            deleter_args.append(request.context['parent_id'])
        return self.plugin_deleter(*deleter_args)

    @utils.expose()
    def _lookup(self, collection, *remainder):
        request.context['collection'] = collection
        collection_path = '/'.join([self.resource, collection])
        controller = manager.NeutronManager.get_controller_for_resource(
            collection_path)
        if not controller:
            if collection not in self._member_actions:
                LOG.warning("No controller found for: %s - returning "
                            "response code 404", collection)
                pecan.abort(404)
            # collection is a member action, so we create a new controller
            # for it.
            method = self._member_actions[collection]
            kwargs = {'plugin': self.plugin,
                      'resource_info': self.resource_info}
            if method == 'PUT':
                kwargs['update_action'] = collection
            elif method == 'GET':
                kwargs['show_action'] = collection
            controller = MemberActionController(
                self.resource, self.item, self, **kwargs)
        else:
            request.context['parent_id'] = request.context['resource_id']
        request.context['resource'] = controller.resource
        return controller, remainder


class CollectionsController(utils.NeutronPecanController):

    item_controller_class = ItemController

    @utils.expose()
    def _lookup(self, item, *remainder):
        # Store resource identifier in request context
        request.context['resource_id'] = item
        uri_identifier = '%s_id' % self.resource
        request.context['uri_identifiers'][uri_identifier] = item
        return (self.item_controller_class(
            self.resource, item, resource_info=self.resource_info,
            # NOTE(tonytan4ever): item needs to share the same
            # parent as collection
            parent_resource=self.parent,
            member_actions=self._member_actions,
            plugin=self.plugin), remainder)

    @utils.expose(generic=True)
    def index(self, *args, **kwargs):
        return self.get(*args, **kwargs)

    def get(self, *args, **kwargs):
        # NOTE(blogan): these are set in the FieldsAndFiltersHook
        query_params = request.context['query_params']
        neutron_context = request.context['neutron_context']
        lister_args = [neutron_context]
        if 'parent_id' in request.context:
            lister_args.append(request.context['parent_id'])
        return {self.collection: self.plugin_lister(*lister_args,
                                                    **query_params)}

    @utils.when(index, method='HEAD')
    @utils.when(index, method='PATCH')
    @utils.when(index, method='PUT')
    @utils.when(index, method='DELETE')
    def not_supported(self):
        pecan.abort(405)

    @utils.when(index, method='POST')
    def post(self, *args, **kwargs):
        if 'resources' not in request.context:
            # user didn't specify any body, which is invalid for collections
            msg = (_("Unable to find '%s' in request body") %
                   request.context['resource'])
            raise webob.exc.HTTPBadRequest(msg)
        resources = request.context['resources']
        pecan.response.status = 201
        return self.create(resources)

    def create(self, resources):
        if request.context['is_bulk']:
            # Bulk!
            creator = self.plugin_bulk_creator
            key = self.collection
            data = {key: [{self.resource: res} for res in resources]}
            creator_kwargs = {self.collection: data}
        else:
            creator = self.plugin_creator
            key = self.resource
            data = {key: resources[0]}
            creator_kwargs = {self.resource: data}
        neutron_context = request.context['neutron_context']
        creator_args = [neutron_context]
        if 'parent_id' in request.context and self._parent_id_name:
            creator_kwargs[self._parent_id_name] = (
                request.context['parent_id'])
        return {key: creator(*creator_args, **creator_kwargs)}


class MemberActionController(ItemController):

    @property
    def plugin_shower(self):
        # NOTE(blogan): Do an explicit check for the _show_action because
        # pecan will see the plugin_shower property as a possible custom route
        # and try to evaluate it, which causes the code block to be executed.
        # If _show_action is None, getattr throws an exception and fails a
        # request.
        if self._show_action:
            return getattr(self.plugin, self._show_action)

    @property
    def plugin_updater(self):
        if self._update_action:
            return getattr(self.plugin, self._update_action)

    def __init__(self, resource, item, parent_controller, plugin=None,
                 resource_info=None, show_action=None, update_action=None):
        super(MemberActionController, self).__init__(
            resource, item, plugin=plugin, resource_info=resource_info)
        self._show_action = show_action
        self._update_action = update_action
        self.parent_controller = parent_controller

    @utils.expose(generic=True)
    def index(self, *args, **kwargs):
        if not self._show_action:
            pecan.abort(405)
        neutron_context = request.context['neutron_context']
        # NOTE(blogan): The legacy wsgi code did not pass fields to the plugin
        # on GET member actions. To maintain compatibility, we'll do the same.
        return self.plugin_shower(neutron_context, self.item)

    @utils.when(index, method='PUT')
    def put(self, *args, **kwargs):
        if not self._update_action:
            LOG.debug("Action %(action)s is not defined on resource "
                      "%(resource)s",
                      {'action': self._update_action,
                       'resource': self.resource})
            pecan.abort(405)
        neutron_context = request.context['neutron_context']
        LOG.debug("Processing member action %(action)s for resource "
                  "%(resource)s identified by %(item)s",
                  {'action': self._update_action,
                   'resource': self.resource,
                   'item': self.item})
        return self.plugin_updater(neutron_context, self.item,
                                   request.context['request_data'])

    @utils.when(index, method='HEAD')
    @utils.when(index, method='POST')
    @utils.when(index, method='PATCH')
    @utils.when(index, method='DELETE')
    def not_supported(self):
        return super(MemberActionController, self).not_supported()

neutron-16.0.0.0b2.dev214/neutron/pecan_wsgi/controllers/root.py
# Copyright (c) 2015 Mirantis, Inc.
# Copyright (c) 2015 Rackspace, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
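# Traversal sketch (paths are assumptions based on the controllers below;
# not part of the original file):
#
#   GET /       -> RootController.index()   (version list)
#   GET /v2.0/  -> V2Controller.index()     (core resource layout)
#   /v2.0/<collection>/... -> V2Controller._lookup() dispatches to the
#                             controller registered for <collection>.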
from neutron_lib.api.definitions import network as net_def
from neutron_lib.api.definitions import port as port_def
from neutron_lib.api.definitions import subnet as subnet_def
from neutron_lib.api.definitions import subnetpool as subnetpool_def
from oslo_config import cfg
from oslo_log import log
import pecan
from pecan import request
from six.moves import urllib

from neutron._i18n import _
from neutron.api.views import versions as versions_view
from neutron import manager
from neutron.pecan_wsgi.controllers import extensions as ext_ctrl
from neutron.pecan_wsgi.controllers import utils

CONF = cfg.CONF
LOG = log.getLogger(__name__)

_VERSION_INFO = {}
_CORE_RESOURCES = {net_def.RESOURCE_NAME: net_def.COLLECTION_NAME,
                   subnet_def.RESOURCE_NAME: subnet_def.COLLECTION_NAME,
                   subnetpool_def.RESOURCE_NAME:
                       subnetpool_def.COLLECTION_NAME,
                   port_def.RESOURCE_NAME: port_def.COLLECTION_NAME}


def _load_version_info(version_info):
    if version_info['id'] in _VERSION_INFO:
        raise AssertionError(_("ID %s must not be in "
                               "VERSION_INFO") % version_info['id'])
    _VERSION_INFO[version_info['id']] = version_info


def _get_version_info():
    return _VERSION_INFO.values()


class RootController(object):

    @utils.expose(generic=True)
    def index(self):
        version_objs = [
            {
                "id": "v2.0",
                "status": "CURRENT",
            },
        ]
        builder = versions_view.get_view_builder(pecan.request)
        versions = [builder.build(version) for version in version_objs]
        return dict(versions=versions)

    @utils.when(index, method='HEAD')
    @utils.when(index, method='POST')
    @utils.when(index, method='PATCH')
    @utils.when(index, method='PUT')
    @utils.when(index, method='DELETE')
    def not_supported(self):
        pecan.abort(405)


class V2Controller(object):

    # Same data structure as neutron.api.versions.Versions for API backward
    # compatibility
    version_info = {
        'id': 'v2.0',
        'status': 'CURRENT'
    }
    _load_version_info(version_info)

    # NOTE(blogan): Paste deploy handled the routing to the legacy extension
    # controller. If the extensions filter is removed from the api-paste.ini
    # then this controller will be routed to instead. This means operators had
    # the ability to turn off the extensions controller via the api-paste but
    # will not be able to turn it off with the pecan switch.
    extensions = ext_ctrl.ExtensionsController()

    @utils.expose(generic=True)
    def index(self):
        if not pecan.request.path_url.endswith('/'):
            pecan.abort(404)

        layout = []
        for name, collection in _CORE_RESOURCES.items():
            href = urllib.parse.urljoin(pecan.request.path_url, collection)
            resource = {'name': name,
                        'collection': collection,
                        'links': [{'rel': 'self',
                                   'href': href}]}
            layout.append(resource)
        return {'resources': layout}

    @utils.when(index, method='HEAD')
    @utils.when(index, method='POST')
    @utils.when(index, method='PATCH')
    @utils.when(index, method='PUT')
    @utils.when(index, method='DELETE')
    def not_supported(self):
        pecan.abort(405)

    @utils.expose()
    def _lookup(self, collection, *remainder):
        # If the collection exists in the extension-to-service-plugins map,
        # then we assume the collection is the service plugin and the path
        # needs to be remapped.
        # Example: https://neutron.endpoint/v2.0/fwaas/firewall_groups
        if (remainder and
                manager.NeutronManager.get_resources_for_path_prefix(
                    collection)):
            collection = remainder[0]
            remainder = remainder[1:]
        controller = manager.NeutronManager.get_controller_for_resource(
            collection)
        if not controller:
            LOG.warning("No controller found for: %s - returning response "
                        "code 404", collection)
            pecan.abort(404)
        # Store resource and collection names in pecan request context so that
        # hooks can leverage them if necessary. The following code uses
        # attributes from the controller instance to ensure names have been
        # properly sanitized (eg: replacing dashes with underscores)
        request.context['resource'] = controller.resource
        request.context['collection'] = controller.collection
        # NOTE(blogan): initialize a dict to store the ids of the items walked
        # in the path, for example: /networks/1234 would cause uri_identifiers
        # to contain: {'network_id': '1234'}
        # This is for backwards compatibility with legacy extensions that
        # defined their own controllers and expected kwargs to be passed in
        # with the uri_identifiers
        request.context['uri_identifiers'] = {}
        return controller, remainder

neutron-16.0.0.0b2.dev214/neutron/pecan_wsgi/controllers/utils.py
# Copyright (c) 2015 Taturiello Consulting, Meh.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from collections import defaultdict
import copy
import functools

from neutron_lib.api import attributes
from neutron_lib import constants
from neutron_lib.db import api as db_api
from neutron_lib import exceptions
from oslo_log import log as logging
from oslo_utils import excutils
import pecan
from pecan import request

from neutron._i18n import _
from neutron.api import api_common
from neutron import manager

# Utility functions for Pecan controllers.

LOG = logging.getLogger(__name__)


class Fakecode(object):
    co_varnames = ()


def _composed(*decorators):
    """Takes a list of decorators and returns a single decorator."""

    def final_decorator(f):
        for d in decorators:
            # workaround for pecan bug that always assumes decorators
            # have a __code__ attr
            if not hasattr(d, '__code__'):
                setattr(d, '__code__', Fakecode())
            f = d(f)
        return f
    return final_decorator


def _protect_original_resources(f):
    """Wrapper to ensure that mutated resources are discarded on retries."""

    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        ctx = request.context
        if 'resources' in ctx:
            orig = ctx.get('protected_resources')
            if not orig:
                # this is the first call so we just take the whole reference
                ctx['protected_resources'] = ctx['resources']
            # TODO(blogan): Once bug 157751 is fixed and released in
            # neutron-lib this memo will no longer be needed. This is just a
            # quick way to not depend on a release of neutron-lib.
            # The version that has that bug fix will need to be updated in
            # neutron-lib.
            memo = {id(constants.ATTR_NOT_SPECIFIED):
                    constants.ATTR_NOT_SPECIFIED}
            ctx['resources'] = copy.deepcopy(ctx['protected_resources'],
                                             memo=memo)
        return f(*args, **kwargs)
    return wrapped


def _pecan_generator_wrapper(func, *args, **kwargs):
    """Helper function so we don't have to specify json for everything."""
    kwargs.setdefault('content_type', 'application/json')
    kwargs.setdefault('template', 'json')
    return _composed(_protect_original_resources, db_api.retry_db_errors,
                     func(*args, **kwargs))


def expose(*args, **kwargs):
    return _pecan_generator_wrapper(pecan.expose, *args, **kwargs)


def when(index, *args, **kwargs):
    return _pecan_generator_wrapper(index.when, *args, **kwargs)


def when_delete(index, *args, **kwargs):
    kwargs['method'] = 'DELETE'
    deco = _pecan_generator_wrapper(index.when, *args, **kwargs)
    return _composed(_set_del_code, deco)


def _set_del_code(f):
    """Handle logic of disabling json templating engine and setting HTTP code.

    We return 204 on delete without content. However, pecan defaults empty
    responses with the json template engine to 'null', which is not empty
    content. This breaks connection re-use for some clients due to the
    inconsistency. So we need to detect when there is no response and
    disable the json templating engine.

    See https://github.com/pecan/pecan/issues/72
    """
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        f(*args, **kwargs)
        pecan.response.status = 204
        pecan.override_template(None)
        # NOTE(kevinbenton): we are explicitly not returning the DELETE
        # response from the controller because that is the legacy Neutron
        # API behavior.
    return wrapped


class NeutronPecanController(object):

    LIST = 'list'
    SHOW = 'show'
    CREATE = 'create'
    UPDATE = 'update'
    DELETE = 'delete'

    def __init__(self, collection, resource, plugin=None, resource_info=None,
                 allow_pagination=None, allow_sorting=None,
                 parent_resource=None, member_actions=None,
                 collection_actions=None, item=None, action_status=None):
        # Ensure dashes are always replaced with underscores
        self.collection = collection and collection.replace('-', '_')
        self.resource = resource and resource.replace('-', '_')
        self._member_actions = member_actions or {}
        self._collection_actions = collection_actions or {}
        self._resource_info = resource_info
        self._plugin = plugin
        # Controllers for some resources that are not mapped to anything in
        # RESOURCE_ATTRIBUTE_MAP will not have anything in _resource_info
        if self.resource_info:
            self._mandatory_fields = set([field for (field, data) in
                                          self.resource_info.items() if
                                          data.get('required_by_policy')])
            if 'tenant_id' in self._mandatory_fields:
                # ensure that project_id is queried in the database when
                # tenant_id is required
                self._mandatory_fields.add('project_id')
        else:
            self._mandatory_fields = set()
        self.allow_pagination = allow_pagination
        if self.allow_pagination is None:
            self.allow_pagination = True
        self.allow_sorting = allow_sorting
        if self.allow_sorting is None:
            self.allow_sorting = True
        self.native_pagination = api_common.is_native_pagination_supported(
            self.plugin)
        self.native_sorting = api_common.is_native_sorting_supported(
            self.plugin)
        if self.allow_pagination and self.native_pagination:
            if not self.native_sorting:
                raise exceptions.Invalid(
                    _("Native pagination depends on native sorting")
                )
        self.filter_validation = api_common.is_filter_validation_supported(
            self.plugin)
        self.primary_key = self._get_primary_key()
        self.parent = parent_resource
        parent_resource = '_%s' % parent_resource if parent_resource else ''
        self._parent_id_name = ('%s_id' % self.parent
                                if self.parent else None)
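        # Derive the plugin method names this controller dispatches to.
        # For a top-level resource such as 'port' this yields 'get_ports',
        # 'get_port', 'create_port', 'update_port' and 'delete_port'; with
        # a parent resource such as 'router' it yields 'get_router_ports',
        # 'create_router_port', and so on.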
        self._plugin_handlers = {
            self.LIST: 'get%s_%s' % (parent_resource, self.collection),
            self.SHOW: 'get%s_%s' % (parent_resource, self.resource)
        }
        for action in [self.CREATE, self.UPDATE, self.DELETE]:
            self._plugin_handlers[action] = '%s%s_%s' % (
                action, parent_resource, self.resource)
        self.item = item
        self.action_status = action_status or {}

    def _set_response_code(self, result, method_name):
        if method_name in self.action_status:
            pecan.response.status = self.action_status[method_name]
        else:
            pecan.response.status = 200 if result else 204

    def build_field_list(self, request_fields):
        added_fields = []
        combined_fields = []
        req_fields_set = {f for f in request_fields if f}
        if req_fields_set:
            added_fields = self._mandatory_fields - req_fields_set
            combined_fields = req_fields_set | self._mandatory_fields
        # field sorting is to match old behavior of legacy API and to make
        # this drop-in compatible with the old API unit tests
        return sorted(combined_fields), list(added_fields)

    @property
    def plugin(self):
        if not self._plugin:
            self._plugin = manager.NeutronManager.get_plugin_for_resource(
                self.collection)
        return self._plugin

    @property
    def resource_info(self):
        if not self._resource_info:
            self._resource_info = attributes.RESOURCES.get(
                self.collection)
        return self._resource_info

    def _get_primary_key(self, default_primary_key='id'):
        if not self.resource_info:
            return default_primary_key
        for key, value in self.resource_info.items():
            if value.get('primary_key', False):
                return key
        return default_primary_key

    @property
    def plugin_handlers(self):
        return self._plugin_handlers

    @property
    def plugin_lister(self):
        return getattr(self.plugin, self._plugin_handlers[self.LIST])

    @property
    def plugin_shower(self):
        return getattr(self.plugin, self._plugin_handlers[self.SHOW])

    @property
    def plugin_creator(self):
        return getattr(self.plugin, self._plugin_handlers[self.CREATE])

    @property
    def plugin_bulk_creator(self):
        native = getattr(self.plugin,
                         '%s_bulk' % self._plugin_handlers[self.CREATE],
                         None)
        # NOTE(kevinbenton): this flag is just to make testing easier since we
        # don't have any in-tree plugins without native bulk support
        if getattr(self.plugin, '_FORCE_EMULATED_BULK', False) or not native:
            return self._emulated_bulk_creator
        return native

    def _emulated_bulk_creator(self, context, **kwargs):
        objs = []
        body = kwargs[self.collection]
        try:
            for item in body[self.collection]:
                objs.append(self.plugin_creator(context, item))
            return objs
        except Exception:
            with excutils.save_and_reraise_exception():
                for obj in objs:
                    try:
                        self.plugin_deleter(context, obj['id'])
                    except Exception:
                        LOG.exception("Unable to undo bulk create for "
                                      "%(resource)s %(id)s",
                                      {'resource': self.collection,
                                       'id': obj['id']})

    @property
    def plugin_deleter(self):
        return getattr(self.plugin, self._plugin_handlers[self.DELETE])

    @property
    def plugin_updater(self):
        return getattr(self.plugin, self._plugin_handlers[self.UPDATE])


class ShimRequest(object):

    def __init__(self, context):
        self.context = context


def invert_dict(dictionary):
    inverted = defaultdict(list)
    for k, v in dictionary.items():
        inverted[v].append(k)
    return inverted


class ShimItemController(NeutronPecanController):

    def __init__(self, collection, resource, item, controller,
                 collection_actions=None, member_actions=None,
                 action_status=None):
        super(ShimItemController, self).__init__(
            collection, resource, collection_actions=collection_actions,
            member_actions=member_actions, item=item,
            action_status=action_status)
        self.controller = controller
        self.controller_delete = getattr(controller, 'delete', None)
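        # The remaining handlers are resolved the same way below; any method
        # the shimmed controller does not implement stays None and is turned
        # into a 405 when the matching HTTP verb arrives.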
        self.controller_update = getattr(controller, 'update', None)
        self.controller_show = getattr(controller, 'show', None)
        self.inverted_collection_actions = invert_dict(
            self._collection_actions)

    @expose(generic=True)
    def index(self):
        shim_request = ShimRequest(request.context['neutron_context'])
        kwargs = request.context['uri_identifiers']
        if self.item in self.inverted_collection_actions['GET']:
            method = getattr(self.controller, self.item, None)
            # collection actions are essentially static items and therefore
            # should not take self.item as an argument.
            result = method(shim_request, **kwargs)
            self._set_response_code(result, self.item)
            return result
        elif not self.controller_show:
            pecan.abort(405)
        else:
            result = self.controller_show(shim_request, self.item, **kwargs)
            self._set_response_code(result, 'show')
            return result

    @when_delete(index)
    def delete(self):
        if not self.controller_delete:
            pecan.abort(405)
        shim_request = ShimRequest(request.context['neutron_context'])
        uri_identifiers = request.context['uri_identifiers']
        result = self.controller_delete(shim_request, self.item,
                                        **uri_identifiers)
        self._set_response_code(result, 'delete')
        return result

    @when(index, method='PUT')
    def update(self):
        if not self.controller_update:
            pecan.abort(405)
        pecan.response.status = self.action_status.get('update', 201)
        shim_request = ShimRequest(request.context['neutron_context'])
        kwargs = request.context['uri_identifiers']
        try:
            kwargs['body'] = request.context['request_data']
        except KeyError:
            pass
        result = self.controller_update(shim_request, self.item, **kwargs)
        self._set_response_code(result, 'update')
        return result

    @expose()
    def _lookup(self, resource, *remainder):
        request.context['resource'] = self.resource
        return ShimMemberActionController(self.collection, resource,
                                          self.item, self.controller,
                                          self._member_actions), remainder


class ShimCollectionsController(NeutronPecanController):

    def __init__(self, collection, resource, controller,
                 collection_actions=None, member_actions=None,
                 collection_methods=None, action_status=None):
        collection_methods = collection_methods or {}
        super(ShimCollectionsController, self).__init__(
            collection, resource, member_actions=member_actions,
            collection_actions=collection_actions,
            action_status=action_status)
        self.controller = controller
        self.controller_index = getattr(controller, 'index', None)
        self.controller_create = getattr(controller, 'create', None)
        self.controller_update = getattr(controller, 'update', None)
        self.collection_methods = {}
        for action, method in collection_methods.items():
            controller_method = getattr(controller, action, None)
            self.collection_methods[method] = (
                controller_method, self.action_status.get(action, 200))

    @expose(generic=True)
    def index(self):
        if (not self.controller_index and
                request.method not in self.collection_methods):
            pecan.abort(405)
        controller_method_status = self.collection_methods.get(request.method)
        status = None
        if controller_method_status:
            controller_method = controller_method_status[0]
            status = controller_method_status[1]
        else:
            controller_method = self.controller_index
        shim_request = ShimRequest(request.context['neutron_context'])
        uri_identifiers = request.context['uri_identifiers']
        args = [shim_request]
        if request.method == 'PUT':
            args.append(request.context.get('request_data'))
        result = controller_method(*args, **uri_identifiers)
        if not status:
            self._set_response_code(result, 'index')
        else:
            pecan.response.status = status
        return result

    @when(index, method='POST')
    def create(self):
        if not self.controller_create:
            pecan.abort(405)
        shim_request = ShimRequest(request.context['neutron_context'])
        uri_identifiers = request.context['uri_identifiers']
        result = self.controller_create(shim_request,
                                        request.context.get('request_data'),
                                        **uri_identifiers)
        self._set_response_code(result, 'create')
        return result

    @expose()
    def _lookup(self, item, *remainder):
        request.context['resource'] = self.resource
        request.context['resource_id'] = item
        return (
            ShimItemController(self.collection, self.resource, item,
                               self.controller,
                               member_actions=self._member_actions,
                               collection_actions=self._collection_actions,
                               action_status=self.action_status),
            remainder
        )


class ShimMemberActionController(NeutronPecanController):

    def __init__(self, collection, resource, item, controller,
                 member_actions):
        super(ShimMemberActionController, self).__init__(
            collection, resource, member_actions=member_actions, item=item)
        self.controller = controller
        self.inverted_member_actions = invert_dict(self._member_actions)

    @expose(generic=True)
    def index(self):
        if self.resource not in self.inverted_member_actions['GET']:
            pecan.abort(404)
        shim_request = ShimRequest(request.context['neutron_context'])
        uri_identifiers = request.context['uri_identifiers']
        method = getattr(self.controller, self.resource)
        return method(shim_request, self.item, **uri_identifiers)


class PecanResourceExtension(object):

    def __init__(self, collection, controller, plugin):
        self.collection = collection
        self.controller = controller
        self.plugin = plugin

neutron-16.0.0.0b2.dev214/neutron/pecan_wsgi/hooks/
neutron-16.0.0.0b2.dev214/neutron/pecan_wsgi/hooks/__init__.py
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
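# The hooks re-exported below are assembled into a pipeline in
# neutron.pecan_wsgi.app.v2_factory: lower-priority hooks run first on the
# way in and last on the way out (see the priority comments there and the
# `priority` attribute on each hook class).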
from neutron.pecan_wsgi.hooks import body_validation
from neutron.pecan_wsgi.hooks import context
from neutron.pecan_wsgi.hooks import notifier
from neutron.pecan_wsgi.hooks import ownership_validation
from neutron.pecan_wsgi.hooks import policy_enforcement
from neutron.pecan_wsgi.hooks import query_parameters
from neutron.pecan_wsgi.hooks import quota_enforcement
from neutron.pecan_wsgi.hooks import translation
from neutron.pecan_wsgi.hooks import userfilters

ExceptionTranslationHook = translation.ExceptionTranslationHook
ContextHook = context.ContextHook
BodyValidationHook = body_validation.BodyValidationHook
OwnershipValidationHook = ownership_validation.OwnershipValidationHook
PolicyHook = policy_enforcement.PolicyHook
QuotaEnforcementHook = quota_enforcement.QuotaEnforcementHook
NotifierHook = notifier.NotifierHook
QueryParametersHook = query_parameters.QueryParametersHook
UserFilterHook = userfilters.UserFilterHook

neutron-16.0.0.0b2.dev214/neutron/pecan_wsgi/hooks/body_validation.py
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_serialization import jsonutils
from pecan import hooks
import webob.exc

from neutron._i18n import _
from neutron.api.v2 import base as v2_base
from neutron.pecan_wsgi.hooks import utils


class BodyValidationHook(hooks.PecanHook):

    priority = 120

    def before(self, state):
        if state.request.method not in ('POST', 'PUT'):
            return
        resource = state.request.context.get('resource')
        collection = state.request.context.get('collection')
        neutron_context = state.request.context['neutron_context']
        is_create = state.request.method == 'POST'
        if not resource:
            return
        if not state.request.body:
            return
        try:
            json_data = jsonutils.loads(state.request.body)
            if not isinstance(json_data, dict):
                raise ValueError()
        except ValueError:
            msg = _("Body contains invalid data")
            raise webob.exc.HTTPBadRequest(msg)
        # Raw data are consumed by member actions such as
        # add_router_interface
        state.request.context['request_data'] = json_data

        if not (resource in json_data or collection in json_data):
            # there is no resource in the request. This can happen when a
            # member action is being processed or on agent scheduler
            # operations
            return

        # Prepare data to be passed to the plugin from request body
        controller = utils.get_controller(state)
        data = v2_base.Controller.prepare_request_body(
            neutron_context,
            json_data,
            is_create,
            resource,
            controller.resource_info,
            allow_bulk=is_create)

        if collection in data:
            state.request.context['resources'] = [item[resource] for item in
                                                  data[collection]]
            state.request.context['is_bulk'] = True
        else:
            state.request.context['resources'] = [data[resource]]
            state.request.context['is_bulk'] = False

neutron-16.0.0.0b2.dev214/neutron/pecan_wsgi/hooks/context.py
# Copyright 2012 New Dream Network, LLC (DreamHost)
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib import context
from pecan import hooks


class ContextHook(hooks.PecanHook):
    """Moves the request env's neutron.context into the request's context."""

    priority = 95

    def before(self, state):
        ctx = (state.request.environ.get('neutron.context') or
               context.get_admin_context())
        state.request.context['neutron_context'] = ctx

neutron-16.0.0.0b2.dev214/neutron/pecan_wsgi/hooks/notifier.py
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
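# Event flow sketch (example names, for orientation only; not part of the
# original file). For POST /v2.0/networks the hook below emits:
#
#   before(): notifier.info(ctx, 'network.create.start', <request body>)
#   after():  notifier.info(ctx, 'network.create.end', <response body>)
#
# DELETE requests carry {'<resource>_id': <id>} in the start payload instead
# of a request body.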
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib import rpc as n_rpc
from oslo_log import log
from pecan import hooks

from neutron.pecan_wsgi import constants as pecan_constants
from neutron.pecan_wsgi.hooks import utils

LOG = log.getLogger(__name__)


class NotifierHook(hooks.PecanHook):

    priority = 135

    @property
    def _notifier(self):
        if not hasattr(self, '_notifier_inst'):
            self._notifier_inst = n_rpc.get_notifier('network')
        return self._notifier_inst

    def before(self, state):
        if state.request.method not in ('POST', 'PUT', 'DELETE'):
            return
        resource = state.request.context.get('resource')
        if not resource:
            return
        if utils.is_member_action(utils.get_controller(state)):
            return
        action = pecan_constants.ACTION_MAP.get(state.request.method)
        event = '%s.%s.start' % (resource, action)
        if action in ('create', 'update'):
            # notifier just gets plain old body without any treatment other
            # than the population of the object ID being operated on
            try:
                payload = state.request.json.copy()
                if not payload:
                    return
            except ValueError:
                return
            if action == 'update':
                payload['id'] = state.request.context.get('resource_id')
        elif action == 'delete':
            resource_id = state.request.context.get('resource_id')
            payload = {resource + '_id': resource_id}
        self._notifier.info(state.request.context.get('neutron_context'),
                            event, payload)

    def after(self, state):
        resource_name = state.request.context.get('resource')
        collection_name = state.request.context.get('collection')
        neutron_context = state.request.context.get('neutron_context')
        action = pecan_constants.ACTION_MAP.get(state.request.method)
        if not action or action not in ('create', 'update', 'delete'):
            return
        if utils.is_member_action(utils.get_controller(state)):
            return
        if not resource_name:
            LOG.debug("Skipping NotifierHook processing as there was no "
                      "resource associated with the request")
            return
        if state.response.status_int > 300:
            LOG.debug("No notification will be sent due to unsuccessful "
                      "status code: %s", state.response.status_int)
            return

        original = {}
        if (action in ('delete', 'update') and
                state.request.context.get('original_resources', [])):
            # We only need the original resource for updates and deletes
            original = state.request.context.get('original_resources')[0]
        if action == 'delete':
            # The object has been deleted, so we must notify the agent with
            # the data of the original object as the payload, but we do not
            # need to pass it in as the original
            result = {resource_name: original}
            original = {}
        else:
            if not state.response.body:
                result = {}
            else:
                result = state.response.json

        notifier_method = '%s.%s.end' % (resource_name, action)
        notifier_action = utils.get_controller(state).plugin_handlers[action]
        registry.publish(resource_name, events.BEFORE_RESPONSE, self,
                         payload=events.APIEventPayload(
                             neutron_context, notifier_method,
                             notifier_action,
                             request_body=state.request.body,
                             states=(original, result,),
                             collection_name=collection_name))

        if action == 'delete':
            resource_id = state.request.context.get('resource_id')
            result[resource_name + '_id'] = resource_id
        self._notifier.info(neutron_context, notifier_method, result)

neutron-16.0.0.0b2.dev214/neutron/pecan_wsgi/hooks/ownership_validation.py
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib.plugins import directory
from pecan import hooks
import webob

from neutron._i18n import _


class OwnershipValidationHook(hooks.PecanHook):

    priority = 125

    def before(self, state):
        if state.request.method != 'POST':
            return
        for item in state.request.context.get('resources', []):
            self._validate_network_tenant_ownership(state, item)

    def _validate_network_tenant_ownership(self, state, resource_item):
        # TODO(salvatore-orlando): consider whether this check can be folded
        # into the policy engine
        neutron_context = state.request.context.get('neutron_context')
        resource = state.request.context.get('resource')
        if (neutron_context.is_admin or neutron_context.is_advsvc or
                resource not in ('port', 'subnet')):
            return
        plugin = directory.get_plugin()
        network = plugin.get_network(neutron_context,
                                     resource_item['network_id'])
        # do not perform the check on shared networks
        if network.get('shared'):
            return

        network_owner = network['tenant_id']

        if network_owner != resource_item['tenant_id']:
            msg = _("Tenant %(tenant_id)s not allowed to "
                    "create %(resource)s on this network")
            raise webob.exc.HTTPForbidden(msg % {
                "tenant_id": resource_item['tenant_id'],
                "resource": resource,
            })

neutron-16.0.0.0b2.dev214/neutron/pecan_wsgi/hooks/policy_enforcement.py
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
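# Behaviour sketch (for orientation; not part of the original file): on PUT
# and DELETE the before() hook prefetches the original object so oslo.policy
# rules referencing existing attributes can be evaluated. A failed check
# yields 403 when the caller could at least "show" the object, and 404
# otherwise, so the existence of other tenants' objects is not leaked.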
import copy

from neutron_lib import constants as const
from oslo_log import log as logging
from oslo_policy import policy as oslo_policy
from oslo_utils import excutils
from pecan import hooks
import webob

from neutron._i18n import _
from neutron.extensions import quotasv2
from neutron import manager
from neutron.pecan_wsgi import constants as pecan_constants
from neutron.pecan_wsgi.controllers import quota
from neutron.pecan_wsgi.hooks import utils
from neutron import policy

LOG = logging.getLogger(__name__)


def _custom_getter(resource, resource_id):
    """Helper function to retrieve resources not served by any plugin."""
    if resource == quotasv2.RESOURCE_NAME:
        return quota.get_tenant_quotas(resource_id)[quotasv2.RESOURCE_NAME]


def fetch_resource(method, neutron_context, controller,
                   collection, resource, resource_id,
                   parent_id=None):
    field_list = []
    if method == 'PUT':
        attrs = controller.resource_info
        if not attrs:
            # this isn't a request for a normal resource. it could be
            # an action like removing a network from a dhcp agent.
            # return None and assume the custom controller for this will
            # handle the necessary logic.
            return
        field_list = [name for (name, value) in attrs.items()
                      if (value.get('required_by_policy') or
                          value.get('primary_key') or
                          'default' not in value)]
    plugin = manager.NeutronManager.get_plugin_for_resource(collection)
    if plugin:
        if utils.is_member_action(controller):
            getter = controller.parent_controller.plugin_shower
        else:
            getter = controller.plugin_shower
        getter_args = [neutron_context, resource_id]
        if parent_id:
            getter_args.append(parent_id)
        return getter(*getter_args, fields=field_list)
    else:
        # Some legit resources, like quota, do not have a plugin yet.
        # Retrieving the original object is nevertheless important
        # for policy checks.
        return _custom_getter(resource, resource_id)


class PolicyHook(hooks.PecanHook):

    priority = 140

    def before(self, state):
        # This hook should be run only for PUT, POST and DELETE methods and
        # for requests targeting a neutron resource
        resources = state.request.context.get('resources', [])
        if state.request.method not in ('POST', 'PUT', 'DELETE'):
            return
        # As this routine will likely alter the resources, do a shallow copy
        resources_copy = resources[:]
        neutron_context = state.request.context.get('neutron_context')
        resource = state.request.context.get('resource')
        # If there is no resource for this request, don't bother running authZ
        # policies
        if not resource:
            return
        controller = utils.get_controller(state)
        if not controller or utils.is_member_action(controller):
            return
        collection = state.request.context.get('collection')
        needs_prefetch = (state.request.method == 'PUT' or
                          state.request.method == 'DELETE')
        policy.init()

        action = controller.plugin_handlers[
            pecan_constants.ACTION_MAP[state.request.method]]

        # NOTE(salv-orlando): As bulk updates are not supported, in case of
        # PUT requests there will be only a single item to process, and its
        # identifier would have been already retrieved by the lookup process;
        # in the case of DELETE requests there won't be any item to process in
        # the request body
        original_resources = []
        if needs_prefetch:
            try:
                item = resources_copy.pop()
            except IndexError:
                # Ops... this was a delete after all!
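                # (DELETE requests carry no body, so there is nothing to
                # pop; fall back to an empty item and rely on the object
                # prefetched below.)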
                item = {}
            resource_id = state.request.context.get('resource_id')
            parent_id = state.request.context.get('parent_id')
            method = state.request.method
            resource_obj = fetch_resource(method, neutron_context, controller,
                                          collection, resource, resource_id,
                                          parent_id=parent_id)
            if resource_obj:
                original_resources.append(resource_obj)
                obj = copy.copy(resource_obj)
                obj.update(item)
                obj[const.ATTRIBUTES_TO_UPDATE] = list(item)
                # Put the item back in the list so that policies can be
                # enforced
                resources_copy.append(obj)
        # TODO(salv-orlando): as other hooks might need to prefetch resources,
        # store them in the request context. However, this should be done in a
        # separate hook which is conveniently called before all other hooks
        state.request.context['original_resources'] = original_resources
        for item in resources_copy:
            try:
                policy.enforce(
                    neutron_context, action, item, pluralized=collection)
            except oslo_policy.PolicyNotAuthorized:
                with excutils.save_and_reraise_exception() as ctxt:
                    # If a tenant is modifying its own object, it's safe to
                    # return a 403. Otherwise, pretend that it doesn't exist
                    # to avoid giving away information.
                    controller = utils.get_controller(state)
                    s_action = controller.plugin_handlers[controller.SHOW]
                    if not policy.check(neutron_context, s_action, item,
                                        pluralized=collection):
                        ctxt.reraise = False
                        msg = _('The resource could not be found.')
                        raise webob.exc.HTTPNotFound(msg)

    def after(self, state):
        neutron_context = state.request.context.get('neutron_context')
        resource = state.request.context.get('resource')
        collection = state.request.context.get('collection')
        controller = utils.get_controller(state)
        if not resource:
            # can't filter a resource we don't recognize
            return
        # NOTE(kevinbenton): extension listing isn't controlled by policy
        if resource == 'extension':
            return
        try:
            data = state.response.json
        except ValueError:
            return
        if state.request.method not in pecan_constants.ACTION_MAP:
            return
        if not data or (resource not in data and collection not in data):
            return
        policy.init()
        is_single = resource in data
        action_type = pecan_constants.ACTION_MAP[state.request.method]
        if action_type == 'get':
            action = controller.plugin_handlers[controller.SHOW]
        else:
            action = controller.plugin_handlers[action_type]
        key = resource if is_single else collection
        to_process = [data[resource]] if is_single else data[collection]
        # in the single case, we enforce, which raises on violation;
        # in the plural case, we just check, so violating items are hidden
        policy_method = policy.enforce if is_single else policy.check
        plugin = manager.NeutronManager.get_plugin_for_resource(collection)
        try:
            resp = [self._get_filtered_item(state.request, controller,
                                            resource, collection, item)
                    for item in to_process
                    if (state.request.method != 'GET' or
                        policy_method(neutron_context, action, item,
                                      plugin=plugin,
                                      pluralized=collection))]
        except oslo_policy.PolicyNotAuthorized:
            # This exception must be explicitly caught as the exception
            # translation hook won't be called if an error occurs in the
            # 'after' handler. Instead of raising an HTTPNotFound exception,
            # we have to set the status_code here to prevent the catch_errors
            # middleware from turning this into a 500.
state.response.status_code = 404 return if is_single: resp = resp[0] state.response.json = {key: resp} def _get_filtered_item(self, request, controller, resource, collection, data): neutron_context = request.context.get('neutron_context') to_exclude = self._exclude_attributes_by_policy( neutron_context, controller, resource, collection, data) return self._filter_attributes(request, data, to_exclude) def _filter_attributes(self, request, data, fields_to_strip): # This routine will remove the fields that were requested to the # plugin for policy evaluation but were not specified in the # API request return dict(item for item in data.items() if item[0] not in fields_to_strip) def _exclude_attributes_by_policy(self, context, controller, resource, collection, data): """Identifies attributes to exclude according to authZ policies. Return a list of attribute names which should be stripped from the response returned to the user because the user is not authorized to see them. """ attributes_to_exclude = [] for attr_name in list(data): # TODO(amotoki): All attribute maps have tenant_id and # it determines excluded attributes based on tenant_id. # We need to migrate tenant_id to project_id later # as attr_info is referred to in various places and we need # to check all logs carefully. if attr_name == 'project_id': continue attr_data = controller.resource_info.get(attr_name) if attr_data and attr_data['is_visible']: if policy.check( context, # NOTE(kevinbenton): this used to reference a # _plugin_handlers dict, why? 'get_%s:%s' % (resource, attr_name), data, might_not_exist=True, pluralized=collection): # this attribute is visible, check next one continue # if the code reaches this point then either the policy check # failed or the attribute was not visible in the first place attributes_to_exclude.append(attr_name) # TODO(amotoki): As mentioned in the above TODO, # we treat project_id and tenant_id equivalently. # This should be migrated to project_id later. if attr_name == 'tenant_id': attributes_to_exclude.append('project_id') if attributes_to_exclude: LOG.debug("Attributes excluded by policy engine: %s", attributes_to_exclude) return attributes_to_exclude ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/pecan_wsgi/hooks/query_parameters.py0000644000175000017500000001455600000000000026054 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from pecan import hooks from neutron.api import api_common from neutron import manager from neutron.pecan_wsgi.hooks import policy_enforcement from neutron.pecan_wsgi.hooks import utils # TODO(blogan): ideally it'd be nice to get the pagination and sorting # helpers from the controller but since the controllers are # instantiated at startup and not on request, it would cause race # conditions because we need a new instantiation of a pagination # and sorting helper per request/response flow. As a result, we're forced to # pass them through the request context. 
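# A rough usage sketch of the per-request caching implemented by the helpers
# below; ``request`` and ``controller`` stand in for the real Pecan request
# and Neutron controller objects:
#
#     helper = _get_pagination_helper(request, controller)
#     # A second call within the same request returns the cached helper
#     # stored in request.context['pagination_helper'].
#     assert _get_pagination_helper(request, controller) is helper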
def _get_pagination_helper(request, controller): if 'pagination_helper' in request.context: return request.context['pagination_helper'] if not controller.allow_pagination: helper = api_common.NoPaginationHelper(request, controller.primary_key) elif controller.native_pagination: helper = api_common.PaginationNativeHelper(request, controller.primary_key) else: helper = api_common.PaginationEmulatedHelper(request, controller.primary_key) request.context['pagination_helper'] = helper return helper def _get_sorting_helper(request, controller): if 'sorting_helper' in request.context: return request.context['sorting_helper'] if not controller.allow_sorting: helper = api_common.NoSortingHelper(request, controller.resource_info) elif controller.native_sorting: helper = api_common.SortingNativeHelper(request, controller.resource_info) else: helper = api_common.SortingEmulatedHelper(request, controller.resource_info) request.context['sorting_helper'] = helper return helper def _listify(thing): return thing if isinstance(thing, list) else [thing] def _set_fields(state, controller): params = state.request.params.mixed() fields = params.get('fields', []) # if only one fields query parameter is passed, pecan will not put # that parameter in a list, so we need to convert it into a list fields = _listify(fields) combined_fields, added_fields = controller.build_field_list(fields) state.request.context['query_params']['fields'] = combined_fields state.request.context['added_fields'] = added_fields return combined_fields, added_fields def _set_filters(state, controller): params = state.request.params.mixed() filters = api_common.get_filters_from_dict( {k: _listify(v) for k, v in params.items()}, controller.resource_info, skips=['fields', 'sort_key', 'sort_dir', 'limit', 'marker', 'page_reverse'], is_filter_validation_supported=controller.filter_validation) return filters class QueryParametersHook(hooks.PecanHook): # NOTE(blogan): needs to be run after the priority hook. after methods # are run in reverse priority order. 
priority = policy_enforcement.PolicyHook.priority - 1 def before(self, state): self._process_if_match_headers(state) state.request.context['query_params'] = {} if state.request.method != 'GET': return collection = state.request.context.get('collection') if not collection: return controller = utils.get_controller(state) combined_fields, added_fields = _set_fields(state, controller) filters = _set_filters(state, controller) query_params = {'fields': combined_fields, 'filters': filters} pagination_helper = _get_pagination_helper(state.request, controller) sorting_helper = _get_sorting_helper(state.request, controller) sorting_helper.update_args(query_params) sorting_helper.update_fields(query_params.get('fields', []), added_fields) pagination_helper.update_args(query_params) pagination_helper.update_fields(query_params.get('fields', []), added_fields) state.request.context['query_params'] = query_params def _process_if_match_headers(self, state): collection = state.request.context.get('collection') if not collection: return # add in if-match criterion to the context if present revision_number = api_common.check_request_for_revision_constraint( state.request) if revision_number is None: return state.request.context['neutron_context'].set_transaction_constraint( collection, state.request.context['resource_id'], revision_number) def after(self, state): resource = state.request.context.get('resource') collection = state.request.context.get('collection') # NOTE(blogan): don't paginate extension list or non-GET requests if (not resource or resource == 'extension' or state.request.method != 'GET'): return try: data = state.response.json except ValueError: return # Do not attempt to paginate if the body is not a list of entities if not data or resource in data or collection not in data: return controller = manager.NeutronManager.get_controller_for_resource( collection) sorting_helper = _get_sorting_helper(state.request, controller) pagination_helper = _get_pagination_helper(state.request, controller) obj_list = sorting_helper.sort(data[collection]) obj_list = pagination_helper.paginate(obj_list) resp_body = {collection: obj_list} pagination_links = pagination_helper.get_links(obj_list) if pagination_links: resp_body['_'.join([collection, 'links'])] = pagination_links state.response.json = resp_body ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/pecan_wsgi/hooks/quota_enforcement.py0000644000175000017500000000670400000000000026176 0ustar00coreycorey00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
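"""Quota enforcement hook for the pecan WSGI stack.

``QuotaEnforcementHook.before`` groups the items of a POST request by tenant
and makes one quota reservation per tenant;
``QuotaEnforcementHook.after`` commits those reservations once the request
has been processed.

A minimal sketch of the reservation flow used below, assuming a hypothetical
bulk port create where ``ports`` is a list of port dicts::

    deltas = collections.Counter(p['tenant_id'] for p in ports)
    for tenant_id, delta in deltas.items():
        reservation = quota.QUOTAS.make_reservation(
            context, tenant_id, {'port': delta}, plugin)
        # ... after the request succeeds:
        quota.QUOTAS.commit_reservation(
            context, reservation.reservation_id)
"""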
import collections from neutron_lib.db import api as db_api from neutron_lib import exceptions from oslo_log import log as logging from pecan import hooks from neutron import manager from neutron import quota from neutron.quota import resource_registry LOG = logging.getLogger(__name__) class QuotaEnforcementHook(hooks.PecanHook): priority = 130 def before(self, state): collection = state.request.context.get('collection') resource = state.request.context.get('resource') items = state.request.context.get('resources') if state.request.method != 'POST' or not resource or not items: return plugin = manager.NeutronManager.get_plugin_for_resource(collection) # Store requested resource amounts grouping them by tenant deltas = collections.Counter(map(lambda x: x['tenant_id'], items)) # Perform quota enforcement reservations = [] neutron_context = state.request.context.get('neutron_context') for (tenant_id, delta) in deltas.items(): try: reservation = quota.QUOTAS.make_reservation( neutron_context, tenant_id, {resource: delta}, plugin) LOG.debug("Made reservation on behalf of %(tenant_id)s " "for: %(delta)s", {'tenant_id': tenant_id, 'delta': {resource: delta}}) reservations.append(reservation) except exceptions.QuotaResourceUnknown as e: # Quotas cannot be enforced on this resource LOG.debug(e) # Save the reservations in the request context so that they can be # retrieved in the 'after' hook state.request.context['reservations'] = reservations @db_api.retry_db_errors def after(self, state): neutron_context = state.request.context.get('neutron_context') if not neutron_context: return collection = state.request.context.get('collection') resource = state.request.context.get('resource') if state.request.method == 'GET' and collection: # resync on list operations to preserve behavior of old API resource_registry.resync_resource( neutron_context, resource, neutron_context.tenant_id) # Commit reservation(s) reservations = state.request.context.get('reservations') or [] if not reservations and state.request.method != 'DELETE': return with db_api.CONTEXT_WRITER.using(neutron_context): # Commit the reservation(s) for reservation in reservations: quota.QUOTAS.commit_reservation( neutron_context, reservation.reservation_id) resource_registry.set_resources_dirty(neutron_context) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/pecan_wsgi/hooks/translation.py0000644000175000017500000000313400000000000025010 0ustar00coreycorey00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
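"""Exception translation hook for the pecan WSGI stack.

``ExceptionTranslationHook.on_error`` converts any exception raised while
handling a request into the proper HTTP error, translated into the best
language resolved from the request's Accept-Language header.

A minimal sketch of the conversion primitive used below; the exception
instance is a hypothetical example::

    exc = api_common.convert_exception_to_http_exc(
        ValueError('boom'), faults.FAULT_MAP, None)
    # exc is now a webob HTTP exception with a suitable status code.
"""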
from neutron_lib.api import faults import oslo_i18n from oslo_log import log as logging from pecan import hooks from neutron.api import api_common LOG = logging.getLogger(__name__) class ExceptionTranslationHook(hooks.PecanHook): def on_error(self, state, e): language = None if state.request.accept_language: all_languages = oslo_i18n.get_available_languages('neutron') language = state.request.accept_language.lookup( all_languages, default='fake_LANG') if language == 'fake_LANG': language = None exc = api_common.convert_exception_to_http_exc(e, faults.FAULT_MAP, language) if hasattr(exc, 'code') and 400 <= exc.code < 500: LOG.info('%(action)s failed (client error): %(exc)s', {'action': state.request.method, 'exc': exc}) else: LOG.exception('%s failed.', state.request.method) return exc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/pecan_wsgi/hooks/userfilters.py0000644000175000017500000000337700000000000025032 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from pecan import hooks class UserFilterHook(hooks.PecanHook): # we do this at the very end to ensure user-defined filters # don't impact things like pagination and notification hooks priority = 90 def after(self, state): user_fields = state.request.params.getall('fields') if not user_fields: return try: data = state.response.json except ValueError: return resource = state.request.context.get('resource') collection = state.request.context.get('collection') if collection not in data and resource not in data: return is_single = resource in data key = resource if resource in data else collection if is_single: data[key] = self._filter_item( state.response.json[key], user_fields) else: data[key] = [ self._filter_item(i, user_fields) for i in state.response.json[key] ] state.response.json = data def _filter_item(self, item, fields): return { field: value for field, value in item.items() if field in fields } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/pecan_wsgi/hooks/utils.py0000644000175000017500000000221000000000000023604 0ustar00coreycorey00000000000000# Copyright (c) 2015 Taturiello Consulting, Meh. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
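"""Small helpers shared by the pecan_wsgi hooks.

``get_controller`` extracts the routed NeutronPecanController (if any) from
the Pecan request state, and ``is_member_action`` tells a hook whether that
controller serves a member action rather than a plain resource.

A typical guard used by the hooks in this package, where ``state`` is the
Pecan hook state::

    controller = get_controller(state)
    if not controller or is_member_action(controller):
        return
"""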
from neutron.pecan_wsgi.controllers import resource
from neutron.pecan_wsgi.controllers import utils as controller_utils


def get_controller(state):
    if (state.arguments and state.arguments.args and
            isinstance(state.arguments.args[0],
                       controller_utils.NeutronPecanController)):
        controller = state.arguments.args[0]
        return controller


def is_member_action(controller):
    return isinstance(controller, resource.MemberActionController)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/pecan_wsgi/startup.py0000644000175000017500000001415200000000000023033 0ustar00coreycorey00000000000000
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib.api import attributes
from neutron_lib.plugins import directory

from neutron.api import extensions
from neutron.api.v2 import base
from neutron import manager
from neutron.pecan_wsgi.controllers import resource as res_ctrl
from neutron.pecan_wsgi.controllers import utils
from neutron import policy
from neutron.quota import resource_registry

# NOTE(blogan): This currently already exists in neutron.api.v2.router but
# instead of importing that module and creating circular imports elsewhere,
# it's easier to just copy it here. The likelihood of it needing to be
# changed is slim to none.
RESOURCES = {'network': 'networks',
             'subnet': 'subnets',
             'subnetpool': 'subnetpools',
             'port': 'ports'}


def initialize_all():
    manager.init()
    ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
    ext_mgr.extend_resources("2.0", attributes.RESOURCES)
    # At this stage we have a fully populated resource attribute map;
    # build Pecan controllers and routes for all core resources
    plugin = directory.get_plugin()
    for resource, collection in RESOURCES.items():
        resource_registry.register_resource_by_name(resource)
        new_controller = res_ctrl.CollectionsController(collection, resource,
                                                        plugin=plugin)
        manager.NeutronManager.set_controller_for_resource(
            collection, new_controller)
        manager.NeutronManager.set_plugin_for_resource(collection, plugin)
    pecanized_resources = ext_mgr.get_pecan_resources()
    for pec_res in pecanized_resources:
        manager.NeutronManager.set_controller_for_resource(
            pec_res.collection, pec_res.controller)
        manager.NeutronManager.set_plugin_for_resource(
            pec_res.collection, pec_res.plugin)
    # Now build Pecan Controllers and routes for all extensions
    resources = ext_mgr.get_resources()
    # Extensions controller is already defined, we don't need it.
    resources.pop(0)
    for ext_res in resources:
        path_prefix = ext_res.path_prefix.strip('/')
        collection = ext_res.collection
        # Retrieving the parent resource. The parent resource is expected
        # to have the format:
        # {'collection_name': 'name-of-collection',
        #  'member_name': 'name-of-resource'}
        # collection_name does not appear to be used in the legacy code
        # inside the controller logic, so we can assume we do not need it.
parent = ext_res.parent or {} parent_resource = parent.get('member_name') collection_key = collection if parent_resource: collection_key = '/'.join([parent_resource, collection]) collection_actions = ext_res.collection_actions member_actions = ext_res.member_actions if manager.NeutronManager.get_controller_for_resource(collection_key): # This is a collection that already has a pecan controller, we # do not need to do anything else continue legacy_controller = getattr(ext_res.controller, 'controller', ext_res.controller) new_controller = None if isinstance(legacy_controller, base.Controller): resource = legacy_controller.resource plugin = legacy_controller.plugin attr_info = legacy_controller.attr_info member_actions = legacy_controller.member_actions pagination = legacy_controller.allow_pagination sorting = legacy_controller.allow_sorting # NOTE(blogan): legacy_controller and ext_res both can both have # member_actions. the member_actions for ext_res are strictly for # routing, while member_actions for legacy_controller are used for # handling the request once the routing has found the controller. # They're always the same so we will just use the ext_res # member_action. new_controller = res_ctrl.CollectionsController( collection, resource, resource_info=attr_info, parent_resource=parent_resource, member_actions=member_actions, plugin=plugin, allow_pagination=pagination, allow_sorting=sorting, collection_actions=collection_actions) # new_controller.collection has replaced hyphens with underscores manager.NeutronManager.set_plugin_for_resource( new_controller.collection, plugin) if path_prefix: manager.NeutronManager.add_resource_for_path_prefix( collection, path_prefix) else: new_controller = utils.ShimCollectionsController( collection, None, legacy_controller, collection_actions=collection_actions, member_actions=member_actions, action_status=ext_res.controller.action_status, collection_methods=ext_res.collection_methods) manager.NeutronManager.set_controller_for_resource( collection_key, new_controller) # Certain policy checks require that the extensions are loaded # and the RESOURCE_ATTRIBUTE_MAP populated before they can be # properly initialized. This can only be claimed with certainty # once this point in the code has been reached. In the event # that the policies have been initialized before this point, # calling reset will cause the next policy check to # re-initialize with all of the required data in place. 
policy.reset() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.331045 neutron-16.0.0.0b2.dev214/neutron/plugins/0000755000175000017500000000000000000000000020316 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/__init__.py0000644000175000017500000000000000000000000022415 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.331045 neutron-16.0.0.0b2.dev214/neutron/plugins/common/0000755000175000017500000000000000000000000021606 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/common/__init__.py0000644000175000017500000000000000000000000023705 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/common/constants.py0000644000175000017500000000235400000000000024200 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.plugins import constants as p_const # Maps extension alias to service type that # can be implemented by the core plugin. EXT_TO_SERVICE_MAPPING = { 'fwaas': p_const.FIREWALL, 'vpnaas': p_const.VPN, 'metering': p_const.METERING, 'router': p_const.L3, 'qos': p_const.QOS, } # Maps default service plugins entry points to their extension aliases DEFAULT_SERVICE_PLUGINS = { 'auto_allocate': 'auto-allocated-topology', 'tag': 'tag', 'timestamp': 'timestamp', 'network_ip_availability': 'network-ip-availability', 'flavors': 'flavors', 'revisions': 'revisions', } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3350449 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/0000755000175000017500000000000000000000000021010 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/README0000644000175000017500000000570400000000000021676 0ustar00coreycorey00000000000000The Modular Layer 2 (ML2) plugin is a framework allowing OpenStack Networking to simultaneously utilize the variety of layer 2 networking technologies found in complex real-world data centers. It supports the Open vSwitch, Linux bridge, and Hyper-V L2 agents, replacing and deprecating the monolithic plugins previously associated with those agents, and can also support hardware devices and SDN controllers. The ML2 framework is intended to greatly simplify adding support for new L2 networking technologies, requiring much less initial and ongoing effort than would be required for an additional monolithic core plugin. 
It is also intended to foster innovation through its organization as optional driver modules. The ML2 plugin supports all the non-vendor-specific neutron API extensions, and works with the standard neutron DHCP agent. It utilizes the service plugin interface to implement the L3 router abstraction, allowing use of either the standard neutron L3 agent or alternative L3 solutions. Additional service plugins can also be used with the ML2 core plugin. Drivers within ML2 implement separately extensible sets of network types and of mechanisms for accessing networks of those types. Multiple mechanisms can be used simultaneously to access different ports of the same virtual network. Mechanisms can utilize L2 agents via RPC and/or interact with external devices or controllers. By utilizing the multiprovidernet extension, virtual networks can be composed of multiple segments of the same or different types. Type and mechanism drivers are loaded as python entrypoints using the stevedore library. Each available network type is managed by an ML2 type driver. Type drivers maintain any needed type-specific network state, and perform provider network validation and tenant network allocation. As of the havana release, drivers for the local, flat, vlan, gre, and vxlan network types are included. Each available networking mechanism is managed by an ML2 mechanism driver. All registered mechanism drivers are called twice when networks, subnets, and ports are created, updated, or deleted. They are first called as part of the DB transaction, where they can maintain any needed driver-specific state. Once the transaction has been committed, they are called again, at which point they can interact with external devices and controllers. Mechanism drivers are also called as part of the port binding process, to determine whether the associated mechanism can provide connectivity for the network, and if so, the network segment and VIF driver to be used. The havana release includes mechanism drivers for the Open vSwitch, Linux bridge, and Hyper-V L2 agents, and for vendor switches/controllers/etc. It also includes an L2 Population mechanism driver that can help optimize tunneled virtual network traffic. For additional information regarding the ML2 plugin and its collection of type and mechanism drivers, see the OpenStack manuals and http://wiki.openstack.org/wiki/Neutron/ML2. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/__init__.py0000644000175000017500000000000000000000000023107 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3350449 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/common/0000755000175000017500000000000000000000000022300 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/common/__init__.py0000644000175000017500000000000000000000000024377 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/common/exceptions.py0000644000175000017500000000332000000000000025031 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Exceptions used by ML2.""" from neutron_lib import exceptions from neutron._i18n import _ class MechanismDriverError(exceptions.MultipleExceptions): """Mechanism driver call failed.""" def __init__(self, method, errors=None): # The message is not used by api, because api will unwrap # MultipleExceptions and return inner exceptions. Keep it # for backward-compatibility, in case other code use it. self.message = _("%s failed.") % method super(MechanismDriverError, self).__init__(errors or []) class ExtensionDriverError(exceptions.InvalidInput): """Extension driver call failed.""" message = _("Extension %(driver)s failed.") class ExtensionDriverNotFound(exceptions.InvalidConfigurationOption): """Required extension driver not found in ML2 config.""" message = _("Extension driver %(driver)s required for " "service plugin %(service_plugin)s not found.") class UnknownNetworkType(exceptions.NeutronException): """Network with unknown type.""" message = _("Unknown network type %(network_type)s.") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/db.py0000644000175000017500000003142000000000000021747 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
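"""Database access helpers used by the ML2 plugin.

Several helpers accept *partial* port UUIDs and match them with
``startswith`` because agents may only report a truncated device id;
``MAX_PORTS_PER_QUERY`` bounds the size of the resulting OR queries.

A minimal usage sketch, assuming an admin context and an existing, bound
port::

    binding = add_port_binding(context, port_id)
    host = get_port_binding_host(context, port_id)
    # Resolve a truncated id back to the full UUID.
    full_ids = partial_port_ids_to_full_ids(context, [port_id[:11]])
"""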
from neutron_lib.api.definitions import portbindings from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as n_const from neutron_lib.db import api as db_api from neutron_lib.plugins import directory from oslo_db import exception as db_exc from oslo_log import log from oslo_utils import uuidutils import six from sqlalchemy import or_ from sqlalchemy.orm import exc from neutron._i18n import _ from neutron.db.models import securitygroup as sg_models from neutron.db import models_v2 from neutron.objects import base as objects_base from neutron.objects import ports as port_obj from neutron.plugins.ml2 import models from neutron.services.segments import exceptions as seg_exc LOG = log.getLogger(__name__) # limit the number of port OR LIKE statements in one query MAX_PORTS_PER_QUERY = 500 @db_api.CONTEXT_WRITER def add_port_binding(context, port_id): record = models.PortBinding( port_id=port_id, vif_type=portbindings.VIF_TYPE_UNBOUND) context.session.add(record) return record @db_api.CONTEXT_WRITER def set_binding_levels(context, levels): if levels: for level in levels: level.create() LOG.debug("For port %(port_id)s, host %(host)s, " "set binding levels %(levels)s", {'port_id': levels[0].port_id, 'host': levels[0].host, 'levels': levels}) else: LOG.debug("Attempted to set empty binding levels") @db_api.CONTEXT_READER def get_binding_level_objs(context, port_id, host): if host: pager = objects_base.Pager(sorts=[('level', True)]) port_bl_objs = port_obj.PortBindingLevel.get_objects( context, _pager=pager, port_id=port_id, host=host) LOG.debug("For port %(port_id)s, host %(host)s, " "got binding levels %(levels)s", {'port_id': port_id, 'host': host, 'levels': port_bl_objs}) return port_bl_objs @db_api.CONTEXT_WRITER def clear_binding_levels(context, port_id, host): if host: port_obj.PortBindingLevel.delete_objects( context, port_id=port_id, host=host) LOG.debug("For port %(port_id)s, host %(host)s, " "cleared binding levels", {'port_id': port_id, 'host': host}) def ensure_distributed_port_binding(context, port_id, host, router_id=None): with db_api.CONTEXT_READER.using(context): record = (context.session.query(models.DistributedPortBinding). filter_by(port_id=port_id, host=host).first()) if record: return record try: with db_api.CONTEXT_WRITER.using(context): record = models.DistributedPortBinding( port_id=port_id, host=host, router_id=router_id, vif_type=portbindings.VIF_TYPE_UNBOUND, vnic_type=portbindings.VNIC_NORMAL, status=n_const.PORT_STATUS_DOWN) context.session.add(record) return record except db_exc.DBDuplicateEntry: LOG.debug("Distributed Port %s already bound", port_id) with db_api.CONTEXT_READER.using(context): return (context.session.query(models.DistributedPortBinding). filter_by(port_id=port_id, host=host).one()) def delete_distributed_port_binding_if_stale(context, binding): if not binding.router_id and binding.status == n_const.PORT_STATUS_DOWN: with db_api.CONTEXT_WRITER.using(context): LOG.debug("Distributed port: Deleting binding %s", binding) context.session.delete(binding) def get_port(context, port_id): """Get port record for update within transaction.""" with db_api.CONTEXT_READER.using(context): try: # Set enable_eagerloads to True, so that lazy load can be # proceed later. record = (context.session.query(models_v2.Port). enable_eagerloads(True). filter(models_v2.Port.id.startswith(port_id)). 
one()) return record except exc.NoResultFound: return except exc.MultipleResultsFound: LOG.error("Multiple ports have port_id starting with %s", port_id) return @db_api.CONTEXT_READER def get_port_from_device_mac(context, device_mac): LOG.debug("get_port_from_device_mac() called for mac %s", device_mac) ports = port_obj.Port.get_objects(context, mac_address=device_mac) return ports.pop() if ports else None def get_ports_and_sgs(context, port_ids): """Get ports from database with security group info.""" # break large queries into smaller parts if len(port_ids) > MAX_PORTS_PER_QUERY: LOG.debug("Number of ports %(pcount)s exceeds the maximum per " "query %(maxp)s. Partitioning queries.", {'pcount': len(port_ids), 'maxp': MAX_PORTS_PER_QUERY}) return (get_ports_and_sgs(context, port_ids[:MAX_PORTS_PER_QUERY]) + get_ports_and_sgs(context, port_ids[MAX_PORTS_PER_QUERY:])) LOG.debug("get_ports_and_sgs() called for port_ids %s", port_ids) if not port_ids: # if port_ids is empty, avoid querying to DB to ask it for nothing return [] ports_to_sg_ids = get_sg_ids_grouped_by_port(context, port_ids) return [make_port_dict_with_security_groups(port, sec_groups) for port, sec_groups in six.iteritems(ports_to_sg_ids)] def get_sg_ids_grouped_by_port(context, port_ids): sg_ids_grouped_by_port = {} sg_binding_port = sg_models.SecurityGroupPortBinding.port_id with db_api.CONTEXT_READER.using(context): # partial UUIDs must be individually matched with startswith. # full UUIDs may be matched directly in an IN statement partial_uuids = set(port_id for port_id in port_ids if not uuidutils.is_uuid_like(port_id)) full_uuids = set(port_ids) - partial_uuids or_criteria = [models_v2.Port.id.startswith(port_id) for port_id in partial_uuids] if full_uuids: or_criteria.append(models_v2.Port.id.in_(full_uuids)) query = context.session.query( models_v2.Port, sg_models.SecurityGroupPortBinding.security_group_id) query = query.outerjoin(sg_models.SecurityGroupPortBinding, models_v2.Port.id == sg_binding_port) query = query.filter(or_(*or_criteria)) for port, sg_id in query: if port not in sg_ids_grouped_by_port: sg_ids_grouped_by_port[port] = [] if sg_id: sg_ids_grouped_by_port[port].append(sg_id) return sg_ids_grouped_by_port def make_port_dict_with_security_groups(port, sec_groups): plugin = directory.get_plugin() port_dict = plugin._make_port_dict(port) port_dict['security_groups'] = sec_groups port_dict['security_group_rules'] = [] port_dict['security_group_source_groups'] = [] port_dict['fixed_ips'] = [ip['ip_address'] for ip in port['fixed_ips']] return port_dict def get_port_binding_host(context, port_id): try: with db_api.CONTEXT_READER.using(context): query = (context.session.query(models.PortBinding.host). 
filter(models.PortBinding.port_id.startswith(port_id))) query = query.filter( models.PortBinding.status == n_const.ACTIVE).one() except exc.NoResultFound: LOG.debug("No active binding found for port %(port_id)s", {'port_id': port_id}) return except exc.MultipleResultsFound: LOG.error("Multiple ports have port_id starting with %s", port_id) return return query.host @db_api.CONTEXT_READER def generate_distributed_port_status(context, port_id): # an OR'ed value of status assigned to parent port from the # distributedportbinding bucket query = context.session.query(models.DistributedPortBinding.status) final_status = n_const.PORT_STATUS_BUILD for bind in query.filter(models.DistributedPortBinding.port_id == port_id): if bind.status == n_const.PORT_STATUS_ACTIVE: return bind.status elif bind.status == n_const.PORT_STATUS_DOWN: final_status = bind.status return final_status def get_distributed_port_binding_by_host(context, port_id, host): with db_api.CONTEXT_READER.using(context): binding = ( context.session.query(models.DistributedPortBinding). filter(models.DistributedPortBinding.port_id.startswith(port_id), models.DistributedPortBinding.host == host).first()) if not binding: LOG.debug("No binding for distributed port %(port_id)s with host " "%(host)s", {'port_id': port_id, 'host': host}) return binding def get_distributed_port_bindings(context, port_id): with db_api.CONTEXT_READER.using(context): bindings = (context.session.query(models.DistributedPortBinding). filter(models.DistributedPortBinding.port_id.startswith( port_id)).all()) if not bindings: LOG.debug("No bindings for distributed port %s", port_id) return bindings @db_api.CONTEXT_READER def partial_port_ids_to_full_ids(context, partial_ids): """Takes a list of the start of port IDs and returns full IDs. Returns dictionary of partial IDs to full IDs if a single match is found. """ result = {} to_full_query = (context.session.query(models_v2.Port.id). filter(or_(*[models_v2.Port.id.startswith(p) for p in partial_ids]))) candidates = [match[0] for match in to_full_query] for partial_id in partial_ids: matching = [c for c in candidates if c.startswith(partial_id)] if len(matching) == 1: result[partial_id] = matching[0] continue if len(matching) < 1: LOG.info("No ports have port_id starting with %s", partial_id) elif len(matching) > 1: LOG.error("Multiple ports have port_id starting with %s", partial_id) return result @db_api.CONTEXT_READER def get_port_db_objects(context, port_ids): """Takes a list of port_ids and returns matching port db objects. return format is a dictionary keyed by passed in IDs with db objects for values or None if the port was not present. """ port_qry = (context.session.query(models_v2.Port). filter(models_v2.Port.id.in_(port_ids))) result = {p: None for p in port_ids} for port in port_qry: result[port.id] = port return result @db_api.CONTEXT_READER def is_dhcp_active_on_any_subnet(context, subnet_ids): if not subnet_ids: return False return bool(context.session.query(models_v2.Subnet.id). enable_eagerloads(False).filter_by(enable_dhcp=True). 
filter(models_v2.Subnet.id.in_(subnet_ids)).count()) def _prevent_segment_delete_with_port_bound(resource, event, trigger, payload=None): """Raise exception if there are any ports bound with segment_id.""" if payload.metadata.get('for_net_delete'): # don't check for network deletes return with db_api.CONTEXT_READER.using(payload.context): port_ids = port_obj.Port.get_port_ids_filter_by_segment_id( payload.context, segment_id=payload.resource_id) # There are still some ports in the segment, segment should not be deleted # TODO(xiaohhui): Should we delete the dhcp port automatically here? if port_ids: reason = _("The segment is still bound with port(s) " "%s") % ", ".join(port_ids) raise seg_exc.SegmentInUse(segment_id=payload.resource_id, reason=reason) def subscribe(): registry.subscribe(_prevent_segment_delete_with_port_bound, resources.SEGMENT, events.BEFORE_DELETE) subscribe() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/driver_context.py0000644000175000017500000002720500000000000024427 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import portbindings from neutron_lib import constants from neutron_lib.plugins.ml2 import api from oslo_log import log from oslo_serialization import jsonutils import sqlalchemy from neutron.db import segments_db LOG = log.getLogger(__name__) class InstanceSnapshot(object): """Used to avoid holding references to DB objects in PortContext.""" def __init__(self, obj): self._model_class = obj.__class__ self._identity_key = sqlalchemy.orm.util.identity_key(instance=obj)[1] self._cols = [col.key for col in sqlalchemy.inspect(self._model_class).columns] for col in self._cols: setattr(self, col, getattr(obj, col)) def persist_state_to_session(self, session): """Updates the state of the snapshot in the session. Finds the SQLA object in the session if it exists or creates a new object and updates the object with the column values stored in this snapshot. """ db_obj = session.query(self._model_class).get(self._identity_key) if db_obj: for col in self._cols: setattr(db_obj, col, getattr(self, col)) else: session.add(self._model_class(**{col: getattr(self, col) for col in self._cols})) def __getitem__(self, item): if item not in self._cols: raise KeyError(item) return getattr(self, item) class MechanismDriverContext(object): """MechanismDriver context base class.""" def __init__(self, plugin, plugin_context): self._plugin = plugin # This temporarily creates a reference loop, but the # lifetime of PortContext is limited to a single # method call of the plugin. 
self._plugin_context = plugin_context class NetworkContext(MechanismDriverContext, api.NetworkContext): def __init__(self, plugin, plugin_context, network, original_network=None, segments=None): super(NetworkContext, self).__init__(plugin, plugin_context) self._network = network self._original_network = original_network self._segments = segments_db.get_network_segments( plugin_context, network['id']) if segments is None else segments @property def current(self): return self._network @property def original(self): return self._original_network @property def network_segments(self): return self._segments class SubnetContext(MechanismDriverContext, api.SubnetContext): def __init__(self, plugin, plugin_context, subnet, network, original_subnet=None): super(SubnetContext, self).__init__(plugin, plugin_context) self._subnet = subnet self._original_subnet = original_subnet self._network_context = NetworkContext(plugin, plugin_context, network) if network else None @property def current(self): return self._subnet @property def original(self): return self._original_subnet @property def network(self): if self._network_context is None: network = self._plugin.get_network( self._plugin_context, self.current['network_id']) self._network_context = NetworkContext( self._plugin, self._plugin_context, network) return self._network_context class PortContext(MechanismDriverContext, api.PortContext): def __init__(self, plugin, plugin_context, port, network, binding, binding_levels, original_port=None): super(PortContext, self).__init__(plugin, plugin_context) self._port = port self._original_port = original_port if isinstance(network, NetworkContext): self._network_context = network else: self._network_context = NetworkContext( plugin, plugin_context, network) if network else None # NOTE(kevinbenton): InstanceSnapshot can go away once we are working # with OVO objects instead of native SQLA objects. self._binding = InstanceSnapshot(binding) self._binding_levels = binding_levels or [] self._segments_to_bind = None self._new_bound_segment = None self._next_segments_to_bind = None if original_port: self._original_vif_type = binding.vif_type self._original_vif_details = self._plugin._get_vif_details(binding) self._original_binding_levels = self._binding_levels else: self._original_vif_type = None self._original_vif_details = None self._original_binding_levels = None self._new_port_status = None # The following methods are for use by the ML2 plugin and are not # part of the driver API. def _prepare_to_bind(self, segments_to_bind): self._segments_to_bind = segments_to_bind self._new_bound_segment = None self._next_segments_to_bind = None def _clear_binding_levels(self): self._binding_levels = [] def _push_binding_level(self, binding_level): # NOTE(slaweq): binding_level should be always OVO with no reference # to DB object self._binding_levels.append(binding_level) def _pop_binding_level(self): return self._binding_levels.pop() # The following implement the abstract methods and properties of # the driver API. @property def current(self): return self._port @property def original(self): return self._original_port @property def status(self): # REVISIT(rkukura): Eliminate special DVR case as part of # resolving bug 1367391? if self._port['device_owner'] == constants.DEVICE_OWNER_DVR_INTERFACE: return self._binding.status return self._port['status'] @property def original_status(self): # REVISIT(rkukura): Should return host-specific status for DVR # ports. Fix as part of resolving bug 1367391. 
if self._original_port: return self._original_port['status'] @property def network(self): if not self._network_context: network = self._plugin.get_network( self._plugin_context, self.current['network_id']) self._network_context = NetworkContext( self._plugin, self._plugin_context, network) return self._network_context @property def binding_levels(self): if self._binding_levels: return [{ api.BOUND_DRIVER: level.driver, api.BOUND_SEGMENT: self._expand_segment(level.segment_id) } for level in self._binding_levels] @property def original_binding_levels(self): if self._original_binding_levels: return [{ api.BOUND_DRIVER: level.driver, api.BOUND_SEGMENT: self._expand_segment(level.segment_id) } for level in self._original_binding_levels] @property def top_bound_segment(self): if self._binding_levels: return self._expand_segment(self._binding_levels[0].segment_id) @property def original_top_bound_segment(self): if self._original_binding_levels: return self._expand_segment( self._original_binding_levels[0].segment_id) @property def bottom_bound_segment(self): if self._binding_levels: return self._expand_segment(self._binding_levels[-1].segment_id) @property def original_bottom_bound_segment(self): if self._original_binding_levels: return self._expand_segment( self._original_binding_levels[-1].segment_id) def _expand_segment(self, segment_id): for s in self.network.network_segments: if s['id'] == segment_id: return s # TODO(kevinbenton): eliminate the query below. The above should # always return since the port is bound to a network segment. Leaving # in for now for minimally invasive change for back-port. segment = segments_db.get_segment_by_id(self._plugin_context, segment_id) if not segment: LOG.warning("Could not expand segment %s", segment_id) return segment @property def host(self): # REVISIT(rkukura): Eliminate special DVR case as part of # resolving bug 1367391? if self._port['device_owner'] == constants.DEVICE_OWNER_DVR_INTERFACE: return self._binding.host return self._port.get(portbindings.HOST_ID) @property def original_host(self): # REVISIT(rkukura): Eliminate special DVR case as part of # resolving bug 1367391? if self._port['device_owner'] == constants.DEVICE_OWNER_DVR_INTERFACE: return self._original_port and self._binding.host else: return (self._original_port and self._original_port.get(portbindings.HOST_ID)) @property def vif_type(self): return self._binding.vif_type @property def original_vif_type(self): return self._original_vif_type @property def vif_details(self): return self._plugin._get_vif_details(self._binding) @property def original_vif_details(self): return self._original_vif_details @property def segments_to_bind(self): return self._segments_to_bind def host_agents(self, agent_type): return self._plugin.get_agents(self._plugin_context, filters={'agent_type': [agent_type], 'host': [self._binding.host]}) def set_binding(self, segment_id, vif_type, vif_details, status=None): # TODO(rkukura) Verify binding allowed, segment in network self._new_bound_segment = segment_id self._binding.vif_type = vif_type self._binding.vif_details = jsonutils.dumps(vif_details) self._new_port_status = status def _unset_binding(self): '''Undo a previous call to set_binding() before it gets committed. This method is for MechanismManager and is not part of the driver API. 
''' self._new_bound_segment = None self._binding.vif_type = portbindings.VIF_TYPE_UNBOUND self._binding.vif_details = '' self._new_port_status = None def continue_binding(self, segment_id, next_segments_to_bind): # TODO(rkukura) Verify binding allowed, segment in network self._new_bound_segment = segment_id self._next_segments_to_bind = next_segments_to_bind def allocate_dynamic_segment(self, segment): network_id = self._network_context.current['id'] return self._plugin.type_manager.allocate_dynamic_segment( self._plugin_context, network_id, segment) def release_dynamic_segment(self, segment_id): return self._plugin.type_manager.release_dynamic_segment( self._plugin_context, segment_id) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3350449 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/0000755000175000017500000000000000000000000022466 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/__init__.py0000644000175000017500000000000000000000000024565 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3390448 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/agent/0000755000175000017500000000000000000000000023564 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/agent/__init__.py0000644000175000017500000000000000000000000025663 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/agent/_agent_manager_base.py0000644000175000017500000001700400000000000030061 0ustar00coreycorey00000000000000# Copyright (c) 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six class NetworkSegment(object): """Represents a Neutron network segment""" def __init__(self, network_type, physical_network, segmentation_id, mtu=None): self.network_type = network_type self.physical_network = physical_network self.segmentation_id = segmentation_id self.mtu = mtu @six.add_metaclass(abc.ABCMeta) class CommonAgentManagerRpcCallBackBase(object): """Base class for managers RPC callbacks. This class must be inherited by a RPC callback class that is used in combination with the common agent. """ def __init__(self, context, agent, sg_agent): self.context = context self.agent = agent self.sg_agent = sg_agent self.network_map = {} # stores received port_updates and port_deletes for # processing by the main loop self.updated_devices = set() @abc.abstractmethod def security_groups_rule_updated(self, context, **kwargs): """Callback for security group rule update. 
        :param security_groups: list of updated security_groups
        """

    @abc.abstractmethod
    def security_groups_member_updated(self, context, **kwargs):
        """Callback for security group member update.

        :param security_groups: list of updated security_groups
        """

    def add_network(self, network_id, network_segment):
        """Add a network to the agent internal network list

        :param network_id: The UUID of the network
        :param network_segment: The NetworkSegment object for this network
        """
        self.network_map[network_id] = network_segment

    def get_and_clear_updated_devices(self):
        """Get and clear the list of devices for which an update was received.

        :return: set - A set with updated devices. Format is ['tap1', 'tap2']
        """
        # Save and reinitialize the set variable that the port_update RPC
        # uses. This should be thread-safe as the greenthread should not
        # yield between these two statements.
        updated_devices = self.updated_devices
        self.updated_devices = set()
        return updated_devices


@six.add_metaclass(abc.ABCMeta)
class CommonAgentManagerBase(object):
    """Base class for managers that are used with the common agent loop.

    This class must be inherited by a manager class that is used
    in combination with the common agent.
    """

    @abc.abstractmethod
    def ensure_port_admin_state(self, device, admin_state_up):
        """Enforce admin_state for a port

        :param device: The device for which the admin_state should be set
        :param admin_state_up: True for admin_state_up, False for
            admin_state_down
        """

    @abc.abstractmethod
    def get_agent_configurations(self):
        """Establishes the agent configuration map.

        The content of this map is part of the agent state reports to the
        neutron server.

        :return: map -- the map containing the configuration values
        :rtype: dict
        """

    @abc.abstractmethod
    def get_agent_id(self):
        """Calculate the agent id that should be used on this host

        :return: str -- agent identifier
        """

    @abc.abstractmethod
    def get_all_devices(self):
        """Get a list of all devices of the managed type from this host

        A device in this context is a String that represents a network device.
        This can for example be the name of the device or its MAC address.
        This value will be stored in the Plug-in and be part of the
        device_details.

        Typically this list is retrieved from the sysfs. E.g. for linuxbridge
        it returns all names of devices of type 'tap' that start with a
        certain prefix.

        :return: set -- the set of all devices e.g. ['tap1', 'tap2']
        """

    @abc.abstractmethod
    def get_devices_modified_timestamps(self, devices):
        """Get a dictionary of modified timestamps by device

        The devices passed in are expected to be the same format that
        get_all_devices returns.

        :return: dict -- A dictionary of timestamps keyed by device
        """

    @abc.abstractmethod
    def get_extension_driver_type(self):
        """Get the agent extension driver type.

        :return: str -- The String defining the agent extension type
        """

    @abc.abstractmethod
    def get_rpc_callbacks(self, context, agent, sg_agent):
        """Returns the class containing all the agent rpc callback methods

        :return: class - the class containing the agent rpc callback methods.
            It must reflect the CommonAgentManagerRpcCallBackBase Interface.
        """

    @abc.abstractmethod
    def get_agent_api(self, **kwargs):
        """Get L2 extensions drivers API interface class.

        :return: instance of the class containing Agent Extension API
        """

    @abc.abstractmethod
    def get_rpc_consumers(self):
        """Get a list of topics for which an RPC consumer should be created

        :return: list -- A list of topics.
Each topic in this list is a list consisting of a name, an operation, and an optional host param keying the subscription to topic.host for plugin calls. """ @abc.abstractmethod def plug_interface(self, network_id, network_segment, device, device_owner): """Plug the interface (device). :param network_id: The UUID of the Neutron network :param network_segment: The NetworkSegment object for this network :param device: The device that should be plugged :param device_owner: The device owner of the port :return: bool -- True if the interface is plugged now. False if the interface could not be plugged. """ @abc.abstractmethod def setup_arp_spoofing_protection(self, device, device_details): """Setup the arp spoofing protection for the given port. :param device: The device to set up arp spoofing rules for, where device is the device String that is stored in the Neutron Plug-in for this Port. E.g. 'tap1' :param device_details: The device_details map retrieved from the Neutron Plugin """ @abc.abstractmethod def delete_arp_spoofing_protection(self, devices): """Remove the arp spoofing protection for the given ports. :param devices: List of devices that have been removed, where device is the device String that is stored for this port in the Neutron Plug-in. E.g. ['tap1', 'tap2'] """ @abc.abstractmethod def delete_unreferenced_arp_protection(self, current_devices): """Cleanup arp spoofing protection entries. :param current_devices: List of devices that currently exist on this host, where device is the device String that could have been stored in the Neutron Plug-in. E.g. ['tap1', 'tap2'] """ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/agent/_common_agent.py0000644000175000017500000005235500000000000026755 0ustar00coreycorey00000000000000# Copyright (c) 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
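# NOTE: The following is an illustrative sketch only, not part of this
# module: a minimal manager satisfying the CommonAgentManagerBase contract
# documented above, with trivial bodies so the abstract interface is
# concrete. All names in it (ToyManager, _TAPS) are hypothetical, and it
# assumes the neutron tree is importable; a real manager would talk to the
# host's networking stack instead of returning canned values.

import time

from neutron.plugins.ml2.drivers.agent import _agent_manager_base as amb


class ToyManager(amb.CommonAgentManagerBase):
    """Hypothetical manager used only to illustrate the abstract API."""

    _TAPS = {'tap1', 'tap2'}

    def ensure_port_admin_state(self, device, admin_state_up):
        pass  # a real driver would set the device up or down here

    def get_agent_configurations(self):
        return {}  # included in the agent state reports to the server

    def get_agent_id(self):
        return 'toy-agent-host1'

    def get_all_devices(self):
        return set(self._TAPS)

    def get_devices_modified_timestamps(self, devices):
        # pretend every requested device was just modified
        return {device: time.time() for device in devices}

    def get_extension_driver_type(self):
        return 'toy'

    def get_rpc_callbacks(self, context, agent, sg_agent):
        # a real driver returns a CommonAgentManagerRpcCallBackBase subclass
        raise NotImplementedError

    def get_agent_api(self, **kwargs):
        return None

    def get_rpc_consumers(self):
        # [name, operation] pairs; an optional third element keys the
        # subscription to topic.host
        return [['network', 'delete'], ['port', 'update']]

    def plug_interface(self, network_id, network_segment, device,
                       device_owner):
        return device in self._TAPS

    def setup_arp_spoofing_protection(self, device, device_details):
        pass

    def delete_arp_spoofing_protection(self, devices):
        pass

    def delete_unreferenced_arp_protection(self, current_devices):
        pass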
import collections import contextlib import sys import time from neutron_lib.agent import constants as agent_consts from neutron_lib.agent import topics from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources as local_resources from neutron_lib import constants from neutron_lib import context from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from oslo_service import service from oslo_utils import excutils from osprofiler import profiler from neutron.agent.l2 import l2_agent_extensions_manager as ext_manager from neutron.agent import rpc as agent_rpc from neutron.agent import securitygroups_rpc as agent_sg_rpc from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import securitygroups_rpc as sg_rpc from neutron.common import config as common_config from neutron.plugins.ml2.drivers.agent import _agent_manager_base as amb from neutron.plugins.ml2.drivers.agent import capabilities from neutron.plugins.ml2.drivers.agent import config as cagt_config # noqa LOG = logging.getLogger(__name__) @profiler.trace_cls("rpc") class CommonAgentLoop(service.Service): def __init__(self, manager, polling_interval, quitting_rpc_timeout, agent_type, agent_binary): """Constructor. :param manager: the manager object containing the impl specifics :param polling_interval: interval (secs) to poll DB. :param quitting_rpc_timeout: timeout in seconds for rpc calls after stop is called. :param agent_type: Specifies the type of the agent :param agent_binary: The agent binary string """ super(CommonAgentLoop, self).__init__() self.mgr = manager self._validate_manager_class() self.polling_interval = polling_interval self.quitting_rpc_timeout = quitting_rpc_timeout self.agent_type = agent_type self.agent_binary = agent_binary def _validate_manager_class(self): if not isinstance(self.mgr, amb.CommonAgentManagerBase): LOG.error("Manager class must inherit from " "CommonAgentManagerBase to ensure CommonAgent " "works properly.") sys.exit(1) def start(self): # stores all configured ports on agent self.network_ports = collections.defaultdict(list) # flag to do a sync after revival self.fullsync = False self.context = context.get_admin_context_without_session() self.setup_rpc() self.init_extension_manager(self.connection) configurations = {'extensions': self.ext_manager.names()} configurations.update(self.mgr.get_agent_configurations()) self.failed_report_state = False # TODO(mangelajo): optimize resource_versions (see ovs agent) self.agent_state = { 'binary': self.agent_binary, 'host': cfg.CONF.host, 'topic': constants.L2_AGENT_TOPIC, 'configurations': configurations, 'agent_type': self.agent_type, 'resource_versions': resources.LOCAL_RESOURCE_VERSIONS, 'start_flag': True} report_interval = cfg.CONF.AGENT.report_interval if report_interval: heartbeat = loopingcall.FixedIntervalLoopingCall( self._report_state) heartbeat.start(interval=report_interval) capabilities.notify_init_event(self.agent_type, self) # The initialization is complete; we can start receiving messages self.connection.consume_in_threads() self.daemon_loop() def stop(self, graceful=True): LOG.info("Stopping %s agent.", self.agent_type) if graceful and self.quitting_rpc_timeout: self.set_rpc_timeout(self.quitting_rpc_timeout) super(CommonAgentLoop, self).stop(graceful) def reset(self): common_config.setup_logging() def _report_state(self): try: devices = len(self.mgr.get_all_devices()) 
self.agent_state.get('configurations')['devices'] = devices agent_status = self.state_rpc.report_state(self.context, self.agent_state, True) if agent_status == agent_consts.AGENT_REVIVED: LOG.info('%s Agent has just been revived. ' 'Doing a full sync.', self.agent_type) self.fullsync = True # we only want to update resource versions on startup self.agent_state.pop('resource_versions', None) self.agent_state.pop('start_flag', None) except Exception: self.failed_report_state = True LOG.exception("Failed reporting state!") return if self.failed_report_state: self.failed_report_state = False LOG.info("Successfully reported state after a previous failure.") def _validate_rpc_endpoints(self): if not isinstance(self.endpoints[0], amb.CommonAgentManagerRpcCallBackBase): LOG.error("RPC Callback class must inherit from " "CommonAgentManagerRpcCallBackBase to ensure " "CommonAgent works properly.") sys.exit(1) def setup_rpc(self): self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN) self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN) self.sg_agent = agent_sg_rpc.SecurityGroupAgentRpc( self.context, self.sg_plugin_rpc, defer_refresh_firewall=True) self.agent_id = self.mgr.get_agent_id() LOG.info("RPC agent_id: %s", self.agent_id) self.topic = topics.AGENT self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS) # RPC network init # Handle updates from service self.rpc_callbacks = self.mgr.get_rpc_callbacks(self.context, self, self.sg_agent) self.endpoints = [self.rpc_callbacks] self._validate_rpc_endpoints() # Define the listening consumers for the agent consumers = self.mgr.get_rpc_consumers() self.connection = agent_rpc.create_consumers(self.endpoints, self.topic, consumers, start_listening=False) def init_extension_manager(self, connection): ext_manager.register_opts(cfg.CONF) self.ext_manager = ( ext_manager.L2AgentExtensionsManager(cfg.CONF)) agent_api = self.mgr.get_agent_api(sg_agent=self.sg_agent) self.ext_manager.initialize( connection, self.mgr.get_extension_driver_type(), agent_api) def _clean_network_ports(self, device): for netid, ports_list in self.network_ports.items(): for port_data in ports_list: if device == port_data['device']: ports_list.remove(port_data) if ports_list == []: self.network_ports.pop(netid) return port_data['port_id'] def _update_network_ports(self, network_id, port_id, device): self._clean_network_ports(device) self.network_ports[network_id].append({ "port_id": port_id, "device": device }) def process_network_devices(self, device_info): resync_a = False resync_b = False self.sg_agent.setup_port_filters(device_info.get('added'), device_info.get('updated')) # Updated devices are processed the same as new ones, as their # admin_state_up may have changed. The set union prevents duplicating # work when a device is new and updated in the same polling iteration. 
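# e.g. added={'tap1'} and updated={'tap1', 'tap2'} yield the union
# {'tap1', 'tap2'}, so 'tap1' is handled once rather than twice.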
devices_added_updated = (set(device_info.get('added')) | set(device_info.get('updated'))) if devices_added_updated: resync_a = self.treat_devices_added_updated(devices_added_updated) if device_info.get('removed'): resync_b = self.treat_devices_removed(device_info['removed']) # If one of the above operations fails => resync with plugin return (resync_a | resync_b) def treat_devices_added_updated(self, devices): try: devices_details_list = self.plugin_rpc.get_devices_details_list( self.context, devices, self.agent_id, host=cfg.CONF.host) except Exception: LOG.exception("Unable to get port details for %s", devices) # resync is needed return True for device_details in devices_details_list: self._process_device_if_exists(device_details) # no resync is needed return False def _process_device_if_exists(self, device_details): # ignore exceptions from devices that disappear because they will # be handled as removed in the next iteration device = device_details['device'] with self._ignore_missing_device_exceptions(device): LOG.debug("Port %s added", device) if 'port_id' in device_details: LOG.info("Port %(device)s updated. Details: %(details)s", {'device': device, 'details': device_details}) self.mgr.setup_arp_spoofing_protection(device, device_details) segment = amb.NetworkSegment( device_details.get('network_type'), device_details['physical_network'], device_details.get('segmentation_id'), device_details.get('mtu') ) network_id = device_details['network_id'] self.rpc_callbacks.add_network(network_id, segment) interface_plugged = self.mgr.plug_interface( network_id, segment, device, device_details['device_owner']) # REVISIT(scheuran): Changed the way how ports admin_state_up # is implemented. # # Old lb implementation: # - admin_state_up: ensure that tap is plugged into bridge # - admin_state_down: remove tap from bridge # New lb implementation: # - admin_state_up: set tap device state to up # - admin_state_down: set tap device state to down # # However both approaches could result in races with # nova/libvirt and therefore to an invalid system state in the # scenario, where an instance is booted with a port configured # with admin_state_up = False: # # Libvirt does the following actions in exactly # this order (see libvirt virnetdevtap.c) # 1) Create the tap device, set its MAC and MTU # 2) Plug the tap into the bridge # 3) Set the tap online # # Old lb implementation: # A race could occur, if the lb agent removes the tap device # right after step 1). Then libvirt will add it to the bridge # again in step 2). # New lb implementation: # The race could occur if the lb-agent sets the taps device # state to down right after step 2). In step 3) libvirt # might set it to up again. # # This is not an issue if an instance is booted with a port # configured with admin_state_up = True. Libvirt would just # set the tap device up again. # # This refactoring is recommended for the following reasons: # 1) An existing race with libvirt caused by the behavior of # the old implementation. 
See Bug #1312016 # 2) The new code is much more readable if interface_plugged: self.mgr.ensure_port_admin_state( device, device_details['admin_state_up']) # update plugin about port status if admin_state is up if device_details['admin_state_up']: if interface_plugged: self.plugin_rpc.update_device_up(self.context, device, self.agent_id, cfg.CONF.host) else: self.plugin_rpc.update_device_down(self.context, device, self.agent_id, cfg.CONF.host) self._update_network_ports(device_details['network_id'], device_details['port_id'], device_details['device']) self.ext_manager.handle_port(self.context, device_details) registry.publish(local_resources.PORT_DEVICE, events.AFTER_UPDATE, self, payload=events.DBEventPayload( self.context, states=(device_details,), resource_id=device)) elif constants.NO_ACTIVE_BINDING in device_details: LOG.info("Device %s has no active binding in host", device) else: LOG.info("Device %s not defined on plugin", device) @contextlib.contextmanager def _ignore_missing_device_exceptions(self, device): try: yield except Exception: with excutils.save_and_reraise_exception() as ectx: if device not in self.mgr.get_all_devices(): ectx.reraise = False LOG.debug("%s was removed during processing.", device) def treat_devices_removed(self, devices): resync = False self.sg_agent.remove_devices_filter(devices) for device in devices: LOG.info("Attachment %s removed", device) details = None try: details = self.plugin_rpc.update_device_down(self.context, device, self.agent_id, cfg.CONF.host) except Exception: LOG.exception("Error occurred while removing port %s", device) resync = True if details and details['exists']: LOG.info("Port %s updated.", device) else: LOG.debug("Device %s not defined on plugin", device) port_id = self._clean_network_ports(device) try: self.ext_manager.delete_port(self.context, {'device': device, 'port_id': port_id}) except Exception: LOG.exception("Error occurred while processing extensions " "for port removal %s", device) resync = True registry.publish(local_resources.PORT_DEVICE, events.AFTER_DELETE, self, payload=events.DBEventPayload( self.context, states=(details,), resource_id=device)) self.mgr.delete_arp_spoofing_protection(devices) return resync @staticmethod def _get_devices_locally_modified(timestamps, previous_timestamps): """Returns devices with previous timestamps that do not match new. If a device did not have a timestamp previously, it will not be returned because this means it is new. """ return {device for device, timestamp in timestamps.items() if device in previous_timestamps and timestamp != previous_timestamps.get(device)} def scan_devices(self, previous, sync): device_info = {} updated_devices = self.rpc_callbacks.get_and_clear_updated_devices() current_devices = self.mgr.get_all_devices() device_info['current'] = current_devices if previous is None: # This is the first iteration of daemon_loop(). previous = {'added': set(), 'current': set(), 'updated': set(), 'removed': set(), 'timestamps': {}} # clear any orphaned ARP spoofing rules (e.g. interface was # manually deleted) self.mgr.delete_unreferenced_arp_protection(current_devices) # check to see if any devices were locally modified based on their # timestamps changing since the previous iteration. If a timestamp # doesn't exist for a device, this calculation is skipped for that # device. 
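# e.g. timestamps={'tap1': 1000, 'tap2': 2000} compared against previous
# timestamps={'tap1': 1000, 'tap2': 1500} flags only 'tap2' as locally
# modified; a device with no previous timestamp is treated as new, not
# modified.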
device_info['timestamps'] = self.mgr.get_devices_modified_timestamps( current_devices) locally_updated = self._get_devices_locally_modified( device_info['timestamps'], previous['timestamps']) if locally_updated: LOG.debug("Adding locally changed devices to updated set: %s", locally_updated) updated_devices |= locally_updated if sync: # This is the first iteration, or the previous one had a problem. # Re-add all existing devices. device_info['added'] = current_devices # Retry cleaning devices that may not have been cleaned properly. # And clean any that disappeared since the previous iteration. device_info['removed'] = (previous['removed'] | previous['current'] - current_devices) # Retry updating devices that may not have been updated properly. # And any that were updated since the previous iteration. # Only update devices that currently exist. device_info['updated'] = (previous['updated'] | updated_devices & current_devices) else: device_info['added'] = current_devices - previous['current'] device_info['removed'] = previous['current'] - current_devices device_info['updated'] = updated_devices & current_devices return device_info def _device_info_has_changes(self, device_info): return (device_info.get('added') or device_info.get('updated') or device_info.get('removed')) def daemon_loop(self): LOG.info("%s Agent RPC Daemon Started!", self.agent_type) device_info = None sync = True while True: start = time.time() if self.fullsync: sync = True self.fullsync = False if sync: LOG.info("%s Agent out of sync with plugin!", self.agent_type) device_info = self.scan_devices(previous=device_info, sync=sync) sync = False if (self._device_info_has_changes(device_info) or self.sg_agent.firewall_refresh_needed()): LOG.debug("Agent loop found changes! %s", device_info) try: sync = self.process_network_devices(device_info) except Exception: LOG.exception("Error in agent loop. Devices info: %s", device_info) sync = True # sleep till end of polling interval elapsed = (time.time() - start) if (elapsed < self.polling_interval): time.sleep(self.polling_interval - elapsed) else: LOG.debug("Loop iteration exceeded interval " "(%(polling_interval)s vs. %(elapsed)s)!", {'polling_interval': self.polling_interval, 'elapsed': elapsed}) def set_rpc_timeout(self, timeout): for rpc_api in (self.plugin_rpc, self.sg_plugin_rpc, self.state_rpc): rpc_api.client.timeout = timeout ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/agent/capabilities.py0000644000175000017500000000221100000000000026563 0ustar00coreycorey00000000000000# Copyright 2016 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
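# NOTE: Illustrative sketch, not part of this module: the set arithmetic
# that CommonAgentLoop.scan_devices() above applies on a normal (non-sync)
# iteration, reduced to plain data so the added/removed/updated split is
# easy to follow. The device names are hypothetical.

previous_current = {'tap1', 'tap2'}
current_devices = {'tap2', 'tap3'}   # tap1 vanished, tap3 appeared
updated_devices = {'tap2', 'tap9'}   # tap9 no longer exists on this host

added = current_devices - previous_current    # -> {'tap3'}
removed = previous_current - current_devices  # -> {'tap1'}
updated = updated_devices & current_devices   # -> {'tap2'}

assert (added, removed, updated) == ({'tap3'}, {'tap1'}, {'tap2'})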
from neutron_lib.callbacks import events from neutron_lib.callbacks import registry def notify_init_event(agent_type, agent): """Notify init event for the specified agent.""" registry.publish(agent_type, events.AFTER_INIT, agent) def register(callback, agent_type): """Subscribe callback to init event for the specified agent. :param agent_type: an agent type as defined in neutron_lib.constants. :param callback: a callback that can process the agent init event. """ registry.subscribe(callback, agent_type, events.AFTER_INIT) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/agent/config.py0000644000175000017500000000150600000000000025405 0ustar00coreycorey00000000000000# Copyright (c) 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron.conf.agent import common as config from neutron.conf.plugins.ml2.drivers import agent agent.register_agent_opts() config.register_agent_state_opts_helper(cfg.CONF) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/helpers.py0000644000175000017500000001444500000000000024512 0ustar00coreycorey00000000000000# Copyright (c) 2014 Thales Services SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools from neutron_lib.db import api as db_api from neutron_lib import exceptions from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from neutron_lib.plugins.ml2 import api from neutron_lib.plugins import utils as p_utils from neutron_lib.utils import helpers from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log from neutron.objects import network_segment_range as ns_range LOG = log.getLogger(__name__) class BaseTypeDriver(api.ML2TypeDriver): """BaseTypeDriver for functions common to Segment and flat.""" def __init__(self): try: self.physnet_mtus = helpers.parse_mappings( cfg.CONF.ml2.physical_network_mtus, unique_values=False ) except Exception as e: LOG.error("Failed to parse physical_network_mtus: %s", e) self.physnet_mtus = [] def get_mtu(self, physical_network=None): return p_utils.get_deployment_physnet_mtu() class SegmentTypeDriver(BaseTypeDriver): """SegmentTypeDriver for segment allocation. Provide methods helping to perform segment allocation fully or partially specified. 
""" def __init__(self, model): super(SegmentTypeDriver, self).__init__() self.model = model.db_model self.segmentation_obj = model primary_keys_columns = self.model.__table__.primary_key.columns self.primary_keys = {col.name for col in primary_keys_columns} def allocate_fully_specified_segment(self, context, **raw_segment): """Allocate segment fully specified by raw_segment. If segment exists, then try to allocate it and return db object If segment does not exists, then try to create it and return db object If allocation/creation failed, then return None """ network_type = self.get_type() try: with db_api.CONTEXT_WRITER.using(context): alloc = ( context.session.query(self.model).filter_by(**raw_segment). first()) if alloc: if alloc.allocated: # Segment already allocated return else: # Segment not allocated LOG.debug("%(type)s segment %(segment)s allocate " "started ", {"type": network_type, "segment": raw_segment}) count = (context.session.query(self.model). filter_by(allocated=False, **raw_segment). update({"allocated": True})) if count: LOG.debug("%(type)s segment %(segment)s allocate " "done ", {"type": network_type, "segment": raw_segment}) return alloc # Segment allocated or deleted since select LOG.debug("%(type)s segment %(segment)s allocate " "failed: segment has been allocated or " "deleted", {"type": network_type, "segment": raw_segment}) # Segment to create or already allocated LOG.debug("%(type)s segment %(segment)s create started", {"type": network_type, "segment": raw_segment}) alloc = self.model(allocated=True, **raw_segment) alloc.save(context.session) LOG.debug("%(type)s segment %(segment)s create done", {"type": network_type, "segment": raw_segment}) except db_exc.DBDuplicateEntry: # Segment already allocated (insert failure) alloc = None LOG.debug("%(type)s segment %(segment)s create failed", {"type": network_type, "segment": raw_segment}) return alloc def allocate_partially_specified_segment(self, context, **filters): """Allocate model segment from pool partially specified by filters. Return allocated db object or None. 
""" network_type = self.get_type() if directory.get_plugin(plugin_constants.NETWORK_SEGMENT_RANGE): calls = [ functools.partial( ns_range.NetworkSegmentRange.get_segments_for_project, context, self.model, network_type, self.model_segmentation_id, **filters), functools.partial( ns_range.NetworkSegmentRange.get_segments_shared, context, self.model, network_type, self.model_segmentation_id, **filters)] else: calls = [functools.partial( self.segmentation_obj.get_unallocated_segments, context, **filters)] for call in calls: allocations = call() for alloc in allocations: segment = dict((k, alloc[k]) for k in self.primary_keys) if self.segmentation_obj.allocate(context, **segment): LOG.debug('%(type)s segment allocate from pool success ' 'with %(segment)s ', {'type': network_type, 'segment': segment}) return alloc raise db_exc.RetryRequest( exceptions.NoNetworkFoundInMaximumAllowedAttempts()) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3390448 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/l2pop/0000755000175000017500000000000000000000000023522 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/l2pop/README0000644000175000017500000000371000000000000024403 0ustar00coreycorey00000000000000Neutron ML2 l2 population Mechanism Drivers l2 population (l2pop) mechanism drivers implements the ML2 driver to improve open source plugins overlay implementations (VXLAN with Linux bridge and GRE/VXLAN with OVS). This mechanism driver is implemented in ML2 to propagate the forwarding information among agents using a common RPC API. More informations could be found on the wiki page [1]. VXLAN Linux kernel: ------------------- The VXLAN Linux kernel module provide all necessary functionalities to populate the forwarding table and local ARP responder tables. This module appears on release 3.7 of the vanilla Linux kernel in experimental: - 3.8: first stable release, no edge replication (multicast necessary), - 3.9: edge replication only for the broadcasted packets, - 3.11: edge replication for broadcast, multicast and unknown packets. Note: Some distributions (like RHEL) have backported this module on precedent kernel version. OpenvSwitch: ------------ The OVS OpenFlow tables provide all of the necessary functionality to populate the forwarding table and local ARP responder tables. A wiki page describe how the flow tables did evolve on OVS agents: - [2] without local ARP responder - [3] with local ARP responder. /!\ This functionality is only available since the development branch 2.1. It's possible to disable (enable by default) it through the flag 'arp_responder'. /!\ Note: A difference persists between the LB and OVS agents when they are used with the l2-pop mechanism driver (and local ARP responder available). The LB agent will drop unknown unicast (VXLAN bridge mode), whereas the OVS agent will flood it. 
[1] https://wiki.openstack.org/wiki/L2population_blueprint [2] https://wiki.openstack.org/wiki/Ovs-flow-logic#OVS_flows_logic [3] https://wiki.openstack.org/wiki/Ovs-flow-logic#OVS_flows_logic_with_local_ARP_responder././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/l2pop/__init__.py0000644000175000017500000000000000000000000025621 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/l2pop/db.py0000644000175000017500000001620100000000000024461 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as const from oslo_serialization import jsonutils from oslo_utils import timeutils from sqlalchemy import orm from neutron.db.models import agent as agent_model from neutron.db.models import l3ha as l3ha_model from neutron.db import models_v2 from neutron.objects import agent as agent_objs from neutron.plugins.ml2 import models as ml2_models HA_ROUTER_PORTS = (const.DEVICE_OWNER_HA_REPLICATED_INT, const.DEVICE_OWNER_ROUTER_SNAT) def get_agent_ip_by_host(context, agent_host): agent = get_agent_by_host(context, agent_host) if agent: return get_agent_ip(agent) def _get_agent_conf_dict(agent): configuration = agent.configurations if not isinstance(configuration, dict): configuration = jsonutils.loads(configuration) return configuration def get_agent_ip(agent): configuration = _get_agent_conf_dict(agent) return configuration.get('tunneling_ip') def get_agent_uptime(agent): return timeutils.delta_seconds(agent.started_at, agent.heartbeat_timestamp) def get_agent_tunnel_types(agent): configuration = _get_agent_conf_dict(agent) return configuration.get('tunnel_types') def get_agent_l2pop_network_types(agent): configuration = _get_agent_conf_dict(agent) return configuration.get('l2pop_network_types') def get_agent_by_host(context, agent_host): """Return a L2 agent on the host.""" agents = agent_objs.Agent.get_objects(context, host=agent_host) for agent in agents: if get_agent_ip(agent): return agent def _get_active_network_ports(context, network_id): query = context.session.query(ml2_models.PortBinding, agent_model.Agent) query = query.join( agent_model.Agent, agent_model.Agent.host == ml2_models.PortBinding.host) query = query.join(models_v2.Port) query = query.options(orm.subqueryload(ml2_models.PortBinding.port)) query = query.filter(models_v2.Port.network_id == network_id, models_v2.Port.status == const.PORT_STATUS_ACTIVE) return query def _ha_router_interfaces_on_network_query(context, network_id): query = context.session.query(models_v2.Port) query = query.join( l3ha_model.L3HARouterAgentPortBinding, l3ha_model.L3HARouterAgentPortBinding.router_id == models_v2.Port.device_id) return query.filter( models_v2.Port.network_id == network_id, 
models_v2.Port.device_owner.in_(HA_ROUTER_PORTS)) def _get_ha_router_interface_ids(context, network_id): query = _ha_router_interfaces_on_network_query(context, network_id) return query.from_self(models_v2.Port.id).distinct() def get_nondistributed_active_network_ports(context, network_id): query = _get_active_network_ports(context, network_id) # Exclude DVR and HA router interfaces query = query.filter(models_v2.Port.device_owner != const.DEVICE_OWNER_DVR_INTERFACE) ha_iface_ids_query = _get_ha_router_interface_ids(context, network_id) query = query.filter(models_v2.Port.id.notin_(ha_iface_ids_query)) return [(bind, agent) for bind, agent in query.all() if get_agent_ip(agent)] def get_dvr_active_network_ports(context, network_id): query = context.session.query(ml2_models.DistributedPortBinding, agent_model.Agent) query = query.join(agent_model.Agent, agent_model.Agent.host == ml2_models.DistributedPortBinding.host) query = query.join(models_v2.Port) query = query.options( orm.subqueryload(ml2_models.DistributedPortBinding.port)) query = query.filter(models_v2.Port.network_id == network_id, models_v2.Port.status == const.PORT_STATUS_ACTIVE, models_v2.Port.device_owner == const.DEVICE_OWNER_DVR_INTERFACE) return [(bind, agent) for bind, agent in query.all() if get_agent_ip(agent)] def get_distributed_active_network_ports(context, network_id): return (get_dvr_active_network_ports(context, network_id) + get_ha_active_network_ports(context, network_id)) def get_ha_active_network_ports(context, network_id): agents = get_ha_agents(context, network_id=network_id) return [(None, agent) for agent in agents] def get_ha_agents(context, network_id=None, router_id=None): agents = agent_objs.Agent.get_ha_agents(context, network_id=network_id, router_id=router_id) return [agent for agent in agents if get_agent_ip(agent)] def get_ha_agents_by_router_id(context, router_id): return get_ha_agents(context, router_id=router_id) def get_agent_network_active_port_count(context, agent_host, network_id): query = context.session.query(models_v2.Port) query1 = query.join(ml2_models.PortBinding) query1 = query1.filter(models_v2.Port.network_id == network_id, models_v2.Port.status == const.PORT_STATUS_ACTIVE, models_v2.Port.device_owner != const.DEVICE_OWNER_DVR_INTERFACE, ml2_models.PortBinding.host == agent_host) ha_iface_ids_query = _get_ha_router_interface_ids(context, network_id) query1 = query1.filter(models_v2.Port.id.notin_(ha_iface_ids_query)) ha_port_count = get_ha_router_active_port_count( context, agent_host, network_id) query2 = query.join(ml2_models.DistributedPortBinding) query2 = query2.filter(models_v2.Port.network_id == network_id, ml2_models.DistributedPortBinding.status == const.PORT_STATUS_ACTIVE, models_v2.Port.device_owner == const.DEVICE_OWNER_DVR_INTERFACE, ml2_models.DistributedPortBinding.host == agent_host) return (query1.count() + query2.count() + ha_port_count) def get_ha_router_active_port_count(context, agent_host, network_id): # Return num of HA router interfaces on the given network and host query = _ha_router_interfaces_on_network_query(context, network_id) query = query.filter(models_v2.Port.status == const.PORT_STATUS_ACTIVE) query = query.join(agent_model.Agent) query = query.filter(agent_model.Agent.host == agent_host) return query.count() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/l2pop/mech_driver.py0000644000175000017500000003713000000000000026367 
0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as const from neutron_lib import context as n_context from neutron_lib import exceptions from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from neutron_lib.plugins.ml2 import api from oslo_log import log as logging from neutron._i18n import _ from neutron.db import l3_hamode_db from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc LOG = logging.getLogger(__name__) class L2populationMechanismDriver(api.MechanismDriver): def __init__(self): super(L2populationMechanismDriver, self).__init__() self.L2populationAgentNotify = l2pop_rpc.L2populationAgentNotifyAPI() def initialize(self): LOG.debug("Experimental L2 population driver") self.rpc_ctx = n_context.get_admin_context_without_session() def _get_port_fdb_entries(self, port): # the port might be concurrently deleted if not port or not port.get('fixed_ips'): return [] return [l2pop_rpc.PortInfo(mac_address=port['mac_address'], ip_address=ip['ip_address']) for ip in port['fixed_ips']] def _remove_flooding(self, fdb_entries): for network_fdb in fdb_entries.values(): for agent_fdb in network_fdb.get('ports', {}).values(): try: agent_fdb.remove(const.FLOODING_ENTRY) except ValueError: pass def check_vlan_transparency(self, context): """L2population driver vlan transparency support.""" return True def _get_ha_port_agents_fdb( self, context, network_id, router_id): other_fdb_ports = {} for agent in l2pop_db.get_ha_agents_by_router_id(context, router_id): agent_active_ports = l2pop_db.get_agent_network_active_port_count( context, agent.host, network_id) if agent_active_ports == 0: ip = l2pop_db.get_agent_ip(agent) other_fdb_ports[ip] = [const.FLOODING_ENTRY] return other_fdb_ports def delete_port_postcommit(self, context): port = context.current agent_host = context.host plugin_context = context._plugin_context fdb_entries = self._get_agent_fdb( plugin_context, context.bottom_bound_segment, port, agent_host) if fdb_entries and l3_hamode_db.is_ha_router_port( plugin_context, port['device_owner'], port['device_id']): network_id = port['network_id'] other_fdb_ports = self._get_ha_port_agents_fdb( plugin_context, network_id, port['device_id']) fdb_entries[network_id]['ports'] = other_fdb_ports self.L2populationAgentNotify.remove_fdb_entries(self.rpc_ctx, fdb_entries) def filter_hosts_with_segment_access( self, context, segments, candidate_hosts, agent_getter): # NOTE(cbrandily): let other mechanisms (openvswitch, linuxbridge, ...) 
# perform the filtering return set() def _get_diff_ips(self, orig, port): orig_ips = set([ip['ip_address'] for ip in orig['fixed_ips']]) port_ips = set([ip['ip_address'] for ip in port['fixed_ips']]) # check if an ip has been added or removed orig_chg_ips = orig_ips.difference(port_ips) port_chg_ips = port_ips.difference(orig_ips) if orig_chg_ips or port_chg_ips: return orig_chg_ips, port_chg_ips def _fixed_ips_changed(self, context, orig, port, diff_ips): orig_ips, port_ips = diff_ips if (port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE): agent_host = context.host else: agent_host = context.original_host if not agent_host: return # We should not add arp responder for non tunnel network type port_context = context._plugin_context agent = l2pop_db.get_agent_by_host(port_context, agent_host) segment = context.bottom_bound_segment if not self._validate_segment(segment, port['id'], agent): return agent_ip = l2pop_db.get_agent_ip_by_host(context._plugin_context, agent_host) orig_mac_ip = [l2pop_rpc.PortInfo(mac_address=port['mac_address'], ip_address=ip) for ip in orig_ips] port_mac_ip = [l2pop_rpc.PortInfo(mac_address=port['mac_address'], ip_address=ip) for ip in port_ips] upd_fdb_entries = {port['network_id']: {agent_ip: {}}} ports = upd_fdb_entries[port['network_id']][agent_ip] if orig_mac_ip: ports['before'] = orig_mac_ip if port_mac_ip: ports['after'] = port_mac_ip self.L2populationAgentNotify.update_fdb_entries( self.rpc_ctx, {'chg_ip': upd_fdb_entries}) return True def update_port_precommit(self, context): port = context.current orig = context.original if (orig['mac_address'] != port['mac_address'] and context.status == const.PORT_STATUS_ACTIVE): msg = _("unable to modify mac_address of ACTIVE port " "%s") % port['id'] raise exceptions.InvalidInput(error_message=msg) def update_port_postcommit(self, context): port = context.current orig = context.original plugin_context = context._plugin_context if l3_hamode_db.is_ha_router_port(plugin_context, port['device_owner'], port['device_id']): return diff_ips = self._get_diff_ips(orig, port) if diff_ips: self._fixed_ips_changed(context, orig, port, diff_ips) if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: if context.status == const.PORT_STATUS_ACTIVE: self.update_port_up(context) if context.status == const.PORT_STATUS_DOWN: agent_host = context.host fdb_entries = self._get_agent_fdb( plugin_context, context.bottom_bound_segment, port, agent_host) self.L2populationAgentNotify.remove_fdb_entries( self.rpc_ctx, fdb_entries) elif (context.host != context.original_host and context.original_status == const.PORT_STATUS_ACTIVE and context.status == const.PORT_STATUS_DOWN): # The port has been migrated. Send notification about port # removal from old host. 
fdb_entries = self._get_agent_fdb( plugin_context, context.original_bottom_bound_segment, orig, context.original_host) self.L2populationAgentNotify.remove_fdb_entries( self.rpc_ctx, fdb_entries) elif context.status != context.original_status: if context.status == const.PORT_STATUS_ACTIVE: self.update_port_up(context) elif context.status == const.PORT_STATUS_DOWN: fdb_entries = self._get_agent_fdb( plugin_context, context.bottom_bound_segment, port, context.host) self.L2populationAgentNotify.remove_fdb_entries( self.rpc_ctx, fdb_entries) def _validate_segment(self, segment, port_id, agent): if not segment: LOG.debug("Port %(port)s updated by agent %(agent)s isn't bound " "to any segment", {'port': port_id, 'agent': agent}) return False network_types = l2pop_db.get_agent_l2pop_network_types(agent) if network_types is None: network_types = l2pop_db.get_agent_tunnel_types(agent) if segment['network_type'] not in network_types: return False return True def _create_agent_fdb(self, context, agent, segment, network_id): agent_fdb_entries = {network_id: {'segment_id': segment['segmentation_id'], 'network_type': segment['network_type'], 'ports': {}}} tunnel_network_ports = ( l2pop_db.get_distributed_active_network_ports(context, network_id)) fdb_network_ports = ( l2pop_db.get_nondistributed_active_network_ports(context, network_id)) ports = agent_fdb_entries[network_id]['ports'] ports.update(self._get_tunnels( fdb_network_ports + tunnel_network_ports, agent.host)) for agent_ip, fdbs in ports.items(): for binding, agent in fdb_network_ports: if l2pop_db.get_agent_ip(agent) == agent_ip: fdbs.extend(self._get_port_fdb_entries(binding.port)) return agent_fdb_entries def _get_tunnels(self, tunnel_network_ports, exclude_host): agents = {} for __, agent in tunnel_network_ports: if agent.host == exclude_host: continue ip = l2pop_db.get_agent_ip(agent) if not ip: LOG.debug("Unable to retrieve the agent ip, check " "the agent %s configuration.", agent.host) continue if ip not in agents: agents[ip] = [const.FLOODING_ENTRY] return agents def update_port_down(self, context): port = context.current agent_host = context.host l3plugin = directory.get_plugin(plugin_constants.L3) # when agent transitions to backup, don't remove flood flows if agent_host and l3plugin and getattr(l3plugin, "list_router_ids_on_host", None): admin_context = n_context.get_admin_context() port_context = context._plugin_context fdb_entries = self._get_agent_fdb( port_context, context.bottom_bound_segment, port, agent_host, include_ha_router_ports=True) if (fdb_entries and l3plugin.list_router_ids_on_host( admin_context, agent_host, [port['device_id']])): # NOTE(slaweq): in case this is HA router, remove unicast # entries to this port but don't remove flood entry self._remove_flooding(fdb_entries) self.L2populationAgentNotify.remove_fdb_entries( self.rpc_ctx, fdb_entries) def update_port_up(self, context, refresh_tunnels=False): port = context.current agent_host = context.host port_context = context._plugin_context agent = l2pop_db.get_agent_by_host(port_context, agent_host) if not agent: LOG.warning("Unable to retrieve active L2 agent on host %s", agent_host) return network_id = port['network_id'] agent_active_ports = l2pop_db.get_agent_network_active_port_count( port_context, agent_host, network_id) LOG.debug("host: %s, agent_active_ports: %s, refresh_tunnels: %s", agent_host, agent_active_ports, refresh_tunnels) agent_ip = l2pop_db.get_agent_ip(agent) segment = context.bottom_bound_segment if not self._validate_segment(segment, 
port['id'], agent): return other_fdb_entries = self._get_fdb_entries_template( segment, agent_ip, network_id) other_fdb_ports = other_fdb_entries[network_id]['ports'] # with high concurrency more than 1 port may be activated on an agent # at the same time (like VM port + a DVR port) so checking for 1 or 2 is_first_port = agent_active_ports in (1, 2) if is_first_port or refresh_tunnels: # First port(s) activated on current agent in this network, # we have to provide it with the whole list of fdb entries agent_fdb_entries = self._create_agent_fdb(port_context, agent, segment, network_id) # And notify other agents to add flooding entry other_fdb_ports[agent_ip].append(const.FLOODING_ENTRY) if agent_fdb_entries[network_id]['ports'].keys(): self.L2populationAgentNotify.add_fdb_entries( self.rpc_ctx, agent_fdb_entries, agent_host) # Notify other agents to add fdb rule for current port if (port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE and not l3_hamode_db.is_ha_router_port( port_context, port['device_owner'], port['device_id'])): other_fdb_ports[agent_ip] += self._get_port_fdb_entries(port) self.L2populationAgentNotify.add_fdb_entries(self.rpc_ctx, other_fdb_entries) def _get_agent_fdb(self, context, segment, port, agent_host, include_ha_router_ports=False): if not agent_host: return network_id = port['network_id'] agent_active_ports = l2pop_db.get_agent_network_active_port_count( context, agent_host, network_id) agent = l2pop_db.get_agent_by_host(context, agent_host) if not agent: LOG.warning("Unable to retrieve active L2 agent on host %s", agent_host) return if not self._validate_segment(segment, port['id'], agent): return agent_ip = l2pop_db.get_agent_ip(agent) other_fdb_entries = self._get_fdb_entries_template( segment, agent_ip, port['network_id']) if agent_active_ports == 0: # Agent is removing its last activated port in this network, # other agents needs to be notified to delete their flooding entry. other_fdb_entries[network_id]['ports'][agent_ip].append( const.FLOODING_ENTRY) # Notify other agents to remove fdb rules for current port if (port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE and (include_ha_router_ports or not l3_hamode_db.is_ha_router_port(context, port['device_owner'], port['device_id']))): fdb_entries = self._get_port_fdb_entries(port) other_fdb_entries[network_id]['ports'][agent_ip] += fdb_entries return other_fdb_entries @classmethod def _get_fdb_entries_template(cls, segment, agent_ip, network_id): return { network_id: {'segment_id': segment['segmentation_id'], 'network_type': segment['network_type'], 'ports': {agent_ip: []}}} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/l2pop/rpc.py0000644000175000017500000000662200000000000024666 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
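# NOTE: Illustrative sketch, not part of this module: the shape of the
# fdb_entries payload built by L2populationMechanismDriver above (see
# _get_fdb_entries_template) and handed to the notify API below. The
# network UUID, tunnel IP and MAC/IP values are made up; on the wire the
# per-port entries travel as PortInfo (mac_address, ip_address) pairs.

from neutron_lib import constants as const

example_fdb_entries = {
    'net-uuid-1': {
        'segment_id': 1001,        # e.g. a VXLAN VNI
        'network_type': 'vxlan',
        'ports': {
            # tunnel endpoint IP -> entries; FLOODING_ENTRY tells the
            # receiving agents to (un)install a flood flow towards it
            '192.0.2.10': [const.FLOODING_ENTRY,
                           ('fa:16:3e:00:00:01', '10.0.0.3')],
        },
    },
}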
import collections from neutron_lib.agent import topics from neutron_lib import rpc as n_rpc from oslo_log import log as logging import oslo_messaging LOG = logging.getLogger(__name__) PortInfo = collections.namedtuple("PortInfo", "mac_address ip_address") class L2populationAgentNotifyAPI(object): def __init__(self, topic=topics.AGENT): self.topic = topic self.topic_l2pop_update = topics.get_topic_name(topic, topics.L2POPULATION, topics.UPDATE) target = oslo_messaging.Target(topic=topic, version='1.0') self.client = n_rpc.get_client(target) def _notification_fanout(self, context, method, fdb_entries): LOG.debug('Fanout notify l2population agents at %(topic)s ' 'the message %(method)s with %(fdb_entries)s', {'topic': self.topic, 'method': method, 'fdb_entries': fdb_entries}) cctxt = self.client.prepare(topic=self.topic_l2pop_update, fanout=True) cctxt.cast(context, method, fdb_entries=fdb_entries) def _notification_host(self, context, method, fdb_entries, host): LOG.debug('Notify l2population agent %(host)s at %(topic)s the ' 'message %(method)s with %(fdb_entries)s', {'host': host, 'topic': self.topic, 'method': method, 'fdb_entries': fdb_entries}) cctxt = self.client.prepare(topic=self.topic_l2pop_update, server=host) cctxt.cast(context, method, fdb_entries=fdb_entries) def add_fdb_entries(self, context, fdb_entries, host=None): if fdb_entries: if host: self._notification_host(context, 'add_fdb_entries', fdb_entries, host) else: self._notification_fanout(context, 'add_fdb_entries', fdb_entries) def remove_fdb_entries(self, context, fdb_entries, host=None): if fdb_entries: if host: self._notification_host(context, 'remove_fdb_entries', fdb_entries, host) else: self._notification_fanout(context, 'remove_fdb_entries', fdb_entries) def update_fdb_entries(self, context, fdb_entries, host=None): if fdb_entries: if host: self._notification_host(context, 'update_fdb_entries', fdb_entries, host) else: self._notification_fanout(context, 'update_fdb_entries', fdb_entries) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3390448 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/l2pop/rpc_manager/0000755000175000017500000000000000000000000026000 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/l2pop/rpc_manager/__init__.py0000644000175000017500000000000000000000000030077 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc.py0000644000175000017500000003100500000000000031645 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
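# NOTE: Illustrative sketch, not part of this module: what
# _unmarshall_fdb_entries() below does to a wire payload -- plain
# [mac, ip] pairs become PortInfo namedtuples. All values are made up,
# and the snippet assumes the neutron tree is importable.

from neutron.plugins.ml2.drivers.l2pop.rpc_manager import (
    l2population_rpc as l2pop_cb)

wire = {'net-uuid-1': {'segment_id': 1001, 'network_type': 'vxlan',
                       'ports': {'192.0.2.10': [
                           ['fa:16:3e:00:00:01', '10.0.0.3']]}}}
entries = (l2pop_cb.L2populationRpcCallBackMixin.
           _unmarshall_fdb_entries(wire))
port_info = entries['net-uuid-1']['ports']['192.0.2.10'][0]
assert port_info.mac_address == 'fa:16:3e:00:00:01'
assert port_info.ip_address == '10.0.0.3'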
import abc import itertools from neutron_lib import constants as n_const from oslo_config import cfg from oslo_log import helpers as log_helpers import six from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc from neutron.plugins.ml2.drivers.openvswitch.agent import vlanmanager @six.add_metaclass(abc.ABCMeta) class L2populationRpcCallBackMixin(object): '''General mixin class of L2-population RPC call back. The following methods are called through RPC. add_fdb_entries(), remove_fdb_entries(), update_fdb_entries() The following methods are used in an agent as internal methods. fdb_add(), fdb_remove(), fdb_update() ''' @log_helpers.log_method_call def add_fdb_entries(self, context, fdb_entries, host=None): if not host or host == cfg.CONF.host: self.fdb_add(context, self._unmarshall_fdb_entries(fdb_entries)) @log_helpers.log_method_call def remove_fdb_entries(self, context, fdb_entries, host=None): if not host or host == cfg.CONF.host: self.fdb_remove(context, self._unmarshall_fdb_entries(fdb_entries)) @log_helpers.log_method_call def update_fdb_entries(self, context, fdb_entries, host=None): if not host or host == cfg.CONF.host: self.fdb_update(context, self._unmarshall_fdb_entries(fdb_entries)) @staticmethod def _unmarshall_fdb_entries(fdb_entries): """Prepares fdb_entries from JSON. All methods in this class that receive messages should call this to unmarshall fdb_entries from the wire. :param fdb_entries: Original fdb_entries data-structure. Looks like: { <uuid>: { ..., 'ports': { <ip address>: [ [<mac>, <ip>], ... ], ... Or in the case of an update: { 'chg_ip': { '<uuid>': { '<agent1-ip>': { 'before': [ [<mac>, <ip>], ... ], 'after' : [ [<mac>, <ip>], ... ], }, '<agent2-ip>': { 'before': ... :returns: Deep copy with [<mac>, <ip>] converted to PortInfo """ unmarshalled = dict(fdb_entries) chg_ip_nets = [net.values() for net in unmarshalled.get('chg_ip', {}).values()] for agent in itertools.chain.from_iterable(chg_ip_nets): for when in ('before', 'after'): if when in agent: agent[when] = [l2pop_rpc.PortInfo(*pi) for pi in agent[when]] for value in unmarshalled.values(): if 'ports' in value: value['ports'] = dict( (address, [l2pop_rpc.PortInfo(*pi) for pi in port_infos]) for address, port_infos in value['ports'].items() ) return unmarshalled @abc.abstractmethod def fdb_add(self, context, fdb_entries): pass @abc.abstractmethod def fdb_remove(self, context, fdb_entries): pass @abc.abstractmethod def fdb_update(self, context, fdb_entries): pass @six.add_metaclass(abc.ABCMeta) class L2populationRpcCallBackTunnelMixin(L2populationRpcCallBackMixin): '''Mixin class of L2-population call back for Tunnel. The following methods are all used in agents as internal methods. Some of the methods in this class use Local VLAN Mapping, aka lvm. It's a python object with at least the following attributes: ============ ========================================================= Attribute Description ============ ========================================================= vlan An identifier used by the agent to identify a neutron network. network_type A network type found in neutron.plugins.common.constants. ============ ========================================================= NOTE(yamamoto): "Local VLAN" is an OVS-agent term. OVS-agent internally uses 802.1q VLAN tagging to isolate networks. While this class inherited the terms from OVS-agent, it does not assume the specific underlying technologies. E.g. this class is also used by ofagent, where a different mechanism is used.
''' @abc.abstractmethod def add_fdb_flow(self, br, port_info, remote_ip, lvm, ofport): '''Add flow for fdb This method is assumed to be used by method fdb_add_tun. We expect to add a flow entry to send a packet to specified port on bridge. And you may edit some information for local arp response. :param br: represent the bridge on which add_fdb_flow should be applied. :param port_info: PortInfo instance to include mac and ip. .mac_address .ip_address :remote_ip: remote ip address. :param lvm: a local VLAN map of network. :param ofport: a port to add. ''' pass @abc.abstractmethod def del_fdb_flow(self, br, port_info, remote_ip, lvm, ofport): '''Delete flow for fdb This method is assumed to be used by method fdb_remove_tun. We expect to delete a flow entry to send a packet to specified port from bridge. And you may delete some information for local arp response. :param br: represent the bridge on which del_fdb_flow should be applied. :param port_info: PortInfo instance to include mac and ip. .mac_address .ip_address :remote_ip: remote ip address. :param lvm: local VLAN map of a network. See add_fdb_flow for more explanation. :param ofport: a port to delete. ''' pass @abc.abstractmethod def setup_tunnel_port(self, br, remote_ip, network_type): '''Setup an added tunnel port. This method is assumed to be used by method fdb_add_tun. We expect to prepare to call add_fdb_flow. It will be mainly adding a port to a bridge. If you need, you may do some preparations for a bridge. :param br: represent the bridge on which setup_tunnel_port should be applied. :param remote_ip: an ip for a port to setup. :param network_type: a type of a network. :returns: an ofport value. value 0 means the port is unavailable. ''' pass @abc.abstractmethod def cleanup_tunnel_port(self, br, tun_ofport, tunnel_type): '''Clean up a deleted tunnel port. This method is assumed to be used by method fdb_remove_tun. We expect to clean up after calling del_fdb_flow. It will be mainly deleting a port from a bridge. If you need, you may do some cleanup for a bridge. :param br: represent the bridge on which cleanup_tunnel_port should be applied. :param tun_ofport: a port value to cleanup. :param tunnel_type: a type of a tunnel. ''' pass @abc.abstractmethod def setup_entry_for_arp_reply(self, br, action, local_vid, mac_address, ip_address): '''Operate the ARP respond information. Update MAC/IPv4 associations, which is typically used by the local ARP responder. For example, OVS-agent sets up flow entries to perform ARP responses. :param br: represent the bridge on which setup_entry_for_arp_reply should be applied. :param action: add/remove flow for arp response information. :param local_vid: id in local VLAN map of network's ARP entry. :param mac_address: MAC string value. :param ip_address: IP string value. ''' pass def get_agent_ports(self, fdb_entries): """Generator to yield port info. For each known (i.e found in VLAN manager) network in fdb_entries, yield (lvm, fdb_entries[network_id]['ports']) pair. 
:param fdb_entries: l2pop fdb entries """ vlan_manager = vlanmanager.LocalVlanManager() for network_id, values in fdb_entries.items(): try: lvm = vlan_manager.get(network_id) except vlanmanager.MappingNotFound: continue agent_ports = values.get('ports') yield (lvm, agent_ports) @log_helpers.log_method_call def fdb_add_tun(self, context, br, lvm, agent_ports, lookup_port): for remote_ip, ports in agent_ports.items(): # Ensure we have a tunnel port with this remote agent ofport = lookup_port(lvm.network_type, remote_ip) if not ofport: ofport = self.setup_tunnel_port(br, remote_ip, lvm.network_type) if ofport == 0: continue for port in ports: self.add_fdb_flow(br, port, remote_ip, lvm, ofport) @log_helpers.log_method_call def fdb_remove_tun(self, context, br, lvm, agent_ports, lookup_port): for remote_ip, ports in agent_ports.items(): ofport = lookup_port(lvm.network_type, remote_ip) if not ofport: continue for port in ports: self.del_fdb_flow(br, port, remote_ip, lvm, ofport) if port == n_const.FLOODING_ENTRY: # Check if this tunnel port is still used self.cleanup_tunnel_port(br, ofport, lvm.network_type) @log_helpers.log_method_call def fdb_update(self, context, fdb_entries): '''Call methods named '_fdb_'. This method assumes that methods '_fdb_' are defined in class. Currently the following actions are available. chg_ip ''' for action, values in fdb_entries.items(): method = '_fdb_' + action if not hasattr(self, method): raise NotImplementedError() getattr(self, method)(context, values) @log_helpers.log_method_call def fdb_chg_ip_tun(self, context, br, fdb_entries, local_ip): '''fdb update when an IP of a port is updated. The ML2 l2-pop mechanism driver sends an fdb update rpc message when an IP of a port is updated. :param context: RPC context. :param br: represent the bridge on which fdb_chg_ip_tun should be applied. :param fdb_entries: fdb dicts that contain all mac/IP information per agent and network. {'net1': {'agent_ip': {'before': PortInfo, 'after': PortInfo } } 'net2': ... } PortInfo has .mac_address and .ip_address attrs. :param local_ip: local IP address of this agent. 
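        An illustrative fdb_entries value for a single port whose IP
        moved (hypothetical identifiers and addresses):

            {'net-uuid': {'10.0.0.5': {
                'before': [PortInfo('fa:16:3e:01:02:03', '192.168.0.10')],
                'after': [PortInfo('fa:16:3e:01:02:03', '192.168.0.20')]}}}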
''' vlan_manager = vlanmanager.LocalVlanManager() for network_id, agent_ports in fdb_entries.items(): try: lvm = vlan_manager.get(network_id) except vlanmanager.MappingNotFound: continue for agent_ip, state in agent_ports.items(): if agent_ip == local_ip: continue after = state.get('after', []) for mac_ip in after: self.setup_entry_for_arp_reply(br, 'add', lvm.vlan, mac_ip.mac_address, mac_ip.ip_address) before = state.get('before', []) for mac_ip in before: self.setup_entry_for_arp_reply(br, 'remove', lvm.vlan, mac_ip.mac_address, mac_ip.ip_address) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3390448 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/linuxbridge/0000755000175000017500000000000000000000000025002 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/linuxbridge/__init__.py0000644000175000017500000000000000000000000027101 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3390448 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/linuxbridge/agent/0000755000175000017500000000000000000000000026100 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/linuxbridge/agent/__init__.py0000644000175000017500000000000000000000000030177 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py0000644000175000017500000002175600000000000031007 0ustar00coreycorey00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
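# Illustrative sketch of the ebtables layout this module maintains in the
# nat table for a VIF "tap199" with one allowed IP and one allowed MAC
# (the exact rules are built by the helpers below; names and addresses
# here are examples only):
#
#   PREROUTING: -i tap199 -j neutronARP-tap199 -p ARP
#               -i tap199 -j neutronMAC-tap199
#   neutronARP-tap199 (policy DROP):
#               -p ARP --arp-ip-src 192.168.0.5 -j ACCEPT
#   neutronMAC-tap199 (policy DROP):
#               -i tap199 --among-src fa:16:3e:01:02:03 -j RETURN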
import netaddr from neutron_lib.utils import net from oslo_concurrency import lockutils from oslo_log import log as logging import tenacity from neutron.agent.linux import ip_lib LOG = logging.getLogger(__name__) SPOOF_CHAIN_PREFIX = 'neutronARP-' MAC_CHAIN_PREFIX = 'neutronMAC-' def setup_arp_spoofing_protection(vif, port_details): if not port_details.get('port_security_enabled', True): # clear any previous entries related to this port delete_arp_spoofing_protection([vif]) LOG.info("Skipping ARP spoofing rules for port '%s' because " "it has port security disabled", vif) return if net.is_port_trusted(port_details): # clear any previous entries related to this port delete_arp_spoofing_protection([vif]) LOG.debug("Skipping ARP spoofing rules for network owned port " "'%s'.", vif) return _setup_arp_spoofing_protection(vif, port_details) @lockutils.synchronized('ebtables') def _setup_arp_spoofing_protection(vif, port_details): current_rules = ebtables(['-L']).splitlines() _install_mac_spoofing_protection(vif, port_details, current_rules) # collect all of the addresses and cidrs that belong to the port addresses = {f['ip_address'] for f in port_details['fixed_ips']} if port_details.get('allowed_address_pairs'): addresses |= {p['ip_address'] for p in port_details['allowed_address_pairs']} addresses = {ip for ip in addresses if netaddr.IPNetwork(ip).version == 4} if any(netaddr.IPNetwork(ip).prefixlen == 0 for ip in addresses): # don't try to install protection because a /0 prefix allows any # address anyway and the ARP_SPA can only match on /1 or more. return _install_arp_spoofing_protection(vif, addresses, current_rules) def chain_name(vif): # start each chain with a common identifier for cleanup to find return '%s%s' % (SPOOF_CHAIN_PREFIX, vif) @lockutils.synchronized('ebtables') def delete_arp_spoofing_protection(vifs): current_rules = ebtables(['-L']).splitlines() _delete_arp_spoofing_protection(vifs, current_rules, table='nat', chain='PREROUTING') # TODO(haleyb) this can go away in "R" cycle, it's here to cleanup # old chains in the filter table current_rules = ebtables(['-L'], table='filter').splitlines() _delete_arp_spoofing_protection(vifs, current_rules, table='filter', chain='FORWARD') def _delete_arp_spoofing_protection(vifs, current_rules, table, chain): # delete the jump rule and then delete the whole chain jumps = [vif for vif in vifs if vif_jump_present(vif, current_rules)] for vif in jumps: ebtables(['-D', chain, '-i', vif, '-j', chain_name(vif), '-p', 'ARP'], table=table) for vif in vifs: if chain_exists(chain_name(vif), current_rules): ebtables(['-X', chain_name(vif)], table=table) _delete_mac_spoofing_protection(vifs, current_rules, table=table, chain=chain) def _delete_unreferenced_arp_protection(current_vifs, table, chain): # deletes all jump rules and chains that aren't in current_vifs but match # the spoof prefix current_rules = ebtables(['-L'], table=table).splitlines() to_delete = [] for line in current_rules: # we're looking to find and turn the following: # Bridge chain: SPOOF_CHAIN_PREFIXtap199, entries: 0, policy: DROP # into 'tap199' if line.startswith('Bridge chain: %s' % SPOOF_CHAIN_PREFIX): devname = line.split(SPOOF_CHAIN_PREFIX, 1)[1].split(',')[0] if devname not in current_vifs: to_delete.append(devname) LOG.info("Clearing orphaned ARP spoofing entries for devices %s", to_delete) _delete_arp_spoofing_protection(to_delete, current_rules, table=table, chain=chain) @lockutils.synchronized('ebtables') def delete_unreferenced_arp_protection(current_vifs): 
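    # Sweep the nat table, where current chains live, and then the filter
    # table to clear legacy chains left behind by older releases.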
_delete_unreferenced_arp_protection(current_vifs, table='nat', chain='PREROUTING') # TODO(haleyb) this can go away in "R" cycle, it's here to cleanup # old chains in the filter table _delete_unreferenced_arp_protection(current_vifs, table='filter', chain='FORWARD') @lockutils.synchronized('ebtables') def install_arp_spoofing_protection(vif, addresses): current_rules = ebtables(['-L']).splitlines() _install_arp_spoofing_protection(vif, addresses, current_rules) def _install_arp_spoofing_protection(vif, addresses, current_rules): # make a VIF-specific ARP chain so we don't conflict with other rules vif_chain = chain_name(vif) if not chain_exists(vif_chain, current_rules): ebtables(['-N', vif_chain, '-P', 'DROP']) # flush the chain to clear previous accepts. this will cause dropped ARP # packets until the allows are installed, but that's better than leaked # spoofed packets and ARP can handle losses. ebtables(['-F', vif_chain]) for addr in sorted(addresses): ebtables(['-A', vif_chain, '-p', 'ARP', '--arp-ip-src', addr, '-j', 'ACCEPT']) # check if jump rule already exists, if not, install it if not vif_jump_present(vif, current_rules): ebtables(['-A', 'PREROUTING', '-i', vif, '-j', vif_chain, '-p', 'ARP']) def chain_exists(chain, current_rules): for rule in current_rules: if rule.startswith('Bridge chain: %s' % chain): return True return False def vif_jump_present(vif, current_rules): searches = (('-i %s' % vif), ('-j %s' % chain_name(vif)), ('-p ARP')) for line in current_rules: if all(s in line for s in searches): return True return False def _install_mac_spoofing_protection(vif, port_details, current_rules): mac_addresses = {port_details['mac_address']} if port_details.get('allowed_address_pairs'): mac_addresses |= {p['mac_address'] for p in port_details['allowed_address_pairs']} mac_addresses = list(mac_addresses) vif_chain = _mac_chain_name(vif) # mac filter chain for each vif which has a default deny if not chain_exists(vif_chain, current_rules): ebtables(['-N', vif_chain, '-P', 'DROP']) # check if jump rule already exists, if not, install it if not _mac_vif_jump_present(vif, current_rules): ebtables(['-A', 'PREROUTING', '-i', vif, '-j', vif_chain]) # we can't just feed all allowed macs at once because we can exceed # the maximum argument size. limit to 500 per rule. 
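    # e.g. 1200 allowed MACs yield three --among-src rules covering 500,
    # 500 and 200 addresses; anything not matched by a RETURN rule falls
    # through to the chain's default DROP policy.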
for chunk in (mac_addresses[i:i + 500] for i in range(0, len(mac_addresses), 500)): new_rule = ['-A', vif_chain, '-i', vif, '--among-src', ','.join(sorted(chunk)), '-j', 'RETURN'] ebtables(new_rule) _delete_vif_mac_rules(vif, current_rules) def _mac_vif_jump_present(vif, current_rules): searches = (('-i %s' % vif), ('-j %s' % _mac_chain_name(vif))) for line in current_rules: if all(s in line for s in searches): return True return False def _mac_chain_name(vif): return '%s%s' % (MAC_CHAIN_PREFIX, vif) def _delete_vif_mac_rules(vif, current_rules): chain = _mac_chain_name(vif) for rule in current_rules: if '-i %s' % vif in rule and '--among-src' in rule: ebtables(['-D', chain] + rule.split()) def _delete_mac_spoofing_protection(vifs, current_rules, table, chain): # delete the jump rule and then delete the whole chain jumps = [vif for vif in vifs if _mac_vif_jump_present(vif, current_rules)] for vif in jumps: ebtables(['-D', chain, '-i', vif, '-j', _mac_chain_name(vif)], table=table) for vif in vifs: chain = _mac_chain_name(vif) if chain_exists(chain, current_rules): ebtables(['-X', chain], table=table) # Used to scope ebtables commands in testing NAMESPACE = None @tenacity.retry( wait=tenacity.wait_exponential(multiplier=0.02), retry=tenacity.retry_if_exception(lambda e: e.returncode == 255), reraise=True ) def ebtables(comm, table='nat'): execute = ip_lib.IPWrapper(NAMESPACE).netns.execute return execute(['ebtables', '-t', table, '--concurrent'] + comm, run_as_root=True) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3390448 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/linuxbridge/agent/common/0000755000175000017500000000000000000000000027370 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/linuxbridge/agent/common/__init__.py0000644000175000017500000000000000000000000031467 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py0000644000175000017500000000133500000000000031211 0ustar00coreycorey00000000000000# Copyright 2012 Cisco Systems, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.conf.plugins.ml2.drivers import linuxbridge linuxbridge.register_linuxbridge_opts() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/linuxbridge/agent/common/constants.py0000644000175000017500000000153400000000000031761 0ustar00coreycorey00000000000000# Copyright 2012 Cisco Systems, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. FLAT_VLAN_ID = -1 LOCAL_VLAN_ID = -2 # Supported VXLAN features VXLAN_NONE = 'not_supported' VXLAN_MCAST = 'multicast_flooding' VXLAN_UCAST = 'unicast_flooding' EXTENSION_DRIVER_TYPE = 'linuxbridge' RESOURCE_ID_LENGTH = 11 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/linuxbridge/agent/common/utils.py0000644000175000017500000000224200000000000031102 0ustar00coreycorey00000000000000# Copyright 2012 Cisco Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron_lib import constants as n_const from oslo_log import log from neutron.plugins.ml2.drivers.linuxbridge.agent.common import constants LOG = log.getLogger(__name__) def get_tap_device_name(interface_id): """Convert port ID into device name format expected by linux bridge.""" if not interface_id: LOG.warning("Invalid Interface ID, will lead to incorrect " "tap device name") tap_device_name = (n_const.TAP_DEVICE_PREFIX + interface_id[:constants.RESOURCE_ID_LENGTH]) return tap_device_name ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3390448 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/0000755000175000017500000000000000000000000031652 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/__init__.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/__init__.p0000644000175000017500000000000000000000000033560 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/qos_driver.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/qos_driver0000644000175000017500000001644700000000000033766 0ustar00coreycorey00000000000000# Copyright 2016 OVH SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as const from oslo_config import cfg from oslo_log import helpers as log_helpers from oslo_log import log from oslo_utils import netutils from neutron.agent.l2.extensions import qos_linux as qos from neutron.agent.linux import iptables_manager from neutron.agent.linux import tc_lib from neutron.services.qos.drivers.linuxbridge import driver LOG = log.getLogger(__name__) class QosLinuxbridgeAgentDriver(qos.QosLinuxAgentDriver): # TODO(ralonsoh): # - All driver calls should include the rule parameter, including # the delete function, to have the 'direction' parameter. This QoS # extension modification is going to be implemented in # https://review.opendev.org/#/c/341186/ SUPPORTED_RULES = driver.SUPPORTED_RULES IPTABLES_DIRECTION = {const.INGRESS_DIRECTION: 'physdev-out', const.EGRESS_DIRECTION: 'physdev-in'} IPTABLES_DIRECTION_PREFIX = {const.INGRESS_DIRECTION: "i", const.EGRESS_DIRECTION: "o"} def __init__(self): super(QosLinuxbridgeAgentDriver, self).__init__() self.iptables_manager = None self.agent_api = None self.tbf_latency = cfg.CONF.QOS.tbf_latency def consume_api(self, agent_api): self.agent_api = agent_api def initialize(self): LOG.info("Initializing Linux bridge QoS extension") if self.agent_api: self.iptables_manager = self.agent_api.get_iptables_manager() if not self.iptables_manager: # If agent_api can't provide iptables_manager, it can be # created here for extension needs self.iptables_manager = iptables_manager.IptablesManager( state_less=True, use_ipv6=netutils.is_ipv6_enabled()) self.iptables_manager.initialize_mangle_table() def _dscp_chain_name(self, direction, device): return iptables_manager.get_chain_name( "qos-%s%s" % (self.IPTABLES_DIRECTION_PREFIX[direction], device[3:])) def _dscp_rule(self, direction, device): return ('-m physdev --%s %s --physdev-is-bridged ' '-j $%s') % (self.IPTABLES_DIRECTION[direction], device, self._dscp_chain_name(direction, device)) def _dscp_rule_tag(self, device): return "dscp-%s" % device @log_helpers.log_method_call def create_bandwidth_limit(self, port, rule): tc_wrapper = self._get_tc_wrapper(port) if rule.direction == const.INGRESS_DIRECTION: tc_wrapper.set_tbf_bw_limit( rule.max_kbps, rule.max_burst_kbps, self.tbf_latency) else: tc_wrapper.set_filters_bw_limit( rule.max_kbps, self._get_egress_burst_value(rule) ) @log_helpers.log_method_call def update_bandwidth_limit(self, port, rule): tc_wrapper = self._get_tc_wrapper(port) if rule.direction == const.INGRESS_DIRECTION: tc_wrapper.set_tbf_bw_limit( rule.max_kbps, rule.max_burst_kbps, self.tbf_latency) else: tc_wrapper.update_filters_bw_limit( rule.max_kbps, self._get_egress_burst_value(rule) ) @log_helpers.log_method_call def delete_bandwidth_limit(self, port): tc_wrapper = self._get_tc_wrapper(port) tc_wrapper.delete_filters_bw_limit() @log_helpers.log_method_call def delete_bandwidth_limit_ingress(self, port): tc_wrapper = self._get_tc_wrapper(port) tc_wrapper.delete_tbf_bw_limit() @log_helpers.log_method_call def create_dscp_marking(self, port, rule): with self.iptables_manager.defer_apply(): self._set_outgoing_qos_chain_for_port(port) self._set_dscp_mark_rule(port, rule.dscp_mark) @log_helpers.log_method_call def update_dscp_marking(self, port, rule): with self.iptables_manager.defer_apply(): self._delete_dscp_mark_rule(port) self._set_outgoing_qos_chain_for_port(port) self._set_dscp_mark_rule(port, rule.dscp_mark) 
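    # Taken together, the DSCP helpers below install mangle-table rules
    # roughly like the following for an egress mark of 26 on device
    # "tapXXX" (illustrative; the real chain name comes from
    # _dscp_chain_name and the mark is rendered in hex by
    # _set_dscp_mark_rule, e.g. format(26, '#04x') == '0x1a'):
    #
    #   -A POSTROUTING -m physdev --physdev-in tapXXX
    #          --physdev-is-bridged -j qos-oXXX
    #   -A qos-oXXX -j DSCP --set-dscp 0x1a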
@log_helpers.log_method_call def delete_dscp_marking(self, port): with self.iptables_manager.defer_apply(): self._delete_dscp_mark_rule(port) self._delete_outgoing_qos_chain_for_port(port) def _set_outgoing_qos_chain_for_port(self, port): chain_name = self._dscp_chain_name( const.EGRESS_DIRECTION, port['device']) chain_rule = self._dscp_rule( const.EGRESS_DIRECTION, port['device']) self.iptables_manager.ipv4['mangle'].add_chain(chain_name) self.iptables_manager.ipv6['mangle'].add_chain(chain_name) self.iptables_manager.ipv4['mangle'].add_rule('POSTROUTING', chain_rule) self.iptables_manager.ipv6['mangle'].add_rule('POSTROUTING', chain_rule) def _delete_outgoing_qos_chain_for_port(self, port): chain_name = self._dscp_chain_name( const.EGRESS_DIRECTION, port['device']) # Iptables chain removal "cascades", and will remove rules in # other chains that jump to it, like those added above. if self._qos_chain_is_empty(port, 4): self.iptables_manager.ipv4['mangle'].remove_chain(chain_name) if self._qos_chain_is_empty(port, 6): self.iptables_manager.ipv6['mangle'].remove_chain(chain_name) def _set_dscp_mark_rule(self, port, dscp_value): chain_name = self._dscp_chain_name( const.EGRESS_DIRECTION, port['device']) # iptables rules use hexadecimal values with --set-dscp rule = "-j DSCP --set-dscp %s" % format(dscp_value, '#04x') self.iptables_manager.ipv4['mangle'].add_rule( chain_name, rule, tag=self._dscp_rule_tag(port['device'])) self.iptables_manager.ipv6['mangle'].add_rule( chain_name, rule, tag=self._dscp_rule_tag(port['device'])) def _delete_dscp_mark_rule(self, port): self.iptables_manager.ipv4['mangle'].clear_rules_by_tag( self._dscp_rule_tag(port['device'])) self.iptables_manager.ipv6['mangle'].clear_rules_by_tag( self._dscp_rule_tag(port['device'])) def _qos_chain_is_empty(self, port, ip_version=4): chain_name = self._dscp_chain_name( const.EGRESS_DIRECTION, port['device']) rules_in_chain = self.iptables_manager.get_chain( "mangle", chain_name, ip_version=ip_version) return len(rules_in_chain) == 0 def _get_tc_wrapper(self, port): return tc_lib.TcCommand( port['device'], cfg.CONF.QOS.kernel_hz, ) ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_agent_extension_api.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_agent_extension_0000644000175000017500000000231600000000000033752 0ustar00coreycorey00000000000000# Copyright 2017 OVH SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class LinuxbridgeAgentExtensionAPI(object): '''Implements the Agent API for L2 agent. Extensions can gain access to this API by overriding the consume_api method which has been added to the AgentExtension class. 
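    A consuming extension typically stores the API object and later pulls
    the iptables manager from it, as the QoS driver in this tree does
    (sketch):

        def consume_api(self, agent_api):
            self.agent_api = agent_api

        def initialize(self):
            manager = self.agent_api.get_iptables_manager()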
''' def __init__(self, iptables_manager): super(LinuxbridgeAgentExtensionAPI, self).__init__() self.iptables_manager = iptables_manager def get_iptables_manager(self): """Allows extensions to get an iptables manager, used by agent, to use for managing extension specific iptables rules """ return self.iptables_manager ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_capabilities.py0000644000175000017500000000171600000000000033504 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron_lib import constants from neutron.plugins.ml2.drivers.agent import capabilities from neutron.services.trunk.drivers.linuxbridge.agent import driver def register(): """Register Linux Bridge capabilities.""" # Add capabilities to be loaded during agent initialization capabilities.register(driver.init_handler, constants.AGENT_TYPE_LINUXBRIDGE) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py0000644000175000017500000013414300000000000033724 0ustar00coreycorey00000000000000#!/usr/bin/env python # Copyright 2012 Cisco Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # # Performs per host Linux Bridge configuration for Neutron. # Based on the structure of the OpenVSwitch agent in the # Neutron OpenVSwitch Plugin. 
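# The agent maps physical network names either to host interfaces or to
# pre-created bridges through its configuration; an illustrative
# (hypothetical) setup parsed by main() at the bottom of this module:
#
#   [LINUX_BRIDGE]
#   physical_interface_mappings = physnet1:eth1
#   bridge_mappings = physnet2:br-ex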
import sys import netaddr from neutron_lib.agent import topics from neutron_lib import constants from neutron_lib import exceptions from neutron_lib.plugins import utils as plugin_utils from neutron_lib.utils import helpers from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_service import service from oslo_utils import excutils from six import moves from neutron.agent.linux import bridge_lib from neutron.agent.linux import ip_lib from neutron.api.rpc.handlers import securitygroups_rpc as sg_rpc from neutron.common import config as common_config from neutron.common import profiler as setup_profiler from neutron.common import utils from neutron.conf.agent import common as agent_config from neutron.plugins.ml2.drivers.agent import _agent_manager_base as amb from neutron.plugins.ml2.drivers.agent import _common_agent as ca from neutron.plugins.ml2.drivers.agent import config as cagt_config # noqa from neutron.plugins.ml2.drivers.l2pop.rpc_manager \ import l2population_rpc as l2pop_rpc from neutron.plugins.ml2.drivers.linuxbridge.agent import arp_protect from neutron.plugins.ml2.drivers.linuxbridge.agent.common import config # noqa from neutron.plugins.ml2.drivers.linuxbridge.agent.common \ import constants as lconst from neutron.plugins.ml2.drivers.linuxbridge.agent.common \ import utils as lb_utils from neutron.plugins.ml2.drivers.linuxbridge.agent import \ linuxbridge_agent_extension_api as agent_extension_api from neutron.plugins.ml2.drivers.linuxbridge.agent \ import linuxbridge_capabilities LOG = logging.getLogger(__name__) LB_AGENT_BINARY = 'neutron-linuxbridge-agent' BRIDGE_NAME_PREFIX = "brq" MAX_VLAN_POSTFIX_LEN = 5 VXLAN_INTERFACE_PREFIX = "vxlan-" IPTABLES_DRIVERS = [ 'iptables', 'iptables_hybrid', 'neutron.agent.linux.iptables_firewall.IptablesFirewallDriver', 'neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver' ] class LinuxBridgeManager(amb.CommonAgentManagerBase): def __init__(self, bridge_mappings, interface_mappings): super(LinuxBridgeManager, self).__init__() self.bridge_mappings = bridge_mappings self.interface_mappings = interface_mappings self.validate_interface_mappings() self.validate_bridge_mappings() self.ip = ip_lib.IPWrapper() self.agent_api = None # VXLAN related parameters: self.local_ip = cfg.CONF.VXLAN.local_ip self.vxlan_mode = lconst.VXLAN_NONE if cfg.CONF.VXLAN.enable_vxlan: device = self.get_local_ip_device() self.validate_vxlan_group_with_local_ip() self.local_int = device.name self.check_vxlan_support() def validate_interface_mappings(self): for physnet, interface in self.interface_mappings.items(): if not ip_lib.device_exists(interface): LOG.error("Interface %(intf)s for physical network %(net)s" " does not exist. Agent terminated!", {'intf': interface, 'net': physnet}) sys.exit(1) def validate_bridge_mappings(self): for physnet, bridge in self.bridge_mappings.items(): if not ip_lib.device_exists(bridge): LOG.error("Bridge %(brq)s for physical network %(net)s" " does not exist. 
Agent terminated!", {'brq': bridge, 'net': physnet}) sys.exit(1) def _is_valid_multicast_range(self, mrange): try: addr, vxlan_min, vxlan_max = mrange.split(':') if int(vxlan_min) > int(vxlan_max): raise ValueError() try: local_ver = netaddr.IPAddress(self.local_ip).version n_addr = netaddr.IPAddress(addr) if not n_addr.is_multicast() or n_addr.version != local_ver: raise ValueError() except netaddr.core.AddrFormatError: raise ValueError() except ValueError: return False return True def validate_vxlan_group_with_local_ip(self): for r in cfg.CONF.VXLAN.multicast_ranges: if not self._is_valid_multicast_range(r): LOG.error("Invalid multicast_range %(r)s. Must be in " ":: format and " "addresses must be in the same family as local IP " "%(loc)s.", {'r': r, 'loc': self.local_ip}) sys.exit(1) if not cfg.CONF.VXLAN.vxlan_group: return try: ip_addr = netaddr.IPAddress(self.local_ip) # Ensure the configured group address/range is valid and multicast group_net = netaddr.IPNetwork(cfg.CONF.VXLAN.vxlan_group) if not group_net.is_multicast(): raise ValueError() if not ip_addr.version == group_net.version: raise ValueError() except (netaddr.core.AddrFormatError, ValueError): LOG.error("Invalid VXLAN Group: %(group)s, must be an address " "or network (in CIDR notation) in a multicast " "range of the same address family as local_ip: " "%(ip)s", {'group': cfg.CONF.VXLAN.vxlan_group, 'ip': self.local_ip}) sys.exit(1) def get_local_ip_device(self): """Return the device with local_ip on the host.""" device = self.ip.get_device_by_ip(self.local_ip) if not device: LOG.error("Tunneling cannot be enabled without the local_ip " "bound to an interface on the host. Please " "configure local_ip %s on the host interface to " "be used for tunneling and restart the agent.", self.local_ip) sys.exit(1) return device @staticmethod def get_bridge_name(network_id): if not network_id: LOG.warning("Invalid Network ID, will lead to incorrect " "bridge name") bridge_name = BRIDGE_NAME_PREFIX + \ network_id[:lconst.RESOURCE_ID_LENGTH] return bridge_name @staticmethod def get_subinterface_name(physical_interface, vlan_id): if not vlan_id: LOG.warning("Invalid VLAN ID, will lead to incorrect " "subinterface name") vlan_postfix = '.%s' % vlan_id # For the vlan subinterface name prefix we use: # * the physical_interface, if len(physical_interface) + # len(vlan_postifx) <= 15 for backward compatibility reasons # Example: physical_interface = eth0 # prefix = eth0.1 # prefix = eth0.1111 # # * otherwise a unique hash per physical_interface to help debugging # Example: physical_interface = long_interface # prefix = longHASHED.1 # prefix = longHASHED.1111 # # Remark: For some physical_interface values, the used prefix can be # both, the physical_interface itself or a hash, depending # on the vlan_postfix length. 
# Example: physical_interface = mix_interface # prefix = mix_interface.1 (backward compatible) # prefix = mix_iHASHED.1111 if (len(physical_interface) + len(vlan_postfix) > constants.DEVICE_NAME_MAX_LEN): physical_interface = plugin_utils.get_interface_name( physical_interface, max_len=(constants.DEVICE_NAME_MAX_LEN - MAX_VLAN_POSTFIX_LEN)) return "%s%s" % (physical_interface, vlan_postfix) @staticmethod def get_tap_device_name(interface_id): return lb_utils.get_tap_device_name(interface_id) @staticmethod def get_vxlan_device_name(segmentation_id): if 0 <= int(segmentation_id) <= constants.MAX_VXLAN_VNI: return VXLAN_INTERFACE_PREFIX + str(segmentation_id) else: LOG.warning("Invalid Segmentation ID: %s, will lead to " "incorrect vxlan device name", segmentation_id) @staticmethod def _match_multicast_range(segmentation_id): for mrange in cfg.CONF.VXLAN.multicast_ranges: addr, vxlan_min, vxlan_max = mrange.split(':') if int(vxlan_min) <= segmentation_id <= int(vxlan_max): return addr def get_vxlan_group(self, segmentation_id): mcast_addr = self._match_multicast_range(segmentation_id) if mcast_addr: net = netaddr.IPNetwork(mcast_addr) else: net = netaddr.IPNetwork(cfg.CONF.VXLAN.vxlan_group) # Map the segmentation ID to (one of) the group address(es) return str(net.network + (int(segmentation_id) & int(net.hostmask))) def get_deletable_bridges(self): bridge_list = bridge_lib.get_bridge_names() bridges = {b for b in bridge_list if b.startswith(BRIDGE_NAME_PREFIX)} bridges.difference_update(self.bridge_mappings.values()) return bridges @staticmethod def get_tap_devices_count(bridge_name): if_list = bridge_lib.BridgeDevice(bridge_name).get_interfaces() return len([interface for interface in if_list if interface.startswith(constants.TAP_DEVICE_PREFIX)]) def ensure_vlan_bridge(self, network_id, phy_bridge_name, physical_interface, vlan_id): """Create a vlan and bridge unless they already exist.""" interface = self.ensure_vlan(physical_interface, vlan_id) if phy_bridge_name: return self.ensure_bridge(phy_bridge_name) else: bridge_name = self.get_bridge_name(network_id) if self.ensure_bridge(bridge_name, interface): return interface def ensure_vxlan_bridge(self, network_id, segmentation_id, mtu): """Create a vxlan and bridge unless they already exist.""" interface = self.ensure_vxlan(segmentation_id, mtu) if not interface: LOG.error("Failed creating vxlan interface for " "%(segmentation_id)s", {'segmentation_id': segmentation_id}) return bridge_name = self.get_bridge_name(network_id) self.ensure_bridge(bridge_name, interface, update_interface=False) return interface def get_interface_details(self, interface, ip_version): device = self.ip.device(interface) ips = device.addr.list(scope='global', ip_version=ip_version) # Update default gateway if necessary gateway = device.route.get_gateway(scope='global', ip_version=ip_version) return ips, gateway def ensure_flat_bridge(self, network_id, phy_bridge_name, physical_interface): """Create a non-vlan bridge unless it already exists.""" if phy_bridge_name: return self.ensure_bridge(phy_bridge_name) else: bridge_name = self.get_bridge_name(network_id) if self.ensure_bridge(bridge_name, physical_interface): return physical_interface def ensure_local_bridge(self, network_id, phy_bridge_name): """Create a local bridge unless it already exists.""" if phy_bridge_name: bridge_name = phy_bridge_name else: bridge_name = self.get_bridge_name(network_id) return self.ensure_bridge(bridge_name) def ensure_vlan(self, physical_interface, vlan_id): """Create a vlan unless 
it already exists.""" interface = self.get_subinterface_name(physical_interface, vlan_id) if not ip_lib.device_exists(interface): LOG.debug("Creating subinterface %(interface)s for " "VLAN %(vlan_id)s on interface " "%(physical_interface)s", {'interface': interface, 'vlan_id': vlan_id, 'physical_interface': physical_interface}) try: int_vlan = self.ip.add_vlan(interface, physical_interface, vlan_id) except RuntimeError: with excutils.save_and_reraise_exception() as ctxt: if ip_lib.vlan_in_use(vlan_id): ctxt.reraise = False LOG.error("Unable to create VLAN interface for " "VLAN ID %s because it is in use by " "another interface.", vlan_id) return int_vlan.disable_ipv6() int_vlan.link.set_up() LOG.debug("Done creating subinterface %s", interface) return interface def ensure_vxlan(self, segmentation_id, mtu=None): """Create a vxlan unless it already exists.""" interface = self.get_vxlan_device_name(segmentation_id) if not ip_lib.device_exists(interface): LOG.debug("Creating vxlan interface %(interface)s for " "VNI %(segmentation_id)s", {'interface': interface, 'segmentation_id': segmentation_id}) args = {'dev': self.local_int, 'srcport': (cfg.CONF.VXLAN.udp_srcport_min, cfg.CONF.VXLAN.udp_srcport_max), 'dstport': cfg.CONF.VXLAN.udp_dstport, 'ttl': cfg.CONF.VXLAN.ttl} if cfg.CONF.VXLAN.tos: args['tos'] = cfg.CONF.VXLAN.tos if cfg.CONF.AGENT.dscp or cfg.CONF.AGENT.dscp_inherit: LOG.warning('The deprecated tos option in group VXLAN ' 'is set and takes precedence over dscp and ' 'dscp_inherit in group AGENT.') elif cfg.CONF.AGENT.dscp_inherit: args['tos'] = 'inherit' elif cfg.CONF.AGENT.dscp: args['tos'] = int(cfg.CONF.AGENT.dscp) << 2 if self.vxlan_mode == lconst.VXLAN_MCAST: args['group'] = self.get_vxlan_group(segmentation_id) if cfg.CONF.VXLAN.l2_population: args['proxy'] = cfg.CONF.VXLAN.arp_responder try: int_vxlan = self.ip.add_vxlan(interface, segmentation_id, **args) except RuntimeError: with excutils.save_and_reraise_exception() as ctxt: # perform this check after an attempt rather than before # to avoid excessive lookups and a possible race condition. 
if ip_lib.vxlan_in_use(segmentation_id): ctxt.reraise = False LOG.error("Unable to create VXLAN interface for " "VNI %s because it is in use by another " "interface.", segmentation_id) return None if mtu: try: int_vxlan.link.set_mtu(mtu) except ip_lib.InvalidArgument: phys_dev_mtu = ip_lib.get_device_mtu(self.local_int) LOG.error("Provided MTU value %(mtu)s for VNI " "%(segmentation_id)s is too high according " "to physical device %(dev)s MTU=%(phys_mtu)s.", {'mtu': mtu, 'segmentation_id': segmentation_id, 'dev': self.local_int, 'phys_mtu': phys_dev_mtu}) int_vxlan.link.delete() return None int_vxlan.disable_ipv6() int_vxlan.link.set_up() LOG.debug("Done creating vxlan interface %s", interface) return interface def _update_interface_ip_details(self, destination, source, ips, gateway): dst_device = self.ip.device(destination) src_device = self.ip.device(source) # Append IP's to bridge if necessary if ips: for ip in ips: # If bridge ip address already exists, then don't add # otherwise will report error to = utils.cidr_to_ip(ip['cidr']) if not dst_device.addr.list(to=to): dst_device.addr.add(cidr=ip['cidr']) if gateway: # Ensure that the gateway can be updated by changing the metric metric = 100 ip_version = utils.get_ip_version(gateway['cidr']) if gateway['metric'] != ip_lib.IP_ROUTE_METRIC_DEFAULT[ip_version]: metric = gateway['metric'] - 1 dst_device.route.add_gateway(gateway=gateway['via'], metric=metric) src_device.route.delete_gateway(gateway=gateway['via']) # Remove IP's from interface if ips: for ip in ips: src_device.addr.delete(cidr=ip['cidr']) def update_interface_ip_details(self, destination, source): # Returns True if there were IPs or a gateway moved updated = False for ip_version in (constants.IP_VERSION_4, constants.IP_VERSION_6): ips, gateway = self.get_interface_details(source, ip_version) if ips or gateway: self._update_interface_ip_details(destination, source, ips, gateway) updated = True return updated def ensure_bridge(self, bridge_name, interface=None, update_interface=True): """Create a bridge unless it already exists.""" # ensure_device_is_ready instead of device_exists is used here # because there are cases where the bridge exists but it's not UP, # for example: # 1) A greenthread was executing this function and had not yet executed # "ip link set bridge_name up" before eventlet switched to this # thread running the same function # 2) The Nova VIF driver was running concurrently and had just created # the bridge, but had not yet put it UP if not ip_lib.ensure_device_is_ready(bridge_name): LOG.debug("Starting bridge %(bridge_name)s for subinterface " "%(interface)s", {'bridge_name': bridge_name, 'interface': interface}) bridge_device = bridge_lib.BridgeDevice.addbr(bridge_name) if bridge_device.setfd(0): return if bridge_device.disable_stp(): return if bridge_device.link.set_up(): return LOG.debug("Done starting bridge %(bridge_name)s for " "subinterface %(interface)s", {'bridge_name': bridge_name, 'interface': interface}) else: bridge_device = bridge_lib.BridgeDevice(bridge_name) if not interface: return bridge_name # Update IP info if necessary if update_interface: self.update_interface_ip_details(bridge_name, interface) # Check if the interface is part of the bridge if not bridge_device.owns_interface(interface): try: # Check if the interface is not enslaved in another bridge bridge = bridge_lib.BridgeDevice.get_interface_bridge( interface) if bridge: bridge.delif(interface) bridge_device.addif(interface) except Exception as e: LOG.error("Unable to add 
%(interface)s to %(bridge_name)s" "! Exception: %(e)s", {'interface': interface, 'bridge_name': bridge_name, 'e': e}) return return bridge_name def ensure_physical_in_bridge(self, network_id, network_type, physical_network, segmentation_id, mtu): if network_type == constants.TYPE_VXLAN: if self.vxlan_mode == lconst.VXLAN_NONE: LOG.error("Unable to add vxlan interface for network %s", network_id) return return self.ensure_vxlan_bridge(network_id, segmentation_id, mtu) # NOTE(nick-ma-z): Obtain mappings of physical bridge and interfaces physical_bridge = self.bridge_mappings.get(physical_network) physical_interface = self.interface_mappings.get(physical_network) if not physical_bridge and not physical_interface: LOG.error("No bridge or interface mappings" " for physical network %s", physical_network) return if network_type == constants.TYPE_FLAT: return self.ensure_flat_bridge(network_id, physical_bridge, physical_interface) elif network_type == constants.TYPE_VLAN: return self.ensure_vlan_bridge(network_id, physical_bridge, physical_interface, segmentation_id) else: LOG.error("Unknown network_type %(network_type)s for network " "%(network_id)s.", {network_type: network_type, network_id: network_id}) def add_tap_interface(self, network_id, network_type, physical_network, segmentation_id, tap_device_name, device_owner, mtu): """Add tap interface and handle interface missing exceptions.""" try: return self._add_tap_interface(network_id, network_type, physical_network, segmentation_id, tap_device_name, device_owner, mtu) except Exception: with excutils.save_and_reraise_exception() as ctx: if not ip_lib.device_exists(tap_device_name): # the exception was likely a side effect of the tap device # being removed during handling so we just return false # like we would if it didn't exist to begin with. ctx.reraise = False return False def _add_tap_interface(self, network_id, network_type, physical_network, segmentation_id, tap_device_name, device_owner, mtu): """Add tap interface. If a VIF has been plugged into a network, this function will add the corresponding tap device to the relevant bridge. """ if not ip_lib.device_exists(tap_device_name): LOG.debug("Tap device: %s does not exist on " "this host, skipped", tap_device_name) return False bridge_name = self.bridge_mappings.get(physical_network) if not bridge_name: bridge_name = self.get_bridge_name(network_id) if network_type == constants.TYPE_LOCAL: self.ensure_local_bridge(network_id, bridge_name) elif not self.ensure_physical_in_bridge(network_id, network_type, physical_network, segmentation_id, mtu): return False if mtu: # <-None with device_details from older neutron servers. # we ensure the MTU here because libvirt does not set the # MTU of a bridge it creates and the tap device it creates will # inherit from the bridge its plugged into, which will be 1500 # at the time. See bug/1684326 for details. 
self._set_tap_mtu(tap_device_name, mtu) # Avoid messing with plugging devices into a bridge that the agent # does not own if not device_owner.startswith(constants.DEVICE_OWNER_COMPUTE_PREFIX): # Check if device needs to be added to bridge if not bridge_lib.BridgeDevice.get_interface_bridge( tap_device_name): data = {'tap_device_name': tap_device_name, 'bridge_name': bridge_name} LOG.debug("Adding device %(tap_device_name)s to bridge " "%(bridge_name)s", data) if bridge_lib.BridgeDevice(bridge_name).addif(tap_device_name): return False else: data = {'tap_device_name': tap_device_name, 'device_owner': device_owner, 'bridge_name': bridge_name} LOG.debug("Skip adding device %(tap_device_name)s to " "%(bridge_name)s. It is owned by %(device_owner)s and " "thus added elsewhere.", data) return True @staticmethod def _set_tap_mtu(tap_device_name, mtu): ip_lib.IPDevice(tap_device_name).link.set_mtu(mtu) def plug_interface(self, network_id, network_segment, tap_name, device_owner): return self.add_tap_interface(network_id, network_segment.network_type, network_segment.physical_network, network_segment.segmentation_id, tap_name, device_owner, network_segment.mtu) def delete_bridge(self, bridge_name): bridge_device = bridge_lib.BridgeDevice(bridge_name) if bridge_device.exists(): physical_interfaces = set(self.interface_mappings.values()) interfaces_on_bridge = bridge_device.get_interfaces() for interface in interfaces_on_bridge: self.remove_interface(bridge_name, interface) if interface.startswith(VXLAN_INTERFACE_PREFIX): self.delete_interface(interface) else: # Match the vlan/flat interface in the bridge. # If the bridge has an IP, it mean that this IP was moved # from the current interface, which also mean that this # interface was not created by the agent. updated = self.update_interface_ip_details(interface, bridge_name) if not updated and interface not in physical_interfaces: self.delete_interface(interface) try: LOG.debug("Deleting bridge %s", bridge_name) if bridge_device.link.set_down(): return if bridge_device.delbr(): return LOG.debug("Done deleting bridge %s", bridge_name) except RuntimeError: with excutils.save_and_reraise_exception() as ctxt: if not bridge_device.exists(): # the exception was likely a side effect of the bridge # being removed by nova during handling, # so we just return ctxt.reraise = False LOG.debug("Cannot delete bridge %s; it does not exist", bridge_name) return else: LOG.debug("Cannot delete bridge %s; it does not exist", bridge_name) @staticmethod def remove_interface(bridge_name, interface_name): bridge_device = bridge_lib.BridgeDevice(bridge_name) if bridge_device.exists(): if not bridge_device.owns_interface(interface_name): return True LOG.debug("Removing device %(interface_name)s from bridge " "%(bridge_name)s", {'interface_name': interface_name, 'bridge_name': bridge_name}) try: bridge_device.delif(interface_name) LOG.debug("Done removing device %(interface_name)s from " "bridge %(bridge_name)s", {'interface_name': interface_name, 'bridge_name': bridge_name}) return True except RuntimeError: with excutils.save_and_reraise_exception() as ctxt: if not bridge_device.owns_interface(interface_name): # the exception was likely a side effect of the tap # being deleted by some other agent during handling ctxt.reraise = False LOG.debug("Cannot remove %(interface_name)s from " "%(bridge_name)s. 
It is not on the bridge.", {'interface_name': interface_name, 'bridge_name': bridge_name}) return False else: LOG.debug("Cannot remove device %(interface_name)s bridge " "%(bridge_name)s does not exist", {'interface_name': interface_name, 'bridge_name': bridge_name}) return False def delete_interface(self, interface): device = self.ip.device(interface) if device.exists(): LOG.debug("Deleting interface %s", interface) device.link.set_down() device.link.delete() LOG.debug("Done deleting interface %s", interface) def get_devices_modified_timestamps(self, devices): # NOTE(kevinbenton): we aren't returning real timestamps here. We # are returning interface indexes instead which change when the # interface is removed/re-added. This works for the direct # comparison the common agent loop performs with these. # See bug/1622833 for details. return {d: bridge_lib.get_interface_ifindex(d) for d in devices} @staticmethod def get_all_devices(): devices = set() for device in bridge_lib.get_bridge_names(): if device.startswith(constants.TAP_DEVICE_PREFIX): devices.add(device) return devices def vxlan_ucast_supported(self): if not cfg.CONF.VXLAN.l2_population: return False if not ip_lib.iproute_arg_supported( ['bridge', 'fdb'], 'append'): LOG.warning('Option "%(option)s" must be supported by command ' '"%(command)s" to enable %(mode)s mode', {'option': 'append', 'command': 'bridge fdb', 'mode': 'VXLAN UCAST'}) return False test_iface = None for seg_id in moves.range(1, constants.MAX_VXLAN_VNI + 1): if (ip_lib.device_exists(self.get_vxlan_device_name(seg_id)) or ip_lib.vxlan_in_use(seg_id)): continue test_iface = self.ensure_vxlan(seg_id) break else: LOG.error('No valid Segmentation ID to perform UCAST test.') return False try: bridge_lib.FdbInterface.append(constants.FLOODING_ENTRY[0], test_iface, '1.1.1.1', log_fail_as_error=False) return True except RuntimeError: return False finally: self.delete_interface(test_iface) @staticmethod def vxlan_mcast_supported(): if not cfg.CONF.VXLAN.vxlan_group: LOG.warning('VXLAN muticast group(s) must be provided in ' 'vxlan_group option to enable VXLAN MCAST mode') return False if not ip_lib.iproute_arg_supported( ['ip', 'link', 'add', 'type', 'vxlan'], 'proxy'): LOG.warning('Option "%(option)s" must be supported by command ' '"%(command)s" to enable %(mode)s mode', {'option': 'proxy', 'command': 'ip link add type vxlan', 'mode': 'VXLAN MCAST'}) return False return True def check_vxlan_support(self): self.vxlan_mode = lconst.VXLAN_NONE if self.vxlan_ucast_supported(): self.vxlan_mode = lconst.VXLAN_UCAST elif self.vxlan_mcast_supported(): self.vxlan_mode = lconst.VXLAN_MCAST else: raise exceptions.VxlanNetworkUnsupported() LOG.debug('Using %s VXLAN mode', self.vxlan_mode) @staticmethod def fdb_ip_entry_exists(mac, ip, interface): ip_version = utils.get_ip_version(ip) entry = ip_lib.dump_neigh_entries(ip_version, interface, dst=ip, lladdr=mac) return entry != [] @staticmethod def fdb_bridge_entry_exists(mac, interface, agent_ip=None): entries = bridge_lib.FdbInterface.show(interface) if not agent_ip: return mac in entries return (agent_ip in entries and mac in entries) @staticmethod def add_fdb_ip_entry(mac, ip, interface): if cfg.CONF.VXLAN.arp_responder: ip_lib.add_neigh_entry(ip, mac, interface) @staticmethod def remove_fdb_ip_entry(mac, ip, interface): if cfg.CONF.VXLAN.arp_responder: ip_lib.delete_neigh_entry(ip, mac, interface) def add_fdb_entries(self, agent_ip, ports, interface): for mac, ip in ports: if mac != constants.FLOODING_ENTRY[0]: 
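                # Unicast entry: optionally seed the ARP table, then point
                # the bridge fdb at the remote VTEP. The flooding entry
                # (mac == constants.FLOODING_ENTRY[0]) is handled by the
                # elif branch below, and only in VXLAN_UCAST mode.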
self.add_fdb_ip_entry(mac, ip, interface) bridge_lib.FdbInterface.replace(mac, interface, agent_ip, check_exit_code=False) elif self.vxlan_mode == lconst.VXLAN_UCAST: if self.fdb_bridge_entry_exists(mac, interface): bridge_lib.FdbInterface.append(mac, interface, agent_ip, check_exit_code=False) else: bridge_lib.FdbInterface.add(mac, interface, agent_ip, check_exit_code=False) def remove_fdb_entries(self, agent_ip, ports, interface): for mac, ip in ports: if mac != constants.FLOODING_ENTRY[0]: self.remove_fdb_ip_entry(mac, ip, interface) bridge_lib.FdbInterface.delete(mac, interface, agent_ip, check_exit_code=False) elif self.vxlan_mode == lconst.VXLAN_UCAST: bridge_lib.FdbInterface.delete(mac, interface, agent_ip, check_exit_code=False) def get_agent_id(self): if self.bridge_mappings: mac = ip_lib.get_device_mac( list(self.bridge_mappings.values())[0]) else: devices = self.ip.get_devices(True) for device in devices: mac = ip_lib.get_device_mac(device.name) if mac: break else: LOG.error("Unable to obtain MAC address for unique ID. " "Agent terminated!") sys.exit(1) return 'lb%s' % mac.replace(":", "") def get_agent_configurations(self): configurations = {'bridge_mappings': self.bridge_mappings, 'interface_mappings': self.interface_mappings } if self.vxlan_mode != lconst.VXLAN_NONE: configurations['tunneling_ip'] = self.local_ip configurations['tunnel_types'] = [constants.TYPE_VXLAN] configurations['l2_population'] = cfg.CONF.VXLAN.l2_population return configurations def get_rpc_callbacks(self, context, agent, sg_agent): return LinuxBridgeRpcCallbacks(context, agent, sg_agent) def get_agent_api(self, **kwargs): if self.agent_api: return self.agent_api sg_agent = kwargs.get("sg_agent") iptables_manager = self._get_iptables_manager(sg_agent) self.agent_api = agent_extension_api.LinuxbridgeAgentExtensionAPI( iptables_manager) return self.agent_api @staticmethod def _get_iptables_manager(sg_agent): if not sg_agent: return None if cfg.CONF.SECURITYGROUP.firewall_driver in IPTABLES_DRIVERS: return sg_agent.firewall.iptables def get_rpc_consumers(self): consumers = [[topics.PORT, topics.UPDATE], [topics.NETWORK, topics.DELETE], [topics.NETWORK, topics.UPDATE], [topics.SECURITY_GROUP, topics.UPDATE], [topics.PORT_BINDING, topics.DEACTIVATE], [topics.PORT_BINDING, topics.ACTIVATE]] if cfg.CONF.VXLAN.l2_population: consumers.append([topics.L2POPULATION, topics.UPDATE]) return consumers def ensure_port_admin_state(self, tap_name, admin_state_up): LOG.debug("Setting admin_state_up to %s for device %s", admin_state_up, tap_name) if admin_state_up: ip_lib.IPDevice(tap_name).link.set_up() else: ip_lib.IPDevice(tap_name).link.set_down() def setup_arp_spoofing_protection(self, device, device_details): arp_protect.setup_arp_spoofing_protection(device, device_details) def delete_arp_spoofing_protection(self, devices): arp_protect.delete_arp_spoofing_protection(devices) def delete_unreferenced_arp_protection(self, current_devices): arp_protect.delete_unreferenced_arp_protection(current_devices) def get_extension_driver_type(self): return lconst.EXTENSION_DRIVER_TYPE class LinuxBridgeRpcCallbacks(sg_rpc.SecurityGroupAgentRpcCallbackMixin, l2pop_rpc.L2populationRpcCallBackMixin, amb.CommonAgentManagerRpcCallBackBase): # Set RPC API version to 1.0 by default. 
# history # 1.1 Support Security Group RPC # 1.3 Added param devices_to_update to security_groups_provider_updated # 1.4 Added support for network_update # 1.5 Added binding_activate and binding_deactivate target = oslo_messaging.Target(version='1.5') def network_delete(self, context, **kwargs): LOG.debug("network_delete received") network_id = kwargs.get('network_id') # NOTE(nick-ma-z): Don't remove pre-existing user-defined bridges if network_id in self.network_map: phynet = self.network_map[network_id].physical_network if phynet and phynet in self.agent.mgr.bridge_mappings: LOG.info("Physical network %s is defined in " "bridge_mappings and cannot be deleted.", network_id) return bridge_name = self.agent.mgr.get_bridge_name(network_id) LOG.debug("Delete %s", bridge_name) self.agent.mgr.delete_bridge(bridge_name) self.network_map.pop(network_id, None) def port_update(self, context, **kwargs): port_id = kwargs['port']['id'] device_name = self.agent.mgr.get_tap_device_name(port_id) # Put the device name in the updated_devices set. # Do not store port details, as if they're used for processing # notifications there is no guarantee the notifications are # processed in the same order as the relevant API requests. self.updated_devices.add(device_name) LOG.debug("port_update RPC received for port: %s", port_id) def binding_deactivate(self, context, **kwargs): if kwargs.get('host') != cfg.CONF.host: return interface_name = self.agent.mgr.get_tap_device_name( kwargs.get('port_id')) bridge_name = self.agent.mgr.get_bridge_name(kwargs.get('network_id')) LOG.debug("Removing device %(interface_name)s from bridge " "%(bridge_name)s due to binding being de-activated", {'interface_name': interface_name, 'bridge_name': bridge_name}) self.agent.mgr.remove_interface(bridge_name, interface_name) def binding_activate(self, context, **kwargs): if kwargs.get('host') != cfg.CONF.host: return # Since the common agent loop treats added and updated the same way, # just add activated ports to the updated devices list. 
This way, # adding binding activation is less disruptive to the existing code port_id = kwargs.get('port_id') device_name = self.agent.mgr.get_tap_device_name(port_id) self.updated_devices.add(device_name) LOG.debug("Binding activation received for port: %s", port_id) def network_update(self, context, **kwargs): network_id = kwargs['network']['id'] LOG.debug("network_update message processed for network " "%(network_id)s, with ports: %(ports)s", {'network_id': network_id, 'ports': self.agent.network_ports[network_id]}) for port_data in self.agent.network_ports[network_id]: self.updated_devices.add(port_data['device']) def fdb_add(self, context, fdb_entries): LOG.debug("fdb_add received") for network_id, values in fdb_entries.items(): segment = self.network_map.get(network_id) if not segment: return if segment.network_type != constants.TYPE_VXLAN: return interface = self.agent.mgr.get_vxlan_device_name( segment.segmentation_id) agent_ports = values.get('ports') for agent_ip, ports in agent_ports.items(): if agent_ip == self.agent.mgr.local_ip: continue self.agent.mgr.add_fdb_entries(agent_ip, ports, interface) def fdb_remove(self, context, fdb_entries): LOG.debug("fdb_remove received") for network_id, values in fdb_entries.items(): segment = self.network_map.get(network_id) if not segment: return if segment.network_type != constants.TYPE_VXLAN: return interface = self.agent.mgr.get_vxlan_device_name( segment.segmentation_id) agent_ports = values.get('ports') for agent_ip, ports in agent_ports.items(): if agent_ip == self.agent.mgr.local_ip: continue self.agent.mgr.remove_fdb_entries(agent_ip, ports, interface) def _fdb_chg_ip(self, context, fdb_entries): LOG.debug("update chg_ip received") for network_id, agent_ports in fdb_entries.items(): segment = self.network_map.get(network_id) if not segment: return if segment.network_type != constants.TYPE_VXLAN: return interface = self.agent.mgr.get_vxlan_device_name( segment.segmentation_id) for agent_ip, state in agent_ports.items(): if agent_ip == self.agent.mgr.local_ip: continue after = state.get('after', []) for mac, ip in after: self.agent.mgr.add_fdb_ip_entry(mac, ip, interface) before = state.get('before', []) for mac, ip in before: self.agent.mgr.remove_fdb_ip_entry(mac, ip, interface) def fdb_update(self, context, fdb_entries): LOG.debug("fdb_update received") for action, values in fdb_entries.items(): method = '_fdb_' + action if not hasattr(self, method): raise NotImplementedError() getattr(self, method)(context, values) def main(): common_config.init(sys.argv[1:]) common_config.setup_logging() agent_config.setup_privsep() try: interface_mappings = helpers.parse_mappings( cfg.CONF.LINUX_BRIDGE.physical_interface_mappings) except ValueError as e: LOG.error("Parsing physical_interface_mappings failed: %s. " "Agent terminated!", e) sys.exit(1) LOG.info("Interface mappings: %s", interface_mappings) try: bridge_mappings = helpers.parse_mappings( cfg.CONF.LINUX_BRIDGE.bridge_mappings) except ValueError as e: LOG.error("Parsing bridge_mappings failed: %s. 
" "Agent terminated!", e) sys.exit(1) LOG.info("Bridge mappings: %s", bridge_mappings) manager = LinuxBridgeManager(bridge_mappings, interface_mappings) linuxbridge_capabilities.register() polling_interval = cfg.CONF.AGENT.polling_interval quitting_rpc_timeout = cfg.CONF.AGENT.quitting_rpc_timeout agent = ca.CommonAgentLoop(manager, polling_interval, quitting_rpc_timeout, constants.AGENT_TYPE_LINUXBRIDGE, LB_AGENT_BINARY) setup_profiler.setup("neutron-linuxbridge-agent", cfg.CONF.host) LOG.info("Agent initialized successfully, now running... ") launcher = service.launch(cfg.CONF, agent, restart_method='mutate') launcher.wait() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.343045 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/linuxbridge/mech_driver/0000755000175000017500000000000000000000000027271 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/linuxbridge/mech_driver/__init__.py0000644000175000017500000000000000000000000031370 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/linuxbridge/mech_driver/mech_linuxbridge.py0000644000175000017500000000447400000000000033164 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import portbindings from neutron_lib import constants from neutron.agent import securitygroups_rpc from neutron.plugins.ml2.drivers import mech_agent from neutron.services.qos.drivers.linuxbridge import driver as lb_qos_driver class LinuxbridgeMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): """Attach to networks using linuxbridge L2 agent. The LinuxbridgeMechanismDriver integrates the ml2 plugin with the linuxbridge L2 agent. Port binding with this driver requires the linuxbridge agent to be running on the port's host, and that agent to have connectivity to at least one segment of the port's network. 
""" def __init__(self): sg_enabled = securitygroups_rpc.is_firewall_enabled() vif_details = {portbindings.CAP_PORT_FILTER: sg_enabled, portbindings.VIF_DETAILS_CONNECTIVITY: portbindings.CONNECTIVITY_L2} super(LinuxbridgeMechanismDriver, self).__init__( constants.AGENT_TYPE_LINUXBRIDGE, portbindings.VIF_TYPE_BRIDGE, vif_details) lb_qos_driver.register() def get_allowed_network_types(self, agent): return (agent['configurations'].get('tunnel_types', []) + [constants.TYPE_LOCAL, constants.TYPE_FLAT, constants.TYPE_VLAN]) def get_mappings(self, agent): mappings = dict(agent['configurations'].get('interface_mappings', {}), **agent['configurations'].get('bridge_mappings', {})) return mappings def check_vlan_transparency(self, context): """Linuxbridge driver vlan transparency support.""" return True ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.343045 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/macvtap/0000755000175000017500000000000000000000000024121 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/macvtap/__init__.py0000644000175000017500000000000000000000000026220 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.343045 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/macvtap/agent/0000755000175000017500000000000000000000000025217 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/macvtap/agent/__init__.py0000644000175000017500000000000000000000000027316 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/macvtap/agent/macvtap_neutron_agent.py0000644000175000017500000002105500000000000032157 0ustar00coreycorey00000000000000# Copyright (c) 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import sys from neutron_lib.agent import topics from neutron_lib import constants from neutron_lib.utils import helpers from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_service import service from neutron.agent.linux import ip_lib from neutron.api.rpc.handlers import securitygroups_rpc as sg_rpc from neutron.common import config as common_config from neutron.conf.plugins.ml2.drivers import macvtap as config from neutron.plugins.ml2.drivers.agent import _agent_manager_base as amb from neutron.plugins.ml2.drivers.agent import _common_agent as ca from neutron.plugins.ml2.drivers.macvtap import macvtap_common LOG = logging.getLogger(__name__) MACVTAP_AGENT_BINARY = "neutron-macvtap-agent" MACVTAP_FS = "/sys/class/net/" EXTENSION_DRIVER_TYPE = 'macvtap' config.register_macvtap_opts() class MacvtapRPCCallBack(sg_rpc.SecurityGroupAgentRpcCallbackMixin, amb.CommonAgentManagerRpcCallBackBase): # Set RPC API version to 1.0 by default. # history # 1.1 Support Security Group RPC # 1.3 Added param devices_to_update to security_groups_provider_updated # 1.4 Added support for network_update target = oslo_messaging.Target(version='1.4') def network_delete(self, context, **kwargs): LOG.debug("network_delete received") network_id = kwargs.get('network_id') if network_id not in self.network_map: LOG.error("Network %s is not available.", network_id) return segment = self.network_map.get(network_id) if segment and segment.network_type == constants.TYPE_VLAN: if_mappings = self.agent.mgr.interface_mappings vlan_device_name = macvtap_common.get_vlan_device_name( if_mappings[segment.physical_network], str(segment.segmentation_id)) ip_dev = ip_lib.IPDevice(vlan_device_name) if ip_dev.exists(): LOG.debug("Delete %s", ip_dev.name) ip_dev.link.delete() else: LOG.debug("Cannot delete vlan device %s; it does not exist", vlan_device_name) def port_update(self, context, **kwargs): port = kwargs['port'] LOG.debug("port_update received for port %s ", port) mac = port['mac_address'] # Put the device name in the updated_devices set. # Do not store port details, as if they're used for processing # notifications there is no guarantee the notifications are # processed in the same order as the relevant API requests. self.updated_devices.add(mac) class MacvtapManager(amb.CommonAgentManagerBase): def __init__(self, interface_mappings): self.interface_mappings = interface_mappings self.validate_interface_mappings() self.mac_device_name_mappings = dict() def validate_interface_mappings(self): for physnet, interface in self.interface_mappings.items(): if not ip_lib.device_exists(interface): LOG.error("Interface %(intf)s for physical network " "%(net)s does not exist. Agent terminated!", {'intf': interface, 'net': physnet}) sys.exit(1) def ensure_port_admin_state(self, device, admin_state_up): LOG.debug("Setting admin_state_up to %s for device %s", admin_state_up, device) dev = ip_lib.IPDevice(self.mac_device_name_mappings[device]) if admin_state_up: dev.link.set_up() else: dev.link.set_down() def get_agent_configurations(self): return {'interface_mappings': self.interface_mappings} def get_agent_id(self): devices = ip_lib.IPWrapper().get_devices(True) for device in devices: mac = ip_lib.get_device_mac(device.name) if mac: return 'macvtap%s' % mac.replace(":", "") LOG.error("Unable to obtain MAC address for unique ID. 
" "Agent terminated!") sys.exit(1) def get_devices_modified_timestamps(self, devices): # TODO(kevinbenton): this should be implemented to detect # rapid Nova instance rebuilds. return {} def get_all_devices(self): devices = set() all_device_names = os.listdir(MACVTAP_FS) # Refresh the mac_device_name mapping self.mac_device_name_mappings = dict() for device_name in all_device_names: if device_name.startswith(constants.MACVTAP_DEVICE_PREFIX): mac = ip_lib.get_device_mac(device_name) self.mac_device_name_mappings[mac] = device_name devices.add(mac) return devices def get_extension_driver_type(self): return EXTENSION_DRIVER_TYPE def get_rpc_callbacks(self, context, agent, sg_agent): return MacvtapRPCCallBack(context, agent, sg_agent) def get_agent_api(self, **kwargs): pass def get_rpc_consumers(self): consumers = [[topics.PORT, topics.UPDATE], [topics.NETWORK, topics.DELETE], [topics.SECURITY_GROUP, topics.UPDATE]] return consumers def plug_interface(self, network_id, network_segment, device, device_owner): # Setting ALLMULTICAST Flag on macvtap device to allow the guest # receiving traffic for arbitrary multicast addresses. # The alternative would be to let libvirt instantiate the macvtap # device with the 'trustGuestRxFilters' option. But doing so, the guest # would be able to change its mac address and therefore the mac # address of the macvtap device. dev = ip_lib.IPDevice(self.mac_device_name_mappings[device]) dev.link.set_allmulticast_on() return True def setup_arp_spoofing_protection(self, device, device_details): pass def delete_arp_spoofing_protection(self, devices): pass def delete_unreferenced_arp_protection(self, current_devices): pass def parse_interface_mappings(): if not cfg.CONF.macvtap.physical_interface_mappings: LOG.error("No physical_interface_mappings provided, but at least " "one mapping is required. Agent terminated!") sys.exit(1) try: interface_mappings = helpers.parse_mappings( cfg.CONF.macvtap.physical_interface_mappings) LOG.info("Interface mappings: %s", interface_mappings) return interface_mappings except ValueError as e: LOG.error("Parsing physical_interface_mappings failed: %s. " "Agent terminated!", e) sys.exit(1) def validate_firewall_driver(): fw_driver = cfg.CONF.SECURITYGROUP.firewall_driver supported_fw_drivers = ['neutron.agent.firewall.NoopFirewallDriver', 'noop'] if fw_driver not in supported_fw_drivers: LOG.error('Unsupported configuration option for "SECURITYGROUP.' 'firewall_driver"! Only the NoopFirewallDriver is ' 'supported by macvtap agent, but "%s" is configured. ' 'Set the firewall_driver to "noop" and start the ' 'agent again. Agent terminated!', fw_driver) sys.exit(1) def main(): common_config.init(sys.argv[1:]) common_config.setup_logging() validate_firewall_driver() interface_mappings = parse_interface_mappings() manager = MacvtapManager(interface_mappings) polling_interval = cfg.CONF.AGENT.polling_interval quitting_rpc_timeout = cfg.CONF.AGENT.quitting_rpc_timeout agent = ca.CommonAgentLoop(manager, polling_interval, quitting_rpc_timeout, constants.AGENT_TYPE_MACVTAP, MACVTAP_AGENT_BINARY) LOG.info("Agent initialized successfully, now running... ") launcher = service.launch(cfg.CONF, agent, restart_method='mutate') launcher.wait() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/macvtap/macvtap_common.py0000644000175000017500000000207100000000000027476 0ustar00coreycorey00000000000000# Copyright (c) 2016 IBM Corp. 
# # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as n_const from neutron_lib.plugins import utils as plugin_utils MAX_VLAN_POSTFIX_LEN = 5 def get_vlan_device_name(src_dev, vlan): """Generating the vlan device name.""" # Ensure that independent of the vlan len the same name prefix is used. src_dev = plugin_utils.get_interface_name( src_dev, max_len=n_const.DEVICE_NAME_MAX_LEN - MAX_VLAN_POSTFIX_LEN) return "%s.%s" % (src_dev, vlan) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.343045 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/macvtap/mech_driver/0000755000175000017500000000000000000000000026410 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/macvtap/mech_driver/__init__.py0000644000175000017500000000000000000000000030507 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/macvtap/mech_driver/mech_macvtap.py0000644000175000017500000001351400000000000031415 0ustar00coreycorey00000000000000# Copyright (c) 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import portbindings from neutron_lib import constants from neutron_lib.plugins.ml2 import api from oslo_log import log from neutron.plugins.ml2.drivers.macvtap import macvtap_common from neutron.plugins.ml2.drivers import mech_agent LOG = log.getLogger(__name__) MACVTAP_MODE_BRIDGE = 'bridge' class MacvtapMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): """Attach to networks using Macvtap L2 agent. The MacvtapMechanismDriver integrates the ml2 plugin with the macvtap L2 agent. Port binding with this driver requires the macvtap agent to be running on the port's host, and that agent to have connectivity to at least one segment of the port's network. 
""" def __init__(self): vif_details = {portbindings.CAP_PORT_FILTER: False, portbindings.VIF_DETAILS_CONNECTIVITY: portbindings.CONNECTIVITY_L2} super(MacvtapMechanismDriver, self).__init__( constants.AGENT_TYPE_MACVTAP, portbindings.VIF_TYPE_MACVTAP, vif_details) def get_allowed_network_types(self, agent): return [constants.TYPE_FLAT, constants.TYPE_VLAN] def get_mappings(self, agent): return agent['configurations'].get('interface_mappings', {}) def check_vlan_transparency(self, context): """Macvtap driver vlan transparency support.""" return False def _is_live_migration(self, context): # We cannot just check if # context.original['host_id'] != context.current['host_id'] # This condition is also true, if nova does a reschedule of a # instance when something went wrong during spawn. In this case, # context.original['host_id'] is set to the failed host. # The only safe way to detect a migration is to look into the binding # profiles 'migrating_to' attribute, which is set by Nova since patch # https://review.opendev.org/#/c/275073/. if not context.original: # new port return False port_profile = context.original.get(portbindings.PROFILE) if port_profile and port_profile.get('migrating_to', None): LOG.debug("Live migration with profile %s detected.", port_profile) return True else: return False def try_to_bind_segment_for_agent(self, context, segment, agent): if self.check_segment_for_agent(segment, agent): vif_details_segment = self.vif_details mappings = self.get_mappings(agent) interface = mappings[segment['physical_network']] network_type = segment[api.NETWORK_TYPE] if network_type == constants.TYPE_VLAN: vlan_id = segment[api.SEGMENTATION_ID] macvtap_src = macvtap_common.get_vlan_device_name(interface, vlan_id) vif_details_segment['vlan'] = vlan_id else: macvtap_src = interface if self._is_live_migration(context): # We can use the original port here, as during live migration # portbinding is done after the migration happened. Nova will # not do a reschedule of the instance migration if binding # fails, but just set the instance into error state. # Due to that we can be sure that the original port is the # migration source port. orig_vif_details = context.original[portbindings.VIF_DETAILS] orig_source = orig_vif_details[ portbindings.VIF_DETAILS_MACVTAP_SOURCE] if orig_source != macvtap_src: source_host = context.original[portbindings.HOST_ID] target_host = agent['host'] LOG.error("Vif binding denied by mechanism driver. " "MacVTap source device '%(target_dev)s' on " "the migration target '%(target_host)s'is " "not equal to device '%(source_dev)s' on " "the migration source '%(source_host)s. 
" "Make sure that the " "interface mapping of macvtap " "agent on both hosts is equal " "for the physical network '%(physnet)s'!", {'source_dev': orig_source, 'target_dev': macvtap_src, 'target_host': target_host, 'source_host': source_host, 'physnet': segment['physical_network']}) return False vif_details_segment['physical_interface'] = interface vif_details_segment['macvtap_source'] = macvtap_src vif_details_segment['macvtap_mode'] = MACVTAP_MODE_BRIDGE LOG.debug("Macvtap vif_details added to context binding: %s", vif_details_segment) context.set_binding(segment[api.ID], self.vif_type, vif_details_segment) return True return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/mech_agent.py0000644000175000017500000003777700000000000025157 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import uuid from neutron_lib.api.definitions import portbindings from neutron_lib.callbacks import resources from neutron_lib import constants as const from neutron_lib.placement import utils as place_utils from neutron_lib.plugins.ml2 import api from oslo_log import log import six from neutron._i18n import _ from neutron.db import provisioning_blocks LOG = log.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class AgentMechanismDriverBase(api.MechanismDriver): """Base class for drivers that attach to networks using an L2 agent. The AgentMechanismDriverBase provides common code for mechanism drivers that integrate the ml2 plugin with L2 agents. Port binding with this driver requires the driver's associated agent to be running on the port's host, and that agent to have connectivity to at least one segment of the port's network. MechanismDrivers using this base class must pass the agent type to __init__(), and must implement try_to_bind_segment_for_agent(). """ def __init__(self, agent_type, supported_vnic_types=[portbindings.VNIC_NORMAL]): """Initialize base class for specific L2 agent type. 
:param agent_type: Constant identifying agent type in agents_db :param supported_vnic_types: The binding:vnic_type values we can bind """ self.agent_type = agent_type self.supported_vnic_types = supported_vnic_types def initialize(self): pass def create_port_precommit(self, context): self._insert_provisioning_block(context) def update_port_precommit(self, context): if context.host == context.original_host: return self._insert_provisioning_block(context) def _insert_provisioning_block(self, context): # we insert a status barrier to prevent the port from transitioning # to active until the agent reports back that the wiring is done port = context.current if not context.host or port['status'] == const.PORT_STATUS_ACTIVE: # no point in putting in a block if the status is already ACTIVE return vnic_type = context.current.get(portbindings.VNIC_TYPE, portbindings.VNIC_NORMAL) if vnic_type not in self.supported_vnic_types: # we check the VNIC type because there could be multiple agents # on a single host with different VNIC types return if context.host_agents(self.agent_type): provisioning_blocks.add_provisioning_component( context._plugin_context, port['id'], resources.PORT, provisioning_blocks.L2_AGENT_ENTITY) def bind_port(self, context): LOG.debug("Attempting to bind port %(port)s on " "network %(network)s", {'port': context.current['id'], 'network': context.network.current['id']}) vnic_type = context.current.get(portbindings.VNIC_TYPE, portbindings.VNIC_NORMAL) if vnic_type not in self.supported_vnic_types: LOG.debug("Refusing to bind due to unsupported vnic_type: %s", vnic_type) return agents = context.host_agents(self.agent_type) if not agents: LOG.debug("Port %(pid)s on network %(network)s not bound, " "no agent of type %(at)s registered on host %(host)s", {'pid': context.current['id'], 'at': self.agent_type, 'network': context.network.current['id'], 'host': context.host}) for agent in agents: LOG.debug("Checking agent: %s", agent) if agent['alive']: if (vnic_type == portbindings.VNIC_SMARTNIC and not agent['configurations'].get('baremetal_smartnic')): LOG.debug('Agent on host %s can not bind SmartNIC ' 'port %s', agent['host'], context.current['id']) continue for segment in context.segments_to_bind: if self.try_to_bind_segment_for_agent(context, segment, agent): LOG.debug("Bound using segment: %s", segment) return else: LOG.warning("Refusing to bind port %(pid)s to dead agent: " "%(agent)s", {'pid': context.current['id'], 'agent': agent}) @abc.abstractmethod def try_to_bind_segment_for_agent(self, context, segment, agent): """Try to bind with segment for agent. :param context: PortContext instance describing the port :param segment: segment dictionary describing segment to bind :param agent: agents_db entry describing agent to bind :returns: True iff segment has been bound for agent Called outside any transaction during bind_port() so that derived MechanismDrivers can use agent_db data along with built-in knowledge of the corresponding agent's capabilities to attempt to bind to the specified network segment for the agent. If the segment can be bound for the agent, this function must call context.set_binding() with appropriate values and then return True. Otherwise, it must return False. 
""" def blacklist_supported_vnic_types(self, vnic_types, blacklist): """Validate the blacklist and blacklist the supported_vnic_types :param vnic_types: The supported_vnic_types list :param blacklist: The blacklist as in vnic_type_blacklist :return The blacklisted vnic_types """ if not blacklist: return vnic_types # Not valid values in the blacklist: if not all(bl in vnic_types for bl in blacklist): raise ValueError(_("Not all of the items from vnic_type_blacklist " "are valid vnic_types for %(agent)s mechanism " "driver. The valid values are: " "%(valid_vnics)s.") % {'agent': self.agent_type, 'valid_vnics': vnic_types}) supported_vnic_types = [vnic_t for vnic_t in vnic_types if vnic_t not in blacklist] # Nothing left in the supported vnict types list: if len(supported_vnic_types) < 1: raise ValueError(_("All possible vnic_types were blacklisted for " "%s mechanism driver!") % self.agent_type) return supported_vnic_types def _possible_agents_for_port(self, context): agent_filters = { 'host': [context.current['binding:host_id']], 'agent_type': [self.agent_type], 'admin_state_up': [True], # By not filtering for 'alive' we may report being responsible # and still not being able to handle the binding. But that case # will be properly logged and handled very soon. That is when # trying to bind with a dead agent. } return context._plugin.get_agents( context._plugin_context, filters=agent_filters, ) def responsible_for_ports_allocation(self, context): """Report if an agent is responsible for a resource provider. :param context: PortContext instance describing the port :returns: True for responsible, False for not responsible An agent based mechanism driver is reponsible for a resource provider if an agent of it is responsible for that resource provider. An agent reports responsibility by including the resource provider in the configurations field of the agent heartbeat. """ uuid_ns = self.resource_provider_uuid5_namespace if uuid_ns is None: return False if 'allocation' not in context.current['binding:profile']: return False rp = uuid.UUID(context.current['binding:profile']['allocation']) host_agents = self._possible_agents_for_port(context) reported = {} for agent in host_agents: if 'resource_provider_bandwidths' in agent['configurations']: for device in agent['configurations'][ 'resource_provider_bandwidths'].keys(): device_rp_uuid = place_utils.device_resource_provider_uuid( namespace=uuid_ns, host=agent['host'], device=device) if device_rp_uuid == rp: reported[agent['id']] = agent if len(reported) == 1: agent = list(reported.values())[0] LOG.debug("Agent %(agent)s of type %(agent_type)s reports to be " "responsible for resource provider %(rsc_provider)s", {'agent': agent['id'], 'agent_type': agent['agent_type'], 'rsc_provider': rp}) return True elif len(reported) > 1: LOG.error("Agent misconfiguration, multiple agents on the same " "host %(host)s reports being responsible for resource " "provider %(rsc_provider)s: %(agents)s", {'host': context.current['binding:host_id'], 'rsc_provider': rp, 'agents': reported.keys()}) return False else: # not responsible, must be somebody else return False @six.add_metaclass(abc.ABCMeta) class SimpleAgentMechanismDriverBase(AgentMechanismDriverBase): """Base class for simple drivers using an L2 agent. The SimpleAgentMechanismDriverBase provides common code for mechanism drivers that integrate the ml2 plugin with L2 agents, where the binding:vif_type and binding:vif_details values are the same for all bindings. 
Port binding with this driver requires the driver's associated agent to be running on the port's host, and that agent to have connectivity to at least one segment of the port's network. MechanismDrivers using this base class must pass the agent type and the values for binding:vif_type and binding:vif_details to __init__(), and must implement check_segment_for_agent(). """ def __init__(self, agent_type, vif_type, vif_details, supported_vnic_types=[portbindings.VNIC_NORMAL]): """Initialize base class for specific L2 agent type. :param agent_type: Constant identifying agent type in agents_db :param vif_type: Value for binding:vif_type when bound :param vif_details: Dictionary with details for VIF driver when bound :param supported_vnic_types: The binding:vnic_type values we can bind """ super(SimpleAgentMechanismDriverBase, self).__init__( agent_type, supported_vnic_types) self.vif_type = vif_type self.vif_details = {portbindings.VIF_DETAILS_CONNECTIVITY: portbindings.CONNECTIVITY_LEGACY} self.vif_details.update(vif_details) def try_to_bind_segment_for_agent(self, context, segment, agent): if self.check_segment_for_agent(segment, agent): context.set_binding(segment[api.ID], self.get_vif_type(context, agent, segment), self.get_vif_details(context, agent, segment)) return True else: return False def get_vif_details(self, context, agent, segment): return self.vif_details def get_supported_vif_type(self, agent): """Return supported vif type appropriate for the agent.""" return self.vif_type def get_vif_type(self, context, agent, segment): """Return the vif type appropriate for the agent and segment.""" return self.vif_type @abc.abstractmethod def get_allowed_network_types(self, agent=None): """Return the agent's or driver's allowed network types. For example: return ('flat', ...). You can also refer to the configuration the given agent exposes. """ pass @abc.abstractmethod def get_mappings(self, agent): """Return the agent's bridge or interface mappings. For example: agent['configurations'].get('bridge_mappings', {}). """ pass def physnet_in_mappings(self, physnet, mappings): """Is the physical network part of the given mappings?""" return physnet in mappings def filter_hosts_with_segment_access( self, context, segments, candidate_hosts, agent_getter): hosts = set() filters = {'host': candidate_hosts, 'agent_type': [self.agent_type]} for agent in agent_getter(context, filters=filters): if any(self.check_segment_for_agent(s, agent) for s in segments): hosts.add(agent['host']) return hosts def check_segment_for_agent(self, segment, agent): """Check if segment can be bound for agent. :param segment: segment dictionary describing segment to bind :param agent: agents_db entry describing agent to bind :returns: True iff segment can be bound for agent Called outside any transaction during bind_port so that derived MechanismDrivers can use agent_db data along with built-in knowledge of the corresponding agent's capabilities to determine whether or not the specified network segment can be bound for the agent. 
""" mappings = self.get_mappings(agent) allowed_network_types = self.get_allowed_network_types(agent) LOG.debug("Checking segment: %(segment)s " "for mappings: %(mappings)s " "with network types: %(network_types)s", {'segment': segment, 'mappings': mappings, 'network_types': allowed_network_types}) network_type = segment[api.NETWORK_TYPE] if network_type not in allowed_network_types: LOG.debug( 'Network %(network_id)s with segment %(id)s is type ' 'of %(network_type)s but agent %(agent)s or mechanism driver ' 'only support %(allowed_network_types)s.', {'network_id': segment['network_id'], 'id': segment['id'], 'network_type': network_type, 'agent': agent['host'], 'allowed_network_types': allowed_network_types}) return False if network_type in [const.TYPE_FLAT, const.TYPE_VLAN]: physnet = segment[api.PHYSICAL_NETWORK] if not self.physnet_in_mappings(physnet, mappings): LOG.debug( 'Network %(network_id)s with segment %(id)s is connected ' 'to physical network %(physnet)s, but agent %(agent)s ' 'reported physical networks %(mappings)s. ' 'The physical network must be configured on the ' 'agent if binding is to succeed.', {'network_id': segment['network_id'], 'id': segment['id'], 'physnet': physnet, 'agent': agent['host'], 'mappings': mappings}) return False return True ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.343045 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/mech_sriov/0000755000175000017500000000000000000000000024624 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/mech_sriov/__init__.py0000644000175000017500000000000000000000000026723 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.343045 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/mech_sriov/agent/0000755000175000017500000000000000000000000025722 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/mech_sriov/agent/__init__.py0000644000175000017500000000000000000000000030021 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.343045 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/mech_sriov/agent/common/0000755000175000017500000000000000000000000027212 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/mech_sriov/agent/common/__init__.py0000644000175000017500000000000000000000000031311 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/mech_sriov/agent/common/config.py0000644000175000017500000000441300000000000031033 0ustar00coreycorey00000000000000# Copyright 2014 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from neutron._i18n import _ from neutron.conf.agent import common as config from neutron.conf.plugins.ml2.drivers import agent from neutron.conf.plugins.ml2.drivers.mech_sriov import agent_common as \ agent_common_config def parse_exclude_devices(exclude_list): """Parse Exclude devices list parses excluded device list in the form: dev_name:pci_dev_1;pci_dev_2 @param exclude list: list of string pairs in "key:value" format the key part represents the network device name the value part is a list of PCI slots separated by ";" """ exclude_mapping = {} for dev_mapping in exclude_list: try: dev_name, exclude_devices = dev_mapping.split(":", 1) except ValueError: raise ValueError(_("Invalid mapping: '%s'") % dev_mapping) dev_name = dev_name.strip() if not dev_name: raise ValueError(_("Missing key in mapping: '%s'") % dev_mapping) if dev_name in exclude_mapping: raise ValueError(_("Device %(dev_name)s in mapping: %(mapping)s " "not unique") % {'dev_name': dev_name, 'mapping': dev_mapping}) exclude_devices_list = exclude_devices.split(";") exclude_devices_set = set() for dev in exclude_devices_list: dev = dev.strip() if dev: exclude_devices_set.add(dev) exclude_mapping[dev_name] = exclude_devices_set return exclude_mapping agent.register_agent_opts() agent_common_config.register_agent_sriov_nic_opts() config.register_agent_state_opts_helper(cfg.CONF) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/mech_sriov/agent/common/exceptions.py0000644000175000017500000000217500000000000031752 0ustar00coreycorey00000000000000# Copyright 2014 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
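# --- Illustrative sketch (not part of the original module):
# parse_exclude_devices() above turns the agent's exclude_devices option
# into a {device_name: set_of_pci_slots} mapping. Expected input/output
# shape, with values assumed for illustration:
exclude_list = ["eth1:0000:07:00.2;0000:07:00.3", "eth2:"]
# parse_exclude_devices(exclude_list) returns:
# {'eth1': {'0000:07:00.2', '0000:07:00.3'}, 'eth2': set()}
# i.e. an empty value part yields an empty exclusion set for that device,
# while a repeated device name raises ValueError.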
from neutron_lib import exceptions from neutron._i18n import _ class SriovNicError(exceptions.NeutronException): pass class InvalidDeviceError(SriovNicError): message = _("Invalid Device %(dev_name)s: %(reason)s") class IpCommandError(SriovNicError): message = _("ip command failed on device %(dev_name)s: %(reason)s") class IpCommandOperationNotSupportedError(SriovNicError): message = _("Operation not supported on device %(dev_name)s") class InvalidPciSlotError(SriovNicError): message = _("Invalid pci slot %(pci_slot)s")
neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py
# Copyright 2014 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import glob import os import re from neutron_lib.utils import helpers from oslo_log import log as logging from neutron._i18n import _ from neutron.agent.linux import ip_link_support from neutron.plugins.ml2.drivers.mech_sriov.agent.common \ import exceptions as exc from neutron.plugins.ml2.drivers.mech_sriov.agent import pci_lib LOG = logging.getLogger(__name__) class PciOsWrapper(object): """OS wrapper for checking virtual functions""" DEVICE_PATH = "/sys/class/net/%s/device" PCI_PATH = "/sys/class/net/%s/device/virtfn%s/net" NUMVFS_PATH = "/sys/class/net/%s/device/sriov_numvfs" VIRTFN_FORMAT = r"^virtfn(?P<vf_index>\d+)" VIRTFN_REG_EX = re.compile(VIRTFN_FORMAT) MAC_VTAP_PREFIX = "upper_macvtap*" @classmethod def scan_vf_devices(cls, dev_name): """Scan os directories to get VF devices @param dev_name: pf network device name @return: list of virtual functions """ vf_list = [] dev_path = cls.DEVICE_PATH % dev_name if not os.path.isdir(dev_path): LOG.error("Failed to get devices for %s", dev_name) raise exc.InvalidDeviceError(dev_name=dev_name, reason=_("Device not found")) file_list = os.listdir(dev_path) for file_name in file_list: pattern_match = cls.VIRTFN_REG_EX.match(file_name) if pattern_match: vf_index = int(pattern_match.group("vf_index")) file_path = os.path.join(dev_path, file_name) if os.path.islink(file_path): file_link = os.readlink(file_path) pci_slot = os.path.basename(file_link) vf_list.append((pci_slot, vf_index)) return vf_list @classmethod def pf_device_exists(cls, dev_name): return os.path.isdir(cls.DEVICE_PATH % dev_name) @classmethod def is_assigned_vf_direct(cls, dev_name, vf_index): """Check if VF is assigned. Checks if a given vf index of a given device name is assigned as PCI passthrough by checking the relevant path in the system: VF is assigned if: Direct VF: PCI_PATH does not exist. @param dev_name: pf network device name @param vf_index: vf index @return: True if VF is assigned, False otherwise """ path = cls.PCI_PATH % (dev_name, vf_index) return not os.path.isdir(path) @classmethod def get_vf_macvtap_upper_devs(cls, dev_name, vf_index): """Retrieve VF netdev upper (macvtap) devices.
@param dev_name: pf network device name @param vf_index: vf index @return: list of upper net devices associated with the VF """ path = cls.PCI_PATH % (dev_name, vf_index) upper_macvtap_path = os.path.join(path, "*", cls.MAC_VTAP_PREFIX) devs = [os.path.basename(dev) for dev in glob.glob(upper_macvtap_path)] # file name is in the format of upper_<netdev name>; extract netdev name return [dev.split('_')[1] for dev in devs] @classmethod def is_assigned_vf_macvtap(cls, dev_name, vf_index): """Check if VF is assigned. Checks if a given vf index of a given device name is assigned as macvtap by checking the relevant path in the system: Macvtap VF: upper_macvtap path exists. @param dev_name: pf network device name @param vf_index: vf index @return: True if VF is assigned, False otherwise """ return bool(cls.get_vf_macvtap_upper_devs(dev_name, vf_index)) @classmethod def get_numvfs(cls, dev_name): """Get configured number of VFs on device @param dev_name: pf network device name @return: integer number of VFs or -1 if sriov_numvfs file not found (device doesn't support this config) """ try: with open(cls.NUMVFS_PATH % dev_name) as f: numvfs = int(f.read()) LOG.debug("Number of VFs configured on device %s: %s", dev_name, numvfs) return numvfs except IOError: LOG.warning("Error reading sriov_numvfs file for device %s, " "probably not supported by this device", dev_name) return -1 class EmbSwitch(object): """Class to manage logical embedded switch entity. Embedded Switch object is a logical entity representing all VFs connected to the same physical network. Each physical network is mapped to a PF network device interface, meaning all its VFs, excluding the devices in the exclude_device list. @ivar pci_slot_map: dictionary for mapping each pci slot to vf index @ivar pci_dev_wrapper: pci device wrapper """ def __init__(self, dev_name, exclude_devices): """Constructor @param dev_name: network device name @param exclude_devices: list of pci slots to exclude """ self.dev_name = dev_name self.pci_slot_map = {} self.scanned_pci_list = [] self.pci_dev_wrapper = pci_lib.PciDeviceIPWrapper(dev_name) self._load_devices(exclude_devices) def _load_devices(self, exclude_devices): """Load devices from driver and filter if needed. @param exclude_devices: excluded devices mapping device_name: pci slots """ self.scanned_pci_list = PciOsWrapper.scan_vf_devices(self.dev_name) for pci_slot, vf_index in self.scanned_pci_list: if pci_slot not in exclude_devices: self.pci_slot_map[pci_slot] = vf_index def get_pci_slot_list(self): """Get list of VF addresses.""" return self.pci_slot_map.keys() def get_assigned_devices_info(self): """Get assigned Virtual Functions mac and pci slot information and populates vf_to_pci_slot mappings @return: list of VF pair (mac address, pci slot) """ assigned_devices_info = [] for pci_slot, vf_index in self.pci_slot_map.items(): mac = self.get_pci_device(pci_slot) if mac: assigned_devices_info.append((mac, pci_slot)) return assigned_devices_info def get_device_state(self, pci_slot): """Get device state. @param pci_slot: Virtual Function address """ vf_index = self._get_vf_index(pci_slot) return self.pci_dev_wrapper.get_vf_state(vf_index) def set_device_state(self, pci_slot, state, propagate_uplink_state): """Set device state.
@param pci_slot: Virtual Function address @param state: link state """ vf_index = self._get_vf_index(pci_slot) return self.pci_dev_wrapper.set_vf_state(vf_index, state, auto=propagate_uplink_state) def set_device_rate(self, pci_slot, rate_type, rate_kbps): """Set device rate: rate (max_tx_rate), min_tx_rate @param pci_slot: Virtual Function address @param rate_type: device rate name type. Could be 'rate' and 'min_tx_rate'. @param rate_kbps: device rate in kbps """ vf_index = self._get_vf_index(pci_slot) # NOTE(ralonsoh): ip link sets rate in Mbps therefore we need to # convert the rate_kbps value from kbps to Mbps. # Zero means to disable the rate so the lowest rate available is 1Mbps. # Floating numbers are not allowed if 0 < rate_kbps < 1000: rate_mbps = 1 else: rate_mbps = helpers.round_val(rate_kbps / 1000.0) log_dict = { 'rate_mbps': rate_mbps, 'rate_kbps': rate_kbps, 'vf_index': vf_index, 'rate_type': rate_type } if rate_kbps % 1000 != 0: LOG.debug("'%(rate_type)s' for SR-IOV ports is counted in Mbps; " "setting %(rate_mbps)s Mbps limit for port %(vf_index)s " "instead of %(rate_kbps)s kbps", log_dict) else: LOG.debug("Setting %(rate_mbps)s Mbps limit for port %(vf_index)s", log_dict) return self.pci_dev_wrapper.set_vf_rate(vf_index, rate_type, rate_mbps) def _get_vf_index(self, pci_slot): vf_index = self.pci_slot_map.get(pci_slot) if vf_index is None: LOG.warning("Cannot find vf index for pci slot %s", pci_slot) raise exc.InvalidPciSlotError(pci_slot=pci_slot) return vf_index def set_device_spoofcheck(self, pci_slot, enabled): """Set device spoofchecking @param pci_slot: Virtual Function address @param enabled: True to enable spoofcheck, False to disable """ vf_index = self.pci_slot_map.get(pci_slot) if vf_index is None: raise exc.InvalidPciSlotError(pci_slot=pci_slot) return self.pci_dev_wrapper.set_vf_spoofcheck(vf_index, enabled) def _get_macvtap_mac(self, vf_index): upperdevs = PciOsWrapper.get_vf_macvtap_upper_devs( self.dev_name, vf_index) # NOTE(adrianc) although there can be many macvtap upper # devices, we expect to have excatly one. if len(upperdevs) > 1: LOG.warning("Found more than one macvtap upper device for PF " "%(pf)s with VF index %(vf_index)s.", {"pf": self.dev_name, "vf_index": vf_index}) upperdev = upperdevs[0] return pci_lib.PciDeviceIPWrapper( upperdev).device(upperdev).link.address def get_pci_device(self, pci_slot): """Get mac address for given Virtual Function address @param pci_slot: pci slot @return: MAC address of virtual function """ if not PciOsWrapper.pf_device_exists(self.dev_name): # If the root PCI path does not exist, then the VF cannot # actually have been allocated and there is no way we can # manage it. return None vf_index = self.pci_slot_map.get(pci_slot) mac = None if vf_index is not None: # NOTE(adrianc) for VF passthrough take administrative mac from PF # netdevice, for macvtap take mac directly from macvtap interface. # This is done to avoid relying on hypervisor [lack of] logic to # keep effective and administrative mac in sync. 
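# --- Illustrative sketch (not part of the original module):
# EmbSwitch.set_device_rate() above converts a Neutron QoS rate in kbps to
# the whole Mbps that "ip link set <pf> vf <idx> rate" accepts: 0 stays 0
# (rate disabled), anything in (0, 1000) kbps is raised to the 1 Mbps
# minimum, and other values are rounded half-up as helpers.round_val()
# does. A minimal standalone version of that rule:
def _rate_kbps_to_mbps(rate_kbps):
    if rate_kbps == 0:
        return 0
    if rate_kbps < 1000:
        return 1
    return int(rate_kbps / 1000.0 + 0.5)  # round half up for positive input
# Examples: _rate_kbps_to_mbps(0) -> 0, _rate_kbps_to_mbps(500) -> 1,
# _rate_kbps_to_mbps(2500) -> 3, _rate_kbps_to_mbps(64000) -> 64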
if PciOsWrapper.is_assigned_vf_direct(self.dev_name, vf_index): macs = self.pci_dev_wrapper.get_assigned_macs([vf_index]) mac = macs.get(vf_index) elif PciOsWrapper.is_assigned_vf_macvtap( self.dev_name, vf_index): mac = self._get_macvtap_mac(vf_index) return mac class ESwitchManager(object): """Manages logical Embedded Switch entities for physical network.""" def __new__(cls): # make it a singleton if not hasattr(cls, '_instance'): cls._instance = super(ESwitchManager, cls).__new__(cls) cls.emb_switches_map = {} cls.pci_slot_map = {} cls.skipped_devices = set() return cls._instance def device_exists(self, device_mac, pci_slot): """Verify if device exists. Check if a device mac exists and matches the given VF pci slot @param device_mac: device mac @param pci_slot: VF address """ embedded_switch = self._get_emb_eswitch(device_mac, pci_slot) if embedded_switch: return True return False def get_assigned_devices_info(self, phys_net=None): """Get all assigned devices. Get all assigned devices belongs to given embedded switch @param phys_net: physical network, if none get all assigned devices @return: set of assigned VFs (mac address, pci slot) pair """ if phys_net: eswitch_objects = self.emb_switches_map.get(phys_net, set()) else: eswitch_objects = set() for eswitch_list in self.emb_switches_map.values(): eswitch_objects |= set(eswitch_list) assigned_devices = set() for embedded_switch in eswitch_objects: for device in embedded_switch.get_assigned_devices_info(): assigned_devices.add(device) return assigned_devices def get_device_state(self, device_mac, pci_slot): """Get device state. Get the device state (up/enable, down/disable, or auto) @param device_mac: device mac @param pci_slot: VF PCI slot @return: device state (enable/disable/auto) None if failed """ embedded_switch = self._get_emb_eswitch(device_mac, pci_slot) if embedded_switch: return embedded_switch.get_device_state(pci_slot) return pci_lib.LinkState.DISABLE def set_device_max_rate(self, device_mac, pci_slot, max_kbps): """Set device max rate Sets the device max rate in kbps @param device_mac: device mac @param pci_slot: pci slot @param max_kbps: device max rate in kbps """ embedded_switch = self._get_emb_eswitch(device_mac, pci_slot) if embedded_switch: embedded_switch.set_device_rate( pci_slot, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE, max_kbps) def set_device_min_tx_rate(self, device_mac, pci_slot, min_kbps): """Set device min_tx_rate Sets the device min_tx_rate in kbps @param device_mac: device mac @param pci_slot: pci slot @param max_kbps: device min_tx_rate in kbps """ embedded_switch = self._get_emb_eswitch(device_mac, pci_slot) if embedded_switch: embedded_switch.set_device_rate( pci_slot, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_MIN_TX_RATE, min_kbps) def set_device_state(self, device_mac, pci_slot, admin_state_up, propagate_uplink_state): """Set device state Sets the device state (up or down) @param device_mac: device mac @param pci_slot: pci slot @param admin_state_up: device admin state True/False @param propagate_uplink_state: follow uplink state True/False """ embedded_switch = self._get_emb_eswitch(device_mac, pci_slot) if embedded_switch: embedded_switch.set_device_state(pci_slot, admin_state_up, propagate_uplink_state) def set_device_spoofcheck(self, device_mac, pci_slot, enabled): """Set device spoofcheck Sets device spoofchecking (enabled or disabled) @param device_mac: device mac @param pci_slot: pci slot @param enabled: device spoofchecking """ embedded_switch = 
self._get_emb_eswitch(device_mac, pci_slot) if embedded_switch: embedded_switch.set_device_spoofcheck(pci_slot, enabled) def _process_emb_switch_map(self, phys_net, dev_name, exclude_devices): """Process emb_switch_map @param phys_net: physical network @param dev_name: device name @param exclude_devices: PCI devices to ignore. """ emb_switches = self.emb_switches_map.get(phys_net, []) for switch in emb_switches: if switch.dev_name == dev_name: if not PciOsWrapper.pf_device_exists(dev_name): # If the device is given to the VM as PCI-PT # then delete the respective emb_switch from map self.emb_switches_map.get(phys_net).remove(switch) return # We don't know about this device at the moment, so add to the map. if PciOsWrapper.pf_device_exists(dev_name): self._create_emb_switch( phys_net, dev_name, exclude_devices.get(dev_name, set())) def discover_devices(self, device_mappings, exclude_devices): """Discover which Virtual functions to manage. Discover devices, and create embedded switch object for network device @param device_mappings: device mapping physical_network:device_name @param exclude_devices: excluded devices mapping device_name: pci slots """ if exclude_devices is None: exclude_devices = {} for phys_net, dev_names in device_mappings.items(): for dev_name in dev_names: self._process_emb_switch_map(phys_net, dev_name, exclude_devices) def _create_emb_switch(self, phys_net, dev_name, exclude_devices): embedded_switch = EmbSwitch(dev_name, exclude_devices) numvfs = PciOsWrapper.get_numvfs(dev_name) if numvfs == 0: # numvfs might be 0 on pre-up state of a device # giving such devices one more chance to initialize if dev_name not in self.skipped_devices: self.skipped_devices.add(dev_name) LOG.info("Device %s has 0 VFs configured. Skipping " "for now to let the device initialize", dev_name) return else: # looks like device indeed has 0 VFs configured # it is probably used just as direct-physical LOG.info("Device %s has 0 VFs configured", dev_name) numvfs_cur = len(embedded_switch.scanned_pci_list) if numvfs >= 0 and numvfs > numvfs_cur: LOG.info("Not all VFs were initialized on device %(device)s: " "expected - %(expected)s, actual - %(actual)s. Skipping.", {'device': dev_name, 'expected': numvfs, 'actual': numvfs_cur}) self.skipped_devices.add(dev_name) return self.emb_switches_map.setdefault(phys_net, []).append(embedded_switch) for pci_slot in embedded_switch.get_pci_slot_list(): self.pci_slot_map[pci_slot] = embedded_switch self.skipped_devices.discard(dev_name) def _get_emb_eswitch(self, device_mac, pci_slot): """Get embedded switch. Get embedded switch by pci slot and validate pci has device mac @param device_mac: device mac @param pci_slot: pci slot """ embedded_switch = self.pci_slot_map.get(pci_slot) if embedded_switch: used_device_mac = embedded_switch.get_pci_device(pci_slot) if used_device_mac != device_mac: LOG.warning("device pci mismatch: %(device_mac)s " "- %(pci_slot)s", {"device_mac": device_mac, "pci_slot": pci_slot}) embedded_switch = None return embedded_switch def clear_max_rate(self, pci_slot): """Clear the VF "rate" parameter Clear the "rate" configuration from VF by setting it to 0. @param pci_slot: VF PCI slot """ self._clear_rate( pci_slot, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE) def clear_min_tx_rate(self, pci_slot): """Clear the VF "min_tx_rate" parameter Clear the "min_tx_rate" configuration from VF by setting it to 0. 
@param pci_slot: VF PCI slot """ self._clear_rate( pci_slot, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_MIN_TX_RATE) def _clear_rate(self, pci_slot, rate_type): """Clear the VF rate parameter specified in rate_type Clear the rate configuration from VF by setting it to 0. @param pci_slot: VF PCI slot @param rate_type: rate to clear ('rate', 'min_tx_rate') """ # NOTE(Moshe Levi): we don't use the self._get_emb_eswitch here, # because when clearing the VF it may be not assigned. This happens # when libvirt releases the VF back to the hypervisor on delete VM. # Therefore we should just clear the VF rate according to pci_slot no # matter if VF is assigned or not. embedded_switch = self.pci_slot_map.get(pci_slot) if embedded_switch: # NOTE(Moshe Levi): check the pci_slot is not assigned to some # other port before resetting the rate. if embedded_switch.get_pci_device(pci_slot) is None: embedded_switch.set_device_rate(pci_slot, rate_type, 0) else: LOG.warning("VF with PCI slot %(pci_slot)s is already " "assigned; skipping reset for '%(rate_type)s' " "device configuration parameter", {'pci_slot': pci_slot, 'rate_type': rate_type}) else: LOG.error("PCI slot %(pci_slot)s has no mapping to Embedded " "Switch; skipping", {'pci_slot': pci_slot}) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.343045 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/0000755000175000017500000000000000000000000031474 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/__init__.py0000644000175000017500000000000000000000000033573 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/qos_driver.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/qos_driver.0000644000175000017500000001011100000000000033644 0ustar00coreycorey00000000000000# Copyright 2015 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from neutron.agent.l2.extensions import qos_linux as qos from neutron.plugins.ml2.drivers.mech_sriov.agent.common import ( exceptions as exc) from neutron.plugins.ml2.drivers.mech_sriov.agent import eswitch_manager as esm from neutron.services.qos.drivers.sriov import driver LOG = logging.getLogger(__name__) class QosSRIOVAgentDriver(qos.QosLinuxAgentDriver): SUPPORTED_RULES = driver.SUPPORTED_RULES def __init__(self): super(QosSRIOVAgentDriver, self).__init__() self.eswitch_mgr = None def initialize(self): self.eswitch_mgr = esm.ESwitchManager() def create_bandwidth_limit(self, port, rule): self.update_bandwidth_limit(port, rule) def update_bandwidth_limit(self, port, rule): pci_slot = port['profile'].get('pci_slot') device = port['device'] self._set_vf_max_rate(device, pci_slot, rule.max_kbps) def delete_bandwidth_limit(self, port): pci_slot = port['profile'].get('pci_slot') if port.get('device_owner') is None: self.eswitch_mgr.clear_max_rate(pci_slot) else: device = port['device'] self._set_vf_max_rate(device, pci_slot) def _set_vf_max_rate(self, device, pci_slot, max_kbps=0): if self.eswitch_mgr.device_exists(device, pci_slot): try: self.eswitch_mgr.set_device_max_rate( device, pci_slot, max_kbps) except exc.SriovNicError: LOG.exception( "Failed to set device %s max rate", device) else: LOG.info("No device with MAC %s defined on agent.", device) # TODO(ihrachys): those handlers are pretty similar, probably could make # use of some code deduplication def create_minimum_bandwidth(self, port, rule): self.update_minimum_bandwidth(port, rule) def update_minimum_bandwidth(self, port, rule): pci_slot = port['profile'].get('pci_slot') device = port['device'] self._set_vf_min_tx_rate(device, pci_slot, rule.min_kbps) def delete_minimum_bandwidth(self, port): pci_slot = port['profile'].get('pci_slot') if port.get('device_owner') is None: self.eswitch_mgr.clear_min_tx_rate(pci_slot) else: device = port['device'] self._set_vf_min_tx_rate(device, pci_slot) # Note(lajoskatona): As minimum bandwidth rule was allowed to be used by # OVS and SRIOV even with ingress direction for the placement based # enforcement, but the dataplane enforcement implementation is not yet # ready this method is empty. # For details see: # RFE for placement based enforcement: # https://bugs.launchpad.net/neutron/+bug/1578989 # RFE for dataplane based enforcement: # https://bugs.launchpad.net/neutron/+bug/1560963 def delete_minimum_bandwidth_ingress(self, port): LOG.debug("Minimum bandwidth rule for ingress direction was deleted " "for port %s", port['port_id']) def _set_vf_min_tx_rate(self, device, pci_slot, min_tx_kbps=0): if self.eswitch_mgr.device_exists(device, pci_slot): try: self.eswitch_mgr.set_device_min_tx_rate( device, pci_slot, min_tx_kbps) except exc.SriovNicError: LOG.exception( "Failed to set device %s min_tx_rate", device) else: LOG.info("No device with MAC %s defined on agent.", device) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py0000644000175000017500000001513300000000000027700 0ustar00coreycorey00000000000000# Copyright 2014 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re

from oslo_log import log as logging

from neutron.agent.linux import ip_lib
from neutron.plugins.ml2.drivers.mech_sriov.agent.common \
    import exceptions as exc

LOG = logging.getLogger(__name__)


class LinkState(object):
    ENABLE = "enable"
    DISABLE = "disable"
    AUTO = "auto"


class PciDeviceIPWrapper(ip_lib.IPWrapper):
    """Wrapper class for ip link commands.

    Wrapper for getting/setting PCI device details using "ip link ...".
    """

    VF_PATTERN = r"^vf\s+(?P<vf_index>\d+)\s+"
    MAC_PATTERN = r"MAC\s+(?P<mac>[a-fA-F0-9:]+),"
    STATE_PATTERN = r"\s+link-state\s+(?P<state>\w+)"
    ANY_PATTERN = ".*,"

    VF_LINE_FORMAT = VF_PATTERN + MAC_PATTERN + ANY_PATTERN + STATE_PATTERN
    VF_DETAILS_REG_EX = re.compile(VF_LINE_FORMAT)

    IP_LINK_OP_NOT_SUPPORTED = 'RTNETLINK answers: Operation not supported'

    def __init__(self, dev_name):
        super(PciDeviceIPWrapper, self).__init__()
        self.dev_name = dev_name

    def _set_feature(self, vf_index, feature, value):
        """Sets a VF feature.

        Checks whether the feature is unsupported, or whether some general
        error occurred during the ip link invocation, and raises an
        exception accordingly.

        :param vf_index: vf index
        :param feature: name of a feature to be passed to ip link,
                        such as 'state' or 'spoofchk'
        :param value: value of the feature setting
        """
        try:
            self._as_root([], "link", ("set", self.dev_name, "vf",
                                       str(vf_index), feature, value))
        except Exception as e:
            if self.IP_LINK_OP_NOT_SUPPORTED in str(e):
                raise exc.IpCommandOperationNotSupportedError(
                    dev_name=self.dev_name)
            else:
                raise exc.IpCommandError(dev_name=self.dev_name,
                                         reason=str(e))

    def get_assigned_macs(self, vf_list):
        """Get assigned mac addresses for vf list.

        @param vf_list: list of vf indexes
        @return: dict mapping of vf to mac
        """
        try:
            out = self._as_root([], "link", ("show", self.dev_name))
        except Exception as e:
            LOG.exception("Failed executing ip command")
            raise exc.IpCommandError(dev_name=self.dev_name, reason=e)
        vf_to_mac_mapping = {}
        vf_lines = self._get_vf_link_show(vf_list, out)
        if vf_lines:
            for vf_line in vf_lines:
                vf_details = self._parse_vf_link_show(vf_line)
                if vf_details:
                    vf_num = vf_details.get('vf')
                    vf_mac = vf_details.get("MAC")
                    vf_to_mac_mapping[vf_num] = vf_mac
        return vf_to_mac_mapping

    def get_vf_state(self, vf_index):
        """Get vf state {enable/disable/auto}

        @param vf_index: vf index
        """
        try:
            out = self._as_root([], "link", ("show", self.dev_name))
        except Exception as e:
            LOG.exception("Failed executing ip command")
            raise exc.IpCommandError(dev_name=self.dev_name, reason=e)
        vf_lines = self._get_vf_link_show([vf_index], out)
        if vf_lines:
            vf_details = self._parse_vf_link_show(vf_lines[0])
            if vf_details:
                state = vf_details.get("link-state",
                                       LinkState.DISABLE)
                if state in (LinkState.AUTO, LinkState.ENABLE):
                    return state
        return LinkState.DISABLE

    def set_vf_state(self, vf_index, state, auto=False):
        """Sets vf state.
@param vf_index: vf index @param state: required state {True/False} """ if auto: status_str = LinkState.AUTO else: status_str = LinkState.ENABLE if state else \ LinkState.DISABLE self._set_feature(vf_index, "state", status_str) def set_vf_spoofcheck(self, vf_index, enabled): """sets vf spoofcheck @param vf_index: vf index @param enabled: True to enable spoof checking, False to disable """ setting = "on" if enabled else "off" self._set_feature(vf_index, "spoofchk", setting) def set_vf_rate(self, vf_index, rate_type, rate_value): """sets vf rate. @param vf_index: vf index @param rate_type: vf rate type ('rate', 'min_tx_rate') @param rate_value: vf rate in Mbps """ self._set_feature(vf_index, rate_type, str(rate_value)) def _get_vf_link_show(self, vf_list, link_show_out): """Get link show output for VFs get vf link show command output filtered by given vf list @param vf_list: list of vf indexes @param link_show_out: link show command output @return: list of output rows regarding given vf_list """ vf_lines = [] for line in link_show_out.split("\n"): line = line.strip() if line.startswith("vf"): details = line.split() index = int(details[1]) if index in vf_list: vf_lines.append(line) if not vf_lines: LOG.warning("Cannot find vfs %(vfs)s in device %(dev_name)s", {'vfs': vf_list, 'dev_name': self.dev_name}) return vf_lines def _parse_vf_link_show(self, vf_line): """Parses vf link show command output line. @param vf_line: link show vf line """ vf_details = {} pattern_match = self.VF_DETAILS_REG_EX.match(vf_line) if pattern_match: vf_details["vf"] = int(pattern_match.group("vf_index")) vf_details["MAC"] = pattern_match.group("mac") vf_details["link-state"] = pattern_match.group("state") else: LOG.warning("failed to parse vf link show line %(line)s: " "for %(device)s", {'line': vf_line, 'device': self.dev_name}) return vf_details ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py0000644000175000017500000006256500000000000031463 0ustar00coreycorey00000000000000# Copyright 2014 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
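# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tree): the PciDeviceIPWrapper
# feature setters above all reduce to an "ip link set <dev> vf <idx>
# <feature> <value>" invocation run as root. The helper below only builds
# the argument list; the device name "eth0" and the VF index are
# hypothetical example values.

def _vf_ip_link_args(dev_name, vf_index, feature, value):
    # Mirrors the argument order handed to _as_root() above.
    return ["ip", "link", "set", dev_name,
            "vf", str(vf_index), feature, str(value)]


assert _vf_ip_link_args("eth0", 3, "spoofchk", "on") == [
    "ip", "link", "set", "eth0", "vf", "3", "spoofchk", "on"]
assert _vf_ip_link_args("eth0", 3, "state", "auto") == [
    "ip", "link", "set", "eth0", "vf", "3", "state", "auto"]
# ---------------------------------------------------------------------------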
import collections import itertools import socket import sys import time from neutron_lib.agent import topics from neutron_lib.api.definitions import portbindings from neutron_lib import constants as n_constants from neutron_lib import context from neutron_lib.placement import utils as place_utils from neutron_lib.utils import helpers from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_service import loopingcall from osprofiler import profiler import six from neutron._i18n import _ from neutron.agent.common import utils from neutron.agent.l2 import l2_agent_extensions_manager as ext_manager from neutron.agent import rpc as agent_rpc from neutron.agent import securitygroups_rpc as agent_sg_rpc from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import securitygroups_rpc as sg_rpc from neutron.common import config as common_config from neutron.common import profiler as setup_profiler from neutron.common import utils as n_utils from neutron.plugins.ml2.drivers.mech_sriov.agent.common import config from neutron.plugins.ml2.drivers.mech_sriov.agent.common \ import exceptions as exc from neutron.plugins.ml2.drivers.mech_sriov.agent import eswitch_manager as esm LOG = logging.getLogger(__name__) class SriovNicSwitchRpcCallbacks(sg_rpc.SecurityGroupAgentRpcCallbackMixin): # Set RPC API version to 1.0 by default. # history # 1.1 Support Security Group RPC (works with NoopFirewallDriver) # 1.2 Support DVR (Distributed Virtual Router) RPC (not supported) # 1.3 Added param devices_to_update to security_groups_provider_updated # (works with NoopFirewallDriver) # 1.4 Added support for network_update # 1.5 Added support for binding_activate and binding_deactivate target = oslo_messaging.Target(version='1.5') def __init__(self, context, agent, sg_agent): super(SriovNicSwitchRpcCallbacks, self).__init__() self.context = context self.agent = agent self.sg_agent = sg_agent def port_update(self, context, **kwargs): LOG.debug("port_update received") port = kwargs.get('port') vnic_type = port.get(portbindings.VNIC_TYPE) if vnic_type and vnic_type == portbindings.VNIC_DIRECT_PHYSICAL: LOG.debug("The SR-IOV agent doesn't handle %s ports.", portbindings.VNIC_DIRECT_PHYSICAL) return # Put the port mac address in the updated_devices set. # Do not store port details, as if they're used for processing # notifications there is no guarantee the notifications are # processed in the same order as the relevant API requests. 
        mac = port['mac_address']
        pci_slot = None
        if port.get(portbindings.PROFILE):
            pci_slot = port[portbindings.PROFILE].get('pci_slot')

        if pci_slot:
            self.agent.updated_devices.add((mac, pci_slot))
            LOG.debug("port_update RPC received for port: %(id)s with MAC "
                      "%(mac)s and PCI slot %(pci_slot)s",
                      {'id': port['id'], 'mac': mac, 'pci_slot': pci_slot})
        else:
            LOG.debug("No PCI Slot for port %(id)s with MAC %(mac)s; "
                      "skipping", {'id': port['id'], 'mac': mac,
                                   'pci_slot': pci_slot})

    def network_update(self, context, **kwargs):
        network_id = kwargs['network']['id']
        LOG.debug("network_update message received for network "
                  "%(network_id)s, with ports: %(ports)s",
                  {'network_id': network_id,
                   'ports': self.agent.network_ports[network_id]})
        for port_data in self.agent.network_ports[network_id]:
            self.agent.updated_devices.add(port_data['device'])

    def binding_activate(self, context, **kwargs):
        if kwargs.get('host') != self.agent.conf.host:
            return
        LOG.debug("binding activate for port %s", kwargs.get('port_id'))
        device_details = self.agent.get_device_details_from_port_id(
            kwargs.get('port_id'))
        mac = device_details.get('mac_address')
        binding_profile = device_details.get('profile')
        if binding_profile:
            pci_slot = binding_profile.get('pci_slot')
            self.agent.activated_bindings.add((mac, pci_slot))
        else:
            LOG.warning("binding_profile not found for port %s.",
                        kwargs.get('port_id'))

    def binding_deactivate(self, context, **kwargs):
        if kwargs.get('host') != self.agent.conf.host:
            return
        LOG.debug("binding deactivate for port %s. NOOP.",
                  kwargs.get('port_id'))


@profiler.trace_cls("rpc")
class SriovNicSwitchAgent(object):
    def __init__(self, physical_devices_mappings, exclude_devices,
                 polling_interval, rp_bandwidths, rp_inventory_defaults,
                 rp_hypervisors):

        self.polling_interval = polling_interval
        self.network_ports = collections.defaultdict(list)
        self.conf = cfg.CONF
        self.device_mappings = physical_devices_mappings
        self.exclude_devices = exclude_devices
        self.setup_eswitch_mgr(physical_devices_mappings, exclude_devices)

        # Stores port update notifications for processing in the main loop
        self.updated_devices = set()
        # Stores <mac, pci_slot> pairs for ports whose binding has been
self.activated_bindings = set() self.context = context.get_admin_context_without_session() self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN) self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN) self.sg_agent = agent_sg_rpc.SecurityGroupAgentRpc( self.context, self.sg_plugin_rpc) self._setup_rpc() self.ext_manager = self._create_agent_extension_manager( self.connection) configurations = {'device_mappings': physical_devices_mappings, n_constants.RP_BANDWIDTHS: rp_bandwidths, n_constants.RP_INVENTORY_DEFAULTS: rp_inventory_defaults, 'resource_provider_hypervisors': rp_hypervisors, 'extensions': self.ext_manager.names()} # TODO(mangelajo): optimize resource_versions (see ovs agent) self.agent_state = { 'binary': 'neutron-sriov-nic-agent', 'host': self.conf.host, 'topic': n_constants.L2_AGENT_TOPIC, 'configurations': configurations, 'agent_type': n_constants.AGENT_TYPE_NIC_SWITCH, 'resource_versions': resources.LOCAL_RESOURCE_VERSIONS, 'start_flag': True} # The initialization is complete; we can start receiving messages self.connection.consume_in_threads() # Initialize iteration counter self.iter_num = 0 def _setup_rpc(self): self.agent_id = 'nic-switch-agent.%s' % socket.gethostname() LOG.info("RPC agent_id: %s", self.agent_id) self.topic = topics.AGENT self.failed_report_state = False self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS) # RPC network init # Handle updates from service self.endpoints = [SriovNicSwitchRpcCallbacks(self.context, self, self.sg_agent)] # Define the listening consumers for the agent consumers = [[topics.PORT, topics.UPDATE], [topics.NETWORK, topics.UPDATE], [topics.SECURITY_GROUP, topics.UPDATE], [topics.PORT_BINDING, topics.DEACTIVATE], [topics.PORT_BINDING, topics.ACTIVATE]] self.connection = agent_rpc.create_consumers(self.endpoints, self.topic, consumers, start_listening=False) report_interval = cfg.CONF.AGENT.report_interval if report_interval: heartbeat = loopingcall.FixedIntervalLoopingCall( self._report_state) heartbeat.start(interval=report_interval) def _report_state(self): try: self.state_rpc.report_state(self.context, self.agent_state) # we only want to update resource versions on startup self.agent_state.pop('resource_versions', None) self.agent_state.pop('start_flag', None) except Exception: self.failed_report_state = True LOG.exception("Failed reporting state!") return if self.failed_report_state: self.failed_report_state = False LOG.info("Successfully reported state after a previous failure.") def _create_agent_extension_manager(self, connection): ext_manager.register_opts(self.conf) mgr = ext_manager.L2AgentExtensionsManager(self.conf) mgr.initialize(connection, 'sriov') return mgr def setup_eswitch_mgr(self, device_mappings, exclude_devices=None): exclude_devices = exclude_devices or {} self.eswitch_mgr = esm.ESwitchManager() self.eswitch_mgr.discover_devices(device_mappings, exclude_devices) def scan_devices(self, registered_devices, updated_devices): curr_devices = self.eswitch_mgr.get_assigned_devices_info() self.agent_state.get('configurations')['devices'] = len(curr_devices) device_info = {} device_info['current'] = curr_devices device_info['added'] = curr_devices - registered_devices # we need to clean up after devices are removed device_info['removed'] = registered_devices - curr_devices # we don't want to process updates for devices that don't exist device_info['updated'] = (updated_devices & curr_devices - device_info['removed']) return device_info def _device_info_has_changes(self, device_info): return 
(device_info.get('added') or device_info.get('updated') or device_info.get('removed')) def process_network_devices(self, device_info): resync_a = False resync_b = False self.sg_agent.prepare_devices_filter(device_info.get('added')) if device_info.get('updated'): self.sg_agent.refresh_firewall() # Updated devices are processed the same as new ones, as their # admin_state_up may have changed. The set union prevents duplicating # work when a device is new and updated in the same polling iteration. devices_added_updated = (set(device_info.get('added')) | set(device_info.get('updated'))) if devices_added_updated: resync_a = self.treat_devices_added_updated(devices_added_updated) if device_info.get('removed'): resync_b = self.treat_devices_removed(device_info['removed']) # If one of the above operations fails => resync with plugin return (resync_a | resync_b) def treat_device(self, device, pci_slot, admin_state_up, spoofcheck=True, propagate_uplink_state=False): if self.eswitch_mgr.device_exists(device, pci_slot): try: self.eswitch_mgr.set_device_spoofcheck(device, pci_slot, spoofcheck) except Exception: LOG.warning("Failed to set spoofcheck for device %s", device) LOG.info("Device %(device)s spoofcheck %(spoofcheck)s", {"device": device, "spoofcheck": spoofcheck}) try: self.eswitch_mgr.set_device_state(device, pci_slot, admin_state_up, propagate_uplink_state) except exc.IpCommandOperationNotSupportedError: LOG.warning("Device %s does not support state change", device) except exc.SriovNicError: LOG.warning("Failed to set device %s state", device) return False else: LOG.info("No device with MAC %s defined on agent.", device) return False return True def _update_network_ports(self, network_id, port_id, mac_pci_slot): self._clean_network_ports(mac_pci_slot) self.network_ports[network_id].append({ "port_id": port_id, "device": mac_pci_slot}) def _clean_network_ports(self, mac_pci_slot): for netid, ports_list in self.network_ports.items(): for port_data in ports_list: if mac_pci_slot == port_data['device']: ports_list.remove(port_data) if ports_list == []: self.network_ports.pop(netid) return port_data['port_id'] def treat_devices_added_updated(self, devices_info): try: macs_list = set([device_info[0] for device_info in devices_info]) devices_details_list = self.plugin_rpc.get_devices_details_list( self.context, macs_list, self.agent_id, self.conf.host) except Exception as e: LOG.debug("Unable to get port details for devices " "with MAC addresses %(devices)s: %(e)s", {'devices': macs_list, 'e': e}) # resync is needed return True devices_up = set() devices_down = set() resync = False for device_details in devices_details_list: device = device_details['device'] LOG.debug("Port with MAC address %s is added", device) if 'port_id' in device_details: LOG.info("Port %(device)s updated. 
Details: %(details)s", {'device': device, 'details': device_details}) port_id = device_details['port_id'] profile = device_details['profile'] spoofcheck = device_details.get('port_security_enabled', True) if self.treat_device( device, profile.get('pci_slot'), device_details['admin_state_up'], spoofcheck, device_details['propagate_uplink_status']): if device_details['admin_state_up']: devices_up.add(device) else: devices_down.add(device) else: resync = True self._update_network_ports(device_details['network_id'], port_id, (device, profile.get('pci_slot'))) self.ext_manager.handle_port(self.context, device_details) elif n_constants.NO_ACTIVE_BINDING in device_details: # Port was added but its binding in this agent # hasn't been activated yet. It will be treated as # added when binding is activated LOG.info("Device with MAC %s has no active binding in host", device) else: LOG.info("Device with MAC %s not defined on plugin", device) self.plugin_rpc.update_device_list(self.context, devices_up, devices_down, self.agent_id, self.conf.host) return resync def treat_devices_removed(self, devices): resync = False for device in devices: mac, pci_slot = device LOG.info("Removing device with MAC address %(mac)s and " "PCI slot %(pci_slot)s", {'mac': mac, 'pci_slot': pci_slot}) try: port_id = self._clean_network_ports(device) if port_id: port = {'port_id': port_id, 'device': mac, 'profile': {'pci_slot': pci_slot}} self.ext_manager.delete_port(self.context, port) else: LOG.warning("port_id to device with MAC " "%s not found", mac) dev_details = self.plugin_rpc.update_device_down(self.context, mac, self.agent_id, cfg.CONF.host) except Exception as e: LOG.debug("Removing port failed for device with MAC address " "%(mac)s and PCI slot %(pci_slot)s due to %(exc)s", {'mac': mac, 'pci_slot': pci_slot, 'exc': e}) resync = True continue if dev_details['exists']: LOG.info("Port with MAC %(mac)s and PCI slot " "%(pci_slot)s updated.", {'mac': mac, 'pci_slot': pci_slot}) else: LOG.debug("Device with MAC %(mac)s and PCI slot " "%(pci_slot)s not defined on plugin", {'mac': mac, 'pci_slot': pci_slot}) return resync def process_activated_bindings(self, device_info, activated_bindings_copy): """Process activated bindings. Add activated bindings to the 'added' set in device info. :param device_info: A dict that contains the set of 'current', 'added', 'removed' and 'updated' ports. :param activated_bindings_copy: A set of activated port bindings. :return: None """ LOG.debug("Processing activated bindings: %s", activated_bindings_copy) # Compute which ports for activated bindings are already present activated_bindings_copy &= device_info['current'] # Treat them as just added device_info['added'] |= activated_bindings_copy def get_device_details_from_port_id(self, port_id): """Get device details from server :param port_id: Port identifier (UUID). :return: A dict containing various port attributes if the port is bound to the host. In case the port is not bound to the host then the method will return A dict with a minimal set of attributes e.g {'device': port_id}. 
""" return self.plugin_rpc.get_device_details(self.context, port_id, self.agent_id, host=cfg.CONF.host) def daemon_loop(self): sync = True devices = set() LOG.info("SRIOV NIC Agent RPC Daemon Started!") while True: start = time.time() LOG.debug("Agent rpc_loop - iteration:%d started", self.iter_num) if sync: LOG.info("Agent out of sync with plugin!") devices.clear() sync = False device_info = {} # Save updated devices dict to perform rollback in case # resync would be needed, and then clear self.updated_devices. # As the greenthread should not yield between these # two statements, this will should be thread-safe. updated_devices_copy = self.updated_devices self.updated_devices = set() activated_bindings_copy = self.activated_bindings self.activated_bindings = set() try: self.eswitch_mgr.discover_devices(self.device_mappings, self.exclude_devices) device_info = self.scan_devices(devices, updated_devices_copy) if activated_bindings_copy: self.process_activated_bindings(device_info, activated_bindings_copy) if self._device_info_has_changes(device_info): LOG.debug("Agent loop found changes! %s", device_info) # If treat devices fails - indicates must resync with # plugin sync = self.process_network_devices(device_info) devices = device_info['current'] except Exception: LOG.exception("Error in agent loop. Devices info: %s", device_info) sync = True # Restore devices that were removed from this set earlier # without overwriting ones that may have arrived since. self.updated_devices |= updated_devices_copy self.activated_bindings |= activated_bindings_copy # sleep till end of polling interval elapsed = (time.time() - start) if (elapsed < self.polling_interval): time.sleep(self.polling_interval - elapsed) else: LOG.debug("Loop iteration exceeded interval " "(%(polling_interval)s vs. %(elapsed)s)!", {'polling_interval': self.polling_interval, 'elapsed': elapsed}) self.iter_num = self.iter_num + 1 class SriovNicAgentConfigParser(object): def __init__(self): self.device_mappings = {} self.exclude_devices = {} def parse(self): """Parses device_mappings and exclude_devices. Parse and validate the consistency in both mappings """ self.device_mappings = helpers.parse_mappings( cfg.CONF.SRIOV_NIC.physical_device_mappings, unique_keys=False) self.exclude_devices = config.parse_exclude_devices( cfg.CONF.SRIOV_NIC.exclude_devices) self.rp_bandwidths = place_utils.parse_rp_bandwidths( cfg.CONF.SRIOV_NIC.resource_provider_bandwidths) self.rp_inventory_defaults = place_utils.parse_rp_inventory_defaults( cfg.CONF.SRIOV_NIC.resource_provider_inventory_defaults) self.rp_hypervisors = utils.default_rp_hypervisors( cfg.CONF.SRIOV_NIC.resource_provider_hypervisors, self.device_mappings ) self._validate() def _validate(self): """Validate configuration. Validate that network_device in excluded_device exists in device mappings. Validate that network_device in resource_provider_bandwidths exists in device mappings. 
""" dev_net_set = set(itertools.chain.from_iterable( six.itervalues(self.device_mappings))) for dev_name in self.exclude_devices.keys(): if dev_name not in dev_net_set: raise ValueError(_( "Invalid exclude_devices: " "Device name %(dev_name)s is missing from " "physical_device_mappings") % {'dev_name': dev_name}) n_utils.validate_rp_bandwidth(self.rp_bandwidths, dev_net_set) def main(): common_config.init(sys.argv[1:]) common_config.setup_logging() try: config_parser = SriovNicAgentConfigParser() config_parser.parse() device_mappings = config_parser.device_mappings exclude_devices = config_parser.exclude_devices rp_bandwidths = config_parser.rp_bandwidths rp_inventory_defaults = config_parser.rp_inventory_defaults rp_hypervisors = config_parser.rp_hypervisors except ValueError: LOG.exception("Failed on Agent configuration parse. " "Agent terminated!") raise SystemExit(1) LOG.info("Physical Devices mappings: %s", device_mappings) LOG.info("Exclude Devices: %s", exclude_devices) LOG.info("Resource provider bandwidths: %s", rp_bandwidths) LOG.info("Resource provider inventory defaults: %s", rp_inventory_defaults) LOG.info("Resource provider hypervisors: %s", rp_hypervisors) polling_interval = cfg.CONF.AGENT.polling_interval try: agent = SriovNicSwitchAgent(device_mappings, exclude_devices, polling_interval, rp_bandwidths, rp_inventory_defaults, rp_hypervisors) except exc.SriovNicError: LOG.exception("Agent Initialization Failed") raise SystemExit(1) # Start everything. setup_profiler.setup("neutron-sriov-nic-agent", cfg.CONF.host) LOG.info("Agent initialized successfully, now running... ") agent.daemon_loop() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.343045 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/0000755000175000017500000000000000000000000027113 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/__init__.py0000644000175000017500000000000000000000000031212 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/exceptions.py0000644000175000017500000000165100000000000031651 0ustar00coreycorey00000000000000# Copyright (c) 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Exceptions used by SRIOV Mechanism Driver.""" from neutron_lib import exceptions from neutron._i18n import _ class SriovUnsupportedNetworkType(exceptions.NeutronException): """Method was invoked for unsupported network type.""" message = _("Unsupported network type %(net_type)s.") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py0000644000175000017500000002146600000000000031765 0ustar00coreycorey00000000000000# Copyright 2014 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import uuid from neutron_lib.api.definitions import portbindings from neutron_lib import constants from neutron_lib.plugins.ml2 import api from oslo_config import cfg from oslo_log import log from neutron._i18n import _ from neutron.conf.plugins.ml2.drivers.mech_sriov import mech_sriov_conf from neutron.plugins.ml2.drivers import mech_agent from neutron.plugins.ml2.drivers.mech_sriov.mech_driver \ import exceptions as exc from neutron.services.qos.drivers.sriov import driver as sriov_qos_driver LOG = log.getLogger(__name__) FLAT_VLAN = 0 mech_sriov_conf.register_sriov_mech_driver_opts() class SriovNicSwitchMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): """Mechanism Driver for SR-IOV capable NIC based switching. The SriovNicSwitchMechanismDriver integrates the ml2 plugin with the sriovNicSwitch L2 agent depending on configuration option. Port binding with this driver may require the sriovNicSwitch agent to be running on the port's host, and that agent to have connectivity to at least one segment of the port's network. L2 agent is not essential for port binding; port binding is handled by VIF Driver via libvirt domain XML. L2 Agent presents in order to manage port update events. """ resource_provider_uuid5_namespace = uuid.UUID( '87f1895c-73bb-11e8-9008-c4d987b2a692') def __init__(self, agent_type=constants.AGENT_TYPE_NIC_SWITCH, vif_details={portbindings.CAP_PORT_FILTER: False, portbindings.VIF_DETAILS_CONNECTIVITY: portbindings.CONNECTIVITY_L2}, supported_vnic_types=[portbindings.VNIC_DIRECT, portbindings.VNIC_MACVTAP, portbindings.VNIC_DIRECT_PHYSICAL]): """Initialize base class for SriovNicSwitch L2 agent type. :param agent_type: Constant identifying agent type in agents_db :param vif_details: Dictionary with details for VIF driver when bound :param supported_vnic_types: The binding:vnic_type values we can bind """ self.agent_type = agent_type # TODO(lajoskatona): move this blacklisting to # SimpleAgentMechanismDriverBase. By that e blacklisting and validation # of the vnic_types would be available for all mechanism drivers. 
self.supported_vnic_types = self.blacklist_supported_vnic_types( vnic_types=supported_vnic_types, blacklist=cfg.CONF.SRIOV_DRIVER.vnic_type_blacklist ) # NOTE(ndipanov): PF passthrough requires a different vif type self.vnic_type_for_vif_type = ( {vtype: portbindings.VIF_TYPE_HOSTDEV_PHY if vtype == portbindings.VNIC_DIRECT_PHYSICAL else portbindings.VIF_TYPE_HW_VEB for vtype in self.supported_vnic_types}) self.vif_details = vif_details sriov_qos_driver.register() def get_allowed_network_types(self, agent): return (constants.TYPE_FLAT, constants.TYPE_VLAN) def get_mappings(self, agent): return agent['configurations'].get('device_mappings', {}) def get_standard_device_mappings(self, agent): """Return the agent's device mappings in a standard way. The common format for OVS and SRIOv mechanism drivers: {'physnet_name': ['device_or_bridge_1', 'device_or_bridge_2']} :param agent: The agent :returns A dict in the format: {'physnet_name': ['bridge_or_device']} :raises ValueError: if there is no device_mappings key in agent['configurations'] """ if 'device_mappings' in agent['configurations']: return agent['configurations']['device_mappings'] else: raise ValueError(_('Cannot standardize device mappings of agent ' 'type: %s'), agent['agent_type']) def bind_port(self, context): LOG.debug("Attempting to bind port %(port)s on " "network %(network)s", {'port': context.current['id'], 'network': context.network.current['id']}) profile = context.current.get(portbindings.PROFILE) vnic_type = context.current.get(portbindings.VNIC_TYPE, portbindings.VNIC_NORMAL) capabilities = [] if profile: capabilities = profile.get('capabilities', []) if (vnic_type == portbindings.VNIC_DIRECT and 'switchdev' in capabilities): LOG.debug("Refusing to bind due to unsupported vnic_type: %s " "with switchdev capability", portbindings.VNIC_DIRECT) return if vnic_type not in self.supported_vnic_types: LOG.debug("Refusing to bind due to unsupported vnic_type: %s", vnic_type) return if vnic_type == portbindings.VNIC_DIRECT_PHYSICAL: # Physical functions don't support things like QoS properties, # spoof checking, etc. so we might as well side-step the agent # for now. The agent also doesn't currently recognize non-VF # PCI devices so we won't get port status change updates # either. This should be changed in the future so physical # functions can use device mapping checks and the plugin can # get port status updates. for segment in context.segments_to_bind: if self.try_to_bind_segment_for_agent(context, segment, agent=None): break return for agent in context.host_agents(self.agent_type): LOG.debug("Checking agent: %s", agent) if agent['alive']: for segment in context.segments_to_bind: if self.try_to_bind_segment_for_agent(context, segment, agent): return else: LOG.warning("Attempting to bind with dead agent: %s", agent) def try_to_bind_segment_for_agent(self, context, segment, agent): vnic_type = context.current.get(portbindings.VNIC_TYPE, portbindings.VNIC_DIRECT) vif_type = self.vnic_type_for_vif_type.get( vnic_type, portbindings.VIF_TYPE_HW_VEB) if not self.check_segment_for_agent(segment, agent): return False port_status = (constants.PORT_STATUS_ACTIVE if agent is None else constants.PORT_STATUS_DOWN) context.set_binding(segment[api.ID], vif_type, self._get_vif_details(segment), port_status) LOG.debug("Bound using segment: %s", segment) return True def check_segment_for_agent(self, segment, agent=None): """Check if segment can be bound. 
:param segment: segment dictionary describing segment to bind :param agent: agents_db entry describing agent to bind or None :returns: True if segment can be bound for agent """ network_type = segment[api.NETWORK_TYPE] if network_type in self.get_allowed_network_types(agent): if agent: mappings = self.get_mappings(agent) LOG.debug("Checking segment: %(segment)s " "for mappings: %(mappings)s ", {'segment': segment, 'mappings': mappings}) return segment[api.PHYSICAL_NETWORK] in mappings return True return False def check_vlan_transparency(self, context): """SR-IOV driver vlan transparency support.""" return True def _get_vif_details(self, segment): network_type = segment[api.NETWORK_TYPE] if network_type == constants.TYPE_FLAT: vlan_id = FLAT_VLAN elif network_type == constants.TYPE_VLAN: vlan_id = segment[api.SEGMENTATION_ID] else: raise exc.SriovUnsupportedNetworkType(net_type=network_type) vif_details = self.vif_details.copy() vif_details[portbindings.VIF_DETAILS_VLAN] = str(vlan_id) return vif_details ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.343045 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/0000755000175000017500000000000000000000000025037 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/__init__.py0000644000175000017500000000000000000000000027136 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.347045 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/0000755000175000017500000000000000000000000026135 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/__init__.py0000644000175000017500000000000000000000000030234 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.347045 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/common/0000755000175000017500000000000000000000000027425 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/common/__init__.py0000644000175000017500000000000000000000000031524 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py0000644000175000017500000000160600000000000031247 0ustar00coreycorey00000000000000# Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
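# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tree): two dispatch rules of
# the SR-IOV mechanism driver above, written out with the literal string
# values the portbindings constants are assumed to carry ('direct',
# 'direct-physical', 'hw_veb', 'hostdev_physical').

FLAT_VLAN = 0


def _vif_type_for(vnic_type):
    # PF passthrough gets a dedicated vif type; all other supported
    # vnic_types bind as hardware virtual embedded bridge ports.
    return ("hostdev_physical" if vnic_type == "direct-physical"
            else "hw_veb")


def _vlan_for_segment(network_type, segmentation_id):
    # Mirrors _get_vif_details(): flat networks are represented as VLAN 0.
    if network_type == "flat":
        return FLAT_VLAN
    if network_type == "vlan":
        return segmentation_id
    raise ValueError("Unsupported network type %s" % network_type)


assert _vif_type_for("direct") == "hw_veb"
assert _vif_type_for("direct-physical") == "hostdev_physical"
assert _vlan_for_segment("vlan", 1234) == 1234
assert _vlan_for_segment("flat", None) == FLAT_VLAN
# ---------------------------------------------------------------------------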
from oslo_config import cfg from neutron.conf.agent import common as config from neutron.conf.plugins.ml2.drivers import agent from neutron.conf.plugins.ml2.drivers import ovs_conf agent.register_agent_opts() ovs_conf.register_ovs_agent_opts() config.register_agent_state_opts_helper(cfg.CONF) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/common/constants.py0000644000175000017500000001247200000000000032021 0ustar00coreycorey00000000000000# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from neutron_lib import constants as p_const # Special vlan_id value in ovs_vlan_allocations table indicating flat network FLAT_VLAN_ID = -1 # Topic for tunnel notifications between the plugin and agent TUNNEL = 'tunnel' # Name prefixes for veth device or patch port pair linking the integration # bridge with the physical bridge for a physical network PEER_INTEGRATION_PREFIX = 'int-' PEER_PHYSICAL_PREFIX = 'phy-' # Nonexistent peer used to create patch ports without associating them, it # allows to define flows before association NONEXISTENT_PEER = 'nonexistent-peer' # The different types of tunnels TUNNEL_NETWORK_TYPES = [p_const.TYPE_GRE, p_const.TYPE_VXLAN, p_const.TYPE_GENEVE] # --- OpenFlow table IDs # --- Integration bridge (int_br) LOCAL_SWITCHING = 0 # Various tables for DVR use of integration bridge flows DVR_TO_SRC_MAC = 1 DVR_TO_SRC_MAC_VLAN = 2 ARP_DVR_MAC_TO_DST_MAC = 3 ARP_DVR_MAC_TO_DST_MAC_VLAN = 4 CANARY_TABLE = 23 # Table for ARP poison/spoofing prevention rules ARP_SPOOF_TABLE = 24 # Table for MAC spoof filtering MAC_SPOOF_TABLE = 25 # Table to decide whether further filtering is needed TRANSIENT_TABLE = 60 TRANSIENT_EGRESS_TABLE = 61 # Tables used for ovs firewall BASE_EGRESS_TABLE = 71 RULES_EGRESS_TABLE = 72 ACCEPT_OR_INGRESS_TABLE = 73 BASE_INGRESS_TABLE = 81 RULES_INGRESS_TABLE = 82 OVS_FIREWALL_TABLES = ( BASE_EGRESS_TABLE, RULES_EGRESS_TABLE, ACCEPT_OR_INGRESS_TABLE, BASE_INGRESS_TABLE, RULES_INGRESS_TABLE, ) # Tables for parties interacting with ovs firewall ACCEPTED_EGRESS_TRAFFIC_TABLE = 91 ACCEPTED_INGRESS_TRAFFIC_TABLE = 92 DROPPED_TRAFFIC_TABLE = 93 ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE = 94 INT_BR_ALL_TABLES = ( LOCAL_SWITCHING, DVR_TO_SRC_MAC, DVR_TO_SRC_MAC_VLAN, CANARY_TABLE, ARP_SPOOF_TABLE, MAC_SPOOF_TABLE, TRANSIENT_TABLE, TRANSIENT_EGRESS_TABLE, BASE_EGRESS_TABLE, RULES_EGRESS_TABLE, ACCEPT_OR_INGRESS_TABLE, BASE_INGRESS_TABLE, RULES_INGRESS_TABLE, ACCEPTED_EGRESS_TRAFFIC_TABLE, ACCEPTED_INGRESS_TRAFFIC_TABLE, DROPPED_TRAFFIC_TABLE) # --- Tunnel bridge (tun_br) # Various tables for tunneling flows DVR_PROCESS = 1 PATCH_LV_TO_TUN = 2 GRE_TUN_TO_LV = 3 VXLAN_TUN_TO_LV = 4 GENEVE_TUN_TO_LV = 6 DVR_NOT_LEARN = 9 LEARN_FROM_TUN = 10 UCAST_TO_TUN = 20 ARP_RESPONDER = 21 FLOOD_TO_TUN = 22 TUN_BR_ALL_TABLES = ( LOCAL_SWITCHING, DVR_PROCESS, PATCH_LV_TO_TUN, GRE_TUN_TO_LV, VXLAN_TUN_TO_LV, GENEVE_TUN_TO_LV, DVR_NOT_LEARN, 
LEARN_FROM_TUN, UCAST_TO_TUN, ARP_RESPONDER, FLOOD_TO_TUN) # --- Physical Bridges (phys_brs) # Various tables for DVR use of physical bridge flows DVR_PROCESS_VLAN = 1 LOCAL_VLAN_TRANSLATION = 2 DVR_NOT_LEARN_VLAN = 3 PHY_BR_ALL_TABLES = ( LOCAL_SWITCHING, DVR_PROCESS_VLAN, LOCAL_VLAN_TRANSLATION, DVR_NOT_LEARN_VLAN) # --- end of OpenFlow table IDs # type for ARP reply in ARP header ARP_REPLY = '0x2' # Map tunnel types to tables number TUN_TABLE = {p_const.TYPE_GRE: GRE_TUN_TO_LV, p_const.TYPE_VXLAN: VXLAN_TUN_TO_LV, p_const.TYPE_GENEVE: GENEVE_TUN_TO_LV} # The default respawn interval for the ovsdb monitor DEFAULT_OVSDBMON_RESPAWN = 30 # Represent invalid OF Port OFPORT_INVALID = -1 ARP_RESPONDER_ACTIONS = ('move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],' 'mod_dl_src:%(mac)s,' 'load:0x2->NXM_OF_ARP_OP[],' 'move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],' 'move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],' 'load:%(mac)#x->NXM_NX_ARP_SHA[],' 'load:%(ip)#x->NXM_OF_ARP_SPA[],' 'in_port') # Represent ovs status OVS_RESTARTED = 0 OVS_NORMAL = 1 OVS_DEAD = 2 EXTENSION_DRIVER_TYPE = 'ovs' # ovs datapath types OVS_DATAPATH_SYSTEM = 'system' OVS_DATAPATH_NETDEV = 'netdev' OVS_DPDK_VHOST_USER = 'dpdkvhostuser' OVS_DPDK_VHOST_USER_CLIENT = 'dpdkvhostuserclient' OVS_DPDK_PORT_TYPES = [OVS_DPDK_VHOST_USER, OVS_DPDK_VHOST_USER_CLIENT] # default ovs vhost-user socket location VHOST_USER_SOCKET_DIR = '/var/run/openvswitch' MAX_DEVICE_RETRIES = 5 # OpenFlow version constants OPENFLOW10 = "OpenFlow10" OPENFLOW11 = "OpenFlow11" OPENFLOW12 = "OpenFlow12" OPENFLOW13 = "OpenFlow13" OPENFLOW14 = "OpenFlow14" OPENFLOW15 = "OpenFlow15" OPENFLOW_MAX_PRIORITY = 65535 # A placeholder for dead vlans. DEAD_VLAN_TAG = p_const.MAX_VLAN_TAG + 1 # callback resource for setting 'bridge_name' in the 'binding:vif_details' OVS_BRIDGE_NAME = 'ovs_bridge_name' # callback resource for notifying to ovsdb handler OVSDB_RESOURCE = 'ovsdb' # Used in ovs port 'external_ids' in order mark it for no cleanup when # ovs_cleanup script is used. SKIP_CLEANUP = 'skip_cleanup' ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.347045 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/0000755000175000017500000000000000000000000031707 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/__init__.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/__init__.p0000644000175000017500000000000000000000000033615 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/qos_driver.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/qos_driver0000644000175000017500000002224000000000000034007 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections from neutron_lib import constants from neutron_lib.services.qos import constants as qos_consts from oslo_config import cfg from oslo_log import log as logging from neutron.agent.l2.extensions import qos_linux as qos from neutron.services.qos.drivers.openvswitch import driver LOG = logging.getLogger(__name__) class QosOVSAgentDriver(qos.QosLinuxAgentDriver): SUPPORTED_RULES = driver.SUPPORTED_RULES def __init__(self): super(QosOVSAgentDriver, self).__init__() self.br_int_name = cfg.CONF.OVS.integration_bridge self.br_int = None self.agent_api = None self.ports = collections.defaultdict(dict) def consume_api(self, agent_api): self.agent_api = agent_api def _minimum_bandwidth_initialize(self): """Clear QoS setting at agent restart. This is for clearing stale settings (such as ports and QoS tables deleted while the agent is down). The current implementation can not find stale settings. The solution is to clear everything and rebuild. There is no performance impact however the QoS feature will be down until the QoS rules are rebuilt. """ self.br_int.clear_minimum_bandwidth_qos() def initialize(self): self.br_int = self.agent_api.request_int_br() self.cookie = self.br_int.default_cookie self._minimum_bandwidth_initialize() def create_bandwidth_limit(self, port, rule): self.update_bandwidth_limit(port, rule) def update_bandwidth_limit(self, port, rule): vif_port = port.get('vif_port') if not vif_port: port_id = port.get('port_id') LOG.debug("update_bandwidth_limit was received for port %s but " "vif_port was not found. It seems that port is already " "deleted", port_id) return self.ports[port['port_id']][(qos_consts.RULE_TYPE_BANDWIDTH_LIMIT, rule.direction)] = port if rule.direction == constants.INGRESS_DIRECTION: self._update_ingress_bandwidth_limit(vif_port, rule) else: self._update_egress_bandwidth_limit(vif_port, rule) def delete_bandwidth_limit(self, port): port_id = port.get('port_id') vif_port = port.get('vif_port') port = self.ports[port_id].pop((qos_consts.RULE_TYPE_BANDWIDTH_LIMIT, constants.EGRESS_DIRECTION), None) if not port and not vif_port: LOG.debug("delete_bandwidth_limit was received " "for port %s but port was not found. " "It seems that bandwidth_limit is already deleted", port_id) return vif_port = vif_port or port.get('vif_port') self.br_int.delete_egress_bw_limit_for_port(vif_port.port_name) def delete_bandwidth_limit_ingress(self, port): port_id = port.get('port_id') vif_port = port.get('vif_port') port = self.ports[port_id].pop((qos_consts.RULE_TYPE_BANDWIDTH_LIMIT, constants.INGRESS_DIRECTION), None) if not port and not vif_port: LOG.debug("delete_bandwidth_limit_ingress was received " "for port %s but port was not found. 
" "It seems that bandwidth_limit is already deleted", port_id) return vif_port = vif_port or port.get('vif_port') self.br_int.delete_ingress_bw_limit_for_port(vif_port.port_name) def create_dscp_marking(self, port, rule): self.update_dscp_marking(port, rule) def update_dscp_marking(self, port, rule): self.ports[port['port_id']][qos_consts.RULE_TYPE_DSCP_MARKING] = port vif_port = port.get('vif_port') if not vif_port: port_id = port.get('port_id') LOG.debug("update_dscp_marking was received for port %s but " "vif_port was not found. It seems that port is already " "deleted", port_id) return port = self.br_int.get_port_ofport(vif_port.port_name) self.br_int.install_dscp_marking_rule(port=port, dscp_mark=rule.dscp_mark) def delete_dscp_marking(self, port): vif_port = port.get('vif_port') dscp_port = self.ports[port['port_id']].pop(qos_consts. RULE_TYPE_DSCP_MARKING, 0) if not dscp_port and not vif_port: LOG.debug("delete_dscp_marking was received for port %s but " "no port information was stored to be deleted", port['port_id']) return vif_port = vif_port or dscp_port.get('vif_port') port_num = vif_port.ofport self.br_int.uninstall_flows(in_port=port_num, table_id=0, reg2=0) def _update_egress_bandwidth_limit(self, vif_port, rule): max_kbps = rule.max_kbps # NOTE(slaweq): According to ovs docs: # http://openvswitch.org/support/dist-docs/ovs-vswitchd.conf.db.5.html # ovs accepts only integer values of burst: max_burst_kbps = int(self._get_egress_burst_value(rule)) self.br_int.create_egress_bw_limit_for_port(vif_port.port_name, max_kbps, max_burst_kbps) def _update_ingress_bandwidth_limit(self, vif_port, rule): port_name = vif_port.port_name max_kbps = rule.max_kbps or 0 max_burst_kbps = rule.max_burst_kbps or 0 self.br_int.update_ingress_bw_limit_for_port( port_name, max_kbps, max_burst_kbps ) def create_minimum_bandwidth(self, port, rule): self.update_minimum_bandwidth(port, rule) def update_minimum_bandwidth(self, port, rule): vif_port = port.get('vif_port') if not vif_port: LOG.debug('update_minimum_bandwidth was received for port %s but ' 'vif_port was not found. It seems that port is already ' 'deleted', port.get('port_id')) return self.ports[port['port_id']][(qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH, rule.direction)] = port # queue_num is used to identify the port which traffic come from, # it needs to be unique across br-int. It is convenient to use ofport # as queue_num because it is unique in br-int and start from 1. egress_port_names = [] for phy_br in self.agent_api.request_phy_brs(): ports = phy_br.get_bridge_ports('') if not ports: LOG.warning('Bridge %s does not have a physical port ' 'connected', phy_br.br_name) egress_port_names.extend(ports) qos_id = self.br_int.update_minimum_bandwidth_queue( port['port_id'], egress_port_names, vif_port.ofport, rule.min_kbps) LOG.debug('Minimum bandwidth rule was updated/created for port ' '%(port_id)s and rule %(rule_id)s. QoS ID: %(qos_id)s. 
' 'Egress ports with QoS applied: %(ports)s', {'port_id': port['port_id'], 'rule_id': rule.id, 'qos_id': qos_id, 'ports': egress_port_names}) def delete_minimum_bandwidth(self, port): rule_port = self.ports[port['port_id']].pop( (qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH, constants.EGRESS_DIRECTION), None) if not rule_port: LOG.debug('delete_minimum_bandwidth was received for port %s but ' 'no port information was stored to be deleted', port['port_id']) return self.br_int.delete_minimum_bandwidth_queue(port['port_id']) LOG.debug("Minimum bandwidth rule was deleted for port: %s.", port['port_id']) def delete_minimum_bandwidth_ingress(self, port): rule_port = self.ports[port['port_id']].pop( (qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH, constants.INGRESS_DIRECTION), None) if not rule_port: LOG.debug('delete_minimum_bandwidth_ingress was received for port ' '%s but no port information was stored to be deleted', port['port_id']) return LOG.debug("Minimum bandwidth rule for ingress direction was deleted " "for port %s", port['port_id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/main.py0000644000175000017500000000244600000000000027441 0ustar00coreycorey00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014 Fumihiko Kakuma # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo_config import cfg from neutron.common import config as common_config from neutron.common import profiler from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native import \ main as of_main cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.' 'common.config') def main(): common_config.init(sys.argv[1:]) of_main.init_config() common_config.setup_logging() profiler.setup("neutron-ovs-agent", cfg.CONF.host) of_main.main() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.347045 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/0000755000175000017500000000000000000000000027766 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/__init__.py0000644000175000017500000000000000000000000032065 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/br_cookie.py0000644000175000017500000000411000000000000032270 0ustar00coreycorey00000000000000# Copyright 2016 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from neutron.agent.common import ovs_lib class OVSBridgeCookieMixin(object): '''Mixin to provide cookie retention functionality to the OVSAgentBridge ''' def __init__(self, *args, **kwargs): super(OVSBridgeCookieMixin, self).__init__(*args, **kwargs) self._reserved_cookies = set() @property def reserved_cookies(self): if self._default_cookie not in self._reserved_cookies: self._reserved_cookies.add(self._default_cookie) return set(self._reserved_cookies) def request_cookie(self): if self._default_cookie not in self._reserved_cookies: self._reserved_cookies.add(self._default_cookie) uuid_stamp = ovs_lib.generate_random_cookie() while uuid_stamp in self._reserved_cookies: uuid_stamp = ovs_lib.generate_random_cookie() self._reserved_cookies.add(uuid_stamp) return uuid_stamp def unset_cookie(self, cookie): self._reserved_cookies.discard(cookie) def set_agent_uuid_stamp(self, val): self._reserved_cookies.add(val) if self._default_cookie in self._reserved_cookies: self._reserved_cookies.remove(self._default_cookie) super(OVSBridgeCookieMixin, self).set_agent_uuid_stamp(val) def clone(self): '''Used by OVSCookieBridge, can be overridden by subclasses if a behavior different from copy.copy is needed. ''' return copy.copy(self) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3510451 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/0000755000175000017500000000000000000000000031254 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/__init__.py0000644000175000017500000000000000000000000033353 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_dvr_process.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_dvr_proce0000644000175000017500000001220700000000000033647 0ustar00coreycorey00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
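# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tree): the cookie mixin
# above is essentially a registry of reserved 64-bit flow cookies, so every
# consumer stamping flows on a shared bridge draws a unique value.
# random.getrandbits(64) stands in for ovs_lib.generate_random_cookie().

import random


class _CookieRegistrySketch(object):
    def __init__(self, default_cookie=0):
        self._reserved = {default_cookie}

    def request_cookie(self):
        cookie = random.getrandbits(64)
        while cookie in self._reserved:   # retry on (rare) collision
            cookie = random.getrandbits(64)
        self._reserved.add(cookie)
        return cookie

    def unset_cookie(self, cookie):
        self._reserved.discard(cookie)


_reg = _CookieRegistrySketch()
_c1, _c2 = _reg.request_cookie(), _reg.request_cookie()
assert _c1 != _c2 and _c1 != 0   # default cookie can never be handed out
# ---------------------------------------------------------------------------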
from os_ken.lib.packet import ether_types from os_ken.lib.packet import icmpv6 from os_ken.lib.packet import in_proto from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants class OVSDVRProcessMixin(object): """Common logic for br-tun and br-phys' DVR_PROCESS tables. Inheritors should provide self.dvr_process_table_id and self.dvr_process_next_table_id. """ @staticmethod def _dvr_process_ipv4_match(ofp, ofpp, vlan_tag, gateway_ip): return ofpp.OFPMatch(vlan_vid=vlan_tag | ofp.OFPVID_PRESENT, eth_type=ether_types.ETH_TYPE_ARP, arp_tpa=gateway_ip) def install_dvr_process_ipv4(self, vlan_tag, gateway_ip): # block ARP (_dp, ofp, ofpp) = self._get_dp() match = self._dvr_process_ipv4_match(ofp, ofpp, vlan_tag=vlan_tag, gateway_ip=gateway_ip) self.install_drop(table_id=constants.FLOOD_TO_TUN, priority=3, match=match) def delete_dvr_process_ipv4(self, vlan_tag, gateway_ip): (_dp, ofp, ofpp) = self._get_dp() match = self._dvr_process_ipv4_match(ofp, ofpp, vlan_tag=vlan_tag, gateway_ip=gateway_ip) self.uninstall_flows(table_id=constants.FLOOD_TO_TUN, match=match) @staticmethod def _dvr_process_ipv6_match(ofp, ofpp, vlan_tag, gateway_mac): return ofpp.OFPMatch(vlan_vid=vlan_tag | ofp.OFPVID_PRESENT, eth_type=ether_types.ETH_TYPE_IPV6, ip_proto=in_proto.IPPROTO_ICMPV6, icmpv6_type=icmpv6.ND_ROUTER_ADVERT, eth_src=gateway_mac) def install_dvr_process_ipv6(self, vlan_tag, gateway_mac): # block RA (_dp, ofp, ofpp) = self._get_dp() match = self._dvr_process_ipv6_match(ofp, ofpp, vlan_tag=vlan_tag, gateway_mac=gateway_mac) self.install_drop(table_id=constants.FLOOD_TO_TUN, priority=3, match=match) def delete_dvr_process_ipv6(self, vlan_tag, gateway_mac): (_dp, ofp, ofpp) = self._get_dp() match = self._dvr_process_ipv6_match(ofp, ofpp, vlan_tag=vlan_tag, gateway_mac=gateway_mac) self.uninstall_flows(table_id=constants.FLOOD_TO_TUN, match=match) @staticmethod def _dvr_process_in_match(ofp, ofpp, vlan_tag, vif_mac): return ofpp.OFPMatch(vlan_vid=vlan_tag | ofp.OFPVID_PRESENT, eth_dst=vif_mac) @staticmethod def _dvr_process_out_match(ofp, ofpp, vlan_tag, vif_mac): return ofpp.OFPMatch(vlan_vid=vlan_tag | ofp.OFPVID_PRESENT, eth_src=vif_mac) def install_dvr_process(self, vlan_tag, vif_mac, dvr_mac_address): (_dp, ofp, ofpp) = self._get_dp() match = self._dvr_process_in_match(ofp, ofpp, vlan_tag=vlan_tag, vif_mac=vif_mac) table_id = self.dvr_process_table_id self.install_drop(table_id=table_id, priority=2, match=match) match = self._dvr_process_out_match(ofp, ofpp, vlan_tag=vlan_tag, vif_mac=vif_mac) actions = [ ofpp.OFPActionSetField(eth_src=dvr_mac_address), ] instructions = [ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions), ofpp.OFPInstructionGotoTable( table_id=self.dvr_process_next_table_id), ] self.install_instructions(table_id=table_id, priority=1, match=match, instructions=instructions) def delete_dvr_process(self, vlan_tag, vif_mac): (_dp, ofp, ofpp) = self._get_dp() table_id = self.dvr_process_table_id match = self._dvr_process_in_match(ofp, ofpp, vlan_tag=vlan_tag, vif_mac=vif_mac) self.uninstall_flows(table_id=table_id, match=match) match = self._dvr_process_out_match(ofp, ofpp, vlan_tag=vlan_tag, vif_mac=vif_mac) self.uninstall_flows(table_id=table_id, match=match) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_int.py0000644000175000017500000003653300000000000033115 0ustar00coreycorey00000000000000# Copyright (C)
2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ * references ** OVS agent https://wiki.openstack.org/wiki/Ovs-flow-logic """ import netaddr from neutron_lib import constants as p_const from os_ken.lib.packet import ether_types from os_ken.lib.packet import icmpv6 from os_ken.lib.packet import in_proto from oslo_log import log as logging from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import ovs_bridge LOG = logging.getLogger(__name__) class OVSIntegrationBridge(ovs_bridge.OVSAgentBridge): """openvswitch agent br-int specific logic.""" of_tables = constants.INT_BR_ALL_TABLES def setup_default_table(self): self.setup_canary_table() self.install_goto(dest_table_id=constants.TRANSIENT_TABLE) self.install_normal(table_id=constants.TRANSIENT_TABLE, priority=3) self.install_drop(table_id=constants.ARP_SPOOF_TABLE) self.install_drop(table_id=constants.LOCAL_SWITCHING, priority=constants.OPENFLOW_MAX_PRIORITY, vlan_vid=constants.DEAD_VLAN_TAG) # When openflow firewall is not enabled, we use this table to # deal with all egress flow. self.install_normal(table_id=constants.TRANSIENT_EGRESS_TABLE, priority=3) def setup_canary_table(self): self.install_drop(constants.CANARY_TABLE) def check_canary_table(self): try: flows = self.dump_flows(constants.CANARY_TABLE) except RuntimeError: LOG.exception("Failed to communicate with the switch") return constants.OVS_DEAD return constants.OVS_NORMAL if flows else constants.OVS_RESTARTED @staticmethod def _local_vlan_match(_ofp, ofpp, port, vlan_vid): return ofpp.OFPMatch(in_port=port, vlan_vid=vlan_vid) def provision_local_vlan(self, port, lvid, segmentation_id): (_dp, ofp, ofpp) = self._get_dp() if segmentation_id is None: vlan_vid = ofp.OFPVID_NONE actions = [ofpp.OFPActionPushVlan()] else: vlan_vid = segmentation_id | ofp.OFPVID_PRESENT actions = [] match = self._local_vlan_match(ofp, ofpp, port, vlan_vid) actions += [ ofpp.OFPActionSetField(vlan_vid=lvid | ofp.OFPVID_PRESENT), ] instructions = [ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions), ofpp.OFPInstructionGotoTable(table_id=constants.TRANSIENT_TABLE), ] self.install_instructions( instructions=instructions, priority=3, match=match, ) def reclaim_local_vlan(self, port, segmentation_id): (_dp, ofp, ofpp) = self._get_dp() if segmentation_id is None: vlan_vid = ofp.OFPVID_NONE else: vlan_vid = segmentation_id | ofp.OFPVID_PRESENT match = self._local_vlan_match(ofp, ofpp, port, vlan_vid) self.uninstall_flows(match=match) @staticmethod def _arp_dvr_dst_mac_match(ofp, ofpp, vlan, dvr_mac): # If eth_dst is equal to the dvr mac of this host, then # flag it as matched. 
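# (OpenFlow 1.3 semantics: OR-ing ofp.OFPVID_PRESENT into the VLAN ID
# makes this match apply only to frames that actually carry a VLAN tag.)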
return ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT, eth_dst=dvr_mac) @staticmethod def _dvr_dst_mac_table_id(network_type): if network_type == p_const.TYPE_VLAN: return constants.ARP_DVR_MAC_TO_DST_MAC_VLAN else: return constants.ARP_DVR_MAC_TO_DST_MAC def install_dvr_dst_mac_for_arp(self, network_type, vlan_tag, gateway_mac, dvr_mac, rtr_port): table_id = self._dvr_dst_mac_table_id(network_type) # Match the destination MAC with the DVR MAC (_dp, ofp, ofpp) = self._get_dp() match = self._arp_dvr_dst_mac_match(ofp, ofpp, vlan_tag, dvr_mac) # Incoming packets arrive with the DVR host MAC as the destination # MAC, set by the ARP responder; the source MAC is that of the port # which answered via the ARP responder. So just replace the DVR host # MAC in 'eth_dst' with the gateway MAC, so that the packet ends up in # the right table and reaches the router interface. actions = [ ofpp.OFPActionSetField(eth_dst=gateway_mac), ofpp.OFPActionPopVlan(), ofpp.OFPActionOutput(rtr_port, 0) ] self.install_apply_actions(table_id=table_id, priority=5, match=match, actions=actions) @staticmethod def _dvr_to_src_mac_match(ofp, ofpp, vlan_tag, dst_mac): return ofpp.OFPMatch(vlan_vid=vlan_tag | ofp.OFPVID_PRESENT, eth_dst=dst_mac) @staticmethod def _dvr_to_src_mac_table_id(network_type): if network_type == p_const.TYPE_VLAN: return constants.DVR_TO_SRC_MAC_VLAN else: return constants.DVR_TO_SRC_MAC def install_dvr_to_src_mac(self, network_type, vlan_tag, gateway_mac, dst_mac, dst_port): table_id = self._dvr_to_src_mac_table_id(network_type) (_dp, ofp, ofpp) = self._get_dp() match = self._dvr_to_src_mac_match(ofp, ofpp, vlan_tag=vlan_tag, dst_mac=dst_mac) actions = [ ofpp.OFPActionSetField(eth_src=gateway_mac), ] instructions = [ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions), ofpp.OFPInstructionGotoTable(table_id=constants.TRANSIENT_TABLE), ] self.install_instructions(table_id=table_id, priority=20, match=match, instructions=instructions) actions = [ ofpp.OFPActionPopVlan(), ofpp.OFPActionOutput(dst_port, 0), ] self.install_apply_actions(table_id=constants.TRANSIENT_TABLE, priority=20, match=match, actions=actions) def delete_dvr_to_src_mac(self, network_type, vlan_tag, dst_mac): table_id = self._dvr_to_src_mac_table_id(network_type) (_dp, ofp, ofpp) = self._get_dp() match = self._dvr_to_src_mac_match(ofp, ofpp, vlan_tag=vlan_tag, dst_mac=dst_mac) for table in (table_id, constants.TRANSIENT_TABLE): self.uninstall_flows( strict=True, priority=20, table_id=table, match=match) def add_dvr_mac_vlan(self, mac, port): self.install_goto(table_id=constants.LOCAL_SWITCHING, priority=4, in_port=port, eth_src=mac, dest_table_id=constants.DVR_TO_SRC_MAC_VLAN) def remove_dvr_mac_vlan(self, mac): # REVISIT(yamamoto): match in_port as well?
self.uninstall_flows(table_id=constants.LOCAL_SWITCHING, eth_src=mac) def add_dvr_mac_tun(self, mac, port): self.install_goto(table_id=constants.LOCAL_SWITCHING, priority=2, in_port=port, eth_src=mac, dest_table_id=constants.DVR_TO_SRC_MAC) def remove_dvr_mac_tun(self, mac, port): self.uninstall_flows(table_id=constants.LOCAL_SWITCHING, in_port=port, eth_src=mac) def delete_dvr_dst_mac_for_arp(self, network_type, vlan_tag, gateway_mac, dvr_mac, rtr_port): table_id = self._dvr_to_src_mac_table_id(network_type) (_dp, ofp, ofpp) = self._get_dp() match = self._arp_dvr_dst_mac_match(ofp, ofpp, vlan_tag, dvr_mac) self.uninstall_flows( strict=True, priority=5, table_id=table_id, match=match) def add_dvr_gateway_mac_arp_vlan(self, mac, port): self.install_goto(table_id=constants.LOCAL_SWITCHING, priority=5, in_port=port, eth_dst=mac, dest_table_id=constants.ARP_DVR_MAC_TO_DST_MAC_VLAN) def remove_dvr_gateway_mac_arp_vlan(self, mac, port): self.uninstall_flows(table_id=constants.LOCAL_SWITCHING, eth_dst=mac) def add_dvr_gateway_mac_arp_tun(self, mac, port): self.install_goto(table_id=constants.LOCAL_SWITCHING, priority=5, in_port=port, eth_dst=mac, dest_table_id=constants.ARP_DVR_MAC_TO_DST_MAC) def remove_dvr_gateway_mac_arp_tun(self, mac, port): self.uninstall_flows(table_id=constants.LOCAL_SWITCHING, eth_dst=mac) @staticmethod def _arp_reply_match(ofp, ofpp, port): return ofpp.OFPMatch(in_port=port, eth_type=ether_types.ETH_TYPE_ARP) @staticmethod def _icmpv6_reply_match(ofp, ofpp, port): return ofpp.OFPMatch(in_port=port, eth_type=ether_types.ETH_TYPE_IPV6, ip_proto=in_proto.IPPROTO_ICMPV6, icmpv6_type=icmpv6.ND_NEIGHBOR_ADVERT) def install_icmpv6_na_spoofing_protection(self, port, ip_addresses): # Allow neighbor advertisements as long as they match addresses # that actually belong to the port. for ip in ip_addresses: masked_ip = self._cidr_to_os_ken(ip) self.install_goto( table_id=constants.ARP_SPOOF_TABLE, priority=2, eth_type=ether_types.ETH_TYPE_IPV6, ip_proto=in_proto.IPPROTO_ICMPV6, icmpv6_type=icmpv6.ND_NEIGHBOR_ADVERT, ipv6_nd_target=masked_ip, in_port=port, dest_table_id=constants.TRANSIENT_TABLE) # Now that the rules are ready, direct icmpv6 neighbor advertisement # traffic from the port into the anti-spoof table. 
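# Installing the per-address allow rules above before this redirect
# avoids a window in which valid advertisements from the port would hit
# the ARP_SPOOF_TABLE default drop flow.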
(_dp, ofp, ofpp) = self._get_dp() match = self._icmpv6_reply_match(ofp, ofpp, port=port) self.install_goto(table_id=constants.LOCAL_SWITCHING, priority=10, match=match, dest_table_id=constants.ARP_SPOOF_TABLE) def set_allowed_macs_for_port(self, port, mac_addresses=None, allow_all=False): if allow_all: self.uninstall_flows(table_id=constants.LOCAL_SWITCHING, in_port=port) self.uninstall_flows(table_id=constants.MAC_SPOOF_TABLE, in_port=port) return mac_addresses = mac_addresses or [] for address in mac_addresses: self.install_goto( table_id=constants.MAC_SPOOF_TABLE, priority=2, eth_src=address, in_port=port, dest_table_id=constants.TRANSIENT_TABLE) # normalize so we can see if macs are the same mac_addresses = {netaddr.EUI(mac) for mac in mac_addresses} flows = self.dump_flows(constants.MAC_SPOOF_TABLE) for flow in flows: matches = dict(flow.match.items()) if matches.get('in_port') != port: continue if not matches.get('eth_src'): continue flow_mac = matches['eth_src'] if netaddr.EUI(flow_mac) not in mac_addresses: self.uninstall_flows(table_id=constants.MAC_SPOOF_TABLE, in_port=port, eth_src=flow_mac) self.install_goto(table_id=constants.LOCAL_SWITCHING, priority=9, in_port=port, dest_table_id=constants.MAC_SPOOF_TABLE) def install_arp_spoofing_protection(self, port, ip_addresses): # allow ARP replies as long as they match addresses that actually # belong to the port. for ip in ip_addresses: masked_ip = self._cidr_to_os_ken(ip) self.install_goto(table_id=constants.ARP_SPOOF_TABLE, priority=2, eth_type=ether_types.ETH_TYPE_ARP, arp_spa=masked_ip, in_port=port, dest_table_id=constants.MAC_SPOOF_TABLE) # Now that the rules are ready, direct ARP traffic from the port into # the anti-spoof table. # This strategy fails gracefully because OVS versions that can't match # on ARP headers will just process traffic normally. (_dp, ofp, ofpp) = self._get_dp() match = self._arp_reply_match(ofp, ofpp, port=port) self.install_goto(table_id=constants.LOCAL_SWITCHING, priority=10, match=match, dest_table_id=constants.ARP_SPOOF_TABLE) def delete_arp_spoofing_protection(self, port): (_dp, ofp, ofpp) = self._get_dp() match = self._arp_reply_match(ofp, ofpp, port=port) self.uninstall_flows(table_id=constants.LOCAL_SWITCHING, match=match) match = self._icmpv6_reply_match(ofp, ofpp, port=port) self.uninstall_flows(table_id=constants.LOCAL_SWITCHING, match=match) self.delete_arp_spoofing_allow_rules(port) def delete_arp_spoofing_allow_rules(self, port): self.uninstall_flows(table_id=constants.ARP_SPOOF_TABLE, in_port=port) def install_dscp_marking_rule(self, port, dscp_mark): # reg2 is a metadata field that does not alter packets. # By loading a value into this field and checking if the value is # altered it allows the packet to be resubmitted and go through # the flow table again to be identified by other flows. 
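# Concretely, the two flows below (one for IPv4, one for IPv6) match
# reg2=0, set reg2=1 together with the DSCP field, and resubmit; on the
# second pass reg2 is non-zero, so the marking flow no longer matches and
# the packet continues through the pipeline normally.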
(dp, ofp, ofpp) = self._get_dp() actions = [ofpp.OFPActionSetField(reg2=1), ofpp.OFPActionSetField(ip_dscp=dscp_mark), ofpp.NXActionResubmit(in_port=port)] instructions = [ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions), ] self.install_instructions(instructions, table_id=0, priority=65535, in_port=port, reg2=0, eth_type=0x0800) self.install_instructions(instructions, table_id=0, priority=65535, in_port=port, reg2=0, eth_type=0x86DD) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_phys.py0000644000175000017500000000547700000000000033311 0ustar00coreycorey00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import br_dvr_process from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import ovs_bridge class OVSPhysicalBridge(ovs_bridge.OVSAgentBridge, br_dvr_process.OVSDVRProcessMixin): """openvswitch agent physical bridge specific logic.""" # Used by OVSDVRProcessMixin dvr_process_table_id = constants.DVR_PROCESS_VLAN dvr_process_next_table_id = constants.LOCAL_VLAN_TRANSLATION of_tables = constants.PHY_BR_ALL_TABLES def setup_default_table(self): self.install_normal() @staticmethod def _local_vlan_match(ofp, ofpp, port, lvid): return ofpp.OFPMatch(in_port=port, vlan_vid=lvid | ofp.OFPVID_PRESENT) def provision_local_vlan(self, port, lvid, segmentation_id, distributed): table_id = constants.LOCAL_VLAN_TRANSLATION if distributed else 0 (_dp, ofp, ofpp) = self._get_dp() match = self._local_vlan_match(ofp, ofpp, port, lvid) if segmentation_id is None: actions = [ofpp.OFPActionPopVlan()] else: vlan_vid = segmentation_id | ofp.OFPVID_PRESENT actions = [ofpp.OFPActionSetField(vlan_vid=vlan_vid)] actions += [ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0)] self.install_apply_actions(table_id=table_id, priority=4, match=match, actions=actions) def reclaim_local_vlan(self, port, lvid): (_dp, ofp, ofpp) = self._get_dp() match = self._local_vlan_match(ofp, ofpp, port, lvid) self.uninstall_flows(match=match) def add_dvr_mac_vlan(self, mac, port): self.install_output(table_id=constants.DVR_NOT_LEARN_VLAN, priority=2, eth_src=mac, port=port) def remove_dvr_mac_vlan(self, mac): # REVISIT(yamamoto): match in_port as well? self.uninstall_flows( table_id=constants.DVR_NOT_LEARN_VLAN, eth_src=mac) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_tun.py0000644000175000017500000003232600000000000033125 0ustar00coreycorey00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. 
# Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Copyright 2011 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from os_ken.lib.packet import arp from os_ken.lib.packet import ether_types from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import br_dvr_process from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import ovs_bridge class OVSTunnelBridge(ovs_bridge.OVSAgentBridge, br_dvr_process.OVSDVRProcessMixin): """openvswitch agent tunnel bridge specific logic.""" # Used by OVSDVRProcessMixin dvr_process_table_id = constants.DVR_PROCESS dvr_process_next_table_id = constants.PATCH_LV_TO_TUN of_tables = constants.TUN_BR_ALL_TABLES def setup_default_table(self, patch_int_ofport, arp_responder_enabled): (dp, ofp, ofpp) = self._get_dp() # Table 0 (default) will sort incoming traffic depending on in_port self.install_goto(dest_table_id=constants.PATCH_LV_TO_TUN, priority=1, in_port=patch_int_ofport) self.install_drop() # default drop if arp_responder_enabled: # ARP broadcast-ed requests go to the local ARP_RESPONDER table to # be locally resolved # REVISIT(yamamoto): add arp_op=arp.ARP_REQUEST matcher?
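# (eth_dst=ff:ff:ff:ff:ff:ff below restricts this goto to broadcast
# frames, so unicast ARP traffic bypasses the responder table.)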
self.install_goto(dest_table_id=constants.ARP_RESPONDER, table_id=constants.PATCH_LV_TO_TUN, priority=1, eth_dst="ff:ff:ff:ff:ff:ff", eth_type=ether_types.ETH_TYPE_ARP) # PATCH_LV_TO_TUN table will handle packets coming from patch_int # unicasts go to table UCAST_TO_TUN where remote addresses are learnt self.install_goto(dest_table_id=constants.UCAST_TO_TUN, table_id=constants.PATCH_LV_TO_TUN, eth_dst=('00:00:00:00:00:00', '01:00:00:00:00:00')) # Broadcasts/multicasts go to table FLOOD_TO_TUN that handles flooding self.install_goto(dest_table_id=constants.FLOOD_TO_TUN, table_id=constants.PATCH_LV_TO_TUN, eth_dst=('01:00:00:00:00:00', '01:00:00:00:00:00')) # Tables [tunnel_type]_TUN_TO_LV will set lvid depending on tun_id # for each tunnel type, and resubmit to table LEARN_FROM_TUN where # remote mac addresses will be learnt for tunnel_type in constants.TUNNEL_NETWORK_TYPES: self.install_drop(table_id=constants.TUN_TABLE[tunnel_type]) # LEARN_FROM_TUN table will have a single flow using a learn action to # dynamically set-up flows in UCAST_TO_TUN corresponding to remote mac # addresses (assumes that lvid has already been set by a previous flow) # Once remote mac addresses are learnt, output packet to patch_int flow_specs = [ ofpp.NXFlowSpecMatch(src=('vlan_tci', 0), dst=('vlan_tci', 0), n_bits=12), ofpp.NXFlowSpecMatch(src=('eth_src', 0), dst=('eth_dst', 0), n_bits=48), ofpp.NXFlowSpecLoad(src=0, dst=('vlan_tci', 0), n_bits=16), ofpp.NXFlowSpecLoad(src=('tunnel_id', 0), dst=('tunnel_id', 0), n_bits=64), ofpp.NXFlowSpecOutput(src=('in_port', 0), dst='', n_bits=32), ] actions = [ ofpp.NXActionLearn(table_id=constants.UCAST_TO_TUN, cookie=self.default_cookie, priority=1, hard_timeout=300, specs=flow_specs), ofpp.OFPActionOutput(patch_int_ofport, 0), ] self.install_apply_actions(table_id=constants.LEARN_FROM_TUN, priority=1, actions=actions) # Egress unicast will be handled in table UCAST_TO_TUN, where remote # mac addresses will be learned. 
For now, just add a default flow that # will resubmit unknown unicasts to table FLOOD_TO_TUN to treat them # as broadcasts/multicasts self.install_goto(dest_table_id=constants.FLOOD_TO_TUN, table_id=constants.UCAST_TO_TUN) if arp_responder_enabled: # If none of the ARP entries correspond to the requested IP, the # broadcast-ed packet is resubmitted to the flooding table self.install_goto(dest_table_id=constants.FLOOD_TO_TUN, table_id=constants.ARP_RESPONDER) # FLOOD_TO_TUN will handle flooding in tunnels based on lvid, # for now, add a default drop action self.install_drop(table_id=constants.FLOOD_TO_TUN) @staticmethod def _local_vlan_match(_ofp, ofpp, tun_id): return ofpp.OFPMatch(tunnel_id=tun_id) def provision_local_vlan(self, network_type, lvid, segmentation_id, distributed=False): (_dp, ofp, ofpp) = self._get_dp() match = self._local_vlan_match(ofp, ofpp, segmentation_id) table_id = constants.TUN_TABLE[network_type] if distributed: dest_table_id = constants.DVR_NOT_LEARN else: dest_table_id = constants.LEARN_FROM_TUN actions = [ ofpp.OFPActionPushVlan(), ofpp.OFPActionSetField(vlan_vid=lvid | ofp.OFPVID_PRESENT), ] instructions = [ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions), ofpp.OFPInstructionGotoTable(table_id=dest_table_id)] self.install_instructions(table_id=table_id, priority=1, match=match, instructions=instructions) def reclaim_local_vlan(self, network_type, segmentation_id): (_dp, ofp, ofpp) = self._get_dp() match = self._local_vlan_match(ofp, ofpp, segmentation_id) table_id = constants.TUN_TABLE[network_type] self.uninstall_flows(table_id=table_id, match=match) @staticmethod def _flood_to_tun_match(ofp, ofpp, vlan): return ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT) def install_flood_to_tun(self, vlan, tun_id, ports): (_dp, ofp, ofpp) = self._get_dp() match = self._flood_to_tun_match(ofp, ofpp, vlan) actions = [ofpp.OFPActionPopVlan(), ofpp.OFPActionSetField(tunnel_id=tun_id)] for port in ports: actions.append(ofpp.OFPActionOutput(port, 0)) self.install_apply_actions(table_id=constants.FLOOD_TO_TUN, priority=1, match=match, actions=actions) def delete_flood_to_tun(self, vlan): (_dp, ofp, ofpp) = self._get_dp() match = self._flood_to_tun_match(ofp, ofpp, vlan) self.uninstall_flows(table_id=constants.FLOOD_TO_TUN, match=match) @staticmethod def _unicast_to_tun_match(ofp, ofpp, vlan, mac): return ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT, eth_dst=mac) def install_unicast_to_tun(self, vlan, tun_id, port, mac): (_dp, ofp, ofpp) = self._get_dp() match = self._unicast_to_tun_match(ofp, ofpp, vlan, mac) actions = [ofpp.OFPActionPopVlan(), ofpp.OFPActionSetField(tunnel_id=tun_id), ofpp.OFPActionOutput(port, 0)] self.install_apply_actions(table_id=constants.UCAST_TO_TUN, priority=2, match=match, actions=actions) def delete_unicast_to_tun(self, vlan, mac): (_dp, ofp, ofpp) = self._get_dp() if mac is None: match = ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT) else: match = self._unicast_to_tun_match(ofp, ofpp, vlan, mac) self.uninstall_flows(table_id=constants.UCAST_TO_TUN, match=match) @staticmethod def _arp_responder_match(ofp, ofpp, vlan, ip): # REVISIT(yamamoto): add arp_op=arp.ARP_REQUEST matcher? 
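# The responder rewrites a matched request into a reply in place: see
# install_arp_responder below, which flips arp_op to ARP_REPLY, moves
# sha/spa into tha/tpa, fills in the requested MAC/IP, and sends the
# frame back out of its in_port.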
return ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT, eth_type=ether_types.ETH_TYPE_ARP, arp_tpa=ip) def install_arp_responder(self, vlan, ip, mac): (dp, ofp, ofpp) = self._get_dp() match = self._arp_responder_match(ofp, ofpp, vlan, ip) actions = [ofpp.OFPActionSetField(arp_op=arp.ARP_REPLY), ofpp.NXActionRegMove(src_field='arp_sha', dst_field='arp_tha', n_bits=48), ofpp.NXActionRegMove(src_field='arp_spa', dst_field='arp_tpa', n_bits=32), ofpp.OFPActionSetField(arp_sha=mac), ofpp.OFPActionSetField(arp_spa=ip), ofpp.NXActionRegMove(src_field='eth_src', dst_field='eth_dst', n_bits=48), ofpp.OFPActionSetField(eth_src=mac), ofpp.OFPActionOutput(ofp.OFPP_IN_PORT, 0)] self.install_apply_actions(table_id=constants.ARP_RESPONDER, priority=1, match=match, actions=actions) def delete_arp_responder(self, vlan, ip): (_dp, ofp, ofpp) = self._get_dp() if ip is None: # REVISIT(yamamoto): add arp_op=arp.ARP_REQUEST matcher? match = ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT, eth_type=ether_types.ETH_TYPE_ARP) else: match = self._arp_responder_match(ofp, ofpp, vlan, ip) self.uninstall_flows(table_id=constants.ARP_RESPONDER, match=match) def setup_tunnel_port(self, network_type, port): self.install_goto(dest_table_id=constants.TUN_TABLE[network_type], priority=1, in_port=port) def cleanup_tunnel_port(self, port): self.uninstall_flows(in_port=port) def add_dvr_mac_tun(self, mac, port): self.install_output(table_id=constants.DVR_NOT_LEARN, priority=1, eth_src=mac, port=port) def remove_dvr_mac_tun(self, mac): # REVISIT(yamamoto): match in_port as well? self.uninstall_flows(table_id=constants.DVR_NOT_LEARN, eth_src=mac) def deferred(self): # REVISIT(yamamoto): This is for API compat with "ovs-ofctl" # interface. Consider removing this mechanism when obsoleting # "ovs-ofctl" interface. # For "ovs-ofctl" interface, "deferred" mechanism would improve # performance by batching flow-mods with a single ovs-ofctl command # invocation. # On the other hand, for this "native" interface, the overheads of # each flow-mods are already minimum and batching doesn't make much # sense. Thus this method is left as no-op. # It might be possible to send multiple flow-mods with a single # barrier. But it's unclear that level of performance optimization # is desirable while it would certainly complicate error handling. return self def __enter__(self): # REVISIT(yamamoto): See the comment on deferred(). return self def __exit__(self, exc_type, exc_value, traceback): # REVISIT(yamamoto): See the comment on deferred(). pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/main.py0000644000175000017500000000240000000000000032546 0ustar00coreycorey00000000000000# Copyright (C) 2015 VA Linux Systems Japan K.K. # Copyright (C) 2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
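# An illustrative sketch of the os-ken application model that main() below
# relies on; the class name and module path are hypothetical, and the code is
# kept in a comment so that AppManager does not try to load it:
#
#     from os_ken.base import app_manager
#
#     class HelloApp(app_manager.OSKenApp):
#         def start(self):
#             super(HelloApp, self).start()
#             self.logger.info("application started")
#
#     # run_apps() imports the named modules, instantiates the OSKenApp
#     # subclasses found in them and drives the event loop:
#     app_manager.AppManager.run_apps(['path.to.this.module'])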
from os_ken.base import app_manager from os_ken import cfg as os_ken_cfg from oslo_config import cfg cfg.CONF.import_group( 'OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.common.config') def init_config(): os_ken_cfg.CONF(project='os_ken', args=[]) os_ken_cfg.CONF.ofp_listen_host = cfg.CONF.OVS.of_listen_address os_ken_cfg.CONF.ofp_tcp_listen_port = cfg.CONF.OVS.of_listen_port def main(): app_manager.AppManager.run_apps([ 'neutron.plugins.ml2.drivers.openvswitch.agent.' 'openflow.native.ovs_oskenapp', ]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py0000644000175000017500000003215000000000000033455 0ustar00coreycorey00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import random import debtcollector import eventlet import netaddr from neutron_lib import exceptions import os_ken.app.ofctl.api as ofctl_api import os_ken.exception as os_ken_exc from os_ken.lib import ofctl_string from os_ken.ofproto import ofproto_parser from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import timeutils import six from neutron._i18n import _ from neutron.agent.common import ovs_lib LOG = logging.getLogger(__name__) BUNDLE_ID_WIDTH = 1 << 32 COOKIE_DEFAULT = object() class ActiveBundleRunning(exceptions.NeutronException): message = _("Another active bundle 0x%(bundle_id)x is running") class OpenFlowSwitchMixin(object): """Mixin to provide common convenient routines for an openflow switch. NOTE(yamamoto): super() points to ovs_lib.OVSBridge. See ovs_bridge.py how this class is actually used. """ @staticmethod def _cidr_to_os_ken(ip): n = netaddr.IPNetwork(ip) if n.hostmask: return (str(n.ip), str(n.netmask)) return str(n.ip) def __init__(self, *args, **kwargs): self._app = kwargs.pop('os_ken_app') self.active_bundles = set() super(OpenFlowSwitchMixin, self).__init__(*args, **kwargs) def _get_dp_by_dpid(self, dpid_int): """Get os-ken datapath object for the switch.""" timeout_sec = cfg.CONF.OVS.of_connect_timeout start_time = timeutils.now() while True: dp = ofctl_api.get_datapath(self._app, dpid_int) if dp is not None: break # The switch has not established a connection to us; retry again # until timeout. 
if timeutils.now() > start_time + timeout_sec: m = _("Switch connection timeout") LOG.error(m) # NOTE(yamamoto): use RuntimeError for compat with ovs_lib raise RuntimeError(m) return dp def _send_msg(self, msg, reply_cls=None, reply_multi=False, active_bundle=None): timeout_sec = cfg.CONF.OVS.of_request_timeout timeout = eventlet.Timeout(seconds=timeout_sec) if active_bundle is not None: (dp, ofp, ofpp) = self._get_dp() msg = ofpp.ONFBundleAddMsg(dp, active_bundle['id'], active_bundle['bundle_flags'], msg, []) try: result = ofctl_api.send_msg(self._app, msg, reply_cls, reply_multi) except os_ken_exc.OSKenException as e: m = _("ofctl request %(request)s error %(error)s") % { "request": msg, "error": e, } LOG.error(m) # NOTE(yamamoto): use RuntimeError for compat with ovs_lib raise RuntimeError(m) except eventlet.Timeout as e: with excutils.save_and_reraise_exception() as ctx: if e is timeout: ctx.reraise = False m = _("ofctl request %(request)s timed out") % { "request": msg, } LOG.error(m) # NOTE(yamamoto): use RuntimeError for compat with ovs_lib raise RuntimeError(m) finally: timeout.cancel() LOG.debug("ofctl request %(request)s result %(result)s", {"request": msg, "result": result}) return result @staticmethod def _match(_ofp, ofpp, match, **match_kwargs): if match is not None: return match return ofpp.OFPMatch(**match_kwargs) def uninstall_flows(self, table_id=None, strict=False, priority=0, cookie=COOKIE_DEFAULT, cookie_mask=0, match=None, active_bundle=None, **match_kwargs): (dp, ofp, ofpp) = self._get_dp() if table_id is None: table_id = ofp.OFPTT_ALL if cookie == ovs_lib.COOKIE_ANY: cookie = 0 if cookie_mask != 0: raise Exception(_("cookie=COOKIE_ANY but cookie_mask set to " "%s") % cookie_mask) elif cookie == COOKIE_DEFAULT: cookie = self._default_cookie cookie_mask = ovs_lib.UINT64_BITMASK match = self._match(ofp, ofpp, match, **match_kwargs) if strict: cmd = ofp.OFPFC_DELETE_STRICT else: cmd = ofp.OFPFC_DELETE msg = ofpp.OFPFlowMod(dp, command=cmd, cookie=cookie, cookie_mask=cookie_mask, table_id=table_id, match=match, priority=priority, out_group=ofp.OFPG_ANY, out_port=ofp.OFPP_ANY) self._send_msg(msg, active_bundle=active_bundle) def dump_flows(self, table_id=None): (dp, ofp, ofpp) = self._get_dp() if table_id is None: table_id = ofp.OFPTT_ALL msg = ofpp.OFPFlowStatsRequest(dp, table_id=table_id) replies = self._send_msg(msg, reply_cls=ofpp.OFPFlowStatsReply, reply_multi=True) flows = [] for rep in replies: flows += rep.body return flows def _dump_and_clean(self, table_id=None): cookies = set([f.cookie for f in self.dump_flows(table_id)]) - \ self.reserved_cookies for c in cookies: LOG.warning("Deleting flow with cookie 0x%(cookie)x", {'cookie': c}) self.uninstall_flows(cookie=c, cookie_mask=ovs_lib.UINT64_BITMASK) def cleanup_flows(self): LOG.info("Reserved cookies for %s: %s", self.br_name, self.reserved_cookies) for table_id in self.of_tables: self._dump_and_clean(table_id) def install_goto_next(self, table_id, active_bundle=None): self.install_goto(table_id=table_id, dest_table_id=table_id + 1, active_bundle=active_bundle) def install_output(self, port, table_id=0, priority=0, match=None, **match_kwargs): (_dp, ofp, ofpp) = self._get_dp() actions = [ofpp.OFPActionOutput(port, 0)] instructions = [ofpp.OFPInstructionActions( ofp.OFPIT_APPLY_ACTIONS, actions)] self.install_instructions(table_id=table_id, priority=priority, instructions=instructions, match=match, **match_kwargs) def install_normal(self, table_id=0, priority=0, match=None, **match_kwargs): (_dp, ofp, _ofpp) = 
self._get_dp() self.install_output(port=ofp.OFPP_NORMAL, table_id=table_id, priority=priority, match=match, **match_kwargs) def install_goto(self, dest_table_id, table_id=0, priority=0, match=None, **match_kwargs): (_dp, _ofp, ofpp) = self._get_dp() instructions = [ofpp.OFPInstructionGotoTable(table_id=dest_table_id)] self.install_instructions(table_id=table_id, priority=priority, instructions=instructions, match=match, **match_kwargs) def install_drop(self, table_id=0, priority=0, match=None, **match_kwargs): self.install_instructions(table_id=table_id, priority=priority, instructions=[], match=match, **match_kwargs) def install_instructions(self, instructions, table_id=0, priority=0, match=None, active_bundle=None, **match_kwargs): (dp, ofp, ofpp) = self._get_dp() match = self._match(ofp, ofpp, match, **match_kwargs) if isinstance(instructions, six.string_types): debtcollector.deprecate("Use of string instruction is " "deprecated", removal_version='U') jsonlist = ofctl_string.ofp_instruction_from_str( ofp, instructions) instructions = ofproto_parser.ofp_instruction_from_jsondict( dp, jsonlist) msg = ofpp.OFPFlowMod(dp, table_id=table_id, cookie=self.default_cookie, match=match, priority=priority, instructions=instructions) self._send_msg(msg, active_bundle=active_bundle) def install_apply_actions(self, actions, table_id=0, priority=0, match=None, **match_kwargs): (dp, ofp, ofpp) = self._get_dp() instructions = [ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions), ] self.install_instructions(table_id=table_id, priority=priority, match=match, instructions=instructions, **match_kwargs) def bundled(self, atomic=False, ordered=False): return BundledOpenFlowBridge(self, atomic, ordered) class BundledOpenFlowBridge(object): def __init__(self, br, atomic, ordered): self.br = br self.active_bundle = None self.bundle_flags = 0 if not atomic and not ordered: return (dp, ofp, ofpp) = self.br._get_dp() if atomic: self.bundle_flags |= ofp.ONF_BF_ATOMIC if ordered: self.bundle_flags |= ofp.ONF_BF_ORDERED def __getattr__(self, name): if name.startswith('install') or name.startswith('uninstall'): under = getattr(self.br, name) if self.active_bundle is None: return under return functools.partial(under, active_bundle=dict( id=self.active_bundle, bundle_flags=self.bundle_flags)) raise AttributeError(_("Only install_* or uninstall_* methods " "can be used")) def __enter__(self): if self.active_bundle is not None: raise ActiveBundleRunning(bundle_id=self.active_bundle) while True: self.active_bundle = random.randrange(BUNDLE_ID_WIDTH) if self.active_bundle not in self.br.active_bundles: self.br.active_bundles.add(self.active_bundle) break try: (dp, ofp, ofpp) = self.br._get_dp() msg = ofpp.ONFBundleCtrlMsg(dp, self.active_bundle, ofp.ONF_BCT_OPEN_REQUEST, self.bundle_flags, []) reply = self.br._send_msg(msg, reply_cls=ofpp.ONFBundleCtrlMsg) if reply.type != ofp.ONF_BCT_OPEN_REPLY: raise RuntimeError( _("Unexpected reply type %d != ONF_BCT_OPEN_REPLY") % reply.type) return self except Exception: self.br.active_bundles.remove(self.active_bundle) self.active_bundle = None raise def __exit__(self, type, value, traceback): (dp, ofp, ofpp) = self.br._get_dp() if type is None: ctrl_type = ofp.ONF_BCT_COMMIT_REQUEST expected_reply = ofp.ONF_BCT_COMMIT_REPLY else: ctrl_type = ofp.ONF_BCT_DISCARD_REQUEST expected_reply = ofp.ONF_BCT_DISCARD_REPLY LOG.warning( "Discarding bundle with ID 0x%(id)x due to an exception", {'id': self.active_bundle}) try: msg = ofpp.ONFBundleCtrlMsg(dp, self.active_bundle, 
ctrl_type, self.bundle_flags, []) reply = self.br._send_msg(msg, reply_cls=ofpp.ONFBundleCtrlMsg) if reply.type != expected_reply: # The bundle ID may be in a bad state. Let's leave it # in active_bundles so that we will never use it again. raise RuntimeError(_("Unexpected reply type %d") % reply.type) self.br.active_bundles.remove(self.active_bundle) finally: # It is possible the bundle is kept open, but this must be # cleared or all subsequent __enter__ will fail. self.active_bundle = None ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge.p0000644000175000017500000001040200000000000033555 0ustar00coreycorey00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_utils import excutils from neutron._i18n import _ from neutron.agent.common import ovs_lib from neutron.common import ipv6_utils from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \ as ovs_consts from neutron.plugins.ml2.drivers.openvswitch.agent.openflow \ import br_cookie from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import ofswitch LOG = logging.getLogger(__name__) class OVSAgentBridge(ofswitch.OpenFlowSwitchMixin, br_cookie.OVSBridgeCookieMixin, ovs_lib.OVSBridge): """Common code for bridges used by OVS agent""" _cached_dpid = None def _get_dp(self): """Get (dp, ofp, ofpp) tuple for the switch. A convenient method for openflow message composers. """ while True: if self._cached_dpid is None: dpid = self.get_datapath_id() LOG.info("Bridge %(br_name)s has datapath-ID %(dpid)s", {"br_name": self.br_name, "dpid": dpid}) if dpid is None: raise RuntimeError(_("Unknown datapath id.")) self._cached_dpid = int(dpid, 16) try: dp = self._get_dp_by_dpid(self._cached_dpid) return dp, dp.ofproto, dp.ofproto_parser except RuntimeError: with excutils.save_and_reraise_exception() as ctx: # Retry if dpid has been changed. # NOTE(yamamoto): Open vSwitch change its dpid on # some events. # REVISIT(yamamoto): Consider to set dpid statically. new_dpid = int(self.get_datapath_id(), 16) if new_dpid != self._cached_dpid: LOG.info("Bridge %(br_name)s changed its " "datapath-ID from %(old)x to %(new)x", { "br_name": self.br_name, "old": self._cached_dpid, "new": new_dpid, }) ctx.reraise = False self._cached_dpid = new_dpid def setup_controllers(self, conf): url = ipv6_utils.valid_ipv6_url(conf.OVS.of_listen_address, conf.OVS.of_listen_port) controllers = ["tcp:" + url] self.add_protocols(ovs_consts.OPENFLOW13) self.set_controller(controllers) # NOTE(ivc): Force "out-of-band" controller connection mode (see # "In-Band Control" [1]). 
# # By default openvswitch uses "in-band" controller connection mode # which adds hidden OpenFlow rules (only visible by issuing ovs-appctl # bridge/dump-flows
) and leads to a network loop on br-tun. As of # now the OF controller is hosted locally with OVS which fits the # "out-of-band" mode. If the remote OF controller is ever to be # supported by openvswitch agent in the future, "In-Band Control" [1] # should be taken into consideration for physical bridge only, but # br-int and br-tun must be configured with the "out-of-band" # controller connection mode. # # [1] https://github.com/openvswitch/ovs/blob/master/DESIGN.md self.set_controllers_connection_mode("out-of-band") self.set_controllers_inactivity_probe(conf.OVS.of_inactivity_probe) def drop_port(self, in_port): self.install_drop(priority=2, in_port=in_port) ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_oskenapp.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_oskenapp0000644000175000017500000000476700000000000033724 0ustar00coreycorey00000000000000# Copyright (C) 2015 VA Linux Systems Japan K.K. # Copyright (C) 2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import os_ken.app.ofctl.api # noqa from os_ken.base import app_manager from os_ken.lib import hub from os_ken.ofproto import ofproto_v1_3 from oslo_log import log as logging from oslo_utils import excutils from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import br_int from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import br_phys from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import br_tun from neutron.plugins.ml2.drivers.openvswitch.agent \ import ovs_neutron_agent as ovs_agent LOG = logging.getLogger(__name__) def agent_main_wrapper(bridge_classes): try: ovs_agent.main(bridge_classes) except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Agent main thread died of an exception") finally: # The following call terminates os-ken's AppManager.run_apps(), # which is needed for clean shutdown of an agent process. # The close() call must be called in another thread, otherwise # it suicides and ends prematurely. 
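# (hub.spawn() below hands close() to a separate green thread for exactly
# that reason.)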
hub.spawn(app_manager.AppManager.get_instance().close) class OVSNeutronAgentOSKenApp(app_manager.OSKenApp): OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION] def start(self): # Start os-ken event loop thread super(OVSNeutronAgentOSKenApp, self).start() def _make_br_cls(br_cls): return functools.partial(br_cls, os_ken_app=self) # Start agent main loop thread bridge_classes = { 'br_int': _make_br_cls(br_int.OVSIntegrationBridge), 'br_phys': _make_br_cls(br_phys.OVSPhysicalBridge), 'br_tun': _make_br_cls(br_tun.OVSTunnelBridge), } return hub.spawn(agent_main_wrapper, bridge_classes, raise_error=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_agent_extension_api.py0000644000175000017500000000467500000000000033425 0ustar00coreycorey00000000000000# Copyright 2016 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class OVSCookieBridge(object): '''Bridge restricting flow operations to its own distinct cookie This class creates a bridge derived from a bridge passed at init (which has to inherit from OVSBridgeCookieMixin), but that has its own cookie, registered to the underlying bridge, and that will use this cookie in all flow operations. ''' def __new__(cls, bridge): cookie_bridge = bridge.clone() cookie_bridge.set_agent_uuid_stamp(bridge.request_cookie()) return cookie_bridge def __init__(self, bridge): pass class OVSAgentExtensionAPI(object): '''Implements the Agent API for Open vSwitch agent. Extensions can gain access to this API by overriding the consume_api method which has been added to the AgentExtension class. ''' def __init__(self, int_br, tun_br, phys_brs=None): super(OVSAgentExtensionAPI, self).__init__() self.br_int = int_br self.br_tun = tun_br self.br_phys = phys_brs or {} def request_int_br(self): """Allows extensions to request an integration bridge to use for extension specific flows. """ return OVSCookieBridge(self.br_int) def request_tun_br(self): """Allows extensions to request a tunnel bridge to use for extension specific flows. If tunneling is not enabled, this method will return None. """ if not self.br_tun: return None return OVSCookieBridge(self.br_tun) def request_phy_brs(self): """Allows extensions to request all physical bridges to use for extension specific flows. This is a generator function which returns all existing physical bridges in the switch. """ for phy_br in self.br_phys.values(): yield OVSCookieBridge(phy_br) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_capabilities.py0000644000175000017500000000167200000000000032035 0ustar00coreycorey00000000000000# Copyright 2016 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants from neutron.plugins.ml2.drivers.agent import capabilities from neutron.services.trunk.drivers.openvswitch.agent import driver def register(): """Register OVS capabilities.""" # Add capabilities to be loaded during agent initialization capabilities.register(driver.init_handler, constants.AGENT_TYPE_OVS) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py0000644000175000017500000007656200000000000033141 0ustar00coreycorey00000000000000# Copyright 2014, Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys import netaddr from neutron_lib import constants as n_const from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_utils import excutils from osprofiler import profiler from neutron.agent.linux.openvswitch_firewall import firewall as ovs_firewall from neutron.common import utils as n_utils from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants LOG = logging.getLogger(__name__) # A class to represent a DVR-hosted subnet including vif_ports resident on # that subnet class LocalDVRSubnetMapping(object): def __init__(self, subnet, csnat_ofport=constants.OFPORT_INVALID): # set of compute ports on this dvr subnet self.compute_ports = {} self.subnet = subnet self.csnat_ofport = csnat_ofport self.dvr_owned = False def __str__(self): return ("subnet = %s compute_ports = %s csnat_port = %s" " is_dvr_owned = %s" % (self.subnet, self.get_compute_ofports(), self.get_csnat_ofport(), self.is_dvr_owned())) def get_subnet_info(self): return self.subnet def set_dvr_owned(self, owned): self.dvr_owned = owned def is_dvr_owned(self): return self.dvr_owned def add_compute_ofport(self, vif_id, ofport): self.compute_ports[vif_id] = ofport def remove_compute_ofport(self, vif_id): self.compute_ports.pop(vif_id, 0) def remove_all_compute_ofports(self): self.compute_ports.clear() def get_compute_ofports(self): return self.compute_ports def set_csnat_ofport(self, ofport): self.csnat_ofport = ofport def get_csnat_ofport(self): return self.csnat_ofport class OVSPort(object): def __init__(self, id, ofport, mac, device_owner): self.id = id self.mac = mac self.ofport = ofport self.subnets = set() self.device_owner = device_owner def __str__(self): return ("OVSPort: id = %s, ofport = %s, mac = %s, " "device_owner = %s, subnets = %s" % (self.id, self.ofport, self.mac, self.device_owner, self.subnets)) def add_subnet(self, 
subnet_id): self.subnets.add(subnet_id) def remove_subnet(self, subnet_id): self.subnets.remove(subnet_id) def remove_all_subnets(self): self.subnets.clear() def get_subnets(self): return self.subnets def get_device_owner(self): return self.device_owner def get_mac(self): return self.mac def get_ofport(self): return self.ofport @profiler.trace_cls("ovs_dvr_agent") class OVSDVRNeutronAgent(object): '''Implements OVS-based DVR (Distributed Virtual Router) agent''' # history # 1.0 Initial version def __init__(self, context, plugin_rpc, integ_br, tun_br, bridge_mappings, phys_brs, int_ofports, phys_ofports, patch_int_ofport=constants.OFPORT_INVALID, patch_tun_ofport=constants.OFPORT_INVALID, host=None, enable_tunneling=False, enable_distributed_routing=False): self.context = context self.plugin_rpc = plugin_rpc self.host = host self.enable_tunneling = enable_tunneling self.enable_distributed_routing = enable_distributed_routing self.bridge_mappings = bridge_mappings self.phys_brs = phys_brs self.int_ofports = int_ofports self.phys_ofports = phys_ofports self.reset_ovs_parameters(integ_br, tun_br, patch_int_ofport, patch_tun_ofport) self.reset_dvr_parameters() self.dvr_mac_address = None if self.enable_distributed_routing: self.get_dvr_mac_address() self.conf = cfg.CONF self.firewall = None def set_firewall(self, firewall=None): self.firewall = firewall def setup_dvr_flows(self): self.setup_dvr_flows_on_integ_br() self.setup_dvr_flows_on_tun_br() self.setup_dvr_flows_on_phys_br() self.setup_dvr_mac_flows_on_all_brs() def reset_ovs_parameters(self, integ_br, tun_br, patch_int_ofport, patch_tun_ofport): '''Reset the openvswitch parameters''' self.int_br = integ_br self.tun_br = tun_br self.patch_int_ofport = patch_int_ofport self.patch_tun_ofport = patch_tun_ofport def reset_dvr_parameters(self): '''Reset the DVR parameters''' self.local_dvr_map = {} self.local_csnat_map = {} self.local_ports = {} self.registered_dvr_macs = set() def get_dvr_mac_address(self): try: self.get_dvr_mac_address_with_retry() except oslo_messaging.RemoteError as e: LOG.error('L2 agent could not get DVR MAC address at ' 'startup due to RPC error. It happens when the ' 'server does not support this RPC API. Detailed ' 'message: %s', e) except oslo_messaging.MessagingTimeout: LOG.error('DVR: Failed to obtain a valid local ' 'DVR MAC address') if not self.in_distributed_mode(): sys.exit(1) def get_dvr_mac_address_with_retry(self): # Get the local DVR MAC Address from the Neutron Server. # This is the first place where we contact the server on startup # so retry in case it's not ready to respond for retry_count in reversed(range(5)): try: details = self.plugin_rpc.get_dvr_mac_address_by_host( self.context, self.host) except oslo_messaging.MessagingTimeout as e: with excutils.save_and_reraise_exception() as ctx: if retry_count > 0: ctx.reraise = False LOG.warning('L2 agent could not get DVR MAC ' 'address from server. Retrying. 
' 'Detailed message: %s', e) else: LOG.debug("L2 Agent DVR: Received response for " "get_dvr_mac_address_by_host() from " "plugin: %r", details) self.dvr_mac_address = ( netaddr.EUI(details['mac_address'], dialect=netaddr.mac_unix_expanded)) return def setup_dvr_flows_on_integ_br(self): '''Setup up initial dvr flows into br-int''' LOG.info("L2 Agent operating in DVR Mode with MAC %s", self.dvr_mac_address) # Add a canary flow to int_br to track OVS restarts self.int_br.setup_canary_table() # Insert 'drop' action as the default for Table DVR_TO_SRC_MAC self.int_br.install_drop(table_id=constants.DVR_TO_SRC_MAC, priority=1) self.int_br.install_drop(table_id=constants.DVR_TO_SRC_MAC_VLAN, priority=1) for physical_network in self.bridge_mappings: self.int_br.install_drop(table_id=constants.LOCAL_SWITCHING, priority=2, in_port=self.int_ofports[ physical_network]) def setup_dvr_flows_on_tun_br(self): '''Setup up initial dvr flows into br-tun''' if not self.enable_tunneling: return self.tun_br.install_goto(dest_table_id=constants.DVR_PROCESS, priority=1, in_port=self.patch_int_ofport) # table-miss should be sent to learning table self.tun_br.install_goto(table_id=constants.DVR_NOT_LEARN, dest_table_id=constants.LEARN_FROM_TUN) self.tun_br.install_goto(table_id=constants.DVR_PROCESS, dest_table_id=constants.PATCH_LV_TO_TUN) def setup_dvr_flows_on_phys_br(self): '''Setup up initial dvr flows into br-phys''' for physical_network in self.bridge_mappings: self.phys_brs[physical_network].install_goto( in_port=self.phys_ofports[physical_network], priority=2, dest_table_id=constants.DVR_PROCESS_VLAN) self.phys_brs[physical_network].install_goto( priority=1, dest_table_id=constants.DVR_NOT_LEARN_VLAN) self.phys_brs[physical_network].install_goto( table_id=constants.DVR_PROCESS_VLAN, priority=0, dest_table_id=constants.LOCAL_VLAN_TRANSLATION) self.phys_brs[physical_network].install_drop( table_id=constants.LOCAL_VLAN_TRANSLATION, in_port=self.phys_ofports[physical_network], priority=2) self.phys_brs[physical_network].install_normal( table_id=constants.DVR_NOT_LEARN_VLAN, priority=1) def _add_dvr_mac_for_phys_br(self, physical_network, mac): self.int_br.add_dvr_mac_vlan(mac=mac, port=self.int_ofports[physical_network]) phys_br = self.phys_brs[physical_network] phys_br.add_dvr_mac_vlan(mac=mac, port=self.phys_ofports[physical_network]) def _add_arp_dvr_mac_for_phys_br(self, physical_network, mac): self.int_br.add_dvr_gateway_mac_arp_vlan( mac=mac, port=self.int_ofports[physical_network]) def _remove_dvr_mac_for_phys_br(self, physical_network, mac): # REVISIT(yamamoto): match in_port as well? self.int_br.remove_dvr_mac_vlan(mac=mac) phys_br = self.phys_brs[physical_network] # REVISIT(yamamoto): match in_port as well? phys_br.remove_dvr_mac_vlan(mac=mac) def _add_dvr_mac_for_tun_br(self, mac): self.int_br.add_dvr_mac_tun(mac=mac, port=self.patch_tun_ofport) self.tun_br.add_dvr_mac_tun(mac=mac, port=self.patch_int_ofport) def _add_arp_dvr_mac_for_tun_br(self, mac): self.int_br.add_dvr_gateway_mac_arp_tun( mac=mac, port=self.patch_tun_ofport) def _remove_dvr_mac_for_tun_br(self, mac): self.int_br.remove_dvr_mac_tun(mac=mac, port=self.patch_tun_ofport) # REVISIT(yamamoto): match in_port as well? 
self.tun_br.remove_dvr_mac_tun(mac=mac) def _add_dvr_mac(self, mac): for physical_network in self.bridge_mappings: self._add_dvr_mac_for_phys_br(physical_network, mac) if self.enable_tunneling: self._add_dvr_mac_for_tun_br(mac) LOG.debug("Added DVR MAC flow for %s", mac) self.registered_dvr_macs.add(mac) def _add_dvr_mac_for_arp(self, mac): for physical_network in self.bridge_mappings: self._add_arp_dvr_mac_for_phys_br(physical_network, mac) if self.enable_tunneling: self._add_arp_dvr_mac_for_tun_br(mac) LOG.debug("Added ARP DVR MAC flow for %s", mac) def _remove_dvr_mac(self, mac): for physical_network in self.bridge_mappings: self._remove_dvr_mac_for_phys_br(physical_network, mac) if self.enable_tunneling: self._remove_dvr_mac_for_tun_br(mac) LOG.debug("Removed DVR MAC flow for %s", mac) self.registered_dvr_macs.remove(mac) def setup_dvr_mac_flows_on_all_brs(self): dvr_macs = self.plugin_rpc.get_dvr_mac_address_list(self.context) LOG.debug("L2 Agent DVR: Received these MACs: %r", dvr_macs) for mac in dvr_macs: c_mac = netaddr.EUI(mac['mac_address'], dialect=netaddr.mac_unix_expanded) if c_mac == self.dvr_mac_address: self._add_dvr_mac_for_arp(c_mac) LOG.debug("Added the DVR MAC rule for ARP %s", c_mac) continue self._add_dvr_mac(c_mac) def dvr_mac_address_update(self, dvr_macs): if not self.dvr_mac_address: LOG.debug("Self mac unknown, ignoring this " "dvr_mac_address_update() ") return dvr_host_macs = set() for entry in dvr_macs: e_mac = netaddr.EUI(entry['mac_address'], dialect=netaddr.mac_unix_expanded) if e_mac == self.dvr_mac_address: continue dvr_host_macs.add(e_mac) if dvr_host_macs == self.registered_dvr_macs: LOG.debug("DVR Mac address already up to date") return dvr_macs_added = dvr_host_macs - self.registered_dvr_macs dvr_macs_removed = self.registered_dvr_macs - dvr_host_macs for oldmac in dvr_macs_removed: self._remove_dvr_mac(oldmac) for newmac in dvr_macs_added: self._add_dvr_mac(newmac) def in_distributed_mode(self): return self.dvr_mac_address is not None def process_tunneled_network(self, network_type, lvid, segmentation_id): self.tun_br.provision_local_vlan( network_type=network_type, lvid=lvid, segmentation_id=segmentation_id, distributed=self.in_distributed_mode()) def _bind_distributed_router_interface_port(self, port, lvm, fixed_ips, device_owner): # since distributed router port must have only one fixed # IP, directly use fixed_ips[0] fixed_ip = fixed_ips[0] subnet_uuid = fixed_ip['subnet_id'] if subnet_uuid in self.local_dvr_map: ldm = self.local_dvr_map[subnet_uuid] else: # set up LocalDVRSubnetMapping available for this subnet subnet_info = self.plugin_rpc.get_subnet_for_dvr( self.context, subnet_uuid, fixed_ips=fixed_ips) if not subnet_info: LOG.warning("DVR: Unable to retrieve subnet information " "for subnet_id %s. 
The subnet or the gateway " "may have already been deleted", subnet_uuid) return LOG.debug("get_subnet_for_dvr for subnet %(uuid)s " "returned with %(info)s", {"uuid": subnet_uuid, "info": subnet_info}) ldm = LocalDVRSubnetMapping(subnet_info) self.local_dvr_map[subnet_uuid] = ldm # DVR takes over ldm.set_dvr_owned(True) vlan_to_use = lvm.vlan if lvm.network_type == n_const.TYPE_VLAN: vlan_to_use = lvm.segmentation_id subnet_info = ldm.get_subnet_info() ip_version = subnet_info['ip_version'] if self.firewall and isinstance(self.firewall, ovs_firewall.OVSFirewallDriver): tunnel_direct_info = {"network_type": lvm.network_type, "physical_network": lvm.physical_network} self.firewall.install_accepted_egress_direct_flow( subnet_info['gateway_mac'], lvm.vlan, port.ofport, tunnel_direct_info=tunnel_direct_info) local_compute_ports = ( self.plugin_rpc.get_ports_on_host_by_subnet( self.context, self.host, subnet_uuid)) LOG.debug("DVR: List of ports received from " "get_ports_on_host_by_subnet %s", local_compute_ports) vif_by_id = self.int_br.get_vifs_by_ids( [local_port['id'] for local_port in local_compute_ports]) for local_port in local_compute_ports: vif = vif_by_id.get(local_port['id']) if not vif: continue ldm.add_compute_ofport(vif.vif_id, vif.ofport) if vif.vif_id in self.local_ports: # ensure if a compute port is already on # a different dvr routed subnet # if yes, queue this subnet to that port comp_ovsport = self.local_ports[vif.vif_id] comp_ovsport.add_subnet(subnet_uuid) else: # the compute port is discovered first here that its on # a dvr routed subnet queue this subnet to that port comp_ovsport = OVSPort(vif.vif_id, vif.ofport, vif.vif_mac, local_port['device_owner']) comp_ovsport.add_subnet(subnet_uuid) self.local_ports[vif.vif_id] = comp_ovsport # create rule for just this vm port self.int_br.install_dvr_to_src_mac( network_type=lvm.network_type, vlan_tag=vlan_to_use, gateway_mac=subnet_info['gateway_mac'], dst_mac=comp_ovsport.get_mac(), dst_port=comp_ovsport.get_ofport()) self.int_br.install_dvr_dst_mac_for_arp( lvm.network_type, vlan_tag=lvm.vlan, gateway_mac=port.vif_mac, dvr_mac=self.dvr_mac_address, rtr_port=port.ofport) if lvm.network_type == n_const.TYPE_VLAN: # TODO(vivek) remove the IPv6 related flows once SNAT is not # used for IPv6 DVR. br = self.phys_brs[lvm.physical_network] if lvm.network_type in constants.TUNNEL_NETWORK_TYPES: br = self.tun_br # TODO(vivek) remove the IPv6 related flows once SNAT is not # used for IPv6 DVR. if ip_version == 4: br.install_dvr_process_ipv4( vlan_tag=lvm.vlan, gateway_ip=fixed_ip['ip_address']) else: br.install_dvr_process_ipv6( vlan_tag=lvm.vlan, gateway_mac=port.vif_mac) br.install_dvr_process( vlan_tag=lvm.vlan, vif_mac=port.vif_mac, dvr_mac_address=self.dvr_mac_address) # the dvr router interface is itself a port, so capture it # queue this subnet to that port. A subnet appears only once as # a router interface on any given router ovsport = OVSPort(port.vif_id, port.ofport, port.vif_mac, device_owner) ovsport.add_subnet(subnet_uuid) self.local_ports[port.vif_id] = ovsport def _bind_port_on_dvr_subnet(self, port, lvm, fixed_ips, device_owner): # Handle new compute port added use-case subnet_uuid = None for ips in fixed_ips: if ips['subnet_id'] not in self.local_dvr_map: continue subnet_uuid = ips['subnet_id'] ldm = self.local_dvr_map[subnet_uuid] if not ldm.is_dvr_owned(): # well this is CSNAT stuff, let dvr come in # and do plumbing for this vm later continue # This confirms that this compute port belongs # to a dvr hosted subnet. 
# Accommodate this VM Port into the existing rule in # the integration bridge LOG.debug("DVR: Plumbing compute port %s", port.vif_id) subnet_info = ldm.get_subnet_info() ldm.add_compute_ofport(port.vif_id, port.ofport) if port.vif_id in self.local_ports: # ensure if a compute port is already on a different # dvr routed subnet # if yes, queue this subnet to that port ovsport = self.local_ports[port.vif_id] ovsport.add_subnet(subnet_uuid) else: # the compute port is discovered first here that its # on a dvr routed subnet, queue this subnet to that port ovsport = OVSPort(port.vif_id, port.ofport, port.vif_mac, device_owner) ovsport.add_subnet(subnet_uuid) self.local_ports[port.vif_id] = ovsport vlan_to_use = lvm.vlan if lvm.network_type == n_const.TYPE_VLAN: vlan_to_use = lvm.segmentation_id # create a rule for this vm port self.int_br.install_dvr_to_src_mac( network_type=lvm.network_type, vlan_tag=vlan_to_use, gateway_mac=subnet_info['gateway_mac'], dst_mac=ovsport.get_mac(), dst_port=ovsport.get_ofport()) def _bind_centralized_snat_port_on_dvr_subnet(self, port, lvm, fixed_ips, device_owner): # We only pass the subnet uuid so the server code will correctly # use the gateway_ip value from the subnet when looking up the # centralized-SNAT (CSNAT) port, get it early from the first fixed_ip. subnet_uuid = fixed_ips[0]['subnet_id'] if port.vif_id in self.local_ports: # throw an error if CSNAT port is already on a different # dvr routed subnet ovsport = self.local_ports[port.vif_id] subs = list(ovsport.get_subnets()) if subs[0] == subnet_uuid: return LOG.error("Centralized-SNAT port %(port)s on subnet " "%(port_subnet)s already seen on a different " "subnet %(orig_subnet)s", { "port": port.vif_id, "port_subnet": subnet_uuid, "orig_subnet": subs[0], }) return ldm = None subnet_info = None if subnet_uuid not in self.local_dvr_map: # no csnat ports seen on this subnet - create csnat state # for this subnet subnet_info = self.plugin_rpc.get_subnet_for_dvr( self.context, subnet_uuid, fixed_ips=None) if not subnet_info: LOG.warning("DVR: Unable to retrieve subnet information " "for subnet_id %s. 
The subnet or the gateway " "may have already been deleted", subnet_uuid) return LOG.debug("get_subnet_for_dvr for subnet %(uuid)s " "returned with %(info)s", {"uuid": subnet_uuid, "info": subnet_info}) ldm = LocalDVRSubnetMapping(subnet_info, port.ofport) self.local_dvr_map[subnet_uuid] = ldm else: ldm = self.local_dvr_map[subnet_uuid] subnet_info = ldm.get_subnet_info() # Store csnat OF Port in the existing DVRSubnetMap ldm.set_csnat_ofport(port.ofport) # create ovsPort footprint for csnat port ovsport = OVSPort(port.vif_id, port.ofport, port.vif_mac, device_owner) ovsport.add_subnet(subnet_uuid) self.local_ports[port.vif_id] = ovsport vlan_to_use = lvm.vlan if lvm.network_type == n_const.TYPE_VLAN: vlan_to_use = lvm.segmentation_id self.int_br.install_dvr_to_src_mac( network_type=lvm.network_type, vlan_tag=vlan_to_use, gateway_mac=subnet_info['gateway_mac'], dst_mac=ovsport.get_mac(), dst_port=ovsport.get_ofport()) def bind_port_to_dvr(self, port, local_vlan_map, fixed_ips, device_owner): if not self.in_distributed_mode(): return if local_vlan_map.network_type not in (constants.TUNNEL_NETWORK_TYPES + [n_const.TYPE_VLAN]): LOG.debug("DVR: Port %s is with network_type %s not supported" " for dvr plumbing", port.vif_id, local_vlan_map.network_type) return if (port.vif_id in self.local_ports and self.local_ports[port.vif_id].ofport != port.ofport): LOG.info("DVR: Port %(vif)s changed port number to " "%(ofport)s, rebinding.", {'vif': port.vif_id, 'ofport': port.ofport}) self.unbind_port_from_dvr(port, local_vlan_map) if device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE: self._bind_distributed_router_interface_port(port, local_vlan_map, fixed_ips, device_owner) if device_owner and n_utils.is_dvr_serviced(device_owner): self._bind_port_on_dvr_subnet(port, local_vlan_map, fixed_ips, device_owner) if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT: self._bind_centralized_snat_port_on_dvr_subnet(port, local_vlan_map, fixed_ips, device_owner) def _unbind_distributed_router_interface_port(self, port, lvm): ovsport = self.local_ports[port.vif_id] # removal of distributed router interface subnet_ids = ovsport.get_subnets() subnet_set = set(subnet_ids) network_type = lvm.network_type physical_network = lvm.physical_network vlan_to_use = lvm.vlan if network_type == n_const.TYPE_VLAN: vlan_to_use = lvm.segmentation_id # ensure we process for all the subnets laid on this removed port for sub_uuid in subnet_set: if sub_uuid not in self.local_dvr_map: continue ldm = self.local_dvr_map[sub_uuid] subnet_info = ldm.get_subnet_info() ip_version = subnet_info['ip_version'] # DVR is no more owner ldm.set_dvr_owned(False) # remove all vm rules for this dvr subnet # clear of compute_ports altogether compute_ports = ldm.get_compute_ofports() for vif_id in compute_ports: comp_port = self.local_ports[vif_id] self.int_br.delete_dvr_to_src_mac( network_type=network_type, vlan_tag=vlan_to_use, dst_mac=comp_port.get_mac()) ldm.remove_all_compute_ofports() self.int_br.delete_dvr_dst_mac_for_arp( network_type=network_type, vlan_tag=vlan_to_use, gateway_mac=port.vif_mac, dvr_mac=self.dvr_mac_address, rtr_port=port.ofport) if ldm.get_csnat_ofport() == constants.OFPORT_INVALID: # if there is no csnat port for this subnet, remove # this subnet from local_dvr_map, as no dvr (or) csnat # ports available on this agent anymore self.local_dvr_map.pop(sub_uuid, None) if network_type == n_const.TYPE_VLAN: br = self.phys_brs[physical_network] if network_type in constants.TUNNEL_NETWORK_TYPES: br = self.tun_br if ip_version == 4: if 
subnet_info['gateway_ip']: br.delete_dvr_process_ipv4( vlan_tag=lvm.vlan, gateway_ip=subnet_info['gateway_ip']) else: br.delete_dvr_process_ipv6( vlan_tag=lvm.vlan, gateway_mac=subnet_info['gateway_mac']) ovsport.remove_subnet(sub_uuid) if self.firewall and isinstance(self.firewall, ovs_firewall.OVSFirewallDriver): self.firewall.delete_accepted_egress_direct_flow( subnet_info['gateway_mac'], lvm.vlan) if lvm.network_type == n_const.TYPE_VLAN: br = self.phys_brs[physical_network] if lvm.network_type in constants.TUNNEL_NETWORK_TYPES: br = self.tun_br br.delete_dvr_process(vlan_tag=lvm.vlan, vif_mac=port.vif_mac) # release port state self.local_ports.pop(port.vif_id, None) def _unbind_port_on_dvr_subnet(self, port, lvm): ovsport = self.local_ports[port.vif_id] # This confirms that this compute port being removed belonged # to a dvr hosted subnet. LOG.debug("DVR: Removing plumbing for compute port %s", port) subnet_ids = ovsport.get_subnets() # ensure we process for all the subnets laid on this port for sub_uuid in subnet_ids: if sub_uuid not in self.local_dvr_map: continue ldm = self.local_dvr_map[sub_uuid] ldm.remove_compute_ofport(port.vif_id) vlan_to_use = lvm.vlan if lvm.network_type == n_const.TYPE_VLAN: vlan_to_use = lvm.segmentation_id # first remove this vm port rule self.int_br.delete_dvr_to_src_mac( network_type=lvm.network_type, vlan_tag=vlan_to_use, dst_mac=ovsport.get_mac()) # release port state self.local_ports.pop(port.vif_id, None) def _unbind_centralized_snat_port_on_dvr_subnet(self, port, lvm): ovsport = self.local_ports[port.vif_id] # This confirms that this compute port being removed belonged # to a dvr hosted subnet. LOG.debug("DVR: Removing plumbing for csnat port %s", port) sub_uuid = list(ovsport.get_subnets())[0] # ensure we process for all the subnets laid on this port if sub_uuid not in self.local_dvr_map: return ldm = self.local_dvr_map[sub_uuid] ldm.set_csnat_ofport(constants.OFPORT_INVALID) vlan_to_use = lvm.vlan if lvm.network_type == n_const.TYPE_VLAN: vlan_to_use = lvm.segmentation_id # then remove csnat port rule self.int_br.delete_dvr_to_src_mac( network_type=lvm.network_type, vlan_tag=vlan_to_use, dst_mac=ovsport.get_mac()) if not ldm.is_dvr_owned(): # if not owned by DVR (only used for csnat), remove this # subnet state altogether self.local_dvr_map.pop(sub_uuid, None) # release port state self.local_ports.pop(port.vif_id, None) def unbind_port_from_dvr(self, vif_port, local_vlan_map): if not self.in_distributed_mode(): return # Handle port removed use-case if vif_port and vif_port.vif_id not in self.local_ports: LOG.debug("DVR: Non distributed port, ignoring %s", vif_port) return ovsport = self.local_ports[vif_port.vif_id] device_owner = ovsport.get_device_owner() if device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE: self._unbind_distributed_router_interface_port(vif_port, local_vlan_map) if device_owner and n_utils.is_dvr_serviced(device_owner): self._unbind_port_on_dvr_subnet(vif_port, local_vlan_map) if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT: self._unbind_centralized_snat_port_on_dvr_subnet(vif_port, local_vlan_map) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py0000644000175000017500000037552000000000000032262 0ustar00coreycorey00000000000000# Copyright 2011 VMware, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 import collections import functools import hashlib import signal import sys import time import eventlet import netaddr from neutron_lib.agent import constants as agent_consts from neutron_lib.agent import topics from neutron_lib.api.definitions import portbindings from neutron_lib.api.definitions import provider_net from neutron_lib.callbacks import events as callback_events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources as callback_resources from neutron_lib import constants as n_const from neutron_lib import context from neutron_lib.placement import utils as place_utils from neutron_lib.plugins import utils as plugin_utils from neutron_lib.utils import helpers import os_vif from os_vif.objects import instance_info as vif_instance_object from os_vif.objects import network as vif_network_object from os_vif.objects import vif as vif_obj from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_service import loopingcall from oslo_service import systemd from oslo_utils import netutils from osprofiler import profiler from ovsdbapp import exceptions as ovs_exceptions import six from neutron._i18n import _ from neutron.agent.common import ip_lib from neutron.agent.common import ovs_lib from neutron.agent.common import polling from neutron.agent.common import utils from neutron.agent import firewall as agent_firewall from neutron.agent.l2 import l2_agent_extensions_manager as ext_manager from neutron.agent.linux import xenapi_root_helper from neutron.agent import rpc as agent_rpc from neutron.agent import securitygroups_rpc as agent_sg_rpc from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import dvr_rpc from neutron.api.rpc.handlers import securitygroups_rpc as sg_rpc from neutron.common import config from neutron.common import utils as n_utils from neutron.conf.agent import common as agent_config from neutron.conf.agent import xenapi_conf from neutron.conf import service as service_conf from neutron.plugins.ml2.drivers.agent import capabilities from neutron.plugins.ml2.drivers.l2pop.rpc_manager import l2population_rpc from neutron.plugins.ml2.drivers.openvswitch.agent.common \ import constants from neutron.plugins.ml2.drivers.openvswitch.agent \ import ovs_agent_extension_api as ovs_ext_api from neutron.plugins.ml2.drivers.openvswitch.agent \ import ovs_capabilities from neutron.plugins.ml2.drivers.openvswitch.agent \ import ovs_dvr_neutron_agent from neutron.plugins.ml2.drivers.openvswitch.agent import vlanmanager LOG = logging.getLogger(__name__) cfg.CONF.import_group('AGENT', 'neutron.plugins.ml2.drivers.openvswitch.' 'agent.common.config') cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.' 
'common.config') INIT_MAX_TRIES = 3 class _mac_mydialect(netaddr.mac_unix): word_fmt = '%.2x' class OVSPluginApi(agent_rpc.CacheBackedPluginApi): pass class PortInfo(collections.UserDict): def __init__(self, current=None, added=None, removed=None, updated=None, re_added=None): _dict = {'current': current or set(), 'added': added or set(), 'removed': removed or set(), 'updated': updated or set(), 're_added': re_added or set()} super(PortInfo, self).__init__(_dict) def has_zero_prefixlen_address(ip_addresses): return any(netaddr.IPNetwork(ip).prefixlen == 0 for ip in ip_addresses) class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, dvr_rpc.DVRAgentRpcCallbackMixin): '''Implements OVS-based tunneling, VLANs and flat networks. Two local bridges are created: an integration bridge (defaults to 'br-int') and a tunneling bridge (defaults to 'br-tun'). An additional bridge is created for each physical network interface used for VLANs and/or flat networks. All VM VIFs are plugged into the integration bridge. VM VIFs on a given virtual network share a common "local" VLAN (i.e. not propagated externally). The VLAN id of this local VLAN is mapped to the physical networking details realizing that virtual network. For virtual networks realized as GRE tunnels, a Logical Switch (LS) identifier is used to differentiate tenant traffic on inter-HV tunnels. A mesh of tunnels is created to other Hypervisors in the cloud. These tunnels originate and terminate on the tunneling bridge of each hypervisor. Port patching is done to connect local VLANs on the integration bridge to inter-hypervisor tunnels on the tunnel bridge. For each virtual network realized as a VLAN or flat network, a veth or a pair of patch ports is used to connect the local VLAN on the integration bridge with the physical network bridge, with flow rules adding, modifying, or stripping VLAN tags as necessary. ''' # history # 1.0 Initial version # 1.1 Support Security Group RPC # 1.2 Support DVR (Distributed Virtual Router) RPC # 1.3 Added param devices_to_update to security_groups_provider_updated # 1.4 Added support for network_update # 1.5 Added binding_activate and binding_deactivate # 1.7 Add support for smartnic ports target = oslo_messaging.Target(version='1.7') def __init__(self, bridge_classes, ext_manager, conf=None): '''Constructor. :param bridge_classes: a dict for bridge classes. :param conf: an instance of ConfigOpts ''' super(OVSNeutronAgent, self).__init__() self.conf = conf or cfg.CONF self.ovs = ovs_lib.BaseOVS() self.ext_manager = ext_manager agent_conf = self.conf.AGENT ovs_conf = self.conf.OVS self.fullsync = False # init bridge classes with configured datapath type. self.br_int_cls, self.br_phys_cls, self.br_tun_cls = ( functools.partial(bridge_classes[b], datapath_type=ovs_conf.datapath_type) for b in ('br_int', 'br_phys', 'br_tun')) self.use_veth_interconnection = ovs_conf.use_veth_interconnection self.veth_mtu = agent_conf.veth_mtu self.available_local_vlans = set(six.moves.range( n_const.MIN_VLAN_TAG, n_const.MAX_VLAN_TAG + 1)) self.tunnel_types = agent_conf.tunnel_types or [] self.enable_tunneling = bool(self.tunnel_types) self.l2_pop = agent_conf.l2_population # TODO(ethuleau): Change ARP responder so it's not dependent on the # ML2 l2 population mechanism driver. 
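        # NOTE(editor): illustrative sketch (not part of the agent code) of
        # the effective ARP-responder enablement implemented just below,
        # using hypothetical config values:
        #
        #     arp_responder, l2_pop = False, False        # from [AGENT]
        #     dvr_enabled, tunneling_enabled = True, True
        #     arp_responder_enabled = arp_responder and l2_pop
        #     if dvr_enabled and tunneling_enabled and not arp_responder_enabled:
        #         arp_responder_enabled = True  # forced on; a warning is logged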
self.enable_distributed_routing = agent_conf.enable_distributed_routing self.arp_responder_enabled = agent_conf.arp_responder and self.l2_pop if (self.enable_distributed_routing and self.enable_tunneling and not self.arp_responder_enabled): LOG.warning("ARP responder was not enabled but is required since " "DVR and tunneling are enabled, setting to True.") self.arp_responder_enabled = True host = self.conf.host self.agent_id = 'ovs-agent-%s' % host # Validate agent configurations self._check_agent_configurations() # Keep track of int_br's device count for use by _report_state() self.int_br_device_count = 0 self.int_br = self.br_int_cls(ovs_conf.integration_bridge) self.setup_integration_br() # Stores port update notifications for processing in main rpc loop self.updated_ports = set() # Stores port delete notifications self.deleted_ports = set() # Stores the port IDs whose binding has been deactivated self.deactivated_bindings = set() # Stores the port IDs whose binding has been activated self.activated_bindings = set() # Stores smartnic ports update/remove self.updated_smartnic_ports = list() # Stores integration bridge smartnic ports data self.current_smartnic_ports_map = {} self.network_ports = collections.defaultdict(set) # keeps association between ports and ofports to detect ofport change self.vifname_to_ofport_map = {} # Stores newly created bridges self.added_bridges = list() self.bridge_mappings = self._parse_bridge_mappings( ovs_conf.bridge_mappings) self.rp_bandwidths = place_utils.parse_rp_bandwidths( ovs_conf.resource_provider_bandwidths) br_set = set(six.itervalues(self.bridge_mappings)) n_utils.validate_rp_bandwidth(self.rp_bandwidths, br_set) self.rp_inventory_defaults = place_utils.parse_rp_inventory_defaults( ovs_conf.resource_provider_inventory_defaults) self.rp_hypervisors = utils.default_rp_hypervisors( ovs_conf.resource_provider_hypervisors, {k: [v] for k, v in self.bridge_mappings.items()} ) self.setup_physical_bridges(self.bridge_mappings) self.vlan_manager = vlanmanager.LocalVlanManager() self._reset_tunnel_ofports() self.polling_interval = agent_conf.polling_interval self.minimize_polling = agent_conf.minimize_polling self.ovsdb_monitor_respawn_interval = ( agent_conf.ovsdb_monitor_respawn_interval or constants.DEFAULT_OVSDBMON_RESPAWN) self.local_ip = ovs_conf.local_ip self.tunnel_count = 0 self.vxlan_udp_port = agent_conf.vxlan_udp_port self.dont_fragment = agent_conf.dont_fragment self.tunnel_csum = agent_conf.tunnel_csum self.tos = ('inherit' if agent_conf.dscp_inherit else (int(agent_conf.dscp) << 2 if agent_conf.dscp else None)) self.tun_br = None self.patch_int_ofport = constants.OFPORT_INVALID self.patch_tun_ofport = constants.OFPORT_INVALID if self.enable_tunneling: # The patch_int_ofport and patch_tun_ofport are updated # here inside the call to setup_tunnel_br() self.setup_tunnel_br(ovs_conf.tunnel_bridge) self.setup_tunnel_br_flows() self.setup_rpc() self.dvr_agent = ovs_dvr_neutron_agent.OVSDVRNeutronAgent( self.context, self.dvr_plugin_rpc, self.int_br, self.tun_br, self.bridge_mappings, self.phys_brs, self.int_ofports, self.phys_ofports, self.patch_int_ofport, self.patch_tun_ofport, host, self.enable_tunneling, self.enable_distributed_routing) if self.enable_distributed_routing: self.dvr_agent.setup_dvr_flows() # Collect additional bridges to monitor self.ancillary_brs = self.setup_ancillary_bridges( ovs_conf.integration_bridge, ovs_conf.tunnel_bridge) agent_api = ovs_ext_api.OVSAgentExtensionAPI(self.int_br, self.tun_br, self.phys_brs) 
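        # NOTE(editor): a minimal, hypothetical sketch of how an l2 agent
        # extension consumes this API object; the extension class below is
        # illustrative and not part of this file:
        #
        #     class MyExtension(l2_extension.L2AgentExtension):
        #         def consume_api(self, agent_api):
        #             self.agent_api = agent_api
        #
        #         def initialize(self, connection, driver_type):
        #             # bridge wrapper whose flows carry a per-extension cookie
        #             self.int_br = self.agent_api.request_int_br()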
self.ext_manager.initialize( self.connection, constants.EXTENSION_DRIVER_TYPE, agent_api) # In order to keep existed device's local vlan unchanged, # restore local vlan mapping at start self._restore_local_vlan_map() # Security group agent support self.sg_agent = agent_sg_rpc.SecurityGroupAgentRpc( self.context, self.sg_plugin_rpc, defer_refresh_firewall=True, integration_bridge=self.int_br) self.sg_plugin_rpc.register_legacy_sg_notification_callbacks( self.sg_agent) self.sg_agent.init_ovs_dvr_firewall(self.dvr_agent) # we default to False to provide backward compat with out of tree # firewall drivers that expect the logic that existed on the Neutron # server which only enabled hybrid plugging based on the use of the # hybrid driver. hybrid_plug = getattr(self.sg_agent.firewall, 'OVS_HYBRID_PLUG_REQUIRED', False) self.prevent_arp_spoofing = ( not self.sg_agent.firewall.provides_arp_spoofing_protection) self.failed_report_state = False # TODO(mangelajo): optimize resource_versions to only report # versions about resources which are common, # or which are used by specific extensions. self.agent_state = { 'binary': 'neutron-openvswitch-agent', 'host': host, 'topic': n_const.L2_AGENT_TOPIC, 'configurations': {'bridge_mappings': self.bridge_mappings, n_const.RP_BANDWIDTHS: self.rp_bandwidths, n_const.RP_INVENTORY_DEFAULTS: self.rp_inventory_defaults, 'resource_provider_hypervisors': self.rp_hypervisors, 'integration_bridge': ovs_conf.integration_bridge, 'tunnel_types': self.tunnel_types, 'tunneling_ip': self.local_ip, 'l2_population': self.l2_pop, 'arp_responder_enabled': self.arp_responder_enabled, 'enable_distributed_routing': self.enable_distributed_routing, 'log_agent_heartbeats': agent_conf.log_agent_heartbeats, 'extensions': self.ext_manager.names(), 'datapath_type': ovs_conf.datapath_type, 'ovs_capabilities': self.ovs.capabilities, 'vhostuser_socket_dir': ovs_conf.vhostuser_socket_dir, portbindings.OVS_HYBRID_PLUG: hybrid_plug, 'baremetal_smartnic': self.conf.AGENT.baremetal_smartnic}, 'resource_versions': resources.LOCAL_RESOURCE_VERSIONS, 'agent_type': n_const.AGENT_TYPE_OVS, 'start_flag': True} report_interval = agent_conf.report_interval if report_interval: heartbeat = loopingcall.FixedIntervalLoopingCall( self._report_state) heartbeat.start(interval=report_interval) # Initialize iteration counter self.iter_num = 0 self.run_daemon_loop = True self.catch_sigterm = False self.catch_sighup = False if self.conf.AGENT.baremetal_smartnic: os_vif.initialize() # The initialization is complete; we can start receiving messages self.connection.consume_in_threads() self.quitting_rpc_timeout = agent_conf.quitting_rpc_timeout def _parse_bridge_mappings(self, bridge_mappings): try: return helpers.parse_mappings(bridge_mappings) except ValueError as e: raise ValueError(_("Parsing bridge_mappings failed: %s.") % e) def _report_state(self): # How many devices are likely used by a VM self.agent_state.get('configurations')['devices'] = ( self.int_br_device_count) self.agent_state.get('configurations')['in_distributed_mode'] = ( self.dvr_agent.in_distributed_mode()) try: agent_status = self.state_rpc.report_state(self.context, self.agent_state, True) if agent_status == agent_consts.AGENT_REVIVED: LOG.info('Agent has just been revived. ' 'Doing a full sync.') self.fullsync = True # we only want to update resource versions on startup if self.agent_state.pop('resource_versions', None): # On initial start, we notify systemd after initialization # is complete. 
systemd.notify_once() if self.iter_num > 0: # agent is considered started after # initial sync with server (iter 0) is done self.agent_state.pop('start_flag', None) except Exception: self.failed_report_state = True LOG.exception("Failed reporting state!") return if self.failed_report_state: self.failed_report_state = False LOG.info("Successfully reported state after a previous failure.") def _restore_local_vlan_map(self): self._local_vlan_hints = {} # skip INVALID and UNASSIGNED to match scan_ports behavior ofport_filter = (ovs_lib.INVALID_OFPORT, ovs_lib.UNASSIGNED_OFPORT) cur_ports = self.int_br.get_vif_ports(ofport_filter) port_names = [p.port_name for p in cur_ports] port_info = self.int_br.get_ports_attributes( "Port", columns=["name", "other_config", "tag"], ports=port_names) by_name = {x['name']: x for x in port_info} for port in cur_ports: # if a port was deleted between get_vif_ports and # get_ports_attributes, we # will get a KeyError try: local_vlan_map = by_name[port.port_name]['other_config'] local_vlan = by_name[port.port_name]['tag'] except KeyError: continue if not local_vlan: continue net_uuid = local_vlan_map.get('net_uuid') if (net_uuid and net_uuid not in self._local_vlan_hints and local_vlan != constants.DEAD_VLAN_TAG): self.available_local_vlans.remove(local_vlan) self._local_vlan_hints[local_vlan_map['net_uuid']] = local_vlan def _dispose_local_vlan_hints(self): self.available_local_vlans.update(self._local_vlan_hints.values()) self._local_vlan_hints = {} def _reset_tunnel_ofports(self): self.tun_br_ofports = {n_const.TYPE_GENEVE: {}, n_const.TYPE_GRE: {}, n_const.TYPE_VXLAN: {}} def _update_network_segmentation_id(self, network): if network.get(provider_net.NETWORK_TYPE) != n_const.TYPE_VLAN: return try: lvm = self.vlan_manager.get(network['id']) except vlanmanager.MappingNotFound: return segmentation_id_old = lvm.segmentation_id if segmentation_id_old == network[provider_net.SEGMENTATION_ID]: return self.vlan_manager.update_segmentation_id( network['id'], network[provider_net.SEGMENTATION_ID]) lvid = lvm.vlan physical_network = network[provider_net.PHYSICAL_NETWORK] phys_br = self.phys_brs[physical_network] phys_port = self.phys_ofports[physical_network] int_port = self.int_ofports[physical_network] phys_br.reclaim_local_vlan(port=phys_port, lvid=lvid) phys_br.provision_local_vlan( port=phys_port, lvid=lvid, segmentation_id=network[provider_net.SEGMENTATION_ID], distributed=self.enable_distributed_routing) self.int_br.reclaim_local_vlan(port=int_port, segmentation_id=segmentation_id_old) self.int_br.provision_local_vlan( port=int_port, lvid=lvid, segmentation_id=network[provider_net.SEGMENTATION_ID]) def setup_rpc(self): self.plugin_rpc = OVSPluginApi(topics.PLUGIN) # allow us to receive port_update/delete callbacks from the cache self.plugin_rpc.register_legacy_notification_callbacks(self) self.sg_plugin_rpc = sg_rpc.SecurityGroupServerAPIShim( self.plugin_rpc.remote_resource_cache) self.dvr_plugin_rpc = dvr_rpc.DVRServerRpcApi(topics.PLUGIN) self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS) # RPC network init self.context = context.get_admin_context_without_session() # Made a simple RPC call to Neutron Server. while True: try: self.state_rpc.has_alive_neutron_server(self.context) except oslo_messaging.MessagingTimeout as e: LOG.warning('l2-agent cannot contact neutron server. ' 'Check connectivity to neutron server. ' 'Retrying... 
' 'Detailed message: %(msg)s.', {'msg': e}) continue break # Define the listening consumers for the agent consumers = [[constants.TUNNEL, topics.UPDATE], [constants.TUNNEL, topics.DELETE], [topics.DVR, topics.UPDATE]] if self.l2_pop: consumers.append([topics.L2POPULATION, topics.UPDATE]) self.connection = agent_rpc.create_consumers([self], topics.AGENT, consumers, start_listening=False) @profiler.trace("rpc") def port_update(self, context, **kwargs): port = kwargs.get('port') agent_restarted = kwargs.pop("agent_restarted", False) # Put the port identifier in the updated_ports set. # Even if full port details might be provided to this call, # they are not used since there is no guarantee the notifications # are processed in the same order as the relevant API requests if not agent_restarted: # When ovs-agent is just restarted, the first RPC loop will # process all the port as 'added'. And all of these ports will # send a port_update notification after that processing. This # will cause all these ports to be processed again in next RPC # loop as 'updated'. So here we just ignore such local update # notification. self.updated_ports.add(port['id']) if not self.conf.AGENT.baremetal_smartnic: return # In case of smart-nic port, add smart-nic representor port to # the integration bridge. port_data = (self.plugin_rpc.remote_resource_cache .get_resource_by_id(resources.PORT, port['id'])) if not port_data: LOG.warning('Failed to get port details, port id: %s', port['id']) return for port_binding in port_data.get('bindings', []): if port_binding['vnic_type'] == portbindings.VNIC_SMARTNIC: if port_binding['host'] == self.conf.host: local_link = (port_binding['profile'] ['local_link_information']) if local_link: self._add_port_to_updated_smartnic_ports( port_data['mac_address'], local_link[0]['port_id'], port_data['id'], port_binding['vif_type'], port_data['device_id']) elif (not port_binding['host'] and port_binding['vif_type'] == portbindings.VIF_TYPE_UNBOUND and port['id'] in self.current_smartnic_ports_map.keys()): smartnic_port = self.current_smartnic_ports_map[ port['id']] self._add_port_to_updated_smartnic_ports( smartnic_port['vif_mac'], smartnic_port['vif_name'], port['id'], portbindings.VIF_TYPE_UNBOUND) else: # The port doesn't belong to this Smart NIC, # the reason for this could be multi Smart NIC # setup. 
LOG.info("Smart NIC port %(port_id)s does not belong " "to host %(host)s", {'port_id': port['id'], 'host': self.conf.host}) def treat_smartnic_port(self, smartnic_port_data): mac = smartnic_port_data['mac'] vm_uuid = smartnic_port_data['vm_uuid'] rep_port = smartnic_port_data['vif_name'] iface_id = smartnic_port_data['iface_id'] vif_type = smartnic_port_data['vif_type'] instance_info = vif_instance_object.InstanceInfo(uuid=vm_uuid) vif = self._get_vif_object(iface_id, rep_port, mac) try: if vif_type == portbindings.VIF_TYPE_OVS: os_vif.plug(vif, instance_info) self.current_smartnic_ports_map[iface_id] = ( self.create_smartnic_port_map_entry_data(mac, rep_port)) elif vif_type == portbindings.VIF_TYPE_UNBOUND: os_vif.unplug(vif, instance_info) self.current_smartnic_ports_map.pop(iface_id, None) else: LOG.error("Unexpected vif_type:%(vif_type)s for " "%(vnic_type)s port:%(port_id)s", {'vnic_type': portbindings.VNIC_SMARTNIC, 'vif_type': vif_type, 'port_id': iface_id}) except Exception as e: LOG.error("Failed to treat %(vnic_type)s port:%(port_id)s , " "error:%(error)s", {'vnic_type': portbindings.VNIC_SMARTNIC, 'port_id': iface_id, 'error': e}) def _get_vif_object(self, iface_id, rep_port, mac): network = vif_network_object.Network( bridge=self.conf.OVS.integration_bridge) port_profile = vif_obj.VIFPortProfileOpenVSwitch( interface_id=iface_id, create_port=True) return vif_obj.VIFOpenVSwitch( vif_name=rep_port, plugin='ovs', port_profile=port_profile, network=network, address=str(mac)) def _add_port_to_updated_smartnic_ports(self, mac, vif_name, iface_id, vif_type, vm_uuid=''): self.updated_smartnic_ports.append({ 'mac': mac, 'vm_uuid': vm_uuid, 'vif_name': vif_name, 'iface_id': iface_id, 'vif_type': vif_type}) @profiler.trace("rpc") def port_delete(self, context, **kwargs): port_id = kwargs.get('port_id') self.deleted_ports.add(port_id) self.updated_ports.discard(port_id) @profiler.trace("rpc") def network_update(self, context, **kwargs): network_id = kwargs['network']['id'] network = self.plugin_rpc.get_network_details( self.context, network_id, self.agent_id, self.conf.host) self._update_network_segmentation_id(network) for port_id in self.network_ports[network_id]: # notifications could arrive out of order, if the port is deleted # we don't want to update it anymore if port_id not in self.deleted_ports: self.updated_ports.add(port_id) LOG.debug("network_update message processed for network " "%(network_id)s, with ports: %(ports)s", {'network_id': network_id, 'ports': self.network_ports[network_id]}) @profiler.trace("rpc") def binding_deactivate(self, context, **kwargs): if kwargs.get('host') != self.conf.host: return port_id = kwargs.get('port_id') self.deactivated_bindings.add(port_id) @profiler.trace("rpc") def binding_activate(self, context, **kwargs): if kwargs.get('host') != self.conf.host: return port_id = kwargs.get('port_id') self.activated_bindings.add(port_id) def _clean_network_ports(self, port_id): for port_set in self.network_ports.values(): if port_id in port_set: port_set.remove(port_id) break def _get_port_local_vlan(self, port_id): for network_id, port_set in self.network_ports.items(): if port_id in port_set: lvm = self.vlan_manager.get(network_id) return lvm.vlan def process_deleted_ports(self, port_info): # don't try to process removed ports as deleted ports since # they are already gone if 'removed' in port_info: self.deleted_ports -= port_info['removed'] deleted_ports = list(self.deleted_ports) with self.int_br.deferred(full_ordered=True, use_bundle=True) as int_br: 
while self.deleted_ports: port_id = self.deleted_ports.pop() port = self.int_br.get_vif_port_by_id(port_id) if (isinstance(self.sg_agent.firewall, agent_firewall.NoopFirewallDriver) or not agent_sg_rpc.is_firewall_enabled()): try: self.delete_accepted_egress_direct_flow( int_br, port.ofport, port.mac, self._get_port_local_vlan(port_id)) except Exception as err: LOG.debug("Failed to remove accepted egress flows " "for port %s, error: %s", port_id, err) self._clean_network_ports(port_id) self.ext_manager.delete_port(self.context, {"vif_port": port, "port_id": port_id}) # move to dead VLAN so deleted ports no # longer have access to the network if port: # don't log errors since there is a chance someone will be # removing the port from the bridge at the same time self.port_dead(port, log_errors=False) self.port_unbound(port_id) # Flush firewall rules after ports are put on dead VLAN to be # more secure self.sg_agent.remove_devices_filter(deleted_ports) def create_smartnic_port_map_entry_data(self, vif_mac, vif_name): return {"vif_mac": vif_mac, "vif_name": vif_name} def process_smartnic_ports(self): smartnic_ports = self.plugin_rpc.get_ports_by_vnic_type_and_host( self.context, portbindings.VNIC_SMARTNIC, self.conf.host) smartnic_ports_map = {smartnic_port['id']: smartnic_port for smartnic_port in smartnic_ports} smartnic_port_ids = set(smartnic_ports_map.keys()) ofport_filter = (ovs_lib.INVALID_OFPORT, ovs_lib.UNASSIGNED_OFPORT) cur_smartnic_ports = self.int_br.get_vif_ports(ofport_filter) self.current_smartnic_ports_map = { port.vif_id: self.create_smartnic_port_map_entry_data( port.vif_mac, port.port_name) for port in cur_smartnic_ports} cur_smartnic_port_ids = set(self.current_smartnic_ports_map.keys()) removed_ports = [{'vif_id': vif_id, **self.current_smartnic_ports_map[ vif_id]} for vif_id in cur_smartnic_port_ids - smartnic_port_ids] added_ports = [smartnic_ports_map[port_id] for port_id in smartnic_port_ids - cur_smartnic_port_ids] def _process_added_ports(smartnic_added_ports): for smartnic_port in smartnic_added_ports: local_link = (smartnic_port['binding:profile'] ['local_link_information']) if local_link: self._add_port_to_updated_smartnic_ports( smartnic_port['mac_address'], local_link[0]['port_id'], smartnic_port['id'], smartnic_port['binding:vif_type'], smartnic_port['device_id']) def _process_removed_ports(removed_ports): for ovs_port in removed_ports: self._add_port_to_updated_smartnic_ports( ovs_port['vif_mac'], ovs_port['vif_name'], ovs_port['vif_id'], portbindings.VIF_TYPE_UNBOUND) _process_removed_ports(removed_ports) _process_added_ports(added_ports) def process_deactivated_bindings(self, port_info): # don't try to deactivate bindings for removed ports since they are # already gone if 'removed' in port_info: self.deactivated_bindings -= port_info['removed'] while self.deactivated_bindings: port_id = self.deactivated_bindings.pop() port = self.int_br.get_vif_port_by_id(port_id) if not port: continue self.int_br.delete_port(port.port_name) LOG.debug(("Port id %s unplugged from integration bridge because " "its binding was de-activated"), port_id) def process_activated_bindings(self, port_info, activated_bindings_copy): # Compute which ports for activated bindings are still present... 
activated_bindings_copy &= port_info['current'] # ...and treat them as just added port_info['added'] |= activated_bindings_copy @profiler.trace("rpc") def tunnel_update(self, context, **kwargs): LOG.debug("tunnel_update received") if not self.enable_tunneling: return tunnel_ip = kwargs.get('tunnel_ip') tunnel_type = kwargs.get('tunnel_type') if not tunnel_type: LOG.error("No tunnel_type specified, cannot create tunnels") return if tunnel_type not in self.tunnel_types: LOG.error("tunnel_type %s not supported by agent", tunnel_type) return if tunnel_ip == self.local_ip: return tun_name = self.get_tunnel_name(tunnel_type, self.local_ip, tunnel_ip) if tun_name is None: return if not self.l2_pop: self._setup_tunnel_port(self.tun_br, tun_name, tunnel_ip, tunnel_type) self._setup_tunnel_flood_flow(self.tun_br, tunnel_type) @profiler.trace("rpc") def tunnel_delete(self, context, **kwargs): LOG.debug("tunnel_delete received") if not self.enable_tunneling: return tunnel_ip = kwargs.get('tunnel_ip') if not tunnel_ip: LOG.error("No tunnel_ip specified, cannot delete tunnels") return tunnel_type = kwargs.get('tunnel_type') if not tunnel_type: LOG.error("No tunnel_type specified, cannot delete tunnels") return if tunnel_type not in self.tunnel_types: LOG.error("tunnel_type %s not supported by agent", tunnel_type) return ofport = self.tun_br_ofports[tunnel_type].get(tunnel_ip) self.cleanup_tunnel_port(self.tun_br, ofport, tunnel_type) def _tunnel_port_lookup(self, network_type, remote_ip): return self.tun_br_ofports[network_type].get(remote_ip) def fdb_add(self, context, fdb_entries): LOG.debug("fdb_add received") for lvm, agent_ports in self.get_agent_ports(fdb_entries): agent_ports.pop(self.local_ip, None) if len(agent_ports): if not self.enable_distributed_routing: with self.tun_br.deferred() as deferred_br: self.fdb_add_tun(context, deferred_br, lvm, agent_ports, self._tunnel_port_lookup) else: self.fdb_add_tun(context, self.tun_br, lvm, agent_ports, self._tunnel_port_lookup) def fdb_remove(self, context, fdb_entries): LOG.debug("fdb_remove received") for lvm, agent_ports in self.get_agent_ports(fdb_entries): agent_ports.pop(self.local_ip, None) if len(agent_ports): if not self.enable_distributed_routing: with self.tun_br.deferred() as deferred_br: self.fdb_remove_tun(context, deferred_br, lvm, agent_ports, self._tunnel_port_lookup) else: self.fdb_remove_tun(context, self.tun_br, lvm, agent_ports, self._tunnel_port_lookup) def add_fdb_flow(self, br, port_info, remote_ip, lvm, ofport): if port_info == n_const.FLOODING_ENTRY: lvm.tun_ofports.add(ofport) br.install_flood_to_tun(lvm.vlan, lvm.segmentation_id, lvm.tun_ofports) else: self.setup_entry_for_arp_reply(br, 'add', lvm.vlan, port_info.mac_address, port_info.ip_address) br.install_unicast_to_tun(lvm.vlan, lvm.segmentation_id, ofport, port_info.mac_address) def del_fdb_flow(self, br, port_info, remote_ip, lvm, ofport): if port_info == n_const.FLOODING_ENTRY: if ofport not in lvm.tun_ofports: LOG.debug("attempt to remove a non-existent port %s", ofport) return lvm.tun_ofports.remove(ofport) if len(lvm.tun_ofports) > 0: br.install_flood_to_tun(lvm.vlan, lvm.segmentation_id, lvm.tun_ofports) else: # This local vlan doesn't require any more tunneling br.delete_flood_to_tun(lvm.vlan) else: self.setup_entry_for_arp_reply(br, 'remove', lvm.vlan, port_info.mac_address, port_info.ip_address) br.delete_unicast_to_tun(lvm.vlan, port_info.mac_address) def _fdb_chg_ip(self, context, fdb_entries): LOG.debug("update chg_ip received") with self.tun_br.deferred() 
as deferred_br: self.fdb_chg_ip_tun(context, deferred_br, fdb_entries, self.local_ip) def setup_entry_for_arp_reply(self, br, action, local_vid, mac_address, ip_address): '''Set the ARP respond entry. When the l2 population mechanism driver and OVS supports to edit ARP fields, a table (ARP_RESPONDER) to resolve ARP locally is added to the tunnel bridge. ''' if not self.arp_responder_enabled: return ip = netaddr.IPAddress(ip_address) if ip.version == 6: return ip = str(ip) mac = str(netaddr.EUI(mac_address, dialect=_mac_mydialect)) if action == 'add': br.install_arp_responder(local_vid, ip, mac) elif action == 'remove': br.delete_arp_responder(local_vid, ip) else: LOG.warning('Action %s not supported', action) def _local_vlan_for_flat(self, lvid, physical_network): phys_br = self.phys_brs[physical_network] phys_port = self.phys_ofports[physical_network] int_br = self.int_br int_port = self.int_ofports[physical_network] phys_br.provision_local_vlan(port=phys_port, lvid=lvid, segmentation_id=None, distributed=False) int_br.provision_local_vlan(port=int_port, lvid=lvid, segmentation_id=None) def _local_vlan_for_vlan(self, lvid, physical_network, segmentation_id): distributed = self.enable_distributed_routing phys_br = self.phys_brs[physical_network] phys_port = self.phys_ofports[physical_network] int_br = self.int_br int_port = self.int_ofports[physical_network] phys_br.provision_local_vlan(port=phys_port, lvid=lvid, segmentation_id=segmentation_id, distributed=distributed) int_br.provision_local_vlan(port=int_port, lvid=lvid, segmentation_id=segmentation_id) def _add_local_vlan(self, net_uuid, network_type, physical_network, segmentation_id): """Add a network to the local VLAN manager On a restart or crash of OVS, the network associated with this VLAN will already be assigned, so check for that here before assigning a new one. If the VLAN tag is not used, check if there are local VLAN tags available. """ try: lvm = self.vlan_manager.get(net_uuid) except vlanmanager.MappingNotFound: lvid = self._local_vlan_hints.pop(net_uuid, None) if lvid is None: if not self.available_local_vlans: LOG.error("No local VLAN available for net-id=%s", net_uuid) return lvid = self.available_local_vlans.pop() self.vlan_manager.add( net_uuid, lvid, network_type, physical_network, segmentation_id) lvm = self.vlan_manager.get(net_uuid) LOG.info( "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s", {'vlan_id': lvm.vlan, 'net_uuid': net_uuid}) return lvm def provision_local_vlan(self, net_uuid, network_type, physical_network, segmentation_id): '''Provisions a local VLAN. :param net_uuid: the uuid of the network associated with this vlan. 
:param network_type: the network type ('gre', 'vxlan', 'vlan', 'flat', 'local', 'geneve') :param physical_network: the physical network for 'vlan' or 'flat' :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel' ''' lvm = self._add_local_vlan(net_uuid, network_type, physical_network, segmentation_id) if not lvm or not lvm.vlan: return lvid = lvm.vlan if network_type in constants.TUNNEL_NETWORK_TYPES: if self.enable_tunneling: # outbound broadcast/multicast ofports = list(self.tun_br_ofports[network_type].values()) if ofports: self.tun_br.install_flood_to_tun(lvid, segmentation_id, ofports) # inbound from tunnels: set lvid in the right table # and resubmit to Table LEARN_FROM_TUN for mac learning if self.enable_distributed_routing: self.dvr_agent.process_tunneled_network( network_type, lvid, segmentation_id) else: self.tun_br.provision_local_vlan( network_type=network_type, lvid=lvid, segmentation_id=segmentation_id) else: LOG.error("Cannot provision %(network_type)s network for " "net-id=%(net_uuid)s - tunneling disabled", {'network_type': network_type, 'net_uuid': net_uuid}) elif network_type == n_const.TYPE_FLAT: if physical_network in self.phys_brs: self._local_vlan_for_flat(lvid, physical_network) else: LOG.error("Cannot provision flat network for " "net-id=%(net_uuid)s - no bridge for " "physical_network %(physical_network)s", {'net_uuid': net_uuid, 'physical_network': physical_network}) elif network_type == n_const.TYPE_VLAN: if physical_network in self.phys_brs: self._local_vlan_for_vlan(lvid, physical_network, segmentation_id) else: LOG.error("Cannot provision VLAN network for " "net-id=%(net_uuid)s - no bridge for " "physical_network %(physical_network)s", {'net_uuid': net_uuid, 'physical_network': physical_network}) elif network_type == n_const.TYPE_LOCAL: # no flows needed for local networks pass else: LOG.error("Cannot provision unknown network type " "%(network_type)s for net-id=%(net_uuid)s", {'network_type': network_type, 'net_uuid': net_uuid}) def reclaim_local_vlan(self, net_uuid): '''Reclaim a local VLAN. :param net_uuid: the network uuid associated with this vlan. 
''' try: lvm = vlanmanager.LocalVlanManager().pop(net_uuid) except KeyError: LOG.debug("Network %s not used on agent.", net_uuid) return LOG.info("Reclaiming vlan = %(vlan_id)s from " "net-id = %(net_uuid)s", {'vlan_id': lvm.vlan, 'net_uuid': net_uuid}) if lvm.network_type in constants.TUNNEL_NETWORK_TYPES: if self.enable_tunneling: self.tun_br.reclaim_local_vlan( network_type=lvm.network_type, segmentation_id=lvm.segmentation_id) self.tun_br.delete_flood_to_tun(lvm.vlan) self.tun_br.delete_unicast_to_tun(lvm.vlan, None) self.tun_br.delete_arp_responder(lvm.vlan, None) if self.l2_pop: # Try to remove tunnel ports if not used by other networks for ofport in lvm.tun_ofports: self.cleanup_tunnel_port(self.tun_br, ofport, lvm.network_type) elif lvm.network_type == n_const.TYPE_FLAT: if lvm.physical_network in self.phys_brs: # outbound br = self.phys_brs[lvm.physical_network] br.reclaim_local_vlan( port=self.phys_ofports[lvm.physical_network], lvid=lvm.vlan) # inbound br = self.int_br br.reclaim_local_vlan( port=self.int_ofports[lvm.physical_network], segmentation_id=None) elif lvm.network_type == n_const.TYPE_VLAN: if lvm.physical_network in self.phys_brs: # outbound br = self.phys_brs[lvm.physical_network] br.reclaim_local_vlan( port=self.phys_ofports[lvm.physical_network], lvid=lvm.vlan) # inbound br = self.int_br br.reclaim_local_vlan( port=self.int_ofports[lvm.physical_network], segmentation_id=lvm.segmentation_id) elif lvm.network_type == n_const.TYPE_LOCAL: # no flows needed for local networks pass else: LOG.error("Cannot reclaim unknown network type " "%(network_type)s for net-id=%(net_uuid)s", {'network_type': lvm.network_type, 'net_uuid': net_uuid}) self.available_local_vlans.add(lvm.vlan) def port_bound(self, port, net_uuid, network_type, physical_network, segmentation_id, fixed_ips, device_owner, provisioning_needed): '''Bind port to net_uuid/lsw_id and install flow for inbound traffic to vm. :param port: an ovs_lib.VifPort object. :param net_uuid: the net_uuid this port is to be associated with. 
:param network_type: the network type ('gre', 'vlan', 'flat', 'local') :param physical_network: the physical network for 'vlan' or 'flat' :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel' :param fixed_ips: the ip addresses assigned to this port :param device_owner: the string indicative of owner of this port :param provisioning_needed: indicates if this is called for an OVS restart or recreated physical bridges and requires to do local vlan provisioning ''' if net_uuid not in self.vlan_manager or provisioning_needed: self.provision_local_vlan(net_uuid, network_type, physical_network, segmentation_id) lvm = self.vlan_manager.get(net_uuid) lvm.vif_ports[port.vif_id] = port self.dvr_agent.bind_port_to_dvr(port, lvm, fixed_ips, device_owner) port_other_config = self.int_br.db_get_val("Port", port.port_name, "other_config") if port_other_config is None: if port.vif_id in self.deleted_ports: LOG.debug("Port %s deleted concurrently", port.vif_id) elif port.vif_id in self.updated_ports: LOG.error("Expected port %s not found", port.vif_id) else: LOG.debug("Unable to get config for port %s", port.vif_id) return False vlan_mapping = {'net_uuid': net_uuid, 'network_type': network_type, 'physical_network': str(physical_network)} if segmentation_id is not None: vlan_mapping['segmentation_id'] = str(segmentation_id) port_other_config.update(vlan_mapping) self.int_br.set_db_attribute("Port", port.port_name, "other_config", port_other_config) return True def _add_port_tag_info(self, need_binding_ports): port_names = [p['vif_port'].port_name for p in need_binding_ports] port_info = self.int_br.get_ports_attributes( "Port", columns=["name", "tag", "other_config"], ports=port_names, if_exists=True) info_by_port = { x['name']: { 'tag': x['tag'], 'other_config': x['other_config'] or {} } for x in port_info } for port_detail in need_binding_ports: try: lvm = self.vlan_manager.get(port_detail['network_id']) except vlanmanager.MappingNotFound: continue port = port_detail['vif_port'] try: cur_info = info_by_port[port.port_name] except KeyError: continue str_vlan = str(lvm.vlan) other_config = cur_info['other_config'] if (cur_info['tag'] != lvm.vlan or other_config.get('tag') != str_vlan): other_config['tag'] = str_vlan self.int_br.set_db_attribute( "Port", port.port_name, "other_config", other_config) # Uninitialized port has tag set to [] if cur_info['tag']: LOG.warning("Uninstall flows of ofport %s due to " "local vlan change.", port.ofport) self.int_br.uninstall_flows(in_port=port.ofport) def _bind_devices(self, need_binding_ports): devices_up = [] devices_down = [] failed_devices = [] tunnels_missing = False port_names = [p['vif_port'].port_name for p in need_binding_ports] port_info = self.int_br.get_ports_attributes( "Port", columns=["name", "tag"], ports=port_names, if_exists=True) tags_by_name = {x['name']: x['tag'] for x in port_info} for port_detail in need_binding_ports: try: lvm = self.vlan_manager.get(port_detail['network_id']) except vlanmanager.MappingNotFound: # network for port was deleted. 
skip this port since it # will need to be handled as a DEAD port in the next scan continue port = port_detail['vif_port'] device = port_detail['device'] # Do not bind a port if it's already bound cur_tag = tags_by_name.get(port.port_name) if cur_tag is None: LOG.debug("Port %s was deleted concurrently, skipping it", port.port_name) continue if self.prevent_arp_spoofing: self.setup_arp_spoofing_protection(self.int_br, port, port_detail) if cur_tag != lvm.vlan: self.int_br.set_db_attribute( "Port", port.port_name, "tag", lvm.vlan) # update plugin about port status # FIXME(salv-orlando): Failures while updating device status # must be handled appropriately. Otherwise this might prevent # neutron server from sending network-vif-* events to the nova # API server, thus possibly preventing instance spawn. if port_detail.get('admin_state_up'): LOG.debug("Setting status for %s to UP", device) devices_up.append(device) if (not tunnels_missing and lvm.network_type in constants.TUNNEL_NETWORK_TYPES and len(lvm.tun_ofports) == 0): tunnels_missing = True else: LOG.debug("Setting status for %s to DOWN", device) devices_down.append(device) if devices_up or devices_down: # When the iter_num == 0, that indicate the ovs-agent is doing # the initialization work. L2 pop needs this precise knowledge # to notify the agent to refresh the tunnel related flows. # Otherwise, these flows will be cleaned as stale due to the # different cookie id. We also set refresh_tunnels if the agent # has not received a notification and is missing tunnels. refresh_tunnels = (self.iter_num == 0) or tunnels_missing devices_set = self.plugin_rpc.update_device_list( self.context, devices_up, devices_down, self.agent_id, self.conf.host, refresh_tunnels=refresh_tunnels) failed_devices = (devices_set.get('failed_devices_up') + devices_set.get('failed_devices_down')) if failed_devices: LOG.error("Configuration for devices %s failed!", failed_devices) LOG.info("Configuration for devices up %(up)s and devices " "down %(down)s completed.", {'up': devices_up, 'down': devices_down}) return set(failed_devices) @staticmethod def setup_arp_spoofing_protection(bridge, vif, port_details): if not port_details.get('port_security_enabled', True): LOG.info("Skipping ARP spoofing rules for port '%s' because " "it has port security disabled", vif.port_name) bridge.delete_arp_spoofing_protection(port=vif.ofport) bridge.set_allowed_macs_for_port(port=vif.ofport, allow_all=True) return if port_details['device_owner'].startswith( n_const.DEVICE_OWNER_NETWORK_PREFIX): LOG.debug("Skipping ARP spoofing rules for network owned port " "'%s'.", vif.port_name) bridge.delete_arp_spoofing_protection(port=vif.ofport) bridge.set_allowed_macs_for_port(port=vif.ofport, allow_all=True) return # clear any previous flows related to this port in our ARP table bridge.delete_arp_spoofing_allow_rules(port=vif.ofport) # collect all of the addresses and cidrs that belong to the port addresses = {f['ip_address'] for f in port_details['fixed_ips']} mac_addresses = {vif.vif_mac} if port_details.get('allowed_address_pairs'): addresses |= {p['ip_address'] for p in port_details['allowed_address_pairs']} mac_addresses |= {p['mac_address'] for p in port_details['allowed_address_pairs'] if p.get('mac_address')} bridge.set_allowed_macs_for_port(vif.ofport, mac_addresses) ipv6_addresses = {ip for ip in addresses if netaddr.IPNetwork(ip).version == 6} # Allow neighbor advertisements for LLA address. 
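        # The LLA for each MAC is derived via modified EUI-64 (RFC 4291),
        # matching what guest SLAAC would configure. Illustrative
        # doctest-style sketch (the MAC value is hypothetical):
        #
        #     >>> from oslo_utils import netutils
        #     >>> str(netutils.get_ipv6_addr_by_EUI64('fe80::/64',
        #     ...                                     'fa:16:3e:11:22:33'))
        #     'fe80::f816:3eff:fe11:2233'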
ipv6_addresses |= {str(netutils.get_ipv6_addr_by_EUI64( n_const.IPv6_LLA_PREFIX, mac)) for mac in mac_addresses} if not has_zero_prefixlen_address(ipv6_addresses): # Install protection only when prefix is not zero because a /0 # prefix allows any address anyway and the nd_target can only # match on /1 or more. bridge.install_icmpv6_na_spoofing_protection( port=vif.ofport, ip_addresses=ipv6_addresses) ipv4_addresses = {ip for ip in addresses if netaddr.IPNetwork(ip).version == 4} if not has_zero_prefixlen_address(ipv4_addresses): # Install protection only when prefix is not zero because a /0 # prefix allows any address anyway and the ARP_SPA can only # match on /1 or more. bridge.install_arp_spoofing_protection(port=vif.ofport, ip_addresses=ipv4_addresses) else: bridge.delete_arp_spoofing_protection(port=vif.ofport) def port_unbound(self, vif_id, net_uuid=None): '''Unbind port. Removes corresponding local vlan mapping object if this is its last VIF. :param vif_id: the id of the vif :param net_uuid: the net_uuid this port is associated with. ''' try: net_uuid = net_uuid or self.vlan_manager.get_net_uuid(vif_id) except vlanmanager.VifIdNotFound: LOG.info( 'port_unbound(): net_uuid %s not managed by VLAN manager', net_uuid) return lvm = self.vlan_manager.get(net_uuid) if vif_id in lvm.vif_ports: vif_port = lvm.vif_ports[vif_id] self.dvr_agent.unbind_port_from_dvr(vif_port, lvm) lvm.vif_ports.pop(vif_id, None) if not lvm.vif_ports: self.reclaim_local_vlan(net_uuid) def port_dead(self, port, log_errors=True): '''Once a port has no binding, put it on the "dead vlan". :param port: an ovs_lib.VifPort object. ''' # Don't kill a port if it's already dead cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag", log_errors=log_errors) if cur_tag and cur_tag != constants.DEAD_VLAN_TAG: self.int_br.set_db_attribute("Port", port.port_name, "tag", constants.DEAD_VLAN_TAG, log_errors=log_errors) self.int_br.drop_port(in_port=port.ofport) def setup_integration_br(self): '''Setup the integration bridge. ''' # Ensure the integration bridge is created. # ovs_lib.OVSBridge.create() will run the equivalent of # ovs-vsctl -- --may-exist add-br BRIDGE_NAME # which does nothing if bridge already exists. self.int_br.create() self.int_br.set_secure_mode() self.int_br.setup_controllers(self.conf) self.int_br.set_igmp_snooping_state(self.conf.OVS.igmp_snooping_enable) if self.conf.AGENT.drop_flows_on_start: # Delete the patch port between br-int and br-tun if we're deleting # the flows on br-int, so that traffic doesn't get flooded over # while flows are missing. 
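        # NOTE: br-int's default pipeline ends in a NORMAL
        # (learning-switch) action, so in the window between wiping the
        # old flows and installing the new defaults below, traffic
        # reaching br-int could be flooded out of every port, including
        # the patch port towards br-tun. Removing the patch port first
        # closes that window; setup_tunnel_br() recreates it afterwards.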
self.int_br.delete_port(self.conf.OVS.int_peer_patch_port) self.int_br.uninstall_flows(cookie=ovs_lib.COOKIE_ANY) self.int_br.setup_default_table() def setup_ancillary_bridges(self, integ_br, tun_br): '''Setup ancillary bridges - for example br-ex.''' ovs = ovs_lib.BaseOVS() ovs_bridges = set(ovs.get_bridges()) # Remove all known bridges ovs_bridges.remove(integ_br) if self.enable_tunneling: ovs_bridges.remove(tun_br) br_names = [self.phys_brs[physical_network].br_name for physical_network in self.phys_brs] ovs_bridges.difference_update(br_names) # Filter list of bridges to those that have external # bridge-id's configured br_names = [] for bridge in ovs_bridges: bridge_id = ovs.get_bridge_external_bridge_id(bridge, log_errors=False) if bridge_id != bridge: br_names.append(bridge) ovs_bridges.difference_update(br_names) ancillary_bridges = [] for bridge in ovs_bridges: br = ovs_lib.OVSBridge(bridge) LOG.info('Adding %s to list of bridges.', bridge) ancillary_bridges.append(br) return ancillary_bridges def setup_tunnel_br(self, tun_br_name=None): '''(re)initialize the tunnel bridge. Creates tunnel bridge, and links it to the integration bridge using a patch port. :param tun_br_name: the name of the tunnel bridge. ''' if not self.tun_br: self.tun_br = self.br_tun_cls(tun_br_name) # tun_br.create() won't recreate bridge if it exists, but will handle # cases where something like datapath_type has changed self.tun_br.create(secure_mode=True) self.tun_br.setup_controllers(self.conf) if (not self.int_br.port_exists(self.conf.OVS.int_peer_patch_port) or self.patch_tun_ofport == ovs_lib.INVALID_OFPORT): self.patch_tun_ofport = self.int_br.add_patch_port( self.conf.OVS.int_peer_patch_port, self.conf.OVS.tun_peer_patch_port) if (not self.tun_br.port_exists(self.conf.OVS.tun_peer_patch_port) or self.patch_int_ofport == ovs_lib.INVALID_OFPORT): self.patch_int_ofport = self.tun_br.add_patch_port( self.conf.OVS.tun_peer_patch_port, self.conf.OVS.int_peer_patch_port) if ovs_lib.INVALID_OFPORT in (self.patch_tun_ofport, self.patch_int_ofport): LOG.error("Failed to create OVS patch port. Cannot have " "tunneling enabled on this agent, since this " "version of OVS does not support tunnels or patch " "ports. Agent terminated!") sys.exit(1) if self.conf.AGENT.drop_flows_on_start: self.tun_br.uninstall_flows(cookie=ovs_lib.COOKIE_ANY) def setup_tunnel_br_flows(self): '''Setup the tunnel bridge. Add all flows to the tunnel bridge. ''' self.tun_br.setup_default_table(self.patch_int_ofport, self.arp_responder_enabled) def _reconfigure_physical_bridges(self, bridges): try: sync = self._do_reconfigure_physical_bridges(bridges) self.added_bridges = [] except RuntimeError: # If there was error and bridges aren't properly reconfigured, # there is no need to do full sync once again. It will be done when # reconfiguration of physical bridges will be finished without # errors sync = False self.added_bridges = bridges LOG.warning("RuntimeError during setup of physical bridges: %s", bridges) return sync def _do_reconfigure_physical_bridges(self, bridges): sync = False bridge_mappings = {} for bridge in bridges: LOG.info("Physical bridge %s was just re-created.", bridge) for phys_net, phys_br in self.bridge_mappings.items(): if bridge == phys_br: bridge_mappings[phys_net] = bridge if bridge_mappings: sync = True self.setup_physical_bridges(bridge_mappings) return sync def _check_bridge_datapath_id(self, bridge, datapath_ids_set): """Check for bridges with duplicate datapath-id Bottom 48 bits auto-derived from MAC of NIC. 
Upper 12 bits free, so we OR it with (bridge # << 48) to create a unique ID It must be exactly 64 bits, else OVS will reject it - zfill :param bridge: (OVSPhysicalBridge) bridge :param datapath_ids_set: (set) used datapath ids in OVS """ dpid = int(bridge.get_datapath_id(), 16) dpid_hex = format(dpid, '0x').zfill(16) if dpid_hex in datapath_ids_set: dpid_hex = format( dpid + (len(datapath_ids_set) << 48), '0x').zfill(16) bridge.set_datapath_id(dpid_hex) LOG.info('Bridge %s datapath-id = 0x%s', bridge.br_name, dpid_hex) datapath_ids_set.add(dpid_hex) def setup_physical_bridges(self, bridge_mappings): '''Setup the physical network bridges. Creates physical network bridges and links them to the integration bridge using veths or patch ports. :param bridge_mappings: map physical network names to bridge names. ''' self.phys_brs = {} self.int_ofports = {} self.phys_ofports = {} datapath_ids_set = set() ip_wrapper = ip_lib.IPWrapper() ovs = ovs_lib.BaseOVS() ovs_bridges = ovs.get_bridges() for physical_network, bridge in bridge_mappings.items(): LOG.info("Mapping physical network %(physical_network)s to " "bridge %(bridge)s", {'physical_network': physical_network, 'bridge': bridge}) # setup physical bridge if bridge not in ovs_bridges: LOG.error("Bridge %(bridge)s for physical network " "%(physical_network)s does not exist. Agent " "terminated!", {'physical_network': physical_network, 'bridge': bridge}) sys.exit(1) br = self.br_phys_cls(bridge) self._check_bridge_datapath_id(br, datapath_ids_set) # The bridge already exists, so create won't recreate it, but will # handle things like changing the datapath_type br.create() br.set_secure_mode() br.setup_controllers(self.conf) if cfg.CONF.AGENT.drop_flows_on_start: br.uninstall_flows(cookie=ovs_lib.COOKIE_ANY) br.setup_default_table() self.phys_brs[physical_network] = br # interconnect physical and integration bridges using veth/patches int_if_name = plugin_utils.get_interface_name( bridge, prefix=constants.PEER_INTEGRATION_PREFIX) phys_if_name = plugin_utils.get_interface_name( bridge, prefix=constants.PEER_PHYSICAL_PREFIX) # Interface type of port for physical and integration bridges must # be same, so check only one of them. # Not logging error here, as the interface may not exist yet. # Type check is done to cleanup wrong interface if any. int_type = self.int_br.db_get_val("Interface", int_if_name, "type", log_errors=False) if self.use_veth_interconnection: # Drop ports if the interface types doesn't match the # configuration value. if int_type == 'patch': self.int_br.delete_port(int_if_name) br.delete_port(phys_if_name) device = ip_lib.IPDevice(int_if_name) if device.exists(): device.link.delete() # Give udev a chance to process its rules here, to avoid # race conditions between commands launched by udev rules # and the subsequent call to ip_wrapper.add_veth utils.execute(['udevadm', 'settle', '--timeout=10']) int_veth, phys_veth = ip_wrapper.add_veth(int_if_name, phys_if_name) int_ofport = self.int_br.add_port(int_if_name) phys_ofport = br.add_port(phys_if_name) else: # Drop ports if the interface type doesn't match the # configuration value if int_type == 'veth': self.int_br.delete_port(int_if_name) br.delete_port(phys_if_name) # Setup int_br to physical bridge patches. If they already # exist we leave them alone, otherwise we create them but don't # connect them until after the drop rules are in place. 
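                # NOTE: "don't connect them" is achieved by pointing
                # options:peer at a placeholder (NONEXISTENT_PEER); OVS
                # keeps such a patch port down until the peer name
                # resolves. A sketch of the sequence with ovs_lib
                # (interface names are illustrative):
                #
                #   >>> ofport = br.add_patch_port('phy-br-eth1',
                #   ...                            'nonexistent-peer')
                #   >>> br.drop_port(in_port=ofport)  # drop rule first
                #   >>> br.set_db_attribute(
                #   ...     'Interface', 'phy-br-eth1',
                #   ...     'options', {'peer': 'int-br-eth1'})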
if self.int_br.port_exists(int_if_name): int_ofport = self.int_br.get_port_ofport(int_if_name) else: int_ofport = self.int_br.add_patch_port( int_if_name, constants.NONEXISTENT_PEER) if br.port_exists(phys_if_name): phys_ofport = br.get_port_ofport(phys_if_name) else: phys_ofport = br.add_patch_port( phys_if_name, constants.NONEXISTENT_PEER) self.int_ofports[physical_network] = int_ofport self.phys_ofports[physical_network] = phys_ofport # These two drop flows are the root cause for the bug #1803919. # And now we add a rpc check during agent start procedure. If # ovs agent can not reach any neutron server, or all neutron # servers are down, these flows will not be installed anymore. # Bug #1803919 was fixed in that way. # And as a reminder, we can not do much work on this. Because # the bridge mappings can be varied. Provider (external) network # can be implicitly set on any physical bridge due to the basic # NORMAL flow. Different vlan range networks can also have many # bridge map settings, these tenant network traffic can also be # blocked by the following drop flows. # block all untranslated traffic between bridges self.int_br.drop_port(in_port=int_ofport) br.drop_port(in_port=phys_ofport) if self.use_veth_interconnection: # enable veth to pass traffic int_veth.link.set_up() phys_veth.link.set_up() if self.veth_mtu: # set up mtu size for veth interfaces int_veth.link.set_mtu(self.veth_mtu) phys_veth.link.set_mtu(self.veth_mtu) else: # associate patch ports to pass traffic self.int_br.set_db_attribute('Interface', int_if_name, 'options', {'peer': phys_if_name}) br.set_db_attribute('Interface', phys_if_name, 'options', {'peer': int_if_name}) def update_stale_ofport_rules(self): # ARP spoofing rules and drop-flow upon port-delete # use ofport-based rules previous = self.vifname_to_ofport_map current = self.int_br.get_vif_port_to_ofport_map() # if any ofport numbers have changed, re-process the devices as # added ports so any rules based on ofport numbers are updated. moved_ports = self._get_ofport_moves(current, previous) # delete any stale rules based on removed ofports ofports_deleted = set(previous.values()) - set(current.values()) for ofport in ofports_deleted: if self.prevent_arp_spoofing: self.int_br.delete_arp_spoofing_protection(port=ofport) self.int_br.uninstall_flows(in_port=ofport) # store map for next iteration self.vifname_to_ofport_map = current return moved_ports @staticmethod def _get_ofport_moves(current, previous): """Returns a list of moved ports. Takes two port->ofport maps and returns a list ports that moved to a different ofport. Deleted ports are not included. """ port_moves = [] for name, ofport in previous.items(): if name not in current: continue current_ofport = current[name] if ofport != current_ofport: port_moves.append(name) return port_moves def _get_port_info(self, registered_ports, cur_ports, readd_registered_ports): port_info = PortInfo(current=cur_ports) # FIXME(salv-orlando): It's not really necessary to return early # if nothing has changed. 
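        # The diff below is plain set arithmetic (illustrative values):
        #
        #   >>> registered_ports = {'p1', 'p2'}
        #   >>> cur_ports = {'p2', 'p3'}
        #   >>> cur_ports - registered_ports      # reported as 'added'
        #   {'p3'}
        #   >>> registered_ports - cur_ports      # reported as 'removed'
        #   {'p1'}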
if not readd_registered_ports and cur_ports == registered_ports: return port_info if readd_registered_ports: port_info['added'] = cur_ports else: port_info['added'] = cur_ports - registered_ports # Update port_info with ports not found on the integration bridge port_info['removed'] = registered_ports - cur_ports return port_info def _update_port_info_failed_devices_stats(self, port_info, failed_devices): # remove failed devices that don't need to be retried failed_devices['added'] -= port_info['removed'] failed_devices['removed'] -= port_info['added'] # Disregard devices that were never noticed by the agent port_info['removed'] &= port_info['current'] # retry failed devices port_info['added'] |= failed_devices['added'] if failed_devices['added']: LOG.debug("retrying failed devices %s", failed_devices['added']) port_info['removed'] |= failed_devices['removed'] # Update current ports port_info['current'] |= port_info['added'] port_info['current'] -= port_info['removed'] def process_ports_events(self, events, registered_ports, ancillary_ports, old_ports_not_ready, failed_devices, failed_ancillary_devices, updated_ports=None): port_info = PortInfo(current=registered_ports) ancillary_port_info = PortInfo(current=ancillary_ports) ports_not_ready_yet = set() if updated_ports is None: updated_ports = set() # if a port was added and then removed or viceversa since the agent # can't know the order of the operations, check the status of the port # to determine if the port was added or deleted added_ports = {p['name'] for p in events['added']} removed_ports = {p['name'] for p in events['removed']} updated_ports.update({p['name'] for p in events['modified']}) ports_re_added = added_ports & removed_ports ports_re_added = [p for p in ports_re_added if ovs_lib.BaseOVS().port_exists(p)] events['re_added'] = [e for e in events['removed'] if e['name'] in ports_re_added] events['removed'] = [e for e in events['removed'] if e['name'] not in ports_re_added] ports_removed = [p['name'] for p in events['removed']] events['added'] = [e for e in events['added'] if e['name'] not in ports_removed] # TODO(rossella_s): scanning the ancillary bridge won't be needed # anymore when https://review.opendev.org/#/c/203381 since the bridge # id stored in external_ids will be used to identify the bridge the # port belongs to cur_ancillary_ports = set() for bridge in self.ancillary_brs: cur_ancillary_ports |= bridge.get_vif_port_set() cur_ancillary_ports |= ancillary_port_info['current'] def _process_port(port, ports, ancillary_ports): # check 'iface-id' is set otherwise is not a port # the agent should care about if 'attached-mac' in port.get('external_ids', []): iface_id = self.int_br.portid_from_external_ids( port['external_ids']) if iface_id: if port['ofport'] == ovs_lib.UNASSIGNED_OFPORT: LOG.debug("Port %s not ready yet on the bridge", iface_id) ports_not_ready_yet.add(port['name']) return # check if port belongs to ancillary bridge if iface_id in cur_ancillary_ports: ancillary_ports.add(iface_id) else: ports.add(iface_id) if old_ports_not_ready: old_ports_not_ready_attrs = self.int_br.get_ports_attributes( 'Interface', columns=['name', 'external_ids', 'ofport'], ports=old_ports_not_ready, if_exists=True) now_ready_ports = set( [p['name'] for p in old_ports_not_ready_attrs]) LOG.debug("Ports %s are now ready", now_ready_ports) old_ports_not_ready_yet = old_ports_not_ready - now_ready_ports removed_ports = set([p['name'] for p in events['removed']]) old_ports_not_ready_yet -= removed_ports LOG.debug("Ports %s were not ready 
at last iteration and are not " "ready yet", old_ports_not_ready_yet) ports_not_ready_yet |= old_ports_not_ready_yet events['added'].extend(old_ports_not_ready_attrs) for event_type in ('added', 'removed', 're_added'): for port in events.get(event_type, []): _process_port(port, port_info[event_type], ancillary_port_info[event_type]) self._update_port_info_failed_devices_stats(port_info, failed_devices) self._update_port_info_failed_devices_stats(ancillary_port_info, failed_ancillary_devices) updated_ports.update(self.check_changed_vlans()) if updated_ports: # Some updated ports might have been removed in the # meanwhile, and therefore should not be processed. # In this case the updated port won't be found among # current ports. updated_ports &= port_info['current'] port_info['updated'] = updated_ports return port_info, ancillary_port_info, ports_not_ready_yet def scan_ports(self, registered_ports, sync, updated_ports=None): cur_ports = self.int_br.get_vif_port_set() self.int_br_device_count = len(cur_ports) port_info = self._get_port_info(registered_ports, cur_ports, sync) if updated_ports is None: updated_ports = set() updated_ports.update(self.check_changed_vlans()) if updated_ports: # Some updated ports might have been removed in the # meanwhile, and therefore should not be processed. # In this case the updated port won't be found among # current ports. updated_ports &= cur_ports if updated_ports: port_info['updated'] = updated_ports return port_info def scan_ancillary_ports(self, registered_ports, sync): cur_ports = set() for bridge in self.ancillary_brs: cur_ports |= bridge.get_vif_port_set() return self._get_port_info(registered_ports, cur_ports, sync) def check_changed_vlans(self): """Check for changed VLAN tags. If changes, notify server and return. The returned value is a set of port ids of the ports concerned by a vlan tag loss. """ port_tags = self.int_br.get_port_tag_dict() changed_ports = set() for lvm in self.vlan_manager: for port in lvm.vif_ports.values(): if ( port.port_name in port_tags and port_tags[port.port_name] != lvm.vlan ): LOG.info( "Port '%(port_name)s' has lost " "its vlan tag '%(vlan_tag)d'!", {'port_name': port.port_name, 'vlan_tag': lvm.vlan} ) changed_ports.add(port.vif_id) if changed_ports: # explicitly mark these DOWN on the server since they have been # manipulated (likely a nova unplug/replug) and need to be rewired devices_down = self.plugin_rpc.update_device_list(self.context, [], changed_ports, self.agent_id, self.conf.host) failed_devices = set(devices_down.get('failed_devices_down')) if failed_devices: LOG.debug("Status updated failed for %s", failed_devices) return changed_ports def treat_vif_port(self, vif_port, port_id, network_id, network_type, physical_network, segmentation_id, admin_state_up, fixed_ips, device_owner, provisioning_needed): # When this function is called for a port, the port should have # an OVS ofport configured, as only these ports were considered # for being treated. 
If that does not happen, it is a potential # error condition of which operators should be aware port_needs_binding = True if not vif_port.ofport: LOG.warning("VIF port: %s has no ofport configured, " "and might not be able to transmit", vif_port.vif_id) if vif_port: if admin_state_up: port_needs_binding = self.port_bound( vif_port, network_id, network_type, physical_network, segmentation_id, fixed_ips, device_owner, provisioning_needed) else: LOG.info("VIF port: %s admin state up disabled, " "putting on the dead VLAN", vif_port.vif_id) self.port_dead(vif_port) self.plugin_rpc.update_device_down( self.context, port_id, self.agent_id, self.conf.host) port_needs_binding = False else: LOG.debug("No VIF port for port %s defined on agent.", port_id) return port_needs_binding def _setup_tunnel_port(self, br, port_name, remote_ip, tunnel_type): try: if (netaddr.IPAddress(self.local_ip).version != netaddr.IPAddress(remote_ip).version): LOG.error("IP version mismatch, cannot create tunnel: " "local_ip=%(lip)s remote_ip=%(rip)s", {'lip': self.local_ip, 'rip': remote_ip}) return 0 except Exception: LOG.error("Invalid local or remote IP, cannot create tunnel: " "local_ip=%(lip)s remote_ip=%(rip)s", {'lip': self.local_ip, 'rip': remote_ip}) return 0 ofport = br.add_tunnel_port(port_name, remote_ip, self.local_ip, tunnel_type, self.vxlan_udp_port, self.dont_fragment, self.tunnel_csum, self.tos) if ofport == ovs_lib.INVALID_OFPORT: LOG.error("Failed to set-up %(type)s tunnel port to %(ip)s", {'type': tunnel_type, 'ip': remote_ip}) return 0 self.tun_br_ofports[tunnel_type][remote_ip] = ofport # Add flow in default table to resubmit to the right # tunneling table (lvid will be set in the latter) br.setup_tunnel_port(tunnel_type, ofport) return ofport def _setup_tunnel_flood_flow(self, br, tunnel_type): ofports = self.tun_br_ofports[tunnel_type].values() if ofports and not self.l2_pop: # Update flooding flows to include the new tunnel for vlan_mapping in self.vlan_manager: if vlan_mapping.network_type == tunnel_type: br.install_flood_to_tun(vlan_mapping.vlan, vlan_mapping.segmentation_id, ofports) def setup_tunnel_port(self, br, remote_ip, network_type): port_name = self.get_tunnel_name( network_type, self.local_ip, remote_ip) if port_name is None: return 0 ofport = self._setup_tunnel_port(br, port_name, remote_ip, network_type) self._setup_tunnel_flood_flow(br, network_type) return ofport def cleanup_tunnel_port(self, br, tun_ofport, tunnel_type): # Check if this tunnel port is still used for lvm in self.vlan_manager: if tun_ofport in lvm.tun_ofports: break # If not, remove it else: items = list(self.tun_br_ofports[tunnel_type].items()) for remote_ip, ofport in items: if ofport == tun_ofport: port_name = self.get_tunnel_name( tunnel_type, self.local_ip, remote_ip) br.delete_port(port_name) br.cleanup_tunnel_port(ofport) self.tun_br_ofports[tunnel_type].pop(remote_ip, None) def treat_devices_added_or_updated(self, devices, provisioning_needed, re_added): skipped_devices = [] need_binding_devices = [] binding_no_activated_devices = set() agent_restarted = self.iter_num == 0 devices_details_list = ( self.plugin_rpc.get_devices_details_list_and_failed_devices( self.context, devices, self.agent_id, self.conf.host, agent_restarted)) failed_devices = set(devices_details_list.get('failed_devices')) devices = devices_details_list.get('devices') vif_by_id = self.int_br.get_vifs_by_ids( [vif['device'] for vif in devices]) for details in devices: device = details['device'] LOG.debug("Processing port: %s", device) port 
= vif_by_id.get(device) if not port: # The port disappeared and cannot be processed LOG.info("Port %s was not found on the integration bridge " "and will therefore not be processed", device) self.ext_manager.delete_port(self.context, {'port_id': device}) skipped_devices.append(device) continue if 'port_id' in details: LOG.info("Port %(device)s updated. Details: %(details)s", {'device': device, 'details': details}) details['vif_port'] = port need_binding = self.treat_vif_port(port, details['port_id'], details['network_id'], details['network_type'], details['physical_network'], details['segmentation_id'], details['admin_state_up'], details['fixed_ips'], details['device_owner'], provisioning_needed) if need_binding: need_binding_devices.append(details) self._update_port_network(details['port_id'], details['network_id']) if details['device'] in re_added: self.ext_manager.delete_port(self.context, details) self.ext_manager.handle_port(self.context, details) else: if n_const.NO_ACTIVE_BINDING in details: # Port was added to the bridge, but its binding in this # agent hasn't been activated yet. It will be treated as # added when binding is activated binding_no_activated_devices.add(device) LOG.debug("Device %s has no active binding in host", device) else: LOG.warning( "Device %s not defined on plugin or binding failed", device) if (port and port.ofport != -1): self.port_dead(port) return (skipped_devices, binding_no_activated_devices, need_binding_devices, failed_devices) def _update_port_network(self, port_id, network_id): self._clean_network_ports(port_id) self.network_ports[network_id].add(port_id) def treat_ancillary_devices_added(self, devices): devices_details_list = ( self.plugin_rpc.get_devices_details_list_and_failed_devices( self.context, devices, self.agent_id, self.conf.host)) failed_devices = set(devices_details_list.get('failed_devices')) devices_added = [ d['device'] for d in devices_details_list.get('devices')] # update plugin about port status devices_set_up = ( self.plugin_rpc.update_device_list(self.context, devices_added, [], self.agent_id, self.conf.host)) failed_devices |= set(devices_set_up.get('failed_devices_up')) LOG.info("Ancillary Ports %(added)s added, failed devices " "%(failed)s", {'added': devices, 'failed': failed_devices}) return failed_devices def treat_devices_removed(self, devices): self.sg_agent.remove_devices_filter(devices) LOG.info("Ports %s removed", devices) devices_down = self.plugin_rpc.update_device_list(self.context, [], devices, self.agent_id, self.conf.host) failed_devices = set(devices_down.get('failed_devices_down')) LOG.debug("Port removal failed for %s", failed_devices) for device in devices: self.ext_manager.delete_port(self.context, {'port_id': device}) self.port_unbound(device) return failed_devices def treat_ancillary_devices_removed(self, devices): LOG.info("Ancillary ports %s removed", devices) devices_down = self.plugin_rpc.update_device_list(self.context, [], devices, self.agent_id, self.conf.host) LOG.info("Devices down %s ", devices_down) failed_devices = set(devices_down.get('failed_devices_down')) if failed_devices: LOG.debug("Port removal failed for %s", failed_devices) for detail in devices_down.get('devices_down'): if detail['exists']: LOG.info("Port %s updated.", detail['device']) # Nothing to do regarding local networking else: LOG.debug("Device %s not defined on plugin", detail['device']) return failed_devices def treat_devices_skipped(self, devices): LOG.info("Ports %s skipped, changing status to down", devices) devices_down 
= self.plugin_rpc.update_device_list(self.context, [], devices, self.agent_id, self.conf.host) failed_devices = set(devices_down.get('failed_devices_down')) if failed_devices: LOG.debug("Port down failed for %s", failed_devices) def process_network_ports(self, port_info, provisioning_needed): failed_devices = {'added': set(), 'removed': set()} # TODO(salv-orlando): consider a solution for ensuring notifications # are processed exactly in the same order in which they were # received. This is tricky because there are two notification # sources: the neutron server, and the ovs db monitor process # If there is an exception while processing security groups ports # will not be wired anyway, and a resync will be triggered # VIF wiring needs to be performed always for 'new' devices. # For updated ports, re-wiring is not needed in most cases, but needs # to be performed anyway when the admin state of a device is changed. # A device might be both in the 'added' and 'updated' # list at the same time; avoid processing it twice. devices_added_updated = (port_info.get('added', set()) | port_info.get('updated', set())) re_added = port_info.get('re_added', set()) need_binding_devices = [] skipped_devices = set() binding_no_activated_devices = set() start = time.time() if devices_added_updated: (skipped_devices, binding_no_activated_devices, need_binding_devices, failed_devices['added']) = ( self.treat_devices_added_or_updated( devices_added_updated, provisioning_needed, re_added)) LOG.info("process_network_ports - iteration:%(iter_num)d - " "treat_devices_added_or_updated completed. " "Skipped %(num_skipped)d and no activated binding " "devices %(num_no_active_binding)d of %(num_current)d " "devices currently available. " "Time elapsed: %(elapsed).3f", {'iter_num': self.iter_num, 'num_skipped': len(skipped_devices), 'num_no_active_binding': len(binding_no_activated_devices), 'num_current': len(port_info['current']), 'elapsed': time.time() - start}) # Update the list of current ports storing only those which # have been actually processed. 
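        # NOTE: dropping the skipped devices from 'current' means that,
        # should their VIFs reappear, the next scan_ports() diff reports
        # them as 'added' again instead of ignoring them; meanwhile
        # treat_devices_skipped() below marks them DOWN on the server.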
        skipped_devices = set(skipped_devices)
        port_info['current'] = (port_info['current'] - skipped_devices)
        # TODO(salv-orlando): Optimize avoiding applying filters
        # unnecessarily (e.g. when there are no IP address changes)
        added_ports = (port_info.get('added', set()) - skipped_devices -
                       binding_no_activated_devices)
        self._add_port_tag_info(need_binding_devices)
        self.process_install_ports_egress_flows(need_binding_devices)
        self.sg_agent.setup_port_filters(added_ports,
                                         port_info.get('updated', set()))
        LOG.info("process_network_ports - iteration:%(iter_num)d - "
                 "agent port security group processed in %(elapsed).3f",
                 {'iter_num': self.iter_num,
                  'elapsed': time.time() - start})
        failed_devices['added'] |= self._bind_devices(need_binding_devices)
        if 'removed' in port_info and port_info['removed']:
            start = time.time()
            failed_devices['removed'] |= self.treat_devices_removed(
                port_info['removed'])
            LOG.info("process_network_ports - iteration:%(iter_num)d - "
                     "treat_devices_removed completed in %(elapsed).3f",
                     {'iter_num': self.iter_num,
                      'elapsed': time.time() - start})
        if skipped_devices:
            start = time.time()
            self.treat_devices_skipped(skipped_devices)
            LOG.info("process_network_ports - iteration:%(iter_num)d - "
                     "treat_devices_skipped completed in %(elapsed).3f",
                     {'iter_num': self.iter_num,
                      'elapsed': time.time() - start})
        return failed_devices

    def process_install_ports_egress_flows(self, ports):
        if not self.conf.AGENT.explicitly_egress_direct:
            return
        if (isinstance(self.sg_agent.firewall,
                       agent_firewall.NoopFirewallDriver) or
                not agent_sg_rpc.is_firewall_enabled()):
            with self.int_br.deferred(full_ordered=True,
                                      use_bundle=True) as int_br:
                for port in ports:
                    try:
                        self.install_accepted_egress_direct_flow(port, int_br)
                        # give other coroutines a chance to run
                        eventlet.sleep(0)
                    except Exception as err:
                        LOG.debug("Failed to install accepted egress flows "
                                  "for port %s, error: %s",
                                  port['port_id'], err)

    def install_accepted_egress_direct_flow(self, port_detail, br_int):
        lvm = self.vlan_manager.get(port_detail['network_id'])
        port = port_detail['vif_port']
        br_int.add_flow(
            table=constants.TRANSIENT_TABLE,
            priority=9,
            in_port=port.ofport,
            dl_src=port_detail['mac_address'],
            actions='resubmit(,{:d})'.format(
                constants.TRANSIENT_EGRESS_TABLE))
        br_int.add_flow(
            table=constants.TRANSIENT_EGRESS_TABLE,
            priority=12,
            dl_dst=port_detail['mac_address'],
            actions='output:{:d}'.format(port.ofport))
        patch_ofport = None
        if lvm.network_type in (
                n_const.TYPE_VXLAN, n_const.TYPE_GRE, n_const.TYPE_GENEVE):
            port_name = self.conf.OVS.int_peer_patch_port
            patch_ofport = self.int_br.get_port_ofport(port_name)
        elif lvm.network_type == n_const.TYPE_VLAN:
            bridge = self.bridge_mappings.get(lvm.physical_network)
            port_name = plugin_utils.get_interface_name(
                bridge, prefix=constants.PEER_INTEGRATION_PREFIX)
            patch_ofport = self.int_br.get_port_ofport(port_name)
        if patch_ofport is not None:
            br_int.add_flow(
                table=constants.TRANSIENT_EGRESS_TABLE,
                priority=10,
                dl_src=port_detail['mac_address'],
                dl_dst="00:00:00:00:00:00/01:00:00:00:00:00",
                in_port=port.ofport,
                actions='mod_vlan_vid:{:d},'
                        'output:{:d}'.format(
                            lvm.vlan, patch_ofport))

    def delete_accepted_egress_direct_flow(self, br_int, ofport, mac, vlan):
        if not self.conf.AGENT.explicitly_egress_direct:
            return
        br_int.delete_flows(
            table=constants.TRANSIENT_TABLE,
            in_port=ofport,
            dl_src=mac)
        br_int.delete_flows(
            table=constants.TRANSIENT_EGRESS_TABLE,
            dl_dst=mac)
        br_int.delete_flows(
            table=constants.TRANSIENT_EGRESS_TABLE,
            dl_src=mac,
            in_port=ofport)

    def process_ancillary_network_ports(self,
port_info): failed_devices = {'added': set(), 'removed': set()} if 'added' in port_info and port_info['added']: start = time.time() failed_added = self.treat_ancillary_devices_added( port_info['added']) LOG.info("process_ancillary_network_ports - iteration: " "%(iter_num)d - treat_ancillary_devices_added " "completed in %(elapsed).3f", {'iter_num': self.iter_num, 'elapsed': time.time() - start}) failed_devices['added'] = failed_added if 'removed' in port_info and port_info['removed']: start = time.time() failed_removed = self.treat_ancillary_devices_removed( port_info['removed']) failed_devices['removed'] = failed_removed LOG.info("process_ancillary_network_ports - iteration: " "%(iter_num)d - treat_ancillary_devices_removed " "completed in %(elapsed).3f", {'iter_num': self.iter_num, 'elapsed': time.time() - start}) return failed_devices @classmethod def get_tunnel_hash(cls, ip_address, hashlen): try: addr = netaddr.IPAddress(ip_address) if addr.version == n_const.IP_VERSION_4: # We cannot change this from 8, since it could break # backwards-compatibility return '%08x' % addr else: # Create 32-bit Base32 encoded hash sha1 = hashlib.sha1(ip_address.encode()) iphash = base64.b32encode(sha1.digest()) return iphash[:hashlen].decode().lower() except Exception: LOG.warning("Invalid remote IP: %s", ip_address) return def tunnel_sync(self): LOG.debug("Configuring tunnel endpoints to other OVS agents") try: for tunnel_type in self.tunnel_types: details = self.plugin_rpc.tunnel_sync(self.context, self.local_ip, tunnel_type, self.conf.host) if not self.l2_pop: tunnels = details['tunnels'] for tunnel in tunnels: if self.local_ip != tunnel['ip_address']: remote_ip = tunnel['ip_address'] tun_name = self.get_tunnel_name( tunnel_type, self.local_ip, remote_ip) if tun_name is None: continue self._setup_tunnel_port(self.tun_br, tun_name, tunnel['ip_address'], tunnel_type) self._setup_tunnel_flood_flow(self.tun_br, tunnel_type) except Exception as e: LOG.debug("Unable to sync tunnel IP %(local_ip)s: %(e)s", {'local_ip': self.local_ip, 'e': e}) return True return False @classmethod def get_tunnel_name(cls, network_type, local_ip, remote_ip): # This string is used to build port and interface names in OVS. # Port and interface names can be max 16 characters long, # including NULL, and must be unique per table per host. # We make the name as long as possible given the network_type, # for example, 'vxlan-012345678' or 'geneve-01234567'. # Remove length of network type and dash hashlen = n_const.DEVICE_NAME_MAX_LEN - len(network_type) - 1 remote_tunnel_hash = cls.get_tunnel_hash(remote_ip, hashlen) if not remote_tunnel_hash: return None return '%s-%s' % (network_type, remote_tunnel_hash) def _agent_has_updates(self, polling_manager): return (polling_manager.is_polling_required or self.updated_ports or self.deleted_ports or self.deactivated_bindings or self.activated_bindings or self.updated_smartnic_ports or self.sg_agent.firewall_refresh_needed()) def _port_info_has_changes(self, port_info): return (port_info.get('added') or port_info.get('removed') or port_info.get('updated')) def check_ovs_status(self): try: # Check for the canary flow status = self.int_br.check_canary_table() except Exception: LOG.exception("Failure while checking for the canary flow") status = constants.OVS_DEAD if status == constants.OVS_RESTARTED: LOG.warning("OVS is restarted. OVSNeutronAgent will reset " "bridges and recover ports.") elif status == constants.OVS_DEAD: LOG.warning("OVS is dead. 
OVSNeutronAgent will keep running " "and checking OVS status periodically.") return status def loop_count_and_wait(self, start_time, port_stats): # sleep till end of polling interval elapsed = time.time() - start_time LOG.info("Agent rpc_loop - iteration:%(iter_num)d " "completed. Processed ports statistics: " "%(port_stats)s. Elapsed:%(elapsed).3f", {'iter_num': self.iter_num, 'port_stats': port_stats, 'elapsed': elapsed}) if elapsed < self.polling_interval: time.sleep(self.polling_interval - elapsed) else: LOG.debug("Loop iteration exceeded interval " "(%(polling_interval)s vs. %(elapsed)s)!", {'polling_interval': self.polling_interval, 'elapsed': elapsed}) self.iter_num = self.iter_num + 1 def get_port_stats(self, port_info, ancillary_port_info): port_stats = { 'regular': { 'added': len(port_info.get('added', [])), 'updated': len(port_info.get('updated', [])), 'removed': len(port_info.get('removed', []))}} if self.ancillary_brs: port_stats['ancillary'] = { 'added': len(ancillary_port_info.get('added', [])), 'removed': len(ancillary_port_info.get('removed', []))} return port_stats def cleanup_stale_flows(self): LOG.info("Cleaning stale %s flows", self.int_br.br_name) self.int_br.cleanup_flows() for pby_br in self.phys_brs.values(): LOG.info("Cleaning stale %s flows", pby_br.br_name) pby_br.cleanup_flows() if self.enable_tunneling: LOG.info("Cleaning stale %s flows", self.tun_br.br_name) self.tun_br.cleanup_flows() def process_port_info(self, start, polling_manager, sync, ovs_restarted, ports, ancillary_ports, updated_ports_copy, consecutive_resyncs, ports_not_ready_yet, failed_devices, failed_ancillary_devices): # There are polling managers that don't have get_events, e.g. # AlwaysPoll used by windows implementations # REVISIT (rossella_s) This needs to be reworked to hide implementation # details regarding polling in BasePollingManager subclasses if sync or not (hasattr(polling_manager, 'get_events')): if sync: LOG.info("Agent out of sync with plugin!") consecutive_resyncs = consecutive_resyncs + 1 if (consecutive_resyncs >= constants.MAX_DEVICE_RETRIES): LOG.warning( "Clearing cache of registered ports," " retries to resync were > %s", constants.MAX_DEVICE_RETRIES) ports.clear() ancillary_ports.clear() consecutive_resyncs = 0 else: consecutive_resyncs = 0 # TODO(rossella_s): For implementations that use AlwaysPoll # resync if a device failed. This can be improved in future sync = (any(failed_devices.values()) or any(failed_ancillary_devices.values())) # NOTE(rossella_s) don't empty the queue of events # calling polling_manager.get_events() since # the agent might miss some event (for example a port # deletion) reg_ports = (set() if ovs_restarted else ports) port_info = self.scan_ports(reg_ports, sync, updated_ports_copy) # Treat ancillary devices if they exist if self.ancillary_brs: ancillary_port_info = self.scan_ancillary_ports( ancillary_ports, sync) LOG.info("Agent rpc_loop - iteration:%(iter_num)d" " - ancillary port info retrieved. 
" "Elapsed:%(elapsed).3f", {'iter_num': self.iter_num, 'elapsed': time.time() - start}) else: ancillary_port_info = {} else: consecutive_resyncs = 0 events = polling_manager.get_events() port_info, ancillary_port_info, ports_not_ready_yet = ( self.process_ports_events(events, ports, ancillary_ports, ports_not_ready_yet, failed_devices, failed_ancillary_devices, updated_ports_copy)) registry.notify( constants.OVSDB_RESOURCE, callback_events.AFTER_READ, self, ovsdb_events=events) return (port_info, ancillary_port_info, consecutive_resyncs, ports_not_ready_yet) def _remove_devices_not_to_retry(self, failed_devices, failed_ancillary_devices, devices_not_to_retry, ancillary_devices_not_to_retry): """This method removes the devices that exceeded the number of retries from failed_devices and failed_ancillary_devices """ for event in ['added', 'removed']: failed_devices[event] = ( failed_devices[event] - devices_not_to_retry[event]) failed_ancillary_devices[event] = ( failed_ancillary_devices[event] - ancillary_devices_not_to_retry[event]) def _get_devices_not_to_retry(self, failed_devices, failed_ancillary_devices, failed_devices_retries_map): """Return the devices not to retry and update the retries map""" new_failed_devices_retries_map = {} devices_not_to_retry = {} ancillary_devices_not_to_retry = {} def _increase_retries(devices_set): devices_not_to_retry = set() for dev in devices_set: retries = failed_devices_retries_map.get(dev, 0) if retries >= constants.MAX_DEVICE_RETRIES: devices_not_to_retry.add(dev) LOG.warning( "Device %(dev)s failed for %(times)s times and won't " "be retried anymore", { 'dev': dev, 'times': constants.MAX_DEVICE_RETRIES}) else: new_failed_devices_retries_map[dev] = retries + 1 return devices_not_to_retry for event in ['added', 'removed']: devices_not_to_retry[event] = _increase_retries( failed_devices[event]) ancillary_devices_not_to_retry[event] = _increase_retries( failed_ancillary_devices[event]) return (new_failed_devices_retries_map, devices_not_to_retry, ancillary_devices_not_to_retry) def update_retries_map_and_remove_devs_not_to_retry( self, failed_devices, failed_ancillary_devices, failed_devices_retries_map): (new_failed_devices_retries_map, devices_not_to_retry, ancillary_devices_not_to_retry) = self._get_devices_not_to_retry( failed_devices, failed_ancillary_devices, failed_devices_retries_map) self._remove_devices_not_to_retry( failed_devices, failed_ancillary_devices, devices_not_to_retry, ancillary_devices_not_to_retry) return new_failed_devices_retries_map def _handle_ovs_restart(self, polling_manager): self.setup_integration_br() self.setup_physical_bridges(self.bridge_mappings) if self.enable_tunneling: self._reset_tunnel_ofports() self.setup_tunnel_br() self.setup_tunnel_br_flows() self.agent_state['start_flag'] = True # Force state report to avoid race condition # with l2pop fdb entries update self._report_state() if self.enable_distributed_routing: self.dvr_agent.reset_ovs_parameters(self.int_br, self.tun_br, self.patch_int_ofport, self.patch_tun_ofport) self.dvr_agent.reset_dvr_parameters() self.dvr_agent.setup_dvr_flows() # notify that OVS has restarted registry.publish( callback_resources.AGENT, callback_events.OVS_RESTARTED, self, payload=None) # restart the polling manager so that it will signal as added # all the current ports # REVISIT (rossella_s) Define a method "reset" in # BasePollingManager that will be implemented by AlwaysPoll as # no action and by InterfacePollingMinimizer as start/stop if isinstance(polling_manager, 
polling.InterfacePollingMinimizer): polling_manager.stop() polling_manager.start() def rpc_loop(self, polling_manager): idl_monitor = self.ovs.ovsdb.idl_monitor sync = False ports = set() updated_ports_copy = set() activated_bindings_copy = set() ancillary_ports = set() tunnel_sync = True ovs_restarted = False consecutive_resyncs = 0 need_clean_stale_flow = True ports_not_ready_yet = set() failed_devices = {'added': set(), 'removed': set()} failed_ancillary_devices = {'added': set(), 'removed': set()} failed_devices_retries_map = {} while self._check_and_handle_signal(): if self.fullsync: LOG.info("rpc_loop doing a full sync.") sync = True self.fullsync = False port_info = {} ancillary_port_info = {} start = time.time() LOG.info("Agent rpc_loop - iteration:%d started", self.iter_num) ovs_status = self.check_ovs_status() if ovs_status == constants.OVS_RESTARTED: self._handle_ovs_restart(polling_manager) tunnel_sync = self.enable_tunneling or tunnel_sync elif ovs_status == constants.OVS_DEAD: # Agent doesn't apply any operations when ovs is dead, to # prevent unexpected failure or crash. Sleep and continue # loop in which ovs status will be checked periodically. port_stats = self.get_port_stats({}, {}) self.loop_count_and_wait(start, port_stats) continue # Check if any physical bridge wasn't recreated recently added_bridges = idl_monitor.bridges_added + self.added_bridges bridges_recreated = self._reconfigure_physical_bridges( added_bridges) sync |= bridges_recreated # Notify the plugin of tunnel IP if self.enable_tunneling and tunnel_sync: try: tunnel_sync = self.tunnel_sync() except Exception: LOG.exception("Error while configuring tunnel endpoints") tunnel_sync = True ovs_restarted |= (ovs_status == constants.OVS_RESTARTED) devices_need_retry = (any(failed_devices.values()) or any(failed_ancillary_devices.values()) or ports_not_ready_yet) if (self._agent_has_updates(polling_manager) or sync or devices_need_retry): try: LOG.info("Agent rpc_loop - iteration:%(iter_num)d - " "starting polling. Elapsed:%(elapsed).3f", {'iter_num': self.iter_num, 'elapsed': time.time() - start}) if self.conf.AGENT.baremetal_smartnic: if sync: self.process_smartnic_ports() updated_smartnic_ports_copy = ( self.updated_smartnic_ports) self.updated_smartnic_ports = list() for port_data in updated_smartnic_ports_copy: self.treat_smartnic_port(port_data) # Save updated ports dict to perform rollback in # case resync would be needed, and then clear # self.updated_ports. As the greenthread should not yield # between these two statements, this will be thread-safe updated_ports_copy = self.updated_ports self.updated_ports = set() activated_bindings_copy = self.activated_bindings self.activated_bindings = set() (port_info, ancillary_port_info, consecutive_resyncs, ports_not_ready_yet) = (self.process_port_info( start, polling_manager, sync, ovs_restarted, ports, ancillary_ports, updated_ports_copy, consecutive_resyncs, ports_not_ready_yet, failed_devices, failed_ancillary_devices)) sync = False self.process_deleted_ports(port_info) self.process_deactivated_bindings(port_info) self.process_activated_bindings(port_info, activated_bindings_copy) ofport_changed_ports = self.update_stale_ofport_rules() if ofport_changed_ports: port_info.setdefault('updated', set()).update( ofport_changed_ports) LOG.info("Agent rpc_loop - iteration:%(iter_num)d - " "port information retrieved. 
" "Elapsed:%(elapsed).3f", {'iter_num': self.iter_num, 'elapsed': time.time() - start}) # Secure and wire/unwire VIFs and update their status # on Neutron server if (self._port_info_has_changes(port_info) or self.sg_agent.firewall_refresh_needed() or ovs_restarted): LOG.debug("Starting to process devices in:%s", port_info) provisioning_needed = ( ovs_restarted or bridges_recreated) failed_devices = self.process_network_ports( port_info, provisioning_needed) if need_clean_stale_flow: self.cleanup_stale_flows() need_clean_stale_flow = False LOG.info("Agent rpc_loop - iteration:%(iter_num)d - " "ports processed. Elapsed:%(elapsed).3f", {'iter_num': self.iter_num, 'elapsed': time.time() - start}) ports = port_info['current'] if self.ancillary_brs: failed_ancillary_devices = ( self.process_ancillary_network_ports( ancillary_port_info)) LOG.info("Agent rpc_loop - iteration: " "%(iter_num)d - ancillary ports " "processed. Elapsed:%(elapsed).3f", {'iter_num': self.iter_num, 'elapsed': time.time() - start}) ancillary_ports = ancillary_port_info['current'] polling_manager.polling_completed() failed_devices_retries_map = ( self.update_retries_map_and_remove_devs_not_to_retry( failed_devices, failed_ancillary_devices, failed_devices_retries_map)) # Keep this flag in the last line of "try" block, # so we can sure that no other Exception occurred. ovs_restarted = False self._dispose_local_vlan_hints() except Exception: LOG.exception("Error while processing VIF ports") # Put the ports back in self.updated_port self.updated_ports |= updated_ports_copy self.activated_bindings |= activated_bindings_copy sync = True port_stats = self.get_port_stats(port_info, ancillary_port_info) self.loop_count_and_wait(start, port_stats) def daemon_loop(self): # Start everything. LOG.info("Agent initialized successfully, now running... 
") signal.signal(signal.SIGTERM, self._handle_sigterm) if hasattr(signal, 'SIGHUP'): signal.signal(signal.SIGHUP, self._handle_sighup) br_names = [br.br_name for br in self.phys_brs.values()] self.ovs.ovsdb.idl_monitor.start_bridge_monitor(br_names) with polling.get_polling_manager( self.minimize_polling, self.ovsdb_monitor_respawn_interval) as pm: self.rpc_loop(polling_manager=pm) def _handle_sigterm(self, signum, frame): self.catch_sigterm = True if self.quitting_rpc_timeout: LOG.info( 'SIGTERM received, capping RPC timeout by %d seconds.', self.quitting_rpc_timeout) self.set_rpc_timeout(self.quitting_rpc_timeout) def _handle_sighup(self, signum, frame): self.catch_sighup = True def _check_and_handle_signal(self): if self.catch_sigterm: LOG.info("Agent caught SIGTERM, quitting daemon loop.") self.run_daemon_loop = False self.catch_sigterm = False if self.catch_sighup: LOG.info("Agent caught SIGHUP, resetting.") self.conf.mutate_config_files() config.setup_logging() LOG.debug('Full set of CONF:') self.conf.log_opt_values(LOG, logging.DEBUG) self.catch_sighup = False return self.run_daemon_loop def set_rpc_timeout(self, timeout): for rpc_api in (self.plugin_rpc, self.sg_plugin_rpc, self.dvr_plugin_rpc, self.state_rpc): rpc_api.client.set_max_timeout(timeout) def _check_agent_configurations(self): if (self.enable_distributed_routing and self.enable_tunneling and not self.l2_pop): raise ValueError(_("DVR deployments for VXLAN/GRE/Geneve " "underlays require L2-pop to be enabled, " "in both the Agent and Server side.")) def validate_local_ip(local_ip): """Verify if the ip exists on the agent's host.""" if not ip_lib.IPWrapper().get_device_by_ip(local_ip): LOG.error("Tunneling can't be enabled with invalid local_ip '%s'." " IP couldn't be found on this host's interfaces.", local_ip) raise SystemExit(1) def validate_tunnel_config(tunnel_types, local_ip): """Verify local ip and tunnel config if tunneling is enabled.""" if not tunnel_types: return validate_local_ip(local_ip) for tun in tunnel_types: if tun not in constants.TUNNEL_NETWORK_TYPES: LOG.error('Invalid tunnel type specified: %s', tun) raise SystemExit(1) def prepare_xen_compute(): is_xen_compute_host = 'rootwrap-xen-dom0' in cfg.CONF.AGENT.root_helper \ or xenapi_root_helper.ROOT_HELPER_DAEMON_TOKEN == \ cfg.CONF.AGENT.root_helper_daemon if is_xen_compute_host: xenapi_conf.register_xenapi_opts() # Force ip_lib to always use the root helper to ensure that ip # commands target xen dom0 rather than domU. 
cfg.CONF.register_opts(ip_lib.OPTS) cfg.CONF.set_default('ip_lib_force_root', True) def main(bridge_classes): prepare_xen_compute() ovs_capabilities.register() ext_manager.register_opts(cfg.CONF) agent_config.setup_privsep() service_conf.register_service_opts(service_conf.RPC_EXTRA_OPTS, cfg.CONF) ext_mgr = ext_manager.L2AgentExtensionsManager(cfg.CONF) # now that all extensions registered their options, we can log them n_utils.log_opt_values(LOG) validate_tunnel_config(cfg.CONF.AGENT.tunnel_types, cfg.CONF.OVS.local_ip) init_try = 1 while True: try: agent = OVSNeutronAgent(bridge_classes, ext_mgr, cfg.CONF) capabilities.notify_init_event(n_const.AGENT_TYPE_OVS, agent) break except ovs_exceptions.TimeoutException as e: if init_try < INIT_MAX_TRIES: LOG.warning("Ovsdb command timeout!") init_try += 1 else: LOG.error("%(err)s agent terminated after %(attempts)s " "initialization attempts!", {'err': e, 'attempts': init_try}) sys.exit(1) except (RuntimeError, ValueError) as e: LOG.error("%s agent terminated!", e) sys.exit(1) agent.daemon_loop() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/agent/vlanmanager.py0000644000175000017500000000743600000000000031014 0ustar00coreycorey00000000000000# Copyright 2016 Red Hat, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions from neutron._i18n import _ class VifIdNotFound(exceptions.NeutronException): message = _('VIF ID %(vif_id)s not found in any network managed by ' 'VLAN Manager') class MappingAlreadyExists(exceptions.NeutronException): message = _('VLAN mapping for network with id %(net_id)s already exists') class MappingNotFound(exceptions.NeutronException): message = _('Mapping for network %(net_id)s not found.') class LocalVLANMapping(object): def __init__(self, vlan, network_type, physical_network, segmentation_id, vif_ports=None): self.vlan = vlan self.network_type = network_type self.physical_network = physical_network self.segmentation_id = segmentation_id self.vif_ports = vif_ports or {} # set of tunnel ports on which packets should be flooded self.tun_ofports = set() def __str__(self): return ("lv-id = %s type = %s phys-net = %s phys-id = %s" % (self.vlan, self.network_type, self.physical_network, self.segmentation_id)) def __eq__(self, other): return all(hasattr(other, a) and getattr(self, a) == getattr(other, a) for a in ['vlan', 'network_type', 'physical_network', 'segmentation_id', 'vif_ports']) def __hash__(self): return id(self) class LocalVlanManager(object): """Singleton manager that maps internal VLAN mapping to external network segmentation ids. 
""" def __new__(cls): if not hasattr(cls, '_instance'): cls._instance = super(LocalVlanManager, cls).__new__(cls) return cls._instance def __init__(self): if not hasattr(self, 'mapping'): self.mapping = {} def __contains__(self, key): return key in self.mapping def __iter__(self): for value in list(self.mapping.values()): yield value def items(self): for item in self.mapping.items(): yield item def add(self, net_id, vlan, network_type, physical_network, segmentation_id, vif_ports=None): if net_id in self.mapping: raise MappingAlreadyExists(net_id=net_id) self.mapping[net_id] = LocalVLANMapping( vlan, network_type, physical_network, segmentation_id, vif_ports) def get_net_uuid(self, vif_id): for network_id, vlan_mapping in self.mapping.items(): if vif_id in vlan_mapping.vif_ports: return network_id raise VifIdNotFound(vif_id=vif_id) def get(self, net_id): try: return self.mapping[net_id] except KeyError: raise MappingNotFound(net_id=net_id) def pop(self, net_id): try: return self.mapping.pop(net_id) except KeyError: raise MappingNotFound(net_id=net_id) def update_segmentation_id(self, net_id, segmentation_id): try: self.mapping[net_id].segmentation_id = segmentation_id except KeyError: raise MappingNotFound(net_id=net_id) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3510451 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/mech_driver/0000755000175000017500000000000000000000000027326 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/mech_driver/__init__.py0000644000175000017500000000000000000000000031425 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/openvswitch/mech_driver/mech_openvswitch.py0000644000175000017500000002267200000000000033256 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import uuid from neutron_lib.api.definitions import portbindings from neutron_lib.api.definitions import provider_net from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib import constants from oslo_config import cfg from oslo_log import log from neutron._i18n import _ from neutron.agent import securitygroups_rpc from neutron.conf.plugins.ml2.drivers.openvswitch import mech_ovs_conf from neutron.plugins.ml2.drivers import mech_agent from neutron.plugins.ml2.drivers.openvswitch.agent.common \ import constants as a_const from neutron.services.logapi.drivers.openvswitch import driver as log_driver from neutron.services.qos.drivers.openvswitch import driver as ovs_qos_driver LOG = log.getLogger(__name__) IPTABLES_FW_DRIVER_FULL = ("neutron.agent.linux.iptables_firewall." 
"OVSHybridIptablesFirewallDriver") mech_ovs_conf.register_ovs_mech_driver_opts() class OpenvswitchMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): """Attach to networks using openvswitch L2 agent. The OpenvswitchMechanismDriver integrates the ml2 plugin with the openvswitch L2 agent. Port binding with this driver requires the openvswitch agent to be running on the port's host, and that agent to have connectivity to at least one segment of the port's network. """ resource_provider_uuid5_namespace = uuid.UUID( '87ee7d5c-73bb-11e8-9008-c4d987b2a692') def __init__(self): sg_enabled = securitygroups_rpc.is_firewall_enabled() hybrid_plug_required = ( not cfg.CONF.SECURITYGROUP.firewall_driver or cfg.CONF.SECURITYGROUP.firewall_driver in ( IPTABLES_FW_DRIVER_FULL, 'iptables_hybrid') ) and sg_enabled vif_details = {portbindings.CAP_PORT_FILTER: sg_enabled, portbindings.OVS_HYBRID_PLUG: hybrid_plug_required, portbindings.VIF_DETAILS_CONNECTIVITY: portbindings.CONNECTIVITY_L2} # NOTE(moshele): Bind DIRECT (SR-IOV) port allows # to offload the OVS flows using tc to the SR-IOV NIC. # We are using OVS mechanism driver because the openvswitch (>=2.8.0) # support hardware offload via tc and that allow us to manage the VF by # OpenFlow control plane using representor net-device. super(OpenvswitchMechanismDriver, self).__init__( constants.AGENT_TYPE_OVS, portbindings.VIF_TYPE_OVS, vif_details) # TODO(lajoskatona): move this blacklisting to # SimpleAgentMechanismDriverBase. By that e blacklisting and validation # of the vnic_types would be available for all mechanism drivers. self.supported_vnic_types = self.blacklist_supported_vnic_types( vnic_types=[portbindings.VNIC_NORMAL, portbindings.VNIC_DIRECT, portbindings.VNIC_SMARTNIC], blacklist=cfg.CONF.OVS_DRIVER.vnic_type_blacklist ) LOG.info("%s's supported_vnic_types: %s", self.agent_type, self.supported_vnic_types) ovs_qos_driver.register() log_driver.register() def get_allowed_network_types(self, agent): return (agent['configurations'].get('tunnel_types', []) + [constants.TYPE_LOCAL, constants.TYPE_FLAT, constants.TYPE_VLAN]) def get_mappings(self, agent): return agent['configurations'].get('bridge_mappings', {}) def get_standard_device_mappings(self, agent): """Return the agent's bridge mappings in a standard way. 
The common format for the OVS and SR-IOV mechanism drivers: {'physnet_name': ['device_or_bridge_1', 'device_or_bridge_2']} :param agent: The agent :returns: A dict in the format: {'physnet_name': ['bridge_or_device']} :raises ValueError: if there is no bridge_mappings key in agent['configurations'] """ if 'bridge_mappings' in agent['configurations']: return {k: [v] for k, v in agent['configurations']['bridge_mappings'].items()} else: raise ValueError(_('Cannot standardize bridge mappings of agent ' 'type: %s'), agent['agent_type']) def check_vlan_transparency(self, context): """Currently the Openvswitch driver doesn't support VLAN transparency.""" return False def bind_port(self, context): vnic_type = context.current.get(portbindings.VNIC_TYPE, portbindings.VNIC_NORMAL) profile = context.current.get(portbindings.PROFILE) capabilities = [] if profile: capabilities = profile.get('capabilities', []) if (vnic_type == portbindings.VNIC_DIRECT and 'switchdev' not in capabilities): LOG.debug("Refusing to bind due to unsupported vnic_type: %s with " "no switchdev capability", portbindings.VNIC_DIRECT) return super(OpenvswitchMechanismDriver, self).bind_port(context) def get_supported_vif_type(self, agent): caps = agent['configurations'].get('ovs_capabilities', {}) if (any(x in caps.get('iface_types', []) for x in [a_const.OVS_DPDK_VHOST_USER, a_const.OVS_DPDK_VHOST_USER_CLIENT]) and agent['configurations'].get('datapath_type') == a_const.OVS_DATAPATH_NETDEV): return portbindings.VIF_TYPE_VHOST_USER return self.vif_type def get_vif_type(self, context, agent, segment): if (context.current.get(portbindings.VNIC_TYPE) == portbindings.VNIC_DIRECT): return portbindings.VIF_TYPE_OVS return self.get_supported_vif_type(agent) def get_vhost_mode(self, iface_types): # NOTE(sean-k-mooney): this function converts the ovs vhost user # driver mode into the qemu vhost user mode. If OVS is the server, # qemu is the client and vice-versa. if (a_const.OVS_DPDK_VHOST_USER_CLIENT in iface_types): return portbindings.VHOST_USER_MODE_SERVER return portbindings.VHOST_USER_MODE_CLIENT def get_vif_details(self, context, agent, segment): vif_details = self._pre_get_vif_details(agent, context) self._set_bridge_name(context.current, vif_details, agent) return vif_details @staticmethod def _set_bridge_name(port, vif_details, agent): # REVISIT(rawlin): add BridgeName as a nullable column to the Port # model and simply check here if it's set and insert it into the # vif_details.
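# ---------------------------------------------------------------------
# Editor's illustrative sketch, not part of the original module: the
# server/client inversion performed by get_vhost_mode() above, reduced
# to a pure, self-contained function. The string values below are
# assumed stand-ins for the a_const/portbindings constants imported by
# the real driver.
_SKETCH_OVS_DPDK_VHOST_USER_CLIENT = 'dpdkvhostuserclient'  # assumed value

def _sketch_vhost_mode_for(iface_types):
    # If OVS acts as the vhost-user client, qemu must act as the server,
    # and vice-versa.
    if _SKETCH_OVS_DPDK_VHOST_USER_CLIENT in iface_types:
        return 'server'
    return 'client'

assert _sketch_vhost_mode_for(['dpdkvhostuserclient']) == 'server'
assert _sketch_vhost_mode_for(['dpdkvhostuser']) == 'client'
# ---------------------------------------------------------------------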
def set_bridge_name_inner(bridge_name): vif_details[portbindings.VIF_DETAILS_BRIDGE_NAME] = bridge_name bridge_name = agent['configurations'].get('integration_bridge') if bridge_name: vif_details[portbindings.VIF_DETAILS_BRIDGE_NAME] = bridge_name registry.publish( a_const.OVS_BRIDGE_NAME, events.BEFORE_READ, set_bridge_name_inner, payload=events.EventPayload(None, metadata={'port': port})) def _pre_get_vif_details(self, agent, context): a_config = agent['configurations'] vif_type = self.get_vif_type(context, agent, segment=None) if vif_type != portbindings.VIF_TYPE_VHOST_USER: details = dict(self.vif_details) hybrid = portbindings.OVS_HYBRID_PLUG if hybrid in a_config: # we only override the vif_details for hybrid plugging set # in the constructor if the agent specifically requests it details[hybrid] = a_config[hybrid] else: sock_path = self.agent_vhu_sockpath(agent, context.current['id']) caps = a_config.get('ovs_capabilities', {}) mode = self.get_vhost_mode(caps.get('iface_types', [])) details = {portbindings.CAP_PORT_FILTER: False, portbindings.OVS_HYBRID_PLUG: False, portbindings.VHOST_USER_MODE: mode, portbindings.VHOST_USER_OVS_PLUG: True, portbindings.VHOST_USER_SOCKET: sock_path} details[portbindings.OVS_DATAPATH_TYPE] = a_config.get( 'datapath_type', a_const.OVS_DATAPATH_SYSTEM) return details @staticmethod def agent_vhu_sockpath(agent, port_id): """Return the agent's vhost-user socket path for a given port""" sockdir = agent['configurations'].get('vhostuser_socket_dir', a_const.VHOST_USER_SOCKET_DIR) sock_name = (constants.VHOST_USER_DEVICE_PREFIX + port_id)[:14] return os.path.join(sockdir, sock_name) @staticmethod def provider_network_attribute_updates_supported(): return [provider_net.SEGMENTATION_ID] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3510451 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/ovn/0000755000175000017500000000000000000000000023270 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/ovn/__init__.py0000644000175000017500000000000000000000000025367 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3510451 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/ovn/mech_driver/0000755000175000017500000000000000000000000025557 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/ovn/mech_driver/__init__.py0000644000175000017500000000000000000000000027656 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py0000644000175000017500000014615700000000000030436 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # import atexit import copy import datetime import functools import operator import signal import threading import types from neutron_lib.api.definitions import portbindings from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as const from neutron_lib import context as n_context from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from neutron_lib.plugins.ml2 import api from neutron_lib.services.qos import constants as qos_consts from oslo_config import cfg from oslo_db import exception as os_db_exc from oslo_log import log from oslo_utils import timeutils from ovsdbapp.backend.ovs_idl import idlutils from neutron._i18n import _ from neutron.common.ovn import acl as ovn_acl from neutron.common.ovn import constants as ovn_const from neutron.common.ovn import utils as ovn_utils from neutron.common import utils as n_utils from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf from neutron.db import ovn_hash_ring_db from neutron.db import ovn_revision_numbers_db from neutron.db import provisioning_blocks from neutron.plugins.ml2 import db as ml2_db from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import impl_idl_ovn from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import maintenance from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_client from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_db_sync from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import worker from neutron.services.qos.drivers.ovn import driver as qos_driver from neutron.services.segments import db as segment_service_db from neutron.services.trunk.drivers.ovn import trunk_driver LOG = log.getLogger(__name__) METADATA_READY_WAIT_TIMEOUT = 15 AGENTS = {} class MetadataServiceReadyWaitTimeoutException(Exception): pass class OVNPortUpdateError(n_exc.BadRequest): pass class OVNMechanismDriver(api.MechanismDriver): """OVN ML2 mechanism driver A mechanism driver is called on the creation, update, and deletion of networks and ports. For every event, there are two methods that get called - one within the database transaction (method suffix of _precommit), one right afterwards (method suffix of _postcommit). Exceptions raised by methods called inside the transaction can rollback, but should not make any blocking calls (for example, REST requests to an outside controller). Methods called after transaction commits can make blocking external calls, though these will block the entire process. Exceptions raised in calls after the transaction commits may cause the associated resource to be deleted. Because rollback outside of the transaction is not done in the update network/port case, all data validation must be done within methods that are part of the database transaction. """ supported_qos_rule_types = [qos_consts.RULE_TYPE_BANDWIDTH_LIMIT] def initialize(self): """Perform driver initialization. Called after all drivers have been loaded and the database has been initialized. No abstract methods defined below will be called prior to this method being called. 
""" LOG.info("Starting OVNMechanismDriver") self._nb_ovn = None self._sb_ovn = None self._plugin_property = None self._ovn_client_inst = None self._maintenance_thread = None self.node_uuid = None self.hash_ring_group = ovn_const.HASH_RING_ML2_GROUP self.sg_enabled = ovn_acl.is_sg_enabled() # NOTE(lucasagomes): _clean_hash_ring() must be called before # self.subscribe() to avoid processes racing when adding or # deleting nodes from the Hash Ring during service initialization self._clean_hash_ring() self._post_fork_event = threading.Event() if cfg.CONF.SECURITYGROUP.firewall_driver: LOG.warning('Firewall driver configuration is ignored') self._setup_vif_port_bindings() self.subscribe() self.qos_driver = qos_driver.OVNQosNotificationDriver.create(self) self.trunk_driver = trunk_driver.OVNTrunkDriver.create(self) @property def _plugin(self): if self._plugin_property is None: self._plugin_property = directory.get_plugin() return self._plugin_property @property def _ovn_client(self): if self._ovn_client_inst is None: if not(self._nb_ovn and self._sb_ovn): # Wait until the post_fork_initialize method has finished and # IDLs have been correctly setup. self._post_fork_event.wait() self._ovn_client_inst = ovn_client.OVNClient(self._nb_ovn, self._sb_ovn) return self._ovn_client_inst @property def nb_ovn(self): # NOTE (twilson): This and sb_ovn can be moved to instance variables # once all references to the private versions are changed return self._nb_ovn @property def sb_ovn(self): return self._sb_ovn def _setup_vif_port_bindings(self): self.supported_vnic_types = [portbindings.VNIC_NORMAL, portbindings.VNIC_DIRECT] self.vif_details = { portbindings.VIF_TYPE_OVS: { portbindings.CAP_PORT_FILTER: self.sg_enabled }, portbindings.VIF_TYPE_VHOST_USER: { portbindings.CAP_PORT_FILTER: False, portbindings.VHOST_USER_MODE: portbindings.VHOST_USER_MODE_SERVER, portbindings.VHOST_USER_OVS_PLUG: True }, portbindings.VIF_DETAILS_CONNECTIVITY: portbindings.CONNECTIVITY_L2, } def subscribe(self): registry.subscribe(self.pre_fork_initialize, resources.PROCESS, events.BEFORE_SPAWN) registry.subscribe(self.post_fork_initialize, resources.PROCESS, events.AFTER_INIT) registry.subscribe(self._add_segment_host_mapping_for_segment, resources.SEGMENT, events.AFTER_CREATE) # Handle security group/rule notifications if self.sg_enabled: registry.subscribe(self._create_security_group_precommit, resources.SECURITY_GROUP, events.PRECOMMIT_CREATE) registry.subscribe(self._update_security_group, resources.SECURITY_GROUP, events.AFTER_UPDATE) registry.subscribe(self._create_security_group, resources.SECURITY_GROUP, events.AFTER_CREATE) registry.subscribe(self._delete_security_group, resources.SECURITY_GROUP, events.AFTER_DELETE) registry.subscribe(self._create_sg_rule_precommit, resources.SECURITY_GROUP_RULE, events.PRECOMMIT_CREATE) registry.subscribe(self._process_sg_rule_notification, resources.SECURITY_GROUP_RULE, events.AFTER_CREATE) registry.subscribe(self._process_sg_rule_notification, resources.SECURITY_GROUP_RULE, events.BEFORE_DELETE) def _clean_hash_ring(self, *args, **kwargs): admin_context = n_context.get_admin_context() ovn_hash_ring_db.remove_nodes_from_host(admin_context, self.hash_ring_group) def pre_fork_initialize(self, resource, event, trigger, payload=None): """Pre-initialize the ML2/OVN driver.""" atexit.register(self._clean_hash_ring) signal.signal(signal.SIGTERM, self._clean_hash_ring) def post_fork_initialize(self, resource, event, trigger, payload=None): # NOTE(rtheis): This will initialize all workers 
(API, RPC, # plugin service and OVN) with OVN IDL connections. self._post_fork_event.clear() self._ovn_client_inst = None is_maintenance = (ovn_utils.get_method_class(trigger) == worker.MaintenanceWorker) if not is_maintenance: admin_context = n_context.get_admin_context() self.node_uuid = ovn_hash_ring_db.add_node(admin_context, self.hash_ring_group) self._nb_ovn, self._sb_ovn = impl_idl_ovn.get_ovn_idls( self, trigger, binding_events=not is_maintenance) # AGENTS must be populated after fork so if ovn-controller is stopped # before a worker handles a get_agents request, we still show agents populate_agents(self) # Override agents API methods self.patch_plugin_merge("get_agents", get_agents) self.patch_plugin_choose("get_agent", get_agent) self.patch_plugin_choose("update_agent", update_agent) self.patch_plugin_choose("delete_agent", delete_agent) # Now IDL connections can be safely used. self._post_fork_event.set() if is_maintenance: # Call the synchronization tasks if this is the maintenance worker. # This syncs the neutron DB to the OVN-NB DB only in inconsistent # states. self.nb_synchronizer = ovn_db_sync.OvnNbSynchronizer( self._plugin, self._nb_ovn, self._sb_ovn, ovn_conf.get_ovn_neutron_sync_mode(), self ) self.nb_synchronizer.sync() # This syncs the neutron DB to the OVN-SB DB only in inconsistent # states. self.sb_synchronizer = ovn_db_sync.OvnSbSynchronizer( self._plugin, self._sb_ovn, self ) self.sb_synchronizer.sync() self._maintenance_thread = maintenance.MaintenanceThread() self._maintenance_thread.add_periodics( maintenance.DBInconsistenciesPeriodics(self._ovn_client)) self._maintenance_thread.add_periodics( maintenance.HashRingHealthCheckPeriodics( self.hash_ring_group)) self._maintenance_thread.start() def _create_security_group_precommit(self, resource, event, trigger, **kwargs): ovn_revision_numbers_db.create_initial_revision( kwargs['context'], kwargs['security_group']['id'], ovn_const.TYPE_SECURITY_GROUPS) def _create_security_group(self, resource, event, trigger, security_group, **kwargs): self._ovn_client.create_security_group(kwargs['context'], security_group) def _delete_security_group(self, resource, event, trigger, security_group_id, **kwargs): self._ovn_client.delete_security_group(kwargs['context'], security_group_id) def _update_security_group(self, resource, event, trigger, security_group, **kwargs): # OVN doesn't care about updates to security groups, only if they # exist or not.
We are bumping the revision number here so it # doesn't show as inconsistent to the maintenance periodic task ovn_revision_numbers_db.bump_revision( kwargs['context'], security_group, ovn_const.TYPE_SECURITY_GROUPS) def _create_sg_rule_precommit(self, resource, event, trigger, **kwargs): sg_rule = kwargs.get('security_group_rule') context = kwargs.get('context') ovn_revision_numbers_db.create_initial_revision( context, sg_rule['id'], ovn_const.TYPE_SECURITY_GROUP_RULES) def _process_sg_rule_notification( self, resource, event, trigger, **kwargs): if event == events.AFTER_CREATE: self._ovn_client.create_security_group_rule( kwargs['context'], kwargs.get('security_group_rule')) elif event == events.BEFORE_DELETE: sg_rule = self._plugin.get_security_group_rule( kwargs['context'], kwargs.get('security_group_rule_id')) self._ovn_client.delete_security_group_rule( kwargs['context'], sg_rule) def _is_network_type_supported(self, network_type): return (network_type in [const.TYPE_LOCAL, const.TYPE_FLAT, const.TYPE_GENEVE, const.TYPE_VLAN]) def _validate_network_segments(self, network_segments): for network_segment in network_segments: network_type = network_segment['network_type'] segmentation_id = network_segment['segmentation_id'] physical_network = network_segment['physical_network'] LOG.debug('Validating network segment with ' 'type %(network_type)s, ' 'segmentation ID %(segmentation_id)s, ' 'physical network %(physical_network)s', {'network_type': network_type, 'segmentation_id': segmentation_id, 'physical_network': physical_network}) if not self._is_network_type_supported(network_type): msg = _('Network type %s is not supported') % network_type raise n_exc.InvalidInput(error_message=msg) def create_network_precommit(self, context): """Allocate resources for a new network. :param context: NetworkContext instance describing the new network. Create a new network, allocating resources as necessary in the database. Called inside transaction context on session. Call cannot block. Raising an exception will result in a rollback of the current transaction. """ self._validate_network_segments(context.network_segments) ovn_revision_numbers_db.create_initial_revision( context._plugin_context, context.current['id'], ovn_const.TYPE_NETWORKS) def create_network_postcommit(self, context): """Create a network. :param context: NetworkContext instance describing the new network. Called after the transaction commits. Call can block, though will block the entire process so care should be taken to not drastically affect performance. Raising an exception will cause the deletion of the resource. """ network = context.current self._ovn_client.create_network(context._plugin_context, network) def update_network_precommit(self, context): """Update resources of a network. :param context: NetworkContext instance describing the new state of the network, as well as the original state prior to the update_network call. Update values of a network, updating the associated resources in the database. Called inside transaction context on session. Raising an exception will result in rollback of the transaction. update_network_precommit is called for all changes to the network state. It is up to the mechanism driver to ignore state or state changes that it does not know or care about. """ self._validate_network_segments(context.network_segments) def update_network_postcommit(self, context): """Update a network. 
:param context: NetworkContext instance describing the new state of the network, as well as the original state prior to the update_network call. Called after the transaction commits. Call can block, though will block the entire process so care should be taken to not drastically affect performance. Raising an exception will cause the deletion of the resource. update_network_postcommit is called for all changes to the network state. It is up to the mechanism driver to ignore state or state changes that it does not know or care about. """ # FIXME(lucasagomes): We can delete this conditional after # https://bugs.launchpad.net/neutron/+bug/1739798 is fixed. if context._plugin_context.session.is_active: return self._ovn_client.update_network(context._plugin_context, context.current) def delete_network_postcommit(self, context): """Delete a network. :param context: NetworkContext instance describing the current state of the network, prior to the call to delete it. Called after the transaction commits. Call can block, though will block the entire process so care should be taken to not drastically affect performance. Runtime errors are not expected, and will not prevent the resource from being deleted. """ self._ovn_client.delete_network( context._plugin_context, context.current['id']) def create_subnet_precommit(self, context): ovn_revision_numbers_db.create_initial_revision( context._plugin_context, context.current['id'], ovn_const.TYPE_SUBNETS) def create_subnet_postcommit(self, context): self._ovn_client.create_subnet(context._plugin_context, context.current, context.network.current) def update_subnet_postcommit(self, context): self._ovn_client.update_subnet( context._plugin_context, context.current, context.network.current) def delete_subnet_postcommit(self, context): self._ovn_client.delete_subnet(context._plugin_context, context.current['id']) def create_port_precommit(self, context): """Allocate resources for a new port. :param context: PortContext instance describing the port. Create a new port, allocating resources as necessary in the database. Called inside transaction context on session. Call cannot block. Raising an exception will result in a rollback of the current transaction. 
""" port = context.current if ovn_utils.is_lsp_ignored(port): return ovn_utils.validate_and_get_data_from_binding_profile(port) if self._is_port_provisioning_required(port, context.host): self._insert_port_provisioning_block(context._plugin_context, port['id']) ovn_revision_numbers_db.create_initial_revision( context._plugin_context, port['id'], ovn_const.TYPE_PORTS) # in the case of router ports we also need to # track the creation and update of the LRP OVN objects if ovn_utils.is_lsp_router_port(port): ovn_revision_numbers_db.create_initial_revision( context._plugin_context, port['id'], ovn_const.TYPE_ROUTER_PORTS) def _is_port_provisioning_required(self, port, host, original_host=None): vnic_type = port.get(portbindings.VNIC_TYPE, portbindings.VNIC_NORMAL) if vnic_type not in self.supported_vnic_types: LOG.debug('No provisioning block for port %(port_id)s due to ' 'unsupported vnic_type: %(vnic_type)s', {'port_id': port['id'], 'vnic_type': vnic_type}) return False if port['status'] == const.PORT_STATUS_ACTIVE: LOG.debug('No provisioning block for port %s since it is active', port['id']) return False if not host: LOG.debug('No provisioning block for port %s since it does not ' 'have a host', port['id']) return False if host == original_host: LOG.debug('No provisioning block for port %s since host unchanged', port['id']) return False if not self._sb_ovn.chassis_exists(host): LOG.debug('No provisioning block for port %(port_id)s since no ' 'OVN chassis for host: %(host)s', {'port_id': port['id'], 'host': host}) return False return True def _insert_port_provisioning_block(self, context, port_id): # Insert a provisioning block to prevent the port from # transitioning to active until OVN reports back that # the port is up. provisioning_blocks.add_provisioning_component( context, port_id, resources.PORT, provisioning_blocks.L2_AGENT_ENTITY ) def _notify_dhcp_updated(self, port_id): """Notifies Neutron that the DHCP has been update for port.""" admin_context = n_context.get_admin_context() if provisioning_blocks.is_object_blocked( admin_context, port_id, resources.PORT): provisioning_blocks.provisioning_complete( admin_context, port_id, resources.PORT, provisioning_blocks.DHCP_ENTITY) def _validate_ignored_port(self, port, original_port): if ovn_utils.is_lsp_ignored(port): if not ovn_utils.is_lsp_ignored(original_port): # From not ignored port to ignored port msg = (_('Updating device_owner to %(device_owner)s for port ' '%(port_id)s is not supported') % {'device_owner': port['device_owner'], 'port_id': port['id']}) raise OVNPortUpdateError(resource='port', msg=msg) elif ovn_utils.is_lsp_ignored(original_port): # From ignored port to not ignored port msg = (_('Updating device_owner for port %(port_id)s owned by ' '%(device_owner)s is not supported') % {'port_id': port['id'], 'device_owner': original_port['device_owner']}) raise OVNPortUpdateError(resource='port', msg=msg) def create_port_postcommit(self, context): """Create a port. :param context: PortContext instance describing the port. Called after the transaction completes. Call can block, though will block the entire process so care should be taken to not drastically affect performance. Raising an exception will result in the deletion of the resource. """ port = copy.deepcopy(context.current) port['network'] = context.network.current self._ovn_client.create_port(context._plugin_context, port) self._notify_dhcp_updated(port['id']) def update_port_precommit(self, context): """Update resources of a port. 
:param context: PortContext instance describing the new state of the port, as well as the original state prior to the update_port call. Called inside transaction context on session to complete a port update as defined by this mechanism driver. Raising an exception will result in rollback of the transaction. update_port_precommit is called for all changes to the port state. It is up to the mechanism driver to ignore state or state changes that it does not know or care about. """ port = context.current original_port = context.original self._validate_ignored_port(port, original_port) ovn_utils.validate_and_get_data_from_binding_profile(port) if self._is_port_provisioning_required(port, context.host, context.original_host): self._insert_port_provisioning_block(context._plugin_context, port['id']) if ovn_utils.is_lsp_router_port(port): # handle the case when an existing port is added to a # logical router so we need to track the creation of the lrp if not ovn_utils.is_lsp_router_port(original_port): ovn_revision_numbers_db.create_initial_revision( context._plugin_context, port['id'], ovn_const.TYPE_ROUTER_PORTS, may_exist=True) def update_port_postcommit(self, context): """Update a port. :param context: PortContext instance describing the new state of the port, as well as the original state prior to the update_port call. Called after the transaction completes. Call can block, though will block the entire process so care should be taken to not drastically affect performance. Raising an exception will result in the deletion of the resource. update_port_postcommit is called for all changes to the port state. It is up to the mechanism driver to ignore state or state changes that it does not know or care about. """ port = copy.deepcopy(context.current) port['network'] = context.network.current original_port = copy.deepcopy(context.original) original_port['network'] = context.network.current # NOTE(mjozefcz): Check if port is in migration state. If so update # the port status from DOWN to UP in order to generate 'fake' # vif-interface-plugged event. This workaround is needed to # perform live-migration with live_migration_wait_for_vif_plug=True. if ((port['status'] == const.PORT_STATUS_DOWN and ovn_const.MIGRATING_ATTR in port[portbindings.PROFILE].keys() and port[portbindings.VIF_TYPE] == portbindings.VIF_TYPE_OVS)): LOG.info("Setting port %s status from DOWN to UP in order " "to emit vif-interface-plugged event.", port['id']) self._plugin.update_port_status(context._plugin_context, port['id'], const.PORT_STATUS_ACTIVE) # The revision has been changed. In the meantime the # port-update event has already updated the OVN configuration, # so there is no need to update it again here. It would fail # anyway, since OVN already has the port at a higher revision. return self._ovn_client.update_port(context._plugin_context, port, port_object=original_port) self._notify_dhcp_updated(port['id']) def delete_port_postcommit(self, context): """Delete a port. :param context: PortContext instance describing the current state of the port, prior to the call to delete it. Called after the transaction completes. Call can block, though will block the entire process so care should be taken to not drastically affect performance. Runtime errors are not expected, and will not prevent the resource from being deleted. """ port = copy.deepcopy(context.current) port['network'] = context.network.current # FIXME(lucasagomes): PortContext does not have a session, therefore # we need to use the _plugin_context attribute.
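# ---------------------------------------------------------------------
# Editor's illustrative sketch, not part of the original module: the
# live-migration check performed in update_port_postcommit() above as a
# standalone predicate. The 'migrating_to' key and the plain string
# values are assumed stand-ins for ovn_const.MIGRATING_ATTR and the
# portbindings constants used by the real driver.
def _sketch_needs_fake_vif_plugged_event(port):
    # A DOWN port that carries the migration attribute in its binding
    # profile and is bound as OVS is flipped back to ACTIVE so that Nova
    # receives the vif-interface-plugged event it is waiting for.
    return (port['status'] == 'DOWN' and
            'migrating_to' in port.get('binding:profile', {}) and
            port.get('binding:vif_type') == 'ovs')

assert _sketch_needs_fake_vif_plugged_event(
    {'status': 'DOWN',
     'binding:profile': {'migrating_to': 'dest-host'},
     'binding:vif_type': 'ovs'})
assert not _sketch_needs_fake_vif_plugged_event(
    {'status': 'ACTIVE', 'binding:profile': {}, 'binding:vif_type': 'ovs'})
# ---------------------------------------------------------------------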
self._ovn_client.delete_port(context._plugin_context, port['id'], port_object=port) def bind_port(self, context): """Attempt to bind a port. :param context: PortContext instance describing the port This method is called outside any transaction to attempt to establish a port binding using this mechanism driver. Bindings may be created at each of multiple levels of a hierarchical network, and are established from the top level downward. At each level, the mechanism driver determines whether it can bind to any of the network segments in the context.segments_to_bind property, based on the value of the context.host property, any relevant port or network attributes, and its own knowledge of the network topology. At the top level, context.segments_to_bind contains the static segments of the port's network. At each lower level of binding, it contains static or dynamic segments supplied by the driver that bound at the level above. If the driver is able to complete the binding of the port to any segment in context.segments_to_bind, it must call context.set_binding with the binding details. If it can partially bind the port, it must call context.continue_binding with the network segments to be used to bind at the next lower level. If the binding results are committed after bind_port returns, they will be seen by all mechanism drivers as update_port_precommit and update_port_postcommit calls. But if some other thread or process concurrently binds or updates the port, these binding results will not be committed, and update_port_precommit and update_port_postcommit will not be called on the mechanism drivers with these results. Because binding results can be discarded rather than committed, drivers should avoid making persistent state changes in bind_port, or else must ensure that such state changes are eventually cleaned up. Implementing this method explicitly declares the mechanism driver as having the intention to bind ports. This is inspected by the QoS service to identify the available QoS rules you can use with ports. """ port = context.current vnic_type = port.get(portbindings.VNIC_TYPE, portbindings.VNIC_NORMAL) if vnic_type not in self.supported_vnic_types: LOG.debug('Refusing to bind port %(port_id)s due to unsupported ' 'vnic_type: %(vnic_type)s', {'port_id': port['id'], 'vnic_type': vnic_type}) return capabilities = ovn_utils.get_port_capabilities(port) if (vnic_type == portbindings.VNIC_DIRECT and ovn_const.PORT_CAP_SWITCHDEV not in capabilities): LOG.debug("Refusing to bind port due to unsupported vnic_type: %s " "with no switchdev capability", portbindings.VNIC_DIRECT) return # OVN chassis information is needed to ensure a valid port bind. # Collect port binding data and refuse binding if the OVN chassis # cannot be found. 
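# ---------------------------------------------------------------------
# Editor's illustrative sketch, not part of the original module: the
# vnic_type gate applied at the top of bind_port() above as a pure
# function. The plain strings 'normal', 'direct' and 'switchdev' are
# assumed stand-ins for the portbindings/ovn_const values used by the
# real driver.
def _sketch_is_bindable(vnic_type, capabilities,
                        supported=('normal', 'direct')):
    if vnic_type not in supported:
        return False
    # DIRECT (SR-IOV) ports are only bindable when the port reports the
    # 'switchdev' capability in its binding profile.
    if vnic_type == 'direct' and 'switchdev' not in capabilities:
        return False
    return True

assert _sketch_is_bindable('normal', [])
assert not _sketch_is_bindable('direct', [])
assert _sketch_is_bindable('direct', ['switchdev'])
# ---------------------------------------------------------------------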
chassis_physnets = [] try: datapath_type, iface_types, chassis_physnets = ( self._sb_ovn.get_chassis_data_for_ml2_bind_port(context.host)) iface_types = iface_types.split(',') if iface_types else [] except RuntimeError: LOG.debug('Refusing to bind port %(port_id)s due to ' 'no OVN chassis for host: %(host)s', {'port_id': port['id'], 'host': context.host}) return for segment_to_bind in context.segments_to_bind: network_type = segment_to_bind['network_type'] segmentation_id = segment_to_bind['segmentation_id'] physical_network = segment_to_bind['physical_network'] LOG.debug('Attempting to bind port %(port_id)s on host %(host)s ' 'for network segment with type %(network_type)s, ' 'segmentation ID %(segmentation_id)s, ' 'physical network %(physical_network)s', {'port_id': port['id'], 'host': context.host, 'network_type': network_type, 'segmentation_id': segmentation_id, 'physical_network': physical_network}) # TODO(rtheis): This scenario is only valid on an upgrade from # neutron ML2 OVS since invalid network types are prevented during # network creation and update. The upgrade should convert invalid # network types. Once bug/1621879 is fixed, refuse to bind # ports with unsupported network types. if not self._is_network_type_supported(network_type): LOG.info('Upgrade allowing bind port %(port_id)s with ' 'unsupported network type: %(network_type)s', {'port_id': port['id'], 'network_type': network_type}) if ((network_type in ['flat', 'vlan']) and (physical_network not in chassis_physnets)): LOG.info('Refusing to bind port %(port_id)s on ' 'host %(host)s due to the OVN chassis ' 'bridge mapping physical networks ' '%(chassis_physnets)s not supporting ' 'physical network: %(physical_network)s', {'port_id': port['id'], 'host': context.host, 'chassis_physnets': chassis_physnets, 'physical_network': physical_network}) else: if (datapath_type == ovn_const.CHASSIS_DATAPATH_NETDEV and ovn_const.CHASSIS_IFACE_DPDKVHOSTUSER in iface_types): vhost_user_socket = ovn_utils.ovn_vhu_sockpath( ovn_conf.get_ovn_vhost_sock_dir(), port['id']) vif_type = portbindings.VIF_TYPE_VHOST_USER port[portbindings.VIF_DETAILS].update({ portbindings.VHOST_USER_SOCKET: vhost_user_socket}) vif_details = dict(self.vif_details[vif_type]) vif_details[portbindings.VHOST_USER_SOCKET] = ( vhost_user_socket) else: vif_type = portbindings.VIF_TYPE_OVS vif_details = self.vif_details[vif_type] context.set_binding(segment_to_bind[api.ID], vif_type, vif_details) break def get_workers(self): """Get any worker instances that should have their own process Any driver that needs to run processes separate from the API or RPC workers, can return a sequence of worker instances. """ # See doc/source/design/ovn_worker.rst for more details. return [worker.MaintenanceWorker()] def _update_dnat_entry_if_needed(self, port_id, up=True): """Update DNAT entry if using distributed floating ips.""" if not ovn_conf.is_ovn_distributed_floating_ip(): return if not self._nb_ovn: self._nb_ovn = self._ovn_client._nb_idl nat = self._nb_ovn.db_find('NAT', ('logical_port', '=', port_id), ('type', '=', 'dnat_and_snat')).execute() if not nat: return # We take first entry as one port can only have one FIP nat = nat[0] # If the external_id doesn't exist, let's create at this point. # TODO(dalvarez): Remove this code in T cycle when we're sure that # all DNAT entries have the external_id. 
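# ---------------------------------------------------------------------
# Editor's illustrative sketch, not part of the original module: the
# physnet check from the segment loop in bind_port() above. Flat and
# VLAN segments are only bindable when the chassis bridge mappings
# expose the segment's physical network; tunnelled segments (e.g.
# Geneve) carry no physnet and pass through. Names are hypothetical.
def _sketch_segment_bindable(segment, chassis_physnets):
    if segment['network_type'] in ('flat', 'vlan'):
        return segment['physical_network'] in chassis_physnets
    return True

assert _sketch_segment_bindable(
    {'network_type': 'vlan', 'physical_network': 'physnet1'}, ['physnet1'])
assert not _sketch_segment_bindable(
    {'network_type': 'flat', 'physical_network': 'physnet2'}, ['physnet1'])
assert _sketch_segment_bindable(
    {'network_type': 'geneve', 'physical_network': None}, [])
# ---------------------------------------------------------------------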
if not nat['external_ids'].get(ovn_const.OVN_FIP_EXT_MAC_KEY): self._nb_ovn.db_set('NAT', nat['_uuid'], ('external_ids', {ovn_const.OVN_FIP_EXT_MAC_KEY: nat['external_mac']})).execute() if up: mac = nat['external_ids'][ovn_const.OVN_FIP_EXT_MAC_KEY] LOG.debug("Setting external_mac of port %s to %s", port_id, mac) self._nb_ovn.db_set( 'NAT', nat['_uuid'], ('external_mac', mac)).execute(check_error=True) else: LOG.debug("Clearing up external_mac of port %s", port_id) self._nb_ovn.db_clear( 'NAT', nat['_uuid'], 'external_mac').execute(check_error=True) def _should_notify_nova(self, db_port): # NOTE(twilson) It is possible for a test to override a config option # after the plugin has been initialized so the nova_notifier attribute # is not set on the plugin return (cfg.CONF.notify_nova_on_port_status_changes and hasattr(self._plugin, 'nova_notifier') and db_port.device_owner.startswith( const.DEVICE_OWNER_COMPUTE_PREFIX)) def set_port_status_up(self, port_id): # Port provisioning is complete now that OVN has reported that the # port is up. Any provisioning block (possibly added during port # creation or when OVN reports that the port is down) must be removed. LOG.info("OVN reports status up for port: %s", port_id) self._update_dnat_entry_if_needed(port_id) self._wait_for_metadata_provisioned_if_needed(port_id) admin_context = n_context.get_admin_context() provisioning_blocks.provisioning_complete( admin_context, port_id, resources.PORT, provisioning_blocks.L2_AGENT_ENTITY) try: # NOTE(lucasagomes): Router ports in OVN are never bound # to a host given their decentralized nature. Calling # provisioning_complete() - as above - doesn't do it for us # because the router ports are unbound, so for OVN we are # forcing the status here. Maybe it's something that we can # change in core Neutron in the future. db_port = ml2_db.get_port(admin_context, port_id) if not db_port: return if db_port.device_owner in (const.DEVICE_OWNER_ROUTER_INTF, const.DEVICE_OWNER_DVR_INTERFACE, const.DEVICE_OWNER_ROUTER_HA_INTF): self._plugin.update_port_status(admin_context, port_id, const.PORT_STATUS_ACTIVE) elif self._should_notify_nova(db_port): self._plugin.nova_notifier.notify_port_active_direct(db_port) except (os_db_exc.DBReferenceError, n_exc.PortNotFound): LOG.debug('Port not found during OVN status up report: %s', port_id) def set_port_status_down(self, port_id): # Port provisioning is required now that OVN has reported that the # port is down. Insert a provisioning block and mark the port down # in neutron. The block is inserted before the port status update # to prevent another entity from bypassing the block with its own # port status update.
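# ---------------------------------------------------------------------
# Editor's illustrative sketch, not part of the original module:
# _should_notify_nova() above reduces to three conditions, shown here
# with plain arguments instead of config and plugin state. The
# 'compute:' prefix mirrors const.DEVICE_OWNER_COMPUTE_PREFIX.
def _sketch_should_notify_nova(notify_enabled, has_notifier, device_owner):
    return (notify_enabled and has_notifier and
            device_owner.startswith('compute:'))

assert _sketch_should_notify_nova(True, True, 'compute:nova')
assert not _sketch_should_notify_nova(True, True, 'network:router_interface')
assert not _sketch_should_notify_nova(False, True, 'compute:nova')
# ---------------------------------------------------------------------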
LOG.info("OVN reports status down for port: %s", port_id) self._update_dnat_entry_if_needed(port_id, False) admin_context = n_context.get_admin_context() try: db_port = ml2_db.get_port(admin_context, port_id) if not db_port: return self._insert_port_provisioning_block(admin_context, port_id) self._plugin.update_port_status(admin_context, port_id, const.PORT_STATUS_DOWN) if self._should_notify_nova(db_port): self._plugin.nova_notifier.record_port_status_changed( db_port, const.PORT_STATUS_ACTIVE, const.PORT_STATUS_DOWN, None) self._plugin.nova_notifier.send_port_status( None, None, db_port) except (os_db_exc.DBReferenceError, n_exc.PortNotFound): LOG.debug("Port not found during OVN status down report: %s", port_id) def delete_mac_binding_entries(self, external_ip): """Delete all MAC_Binding entries associated to this IP address""" mac_binds = self._sb_ovn.db_find_rows( 'MAC_Binding', ('ip', '=', external_ip)).execute() or [] for entry in mac_binds: self._sb_ovn.db_destroy('MAC_Binding', entry.uuid).execute() def update_segment_host_mapping(self, host, phy_nets): """Update SegmentHostMapping in DB""" if not host: return ctx = n_context.get_admin_context() segments = segment_service_db.get_segments_with_phys_nets( ctx, phy_nets) available_seg_ids = { segment['id'] for segment in segments if segment['network_type'] in ('flat', 'vlan')} segment_service_db.update_segment_host_mapping( ctx, host, available_seg_ids) def _add_segment_host_mapping_for_segment(self, resource, event, trigger, context, segment): phynet = segment.physical_network if not phynet: return host_phynets_map = self._sb_ovn.get_chassis_hostname_and_physnets() hosts = {host for host, phynets in host_phynets_map.items() if phynet in phynets} segment_service_db.map_segment_to_hosts(context, segment.id, hosts) def _wait_for_metadata_provisioned_if_needed(self, port_id): """Wait for metadata service to be provisioned. Wait until metadata service has been setup for this port in the chassis it resides. If metadata is disabled or DHCP is not enabled for its subnets, this function will return right away. """ if ovn_conf.is_ovn_metadata_enabled() and self._sb_ovn: # Wait until metadata service has been setup for this port in the # chassis it resides. result = ( self._sb_ovn.get_logical_port_chassis_and_datapath(port_id)) if not result: LOG.warning("Logical port %s doesn't exist in OVN", port_id) return chassis, datapath = result if not chassis: LOG.warning("Logical port %s is not bound to a " "chassis", port_id) return # Check if the port belongs to some IPv4 subnet with DHCP enabled. context = n_context.get_admin_context() port = self._plugin.get_port(context, port_id) port_subnet_ids = set( ip['subnet_id'] for ip in port['fixed_ips'] if n_utils.get_ip_version(ip['ip_address']) == const.IP_VERSION_4) if not port_subnet_ids: # The port doesn't belong to any IPv4 subnet return subnets = self._plugin.get_subnets(context, filters=dict( network_id=[port['network_id']], ip_version=[4], enable_dhcp=True)) subnet_ids = set( s['id'] for s in subnets if s['id'] in port_subnet_ids) if not subnet_ids: return try: n_utils.wait_until_true( lambda: datapath in self._sb_ovn.get_chassis_metadata_networks(chassis), timeout=METADATA_READY_WAIT_TIMEOUT, exception=MetadataServiceReadyWaitTimeoutException) except MetadataServiceReadyWaitTimeoutException: # If we reach this point it means that metadata agent didn't # provision the datapath for this port on its chassis. Either # the agent is not running or it crashed. 
We'll complete the # provisioning block though. LOG.warning("Metadata service is not ready for port %s, check" " networking-ovn-metadata-agent status/logs.", port_id) def agent_alive(self, chassis, type_): nb_cfg = chassis.nb_cfg key = ovn_const.OVN_LIVENESS_CHECK_EXT_ID_KEY if type_ == ovn_const.OVN_METADATA_AGENT: nb_cfg = int(chassis.external_ids.get( ovn_const.OVN_AGENT_METADATA_SB_CFG_KEY, 0)) key = ovn_const.METADATA_LIVENESS_CHECK_EXT_ID_KEY try: updated_at = timeutils.parse_isotime(chassis.external_ids[key]) except KeyError: updated_at = timeutils.utcnow(with_timezone=True) # Allow a maximum of 1 difference between expected and read values # to avoid false positives. if self._nb_ovn.nb_global.nb_cfg - nb_cfg <= 1: # update the time of our successful check value = timeutils.utcnow(with_timezone=True).isoformat() self._sb_ovn.db_set('Chassis', chassis.uuid, ('external_ids', {key: value})).execute( check_error=True) return True now = timeutils.utcnow(with_timezone=True) if (now - updated_at).total_seconds() < cfg.CONF.agent_down_time: # down, but not yet timed out return True return False def _format_agent_info(self, chassis, binary, agent_id, type_, description, alive): return { 'binary': binary, 'host': chassis.hostname, 'heartbeat_timestamp': timeutils.utcnow(), 'availability_zone': 'n/a', 'topic': 'n/a', 'description': description, 'configurations': { 'chassis_name': chassis.name, 'bridge-mappings': chassis.external_ids.get('ovn-bridge-mappings', '')}, 'start_flag': True, 'agent_type': type_, 'id': agent_id, 'alive': alive, 'admin_state_up': True} def agents_from_chassis(self, chassis): agent_dict = {} # Check for ovn-controller / ovn-controller gateway agent_type = ovn_const.OVN_CONTROLLER_AGENT # Only the chassis name stays consistent after ovn-controller restart agent_id = chassis.name if ('enable-chassis-as-gw' in chassis.external_ids.get('ovn-cms-options', [])): agent_type = ovn_const.OVN_CONTROLLER_GW_AGENT alive = self.agent_alive(chassis, agent_type) description = chassis.external_ids.get( ovn_const.OVN_AGENT_DESC_KEY, '') agent_dict[agent_id] = self._format_agent_info( chassis, 'ovn-controller', agent_id, agent_type, description, alive) # Check for the metadata agent metadata_agent_id = chassis.external_ids.get( ovn_const.OVN_AGENT_METADATA_ID_KEY) if metadata_agent_id: agent_type = ovn_const.OVN_METADATA_AGENT alive = self.agent_alive(chassis, agent_type) description = chassis.external_ids.get( ovn_const.OVN_AGENT_METADATA_DESC_KEY, '') agent_dict[metadata_agent_id] = self._format_agent_info( chassis, 'networking-ovn-metadata-agent', metadata_agent_id, agent_type, description, alive) return agent_dict def patch_plugin_merge(self, method_name, new_fn, op=operator.add): old_method = getattr(self._plugin, method_name) @functools.wraps(old_method) def fn(slf, *args, **kwargs): new_method = types.MethodType(new_fn, self._plugin) results = old_method(*args, **kwargs) return op(results, new_method(*args, _driver=self, **kwargs)) setattr(self._plugin, method_name, types.MethodType(fn, self._plugin)) def patch_plugin_choose(self, method_name, new_fn): old_method = getattr(self._plugin, method_name) @functools.wraps(old_method) def fn(slf, *args, **kwargs): new_method = types.MethodType(new_fn, self._plugin) try: return new_method(*args, _driver=self, **kwargs) except KeyError: return old_method(*args, **kwargs) setattr(self._plugin, method_name, types.MethodType(fn, self._plugin)) def ping_chassis(self): """Update NB_Global.nb_cfg so that Chassis.nb_cfg will increment""" 
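# ---------------------------------------------------------------------
# Editor's illustrative sketch, not part of the original module: the
# liveness rule from agent_alive() above. An agent counts as alive when
# its chassis nb_cfg is at most one step behind NB_Global.nb_cfg, or
# when the last successful check is still within agent_down_time (the
# 75-second figure below is neutron's documented default, assumed here).
import datetime as _sketch_datetime

def _sketch_is_alive(nb_global_cfg, chassis_cfg, updated_at, now,
                     agent_down_time=75):
    if nb_global_cfg - chassis_cfg <= 1:
        return True
    return (now - updated_at).total_seconds() < agent_down_time

_sketch_now = _sketch_datetime.datetime(2020, 4, 15, 12, 0, 0)
assert _sketch_is_alive(10, 9, _sketch_now, _sketch_now)  # caught up: alive
assert not _sketch_is_alive(
    10, 5, _sketch_now - _sketch_datetime.timedelta(seconds=300),
    _sketch_now)  # lagging and stale: down
# ---------------------------------------------------------------------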
last_ping = self._nb_ovn.nb_global.external_ids.get( ovn_const.OVN_LIVENESS_CHECK_EXT_ID_KEY) if last_ping: interval = max(cfg.CONF.agent_down_time // 2, 1) next_ping = (timeutils.parse_isotime(last_ping) + datetime.timedelta(seconds=interval)) if timeutils.utcnow(with_timezone=True) < next_ping: return with self._nb_ovn.create_transaction(check_error=True, bump_nb_cfg=True) as txn: txn.add(self._nb_ovn.check_liveness()) def populate_agents(driver): for ch in driver._sb_ovn.tables['Chassis'].rows.values(): # update the cache, rows are hashed on uuid but it is the name that # stays consistent across ovn-controller restarts AGENTS.update({ch.name: ch}) def get_agents(self, context, filters=None, fields=None, _driver=None): _driver.ping_chassis() filters = filters or {} agent_list = [] populate_agents(_driver) for ch in AGENTS.values(): for agent in _driver.agents_from_chassis(ch).values(): if all(agent[k] in v for k, v in filters.items()): agent_list.append(agent) return agent_list def get_agent(self, context, id, fields=None, _driver=None): chassis = None try: # look up Chassis by *name*, which is what the id attribute holds chassis = _driver._sb_ovn.lookup('Chassis', id) except idlutils.RowNotFound: # If the name is not found, check for the metadata agent ID for ch in _driver._sb_ovn.tables['Chassis'].rows.values(): metadata_agent_id = ch.external_ids.get( ovn_const.OVN_AGENT_METADATA_ID_KEY) if id == metadata_agent_id: chassis = ch break else: raise n_exc.agent.AgentNotFound(id=id) return _driver.agents_from_chassis(chassis)[id] def update_agent(self, context, id, agent, _driver=None): ovn_agent = get_agent(self, None, id, _driver=_driver) chassis_name = ovn_agent['configurations']['chassis_name'] agent_type = ovn_agent['agent_type'] agent = agent['agent'] # neutron-client always passes admin_state_up; openstack client doesn't. # We can just fall through to raising in the case that admin_state_up is # being set to False; otherwise the end-state will be fine. if not agent.get('admin_state_up', True): pass elif 'description' in agent: _driver._sb_ovn.set_chassis_neutron_description( chassis_name, agent['description'], agent_type).execute(check_error=True) return agent else: # admin_state_up=True w/o description return agent raise n_exc.BadRequest(resource='agent', msg='OVN agent status cannot be updated') def delete_agent(self, context, id, _driver=None): get_agent(self, None, id, _driver=_driver) raise n_exc.BadRequest(resource='agent', msg='OVN agents cannot be deleted') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3510451 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/0000755000175000017500000000000000000000000026674 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/__init__.py0000644000175000017500000000000000000000000030773 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/api.py0000644000175000017500000006534700000000000030032 0ustar00coreycorey00000000000000# Copyright 2019 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from ovsdbapp import api import six from neutron.common.ovn import constants as ovn_const @six.add_metaclass(abc.ABCMeta) class API(api.API): @abc.abstractmethod def create_lswitch_port(self, lport_name, lswitch_name, may_exist=True, **columns): """Create a command to add an OVN logical switch port :param lport_name: The name of the lport :type lport_name: string :param lswitch_name: The name of the lswitch the lport is created on :type lswitch_name: string :param may_exist: Do not fail if lport already exists :type may_exist: bool :param columns: Dictionary of port columns Supported columns: macs, external_ids, parent_name, tag, enabled :type columns: dictionary :returns: :class:`Command` with no result """ @abc.abstractmethod def set_lswitch_port(self, lport_name, if_exists=True, **columns): """Create a command to set OVN logical switch port fields :param lport_name: The name of the lport :type lport_name: string :param columns: Dictionary of port columns Supported columns: macs, external_ids, parent_name, tag, enabled :param if_exists: Do not fail if lport does not exist :type if_exists: bool :type columns: dictionary :returns: :class:`Command` with no result """ @abc.abstractmethod def delete_lswitch_port(self, lport_name=None, lswitch_name=None, ext_id=None, if_exists=True): """Create a command to delete an OVN logical switch port :param lport_name: The name of the lport :type lport_name: string :param lswitch_name: The name of the lswitch :type lswitch_name: string :param ext_id: The external id of the lport :type ext_id: pair of :param if_exists: Do not fail if the lport does not exist :type if_exists: bool :returns: :class:`Command` with no result """ @abc.abstractmethod def create_lrouter(self, name, may_exist=True, **columns): """Create a command to add an OVN lrouter :param name: The id of the lrouter :type name: string :param may_exist: Do not fail if lrouter already exists :type may_exist: bool :param columns: Dictionary of lrouter columns Supported columns: external_ids, default_gw, ip :type columns: dictionary :returns: :class:`Command` with no result """ @abc.abstractmethod def update_lrouter(self, name, if_exists=True, **columns): """Update a command to add an OVN lrouter :param name: The id of the lrouter :type name: string :param if_exists: Do not fail if the lrouter does not exist :type if_exists: bool :param columns: Dictionary of lrouter columns Supported columns: external_ids, default_gw, ip :type columns: dictionary :returns: :class:`Command` with no result """ @abc.abstractmethod def delete_lrouter(self, name, if_exists=True): """Create a command to delete an OVN lrouter :param name: The id of the lrouter :type name: string :param if_exists: Do not fail if the lrouter does not exist :type if_exists: bool :returns: :class:`Command` with no result """ @abc.abstractmethod def add_lrouter_port(self, name, lrouter, may_exist=True, **columns): """Create a command to add an OVN lrouter port :param name: The unique name of the lrouter port :type name: string :param lrouter: The unique name of the lrouter :type lrouter: string :param lswitch: The unique name of 
the lswitch :type lswitch: string :param may_exist: If true, do not fail if lrouter port set already exists. :type may_exist: bool :param columns: Dictionary of lrouter columns Supported columns: external_ids, mac, network :type columns: dictionary :returns: :class:`Command` with no result """ @abc.abstractmethod def update_lrouter_port(self, name, if_exists=True, **columns): """Update a command to add an OVN lrouter port :param name: The unique name of the lrouter port :type name: string :param if_exists: Do not fail if the lrouter port does not exist :type if_exists: bool :param columns: Dictionary of lrouter columns Supported columns: networks :type columns: dictionary :returns: :class:`Command` with no result """ @abc.abstractmethod def delete_lrouter_port(self, name, lrouter, if_exists=True): """Create a command to delete an OVN lrouter port :param name: The unique name of the lport :type name: string :param lrouter: The unique name of the lrouter :type lrouter: string :param if_exists: Do not fail if the lrouter port does not exist :type if_exists: bool :returns: :class:`Command` with no result """ @abc.abstractmethod def set_lrouter_port_in_lswitch_port( self, lswitch_port, lrouter_port, is_gw_port=False, if_exists=True, lsp_address=ovn_const.DEFAULT_ADDR_FOR_LSP_WITH_PEER): """Create a command to set lswitch_port as lrouter_port :param lswitch_port: The name of logical switch port :type lswitch_port: string :param lrouter_port: The name of logical router port :type lrouter_port: string :param is_gw_port: True if logical router port is gw port :type is_gw_port: bool :param if_exists: Do not fail if the lswitch port does not exist :type if_exists: bool :param lsp_address: logical switch port's addresses to set :type lsp_address: string or list of strings :returns: :class:`Command` with no result """ @abc.abstractmethod def add_acl(self, lswitch, lport, **columns): """Create an ACL for a logical port. :param lswitch: The logical switch the port is attached to. :type lswitch: string :param lport: The logical port this ACL is associated with. :type lport: string :param columns: Dictionary of ACL columns Supported columns: see ACL table in OVN_Northbound :type columns: dictionary """ @abc.abstractmethod def delete_acl(self, lswitch, lport, if_exists=True): """Delete all ACLs for a logical port. :param lswitch: The logical switch the port is attached to. :type lswitch: string :param lport: The logical port this ACL is associated with. :type lport: string :param if_exists: Do not fail if the ACL for this lport does not exist :type if_exists: bool """ @abc.abstractmethod def update_acls(self, lswitch_names, port_list, acl_new_values_dict, need_compare=True, is_add_acl=True): """Update the list of acls on logical switches with new values. :param lswitch_names: List of logical switch names :type lswitch_name: [] :param port_list: Iterator of list of ports :type port_list: [] :param acl_new_values_dict: Dictionary of acls indexed by port id :type acl_new_values_dict: {} :param need_compare: If acl_new_values_dict need compare with existing acls :type need_compare: bool :is_add_acl: If updating is caused by adding acl :type is_add_acl: bool """ @abc.abstractmethod def get_acl_by_id(self, acl_id): """Get an ACL by its ID. :param acl_id: ID of the ACL to lookup :type acl_id: string :returns The ACL row or None: """ @abc.abstractmethod def add_static_route(self, lrouter, **columns): """Add static route to logical router. 
:param lrouter: The unique name of the lrouter :type lrouter: string :param columns: Dictionary of static columns Supported columns: prefix, nexthop, valid :type columns: dictionary :returns: :class:`Command` with no result """ @abc.abstractmethod def delete_static_route(self, lrouter, ip_prefix, nexthop, if_exists=True): """Delete static route from logical router. :param lrouter: The unique name of the lrouter :type lrouter: string :param ip_prefix: The prefix of the static route :type ip_prefix: string :param nexthop: The nexthop of the static route :type nexthop: string :param if_exists: Do not fail if router does not exist :type if_exists: bool :returns: :class:`Command` with no result """ @abc.abstractmethod def create_address_set(self, name, may_exist=True, **columns): """Create an address set :param name: The name of the address set :type name: string :param may_exist: Do not fail if address set already exists :type may_exist: bool :param columns: Dictionary of address set columns Supported columns: external_ids, addresses :type columns: dictionary :returns: :class:`Command` with no result """ @abc.abstractmethod def delete_address_set(self, name, if_exists=True): """Delete an address set :param name: The name of the address set :type name: string :param if_exists: Do not fail if the address set does not exist :type if_exists: bool :returns: :class:`Command` with no result """ @abc.abstractmethod def update_address_set(self, name, addrs_add, addrs_remove, if_exists=True): """Updates addresses in an address set :param name: The name of the address set :type name: string :param addrs_add: The addresses to be added :type addrs_add: [] :param addrs_remove: The addresses to be removed :type addrs_remove: [] :param if_exists: Do not fail if the address set does not exist :type if_exists: bool :returns: :class:`Command` with no result """ @abc.abstractmethod def update_address_set_ext_ids(self, name, external_ids, if_exists=True): """Update external IDs for an address set :param name: The name of the address set :type name: string :param external_ids: The external IDs for the address set :type external_ids: dict :param if_exists: Do not fail if the address set does not exist :type if_exists: bool :returns: :class:`Command` with no result """ @abc.abstractmethod def get_all_chassis_gateway_bindings(self, chassis_candidate_list=None): """Return a dictionary of chassis name:list of gateways :param chassis_candidate_list: List of possible chassis candidates :type chassis_candidate_list: [] :returns: {} of chassis to routers mapping """ @abc.abstractmethod def get_gateway_chassis_binding(self, gateway_id): """Return the list of chassis to which the gateway is bound to As one gateway can be hosted by multiple chassis, this method is returning a list of those chassis ordered by priority. This means that the first element of the list is the chassis hosting the gateway with the highest priority (which will likely be where the router port is going to be active). 
:param gateway_id: The gateway id :type gateway_id: string :returns: a list of strings with the chassis names """ @abc.abstractmethod def get_unhosted_gateways(self, port_physnet_dict, chassis_physnets, gw_chassis): """Return a list of gateways not hosted on a valid chassis :param port_physnet_dict: Dictionary of gateway ports and their physnet :param chassis_physnets: Dictionary of chassis and physnets :param gw_chassis: List of gateway chassis provided by the admin through ovn-cms-options :returns: List of gateways not hosted on a valid chassis """ @abc.abstractmethod def add_dhcp_options(self, subnet_id, port_id=None, may_exist=True, **columns): """Adds the DHCP options specified in the @columns in DHCP_Options If the DHCP options already exist in the DHCP_Options table for the @subnet_id (and @port_id), updates the row, else creates a new row. :param subnet_id: The subnet id to which the DHCP options belong :type subnet_id: string :param port_id: The port id to which the DHCP options belong, if specified :type port_id: string :param may_exist: If true, checks if the DHCP options for subnet_id exist or not. If they already exist, it updates the row with the columns specified. Else creates a new row. :type may_exist: bool :param columns: Dictionary of DHCP_Options columns Supported columns: see DHCP_Options table in OVN_Northbound :type columns: dictionary :returns: :class:`Command` with no result """ @abc.abstractmethod def delete_dhcp_options(self, row_uuid, if_exists=True): """Deletes the row in DHCP_Options with the @row_uuid :param row_uuid: The UUID of the row to be deleted. :type row_uuid: string :param if_exists: Do not fail if the DHCP_Options row does not exist :type if_exists: bool """ @abc.abstractmethod def get_subnet_dhcp_options(self, subnet_id, with_ports=False): """Returns the Subnet DHCP options as a dictionary :param subnet_id: The subnet id whose DHCP options are returned :type subnet_id: string :param with_ports: If True, also returns the ports DHCP options. :type with_ports: bool :returns: Returns a dictionary containing two keys: subnet and ports. """ @abc.abstractmethod def get_subnets_dhcp_options(self, subnet_ids): """Returns the Subnets DHCP options as a list of dictionaries :param subnet_ids: The subnet ids whose DHCP options are returned :type subnet_ids: list of string :returns: Returns the columns of the DHCP_Options as a list of dictionaries. An empty list is returned if no matching DHCP_Options are found. """ @abc.abstractmethod def get_address_sets(self): """Gets all address sets in the OVN_Northbound DB :returns: dictionary indexed by name, DB columns as values """ @abc.abstractmethod def get_port_groups(self): """Gets all port groups in the OVN_Northbound DB :returns: dictionary indexed by name, DB columns as values """ @abc.abstractmethod def get_router_port_options(self, lsp_name): """Get options set for lsp of type router :returns: router port options """ @abc.abstractmethod def add_nat_rule_in_lrouter(self, lrouter, **columns): """Add NAT rule in logical router :param lrouter: The unique name of the lrouter :type lrouter: string :param columns: Dictionary of nat columns Supported columns: type, logical_ip, external_ip :type columns: dictionary :returns: :class:`Command` with no result """ @abc.abstractmethod def delete_nat_rule_in_lrouter(self, lrouter, type, logical_ip, external_ip, if_exists=True): """Delete NAT rule in logical router :param lrouter: The unique name of the lrouter :type lrouter: string :param type: Type of nat. 
Supported values are 'snat', 'dnat' and 'dnat_and_snat' :type type: string :param logical_ip: IP or network that needs to be natted :type logical_ip: string :param external_ip: External IP to be used for nat :type external_ip: string :param if_exists: Do not fail if the Logical_Router row does not exist :type if_exists: bool :returns: :class:`Command` with no result """ @abc.abstractmethod def get_lrouter_nat_rules(self, lrouter): """Returns the nat rules of a router :param lrouter: The unique name of the router :type lrouter: string :returns: A list of nat rules of the router, with each item as a dict with the keys - 'external_ip', 'logical_ip', 'type' and 'uuid' of the row. """ @abc.abstractmethod def set_nat_rule_in_lrouter(self, lrouter, nat_rule_uuid, **columns): """Sets the NAT rule fields :param lrouter: The unique name of the router to which the NAT rule belongs. :type lrouter: string :param nat_rule_uuid: The uuid of the NAT rule row to be updated. :type nat_rule_uuid: string :param columns: Dictionary of NAT rule columns to set :type columns: dictionary :returns: :class:`Command` with no result """ @abc.abstractmethod def get_lswitch(self, lswitch_name): """Returns the logical switch :param lswitch_name: The unique name of the logical switch :type lswitch_name: string :returns: Returns logical switch or None """ @abc.abstractmethod def get_ls_and_dns_record(self, lswitch_name): """Returns the logical switch and 'dns' records :param lswitch_name: The unique name of the logical switch :type lswitch_name: string :returns: Returns logical switch and dns records as a tuple """ @abc.abstractmethod def get_floatingip(self, fip_id): """Get a Floating IP by its ID :param fip_id: The floating IP id :type fip_id: string :returns: The NAT rule row or None """ @abc.abstractmethod def get_floatingip_by_ips(self, router_id, logical_ip, external_ip): """Get a Floating IP based on its logical and external IPs. DEPRECATED. In the Rocky release of OpenStack this method can be removed and get_floatingip() should be used instead. This method is a backward compatibility layer for the Pike -> Queens release. :param router_id: The ID of the router to which the FIP belongs. :type router_id: string :param logical_ip: The FIP's logical IP address :type logical_ip: string :param external_ip: The FIP's external IP address :type external_ip: string :returns: The NAT rule row or None """ def check_revision_number(self, name, resource, resource_type, if_exists=True): """Compare the revision number from Neutron and OVN. Check that the revision number in OVN is not higher than the one from the Neutron resource, otherwise raise RevisionConflict and abort the transaction. :param name: The unique name of the resource :type name: string :param resource: The neutron resource object :type resource: dictionary :param resource_type: The resource object type :type resource_type: string :param if_exists: Do not fail if the resource does not exist :type if_exists: bool :returns: :class:`Command` with no result :raise: RevisionConflict if the revision number in OVN is higher than the one of the neutron object """ @abc.abstractmethod def get_lswitch_port(self, lsp_name): """Get a Logical Switch Port by its name. 
:param lsp_name: The Logical Switch Port name :type lsp_name: string :returns: The Logical Switch Port row or None """ @abc.abstractmethod def get_lrouter(self, lrouter_name): """Get a Logical Router by its name :param lrouter_name: The name of the logical router :type lrouter_name: string :returns: The Logical_Router row or None """ @abc.abstractmethod def delete_lrouter_ext_gw(self, lrouter_name): """Delete Logical Router external gateway. :param lrouter_name: The name of the logical router :type lrouter_name: string :returns: :class:`Command` with no result """ @abc.abstractmethod def get_address_set(self, addrset_id, ip_version='ip4'): """Get an Address Set by its ID. :param addrset_id: The Address Set ID :type addrset_id: string :param ip_version: Either "ip4" or "ip6". Defaults to "ip4" :type ip_version: string :returns: The Address Set row or None """ @abc.abstractmethod def set_lswitch_port_to_virtual_type(self, lport_name, vip, virtual_parent, if_exists=True): """Set the type of a given port to "virtual". Set the type of a given port to "virtual" and set all its related options. :param lport_name: The name of the lport :type lport_name: string :param vip: The virtual ip :type vip: string :param virtual_parent: The name of the parent lport :type virtual_parent: string :param if_exists: Do not fail if the lport does not exist :type if_exists: bool :returns: :class:`Command` with no result """ @abc.abstractmethod def unset_lswitch_port_to_virtual_type(self, lport_name, virtual_parent, if_exists=True): """Unset the type of a given port from "virtual". Unset the type of a given port from "virtual" and remove all its related options. :param lport_name: The name of the lport :type lport_name: string :param virtual_parent: The name of the parent lport :type virtual_parent: string :param if_exists: Do not fail if the lport does not exist :type if_exists: bool :returns: :class:`Command` with no result """ @six.add_metaclass(abc.ABCMeta) class SbAPI(api.API): @abc.abstractmethod def chassis_exists(self, hostname): """Test if a chassis for the given hostname exists. @param hostname: The hostname of the chassis @type hostname: string :returns: True if the chassis exists, else False. """ @abc.abstractmethod def get_chassis_hostname_and_physnets(self): """Return a dict mapping hostnames to physnets. Each hostname is a dict key whose value is the list of physnets available on that host. """ def get_gateway_chassis_from_cms_options(self): """Get chassis eligible for external connectivity from CMS options. When an admin wants to enable router gateways only on selected chassis, those chassis are marked by setting the external_ids as ovs-vsctl set open . external_ids:ovn-cms-options="enable-chassis-as-gw" This function parses ovn-cms-options and returns those chassis. :returns: List with chassis names. """ @abc.abstractmethod def get_chassis_and_physnets(self): """Return a dict mapping chassis names to physnets. Each chassis name is a dict key whose value is the list of physnets available on that chassis. """ @abc.abstractmethod def get_all_chassis(self, chassis_type=None): """Return a list of all chassis which match the given chassis_type :param chassis_type: The type of chassis :type chassis_type: string """ @abc.abstractmethod def get_chassis_data_for_ml2_bind_port(self, hostname): """Return chassis data for ML2 port binding. 
@param hostname: The hostname of the chassis @type hostname: string :returns: Tuple containing the chassis datapath type, iface types and physical networks for the OVN bridge mappings. :raises: RuntimeError exception if an OVN chassis does not exist. """ # File: neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/commands.py # Copyright 2019 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import timeutils from ovsdbapp.backend.ovs_idl import command from ovsdbapp.backend.ovs_idl import idlutils from neutron._i18n import _ from neutron.common.ovn import constants as ovn_const from neutron.common.ovn import exceptions as ovn_exc from neutron.common.ovn import utils RESOURCE_TYPE_MAP = { ovn_const.TYPE_NETWORKS: 'Logical_Switch', ovn_const.TYPE_PORTS: 'Logical_Switch_Port', ovn_const.TYPE_ROUTERS: 'Logical_Router', ovn_const.TYPE_ROUTER_PORTS: 'Logical_Router_Port', ovn_const.TYPE_FLOATINGIPS: 'NAT', ovn_const.TYPE_SUBNETS: 'DHCP_Options', } def _addvalue_to_list(row, column, new_value): row.addvalue(column, new_value) def _delvalue_from_list(row, column, old_value): row.delvalue(column, old_value) def _updatevalues_in_list(row, column, new_values=None, old_values=None): new_values = new_values or [] old_values = old_values or [] for new_value in new_values: row.addvalue(column, new_value) for old_value in old_values: row.delvalue(column, old_value) def get_lsp_dhcp_options_uuids(lsp, lsp_name): # Get dhcpv4_options and dhcpv6_options uuids from Logical_Switch_Port, # which are references of port dhcp options in DHCP_Options table. 
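    # Illustrative sketch (hypothetical values, not taken from a real DB):
    # for a port named 'lsp-1' whose dhcpv4_options row carries
    # external_ids={'port_id': 'lsp-1'}, that row's uuid is collected below;
    # rows whose external_ids name a different port are skipped.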
uuids = set() for dhcp_opts in getattr(lsp, 'dhcpv4_options', []): external_ids = getattr(dhcp_opts, 'external_ids', {}) if external_ids.get('port_id') == lsp_name: uuids.add(dhcp_opts.uuid) for dhcp_opts in getattr(lsp, 'dhcpv6_options', []): external_ids = getattr(dhcp_opts, 'external_ids', {}) if external_ids.get('port_id') == lsp_name: uuids.add(dhcp_opts.uuid) return uuids def _add_gateway_chassis(api, txn, lrp_name, val): gateway_chassis = api._tables.get('Gateway_Chassis') if gateway_chassis: prio = len(val) uuid_list = [] for chassis in val: gwc_name = '%s_%s' % (lrp_name, chassis) try: gwc = idlutils.row_by_value(api.idl, 'Gateway_Chassis', 'name', gwc_name) except idlutils.RowNotFound: gwc = txn.insert(gateway_chassis) gwc.name = gwc_name gwc.chassis_name = chassis gwc.priority = prio prio = prio - 1 uuid_list.append(gwc.uuid) return 'gateway_chassis', uuid_list else: chassis = {ovn_const.OVN_GATEWAY_CHASSIS_KEY: val[0]} return 'options', chassis class CheckLivenessCommand(command.BaseCommand): def __init__(self, api): super(CheckLivenessCommand, self).__init__(api) def run_idl(self, txn): # txn.pre_commit responsible for updating nb_global.nb_cfg, but # python-ovs will not update nb_cfg if no other changes are made self.api.nb_global.setkey('external_ids', ovn_const.OVN_LIVENESS_CHECK_EXT_ID_KEY, str(timeutils.utcnow(with_timezone=True))) self.result = self.api.nb_global.nb_cfg class AddLSwitchPortCommand(command.BaseCommand): def __init__(self, api, lport, lswitch, may_exist, **columns): super(AddLSwitchPortCommand, self).__init__(api) self.lport = lport self.lswitch = lswitch self.may_exist = may_exist self.columns = columns def run_idl(self, txn): try: lswitch = idlutils.row_by_value(self.api.idl, 'Logical_Switch', 'name', self.lswitch) except idlutils.RowNotFound: msg = _("Logical Switch %s does not exist") % self.lswitch raise RuntimeError(msg) if self.may_exist: port = idlutils.row_by_value(self.api.idl, 'Logical_Switch_Port', 'name', self.lport, None) if port: return port = txn.insert(self.api._tables['Logical_Switch_Port']) port.name = self.lport dhcpv4_options = self.columns.pop('dhcpv4_options', []) if isinstance(dhcpv4_options, list): port.dhcpv4_options = dhcpv4_options else: port.dhcpv4_options = [dhcpv4_options.result] dhcpv6_options = self.columns.pop('dhcpv6_options', []) if isinstance(dhcpv6_options, list): port.dhcpv6_options = dhcpv6_options else: port.dhcpv6_options = [dhcpv6_options.result] for col, val in self.columns.items(): setattr(port, col, val) # add the newly created port to existing lswitch _addvalue_to_list(lswitch, 'ports', port.uuid) self.result = port.uuid def post_commit(self, txn): self.result = txn.get_insert_uuid(self.result) class SetLSwitchPortCommand(command.BaseCommand): def __init__(self, api, lport, if_exists, **columns): super(SetLSwitchPortCommand, self).__init__(api) self.lport = lport self.columns = columns self.if_exists = if_exists def run_idl(self, txn): try: port = idlutils.row_by_value(self.api.idl, 'Logical_Switch_Port', 'name', self.lport) except idlutils.RowNotFound: if self.if_exists: return msg = _("Logical Switch Port %s does not exist") % self.lport raise RuntimeError(msg) # Delete DHCP_Options records no longer referred by this port. # The table rows should be consistent for the same transaction. # After we get DHCP_Options rows uuids from port dhcpv4_options # and dhcpv6_options references, the rows shouldn't disappear for # this transaction before we delete it. 
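        # Cleanup sketch (hypothetical uuids): if the port currently
        # references {uuid_a, uuid_b} and the new column values only
        # reference {uuid_b}, the set difference computed below,
        # cur_port_dhcp_opts - new_port_dhcp_opts = {uuid_a}, is what
        # gets deleted from DHCP_Options within this same transaction.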
cur_port_dhcp_opts = get_lsp_dhcp_options_uuids( port, self.lport) new_port_dhcp_opts = set() dhcpv4_options = self.columns.pop('dhcpv4_options', None) if dhcpv4_options is None: new_port_dhcp_opts.update([option.uuid for option in getattr(port, 'dhcpv4_options', [])]) elif isinstance(dhcpv4_options, list): new_port_dhcp_opts.update(dhcpv4_options) port.dhcpv4_options = dhcpv4_options else: new_port_dhcp_opts.add(dhcpv4_options.result) port.dhcpv4_options = [dhcpv4_options.result] dhcpv6_options = self.columns.pop('dhcpv6_options', None) if dhcpv6_options is None: new_port_dhcp_opts.update([option.uuid for option in getattr(port, 'dhcpv6_options', [])]) elif isinstance(dhcpv6_options, list): new_port_dhcp_opts.update(dhcpv6_options) port.dhcpv6_options = dhcpv6_options else: new_port_dhcp_opts.add(dhcpv6_options.result) port.dhcpv6_options = [dhcpv6_options.result] for uuid in cur_port_dhcp_opts - new_port_dhcp_opts: self.api._tables['DHCP_Options'].rows[uuid].delete() for col, val in self.columns.items(): setattr(port, col, val) class DelLSwitchPortCommand(command.BaseCommand): def __init__(self, api, lport, lswitch, if_exists): super(DelLSwitchPortCommand, self).__init__(api) self.lport = lport self.lswitch = lswitch self.if_exists = if_exists def run_idl(self, txn): try: lport = idlutils.row_by_value(self.api.idl, 'Logical_Switch_Port', 'name', self.lport) lswitch = idlutils.row_by_value(self.api.idl, 'Logical_Switch', 'name', self.lswitch) except idlutils.RowNotFound: if self.if_exists: return msg = _("Port %s does not exist") % self.lport raise RuntimeError(msg) # Delete DHCP_Options records no longer referred by this port. cur_port_dhcp_opts = get_lsp_dhcp_options_uuids( lport, self.lport) for uuid in cur_port_dhcp_opts: self.api._tables['DHCP_Options'].rows[uuid].delete() _delvalue_from_list(lswitch, 'ports', lport) self.api._tables['Logical_Switch_Port'].rows[lport.uuid].delete() class AddLRouterCommand(command.BaseCommand): def __init__(self, api, name, may_exist, **columns): super(AddLRouterCommand, self).__init__(api) self.name = name self.columns = columns self.may_exist = may_exist def run_idl(self, txn): if self.may_exist: lrouter = idlutils.row_by_value(self.api.idl, 'Logical_Router', 'name', self.name, None) if lrouter: return row = txn.insert(self.api._tables['Logical_Router']) row.name = self.name for col, val in self.columns.items(): setattr(row, col, val) class UpdateLRouterCommand(command.BaseCommand): def __init__(self, api, name, if_exists, **columns): super(UpdateLRouterCommand, self).__init__(api) self.name = name self.columns = columns self.if_exists = if_exists def run_idl(self, txn): try: lrouter = idlutils.row_by_value(self.api.idl, 'Logical_Router', 'name', self.name, None) except idlutils.RowNotFound: if self.if_exists: return msg = _("Logical Router %s does not exist") % self.name raise RuntimeError(msg) if lrouter: for col, val in self.columns.items(): setattr(lrouter, col, val) return class DelLRouterCommand(command.BaseCommand): def __init__(self, api, name, if_exists): super(DelLRouterCommand, self).__init__(api) self.name = name self.if_exists = if_exists def run_idl(self, txn): try: lrouter = idlutils.row_by_value(self.api.idl, 'Logical_Router', 'name', self.name) except idlutils.RowNotFound: if self.if_exists: return msg = _("Logical Router %s does not exist") % self.name raise RuntimeError(msg) self.api._tables['Logical_Router'].rows[lrouter.uuid].delete() class AddLRouterPortCommand(command.BaseCommand): def __init__(self, api, name, lrouter, 
may_exist, **columns): super(AddLRouterPortCommand, self).__init__(api) self.name = name self.lrouter = lrouter self.may_exist = may_exist self.columns = columns def run_idl(self, txn): try: lrouter = idlutils.row_by_value(self.api.idl, 'Logical_Router', 'name', self.lrouter) except idlutils.RowNotFound: msg = _("Logical Router %s does not exist") % self.lrouter raise RuntimeError(msg) try: idlutils.row_by_value(self.api.idl, 'Logical_Router_Port', 'name', self.name) if self.may_exist: return # An LRP entry with this name already exists; raise an # exception to notify the caller. It is the caller's # responsibility to call UpdateLRouterPortCommand to get the # LRP entry processed correctly. msg = _("Logical Router Port with name \"%s\" " "already exists.") % self.name raise RuntimeError(msg) except idlutils.RowNotFound: lrouter_port = txn.insert(self.api._tables['Logical_Router_Port']) lrouter_port.name = self.name for col, val in self.columns.items(): if col == 'gateway_chassis': col, val = _add_gateway_chassis(self.api, txn, self.name, val) setattr(lrouter_port, col, val) _addvalue_to_list(lrouter, 'ports', lrouter_port) class UpdateLRouterPortCommand(command.BaseCommand): def __init__(self, api, name, if_exists, **columns): super(UpdateLRouterPortCommand, self).__init__(api) self.name = name self.columns = columns self.if_exists = if_exists def run_idl(self, txn): try: lrouter_port = idlutils.row_by_value(self.api.idl, 'Logical_Router_Port', 'name', self.name) except idlutils.RowNotFound: if self.if_exists: return msg = _("Logical Router Port %s does not exist") % self.name raise RuntimeError(msg) # TODO(lucasagomes): Remove this check once we drop the support # for OVS versions <= 2.8 ipv6_ra_configs_supported = self.api.is_col_present( 'Logical_Router_Port', 'ipv6_ra_configs') for col, val in self.columns.items(): if col == 'ipv6_ra_configs' and not ipv6_ra_configs_supported: continue if col == 'gateway_chassis': col, val = _add_gateway_chassis(self.api, txn, self.name, val) setattr(lrouter_port, col, val) class DelLRouterPortCommand(command.BaseCommand): def __init__(self, api, name, lrouter, if_exists): super(DelLRouterPortCommand, self).__init__(api) self.name = name self.lrouter = lrouter self.if_exists = if_exists def run_idl(self, txn): try: lrouter_port = idlutils.row_by_value(self.api.idl, 'Logical_Router_Port', 'name', self.name) except idlutils.RowNotFound: if self.if_exists: return msg = _("Logical Router Port %s does not exist") % self.name raise RuntimeError(msg) try: lrouter = idlutils.row_by_value(self.api.idl, 'Logical_Router', 'name', self.lrouter) except idlutils.RowNotFound: msg = _("Logical Router %s does not exist") % self.lrouter raise RuntimeError(msg) _delvalue_from_list(lrouter, 'ports', lrouter_port) lrouter_port.delete() class SetLRouterPortInLSwitchPortCommand(command.BaseCommand): def __init__(self, api, lswitch_port, lrouter_port, is_gw_port, if_exists, lsp_address): super(SetLRouterPortInLSwitchPortCommand, self).__init__(api) self.lswitch_port = lswitch_port self.lrouter_port = lrouter_port self.is_gw_port = is_gw_port self.if_exists = if_exists self.lsp_address = lsp_address def run_idl(self, txn): try: port = idlutils.row_by_value(self.api.idl, 'Logical_Switch_Port', 'name', self.lswitch_port) except idlutils.RowNotFound: if self.if_exists: return msg = _("Logical Switch Port %s does not " "exist") % self.lswitch_port raise RuntimeError(msg) options = {'router-port': self.lrouter_port} if self.is_gw_port: options[ovn_const.OVN_GATEWAY_NAT_ADDRESSES_KEY] = 
'router' setattr(port, 'options', options) setattr(port, 'type', 'router') setattr(port, 'addresses', self.lsp_address) class AddACLCommand(command.BaseCommand): def __init__(self, api, lswitch, lport, **columns): super(AddACLCommand, self).__init__(api) self.lswitch = lswitch self.lport = lport self.columns = columns def run_idl(self, txn): try: lswitch = idlutils.row_by_value(self.api.idl, 'Logical_Switch', 'name', self.lswitch) except idlutils.RowNotFound: msg = _("Logical Switch %s does not exist") % self.lswitch raise RuntimeError(msg) row = txn.insert(self.api._tables['ACL']) for col, val in self.columns.items(): setattr(row, col, val) _addvalue_to_list(lswitch, 'acls', row.uuid) class DelACLCommand(command.BaseCommand): def __init__(self, api, lswitch, lport, if_exists): super(DelACLCommand, self).__init__(api) self.lswitch = lswitch self.lport = lport self.if_exists = if_exists def run_idl(self, txn): try: lswitch = idlutils.row_by_value(self.api.idl, 'Logical_Switch', 'name', self.lswitch) except idlutils.RowNotFound: if self.if_exists: return msg = _("Logical Switch %s does not exist") % self.lswitch raise RuntimeError(msg) acls_to_del = [] acls = getattr(lswitch, 'acls', []) for acl in acls: ext_ids = getattr(acl, 'external_ids', {}) if ext_ids.get('neutron:lport') == self.lport: acls_to_del.append(acl) for acl in acls_to_del: acl.delete() _updatevalues_in_list(lswitch, 'acls', old_values=acls_to_del) class UpdateACLsCommand(command.BaseCommand): def __init__(self, api, lswitch_names, port_list, acl_new_values_dict, need_compare=True, is_add_acl=True): """This command updates the acl list for the logical switches @param lswitch_names: List of Logical Switch Names @type lswitch_names: [] @param port_list: Iterator of List of Ports @type port_list: [] @param acl_new_values_dict: Dictionary of acls indexed by port id @type acl_new_values_dict: {} @param need_compare: True if acl_new_values_dict needs to be compared with existing acls. @type need_compare: Boolean @param is_add_acl: True if the update is caused by adding an acl. @type is_add_acl: Boolean """ super(UpdateACLsCommand, self).__init__(api) self.lswitch_names = lswitch_names self.port_list = port_list self.acl_new_values_dict = acl_new_values_dict self.need_compare = need_compare self.is_add_acl = is_add_acl def _acl_list_sub(self, acl_list1, acl_list2): """Compute the elements in acl_list1 but not in acl_list2. If acl_list1 and acl_list2 were sets, the result of this routine could be thought of as acl_list1 - acl_list2. Note that acl_list1 and acl_list2 cannot actually be sets as they contain dictionary items i.e. set([{'a': 1}]) doesn't work. """ acl_diff = [] for acl in acl_list1: if acl not in acl_list2: acl_diff.append(acl) return acl_diff def _compute_acl_differences(self, port_list, acl_old_values_dict, acl_new_values_dict, acl_obj_dict): """Compute the difference between the new and old sets of acls @param port_list: Iterator of a List of ports @type port_list: [] @param acl_old_values_dict: Dictionary of old acl values indexed by port id @param acl_new_values_dict: Dictionary of new acl values indexed by port id @param acl_obj_dict: Dictionary of acl objects indexed by the acl value in string format. @var acl_del_objs_dict: Dictionary of acl objects to be deleted indexed by the lswitch. @var acl_add_values_dict: Dictionary of acl values to be added indexed by the lswitch. 
@return: (acl_del_objs_dict, acl_add_values_dict) @rtype: ({}, {}) """ acl_del_objs_dict = {} acl_add_values_dict = {} for port in port_list: lswitch_name = port['network_id'] acls_old = acl_old_values_dict.get(port['id'], []) acls_new = acl_new_values_dict.get(port['id'], []) acls_del = self._acl_list_sub(acls_old, acls_new) acls_add = self._acl_list_sub(acls_new, acls_old) acl_del_objs = acl_del_objs_dict.setdefault(lswitch_name, []) for acl in acls_del: acl_del_objs.append(acl_obj_dict[str(acl)]) acl_add_values = acl_add_values_dict.setdefault(lswitch_name, []) for acl in acls_add: # Remove lport and lswitch columns del acl['lswitch'] del acl['lport'] acl_add_values.append(acl) return acl_del_objs_dict, acl_add_values_dict def _get_update_data_without_compare(self): lswitch_ovsdb_dict = {} for switch_name in self.lswitch_names: switch_name = utils.ovn_name(switch_name) lswitch = idlutils.row_by_value(self.api.idl, 'Logical_Switch', 'name', switch_name) lswitch_ovsdb_dict[switch_name] = lswitch if self.is_add_acl: acl_add_values_dict = {} for port in self.port_list: switch_name = utils.ovn_name(port['network_id']) if switch_name not in acl_add_values_dict: acl_add_values_dict[switch_name] = [] if port['id'] in self.acl_new_values_dict: acl_add_values_dict[switch_name].append( self.acl_new_values_dict[port['id']]) acl_del_objs_dict = {} else: acl_add_values_dict = {} acl_del_objs_dict = {} del_acl_extids = [] for acl_dict in self.acl_new_values_dict.values(): del_acl_extids.append({acl_dict['match']: acl_dict['external_ids']}) for switch_name, lswitch in lswitch_ovsdb_dict.items(): if switch_name not in acl_del_objs_dict: acl_del_objs_dict[switch_name] = [] acls = getattr(lswitch, 'acls', []) for acl in acls: match = getattr(acl, 'match') acl_extids = {match: getattr(acl, 'external_ids')} if acl_extids in del_acl_extids: acl_del_objs_dict[switch_name].append(acl) return lswitch_ovsdb_dict, acl_del_objs_dict, acl_add_values_dict def run_idl(self, txn): if self.need_compare: # Get all relevant ACLs in 1 shot acl_values_dict, acl_obj_dict, lswitch_ovsdb_dict = ( self.api.get_acls_for_lswitches(self.lswitch_names)) # Compute the difference between the new and old set of ACLs acl_del_objs_dict, acl_add_values_dict = ( self._compute_acl_differences( self.port_list, acl_values_dict, self.acl_new_values_dict, acl_obj_dict)) else: lswitch_ovsdb_dict, acl_del_objs_dict, acl_add_values_dict = ( self._get_update_data_without_compare()) for lswitch_name, lswitch in lswitch_ovsdb_dict.items(): acl_del_objs = acl_del_objs_dict.get(lswitch_name, []) acl_add_values = acl_add_values_dict.get(lswitch_name, []) # Continue if no ACLs to add or delete. if not acl_del_objs and not acl_add_values: continue # Delete old ACLs. if acl_del_objs: for acl_del_obj in acl_del_objs: try: acl_del_obj.delete() except AssertionError: # If we try to delete a row twice, just continue pass # Add new ACLs. acl_add_objs = None if acl_add_values: acl_add_objs = [] for acl_value in acl_add_values: row = txn.insert(self.api._tables['ACL']) for col, val in acl_value.items(): setattr(row, col, val) acl_add_objs.append(row.uuid) # Update logical switch ACLs. 
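            # Mutation sketch (hypothetical values): with
            # acl_add_objs=[new_row_uuid] and acl_del_objs=[old_acl_row],
            # the call below appends new_row_uuid to the switch's 'acls'
            # column and removes old_acl_row from it in one step.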
_updatevalues_in_list(lswitch, 'acls', new_values=acl_add_objs, old_values=acl_del_objs) class AddStaticRouteCommand(command.BaseCommand): def __init__(self, api, lrouter, **columns): super(AddStaticRouteCommand, self).__init__(api) self.lrouter = lrouter self.columns = columns def run_idl(self, txn): try: lrouter = idlutils.row_by_value(self.api.idl, 'Logical_Router', 'name', self.lrouter) except idlutils.RowNotFound: msg = _("Logical Router %s does not exist") % self.lrouter raise RuntimeError(msg) row = txn.insert(self.api._tables['Logical_Router_Static_Route']) for col, val in self.columns.items(): setattr(row, col, val) _addvalue_to_list(lrouter, 'static_routes', row.uuid) class DelStaticRouteCommand(command.BaseCommand): def __init__(self, api, lrouter, ip_prefix, nexthop, if_exists): super(DelStaticRouteCommand, self).__init__(api) self.lrouter = lrouter self.ip_prefix = ip_prefix self.nexthop = nexthop self.if_exists = if_exists def run_idl(self, txn): try: lrouter = idlutils.row_by_value(self.api.idl, 'Logical_Router', 'name', self.lrouter) except idlutils.RowNotFound: if self.if_exists: return msg = _("Logical Router %s does not exist") % self.lrouter raise RuntimeError(msg) static_routes = getattr(lrouter, 'static_routes', []) for route in static_routes: ip_prefix = getattr(route, 'ip_prefix', '') nexthop = getattr(route, 'nexthop', '') if self.ip_prefix == ip_prefix and self.nexthop == nexthop: _delvalue_from_list(lrouter, 'static_routes', route) route.delete() break class AddAddrSetCommand(command.BaseCommand): def __init__(self, api, name, may_exist, **columns): super(AddAddrSetCommand, self).__init__(api) self.name = name self.columns = columns self.may_exist = may_exist def run_idl(self, txn): if self.may_exist: addrset = idlutils.row_by_value(self.api.idl, 'Address_Set', 'name', self.name, None) if addrset: return row = txn.insert(self.api._tables['Address_Set']) row.name = self.name for col, val in self.columns.items(): setattr(row, col, val) class DelAddrSetCommand(command.BaseCommand): def __init__(self, api, name, if_exists): super(DelAddrSetCommand, self).__init__(api) self.name = name self.if_exists = if_exists def run_idl(self, txn): try: addrset = idlutils.row_by_value(self.api.idl, 'Address_Set', 'name', self.name) except idlutils.RowNotFound: if self.if_exists: return msg = _("Address set %s does not exist. " "Can't delete.") % self.name raise RuntimeError(msg) self.api._tables['Address_Set'].rows[addrset.uuid].delete() class UpdateAddrSetCommand(command.BaseCommand): def __init__(self, api, name, addrs_add, addrs_remove, if_exists): super(UpdateAddrSetCommand, self).__init__(api) self.name = name self.addrs_add = addrs_add self.addrs_remove = addrs_remove self.if_exists = if_exists def run_idl(self, txn): try: addrset = idlutils.row_by_value(self.api.idl, 'Address_Set', 'name', self.name) except idlutils.RowNotFound: if self.if_exists: return msg = _("Address set %s does not exist. 
" "Can't update addresses") % self.name raise RuntimeError(msg) _updatevalues_in_list( addrset, 'addresses', new_values=self.addrs_add, old_values=self.addrs_remove) class UpdateAddrSetExtIdsCommand(command.BaseCommand): def __init__(self, api, name, external_ids, if_exists): super(UpdateAddrSetExtIdsCommand, self).__init__(api) self.name = name self.external_ids = external_ids self.if_exists = if_exists def run_idl(self, txn): try: addrset = idlutils.row_by_value(self.api.idl, 'Address_Set', 'name', self.name) except idlutils.RowNotFound: if self.if_exists: return msg = _("Address set %s does not exist. " "Can't update external IDs") % self.name raise RuntimeError(msg) addrset.verify('external_ids') addrset_external_ids = getattr(addrset, 'external_ids', {}) for ext_id_key, ext_id_value in self.external_ids.items(): addrset_external_ids[ext_id_key] = ext_id_value addrset.external_ids = addrset_external_ids class UpdateChassisExtIdsCommand(command.BaseCommand): def __init__(self, api, name, external_ids, if_exists): super(UpdateChassisExtIdsCommand, self).__init__(api) self.name = name self.external_ids = external_ids self.if_exists = if_exists def run_idl(self, txn): try: chassis = idlutils.row_by_value(self.api.idl, 'Chassis', 'name', self.name) except idlutils.RowNotFound: if self.if_exists: return msg = _("Chassis %s does not exist. " "Can't update external IDs") % self.name raise RuntimeError(msg) chassis.verify('external_ids') chassis_external_ids = getattr(chassis, 'external_ids', {}) for ext_id_key, ext_id_value in self.external_ids.items(): chassis_external_ids[ext_id_key] = ext_id_value chassis.external_ids = chassis_external_ids class UpdatePortBindingExtIdsCommand(command.BaseCommand): def __init__(self, api, name, external_ids, if_exists): super(UpdatePortBindingExtIdsCommand, self).__init__(api) self.name = name self.external_ids = external_ids self.if_exists = if_exists def run_idl(self, txn): try: port = idlutils.row_by_value(self.api.idl, 'Port_Binding', 'logical_port', self.name) except idlutils.RowNotFound: if self.if_exists: return msg = _("Port %s does not exist. " "Can't update external IDs") % self.name raise RuntimeError(msg) port.verify('external_ids') port_external_ids = getattr(port, 'external_ids', {}) for ext_id_key, ext_id_value in self.external_ids.items(): port_external_ids[ext_id_key] = ext_id_value port.external_ids = port_external_ids class AddDHCPOptionsCommand(command.BaseCommand): def __init__(self, api, subnet_id, port_id=None, may_exist=True, **columns): super(AddDHCPOptionsCommand, self).__init__(api) self.columns = columns self.may_exist = may_exist self.subnet_id = subnet_id self.port_id = port_id self.new_insert = False def _get_dhcp_options_row(self): for row in self.api._tables['DHCP_Options'].rows.values(): external_ids = getattr(row, 'external_ids', {}) port_id = external_ids.get('port_id') if self.subnet_id == external_ids.get('subnet_id'): if self.port_id == port_id: return row def run_idl(self, txn): row = None if self.may_exist: row = self._get_dhcp_options_row() if not row: row = txn.insert(self.api._tables['DHCP_Options']) self.new_insert = True for col, val in self.columns.items(): setattr(row, col, val) self.result = row.uuid def post_commit(self, txn): # Update the result with inserted uuid for new inserted row, or the # uuid get in run_idl should be real uuid already. 
if self.new_insert: self.result = txn.get_insert_uuid(self.result) class DelDHCPOptionsCommand(command.BaseCommand): def __init__(self, api, row_uuid, if_exists=True): super(DelDHCPOptionsCommand, self).__init__(api) self.if_exists = if_exists self.row_uuid = row_uuid def run_idl(self, txn): if self.row_uuid not in self.api._tables['DHCP_Options'].rows: if self.if_exists: return msg = _("DHCP Options row %s does not exist") % self.row_uuid raise RuntimeError(msg) self.api._tables['DHCP_Options'].rows[self.row_uuid].delete() class AddNATRuleInLRouterCommand(command.BaseCommand): # TODO(chandrav): Add unit tests, bug #1638715. def __init__(self, api, lrouter, **columns): super(AddNATRuleInLRouterCommand, self).__init__(api) self.lrouter = lrouter self.columns = columns def run_idl(self, txn): try: lrouter = idlutils.row_by_value(self.api.idl, 'Logical_Router', 'name', self.lrouter) except idlutils.RowNotFound: msg = _("Logical Router %s does not exist") % self.lrouter raise RuntimeError(msg) row = txn.insert(self.api._tables['NAT']) for col, val in self.columns.items(): setattr(row, col, val) # TODO(chandrav): convert this to ovs transaction mutate lrouter.verify('nat') nat = getattr(lrouter, 'nat', []) nat.append(row.uuid) setattr(lrouter, 'nat', nat) class DeleteNATRuleInLRouterCommand(command.BaseCommand): # TODO(chandrav): Add unit tests, bug #1638715. def __init__(self, api, lrouter, type, logical_ip, external_ip, if_exists): super(DeleteNATRuleInLRouterCommand, self).__init__(api) self.lrouter = lrouter self.type = type self.logical_ip = logical_ip self.external_ip = external_ip self.if_exists = if_exists def run_idl(self, txn): try: lrouter = idlutils.row_by_value(self.api.idl, 'Logical_Router', 'name', self.lrouter) except idlutils.RowNotFound: if self.if_exists: return msg = _("Logical Router %s does not exist") % self.lrouter raise RuntimeError(msg) lrouter.verify('nat') # TODO(chandrav): convert this to ovs transaction mutate nats = getattr(lrouter, 'nat', []) for nat in nats: type = getattr(nat, 'type', '') external_ip = getattr(nat, 'external_ip', '') logical_ip = getattr(nat, 'logical_ip', '') if (self.type == type and self.external_ip == external_ip and self.logical_ip == logical_ip): nats.remove(nat) nat.delete() break setattr(lrouter, 'nat', nats) class SetNATRuleInLRouterCommand(command.BaseCommand): def __init__(self, api, lrouter, nat_rule_uuid, **columns): super(SetNATRuleInLRouterCommand, self).__init__(api) self.lrouter = lrouter self.nat_rule_uuid = nat_rule_uuid self.columns = columns def run_idl(self, txn): try: lrouter = idlutils.row_by_value(self.api.idl, 'Logical_Router', 'name', self.lrouter) except idlutils.RowNotFound: msg = _("Logical Router %s does not exist") % self.lrouter raise RuntimeError(msg) lrouter.verify('nat') nat_rules = getattr(lrouter, 'nat', []) for nat_rule in nat_rules: if nat_rule.uuid == self.nat_rule_uuid: for col, val in self.columns.items(): setattr(nat_rule, col, val) break class CheckRevisionNumberCommand(command.BaseCommand): def __init__(self, api, name, resource, resource_type, if_exists): super(CheckRevisionNumberCommand, self).__init__(api) self.name = name self.resource = resource self.resource_type = resource_type self.if_exists = if_exists def _get_floatingip(self): # TODO(lucasagomes): We can't use self.api.lookup() because that # method does not introspect map type columns. We could either: # 1. Enhance it to look into maps or, 2. 
Add a new ``name`` column # to the NAT table so that we can use lookup() just like we do # for other resources for nat in self.api._tables['NAT'].rows.values(): if nat.type != 'dnat_and_snat': continue ext_ids = getattr(nat, 'external_ids', {}) if ext_ids.get(ovn_const.OVN_FIP_EXT_ID_KEY) == self.name: return nat raise idlutils.RowNotFound( table='NAT', col='external_ids', match=self.name) def _get_subnet(self): for dhcp in self.api._tables['DHCP_Options'].rows.values(): ext_ids = getattr(dhcp, 'external_ids', {}) # Ignore ports DHCP Options if ext_ids.get('port_id'): continue if ext_ids.get('subnet_id') == self.name: return dhcp raise idlutils.RowNotFound( table='DHCP_Options', col='external_ids', match=self.name) def run_idl(self, txn): try: ovn_table = RESOURCE_TYPE_MAP[self.resource_type] # TODO(lucasagomes): After OVS 2.8.2 is released all tables should # have the external_ids column. We can remove this conditional # here by then. if not self.api.is_col_present(ovn_table, 'external_ids'): return ovn_resource = None if self.resource_type == ovn_const.TYPE_FLOATINGIPS: ovn_resource = self._get_floatingip() elif self.resource_type == ovn_const.TYPE_SUBNETS: ovn_resource = self._get_subnet() else: ovn_resource = self.api.lookup(ovn_table, self.name) except idlutils.RowNotFound: if self.if_exists: return msg = (_('Failed to check the revision number for %s: Resource ' 'does not exist') % self.name) raise RuntimeError(msg) external_ids = getattr(ovn_resource, 'external_ids', {}) ovn_revision = int(external_ids.get( ovn_const.OVN_REV_NUM_EXT_ID_KEY, -1)) neutron_revision = utils.get_revision_number(self.resource, self.resource_type) if ovn_revision > neutron_revision: raise ovn_exc.RevisionConflict( resource_id=self.name, resource_type=self.resource_type) ovn_resource.verify('external_ids') ovn_resource.setkey('external_ids', ovn_const.OVN_REV_NUM_EXT_ID_KEY, str(neutron_revision)) def post_commit(self, txn): self.result = ovn_const.TXN_COMMITTED class DeleteLRouterExtGwCommand(command.BaseCommand): def __init__(self, api, lrouter, if_exists): super(DeleteLRouterExtGwCommand, self).__init__(api) self.lrouter = lrouter self.if_exists = if_exists def run_idl(self, txn): # TODO(lucasagomes): Remove this check after OVS 2.8.2 is tagged # (prior to that, the external_ids column didn't exist in this # table). 
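        # Guard sketch: on schemas predating that column, is_col_present()
        # returns False and the command silently becomes a no-op instead of
        # failing mid-transaction.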
if not self.api.is_col_present('Logical_Router_Static_Route', 'external_ids'): return try: lrouter = idlutils.row_by_value(self.api.idl, 'Logical_Router', 'name', self.lrouter) except idlutils.RowNotFound: if self.if_exists: return msg = _("Logical Router %s does not exist") % self.lrouter raise RuntimeError(msg) lrouter.verify('static_routes') static_routes = getattr(lrouter, 'static_routes', []) for route in static_routes: external_ids = getattr(route, 'external_ids', {}) if ovn_const.OVN_ROUTER_IS_EXT_GW in external_ids: _delvalue_from_list(lrouter, 'static_routes', route) route.delete() break lrouter.verify('nat') nats = getattr(lrouter, 'nat', []) for nat in nats: if nat.type != 'snat': continue _delvalue_from_list(lrouter, 'nat', nat) nat.delete() lrouter_ext_ids = getattr(lrouter, 'external_ids', {}) gw_port_id = lrouter_ext_ids.get(ovn_const.OVN_GW_PORT_EXT_ID_KEY) if not gw_port_id: return try: lrouter_port = idlutils.row_by_value( self.api.idl, 'Logical_Router_Port', 'name', utils.ovn_lrouter_port_name(gw_port_id)) except idlutils.RowNotFound: return _delvalue_from_list(lrouter, 'ports', lrouter_port) class SetLSwitchPortToVirtualTypeCommand(command.BaseCommand): def __init__(self, api, lport, vip, parent, if_exists): super(SetLSwitchPortToVirtualTypeCommand, self).__init__(api) self.lport = lport self.vip = vip self.parent = parent self.if_exists = if_exists def run_idl(self, txn): try: lsp = idlutils.row_by_value(self.api.idl, 'Logical_Switch_Port', 'name', self.lport) except idlutils.RowNotFound: if self.if_exists: return msg = "Logical Switch Port %s does not exist" % self.lport raise RuntimeError(msg) options = lsp.options options[ovn_const.LSP_OPTIONS_VIRTUAL_IP_KEY] = self.vip virtual_parents = options.get( ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY, set()) if virtual_parents: virtual_parents = set(virtual_parents.split(',')) virtual_parents.add(self.parent) options[ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY] = ','.join( virtual_parents) setattr(lsp, 'options', options) setattr(lsp, 'type', ovn_const.LSP_TYPE_VIRTUAL) class UnsetLSwitchPortToVirtualTypeCommand(command.BaseCommand): def __init__(self, api, lport, parent, if_exists): super(UnsetLSwitchPortToVirtualTypeCommand, self).__init__(api) self.lport = lport self.parent = parent self.if_exists = if_exists def run_idl(self, txn): try: lsp = idlutils.row_by_value(self.api.idl, 'Logical_Switch_Port', 'name', self.lport) except idlutils.RowNotFound: if self.if_exists: return msg = "Logical Switch Port %s does not exist" % self.lport raise RuntimeError(msg) options = lsp.options virtual_parents = options.get( ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY, set()) if virtual_parents: virtual_parents = set(virtual_parents.split(',')) try: virtual_parents.remove(self.parent) except KeyError: pass # If virtual-parents is now empty, change the type and remove the # virtual-parents and virtual-ip options if not virtual_parents: options.pop(ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY, None) options.pop(ovn_const.LSP_OPTIONS_VIRTUAL_IP_KEY, None) setattr(lsp, 'type', '') else: options[ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY] = ','.join( virtual_parents) setattr(lsp, 'options', options) # File: neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/impl_idl_ovn.py # Licensed under the Apache License, Version 2.0 (the "License"); you may # 
not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import uuid from neutron_lib import exceptions as n_exc from neutron_lib.utils import helpers from oslo_log import log from oslo_utils import uuidutils from ovsdbapp.backend import ovs_idl from ovsdbapp.backend.ovs_idl import connection from ovsdbapp.backend.ovs_idl import idlutils from ovsdbapp.backend.ovs_idl import transaction as idl_trans from ovsdbapp.backend.ovs_idl import vlog from ovsdbapp.schema.ovn_northbound import impl_idl as nb_impl_idl from ovsdbapp.schema.ovn_southbound import impl_idl as sb_impl_idl import tenacity from neutron._i18n import _ from neutron.common.ovn import constants as ovn_const from neutron.common.ovn import exceptions as ovn_exc from neutron.common.ovn import utils from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf as cfg from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import commands as cmd from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovsdb_monitor LOG = log.getLogger(__name__) class OvnNbTransaction(idl_trans.Transaction): def __init__(self, *args, **kwargs): # NOTE(lucasagomes): The bump_nb_cfg parameter is only used by # the agents health status check self.bump_nb_cfg = kwargs.pop('bump_nb_cfg', False) super(OvnNbTransaction, self).__init__(*args, **kwargs) def pre_commit(self, txn): if not self.bump_nb_cfg: return self.api.nb_global.increment('nb_cfg') # This version of Backend doesn't use a class variable for ovsdb_connection # and therefore allows networking-ovn to manage connection scope on its own class Backend(ovs_idl.Backend): lookup_table = {} def __init__(self, connection): self.ovsdb_connection = connection super(Backend, self).__init__(connection) def start_connection(self, connection): try: self.ovsdb_connection.start() except Exception as e: connection_exception = OvsdbConnectionUnavailable( db_schema=self.schema, error=e) LOG.exception(connection_exception) raise connection_exception @property def idl(self): return self.ovsdb_connection.idl @property def tables(self): return self.idl.tables _tables = tables def is_table_present(self, table_name): return table_name in self._tables def is_col_present(self, table_name, col_name): return self.is_table_present(table_name) and ( col_name in self._tables[table_name].columns) def create_transaction(self, check_error=False, log_errors=True): return idl_trans.Transaction( self, self.ovsdb_connection, self.ovsdb_connection.timeout, check_error, log_errors) # Check for a column match in the table. If not found, retry with # a stop delay of 10 secs. This function is useful if the caller # wants to verify the presence of a particular row in the table # with the column match before doing any transaction. # E.g. we can check if a Logical_Switch row is present before adding a # logical switch port to it. 
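    # Usage sketch (hypothetical names): a caller can wait briefly for a row
    # to appear in the local IDL copy before issuing dependent commands:
    #
    #   api.check_for_row_by_value_and_retry(
    #       'Logical_Switch', 'name', 'neutron-<net-uuid>')
    #
    # RuntimeError propagates if no match shows up within the ~10s window.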
@tenacity.retry(retry=tenacity.retry_if_exception_type(RuntimeError), wait=tenacity.wait_exponential(), stop=tenacity.stop_after_delay(10), reraise=True) def check_for_row_by_value_and_retry(self, table, column, match): try: idlutils.row_by_value(self.idl, table, column, match) except idlutils.RowNotFound: msg = (_("%(match)s does not exist in %(column)s of %(table)s") % {'match': match, 'column': column, 'table': table}) raise RuntimeError(msg) class OvsdbConnectionUnavailable(n_exc.ServiceUnavailable): message = _("OVS database connection to %(db_schema)s failed with error: " "'%(error)s'. Verify that the OVS and OVN services are " "available and that the 'ovn_nb_connection' and " "'ovn_sb_connection' configuration options are correct.") # Retry forever to get the OVN NB and SB IDLs. Wait 2^x * 1 seconds between # each retry, up to 'max_interval' seconds, then interval will be fixed # to 'max_interval' seconds afterwards. The default 'max_interval' is 180. def get_ovn_idls(driver, trigger, binding_events=False): @tenacity.retry( wait=tenacity.wait_exponential( max=cfg.get_ovn_ovsdb_retry_max_interval()), reraise=True) def get_ovn_idl_retry(cls): trigger_class = utils.get_method_class(trigger) LOG.info('Getting %(cls)s for %(trigger)s with retry', {'cls': cls.__name__, 'trigger': trigger_class.__name__}) return cls(get_connection(cls, trigger, driver, binding_events)) vlog.use_python_logger(max_level=cfg.get_ovn_ovsdb_log_level()) return tuple(get_ovn_idl_retry(c) for c in (OvsdbNbOvnIdl, OvsdbSbOvnIdl)) def get_connection(db_class, trigger=None, driver=None, binding_events=False): if db_class == OvsdbNbOvnIdl: args = (cfg.get_ovn_nb_connection(), 'OVN_Northbound') elif db_class == OvsdbSbOvnIdl: args = (cfg.get_ovn_sb_connection(), 'OVN_Southbound') if binding_events: if db_class == OvsdbNbOvnIdl: idl_ = ovsdb_monitor.OvnNbIdl.from_server(*args, driver=driver) else: idl_ = ovsdb_monitor.OvnSbIdl.from_server(*args, driver=driver) else: if db_class == OvsdbNbOvnIdl: idl_ = ovsdb_monitor.BaseOvnIdl.from_server(*args) else: idl_ = ovsdb_monitor.BaseOvnSbIdl.from_server(*args) return connection.Connection(idl_, timeout=cfg.get_ovn_ovsdb_timeout()) class OvsdbNbOvnIdl(nb_impl_idl.OvnNbApiIdlImpl, Backend): def __init__(self, connection): super(OvsdbNbOvnIdl, self).__init__(connection) self.idl._session.reconnect.set_probe_interval( cfg.get_ovn_ovsdb_probe_interval()) @property def nb_global(self): return next(iter(self.tables['NB_Global'].rows.values())) def create_transaction(self, check_error=False, log_errors=True, bump_nb_cfg=False): return OvnNbTransaction( self, self.ovsdb_connection, self.ovsdb_connection.timeout, check_error, log_errors, bump_nb_cfg=bump_nb_cfg) @contextlib.contextmanager def transaction(self, *args, **kwargs): """A wrapper on the ovsdbapp transaction to work with revisions. This method is just a wrapper around the ovsdbapp transaction to handle revision conflicts correctly. """ try: with super(OvsdbNbOvnIdl, self).transaction(*args, **kwargs) as t: yield t except ovn_exc.RevisionConflict as e: LOG.info('Transaction aborted. 
Reason: %s', e) def create_lswitch_port(self, lport_name, lswitch_name, may_exist=True, **columns): return cmd.AddLSwitchPortCommand(self, lport_name, lswitch_name, may_exist, **columns) def set_lswitch_port(self, lport_name, if_exists=True, **columns): return cmd.SetLSwitchPortCommand(self, lport_name, if_exists, **columns) def delete_lswitch_port(self, lport_name=None, lswitch_name=None, ext_id=None, if_exists=True): if lport_name is not None: return cmd.DelLSwitchPortCommand(self, lport_name, lswitch_name, if_exists) else: raise RuntimeError(_("Currently only supports " "delete by lport-name")) def get_all_logical_switches_with_ports(self): result = [] for lswitch in self._tables['Logical_Switch'].rows.values(): if ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY not in ( lswitch.external_ids): continue ports = [] provnet_port = None for lport in getattr(lswitch, 'ports', []): if ovn_const.OVN_PORT_NAME_EXT_ID_KEY in lport.external_ids: ports.append(lport.name) # Handle provider network port elif lport.name.startswith( ovn_const.OVN_PROVNET_PORT_NAME_PREFIX): provnet_port = lport.name result.append({'name': lswitch.name, 'ports': ports, 'provnet_port': provnet_port}) return result def get_all_logical_routers_with_rports(self): """Get logical Router ports associated with all logical Routers @return: list of dict, each dict has key-value: - 'name': string router_id in neutron. - 'static_routes': list of static routes dict. - 'ports': dict of port_id in neutron (key) and networks on port (value). - 'snats': list of snats dict - 'dnat_and_snats': list of dnat_and_snats dict """ result = [] for lrouter in self._tables['Logical_Router'].rows.values(): if ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY not in ( lrouter.external_ids): continue lrports = {lrport.name.replace('lrp-', ''): lrport.networks for lrport in getattr(lrouter, 'ports', [])} sroutes = [{'destination': sroute.ip_prefix, 'nexthop': sroute.nexthop} for sroute in getattr(lrouter, 'static_routes', [])] dnat_and_snats = [] snat = [] for nat in getattr(lrouter, 'nat', []): columns = {'logical_ip': nat.logical_ip, 'external_ip': nat.external_ip, 'type': nat.type} if nat.type == 'dnat_and_snat': if nat.external_mac: columns['external_mac'] = nat.external_mac[0] if nat.logical_port: columns['logical_port'] = nat.logical_port[0] dnat_and_snats.append(columns) elif nat.type == 'snat': snat.append(columns) result.append({'name': lrouter.name.replace('neutron-', ''), 'static_routes': sroutes, 'ports': lrports, 'snats': snat, 'dnat_and_snats': dnat_and_snats}) return result def get_acl_by_id(self, acl_id): try: return self.lookup('ACL', uuid.UUID(acl_id)) except idlutils.RowNotFound: return def get_acls_for_lswitches(self, lswitch_names): """Get the existing set of acls that belong to the logical switches @param lswitch_names: List of logical switch names @type lswitch_names: [] @var acl_values_dict: A dictionary indexed by port_id containing the list of acl values in string format that belong to that port @var acl_obj_dict: A dictionary indexed by acl value containing the corresponding acl idl object. 
@var lswitch_ovsdb_dict: A dictionary mapping from logical switch name to lswitch idl object @return: (acl_values_dict, acl_obj_dict, lswitch_ovsdb_dict) """ acl_values_dict = {} acl_obj_dict = {} lswitch_ovsdb_dict = {} for lswitch_name in lswitch_names: try: lswitch = idlutils.row_by_value(self.idl, 'Logical_Switch', 'name', utils.ovn_name(lswitch_name)) except idlutils.RowNotFound: # It is possible for the logical switch to be deleted # while we are searching for it by name in idl. continue lswitch_ovsdb_dict[lswitch_name] = lswitch acls = getattr(lswitch, 'acls', []) # Iterate over each acl in a lswitch and store the acl in # a key:value representation for e.g. acl_string. This # key:value representation can invoke the code - # self._ovn.add_acl(**acl_string) for acl in acls: ext_ids = getattr(acl, 'external_ids', {}) port_id = ext_ids.get('neutron:lport') acl_list = acl_values_dict.setdefault(port_id, []) acl_string = {'lport': port_id, 'lswitch': utils.ovn_name(lswitch_name)} for acl_key in getattr(acl, "_data", {}): try: acl_string[acl_key] = getattr(acl, acl_key) except AttributeError: pass acl_obj_dict[str(acl_string)] = acl acl_list.append(acl_string) return acl_values_dict, acl_obj_dict, lswitch_ovsdb_dict def create_lrouter(self, name, may_exist=True, **columns): return cmd.AddLRouterCommand(self, name, may_exist, **columns) def update_lrouter(self, name, if_exists=True, **columns): return cmd.UpdateLRouterCommand(self, name, if_exists, **columns) def delete_lrouter(self, name, if_exists=True): return cmd.DelLRouterCommand(self, name, if_exists) def add_lrouter_port(self, name, lrouter, may_exist=False, **columns): return cmd.AddLRouterPortCommand(self, name, lrouter, may_exist, **columns) def update_lrouter_port(self, name, if_exists=True, **columns): return cmd.UpdateLRouterPortCommand(self, name, if_exists, **columns) def delete_lrouter_port(self, name, lrouter, if_exists=True): return cmd.DelLRouterPortCommand(self, name, lrouter, if_exists) def set_lrouter_port_in_lswitch_port( self, lswitch_port, lrouter_port, is_gw_port=False, if_exists=True, lsp_address=ovn_const.DEFAULT_ADDR_FOR_LSP_WITH_PEER): return cmd.SetLRouterPortInLSwitchPortCommand(self, lswitch_port, lrouter_port, is_gw_port, if_exists, lsp_address) def add_acl(self, lswitch, lport, **columns): return cmd.AddACLCommand(self, lswitch, lport, **columns) def delete_acl(self, lswitch, lport, if_exists=True): return cmd.DelACLCommand(self, lswitch, lport, if_exists) def update_acls(self, lswitch_names, port_list, acl_new_values_dict, need_compare=True, is_add_acl=True): return cmd.UpdateACLsCommand(self, lswitch_names, port_list, acl_new_values_dict, need_compare=need_compare, is_add_acl=is_add_acl) def add_static_route(self, lrouter, **columns): return cmd.AddStaticRouteCommand(self, lrouter, **columns) def delete_static_route(self, lrouter, ip_prefix, nexthop, if_exists=True): return cmd.DelStaticRouteCommand(self, lrouter, ip_prefix, nexthop, if_exists) def create_address_set(self, name, may_exist=True, **columns): return cmd.AddAddrSetCommand(self, name, may_exist, **columns) def delete_address_set(self, name, if_exists=True, **columns): return cmd.DelAddrSetCommand(self, name, if_exists) def update_address_set(self, name, addrs_add, addrs_remove, if_exists=True): return cmd.UpdateAddrSetCommand(self, name, addrs_add, addrs_remove, if_exists) def update_address_set_ext_ids(self, name, external_ids, if_exists=True): return cmd.UpdateAddrSetExtIdsCommand(self, name, external_ids, if_exists) def 
_get_logical_router_port_gateway_chassis(self, lrp): """Get the list of chassis hosting this gateway port. @param lrp: logical router port @type lrp: Logical_Router_Port row @return: List of tuples (chassis_name, priority) sorted by priority """ # Try retrieving gateway_chassis with new schema. If new schema is not # supported or user is using old schema, then use old schema for # getting gateway_chassis chassis = [] if self._tables.get('Gateway_Chassis'): for gwc in lrp.gateway_chassis: chassis.append((gwc.chassis_name, gwc.priority)) else: rc = lrp.options.get(ovn_const.OVN_GATEWAY_CHASSIS_KEY) if rc: chassis.append((rc, 0)) # make sure that chassis are sorted by priority return sorted(chassis, reverse=True, key=lambda x: x[1]) def get_all_chassis_gateway_bindings(self, chassis_candidate_list=None): chassis_bindings = {} for chassis_name in chassis_candidate_list or []: chassis_bindings.setdefault(chassis_name, []) for lrp in self._tables['Logical_Router_Port'].rows.values(): if not lrp.name.startswith('lrp-'): continue chassis = self._get_logical_router_port_gateway_chassis(lrp) for chassis_name, prio in chassis: if (not chassis_candidate_list or chassis_name in chassis_candidate_list): routers_hosted = chassis_bindings.setdefault(chassis_name, []) routers_hosted.append((lrp.name, prio)) return chassis_bindings def get_gateway_chassis_binding(self, gateway_name): try: lrp = idlutils.row_by_value( self.idl, 'Logical_Router_Port', 'name', gateway_name) chassis_list = self._get_logical_router_port_gateway_chassis(lrp) return [chassis for chassis, prio in chassis_list] except idlutils.RowNotFound: return [] def get_chassis_gateways(self, chassis_name): gw_chassis = self.db_find_rows( 'Gateway_Chassis', ('chassis_name', '=', chassis_name)) return gw_chassis.execute(check_error=True) def get_unhosted_gateways(self, port_physnet_dict, chassis_with_physnets, all_gw_chassis): unhosted_gateways = set() for port, physnet in port_physnet_dict.items(): lrp_name = '%s%s' % (ovn_const.LRP_PREFIX, port) original_state = self.get_gateway_chassis_binding(lrp_name) # Filter out chassis that lost physnet, the cms option, # or has been deleted. actual_gw_chassis = [ chassis for chassis in original_state if not utils.is_gateway_chassis_invalid( chassis, all_gw_chassis, physnet, chassis_with_physnets)] # Check if gw ports are fully scheduled. if len(actual_gw_chassis) >= ovn_const.MAX_GW_CHASSIS: continue # If there are no gateways with 'enable-chassis-as-gw' cms option # then try to schedule on all gateways with physnets connected, # and filter required physnet. available_chassis = { c for c in all_gw_chassis or chassis_with_physnets.keys() if not utils.is_gateway_chassis_invalid( c, all_gw_chassis, physnet, chassis_with_physnets)} if available_chassis == set(original_state): # The same situation as was before. Nothing # to be rescheduled. continue if not available_chassis: # There is no chassis that could host # this gateway. 
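# Worked example of the rescheduling check above (all names are
# illustrative): given
#
#     original_state    = ['chassis-1', 'chassis-gone']
#     available_chassis = {'chassis-1', 'chassis-2'}
#
# 'chassis-gone' no longer passes is_gateway_chassis_invalid(), the two
# sets differ, and the lrp is reported as unhosted so the scheduler can
# rebind it; if the sets were equal, nothing would need rescheduling.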
continue unhosted_gateways.add(lrp_name) return unhosted_gateways def add_dhcp_options(self, subnet_id, port_id=None, may_exist=True, **columns): return cmd.AddDHCPOptionsCommand(self, subnet_id, port_id=port_id, may_exist=may_exist, **columns) def delete_dhcp_options(self, row_uuid, if_exists=True): return cmd.DelDHCPOptionsCommand(self, row_uuid, if_exists=if_exists) def _format_dhcp_row(self, row): ext_ids = dict(getattr(row, 'external_ids', {})) return {'cidr': row.cidr, 'options': dict(row.options), 'external_ids': ext_ids, 'uuid': row.uuid} def get_subnet_dhcp_options(self, subnet_id, with_ports=False): subnet = None ports = [] for row in self._tables['DHCP_Options'].rows.values(): external_ids = getattr(row, 'external_ids', {}) if subnet_id == external_ids.get('subnet_id'): port_id = external_ids.get('port_id') if with_ports and port_id: ports.append(self._format_dhcp_row(row)) elif not port_id: subnet = self._format_dhcp_row(row) if not with_ports: break return {'subnet': subnet, 'ports': ports} def get_subnets_dhcp_options(self, subnet_ids): ret_opts = [] for row in self._tables['DHCP_Options'].rows.values(): external_ids = getattr(row, 'external_ids', {}) if (external_ids.get('subnet_id') in subnet_ids and not external_ids.get('port_id')): ret_opts.append(self._format_dhcp_row(row)) if len(ret_opts) == len(subnet_ids): break return ret_opts def get_all_dhcp_options(self): dhcp_options = {'subnets': {}, 'ports_v4': {}, 'ports_v6': {}} for row in self._tables['DHCP_Options'].rows.values(): external_ids = getattr(row, 'external_ids', {}) if not external_ids.get('subnet_id'): # This row is not created by OVN ML2 driver. Ignore it. continue if not external_ids.get('port_id'): dhcp_options['subnets'][external_ids['subnet_id']] = ( self._format_dhcp_row(row)) else: port_dict = 'ports_v6' if ':' in row.cidr else 'ports_v4' dhcp_options[port_dict][external_ids['port_id']] = ( self._format_dhcp_row(row)) return dhcp_options def get_address_sets(self): address_sets = {} for row in self._tables['Address_Set'].rows.values(): # TODO(lucasagomes): Remove OVN_SG_NAME_EXT_ID_KEY in the # Rocky release if not (ovn_const.OVN_SG_EXT_ID_KEY in row.external_ids or ovn_const.OVN_SG_NAME_EXT_ID_KEY in row.external_ids): continue name = getattr(row, 'name') data = {} for row_key in getattr(row, "_data", {}): data[row_key] = getattr(row, row_key) address_sets[name] = data return address_sets def get_router_port_options(self, lsp_name): try: lsp = idlutils.row_by_value(self.idl, 'Logical_Switch_Port', 'name', lsp_name) options = getattr(lsp, 'options') for key in list(options.keys()): if key not in ovn_const.OVN_ROUTER_PORT_OPTION_KEYS: del(options[key]) return options except idlutils.RowNotFound: return {} def add_nat_rule_in_lrouter(self, lrouter, **columns): return cmd.AddNATRuleInLRouterCommand(self, lrouter, **columns) def delete_nat_rule_in_lrouter(self, lrouter, type, logical_ip, external_ip, if_exists=True): return cmd.DeleteNATRuleInLRouterCommand(self, lrouter, type, logical_ip, external_ip, if_exists) def get_lrouter_nat_rules(self, lrouter_name): try: lrouter = idlutils.row_by_value(self.idl, 'Logical_Router', 'name', lrouter_name) except idlutils.RowNotFound: msg = _("Logical Router %s does not exist") % lrouter_name raise RuntimeError(msg) nat_rules = [] for nat_rule in getattr(lrouter, 'nat', []): ext_ids = {} # TODO(dalvarez): remove this check once the minimum OVS required # version contains the column (when OVS 2.8.2 is released). 
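# Schema-compatibility sketch (assuming 'nb_idl' is this API object):
# optional columns and tables are probed before use, e.g.
#
#     if nb_idl.is_col_present('NAT', 'external_ids'):
#         ...  # safe to read NAT.external_ids on this schema
#
# The same pattern guards whole tables, e.g. is_port_groups_supported()
# is just is_table_present('Port_Group').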
if self.is_col_present('NAT', 'external_ids'): ext_ids = dict(getattr(nat_rule, 'external_ids', {})) nat_rules.append({'external_ip': nat_rule.external_ip, 'logical_ip': nat_rule.logical_ip, 'type': nat_rule.type, 'uuid': nat_rule.uuid, 'external_ids': ext_ids}) return nat_rules def set_nat_rule_in_lrouter(self, lrouter, nat_rule_uuid, **columns): return cmd.SetNATRuleInLRouterCommand(self, lrouter, nat_rule_uuid, **columns) def get_lswitch_port(self, lsp_name): try: return self.lookup('Logical_Switch_Port', lsp_name) except idlutils.RowNotFound: return None def get_parent_port(self, lsp_name): lsp = self.get_lswitch_port(lsp_name) if not lsp: return '' return lsp.parent_name def get_lswitch(self, lswitch_name): # FIXME(lucasagomes): We should refactor those get_*() # methods. Some of 'em require the name, others IDs etc... It can # be confusing. if uuidutils.is_uuid_like(lswitch_name): lswitch_name = utils.ovn_name(lswitch_name) try: return self.lookup('Logical_Switch', lswitch_name) except idlutils.RowNotFound: return None def get_ls_and_dns_record(self, lswitch_name): ls = self.get_lswitch(lswitch_name) if not ls: return (None, None) if not hasattr(ls, 'dns_records'): return (ls, None) for dns_row in ls.dns_records: if dns_row.external_ids.get('ls_name') == lswitch_name: return (ls, dns_row) return (ls, None) def get_floatingip(self, fip_id): # TODO(dalvarez): remove this check once the minimum OVS required # version contains the column (when OVS 2.8.2 is released). if not self.is_col_present('NAT', 'external_ids'): return fip = self.db_find('NAT', ('external_ids', '=', {ovn_const.OVN_FIP_EXT_ID_KEY: fip_id})) result = fip.execute(check_error=True) return result[0] if result else None def get_floatingip_by_ips(self, router_id, logical_ip, external_ip): if not all([router_id, logical_ip, external_ip]): return for nat in self.get_lrouter_nat_rules(utils.ovn_name(router_id)): if (nat['type'] == 'dnat_and_snat' and nat['logical_ip'] == logical_ip and nat['external_ip'] == external_ip): return nat def get_address_set(self, addrset_id, ip_version='ip4'): addr_name = utils.ovn_addrset_name(addrset_id, ip_version) try: return idlutils.row_by_value(self.idl, 'Address_Set', 'name', addr_name) except idlutils.RowNotFound: return None def check_revision_number(self, name, resource, resource_type, if_exists=True): return cmd.CheckRevisionNumberCommand( self, name, resource, resource_type, if_exists) def get_lrouter(self, lrouter_name): if uuidutils.is_uuid_like(lrouter_name): lrouter_name = utils.ovn_name(lrouter_name) # TODO(lucasagomes): Use lr_get() once we start refactoring this # API to use methods from ovsdbapp. 
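# Minimal usage sketch (illustrative; assumes the router exists and
# 'nb_idl' is an OvsdbNbOvnIdl instance):
#
#     row = nb_idl.get_lrouter('a1b2c3d4-0000-0000-0000-000000000000')
#     if row:
#         print(row.name, [p.name for p in row.ports])
#
# A UUID-like argument is first converted with utils.ovn_name() (the
# 'neutron-' prefixed OVN name); None is returned when no row matches.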
lr = self.db_find_rows('Logical_Router', ('name', '=', lrouter_name)) result = lr.execute(check_error=True) return result[0] if result else None def get_lrouter_port(self, lrp_name): # TODO(mangelajo): Implement lrp_get() ovsdbapp and use from here if uuidutils.is_uuid_like(lrp_name): lrp_name = utils.ovn_lrouter_port_name(lrp_name) lrp = self.db_find_rows('Logical_Router_Port', ('name', '=', lrp_name)) result = lrp.execute(check_error=True) return result[0] if result else None def delete_lrouter_ext_gw(self, lrouter_name, if_exists=True): return cmd.DeleteLRouterExtGwCommand(self, lrouter_name, if_exists) def is_port_groups_supported(self): return self.is_table_present('Port_Group') def get_port_group(self, pg_name): if uuidutils.is_uuid_like(pg_name): pg_name = utils.ovn_port_group_name(pg_name) try: for pg in self._tables['Port_Group'].rows.values(): if pg.name == pg_name: return pg except KeyError: # TODO(dalvarez): This except block is added for backwards compat # with old OVN schemas (<=2.9) where Port Groups are not present. # This (and other conditional code around this feature) shall be # removed at some point. return def get_port_groups(self): port_groups = {} try: for row in self._tables['Port_Group'].rows.values(): name = getattr(row, 'name') if not (ovn_const.OVN_SG_EXT_ID_KEY in row.external_ids or name == ovn_const.OVN_DROP_PORT_GROUP_NAME): continue data = {} for row_key in getattr(row, "_data", {}): data[row_key] = getattr(row, row_key) port_groups[name] = data except KeyError: # TODO(dalvarez): This except block is added for backwards compat # with old OVN schemas (<=2.9) where Port Groups are not present. # This (and other conditional code around this feature) shall be # removed at some point. pass return port_groups def check_liveness(self): return cmd.CheckLivenessCommand(self) def set_lswitch_port_to_virtual_type(self, lport_name, vip, virtual_parent, if_exists=True): return cmd.SetLSwitchPortToVirtualTypeCommand( self, lport_name, vip, virtual_parent, if_exists) def unset_lswitch_port_to_virtual_type(self, lport_name, virtual_parent, if_exists=True): return cmd.UnsetLSwitchPortToVirtualTypeCommand( self, lport_name, virtual_parent, if_exists) class OvsdbSbOvnIdl(sb_impl_idl.OvnSbApiIdlImpl, Backend): def __init__(self, connection): super(OvsdbSbOvnIdl, self).__init__(connection) # TODO(twilson) This direct access of the idl should be removed in # favor of a backend-agnostic method self.idl._session.reconnect.set_probe_interval( cfg.get_ovn_ovsdb_probe_interval()) def _get_chassis_physnets(self, chassis): bridge_mappings = chassis.external_ids.get('ovn-bridge-mappings', '') mapping_dict = helpers.parse_mappings(bridge_mappings.split(','), unique_values=False) return list(mapping_dict.keys()) def chassis_exists(self, hostname): cmd = self.db_find('Chassis', ('hostname', '=', hostname)) return bool(cmd.execute(check_error=True)) def get_chassis_hostname_and_physnets(self): chassis_info_dict = {} for ch in self.chassis_list().execute(check_error=True): chassis_info_dict[ch.hostname] = self._get_chassis_physnets(ch) return chassis_info_dict def get_gateway_chassis_from_cms_options(self): gw_chassis = [] for ch in self.chassis_list().execute(check_error=True): cms_options = ch.external_ids.get('ovn-cms-options', '') if 'enable-chassis-as-gw' in cms_options.split(','): gw_chassis.append(ch.name) return gw_chassis def get_chassis_and_physnets(self): chassis_info_dict = {} for ch in self.chassis_list().execute(check_error=True): chassis_info_dict[ch.name] = 
self._get_chassis_physnets(ch) return chassis_info_dict def get_all_chassis(self, chassis_type=None): # TODO(azbiswas): Use chassis_type as input once the compute type # preference patch (as part of external ids) merges. return [c.name for c in self.chassis_list().execute(check_error=True)] def get_chassis_data_for_ml2_bind_port(self, hostname): try: cmd = self.db_find_rows('Chassis', ('hostname', '=', hostname)) chassis = next(c for c in cmd.execute(check_error=True)) except StopIteration: msg = _('Chassis with hostname %s does not exist') % hostname raise RuntimeError(msg) return (chassis.external_ids.get('datapath-type', ''), chassis.external_ids.get('iface-types', ''), self._get_chassis_physnets(chassis)) def get_metadata_port_network(self, network): # TODO(twilson) This function should really just take a Row/RowView try: dp = self.lookup('Datapath_Binding', uuid.UUID(network)) except idlutils.RowNotFound: return None cmd = self.db_find_rows('Port_Binding', ('datapath', '=', dp), ('type', '=', 'localport')) return next(iter(cmd.execute(check_error=True)), None) def get_chassis_metadata_networks(self, chassis_name): """Return a list with the metadata networks the chassis is hosting.""" chassis = self.lookup('Chassis', chassis_name) proxy_networks = chassis.external_ids.get( 'neutron-metadata-proxy-networks', None) return proxy_networks.split(',') if proxy_networks else [] def set_chassis_metadata_networks(self, chassis, networks): nets = ','.join(networks) if networks else '' # TODO(twilson) This could just use DbSetCommand return cmd.UpdateChassisExtIdsCommand( self, chassis, {'neutron-metadata-proxy-networks': nets}, if_exists=True) def set_chassis_neutron_description(self, chassis, description, agent_type): desc_key = (ovn_const.OVN_AGENT_METADATA_DESC_KEY if agent_type == ovn_const.OVN_METADATA_AGENT else ovn_const.OVN_AGENT_DESC_KEY) return cmd.UpdateChassisExtIdsCommand( self, chassis, {desc_key: description}, if_exists=False) def get_network_port_bindings_by_ip(self, network, ip_address): rows = self.db_list_rows('Port_Binding').execute(check_error=True) # TODO(twilson) It would be useful to have a db_find that takes a # comparison function return [r for r in rows if (r.mac and str(r.datapath.uuid) == network) and ip_address in r.mac[0].split(' ')] def update_metadata_health_status(self, chassis, nb_cfg): return cmd.UpdateChassisExtIdsCommand( self, chassis, {ovn_const.OVN_AGENT_METADATA_SB_CFG_KEY: str(nb_cfg)}, if_exists=True) def set_port_cidrs(self, name, cidrs): # TODO(twilson) add if_exists to db commands return self.db_set('Port_Binding', name, 'external_ids', {'neutron-port-cidrs': cidrs}) def get_ports_on_chassis(self, chassis): # TODO(twilson) Some day it would be nice to stop passing names around # and just start using chassis objects so db_find_rows could be used rows = self.db_list_rows('Port_Binding').execute(check_error=True) return [r for r in rows if r.chassis and r.chassis[0].name == chassis] def get_logical_port_chassis_and_datapath(self, name): for port in self._tables['Port_Binding'].rows.values(): if port.logical_port == name: datapath = str(port.datapath.uuid) chassis = port.chassis[0].name if port.chassis else None return chassis, datapath ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py0000644000175000017500000006507700000000000031547 0ustar00coreycorey00000000000000# Copyright 2019 Red Hat, Inc. 
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import inspect import threading from futurist import periodics from neutron_lib.api.definitions import external_net from neutron_lib import constants as n_const from neutron_lib import context as n_context from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils from ovsdbapp.backend.ovs_idl import event as row_event from neutron.common.ovn import constants as ovn_const from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf from neutron.db import ovn_hash_ring_db as hash_ring_db from neutron.db import ovn_revision_numbers_db as revision_numbers_db from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_db_sync CONF = cfg.CONF LOG = log.getLogger(__name__) DB_CONSISTENCY_CHECK_INTERVAL = 300 # 5 minutes INCONSISTENCY_TYPE_CREATE_UPDATE = 'create/update' INCONSISTENCY_TYPE_DELETE = 'delete' class MaintenanceThread(object): def __init__(self): self._callables = [] self._thread = None self._worker = None def add_periodics(self, obj): for name, member in inspect.getmembers(obj): if periodics.is_periodic(member): LOG.debug('Periodic task found: %(owner)s.%(member)s', {'owner': obj.__class__.__name__, 'member': name}) self._callables.append((member, (), {})) def start(self): if self._thread is None: self._worker = periodics.PeriodicWorker(self._callables) self._thread = threading.Thread(target=self._worker.start) self._thread.daemon = True self._thread.start() def stop(self): self._worker.stop() self._worker.wait() self._thread.join() self._worker = self._thread = None def rerun_on_schema_updates(func): """Tasks decorated with this will rerun upon database version updates.""" func._rerun_on_schema_updates = True return func class OVNNBDBReconnectionEvent(row_event.RowEvent): """Event listening to reconnections from OVN Northbound DB.""" def __init__(self, driver, version): self.driver = driver self.version = version table = 'Connection' events = (self.ROW_CREATE,) super(OVNNBDBReconnectionEvent, self).__init__(events, table, None) self.event_name = self.__class__.__name__ def run(self, event, row, old): curr_version = self.driver.get_ovn_nbdb_version() if self.version != curr_version: self.driver.nbdb_schema_updated_hook() self.version = curr_version class SchemaAwarePeriodicsBase(object): def __init__(self, ovn_client): self._nb_idl = ovn_client._nb_idl self._set_schema_aware_periodics() self._nb_idl.idl.notify_handler.watch_event(OVNNBDBReconnectionEvent( self, self.get_ovn_nbdb_version())) def get_ovn_nbdb_version(self): return self._nb_idl.idl._db.version def _set_schema_aware_periodics(self): self._schema_aware_periodics = [] for name, member in inspect.getmembers(self): if not inspect.ismethod(member): continue schema_upt = getattr(member, '_rerun_on_schema_updates', None) if schema_upt and periodics.is_periodic(member): LOG.debug('Schema aware periodic task found: ' '%(owner)s.%(member)s', {'owner': self.__class__.__name__, 'member': name}) 
self._schema_aware_periodics.append(member) @abc.abstractmethod def nbdb_schema_updated_hook(self): """Hook invoked upon OVN NB schema is updated.""" class DBInconsistenciesPeriodics(SchemaAwarePeriodicsBase): def __init__(self, ovn_client): self._ovn_client = ovn_client # FIXME(lucasagomes): We should not be accessing private # attributes like that, perhaps we should extend the OVNClient # class and create an interface for the locks ? self._nb_idl = self._ovn_client._nb_idl self._sb_idl = self._ovn_client._sb_idl self._idl = self._nb_idl.idl self._idl.set_lock('ovn_db_inconsistencies_periodics') self._sync_timer = timeutils.StopWatch() super(DBInconsistenciesPeriodics, self).__init__(ovn_client) self._resources_func_map = { ovn_const.TYPE_NETWORKS: { 'neutron_get': self._ovn_client._plugin.get_network, 'ovn_get': self._nb_idl.get_lswitch, 'ovn_create': self._ovn_client.create_network, 'ovn_update': self._ovn_client.update_network, 'ovn_delete': self._ovn_client.delete_network, }, ovn_const.TYPE_PORTS: { 'neutron_get': self._ovn_client._plugin.get_port, 'ovn_get': self._nb_idl.get_lswitch_port, 'ovn_create': self._ovn_client.create_port, 'ovn_update': self._ovn_client.update_port, 'ovn_delete': self._ovn_client.delete_port, }, ovn_const.TYPE_FLOATINGIPS: { 'neutron_get': self._ovn_client._l3_plugin.get_floatingip, 'ovn_get': self._nb_idl.get_floatingip, 'ovn_create': self._ovn_client.create_floatingip, 'ovn_update': self._ovn_client.update_floatingip, 'ovn_delete': self._ovn_client.delete_floatingip, }, ovn_const.TYPE_ROUTERS: { 'neutron_get': self._ovn_client._l3_plugin.get_router, 'ovn_get': self._nb_idl.get_lrouter, 'ovn_create': self._ovn_client.create_router, 'ovn_update': self._ovn_client.update_router, 'ovn_delete': self._ovn_client.delete_router, }, ovn_const.TYPE_SECURITY_GROUPS: { 'neutron_get': self._ovn_client._plugin.get_security_group, 'ovn_get': self._get_security_group, 'ovn_create': self._ovn_client.create_security_group, 'ovn_delete': self._ovn_client.delete_security_group, }, ovn_const.TYPE_SECURITY_GROUP_RULES: { 'neutron_get': self._ovn_client._plugin.get_security_group_rule, 'ovn_get': self._nb_idl.get_acl_by_id, 'ovn_create': self._ovn_client.create_security_group_rule, 'ovn_delete': self._ovn_client.delete_security_group_rule, }, ovn_const.TYPE_ROUTER_PORTS: { 'neutron_get': self._ovn_client._plugin.get_port, 'ovn_get': self._nb_idl.get_lrouter_port, 'ovn_create': self._create_lrouter_port, 'ovn_update': self._ovn_client.update_router_port, 'ovn_delete': self._ovn_client.delete_router_port, }, } def _get_security_group(self, uuid): return (self._nb_idl.get_address_set(uuid) or self._nb_idl.get_port_group(uuid)) @property def has_lock(self): return not self._idl.is_lock_contended def nbdb_schema_updated_hook(self): if not self.has_lock: return for func in self._schema_aware_periodics: LOG.debug('OVN Northbound DB schema version was updated,' 'invoking "%s"', func.__name__) try: func() except periodics.NeverAgain: pass except Exception: LOG.exception( 'Unknown error while executing "%s"', func.__name__) def _fix_create_update(self, context, row): res_map = self._resources_func_map[row.resource_type] try: # Get the latest version of the resource in Neutron DB n_obj = res_map['neutron_get'](context, row.resource_uuid) except n_exc.NotFound: LOG.warning('Skip fixing resource %(res_uuid)s (type: ' '%(res_type)s). 
Resource does not exist in Neutron '
                        'database anymore',
                        {'res_uuid': row.resource_uuid,
                         'res_type': row.resource_type})
            return

        ovn_obj = res_map['ovn_get'](row.resource_uuid)
        if not ovn_obj:
            res_map['ovn_create'](context, n_obj)
        else:
            if row.resource_type == ovn_const.TYPE_SECURITY_GROUP_RULES:
                LOG.error("SG rule %s found with a revision number while "
                          "this resource doesn't support updates",
                          row.resource_uuid)
            elif row.resource_type == ovn_const.TYPE_SECURITY_GROUPS:
                # In OVN, we don't care about updates to security groups,
                # so just bump the revision number to whatever it's
                # supposed to be.
                revision_numbers_db.bump_revision(context, n_obj,
                                                  row.resource_type)
            else:
                ext_ids = getattr(ovn_obj, 'external_ids', {})
                ovn_revision = int(ext_ids.get(
                    ovn_const.OVN_REV_NUM_EXT_ID_KEY, -1))
                # If the resource exists in the OVN DB but its revision
                # number differs from the one in the Neutron DB, update it.
                if ovn_revision != n_obj['revision_number']:
                    res_map['ovn_update'](context, n_obj)
                else:
                    # If the resource exists and the revision number
                    # is equal on both databases, just bump the revision on
                    # the cache table.
                    revision_numbers_db.bump_revision(context, n_obj,
                                                      row.resource_type)

    def _fix_delete(self, context, row):
        res_map = self._resources_func_map[row.resource_type]
        ovn_obj = res_map['ovn_get'](row.resource_uuid)
        if not ovn_obj:
            revision_numbers_db.delete_revision(
                context, row.resource_uuid, row.resource_type)
        else:
            res_map['ovn_delete'](context, row.resource_uuid)

    def _fix_create_update_subnet(self, context, row):
        # Get the latest version of the subnet in the Neutron DB
        sn_db_obj = self._ovn_client._plugin.get_subnet(
            context, row.resource_uuid)
        n_db_obj = self._ovn_client._plugin.get_network(
            context, sn_db_obj['network_id'])

        if row.revision_number == ovn_const.INITIAL_REV_NUM:
            self._ovn_client.create_subnet(context, sn_db_obj, n_db_obj)
        else:
            self._ovn_client.update_subnet(context, sn_db_obj, n_db_obj)

    # The migration will run just once per neutron-server instance. If the
    # lock is held by some other neutron-server instance in the cloud, we'll
    # attempt to perform the migration every 10 seconds until completed.
    @periodics.periodic(spacing=10, run_immediately=True)
    @rerun_on_schema_updates
    def migrate_to_port_groups(self):
        """Perform the migration from Address Sets to Port Groups."""
        # TODO(dalvarez): Remove this in U cycle when we're sure that all
        # versions are running using Port Groups (and OVS >= 2.10).

        # If Port Groups are not supported or we've already migrated, we
        # don't need to attempt to migrate again.
        if (not self._nb_idl.is_port_groups_supported() or
                not self._nb_idl.get_address_sets()):
            raise periodics.NeverAgain()

        # Only the worker holding a valid lock within OVSDB will perform the
        # migration.
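# The locking convention used throughout these periodics, as a short
# sketch (the lock name is the real one set in __init__):
#
#     self._idl.set_lock('ovn_db_inconsistencies_periodics')
#     ...
#     if self._idl.is_lock_contended:  # another worker owns the lock
#         return                       # skip this run
#
# Every worker schedules the task, but only the OVSDB lock owner
# actually mutates the OVN database.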
        if not self.has_lock:
            return

        admin_context = n_context.get_admin_context()
        nb_sync = ovn_db_sync.OvnNbSynchronizer(
            self._ovn_client._plugin, self._nb_idl, self._ovn_client._sb_idl,
            None, None)
        nb_sync.migrate_to_port_groups(admin_context)
        raise periodics.NeverAgain()

    def _log_maintenance_inconsistencies(self, create_update_inconsistencies,
                                         delete_inconsistencies):
        if not CONF.debug:
            return

        def _log(inconsistencies, type_):
            if not inconsistencies:
                return

            c = {}
            for f in inconsistencies:
                if f.resource_type not in c:
                    c[f.resource_type] = 1
                else:
                    c[f.resource_type] += 1

            fail_str = ', '.join('{}={}'.format(k, v) for k, v in c.items())
            LOG.debug('Maintenance task: Number of inconsistencies '
                      'found at %(type_)s: %(fail_str)s',
                      {'type_': type_, 'fail_str': fail_str})

        _log(create_update_inconsistencies, INCONSISTENCY_TYPE_CREATE_UPDATE)
        _log(delete_inconsistencies, INCONSISTENCY_TYPE_DELETE)

    @periodics.periodic(spacing=DB_CONSISTENCY_CHECK_INTERVAL,
                        run_immediately=True)
    def check_for_inconsistencies(self):
        # Only the worker holding a valid lock within OVSDB will run
        # this periodic
        if not self.has_lock:
            return

        admin_context = n_context.get_admin_context()
        create_update_inconsistencies = (
            revision_numbers_db.get_inconsistent_resources(admin_context))
        delete_inconsistencies = (
            revision_numbers_db.get_deleted_resources(admin_context))
        if not any([create_update_inconsistencies, delete_inconsistencies]):
            LOG.debug('Maintenance task: No inconsistencies found. Skipping')
            return

        LOG.debug('Maintenance task: Synchronizing Neutron '
                  'and OVN databases')
        self._log_maintenance_inconsistencies(create_update_inconsistencies,
                                              delete_inconsistencies)
        self._sync_timer.restart()

        dbg_log_msg = ('Maintenance task: Fixing resource %(res_uuid)s '
                       '(type: %(res_type)s) at %(type_)s')
        # Fix the create/update resources inconsistencies
        for row in create_update_inconsistencies:
            LOG.debug(dbg_log_msg, {'res_uuid': row.resource_uuid,
                                    'res_type': row.resource_type,
                                    'type_': INCONSISTENCY_TYPE_CREATE_UPDATE})
            try:
                # NOTE(lucasagomes): The way to fix subnets is a bit
                # different from other resources. A subnet in OVN language
                # is just a DHCP rule, and this rule only exists if the
                # subnet in Neutron has the "enable_dhcp" attribute set
                # to True. So, it's possible to have a consistent subnet
                # resource even when it does not exist in the OVN database.
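# Condensed summary of the fix-up dispatch below (pseudo-code, not
# executed): subnets go through _fix_create_update_subnet(), everything
# else through _fix_create_update(), which resolves to:
#
#     missing in OVN          -> ovn_create(n_obj)
#     security group rule     -> log error (rules are never updated)
#     security group          -> bump_revision(n_obj)
#     revision numbers differ -> ovn_update(n_obj)
#     revisions equal         -> bump_revision(n_obj)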
if row.resource_type == ovn_const.TYPE_SUBNETS: self._fix_create_update_subnet(admin_context, row) else: self._fix_create_update(admin_context, row) except Exception: LOG.exception('Maintenance task: Failed to fix resource ' '%(res_uuid)s (type: %(res_type)s)', {'res_uuid': row.resource_uuid, 'res_type': row.resource_type}) # Fix the deleted resources inconsistencies for row in delete_inconsistencies: LOG.debug(dbg_log_msg, {'res_uuid': row.resource_uuid, 'res_type': row.resource_type, 'type_': INCONSISTENCY_TYPE_DELETE}) try: if row.resource_type == ovn_const.TYPE_SUBNETS: self._ovn_client.delete_subnet(admin_context, row.resource_uuid) else: self._fix_delete(admin_context, row) except Exception: LOG.exception('Maintenance task: Failed to fix deleted ' 'resource %(res_uuid)s (type: %(res_type)s)', {'res_uuid': row.resource_uuid, 'res_type': row.resource_type}) self._sync_timer.stop() LOG.info('Maintenance task: Synchronization finished ' '(took %.2f seconds)', self._sync_timer.elapsed()) def _create_lrouter_port(self, context, port): router_id = port['device_id'] self._ovn_client._l3_plugin.add_router_interface( context, router_id, {'port_id': port['id']}, may_exist=True) def _check_subnet_global_dhcp_opts(self): inconsistent_subnets = [] admin_context = n_context.get_admin_context() subnet_filter = {'enable_dhcp': [True]} neutron_subnets = self._ovn_client._plugin.get_subnets( admin_context, subnet_filter) global_v4_opts = ovn_conf.get_global_dhcpv4_opts() global_v6_opts = ovn_conf.get_global_dhcpv6_opts() LOG.debug('Checking %s subnets for global DHCP option consistency', len(neutron_subnets)) for subnet in neutron_subnets: ovn_dhcp_opts = self._nb_idl.get_subnet_dhcp_options( subnet['id'])['subnet'] inconsistent_opts = [] if ovn_dhcp_opts: if subnet['ip_version'] == n_const.IP_VERSION_4: for opt, value in global_v4_opts.items(): if value != ovn_dhcp_opts['options'].get(opt, None): inconsistent_opts.append(opt) if subnet['ip_version'] == n_const.IP_VERSION_6: for opt, value in global_v6_opts.items(): if value != ovn_dhcp_opts['options'].get(opt, None): inconsistent_opts.append(opt) if inconsistent_opts: LOG.debug('Subnet %s has inconsistent DHCP opts: %s', subnet['id'], inconsistent_opts) inconsistent_subnets.append(subnet) return inconsistent_subnets # A static spacing value is used here, but this method will only run # once per lock due to the use of periodics.NeverAgain(). @periodics.periodic(spacing=600, run_immediately=True) def check_global_dhcp_opts(self): # This periodic task is included in DBInconsistenciesPeriodics since # it uses the lock to ensure only one worker is executing if not self.has_lock: return if (not ovn_conf.get_global_dhcpv4_opts() and not ovn_conf.get_global_dhcpv6_opts()): # No need to scan the subnets if the settings are unset. 
raise periodics.NeverAgain() LOG.debug('Maintenance task: Checking DHCP options on subnets') self._sync_timer.restart() fix_subnets = self._check_subnet_global_dhcp_opts() if fix_subnets: admin_context = n_context.get_admin_context() LOG.debug('Triggering update for %s subnets', len(fix_subnets)) for subnet in fix_subnets: neutron_net = self._ovn_client._plugin.get_network( admin_context, subnet['network_id']) try: self._ovn_client.update_subnet(admin_context, subnet, neutron_net) except Exception: LOG.exception('Failed to update subnet %s', subnet['id']) self._sync_timer.stop() LOG.info('Maintenance task: DHCP options check finished ' '(took %.2f seconds)', self._sync_timer.elapsed()) raise periodics.NeverAgain() # A static spacing value is used here, but this method will only run # once per lock due to the use of periodics.NeverAgain(). @periodics.periodic(spacing=1800, run_immediately=True) def check_metadata_ports(self): # If OVN metadata is disabled do not run this task again if not ovn_conf.is_ovn_metadata_enabled(): raise periodics.NeverAgain() # Make sure that only one worker is executing this if not self.has_lock: return admin_context = n_context.get_admin_context() for n in self._ovn_client._plugin.get_networks(admin_context): self._ovn_client.create_metadata_port(admin_context, n) raise periodics.NeverAgain() # TODO(lucasagomes): Remove this in the U cycle # A static spacing value is used here, but this method will only run # once per lock due to the use of periodics.NeverAgain(). @periodics.periodic(spacing=600, run_immediately=True) def check_for_port_security_unknown_address(self): if not self.has_lock: return for port in self._nb_idl.lsp_list().execute(check_error=True): if port.type == ovn_const.LSP_TYPE_LOCALNET: continue addresses = port.addresses type_ = port.type.strip() if not port.port_security: if not type_ and ovn_const.UNKNOWN_ADDR not in addresses: addresses.append(ovn_const.UNKNOWN_ADDR) elif type_ and ovn_const.UNKNOWN_ADDR in addresses: addresses.remove(ovn_const.UNKNOWN_ADDR) else: if type_ and ovn_const.UNKNOWN_ADDR in addresses: addresses.remove(ovn_const.UNKNOWN_ADDR) elif not type_ and ovn_const.UNKNOWN_ADDR in addresses: addresses.remove(ovn_const.UNKNOWN_ADDR) if addresses: self._nb_idl.lsp_set_addresses( port.name, addresses=addresses).execute(check_error=True) else: self._nb_idl.db_clear( 'Logical_Switch_Port', port.name, 'addresses').execute(check_error=True) raise periodics.NeverAgain() # A static spacing value is used here, but this method will only run # once per lock due to the use of periodics.NeverAgain(). @periodics.periodic(spacing=600, run_immediately=True) def check_for_fragmentation_support(self): if not self.has_lock: return context = n_context.get_admin_context() for net in self._ovn_client._plugin.get_networks( context, {external_net.EXTERNAL: [True]}): self._ovn_client.set_gateway_mtu(context, net) raise periodics.NeverAgain() # A static spacing value is used here, but this method will only run # once per lock due to the use of periodics.NeverAgain(). 
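# The "run once per lock" pattern shared by the tasks below, as a
# minimal standalone sketch (task name and body are illustrative):
#
#     @periodics.periodic(spacing=600, run_immediately=True)
#     def one_shot_check(self):
#         if not self.has_lock:
#             return                    # retried at the next spacing
#         ...                           # perform the one-time fix-up
#         raise periodics.NeverAgain()  # de-schedules the task for good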
@periodics.periodic(spacing=600, run_immediately=True) def check_for_igmp_snoop_support(self): if not self.has_lock: return with self._nb_idl.transaction(check_error=True) as txn: value = ('true' if ovn_conf.is_igmp_snooping_enabled() else 'false') for ls in self._nb_idl.ls_list().execute(check_error=True): if ls.other_config.get(ovn_const.MCAST_SNOOP, None) == value: continue txn.add(self._nb_idl.db_set( 'Logical_Switch', ls.name, ('other_config', { ovn_const.MCAST_SNOOP: value, ovn_const.MCAST_FLOOD_UNREGISTERED: value}))) raise periodics.NeverAgain() # A static spacing value is used here, but this method will only run # once per lock due to the use of periodics.NeverAgain(). @periodics.periodic(spacing=600, run_immediately=True) def check_for_ha_chassis_group_address(self): # If external ports is not supported stop running # this periodic task if not self._ovn_client.is_external_ports_supported(): raise periodics.NeverAgain() if not self.has_lock: return default_ch_grp = self._nb_idl.ha_chassis_group_add( ovn_const.HA_CHASSIS_GROUP_DEFAULT_NAME, may_exist=True).execute( check_error=True) # NOTE(lucasagomes): Find the existing chassis with the highest # priority and keep it as being the highest to avoid moving # things around high_prio_ch = max(default_ch_grp.ha_chassis, key=lambda x: x.priority, default=None) all_ch = self._sb_idl.get_all_chassis() gw_ch = self._sb_idl.get_gateway_chassis_from_cms_options() ch_to_del = set(all_ch) - set(gw_ch) with self._nb_idl.transaction(check_error=True) as txn: for ch in ch_to_del: txn.add(self._nb_idl.ha_chassis_group_del_chassis( ovn_const.HA_CHASSIS_GROUP_DEFAULT_NAME, ch, if_exists=True)) # NOTE(lucasagomes): If the high priority chassis is in # the list of chassis to be added/updated. Add it first with # the highest priority number possible and then add the rest # (the priority of the rest of the chassis does not matter # since only the highest one is active) priority = ovn_const.HA_CHASSIS_GROUP_HIGHEST_PRIORITY if high_prio_ch and high_prio_ch.chassis_name in gw_ch: txn.add(self._nb_idl.ha_chassis_group_add_chassis( ovn_const.HA_CHASSIS_GROUP_DEFAULT_NAME, high_prio_ch.chassis_name, priority=priority)) gw_ch.remove(high_prio_ch.chassis_name) priority -= 1 for ch in gw_ch: txn.add(self._nb_idl.ha_chassis_group_add_chassis( ovn_const.HA_CHASSIS_GROUP_DEFAULT_NAME, ch, priority=priority)) priority -= 1 raise periodics.NeverAgain() class HashRingHealthCheckPeriodics(object): def __init__(self, group): self._group = group self.ctx = n_context.get_admin_context() @periodics.periodic(spacing=ovn_const.HASH_RING_TOUCH_INTERVAL) def touch_hash_ring_nodes(self): # NOTE(lucasagomes): Note that we do not rely on the OVSDB lock # here because we want the maintenance tasks from each instance to # execute this task. hash_ring_db.touch_nodes_from_host(self.ctx, self._group) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py0000644000175000017500000033610500000000000031416 0ustar00coreycorey00000000000000# Copyright 2019 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import copy import netaddr from neutron_lib.api.definitions import l3 from neutron_lib.api.definitions import port_security as psec from neutron_lib.api.definitions import portbindings from neutron_lib.api.definitions import provider_net as pnet from neutron_lib import constants as const from neutron_lib import context as n_context from neutron_lib import exceptions as n_exc from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from neutron_lib.plugins import utils as p_utils from neutron_lib.utils import helpers from neutron_lib.utils import net as n_net from oslo_config import cfg from oslo_log import log from oslo_utils import excutils from ovsdbapp.backend.ovs_idl import idlutils from neutron.common.ovn import acl as ovn_acl from neutron.common.ovn import constants as ovn_const from neutron.common.ovn import utils from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf from neutron.db import ovn_revision_numbers_db as db_rev from neutron.scheduler import l3_ovn_scheduler from neutron.services.qos.drivers.ovn import driver as qos_driver LOG = log.getLogger(__name__) OvnPortInfo = collections.namedtuple( 'OvnPortInfo', ['type', 'options', 'addresses', 'port_security', 'parent_name', 'tag', 'dhcpv4_options', 'dhcpv6_options', 'cidrs', 'device_owner', 'security_group_ids']) GW_INFO = collections.namedtuple('GatewayInfo', ['network_id', 'subnet_id', 'router_ip', 'gateway_ip', 'ip_version', 'ip_prefix']) class OVNClient(object): def __init__(self, nb_idl, sb_idl): self._nb_idl = nb_idl self._sb_idl = sb_idl self._plugin_property = None self._l3_plugin_property = None self._qos_driver = qos_driver.OVNQosDriver(self) self._ovn_scheduler = l3_ovn_scheduler.get_scheduler() @property def _plugin(self): if self._plugin_property is None: self._plugin_property = directory.get_plugin() return self._plugin_property @property def _l3_plugin(self): if self._l3_plugin_property is None: self._l3_plugin_property = directory.get_plugin( plugin_constants.L3) return self._l3_plugin_property def _transaction(self, commands, txn=None): """Create a new transaction or add the commands to an existing one.""" if txn is None: with self._nb_idl.transaction(check_error=True) as new_txn: for cmd in commands: new_txn.add(cmd) else: for cmd in commands: txn.add(cmd) def _is_virtual_port_supported(self): # TODO(lucasagomes): Remove this method in the future. 
The # "virtual" port type was added in the version 2.12 of OVN return self._sb_idl.is_col_present('Port_Binding', 'virtual_parent') def is_external_ports_supported(self): return self._nb_idl.is_col_present( 'Logical_Switch_Port', 'ha_chassis_group') def _get_allowed_addresses_from_port(self, port): if not port.get(psec.PORTSECURITY): return [], [] if utils.is_lsp_trusted(port): return [], [] allowed_addresses = set() new_macs = set() addresses = port['mac_address'] for ip in port.get('fixed_ips', []): addresses += ' ' + ip['ip_address'] for allowed_address in port.get('allowed_address_pairs', []): # If allowed address pair has same mac as the port mac, # append the allowed ip address to the 'addresses'. # Else we will have multiple entries for the same mac in # 'Logical_Switch_Port.port_security'. if allowed_address['mac_address'] == port['mac_address']: addresses += ' ' + allowed_address['ip_address'] else: allowed_addresses.add(allowed_address['mac_address'] + ' ' + allowed_address['ip_address']) new_macs.add(allowed_address['mac_address']) allowed_addresses.add(addresses) return list(allowed_addresses), list(new_macs) def _get_subnet_dhcp_options_for_port(self, port, ip_version): """Returns the subnet dhcp options for the port. Return the first found DHCP options belong for the port. """ subnets = [ fixed_ip['subnet_id'] for fixed_ip in port['fixed_ips'] if netaddr.IPAddress(fixed_ip['ip_address']).version == ip_version] get_opts = self._nb_idl.get_subnets_dhcp_options(subnets) if get_opts: if ip_version == const.IP_VERSION_6: # Always try to find a dhcpv6 stateful v6 subnet to return. # This ensures port can get one stateful v6 address when port # has multiple dhcpv6 stateful and stateless subnets. for opts in get_opts: # We are setting ovn_const.DHCPV6_STATELESS_OPT to "true" # in _get_ovn_dhcpv6_opts, so entries in DHCP_Options table # should have unicode type 'true' if they were defined as # dhcpv6 stateless. if opts['options'].get( ovn_const.DHCPV6_STATELESS_OPT) != 'true': return opts return get_opts[0] def _get_port_dhcp_options(self, port, ip_version): """Return dhcp options for port. In case the port is dhcp disabled, or IP addresses it has belong to dhcp disabled subnets, returns None. Otherwise, returns a dict: - with content from a existing DHCP_Options row for subnet, if the port has no extra dhcp options. - with only one item ('cmd', AddDHCPOptionsCommand(..)), if the port has extra dhcp options. The command should be processed in the same transaction with port creating or updating command to avoid orphan row issue happen. """ lsp_dhcp_disabled, lsp_dhcp_opts = utils.get_lsp_dhcp_opts( port, ip_version) if lsp_dhcp_disabled: return subnet_dhcp_options = self._get_subnet_dhcp_options_for_port( port, ip_version) if not subnet_dhcp_options: # NOTE(lizk): It's possible for Neutron to configure a port with IP # address belongs to subnet disabled dhcp. And no DHCP_Options row # will be inserted for such a subnet. So in that case, the subnet # dhcp options here will be None. return if not lsp_dhcp_opts: return subnet_dhcp_options # This port has extra DHCP options defined, so we will create a new # row in DHCP_Options table for it. 
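# For reference, the two non-None shapes returned by
# _get_port_dhcp_options() (values illustrative):
#
#     {'cidr': '10.0.0.0/24', 'options': {...}, 'external_ids': {...},
#      'uuid': <DHCP_Options row uuid>}     # subnet-level options row
#
#     {'cmd': AddDHCPOptionsCommand(...)}   # port has extra options; the
#                                           # command must run in the same
#                                           # transaction as the port one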
subnet_dhcp_options['options'].update(lsp_dhcp_opts) subnet_dhcp_options['external_ids'].update( {'port_id': port['id']}) subnet_id = subnet_dhcp_options['external_ids']['subnet_id'] add_dhcp_opts_cmd = self._nb_idl.add_dhcp_options( subnet_id, port_id=port['id'], cidr=subnet_dhcp_options['cidr'], options=subnet_dhcp_options['options'], external_ids=subnet_dhcp_options['external_ids']) return {'cmd': add_dhcp_opts_cmd} def get_virtual_port_parents(self, virtual_ip, port): ls = self._nb_idl.ls_get(utils.ovn_name(port['network_id'])).execute( check_error=True) return [lsp.name for lsp in ls.ports for ps in lsp.port_security if lsp.name != port['id'] and virtual_ip in ps] def _get_port_options(self, port): context = n_context.get_admin_context() binding_prof = utils.validate_and_get_data_from_binding_profile(port) vtep_physical_switch = binding_prof.get('vtep-physical-switch') port_type = '' cidrs = '' if vtep_physical_switch: vtep_logical_switch = binding_prof.get('vtep-logical-switch') port_type = 'vtep' options = {'vtep-physical-switch': vtep_physical_switch, 'vtep-logical-switch': vtep_logical_switch} addresses = [ovn_const.UNKNOWN_ADDR] parent_name = [] tag = [] port_security = [] else: options = {} parent_name = binding_prof.get('parent_name', []) tag = binding_prof.get('tag', []) address = port['mac_address'] for ip in port.get('fixed_ips', []): try: subnet = self._plugin.get_subnet(context, ip['subnet_id']) except n_exc.SubnetNotFound: continue ip_addr = ip['ip_address'] address += ' ' + ip_addr cidrs += ' {}/{}'.format(ip['ip_address'], subnet['cidr'].split('/')[1]) # Check if the port being created is a virtual port if (self._is_virtual_port_supported() and not port['device_owner']): parents = self.get_virtual_port_parents(ip_addr, port) if parents: port_type = ovn_const.LSP_TYPE_VIRTUAL options[ovn_const.LSP_OPTIONS_VIRTUAL_IP_KEY] = ip_addr options[ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY] = ( ','.join(parents)) port_security, new_macs = ( self._get_allowed_addresses_from_port(port)) addresses = [address] addresses.extend(new_macs) # Only adjust the OVN type if the port is not owned by Neutron # DHCP agents. if (port['device_owner'] == const.DEVICE_OWNER_DHCP and not utils.is_neutron_dhcp_agent_port(port)): port_type = 'localport' capabilities = utils.get_port_capabilities(port) vnic_type = port.get(portbindings.VNIC_TYPE, portbindings.VNIC_NORMAL) if (vnic_type == portbindings.VNIC_DIRECT and ovn_const.PORT_CAP_SWITCHDEV not in capabilities): if self.is_external_ports_supported(): port_type = ovn_const.LSP_TYPE_EXTERNAL else: LOG.warning('The version of OVN used does not support ' 'the "external ports" feature used for ' 'SR-IOV ports with OVN native DHCP') # The "unknown" address should only be set for the normal LSP # ports (the ones which type is empty) if not port_security and not port_type: # Port security is disabled for this port. # So this port can send traffic with any mac address. # OVN allows any mac address from a port if "unknown" # is added to the Logical_Switch_Port.addresses column. # So add it. 
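# Example of the resulting Logical_Switch_Port.addresses column for a
# normal port with port security disabled (MAC and IP are illustrative):
#
#     ['fa:16:3e:11:22:33 10.0.0.5', 'unknown']
#
# where 'unknown' makes OVN accept traffic from any source MAC on the
# port.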
addresses.append(ovn_const.UNKNOWN_ADDR) dhcpv4_options = self._get_port_dhcp_options(port, const.IP_VERSION_4) dhcpv6_options = self._get_port_dhcp_options(port, const.IP_VERSION_6) # HA Chassis Group will bind the port to the highest # priority Chassis if port_type != ovn_const.LSP_TYPE_EXTERNAL: options.update({'requested-chassis': port.get(portbindings.HOST_ID, '')}) device_owner = port.get('device_owner', '') sg_ids = ' '.join(utils.get_lsp_security_groups(port)) return OvnPortInfo(port_type, options, addresses, port_security, parent_name, tag, dhcpv4_options, dhcpv6_options, cidrs.strip(), device_owner, sg_ids) def _get_default_ha_chassis_group(self): return self._nb_idl.ha_chassis_group_get( ovn_const.HA_CHASSIS_GROUP_DEFAULT_NAME).execute( check_error=True).uuid def create_port(self, context, port): if utils.is_lsp_ignored(port): return port_info = self._get_port_options(port) external_ids = {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port['name'], ovn_const.OVN_DEVID_EXT_ID_KEY: port['device_id'], ovn_const.OVN_PROJID_EXT_ID_KEY: port['project_id'], ovn_const.OVN_CIDRS_EXT_ID_KEY: port_info.cidrs, ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY: port_info.device_owner, ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY: utils.ovn_name(port['network_id']), ovn_const.OVN_SG_IDS_EXT_ID_KEY: port_info.security_group_ids, ovn_const.OVN_REV_NUM_EXT_ID_KEY: str( utils.get_revision_number( port, ovn_const.TYPE_PORTS))} lswitch_name = utils.ovn_name(port['network_id']) sg_cache = {} subnet_cache = {} # It's possible to have a network created on one controller and then a # port created on a different controller quickly enough that the second # controller does not yet see that network in its local cache of the # OVN northbound database. Check if the logical switch is present # or not in the idl's local copy of the database before creating # the lswitch port. self._nb_idl.check_for_row_by_value_and_retry( 'Logical_Switch', 'name', lswitch_name) with self._nb_idl.transaction(check_error=True) as txn: if not port_info.dhcpv4_options: dhcpv4_options = [] elif 'cmd' in port_info.dhcpv4_options: dhcpv4_options = txn.add(port_info.dhcpv4_options['cmd']) else: dhcpv4_options = [port_info.dhcpv4_options['uuid']] if not port_info.dhcpv6_options: dhcpv6_options = [] elif 'cmd' in port_info.dhcpv6_options: dhcpv6_options = txn.add(port_info.dhcpv6_options['cmd']) else: dhcpv6_options = [port_info.dhcpv6_options['uuid']] # The lport_name *must* be neutron port['id']. It must match the # iface-id set in the Interfaces table of the Open_vSwitch # database which nova sets to be the port ID. kwargs = { 'lport_name': port['id'], 'lswitch_name': lswitch_name, 'addresses': port_info.addresses, 'external_ids': external_ids, 'parent_name': port_info.parent_name, 'tag': port_info.tag, 'enabled': port.get('admin_state_up'), 'options': port_info.options, 'type': port_info.type, 'port_security': port_info.port_security, 'dhcpv4_options': dhcpv4_options, 'dhcpv6_options': dhcpv6_options } if (self.is_external_ports_supported() and port_info.type == ovn_const.LSP_TYPE_EXTERNAL): kwargs['ha_chassis_group'] = ( self._get_default_ha_chassis_group()) # NOTE(mjozefcz): Do not set addresses if the port is not # bound, has no device_owner and it is OVN LB VIP port. # For more details check related bug #1789686. 
if (port.get('name').startswith(ovn_const.LB_VIP_PORT_PREFIX) and not port.get('device_owner') and port.get(portbindings.VIF_TYPE) == portbindings.VIF_TYPE_UNBOUND): kwargs['addresses'] = [] # Check if the parent port was created with the # allowed_address_pairs already set allowed_address_pairs = port.get('allowed_address_pairs', []) if (self._is_virtual_port_supported() and allowed_address_pairs and port_info.type != ovn_const.LSP_TYPE_VIRTUAL): addrs = [addr['ip_address'] for addr in allowed_address_pairs] self._set_unset_virtual_port_type(context, txn, port, addrs) port_cmd = txn.add(self._nb_idl.create_lswitch_port( **kwargs)) # Handle ACL's for this port. If we're not using Port Groups # because either the schema doesn't support it or we didn't # migrate old SGs from Address Sets to Port Groups, then we # keep the old behavior. For those SGs this port belongs to # that are modelled as a Port Group, we'll use it. sg_ids = utils.get_lsp_security_groups(port) if self._nb_idl.is_port_groups_supported(): # If this is not a trusted port or port security is enabled, # add it to the default drop Port Group so that all traffic # is dropped by default. if not utils.is_lsp_trusted(port) or port_info.port_security: self._add_port_to_drop_port_group(port_cmd, txn) # For SGs modelled as OVN Port Groups, just add the port to # its Port Group. for sg in sg_ids: txn.add(self._nb_idl.pg_add_ports( utils.ovn_port_group_name(sg), port_cmd)) else: # SGs modelled as Address Sets: acls_new = ovn_acl.add_acls(self._plugin, context, port, sg_cache, subnet_cache, self._nb_idl) for acl in acls_new: txn.add(self._nb_idl.add_acl(**acl)) if port.get('fixed_ips') and sg_ids: addresses = ovn_acl.acl_port_ips(port) # NOTE(rtheis): Fail port creation if the address set # doesn't exist. This prevents ports from being created on # any security groups out-of-sync between neutron and OVN. for sg_id in sg_ids: for ip_version in addresses: if addresses[ip_version]: txn.add(self._nb_idl.update_address_set( name=utils.ovn_addrset_name(sg_id, ip_version), addrs_add=addresses[ip_version], addrs_remove=None, if_exists=False)) if self.is_dns_required_for_port(port): self.add_txns_to_sync_port_dns_records(txn, port) # Add qos for port by qos table of logical flow instead of tc qos_options = self._qos_driver.get_qos_options(port) if qos_options: qos_rule_column = self._create_qos_rules(qos_options, port, lswitch_name) txn.add(self._nb_idl.qos_add(**qos_rule_column)) db_rev.bump_revision(context, port, ovn_const.TYPE_PORTS) def _set_unset_virtual_port_type(self, context, txn, parent_port, addresses, unset=False): cmd = self._nb_idl.set_lswitch_port_to_virtual_type if unset: cmd = self._nb_idl.unset_lswitch_port_to_virtual_type for addr in addresses: virt_port = self._plugin.get_ports(context, filters={ portbindings.VIF_TYPE: portbindings.VIF_TYPE_UNBOUND, 'network_id': [parent_port['network_id']], 'fixed_ips': {'ip_address': [addr]}}) if not virt_port: continue virt_port = virt_port[0] args = {'lport_name': virt_port['id'], 'virtual_parent': parent_port['id'], 'if_exists': True} LOG.debug("Parent port %(virtual_parent)s found for " "virtual port %(lport_name)s", args) if not unset: args['vip'] = addr txn.add(cmd(**args)) # TODO(lucasagomes): The ``port_object`` parameter was added to # keep things backward compatible. Remove it in the Rocky release. 
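# Condensed sketch of the revision-guarded update flow implemented in
# update_port() below (surrounding details elided):
#
#     check_rev_cmd = self._nb_idl.check_revision_number(
#         port['id'], port, ovn_const.TYPE_PORTS)
#     with self._nb_idl.transaction(check_error=True) as txn:
#         txn.add(check_rev_cmd)
#         ...  # set_lswitch_port, ACL / Port Group / QoS updates
#     if check_rev_cmd.result == ovn_const.TXN_COMMITTED:
#         db_rev.bump_revision(context, port, ovn_const.TYPE_PORTS)
#
# so the Neutron revision cache is only bumped when the OVN transaction
# really committed.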
def update_port(self, context, port, qos_options=None, port_object=None): if utils.is_lsp_ignored(port): return # Does not need to add qos rule to port_info port_info = self._get_port_options(port) external_ids = {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port['name'], ovn_const.OVN_DEVID_EXT_ID_KEY: port['device_id'], ovn_const.OVN_PROJID_EXT_ID_KEY: port['project_id'], ovn_const.OVN_CIDRS_EXT_ID_KEY: port_info.cidrs, ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY: port_info.device_owner, ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY: utils.ovn_name(port['network_id']), ovn_const.OVN_SG_IDS_EXT_ID_KEY: port_info.security_group_ids, ovn_const.OVN_REV_NUM_EXT_ID_KEY: str( utils.get_revision_number( port, ovn_const.TYPE_PORTS))} lswitch_name = utils.ovn_name(port['network_id']) sg_cache = {} subnet_cache = {} check_rev_cmd = self._nb_idl.check_revision_number( port['id'], port, ovn_const.TYPE_PORTS) with self._nb_idl.transaction(check_error=True) as txn: txn.add(check_rev_cmd) columns_dict = {} if utils.is_lsp_router_port(port): port_info.options.update( self._nb_idl.get_router_port_options(port['id'])) else: columns_dict['type'] = port_info.type columns_dict['addresses'] = port_info.addresses if not port_info.dhcpv4_options: dhcpv4_options = [] elif 'cmd' in port_info.dhcpv4_options: dhcpv4_options = txn.add(port_info.dhcpv4_options['cmd']) else: dhcpv4_options = [port_info.dhcpv4_options['uuid']] if not port_info.dhcpv6_options: dhcpv6_options = [] elif 'cmd' in port_info.dhcpv6_options: dhcpv6_options = txn.add(port_info.dhcpv6_options['cmd']) else: dhcpv6_options = [port_info.dhcpv6_options['uuid']] # NOTE(mjozefcz): Do not set addresses if the port is not # bound, has no device_owner and it is OVN LB VIP port. # For more details check related bug #1789686. if (port.get('name').startswith(ovn_const.LB_VIP_PORT_PREFIX) and not port.get('device_owner') and port.get(portbindings.VIF_TYPE) == portbindings.VIF_TYPE_UNBOUND): columns_dict['addresses'] = [] if self.is_external_ports_supported(): if port_info.type == ovn_const.LSP_TYPE_EXTERNAL: columns_dict['ha_chassis_group'] = ( self._get_default_ha_chassis_group()) else: # Clear the ha_chassis_group field columns_dict['ha_chassis_group'] = [] ovn_port = self._nb_idl.lookup('Logical_Switch_Port', port['id']) addr_pairs_diff = utils.compute_address_pairs_diff(ovn_port, port) if (self._is_virtual_port_supported() and port_info.type != ovn_const.LSP_TYPE_VIRTUAL): self._set_unset_virtual_port_type( context, txn, port, addr_pairs_diff.added) self._set_unset_virtual_port_type( context, txn, port, addr_pairs_diff.removed, unset=True) # NOTE(lizk): Fail port updating if port doesn't exist. This # prevents any new inserted resources to be orphan, such as port # dhcp options or ACL rules for port, e.g. a port was created # without extra dhcp options and security group, while updating # includes the new attributes setting to port. txn.add(self._nb_idl.set_lswitch_port( lport_name=port['id'], external_ids=external_ids, parent_name=port_info.parent_name, tag=port_info.tag, options=port_info.options, enabled=port['admin_state_up'], port_security=port_info.port_security, dhcpv4_options=dhcpv4_options, dhcpv6_options=dhcpv6_options, if_exists=False, **columns_dict)) # Determine if security groups or fixed IPs are updated. 
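# e.g. with old SGs {sg-a, sg-b} and new SGs {sg-b, sg-c}:
#
#     detached_sg_ids = {sg-a, sg-b} - {sg-b, sg-c} = {sg-a}  # remove
#     attached_sg_ids = {sg-b, sg-c} - {sg-a, sg-b} = {sg-c}  # add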
old_sg_ids = set(utils.get_ovn_port_security_groups(ovn_port)) new_sg_ids = set(utils.get_lsp_security_groups(port)) detached_sg_ids = old_sg_ids - new_sg_ids attached_sg_ids = new_sg_ids - old_sg_ids if self._nb_idl.is_port_groups_supported(): for sg in detached_sg_ids: txn.add(self._nb_idl.pg_del_ports( utils.ovn_port_group_name(sg), port['id'])) for sg in attached_sg_ids: txn.add(self._nb_idl.pg_add_ports( utils.ovn_port_group_name(sg), port['id'])) if (not utils.is_lsp_trusted(port) and utils.is_port_security_enabled(port)): self._add_port_to_drop_port_group(port['id'], txn) # If the port doesn't belong to any security group and # port_security is disabled, or it's a trusted port, then # allow all traffic. elif ((not new_sg_ids and not utils.is_port_security_enabled(port)) or utils.is_lsp_trusted(port)): self._del_port_from_drop_port_group(port['id'], txn) else: old_fixed_ips = utils.remove_macs_from_lsp_addresses( ovn_port.addresses) new_fixed_ips = [x['ip_address'] for x in port.get('fixed_ips', [])] is_fixed_ips_updated = ( sorted(old_fixed_ips) != sorted(new_fixed_ips)) port_security_changed = ( utils.is_port_security_enabled(port) != bool(ovn_port.port_security)) # Refresh ACLs for changed security groups or fixed IPs. if (detached_sg_ids or attached_sg_ids or is_fixed_ips_updated or port_security_changed): # Note that update_acls will compare the port's ACLs to # ensure only the necessary ACLs are added and deleted # on the transaction. acls_new = ovn_acl.add_acls(self._plugin, context, port, sg_cache, subnet_cache, self._nb_idl) txn.add(self._nb_idl.update_acls([port['network_id']], [port], {port['id']: acls_new}, need_compare=True)) # Refresh address sets for changed security groups or fixed # IPs. if len(old_fixed_ips) != 0 or len(new_fixed_ips) != 0: addresses = ovn_acl.acl_port_ips(port) addresses_old = utils.sort_ips_by_version( utils.get_ovn_port_addresses(ovn_port)) # Add current addresses to attached security groups. for sg_id in attached_sg_ids: for ip_version in addresses: if addresses[ip_version]: txn.add(self._nb_idl.update_address_set( name=utils.ovn_addrset_name(sg_id, ip_version), addrs_add=addresses[ip_version], addrs_remove=None)) # Remove old addresses from detached security groups. for sg_id in detached_sg_ids: for ip_version in addresses_old: if addresses_old[ip_version]: txn.add(self._nb_idl.update_address_set( name=utils.ovn_addrset_name(sg_id, ip_version), addrs_add=None, addrs_remove=addresses_old[ip_version])) if is_fixed_ips_updated or addr_pairs_diff.changed: # We have refreshed address sets for attached and # detached security groups, so now we only need to take # care of unchanged security groups. 
                        unchanged_sg_ids = new_sg_ids & old_sg_ids
                        for sg_id in unchanged_sg_ids:
                            for ip_version in addresses:
                                addr_add = ((set(addresses[ip_version]) -
                                             set(addresses_old[ip_version]))
                                            or None)
                                addr_remove = (
                                    (set(addresses_old[ip_version]) -
                                     set(addresses[ip_version])) or None)
                                if addr_add or addr_remove:
                                    txn.add(self._nb_idl.update_address_set(
                                        name=utils.ovn_addrset_name(
                                            sg_id, ip_version),
                                        addrs_add=addr_add,
                                        addrs_remove=addr_remove))

            # Update the QoS policy rule: delete the old one, then add the
            # new one. When a port is created with a qos_policy, the
            # update_port method is executed as well and the qos_policy is
            # carried in the port dict, so fetch the policy from the port
            # dict when qos_options is None.
            qos_options_new = (qos_options if qos_options
                               else self._qos_driver.get_qos_options(port))
            # If port_object is None, we also need to get the necessary
            # params to delete the qos rule.
            qos_options_old = (self._qos_driver.get_qos_options(port_object)
                               if port_object else qos_options_new)
            ovn_net = self._nb_idl.get_lswitch(lswitch_name)
            ovn_net_qos_policy = (
                ovn_net.external_ids[ovn_const.OVN_QOS_POLICY_EXT_ID_KEY]
                if ovn_const.OVN_QOS_POLICY_EXT_ID_KEY in
                ovn_net.external_ids else None)
            if qos_options_new:
                qos_rule_column_old = self._create_qos_rules(
                    qos_options_old, port, lswitch_name, if_delete=True)
                # Delete the old QoS rule first
                txn.add(self._nb_idl.qos_del(**qos_rule_column_old))
                # Add the new QoS rule
                qos_rule_column_new = self._create_qos_rules(
                    qos_options_new, port, lswitch_name)
                txn.add(self._nb_idl.qos_add(**qos_rule_column_new))
            # If the port qos_rule is being removed via the
            # '--no-qos-policy' parameter
            elif qos_options_old:
                qos_rule_column_old = self._create_qos_rules(
                    qos_options_old, port, lswitch_name, if_delete=True)
                # Delete the old QoS rule
                txn.add(self._nb_idl.qos_del(**qos_rule_column_old))
            # If the network qos_rule is being removed via the
            # '--no-qos-policy' parameter
            elif not qos_options_old and ovn_net_qos_policy:
                txn.add(self._nb_idl.qos_del(lswitch_name))

            if self.is_dns_required_for_port(port):
                self.add_txns_to_sync_port_dns_records(
                    txn, port, original_port=port_object)
            elif port_object and self.is_dns_required_for_port(port_object):
                # We need to remove the old entries
                self.add_txns_to_remove_port_dns_records(txn, port_object)

        if check_rev_cmd.result == ovn_const.TXN_COMMITTED:
            db_rev.bump_revision(context, port, ovn_const.TYPE_PORTS)

    def _create_qos_rules(self, qos_options, port, lswitch_name,
                          if_delete=False):
        qos_rule = {}
        direction = ('from-lport' if qos_options['direction'] == 'egress'
                     else 'to-lport')
        qos_rule.update(switch=lswitch_name, direction=direction,
                        priority=2002)

        if direction == 'from-lport':
            match = 'inport == ' + '"{}"'.format(port['id'])
            qos_rule.update(match=match)
        else:
            match = 'outport == ' + '"{}"'.format(port['id'])
            qos_rule.update(match=match)

        # QoS of bandwidth_limit
        if 'qos_max_rate' in qos_options:
            burst = qos_options.get('qos_burst')
            qos_rule.update(rate=qos_options['qos_max_rate'],
                            burst=burst, dscp=None)
        # QoS of dscp
        elif 'dscp_mark' in qos_options:
            qos_rule.update(rate=None, burst=None,
                            dscp=qos_options['dscp_mark'])

        # The delete method does not need 'rate', 'burst' or 'dscp'
        if if_delete is True:
            qos_rule.pop('rate')
            qos_rule.pop('burst')
            qos_rule.pop('dscp')
        return qos_rule
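    # A minimal sketch (illustrative, not upstream code) of the dict built by
    # _create_qos_rules() above, assuming an egress bandwidth-limit policy;
    # the IDs and rates below are placeholders:
    #
    #   {'switch': 'neutron-<network_id>',
    #    'direction': 'from-lport',
    #    'priority': 2002,
    #    'match': 'inport == "<port_id>"',
    #    'rate': 10000,
    #    'burst': 8000,
    #    'dscp': None}
    #
    # With if_delete=True the 'rate'/'burst'/'dscp' keys are popped again,
    # since qos_del() only needs the match criteria to find the row.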
    def _delete_port(self, port_id, port_object=None):
        ovn_port = self._nb_idl.lookup('Logical_Switch_Port', port_id)
        network_id = ovn_port.external_ids.get(
            ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY)

        # TODO(lucasagomes): For backward compatibility, if network_id
        # is not in the OVNDB, look at the port_object
        if not network_id and port_object:
            network_id = port_object['network_id']

        with self._nb_idl.transaction(check_error=True) as txn:
            txn.add(self._nb_idl.delete_lswitch_port(
                port_id, network_id))
            if not self._nb_idl.is_port_groups_supported():
                txn.add(self._nb_idl.delete_acl(network_id, port_id))
                addresses = utils.sort_ips_by_version(
                    utils.get_ovn_port_addresses(ovn_port))
                sec_groups = utils.get_ovn_port_security_groups(
                    ovn_port, skip_trusted_port=False)
                for sg_id in sec_groups:
                    for ip_version, addr_list in addresses.items():
                        if not addr_list:
                            continue
                        txn.add(self._nb_idl.update_address_set(
                            name=utils.ovn_addrset_name(sg_id, ip_version),
                            addrs_add=None,
                            addrs_remove=addr_list))

            # Delete the qos rule of the port
            try:
                if (port_object and
                        port_object.get('qos_policy_id') is not None):
                    qos_options = self._qos_driver.get_qos_options(
                        port_object)
                    qos_rule_column = self._create_qos_rules(
                        qos_options, port_object, network_id,
                        if_delete=True)
                    txn.add(self._nb_idl.qos_del(**qos_rule_column))
            except KeyError:
                pass

            if port_object and self.is_dns_required_for_port(port_object):
                self.add_txns_to_remove_port_dns_records(txn, port_object)

            # Check if the port being deleted is a virtual parent
            if (ovn_port.type != ovn_const.LSP_TYPE_VIRTUAL and
                    self._is_virtual_port_supported()):
                ls = self._nb_idl.ls_get(network_id).execute(
                    check_error=True)
                cmd = self._nb_idl.unset_lswitch_port_to_virtual_type
                for lsp in ls.ports:
                    if lsp.type != ovn_const.LSP_TYPE_VIRTUAL:
                        continue
                    if port_id in lsp.options.get(
                            ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY, ''):
                        txn.add(cmd(lsp.name, port_id, if_exists=True))

    # TODO(lucasagomes): The ``port_object`` parameter was added to
    # keep things backward compatible. Remove it in the Rocky release.
    def delete_port(self, context, port_id, port_object=None):
        try:
            self._delete_port(port_id, port_object=port_object)
        except idlutils.RowNotFound:
            pass
        db_rev.delete_revision(context, port_id, ovn_const.TYPE_PORTS)
    def _create_or_update_floatingip(self, floatingip, txn=None):
        router_id = floatingip.get('router_id')
        if not router_id:
            return

        commands = []
        admin_context = n_context.get_admin_context()
        fip_db = self._l3_plugin._get_floatingip(
            admin_context, floatingip['id'])
        gw_lrouter_name = utils.ovn_name(router_id)
        # TODO(chandrav): Since the floating ip port is not
        # bound to any chassis, packets destined to floating ip
        # will be dropped. To overcome this, delete the floating
        # ip port. Proper fix for this would be to redirect packets
        # destined to floating ip to the router port. This would
        # require changes in ovn-northd.
        commands.append(self._nb_idl.delete_lswitch_port(
            fip_db['floating_port_id'],
            utils.ovn_name(floatingip['floating_network_id'])))

        ext_ids = {
            ovn_const.OVN_FIP_EXT_ID_KEY: floatingip['id'],
            ovn_const.OVN_REV_NUM_EXT_ID_KEY: str(utils.get_revision_number(
                floatingip, ovn_const.TYPE_FLOATINGIPS)),
            ovn_const.OVN_FIP_PORT_EXT_ID_KEY: floatingip['port_id'],
            ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: gw_lrouter_name}
        columns = {'type': 'dnat_and_snat',
                   'logical_ip': floatingip['fixed_ip_address'],
                   'external_ip': floatingip['floating_ip_address']}

        if ovn_conf.is_ovn_distributed_floating_ip():
            port = self._plugin.get_port(
                admin_context, fip_db['floating_port_id'])
            columns['logical_port'] = floatingip['port_id']
            ext_ids[ovn_const.OVN_FIP_EXT_MAC_KEY] = port['mac_address']
            if self._nb_idl.lsp_get_up(floatingip['port_id']).execute():
                columns['external_mac'] = port['mac_address']

        # TODO(dalvarez): remove this check once the minimum OVS required
        # version contains the column (when OVS 2.8.2 is released).
        if self._nb_idl.is_col_present('NAT', 'external_ids'):
            columns['external_ids'] = ext_ids

        # TODO(mjozefcz): Remove this workaround when OVN LB
        # supports both decentralized FIPs on LB and member.
        lb_member_fip = self._is_lb_member_fip(admin_context, floatingip)
        if (ovn_conf.is_ovn_distributed_floating_ip() and
                lb_member_fip):
            LOG.warning("Port %s is configured as a member "
                        "of one of OVN Load_Balancers and "
                        "Load_Balancer has FIP assigned. "
                        "In order to make traffic work member "
                        "FIP needs to be centralized, even if "
                        "this environment is configured as DVR. "
                        "Removing logical_port and external_mac from "
                        "NAT entry.", floatingip['port_id'])
            columns.pop('logical_port', None)
            columns.pop('external_mac', None)
        commands.append(self._nb_idl.add_nat_rule_in_lrouter(
            gw_lrouter_name, **columns))

        # Get the logical port (of the private network) and set the field
        # external_ids:fip=<FIP address>. This will be used by the ovn
        # octavia driver to add the floating ip as a vip in the
        # Load_Balancer.vips column.
        private_lsp = self._nb_idl.get_lswitch_port(floatingip['port_id'])
        if private_lsp:
            port_fip = {
                ovn_const.OVN_PORT_FIP_EXT_ID_KEY:
                    floatingip['floating_ip_address']}
            commands.append(
                self._nb_idl.db_set('Logical_Switch_Port', private_lsp.uuid,
                                    ('external_ids', port_fip)))
            if not lb_member_fip:
                commands.extend(
                    self._handle_lb_fip_cmds(
                        admin_context, private_lsp,
                        action=ovn_const.FIP_ACTION_ASSOCIATE))
        else:
            LOG.warning("LSP for floating IP %s has not been found! "
                        "Cannot set FIP on VIP.", floatingip['id'])
        self._transaction(commands, txn=txn)
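    # A sketch (illustrative, not upstream code) of the NAT row added above,
    # assuming a distributed (DVR) floating IP; addresses and IDs are
    # placeholders:
    #
    #   type='dnat_and_snat', logical_ip='10.0.0.5',
    #   external_ip='203.0.113.7', logical_port='<port_id>',
    #   external_mac='fa:16:3e:aa:bb:cc',
    #   external_ids={ovn_const.OVN_FIP_EXT_ID_KEY: '<fip_id>', ...}
    #
    # In the centralized case logical_port/external_mac are omitted and the
    # translation happens on the gateway chassis instead.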
" "Cannot set FIP on VIP.", floatingip['id']) self._transaction(commands, txn=txn) def _is_lb_member_fip(self, context, fip): port = self._plugin.get_port( context, fip['port_id']) member_subnet = [ip['subnet_id'] for ip in port['fixed_ips'] if ip['ip_address'] == fip['fixed_ip_address']] if not member_subnet: return False member_subnet = member_subnet[0] ls = self._nb_idl.lookup( 'Logical_Switch', utils.ovn_name(port['network_id'])) for lb in ls.load_balancer: for ext_id in lb.external_ids.keys(): if ext_id.startswith(ovn_const.LB_EXT_IDS_POOL_PREFIX): members = lb.external_ids[ext_id] if not members: continue for member in members.split(','): if ('%s:' % fip['fixed_ip_address'] in member and '_%s' % member_subnet in member): return True return False def _handle_lb_fip_cmds(self, context, lb_lsp, action=ovn_const.FIP_ACTION_ASSOCIATE): commands = [] if not ovn_conf.is_ovn_distributed_floating_ip(): return commands lb_lsp_fip_port = lb_lsp.external_ids.get( ovn_const.OVN_PORT_NAME_EXT_ID_KEY, '') if not lb_lsp_fip_port.startswith(ovn_const.LB_VIP_PORT_PREFIX): return commands # This is a FIP on LB VIP. # Loop over members and delete FIP external_mac/logical_port enteries. # Find all LBs with this LSP as VIP. lbs = self._nb_idl.db_find_rows( 'Load_Balancer', ('external_ids', '=', { ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: lb_lsp.name}) ).execute(check_error=True) for lb in lbs: # GET all LS where given LB is linked. ls_linked = [ item for item in self._nb_idl.db_find_rows( 'Logical_Switch').execute(check_error=True) if lb in item.load_balancer] if not ls_linked: return # Find out IP addresses and subnets of configured members. members_to_verify = [] for ext_id in lb.external_ids.keys(): if ext_id.startswith(ovn_const.LB_EXT_IDS_POOL_PREFIX): members = lb.external_ids[ext_id] if not members: continue for member in members.split(','): # NOTE(mjozefcz): Remove this workaround in W release. # Last argument of member info is a subnet_id from # from which member comes from. # member_`id`_`ip`:`port`_`subnet_ip` member_info = member.split('_') if len(member_info) >= 4: m = {} m['id'] = member_info[1] m['ip'] = member_info[2].split(':')[0] m['subnet_id'] = member_info[3] try: subnet = self._plugin.get_subnet( context, m['subnet_id']) m['network_id'] = subnet['network_id'] members_to_verify.append(m) except n_exc.SubnetNotFound: LOG.debug("Cannot find subnet details " "for OVN LB member " "%s.", m['id']) # Find a member LSPs from all linked LS to this LB. for member in members_to_verify: ls = self._nb_idl.lookup( 'Logical_Switch', utils.ovn_name(member['network_id'])) for lsp in ls.ports: if not lsp.addresses: continue if member['ip'] in utils.remove_macs_from_lsp_addresses( lsp.addresses): member['lsp'] = lsp nats = self._nb_idl.db_find_rows( 'NAT', ('external_ids', '=', { ovn_const.OVN_FIP_PORT_EXT_ID_KEY: lsp.name}) ).execute(check_error=True) for nat in nats: if action == ovn_const.FIP_ACTION_ASSOCIATE: # NOTE(mjozefcz): We should delete logical_port # and external_mac entries from member NAT in # order to make traffic work. LOG.warning( "Port %s is configured as a member " "of one of OVN Load_Balancers and " "Load_Balancer has FIP assigned. " "In order to make traffic work member " "FIP needs to be centralized, even if " "this environment is configured as " "DVR. 
    def _handle_lb_fip_cmds(self, context, lb_lsp,
                            action=ovn_const.FIP_ACTION_ASSOCIATE):
        commands = []
        if not ovn_conf.is_ovn_distributed_floating_ip():
            return commands

        lb_lsp_fip_port = lb_lsp.external_ids.get(
            ovn_const.OVN_PORT_NAME_EXT_ID_KEY, '')

        if not lb_lsp_fip_port.startswith(ovn_const.LB_VIP_PORT_PREFIX):
            return commands

        # This is a FIP on an LB VIP.
        # Loop over members and delete FIP external_mac/logical_port entries.
        # Find all LBs with this LSP as VIP.
        lbs = self._nb_idl.db_find_rows(
            'Load_Balancer',
            ('external_ids', '=', {
                ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: lb_lsp.name})
        ).execute(check_error=True)
        for lb in lbs:
            # Get all LS where the given LB is linked.
            ls_linked = [
                item
                for item in self._nb_idl.db_find_rows(
                    'Logical_Switch').execute(check_error=True)
                if lb in item.load_balancer]
            if not ls_linked:
                return

            # Find out IP addresses and subnets of configured members.
            members_to_verify = []
            for ext_id in lb.external_ids.keys():
                if ext_id.startswith(ovn_const.LB_EXT_IDS_POOL_PREFIX):
                    members = lb.external_ids[ext_id]
                    if not members:
                        continue
                    for member in members.split(','):
                        # NOTE(mjozefcz): Remove this workaround in W release.
                        # The last element of the member info is the
                        # subnet_id the member comes from:
                        # member_`id`_`ip`:`port`_`subnet_ip`
                        member_info = member.split('_')
                        if len(member_info) >= 4:
                            m = {}
                            m['id'] = member_info[1]
                            m['ip'] = member_info[2].split(':')[0]
                            m['subnet_id'] = member_info[3]
                            try:
                                subnet = self._plugin.get_subnet(
                                    context, m['subnet_id'])
                                m['network_id'] = subnet['network_id']
                                members_to_verify.append(m)
                            except n_exc.SubnetNotFound:
                                LOG.debug("Cannot find subnet details "
                                          "for OVN LB member "
                                          "%s.", m['id'])

            # Find the member LSPs from all LS linked to this LB.
            for member in members_to_verify:
                ls = self._nb_idl.lookup(
                    'Logical_Switch', utils.ovn_name(member['network_id']))
                for lsp in ls.ports:
                    if not lsp.addresses:
                        continue
                    if member['ip'] in utils.remove_macs_from_lsp_addresses(
                            lsp.addresses):
                        member['lsp'] = lsp
                        nats = self._nb_idl.db_find_rows(
                            'NAT',
                            ('external_ids', '=', {
                                ovn_const.OVN_FIP_PORT_EXT_ID_KEY:
                                    lsp.name})
                        ).execute(check_error=True)
                        for nat in nats:
                            if action == ovn_const.FIP_ACTION_ASSOCIATE:
                                # NOTE(mjozefcz): We should delete
                                # logical_port and external_mac entries
                                # from the member NAT in order to make
                                # traffic work.
                                LOG.warning(
                                    "Port %s is configured as a member "
                                    "of one of OVN Load_Balancers and "
                                    "Load_Balancer has FIP assigned. "
                                    "In order to make traffic work member "
                                    "FIP needs to be centralized, even if "
                                    "this environment is configured as "
                                    "DVR. Removing logical_port and "
                                    "external_mac from NAT entry.",
                                    lsp.name)
                                commands.extend([
                                    self._nb_idl.db_clear(
                                        'NAT', nat.uuid, 'external_mac'),
                                    self._nb_idl.db_clear(
                                        'NAT', nat.uuid, 'logical_port')])
                            else:
                                # NOTE(mjozefcz): The FIP from the LB VIP is
                                # disassociated now. We can decentralize
                                # the member FIPs again.
                                LOG.warning(
                                    "Port %s is configured as a member "
                                    "of one of OVN Load_Balancers and "
                                    "Load_Balancer has FIP disassociated. "
                                    "DVR for this port can be enabled back.",
                                    lsp.name)
                                commands.append(self._nb_idl.db_set(
                                    'NAT', nat.uuid,
                                    ('logical_port', lsp.name)))
                                port = self._plugin.get_port(
                                    context, lsp.name)
                                if port['status'] == (
                                        const.PORT_STATUS_ACTIVE):
                                    commands.append(
                                        self._nb_idl.db_set(
                                            'NAT', nat.uuid,
                                            ('external_mac',
                                             port['mac_address'])))

        return commands

    def _delete_floatingip(self, fip, lrouter, txn=None):
        commands = [self._nb_idl.delete_nat_rule_in_lrouter(
            lrouter, type='dnat_and_snat',
            logical_ip=fip['logical_ip'],
            external_ip=fip['external_ip'])]
        try:
            port_id = (
                fip['external_ids'].get(ovn_const.OVN_FIP_PORT_EXT_ID_KEY))
            if port_id:
                private_lsp = self._nb_idl.get_lswitch_port(port_id)
                if private_lsp:
                    commands.append(
                        self._nb_idl.db_remove(
                            'Logical_Switch_Port', private_lsp.uuid,
                            'external_ids',
                            (ovn_const.OVN_PORT_FIP_EXT_ID_KEY)))
                    commands.extend(
                        self._handle_lb_fip_cmds(
                            n_context.get_admin_context(),
                            private_lsp,
                            action=ovn_const.FIP_ACTION_DISASSOCIATE))
        except KeyError:
            LOG.debug("FIP %s doesn't have external_ids.", fip)
        self._transaction(commands, txn=txn)

    def update_floatingip_status(self, context, floatingip):
        # NOTE(lucasagomes): OVN doesn't care about the floating ip
        # status, this method just bumps the revision number
        check_rev_cmd = self._nb_idl.check_revision_number(
            floatingip['id'], floatingip, ovn_const.TYPE_FLOATINGIPS)
        with self._nb_idl.transaction(check_error=True) as txn:
            txn.add(check_rev_cmd)
        if check_rev_cmd.result == ovn_const.TXN_COMMITTED:
            db_rev.bump_revision(
                context, floatingip, ovn_const.TYPE_FLOATINGIPS)

    def create_floatingip(self, context, floatingip):
        try:
            self._create_or_update_floatingip(floatingip)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error('Unable to create floating ip in gateway '
                          'router. Error: %s', e)

        db_rev.bump_revision(context, floatingip,
                             ovn_const.TYPE_FLOATINGIPS)

        # NOTE(lucasagomes): Revise the expected status
        # of floating ips, setting it to ACTIVE here doesn't
        # seem consistent with other drivers (ODL here), see:
        # https://bugs.launchpad.net/networking-ovn/+bug/1657693
        if floatingip.get('router_id'):
            self._l3_plugin.update_floatingip_status(
                n_context.get_admin_context(), floatingip['id'],
                const.FLOATINGIP_STATUS_ACTIVE)

    # TODO(lucasagomes): The ``fip_object`` parameter was added to
    # keep things backward compatible since old FIPs might not have
    # the OVN_FIP_EXT_ID_KEY in their external_ids field. Remove it
    # in the Rocky release.
    def update_floatingip(self, context, floatingip, fip_object=None):
        fip_status = None
        router_id = None
        ovn_fip = self._nb_idl.get_floatingip(floatingip['id'])

        if not ovn_fip and fip_object:
            router_id = fip_object.get('router_id')
            ovn_fip = self._nb_idl.get_floatingip_by_ips(
                router_id, fip_object['fixed_ip_address'],
                fip_object['floating_ip_address'])

        check_rev_cmd = self._nb_idl.check_revision_number(
            floatingip['id'], floatingip, ovn_const.TYPE_FLOATINGIPS)
        with self._nb_idl.transaction(check_error=True) as txn:
            txn.add(check_rev_cmd)
            if ovn_fip:
                lrouter = ovn_fip['external_ids'].get(
                    ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY,
                    utils.ovn_name(router_id))
                self._delete_floatingip(ovn_fip, lrouter, txn=txn)
                fip_status = const.FLOATINGIP_STATUS_DOWN

            if floatingip.get('port_id'):
                self._create_or_update_floatingip(floatingip, txn=txn)
                fip_status = const.FLOATINGIP_STATUS_ACTIVE

        if check_rev_cmd.result == ovn_const.TXN_COMMITTED:
            db_rev.bump_revision(
                context, floatingip, ovn_const.TYPE_FLOATINGIPS)

        if fip_status:
            self._l3_plugin.update_floatingip_status(
                context, floatingip['id'], fip_status)

    # TODO(lucasagomes): The ``fip_object`` parameter was added to
    # keep things backward compatible since old FIPs might not have
    # the OVN_FIP_EXT_ID_KEY in their external_ids field. Remove it
    # in the Rocky release.
    def delete_floatingip(self, context, fip_id, fip_object=None):
        router_id = None
        ovn_fip = self._nb_idl.get_floatingip(fip_id)

        if not ovn_fip and fip_object:
            router_id = fip_object.get('router_id')
            ovn_fip = self._nb_idl.get_floatingip_by_ips(
                router_id, fip_object['fixed_ip_address'],
                fip_object['floating_ip_address'])

        if ovn_fip:
            lrouter = ovn_fip['external_ids'].get(
                ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY,
                utils.ovn_name(router_id))
            try:
                self._delete_floatingip(ovn_fip, lrouter)
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    LOG.error('Unable to delete floating ip in gateway '
                              'router. Error: %s', e)
        db_rev.delete_revision(context, fip_id, ovn_const.TYPE_FLOATINGIPS)
    def disassociate_floatingip(self, floatingip, router_id):
        lrouter = utils.ovn_name(router_id)
        try:
            self._delete_floatingip(floatingip, lrouter)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error('Unable to disassociate floating ip in gateway '
                          'router. Error: %s', e)

    def _get_gw_info(self, context, router):
        gateways_info = []
        ext_gw_info = router.get(l3.EXTERNAL_GW_INFO, {})
        network_id = ext_gw_info.get('network_id', '')
        for ext_fixed_ip in ext_gw_info.get('external_fixed_ips', []):
            subnet_id = ext_fixed_ip['subnet_id']
            subnet = self._plugin.get_subnet(context, subnet_id)
            gateways_info.append(GW_INFO(
                network_id, subnet_id, ext_fixed_ip['ip_address'],
                subnet.get('gateway_ip'), subnet['ip_version'],
                const.IPv4_ANY if subnet['ip_version'] ==
                const.IP_VERSION_4 else const.IPv6_ANY))
        return gateways_info
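    # GW_INFO is a namedtuple defined earlier in this module. For a router
    # with a single IPv4 external fixed IP, the entry built above would look
    # roughly like this sketch (addresses are illustrative placeholders):
    #
    #   GW_INFO(network_id='<ext_net_id>', subnet_id='<ext_subnet_id>',
    #           router_ip='203.0.113.10', gateway_ip='203.0.113.1',
    #           ip_version=4, ip_prefix='0.0.0.0/0')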
    def _delete_router_ext_gw(self, router, networks, txn):
        context = n_context.get_admin_context()
        if not networks:
            networks = []
        router_id = router['id']
        gw_port_id = router['gw_port_id']
        gw_lrouter_name = utils.ovn_name(router_id)
        gateways = self._get_gw_info(context, router)
        for gw_info in gateways:
            if gw_info.ip_version == const.IP_VERSION_4:
                for network in networks:
                    txn.add(self._nb_idl.delete_nat_rule_in_lrouter(
                        gw_lrouter_name, type='snat', logical_ip=network,
                        external_ip=gw_info.router_ip))
            txn.add(self._nb_idl.delete_static_route(
                gw_lrouter_name, ip_prefix=gw_info.ip_prefix,
                nexthop=gw_info.gateway_ip))
        txn.add(self._nb_idl.delete_lrouter_port(
            utils.ovn_lrouter_port_name(gw_port_id),
            gw_lrouter_name))

    def _get_nets_and_ipv6_ra_confs_for_router_port(
            self, context, port_fixed_ips):
        networks = set()
        ipv6_ra_configs = {}
        ipv6_ra_configs_supported = self._nb_idl.is_col_present(
            'Logical_Router_Port', 'ipv6_ra_configs')
        for fixed_ip in port_fixed_ips:
            subnet_id = fixed_ip['subnet_id']
            subnet = self._plugin.get_subnet(context, subnet_id)
            cidr = netaddr.IPNetwork(subnet['cidr'])
            networks.add("%s/%s" % (fixed_ip['ip_address'],
                                    str(cidr.prefixlen)))

            if subnet.get('ipv6_address_mode') and not ipv6_ra_configs and (
                    ipv6_ra_configs_supported):
                ipv6_ra_configs['address_mode'] = (
                    utils.get_ovn_ipv6_address_mode(
                        subnet['ipv6_address_mode']))
                ipv6_ra_configs['send_periodic'] = 'true'
                net = self._plugin.get_network(context,
                                               subnet['network_id'])
                ipv6_ra_configs['mtu'] = str(net['mtu'])

        return list(networks), ipv6_ra_configs

    def _add_router_ext_gw(self, router, networks, txn):
        context = n_context.get_admin_context()
        router_id = router['id']
        # 1. Add the external gateway router port.
        gateways = self._get_gw_info(context, router)
        gw_port_id = router['gw_port_id']
        port = self._plugin.get_port(context, gw_port_id)
        self._create_lrouter_port(context, router_id, port, txn=txn)

        def _build_extids(gw_info):
            # TODO(lucasagomes): Remove this check after OVS 2.8.2 is tagged
            # (prior to that, the external_ids column didn't exist in this
            # table).
            columns = {}
            if self._nb_idl.is_col_present('Logical_Router_Static_Route',
                                           'external_ids'):
                columns['external_ids'] = {
                    ovn_const.OVN_ROUTER_IS_EXT_GW: 'true',
                    ovn_const.OVN_SUBNET_EXT_ID_KEY: gw_info.subnet_id}
            return columns

        # 2. Add the default route with the gateway ip as nexthop
        lrouter_name = utils.ovn_name(router_id)
        for gw_info in gateways:
            columns = _build_extids(gw_info)
            txn.add(self._nb_idl.add_static_route(
                lrouter_name, ip_prefix=gw_info.ip_prefix,
                nexthop=gw_info.gateway_ip, **columns))

        # 3. Add snat rules for tenant networks in the lrouter if snat is
        #    enabled
        if utils.is_snat_enabled(router) and networks:
            self.update_nat_rules(router, networks, enable_snat=True,
                                  txn=txn)

        return port

    def _check_external_ips_changed(self, ovn_snats,
                                    ovn_static_routes, router):
        context = n_context.get_admin_context()
        gateways = self._get_gw_info(context, router)
        ovn_gw_subnets = None
        if self._nb_idl.is_col_present('Logical_Router_Static_Route',
                                       'external_ids'):
            ovn_gw_subnets = [
                getattr(route, 'external_ids', {}).get(
                    ovn_const.OVN_SUBNET_EXT_ID_KEY)
                for route in ovn_static_routes]

        for gw_info in gateways:
            if ovn_gw_subnets and gw_info.subnet_id not in ovn_gw_subnets:
                return True
            if gw_info.ip_version == 6:
                continue
            for snat in ovn_snats:
                if snat.external_ip != gw_info.router_ip:
                    return True
        return False

    def update_router_routes(self, context, router_id, add, remove,
                             txn=None):
        if not any([add, remove]):
            return
        lrouter_name = utils.ovn_name(router_id)
        commands = []
        for route in add:
            commands.append(
                self._nb_idl.add_static_route(
                    lrouter_name, ip_prefix=route['destination'],
                    nexthop=route['nexthop']))
        for route in remove:
            commands.append(
                self._nb_idl.delete_static_route(
                    lrouter_name, ip_prefix=route['destination'],
                    nexthop=route['nexthop']))
        self._transaction(commands, txn=txn)

    def _get_router_ports(self, context, router_id, get_gw_port=False):
        router_db = self._l3_plugin._get_router(context, router_id)
        if get_gw_port:
            return [p.port for p in router_db.attached_ports]
        else:
            # When an existing deployment is migrated to OVN
            # we may need to consider other port types -
            # DVR_INTERFACE/HA_INTF.
            return [p.port for p in router_db.attached_ports
                    if p.port_type in [const.DEVICE_OWNER_ROUTER_INTF,
                                       const.DEVICE_OWNER_DVR_INTERFACE,
                                       const.DEVICE_OWNER_HA_REPLICATED_INT,
                                       const.DEVICE_OWNER_ROUTER_HA_INTF]]

    def _get_v4_network_for_router_port(self, context, port):
        cidr = None
        for fixed_ip in port['fixed_ips']:
            subnet_id = fixed_ip['subnet_id']
            subnet = self._plugin.get_subnet(context, subnet_id)
            if subnet['ip_version'] != 4:
                continue
            cidr = subnet['cidr']
        return cidr

    def _get_v4_network_of_all_router_ports(self, context, router_id,
                                            ports=None):
        networks = []
        ports = ports or self._get_router_ports(context, router_id)
        for port in ports:
            network = self._get_v4_network_for_router_port(context, port)
            if network:
                networks.append(network)
        return networks

    def _gen_router_ext_ids(self, router):
        return {
            ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY:
                router.get('name', 'no_router_name'),
            ovn_const.OVN_GW_PORT_EXT_ID_KEY:
                router.get('gw_port_id') or '',
            ovn_const.OVN_REV_NUM_EXT_ID_KEY: str(
                utils.get_revision_number(
                    router, ovn_const.TYPE_ROUTERS))}

    def create_router(self, context, router, add_external_gateway=True):
        """Create a logical router."""
        external_ids = self._gen_router_ext_ids(router)
        enabled = router.get('admin_state_up')
        lrouter_name = utils.ovn_name(router['id'])
        added_gw_port = None
        with self._nb_idl.transaction(check_error=True) as txn:
            txn.add(self._nb_idl.create_lrouter(
                lrouter_name, external_ids=external_ids,
                enabled=enabled, options={}))
            # TODO(lucasagomes): add_external_gateway is only being used
            # by the ovn_db_sync.py script, remove it after the database
            # synchronization work
            if add_external_gateway:
                networks = self._get_v4_network_of_all_router_ports(
                    context, router['id'])
                if router.get(l3.EXTERNAL_GW_INFO) and networks is not None:
                    added_gw_port = self._add_router_ext_gw(
                        router, networks, txn)

        if added_gw_port:
            db_rev.bump_revision(context, added_gw_port,
                                 ovn_const.TYPE_ROUTER_PORTS)
        db_rev.bump_revision(context, router, ovn_const.TYPE_ROUTERS)

    # TODO(lucasagomes): The ``router_object`` parameter was added to
    # keep things backward compatible with old routers created prior to
    # the database sync work. Remove it in the Rocky release.
    def update_router(self, context, new_router, router_object=None):
        """Update a logical router."""
        router_id = new_router['id']
        router_name = utils.ovn_name(router_id)
        ovn_router = self._nb_idl.get_lrouter(router_name)
        gateway_new = new_router.get(l3.EXTERNAL_GW_INFO)
        gateway_old = utils.get_lrouter_ext_gw_static_route(ovn_router)
        added_gw_port = None
        deleted_gw_port_id = None

        if router_object:
            gateway_old = gateway_old or router_object.get(
                l3.EXTERNAL_GW_INFO)
        ovn_snats = utils.get_lrouter_snats(ovn_router)
        networks = self._get_v4_network_of_all_router_ports(
            context, router_id)
        try:
            check_rev_cmd = self._nb_idl.check_revision_number(
                router_name, new_router, ovn_const.TYPE_ROUTERS)
            with self._nb_idl.transaction(check_error=True) as txn:
                txn.add(check_rev_cmd)
                if gateway_new and not gateway_old:
                    # Router gateway is set
                    added_gw_port = self._add_router_ext_gw(
                        new_router, networks, txn)
                elif gateway_old and not gateway_new:
                    # Router gateway is removed
                    txn.add(self._nb_idl.delete_lrouter_ext_gw(router_name))
                    if router_object:
                        self._delete_router_ext_gw(
                            router_object, networks, txn)
                        deleted_gw_port_id = router_object['gw_port_id']
                elif gateway_new and gateway_old:
                    # Check if the external gateway has changed, if yes,
                    # delete the old gateway and add the new gateway
                    if self._check_external_ips_changed(
                            ovn_snats, gateway_old, new_router):
                        txn.add(self._nb_idl.delete_lrouter_ext_gw(
                            router_name))
                        if router_object:
                            self._delete_router_ext_gw(
                                router_object, networks, txn)
                            deleted_gw_port_id = router_object['gw_port_id']
                        added_gw_port = self._add_router_ext_gw(
                            new_router, networks, txn)
                    else:
                        # Check if snat has been enabled/disabled and update
                        new_snat_state = gateway_new.get('enable_snat', True)
                        if bool(ovn_snats) != new_snat_state:
                            if utils.is_snat_enabled(
                                    new_router) and networks:
                                self.update_nat_rules(
                                    new_router, networks,
                                    enable_snat=new_snat_state, txn=txn)

                update = {'external_ids':
                          self._gen_router_ext_ids(new_router)}
                update['enabled'] = new_router.get(
                    'admin_state_up') or False
                txn.add(self._nb_idl.update_lrouter(router_name, **update))

                # Check for route updates
                routes = new_router.get('routes', [])
                old_routes = utils.get_lrouter_non_gw_routes(ovn_router)
                added, removed = helpers.diff_list_of_dict(
                    old_routes, routes)
                self.update_router_routes(
                    context, router_id, added, removed, txn=txn)

            if check_rev_cmd.result == ovn_const.TXN_COMMITTED:
                db_rev.bump_revision(context, new_router,
                                     ovn_const.TYPE_ROUTERS)

            if added_gw_port:
                db_rev.bump_revision(context, added_gw_port,
                                     ovn_const.TYPE_ROUTER_PORTS)

            if deleted_gw_port_id:
                db_rev.delete_revision(context, deleted_gw_port_id,
                                       ovn_const.TYPE_ROUTER_PORTS)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error('Unable to update router %(router)s. '
                          'Error: %(error)s', {'router': router_id,
                                               'error': e})

    def delete_router(self, context, router_id):
        """Delete a logical router."""
        lrouter_name = utils.ovn_name(router_id)
        with self._nb_idl.transaction(check_error=True) as txn:
            txn.add(self._nb_idl.delete_lrouter(lrouter_name))
        db_rev.delete_revision(context, router_id, ovn_const.TYPE_ROUTERS)
    def get_candidates_for_scheduling(self, physnet, cms=None,
                                      chassis_physnets=None):
        """Return chassis for scheduling gateway router.

        Criteria for selecting chassis as candidates:
        1) chassis from cms with proper bridge mappings
        2) if no chassis is available from 1) then, select chassis
           with proper bridge mappings
        """
        # TODO(lucasagomes): Simplify the logic here, the CMS option has
        # been introduced long ago and by now all gateway chassis should
        # include it. This will match the logic in is_gateway_chassis()
        # (utils.py)
        cms = cms or self._sb_idl.get_gateway_chassis_from_cms_options()
        chassis_physnets = (chassis_physnets or
                            self._sb_idl.get_chassis_and_physnets())
        cms_bmaps = []
        bmaps = []
        for chassis, physnets in chassis_physnets.items():
            if physnet and physnet in physnets:
                if chassis in cms:
                    cms_bmaps.append(chassis)
                else:
                    bmaps.append(chassis)
        candidates = cms_bmaps or bmaps
        if not cms_bmaps:
            LOG.debug("No eligible chassis with external connectivity"
                      " through ovn-cms-options for %s", physnet)
        LOG.debug("Chassis candidates with external connectivity: %s",
                  candidates)
        return candidates
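    # A sketch of the selection above with hypothetical inputs (names are
    # placeholders, not from a real deployment):
    #
    #   cms              = ['hv1', 'hv2']
    #   chassis_physnets = {'hv1': ['physnet1'], 'hv2': ['physnet2'],
    #                       'hv3': ['physnet1']}
    #
    # For physnet='physnet1': cms_bmaps == ['hv1'] and bmaps == ['hv3'], so
    # the returned candidates are ['hv1'] (CMS-enrolled chassis with the
    # right bridge mappings take precedence).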
    def _get_physnet(self, network):
        if network.get(pnet.NETWORK_TYPE) in [const.TYPE_FLAT,
                                              const.TYPE_VLAN]:
            return network.get(pnet.PHYSICAL_NETWORK)

    def _gen_router_port_ext_ids(self, port):
        ext_ids = {
            ovn_const.OVN_REV_NUM_EXT_ID_KEY: str(
                utils.get_revision_number(
                    port, ovn_const.TYPE_ROUTER_PORTS)),
            ovn_const.OVN_SUBNET_EXT_IDS_KEY:
                ' '.join(utils.get_port_subnet_ids(port)),
            ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY:
                utils.ovn_name(port['network_id'])}

        router_id = port.get('device_id')
        if router_id:
            ext_ids[ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY] = router_id

        return ext_ids

    def _gen_router_port_options(self, port, network=None):
        options = {}
        if network is None:
            network = self._plugin.get_network(
                n_context.get_admin_context(), port['network_id'])
        # For VLAN type networks we need to set the
        # "reside-on-redirect-chassis" option so the routing for this
        # logical router port is centralized in the chassis hosting the
        # distributed gateway port.
        # https://github.com/openvswitch/ovs/commit/85706c34d53d4810f54bec1de662392a3c06a996
        if network.get(pnet.NETWORK_TYPE) == const.TYPE_VLAN:
            options['reside-on-redirect-chassis'] = 'true'

        is_gw_port = const.DEVICE_OWNER_ROUTER_GW == port.get(
            'device_owner')
        if is_gw_port and ovn_conf.is_ovn_emit_need_to_frag_enabled():
            options[ovn_const.OVN_ROUTER_PORT_GW_MTU_OPTION] = str(
                network['mtu'])
        return options

    def _create_lrouter_port(self, context, router_id, port, txn=None):
        """Create a logical router port."""
        lrouter = utils.ovn_name(router_id)
        networks, ipv6_ra_configs = (
            self._get_nets_and_ipv6_ra_confs_for_router_port(
                context, port['fixed_ips']))
        lrouter_port_name = utils.ovn_lrouter_port_name(port['id'])
        is_gw_port = const.DEVICE_OWNER_ROUTER_GW == port.get(
            'device_owner')
        columns = {}
        columns['options'] = self._gen_router_port_options(port)

        if is_gw_port:
            port_net = self._plugin.get_network(
                n_context.get_admin_context(), port['network_id'])
            physnet = self._get_physnet(port_net)
            candidates = self.get_candidates_for_scheduling(physnet)
            selected_chassis = self._ovn_scheduler.select(
                self._nb_idl, self._sb_idl, lrouter_port_name,
                candidates=candidates)
            if selected_chassis:
                columns['gateway_chassis'] = selected_chassis

        lsp_address = ovn_const.DEFAULT_ADDR_FOR_LSP_WITH_PEER
        if ipv6_ra_configs:
            columns['ipv6_ra_configs'] = ipv6_ra_configs

        commands = [
            self._nb_idl.add_lrouter_port(
                name=lrouter_port_name,
                lrouter=lrouter,
                mac=port['mac_address'],
                networks=networks,
                may_exist=True,
                external_ids=self._gen_router_port_ext_ids(port),
                **columns),
            self._nb_idl.set_lrouter_port_in_lswitch_port(
                port['id'], lrouter_port_name, is_gw_port=is_gw_port,
                lsp_address=lsp_address)]
        self._transaction(commands, txn=txn)
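    # For a gateway port on a VLAN provider network with
    # ovn_emit_need_to_frag enabled, _gen_router_port_options() above would
    # yield options along these lines (a sketch, the MTU value is
    # illustrative):
    #
    #   {'reside-on-redirect-chassis': 'true',
    #    ovn_const.OVN_ROUTER_PORT_GW_MTU_OPTION: '1500'}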
    def create_router_port(self, context, router_id, router_interface):
        port = self._plugin.get_port(context, router_interface['port_id'])
        with self._nb_idl.transaction(check_error=True) as txn:
            multi_prefix = False
            if (len(router_interface.get('subnet_ids', [])) == 1 and
                    len(port['fixed_ips']) > 1):
                # NOTE(lizk): It's adding a subnet onto an already existing
                # router interface port, try to update the lrouter port's
                # 'networks' column.
                self._update_lrouter_port(context, port, txn=txn)
                multi_prefix = True
            else:
                self._create_lrouter_port(context, router_id, port, txn=txn)

            router = self._l3_plugin.get_router(context, router_id)
            if router.get(l3.EXTERNAL_GW_INFO):
                cidr = None
                for fixed_ip in port['fixed_ips']:
                    subnet = self._plugin.get_subnet(
                        context, fixed_ip['subnet_id'])
                    if multi_prefix:
                        if 'subnet_id' in router_interface:
                            if subnet['id'] != (
                                    router_interface['subnet_id']):
                                continue
                    if subnet['ip_version'] == 4:
                        cidr = subnet['cidr']

                if utils.is_snat_enabled(router) and cidr:
                    self.update_nat_rules(router, networks=[cidr],
                                          enable_snat=True, txn=txn)

        db_rev.bump_revision(context, port, ovn_const.TYPE_ROUTER_PORTS)

    def _update_lrouter_port(self, context, port, if_exists=False,
                             txn=None):
        """Update a logical router port."""
        networks, ipv6_ra_configs = (
            self._get_nets_and_ipv6_ra_confs_for_router_port(
                context, port['fixed_ips']))

        lsp_address = ovn_const.DEFAULT_ADDR_FOR_LSP_WITH_PEER
        lrp_name = utils.ovn_lrouter_port_name(port['id'])
        update = {'networks': networks,
                  'ipv6_ra_configs': ipv6_ra_configs}
        is_gw_port = const.DEVICE_OWNER_ROUTER_GW == port.get(
            'device_owner')
        commands = [
            self._nb_idl.update_lrouter_port(
                name=lrp_name,
                external_ids=self._gen_router_port_ext_ids(port),
                options=self._gen_router_port_options(port),
                if_exists=if_exists,
                **update),
            self._nb_idl.set_lrouter_port_in_lswitch_port(
                port['id'], lrp_name, is_gw_port=is_gw_port,
                lsp_address=lsp_address)]
        self._transaction(commands, txn=txn)

    def update_router_port(self, context, port, if_exists=False):
        lrp_name = utils.ovn_lrouter_port_name(port['id'])
        check_rev_cmd = self._nb_idl.check_revision_number(
            lrp_name, port, ovn_const.TYPE_ROUTER_PORTS)
        with self._nb_idl.transaction(check_error=True) as txn:
            txn.add(check_rev_cmd)
            self._update_lrouter_port(context, port, if_exists=if_exists,
                                      txn=txn)

        if check_rev_cmd.result == ovn_const.TXN_COMMITTED:
            db_rev.bump_revision(
                context, port, ovn_const.TYPE_ROUTER_PORTS)

    def _delete_lrouter_port(self, context, port_id, router_id=None,
                             txn=None):
        """Delete a logical router port."""
        commands = [self._nb_idl.lrp_del(
            utils.ovn_lrouter_port_name(port_id),
            utils.ovn_name(router_id) if router_id else None,
            if_exists=True)]
        self._transaction(commands, txn=txn)
        db_rev.delete_revision(context, port_id,
                               ovn_const.TYPE_ROUTER_PORTS)
    def delete_router_port(self, context, port_id, router_id=None,
                           subnet_ids=None):
        try:
            ovn_port = self._nb_idl.lookup(
                'Logical_Router_Port',
                utils.ovn_lrouter_port_name(port_id))
        except idlutils.RowNotFound:
            return

        subnet_ids = subnet_ids or []
        port_removed = False
        with self._nb_idl.transaction(check_error=True) as txn:
            port = None
            try:
                port = self._plugin.get_port(context, port_id)
                # The router interface port still exists, call ovn to
                # update it
                self._update_lrouter_port(context, port, txn=txn)
            except n_exc.PortNotFound:
                # The router interface port doesn't exist any more,
                # we will call ovn to delete it once we remove the snat
                # rules in the router itself if we have to
                port_removed = True

            router_id = router_id or ovn_port.external_ids.get(
                ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY)
            if not router_id:
                router_id = port.get('device_id')

            router = None
            if router_id:
                router = self._l3_plugin.get_router(context, router_id)

            if not router.get(l3.EXTERNAL_GW_INFO):
                if port_removed:
                    self._delete_lrouter_port(context, port_id, router_id,
                                              txn=txn)
                return

            if not subnet_ids:
                subnet_ids = ovn_port.external_ids.get(
                    ovn_const.OVN_SUBNET_EXT_IDS_KEY, [])
                subnet_ids = subnet_ids.split()
            elif port:
                subnet_ids = utils.get_port_subnet_ids(port)

            cidr = None
            for sid in subnet_ids:
                subnet = self._plugin.get_subnet(context, sid)
                if subnet['ip_version'] == 4:
                    cidr = subnet['cidr']
                    break

            if router and utils.is_snat_enabled(router) and cidr:
                self.update_nat_rules(
                    router, networks=[cidr], enable_snat=False, txn=txn)

            # NOTE(mangelajo): If the port doesn't exist anymore, we
            # delete the router port as the last operation and update the
            # revision database to ensure consistency
            if port_removed:
                self._delete_lrouter_port(context, port_id, router_id,
                                          txn=txn)
            else:
                # otherwise, we just update the revision database
                db_rev.bump_revision(
                    context, port, ovn_const.TYPE_ROUTER_PORTS)

    def update_nat_rules(self, router, networks, enable_snat, txn=None):
        """Update the NAT rules in a logical router."""
        context = n_context.get_admin_context()
        func = (self._nb_idl.add_nat_rule_in_lrouter if enable_snat else
                self._nb_idl.delete_nat_rule_in_lrouter)
        gw_lrouter_name = utils.ovn_name(router['id'])
        gateways = self._get_gw_info(context, router)
        # Update NAT rules only for IPv4 subnets
        commands = [func(gw_lrouter_name, type='snat', logical_ip=network,
                         external_ip=gw_info.router_ip)
                    for gw_info in gateways
                    if gw_info.ip_version != const.IP_VERSION_6
                    for network in networks]
        self._transaction(commands, txn=txn)

    def _create_provnet_port(self, txn, network, physnet, tag):
        txn.add(self._nb_idl.create_lswitch_port(
            lport_name=utils.ovn_provnet_port_name(network['id']),
            lswitch_name=utils.ovn_name(network['id']),
            addresses=[ovn_const.UNKNOWN_ADDR],
            external_ids={},
            type=ovn_const.LSP_TYPE_LOCALNET,
            tag=tag if tag else [],
            options={'network_name': physnet}))

    def _gen_network_parameters(self, network):
        params = {'external_ids': {
            ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY: network['name'],
            ovn_const.OVN_NETWORK_MTU_EXT_ID_KEY: str(network['mtu']),
            ovn_const.OVN_REV_NUM_EXT_ID_KEY: str(
                utils.get_revision_number(network,
                                          ovn_const.TYPE_NETWORKS))}}

        # NOTE(lucasagomes): There's a difference between the
        # "qos_policy_id" key existing and it being None, the latter is a
        # valid value. Since we can't save None in OVSDB, we are converting
        # it to "null" as a placeholder.
        if 'qos_policy_id' in network:
            params['external_ids'][ovn_const.OVN_QOS_POLICY_EXT_ID_KEY] = (
                network['qos_policy_id'] or 'null')

        # Enable IGMP snooping if igmp_snooping_enable is enabled in Neutron
        value = 'true' if ovn_conf.is_igmp_snooping_enabled() else 'false'
        params['other_config'] = {
            ovn_const.MCAST_SNOOP: value,
            ovn_const.MCAST_FLOOD_UNREGISTERED: value}
        return params

    def create_network(self, context, network):
        # Create a logical switch with a name equal to the Neutron network
        # UUID. This provides an easy way to refer to the logical switch
        # without having to track what UUID OVN assigned to it.
        lswitch_params = self._gen_network_parameters(network)
        lswitch_name = utils.ovn_name(network['id'])
        with self._nb_idl.transaction(check_error=True) as txn:
            txn.add(self._nb_idl.ls_add(lswitch_name, **lswitch_params,
                                        may_exist=True))
            physnet = network.get(pnet.PHYSICAL_NETWORK)
            if physnet:
                self._create_provnet_port(txn, network, physnet,
                                          network.get(pnet.SEGMENTATION_ID))
        db_rev.bump_revision(context, network, ovn_const.TYPE_NETWORKS)
        self.create_metadata_port(context, network)
        return network

    def delete_network(self, context, network_id):
        with self._nb_idl.transaction(check_error=True) as txn:
            ls, ls_dns_record = self._nb_idl.get_ls_and_dns_record(
                utils.ovn_name(network_id))

            txn.add(self._nb_idl.ls_del(utils.ovn_name(network_id),
                                        if_exists=True))
            if ls_dns_record:
                txn.add(self._nb_idl.dns_del(ls_dns_record.uuid))
        db_rev.delete_revision(
            context, network_id, ovn_const.TYPE_NETWORKS)

    def _is_qos_update_required(self, network):
        # Is the qos service enabled
        if 'qos_policy_id' not in network:
            return False

        # Check if the qos service wasn't enabled before
        ovn_net = self._nb_idl.get_lswitch(utils.ovn_name(network['id']))
        if ovn_const.OVN_QOS_POLICY_EXT_ID_KEY not in ovn_net.external_ids:
            return True

        # Check if the policy_id has changed
        new_qos_id = network['qos_policy_id'] or 'null'
        return new_qos_id != ovn_net.external_ids[
            ovn_const.OVN_QOS_POLICY_EXT_ID_KEY]

    def set_gateway_mtu(self, context, prov_net, txn=None):
        ports = self._plugin.get_ports(
            context, filters=dict(
                network_id=[prov_net['id']],
                device_owner=[const.DEVICE_OWNER_ROUTER_GW]))
        commands = []
        for port in ports:
            lrp_name = utils.ovn_lrouter_port_name(port['id'])
            # TODO(lucasagomes): Use lrp_set_options() once
            # https://review.opendev.org/671765 is merged and a new version
            # of ovsdbapp is released
            options = self._gen_router_port_options(port, prov_net)
            commands.append(self._nb_idl.update_lrouter_port(
                name=lrp_name, if_exists=True, options=options))
        self._transaction(commands, txn=txn)

    def update_network(self, context, network):
        lswitch_name = utils.ovn_name(network['id'])
        # Check if QoS needs to be updated, before updating OVNDB
        qos_update_required = self._is_qos_update_required(network)
        check_rev_cmd = self._nb_idl.check_revision_number(
            lswitch_name, network, ovn_const.TYPE_NETWORKS)

        # TODO(numans) - When a network's dns domain name is updated, we
        # need to update the DNS records for this network in the DNS OVN
        # NB DB table.
        # (https://bugs.launchpad.net/networking-ovn/+bug/1777978)
        # Eg. if the network n1's dns domain name was "test1" and it has
        # 2 bound ports - p1 and p2, we would have created the below dns
        # records
        # ===========================
        #   p1 = P1_IP
        #   p1.test1 = P1_IP
        #   p1.default_domain = P1_IP
        #   p2 = P2_IP
        #   p2.test1 = P2_IP
        #   p2.default_domain = P2_IP
        # ===========================
        # If the network n1's dns domain name is updated to test2, then we
        # need to delete the below DNS records
        # ===========================
        #   p1.test1 = P1_IP
        #   p2.test1 = P2_IP
        # ===========================
        # and add the new ones
        # ===========================
        #   p1.test2 = P1_IP
        #   p2.test2 = P2_IP
        # ===========================
        # in the DNS row for this network.
        with self._nb_idl.transaction(check_error=True) as txn:
            txn.add(check_rev_cmd)
            lswitch_params = self._gen_network_parameters(network)
            lswitch = self._nb_idl.get_lswitch(lswitch_name)
            txn.add(self._nb_idl.db_set(
                'Logical_Switch', lswitch_name, *lswitch_params.items()))
            # Check if the previous mtu is different than the current one,
            # checking will help reduce the number of operations
            if (not lswitch or
                    lswitch.external_ids.get(
                        ovn_const.OVN_NETWORK_MTU_EXT_ID_KEY) !=
                    str(network['mtu'])):
                subnets = self._plugin.get_subnets_by_network(
                    context, network['id'])
                for subnet in subnets:
                    self.update_subnet(context, subnet, network, txn)

                if utils.is_provider_network(network):
                    # make sure to use admin context as this is a providernet
                    self.set_gateway_mtu(n_context.get_admin_context(),
                                         network, txn)

        if check_rev_cmd.result == ovn_const.TXN_COMMITTED:
            if qos_update_required:
                self._qos_driver.update_network(network)
            db_rev.bump_revision(context, network, ovn_const.TYPE_NETWORKS)

    def _add_subnet_dhcp_options(self, subnet, network,
                                 ovn_dhcp_options=None):
        if utils.is_dhcp_options_ignored(subnet):
            return

        if not ovn_dhcp_options:
            ovn_dhcp_options = self._get_ovn_dhcp_options(subnet, network)

        with self._nb_idl.transaction(check_error=True) as txn:
            rev_num = {ovn_const.OVN_REV_NUM_EXT_ID_KEY: str(
                utils.get_revision_number(subnet,
                                          ovn_const.TYPE_SUBNETS))}
            ovn_dhcp_options['external_ids'].update(rev_num)
            txn.add(self._nb_idl.add_dhcp_options(subnet['id'],
                                                  **ovn_dhcp_options))

    def _get_ovn_dhcp_options(self, subnet, network, server_mac=None):
        external_ids = {
            'subnet_id': subnet['id'],
            ovn_const.OVN_REV_NUM_EXT_ID_KEY: str(
                utils.get_revision_number(subnet,
                                          ovn_const.TYPE_SUBNETS))}
        dhcp_options = {'cidr': subnet['cidr'], 'options': {},
                        'external_ids': external_ids}

        if subnet['enable_dhcp']:
            if subnet['ip_version'] == const.IP_VERSION_4:
                dhcp_options['options'] = self._get_ovn_dhcpv4_opts(
                    subnet, network, server_mac=server_mac)
            else:
                dhcp_options['options'] = self._get_ovn_dhcpv6_opts(
                    subnet, server_id=server_mac)

        return dhcp_options

    def _process_global_dhcp_opts(self, options, ip_version):
        if ip_version == 4:
            global_options = ovn_conf.get_global_dhcpv4_opts()
        else:
            global_options = ovn_conf.get_global_dhcpv6_opts()

        for option, value in global_options.items():
            if option in ovn_const.GLOBAL_DHCP_OPTS_BLACKLIST[ip_version]:
                # This option is not allowed to be set with a global setting
                LOG.debug('DHCP option %s is not permitted to be set in '
                          'global options. This option will be ignored.',
                          option)
                continue
            # If the value is null (i.e. config ntp_server:), treat it as
            # a request to remove the option
            if value:
                options[option] = value
            else:
                try:
                    del options[option]
                except KeyError:
                    # Option not present, job done
                    pass

    def _get_ovn_dhcpv4_opts(self, subnet, network, server_mac=None):
        metadata_port_ip = self._find_metadata_port_ip(
            n_context.get_admin_context(), subnet)
        # TODO(dongj): Currently the metadata port is created only when
        # ovn_metadata_enabled is true, therefore this is a restriction for
        # supporting DHCP on subnets without a gateway IP.
        # We will remove this restriction later.
        service_id = subnet['gateway_ip'] or metadata_port_ip
        if not service_id:
            return {}

        default_lease_time = str(
            ovn_conf.get_ovn_dhcp_default_lease_time())
        mtu = network['mtu']
        options = {
            'server_id': service_id,
            'lease_time': default_lease_time,
            'mtu': str(mtu),
        }

        if cfg.CONF.dns_domain and cfg.CONF.dns_domain != 'openstacklocal':
            # NOTE(mjozefcz): The string field should be in quotes,
            # otherwise ovn will try to resolve it as a variable.
            options['domain_name'] = '"%s"' % cfg.CONF.dns_domain

        if subnet['gateway_ip']:
            options['router'] = subnet['gateway_ip']

        if server_mac:
            options['server_mac'] = server_mac
        else:
            options['server_mac'] = n_net.get_random_mac(
                cfg.CONF.base_mac.split(':'))

        dns_servers = (subnet.get('dns_nameservers') or
                       ovn_conf.get_dns_servers() or
                       utils.get_system_dns_resolvers())
        if dns_servers:
            options['dns_server'] = '{%s}' % ', '.join(dns_servers)
        else:
            LOG.warning("No relevant dns_servers defined for subnet %s. "
                        "Check the /etc/resolv.conf file",
                        subnet['id'])

        routes = []
        if metadata_port_ip:
            routes.append('%s/32,%s' % (
                ovn_const.METADATA_DEFAULT_IP, metadata_port_ip))

        # Add subnet host_routes to the 'classless_static_route' dhcp option
        routes.extend(['%s,%s' % (route['destination'], route['nexthop'])
                       for route in subnet['host_routes']])

        if routes:
            # If there are static routes, then we need to add the
            # default route in this option. As per RFC 3442 dhcp clients
            # should ignore the 'router' dhcp option (option 3)
            # if option 121 is present.
            if subnet['gateway_ip']:
                routes.append('0.0.0.0/0,%s' % subnet['gateway_ip'])

            options['classless_static_route'] = (
                '{' + ', '.join(routes) + '}')

        self._process_global_dhcp_opts(options, ip_version=4)

        return options

    def _get_ovn_dhcpv6_opts(self, subnet, server_id=None):
        """Returns the DHCPv6 options"""

        dhcpv6_opts = {
            'server_id': server_id or n_net.get_random_mac(
                cfg.CONF.base_mac.split(':'))
        }

        if subnet['dns_nameservers']:
            dns_servers = '{%s}' % ', '.join(subnet['dns_nameservers'])
            dhcpv6_opts['dns_server'] = dns_servers

        if subnet.get('ipv6_address_mode') == const.DHCPV6_STATELESS:
            dhcpv6_opts[ovn_const.DHCPV6_STATELESS_OPT] = 'true'

        self._process_global_dhcp_opts(dhcpv6_opts, ip_version=6)

        return dhcpv6_opts
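    # Putting the helpers above together, a typical DHCPv4 options map for a
    # subnet with a gateway IP and a metadata port might look like this
    # sketch (addresses and the lease time are illustrative placeholders):
    #
    #   {'server_id': '10.0.0.1',
    #    'lease_time': '43200',
    #    'mtu': '1442',
    #    'router': '10.0.0.1',
    #    'server_mac': 'fa:16:3e:11:22:33',
    #    'dns_server': '{8.8.8.8}',
    #    'classless_static_route':
    #        '{169.254.169.254/32,10.0.0.2, 0.0.0.0/0,10.0.0.1}'}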
    def _remove_subnet_dhcp_options(self, subnet_id, txn):
        dhcp_options = self._nb_idl.get_subnet_dhcp_options(
            subnet_id, with_ports=True)

        if dhcp_options['subnet'] is not None:
            txn.add(self._nb_idl.delete_dhcp_options(
                dhcp_options['subnet']['uuid']))

        # Remove subnet and port DHCP_Options rows, the DHCP options in
        # the lsp rows will be removed by the related UUID
        for opt in dhcp_options['ports']:
            txn.add(self._nb_idl.delete_dhcp_options(opt['uuid']))

    def _enable_subnet_dhcp_options(self, subnet, network, txn):
        if utils.is_dhcp_options_ignored(subnet):
            return

        filters = {'fixed_ips': {'subnet_id': [subnet['id']]}}
        all_ports = self._plugin.get_ports(n_context.get_admin_context(),
                                           filters=filters)
        ports = [p for p in all_ports
                 if not utils.is_network_device_port(p)]

        dhcp_options = self._get_ovn_dhcp_options(subnet, network)
        subnet_dhcp_cmd = self._nb_idl.add_dhcp_options(subnet['id'],
                                                        **dhcp_options)
        subnet_dhcp_option = txn.add(subnet_dhcp_cmd)
        # Traverse ports to add port DHCP_Options rows
        for port in ports:
            lsp_dhcp_disabled, lsp_dhcp_opts = utils.get_lsp_dhcp_opts(
                port, subnet['ip_version'])
            if lsp_dhcp_disabled:
                continue
            elif not lsp_dhcp_opts:
                lsp_dhcp_options = subnet_dhcp_option
            else:
                port_dhcp_options = copy.deepcopy(dhcp_options)
                port_dhcp_options['options'].update(lsp_dhcp_opts)
                port_dhcp_options['external_ids'].update(
                    {'port_id': port['id']})
                lsp_dhcp_options = txn.add(self._nb_idl.add_dhcp_options(
                    subnet['id'], port_id=port['id'],
                    **port_dhcp_options))
            columns = ({'dhcpv6_options': lsp_dhcp_options}
                       if subnet['ip_version'] == const.IP_VERSION_6
                       else {'dhcpv4_options': lsp_dhcp_options})

            # Set lsp DHCP options
            txn.add(self._nb_idl.set_lswitch_port(
                lport_name=port['id'],
                **columns))

    def _update_subnet_dhcp_options(self, subnet, network, txn):
        if utils.is_dhcp_options_ignored(subnet):
            return
        original_options = self._nb_idl.get_subnet_dhcp_options(
            subnet['id'])['subnet']
        mac = None
        if original_options:
            if subnet['ip_version'] == const.IP_VERSION_6:
                mac = original_options['options'].get('server_id')
            else:
                mac = original_options['options'].get('server_mac')
        new_options = self._get_ovn_dhcp_options(subnet, network, mac)
        # Check whether DHCP changed
        if (original_options and
                original_options['cidr'] == new_options['cidr'] and
                original_options['options'] == new_options['options']):
            return
        txn.add(self._nb_idl.add_dhcp_options(subnet['id'],
                                              **new_options))
        dhcp_options = self._nb_idl.get_subnet_dhcp_options(
            subnet['id'], with_ports=True)

        # When a subnet dns_nameserver is updated, then we should update
        # the port dhcp options for ports (with no port-specific dns_server
        # defined).
        if 'options' in new_options and 'options' in original_options:
            orig_dns_server = original_options['options'].get(
                'dns_server')
            new_dns_server = new_options['options'].get('dns_server')
            dns_server_changed = (orig_dns_server != new_dns_server)
        else:
            dns_server_changed = False

        for opt in dhcp_options['ports']:
            if not new_options.get('options'):
                continue
            options = dict(new_options['options'])
            p_dns_server = opt['options'].get('dns_server')
            if dns_server_changed and (orig_dns_server == p_dns_server):
                # If the port has its own dns_server option defined, then
                # orig_dns_server and p_dns_server will not match.
                opt['options']['dns_server'] = new_dns_server
            options.update(opt['options'])

            port_id = opt['external_ids']['port_id']
            txn.add(self._nb_idl.add_dhcp_options(
                subnet['id'], port_id=port_id, options=options))

    def create_subnet(self, context, subnet, network):
        if subnet['enable_dhcp']:
            if subnet['ip_version'] == 4:
                self.update_metadata_port(context, network['id'])
            self._add_subnet_dhcp_options(subnet, network)
        db_rev.bump_revision(context, subnet, ovn_const.TYPE_SUBNETS)

    def _modify_subnet_dhcp_options(self, subnet, ovn_subnet, network,
                                    txn):
        if subnet['enable_dhcp'] and not ovn_subnet:
            self._enable_subnet_dhcp_options(subnet, network, txn)
        elif subnet['enable_dhcp'] and ovn_subnet:
            self._update_subnet_dhcp_options(subnet, network, txn)
        elif not subnet['enable_dhcp'] and ovn_subnet:
            self._remove_subnet_dhcp_options(subnet['id'], txn)

    def update_subnet(self, context, subnet, network, txn=None):
        ovn_subnet = self._nb_idl.get_subnet_dhcp_options(
            subnet['id'])['subnet']

        if subnet['enable_dhcp'] or ovn_subnet:
            self.update_metadata_port(context, network['id'])

        check_rev_cmd = self._nb_idl.check_revision_number(
            subnet['id'], subnet, ovn_const.TYPE_SUBNETS)
        if not txn:
            with self._nb_idl.transaction(check_error=True) as txn_n:
                txn_n.add(check_rev_cmd)
                self._modify_subnet_dhcp_options(subnet, ovn_subnet,
                                                 network, txn_n)
        else:
            self._modify_subnet_dhcp_options(subnet, ovn_subnet, network,
                                             txn)
        if check_rev_cmd.result == ovn_const.TXN_COMMITTED:
            db_rev.bump_revision(context, subnet, ovn_const.TYPE_SUBNETS)

    def delete_subnet(self, context, subnet_id):
        with self._nb_idl.transaction(check_error=True) as txn:
            self._remove_subnet_dhcp_options(subnet_id, txn)
        db_rev.delete_revision(
            context, subnet_id, ovn_const.TYPE_SUBNETS)
    def create_security_group(self, context, security_group):
        # If the OVN schema supports Port Groups, we'll model security
        # groups as such. Otherwise, for backwards compatibility, we'll
        # keep creating two Address Sets for each Neutron SG (one for IPv4
        # and one for IPv6).
        with self._nb_idl.transaction(check_error=True) as txn:
            ext_ids = {ovn_const.OVN_SG_EXT_ID_KEY: security_group['id']}
            if self._nb_idl.is_port_groups_supported():
                name = utils.ovn_port_group_name(security_group['id'])
                txn.add(self._nb_idl.pg_add(
                    name=name, acls=[], external_ids=ext_ids))
                # When a SG is created, it comes with some default rules,
                # so we'll apply them to the Port Group.
                ovn_acl.add_acls_for_sg_port_group(self._nb_idl,
                                                   security_group, txn)
            else:
                for ip_version in ('ip4', 'ip6'):
                    name = utils.ovn_addrset_name(security_group['id'],
                                                  ip_version)
                    txn.add(self._nb_idl.create_address_set(
                        name=name, external_ids=ext_ids))
        db_rev.bump_revision(
            context, security_group, ovn_const.TYPE_SECURITY_GROUPS)

    def create_default_drop_port_group(self, ports=None):
        pg_name = ovn_const.OVN_DROP_PORT_GROUP_NAME
        with self._nb_idl.transaction(check_error=True) as txn:
            if not self._nb_idl.get_port_group(pg_name):
                # If the drop Port Group doesn't exist yet, create it.
                txn.add(self._nb_idl.pg_add(pg_name, acls=[],
                                            may_exist=True))
                # Add ACLs to this Port Group so that all traffic is
                # dropped.
                acls = ovn_acl.add_acls_for_drop_port_group(pg_name)
                for acl in acls:
                    txn.add(self._nb_idl.pg_acl_add(may_exist=True, **acl))

            if ports:
                ports_ids = [port['id'] for port in ports]
                # Add the ports to the default Port Group
                txn.add(self._nb_idl.pg_add_ports(pg_name, ports_ids))

    def _add_port_to_drop_port_group(self, port, txn):
        self.create_default_drop_port_group()
        txn.add(self._nb_idl.pg_add_ports(
            ovn_const.OVN_DROP_PORT_GROUP_NAME, port))

    def _del_port_from_drop_port_group(self, port, txn):
        pg_name = ovn_const.OVN_DROP_PORT_GROUP_NAME
        if self._nb_idl.get_port_group(pg_name):
            txn.add(self._nb_idl.pg_del_ports(pg_name, port))

    def delete_security_group(self, context, security_group_id):
        with self._nb_idl.transaction(check_error=True) as txn:
            if self._nb_idl.is_port_groups_supported():
                name = utils.ovn_port_group_name(security_group_id)
                txn.add(self._nb_idl.pg_del(name=name))
            else:
                for ip_version in ('ip4', 'ip6'):
                    name = utils.ovn_addrset_name(security_group_id,
                                                  ip_version)
                    txn.add(self._nb_idl.delete_address_set(name=name))
        db_rev.delete_revision(context, security_group_id,
                               ovn_const.TYPE_SECURITY_GROUPS)

    def _process_security_group_rule(self, rule, is_add_acl=True):
        admin_context = n_context.get_admin_context()
        ovn_acl.update_acls_for_security_group(
            self._plugin, admin_context, self._nb_idl,
            rule['security_group_id'], rule, is_add_acl=is_add_acl)

    def create_security_group_rule(self, context, rule):
        self._process_security_group_rule(rule)
        db_rev.bump_revision(
            context, rule, ovn_const.TYPE_SECURITY_GROUP_RULES)

    def delete_security_group_rule(self, context, rule):
        self._process_security_group_rule(rule, is_add_acl=False)
        db_rev.delete_revision(
            context, rule['id'], ovn_const.TYPE_SECURITY_GROUP_RULES)

    def _find_metadata_port(self, context, network_id):
        if not ovn_conf.is_ovn_metadata_enabled():
            return

        ports = self._plugin.get_ports(context, filters=dict(
            network_id=[network_id],
            device_owner=[const.DEVICE_OWNER_DHCP]))
        # Metadata ports are DHCP ports not belonging to the Neutron
        # DHCP agents
        for port in ports:
            if not utils.is_neutron_dhcp_agent_port(port):
                return port

    def _find_metadata_port_ip(self, context, subnet):
        metadata_port = self._find_metadata_port(context,
                                                 subnet['network_id'])
        if metadata_port:
            for fixed_ip in metadata_port['fixed_ips']:
                if fixed_ip['subnet_id'] == subnet['id']:
                    return fixed_ip['ip_address']
    def create_metadata_port(self, context, network):
        if ovn_conf.is_ovn_metadata_enabled():
            metadata_port = self._find_metadata_port(context,
                                                     network['id'])
            if not metadata_port:
                # Create a neutron port for DHCP/metadata services
                port = {'port':
                        {'network_id': network['id'],
                         'tenant_id': network['project_id'],
                         'device_owner': const.DEVICE_OWNER_DHCP,
                         'device_id': 'ovnmeta-%s' % network['id']}}
                # TODO(boden): rehome create_port into neutron-lib
                p_utils.create_port(self._plugin, context, port)

    def update_metadata_port(self, context, network_id):
        """Update the metadata port.

        This function will allocate an IP address for the metadata port of
        the given network in all its IPv4 subnets.
        """
        if not ovn_conf.is_ovn_metadata_enabled():
            return

        # Retrieve the metadata port of this network
        metadata_port = self._find_metadata_port(context, network_id)
        if not metadata_port:
            LOG.error("Metadata port couldn't be found for network %s",
                      network_id)
            return

        # Retrieve all subnets in this network
        subnets = self._plugin.get_subnets(context, filters=dict(
            network_id=[network_id], ip_version=[4]))

        subnet_ids = set(s['id'] for s in subnets)
        port_subnet_ids = set(ip['subnet_id'] for ip in
                              metadata_port['fixed_ips'])

        # Find all subnets where the metadata port doesn't have an IP yet
        # and allocate one.
        if subnet_ids != port_subnet_ids:
            wanted_fixed_ips = []
            for fixed_ip in metadata_port['fixed_ips']:
                wanted_fixed_ips.append(
                    {'subnet_id': fixed_ip['subnet_id'],
                     'ip_address': fixed_ip['ip_address']})
            wanted_fixed_ips.extend(
                dict(subnet_id=s)
                for s in subnet_ids - port_subnet_ids)

            port = {'id': metadata_port['id'],
                    'port': {'network_id': network_id,
                             'fixed_ips': wanted_fixed_ips}}
            self._plugin.update_port(n_context.get_admin_context(),
                                     metadata_port['id'], port)

    def get_parent_port(self, port_id):
        return self._nb_idl.get_parent_port(port_id)

    def is_dns_required_for_port(self, port):
        try:
            if not all([port['dns_name'], port['dns_assignment'],
                        port['device_id']]):
                return False
        except KeyError:
            # Possible that the dns extension is not enabled.
            return False

        if not self._nb_idl.is_table_present('DNS'):
            return False

        return True

    def get_port_dns_records(self, port):
        port_dns_records = {}
        net = port.get('network', {})
        net_dns_domain = net.get('dns_domain', '').rstrip('.')

        for dns_assignment in port.get('dns_assignment', []):
            hostname = dns_assignment['hostname']
            fqdn = dns_assignment['fqdn'].rstrip('.')
            net_dns_fqdn = hostname + '.' + net_dns_domain
            if hostname not in port_dns_records:
                port_dns_records[hostname] = dns_assignment['ip_address']
                if net_dns_domain and net_dns_fqdn != fqdn:
                    port_dns_records[net_dns_fqdn] = (
                        dns_assignment['ip_address'])
            else:
                port_dns_records[hostname] += " " + (
                    dns_assignment['ip_address'])
                if net_dns_domain and net_dns_fqdn != fqdn:
                    port_dns_records[hostname + '.' + net_dns_domain] += (
                        " " + dns_assignment['ip_address'])

            if fqdn not in port_dns_records:
                port_dns_records[fqdn] = dns_assignment['ip_address']
            else:
                port_dns_records[fqdn] += (
                    " " + dns_assignment['ip_address'])

        return port_dns_records
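    # An illustrative sketch of the mapping returned by
    # get_port_dns_records() above, for a port with dns_name 'vm1' on a
    # network whose dns_domain matches the FQDN domain (the IP is a
    # placeholder; when a name repeats, addresses are space-joined):
    #
    #   {'vm1': '10.0.0.4',
    #    'vm1.example.org': '10.0.0.4'}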
records_to_add = self.get_port_dns_records(port) lswitch_name = utils.ovn_name(port['network_id']) ls, ls_dns_record = self._nb_idl.get_ls_and_dns_record(lswitch_name) # If ls_dns_record is None, then we need to create a DNS row for the # logical switch. if ls_dns_record is None: dns_add_txn = txn.add(self._nb_idl.dns_add( external_ids={'ls_name': ls.name}, records=records_to_add)) txn.add(self._nb_idl.ls_set_dns_records(ls.uuid, dns_add_txn)) return if original_port: old_records = self.get_port_dns_records(original_port) for old_hostname, old_ips in old_records.items(): if records_to_add.get(old_hostname) != old_ips: txn.add(self._nb_idl.dns_remove_record( ls_dns_record.uuid, old_hostname, if_exists=True)) for hostname, ips in records_to_add.items(): if ls_dns_record.records.get(hostname) != ips: txn.add(self._nb_idl.dns_add_record( ls_dns_record.uuid, hostname, ips)) def add_txns_to_remove_port_dns_records(self, txn, port): lswitch_name = utils.ovn_name(port['network_id']) ls, ls_dns_record = self._nb_idl.get_ls_and_dns_record(lswitch_name) if ls_dns_record is None: return net = port.get('network', {}) net_dns_domain = net.get('dns_domain', '').rstrip('.') hostnames = [] for dns_assignment in port['dns_assignment']: hostname = dns_assignment['hostname'] fqdn = dns_assignment['fqdn'].rstrip('.') if hostname not in hostnames: hostnames.append(hostname) net_dns_fqdn = hostname + '.' + net_dns_domain if net_dns_domain and net_dns_fqdn != fqdn: hostnames.append(net_dns_fqdn) if fqdn not in hostnames: hostnames.append(fqdn) for hostname in hostnames: if ls_dns_record.records.get(hostname): txn.add(self._nb_idl.dns_remove_record( ls_dns_record.uuid, hostname, if_exists=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py0000644000175000017500000016374600000000000031572 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc from datetime import datetime import itertools from eventlet import greenthread from neutron_lib.api.definitions import l3 from neutron_lib.api.definitions import provider_net as pnet from neutron_lib import constants from neutron_lib import context from neutron_lib import exceptions as n_exc from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from neutron_lib.utils import helpers from oslo_log import log import six from neutron.common.ovn import acl as acl_utils from neutron.common.ovn import constants as ovn_const from neutron.common.ovn import utils from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_client from neutron.services.segments import db as segments_db LOG = log.getLogger(__name__) SYNC_MODE_OFF = 'off' SYNC_MODE_LOG = 'log' SYNC_MODE_REPAIR = 'repair' @six.add_metaclass(abc.ABCMeta) class OvnDbSynchronizer(object): def __init__(self, core_plugin, ovn_api, ovn_driver): self.ovn_driver = ovn_driver self.ovn_api = ovn_api self.core_plugin = core_plugin def sync(self, delay_seconds=10): self._gt = greenthread.spawn_after_local(delay_seconds, self.do_sync) @abc.abstractmethod def do_sync(self): """Method to sync the OVN DB.""" def stop(self): try: self._gt.kill() except AttributeError: # Haven't started syncing pass class OvnNbSynchronizer(OvnDbSynchronizer): """Synchronizer class for NB.""" def __init__(self, core_plugin, ovn_api, sb_ovn, mode, ovn_driver): super(OvnNbSynchronizer, self).__init__( core_plugin, ovn_api, ovn_driver) self.mode = mode self.l3_plugin = directory.get_plugin(plugin_constants.L3) self._ovn_client = ovn_client.OVNClient(ovn_api, sb_ovn) def stop(self): if utils.is_ovn_l3(self.l3_plugin): self.l3_plugin._ovn.ovsdb_connection.stop() self.l3_plugin._sb_ovn.ovsdb_connection.stop() super(OvnNbSynchronizer, self).stop() def do_sync(self): if self.mode == SYNC_MODE_OFF: LOG.debug("Neutron sync mode is off") return LOG.debug("Starting OVN-Northbound DB sync process") ctx = context.get_admin_context() self.sync_address_sets(ctx) self.sync_port_groups(ctx) self.sync_networks_ports_and_dhcp_opts(ctx) self.sync_port_dns_records(ctx) self.sync_acls(ctx) self.sync_routers_and_rports(ctx) def _create_port_in_ovn(self, ctx, port): # Remove any old ACLs for the port to avoid creating duplicate ACLs. self.ovn_api.delete_acl( utils.ovn_name(port['network_id']), port['id']).execute(check_error=True) # Create the port in OVN. This will include ACL and Address Set # updates as needed. self._ovn_client.create_port(ctx, port) def remove_common_acls(self, neutron_acls, nb_acls): """Take out common acls of the two acl dictionaries. 
@param neutron_acls: neutron dictionary of port vs acls @type neutron_acls: {} @param nb_acls: nb dictionary of port vs acls @type nb_acls: {} @return: Nothing, original dictionary modified """ for port in neutron_acls.keys(): for acl in list(neutron_acls[port]): if port in nb_acls and acl in nb_acls[port]: neutron_acls[port].remove(acl) nb_acls[port].remove(acl) def compute_address_set_difference(self, neutron_sgs, nb_sgs): neutron_sgs_name_set = set(neutron_sgs.keys()) nb_sgs_name_set = set(nb_sgs.keys()) sgnames_to_add = list(neutron_sgs_name_set - nb_sgs_name_set) sgnames_to_delete = list(nb_sgs_name_set - neutron_sgs_name_set) sgs_common = list(neutron_sgs_name_set & nb_sgs_name_set) sgs_to_update = {} for sg_name in sgs_common: neutron_addr_set = set(neutron_sgs[sg_name]['addresses']) nb_addr_set = set(nb_sgs[sg_name]['addresses']) addrs_to_add = list(neutron_addr_set - nb_addr_set) addrs_to_delete = list(nb_addr_set - neutron_addr_set) if addrs_to_add or addrs_to_delete: sgs_to_update[sg_name] = {'name': sg_name, 'addrs_add': addrs_to_add, 'addrs_remove': addrs_to_delete} return sgnames_to_add, sgnames_to_delete, sgs_to_update def get_acls(self, context): """create the list of ACLS in OVN. @param context: neutron_lib.context @type context: object of type neutron_lib.context.Context @var lswitch_names: List of lswitch names @var acl_list: List of NB acls @var acl_list_dict: Dictionary of acl-lists based on lport as key @return: acl_list-dict """ lswitch_names = set([]) for network in self.core_plugin.get_networks(context): lswitch_names.add(network['id']) acl_dict, ignore1, ignore2 = ( self.ovn_api.get_acls_for_lswitches(lswitch_names)) acl_list = list(itertools.chain(*acl_dict.values())) acl_list_dict = {} for acl in acl_list: acl = acl_utils.filter_acl_dict( acl, extra_fields=['lport', 'lswitch']) key = acl['lport'] if key in acl_list_dict: acl_list_dict[key].append(acl) else: acl_list_dict[key] = list([acl]) return acl_list_dict def get_address_sets(self): return self.ovn_api.get_address_sets() def sync_port_groups(self, ctx): """Sync Port Groups between neutron and NB. @param ctx: neutron_lib.context @type ctx: object of type neutron_lib.context.Context """ if not self.ovn_api.is_port_groups_supported(): return neutron_sgs = {} neutron_pgs = set() with ctx.session.begin(subtransactions=True): for sg in self.core_plugin.get_security_groups(ctx): pg_name = utils.ovn_port_group_name(sg['id']) neutron_pgs.add(pg_name) neutron_sgs[pg_name] = sg['id'] neutron_pgs.add(ovn_const.OVN_DROP_PORT_GROUP_NAME) ovn_pgs = set() port_groups = self.ovn_api.db_list_rows('Port_Group').execute() or [] for pg in port_groups: ovn_pgs.add(pg.name) add_pgs = neutron_pgs.difference(ovn_pgs) remove_pgs = ovn_pgs.difference(neutron_pgs) LOG.debug('Port Groups added %d, removed %d', len(add_pgs), len(remove_pgs)) if self.mode == SYNC_MODE_REPAIR: LOG.debug('Port-Group-SYNC: transaction started @ %s', str(datetime.now())) if add_pgs: db_ports = self.core_plugin.get_ports(ctx) ovn_ports = set(p.name for p in self.ovn_api.lsp_list().execute()) with self.ovn_api.transaction(check_error=True) as txn: pg = ovn_const.OVN_DROP_PORT_GROUP_NAME # Process default drop port group first if pg in add_pgs: txn.add(self.ovn_api.pg_add(name=pg, acls=[])) add_pgs.remove(pg) # Add ports to the drop port group. Only add those that # already exists in OVN. The rest will be added during the # ports sync operation later. 
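# The repair path above reduces to plain set arithmetic over port group
# names; a self-contained sketch (all names invented):
neutron_pg_names = {'pg_sg1', 'pg_sg2', 'neutron_pg_drop'}
nb_pg_names = {'pg_sg2', 'pg_stale'}
pgs_to_add = neutron_pg_names.difference(nb_pg_names)
pgs_to_remove = nb_pg_names.difference(neutron_pg_names)
assert pgs_to_add == {'pg_sg1', 'neutron_pg_drop'}
assert pgs_to_remove == {'pg_stale'}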
for n_port in db_ports: if ((n_port['security_groups'] or n_port['port_security_enabled']) and n_port['id'] in ovn_ports): txn.add(self.ovn_api.pg_add_ports( pg, n_port['id'])) for pg in add_pgs: # If it's a security group PG, add the ext id ext_ids = {ovn_const.OVN_SG_EXT_ID_KEY: neutron_sgs[pg]} txn.add(self.ovn_api.pg_add(name=pg, acls=[], external_ids=ext_ids)) # Add the ports belonging to the SG to this port group for n_port in db_ports: if (neutron_sgs[pg] in n_port['security_groups'] and n_port['id'] in ovn_ports): txn.add(self.ovn_api.pg_add_ports( pg, n_port['id'])) for pg in remove_pgs: txn.add(self.ovn_api.pg_del(pg)) LOG.debug('Port-Group-SYNC: transaction finished @ %s', str(datetime.now())) def sync_address_sets(self, ctx): """Sync Address Sets between neutron and NB. @param ctx: neutron_lib.context @type ctx: object of type neutron_lib.context.Context @var db_ports: List of ports from neutron DB """ LOG.debug('Address-Set-SYNC: started @ %s', str(datetime.now())) sgnames_to_add = sgnames_to_delete = [] sgs_to_update = {} nb_sgs = self.get_address_sets() if self.ovn_api.is_port_groups_supported(): # If Port Groups are supported, we just need to delete all Address # Sets from NB database. sgnames_to_delete = nb_sgs.keys() else: neutron_sgs = {} with ctx.session.begin(subtransactions=True): db_sgs = self.core_plugin.get_security_groups(ctx) db_ports = self.core_plugin.get_ports(ctx) for sg in db_sgs: for ip_version in ['ip4', 'ip6']: name = utils.ovn_addrset_name(sg['id'], ip_version) neutron_sgs[name] = { 'name': name, 'addresses': [], 'external_ids': { ovn_const.OVN_SG_EXT_ID_KEY: sg['id']}} for port in db_ports: sg_ids = utils.get_lsp_security_groups(port) if port.get('fixed_ips') and sg_ids: addresses = acl_utils.acl_port_ips(port) for sg_id in sg_ids: for ip_version in addresses: name = utils.ovn_addrset_name(sg_id, ip_version) neutron_sgs[name]['addresses'].extend( addresses[ip_version]) sgnames_to_add, sgnames_to_delete, sgs_to_update = ( self.compute_address_set_difference(neutron_sgs, nb_sgs)) LOG.debug('Address_Sets added %d, removed %d, updated %d', len(sgnames_to_add), len(sgnames_to_delete), len(sgs_to_update)) if self.mode == SYNC_MODE_REPAIR: LOG.debug('Address-Set-SYNC: transaction started @ %s', str(datetime.now())) with self.ovn_api.transaction(check_error=True) as txn: for sgname in sgnames_to_add: sg = neutron_sgs[sgname] txn.add(self.ovn_api.create_address_set(**sg)) for sgname, sg in sgs_to_update.items(): txn.add(self.ovn_api.update_address_set(**sg)) for sgname in sgnames_to_delete: txn.add(self.ovn_api.delete_address_set(name=sgname)) LOG.debug('Address-Set-SYNC: transaction finished @ %s', str(datetime.now())) def _get_acls_from_port_groups(self): ovn_acls = [] port_groups = self.ovn_api.db_list_rows('Port_Group').execute() for pg in port_groups: acls = getattr(pg, 'acls', []) for acl in acls: acl_string = {} acl_string['port_group'] = pg.name for acl_key in getattr(acl, "_data", {}): acl_string[acl_key] = getattr(acl, acl_key) acl_string.pop('meter') acl_string.pop('external_ids') ovn_acls.append(acl_string) return ovn_acls def _sync_acls_port_groups(self, ctx): # If Port Groups are supported, the ACLs in the system will equal # the number of SG rules plus the default drop rules as OVN would # allow all traffic by default if those are not added. 
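# Per the note above, with Port Groups the NB ACL population should equal
# the SG rules plus the default drop ACLs; a sketch of that accounting
# with invented counts (one ingress and one egress drop ACL assumed):
num_sg_rules = 14
num_drop_acls = 2
expected_nb_acls = num_sg_rules + num_drop_acls
# Any deviation from expected_nb_acls is drift to be logged or repaired.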
neutron_acls = [] for sgr in self.core_plugin.get_security_group_rules(ctx): pg_name = utils.ovn_port_group_name(sgr['security_group_id']) neutron_acls.append(acl_utils._add_sg_rule_acl_for_port_group( pg_name, sgr, self.ovn_api)) neutron_acls += acl_utils.add_acls_for_drop_port_group( ovn_const.OVN_DROP_PORT_GROUP_NAME) ovn_acls = self._get_acls_from_port_groups() # We need to remove also all the ACLs applied to Logical Switches def get_num_acls(ovn_acls): return len([item for sublist in ovn_acls for item in sublist[1]]) ovn_acls_from_ls = [(row.name, row.acls) for row in ( self.ovn_api._tables['Logical_Switch'].rows.values())] num_acls_to_remove_from_ls = get_num_acls(ovn_acls_from_ls) # Remove the common ones for na in list(neutron_acls): for ovn_a in ovn_acls: if all(item in na.items() for item in ovn_a.items()): neutron_acls.remove(na) ovn_acls.remove(ovn_a) break num_acls_to_add = len(neutron_acls) num_acls_to_remove = len(ovn_acls) + num_acls_to_remove_from_ls if 0 != num_acls_to_add or 0 != num_acls_to_remove: LOG.warning('ACLs-to-be-added %(add)d ' 'ACLs-to-be-removed %(remove)d', {'add': num_acls_to_add, 'remove': num_acls_to_remove}) if self.mode == SYNC_MODE_REPAIR: with self.ovn_api.transaction(check_error=True) as txn: for acla in neutron_acls: LOG.warning('ACL found in Neutron but not in ' 'OVN DB for port group %s', acla['port_group']) txn.add(self.ovn_api.pg_acl_add(**acla)) with self.ovn_api.transaction(check_error=True) as txn: for aclr in ovn_acls: LOG.warning('ACLs found in OVN DB but not in ' 'Neutron for port group %s', aclr['port_group']) txn.add(self.ovn_api.pg_acl_del(aclr['port_group'], aclr['direction'], aclr['priority'], aclr['match'])) for aclr in ovn_acls_from_ls: # Remove all the ACLs from any Logical Switch if they have # any. Elements are (lswitch_name, list_of_acls). if len(aclr[1]) > 0: LOG.warning('Removing ACLs from OVN from Logical ' 'Switch %s', aclr[0]) txn.add(self.ovn_api.acl_del(aclr[0])) def _sync_acls(self, ctx): """Sync ACLs between neutron and NB when not using Port Groups. 
@param ctx: neutron_lib.context @type ctx: object of type neutron_lib.context.Context @var db_ports: List of ports from neutron DB @var neutron_acls: neutron dictionary of port vs list-of-acls @var nb_acls: NB dictionary of port vs list-of-acls @var subnet_cache: cache for subnets @return: Nothing """ db_ports = {} for port in self.core_plugin.get_ports(ctx): db_ports[port['id']] = port sg_cache = {} subnet_cache = {} neutron_acls = {} for port_id, port in db_ports.items(): if utils.get_lsp_security_groups(port): acl_list = acl_utils.add_acls(self.core_plugin, ctx, port, sg_cache, subnet_cache, self.ovn_api) if port_id in neutron_acls: neutron_acls[port_id].extend(acl_list) else: neutron_acls[port_id] = acl_list nb_acls = self.get_acls(ctx) self.remove_common_acls(neutron_acls, nb_acls) num_acls_to_add = len(list(itertools.chain(*neutron_acls.values()))) num_acls_to_remove = len(list(itertools.chain(*nb_acls.values()))) if 0 != num_acls_to_add or 0 != num_acls_to_remove: LOG.warning('ACLs-to-be-added %(add)d ' 'ACLs-to-be-removed %(remove)d', {'add': num_acls_to_add, 'remove': num_acls_to_remove}) if self.mode == SYNC_MODE_REPAIR: with self.ovn_api.transaction(check_error=True) as txn: for acla in list(itertools.chain(*neutron_acls.values())): LOG.warning('ACL found in Neutron but not in ' 'OVN DB for port %s', acla['lport']) txn.add(self.ovn_api.add_acl(**acla)) with self.ovn_api.transaction(check_error=True) as txn: for aclr in list(itertools.chain(*nb_acls.values())): # Both lswitch and lport aren't needed within the ACL. lswitchr = aclr.pop('lswitch').replace('neutron-', '') lportr = aclr.pop('lport') aclr_dict = {lportr: aclr} LOG.warning('ACLs found in OVN DB but not in ' 'Neutron for port %s', lportr) txn.add(self.ovn_api.update_acls( [lswitchr], [lportr], aclr_dict, need_compare=False, is_add_acl=False )) def sync_acls(self, ctx): """Sync ACLs between neutron and NB. @param ctx: neutron_lib.context @type ctx: object of type neutron_lib.context.Context @return: Nothing """ LOG.debug('ACL-SYNC: started @ %s', str(datetime.now())) if self.ovn_api.is_port_groups_supported(): self._sync_acls_port_groups(ctx) else: self._sync_acls(ctx) LOG.debug('ACL-SYNC: finished @ %s', str(datetime.now())) def _calculate_fips_differences(self, ovn_fips, db_fips): to_add = [] to_remove = [] for db_fip in db_fips: for ovn_fip in ovn_fips: if (ovn_fip['logical_ip'] == db_fip['fixed_ip_address'] and ovn_fip['external_ip'] == db_fip['floating_ip_address']): break else: to_add.append(db_fip) for ovn_fip in ovn_fips: for db_fip in db_fips: if (ovn_fip['logical_ip'] == db_fip['fixed_ip_address'] and ovn_fip['external_ip'] == db_fip['floating_ip_address']): break else: to_remove.append(ovn_fip) return to_add, to_remove def sync_routers_and_rports(self, ctx): """Sync Routers between neutron and NB. @param ctx: neutron_lib.context @type ctx: object of type neutron_lib.context.Context @var db_routers: List of Routers from neutron DB @var db_router_ports: List of Router ports from neutron DB @var lrouters: NB dictionary of logical routers and the corresponding logical router ports. 
vs list-of-acls @var del_lrouters_list: List of Routers that need to be deleted from NB @var del_lrouter_ports_list: List of Router ports that need to be deleted from NB @return: Nothing """ if not utils.is_ovn_l3(self.l3_plugin): LOG.debug("OVN L3 mode is disabled, skipping " "sync routers and router ports") return LOG.debug('OVN-NB Sync Routers and Router ports started @ %s', str(datetime.now())) db_routers = {} db_extends = {} db_router_ports = {} for router in self.l3_plugin.get_routers(ctx): db_routers[router['id']] = router db_extends[router['id']] = {} db_extends[router['id']]['routes'] = [] db_extends[router['id']]['snats'] = [] db_extends[router['id']]['fips'] = [] if not router.get(l3.EXTERNAL_GW_INFO): continue gateways = self._ovn_client._get_gw_info(ctx, router) for gw_info in gateways: prefix = (constants.IPv4_ANY if gw_info.ip_version == constants.IP_VERSION_4 else constants.IPv6_ANY) if gw_info.gateway_ip: db_extends[router['id']]['routes'].append( {'destination': prefix, 'nexthop': gw_info.gateway_ip}) if gw_info.ip_version == constants.IP_VERSION_6: continue if gw_info.router_ip and utils.is_snat_enabled(router): networks = ( self._ovn_client._get_v4_network_of_all_router_ports( ctx, router['id'])) for network in networks: db_extends[router['id']]['snats'].append({ 'logical_ip': network, 'external_ip': gw_info.router_ip, 'type': 'snat'}) fips = self.l3_plugin.get_floatingips( ctx, {'router_id': list(db_routers.keys())}) for fip in fips: db_extends[fip['router_id']]['fips'].append(fip) interfaces = self.l3_plugin._get_sync_interfaces( ctx, list(db_routers.keys()), [constants.DEVICE_OWNER_ROUTER_INTF, constants.DEVICE_OWNER_ROUTER_GW, constants.DEVICE_OWNER_DVR_INTERFACE, constants.DEVICE_OWNER_ROUTER_HA_INTF, constants.DEVICE_OWNER_HA_REPLICATED_INT]) for interface in interfaces: db_router_ports[interface['id']] = interface lrouters = self.ovn_api.get_all_logical_routers_with_rports() del_lrouters_list = [] del_lrouter_ports_list = [] update_sroutes_list = [] update_lrport_list = [] update_snats_list = [] update_fips_list = [] for lrouter in lrouters: if lrouter['name'] in db_routers: for lrport, lrport_nets in lrouter['ports'].items(): if lrport in db_router_ports: # We dont have to check for the networks and # ipv6_ra_configs values. Lets add it to the # update_lrport_list. If they are in sync, then # update_router_port will be a no-op. 
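# Gateway info collected above turns into one default route per address
# family; a tiny standalone sketch with invented values ('0.0.0.0/0' and
# '::/0' mirror constants.IPv4_ANY / constants.IPv6_ANY):
def _sketch_default_route(ip_version, gateway_ip):
    prefix = '0.0.0.0/0' if ip_version == 4 else '::/0'
    return {'destination': prefix, 'nexthop': gateway_ip}

# _sketch_default_route(4, '172.24.4.1')
# -> {'destination': '0.0.0.0/0', 'nexthop': '172.24.4.1'}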
update_lrport_list.append(db_router_ports[lrport]) del db_router_ports[lrport] else: del_lrouter_ports_list.append( {'port': lrport, 'lrouter': lrouter['name']}) if 'routes' in db_routers[lrouter['name']]: db_routes = db_routers[lrouter['name']]['routes'] else: db_routes = [] if 'routes' in db_extends[lrouter['name']]: db_routes.extend(db_extends[lrouter['name']]['routes']) ovn_routes = lrouter['static_routes'] add_routes, del_routes = helpers.diff_list_of_dict( ovn_routes, db_routes) update_sroutes_list.append({'id': lrouter['name'], 'add': add_routes, 'del': del_routes}) ovn_fips = lrouter['dnat_and_snats'] db_fips = db_extends[lrouter['name']]['fips'] add_fips, del_fips = self._calculate_fips_differences( ovn_fips, db_fips) update_fips_list.append({'id': lrouter['name'], 'add': add_fips, 'del': del_fips}) ovn_nats = lrouter['snats'] db_snats = db_extends[lrouter['name']]['snats'] add_snats, del_snats = helpers.diff_list_of_dict( ovn_nats, db_snats) update_snats_list.append({'id': lrouter['name'], 'add': add_snats, 'del': del_snats}) del db_routers[lrouter['name']] else: del_lrouters_list.append(lrouter) for r_id, router in db_routers.items(): LOG.warning("Router found in Neutron but not in " "OVN DB, router id=%s", router['id']) if self.mode == SYNC_MODE_REPAIR: try: LOG.warning("Creating the router %s in OVN NB DB", router['id']) self._ovn_client.create_router( ctx, router, add_external_gateway=False) if 'routes' in router: update_sroutes_list.append( {'id': router['id'], 'add': router['routes'], 'del': []}) if 'routes' in db_extends[router['id']]: update_sroutes_list.append( {'id': router['id'], 'add': db_extends[router['id']]['routes'], 'del': []}) if 'snats' in db_extends[router['id']]: update_snats_list.append( {'id': router['id'], 'add': db_extends[router['id']]['snats'], 'del': []}) if 'fips' in db_extends[router['id']]: update_fips_list.append( {'id': router['id'], 'add': db_extends[router['id']]['fips'], 'del': []}) except RuntimeError: LOG.warning("Create router in OVN NB failed for router %s", router['id']) for rp_id, rrport in db_router_ports.items(): LOG.warning("Router Port found in Neutron but not in OVN " "DB, router port_id=%s", rrport['id']) if self.mode == SYNC_MODE_REPAIR: try: LOG.warning("Creating the router port %s in OVN NB DB", rrport['id']) self._ovn_client._create_lrouter_port( ctx, rrport['device_id'], rrport) except RuntimeError: LOG.warning("Create router port in OVN " "NB failed for router port %s", rrport['id']) for rport in update_lrport_list: LOG.warning("Router Port port_id=%s needs to be updated " "for networks changed", rport['id']) if self.mode == SYNC_MODE_REPAIR: try: LOG.warning( "Updating networks on router port %s in OVN NB DB", rport['id']) self._ovn_client.update_router_port(ctx, rport) except RuntimeError: LOG.warning("Update router port networks in OVN " "NB failed for router port %s", rport['id']) with self.ovn_api.transaction(check_error=True) as txn: for lrouter in del_lrouters_list: LOG.warning("Router found in OVN but not in " "Neutron, router id=%s", lrouter['name']) if self.mode == SYNC_MODE_REPAIR: LOG.warning("Deleting the router %s from OVN NB DB", lrouter['name']) txn.add(self.ovn_api.delete_lrouter( utils.ovn_name(lrouter['name']))) for lrport_info in del_lrouter_ports_list: LOG.warning("Router Port found in OVN but not in " "Neutron, port_id=%s", lrport_info['port']) if self.mode == SYNC_MODE_REPAIR: LOG.warning("Deleting the port %s from OVN NB DB", lrport_info['port']) txn.add(self.ovn_api.delete_lrouter_port( 
utils.ovn_lrouter_port_name(lrport_info['port']), utils.ovn_name(lrport_info['lrouter']), if_exists=False)) for sroute in update_sroutes_list: if sroute['add']: LOG.warning("Router %(id)s static routes %(route)s " "found in Neutron but not in OVN", {'id': sroute['id'], 'route': sroute['add']}) if self.mode == SYNC_MODE_REPAIR: LOG.warning("Add static routes %s to OVN NB DB", sroute['add']) for route in sroute['add']: txn.add(self.ovn_api.add_static_route( utils.ovn_name(sroute['id']), ip_prefix=route['destination'], nexthop=route['nexthop'])) if sroute['del']: LOG.warning("Router %(id)s static routes %(route)s " "found in OVN but not in Neutron", {'id': sroute['id'], 'route': sroute['del']}) if self.mode == SYNC_MODE_REPAIR: LOG.warning("Delete static routes %s from OVN NB DB", sroute['del']) for route in sroute['del']: txn.add(self.ovn_api.delete_static_route( utils.ovn_name(sroute['id']), ip_prefix=route['destination'], nexthop=route['nexthop'])) for fip in update_fips_list: if fip['del']: LOG.warning("Router %(id)s floating ips %(fip)s " "found in OVN but not in Neutron", {'id': fip['id'], 'fip': fip['del']}) if self.mode == SYNC_MODE_REPAIR: LOG.warning( "Delete floating ips %s from OVN NB DB", fip['del']) for nat in fip['del']: self._ovn_client._delete_floatingip( nat, utils.ovn_name(fip['id']), txn=txn) if fip['add']: LOG.warning("Router %(id)s floating ips %(fip)s " "found in Neutron but not in OVN", {'id': fip['id'], 'fip': fip['add']}) if self.mode == SYNC_MODE_REPAIR: LOG.warning("Add floating ips %s to OVN NB DB", fip['add']) for nat in fip['add']: self._ovn_client._create_or_update_floatingip( nat, txn=txn) for snat in update_snats_list: if snat['del']: LOG.warning("Router %(id)s snat %(snat)s " "found in OVN but not in Neutron", {'id': snat['id'], 'snat': snat['del']}) if self.mode == SYNC_MODE_REPAIR: LOG.warning("Delete snats %s from OVN NB DB", snat['del']) for nat in snat['del']: txn.add(self.ovn_api.delete_nat_rule_in_lrouter( utils.ovn_name(snat['id']), logical_ip=nat['logical_ip'], external_ip=nat['external_ip'], type='snat')) if snat['add']: LOG.warning("Router %(id)s snat %(snat)s " "found in Neutron but not in OVN", {'id': snat['id'], 'snat': snat['add']}) if self.mode == SYNC_MODE_REPAIR: LOG.warning("Add snats %s to OVN NB DB", snat['add']) for nat in snat['add']: txn.add(self.ovn_api.add_nat_rule_in_lrouter( utils.ovn_name(snat['id']), logical_ip=nat['logical_ip'], external_ip=nat['external_ip'], type='snat')) LOG.debug('OVN-NB Sync routers and router ports finished %s', str(datetime.now())) def _sync_subnet_dhcp_options(self, ctx, db_networks, ovn_subnet_dhcp_options): LOG.debug('OVN-NB Sync DHCP options for Neutron subnets started') db_subnets = {} filters = {'enable_dhcp': [1]} for subnet in self.core_plugin.get_subnets(ctx, filters=filters): if (subnet['ip_version'] == constants.IP_VERSION_6 and subnet.get('ipv6_address_mode') == constants.IPV6_SLAAC): continue db_subnets[subnet['id']] = subnet del_subnet_dhcp_opts_list = [] for subnet_id, ovn_dhcp_opts in ovn_subnet_dhcp_options.items(): if subnet_id in db_subnets: network = db_networks[utils.ovn_name( db_subnets[subnet_id]['network_id'])] if constants.IP_VERSION_6 == db_subnets[subnet_id][ 'ip_version']: server_mac = ovn_dhcp_opts['options'].get('server_id') else: server_mac = ovn_dhcp_opts['options'].get('server_mac') dhcp_options = self._ovn_client._get_ovn_dhcp_options( db_subnets[subnet_id], network, server_mac=server_mac) # Verify that the cidr and options are also in sync. 
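# The in-sync test right below compares only two fields of the computed
# DHCP options row; a standalone sketch of the predicate, with made-up
# option values in the usage note:
def _sketch_dhcp_in_sync(want, have):
    return (want['cidr'] == have['cidr'] and
            want['options'] == have['options'])

# _sketch_dhcp_in_sync(
#     {'cidr': '10.0.0.0/24', 'options': {'router': '10.0.0.1'}},
#     {'cidr': '10.0.0.0/24', 'options': {'router': '10.0.0.1'}})
# -> True; any mismatch marks the subnet for repair.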
                if dhcp_options['cidr'] == ovn_dhcp_opts['cidr'] and (
                        dhcp_options['options'] == ovn_dhcp_opts['options']):
                    del db_subnets[subnet_id]
                else:
                    db_subnets[subnet_id]['ovn_dhcp_options'] = dhcp_options
            else:
                del_subnet_dhcp_opts_list.append(ovn_dhcp_opts)

        for subnet_id, subnet in db_subnets.items():
            LOG.warning('DHCP options for subnet %s are present in '
                        'Neutron but out of sync in OVN', subnet_id)
            if self.mode == SYNC_MODE_REPAIR:
                try:
                    LOG.debug('Adding/Updating DHCP options for subnet %s '
                              'in OVN NB DB', subnet_id)
                    network = db_networks[utils.ovn_name(subnet['network_id'])]
                    # _ovn_client._add_subnet_dhcp_options doesn't create
                    # a new row in DHCP_Options if the row already exists.
                    # See commands.AddDHCPOptionsCommand.
                    self._ovn_client._add_subnet_dhcp_options(
                        subnet, network, subnet.get('ovn_dhcp_options'))
                except RuntimeError:
                    LOG.warning('Adding/Updating DHCP options for subnet '
                                '%s failed in OVN NB DB', subnet_id)

        txn_commands = []
        for dhcp_opt in del_subnet_dhcp_opts_list:
            LOG.warning('Out of sync subnet DHCP options for subnet %s '
                        'found in OVN NB DB which need to be deleted',
                        dhcp_opt['external_ids']['subnet_id'])
            if self.mode == SYNC_MODE_REPAIR:
                LOG.debug('Deleting subnet DHCP options for subnet %s',
                          dhcp_opt['external_ids']['subnet_id'])
                txn_commands.append(self.ovn_api.delete_dhcp_options(
                    dhcp_opt['uuid']))
        if txn_commands:
            with self.ovn_api.transaction(check_error=True) as txn:
                for cmd in txn_commands:
                    txn.add(cmd)
        LOG.debug('OVN-NB Sync DHCP options for Neutron subnets finished')

    def _sync_port_dhcp_options(self, ctx, ports_need_sync_dhcp_opts,
                                ovn_port_dhcpv4_opts, ovn_port_dhcpv6_opts):
        LOG.debug('OVN-NB Sync DHCP options for Neutron ports with extra '
                  'dhcp options assigned started')
        txn_commands = []
        lsp_dhcp_key = {constants.IP_VERSION_4: 'dhcpv4_options',
                        constants.IP_VERSION_6: 'dhcpv6_options'}
        ovn_port_dhcp_opts = {constants.IP_VERSION_4: ovn_port_dhcpv4_opts,
                              constants.IP_VERSION_6: ovn_port_dhcpv6_opts}
        for port in ports_need_sync_dhcp_opts:
            if self.mode == SYNC_MODE_REPAIR:
                LOG.debug('Updating DHCP options for port %s in OVN NB DB',
                          port['id'])
                set_lsp = {}
                for ip_v in [constants.IP_VERSION_4, constants.IP_VERSION_6]:
                    dhcp_opts = (
                        self._ovn_client._get_port_dhcp_options(
                            port, ip_v))
                    if not dhcp_opts or 'uuid' in dhcp_opts:
                        # If the Logical_Switch_Port.dhcpv4_options or
                        # dhcpv6_options no longer refers to the port dhcp
                        # options created in DHCP_Options earlier, those
                        # port dhcp options will be deleted in the
                        # following ovn_port_dhcp_opts handling.
                        set_lsp[lsp_dhcp_key[ip_v]] = [
                            dhcp_opts['uuid']] if dhcp_opts else []
                    else:
                        # If the port has extra dhcp options, a command
                        # will be returned by
                        # self._ovn_client._get_port_dhcp_options
                        # to add or update the port dhcp options.
                        ovn_port_dhcp_opts[ip_v].pop(port['id'], None)
                        dhcp_options = dhcp_opts['cmd']
                        txn_commands.append(dhcp_options)
                        set_lsp[lsp_dhcp_key[ip_v]] = dhcp_options
                if set_lsp:
                    txn_commands.append(self.ovn_api.set_lswitch_port(
                        lport_name=port['id'], **set_lsp))
        for ip_v in [constants.IP_VERSION_4, constants.IP_VERSION_6]:
            for port_id, dhcp_opt in ovn_port_dhcp_opts[ip_v].items():
                LOG.warning(
                    'Out of sync port DHCPv%(ip_version)d options for '
                    '(subnet %(subnet_id)s port %(port_id)s) found in OVN '
                    'NB DB which need to be deleted',
                    {'ip_version': ip_v,
                     'subnet_id': dhcp_opt['external_ids']['subnet_id'],
                     'port_id': port_id})
                if self.mode == SYNC_MODE_REPAIR:
                    LOG.debug('Deleting port DHCPv%d options for '
                              '(subnet %s, port %s)', ip_v,
                              dhcp_opt['external_ids']['subnet_id'], port_id)
                    txn_commands.append(self.ovn_api.delete_dhcp_options(
                        dhcp_opt['uuid']))
        if txn_commands:
            with self.ovn_api.transaction(check_error=True) as txn:
                for cmd in txn_commands:
                    txn.add(cmd)
        LOG.debug('OVN-NB Sync DHCP options for Neutron ports with extra '
                  'dhcp options assigned finished')

    def _sync_metadata_ports(self, ctx, db_ports):
        """Ensure metadata ports in all Neutron networks.

        This method will ensure that all networks have one and only one
        metadata port.
        """
        if not ovn_conf.is_ovn_metadata_enabled():
            return
        LOG.debug('OVN sync metadata ports started')
        for net in self.core_plugin.get_networks(ctx):
            dhcp_ports = self.core_plugin.get_ports(ctx, filters=dict(
                network_id=[net['id']],
                device_owner=[constants.DEVICE_OWNER_DHCP]))
            # Do not touch the Neutron DHCP agent ports. Filter with a
            # list comprehension rather than removing items from the list
            # while iterating over it, which would skip elements.
            dhcp_ports = [port for port in dhcp_ports
                          if not utils.is_neutron_dhcp_agent_port(port)]
            if not dhcp_ports:
                LOG.warning('Missing metadata port found in Neutron for '
                            'network %s', net['id'])
                if self.mode == SYNC_MODE_REPAIR:
                    try:
                        # Create the missing port in both Neutron and OVN.
                        LOG.warning('Creating missing metadata port in '
                                    'Neutron and OVN for network %s',
                                    net['id'])
                        self._ovn_client.create_metadata_port(ctx, net)
                    except n_exc.IpAddressGenerationFailure:
                        LOG.error('Could not allocate IP addresses for '
                                  'metadata port in network %s', net['id'])
                        continue
            else:
                # Delete all but one DHCP port. Only one is needed for
                # metadata.
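# A trivial standalone sketch of the "keep exactly one" rule applied in
# the loop below, on invented port ids:
sample_dhcp_port_ids = ['p1', 'p2', 'p3']
kept, surplus = sample_dhcp_port_ids[0], sample_dhcp_port_ids[1:]
assert surplus == ['p2', 'p3']  # these would be deleted in repair mode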
for port in dhcp_ports[1:]: LOG.warning('Unnecessary DHCP port %s for network %s ' 'found in Neutron', port['id'], net['id']) if self.mode == SYNC_MODE_REPAIR: LOG.warning('Deleting unnecessary DHCP port %s for ' 'network %s', port['id'], net['id']) self.core_plugin.delete_port(ctx, port['id']) db_ports.pop(port['id'], None) port = dhcp_ports[0] if port['id'] in db_ports.keys(): LOG.warning('Metadata port %s for network %s found in ' 'Neutron but not in OVN', port['id'], net['id']) if self.mode == SYNC_MODE_REPAIR: LOG.warning('Creating metadata port %s for network ' '%s in OVN', port['id'], net['id']) self._create_port_in_ovn(ctx, port) db_ports.pop(port['id']) if self.mode == SYNC_MODE_REPAIR: # Make sure that this port has an IP address in all the subnets self._ovn_client.update_metadata_port(ctx, net['id']) LOG.debug('OVN sync metadata ports finished') def sync_networks_ports_and_dhcp_opts(self, ctx): LOG.debug('OVN-NB Sync networks, ports and DHCP options started') db_networks = {} for net in self.core_plugin.get_networks(ctx): db_networks[utils.ovn_name(net['id'])] = net # Ignore the floating ip ports with device_owner set to # constants.DEVICE_OWNER_FLOATINGIP db_ports = {port['id']: port for port in self.core_plugin.get_ports(ctx) if not utils.is_lsp_ignored(port)} ovn_all_dhcp_options = self.ovn_api.get_all_dhcp_options() db_network_cache = dict(db_networks) ports_need_sync_dhcp_opts = [] lswitches = self.ovn_api.get_all_logical_switches_with_ports() del_lswitchs_list = [] del_lports_list = [] add_provnet_ports_list = [] for lswitch in lswitches: if lswitch['name'] in db_networks: for lport in lswitch['ports']: if lport in db_ports: port = db_ports.pop(lport) if not utils.is_network_device_port(port): ports_need_sync_dhcp_opts.append(port) else: del_lports_list.append({'port': lport, 'lswitch': lswitch['name']}) db_network = db_networks[lswitch['name']] physnet = db_network.get(pnet.PHYSICAL_NETWORK) # Updating provider attributes is forbidden by neutron, thus # we only need to consider missing provnet-ports in OVN DB. 
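# The network sync above keys Neutron networks by their OVN logical
# switch name; a self-contained sketch of that bookkeeping, following the
# 'neutron-<uuid>' naming convention visible in this module (ids
# invented):
def _sketch_ovn_name(net_id):
    return 'neutron-%s' % net_id

db_networks = {_sketch_ovn_name('net-1'): {'id': 'net-1'}}
nb_switches = ['neutron-net-1', 'neutron-gone']
stale_switches = [s for s in nb_switches if s not in db_networks]
assert stale_switches == ['neutron-gone']  # removed in repair mode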
if physnet and not lswitch['provnet_port']: add_provnet_ports_list.append( {'network': db_network, 'lswitch': lswitch['name']}) del db_networks[lswitch['name']] else: del_lswitchs_list.append(lswitch) for net_id, network in db_networks.items(): LOG.warning("Network found in Neutron but not in " "OVN DB, network_id=%s", network['id']) if self.mode == SYNC_MODE_REPAIR: try: LOG.debug('Creating the network %s in OVN NB DB', network['id']) self._ovn_client.create_network(ctx, network) except RuntimeError: LOG.warning("Create network in OVN NB failed for " "network %s", network['id']) self._sync_metadata_ports(ctx, db_ports) self._sync_subnet_dhcp_options( ctx, db_network_cache, ovn_all_dhcp_options['subnets']) for port_id, port in db_ports.items(): LOG.warning("Port found in Neutron but not in OVN " "DB, port_id=%s", port['id']) if self.mode == SYNC_MODE_REPAIR: try: LOG.debug('Creating the port %s in OVN NB DB', port['id']) self._create_port_in_ovn(ctx, port) if port_id in ovn_all_dhcp_options['ports_v4']: dhcp_disable, lsp_opts = utils.get_lsp_dhcp_opts( port, constants.IP_VERSION_4) if lsp_opts: ovn_all_dhcp_options['ports_v4'].pop(port_id) if port_id in ovn_all_dhcp_options['ports_v6']: dhcp_disable, lsp_opts = utils.get_lsp_dhcp_opts( port, constants.IP_VERSION_6) if lsp_opts: ovn_all_dhcp_options['ports_v6'].pop(port_id) except RuntimeError: LOG.warning("Create port in OVN NB failed for" " port %s", port['id']) with self.ovn_api.transaction(check_error=True) as txn: for lswitch in del_lswitchs_list: LOG.warning("Network found in OVN but not in " "Neutron, network_id=%s", lswitch['name']) if self.mode == SYNC_MODE_REPAIR: LOG.debug('Deleting the network %s from OVN NB DB', lswitch['name']) txn.add(self.ovn_api.ls_del(lswitch['name'])) for provnet_port_info in add_provnet_ports_list: network = provnet_port_info['network'] LOG.warning("Provider network found in Neutron but " "provider network port not found in OVN DB, " "network_id=%s", provnet_port_info['lswitch']) if self.mode == SYNC_MODE_REPAIR: LOG.debug('Creating the provnet port %s in OVN NB DB', utils.ovn_provnet_port_name(network['id'])) self._ovn_client._create_provnet_port( txn, network, network.get(pnet.PHYSICAL_NETWORK), network.get(pnet.SEGMENTATION_ID)) for lport_info in del_lports_list: LOG.warning("Port found in OVN but not in " "Neutron, port_id=%s", lport_info['port']) if self.mode == SYNC_MODE_REPAIR: LOG.debug('Deleting the port %s from OVN NB DB', lport_info['port']) txn.add(self.ovn_api.delete_lswitch_port( lport_name=lport_info['port'], lswitch_name=lport_info['lswitch'])) if lport_info['port'] in ovn_all_dhcp_options['ports_v4']: LOG.debug('Deleting port DHCPv4 options for (port %s)', lport_info['port']) txn.add(self.ovn_api.delete_dhcp_options( ovn_all_dhcp_options['ports_v4'].pop( lport_info['port'])['uuid'])) if lport_info['port'] in ovn_all_dhcp_options['ports_v6']: LOG.debug('Deleting port DHCPv6 options for (port %s)', lport_info['port']) txn.add(self.ovn_api.delete_dhcp_options( ovn_all_dhcp_options['ports_v6'].pop( lport_info['port'])['uuid'])) self._sync_port_dhcp_options(ctx, ports_need_sync_dhcp_opts, ovn_all_dhcp_options['ports_v4'], ovn_all_dhcp_options['ports_v6']) LOG.debug('OVN-NB Sync networks, ports and DHCP options finished') def sync_port_dns_records(self, ctx): if self.mode != SYNC_MODE_REPAIR: return LOG.debug('OVN-NB Sync port dns records') # Ignore the floating ip ports with device_owner set to # constants.DEVICE_OWNER_FLOATINGIP db_ports = [port for port in self.core_plugin.get_ports(ctx) if 
not port.get('device_owner', '').startswith( constants.DEVICE_OWNER_FLOATINGIP)] dns_records = {} for port in db_ports: if self._ovn_client.is_dns_required_for_port(port): port_dns_records = self._ovn_client.get_port_dns_records(port) if port['network_id'] not in dns_records: dns_records[port['network_id']] = {} dns_records[port['network_id']].update(port_dns_records) for network_id, port_dns_records in dns_records.items(): self._set_dns_records(network_id, port_dns_records) def _set_dns_records(self, network_id, dns_records): lswitch_name = utils.ovn_name(network_id) ls, ls_dns_record = self.ovn_api.get_ls_and_dns_record(lswitch_name) with self.ovn_api.transaction(check_error=True) as txn: if not ls_dns_record: dns_add_txn = txn.add(self.ovn_api.dns_add( external_ids={'ls_name': ls.name}, records=dns_records)) txn.add(self.ovn_api.ls_set_dns_records(ls.uuid, dns_add_txn)) else: txn.add(self.ovn_api.dns_set_records(ls_dns_record.uuid, **dns_records)) def _delete_address_sets(self, ctx): with self.ovn_api.transaction(check_error=True) as txn: for sg in self.core_plugin.get_security_groups(ctx): for ip_version in ['ip4', 'ip6']: txn.add(self.ovn_api.delete_address_set( utils.ovn_addrset_name(sg['id'], ip_version))) def _delete_acls_from_lswitches(self, ctx): with self.ovn_api.transaction(check_error=True) as txn: for net in self.core_plugin.get_networks(ctx): # Calling acl_del from ovsdbapp with no ACL will delete # all the ACLs belonging to that Logical Switch. txn.add(self.ovn_api.acl_del(utils.ovn_name(net['id']))) def _create_default_drop_port_group(self, db_ports): with self.ovn_api.transaction(check_error=True) as txn: pg_name = ovn_const.OVN_DROP_PORT_GROUP_NAME if not self.ovn_api.get_port_group(pg_name): # If drop Port Group doesn't exist yet, create it. txn.add(self.ovn_api.pg_add(pg_name, acls=[])) # Add ACLs to this Port Group so that all traffic is dropped. acls = acl_utils.add_acls_for_drop_port_group(pg_name) for acl in acls: txn.add(self.ovn_api.pg_acl_add(**acl)) ports_ids = [port['id'] for port in db_ports] # Add the ports to the default Port Group txn.add(self.ovn_api.pg_add_ports(pg_name, ports_ids)) def _create_sg_port_groups_and_acls(self, ctx, db_ports): # Create a Port Group per Neutron Security Group with self.ovn_api.transaction(check_error=True) as txn: for sg in self.core_plugin.get_security_groups(ctx): pg_name = utils.ovn_port_group_name(sg['id']) if self.ovn_api.get_port_group(pg_name): continue ext_ids = {ovn_const.OVN_SG_EXT_ID_KEY: sg['id']} txn.add(self.ovn_api.pg_add( name=pg_name, acls=[], external_ids=ext_ids)) acl_utils.add_acls_for_sg_port_group(self.ovn_api, sg, txn) for port in db_ports: for sg in port['security_groups']: txn.add(self.ovn_api.pg_add_ports( utils.ovn_port_group_name(sg), port['id'])) def migrate_to_port_groups(self, ctx): # This routine is responsible for migrating the current Security # Groups and SG Rules to the new Port Groups implementation. # 1. Create the default drop Port Group and add all ports with port # security enabled to it. # 2. Create a Port Group for every existing Neutron Security Group and # add all its Security Group Rules as ACLs to that Port Group. # 3. Delete all existing Address Sets in NorthBound database which # correspond to a Neutron Security Group. # 4. Delete all the ACLs in every Logical Switch (Neutron network). 
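# A sketch of the early-exit guard that opens the routine below;
# pg_supported and leftover_address_sets stand in for the two NB queries
# (is_port_groups_supported / get_address_sets):
def _sketch_should_migrate(pg_supported, leftover_address_sets):
    # Nothing to migrate without Port Group support, and nothing left to
    # do once all SG Address Sets have been converted away.
    return bool(pg_supported and leftover_address_sets)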
# If Port Groups are not supported or we've already migrated, return if (not self.ovn_api.is_port_groups_supported() or not self.ovn_api.get_address_sets()): return LOG.debug('Port Groups Migration task started') # Ignore the floating ip ports with device_owner set to # constants.DEVICE_OWNER_FLOATINGIP db_ports = [port for port in self.core_plugin.get_ports(ctx) if not utils.is_lsp_ignored(port) and not utils.is_lsp_trusted(port) and utils.is_port_security_enabled(port)] self._create_default_drop_port_group(db_ports) self._create_sg_port_groups_and_acls(ctx, db_ports) self._delete_address_sets(ctx) self._delete_acls_from_lswitches(ctx) LOG.debug('Port Groups Migration task finished') class OvnSbSynchronizer(OvnDbSynchronizer): """Synchronizer class for SB.""" def __init__(self, core_plugin, ovn_api, ovn_driver): super(OvnSbSynchronizer, self).__init__( core_plugin, ovn_api, ovn_driver) self.l3_plugin = directory.get_plugin(plugin_constants.L3) def do_sync(self): """Method to sync the OVN_Southbound DB with neutron DB. OvnSbSynchronizer will sync data from OVN_Southbound to neutron. And the synchronization will always be performed, no matter what mode it is. """ LOG.debug("Starting OVN-Southbound DB sync process") ctx = context.get_admin_context() self.sync_hostname_and_physical_networks(ctx) if utils.is_ovn_l3(self.l3_plugin): self.l3_plugin.schedule_unhosted_gateways() def sync_hostname_and_physical_networks(self, ctx): LOG.debug('OVN-SB Sync hostname and physical networks started') host_phynets_map = self.ovn_api.get_chassis_hostname_and_physnets() current_hosts = set(host_phynets_map) previous_hosts = segments_db.get_hosts_mapped_with_segments(ctx) stale_hosts = previous_hosts - current_hosts for host in stale_hosts: LOG.debug('Stale host %s found in Neutron, but not in OVN SB DB. ' 'Clear its SegmentHostMapping in Neutron', host) self.ovn_driver.update_segment_host_mapping(host, []) new_hosts = current_hosts - previous_hosts for host in new_hosts: LOG.debug('New host %s found in OVN SB DB, but not in Neutron. ' 'Add its SegmentHostMapping in Neutron', host) self.ovn_driver.update_segment_host_mapping( host, host_phynets_map[host]) for host in current_hosts & previous_hosts: LOG.debug('Host %s found both in OVN SB DB and Neutron. ' 'Trigger updating its SegmentHostMapping in Neutron, ' 'to keep OVN SB DB and Neutron have consistent data', host) self.ovn_driver.update_segment_host_mapping( host, host_phynets_map[host]) LOG.debug('OVN-SB Sync hostname and physical networks finished') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovsdb_monitor.py0000644000175000017500000005337600000000000032150 0ustar00coreycorey00000000000000# Copyright 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
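# A self-contained sketch of the set reconciliation performed by
# OvnSbSynchronizer.sync_hostname_and_physical_networks() above
# (hostnames invented):
current_hosts = {'cmp-1', 'cmp-2'}   # chassis reported by OVN SB
previous_hosts = {'cmp-2', 'cmp-3'}  # hosts mapped in Neutron
stale_hosts = previous_hosts - current_hosts   # {'cmp-3'}: clear mapping
new_hosts = current_hosts - previous_hosts     # {'cmp-1'}: add mapping
refreshed = current_hosts & previous_hosts     # {'cmp-2'}: re-assert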
import abc import datetime from neutron_lib import context as neutron_context from neutron_lib.plugins import constants from neutron_lib.plugins import directory from neutron_lib.utils import helpers from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils from ovs.stream import Stream from ovsdbapp.backend.ovs_idl import connection from ovsdbapp.backend.ovs_idl import event as row_event from ovsdbapp.backend.ovs_idl import idlutils from ovsdbapp import event from neutron.common.ovn import constants as ovn_const from neutron.common.ovn import exceptions from neutron.common.ovn import hash_ring_manager from neutron.common.ovn import utils from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf from neutron.db import ovn_hash_ring_db CONF = cfg.CONF LOG = log.getLogger(__name__) class BaseEvent(row_event.RowEvent): table = None events = tuple() def __init__(self): self.event_name = self.__class__.__name__ super(BaseEvent, self).__init__(self.events, self.table, None) @abc.abstractmethod def match_fn(self, event, row, old=None): """Define match criteria other than table/event""" def matches(self, event, row, old=None): if row._table.name != self.table or event not in self.events: return False if not self.match_fn(event, row, old): return False LOG.debug("%s : Matched %s, %s, %s %s", self.event_name, self.table, event, self.conditions, self.old_conditions) return True class ChassisEvent(row_event.RowEvent): """Chassis create update delete event.""" def __init__(self, driver): self.driver = driver self.l3_plugin = directory.get_plugin(constants.L3) table = 'Chassis' events = (self.ROW_CREATE, self.ROW_UPDATE, self.ROW_DELETE) super(ChassisEvent, self).__init__(events, table, None) self.event_name = 'ChassisEvent' def handle_ha_chassis_group_changes(self, event, row, old): """Handle HA Chassis Group changes. This method handles the inclusion and removal of Chassis to/from the default HA Chassis Group. 
""" if not self.driver._ovn_client.is_external_ports_supported(): return is_gw_chassis = utils.is_gateway_chassis(row) # If the Chassis being created is not a gateway, ignore it if not is_gw_chassis and event == self.ROW_CREATE: return if event == self.ROW_UPDATE: is_old_gw = utils.is_gateway_chassis(old) if is_gw_chassis and is_old_gw: return elif not is_gw_chassis and is_old_gw: # Chassis is not a gateway anymore, treat it as deletion event = self.ROW_DELETE elif is_gw_chassis and not is_old_gw: # Chassis is now a gateway, treat it as creation event = self.ROW_CREATE if event == self.ROW_CREATE: default_group = self.driver._nb_ovn.ha_chassis_group_get( ovn_const.HA_CHASSIS_GROUP_DEFAULT_NAME).execute( check_error=True) # Find what's the lowest priority number current in the group # and add the new chassis as the new lowest min_priority = min( [ch.priority for ch in default_group.ha_chassis], default=ovn_const.HA_CHASSIS_GROUP_HIGHEST_PRIORITY) self.driver._nb_ovn.ha_chassis_group_add_chassis( ovn_const.HA_CHASSIS_GROUP_DEFAULT_NAME, row.name, priority=min_priority - 1).execute(check_error=True) elif event == self.ROW_DELETE: self.driver._nb_ovn.ha_chassis_group_del_chassis( ovn_const.HA_CHASSIS_GROUP_DEFAULT_NAME, row.name, if_exists=True).execute(check_error=True) def match_fn(self, event, row, old): if event != self.ROW_UPDATE: return True # NOTE(lucasgomes): If the external_ids column wasn't updated # (meaning, Chassis "gateway" status didn't change) just returns if not hasattr(old, 'external_ids') and event == self.ROW_UPDATE: return if (old.external_ids.get('ovn-bridge-mappings') != row.external_ids.get('ovn-bridge-mappings')): return True f = utils.is_gateway_chassis return f(old) != f(row) def run(self, event, row, old): host = row.hostname phy_nets = [] if event != self.ROW_DELETE: bridge_mappings = row.external_ids.get('ovn-bridge-mappings', '') mapping_dict = helpers.parse_mappings(bridge_mappings.split(','), unique_values=False) phy_nets = list(mapping_dict) self.driver.update_segment_host_mapping(host, phy_nets) if utils.is_ovn_l3(self.l3_plugin): # If chassis lost physnet or has been # deleted we can limit the scope and # reschedule only ports from this chassis. # In other cases we need to reschedule all gw ports. kwargs = {'event_from_chassis': None} if event == self.ROW_DELETE: kwargs['event_from_chassis'] = row.name elif event == self.ROW_UPDATE: old_mappings = old.external_ids.get('ovn-bridge-mappings', set()) or set() new_mappings = row.external_ids.get('ovn-bridge-mappings', set()) or set() if old_mappings: old_mappings = set(old_mappings.split(',')) if new_mappings: new_mappings = set(new_mappings.split(',')) mappings_removed = old_mappings - new_mappings mappings_added = new_mappings - old_mappings if mappings_removed and not mappings_added: # Mapping has been only removed. So we can # limit scope of rescheduling only to impacted # gateway chassis. kwargs['event_from_chassis'] = row.name self.l3_plugin.schedule_unhosted_gateways(**kwargs) self.handle_ha_chassis_group_changes(event, row, old) class PortBindingChassisUpdateEvent(row_event.RowEvent): """Event for matching a port moving chassis If the LSP is up and the Port_Binding chassis has just changed, there is a good chance the host died without cleaning up the chassis column on the Port_Binding. The port never goes down, so we won't see update the driver with the LogicalSwitchPortUpdateUpEvent which only monitors for transitions from DOWN to UP. 
""" def __init__(self, driver): self.driver = driver table = 'Port_Binding' events = (self.ROW_UPDATE,) super(PortBindingChassisUpdateEvent, self).__init__( events, table, None) self.event_name = self.__class__.__name__ def match_fn(self, event, row, old=None): # NOTE(twilson) ROW_UPDATE events always pass old, but chassis will # only be set if chassis has changed old_chassis = getattr(old, 'chassis', None) if not (row.chassis and old_chassis) or row.chassis == old_chassis: return False if row.type == ovn_const.OVN_CHASSIS_REDIRECT: return False try: lsp = self.driver._nb_ovn.lookup('Logical_Switch_Port', row.logical_port) except idlutils.RowNotFound: LOG.warning("Logical Switch Port %(port)s not found for " "Port_Binding %(binding)s", {'port': row.logical_port, 'binding': row.uuid}) return False return bool(lsp.up) def run(self, event, row, old=None): self.driver.set_port_status_up(row.logical_port) class PortBindingChassisEvent(row_event.RowEvent): """Port_Binding update event - set chassis for chassisredirect port. When a chassisredirect port is updated with chassis, this event get generated. We will update corresponding router's gateway port with the chassis's host_id. Later, users can check router's gateway port host_id to find the location of master HA router. """ def __init__(self, driver): self.driver = driver self.l3_plugin = directory.get_plugin(constants.L3) table = 'Port_Binding' events = (self.ROW_UPDATE,) super(PortBindingChassisEvent, self).__init__( events, table, (('type', '=', ovn_const.OVN_CHASSIS_REDIRECT),)) self.event_name = 'PortBindingChassisEvent' def run(self, event, row, old): if not utils.is_ovn_l3(self.l3_plugin): return router = host = None chassis = getattr(row, 'chassis', None) if chassis: router = row.datapath.external_ids.get('name', '').replace( 'neutron-', '') host = chassis[0].hostname LOG.info("Router %(router)s is bound to host %(host)s", {'router': router, 'host': host}) self.l3_plugin.update_router_gateway_port_bindings( router, host) class LogicalSwitchPortCreateUpEvent(row_event.RowEvent): """Row create event - Logical_Switch_Port 'up' = True. On connection, we get a dump of all ports, so if there is a neutron port that is down that has since been activated, we'll catch it here. This event will not be generated for new ports getting created. """ def __init__(self, driver): self.driver = driver table = 'Logical_Switch_Port' events = (self.ROW_CREATE,) super(LogicalSwitchPortCreateUpEvent, self).__init__( events, table, (('up', '=', True),)) self.event_name = 'LogicalSwitchPortCreateUpEvent' def run(self, event, row, old): self.driver.set_port_status_up(row.name) class LogicalSwitchPortCreateDownEvent(row_event.RowEvent): """Row create event - Logical_Switch_Port 'up' = False On connection, we get a dump of all ports, so if there is a neutron port that is up that has since been deactivated, we'll catch it here. This event will not be generated for new ports getting created. """ def __init__(self, driver): self.driver = driver table = 'Logical_Switch_Port' events = (self.ROW_CREATE,) super(LogicalSwitchPortCreateDownEvent, self).__init__( events, table, (('up', '=', False),)) self.event_name = 'LogicalSwitchPortCreateDownEvent' def run(self, event, row, old): self.driver.set_port_status_down(row.name) class LogicalSwitchPortUpdateUpEvent(row_event.RowEvent): """Row update event - Logical_Switch_Port 'up' going from False to True This happens when the VM goes up. New value of Logical_Switch_Port 'up' will be True and the old value will be False. 
""" def __init__(self, driver): self.driver = driver table = 'Logical_Switch_Port' events = (self.ROW_UPDATE,) super(LogicalSwitchPortUpdateUpEvent, self).__init__( events, table, (('up', '=', True),), old_conditions=(('up', '=', False),)) self.event_name = 'LogicalSwitchPortUpdateUpEvent' def run(self, event, row, old): self.driver.set_port_status_up(row.name) class LogicalSwitchPortUpdateDownEvent(row_event.RowEvent): """Row update event - Logical_Switch_Port 'up' going from True to False This happens when the VM goes down. New value of Logical_Switch_Port 'up' will be False and the old value will be True. """ def __init__(self, driver): self.driver = driver table = 'Logical_Switch_Port' events = (self.ROW_UPDATE,) super(LogicalSwitchPortUpdateDownEvent, self).__init__( events, table, (('up', '=', False),), old_conditions=(('up', '=', True),)) self.event_name = 'LogicalSwitchPortUpdateDownEvent' def run(self, event, row, old): self.driver.set_port_status_down(row.name) class FIPAddDeleteEvent(row_event.RowEvent): """Row event - NAT 'dnat_and_snat' entry added or deleted This happens when a FIP is created or removed. """ def __init__(self, driver): self.driver = driver table = 'NAT' events = (self.ROW_CREATE, self.ROW_DELETE) super(FIPAddDeleteEvent, self).__init__( events, table, (('type', '=', 'dnat_and_snat'),)) self.event_name = 'FIPAddDeleteEvent' def run(self, event, row, old): # When a FIP is added or deleted, we will delete all entries in the # MAC_Binding table of SB OVSDB corresponding to that IP Address. # TODO(dalvarez): Remove this workaround once fixed in core OVN: # https://mail.openvswitch.org/pipermail/ovs-discuss/2018-October/047604.html self.driver.delete_mac_binding_entries(row.external_ip) class OvnDbNotifyHandler(event.RowEventHandler): def __init__(self, driver): super(OvnDbNotifyHandler, self).__init__() self.driver = driver class BaseOvnIdl(connection.OvsdbIdl): def __init__(self, remote, schema): self.notify_handler = event.RowEventHandler() super(BaseOvnIdl, self).__init__(remote, schema) @classmethod def from_server(cls, connection_string, schema_name): _check_and_set_ssl_files(schema_name) helper = idlutils.get_schema_helper(connection_string, schema_name) helper.register_all() return cls(connection_string, helper) def notify(self, event, row, updates=None): self.notify_handler.notify(event, row, updates) class BaseOvnSbIdl(connection.OvsdbIdl): @classmethod def from_server(cls, connection_string, schema_name): _check_and_set_ssl_files(schema_name) helper = idlutils.get_schema_helper(connection_string, schema_name) helper.register_table('Chassis') helper.register_table('Encap') helper.register_table('Port_Binding') helper.register_table('Datapath_Binding') return cls(connection_string, helper) class OvnIdl(BaseOvnIdl): def __init__(self, driver, remote, schema): super(OvnIdl, self).__init__(remote, schema) self.driver = driver self.notify_handler = OvnDbNotifyHandler(driver) # ovsdb lock name to acquire. # This event lock is used to handle the notify events sent by idl.Idl # idl.Idl will call notify function for the "update" rpc method it # receives from the ovsdb-server. # This event lock is required for the following reasons # - If there are multiple neutron servers running, OvnWorkers of # these neutron servers would receive the notify events from # idl.Idl # # - we do not want all the neutron servers to handle these events # # - only the neutron server which has the lock will handle the # notify events. 
# # - In case the neutron server which owns this lock goes down, # ovsdb server would assign the lock to one of the other neutron # servers. self.event_lock_name = "neutron_ovn_event_lock" def notify(self, event, row, updates=None): # Do not handle the notification if the event lock is requested, # but not granted by the ovsdb-server. if self.is_lock_contended: return self.notify_handler.notify(event, row, updates) @abc.abstractmethod def post_connect(self): """Should be called after the idl has been initialized""" class OvnIdlDistributedLock(BaseOvnIdl): def __init__(self, driver, remote, schema): super(OvnIdlDistributedLock, self).__init__(remote, schema) self.driver = driver self.notify_handler = OvnDbNotifyHandler(driver) self._node_uuid = self.driver.node_uuid self._hash_ring = hash_ring_manager.HashRingManager( self.driver.hash_ring_group) self._last_touch = None def notify(self, event, row, updates=None): try: target_node = self._hash_ring.get_node(str(row.uuid)) except exceptions.HashRingIsEmpty as e: LOG.error('HashRing is empty, error: %s', e) return if target_node != self._node_uuid: return # If the worker hasn't been health checked by the maintenance # thread (see bug #1834498), indicate that it's alive here time_now = timeutils.utcnow() touch_timeout = time_now - datetime.timedelta( seconds=ovn_const.HASH_RING_TOUCH_INTERVAL) if not self._last_touch or touch_timeout >= self._last_touch: # NOTE(lucasagomes): Guard the db operation with an exception # handler. If heartbeating fails for whatever reason, log # the error and continue with processing the event try: ctx = neutron_context.get_admin_context() ovn_hash_ring_db.touch_node(ctx, self._node_uuid) self._last_touch = time_now except Exception: LOG.exception('Hash Ring node %s failed to heartbeat', self._node_uuid) LOG.debug('Hash Ring: Node %(node)s (host: %(hostname)s) ' 'handling event "%(event)s" for row %(row)s ' '(table: %(table)s)', {'node': self._node_uuid, 'hostname': CONF.host, 'event': event, 'row': row.uuid, 'table': row._table.name}) self.notify_handler.notify(event, row, updates) @abc.abstractmethod def post_connect(self): """Should be called after the idl has been initialized""" class OvnNbIdl(OvnIdlDistributedLock): def __init__(self, driver, remote, schema): super(OvnNbIdl, self).__init__(driver, remote, schema) self._lsp_update_up_event = LogicalSwitchPortUpdateUpEvent(driver) self._lsp_update_down_event = LogicalSwitchPortUpdateDownEvent(driver) self._lsp_create_up_event = LogicalSwitchPortCreateUpEvent(driver) self._lsp_create_down_event = LogicalSwitchPortCreateDownEvent(driver) self._fip_create_delete_event = FIPAddDeleteEvent(driver) self.notify_handler.watch_events([self._lsp_create_up_event, self._lsp_create_down_event, self._lsp_update_up_event, self._lsp_update_down_event, self._fip_create_delete_event]) @classmethod def from_server(cls, connection_string, schema_name, driver): _check_and_set_ssl_files(schema_name) helper = idlutils.get_schema_helper(connection_string, schema_name) helper.register_all() return cls(driver, connection_string, helper) def unwatch_logical_switch_port_create_events(self): """Unwatch the logical switch port create events. When the ovs idl client connects to the ovsdb-server, it gets a dump of all logical switch ports as events and we need to process them at start up. After the startup, there is no need to watch these events. So unwatch these events. 
""" self.notify_handler.unwatch_events([self._lsp_create_up_event, self._lsp_create_down_event]) self._lsp_create_up_event = None self._lsp_create_down_event = None def post_connect(self): self.unwatch_logical_switch_port_create_events() class OvnSbIdl(OvnIdlDistributedLock): @classmethod def from_server(cls, connection_string, schema_name, driver): _check_and_set_ssl_files(schema_name) helper = idlutils.get_schema_helper(connection_string, schema_name) helper.register_table('Chassis') helper.register_table('Encap') helper.register_table('Port_Binding') helper.register_table('Datapath_Binding') helper.register_table('MAC_Binding') return cls(driver, connection_string, helper) def post_connect(self): """Watch Chassis events. When the ovs idl client connects to the ovsdb-server, it gets a dump of all Chassis create event. We don't need to process them because there will be sync up at startup. After that, we will watch the events to make notify work. """ self._chassis_event = ChassisEvent(self.driver) self._portbinding_event = PortBindingChassisEvent(self.driver) self.notify_handler.watch_events( [self._chassis_event, self._portbinding_event, PortBindingChassisUpdateEvent(self.driver)]) def _check_and_set_ssl_files(schema_name): if schema_name == 'OVN_Southbound': priv_key_file = ovn_conf.get_ovn_sb_private_key() cert_file = ovn_conf.get_ovn_sb_certificate() ca_cert_file = ovn_conf.get_ovn_sb_ca_cert() else: priv_key_file = ovn_conf.get_ovn_nb_private_key() cert_file = ovn_conf.get_ovn_nb_certificate() ca_cert_file = ovn_conf.get_ovn_nb_ca_cert() if priv_key_file: Stream.ssl_set_private_key_file(priv_key_file) if cert_file: Stream.ssl_set_certificate_file(cert_file) if ca_cert_file: Stream.ssl_set_ca_cert_file(ca_cert_file) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/worker.py0000644000175000017500000000234600000000000030564 0ustar00coreycorey00000000000000# Copyright 2019 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import worker from neutron.common import config class MaintenanceWorker(worker.BaseWorker): def start(self): super(MaintenanceWorker, self).start() # NOTE(twilson) The super class will trigger the post_fork_initialize # in the driver, which starts the connection/IDL notify loop which # keeps the process from exiting def stop(self): """Stop service.""" super(MaintenanceWorker, self).stop() def wait(self): """Wait for service to complete.""" super(MaintenanceWorker, self).wait() @staticmethod def reset(): config.reset_service() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/type_flat.py0000644000175000017500000001235200000000000025032 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as p_const from neutron_lib.db import api as db_api from neutron_lib import exceptions as exc from neutron_lib.objects import exceptions as obj_base from neutron_lib.plugins.ml2 import api from oslo_config import cfg from oslo_log import log from neutron._i18n import _ from neutron.conf.plugins.ml2.drivers import driver_type from neutron.objects.plugins.ml2 import flatallocation as flat_obj from neutron.plugins.ml2.drivers import helpers LOG = log.getLogger(__name__) driver_type.register_ml2_drivers_flat_opts() class FlatTypeDriver(helpers.BaseTypeDriver): """Manage state for flat networks with ML2. The FlatTypeDriver implements the 'flat' network_type. Flat network segments provide connectivity between VMs and other devices using any connected IEEE 802.1D conformant physical_network, without the use of VLAN tags, tunneling, or other segmentation mechanisms. Therefore at most one flat network segment can exist on each available physical_network. """ def __init__(self): super(FlatTypeDriver, self).__init__() self._parse_networks(cfg.CONF.ml2_type_flat.flat_networks) def _parse_networks(self, entries): self.flat_networks = entries if '*' in self.flat_networks: LOG.info("Arbitrary flat physical_network names allowed") self.flat_networks = None elif not self.flat_networks: LOG.info("Flat networks are disabled") else: LOG.info("Allowable flat physical_network names: %s", self.flat_networks) def get_type(self): return p_const.TYPE_FLAT def initialize(self): LOG.info("ML2 FlatTypeDriver initialization complete") def initialize_network_segment_range_support(self): pass def update_network_segment_range_allocations(self): pass def get_network_segment_ranges(self): pass def is_partial_segment(self, segment): return False def validate_provider_segment(self, segment): physical_network = segment.get(api.PHYSICAL_NETWORK) if not physical_network: msg = _("physical_network required for flat provider network") raise exc.InvalidInput(error_message=msg) if self.flat_networks is not None and not self.flat_networks: msg = _("Flat provider networks are disabled") raise exc.InvalidInput(error_message=msg) if self.flat_networks and physical_network not in self.flat_networks: msg = (_("physical_network '%s' unknown for flat provider network") % physical_network) raise exc.InvalidInput(error_message=msg) for key, value in segment.items(): if value and key not in [api.NETWORK_TYPE, api.PHYSICAL_NETWORK]: msg = _("%s prohibited for flat provider network") % key raise exc.InvalidInput(error_message=msg) def reserve_provider_segment(self, context, segment, filters=None): physical_network = segment[api.PHYSICAL_NETWORK] try: LOG.debug("Reserving flat network on physical " "network %s", physical_network) alloc = flat_obj.FlatAllocation( context, physical_network=physical_network) alloc.create() except obj_base.NeutronDbObjectDuplicateEntry: raise exc.FlatNetworkInUse( physical_network=physical_network) segment[api.MTU] = 
self.get_mtu(alloc.physical_network) return segment def allocate_tenant_segment(self, context, filters=None): # Tenant flat networks are not supported. return def release_segment(self, context, segment): physical_network = segment[api.PHYSICAL_NETWORK] with db_api.CONTEXT_WRITER.using(context): obj = flat_obj.FlatAllocation.get_object( context, physical_network=physical_network) if obj: obj.delete() LOG.debug("Releasing flat network on physical network %s", physical_network) else: LOG.warning( "No flat network found on physical network %s", physical_network) def get_mtu(self, physical_network): seg_mtu = super(FlatTypeDriver, self).get_mtu() mtu = [] if seg_mtu > 0: mtu.append(seg_mtu) if physical_network in self.physnet_mtus: mtu.append(int(self.physnet_mtus[physical_network])) return min(mtu) if mtu else 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/type_geneve.py0000644000175000017500000000453000000000000025354 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as p_const from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_log import log from neutron.conf.plugins.ml2.drivers import driver_type from neutron.db.models.plugins.ml2 import geneveallocation as \ geneve_alloc_model from neutron.objects.plugins.ml2 import geneveallocation as geneve_obj from neutron.plugins.ml2.drivers import type_tunnel LOG = log.getLogger(__name__) driver_type.register_ml2_drivers_geneve_opts() class GeneveTypeDriver(type_tunnel.EndpointTunnelTypeDriver): def __init__(self): super(GeneveTypeDriver, self).__init__(geneve_obj.GeneveAllocation, geneve_obj.GeneveEndpoint) self.max_encap_size = cfg.CONF.ml2_type_geneve.max_header_size self.model_segmentation_id = ( geneve_alloc_model.GeneveAllocation.geneve_vni) def get_type(self): return p_const.TYPE_GENEVE def initialize(self): try: self._initialize(cfg.CONF.ml2_type_geneve.vni_ranges) except n_exc.NetworkTunnelRangeError: LOG.error("Failed to parse vni_ranges. Service terminated!") raise SystemExit() def get_endpoints(self): """Get every geneve endpoints from database.""" geneve_endpoints = self._get_endpoints() return [{'ip_address': geneve_endpoint.ip_address, 'host': geneve_endpoint.host} for geneve_endpoint in geneve_endpoints] def add_endpoint(self, ip, host): return self._add_endpoint(ip, host) def get_mtu(self, physical_network=None): mtu = super(GeneveTypeDriver, self).get_mtu() return mtu - self.max_encap_size if mtu else 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/type_gre.py0000644000175000017500000000435000000000000024660 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as p_const from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_log import log from neutron.conf.plugins.ml2.drivers import driver_type from neutron.db.models.plugins.ml2 import gre_allocation_endpoints as \ gre_alloc_model from neutron.objects.plugins.ml2 import greallocation as gre_obj from neutron.plugins.ml2.drivers import type_tunnel LOG = log.getLogger(__name__) driver_type.register_ml2_drivers_gre_opts() class GreTypeDriver(type_tunnel.EndpointTunnelTypeDriver): def __init__(self): super(GreTypeDriver, self).__init__( gre_obj.GreAllocation, gre_obj.GreEndpoint) self.model_segmentation_id = gre_alloc_model.GreAllocation.gre_id def get_type(self): return p_const.TYPE_GRE def initialize(self): try: self._initialize(cfg.CONF.ml2_type_gre.tunnel_id_ranges) except n_exc.NetworkTunnelRangeError: LOG.exception("Failed to parse tunnel_id_ranges. " "Service terminated!") raise SystemExit() def get_endpoints(self): """Get every gre endpoints from database.""" gre_endpoints = self._get_endpoints() return [{'ip_address': gre_endpoint.ip_address, 'host': gre_endpoint.host} for gre_endpoint in gre_endpoints] def add_endpoint(self, ip, host): return self._add_endpoint(ip, host) def get_mtu(self, physical_network=None): mtu = super(GreTypeDriver, self).get_mtu(physical_network) return mtu - p_const.GRE_ENCAP_OVERHEAD if mtu else 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/type_local.py0000644000175000017500000000462500000000000025202 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as p_const from neutron_lib import exceptions as exc from neutron_lib.plugins.ml2 import api from oslo_log import log from neutron._i18n import _ LOG = log.getLogger(__name__) class LocalTypeDriver(api.ML2TypeDriver): """Manage state for local networks with ML2. The LocalTypeDriver implements the 'local' network_type. Local network segments provide connectivity between VMs and other devices running on the same node, provided that a common local network bridging technology is available to those devices. Local network segments do not provide any connectivity between nodes. 
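For example, two VMs on the same node attached to the same local network can reach each other through that node's local bridge, while a VM on another node attached to the same network has no connectivity to them.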
""" def __init__(self): LOG.info("ML2 LocalTypeDriver initialization complete") def get_type(self): return p_const.TYPE_LOCAL def initialize(self): pass def initialize_network_segment_range_support(self): pass def update_network_segment_range_allocations(self): pass def get_network_segment_ranges(self): pass def is_partial_segment(self, segment): return False def validate_provider_segment(self, segment): for key, value in segment.items(): if value and key != api.NETWORK_TYPE: msg = _("%s prohibited for local provider network") % key raise exc.InvalidInput(error_message=msg) def reserve_provider_segment(self, context, segment, filters=None): # No resources to reserve return segment def allocate_tenant_segment(self, context, filters=None): # No resources to allocate return {api.NETWORK_TYPE: p_const.TYPE_LOCAL} def release_segment(self, context, segment): # No resources to release pass def get_mtu(self, physical_network=None): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/type_tunnel.py0000644000175000017500000006013200000000000025410 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import itertools import operator import netaddr from neutron_lib.agent import topics from neutron_lib import constants as p_const from neutron_lib import context from neutron_lib.db import api as db_api from neutron_lib import exceptions as exc from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from neutron_lib.plugins.ml2 import api from neutron_lib.plugins import utils as plugin_utils from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log from oslo_utils import uuidutils import six from six import moves from sqlalchemy import or_ from neutron._i18n import _ from neutron.objects import base as base_obj from neutron.objects import network_segment_range as range_obj from neutron.plugins.ml2.drivers import helpers from neutron.services.network_segment_range import plugin as range_plugin LOG = log.getLogger(__name__) TUNNEL = 'tunnel' def chunks(iterable, chunk_size): """Chunks data into chunk with size<=chunk_size.""" iterator = iter(iterable) chunk = list(itertools.islice(iterator, 0, chunk_size)) while chunk: yield chunk chunk = list(itertools.islice(iterator, 0, chunk_size)) @six.add_metaclass(abc.ABCMeta) class _TunnelTypeDriverBase(helpers.SegmentTypeDriver): BULK_SIZE = 100 def __init__(self, model): super(_TunnelTypeDriverBase, self).__init__(model) self.segmentation_key = next(iter(self.primary_keys)) @abc.abstractmethod def add_endpoint(self, ip, host): """Register the endpoint in the type_driver database. 
:param ip: the IP address of the endpoint :param host: the Host name of the endpoint """ @abc.abstractmethod def get_endpoints(self): """Get every endpoint managed by the type_driver :returns: a list of dict [{ip_address:endpoint_ip, host:endpoint_host}, ..] """ @abc.abstractmethod def get_endpoint_by_host(self, host): """Get endpoint for a given host managed by the type_driver :param host: the Host name of the endpoint if host found in type_driver database :returns: db object for that particular host else :returns: None """ @abc.abstractmethod def get_endpoint_by_ip(self, ip): """Get endpoint for a given tunnel ip managed by the type_driver :param ip: the IP address of the endpoint if ip found in type_driver database :returns: db object for that particular ip else :returns: None """ @abc.abstractmethod def delete_endpoint(self, ip): """Delete the endpoint in the type_driver database. :param ip: the IP address of the endpoint """ @abc.abstractmethod def delete_endpoint_by_host_or_ip(self, host, ip): """Delete the endpoint in the type_driver database. This function will delete any endpoint matching the specified ip or host. :param host: the host name of the endpoint :param ip: the IP address of the endpoint """ def _initialize(self, raw_tunnel_ranges): self.tunnel_ranges = [] self._parse_tunnel_ranges(raw_tunnel_ranges, self.tunnel_ranges) if not range_plugin.is_network_segment_range_enabled(): # service plugins are initialized/loaded after the ML2 driver # initialization. Thus, we base on the information whether # ``network_segment_range`` service plugin is enabled/defined in # ``neutron.conf`` to decide whether to skip the first time sync # allocation during driver initialization, instead of using the # directory.get_plugin() method - the normal way used elsewhere to # check if a plugin is loaded. 
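# An illustrative neutron.conf fragment for the gating described above,
# assuming the plugin is enabled under its stock alias:
#
#     [DEFAULT]
#     service_plugins = network_segment_range
#
# With the plugin listed, the first sync is deferred until
# initialize_network_segment_range_support() runs; otherwise sync now: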
self.sync_allocations() def _parse_tunnel_ranges(self, tunnel_ranges, current_range): for entry in tunnel_ranges: entry = entry.strip() try: tun_min, tun_max = entry.split(':') tun_min = tun_min.strip() tun_max = tun_max.strip() tunnel_range = int(tun_min), int(tun_max) except ValueError as ex: raise exc.NetworkTunnelRangeError(tunnel_range=entry, error=ex) plugin_utils.verify_tunnel_range(tunnel_range, self.get_type()) current_range.append(tunnel_range) LOG.info("%(type)s ID ranges: %(range)s", {'type': self.get_type(), 'range': current_range}) @db_api.retry_db_errors def _populate_new_default_network_segment_ranges(self): ctx = context.get_admin_context() for tun_min, tun_max in self.tunnel_ranges: res = { 'id': uuidutils.generate_uuid(), 'name': '', 'default': True, 'shared': True, 'network_type': self.get_type(), 'minimum': tun_min, 'maximum': tun_max} with db_api.CONTEXT_WRITER.using(ctx): new_default_range_obj = ( range_obj.NetworkSegmentRange(ctx, **res)) new_default_range_obj.create() @db_api.retry_db_errors def _delete_expired_default_network_segment_ranges(self): ctx = context.get_admin_context() with db_api.CONTEXT_WRITER.using(ctx): filters = { 'default': True, 'network_type': self.get_type(), } old_default_range_objs = range_obj.NetworkSegmentRange.get_objects( ctx, **filters) for obj in old_default_range_objs: obj.delete() @db_api.retry_db_errors def _get_network_segment_ranges_from_db(self): ranges = [] ctx = context.get_admin_context() with db_api.CONTEXT_READER.using(ctx): range_objs = (range_obj.NetworkSegmentRange.get_objects( ctx, network_type=self.get_type())) for obj in range_objs: ranges.append((obj['minimum'], obj['maximum'])) return ranges def initialize_network_segment_range_support(self): self._delete_expired_default_network_segment_ranges() self._populate_new_default_network_segment_ranges() # Override self.tunnel_ranges with the network segment range # information from DB and then do a sync_allocations since the # segment range service plugin has not yet been loaded at this # initialization time. self.tunnel_ranges = self._get_network_segment_ranges_from_db() self.sync_allocations() def update_network_segment_range_allocations(self): self.sync_allocations() @db_api.retry_db_errors def sync_allocations(self): # determine current configured allocatable tunnel ids tunnel_ids = set() ranges = self.get_network_segment_ranges() for tun_min, tun_max in ranges: tunnel_ids |= set(moves.range(tun_min, tun_max + 1)) tunnel_id_getter = operator.attrgetter(self.segmentation_key) tunnel_col = getattr(self.model, self.segmentation_key) ctx = context.get_admin_context() with db_api.CONTEXT_WRITER.using(ctx): # remove from table unallocated tunnels not currently allocatable # fetch results as list via all() because we'll be iterating # through them twice allocs = ctx.session.query(self.model).all() # collect those vnis that needs to be deleted from db unallocateds = ( tunnel_id_getter(a) for a in allocs if not a.allocated) to_remove = (x for x in unallocateds if x not in tunnel_ids) # Immediately delete tunnels in chunks. This leaves no work for # flush at the end of transaction for chunk in chunks(to_remove, self.BULK_SIZE): (ctx.session.query(self.model).filter(tunnel_col.in_(chunk)). 
filter_by(allocated=False).delete(synchronize_session=False)) # collect vnis that need to be added existings = {tunnel_id_getter(a) for a in allocs} missings = list(tunnel_ids - existings) for chunk in chunks(missings, self.BULK_SIZE): bulk = [{self.segmentation_key: x, 'allocated': False} for x in chunk] ctx.session.execute(self.model.__table__.insert(), bulk) def is_partial_segment(self, segment): return segment.get(api.SEGMENTATION_ID) is None def validate_provider_segment(self, segment): physical_network = segment.get(api.PHYSICAL_NETWORK) if physical_network: msg = _("provider:physical_network specified for %s " "network") % segment.get(api.NETWORK_TYPE) raise exc.InvalidInput(error_message=msg) for key, value in segment.items(): if value and key not in [api.NETWORK_TYPE, api.SEGMENTATION_ID]: msg = (_("%(key)s prohibited for %(tunnel)s provider network") % {'key': key, 'tunnel': segment.get(api.NETWORK_TYPE)}) raise exc.InvalidInput(error_message=msg) def get_mtu(self, physical_network=None): seg_mtu = super(_TunnelTypeDriverBase, self).get_mtu() mtu = [] if seg_mtu > 0: mtu.append(seg_mtu) if cfg.CONF.ml2.path_mtu > 0: mtu.append(cfg.CONF.ml2.path_mtu) version = cfg.CONF.ml2.overlay_ip_version ip_header_length = p_const.IP_HEADER_LENGTH[version] return min(mtu) - ip_header_length if mtu else 0 def get_network_segment_ranges(self): """Get the driver network segment ranges. Queries all tunnel network segment ranges from DB if the ``NETWORK_SEGMENT_RANGE`` service plugin is enabled. Otherwise, they will be loaded from the host config file - `ml2_conf.ini`. """ ranges = self.tunnel_ranges if directory.get_plugin(plugin_constants.NETWORK_SEGMENT_RANGE): ranges = self._get_network_segment_ranges_from_db() return ranges @six.add_metaclass(abc.ABCMeta) class TunnelTypeDriver(_TunnelTypeDriverBase): """Define stable abstract interface for ML2 type drivers. tunnel type networks rely on tunnel endpoints. This class defines abstract methods to manage these endpoints. ML2 type driver that passes session to functions: - reserve_provider_segment - allocate_tenant_segment - release_segment - get_allocation """ def reserve_provider_segment(self, session, segment, filters=None): if self.is_partial_segment(segment): filters = filters or {} alloc = self.allocate_partially_specified_segment(session, **filters) if not alloc: raise exc.NoNetworkAvailable() else: segmentation_id = segment.get(api.SEGMENTATION_ID) alloc = self.allocate_fully_specified_segment( session, **{self.segmentation_key: segmentation_id}) if not alloc: raise exc.TunnelIdInUse(tunnel_id=segmentation_id) return {api.NETWORK_TYPE: self.get_type(), api.PHYSICAL_NETWORK: None, api.SEGMENTATION_ID: getattr(alloc, self.segmentation_key), api.MTU: self.get_mtu()} def allocate_tenant_segment(self, session, filters=None): filters = filters or {} alloc = self.allocate_partially_specified_segment(session, **filters) if not alloc: return return {api.NETWORK_TYPE: self.get_type(), api.PHYSICAL_NETWORK: None, api.SEGMENTATION_ID: getattr(alloc, self.segmentation_key), api.MTU: self.get_mtu()} def release_segment(self, session, segment): tunnel_id = segment[api.SEGMENTATION_ID] ranges = self.get_network_segment_ranges() inside = any(lo <= tunnel_id <= hi for lo, hi in ranges) info = {'type': self.get_type(), 'id': tunnel_id} with session.begin(subtransactions=True): query = (session.query(self.model).
filter_by(**{self.segmentation_key: tunnel_id})) if inside: count = query.update({"allocated": False}) if count: LOG.debug("Releasing %(type)s tunnel %(id)s to pool", info) else: count = query.delete() if count: LOG.debug("Releasing %(type)s tunnel %(id)s outside pool", info) if not count: LOG.warning("%(type)s tunnel %(id)s not found", info) def get_allocation(self, session, tunnel_id): return (session.query(self.model). filter_by(**{self.segmentation_key: tunnel_id}). first()) @six.add_metaclass(abc.ABCMeta) class ML2TunnelTypeDriver(_TunnelTypeDriverBase): """Define stable abstract interface for ML2 type drivers. tunnel type networks rely on tunnel endpoints. This class defines abstract methods to manage these endpoints. ML2 type driver that passes context as argument to functions: - reserve_provider_segment - allocate_tenant_segment - release_segment - get_allocation """ def reserve_provider_segment(self, context, segment, filters=None): if self.is_partial_segment(segment): filters = filters or {} alloc = self.allocate_partially_specified_segment(context, **filters) if not alloc: raise exc.NoNetworkAvailable() else: segmentation_id = segment.get(api.SEGMENTATION_ID) alloc = self.allocate_fully_specified_segment( context, **{self.segmentation_key: segmentation_id}) if not alloc: raise exc.TunnelIdInUse(tunnel_id=segmentation_id) return {api.NETWORK_TYPE: self.get_type(), api.PHYSICAL_NETWORK: None, api.SEGMENTATION_ID: getattr(alloc, self.segmentation_key), api.MTU: self.get_mtu()} def allocate_tenant_segment(self, context, filters=None): filters = filters or {} alloc = self.allocate_partially_specified_segment(context, **filters) if not alloc: return return {api.NETWORK_TYPE: self.get_type(), api.PHYSICAL_NETWORK: None, api.SEGMENTATION_ID: getattr(alloc, self.segmentation_key), api.MTU: self.get_mtu()} def release_segment(self, context, segment): tunnel_id = segment[api.SEGMENTATION_ID] ranges = self.get_network_segment_ranges() inside = any(lo <= tunnel_id <= hi for lo, hi in ranges) info = {'type': self.get_type(), 'id': tunnel_id} with db_api.CONTEXT_WRITER.using(context): query = (context.session.query(self.model). filter_by(**{self.segmentation_key: tunnel_id})) if inside: count = query.update({"allocated": False}) if count: LOG.debug("Releasing %(type)s tunnel %(id)s to pool", info) else: count = query.delete() if count: LOG.debug("Releasing %(type)s tunnel %(id)s outside pool", info) if not count: LOG.warning("%(type)s tunnel %(id)s not found", info) @db_api.CONTEXT_READER def get_allocation(self, context, tunnel_id): return (context.session.query(self.model). filter_by(**{self.segmentation_key: tunnel_id}). first()) class EndpointTunnelTypeDriver(ML2TunnelTypeDriver): def __init__(self, segment_model, endpoint_model): super(EndpointTunnelTypeDriver, self).__init__(segment_model) if issubclass(endpoint_model, base_obj.NeutronDbObject): self.endpoint_model = endpoint_model.db_model else: self.endpoint_model = endpoint_model self.segmentation_key = next(iter(self.primary_keys)) def get_endpoint_by_host(self, host): LOG.debug("get_endpoint_by_host() called for host %s", host) session = db_api.get_reader_session() return (session.query(self.endpoint_model). filter_by(host=host).first()) def get_endpoint_by_ip(self, ip): LOG.debug("get_endpoint_by_ip() called for ip %s", ip) session = db_api.get_reader_session() return (session.query(self.endpoint_model). 
filter_by(ip_address=ip).first()) def delete_endpoint(self, ip): LOG.debug("delete_endpoint() called for ip %s", ip) session = db_api.get_writer_session() session.query(self.endpoint_model).filter_by(ip_address=ip).delete() def delete_endpoint_by_host_or_ip(self, host, ip): LOG.debug("delete_endpoint_by_host_or_ip() called for " "host %(host)s or %(ip)s", {'host': host, 'ip': ip}) session = db_api.get_writer_session() session.query(self.endpoint_model).filter( or_(self.endpoint_model.host == host, self.endpoint_model.ip_address == ip)).delete() def _get_endpoints(self): LOG.debug("_get_endpoints() called") session = db_api.get_reader_session() return session.query(self.endpoint_model) def _add_endpoint(self, ip, host, **kwargs): LOG.debug("_add_endpoint() called for ip %s", ip) session = db_api.get_writer_session() try: endpoint = self.endpoint_model(ip_address=ip, host=host, **kwargs) endpoint.save(session) except db_exc.DBDuplicateEntry: endpoint = (session.query(self.endpoint_model). filter_by(ip_address=ip).one()) LOG.warning("Endpoint with ip %s already exists", ip) return endpoint class TunnelRpcCallbackMixin(object): def setup_tunnel_callback_mixin(self, notifier, type_manager): self._notifier = notifier self._type_manager = type_manager def tunnel_sync(self, rpc_context, **kwargs): """Update a new tunnel endpoint. Updates the database with the tunnel IP. All listening agents will also be notified about the new tunnel IP. """ tunnel_ip = kwargs.get('tunnel_ip') if not tunnel_ip: msg = _("Tunnel IP value needed by the ML2 plugin") raise exc.InvalidInput(error_message=msg) host = kwargs.get('host') version = netaddr.IPAddress(tunnel_ip).version if version != cfg.CONF.ml2.overlay_ip_version: msg = (_("Tunnel IP version does not match ML2 " "overlay_ip_version: %(overlay)s, host: %(host)s, " "tunnel_ip: %(ip)s") % {'overlay': cfg.CONF.ml2.overlay_ip_version, 'host': host, 'ip': tunnel_ip}) raise exc.InvalidInput(error_message=msg) tunnel_type = kwargs.get('tunnel_type') if not tunnel_type: msg = _("Network type value needed by the ML2 plugin") raise exc.InvalidInput(error_message=msg) driver = self._type_manager.drivers.get(tunnel_type) if driver: # The given conditional statements will verify the following # things: # 1. If host is not passed from an agent, it is a legacy mode. # 2. If passed host and tunnel_ip are not found in the DB, # it is a new endpoint. # 3. If host is passed from an agent and it is not found in DB # but the passed tunnel_ip is found, delete the endpoint # from DB and add the endpoint with (tunnel_ip, host), # it is an upgrade case. # 4. If passed host is found in DB and passed tunnel ip is not # found, delete the endpoint belonging to that host and # add endpoint with latest (tunnel_ip, host), it is a case # where local_ip of an agent got changed. # 5. If the passed host had another ip in the DB, the host-id has # roamed to a different IP; delete any reference to the new # local_ip or the host id. Don't notify tunnel_delete for the # old IP since that one could have been taken by a different # agent host-id (neutron-ovs-cleanup should be used to clean up # the stale endpoints). # Finally create a new endpoint for the (tunnel_ip, host).
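# An illustrative RPC payload for the cases above (values are examples
# only; the keys are the ones read at the top of this method):
#
#     kwargs = {'tunnel_ip': '192.0.2.10',
#               'host': 'compute-1',
#               'tunnel_type': 'vxlan'}
#
# A brand-new (tunnel_ip, host) pair matches none of cases 3-5 and
# simply falls through to add_endpoint() below.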
if host: host_endpoint = driver.obj.get_endpoint_by_host(host) ip_endpoint = driver.obj.get_endpoint_by_ip(tunnel_ip) if (ip_endpoint and ip_endpoint.host is None and host_endpoint is None): driver.obj.delete_endpoint(ip_endpoint.ip_address) elif (ip_endpoint and ip_endpoint.host != host): LOG.info( "Tunnel IP %(ip)s was used by host %(host)s and " "will be assigned to %(new_host)s", {'ip': ip_endpoint.ip_address, 'host': ip_endpoint.host, 'new_host': host}) driver.obj.delete_endpoint_by_host_or_ip( host, ip_endpoint.ip_address) elif (host_endpoint and host_endpoint.ip_address != tunnel_ip): # Notify all other listening agents to delete stale tunnels self._notifier.tunnel_delete( rpc_context, host_endpoint.ip_address, tunnel_type) driver.obj.delete_endpoint(host_endpoint.ip_address) tunnel = driver.obj.add_endpoint(tunnel_ip, host) tunnels = driver.obj.get_endpoints() entry = {'tunnels': tunnels} # Notify all other listening agents self._notifier.tunnel_update(rpc_context, tunnel.ip_address, tunnel_type) # Return the list of tunnels IP's to the agent return entry else: msg = (_("Network type value %(type)s not supported, " "host: %(host)s with tunnel IP: %(ip)s") % {'type': tunnel_type, 'host': host or 'legacy mode (no host provided by agent)', 'ip': tunnel_ip}) raise exc.InvalidInput(error_message=msg) class TunnelAgentRpcApiMixin(object): def _get_tunnel_update_topic(self): return topics.get_topic_name(self.topic, TUNNEL, topics.UPDATE) def tunnel_update(self, context, tunnel_ip, tunnel_type): cctxt = self.client.prepare(topic=self._get_tunnel_update_topic(), fanout=True) cctxt.cast(context, 'tunnel_update', tunnel_ip=tunnel_ip, tunnel_type=tunnel_type) def _get_tunnel_delete_topic(self): return topics.get_topic_name(self.topic, TUNNEL, topics.DELETE) def tunnel_delete(self, context, tunnel_ip, tunnel_type): cctxt = self.client.prepare(topic=self._get_tunnel_delete_topic(), fanout=True) cctxt.cast(context, 'tunnel_delete', tunnel_ip=tunnel_ip, tunnel_type=tunnel_type) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/type_vlan.py0000644000175000017500000003703000000000000025044 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
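# An illustrative ml2_conf.ini fragment for this driver (values are
# examples only; the option itself is registered by
# driver_type.register_ml2_drivers_vlan_opts()):
#
#     [ml2_type_vlan]
#     network_vlan_ranges = physnet1:100:199,physnet2
#
# Here "physnet1" offers VLANs 100-199 for allocation, while the bare
# "physnet2" entry only allows provider networks that specify their own
# segmentation_id.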
import collections import sys from neutron_lib import constants as p_const from neutron_lib import context from neutron_lib.db import api as db_api from neutron_lib import exceptions as exc from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from neutron_lib.plugins.ml2 import api from neutron_lib.plugins import utils as plugin_utils from oslo_config import cfg from oslo_log import log from oslo_utils import uuidutils from neutron._i18n import _ from neutron.conf.plugins.ml2.drivers import driver_type from neutron.db.models.plugins.ml2 import vlanallocation as vlan_alloc_model from neutron.objects import network_segment_range as range_obj from neutron.objects.plugins.ml2 import vlanallocation as vlanalloc from neutron.plugins.ml2.drivers import helpers from neutron.services.network_segment_range import plugin as range_plugin LOG = log.getLogger(__name__) driver_type.register_ml2_drivers_vlan_opts() class VlanTypeDriver(helpers.SegmentTypeDriver): """Manage state for VLAN networks with ML2. The VlanTypeDriver implements the 'vlan' network_type. VLAN network segments provide connectivity between VMs and other devices using any connected IEEE 802.1Q conformant physical_network segmented into virtual networks via IEEE 802.1Q headers. Up to 4094 VLAN network segments can exist on each available physical_network. """ def __init__(self): super(VlanTypeDriver, self).__init__(vlanalloc.VlanAllocation) self.model_segmentation_id = vlan_alloc_model.VlanAllocation.vlan_id self._parse_network_vlan_ranges() @db_api.retry_db_errors def _populate_new_default_network_segment_ranges(self): ctx = context.get_admin_context() for (physical_network, vlan_ranges) in ( self.network_vlan_ranges.items()): for vlan_min, vlan_max in vlan_ranges: res = { 'id': uuidutils.generate_uuid(), 'name': '', 'default': True, 'shared': True, 'network_type': p_const.TYPE_VLAN, 'physical_network': physical_network, 'minimum': vlan_min, 'maximum': vlan_max} with db_api.CONTEXT_WRITER.using(ctx): new_default_range_obj = ( range_obj.NetworkSegmentRange(ctx, **res)) new_default_range_obj.create() @db_api.retry_db_errors def _delete_expired_default_network_segment_ranges(self): ctx = context.get_admin_context() with db_api.CONTEXT_WRITER.using(ctx): filters = { 'default': True, 'network_type': p_const.TYPE_VLAN, } old_default_range_objs = range_obj.NetworkSegmentRange.get_objects( ctx, **filters) for obj in old_default_range_objs: obj.delete() def _parse_network_vlan_ranges(self): try: self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges( cfg.CONF.ml2_type_vlan.network_vlan_ranges) except Exception: LOG.exception("Failed to parse network_vlan_ranges. 
" "Service terminated!") sys.exit(1) LOG.info("Network VLAN ranges: %s", self.network_vlan_ranges) @db_api.retry_db_errors def _sync_vlan_allocations(self): ctx = context.get_admin_context() with db_api.CONTEXT_WRITER.using(ctx): # VLAN ranges per physical network: # {phy1: [(1, 10), (30, 50)], ...} ranges = self.get_network_segment_ranges() # Delete those VLAN registers from unconfigured physical networks physnets = vlanalloc.VlanAllocation.get_physical_networks(ctx) physnets_unconfigured = physnets - set(ranges) if physnets_unconfigured: LOG.debug('Removing any VLAN register on physical networks %s', physnets_unconfigured) vlanalloc.VlanAllocation.delete_physical_networks( ctx, physnets_unconfigured) # Get existing allocations for all configured physical networks allocations = collections.defaultdict(list) for alloc in vlanalloc.VlanAllocation.get_objects(ctx): allocations[alloc.physical_network].append(alloc) for physical_network, vlan_ranges in ranges.items(): # determine current configured allocatable vlans for # this physical network vlan_ids = set() for vlan_min, vlan_max in vlan_ranges: vlan_ids |= set(range(vlan_min, vlan_max + 1)) # remove from table unallocated vlans not currently # allocatable if physical_network in allocations: for alloc in allocations[physical_network]: try: # see if vlan is allocatable vlan_ids.remove(alloc.vlan_id) except KeyError: # it's not allocatable, so check if its allocated if not alloc.allocated: # it's not, so remove it from table LOG.debug("Removing vlan %(vlan_id)s on " "physical network " "%(physical_network)s from pool", {'vlan_id': alloc.vlan_id, 'physical_network': physical_network}) # This UPDATE WHERE statement blocks anyone # from concurrently changing the allocation # values to True while our transaction is # open so we don't accidentally delete # allocated segments. If someone has already # allocated, update_objects will return 0 so we # don't delete. if vlanalloc.VlanAllocation.update_objects( ctx, values={'allocated': False}, allocated=False, vlan_id=alloc.vlan_id, physical_network=physical_network): alloc.delete() del allocations[physical_network] # Add missing allocatable VLAN registers for "physical_network" vlanalloc.VlanAllocation.bulk_create(ctx, physical_network, vlan_ids) @db_api.retry_db_errors def _get_network_segment_ranges_from_db(self): ranges = {} ctx = context.get_admin_context() with db_api.CONTEXT_READER.using(ctx): range_objs = (range_obj.NetworkSegmentRange.get_objects( ctx, network_type=self.get_type())) for obj in range_objs: physical_network = obj['physical_network'] if physical_network not in ranges: ranges[physical_network] = [] ranges[physical_network].append((obj['minimum'], obj['maximum'])) return ranges def get_type(self): return p_const.TYPE_VLAN def initialize(self): if not range_plugin.is_network_segment_range_enabled(): # service plugins are initialized/loaded after the ML2 driver # initialization. Thus, we base on the information whether # ``network_segment_range`` service plugin is enabled/defined in # ``neutron.conf`` to decide whether to skip the first time sync # allocation during driver initialization, instead of using the # directory.get_plugin() method - the normal way used elsewhere to # check if a plugin is loaded. 
self._sync_vlan_allocations() LOG.info("VlanTypeDriver initialization complete") def initialize_network_segment_range_support(self): self._delete_expired_default_network_segment_ranges() self._populate_new_default_network_segment_ranges() # Override self.network_vlan_ranges with the network segment range # information from DB and then do a sync_allocations since the # segment range service plugin has not yet been loaded at this # initialization time. self.network_vlan_ranges = self._get_network_segment_ranges_from_db() self._sync_vlan_allocations() def update_network_segment_range_allocations(self): self._sync_vlan_allocations() def get_network_segment_ranges(self): """Get the driver network segment ranges. Queries all VLAN network segment ranges from DB if the ``NETWORK_SEGMENT_RANGE`` service plugin is enabled. Otherwise, they will be loaded from the host config file - `ml2_conf.ini`. """ ranges = self.network_vlan_ranges if directory.get_plugin(plugin_constants.NETWORK_SEGMENT_RANGE): ranges = self._get_network_segment_ranges_from_db() return ranges def is_partial_segment(self, segment): return segment.get(api.SEGMENTATION_ID) is None def validate_provider_segment(self, segment): physical_network = segment.get(api.PHYSICAL_NETWORK) segmentation_id = segment.get(api.SEGMENTATION_ID) ranges = self.get_network_segment_ranges() if physical_network: if physical_network not in ranges: msg = (_("physical_network '%s' unknown " "for VLAN provider network") % physical_network) raise exc.InvalidInput(error_message=msg) if segmentation_id is not None: if not plugin_utils.is_valid_vlan_tag(segmentation_id): msg = (_("segmentation_id out of range (%(min)s through " "%(max)s)") % {'min': p_const.MIN_VLAN_TAG, 'max': p_const.MAX_VLAN_TAG}) raise exc.InvalidInput(error_message=msg) else: if not ranges.get(physical_network): msg = (_("Physical network %s requires segmentation_id " "to be specified when creating a provider " "network") % physical_network) raise exc.InvalidInput(error_message=msg) elif segmentation_id is not None: msg = _("segmentation_id requires physical_network for VLAN " "provider network") raise exc.InvalidInput(error_message=msg) for key, value in segment.items(): if value and key not in [api.NETWORK_TYPE, api.PHYSICAL_NETWORK, api.SEGMENTATION_ID]: msg = _("%s prohibited for VLAN provider network") % key raise exc.InvalidInput(error_message=msg) def reserve_provider_segment(self, context, segment, filters=None): filters = filters or {} project_id = filters.get('project_id') filters = {} physical_network = segment.get(api.PHYSICAL_NETWORK) if physical_network is not None: filters['physical_network'] = physical_network vlan_id = segment.get(api.SEGMENTATION_ID) if vlan_id is not None: filters['vlan_id'] = vlan_id if self.is_partial_segment(segment): if (directory.get_plugin( plugin_constants.NETWORK_SEGMENT_RANGE)and project_id): filters['project_id'] = project_id alloc = self.allocate_partially_specified_segment( context, **filters) if not alloc: raise exc.NoNetworkAvailable() else: alloc = self.allocate_fully_specified_segment( context, **filters) if not alloc: raise exc.VlanIdInUse(**filters) return {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: alloc.physical_network, api.SEGMENTATION_ID: alloc.vlan_id, api.MTU: self.get_mtu(alloc.physical_network)} def allocate_tenant_segment(self, context, filters=None): filters = filters or {} ranges = self.get_network_segment_ranges() for physnet in ranges: filters['physical_network'] = physnet alloc = 
self.allocate_partially_specified_segment( context, **filters) if alloc: break else: return return {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: alloc.physical_network, api.SEGMENTATION_ID: alloc.vlan_id, api.MTU: self.get_mtu(alloc.physical_network)} def release_segment(self, context, segment): physical_network = segment[api.PHYSICAL_NETWORK] vlan_id = segment[api.SEGMENTATION_ID] vlan_ranges = self.get_network_segment_ranges() ranges = vlan_ranges.get(physical_network, []) inside = any(lo <= vlan_id <= hi for lo, hi in ranges) count = False with db_api.CONTEXT_WRITER.using(context): alloc = vlanalloc.VlanAllocation.get_object( context, physical_network=physical_network, vlan_id=vlan_id) if alloc: if inside and alloc.allocated: count = True alloc.allocated = False alloc.update() LOG.debug("Releasing vlan %(vlan_id)s on physical " "network %(physical_network)s to pool", {'vlan_id': vlan_id, 'physical_network': physical_network}) else: count = True alloc.delete() LOG.debug("Releasing vlan %(vlan_id)s on physical " "network %(physical_network)s outside pool", {'vlan_id': vlan_id, 'physical_network': physical_network}) if not count: LOG.warning("No vlan_id %(vlan_id)s found on physical " "network %(physical_network)s", {'vlan_id': vlan_id, 'physical_network': physical_network}) def get_mtu(self, physical_network): seg_mtu = super(VlanTypeDriver, self).get_mtu() mtu = [] if seg_mtu > 0: mtu.append(seg_mtu) if physical_network in self.physnet_mtus: mtu.append(int(self.physnet_mtus[physical_network])) return min(mtu) if mtu else 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/drivers/type_vxlan.py0000644000175000017500000000454500000000000025241 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as p_const from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_log import log from neutron.conf.plugins.ml2.drivers import driver_type from neutron.db.models.plugins.ml2 import vxlanallocation as vxlan_alloc_model from neutron.objects.plugins.ml2 import vxlanallocation as vxlan_obj from neutron.plugins.ml2.drivers import type_tunnel LOG = log.getLogger(__name__) driver_type.register_ml2_drivers_vxlan_opts() class VxlanTypeDriver(type_tunnel.EndpointTunnelTypeDriver): def __init__(self): super(VxlanTypeDriver, self).__init__( vxlan_obj.VxlanAllocation, vxlan_obj.VxlanEndpoint) self.model_segmentation_id = ( vxlan_alloc_model.VxlanAllocation.vxlan_vni) def get_type(self): return p_const.TYPE_VXLAN def initialize(self): try: self._initialize(cfg.CONF.ml2_type_vxlan.vni_ranges) except n_exc.NetworkTunnelRangeError: LOG.exception("Failed to parse vni_ranges. 
" "Service terminated!") raise SystemExit() def get_endpoints(self): """Get every vxlan endpoints from database.""" vxlan_endpoints = self._get_endpoints() return [{'ip_address': vxlan_endpoint.ip_address, 'udp_port': vxlan_endpoint.udp_port, 'host': vxlan_endpoint.host} for vxlan_endpoint in vxlan_endpoints] def add_endpoint(self, ip, host, udp_port=p_const.VXLAN_UDP_PORT): return self._add_endpoint(ip, host, udp_port=udp_port) def get_mtu(self, physical_network=None): mtu = super(VxlanTypeDriver, self).get_mtu() return mtu - p_const.VXLAN_ENCAP_OVERHEAD if mtu else 0 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.355045 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/extensions/0000755000175000017500000000000000000000000023207 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/extensions/__init__.py0000644000175000017500000000000000000000000025306 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/extensions/data_plane_status.py0000644000175000017500000000312300000000000027253 0ustar00coreycorey00000000000000# Copyright (c) 2017 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import data_plane_status as dps_lib from neutron_lib.plugins.ml2 import api from oslo_log import log as logging from neutron.db import data_plane_status_db as dps_db LOG = logging.getLogger(__name__) class DataPlaneStatusExtensionDriver(api.ExtensionDriver, dps_db.DataPlaneStatusMixin): _supported_extension_alias = 'data-plane-status' def initialize(self): LOG.info("DataPlaneStatusExtensionDriver initialization complete") @property def extension_alias(self): return self._supported_extension_alias def process_update_port(self, plugin_context, data, result): if dps_lib.DATA_PLANE_STATUS in data: self._process_update_port_data_plane_status(plugin_context, data, result) def extend_port_dict(self, session, db_data, result): self._extend_port_data_plane_status(result, db_data) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/extensions/dns_integration.py0000644000175000017500000005752000000000000026761 0ustar00coreycorey00000000000000# Copyright (c) 2016 IBM # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import dns as dns_apidef from neutron_lib.api.definitions import dns_domain_ports from neutron_lib.api import validators from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as lib_const from neutron_lib.exceptions import dns as dns_exc from neutron_lib.plugins import directory from neutron_lib.plugins.ml2 import api from neutron_lib.plugins import utils as plugin_utils from oslo_config import cfg from oslo_log import log as logging from neutron.db import segments_db from neutron.objects import network as net_obj from neutron.objects import ports as port_obj from neutron.objects import subnet as subnet_obj from neutron.services.externaldns import driver LOG = logging.getLogger(__name__) class DNSExtensionDriver(api.ExtensionDriver): _supported_extension_alias = dns_apidef.ALIAS @property def extension_alias(self): return self._supported_extension_alias def process_create_network(self, plugin_context, request_data, db_data): dns_domain = request_data.get(dns_apidef.DNSDOMAIN) if not validators.is_attr_set(dns_domain): return if dns_domain: net_obj.NetworkDNSDomain(plugin_context, network_id=db_data['id'], dns_domain=dns_domain).create() db_data[dns_apidef.DNSDOMAIN] = dns_domain def process_update_network(self, plugin_context, request_data, db_data): new_value = request_data.get(dns_apidef.DNSDOMAIN) if not validators.is_attr_set(new_value): return current_dns_domain = db_data.get(dns_apidef.DNSDOMAIN) if current_dns_domain == new_value: return net_id = db_data['id'] if current_dns_domain: net_dns_domain = net_obj.NetworkDNSDomain.get_object( plugin_context, network_id=net_id) if new_value: net_dns_domain['dns_domain'] = new_value db_data[dns_apidef.DNSDOMAIN] = new_value net_dns_domain.update() else: net_dns_domain.delete() db_data[dns_apidef.DNSDOMAIN] = '' elif new_value: net_obj.NetworkDNSDomain(plugin_context, network_id=net_id, dns_domain=new_value).create() db_data[dns_apidef.DNSDOMAIN] = new_value def process_create_port(self, plugin_context, request_data, db_data): if not (request_data.get(dns_apidef.DNSNAME) or request_data.get(dns_apidef.DNSDOMAIN)): return dns_name, is_dns_domain_default = self._get_request_dns_name( request_data) if is_dns_domain_default: return network, subnets = self._get_details(plugin_context, db_data['network_id']) self._create_port_dns_record(plugin_context, request_data, db_data, network, subnets, dns_name) def _create_port_dns_record(self, plugin_context, request_data, db_data, network, subnets, dns_name): external_dns_domain = (request_data.get(dns_apidef.DNSDOMAIN) or network.get(dns_apidef.DNSDOMAIN)) flag = self.external_dns_not_needed(plugin_context, network, subnets) current_dns_name, current_dns_domain = ( self._calculate_current_dns_name_and_domain( dns_name, external_dns_domain, flag)) dns_data_obj = port_obj.PortDNS( plugin_context, port_id=db_data['id'], current_dns_name=current_dns_name, current_dns_domain=current_dns_domain, previous_dns_name='', previous_dns_domain='', dns_name=dns_name, dns_domain=request_data.get(dns_apidef.DNSDOMAIN, '')) dns_data_obj.create() return dns_data_obj def _calculate_current_dns_name_and_domain(self, dns_name, external_dns_domain, no_external_dns_service): # When creating a new PortDNS object, the current_dns_name and # current_dns_domain fields hold the data that the 
integration driver # will send to the external DNS service. They are set to non-blank # values only if all the following conditions are met: # 1) There is an external DNS integration driver configured # 2) The user request contains a valid non-blank value for the port's # dns_name # 3) The user request contains a valid non-blank value for the port's # dns_domain or the port's network has a non-blank value in its # dns_domain attribute are_both_dns_attributes_set = dns_name and external_dns_domain if no_external_dns_service or not are_both_dns_attributes_set: return '', '' return dns_name, external_dns_domain def _update_dns_db(self, plugin_context, request_data, db_data, network, subnets): dns_name = request_data.get(dns_apidef.DNSNAME) dns_domain = request_data.get(dns_apidef.DNSDOMAIN) has_fixed_ips = 'fixed_ips' in request_data dns_data_db = port_obj.PortDNS.get_object( plugin_context, port_id=db_data['id']) if dns_data_db: is_dns_name_changed = ( dns_name is not None and dns_data_db[dns_apidef.DNSNAME] != dns_name) is_dns_domain_changed = ( dns_domain is not None and dns_data_db[dns_apidef.DNSDOMAIN] != dns_domain) if (is_dns_name_changed or is_dns_domain_changed or (has_fixed_ips and dns_data_db['current_dns_name'])): dns_data_db = self._populate_previous_external_dns_data( dns_data_db) dns_data_db = self._populate_current_external_dns_data( request_data, network, dns_data_db, dns_name, dns_domain, is_dns_name_changed, is_dns_domain_changed) elif not dns_data_db['current_dns_name']: # If port was removed from external DNS service in previous # update, make sure we don't attempt removal again dns_data_db['previous_dns_name'] = '' dns_data_db['previous_dns_domain'] = '' dns_data_db.update() return dns_data_db if dns_name or dns_domain: dns_data_db = self._create_port_dns_record( plugin_context, request_data, db_data, network, subnets, dns_name or '') return dns_data_db def _populate_previous_external_dns_data(self, dns_data_db): dns_data_db['previous_dns_name'] = ( dns_data_db['current_dns_name']) dns_data_db['previous_dns_domain'] = ( dns_data_db['current_dns_domain']) return dns_data_db def _populate_current_external_dns_data(self, request_data, network, dns_data_db, dns_name, dns_domain, is_dns_name_changed, is_dns_domain_changed): if is_dns_name_changed or is_dns_domain_changed: if is_dns_name_changed: dns_data_db[dns_apidef.DNSNAME] = dns_name external_dns_domain = (dns_data_db[dns_apidef.DNSDOMAIN] or network.get(dns_apidef.DNSDOMAIN)) if is_dns_domain_changed: dns_data_db[dns_apidef.DNSDOMAIN] = dns_domain external_dns_domain = request_data[dns_apidef.DNSDOMAIN] if not external_dns_domain: external_dns_domain = network.get(dns_apidef.DNSDOMAIN) dns_data_db['current_dns_name'] = dns_data_db[dns_apidef.DNSNAME] dns_data_db['current_dns_domain'] = external_dns_domain if not (dns_data_db['current_dns_name'] and dns_data_db['current_dns_domain']): dns_data_db['current_dns_name'] = '' dns_data_db['current_dns_domain'] = '' return dns_data_db def process_update_port(self, plugin_context, request_data, db_data): has_dns_name = dns_apidef.DNSNAME in request_data has_fixed_ips = 'fixed_ips' in request_data has_dns_domain = dns_apidef.DNSDOMAIN in request_data if not any((has_dns_name, has_fixed_ips, has_dns_domain)): return is_dns_domain_default = self._get_request_dns_name( request_data)[1] if is_dns_domain_default: self._extend_port_dict(plugin_context.session, db_data, db_data, None) return network, subnets = self._get_details(plugin_context, db_data['network_id']) dns_data_db = None if 
self.external_dns_not_needed(plugin_context, network, subnets): # No need to update external DNS service. Only process the port's # dns_name or dns_domain attributes if necessary if has_dns_name or has_dns_domain: dns_data_db = self._process_only_port_update( plugin_context, request_data, db_data) else: dns_data_db = self._update_dns_db(plugin_context, request_data, db_data, network, subnets) self._extend_port_dict(plugin_context.session, db_data, db_data, dns_data_db) def _process_only_port_update(self, plugin_context, request_data, db_data): dns_name = request_data.get(dns_apidef.DNSNAME) dns_domain = request_data.get(dns_apidef.DNSDOMAIN) dns_data_db = port_obj.PortDNS.get_object( plugin_context, port_id=db_data['id']) if dns_data_db: if dns_name is not None and dns_data_db[ dns_apidef.DNSNAME] != dns_name: dns_data_db[dns_apidef.DNSNAME] = dns_name if (dns_domain is not None and dns_data_db[dns_apidef.DNSDOMAIN] != dns_domain): dns_data_db[dns_apidef.DNSDOMAIN] = dns_domain dns_data_db.update() return dns_data_db dns_data_db = port_obj.PortDNS(plugin_context, port_id=db_data['id'], current_dns_name='', current_dns_domain='', previous_dns_name='', previous_dns_domain='', dns_name=dns_name or '', dns_domain=dns_domain or '') dns_data_db.create() return dns_data_db def external_dns_not_needed(self, context, network, subnets): """Decide if ports in network need to be sent to the DNS service. :param context: plugin request context :param network: network dictionary :param subnets: list of subnets in network :return: True or False """ return False def extend_network_dict(self, session, db_data, response_data): response_data[dns_apidef.DNSDOMAIN] = '' if db_data.dns_domain: response_data[dns_apidef.DNSDOMAIN] = db_data.dns_domain[ dns_apidef.DNSDOMAIN] return response_data def _get_dns_domain(self): if not cfg.CONF.dns_domain: return '' if cfg.CONF.dns_domain.endswith('.'): return cfg.CONF.dns_domain return '%s.' 
% cfg.CONF.dns_domain

    def _get_request_dns_name(self, port):
        dns_domain = self._get_dns_domain()
        if dns_domain and dns_domain != lib_const.DNS_DOMAIN_DEFAULT:
            return port.get(dns_apidef.DNSNAME, ''), False
        return '', True

    def _get_request_dns_name_and_domain_name(self, dns_data_db):
        dns_domain = self._get_dns_domain()
        dns_name = ''
        if dns_domain and dns_domain != lib_const.DNS_DOMAIN_DEFAULT:
            if dns_data_db:
                dns_name = dns_data_db.dns_name
        return dns_name, dns_domain

    def _get_dns_names_for_port(self, ips, dns_data_db):
        dns_assignment = []
        dns_name, dns_domain = self._get_request_dns_name_and_domain_name(
            dns_data_db)
        for ip in ips:
            if dns_name:
                hostname = dns_name
                fqdn = dns_name
                if not dns_name.endswith('.'):
                    fqdn = '%s.%s' % (dns_name, dns_domain)
            else:
                hostname = 'host-%s' % ip['ip_address'].replace(
                    '.', '-').replace(':', '-')
                fqdn = hostname
                if dns_domain:
                    fqdn = '%s.%s' % (hostname, dns_domain)
            dns_assignment.append({'ip_address': ip['ip_address'],
                                   'hostname': hostname,
                                   'fqdn': fqdn})
        return dns_assignment

    def _get_dns_name_for_port_get(self, port, dns_data_db):
        if port['fixed_ips']:
            return self._get_dns_names_for_port(port['fixed_ips'],
                                                dns_data_db)
        return []

    def _extend_port_dict(self, session, db_data, response_data,
                          dns_data_db):
        if not dns_data_db:
            response_data[dns_apidef.DNSNAME] = ''
        else:
            response_data[dns_apidef.DNSNAME] = dns_data_db[
                dns_apidef.DNSNAME]
        response_data['dns_assignment'] = self._get_dns_name_for_port_get(
            db_data, dns_data_db)
        return response_data

    def extend_port_dict(self, session, db_data, response_data):
        dns_data_db = db_data.dns
        return self._extend_port_dict(session, db_data, response_data,
                                      dns_data_db)

    def _get_details(self, context, network_id):
        plugin = directory.get_plugin()
        network = plugin.get_network(context, network_id)
        subnets = plugin.get_subnets_by_network(context, network_id)
        return network, subnets


class DNSExtensionDriverML2(DNSExtensionDriver):

    def initialize(self):
        LOG.info("DNSExtensionDriverML2 initialization complete")

    def _is_tunnel_tenant_network(self, provider_net):
        if provider_net['network_type'] == 'geneve':
            tunnel_ranges = cfg.CONF.ml2_type_geneve.vni_ranges
        elif provider_net['network_type'] == 'vxlan':
            tunnel_ranges = cfg.CONF.ml2_type_vxlan.vni_ranges
        else:
            tunnel_ranges = cfg.CONF.ml2_type_gre.tunnel_id_ranges

        segmentation_id = int(provider_net['segmentation_id'])
        # Check every configured range, not only the first entry, so that
        # tenant networks allocated from any of the ranges are recognized.
        for entry in tunnel_ranges:
            tun_min, tun_max = entry.strip().split(':')
            if int(tun_min.strip()) <= segmentation_id <= int(
                    tun_max.strip()):
                return True
        return False

    def _is_vlan_tenant_network(self, provider_net):
        network_vlan_ranges = plugin_utils.parse_network_vlan_ranges(
            cfg.CONF.ml2_type_vlan.network_vlan_ranges)
        vlan_ranges = network_vlan_ranges[provider_net['physical_network']]
        if not vlan_ranges:
            return False
        segmentation_id = int(provider_net['segmentation_id'])
        for vlan_range in vlan_ranges:
            if vlan_range[0] <= segmentation_id <= vlan_range[1]:
                return True
        return False

    def external_dns_not_needed(self, context, network, subnets):
        dns_driver = _get_dns_driver()
        if not dns_driver:
            return True
        for subnet in subnets:
            if subnet.get('dns_publish_fixed_ip'):
                return False
        if network['router:external']:
            return True
        segments = segments_db.get_network_segments(context, network['id'])
        if len(segments) > 1:
            return False
        provider_net = segments[0]
        if provider_net['network_type'] == 'local':
            return True
        if provider_net['network_type'] == 'flat':
            return False
        if provider_net['network_type'] == 'vlan':
            return self._is_vlan_tenant_network(provider_net)
        if 
provider_net['network_type'] in ['gre', 'vxlan', 'geneve']: return self._is_tunnel_tenant_network(provider_net) return True class DNSDomainPortsExtensionDriver(DNSExtensionDriverML2): _supported_extension_aliases = [dns_apidef.ALIAS, dns_domain_ports.ALIAS] @property def extension_aliases(self): return self._supported_extension_aliases def initialize(self): LOG.info("DNSDomainPortsExtensionDriver initialization complete") def extend_port_dict(self, session, db_data, response_data): response_data = ( super(DNSDomainPortsExtensionDriver, self).extend_port_dict( session, db_data, response_data)) dns_data_db = db_data.dns response_data[dns_apidef.DNSDOMAIN] = '' if dns_data_db: response_data[dns_apidef.DNSDOMAIN] = dns_data_db[ dns_apidef.DNSDOMAIN] DNS_DRIVER = None def _get_dns_driver(): global DNS_DRIVER if DNS_DRIVER: return DNS_DRIVER if not cfg.CONF.external_dns_driver: return try: DNS_DRIVER = driver.ExternalDNSService.get_instance() LOG.debug("External DNS driver loaded: %s", cfg.CONF.external_dns_driver) return DNS_DRIVER except ImportError: LOG.exception("ImportError exception occurred while loading " "the external DNS service driver") raise dns_exc.ExternalDNSDriverNotFound( driver=cfg.CONF.external_dns_driver) def _filter_by_subnet(context, fixed_ips): subnet_filtered = [] filter_fixed_ips = False for ip in fixed_ips: # TODO(slaweq): This might be a performance issue if ports have lots # of fixed_ips attached, possibly collect subnets first and do a # single get_objects call instead subnet = subnet_obj.Subnet.get_object( context, id=ip['subnet_id']) if subnet.get('dns_publish_fixed_ip'): filter_fixed_ips = True subnet_filtered.append(str(ip['ip_address'])) if filter_fixed_ips: return subnet_filtered else: return [str(ip['ip_address']) for ip in fixed_ips] def _create_port_in_external_dns_service(resource, event, trigger, **kwargs): dns_driver = _get_dns_driver() if not dns_driver: return context = kwargs['context'] port = kwargs['port'] dns_data_db = port_obj.PortDNS.get_object( context, port_id=port['id']) if not (dns_data_db and dns_data_db['current_dns_name']): return records = _filter_by_subnet(context, port['fixed_ips']) _send_data_to_external_dns_service(context, dns_driver, dns_data_db['current_dns_domain'], dns_data_db['current_dns_name'], records) def _send_data_to_external_dns_service(context, dns_driver, dns_domain, dns_name, records): try: dns_driver.create_record_set(context, dns_domain, dns_name, records) except (dns_exc.DNSDomainNotFound, dns_exc.DuplicateRecordSet) as e: LOG.exception("Error publishing port data in external DNS " "service. Name: '%(name)s'. Domain: '%(domain)s'. " "DNS service driver message '%(message)s'", {"name": dns_name, "domain": dns_domain, "message": e.msg}) def _remove_data_from_external_dns_service(context, dns_driver, dns_domain, dns_name, records): try: dns_driver.delete_record_set(context, dns_domain, dns_name, records) except (dns_exc.DNSDomainNotFound, dns_exc.DuplicateRecordSet) as e: LOG.exception("Error deleting port data from external DNS " "service. Name: '%(name)s'. Domain: '%(domain)s'. " "IP addresses '%(ips)s'. 
DNS service driver message " "'%(message)s'", {"name": dns_name, "domain": dns_domain, "message": e.msg, "ips": ', '.join(records)}) def _update_port_in_external_dns_service(resource, event, trigger, **kwargs): dns_driver = _get_dns_driver() if not dns_driver: return context = kwargs['context'] updated_port = kwargs['port'] original_port = kwargs.get('original_port') if not original_port: return original_ips = _filter_by_subnet(context, original_port['fixed_ips']) updated_ips = _filter_by_subnet(context, updated_port['fixed_ips']) is_dns_name_changed = (updated_port[dns_apidef.DNSNAME] != original_port[dns_apidef.DNSNAME]) is_dns_domain_changed = (dns_apidef.DNSDOMAIN in updated_port and updated_port[dns_apidef.DNSDOMAIN] != original_port[dns_apidef.DNSDOMAIN]) ips_changed = set(original_ips) != set(updated_ips) if not any((is_dns_name_changed, is_dns_domain_changed, ips_changed)): return dns_data_db = port_obj.PortDNS.get_object( context, port_id=updated_port['id']) if not (dns_data_db and (dns_data_db['previous_dns_name'] or dns_data_db[ 'current_dns_name'])): return if dns_data_db['previous_dns_name']: _remove_data_from_external_dns_service( context, dns_driver, dns_data_db['previous_dns_domain'], dns_data_db['previous_dns_name'], original_ips) if dns_data_db['current_dns_name']: _send_data_to_external_dns_service(context, dns_driver, dns_data_db['current_dns_domain'], dns_data_db['current_dns_name'], updated_ips) def _delete_port_in_external_dns_service(resource, event, trigger, payload=None): dns_driver = _get_dns_driver() if not dns_driver: return context = payload.context port_id = payload.resource_id dns_data_db = port_obj.PortDNS.get_object( context, port_id=port_id) if not dns_data_db: return if dns_data_db['current_dns_name']: ip_allocations = port_obj.IPAllocation.get_objects(context, port_id=port_id) records = _filter_by_subnet(context, ip_allocations) _remove_data_from_external_dns_service( context, dns_driver, dns_data_db['current_dns_domain'], dns_data_db['current_dns_name'], records) def subscribe(): registry.subscribe( _create_port_in_external_dns_service, resources.PORT, events.AFTER_CREATE) registry.subscribe( _update_port_in_external_dns_service, resources.PORT, events.AFTER_UPDATE) registry.subscribe( _delete_port_in_external_dns_service, resources.PORT, events.BEFORE_DELETE) subscribe() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/extensions/port_security.py0000644000175000017500000000623500000000000026502 0ustar00coreycorey00000000000000# Copyright 2015 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
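
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of neutron): the recordset reconciliation
# performed by _update_port_in_external_dns_service() above. PortDNS keeps
# both the previous and the current (name, domain) pair, so an update first
# deletes the stale recordset and then publishes the current one against the
# subnet-filtered fixed IPs. StubDNSDriver and sync_port_records are made-up
# names standing in for an ExternalDNSService driver and the handler logic.

class StubDNSDriver(object):
    def create_record_set(self, context, dns_domain, dns_name, records):
        print('create %s.%s -> %s' % (dns_name, dns_domain, records))

    def delete_record_set(self, context, dns_domain, dns_name, records):
        print('delete %s.%s -> %s' % (dns_name, dns_domain, records))


def sync_port_records(driver, dns_data_db, original_ips, updated_ips):
    # Mirror the handler: first remove what was last published, if anything...
    if dns_data_db['previous_dns_name']:
        driver.delete_record_set(None, dns_data_db['previous_dns_domain'],
                                 dns_data_db['previous_dns_name'],
                                 original_ips)
    # ...then publish the current name against the updated addresses.
    if dns_data_db['current_dns_name']:
        driver.create_record_set(None, dns_data_db['current_dns_domain'],
                                 dns_data_db['current_dns_name'],
                                 updated_ips)


sync_port_records(
    StubDNSDriver(),
    {'previous_dns_name': 'vm1', 'previous_dns_domain': 'example.org.',
     'current_dns_name': 'vm1-renamed',
     'current_dns_domain': 'example.org.'},
    original_ips=['10.0.0.3'], updated_ips=['10.0.0.3'])
# ---------------------------------------------------------------------------
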
from neutron_lib.api.definitions import port_security as psec from neutron_lib.api import validators from neutron_lib.plugins.ml2 import api from neutron_lib.utils import net from oslo_log import log as logging from neutron.db import portsecurity_db_common as ps_db_common LOG = logging.getLogger(__name__) class PortSecurityExtensionDriver(api.ExtensionDriver, ps_db_common.PortSecurityDbCommon): _supported_extension_alias = 'port-security' def initialize(self): LOG.info("PortSecurityExtensionDriver initialization complete") @property def extension_alias(self): return self._supported_extension_alias def process_create_network(self, context, data, result): # Create the network extension attributes. if psec.PORTSECURITY not in data: data[psec.PORTSECURITY] = psec.DEFAULT_PORT_SECURITY self._process_network_port_security_create(context, data, result) def process_update_network(self, context, data, result): # Update the network extension attributes. if psec.PORTSECURITY in data: self._process_network_port_security_update(context, data, result) def process_create_port(self, context, data, result): # Create the port extension attributes. data[psec.PORTSECURITY] = self._determine_port_security(context, data) self._process_port_port_security_create(context, data, result) def process_update_port(self, context, data, result): if psec.PORTSECURITY in data: self._process_port_port_security_update( context, data, result) def extend_network_dict(self, session, db_data, result): self._extend_port_security_dict(result, db_data) def extend_port_dict(self, session, db_data, result): self._extend_port_security_dict(result, db_data) def _determine_port_security(self, context, port): """Returns a boolean (port_security_enabled). Port_security is the value associated with the port if one is present otherwise the value associated with the network is returned. """ # we don't apply security groups for dhcp, router if port.get('device_owner') and net.is_port_trusted(port): return False if validators.is_attr_set(port.get(psec.PORTSECURITY)): port_security_enabled = port[psec.PORTSECURITY] else: port_security_enabled = self._get_network_security_binding( context, port['network_id']) return port_security_enabled ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/extensions/qos.py0000644000175000017500000000375000000000000024370 0ustar00coreycorey00000000000000# Copyright (c) 2015 Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
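
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of neutron): the precedence implemented by
# _determine_port_security() above. A trusted port (dhcp, router, ...) never
# gets port security; otherwise an explicit per-port value wins and the
# network-level setting is the fallback. effective_port_security and
# TRUSTED_OWNERS are made-up names; the real code relies on
# net.is_port_trusted() and validators.is_attr_set() instead.

TRUSTED_OWNERS = ('network:dhcp', 'network:router_interface')


def effective_port_security(port, network_port_security):
    if port.get('device_owner') in TRUSTED_OWNERS:
        return False
    if 'port_security_enabled' in port:
        return port['port_security_enabled']
    return network_port_security


# A trusted port is always False, an explicit port value beats the
# network default, and the network default applies otherwise.
assert effective_port_security(
    {'device_owner': 'network:router_interface'}, True) is False
assert effective_port_security(
    {'device_owner': 'compute:nova', 'port_security_enabled': False},
    True) is False
assert effective_port_security(
    {'device_owner': 'compute:nova'}, True) is True
# ---------------------------------------------------------------------------
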
from neutron_lib.plugins.ml2 import api from oslo_log import log as logging from neutron.core_extensions import base as base_core from neutron.core_extensions import qos as qos_core LOG = logging.getLogger(__name__) QOS_EXT_DRIVER_ALIAS = 'qos' class QosExtensionDriver(api.ExtensionDriver): def initialize(self): self.core_ext_handler = qos_core.QosCoreResourceExtension() LOG.debug("QosExtensionDriver initialization complete") def process_create_network(self, context, data, result): self.core_ext_handler.process_fields( context, base_core.NETWORK, base_core.EVENT_CREATE, data, result) def process_update_network(self, context, data, result): self.core_ext_handler.process_fields( context, base_core.NETWORK, base_core.EVENT_UPDATE, data, result) def process_create_port(self, context, data, result): self.core_ext_handler.process_fields( context, base_core.PORT, base_core.EVENT_UPDATE, data, result) process_update_port = process_create_port def extend_network_dict(self, session, db_data, result): result.update( self.core_ext_handler.extract_fields( base_core.NETWORK, db_data)) def extend_port_dict(self, session, db_data, result): result.update( self.core_ext_handler.extract_fields(base_core.PORT, db_data)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/extensions/subnet_dns_publish_fixed_ip.py0000644000175000017500000000633300000000000031327 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
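
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of neutron): the delegation pattern used by
# QosExtensionDriver above. Every CRUD hook funnels into one core handler
# keyed by (resource, event), and the class-level alias
# process_update_port = process_create_port reuses a single implementation
# for both hooks (note the real driver passes EVENT_UPDATE even on port
# creation). MiniCoreHandler and MiniQosDriver are made-up names.

class MiniCoreHandler(object):
    def process_fields(self, resource, event, data, result):
        result['%s_%s' % (resource, event)] = data.get('qos_policy_id')


class MiniQosDriver(object):
    def __init__(self):
        self.core = MiniCoreHandler()

    def process_create_port(self, data, result):
        self.core.process_fields('port', 'update', data, result)

    # Alias instead of a duplicated method body.
    process_update_port = process_create_port


result = {}
MiniQosDriver().process_create_port({'qos_policy_id': 'policy-1'}, result)
assert result == {'port_update': 'policy-1'}
# ---------------------------------------------------------------------------
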
from neutron_lib.api.definitions import dns as dns_apidef from neutron_lib.api.definitions import dns_domain_ports as ports_apidef from neutron_lib.api.definitions import subnet_dns_publish_fixed_ip as sn_dns from neutron_lib.api import validators from oslo_log import log as logging from neutron.objects import subnet as subnet_obj from neutron.plugins.ml2.extensions import dns_integration as dns_int LOG = logging.getLogger(__name__) class SubnetDNSPublishFixedIPExtensionDriver( dns_int.DNSDomainPortsExtensionDriver): _supported_extension_aliases = [dns_apidef.ALIAS, ports_apidef.ALIAS, sn_dns.ALIAS] def initialize(self): LOG.info("SubnetDNSPublishFixedIPExtensionDriver initialization " "complete") @property def extension_aliases(self): return self._supported_extension_aliases def extend_subnet_dict(self, session, db_data, response_data): # TODO(jh): This returns None instead of the proper response_data # response_data = ( # super(SubnetDNSPublishFixedIPExtensionDriver, # self).extend_subnet_dict( # session, db_data, response_data)) response_data['dns_publish_fixed_ip'] = False if db_data.dns_publish_fixed_ip: response_data['dns_publish_fixed_ip'] = True return response_data def process_create_subnet(self, plugin_context, request_data, db_data): flag = request_data.get(sn_dns.DNS_PUBLISH_FIXED_IP) if not validators.is_attr_set(flag): return if flag: subnet_obj.SubnetDNSPublishFixedIP( plugin_context, subnet_id=db_data['id'], dns_publish_fixed_ip=flag).create() db_data[sn_dns.DNS_PUBLISH_FIXED_IP] = flag def process_update_subnet(self, plugin_context, request_data, db_data): new_value = request_data.get(sn_dns.DNS_PUBLISH_FIXED_IP) if not validators.is_attr_set(new_value): return current_value = db_data.get(sn_dns.DNS_PUBLISH_FIXED_IP) if current_value == new_value: return subnet_id = db_data['id'] if new_value: subnet_obj.SubnetDNSPublishFixedIP( plugin_context, subnet_id=subnet_id, dns_publish_fixed_ip=new_value).create() else: sn_obj = subnet_obj.SubnetDNSPublishFixedIP.get_object( plugin_context, subnet_id=subnet_id) sn_obj.delete() db_data[sn_dns.DNS_PUBLISH_FIXED_IP] = new_value ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/extensions/tag_ports_during_bulk_creation.py0000644000175000017500000000377700000000000032052 0ustar00coreycorey00000000000000# Copyright (c) 2019 Verizon Media # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
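
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of neutron): the state transition implemented
# by process_update_subnet() above. The flag is persisted as the existence of
# a SubnetDNSPublishFixedIP row, so flipping it to True creates the row,
# flipping it to False deletes it, and an unchanged value touches nothing.
# apply_flag_update and the dict-backed "table" are made up for the example.

def apply_flag_update(rows, subnet_id, current_value, new_value):
    if new_value is None or current_value == new_value:
        return
    if new_value:
        rows[subnet_id] = True    # ~ SubnetDNSPublishFixedIP(...).create()
    else:
        del rows[subnet_id]       # ~ get_object(...).delete()


rows = {}
apply_flag_update(rows, 'subnet-1', False, True)
assert rows == {'subnet-1': True}
apply_flag_update(rows, 'subnet-1', True, True)    # no-op
apply_flag_update(rows, 'subnet-1', True, False)
assert rows == {}
# ---------------------------------------------------------------------------
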
from neutron_lib.api.definitions import \ tag_ports_during_bulk_creation as apidef from neutron_lib.plugins import directory from neutron_lib.plugins.ml2 import api from oslo_log import helpers as log_helpers from oslo_log import log as logging from neutron.extensions import tagging LOG = logging.getLogger(__name__) class TagPortsDuringBulkCreationExtensionDriver(api.ExtensionDriver): _supported_extension_alias = apidef.ALIAS def initialize(self): LOG.info("TagPortsDuringBulkCreationExtensionDriver " "initialization complete") @property def extension_alias(self): return self._supported_extension_alias @property def tag_plugin(self): if not hasattr(self, '_tag_plugin'): self._tag_plugin = directory.get_plugin(tagging.TAG_PLUGIN_TYPE) return self._tag_plugin @property def plugin(self): if not hasattr(self, '_plugin'): self._plugin = directory.get_plugin() return self._plugin @log_helpers.log_method_call def process_create_port(self, plugin_context, request_data, db_data): tags = request_data.get('tags') if not (self.tag_plugin and tags): return port_db = self.plugin._get_port(plugin_context, db_data['id']) self.tag_plugin.add_tags(plugin_context, port_db.standard_attr_id, tags) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/extensions/uplink_status_propagation.py0000644000175000017500000000301000000000000031063 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import uplink_status_propagation as usp from neutron_lib.plugins.ml2 import api from oslo_log import log as logging from neutron.db import uplink_status_propagation_db as usp_db LOG = logging.getLogger(__name__) class UplinkStatusPropagationExtensionDriver( api.ExtensionDriver, usp_db.UplinkStatusPropagationMixin): _supported_extension_alias = 'uplink-status-propagation' def initialize(self): LOG.info("UplinkStatusPropagationExtensionDriver initialization " "complete") @property def extension_alias(self): return self._supported_extension_alias def process_create_port(self, context, data, result): # Create the port extension attributes. if usp.PROPAGATE_UPLINK_STATUS in data: self._process_create_port(context, data, result) def extend_port_dict(self, session, db_data, result): self._extend_port_dict(result, db_data) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/managers.py0000644000175000017500000015413000000000000023163 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib.api.definitions import multiprovidernet as mpnet_apidef from neutron_lib.api.definitions import portbindings from neutron_lib.api.definitions import provider_net as provider from neutron_lib.api import validators from neutron_lib import constants from neutron_lib.db import api as db_api from neutron_lib import exceptions as exc from neutron_lib.exceptions import multiprovidernet as mpnet_exc from neutron_lib.exceptions import placement as place_exc from neutron_lib.exceptions import vlantransparent as vlan_exc from neutron_lib.plugins.ml2 import api from oslo_config import cfg from oslo_log import log from oslo_utils import excutils import stevedore from neutron._i18n import _ from neutron.conf.plugins.ml2 import config from neutron.db import segments_db from neutron.objects import ports from neutron.plugins.ml2.common import exceptions as ml2_exc LOG = log.getLogger(__name__) MAX_BINDING_LEVELS = 10 config.register_ml2_plugin_opts() class TypeManager(stevedore.named.NamedExtensionManager): """Manage network segment types using drivers.""" def __init__(self): # Mapping from type name to DriverManager self.drivers = {} LOG.info("Configured type driver names: %s", cfg.CONF.ml2.type_drivers) super(TypeManager, self).__init__('neutron.ml2.type_drivers', cfg.CONF.ml2.type_drivers, invoke_on_load=True) LOG.info("Loaded type driver names: %s", self.names()) self._register_types() self._check_tenant_network_types(cfg.CONF.ml2.tenant_network_types) self._check_external_network_type(cfg.CONF.ml2.external_network_type) def _register_types(self): for ext in self: network_type = ext.obj.get_type() if network_type in self.drivers: LOG.error("Type driver '%(new_driver)s' ignored because" " type driver '%(old_driver)s' is already" " registered for type '%(type)s'", {'new_driver': ext.name, 'old_driver': self.drivers[network_type].name, 'type': network_type}) else: self.drivers[network_type] = ext LOG.info("Registered types: %s", self.drivers.keys()) def _check_tenant_network_types(self, types): self.tenant_network_types = [] for network_type in types: if network_type in self.drivers: self.tenant_network_types.append(network_type) else: LOG.error("No type driver for tenant network_type: %s. " "Service terminated!", network_type) raise SystemExit(1) LOG.info("Tenant network_types: %s", self.tenant_network_types) def _check_external_network_type(self, ext_network_type): if ext_network_type and ext_network_type not in self.drivers: LOG.error("No type driver for external network_type: %s. 
" "Service terminated!", ext_network_type) raise SystemExit(1) def _process_provider_segment(self, segment): (network_type, physical_network, segmentation_id) = (self._get_attribute(segment, attr) for attr in provider.ATTRIBUTES) if validators.is_attr_set(network_type): segment = {api.NETWORK_TYPE: network_type, api.PHYSICAL_NETWORK: physical_network, api.SEGMENTATION_ID: segmentation_id} self.validate_provider_segment(segment) return segment msg = _("network_type required") raise exc.InvalidInput(error_message=msg) def _process_provider_create(self, network): if any(validators.is_attr_set(network.get(attr)) for attr in provider.ATTRIBUTES): # Verify that multiprovider and provider attributes are not set # at the same time. if validators.is_attr_set(network.get(mpnet_apidef.SEGMENTS)): raise mpnet_exc.SegmentsSetInConjunctionWithProviders() segment = self._get_provider_segment(network) return [self._process_provider_segment(segment)] elif validators.is_attr_set(network.get(mpnet_apidef.SEGMENTS)): segments = [self._process_provider_segment(s) for s in network[mpnet_apidef.SEGMENTS]] mpnet_apidef.check_duplicate_segments( segments, self.is_partial_segment) return segments def _match_segment(self, segment, filters): return all(not filters.get(attr) or segment.get(attr) in filters[attr] for attr in provider.ATTRIBUTES) def _get_provider_segment(self, network): # TODO(manishg): Placeholder method # Code intended for operating on a provider segment should use # this method to extract the segment, even though currently the # segment attributes are part of the network dictionary. In the # future, network and segment information will be decoupled and # here we will do the job of extracting the segment information. return network def network_matches_filters(self, network, filters): if not filters: return True if any(validators.is_attr_set(network.get(attr)) for attr in provider.ATTRIBUTES): segments = [self._get_provider_segment(network)] elif validators.is_attr_set(network.get(mpnet_apidef.SEGMENTS)): segments = self._get_attribute(network, mpnet_apidef.SEGMENTS) else: return True return any(self._match_segment(s, filters) for s in segments) def _get_attribute(self, attrs, key): value = attrs.get(key) if value is constants.ATTR_NOT_SPECIFIED: value = None return value def extend_network_dict_provider(self, context, network): # this method is left for backward compat even though it would be # easy to change the callers in tree to use the bulk function return self.extend_networks_dict_provider(context, [network]) def extend_networks_dict_provider(self, context, networks): ids = [network['id'] for network in networks] net_segments = segments_db.get_networks_segments(context, ids) for network in networks: segments = net_segments[network['id']] self._extend_network_dict_provider(network, segments) def _extend_network_dict_provider(self, network, segments): if not segments: LOG.debug("Network %s has no segments", network['id']) for attr in provider.ATTRIBUTES: network[attr] = None elif len(segments) > 1: network[mpnet_apidef.SEGMENTS] = [ {provider.NETWORK_TYPE: segment[api.NETWORK_TYPE], provider.PHYSICAL_NETWORK: segment[api.PHYSICAL_NETWORK], provider.SEGMENTATION_ID: segment[api.SEGMENTATION_ID]} for segment in segments] else: segment = segments[0] network[provider.NETWORK_TYPE] = segment[api.NETWORK_TYPE] network[provider.PHYSICAL_NETWORK] = segment[ api.PHYSICAL_NETWORK] network[provider.SEGMENTATION_ID] = segment[ api.SEGMENTATION_ID] def initialize(self): for network_type, driver in 
self.drivers.items(): LOG.info("Initializing driver for type '%s'", network_type) driver.obj.initialize() def initialize_network_segment_range_support(self): for network_type, driver in self.drivers.items(): if network_type in constants.NETWORK_SEGMENT_RANGE_TYPES: LOG.info("Initializing driver network segment range support " "for type '%s'", network_type) driver.obj.initialize_network_segment_range_support() def _add_network_segment(self, context, network_id, segment, segment_index=0): segments_db.add_network_segment( context, network_id, segment, segment_index) def _update_network_segment(self, context, network_id, segmentation_id): segments_db.update_network_segment( context, network_id, segmentation_id) def create_network_segments(self, context, network, tenant_id): """Call type drivers to create network segments.""" segments = self._process_provider_create(network) filters = {'project_id': tenant_id} with db_api.CONTEXT_WRITER.using(context): network_id = network['id'] if segments: for segment_index, segment in enumerate(segments): segment = self.reserve_provider_segment( context, segment, filters=filters) self._add_network_segment(context, network_id, segment, segment_index) elif (cfg.CONF.ml2.external_network_type and self._get_attribute(network, extnet_apidef.EXTERNAL)): segment = self._allocate_ext_net_segment( context, filters=filters) self._add_network_segment(context, network_id, segment) else: segment = self._allocate_tenant_net_segment( context, filters=filters) self._add_network_segment(context, network_id, segment) def update_network_segment(self, context, network, net_data, segment): """Call type drivers to update a network segment. Update operation is currently only supported for VLAN type segments, and only the SEGMENTATION_ID field can be changed. 
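        For example, a request whose net_data contains only
        {'provider:segmentation_id': 1234} (value hypothetical) retags the
        segment to VLAN 1234; updates for other network types, or without a
        new segmentation id, are rejected with InvalidInput.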
""" project_id = network['project_id'] segmentation_id = net_data.get(provider.SEGMENTATION_ID) network_type = segment[api.NETWORK_TYPE] if network_type != constants.TYPE_VLAN: msg = (_('Only VLAN type networks can be updated.')) raise exc.InvalidInput(error_message=msg) elif not segmentation_id: msg = (_('Only %s field can be updated in VLAN type networks') % api.SEGMENTATION_ID) raise exc.InvalidInput(error_message=msg) new_segment = {api.NETWORK_TYPE: segment[api.NETWORK_TYPE], api.PHYSICAL_NETWORK: segment[api.PHYSICAL_NETWORK], api.SEGMENTATION_ID: segmentation_id} self.validate_provider_segment(new_segment) self.reserve_provider_segment(context, new_segment, filters={'project_id': project_id}) self._update_network_segment(context, segment['id'], segmentation_id) self.release_network_segment(context, segment) def reserve_network_segment(self, context, segment_data): """Call type drivers to reserve a network segment.""" # Validate the data of segment if not validators.is_attr_set(segment_data[api.NETWORK_TYPE]): msg = _("network_type required") raise exc.InvalidInput(error_message=msg) net_type = self._get_attribute(segment_data, api.NETWORK_TYPE) phys_net = self._get_attribute(segment_data, api.PHYSICAL_NETWORK) seg_id = self._get_attribute(segment_data, api.SEGMENTATION_ID) segment = {api.NETWORK_TYPE: net_type, api.PHYSICAL_NETWORK: phys_net, api.SEGMENTATION_ID: seg_id} self.validate_provider_segment(segment) # Reserve segment in type driver with db_api.CONTEXT_WRITER.using(context): return self.reserve_provider_segment(context, segment) def is_partial_segment(self, segment): network_type = segment[api.NETWORK_TYPE] driver = self.drivers.get(network_type) if driver: return driver.obj.is_partial_segment(segment) else: msg = _("network_type value '%s' not supported") % network_type raise exc.InvalidInput(error_message=msg) def validate_provider_segment(self, segment): network_type = segment[api.NETWORK_TYPE] driver = self.drivers.get(network_type) if driver: driver.obj.validate_provider_segment(segment) else: msg = _("network_type value '%s' not supported") % network_type raise exc.InvalidInput(error_message=msg) def reserve_provider_segment(self, context, segment, filters=None): network_type = segment.get(api.NETWORK_TYPE) driver = self.drivers.get(network_type) if isinstance(driver.obj, api.TypeDriver): return driver.obj.reserve_provider_segment(context.session, segment, filters) else: return driver.obj.reserve_provider_segment(context, segment, filters) def _allocate_segment(self, context, network_type, filters=None): driver = self.drivers.get(network_type) if isinstance(driver.obj, api.TypeDriver): return driver.obj.allocate_tenant_segment(context.session, filters) else: return driver.obj.allocate_tenant_segment(context, filters) def _allocate_tenant_net_segment(self, context, filters=None): for network_type in self.tenant_network_types: segment = self._allocate_segment(context, network_type, filters) if segment: return segment raise exc.NoNetworkAvailable() def _allocate_ext_net_segment(self, context, filters=None): network_type = cfg.CONF.ml2.external_network_type segment = self._allocate_segment(context, network_type, filters) if segment: return segment raise exc.NoNetworkAvailable() def release_network_segments(self, context, network_id): segments = segments_db.get_network_segments(context, network_id, filter_dynamic=None) for segment in segments: self.release_network_segment(context, segment) def release_network_segment(self, context, segment): network_type = 
segment.get(api.NETWORK_TYPE)
        driver = self.drivers.get(network_type)
        if driver:
            if isinstance(driver.obj, api.TypeDriver):
                driver.obj.release_segment(context.session, segment)
            else:
                driver.obj.release_segment(context, segment)
        else:
            LOG.error("Failed to release segment '%s' because "
                      "network type is not supported.", segment)

    def allocate_dynamic_segment(self, context, network_id, segment):
        """Allocate a dynamic segment using a partial or full segment dict."""
        dynamic_segment = segments_db.get_dynamic_segment(
            context, network_id, segment.get(api.PHYSICAL_NETWORK),
            segment.get(api.SEGMENTATION_ID))
        if dynamic_segment:
            return dynamic_segment
        driver = self.drivers.get(segment.get(api.NETWORK_TYPE))
        if isinstance(driver.obj, api.TypeDriver):
            dynamic_segment = driver.obj.reserve_provider_segment(
                context.session, segment)
        else:
            dynamic_segment = driver.obj.reserve_provider_segment(
                context, segment)
        segments_db.add_network_segment(context, network_id, dynamic_segment,
                                        is_dynamic=True)
        return dynamic_segment

    def release_dynamic_segment(self, context, segment_id):
        """Delete a dynamic segment."""
        segment = segments_db.get_segment_by_id(context, segment_id)
        if segment:
            driver = self.drivers.get(segment.get(api.NETWORK_TYPE))
            if driver:
                if isinstance(driver.obj, api.TypeDriver):
                    driver.obj.release_segment(context.session, segment)
                else:
                    driver.obj.release_segment(context, segment)
                segments_db.delete_network_segment(context, segment_id)
            else:
                LOG.error("Failed to release segment '%s' because "
                          "network type is not supported.", segment)
        else:
            # Pass a mapping so the %(segment_id)s placeholder is filled in.
            LOG.debug("No segment found with id %(segment_id)s",
                      {'segment_id': segment_id})

    def update_network_segment_range_allocations(self, network_type):
        driver = self.drivers.get(network_type)
        driver.obj.update_network_segment_range_allocations()

    def network_type_supported(self, network_type):
        return bool(network_type in self.drivers)


class MechanismManager(stevedore.named.NamedExtensionManager):
    """Manage networking mechanisms using drivers."""

    def __init__(self):
        # Registered mechanism drivers, keyed by name.
        self.mech_drivers = {}
        # Ordered list of mechanism drivers, defining
        # the order in which the drivers are called.
        self.ordered_mech_drivers = []
        LOG.info("Configured mechanism driver names: %s",
                 cfg.CONF.ml2.mechanism_drivers)
        super(MechanismManager, self).__init__(
            'neutron.ml2.mechanism_drivers',
            cfg.CONF.ml2.mechanism_drivers,
            invoke_on_load=True,
            name_order=True,
            on_missing_entrypoints_callback=self._driver_not_found,
            on_load_failure_callback=self._driver_not_loaded
        )
        LOG.info("Loaded mechanism driver names: %s", self.names())
        self._register_mechanisms()
        self.host_filtering_supported = self.is_host_filtering_supported()
        if not self.host_filtering_supported:
            LOG.info("No mechanism drivers provide segment reachability "
                     "information for agent scheduling.")

    def _driver_not_found(self, names):
        msg = (_("The following mechanism drivers were not found: %s")
               % names)
        LOG.critical(msg)
        raise SystemExit(msg)

    def _driver_not_loaded(self, manager, entrypoint, exception):
        LOG.critical("The '%(entrypoint)s' entrypoint could not be"
                     " loaded for the following reason: '%(reason)s'.",
                     {'entrypoint': entrypoint, 'reason': exception})
        raise SystemExit(str(exception))

    def _register_mechanisms(self):
        """Register all mechanism drivers.

        This method should only be called once in the MechanismManager
        constructor. 
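        Drivers are kept in the order given by
        cfg.CONF.ml2.mechanism_drivers (the manager is constructed with
        name_order=True); _call_on_drivers later invokes them in exactly
        this order.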
""" for ext in self: self.mech_drivers[ext.name] = ext self.ordered_mech_drivers.append(ext) LOG.info("Registered mechanism drivers: %s", [driver.name for driver in self.ordered_mech_drivers]) def initialize(self): for driver in self.ordered_mech_drivers: LOG.info("Initializing mechanism driver '%s'", driver.name) driver.obj.initialize() def _check_vlan_transparency(self, context): """Helper method for checking vlan transparecncy support. :param context: context parameter to pass to each method call :raises: neutron_lib.exceptions.vlantransparent. VlanTransparencyDriverError if any mechanism driver doesn't support vlan transparency. """ if context.current.get('vlan_transparent'): for driver in self.ordered_mech_drivers: if not driver.obj.check_vlan_transparency(context): raise vlan_exc.VlanTransparencyDriverError() def _call_on_drivers(self, method_name, context, continue_on_failure=False, raise_db_retriable=False): """Helper method for calling a method across all mechanism drivers. :param method_name: name of the method to call :param context: context parameter to pass to each method call :param continue_on_failure: whether or not to continue to call all mechanism drivers once one has raised an exception :param raise_db_retriable: whether or not to treat retriable db exception by mechanism drivers to propagate up to upper layer so that upper layer can handle it or error in ML2 player :raises: neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver call fails. or DB retriable error when raise_db_retriable=False. See neutron_lib.db.api.is_retriable for what db exception is retriable """ errors = [] for driver in self.ordered_mech_drivers: try: getattr(driver.obj, method_name)(context) except Exception as e: if raise_db_retriable and db_api.is_retriable(e): with excutils.save_and_reraise_exception(): LOG.debug("DB exception raised by Mechanism driver " "'%(name)s' in %(method)s", {'name': driver.name, 'method': method_name}, exc_info=e) LOG.exception( "Mechanism driver '%(name)s' failed in %(method)s", {'name': driver.name, 'method': method_name} ) errors.append(e) if not continue_on_failure: break if errors: raise ml2_exc.MechanismDriverError( method=method_name, errors=errors ) def create_network_precommit(self, context): """Notify all mechanism drivers during network creation. :raises: DB retriable error if create_network_precommit raises them See neutron_lib.db.api.is_retriable for what db exception is retriable or neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver create_network_precommit call fails. Called within the database transaction. If a mechanism driver raises an exception, then a MechanismDriverError is propagated to the caller, triggering a rollback. There is no guarantee that all mechanism drivers are called in this case. """ self._check_vlan_transparency(context) self._call_on_drivers("create_network_precommit", context, raise_db_retriable=True) def create_network_postcommit(self, context): """Notify all mechanism drivers after network creation. :raises: neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver create_network_postcommit call fails. Called after the database transaction. If a mechanism driver raises an exception, then a MechanismDriverError is propagated to the caller, where the network will be deleted, triggering any required cleanup. There is no guarantee that all mechanism drivers are called in this case. 
""" self._call_on_drivers("create_network_postcommit", context) def update_network_precommit(self, context): """Notify all mechanism drivers during network update. :raises: DB retriable error if update_network_precommit raises them See neutron_lib.db.api.is_retriable for what db exception is retriable or neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver update_network_precommit call fails. Called within the database transaction. If a mechanism driver raises an exception, then a MechanismDriverError is propagated to the caller, triggering a rollback. There is no guarantee that all mechanism drivers are called in this case. """ self._call_on_drivers("update_network_precommit", context, raise_db_retriable=True) def update_network_postcommit(self, context): """Notify all mechanism drivers after network update. :raises: neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver update_network_postcommit call fails. Called after the database transaction. If any mechanism driver raises an error, then the error is logged but we continue to call every other mechanism driver. A MechanismDriverError is then reraised at the end to notify the caller of a failure. """ self._call_on_drivers("update_network_postcommit", context, continue_on_failure=True) def delete_network_precommit(self, context): """Notify all mechanism drivers during network deletion. :raises: DB retriable error if delete_network_precommit raises them See neutron_lib.db.api.is_retriable for what db exception is retriable or neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver delete_network_precommit call fails. Called within the database transaction. If a mechanism driver raises an exception, then a MechanismDriverError is propagated to the caller, triggering a rollback. There is no guarantee that all mechanism drivers are called in this case. """ self._call_on_drivers("delete_network_precommit", context, raise_db_retriable=True) def delete_network_postcommit(self, context): """Notify all mechanism drivers after network deletion. :raises: neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver delete_network_postcommit call fails. Called after the database transaction. If any mechanism driver raises an error, then the error is logged but we continue to call every other mechanism driver. A MechanismDriverError is then reraised at the end to notify the caller of a failure. In general we expect the caller to ignore the error, as the network resource has already been deleted from the database and it doesn't make sense to undo the action by recreating the network. """ self._call_on_drivers("delete_network_postcommit", context, continue_on_failure=True) def create_subnet_precommit(self, context): """Notify all mechanism drivers during subnet creation. :raises: DB retriable error if create_subnet_precommit raises them See neutron_lib.db.api.is_retriable for what db exception is retriable or neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver create_subnet_precommit call fails. Called within the database transaction. If a mechanism driver raises an exception, then a MechanismDriverError is propagated to the caller, triggering a rollback. There is no guarantee that all mechanism drivers are called in this case. """ self._call_on_drivers("create_subnet_precommit", context, raise_db_retriable=True) def create_subnet_postcommit(self, context): """Notify all mechanism drivers after subnet creation. 
:raises: neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver create_subnet_postcommit call fails. Called after the database transaction. If a mechanism driver raises an exception, then a MechanismDriverError is propagated to the caller, where the subnet will be deleted, triggering any required cleanup. There is no guarantee that all mechanism drivers are called in this case. """ self._call_on_drivers("create_subnet_postcommit", context) def update_subnet_precommit(self, context): """Notify all mechanism drivers during subnet update. :raises: DB retriable error if update_subnet_precommit raises them See neutron_lib.db.api.is_retriable for what db exception is retriable or neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver update_subnet_precommit call fails. Called within the database transaction. If a mechanism driver raises an exception, then a MechanismDriverError is propagated to the caller, triggering a rollback. There is no guarantee that all mechanism drivers are called in this case. """ self._call_on_drivers("update_subnet_precommit", context, raise_db_retriable=True) def update_subnet_postcommit(self, context): """Notify all mechanism drivers after subnet update. :raises: neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver update_subnet_postcommit call fails. Called after the database transaction. If any mechanism driver raises an error, then the error is logged but we continue to call every other mechanism driver. A MechanismDriverError is then reraised at the end to notify the caller of a failure. """ self._call_on_drivers("update_subnet_postcommit", context, continue_on_failure=True) def delete_subnet_precommit(self, context): """Notify all mechanism drivers during subnet deletion. :raises: DB retriable error if delete_subnet_precommit raises them See neutron_lib.db.api.is_retriable for what db exception is retriable or neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver delete_subnet_precommit call fails. Called within the database transaction. If a mechanism driver raises an exception, then a MechanismDriverError is propagated to the caller, triggering a rollback. There is no guarantee that all mechanism drivers are called in this case. """ self._call_on_drivers("delete_subnet_precommit", context, raise_db_retriable=True) def delete_subnet_postcommit(self, context): """Notify all mechanism drivers after subnet deletion. :raises: neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver delete_subnet_postcommit call fails. Called after the database transaction. If any mechanism driver raises an error, then the error is logged but we continue to call every other mechanism driver. A MechanismDriverError is then reraised at the end to notify the caller of a failure. In general we expect the caller to ignore the error, as the subnet resource has already been deleted from the database and it doesn't make sense to undo the action by recreating the subnet. """ self._call_on_drivers("delete_subnet_postcommit", context, continue_on_failure=True) def create_port_precommit(self, context): """Notify all mechanism drivers during port creation. :raises: DB retriable error if create_port_precommit raises them See neutron_lib.db.api.is_retriable for what db exception is retriable or neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver create_port_precommit call fails. Called within the database transaction. 
If a mechanism driver raises an exception, then a MechanismDriverError is propagated to the caller, triggering a rollback. There is no guarantee that all mechanism drivers are called in this case. """ self._call_on_drivers("create_port_precommit", context, raise_db_retriable=True) def create_port_postcommit(self, context): """Notify all mechanism drivers of port creation. :raises: neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver create_port_postcommit call fails. Called after the database transaction. Errors raised by mechanism drivers are left to propagate to the caller, where the port will be deleted, triggering any required cleanup. There is no guarantee that all mechanism drivers are called in this case. """ self._call_on_drivers("create_port_postcommit", context) def update_port_precommit(self, context): """Notify all mechanism drivers during port update. :raises: DB retriable error if update_port_precommit raises them See neutron_lib.db.api.is_retriable for what db exception is retriable or neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver update_port_precommit call fails. Called within the database transaction. If a mechanism driver raises an exception, then a MechanismDriverError is propagated to the caller, triggering a rollback. There is no guarantee that all mechanism drivers are called in this case. """ self._call_on_drivers("update_port_precommit", context, raise_db_retriable=True) def update_port_postcommit(self, context): """Notify all mechanism drivers after port update. :raises: neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver update_port_postcommit call fails. Called after the database transaction. If any mechanism driver raises an error, then the error is logged but we continue to call every other mechanism driver. A MechanismDriverError is then reraised at the end to notify the caller of a failure. """ self._call_on_drivers("update_port_postcommit", context, continue_on_failure=True) def delete_port_precommit(self, context): """Notify all mechanism drivers during port deletion. :raises:DB retriable error if delete_port_precommit raises them See neutron_lib.db.api.is_retriable for what db exception is retriable or neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver delete_port_precommit call fails. Called within the database transaction. If a mechanism driver raises an exception, then a MechanismDriverError is propagated to the caller, triggering a rollback. There is no guarantee that all mechanism drivers are called in this case. """ self._call_on_drivers("delete_port_precommit", context, raise_db_retriable=True) def delete_port_postcommit(self, context): """Notify all mechanism drivers after port deletion. :raises: neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver delete_port_postcommit call fails. Called after the database transaction. If any mechanism driver raises an error, then the error is logged but we continue to call every other mechanism driver. A MechanismDriverError is then reraised at the end to notify the caller of a failure. In general we expect the caller to ignore the error, as the port resource has already been deleted from the database and it doesn't make sense to undo the action by recreating the port. """ self._call_on_drivers("delete_port_postcommit", context, continue_on_failure=True) def bind_port(self, context): """Attempt to bind a port using registered mechanism drivers. 
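        Binding may recurse through _bind_port_level, stacking up to
        MAX_BINDING_LEVELS partial bindings for hierarchical port binding.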
:param context: PortContext instance describing the port Called outside any transaction to attempt to establish a port binding. """ binding = context._binding LOG.debug("Attempting to bind port %(port)s on host %(host)s " "for vnic_type %(vnic_type)s with profile %(profile)s", {'port': context.current['id'], 'host': context.host, 'vnic_type': binding.vnic_type, 'profile': binding.profile}) context._clear_binding_levels() if not self._bind_port_level(context, 0, context.network.network_segments): binding.vif_type = portbindings.VIF_TYPE_BINDING_FAILED LOG.error("Failed to bind port %(port)s on host %(host)s " "for vnic_type %(vnic_type)s using segments " "%(segments)s", {'port': context.current['id'], 'host': context.host, 'vnic_type': binding.vnic_type, 'segments': context.network.network_segments}) def _bind_port_level(self, context, level, segments_to_bind, drivers=None, redoing_bottom=False): if drivers is None: drivers = self.ordered_mech_drivers binding = context._binding port_id = context.current['id'] LOG.debug("Attempting to bind port %(port)s by drivers %(drivers)s " "on host %(host)s at level %(level)s using " "segments %(segments)s", {'port': port_id, 'drivers': ','.join([driver.name for driver in drivers]), 'host': context.host, 'level': level, 'segments': segments_to_bind}) if level == MAX_BINDING_LEVELS: LOG.error("Exceeded maximum binding levels attempting to bind " "port %(port)s on host %(host)s", {'port': context.current['id'], 'host': context.host}) return False drivers = self._check_drivers_connectivity(drivers, context) if not drivers: LOG.error("Port %(port)s does not have an IP address assigned and " "there are no driver with 'connectivity' = 'l2'. The " "port cannot be bound.", {'port': context.current['id']}) return False for driver in drivers: if not self._check_driver_to_bind(driver, segments_to_bind, context._binding_levels): continue try: context._prepare_to_bind(segments_to_bind) driver.obj.bind_port(context) segment = context._new_bound_segment if segment: pbl_obj = ports.PortBindingLevel( context._plugin_context, port_id=port_id, host=context.host, level=level, driver=driver.name, segment_id=segment ) context._push_binding_level(pbl_obj) next_segments = context._next_segments_to_bind if next_segments: # Continue binding another level. if self._bind_port_level(context, level + 1, next_segments): return True else: LOG.warning("Failed to bind port %(port)s on " "host %(host)s at level %(lvl)s", {'port': context.current['id'], 'host': context.host, 'lvl': level + 1}) context._pop_binding_level() else: # NOTE(bence romsics): Consider: "In case of # hierarchical port binding binding_profile.allocation # [decided and sent by Placement and Nova] # is meant to drive the binding only on the binding # level that represents the closest physical interface # to the nova server." Link to spec: # # https://review.opendev.org/#/c/508149/14/specs\ # /rocky/minimum-bandwidth-\ # allocation-placement-api.rst@582 # # But we cannot tell if a binding level is # the bottom binding level before set_binding() # gets called, and that's already too late. So we # must undo the last binding after set_binding() # was called and redo the last level trying to # bind only with one driver as inferred from # the allocation. In order to undo the binding # here we must also assume that each driver's # bind_port() implementation is side effect free # beyond calling set_binding(). 
# # Also please note that technically we allow for # a redo to call continue_binding() instead of # set_binding() and by that turn what was supposed # to be the bottom level into a non-bottom binding # level. A thorough discussion is recommended if # you think of taking advantage of this. # # Also if we find use cases requiring # diamond-shaped selections of drivers on different # levels (eg. driverA and driverB can be both # a valid choice on level 0, but on level 1 both # previous choice leads to driverC) then we need # to restrict segment selection too based on # traits of the allocated resource provider on # the top binding_level (==0). if (context.current['binding:profile'] is not None and 'allocation' in context.current[ 'binding:profile'] and not redoing_bottom): LOG.debug( "Undo bottom bound level and redo it " "according to binding_profile.allocation, " "resource provider uuid: %s", context.current[ 'binding:profile']['allocation']) context._pop_binding_level() context._unset_binding() return self._bind_port_level( context, level, segments_to_bind, drivers=[self._infer_driver_from_allocation( context)], redoing_bottom=True) # Binding complete. LOG.debug("Bound port: %(port)s, " "host: %(host)s, " "vif_type: %(vif_type)s, " "vif_details: %(vif_details)s, " "binding_levels: %(binding_levels)s", {'port': port_id, 'host': context.host, 'vif_type': binding.vif_type, 'vif_details': binding.vif_details, 'binding_levels': context.binding_levels}) return True except Exception: LOG.exception("Mechanism driver %s failed in " "bind_port", driver.name) def _infer_driver_from_allocation(self, context): """Choose mechanism driver as implied by allocation in placement. :param context: PortContext instance describing the port :returns: a single MechanismDriver instance Ports allocated to a resource provider (ie. a physical network interface) in Placement have the UUID of the provider in their binding:profile.allocation. The choice of a physical network interface (as recorded in the allocation) implies a choice of mechanism driver too. When an allocation was received we expect exactly one mechanism driver to be responsible for that physical network interface resource provider. """ drivers = [] for driver in self.ordered_mech_drivers: if driver.obj.responsible_for_ports_allocation(context): drivers.append(driver) if len(drivers) == 0: LOG.error("Failed to bind port %(port)s on host " "%(host)s allocated on resource provider " "%(rsc_provider)s, because no mechanism driver " "reports being responsible", {'port': context.current['id'], 'host': context.host, 'rsc_provider': context.current[ 'binding:profile']['allocation']}) raise place_exc.UnknownResourceProvider( rsc_provider=context.current['binding:profile']['allocation']) if len(drivers) >= 2: raise place_exc.AmbiguousResponsibilityForResourceProvider( rsc_provider=context.current['binding:profile']['allocation'], drivers=','.join([driver.name for driver in drivers])) # NOTE(bence romsics): The error conditions for raising either # UnknownResourceProvider or AmbiguousResponsibilityForResourceProvider # are pretty static therefore the usual 10-times-retry of a binding # failure could easily be unnecessary in those cases. However at this # point special handling of these exceptions in the binding retry loop # seems like premature optimization to me since these exceptions are # always a sign of a misconfigured neutron deployment. 
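        # For example (hypothetical values): if binding:profile.allocation
        # names the resource provider UUID of one SR-IOV NIC, exactly one
        # loaded mechanism driver should claim it through
        # responsible_for_ports_allocation(), and the binding below is then
        # restricted to that single driver.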
LOG.debug("Restricting possible bindings of port %(port)s " "(as inferred from placement allocation) to " "mechanism driver '%(driver)s'", {'port': context.current['id'], 'driver': drivers[0].name}) return drivers[0] def is_host_filtering_supported(self): return all(driver.obj.is_host_filtering_supported() for driver in self.ordered_mech_drivers) def filter_hosts_with_segment_access( self, context, segments, candidate_hosts, agent_getter): """Filter hosts with access to at least one segment. :returns: a subset of candidate_hosts. This method returns all hosts from candidate_hosts with access to a segment according to at least one driver. """ candidate_hosts = set(candidate_hosts) if not self.host_filtering_supported: return candidate_hosts hosts_with_access = set() for driver in self.ordered_mech_drivers: hosts = driver.obj.filter_hosts_with_segment_access( context, segments, candidate_hosts, agent_getter) hosts_with_access |= hosts candidate_hosts -= hosts if not candidate_hosts: break return hosts_with_access def _check_driver_to_bind(self, driver, segments_to_bind, binding_levels): # To prevent a possible binding loop, don't try to bind with # this driver if the same driver has already bound at a higher # level to one of the segments we are currently trying to # bind. Note that it is OK for the same driver to bind at # multiple levels using different segments. segment_ids_to_bind = {s[api.ID] for s in segments_to_bind} for level in binding_levels: if (level.driver == driver.name and level.segment_id in segment_ids_to_bind): LOG.debug("segment %(segment)s is already bound " "by driver %(driver)s", {"segment": level.segment_id, "driver": level.driver}) return False return True def _check_drivers_connectivity(self, drivers, port_context): """If port does not have an IP address, driver connectivity must be l2 A port without an IP address can be bound only to a mech driver with "connectivity" = "l2". "legacy" or "l3" (e.g.: Calico) drivers cannot have a port bound without an IP allocated. """ if port_context.current.get('fixed_ips'): return drivers return [d for d in drivers if getattr(d.obj, 'vif_details', {}).get( portbindings.VIF_DETAILS_CONNECTIVITY) == portbindings.CONNECTIVITY_L2] def get_workers(self): workers = [] for driver in self.ordered_mech_drivers: workers += driver.obj.get_workers() return workers class ExtensionManager(stevedore.named.NamedExtensionManager): """Manage extension drivers using drivers.""" def __init__(self): # Ordered list of extension drivers, defining # the order in which the drivers are called. self.ordered_ext_drivers = [] LOG.info("Configured extension driver names: %s", cfg.CONF.ml2.extension_drivers) super(ExtensionManager, self).__init__('neutron.ml2.extension_drivers', cfg.CONF.ml2.extension_drivers, invoke_on_load=True, name_order=True) LOG.info("Loaded extension driver names: %s", self.names()) self._register_drivers() def _register_drivers(self): """Register all extension drivers. This method should only be called once in the ExtensionManager constructor. """ for ext in self: self.ordered_ext_drivers.append(ext) LOG.info("Registered extension drivers: %s", [driver.name for driver in self.ordered_ext_drivers]) def initialize(self): # Initialize each driver in the list. 
for driver in self.ordered_ext_drivers: LOG.info("Initializing extension driver '%s'", driver.name) driver.obj.initialize() def extension_aliases(self): exts = [] for driver in self.ordered_ext_drivers: aliases = driver.obj.extension_aliases for alias in aliases: if not alias: continue exts.append(alias) LOG.info("Got %(alias)s extension from driver '%(drv)s'", {'alias': alias, 'drv': driver.name}) return exts def _call_on_ext_drivers(self, method_name, plugin_context, data, result): """Helper method for calling a method across all extension drivers.""" for driver in self.ordered_ext_drivers: try: getattr(driver.obj, method_name)(plugin_context, data, result) except Exception: with excutils.save_and_reraise_exception(): LOG.info("Extension driver '%(name)s' failed in " "%(method)s", {'name': driver.name, 'method': method_name}) def process_create_network(self, plugin_context, data, result): """Notify all extension drivers during network creation.""" self._call_on_ext_drivers("process_create_network", plugin_context, data, result) def process_update_network(self, plugin_context, data, result): """Notify all extension drivers during network update.""" self._call_on_ext_drivers("process_update_network", plugin_context, data, result) def process_create_subnet(self, plugin_context, data, result): """Notify all extension drivers during subnet creation.""" self._call_on_ext_drivers("process_create_subnet", plugin_context, data, result) def process_update_subnet(self, plugin_context, data, result): """Notify all extension drivers during subnet update.""" self._call_on_ext_drivers("process_update_subnet", plugin_context, data, result) def process_create_port(self, plugin_context, data, result): """Notify all extension drivers during port creation.""" self._call_on_ext_drivers("process_create_port", plugin_context, data, result) def process_update_port(self, plugin_context, data, result): """Notify all extension drivers during port update.""" self._call_on_ext_drivers("process_update_port", plugin_context, data, result) def _call_on_dict_driver(self, method_name, session, base_model, result): for driver in self.ordered_ext_drivers: try: getattr(driver.obj, method_name)(session, base_model, result) except Exception: LOG.exception("Extension driver '%(name)s' failed in " "%(method)s", {'name': driver.name, 'method': method_name}) raise ml2_exc.ExtensionDriverError(driver=driver.name) def extend_network_dict(self, session, base_model, result): """Notify all extension drivers to extend network dictionary.""" self._call_on_dict_driver("extend_network_dict", session, base_model, result) def extend_subnet_dict(self, session, base_model, result): """Notify all extension drivers to extend subnet dictionary.""" self._call_on_dict_driver("extend_subnet_dict", session, base_model, result) def extend_port_dict(self, session, base_model, result): """Notify all extension drivers to extend port dictionary.""" self._call_on_dict_driver("extend_port_dict", session, base_model, result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/models.py0000644000175000017500000001254000000000000022647 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
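# --- Editor's sketch (not neutron code): the ExtensionManager above fans
# every call out to each loaded extension driver. A hypothetical driver only
# needs the hooks the manager dispatches to; real drivers subclass the
# ExtensionDriver API from neutron-lib, which this toy deliberately omits.

class ToyStampExtensionDriver(object):
    """Hypothetical driver: stamps port results with a marker."""

    def initialize(self):
        pass

    @property
    def extension_aliases(self):
        return ['toy-stamp']

    def process_create_port(self, plugin_context, data, result):
        # called inside the create transaction, may mutate 'result'
        result['toy:stamped'] = True

    def extend_port_dict(self, session, base_model, result):
        # called whenever a port dict is built from the DB model
        result.setdefault('toy:stamped', False)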
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import portbindings from neutron_lib import constants from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm from neutron.db.models import segment as segment_models from neutron.db import models_v2 BINDING_PROFILE_LEN = 4095 class PortBinding(model_base.BASEV2): """Represent binding-related state of a port. A port binding stores the port attributes required for the portbindings extension, as well as internal ml2 state such as which MechanismDriver and which segment are used by the port binding. """ __tablename__ = 'ml2_port_bindings' port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True) host = sa.Column(sa.String(255), nullable=False, default='', server_default='', primary_key=True) vnic_type = sa.Column(sa.String(64), nullable=False, default=portbindings.VNIC_NORMAL, server_default=portbindings.VNIC_NORMAL) profile = sa.Column(sa.String(BINDING_PROFILE_LEN), nullable=False, default='', server_default='') vif_type = sa.Column(sa.String(64), nullable=False) vif_details = sa.Column(sa.String(4095), nullable=False, default='', server_default='') status = sa.Column(sa.String(16), nullable=False, default=constants.ACTIVE, server_default=constants.ACTIVE) # Add a relationship to the Port model in order to instruct SQLAlchemy to # eagerly load port bindings port = orm.relationship( models_v2.Port, load_on_pending=True, backref=orm.backref("port_bindings", lazy='joined', cascade='delete')) revises_on_change = ('port', ) class PortBindingLevel(model_base.BASEV2): """Represent each level of a port binding. Stores information associated with each level of an established port binding. Different levels might correspond to the host and ToR switch, for instance. """ __tablename__ = 'ml2_port_binding_levels' port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True) host = sa.Column(sa.String(255), nullable=False, primary_key=True) level = sa.Column(sa.Integer, primary_key=True, autoincrement=False) driver = sa.Column(sa.String(64)) segment_id = sa.Column(sa.String(36), sa.ForeignKey('networksegments.id', ondelete="SET NULL")) # Add a relationship to the Port model in order to instruct SQLAlchemy to # eagerly load port bindings port = orm.relationship( models_v2.Port, load_on_pending=True, backref=orm.backref("binding_levels", lazy='subquery', cascade='delete')) segment = orm.relationship( segment_models.NetworkSegment, load_on_pending=True) revises_on_change = ('port', ) class DistributedPortBinding(model_base.BASEV2): """Represent binding-related state of a Distributed Router(DVR, HA) port. Port binding for all the ports associated to a Distributed router(DVR, HA) identified by router_id. Currently DEVICE_OWNER_ROUTER_SNAT(DVR+HA router), DEVICE_OWNER_DVR_INTERFACE, DEVICE_OWNER_HA_REPLICATED_INT are distributed router ports. 
""" __tablename__ = 'ml2_distributed_port_bindings' port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True) host = sa.Column(sa.String(255), nullable=False, primary_key=True) router_id = sa.Column(sa.String(36), nullable=True) vif_type = sa.Column(sa.String(64), nullable=False) vif_details = sa.Column(sa.String(4095), nullable=False, default='', server_default='') vnic_type = sa.Column(sa.String(64), nullable=False, default=portbindings.VNIC_NORMAL, server_default=portbindings.VNIC_NORMAL) profile = sa.Column(sa.String(BINDING_PROFILE_LEN), nullable=False, default='', server_default='') status = sa.Column(sa.String(16), nullable=False) # Add a relationship to the Port model in order to instruct SQLAlchemy to # eagerly load port bindings port = orm.relationship( models_v2.Port, load_on_pending=True, backref=orm.backref("distributed_port_binding", lazy='subquery', cascade='delete')) revises_on_change = ('port', ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/ovo_rpc.py0000644000175000017500000001501100000000000023027 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import traceback import futurist from futurist import waiters from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import context as n_ctx from neutron_lib.db import api as db_api from oslo_concurrency import lockutils from oslo_log import log as logging from neutron._i18n import _ from neutron.api.rpc.callbacks import events as rpc_events from neutron.api.rpc.handlers import resources_rpc from neutron.objects import network from neutron.objects import ports from neutron.objects import securitygroup from neutron.objects import subnet LOG = logging.getLogger(__name__) class _ObjectChangeHandler(object): def __init__(self, resource, object_class, resource_push_api): self._resource = resource self._obj_class = object_class self._resource_push_api = resource_push_api self._resources_to_push = {} # NOTE(annp): uWSGI seems not happy with eventlet.GreenPool. # So switching to ThreadPool self._worker_pool = futurist.ThreadPoolExecutor() self.fts = [] self._semantic_warned = False for event in (events.AFTER_CREATE, events.AFTER_UPDATE, events.AFTER_DELETE): registry.subscribe(self.handle_event, resource, event) def wait(self): """Waits for all outstanding events to be dispatched.""" done, not_done = waiters.wait_for_all(self.fts) if not not_done: del self.fts[:] def _is_session_semantic_violated(self, context, resource, event): """Return True and print an ugly error on transaction violation. This code is to print ugly errors when AFTER_CREATE/UPDATE event transaction semantics are violated by other parts of the code. 
""" if not context.session.is_active: return False if not self._semantic_warned: stack = traceback.extract_stack() stack = "".join(traceback.format_list(stack)) LOG.warning("This handler is supposed to handle AFTER " "events, as in 'AFTER it's committed', " "not BEFORE. Offending resource event: " "%(r)s, %(e)s. Location:\n%(l)s", {'r': resource, 'e': event, 'l': stack}) self._semantic_warned = True return True def handle_event(self, resource, event, trigger, context, *args, **kwargs): """Callback handler for resource change that pushes change to RPC. We always retrieve the latest state and ignore what was in the payload to ensure that we don't get any stale data. """ if self._is_session_semantic_violated(context, resource, event): return resource_id = self._extract_resource_id(kwargs) # we preserve the context so we can trace a receive on the agent back # to the server-side event that triggered it self._resources_to_push[resource_id] = context.to_dict() # spawn worker so we don't block main AFTER_UPDATE thread self.fts.append(self._worker_pool.submit(self.dispatch_events)) @lockutils.synchronized('event-dispatch') def dispatch_events(self): # this is guarded by a lock to ensure we don't get too many concurrent # dispatchers hitting the database simultaneously. to_dispatch, self._resources_to_push = self._resources_to_push, {} # TODO(kevinbenton): now that we are batching these, convert to a # single get_objects call for all of them for resource_id, context_dict in to_dispatch.items(): context = n_ctx.Context.from_dict(context_dict) # attempt to get regardless of event type so concurrent delete # after create/update is the same code-path as a delete event with db_api.get_context_manager().independent.reader.using( context): obj = self._obj_class.get_object(context, id=resource_id) # CREATE events are always treated as UPDATE events to ensure # listeners are written to handle out-of-order messages if obj is None: rpc_event = rpc_events.DELETED # construct a fake object with the right ID so we can # have a payload for the delete message. obj = self._obj_class(id=resource_id) else: rpc_event = rpc_events.UPDATED self._resource_push_api.push(context, [obj], rpc_event) def _extract_resource_id(self, callback_kwargs): id_kwarg = '%s_id' % self._resource if id_kwarg in callback_kwargs: return callback_kwargs[id_kwarg] if self._resource in callback_kwargs: return callback_kwargs[self._resource]['id'] raise RuntimeError(_("Couldn't find resource ID in callback event")) class OVOServerRpcInterface(object): """ML2 server-side RPC interface. Generates RPC callback notifications on ML2 object changes. 
""" def __init__(self): self._rpc_pusher = resources_rpc.ResourcesPushRpcApi() self._setup_change_handlers() LOG.debug("ML2 OVO RPC backend initialized.") def _setup_change_handlers(self): """Setup all of the local callback listeners for resource changes.""" resource_objclass_map = { resources.PORT: ports.Port, resources.SUBNET: subnet.Subnet, resources.NETWORK: network.Network, resources.SECURITY_GROUP: securitygroup.SecurityGroup, resources.SECURITY_GROUP_RULE: securitygroup.SecurityGroupRule, } self._resource_handlers = { res: _ObjectChangeHandler(res, obj_class, self._rpc_pusher) for res, obj_class in resource_objclass_map.items() } def wait(self): """Wait for all handlers to finish processing async events.""" for handler in self._resource_handlers.values(): handler.wait() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/plugin.py0000644000175000017500000036372300000000000022676 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from eventlet import greenthread import netaddr from netaddr.strategy import eui48 from neutron_lib.agent import constants as agent_consts from neutron_lib.agent import topics from neutron_lib.api.definitions import address_scope from neutron_lib.api.definitions import agent as agent_apidef from neutron_lib.api.definitions import agent_resources_synced from neutron_lib.api.definitions import allowedaddresspairs as addr_apidef from neutron_lib.api.definitions import availability_zone as az_def from neutron_lib.api.definitions import availability_zone_filter from neutron_lib.api.definitions import default_subnetpools from neutron_lib.api.definitions import dhcpagentscheduler from neutron_lib.api.definitions import empty_string_filtering from neutron_lib.api.definitions import external_net from neutron_lib.api.definitions import extra_dhcp_opt as edo_ext from neutron_lib.api.definitions import filter_validation as filter_apidef from neutron_lib.api.definitions import ip_allocation as ipalloc_apidef from neutron_lib.api.definitions import ip_substring_port_filtering from neutron_lib.api.definitions import multiprovidernet from neutron_lib.api.definitions import network as net_def from neutron_lib.api.definitions import network_availability_zone from neutron_lib.api.definitions import network_mtu as mtu_apidef from neutron_lib.api.definitions import network_mtu_writable as mtuw_apidef from neutron_lib.api.definitions import port as port_def from neutron_lib.api.definitions import port_mac_address_regenerate from neutron_lib.api.definitions import port_security as psec from neutron_lib.api.definitions import portbindings from neutron_lib.api.definitions import portbindings_extended as pbe_ext from neutron_lib.api.definitions import provider_net from neutron_lib.api.definitions import rbac_address_scope from neutron_lib.api.definitions import rbac_security_groups as rbac_sg_apidef from 
neutron_lib.api.definitions import rbac_subnetpool from neutron_lib.api.definitions import security_groups_port_filtering from neutron_lib.api.definitions import stateful_security_group from neutron_lib.api.definitions import subnet as subnet_def from neutron_lib.api.definitions import subnet_onboard as subnet_onboard_def from neutron_lib.api.definitions import subnetpool_prefix_ops \ as subnetpool_prefix_ops_def from neutron_lib.api.definitions import vlantransparent as vlan_apidef from neutron_lib.api import extensions from neutron_lib.api import validators from neutron_lib.api.validators import availability_zone as az_validator from neutron_lib.callbacks import events from neutron_lib.callbacks import exceptions from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as const from neutron_lib.db import api as db_api from neutron_lib.db import model_query from neutron_lib.db import resource_extend from neutron_lib.db import utils as db_utils from neutron_lib import exceptions as exc from neutron_lib.exceptions import allowedaddresspairs as addr_exc from neutron_lib.exceptions import port_security as psec_exc from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from neutron_lib.plugins.ml2 import api from neutron_lib.plugins import utils as p_utils from neutron_lib import rpc as n_rpc from neutron_lib.services.qos import constants as qos_consts from oslo_config import cfg from oslo_db import exception as os_db_exception from oslo_log import helpers as log_helpers from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import uuidutils import sqlalchemy from sqlalchemy import or_ from sqlalchemy.orm import exc as sa_exc from neutron._i18n import _ from neutron.agent import securitygroups_rpc as sg_rpc from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api from neutron.api.rpc.handlers import dhcp_rpc from neutron.api.rpc.handlers import dvr_rpc from neutron.api.rpc.handlers import metadata_rpc from neutron.api.rpc.handlers import resources_rpc from neutron.api.rpc.handlers import securitygroups_rpc from neutron.common import utils from neutron.db import address_scope_db from neutron.db import agents_db from neutron.db import agentschedulers_db from neutron.db import allowedaddresspairs_db as addr_pair_db from neutron.db import db_base_plugin_v2 from neutron.db import dvr_mac_db from neutron.db import external_net_db from neutron.db import extradhcpopt_db from neutron.db.models import securitygroup as sg_models from neutron.db import models_v2 from neutron.db import provisioning_blocks from neutron.db.quota import driver # noqa from neutron.db import securitygroups_rpc_base as sg_db_rpc from neutron.db import segments_db from neutron.db import subnet_service_type_mixin from neutron.db import vlantransparent_db from neutron.extensions import filter_validation from neutron.extensions import vlantransparent from neutron.ipam import exceptions as ipam_exc from neutron.objects import base as base_obj from neutron.objects import ports as ports_obj from neutron.plugins.ml2.common import exceptions as ml2_exc from neutron.plugins.ml2 import db from neutron.plugins.ml2 import driver_context from neutron.plugins.ml2.drivers import mech_agent from neutron.plugins.ml2.extensions import qos as qos_ext from neutron.plugins.ml2 import managers from neutron.plugins.ml2 import models from 
neutron.plugins.ml2 import ovo_rpc from neutron.plugins.ml2 import rpc from neutron.quota import resource_registry from neutron.services.segments import plugin as segments_plugin LOG = log.getLogger(__name__) MAX_BIND_TRIES = 10 SERVICE_PLUGINS_REQUIRED_DRIVERS = { 'qos': [qos_ext.QOS_EXT_DRIVER_ALIAS] } def _ml2_port_result_filter_hook(query, filters): values = filters and filters.get(portbindings.HOST_ID, []) if not values: return query bind_criteria = models.PortBinding.host.in_(values) return query.filter(models_v2.Port.port_bindings.any(bind_criteria)) @resource_extend.has_resource_extenders @registry.has_registry_receivers class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, dvr_mac_db.DVRDbMixin, external_net_db.External_net_db_mixin, sg_db_rpc.SecurityGroupServerRpcMixin, agentschedulers_db.AZDhcpAgentSchedulerDbMixin, addr_pair_db.AllowedAddressPairsMixin, vlantransparent_db.Vlantransparent_db_mixin, extradhcpopt_db.ExtraDhcpOptMixin, address_scope_db.AddressScopeDbMixin, subnet_service_type_mixin.SubnetServiceTypeMixin): """Implement the Neutron L2 abstractions using modules. Ml2Plugin is a Neutron plugin based on separately extensible sets of network types and mechanisms for connecting to networks of those types. The network types and mechanisms are implemented as drivers loaded via Python entry points. Networks can be made up of multiple segments (not yet fully implemented). """ # This attribute specifies whether the plugin supports or not # bulk/pagination/sorting operations. Name mangling is used in # order to ensure it is qualified by class __native_bulk_support = True __native_pagination_support = True __native_sorting_support = True # This attribute specifies whether the plugin supports or not # filter validations. Name mangling is used in # order to ensure it is qualified by class __filter_validation_support = True # List of supported extensions _supported_extension_aliases = [provider_net.ALIAS, external_net.ALIAS, portbindings.ALIAS, "quotas", "security-group", rbac_address_scope.ALIAS, rbac_sg_apidef.ALIAS, rbac_subnetpool.ALIAS, agent_apidef.ALIAS, dhcpagentscheduler.ALIAS, multiprovidernet.ALIAS, addr_apidef.ALIAS, edo_ext.ALIAS, "subnet_allocation", mtu_apidef.ALIAS, mtuw_apidef.ALIAS, vlan_apidef.ALIAS, address_scope.ALIAS, az_def.ALIAS, network_availability_zone.ALIAS, availability_zone_filter.ALIAS, default_subnetpools.ALIAS, "subnet-service-types", ip_substring_port_filtering.ALIAS, security_groups_port_filtering.ALIAS, empty_string_filtering.ALIAS, filter_apidef.ALIAS, port_mac_address_regenerate.ALIAS, pbe_ext.ALIAS, agent_resources_synced.ALIAS, subnet_onboard_def.ALIAS, subnetpool_prefix_ops_def.ALIAS, stateful_security_group.ALIAS] # List of agent types for which all binding_failed ports should try to be # rebound when agent revive _rebind_on_revive_agent_types = [const.AGENT_TYPE_OVS] @property def supported_extension_aliases(self): if not hasattr(self, '_aliases'): aliases = self._supported_extension_aliases[:] aliases += self.extension_manager.extension_aliases() sg_rpc.disable_security_group_extension_by_config(aliases) vlantransparent._disable_extension_by_config(aliases) filter_validation._disable_extension_by_config(aliases) self._aliases = aliases return self._aliases def __new__(cls, *args, **kwargs): model_query.register_hook( models_v2.Port, "ml2_port_bindings", query_hook=None, filter_hook=None, result_filters=_ml2_port_result_filter_hook) return super(Ml2Plugin, cls).__new__(cls, *args, **kwargs) @resource_registry.tracked_resources( 
network=models_v2.Network, port=models_v2.Port, subnet=models_v2.Subnet, subnetpool=models_v2.SubnetPool, security_group=sg_models.SecurityGroup, security_group_rule=sg_models.SecurityGroupRule) def __init__(self): # First load drivers, then initialize DB, then initialize drivers self.type_manager = managers.TypeManager() self.extension_manager = managers.ExtensionManager() self.mechanism_manager = managers.MechanismManager() super(Ml2Plugin, self).__init__() self.type_manager.initialize() self.extension_manager.initialize() self.mechanism_manager.initialize() self._setup_dhcp() self._start_rpc_notifiers() self.add_agent_status_check_worker(self.agent_health_check) self.add_workers(self.mechanism_manager.get_workers()) self._verify_service_plugins_requirements() LOG.info("Modular L2 Plugin initialization complete") def _setup_rpc(self): """Initialize components to support agent communication.""" self.endpoints = [ rpc.RpcCallbacks(self.notifier, self.type_manager), securitygroups_rpc.SecurityGroupServerRpcCallback(), dvr_rpc.DVRServerRpcCallback(), dhcp_rpc.DhcpRpcCallback(), agents_db.AgentExtRpcCallback(), metadata_rpc.MetadataRpcCallback(), resources_rpc.ResourcesPullRpcCallback() ] def _setup_dhcp(self): """Initialize components to support DHCP.""" self.network_scheduler = importutils.import_object( cfg.CONF.network_scheduler_driver ) self.add_periodic_dhcp_agent_status_check() def _verify_service_plugins_requirements(self): for service_plugin in cfg.CONF.service_plugins: extension_drivers = SERVICE_PLUGINS_REQUIRED_DRIVERS.get( service_plugin, [] ) for extension_driver in extension_drivers: if extension_driver not in self.extension_manager.names(): raise ml2_exc.ExtensionDriverNotFound( driver=extension_driver, service_plugin=service_plugin ) @registry.receives(resources.PORT, [provisioning_blocks.PROVISIONING_COMPLETE]) def _port_provisioned(self, rtype, event, trigger, payload=None): port_id = payload.resource_id port = db.get_port(payload.context, port_id) port_binding = p_utils.get_port_binding_by_status_and_host( getattr(port, 'port_bindings', []), const.ACTIVE) if not port or not port_binding: LOG.debug("Port %s was deleted so its status cannot be updated.", port_id) return if port_binding.vif_type in (portbindings.VIF_TYPE_BINDING_FAILED, portbindings.VIF_TYPE_UNBOUND): # NOTE(kevinbenton): we hit here when a port is created without # a host ID and the dhcp agent notifies that its wiring is done LOG.debug("Port %s cannot update to ACTIVE because it " "is not bound.", port_id) return else: # port is bound, but we have to check for new provisioning blocks # one last time to detect the case where we were triggered by an # unbound port and the port became bound with new provisioning # blocks before 'get_port' was called above if provisioning_blocks.is_object_blocked(payload.context, port_id, resources.PORT): LOG.debug("Port %s had new provisioning blocks added so it " "will not transition to active.", port_id) return if not port.admin_state_up: LOG.debug("Port %s is administratively disabled so it will " "not transition to active.", port_id) return self.update_port_status( payload.context, port_id, const.PORT_STATUS_ACTIVE) @log_helpers.log_method_call def _start_rpc_notifiers(self): """Initialize RPC notifiers for agents.""" self.ovo_notifier = ovo_rpc.OVOServerRpcInterface() self.notifier = rpc.AgentNotifierApi(topics.AGENT) self.agent_notifiers[const.AGENT_TYPE_DHCP] = ( dhcp_rpc_agent_api.DhcpAgentNotifyAPI() ) @log_helpers.log_method_call def start_rpc_listeners(self): 
"""Start the RPC loop to let the plugin communicate with agents.""" self._setup_rpc() self.topic = topics.PLUGIN self.conn = n_rpc.Connection() self.conn.create_consumer(self.topic, self.endpoints, fanout=False) self.conn.create_consumer( topics.SERVER_RESOURCE_VERSIONS, [resources_rpc.ResourcesPushToServerRpcCallback()], fanout=True) # process state reports despite dedicated rpc workers self.conn.create_consumer(topics.REPORTS, [agents_db.AgentExtRpcCallback()], fanout=False) return self.conn.consume_in_threads() def start_rpc_state_reports_listener(self): self.conn_reports = n_rpc.Connection() self.conn_reports.create_consumer(topics.REPORTS, [agents_db.AgentExtRpcCallback()], fanout=False) return self.conn_reports.consume_in_threads() def _filter_nets_provider(self, context, networks, filters): return [network for network in networks if self.type_manager.network_matches_filters(network, filters) ] def _check_mac_update_allowed(self, orig_port, port, binding): unplugged_types = (portbindings.VIF_TYPE_BINDING_FAILED, portbindings.VIF_TYPE_UNBOUND) new_mac = port.get('mac_address') mac_change = (new_mac is not None and orig_port['mac_address'] != new_mac) if (mac_change and binding.vif_type not in unplugged_types): raise exc.PortBound(port_id=orig_port['id'], vif_type=binding.vif_type, old_mac=orig_port['mac_address'], new_mac=port['mac_address']) return mac_change def _reset_mac_for_direct_physical(self, orig_port, port, binding): # when unbinding direct-physical port we need to free # physical device MAC address so that other ports may reuse it if (binding.vnic_type == portbindings.VNIC_DIRECT_PHYSICAL and port.get('device_id') == '' and port.get('device_owner') == '' and orig_port['device_id'] != ''): port['mac_address'] = self._generate_macs()[0] return True else: return False @registry.receives(resources.AGENT, [events.AFTER_UPDATE]) def _retry_binding_revived_agents(self, resource, event, trigger, payload=None): context = payload.context host = payload.metadata.get('host') agent = payload.desired_state agent_status = agent.get('agent_status') agent_type = agent.get('agent_type') if (agent_status != agent_consts.AGENT_REVIVED or not agent.get('admin_state_up') or agent_type not in self._rebind_on_revive_agent_types): return ports = ports_obj.Port.get_ports_by_binding_type_and_host( context, portbindings.VIF_TYPE_BINDING_FAILED, host) for port in ports: binding = self._get_binding_for_host(port.bindings, host) if not binding: LOG.debug('No bindings found for port %(port_id)s ' 'on host %(host)s', {'port_id': port.id, 'host': host}) continue port_dict = self._make_port_dict(port.db_obj) network = self.get_network(context, port.network_id) try: levels = db.get_binding_level_objs( context, port.id, binding.host) # TODO(slaweq): use binding OVO instead of binding.db_obj when # ML2 plugin will switch to use Port Binding OVO everywhere mech_context = driver_context.PortContext( self, context, port_dict, network, binding.db_obj, levels) self._bind_port_if_needed(mech_context) except Exception as e: LOG.warning('Attempt to bind port %(port_id)s after agent ' '%(agent_type)s on host %(host)s revived failed. 
' 'Error: %(error)s', {'port_id': port.id, 'agent_type': agent_type, 'host': host, 'error': e}) def _clear_port_binding(self, mech_context, binding, port, original_host): binding.vif_type = portbindings.VIF_TYPE_UNBOUND binding.vif_details = '' db.clear_binding_levels(mech_context._plugin_context, port['id'], original_host) mech_context._clear_binding_levels() def _process_port_binding_attributes(self, binding, attrs): changes = False host = const.ATTR_NOT_SPECIFIED if attrs and portbindings.HOST_ID in attrs: host = attrs.get(portbindings.HOST_ID) or '' original_host = binding.host if validators.is_attr_set(host) and original_host != host: binding.host = host changes = True vnic_type = attrs.get(portbindings.VNIC_TYPE) if attrs else None if (validators.is_attr_set(vnic_type) and binding.vnic_type != vnic_type): binding.vnic_type = vnic_type changes = True # treat None as clear of profile. profile = None if attrs and portbindings.PROFILE in attrs: profile = attrs.get(portbindings.PROFILE) or {} if profile not in (None, const.ATTR_NOT_SPECIFIED, self._get_profile(binding)): binding.profile = jsonutils.dumps(profile) if len(binding.profile) > models.BINDING_PROFILE_LEN: msg = _("binding:profile value too large") raise exc.InvalidInput(error_message=msg) changes = True return changes, original_host def _process_port_binding(self, mech_context, attrs): plugin_context = mech_context._plugin_context binding = mech_context._binding port = mech_context.current changes, original_host = self._process_port_binding_attributes(binding, attrs) # Unbind the port if needed. if changes: self._clear_port_binding(mech_context, binding, port, original_host) port['status'] = const.PORT_STATUS_DOWN super(Ml2Plugin, self).update_port( mech_context._plugin_context, port['id'], {port_def.RESOURCE_NAME: {'status': const.PORT_STATUS_DOWN}}) if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: self._clear_port_binding(mech_context, binding, port, original_host) binding.host = '' self._update_port_dict_binding(port, binding) binding.persist_state_to_session(plugin_context.session) return changes @db_api.retry_db_errors def _bind_port_if_needed(self, context, allow_notify=False, need_notify=False, allow_commit=True): if not context.network.network_segments: LOG.debug("Network %s has no segments, skipping binding", context.network.current['id']) return context for count in range(1, MAX_BIND_TRIES + 1): if count > 1: # yield for binding retries so that we give other threads a # chance to do their work greenthread.sleep(0) # multiple attempts shouldn't happen very often so we log each # attempt after the 1st. LOG.info("Attempt %(count)s to bind port %(port)s", {'count': count, 'port': context.current['id']}) bind_context, need_notify, try_again = self._attempt_binding( context, need_notify) if count == MAX_BIND_TRIES or not try_again: if self._should_bind_port(context) and allow_commit: # At this point, we attempted to bind a port and reached # its final binding state. Binding either succeeded or # exhausted all attempts, thus no need to try again. # Now, the port and its binding state should be committed. 
context, need_notify, try_again = ( self._commit_port_binding(context, bind_context, need_notify)) else: context = bind_context if not try_again: if allow_notify and need_notify: self._notify_port_updated(context) return context LOG.error("Failed to commit binding results for %(port)s " "after %(max)s tries", {'port': context.current['id'], 'max': MAX_BIND_TRIES}) return context def _should_bind_port(self, context): return (context._binding.host and context._binding.vif_type in (portbindings.VIF_TYPE_UNBOUND, portbindings.VIF_TYPE_BINDING_FAILED)) def _attempt_binding(self, context, need_notify): try_again = False if self._should_bind_port(context): bind_context = self._bind_port(context) if bind_context.vif_type != portbindings.VIF_TYPE_BINDING_FAILED: # Binding succeeded. Suggest notifying of successful binding. need_notify = True else: # Current attempt binding failed, try to bind again. try_again = True context = bind_context return context, need_notify, try_again def _bind_port(self, orig_context): # Construct a new PortContext from the one from the previous # transaction. port = orig_context.current orig_binding = orig_context._binding new_binding = models.PortBinding( host=orig_binding.host, vnic_type=orig_binding.vnic_type, profile=orig_binding.profile, vif_type=portbindings.VIF_TYPE_UNBOUND, vif_details='' ) self._update_port_dict_binding(port, new_binding) new_context = driver_context.PortContext( self, orig_context._plugin_context, port, orig_context.network.current, new_binding, None, original_port=orig_context.original) # Attempt to bind the port and return the context with the # result. self.mechanism_manager.bind_port(new_context) return new_context def _commit_port_binding(self, orig_context, bind_context, need_notify, update_binding_levels=True): port_id = orig_context.current['id'] plugin_context = orig_context._plugin_context orig_binding = orig_context._binding new_binding = bind_context._binding # TODO(yamahata): revise what to be passed or new resource # like PORTBINDING should be introduced? # It would be addressed during EventPayload conversion. registry.notify(resources.PORT, events.BEFORE_UPDATE, self, context=plugin_context, port=orig_context.current, original_port=orig_context.current, orig_binding=orig_binding, new_binding=new_binding) # After we've attempted to bind the port, we begin a # transaction, get the current port state, and decide whether # to commit the binding results. with db_api.CONTEXT_WRITER.using(plugin_context): # Get the current port state and build a new PortContext # reflecting this state as original state for subsequent # mechanism driver update_port_*commit() calls. try: port_db = self._get_port(plugin_context, port_id) cur_binding = p_utils.get_port_binding_by_status_and_host( port_db.port_bindings, const.ACTIVE) except exc.PortNotFound: port_db, cur_binding = None, None if not port_db or not cur_binding: # The port has been deleted concurrently, so just # return the unbound result from the initial # transaction that completed before the deletion. LOG.debug("Port %s has been deleted concurrently", port_id) return orig_context, False, False # Since the mechanism driver bind_port() calls must be made # outside a DB transaction locking the port state, it is # possible (but unlikely) that the port's state could change # concurrently while these calls are being made. If another # thread or process succeeds in binding the port before this # thread commits its results, the already committed results are # used. 
If attributes such as binding:host_id, binding:profile, # or binding:vnic_type are updated concurrently, the try_again # flag is returned to indicate that the commit was unsuccessful. oport = self._make_port_dict(port_db) port = self._make_port_dict(port_db) network = bind_context.network.current if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: # REVISIT(rkukura): The PortBinding instance from the # ml2_port_bindings table, returned as cur_binding # from port_db.port_binding above, is # currently not used for DVR distributed ports, and is # replaced here with the DistributedPortBinding instance from # the ml2_distributed_port_bindings table specific to the host # on which the distributed port is being bound. It # would be possible to optimize this code to avoid # fetching the PortBinding instance in the DVR case, # and even to avoid creating the unused entry in the # ml2_port_bindings table. But the upcoming resolution # for bug 1367391 will eliminate the # ml2_distributed_port_bindings table, use the # ml2_port_bindings table to store non-host-specific # fields for both distributed and non-distributed # ports, and introduce a new ml2_port_binding_hosts # table for the fields that need to be host-specific # in the distributed case. Since the PortBinding # instance will then be needed, it does not make sense # to optimize this code to avoid fetching it. cur_binding = db.get_distributed_port_binding_by_host( plugin_context, port_id, orig_binding.host) cur_context_binding = cur_binding if new_binding.status == const.INACTIVE: cur_context_binding = ( p_utils.get_port_binding_by_status_and_host( port_db.port_bindings, const.INACTIVE, host=new_binding.host)) cur_context = driver_context.PortContext( self, plugin_context, port, network, cur_context_binding, None, original_port=oport) # Commit our binding results only if port has not been # successfully bound concurrently by another thread or # process and no binding inputs have been changed. commit = ((cur_binding.vif_type in [portbindings.VIF_TYPE_UNBOUND, portbindings.VIF_TYPE_BINDING_FAILED]) and orig_binding.host == cur_binding.host and orig_binding.vnic_type == cur_binding.vnic_type and orig_binding.profile == cur_binding.profile) if commit: # Update the port's binding state with our binding # results. if new_binding.status == const.INACTIVE: cur_context_binding.status = const.ACTIVE cur_binding.status = const.INACTIVE else: cur_context_binding.vif_type = new_binding.vif_type cur_context_binding.vif_details = new_binding.vif_details if update_binding_levels: db.clear_binding_levels(plugin_context, port_id, cur_binding.host) db.set_binding_levels(plugin_context, bind_context._binding_levels) # refresh context with a snapshot of updated state cur_context._binding = driver_context.InstanceSnapshot( cur_context_binding) cur_context._binding_levels = bind_context._binding_levels # Update PortContext's port dictionary to reflect the # updated binding state. self._update_port_dict_binding(port, cur_context_binding) # Update the port status if requested by the bound driver. if (bind_context._binding_levels and bind_context._new_port_status): port_db.status = bind_context._new_port_status port['status'] = bind_context._new_port_status # Call the mechanism driver precommit methods, commit # the results, and call the postcommit methods. self.mechanism_manager.update_port_precommit(cur_context) else: # Try to populate the PortContext with the current binding # levels so that the RPC notification won't get suppressed. 
# This is to avoid leaving ports stuck in a DOWN state. # For more information see bug: # https://bugs.launchpad.net/neutron/+bug/1755810 LOG.warning("Concurrent port binding operations failed on " "port %s", port_id) levels = db.get_binding_level_objs(plugin_context, port_id, cur_binding.host) for level in levels: cur_context._push_binding_level(level) # refresh context with a snapshot of the current binding state cur_context._binding = driver_context.InstanceSnapshot( cur_binding) if commit: # Continue, using the port state as of the transaction that # just finished, whether that transaction committed new # results or discovered concurrent port state changes. # Also, Trigger notification for successful binding commit. kwargs = { 'context': plugin_context, 'port': self._make_port_dict(port_db), # ensure latest state 'mac_address_updated': False, 'original_port': oport, } registry.notify(resources.PORT, events.AFTER_UPDATE, self, **kwargs) self.mechanism_manager.update_port_postcommit(cur_context) need_notify = True try_again = False else: try_again = True return cur_context, need_notify, try_again def _update_port_dict_binding(self, port, binding): port[portbindings.VNIC_TYPE] = binding.vnic_type port[portbindings.PROFILE] = self._get_profile(binding) if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: port[portbindings.HOST_ID] = '' port[portbindings.VIF_TYPE] = portbindings.VIF_TYPE_DISTRIBUTED port[portbindings.VIF_DETAILS] = {} else: port[portbindings.HOST_ID] = binding.host port[portbindings.VIF_TYPE] = binding.vif_type port[portbindings.VIF_DETAILS] = self._get_vif_details(binding) def _get_vif_details(self, binding): if binding.vif_details: try: return jsonutils.loads(binding.vif_details) except Exception: LOG.error("Serialized vif_details DB value '%(value)s' " "for port %(port)s is invalid", {'value': binding.vif_details, 'port': binding.port_id}) return {} def _get_profile(self, binding): if binding.profile: try: return jsonutils.loads(binding.profile) except Exception: LOG.error("Serialized profile DB value '%(value)s' for " "port %(port)s is invalid", {'value': binding.profile, 'port': binding.port_id}) return {} @staticmethod @resource_extend.extends([port_def.COLLECTION_NAME]) def _ml2_extend_port_dict_binding(port_res, port_db): plugin = directory.get_plugin() if isinstance(port_db, ports_obj.Port): bindings = port_db.bindings else: bindings = port_db.port_bindings port_binding = p_utils.get_port_binding_by_status_and_host( bindings, const.ACTIVE) # None when called during unit tests for other plugins. if port_binding: plugin._update_port_dict_binding(port_res, port_binding) # ML2's resource extend functions allow extension drivers that extend # attributes for the resources to add those attributes to the result. 
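# --- Editor's sketch (not neutron code): _get_vif_details()/_get_profile()
# above never let a corrupt serialized column break the API path; they log
# and fall back to {}. The same defensive pattern in isolation, using stdlib
# json where neutron uses oslo_serialization.jsonutils:

import json
import logging

_EXAMPLE_LOG = logging.getLogger(__name__)

def load_json_column(raw):
    """Deserialize a DB-stored JSON string, tolerating bad values."""
    if not raw:
        return {}
    try:
        return json.loads(raw)
    except ValueError:
        _EXAMPLE_LOG.error("Serialized DB value %r is invalid", raw)
        return {}

# load_json_column('{"ovs_hybrid_plug": true}') -> {'ovs_hybrid_plug': True}
# load_json_column('not-json') -> {}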
@staticmethod @resource_extend.extends([net_def.COLLECTION_NAME]) def _ml2_md_extend_network_dict(result, netdb): plugin = directory.get_plugin() session = plugin._object_session_or_new_session(netdb) plugin.extension_manager.extend_network_dict(session, netdb, result) @staticmethod @resource_extend.extends([port_def.COLLECTION_NAME]) def _ml2_md_extend_port_dict(result, portdb): plugin = directory.get_plugin() session = plugin._object_session_or_new_session(portdb) plugin.extension_manager.extend_port_dict(session, portdb, result) @staticmethod @resource_extend.extends([subnet_def.COLLECTION_NAME]) def _ml2_md_extend_subnet_dict(result, subnetdb): plugin = directory.get_plugin() session = plugin._object_session_or_new_session(subnetdb) plugin.extension_manager.extend_subnet_dict(session, subnetdb, result) @staticmethod def _object_session_or_new_session(sql_obj): session = sqlalchemy.inspect(sql_obj).session if not session: session = db_api.get_reader_session() return session def _notify_port_updated(self, mech_context): port = mech_context.current segment = mech_context.bottom_bound_segment if not segment: # REVISIT(rkukura): This should notify agent to unplug port network = mech_context.network.current LOG.debug("In _notify_port_updated(), no bound segment for " "port %(port_id)s on network %(network_id)s", {'port_id': port['id'], 'network_id': network['id']}) return self.notifier.port_update(mech_context._plugin_context, port, segment[api.NETWORK_TYPE], segment[api.SEGMENTATION_ID], segment[api.PHYSICAL_NETWORK]) def _update_segmentation_id(self, context, network, net_data): """Update segmentation ID in a single provider network""" segments = segments_db.get_networks_segments( context, [network['id']])[network['id']] if len(segments) > 1: msg = _('Provider network attributes can be updated only in ' 'provider networks with a single segment.') raise exc.InvalidInput(error_message=msg) vif_types = [portbindings.VIF_TYPE_UNBOUND, portbindings.VIF_TYPE_BINDING_FAILED] for mech_driver in self.mechanism_manager.ordered_mech_drivers: if (isinstance(mech_driver.obj, mech_agent.AgentMechanismDriverBase) and provider_net.SEGMENTATION_ID in mech_driver.obj. provider_network_attribute_updates_supported()): agent_type = mech_driver.obj.agent_type agents = self.get_agents( context, filters={'agent_type': [agent_type]}) for agent in agents: vif_types.append( mech_driver.obj.get_supported_vif_type(agent)) if ports_obj.Port.check_network_ports_by_binding_types( context, network['id'], vif_types, negative_search=True): msg = (_('Provider network attribute %(attr)s cannot be updated ' 'if any port in the network has not the following ' '%(vif_field)s: %(vif_types)s') % {'attr': provider_net.SEGMENTATION_ID, 'vif_field': portbindings.VIF_TYPE, 'vif_types': ', '.join(vif_types)}) raise exc.InvalidInput(error_message=msg) self.type_manager.update_network_segment(context, network, net_data, segments[0]) def _update_provider_network_attributes(self, context, network, net_data): """Raise exception if provider network attrs update are not supported. This function will raise an exception if the provider network attribute update is not supported. 
""" provider_net_attrs = (set(provider_net.ATTRIBUTES) - {provider_net.SEGMENTATION_ID}) requested_provider_net_attrs = set(net_data) & provider_net_attrs for attr in requested_provider_net_attrs: if (validators.is_attr_set(net_data.get(attr)) and net_data.get(attr) != network[attr]): msg = (_('Plugin does not support updating the following ' 'provider network attributes: %s') % ', '.join(provider_net_attrs)) raise exc.InvalidInput(error_message=msg) if net_data.get(provider_net.SEGMENTATION_ID): self._update_segmentation_id(context, network, net_data) def _delete_objects(self, context, resource, objects): delete_op = getattr(self, 'delete_%s' % resource) for obj in objects: try: delete_op(context, obj['result']['id']) except KeyError: LOG.exception("Could not find %s to delete.", resource) except Exception: LOG.exception("Could not delete %(res)s %(id)s.", {'res': resource, 'id': obj['result']['id']}) def _create_bulk_ml2(self, resource, context, request_items): objects = [] collection = "%ss" % resource items = request_items[collection] obj_before_create = getattr(self, '_before_create_%s' % resource) for item in items: obj_before_create(context, item) with db_api.CONTEXT_WRITER.using(context): obj_creator = getattr(self, '_create_%s_db' % resource) for item in items: try: attrs = item[resource] result, mech_context = obj_creator(context, item) objects.append({'mech_context': mech_context, 'result': result, 'attributes': attrs}) except Exception as e: with excutils.save_and_reraise_exception(): utils.attach_exc_details( e, ("An exception occurred while creating " "the %(resource)s:%(item)s"), {'resource': resource, 'item': item}) postcommit_op = getattr(self, '_after_create_%s' % resource) for obj in objects: try: postcommit_op(context, obj['result'], obj['mech_context']) except Exception: with excutils.save_and_reraise_exception(): resource_ids = [res['result']['id'] for res in objects] LOG.exception("ML2 _after_create_%(res)s " "failed for %(res)s: " "'%(failed_id)s'. Deleting " "%(res)ss %(resource_ids)s", {'res': resource, 'failed_id': obj['result']['id'], 'resource_ids': ', '.join(resource_ids)}) # _after_handler will have deleted the object that threw to_delete = [o for o in objects if o != obj] self._delete_objects(context, resource, to_delete) return objects def _get_network_mtu(self, network_db, validate=True): mtus = [] try: segments = network_db['segments'] except KeyError: segments = [network_db] for s in segments: segment_type = s.get('network_type') if segment_type is None: continue try: type_driver = self.type_manager.drivers[segment_type].obj except KeyError: # NOTE(ihrachys) This can happen when type driver is not loaded # for an existing segment, or simply when the network has no # segments at the specific time this is computed. # In the former case, while it's probably an indication of # a bad setup, it's better to be safe than sorry here. Also, # several unit tests use non-existent driver types that may # trigger the exception here. if segment_type and s['segmentation_id']: LOG.warning( "Failed to determine MTU for segment " "%(segment_type)s:%(segment_id)s; network " "%(network_id)s MTU calculation may be not " "accurate", { 'segment_type': segment_type, 'segment_id': s['segmentation_id'], 'network_id': network_db['id'], } ) else: mtu = type_driver.get_mtu(s['physical_network']) # Some drivers, like 'local', may return None; the assumption # then is that for the segment type, MTU has no meaning or # unlimited, and so we should then ignore those values. 
if mtu: mtus.append(mtu) max_mtu = min(mtus) if mtus else p_utils.get_deployment_physnet_mtu() net_mtu = network_db.get('mtu') if validate: # validate that requested mtu conforms to allocated segments if net_mtu and max_mtu and max_mtu < net_mtu: msg = _("Requested MTU is too big, maximum is %d") % max_mtu raise exc.InvalidInput(error_message=msg) # if mtu is not set in database, use the maximum possible return net_mtu or max_mtu def _before_create_network(self, context, network): net_data = network[net_def.RESOURCE_NAME] registry.notify(resources.NETWORK, events.BEFORE_CREATE, self, context=context, network=net_data) def _create_network_db(self, context, network): net_data = network[net_def.RESOURCE_NAME] tenant_id = net_data['tenant_id'] with db_api.CONTEXT_WRITER.using(context): net_db = self.create_network_db(context, network) net_data['id'] = net_db.id self.type_manager.create_network_segments(context, net_data, tenant_id) net_db.mtu = self._get_network_mtu(net_db) result = self._make_network_dict(net_db, process_extensions=False, context=context) self.extension_manager.process_create_network( context, # NOTE(ihrachys) extensions expect no id in the dict {k: v for k, v in net_data.items() if k != 'id'}, result) self._process_l3_create(context, result, net_data) self.type_manager.extend_network_dict_provider(context, result) # Update the transparent vlan if configured if extensions.is_extension_supported(self, 'vlan-transparent'): vlt = vlan_apidef.get_vlan_transparent(net_data) net_db['vlan_transparent'] = vlt result['vlan_transparent'] = vlt if az_def.AZ_HINTS in net_data: self.validate_availability_zones(context, 'network', net_data[az_def.AZ_HINTS]) az_hints = az_validator.convert_az_list_to_string( net_data[az_def.AZ_HINTS]) net_db[az_def.AZ_HINTS] = az_hints result[az_def.AZ_HINTS] = az_hints registry.notify(resources.NETWORK, events.PRECOMMIT_CREATE, self, context=context, request=net_data, network=result) resource_extend.apply_funcs('networks', result, net_db) mech_context = driver_context.NetworkContext(self, context, result) self.mechanism_manager.create_network_precommit(mech_context) return result, mech_context @utils.transaction_guard @db_api.retry_if_session_inactive() def create_network(self, context, network): self._before_create_network(context, network) result, mech_context = self._create_network_db(context, network) return self._after_create_network(context, result, mech_context) def _after_create_network(self, context, result, mech_context): kwargs = {'context': context, 'network': result} registry.notify(resources.NETWORK, events.AFTER_CREATE, self, **kwargs) try: self.mechanism_manager.create_network_postcommit(mech_context) except ml2_exc.MechanismDriverError: with excutils.save_and_reraise_exception(): LOG.error("mechanism_manager.create_network_postcommit " "failed, deleting network '%s'", result['id']) self.delete_network(context, result['id']) return result @utils.transaction_guard @db_api.retry_if_session_inactive() def create_network_bulk(self, context, networks): objects = self._create_bulk_ml2( net_def.RESOURCE_NAME, context, networks) return [obj['result'] for obj in objects] @utils.transaction_guard @db_api.retry_if_session_inactive() def update_network(self, context, id, network): net_data = network[net_def.RESOURCE_NAME] need_network_update_notify = False with db_api.CONTEXT_WRITER.using(context): original_network = self.get_network(context, id) self._update_provider_network_attributes( context, original_network, net_data) updated_network = 
super(Ml2Plugin, self).update_network(context, id, network) self.extension_manager.process_update_network(context, net_data, updated_network) self._process_l3_update(context, updated_network, net_data) # ToDO(QoS): This would change once EngineFacade moves out db_network = self._get_network(context, id) # Expire the db_network in current transaction, so that the join # relationship can be updated. context.session.expire(db_network) if mtuw_apidef.MTU in net_data: db_network.mtu = self._get_network_mtu(db_network) # agents should now update all ports to reflect new MTU need_network_update_notify = True updated_network = self._make_network_dict( db_network, context=context) self.type_manager.extend_network_dict_provider( context, updated_network) registry.publish(resources.NETWORK, events.PRECOMMIT_UPDATE, self, payload=events.DBEventPayload( context, request_body=net_data, states=(original_network,), resource_id=id, desired_state=updated_network)) # TODO(QoS): Move out to the extension framework somehow. need_network_update_notify |= ( qos_consts.QOS_POLICY_ID in net_data and original_network[qos_consts.QOS_POLICY_ID] != updated_network[qos_consts.QOS_POLICY_ID]) mech_context = driver_context.NetworkContext( self, context, updated_network, original_network=original_network) self.mechanism_manager.update_network_precommit(mech_context) # TODO(apech) - handle errors raised by update_network, potentially # by re-calling update_network with the previous attributes. For # now the error is propagated to the caller, which is expected to # either undo/retry the operation or delete the resource. kwargs = {'context': context, 'network': updated_network, 'original_network': original_network} registry.notify(resources.NETWORK, events.AFTER_UPDATE, self, **kwargs) self.mechanism_manager.update_network_postcommit(mech_context) if need_network_update_notify: self.notifier.network_update(context, updated_network) return updated_network @db_api.retry_if_session_inactive() def get_network(self, context, id, fields=None): with db_api.CONTEXT_READER.using(context): net_db = self._get_network(context, id) net_data = self._make_network_dict(net_db, context=context) self.type_manager.extend_network_dict_provider(context, net_data) return db_utils.resource_fields(net_data, fields) @db_api.retry_if_session_inactive() def get_networks(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): with db_api.CONTEXT_READER.using(context): nets_db = super(Ml2Plugin, self)._get_networks( context, filters, None, sorts, limit, marker, page_reverse) net_data = [] for net in nets_db: net_data.append(self._make_network_dict(net, context=context)) self.type_manager.extend_networks_dict_provider(context, net_data) nets = self._filter_nets_provider(context, net_data, filters) return [db_utils.resource_fields(net, fields) for net in nets] def get_network_contexts(self, context, network_ids): """Return a map of network_id to NetworkContext for network_ids.""" net_filters = {'id': list(set(network_ids))} nets_by_netid = { n['id']: n for n in self.get_networks(context, filters=net_filters) } segments_by_netid = segments_db.get_networks_segments( context, list(nets_by_netid.keys())) netctxs_by_netid = { net_id: driver_context.NetworkContext( self, context, nets_by_netid[net_id], segments=segments_by_netid[net_id]) for net_id in nets_by_netid.keys() } return netctxs_by_netid @utils.transaction_guard def delete_network(self, context, id): # the only purpose of this override is to protect this 
from being # called inside of a transaction. return super(Ml2Plugin, self).delete_network(context, id) # NOTE(mgoddard): Use a priority of zero to ensure this handler runs before # other precommit handlers. This is necessary to ensure we avoid another # handler deleting a subresource of the network, e.g. segments. @registry.receives(resources.NETWORK, [events.PRECOMMIT_DELETE], priority=0) def _network_delete_precommit_handler(self, rtype, event, trigger, context, network_id, **kwargs): network = self.get_network(context, network_id) mech_context = driver_context.NetworkContext(self, context, network) # TODO(kevinbenton): move this mech context into something like # a 'delete context' so it's not polluting the real context object setattr(context, '_mech_context', mech_context) self.mechanism_manager.delete_network_precommit( mech_context) @registry.receives(resources.NETWORK, [events.AFTER_DELETE]) def _network_delete_after_delete_handler(self, rtype, event, trigger, context, network, **kwargs): try: self.mechanism_manager.delete_network_postcommit( context._mech_context) except ml2_exc.MechanismDriverError: # TODO(apech) - One or more mechanism driver failed to # delete the network. Ideally we'd notify the caller of # the fact that an error occurred. LOG.error("mechanism_manager.delete_network_postcommit" " failed") self.notifier.network_delete(context, network['id']) def _before_create_subnet(self, context, subnet): subnet_data = subnet[subnet_def.RESOURCE_NAME] registry.notify(resources.SUBNET, events.BEFORE_CREATE, self, context=context, subnet=subnet_data) def _create_subnet_db(self, context, subnet): with db_api.CONTEXT_WRITER.using(context): result, net_db, ipam_sub = self._create_subnet_precommit( context, subnet) self.extension_manager.process_create_subnet( context, subnet[subnet_def.RESOURCE_NAME], result) network = self._make_network_dict(net_db, context=context) self.type_manager.extend_network_dict_provider(context, network) mech_context = driver_context.SubnetContext(self, context, result, network) self.mechanism_manager.create_subnet_precommit(mech_context) return result, mech_context @utils.transaction_guard @db_api.retry_if_session_inactive() def create_subnet(self, context, subnet): self._before_create_subnet(context, subnet) result, mech_context = self._create_subnet_db(context, subnet) return self._after_create_subnet(context, result, mech_context) def _after_create_subnet(self, context, result, mech_context): # db base plugin post commit ops self._create_subnet_postcommit(context, result) kwargs = {'context': context, 'subnet': result} registry.notify(resources.SUBNET, events.AFTER_CREATE, self, **kwargs) try: self.mechanism_manager.create_subnet_postcommit(mech_context) except ml2_exc.MechanismDriverError: with excutils.save_and_reraise_exception(): LOG.error("mechanism_manager.create_subnet_postcommit " "failed, deleting subnet '%s'", result['id']) self.delete_subnet(context, result['id']) return result @utils.transaction_guard @db_api.retry_if_session_inactive() def create_subnet_bulk(self, context, subnets): objects = self._create_bulk_ml2( subnet_def.RESOURCE_NAME, context, subnets) return [obj['result'] for obj in objects] @utils.transaction_guard @db_api.retry_if_session_inactive() def update_subnet(self, context, id, subnet): with db_api.CONTEXT_WRITER.using(context): updated_subnet, original_subnet = self._update_subnet_precommit( context, id, subnet) self.extension_manager.process_update_subnet( context, subnet[subnet_def.RESOURCE_NAME], updated_subnet) 
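            # Contract relied on throughout this module (reflected in the
            # code below, stated here for orientation): *_precommit driver
            # calls run inside the DB transaction, so a MechanismDriverError
            # rolls the whole change back; *_postcommit calls run after
            # commit, so failures cannot be rolled back and are compensated
            # instead (logged, re-raised, or the resource deleted).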
updated_subnet = self.get_subnet(context, id) mech_context = driver_context.SubnetContext( self, context, updated_subnet, network=None, original_subnet=original_subnet) self.mechanism_manager.update_subnet_precommit(mech_context) self._update_subnet_postcommit(context, original_subnet, updated_subnet) # TODO(apech) - handle errors raised by update_subnet, potentially # by re-calling update_subnet with the previous attributes. For # now the error is propagated to the caller, which is expected to # either undo/retry the operation or delete the resource. self.mechanism_manager.update_subnet_postcommit(mech_context) return updated_subnet @utils.transaction_guard def delete_subnet(self, context, id): # the only purpose of this override is to protect this from being # called inside of a transaction. return super(Ml2Plugin, self).delete_subnet(context, id) # NOTE(mgoddard): Use a priority of zero to ensure this handler runs before # other precommit handlers. This is necessary to ensure we avoid another # handler deleting a subresource of the subnet. @registry.receives(resources.SUBNET, [events.PRECOMMIT_DELETE], priority=0) def _subnet_delete_precommit_handler(self, rtype, event, trigger, context, subnet_id, **kwargs): subnet_obj = self._get_subnet_object(context, subnet_id) subnet = self._make_subnet_dict(subnet_obj, context=context) network = self.get_network(context, subnet['network_id']) mech_context = driver_context.SubnetContext(self, context, subnet, network) # TODO(kevinbenton): move this mech context into something like # a 'delete context' so it's not polluting the real context object setattr(context, '_mech_context', mech_context) self.mechanism_manager.delete_subnet_precommit(mech_context) @registry.receives(resources.SUBNET, [events.AFTER_DELETE]) def _subnet_delete_after_delete_handler(self, rtype, event, trigger, context, subnet, **kwargs): try: self.mechanism_manager.delete_subnet_postcommit( context._mech_context) except ml2_exc.MechanismDriverError: # TODO(apech) - One or more mechanism driver failed to # delete the subnet. Ideally we'd notify the caller of # the fact that an error occurred. LOG.error("mechanism_manager.delete_subnet_postcommit failed") # TODO(yalei) - will be simplified after security group and address pair be # converted to ext driver too. def _portsec_ext_port_create_processing(self, context, port_data, port): attrs = port[port_def.RESOURCE_NAME] port_security = ((port_data.get(psec.PORTSECURITY) is None) or port_data[psec.PORTSECURITY]) # allowed address pair checks if self._check_update_has_allowed_address_pairs(port): if not port_security: raise addr_exc.AddressPairAndPortSecurityRequired() else: # remove ATTR_NOT_SPECIFIED attrs[addr_apidef.ADDRESS_PAIRS] = [] if port_security: self._ensure_default_security_group_on_port(context, port) elif self._check_update_has_security_groups(port): raise psec_exc.PortSecurityAndIPRequiredForSecurityGroups() def _setup_dhcp_agent_provisioning_component(self, context, port): subnet_ids = [f['subnet_id'] for f in port['fixed_ips']] if (db.is_dhcp_active_on_any_subnet(context, subnet_ids) and len(self.get_dhcp_agents_hosting_networks(context, [port['network_id']]))): # the agents will tell us when the dhcp config is ready so we setup # a provisioning component to prevent the port from going ACTIVE # until a dhcp_ready_on_port notification is received. 
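            # Illustrative counterpart (this happens in the DHCP RPC
            # handlers, not in this module): once the DHCP agent reports the
            # port ready, the block is lifted roughly via
            #   provisioning_blocks.provisioning_complete(
            #       context, port['id'], resources.PORT,
            #       provisioning_blocks.DHCP_ENTITY)
            # and the port may go ACTIVE once all registered provisioning
            # components have completed.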
            provisioning_blocks.add_provisioning_component(
                context, port['id'], resources.PORT,
                provisioning_blocks.DHCP_ENTITY)
        else:
            provisioning_blocks.remove_provisioning_component(
                context, port['id'], resources.PORT,
                provisioning_blocks.DHCP_ENTITY)

    def _before_create_port(self, context, port):
        attrs = port[port_def.RESOURCE_NAME]
        if not attrs.get('status'):
            attrs['status'] = const.PORT_STATUS_DOWN
        registry.notify(resources.PORT, events.BEFORE_CREATE, self,
                        context=context, port=attrs)
        # NOTE(kevinbenton): triggered outside of transaction since it
        # emits 'AFTER' events if it creates.
        self._ensure_default_security_group(context, attrs['tenant_id'])

    def _create_port_db(self, context, port):
        attrs = port[port_def.RESOURCE_NAME]
        with db_api.CONTEXT_WRITER.using(context):
            dhcp_opts = attrs.get(edo_ext.EXTRADHCPOPTS, [])
            port_db = self.create_port_db(context, port)
            result = self._make_port_dict(port_db, process_extensions=False)
            self.extension_manager.process_create_port(context, attrs, result)
            self._portsec_ext_port_create_processing(context, result, port)
            # security group ids must be retrieved only after the port
            # security check above has run
            sgs = self._get_security_groups_on_port(context, port)
            self._process_port_create_security_group(context, result, sgs)
            network = self.get_network(context, result['network_id'])
            binding = db.add_port_binding(context, result['id'])
            mech_context = driver_context.PortContext(self, context, result,
                                                      network, binding, None)
            self._process_port_binding(mech_context, attrs)
            result[addr_apidef.ADDRESS_PAIRS] = (
                self._process_create_allowed_address_pairs(
                    context, result,
                    attrs.get(addr_apidef.ADDRESS_PAIRS)))
            self._process_port_create_extra_dhcp_opts(context, result,
                                                      dhcp_opts)
            kwargs = {'context': context, 'port': result}
            registry.notify(
                resources.PORT, events.PRECOMMIT_CREATE, self, **kwargs)
            self.mechanism_manager.create_port_precommit(mech_context)
            self._setup_dhcp_agent_provisioning_component(context, result)
        resource_extend.apply_funcs('ports', result, port_db)
        return result, mech_context

    @utils.transaction_guard
    @db_api.retry_if_session_inactive()
    def create_port(self, context, port):
        self._before_create_port(context, port)
        result, mech_context = self._create_port_db(context, port)
        return self._after_create_port(context, result, mech_context)

    def _after_create_port(self, context, result, mech_context):
        # notify any plugin that is interested in port create events
        kwargs = {'context': context, 'port': result}
        registry.notify(resources.PORT, events.AFTER_CREATE, self, **kwargs)
        try:
            self.mechanism_manager.create_port_postcommit(mech_context)
        except ml2_exc.MechanismDriverError:
            with excutils.save_and_reraise_exception():
                LOG.error("mechanism_manager.create_port_postcommit "
                          "failed, deleting port '%s'", result['id'])
                self.delete_port(context, result['id'], l3_port_check=False)
        try:
            bound_context = self._bind_port_if_needed(mech_context)
        except ml2_exc.MechanismDriverError:
            with excutils.save_and_reraise_exception():
                LOG.error("_bind_port_if_needed "
                          "failed, deleting port '%s'", result['id'])
                self.delete_port(context, result['id'], l3_port_check=False)
        return bound_context.current

    @utils.transaction_guard
    @db_api.retry_if_session_inactive()
    def create_port_bulk(self, context, ports):
        # TODO(njohnston): Break this up into smaller functions.
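        # Illustrative request shape handled by this method (hypothetical
        # values; each entry mirrors a single-port create payload):
        #
        #   {'ports': [{'port': {'network_id': '<net-uuid>',
        #                        'tenant_id': '<project-uuid>',
        #                        'admin_state_up': True}},
        #              ...]}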
port_list = ports.get('ports') for port in port_list: self._before_create_port(context, port) port_data = [] network_cache = dict() macs = self._generate_macs(len(port_list)) with db_api.CONTEXT_WRITER.using(context): for port in port_list: # Set up the port request dict pdata = port.get('port') project_id = pdata.get('project_id') or pdata.get('tenant_id') security_group_ids = pdata.get('security_groups') if security_group_ids is const.ATTR_NOT_SPECIFIED: security_group_ids = None else: security_group_ids = set(security_group_ids) if pdata.get('device_owner'): self._enforce_device_owner_not_router_intf_or_device_id( context, pdata.get('device_owner'), pdata.get('device_id'), project_id) bulk_port_data = dict( project_id=project_id, name=pdata.get('name'), network_id=pdata.get('network_id'), admin_state_up=pdata.get('admin_state_up'), status=pdata.get('status', const.PORT_STATUS_ACTIVE), device_id=pdata.get('device_id'), device_owner=pdata.get('device_owner'), description=pdata.get('description')) # Ensure that the networks exist. network_id = pdata.get('network_id') if network_id not in network_cache: network = self.get_network(context, network_id) network_cache[network_id] = network else: network = network_cache[network_id] # Determine the MAC address raw_mac_address = pdata.get('mac_address', const.ATTR_NOT_SPECIFIED) if raw_mac_address is const.ATTR_NOT_SPECIFIED: raw_mac_address = macs.pop() elif self._is_mac_in_use(context, network_id, raw_mac_address): raise exc.MacAddressInUse(net_id=network_id, mac=raw_mac_address) eui_mac_address = netaddr.EUI(raw_mac_address, dialect=eui48.mac_unix_expanded) port['port']['mac_address'] = str(eui_mac_address) # Create the Port object db_port_obj = ports_obj.Port(context, mac_address=eui_mac_address, id=uuidutils.generate_uuid(), **bulk_port_data) db_port_obj.create() # Call IPAM to allocate IP addresses try: # TODO(njohnston): IPAM allocation needs to be revamped to # be bulk-friendly. ips = self.ipam.allocate_ips_for_port_and_store( context, port, db_port_obj['id']) ipam_fixed_ips = [] for ip in ips: fixed_ip = ports_obj.IPAllocation( port_id=db_port_obj['id'], subnet_id=ip['subnet_id'], network_id=network_id, ip_address=ip['ip_address']) ipam_fixed_ips.append(fixed_ip) db_port_obj['fixed_ips'] = ipam_fixed_ips db_port_obj['ip_allocation'] = (ipalloc_apidef. IP_ALLOCATION_IMMEDIATE) except ipam_exc.DeferIpam: db_port_obj['ip_allocation'] = (ipalloc_apidef. IP_ALLOCATION_DEFERRED) fixed_ips = pdata.get('fixed_ips') if validators.is_attr_set(fixed_ips) and not fixed_ips: # [] was passed explicitly as fixed_ips: unaddressed port. db_port_obj['ip_allocation'] = (ipalloc_apidef. 
IP_ALLOCATION_NONE) # Make port dict port_dict = self._make_port_dict(db_port_obj, process_extensions=False) port_dict[portbindings.HOST_ID] = pdata.get( portbindings.HOST_ID) # Activities immediately post-port-creation self.extension_manager.process_create_port(context, pdata, port_dict) self._portsec_ext_port_create_processing(context, port_dict, port) sgs = self._get_security_groups_on_port(context, port) self._process_port_create_security_group(context, port_dict, sgs) # process port binding binding = db.add_port_binding(context, port_dict['id']) binding_host = pdata.get( portbindings.HOST_ID, const.ATTR_NOT_SPECIFIED) if binding_host != const.ATTR_NOT_SPECIFIED: binding["host"] = binding_host mech_context = driver_context.PortContext(self, context, port_dict, network, binding, None) self._process_port_binding(mech_context, port_dict) # process allowed address pairs db_port_obj[addr_apidef.ADDRESS_PAIRS] = ( self._process_create_allowed_address_pairs( context, port_dict, port_dict.get(addr_apidef.ADDRESS_PAIRS))) # handle DHCP setup dhcp_opts = port_dict.get(edo_ext.EXTRADHCPOPTS, []) self._process_port_create_extra_dhcp_opts(context, port_dict, dhcp_opts) # send PRECOMMIT_CREATE notification kwargs = {'context': context, 'port': db_port_obj} registry.notify( resources.PORT, events.PRECOMMIT_CREATE, self, **kwargs) self.mechanism_manager.create_port_precommit(mech_context) # handle DHCP agent provisioning self._setup_dhcp_agent_provisioning_component(context, port_dict) port_data.append( { 'id': db_port_obj['id'], 'port_obj': db_port_obj, 'mech_context': mech_context, 'port_dict': port_dict }) # Perform actions after the transaction is committed completed_ports = [] for port in port_data: resource_extend.apply_funcs('ports', port['port_dict'], port['port_obj'].db_obj) completed_ports.append( self._after_create_port(context, port['port_dict'], port['mech_context'])) return completed_ports # TODO(yalei) - will be simplified after security group and address pair be # converted to ext driver too. def _portsec_ext_port_update_processing(self, updated_port, context, port, id): port_security = ((updated_port.get(psec.PORTSECURITY) is None) or updated_port[psec.PORTSECURITY]) if port_security: return # check the address-pairs if self._check_update_has_allowed_address_pairs(port): # has address pairs in request raise addr_exc.AddressPairAndPortSecurityRequired() elif not self._check_update_deletes_allowed_address_pairs(port): # not a request for deleting the address-pairs updated_port[addr_apidef.ADDRESS_PAIRS] = ( self.get_allowed_address_pairs(context, id)) # check if address pairs has been in db, if address pairs could # be put in extension driver, we can refine here. if updated_port[addr_apidef.ADDRESS_PAIRS]: raise addr_exc.AddressPairAndPortSecurityRequired() # checks if security groups were updated adding/modifying # security groups, port security is set if self._check_update_has_security_groups(port): raise psec_exc.PortSecurityAndIPRequiredForSecurityGroups() elif not self._check_update_deletes_security_groups(port): if not extensions.is_extension_supported(self, 'security-group'): return # Update did not have security groups passed in. Check # that port does not have any security groups already on it. 
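            # Hypothetical request this path guards against: an update with
            # {'port': {'port_security_enabled': False}} on a port that
            # still has security groups bound must raise
            # PortSecurityPortHasSecurityGroup (see the check below).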
            filters = {'port_id': [id]}
            security_groups = (
                super(Ml2Plugin, self)._get_port_security_group_bindings(
                    context, filters)
            )
            if security_groups:
                raise psec_exc.PortSecurityPortHasSecurityGroup()

    @utils.transaction_guard
    @db_api.retry_if_session_inactive()
    def update_port(self, context, id, port):
        attrs = port[port_def.RESOURCE_NAME]
        need_port_update_notify = False
        bound_mech_contexts = []
        original_port = self.get_port(context, id)
        registry.notify(resources.PORT, events.BEFORE_UPDATE, self,
                        context=context, port=attrs,
                        original_port=original_port)
        with db_api.CONTEXT_WRITER.using(context):
            port_db = self._get_port(context, id)
            binding = p_utils.get_port_binding_by_status_and_host(
                port_db.port_bindings, const.ACTIVE)
            if not binding:
                raise exc.PortNotFound(port_id=id)
            mac_address_updated = self._check_mac_update_allowed(
                port_db, attrs, binding)
            mac_address_updated |= self._reset_mac_for_direct_physical(
                port_db, attrs, binding)
            need_port_update_notify |= mac_address_updated
            original_port = self._make_port_dict(port_db)
            updated_port = super(Ml2Plugin, self).update_port(context, id,
                                                              port)
            self.extension_manager.process_update_port(context, attrs,
                                                       updated_port)
            self._portsec_ext_port_update_processing(updated_port, context,
                                                     port, id)
            if (psec.PORTSECURITY in attrs) and (
                    original_port[psec.PORTSECURITY] !=
                    updated_port[psec.PORTSECURITY]):
                need_port_update_notify = True
            # TODO(QoS): Move out to the extension framework somehow.
            # Follow https://review.opendev.org/#/c/169223 for a solution.
            if (qos_consts.QOS_POLICY_ID in attrs and
                    original_port[qos_consts.QOS_POLICY_ID] !=
                    updated_port[qos_consts.QOS_POLICY_ID]):
                need_port_update_notify = True
            if addr_apidef.ADDRESS_PAIRS in attrs:
                need_port_update_notify |= (
                    self.update_address_pairs_on_port(context, id, port,
                                                      original_port,
                                                      updated_port))
            need_port_update_notify |= self.update_security_group_on_port(
                context, id, port, original_port, updated_port)
            network = self.get_network(context, original_port['network_id'])
            need_port_update_notify |= self._update_extra_dhcp_opts_on_port(
                context, id, port, updated_port)
            levels = db.get_binding_level_objs(context, id, binding.host)
            # One of the operations above may have altered the model, so
            # call _make_port_dict again to ensure the latest state is
            # reflected and mech drivers, callback handlers, and the API
            # caller all see it.
            # We expire here to reflect changed relationships on the obj.
            # Repeatable read will ensure we still get the state from this
            # transaction in spite of concurrent updates/deletes.
            context.session.expire(port_db)
            updated_port.update(self._make_port_dict(port_db))
            mech_context = driver_context.PortContext(
                self, context, updated_port, network, binding, levels,
                original_port=original_port)
            need_port_update_notify |= self._process_port_binding(
                mech_context, attrs)
            registry.publish(
                resources.PORT, events.PRECOMMIT_UPDATE, self,
                payload=events.DBEventPayload(
                    context, request_body=attrs,
                    states=(original_port,), resource_id=id,
                    desired_state=updated_port))
            # For DVR router interface ports we need to retrieve the
            # DVRPortbinding context instead of the normal port context.
            # The normal Portbinding context does not have the status
            # of the ports that is required by l2pop to process the
            # postcommit events.
            # NOTE: Sometimes during the update_port call, the DVR router
            # interface port may not have the port binding, so we cannot
            # create a generic binding list that will address both the
            # DVR and non-DVR cases here.
            # TODO(Swami): This code needs to be revisited.
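            # Sketch of the distinction handled below (based on the code
            # that follows, not on new behavior): a DVR router interface
            # port carries one binding per hosting agent, so one PortContext
            # is built per distributed binding, while any other port has a
            # single ACTIVE binding and therefore a single context.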
if port_db['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: dist_binding_list = db.get_distributed_port_bindings(context, id) for dist_binding in dist_binding_list: levels = db.get_binding_level_objs(context, id, dist_binding.host) dist_mech_context = driver_context.PortContext( self, context, updated_port, network, dist_binding, levels, original_port=original_port) self.mechanism_manager.update_port_precommit( dist_mech_context) bound_mech_contexts.append(dist_mech_context) else: self.mechanism_manager.update_port_precommit(mech_context) if any(updated_port[k] != original_port[k] for k in ('fixed_ips', 'mac_address')): # only add block if fixed_ips or mac_address changed self._setup_dhcp_agent_provisioning_component( context, updated_port) bound_mech_contexts.append(mech_context) # Notifications must be sent after the above transaction is complete kwargs = { 'context': context, 'port': updated_port, 'mac_address_updated': mac_address_updated, 'original_port': original_port, } registry.notify(resources.PORT, events.AFTER_UPDATE, self, **kwargs) # Note that DVR Interface ports will have bindings on # multiple hosts, and so will have multiple mech_contexts, # while other ports typically have just one. # Since bound_mech_contexts has both the DVR and non-DVR # contexts we can manage just with a single for loop. try: for mech_context in bound_mech_contexts: self.mechanism_manager.update_port_postcommit( mech_context) except ml2_exc.MechanismDriverError: LOG.error("mechanism_manager.update_port_postcommit " "failed for port %s", id) need_port_update_notify |= self.is_security_group_member_updated( context, original_port, updated_port) if original_port['admin_state_up'] != updated_port['admin_state_up']: need_port_update_notify = True if original_port['status'] != updated_port['status']: need_port_update_notify = True # NOTE: In the case of DVR ports, the port-binding is done after # router scheduling when sync_routers is called and so this call # below may not be required for DVR routed interfaces. But still # since we don't have the mech_context for the DVR router interfaces # at certain times, we just pass the port-context and return it, so # that we don't disturb other methods that are expecting a return # value. bound_context = self._bind_port_if_needed( mech_context, allow_notify=True, need_notify=need_port_update_notify) return bound_context.current def _process_distributed_port_binding(self, mech_context, context, attrs): plugin_context = mech_context._plugin_context binding = mech_context._binding port = mech_context.current port_id = port['id'] if binding.vif_type != portbindings.VIF_TYPE_UNBOUND: binding.vif_details = '' binding.vif_type = portbindings.VIF_TYPE_UNBOUND if binding.host: db.clear_binding_levels(plugin_context, port_id, binding.host) binding.host = '' self._update_port_dict_binding(port, binding) binding.host = attrs and attrs.get(portbindings.HOST_ID) binding.router_id = attrs and attrs.get('device_id') # merge into session to reflect changes binding.persist_state_to_session(plugin_context.session) def delete_distributed_port_bindings_by_router_id(self, context, router_id): for binding in (context.session.query(models.DistributedPortBinding). 
filter_by(router_id=router_id)): db.clear_binding_levels(context, binding.port_id, binding.host) context.session.delete(binding) @utils.transaction_guard @db_api.retry_if_session_inactive() def update_distributed_port_binding(self, context, id, port): attrs = port[port_def.RESOURCE_NAME] host = attrs and attrs.get(portbindings.HOST_ID) host_set = validators.is_attr_set(host) if not host_set: LOG.error("No Host supplied to bind DVR Port %s", id) return binding = db.get_distributed_port_binding_by_host(context, id, host) device_id = attrs and attrs.get('device_id') router_id = binding and binding.get('router_id') update_required = ( not binding or binding.vif_type == portbindings.VIF_TYPE_BINDING_FAILED or router_id != device_id) if update_required: try: with db_api.CONTEXT_WRITER.using(context): orig_port = self.get_port(context, id) if not binding: binding = db.ensure_distributed_port_binding( context, id, host, router_id=device_id) network = self.get_network(context, orig_port['network_id']) levels = db.get_binding_level_objs(context, id, host) mech_context = driver_context.PortContext( self, context, orig_port, network, binding, levels, original_port=orig_port) self._process_distributed_port_binding( mech_context, context, attrs) except (os_db_exception.DBReferenceError, exc.PortNotFound): LOG.debug("DVR Port %s has been deleted concurrently", id) return self._bind_port_if_needed(mech_context) def _pre_delete_port(self, context, port_id, port_check): """Do some preliminary operations before deleting the port.""" LOG.debug("Deleting port %s", port_id) try: # notify interested parties of imminent port deletion; # a failure here prevents the operation from happening registry.publish( resources.PORT, events.BEFORE_DELETE, self, payload=events.DBEventPayload( context, metadata={'port_check': port_check}, resource_id=port_id)) except exceptions.CallbackFailure as e: # NOTE(armax): preserve old check's behavior if len(e.errors) == 1: raise e.errors[0].error raise exc.ServicePortInUse(port_id=port_id, reason=e) @utils.transaction_guard @db_api.retry_if_session_inactive() def delete_port(self, context, id, l3_port_check=True): self._pre_delete_port(context, id, l3_port_check) # TODO(armax): get rid of the l3 dependency in the with block router_ids = [] l3plugin = directory.get_plugin(plugin_constants.L3) with db_api.CONTEXT_WRITER.using(context): try: port_db = self._get_port(context, id) binding = p_utils.get_port_binding_by_status_and_host( port_db.port_bindings, const.ACTIVE, raise_if_not_found=True, port_id=id) except exc.PortNotFound: LOG.debug("The port '%s' was deleted", id) return port = self._make_port_dict(port_db) network = self.get_network(context, port['network_id']) bound_mech_contexts = [] kwargs = { 'context': context, 'id': id, 'network': network, 'port': port, 'port_db': port_db, 'bindings': binding, } device_owner = port['device_owner'] if device_owner == const.DEVICE_OWNER_DVR_INTERFACE: bindings = db.get_distributed_port_bindings(context, id) for bind in bindings: levels = db.get_binding_level_objs(context, id, bind.host) kwargs['bind'] = bind kwargs['levels'] = levels registry.notify(resources.PORT, events.PRECOMMIT_DELETE, self, **kwargs) mech_context = driver_context.PortContext( self, context, port, network, bind, levels) self.mechanism_manager.delete_port_precommit(mech_context) bound_mech_contexts.append(mech_context) else: levels = db.get_binding_level_objs(context, id, binding.host) kwargs['bind'] = None kwargs['levels'] = levels registry.notify(resources.PORT, 
events.PRECOMMIT_DELETE, self, **kwargs) mech_context = driver_context.PortContext( self, context, port, network, binding, levels) self.mechanism_manager.delete_port_precommit(mech_context) bound_mech_contexts.append(mech_context) if l3plugin: router_ids = l3plugin.disassociate_floatingips( context, id, do_notify=False) LOG.debug("Calling delete_port for %(port_id)s owned by %(owner)s", {"port_id": id, "owner": device_owner}) super(Ml2Plugin, self).delete_port(context, id) self._post_delete_port( context, port, router_ids, bound_mech_contexts) def _post_delete_port(self, context, port, router_ids, bound_mech_contexts): kwargs = { 'context': context, 'port': port, 'router_ids': router_ids, } registry.notify(resources.PORT, events.AFTER_DELETE, self, **kwargs) try: # Note that DVR Interface ports will have bindings on # multiple hosts, and so will have multiple mech_contexts, # while other ports typically have just one. for mech_context in bound_mech_contexts: self.mechanism_manager.delete_port_postcommit(mech_context) except ml2_exc.MechanismDriverError: # TODO(apech) - One or more mechanism driver failed to # delete the port. Ideally we'd notify the caller of the # fact that an error occurred. LOG.error("mechanism_manager.delete_port_postcommit failed for" " port %s", port['id']) self.notifier.port_delete(context, port['id']) @utils.transaction_guard @db_api.retry_if_session_inactive(context_var_name='plugin_context') def get_bound_port_context(self, plugin_context, port_id, host=None, cached_networks=None): with db_api.CONTEXT_READER.using(plugin_context) as session: try: port_db = (session.query(models_v2.Port). enable_eagerloads(False). filter(models_v2.Port.id.startswith(port_id)). one()) except sa_exc.NoResultFound: LOG.info("No ports have port_id starting with %s", port_id) return except sa_exc.MultipleResultsFound: LOG.error("Multiple ports have port_id starting with %s", port_id) return port = self._make_port_dict(port_db) network = (cached_networks or {}).get(port['network_id']) if not network: network = self.get_network(plugin_context, port['network_id']) if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: binding = db.get_distributed_port_binding_by_host( plugin_context, port['id'], host) if not binding: LOG.error("Binding info for DVR port %s not found", port_id) return None levels = db.get_binding_level_objs( plugin_context, port_db.id, host) port_context = driver_context.PortContext( self, plugin_context, port, network, binding, levels) else: # since eager loads are disabled in port_db query # related attribute port_binding could disappear in # concurrent port deletion. # It's not an error condition. 
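                # (Assumed consequence of the note above: if the port row
                # vanished concurrently, port_bindings resolves to an empty
                # collection, the lookup below yields no binding, and the
                # caller simply gets None instead of an exception.)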
binding = p_utils.get_port_binding_by_status_and_host( port_db.port_bindings, const.ACTIVE) if not binding: LOG.info("Binding info for port %s was not found, " "it might have been deleted already.", port_id) return levels = db.get_binding_level_objs( plugin_context, port_db.id, binding.host) port_context = driver_context.PortContext( self, plugin_context, port, network, binding, levels) return self._bind_port_if_needed(port_context) @utils.transaction_guard @db_api.retry_if_session_inactive(context_var_name='plugin_context') def get_bound_ports_contexts(self, plugin_context, dev_ids, host=None): result = {} with db_api.CONTEXT_READER.using(plugin_context): dev_to_full_pids = db.partial_port_ids_to_full_ids( plugin_context, dev_ids) # get all port objects for IDs port_dbs_by_id = db.get_port_db_objects( plugin_context, dev_to_full_pids.values()) # get all networks for PortContext construction netctxs_by_netid = self.get_network_contexts( plugin_context, {p.network_id for p in port_dbs_by_id.values()}) for dev_id in dev_ids: port_id = dev_to_full_pids.get(dev_id) port_db = port_dbs_by_id.get(port_id) if (not port_id or not port_db or port_db.network_id not in netctxs_by_netid): result[dev_id] = None continue port = self._make_port_dict(port_db) if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: binding = db.get_distributed_port_binding_by_host( plugin_context, port['id'], host) bindlevelhost_match = host else: binding = p_utils.get_port_binding_by_status_and_host( port_db.port_bindings, const.ACTIVE) bindlevelhost_match = binding.host if binding else None if not binding: LOG.info("Binding info for port %s was not found, " "it might have been deleted already.", port_id) result[dev_id] = None continue levels = [l for l in port_db.binding_levels if l.host == bindlevelhost_match] levels = sorted(levels, key=lambda l: l.level) network_ctx = netctxs_by_netid.get(port_db.network_id) port_context = driver_context.PortContext( self, plugin_context, port, network_ctx, binding, levels) result[dev_id] = port_context return {d: self._bind_port_if_needed(pctx) if pctx else None for d, pctx in result.items()} def update_port_status(self, context, port_id, status, host=None, network=None): """Update port status Returns port_id (non-truncated uuid) if the port exists. Otherwise returns None. 
'network' is deprecated and has no effect """ full = db.partial_port_ids_to_full_ids(context, [port_id]) if port_id not in full: return None port_id = full[port_id] return self.update_port_statuses( context, {port_id: status}, host)[port_id] @utils.transaction_guard @db_api.retry_if_session_inactive() def update_port_statuses(self, context, port_id_to_status, host=None): result = {} port_ids = port_id_to_status.keys() port_dbs_by_id = db.get_port_db_objects(context, port_ids) for port_id, status in port_id_to_status.items(): if not port_dbs_by_id.get(port_id): LOG.debug("Port %(port)s update to %(val)s by agent not found", {'port': port_id, 'val': status}) result[port_id] = None continue result[port_id] = self._safe_update_individual_port_db_status( context, port_dbs_by_id[port_id], status, host) return result def _safe_update_individual_port_db_status(self, context, port, status, host): port_id = port.id try: return self._update_individual_port_db_status( context, port, status, host) except Exception: with excutils.save_and_reraise_exception() as ectx: # don't reraise if port doesn't exist anymore ectx.reraise = bool(db.get_port(context, port_id)) def _update_individual_port_db_status(self, context, port, status, host): updated = False network = None port_id = port.id if ((port.status != status and port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE) or port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE): attr = { 'id': port.id, portbindings.HOST_ID: host, 'status': status } registry.notify(resources.PORT, events.BEFORE_UPDATE, self, original_port=port, context=context, port=attr) with db_api.CONTEXT_WRITER.using(context): context.session.add(port) # bring port into writer session if (port.status != status and port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE): original_port = self._make_port_dict(port) port.status = status # explicit flush before _make_port_dict to ensure extensions # listening for db events can modify the port if necessary context.session.flush() updated_port = self._make_port_dict(port) binding = p_utils.get_port_binding_by_status_and_host( port.port_bindings, const.ACTIVE, raise_if_not_found=True, port_id=port_id) levels = db.get_binding_level_objs( context, port.id, binding.host) mech_context = driver_context.PortContext( self, context, updated_port, network, binding, levels, original_port=original_port) self.mechanism_manager.update_port_precommit(mech_context) updated = True elif port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: binding = db.get_distributed_port_binding_by_host( context, port['id'], host) if not binding: return if binding.status != status: binding.status = status updated = True if (updated and port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE): with db_api.CONTEXT_WRITER.using(context): port = db.get_port(context, port_id) if not port: LOG.warning("Port %s not found during update", port_id) return original_port = self._make_port_dict(port) network = network or self.get_network( context, original_port['network_id']) port.status = db.generate_distributed_port_status(context, port['id']) updated_port = self._make_port_dict(port) levels = db.get_binding_level_objs(context, port_id, host) mech_context = (driver_context.PortContext( self, context, updated_port, network, binding, levels, original_port=original_port)) self.mechanism_manager.update_port_precommit(mech_context) if updated: self.mechanism_manager.update_port_postcommit(mech_context) kwargs = {'context': context, 'port': mech_context.current, 'original_port': 
original_port} registry.notify(resources.PORT, events.AFTER_UPDATE, self, **kwargs) if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: db.delete_distributed_port_binding_if_stale(context, binding) return port['id'] @db_api.retry_if_session_inactive() def port_bound_to_host(self, context, port_id, host): if not host: return port = db.get_port(context, port_id) if not port: LOG.debug("No Port match for: %s", port_id) return if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: bindings = db.get_distributed_port_bindings(context, port_id) for b in bindings: if b.host == host: return port LOG.debug("No binding found for DVR port %s", port['id']) return else: port_host = db.get_port_binding_host(context, port_id) return port if (port_host == host) else None @db_api.retry_if_session_inactive() def get_ports_from_devices(self, context, devices): port_ids_to_devices = dict( (self._device_to_port_id(context, device), device) for device in devices) port_ids = list(port_ids_to_devices.keys()) ports = db.get_ports_and_sgs(context, port_ids) for port in ports: # map back to original requested id port_id = next((port_id for port_id in port_ids if port['id'].startswith(port_id)), None) port['device'] = port_ids_to_devices.get(port_id) return ports @staticmethod def _device_to_port_id(context, device): # REVISIT(rkukura): Consider calling into MechanismDrivers to # process device names, or having MechanismDrivers supply list # of device prefixes to strip. for prefix in const.INTERFACE_PREFIXES: if device.startswith(prefix): return device[len(prefix):] # REVISIT(irenab): Consider calling into bound MD to # handle the get_device_details RPC if not uuidutils.is_uuid_like(device): port = db.get_port_from_device_mac(context, device) if port: return port.id return device def _get_ports_query(self, context, filters=None, *args, **kwargs): filters = filters or {} security_groups = filters.pop("security_groups", None) limit = kwargs.pop('limit', None) if security_groups: port_bindings = self._get_port_security_group_bindings( context, filters={'security_group_id': security_groups}) if 'id' in filters: filters['id'] = [entry['port_id'] for entry in port_bindings if entry['port_id'] in filters['id']] else: filters['id'] = [entry['port_id'] for entry in port_bindings] fixed_ips = filters.get('fixed_ips', {}) ip_addresses_s = fixed_ips.get('ip_address_substr') query = super(Ml2Plugin, self)._get_ports_query(context, filters, *args, **kwargs) if ip_addresses_s: substr_filter = or_(*[models_v2.Port.fixed_ips.any( models_v2.IPAllocation.ip_address.like('%%%s%%' % ip)) for ip in ip_addresses_s]) query = query.filter(substr_filter) if limit: query = query.limit(limit) return query def filter_hosts_with_network_access( self, context, network_id, candidate_hosts): segments = segments_db.get_network_segments(context, network_id) return self.mechanism_manager.filter_hosts_with_segment_access( context, segments, candidate_hosts, self.get_agents) def check_segment_for_agent(self, segment, agent): for mech_driver in self.mechanism_manager.ordered_mech_drivers: driver_agent_type = getattr(mech_driver.obj, 'agent_type', None) if driver_agent_type and driver_agent_type == agent['agent_type']: if mech_driver.obj.check_segment_for_agent(segment, agent): return True return False @registry.receives(resources.SEGMENT, [events.AFTER_DELETE]) def _handle_after_delete_segment_change( self, rtype, event, trigger, payload=None): # TODO(boden); refactor into _handle_segment_change once all # event types use payloads return 
self._handle_segment_change( rtype, event, trigger, payload.context, payload.latest_state) @registry.receives(resources.SEGMENT, (events.PRECOMMIT_CREATE, events.PRECOMMIT_DELETE, events.AFTER_CREATE)) def _handle_segment_change(self, rtype, event, trigger, context, segment): if (event == events.PRECOMMIT_CREATE and not isinstance(trigger, segments_plugin.Plugin)): # TODO(xiaohhui): Now, when create network, ml2 will reserve # segment and trigger this event handler. This event handler # will reserve segment again, which will lead to error as the # segment has already been reserved. This check could be removed # by unifying segment creation procedure. return network_id = segment.get('network_id') if event == events.PRECOMMIT_CREATE: updated_segment = self.type_manager.reserve_network_segment( context, segment) # The segmentation id might be from ML2 type driver, update it # in the original segment. segment[api.SEGMENTATION_ID] = updated_segment[api.SEGMENTATION_ID] elif event == events.PRECOMMIT_DELETE: self.type_manager.release_network_segment(context, segment) # change in segments could affect resulting network mtu, so let's # recalculate it network_db = self._get_network(context, network_id) network_db.mtu = self._get_network_mtu( network_db, validate=(event != events.PRECOMMIT_DELETE)) network_db.save(session=context.session) try: self._notify_mechanism_driver_for_segment_change( event, context, network_id) except ml2_exc.MechanismDriverError: with excutils.save_and_reraise_exception(): LOG.error("mechanism_manager error occurred when " "handle event %(event)s for segment " "'%(segment)s'", {'event': event, 'segment': segment['id']}) def _notify_mechanism_driver_for_segment_change(self, event, context, network_id): network_with_segments = self.get_network(context, network_id) mech_context = driver_context.NetworkContext( self, context, network_with_segments, original_network=network_with_segments) if event in [events.PRECOMMIT_CREATE, events.PRECOMMIT_DELETE]: self.mechanism_manager.update_network_precommit(mech_context) elif event in [events.AFTER_CREATE, events.AFTER_DELETE]: self.mechanism_manager.update_network_postcommit(mech_context) @staticmethod def _validate_compute_port(port): if not port['device_owner'].startswith( const.DEVICE_OWNER_COMPUTE_PREFIX): msg = _('Invalid port %s. 
Operation only valid on compute ' 'ports') % port['id'] raise exc.BadRequest(resource='port', msg=msg) def _make_port_binding_dict(self, binding, fields=None): res = {key: binding[key] for key in ( pbe_ext.HOST, pbe_ext.VIF_TYPE, pbe_ext.VNIC_TYPE, pbe_ext.STATUS)} if isinstance(binding, ports_obj.PortBinding): res[pbe_ext.PROFILE] = binding.profile or {} res[pbe_ext.VIF_DETAILS] = binding.vif_details or {} else: res[pbe_ext.PROFILE] = self._get_profile(binding) res[pbe_ext.VIF_DETAILS] = self._get_vif_details(binding) return db_utils.resource_fields(res, fields) def _get_port_binding_attrs(self, binding, host=None): return {portbindings.VNIC_TYPE: binding.get(pbe_ext.VNIC_TYPE), portbindings.HOST_ID: binding.get(pbe_ext.HOST) or host, portbindings.PROFILE: binding.get(pbe_ext.PROFILE, {})} def _process_active_binding_change(self, changes, mech_context, port_dict, original_host): if changes: self._clear_port_binding(mech_context, mech_context._binding, port_dict, original_host) port_dict['status'] = const.PORT_STATUS_DOWN super(Ml2Plugin, self).update_port( mech_context._plugin_context, port_dict['id'], {port_def.RESOURCE_NAME: {'status': const.PORT_STATUS_DOWN}}) self._update_port_dict_binding(port_dict, mech_context._binding) mech_context._binding.persist_state_to_session( mech_context._plugin_context.session) @utils.transaction_guard @db_api.retry_if_session_inactive() def create_port_binding(self, context, port_id, binding): attrs = binding[pbe_ext.RESOURCE_NAME] with db_api.CONTEXT_WRITER.using(context): port_db = self._get_port(context, port_id) self._validate_compute_port(port_db) if self._get_binding_for_host(port_db.port_bindings, attrs[pbe_ext.HOST]): raise exc.PortBindingAlreadyExists( port_id=port_id, host=attrs[pbe_ext.HOST]) status = const.ACTIVE is_active_binding = True active_binding = p_utils.get_port_binding_by_status_and_host( port_db.port_bindings, const.ACTIVE) if active_binding: status = const.INACTIVE is_active_binding = False network = self.get_network(context, port_db['network_id']) port_dict = self._make_port_dict(port_db) new_binding = models.PortBinding( port_id=port_id, vif_type=portbindings.VIF_TYPE_UNBOUND, status=status) mech_context = driver_context.PortContext(self, context, port_dict, network, new_binding, None) changes, original_host = self._process_port_binding_attributes( mech_context._binding, self._get_port_binding_attrs(attrs)) if is_active_binding: self._process_active_binding_change(changes, mech_context, port_dict, original_host) bind_context = self._bind_port_if_needed( mech_context, allow_commit=is_active_binding) if (bind_context._binding.vif_type == portbindings.VIF_TYPE_BINDING_FAILED): raise exc.PortBindingError(port_id=port_id, host=attrs[pbe_ext.HOST]) bind_context._binding.port_id = port_id bind_context._binding.status = status if not is_active_binding: with db_api.CONTEXT_WRITER.using(context): bind_context._binding.persist_state_to_session(context.session) db.set_binding_levels(context, bind_context._binding_levels) return self._make_port_binding_dict(bind_context._binding) @utils.transaction_guard @db_api.retry_if_session_inactive() def get_port_bindings(self, context, port_id, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): port = ports_obj.Port.get_object(context, id=port_id) if not port: raise exc.PortNotFound(port_id=port_id) self._validate_compute_port(port) filters = filters or {} pager = base_obj.Pager(sorts, limit, page_reverse, marker) bindings = ports_obj.PortBinding.get_objects( context, 
_pager=pager, port_id=port_id, **filters) return [self._make_port_binding_dict(binding, fields) for binding in bindings] @utils.transaction_guard @db_api.retry_if_session_inactive() def get_port_binding(self, context, host, port_id, fields=None): port = ports_obj.Port.get_object(context, id=port_id) if not port: raise exc.PortNotFound(port_id=port_id) self._validate_compute_port(port) binding = ports_obj.PortBinding.get_object(context, host=host, port_id=port_id) if not binding: raise exc.PortBindingNotFound(port_id=port_id, host=host) return self._make_port_binding_dict(binding, fields) def _get_binding_for_host(self, bindings, host): for binding in bindings: if binding.host == host: return binding @utils.transaction_guard @db_api.retry_if_session_inactive() def update_port_binding(self, context, host, port_id, binding): attrs = binding[pbe_ext.RESOURCE_NAME] with db_api.CONTEXT_WRITER.using(context): port_db = self._get_port(context, port_id) self._validate_compute_port(port_db) original_binding = self._get_binding_for_host( port_db.port_bindings, host) if not original_binding: raise exc.PortBindingNotFound(port_id=port_id, host=host) is_active_binding = (original_binding.status == const.ACTIVE) network = self.get_network(context, port_db['network_id']) port_dict = self._make_port_dict(port_db) mech_context = driver_context.PortContext(self, context, port_dict, network, original_binding, None) changes, original_host = self._process_port_binding_attributes( mech_context._binding, self._get_port_binding_attrs(attrs, host=host)) if is_active_binding: self._process_active_binding_change(changes, mech_context, port_dict, original_host) bind_context = self._bind_port_if_needed( mech_context, allow_commit=is_active_binding) if (bind_context._binding.vif_type == portbindings.VIF_TYPE_BINDING_FAILED): raise exc.PortBindingError(port_id=port_id, host=host) if not is_active_binding: with db_api.CONTEXT_WRITER.using(context): bind_context._binding.persist_state_to_session(context.session) db.set_binding_levels(context, bind_context._binding_levels) return self._make_port_binding_dict(bind_context._binding) @utils.transaction_guard @db_api.retry_if_session_inactive() def activate(self, context, host, port_id): with db_api.CONTEXT_WRITER.using(context): # TODO(mlavalle) Next two lines can be removed when bug #1770267 is # fixed if isinstance(port_id, dict): port_id = port_id['port_id'] port_db = self._get_port(context, port_id) self._validate_compute_port(port_db) active_binding = p_utils.get_port_binding_by_status_and_host( port_db.port_bindings, const.ACTIVE) if host == (active_binding and active_binding.host): raise exc.PortBindingAlreadyActive(port_id=port_id, host=host) inactive_binding = p_utils.get_port_binding_by_status_and_host( port_db.port_bindings, const.INACTIVE, host=host) if not inactive_binding or inactive_binding.host != host: raise exc.PortBindingNotFound(port_id=port_id, host=host) network = self.get_network(context, port_db['network_id']) port_dict = self._make_port_dict(port_db) levels = db.get_binding_level_objs(context, port_id, active_binding.host) original_context = driver_context.PortContext(self, context, port_dict, network, active_binding, levels) self._clear_port_binding(original_context, active_binding, port_dict, active_binding.host) port_dict['status'] = const.PORT_STATUS_DOWN super(Ml2Plugin, self).update_port( context, port_dict['id'], {port_def.RESOURCE_NAME: {'status': const.PORT_STATUS_DOWN}}) levels = db.get_binding_level_objs(context, port_id, 
                                               inactive_binding.host)
            bind_context = driver_context.PortContext(self, context,
                                                      port_dict, network,
                                                      inactive_binding,
                                                      levels)
        for count in range(MAX_BIND_TRIES):
            cur_context, _, try_again = self._commit_port_binding(
                original_context, bind_context, need_notify=True,
                update_binding_levels=False)
            if not try_again:
                self.notifier.binding_deactivate(context, port_id,
                                                 active_binding.host,
                                                 network['id'])
                self.notifier.binding_activate(context, port_id,
                                               inactive_binding.host)
                return self._make_port_binding_dict(cur_context._binding)
        raise exc.PortBindingError(port_id=port_id, host=host)

    @utils.transaction_guard
    @db_api.retry_if_session_inactive()
    def delete_port_binding(self, context, host, port_id):
        ports_obj.PortBinding.delete_objects(context, host=host,
                                             port_id=port_id)
        db.clear_binding_levels(context, port_id=port_id, host=host)

    @db_api.retry_if_session_inactive()
    def get_ports_by_vnic_type_and_host(self, context, **kwargs):
        host = kwargs['host']
        vnic_type = kwargs['vnic_type']
        ports = ports_obj.Port.get_ports_by_vnic_type_and_host(
            context, vnic_type, host)
        return [self._make_port_dict(port.db_obj) for port in ports]

neutron-16.0.0.0b2.dev214/neutron/plugins/ml2/rpc.py

# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib.agent import topics
from neutron_lib.api.definitions import port_security as psec
from neutron_lib.api.definitions import portbindings
from neutron_lib.api.definitions import uplink_status_propagation as usp
from neutron_lib.callbacks import resources
from neutron_lib import constants as n_const
from neutron_lib.plugins import directory
from neutron_lib.plugins.ml2 import api
from neutron_lib import rpc as n_rpc
from neutron_lib.services.qos import constants as qos_consts
from oslo_config import cfg
from oslo_log import log
import oslo_messaging
from osprofiler import profiler
from sqlalchemy.orm import exc

from neutron.api.rpc.handlers import dvr_rpc
from neutron.api.rpc.handlers import securitygroups_rpc as sg_rpc
from neutron.db import l3_hamode_db
from neutron.db import provisioning_blocks
from neutron.plugins.ml2 import db as ml2_db
from neutron.plugins.ml2.drivers import type_tunnel

# REVISIT(kmestery): Allow the type and mechanism drivers to supply the
# mixins and eventually remove the direct dependencies on type_tunnel.
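# Rough topology assumed by this module (consistent with the docstrings
# below): L2 agents call into RpcCallbacks over RPC (get_device_details,
# update_device_list, ...) and the server answers from the ML2 plugin,
# while AgentNotifierApi carries traffic in the opposite direction,
# letting the server fan notifications out to the agents.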
LOG = log.getLogger(__name__) class RpcCallbacks(type_tunnel.TunnelRpcCallbackMixin): # history # 1.0 Initial version (from openvswitch/linuxbridge) # 1.1 Support Security Group RPC # 1.2 Support get_devices_details_list # 1.3 get_device_details rpc signature upgrade to obtain 'host' and # return value to include fixed_ips and device_owner for # the device port # 1.4 tunnel_sync rpc signature upgrade to obtain 'host' # 1.5 Support update_device_list and # get_devices_details_list_and_failed_devices # 1.6 Support get_network_details # 1.7 Support get_ports_by_vnic_type_and_host # 1.8 Rename agent_restarted to refresh_tunnels in # update_device_list to reflect its expanded purpose target = oslo_messaging.Target(version='1.8') def __init__(self, notifier, type_manager): self.setup_tunnel_callback_mixin(notifier, type_manager) super(RpcCallbacks, self).__init__() def _get_new_status(self, host, port_context): port = port_context.current if not host or host == port_context.host: new_status = (n_const.PORT_STATUS_BUILD if port['admin_state_up'] else n_const.PORT_STATUS_DOWN) if port['status'] != new_status: return new_status @staticmethod def _get_request_details(kwargs): return (kwargs.get('agent_id'), kwargs.get('host'), kwargs.get('device') or kwargs.get('network')) def get_device_details(self, rpc_context, **kwargs): """Agent requests device details.""" agent_id, host, device = self._get_request_details(kwargs) # cached networks used for reducing number of network db calls # for server internal usage only cached_networks = kwargs.get('cached_networks') LOG.debug("Device %(device)s details requested by agent " "%(agent_id)s with host %(host)s", {'device': device, 'agent_id': agent_id, 'host': host}) plugin = directory.get_plugin() port_id = plugin._device_to_port_id(rpc_context, device) port_context = plugin.get_bound_port_context(rpc_context, port_id, host, cached_networks) if not port_context: LOG.debug("Device %(device)s requested by agent " "%(agent_id)s not found in database", {'device': device, 'agent_id': agent_id}) return {'device': device} port = port_context.current # caching information about networks for future use if cached_networks is not None: if port['network_id'] not in cached_networks: cached_networks[port['network_id']] = ( port_context.network.current) result = self._get_device_details(rpc_context, agent_id=agent_id, host=host, device=device, port_context=port_context) if 'network_id' in result: # success so we update status new_status = self._get_new_status(host, port_context) if new_status: plugin.update_port_status(rpc_context, port_id, new_status, host, port_context.network.current) return result def _get_device_details(self, rpc_context, agent_id, host, device, port_context): segment = port_context.bottom_bound_segment port = port_context.current if not segment: LOG.warning("Device %(device)s requested by agent " "%(agent_id)s on network %(network_id)s not " "bound, vif_type: %(vif_type)s", {'device': device, 'agent_id': agent_id, 'network_id': port['network_id'], 'vif_type': port_context.vif_type}) return {'device': device} if (port['device_owner'].startswith( n_const.DEVICE_OWNER_COMPUTE_PREFIX) and port[portbindings.HOST_ID] != host): LOG.debug("Device %(device)s has no active binding in host " "%(host)s", {'device': device, 'host': host}) return {'device': device, n_const.NO_ACTIVE_BINDING: True} network_qos_policy_id = port_context.network._network.get( qos_consts.QOS_POLICY_ID) entry = {'device': device, 'network_id': port['network_id'], 'port_id': port['id'], 
                 'mac_address': port['mac_address'],
                 'admin_state_up': port['admin_state_up'],
                 'network_type': segment[api.NETWORK_TYPE],
                 'segmentation_id': segment[api.SEGMENTATION_ID],
                 'physical_network': segment[api.PHYSICAL_NETWORK],
                 'mtu': port_context.network._network.get('mtu'),
                 'fixed_ips': port['fixed_ips'],
                 'device_owner': port['device_owner'],
                 'allowed_address_pairs': port['allowed_address_pairs'],
                 'port_security_enabled': port.get(psec.PORTSECURITY, True),
                 'qos_policy_id': port.get(qos_consts.QOS_POLICY_ID),
                 'network_qos_policy_id': network_qos_policy_id,
                 'profile': port[portbindings.PROFILE],
                 'propagate_uplink_status': port.get(
                     usp.PROPAGATE_UPLINK_STATUS, False)}
        LOG.debug("Returning: %s", entry)
        return entry

    def get_devices_details_list(self, rpc_context, **kwargs):
        # cached networks used for reducing number of network db calls
        cached_networks = {}
        return [
            self.get_device_details(
                rpc_context,
                device=device,
                cached_networks=cached_networks,
                **kwargs
            )
            for device in kwargs.pop('devices', [])
        ]

    def get_devices_details_list_and_failed_devices(self,
                                                    rpc_context, **kwargs):
        devices = []
        failed_devices = []
        devices_to_fetch = kwargs.pop('devices', [])
        plugin = directory.get_plugin()
        host = kwargs.get('host')
        bound_contexts = plugin.get_bound_ports_contexts(rpc_context,
                                                         devices_to_fetch,
                                                         host)
        for device in devices_to_fetch:
            if not bound_contexts.get(device):
                # port not found in the database or not bound
                LOG.debug("Device %(device)s requested by agent "
                          "%(agent_id)s not found in database",
                          {'device': device,
                           'agent_id': kwargs.get('agent_id')})
                devices.append({'device': device})
                continue
            try:
                devices.append(self._get_device_details(
                    rpc_context, agent_id=kwargs.get('agent_id'),
                    host=host, device=device,
                    port_context=bound_contexts[device]))
            except Exception:
                LOG.exception("Failed to get details for device %s", device)
                failed_devices.append(device)
        new_status_map = {ctxt.current['id']: self._get_new_status(host, ctxt)
                          for ctxt in bound_contexts.values() if ctxt}
        # filter out any without status changes
        new_status_map = {p: s for p, s in new_status_map.items() if s}
        try:
            plugin.update_port_statuses(rpc_context, new_status_map, host)
        except Exception:
            LOG.exception("Failure updating statuses, retrying all")
            failed_devices = devices_to_fetch
            devices = []
        return {'devices': devices,
                'failed_devices': failed_devices}

    def get_network_details(self, rpc_context, **kwargs):
        """Agent requests network details."""
        agent_id, host, network = self._get_request_details(kwargs)
        LOG.debug("Network %(network)s details requested by agent "
                  "%(agent_id)s with host %(host)s",
                  {'network': network, 'agent_id': agent_id, 'host': host})
        plugin = directory.get_plugin()
        return plugin.get_network(rpc_context, network)

    @profiler.trace("rpc")
    def update_device_down(self, rpc_context, **kwargs):
        """Device no longer exists on agent."""
        # TODO(garyk) - live migration and port status
        agent_id, host, device = self._get_request_details(kwargs)
        LOG.debug("Device %(device)s no longer exists at agent "
                  "%(agent_id)s", {'device': device, 'agent_id': agent_id})
        plugin = directory.get_plugin()
        port_id = plugin._device_to_port_id(rpc_context, device)
        port_exists = True
        if (host and not plugin.port_bound_to_host(rpc_context,
                                                   port_id, host)):
            LOG.debug("Device %(device)s not bound to the"
                      " agent host %(host)s",
                      {'device': device, 'host': host})
        else:
            try:
                port_exists = bool(plugin.update_port_status(
                    rpc_context, port_id, n_const.PORT_STATUS_DOWN, host))
            except exc.StaleDataError:
                port_exists = False
                LOG.debug("delete_port and update_device_down are being "
                          "executed 
concurrently. Ignoring StaleDataError.") return {'device': device, 'exists': port_exists} self.notify_l2pop_port_wiring(port_id, rpc_context, n_const.PORT_STATUS_DOWN, host) return {'device': device, 'exists': port_exists} @profiler.trace("rpc") def update_device_up(self, rpc_context, **kwargs): """Device is up on agent.""" refresh_tunnels = kwargs.pop('refresh_tunnels', False) if not refresh_tunnels: # For backward compatibility with older agents refresh_tunnels = kwargs.pop('agent_restarted', False) agent_id, host, device = self._get_request_details(kwargs) LOG.debug("Device %(device)s up at agent %(agent_id)s", {'device': device, 'agent_id': agent_id}) plugin = directory.get_plugin() port_id = plugin._device_to_port_id(rpc_context, device) port = plugin.port_bound_to_host(rpc_context, port_id, host) if host and not port: LOG.debug("Device %(device)s not bound to the" " agent host %(host)s", {'device': device, 'host': host}) # this might mean that a VM is in the process of live migration # and vif was plugged on the destination compute node; # need to notify nova explicitly port = ml2_db.get_port(rpc_context, port_id) # _device_to_port_id may have returned a truncated UUID if the # agent did not provide a full one (e.g. Linux Bridge case). if not port: LOG.debug("Port %s not found, will not notify nova.", port_id) return else: if port.device_owner.startswith( n_const.DEVICE_OWNER_COMPUTE_PREFIX): # NOTE(haleyb): It is possible for a test to override a # config option after the plugin has been initialized so # the nova_notifier attribute is not set on the plugin. if (cfg.CONF.notify_nova_on_port_status_changes and hasattr(plugin, 'nova_notifier')): plugin.nova_notifier.notify_port_active_direct(port) return else: self.update_port_status_to_active(port, rpc_context, port_id, host) self.notify_l2pop_port_wiring(port_id, rpc_context, n_const.PORT_STATUS_ACTIVE, host, refresh_tunnels) def update_port_status_to_active(self, port, rpc_context, port_id, host): plugin = directory.get_plugin() if port and port['device_owner'] == n_const.DEVICE_OWNER_DVR_INTERFACE: # NOTE(kevinbenton): we have to special case DVR ports because of # the special multi-binding status update logic they have that # depends on the host plugin.update_port_status(rpc_context, port_id, n_const.PORT_STATUS_ACTIVE, host) else: # _device_to_port_id may have returned a truncated UUID if the # agent did not provide a full one (e.g. Linux Bridge case). We # need to look up the full one before calling provisioning_complete if not port: port = ml2_db.get_port(rpc_context, port_id) if not port: # port doesn't exist, no need to add a provisioning block return provisioning_blocks.provisioning_complete( rpc_context, port['id'], resources.PORT, provisioning_blocks.L2_AGENT_ENTITY) def notify_l2pop_port_wiring(self, port_id, rpc_context, status, host, refresh_tunnels=False): """Notify the L2pop driver that a port has been wired/unwired. The L2pop driver uses this notification to broadcast forwarding entries to other agents on the same network as the port for port_id. """ plugin = directory.get_plugin() l2pop_driver = plugin.mechanism_manager.mech_drivers.get( 'l2population') if not l2pop_driver: return port = ml2_db.get_port(rpc_context, port_id) if not port: return port_context = plugin.get_bound_port_context( rpc_context, port_id, host) if not port_context: # port deleted return # NOTE: DVR ports are already handled and updated through l2pop # and so we don't need to update it again here. 
But, l2pop did not # handle DVR ports while restart neutron-*-agent, we need to handle # it here. if (port['device_owner'] == n_const.DEVICE_OWNER_DVR_INTERFACE and not refresh_tunnels): return port = port_context.current if (port['device_owner'] != n_const.DEVICE_OWNER_DVR_INTERFACE and status == n_const.PORT_STATUS_ACTIVE and port[portbindings.HOST_ID] != host and not l3_hamode_db.is_ha_router_port(rpc_context, port['device_owner'], port['device_id'])): # don't setup ACTIVE forwarding entries unless bound to this # host or if it's an HA or DVR port (which is special-cased in # the mech driver) return port_context.current['status'] = status port_context.current[portbindings.HOST_ID] = host if status == n_const.PORT_STATUS_ACTIVE: l2pop_driver.obj.update_port_up(port_context, refresh_tunnels) else: l2pop_driver.obj.update_port_down(port_context) @profiler.trace("rpc") def update_device_list(self, rpc_context, **kwargs): devices_up = [] failed_devices_up = [] devices_down = [] failed_devices_down = [] devices = kwargs.get('devices_up') if devices: for device in devices: try: self.update_device_up( rpc_context, device=device, **kwargs) except Exception: failed_devices_up.append(device) LOG.error("Failed to update device %s up", device) else: devices_up.append(device) devices = kwargs.get('devices_down') if devices: for device in devices: try: dev = self.update_device_down( rpc_context, device=device, **kwargs) except Exception: failed_devices_down.append(device) LOG.error("Failed to update device %s down", device) else: devices_down.append(dev) return {'devices_up': devices_up, 'failed_devices_up': failed_devices_up, 'devices_down': devices_down, 'failed_devices_down': failed_devices_down} def get_ports_by_vnic_type_and_host(self, rpc_context, vnic_type, host): plugin = directory.get_plugin() return plugin.get_ports_by_vnic_type_and_host( rpc_context, vnic_type=vnic_type, host=host) class AgentNotifierApi(dvr_rpc.DVRAgentRpcApiMixin, sg_rpc.SecurityGroupAgentRpcApiMixin, type_tunnel.TunnelAgentRpcApiMixin): """Agent side of the openvswitch rpc API. API version history: 1.0 - Initial version. 1.1 - Added get_active_networks_info, create_dhcp_port, update_dhcp_port, and removed get_dhcp_port methods. 
1.4 - Added network_update 1.5 - Added binding_activate and binding_deactivate """ def __init__(self, topic): self.topic = topic self.topic_network_delete = topics.get_topic_name(topic, topics.NETWORK, topics.DELETE) self.topic_port_update = topics.get_topic_name(topic, topics.PORT, topics.UPDATE) self.topic_port_delete = topics.get_topic_name(topic, topics.PORT, topics.DELETE) self.topic_network_update = topics.get_topic_name(topic, topics.NETWORK, topics.UPDATE) self.topic_port_binding_deactivate = topics.get_topic_name( topic, topics.PORT_BINDING, topics.DEACTIVATE) self.topic_port_binding_activate = topics.get_topic_name( topic, topics.PORT_BINDING, topics.ACTIVATE) target = oslo_messaging.Target(topic=topic, version='1.0') self.client = n_rpc.get_client(target) def network_delete(self, context, network_id): cctxt = self.client.prepare(topic=self.topic_network_delete, fanout=True) cctxt.cast(context, 'network_delete', network_id=network_id) def port_update(self, context, port, network_type, segmentation_id, physical_network): cctxt = self.client.prepare(topic=self.topic_port_update, fanout=True) cctxt.cast(context, 'port_update', port=port, network_type=network_type, segmentation_id=segmentation_id, physical_network=physical_network) def port_delete(self, context, port_id): cctxt = self.client.prepare(topic=self.topic_port_delete, fanout=True) cctxt.cast(context, 'port_delete', port_id=port_id) def network_update(self, context, network): cctxt = self.client.prepare(topic=self.topic_network_update, fanout=True, version='1.4') cctxt.cast(context, 'network_update', network=network) def binding_deactivate(self, context, port_id, host, network_id): cctxt = self.client.prepare(topic=self.topic_port_binding_deactivate, fanout=True, version='1.5') cctxt.cast(context, 'binding_deactivate', port_id=port_id, host=host, network_id=network_id) def binding_activate(self, context, port_id, host): cctxt = self.client.prepare(topic=self.topic_port_binding_activate, fanout=True, version='1.5') cctxt.cast(context, 'binding_activate', port_id=port_id, host=host) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/policy.py0000644000175000017500000005134300000000000020514 0ustar00coreycorey00000000000000# Copyright (c) 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
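# Usage sketch (illustrative, not part of the original module): a caller
# initializes the enforcer once and then evaluates a single action against
# a target dict. The action and target values below are hypothetical.

def _policy_check_example():
    from neutron_lib import context as n_context

    init()  # load registered defaults and the policy file, if any
    ctx = n_context.Context(user_id='user', tenant_id='project')
    # returns True when the context is allowed to perform the action
    return check(ctx, 'get_network', {'project_id': ctx.project_id})
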
import collections import itertools import re import sys from neutron_lib.api import attributes from neutron_lib.api.definitions import network as net_apidef from neutron_lib import constants from neutron_lib import context from neutron_lib import exceptions from neutron_lib.plugins import directory from neutron_lib.services import constants as service_const from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_policy import policy from oslo_utils import excutils import six import stevedore from neutron._i18n import _ from neutron.common import cache_utils as cache LOG = logging.getLogger(__name__) _ENFORCER = None ADMIN_CTX_POLICY = 'context_is_admin' ADVSVC_CTX_POLICY = 'context_is_advsvc' # Identify the attribute used by a resource to reference another resource _RESOURCE_FOREIGN_KEYS = { net_apidef.COLLECTION_NAME: 'network_id', # TODO(slaweq): use SECURITYGROUPS constant from api def when # securitygroups api def will be moved to neutron-lib 'security_groups': 'security_group_id' } def reset(): global _ENFORCER if _ENFORCER: _ENFORCER.clear() _ENFORCER = None def register_rules(enforcer): extmgr = stevedore.extension.ExtensionManager('neutron.policies', invoke_on_load=True) policies = [list(e.obj) for e in extmgr.extensions] LOG.debug('Loaded default policies from %s ' 'under neutron.policies entry points', [e.name for e in extmgr.extensions]) enforcer.register_defaults(itertools.chain(*policies)) def init(conf=cfg.CONF, policy_file=None): """Init an instance of the Enforcer class.""" global _ENFORCER if not _ENFORCER: _ENFORCER = policy.Enforcer(conf, policy_file=policy_file) register_rules(_ENFORCER) _ENFORCER.load_rules(True) def refresh(policy_file=None): """Reset policy and init a new instance of Enforcer.""" reset() init(policy_file=policy_file) def get_resource_and_action(action, pluralized=None): """Return resource and enforce_attr_based_check(boolean) per resource and action extracted from api operation. """ data = action.split(':', 1)[0].split('_', 1) resource = pluralized or ("%ss" % data[-1]) enforce_attr_based_check = data[0] not in ('get', 'delete') return (resource, enforce_attr_based_check) def set_rules(policies, overwrite=True): """Set rules based on the provided dict of rules. :param policies: New policies to use. It should be an instance of dict. :param overwrite: Whether to overwrite current rules or update them with the new rules. """ LOG.debug("Loading policies from file: %s", _ENFORCER.policy_path) init() _ENFORCER.set_rules(policies, overwrite) def _is_attribute_explicitly_set(attribute_name, resource, target, action): """Verify that an attribute is present and is explicitly set.""" if target.get(constants.ATTRIBUTES_TO_UPDATE): # In the case of update, the function should not pay attention to a # default value of an attribute, but check whether it was explicitly # marked as being updated instead. 
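    # Illustrative example (values hypothetical): for an update request
    # where target = {'attributes_to_update': ['shared'], 'shared': True},
    # only 'shared' is treated as explicitly set, regardless of defaults
    # declared for other attributes.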
return (attribute_name in target[constants.ATTRIBUTES_TO_UPDATE] and target[attribute_name] is not constants.ATTR_NOT_SPECIFIED) result = (attribute_name in target and target[attribute_name] is not constants.ATTR_NOT_SPECIFIED) if result and 'default' in resource[attribute_name]: return target[attribute_name] != resource[attribute_name]['default'] return result def _should_validate_sub_attributes(attribute, sub_attr): """Verify that sub-attributes are iterable and should be validated.""" validate = attribute.get('validate') return (validate and isinstance(sub_attr, collections.Iterable) and any([k.startswith('type:dict') and v for (k, v) in validate.items()])) def _build_subattr_match_rule(attr_name, attr, action, target): """Create the rule to match for sub-attribute policy checks.""" # TODO(salv-orlando): Instead of relying on validator info, introduce # typing for API attributes # Expect a dict as type descriptor validate = attr['validate'] key = [k for k in validate.keys() if k.startswith('type:dict')] if not key: LOG.warning("Unable to find data type descriptor for attribute %s", attr_name) return data = validate[key[0]] if not isinstance(data, dict): LOG.debug("Attribute type descriptor is not a dict. Unable to " "generate any sub-attr policy rule for %s.", attr_name) return sub_attr_rules = [policy.RuleCheck('rule', '%s:%s:%s' % (action, attr_name, sub_attr_name)) for sub_attr_name in data if sub_attr_name in target[attr_name]] return policy.AndCheck(sub_attr_rules) def _build_list_of_subattrs_rule(attr_name, attribute_value, action): rules = [] for sub_attr in attribute_value: if isinstance(sub_attr, dict): for k in sub_attr: rules.append(policy.RuleCheck( 'rule', '%s:%s:%s' % (action, attr_name, k))) if rules: return policy.AndCheck(rules) def _process_rules_list(rules, match_rule): """Recursively walk a policy rule to extract a list of match entries.""" if isinstance(match_rule, policy.RuleCheck): rules.append(match_rule.match) elif isinstance(match_rule, policy.AndCheck): for rule in match_rule.rules: _process_rules_list(rules, rule) return rules def _build_match_rule(action, target, pluralized): """Create the rule to match for a given action. 
The policy rule to be matched is built in the following way: 1) add entries for matching permission on objects 2) add an entry for the specific action (e.g.: create_network) 3) add an entry for attributes of a resource for which the action is being executed (e.g.: create_network:shared) 4) add an entry for sub-attributes of a resource for which the action is being executed (e.g.: create_router:external_gateway_info:network_id) """ match_rule = policy.RuleCheck('rule', action) resource, enforce_attr_based_check = get_resource_and_action( action, pluralized) if enforce_attr_based_check: # assigning to variable with short name for improving readability res_map = attributes.RESOURCES if resource in res_map: for attribute_name in res_map[resource]: if _is_attribute_explicitly_set(attribute_name, res_map[resource], target, action): attribute = res_map[resource][attribute_name] if 'enforce_policy' in attribute: attr_rule = policy.RuleCheck( 'rule', '%s:%s' % (action, attribute_name)) # Build match entries for sub-attributes if _should_validate_sub_attributes( attribute, target[attribute_name]): attr_rule = policy.AndCheck( [attr_rule, _build_subattr_match_rule( attribute_name, attribute, action, target)]) attribute_value = target[attribute_name] if isinstance(attribute_value, list): subattr_rule = _build_list_of_subattrs_rule( attribute_name, attribute_value, action) if subattr_rule: attr_rule = policy.AndCheck( [attr_rule, subattr_rule]) match_rule = policy.AndCheck([match_rule, attr_rule]) return match_rule # This check is registered as 'tenant_id' so that it can override # GenericCheck which was used for validating parent resource ownership. # This will prevent us from having to handling backward compatibility # for policy.json # TODO(salv-orlando): Reinstate GenericCheck for simple tenant_id checks @policy.register('tenant_id') class OwnerCheck(policy.Check): """Resource ownership check. This check verifies the owner of the current resource, or of another resource referenced by the one under analysis. In the former case it falls back to a regular GenericCheck, whereas in the latter case it leverages the plugin to load the referenced resource and perform the check. """ def __init__(self, kind, match): # Process the match try: self.target_field = re.findall(r'^\%\((.*)\)s$', match)[0] except IndexError: err_reason = (_("Unable to identify a target field from:%s. " "Match should be in the form %%()s") % match) LOG.exception(err_reason) raise exceptions.PolicyInitError( policy="%s:%s" % (kind, match), reason=err_reason) self._cache = cache._get_memory_cache_region(expiration_time=5) super(OwnerCheck, self).__init__(kind, match) @cache.cache_method_results def _extract(self, resource_type, resource_id, field): # NOTE(salv-orlando): This check currently assumes the parent # resource is handled by the core plugin. It might be worth # having a way to map resources to plugins so to make this # check more general plugin = directory.get_plugin() if resource_type in service_const.EXT_PARENT_RESOURCE_MAPPING: plugin = directory.get_plugin( service_const.EXT_PARENT_RESOURCE_MAPPING[resource_type]) f = getattr(plugin, 'get_%s' % resource_type) # f *must* exist, if not found it is better to let neutron # explode. 
Check will be performed with admin context try: data = f(context.get_admin_context(), resource_id, fields=[field]) except exceptions.NotFound as e: # NOTE(kevinbenton): a NotFound exception can occur if a # list operation is happening at the same time as one of # the parents and its children being deleted. So we issue # a RetryRequest so the API will redo the lookup and the # problem items will be gone. raise db_exc.RetryRequest(e) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Policy check error while calling %s!', f) return data[field] def __call__(self, target, creds, enforcer): if self.target_field not in target: # policy needs a plugin check # target field is in the form resource:field # however if they're not separated by a colon, use an underscore # as a separator for backward compatibility def do_split(separator): parent_res, parent_field = self.target_field.split( separator, 1) return parent_res, parent_field for separator in (':', '_'): try: parent_res, parent_field = do_split(separator) break except ValueError: LOG.debug("Unable to find ':' as separator in %s.", self.target_field) else: # If we are here split failed with both separators err_reason = (_("Unable to find resource name in %s") % self.target_field) LOG.error(err_reason) raise exceptions.PolicyCheckError( policy="%s:%s" % (self.kind, self.match), reason=err_reason) parent_foreign_key = _RESOURCE_FOREIGN_KEYS.get( "%ss" % parent_res, None) if parent_res == constants.EXT_PARENT_PREFIX: for resource in service_const.EXT_PARENT_RESOURCE_MAPPING: key = "%s_%s_id" % (constants.EXT_PARENT_PREFIX, resource) if key in target: parent_foreign_key = key parent_res = resource break if not parent_foreign_key: err_reason = (_("Unable to verify match:%(match)s as the " "parent resource: %(res)s was not found") % {'match': self.match, 'res': parent_res}) LOG.error(err_reason) raise exceptions.PolicyCheckError( policy="%s:%s" % (self.kind, self.match), reason=err_reason) target[self.target_field] = self._extract( parent_res, target[parent_foreign_key], parent_field) match = self.match % target if self.kind in creds: return match == six.text_type(creds[self.kind]) return False @policy.register('field') class FieldCheck(policy.Check): def __init__(self, kind, match): # Process the match resource, field_value = match.split(':', 1) field, value = field_value.split('=', 1) super(FieldCheck, self).__init__(kind, '%s:%s:%s' % (resource, field, value)) # Value might need conversion - we need help from the attribute map try: attr = attributes.RESOURCES[resource][field] conv_func = attr['convert_to'] except KeyError: conv_func = lambda x: x self.field = field self.resource = resource self.value = conv_func(value) self.regex = re.compile(value[1:]) if value.startswith('~') else None def __call__(self, target_dict, cred_dict, enforcer): target_value = self._get_target_value(target_dict) # target_value might be a boolean, explicitly compare with None if target_value is None: return False if self.regex: return bool(self.regex.match(target_value)) return target_value == self.value def _get_target_value(self, target_dict): if self.field in target_dict: return target_dict[self.field] # NOTE(slaweq): In case that target field is "networks:shared" we need # to treat it in "special" way as it may be used for resources other # than network, e.g. 
for port or subnet target_value = None if self.resource == "networks" and self.field == constants.SHARED: target_network_id = target_dict.get("network_id") if not target_network_id: LOG.debug("Unable to find network_id field in target: " "%(target_dict)s", {'field': self.field, 'target_dict': target_dict}) return project_id = target_dict.get('project_id') ctx = (context.Context(tenant_id=project_id) if project_id else context.get_admin_context()) plugin = directory.get_plugin() network = plugin.get_network(ctx, target_network_id) target_value = network.get(self.field) if target_value is None: LOG.debug("Unable to find requested field: %(field)s in target: " "%(target_dict)s", {'field': self.field, 'target_dict': target_dict}) return target_value def _prepare_check(context, action, target, pluralized): """Prepare rule, target, and credentials for the policy engine.""" # Compare with None to distinguish case in which target is {} if target is None: target = {} match_rule = _build_match_rule(action, target, pluralized) credentials = context.to_policy_values() return match_rule, target, credentials def log_rule_list(match_rule): if LOG.isEnabledFor(logging.DEBUG): rules = _process_rules_list([], match_rule) LOG.debug("Enforcing rules: %s", rules) def check(context, action, target, plugin=None, might_not_exist=False, pluralized=None): """Verifies that the action is valid on the target in this context. :param context: neutron context :param action: string representing the action to be checked this should be colon separated for clarity. :param target: dictionary representing the object of the action for object creation this should be a dictionary representing the location of the object e.g. ``{'project_id': context.project_id}`` :param plugin: currently unused and deprecated. Kept for backward compatibility. :param might_not_exist: If True the policy check is skipped (and the function returns True) if the specified policy does not exist. Defaults to false. :param pluralized: pluralized case of resource e.g. firewall_policy -> pluralized = "firewall_policies" :return: Returns True if access is permitted else False. """ # If we already know the context has admin rights do not perform an # additional check and authorize the operation if context.is_admin: return True if might_not_exist and not (_ENFORCER.rules and action in _ENFORCER.rules): return True match_rule, target, credentials = _prepare_check(context, action, target, pluralized) result = _ENFORCER.enforce(match_rule, target, credentials, pluralized=pluralized) return result def enforce(context, action, target, plugin=None, pluralized=None): """Verifies that the action is valid on the target in this context. :param context: neutron context :param action: string representing the action to be checked this should be colon separated for clarity. :param target: dictionary representing the object of the action for object creation this should be a dictionary representing the location of the object e.g. ``{'project_id': context.project_id}`` :param plugin: currently unused and deprecated. Kept for backward compatibility. :param pluralized: pluralized case of resource e.g. firewall_policy -> pluralized = "firewall_policies" :raises oslo_policy.policy.PolicyNotAuthorized: if verification fails. 
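    A minimal illustrative call (the action and target shown here are
    hypothetical)::

        enforce(context, 'update_port', {'project_id': context.project_id})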
""" # If we already know the context has admin rights do not perform an # additional check and authorize the operation if context.is_admin: return True rule, target, credentials = _prepare_check(context, action, target, pluralized) try: result = _ENFORCER.enforce(rule, target, credentials, action=action, do_raise=True) except policy.PolicyNotAuthorized: with excutils.save_and_reraise_exception(): log_rule_list(rule) LOG.debug("Failed policy check for '%s'", action) return result def get_enforcer(): # NOTE(amotoki): This was borrowed from nova/policy.py. # This method is for use by oslo.policy CLI scripts. Those scripts need the # 'output-file' and 'namespace' options, but having those in sys.argv means # loading the neutron config options will fail as those are not expected to # be present. So we pass in an arg list with those stripped out. conf_args = [] # Start at 1 because cfg.CONF expects the equivalent of sys.argv[1:] i = 1 while i < len(sys.argv): if sys.argv[i].strip('-') in ['namespace', 'output-file']: i += 2 continue conf_args.append(sys.argv[i]) i += 1 cfg.CONF(conf_args, project='neutron') init() return _ENFORCER ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.355045 neutron-16.0.0.0b2.dev214/neutron/privileged/0000755000175000017500000000000000000000000020767 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/privileged/__init__.py0000644000175000017500000000237000000000000023102 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_privsep import capabilities as caps from oslo_privsep import priv_context # It is expected that most (if not all) neutron operations can be # executed with these privileges. default = priv_context.PrivContext( __name__, cfg_section='privsep', pypath=__name__ + '.default', # TODO(gus): CAP_SYS_ADMIN is required (only?) for manipulating # network namespaces. SYS_ADMIN is a lot of scary powers, so # consider breaking this out into a separate minimal context. 
capabilities=[caps.CAP_SYS_ADMIN, caps.CAP_NET_ADMIN, caps.CAP_DAC_OVERRIDE, caps.CAP_DAC_READ_SEARCH, caps.CAP_SYS_PTRACE], ) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.355045 neutron-16.0.0.0b2.dev214/neutron/privileged/agent/0000755000175000017500000000000000000000000022065 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/privileged/agent/__init__.py0000644000175000017500000000000000000000000024164 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.355045 neutron-16.0.0.0b2.dev214/neutron/privileged/agent/linux/0000755000175000017500000000000000000000000023224 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/privileged/agent/linux/__init__.py0000644000175000017500000000000000000000000025323 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/privileged/agent/linux/ip_lib.py0000644000175000017500000006052300000000000025042 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ctypes from ctypes import util as ctypes_util import errno import os import socket from neutron_lib import constants from oslo_log import log as logging import pyroute2 from pyroute2 import netlink from pyroute2.netlink import exceptions as netlink_exceptions from pyroute2.netlink import rtnl from pyroute2.netlink.rtnl import ifinfmsg from pyroute2.netlink.rtnl import ndmsg from pyroute2 import NetlinkError from pyroute2 import netns from neutron._i18n import _ from neutron import privileged LOG = logging.getLogger(__name__) _IP_VERSION_FAMILY_MAP = {4: socket.AF_INET, 6: socket.AF_INET6} NETNS_RUN_DIR = '/var/run/netns' _CDLL = None def _get_cdll(): global _CDLL if not _CDLL: # NOTE(ralonsoh): from https://docs.python.org/3.6/library/ # ctypes.html#ctypes.PyDLL: "Instances of this class behave like CDLL # instances, except that the Python GIL is not released during the # function call, and after the function execution the Python error # flag is checked." # Check https://bugs.launchpad.net/neutron/+bug/1870352 _CDLL = ctypes.PyDLL(ctypes_util.find_library('c'), use_errno=True) return _CDLL def _get_scope_name(scope): """Return the name of the scope (given as a number), or the scope number if the name is unknown. 
For backward compatibility (with "ip" tool) "global" scope is converted to "universe" before converting to number """ scope = 'universe' if scope == 'global' else scope return rtnl.rt_scope.get(scope, scope) class NetworkNamespaceNotFound(RuntimeError): message = _("Network namespace %(netns_name)s could not be found.") def __init__(self, netns_name): super(NetworkNamespaceNotFound, self).__init__( self.message % {'netns_name': netns_name}) class NetworkInterfaceNotFound(RuntimeError): message = _("Network interface %(device)s not found in namespace " "%(namespace)s.") def __init__(self, message=None, device=None, namespace=None): # NOTE(slaweq): 'message' can be passed as an optional argument # because of how privsep daemon works. If exception is raised in # function called by privsep daemon, it will then try to reraise it # and will call it always with passing only message from originally # raised exception. message = message or self.message % { 'device': device, 'namespace': namespace} super(NetworkInterfaceNotFound, self).__init__(message) class InterfaceOperationNotSupported(RuntimeError): message = _("Operation not supported on interface %(device)s, namespace " "%(namespace)s.") def __init__(self, message=None, device=None, namespace=None): # NOTE(slaweq): 'message' can be passed as an optional argument # because of how privsep daemon works. If exception is raised in # function called by privsep daemon, it will then try to reraise it # and will call it always with passing only message from originally # raised exception. message = message or self.message % { 'device': device, 'namespace': namespace} super(InterfaceOperationNotSupported, self).__init__(message) class IpAddressAlreadyExists(RuntimeError): message = _("IP address %(ip)s already configured on %(device)s.") def __init__(self, message=None, ip=None, device=None): # NOTE(slaweq): 'message' can be passed as an optional argument # because of how privsep daemon works. If exception is raised in # function called by privsep daemon, it will then try to reraise it # and will call it always with passing only message from originally # raised exception. message = message or self.message % {'ip': ip, 'device': device} super(IpAddressAlreadyExists, self).__init__(message) class InterfaceAlreadyExists(RuntimeError): message = _("Interface %(device)s already exists.") def __init__(self, message=None, device=None): # NOTE(slaweq): 'message' can be passed as an optional argument # because of how privsep daemon works. If exception is raised in # function called by privsep daemon, it will then try to reraise it # and will call it always with passing only message from originally # raised exception. message = message or self.message % {'device': device} super(InterfaceAlreadyExists, self).__init__(message) def _make_route_dict(destination, nexthop, device, scope): return {'destination': destination, 'nexthop': nexthop, 'device': device, 'scope': scope} @privileged.default.entrypoint def get_routing_table(ip_version, namespace=None): """Return a list of dictionaries, each representing a route. :param ip_version: IP version of routes to return, for example 4 :param namespace: The name of the namespace from which to get the routes :return: a list of dictionaries, each representing a route. 
The dictionary format is: {'destination': cidr, 'nexthop': ip, 'device': device_name, 'scope': scope} """ family = _IP_VERSION_FAMILY_MAP[ip_version] try: netns = pyroute2.NetNS(namespace, flags=0) if namespace else None except OSError as e: if e.errno == errno.ENOENT: raise NetworkNamespaceNotFound(netns_name=namespace) raise routes = [] with pyroute2.IPDB(nl=netns) as ipdb: ipdb_routes = ipdb.routes ipdb_interfaces = ipdb.interfaces for route in ipdb_routes: if route['family'] != family: continue dst = route['dst'] nexthop = route.get('gateway') oif = route.get('oif') scope = _get_scope_name(route['scope']) # If there is not a valid outgoing interface id, check if # this is a multipath route (i.e. same destination with # multiple outgoing interfaces) if oif: device = ipdb_interfaces[oif]['ifname'] rt = _make_route_dict(dst, nexthop, device, scope) routes.append(rt) elif route.get('multipath'): for mpr in route['multipath']: oif = mpr['oif'] device = ipdb_interfaces[oif]['ifname'] rt = _make_route_dict(dst, nexthop, device, scope) routes.append(rt) return routes def get_iproute(namespace): # From iproute.py: # `IPRoute` -- RTNL API to the current network namespace # `NetNS` -- RTNL API to another network namespace if namespace: # do not try and create the namespace return pyroute2.NetNS(namespace, flags=0) else: return pyroute2.IPRoute() @privileged.default.entrypoint def open_namespace(namespace): """Open namespace to test if the namespace is ready to be manipulated""" with pyroute2.NetNS(namespace, flags=0): pass @privileged.default.entrypoint def list_ns_pids(namespace): """List namespace process PIDs Based on Pyroute2.netns.ns_pids(). Remove when https://github.com/svinota/pyroute2/issues/633 is fixed. """ ns_pids = [] try: ns_path = os.path.join(NETNS_RUN_DIR, namespace) ns_inode = os.stat(ns_path).st_ino except (OSError, FileNotFoundError): return ns_pids for pid in os.listdir('/proc'): if not pid.isdigit(): continue try: pid_path = os.path.join('/proc', pid, 'ns', 'net') if os.stat(pid_path).st_ino == ns_inode: ns_pids.append(int(pid)) except (OSError, FileNotFoundError): continue return ns_pids def _translate_ip_device_exception(e, device=None, namespace=None): if e.code == errno.ENODEV: raise NetworkInterfaceNotFound(device=device, namespace=namespace) if e.code == errno.EOPNOTSUPP: raise InterfaceOperationNotSupported(device=device, namespace=namespace) def get_link_id(device, namespace, raise_exception=True): with get_iproute(namespace) as ip: link_id = ip.link_lookup(ifname=device) if not link_id or len(link_id) < 1: if raise_exception: raise NetworkInterfaceNotFound(device=device, namespace=namespace) else: LOG.debug('Interface %(dev)s not found in namespace %(namespace)s', {'dev': device, 'namespace': namespace}) return None return link_id[0] def _run_iproute_link(command, device, namespace=None, **kwargs): try: with get_iproute(namespace) as ip: idx = get_link_id(device, namespace) return ip.link(command, index=idx, **kwargs) except NetlinkError as e: _translate_ip_device_exception(e, device, namespace) raise except OSError as e: if e.errno == errno.ENOENT: raise NetworkNamespaceNotFound(netns_name=namespace) raise def _run_iproute_neigh(command, device, namespace, **kwargs): try: with get_iproute(namespace) as ip: idx = get_link_id(device, namespace) return ip.neigh(command, ifindex=idx, **kwargs) except NetlinkError as e: _translate_ip_device_exception(e, device, namespace) raise except OSError as e: if e.errno == errno.ENOENT: raise 
NetworkNamespaceNotFound(netns_name=namespace) raise def _run_iproute_addr(command, device, namespace, **kwargs): try: with get_iproute(namespace) as ip: idx = get_link_id(device, namespace) return ip.addr(command, index=idx, **kwargs) except NetlinkError as e: _translate_ip_device_exception(e, device, namespace) raise except OSError as e: if e.errno == errno.ENOENT: raise NetworkNamespaceNotFound(netns_name=namespace) raise @privileged.default.entrypoint def add_ip_address(ip_version, ip, prefixlen, device, namespace, scope, broadcast=None): family = _IP_VERSION_FAMILY_MAP[ip_version] try: _run_iproute_addr('add', device, namespace, address=ip, mask=prefixlen, family=family, broadcast=broadcast, scope=_get_scope_name(scope)) except NetlinkError as e: if e.code == errno.EEXIST: raise IpAddressAlreadyExists(ip=ip, device=device) raise @privileged.default.entrypoint def delete_ip_address(ip_version, ip, prefixlen, device, namespace): family = _IP_VERSION_FAMILY_MAP[ip_version] try: _run_iproute_addr("delete", device, namespace, address=ip, mask=prefixlen, family=family) except NetlinkError as e: # when trying to delete a non-existent IP address, pyroute2 raises # NetlinkError with code EADDRNOTAVAIL (99, 'Cannot assign requested # address') # this shouldn't raise an error if e.code == errno.EADDRNOTAVAIL: return raise @privileged.default.entrypoint def flush_ip_addresses(ip_version, device, namespace): family = _IP_VERSION_FAMILY_MAP[ip_version] try: with get_iproute(namespace) as ip: idx = get_link_id(device, namespace) ip.flush_addr(index=idx, family=family) except OSError as e: if e.errno == errno.ENOENT: raise NetworkNamespaceNotFound(netns_name=namespace) raise @privileged.default.entrypoint def create_interface(ifname, namespace, kind, **kwargs): ifname = ifname[:constants.DEVICE_NAME_MAX_LEN] try: with get_iproute(namespace) as ip: physical_interface = kwargs.pop("physical_interface", None) if physical_interface: link_key = "vxlan_link" if kind == "vxlan" else "link" kwargs[link_key] = get_link_id(physical_interface, namespace) return ip.link("add", ifname=ifname, kind=kind, **kwargs) except NetlinkError as e: if e.code == errno.EEXIST: raise InterfaceAlreadyExists(device=ifname) raise except OSError as e: if e.errno == errno.ENOENT: raise NetworkNamespaceNotFound(netns_name=namespace) raise @privileged.default.entrypoint def delete_interface(ifname, namespace, **kwargs): _run_iproute_link("del", ifname, namespace, **kwargs) @privileged.default.entrypoint def interface_exists(ifname, namespace): try: idx = get_link_id(ifname, namespace, raise_exception=False) return bool(idx) except OSError as e: if e.errno == errno.ENOENT: return False raise @privileged.default.entrypoint def set_link_flags(device, namespace, flags): link = _run_iproute_link("get", device, namespace)[0] new_flags = flags | link['flags'] return _run_iproute_link("set", device, namespace, flags=new_flags) @privileged.default.entrypoint def set_link_attribute(device, namespace, **attributes): return _run_iproute_link("set", device, namespace, **attributes) @privileged.default.entrypoint def get_link_attributes(device, namespace): link = _run_iproute_link("get", device, namespace)[0] return { 'mtu': link.get_attr('IFLA_MTU'), 'qlen': link.get_attr('IFLA_TXQLEN'), 'state': link.get_attr('IFLA_OPERSTATE'), 'qdisc': link.get_attr('IFLA_QDISC'), 'brd': link.get_attr('IFLA_BROADCAST'), 'link/ether': link.get_attr('IFLA_ADDRESS'), 'alias': link.get_attr('IFLA_IFALIAS'), 'allmulticast': bool(link['flags'] & 
ifinfmsg.IFF_ALLMULTI), 'link_kind': link.get_nested('IFLA_LINKINFO', 'IFLA_INFO_KIND') } @privileged.default.entrypoint def add_neigh_entry(ip_version, ip_address, mac_address, device, namespace, **kwargs): """Add a neighbour entry. :param ip_address: IP address of entry to add :param mac_address: MAC address of entry to add :param device: Device name to use in adding entry :param namespace: The name of the namespace in which to add the entry """ family = _IP_VERSION_FAMILY_MAP[ip_version] _run_iproute_neigh('replace', device, namespace, dst=ip_address, lladdr=mac_address, family=family, state=ndmsg.states['permanent'], **kwargs) @privileged.default.entrypoint def delete_neigh_entry(ip_version, ip_address, mac_address, device, namespace, **kwargs): """Delete a neighbour entry. :param ip_address: IP address of entry to delete :param mac_address: MAC address of entry to delete :param device: Device name to use in deleting entry :param namespace: The name of the namespace in which to delete the entry """ family = _IP_VERSION_FAMILY_MAP[ip_version] try: _run_iproute_neigh('delete', device, namespace, dst=ip_address, lladdr=mac_address, family=family, **kwargs) except NetlinkError as e: # trying to delete a non-existent entry shouldn't raise an error if e.code == errno.ENOENT: return raise @privileged.default.entrypoint def dump_neigh_entries(ip_version, device, namespace, **kwargs): """Dump all neighbour entries. :param ip_version: IP version of entries to show (4 or 6) :param device: Device name to use in dumping entries :param namespace: The name of the namespace in which to dump the entries :param kwargs: Callers add any filters they use as kwargs :return: a list of dictionaries, each representing a neighbour. The dictionary format is: {'dst': ip_address, 'lladdr': mac_address, 'device': device_name} """ family = _IP_VERSION_FAMILY_MAP[ip_version] entries = [] dump = _run_iproute_neigh('dump', device, namespace, family=family, **kwargs) for entry in dump: attrs = dict(entry['attrs']) entries += [{'dst': attrs['NDA_DST'], 'lladdr': attrs.get('NDA_LLADDR'), 'device': device}] return entries @privileged.default.entrypoint def create_netns(name, **kwargs): """Create a network namespace. :param name: The name of the namespace to create """ try: netns.create(name, libc=_get_cdll()) except OSError as e: if e.errno != errno.EEXIST: raise @privileged.default.entrypoint def remove_netns(name, **kwargs): """Remove a network namespace. :param name: The name of the namespace to remove """ try: netns.remove(name, libc=_get_cdll()) except OSError as e: if e.errno != errno.ENOENT: raise @privileged.default.entrypoint def list_netns(**kwargs): """List network namespaces. Caller requires raised priveleges to list namespaces """ return netns.listnetns(**kwargs) def make_serializable(value): """Make a pyroute2 object serializable This function converts 'netlink.nla_slot' object (key, value) in a list of two elements. 
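    Illustrative example (hypothetical attribute): an entry such as
    netlink.nla_slot('IFLA_IFNAME', b'eth0') is returned as
    ['IFLA_IFNAME', 'eth0'].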
""" def _ensure_string(value): return value.decode() if isinstance(value, bytes) else value if isinstance(value, list): return [make_serializable(item) for item in value] elif isinstance(value, netlink.nla_slot): return [_ensure_string(value[0]), make_serializable(value[1])] elif isinstance(value, netlink.nla_base): return make_serializable(value.dump()) elif isinstance(value, dict): return {_ensure_string(key): make_serializable(data) for key, data in value.items()} elif isinstance(value, tuple): return tuple(make_serializable(item) for item in value) return _ensure_string(value) @privileged.default.entrypoint def get_link_devices(namespace, **kwargs): """List interfaces in a namespace :return: (list) interfaces in a namespace """ try: with get_iproute(namespace) as ip: return make_serializable(ip.get_links(**kwargs)) except OSError as e: if e.errno == errno.ENOENT: raise NetworkNamespaceNotFound(netns_name=namespace) raise def get_device_names(namespace, **kwargs): """List interface names in a namespace :return: a list of strings with the names of the interfaces in a namespace """ devices_attrs = [link['attrs'] for link in get_link_devices(namespace, **kwargs)] device_names = [] for device_attrs in devices_attrs: for link_name in (link_attr[1] for link_attr in device_attrs if link_attr[0] == 'IFLA_IFNAME'): device_names.append(link_name) return device_names @privileged.default.entrypoint def get_ip_addresses(namespace, **kwargs): """List of IP addresses in a namespace :return: (tuple) IP addresses in a namespace """ try: with get_iproute(namespace) as ip: return make_serializable(ip.get_addr(**kwargs)) except OSError as e: if e.errno == errno.ENOENT: raise NetworkNamespaceNotFound(netns_name=namespace) raise @privileged.default.entrypoint def list_ip_rules(namespace, ip_version, match=None, **kwargs): """List all IP rules""" try: with get_iproute(namespace) as ip: rules = make_serializable(ip.get_rules( family=_IP_VERSION_FAMILY_MAP[ip_version], match=match, **kwargs)) for rule in rules: rule['attrs'] = { key: value for key, value in ((item[0], item[1]) for item in rule['attrs'])} return rules except OSError as e: if e.errno == errno.ENOENT: raise NetworkNamespaceNotFound(netns_name=namespace) raise @privileged.default.entrypoint def add_ip_rule(namespace, **kwargs): """Add a new IP rule""" try: with get_iproute(namespace) as ip: ip.rule('add', **kwargs) except netlink_exceptions.NetlinkError as e: if e.code == errno.EEXIST: return raise except OSError as e: if e.errno == errno.ENOENT: raise NetworkNamespaceNotFound(netns_name=namespace) raise @privileged.default.entrypoint def delete_ip_rule(namespace, **kwargs): """Delete an IP rule""" try: with get_iproute(namespace) as ip: ip.rule('del', **kwargs) except OSError as e: if e.errno == errno.ENOENT: raise NetworkNamespaceNotFound(netns_name=namespace) raise def _make_pyroute2_route_args(namespace, ip_version, cidr, device, via, table, metric, scope, protocol): """Returns a dictionary of arguments to be used in pyroute route commands :param namespace: (string) name of the namespace :param ip_version: (int) [4, 6] :param cidr: (string) source IP or CIDR address (IPv4, IPv6) :param device: (string) input interface name :param via: (string) gateway IP address :param table: (string, int) table number or name :param metric: (int) route metric :param scope: (int) route scope :param protocol: (string) protocol name (pyroute2.netlink.rtnl.rt_proto) :return: a dictionary with the kwargs needed in pyroute rule commands """ args = {'family': 
_IP_VERSION_FAMILY_MAP[ip_version]} if not scope: scope = 'global' if via else 'link' scope = _get_scope_name(scope) if scope: args['scope'] = scope if cidr: args['dst'] = cidr if device: args['oif'] = get_link_id(device, namespace) if via: args['gateway'] = via if table: args['table'] = int(table) if metric: args['priority'] = int(metric) if protocol: args['proto'] = protocol return args @privileged.default.entrypoint def add_ip_route(namespace, cidr, ip_version, device=None, via=None, table=None, metric=None, scope=None, **kwargs): """Add an IP route""" kwargs.update(_make_pyroute2_route_args( namespace, ip_version, cidr, device, via, table, metric, scope, 'static')) try: with get_iproute(namespace) as ip: ip.route('replace', **kwargs) except OSError as e: if e.errno == errno.ENOENT: raise NetworkNamespaceNotFound(netns_name=namespace) raise @privileged.default.entrypoint def list_ip_routes(namespace, ip_version, device=None, table=None, **kwargs): """List IP routes""" kwargs.update(_make_pyroute2_route_args( namespace, ip_version, None, device, None, table, None, None, None)) try: with get_iproute(namespace) as ip: return make_serializable(ip.route('show', **kwargs)) except OSError as e: if e.errno == errno.ENOENT: raise NetworkNamespaceNotFound(netns_name=namespace) raise @privileged.default.entrypoint def delete_ip_route(namespace, cidr, ip_version, device=None, via=None, table=None, scope=None, **kwargs): """Delete an IP route""" kwargs.update(_make_pyroute2_route_args( namespace, ip_version, cidr, device, via, table, None, scope, None)) try: with get_iproute(namespace) as ip: ip.route('del', **kwargs) except OSError as e: if e.errno == errno.ENOENT: raise NetworkNamespaceNotFound(netns_name=namespace) raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/privileged/agent/linux/netlink_constants.py0000644000175000017500000000537300000000000027346 0ustar00coreycorey00000000000000# Copyright (c) 2017 Fujitsu Limited # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Some parts are based on python-conntrack: # Copyright (c) 2009-2011,2015 Andrew Grigorev # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # import socket CONNTRACK = 0 NFCT_O_PLAIN = 0 NFCT_OF_TIME_BIT = 1 NFCT_OF_TIME = 1 << NFCT_OF_TIME_BIT NFCT_Q_DESTROY = 2 NFCT_Q_FLUSH = 4 NFCT_Q_DUMP = 5 NFCT_T_DESTROY_BIT = 2 NFCT_T_DESTROY = 1 << NFCT_T_DESTROY_BIT ATTR_IPV4_SRC = 0 ATTR_IPV4_DST = 1 ATTR_IPV6_SRC = 4 ATTR_IPV6_DST = 5 ATTR_PORT_SRC = 8 ATTR_PORT_DST = 9 ATTR_ICMP_TYPE = 12 ATTR_ICMP_CODE = 13 ATTR_ICMP_ID = 14 ATTR_L3PROTO = 15 ATTR_L4PROTO = 17 ATTR_ZONE = 61 NFCT_T_NEW_BIT = 0 NFCT_T_NEW = 1 << NFCT_T_NEW_BIT NFCT_T_UPDATE_BIT = 1 NFCT_T_UPDATE = 1 << NFCT_T_UPDATE_BIT NFCT_T_DESTROY_BIT = 2 NFCT_T_DESTROY = 1 << NFCT_T_DESTROY_BIT NFCT_T_ALL = NFCT_T_NEW | NFCT_T_UPDATE | NFCT_T_DESTROY NFCT_CB_CONTINUE = 1 NFCT_CB_FAILURE = -1 NFNL_SUBSYS_CTNETLINK = 1 BUFFER = 1024 # IPv6 address memory buffer ADDR_BUFFER_6 = 16 ADDR_BUFFER_4 = 4 IPVERSION_SOCKET = {4: socket.AF_INET, 6: socket.AF_INET6} IPVERSION_BUFFER = {4: ADDR_BUFFER_4, 6: ADDR_BUFFER_6} ENTRY_IS_LOWER = -1 ENTRY_MATCHES = 0 ENTRY_IS_HIGHER = 1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/privileged/agent/linux/netlink_lib.py0000644000175000017500000003073400000000000026077 0ustar00coreycorey00000000000000# Copyright (c) 2017 Fujitsu Limited # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Some parts are based on python-conntrack: # Copyright (c) 2009-2011,2015 Andrew Grigorev # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
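# Illustrative note (not from the original file): the NFCT_T_* event masks
# defined in netlink_constants above are single-bit flags composed with OR,
# e.g. NFCT_T_ALL == NFCT_T_NEW | NFCT_T_UPDATE | NFCT_T_DESTROY == 0b111.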
# import ctypes from ctypes import util import re from neutron_lib import constants from neutron_lib import exceptions from oslo_log import log as logging from neutron._i18n import _ from neutron import privileged from neutron.privileged.agent.linux import netlink_constants as nl_constants LOG = logging.getLogger(__name__) nfct_lib = util.find_library('netfilter_conntrack') nfct = ctypes.CDLL(nfct_lib) libc = ctypes.CDLL(util.find_library('libc.so.6')) # In unit tests the actual nfct library may not be installed, and since we # don't make actual calls to it we don't want to add a hard dependency. if nfct_lib: # It's important that the types be defined properly on all of the functions # we call from nfct, otherwise pointers can be truncated and cause # segfaults. nfct.nfct_set_attr.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p] nfct.nfct_set_attr_u8.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_uint8] nfct.nfct_set_attr_u16.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_uint16] nfct.nfct_snprintf.argtypes = [ctypes.c_char_p, ctypes.c_uint, ctypes.c_void_p, ctypes.c_uint, ctypes.c_uint, ctypes.c_uint] nfct.nfct_new.restype = ctypes.c_void_p nfct.nfct_destroy.argtypes = [ctypes.c_void_p] nfct.nfct_query.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p] nfct.nfct_callback_register.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p] nfct.nfct_open.restype = ctypes.c_void_p nfct.nfct_close.argtypes = [ctypes.c_void_p] IP_VERSIONS = [constants.IP_VERSION_4, constants.IP_VERSION_6] DATA_CALLBACK = None # position of attribute in raw conntrack entry ATTR_POSITIONS = { 'icmp': [('type', 6), ('code', 7), ('src', 4), ('dst', 5), ('id', 8), ('zone', 16)], 'icmpv6': [('type', 6), ('code', 7), ('src', 4), ('dst', 5), ('id', 8), ('zone', 16)], 'tcp': [('sport', 7), ('dport', 8), ('src', 5), ('dst', 6), ('zone', 15)], 'udp': [('sport', 6), ('dport', 7), ('src', 4), ('dst', 5), ('zone', 14)] } TARGET = {'src': {4: nl_constants.ATTR_IPV4_SRC, 6: nl_constants.ATTR_IPV6_SRC}, 'dst': {4: nl_constants.ATTR_IPV4_DST, 6: nl_constants.ATTR_IPV6_DST}, 'ipversion': {4: nl_constants.ATTR_L3PROTO, 6: nl_constants.ATTR_L3PROTO}, 'protocol': {4: nl_constants.ATTR_L4PROTO, 6: nl_constants.ATTR_L4PROTO}, 'code': {4: nl_constants.ATTR_ICMP_CODE, 6: nl_constants.ATTR_ICMP_CODE}, 'type': {4: nl_constants.ATTR_ICMP_TYPE, 6: nl_constants.ATTR_ICMP_TYPE}, 'id': {4: nl_constants.ATTR_ICMP_ID, 6: nl_constants.ATTR_ICMP_ID}, 'sport': {4: nl_constants.ATTR_PORT_SRC, 6: nl_constants.ATTR_PORT_SRC}, 'dport': {4: nl_constants.ATTR_PORT_DST, 6: nl_constants.ATTR_PORT_DST}, 'zone': {4: nl_constants.ATTR_ZONE, 6: nl_constants.ATTR_ZONE} } NFCT_CALLBACK = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p) class ConntrackManager(object): def __init__(self, family_socket=None): self.family_socket = family_socket self.set_functions = { 'src': {4: nfct.nfct_set_attr, 6: nfct.nfct_set_attr}, 'dst': {4: nfct.nfct_set_attr, 6: nfct.nfct_set_attr}, 'ipversion': {4: nfct.nfct_set_attr_u8, 6: nfct.nfct_set_attr_u8}, 'protocol': {4: nfct.nfct_set_attr_u8, 6: nfct.nfct_set_attr_u8}, 'type': {4: nfct.nfct_set_attr_u8, 6: nfct.nfct_set_attr_u8}, 'code': {4: nfct.nfct_set_attr_u8, 6: nfct.nfct_set_attr_u8}, 'id': {4: nfct.nfct_set_attr_u16, 6: nfct.nfct_set_attr_u16}, 'sport': {4: nfct.nfct_set_attr_u16, 6: nfct.nfct_set_attr_u16}, 'dport': {4: nfct.nfct_set_attr_u16, 6: nfct.nfct_set_attr_u16}, 'zone': {4: nfct.nfct_set_attr_u16, 6: nfct.nfct_set_attr_u16} } 
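        # NOTE (illustrative): the converters below normalize values before
        # they reach libnetfilter_conntrack; ports and ICMP ids pass through
        # libc.htons() because conntrack stores 16-bit attributes in network
        # byte order.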
self.converters = {'src': bytes, 'dst': bytes, 'ipversion': nl_constants.IPVERSION_SOCKET.get, 'protocol': constants.IP_PROTOCOL_MAP.get, 'code': int, 'type': int, 'id': libc.htons, 'sport': libc.htons, 'dport': libc.htons, 'zone': int } def list_entries(self): entries = [] raw_entry = ctypes.create_string_buffer(nl_constants.BUFFER) @NFCT_CALLBACK def callback(type_, conntrack, data): nfct.nfct_snprintf(raw_entry, nl_constants.BUFFER, conntrack, type_, nl_constants.NFCT_O_PLAIN, nl_constants.NFCT_OF_TIME) entries.append(raw_entry.value.decode('utf-8')) return nl_constants.NFCT_CB_CONTINUE self._callback_register(nl_constants.NFCT_T_ALL, callback, DATA_CALLBACK) data_ref = self._get_ref(self.family_socket or nl_constants.IPVERSION_SOCKET[4]) self._query(nl_constants.NFCT_Q_DUMP, data_ref) return entries def delete_entries(self, entries): conntrack = nfct.nfct_new() try: for entry in entries: self._set_attributes(conntrack, entry) self._query(nl_constants.NFCT_Q_DESTROY, conntrack) except Exception as e: msg = _("Failed to delete conntrack entries %s") % e LOG.critical(msg) raise exceptions.CTZoneExhaustedError() finally: nfct.nfct_destroy(conntrack) def _query(self, query_type, query_data): result = nfct.nfct_query(self.conntrack_handler, query_type, query_data) if result == nl_constants.NFCT_CB_FAILURE: LOG.warning("Netlink query failed") def _convert_text_to_binary(self, source, addr_family): dest = ctypes.create_string_buffer( nl_constants.IPVERSION_BUFFER[addr_family]) libc.inet_pton(nl_constants.IPVERSION_SOCKET[addr_family], source.encode('utf-8'), dest) return dest.raw def _set_attributes(self, conntrack, entry): ipversion = entry.get('ipversion', 4) for attr, value in entry.items(): set_function = self.set_functions[attr][ipversion] target = TARGET[attr][ipversion] converter = self.converters[attr] if attr in ['src', 'dst']: # convert src and dst of IPv4 and IPv6 into same format value = self._convert_text_to_binary(value, ipversion) set_function(conntrack, target, converter(value)) def _callback_register(self, message_type, callback_func, data): nfct.nfct_callback_register(self.conntrack_handler, message_type, callback_func, data) def _get_ref(self, data): return ctypes.byref(ctypes.c_int(data)) def __enter__(self): self.conntrack_handler = nfct.nfct_open( nl_constants.NFNL_SUBSYS_CTNETLINK, nl_constants.CONNTRACK) if not self.conntrack_handler: msg = _("Failed to open new conntrack handler") LOG.critical(msg) raise exceptions.CTZoneExhaustedError() return self def __exit__(self, *args): nfct.nfct_close(self.conntrack_handler) def _parse_entry(entry, ipversion, zone): """Parse entry from text to Python tuple :param entry: raw conntrack entry :param ipversion: ip version 4 or 6 :return: conntrack entry in Python tuple in format (ipversion, protocol, sport, dport, src_ip, dst_ip, zone) example: (4, 'tcp', '1', '2', '1.1.1.1', '2.2.2.2', 1) The attributes are ordered to be easy to compare with other entries and compare with firewall rule """ protocol = entry[1] parsed_entry = [ipversion, protocol] for attr, position in ATTR_POSITIONS[protocol]: val = entry[position].partition('=')[2] try: parsed_entry.append(int(val)) except ValueError: parsed_entry.append(val) parsed_entry[-1] = zone return tuple(parsed_entry) @privileged.default.entrypoint def list_entries(zone): """List and parse all conntrack entries in zone :param zone: zone in which entries belong to :return: sorted list of conntrack entries in Python tuple with sort key is dest port example: [(4, 'icmp', '8', '0', '1.1.1.1', 
             '2.2.2.2', '1234'), (4, 'tcp', '1', '2', '1.1.1.1',
             '2.2.2.2', '1234')]
    """
    parsed_entries = []
    for ipversion in IP_VERSIONS:
        with ConntrackManager(nl_constants.IPVERSION_SOCKET[ipversion]) \
                as conntrack:
            raw_entries = [entry for entry in conntrack.list_entries()
                           if re.search(r'\bzone={}\b'.format(zone),
                                        entry) is not None]
            for raw_entry in raw_entries:
                _entry = raw_entry.split()
                parsed_entry = _parse_entry(_entry, ipversion, zone)
                parsed_entries.append(parsed_entry)
    # sort by dest port
    return sorted(parsed_entries, key=lambda x: x[3])


@privileged.default.entrypoint
def delete_entries(entries):
    """Delete selected conntrack entries.

    :param entries: list of parsed (as tuple) entries to delete
    :return: None
    """
    entry_args = []
    for entry in entries:
        entry_arg = {'ipversion': entry[0], 'protocol': entry[1]}
        for idx, attr in enumerate(ATTR_POSITIONS[entry_arg['protocol']]):
            entry_arg[attr[0]] = entry[idx + 2]
        entry_args.append(entry_arg)
    with ConntrackManager() as conntrack:
        conntrack.delete_entries(entry_args)


# ---- neutron-16.0.0.0b2.dev214/neutron/privileged/agent/linux/tc_lib.py ----
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
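
# The helpers in this module wrap pyroute2 traffic control (tc) operations
# behind neutron's privileged (privsep) entrypoints: each function resolves
# the device name to a link index, then issues the tc command inside the
# requested namespace.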
import errno import socket from neutron_lib import constants as n_constants import pyroute2 from pyroute2 import protocols as pyroute2_protocols from neutron._i18n import _ from neutron import privileged from neutron.privileged.agent.linux import ip_lib _IP_VERSION_FAMILY_MAP = {n_constants.IP_VERSION_4: socket.AF_INET, n_constants.IP_VERSION_6: socket.AF_INET6} class TrafficControlClassNotFound(RuntimeError): message = _('Traffic control class %(classid)s not found in namespace ' '%(namespace)s.') def __init__(self, message=None, classid=None, namespace=None): message = message or self.message % { 'classid': classid, 'namespace': namespace} super(TrafficControlClassNotFound, self).__init__(message) @privileged.default.entrypoint def add_tc_qdisc(device, namespace=None, **kwargs): """Add TC qdisc""" index = ip_lib.get_link_id(device, namespace) try: with ip_lib.get_iproute(namespace) as ip: ip.tc('replace', index=index, **kwargs) except OSError as e: if e.errno == errno.ENOENT: raise ip_lib.NetworkNamespaceNotFound(netns_name=namespace) raise @privileged.default.entrypoint def list_tc_qdiscs(device, namespace=None): """List all TC qdiscs of a device""" index = ip_lib.get_link_id(device, namespace) try: with ip_lib.get_iproute(namespace) as ip: return ip_lib.make_serializable(ip.get_qdiscs(index=index)) except OSError as e: if e.errno == errno.ENOENT: raise ip_lib.NetworkNamespaceNotFound(netns_name=namespace) raise @privileged.default.entrypoint def delete_tc_qdisc(device, parent=None, kind=None, namespace=None, raise_interface_not_found=True, raise_qdisc_not_found=True): """Delete a TC qdisc of a device""" try: index = ip_lib.get_link_id(device, namespace) args = {} if parent: args['parent'] = parent if kind: args['kind'] = kind with ip_lib.get_iproute(namespace) as ip: ip.tc('del', index=index, **args) except ip_lib.NetworkInterfaceNotFound: if raise_interface_not_found: raise except pyroute2.NetlinkError as e: # NOTE(ralonsoh): tc delete will raise a NetlinkError exception with # code (22, 'Invalid argument') if kind='ingress' and the qdisc does # not exist. This behaviour must be refactored in pyroute2. 
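        # Illustrative outcome of that behaviour (device name is a
        # placeholder): delete_tc_qdisc('tap0', kind='ingress',
        #                               raise_qdisc_not_found=False)
        # returns errno.EINVAL instead of raising.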
if ((e.code == errno.ENOENT or (e.code == errno.EINVAL and kind == 'ingress')) and raise_qdisc_not_found is False): # NOTE(ralonsoh): return error code for testing purposes return e.code raise except OSError as e: if e.errno == errno.ENOENT: raise ip_lib.NetworkNamespaceNotFound(netns_name=namespace) raise @privileged.default.entrypoint def add_tc_policy_class(device, parent, classid, class_type, namespace=None, **kwargs): """Add/replace TC policy class""" try: index = ip_lib.get_link_id(device, namespace) with ip_lib.get_iproute(namespace) as ip: ip.tc('replace-class', kind=class_type, index=index, handle=classid, parent=parent, **kwargs) except OSError as e: if e.errno == errno.ENOENT: raise ip_lib.NetworkNamespaceNotFound(netns_name=namespace) raise @privileged.default.entrypoint def list_tc_policy_classes(device, namespace=None): """List all TC policy classes of a device""" try: index = ip_lib.get_link_id(device, namespace) with ip_lib.get_iproute(namespace) as ip: return ip_lib.make_serializable(ip.get_classes(index=index)) except OSError as e: if e.errno == errno.ENOENT: raise ip_lib.NetworkNamespaceNotFound(netns_name=namespace) raise @privileged.default.entrypoint def delete_tc_policy_class(device, parent, classid, namespace=None, **kwargs): """Delete TC policy class""" try: index = ip_lib.get_link_id(device, namespace) with ip_lib.get_iproute(namespace) as ip: ip.tc('del-class', index=index, handle=classid, parent=parent, **kwargs) except OSError as e: if e.errno == errno.ENOENT: raise ip_lib.NetworkNamespaceNotFound(netns_name=namespace) raise except pyroute2.NetlinkError as e: if e.code == errno.ENOENT: raise TrafficControlClassNotFound(classid=classid, namespace=namespace) @privileged.default.entrypoint def add_tc_filter_match32(device, parent, priority, class_id, keys, protocol=None, namespace=None, **kwargs): """Add TC filter, type: match u32""" # NOTE(ralonsoh): by default (protocol=None), every packet is filtered. protocol = protocol or pyroute2_protocols.ETH_P_ALL try: index = ip_lib.get_link_id(device, namespace) with ip_lib.get_iproute(namespace) as ip: ip.tc('add-filter', kind='u32', index=index, parent=parent, prio=priority, target=class_id, protocol=protocol, keys=keys, **kwargs) except OSError as e: if e.errno == errno.ENOENT: raise ip_lib.NetworkNamespaceNotFound(netns_name=namespace) raise @privileged.default.entrypoint def add_tc_filter_policy(device, parent, priority, rate, burst, mtu, action, protocol=None, keys=None, flowid=1, namespace=None, **kwargs): """Add TC filter, type: policy filter By default (protocol=None), that means every packet is shaped. "keys" and "target" (flowid) parameters are mandatory. If the filter is applied on a classless qdisc, "target" is irrelevant and a default value can be passed. If all packets must be shaped, an empty filter ("keys") can be passed. 
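
    Illustrative call (all values are placeholders, not defaults):
        add_tc_filter_policy('tap0', 'ffff:', 1, rate='1mbit',
                             burst='10kbit', mtu=1500, action='drop')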
""" keys = keys if keys else ['0x0/0x0'] protocol = protocol or pyroute2_protocols.ETH_P_ALL try: index = ip_lib.get_link_id(device, namespace) with ip_lib.get_iproute(namespace) as ip: ip.tc('add-filter', kind='u32', index=index, parent=parent, prio=priority, protocol=protocol, rate=rate, burst=burst, mtu=mtu, action=action, keys=keys, target=flowid, **kwargs) except OSError as e: if e.errno == errno.ENOENT: raise ip_lib.NetworkNamespaceNotFound(netns_name=namespace) raise @privileged.default.entrypoint def list_tc_filters(device, parent, namespace=None, **kwargs): """List TC filters""" try: index = ip_lib.get_link_id(device, namespace) with ip_lib.get_iproute(namespace) as ip: return ip_lib.make_serializable( ip.get_filters(index=index, parent=parent, **kwargs)) except OSError as e: if e.errno == errno.ENOENT: raise ip_lib.NetworkNamespaceNotFound(netns_name=namespace) raise ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.355045 neutron-16.0.0.0b2.dev214/neutron/profiling/0000755000175000017500000000000000000000000020626 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/profiling/__init__.py0000644000175000017500000000000000000000000022725 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/profiling/profiled_decorator.py0000644000175000017500000000573600000000000025061 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import cProfile from datetime import datetime import io import pstats from oslo_config import cfg from oslo_log import log import six from neutron.common import utils from neutron.conf import profiling as profiling_conf_opts LOG = log.getLogger(__name__) CONF = cfg.CONF profiling_conf_opts.register_profiling_opts() SORT_BY = 'cumtime' def profile(f): """Decorator to profile Neutron code. This will profile the decorated method or function with cProfile and log the result. 
""" @six.wraps(f) def profile_wrapper(*args, **kwargs): try: if cfg.CONF.enable_code_profiling: profid = "%s.%s" % (f.__module__, f.__name__) profiler = cProfile.Profile() profiler.enable() start_time = datetime.now() return f(*args, **kwargs) finally: if cfg.CONF.enable_code_profiling: profiler.disable() elapsed_time = datetime.now() - start_time elapsed_time_ms = (elapsed_time.seconds * 1000.0 + elapsed_time.microseconds / 1000.0) stream = io.StringIO() stats = pstats.Stats(profiler, stream=stream).sort_stats( SORT_BY) stats.print_stats(cfg.CONF.code_profiling_calls_to_log) stats.print_callers(cfg.CONF.code_profiling_calls_to_log) stats.print_callees(cfg.CONF.code_profiling_calls_to_log) profiler_info = utils.collect_profiler_info() if not profiler_info: profiler_info = {'base_id': 'not available', 'parent_id': 'not available'} LOG.debug('os-profiler parent trace-id %(parent_id)s ' 'trace-id %(trace_id)s %(elapsed_time)7d millisecs ' 'elapsed for %(method)s(*%(args)s, **%(kwargs)s):' '\n%(profiler_data)s', {'parent_id': profiler_info['parent_id'], 'trace_id': profiler_info['base_id'], 'elapsed_time': elapsed_time_ms, 'method': profid, 'args': args, 'kwargs': kwargs, 'profiler_data': stream.getvalue()}) stream.close() return profile_wrapper ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.355045 neutron-16.0.0.0b2.dev214/neutron/quota/0000755000175000017500000000000000000000000017766 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/quota/__init__.py0000644000175000017500000002551600000000000022110 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Quotas for instances, volumes, and floating ips.""" import sys from neutron_lib import exceptions from oslo_config import cfg from oslo_log import log as logging from oslo_log import versionutils from oslo_utils import importutils import six import webob from neutron._i18n import _ from neutron.conf import quota from neutron.db.quota import api as quota_api from neutron.quota import resource_registry LOG = logging.getLogger(__name__) QUOTA_DB_MODULE = quota.QUOTA_DB_MODULE QUOTA_DB_DRIVER = quota.QUOTA_DB_DRIVER QUOTA_CONF_DRIVER = quota.QUOTA_CONF_DRIVER # Register the configuration options quota.register_quota_opts(quota.core_quota_opts) class ConfDriver(object): """Configuration driver. Driver to perform necessary checks to enforce quotas and obtain quota information. The default driver utilizes the default values in neutron.conf. """ def _get_quotas(self, context, resources): """Get quotas. A helper method which retrieves the quotas for the specific resources identified by keys, and which apply to the current context. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. 
""" quotas = {} for resource in resources.values(): quotas[resource.name] = resource.default return quotas def limit_check(self, context, tenant_id, resources, values): """Check simple quota limits. For limits--those quotas for which there is no usage synchronization function--this method checks that a set of proposed values are permitted by the limit restriction. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns nothing. :param context: The request context, for access checks. :param tenant_id: The tenant_id to check quota. :param resources: A dictionary of the registered resources. :param values: A dictionary of the values to check against the quota. """ # Ensure no value is less than zero unders = [key for key, val in values.items() if val < 0] if unders: raise exceptions.InvalidQuotaValue(unders=sorted(unders)) # Get the applicable quotas quotas = self._get_quotas(context, resources) # Check the quotas and construct a list of the resources that # would be put over limit by the desired values overs = [key for key, val in values.items() if quotas[key] >= 0 and quotas[key] < val] if overs: raise exceptions.OverQuota(overs=sorted(overs), quotas=quotas, usages={}) @staticmethod def get_tenant_quotas(context, resources, tenant_id): quotas = {} sub_resources = dict((k, v) for k, v in resources.items()) for resource in sub_resources.values(): quotas[resource.name] = resource.default return quotas @staticmethod def get_all_quotas(context, resources): return [] @staticmethod def delete_tenant_quota(context, tenant_id): msg = _('Access to this resource was denied.') raise webob.exc.HTTPForbidden(msg) @staticmethod def update_quota_limit(context, tenant_id, resource, limit): msg = _('Access to this resource was denied.') raise webob.exc.HTTPForbidden(msg) def make_reservation(self, context, tenant_id, resources, deltas, plugin): """This driver does not support reservations. This routine is provided for backward compatibility purposes with the API controllers which have now been adapted to make reservations rather than counting resources and checking limits - as this routine ultimately does. """ for resource in deltas.keys(): count = QUOTAS.count(context, resource, plugin, tenant_id) total_use = deltas.get(resource, 0) + count deltas[resource] = total_use self.limit_check( context, tenant_id, resource_registry.get_all_resources(), deltas) # return a fake reservation - the REST controller expects it return quota_api.ReservationInfo('fake', None, None, None) def commit_reservation(self, context, reservation_id): """This is a noop as this driver does not support reservations.""" def cancel_reservation(self, context, reservation_id): """This is a noop as this driver does not support reservations.""" class QuotaEngine(object): """Represent the set of recognized quotas.""" _instance = None @classmethod def get_instance(cls): if not cls._instance: cls._instance = cls() return cls._instance def __init__(self, quota_driver_class=None): """Initialize a Quota object.""" self._driver = None self._driver_class = quota_driver_class def get_driver(self): if self._driver is None: _driver_class = (self._driver_class or cfg.CONF.QUOTAS.quota_driver) if (_driver_class == QUOTA_DB_DRIVER and QUOTA_DB_MODULE not in sys.modules): # If quotas table is not loaded, force config quota driver. 
_driver_class = QUOTA_CONF_DRIVER LOG.info("ConfDriver is used as quota_driver because the " "loaded plugin does not support 'quotas' table.") if isinstance(_driver_class, six.string_types): _driver_class = importutils.import_object(_driver_class) if isinstance(_driver_class, ConfDriver): versionutils.report_deprecated_feature( LOG, ("The quota driver neutron.quota.ConfDriver is " "deprecated as of Liberty. " "neutron.db.quota.driver.DbQuotaDriver should " "be used in its place")) self._driver = _driver_class LOG.info('Loaded quota_driver: %s.', _driver_class) return self._driver def count(self, context, resource_name, *args, **kwargs): """Count a resource. For countable resources, invokes the count() function and returns its result. Arguments following the context and resource are passed directly to the count function declared by the resource. :param context: The request context, for access checks. :param resource_name: The name of the resource, as a string. """ # Get the resource res = resource_registry.get_resource(resource_name) if not res or not hasattr(res, 'count'): raise exceptions.QuotaResourceUnknown(unknown=[resource_name]) return res.count(context, *args, **kwargs) def make_reservation(self, context, tenant_id, deltas, plugin): # Verify that resources are managed by the quota engine # Ensure no value is less than zero unders = [key for key, val in deltas.items() if val < 0] if unders: raise exceptions.InvalidQuotaValue(unders=sorted(unders)) requested_resources = set(deltas.keys()) all_resources = resource_registry.get_all_resources() managed_resources = set([res for res in all_resources.keys() if res in requested_resources]) # Make sure we accounted for all of them... unknown_resources = requested_resources - managed_resources if unknown_resources: raise exceptions.QuotaResourceUnknown( unknown=sorted(unknown_resources)) # FIXME(salv-orlando): There should be no reason for sending all the # resource in the registry to the quota driver, but as other driver # APIs request them, this will be sorted out with a different patch. return self.get_driver().make_reservation( context, tenant_id, all_resources, deltas, plugin) def commit_reservation(self, context, reservation_id): self.get_driver().commit_reservation(context, reservation_id) def cancel_reservation(self, context, reservation_id): self.get_driver().cancel_reservation(context, reservation_id) def limit_check(self, context, tenant_id, **values): """Check simple quota limits. For limits--those quotas for which there is no usage synchronization function--this method checks that a set of proposed values are permitted by the limit restriction. The values to check are given as keyword arguments, where the key identifies the specific quota limit to check, and the value is the proposed value. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it is not a countable resource. If any of the proposed values exceeds the respective quota defined for the tenant, an OverQuota exception will be raised. The exception will include a sorted list with the resources which exceed the quota limit. Otherwise, the method returns nothing. 
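
        Example call (resource names are illustrative only):
            QUOTAS.limit_check(context, tenant_id, network=1, port=5)
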
:param context: Request context :param tenant_id: Tenant for which the quota limit is being checked :param values: Dict specifying requested deltas for each resource """ # TODO(salv-orlando): Deprecate calls to this API # Verify that resources are managed by the quota engine requested_resources = set(values.keys()) managed_resources = set([res for res in resource_registry.get_all_resources() if res in requested_resources]) # Make sure we accounted for all of them... unknown_resources = requested_resources - managed_resources if unknown_resources: raise exceptions.QuotaResourceUnknown( unknown=sorted(unknown_resources)) return self.get_driver().limit_check( context, tenant_id, resource_registry.get_all_resources(), values) QUOTAS = QuotaEngine.get_instance() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/quota/resource.py0000644000175000017500000003636600000000000022205 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import api as db_api from neutron_lib.plugins import constants from neutron_lib.plugins import directory from oslo_config import cfg from oslo_log import log from oslo_utils import excutils from sqlalchemy import exc as sql_exc from sqlalchemy.orm import session as se from neutron._i18n import _ from neutron.db.quota import api as quota_api LOG = log.getLogger(__name__) def _count_resource(context, collection_name, tenant_id): count_getter_name = "get_%s_count" % collection_name getter_name = "get_%s" % collection_name plugins = directory.get_plugins() for pname in sorted(plugins, # inspect core plugin first key=lambda n: n != constants.CORE): # Some plugins support a count method for particular resources, using a # DB's optimized counting features. We try to use that one if present. # Otherwise just use regular getter to retrieve all objects and count # in python, allowing older plugins to still be supported try: obj_count_getter = getattr(plugins[pname], count_getter_name) return obj_count_getter( context, filters={'tenant_id': [tenant_id]}) except (NotImplementedError, AttributeError): try: obj_getter = getattr(plugins[pname], getter_name) obj_list = obj_getter( context, filters={'tenant_id': [tenant_id]}) return len(obj_list) if obj_list else 0 except (NotImplementedError, AttributeError): pass raise NotImplementedError( _('No plugins that support counting %s found.') % collection_name) class BaseResource(object): """Describe a single resource for quota checking.""" def __init__(self, name, flag, plural_name=None): """Initializes a resource. :param name: The name of the resource, i.e., "instances". :param flag: The name of the flag or configuration option :param plural_name: Plural form of the resource name. If not specified, it is generated automatically by appending an 's' to the resource name, unless it ends with a 'y'. 
In that case the last letter is removed, and 'ies' is appended. Dashes are always converted to underscores. """ self.name = name # If a plural name is not supplied, default to adding an 's' to # the resource name, unless the resource name ends in 'y', in which # case remove the 'y' and add 'ies'. Even if the code should not fiddle # too much with English grammar, this is a rather common and easy to # implement rule. if plural_name: self.plural_name = plural_name elif self.name[-1] == 'y': self.plural_name = "%sies" % self.name[:-1] else: self.plural_name = "%ss" % self.name # always convert dashes to underscores self.plural_name = self.plural_name.replace('-', '_') self.flag = flag @property def default(self): """Return the default value of the quota.""" # Any negative value will be interpreted as an infinite quota, # and stored as -1 for compatibility with current behaviour value = getattr(cfg.CONF.QUOTAS, self.flag, cfg.CONF.QUOTAS.default_quota) return max(value, -1) @property def dirty(self): """Return the current state of the Resource instance. :returns: True if the resource count is out of sync with actual date, False if it is in sync, and None if the resource instance does not track usage. """ class CountableResource(BaseResource): """Describe a resource where the counts are determined by a function.""" def __init__(self, name, count, flag=None, plural_name=None): """Initializes a CountableResource. Countable resources are those resources which directly correspond to objects in the database, i.e., network, subnet, etc.,. A CountableResource must be constructed with a counting function, which will be called to determine the current counts of the resource. The counting function will be passed the context, along with the extra positional and keyword arguments that are passed to Quota.count(). It should return an integer specifying the count. :param name: The name of the resource, i.e., "instances". :param count: A callable which returns the count of the resource. The arguments passed are as described above. :param flag: The name of the flag or configuration option which specifies the default value of the quota for this resource. :param plural_name: Plural form of the resource name. If not specified, it is generated automatically by appending an 's' to the resource name, unless it ends with a 'y'. In that case the last letter is removed, and 'ies' is appended. Dashes are always converted to underscores. """ super(CountableResource, self).__init__( name, flag=flag, plural_name=plural_name) self._count_func = count def count(self, context, plugin, tenant_id, **kwargs): # NOTE(ihrachys) _count_resource doesn't receive plugin return self._count_func(context, self.plural_name, tenant_id) class TrackedResource(BaseResource): """Resource which keeps track of its usage data.""" def __init__(self, name, model_class, flag, plural_name=None): """Initializes an instance for a given resource. TrackedResource are directly mapped to data model classes. Resource usage is tracked in the database, and the model class to which this resource refers is monitored to ensure always "fresh" usage data are employed when performing quota checks. This class operates under the assumption that the model class describing the resource has a tenant identifier attribute. :param name: The name of the resource, i.e., "networks". 
:param model_class: The sqlalchemy model class of the resource for which this instance is being created :param flag: The name of the flag or configuration option which specifies the default value of the quota for this resource. :param plural_name: Plural form of the resource name. If not specified, it is generated automatically by appending an 's' to the resource name, unless it ends with a 'y'. In that case the last letter is removed, and 'ies' is appended. Dashes are always converted to underscores. """ super(TrackedResource, self).__init__( name, flag=flag, plural_name=plural_name) # Register events for addition/removal of records in the model class # As tenant_id is immutable for all Neutron objects there is no need # to register a listener for update events self._model_class = model_class self._dirty_tenants = set() self._out_of_sync_tenants = set() @property def dirty(self): return self._dirty_tenants def mark_dirty(self, context): if not self._dirty_tenants: return with db_api.CONTEXT_WRITER.using(context): # It is not necessary to protect this operation with a lock. # Indeed when this method is called the request has been processed # and therefore all resources created or deleted. # dirty_tenants will contain all the tenants for which the # resource count is changed. The list might contain also tenants # for which resource count was altered in other requests, but this # won't be harmful. dirty_tenants_snap = self._dirty_tenants.copy() for tenant_id in dirty_tenants_snap: quota_api.set_quota_usage_dirty(context, self.name, tenant_id) self._out_of_sync_tenants |= dirty_tenants_snap self._dirty_tenants -= dirty_tenants_snap def _db_event_handler(self, mapper, _conn, target): try: tenant_id = target['tenant_id'] except AttributeError: with excutils.save_and_reraise_exception(): LOG.error("Model class %s does not have a tenant_id " "attribute", target) self._dirty_tenants.add(tenant_id) # Retry the operation if a duplicate entry exception is raised. This # can happen is two or more workers are trying to create a resource of a # give kind for the same tenant concurrently. Retrying the operation will # ensure that an UPDATE statement is emitted rather than an INSERT one @db_api.retry_if_session_inactive() def _set_quota_usage(self, context, tenant_id, in_use): return quota_api.set_quota_usage( context, self.name, tenant_id, in_use=in_use) def _resync(self, context, tenant_id, in_use): # Update quota usage usage_info = self._set_quota_usage(context, tenant_id, in_use) self._dirty_tenants.discard(tenant_id) self._out_of_sync_tenants.discard(tenant_id) LOG.debug(("Unset dirty status for tenant:%(tenant_id)s on " "resource:%(resource)s"), {'tenant_id': tenant_id, 'resource': self.name}) return usage_info def resync(self, context, tenant_id): if tenant_id not in self._out_of_sync_tenants: return LOG.debug(("Synchronizing usage tracker for tenant:%(tenant_id)s on " "resource:%(resource)s"), {'tenant_id': tenant_id, 'resource': self.name}) in_use = context.session.query( self._model_class.tenant_id).filter_by( tenant_id=tenant_id).count() # Update quota usage return self._resync(context, tenant_id, in_use) def count_used(self, context, tenant_id, resync_usage=True): """Returns the current usage count for the resource. :param context: The request context. :param tenant_id: The ID of the tenant :param resync_usage: Default value is set to True. Syncs with in_use usage. 
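        :returns: the current usage count of the resource, as an integer.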
""" # Load current usage data, setting a row-level lock on the DB usage_info = quota_api.get_quota_usage_by_resource_and_tenant( context, self.name, tenant_id) # If dirty or missing, calculate actual resource usage querying # the database and set/create usage info data # NOTE: this routine "trusts" usage counters at service startup. This # assumption is generally valid, but if the database is tampered with, # or if data migrations do not take care of usage counters, the # assumption will not hold anymore if (tenant_id in self._dirty_tenants or not usage_info or usage_info.dirty): LOG.debug(("Usage tracker for resource:%(resource)s and tenant:" "%(tenant_id)s is out of sync, need to count used " "quota"), {'resource': self.name, 'tenant_id': tenant_id}) in_use = context.session.query( self._model_class.tenant_id).filter_by( tenant_id=tenant_id).count() # Update quota usage, if requested (by default do not do that, as # typically one counts before adding a record, and that would mark # the usage counter as dirty again) if resync_usage: usage_info = self._resync(context, tenant_id, in_use) else: resource = usage_info.resource if usage_info else self.name tenant_id = usage_info.tenant_id if usage_info else tenant_id dirty = usage_info.dirty if usage_info else True usage_info = quota_api.QuotaUsageInfo( resource, tenant_id, in_use, dirty) LOG.debug(("Quota usage for %(resource)s was recalculated. " "Used quota:%(used)d."), {'resource': self.name, 'used': usage_info.used}) return usage_info.used def count_reserved(self, context, tenant_id): """Return the current reservation count for the resource.""" # NOTE(princenana) Current implementation of reservations # is ephemeral and returns the default value reservations = quota_api.get_reservations_for_resources( context, tenant_id, [self.name]) reserved = reservations.get(self.name, 0) return reserved def count(self, context, _plugin, tenant_id, resync_usage=True): """Return the count of the resource. The _plugin parameter is unused but kept for compatibility with the signature of the count method for CountableResource instances. """ return (self.count_used(context, tenant_id, resync_usage) + self.count_reserved(context, tenant_id)) def _except_bulk_delete(self, delete_context): if delete_context.mapper.class_ == self._model_class: raise RuntimeError(_("%s may not be deleted in bulk because " "it is tracked by the quota engine via " "SQLAlchemy event handlers, which are not " "compatible with bulk deletes.") % self._model_class) def register_events(self): listen = db_api.sqla_listen listen(self._model_class, 'after_insert', self._db_event_handler) listen(self._model_class, 'after_delete', self._db_event_handler) listen(se.Session, 'after_bulk_delete', self._except_bulk_delete) def unregister_events(self): try: db_api.sqla_remove(self._model_class, 'after_insert', self._db_event_handler) db_api.sqla_remove(self._model_class, 'after_delete', self._db_event_handler) db_api.sqla_remove(se.Session, 'after_bulk_delete', self._except_bulk_delete) except sql_exc.InvalidRequestError: LOG.warning("No sqlalchemy event for resource %s found", self.name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/quota/resource_registry.py0000644000175000017500000002155000000000000024122 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import api as db_api from oslo_config import cfg from oslo_log import log import six from neutron._i18n import _ from neutron.quota import resource LOG = log.getLogger(__name__) # Wrappers for easing access to the ResourceRegistry singleton def register_resource(resource): ResourceRegistry.get_instance().register_resource(resource) def register_resource_by_name(resource_name, plural_name=None): ResourceRegistry.get_instance().register_resource_by_name( resource_name, plural_name) def get_all_resources(): return ResourceRegistry.get_instance().resources def unregister_all_resources(): if not ResourceRegistry._instance: return return ResourceRegistry.get_instance().unregister_resources() def get_resource(resource_name): return ResourceRegistry.get_instance().get_resource(resource_name) def is_tracked(resource_name): return ResourceRegistry.get_instance().is_tracked(resource_name) # auxiliary functions and decorators def set_resources_dirty(context): """Sets the dirty bit for resources with usage changes. This routine scans all registered resources, and, for those whose dirty status is True, sets the dirty bit to True in the database for the appropriate tenants. :param context: a Neutron request context with a DB session """ if not cfg.CONF.QUOTAS.track_quota_usage: return for res in get_all_resources().values(): with db_api.CONTEXT_WRITER.using(context): if is_tracked(res.name) and res.dirty: res.mark_dirty(context) def resync_resource(context, resource_name, tenant_id): if not cfg.CONF.QUOTAS.track_quota_usage: return if is_tracked(resource_name): res = get_resource(resource_name) # If the resource is tracked count supports the resync_usage parameter res.resync(context, tenant_id) def mark_resources_dirty(f): """Decorator for functions which alter resource usage. This decorator ensures set_resource_dirty is invoked after completion of the decorated function. """ @six.wraps(f) def wrapper(_self, context, *args, **kwargs): ret_val = f(_self, context, *args, **kwargs) set_resources_dirty(context) return ret_val return wrapper class tracked_resources(object): """Decorator for specifying resources for which usage should be tracked. A plugin class can use this decorator to specify for which resources usage info should be tracked into an appropriate table rather than being explicitly counted. """ def __init__(self, override=False, **kwargs): self._tracked_resources = kwargs self._override = override def __call__(self, f): @six.wraps(f) def wrapper(*args, **kwargs): registry = ResourceRegistry.get_instance() for resource_name in self._tracked_resources: registry.set_tracked_resource( resource_name, self._tracked_resources[resource_name], self._override) return f(*args, **kwargs) return wrapper class ResourceRegistry(object): """Registry for resource subject to quota limits. This class keeps track of Neutron resources for which quota limits are enforced, regardless of whether their usage is being tracked or counted. 
For tracked-usage resources, that is to say those resources for which there are usage counters which are kept in sync with the actual number of rows in the database, this class allows the plugin to register their names either explicitly or through the @tracked_resources decorator, which should preferably be applied to the __init__ method of the class. """ _instance = None @classmethod def get_instance(cls): if cls._instance is None: cls._instance = cls() return cls._instance def __init__(self): self._resources = {} # Map usage tracked resources to the correspondent db model class self._tracked_resource_mappings = {} def __contains__(self, resource): return resource in self._resources def _create_resource_instance(self, resource_name, plural_name): """Factory function for quota Resource. This routine returns a resource instance of the appropriate type according to system configuration. If QUOTAS.track_quota_usage is True, and there is a model mapping for the current resource, this function will return an instance of AccountedResource; otherwise an instance of CountableResource. """ if (not cfg.CONF.QUOTAS.track_quota_usage or resource_name not in self._tracked_resource_mappings): LOG.info("Creating instance of CountableResource for " "resource:%s", resource_name) return resource.CountableResource( resource_name, resource._count_resource, 'quota_%s' % resource_name) else: LOG.info("Creating instance of TrackedResource for " "resource:%s", resource_name) return resource.TrackedResource( resource_name, self._tracked_resource_mappings[resource_name], 'quota_%s' % resource_name) def set_tracked_resource(self, resource_name, model_class, override=False): # Do not do anything if tracking is disabled by config if not cfg.CONF.QUOTAS.track_quota_usage: return if isinstance(self._resources.get(resource_name), resource.CountableResource): raise RuntimeError(_("Resource %s is already registered as a " "countable resource.") % resource_name) current_model_class = self._tracked_resource_mappings.setdefault( resource_name, model_class) # Check whether setdefault also set the entry in the dict if current_model_class != model_class: LOG.debug("A model class is already defined for %(resource)s: " "%(current_model_class)s. Override:%(override)s", {'resource': resource_name, 'current_model_class': current_model_class, 'override': override}) if override: self._tracked_resource_mappings[resource_name] = model_class LOG.debug("Tracking information for resource: %s configured", resource_name) def is_tracked(self, resource_name): """Find out if a resource if tracked or not. :param resource_name: name of the resource. :returns: True if resource_name is registered and tracked, otherwise False. Please note that here when False it returned it simply means that resource_name is not a TrackedResource instance, it does not necessarily mean that the resource is not registered. 
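
        For example (names are illustrative): after a plugin registers
        'floatingip' via set_tracked_resource(), is_tracked('floatingip')
        returns True, while a resource registered only as countable
        returns False.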
""" return resource_name in self._tracked_resource_mappings def register_resource(self, resource): if resource.name in self._resources: LOG.warning('%s is already registered', resource.name) if resource.name in self._tracked_resource_mappings: resource.register_events() self._resources[resource.name] = resource def register_resources(self, resources): for res in resources: self.register_resource(res) def register_resource_by_name(self, resource_name, plural_name=None): """Register a resource by name.""" resource = self._create_resource_instance( resource_name, plural_name) self.register_resource(resource) def unregister_resources(self): """Unregister all resources.""" for (res_name, res) in self._resources.items(): if res_name in self._tracked_resource_mappings: res.unregister_events() self._resources.clear() self._tracked_resource_mappings.clear() def get_resource(self, resource_name): """Return a resource given its name. :returns: The resource instance or None if the resource is not found """ return self._resources.get(resource_name) @property def resources(self): return self._resources ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.355045 neutron-16.0.0.0b2.dev214/neutron/scheduler/0000755000175000017500000000000000000000000020613 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/scheduler/__init__.py0000644000175000017500000000000000000000000022712 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/scheduler/base_resource_filter.py0000644000175000017500000000355300000000000025361 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from neutron_lib.db import api as db_api import six @six.add_metaclass(abc.ABCMeta) class BaseResourceFilter(object): """Encapsulate logic that is specific to the resource type.""" @abc.abstractmethod def filter_agents(self, plugin, context, resource): """Return the agents that can host the resource.""" def bind(self, context, agents, resource_id, force_scheduling=False): """Bind the resource to the agents.""" with db_api.CONTEXT_WRITER.using(context): for agent in agents: # Load is being incremented here to reflect latest agent load # even within the agent report interval. This will be very # much necessary when bulk resource creation happens within a # agent report interval time. # NOTE: The resource being bound might or might not be of the # same type which is accounted for the load. It isn't a # problem because "+ 1" here does not meant to predict # precisely what the load of the agent will be. The value will # be corrected by the agent on the next report interval. 
agent.load += 1 agent.update() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/scheduler/base_scheduler.py0000644000175000017500000000642400000000000024143 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from operator import attrgetter import random from oslo_log import log as logging import six LOG = logging.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class BaseScheduler(object): """The base scheduler (agnostic to resource type). Child classes of BaseScheduler must define the self.resource_filter to filter agents of particular type. """ resource_filter = None @abc.abstractmethod def select(self, plugin, context, resource_hostable_agents, resource_hosted_agents, num_agents_needed): """Return a subset of agents based on the specific scheduling logic.""" def schedule(self, plugin, context, resource): """Select and bind agents to a given resource.""" if not self.resource_filter: return # filter the agents that can host the resource filtered_agents_dict = self.resource_filter.filter_agents( plugin, context, resource) num_agents = filtered_agents_dict['n_agents'] hostable_agents = filtered_agents_dict['hostable_agents'] hosted_agents = filtered_agents_dict['hosted_agents'] chosen_agents = self.select(plugin, context, hostable_agents, hosted_agents, num_agents) # bind the resource to the agents force_scheduling = bool(resource.get('candidate_hosts')) self.resource_filter.bind( context, chosen_agents, resource['id'], force_scheduling) debug_data = ['(%s, %s, %s)' % (agent['agent_type'], agent['host'], resource['id']) for agent in chosen_agents] LOG.debug('Resources bound (agent type, host, resource id): %s', ', '.join(debug_data)) return chosen_agents class BaseChanceScheduler(BaseScheduler): """Choose agents randomly.""" def __init__(self, resource_filter): self.resource_filter = resource_filter def select(self, plugin, context, resource_hostable_agents, resource_hosted_agents, num_agents_needed): chosen_agents = random.sample(resource_hostable_agents, num_agents_needed) return chosen_agents class BaseWeightScheduler(BaseScheduler): """Choose agents based on load.""" def __init__(self, resource_filter): self.resource_filter = resource_filter def select(self, plugin, context, resource_hostable_agents, resource_hosted_agents, num_agents_needed): chosen_agents = sorted(resource_hostable_agents, key=attrgetter('load'))[0:num_agents_needed] return chosen_agents ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/scheduler/dhcp_agent_scheduler.py0000644000175000017500000003754500000000000025335 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections from operator import itemgetter from neutron_lib.api.definitions import availability_zone as az_def from neutron_lib import constants from neutron_lib.db import api as db_api from neutron_lib.objects import exceptions from oslo_config import cfg from oslo_log import log as logging from neutron.agent.common import utils as agent_utils from neutron.db.network_dhcp_agent_binding import models as ndab_model from neutron.objects import agent as agent_obj from neutron.objects import network from neutron.scheduler import base_resource_filter from neutron.scheduler import base_scheduler LOG = logging.getLogger(__name__) class AutoScheduler(object): def auto_schedule_networks(self, plugin, context, host): """Schedule non-hosted networks to the DHCP agent on the specified host. """ agents_per_network = cfg.CONF.dhcp_agents_per_network # a list of (agent, net_ids) tuples bindings_to_add = [] # NOTE(ralonsoh) use writer manager to call get_network. See # https://review.opendev.org/#/c/483518/. Must be changed to READER. with db_api.CONTEXT_WRITER.using(context): fields = ['network_id', 'enable_dhcp', 'segment_id'] subnets = plugin.get_subnets(context, fields=fields) net_ids = {} net_segment_ids = collections.defaultdict(set) for s in subnets: if s['enable_dhcp']: net_segment_ids[s['network_id']].add(s.get('segment_id')) for network_id, segment_ids in net_segment_ids.items(): is_routed_network = any(segment_ids) net_ids[network_id] = is_routed_network if not net_ids: LOG.debug('No non-hosted networks') return False dhcp_agents = agent_obj.Agent.get_objects( context, agent_type=constants.AGENT_TYPE_DHCP, host=host, admin_state_up=True) segment_host_mapping = network.SegmentHostMapping.get_objects( context, host=host) segments_on_host = {s.segment_id for s in segment_host_mapping} for dhcp_agent in dhcp_agents: if agent_utils.is_agent_down(dhcp_agent.heartbeat_timestamp): LOG.warning('DHCP agent %s is not active', dhcp_agent.id) continue for net_id, is_routed_network in net_ids.items(): agents = plugin.get_dhcp_agents_hosting_networks( context, [net_id]) segments_on_network = net_segment_ids[net_id] if is_routed_network: if len(segments_on_network & segments_on_host) == 0: continue else: if len(agents) >= agents_per_network: continue if any(dhcp_agent.id == agent.id for agent in agents): continue net = plugin.get_network(context, net_id) az_hints = (net.get(az_def.AZ_HINTS) or cfg.CONF.default_availability_zones) if (az_hints and dhcp_agent['availability_zone'] not in az_hints): continue bindings_to_add.append( (dhcp_agent, net_id, is_routed_network)) # do it outside transaction so particular scheduling results don't # make other to fail debug_data = [] for agent, net_id, is_routed_network in bindings_to_add: self.resource_filter.bind( context, [agent], net_id, force_scheduling=is_routed_network) debug_data.append('(%s, %s, %s)' % (agent['agent_type'], agent['host'], net_id)) LOG.debug('Resources bound (agent type, host, resource id): %s', ', 
'.join(debug_data)) return True class ChanceScheduler(base_scheduler.BaseChanceScheduler, AutoScheduler): def __init__(self): super(ChanceScheduler, self).__init__(DhcpFilter()) class WeightScheduler(base_scheduler.BaseWeightScheduler, AutoScheduler): def __init__(self): super(WeightScheduler, self).__init__(DhcpFilter()) class AZAwareWeightScheduler(WeightScheduler): def select(self, plugin, context, resource_hostable_agents, resource_hosted_agents, num_agents_needed): """AZ aware scheduling If the network has multiple AZs, agents are scheduled as follows: - select AZ with least agents scheduled for the network - for AZs with same amount of scheduled agents, the AZ which contains least weight agent will be used first - choose agent in the AZ with WeightScheduler """ # The dict to record the agents in each AZ, the record will be sorted # according to the weight of agent. So that the agent with less weight # will be used first. hostable_az_agents = collections.defaultdict(list) # The dict to record the number of agents in each AZ. When the number # of agents in each AZ is the same and num_agents_needed is less than # the number of AZs, we want to select agents with less weight. # Use an OrderedDict here, so that the AZ with least weight agent # will be recorded first in the case described above. And, as a result, # the agent with least weight will be used first. num_az_agents = collections.OrderedDict() # resource_hostable_agents should be a list with agents in the order of # their weight. resource_hostable_agents = ( super(AZAwareWeightScheduler, self).select( plugin, context, resource_hostable_agents, resource_hosted_agents, len(resource_hostable_agents))) for agent in resource_hostable_agents: az_agent = agent['availability_zone'] hostable_az_agents[az_agent].append(agent) if az_agent not in num_az_agents: num_az_agents[az_agent] = 0 if num_agents_needed <= 0: return [] for agent in resource_hosted_agents: az_agent = agent['availability_zone'] if az_agent in num_az_agents: num_az_agents[az_agent] += 1 chosen_agents = [] while num_agents_needed > 0: # 'min' will stably output the first min value in the list. select_az = min(num_az_agents.items(), key=itemgetter(1))[0] # Select the agent in AZ with least weight. select_agent = hostable_az_agents[select_az][0] chosen_agents.append(select_agent) # Update the AZ-agents records. del hostable_az_agents[select_az][0] if not hostable_az_agents[select_az]: del num_az_agents[select_az] else: num_az_agents[select_az] += 1 num_agents_needed -= 1 return chosen_agents class DhcpFilter(base_resource_filter.BaseResourceFilter): def get_vacant_network_dhcp_agent_binding_index( self, context, network_id, force_scheduling): """Return a vacant binding_index to use and whether or not it exists. Each NetworkDhcpAgentBinding has a binding_index which is unique per network_id, and when creating a single binding we require to find a 'vacant' binding_index which isn't yet used - for example if we have bindings with indices 1 and 3, then clearly binding_index == 2 is free. :returns: binding_index. 
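
        Worked example (assuming three DHCP agents and
        dhcp_agents_per_network >= 3): with existing binding indices
        [1, 3], all_indices is {1, 2, 3}, so the vacant index returned
        is 2.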
""" num_agents = agent_obj.Agent.count( context, agent_type=constants.AGENT_TYPE_DHCP) num_agents = min(num_agents, cfg.CONF.dhcp_agents_per_network) bindings = network.NetworkDhcpAgentBinding.get_objects( context, network_id=network_id) binding_indices = [b.binding_index for b in bindings] all_indices = set(range(ndab_model.LOWEST_BINDING_INDEX, num_agents + 1)) open_slots = sorted(list(all_indices - set(binding_indices))) if open_slots: return open_slots[0] # Last chance: if this is a manual scheduling, we're gonna allow # creation of a binding_index even if it will exceed # max_l3_agents_per_router. if force_scheduling: return max(all_indices) + 1 return -1 def bind(self, context, agents, network_id, force_scheduling=False): """Bind the network to the agents.""" # customize the bind logic bound_agents = agents[:] for agent in agents: binding_index = self.get_vacant_network_dhcp_agent_binding_index( context, network_id, force_scheduling) if binding_index < ndab_model.LOWEST_BINDING_INDEX: LOG.debug('Unable to find a vacant binding_index for ' 'network %(network_id)s and agent %(agent_id)s', {'network_id': network_id, 'agent_id': agent.id}) continue # saving agent_id to use it after rollback to avoid # DetachedInstanceError agent_id = agent.id try: network.NetworkDhcpAgentBinding( context, dhcp_agent_id=agent_id, network_id=network_id, binding_index=binding_index).create() except exceptions.NeutronDbObjectDuplicateEntry: # it's totally ok, someone just did our job! bound_agents.remove(agent) LOG.info('Agent %s already present', agent_id) LOG.debug('Network %(network_id)s is scheduled to be ' 'hosted by DHCP agent %(agent_id)s with binding_index ' '%(binding_index)d', {'network_id': network_id, 'agent_id': agent_id, 'binding_index': binding_index}) super(DhcpFilter, self).bind(context, bound_agents, network_id) def filter_agents(self, plugin, context, network): """Return the agents that can host the network. This function returns a dictionary which has 3 keys. n_agents: The number of agents should be scheduled. If n_agents=0, all networks are already scheduled or no more agent can host the network. hostable_agents: A list of agents which can host the network. hosted_agents: A list of agents which already hosts the network. """ agents_dict = self._get_network_hostable_dhcp_agents( plugin, context, network) if not agents_dict['hostable_agents'] or agents_dict['n_agents'] <= 0: return {'n_agents': 0, 'hostable_agents': [], 'hosted_agents': agents_dict['hosted_agents']} return agents_dict def _filter_agents_with_network_access(self, plugin, context, network, hostable_agents): if 'candidate_hosts' in network: hostable_dhcp_hosts = network['candidate_hosts'] else: hostable_dhcp_hosts = plugin.filter_hosts_with_network_access( context, network['id'], [agent['host'] for agent in hostable_agents]) reachable_agents = [agent for agent in hostable_agents if agent['host'] in hostable_dhcp_hosts] return reachable_agents def _get_dhcp_agents_hosting_network(self, plugin, context, network): """Return dhcp agents hosting the given network or None if a given network is already hosted by enough number of agents. 
""" agents_per_network = cfg.CONF.dhcp_agents_per_network # TODO(gongysh) don't schedule the networks with only # subnets whose enable_dhcp is false with db_api.CONTEXT_READER.using(context): network_hosted_agents = plugin.get_dhcp_agents_hosting_networks( context, [network['id']], hosts=network.get('candidate_hosts')) if len(network_hosted_agents) >= agents_per_network: LOG.debug('Network %s is already hosted by enough agents.', network['id']) return return network_hosted_agents def _get_active_agents(self, plugin, context, az_hints): """Return a list of active dhcp agents.""" with db_api.CONTEXT_READER.using(context): filters = {'agent_type': [constants.AGENT_TYPE_DHCP], 'admin_state_up': [True]} if az_hints: filters['availability_zone'] = az_hints active_dhcp_agents = plugin.get_agent_objects( context, filters=filters) if not active_dhcp_agents: LOG.warning('No more DHCP agents') return [] return active_dhcp_agents def _get_network_hostable_dhcp_agents(self, plugin, context, network): """Provide information on hostable DHCP agents for network. The returned value includes the number of agents that will actually host the given network, a list of DHCP agents that can host the given network, and a list of DHCP agents currently hosting the network. """ hosted_agents = self._get_dhcp_agents_hosting_network(plugin, context, network) if hosted_agents is None: return {'n_agents': 0, 'hostable_agents': [], 'hosted_agents': []} n_agents = cfg.CONF.dhcp_agents_per_network - len(hosted_agents) az_hints = (network.get(az_def.AZ_HINTS) or cfg.CONF.default_availability_zones) active_dhcp_agents = self._get_active_agents(plugin, context, az_hints) hosted_agent_ids = [agent['id'] for agent in hosted_agents] if not active_dhcp_agents: return {'n_agents': 0, 'hostable_agents': [], 'hosted_agents': hosted_agents} hostable_dhcp_agents = [ agent for agent in active_dhcp_agents if agent.id not in hosted_agent_ids and plugin.is_eligible_agent( context, True, agent)] hostable_dhcp_agents = self._filter_agents_with_network_access( plugin, context, network, hostable_dhcp_agents) if not hostable_dhcp_agents: result = {'n_agents': 0, 'hostable_agents': [], 'hosted_agents': hosted_agents} else: result = {'n_agents': min(len(hostable_dhcp_agents), n_agents), 'hostable_agents': hostable_dhcp_agents, 'hosted_agents': hosted_agents} hostable_agents_ids = [a['id'] for a in result['hostable_agents']] hosted_agents_ids = [a['id'] for a in result['hosted_agents']] LOG.debug('Network hostable DHCP agents. Network: %(network)s, ' 'hostable agents: %(hostable_agents)s, hosted agents: ' '%(hosted_agents)s', {'network': network['id'], 'hostable_agents': hostable_agents_ids, 'hosted_agents': hosted_agents_ids}) return result ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/scheduler/l3_agent_scheduler.py0000644000175000017500000004376700000000000024740 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import abc import collections import functools import itertools import random from neutron_lib.api.definitions import availability_zone as az_def from neutron_lib import constants as lib_const from neutron_lib.db import api as lib_db_api from neutron_lib.exceptions import l3 as l3_exc from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging import six from neutron.common import utils from neutron.conf.db import l3_hamode_db from neutron.db.models import l3agent as rb_model from neutron.objects import l3agent as rb_obj LOG = logging.getLogger(__name__) cfg.CONF.register_opts(l3_hamode_db.L3_HA_OPTS) @six.add_metaclass(abc.ABCMeta) class L3Scheduler(object): def __init__(self): self.max_ha_agents = cfg.CONF.max_l3_agents_per_router def schedule(self, plugin, context, router_id, candidates=None): """Schedule the router to an active L3 agent. Schedule the router only if it is not already scheduled. """ return self._schedule_router( plugin, context, router_id, candidates=candidates) def _router_has_binding(self, context, router_id, l3_agent_id): router_binding_model = rb_model.RouterL3AgentBinding query = context.session.query(router_binding_model.router_id) query = query.filter(router_binding_model.router_id == router_id, router_binding_model.l3_agent_id == l3_agent_id) return query.count() > 0 def _get_routers_can_schedule(self, plugin, context, routers, l3_agent): """Get the subset of routers that can be scheduled on the L3 agent.""" ids_to_discard = set() for router in routers: # check if the l3 agent is compatible with the router candidates = plugin.get_l3_agent_candidates( context, router, [l3_agent]) if not candidates: ids_to_discard.add(router['id']) return [r for r in routers if r['id'] not in ids_to_discard] def auto_schedule_routers(self, plugin, context, host): """Schedule under-scheduled routers to L3 Agents. An under-scheduled router is a router that is either completely un-scheduled (scheduled to 0 agents), or an HA router that is under-scheduled (scheduled to fewer agents than the max_l3_agents_per_router configuration option). The function finds all the under-scheduled routers and schedules them. :param host: if unspecified, under-scheduled routers are scheduled to all agents (not necessarily from the requesting host). If specified, under-scheduled routers are scheduled only to the agent on 'host'. """ l3_agent = plugin.get_enabled_agent_on_host( context, lib_const.AGENT_TYPE_L3, host) if not l3_agent: return underscheduled_routers = self._get_underscheduled_routers( plugin, context) target_routers = self._get_routers_can_schedule( plugin, context, underscheduled_routers, l3_agent) for router in target_routers: self.schedule(plugin, context, router['id'], candidates=[l3_agent]) def _get_underscheduled_routers(self, plugin, context): underscheduled_routers = [] max_agents_for_ha = plugin.get_number_of_agents_for_scheduling(context) for router, count in plugin.get_routers_l3_agents_count(context): if (count < 1 or router.get('ha', False) and count < max_agents_for_ha): # Either the router was un-scheduled (scheduled to 0 agents), # or it's an HA router and it was under-scheduled (scheduled to # less than max_agents_for_ha). Either way, it should be added # to the list of routers we want to handle.
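# Illustrative example (counts assumed): with max_agents_for_ha = 3, a
# non-HA router with count == 0 and an HA router with count == 2 are
# both collected as under-scheduled here, while an HA router already
# hosted by 3 agents is skipped.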
underscheduled_routers.append(router) return underscheduled_routers def _get_candidates(self, plugin, context, sync_router): """Return L3 agents where a router could be scheduled.""" is_ha = sync_router.get('ha', False) with lib_db_api.CONTEXT_READER.using(context): # allow a router to be hosted by just one enabled l3 agent, # since "active" is only a timing problem: a non-active l3 # agent can return to active at any time current_l3_agents = plugin.get_l3_agents_hosting_routers( context, [sync_router['id']], admin_state_up=True) if current_l3_agents and not is_ha: LOG.debug('Router %(router_id)s is already hosted ' 'by L3 agent %(agent_id)s', {'router_id': sync_router['id'], 'agent_id': current_l3_agents[0]['id']}) return [] active_l3_agents = plugin.get_l3_agents(context, active=True) if not active_l3_agents: LOG.warning('No active L3 agents') return [] candidates = plugin.get_l3_agent_candidates(context, sync_router, active_l3_agents) if not candidates: LOG.warning('No L3 agents can host the router %s', sync_router['id']) return candidates def _bind_routers(self, plugin, context, routers, l3_agent): for router in routers: if router.get('ha'): if not self._router_has_binding(context, router['id'], l3_agent.id): self.create_ha_port_and_bind( plugin, context, router['id'], router['tenant_id'], l3_agent) else: self.bind_router(plugin, context, router['id'], l3_agent.id) @lib_db_api.retry_db_errors def bind_router(self, plugin, context, router_id, agent_id, is_manual_scheduling=False, is_ha=False): """Bind the router to the l3 agent which has been chosen. The function tries to create a RouterL3AgentBinding object and add it to the database. It returns the binding that was created, or None if it failed to create it due to some conflict. In the HA router case, when creating a RouterL3AgentBinding (with some binding_index) fails because some other RouterL3AgentBinding was concurrently created using the same binding_index, the function will retry creating an entry with a new binding_index. This creation will be retried up to lib_db_api.MAX_RETRIES times. If, still in the HA router case, the creation fails because the router has already been bound to the l3 agent in question or has been removed (by a concurrent operation), then no further attempts will be made and the function will return None. Note that for non-HA routers, the function will always make exactly one attempt, regardless of the error preventing the addition of a new RouterL3AgentBinding object to the database.
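For example (an assumed race, not traced from a real run): two workers
scheduling the same HA router may both compute binding_index = 2; the
second insert raises a duplicate-entry error and the retry decorator
re-runs this function, which then picks the next vacant index.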
""" if rb_obj.RouterL3AgentBinding.objects_exist( context, router_id=router_id, l3_agent_id=agent_id): LOG.debug('Router %(router_id)s has already been scheduled ' 'to L3 agent %(agent_id)s.', {'router_id': router_id, 'agent_id': agent_id}) return if not is_ha: binding_index = rb_model.LOWEST_BINDING_INDEX if rb_obj.RouterL3AgentBinding.objects_exist( context, router_id=router_id, binding_index=binding_index): LOG.debug('Non-HA router %s has already been scheduled', router_id) return else: binding_index = plugin.get_vacant_binding_index( context, router_id, is_manual_scheduling) if binding_index < rb_model.LOWEST_BINDING_INDEX: LOG.debug('Unable to find a vacant binding_index for ' 'router %(router_id)s and agent %(agent_id)s', {'router_id': router_id, 'agent_id': agent_id}) return try: binding = rb_obj.RouterL3AgentBinding( context, l3_agent_id=agent_id, router_id=router_id, binding_index=binding_index) binding.create() LOG.debug('Router %(router_id)s is scheduled to L3 agent ' '%(agent_id)s with binding_index %(binding_index)d', {'router_id': router_id, 'agent_id': agent_id, 'binding_index': binding_index}) return binding except db_exc.DBReferenceError: LOG.debug('Router %s has already been removed ' 'by concurrent operation', router_id) def _schedule_router(self, plugin, context, router_id, candidates=None): if not plugin.router_supports_scheduling(context, router_id): return sync_router = plugin.get_router(context, router_id) candidates = candidates or self._get_candidates( plugin, context, sync_router) if not candidates: return elif sync_router.get('ha', False): chosen_agents = self._bind_ha_router(plugin, context, router_id, sync_router.get('tenant_id'), candidates) if not chosen_agents: return chosen_agent = chosen_agents[-1] else: chosen_agent = self._choose_router_agent( plugin, context, candidates) self.bind_router(plugin, context, router_id, chosen_agent.id) return chosen_agent @abc.abstractmethod def _choose_router_agent(self, plugin, context, candidates): """Choose an agent from candidates based on a specific policy.""" pass @abc.abstractmethod def _choose_router_agents_for_ha(self, plugin, context, candidates): """Choose agents from candidates based on a specific policy.""" pass def _get_num_of_agents_for_ha(self, candidates_count): return (min(self.max_ha_agents, candidates_count) if self.max_ha_agents else candidates_count) def _add_port_from_net_and_ensure_vr_id(self, plugin, ctxt, router_db, tenant_id, ha_net): plugin._ensure_vr_id(ctxt, router_db, ha_net) return plugin.add_ha_port(ctxt, router_db.id, ha_net.network_id, tenant_id) def create_ha_port_and_bind(self, plugin, context, router_id, tenant_id, agent, is_manual_scheduling=False): """Creates and binds a new HA port for this agent.""" ctxt = context.elevated() router_db = plugin._get_router(ctxt, router_id) creator = functools.partial(self._add_port_from_net_and_ensure_vr_id, plugin, ctxt, router_db, tenant_id) dep_getter = functools.partial(plugin.get_ha_network, ctxt, tenant_id) dep_creator = functools.partial(plugin._create_ha_network, ctxt, tenant_id) dep_deleter = functools.partial(plugin._delete_ha_network, ctxt) dep_id_attr = 'network_id' # This might fail in case of concurrent calls, which is good for us # as we can skip the rest of this function. 
binding = self.bind_router( plugin, context, router_id, agent['id'], is_manual_scheduling=is_manual_scheduling, is_ha=True) if not binding: return try: port_binding = utils.create_object_with_dependency( creator, dep_getter, dep_creator, dep_id_attr, dep_deleter)[0] # NOTE(ralonsoh): to be migrated to the new facade that can't be # used with "create_object_with_dependency". with lib_db_api.autonested_transaction(context.session): port_binding.l3_agent_id = agent['id'] except db_exc.DBDuplicateEntry: LOG.debug("Router %(router)s already scheduled for agent " "%(agent)s", {'router': router_id, 'agent': agent['id']}) port_id = port_binding.port_id # The call below will also delete the entries from the # L3HARouterAgentPortBinding and RouterPort tables plugin._core_plugin.delete_port(context, port_id, l3_port_check=False) except l3_exc.RouterNotFound: LOG.debug('Router %s has already been removed ' 'by concurrent operation', router_id) # we try to clear the HA network here in case the port we created # blocked the concurrent router delete operation from getting rid # of the HA network ha_net = plugin.get_ha_network(ctxt, tenant_id) if ha_net: plugin.safe_delete_ha_network(ctxt, ha_net, tenant_id) def _filter_scheduled_agents(self, plugin, context, router_id, candidates): hosting = plugin.get_l3_agents_hosting_routers(context, [router_id]) # convert to comparable types hosting_list = [tuple(host) for host in hosting] return list(set(candidates) - set(hosting_list)) def _bind_ha_router(self, plugin, context, router_id, tenant_id, candidates): """Bind an HA router to agents based on a specific policy.""" candidates = self._filter_scheduled_agents(plugin, context, router_id, candidates) chosen_agents = self._choose_router_agents_for_ha( plugin, context, candidates) for agent in chosen_agents: self.create_ha_port_and_bind(plugin, context, router_id, tenant_id, agent) return chosen_agents class ChanceScheduler(L3Scheduler): """Randomly allocate an L3 agent for a router.""" def _choose_router_agent(self, plugin, context, candidates): return random.choice(candidates) def _choose_router_agents_for_ha(self, plugin, context, candidates): num_agents = self._get_num_of_agents_for_ha(len(candidates)) return random.sample(candidates, num_agents) class LeastRoutersScheduler(L3Scheduler): """Allocate to an L3 agent with the least number of routers bound.""" def _choose_router_agent(self, plugin, context, candidates): candidate_ids = [candidate['id'] for candidate in candidates] chosen_agent = plugin.get_l3_agent_with_min_routers( context, candidate_ids) return chosen_agent def _choose_router_agents_for_ha(self, plugin, context, candidates): num_agents = self._get_num_of_agents_for_ha(len(candidates)) ordered_agents = plugin.get_l3_agents_ordered_by_num_routers( context, [candidate['id'] for candidate in candidates]) return ordered_agents[:num_agents] class AZLeastRoutersScheduler(LeastRoutersScheduler): """Availability zone aware scheduler. If a router is an HA router, allocate its L3 agents across AZs according to the router's az_hints.
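For example (hypothetical hints): with az_hints = ['az-1', 'az-2'] and
three HA instances requested, agents are drawn round-robin from the
per-AZ groups, e.g. az-1, az-2, az-1, while each group keeps its
least-loaded ordering.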
""" def _get_az_hints(self, router): return (router.get(az_def.AZ_HINTS) or cfg.CONF.default_availability_zones) def _get_routers_can_schedule(self, plugin, context, routers, l3_agent): """Overwrite L3Scheduler's method to filter by availability zone.""" target_routers = [] for r in routers: az_hints = self._get_az_hints(r) if not az_hints or l3_agent['availability_zone'] in az_hints: target_routers.append(r) if not target_routers: return [] return super(AZLeastRoutersScheduler, self)._get_routers_can_schedule( plugin, context, target_routers, l3_agent) def _get_candidates(self, plugin, context, sync_router): """Overwrite L3Scheduler's method to filter by availability zone.""" all_candidates = ( super(AZLeastRoutersScheduler, self)._get_candidates( plugin, context, sync_router)) candidates = [] az_hints = self._get_az_hints(sync_router) for agent in all_candidates: if not az_hints or agent['availability_zone'] in az_hints: candidates.append(agent) return candidates def _choose_router_agents_for_ha(self, plugin, context, candidates): ordered_agents = plugin.get_l3_agents_ordered_by_num_routers( context, [candidate['id'] for candidate in candidates]) num_agents = self._get_num_of_agents_for_ha(len(ordered_agents)) # Order is kept in each az group_by_az = collections.defaultdict(list) for agent in ordered_agents: az = agent['availability_zone'] group_by_az[az].append(agent) selected_agents = [] for az, agents in itertools.cycle(group_by_az.items()): if not agents: continue selected_agents.append(agents.pop(0)) if len(selected_agents) >= num_agents: break return selected_agents ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/scheduler/l3_ovn_scheduler.py0000644000175000017500000001357300000000000024434 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import abc import copy import random from oslo_log import log import six from neutron.common.ovn import constants as ovn_const from neutron.common.ovn import utils from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf LOG = log.getLogger(__name__) OVN_SCHEDULER_CHANCE = 'chance' OVN_SCHEDULER_LEAST_LOADED = 'leastloaded' @six.add_metaclass(abc.ABCMeta) class OVNGatewayScheduler(object): def __init__(self): pass @abc.abstractmethod def select(self, nb_idl, sb_idl, gateway_name, candidates=None): """Schedule the gateway port of a router to an OVN chassis. Schedule the gateway router port only if it is not already scheduled. 
""" def filter_existing_chassis(self, nb_idl, gw_chassis, physnet, chassis_physnets, existing_chassis): chassis_list = copy.copy(existing_chassis) for chassis_name in existing_chassis: if utils.is_gateway_chassis_invalid(chassis_name, gw_chassis, physnet, chassis_physnets): LOG.debug("Chassis %(chassis)s is invalid for scheduling " "router in physnet: %(physnet)s.", {'chassis': chassis_name, 'physnet': physnet}) chassis_list.remove(chassis_name) return chassis_list def _schedule_gateway(self, nb_idl, sb_idl, gateway_name, candidates, existing_chassis): existing_chassis = existing_chassis or [] candidates = candidates or self._get_chassis_candidates(sb_idl) candidates = list(set(candidates) - set(existing_chassis)) # If no candidates, or gateway scheduled on MAX_GATEWAY_CHASSIS nodes # or all candidates in existing_chassis, return existing_chassis. # Otherwise, if more candidates present, then schedule them. if existing_chassis: if not candidates or ( len(existing_chassis) == ovn_const.MAX_GW_CHASSIS): return existing_chassis if not candidates: return [ovn_const.OVN_GATEWAY_INVALID_CHASSIS] chassis_count = ovn_const.MAX_GW_CHASSIS - len(existing_chassis) # The actual binding of the gateway to a chassis via the options # column or gateway_chassis column in the OVN_Northbound is done # by the caller chassis = self._select_gateway_chassis( nb_idl, candidates)[:chassis_count] # priority of existing chassis is higher than candidates chassis = existing_chassis + chassis LOG.debug("Gateway %s scheduled on chassis %s", gateway_name, chassis) return chassis @abc.abstractmethod def _select_gateway_chassis(self, nb_idl, candidates): """Choose a chassis from candidates based on a specific policy.""" def _get_chassis_candidates(self, sb_idl): # TODO(azbiswas): Allow selection of a specific type of chassis when # the upstream code merges. # return (sb_idl.get_all_chassis('gateway_router') or # sb_idl.get_all_chassis()) return sb_idl.get_all_chassis() class OVNGatewayChanceScheduler(OVNGatewayScheduler): """Randomly select an chassis for a gateway port of a router""" def select(self, nb_idl, sb_idl, gateway_name, candidates=None, existing_chassis=None): return self._schedule_gateway(nb_idl, sb_idl, gateway_name, candidates, existing_chassis) def _select_gateway_chassis(self, nb_idl, candidates): candidates = copy.deepcopy(candidates) random.shuffle(candidates) return candidates class OVNGatewayLeastLoadedScheduler(OVNGatewayScheduler): """Select the least loaded chassis for a gateway port of a router""" def select(self, nb_idl, sb_idl, gateway_name, candidates=None, existing_chassis=None): return self._schedule_gateway(nb_idl, sb_idl, gateway_name, candidates, existing_chassis) @staticmethod def _get_chassis_load_by_prios(chassis_info): """Retrieve the amount of ports by priorities hosted in the chassis. @param chassis_info: list of (port, prio) hosted by this chassis @type chassis_info: [] @return: A list of (prio, number_of_ports) tuples. 
""" chassis_load = {} for lrp, prio in chassis_info: chassis_load[prio] = chassis_load.get(prio, 0) + 1 return chassis_load.items() @staticmethod def _get_chassis_load(chassis): chassis_ports_prios = chassis[1] return sorted( OVNGatewayLeastLoadedScheduler._get_chassis_load_by_prios( chassis_ports_prios), reverse=True) def _select_gateway_chassis(self, nb_idl, candidates): chassis_bindings = nb_idl.get_all_chassis_gateway_bindings(candidates) return [chassis for chassis, load in sorted(chassis_bindings.items(), key=OVNGatewayLeastLoadedScheduler._get_chassis_load)] OVN_SCHEDULER_STR_TO_CLASS = { OVN_SCHEDULER_CHANCE: OVNGatewayChanceScheduler, OVN_SCHEDULER_LEAST_LOADED: OVNGatewayLeastLoadedScheduler} def get_scheduler(): return OVN_SCHEDULER_STR_TO_CLASS[ovn_conf.get_ovn_l3_scheduler()]() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.359045 neutron-16.0.0.0b2.dev214/neutron/server/0000755000175000017500000000000000000000000020143 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/server/__init__.py0000644000175000017500000000503200000000000022254 0ustar00coreycorey00000000000000# Copyright 2011 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # If ../neutron/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... import os import sys from oslo_config import cfg from neutron._i18n import _ from neutron.common import config from neutron.common import profiler # NOTE(annp): These environment variables are required for deploying # neutron-api under mod_wsgi. Currently, these variables are set as DevStack's # default. If you intend to use other tools for deploying neutron-api under # mod_wsgi, you should export these variables with your values. 
NEUTRON_CONF = 'neutron.conf' NEUTRON_CONF_DIR = '/etc/neutron/' NEUTRON_PLUGIN_CONF = 'plugins/ml2/ml2_conf.ini' def _get_config_files(env=None): if env is None: env = os.environ dirname = env.get('OS_NEUTRON_CONFIG_DIR', NEUTRON_CONF_DIR).strip() files = [s.strip() for s in env.get('OS_NEUTRON_CONFIG_FILES', '').split(';') if s.strip()] if not files: files = [NEUTRON_CONF, NEUTRON_PLUGIN_CONF] return [os.path.join(dirname, fname) for fname in files] def _init_configuration(): # the configuration will be read into the cfg.CONF global data structure conf_files = _get_config_files() config.init(sys.argv[1:], default_config_files=conf_files) config.setup_logging() config.set_config_defaults() if not cfg.CONF.config_file: sys.exit(_("ERROR: Unable to find configuration file via the default" " search paths (~/.neutron/, ~/, /etc/neutron/, /etc/) and" " the '--config-file' option!")) def boot_server(server_func): _init_configuration() try: server_func() except KeyboardInterrupt: pass except RuntimeError as e: sys.exit(_("ERROR: %s") % e) def get_application(): _init_configuration() profiler.setup('neutron-server', cfg.CONF.host) return config.load_paste_app('neutron') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/server/rpc_eventlet.py0000644000175000017500000000270500000000000023213 0ustar00coreycorey00000000000000#!/usr/bin/env python # Copyright 2011 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # If ../neutron/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... from neutron_lib.api import attributes from oslo_log import log from neutron.api import extensions from neutron import manager from neutron import service LOG = log.getLogger(__name__) def eventlet_rpc_server(): LOG.info("Eventlet based AMQP RPC server starting...") try: manager.init() ext_mgr = extensions.PluginAwareExtensionManager.get_instance() ext_mgr.extend_resources("2.0", attributes.RESOURCES) rpc_workers_launcher = service.start_rpc_workers() except NotImplementedError: LOG.info("RPC was already started in parent process by " "plugin.") else: rpc_workers_launcher.wait() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/server/wsgi_eventlet.py0000644000175000017500000000274400000000000023403 0ustar00coreycorey00000000000000#!/usr/bin/env python # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import eventlet from oslo_log import log from neutron import service LOG = log.getLogger(__name__) def eventlet_wsgi_server(): neutron_api = service.serve_wsgi(service.NeutronApiService) start_api_and_rpc_workers(neutron_api) def start_api_and_rpc_workers(neutron_api): try: worker_launcher = service.start_all_workers(neutron_api) pool = eventlet.GreenPool() api_thread = pool.spawn(neutron_api.wait) plugin_workers_thread = pool.spawn(worker_launcher.wait) # api and other workers should die together. When one dies, # kill the other. api_thread.link(lambda gt: plugin_workers_thread.kill()) plugin_workers_thread.link(lambda gt: api_thread.kill()) pool.waitall() except NotImplementedError: LOG.info("RPC was already started in parent process by " "plugin.") neutron_api.wait() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/service.py0000644000175000017500000003604700000000000020661 0ustar00coreycorey00000000000000# Copyright 2011 VMware, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import inspect import os import random from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import context from neutron_lib.db import api as session from neutron_lib.plugins import directory from neutron_lib import rpc as n_rpc from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_messaging import server as rpc_server from oslo_service import loopingcall from oslo_service import service as common_service from oslo_utils import excutils from oslo_utils import importutils import psutil from neutron.common import config from neutron.common import profiler from neutron.conf import service from neutron import worker as neutron_worker from neutron import wsgi service.register_service_opts(service.SERVICE_OPTS) service.register_service_opts(service.RPC_EXTRA_OPTS) LOG = logging.getLogger(__name__) class WsgiService(object): """Base class for WSGI based services. 
For each api you define, you must also define these flags: :_listen: The address on which to listen :_listen_port: The port on which to listen """ def __init__(self, app_name): self.app_name = app_name self.wsgi_app = None def start(self): self.wsgi_app = _run_wsgi(self.app_name) def wait(self): self.wsgi_app.wait() class NeutronApiService(WsgiService): """Class for neutron-api service.""" def __init__(self, app_name): profiler.setup('neutron-server', cfg.CONF.host) super(NeutronApiService, self).__init__(app_name) @classmethod def create(cls, app_name='neutron'): # Setup logging early config.setup_logging() service = cls(app_name) return service def serve_wsgi(cls): try: service = cls.create() service.start() except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Unrecoverable error: please check log ' 'for details.') registry.publish(resources.PROCESS, events.BEFORE_SPAWN, service) return service class RpcWorker(neutron_worker.NeutronBaseWorker): """Wraps a worker to be handled by ProcessLauncher""" start_listeners_method = 'start_rpc_listeners' def __init__(self, plugins, worker_process_count=1): super(RpcWorker, self).__init__( worker_process_count=worker_process_count ) self._plugins = plugins self._servers = [] def start(self): super(RpcWorker, self).start(desc="rpc worker") for plugin in self._plugins: if hasattr(plugin, self.start_listeners_method): try: servers = getattr(plugin, self.start_listeners_method)() except NotImplementedError: continue self._servers.extend(servers) def wait(self): try: self._wait() except Exception: LOG.exception('done with wait') raise def _wait(self): LOG.debug('calling RpcWorker wait()') for server in self._servers: if isinstance(server, rpc_server.MessageHandlingServer): LOG.debug('calling wait on %s', server) server.wait() else: LOG.debug('NOT calling wait on %s', server) LOG.debug('returning from RpcWorker wait()') def stop(self): LOG.debug('calling RpcWorker stop()') for server in self._servers: if isinstance(server, rpc_server.MessageHandlingServer): LOG.debug('calling stop on %s', server) server.stop() @staticmethod def reset(): config.reset_service() class RpcReportsWorker(RpcWorker): start_listeners_method = 'start_rpc_state_reports_listener' def _get_worker_count(): # Start with the number of CPUs num_workers = processutils.get_worker_count() # Now don't use more than half the system memory, assuming # a steady-state bloat of around 2GB. mem = psutil.virtual_memory() mem_workers = int(mem.total / (2 * 1024 * 1024 * 1024)) if mem_workers < num_workers: num_workers = mem_workers # And just in case, always at least one. if num_workers <= 0: num_workers = 1 return num_workers def _get_rpc_workers(plugin=None): if plugin is None: plugin = directory.get_plugin() service_plugins = directory.get_plugins().values() workers = cfg.CONF.rpc_workers if workers is None: # By default, half as many rpc workers as api workers workers = int(_get_api_workers() / 2) if workers < 1: workers = 1 # If workers > 0 then start_rpc_listeners would be called in a # subprocess and we cannot simply catch the NotImplementedError. It is # simpler to check this up front by testing whether the plugin supports # multiple RPC workers. 
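# Illustrative sizing (values assumed): with api_workers = 8 and
# rpc_workers left unset, the default below is int(8 / 2) = 4 RPC
# workers, clamped to a minimum of 1.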
if not plugin.rpc_workers_supported(): LOG.debug("Active plugin doesn't implement start_rpc_listeners") if workers > 0: LOG.error("'rpc_workers = %d' ignored because " "start_rpc_listeners is not implemented.", workers) raise NotImplementedError() # passing service plugins only, because core plugin is among them rpc_workers = [RpcWorker(service_plugins, worker_process_count=workers)] if (cfg.CONF.rpc_state_report_workers > 0 and plugin.rpc_state_report_workers_supported()): rpc_workers.append( RpcReportsWorker( [plugin], worker_process_count=cfg.CONF.rpc_state_report_workers ) ) return rpc_workers def _get_plugins_workers(): # NOTE(twilson) get_plugins also returns the core plugin plugins = directory.get_unique_plugins() # TODO(twilson) Instead of defaulting here, come up with a good way to # share a common get_workers default between NeutronPluginBaseV2 and # ServicePluginBase return [ plugin_worker for plugin in plugins if hasattr(plugin, 'get_workers') for plugin_worker in plugin.get_workers() ] class AllServicesNeutronWorker(neutron_worker.NeutronBaseWorker): def __init__(self, services, worker_process_count=1): super(AllServicesNeutronWorker, self).__init__(worker_process_count) self._services = services self._launcher = common_service.Launcher(cfg.CONF, restart_method='mutate') def start(self): for srv in self._services: self._launcher.launch_service(srv) super(AllServicesNeutronWorker, self).start(desc="services worker") def stop(self): self._launcher.stop() def wait(self): self._launcher.wait() def reset(self): self._launcher.restart() def _start_workers(workers, neutron_api=None): process_workers = [ plugin_worker for plugin_worker in workers if plugin_worker.worker_process_count > 0 ] try: if process_workers: # Get eventual already existing instance from WSGI app worker_launcher = None if neutron_api: worker_launcher = neutron_api.wsgi_app.process_launcher if worker_launcher is None: worker_launcher = common_service.ProcessLauncher( cfg.CONF, wait_interval=1.0, restart_method='mutate' ) # add extra process worker and spawn there all workers with # worker_process_count == 0 thread_workers = [ plugin_worker for plugin_worker in workers if plugin_worker.worker_process_count < 1 ] if thread_workers: process_workers.append( AllServicesNeutronWorker(thread_workers) ) # dispose the whole pool before os.fork, otherwise there will # be shared DB connections in child processes which may cause # DB errors. 
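# Minimal illustration of the hazard being avoided (assumed scenario):
# a database connection checked out before os.fork() is inherited by
# the child, and two processes interleaving traffic on one socket
# corrupt the wire protocol; disposing the pool makes each worker open
# fresh connections.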
session.get_context_manager().dispose_pool() for worker in process_workers: worker_launcher.launch_service(worker, worker.worker_process_count) else: worker_launcher = common_service.ServiceLauncher(cfg.CONF) for worker in workers: worker_launcher.launch_service(worker) return worker_launcher except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Unrecoverable error: please check log for ' 'details.') def start_all_workers(neutron_api=None): workers = _get_rpc_workers() + _get_plugins_workers() launcher = _start_workers(workers, neutron_api) registry.publish(resources.PROCESS, events.AFTER_SPAWN, None) return launcher def start_rpc_workers(): rpc_workers = _get_rpc_workers() LOG.debug('using launcher for rpc, workers=%s', cfg.CONF.rpc_workers) launcher = _start_workers(rpc_workers) registry.publish(resources.PROCESS, events.AFTER_SPAWN, None) return launcher def start_plugins_workers(): plugins_workers = _get_plugins_workers() return _start_workers(plugins_workers) def _get_api_workers(): workers = cfg.CONF.api_workers if workers is None: workers = _get_worker_count() return workers def _run_wsgi(app_name): app = config.load_paste_app(app_name) if not app: LOG.error('No known API applications configured.') return return run_wsgi_app(app) def run_wsgi_app(app): server = wsgi.Server("Neutron") server.start(app, cfg.CONF.bind_port, cfg.CONF.bind_host, workers=_get_api_workers(), desc="api worker") LOG.info("Neutron service started, listening on %(host)s:%(port)s", {'host': cfg.CONF.bind_host, 'port': cfg.CONF.bind_port}) return server class Service(n_rpc.Service): """Service object for binaries running on hosts. A service takes a manager and enables rpc by listening to queues based on topic. It also periodically runs tasks on the manager. """ def __init__(self, host, binary, topic, manager, report_interval=None, periodic_interval=None, periodic_fuzzy_delay=None, *args, **kwargs): self.binary = binary self.manager_class_name = manager manager_class = importutils.import_class(self.manager_class_name) self.manager = manager_class(host=host, *args, **kwargs) self.report_interval = report_interval self.periodic_interval = periodic_interval self.periodic_fuzzy_delay = periodic_fuzzy_delay self.saved_args, self.saved_kwargs = args, kwargs self.timers = [] profiler.setup(binary, host) super(Service, self).__init__(host, topic, manager=self.manager) def start(self): self.manager.init_host() super(Service, self).start() if self.report_interval: pulse = loopingcall.FixedIntervalLoopingCall(self.report_state) pulse.start(interval=self.report_interval, initial_delay=self.report_interval) self.timers.append(pulse) if self.periodic_interval: if self.periodic_fuzzy_delay: initial_delay = random.randint(0, self.periodic_fuzzy_delay) else: initial_delay = None periodic = loopingcall.FixedIntervalLoopingCall( self.periodic_tasks) periodic.start(interval=self.periodic_interval, initial_delay=initial_delay) self.timers.append(periodic) self.manager.after_start() def __getattr__(self, key): manager = self.__dict__.get('manager', None) return getattr(manager, key) @classmethod def create(cls, host=None, binary=None, topic=None, manager=None, report_interval=None, periodic_interval=None, periodic_fuzzy_delay=None): """Instantiates class and passes back application object. 
:param host: defaults to cfg.CONF.host :param binary: defaults to basename of executable :param topic: defaults to bin_name - 'neutron-' part :param manager: defaults to cfg.CONF._manager :param report_interval: defaults to cfg.CONF.report_interval :param periodic_interval: defaults to cfg.CONF.periodic_interval :param periodic_fuzzy_delay: defaults to cfg.CONF.periodic_fuzzy_delay """ if not host: host = cfg.CONF.host if not binary: binary = os.path.basename(inspect.stack()[-1][1]) if not topic: topic = binary.rpartition('neutron-')[2] topic = topic.replace("-", "_") if not manager: manager = cfg.CONF.get('%s_manager' % topic, None) if report_interval is None: report_interval = cfg.CONF.report_interval if periodic_interval is None: periodic_interval = cfg.CONF.periodic_interval if periodic_fuzzy_delay is None: periodic_fuzzy_delay = cfg.CONF.periodic_fuzzy_delay service_obj = cls(host, binary, topic, manager, report_interval=report_interval, periodic_interval=periodic_interval, periodic_fuzzy_delay=periodic_fuzzy_delay) return service_obj def kill(self): """Destroy the service object.""" self.stop() def stop(self): super(Service, self).stop() for x in self.timers: try: x.stop() except Exception: LOG.exception("Exception occurs when timer stops") self.timers = [] self.manager.stop() def wait(self): super(Service, self).wait() for x in self.timers: try: x.wait() except Exception: LOG.exception("Exception occurs when waiting for timer") def reset(self): config.reset_service() def periodic_tasks(self, raise_on_error=False): """Tasks to be run at a periodic interval.""" ctxt = context.get_admin_context() self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error) def report_state(self): """Update the state of this service.""" # Todo(gongysh) report state to neutron server pass ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.359045 neutron-16.0.0.0b2.dev214/neutron/services/0000755000175000017500000000000000000000000020460 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/__init__.py0000644000175000017500000000000000000000000022557 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.359045 neutron-16.0.0.0b2.dev214/neutron/services/auto_allocate/0000755000175000017500000000000000000000000023274 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/auto_allocate/__init__.py0000644000175000017500000000000000000000000025373 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/auto_allocate/db.py0000644000175000017500000004175600000000000024250 0ustar00coreycorey00000000000000# Copyright 2015-2016 Hewlett Packard Enterprise Development Company, LP # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import constants as api_const from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.api.definitions import network as net_def from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib.db import api as db_api from neutron_lib.db import resource_extend from neutron_lib.db import utils as db_utils from neutron_lib import exceptions as n_exc from neutron_lib.objects import exceptions as obj_exc from neutron_lib.plugins import constants from neutron_lib.plugins import directory from neutron_lib.plugins import utils as p_utils from oslo_log import log as logging from neutron._i18n import _ from neutron.objects import auto_allocate as auto_allocate_obj from neutron.objects import base as base_obj from neutron.objects import network as net_obj from neutron.services.auto_allocate import exceptions LOG = logging.getLogger(__name__) CHECK_REQUIREMENTS = 'dry-run' def _ensure_external_network_default_value_callback( resource, event, trigger, **kwargs): """Ensure the is_default db field matches the create/update request.""" # TODO(boden): remove shim once all callbacks use payloads if 'payload' in kwargs: _request = kwargs['payload'].request_body _context = kwargs['payload'].context _network = kwargs['payload'].desired_state _orig = kwargs['payload'].states[0] else: _request = kwargs['request'] _context = kwargs['context'] _network = kwargs['network'] _orig = kwargs.get('original_network') @db_api.retry_if_session_inactive() def _do_ensure_external_network_default_value_callback( context, request, orig, network): is_default = request.get(api_const.IS_DEFAULT) if is_default is None: return if is_default: # ensure only one default external network at any given time pager = base_obj.Pager(limit=1) objs = net_obj.ExternalNetwork.get_objects(context, _pager=pager, is_default=True) if objs: if objs[0] and network['id'] != objs[0].network_id: raise exceptions.DefaultExternalNetworkExists( net_id=objs[0].network_id) if orig and orig.get(api_const.IS_DEFAULT) == is_default: return network[api_const.IS_DEFAULT] = is_default # Reflect the status of the is_default on the create/update request obj = net_obj.ExternalNetwork.get_object(context, network_id=network['id']) if obj: obj.is_default = is_default obj.update() _do_ensure_external_network_default_value_callback( _context, _request, _orig, _network) @resource_extend.has_resource_extenders class AutoAllocatedTopologyMixin(object): def __new__(cls, *args, **kwargs): # NOTE(kevinbenton): we subscribe on object construction because # the tests blow away the callback manager for each run new = super(AutoAllocatedTopologyMixin, cls).__new__(cls, *args, **kwargs) registry.subscribe(_ensure_external_network_default_value_callback, resources.NETWORK, events.PRECOMMIT_UPDATE) registry.subscribe(_ensure_external_network_default_value_callback, resources.NETWORK, events.PRECOMMIT_CREATE) return new # TODO(armax): if a tenant modifies auto allocated resources under # the hood the behavior of the get_auto_allocated_topology API is # 
undetermined. Consider adding callbacks to deal with the following # situations: # - insert subnet -> plug router interface # - delete router -> remove the entire topology # - update subnet -> prevent operation # - update router gateway -> prevent operation # - ... @property def core_plugin(self): if not getattr(self, '_core_plugin', None): self._core_plugin = directory.get_plugin() return self._core_plugin @property def l3_plugin(self): if not getattr(self, '_l3_plugin', None): self._l3_plugin = directory.get_plugin(constants.L3) return self._l3_plugin @staticmethod @resource_extend.extends([net_def.COLLECTION_NAME]) def _extend_external_network_default(net_res, net_db): """Add is_default field to 'show' response.""" if net_db.external is not None: net_res[api_const.IS_DEFAULT] = net_db.external.is_default return net_res def get_auto_allocated_topology(self, context, tenant_id, fields=None): """Return tenant's network associated to auto-allocated topology. The topology will be provisioned upon return, if network is missing. """ fields = fields or [] tenant_id = self._validate(context, tenant_id) if CHECK_REQUIREMENTS in fields: # for dry-run requests, simply validates that subsequent # requests can be fulfilled based on a set of requirements # such as existence of default networks, pools, etc. return self._check_requirements(context, tenant_id) elif fields: raise n_exc.BadRequest(resource='auto_allocate', msg=_("Unrecognized field")) # Check for an existent topology network_id = self._get_auto_allocated_network(context, tenant_id) if network_id: return self._response(network_id, tenant_id, fields=fields) # See if we indeed have an external network to connect to, otherwise # we will fail fast default_external_network = self._get_default_external_network( context) # If we reach this point, then we got some work to do! network_id = self._build_topology( context, tenant_id, default_external_network) return self._response(network_id, tenant_id, fields=fields) def delete_auto_allocated_topology(self, context, tenant_id): tenant_id = self._validate(context, tenant_id) topology = self._get_auto_allocated_topology(context, tenant_id) if topology: subnets = self.core_plugin.get_subnets( context, filters={'network_id': [topology['network_id']]}) self._cleanup( context, network_id=topology['network_id'], router_id=topology['router_id'], subnets=subnets) def _build_topology(self, context, tenant_id, default_external_network): """Build the network topology and returns its network UUID.""" try: subnets = self._provision_tenant_private_network( context, tenant_id) network_id = subnets[0]['network_id'] router = self._provision_external_connectivity( context, default_external_network, subnets, tenant_id) network_id = self._save( context, tenant_id, network_id, router['id'], subnets) return network_id except exceptions.UnknownProvisioningError as e: # Clean partially provisioned topologies, and reraise the # error. If it can be retried, so be it. LOG.error("Unknown error while provisioning topology for " "tenant %(tenant_id)s. 
Reason: %(reason)s", {'tenant_id': tenant_id, 'reason': e}) self._cleanup( context, network_id=e.network_id, router_id=e.router_id, subnets=e.subnets) raise e.error def _check_requirements(self, context, tenant_id): """Raise if requirements are not met.""" self._get_default_external_network(context) try: self._get_supported_subnetpools(context) except n_exc.NotFound: raise exceptions.AutoAllocationFailure( reason=_("No default subnetpools defined")) return {'id': 'dry-run=pass', 'tenant_id': tenant_id} def _validate(self, context, tenant_id): """Validate and return the tenant to be associated to the topology.""" if tenant_id == 'None': # NOTE(HenryG): the client might be sending us astray by # passing no tenant; this is really meant to be the tenant # issuing the request, therefore let's get it from the context tenant_id = context.tenant_id if not context.is_admin and tenant_id != context.tenant_id: raise n_exc.NotAuthorized() return tenant_id def _get_auto_allocated_topology(self, context, tenant_id): """Return the auto allocated topology record if present or None.""" return auto_allocate_obj.AutoAllocatedTopology.get_object( context, project_id=tenant_id) def _get_auto_allocated_network(self, context, tenant_id): """Get the auto allocated network for the tenant.""" network = self._get_auto_allocated_topology(context, tenant_id) if network: return network['network_id'] @staticmethod def _response(network_id, tenant_id, fields=None): """Build response for auto-allocated network.""" res = { 'id': network_id, 'tenant_id': tenant_id } return db_utils.resource_fields(res, fields) def _get_default_external_network(self, context): """Get the default external network for the deployment.""" default_external_networks = net_obj.ExternalNetwork.get_objects( context, is_default=True) if not default_external_networks: LOG.error("Unable to find default external network " "for deployment, please create/assign one to " "allow auto-allocation to work correctly.") raise exceptions.AutoAllocationFailure( reason=_("No default router:external network")) if len(default_external_networks) > 1: LOG.error("Multiple external default networks detected. 
" "Network %s is true 'default'.", default_external_networks[0]['network_id']) return default_external_networks[0].network_id def _get_supported_subnetpools(self, context): """Return the default subnet pools available.""" default_subnet_pools = [ self.core_plugin.get_default_subnetpool( context, ver) for ver in (4, 6) ] available_pools = [ s for s in default_subnet_pools if s ] if not available_pools: LOG.error("No default pools available") raise n_exc.NotFound() return available_pools def _provision_tenant_private_network(self, context, tenant_id): """Create a tenant private network/subnets.""" network = None try: network_args = { 'name': 'auto_allocated_network', 'admin_state_up': False, 'tenant_id': tenant_id, 'shared': False } network = p_utils.create_network( self.core_plugin, context, {'network': network_args}) subnets = [] for pool in self._get_supported_subnetpools(context): subnet_args = { 'name': 'auto_allocated_subnet_v%s' % pool['ip_version'], 'network_id': network['id'], 'tenant_id': tenant_id, 'ip_version': pool['ip_version'], 'subnetpool_id': pool['id'], } subnets.append(p_utils.create_subnet( self.core_plugin, context, {'subnet': subnet_args})) return subnets except (n_exc.SubnetAllocationError, ValueError, n_exc.BadRequest, n_exc.NotFound) as e: LOG.error("Unable to auto allocate topology for tenant " "%(tenant_id)s due to missing or unmet " "requirements. Reason: %(reason)s", {'tenant_id': tenant_id, 'reason': e}) if network: self._cleanup(context, network['id']) raise exceptions.AutoAllocationFailure( reason=_("Unable to provide tenant private network")) except Exception as e: network_id = network['id'] if network else None raise exceptions.UnknownProvisioningError(e, network_id=network_id) def _provision_external_connectivity(self, context, default_external_network, subnets, tenant_id): """Uplink tenant subnet(s) to external network.""" router_args = { 'name': 'auto_allocated_router', l3_apidef.EXTERNAL_GW_INFO: { 'network_id': default_external_network}, 'tenant_id': tenant_id, 'admin_state_up': True } router = None attached_subnets = [] try: router = self.l3_plugin.create_router( context, {'router': router_args}) for subnet in subnets: self.l3_plugin.add_router_interface( context, router['id'], {'subnet_id': subnet['id']}) attached_subnets.append(subnet) return router except n_exc.BadRequest as e: LOG.error("Unable to auto allocate topology for tenant " "%(tenant_id)s because of router errors. " "Reason: %(reason)s", {'tenant_id': tenant_id, 'reason': e}) router_id = router['id'] if router else None self._cleanup(context, network_id=subnets[0]['network_id'], router_id=router_id, subnets=attached_subnets) raise exceptions.AutoAllocationFailure( reason=_("Unable to provide external connectivity")) except Exception as e: router_id = router['id'] if router else None raise exceptions.UnknownProvisioningError( e, network_id=subnets[0]['network_id'], router_id=router_id, subnets=subnets) def _save(self, context, tenant_id, network_id, router_id, subnets): """Save auto-allocated topology, or revert in case of DB errors.""" try: auto_allocate_obj.AutoAllocatedTopology( context, project_id=tenant_id, network_id=network_id, router_id=router_id).create() self.core_plugin.update_network( context, network_id, {'network': {'admin_state_up': True}}) except obj_exc.NeutronDbObjectDuplicateEntry: LOG.debug("Multiple auto-allocated networks detected for " "tenant %s. 
Attempting clean up for network %s " "and router %s.", tenant_id, network_id, router_id) self._cleanup( context, network_id=network_id, router_id=router_id, subnets=subnets) network_id = self._get_auto_allocated_network(context, tenant_id) except Exception as e: raise exceptions.UnknownProvisioningError( e, network_id=network_id, router_id=router_id, subnets=subnets) return network_id def _cleanup(self, context, network_id=None, router_id=None, subnets=None): """Clean up auto allocated resources.""" # Concurrent attempts to delete the topology may interleave and # cause some operations to fail with NotFound exceptions. Rather # than fail partially, the exceptions should be ignored and the # cleanup should proceed uninterrupted. if router_id: for subnet in subnets or []: ignore_notfound( self.l3_plugin.remove_router_interface, context, router_id, {'subnet_id': subnet['id']}) ignore_notfound(self.l3_plugin.delete_router, context, router_id) if network_id: ignore_notfound( self.core_plugin.delete_network, context, network_id) def ignore_notfound(func, *args, **kwargs): """Call the given function and pass if a `NotFound` exception is raised.""" try: return func(*args, **kwargs) except n_exc.NotFound: pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/auto_allocate/exceptions.py0000644000175000017500000000250100000000000026025 0ustar00coreycorey00000000000000# Copyright 2015-2016 Hewlett Packard Enterprise Development Company, LP # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions as n_exc from neutron._i18n import _ class AutoAllocationFailure(n_exc.Conflict): message = _("Deployment error: %(reason)s.") class DefaultExternalNetworkExists(n_exc.Conflict): message = _("A default external network already exists: %(net_id)s.") class UnknownProvisioningError(Exception): """To track unknown errors and partial provisioning steps.""" def __init__(self, error, network_id=None, router_id=None, subnets=None): self.error = error self.network_id = network_id self.router_id = router_id self.subnets = subnets def __str__(self): return str(self.error) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/auto_allocate/models.py0000644000175000017500000000242200000000000025131 0ustar00coreycorey00000000000000# Copyright (c) 2015-2016 Hewlett Packard Enterprise Development Company LP # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import model_base import sqlalchemy as sa class AutoAllocatedTopology(model_base.BASEV2, model_base.HasProjectPrimaryKey): __tablename__ = 'auto_allocated_topologies' network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete='CASCADE'), nullable=False) router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id', ondelete='SET NULL'), nullable=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/auto_allocate/plugin.py0000644000175000017500000000242000000000000025142 0ustar00coreycorey00000000000000# Copyright 2015-2016 Hewlett Packard Enterprise Development Company, LP # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import auto_allocated_topology from neutron.services.auto_allocate import db class Plugin(db.AutoAllocatedTopologyMixin): _instance = None supported_extension_aliases = [auto_allocated_topology.ALIAS] __filter_validation_support = True @classmethod def get_instance(cls): if cls._instance is None: cls._instance = cls() return cls._instance def get_plugin_description(self): return "Auto Allocated Topology - aka get me a network." @classmethod def get_plugin_type(cls): return "auto-allocated-topology" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.359045 neutron-16.0.0.0b2.dev214/neutron/services/conntrack_helper/0000755000175000017500000000000000000000000024001 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/conntrack_helper/__init__.py0000644000175000017500000000000000000000000026100 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.359045 neutron-16.0.0.0b2.dev214/neutron/services/conntrack_helper/common/0000755000175000017500000000000000000000000025271 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/conntrack_helper/common/__init__.py0000644000175000017500000000000000000000000027370 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/conntrack_helper/common/exceptions.py0000644000175000017500000000215400000000000030026 0ustar00coreycorey00000000000000# Copyright (c) 2019 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron._i18n import _ from neutron_lib import exceptions as n_exc class ConntrackHelperNotFound(n_exc.NotFound): message = _("Conntrack Helper %(id)s could not be found.") class ConntrackHelperNotAllowed(n_exc.BadRequest): message = _("Conntrack Helper %(helper)s is not allowed.") class InvalidProtocolForHelper(n_exc.BadRequest): message = _("Conntrack Helper %(helper)s does not support: %(protocol)s. " "Supported protocols are: %(supported_protocols)s") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/conntrack_helper/plugin.py0000644000175000017500000002124200000000000025652 0ustar00coreycorey00000000000000# Copyright (c) 2019 Red Hat, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections from neutron_lib.api.definitions import expose_l3_conntrack_helper as exposedef from neutron_lib.api.definitions import l3 from neutron_lib.api.definitions import l3_conntrack_helper as apidef from neutron_lib.callbacks import registry from neutron_lib.db import api as db_api from neutron_lib.db import resource_extend from neutron_lib import exceptions as lib_exc from neutron_lib.exceptions import l3 as lib_l3_exc from neutron_lib.objects import exceptions as obj_exc from neutron_lib.plugins import constants from neutron_lib.plugins import directory from oslo_config import cfg from oslo_db import exception as oslo_db_exc from neutron._i18n import _ from neutron.api.rpc.callbacks import events as rpc_events from neutron.api.rpc.handlers import resources_rpc from neutron.db import db_base_plugin_common from neutron.extensions import l3_conntrack_helper from neutron.objects import base as base_obj from neutron.objects import conntrack_helper as cth from neutron.objects import router from neutron.services.conntrack_helper.common import exceptions as cth_exc @resource_extend.has_resource_extenders @registry.has_registry_receivers class Plugin(l3_conntrack_helper.ConntrackHelperPluginBase): """Implementation of the Neutron Conntrack Helper Service Plugin. This class implements a Conntrack Helper plugin. """ required_service_plugins = [l3.ROUTER] supported_extension_aliases = [apidef.ALIAS, exposedef.ALIAS] __native_pagination_support = True __native_sorting_support = True __filter_validation_support = True def __init__(self): super(Plugin, self).__init__() self.push_api = resources_rpc.ResourcesPushRpcApi() self.l3_plugin = directory.get_plugin(constants.L3) self.core_plugin = directory.get_plugin() # Option allowed_conntrack_helpers is a list of key, value pairs. 
# The list can contain the same key (conntrack helper module) multiple # times with a different value (protocol). Merge into a dictionary # with key (conntrack helper) and values (protocols) as a list. A # standalone sketch of this merge appears after this module. self.constraints = collections.defaultdict(list) for x in cfg.CONF.allowed_conntrack_helpers: self.constraints[next(iter(x.keys()))].append( next(iter(x.values()))) @staticmethod @resource_extend.extends([l3.ROUTERS]) def _extend_router_dict(result_dict, db): fields = [apidef.PROTOCOL, apidef.PORT, apidef.HELPER] result_dict[apidef.COLLECTION_NAME] = [] if db.conntrack_helpers: conntrack_helper_result = [] for conntrack_helper in db.conntrack_helpers: cth_dict = cth.ConntrackHelper.modify_fields_from_db( conntrack_helper) for key in list(cth_dict.keys()): if key not in fields: cth_dict.pop(key) conntrack_helper_result.append(cth_dict) result_dict[apidef.COLLECTION_NAME] = conntrack_helper_result return result_dict def get_router(self, context, router_id, fields=None): router_obj = router.Router.get_object(context, id=router_id) if not router_obj: raise lib_l3_exc.RouterNotFound(router_id=router_id) return router_obj def _find_existing_conntrack_helper(self, context, router_id, conntrack_helper): # Because the session has been flushed by NeutronDbObjectDuplicateEntry, # we need to roll back first if we want to run further db queries with # this context. context.session.rollback() param = {'router_id': router_id, 'protocol': conntrack_helper['protocol'], 'port': conntrack_helper['port'], 'helper': conntrack_helper['helper']} objs = cth.ConntrackHelper.get_objects(context, **param) if objs: return (objs[0], param) def _get_conntrack_helper(self, context, id): cth_obj = cth.ConntrackHelper.get_object(context, id=id) if not cth_obj: raise cth_exc.ConntrackHelperNotFound(id=id) return cth_obj def _check_conntrack_helper_constraints(self, cth_obj): if cth_obj.helper not in self.constraints: raise cth_exc.ConntrackHelperNotAllowed(helper=cth_obj.helper) elif cth_obj.protocol not in self.constraints[cth_obj.helper]: raise cth_exc.InvalidProtocolForHelper( helper=cth_obj.helper, protocol=cth_obj.protocol, supported_protocols=', '.join( self.constraints[cth_obj.helper])) @db_base_plugin_common.convert_result_to_dict def create_router_conntrack_helper(self, context, router_id, conntrack_helper): conntrack_helper = conntrack_helper.get(apidef.RESOURCE_NAME) conntrack_helper['router_id'] = router_id cth_obj = cth.ConntrackHelper(context, **conntrack_helper) self._check_conntrack_helper_constraints(cth_obj) try: with db_api.CONTEXT_WRITER.using(context): # If this get_router does not raise an exception, a router # with router_id exists.
self.get_router(context, router_id) cth_obj.create() except obj_exc.NeutronDbObjectDuplicateEntry: (__, conflict_params) = self._find_existing_conntrack_helper( context, router_id, cth_obj.to_dict()) message = _("A duplicate conntrack helper entry with same " "attributes already exists, conflicting values " "are %s") % conflict_params raise lib_exc.BadRequest(resource=apidef.RESOURCE_NAME, msg=message) self.push_api.push(context, [cth_obj], rpc_events.CREATED) return cth_obj @db_base_plugin_common.convert_result_to_dict def update_router_conntrack_helper(self, context, id, router_id, conntrack_helper): conntrack_helper = conntrack_helper.get(apidef.RESOURCE_NAME) try: with db_api.CONTEXT_WRITER.using(context): cth_obj = self._get_conntrack_helper(context, id) cth_obj.update_fields(conntrack_helper, reset_changes=True) self._check_conntrack_helper_constraints(cth_obj) cth_obj.update() except oslo_db_exc.DBDuplicateEntry: (__, conflict_params) = self._find_existing_conntrack_helper( context, cth_obj.router_id, cth_obj.to_dict()) message = _("A duplicate conntrack helper entry with same " "attributes already exists, conflicting values " "are %s") % conflict_params raise lib_exc.BadRequest(resource=apidef.RESOURCE_NAME, msg=message) self.push_api.push(context, [cth_obj], rpc_events.UPDATED) return cth_obj @db_base_plugin_common.make_result_with_fields @db_base_plugin_common.convert_result_to_dict def get_router_conntrack_helper(self, context, id, router_id, fields=None): return self._get_conntrack_helper(context, id) @db_base_plugin_common.make_result_with_fields @db_base_plugin_common.convert_result_to_dict def get_router_conntrack_helpers(self, context, router_id=None, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): filters = filters or {} pager = base_obj.Pager(sorts, limit, page_reverse, marker) return cth.ConntrackHelper.get_objects(context, _pager=pager, router_id=router_id, **filters) def delete_router_conntrack_helper(self, context, id, router_id): cth_obj = self._get_conntrack_helper(context, id) with db_api.CONTEXT_WRITER.using(context): cth_obj.delete() self.push_api.push(context, [cth_obj], rpc_events.DELETED) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.359045 neutron-16.0.0.0b2.dev214/neutron/services/externaldns/0000755000175000017500000000000000000000000023007 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/externaldns/__init__.py0000644000175000017500000000000000000000000025106 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/externaldns/driver.py0000644000175000017500000000524300000000000024660 0ustar00coreycorey00000000000000# Copyright (c) 2016 IBM # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
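# NOTE: a minimal, standalone sketch of the allowed_conntrack_helpers merge
# performed in the conntrack_helper Plugin.__init__ above. The helper/protocol
# values below are assumptions for illustration only; oslo.config yields the
# option as a list of single-key dicts, possibly repeating a helper with a
# different protocol.
import collections

_allowed = [{'tftp': 'udp'}, {'ftp': 'tcp'}, {'sip': 'tcp'}, {'sip': 'udp'}]
_constraints = collections.defaultdict(list)
for _entry in _allowed:
    # each entry is a single-key dict: {<helper>: <protocol>}
    _constraints[next(iter(_entry.keys()))].append(next(iter(_entry.values())))

# the repeated 'sip' key merges into a single entry with both protocols
assert _constraints['sip'] == ['tcp', 'udp']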
import abc from oslo_config import cfg from oslo_log import log import six from neutron import manager LOG = log.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class ExternalDNSService(object): """Interface definition for an external dns service driver.""" def __init__(self): """Initialize external dns service driver.""" @classmethod def get_instance(cls): """Return an instance of the configured external DNS driver.""" external_dns_driver_name = cfg.CONF.external_dns_driver mgr = manager.NeutronManager LOG.debug("Loading external dns driver: %s", external_dns_driver_name) driver_class = mgr.load_class_for_provider( 'neutron.services.external_dns_drivers', external_dns_driver_name) return driver_class() @abc.abstractmethod def create_record_set(self, context, dns_domain, dns_name, records): """Create a record set in the specified zone. :param context: neutron api request context :type context: neutron_lib.context.Context :param dns_domain: the dns_domain where the record set will be created :type dns_domain: String :param dns_name: the name associated with the record set :type dns_name: String :param records: the records in the set :type records: List of Strings :raises: neutron.extensions.dns.DNSDomainNotFound neutron.extensions.dns.DuplicateRecordSet """ @abc.abstractmethod def delete_record_set(self, context, dns_domain, dns_name, records): """Delete a record set in the specified zone. :param context: neutron api request context :type context: neutron.context.Context :param dns_domain: the dns_domain from which the record set will be deleted :type dns_domain: String :param dns_name: the dns_name associated with the record set to be deleted :type dns_name: String :param records: the records in the set to be deleted :type records: List of Strings """ ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.359045 neutron-16.0.0.0b2.dev214/neutron/services/externaldns/drivers/0000755000175000017500000000000000000000000024465 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/externaldns/drivers/__init__.py0000644000175000017500000000000000000000000026564 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.359045 neutron-16.0.0.0b2.dev214/neutron/services/externaldns/drivers/designate/0000755000175000017500000000000000000000000026430 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/externaldns/drivers/designate/__init__.py0000644000175000017500000000000000000000000030527 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/externaldns/drivers/designate/driver.py0000644000175000017500000001623100000000000030300 0ustar00coreycorey00000000000000# Copyright (c) 2016 IBM # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from designateclient import exceptions as d_exc from designateclient.v2 import client as d_client from keystoneauth1.identity.generic import password from keystoneauth1 import loading from keystoneauth1 import token_endpoint from neutron_lib import constants from neutron_lib.exceptions import dns as dns_exc from oslo_config import cfg from neutron.conf.services import extdns_designate_driver from neutron.services.externaldns import driver IPV4_PTR_ZONE_PREFIX_MIN_SIZE = 8 IPV4_PTR_ZONE_PREFIX_MAX_SIZE = 24 IPV6_PTR_ZONE_PREFIX_MIN_SIZE = 4 IPV6_PTR_ZONE_PREFIX_MAX_SIZE = 124 _SESSION = None CONF = cfg.CONF extdns_designate_driver.register_designate_opts() def get_clients(context): global _SESSION if not _SESSION: _SESSION = loading.load_session_from_conf_options( CONF, 'designate') auth = token_endpoint.Token(CONF.designate.url, context.auth_token) client = d_client.Client(session=_SESSION, auth=auth) if CONF.designate.auth_type: admin_auth = loading.load_auth_from_conf_options( CONF, 'designate') else: admin_auth = password.Password( auth_url=CONF.designate.admin_auth_url, username=CONF.designate.admin_username, password=CONF.designate.admin_password, tenant_name=CONF.designate.admin_tenant_name, tenant_id=CONF.designate.admin_tenant_id) admin_client = d_client.Client(session=_SESSION, auth=admin_auth, endpoint_override=CONF.designate.url) return client, admin_client class Designate(driver.ExternalDNSService): """Driver for Designate.""" def __init__(self): ipv4_ptr_zone_size = CONF.designate.ipv4_ptr_zone_prefix_size ipv6_ptr_zone_size = CONF.designate.ipv6_ptr_zone_prefix_size if (ipv4_ptr_zone_size < IPV4_PTR_ZONE_PREFIX_MIN_SIZE or ipv4_ptr_zone_size > IPV4_PTR_ZONE_PREFIX_MAX_SIZE or (ipv4_ptr_zone_size % 8) != 0): raise dns_exc.InvalidPTRZoneConfiguration( parameter='ipv4_ptr_zone_size', number='8', maximum=str(IPV4_PTR_ZONE_PREFIX_MAX_SIZE), minimum=str(IPV4_PTR_ZONE_PREFIX_MIN_SIZE)) if (ipv6_ptr_zone_size < IPV6_PTR_ZONE_PREFIX_MIN_SIZE or ipv6_ptr_zone_size > IPV6_PTR_ZONE_PREFIX_MAX_SIZE or (ipv6_ptr_zone_size % 4) != 0): raise dns_exc.InvalidPTRZoneConfiguration( parameter='ipv6_ptr_zone_size', number='4', maximum=str(IPV6_PTR_ZONE_PREFIX_MAX_SIZE), minimum=str(IPV6_PTR_ZONE_PREFIX_MIN_SIZE)) def create_record_set(self, context, dns_domain, dns_name, records): designate, designate_admin = get_clients(context) v4, v6 = self._classify_records(records) try: if v4: designate.recordsets.create(dns_domain, dns_name, 'A', v4) if v6: designate.recordsets.create(dns_domain, dns_name, 'AAAA', v6) except d_exc.NotFound: raise dns_exc.DNSDomainNotFound(dns_domain=dns_domain) except d_exc.Conflict: raise dns_exc.DuplicateRecordSet(dns_name=dns_name) if not CONF.designate.allow_reverse_dns_lookup: return # Set up the PTR records recordset_name = '%s.%s' % (dns_name, dns_domain) ptr_zone_email = 'admin@%s' % dns_domain[:-1] if CONF.designate.ptr_zone_email: ptr_zone_email = CONF.designate.ptr_zone_email for record in records: in_addr_name = netaddr.IPAddress(record).reverse_dns in_addr_zone_name = self._get_in_addr_zone_name(in_addr_name) in_addr_zone_description = ( 'An %s 
zone for reverse lookups set up by Neutron.' % '.'.join(in_addr_name.split('.')[-3:])) try: # Since we don't delete in-addr zones, assume it already # exists. If it doesn't, create it designate_admin.recordsets.create(in_addr_zone_name, in_addr_name, 'PTR', [recordset_name]) except d_exc.NotFound: designate_admin.zones.create( in_addr_zone_name, email=ptr_zone_email, description=in_addr_zone_description) designate_admin.recordsets.create(in_addr_zone_name, in_addr_name, 'PTR', [recordset_name]) def _classify_records(self, records): v4 = [] v6 = [] for record in records: if netaddr.IPAddress(record).version == 4: v4.append(record) else: v6.append(record) return v4, v6 def _get_in_addr_zone_name(self, in_addr_name): units = self._get_bytes_or_nybles_to_skip(in_addr_name) return '.'.join(in_addr_name.split('.')[units:]) def _get_bytes_or_nybles_to_skip(self, in_addr_name): if 'in-addr.arpa' in in_addr_name: return int((constants.IPv4_BITS - CONF.designate.ipv4_ptr_zone_prefix_size) / 8) return int((constants.IPv6_BITS - CONF.designate.ipv6_ptr_zone_prefix_size) / 4) def delete_record_set(self, context, dns_domain, dns_name, records): designate, designate_admin = get_clients(context) ids_to_delete = self._get_ids_ips_to_delete( dns_domain, '%s.%s' % (dns_name, dns_domain), records, designate) for _id in ids_to_delete: designate.recordsets.delete(dns_domain, _id) if not CONF.designate.allow_reverse_dns_lookup: return for record in records: in_addr_name = netaddr.IPAddress(record).reverse_dns in_addr_zone_name = self._get_in_addr_zone_name(in_addr_name) designate_admin.recordsets.delete(in_addr_zone_name, in_addr_name) def _get_ids_ips_to_delete(self, dns_domain, name, records, designate_client): try: recordsets = designate_client.recordsets.list( dns_domain, criterion={"name": "%s" % name}) except d_exc.NotFound: raise dns_exc.DNSDomainNotFound(dns_domain=dns_domain) ids = [rec['id'] for rec in recordsets] ips = [str(ip) for rec in recordsets for ip in rec['records']] if set(ips) != set(records): raise dns_exc.DuplicateRecordSet(dns_name=name) return ids ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.359045 neutron-16.0.0.0b2.dev214/neutron/services/flavors/0000755000175000017500000000000000000000000022134 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/flavors/__init__.py0000644000175000017500000000000000000000000024233 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/flavors/flavors_plugin.py0000644000175000017500000000250300000000000025540 0ustar00coreycorey00000000000000# Copyright (c) 2015, Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
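# NOTE: a standalone sketch of the reverse-zone computation used by the
# designate driver above, assuming ipv4_ptr_zone_prefix_size = 24 (a value
# chosen for illustration; the driver accepts multiples of 8 between 8 and
# 24). For a /24 PTR zone, (32 - 24) / 8 == 1 leading byte label is skipped.
import netaddr

_in_addr_name = netaddr.IPAddress('192.0.2.10').reverse_dns
# _in_addr_name == '10.2.0.192.in-addr.arpa.'
_skip = int((32 - 24) / 8)
_zone = '.'.join(_in_addr_name.split('.')[_skip:])
assert _zone == '2.0.192.in-addr.arpa.'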
from neutron_lib.api.definitions import flavors from neutron_lib.api.definitions import servicetype from neutron_lib.plugins import constants from neutron_lib.services import base as service_base from neutron.db import flavors_db class FlavorsPlugin(service_base.ServicePluginBase, flavors_db.FlavorsDbMixin): """Implements Neutron Flavors Service plugin.""" supported_extension_aliases = [flavors.ALIAS, servicetype.ALIAS] __filter_validation_support = True @classmethod def get_plugin_type(cls): return constants.FLAVORS def get_plugin_description(self): return "Neutron Flavors and Service Profiles manager plugin" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3630452 neutron-16.0.0.0b2.dev214/neutron/services/l3_router/0000755000175000017500000000000000000000000022376 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/l3_router/README0000644000175000017500000000242600000000000023262 0ustar00coreycorey00000000000000This service plugin implements the L3 routing functionality (resources router and floatingip) that in earlier releases before Havana was provided by core plugins (openvswitch, linuxbridge, ... etc). Core plugins can now choose not to implement L3 routing functionality and instead delegate that to the L3 routing service plugin. The required changes to a core plugin are in that case: - Do not inherit 'l3_db.L3_NAT_db_mixin' (or its descendants like extraroute) anymore. - Remove "router" from 'supported_extension_aliases'. - Modify any 'self' references to members in L3_NAT_db_mixin to instead use 'directory.get_plugin(constants.L3)' For example, self.prevent_l3_port_deletion(...) becomes something like plugin = directory.get_plugin(constants.L3) if plugin: plugin.prevent_l3_port_deletion(...) If the core plugin has relied on the L3Agent the following must also be changed: - Do not inherit 'l3_rpc_base.L3RpcCallbackMixin' in any '*RpcCallbacks' class. - Do not be a consumer of the topics.L3PLUGIN topic for RPC. To use the L3 routing service plugin, add 'neutron.services.l3_router.l3_router_plugin.L3RouterPlugin' to 'service_plugins' in '/etc/neutron/neutron.conf'. That is, service_plugins = neutron.services.l3_router.l3_router_plugin.L3RouterPlugin ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/l3_router/__init__.py0000644000175000017500000000000000000000000024475 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/l3_router/l3_router_plugin.py0000644000175000017500000001755700000000000026263 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
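# NOTE: a standalone sketch of the delegation pattern described in the
# l3_router README above. The wrapper function name is hypothetical;
# prevent_l3_port_deletion is the L3 db mixin method the README itself uses
# as its example.
from neutron_lib.plugins import constants as _plugin_const
from neutron_lib.plugins import directory as _directory


def _delegate_prevent_l3_port_deletion(context, port_id):
    # core plugins no longer inherit L3_NAT_db_mixin, so they look up the
    # L3 service plugin and call it only when one is configured
    l3plugin = _directory.get_plugin(_plugin_const.L3)
    if l3plugin:
        l3plugin.prevent_l3_port_deletion(context, port_id)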
from neutron_lib.agent import topics from neutron_lib.api.definitions import dvr from neutron_lib.api.definitions import extraroute from neutron_lib.api.definitions import extraroute_atomic from neutron_lib.api.definitions import fip_port_details from neutron_lib.api.definitions import floatingip_pools from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.api.definitions import l3_ext_gw_mode from neutron_lib.api.definitions import l3_ext_ha_mode from neutron_lib.api.definitions import l3_flavors from neutron_lib.api.definitions import l3_port_ip_change_not_allowed from neutron_lib.api.definitions import qos_gateway_ip from neutron_lib.api.definitions import router_availability_zone from neutron_lib import constants as n_const from neutron_lib.db import resource_extend from neutron_lib.plugins import constants as plugin_constants from neutron_lib import rpc as n_rpc from neutron_lib.services import base as service_base from oslo_config import cfg from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import importutils from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api from neutron.api.rpc.handlers import l3_rpc from neutron.db import dns_db from neutron.db import extraroute_db from neutron.db import l3_dvr_ha_scheduler_db from neutron.db import l3_dvrscheduler_db from neutron.db import l3_fip_pools_db from neutron.db import l3_fip_port_details from neutron.db import l3_fip_qos from neutron.db import l3_gateway_ip_qos from neutron.db import l3_hamode_db from neutron.db import l3_hascheduler_db from neutron.db.models import l3 as l3_models from neutron.extensions import _admin_state_down_before_update_lib from neutron.quota import resource_registry from neutron import service from neutron.services.l3_router.service_providers import driver_controller LOG = logging.getLogger(__name__) def disable_dvr_extension_by_config(aliases): if not cfg.CONF.enable_dvr: LOG.info('Disabled DVR extension.') if 'dvr' in aliases: aliases.remove('dvr') def disable_l3_qos_extension_by_plugins(ext, aliases): qos_class = 'neutron.services.qos.qos_plugin.QoSPlugin' if all(p not in cfg.CONF.service_plugins for p in ['qos', qos_class]): if ext in aliases: aliases.remove(ext) @resource_extend.has_resource_extenders class L3RouterPlugin(service_base.ServicePluginBase, extraroute_db.ExtraRoute_db_mixin, l3_hamode_db.L3_HA_NAT_db_mixin, l3_gateway_ip_qos.L3_gw_ip_qos_db_mixin, l3_dvr_ha_scheduler_db.L3_DVR_HA_scheduler_db_mixin, dns_db.DNSDbMixin, l3_fip_qos.FloatingQoSDbMixin, l3_fip_port_details.Fip_port_details_db_mixin, l3_fip_pools_db.FloatingIPPoolsMixin): """Implementation of the Neutron L3 Router Service Plugin. This class implements a L3 service plugin that provides router and floatingip resources and manages associated request/response. All DB related work is implemented in classes l3_db.L3_NAT_db_mixin, l3_hamode_db.L3_HA_NAT_db_mixin, l3_dvr_db.L3_NAT_with_dvr_db_mixin, and extraroute_db.ExtraRoute_db_mixin. 
""" _supported_extension_aliases = [dvr.ALIAS, l3_apidef.ALIAS, l3_ext_gw_mode.ALIAS, extraroute.ALIAS, extraroute_atomic.ALIAS, n_const.L3_AGENT_SCHEDULER_EXT_ALIAS, l3_ext_ha_mode.ALIAS, router_availability_zone.ALIAS, l3_flavors.ALIAS, "qos-fip", fip_port_details.ALIAS, floatingip_pools.ALIAS, qos_gateway_ip.ALIAS, l3_port_ip_change_not_allowed.ALIAS, _admin_state_down_before_update_lib.ALIAS] __native_pagination_support = True __native_sorting_support = True __filter_validation_support = True IP_UPDATE_NOT_ALLOWED_LIST = [ n_const.DEVICE_OWNER_ROUTER_INTF, n_const.DEVICE_OWNER_ROUTER_HA_INTF, n_const.DEVICE_OWNER_HA_REPLICATED_INT, n_const.DEVICE_OWNER_ROUTER_SNAT, n_const.DEVICE_OWNER_DVR_INTERFACE] @resource_registry.tracked_resources(router=l3_models.Router, floatingip=l3_models.FloatingIP) def __init__(self): self.router_scheduler = importutils.import_object( cfg.CONF.router_scheduler_driver) self.add_periodic_l3_agent_status_check() super(L3RouterPlugin, self).__init__() if 'dvr' in self.supported_extension_aliases: l3_dvrscheduler_db.subscribe() if 'l3-ha' in self.supported_extension_aliases: l3_hascheduler_db.subscribe() self.agent_notifiers.update( {n_const.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()}) rpc_worker = service.RpcWorker([self], worker_process_count=0) self.add_worker(rpc_worker) self.l3_driver_controller = driver_controller.DriverController(self) @property def supported_extension_aliases(self): if not hasattr(self, '_aliases'): aliases = self._supported_extension_aliases[:] disable_dvr_extension_by_config(aliases) disable_l3_qos_extension_by_plugins('qos-fip', aliases) disable_l3_qos_extension_by_plugins('qos-gateway-ip', aliases) self._aliases = aliases return self._aliases @log_helpers.log_method_call def start_rpc_listeners(self): # RPC support self.topic = topics.L3PLUGIN self.conn = n_rpc.Connection() self.endpoints = [l3_rpc.L3RpcCallback()] self.conn.create_consumer(self.topic, self.endpoints, fanout=False) return self.conn.consume_in_threads() @classmethod def get_plugin_type(cls): return plugin_constants.L3 def get_plugin_description(self): """returns string description of the plugin.""" return ("L3 Router Service Plugin for basic L3 forwarding" " between (L2) Neutron networks and access to external" " networks via a NAT gateway.") def router_supports_scheduling(self, context, router_id): return self.l3_driver_controller.uses_scheduler(context, router_id) def create_floatingip(self, context, floatingip): """Create floating IP. :param context: Neutron request context :param floatingip: data for the floating IP being created :returns: A floating IP object on success As the l3 router plugin asynchronously creates floating IPs leveraging the l3 agent, the initial status for the floating IP object will be DOWN. 
""" return super(L3RouterPlugin, self).create_floatingip( context, floatingip, initial_status=n_const.FLOATINGIP_STATUS_DOWN) @staticmethod @resource_extend.extends([l3_apidef.ROUTERS]) def add_flavor_id(router_res, router_db): router_res['flavor_id'] = router_db['flavor_id'] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3630452 neutron-16.0.0.0b2.dev214/neutron/services/l3_router/service_providers/0000755000175000017500000000000000000000000026133 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/l3_router/service_providers/__init__.py0000644000175000017500000000000000000000000030232 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/l3_router/service_providers/base.py0000644000175000017500000000466200000000000027427 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron._i18n import _ class _FeatureFlag(object): def is_compatible(self, value): if value == self.requires: return True if value and self.supports: return True return False def __init__(self, supports, requires): self.supports = supports self.requires = requires if requires and not supports: raise RuntimeError(_("A driver can't require a feature and not " "support it.")) UNSUPPORTED = _FeatureFlag(supports=False, requires=False) OPTIONAL = _FeatureFlag(supports=True, requires=False) MANDATORY = _FeatureFlag(supports=True, requires=True) class L3ServiceProvider(object): """Base class for L3 service provider drivers. On __init__ this will be given a handle to the l3 plugin. It is then the responsibility of the driver to subscribe to the events it is interested in (e.g. router_create, router_update, router_delete, etc). The 'ha' and 'distributed' attributes below are used to determine if a router request with the 'ha' or 'distributed' attribute can be supported by this particular driver. These attributes must be present. The 'use_integrated_agent_scheduler' flag indicates whether or not routers which belong to the driver should be automatically scheduled using the L3 agent scheduler integrated into Neutron. 
""" ha_support = UNSUPPORTED distributed_support = UNSUPPORTED use_integrated_agent_scheduler = False def __init__(self, l3plugin): self.l3plugin = l3plugin def owns_router(self, context, router_id): """Returns True if router is associated with driver, else False.""" if not router_id: return False return self.l3plugin.l3_driver_controller.get_provider_for_router( context, router_id) == self ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/l3_router/service_providers/driver_controller.py0000644000175000017500000003376400000000000032260 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.callbacks import events from neutron_lib.callbacks import priority_group from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as lib_const from neutron_lib.db import api as db_api from neutron_lib import exceptions as lib_exc from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from oslo_config import cfg from oslo_log import log as logging from neutron._i18n import _ from neutron.db import servicetype_db as st_db from neutron.services import provider_configuration from neutron.services import service_base LOG = logging.getLogger(__name__) @registry.has_registry_receivers class DriverController(object): """Driver controller for the L3 service plugin. This component is responsible for dispatching router requests to L3 service providers and for performing the bookkeeping about which driver is associated with a given router. This is not intended to be accessed by the drivers or the l3 plugin. All of the methods are marked as private to reflect this. 
""" def __init__(self, l3_plugin): self.l3_plugin = l3_plugin self._stm = st_db.ServiceTypeManager.get_instance() self._stm.add_provider_configuration( plugin_constants.L3, _LegacyPlusProviderConfiguration()) self._load_drivers() def _load_drivers(self): self.drivers, self.default_provider = ( service_base.load_drivers(plugin_constants.L3, self.l3_plugin)) # store the provider name on each driver to make finding inverse easy for provider_name, driver in self.drivers.items(): setattr(driver, 'name', provider_name) @property def _flavor_plugin(self): if not hasattr(self, '_flavor_plugin_ref'): self._flavor_plugin_ref = directory.get_plugin( plugin_constants.FLAVORS) return self._flavor_plugin_ref @registry.receives(resources.ROUTER, [events.BEFORE_CREATE], priority_group.PRIORITY_ROUTER_CONTROLLER) def _check_router_request(self, resource, event, trigger, context, router, **kwargs): """Validates that API request is sane (flags compat with flavor).""" drv = self._get_provider_for_create(context, router) _ensure_driver_supports_request(drv, router) @registry.receives(resources.ROUTER, [events.PRECOMMIT_CREATE], priority_group.PRIORITY_ROUTER_CONTROLLER) def _set_router_provider(self, resource, event, trigger, context, router, router_db, **kwargs): """Associates a router with a service provider. Association is done by flavor_id if it's specified, otherwise it will fallback to determining which loaded driver supports the ha/distributed attributes associated with the router. """ if _flavor_specified(router): router_db.flavor_id = router['flavor_id'] drv = self._get_provider_for_create(context, router) self._stm.add_resource_association(context, plugin_constants.L3, drv.name, router['id']) registry.publish( resources.ROUTER_CONTROLLER, events.PRECOMMIT_ADD_ASSOCIATION, trigger, payload=events.DBEventPayload( context, request_body=router, states=(router_db,), metadata={'old_driver': None, 'new_driver': drv}, resource_id=router_db.get('id'))) @registry.receives(resources.ROUTER, [events.PRECOMMIT_DELETE], priority_group.PRIORITY_ROUTER_CONTROLLER) def _clear_router_provider(self, resource, event, trigger, context, router_id, **kwargs): """Remove the association between a router and a service provider.""" drv = self.get_provider_for_router(context, router_id) registry.publish( resources.ROUTER_CONTROLLER, events.PRECOMMIT_DELETE_ASSOCIATIONS, trigger, payload=events.DBEventPayload( context, metadata={'old_driver': drv, 'new_driver': None}, resource_id=router_id)) self._stm.del_resource_associations(context, [router_id]) @registry.receives(resources.ROUTER, [events.PRECOMMIT_UPDATE], priority_group.PRIORITY_ROUTER_CONTROLLER) def _update_router_provider(self, resource, event, trigger, payload=None): """Handle transition between providers. The provider can currently be changed only by the caller updating 'ha' and/or 'distributed' attributes. If we allow updates of flavor_id directly in the future those requests will also land here. """ drv = self.get_provider_for_router(payload.context, payload.resource_id) new_drv = None if _flavor_specified(payload.request_body): if (payload.request_body['flavor_id'] != payload.states[0]['flavor_id']): # TODO(kevinbenton): this is currently disallowed by the API # so we shouldn't hit it but this is a placeholder to add # support later. raise NotImplementedError() # the following is to support updating the 'ha' and 'distributed' # attributes via the API. 
try: _ensure_driver_supports_request(drv, payload.request_body) except lib_exc.InvalidInput: # the current driver does not support this request, we need to # migrate to a new provider. populate the distributed and ha # flags from the previous state if not in the update so we can # determine the target provider appropriately. # NOTE(kevinbenton): if the router is associated with a flavor # we bail because changing the provider without changing # the flavor will make things inconsistent. We can probably # update the flavor automatically in the future. if payload.states[0]['flavor_id']: raise lib_exc.InvalidInput(error_message=_( "Changing the 'ha' and 'distributed' attributes on a " "router associated with a flavor is not supported")) if 'distributed' not in payload.request_body: payload.request_body['distributed'] = (payload.states[0] ['distributed']) if 'ha' not in payload.request_body: payload.request_body['ha'] = payload.states[0]['ha'] LOG.debug("Get a provider driver handle based on the ha flag: " "%(ha_flag)s and distributed flag: %(distributed_flag)s", {'ha_flag': payload.request_body['ha'], 'distributed_flag': payload.request_body['distributed']}) new_drv = self._attrs_to_driver(payload.request_body) if new_drv: LOG.debug("Router %(id)s migrating from %(old)s provider to " "%(new)s provider.", {'id': payload.resource_id, 'old': drv, 'new': new_drv}) _ensure_driver_supports_request(new_drv, payload.request_body) # TODO(kevinbenton): notify old driver explicitly of driver change with db_api.CONTEXT_WRITER.using(payload.context): registry.publish( resources.ROUTER_CONTROLLER, events.PRECOMMIT_DELETE_ASSOCIATIONS, trigger, payload=payload) self._stm.del_resource_associations( payload.context, [payload.resource_id]) self._stm.add_resource_association( payload.context, plugin_constants.L3, new_drv.name, payload.resource_id, expire_session=False) registry.publish( resources.ROUTER_CONTROLLER, events.PRECOMMIT_ADD_ASSOCIATION, trigger, payload=payload) def get_provider_for_router(self, context, router_id): """Return the provider driver handle for a router id.""" driver_name = self._stm.get_provider_names_by_resource_ids( context, [router_id]).get(router_id) if not driver_name: # this is an old router that hasn't been mapped to a provider # yet so we do this now router = self.l3_plugin.get_router(context, router_id) driver = self._attrs_to_driver(router) driver_name = driver.name with context.session.begin(subtransactions=True): self._stm.add_resource_association( context, plugin_constants.L3, driver_name, router_id) registry.publish( resources.ROUTER_CONTROLLER, events.PRECOMMIT_ADD_ASSOCIATION, self, payload=events.DBEventPayload( context, states=(router,), metadata={'old_driver': None, 'new_driver': driver}, resource_id=router_id)) return self.drivers[driver_name] def _get_provider_for_create(self, context, router): """Get provider based on flavor or ha/distributed flags.""" if not _flavor_specified(router): return self._attrs_to_driver(router) return self._get_l3_driver_by_flavor(context, router['flavor_id']) def _get_l3_driver_by_flavor(self, context, flavor_id): """Get a provider driver handle for a given flavor_id.""" flavor = self._flavor_plugin.get_flavor(context, flavor_id) provider = self._flavor_plugin.get_flavor_next_provider( context, flavor['id'])[0] # TODO(kevinbenton): the callback framework suppresses the nice errors # these generate when they fail to lookup. 
carry them through driver = self.drivers[provider['provider']] return driver def _attrs_to_driver(self, router): """Get a provider driver handle based on the ha/distributed flags.""" distributed = _is_distributed( router.get('distributed', lib_const.ATTR_NOT_SPECIFIED)) ha = _is_ha(router.get('ha', lib_const.ATTR_NOT_SPECIFIED)) drivers = list(self.drivers.values()) # make sure default is tried before the rest if defined if self.default_provider: drivers.insert(0, self.drivers[self.default_provider]) for driver in drivers: if _is_driver_compatible(distributed, ha, driver): return driver raise NotImplementedError( _("Could not find a service provider that supports " "distributed=%(d)s and ha=%(h)s") % {'d': distributed, 'h': ha} ) def uses_scheduler(self, context, router_id): """Returns True if the integrated L3 scheduler should be used.""" return (self.get_provider_for_router(context, router_id). use_integrated_agent_scheduler) class _LegacyPlusProviderConfiguration( provider_configuration.ProviderConfiguration): def __init__(self): # Loads the dvrha, dvr, ha and single_node service providers # automatically. If an operator has set up explicit values that # conflict with these, the operator-defined values will take priority. super(_LegacyPlusProviderConfiguration, self).__init__( svc_type=plugin_constants.L3) for name, driver in (('dvrha', 'dvrha.DvrHaDriver'), ('dvr', 'dvr.DvrDriver'), ('ha', 'ha.HaDriver'), ('single_node', 'single_node.SingleNodeDriver')): path = 'neutron.services.l3_router.service_providers.%s' % driver try: self.add_provider({'service_type': plugin_constants.L3, 'name': name, 'driver': path, 'default': False}) except lib_exc.Invalid: LOG.debug("Could not add L3 provider '%s', it may have " "already been explicitly defined.", name) def _is_driver_compatible(distributed, ha, driver): if not driver.distributed_support.is_compatible(distributed): return False if not driver.ha_support.is_compatible(ha): return False return True def _is_distributed(distributed_attr): if distributed_attr is False: return False if distributed_attr == lib_const.ATTR_NOT_SPECIFIED: return cfg.CONF.router_distributed return True def _is_ha(ha_attr): if ha_attr is False: return False if ha_attr == lib_const.ATTR_NOT_SPECIFIED: return cfg.CONF.l3_ha return True def _flavor_specified(router): return ('flavor_id' in router and router['flavor_id'] != lib_const.ATTR_NOT_SPECIFIED) def _ensure_driver_supports_request(drv, router_body): r = router_body for key, attr in (('distributed', 'distributed_support'), ('ha', 'ha_support')): flag = r.get(key) if flag not in [True, False]: continue # not specified in body if not getattr(drv, attr).is_compatible(flag): raise lib_exc.InvalidInput(error_message=( _("Provider %(name)s does not support %(key)s=%(flag)s") % dict(name=drv.name, key=key, flag=flag))) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/l3_router/service_providers/dvr.py0000644000175000017500000000142200000000000027277 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. from neutron.services.l3_router.service_providers import base class DvrDriver(base.L3ServiceProvider): distributed_support = base.MANDATORY use_integrated_agent_scheduler = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/l3_router/service_providers/dvrha.py0000644000175000017500000000157700000000000027617 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.services.l3_router.service_providers import base from neutron.services.l3_router.service_providers import dvr from neutron.services.l3_router.service_providers import ha class DvrHaDriver(dvr.DvrDriver, ha.HaDriver): ha_support = base.MANDATORY distributed_support = base.MANDATORY ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/l3_router/service_providers/ha.py0000644000175000017500000000141000000000000027073 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.services.l3_router.service_providers import base class HaDriver(base.L3ServiceProvider): ha_support = base.MANDATORY use_integrated_agent_scheduler = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/l3_router/service_providers/single_node.py0000644000175000017500000000144000000000000030772 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
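# NOTE: a standalone sketch of the _FeatureFlag compatibility matrix defined
# in service_providers/base.py above, restated with the same logic so the
# asserts can run without neutron imports. MANDATORY drivers require the
# flag, OPTIONAL drivers accept either value, and UNSUPPORTED drivers reject
# it.
class _Flag(object):
    def __init__(self, supports, requires):
        self.supports = supports
        self.requires = requires

    def is_compatible(self, value):
        # mirrors _FeatureFlag.is_compatible() above
        return value == self.requires or bool(value and self.supports)


_UNSUPPORTED = _Flag(supports=False, requires=False)
_OPTIONAL = _Flag(supports=True, requires=False)
_MANDATORY = _Flag(supports=True, requires=True)

assert _MANDATORY.is_compatible(True) and not _MANDATORY.is_compatible(False)
assert _OPTIONAL.is_compatible(True) and _OPTIONAL.is_compatible(False)
assert _UNSUPPORTED.is_compatible(False) and not _UNSUPPORTED.is_compatible(True)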
from neutron.services.l3_router.service_providers import base class SingleNodeDriver(base.L3ServiceProvider): """Provider for single L3 agent routers.""" use_integrated_agent_scheduler = True ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3630452 neutron-16.0.0.0b2.dev214/neutron/services/logapi/0000755000175000017500000000000000000000000021733 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/logapi/__init__.py0000644000175000017500000000000000000000000024032 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3630452 neutron-16.0.0.0b2.dev214/neutron/services/logapi/agent/0000755000175000017500000000000000000000000023031 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/logapi/agent/__init__.py0000644000175000017500000000000000000000000025130 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3630452 neutron-16.0.0.0b2.dev214/neutron/services/logapi/agent/l3/0000755000175000017500000000000000000000000023347 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/logapi/agent/l3/__init__.py0000644000175000017500000000000000000000000025446 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/logapi/agent/l3/base.py0000644000175000017500000000770600000000000024645 0ustar00coreycorey00000000000000# Copyright (c) 2018 Fujitsu Limited # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
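# NOTE: a standalone sketch of how DriverController._attrs_to_driver above
# picks among the four providers registered by
# _LegacyPlusProviderConfiguration (dvrha, dvr, ha, single_node), restated
# without neutron imports. Drivers are tried in order and the first one whose
# ha/distributed feature flags accept the requested attributes wins.
class _Flag(object):
    def __init__(self, supports, requires):
        self.supports = supports
        self.requires = requires

    def is_compatible(self, value):
        return value == self.requires or bool(value and self.supports)


_UNSUPPORTED, _MANDATORY = _Flag(False, False), _Flag(True, True)
# (name, ha_support, distributed_support), mirroring the driver classes above
_PROVIDERS = [
    ('dvrha', _MANDATORY, _MANDATORY),
    ('dvr', _UNSUPPORTED, _MANDATORY),
    ('ha', _MANDATORY, _UNSUPPORTED),
    ('single_node', _UNSUPPORTED, _UNSUPPORTED),
]


def _pick_provider(ha, distributed):
    for name, ha_flag, dvr_flag in _PROVIDERS:
        if ha_flag.is_compatible(ha) and dvr_flag.is_compatible(distributed):
            return name
    raise NotImplementedError('no compatible provider')


assert _pick_provider(ha=False, distributed=False) == 'single_node'
assert _pick_provider(ha=True, distributed=False) == 'ha'
assert _pick_provider(ha=False, distributed=True) == 'dvr'
assert _pick_provider(ha=True, distributed=True) == 'dvrha'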
from neutron_lib import rpc as n_rpc from oslo_concurrency import lockutils from oslo_log import log as logging from neutron.api.rpc.callbacks.consumer import registry from neutron.api.rpc.callbacks import events from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import resources_rpc from neutron.conf.services import logging as log_cfg from neutron import manager LOG = logging.getLogger(__name__) log_cfg.register_log_driver_opts() class L3LoggingExtensionBase(object): """Base class for L3 logging extensions like SNATLogExtension, FWaaSV2LogExtension """ SUPPORTED_RESOURCE_TYPES = [resources.LOGGING_RESOURCE] def consume_api(self, agent_api): self.agent_api = agent_api def _load_driver_cls(self, namespace, driver_name): return manager.NeutronManager.load_class_for_provider( namespace, driver_name) def _register_rpc_consumers(self): registry.register( self._handle_notification, resources.LOGGING_RESOURCE) self._connection = n_rpc.Connection() endpoints = [resources_rpc.ResourcesPushRpcCallback()] topic = resources_rpc.resource_type_versioned_topic( resources.LOGGING_RESOURCE) self._connection.create_consumer(topic, endpoints, fanout=True) self._connection.consume_in_threads() def _get_router_info(self, router_id): router_info = self.agent_api.get_router_info(router_id) if router_info: return router_info LOG.debug("Router %s is not managed by this agent. " "It was possibly deleted concurrently.", router_id) @lockutils.synchronized('log') def _handle_notification(self, context, resource_type, log_resources, event_type): with self.log_driver.defer_apply(): if event_type == events.UPDATED: self._update_logging(context, log_resources) elif event_type == events.CREATED: self.log_driver.start_logging( context, log_resources=log_resources) elif event_type == events.DELETED: self.log_driver.stop_logging( context, log_resources=log_resources) def _update_logging(self, context, log_resources): enables = [] disables = [] for log_resource in log_resources: if log_resource.enabled: enables.append(log_resource) else: disables.append(log_resource) if enables: self.log_driver.start_logging(context, log_resources=enables) if disables: self.log_driver.stop_logging(context, log_resources=disables) def _process_update_router(self, context, router): router_info = self._get_router_info(router['id']) if router_info: self.log_driver.start_logging(context, router_info=router_info) @lockutils.synchronized('log-port') def add_router(self, context, data): self._process_update_router(context, data) @lockutils.synchronized('log-port') def update_router(self, context, data): self._process_update_router(context, data) def delete_router(self, context, data): self.log_driver.stop_logging(context, router_info=data) def ha_state_change(self, context, data): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/logapi/agent/log_extension.py0000644000175000017500000001132600000000000026263 0ustar00coreycorey00000000000000# Copyright (c) 2017 Fujitsu Limited # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import contextlib from neutron_lib.agent import extension from neutron_lib import constants from oslo_concurrency import lockutils import six from neutron.api.rpc.callbacks.consumer import registry from neutron.api.rpc.callbacks import events from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import resources_rpc from neutron.conf.services import logging as log_cfg from neutron import manager from neutron.services.logapi.rpc import agent as agent_rpc log_cfg.register_log_driver_opts() LOGGING_DRIVERS_NAMESPACE = 'neutron.services.logapi.drivers' @six.add_metaclass(abc.ABCMeta) class LoggingDriver(object): """Defines abstract interface for logging driver""" # specific logging types are supported SUPPORTED_LOGGING_TYPES = None @abc.abstractmethod def initialize(self, resource_rpc, **kwargs): """Perform logging driver initialization. """ @abc.abstractmethod def start_logging(self, context, **kwargs): """Enable logging :param context: rpc context :param kwargs: log_resources data or port_id """ @abc.abstractmethod def stop_logging(self, context, **kwargs): """Disable logging :param context: rpc context :param kwargs: log_resources data or port_id """ def defer_apply_on(self): """Defer application of logging rule.""" pass def defer_apply_off(self): """Turn off deferral of rules and apply the logging rules now.""" pass @contextlib.contextmanager def defer_apply(self): """Defer apply context.""" self.defer_apply_on() try: yield finally: self.defer_apply_off() class LoggingExtension(extension.AgentExtension): SUPPORTED_RESOURCE_TYPES = [resources.LOGGING_RESOURCE] def initialize(self, connection, driver_type): """Initialize agent extension.""" self.log_driver = manager.NeutronManager.load_class_for_provider( LOGGING_DRIVERS_NAMESPACE, driver_type)(self.agent_api) self.resource_rpc = agent_rpc.LoggingApiStub() self._register_rpc_consumers(connection) self.log_driver.initialize(self.resource_rpc) def consume_api(self, agent_api): self.agent_api = agent_api def _register_rpc_consumers(self, connection): endpoints = [resources_rpc.ResourcesPushRpcCallback()] for resource_type in self.SUPPORTED_RESOURCE_TYPES: registry.register(self._handle_notification, resource_type) topic = resources_rpc.resource_type_versioned_topic(resource_type) connection.create_consumer(topic, endpoints, fanout=True) @lockutils.synchronized('log-port') def _handle_notification(self, context, resource_type, log_resources, event_type): with self.log_driver.defer_apply(): if event_type == events.UPDATED: self._update_logging(context, log_resources) elif event_type == events.CREATED: self.log_driver.start_logging( context, log_resources=log_resources) elif event_type == events.DELETED: self.log_driver.stop_logging( context, log_resources=log_resources) @lockutils.synchronized('log-port') def handle_port(self, context, port): if port['device_owner'].startswith( constants.DEVICE_OWNER_COMPUTE_PREFIX): self.log_driver.start_logging(context, port_id=port['port_id']) def delete_port(self, context, port): self.log_driver.stop_logging(context, port_id=port['port_id']) def _update_logging(self, context, 

# ---- neutron-16.0.0.0b2.dev214/neutron/services/logapi/common/__init__.py (empty) ----

# ---- neutron-16.0.0.0b2.dev214/neutron/services/logapi/common/db_api.py ----
# Copyright (c) 2017 Fujitsu Limited
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License:
# http://www.apache.org/licenses/LICENSE-2.0

from neutron_lib import constants as const
from neutron_lib.db import api as db_api
from neutron_lib.services.logapi import constants
from oslo_log import log as logging
from sqlalchemy.orm import exc as orm_exc

from neutron.db.models import securitygroup as sg_db
from neutron.objects.logapi import logging_resource as log_object
from neutron.objects import ports as port_objects
from neutron.objects import securitygroup as sg_object
from neutron.services.logapi.common import validators

LOG = logging.getLogger(__name__)


def _get_ports_attached_to_sg(context, sg_id):
    """Return a list of ports attached to a security group"""
    with db_api.CONTEXT_READER.using(context):
        ports = context.session.query(
            sg_db.SecurityGroupPortBinding.port_id).filter(
                sg_db.SecurityGroupPortBinding.security_group_id ==
                sg_id).all()
    return [port for (port,) in ports]


def _get_ports_filter_in_tenant(context, tenant_id):
    """Return a list of ports filtered by tenant"""
    try:
        sg_id = sg_db.SecurityGroupPortBinding.security_group_id
        with db_api.CONTEXT_READER.using(context):
            ports = context.session.query(
                sg_db.SecurityGroupPortBinding.port_id).join(
                    sg_db.SecurityGroup,
                    sg_db.SecurityGroup.id == sg_id).filter(
                        sg_db.SecurityGroup.tenant_id == tenant_id).all()
        return list({port for (port,) in ports})
    except orm_exc.NoResultFound:
        return []


def _get_sgs_attached_to_port(context, port_id):
    """Return a list of security groups associated with a port"""
    with db_api.CONTEXT_READER.using(context):
        sg_ids = context.session.query(
            sg_db.SecurityGroupPortBinding.security_group_id).filter(
                sg_db.SecurityGroupPortBinding.port_id == port_id).all()
    return [sg_id for (sg_id, ) in sg_ids]


def _get_ports_being_logged(context, sg_log):
    """Return a list of ports being logged for a log_resource"""
    target_id = sg_log['target_id']
    resource_id = sg_log['resource_id']

    # if 'target_id' (port_id) is specified in a log_resource
    if target_id is not None:
        port_ids = [target_id]
    # if 'resource_id' (sg_id) is specified in a log_resource
    elif resource_id is not None:
        port_ids = _get_ports_attached_to_sg(context, resource_id)
    # neither 'resource_id' nor 'target_id' is specified in a log_resource
    else:
        port_ids = _get_ports_filter_in_tenant(context, sg_log['project_id'])

    # list of validated ports being logged
    validated_port_ids = []
    ports = port_objects.Port.get_objects(context, id=port_ids)
    for port in ports:
        if port.status != const.PORT_STATUS_ACTIVE:
            continue
        if validators.validate_log_type_for_port('security_group', port):
            validated_port_ids.append(port.id)
        else:
            msg = ("Logging type %(log_type)s is not supported on "
                   "port %(port_id)s." %
                   {'log_type': 'security_group', 'port_id': port.id})
            LOG.warning(msg)
    return validated_port_ids


def _get_sg_ids_log_for_port(context, sg_log, port_id):
    """Return a list of security group ids being logged for a port"""
    sg_ids = _get_sgs_attached_to_port(context, port_id)
    resource_id = sg_log['resource_id']

    # if resource_id is not specified
    if not resource_id:
        return sg_ids
    # if resource_id is specified and belongs to the set of sgs
    # associated with the port
    if resource_id in sg_ids:
        return [resource_id]
    return []


def _create_sg_rule_dict(rule_in_db):
    """Return a dict of a security group rule"""
    direction = rule_in_db['direction']
    rule_dict = {
        'direction': direction,
        'ethertype': rule_in_db['ethertype']}

    rule_dict.update({
        key: rule_in_db[key]
        for key in ('protocol', 'port_range_min', 'port_range_max',
                    'remote_group_id')
        if rule_in_db[key] is not None})

    remote_ip_prefix = rule_in_db['remote_ip_prefix']
    if remote_ip_prefix is not None:
        direction_ip_prefix = constants.DIRECTION_IP_PREFIX[direction]
        rule_dict[direction_ip_prefix] = remote_ip_prefix

    rule_dict['security_group_id'] = rule_in_db['security_group_id']

    return rule_dict


def _get_sg_rules(context, sg_log, port_id):
    """Return a list of sg_rules log for a port being logged"""
    sg_ids = _get_sg_ids_log_for_port(context, sg_log, port_id)
    if not sg_ids:
        return []
    filters = {'security_group_id': sg_ids}
    rules_in_db = sg_object.SecurityGroupRule.get_objects(context, **filters)
    return [_create_sg_rule_dict(rule_in_db) for rule_in_db in rules_in_db]


def _get_port_log_dict(context, port_id, sg_log):
    return {
        'port_id': port_id,
        'security_group_rules': _get_sg_rules(context, sg_log, port_id)
    }


def _make_log_dict(context, sg_log, port_ids_log):
    return {
        'id': sg_log['id'],
        'ports_log': [_get_port_log_dict(context, port_id, sg_log)
                      for port_id in port_ids_log],
        'event': sg_log['event'],
        'project_id': sg_log['project_id']
    }


def get_logs_bound_port(context, port_id):
    """Return a list of log_resources bound to a port"""
    port = port_objects.Port.get_object(context, id=port_id)
    project_id = port['project_id']
    logs = log_object.Log.get_objects(context,
                                      project_id=project_id,
                                      resource_type=constants.SECURITY_GROUP,
                                      enabled=True)

    is_bound = lambda log: (log.resource_id in port.security_group_ids or
                            log.target_id == port.id or
                            (not log.target_id and not log.resource_id))
    return [log for log in logs if is_bound(log)]


def get_logs_bound_sg(context, sg_id):
    """Return a list of log_resources bound to a security group"""
    project_id = context.tenant_id
    log_objs = log_object.Log.get_objects(
        context, project_id=project_id,
        resource_type=constants.SECURITY_GROUP, enabled=True)

    log_resources = []
    for log_obj in log_objs:
        if log_obj.resource_id == sg_id:
            log_resources.append(log_obj)
        elif log_obj.target_id:
            port = port_objects.Port.get_object(
                context, id=log_obj.target_id)
            if sg_id in port.security_group_ids:
                log_resources.append(log_obj)
        elif not log_obj.resource_id and not log_obj.target_id:
            log_resources.append(log_obj)
    return log_resources


def get_sg_log_info_for_port(context, port_id):
    """Return a list of security groups log info for a port

    This method provides a list of security groups log info for a port.
    The list has the following format:

        [
            {'id': xxx,
             'ports_log': [{'port_id': u'xxx',
                            'security_group_rules': [
                                {'direction': u'egress',
                                 'ethertype': u'IPv6',
                                 'security_group_id': u'xxx'},
                                {...}]
                           }],
             'event': u'ALL',
             'project_id': u'xxx'
            },
            ...
        ]

    :param context: current running context information
    :param port_id: port ID for which to get security groups log info
    """
    sg_logs = get_logs_bound_port(context, port_id)
    return [_make_log_dict(context, sg_log, [port_id])
            for sg_log in sg_logs]


def get_sg_log_info_for_log_resources(context, log_resources):
    """Return a list of security groups log info for a list of log_resources

    This method provides a list of security groups log info for a list of
    log_resources, in the same format as get_sg_log_info_for_port() but
    possibly with several entries under 'ports_log'.

    :param context: current running context information
    :param log_resources: list of log_resources for which to get security
                          groups log info
    """
    logs_info = []
    for sg_log in log_resources:
        port_ids = _get_ports_being_logged(context, sg_log)
        logs_info.append(_make_log_dict(context, sg_log, port_ids))

    return logs_info
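
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the neutron source): the dict shape
# assembled by _make_log_dict() above, with invented placeholder IDs, plus a
# tiny consumer that walks 'ports_log' the way the agent side does. Pure
# Python, runnable on its own.
# ---------------------------------------------------------------------------
sample_log_info = {
    'id': 'log-uuid',
    'event': 'ALL',
    'project_id': 'project-uuid',
    'ports_log': [{
        'port_id': 'port-uuid',
        'security_group_rules': [{
            'direction': 'egress',
            'ethertype': 'IPv4',
            'security_group_id': 'sg-uuid',
        }],
    }],
}


def count_logged_rules(log_info):
    """Count rules across all ports of one log_resource entry."""
    return sum(len(p['security_group_rules']) for p in log_info['ports_log'])


assert count_logged_rules(sample_log_info) == 1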

# ---- neutron-16.0.0.0b2.dev214/neutron/services/logapi/common/exceptions.py ----
# Copyright 2017 Fujitsu Limited.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License:
# http://www.apache.org/licenses/LICENSE-2.0

from neutron._i18n import _
from neutron_lib import exceptions as n_exc


class LogResourceNotFound(n_exc.NotFound):
    message = _("Log resource %(log_id)s could not be found.")


class InvalidLogResourceType(n_exc.InvalidInput):
    message = _("Invalid log resource_type: %(resource_type)s.")


class LoggingTypeNotSupported(n_exc.Conflict):
    message = _("Logging type %(log_type)s is not supported on "
                "port %(port_id)s.")


class TargetResourceNotFound(n_exc.NotFound):
    message = _("Target resource %(target_id)s could not be found.")


class ResourceNotFound(n_exc.NotFound):
    message = _("Resource %(resource_id)s could not be found.")


class InvalidResourceConstraint(n_exc.InvalidInput):
    message = _("Invalid resource constraint between resource "
                "(%(resource)s %(resource_id)s) and target resource "
                "(%(target_resource)s %(target_id)s).")


class LogapiDriverException(n_exc.NeutronException):
    """A log api driver Exception"""
    message = _("Driver exception: %(exception_msg)s")


class CookieNotFound(n_exc.NotFound):
    message = _("Cookie %(cookie_id)s could not be found.")


class ValidatedMethodNotFound(n_exc.NeutronException):
    """A validated method not found Exception"""
    message = _('Validated method for %(resource_type)s log '
                'could not be found.')


class ResourceIdNotSpecified(n_exc.InvalidInput):
    message = _('resource_id should be specified for %(resource_type)s.')


class RouterNotEnabledSnat(n_exc.NeutronException):
    message = _('SNAT is not enabled for router %(resource_id)s.')


class EventsDisabled(n_exc.InvalidInput):
    message = _('List of events %(events)s were disabled for '
                '%(resource_type)s.')


class RouterGatewayNotSet(n_exc.NeutronException):
    message = _('Router gateway is not set for router %(resource_id)s.')


# ---- neutron-16.0.0.0b2.dev214/neutron/services/logapi/common/sg_callback.py ----
# Copyright (c) 2018 Fujitsu Limited
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License:
# http://www.apache.org/licenses/LICENSE-2.0

from neutron_lib.services.logapi import constants as log_const

from neutron.services.logapi.common import db_api
from neutron.services.logapi.drivers import manager


class SecurityGroupRuleCallBack(manager.ResourceCallBackBase):

    def handle_event(self, resource, event, trigger, **kwargs):
        context = kwargs.get("context")
        sg_rule = kwargs.get('security_group_rule')
        if sg_rule:
            sg_id = sg_rule.get('security_group_id')
        else:
            sg_id = kwargs.get('security_group_id')

        log_resources = db_api.get_logs_bound_sg(context, sg_id)
        if log_resources:
            self.resource_push_api(
                log_const.RESOURCE_UPDATE, context, log_resources)
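
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the neutron source): neutron_lib-style
# exceptions format their 'message' template with the keyword arguments given
# at raise time. A minimal, self-contained stand-in class showing the same
# pattern; the class name is invented.
# ---------------------------------------------------------------------------
class _FakeNotFound(Exception):
    message = "Log resource %(log_id)s could not be found."

    def __init__(self, **kwargs):
        # Substitute the kwargs into the class-level template.
        super(_FakeNotFound, self).__init__(self.message % kwargs)


try:
    raise _FakeNotFound(log_id='1234')
except _FakeNotFound as exc:
    assert '1234' in str(exc)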

# ---- neutron-16.0.0.0b2.dev214/neutron/services/logapi/common/sg_validate.py ----
# Copyright (c) 2018 Fujitsu Limited
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License:
# http://www.apache.org/licenses/LICENSE-2.0

from neutron_lib.services.logapi import constants as log_const
from oslo_log import log as logging
from sqlalchemy.orm import exc as orm_exc

from neutron.db import _utils as db_utils
from neutron.db.models import securitygroup as sg_db
from neutron.objects import ports
from neutron.objects import securitygroup as sg_object
from neutron.services.logapi.common import exceptions as log_exc
from neutron.services.logapi.common import validators

LOG = logging.getLogger(__name__)


def _check_port_bound_sg(context, sg_id, port_id):
    try:
        db_utils.model_query(context, sg_db.SecurityGroupPortBinding)\
            .filter_by(security_group_id=sg_id, port_id=port_id).one()
    except orm_exc.NoResultFound:
        raise log_exc.InvalidResourceConstraint(
            resource=log_const.SECURITY_GROUP,
            resource_id=sg_id,
            target_resource=log_const.PORT,
            target_id=port_id
        )


def _check_sg_exists(context, sg_id):
    if sg_object.SecurityGroup.count(context, id=sg_id) < 1:
        raise log_exc.ResourceNotFound(resource_id=sg_id)


def _get_port(context, port_id):
    port = ports.Port.get_object(context, id=port_id)
    if not port:
        raise log_exc.TargetResourceNotFound(target_id=port_id)
    return port


@validators.ResourceValidateRequest.register(log_const.SECURITY_GROUP)
def validate_security_group_request(context, log_data):
    """Validate a log request

    This method checks whether a log request can be satisfied. It raises
    ResourceNotFound if the resource_id in log_data does not exist, and
    TargetResourceNotFound if the target_id in log_data does not exist. It
    also raises LoggingTypeNotSupported if no loaded log_driver supports the
    resource_type in log_data. In addition, if log_data specifies both
    resource_id and target_id, InvalidResourceConstraint is raised when
    there is no constraint between resource_id and target_id.
    """
    resource_id = log_data.get('resource_id')
    target_id = log_data.get('target_id')
    if resource_id:
        _check_sg_exists(context, resource_id)
    if target_id:
        port = _get_port(context, target_id)
        if not validators.validate_log_type_for_port(
                log_const.SECURITY_GROUP, port):
            raise log_exc.LoggingTypeNotSupported(
                log_type=log_const.SECURITY_GROUP,
                port_id=target_id)
    if resource_id and target_id:
        _check_port_bound_sg(context, resource_id, target_id)
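
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the neutron source): the decorator used
# above, ResourceValidateRequest.register(), implements a decorator-based
# registry. The same pattern in miniature, with a made-up resource type and
# registry name, runnable on its own.
# ---------------------------------------------------------------------------
_VALIDATORS = {}


def register_validator(resource_type):
    def wrap(func):
        # Store the function under its resource type at import time.
        _VALIDATORS[resource_type] = func
        return func
    return wrap


@register_validator('example_type')
def _validate_example(context, log_data):
    if not log_data.get('resource_id'):
        raise ValueError('resource_id is required')


_VALIDATORS['example_type'](None, {'resource_id': 'abc'})  # passes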

# ---- neutron-16.0.0.0b2.dev214/neutron/services/logapi/common/snat_validate.py ----
# Copyright (c) 2018 Fujitsu Limited
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License:
# http://www.apache.org/licenses/LICENSE-2.0

from neutron_lib.services.logapi import constants as log_const
from oslo_log import log as logging

from neutron.objects import router
from neutron.services.logapi.common import exceptions as log_exc
from neutron.services.logapi.common import validators

LOG = logging.getLogger(__name__)

EVENTS_DISABLE = [log_const.DROP_EVENT, log_const.ACCEPT_EVENT]


def _get_router(context, router_id):
    router_obj = router.Router.get_object(context, id=router_id)
    if not router_obj:
        raise log_exc.ResourceNotFound(resource_id=router_id)
    return router_obj


@validators.ResourceValidateRequest.register(log_const.SNAT)
def validate_snat_request(context, log_data):
    """Validate the incoming SNAT log request

    This method validates whether the SNAT log request can be satisfied.
    ResourceNotFound is raised if resource_id in log_data does not belong
    to any Router object. RouterNotEnabledSnat is raised if the indicated
    router does not have the SNAT feature enabled.
    """
    resource_id = log_data.get('resource_id')
    event = log_data.get('event')
    if not resource_id:
        raise log_exc.ResourceIdNotSpecified(resource_type=log_const.SNAT)
    if event in EVENTS_DISABLE:
        raise log_exc.EventsDisabled(events=EVENTS_DISABLE,
                                     resource_type=log_const.SNAT)
    router_obj = _get_router(context, resource_id)
    # Check whether SNAT is enabled or not
    if not router_obj.enable_snat:
        raise log_exc.RouterNotEnabledSnat(resource_id=resource_id)
    # Check whether the router gateway is set or not
    if not router_obj.gw_port_id:
        raise log_exc.RouterGatewayNotSet(resource_id=resource_id)
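
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the neutron source): the same guard
# sequence as validate_snat_request() above, over plain dicts, so the order
# of checks is easy to follow. The router dict and the 'DROP'/'ACCEPT'
# strings standing in for log_const.DROP_EVENT/ACCEPT_EVENT are assumptions.
# ---------------------------------------------------------------------------
def check_snat_log_request(log_data, router_info):
    if not log_data.get('resource_id'):
        raise ValueError('resource_id should be specified')
    if log_data.get('event') in ('DROP', 'ACCEPT'):
        raise ValueError('these events are disabled for SNAT logging')
    if not router_info['enable_snat']:
        raise ValueError('SNAT is not enabled for this router')
    if not router_info['gw_port_id']:
        raise ValueError('router gateway is not set')


check_snat_log_request({'resource_id': 'r1', 'event': 'ALL'},
                       {'enable_snat': True, 'gw_port_id': 'p1'})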

# ---- neutron-16.0.0.0b2.dev214/neutron/services/logapi/common/validators.py ----
# Copyright (c) 2017 Fujitsu Limited
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License:
# http://www.apache.org/licenses/LICENSE-2.0

from neutron_lib.api.definitions import portbindings
from neutron_lib import constants
from neutron_lib.plugins import constants as plugin_const
from neutron_lib.plugins import directory
from neutron_lib.plugins import utils
from oslo_log import log as logging

from neutron.services.logapi.common import exceptions as log_exc

LOG = logging.getLogger(__name__)

SKIPPED_VIF_TYPES = [
    portbindings.VIF_TYPE_UNBOUND,
    portbindings.VIF_TYPE_BINDING_FAILED,
]


def _validate_vnic_type(driver, vnic_type, port_id):
    if driver.is_vnic_compatible(vnic_type):
        return True
    LOG.debug("vnic_type %(vnic_type)s of port %(port_id)s "
              "is not compatible with logging driver %(driver)s",
              {'vnic_type': vnic_type,
               'port_id': port_id,
               'driver': driver.name})
    return False


def _validate_vif_type(driver, vif_type, port_id):
    if driver.is_vif_type_compatible(vif_type):
        return True
    LOG.debug("vif_type %(vif_type)s of port %(port_id)s "
              "is not compatible with logging driver %(driver)s",
              {'vif_type': vif_type,
               'port_id': port_id,
               'driver': driver.name})
    return False


def validate_log_type_for_port(log_type, port):
    """Validate a specific logging type on a specific port

    This method checks whether a log_driver exists that supports the given
    logging type on the given port.

    :param log_type: a logging type (e.g. security_group)
    :param port: a port object
    """
    log_plugin = directory.get_plugin(alias=plugin_const.LOG_API)
    drivers = log_plugin.driver_manager.drivers
    port_binding = utils.get_port_binding_by_status_and_host(
        port.bindings, constants.ACTIVE, raise_if_not_found=True,
        port_id=port['id'])
    for driver in drivers:
        vif_type = port_binding.vif_type
        if vif_type not in SKIPPED_VIF_TYPES:
            if not _validate_vif_type(driver, vif_type, port['id']):
                continue
        else:
            vnic_type = port_binding.vnic_type
            if not _validate_vnic_type(driver, vnic_type, port['id']):
                continue
        if driver.is_logging_type_supported(log_type):
            return True
    return False


class ResourceValidateRequest(object):

    _instance = None

    def __init__(self):
        self.validate_methods = {}

    @classmethod
    def get_instance(cls):
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    @property
    def validate_methods_map(self):
        return self.validate_methods

    def validate_request(self, context, log_data):
        """Validate request

        This method looks up and runs the validation method registered for
        resource_type. InvalidLogResourceType is raised if no logging driver
        supports resource_type as a logging resource. In addition,
        ValidatedMethodNotFound is raised if no validation method was
        registered for resource_type.
        """
        resource_type = log_data.get('resource_type')
        log_plugin = directory.get_plugin(alias=plugin_const.LOG_API)
        supported_logging_types = log_plugin.supported_logging_types

        if resource_type in supported_logging_types:
            method = self.get_validated_method(resource_type)
            method(context, log_data)
        else:
            raise log_exc.InvalidLogResourceType(resource_type=resource_type)

    def get_validated_method(self, resource_type):
        """Get the validated method for resource_type"""
        method = self.validate_methods[resource_type]
        if not method:
            raise log_exc.ValidatedMethodNotFound(resource_type=resource_type)
        return method
""" def func_wrap(func): cls.get_instance().validate_methods[resource_type] = func return func return func_wrap ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3670452 neutron-16.0.0.0b2.dev214/neutron/services/logapi/drivers/0000755000175000017500000000000000000000000023411 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/logapi/drivers/__init__.py0000644000175000017500000000000000000000000025510 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/logapi/drivers/base.py0000644000175000017500000001320600000000000024677 0ustar00coreycorey00000000000000# Copyright (c) 2017 Fujitsu Limited # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.services.logapi import constants as log_const from oslo_log import log as logging from neutron.services.logapi.rpc import server as server_rpc LOG = logging.getLogger(__name__) @registry.has_registry_receivers class DriverBase(object): def __init__(self, name, vif_types, vnic_types, supported_logging_types, requires_rpc=False): """Instantiate a log driver. :param name: driver name. :param vif_types: list of interfaces (VIFs) supported. :param vnic_types: list of vnic types supported. :param supported_logging_types: list of supported logging types. :param requires_rpc: indicates if this driver expects rpc sever to notify or callback """ self.name = name self.vif_types = vif_types self.vnic_types = vnic_types self.supported_logging_types = supported_logging_types self.requires_rpc = requires_rpc # The log driver should advertise itself as supported driver by calling # register_driver() on the LoggingServiceDriverManager. Therefore, # logging plugin can discover which resources types are supported by # the log driver. @registry.receives(log_const.LOGGING_PLUGIN, [events.AFTER_INIT]) def _register(self, resource, event, trigger, payload=None): if self.is_loaded: # trigger is the LoggingServiceDriverManager trigger.register_driver(self) def register_rpc_methods(self, resource_type, rpc_methods): server_rpc.register_rpc_methods(resource_type, rpc_methods) def is_loaded(self): """True if the driver is active for the Neutron Server. Implement this method to determine if your driver is actively configured for this Neutron Server deployment. 
""" return True def is_vif_type_compatible(self, vif_type): """True if the driver is compatible with the VIF type.""" return vif_type in self.vif_types def is_vnic_compatible(self, vnic_type): """True if the driver is compatible with the specific VNIC type.""" return vnic_type in self.vnic_types def is_logging_type_supported(self, log_type): supported = log_type in self.supported_logging_types if not supported: LOG.debug("logging type %(log_type)s is not supported by " "%(driver_name)s", {'log_type': log_type, 'driver_name': self.name}) return supported def create_log(self, context, log_obj): """Create a log_obj invocation. This method can be implemented by the specific driver subclass to update the backend where necessary with a specific log object. :param context: current running context information :param log_obj: a log objects being created """ def create_log_precommit(self, context, log_obj): """Create a log_obj precommit. This method can be implemented by the specific driver subclass to handle the precommit event of a log_object that is being created. :param context: current running context information :param log_obj: a log object being created """ def update_log(self, context, log_obj): """Update a log_obj invocation. This method can be implemented by the specific driver subclass to update the backend where necessary with a specific log object. :param context: current running context information :param log_obj: a log object being updated """ def update_log_precommit(self, context, log_obj): """Update a log_obj precommit. This method can be implemented by the specific driver subclass to handle update precommit event of a log_object that is being updated. :param context: current running context information :param log_obj: a log_object being updated. """ def delete_log(self, context, log_obj): """Delete a log_obj invocation. This method can be implemented by the specific driver subclass to delete the backend where necessary with a specific log object. :param context: current running context information :param log_obj: a log_object being deleted """ def delete_log_precommit(self, context, log_obj): """Delete a log_obj precommit. This method can be implemented by the specific driver subclass to handle delete precommit event of a log_object that is being deleted. :param context: current running context information :param log_obj: a log_object being deleted """ def resource_update(self, context, log_objs): """Tell the agent when resources related to log_objects are being updated :param context: current running context information :param log_objs: a list of log_objects, whose related resources are being updated. """ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/logapi/drivers/manager.py0000644000175000017500000001157700000000000025410 0ustar00coreycorey00000000000000# Copyright (c) 2017 Fujitsu Limited # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 

# ---- neutron-16.0.0.0b2.dev214/neutron/services/logapi/drivers/manager.py ----
# Copyright (c) 2017 Fujitsu Limited
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License:
# http://www.apache.org/licenses/LICENSE-2.0

from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib import exceptions
from neutron_lib.services.logapi import constants as log_const
from oslo_log import log as logging

from neutron.services.logapi.common import exceptions as log_exc
from neutron.services.logapi.rpc import server as server_rpc

LOG = logging.getLogger(__name__)

RESOURCE_CB_CLASS_MAP = {}


# This function should be called by the log_driver
def register(resource_type, obj_class):
    if resource_type not in RESOURCE_CB_CLASS_MAP:
        RESOURCE_CB_CLASS_MAP[resource_type] = obj_class


def _get_param(args, kwargs, name, index):
    try:
        return kwargs[name]
    except KeyError:
        try:
            return args[index]
        except IndexError:
            msg = "Missing parameter %s" % name
            raise log_exc.LogapiDriverException(exception_msg=msg)


class ResourceCallBackBase(object):

    def __new__(cls, *args, **kwargs):
        if not hasattr(cls, '_instance'):
            cls._instance = super(ResourceCallBackBase, cls).__new__(cls)
        return cls._instance

    def __init__(self, resource, push_api):
        self.resource_push_api = push_api
        for event in (events.AFTER_CREATE, events.AFTER_UPDATE,
                      events.AFTER_DELETE):
            registry.subscribe(self.handle_event, resource, event)

    def handle_event(self, resource, event, trigger, **kwargs):
        """Handle resource callback event"""
        pass


class LoggingServiceDriverManager(object):

    def __init__(self):
        self._drivers = set()
        self.rpc_required = False
        registry.publish(log_const.LOGGING_PLUGIN, events.AFTER_INIT, self)

        if self.rpc_required:
            self._start_rpc_listeners()
            self.logging_rpc = server_rpc.LoggingApiNotification()

    @property
    def drivers(self):
        return self._drivers

    def register_driver(self, driver):
        """Register driver with logging plugin.

        This method is called from drivers on INIT event.
        """
        self._drivers.add(driver)
        self.rpc_required |= driver.requires_rpc

        # Handle the callback events AFTER_CREATE, AFTER_UPDATE and
        # AFTER_DELETE of resources that are related to a log object.
        # For example: when a sg_rule is added to or deleted from a security
        # group, and that rule is bound by a log_resource, the agent must be
        # told to trigger the log_drivers.
        self._setup_resources_cb_handle()

    def _start_rpc_listeners(self):
        self._skeleton = server_rpc.LoggingApiSkeleton()
        return self._skeleton.conn.consume_in_threads()

    @property
    def supported_logging_types(self):
        if not self._drivers:
            return set()

        log_types = set()
        for driver in self._drivers:
            log_types |= set(driver.supported_logging_types)
        LOG.debug("Supported logging types (logging types supported "
                  "by at least one loaded log_driver): %s", log_types)
        return log_types

    def call(self, method_name, *args, **kwargs):
        """Helper method for calling a method across all extension drivers."""
        exc_list = []
        for driver in self._drivers:
            try:
                getattr(driver, method_name)(*args, **kwargs)
            except Exception as exc:
                exception_msg = ("Extension driver '%(name)s' failed in "
                                 "%(method)s")
                exception_data = {'name': driver.name, 'method': method_name}
                LOG.exception(exception_msg, exception_data)
                exc_list.append(exc)

        if exc_list:
            raise exceptions.DriverCallError(exc_list=exc_list)

        if self.rpc_required:
            context = _get_param(args, kwargs, 'context', index=0)
            log_obj = _get_param(args, kwargs, 'log_obj', index=1)

            try:
                rpc_method = getattr(self.logging_rpc, method_name)
            except AttributeError:
                LOG.error("Method %s is not implemented in logging RPC",
                          method_name)
                return
            rpc_method(context, log_obj)

    def _setup_resources_cb_handle(self):
        for res, obj_class in RESOURCE_CB_CLASS_MAP.items():
            obj_class(res, self.call)
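
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the neutron source):
# LoggingServiceDriverManager.call() above fans a method name out to every
# driver and collects failures before raising. The same pattern in
# miniature, with invented names, runnable on its own.
# ---------------------------------------------------------------------------
def fan_out(drivers, method_name, *args, **kwargs):
    errors = []
    for driver in drivers:
        try:
            getattr(driver, method_name)(*args, **kwargs)
        except Exception as exc:  # collect broadly, as call() does
            errors.append(exc)
    if errors:
        raise RuntimeError('%d driver(s) failed in %s'
                           % (len(errors), method_name))


class OkDriver(object):
    def create_log(self, context, log_obj):
        pass


fan_out([OkDriver()], 'create_log', None, {'id': 'log-1'})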

# ---- neutron-16.0.0.0b2.dev214/neutron/services/logapi/drivers/openvswitch/__init__.py (empty) ----

# ---- neutron-16.0.0.0b2.dev214/neutron/services/logapi/drivers/openvswitch/driver.py ----
# Copyright (c) 2017 Fujitsu Limited.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License:
# http://www.apache.org/licenses/LICENSE-2.0

from neutron_lib.api.definitions import portbindings
from neutron_lib.callbacks import resources
from neutron_lib.services.logapi import constants as log_const
from oslo_log import log as logging
from oslo_utils import importutils

from neutron.services.logapi.common import sg_callback
from neutron.services.logapi.drivers import base
from neutron.services.logapi.drivers import manager
from neutron.services.logapi.rpc import server as server_rpc

LOG = logging.getLogger(__name__)

DRIVER = None

SUPPORTED_LOGGING_TYPES = ['security_group']


class OVSDriver(base.DriverBase):

    @staticmethod
    def create():
        return OVSDriver(
            name='openvswitch',
            vif_types=[portbindings.VIF_TYPE_OVS,
                       portbindings.VIF_TYPE_VHOST_USER],
            vnic_types=[portbindings.VNIC_NORMAL],
            supported_logging_types=SUPPORTED_LOGGING_TYPES,
            requires_rpc=True)


def register():
    """Register the driver."""
    global DRIVER
    if not DRIVER:
        DRIVER = OVSDriver.create()
        # Register RPC methods
        if DRIVER.requires_rpc:
            rpc_methods = [
                {resources.PORT: server_rpc.get_sg_log_info_for_port},
                {log_const.LOG_RESOURCE:
                    server_rpc.get_sg_log_info_for_log_resources}
            ]
            DRIVER.register_rpc_methods(log_const.SECURITY_GROUP, rpc_methods)
        # Import to trigger the validator registration decorator
        importutils.import_module(
            'neutron.services.logapi.common.sg_validate'
        )
        # Register resource callback handler
        manager.register(
            resources.SECURITY_GROUP_RULE,
            sg_callback.SecurityGroupRuleCallBack)
    LOG.debug('Open vSwitch logging driver registered')


# ---- neutron-16.0.0.0b2.dev214/neutron/services/logapi/drivers/openvswitch/log_oskenapp.py ----
# Copyright (C) 2017 Fujitsu Limited
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License:
# http://www.apache.org/licenses/LICENSE-2.0

from os_ken.base import app_manager
from os_ken.controller import handler
from os_ken.controller import ofp_event
from os_ken.ofproto import ofproto_v1_3
from oslo_log import log as logging

LOG = logging.getLogger(__name__)


class OVSLogOSKenApp(app_manager.OSKenApp):
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]

    packet_in_handlers = []

    def register_packet_in_handler(self, caller):
        self.packet_in_handlers.append(caller)

    def unregister_packet_in_handler(self, caller):
        self.packet_in_handlers.remove(caller)

    @handler.set_ev_cls(ofp_event.EventOFPPacketIn, handler.MAIN_DISPATCHER)
    def packet_in_handler(self, ev):
        for caller in self.packet_in_handlers:
            caller(ev)

# ---- neutron-16.0.0.0b2.dev214/neutron/services/logapi/drivers/openvswitch/ovs_firewall_log.py ----
# Copyright (c) 2017 Fujitsu Limited
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License:
# http://www.apache.org/licenses/LICENSE-2.0

import collections

from neutron_lib import constants as lib_const
from neutron_lib.services.logapi import constants as log_const
from os_ken.base import app_manager
from os_ken.lib.packet import packet
from oslo_config import cfg
from oslo_log import formatters
from oslo_log import handlers
from oslo_log import log as logging

from neutron.agent.linux.openvswitch_firewall import constants as ovsfw_consts
from neutron.agent.linux.openvswitch_firewall import firewall as ovsfw
from neutron.agent.linux.openvswitch_firewall import rules
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \
    as ovs_consts
from neutron.services.logapi.agent import log_extension as log_ext
from neutron.services.logapi.common import exceptions as log_exc
from neutron.services.logapi.drivers.openvswitch import log_oskenapp

LOG = logging.getLogger(__name__)

OVS_FW_TO_LOG_TABLES = {
    ovs_consts.RULES_EGRESS_TABLE: ovs_consts.ACCEPTED_EGRESS_TRAFFIC_TABLE,
    ovs_consts.RULES_INGRESS_TABLE: ovs_consts.ACCEPTED_INGRESS_TRAFFIC_TABLE,
}

FIELDS_TO_REMOVE = ['priority', 'actions', 'dl_type', 'reg_port',
                    'reg_remote_group']

REMOTE_RULE_PRIORITY = 70


def setup_logging():
    log_file = cfg.CONF.network_log.local_output_log_base
    if log_file:
        from logging import handlers as watch_handler
        log_file_handler = watch_handler.WatchedFileHandler(log_file)
        log_file_handler.setLevel(
            logging.DEBUG if cfg.CONF.debug else logging.INFO)
        LOG.logger.addHandler(log_file_handler)
        log_file_handler.setFormatter(
            formatters.ContextFormatter(
                fmt=cfg.CONF.logging_default_format_string,
                datefmt=cfg.CONF.log_date_format))
    elif cfg.CONF.use_journal:
        journal_handler = handlers.OSJournalHandler()
        LOG.logger.addHandler(journal_handler)
    else:
        syslog_handler = handlers.OSSysLogHandler()
        LOG.logger.addHandler(syslog_handler)


def find_deleted_sg_rules(old_port, new_ports):
    del_rules = list()
    for port in new_ports:
        if old_port.id == port.id:
            for rule in old_port.secgroup_rules:
                if rule not in port.secgroup_rules:
                    del_rules.append(rule)
            return del_rules
    return del_rules


class Cookie(object):

    def __init__(self, cookie_id, port, action, project):
        self.id = cookie_id
        self.port = port
        self.action = action
        self.project = project
        self.log_object_refs = set()

    def __eq__(self, other):
        return (self.id == other.id and
                self.action == other.action and
                self.port == other.port)

    def __hash__(self):
        return hash(self.id)

    def add_log_obj_ref(self, log_id):
        self.log_object_refs.add(log_id)

    def remove_log_obj_ref(self, log_id):
        self.log_object_refs.discard(log_id)

    @property
    def is_empty(self):
        return not self.log_object_refs


class OFPortLog(object):

    def __init__(self, port, ovs_port, log_event):
        self.id = port['port_id']
        self.ofport = ovs_port.ofport
        self.secgroup_rules = [self._update_rule(rule)
                               for rule in port['security_group_rules']]
        # event can be ALL, DROP and ACCEPT
        self.event = log_event

    def _update_rule(self, rule):
        protocol = rule.get('protocol')
        if protocol is not None:
            if not isinstance(protocol, int) and protocol.isdigit():
                rule['protocol'] = int(protocol)
            elif (rule.get('ethertype') == lib_const.IPv6 and
                  protocol == lib_const.PROTO_NAME_ICMP):
                rule['protocol'] = lib_const.PROTO_NUM_IPV6_ICMP
            else:
                rule['protocol'] = lib_const.IP_PROTOCOL_MAP.get(
                    protocol, protocol)
        return rule
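
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the neutron source): Cookie objects above
# reference-count the log objects that share one OpenFlow cookie, and the
# cookie is only reclaimed once the last log object releases it. The same
# idea in miniature; the class name is invented.
# ---------------------------------------------------------------------------
class MiniCookie(object):
    def __init__(self):
        self.refs = set()

    @property
    def is_empty(self):
        return not self.refs


mini_cookie = MiniCookie()
mini_cookie.refs.add('log-a')
mini_cookie.refs.add('log-b')
mini_cookie.refs.discard('log-a')
assert not mini_cookie.is_empty   # 'log-b' still holds the cookie
mini_cookie.refs.discard('log-b')
assert mini_cookie.is_empty       # now safe to unset the OVS cookie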

class OVSFirewallLoggingDriver(log_ext.LoggingDriver):
    SUPPORTED_LOGGING_TYPES = ['security_group']
    REQUIRED_PROTOCOLS = [
        ovs_consts.OPENFLOW13,
        ovs_consts.OPENFLOW14,
    ]

    def __init__(self, agent_api):
        integration_bridge = agent_api.request_int_br()
        self.int_br = self.initialize_bridge(integration_bridge)
        self._deferred = False
        self.log_ports = collections.defaultdict(dict)
        self.cookies_table = set()
        self.cookie_ids_to_delete = set()
        self.conj_id_map = ovsfw.ConjIdMap()

    def initialize(self, resource_rpc, **kwargs):
        self.resource_rpc = resource_rpc
        setup_logging()
        self.start_logapp()

    @staticmethod
    def initialize_bridge(bridge):
        bridge.add_protocols(*OVSFirewallLoggingDriver.REQUIRED_PROTOCOLS)
        # set rate limit and burst limit for controller
        bridge.set_controller_rate_limit(cfg.CONF.network_log.rate_limit)
        bridge.set_controller_burst_limit(cfg.CONF.network_log.burst_limit)
        return bridge.deferred(full_ordered=True)

    def start_logapp(self):
        app_mgr = app_manager.AppManager.get_instance()
        self.log_app = app_mgr.instantiate(log_oskenapp.OVSLogOSKenApp)
        self.log_app.start()
        self.log_app.register_packet_in_handler(self.packet_in_handler)

    def packet_in_handler(self, ev):
        msg = ev.msg
        cookie_id = msg.cookie
        pkt = packet.Packet(msg.data)
        try:
            cookie_entry = self._get_cookie_by_id(cookie_id)
            LOG.info("action=%s project_id=%s log_resource_ids=%s vm_port=%s "
                     "pkt=%s", cookie_entry.action, cookie_entry.project,
                     list(cookie_entry.log_object_refs),
                     cookie_entry.port, pkt)
        except log_exc.CookieNotFound:
            LOG.warning("Unknown cookie=%s packet_in pkt=%s", cookie_id, pkt)

    def defer_apply_on(self):
        self._deferred = True

    def defer_apply_off(self):
        if self._deferred:
            self.int_br.apply_flows()
            self._cleanup_cookies()
            self._deferred = False

    def _get_cookie(self, port_id, action):
        for cookie in self.cookies_table:
            if cookie.port == port_id and cookie.action == action:
                return cookie

    def _get_cookies_by_port(self, port_id):
        cookies_list = []
        for cookie in self.cookies_table:
            if cookie.port == port_id:
                cookies_list.append(cookie)
        return cookies_list

    def _get_cookie_by_id(self, cookie_id):
        for cookie in self.cookies_table:
            if str(cookie.id) == str(cookie_id):
                return cookie
        raise log_exc.CookieNotFound(cookie_id=cookie_id)

    def _cleanup_cookies(self):
        cookie_ids = self.cookie_ids_to_delete
        self.cookie_ids_to_delete = set()
        for cookie_id in cookie_ids:
            self.int_br.br.unset_cookie(cookie_id)

    def generate_cookie(self, port_id, action, log_id, project_id):
        cookie = self._get_cookie(port_id, action)
        if not cookie:
            cookie_id = self.int_br.br.request_cookie()
            cookie = Cookie(cookie_id=cookie_id, port=port_id,
                            action=action, project=project_id)
            self.cookies_table.add(cookie)
        cookie.add_log_obj_ref(log_id)
        return cookie.id

    def _schedule_cookie_deletion(self, cookie):
        # discard a cookie object
        self.cookies_table.remove(cookie)
        # schedule the cookie id for cleanup later
        self.cookie_ids_to_delete.add(cookie.id)

    def start_logging(self, context, **kwargs):
        LOG.debug("start logging: %s", str(kwargs))
        for resource_type in self.SUPPORTED_LOGGING_TYPES:
            # handle port updated, agent restarted
            if 'port_id' in kwargs:
                self._handle_logging('_create', context, resource_type,
                                     **kwargs)
            else:
                self._handle_log_resources_by_type(
                    '_create', context, resource_type, **kwargs)
    def stop_logging(self, context, **kwargs):
        LOG.debug("stop logging: %s", str(kwargs))
        for resource_type in self.SUPPORTED_LOGGING_TYPES:
            # handle port deleted
            if 'port_id' in kwargs:
                self._handle_logging('_delete', context, resource_type,
                                     **kwargs)
            else:
                self._handle_log_resources_by_type(
                    '_delete', context, resource_type, **kwargs)

    def _handle_log_resources_by_type(
            self, action, context, resource_type, **kwargs):
        log_resources = []
        for log_obj in kwargs.get('log_resources', []):
            if log_obj['resource_type'] == resource_type:
                log_resources.append(log_obj)
        if log_resources:
            self._handle_logging(
                action, context, resource_type, log_resources=log_resources)

    def _handle_logging(self, action, context, resource_type, **kwargs):
        handler_name = "%s_%s_log" % (action, resource_type)
        handler = getattr(self, handler_name)
        handler(context, **kwargs)

    def create_ofport_log(self, port, log_id, log_event):
        port_id = port['port_id']
        ovs_port = self.int_br.br.get_vif_port_by_id(port_id)
        if ovs_port:
            of_port_log = OFPortLog(port, ovs_port, log_event)
            self.log_ports[log_id].add(of_port_log)

    def _create_security_group_log(self, context, **kwargs):
        port_id = kwargs.get('port_id')
        log_resources = kwargs.get('log_resources')
        logs_info = []
        if port_id:
            # try to clean port flows log for port updated/created events
            self._cleanup_port_flows_log(port_id)
            logs_info = self.resource_rpc.get_sg_log_info_for_port(
                context,
                resource_type=log_const.SECURITY_GROUP,
                port_id=port_id)
        elif log_resources:
            logs_info = self.resource_rpc.get_sg_log_info_for_log_resources(
                context,
                resource_type=log_const.SECURITY_GROUP,
                log_resources=log_resources)

        for log_info in logs_info:
            log_id = log_info['id']
            old_ofport_logs = self.log_ports.get(log_id, [])
            ports = log_info.get('ports_log')
            self.log_ports[log_id] = set()
            for port in ports:
                self.create_ofport_log(port, log_id, log_info.get('event'))
            # try to clean flows log if sg_rules are deleted
            for port in old_ofport_logs:
                del_rules = find_deleted_sg_rules(
                    port, self.log_ports[log_id])
                if del_rules:
                    self._delete_sg_rules_flow_log(port, del_rules)
            for port_log in self.log_ports[log_id]:
                self.add_flows_from_rules(port_log, log_info)

    def _cleanup_port_flows_log(self, port_id):
        cookies_list = self._get_cookies_by_port(port_id)
        for cookie in cookies_list:
            if cookie.action == log_const.ACCEPT_EVENT:
                self._delete_flows(
                    table=ovs_consts.ACCEPTED_INGRESS_TRAFFIC_TABLE,
                    cookie=cookie.id)
                self._delete_flows(
                    table=ovs_consts.ACCEPTED_EGRESS_TRAFFIC_TABLE,
                    cookie=cookie.id)
            if cookie.action == log_const.DROP_EVENT:
                self._delete_flows(
                    table=ovs_consts.DROPPED_TRAFFIC_TABLE,
                    cookie=cookie.id)
            self._schedule_cookie_deletion(cookie)

    def _delete_security_group_log(self, context, **kwargs):
        # port deleted event
        port_id = kwargs.get('port_id')
        if port_id:
            self._cleanup_port_flows_log(port_id)
        # log resources deleted events
        for log_resource in kwargs.get('log_resources', []):
            log_id = log_resource.get('id')
            of_port_logs = self.log_ports.get(log_id, [])
            for of_port_log in of_port_logs:
                self.delete_port_flows_log(of_port_log, log_id)

    def _log_accept_flow(self, **flow):
        # log first accepted packet
        flow['table'] = OVS_FW_TO_LOG_TABLES[flow['table']]
        flow['actions'] = 'controller'
        # forward egress accepted packet and log
        if flow['table'] == ovs_consts.ACCEPTED_EGRESS_TRAFFIC_TABLE:
            flow['actions'] = 'resubmit(,%d),controller' % (
                ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE)
        self._add_flow(**flow)

    def _add_flow(self, **kwargs):
        dl_type = kwargs.get('dl_type')
        ovsfw.create_reg_numbers(kwargs)
        if isinstance(dl_type, int):
            kwargs['dl_type'] = "0x{:04x}".format(dl_type)
        LOG.debug("Add flow firewall log %s", str(kwargs))
        if self._deferred:
            self.int_br.add_flow(**kwargs)
        else:
            self.int_br.br.add_flow(**kwargs)
    def _delete_flows(self, **kwargs):
        ovsfw.create_reg_numbers(kwargs)
        if self._deferred:
            self.int_br.delete_flows(**kwargs)
        else:
            self.int_br.br.delete_flows(**kwargs)

    def _log_drop_packet(self, port, log_id, project_id):
        cookie = self.generate_cookie(port.id, log_const.DROP_EVENT,
                                      log_id, project_id)
        self._add_flow(
            cookie=cookie,
            table=ovs_consts.DROPPED_TRAFFIC_TABLE,
            priority=53,
            reg_port=port.ofport,
            actions='controller'
        )

    def create_rules_generator_for_port(self, port):
        for rule in port.secgroup_rules:
            yield rule

    def _create_conj_flows_log(self, remote_rule, port):
        ethertype = remote_rule['ethertype']
        direction = remote_rule['direction']
        remote_sg_id = remote_rule['remote_group_id']
        secgroup_id = remote_rule['security_group_id']
        # we only want to log the first accepted packet, which means a
        # packet with ct_state=+new-est and reg_remote_group=conj_id + 1
        # will be logged
        flow_template = {
            'priority': REMOTE_RULE_PRIORITY,
            'dl_type': ovsfw_consts.ethertype_to_dl_type_map[ethertype],
            'reg_port': port.ofport,
            'reg_remote_group': self.conj_id_map.get_conj_id(
                secgroup_id, remote_sg_id, direction, ethertype) + 1,
        }
        if direction == lib_const.INGRESS_DIRECTION:
            flow_template['table'] = ovs_consts.RULES_INGRESS_TABLE
        elif direction == lib_const.EGRESS_DIRECTION:
            flow_template['table'] = ovs_consts.RULES_EGRESS_TABLE
        return [flow_template]

    def _log_accept_packet(self, port, log_id, project_id):
        cookie = self.generate_cookie(port.id, log_const.ACCEPT_EVENT,
                                      log_id, project_id)
        for rule in self.create_rules_generator_for_port(port):
            if 'remote_group_id' in rule:
                flows = self._create_conj_flows_log(rule, port)
            else:
                flows = rules.create_flows_from_rule_and_port(rule, port)
            for flow in flows:
                flow['cookie'] = cookie
                self._log_accept_flow(**flow)

    def add_flows_from_rules(self, port, log_info):
        # log event can be ACCEPT or DROP or ALL (both ACCEPT and DROP)
        event = log_info['event']
        project_id = log_info['project_id']
        log_id = log_info['id']

        if event == log_const.ACCEPT_EVENT:
            self._log_accept_packet(port, log_id, project_id)
        elif event == log_const.DROP_EVENT:
            self._log_drop_packet(port, log_id, project_id)
        else:
            self._log_drop_packet(port, log_id, project_id)
            self._log_accept_packet(port, log_id, project_id)

    def _delete_accept_flows_log(self, port, log_id):
        cookie = self._get_cookie(port.id, log_const.ACCEPT_EVENT)
        if cookie:
            cookie.remove_log_obj_ref(log_id)
            if cookie.is_empty:
                self._delete_flows(
                    table=ovs_consts.ACCEPTED_INGRESS_TRAFFIC_TABLE,
                    cookie=cookie.id)
                self._delete_flows(
                    table=ovs_consts.ACCEPTED_EGRESS_TRAFFIC_TABLE,
                    cookie=cookie.id)
                self._schedule_cookie_deletion(cookie)

    def _delete_drop_flows_log(self, port, log_id):
        cookie = self._get_cookie(port.id, log_const.DROP_EVENT)
        if cookie:
            cookie.remove_log_obj_ref(log_id)
            if cookie.is_empty:
                self._delete_flows(table=ovs_consts.DROPPED_TRAFFIC_TABLE,
                                   cookie=cookie.id)
                self._schedule_cookie_deletion(cookie)

    def delete_port_flows_log(self, port, log_id):
        """Delete all flows log for given port and log_id"""
        event = port.event
        if event == log_const.ACCEPT_EVENT:
            self._delete_accept_flows_log(port, log_id)
        elif event == log_const.DROP_EVENT:
            self._delete_drop_flows_log(port, log_id)
        else:
            self._delete_accept_flows_log(port, log_id)
            self._delete_drop_flows_log(port, log_id)

    def _delete_sg_rules_flow_log(self, port, del_rules):
        cookie = self._get_cookie(port.id, log_const.ACCEPT_EVENT)
        if not cookie:
            return
        for rule in del_rules:
            if 'remote_group_id' in rule:
                flows = self._create_conj_flows_log(rule, port)
            else:
                flows = rules.create_flows_from_rule_and_port(rule, port)
            for flow in flows:
                for kw in FIELDS_TO_REMOVE:
                    flow.pop(kw, None)
                flow['table'] = OVS_FW_TO_LOG_TABLES[flow['table']]
                flow['cookie'] = cookie.id
                self._delete_flows(**flow)


# ---- neutron-16.0.0.0b2.dev214/neutron/services/logapi/logging_plugin.py ----
# Copyright (c) 2017 Fujitsu Limited
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License:
# http://www.apache.org/licenses/LICENSE-2.0

from neutron_lib.api.definitions import logging
from neutron_lib.db import api as db_api
from neutron_lib.services.logapi import constants as log_const

from neutron.db import db_base_plugin_common
from neutron.extensions import logging as log_ext
from neutron.objects import base as base_obj
from neutron.objects.logapi import logging_resource as log_object
from neutron.services.logapi.common import exceptions as log_exc
from neutron.services.logapi.common import validators
from neutron.services.logapi.drivers import manager as driver_mgr


class LoggingPlugin(log_ext.LoggingPluginBase):
    """Implementation of the Neutron logging api plugin."""

    supported_extension_aliases = [logging.ALIAS]

    __native_pagination_support = True
    __native_sorting_support = True
    __filter_validation_support = True

    def __init__(self):
        super(LoggingPlugin, self).__init__()
        self.driver_manager = driver_mgr.LoggingServiceDriverManager()
        self.validator_mgr = validators.ResourceValidateRequest.get_instance()

    @property
    def supported_logging_types(self):
        # supported_logging_types is dynamically loaded from the log_drivers
        return self.driver_manager.supported_logging_types

    @db_base_plugin_common.filter_fields
    @db_base_plugin_common.convert_result_to_dict
    def get_logs(self, context, filters=None, fields=None, sorts=None,
                 limit=None, marker=None, page_reverse=False):
        """Return information for available log objects"""
        filters = filters or {}
        pager = base_obj.Pager(sorts, limit, page_reverse, marker)
        return log_object.Log.get_objects(context, _pager=pager, **filters)

    def _get_log(self, context, log_id):
        """Return the log object or raise if not found"""
        log_obj = log_object.Log.get_object(context, id=log_id)
        if not log_obj:
            raise log_exc.LogResourceNotFound(log_id=log_id)
        return log_obj

    @db_base_plugin_common.filter_fields
    @db_base_plugin_common.convert_result_to_dict
    def get_log(self, context, log_id, fields=None):
        return self._get_log(context, log_id)

    @db_base_plugin_common.convert_result_to_dict
    def create_log(self, context, log):
        """Create a log object"""
        log_data = log['log']
        self.validator_mgr.validate_request(context, log_data)
        with db_api.CONTEXT_WRITER.using(context):
            # body 'log' contains both tenant_id and project_id, but only
            # the latter is needed to create the Log object, so remove the
            # redundant keyword.
            log_data.pop('tenant_id', None)
            log_obj = log_object.Log(context=context, **log_data)
            log_obj.create()
            if log_obj.enabled:
                self.driver_manager.call(
                    log_const.CREATE_LOG_PRECOMMIT, context, log_obj)
        if log_obj.enabled:
            self.driver_manager.call(
                log_const.CREATE_LOG, context, log_obj)
        return log_obj

    @db_base_plugin_common.convert_result_to_dict
    def update_log(self, context, log_id, log):
        """Update information for the specified log object"""
        log_data = log['log']
        with db_api.CONTEXT_WRITER.using(context):
            log_obj = log_object.Log(context, id=log_id)
            log_obj.update_fields(log_data, reset_changes=True)
            log_obj.update()
            need_notify = 'enabled' in log_data
            if need_notify:
                self.driver_manager.call(
                    log_const.UPDATE_LOG_PRECOMMIT, context, log_obj)
        if need_notify:
            self.driver_manager.call(
                log_const.UPDATE_LOG, context, log_obj)
        return log_obj

    def delete_log(self, context, log_id):
        """Delete the specified log object"""
        with db_api.CONTEXT_WRITER.using(context):
            log_obj = self._get_log(context, log_id)
            log_obj.delete()
            self.driver_manager.call(
                log_const.DELETE_LOG_PRECOMMIT, context, log_obj)
        self.driver_manager.call(
            log_const.DELETE_LOG, context, log_obj)

    def get_loggable_resources(self, context, filters=None, fields=None,
                               sorts=None, limit=None,
                               marker=None, page_reverse=False):
        """Get supported logging types"""
        return [{'type': type_}
                for type_ in self.supported_logging_types]
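
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the neutron source): create_log() above
# consumes a request body of the form {'log': {...}}. A plausible payload
# with invented IDs, showing the fields the plugin and validators read
# (resource_type, resource_id, target_id, event, enabled); exact field sets
# are an assumption based on the code above.
# ---------------------------------------------------------------------------
sample_request_body = {
    'log': {
        'name': 'web-sg-logging',
        'resource_type': 'security_group',
        'resource_id': 'sg-uuid',
        'target_id': 'port-uuid',
        'event': 'ALL',
        'enabled': True,
        'project_id': 'project-uuid',
    }
}

log_data = sample_request_body['log']
assert log_data['resource_type'] == 'security_group'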
from neutron_lib import rpc as n_rpc from neutron_lib.services.logapi import constants as log_const from oslo_log import helpers as log_helpers import oslo_messaging class LoggingApiStub(object): """Stub proxy code for agent->server communication.""" def __init__(self): target = oslo_messaging.Target( topic=log_const.LOGGING_PLUGIN, version='1.0', namespace=log_const.RPC_NAMESPACE_LOGGING) self.rpc_client = n_rpc.get_client(target) @log_helpers.log_method_call def get_sg_log_info_for_port(self, context, resource_type, port_id): """Return list of sg_log info for a port""" cctxt = self.rpc_client.prepare() return cctxt.call(context, 'get_sg_log_info_for_port', resource_type=resource_type, port_id=port_id) @log_helpers.log_method_call def get_sg_log_info_for_log_resources(self, context, resource_type, log_resources): """Return list of sg_log info for list of log_resources""" cctxt = self.rpc_client.prepare() return cctxt.call(context, 'get_sg_log_info_for_log_resources', resource_type=resource_type, log_resources=log_resources) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/logapi/rpc/server.py0000644000175000017500000001067300000000000024406 0ustar00coreycorey00000000000000# Copyright (C) 2017 Fujitsu Limited # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.callbacks import resources as r_const from neutron_lib import rpc as n_rpc from neutron_lib.services.logapi import constants as log_const from oslo_log import helpers as log_helpers from oslo_log import log as logging import oslo_messaging from neutron.api.rpc.callbacks import events from neutron.api.rpc.handlers import resources_rpc from neutron.services.logapi.common import db_api LOG = logging.getLogger(__name__) # RPC methods mapping RPC_RESOURCES_METHOD_MAP = {} # This function must be called when a log_driver is registered. def register_rpc_methods(resource_type, rpc_methods): """Register RPC methods. :param resource_type: string and must be a valid resource type. :param rpc_methods: list of RPC methods to be registered. 
This param would look like: [ {'PORT': get_sg_log_info_for_port}, {'LOG_RESOURCE': get_sg_log_info_for_log_resources} ] """ if resource_type not in RPC_RESOURCES_METHOD_MAP: RPC_RESOURCES_METHOD_MAP[resource_type] = rpc_methods def get_rpc_method(resource_type, rpc_method_key): if resource_type not in RPC_RESOURCES_METHOD_MAP: raise NotImplementedError() for rpc_method in RPC_RESOURCES_METHOD_MAP[resource_type]: if rpc_method_key in rpc_method.keys(): return list(rpc_method.values())[0] raise NotImplementedError() def get_sg_log_info_for_port(context, port_id): return db_api.get_sg_log_info_for_port(context, port_id) def get_sg_log_info_for_log_resources(context, log_resources): return db_api.get_sg_log_info_for_log_resources(context, log_resources) class LoggingApiSkeleton(object): """Skeleton proxy code for agent->server communication.""" # History # 1.0 Initial version # 1.1 Introduce resource_type as a keyword in order to extend # support for other resources target = oslo_messaging.Target( version='1.1', namespace=log_const.RPC_NAMESPACE_LOGGING) def __init__(self): self.conn = n_rpc.Connection() self.conn.create_consumer(log_const.LOGGING_PLUGIN, [self], fanout=False) @log_helpers.log_method_call def get_sg_log_info_for_port(self, context, port_id, **kwargs): resource_type = kwargs.get('resource_type', log_const.SECURITY_GROUP) LOG.debug("Logging agent requests log info " "for port with resource type %s", resource_type) rpc_method = get_rpc_method(resource_type, r_const.PORT) return rpc_method(context, port_id) @log_helpers.log_method_call def get_sg_log_info_for_log_resources(self, context, log_resources, **kwargs): resource_type = kwargs.get('resource_type', log_const.SECURITY_GROUP) LOG.debug("Logging agent requests log info " "for log resources with resource type %s", resource_type) rpc_method = get_rpc_method(resource_type, log_const.LOG_RESOURCE) return rpc_method(context, log_resources) class LoggingApiNotification(object): def __init__(self): self.notification_api = resources_rpc.ResourcesPushRpcApi() @log_helpers.log_method_call def create_log(self, context, log_obj): self.notification_api.push(context, [log_obj], events.CREATED) @log_helpers.log_method_call def update_log(self, context, log_obj): self.notification_api.push(context, [log_obj], events.UPDATED) @log_helpers.log_method_call def delete_log(self, context, log_obj): self.notification_api.push(context, [log_obj], events.DELETED) @log_helpers.log_method_call def resource_update(self, context, log_objs): """Notify the agent when resources related to log objects are updated""" self.notification_api.push(context, log_objs, events.UPDATED) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3670452 neutron-16.0.0.0b2.dev214/neutron/services/loki/0000755000175000017500000000000000000000000021416 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/loki/__init__.py0000644000175000017500000000000000000000000023515 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/loki/loki_plugin.py0000644000175000017500000000335700000000000024314 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import random import time from neutron_lib.db import api as db_api from neutron_lib.services import base as service_base from oslo_db import exception as db_exc from oslo_log import log as logging from sqlalchemy.orm import session as se LOG = logging.getLogger(__name__) class LokiPlugin(service_base.ServicePluginBase): """Loki brings us the gift of sporadic database failures and delays.""" def __init__(self): super(LokiPlugin, self).__init__() db_api.sqla_listen(se.Session, 'before_flush', self.random_deadlock) db_api.sqla_listen(se.Session, 'loaded_as_persistent', self.random_delay) def random_deadlock(self, session, flush_context, instances): if random.randrange(0, 51) > 49: # 1/51 probability raise db_exc.DBDeadlock() def random_delay(self, session, instance): if random.randrange(0, 201) > 199: # 1/201 probability LOG.debug("Loki has delayed loading of instance %s", instance) time.sleep(1) def get_plugin_type(self): return "loki" def get_plugin_description(self): return "Injects deadlocks and delays into database operations." ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3670452 neutron-16.0.0.0b2.dev214/neutron/services/metering/0000755000175000017500000000000000000000000022272 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/metering/__init__.py0000644000175000017500000000000000000000000024371 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3670452 neutron-16.0.0.0b2.dev214/neutron/services/metering/agents/0000755000175000017500000000000000000000000023553 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/metering/agents/__init__.py0000644000175000017500000000000000000000000025652 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/metering/agents/metering_agent.py0000644000175000017500000002743100000000000027124 0ustar00coreycorey00000000000000# Copyright (C) 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
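# [Editor's note -- illustrative addition, not part of the original module.]
# LokiPlugin above is a chaos-testing plugin: it attaches fault injectors to
# SQLAlchemy session events through db_api.sqla_listen(). A self-contained
# sketch of the same hook pattern using plain SQLAlchemy (the function name
# and error message here are invented for illustration):
#
#     import random
#     from sqlalchemy import event
#     from sqlalchemy.orm import Session
#
#     def chaos_before_flush(session, flush_context, instances):
#         # Raising inside 'before_flush' aborts the flush, which is how
#         # random_deadlock() above simulates a database deadlock.
#         if random.randrange(0, 51) > 49:   # true only for 50: 1 in 51
#             raise RuntimeError('injected failure for testing')
#
#     event.listen(Session, 'before_flush', chaos_before_flush)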
import sys from neutron_lib.agent import topics from neutron_lib import constants from neutron_lib import context from neutron_lib import rpc as n_rpc from neutron_lib.utils import runtime from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_service import loopingcall from oslo_service import periodic_task from oslo_service import service from oslo_utils import timeutils from neutron._i18n import _ from neutron.agent import rpc as agent_rpc from neutron.common import config as common_config from neutron.conf.agent import common as config from neutron.conf.services import metering_agent from neutron import manager from neutron import service as neutron_service from neutron.services.metering.drivers import utils as driverutils LOG = logging.getLogger(__name__) class MeteringPluginRpc(object): def __init__(self, host): # NOTE(yamamoto): super.__init__() call here is not only for # aesthetics. Because of multiple inheritances in MeteringAgent, # it's actually necessary to initialize parent classes of # manager.Manager correctly. super(MeteringPluginRpc, self).__init__(host) target = oslo_messaging.Target(topic=topics.METERING_PLUGIN, version='1.0') self.client = n_rpc.get_client(target) def _get_sync_data_metering(self, context): try: cctxt = self.client.prepare() return cctxt.call(context, 'get_sync_data_metering', host=self.host) except Exception: LOG.exception("Failed synchronizing routers") class MeteringAgent(MeteringPluginRpc, manager.Manager): def __init__(self, host, conf=None): self.conf = conf or cfg.CONF self._load_drivers() self.context = context.get_admin_context_without_session() self.metering_loop = loopingcall.FixedIntervalLoopingCall( self._metering_loop ) measure_interval = self.conf.measure_interval self.last_report = 0 self.metering_loop.start(interval=measure_interval) self.host = host self.label_tenant_id = {} self.routers = {} self.metering_infos = {} super(MeteringAgent, self).__init__(host=host) def _load_drivers(self): """Loads plugin-driver from configuration.""" LOG.info("Loading Metering driver %s", self.conf.driver) if not self.conf.driver: raise SystemExit(_('A metering driver must be specified')) self.metering_driver = driverutils.load_metering_driver(self, self.conf) def _metering_notification(self): for label_id, info in self.metering_infos.items(): data = {'label_id': label_id, 'tenant_id': self.label_tenant_id.get(label_id), 'pkts': info['pkts'], 'bytes': info['bytes'], 'time': info['time'], 'first_update': info['first_update'], 'last_update': info['last_update'], 'host': self.host} LOG.debug("Send metering report: %s", data) notifier = n_rpc.get_notifier('metering') notifier.info(self.context, 'l3.meter', data) info['pkts'] = 0 info['bytes'] = 0 info['time'] = 0 def _purge_metering_info(self): deadline_timestamp = timeutils.utcnow_ts() - self.conf.report_interval label_ids = [ label_id for label_id, info in self.metering_infos.items() if info['last_update'] < deadline_timestamp] for label_id in label_ids: del self.metering_infos[label_id] def _add_metering_info(self, label_id, pkts, bytes): ts = timeutils.utcnow_ts() info = self.metering_infos.get(label_id, {'bytes': 0, 'pkts': 0, 'time': 0, 'first_update': ts, 'last_update': ts}) info['bytes'] += bytes info['pkts'] += pkts info['time'] += ts - info['last_update'] info['last_update'] = ts self.metering_infos[label_id] = info return info def _add_metering_infos(self): self.label_tenant_id = {} for router in self.routers.values(): tenant_id = router['tenant_id'] 
labels = router.get(constants.METERING_LABEL_KEY, []) for label in labels: label_id = label['id'] self.label_tenant_id[label_id] = tenant_id accs = self._get_traffic_counters(self.context, self.routers.values()) if not accs: return for label_id, acc in accs.items(): self._add_metering_info(label_id, acc['pkts'], acc['bytes']) def _metering_loop(self): self._sync_router_namespaces(self.context, self.routers.values()) self._add_metering_infos() ts = timeutils.utcnow_ts() delta = ts - self.last_report report_interval = self.conf.report_interval if delta >= report_interval: self._metering_notification() self._purge_metering_info() self.last_report = ts @runtime.synchronized('metering-agent') def _invoke_driver(self, context, meterings, func_name): try: return getattr(self.metering_driver, func_name)(context, meterings) except AttributeError: LOG.exception("Driver %(driver)s does not implement %(func)s", {'driver': self.conf.driver, 'func': func_name}) except RuntimeError: LOG.exception("Driver %(driver)s:%(func)s runtime error", {'driver': self.conf.driver, 'func': func_name}) @periodic_task.periodic_task(run_immediately=True) def _sync_routers_task(self, context): routers = self._get_sync_data_metering(self.context) routers_on_agent = set(self.routers.keys()) routers_on_server = set( [router['id'] for router in routers] if routers else []) for router_id in routers_on_agent - routers_on_server: del self.routers[router_id] self._invoke_driver(context, router_id, 'remove_router') if not routers: return self._update_routers(context, routers) def router_deleted(self, context, router_id): self._add_metering_infos() if router_id in self.routers: del self.routers[router_id] return self._invoke_driver(context, router_id, 'remove_router') def routers_updated(self, context, routers=None): if not routers: routers = self._get_sync_data_metering(self.context) if not routers: return self._update_routers(context, routers) def _update_routers(self, context, routers): for router in routers: self.routers[router['id']] = router return self._invoke_driver(context, routers, 'update_routers') def _get_traffic_counters(self, context, routers): LOG.debug("Get router traffic counters") return self._invoke_driver(context, routers, 'get_traffic_counters') def _sync_router_namespaces(self, context, routers): LOG.debug("Sync router namespaces") return self._invoke_driver(context, routers, 'sync_router_namespaces') def add_metering_label_rule(self, context, routers): return self._invoke_driver(context, routers, 'add_metering_label_rule') def remove_metering_label_rule(self, context, routers): return self._invoke_driver(context, routers, 'remove_metering_label_rule') def update_metering_label_rules(self, context, routers): LOG.debug("Update metering rules from agent") return self._invoke_driver(context, routers, 'update_metering_label_rules') def add_metering_label(self, context, routers): LOG.debug("Creating a metering label from agent") return self._invoke_driver(context, routers, 'add_metering_label') def remove_metering_label(self, context, routers): self._add_metering_infos() LOG.debug("Delete a metering label from agent") return self._invoke_driver(context, routers, 'remove_metering_label') class MeteringAgentWithStateReport(MeteringAgent): def __init__(self, host, conf=None): super(MeteringAgentWithStateReport, self).__init__(host=host, conf=conf) self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS) self.failed_report_state = False self.agent_state = { 'binary': 'neutron-metering-agent', 'host': host, 
'topic': topics.METERING_AGENT, 'configurations': { 'metering_driver': self.conf.driver, 'measure_interval': self.conf.measure_interval, 'report_interval': self.conf.report_interval }, 'start_flag': True, 'agent_type': constants.AGENT_TYPE_METERING} report_interval = cfg.CONF.AGENT.report_interval self.use_call = True if report_interval: self.heartbeat = loopingcall.FixedIntervalLoopingCall( self._report_state) self.heartbeat.start(interval=report_interval) def _report_state(self): try: self.state_rpc.report_state(self.context, self.agent_state, self.use_call) self.agent_state.pop('start_flag', None) self.use_call = False except AttributeError: # This means the server does not support report_state LOG.warning("Neutron server does not support state report. " "State report for this agent will be disabled.") self.heartbeat.stop() except Exception: self.failed_report_state = True LOG.exception("Failed reporting state!") return if self.failed_report_state: self.failed_report_state = False LOG.info("Successfully reported state after a previous failure.") def agent_updated(self, context, payload): LOG.info("agent_updated by server side %s!", payload) def main(): conf = cfg.CONF metering_agent.register_metering_agent_opts() config.register_agent_state_opts_helper(conf) common_config.init(sys.argv[1:]) config.setup_logging() config.setup_privsep() server = neutron_service.Service.create( binary='neutron-metering-agent', topic=topics.METERING_AGENT, report_interval=cfg.CONF.AGENT.report_interval, manager='neutron.services.metering.agents.' 'metering_agent.MeteringAgentWithStateReport') service.launch(cfg.CONF, server, restart_method='mutate').wait() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3670452 neutron-16.0.0.0b2.dev214/neutron/services/metering/drivers/0000755000175000017500000000000000000000000023750 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/metering/drivers/__init__.py0000644000175000017500000000000000000000000026047 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/metering/drivers/abstract_driver.py0000644000175000017500000000262700000000000027507 0ustar00coreycorey00000000000000# Copyright (C) 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
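# [Editor's note -- illustrative addition, not part of the original module.]
# Timing model of the agent above: _metering_loop() runs every
# `measure_interval` seconds to pull traffic counters from the driver, while
# the 'l3.meter' notification is emitted (and stale label info purged) only
# once `report_interval` seconds have passed since the last report. Below is
# a minimal stand-alone model of the accumulation step, mirroring
# _add_metering_info() (the helper name is hypothetical):

def _demo_accumulate(infos, label_id, pkts, byts, ts):
    # Start a fresh record on first sight of the label, then add deltas.
    info = infos.setdefault(label_id, {'pkts': 0, 'bytes': 0, 'time': 0,
                                       'first_update': ts, 'last_update': ts})
    info['pkts'] += pkts
    info['bytes'] += byts
    info['time'] += ts - info['last_update']
    info['last_update'] = ts
    return info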
import abc import six @six.add_metaclass(abc.ABCMeta) class MeteringAbstractDriver(object): """Abstract Metering driver.""" def __init__(self, plugin, conf): pass @abc.abstractmethod def update_routers(self, context, routers): pass @abc.abstractmethod def remove_router(self, context, router_id): pass @abc.abstractmethod def update_metering_label_rules(self, context, routers): pass @abc.abstractmethod def add_metering_label(self, context, routers): pass @abc.abstractmethod def remove_metering_label(self, context, routers): pass @abc.abstractmethod def get_traffic_counters(self, context, routers): pass @abc.abstractmethod def sync_router_namespaces(self, context, routers): pass ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3670452 neutron-16.0.0.0b2.dev214/neutron/services/metering/drivers/iptables/0000755000175000017500000000000000000000000025553 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/metering/drivers/iptables/__init__.py0000644000175000017500000000000000000000000027652 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/metering/drivers/iptables/iptables_driver.py0000644000175000017500000004476300000000000031321 0ustar00coreycorey00000000000000# Copyright (C) 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
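# [Editor's note -- illustrative addition, not part of the original module.]
# MeteringAbstractDriver above pins down the backend contract: a concrete
# driver must implement every @abc.abstractmethod or it cannot be
# instantiated. A hypothetical partial subclass, shown only to illustrate
# the ABC check:
#
#     class PartialDriver(MeteringAbstractDriver):
#         def update_routers(self, context, routers):
#             pass
#
#     PartialDriver(plugin=None, conf=None)
#     # TypeError: Can't instantiate abstract class PartialDriver ...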
from neutron_lib import constants from oslo_config import cfg from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import netutils from neutron.agent.common import utils as common_utils from neutron.agent.l3 import dvr_snat_ns from neutron.agent.l3 import namespaces from neutron.agent.linux import ip_lib from neutron.agent.linux import iptables_manager from neutron.conf.agent import common as config from neutron.services.metering.drivers import abstract_driver LOG = logging.getLogger(__name__) NS_PREFIX = 'qrouter-' WRAP_NAME = 'neutron-meter' EXTERNAL_DEV_PREFIX = 'qg-' ROUTER_2_FIP_DEV_PREFIX = namespaces.ROUTER_2_FIP_DEV_PREFIX TOP_CHAIN = WRAP_NAME + "-FORWARD" RULE = '-r-' LABEL = '-l-' config.register_interface_driver_opts_helper(cfg.CONF) config.register_interface_opts() class IptablesManagerTransaction(object): __transactions = {} def __init__(self, im): self.im = im transaction = self.__transactions.get(im, 0) transaction += 1 self.__transactions[im] = transaction def __enter__(self): return self.im def __exit__(self, type, value, traceback): transaction = self.__transactions.get(self.im) if transaction == 1: self.im.apply() del self.__transactions[self.im] else: transaction -= 1 self.__transactions[self.im] = transaction class RouterWithMetering(object): def __init__(self, conf, router): self.conf = conf self.id = router['id'] self.router = router # TODO(cbrandily): deduplicate ns_name generation in metering/l3 self.ns_name = NS_PREFIX + self.id self.iptables_manager = None self.snat_iptables_manager = None self.metering_labels = {} self.create_iptables_managers() def create_iptables_managers(self): """Creates iptables managers if they are not already created Returns True if any manager is created """ created = False if self.router['distributed'] and self.snat_iptables_manager is None: # For distributed routers we need to apply the # metering agent label rules in the snat namespace as well. snat_ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name( self.id) # Check for namespace existence before we assign the # snat_iptables_manager if ip_lib.network_namespace_exists(snat_ns_name): self.snat_iptables_manager = iptables_manager.IptablesManager( namespace=snat_ns_name, binary_name=WRAP_NAME, state_less=True, use_ipv6=netutils.is_ipv6_enabled()) created = True if self.iptables_manager is None: # Check for namespace existence before we assign the # iptables_manager # NOTE(Swami): For distributed routers, all external traffic on a # compute node will flow through the rfp interface in the router # namespace.
if ip_lib.network_namespace_exists(self.ns_name): self.iptables_manager = iptables_manager.IptablesManager( namespace=self.ns_name, binary_name=WRAP_NAME, state_less=True, use_ipv6=netutils.is_ipv6_enabled()) created = True return created class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): def __init__(self, plugin, conf): self.plugin = plugin self.conf = conf or cfg.CONF self.routers = {} self.driver = common_utils.load_interface_driver(self.conf) def _update_router(self, router): r = self.routers.get(router['id']) if r is None: r = RouterWithMetering(self.conf, router) r.router = router self.routers[r.id] = r return r @log_helpers.log_method_call def update_routers(self, context, routers): # disassociate removed routers router_ids = set(router['id'] for router in routers) for router_id, rm in self.routers.items(): if router_id not in router_ids: self._process_disassociate_metering_label(rm.router) for router in routers: old_gw_port_id = None old_rm = self.routers.get(router['id']) if old_rm: old_gw_port_id = old_rm.router['gw_port_id'] gw_port_id = router['gw_port_id'] if gw_port_id != old_gw_port_id: if old_rm: if router.get('distributed'): old_rm_im = old_rm.snat_iptables_manager else: old_rm_im = old_rm.iptables_manager # In case the selected manager is None pick another one. # This is not ideal sometimes. old_rm_im = (old_rm_im or old_rm.snat_iptables_manager or old_rm.iptables_manager) if old_rm_im: with IptablesManagerTransaction(old_rm_im): self._process_disassociate_metering_label(router) if gw_port_id: self._process_associate_metering_label(router) elif gw_port_id: self._process_associate_metering_label(router) @log_helpers.log_method_call def remove_router(self, context, router_id): if router_id in self.routers: del self.routers[router_id] def get_external_device_names(self, rm): gw_port_id = rm.router.get('gw_port_id') if not gw_port_id: return None, None # NOTE (Swami): External device 'qg' should be used on the # Router namespace if the router is legacy and should be used on # SNAT namespace if the router is distributed. 
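# (Editor's note, added comment) Both device names below are truncated to the
# interface driver's DEV_NAME_LEN because Linux caps interface names at
# IFNAMSIZ (15 usable characters).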
ext_dev = (EXTERNAL_DEV_PREFIX + gw_port_id)[:self.driver.DEV_NAME_LEN] ext_snat_dev = (ROUTER_2_FIP_DEV_PREFIX + rm.id)[:self.driver.DEV_NAME_LEN] return ext_dev, ext_snat_dev def _process_metering_label_rules(self, rules, label_chain, rules_chain, ext_dev, im): if not ext_dev: return for rule in rules: self._add_rule_to_chain(ext_dev, rule, im, label_chain, rules_chain) def _process_metering_label_rule_add(self, rule, ext_dev, label_chain, rules_chain, im): self._add_rule_to_chain(ext_dev, rule, im, label_chain, rules_chain) def _process_metering_label_rule_delete(self, rule, ext_dev, label_chain, rules_chain, im): self._remove_rule_from_chain(ext_dev, rule, im, label_chain, rules_chain) def _add_rule_to_chain(self, ext_dev, rule, im, label_chain, rules_chain): ipt_rule = self._prepare_rule(ext_dev, rule, label_chain) if rule['excluded']: im.ipv4['filter'].add_rule(rules_chain, ipt_rule, wrap=False, top=True) else: im.ipv4['filter'].add_rule(rules_chain, ipt_rule, wrap=False, top=False) def _remove_rule_from_chain(self, ext_dev, rule, im, label_chain, rules_chain): ipt_rule = self._prepare_rule(ext_dev, rule, label_chain) if rule['excluded']: im.ipv4['filter'].remove_rule(rules_chain, ipt_rule, wrap=False, top=True) else: im.ipv4['filter'].remove_rule(rules_chain, ipt_rule, wrap=False, top=False) def _prepare_rule(self, ext_dev, rule, label_chain): remote_ip = rule['remote_ip_prefix'] if rule['direction'] == 'egress': dir_opt = '-s %s -o %s' % (remote_ip, ext_dev) else: dir_opt = '-d %s -i %s' % (remote_ip, ext_dev) if rule['excluded']: ipt_rule = '%s -j RETURN' % dir_opt else: ipt_rule = '%s -j %s' % (dir_opt, label_chain) return ipt_rule def _process_ns_specific_metering_label(self, router, ext_dev, im): '''Process metering label based on the associated namespaces.''' rm = self.routers.get(router['id']) with IptablesManagerTransaction(im): labels = router.get(constants.METERING_LABEL_KEY, []) for label in labels: label_id = label['id'] label_chain = iptables_manager.get_chain_name( WRAP_NAME + LABEL + label_id, wrap=False) rules_chain = iptables_manager.get_chain_name( WRAP_NAME + RULE + label_id, wrap=False) exists = rm.metering_labels.get(label_id) if not exists: self._create_metering_label_chain(rm, label_chain, rules_chain) rm.metering_labels[label_id] = label rules = label.get('rules') if rules: self._process_metering_label_rules( rules, label_chain, rules_chain, ext_dev, im) def _process_associate_metering_label(self, router): self._update_router(router) rm = self.routers.get(router['id']) ext_dev, ext_snat_dev = self.get_external_device_names(rm) for (im, dev) in [(rm.iptables_manager, ext_dev), (rm.snat_iptables_manager, ext_snat_dev)]: if im: self._process_ns_specific_metering_label(router, dev, im) def _process_ns_specific_disassociate_metering_label(self, router, im): '''Disassociate metering label based on specific namespaces.''' rm = self.routers.get(router['id']) with IptablesManagerTransaction(im): labels = router.get(constants.METERING_LABEL_KEY, []) for label in labels: label_id = label['id'] if label_id not in rm.metering_labels: continue label_chain = iptables_manager.get_chain_name( WRAP_NAME + LABEL + label_id, wrap=False) rules_chain = iptables_manager.get_chain_name( WRAP_NAME + RULE + label_id, wrap=False) im.ipv4['filter'].remove_chain(label_chain, wrap=False) im.ipv4['filter'].remove_chain(rules_chain, wrap=False) def _process_disassociate_metering_label(self, router): rm = self.routers.get(router['id']) if not rm: return for im in [rm.iptables_manager, 
rm.snat_iptables_manager]: if im: self._process_ns_specific_disassociate_metering_label( router, im) labels = router.get(constants.METERING_LABEL_KEY, []) for label in labels: label_id = label['id'] del rm.metering_labels[label_id] @log_helpers.log_method_call def add_metering_label(self, context, routers): for router in routers: self._process_associate_metering_label(router) @log_helpers.log_method_call def add_metering_label_rule(self, context, routers): for router in routers: self._add_metering_label_rule(router) @log_helpers.log_method_call def remove_metering_label_rule(self, context, routers): for router in routers: self._remove_metering_label_rule(router) @log_helpers.log_method_call def update_metering_label_rules(self, context, routers): for router in routers: self._update_metering_label_rules(router) def _add_metering_label_rule(self, router): self._process_metering_rule_action(router, 'create') def _remove_metering_label_rule(self, router): self._process_metering_rule_action(router, 'delete') def _create_metering_label_chain(self, rm, label_chain, rules_chain): rm.iptables_manager.ipv4['filter'].add_chain(label_chain, wrap=False) rm.iptables_manager.ipv4['filter'].add_chain(rules_chain, wrap=False) rm.iptables_manager.ipv4['filter'].add_rule( TOP_CHAIN, '-j ' + rules_chain, wrap=False) rm.iptables_manager.ipv4['filter'].add_rule( label_chain, '', wrap=False) def _process_metering_rule_action_based_on_ns(self, router, action, ext_dev, im): '''Process metering rule actions based on specific namespaces.''' rm = self.routers.get(router['id']) with IptablesManagerTransaction(im): labels = router.get(constants.METERING_LABEL_KEY, []) for label in labels: label_id = label['id'] label_chain = iptables_manager.get_chain_name( WRAP_NAME + LABEL + label_id, wrap=False) rules_chain = iptables_manager.get_chain_name( WRAP_NAME + RULE + label_id, wrap=False) exists = rm.metering_labels.get(label_id) if action == 'create' and not exists: self._create_metering_label_chain(rm, label_chain, rules_chain) rm.metering_labels[label_id] = label rule = label.get('rule') if rule: if action == 'create': self._process_metering_label_rule_add( rule, ext_dev, label_chain, rules_chain, im) elif action == 'delete': self._process_metering_label_rule_delete( rule, ext_dev, label_chain, rules_chain, im) def _process_metering_rule_action(self, router, action): rm = self.routers.get(router['id']) if not rm: return ext_dev, ext_snat_dev = self.get_external_device_names(rm) for (im, dev) in [(rm.iptables_manager, ext_dev), (rm.snat_iptables_manager, ext_snat_dev)]: if im and dev: self._process_metering_rule_action_based_on_ns( router, action, dev, im) def _update_metering_label_rules_based_on_ns(self, router, ext_dev, im): '''Update metering label rules based on namespace.''' with IptablesManagerTransaction(im): labels = router.get(constants.METERING_LABEL_KEY, []) for label in labels: label_id = label['id'] label_chain = iptables_manager.get_chain_name( WRAP_NAME + LABEL + label_id, wrap=False) rules_chain = iptables_manager.get_chain_name( WRAP_NAME + RULE + label_id, wrap=False) im.ipv4['filter'].empty_chain(rules_chain, wrap=False) rules = label.get('rules') if rules: self._process_metering_label_rules( rules, label_chain, rules_chain, ext_dev, im) def _update_metering_label_rules(self, router): rm = self.routers.get(router['id']) if not rm: return ext_dev, ext_snat_dev = self.get_external_device_names(rm) for (im, dev) in [(rm.iptables_manager, ext_dev), (rm.snat_iptables_manager, ext_snat_dev)]: if im and dev:
self._update_metering_label_rules_based_on_ns(router, dev, im) @log_helpers.log_method_call def remove_metering_label(self, context, routers): for router in routers: self._process_disassociate_metering_label(router) @log_helpers.log_method_call def get_traffic_counters(self, context, routers): accs = {} routers_to_reconfigure = set() for router in routers: rm = self.routers.get(router['id']) if not rm: continue for label_id in rm.metering_labels: try: chain = iptables_manager.get_chain_name(WRAP_NAME + LABEL + label_id, wrap=False) chain_acc = rm.iptables_manager.get_traffic_counters( chain, wrap=False, zero=True) except RuntimeError: LOG.exception('Failed to get traffic counters, ' 'router: %s', router) routers_to_reconfigure.add(router['id']) continue if not chain_acc: continue acc = accs.get(label_id, {'pkts': 0, 'bytes': 0}) acc['pkts'] += chain_acc['pkts'] acc['bytes'] += chain_acc['bytes'] accs[label_id] = acc for router_id in routers_to_reconfigure: del self.routers[router_id] return accs @log_helpers.log_method_call def sync_router_namespaces(self, context, routers): for router in routers: rm = self.routers.get(router['id']) if not rm: continue # NOTE(bno1): Sometimes a router is added before its namespaces are # created. The metering agent has to periodically check if the # namespaces for the missing iptables managers have appeared and # create the managers for them. When a new manager is created, the # metering rules have to be added to it. if rm.create_iptables_managers(): self._process_associate_metering_label(router) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3670452 neutron-16.0.0.0b2.dev214/neutron/services/metering/drivers/noop/0000755000175000017500000000000000000000000024723 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/metering/drivers/noop/__init__.py0000644000175000017500000000000000000000000027022 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/metering/drivers/noop/noop_driver.py0000644000175000017500000000324100000000000027623 0ustar00coreycorey00000000000000# Copyright (C) 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
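# [Editor's note -- illustrative addition, not part of the original module.]
# The iptables driver above funnels every chain/rule mutation through
# IptablesManagerTransaction, which reference-counts nested entries per
# manager so that apply() runs exactly once, at the outermost exit:
#
#     with IptablesManagerTransaction(im):       # count -> 1
#         with IptablesManagerTransaction(im):   # count -> 2, nothing applied
#             im.ipv4['filter'].add_chain('demo', wrap=False)  # hypothetical
#     # outermost __exit__ -> im.apply() flushes the queued changes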
from oslo_log import helpers as log_helpers from neutron.services.metering.drivers import abstract_driver class NoopMeteringDriver(abstract_driver.MeteringAbstractDriver): @log_helpers.log_method_call def update_routers(self, context, routers): pass @log_helpers.log_method_call def remove_router(self, context, router_id): pass @log_helpers.log_method_call def update_metering_label_rules(self, context, routers): pass @log_helpers.log_method_call def add_metering_label_rule(self, context, routers): pass @log_helpers.log_method_call def remove_metering_label_rule(self, context, routers): pass @log_helpers.log_method_call def add_metering_label(self, context, routers): pass @log_helpers.log_method_call def remove_metering_label(self, context, routers): pass @log_helpers.log_method_call def get_traffic_counters(self, context, routers): pass @log_helpers.log_method_call def sync_router_namespaces(self, context, routers): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/metering/drivers/utils.py0000644000175000017500000000242600000000000025466 0ustar00coreycorey00000000000000# Copyright 2017 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.utils import runtime from oslo_log import log as logging LOG = logging.getLogger(__name__) METERING_NAMESPACE = 'neutron.services.metering_drivers' def load_metering_driver(plugin, conf): """Load metering driver :param plugin: the metering plugin :param conf: driver configuration object :raises SystemExit of 1 if driver cannot be loaded """ try: loaded_class = runtime.load_class_by_alias_or_classname( METERING_NAMESPACE, conf.driver) return loaded_class(plugin, conf) except ImportError: LOG.error("Error loading metering driver '%s'", conf.driver) raise SystemExit(1) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/metering/metering_plugin.py0000644000175000017500000000561200000000000026040 0ustar00coreycorey00000000000000# Copyright (C) 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
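# [Editor's note -- illustrative addition, not part of the original module.]
# load_metering_driver() above accepts either an entry-point alias from the
# 'neutron.services.metering_drivers' namespace or a full class path. A
# hypothetical metering agent configuration fragment (the short alias is
# assumed to be registered in the package's setup.cfg):
#
#     [DEFAULT]
#     driver = noop
#     # or, fully qualified:
#     # driver = neutron.services.metering.drivers.noop.noop_driver.NoopMeteringDriver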
from neutron_lib.agent import topics from neutron_lib.api.definitions import metering as metering_apidef from neutron_lib import rpc as n_rpc from neutron.api.rpc.agentnotifiers import metering_rpc_agent_api from neutron.db.metering import metering_db from neutron.db.metering import metering_rpc from neutron import service class MeteringPlugin(metering_db.MeteringDbMixin): """Implementation of the Neutron Metering Service Plugin.""" supported_extension_aliases = [metering_apidef.ALIAS] path_prefix = "/metering" __filter_validation_support = True def __init__(self): super(MeteringPlugin, self).__init__() self.meter_rpc = metering_rpc_agent_api.MeteringAgentNotifyAPI() rpc_worker = service.RpcWorker([self], worker_process_count=0) self.add_worker(rpc_worker) def start_rpc_listeners(self): self.endpoints = [metering_rpc.MeteringRpcCallbacks(self)] self.conn = n_rpc.Connection() self.conn.create_consumer( topics.METERING_PLUGIN, self.endpoints, fanout=False) return self.conn.consume_in_threads() def create_metering_label(self, context, metering_label): label = super(MeteringPlugin, self).create_metering_label( context, metering_label) data = self.get_sync_data_metering(context) self.meter_rpc.add_metering_label(context, data) return label def delete_metering_label(self, context, label_id): data = self.get_sync_data_metering(context, label_id) label = super(MeteringPlugin, self).delete_metering_label( context, label_id) self.meter_rpc.remove_metering_label(context, data) return label def create_metering_label_rule(self, context, metering_label_rule): rule = super(MeteringPlugin, self).create_metering_label_rule( context, metering_label_rule) data = self.get_sync_data_for_rule(context, rule) self.meter_rpc.add_metering_label_rule(context, data) return rule def delete_metering_label_rule(self, context, rule_id): rule = super(MeteringPlugin, self).delete_metering_label_rule( context, rule_id) data = self.get_sync_data_for_rule(context, rule) self.meter_rpc.remove_metering_label_rule(context, data) return rule ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3670452 neutron-16.0.0.0b2.dev214/neutron/services/network_ip_availability/0000755000175000017500000000000000000000000025373 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/network_ip_availability/__init__.py0000644000175000017500000000000000000000000027472 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/network_ip_availability/plugin.py0000644000175000017500000000457500000000000027256 0ustar00coreycorey00000000000000# Copyright 2016 GoDaddy. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
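# [Editor's note -- illustrative addition, not part of the original module.]
# Write path of MeteringPlugin above: each CRUD call first persists through
# the MeteringDbMixin parent, then fans the refreshed sync data out to the
# metering agents over RPC. Trace of the create flow (names from the code
# above; ctx and body are assumed to exist):
#
#     label = plugin.create_metering_label(ctx, body)
#     #   -> super().create_metering_label(...)            # DB write
#     #   -> data = self.get_sync_data_metering(ctx)       # full sync payload
#     #   -> self.meter_rpc.add_metering_label(ctx, data)  # notify agents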
from neutron_lib.api.definitions import network_ip_availability from neutron_lib.db import utils as db_utils from neutron_lib import exceptions import neutron.db.db_base_plugin_v2 as db_base_plugin_v2 import neutron.db.network_ip_availability_db as ip_availability_db class NetworkIPAvailabilityPlugin(ip_availability_db.IpAvailabilityMixin, db_base_plugin_v2.NeutronDbPluginV2): """This plugin exposes IP availability data for networks and subnets.""" _instance = None supported_extension_aliases = [network_ip_availability.ALIAS] __filter_validation_support = True @classmethod def get_instance(cls): if cls._instance is None: cls._instance = cls() return cls._instance def get_plugin_description(self): return "Provides IP availability data for each network and subnet." @classmethod def get_plugin_type(cls): return "network-ip-availability" def get_network_ip_availabilities(self, context, filters=None, fields=None): """Returns ip availability data for a collection of networks.""" net_ip_availabilities = super( NetworkIPAvailabilityPlugin, self ).get_network_ip_availabilities(context, filters) return [db_utils.resource_fields(net_ip_availability, fields) for net_ip_availability in net_ip_availabilities] def get_network_ip_availability(self, context, id=None, fields=None): """Return ip availability data for a specific network id.""" filters = {'network_id': [id]} result = self.get_network_ip_availabilities(context, filters) if result: return db_utils.resource_fields(result[0], fields) else: raise exceptions.NetworkNotFound(net_id=id) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.371045 neutron-16.0.0.0b2.dev214/neutron/services/network_segment_range/0000755000175000017500000000000000000000000025047 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/network_segment_range/__init__.py0000644000175000017500000000000000000000000027146 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/network_segment_range/plugin.py0000644000175000017500000002720200000000000026722 0ustar00coreycorey00000000000000# Copyright (c) 2019 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
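# [Editor's note -- illustrative addition, not part of the original module.]
# The plugin above backs the read-only 'network-ip-availability' extension.
# A sketch of the per-network payload shape (keys come from
# IpAvailabilityMixin; the values here are hypothetical):
#
#     {'network_id': '<uuid>',
#      'network_name': 'demo-net',
#      'total_ips': 253,
#      'used_ips': 2,
#      'subnet_ip_availability': [...]}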
from neutron_lib.api.definitions import network_segment_range as range_def from neutron_lib.api import validators from neutron_lib import constants as const from neutron_lib.db import api as db_api from neutron_lib import exceptions as lib_exc from neutron_lib.exceptions import network_segment_range as range_exc from neutron_lib.plugins import directory from neutron_lib.plugins import utils as plugin_utils from oslo_config import cfg from oslo_log import helpers as log_helpers from oslo_log import log import six from neutron._i18n import _ from neutron.db import segments_db from neutron.extensions import network_segment_range as ext_range from neutron.objects import base as base_obj from neutron.objects import network_segment_range as obj_network_segment_range LOG = log.getLogger(__name__) def is_network_segment_range_enabled(): network_segment_range_class = ('neutron.services.network_segment_range.' 'plugin.NetworkSegmentRangePlugin') return any(p in cfg.CONF.service_plugins for p in ['network_segment_range', network_segment_range_class]) def _get_physical_network(network_segment_range): if network_segment_range.get('network_type') != const.TYPE_VLAN: return None physical_network = network_segment_range.get( "physical_network", const.ATTR_NOT_SPECIFIED) if not validators.is_attr_set(physical_network): message = _("Network type %s requires 'physical_network' to be " "specified while creating new range") % const.TYPE_VLAN raise lib_exc.BadRequest(resource=range_def.RESOURCE_NAME, msg=message) return physical_network class NetworkSegmentRangePlugin(ext_range.NetworkSegmentRangePluginBase): """Implements Neutron Network Segment Range Service plugin.""" supported_extension_aliases = [range_def.ALIAS] __native_pagination_support = True __native_sorting_support = True __filter_validation_support = True def __init__(self): super(NetworkSegmentRangePlugin, self).__init__() self.type_manager = directory.get_plugin().type_manager self.type_manager.initialize_network_segment_range_support() def _get_network_segment_range(self, context, id): obj = obj_network_segment_range.NetworkSegmentRange.get_object( context, id=id) if obj is None: raise range_exc.NetworkSegmentRangeNotFound(range_id=id) return obj def _validate_network_segment_range_eligible(self, network_segment_range): range_data = (network_segment_range.get('minimum'), network_segment_range.get('maximum')) # Currently, network segment range only supports VLAN, VxLAN, # GRE and Geneve. 
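# (Editor's note, added comment) verify_vlan_range/verify_tunnel_range
# bound-check the (minimum, maximum) pair against the segmentation-ID space
# of the network type -- for VLAN that is [1, 4094] -- and raise if the pair
# is inverted or out of range.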
if network_segment_range.get('network_type') == const.TYPE_VLAN: plugin_utils.verify_vlan_range(range_data) else: plugin_utils.verify_tunnel_range( range_data, network_segment_range.get('network_type')) def _validate_network_segment_range_overlap(self, context, network_segment_range): filters = { 'default': False, 'network_type': network_segment_range['network_type'], 'physical_network': _get_physical_network(network_segment_range) } range_objs = obj_network_segment_range.NetworkSegmentRange.get_objects( context, **filters) overlapped_range_id = [ obj.id for obj in range_objs if (network_segment_range['minimum'] <= obj.maximum and network_segment_range['maximum'] >= obj.minimum)] if overlapped_range_id: raise range_exc.NetworkSegmentRangeOverlaps( range_id=', '.join(overlapped_range_id)) def _add_unchanged_range_attributes(self, updates, existing): """Adds data for unspecified fields on incoming update requests.""" for key, value in six.iteritems(existing): updates.setdefault(key, value) return updates def _is_network_segment_range_referenced(self, context, network_segment_range): return segments_db.network_segments_exist_in_range( context, network_segment_range['network_type'], network_segment_range.get('physical_network'), network_segment_range) def _is_network_segment_range_type_supported(self, network_type): if not (self.type_manager.network_type_supported(network_type) and network_type in const.NETWORK_SEGMENT_RANGE_TYPES): raise range_exc.NetworkSegmentRangeNetTypeNotSupported( type=network_type) return True def _are_allocated_segments_in_range_impacted(self, context, existing_range, updated_range): updated_range_min = updated_range.get('minimum', existing_range['minimum']) updated_range_max = updated_range.get('maximum', existing_range['maximum']) existing_range_min, existing_range_max = ( segments_db.min_max_actual_segments_in_range( context, existing_range['network_type'], existing_range.get('physical_network'), existing_range)) if existing_range_min and existing_range_max: return bool(updated_range_min >= existing_range_min or updated_range_max <= existing_range_max) return False @log_helpers.log_method_call def create_network_segment_range(self, context, network_segment_range): """Check network types supported on network segment range creation.""" range_data = network_segment_range['network_segment_range'] if self._is_network_segment_range_type_supported( range_data['network_type']): with db_api.CONTEXT_WRITER.using(context): self._validate_network_segment_range_eligible(range_data) self._validate_network_segment_range_overlap(context, range_data) network_segment_range = ( obj_network_segment_range.NetworkSegmentRange( context, name=range_data['name'], description=range_data.get('description'), default=False, shared=range_data['shared'], project_id=(range_data['project_id'] if not range_data['shared'] else None), network_type=range_data['network_type'], physical_network=(range_data['physical_network'] if range_data['network_type'] == const.TYPE_VLAN else None), minimum=range_data['minimum'], maximum=range_data['maximum']) ) network_segment_range.create() self.type_manager.update_network_segment_range_allocations( network_segment_range['network_type']) return network_segment_range.to_dict() @log_helpers.log_method_call def get_network_segment_range(self, context, id, fields=None): network_segment_range = self._get_network_segment_range( context, id) return network_segment_range.to_dict(fields=fields) @log_helpers.log_method_call def get_network_segment_ranges(self, context, 
filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): # TODO(kailun): Based on the current spec: # https://review.opendev.org/599980, this method call may # possibly return a large amount of data since ``available`` # segment list and ``used`` segment/project mapping will be also # returned and they can be large sometimes. Considering that this # API is admin-only and list operations won't be called often based # on the use cases, we'll keep this open for now and evaluate the # potential impacts. An alternative is to return the ``available`` # and ``used`` segment number or percentage. pager = base_obj.Pager(sorts, limit, page_reverse, marker) filters = filters or {} network_segment_ranges = ( obj_network_segment_range.NetworkSegmentRange.get_objects( context, _pager=pager, **filters)) return [ network_segment_range.to_dict(fields=fields) for network_segment_range in network_segment_ranges ] @log_helpers.log_method_call def update_network_segment_range(self, context, id, network_segment_range): """Check existing network segment range impact on range updates.""" updated_range_data = network_segment_range['network_segment_range'] with db_api.CONTEXT_WRITER.using(context): network_segment_range = self._get_network_segment_range(context, id) existing_range_data = network_segment_range.to_dict() if existing_range_data['default']: raise range_exc.NetworkSegmentRangeDefaultReadOnly(range_id=id) if self._are_allocated_segments_in_range_impacted( context, existing_range=existing_range_data, updated_range=updated_range_data): raise range_exc.NetworkSegmentRangeReferencedByProject( range_id=id) new_range_data = self._add_unchanged_range_attributes( updated_range_data, existing_range_data) self._validate_network_segment_range_eligible(new_range_data) network_segment_range.update_fields(new_range_data) network_segment_range.update() self.type_manager.update_network_segment_range_allocations( network_segment_range['network_type']) return network_segment_range.to_dict() @log_helpers.log_method_call def delete_network_segment_range(self, context, id): """Check segment reference on network segment range deletion.""" with db_api.CONTEXT_WRITER.using(context): network_segment_range = self._get_network_segment_range(context, id) range_data = network_segment_range.to_dict() if range_data['default']: raise range_exc.NetworkSegmentRangeDefaultReadOnly(range_id=id) if self._is_network_segment_range_referenced( context, range_data): raise range_exc.NetworkSegmentRangeReferencedByProject( range_id=id) network_segment_range.delete() self.type_manager.update_network_segment_range_allocations( network_segment_range['network_type']) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.371045 neutron-16.0.0.0b2.dev214/neutron/services/ovn_l3/0000755000175000017500000000000000000000000021660 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/ovn_l3/__init__.py0000644000175000017500000000000000000000000023757 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/ovn_l3/plugin.py0000644000175000017500000004637700000000000023551 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the 
License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron.db import dns_db from neutron.db import extraroute_db from neutron.db import l3_gwmode_db from neutron.db.models import l3 as l3_models from neutron.quota import resource_registry from neutron_lib.api.definitions import external_net from neutron_lib.api.definitions import portbindings from neutron_lib.api.definitions import provider_net as pnet from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as n_const from neutron_lib import context as n_context from neutron_lib import exceptions as n_exc from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from neutron_lib.services import base as service_base from oslo_log import log from oslo_utils import excutils from neutron.common.ovn import constants as ovn_const from neutron.common.ovn import extensions from neutron.common.ovn import utils from neutron.db import l3_fip_port_details from neutron.db import ovn_revision_numbers_db as db_rev from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_client from neutron.scheduler import l3_ovn_scheduler LOG = log.getLogger(__name__) @registry.has_registry_receivers class OVNL3RouterPlugin(service_base.ServicePluginBase, extraroute_db.ExtraRoute_dbonly_mixin, l3_gwmode_db.L3_NAT_db_mixin, dns_db.DNSDbMixin, l3_fip_port_details.Fip_port_details_db_mixin): """Implementation of the OVN L3 Router Service Plugin. This class implements a L3 service plugin that provides router and floatingip resources and manages associated request/response. """ # TODO(mjozefcz): Start consuming it from neutron-lib # once available. 
supported_extension_aliases = ( extensions.ML2_SUPPORTED_API_EXTENSIONS_OVN_L3) @resource_registry.tracked_resources(router=l3_models.Router, floatingip=l3_models.FloatingIP) def __init__(self): LOG.info("Starting OVNL3RouterPlugin") super(OVNL3RouterPlugin, self).__init__() self._plugin_property = None self._mech = None self._ovn_client_inst = None self.scheduler = l3_ovn_scheduler.get_scheduler() self._register_precommit_callbacks() def _register_precommit_callbacks(self): registry.subscribe( self.create_router_precommit, resources.ROUTER, events.PRECOMMIT_CREATE) registry.subscribe( self.create_floatingip_precommit, resources.FLOATING_IP, events.PRECOMMIT_CREATE) @property def _ovn_client(self): if self._ovn_client_inst is None: self._ovn_client_inst = ovn_client.OVNClient(self._ovn, self._sb_ovn) return self._ovn_client_inst @property def _ovn(self): return self._plugin_driver.nb_ovn @property def _sb_ovn(self): return self._plugin_driver.sb_ovn @property def _plugin(self): if self._plugin_property is None: self._plugin_property = directory.get_plugin() return self._plugin_property @property def _plugin_driver(self): if self._mech is None: self._mech = self._plugin.mechanism_manager.mech_drivers['ovn'].obj return self._mech def get_plugin_type(self): return plugin_constants.L3 def get_plugin_description(self): """returns string description of the plugin.""" return ("L3 Router Service Plugin for basic L3 forwarding" " using OVN") def create_router_precommit(self, resource, event, trigger, context, router, router_id, router_db): db_rev.create_initial_revision( context, router_id, ovn_const.TYPE_ROUTERS) def create_router(self, context, router): router = super(OVNL3RouterPlugin, self).create_router(context, router) try: self._ovn_client.create_router(context, router) except Exception: with excutils.save_and_reraise_exception(): # Delete the logical router LOG.error('Unable to create lrouter for %s', router['id']) super(OVNL3RouterPlugin, self).delete_router(context, router['id']) return router def update_router(self, context, id, router): original_router = self.get_router(context, id) result = super(OVNL3RouterPlugin, self).update_router(context, id, router) try: self._ovn_client.update_router(context, result, original_router) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Unable to update lrouter for %s', id) revert_router = {'router': original_router} super(OVNL3RouterPlugin, self).update_router(context, id, revert_router) return result def delete_router(self, context, id): original_router = self.get_router(context, id) super(OVNL3RouterPlugin, self).delete_router(context, id) try: self._ovn_client.delete_router(context, id) except Exception: with excutils.save_and_reraise_exception(): super(OVNL3RouterPlugin, self).create_router( context, {'router': original_router}) def _add_neutron_router_interface(self, context, router_id, interface_info, may_exist=False): try: router_interface_info = ( super(OVNL3RouterPlugin, self).add_router_interface( context, router_id, interface_info)) except n_exc.PortInUse: if not may_exist: raise # NOTE(lucasagomes): If the port is already being used it means # the interface has been created already, let's just fetch it from # the database. 
Perhaps the code below should live in Neutron # itself, a get_router_interface() method in the main class # would be handy port = self._plugin.get_port(context, interface_info['port_id']) subnets = [self._plugin.get_subnet(context, s) for s in utils.get_port_subnet_ids(port)] router_interface_info = ( self._make_router_interface_info( router_id, port['tenant_id'], port['id'], port['network_id'], subnets[0]['id'], [subnet['id'] for subnet in subnets])) return router_interface_info def add_router_interface(self, context, router_id, interface_info, may_exist=False): router_interface_info = self._add_neutron_router_interface( context, router_id, interface_info, may_exist=may_exist) try: self._ovn_client.create_router_port( context, router_id, router_interface_info) except Exception: with excutils.save_and_reraise_exception(): super(OVNL3RouterPlugin, self).remove_router_interface( context, router_id, router_interface_info) return router_interface_info def remove_router_interface(self, context, router_id, interface_info): router_interface_info = ( super(OVNL3RouterPlugin, self).remove_router_interface( context, router_id, interface_info)) try: port_id = router_interface_info['port_id'] subnet_ids = router_interface_info.get('subnet_ids') self._ovn_client.delete_router_port( context, port_id, router_id=router_id, subnet_ids=subnet_ids) except Exception: with excutils.save_and_reraise_exception(): super(OVNL3RouterPlugin, self).add_router_interface( context, router_id, interface_info) return router_interface_info def create_floatingip_precommit(self, resource, event, trigger, context, floatingip, floatingip_id, floatingip_db): db_rev.create_initial_revision( context, floatingip_id, ovn_const.TYPE_FLOATINGIPS) def create_floatingip(self, context, floatingip, initial_status=n_const.FLOATINGIP_STATUS_DOWN): fip = super(OVNL3RouterPlugin, self).create_floatingip( context, floatingip, initial_status) self._ovn_client.create_floatingip(context, fip) return fip def delete_floatingip(self, context, id): # TODO(lucasagomes): Passing ``original_fip`` object as a # parameter to the OVNClient's delete_floatingip() method is done # for backward-compatible reasons. Remove it in the Rocky release # of OpenStack. original_fip = self.get_floatingip(context, id) super(OVNL3RouterPlugin, self).delete_floatingip(context, id) self._ovn_client.delete_floatingip(context, id, fip_object=original_fip) def update_floatingip(self, context, id, floatingip): # TODO(lucasagomes): Passing ``original_fip`` object as a # parameter to the OVNClient's update_floatingip() method is done # for backward-compatible reasons. Remove it in the Rocky release # of OpenStack. 
original_fip = self.get_floatingip(context, id) fip = super(OVNL3RouterPlugin, self).update_floatingip(context, id, floatingip) self._ovn_client.update_floatingip(context, fip, fip_object=original_fip) return fip def update_floatingip_status(self, context, floatingip_id, status): fip = super(OVNL3RouterPlugin, self).update_floatingip_status( context, floatingip_id, status) self._ovn_client.update_floatingip_status(context, fip) return fip def disassociate_floatingips(self, context, port_id, do_notify=True): fips = self.get_floatingips(context.elevated(), filters={'port_id': [port_id]}) router_ids = super(OVNL3RouterPlugin, self).disassociate_floatingips( context, port_id, do_notify) for fip in fips: router_id = fip.get('router_id') fixed_ip_address = fip.get('fixed_ip_address') if router_id and fixed_ip_address: update_fip = {'logical_ip': fixed_ip_address, 'external_ip': fip['floating_ip_address']} try: self._ovn_client.disassociate_floatingip(update_fip, router_id) self.update_floatingip_status( context, fip['id'], n_const.FLOATINGIP_STATUS_DOWN) except Exception as e: LOG.error('Error in disassociating floatingip %(id)s: ' '%(error)s', {'id': fip['id'], 'error': e}) return router_ids def _get_gateway_port_physnet_mapping(self): # This function returns all gateway ports with the corresponding # external network's physnet net_physnet_dict = {} port_physnet_dict = {} l3plugin = directory.get_plugin(plugin_constants.L3) if not l3plugin: return port_physnet_dict context = n_context.get_admin_context() for net in l3plugin._plugin.get_networks( context, {external_net.EXTERNAL: [True]}): if net.get(pnet.NETWORK_TYPE) in [n_const.TYPE_FLAT, n_const.TYPE_VLAN]: net_physnet_dict[net['id']] = net.get(pnet.PHYSICAL_NETWORK) for port in l3plugin._plugin.get_ports(context, filters={ 'device_owner': [n_const.DEVICE_OWNER_ROUTER_GW]}): port_physnet_dict[port['id']] = net_physnet_dict.get( port['network_id']) return port_physnet_dict def update_router_gateway_port_bindings(self, router, host): status = (n_const.PORT_STATUS_ACTIVE if host else n_const.PORT_STATUS_DOWN) context = n_context.get_admin_context() filters = {'device_id': [router], 'device_owner': [n_const.DEVICE_OWNER_ROUTER_GW]} for port in self._plugin.get_ports(context, filters=filters): # FIXME(lucasagomes): Ideally here we would use only # one database transaction for the status and binding the # host but, even though update_port_status() receives a "host" # parameter, apparently it doesn't work for ports whose # device owner is router_gateway. We need to look into it and # fix the problem in Neutron before updating it here. if host: self._plugin.update_port( context, port['id'], {'port': {portbindings.HOST_ID: host}}) if port['status'] != status: self._plugin.update_port_status(context, port['id'], status) def schedule_unhosted_gateways(self, event_from_chassis=None): # GW ports and their physnets. port_physnet_dict = self._get_gateway_port_physnet_mapping() # Filter out unwanted ports in case of event. if event_from_chassis: gw_chassis = self._ovn.get_chassis_gateways( chassis_name=event_from_chassis) if not gw_chassis: return ports_impacted = [] for gwc in gw_chassis: try: ports_impacted.append(utils.get_port_id_from_gwc_row(gwc)) except AttributeError: # Malformed GWC format. pass port_physnet_dict = { k: v for k, v in port_physnet_dict.items() if k in ports_impacted} if not port_physnet_dict: return # All chassis with physnets configured.
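# Illustrative shapes, added for readability (the exact structures are
# defined by the OVN IDL helpers and are assumed here):
# chassis_with_physnets is expected to map chassis names to their
# physnets, e.g. {'hv1': ['physnet1'], 'hv2': ['physnet1', 'physnet2']},
# while all_gw_chassis is expected to be a list of chassis names that
# may host gateways, e.g. ['hv1', 'hv2'].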
chassis_with_physnets = self._sb_ovn.get_chassis_and_physnets() # All chassis with enable_as_gw_chassis set all_gw_chassis = self._sb_ovn.get_gateway_chassis_from_cms_options() unhosted_gateways = self._ovn.get_unhosted_gateways( port_physnet_dict, chassis_with_physnets, all_gw_chassis) for g_name in unhosted_gateways: physnet = port_physnet_dict.get(g_name[len(ovn_const.LRP_PREFIX):]) # Remove any invalid gateway chassis from the list, otherwise # we can have a situation where all existing_chassis are invalid existing_chassis = self._ovn.get_gateway_chassis_binding(g_name) master = existing_chassis[0] if existing_chassis else None existing_chassis = self.scheduler.filter_existing_chassis( nb_idl=self._ovn, gw_chassis=all_gw_chassis, physnet=physnet, chassis_physnets=chassis_with_physnets, existing_chassis=existing_chassis) candidates = self._ovn_client.get_candidates_for_scheduling( physnet, cms=all_gw_chassis, chassis_physnets=chassis_with_physnets) chassis = self.scheduler.select( self._ovn, self._sb_ovn, g_name, candidates=candidates, existing_chassis=existing_chassis) if master and master != chassis[0]: if master not in chassis: LOG.debug("Master gateway chassis %(old)s " "has been removed from the system. Moving " "gateway %(gw)s to another chassis %(new)s.", {'gw': g_name, 'old': master, 'new': chassis[0]}) else: LOG.debug("Gateway %s is hosted at %s.", g_name, master) # NOTE(mjozefcz): This means the scheduler moved the master # chassis to another gateway based on the scheduling method. # But we don't want a network flap, so move the actual # master to the top. index = chassis.index(master) chassis[0], chassis[index] = chassis[index], chassis[0] # NOTE(dalvarez): Let's commit the changes in separate transactions # as we will rely on those for scheduling subsequent gateways.
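# Illustrative example, added for readability with assumed values: if
# the scheduler returns chassis = ['hv2', 'hv1', 'hv3'] while the
# current master is 'hv1', the swap above yields ['hv1', 'hv2', 'hv3'],
# keeping 'hv1' as the highest-priority gateway chassis and avoiding a
# dataplane flap; the transaction below then persists that priority
# order on the logical router port.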
with self._ovn.transaction(check_error=True) as txn: txn.add(self._ovn.update_lrouter_port( g_name, gateway_chassis=chassis)) @staticmethod @registry.receives(resources.SUBNET, [events.AFTER_UPDATE]) def _subnet_update(resource, event, trigger, **kwargs): l3plugin = directory.get_plugin(plugin_constants.L3) if not l3plugin: return context = kwargs['context'] orig = kwargs['original_subnet'] current = kwargs['subnet'] orig_gw_ip = orig['gateway_ip'] current_gw_ip = current['gateway_ip'] if orig_gw_ip == current_gw_ip: return gw_ports = l3plugin._plugin.get_ports(context, filters={ 'network_id': [orig['network_id']], 'device_owner': [n_const.DEVICE_OWNER_ROUTER_GW], 'fixed_ips': {'subnet_id': [orig['id']]}, }) router_ids = {port['device_id'] for port in gw_ports} remove = [{'destination': '0.0.0.0/0', 'nexthop': orig_gw_ip} ] if orig_gw_ip else [] add = [{'destination': '0.0.0.0/0', 'nexthop': current_gw_ip} ] if current_gw_ip else [] with l3plugin._ovn.transaction(check_error=True) as txn: for router_id in router_ids: l3plugin._ovn_client.update_router_routes( context, router_id, add, remove, txn=txn) @staticmethod @registry.receives(resources.PORT, [events.AFTER_UPDATE]) def _port_update(resource, event, trigger, **kwargs): l3plugin = directory.get_plugin(plugin_constants.L3) if not l3plugin: return current = kwargs['port'] if utils.is_lsp_router_port(current): # We call update_router_port() with if_exists=True because Neutron # internally creates the port and then calls update, which will # trigger this callback even before we have had the chance to # create the port on the OVN NB DB side l3plugin._ovn_client.update_router_port(kwargs['context'], current, if_exists=True) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.371045 neutron-16.0.0.0b2.dev214/neutron/services/placement_report/0000755000175000017500000000000000000000000024023 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/placement_report/__init__.py0000644000175000017500000000000000000000000026122 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/placement_report/plugin.py0000644000175000017500000002664500000000000025704 0ustar00coreycorey00000000000000# Copyright 2018 Ericsson # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
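# Illustrative enablement sketch, added for readability and not part of
# the original module: as the error message in PlacementReporterAgents
# further below suggests, this plugin is enabled (or disabled) through
# the 'placement' entry in neutron.conf:
#
#     [DEFAULT]
#     service_plugins = placement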
from keystoneauth1 import exceptions as ks_exc from neutron_lib.agent import constants as agent_const from neutron_lib.api.definitions import agent_resources_synced from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib.placement import client as place_client from neutron_lib.plugins import directory from neutron_lib.services import base as service_base from oslo_config import cfg from oslo_log import log as logging from neutron.agent.common import placement_report from neutron.notifiers import batch_notifier LOG = logging.getLogger(__name__) PLUGIN_TYPE = "placement_report" @registry.has_registry_receivers class PlacementReportPlugin(service_base.ServicePluginBase): supported_extension_aliases = [] # A service plugin that does not claim support for filter validation # would disable filter validation for all other plugins, so we report # support although this plugin doesn't have filters. __filter_validation_support = True @classmethod def get_plugin_type(cls): return PLUGIN_TYPE def get_plugin_description(self): return "Sync placement info from agent to server to placement." def __init__(self): self._core_plugin = directory.get_plugin() # NOTE(bence romsics): The following bug and fix may be relevant here. # https://bugs.launchpad.net/nova/+bug/1697825 # https://review.opendev.org/493536 self._placement_client = place_client.PlacementAPIClient(cfg.CONF) self._agents = PlacementReporterAgents(self._core_plugin) self._batch_notifier = batch_notifier.BatchNotifier( cfg.CONF.send_events_interval, self._execute_deferred) def _execute_deferred(self, deferred_batch): for deferred in deferred_batch: deferred() def _get_rp_by_name(self, name): rps = self._placement_client.list_resource_providers( name=name)['resource_providers'] # RP names are unique, therefore we can get 0 or 1. But not many. return rps[0] def _sync_placement_state(self, agent, agent_db): configurations = agent['configurations'] mech_driver = self._agents.mechanism_driver_by_agent_type( agent['agent_type']) uuid_ns = mech_driver.resource_provider_uuid5_namespace supported_vnic_types = mech_driver.supported_vnic_types device_mappings = mech_driver.get_standard_device_mappings(agent) if 'resource_provider_hypervisors' in configurations: # When the agent has the fix for # https://bugs.launchpad.net/neutron/+bug/1853840 # it sends us hypervisor names (compute nodes in nova terminology). hypervisors = configurations['resource_provider_hypervisors'] else: # For older agents without the fix we have to assume the old # buggy behavior. There we assumed DEFAULT.host is the same as the # hypervisor name, which is true in many deployments, but not # always. (In nova terminology: The compute host's DEFAULT.host is # not necessarily the same as the compute node name. We may even # have multiple compute nodes behind a compute host.) # TODO(bence romsics): This else branch can be removed when we no # longer want to support pre-Ussuri agents.
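# Illustrative example of the fallback below, with assumed values: for
# configurations['resource_provider_bandwidths'] ==
#     {'eth0': {...}, 'eth1': {...}}
# and agent['host'] == 'compute-0', the comprehension produces
# hypervisors == {'eth0': 'compute-0', 'eth1': 'compute-0'}.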
hypervisors = { device: agent['host'] for device in configurations['resource_provider_bandwidths'].keys() } log_msg = ( 'Synchronization of resources ' 'of agent type %(type)s ' 'at host %(host)s ' 'to placement %(result)s.') try: name2uuid = {} for name in hypervisors.values(): name2uuid[name] = self._get_rp_by_name(name=name)['uuid'] hypervisor_rps = {} for device, hypervisor in hypervisors.items(): hypervisor_rps[device] = { 'name': hypervisor, 'uuid': name2uuid[hypervisor], } except (IndexError, ks_exc.HttpError, ks_exc.ClientException): agent_db.resources_synced = False agent_db.update() LOG.warning( log_msg, {'type': agent['agent_type'], 'host': agent['host'], 'result': 'failed'}) return state = placement_report.PlacementState( rp_bandwidths=configurations[ 'resource_provider_bandwidths'], rp_inventory_defaults=configurations[ 'resource_provider_inventory_defaults'], driver_uuid_namespace=uuid_ns, agent_type=agent['agent_type'], hypervisor_rps=hypervisor_rps, device_mappings=device_mappings, supported_vnic_types=supported_vnic_types, client=self._placement_client) deferred_batch = state.deferred_sync() # NOTE(bence romsics): Some client calls depend on earlier # ones, but not all. There are calls in a batch that can succeed # independently of earlier calls. Therefore even if a call fails # we have to suppress its failure so the later independent calls # have a chance to succeed. If we queue up the deferred client # calls one by one then we cannot handle errors at the end of # a batch. So instead we should wrap the deferred client calls # in a single deferred batch which executes the client calls, # continuing to the next client call even if there was an error # but remembering if an error happened. Then at the end of the # batch (also having access to the agent object) set the agent's # resources_synced attribute according to the success/failure # of the batch. Since each client call does monkey patched I/O # we'll yield to other eventlet threads in each call, therefore # the performance should not be affected by the wrapping. def batch(): errors = False for deferred in deferred_batch: try: LOG.debug('placement client: {}'.format(deferred)) deferred.execute() except Exception: errors = True LOG.exception( 'placement client call failed: %s', str(deferred)) resources_synced = not errors agent_db.resources_synced = resources_synced agent_db.update() if resources_synced: LOG.debug( log_msg, {'type': agent['agent_type'], 'host': agent['host'], 'result': 'succeeded'}) else: LOG.warning( log_msg, {'type': agent['agent_type'], 'host': agent['host'], 'result': 'failed'}) self._batch_notifier.queue_event(batch) @registry.receives(resources.AGENT, [events.AFTER_CREATE, events.AFTER_UPDATE]) def handle_placement_config(self, resource, event, trigger, payload): # NOTE(bence romsics): This method gets called a lot, keep it quick. agent = payload.desired_state status = payload.metadata.get('status') context = payload.context if agent['agent_type'] not in self._agents.supported_agent_types: return if 'resource_provider_bandwidths' not in agent['configurations']: LOG.warning( "The mechanism driver claims the agent type supports " "placement reports, but the agent does not report " "'resource_provider_bandwidths' in its configurations. " "host: %(host)s, type: %(type)s", {'host': agent['host'], 'type': agent['agent_type']}) return # We need to get the same agent as in # neutron.db.agents_db.AgentDbMixin.create_or_update_agent() agent_db = self._core_plugin._get_agent_by_type_and_host( context, agent['agent_type'], agent['host']) # sync the state known by us to placement if ( # agent object in API (re-)created status == agent_const.AGENT_NEW or # agent (re-)started (even without config change) 'start_flag' in agent or # never tried to sync yet or last sync failed not agent_db[agent_resources_synced.RESOURCES_SYNCED]): LOG.debug( 'placement: syncing state for agent type %s on host %s', agent['agent_type'], agent['host']) self._sync_placement_state(agent, agent_db) else: LOG.debug( 'placement: nothing to sync for agent type %s on host %s', agent['agent_type'], agent['host']) class PlacementReporterAgents(object): # Yep, this is meant to depend on ML2. def __init__(self, ml2_plugin): try: self._mechanism_drivers = ml2_plugin.mechanism_manager.\ ordered_mech_drivers except AttributeError: LOG.error( "Invalid plugin configuration: " "The placement service plugin depends on the ML2 core plugin. " "You likely want to remove 'placement' from " "neutron.conf: DEFAULT.service_plugins") raise self._supported_agent_types = [] self._agent_type_to_mech_driver = {} @property def supported_agent_types(self): if not self._supported_agent_types: # NOTE(bence romsics): We treat the presence of the # RP uuid namespace as a proxy for supporting placement reports # from the driver's agent type. But we could introduce a # property/logic explicitly describing the agent types supporting # placement reports any time if this proved to be insufficient. self._supported_agent_types = [ driver.obj.agent_type for driver in self._mechanism_drivers if driver.obj.resource_provider_uuid5_namespace is not None] LOG.debug('agent types supporting placement reports: %s', ', '.join(self._supported_agent_types)) return self._supported_agent_types def mechanism_driver_by_agent_type(self, agent_type): if agent_type not in self._agent_type_to_mech_driver: for driver in self._mechanism_drivers: if (hasattr(driver.obj, 'agent_type') and agent_type == driver.obj.agent_type): self._agent_type_to_mech_driver[agent_type] = driver.obj break return self._agent_type_to_mech_driver[agent_type] ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.371045 neutron-16.0.0.0b2.dev214/neutron/services/portforwarding/0000755000175000017500000000000000000000000023527 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/portforwarding/__init__.py0000644000175000017500000000000000000000000025626 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.371045 neutron-16.0.0.0b2.dev214/neutron/services/portforwarding/common/0000755000175000017500000000000000000000000025017 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/portforwarding/common/__init__.py0000644000175000017500000000000000000000000027116 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/neutron/services/portforwarding/common/exceptions.py0000644000175000017500000000306000000000000027551 0ustar00coreycorey00000000000000# Copyright 2018 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron._i18n import _ from neutron_lib import exceptions as n_exc class PortForwardingNotFound(n_exc.NotFound): message = _("Port Forwarding %(id)s could not be found.") class PortForwardingNotSupportFilterField(n_exc.BadRequest): message = _("Port Forwarding filter %(filter)s is not supported.") class PortHasPortForwarding(n_exc.BadRequest): message = _("Cannot associate floating IP to port " "%(port_id)s because it already has a " "Port Forwarding binding.") class FipInUseByPortForwarding(n_exc.InUse): message = _("Floating IP %(id)s in use by Port Forwarding resources.") class PortHasBindingFloatingIP(n_exc.InUse): message = _("Cannot create port forwarding to floating IP " "%(floating_ip_address)s (%(fip_id)s) with port %(port_id)s " "using fixed IP %(fixed_ip)s, as that port already " "has a binding floating IP.") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/portforwarding/pf_plugin.py0000644000175000017500000006241700000000000026076 0ustar00coreycorey00000000000000# Copyright (c) 2018 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
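# Illustrative API sketch, added for readability (identifiers and
# addresses are assumed): a port forwarding is created as a
# sub-resource of a floating IP, e.g.
#
#     POST /v2.0/floatingips/<fip-uuid>/port_forwardings
#     {"port_forwarding": {"protocol": "tcp",
#                          "external_port": 2222,
#                          "internal_port": 22,
#                          "internal_port_id": "<port-uuid>",
#                          "internal_ip_address": "10.0.0.5"}}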
import collections import netaddr from neutron_lib.api.definitions import expose_port_forwarding_in_fip from neutron_lib.api.definitions import fip_pf_description from neutron_lib.api.definitions import floating_ip_port_forwarding as apidef from neutron_lib.api.definitions import l3 from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as lib_consts from neutron_lib.db import api as db_api from neutron_lib.db import resource_extend from neutron_lib import exceptions as lib_exc from neutron_lib.exceptions import l3 as lib_l3_exc from neutron_lib.objects import exceptions as obj_exc from neutron_lib.plugins import constants from neutron_lib.plugins import directory from oslo_log import log as logging from neutron._i18n import _ from neutron.api.rpc.callbacks import events as rpc_events from neutron.api.rpc.handlers import resources_rpc from neutron.common import utils from neutron.db import db_base_plugin_common from neutron.db import l3_dvr_db from neutron.extensions import floating_ip_port_forwarding as fip_pf from neutron.objects import base as base_obj from neutron.objects import port_forwarding as pf from neutron.objects import router as l3_obj from neutron.services.portforwarding.common import exceptions as pf_exc LOG = logging.getLogger(__name__) # Move to neutron-lib someday. PORT_FORWARDING_FLOATINGIP_KEY = '_pf_floatingips' @resource_extend.has_resource_extenders @registry.has_registry_receivers class PortForwardingPlugin(fip_pf.PortForwardingPluginBase): """Implementation of the Neutron Port Forwarding Service Plugin. This class implements a Port Forwarding plugin. """ required_service_plugins = ['router'] supported_extension_aliases = [apidef.ALIAS, expose_port_forwarding_in_fip.ALIAS, fip_pf_description.ALIAS] __native_pagination_support = True __native_sorting_support = True __filter_validation_support = True def __init__(self): super(PortForwardingPlugin, self).__init__() self.push_api = resources_rpc.ResourcesPushRpcApi() self.l3_plugin = directory.get_plugin(constants.L3) self.core_plugin = directory.get_plugin() @staticmethod @resource_extend.extends([l3.FLOATINGIPS]) def _extend_floatingip_dict(result_dict, db): fields = [apidef.INTERNAL_IP_ADDRESS, apidef.PROTOCOL, apidef.INTERNAL_PORT, apidef.EXTERNAL_PORT] result_dict[apidef.COLLECTION_NAME] = [] if db.port_forwardings: port_forwarding_result = [] for port_forwarding in db.port_forwardings: pf_dict = pf.PortForwarding.modify_fields_from_db( port_forwarding) for key in list(pf_dict.keys()): if key not in fields: pf_dict.pop(key) elif key == apidef.INTERNAL_IP_ADDRESS: pf_dict[key] = str(pf_dict[key]) port_forwarding_result.append(pf_dict) result_dict[apidef.COLLECTION_NAME] = port_forwarding_result return result_dict @registry.receives(resources.FLOATING_IP, [events.BEFORE_CREATE, events.BEFORE_UPDATE]) def _check_port_has_port_forwarding(self, resource, event, trigger, payload=None): port_id = payload.request_body['floatingip'].get('port_id') if not port_id: return pf_objs = pf.PortForwarding.get_objects( payload.context, internal_port_id=port_id) if not pf_objs: return # The port may not be bound to a host yet, or it may migrate from a # dvr_no_external host to a dvr host. So we just do not allow # any dvr router's floating IP to be bound to a port which # already has port forwarding. router = self.l3_plugin.get_router(payload.context.elevated(), pf_objs[0].router_id) if l3_dvr_db.is_distributed_router(router): raise pf_exc.PortHasPortForwarding(port_id=port_id) @registry.receives(resources.FLOATING_IP, [events.PRECOMMIT_UPDATE, events.PRECOMMIT_DELETE]) def _check_floatingip_request(self, resource, event, trigger, context, **kwargs): # We only support associating a "free" floatingip with # port forwarding resources. And if the PUT request of a floatingip # contains a "port_id" field in its body, that association is not # allowed by the port forwarding functionality. floatingip_id = None if event == events.PRECOMMIT_UPDATE: fip_db = kwargs.get('floatingip_db') floatingip_id = fip_db.id # Here the key-value must contain a floatingip param, and the value # must be a dict with the key 'floatingip'. if not kwargs['floatingip']['floatingip'].get('port_id'): # Only care about the floatingip association cases. # The port_id field is mandatory there. But if a floatingip # disassociates an internal port, the port_id should be null. LOG.debug('Skip check for floatingip %s, as the update ' 'request does not contain port_id.', floatingip_id) return elif event == events.PRECOMMIT_DELETE: floatingip_id = kwargs.get('port').get('device_id') if not floatingip_id: return exist_pf_resources = pf.PortForwarding.get_objects( context, floatingip_id=floatingip_id) if exist_pf_resources: raise pf_exc.FipInUseByPortForwarding(id=floatingip_id) @registry.receives(resources.PORT, [events.AFTER_UPDATE, events.PRECOMMIT_DELETE]) @db_api.retry_if_session_inactive() def _process_port_request(self, resource, event, trigger, context, **kwargs): # Deleting a floatingip delivers a port resource with the # precommit_delete event, so just return; the request is then checked # in the _check_floatingip_request callback. if kwargs['port']['device_owner'].startswith( lib_consts.DEVICE_OWNER_FLOATINGIP): return # This block checks whether there are fixed IP updates. # Whether the event is AFTER_UPDATE or PRECOMMIT_DELETE, # we will use update_ip_set to check whether the possibly associated # port forwarding resources need to be deleted for the port's # AFTER_UPDATE event, or to get all affected ip addresses for the # port's PRECOMMIT_DELETE event. port_id = kwargs['port']['id'] update_fixed_ips = kwargs['port']['fixed_ips'] update_ip_set = set() for update_fixed_ip in update_fixed_ips: if (netaddr.IPNetwork(update_fixed_ip.get('ip_address')).version == lib_consts.IP_VERSION_4): update_ip_set.add(update_fixed_ip.get('ip_address')) if not update_ip_set: return # If the port owner wants to update or delete the port, we must # elevate the context to check if the floatingip or port forwarding # resources are owned by other tenants. if not context.is_admin: context = context.elevated() # If the logic arrives here, that means we have got update_ip_set and # it is not empty. So we need to get all port forwarding # resources based on the request port_id for preparing the next # process, such as deleting them. pf_resources = pf.PortForwarding.get_objects( context, internal_port_id=port_id) if not pf_resources: return # If the logic arrives here, that means we have got pf_resources and # it is not empty either. Then we collect all ip addresses # which are used by port forwarding resources to generate used_ip_set, # and we default to set remove_ip_set as used_ip_set which means we # want to delete all port forwarding resources when the event is # PRECOMMIT_DELETE. And when the event is AFTER_UPDATE, we get the # different part.
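# Illustrative example with assumed addresses: if the existing port
# forwardings use {'10.0.0.5', '10.0.0.6'} and an update keeps only
# '10.0.0.5' among the port's fixed IPs, then for AFTER_UPDATE
# remove_ip_set == {'10.0.0.6'}, while for PRECOMMIT_DELETE both
# addresses end up in remove_ip_set.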
used_ip_set = set() for pf_resource in pf_resources: used_ip_set.add(str(pf_resource.internal_ip_address)) remove_ip_set = used_ip_set if event == events.AFTER_UPDATE: remove_ip_set = used_ip_set - update_ip_set if not remove_ip_set: return # Here we have the remove_ip_set; the following block will delete the # port forwarding resources based on it. Note that if the event is # AFTER_UPDATE and remove_ip_set is empty, the # following block won't be processed. remove_port_forwarding_list = [] with db_api.CONTEXT_WRITER.using(context): for pf_resource in pf_resources: if str(pf_resource.internal_ip_address) in remove_ip_set: pf_objs = pf.PortForwarding.get_objects( context, floatingip_id=pf_resource.floatingip_id) if len(pf_objs) == 1 and pf_objs[0].id == pf_resource.id: fip_obj = l3_obj.FloatingIP.get_object( context, id=pf_resource.floatingip_id) fip_obj.update_fields({'router_id': None}) fip_obj.update() pf_resource.delete() remove_port_forwarding_list.append(pf_resource) self.push_api.push(context, remove_port_forwarding_list, rpc_events.DELETED) def _get_internal_ip_subnet(self, request_ip, fixed_ips): request_ip = netaddr.IPNetwork(request_ip) for fixed_ip in fixed_ips: if netaddr.IPNetwork(fixed_ip['ip_address']) == request_ip: return fixed_ip['subnet_id'] def _find_a_router_for_fip_port_forwarding( self, context, pf_dict, fip_obj): internal_port_id = pf_dict['internal_port_id'] internal_port = self.core_plugin.get_port(context, internal_port_id) v4_fixed_ips = [fixed_ip for fixed_ip in internal_port['fixed_ips'] if (netaddr.IPNetwork(fixed_ip['ip_address'] ).version == lib_consts.IP_VERSION_4)] if not v4_fixed_ips: # As port forwarding works with ipv4 addresses, # if there is no ipv4 address, we need to raise. message = _("Requested internal port %s must allocate " "at least one IPv4 address.") % internal_port_id raise lib_exc.BadRequest(resource=apidef.RESOURCE_NAME, msg=message) # Get the internal ip address; if not specified, choose the first ipv4 # address. internal_ip_address = pf_dict.get('internal_ip_address') if not internal_ip_address: internal_ip_address = v4_fixed_ips[0]['ip_address'] pf_dict['internal_ip_address'] = internal_ip_address internal_subnet_id = v4_fixed_ips[0]['subnet_id'] else: # check the matched fixed ip internal_subnet_id = self._get_internal_ip_subnet( internal_ip_address, v4_fixed_ips) if not internal_subnet_id: message = _( "Requested internal IP address %(internal_ip_address)s is " "not suitable for internal neutron port " "%(internal_port_id)s, as its fixed_ips are " "%(fixed_ips)s") % { 'internal_ip_address': internal_ip_address, 'internal_port_id': internal_port['id'], 'fixed_ips': v4_fixed_ips} raise lib_exc.BadRequest(resource=apidef.RESOURCE_NAME, msg=message) internal_subnet = self.core_plugin.get_subnet( context, internal_subnet_id) external_network_id = fip_obj.floating_network_id try: return self.l3_plugin.get_router_for_floatingip( context, internal_port, internal_subnet, external_network_id) except lib_l3_exc.ExternalGatewayForFloatingIPNotFound: message = _( "External network %(external_net_id)s is not reachable from " "subnet %(internal_subnet_id)s. Cannot set " "Port forwarding for port %(internal_port_id)s with " "Floating IP %(port_forwarding_id)s") % { 'external_net_id': external_network_id, 'internal_subnet_id': internal_subnet_id, 'internal_port_id': internal_port_id, 'port_forwarding_id': fip_obj.id} raise lib_exc.BadRequest(resource=apidef.RESOURCE_NAME, msg=message) def _check_port_has_binding_floating_ip(self, context, port_forwarding): port_id = port_forwarding['internal_port_id'] floatingip_objs = l3_obj.FloatingIP.get_objects( context.elevated(), fixed_port_id=port_id) if floatingip_objs: floating_ip_address = floatingip_objs[0].floating_ip_address raise pf_exc.PortHasBindingFloatingIP( floating_ip_address=floating_ip_address, fip_id=floatingip_objs[0].id, port_id=port_id, fixed_ip=port_forwarding['internal_ip_address']) @db_base_plugin_common.convert_result_to_dict def create_floatingip_port_forwarding(self, context, floatingip_id, port_forwarding): port_forwarding = port_forwarding.get(apidef.RESOURCE_NAME) port_forwarding['floatingip_id'] = floatingip_id self._check_port_has_binding_floating_ip(context, port_forwarding) with db_api.CONTEXT_WRITER.using(context): fip_obj = self._get_fip_obj(context, floatingip_id) if fip_obj.fixed_port_id: raise lib_l3_exc.FloatingIPPortAlreadyAssociated( port_id=port_forwarding['internal_port_id'], fip_id=fip_obj.id, floating_ip_address=fip_obj.floating_ip_address, fixed_ip=str(port_forwarding['internal_ip_address']), net_id=fip_obj.floating_network_id) router_id = self._find_a_router_for_fip_port_forwarding( context, port_forwarding, fip_obj) pf_obj = pf.PortForwarding(context, **port_forwarding) # If this func does not raise an exception, it means the # router_id matched. # case1: fip_obj.router_id = None # case2: fip_obj.router_id is the same as the one we selected. self._check_router_match(context, fip_obj, router_id, port_forwarding) if not fip_obj.router_id: values = {'router_id': router_id, 'fixed_port_id': None} l3_obj.FloatingIP.update_objects( context, values, id=floatingip_id) try: pf_obj.create() except obj_exc.NeutronDbObjectDuplicateEntry: (__, conflict_params) = self._find_existing_port_forwarding( context, floatingip_id, port_forwarding) message = _("A duplicate port forwarding entry with the same " "attributes already exists, conflicting " "values are %s") % conflict_params raise lib_exc.BadRequest(resource=apidef.RESOURCE_NAME, msg=message) self.push_api.push(context, [pf_obj], rpc_events.CREATED) return pf_obj @db_base_plugin_common.convert_result_to_dict def update_floatingip_port_forwarding(self, context, id, floatingip_id, port_forwarding): port_forwarding = port_forwarding.get(apidef.RESOURCE_NAME) new_internal_port_id = None if port_forwarding and port_forwarding.get('internal_port_id'): new_internal_port_id = port_forwarding.get('internal_port_id') self._check_port_has_binding_floating_ip(context, port_forwarding) try: with db_api.CONTEXT_WRITER.using(context): fip_obj = self._get_fip_obj(context, floatingip_id) pf_obj = pf.PortForwarding.get_object(context, id=id) if not pf_obj: raise pf_exc.PortForwardingNotFound(id=id) ori_internal_port_id = pf_obj.internal_port_id if new_internal_port_id and (new_internal_port_id != ori_internal_port_id): router_id = self._find_a_router_for_fip_port_forwarding( context, port_forwarding, fip_obj) self._check_router_match(context, fip_obj, router_id, port_forwarding) # As the socket will be updated when the dict contains both # internal_ip_address and internal_port, make sure both are # set. internal_ip_address = port_forwarding.get( 'internal_ip_address') internal_port = port_forwarding.get('internal_port') if any([internal_ip_address, internal_port]): port_forwarding.update({ 'internal_ip_address': internal_ip_address if internal_ip_address else str(pf_obj.internal_ip_address), 'internal_port': internal_port if internal_port else pf_obj.internal_port }) pf_obj.update_fields(port_forwarding, reset_changes=True) pf_obj.update() except obj_exc.NeutronDbObjectDuplicateEntry: (__, conflict_params) = self._find_existing_port_forwarding( context, floatingip_id, pf_obj.to_dict()) message = _("A duplicate port forwarding entry with the same " "attributes already exists, conflicting values " "are %s") % conflict_params raise lib_exc.BadRequest(resource=apidef.RESOURCE_NAME, msg=message) self.push_api.push(context, [pf_obj], rpc_events.UPDATED) return pf_obj def _check_router_match(self, context, fip_obj, router_id, pf_dict): internal_port_id = pf_dict['internal_port_id'] if fip_obj.router_id and fip_obj.router_id != router_id: objs = pf.PortForwarding.get_objects( context, floatingip_id=fip_obj.id, internal_ip_address=pf_dict['internal_ip_address'], internal_port=pf_dict['internal_port']) if objs: message = _("Floating IP %(floatingip_id)s with params: " "internal_ip_address: %(internal_ip_address)s, " "internal_port: %(internal_port)s " "already exists") % { 'floatingip_id': fip_obj.id, 'internal_ip_address': pf_dict['internal_ip_address'], 'internal_port': pf_dict['internal_port']} else: message = _("The Floating IP %(floatingip_id)s has been set " "on router %(router_id)s; the internal Neutron " "port %(internal_port_id)s cannot reach it") % { 'floatingip_id': fip_obj.id, 'router_id': fip_obj.router_id, 'internal_port_id': internal_port_id} raise lib_exc.BadRequest(resource=apidef.RESOURCE_NAME, msg=message) def _find_existing_port_forwarding(self, context, floatingip_id, port_forwarding, specify_params=None): # Because the session has been flushed by NeutronDbObjectDuplicateEntry, # if we want to use the context for more db queries, we need # to roll back first.
context.session.rollback() if not specify_params: specify_params = [ {'floatingip_id': floatingip_id, 'external_port': port_forwarding['external_port'], 'protocol': port_forwarding['protocol']}, {'internal_port_id': port_forwarding['internal_port_id'], 'internal_ip_address': port_forwarding['internal_ip_address'], 'internal_port': port_forwarding['internal_port'], 'protocol': port_forwarding['protocol']}] for param in specify_params: objs = pf.PortForwarding.get_objects(context, **param) if objs: return (objs[0], param) def _get_fip_obj(self, context, fip_id): fip_obj = l3_obj.FloatingIP.get_object(context, id=fip_id) if not fip_obj: raise lib_l3_exc.FloatingIPNotFound(floatingip_id=fip_id) return fip_obj @db_base_plugin_common.make_result_with_fields @db_base_plugin_common.convert_result_to_dict def get_floatingip_port_forwarding(self, context, id, floatingip_id, fields=None): self._get_fip_obj(context, floatingip_id) obj = pf.PortForwarding.get_object(context, id=id) if not obj: raise pf_exc.PortForwardingNotFound(id=id) return obj def _validate_filter_for_port_forwarding(self, request_filter): if not request_filter: return for filter_member_key in request_filter.keys(): if filter_member_key in pf.FIELDS_NOT_SUPPORT_FILTER: raise pf_exc.PortForwardingNotSupportFilterField( filter=filter_member_key) @db_base_plugin_common.make_result_with_fields @db_base_plugin_common.convert_result_to_dict def get_floatingip_port_forwardings(self, context, floatingip_id=None, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): self._get_fip_obj(context, floatingip_id) filters = filters or {} self._validate_filter_for_port_forwarding(filters) pager = base_obj.Pager(sorts, limit, page_reverse, marker) return pf.PortForwarding.get_objects( context, _pager=pager, floatingip_id=floatingip_id, **filters) def delete_floatingip_port_forwarding(self, context, id, floatingip_id): pf_obj = pf.PortForwarding.get_object(context, id=id) if not pf_obj or pf_obj.floatingip_id != floatingip_id: raise pf_exc.PortForwardingNotFound(id=id) with db_api.CONTEXT_WRITER.using(context): fip_obj = self._get_fip_obj(context, pf_obj.floatingip_id) pf_objs = pf.PortForwarding.get_objects( context, floatingip_id=pf_obj.floatingip_id) if len(pf_objs) == 1 and pf_objs[0].id == pf_obj.id: fip_obj.update_fields({'router_id': None}) fip_obj.update() pf_obj.delete() self.push_api.push(context, [pf_obj], rpc_events.DELETED) def sync_port_forwarding_fip(self, context, routers): if not routers: return router_ids = [router.get('id') for router in routers] router_pf_fip_set = collections.defaultdict(set) fip_pfs = collections.defaultdict(set) router_fip_ids = collections.defaultdict(set) item_pf_fields = pf.PortForwarding.get_port_forwarding_obj_by_routers( context, router_ids) for router_id, fip_addr, pf_id, fip_id in item_pf_fields: router_pf_fip_set[router_id].add(utils.ip_to_cidr(fip_addr, 32)) fip_pfs[fip_id].add(pf_id) router_fip_ids[router_id].add(fip_id) for router in routers: if router['id'] in router_fip_ids: router['port_forwardings_fip_set'] = router_pf_fip_set[ router['id']] router['fip_managed_by_port_forwardings'] = router_fip_ids[ router['id']] router_pf_fips_info = router.get( PORT_FORWARDING_FLOATINGIP_KEY, []) for fip_id in router_fip_ids[router['id']]: fip = self.l3_plugin.get_floatingip(context, fip_id) router_pf_fips_info.append(fip) router[PORT_FORWARDING_FLOATINGIP_KEY] = router_pf_fips_info ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/provider_configuration.py0000644000175000017500000002410000000000000025610 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import importlib import itertools import os from neutron_lib.db import constants as db_const from neutron_lib.db import utils as db_utils from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_log import log as logging from oslo_log import versionutils import stevedore from neutron._i18n import _ from neutron.conf.services import provider_configuration as prov_config LOG = logging.getLogger(__name__) SERVICE_PROVIDERS = 'neutron.service_providers' # TODO(HenryG): use MovedGlobals to deprecate this. serviceprovider_opts = prov_config.serviceprovider_opts prov_config.register_service_provider_opts() class NeutronModule(object): """A Neutron extension module.""" def __init__(self, service_module): self.module_name = service_module self.repo = { 'mod': self._import_or_none(), 'ini': None } def _import_or_none(self): try: return importlib.import_module(self.module_name) except ImportError: return None def installed(self): LOG.debug("NeutronModule installed = %s", self.module_name) return self.module_name def module(self): return self.repo['mod'] # Return an INI parser for the child module def ini(self, neutron_dir=None): if self.repo['ini'] is None: ini_file = cfg.ConfigOpts() prov_config.register_service_provider_opts(ini_file) if neutron_dir is not None: neutron_dirs = [neutron_dir] else: try: neutron_dirs = cfg.CONF.config_dirs except cfg.NoSuchOptError: neutron_dirs = None if not neutron_dirs: neutron_dirs = ['/etc/neutron'] # load configuration from all matching files to reflect oslo.config # behaviour config_files = [] for neutron_dir in neutron_dirs: ini_path = os.path.join(neutron_dir, '%s.conf' % self.module_name) if os.path.exists(ini_path): config_files.append(ini_path) # NOTE(ihrachys): we could pass project=self.module_name instead to # rely on oslo.config to find configuration files for us, but: # 1. that would render neutron_dir argument ineffective; # 2. that would break loading configuration file from under # /etc/neutron in case no --config-dir is passed. # That's why we need to explicitly construct CLI here. 
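# Illustrative example with assumed paths: for config_files ==
# ['/etc/neutron/neutron_vpnaas.conf'] the chain below flattens to
# args == ['--config-file', '/etc/neutron/neutron_vpnaas.conf'].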
ini_file(args=list(itertools.chain.from_iterable( ['--config-file', file_] for file_ in config_files ))) self.repo['ini'] = ini_file return self.repo['ini'] def service_providers(self): """Return the service providers for the extension module.""" providers = [] # Attempt to read the config from cfg.CONF first; when passing # --config-dir, the option is merged from all the definitions # made across all the imported config files try: providers = cfg.CONF.service_providers.service_provider except cfg.NoSuchOptError: pass # Alternatively, if the option is not available, try to load # it from the provider module's config file; this may be # necessary, if modules are loaded on the fly (DevStack may # be an example) if not providers: providers = self.ini().service_providers.service_provider if providers: versionutils.report_deprecated_feature( LOG, 'Implicit loading of service providers from ' 'neutron_*.conf files is deprecated and will be ' 'removed in Ocata release.') return providers # global scope function that should be used in service APIs def normalize_provider_name(name): return name.lower() def get_provider_driver_class(driver, namespace=SERVICE_PROVIDERS): """Return path to provider driver class In order to keep backward compatibility with configs < Kilo, we need to translate driver class paths after advanced services split. This is done by defining old class path as entry point in neutron package. """ try: driver_manager = stevedore.driver.DriverManager( namespace, driver).driver except ImportError: return driver except RuntimeError: return driver new_driver = "%s.%s" % (driver_manager.__module__, driver_manager.__name__) LOG.warning( "The configured driver %(driver)s has been moved, automatically " "using %(new_driver)s instead. Please update your config files, " "as this automatic fixup will be removed in a future release.", {'driver': driver, 'new_driver': new_driver}) return new_driver def parse_service_provider_opt(service_module='neutron', service_type=None): """Parse service definition opts and returns result.""" def validate_name(name): if len(name) > db_const.NAME_FIELD_SIZE: raise n_exc.Invalid( _("Provider name %(name)s is limited by %(len)s characters") % {'name': name, 'len': db_const.NAME_FIELD_SIZE}) neutron_mod = NeutronModule(service_module) svc_providers_opt = neutron_mod.service_providers() LOG.debug("Service providers = %s", svc_providers_opt) res = [] for prov_def in svc_providers_opt: split = prov_def.split(':') try: svc_type, name, driver = split[:3] if service_type and service_type != svc_type: continue except ValueError: raise n_exc.Invalid(_("Invalid service provider format")) validate_name(name) name = normalize_provider_name(name) default = False if len(split) == 4 and split[3]: if split[3] == 'default': default = True else: msg = (_("Invalid provider format. 
" "Last part should be 'default' or empty: %s") % prov_def) LOG.error(msg) raise n_exc.Invalid(msg) driver = get_provider_driver_class(driver) res.append({'service_type': svc_type, 'name': name, 'driver': driver, 'default': default}) return res class ServiceProviderNotFound(n_exc.InvalidInput): message = _("Service provider '%(provider)s' could not be found " "for service type %(service_type)s") class DefaultServiceProviderNotFound(n_exc.InvalidInput): message = _("Service type %(service_type)s does not have a default " "service provider") class ServiceProviderAlreadyAssociated(n_exc.Conflict): message = _("Resource '%(resource_id)s' is already associated with " "provider '%(provider)s' for service type '%(service_type)s'") class ProviderConfiguration(object): def __init__(self, svc_module='neutron', svc_type=None): self.providers = {} for prov in parse_service_provider_opt(svc_module, svc_type): self.add_provider(prov) def _ensure_driver_unique(self, driver): for v in self.providers.values(): if v['driver'] == driver: msg = (_("Driver %s is not unique across providers") % driver) LOG.error(msg) raise n_exc.Invalid(msg) def _ensure_default_unique(self, type, default): if not default: return for k, v in self.providers.items(): if k[0] == type and v['default']: msg = _("Multiple default providers " "for service %s") % type LOG.error(msg) raise n_exc.Invalid(msg) def add_provider(self, provider): self._ensure_driver_unique(provider['driver']) self._ensure_default_unique(provider['service_type'], provider['default']) provider_type = (provider['service_type'], provider['name']) if provider_type in self.providers: msg = (_("Multiple providers specified for service " "%s") % provider['service_type']) LOG.error(msg) raise n_exc.Invalid(msg) self.providers[provider_type] = {'driver': provider['driver'], 'default': provider['default']} def _check_entry(self, k, v, filters): # small helper to deal with query filters if not filters: return True for index, key in enumerate(['service_type', 'name']): if key in filters: if k[index] not in filters[key]: return False for key in ['driver', 'default']: if key in filters: if v[key] not in filters[key]: return False return True def get_service_providers(self, filters=None, fields=None): return [db_utils.resource_fields({'service_type': k[0], 'name': k[1], 'driver': v['driver'], 'default': v['default']}, fields) for k, v in self.providers.items() if self._check_entry(k, v, filters)] ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.371045 neutron-16.0.0.0b2.dev214/neutron/services/qos/0000755000175000017500000000000000000000000021262 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/qos/__init__.py0000644000175000017500000000000000000000000023361 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.371045 neutron-16.0.0.0b2.dev214/neutron/services/qos/drivers/0000755000175000017500000000000000000000000022740 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/qos/drivers/__init__.py0000644000175000017500000000000000000000000025037 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 
mtime=1586982291.371045 neutron-16.0.0.0b2.dev214/neutron/services/qos/drivers/linuxbridge/0000755000175000017500000000000000000000000025254 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/qos/drivers/linuxbridge/__init__.py0000644000175000017500000000000000000000000027353 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/qos/drivers/linuxbridge/driver.py0000644000175000017500000000372500000000000027130 0ustar00coreycorey00000000000000# Copyright (c) 2016 Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import portbindings from neutron_lib import constants from neutron_lib.db import constants as db_consts from neutron_lib.services.qos import base from neutron_lib.services.qos import constants as qos_consts from oslo_log import log as logging LOG = logging.getLogger(__name__) DRIVER = None SUPPORTED_RULES = { qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: { qos_consts.MAX_KBPS: { 'type:range': [0, db_consts.DB_INTEGER_MAX_VALUE]}, qos_consts.MAX_BURST: { 'type:range': [0, db_consts.DB_INTEGER_MAX_VALUE]}, qos_consts.DIRECTION: { 'type:values': constants.VALID_DIRECTIONS} }, qos_consts.RULE_TYPE_DSCP_MARKING: { qos_consts.DSCP_MARK: {'type:values': constants.VALID_DSCP_MARKS} } } class LinuxBridgeDriver(base.DriverBase): @staticmethod def create(): return LinuxBridgeDriver( name='linuxbridge', vif_types=[portbindings.VIF_TYPE_BRIDGE, portbindings.VIF_TYPE_TAP], vnic_types=[portbindings.VNIC_NORMAL], supported_rules=SUPPORTED_RULES, requires_rpc_notifications=True) def register(): """Register the driver.""" global DRIVER if not DRIVER: DRIVER = LinuxBridgeDriver.create() LOG.debug('Linuxbridge QoS driver registered') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/qos/drivers/manager.py0000644000175000017500000001757100000000000024737 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.api.definitions import portbindings from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib import constants as lib_constants from neutron_lib import exceptions from neutron_lib.plugins import utils from neutron_lib.services.qos import constants as qos_consts from oslo_log import log as logging from neutron.api.rpc.callbacks import events as rpc_events from neutron.api.rpc.callbacks.producer import registry as rpc_registry from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import resources_rpc from neutron.objects.qos import policy as policy_object LOG = logging.getLogger(__name__) SKIPPED_VIF_TYPES = [ portbindings.VIF_TYPE_UNBOUND, portbindings.VIF_TYPE_BINDING_FAILED ] class QosServiceDriverManager(object): def __init__(self): self._drivers = [] self.rpc_notifications_required = False rpc_registry.provide(self._get_qos_policy_cb, resources.QOS_POLICY) # notify any registered QoS driver that we're ready, those will # call the driver manager back with register_driver if they # are enabled registry.publish(qos_consts.QOS_PLUGIN, events.AFTER_INIT, self) if self.rpc_notifications_required: self.push_api = resources_rpc.ResourcesPushRpcApi() @staticmethod def _get_qos_policy_cb(resource, policy_id, **kwargs): context = kwargs.get('context') if context is None: LOG.warning( 'Received %(resource)s %(policy_id)s without context', {'resource': resource, 'policy_id': policy_id}) return policy = policy_object.QosPolicy.get_object(context, id=policy_id) return policy @staticmethod def _validate_vnic_type(driver, vnic_type, port_id): if driver.is_vnic_compatible(vnic_type): return True LOG.debug("vnic_type %(vnic_type)s of port %(port_id)s " "is not compatible with QoS driver %(driver)s", {'vnic_type': vnic_type, 'port_id': port_id, 'driver': driver.name}) return False @staticmethod def _validate_vif_type(driver, vif_type, port_id): if driver.is_vif_type_compatible(vif_type): return True LOG.debug("vif_type %(vif_type)s of port %(port_id)s " "is not compatible with QoS driver %(driver)s", {'vif_type': vif_type, 'port_id': port_id, 'driver': driver.name}) return False @staticmethod def _parse_parameter_values(parameter_values): validator, possible_values = list(parameter_values.items())[0] if validator == 'type:range': parameter_values = { "start": possible_values[0], "end": possible_values[1] } parameter_type = lib_constants.VALUES_TYPE_RANGE elif validator == 'type:values': parameter_values = possible_values parameter_type = lib_constants.VALUES_TYPE_CHOICES return parameter_values, parameter_type def call(self, method_name, *args, **kwargs): """Helper method for calling a method across all extension drivers.""" exc_list = [] for driver in self._drivers: try: getattr(driver, method_name)(*args, **kwargs) except Exception as exc: exception_msg = ("Extension driver '%(name)s' failed in " "%(method)s") exception_data = {'name': driver.name, 'method': method_name} LOG.exception(exception_msg, exception_data) exc_list.append(exc) if exc_list: raise exceptions.DriverCallError(exc_list=exc_list) if self.rpc_notifications_required: context = kwargs.get('context') or args[0] policy_obj = kwargs.get('policy_obj') or args[1] # we don't push create_policy events since policies are empty # on creation, they only become of any use when rules get # attached to them. 
            if method_name == qos_consts.UPDATE_POLICY:
                self.push_api.push(context, [policy_obj],
                                   rpc_events.UPDATED)
            elif method_name == qos_consts.DELETE_POLICY:
                self.push_api.push(context, [policy_obj],
                                   rpc_events.DELETED)

    def register_driver(self, driver):
        """Register driver with qos plugin.

        This method is called from drivers on INIT event.
        """
        self._drivers.append(driver)
        self.rpc_notifications_required |= driver.requires_rpc_notifications

    def validate_rule_for_port(self, context, rule, port):
        port_binding = utils.get_port_binding_by_status_and_host(
            port.bindings, lib_constants.ACTIVE, raise_if_not_found=True,
            port_id=port['id'])
        for driver in self._drivers:
            vif_type = port_binding.vif_type
            if vif_type not in SKIPPED_VIF_TYPES:
                if not self._validate_vif_type(driver, vif_type,
                                               port['id']):
                    continue
            else:
                vnic_type = port_binding.vnic_type
                if not self._validate_vnic_type(driver, vnic_type,
                                                port['id']):
                    continue
            if (driver.is_rule_supported(rule) and
                    driver.validate_rule_for_port(context, rule, port)):
                return True
        return False

    @property
    def supported_rule_types(self):
        if not self._drivers:
            return []
        rule_types = set(qos_consts.VALID_RULE_TYPES)
        # Recalculate on every call to allow drivers to determine supported
        # rule types dynamically
        for driver in self._drivers:
            new_rule_types = rule_types & set(driver.supported_rules)
            dropped_rule_types = rule_types - new_rule_types
            if dropped_rule_types:
                LOG.debug("%(rule_types)s rule types disabled "
                          "because enabled %(driver)s does not support them",
                          {'rule_types': ', '.join(dropped_rule_types),
                           'driver': driver.name})
            rule_types = new_rule_types
        LOG.debug("Supported QoS rule types "
                  "(common subset for all loaded QoS drivers): %s",
                  rule_types)
        return rule_types

    def supported_rule_type_details(self, rule_type_name):
        if not self._drivers:
            return []
        rule_type_drivers = []
        for driver in self._drivers:
            if rule_type_name in driver.supported_rules:
                supported_parameters = []
                rule_parameters = driver.supported_rules.get(rule_type_name)
                for name, values in rule_parameters.items():
                    parameter_values, parameter_type = (
                        self._parse_parameter_values(values))
                    supported_parameters.append({
                        "parameter_name": name,
                        "parameter_values": parameter_values,
                        "parameter_type": parameter_type
                    })
                rule_type_drivers.append({
                    "name": driver.name,
                    "supported_parameters": supported_parameters
                })
        return rule_type_drivers


# File: neutron-16.0.0.0b2.dev214/neutron/services/qos/drivers/openvswitch/__init__.py (empty)
# File: neutron-16.0.0.0b2.dev214/neutron/services/qos/drivers/openvswitch/driver.py
# Copyright (c) 2016 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib.api.definitions import portbindings
from neutron_lib import constants
from neutron_lib.db import constants as db_consts
from neutron_lib.services.qos import base
from neutron_lib.services.qos import constants as qos_consts
from oslo_log import log as logging

from neutron.objects import network as network_object

LOG = logging.getLogger(__name__)

DRIVER = None

SUPPORTED_RULES = {
    qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: {
        qos_consts.MAX_KBPS: {
            'type:range': [0, db_consts.DB_INTEGER_MAX_VALUE]},
        qos_consts.MAX_BURST: {
            'type:range': [0, db_consts.DB_INTEGER_MAX_VALUE]},
        qos_consts.DIRECTION: {
            'type:values': constants.VALID_DIRECTIONS}
    },
    qos_consts.RULE_TYPE_DSCP_MARKING: {
        qos_consts.DSCP_MARK: {'type:values': constants.VALID_DSCP_MARKS}
    },
    qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH: {
        qos_consts.MIN_KBPS: {
            'type:range': [0, db_consts.DB_INTEGER_MAX_VALUE]},
        qos_consts.DIRECTION: {'type:values': constants.VALID_DIRECTIONS}
    }
}


class OVSDriver(base.DriverBase):

    @staticmethod
    def create():
        return OVSDriver(
            name='openvswitch',
            vif_types=[portbindings.VIF_TYPE_OVS,
                       portbindings.VIF_TYPE_VHOST_USER],
            vnic_types=[portbindings.VNIC_NORMAL,
                        portbindings.VNIC_DIRECT],
            supported_rules=SUPPORTED_RULES,
            requires_rpc_notifications=True)

    def validate_rule_for_port(self, context, rule, port):
        # Minimum-bandwidth rule is only supported on networks whose
        # first segment is backed by a physnet.
        if rule.rule_type == qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH:
            net = network_object.Network.get_object(
                context, id=port.network_id)
            physnet = net.segments[0].physical_network
            if physnet is None:
                return False
        return True


def register():
    """Register the driver."""
    global DRIVER
    if not DRIVER:
        DRIVER = OVSDriver.create()
    LOG.debug('Open vSwitch QoS driver registered')


# File: neutron-16.0.0.0b2.dev214/neutron/services/qos/drivers/ovn/__init__.py (empty)
# File: neutron-16.0.0.0b2.dev214/neutron/services/qos/drivers/ovn/driver.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
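# --- Illustrative sketch (not part of the tree) -----------------------------
# QosServiceDriverManager.supported_rule_types (manager.py above) exposes
# only the common subset of rule types across all loaded drivers; the real
# property starts from qos_consts.VALID_RULE_TYPES and intersects each
# driver's SUPPORTED_RULES keys. The standalone snippet below restates that
# set intersection with plain dicts shaped like the linuxbridge and
# openvswitch SUPPORTED_RULES maps; the literal rule-type strings are
# simplifications used for illustration only.

LINUXBRIDGE_RULES = {'bandwidth_limit': {}, 'dscp_marking': {}}
OVS_RULES = {'bandwidth_limit': {}, 'dscp_marking': {},
             'minimum_bandwidth': {}}


def common_rule_types(all_driver_rules):
    """Intersect rule types the way supported_rule_types does."""
    if not all_driver_rules:
        return set()
    rule_types = set(all_driver_rules[0])
    for supported in all_driver_rules[1:]:
        rule_types &= set(supported)
    return rule_types


# minimum_bandwidth drops out because linuxbridge does not support it.
assert common_rule_types([LINUXBRIDGE_RULES, OVS_RULES]) == {
    'bandwidth_limit', 'dscp_marking'}
# -----------------------------------------------------------------------------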
from neutron.objects.qos import policy as qos_policy from neutron.objects.qos import rule as qos_rule from neutron_lib.api.definitions import portbindings from neutron_lib import constants from neutron_lib import context as n_context from neutron_lib.db import constants as db_consts from neutron_lib.plugins import directory from neutron_lib.services.qos import base from neutron_lib.services.qos import constants as qos_consts from oslo_config import cfg from oslo_log import log as logging from neutron.common.ovn import utils LOG = logging.getLogger(__name__) OVN_QOS = 'qos' SUPPORTED_RULES = { qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: { qos_consts.MAX_KBPS: { 'type:range': [0, db_consts.DB_INTEGER_MAX_VALUE]}, qos_consts.MAX_BURST: { 'type:range': [0, db_consts.DB_INTEGER_MAX_VALUE]}, qos_consts.DIRECTION: { 'type:values': constants.VALID_DIRECTIONS}, }, qos_consts.RULE_TYPE_DSCP_MARKING: { qos_consts.DSCP_MARK: {'type:values': constants.VALID_DSCP_MARKS}, } } VIF_TYPES = [portbindings.VIF_TYPE_OVS, portbindings.VIF_TYPE_VHOST_USER] VNIC_TYPES = [portbindings.VNIC_NORMAL] class OVNQosNotificationDriver(base.DriverBase): """OVN notification driver for QoS.""" def __init__(self, name='OVNQosDriver', vif_types=VIF_TYPES, vnic_types=VNIC_TYPES, supported_rules=SUPPORTED_RULES, requires_rpc_notifications=False): super(OVNQosNotificationDriver, self).__init__( name, vif_types, vnic_types, supported_rules, requires_rpc_notifications) @classmethod def create(cls, plugin_driver): cls._driver = plugin_driver return cls() @property def is_loaded(self): return OVN_QOS in cfg.CONF.ml2.extension_drivers def create_policy(self, context, policy): # No need to update OVN on create pass def update_policy(self, context, policy): # Call into OVN client to update the policy self._driver._ovn_client._qos_driver.update_policy(context, policy) def delete_policy(self, context, policy): # No need to update OVN on delete pass class OVNQosDriver(object): """Qos driver for OVN""" def __init__(self, driver): LOG.info("Starting OVNQosDriver") super(OVNQosDriver, self).__init__() self._driver = driver self._plugin_property = None @property def _plugin(self): if self._plugin_property is None: self._plugin_property = directory.get_plugin() return self._plugin_property def _generate_port_options(self, context, policy_id): if policy_id is None: return {} options = {} # The policy might not have any rules all_rules = qos_rule.get_rules(qos_policy.QosPolicy, context, policy_id) for rule in all_rules: if isinstance(rule, qos_rule.QosBandwidthLimitRule): options['qos_max_rate'] = rule.max_kbps if rule.max_burst_kbps: options['qos_burst'] = rule.max_burst_kbps options['direction'] = rule.direction if isinstance(rule, qos_rule.QosDscpMarkingRule): options['dscp_mark'] = rule.dscp_mark options['direction'] = constants.EGRESS_DIRECTION return options def get_qos_options(self, port): # Is qos service enabled if 'qos_policy_id' not in port: return {} # Don't apply qos rules to network devices if utils.is_network_device_port(port): return {} # Determine if port or network policy should be used context = n_context.get_admin_context() port_policy_id = port.get('qos_policy_id') network_policy_id = None if not port_policy_id: network_policy = qos_policy.QosPolicy.get_network_policy( context, port['network_id']) network_policy_id = network_policy.id if network_policy else None # Generate qos options for the selected policy policy_id = port_policy_id or network_policy_id return self._generate_port_options(context, policy_id) def 
_update_network_ports(self, context, network_id, options):
        # Retrieve all ports for this network
        ports = self._plugin.get_ports(context,
                                       filters={'network_id': [network_id]})
        for port in ports:
            # Don't apply qos rules if port has a policy
            port_policy_id = port.get('qos_policy_id')
            if port_policy_id:
                continue
            # Don't apply qos rules to network devices
            if utils.is_network_device_port(port):
                continue
            # Call into OVN client to update port
            self._driver.update_port(port, qos_options=options)

    def update_network(self, network):
        # Is qos service enabled
        if 'qos_policy_id' not in network:
            return
        # Update the qos options on each network port
        context = n_context.get_admin_context()
        options = self._generate_port_options(
            context, network['qos_policy_id'])
        self._update_network_ports(context, network.get('id'), options)

    def update_policy(self, context, policy):
        options = self._generate_port_options(context, policy.id)
        # Update each network bound to this policy
        network_bindings = policy.get_bound_networks()
        for network_id in network_bindings:
            self._update_network_ports(context, network_id, options)
        # Update each port bound to this policy
        port_bindings = policy.get_bound_ports()
        for port_id in port_bindings:
            port = self._plugin.get_port(context, port_id)
            self._driver.update_port(port, qos_options=options)


# File: neutron-16.0.0.0b2.dev214/neutron/services/qos/drivers/sriov/__init__.py (empty)
# File: neutron-16.0.0.0b2.dev214/neutron/services/qos/drivers/sriov/driver.py
# Copyright (c) 2016 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
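# --- Illustrative sketch (not part of the tree) -----------------------------
# OVNQosDriver._generate_port_options (ovn/driver.py above) folds the rules
# of a QoS policy into a flat options dict for the OVN client. The stub
# below mimics that translation with simple in-memory rule objects, so the
# resulting mapping can be inspected without a database; the Toy* class
# names are hypothetical.

class ToyBandwidthLimitRule(object):
    def __init__(self, max_kbps, max_burst_kbps, direction):
        self.max_kbps = max_kbps
        self.max_burst_kbps = max_burst_kbps
        self.direction = direction


class ToyDscpMarkingRule(object):
    def __init__(self, dscp_mark):
        self.dscp_mark = dscp_mark


def toy_port_options(rules):
    """Mirror the rule-to-options folding of _generate_port_options."""
    options = {}
    for rule in rules:
        if isinstance(rule, ToyBandwidthLimitRule):
            options['qos_max_rate'] = rule.max_kbps
            if rule.max_burst_kbps:
                options['qos_burst'] = rule.max_burst_kbps
            options['direction'] = rule.direction
        elif isinstance(rule, ToyDscpMarkingRule):
            options['dscp_mark'] = rule.dscp_mark
            options['direction'] = 'egress'  # DSCP marking is egress-only
    return options


print(toy_port_options([ToyBandwidthLimitRule(1000, 800, 'egress'),
                        ToyDscpMarkingRule(26)]))
# {'qos_max_rate': 1000, 'qos_burst': 800, 'direction': 'egress',
#  'dscp_mark': 26}
# -----------------------------------------------------------------------------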
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants
from neutron_lib.db import constants as db_consts
from neutron_lib.services.qos import base
from neutron_lib.services.qos import constants as qos_consts
from oslo_log import log as logging

LOG = logging.getLogger(__name__)

DRIVER = None

SUPPORTED_RULES = {
    qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: {
        qos_consts.MAX_KBPS: {
            'type:range': [0, db_consts.DB_INTEGER_MAX_VALUE]},
        qos_consts.MAX_BURST: {
            'type:range': [0, db_consts.DB_INTEGER_MAX_VALUE]},
        qos_consts.DIRECTION: {
            'type:values': [constants.EGRESS_DIRECTION]}
    },
    qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH: {
        qos_consts.MIN_KBPS: {
            'type:range': [0, db_consts.DB_INTEGER_MAX_VALUE]},
        qos_consts.DIRECTION: {'type:values': constants.VALID_DIRECTIONS}
    }
}


class SRIOVNICSwitchDriver(base.DriverBase):

    @staticmethod
    def create():
        return SRIOVNICSwitchDriver(
            name='sriovnicswitch',
            vif_types=[portbindings.VIF_TYPE_HW_VEB],
            vnic_types=[portbindings.VNIC_DIRECT,
                        portbindings.VNIC_MACVTAP],
            supported_rules=SUPPORTED_RULES,
            requires_rpc_notifications=True)


def register():
    """Register the driver."""
    global DRIVER
    if not DRIVER:
        DRIVER = SRIOVNICSwitchDriver.create()
    LOG.debug('SR-IOV NIC Switch QoS driver registered')


# File: neutron-16.0.0.0b2.dev214/neutron/services/qos/qos_plugin.py
# Copyright (c) 2015 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
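# --- Illustrative sketch (not part of the tree) -----------------------------
# supported_rule_type_details (manager.py above) turns the validator-style
# entries of each driver's SUPPORTED_RULES into API-friendly parameter
# descriptions via _parse_parameter_values: a 'type:range' validator becomes
# a start/end dict, and 'type:values' becomes a list of choices. Below is a
# standalone restatement of that translation, with plain strings standing in
# for the neutron_lib VALUES_TYPE_* constants.

def parse_parameter_values(parameter_values):
    """Translate one SUPPORTED_RULES validator entry, as manager.py does."""
    validator, possible_values = list(parameter_values.items())[0]
    if validator == 'type:range':
        return ({'start': possible_values[0], 'end': possible_values[1]},
                'range')
    if validator == 'type:values':
        return possible_values, 'choices'
    # Note: the in-tree helper has no fallback branch for other validators.
    raise ValueError('unknown validator %s' % validator)


assert parse_parameter_values({'type:range': [0, 2147483647]}) == (
    {'start': 0, 'end': 2147483647}, 'range')
assert parse_parameter_values({'type:values': ['ingress', 'egress']}) == (
    ['ingress', 'egress'], 'choices')
# -----------------------------------------------------------------------------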
from neutron_lib.api.definitions import port as port_def from neutron_lib.api.definitions import port_resource_request from neutron_lib.api.definitions import portbindings from neutron_lib.api.definitions import qos as qos_apidef from neutron_lib.api.definitions import qos_bw_limit_direction from neutron_lib.api.definitions import qos_bw_minimum_ingress from neutron_lib.api.definitions import qos_default from neutron_lib.api.definitions import qos_port_network_policy from neutron_lib.api.definitions import qos_rule_type_details from neutron_lib.api.definitions import qos_rules_alias from neutron_lib.callbacks import events as callbacks_events from neutron_lib.callbacks import registry as callbacks_registry from neutron_lib.callbacks import resources as callbacks_resources from neutron_lib import constants as nl_constants from neutron_lib import context from neutron_lib.db import api as db_api from neutron_lib.db import resource_extend from neutron_lib import exceptions as lib_exc from neutron_lib.exceptions import qos as qos_exc from neutron_lib.placement import constants as pl_constants from neutron_lib.placement import utils as pl_utils from neutron_lib.services.qos import constants as qos_consts from neutron._i18n import _ from neutron.db import db_base_plugin_common from neutron.extensions import qos from neutron.objects import base as base_obj from neutron.objects import network as network_object from neutron.objects import ports as ports_object from neutron.objects.qos import policy as policy_object from neutron.objects.qos import qos_policy_validator as checker from neutron.objects.qos import rule_type as rule_type_object from neutron.services.qos.drivers import manager @resource_extend.has_resource_extenders class QoSPlugin(qos.QoSPluginBase): """Implementation of the Neutron QoS Service Plugin. This class implements a Quality of Service plugin that provides quality of service parameters over ports and networks. """ supported_extension_aliases = [qos_apidef.ALIAS, qos_bw_limit_direction.ALIAS, qos_default.ALIAS, qos_rule_type_details.ALIAS, port_resource_request.ALIAS, qos_bw_minimum_ingress.ALIAS, qos_rules_alias.ALIAS, qos_port_network_policy.ALIAS] __native_pagination_support = True __native_sorting_support = True __filter_validation_support = True def __init__(self): super(QoSPlugin, self).__init__() self.driver_manager = manager.QosServiceDriverManager() callbacks_registry.subscribe( self._validate_create_port_callback, callbacks_resources.PORT, callbacks_events.PRECOMMIT_CREATE) callbacks_registry.subscribe( self._validate_update_port_callback, callbacks_resources.PORT, callbacks_events.PRECOMMIT_UPDATE) callbacks_registry.subscribe( self._validate_update_network_callback, callbacks_resources.NETWORK, callbacks_events.PRECOMMIT_UPDATE) @staticmethod @resource_extend.extends([port_def.COLLECTION_NAME]) def _extend_port_resource_request(port_res, port_db): """Add resource request to a port.""" if isinstance(port_db, ports_object.Port): qos_id = port_db.qos_policy_id or port_db.qos_network_policy_id else: qos_id = None if port_db.get('qos_policy_binding'): qos_id = port_db.qos_policy_binding.policy_id elif port_db.get('qos_network_policy_binding'): qos_id = port_db.qos_network_policy_binding.policy_id port_res['resource_request'] = None if not qos_id: return port_res qos_policy = policy_object.QosPolicy.get_object( context.get_admin_context(), id=qos_id) resources = {} # NOTE(ralonsoh): we should move this translation dict to n-lib. 
rule_direction_class = { nl_constants.INGRESS_DIRECTION: pl_constants.CLASS_NET_BW_INGRESS_KBPS, nl_constants.EGRESS_DIRECTION: pl_constants.CLASS_NET_BW_EGRESS_KBPS } for rule in qos_policy.rules: if rule.rule_type == qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH: resources[rule_direction_class[rule.direction]] = rule.min_kbps if not resources: return port_res # NOTE(ralonsoh): we should not rely on the current execution order of # the port extending functions. Although here we have # port_res[VNIC_TYPE], we should retrieve this value from the port DB # object instead. vnic_trait = pl_utils.vnic_type_trait( port_res[portbindings.VNIC_TYPE]) # TODO(lajoskatona): Change to handle all segments when any traits # support will be available. See Placement spec: # https://review.opendev.org/565730 first_segment = network_object.NetworkSegment.get_objects( context.get_admin_context(), network_id=port_db.network_id)[0] if not first_segment or not first_segment.physical_network: return port_res physnet_trait = pl_utils.physnet_trait( first_segment.physical_network) port_res['resource_request'] = { 'required': [physnet_trait, vnic_trait], 'resources': resources} return port_res def _get_ports_with_policy(self, context, policy): networks_ids = policy.get_bound_networks() ports_with_net_policy = ports_object.Port.get_objects( context, network_id=networks_ids) # Filter only this ports which don't have overwritten policy ports_with_net_policy = [ port for port in ports_with_net_policy if port.qos_policy_id is None ] ports_ids = policy.get_bound_ports() ports_with_policy = ports_object.Port.get_objects( context, id=ports_ids) return list(set(ports_with_policy + ports_with_net_policy)) def _validate_create_port_callback(self, resource, event, trigger, **kwargs): context = kwargs['context'] port_id = kwargs['port']['id'] port = ports_object.Port.get_object(context, id=port_id) policy_id = port.qos_policy_id or port.qos_network_policy_id if policy_id is None: return policy = policy_object.QosPolicy.get_object( context.elevated(), id=policy_id) self.validate_policy_for_port(context, policy, port) def _validate_update_port_callback(self, resource, event, trigger, payload=None): context = payload.context original_policy_id = payload.states[0].get( qos_consts.QOS_POLICY_ID) policy_id = payload.desired_state.get(qos_consts.QOS_POLICY_ID) if policy_id is None or policy_id == original_policy_id: return updated_port = ports_object.Port.get_object( context, id=payload.desired_state['id']) policy = policy_object.QosPolicy.get_object( context.elevated(), id=policy_id) self.validate_policy_for_port(context, policy, updated_port) def _validate_update_network_callback(self, resource, event, trigger, payload=None): context = payload.context original_network = payload.states[0] updated_network = payload.desired_state original_policy_id = original_network.get(qos_consts.QOS_POLICY_ID) policy_id = updated_network.get(qos_consts.QOS_POLICY_ID) if policy_id is None or policy_id == original_policy_id: return policy = policy_object.QosPolicy.get_object( context.elevated(), id=policy_id) ports = ports_object.Port.get_objects( context, network_id=updated_network['id']) # Filter only this ports which don't have overwritten policy ports = [ port for port in ports if port.qos_policy_id is None ] self.validate_policy_for_ports(context, policy, ports) def validate_policy(self, context, policy): ports = self._get_ports_with_policy(context, policy) self.validate_policy_for_ports(context, policy, ports) def validate_policy_for_ports(self, 
context, policy, ports): for port in ports: self.validate_policy_for_port(context, policy, port) def validate_policy_for_port(self, context, policy, port): for rule in policy.rules: if not self.driver_manager.validate_rule_for_port( context, rule, port): raise qos_exc.QosRuleNotSupported(rule_type=rule.rule_type, port_id=port['id']) def reject_min_bw_rule_updates(self, context, policy): ports = self._get_ports_with_policy(context, policy) for port in ports: # NOTE(bence romsics): In some cases the presence of # 'binding:profile.allocation' is a more precise marker than # 'device_owner' about when we have to reject min-bw related # policy/rule updates. However 'binding:profile.allocation' cannot # be used in a generic way here. Consider the case when the first # min-bw rule is added to a policy having ports in-use. Those ports # will not have 'binding:profile.allocation', but this policy # update must be rejected. if (port.device_owner is not None and port.device_owner.startswith( nl_constants.DEVICE_OWNER_COMPUTE_PREFIX)): raise NotImplementedError(_( 'Cannot update QoS policies/rules backed by resources ' 'tracked in Placement')) @db_base_plugin_common.convert_result_to_dict def create_policy(self, context, policy): """Create a QoS policy. :param context: neutron api request context :type context: neutron_lib.context.Context :param policy: policy data to be applied :type policy: dict :returns: a QosPolicy object """ # NOTE(dasm): body 'policy' contains both tenant_id and project_id # but only latter needs to be used to create QosPolicy object. # We need to remove redundant keyword. # This cannot be done in other place of stacktrace, because neutron # needs to be backward compatible. tenant_id = policy['policy'].pop('tenant_id', None) if not policy['policy'].get('project_id'): policy['policy']['project_id'] = tenant_id policy_obj = policy_object.QosPolicy(context, **policy['policy']) with db_api.CONTEXT_WRITER.using(context): policy_obj.create() self.driver_manager.call(qos_consts.CREATE_POLICY_PRECOMMIT, context, policy_obj) self.driver_manager.call(qos_consts.CREATE_POLICY, context, policy_obj) return policy_obj @db_base_plugin_common.convert_result_to_dict def update_policy(self, context, policy_id, policy): """Update a QoS policy. :param context: neutron api request context :type context: neutron.context.Context :param policy_id: the id of the QosPolicy to update :param policy_id: str uuid :param policy: new policy data to be applied :type policy: dict :returns: a QosPolicy object """ policy_data = policy['policy'] with db_api.CONTEXT_WRITER.using(context): policy_obj = policy_object.QosPolicy.get_policy_obj( context, policy_id) policy_obj.update_fields(policy_data, reset_changes=True) policy_obj.update() self.driver_manager.call(qos_consts.UPDATE_POLICY_PRECOMMIT, context, policy_obj) self.driver_manager.call(qos_consts.UPDATE_POLICY, context, policy_obj) return policy_obj def delete_policy(self, context, policy_id): """Delete a QoS policy. 
:param context: neutron api request context :type context: neutron.context.Context :param policy_id: the id of the QosPolicy to delete :type policy_id: str uuid :returns: None """ with db_api.CONTEXT_WRITER.using(context): policy = policy_object.QosPolicy(context) policy.id = policy_id policy.delete() self.driver_manager.call(qos_consts.DELETE_POLICY_PRECOMMIT, context, policy) self.driver_manager.call(qos_consts.DELETE_POLICY, context, policy) @db_base_plugin_common.filter_fields @db_base_plugin_common.convert_result_to_dict def get_policy(self, context, policy_id, fields=None): """Get a QoS policy. :param context: neutron api request context :type context: neutron.context.Context :param policy_id: the id of the QosPolicy to update :type policy_id: str uuid :returns: a QosPolicy object """ return policy_object.QosPolicy.get_policy_obj(context, policy_id) @db_base_plugin_common.filter_fields @db_base_plugin_common.convert_result_to_dict def get_policies(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """Get QoS policies. :param context: neutron api request context :type context: neutron.context.Context :param filters: search criteria :type filters: dict :returns: QosPolicy objects meeting the search criteria """ filters = filters or dict() pager = base_obj.Pager(sorts, limit, page_reverse, marker) return policy_object.QosPolicy.get_objects(context, _pager=pager, **filters) @db_base_plugin_common.filter_fields @db_base_plugin_common.convert_result_to_dict def get_rule_type(self, context, rule_type_name, fields=None): if not context.is_admin: raise lib_exc.NotAuthorized() return rule_type_object.QosRuleType.get_object(rule_type_name) @db_base_plugin_common.filter_fields @db_base_plugin_common.convert_result_to_dict def get_rule_types(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): if not filters: filters = {} return rule_type_object.QosRuleType.get_objects(**filters) def supported_rule_type_details(self, rule_type_name): return self.driver_manager.supported_rule_type_details(rule_type_name) @property def supported_rule_types(self): return self.driver_manager.supported_rule_types @db_base_plugin_common.convert_result_to_dict def create_policy_rule(self, context, rule_cls, policy_id, rule_data): """Create a QoS policy rule. :param context: neutron api request context :type context: neutron.context.Context :param rule_cls: the rule object class :type rule_cls: a class from the rule_object (qos.objects.rule) module :param policy_id: the id of the QosPolicy for which to create the rule :type policy_id: str uuid :param rule_data: the rule data to be applied :type rule_data: dict :returns: a QoS policy rule object """ rule_type = rule_cls.rule_type rule_data = rule_data[rule_type + '_rule'] with db_api.CONTEXT_WRITER.using(context): # Ensure that we have access to the policy. 
policy = policy_object.QosPolicy.get_policy_obj(context, policy_id) checker.check_bandwidth_rule_conflict(policy, rule_data) rule = rule_cls(context, qos_policy_id=policy_id, **rule_data) checker.check_rules_conflict(policy, rule) rule.create() policy.obj_load_attr('rules') self.validate_policy(context, policy) if rule.rule_type == qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH: self.reject_min_bw_rule_updates(context, policy) self.driver_manager.call(qos_consts.UPDATE_POLICY_PRECOMMIT, context, policy) self.driver_manager.call(qos_consts.UPDATE_POLICY, context, policy) return rule @db_base_plugin_common.convert_result_to_dict def update_policy_rule(self, context, rule_cls, rule_id, policy_id, rule_data): """Update a QoS policy rule. :param context: neutron api request context :type context: neutron.context.Context :param rule_cls: the rule object class :type rule_cls: a class from the rule_object (qos.objects.rule) module :param rule_id: the id of the QoS policy rule to update :type rule_id: str uuid :param policy_id: the id of the rule's policy :type policy_id: str uuid :param rule_data: the new rule data to update :type rule_data: dict :returns: a QoS policy rule object """ rule_type = rule_cls.rule_type rule_data = rule_data[rule_type + '_rule'] with db_api.CONTEXT_WRITER.using(context): # Ensure we have access to the policy. policy = policy_object.QosPolicy.get_policy_obj(context, policy_id) # Ensure the rule belongs to the policy. checker.check_bandwidth_rule_conflict(policy, rule_data) rule = policy.get_rule_by_id(rule_id) rule.update_fields(rule_data, reset_changes=True) checker.check_rules_conflict(policy, rule) rule.update() policy.obj_load_attr('rules') self.validate_policy(context, policy) if rule.rule_type == qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH: self.reject_min_bw_rule_updates(context, policy) self.driver_manager.call(qos_consts.UPDATE_POLICY_PRECOMMIT, context, policy) self.driver_manager.call(qos_consts.UPDATE_POLICY, context, policy) return rule def _get_policy_id(self, context, rule_cls, rule_id): with db_api.autonested_transaction(context.session): rule_object = rule_cls.get_object(context, id=rule_id) if not rule_object: raise qos_exc.QosRuleNotFound(policy_id="", rule_id=rule_id) return rule_object.qos_policy_id def update_rule(self, context, rule_cls, rule_id, rule_data): """Update a QoS policy rule alias. This method processes a QoS policy rule update, where the rule is an API first level resource instead of a subresource of a policy. :param context: neutron api request context :type context: neutron.context.Context :param rule_cls: the rule object class :type rule_cls: a class from the rule_object (qos.objects.rule) module :param rule_id: the id of the QoS policy rule to update :type rule_id: str uuid :param rule_data: the new rule data to update :type rule_data: dict :returns: a QoS policy rule object :raises: qos_exc.QosRuleNotFound """ policy_id = self._get_policy_id(context, rule_cls, rule_id) rule_data_name = rule_cls.rule_type + '_rule' alias_rule_data_name = 'alias_' + rule_data_name rule_data[rule_data_name] = rule_data.pop(alias_rule_data_name) return self.update_policy_rule(context, rule_cls, rule_id, policy_id, rule_data) def delete_policy_rule(self, context, rule_cls, rule_id, policy_id): """Delete a QoS policy rule. 
:param context: neutron api request context :type context: neutron.context.Context :param rule_cls: the rule object class :type rule_cls: a class from the rule_object (qos.objects.rule) module :param rule_id: the id of the QosPolicy Rule to delete :type rule_id: str uuid :param policy_id: the id of the rule's policy :type policy_id: str uuid :returns: None """ with db_api.CONTEXT_WRITER.using(context): # Ensure we have access to the policy. policy = policy_object.QosPolicy.get_policy_obj(context, policy_id) rule = policy.get_rule_by_id(rule_id) rule.delete() policy.obj_load_attr('rules') if rule.rule_type == qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH: self.reject_min_bw_rule_updates(context, policy) self.driver_manager.call(qos_consts.UPDATE_POLICY_PRECOMMIT, context, policy) self.driver_manager.call(qos_consts.UPDATE_POLICY, context, policy) def delete_rule(self, context, rule_cls, rule_id): """Delete a QoS policy rule alias. This method processes a QoS policy rule delete, where the rule is an API first level resource instead of a subresource of a policy. :param context: neutron api request context :type context: neutron.context.Context :param rule_cls: the rule object class :type rule_cls: a class from the rule_object (qos.objects.rule) module :param rule_id: the id of the QosPolicy Rule to delete :type rule_id: str uuid :returns: None :raises: qos_exc.QosRuleNotFound """ policy_id = self._get_policy_id(context, rule_cls, rule_id) return self.delete_policy_rule(context, rule_cls, rule_id, policy_id) @db_base_plugin_common.filter_fields @db_base_plugin_common.convert_result_to_dict def get_policy_rule(self, context, rule_cls, rule_id, policy_id, fields=None): """Get a QoS policy rule. :param context: neutron api request context :type context: neutron.context.Context :param rule_cls: the rule object class :type rule_cls: a class from the rule_object (qos.objects.rule) module :param rule_id: the id of the QoS policy rule to get :type rule_id: str uuid :param policy_id: the id of the rule's policy :type policy_id: str uuid :returns: a QoS policy rule object :raises: qos_exc.QosRuleNotFound """ with db_api.CONTEXT_READER.using(context): # Ensure we have access to the policy. policy_object.QosPolicy.get_policy_obj(context, policy_id) rule = rule_cls.get_object(context, id=rule_id) if not rule: raise qos_exc.QosRuleNotFound(policy_id=policy_id, rule_id=rule_id) return rule def get_rule(self, context, rule_cls, rule_id, fields=None): """Get a QoS policy rule alias. This method processes a QoS policy rule get, where the rule is an API first level resource instead of a subresource of a policy :param context: neutron api request context :type context: neutron.context.Context :param rule_cls: the rule object class :type rule_cls: a class from the rule_object (qos.objects.rule) module :param rule_id: the id of the QoS policy rule to get :type rule_id: str uuid :returns: a QoS policy rule object :raises: qos_exc.QosRuleNotFound """ policy_id = self._get_policy_id(context, rule_cls, rule_id) return self.get_policy_rule(context, rule_cls, rule_id, policy_id) # TODO(QoS): enforce rule types when accessing rule objects @db_base_plugin_common.filter_fields @db_base_plugin_common.convert_result_to_dict def get_policy_rules(self, context, rule_cls, policy_id, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """Get QoS policy rules. 
        :param context: neutron api request context
        :type context: neutron.context.Context
        :param rule_cls: the rule object class
        :type rule_cls: a class from the rule_object (qos.objects.rule)
                        module
        :param policy_id: the id of the QosPolicy for which to get rules
        :type policy_id: str uuid

        :returns: QoS policy rule objects meeting the search criteria
        """
        with db_api.CONTEXT_READER.using(context):
            # Ensure we have access to the policy.
            policy_object.QosPolicy.get_policy_obj(context, policy_id)
            filters = filters or dict()
            filters[qos_consts.QOS_POLICY_ID] = policy_id
            pager = base_obj.Pager(sorts, limit, page_reverse, marker)
            return rule_cls.get_objects(context, _pager=pager, **filters)


# File: neutron-16.0.0.0b2.dev214/neutron/services/rbac/__init__.py (empty)
# File: neutron-16.0.0.0b2.dev214/neutron/services/revisions/__init__.py (empty)
# File: neutron-16.0.0.0b2.dev214/neutron/services/revisions/revision_plugin.py
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
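# --- Illustrative sketch (not part of the tree) -----------------------------
# create_policy_rule/update_policy_rule (qos_plugin.py above) unwrap the
# request body under the key '<rule_type>_rule', and update_rule maps the
# first-level alias form 'alias_<rule_type>_rule' back onto that key before
# delegating. The snippet below shows those payload shapes with plain
# dicts; the concrete field values are examples only.

rule_type = 'bandwidth_limit'

# Body shape accepted by create_policy_rule()/update_policy_rule():
rule_data = {rule_type + '_rule': {'max_kbps': 1000, 'max_burst_kbps': 800}}

# Body shape accepted by the first-level alias resource, see update_rule():
alias_rule_data = {'alias_' + rule_type + '_rule':
                   {'max_kbps': 1000, 'max_burst_kbps': 800}}

# update_rule() renames the alias key before delegating:
alias_rule_data[rule_type + '_rule'] = alias_rule_data.pop(
    'alias_' + rule_type + '_rule')
assert alias_rule_data == rule_data
# -----------------------------------------------------------------------------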
from neutron_lib.api.definitions import revisionifmatch from neutron_lib.db import api as db_api from neutron_lib.db import resource_extend from neutron_lib.services import base as service_base from oslo_log import log as logging import sqlalchemy from sqlalchemy.orm import exc from sqlalchemy.orm import session as se import webob.exc from neutron._i18n import _ from neutron.db import standard_attr LOG = logging.getLogger(__name__) @resource_extend.has_resource_extenders class RevisionPlugin(service_base.ServicePluginBase): """Plugin to populate revision numbers into standard attr resources.""" supported_extension_aliases = ['standard-attr-revisions', revisionifmatch.ALIAS] __filter_validation_support = True def __init__(self): super(RevisionPlugin, self).__init__() # background on these event hooks: # https://docs.sqlalchemy.org/en/latest/orm/session_events.html db_api.sqla_listen(se.Session, 'before_flush', self.bump_revisions) db_api.sqla_listen( se.Session, "after_flush_postexec", self._emit_related_revision_bumps) db_api.sqla_listen(se.Session, 'after_commit', self._clear_rev_bumped_flags) db_api.sqla_listen(se.Session, 'after_rollback', self._clear_rev_bumped_flags) def bump_revisions(self, session, context, instances): self._enforce_if_match_constraints(session) # bump revision number for any updated objects in the session self._bump_obj_revisions( session, [ obj for obj in session.dirty if isinstance(obj, standard_attr.HasStandardAttributes)] ) # see if any created/updated/deleted objects bump the revision # of another object objects_with_related_revisions = [ o for o in session.deleted | session.dirty | session.new if getattr(o, 'revises_on_change', ()) ] collected = session.info.setdefault('_related_bumped', set()) self._collect_related_tobump( session, objects_with_related_revisions, collected) def _emit_related_revision_bumps(self, session, context): # within after_flush_postexec, emit an UPDATE statement to increment # revision flags for related objects that were located in the # before_flush phase. # # note that this event isn't called if the flush fails; # in that case, the transaction is rolled back and the # after_rollback event will invoke self._clear_rev_bumped_flags # to clean out state. collected = session.info.get('_related_bumped', None) if collected: try: self._bump_obj_revisions( session, collected, version_check=False) finally: collected.clear() def _collect_related_tobump(self, session, objects, collected): for obj in objects: if obj in collected: continue for revises_col in getattr(obj, 'revises_on_change', ()): related_obj = self._find_related_obj(obj, revises_col) if not related_obj: LOG.warning("Could not find related %(col)s for " "resource %(obj)s to bump revision.", {'obj': obj, 'col': revises_col}) continue # if related object revises others, bump those as well self._collect_related_tobump(session, [related_obj], collected) # no need to bump revisions on related objects being deleted if related_obj not in session.deleted: collected.add(related_obj) return collected def get_plugin_type(self): return "revision_plugin" def get_plugin_description(self): return "Adds revision numbers to resources." @staticmethod @resource_extend.extends( list(standard_attr.get_standard_attr_resource_model_map())) def extend_resource_dict_revision(resource_res, resource_db): resource_res['revision_number'] = resource_db.revision_number def _find_related_obj(self, obj, relationship_col): """Gets a related object off of a relationship. 
Raises a runtime error if the relationship isn't configured correctly for revision bumping. """ # first check to see if it's directly attached to the object already try: related_obj = getattr(obj, relationship_col) except exc.ObjectDeletedError: # object was in session but another writer deleted it return None if related_obj: return related_obj for rel in sqlalchemy.inspect(obj).mapper.relationships: if rel.key != relationship_col: continue if not rel.load_on_pending: raise RuntimeError(_("revises_on_change relationships must " "have load_on_pending set to True to " "bump parent revisions on create: %s") % relationship_col) def _clear_rev_bumped_flags(self, session): """This clears all flags on commit/rollback to enable rev bumps.""" session.info.pop('_related_bumped', None) for inst in session: setattr(inst, '_rev_bumped', False) def _bump_obj_revisions(self, session, objects, version_check=True): """Increment object revisions. If version_check=True, uses SQLAlchemy ORM's compare-and-swap feature (known as "version_id_col" in the ORM mapping), which is part of the StandardAttribute class. If version_check=False, runs an UPDATE statement directly against the set of all StandardAttribute objects at once, without using any compare and swap logic. If a revision number constraint rule was associated with the Session, this is retrieved and each object is tested to see if it matches this condition; if so, the constraint is enforced. """ # filter objects for which we've already bumped the revision to_bump = [ obj for obj in objects if not getattr(obj, '_rev_bumped', False)] if not to_bump: return self._run_constrained_instance_match_check(session, to_bump) if not version_check: # this UPDATE statement could alternatively be written to run # as an UPDATE-per-object with Python-generated revision numbers # as parameters. session.query(standard_attr.StandardAttribute).filter( standard_attr.StandardAttribute.id.in_( [obj._effective_standard_attribute_id for obj in to_bump] ) ).update({ # note that SQLAlchemy runs the onupdate function for # the updated_at column and applies it to the SET clause as # well. standard_attr.StandardAttribute.revision_number: standard_attr.StandardAttribute.revision_number + 1}, synchronize_session=False) # run a SELECT to get back the new values we just generated. # if MySQL supported RETURNING, we could get these numbers # back from the UPDATE without running another SELECT. 
retrieve_revision_numbers = { row.id: (row.revision_number, row.updated_at) for row in session.query( standard_attr.StandardAttribute.id, standard_attr.StandardAttribute.revision_number, standard_attr.StandardAttribute.updated_at, ).filter( standard_attr.StandardAttribute.id.in_( [ obj._effective_standard_attribute_id for obj in to_bump ] ) ) } for obj in to_bump: if version_check: # full version check, run the ORM routine to UPDATE # the row with a WHERE clause obj.bump_revision() else: # no version check - get back what we did in our one-step # UPDATE statement and set it without causing change in # ORM flush state try: new_version_id, new_updated_at = retrieve_revision_numbers[ obj._effective_standard_attribute_id ] except KeyError: # in case the object was deleted concurrently LOG.warning( "No standard attr row found for resource: %(obj)s", {'obj': obj}) else: obj._set_updated_revision_number( new_version_id, new_updated_at) setattr(obj, '_rev_bumped', True) def _run_constrained_instance_match_check(self, session, objects): instance, match = self._get_constrained_instance_match(session) for obj in objects: if instance and instance == obj: # one last check before bumping revision self._enforce_if_match_constraints(session) def _find_instance_by_column_value(self, session, model, column, value): """Lookup object in session or from DB based on a column's value.""" for session_obj in session: if not isinstance(session_obj, model): continue if getattr(session_obj, column) == value: return session_obj # object isn't in session so we have to query for it related_obj = (session.query(model).filter_by(**{column: value}). first()) return related_obj def _get_constrained_instance_match(self, session): """Returns instance and constraint of if-match criterion if present. Checks the context associated with the session for compare-and-swap update revision number constraints. If one is found, this returns the instance that is constrained as well as the requested revision number to match. """ context = session.info.get('using_context') criteria = context.get_transaction_constraint() if context else None if not criteria: return None, None match = criteria.if_revision_match mmap = standard_attr.get_standard_attr_resource_model_map() model = mmap.get(criteria.resource) if not model: msg = _("Revision matching not supported for this resource") raise exc.BadRequest(resource=criteria.resource, msg=msg) instance = self._find_instance_by_column_value( session, model, 'id', criteria.resource_id) return instance, match def _enforce_if_match_constraints(self, session): """Check for if-match constraints and raise exception if violated. We determine the collection being modified and look for any objects of the collection type in the dirty/deleted items in the session. If they don't match the revision_number constraint supplied, we throw an exception. We are protected from a concurrent update because if we match revision number here and another update commits to the database first, the compare and swap of revision_number will fail and a StaleDataError (or deadlock in galera multi-writer) will be raised, at which point this will be retried and fail to match. 
""" instance, match = self._get_constrained_instance_match(session) if not instance or getattr(instance, '_rev_bumped', False): # no constraints present or constrain satisfied in this transaction return if instance.revision_number != match: raise RevisionNumberConstraintFailed(match, instance.revision_number) class RevisionNumberConstraintFailed(webob.exc.HTTPPreconditionFailed): def __init__(self, expected, current): detail = (_("Constrained to %(exp)s, but current revision is %(cur)s") % {'exp': expected, 'cur': current}) super(RevisionNumberConstraintFailed, self).__init__(detail=detail) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3750453 neutron-16.0.0.0b2.dev214/neutron/services/segments/0000755000175000017500000000000000000000000022305 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/segments/__init__.py0000644000175000017500000000000000000000000024404 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/segments/db.py0000644000175000017500000003570600000000000023257 0ustar00coreycorey00000000000000# Copyright 2016 Hewlett Packard Enterprise Development, LP # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib.db import api as db_api from neutron_lib.db import resource_extend from neutron_lib.db import utils as db_utils from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from oslo_concurrency import lockutils from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import helpers as log_helpers from oslo_utils import uuidutils from neutron.db import segments_db as db from neutron.extensions import segment as extension from neutron import manager from neutron.objects import base as base_obj from neutron.objects import network from neutron.services.segments import exceptions _USER_CONFIGURED_SEGMENT_PLUGIN = None def check_user_configured_segment_plugin(): global _USER_CONFIGURED_SEGMENT_PLUGIN # _USER_CONFIGURED_SEGMENT_PLUGIN will contain 3 possible values: # 1. None, this just happens during neutron-server startup. # 2. True, this means that users configure the 'segments' # service plugin in neutron config file. # 3. False, this means that can not find 'segments' service # plugin in neutron config file. # This function just load once to store the result # into _USER_CONFIGURED_SEGMENT_PLUGIN during neutron-server startup. 
if _USER_CONFIGURED_SEGMENT_PLUGIN is None: segment_class = 'neutron.services.segments.plugin.Plugin' _USER_CONFIGURED_SEGMENT_PLUGIN = any( p in cfg.CONF.service_plugins for p in ['segments', segment_class]) return _USER_CONFIGURED_SEGMENT_PLUGIN class SegmentDbMixin(object): """Mixin class to add segment.""" @staticmethod def _make_segment_dict(segment_obj, fields=None): res = {'id': segment_obj['id'], 'network_id': segment_obj['network_id'], 'name': segment_obj['name'], 'description': segment_obj['description'], db.PHYSICAL_NETWORK: segment_obj[db.PHYSICAL_NETWORK], db.NETWORK_TYPE: segment_obj[db.NETWORK_TYPE], db.SEGMENTATION_ID: segment_obj[db.SEGMENTATION_ID], 'hosts': segment_obj['hosts'], 'segment_index': segment_obj['segment_index']} resource_extend.apply_funcs('segments', res, segment_obj.db_obj) return db_utils.resource_fields(res, fields) def _get_segment(self, context, segment_id): segment = network.NetworkSegment.get_object(context, id=segment_id) if not segment: raise exceptions.SegmentNotFound(segment_id=segment_id) return segment @log_helpers.log_method_call def create_segment(self, context, segment): """Create a segment.""" segment = segment['segment'] segment_id = segment.get('id') or uuidutils.generate_uuid() try: new_segment = self._create_segment_db(context, segment_id, segment) except db_exc.DBReferenceError: raise n_exc.NetworkNotFound(net_id=segment['network_id']) registry.notify(resources.SEGMENT, events.AFTER_CREATE, self, context=context, segment=new_segment) return self._make_segment_dict(new_segment) def _create_segment_db(self, context, segment_id, segment): with db_api.CONTEXT_WRITER.using(context): network_id = segment['network_id'] physical_network = segment[extension.PHYSICAL_NETWORK] if physical_network == constants.ATTR_NOT_SPECIFIED: physical_network = None network_type = segment[extension.NETWORK_TYPE] segmentation_id = segment[extension.SEGMENTATION_ID] if segmentation_id == constants.ATTR_NOT_SPECIFIED: segmentation_id = None name = segment['name'] if name == constants.ATTR_NOT_SPECIFIED: name = None description = segment['description'] if description == constants.ATTR_NOT_SPECIFIED: description = None args = {'id': segment_id, 'network_id': network_id, 'name': name, 'description': description, db.PHYSICAL_NETWORK: physical_network, db.NETWORK_TYPE: network_type, db.SEGMENTATION_ID: segmentation_id} # Calculate the index of segment segment_index = 0 segments = self.get_segments( context, filters={'network_id': [network_id]}, fields=['segment_index'], sorts=[('segment_index', True)]) if segments: # NOTE(xiaohhui): The new index is the last index + 1, this # may cause discontinuous segment_index. But segment_index # can functionally work as the order index for segments. segment_index = (segments[-1].get('segment_index') + 1) args['segment_index'] = segment_index new_segment = network.NetworkSegment(context, **args) new_segment.create() # Do some preliminary operations before committing the segment to # db registry.notify( resources.SEGMENT, events.PRECOMMIT_CREATE, self, context=context, segment=new_segment) # The new segment might have been updated by the callbacks # subscribed to the PRECOMMIT_CREATE event. 
So update it in the DB new_segment.update() return new_segment @log_helpers.log_method_call def update_segment(self, context, uuid, segment): """Update an existing segment.""" segment = segment['segment'] with db_api.CONTEXT_WRITER.using(context): curr_segment = self._get_segment(context, uuid) curr_segment.update_fields(segment) curr_segment.update() return self._make_segment_dict(curr_segment) @log_helpers.log_method_call def get_segment(self, context, uuid, fields=None): segment_db = self._get_segment(context, uuid) return self._make_segment_dict(segment_db, fields) @log_helpers.log_method_call def get_segments(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): filters = filters or {} pager = base_obj.Pager(sorts, limit, page_reverse, marker) segment_objs = network.NetworkSegment.get_objects( context, _pager=pager, **filters) return [self._make_segment_dict(obj) for obj in segment_objs] @log_helpers.log_method_call def get_segments_count(self, context, filters=None): filters = filters or {} return network.NetworkSegment.count(context, **filters) @log_helpers.log_method_call def get_segments_by_hosts(self, context, hosts): if not hosts: return [] segment_host_mapping = network.SegmentHostMapping.get_objects( context, host=hosts) return list({mapping.segment_id for mapping in segment_host_mapping}) @log_helpers.log_method_call def delete_segment(self, context, uuid, for_net_delete=False): """Delete an existing segment.""" segment_dict = self.get_segment(context, uuid) # Do some preliminary operations before deleting the segment registry.publish(resources.SEGMENT, events.BEFORE_DELETE, self.delete_segment, payload=events.DBEventPayload( context, metadata={ 'for_net_delete': for_net_delete}, states=(segment_dict,), resource_id=uuid)) # Delete segment in DB with db_api.CONTEXT_WRITER.using(context): if not network.NetworkSegment.delete_objects(context, id=uuid): raise exceptions.SegmentNotFound(segment_id=uuid) # Do some preliminary operations before deleting segment in db registry.notify(resources.SEGMENT, events.PRECOMMIT_DELETE, self.delete_segment, context=context, segment=segment_dict) registry.publish(resources.SEGMENT, events.AFTER_DELETE, self.delete_segment, payload=events.DBEventPayload( context, states=(segment_dict,), resource_id=uuid)) @db_api.retry_if_session_inactive() @lockutils.synchronized('update_segment_host_mapping') def update_segment_host_mapping(context, host, current_segment_ids): with db_api.CONTEXT_WRITER.using(context): segment_host_mapping = network.SegmentHostMapping.get_objects( context, host=host) previous_segment_ids = { seg_host['segment_id'] for seg_host in segment_host_mapping} for segment_id in current_segment_ids - previous_segment_ids: network.SegmentHostMapping( context, segment_id=segment_id, host=host).create() stale_segment_ids = previous_segment_ids - current_segment_ids if stale_segment_ids: for entry in segment_host_mapping: if entry.segment_id in stale_segment_ids: entry.delete() def get_hosts_mapped_with_segments(context): """Get hosts that are mapped with segments. L2 providers can use this method to get an overview of SegmentHostMapping, and then delete the stale SegmentHostMapping. 
""" segment_host_mapping = network.SegmentHostMapping.get_objects(context) return {row.host for row in segment_host_mapping} def _get_phys_nets(agent): configurations_dict = agent.get('configurations', {}) mappings = configurations_dict.get('bridge_mappings', {}) mappings.update(configurations_dict.get('interface_mappings', {})) mappings.update(configurations_dict.get('device_mappings', {})) return list(mappings.keys()) reported_hosts = set() # NOTE: Module level variable of segments plugin. It should be removed once # segments becomes a default plugin. segments_plugin = None def get_segments_with_phys_nets(context, phys_nets): """Get segments from physical networks. L2 providers usually have information of hostname and physical networks. They could use this method to get related segments and then update SegmentHostMapping. """ phys_nets = list(phys_nets) if not phys_nets: return [] with db_api.CONTEXT_READER.using(context): return network.NetworkSegment.get_objects( context, physical_network=phys_nets) def map_segment_to_hosts(context, segment_id, hosts): """Map segment to a collection of hosts.""" with db_api.CONTEXT_WRITER.using(context): for host in hosts: network.SegmentHostMapping( context, segment_id=segment_id, host=host).create() def _update_segment_host_mapping_for_agent(resource, event, trigger, payload=None): plugin = payload.metadata.get('plugin') agent = payload.desired_state host = payload.metadata.get('host') context = payload.context check_segment_for_agent = getattr(plugin, 'check_segment_for_agent', None) if (not check_user_configured_segment_plugin() or not check_segment_for_agent): return phys_nets = _get_phys_nets(agent) if not phys_nets: return start_flag = agent.get('start_flag', None) if host in reported_hosts and not start_flag: return reported_hosts.add(host) segments = get_segments_with_phys_nets(context, phys_nets) current_segment_ids = { segment['id'] for segment in segments if check_segment_for_agent(segment, agent)} update_segment_host_mapping(context, host, current_segment_ids) registry.publish(resources.SEGMENT_HOST_MAPPING, events.AFTER_CREATE, plugin, payload=events.DBEventPayload( context, metadata={ 'host': host, 'current_segment_ids': current_segment_ids})) def _add_segment_host_mapping_for_segment(resource, event, trigger, context, segment): if not context.session.is_active: # The session might be in partial rollback state, due to errors in # peer callback. In that case, there is no need to add the mapping. # Just return here. 
return if not segment.physical_network: return cp = directory.get_plugin() check_segment_for_agent = getattr(cp, 'check_segment_for_agent', None) if not check_user_configured_segment_plugin() or not hasattr( cp, 'get_agents') or not check_segment_for_agent: # not an agent-supporting plugin registry.unsubscribe(_add_segment_host_mapping_for_segment, resources.SEGMENT, events.PRECOMMIT_CREATE) return hosts = {agent['host'] for agent in cp.get_agents(context) if check_segment_for_agent(segment, agent)} map_segment_to_hosts(context, segment.id, hosts) def _delete_segments_for_network(resource, event, trigger, context, network_id): admin_ctx = context.elevated() global segments_plugin if not segments_plugin: segments_plugin = manager.NeutronManager.load_class_for_provider( 'neutron.service_plugins', 'segments')() segments = segments_plugin.get_segments( admin_ctx, filters={'network_id': [network_id]}) for segment in segments: segments_plugin.delete_segment(admin_ctx, segment['id'], for_net_delete=True) def subscribe(): registry.subscribe(_update_segment_host_mapping_for_agent, resources.AGENT, events.AFTER_CREATE) registry.subscribe(_update_segment_host_mapping_for_agent, resources.AGENT, events.AFTER_UPDATE) registry.subscribe(_add_segment_host_mapping_for_segment, resources.SEGMENT, events.PRECOMMIT_CREATE) registry.subscribe(_delete_segments_for_network, resources.NETWORK, events.PRECOMMIT_DELETE) subscribe() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/segments/exceptions.py0000644000175000017500000000553500000000000025050 0ustar00coreycorey00000000000000# Copyright 2016 Hewlett Packard Enterprise Development, LP # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
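# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this tree): the set arithmetic that
# update_segment_host_mapping() above uses to reconcile a host's mappings.
# The helper name and the plain id sets are hypothetical stand-ins for
# SegmentHostMapping objects; only the difference logic mirrors the code.
# ---------------------------------------------------------------------------

def _reconcile_host_mappings(previous_segment_ids, current_segment_ids):
    """Return (to_create, to_delete) segment id sets for one host."""
    to_create = current_segment_ids - previous_segment_ids
    to_delete = previous_segment_ids - current_segment_ids
    return to_create, to_delete

# Host previously mapped to {'a', 'b'}; the agent now reports {'b', 'c'}:
# _reconcile_host_mappings({'a', 'b'}, {'b', 'c'}) == ({'c'}, {'a'})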
from neutron._i18n import _ from neutron_lib import exceptions class SegmentNotFound(exceptions.NotFound): message = _("Segment %(segment_id)s could not be found.") class NoUpdateSubnetWhenMultipleSegmentsOnNetwork(exceptions.BadRequest): message = _("The network '%(network_id)s' has multiple segments, it is " "only possible to associate an existing subnet with a segment " "on networks with a single segment.") class SubnetsNotAllAssociatedWithSegments(exceptions.BadRequest): message = _("All of the subnets on network '%(network_id)s' must either " "all be associated with segments or all not associated with " "any segment.") class SubnetCantAssociateToDynamicSegment(exceptions.BadRequest): message = _("A subnet cannot be associated with a dynamic segment.") class SubnetSegmentAssociationChangeNotAllowed(exceptions.BadRequest): message = _("A subnet that is already associated with a segment cannot " "have its segment association changed.") class NetworkIdsDontMatch(exceptions.BadRequest): message = _("The subnet's network id, '%(subnet_network)s', doesn't match " "the network_id of segment '%(segment_id)s'") class HostConnectedToMultipleSegments(exceptions.Conflict): message = _("Host %(host)s is connected to multiple segments on routed " "provider network '%(network_id)s'. It should be connected " "to one.") class HostNotConnectedToAnySegment(exceptions.Conflict): message = _("Host %(host)s is not connected to any segments on routed " "provider network '%(network_id)s'. It should be connected " "to one.") class HostNotCompatibleWithFixedIps(exceptions.Conflict): message = _("Host %(host)s is not connected to a segment where the " "existing fixed_ips on port %(port_id)s will function given " "the routed network topology.") class SegmentInUse(exceptions.InUse): message = _("Segment '%(segment_id)s' cannot be deleted: %(reason)s.") class FixedIpsSubnetsNotOnSameSegment(exceptions.BadRequest): message = _("Cannot allocate addresses from different segments.") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/segments/plugin.py0000644000175000017500000007352200000000000024166 0ustar00coreycorey00000000000000# Copyright 2016 Hewlett Packard Enterprise Development, LP # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
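# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this tree): how the exception classes in
# exceptions.py above produce their messages. neutron_lib's base exception
# interpolates the class-level ``message`` with the constructor kwargs; the
# tiny re-implementation below is an assumption made for the example, not
# neutron_lib's actual code.
# ---------------------------------------------------------------------------

class _SketchNeutronException(Exception):
    message = "An unknown exception occurred."

    def __init__(self, **kwargs):
        super(_SketchNeutronException, self).__init__(self.message % kwargs)


class _SketchSegmentNotFound(_SketchNeutronException):
    message = "Segment %(segment_id)s could not be found."

# str(_SketchSegmentNotFound(segment_id='seg-1'))
#   -> 'Segment seg-1 could not be found.'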
import copy from keystoneauth1 import loading as ks_loading import netaddr from neutron_lib.api.definitions import ip_allocation as ipalloc_apidef from neutron_lib.api.definitions import l2_adjacency as l2adj_apidef from neutron_lib.api.definitions import network as net_def from neutron_lib.api.definitions import port as port_def from neutron_lib.api.definitions import segment as seg_apidef from neutron_lib.api.definitions import segments_peer_subnet_host_routes from neutron_lib.api.definitions import standard_attr_segment from neutron_lib.api.definitions import subnet as subnet_def from neutron_lib.api.definitions import subnet_segmentid_writable from neutron_lib.api import validators from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib.db import resource_extend from neutron_lib import exceptions as n_exc from neutron_lib.exceptions import placement as placement_exc from neutron_lib.placement import client as placement_client from neutron_lib.plugins import directory from novaclient import client as nova_client from novaclient import exceptions as nova_exc from oslo_config import cfg from oslo_log import log from oslo_utils import excutils from neutron._i18n import _ from neutron.common import ipv6_utils from neutron.db import models_v2 from neutron.extensions import segment from neutron.notifiers import batch_notifier from neutron.objects import network as net_obj from neutron.objects import ports as ports_obj from neutron.objects import subnet as subnet_obj from neutron.services.segments import db from neutron.services.segments import exceptions LOG = log.getLogger(__name__) NOVA_API_VERSION = '2.41' IPV4_RESOURCE_CLASS = 'IPV4_ADDRESS' SEGMENT_NAME_STUB = 'Neutron segment id %s' MAX_INVENTORY_UPDATE_RETRIES = 10 @resource_extend.has_resource_extenders @registry.has_registry_receivers class Plugin(db.SegmentDbMixin, segment.SegmentPluginBase): _instance = None supported_extension_aliases = [seg_apidef.ALIAS, ipalloc_apidef.ALIAS, l2adj_apidef.ALIAS, standard_attr_segment.ALIAS, subnet_segmentid_writable.ALIAS, segments_peer_subnet_host_routes.ALIAS] __native_pagination_support = True __native_sorting_support = True __filter_validation_support = True def __init__(self): self.nova_updater = NovaSegmentNotifier() self.segment_host_routes = SegmentHostRoutes() @staticmethod @resource_extend.extends([net_def.COLLECTION_NAME]) def _extend_network_dict_binding(network_res, network_db): if not directory.get_plugin('segments'): return # TODO(carl_baldwin) Make this work with service subnets when # it's a thing. 
is_adjacent = (not network_db.subnets or not network_db.subnets[0].segment_id) network_res[l2adj_apidef.L2_ADJACENCY] = is_adjacent @staticmethod @resource_extend.extends([subnet_def.COLLECTION_NAME]) def _extend_subnet_dict_binding(subnet_res, subnet_db): subnet_res['segment_id'] = subnet_db.get('segment_id') @staticmethod @resource_extend.extends([port_def.COLLECTION_NAME]) def _extend_port_dict_binding(port_res, port_db): if not directory.get_plugin('segments'): return value = ipalloc_apidef.IP_ALLOCATION_IMMEDIATE if port_db.get('ip_allocation'): value = port_db.get('ip_allocation') port_res[ipalloc_apidef.IP_ALLOCATION] = value @classmethod def get_instance(cls): if cls._instance is None: cls._instance = cls() return cls._instance @registry.receives(resources.SEGMENT, [events.BEFORE_DELETE]) def _prevent_segment_delete_with_subnet_associated( self, resource, event, trigger, payload=None): """Raise exception if there are any subnets associated with segment.""" if payload.metadata.get('for_net_delete'): # don't check if this is a part of a network delete operation return segment_id = payload.resource_id subnets = subnet_obj.Subnet.get_objects(payload.context, segment_id=segment_id) subnet_ids = [s.id for s in subnets] if subnet_ids: reason = _("The segment is still associated with subnet(s) " "%s") % ", ".join(subnet_ids) raise exceptions.SegmentInUse(segment_id=segment_id, reason=reason) @registry.receives( resources.SUBNET, [events.PRECOMMIT_DELETE_ASSOCIATIONS]) def _validate_auto_address_subnet_delete(self, resource, event, trigger, payload): context = payload.context subnet = subnet_obj.Subnet.get_object(context, id=payload.resource_id) is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet) if not is_auto_addr_subnet or subnet.segment_id is None: return net_allocs = (context.session.query(models_v2.IPAllocation.port_id). 
filter_by(subnet_id=subnet.id)) port_ids_on_net = [ipalloc.port_id for ipalloc in net_allocs] for port_id in port_ids_on_net: try: port = ports_obj.Port.get_object(context, id=port_id) fixed_ips = [f for f in port['fixed_ips'] if f['subnet_id'] != subnet.id] if len(fixed_ips) != 0: continue LOG.info("Found port %(port_id)s, with IP auto-allocation " "only on subnet %(subnet)s which is associated with " "segment %(segment_id)s, cannot delete", {'port_id': port_id, 'subnet': subnet.id, 'segment_id': subnet.segment_id}) raise n_exc.SubnetInUse(subnet_id=subnet.id) except n_exc.PortNotFound: # port is gone continue class Event(object): def __init__(self, method, segment_ids, total=None, reserved=None, segment_host_mappings=None, host=None): self.method = method if isinstance(segment_ids, set): self.segment_ids = segment_ids else: self.segment_id = segment_ids self.total = total self.reserved = reserved self.segment_host_mappings = segment_host_mappings self.host = host @registry.has_registry_receivers class NovaSegmentNotifier(object): def __init__(self): self.p_client, self.n_client = self._get_clients() self.batch_notifier = batch_notifier.BatchNotifier( cfg.CONF.send_events_interval, self._send_notifications) def _get_clients(self): p_client = placement_client.PlacementAPIClient( cfg.CONF, openstack_api_version='placement 1.1') n_auth = ks_loading.load_auth_from_conf_options(cfg.CONF, 'nova') n_session = ks_loading.load_session_from_conf_options( cfg.CONF, 'nova', auth=n_auth) extensions = [ ext for ext in nova_client.discover_extensions(NOVA_API_VERSION) if ext.name == "server_external_events"] n_client = nova_client.Client( NOVA_API_VERSION, session=n_session, region_name=cfg.CONF.nova.region_name, endpoint_type=cfg.CONF.nova.endpoint_type, extensions=extensions) return p_client, n_client def _send_notifications(self, batched_events): for event in batched_events: try: event.method(event) except placement_exc.PlacementEndpointNotFound: LOG.debug('Placement API was not found when trying to ' 'update routed networks IPv4 inventories') return def _notify_subnet(self, context, subnet, segment_id): total, reserved = self._calculate_inventory_total_and_reserved(subnet) if total: segment_host_mappings = net_obj.SegmentHostMapping.get_objects( context, segment_id=segment_id) self.batch_notifier.queue_event(Event( self._create_or_update_nova_inventory, segment_id, total=total, reserved=reserved, segment_host_mappings=segment_host_mappings)) @registry.receives(resources.SUBNET, [events.AFTER_CREATE]) def _notify_subnet_created(self, resource, event, trigger, context, subnet, **kwargs): segment_id = subnet.get('segment_id') if not segment_id or subnet['ip_version'] != constants.IP_VERSION_4: return self._notify_subnet(context, subnet, segment_id) def _create_or_update_nova_inventory(self, event): try: self._update_nova_inventory(event) except placement_exc.PlacementResourceProviderNotFound: self._create_nova_inventory(event.segment_id, event.total, event.reserved, event.segment_host_mappings) def _update_nova_inventory(self, event): for count in range(MAX_INVENTORY_UPDATE_RETRIES): ipv4_inventory = self.p_client.get_inventory(event.segment_id, IPV4_RESOURCE_CLASS) if event.total: ipv4_inventory['total'] += event.total if event.reserved: ipv4_inventory['reserved'] += event.reserved try: self.p_client.update_resource_provider_inventory( event.segment_id, ipv4_inventory, IPV4_RESOURCE_CLASS) return except placement_exc.PlacementResourceProviderGenerationConflict: LOG.debug('Re-trying to update Nova IPv4 
inventory for ' 'routed network segment: %s', event.segment_id) LOG.error('Failed to update Nova IPv4 inventory for routed ' 'network segment: %s', event.segment_id) def _get_nova_aggregate_uuid(self, aggregate): try: return aggregate.uuid except AttributeError: with excutils.save_and_reraise_exception(): LOG.exception("uuid was not returned as part of the aggregate " "object which indicates that the Nova API " "backend does not support microversions. Ensure " "that the compute endpoint in the service " "catalog points to the v2.1 API.") def _create_nova_inventory(self, segment_id, total, reserved, segment_host_mappings): name = SEGMENT_NAME_STUB % segment_id resource_provider = {'name': name, 'uuid': segment_id} self.p_client.create_resource_provider(resource_provider) aggregate = self.n_client.aggregates.create(name, None) aggregate_uuid = self._get_nova_aggregate_uuid(aggregate) self.p_client.associate_aggregates(segment_id, [aggregate_uuid]) for mapping in segment_host_mappings: self.n_client.aggregates.add_host(aggregate.id, mapping.host) ipv4_inventory = { IPV4_RESOURCE_CLASS: { 'total': total, 'reserved': reserved, 'min_unit': 1, 'max_unit': 1, 'step_size': 1, 'allocation_ratio': 1.0, } } self.p_client.update_resource_provider_inventories( segment_id, ipv4_inventory) def _calculate_inventory_total_and_reserved(self, subnet): total = 0 reserved = 0 allocation_pools = subnet.get('allocation_pools') or [] for pool in allocation_pools: total += int(netaddr.IPAddress(pool['end']) - netaddr.IPAddress(pool['start'])) + 1 if total: if subnet.get('gateway_ip'): total += 1 reserved += 1 if subnet.get('enable_dhcp'): reserved += 1 return total, reserved @registry.receives(resources.SUBNET, [events.AFTER_UPDATE]) def _notify_subnet_updated(self, resource, event, trigger, context, subnet, original_subnet, **kwargs): segment_id = subnet.get('segment_id') original_segment_id = original_subnet.get('segment_id') if not segment_id or subnet['ip_version'] != constants.IP_VERSION_4: return if original_segment_id != segment_id: # Migration to routed network, treat as create self._notify_subnet(context, subnet, segment_id) return filters = {'segment_id': [segment_id], 'ip_version': [constants.IP_VERSION_4]} if not subnet['allocation_pools']: plugin = directory.get_plugin() alloc_pools = [s['allocation_pools'] for s in plugin.get_subnets(context, filters=filters)] if not any(alloc_pools): self.batch_notifier.queue_event(Event( self._delete_nova_inventory, segment_id)) return original_total, original_reserved = ( self._calculate_inventory_total_and_reserved(original_subnet)) updated_total, updated_reserved = ( self._calculate_inventory_total_and_reserved(subnet)) total = updated_total - original_total reserved = updated_reserved - original_reserved if total or reserved: segment_host_mappings = None if not original_subnet['allocation_pools']: segment_host_mappings = net_obj.SegmentHostMapping.get_objects( context, segment_id=segment_id) self.batch_notifier.queue_event(Event( self._create_or_update_nova_inventory, segment_id, total=total, reserved=reserved, segment_host_mappings=segment_host_mappings)) @registry.receives(resources.SUBNET, [events.AFTER_DELETE]) def _notify_subnet_deleted(self, resource, event, trigger, context, subnet, **kwargs): segment_id = subnet.get('segment_id') if not segment_id or subnet['ip_version'] != constants.IP_VERSION_4: return total, reserved = self._calculate_inventory_total_and_reserved(subnet) if total: filters = {'segment_id': [segment_id], 'ip_version': [4]} plugin = 
directory.get_plugin() if plugin.get_subnets_count(context, filters=filters) > 0: self.batch_notifier.queue_event(Event( self._update_nova_inventory, segment_id, total=-total, reserved=-reserved)) else: self.batch_notifier.queue_event(Event( self._delete_nova_inventory, segment_id)) def _get_aggregate_id(self, segment_id): aggregate_uuid = self.p_client.list_aggregates( segment_id)['aggregates'][0] aggregates = self.n_client.aggregates.list() for aggregate in aggregates: nc_aggregate_uuid = self._get_nova_aggregate_uuid(aggregate) if nc_aggregate_uuid == aggregate_uuid: return aggregate.id def _delete_nova_inventory(self, event): aggregate_id = self._get_aggregate_id(event.segment_id) aggregate = self.n_client.aggregates.get_details( aggregate_id) for host in aggregate.hosts: self.n_client.aggregates.remove_host(aggregate_id, host) self.n_client.aggregates.delete(aggregate_id) self.p_client.delete_resource_provider(event.segment_id) @registry.receives(resources.SEGMENT_HOST_MAPPING, [events.AFTER_CREATE]) def _notify_host_addition_to_aggregate(self, resource, event, trigger, payload=None): subnets = subnet_obj.Subnet.get_objects( payload.context, segment_id=payload.metadata.get('current_segment_ids')) segment_ids = {s.segment_id for s in subnets} self.batch_notifier.queue_event( Event(self._add_host_to_aggregate, segment_ids, host=payload.metadata.get('host'))) def _add_host_to_aggregate(self, event): for segment_id in event.segment_ids: try: aggregate_id = self._get_aggregate_id(segment_id) except placement_exc.PlacementAggregateNotFound: LOG.info('When adding host %(host)s, aggregate not found ' 'for routed network segment %(segment_id)s', {'host': event.host, 'segment_id': segment_id}) continue try: self.n_client.aggregates.add_host(aggregate_id, event.host) except nova_exc.Conflict: LOG.info('Host %(host)s already exists in aggregate for ' 'routed network segment %(segment_id)s', {'host': event.host, 'segment_id': segment_id}) @registry.receives(resources.PORT, [events.AFTER_CREATE, events.AFTER_DELETE]) def _notify_port_created_or_deleted(self, resource, event, trigger, context, port, **kwargs): if not self._does_port_require_nova_inventory_update(port): return ipv4_subnets_number, segment_id = ( self._get_ipv4_subnets_number_and_segment_id(port, context)) if segment_id: if event == events.AFTER_DELETE: ipv4_subnets_number = -ipv4_subnets_number self.batch_notifier.queue_event( Event(self._update_nova_inventory, segment_id, reserved=ipv4_subnets_number)) @registry.receives(resources.PORT, [events.AFTER_UPDATE]) def _notify_port_updated(self, resource, event, trigger, context, **kwargs): port = kwargs.get('port') original_port = kwargs.get('original_port') does_original_port_require_nova_inventory_update = ( self._does_port_require_nova_inventory_update(original_port)) does_port_require_nova_inventory_update = ( self._does_port_require_nova_inventory_update(port)) if not (does_original_port_require_nova_inventory_update or does_port_require_nova_inventory_update): return original_port_ipv4_subnets_number, segment_id = ( self._get_ipv4_subnets_number_and_segment_id(original_port, context)) if not segment_id: return port_ipv4_subnets_number = len(self._get_ipv4_subnet_ids(port)) if not does_original_port_require_nova_inventory_update: original_port_ipv4_subnets_number = 0 if not does_port_require_nova_inventory_update: port_ipv4_subnets_number = 0 update = port_ipv4_subnets_number - original_port_ipv4_subnets_number if update: 
self.batch_notifier.queue_event(Event(self._update_nova_inventory, segment_id, reserved=update)) def _get_ipv4_subnets_number_and_segment_id(self, port, context): ipv4_subnet_ids = self._get_ipv4_subnet_ids(port) if not ipv4_subnet_ids: return 0, None subnet = subnet_obj.Subnet.get_object(context, id=ipv4_subnet_ids[0]) if subnet and subnet.segment_id: return len(ipv4_subnet_ids), subnet.segment_id return 0, None def _does_port_require_nova_inventory_update(self, port): device_owner = port.get('device_owner') if (device_owner.startswith(constants.DEVICE_OWNER_COMPUTE_PREFIX) or device_owner == constants.DEVICE_OWNER_DHCP): return False return True def _get_ipv4_subnet_ids(self, port): ipv4_subnet_ids = [] for ip in port.get('fixed_ips', []): if netaddr.IPAddress( ip['ip_address']).version == constants.IP_VERSION_4: ipv4_subnet_ids.append(ip['subnet_id']) return ipv4_subnet_ids @registry.has_registry_receivers class SegmentHostRoutes(object): def _get_subnets(self, context, network_id): return subnet_obj.Subnet.get_objects(context, network_id=network_id) def _count_subnets(self, context, network_id): return subnet_obj.Subnet.count(context, network_id=network_id) def _calculate_routed_network_host_routes(self, context, ip_version, network_id=None, subnet_id=None, segment_id=None, host_routes=None, gateway_ip=None, old_gateway_ip=None, deleted_cidr=None): """Calculate host routes for routed network. This method is used to calculate the host routes for routed networks both when handling the user create or update request and when making updates to subnets on the network in response to events: AFTER_CREATE and AFTER_DELETE. :param ip_version: IP version (4/6). :param network_id: Network ID. :param subnet_id: UUID of the subnet. :param segment_id: Segement ID associated with the subnet. :param host_routes: Current host_routes of the subnet. :param gateway_ip: The subnets gateway IP address. :param old_gateway_ip: The old gateway IP address of the subnet when it is changed on update. :param deleted_cidr: The cidr of a deleted subnet. :returns Host routes with routes for the other subnet's on the routed network appended unless a route to the destination already exists. """ if host_routes is None: host_routes = [] dest_ip_nets = [netaddr.IPNetwork(route['destination']) for route in host_routes] # Drop routes to the deleted cidr, when the subnet was deleted. if deleted_cidr: delete_route = {'destination': deleted_cidr, 'nexthop': gateway_ip} if delete_route in host_routes: host_routes.remove(delete_route) for subnet in self._get_subnets(context, network_id): if (subnet.id == subnet_id or subnet.segment_id == segment_id or subnet.ip_version != ip_version): continue subnet_ip_net = netaddr.IPNetwork(subnet.cidr) if old_gateway_ip: old_route = {'destination': str(subnet.cidr), 'nexthop': old_gateway_ip} if old_route in host_routes: host_routes.remove(old_route) dest_ip_nets.remove(subnet_ip_net) if gateway_ip: # Use netaddr here in case the user provided a summary route # (supernet route). I.e subnet.cidr = 10.0.1.0/24 and # the user provided a host route for 10.0.0.0/16. We don't # need to append a route in this case. 
if not any(subnet_ip_net in ip_net for ip_net in dest_ip_nets): host_routes.append({'destination': subnet.cidr, 'nexthop': gateway_ip}) return host_routes def _host_routes_need_update(self, host_routes, calc_host_routes): """Compare host routes and calculated host routes :param host_routes: Current host routes :param calc_host_routes: Host routes + calculated host routes for routed network :returns True if host_routes and calc_host_routes are not equal """ return ((set((route['destination'], route['nexthop']) for route in host_routes) != set((route['destination'], route['nexthop']) for route in calc_host_routes))) def _update_routed_network_host_routes(self, context, network_id, deleted_cidr=None): """Update host routes on subnets on a routed network after event Host routes on the subnets on a routed network may need updates after any CREATE or DELETE event. :param network_id: Network ID :param deleted_cidr: The cidr of a deleted subnet. """ for subnet in self._get_subnets(context, network_id): host_routes = [{'destination': str(route.destination), 'nexthop': route.nexthop} for route in subnet.host_routes] calc_host_routes = self._calculate_routed_network_host_routes( context=context, ip_version=subnet.ip_version, network_id=subnet.network_id, subnet_id=subnet.id, segment_id=subnet.segment_id, host_routes=copy.deepcopy(host_routes), gateway_ip=subnet.gateway_ip, deleted_cidr=deleted_cidr) if self._host_routes_need_update(host_routes, calc_host_routes): LOG.debug( "Updating host routes for subnet %s on routed network %s", subnet.id, subnet.network_id) plugin = directory.get_plugin() plugin.update_subnet(context, subnet.id, {'subnet': { 'host_routes': calc_host_routes}}) @registry.receives(resources.SUBNET, [events.BEFORE_CREATE]) def host_routes_before_create(self, resource, event, trigger, context, subnet, **kwargs): segment_id = subnet.get('segment_id') gateway_ip = subnet.get('gateway_ip') if validators.is_attr_set(subnet.get('host_routes')): host_routes = subnet.get('host_routes') else: host_routes = [] if segment_id is not None and validators.is_attr_set(gateway_ip): calc_host_routes = self._calculate_routed_network_host_routes( context=context, ip_version=netaddr.IPNetwork(subnet['cidr']).version, network_id=subnet['network_id'], segment_id=segment_id, host_routes=copy.deepcopy(host_routes), gateway_ip=gateway_ip) if (not host_routes or self._host_routes_need_update(host_routes, calc_host_routes)): subnet['host_routes'] = calc_host_routes @registry.receives(resources.SUBNET, [events.BEFORE_UPDATE]) def host_routes_before_update(self, resource, event, trigger, **kwargs): context = kwargs['context'] subnet, original_subnet = kwargs['request'], kwargs['original_subnet'] orig_segment_id = original_subnet.get('segment_id') segment_id = subnet.get('segment_id', orig_segment_id) orig_gateway_ip = original_subnet.get('gateway_ip') gateway_ip = subnet.get('gateway_ip', orig_gateway_ip) orig_host_routes = original_subnet.get('host_routes') host_routes = subnet.get('host_routes', orig_host_routes) if (segment_id and (host_routes != orig_host_routes or gateway_ip != orig_gateway_ip)): calc_host_routes = self._calculate_routed_network_host_routes( context=context, ip_version=netaddr.IPNetwork(original_subnet['cidr']).version, network_id=original_subnet['network_id'], segment_id=segment_id, host_routes=copy.deepcopy(host_routes), gateway_ip=gateway_ip, old_gateway_ip=orig_gateway_ip if ( gateway_ip != orig_gateway_ip) else None) if self._host_routes_need_update(host_routes, calc_host_routes): 
subnet['host_routes'] = calc_host_routes @registry.receives(resources.SUBNET, [events.AFTER_CREATE]) def host_routes_after_create(self, resource, event, trigger, **kwargs): context = kwargs['context'] subnet = kwargs['subnet'] # If there are other subnets on the network and subnet has segment_id # ensure host routes for all subnets are updated. if (self._count_subnets(context, subnet['network_id']) > 1 and subnet.get('segment_id')): self._update_routed_network_host_routes(context, subnet['network_id']) @registry.receives(resources.SUBNET, [events.AFTER_DELETE]) def host_routes_after_delete(self, resource, event, trigger, context, subnet, **kwargs): # If this is a routed network, remove any routes to this subnet on # this networks remaining subnets. if subnet.get('segment_id'): self._update_routed_network_host_routes( context, subnet['network_id'], deleted_cidr=subnet['cidr']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/service_base.py0000644000175000017500000000501500000000000023465 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import importutils from neutron.db import servicetype_db as sdb from neutron.services import provider_configuration as pconf LOG = logging.getLogger(__name__) def load_drivers(service_type, plugin): """Loads drivers for specific service. Passes plugin instance to driver's constructor """ service_type_manager = sdb.ServiceTypeManager.get_instance() providers = (service_type_manager. 
get_service_providers( None, filters={'service_type': [service_type]}) ) if not providers: msg = ("No providers specified for '%s' service, exiting" % service_type) LOG.error(msg) raise SystemExit(1) drivers = {} for provider in providers: try: drivers[provider['name']] = importutils.import_object( provider['driver'], plugin ) LOG.debug("Loaded '%(provider)s' provider for service " "%(service_type)s", {'provider': provider['driver'], 'service_type': service_type}) except ImportError: with excutils.save_and_reraise_exception(): LOG.exception("Error loading provider '%(provider)s' for " "service %(service_type)s", {'provider': provider['driver'], 'service_type': service_type}) default_provider = None try: provider = service_type_manager.get_default_service_provider( None, service_type) default_provider = provider['name'] except pconf.DefaultServiceProviderNotFound: LOG.info("Default provider is not specified for service type %s", service_type) return drivers, default_provider ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3750453 neutron-16.0.0.0b2.dev214/neutron/services/tag/0000755000175000017500000000000000000000000021233 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/tag/__init__.py0000644000175000017500000000000000000000000023332 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/tag/tag_plugin.py0000644000175000017500000001152000000000000023735 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
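# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this tree): the dotted-path driver loading
# that load_drivers() above delegates to oslo's importutils.import_object().
# The stdlib equivalent below is an assumption for the example, and the
# provider entry in the usage comment is hypothetical.
# ---------------------------------------------------------------------------
import importlib


def _import_object(dotted_path, *args, **kwargs):
    """Import 'pkg.module.Class' and return an instance of it."""
    module_name, _sep, class_name = dotted_path.rpartition('.')
    cls = getattr(importlib.import_module(module_name), class_name)
    return cls(*args, **kwargs)

# Mirrors the loop in load_drivers():
# drivers[provider['name']] = _import_object(provider['driver'], plugin)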
# from neutron_lib.db import api as db_api from neutron_lib.db import model_query from neutron_lib.db import resource_extend from neutron_lib.objects import exceptions as obj_exc from neutron_lib.plugins import directory from oslo_log import helpers as log_helpers from sqlalchemy.orm import exc from neutron.db import standard_attr from neutron.extensions import tagging from neutron.objects import tag as tag_obj # Taggable resources resource_model_map = standard_attr.get_standard_attr_resource_model_map() @resource_extend.has_resource_extenders class TagPlugin(tagging.TagPluginBase): """Implementation of the Neutron Tag Service Plugin.""" supported_extension_aliases = ['standard-attr-tag'] __filter_validation_support = True def __new__(cls, *args, **kwargs): inst = super(TagPlugin, cls).__new__(cls, *args, **kwargs) tag_obj.register_tag_hooks() return inst @staticmethod @resource_extend.extends(list(resource_model_map)) def _extend_tags_dict(response_data, db_data): if not directory.get_plugin(tagging.TAG_PLUGIN_TYPE): return tags = [tag_db.tag for tag_db in db_data.standard_attr.tags] response_data['tags'] = tags def _get_resource(self, context, resource, resource_id): model = resource_model_map[resource] try: return model_query.get_by_id(context, model, resource_id) except exc.NoResultFound: raise tagging.TagResourceNotFound(resource=resource, resource_id=resource_id) @log_helpers.log_method_call def get_tags(self, context, resource, resource_id): res = self._get_resource(context, resource, resource_id) tags = [tag_db.tag for tag_db in res.standard_attr.tags] return dict(tags=tags) @log_helpers.log_method_call def get_tag(self, context, resource, resource_id, tag): res = self._get_resource(context, resource, resource_id) if not any(tag == tag_db.tag for tag_db in res.standard_attr.tags): raise tagging.TagNotFound(tag=tag) @log_helpers.log_method_call @db_api.retry_if_session_inactive() def update_tags(self, context, resource, resource_id, body): with db_api.CONTEXT_WRITER.using(context): # We get and do all operations with objects in one session res = self._get_resource(context, resource, resource_id) new_tags = set(body['tags']) old_tags = {tag_db.tag for tag_db in res.standard_attr.tags} tags_added = new_tags - old_tags tags_removed = old_tags - new_tags if tags_removed: tag_obj.Tag.delete_objects( context, standard_attr_id=res.standard_attr_id, tag=[ tag_db.tag for tag_db in res.standard_attr.tags if tag_db.tag in tags_removed ] ) self.add_tags(context, res.standard_attr_id, tags_added) return body def add_tags(self, context, standard_attr_id, tags): for tag in tags: tag_obj.Tag(context, standard_attr_id=standard_attr_id, tag=tag).create() @log_helpers.log_method_call def update_tag(self, context, resource, resource_id, tag): res = self._get_resource(context, resource, resource_id) if any(tag == tag_db.tag for tag_db in res.standard_attr.tags): return try: tag_obj.Tag(context, standard_attr_id=res.standard_attr_id, tag=tag).create() except obj_exc.NeutronDbObjectDuplicateEntry: pass @log_helpers.log_method_call def delete_tags(self, context, resource, resource_id): res = self._get_resource(context, resource, resource_id) tag_obj.Tag.delete_objects(context, standard_attr_id=res.standard_attr_id) @log_helpers.log_method_call def delete_tag(self, context, resource, resource_id, tag): res = self._get_resource(context, resource, resource_id) if not tag_obj.Tag.delete_objects( context, tag=tag, standard_attr_id=res.standard_attr_id): raise tagging.TagNotFound(tag=tag) 
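# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this tree): the add/remove diff computed
# by TagPlugin.update_tags() above for a PUT of the full tag list. The
# function name is a hypothetical stand-in; the set arithmetic mirrors the
# real code.
# ---------------------------------------------------------------------------

def _diff_tags(old_tags, new_tags):
    """Return (tags_added, tags_removed) between two tag collections."""
    old_tags, new_tags = set(old_tags), set(new_tags)
    return new_tags - old_tags, old_tags - new_tags

# _diff_tags(['red', 'blue'], ['blue', 'green']) == ({'green'}, {'red'})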
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3750453 neutron-16.0.0.0b2.dev214/neutron/services/timestamp/0000755000175000017500000000000000000000000022463 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/timestamp/__init__.py0000644000175000017500000000000000000000000024562 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/timestamp/timestamp_db.py0000644000175000017500000001003000000000000025477 0ustar00coreycorey00000000000000# Copyright 2015 HuaWei Technologies. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import api as db_api from neutron_lib.db import model_query from neutron_lib.db import resource_extend from neutron_lib import exceptions as n_exc from oslo_utils import timeutils from sqlalchemy.orm import session as se from neutron.db import standard_attr CHANGED_SINCE = 'changed_since' TIME_FORMAT_WHOLE_SECONDS = '%Y-%m-%dT%H:%M:%S' def _change_since_result_filter_hook(query, filters): # this block is for change_since query # we get the changed_since string from filters. # And translate it from string to datetime type. # Then compare with the timestamp in db which has # datetime type. values = filters and filters.get(CHANGED_SINCE, []) if not values: return query data = filters[CHANGED_SINCE][0] try: changed_since_string = timeutils.parse_isotime(data) except Exception: msg = ("The input %s must be in the " "following format: YYYY-MM-DDTHH:MM:SSZ") % CHANGED_SINCE raise n_exc.InvalidInput(error_message=msg) changed_since = (timeutils. normalize_time(changed_since_string)) target_model_class = query.column_descriptions[0]['type'] query = query.join(standard_attr.StandardAttribute, target_model_class.standard_attr_id == standard_attr.StandardAttribute.id).filter( standard_attr.StandardAttribute.updated_at >= changed_since) return query def _update_timestamp(session, context, instances): objs_list = session.new.union(session.dirty) while objs_list: obj = objs_list.pop() if (isinstance(obj, standard_attr.HasStandardAttributes) and obj.standard_attr_id): obj.updated_at = timeutils.utcnow() def _format_timestamp(resource_db, result): result['created_at'] = (resource_db.created_at. strftime(TIME_FORMAT_WHOLE_SECONDS)) + 'Z' result['updated_at'] = (resource_db.updated_at. 
strftime(TIME_FORMAT_WHOLE_SECONDS)) + 'Z' def _add_timestamp(mapper, _conn, target): if not target.created_at and not target.updated_at: time = timeutils.utcnow() for field in ['created_at', 'updated_at']: setattr(target, field, time) return target @resource_extend.has_resource_extenders class TimeStamp_db_mixin(object): """Mixin class to add Time Stamp methods.""" def __new__(cls, *args, **kwargs): rs_model_maps = standard_attr.get_standard_attr_resource_model_map() for model in rs_model_maps.values(): model_query.register_hook( model, "change_since_query", query_hook=None, filter_hook=None, result_filters=_change_since_result_filter_hook) return super(TimeStamp_db_mixin, cls).__new__(cls, *args, **kwargs) def register_db_events(self): listen = db_api.sqla_listen listen(standard_attr.StandardAttribute, 'before_insert', _add_timestamp) listen(se.Session, 'before_flush', _update_timestamp) @staticmethod @resource_extend.extends( list(standard_attr.get_standard_attr_resource_model_map())) def _extend_resource_dict_timestamp(resource_res, resource_db): if (resource_db and resource_db.created_at and resource_db.updated_at): _format_timestamp(resource_db, resource_res) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/timestamp/timestamp_plugin.py0000644000175000017500000000300200000000000026411 0ustar00coreycorey00000000000000# Copyright 2015 HuaWei Technologies. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
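# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this tree): the changed_since filter hook
# above parses an ISO 8601 value and compares it against
# StandardAttribute.updated_at. A stdlib-only equivalent of that parsing
# step (oslo's timeutils swapped for datetime, assuming Python 3.7+ where
# %z accepts a literal 'Z'):
# ---------------------------------------------------------------------------
from datetime import datetime, timezone


def _parse_changed_since(value):
    """Parse 'YYYY-MM-DDTHH:MM:SSZ' into a naive UTC datetime."""
    aware = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S%z')
    return aware.astimezone(timezone.utc).replace(tzinfo=None)

# The SQL filter then expresses:
#   StandardAttribute.updated_at >=
#       _parse_changed_since('2020-04-15T20:24:41Z')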
from neutron_lib.services import base as service_base from neutron.db import models_v2 from neutron.objects import base as base_obj from neutron.services.timestamp import timestamp_db as ts_db class TimeStampPlugin(service_base.ServicePluginBase, ts_db.TimeStamp_db_mixin): """Implements Neutron Timestamp Service plugin.""" supported_extension_aliases = ['standard-attr-timestamp'] __filter_validation_support = True def __init__(self): super(TimeStampPlugin, self).__init__() self.register_db_events() # TODO(jlibosva): Move this to register_model_query_hook base_obj.register_filter_hook_on_model( models_v2.SubnetPool, ts_db.CHANGED_SINCE) @classmethod def get_plugin_type(cls): return 'timestamp' def get_plugin_description(self): return "Adds timestamps to Neutron resources with standard attributes" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3750453 neutron-16.0.0.0b2.dev214/neutron/services/trunk/0000755000175000017500000000000000000000000021623 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/__init__.py0000644000175000017500000000000000000000000023722 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/callbacks.py0000644000175000017500000000242300000000000024115 0ustar00coreycorey00000000000000# (c) Copyright 2016 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # TODO(boden): remove this once moved over to neutron-lib payloads class TrunkPayload(object): """Payload for trunk-related callback registry notifications.""" def __init__(self, context, trunk_id, current_trunk=None, original_trunk=None, subports=None): self.context = context self.trunk_id = trunk_id self.current_trunk = current_trunk self.original_trunk = original_trunk self.subports = subports if subports else [] def __eq__(self, other): return (isinstance(other, self.__class__) and self.__dict__ == other.__dict__) def __ne__(self, other): return not self.__eq__(other) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3750453 neutron-16.0.0.0b2.dev214/neutron/services/trunk/drivers/0000755000175000017500000000000000000000000023301 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/drivers/__init__.py0000644000175000017500000000244400000000000025416 0ustar00coreycorey00000000000000# Copyright 2016 Hewlett Packard Enterprise Development Company LP # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.services.trunk.drivers.linuxbridge import driver as lxb_driver from neutron.services.trunk.drivers.openvswitch import driver as ovs_driver def register(): """Load in-tree drivers for the service plugin.""" # Enable the trunk plugin to work with ML2/OVS. Support for other # drivers can be added similarly by executing the registration # code at the time of plugin/mech driver initialization. There should # be at least one compatible driver enabled in the deployment for trunk # setup to be successful. The plugin fails to initialize if no compatible # driver is found in the deployment. lxb_driver.register() ovs_driver.register() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/drivers/base.py0000644000175000017500000000674500000000000024601 0ustar00coreycorey00000000000000# Copyright 2016 Hewlett Packard Enterprise Development Company, LP # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron.services.trunk.rpc import backend @registry.has_registry_receivers class DriverBase(object): def __init__(self, name, interfaces, segmentation_types, agent_type=None, can_trunk_bound_port=False): """Instantiate a trunk driver. :param name: driver name. :param interfaces: list of interfaces supported. :param segmentation_types: list of segmentation types supported. :param agent_type: agent type for the driver, None if agentless. :param can_trunk_bound_port: True if trunk creation is allowed for a bound parent port (i.e. trunk creation after VM boot). """ self.name = name self.interfaces = interfaces self.segmentation_types = segmentation_types self.agent_type = agent_type self.can_trunk_bound_port = can_trunk_bound_port @abc.abstractproperty def is_loaded(self): """True if the driver is active for the Neutron Server. Implement this property to determine if your driver is actively configured for this Neutron Server deployment, e.g. check if core_plugin or mech_drivers config options (for ML2) is set as required. """ def is_interface_compatible(self, interface): """True if the driver is compatible with the interface.""" return interface in self.interfaces def is_agent_compatible(self, agent_type): """True if the driver is compatible with the agent type.""" return agent_type == self.agent_type @registry.receives(resources.TRUNK_PLUGIN, [events.AFTER_INIT]) def register(self, resource, event, trigger, payload=None): """Register the trunk driver. 
This method should be overridden so that the driver can subscribe to the required trunk events. The driver should also advertise itself as supported driver by calling register_driver() on the TrunkPlugin otherwise the trunk plugin may fail to start if no compatible configuration is found. External drivers must subscribe to the AFTER_INIT event for the trunk plugin so that they can integrate without an explicit register() method invocation. :param resource: neutron.services.trunk.constants.TRUNK_PLUGIN :param event: neutron_lib.callbacks.events.AFTER_INIT :param trigger: neutron.service.trunks.plugin.TrunkPlugin """ trigger.register_driver(self) # Set up the server-side RPC backend if the driver is loaded, # it is agent based, and the RPC backend is not already initialized. if self.is_loaded and self.agent_type and not trigger.is_rpc_enabled(): trigger.set_rpc_backend(backend.ServerSideRpcBackend()) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3750453 neutron-16.0.0.0b2.dev214/neutron/services/trunk/drivers/linuxbridge/0000755000175000017500000000000000000000000025615 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/drivers/linuxbridge/__init__.py0000644000175000017500000000000000000000000027714 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3750453 neutron-16.0.0.0b2.dev214/neutron/services/trunk/drivers/linuxbridge/agent/0000755000175000017500000000000000000000000026713 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/drivers/linuxbridge/agent/__init__.py0000644000175000017500000000000000000000000031012 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/drivers/linuxbridge/agent/driver.py0000644000175000017500000002075300000000000030567 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
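# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this tree): how a plugin can use the
# compatibility probes that DriverBase (drivers/base.py above) exposes to
# pick a driver. The stub class, driver name and agent type below are
# hypothetical stand-ins; only the probe semantics mirror the real code.
# ---------------------------------------------------------------------------

class _StubDriver(object):
    def __init__(self, name, interfaces, agent_type):
        self.name = name
        self.interfaces = interfaces
        self.agent_type = agent_type

    # Same semantics as DriverBase.is_interface_compatible()
    def is_interface_compatible(self, interface):
        return interface in self.interfaces

    # Same semantics as DriverBase.is_agent_compatible()
    def is_agent_compatible(self, agent_type):
        return agent_type == self.agent_type


def _find_compatible_driver(drivers, interface, agent_type):
    """Return the first driver matching both probes, or None."""
    for driver in drivers:
        if (driver.is_interface_compatible(interface) and
                driver.is_agent_compatible(agent_type)):
            return driver

# _find_compatible_driver(
#     [_StubDriver('ovs', ['openvswitch'], 'Open vSwitch agent')],
#     'openvswitch', 'Open vSwitch agent')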
from neutron_lib.callbacks import events as local_events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources as local_resources from neutron_lib.services.trunk import constants as t_const from oslo_log import log as logging import oslo_messaging from neutron.api.rpc.callbacks import events from neutron.api.rpc.handlers import resources_rpc from neutron.services.trunk.drivers.linuxbridge.agent import trunk_plumber from neutron.services.trunk.rpc import agent as trunk_rpc LOG = logging.getLogger(__name__) def init_handler(resource, event, trigger, payload=None): """Handler for agent init event.""" LinuxBridgeTrunkDriver() @registry.has_registry_receivers class LinuxBridgeTrunkDriver(trunk_rpc.TrunkSkeleton): """Driver responsible for handling trunk/subport/port events. Receives data model events from the server and VIF events from the agent and uses these to drive a Plumber instance to wire up VLAN subinterfaces for any trunks. """ def __init__(self, plumber=None, trunk_api=None): self._plumber = plumber or trunk_plumber.Plumber() self._tapi = trunk_api or _TrunkAPI(trunk_rpc.TrunkStub()) super(LinuxBridgeTrunkDriver, self).__init__() def handle_trunks(self, context, resource_type, trunks, event_type): """Trunk data model change from the server.""" for trunk in trunks: if event_type in (events.UPDATED, events.CREATED): self._tapi.put_trunk(trunk.port_id, trunk) self.wire_trunk(context, trunk) elif event_type == events.DELETED: self._tapi.put_trunk(trunk.port_id, None) self._plumber.delete_trunk_subports(trunk) def handle_subports(self, context, resource_type, subports, event_type): """Subport data model change from the server.""" affected_trunks = set() if event_type == events.DELETED: method = self._tapi.delete_trunk_subport else: method = self._tapi.put_trunk_subport for s in subports: affected_trunks.add(s['trunk_id']) method(s['trunk_id'], s) for trunk_id in affected_trunks: trunk = self._tapi.get_trunk_by_id(context, trunk_id) if not trunk: continue self.wire_trunk(context, trunk) @registry.receives(local_resources.PORT_DEVICE, [local_events.AFTER_DELETE]) def agent_port_delete(self, resource, event, trigger, payload=None): """Agent informed us a VIF was removed.""" # NOTE(kevinbenton): we don't need to do anything to cleanup VLAN # interfaces if a trunk was removed because the kernel will do that # for us. We also don't update the trunk status to DOWN because we # don't want to race with another agent that the trunk may have been # moved to. @registry.receives(local_resources.PORT_DEVICE, [local_events.AFTER_UPDATE]) def agent_port_change(self, resource, event, trigger, payload=None): """The agent hath informed us thusly of a port update or create.""" port_id = payload.latest_state['port_id'] trunk = self._tapi.get_trunk(payload.context, port_id) if trunk: # a wild trunk has appeared! make its children self.wire_trunk(payload.context, trunk) return # clear any VLANs in case this was a trunk that changed status while # agent was offline. 
self._plumber.delete_subports_by_port_id(port_id) def wire_trunk(self, context, trunk): """Wire up subports while keeping the server trunk status apprised.""" if not self._plumber.trunk_on_host(trunk): LOG.debug("Trunk %s not present on this host", trunk.port_id) return self._tapi.bind_subports_to_host(context, trunk) try: self._plumber.ensure_trunk_subports(trunk) self._tapi.set_trunk_status( context, trunk, t_const.TRUNK_ACTIVE_STATUS) except Exception: if not self._plumber.trunk_on_host(trunk): LOG.debug("Trunk %s removed during wiring", trunk.port_id) return # something broke LOG.exception("Failure setting up subports for %s", trunk.port_id) self._tapi.set_trunk_status(context, trunk, t_const.TRUNK_DEGRADED_STATUS) class _TrunkAPI(object): """Our secret stash of trunks stored by port ID. Tell no one.""" def __init__(self, trunk_stub): self.server_api = trunk_stub self._trunk_by_port_id = {} self._trunk_by_id = {} self._sub_port_id_to_trunk_port_id = {} def _fetch_trunk(self, context, port_id): try: t = self.server_api.get_trunk_details(context, port_id) LOG.debug("Found trunk %(t)s for port %(p)s", dict(p=port_id, t=t)) return t except resources_rpc.ResourceNotFound: return None except oslo_messaging.RemoteError as e: if e.exc_type != 'CallbackNotFound': raise LOG.debug("Trunk plugin disabled on server. Assuming port %s is " "not a trunk.", port_id) return None def set_trunk_status(self, context, trunk, status): self.server_api.update_trunk_status(context, trunk.id, status) def bind_subports_to_host(self, context, trunk): self.server_api.update_subport_bindings(context, trunk.sub_ports) def put_trunk_subport(self, trunk_id, subport): LOG.debug("Adding subport %(sub)s to trunk %(trunk)s", dict(sub=subport, trunk=trunk_id)) if trunk_id not in self._trunk_by_id: # not on this agent return trunk = self._trunk_by_id[trunk_id] trunk.sub_ports = [s for s in trunk.sub_ports if s.port_id != subport.port_id] + [subport] def delete_trunk_subport(self, trunk_id, subport): LOG.debug("Removing subport %(sub)s from trunk %(trunk)s", dict(sub=subport, trunk=trunk_id)) if trunk_id not in self._trunk_by_id: # not on this agent return trunk = self._trunk_by_id[trunk_id] trunk.sub_ports = [s for s in trunk.sub_ports if s.port_id != subport.port_id] def put_trunk(self, port_id, trunk): if port_id in self._trunk_by_port_id: # already existed. expunge sub_port cross ref self._sub_port_id_to_trunk_port_id = { s: p for s, p in self._sub_port_id_to_trunk_port_id.items() if p != port_id} self._trunk_by_port_id[port_id] = trunk if not trunk: return self._trunk_by_id[trunk.id] = trunk for sub in trunk.sub_ports: self._sub_port_id_to_trunk_port_id[sub.port_id] = trunk.port_id def get_trunk_by_id(self, context, trunk_id): """Gets trunk object based on trunk_id. None if not in cache.""" return self._trunk_by_id.get(trunk_id) def get_trunk(self, context, port_id): """Gets trunk object for port_id. 
None if not trunk.""" if port_id not in self._trunk_by_port_id: # TODO(kevinbenton): ask the server for *all* trunk port IDs on # start and eliminate asking the server if every port is a trunk # TODO(kevinbenton): clear this on AMQP reconnect LOG.debug("Cache miss for port %s, fetching from server", port_id) self.put_trunk(port_id, self._fetch_trunk(context, port_id)) return self.get_trunk(context, port_id) return self._trunk_by_port_id[port_id] def get_trunk_for_subport(self, context, port_id): """Returns trunk if port_id is a subport, else None.""" trunk_port = self._sub_port_id_to_trunk_port_id.get(port_id) if trunk_port: return self.get_trunk(context, trunk_port) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/drivers/linuxbridge/agent/trunk_plumber.py0000644000175000017500000001134100000000000032156 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.utils import runtime from oslo_concurrency import lockutils from oslo_log import log as logging from oslo_utils import excutils from neutron.agent.linux import ip_lib from neutron.plugins.ml2.drivers.linuxbridge.agent.common import utils as lutil LOG = logging.getLogger(__name__) class Plumber(object): """Object responsible for VLAN interface CRUD. This handles the creation/deletion/listing of VLAN interfaces for a trunk within a namespace. """ def __init__(self, namespace=None): self.namespace = namespace def trunk_on_host(self, trunk): """Returns true if trunk device is present else False.""" trunk_dev = self._trunk_device_name(trunk) return ip_lib.device_exists(trunk_dev, namespace=self.namespace) def ensure_trunk_subports(self, trunk): """Idempotent wiring for a trunk's subports. Given a trunk object, delete any vlan subinterfaces belonging to a trunk that aren't on the object. Create any which are on the object which do not exist. 
""" trunk_dev = self._trunk_device_name(trunk) with self._trunk_lock(trunk_dev): # lock scoped to trunk device so two diffs don't interleave expected = self._get_subport_devs_and_vlans(trunk.sub_ports) existing = self._get_vlan_children(trunk_dev) to_delete = existing - expected to_create = expected - existing for devname, vlan_id in to_delete: LOG.debug("Deleting subport %(name)s with vlan tag %(tag)s", dict(name=devname, tag=vlan_id)) self._safe_delete_device(devname) for devname, vlan_id in to_create: LOG.debug("Creating subport %(name)s with vlan tag %(tag)s", dict(name=devname, tag=vlan_id)) self._create_vlan_subint(trunk_dev, devname, vlan_id) def delete_trunk_subports(self, trunk): return self.delete_subports_by_port_id(trunk.port_id) def delete_subports_by_port_id(self, port_id): device = self._get_tap_device_name(port_id) if not ip_lib.device_exists(device, namespace=self.namespace): LOG.debug("Device %s not present on this host", device) return with self._trunk_lock(device): for subname, vlan_id in self._get_vlan_children(device): LOG.debug("Deleting subport %(name)s with vlan tag %(tag)s", dict(name=subname, tag=vlan_id)) self._safe_delete_device(subname) def _trunk_lock(self, trunk_dev): lock_name = 'trunk-%s' % trunk_dev return lockutils.lock(lock_name, runtime.SYNCHRONIZED_PREFIX) def _create_vlan_subint(self, trunk_name, devname, vlan_id): ip_wrap = ip_lib.IPWrapper(namespace=self.namespace) try: dev = ip_wrap.add_vlan(devname, trunk_name, vlan_id) dev.disable_ipv6() except Exception: with excutils.save_and_reraise_exception() as ectx: ectx.reraise = ip_lib.IPDevice( devname, namespace=self.namespace).exists() def _safe_delete_device(self, devname): dev = ip_lib.IPDevice(devname, namespace=self.namespace) try: dev.link.set_down() dev.link.delete() except Exception: with excutils.save_and_reraise_exception() as ectx: ectx.reraise = dev.exists() def _trunk_device_name(self, trunk): return self._get_tap_device_name(trunk.port_id) def _get_subport_devs_and_vlans(self, subports): return {(self._get_tap_device_name(s.port_id), s.segmentation_id) for s in subports} def _get_tap_device_name(self, devname): return lutil.get_tap_device_name(devname) def _get_vlan_children(self, dev): """Return set of (devname, vlan_id) tuples for children of device.""" devices = ip_lib.get_devices_info(namespace=self.namespace) return {(device['name'], device['vlan_id']) for device in devices if device.get('kind') == 'vlan' and device.get('parent_name') == dev} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/drivers/linuxbridge/driver.py0000644000175000017500000000326700000000000027472 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import log as logging from neutron_lib.api.definitions import portbindings from neutron_lib import constants from neutron_lib.services.trunk import constants as trunk_consts from neutron.services.trunk.drivers import base LOG = logging.getLogger(__name__) NAME = 'linuxbridge' SUPPORTED_INTERFACES = ( portbindings.VIF_TYPE_BRIDGE, ) SUPPORTED_SEGMENTATION_TYPES = ( trunk_consts.SEGMENTATION_TYPE_VLAN, ) class LinuxBridgeDriver(base.DriverBase): """Server-side Trunk driver for the ML2 Linux Bridge driver.""" @property def is_loaded(self): try: return NAME in cfg.CONF.ml2.mechanism_drivers except cfg.NoSuchOptError: return False @classmethod def create(cls): return cls(NAME, SUPPORTED_INTERFACES, SUPPORTED_SEGMENTATION_TYPES, constants.AGENT_TYPE_LINUXBRIDGE, can_trunk_bound_port=True) def register(): # NOTE(kevinbenton): the thing that is keeping this from being # immediately garbage collected is that it registers callbacks LinuxBridgeDriver.create() LOG.debug("Linux bridge trunk driver initialized.") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3790452 neutron-16.0.0.0b2.dev214/neutron/services/trunk/drivers/openvswitch/0000755000175000017500000000000000000000000025652 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/drivers/openvswitch/__init__.py0000644000175000017500000000000000000000000027751 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3790452 neutron-16.0.0.0b2.dev214/neutron/services/trunk/drivers/openvswitch/agent/0000755000175000017500000000000000000000000026750 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/drivers/openvswitch/agent/__init__.py0000644000175000017500000000000000000000000031047 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/drivers/openvswitch/agent/driver.py0000644000175000017500000001112300000000000030613 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
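# Illustrative sketch (standalone; the option value is hypothetical): the
# is_loaded property above reduces to a membership test on the ml2
# mechanism_drivers option. NoSuchOptError is caught because the option
# does not exist when ml2 is not the loaded core plugin.
mechanism_drivers = ['openvswitch', 'linuxbridge']  # stand-in for cfg.CONF.ml2.mechanism_drivers
assert 'linuxbridge' in mechanism_drivers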
from neutron_lib.callbacks import events as local_events from neutron_lib.callbacks import registry as local_registry from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from neutron.api.rpc.callbacks.consumer import registry from neutron.api.rpc.callbacks import events from neutron.api.rpc.callbacks import resources from neutron.services.trunk.drivers.openvswitch.agent import ovsdb_handler from neutron.services.trunk.drivers.openvswitch.agent import trunk_manager from neutron.services.trunk.rpc import agent LOG = logging.getLogger(__name__) TRUNK_SKELETON = None @local_registry.has_registry_receivers class OVSTrunkSkeleton(agent.TrunkSkeleton): """It processes Neutron Server events to create the physical resources associated to a logical trunk in response to user initiated API events (such as trunk subport add/remove). It collaborates with the OVSDBHandler to implement the trunk control plane. """ def __init__(self, ovsdb_handler): super(OVSTrunkSkeleton, self).__init__() self.ovsdb_handler = ovsdb_handler registry.unsubscribe(self.handle_trunks, resources.TRUNK) def handle_trunks(self, context, resource_type, trunk, event_type): """This method is not required by the OVS Agent driver. Trunk notifications are handled via local OVSDB events. """ raise NotImplementedError() def handle_subports(self, context, resource_type, subports, event_type): # Subports are always created with the same trunk_id and there is # always at least one item in subports list trunk_id = subports[0].trunk_id if self.ovsdb_handler.manages_this_trunk(trunk_id): if event_type not in (events.CREATED, events.DELETED): LOG.error("Unknown or unimplemented event %s", event_type) return ctx = self.ovsdb_handler.context try: LOG.debug("Event %s for subports: %s", event_type, subports) if event_type == events.CREATED: status = self.ovsdb_handler.wire_subports_for_trunk( ctx, trunk_id, subports) elif event_type == events.DELETED: subport_ids = [subport.port_id for subport in subports] status = self.ovsdb_handler.unwire_subports_for_trunk( trunk_id, subport_ids) self.ovsdb_handler.report_trunk_status(ctx, trunk_id, status) except oslo_messaging.MessagingException as e: LOG.error( "Error on event %(event)s for subports " "%(subports)s: %(err)s", {'event': event_type, 'subports': subports, 'err': e}) @local_registry.receives(resources.TRUNK, [local_events.BEFORE_CREATE]) def check_trunk_dependencies(self, resource, event, trigger, **kwargs): # The OVS trunk driver does not work with iptables firewall and QoS. # We should validate the environment configuration and signal that # something might be wrong. # NOTE(armax): this check could be made quite sophisticated in that # we could check for incompatibilities and abort the creation request # only if the trunk is indeed associated with ports that have security # groups and QoS rules, though this would be a lot more work. if "iptables_hybrid" in cfg.CONF.SECURITYGROUP.firewall_driver: LOG.warning( "Firewall driver iptables_hybrid is not compatible with " "trunk ports. Trunk %(trunk_id)s may be insecure.", {'trunk_id': kwargs['trunk'].id}) def init_handler(resource, event, trigger, payload=None): """Handler for agent init event.""" # Set up agent-side RPC for receiving trunk events; we may want to # make this setup conditional based on server-side capabilities. 
global TRUNK_SKELETON manager = trunk_manager.TrunkManager(trigger.int_br) handler = ovsdb_handler.OVSDBHandler(manager) TRUNK_SKELETON = OVSTrunkSkeleton(handler) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/drivers/openvswitch/agent/exceptions.py0000644000175000017500000000155500000000000031511 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions as n_exc from neutron._i18n import _ class TrunkBridgeNotFound(n_exc.NotFound): message = _("Trunk bridge %(bridge)s could not be found.") class ParentPortNotFound(n_exc.NotFound): message = _("Parent port for trunk bridge %(bridge)s could not be found.") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/drivers/openvswitch/agent/ovsdb_handler.py0000644000175000017500000005410500000000000032141 0ustar00coreycorey00000000000000# Copyright (c) 2016 SUSE Linux Products GmbH # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
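# Illustrative sketch (standalone; handler and names are hypothetical): the
# lock_on_bridge_name decorator defined below resolves the bridge name
# either from kwargs or positionally via the function's co_varnames; this
# reproduces that lookup without the lock.
def _resolve(f, required, args, kwargs):
    try:
        return kwargs[required]
    except KeyError:
        return args[f.__code__.co_varnames.index(required)]
def _handler(self, bridge_name):
    pass
assert _resolve(_handler, 'bridge_name', (None, 'tbr-x'), {}) == 'tbr-x'
assert _resolve(_handler, 'bridge_name', (), {'bridge_name': 'tbr-y'}) == 'tbr-y'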
import functools import time import eventlet from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import context as n_context from neutron_lib.services.trunk import constants from oslo_concurrency import lockutils from oslo_context import context as o_context from oslo_log import log as logging import oslo_messaging from oslo_serialization import jsonutils from neutron._i18n import _ from neutron.agent.common import ovs_lib from neutron.api.rpc.handlers import resources_rpc from neutron.common import utils as common_utils from neutron.plugins.ml2.drivers.openvswitch.agent.common \ import constants as ovs_agent_constants from neutron.services.trunk.drivers.openvswitch.agent import exceptions from neutron.services.trunk.drivers.openvswitch.agent \ import trunk_manager as tman from neutron.services.trunk.drivers.openvswitch import constants as t_const from neutron.services.trunk.drivers.openvswitch import utils from neutron.services.trunk.rpc import agent LOG = logging.getLogger(__name__) DEFAULT_WAIT_FOR_PORT_TIMEOUT = 60 WAIT_BEFORE_TRUNK_DELETE = 3 def lock_on_bridge_name(required_parameter): def func_decor(f): try: br_arg_index = f.__code__.co_varnames.index(required_parameter) except ValueError: raise RuntimeError(_("%s parameter is required for this decorator") % required_parameter) @functools.wraps(f) def inner(*args, **kwargs): try: bridge_name = kwargs[required_parameter] except KeyError: bridge_name = args[br_arg_index] with lockutils.lock(bridge_name): return f(*args, **kwargs) return inner return func_decor def is_trunk_bridge(port_name): return port_name.startswith(t_const.TRUNK_BR_PREFIX) def is_subport(port_name): return port_name.startswith(tman.SubPort.DEV_PREFIX) def is_trunk_service_port(port_name): """True if the port is any of the ports used to realize a trunk.""" return is_trunk_bridge(port_name) or port_name[:2] in ( tman.TrunkParentPort.DEV_PREFIX, tman.SubPort.DEV_PREFIX) def bridge_has_port(bridge, is_port_predicate): """True if there is an OVS port for which is_port_predicate is True. """ try: ifaces = bridge.get_iface_name_list() except RuntimeError as e: LOG.error("Cannot obtain interface list for bridge %(bridge)s: " "%(err)s", {'bridge': bridge.br_name, 'err': e}) return False return any(iface for iface in ifaces if is_port_predicate(iface)) def bridge_has_instance_port(bridge): """True if there is an OVS port that doesn't have bridge or patch ports prefix. """ is_instance_port = lambda p: not is_trunk_service_port(p) return bridge_has_port(bridge, is_instance_port) def bridge_has_service_port(bridge): """True if there is an OVS port that is used to implement a trunk. """ return bridge_has_port(bridge, is_trunk_service_port) @registry.has_registry_receivers class OVSDBHandler(object): """It listens to OVSDB events to create the physical resources associated to a logical trunk in response to OVSDB events (such as VM boot and/or delete). 
""" def __init__(self, trunk_manager): self.timeout = DEFAULT_WAIT_FOR_PORT_TIMEOUT self._context = n_context.get_admin_context_without_session() self.trunk_manager = trunk_manager self.trunk_rpc = agent.TrunkStub() @property def context(self): self._context.request_id = o_context.generate_request_id() return self._context @registry.receives(ovs_agent_constants.OVSDB_RESOURCE, [events.AFTER_READ]) def process_trunk_port_events( self, resource, event, trigger, ovsdb_events): """Process added and removed port events coming from OVSDB monitor.""" for port_event in ovsdb_events['added']: port_name = port_event['name'] if is_trunk_bridge(port_name): LOG.debug("Processing trunk bridge %s", port_name) # As there is active waiting for port to appear, it's handled # in a separate greenthread. # NOTE: port_name is equal to bridge_name at this point. eventlet.spawn_n(self.handle_trunk_add, port_name) for port_event in ovsdb_events['removed']: bridge_name = port_event['external_ids'].get('bridge_name') if bridge_name and is_trunk_bridge(bridge_name): eventlet.spawn_n( self.handle_trunk_remove, bridge_name, port_event) @lock_on_bridge_name(required_parameter='bridge_name') def handle_trunk_add(self, bridge_name): """Create trunk bridge based on parent port ID. This method is decorated with a lock that prevents processing deletion while creation hasn't been finished yet. It's based on the bridge name so we can keep processing other bridges in parallel. :param bridge_name: Name of the created trunk bridge. """ bridge = ovs_lib.OVSBridge(bridge_name) # Handle condition when there was bridge in both added and removed # events and handle_trunk_remove greenthread was executed before # handle_trunk_add if not bridge.bridge_exists(bridge_name): LOG.debug("The bridge %s was deleted before it was handled.", bridge_name) return # Determine the state of the trunk bridge by looking for the VM's port, # i.e. the trunk parent port and/or patch ports to be present. If the # VM is absent, then we clean the dangling bridge. If the VM is present # the value of 'rewire' tells us whether or not the bridge was dealt # with in a previous added event, and thus it has active patch ports. if not self._is_vm_connected(bridge): LOG.debug("No instance port associated to bridge %s could be " "found. Deleting bridge and its resources.", bridge_name) self.trunk_manager.dispose_trunk(bridge) return # Check if the trunk was provisioned in a previous run. This can happen # at agent startup when existing trunks are notified as added events. rewire = bridge_has_service_port(bridge) # Once we get hold of the trunk parent port, we can provision # the OVS dataplane for the trunk. try: self._wire_trunk(bridge, self._get_parent_port(bridge), rewire) except oslo_messaging.MessagingException as e: LOG.error("Got messaging error while processing trunk bridge " "%(bridge_name)s: %(err)s", {'bridge_name': bridge.br_name, 'err': e}) except exceptions.ParentPortNotFound as e: LOG.error("Failed to get parent port for bridge " "%(bridge_name)s: %(err)s", {'bridge_name': bridge.br_name, 'err': e}) @lock_on_bridge_name(required_parameter='bridge_name') def handle_trunk_remove(self, bridge_name, port): """Remove wiring between trunk bridge and integration bridge. The method calls into trunk manager to remove patch ports on integration bridge side and to delete the trunk bridge. It's decorated with a lock to prevent deletion of bridge while creation is still in process. :param bridge_name: Name of the bridge used for locking purposes. 
:param port: Parent port dict. """ # TODO(njohnston): In the case of DPDK with trunk ports, if nova # deletes an interface and then re-adds it we can get a race # condition where the port is re-added and then the bridge is # deleted because we did not properly catch the re-addition. To # solve this would require transitioning to ordered event # resolution, like the L3 agent does with the # ResourceProcessingQueue class. Until we can make that happen, we # try to mitigate the issue by checking if there is a port on the # bridge and if so then do not remove it. bridge = ovs_lib.OVSBridge(bridge_name) time.sleep(WAIT_BEFORE_TRUNK_DELETE) if bridge_has_instance_port(bridge): LOG.debug("The bridge %s has instances attached so it will not " "be deleted.", bridge_name) return try: # TODO(jlibosva): Investigate how to proceed during removal of # trunk bridge that doesn't have metadata stored. parent_port_id, trunk_id, subport_ids = self._get_trunk_metadata( port) # NOTE(status_police): we do not report changes in trunk status on # removal to avoid potential races between agents in case the event # is due to a live migration or reassociation of a trunk to a new # VM. self.unwire_subports_for_trunk(trunk_id, subport_ids) self.trunk_manager.remove_trunk(trunk_id, parent_port_id) except tman.TrunkManagerError as te: LOG.error("Removing trunk %(trunk_id)s failed: %(err)s", {'trunk_id': port['external_ids']['trunk_id'], 'err': te}) else: LOG.debug("Deleted resources associated to trunk: %s", trunk_id) def manages_this_trunk(self, trunk_id): """True if this OVSDB handler manages trunk based on given ID.""" bridge_name = utils.gen_trunk_br_name(trunk_id) return ovs_lib.BaseOVS().bridge_exists(bridge_name) def get_connected_subports_for_trunk(self, trunk_id): """Return the list of subports present on the trunk bridge.""" bridge = ovs_lib.OVSBridge(utils.gen_trunk_br_name(trunk_id)) if not bridge.bridge_exists(bridge.br_name): return [] try: ports = bridge.get_ports_attributes( 'Interface', columns=['name', 'external_ids']) return [ self.trunk_manager.get_port_uuid_from_external_ids(port) for port in ports if is_subport(port['name']) ] except (RuntimeError, tman.TrunkManagerError) as e: LOG.error("Failed to get subports for bridge %(bridge)s: " "%(err)s", {'bridge': bridge.br_name, 'err': e}) return [] def wire_subports_for_trunk(self, context, trunk_id, subports, trunk_bridge=None, parent_port=None): """Create OVS ports associated to the logical subports.""" # Tell the server that subports must be bound to this host. subport_bindings = self.trunk_rpc.update_subport_bindings( context, subports) # Bindings were successful: create the OVS subports. 
subport_bindings = subport_bindings.get(trunk_id, []) subports_mac = {p['id']: p['mac_address'] for p in subport_bindings} subport_ids = [] for subport in subports: try: self.trunk_manager.add_sub_port(trunk_id, subport.port_id, subports_mac[subport.port_id], subport.segmentation_id) except tman.TrunkManagerError as te: LOG.error("Failed to add subport with port ID " "%(subport_port_id)s to trunk with ID " "%(trunk_id)s: %(err)s", {'subport_port_id': subport.port_id, 'trunk_id': trunk_id, 'err': te}) else: subport_ids.append(subport.port_id) try: self._update_trunk_metadata( trunk_bridge, parent_port, trunk_id, subport_ids) except (RuntimeError, exceptions.ParentPortNotFound) as e: LOG.error("Failed to store metadata for trunk %(trunk_id)s: " "%(reason)s", {'trunk_id': trunk_id, 'reason': e}) # NOTE(status_police): Trunk bridge has stale metadata now, it # might cause troubles during deletion. Signal a DEGRADED status; # if the user undo/redo the operation things may go back to # normal. return constants.TRUNK_DEGRADED_STATUS LOG.debug("Added trunk: %s", trunk_id) return self._get_current_status(subports, subport_ids) def unwire_subports_for_trunk(self, trunk_id, subport_ids): """Destroy OVS ports associated to the logical subports.""" ids = [] for subport_id in subport_ids: try: self.trunk_manager.remove_sub_port(trunk_id, subport_id) ids.append(subport_id) except tman.TrunkManagerError as te: LOG.error("Removing subport %(subport_id)s from trunk " "%(trunk_id)s failed: %(err)s", {'subport_id': subport_id, 'trunk_id': trunk_id, 'err': te}) try: # OVS bridge and port to be determined by _update_trunk_metadata bridge = None port = None self._update_trunk_metadata( bridge, port, trunk_id, subport_ids, wire=False) except RuntimeError as e: # NOTE(status_police): Trunk bridge has stale metadata now, it # might cause troubles during deletion. Signal a DEGRADED status; # if the user undo/redo the operation things may go back to # normal. LOG.error("Failed to store metadata for trunk %(trunk_id)s: " "%(reason)s", {'trunk_id': trunk_id, 'reason': e}) return constants.TRUNK_DEGRADED_STATUS except exceptions.ParentPortNotFound as e: # If a user deletes/migrates a VM and remove subports from a trunk # in short sequence, there is a chance that we hit this spot in # that the trunk may still be momentarily bound to the agent. We # should not mark the status as DEGRADED in this case. LOG.debug(e) return self._get_current_status(subport_ids, ids) def report_trunk_status(self, context, trunk_id, status): """Report trunk status to the server.""" self.trunk_rpc.update_trunk_status(context, trunk_id, status) def _get_parent_port(self, trunk_bridge): """Return the OVS trunk parent port plugged on trunk_bridge.""" trunk_br_ports = trunk_bridge.get_ports_attributes( 'Interface', columns=['name', 'external_ids'], if_exists=True) for trunk_br_port in trunk_br_ports: if not is_trunk_service_port(trunk_br_port['name']): return trunk_br_port raise exceptions.ParentPortNotFound(bridge=trunk_bridge.br_name) def _wire_trunk(self, trunk_br, port, rewire=False): """Wire trunk bridge with integration bridge. The method calls into trunk manager to create patch ports for trunk and patch ports for all subports associated with this trunk. If rewire is True, a diff is performed between desired state (the one got from the server) and actual state (the patch ports present on the trunk bridge) and subports are wired/unwired accordingly. :param trunk_br: OVSBridge object representing the trunk bridge. :param port: Parent port dict. 
:param rewire: True if local trunk state must be reconciled with server's state. """ ctx = self.context try: parent_port_id = ( self.trunk_manager.get_port_uuid_from_external_ids(port)) trunk = self.trunk_rpc.get_trunk_details(ctx, parent_port_id) except tman.TrunkManagerError: LOG.error("Can't obtain parent port ID from port %s", port['name']) return except resources_rpc.ResourceNotFound: LOG.error("Port %s has no trunk associated.", parent_port_id) return try: registry.notify( resources.TRUNK, events.BEFORE_CREATE, self, context=ctx, trunk=trunk) self.trunk_manager.create_trunk( trunk.id, trunk.port_id, port['external_ids'].get('attached-mac')) except tman.TrunkManagerError as te: LOG.error("Failed to create trunk %(trunk_id)s: %(err)s", {'trunk_id': trunk.id, 'err': te}) # NOTE(status_police): Trunk couldn't be created so it ends in # ERROR status and resync can fix that later. self.report_trunk_status( ctx, trunk.id, constants.TRUNK_ERROR_STATUS) return # We need to remove stale subports unwire_status = constants.TRUNK_ACTIVE_STATUS if rewire: old_subport_ids = self.get_connected_subports_for_trunk(trunk.id) subports = {p['port_id'] for p in trunk.sub_ports} subports_to_delete = set(old_subport_ids) - subports if subports_to_delete: unwire_status = self.unwire_subports_for_trunk( trunk.id, subports_to_delete) # NOTE(status_police): inform the server whether the operation # was a partial or complete success. Do not inline status. # NOTE: in case of rewiring we readd ports that are already present on # the bridge because e.g. the segmentation ID might have changed (e.g. # agent crashed, port was removed and readded with a different seg ID) wire_status = self.wire_subports_for_trunk( ctx, trunk.id, trunk.sub_ports, trunk_bridge=trunk_br, parent_port=port) if (unwire_status == wire_status and wire_status == constants.TRUNK_ACTIVE_STATUS): status = constants.TRUNK_ACTIVE_STATUS else: status = constants.TRUNK_DEGRADED_STATUS self.report_trunk_status(ctx, trunk.id, status) def _set_trunk_metadata(self, trunk_bridge, port, trunk_id, subport_ids): """Set trunk metadata in OVS port for trunk parent port.""" # update the parent port external_ids to store the trunk bridge # name, trunk id and subport ids so we can easily remove the trunk # bridge and service ports once this port is removed trunk_bridge = trunk_bridge or ovs_lib.OVSBridge( utils.gen_trunk_br_name(trunk_id)) port = port or self._get_parent_port(trunk_bridge) port['external_ids']['bridge_name'] = trunk_bridge.br_name port['external_ids']['trunk_id'] = trunk_id port['external_ids']['subport_ids'] = jsonutils.dumps(subport_ids) trunk_bridge.set_db_attribute( 'Interface', port['name'], 'external_ids', port['external_ids']) def _get_trunk_metadata(self, port): """Get trunk metadata from OVS port.""" parent_port_id = ( self.trunk_manager.get_port_uuid_from_external_ids(port)) trunk_id = port['external_ids'].get('trunk_id') subport_ids = jsonutils.loads( port['external_ids'].get('subport_ids', '[]')) return parent_port_id, trunk_id, subport_ids def _update_trunk_metadata(self, trunk_bridge, port, trunk_id, subport_ids, wire=True): """Update trunk metadata. :param trunk_bridge: OVS trunk bridge. :param port: OVS parent port. :param trunk_id: trunk ID. :param subport_ids: subports affecting the metadata. :param wire: if True subport_ids are added, otherwise removed. 
""" trunk_bridge = trunk_bridge or ovs_lib.OVSBridge( utils.gen_trunk_br_name(trunk_id)) port = port or self._get_parent_port(trunk_bridge) _port_id, _trunk_id, old_subports = self._get_trunk_metadata(port) if wire: new_subports = set(old_subports) | set(subport_ids) else: new_subports = set(old_subports) - set(subport_ids) self._set_trunk_metadata(trunk_bridge, port, trunk_id, new_subports) def _get_current_status(self, expected_subports, actual_subports): """Return the current status of the trunk. If the number of expected subports to be processed does not match the number of subports successfully processed, the status returned is DEGRADED, ACTIVE otherwise. """ # NOTE(status_police): a call to this method should be followed by # a trunk_update_status to report the latest trunk status, but there # can be exceptions (e.g. unwire_subports_for_trunk). if len(expected_subports) != len(actual_subports): return constants.TRUNK_DEGRADED_STATUS else: return constants.TRUNK_ACTIVE_STATUS def _is_vm_connected(self, bridge): """True if an instance is connected to bridge, False otherwise.""" bridge_has_port_predicate = functools.partial( bridge_has_instance_port, bridge) try: common_utils.wait_until_true( bridge_has_port_predicate, timeout=self.timeout) return True except common_utils.WaitTimeout: LOG.error( 'No port present on trunk bridge %(br_name)s ' 'in %(timeout)d seconds.', {'br_name': bridge.br_name, 'timeout': self.timeout}) return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/drivers/openvswitch/agent/trunk_manager.py0000644000175000017500000002700600000000000032164 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants from neutron_lib import exceptions from oslo_log import log as logging from neutron._i18n import _ from neutron.agent.common import ovs_lib from neutron.services.trunk.drivers.openvswitch.agent import exceptions as exc from neutron.services.trunk.drivers.openvswitch import utils LOG = logging.getLogger(__name__) class TrunkManagerError(exceptions.NeutronException): message = _("Error while communicating with OVSDB: %(error)s") def get_br_int_port_name(prefix, port_id): """Return the OVS port name for the given port ID. The port name is the one that plumbs into the integration bridge. """ return ("%si-%s" % (prefix, port_id))[:constants.DEVICE_NAME_MAX_LEN] def get_br_trunk_port_name(prefix, port_id): """Return the OVS port name for the given port ID. The port name is the one that plumbs into the trunk bridge. 
""" return ("%st-%s" % (prefix, port_id))[:constants.DEVICE_NAME_MAX_LEN] def get_patch_peer_attrs(peer_name, port_mac=None, port_id=None): external_ids = {} if port_mac: external_ids['attached-mac'] = port_mac if port_id: external_ids['iface-id'] = port_id attrs = [('type', 'patch'), ('options', {'peer': peer_name})] if external_ids: attrs.append( ('external_ids', external_ids)) return attrs class TrunkBridge(ovs_lib.OVSBridge): """An OVS trunk bridge. A trunk bridge has a name that follows a specific naming convention. """ def __init__(self, trunk_id): name = utils.gen_trunk_br_name(trunk_id) super(TrunkBridge, self).__init__(name) def exists(self): return self.bridge_exists(self.br_name) class TrunkParentPort(object): """An OVS trunk parent port. A trunk parent port is represented in OVS with two patch ports that connect a trunk bridge and the integration bridge respectively. These patch ports follow strict naming conventions: tpi- for the patch port that goes into the integration bridge, and tpt- for the patch port that goes into the trunk bridge. """ DEV_PREFIX = 'tp' def __init__(self, trunk_id, port_id, port_mac=None): self.trunk_id = trunk_id self.port_id = port_id self.port_mac = port_mac self.bridge = TrunkBridge(self.trunk_id) self.patch_port_int_name = get_br_int_port_name( self.DEV_PREFIX, port_id) self.patch_port_trunk_name = get_br_trunk_port_name( self.DEV_PREFIX, port_id) self._transaction = None def plug(self, br_int): """Plug patch ports between trunk bridge and given bridge. The method plugs one patch port on the given bridge side using port MAC and ID as external IDs. The other endpoint of patch port is attached to the trunk bridge. Everything is done in a single OVSDB transaction so either all operations succeed or fail. :param br_int: an integration bridge where peer endpoint of patch port will be created. """ # NOTE(jlibosva): OVSDB is an api so it doesn't matter whether we # use self.bridge or br_int ovsdb = self.bridge.ovsdb # Once the bridges are connected with the following patch ports, # the ovs agent will recognize the ports for processing and it will # take over the wiring process and everything that entails. # REVISIT(rossella_s): revisit this integration part, should tighter # control over the wiring logic for trunk ports be required. patch_int_attrs = get_patch_peer_attrs( self.patch_port_trunk_name, self.port_mac, self.port_id) patch_trunk_attrs = get_patch_peer_attrs(self.patch_port_int_name, self.port_mac, self.port_id) with ovsdb.transaction() as txn: txn.add(ovsdb.add_port(br_int.br_name, self.patch_port_int_name)) txn.add(ovsdb.db_set('Interface', self.patch_port_int_name, *patch_int_attrs)) txn.add(ovsdb.add_port(self.bridge.br_name, self.patch_port_trunk_name)) txn.add(ovsdb.db_set('Interface', self.patch_port_trunk_name, *patch_trunk_attrs)) def unplug(self, bridge): """Unplug the trunk from bridge. Method unplugs in single OVSDB transaction the trunk bridge and patch port on provided bridge. :param bridge: bridge that has peer side of patch port for this subport. """ ovsdb = self.bridge.ovsdb with ovsdb.transaction() as txn: txn.add(ovsdb.del_br(self.bridge.br_name)) txn.add(ovsdb.del_port(self.patch_port_int_name, bridge.br_name)) class SubPort(TrunkParentPort): """An OVS trunk subport. A subport is represented in OVS with two patch ports that connect a trunk bridge and the integration bridge respectively. 
These patch ports follow strict naming conventions: spi- for the patch port that goes into the integration bridge, and spt- for the patch port that goes into the trunk bridge. """ DEV_PREFIX = 'sp' def __init__(self, trunk_id, port_id, port_mac=None, segmentation_id=None): super(SubPort, self).__init__(trunk_id, port_id, port_mac) self.segmentation_id = segmentation_id def plug(self, br_int): """Plug patch ports between trunk bridge and given bridge. The method plugs one patch port on the given bridge side using port MAC and ID as external IDs. The other endpoint of patch port is attached to the trunk bridge. Then it sets vlan tag represented by segmentation_id. Everything is done in a single OVSDB transaction so either all operations succeed or fail. :param br_int: an integration bridge where peer endpoint of patch port will be created. """ ovsdb = self.bridge.ovsdb with ovsdb.transaction() as txn: super(SubPort, self).plug(br_int) txn.add(ovsdb.db_set( "Port", self.patch_port_trunk_name, ("tag", self.segmentation_id))) def unplug(self, bridge): """Unplug the sub port from the bridge. Method unplugs in single OVSDB transaction both endpoints of patch ports that represent the subport. :param bridge: bridge that has peer side of patch port for this subport. """ ovsdb = self.bridge.ovsdb with ovsdb.transaction() as txn: txn.add(ovsdb.del_port(self.patch_port_trunk_name, self.bridge.br_name)) txn.add(ovsdb.del_port(self.patch_port_int_name, bridge.br_name)) class TrunkManager(object): """It implements the OVS trunk dataplane. It interfaces with the OVSDB server to execute OVS commands. """ def __init__(self, br_int): self.br_int = br_int def create_trunk(self, trunk_id, port_id, port_mac): """Create the trunk. This patches the bridge for trunk_id with the integration bridge by means of the parent port identified by port_id. :param trunk_id: ID of the trunk. :param port_id: ID of the parent port. :param port_mac: the MAC address of the parent port. :raises: TrunkBridgeNotFound: in case trunk bridge does not exist. """ trunk = TrunkParentPort(trunk_id, port_id, port_mac) try: if not trunk.bridge.exists(): raise exc.TrunkBridgeNotFound(bridge=trunk.bridge.br_name) trunk.plug(self.br_int) except RuntimeError as e: raise TrunkManagerError(error=e) def remove_trunk(self, trunk_id, port_id): """Remove the trunk bridge.""" trunk = TrunkParentPort(trunk_id, port_id) try: if trunk.bridge.exists(): trunk.unplug(self.br_int) else: LOG.debug("Trunk bridge with ID %s does not exist.", trunk_id) except RuntimeError as e: raise TrunkManagerError(error=e) def dispose_trunk(self, trunk_bridge): """Clean up all the OVS resources associated with trunk_bridge.""" ovsdb = trunk_bridge.ovsdb patch_peers = [] try: patch_peers = trunk_bridge.get_ports_attributes( 'Interface', columns=['options']) with trunk_bridge.ovsdb.transaction() as txn: for patch_peer in patch_peers: peer_name = patch_peer['options'].get('peer') if peer_name: txn.add(ovsdb.del_port(peer_name, self.br_int.br_name)) txn.add(ovsdb.del_br(trunk_bridge.br_name)) LOG.debug("Deleted bridge '%s' and patch peers '%s'.", trunk_bridge.br_name, patch_peers) except RuntimeError as e: LOG.error("Could not delete '%(peers)s' associated with " "trunk bridge %(name)s. Reason: %(reason)s.", {'peers': patch_peers, 'name': trunk_bridge.br_name, 'reason': e}) def add_sub_port(self, trunk_id, port_id, port_mac, segmentation_id): """Create a sub_port. :param trunk_id: ID of the trunk. :param port_id: ID of the subport.
:param segmentation_id: segmentation ID associated with this subport. :param port_mac: MAC address of the subport. """ sub_port = SubPort(trunk_id, port_id, port_mac, segmentation_id) # If creating of parent trunk bridge takes longer than API call for # creating subport then bridge doesn't exist yet. try: if not sub_port.bridge.exists(): raise exc.TrunkBridgeNotFound(bridge=sub_port.bridge.br_name) sub_port.plug(self.br_int) except RuntimeError as e: raise TrunkManagerError(error=e) def remove_sub_port(self, trunk_id, port_id): """Remove a sub_port. :param trunk_id: ID of the trunk. :param port_id: ID of the subport. """ sub_port = SubPort(trunk_id, port_id) # Trunk bridge might have been deleted by calling delete_trunk() before # remove_sub_port(). try: if sub_port.bridge.exists(): sub_port.unplug(self.br_int) else: LOG.debug("Trunk bridge with ID %s does not exist.", trunk_id) except RuntimeError as e: raise TrunkManagerError(error=e) def get_port_uuid_from_external_ids(self, port): """Return the port UUID from the port metadata.""" try: return self.br_int.portid_from_external_ids( port['external_ids']) except RuntimeError as e: raise TrunkManagerError(error=e) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/drivers/openvswitch/constants.py0000644000175000017500000000116700000000000030245 0ustar00coreycorey00000000000000# (c) Copyright 2016 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. TRUNK_BR_PREFIX = 'tbr-' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/drivers/openvswitch/driver.py0000644000175000017500000000470000000000000027520 0ustar00coreycorey00000000000000# Copyright 2016 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
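# Illustrative sketch (the UUID is hypothetical): the patch-port names built
# by get_br_int_port_name() and get_br_trunk_port_name() above combine the
# 'tp'/'sp' prefix, an 'i'/'t' side marker and the port UUID, truncated to
# the kernel device-name limit (DEVICE_NAME_MAX_LEN, 15 on Linux).
DEVICE_NAME_MAX_LEN = 15  # mirrors the neutron_lib constant
port_id = '8b6e1f11-2b3c-4d5e-9f00-abcdef012345'
assert ('tpi-%s' % port_id)[:DEVICE_NAME_MAX_LEN] == 'tpi-8b6e1f11-2b'
assert ('tpt-%s' % port_id)[:DEVICE_NAME_MAX_LEN] == 'tpt-8b6e1f11-2b'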
from neutron_lib.api.definitions import portbindings from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib import constants from neutron_lib.services.trunk import constants as trunk_consts from oslo_config import cfg from oslo_log import log as logging from neutron.plugins.ml2.drivers.openvswitch.agent.common import ( constants as agent_consts) from neutron.services.trunk.drivers import base from neutron.services.trunk.drivers.openvswitch import utils LOG = logging.getLogger(__name__) NAME = 'openvswitch' SUPPORTED_INTERFACES = ( portbindings.VIF_TYPE_OVS, portbindings.VIF_TYPE_VHOST_USER, ) SUPPORTED_SEGMENTATION_TYPES = ( trunk_consts.SEGMENTATION_TYPE_VLAN, ) DRIVER = None class OVSDriver(base.DriverBase): @property def is_loaded(self): try: return NAME in cfg.CONF.ml2.mechanism_drivers except cfg.NoSuchOptError: return False @classmethod def create(cls): return OVSDriver(NAME, SUPPORTED_INTERFACES, SUPPORTED_SEGMENTATION_TYPES, constants.AGENT_TYPE_OVS) def register(): """Register the driver.""" global DRIVER DRIVER = OVSDriver.create() # To set the bridge_name in a parent port's vif_details. registry.subscribe(vif_details_bridge_name_handler, agent_consts.OVS_BRIDGE_NAME, events.BEFORE_READ) LOG.debug('Open vSwitch trunk driver registered') def vif_details_bridge_name_handler(resource, event, set_br_name, payload=None): """If port is a trunk port, generate a bridge_name for its vif_details.""" port = payload.metadata['port'] if 'trunk_details' in port: set_br_name(utils.gen_trunk_br_name(port['trunk_details']['trunk_id'])) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/drivers/openvswitch/utils.py0000644000175000017500000000152500000000000027367 0ustar00coreycorey00000000000000# (c) Copyright 2016 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
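# Illustrative sketch (hypothetical trunk UUID): gen_trunk_br_name() below
# prefixes the trunk ID with 'tbr-' and truncates to DEVICE_NAME_MAX_LEN - 1
# (i.e. 14 characters), leaving one character of headroom under the kernel
# interface-name limit.
trunk_id = '4e0b2f33-9c1d-4a5b-8e6f-0123456789ab'
assert ('tbr-' + trunk_id)[:15 - 1] == 'tbr-4e0b2f33-9'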
from neutron_lib import constants from neutron.services.trunk.drivers.openvswitch import constants as ovs_const def gen_trunk_br_name(trunk_id): return ((ovs_const.TRUNK_BR_PREFIX + trunk_id) [:constants.DEVICE_NAME_MAX_LEN - 1]) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3790452 neutron-16.0.0.0b2.dev214/neutron/services/trunk/drivers/ovn/0000755000175000017500000000000000000000000024103 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/drivers/ovn/__init__.py0000644000175000017500000000000000000000000026202 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/drivers/ovn/trunk_driver.py0000644000175000017500000001740100000000000027176 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import portbindings from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import context as n_context from neutron_lib import exceptions as n_exc from neutron_lib.services.trunk import constants as trunk_consts from oslo_config import cfg from oslo_log import log from neutron.common.ovn.constants import OVN_ML2_MECH_DRIVER_NAME from neutron.objects import ports as port_obj from neutron.services.trunk.drivers import base as trunk_base SUPPORTED_INTERFACES = ( portbindings.VIF_TYPE_OVS, portbindings.VIF_TYPE_VHOST_USER, ) SUPPORTED_SEGMENTATION_TYPES = ( trunk_consts.SEGMENTATION_TYPE_VLAN, ) LOG = log.getLogger(__name__) class OVNTrunkHandler(object): def __init__(self, plugin_driver): self.plugin_driver = plugin_driver def _set_sub_ports(self, parent_port, subports): txn = self.plugin_driver._nb_ovn.transaction context = n_context.get_admin_context() for port in subports: with context.session.begin(subtransactions=True), ( txn(check_error=True)) as ovn_txn: self._set_binding_profile(context, port, parent_port, ovn_txn) def _unset_sub_ports(self, subports): txn = self.plugin_driver._nb_ovn.transaction context = n_context.get_admin_context() for port in subports: with context.session.begin(subtransactions=True), ( txn(check_error=True)) as ovn_txn: self._unset_binding_profile(context, port, ovn_txn) def _set_binding_profile(self, context, subport, parent_port, ovn_txn): LOG.debug("Setting parent %s for subport %s", parent_port, subport.port_id) db_port = port_obj.Port.get_object(context, id=subport.port_id) if not db_port: LOG.debug("Port not found while trying to set " "binding_profile: %s", subport.port_id) return try: # NOTE(flaviof): We expect binding's host to be set. Otherwise, # sub-port will not transition from DOWN to ACTIVE. 
db_port.device_owner = trunk_consts.TRUNK_SUBPORT_OWNER for binding in db_port.bindings: binding.profile['parent_name'] = parent_port binding.profile['tag'] = subport.segmentation_id # host + port_id is primary key port_obj.PortBinding.update_object( context, {'profile': binding.profile, 'vif_type': portbindings.VIF_TYPE_OVS}, port_id=subport.port_id, host=binding.host) db_port.update() except n_exc.ObjectNotFound: LOG.debug("Port not found while trying to set " "binding_profile: %s", subport.port_id) return ovn_txn.add(self.plugin_driver._nb_ovn.set_lswitch_port( lport_name=subport.port_id, parent_name=parent_port, tag=subport.segmentation_id)) LOG.debug("Done setting parent %s for subport %s", parent_port, subport.port_id) def _unset_binding_profile(self, context, subport, ovn_txn): LOG.debug("Unsetting parent for subport %s", subport.port_id) db_port = port_obj.Port.get_object(context, id=subport.port_id) if not db_port: LOG.debug("Port not found while trying to unset " "binding_profile: %s", subport.port_id) return try: db_port.device_owner = '' for binding in db_port.bindings: binding.profile.pop('tag', None) binding.profile.pop('parent_name', None) # host + port_id is primary key port_obj.PortBinding.update_object( context, {'profile': binding.profile, 'vif_type': portbindings.VIF_TYPE_UNBOUND}, port_id=subport.port_id, host=binding.host) port_obj.PortBindingLevel.delete_objects( context, port_id=subport.port_id, host=binding.host) db_port.update() except n_exc.ObjectNotFound: LOG.debug("Port not found while trying to unset " "binding_profile: %s", subport.port_id) return ovn_txn.add(self.plugin_driver._nb_ovn.set_lswitch_port( lport_name=subport.port_id, parent_name=[], up=False, tag=[])) LOG.debug("Done unsetting parent for subport %s", subport.port_id) def trunk_created(self, trunk): if trunk.sub_ports: self._set_sub_ports(trunk.port_id, trunk.sub_ports) trunk.update(status=trunk_consts.TRUNK_ACTIVE_STATUS) def trunk_deleted(self, trunk): if trunk.sub_ports: self._unset_sub_ports(trunk.sub_ports) def subports_added(self, trunk, subports): if subports: self._set_sub_ports(trunk.port_id, subports) trunk.update(status=trunk_consts.TRUNK_ACTIVE_STATUS) def subports_deleted(self, trunk, subports): if subports: self._unset_sub_ports(subports) trunk.update(status=trunk_consts.TRUNK_ACTIVE_STATUS) def trunk_event(self, resource, event, trunk_plugin, payload): if event == events.AFTER_CREATE: self.trunk_created(payload.current_trunk) elif event == events.AFTER_DELETE: self.trunk_deleted(payload.original_trunk) def subport_event(self, resource, event, trunk_plugin, payload): if event == events.AFTER_CREATE: self.subports_added(payload.original_trunk, payload.subports) elif event == events.AFTER_DELETE: self.subports_deleted(payload.original_trunk, payload.subports) class OVNTrunkDriver(trunk_base.DriverBase): @property def is_loaded(self): try: return OVN_ML2_MECH_DRIVER_NAME in cfg.CONF.ml2.mechanism_drivers except cfg.NoSuchOptError: return False @registry.receives(resources.TRUNK_PLUGIN, [events.AFTER_INIT]) def register(self, resource, event, trigger, payload=None): super(OVNTrunkDriver, self).register( resource, event, trigger, payload=payload) self._handler = OVNTrunkHandler(self.plugin_driver) for trunk_event in (events.AFTER_CREATE, events.AFTER_DELETE): registry.subscribe(self._handler.trunk_event, resources.TRUNK, trunk_event) registry.subscribe(self._handler.subport_event, resources.SUBPORTS, trunk_event) @classmethod def create(cls, plugin_driver): cls.plugin_driver = 
plugin_driver return cls(OVN_ML2_MECH_DRIVER_NAME, SUPPORTED_INTERFACES, SUPPORTED_SEGMENTATION_TYPES, None, can_trunk_bound_port=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/exceptions.py0000644000175000017500000000637700000000000024373 0ustar00coreycorey00000000000000# Copyright 2016 Hewlett Packard Enterprise Development Company, LP # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron._i18n import _ from neutron_lib import exceptions as n_exc class TrunkPortInUse(n_exc.InUse): message = _("Port %(port_id)s is in use by another trunk.") class TrunkNotFound(n_exc.NotFound): message = _("Trunk %(trunk_id)s could not be found.") class SubPortNotFound(n_exc.NotFound): message = _("SubPort on trunk %(trunk_id)s with parent port %(port_id)s " "could not be found.") class DuplicateSubPort(n_exc.InUse): message = _("segmentation_type %(segmentation_type)s and segmentation_id " "%(segmentation_id)s already in use on trunk %(trunk_id)s.") class ParentPortInUse(n_exc.InUse): message = _("Port %(port_id)s is currently in use and is not " "eligible for use as a parent port.") class SubPortMtuGreaterThanTrunkPortMtu(n_exc.Conflict): message = _("MTU %(port_mtu)s of subport %(port_id)s cannot be greater " "than MTU %(trunk_mtu)s of trunk %(trunk_id)s.") class PortInUseAsTrunkParent(n_exc.InUse): message = _("Port %(port_id)s is currently a parent port " "for trunk %(trunk_id)s.") class PortInUseAsSubPort(n_exc.InUse): message = _("Port %(port_id)s is currently a subport for " "trunk %(trunk_id)s.") class TrunkInUse(n_exc.InUse): message = _("Trunk %(trunk_id)s is currently in use.") class TrunkDisabled(n_exc.Conflict): message = _("Trunk %(trunk_id)s is currently disabled.") class TrunkInErrorState(n_exc.Conflict): message = _("Trunk %(trunk_id)s is in error state. Attempt " "to resolve the error condition before proceeding.") class IncompatibleTrunkPluginConfiguration(n_exc.NeutronException): message = _("Cannot load trunk plugin: no compatible core plugin " "configuration is found.") class IncompatibleDriverSegmentationTypes(n_exc.NeutronException): message = _("Cannot load trunk plugin: no compatible segmentation " "type configuration can be found amongst list of loaded " "drivers.") class SegmentationTypeValidatorNotFound(n_exc.NotFound): message = _("Validator not found for segmentation type %(seg_type)s. 
" "It must be registered before the plugin init can " "proceed.") class TrunkPluginDriverConflict(n_exc.Conflict): message = _("A misconfiguration in the environment prevents the " "operation from completing, please, contact the admin.") class SubPortBindingError(n_exc.NeutronException): message = _("Failed to set port binding for port %(port_id)s on trunk " "%(trunk_id)s.") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/models.py0000644000175000017500000000630000000000000023457 0ustar00coreycorey00000000000000# Copyright 2016 Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import constants as db_const from neutron_lib.db import model_base from neutron_lib.services.trunk import constants import sqlalchemy as sa from sqlalchemy import sql from neutron.db import models_v2 from neutron.db import standard_attr class Trunk(standard_attr.HasStandardAttributes, model_base.BASEV2, model_base.HasId, model_base.HasProject): admin_state_up = sa.Column( sa.Boolean(), nullable=False, server_default=sql.true()) name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE)) port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete='CASCADE'), nullable=False, unique=True) status = sa.Column( sa.String(16), nullable=False, server_default=constants.TRUNK_ACTIVE_STATUS) port = sa.orm.relationship( models_v2.Port, backref=sa.orm.backref('trunk_port', lazy='joined', uselist=False, cascade='delete')) sub_ports = sa.orm.relationship( 'SubPort', lazy='subquery', uselist=True, cascade="all, delete-orphan") api_collections = ['trunks'] collection_resource_map = {'trunks': 'trunk'} tag_support = True class SubPort(model_base.BASEV2): port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete='CASCADE'), primary_key=True) port = sa.orm.relationship( models_v2.Port, backref=sa.orm.backref('sub_port', lazy='joined', uselist=False, cascade='delete')) trunk_id = sa.Column(sa.String(36), sa.ForeignKey('trunks.id', ondelete='CASCADE'), nullable=False) segmentation_type = sa.Column(sa.String(32), nullable=False) segmentation_id = sa.Column(sa.Integer, nullable=False) __table_args__ = ( sa.UniqueConstraint( 'trunk_id', 'segmentation_type', 'segmentation_id', name='uniq_subport0trunk_id0segmentation_type0segmentation_id'), model_base.BASEV2.__table_args__ ) # NOTE(armax) constraints like the following are implemented via # business logic rules: # # Deletion of a trunk automatically deletes all of its subports; # Deletion of a (child) port referred by a subport is forbidden; # Deletion of a (parent) port referred by a trunk is forbidden; # A port cannot be a subport and a trunk port at the same time (nested). 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/plugin.py0000644000175000017500000004715200000000000023504 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from neutron_lib.api.definitions import port as port_def from neutron_lib.api.definitions import portbindings from neutron_lib.api.definitions import trunk as trunk_apidef from neutron_lib.api.definitions import trunk_details from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import context from neutron_lib.db import api as db_api from neutron_lib.db import resource_extend from neutron_lib.plugins import directory from neutron_lib.services import base as service_base from neutron_lib.services.trunk import constants from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import uuidutils from neutron.db import db_base_plugin_common from neutron.objects import base as objects_base from neutron.objects import trunk as trunk_objects from neutron.services.trunk import callbacks from neutron.services.trunk import drivers from neutron.services.trunk import exceptions as trunk_exc from neutron.services.trunk import rules from neutron.services.trunk.seg_types import validators LOG = logging.getLogger(__name__) @resource_extend.has_resource_extenders @registry.has_registry_receivers class TrunkPlugin(service_base.ServicePluginBase): supported_extension_aliases = [trunk_apidef.ALIAS, trunk_details.ALIAS] __native_pagination_support = True __native_sorting_support = True __filter_validation_support = True def __init__(self): self._rpc_backend = None self._drivers = [] self._segmentation_types = {} self._interfaces = set() self._agent_types = set() drivers.register() registry.subscribe(rules.enforce_port_deletion_rules, resources.PORT, events.BEFORE_DELETE) registry.publish(resources.TRUNK_PLUGIN, events.AFTER_INIT, self) for driver in self._drivers: LOG.debug('Trunk plugin loaded with driver %s', driver.name) self.check_compatibility() @staticmethod @resource_extend.extends([port_def.COLLECTION_NAME]) def _extend_port_trunk_details(port_res, port_db): """Add trunk details to a port.""" if port_db.trunk_port: subports = { x.port_id: {'segmentation_id': x.segmentation_id, 'segmentation_type': x.segmentation_type, 'port_id': x.port_id} for x in port_db.trunk_port.sub_ports } core_plugin = directory.get_plugin() ports = core_plugin.get_ports( context.get_admin_context(), filters={'id': subports}) for port in ports: subports[port['id']]['mac_address'] = port['mac_address'] trunk_details = {'trunk_id': port_db.trunk_port.id, 'sub_ports': [x for x in subports.values()]} port_res['trunk_details'] = trunk_details return port_res def check_compatibility(self): """Verify the plugin can load correctly and fail otherwise.""" self.check_driver_compatibility() 
self.check_segmentation_compatibility() def check_driver_compatibility(self): """Fail to load if no compatible driver is found.""" if not any([driver.is_loaded for driver in self._drivers]): raise trunk_exc.IncompatibleTrunkPluginConfiguration() def check_segmentation_compatibility(self): """Fail to load if segmentation type conflicts are found. In multi-driver deployments each loaded driver must support the same set of segmentation types consistently. """ # Get list of segmentation types for the loaded drivers. list_of_driver_seg_types = [ set(driver.segmentation_types) for driver in self._drivers if driver.is_loaded ] # If not empty, check that there is at least one we can use. compat_segmentation_types = set() if list_of_driver_seg_types: compat_segmentation_types = ( set.intersection(*list_of_driver_seg_types)) if not compat_segmentation_types: raise trunk_exc.IncompatibleDriverSegmentationTypes() # If there is at least one, make sure the validator is defined. try: for seg_type in compat_segmentation_types: self.add_segmentation_type( seg_type, validators.get_validator(seg_type)) except KeyError: raise trunk_exc.SegmentationTypeValidatorNotFound( seg_type=seg_type) def set_rpc_backend(self, backend): self._rpc_backend = backend def is_rpc_enabled(self): return self._rpc_backend is not None def register_driver(self, driver): """Register driver with trunk plugin.""" if driver.agent_type: self._agent_types.add(driver.agent_type) self._interfaces = self._interfaces | set(driver.interfaces) self._drivers.append(driver) @property def registered_drivers(self): """The registered drivers.""" return self._drivers @property def supported_interfaces(self): """A set of supported interfaces.""" return self._interfaces @property def supported_agent_types(self): """A set of supported agent types.""" return self._agent_types def add_segmentation_type(self, segmentation_type, id_validator): self._segmentation_types[segmentation_type] = id_validator LOG.debug('Added support for segmentation type %s', segmentation_type) def validate(self, context, trunk): """Return a valid trunk or raise an error if unable to do so.""" trunk_details = trunk trunk_validator = rules.TrunkPortValidator(trunk['port_id']) trunk_details['port_id'] = trunk_validator.validate(context) subports_validator = rules.SubPortsValidator( self._segmentation_types, trunk['sub_ports'], trunk['port_id']) trunk_details['sub_ports'] = subports_validator.validate(context) return trunk_details def get_plugin_description(self): return "Trunk port service plugin" @classmethod def get_plugin_type(cls): return "trunk" @db_base_plugin_common.filter_fields @db_base_plugin_common.convert_result_to_dict def get_trunk(self, context, trunk_id, fields=None): """Return information for the specified trunk.""" return self._get_trunk(context, trunk_id) @db_base_plugin_common.filter_fields @db_base_plugin_common.convert_result_to_dict def get_trunks(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """Return information for available trunks.""" filters = filters or {} pager = objects_base.Pager(sorts=sorts, limit=limit, page_reverse=page_reverse, marker=marker) return trunk_objects.Trunk.get_objects(context, _pager=pager, **filters) @db_base_plugin_common.convert_result_to_dict def create_trunk(self, context, trunk): """Create a trunk.""" trunk = self.validate(context, trunk['trunk']) sub_ports = [trunk_objects.SubPort( context=context, port_id=p['port_id'], segmentation_id=p['segmentation_id'],
segmentation_type=p['segmentation_type']) for p in trunk['sub_ports']] admin_state_up = trunk.get('admin_state_up', True) # NOTE(status_police): a trunk is created in DOWN status. Depending # on the nature of the create request, a driver may set the status # immediately to ACTIVE if no physical provisioning is required. # Otherwise a transition to BUILD (or ERROR) should be expected # depending on how the driver reacts. PRECOMMIT failures prevent the # trunk from being created altogether. trunk_description = trunk.get('description', "") trunk_obj = trunk_objects.Trunk(context=context, admin_state_up=admin_state_up, id=uuidutils.generate_uuid(), name=trunk.get('name', ""), description=trunk_description, project_id=trunk['tenant_id'], port_id=trunk['port_id'], status=constants.TRUNK_DOWN_STATUS, sub_ports=sub_ports) with db_api.CONTEXT_WRITER.using(context): trunk_obj.create() payload = callbacks.TrunkPayload(context, trunk_obj.id, current_trunk=trunk_obj) registry.notify( resources.TRUNK, events.PRECOMMIT_CREATE, self, payload=payload) registry.notify( resources.TRUNK, events.AFTER_CREATE, self, payload=payload) return trunk_obj @db_base_plugin_common.convert_result_to_dict def update_trunk(self, context, trunk_id, trunk): """Update information for the specified trunk.""" trunk_data = trunk['trunk'] with db_api.CONTEXT_WRITER.using(context): trunk_obj = self._get_trunk(context, trunk_id) original_trunk = copy.deepcopy(trunk_obj) # NOTE(status_police): a trunk status should not change during an # update_trunk(), even in face of PRECOMMIT failures. This is # because only name and admin_state_up are being affected, and # these are DB properties only. trunk_obj.update_fields(trunk_data, reset_changes=True) trunk_obj.update() payload = events.DBEventPayload( context, resource_id=trunk_id, states=(original_trunk,), desired_state=trunk_obj, request_body=trunk_data) registry.publish(resources.TRUNK, events.PRECOMMIT_UPDATE, self, payload=payload) registry.notify(resources.TRUNK, events.AFTER_UPDATE, self, payload=callbacks.TrunkPayload( context, trunk_id, original_trunk=original_trunk, current_trunk=trunk_obj)) return trunk_obj def delete_trunk(self, context, trunk_id): """Delete the specified trunk.""" with db_api.CONTEXT_WRITER.using(context): trunk = self._get_trunk(context, trunk_id) rules.trunk_can_be_managed(context, trunk) trunk_port_validator = rules.TrunkPortValidator(trunk.port_id) if trunk_port_validator.can_be_trunked_or_untrunked(context): # NOTE(status_police): when a trunk is deleted, the logical # object disappears from the datastore, therefore there is no # status transition involved. If PRECOMMIT failures occur, # the trunk remains in the status where it was. 
try: trunk.delete() except Exception as e: with excutils.save_and_reraise_exception(): LOG.warning('Trunk driver raised exception when ' 'deleting trunk port %s: %s', trunk_id, str(e)) payload = callbacks.TrunkPayload(context, trunk_id, original_trunk=trunk) registry.notify(resources.TRUNK, events.PRECOMMIT_DELETE, self, payload=payload) else: LOG.info('Trunk driver does not consider trunk %s ' 'untrunkable', trunk_id) raise trunk_exc.TrunkInUse(trunk_id=trunk_id) registry.notify(resources.TRUNK, events.AFTER_DELETE, self, payload=payload) @db_base_plugin_common.convert_result_to_dict def add_subports(self, context, trunk_id, subports): """Add one or more subports to trunk.""" with db_api.CONTEXT_WRITER.using(context): trunk = self._get_trunk(context, trunk_id) # Check for basic validation since the request body here is not # automatically validated by the API layer. subports = subports['sub_ports'] subports_validator = rules.SubPortsValidator( self._segmentation_types, subports, trunk['port_id']) subports = subports_validator.validate( context, basic_validation=True) added_subports = [] rules.trunk_can_be_managed(context, trunk) original_trunk = copy.deepcopy(trunk) # NOTE(status_police): the trunk status should transition to # DOWN (and finally in ACTIVE or ERROR), only if it is not in # ERROR status already. A user should attempt to resolve the ERROR # condition before adding more subports to the trunk. Should a # trunk be in DOWN or BUILD state (e.g. when dealing with # multiple concurrent requests), the status is still forced to # DOWN and thus can potentially overwrite an interleaving state # change to ACTIVE. Eventually the driver should bring the status # back to ACTIVE or ERROR. if trunk.status == constants.TRUNK_ERROR_STATUS: raise trunk_exc.TrunkInErrorState(trunk_id=trunk_id) else: trunk.update(status=constants.TRUNK_DOWN_STATUS) for subport in subports: obj = trunk_objects.SubPort( context=context, trunk_id=trunk_id, port_id=subport['port_id'], segmentation_type=subport['segmentation_type'], segmentation_id=subport['segmentation_id']) obj.create() trunk['sub_ports'].append(obj) added_subports.append(obj) payload = callbacks.TrunkPayload(context, trunk_id, current_trunk=trunk, original_trunk=original_trunk, subports=added_subports) if added_subports: registry.notify(resources.SUBPORTS, events.PRECOMMIT_CREATE, self, payload=payload) if added_subports: registry.notify( resources.SUBPORTS, events.AFTER_CREATE, self, payload=payload) return trunk @db_base_plugin_common.convert_result_to_dict def remove_subports(self, context, trunk_id, subports): """Remove one or more subports from trunk.""" subports = subports['sub_ports'] with db_api.CONTEXT_WRITER.using(context): trunk = self._get_trunk(context, trunk_id) original_trunk = copy.deepcopy(trunk) rules.trunk_can_be_managed(context, trunk) subports_validator = rules.SubPortsValidator( self._segmentation_types, subports) # the subports are being removed, therefore we do not need to # enforce any specific trunk rules, other than basic validation # of the request body. 
subports = subports_validator.validate( context, basic_validation=True, trunk_validation=False) current_subports = {p.port_id: p for p in trunk.sub_ports} removed_subports = [] for subport in subports: subport_obj = current_subports.pop(subport['port_id'], None) if not subport_obj: raise trunk_exc.SubPortNotFound(trunk_id=trunk_id, port_id=subport['port_id']) subport_obj.delete() removed_subports.append(subport_obj) del trunk.sub_ports[:] trunk.sub_ports.extend(current_subports.values()) # NOTE(status_police): the trunk status should transition to # DOWN irrespective of the status it is in, to allow # the user to resolve potential conflicts due to prior add_subports # operations. # Should a trunk be in DOWN or BUILD state (e.g. when dealing # with multiple concurrent requests), the status is still forced # to DOWN. See add_subports() for more details. trunk.update(status=constants.TRUNK_DOWN_STATUS) payload = callbacks.TrunkPayload(context, trunk_id, current_trunk=trunk, original_trunk=original_trunk, subports=removed_subports) if removed_subports: registry.notify(resources.SUBPORTS, events.PRECOMMIT_DELETE, self, payload=payload) if removed_subports: registry.notify( resources.SUBPORTS, events.AFTER_DELETE, self, payload=payload) return trunk @db_base_plugin_common.filter_fields def get_subports(self, context, trunk_id, fields=None): """Return subports for the specified trunk.""" trunk = self.get_trunk(context, trunk_id) return {'sub_ports': trunk['sub_ports']} def _get_trunk(self, context, trunk_id): """Return the trunk object or raise if not found.""" obj = trunk_objects.Trunk.get_object(context, id=trunk_id) if obj is None: raise trunk_exc.TrunkNotFound(trunk_id=trunk_id) return obj # NOTE(tidwellr) Consider keying off of PRECOMMIT_UPDATE if we find # AFTER_UPDATE to be problematic for setting trunk status when a # parent port becomes unbound. @registry.receives(resources.PORT, [events.AFTER_UPDATE]) def _trigger_trunk_status_change(self, resource, event, trigger, **kwargs): updated_port = kwargs['port'] trunk_details = updated_port.get('trunk_details') # If no trunk_details, the port is not the parent of a trunk. if not trunk_details: return context = kwargs['context'] original_port = kwargs['original_port'] orig_vif_type = original_port.get(portbindings.VIF_TYPE) new_vif_type = updated_port.get(portbindings.VIF_TYPE) vif_type_changed = orig_vif_type != new_vif_type if vif_type_changed and new_vif_type == portbindings.VIF_TYPE_UNBOUND: trunk_id = trunk_details['trunk_id'] # NOTE(status_police) Trunk status goes to DOWN when the parent # port is unbound. This means there are no more physical resources # associated with the logical resource.
self.update_trunk( context, trunk_id, {'trunk': {'status': constants.TRUNK_DOWN_STATUS}}) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3790452 neutron-16.0.0.0b2.dev214/neutron/services/trunk/rpc/0000755000175000017500000000000000000000000022407 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/rpc/__init__.py0000644000175000017500000000000000000000000024506 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/rpc/agent.py0000644000175000017500000001053100000000000024057 0ustar00coreycorey00000000000000# Copyright 2016 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from neutron_lib import rpc as n_rpc from oslo_log import helpers as log_helpers import oslo_messaging from neutron.api.rpc.callbacks.consumer import registry from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import resources_rpc from neutron.services.trunk.rpc import constants as trunk_consts # This module contains stub (client-side) and skeleton (server-side) # proxy code that executes in the Neutron L2 Agent process space. This # is needed if trunk service plugin drivers have a remote component # (e.g. agent), that needs to communicate with the Neutron Server. # The Agent side exposes the following remote methods: # # - update methods to learn about a trunk and its subports: these # methods are used by the server to tell the agent about trunk # updates; agents may selectively choose to listen to either # trunk or subports updates or both. # # For server-side stub and skeleton proxy code, please look at server.py class TrunkSkeleton(object): """Skeleton proxy code for server->agent communication.""" def __init__(self): registry.register(self.handle_trunks, resources.TRUNK) registry.register(self.handle_subports, resources.SUBPORT) self._connection = n_rpc.Connection() endpoints = [resources_rpc.ResourcesPushRpcCallback()] topic = resources_rpc.resource_type_versioned_topic(resources.SUBPORT) self._connection.create_consumer(topic, endpoints, fanout=True) topic = resources_rpc.resource_type_versioned_topic(resources.TRUNK) self._connection.create_consumer(topic, endpoints, fanout=True) self._connection.consume_in_threads() @abc.abstractmethod def handle_trunks(self, context, resource_type, trunks, event_type): """Handle trunk events.""" # if common logic may be extracted out, consider making a base # version of this method that can be overridden by the inherited # skeleton. # NOTE: If trunk is not managed by the agent, the notification can # either be ignored or cached for future use. 
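# A minimal sketch of what a concrete agent-side skeleton could look
# like; the subclass name, the managed_ports attribute and the
# _wire_trunk helper are hypothetical, shown only to illustrate how
# handle_trunks may filter notifications for trunks it manages:
#
#     class MyAgentTrunkSkeleton(TrunkSkeleton):
#         def handle_trunks(self, context, resource_type, trunks,
#                           event_type):
#             for trunk in trunks:
#                 if trunk.port_id in self.managed_ports:  # hypothetical
#                     self._wire_trunk(trunk)  # hypothetical helper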
@abc.abstractmethod def handle_subports(self, context, resource_type, subports, event_type): """Handle subports event.""" # if common logic may be extracted out, consider making a base # version of this method that can be overridden by the inherited # skeleton. # NOTE: If the subport belongs to a trunk which the agent does not # manage, the notification should be ignored. class TrunkStub(object): """Stub proxy code for agent->server communication.""" # API HISTORY # 1.0 - initial version VERSION = '1.0' def __init__(self): self.stub = resources_rpc.ResourcesPullRpcApi() target = oslo_messaging.Target( topic=trunk_consts.TRUNK_BASE_TOPIC, version=self.VERSION, namespace=trunk_consts.TRUNK_BASE_NAMESPACE) self.rpc_client = n_rpc.get_client(target) @log_helpers.log_method_call def get_trunk_details(self, context, parent_port_id): """Get information about the trunk for the given parent port.""" return self.stub.pull(context, resources.TRUNK, parent_port_id) @log_helpers.log_method_call def update_trunk_status(self, context, trunk_id, status): """Update the trunk status to reflect outcome of data plane wiring.""" return self.rpc_client.prepare().call( context, 'update_trunk_status', trunk_id=trunk_id, status=status) @log_helpers.log_method_call def update_subport_bindings(self, context, subports): """Update subport bindings to match parent port host binding.""" return self.rpc_client.prepare().call( context, 'update_subport_bindings', subports=subports) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/rpc/backend.py0000644000175000017500000000504600000000000024355 0ustar00coreycorey00000000000000# Copyright 2016 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from oslo_log import log as logging from neutron.services.trunk.rpc import server LOG = logging.getLogger(__name__) @registry.has_registry_receivers class ServerSideRpcBackend(object): """The Neutron Server RPC backend.""" def __init__(self): """Initialize an RPC backend for the Neutron Server.""" self._skeleton = server.TrunkSkeleton() self._stub = server.TrunkStub() LOG.debug("RPC backend initialized for trunk plugin") # Set up listeners to trunk events: they dispatch RPC messages # to agents as needed. These are designed to work with any # agent-based driver that may integrate with the trunk service # plugin, e.g. linux bridge or ovs. 
@registry.receives(resources.TRUNK, [events.AFTER_CREATE, events.AFTER_DELETE]) @registry.receives(resources.SUBPORTS, [events.AFTER_CREATE, events.AFTER_DELETE]) def process_event(self, resource, event, trunk_plugin, payload): """Emit RPC notifications to registered subscribers.""" context = payload.context LOG.debug("RPC notification needed for trunk %s", payload.trunk_id) if resource == resources.SUBPORTS: payload = payload.subports method = { events.AFTER_CREATE: self._stub.subports_added, events.AFTER_DELETE: self._stub.subports_deleted, } elif resource == resources.TRUNK: # On AFTER_DELETE event, current_trunk is None payload = payload.current_trunk or payload.original_trunk method = { events.AFTER_CREATE: self._stub.trunk_created, events.AFTER_DELETE: self._stub.trunk_deleted, } LOG.debug("Emitting event %s for resource %s", event, resource) method[event](context, payload) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/rpc/constants.py0000644000175000017500000000122200000000000024772 0ustar00coreycorey00000000000000# Copyright 2016 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. TRUNK_BASE_TOPIC = 'trunk' TRUNK_BASE_NAMESPACE = 'trunk' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/rpc/server.py0000644000175000017500000002103400000000000024267 0ustar00coreycorey00000000000000# Copyright 2016 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections from neutron_lib.api.definitions import portbindings from neutron_lib.db import api as db_api from neutron_lib.plugins import directory from neutron_lib import rpc as n_rpc from neutron_lib.services.trunk import constants as trunk_consts from oslo_log import helpers as log_helpers from oslo_log import log as logging import oslo_messaging from sqlalchemy.orm import exc from neutron.api.rpc.callbacks import events from neutron.api.rpc.callbacks.producer import registry from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import resources_rpc from neutron.objects import trunk as trunk_objects from neutron.services.trunk import exceptions as trunk_exc from neutron.services.trunk.rpc import constants LOG = logging.getLogger(__name__) # This module contains stub (client-side) and skeleton (server-side) # proxy code that executes in the Neutron server process space. This # is needed if any of the trunk service plugin drivers has a remote # component (e.g. agent), that needs to communicate with the Neutron # Server. # The Server side exposes the following remote methods: # # - lookup method to retrieve trunk details: used by the agent to learn # about the trunk. # - update methods for trunk and its subports: used by the agent to # inform the server about local trunk status changes. # # For agent-side stub and skeleton proxy code, please look at agent.py def trunk_by_port_provider(resource, port_id, context, **kwargs): """Provider callback to supply trunk information by parent port.""" return trunk_objects.Trunk.get_object(context, port_id=port_id) class TrunkSkeleton(object): """Skeleton proxy code for agent->server communication.""" # API version history: # 1.0 Initial version target = oslo_messaging.Target(version='1.0', namespace=constants.TRUNK_BASE_NAMESPACE) _core_plugin = None def __init__(self): # Used to provide trunk lookups for the agent. registry.provide(trunk_by_port_provider, resources.TRUNK) self._connection = n_rpc.Connection() self._connection.create_consumer( constants.TRUNK_BASE_TOPIC, [self], fanout=False) self._connection.consume_in_threads() @property def core_plugin(self): if not self._core_plugin: self._core_plugin = directory.get_plugin() return self._core_plugin @log_helpers.log_method_call def update_subport_bindings(self, context, subports): """Update subport bindings to match trunk host binding.""" el = context.elevated() ports_by_trunk_id = collections.defaultdict(list) updated_ports = collections.defaultdict(list) for s in subports: ports_by_trunk_id[s['trunk_id']].append(s['port_id']) for trunk_id, subport_ids in ports_by_trunk_id.items(): trunk = trunk_objects.Trunk.get_object(el, id=trunk_id) if not trunk: LOG.debug("Trunk not found. 
id: %s", trunk_id) continue trunk_updated_ports = self._process_trunk_subport_bindings( el, trunk, subport_ids) updated_ports[trunk.id].extend(trunk_updated_ports) return updated_ports def _safe_update_trunk(self, trunk, **kwargs): for try_cnt in range(db_api.MAX_RETRIES): try: trunk.update(**kwargs) break except exc.StaleDataError as e: if try_cnt < db_api.MAX_RETRIES - 1: LOG.debug("Got StaleDataError exception: %s", e) continue else: # re-raise when all tries failed raise def update_trunk_status(self, context, trunk_id, status): """Update the trunk status to reflect outcome of data plane wiring.""" with db_api.CONTEXT_WRITER.using(context): trunk = trunk_objects.Trunk.get_object(context, id=trunk_id) if trunk: self._safe_update_trunk(trunk, status=status) def _process_trunk_subport_bindings(self, context, trunk, port_ids): """Process port bindings for subports on the given trunk.""" updated_ports = [] trunk_port_id = trunk.port_id trunk_port = self.core_plugin.get_port(context, trunk_port_id) trunk_host = trunk_port.get(portbindings.HOST_ID) # NOTE(status_police) Set the trunk in BUILD state before # processing subport bindings. The trunk will stay in BUILD # state until an attempt has been made to bind all subports # passed here and the agent acknowledges the operation was # successful. self._safe_update_trunk( trunk, status=trunk_consts.TRUNK_BUILD_STATUS) for port_id in port_ids: try: updated_port = self._handle_port_binding(context, port_id, trunk, trunk_host) # NOTE(fitoduarte): consider trimming down the content # of the port data structure. updated_ports.append(updated_port) except trunk_exc.SubPortBindingError as e: LOG.error("Failed to bind subport: %s", e) # NOTE(status_police) The subport binding has failed in a # manner in which we cannot proceed and the user must take # action to bring the trunk back to a sane state. self._safe_update_trunk( trunk, status=trunk_consts.TRUNK_ERROR_STATUS) return [] except Exception as e: msg = ("Failed to bind subport port %(port)s on trunk " "%(trunk)s: %(exc)s") LOG.error(msg, {'port': port_id, 'trunk': trunk.id, 'exc': e}) if len(port_ids) != len(updated_ports): self._safe_update_trunk( trunk, status=trunk_consts.TRUNK_DEGRADED_STATUS) return updated_ports def _handle_port_binding(self, context, port_id, trunk, trunk_host): """Bind the given port to the given host. 
:param context: The context to use for the operation :param port_id: The UUID of the port to be bound :param trunk: The trunk that the given port belongs to :param trunk_host: The host to bind the given port to """ port = self.core_plugin.update_port( context, port_id, {'port': {portbindings.HOST_ID: trunk_host, 'device_owner': trunk_consts.TRUNK_SUBPORT_OWNER}}) vif_type = port.get(portbindings.VIF_TYPE) if vif_type == portbindings.VIF_TYPE_BINDING_FAILED: raise trunk_exc.SubPortBindingError(port_id=port_id, trunk_id=trunk.id) return port class TrunkStub(object): """Stub proxy code for server->agent communication.""" def __init__(self): self._resource_rpc = resources_rpc.ResourcesPushRpcApi() @log_helpers.log_method_call def trunk_created(self, context, trunk): """Tell the agent about a trunk being created.""" self._resource_rpc.push(context, [trunk], events.CREATED) @log_helpers.log_method_call def trunk_deleted(self, context, trunk): """Tell the agent about a trunk being deleted.""" self._resource_rpc.push(context, [trunk], events.DELETED) @log_helpers.log_method_call def subports_added(self, context, subports): """Tell the agent about new subports to add.""" self._resource_rpc.push(context, subports, events.CREATED) @log_helpers.log_method_call def subports_deleted(self, context, subports): """Tell the agent about existing subports to remove.""" self._resource_rpc.push(context, subports, events.DELETED) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/rules.py0000644000175000017500000003356600000000000023344 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections from neutron_lib.api import converters from neutron_lib.api.definitions import portbindings from neutron_lib.api.definitions import provider_net as provider from neutron_lib.api import extensions from neutron_lib.api import validators from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from neutron_lib.plugins.ml2 import api from neutron_lib.services.trunk import constants from neutron._i18n import _ from neutron.objects import trunk as trunk_objects from neutron.services.trunk import exceptions as trunk_exc from neutron.services.trunk import utils # This layer is introduced for keeping business logic and # data persistence decoupled. def trunk_can_be_managed(context, trunk): """Validate that the trunk can be managed.""" if not trunk.admin_state_up: raise trunk_exc.TrunkDisabled(trunk_id=trunk.id) def enforce_port_deletion_rules(resource, event, trigger, payload=None): """Prohibit the deletion of a port that's used in a trunk.""" # NOTE: the ML2 plugin properly catches these exceptions when raised, but # non-ML2 plugins might not. To address this we should move the callback # registry notification emitted in the ML2 plugin's delete_port() higher # up in the plugin hierarchy. 
context = payload.context port_id = payload.resource_id subport_obj = trunk_objects.SubPort.get_object(context, port_id=port_id) if subport_obj: raise trunk_exc.PortInUseAsSubPort(port_id=port_id, trunk_id=subport_obj.trunk_id) trunk_obj = trunk_objects.Trunk.get_object(context, port_id=port_id) if trunk_obj: raise trunk_exc.PortInUseAsTrunkParent(port_id=port_id, trunk_id=trunk_obj.id) class TrunkPortValidator(object): def __init__(self, port_id): self.port_id = port_id self._port = None def validate(self, context, parent_port=True): """Validate that the port can be used in a trunk. :param parent_port: True if the port is intended for use as parent in a trunk. """ # TODO(tidwellr): there is a chance of a race between the # time these checks are performed and the time the trunk # creation is executed. To be revisited, if it bites. # Validate that the given port_id is not used by a subport. subports = trunk_objects.SubPort.get_objects( context, port_id=self.port_id) if subports: raise trunk_exc.TrunkPortInUse(port_id=self.port_id) # Validate that the given port_id is not used by a trunk. trunks = trunk_objects.Trunk.get_objects(context, port_id=self.port_id) if trunks: raise trunk_exc.ParentPortInUse(port_id=self.port_id) if parent_port: # if the port is being used as a parent in a trunk, check if # it can be trunked, i.e. if it is already associated to physical # resources (namely it is bound). Bound ports may be used as # trunk parents, but that depends on the underlying driver in # charge. if not self.can_be_trunked_or_untrunked(context): raise trunk_exc.ParentPortInUse(port_id=self.port_id) else: # if the port is being used as a subport in a trunk, check if it is # a port that is not actively used for other purposes, e.g. a router # port, compute port, DHCP port etc. We have no clue what the side # effects of connecting the port to a trunk would be, and it is # better to err on the side of caution and prevent the operation. self.check_not_in_use(context) return self.port_id def is_bound(self, context): """Return true if the port is bound, false otherwise.""" # Validate that the given port_id does not have a port binding. core_plugin = directory.get_plugin() self._port = core_plugin.get_port(context, self.port_id) return bool(self._port.get(portbindings.HOST_ID)) def can_be_trunked_or_untrunked(self, context): """Return true if a port can be trunked.""" if not self.is_bound(context): # An unbound port can be trunked, always. return True trunk_plugin = directory.get_plugin('trunk') vif_type = self._port.get(portbindings.VIF_TYPE) binding_host = self._port.get(portbindings.HOST_ID) # Determine the driver that will be in charge of the trunk: this # can be determined based on the vif type, whether or not the # driver is agent-based, and whether the host is running the agent # associated to the driver itself. host_agent_types = utils.get_agent_types_by_host(context, binding_host) drivers = [ driver for driver in trunk_plugin.registered_drivers if utils.is_driver_compatible( context, driver, vif_type, host_agent_types) ] if len(drivers) > 1: raise trunk_exc.TrunkPluginDriverConflict() elif len(drivers) == 1: return drivers[0].can_trunk_bound_port else: return False def check_not_in_use(self, context): """Raises PortInUse for ports assigned for device purposes.""" core_plugin = directory.get_plugin() self._port = core_plugin.get_port(context, self.port_id) # NOTE(armax): the trunk extension itself does not make use of the # device_id field, because it has no reason to.
If need be, this # check can be altered to accommodate the change in logic. if self._port['device_id']: raise n_exc.PortInUse(net_id=self._port['network_id'], port_id=self._port['id'], device_id=self._port['device_id']) class SubPortsValidator(object): def __init__(self, segmentation_types, subports, trunk_port_id=None): self._segmentation_types = segmentation_types self.subports = subports self.trunk_port_id = trunk_port_id def validate(self, context, basic_validation=False, trunk_validation=True): """Validate that subports can be used in a trunk.""" # Perform basic validation on subports, in case subports # are not automatically screened by the API layer. if basic_validation: msg = validators.validate_subports(self.subports) if msg: raise n_exc.InvalidInput(error_message=msg) if trunk_validation: trunk_port_mtu = self._get_port_mtu(context, self.trunk_port_id) subport_mtus = self._prepare_subports(context) return [self._validate(context, s, trunk_port_mtu, subport_mtus) for s in self.subports] else: return self.subports def _prepare_subports(self, context): """Utility method to parse subports in the request The objective of this method is two-fold: * Update subports segmentation details if INHERIT is requested; * Return the MTU for each of the subport in the request. This method does two things rather than one to allow us to hit the DB once, and thus minimize the number of lookups required to learn about the segmentation type and the MTU of the networks on which subports are plugged. """ InheritIndex = ( collections.namedtuple("InheritIndex", "index has_inherit")) port_ids = {} any_has_inherit = False for i, s in enumerate(self.subports): has_inherit = (s.get('segmentation_type') == constants.SEGMENTATION_TYPE_INHERIT) any_has_inherit |= has_inherit port_ids[s['port_id']] = ( InheritIndex(index=i, has_inherit=has_inherit)) core_plugin = directory.get_plugin() if (any_has_inherit and not extensions.is_extension_supported( core_plugin, provider.ALIAS)): msg = (_("Cannot accept segmentation type %s") % constants.SEGMENTATION_TYPE_INHERIT) raise n_exc.InvalidInput(error_message=msg) ports = core_plugin.get_ports(context, filters={'id': port_ids}) network_port_map = collections.defaultdict(list) for p in ports: network_port_map[p['network_id']].append({'port_id': p['id']}) networks = core_plugin.get_networks( context.elevated(), filters={'id': network_port_map}) subport_mtus = {} for net in networks: for port in network_port_map[net['id']]: if port_ids[port['port_id']].has_inherit: port.update( {'segmentation_id': net[provider.SEGMENTATION_ID], 'segmentation_type': net[provider.NETWORK_TYPE]}) self.subports[port_ids[port['port_id']].index] = port # To speed up the request, record the network MTU for each # subport to avoid hitting the DB more than necessary. Do # that only if the extension is available. if extensions.is_extension_supported(core_plugin, 'net-mtu'): subport_mtus[port['port_id']] = net[api.MTU] return subport_mtus def _get_port_mtu(self, context, port_id): """Get port MTU Return MTU for the network where the given port belongs to. If the network or port cannot be obtained, or if MTU is not defined, returns None. 
""" core_plugin = directory.get_plugin() if not extensions.is_extension_supported(core_plugin, 'net-mtu'): return try: port = core_plugin.get_port(context, port_id) return core_plugin.get_network( context, port['network_id'])[api.MTU] except (n_exc.PortNotFound, n_exc.NetworkNotFound): # A concurrent request might have made the port or network # disappear; though during DB insertion, the subport request # will fail on integrity constraint, it is safer to return # a None MTU here. return def _raise_subport_is_parent_port(self, context, subport): if subport['port_id'] == self.trunk_port_id: raise trunk_exc.ParentPortInUse(port_id=subport['port_id']) def _raise_subport_invalid_mtu(self, context, subport, trunk_port_mtu, subport_mtus): # Check MTU sanity - subport MTU must not exceed trunk MTU. # If for whatever reason trunk_port_mtu is not available, # the MTU sanity check cannot be enforced. if trunk_port_mtu: # missing MTUs for subports is not an error condition: the # subport UUID may be invalid or non existent. subport_mtu = subport_mtus.get(subport['port_id']) if subport_mtu and subport_mtu > trunk_port_mtu: raise trunk_exc.SubPortMtuGreaterThanTrunkPortMtu( port_id=subport['port_id'], port_mtu=subport_mtu, trunk_id=self.trunk_port_id, trunk_mtu=trunk_port_mtu ) def _raise_if_segmentation_details_missing(self, subport): try: segmentation_type = subport["segmentation_type"] segmentation_id = ( converters.convert_to_int(subport["segmentation_id"])) return (segmentation_type, segmentation_id) except KeyError: msg = _("Invalid subport details '%s': missing segmentation " "information. Must specify both segmentation_id and " "segmentation_type") % subport raise n_exc.InvalidInput(error_message=msg) except n_exc.InvalidInput: msg = _("Invalid subport details: segmentation_id '%s' is " "not an integer") % subport["segmentation_id"] raise n_exc.InvalidInput(error_message=msg) def _raise_if_segmentation_details_invalid(self, segmentation_type, segmentation_id): if segmentation_type not in self._segmentation_types: msg = _("Unknown segmentation_type '%s'") % segmentation_type raise n_exc.InvalidInput(error_message=msg) if not self._segmentation_types[segmentation_type](segmentation_id): msg = _("Segmentation ID '%s' is not in range") % segmentation_id raise n_exc.InvalidInput(error_message=msg) def _raise_if_subport_is_used_in_other_trunk(self, context, subport): trunk_validator = TrunkPortValidator(subport['port_id']) trunk_validator.validate(context, parent_port=False) def _validate(self, context, subport, trunk_port_mtu, subport_mtus): self._raise_subport_is_parent_port(context, subport) self._raise_subport_invalid_mtu( context, subport, trunk_port_mtu, subport_mtus) segmentation_type, segmentation_id = ( self._raise_if_segmentation_details_missing(subport)) self._raise_if_segmentation_details_invalid( segmentation_type, segmentation_id) self._raise_if_subport_is_used_in_other_trunk(context, subport) return subport ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3790452 neutron-16.0.0.0b2.dev214/neutron/services/trunk/seg_types/0000755000175000017500000000000000000000000023625 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/seg_types/__init__.py0000644000175000017500000000000000000000000025724 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/seg_types/validators.py0000644000175000017500000000324100000000000026347 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.plugins import utils as plugin_utils from neutron_lib.services.trunk import constants as trunk_consts from neutron._i18n import _ # Base map of segmentation types supported with their respective validator # functions. In multi-driver deployments all drivers must support the same # set of segmentation types consistently. Drivers can add their own type # and respective validator, however this is a configuration that may be # supported only in single-driver deployments. _supported = { trunk_consts.SEGMENTATION_TYPE_VLAN: plugin_utils.is_valid_vlan_tag, } def get_validator(segmentation_type): """Get validator for the segmentation type or KeyError if not found.""" return _supported[segmentation_type] def add_validator(segmentation_type, validator_function): """Introduce new entry to the map of supported segmentation types.""" if segmentation_type in _supported: msg = _("Cannot redefine existing %s " "segmentation type") % segmentation_type raise KeyError(msg) _supported[segmentation_type] = validator_function ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/services/trunk/utils.py0000644000175000017500000000441500000000000023341 0ustar00coreycorey00000000000000# (c) Copyright 2016 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api import extensions from neutron_lib.plugins import directory def get_agent_types_by_host(context, host): """Return the agent types registered on the host.""" agent_types = [] core_plugin = directory.get_plugin() if extensions.is_extension_supported(core_plugin, 'agent'): agents = core_plugin.get_agents( context.elevated(), filters={'host': [host]}) agent_types = [a['agent_type'] for a in agents] return agent_types def is_driver_compatible(context, driver, interface, host_agent_types): """True if the driver is compatible with interface and host_agent_types. There may be edge cases where a stale view or the deployment may make the following test fail to detect the right driver in charge of the bound port. 
""" # NOTE(armax): this logic stems from the fact that the way Neutron is # architected we do not have a univocal mapping between VIF type and the # Driver serving it, in that the same vif type can be supported by # multiple drivers. A practical example of this is OVS and OVN in the # same deployment. In order to uniquely identify the driver, we cannot # simply look at the vif type, and we need to look at whether the host # to which the port is bound is actually managed by one driver or the # other. is_interface_compatible = driver.is_interface_compatible(interface) # For an agentless driver, only interface compatibility is required. if not driver.agent_type: return is_interface_compatible # For an agent-based driver, both interface and agent compat is required. return is_interface_compatible and driver.agent_type in host_agent_types ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3790452 neutron-16.0.0.0b2.dev214/neutron/tests/0000755000175000017500000000000000000000000017777 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/__init__.py0000644000175000017500000000000000000000000022076 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/base.py0000644000175000017500000005234300000000000021272 0ustar00coreycorey00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base test cases for all neutron tests. 
""" import abc import contextlib import functools import inspect import logging import os import os.path import threading import eventlet.timeout import fixtures import mock from neutron_lib.callbacks import manager as registry_manager from neutron_lib.db import api as db_api from neutron_lib import fixture from neutron_lib.tests import tools as lib_test_tools from neutron_lib.tests.unit import fake_notifier from oslo_concurrency.fixture import lockutils from oslo_config import cfg from oslo_db import exception as db_exceptions from oslo_db import options as db_options from oslo_utils import excutils from oslo_utils import fileutils from oslo_utils import strutils from oslotest import base from osprofiler import profiler import six from sqlalchemy import exc as sqlalchemy_exc import testtools from testtools import content from neutron._i18n import _ from neutron.agent.linux import external_process from neutron.api.rpc.callbacks.consumer import registry as rpc_consumer_reg from neutron.api.rpc.callbacks.producer import registry as rpc_producer_reg from neutron.common import config from neutron.conf.agent import common as agent_config from neutron.db import agentschedulers_db from neutron import manager from neutron import policy from neutron.quota import resource_registry from neutron.tests import post_mortem_debug CONF = cfg.CONF CONF.import_opt('state_path', 'neutron.conf.common') ROOTDIR = os.path.dirname(__file__) ETCDIR = os.path.join(ROOTDIR, 'etc') SUDO_CMD = 'sudo -n' TESTCASE_RETRIES = 3 def etcdir(*p): return os.path.join(ETCDIR, *p) def fake_use_fatal_exceptions(*args): return True def bool_from_env(key, strict=False, default=False): value = os.environ.get(key) return strutils.bool_from_string(value, strict=strict, default=default) def setup_test_logging(config_opts, log_dir, log_file_path_template): # Have each test log into its own log file config_opts.set_override('debug', True) fileutils.ensure_tree(log_dir, mode=0o755) log_file = sanitize_log_path( os.path.join(log_dir, log_file_path_template)) config_opts.set_override('log_file', log_file) config.setup_logging() def sanitize_log_path(path): # Sanitize the string so that its log path is shell friendly replace_map = {' ': '-', '(': '_', ')': '_'} for s, r in replace_map.items(): path = path.replace(s, r) return path def unstable_test(reason): def decor(f): @functools.wraps(f) def inner(self, *args, **kwargs): try: return f(self, *args, **kwargs) except Exception as e: msg = ("%s was marked as unstable because of %s, " "failure was: %s") % (self.id(), reason, e) raise self.skipTest(msg) return inner return decor def skip_if_timeout(reason): def decor(f): @functools.wraps(f) def inner(self, *args, **kwargs): try: return f(self, *args, **kwargs) except fixtures.TimeoutException: msg = ("Timeout raised for test %s, skipping it " "because of: %s") % (self.id(), reason) raise self.skipTest(msg) except (sqlalchemy_exc.InterfaceError, db_exceptions.DBConnectionError): # In case of db tests very often TimeoutException is reason of # some sqlalchemy InterfaceError exception and that is final # raised exception which needs to be handled msg = ("DB connection broken in test %s. It is very likely " "that this happend because of test timeout. " "Skipping test because of: %s") % (self.id(), reason) raise self.skipTest(msg) return inner return decor def set_timeout(timeout): """Timeout decorator for test methods. Use this decorator for tests that are expected to pass in very specific amount of time, not common for all other tests. 
It can have either big or small value. """ def decor(f): @functools.wraps(f) def inner(self, *args, **kwargs): self.useFixture(fixtures.Timeout(timeout, gentle=True)) return f(self, *args, **kwargs) return inner return decor def get_rootwrap_cmd(): return os.environ.get('OS_ROOTWRAP_CMD', SUDO_CMD) def get_rootwrap_daemon_cmd(): return os.environ.get('OS_ROOTWRAP_DAEMON_CMD') class AttributeDict(dict): """Provide attribute access (dict.key) to dictionary values.""" def __getattr__(self, name): """Allow attribute access for all keys in the dict.""" if name in self: return self[name] raise AttributeError(_("Unknown attribute '%s'.") % name) def _catch_timeout(f): @functools.wraps(f) def func(self, *args, **kwargs): for idx in range(1, TESTCASE_RETRIES + 1): try: return f(self, *args, **kwargs) except eventlet.Timeout as e: self.fail('Execution of this test timed out: %s' % e) # NOTE(ralonsoh): exception catch added due to the constant # occurrences of this exception during FT and UT execution. # This is due to [1]. Once the sync decorators are removed or the # privsep ones are decorated by those ones (swap decorator # declarations) this catch can be remove. # [1] https://review.opendev.org/#/c/631275/ except fixtures.TimeoutException: if idx < TESTCASE_RETRIES: msg = ('"fixtures.TimeoutException" during test case ' 'execution no %s; test case re-executed' % idx) self.addDetail('DietTestCase', content.text_content(msg)) self._set_timeout() else: self.fail('Execution of this test timed out') return func class _CatchTimeoutMetaclass(abc.ABCMeta): def __init__(cls, name, bases, dct): super(_CatchTimeoutMetaclass, cls).__init__(name, bases, dct) for name, method in inspect.getmembers( # NOTE(ihrachys): we should use isroutine because it will catch # both unbound methods (python2) and functions (python3) cls, predicate=inspect.isroutine): if name.startswith('test_'): setattr(cls, name, _catch_timeout(method)) # Test worker cannot survive eventlet's Timeout exception, which effectively # kills the whole worker, with all test cases scheduled to it. This metaclass # makes all test cases convert Timeout exceptions into unittest friendly # failure mode (self.fail). @six.add_metaclass(_CatchTimeoutMetaclass) class DietTestCase(base.BaseTestCase): """Same great taste, less filling. BaseTestCase is responsible for doing lots of plugin-centric setup that not all tests require (or can tolerate). This class provides only functionality that is common across all tests. """ def setUp(self): super(DietTestCase, self).setUp() # Suppress some log messages during test runs, otherwise it may cause # issues with subunit parser when running on Python 3. It happened for # example for neutron-functional tests. # With this suppress of log levels DEBUG logs will not be captured by # stestr on pythonlogging stream and will not cause this parser issue. supress_logs = ['neutron', 'neutron_lib', 'stevedore', 'oslo_policy', 'oslo_concurrency', 'oslo_db', 'alembic', 'ovsdbapp'] for supress_log in supress_logs: logger = logging.getLogger(supress_log) logger.setLevel(logging.ERROR) # FIXME(amuller): this must be called in the Neutron unit tests base # class. Moving this may cause non-deterministic failures. Bug #1489098 # for more info. 
db_options.set_defaults(cfg.CONF, connection='sqlite://') # Configure this first to ensure pm debugging support for setUp() debugger = os.environ.get('OS_POST_MORTEM_DEBUGGER') if debugger: self.addOnException(post_mortem_debug.get_exception_handler( debugger)) # Make sure we see all relevant deprecation warnings when running tests self.useFixture(fixture.WarningsFixture(module_re=['^neutron\\.'])) self.useFixture(fixture.DBQueryHooksFixture()) # NOTE(ihrachys): oslotest already sets stopall for cleanup, but it # does it using six.moves.mock (the library was moved into # unittest.mock in Python 3.4). So until we switch to six.moves.mock # everywhere in unit tests, we can't remove this setup. The base class # is used in 3party projects, so we would need to switch all of them to # six before removing the cleanup callback from here. self.addCleanup(mock.patch.stopall) self.useFixture(fixture.DBResourceExtendFixture()) self.addOnException(self.check_for_systemexit) self.orig_pid = os.getpid() lib_test_tools.reset_random_seed() def addOnException(self, handler): def safe_handler(*args, **kwargs): try: return handler(*args, **kwargs) except Exception: with excutils.save_and_reraise_exception(reraise=False) as ctx: self.addDetail('failure in exception handler %s' % handler, testtools.content.TracebackContent( (ctx.type_, ctx.value, ctx.tb), self)) return super(DietTestCase, self).addOnException(safe_handler) def check_for_systemexit(self, exc_info): if isinstance(exc_info[1], SystemExit): if os.getpid() != self.orig_pid: # Subprocess - let it just exit raise # This makes sys.exit(0) still a failure self.force_failure = True @contextlib.contextmanager def assert_max_execution_time(self, max_execution_time=5): with eventlet.Timeout(max_execution_time, False): yield return self.fail('Execution of this test timed out') def assertOrderedEqual(self, expected, actual): expect_val = self.sort_dict_lists(expected) actual_val = self.sort_dict_lists(actual) self.assertEqual(expect_val, actual_val) def sort_dict_lists(self, dic): for key, value in dic.items(): if isinstance(value, list): dic[key] = sorted(value) elif isinstance(value, dict): dic[key] = self.sort_dict_lists(value) return dic def assertDictSupersetOf(self, expected_subset, actual_superset): """Checks that actual dict contains the expected dict. After checking that the arguments are of the right type, this checks that each item in expected_subset is in, and matches, what is in actual_superset. Separate tests are done, so that detailed info can be reported upon failure. """ if not isinstance(expected_subset, dict): self.fail("expected_subset (%s) is not an instance of dict" % type(expected_subset)) if not isinstance(actual_superset, dict): self.fail("actual_superset (%s) is not an instance of dict" % type(actual_superset)) for k, v in expected_subset.items(): self.assertIn(k, actual_superset) self.assertEqual(v, actual_superset[k], "Key %(key)s expected: %(exp)r, actual %(act)r" % {'key': k, 'exp': v, 'act': actual_superset[k]}) class ProcessMonitorFixture(fixtures.Fixture): """Test fixture to capture and cleanup any spawn process monitor.""" def _setUp(self): self.old_callable = ( external_process.ProcessMonitor._spawn_checking_thread) p = mock.patch("neutron.agent.linux.external_process.ProcessMonitor." 
"_spawn_checking_thread", new=lambda x: self.record_calls(x)) p.start() self.instances = [] self.addCleanup(self.stop) def stop(self): for instance in self.instances: instance.stop() def record_calls(self, instance): self.old_callable(instance) self.instances.append(instance) class BaseTestCase(DietTestCase): @staticmethod def config_parse(conf=None, args=None): """Create the default configurations.""" if args is None: args = [] args += ['--config-file', etcdir('neutron.conf')] if conf is None: config.init(args=args) else: conf(args) def setUp(self): super(BaseTestCase, self).setUp() self.useFixture(lockutils.ExternalLockFixture()) self.useFixture(fixture.APIDefinitionFixture()) cfg.CONF.set_override('state_path', self.get_default_temp_dir().path) self.addCleanup(CONF.reset) self.useFixture(ProcessMonitorFixture()) self.useFixture(fixtures.MonkeyPatch( 'neutron_lib.exceptions.NeutronException.use_fatal_exceptions', fake_use_fatal_exceptions)) self.useFixture(fixtures.MonkeyPatch( 'oslo_config.cfg.find_config_files', lambda project=None, prog=None, extension=None: [])) self.useFixture(fixture.RPCFixture()) self.setup_config() self._callback_manager = registry_manager.CallbacksManager() self.useFixture(fixture.CallbackRegistryFixture( callback_manager=self._callback_manager)) # Give a private copy of the directory to each test. self.useFixture(fixture.PluginDirectoryFixture()) policy.init() self.addCleanup(policy.reset) self.addCleanup(resource_registry.unregister_all_resources) self.addCleanup(db_api.sqla_remove_all) self.addCleanup(rpc_consumer_reg.clear) self.addCleanup(rpc_producer_reg.clear) self.addCleanup(profiler.clean) def get_new_temp_dir(self): """Create a new temporary directory. :returns: fixtures.TempDir """ return self.useFixture(fixtures.TempDir()) def get_default_temp_dir(self): """Create a default temporary directory. Returns the same directory during the whole test case. :returns: fixtures.TempDir """ if not hasattr(self, '_temp_dir'): self._temp_dir = self.get_new_temp_dir() return self._temp_dir def get_temp_file_path(self, filename, root=None): """Returns an absolute path for a temporary file. If root is None, the file is created in default temporary directory. It also creates the directory if it's not initialized yet. If root is not None, the file is created inside the directory passed as root= argument. :param filename: filename :type filename: string :param root: temporary directory to create a new file in :type root: fixtures.TempDir :returns: absolute file path string """ root = root or self.get_default_temp_dir() return root.join(filename) def setup_config(self, args=None): """Tests that need a non-default config can override this method.""" self.config_parse(args=args) def config(self, **kw): """Override some configuration values. The keyword arguments are the names of configuration options to override and their values. If a group argument is supplied, the overrides are applied to the specified configuration option group. All overrides are automatically cleared at the end of the current test by the fixtures cleanup process. 
""" group = kw.pop('group', None) for k, v in kw.items(): CONF.set_override(k, v, group) def setup_coreplugin(self, core_plugin=None, load_plugins=True): cp = PluginFixture(core_plugin) self.useFixture(cp) self.patched_dhcp_periodic = cp.patched_dhcp_periodic self.patched_default_svc_plugins = cp.patched_default_svc_plugins if load_plugins: manager.init() def setup_notification_driver(self, notification_driver=None): self.addCleanup(fake_notifier.reset) if notification_driver is None: notification_driver = [fake_notifier.__name__] cfg.CONF.set_override("notification_driver", notification_driver) def setup_rootwrap(self): agent_config.register_root_helper(cfg.CONF) self.config(group='AGENT', root_helper=get_rootwrap_cmd()) self.config(group='AGENT', root_helper_daemon=get_rootwrap_daemon_cmd()) def _simulate_concurrent_requests_process_and_raise(self, calls, args): class SimpleThread(threading.Thread): def __init__(self, q): super(SimpleThread, self).__init__() self.q = q self.exception = None def run(self): try: while not self.q.empty(): item = None try: item = self.q.get(False) func, func_args = item[0], item[1] func(*func_args) except six.moves.queue.Empty: pass finally: if item: self.q.task_done() except Exception as e: self.exception = e def get_exception(self): return self.exception q = six.moves.queue.Queue() for func, func_args in zip(calls, args): q.put_nowait((func, func_args)) threads = [] for z in range(len(calls)): t = SimpleThread(q) threads.append(t) t.start() q.join() for t in threads: e = t.get_exception() if e: raise e class PluginFixture(fixtures.Fixture): def __init__(self, core_plugin=None): super(PluginFixture, self).__init__() self.core_plugin = core_plugin def _setUp(self): # Do not load default service plugins in the testing framework # as all the mocking involved can cause havoc. self.default_svc_plugins_p = mock.patch( 'neutron.manager.NeutronManager._get_default_service_plugins') self.patched_default_svc_plugins = self.default_svc_plugins_p.start() self.dhcp_periodic_p = mock.patch( 'neutron.db.agentschedulers_db.DhcpAgentSchedulerDbMixin.' 'add_periodic_dhcp_agent_status_check') self.patched_dhcp_periodic = self.dhcp_periodic_p.start() self.agent_health_check_p = mock.patch( 'neutron.db.agentschedulers_db.DhcpAgentSchedulerDbMixin.' 'add_agent_status_check_worker') self.agent_health_check = self.agent_health_check_p.start() # Plugin cleanup should be triggered last so that # test-specific cleanup has a chance to release references. self.addCleanup(self.cleanup_core_plugin) if self.core_plugin is not None: cfg.CONF.set_override('core_plugin', self.core_plugin) def cleanup_core_plugin(self): """Ensure that the core plugin is deallocated.""" nm = manager.NeutronManager if not nm.has_instance(): return # TODO(marun) Fix plugins that do not properly initialize notifiers agentschedulers_db.AgentSchedulerDbMixin.agent_notifiers = {} nm.clear_instance() class Timeout(fixtures.Fixture): """Setup per test timeouts. In order to avoid test deadlocks we support setting up a test timeout parameter read from the environment. In almost all cases where the timeout is reached this means a deadlock. A scaling factor allows extremely long tests to specify they need more time. """ def __init__(self, timeout=None, scaling=1): super(Timeout, self).__init__() if timeout is None: timeout = os.environ.get('OS_TEST_TIMEOUT', 0) try: self.test_timeout = int(timeout) except ValueError: # If timeout value is invalid do not set a timeout. 
self.test_timeout = 0 if scaling >= 1: self.test_timeout *= scaling else: raise ValueError('scaling value must be >= 1') def setUp(self): super(Timeout, self).setUp() if self.test_timeout > 0: self.useFixture(fixtures.Timeout(self.test_timeout, gentle=True)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3830452 neutron-16.0.0.0b2.dev214/neutron/tests/common/0000755000175000017500000000000000000000000021267 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/common/__init__.py0000644000175000017500000000115400000000000023401 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.common import eventlet_utils eventlet_utils.monkey_patch() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3830452 neutron-16.0.0.0b2.dev214/neutron/tests/common/agents/0000755000175000017500000000000000000000000022550 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/common/agents/__init__.py0000644000175000017500000000000000000000000024647 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/common/agents/l2_extensions.py0000644000175000017500000001121600000000000025717 0ustar00coreycorey00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
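# NOTE(editor): illustrative sketch, not part of the original module. All of
# the wait_until_* helpers defined below share one pattern: build a
# zero-argument predicate and poll it with
# neutron.common.utils.wait_until_true() until the agent extension has
# converged. A minimal instance of the pattern (the name `get_observed` is
# hypothetical):
#
#     def wait_until_value_applied(get_observed, expected):
#         common_utils.wait_until_true(lambda: get_observed() == expected)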
import re import signal from oslo_log import log as logging from neutron.agent.common import async_process from neutron.agent.linux import iptables_manager from neutron.common import utils as common_utils LOG = logging.getLogger(__name__) class TcpdumpException(Exception): pass def extract_mod_nw_tos_action(flows): tos_mark = None if flows: flow_list = flows.splitlines() for flow in flow_list: if 'mod_nw_tos' in flow: actions = flow.partition('actions=')[2] after_mod = actions.partition('mod_nw_tos:')[2] tos_mark = int(after_mod.partition(',')[0]) return tos_mark def extract_dscp_value_from_iptables_rules(rules): pattern = (r"^-A neutron-linuxbri-qos-.* -j DSCP " "--set-dscp (?P<dscp_value>0x[A-Fa-f0-9]+)$") for rule in rules: m = re.match(pattern, rule) if m: return int(m.group("dscp_value"), 16) def wait_until_bandwidth_limit_rule_applied(check_function, port_vif, rule): def _bandwidth_limit_rule_applied(): bw_rule = check_function(port_vif) expected = None, None if rule: expected = rule.max_kbps, rule.max_burst_kbps return bw_rule == expected common_utils.wait_until_true(_bandwidth_limit_rule_applied) def wait_until_egress_bandwidth_limit_rule_applied(bridge, port_vif, rule): wait_until_bandwidth_limit_rule_applied( bridge.get_egress_bw_limit_for_port, port_vif, rule) def wait_until_ingress_bandwidth_limit_rule_applied(bridge, port_vif, rule): wait_until_bandwidth_limit_rule_applied( bridge.get_ingress_bw_limit_for_port, port_vif, rule) def wait_until_dscp_marking_rule_applied_ovs(bridge, port_vif, rule): def _dscp_marking_rule_applied(): port_num = bridge.get_port_ofport(port_vif) flows = bridge.dump_flows_for(table='0', in_port=str(port_num)) dscp_mark = extract_mod_nw_tos_action(flows) expected = None if rule: expected = rule << 2 return dscp_mark == expected common_utils.wait_until_true(_dscp_marking_rule_applied) def wait_until_dscp_marking_rule_applied_linuxbridge(namespace, port_vif, expected_rule): iptables = iptables_manager.IptablesManager( namespace=namespace) def _dscp_marking_rule_applied(): mangle_rules = iptables.get_rules_for_table("mangle") dscp_mark = extract_dscp_value_from_iptables_rules(mangle_rules) return dscp_mark == expected_rule common_utils.wait_until_true(_dscp_marking_rule_applied) def wait_for_dscp_marked_packet(sender_vm, receiver_vm, dscp_mark): cmd = [ "tcpdump", "-i", receiver_vm.port.name, "-nlt", "src", sender_vm.ip, 'and', 'dst', receiver_vm.ip] if dscp_mark: cmd += ["and", "(ip[1] & 0xfc == %s)" % (dscp_mark << 2)] tcpdump_async = async_process.AsyncProcess(cmd, run_as_root=True, namespace=receiver_vm.namespace) tcpdump_async.start(block=True) sender_vm.block_until_ping(receiver_vm.ip) try: tcpdump_async.stop(kill_signal=signal.SIGINT) except async_process.AsyncProcessException: # If it was already stopped then we don't care about it pass tcpdump_stderr_lines = [] pattern = r"(?P<packets_count>^\d+) packets received by filter" for line in tcpdump_async.iter_stderr(): m = re.match(pattern, line) if m and int(m.group("packets_count")) != 0: return tcpdump_stderr_lines.append(line) tcpdump_stdout_lines = [line for line in tcpdump_async.iter_stdout()] LOG.debug("Captured output lines from tcpdump.
Stdout: %s; Stderr: %s", tcpdump_stdout_lines, tcpdump_stderr_lines) raise TcpdumpException( "No packets marked with DSCP = %(dscp_mark)s received from %(src)s " "to %(dst)s" % {'dscp_mark': dscp_mark, 'src': sender_vm.ip, 'dst': receiver_vm.ip}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/common/agents/l3_agent.py0000755000175000017500000000717100000000000024627 0ustar00coreycorey00000000000000#!/usr/bin/env python # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys import types import mock from neutron_lib import constants from oslo_config import cfg from neutron.agent.l3 import agent from neutron.agent.l3 import namespaces from neutron.agent import l3_agent class L3NATAgentForTest(agent.L3NATAgentWithStateReport): def __init__(self, host, conf=None): ns_suffix = '@%s' % cfg.CONF.test_namespace_suffix # Mock out building of namespace names orig_build_ns_name = namespaces.build_ns_name def build_ns_name(prefix, identifier): return "%s%s" % (orig_build_ns_name(prefix, identifier), ns_suffix) build_ns = mock.patch.object(namespaces, 'build_ns_name').start() build_ns.side_effect = build_ns_name # Mock the parsing prefix from namespace names orig_get_prefix = namespaces.get_prefix_from_ns_name def get_prefix_from_ns_name(ns_name): if ns_name.endswith(ns_suffix): return orig_get_prefix(ns_name[:-len(ns_suffix)]) parse_prefix = mock.patch.object(namespaces, 'get_prefix_from_ns_name').start() parse_prefix.side_effect = get_prefix_from_ns_name # Mock the parsing id from namespace names orig_get_id = namespaces.get_id_from_ns_name def get_id_from_ns_name(ns_name): if ns_name.endswith(ns_suffix): return orig_get_id(ns_name[:-len(ns_suffix)]) parse_id = mock.patch.object(namespaces, 'get_id_from_ns_name').start() parse_id.side_effect = get_id_from_ns_name super(L3NATAgentForTest, self).__init__(host, conf) def _create_router(self, router_id, router): """Create a router with suffix added to the router namespace name. This is needed to be able to run two agents serving the same router on the same node. 
""" router = ( super(L3NATAgentForTest, self)._create_router(router_id, router)) router.get_internal_device_name = types.MethodType( get_internal_device_name, router) router.get_external_device_name = types.MethodType( get_external_device_name, router) return router def _append_suffix(dev_name): # If dev_name = 'xyz123' and the suffix is 'hostB' then the result # will be 'xy_stB' return '%s_%s' % (dev_name[:-4], cfg.CONF.test_namespace_suffix[-3:]) def get_internal_device_name(ri, port_id): return _append_suffix( (namespaces.INTERNAL_DEV_PREFIX + port_id) [:constants.LINUX_DEV_LEN]) def get_external_device_name(ri, port_id): return _append_suffix( (namespaces.EXTERNAL_DEV_PREFIX + port_id) [:constants.LINUX_DEV_LEN]) OPTS = [ cfg.StrOpt('test_namespace_suffix', default='testprefix', help="Suffix to append to all namespace names."), ] def register_opts(conf): conf.register_opts(OPTS) def main(manager='neutron.tests.common.agents.l3_agent.L3NATAgentForTest'): register_opts(cfg.CONF) l3_agent.main(manager=manager) if __name__ == "__main__": sys.exit(main()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/common/agents/ovs_agent.py0000755000175000017500000000334600000000000025120 0ustar00coreycorey00000000000000#!/usr/bin/env python # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib import sys from neutron_lib import constants as n_const from oslo_utils import encodeutils from neutron.cmd.eventlet.plugins.ovs_neutron_agent import main as _main from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_neutron_agent def get_tunnel_name_full(cls, network_type, local_ip, remote_ip): network_type = network_type[:3] # Remove length of network_type and two dashes hashlen = (n_const.DEVICE_NAME_MAX_LEN - len(network_type) - 2) // 2 remote_tunnel_hash = cls.get_tunnel_hash(remote_ip, hashlen) if not remote_tunnel_hash: return None remote_tunnel_hash = encodeutils.to_utf8(remote_tunnel_hash) remote_ip_hash = hashlib.sha1(remote_tunnel_hash).hexdigest()[:hashlen] local_tunnel_hash = cls.get_tunnel_hash(local_ip, hashlen) local_tunnel_hash = encodeutils.to_utf8(local_tunnel_hash) source_ip_hash = hashlib.sha1(local_tunnel_hash).hexdigest()[:hashlen] return '%s-%s-%s' % (network_type, source_ip_hash, remote_ip_hash) ovs_neutron_agent.OVSNeutronAgent.get_tunnel_name = get_tunnel_name_full def main(): _main() if __name__ == "__main__": sys.exit(main()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/common/base.py0000644000175000017500000000505200000000000022555 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import functools import unittest from neutron_lib import constants as n_const import testtools.testcase from neutron.common import utils from neutron.tests import base from neutron.tests import tools def create_resource(prefix, creation_func, *args, **kwargs): """Create a new resource that does not already exist. If 'prefix' isn't already 'max_length' characters long, a random suffix is concatenated to make the name unique. Otherwise, 'prefix' is used as is. :param prefix: The prefix for a randomly generated name :param creation_func: A function taking the name of the resource to be created as its first argument. An error is assumed to indicate a name collision. :param *args, **kwargs: These will be passed to the creation function. """ # Don't generate a random name if prefix is already full-length. if len(prefix) == n_const.DEVICE_NAME_MAX_LEN: return creation_func(prefix, *args, **kwargs) while True: name = utils.get_rand_name( max_length=n_const.DEVICE_NAME_MAX_LEN, prefix=prefix) try: return creation_func(name, *args, **kwargs) except RuntimeError: pass def no_skip_on_missing_deps(wrapped): """Do not allow a method/test to skip on missing dependencies. This decorator raises an error if a skip is raised by the wrapped method when OS_FAIL_ON_MISSING_DEPS evaluates to True. This decorator should be used only for missing dependencies (including missing system requirements). """ @functools.wraps(wrapped) def wrapper(*args, **kwargs): try: return wrapped(*args, **kwargs) except (testtools.TestCase.skipException, unittest.SkipTest) as e: if base.bool_from_env('OS_FAIL_ON_MISSING_DEPS'): tools.fail( '%s cannot be skipped because OS_FAIL_ON_MISSING_DEPS ' 'is enabled, skip reason: %s' % (wrapped.__name__, e)) raise return wrapper ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/common/config_fixtures.py0000644000175000017500000000570600000000000025047 0ustar00coreycorey00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os.path import fixtures import six from neutron.tests import base class ConfigDict(base.AttributeDict): def update(self, other): self.convert_to_attr_dict(other) super(ConfigDict, self).update(other) def convert_to_attr_dict(self, other): """Convert nested dicts to AttributeDict. :param other: dictionary to be directly modified.
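Example (editor's sketch)::

    d = {'a': {'b': 1}}
    ConfigDict().convert_to_attr_dict(d)
    assert d['a'].b == 1  # the nested dict is now an AttributeDict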
""" for key, value in other.items(): if isinstance(value, dict): if not isinstance(value, base.AttributeDict): other[key] = base.AttributeDict(value) self.convert_to_attr_dict(value) class ConfigFileFixture(fixtures.Fixture): """A fixture that knows how to translate configurations to files. :param base_filename: the filename to use on disk. :param config: a ConfigDict instance. :param temp_dir: an existing temporary directory to use for storage. """ def __init__(self, base_filename, config, temp_dir): super(ConfigFileFixture, self).__init__() self.base_filename = base_filename self.config = config self.temp_dir = temp_dir def _setUp(self): self.write_config_to_configfile() def write_config_to_configfile(self): config_parser = self.dict_to_config_parser(self.config) # Need to randomly generate a unique folder to put the file in self.filename = os.path.join(self.temp_dir, self.base_filename) with open(self.filename, 'w') as f: config_parser.write(f) f.flush() def dict_to_config_parser(self, config_dict): config_parser = six.moves.configparser.ConfigParser() for section, section_dict in config_dict.items(): if section != 'DEFAULT': config_parser.add_section(section) for option, value in section_dict.items(): try: config_parser.set(section, option, value) except TypeError as te: raise TypeError( "%(msg)s: section %(section)s, option %(option)s, " "value: %(value)s" % { 'msg': te.args[0], 'section': section, 'option': option, 'value': value, }) return config_parser ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/common/conn_testers.py0000644000175000017500000005475300000000000024365 0ustar00coreycorey00000000000000# All Rights Reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import fixtures import netaddr from neutron_lib import constants from oslo_config import cfg from oslo_utils import uuidutils from neutron.common import utils as common_utils from neutron.plugins.ml2.drivers.openvswitch.agent.common import ( constants as ovs_consts) from neutron.tests.common import machine_fixtures from neutron.tests.common import net_helpers # NOTE: IPv6 uses NDP for obtaining destination endpoints link address that # extends round-trip packet time in ICMP tests. The timeout value should be # sufficient for correct scenarios but not too high because of negative # tests. 
ICMP_VERSION_TIMEOUTS = { constants.IP_VERSION_4: 1, constants.IP_VERSION_6: 2, } class ConnectionTesterException(Exception): pass def _validate_direction(f): @functools.wraps(f) def wrap(self, direction, *args, **kwargs): if direction not in (constants.INGRESS_DIRECTION, constants.EGRESS_DIRECTION): raise ConnectionTesterException('Unknown direction %s' % direction) return f(self, direction, *args, **kwargs) return wrap def _get_packets_sent_received(src_namespace, dst_ip, count): pinger = net_helpers.Pinger(src_namespace, dst_ip, count=count) pinger.start() pinger.wait() return pinger.sent, pinger.received def all_replied(src_ns, dst_ip, count): sent, received = _get_packets_sent_received(src_ns, dst_ip, count) return sent == received def all_lost(src_ns, dst_ip, count): sent, received = _get_packets_sent_received(src_ns, dst_ip, count) return received == 0 class ConnectionTester(fixtures.Fixture): """Base class for testers This class implements API for various methods for testing connectivity. The concrete implementation relies on how encapsulated resources are configured. That means child classes should define resources by themselves (e.g. endpoints connected through linux bridge or ovs bridge). """ UDP = net_helpers.NetcatTester.UDP TCP = net_helpers.NetcatTester.TCP ICMP = constants.PROTO_NAME_ICMP ARP = constants.ETHERTYPE_NAME_ARP INGRESS = constants.INGRESS_DIRECTION EGRESS = constants.EGRESS_DIRECTION def __init__(self, ip_cidr): self.ip_cidr = ip_cidr self.icmp_count = 3 self.connectivity_timeout = 12 def _setUp(self): self._protocol_to_method = { self.UDP: self._test_transport_connectivity, self.TCP: self._test_transport_connectivity, self.ICMP: self._test_icmp_connectivity, self.ARP: self._test_arp_connectivity} self._nc_testers = {} self._pingers = {} self.addCleanup(self.cleanup) def cleanup(self): for nc in self._nc_testers.values(): nc.stop_processes() for pinger in self._pingers.values(): pinger.stop() @property def vm_namespace(self): return self._vm.namespace @property def vm_ip_address(self): return self._vm.ip @property def vm_ip_cidr(self): return self._vm.ip_cidr @vm_ip_cidr.setter def vm_ip_cidr(self, ip_cidr): self._vm.ip_cidr = ip_cidr @property def vm_mac_address(self): return self._vm.port.link.address @vm_mac_address.setter def vm_mac_address(self, mac_address): self._vm.mac_address = mac_address @property def peer_mac_address(self): return self._peer.port.link.address @peer_mac_address.setter def peer_mac_address(self, mac_address): self._peer.mac_address = mac_address @property def peer_namespace(self): return self._peer.namespace @property def peer_ip_address(self): return self._peer.ip def set_vm_default_gateway(self, default_gw): self._vm.set_default_gateway(default_gw) def flush_arp_tables(self): """Flush arptables in all used namespaces""" for machine in (self._peer, self._vm): machine.port.neigh.flush(4, 'all') def _test_transport_connectivity(self, direction, protocol, src_port, dst_port): nc_tester = self._create_nc_tester(direction, protocol, src_port, dst_port) try: nc_tester.test_connectivity() except RuntimeError as exc: nc_tester.stop_processes() raise ConnectionTesterException( "%s connection over %s protocol with %s source port and " "%s destination port can't be established: %s" % ( direction, protocol, src_port, dst_port, exc)) @_validate_direction def _get_namespace_and_address(self, direction): if direction == self.INGRESS: return self.peer_namespace, self.vm_ip_address return self.vm_namespace, self.peer_ip_address def 
_test_icmp_connectivity(self, direction, protocol, src_port, dst_port): src_namespace, ip_address = self._get_namespace_and_address(direction) ip_version = common_utils.get_ip_version(ip_address) icmp_timeout = ICMP_VERSION_TIMEOUTS[ip_version] try: net_helpers.assert_ping(src_namespace, ip_address, timeout=icmp_timeout) except RuntimeError: raise ConnectionTesterException( "ICMP packets can't get from %s namespace to %s address" % ( src_namespace, ip_address)) def _test_arp_connectivity(self, direction, protocol, src_port, dst_port): src_namespace, ip_address = self._get_namespace_and_address(direction) try: net_helpers.assert_arping(src_namespace, ip_address) except RuntimeError: raise ConnectionTesterException( "ARP queries to %s address have no response from %s namespace" % (ip_address, src_namespace)) @_validate_direction def assert_connection(self, direction, protocol, src_port=None, dst_port=None): testing_method = self._protocol_to_method[protocol] testing_method(direction, protocol, src_port, dst_port) def assert_no_connection(self, direction, protocol, src_port=None, dst_port=None): try: self.assert_connection(direction, protocol, src_port, dst_port) except ConnectionTesterException: pass else: dst_port_info = str() src_port_info = str() if dst_port is not None: dst_port_info = " and destination port %d" % dst_port if src_port is not None: src_port_info = " and source port %d" % src_port raise ConnectionTesterException("%s connection with protocol %s" "%s%s was established but it " "shouldn't be possible" % ( direction, protocol, src_port_info, dst_port_info)) @_validate_direction def assert_established_connection(self, direction, protocol, src_port=None, dst_port=None): nc_params = (direction, protocol, src_port, dst_port) nc_tester = self._nc_testers.get(nc_params) if nc_tester: if nc_tester.is_established: try: nc_tester.test_connectivity() except RuntimeError: raise ConnectionTesterException( "Established %s connection with protocol %s, source " "port %s and destination port %s can no longer " "communicate" % nc_params) else: nc_tester.stop_processes() raise ConnectionTesterException( '%s connection with protocol %s, source port %s and ' 'destination port %s is not established' % nc_params) else: raise ConnectionTesterException( "Attempting to test established %s connection with protocol %s" ", source port %s and destination port %s that hasn't been " "established yet; call establish_connection() first" % nc_params) def assert_no_established_connection(self, direction, protocol, src_port=None, dst_port=None): try: self.assert_established_connection(direction, protocol, src_port, dst_port) except ConnectionTesterException: pass else: raise ConnectionTesterException( 'Established %s connection with protocol %s, source port %s, ' 'destination port %s can still send packets through' % ( direction, protocol, src_port, dst_port)) @_validate_direction def establish_connection(self, direction, protocol, src_port=None, dst_port=None): nc_tester = self._create_nc_tester(direction, protocol, src_port, dst_port) nc_tester.establish_connection() self.addCleanup(nc_tester.stop_processes) def _create_nc_tester(self, direction, protocol, src_port, dst_port): """Create a netcat tester. If a netcat tester with an established connection already exists, an exception is raised.
""" nc_key = (direction, protocol, src_port, dst_port) nc_tester = self._nc_testers.get(nc_key) if nc_tester and nc_tester.is_established: raise ConnectionTesterException( '%s connection using %s protocol, source port %s and ' 'destination port %s is already established' % ( direction, protocol, src_port, dst_port)) if direction == self.INGRESS: client_ns = self.peer_namespace server_ns = self.vm_namespace server_addr = self.vm_ip_address else: client_ns = self.vm_namespace server_ns = self.peer_namespace server_addr = self.peer_ip_address server_port = dst_port or net_helpers.get_free_namespace_port( protocol, server_ns) nc_tester = net_helpers.NetcatTester(client_namespace=client_ns, server_namespace=server_ns, address=server_addr, protocol=protocol, src_port=src_port, dst_port=server_port) self._nc_testers[nc_key] = nc_tester return nc_tester def _get_pinger(self, direction): try: pinger = self._pingers[direction] except KeyError: src_namespace, dst_address = self._get_namespace_and_address( direction) pinger = net_helpers.Pinger( src_namespace, dst_address, interval=0.3) self._pingers[direction] = pinger return pinger def start_sending_icmp(self, direction): pinger = self._get_pinger(direction) pinger.start() def stop_sending_icmp(self, direction): pinger = self._get_pinger(direction) pinger.stop() def get_sent_icmp_packets(self, direction): pinger = self._get_pinger(direction) return pinger.sent def get_received_icmp_packets(self, direction): pinger = self._get_pinger(direction) return pinger.received def assert_net_unreachable(self, direction, destination): src_namespace, dst_address = self._get_namespace_and_address( direction) pinger = net_helpers.Pinger(src_namespace, destination, count=5) pinger.start() pinger.wait() if not pinger.destination_unreachable: raise ConnectionTesterException( 'No Host Destination Unreachable packets were received when ' 'sending icmp packets to %s' % destination) def wait_for_connection(self, direction): src_ns, dst_ip = self._get_namespace_and_address( direction) all_replied_predicate = functools.partial( all_replied, src_ns, dst_ip, count=self.icmp_count) common_utils.wait_until_true( all_replied_predicate, timeout=self.connectivity_timeout, exception=ConnectionTesterException( "Not all ICMP packets replied from %s namespace to %s " "address." % self._get_namespace_and_address(direction))) def wait_for_no_connection(self, direction): src_ns, dst_ip = self._get_namespace_and_address( direction) all_lost_predicate = functools.partial( all_lost, src_ns, dst_ip, count=self.icmp_count) common_utils.wait_until_true( all_lost_predicate, timeout=self.connectivity_timeout, exception=ConnectionTesterException( "At least one packet got reply from %s namespace to %s " "address." % self._get_namespace_and_address(direction))) def set_peer_port_as_patch_port(self): pass def set_peer_port_as_vm_port(self): pass class OVSBaseConnectionTester(ConnectionTester): @property def peer_port_id(self): return self._peer.port.id @property def vm_port_id(self): return self._vm.port.id @staticmethod def set_tag(port_name, bridge, tag): ovsdb = bridge.ovsdb with ovsdb.transaction() as txn: txn.add(ovsdb.db_set('Port', port_name, ('tag', tag))) txn.add( ovsdb.db_add( 'Port', port_name, 'other_config', {'tag': str(tag)})) class OVSConnectionTester(OVSBaseConnectionTester): """Tester with OVS bridge in the middle The endpoints are created as OVS ports attached to the OVS bridge. NOTE: The OVS ports are connected from the namespace. 
This connection is currently not supported in OVS and may lead to unpredictable behavior: https://bugzilla.redhat.com/show_bug.cgi?id=1160340 """ def __init__(self, ip_cidr, br_int_cls): super(OVSConnectionTester, self).__init__(ip_cidr) self.br_int_cls = br_int_cls def _setUp(self): super(OVSConnectionTester, self)._setUp() br_name = self.useFixture( net_helpers.OVSBridgeFixture()).bridge.br_name self.bridge = self.br_int_cls(br_name) self.bridge.set_secure_mode() self.bridge.setup_controllers(cfg.CONF) self.bridge.setup_default_table() machines = self.useFixture( machine_fixtures.PeerMachines( self.bridge, self.ip_cidr)).machines self._peer = machines[0] self._vm = machines[1] self._set_port_attrs(self._peer.port) self._set_port_attrs(self._vm.port) def _set_port_attrs(self, port): port.id = uuidutils.generate_uuid() attrs = [('type', 'internal'), ('external_ids', { 'iface-id': port.id, 'iface-status': 'active', 'attached-mac': port.link.address})] for column, value in attrs: self.bridge.set_db_attribute('Interface', port.name, column, value) def set_vm_tag(self, tag): self.set_tag(self._vm.port.name, self.bridge, tag) self._vm.port.vlan_tag = tag def set_peer_tag(self, tag): self.set_tag(self._peer.port.name, self.bridge, tag) self._peer.port.vlan_tag = tag def set_peer_port_as_patch_port(self): """As packets coming from tunneling bridges are always tagged with the local VLAN tag, these flows simulate that behavior. """ self.bridge.add_flow( table=ovs_consts.LOCAL_SWITCHING, priority=110, vlan_tci=0, in_port=self.bridge.get_port_ofport(self._peer.port.name), actions='mod_vlan_vid:0x%x,' 'resubmit(,%d)' % ( self._peer.port.vlan_tag, ovs_consts.LOCAL_SWITCHING) ) self.bridge.add_flow( table=ovs_consts.TRANSIENT_TABLE, priority=4, dl_vlan='0x%x' % self._peer.port.vlan_tag, actions='strip_vlan,normal' ) def set_peer_port_as_vm_port(self): """Remove flows simulating traffic from tunneling bridges. This method is the opposite of set_peer_port_as_patch_port(). """ self.bridge.delete_flows( table=ovs_consts.LOCAL_SWITCHING, vlan_tci=0, in_port=self.bridge.get_port_ofport(self._peer.port.name), ) self.bridge.delete_flows( table=ovs_consts.TRANSIENT_TABLE, dl_vlan='0x%x' % self._peer.port.vlan_tag, ) class OVSTrunkConnectionTester(OVSBaseConnectionTester): """Tester with OVS bridge and a trunk bridge Two endpoints: one is a VM that is connected to a port associated with a trunk (the port is created on the trunk bridge), the other is a VM on the same network (the port is on the integration bridge). NOTE: The OVS ports are connected from the namespace. This connection is currently not supported in OVS and may lead to unpredictable behavior: https://bugzilla.redhat.com/show_bug.cgi?id=1160340 """ def __init__(self, ip_cidr, br_trunk_name): super(OVSTrunkConnectionTester, self).__init__(ip_cidr) self._br_trunk_name = br_trunk_name def _setUp(self): super(OVSTrunkConnectionTester, self)._setUp() self.bridge = self.useFixture( net_helpers.OVSBridgeFixture()).bridge self.br_trunk = self.useFixture( net_helpers.OVSTrunkBridgeFixture(self._br_trunk_name)).bridge self._peer = self.useFixture(machine_fixtures.FakeMachine( self.bridge, self.ip_cidr)) ip_cidr = net_helpers.increment_ip_cidr(self.ip_cidr, 1) self._vm = self.useFixture(machine_fixtures.FakeMachine( self.br_trunk, ip_cidr)) def add_vlan_interface_and_peer(self, vlan, ip_cidr): """Create a sub_port and a peer. We create a sub_port that uses 'vlan' as its segmentation ID. In the VM namespace we create a VLAN subinterface on the same VLAN.
A peer on the same network is created. When pinging from the peer to the sub_port, packets will be tagged with the internal VLAN ID of the network. The sub_port will remove that VLAN tag and push the VLAN specified in the segmentation ID. The packets will finally reach the VLAN subinterface in the VM namespace. """ network = netaddr.IPNetwork(ip_cidr) net_helpers.create_vlan_interface( self._vm.namespace, self._vm.port.name, self.vm_mac_address, network, vlan) self._ip_vlan = str(network.ip) ip_cidr = net_helpers.increment_ip_cidr(ip_cidr, 1) self._peer2 = self.useFixture(machine_fixtures.FakeMachine( self.bridge, ip_cidr)) def set_vm_tag(self, tag): self.set_tag(self._vm.port.name, self.br_trunk, tag) def set_peer_tag(self, tag): self.set_tag(self._peer.port.name, self.bridge, tag) def _get_subport_namespace_and_address(self, direction): if direction == self.INGRESS: return self._peer2.namespace, self._ip_vlan return self._vm.namespace, self._peer2.ip def wait_for_sub_port_connectivity(self, direction): src_ns, dst_ip = self._get_subport_namespace_and_address( direction) all_replied_predicate = functools.partial( all_replied, src_ns, dst_ip, count=self.icmp_count) common_utils.wait_until_true( all_replied_predicate, timeout=self.connectivity_timeout, exception=ConnectionTesterException( "ICMP traffic from %s namespace to subport with address %s " "can't get through." % (src_ns, dst_ip))) def wait_for_sub_port_no_connectivity(self, direction): src_ns, dst_ip = self._get_subport_namespace_and_address( direction) all_lost_predicate = functools.partial( all_lost, src_ns, dst_ip, count=self.icmp_count) common_utils.wait_until_true( all_lost_predicate, timeout=self.connectivity_timeout, exception=ConnectionTesterException( "ICMP traffic from %s namespace to subport with address %s " "can still get through." % (src_ns, dst_ip))) class LinuxBridgeConnectionTester(ConnectionTester): """Tester with linux bridge in the middle Both endpoints are placed in their own separate namespaces, connected to the bridge's namespace via veth pairs.
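Example (editor's sketch; the CIDR is arbitrary)::

    tester = self.useFixture(
        LinuxBridgeConnectionTester('192.168.0.2/24'))
    tester.assert_connection(tester.INGRESS, tester.ICMP)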
""" def __init__(self, *args, **kwargs): self.bridge_name = kwargs.pop('bridge_name', None) super(LinuxBridgeConnectionTester, self).__init__(*args, **kwargs) def _setUp(self): super(LinuxBridgeConnectionTester, self)._setUp() bridge_args = {} if self.bridge_name: bridge_args = {'prefix': self.bridge_name, 'prefix_is_full_name': True} self.bridge = self.useFixture( net_helpers.LinuxBridgeFixture(**bridge_args)).bridge machines = self.useFixture( machine_fixtures.PeerMachines( self.bridge, self.ip_cidr)).machines self._peer = machines[0] self._vm = machines[1] @property def bridge_namespace(self): return self.bridge.namespace @property def vm_port_id(self): return net_helpers.VethFixture.get_peer_name(self._vm.port.name) @property def peer_port_id(self): return net_helpers.VethFixture.get_peer_name(self._peer.port.name) def flush_arp_tables(self): self.bridge.neigh.flush(4, 'all') super(LinuxBridgeConnectionTester, self).flush_arp_tables() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3830452 neutron-16.0.0.0b2.dev214/neutron/tests/common/exclusive_resources/0000755000175000017500000000000000000000000025370 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/common/exclusive_resources/__init__.py0000644000175000017500000000000000000000000027467 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/common/exclusive_resources/ip_address.py0000644000175000017500000000355100000000000030063 0ustar00coreycorey00000000000000# Copyright 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import random import netaddr from neutron.tests.common.exclusive_resources import resource_allocator TEST_NET_RANGE = { 1: ('192.0.2.1', '192.0.2.254'), 2: ('198.51.100.1', '198.51.100.254'), 3: ('203.0.113.1', '203.0.113.254'), } def get_test_net_address_fixture(test_net_number): """Return exclusive ip address on the system based on RFC 5737. :param block: One of following constants: 1, 2, 3 https://tools.ietf.org/html/rfc5737 """ try: net_range = TEST_NET_RANGE[test_net_number] except KeyError: raise ValueError("Unknown constant for TEST-NET: %d" % test_net_number) return ExclusiveIPAddress(*net_range) def get_random_ip(low, high): parent_range = netaddr.IPRange(low, high) return str(random.choice(parent_range)) class ExclusiveIPAddress(resource_allocator.ExclusiveResource): """Allocate a unique ip address. 
:ivar address: allocated ip address :type address: netaddr.IPAddress """ def __init__(self, low, high): super(ExclusiveIPAddress, self).__init__( 'ip_addresses', functools.partial(get_random_ip, low, high)) def _setUp(self): super(ExclusiveIPAddress, self)._setUp() self.address = netaddr.IPAddress(self.resource) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/common/exclusive_resources/ip_network.py0000644000175000017500000000324700000000000030131 0ustar00coreycorey00000000000000# Copyright 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import netaddr from neutron.tests.common.exclusive_resources import ip_address from neutron.tests.common.exclusive_resources import resource_allocator def _get_random_network(low, high, netmask): ip = ip_address.get_random_ip(low, high) return str(netaddr.IPNetwork("%s/%s" % (ip, netmask)).cidr) class ExclusiveIPNetwork(resource_allocator.ExclusiveResource): """Allocate a non-overlapping ip network. :ivar network: allocated ip network :type network: netaddr.IPNetwork """ def __init__(self, low, high, netmask): super(ExclusiveIPNetwork, self).__init__( 'ip_networks', functools.partial(_get_random_network, low, high, netmask), self.is_valid) def _setUp(self): super(ExclusiveIPNetwork, self)._setUp() self.network = netaddr.IPNetwork(self.resource) def is_valid(self, new_resource, allocated_resources): new_ipset = netaddr.IPSet([new_resource]) allocated_ipset = netaddr.IPSet(allocated_resources) return new_ipset.isdisjoint(allocated_ipset) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/common/exclusive_resources/port.py0000644000175000017500000000236700000000000026736 0ustar00coreycorey00000000000000# Copyright 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools from neutron.tests.common.exclusive_resources import resource_allocator from neutron.tests.common import net_helpers class ExclusivePort(resource_allocator.ExclusiveResource): """Allocate a unique port for a specific protocol. 
:ivar port: allocated port :type port: int """ def __init__(self, protocol, start=1024, end=None): super(ExclusivePort, self).__init__( 'ports', functools.partial(net_helpers.get_free_namespace_port, protocol, start=start, end=end)) def _setUp(self): super(ExclusivePort, self)._setUp() self.port = self.resource ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/common/exclusive_resources/resource_allocator.py0000644000175000017500000001122000000000000031625 0ustar00coreycorey00000000000000# Copyright 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import fixtures from neutron_lib.utils import runtime from oslo_log import log as logging from oslo_utils import fileutils LOG = logging.getLogger(__name__) MAX_ATTEMPTS = 100 TMP_DIR = '/tmp/neutron_exclusive_resources/' class ExclusiveResource(fixtures.Fixture): def __init__(self, resource_name, allocator_function, validator=None): self.ra = ResourceAllocator( resource_name, allocator_function, validator) def _setUp(self): self.resource = self.ra.allocate() self.addCleanup(self.ra.release, self.resource) class ResourceAllocator(object): """ResourceAllocator persists cross-process allocations of a resource. Allocations are persisted to a file determined by the 'resource_name', and are allocated via an allocator_function. The public interface (allocate and release) is guarded by a file lock. The intention is to allow atomic, cross-process allocation of shared resources such as ports and IP addresses. For usage of this class, please see ExclusiveIPAddress and its functional tests. Note that this class doesn't maintain in-memory state, and multiple instances of it may be initialized and used. A pool of resources is identified solely by the 'resource_name' argument. """ def __init__(self, resource_name, allocator_function, validator=None): """Initialize a resource allocator. :param resource_name: A unique identifier for a pool of resources. :param allocator_function: A function with no parameters that generates a resource. :param validator: An optional function that accepts a resource and an existing pool and returns whether the generated resource is valid. """ def is_valid(new_resource, allocated_resources): return new_resource not in allocated_resources self._allocator_function = allocator_function self._state_file_path = os.path.join(TMP_DIR, resource_name) self._validator = validator if validator else is_valid self._resource_name = resource_name @runtime.synchronized('resource_allocator', external=True, lock_path='/tmp') def allocate(self): allocations = self._get_allocations() for i in range(MAX_ATTEMPTS): resource = str(self._allocator_function()) if self._validator(resource, allocations): allocations.add(resource) self._write_allocations(allocations) LOG.debug('Allocated exclusive resource %s of type %s.
' 'The allocations are now: %s', resource, self._resource_name, allocations) return resource raise ValueError( 'Could not allocate a new resource of type %s from pool %s' % (self._resource_name, allocations)) @runtime.synchronized('resource_allocator', external=True, lock_path='/tmp') def release(self, resource): allocations = self._get_allocations() allocations.remove(resource) if allocations: self._write_allocations(allocations) else: # Clean up the file if we're releasing the last allocation os.remove(self._state_file_path) LOG.debug('Released exclusive resource %s of type %s. The allocations ' 'are now: %s', resource, self._resource_name, allocations) def _get_allocations(self): fileutils.ensure_tree(TMP_DIR, mode=0o755) try: with open(self._state_file_path, 'r') as allocations_file: contents = allocations_file.read() except IOError: contents = None # If the file was empty, we want to return an empty set, not {''} return set(contents.split(',')) if contents else set() def _write_allocations(self, allocations): with open(self._state_file_path, 'w') as allocations_file: allocations_file.write(','.join(allocations)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/common/helpers.py0000644000175000017500000002057300000000000023312 0ustar00coreycorey00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
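# NOTE(editor): illustrative sketch, not part of the original module. The
# register_* helpers below persist a fake agent heartbeat through
# agents_db.AgentDbMixin so scheduler tests see a live agent, e.g.:
#
#     agent = register_ovs_agent(host='host-a',
#                                bridge_mappings={'physnet1': 'br-eth1'})
#     kill_agent(agent['id'])    # age the heartbeat so the agent looks dead
#     revive_agent(agent['id'])  # refresh the heartbeat again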
import datetime from distutils import version import functools import os import random from neutron_lib.agent import topics from neutron_lib import constants from neutron_lib import context from oslo_utils import timeutils import neutron from neutron.agent.common import ovs_lib from neutron.db import agents_db HOST = 'localhost' DEFAULT_AZ = 'nova' def find_file(filename, path): """Find a file with name 'filename' located in 'path'.""" for root, _, files in os.walk(path): if filename in files: return os.path.abspath(os.path.join(root, filename)) def find_sample_file(filename): """Find a file with name 'filename' located in the sample directory.""" return find_file( filename, path=os.path.join(neutron.__path__[0], '..', 'etc')) def get_test_log_path(): return os.environ.get('OS_LOG_PATH', '/tmp') class FakePlugin(agents_db.AgentDbMixin): pass def _get_l3_agent_dict(host, agent_mode, internal_only=True, az=DEFAULT_AZ): return { 'agent_type': constants.AGENT_TYPE_L3, 'binary': 'neutron-l3-agent', 'host': host, 'topic': topics.L3_AGENT, 'availability_zone': az, 'configurations': {'agent_mode': agent_mode, 'handle_internal_only_routers': internal_only}} def _register_agent(agent, plugin=None): if not plugin: plugin = FakePlugin() admin_context = context.get_admin_context() plugin.create_or_update_agent(admin_context, agent, timeutils.utcnow()) return plugin._get_agent_by_type_and_host( admin_context, agent['agent_type'], agent['host']) def register_l3_agent(host=HOST, agent_mode=constants.L3_AGENT_MODE_LEGACY, internal_only=True, az=DEFAULT_AZ): agent = _get_l3_agent_dict(host, agent_mode, internal_only, az) return _register_agent(agent) def _get_dhcp_agent_dict(host, networks=0, az=DEFAULT_AZ): agent = { 'binary': 'neutron-dhcp-agent', 'host': host, 'topic': topics.DHCP_AGENT, 'agent_type': constants.AGENT_TYPE_DHCP, 'availability_zone': az, 'configurations': {'dhcp_driver': 'dhcp_driver', 'networks': networks}} return agent def register_dhcp_agent(host=HOST, networks=0, admin_state_up=True, alive=True, az=DEFAULT_AZ): agent = _register_agent( _get_dhcp_agent_dict(host, networks, az=az)) if not admin_state_up: set_agent_admin_state(agent['id']) if not alive: kill_agent(agent['id']) return FakePlugin()._get_agent_by_type_and_host( context.get_admin_context(), agent['agent_type'], agent['host']) def kill_agent(agent_id): hour_ago = timeutils.utcnow() - datetime.timedelta(hours=1) FakePlugin().update_agent( context.get_admin_context(), agent_id, {'agent': { 'started_at': hour_ago, 'heartbeat_timestamp': hour_ago}}) def revive_agent(agent_id): now = timeutils.utcnow() FakePlugin().update_agent( context.get_admin_context(), agent_id, {'agent': {'started_at': now, 'heartbeat_timestamp': now}}) def set_agent_admin_state(agent_id, admin_state_up=False): FakePlugin().update_agent( context.get_admin_context(), agent_id, {'agent': {'admin_state_up': admin_state_up}}) def _get_l2_agent_dict(host, agent_type, binary, tunnel_types=None, tunneling_ip='20.0.0.1', interface_mappings=None, bridge_mappings=None, l2pop_network_types=None, device_mappings=None, start_flag=True, integration_bridge=None): agent = { 'binary': binary, 'host': host, 'topic': constants.L2_AGENT_TOPIC, 'configurations': {}, 'agent_type': agent_type, 'tunnel_type': [], 'start_flag': start_flag} if tunnel_types is not None: agent['configurations']['tunneling_ip'] = tunneling_ip agent['configurations']['tunnel_types'] = tunnel_types if bridge_mappings is not None: agent['configurations']['bridge_mappings'] = bridge_mappings if 
interface_mappings is not None: agent['configurations']['interface_mappings'] = interface_mappings if l2pop_network_types is not None: agent['configurations']['l2pop_network_types'] = l2pop_network_types if device_mappings is not None: agent['configurations']['device_mappings'] = device_mappings if integration_bridge is not None: agent['configurations']['integration_bridge'] = integration_bridge return agent def register_ovs_agent(host=HOST, agent_type=constants.AGENT_TYPE_OVS, binary='neutron-openvswitch-agent', tunnel_types=['vxlan'], tunneling_ip='20.0.0.1', interface_mappings=None, bridge_mappings=None, l2pop_network_types=None, plugin=None, start_flag=True, integration_bridge=None): agent = _get_l2_agent_dict(host, agent_type, binary, tunnel_types, tunneling_ip, interface_mappings, bridge_mappings, l2pop_network_types, start_flag=start_flag, integration_bridge=integration_bridge) return _register_agent(agent, plugin) def register_linuxbridge_agent(host=HOST, agent_type=constants.AGENT_TYPE_LINUXBRIDGE, binary='neutron-linuxbridge-agent', tunnel_types=['vxlan'], tunneling_ip='20.0.0.1', interface_mappings=None, bridge_mappings=None, plugin=None): agent = _get_l2_agent_dict(host, agent_type, binary, tunnel_types, tunneling_ip=tunneling_ip, interface_mappings=interface_mappings, bridge_mappings=bridge_mappings) return _register_agent(agent, plugin) def register_macvtap_agent(host=HOST, agent_type=constants.AGENT_TYPE_MACVTAP, binary='neutron-macvtap-agent', interface_mappings=None, plugin=None): agent = _get_l2_agent_dict(host, agent_type, binary, interface_mappings=interface_mappings) return _register_agent(agent, plugin) def register_sriovnicswitch_agent(host=HOST, agent_type=constants.AGENT_TYPE_NIC_SWITCH, binary='neutron-sriov-nic-agent', device_mappings=None, plugin=None): agent = _get_l2_agent_dict(host, agent_type, binary, device_mappings=device_mappings) return _register_agent(agent, plugin) def get_not_used_vlan(bridge, vlan_range): port_vlans = bridge.ovsdb.db_find( 'Port', ('tag', '!=', []), columns=['tag']).execute() used_vlan_tags = {val['tag'] for val in port_vlans} available_vlans = vlan_range - used_vlan_tags return random.choice(list(available_vlans)) def skip_if_ovs_older_than(ovs_version): """Decorator for test method to skip if OVS version doesn't meet minimal requirement. """ def skip_if_bad_ovs(f): @functools.wraps(f) def check_ovs_and_skip(test): ovs = ovs_lib.BaseOVS() current_ovs_version = version.StrictVersion( ovs.config['ovs_version']) if current_ovs_version < version.StrictVersion(ovs_version): test.skipTest("This test requires OVS version %s or higher." % ovs_version) return f(test) return check_ovs_and_skip return skip_if_bad_ovs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/common/l3_test_common.py0000644000175000017500000003661700000000000024603 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
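# NOTE(editor): illustrative sketch, not part of the original module.
# prepare_router_data() below fabricates the router dict an L3 agent would
# receive over RPC; for example, a dual-stack HA router with a floating IP
# could be built with:
#
#     router = prepare_router_data(enable_ha=True, enable_floating_ip=True,
#                                  dual_stack=True, num_internal_ports=2)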
import copy import netaddr from neutron_lib import constants as lib_constants from neutron_lib.services.qos import constants as qos_consts from oslo_utils import uuidutils from six import moves from neutron.common import ipv6_utils _uuid = uuidutils.generate_uuid class FakeDev(object): def __init__(self, name): self.name = name def get_ha_interface(ip='169.254.192.1', mac='12:34:56:78:2b:5d'): subnet_id = _uuid() return {'admin_state_up': True, 'device_id': _uuid(), 'device_owner': lib_constants.DEVICE_OWNER_ROUTER_HA_INTF, 'fixed_ips': [{'ip_address': ip, 'prefixlen': 18, 'subnet_id': subnet_id}], 'id': _uuid(), 'mac_address': mac, 'name': u'L3 HA Admin port 0', 'mtu': 1500, 'network_id': _uuid(), 'status': u'ACTIVE', 'subnets': [{'cidr': '169.254.192.0/18', 'gateway_ip': '169.254.255.254', 'id': subnet_id}], 'tenant_id': '', 'agent_id': _uuid(), 'agent_host': 'aaa', 'priority': 1} def prepare_router_data(ip_version=lib_constants.IP_VERSION_4, enable_snat=None, num_internal_ports=1, enable_floating_ip=False, enable_ha=False, extra_routes=False, dual_stack=False, enable_gw=True, v6_ext_gw_with_sub=True, snat_bound_fip=False, enable_pf_floating_ip=False, vrrp_id=None, **kwargs): fixed_ips = [] subnets = [] gateway_mac = kwargs.get('gateway_mac', 'ca:fe:de:ad:be:ee') extra_subnets = [] for loop_version in (lib_constants.IP_VERSION_4, lib_constants.IP_VERSION_6): if (loop_version == lib_constants.IP_VERSION_4 and (ip_version == lib_constants.IP_VERSION_4 or dual_stack)): ip_address = kwargs.get('ip_address', '19.4.4.4') prefixlen = 24 subnet_cidr = kwargs.get('subnet_cidr', '19.4.4.0/24') gateway_ip = kwargs.get('gateway_ip', '19.4.4.1') _extra_subnet = {'cidr': '9.4.5.0/24'} elif (loop_version == lib_constants.IP_VERSION_6 and (ip_version == lib_constants.IP_VERSION_6 or dual_stack) and v6_ext_gw_with_sub): ip_address = kwargs.get('ip_address', 'fd00::4') prefixlen = 64 subnet_cidr = kwargs.get('subnet_cidr', 'fd00::/64') gateway_ip = kwargs.get('gateway_ip', 'fd00::1') _extra_subnet = {'cidr': 'fd01::/64'} else: continue subnet_id = _uuid() fixed_ips.append({'ip_address': ip_address, 'subnet_id': subnet_id, 'prefixlen': prefixlen}) subnets.append({'id': subnet_id, 'cidr': subnet_cidr, 'gateway_ip': gateway_ip}) extra_subnets.append(_extra_subnet) if not fixed_ips and v6_ext_gw_with_sub: raise ValueError("Invalid ip_version: %s" % ip_version) router_id = _uuid() ex_gw_port = {} if enable_gw: ex_gw_port = {'id': _uuid(), 'mac_address': gateway_mac, 'mtu': 1500, 'network_id': _uuid(), 'fixed_ips': fixed_ips, 'subnets': subnets, 'extra_subnets': extra_subnets} external_gateway_info = {"qos_policy_id": kwargs.get('qos_policy_id')} routes = [] if extra_routes: routes = [{'destination': '8.8.8.0/24', 'nexthop': '19.4.4.4'}] router = { 'id': router_id, 'distributed': False, lib_constants.INTERFACE_KEY: [], 'routes': routes, 'gw_port': ex_gw_port, 'external_gateway_info': external_gateway_info} router_fips = router.get(lib_constants.FLOATINGIP_KEY, []) if enable_floating_ip: fip = {'id': _uuid(), 'port_id': _uuid(), 'status': 'DOWN', 'floating_ip_address': '19.4.4.2', 'fixed_ip_address': '10.0.0.1'} qos_policy_id = kwargs.get(qos_consts.QOS_POLICY_ID) if qos_policy_id: fip[qos_consts.QOS_POLICY_ID] = qos_policy_id router_fips.append(fip) if snat_bound_fip: fip = {'id': _uuid(), 'port_id': _uuid(), 'status': 'DOWN', 'floating_ip_address': '19.4.4.3', 'fixed_ip_address': '10.0.0.2'} qos_policy_id = kwargs.get(qos_consts.QOS_POLICY_ID) if qos_policy_id: fip[qos_consts.QOS_POLICY_ID] = qos_policy_id 
router_fips.append(fip) router[lib_constants.FLOATINGIP_KEY] = router_fips pf_fips = [] if enable_pf_floating_ip: fip = {'id': _uuid(), 'port_id': _uuid(), 'status': 'DOWN', 'floating_ip_address': '19.4.4.4', 'fixed_ip_address': '10.0.0.3'} qos_policy_id = kwargs.get(qos_consts.QOS_POLICY_ID) if qos_policy_id: fip[qos_consts.QOS_POLICY_ID] = qos_policy_id pf_fips.append(fip) router['_pf_floatingips'] = pf_fips router_append_interface(router, count=num_internal_ports, ip_version=ip_version, dual_stack=dual_stack) if enable_ha: ha_port_ip = kwargs.get('ha_port_ip', '169.254.192.1') ha_port_mac = kwargs.get('ha_port_mac', '12:34:56:78:2b:aa') router['ha'] = True router['ha_vr_id'] = vrrp_id or 1 router[lib_constants.HA_INTERFACE_KEY] = ( get_ha_interface(ip=ha_port_ip, mac=ha_port_mac)) if enable_snat is not None: router['enable_snat'] = enable_snat return router def get_subnet_id(port): return port['fixed_ips'][0]['subnet_id'] def router_append_interface(router, count=1, ip_version=lib_constants.IP_VERSION_4, ra_mode=None, addr_mode=None, dual_stack=False, same_port=False): interfaces = router[lib_constants.INTERFACE_KEY] current = sum( [netaddr.IPNetwork(subnet['cidr']).version == ip_version for p in interfaces for subnet in p['subnets']]) # If dual_stack=True, create IPv4 and IPv6 subnets on each port # If same_port=True, create ip_version number of subnets on a single port # Else create just an ip_version subnet on each port if dual_stack: ip_versions = [lib_constants.IP_VERSION_4, lib_constants.IP_VERSION_6] elif same_port: ip_versions = [ip_version] * count count = 1 else: ip_versions = [ip_version] mac_address = netaddr.EUI('ca:fe:de:ad:be:ef') mac_address.dialect = netaddr.mac_unix for i in range(current, current + count): fixed_ips = [] subnets = [] for loop_version in ip_versions: if (loop_version == lib_constants.IP_VERSION_4 and (ip_version == lib_constants.IP_VERSION_4 or dual_stack)): ip_pool = '35.4.%i.4' cidr_pool = '35.4.%i.0/24' prefixlen = 24 gw_pool = '35.4.%i.1' elif (loop_version == lib_constants.IP_VERSION_6 and (ip_version == lib_constants.IP_VERSION_6 or dual_stack)): ip_pool = 'fd01:%x:1::6' cidr_pool = 'fd01:%x:1::/64' prefixlen = 64 gw_pool = 'fd01:%x:1::1' else: continue subnet_id = _uuid() fixed_ips.append({'ip_address': ip_pool % i, 'subnet_id': subnet_id, 'prefixlen': prefixlen}) subnets.append({'id': subnet_id, 'cidr': cidr_pool % i, 'gateway_ip': gw_pool % i, 'ipv6_ra_mode': ra_mode, 'ipv6_address_mode': addr_mode}) if not fixed_ips: raise ValueError("Invalid ip_version: %s" % ip_version) interfaces.append( {'id': _uuid(), 'mtu': 1500, 'network_id': _uuid(), 'admin_state_up': True, 'fixed_ips': fixed_ips, 'mac_address': str(mac_address), 'subnets': subnets}) mac_address.value += 1 def router_append_subnet(router, count=1, ip_version=lib_constants.IP_VERSION_4, ipv6_subnet_modes=None, interface_id=None, dns_nameservers=None, network_mtu=0): if ip_version == lib_constants.IP_VERSION_6: subnet_mode_none = {'ra_mode': None, 'address_mode': None} if not ipv6_subnet_modes: ipv6_subnet_modes = [subnet_mode_none] * count elif len(ipv6_subnet_modes) != count: ipv6_subnet_modes.extend([subnet_mode_none for i in moves.range(len(ipv6_subnet_modes), count)]) if ip_version == lib_constants.IP_VERSION_4: ip_pool = '35.4.%i.4' cidr_pool = '35.4.%i.0/24' prefixlen = 24 gw_pool = '35.4.%i.1' elif ip_version == lib_constants.IP_VERSION_6: ip_pool = 'fd01:%x::6' cidr_pool = 'fd01:%x::/64' prefixlen = 64 gw_pool = 'fd01:%x::1' else: raise ValueError("Invalid ip_version: %s" % 
ip_version) interfaces = copy.deepcopy(router.get(lib_constants.INTERFACE_KEY, [])) if interface_id: try: interface = next(i for i in interfaces if i['id'] == interface_id) except StopIteration: raise ValueError("interface_id not found") fixed_ips, subnets = interface['fixed_ips'], interface['subnets'] else: interface = None fixed_ips, subnets = [], [] num_existing_subnets = len(subnets) for i in moves.range(count): subnet_id = _uuid() fixed_ips.append( {'ip_address': ip_pool % (i + num_existing_subnets), 'subnet_id': subnet_id, 'prefixlen': prefixlen}) subnets.append( {'id': subnet_id, 'cidr': cidr_pool % (i + num_existing_subnets), 'gateway_ip': gw_pool % (i + num_existing_subnets), 'dns_nameservers': dns_nameservers, 'ipv6_ra_mode': ipv6_subnet_modes[i]['ra_mode'], 'ipv6_address_mode': ipv6_subnet_modes[i]['address_mode']}) if interface: # Update old interface index = interfaces.index(interface) interfaces[index].update({'fixed_ips': fixed_ips, 'subnets': subnets}) else: # New interface appended to interfaces list mac_address = netaddr.EUI('ca:fe:de:ad:be:ef') mac_address.dialect = netaddr.mac_unix interfaces.append( {'id': _uuid(), 'mtu': network_mtu, 'network_id': _uuid(), 'admin_state_up': True, 'mac_address': str(mac_address), 'fixed_ips': fixed_ips, 'subnets': subnets}) router[lib_constants.INTERFACE_KEY] = interfaces def router_append_pd_enabled_subnet(router, count=1, prefix=None): if not prefix: prefix = lib_constants.PROVISIONAL_IPV6_PD_PREFIX interfaces = router[lib_constants.INTERFACE_KEY] current = sum(netaddr.IPNetwork(subnet['cidr']).version == 6 for p in interfaces for subnet in p['subnets']) mac_address = netaddr.EUI('ca:fe:de:ad:be:ef') mac_address.dialect = netaddr.mac_unix pd_intfs = [] for i in range(current, current + count): subnet_id = _uuid() intf = {'id': _uuid(), 'mtu': 1500, 'network_id': _uuid(), 'admin_state_up': True, 'fixed_ips': [{'ip_address': '::1', 'prefixlen': 64, 'subnet_id': subnet_id}], 'mac_address': str(mac_address), 'subnets': [{'id': subnet_id, 'cidr': prefix, 'gateway_ip': '::1', 'ipv6_ra_mode': lib_constants.IPV6_SLAAC, 'subnetpool_id': lib_constants.IPV6_PD_POOL_ID}]} interfaces.append(intf) pd_intfs.append(intf) mac_address.value += 1 def get_unassigned_pd_interfaces(router): pd_intfs = [] for intf in router[lib_constants.INTERFACE_KEY]: for subnet in intf['subnets']: if (ipv6_utils.is_ipv6_pd_enabled(subnet) and subnet['cidr'] == lib_constants.PROVISIONAL_IPV6_PD_PREFIX): pd_intfs.append(intf) return pd_intfs def get_assigned_pd_interfaces(router): pd_intfs = [] for intf in router[lib_constants.INTERFACE_KEY]: for subnet in intf['subnets']: if (ipv6_utils.is_ipv6_pd_enabled(subnet) and subnet['cidr'] != lib_constants.PROVISIONAL_IPV6_PD_PREFIX): pd_intfs.append(intf) return pd_intfs def assign_prefix_for_pd_interfaces(router): pd_intfs = [] for ifno, intf in enumerate(router[lib_constants.INTERFACE_KEY]): for subnet in intf['subnets']: if (ipv6_utils.is_ipv6_pd_enabled(subnet) and subnet['cidr'] == lib_constants.PROVISIONAL_IPV6_PD_PREFIX): subnet['cidr'] = "2001:db8:%d::/64" % ifno pd_intfs.append(intf) return pd_intfs def prepare_ext_gw_test(context, ri, dual_stack=False): subnet_id = _uuid() fixed_ips = [{'subnet_id': subnet_id, 'ip_address': '20.0.0.30', 'prefixlen': 24}] subnets = [{'id': subnet_id, 'cidr': '20.0.0.0/24', 'gateway_ip': '20.0.0.1'}] if dual_stack: subnet_id_v6 = _uuid() fixed_ips.append({'subnet_id': subnet_id_v6, 'ip_address': '2001:192:168:100::2', 'prefixlen': 64}) subnets.append({'id': subnet_id_v6, 'cidr': 
'2001:192:168:100::/64', 'gateway_ip': '2001:192:168:100::1'}) ex_gw_port = {'fixed_ips': fixed_ips, 'subnets': subnets, 'extra_subnets': [{'cidr': '172.16.0.0/24'}], 'id': _uuid(), 'mtu': 1500, 'network_id': _uuid(), 'mac_address': 'ca:fe:de:ad:be:ef'} interface_name = ri.get_external_device_name(ex_gw_port['id']) context.device_exists.return_value = True return interface_name, ex_gw_port ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/common/machine_fixtures.py0000644000175000017500000001232600000000000025202 0ustar00coreycorey00000000000000# Copyright (c) 2015 Thales Services SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import functools import fixtures from neutron.agent.linux import ip_lib from neutron.common import utils from neutron.tests.common import net_helpers class FakeMachineException(Exception): pass class FakeMachineBase(fixtures.Fixture): """Create a fake machine. :ivar bridge: bridge on which the fake machine is bound :ivar ip_cidr: fake machine ip_cidr :type ip_cidr: str :ivar ip: fake machine ip :type ip: str :ivar gateway_ip: fake machine gateway ip :type gateway_ip: str :ivar namespace: namespace emulating the machine :type namespace: str :ivar port: port binding the namespace to the bridge :type port: IPDevice """ def __init__(self): self.port = None def _setUp(self): ns_fixture = self.useFixture( net_helpers.NamespaceFixture()) self.namespace = ns_fixture.name def execute(self, *args, **kwargs): ns_ip_wrapper = ip_lib.IPWrapper(self.namespace) return ns_ip_wrapper.netns.execute(*args, **kwargs) def ping_predicate(self, dst_ip): try: self.assert_ping(dst_ip) except RuntimeError: return False return True def block_until_ping(self, dst_ip): predicate = functools.partial(self.ping_predicate, dst_ip) utils.wait_until_true( predicate, exception=FakeMachineException( "No ICMP reply obtained from IP address %s" % dst_ip) ) def block_until_no_ping(self, dst_ip): predicate = functools.partial( lambda ip: not self.ping_predicate(ip), dst_ip) utils.wait_until_true( predicate, exception=FakeMachineException( "ICMP packets still pass to %s IP address." 
% dst_ip) ) def assert_ping(self, dst_ip): net_helpers.assert_ping(self.namespace, dst_ip) def assert_no_ping(self, dst_ip): net_helpers.assert_no_ping(self.namespace, dst_ip) @property def ip(self): raise NotImplementedError() @property def ip_cidr(self): raise NotImplementedError() @property def mac_address(self): return self.port.link.address class FakeMachine(FakeMachineBase): def __init__(self, bridge, ip_cidr, gateway_ip=None, ipv6_cidr=None): super(FakeMachine, self).__init__() self.bridge = bridge self._ip_cidr = ip_cidr self._ipv6_cidr = ipv6_cidr self.gateway_ip = gateway_ip def _setUp(self): super(FakeMachine, self)._setUp() self.port = self.useFixture( net_helpers.PortFixture.get(self.bridge, self.namespace)).port self.port.addr.add(self._ip_cidr) if self.gateway_ip: net_helpers.set_namespace_gateway(self.port, self.gateway_ip) @property def ip(self): return self._ip_cidr.partition('/')[0] @property def ip_cidr(self): return self._ip_cidr @ip_cidr.setter def ip_cidr(self, ip_cidr): self.port.addr.add(ip_cidr) self.port.addr.delete(self._ip_cidr) self._ip_cidr = ip_cidr @property def ipv6(self): return self._ipv6_cidr.partition('/')[0] @property def ipv6_cidr(self): return self._ipv6_cidr @ipv6_cidr.setter def ipv6_cidr(self, ipv6_cidr): if self._ipv6_cidr: self.port.addr.delete(self._ipv6_cidr) self.port.addr.add(ipv6_cidr) self._ipv6_cidr = ipv6_cidr @FakeMachineBase.mac_address.setter def mac_address(self, mac_address): self.port.link.set_down() self.port.link.set_address(mac_address) self.port.link.set_up() def set_default_gateway(self, default_gw): self.port.route.add_gateway(default_gw) class PeerMachines(fixtures.Fixture): """Create 'amount' peered machines on an ip_cidr. :ivar bridge: bridge on which peer machines are bound :ivar ip_cidr: ip_cidr on which peer machines have ips :type ip_cidr: str :ivar machines: fake machines :type machines: FakeMachine list """ CIDR = '192.168.0.1/24' def __init__(self, bridge, ip_cidr=None, gateway_ip=None, amount=2): super(PeerMachines, self).__init__() self.bridge = bridge self.ip_cidr = ip_cidr or self.CIDR self.gateway_ip = gateway_ip self.amount = amount def _setUp(self): self.machines = [] for index in range(self.amount): ip_cidr = net_helpers.increment_ip_cidr(self.ip_cidr, index) self.machines.append( self.useFixture( FakeMachine(self.bridge, ip_cidr, self.gateway_ip))) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/common/net_helpers.py0000644000175000017500000010615100000000000024155 0ustar00coreycorey00000000000000# Copyright (c) 2015 Thales Services SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
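# ---------------------------------------------------------------------------
# Editor's note (illustrative addition, not part of the original tree): a
# minimal sketch of the machine_fixtures.py helpers above — two FakeMachine
# fixtures plugged into one OVS bridge, then pinging each other. The test
# class and addresses are hypothetical, base.BaseSudoTestCase is an assumed
# privileged test base class, and OVSBridgeFixture comes from
# net_helpers.py, which follows below.

from neutron.tests.common import machine_fixtures
from neutron.tests.common import net_helpers
from neutron.tests.functional import base  # assumed privileged test base


class ExampleConnectivityTestCase(base.BaseSudoTestCase):

    def test_two_machines_can_ping(self):
        bridge = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
        src = self.useFixture(
            machine_fixtures.FakeMachine(bridge, '10.0.0.1/24'))
        dst = self.useFixture(
            machine_fixtures.FakeMachine(bridge, '10.0.0.2/24'))
        # block_until_ping() retries assert_ping() via wait_until_true()
        # and raises FakeMachineException on timeout
        src.block_until_ping(dst.ip)
# ---------------------------------------------------------------------------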
#
import abc
from concurrent import futures
import contextlib
import os
import random
import re
import select
import shlex
import signal
import subprocess
import time

import fixtures
import netaddr
from neutron_lib import constants as n_const
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
import six

from neutron.agent.common import ovs_lib
from neutron.agent.linux import bridge_lib
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_firewall
from neutron.agent.linux import utils
from neutron.common import utils as common_utils
from neutron.conf.agent import common as config
from neutron.db import db_base_plugin_common as db_base
from neutron.plugins.ml2.drivers.linuxbridge.agent import \
    linuxbridge_neutron_agent as linuxbridge_agent
from neutron.tests.common import base as common_base
from neutron.tests import tools

LOG = logging.getLogger(__name__)

UNDEFINED = object()

NS_PREFIX = 'test-'
BR_PREFIX = 'test-br'
PORT_PREFIX = 'port'
VETH0_PREFIX = 'test-veth0'
VETH1_PREFIX = 'test-veth1'
PATCH_PREFIX = 'patch'
MACVTAP_PREFIX = 'macvtap'

# The port name should be shorter than DEVICE_NAME_MAX_LEN because if this
# port is used to provide a VLAN connection between two linuxbridge agents,
# room for the VLAN ID is also required. A VLAN ID can take up to 4 digits,
# and there is an additional "." in the device name, which together add up
# to DEVICE_NAME_MAX_LEN = 15 chars.
LB_DEVICE_NAME_MAX_LEN = 10

SS_SOURCE_PORT_PATTERN = re.compile(
    r'^.*\s+\d+\s+.*:(?P<port>\d+)\s+[^\s]+:.*')

READ_TIMEOUT = int(
    os.environ.get('OS_TEST_READ_TIMEOUT', 5))

CHILD_PROCESS_TIMEOUT = int(
    os.environ.get('OS_TEST_CHILD_PROCESS_TIMEOUT', 20))
CHILD_PROCESS_SLEEP = float(
    os.environ.get('OS_TEST_CHILD_PROCESS_SLEEP', 0.5))

TRANSPORT_PROTOCOLS = (n_const.PROTO_NAME_TCP, n_const.PROTO_NAME_UDP,
                       n_const.PROTO_NAME_SCTP)

OVS_MANAGER_TEST_PORT_FIRST = 6610
OVS_MANAGER_TEST_PORT_LAST = 6639


def increment_ip_cidr(ip_cidr, offset=1):
    """Increment ip_cidr offset times.

    example: increment_ip_cidr("1.2.3.4/24", 2) ==> "1.2.3.6/24"
    """
    net0 = netaddr.IPNetwork(ip_cidr)
    net = netaddr.IPNetwork(ip_cidr)
    net.value += offset
    if not net0.network < net.ip < net0[-1]:
        tools.fail(
            'Incorrect ip_cidr,offset tuple (%s,%s): "incremented" ip_cidr is '
            'outside ip_cidr' % (ip_cidr, offset))
    return str(net)


def set_namespace_gateway(port_dev, gateway_ip):
    """Set gateway for the namespace associated to the port."""
    if not port_dev.namespace:
        tools.fail('tests should not change test machine gateway')
    port_dev.route.add_gateway(gateway_ip)


def assert_ping(src_namespace, dst_ip, timeout=1, count=3):
    ipversion = netaddr.IPAddress(dst_ip).version
    ping_command = 'ping' if ipversion == 4 else 'ping6'
    ns_ip_wrapper = ip_lib.IPWrapper(src_namespace)
    ns_ip_wrapper.netns.execute([ping_command, '-W', timeout,
                                 '-c', count, dst_ip])


def assert_async_ping(src_namespace, dst_ip, timeout=1, count=1, interval=1):
    ipversion = netaddr.IPAddress(dst_ip).version
    ping_command = 'ping' if ipversion == 4 else 'ping6'
    ns_ip_wrapper = ip_lib.IPWrapper(src_namespace)

    # See bug 1588731 for an explanation of why the ping '-c count' option
    # cannot be used here and the following workaround is needed.
for _index in range(count): start_time = time.time() ns_ip_wrapper.netns.execute([ping_command, '-W', timeout, '-c', '1', dst_ip]) end_time = time.time() diff = end_time - start_time if 0 < diff < interval: # wait at most "interval" seconds between individual pings time.sleep(interval - diff) @contextlib.contextmanager def async_ping(namespace, ips, timeout=1, count=10): with futures.ThreadPoolExecutor(max_workers=len(ips)) as executor: fs = [executor.submit(assert_async_ping, namespace, ip, count=count, timeout=timeout) for ip in ips] yield lambda: all(f.done() for f in fs) futures.wait(fs) for f in fs: f.result() def assert_no_ping(src_namespace, dst_ip, timeout=1, count=1): try: assert_ping(src_namespace, dst_ip, timeout, count) except RuntimeError: pass else: tools.fail("destination ip %(destination)s is replying to ping from " "namespace %(ns)s, but it shouldn't" % {'ns': src_namespace, 'destination': dst_ip}) def assert_arping(src_namespace, dst_ip, source=None, timeout=1, count=1): """Send arp request using arping executable. NOTE: ARP protocol is used in IPv4 only. IPv6 uses Neighbour Discovery Protocol instead. """ ns_ip_wrapper = ip_lib.IPWrapper(src_namespace) arping_cmd = ['arping', '-c', count, '-w', timeout] if source: arping_cmd.extend(['-s', source]) arping_cmd.append(dst_ip) ns_ip_wrapper.netns.execute(arping_cmd) def assert_no_arping(src_namespace, dst_ip, source=None, timeout=1, count=1): try: assert_arping(src_namespace, dst_ip, source, timeout, count) except RuntimeError: pass else: tools.fail("destination ip %(destination)s is replying to arp from " "namespace %(ns)s, but it shouldn't" % {'ns': src_namespace, 'destination': dst_ip}) def _get_source_ports_from_ss_output(output): ports = set() for line in output.splitlines(): match = SS_SOURCE_PORT_PATTERN.match(line) if match: ports.add(int(match.group('port'))) return ports def get_unused_port(used, start=1024, end=None): if end is None: port_range = utils.execute( ['sysctl', '-n', 'net.ipv4.ip_local_port_range'], run_as_root=True) end = int(port_range.split()[0]) - 1 candidates = set(range(start, end + 1)) return random.choice(list(candidates - used)) def get_free_namespace_port(protocol, namespace=None, start=1024, end=None): """Return an unused port from given namespace WARNING: This function returns a port that is free at the execution time of this function. If this port is used later for binding then there is a potential danger that port will be no longer free. It's up to the programmer to handle error if port is already in use. :param protocol: Return free port for given protocol. Supported protocols are 'tcp' and 'udp'. :param namespace: Namespace in which free port has to be returned. :param start: The starting port number. :param end: The ending port number (free port that is returned would be between (start, end) values. 
""" if protocol == n_const.PROTO_NAME_TCP: param = '-tna' elif protocol == n_const.PROTO_NAME_UDP: param = '-una' else: raise ValueError("Unsupported protocol %s" % protocol) ip_wrapper = ip_lib.IPWrapper(namespace=namespace) output = ip_wrapper.netns.execute(['ss', param], run_as_root=True) used_ports = _get_source_ports_from_ss_output(output) return get_unused_port(used_ports, start, end) def set_local_port_range(start, end): utils.execute( ['sysctl', '-w', 'net.ipv4.ip_local_port_range=%d %d' % (start, end)], run_as_root=True) utils.execute(['sysctl', '-p'], run_as_root=True) # verify port_range = utils.execute( ['sysctl', '-n', 'net.ipv4.ip_local_port_range'], run_as_root=True) assert int(port_range.split()[0]) == start assert int(port_range.split()[1]) == end def create_patch_ports(source, destination): """Hook up two OVS bridges. The result is two patch ports, each end connected to a bridge. The two patch port names will start with 'patch-', followed by identical four characters. For example patch-xyzw-fedora, and patch-xyzw-ubuntu, where fedora and ubuntu are random strings. :param source: Instance of OVSBridge :param destination: Instance of OVSBridge """ common = common_utils.get_rand_name(max_length=4, prefix='') prefix = '%s-%s-' % (PATCH_PREFIX, common) source_name = common_utils.get_rand_device_name(prefix=prefix) destination_name = common_utils.get_rand_device_name(prefix=prefix) source.add_patch_port(source_name, destination_name) destination.add_patch_port(destination_name, source_name) def create_vlan_interface( namespace, port_name, mac_address, ip_address, vlan_tag): """Create a VLAN interface in namespace with IP address. :param namespace: Namespace in which VLAN interface should be created. :param port_name: Name of the port to which VLAN should be added. :param ip_address: IPNetwork instance containing the VLAN interface IP address. :param vlan_tag: VLAN tag for VLAN interface. 
""" ip_wrap = ip_lib.IPWrapper(namespace) dev_name = "%s.%d" % (port_name, vlan_tag) ip_wrap.add_vlan(dev_name, port_name, vlan_tag) dev = ip_wrap.device(dev_name) dev.addr.add(str(ip_address)) dev.link.set_address(mac_address) dev.link.set_up() return dev class RootHelperProcess(subprocess.Popen): def __init__(self, cmd, *args, **kwargs): for arg in ('stdin', 'stdout', 'stderr'): kwargs.setdefault(arg, subprocess.PIPE) kwargs.setdefault('universal_newlines', True) self.namespace = kwargs.pop('namespace', None) self.cmd = cmd if self.namespace is not None: cmd = ['ip', 'netns', 'exec', self.namespace] + cmd root_helper = config.get_root_helper(utils.cfg.CONF) cmd = shlex.split(root_helper) + cmd self.child_pid = None LOG.debug("Spawning process %s", cmd) super(RootHelperProcess, self).__init__(cmd, *args, **kwargs) self._wait_for_child_process() def kill(self, sig=signal.SIGKILL): pid = self.child_pid or str(self.pid) utils.execute(['kill', '-%d' % sig, pid], run_as_root=True) def read_stdout(self, timeout=None): return self._read_stream(self.stdout, timeout) @staticmethod def _read_stream(stream, timeout): if timeout: rready, _wready, _xready = select.select([stream], [], [], timeout) if not rready: raise RuntimeError('No output in %.2f seconds' % timeout) return stream.readline() def writeline(self, data): self.stdin.write(data + os.linesep) self.stdin.flush() def _wait_for_child_process(self, timeout=CHILD_PROCESS_TIMEOUT, sleep=CHILD_PROCESS_SLEEP): def child_is_running(): child_pid = utils.get_root_helper_child_pid( self.pid, self.cmd, run_as_root=True) if utils.pid_invoked_with_cmdline(child_pid, self.cmd): return True try: common_utils.wait_until_true(child_is_running, timeout) except common_utils.WaitTimeout: # If there is an error, the stderr and stdout pipes usually have # information returned by the command executed. If not, timeout # the pipe communication quickly. stdout = stderr = '' try: stdout, stderr = self.communicate(timeout=0.5) except subprocess.TimeoutExpired: pass msg = ("Process %(cmd)s hasn't been spawned in %(seconds)d " "seconds. Return code: %(ret_code)s, stdout: %(stdout)s, " "sdterr: %(stderr)s" % {'cmd': self.cmd, 'seconds': timeout, 'ret_code': self.returncode, 'stdout': stdout, 'stderr': stderr}) raise RuntimeError(msg) self.child_pid = utils.get_root_helper_child_pid( self.pid, self.cmd, run_as_root=True) @property def is_running(self): return self.poll() is None class Pinger(object): """Class for sending ICMP packets asynchronously The aim is to keep sending ICMP packets on background while executing other code. After background 'ping' command is stopped, statistics are available. Difference to assert_(no_)ping() functions located in this module is that these methods send given count of ICMP packets while they wait for the exit code of 'ping' command. 
    >>> pinger = Pinger('pinger_test', '192.168.0.2')
    >>> pinger.start(); time.sleep(5); pinger.stop()
    >>> pinger.sent, pinger.received
    7 7
    """

    stats_pattern = re.compile(
        r'^(?P<trans>\d+) packets transmitted,.*(?P<recv>\d+) received.*$')
    unreachable_pattern = re.compile(
        r'.* Destination .* Unreachable')
    TIMEOUT = 15

    def __init__(self, namespace, address, count=None, timeout=1,
                 interval=None):
        self.proc = None
        self.namespace = namespace
        self.address = address
        self.count = count
        self.timeout = timeout
        self.destination_unreachable = False
        self.sent = 0
        self.received = 0
        self.interval = interval

    def _wait_for_death(self):
        is_dead = lambda: self.proc.poll() is not None
        common_utils.wait_until_true(
            is_dead, timeout=self.TIMEOUT, exception=RuntimeError(
                "Ping command hasn't ended after %d seconds." % self.TIMEOUT))

    def _parse_stats(self):
        for line in self.proc.stdout:
            if (not self.destination_unreachable and
                    self.unreachable_pattern.match(line)):
                self.destination_unreachable = True
                continue
            result = self.stats_pattern.match(line)
            if result:
                self.sent = int(result.group('trans'))
                self.received = int(result.group('recv'))
                break
        else:
            raise RuntimeError("Didn't find ping statistics.")

    def start(self):
        if self.proc and self.proc.is_running:
            raise RuntimeError("This pinger already has a running process")
        ip_version = common_utils.get_ip_version(self.address)
        ping_exec = 'ping' if ip_version == n_const.IP_VERSION_4 else 'ping6'
        cmd = [ping_exec, '-W', str(self.timeout)]
        if self.count:
            cmd.extend(['-c', str(self.count)])
        if self.interval:
            cmd.extend(['-i', str(self.interval)])
        cmd.append(self.address)
        self.proc = RootHelperProcess(cmd, namespace=self.namespace)

    def stop(self):
        if self.proc and self.proc.is_running:
            self.proc.kill(signal.SIGINT)
            self._wait_for_death()
            self._parse_stats()

    def wait(self):
        if self.count:
            self._wait_for_death()
            self._parse_stats()
        else:
            raise RuntimeError("Pinger is running infinitely, use stop() "
                               "first")


class NetcatTester(object):
    TCP = n_const.PROTO_NAME_TCP
    UDP = n_const.PROTO_NAME_UDP
    SCTP = n_const.PROTO_NAME_SCTP
    VERSION_TO_ALL_ADDRESS = {
        n_const.IP_VERSION_4: '0.0.0.0',
        n_const.IP_VERSION_6: '::',
    }

    def __init__(self, client_namespace, server_namespace, address,
                 dst_port, protocol, server_address=None, src_port=None):
        """Initialize NetcatTester

        Tool for testing connectivity on transport layer using netcat
        executable. The processes are spawned lazily.
:param client_namespace: Namespace in which netcat process that connects to other netcat will be spawned :param server_namespace: Namespace in which listening netcat process will be spawned :param address: Server address from client point of view :param dst_port: Port on which netcat listens :param protocol: Transport protocol, either 'tcp', 'udp' or 'sctp' :param server_address: Address in server namespace on which netcat should listen :param src_port: Source port of netcat process spawned in client namespace - packet will have src_port in TCP/UDP header with this value """ self.client_namespace = client_namespace self.server_namespace = server_namespace self._client_process = None self._server_process = None self.address = address self.dst_port = str(dst_port) self.src_port = str(src_port) if src_port else None if protocol not in TRANSPORT_PROTOCOLS: raise ValueError("Unsupported protocol %s" % protocol) self.protocol = protocol ip_version = netaddr.IPAddress(address).version self.server_address = ( server_address or self.VERSION_TO_ALL_ADDRESS[ip_version]) @property def client_process(self): if not self._client_process: self.establish_connection() return self._client_process @property def server_process(self): if not self._server_process: self._spawn_server_process() return self._server_process def _spawn_server_process(self): self._server_process = self._spawn_nc_in_namespace( self.server_namespace, address=self.server_address, listen=True) @property def is_established(self): return bool(self._client_process and not self._client_process.poll()) def establish_connection(self): if self.is_established: raise RuntimeError('%(proto)s connection to %(ip_addr)s is already' ' established' % {'proto': self.protocol, 'ip_addr': self.address}) if not self._server_process: self._spawn_server_process() self._client_process = self._spawn_nc_in_namespace( self.client_namespace, address=self.address) if self.protocol == self.UDP: # Create an ASSURED entry in conntrack table for UDP packets, # that requires 3-way communication # 1st transmission creates UNREPLIED # 2nd transmission removes UNREPLIED # 3rd transmission creates ASSURED data = 'foo' self.client_process.writeline(data) self.server_process.read_stdout(READ_TIMEOUT) self.server_process.writeline(data) self.client_process.read_stdout(READ_TIMEOUT) self.client_process.writeline(data) self.server_process.read_stdout(READ_TIMEOUT) def test_connectivity(self, respawn=False): testing_string = uuidutils.generate_uuid() if respawn: self.stop_processes() self.client_process.writeline(testing_string) message = self.server_process.read_stdout(READ_TIMEOUT).strip() self.server_process.writeline(message) message = self.client_process.read_stdout(READ_TIMEOUT).strip() return message == testing_string def test_no_connectivity(self, respawn=False): try: return not self.test_connectivity(respawn) except RuntimeError: return True def _spawn_nc_in_namespace(self, namespace, address, listen=False): cmd = ['ncat', address, self.dst_port] if self.protocol == self.UDP: cmd.append('-u') elif self.protocol == self.SCTP: cmd.append('--sctp') if listen: cmd.append('-l') if self.protocol in (self.TCP, self.SCTP): cmd.append('-k') else: cmd.extend(['-w', '20']) if self.src_port: cmd.extend(['-p', self.src_port]) proc = RootHelperProcess(cmd, namespace=namespace) return proc def stop_processes(self, skip_errors=None): skip_errors = (['No such process'] if skip_errors is None else skip_errors) for proc_attr in ('_client_process', '_server_process'): proc = getattr(self, 
proc_attr) if proc: try: if proc.poll() is None: proc.kill() proc.wait() except n_exc.ProcessExecutionError as exc: for skip_error in skip_errors: if skip_error in str(exc): break else: raise exc setattr(self, proc_attr, None) class NamespaceFixture(fixtures.Fixture): """Create a namespace. :ivar ip_wrapper: created namespace :type ip_wrapper: IPWrapper :ivar name: created namespace name :type name: str """ def __init__(self, prefix=NS_PREFIX): super(NamespaceFixture, self).__init__() self.prefix = prefix def _setUp(self): ip = ip_lib.IPWrapper() self.name = self.prefix + uuidutils.generate_uuid() self.ip_wrapper = ip.ensure_namespace(self.name) self.addCleanup(self.destroy) def destroy(self): if self.ip_wrapper.netns.exists(self.name): for pid in ip_lib.list_namespace_pids(self.name): utils.kill_process(pid, signal.SIGKILL, run_as_root=True) self.ip_wrapper.netns.delete(self.name) class VethFixture(fixtures.Fixture): """Create a veth. :ivar ports: created veth ports :type ports: tuple of 2 IPDevice """ def _setUp(self): ip_wrapper = ip_lib.IPWrapper() self.ports = common_base.create_resource( VETH0_PREFIX, lambda name: ip_wrapper.add_veth(name, self.get_peer_name(name))) self.addCleanup(self.destroy) def destroy(self): for port in self.ports: ip_wrapper = ip_lib.IPWrapper(port.namespace) if (ip_wrapper.netns.exists(port.namespace) or port.namespace is None): try: ip_wrapper.del_veth(port.name) break except RuntimeError: # NOTE(cbrandily): It seems a veth is automagically deleted # when a namespace owning a veth endpoint is deleted. pass @staticmethod def get_peer_name(name): if name.startswith(VETH0_PREFIX): return name.replace(VETH0_PREFIX, VETH1_PREFIX) elif name.startswith(VETH1_PREFIX): return name.replace(VETH1_PREFIX, VETH0_PREFIX) else: tools.fail('%s is not a valid VethFixture veth endpoint' % name) class NamedVethFixture(VethFixture): """Create a veth with at least one specified name of a device :ivar ports: created veth ports :type ports: tuple of 2 IPDevice """ def __init__(self, veth0_prefix=VETH0_PREFIX, veth1_prefix=VETH1_PREFIX): super(NamedVethFixture, self).__init__() self.veth0_name = self.get_veth_name(veth0_prefix) self.veth1_name = self.get_veth_name(veth1_prefix) def _setUp(self): ip_wrapper = ip_lib.IPWrapper() self.ports = ip_wrapper.add_veth(self.veth0_name, self.veth1_name) self.addCleanup(self.destroy) @staticmethod def get_veth_name(name): if name.startswith(VETH0_PREFIX): return common_utils.get_rand_device_name(VETH0_PREFIX) if name.startswith(VETH1_PREFIX): return common_utils.get_rand_device_name(VETH1_PREFIX) return name class MacvtapFixture(fixtures.Fixture): """Create a macvtap. :param src_dev: source device for macvtap :type src_dev: IPDevice :param mode: mode of macvtap :type mode: string :ivar ip_dev: created macvtap :type ip_dev: IPDevice """ def __init__(self, src_dev=None, mode=None, prefix=MACVTAP_PREFIX): super(MacvtapFixture, self).__init__() self.src_dev = src_dev self.mode = mode self.prefix = prefix def _setUp(self): ip_wrapper = ip_lib.IPWrapper() self.ip_dev = common_base.create_resource( self.prefix, ip_wrapper.add_macvtap, self.src_dev, mode=self.mode) self.addCleanup(self.destroy) def destroy(self): if (ip_lib.network_namespace_exists(self.ip_dev.namespace) or self.ip_dev.namespace is None): try: self.ip_dev.link.delete() except RuntimeError: pass @six.add_metaclass(abc.ABCMeta) class PortFixture(fixtures.Fixture): """Create a port. 
:ivar port: created port :type port: IPDevice :ivar bridge: port bridge """ def __init__(self, bridge=None, namespace=None, mac=None, port_id=None): super(PortFixture, self).__init__() self.bridge = bridge self.namespace = namespace self.mac = (mac or db_base.DbBasePluginCommon._generate_macs()[0]) self.port_id = port_id or uuidutils.generate_uuid() @abc.abstractmethod def _create_bridge_fixture(self): pass @abc.abstractmethod def _setUp(self): super(PortFixture, self)._setUp() if not self.bridge: self.bridge = self.useFixture(self._create_bridge_fixture()).bridge @classmethod def get(cls, bridge, namespace=None, mac=None, port_id=None, hybrid_plug=False): """Deduce PortFixture class from bridge type and instantiate it.""" if isinstance(bridge, ovs_lib.OVSBridge): return OVSPortFixture(bridge, namespace, mac, port_id, hybrid_plug) if isinstance(bridge, bridge_lib.BridgeDevice): return LinuxBridgePortFixture(bridge, namespace, mac, port_id) if isinstance(bridge, VethBridge): return VethPortFixture(bridge, namespace) tools.fail('Unexpected bridge type: %s' % type(bridge)) class OVSBridgeFixture(fixtures.Fixture): """Create an OVS bridge. :ivar prefix: bridge name prefix :type prefix: str :ivar bridge: created bridge :type bridge: OVSBridge """ def __init__(self, prefix=BR_PREFIX): super(OVSBridgeFixture, self).__init__() self.prefix = prefix def _setUp(self): ovs = ovs_lib.BaseOVS() self.bridge = common_base.create_resource(self.prefix, ovs.add_bridge) self.addCleanup(self.bridge.destroy) class OVSTrunkBridgeFixture(OVSBridgeFixture): """This bridge doesn't generate the name.""" def _setUp(self): ovs = ovs_lib.BaseOVS() self.bridge = ovs.add_bridge(self.prefix) self.addCleanup(self.bridge.destroy) class OVSPortFixture(PortFixture): NIC_NAME_LEN = 14 def __init__(self, bridge=None, namespace=None, mac=None, port_id=None, hybrid_plug=False): super(OVSPortFixture, self).__init__(bridge, namespace, mac, port_id) self.hybrid_plug = hybrid_plug self.vlan_tag = None def _create_bridge_fixture(self): return OVSBridgeFixture() def _setUp(self): super(OVSPortFixture, self)._setUp() # because in some tests this port can be used to providing connection # between linuxbridge agents and vlan_id can be also added to this # device name it has to be max LB_DEVICE_NAME_MAX_LEN long port_name = common_utils.get_rand_name( LB_DEVICE_NAME_MAX_LEN, PORT_PREFIX ) if self.hybrid_plug: self.hybrid_plug_port(port_name) else: self.plug_port(port_name) def plug_port(self, port_name): # TODO(jlibosva): Don't use interface driver for fullstack fake # machines as the port should be treated by OVS agent and not by # external party interface_config = cfg.ConfigOpts() config.register_interface_opts(interface_config) ovs_interface = interface.OVSInterfaceDriver(interface_config) ovs_interface.plug_new( None, self.port_id, port_name, self.mac, bridge=self.bridge.br_name, namespace=self.namespace) # NOTE(mangelajo): for OVS implementations remove the DEAD VLAN tag # on ports that we intend to use as fake vm interfaces, they # need to be flat. This is related to lp#1767422 self.bridge.clear_db_attribute("Port", port_name, "tag") self.addCleanup(self.bridge.delete_port, port_name) self.port = ip_lib.IPDevice(port_name, self.namespace) def hybrid_plug_port(self, port_name): """Plug port with linux bridge in the middle. 
""" ip_wrapper = ip_lib.IPWrapper(self.namespace) qvb_name, qvo_name = self._get_veth_pair_names(self.port_id) qvb, qvo = self.useFixture(NamedVethFixture(qvb_name, qvo_name)).ports qvb.link.set_up() qvo.link.set_up() qbr_name = self._get_br_name(self.port_id) self.qbr = self.useFixture( LinuxBridgeFixture(qbr_name, namespace=None, prefix_is_full_name=True)).bridge self.qbr.link.set_up() self.qbr.setfd(0) self.qbr.disable_stp() self.qbr.addif(qvb_name) qvo_attrs = ('external_ids', {'iface-id': self.port_id, 'iface-status': 'active', 'attached-mac': self.mac}) self.bridge.add_port(qvo_name, qvo_attrs) # NOTE(jlibosva): Create fake vm port, instead of tap device, we use # veth pair here in order to be able to attach it to linux bridge in # root namespace. Name with tap is in root namespace and its peer is in # the namespace hybrid_port_name = iptables_firewall.get_hybrid_port_name(self.port_id) bridge_port, self.port = self.useFixture( NamedVethFixture(hybrid_port_name)).ports self.addCleanup(self.port.link.delete) ip_wrapper.add_device_to_namespace(self.port) bridge_port.link.set_up() self.qbr.addif(bridge_port) self.port.link.set_address(self.mac) self.port.link.set_up() # NOTE(jlibosva): Methods below are taken from nova.virt.libvirt.vif def _get_br_name(self, iface_id): return ("qbr" + iface_id)[:self.NIC_NAME_LEN] def _get_veth_pair_names(self, iface_id): return (("qvb%s" % iface_id)[:self.NIC_NAME_LEN], ("qvo%s" % iface_id)[:self.NIC_NAME_LEN]) class LinuxBridgeFixture(fixtures.Fixture): """Create a linux bridge. :ivar bridge: created bridge :type bridge: BridgeDevice :ivar namespace: created bridge namespace :type namespace: str """ def __init__(self, prefix=BR_PREFIX, namespace=UNDEFINED, prefix_is_full_name=False): super(LinuxBridgeFixture, self).__init__() self.prefix = prefix self.prefix_is_full_name = prefix_is_full_name self.namespace = namespace def _setUp(self): if self.namespace is UNDEFINED: self.namespace = self.useFixture(NamespaceFixture()).name self.bridge = self._create_bridge() self.addCleanup(self.safe_delete) self.bridge.link.set_up() self.addCleanup(self.safe_set_down) def safe_set_down(self): try: self.bridge.link.set_down() except RuntimeError: pass def safe_delete(self): try: self.bridge.delbr() except RuntimeError: pass def _create_bridge(self): if self.prefix_is_full_name: return bridge_lib.BridgeDevice.addbr( name=self.prefix, namespace=self.namespace ) else: return common_base.create_resource( self.prefix, bridge_lib.BridgeDevice.addbr, namespace=self.namespace) class LinuxBridgePortFixture(PortFixture): """Create a linux bridge port. :ivar port: created port :type port: IPDevice :ivar br_port: bridge side veth peer port :type br_port: IPDevice """ def __init__(self, bridge, namespace=None, mac=None, port_id=None): super(LinuxBridgePortFixture, self).__init__( bridge, namespace, mac, port_id) # we need to override port_id value here because in Port() class it is # always generated as random. 
In LinuxBridgePortFixture we need to have
        # it empty if it was not given, because then a proper veth_pair will
        # be created (for example in some functional tests)
        self.port_id = port_id

    def _create_bridge_fixture(self):
        return LinuxBridgeFixture()

    def _setUp(self):
        super(LinuxBridgePortFixture, self)._setUp()
        br_port_name = self._get_port_name()
        if br_port_name:
            self.veth_fixture = self.useFixture(
                NamedVethFixture(veth0_prefix=br_port_name))
        else:
            self.veth_fixture = self.useFixture(VethFixture())
        self.br_port, self.port = self.veth_fixture.ports

        if self.mac:
            self.port.link.set_address(self.mac)

        # bridge side
        br_ip_wrapper = ip_lib.IPWrapper(self.bridge.namespace)
        br_ip_wrapper.add_device_to_namespace(self.br_port)
        self.bridge.addif(self.br_port)
        self.br_port.link.set_up()

        # port side
        ns_ip_wrapper = ip_lib.IPWrapper(self.namespace)
        ns_ip_wrapper.add_device_to_namespace(self.port)
        self.port.link.set_up()

    def _get_port_name(self):
        if self.port_id:
            return linuxbridge_agent.LinuxBridgeManager.get_tap_device_name(
                self.port_id)
        return None


class VethBridge(object):

    def __init__(self, ports):
        self.ports = ports
        self.unallocated_ports = list(self.ports)

    def allocate_port(self):
        try:
            return self.unallocated_ports.pop()
        except IndexError:
            tools.fail('All VethBridge ports (%s) are already allocated.' %
                       len(self.ports))


class VethBridgeFixture(fixtures.Fixture):
    """Simulate a bridge with a veth.

    :ivar bridge: created bridge
    :type bridge: VethBridge
    """

    def _setUp(self):
        ports = self.useFixture(VethFixture()).ports
        self.bridge = VethBridge(ports)


class VethPortFixture(PortFixture):
    """Create a veth bridge port.

    :ivar port: created port
    :type port: IPDevice
    """

    def _create_bridge_fixture(self):
        return VethBridgeFixture()

    def _setUp(self):
        super(VethPortFixture, self)._setUp()
        self.port = self.bridge.allocate_port()
        ns_ip_wrapper = ip_lib.IPWrapper(self.namespace)
        ns_ip_wrapper.add_device_to_namespace(self.port)
        self.port.link.set_up()
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3830452 neutron-16.0.0.0b2.dev214/neutron/tests/contrib/0000755000175000017500000000000000000000000021437 5ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/contrib/README0000644000175000017500000000020400000000000022313 0ustar00coreycorey00000000000000
The files in this directory are intended for use by the Neutron infra jobs that run the various functional test suites in the gate.
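# ---------------------------------------------------------------------------
# Editor's note (illustrative addition, not part of the original tree): the
# PortFixture.get() classmethod defined in net_helpers.py above dispatches
# on the bridge type, so callers stay bridge-agnostic. A minimal sketch; the
# function name is hypothetical and `test` is any fixtures-aware test case.

from neutron.tests.common import net_helpers


def example_plug_port(test):
    # the same call works for an OVS bridge (-> OVSPortFixture), a Linux
    # bridge (-> LinuxBridgePortFixture) or a VethBridge (-> VethPortFixture)
    bridge = test.useFixture(net_helpers.OVSBridgeFixture()).bridge
    namespace = test.useFixture(net_helpers.NamespaceFixture()).name
    port_fixture = test.useFixture(
        net_helpers.PortFixture.get(bridge, namespace=namespace))
    return port_fixture.port  # IPDevice plugged into the namespace
# ---------------------------------------------------------------------------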
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/contrib/gate_hook.sh0000644000175000017500000000512000000000000023731 0ustar00coreycorey00000000000000#!/usr/bin/env bash set -ex VENV=${1:-"api"} FLAVOR=${2:-"all"} GATE_DEST=$BASE/new NEUTRON_PATH=$GATE_DEST/neutron GATE_HOOKS=$NEUTRON_PATH/neutron/tests/contrib/hooks DEVSTACK_PATH=$GATE_DEST/devstack LOCAL_CONF=$DEVSTACK_PATH/late-local.conf RALLY_EXTRA_DIR=$NEUTRON_PATH/rally-jobs/extra DSCONF=/tmp/devstack-tools/bin/dsconf # Install devstack-tools used to produce local.conf; we can't rely on # test-requirements.txt because the gate hook is triggered before neutron is # installed sudo -H pip install virtualenv virtualenv /tmp/devstack-tools /tmp/devstack-tools/bin/pip install -U devstack-tools==0.4.0 # Inject config from hook into localrc function load_rc_hook { local hook="$1" local tmpfile local config tmpfile=$(mktemp) config=$(cat $GATE_HOOKS/$hook) echo "[[local|localrc]]" > $tmpfile $DSCONF setlc_raw $tmpfile "$config" $DSCONF merge_lc $LOCAL_CONF $tmpfile rm -f $tmpfile } # Inject config from hook into local.conf function load_conf_hook { local hook="$1" $DSCONF merge_lc $LOCAL_CONF $GATE_HOOKS/$hook } # Tweak gate configuration for our rally scenarios function load_rc_for_rally { for file in $(ls $RALLY_EXTRA_DIR/*.setup); do tmpfile=$(mktemp) config=$(cat $file) echo "[[local|localrc]]" > $tmpfile $DSCONF setlc_raw $tmpfile "$config" $DSCONF merge_lc $LOCAL_CONF $tmpfile rm -f $tmpfile done } case $VENV in "api"|"api-pecan"|"full-pecan"|"dsvm-scenario-ovs") # TODO(ihrachys) consider feeding result of ext-list into tempest.conf load_rc_hook api_all_extensions if [ "${FLAVOR}" = "dvrskip" ]; then load_rc_hook disable_dvr_tests fi load_conf_hook quotas load_rc_hook uplink_status_propagation load_rc_hook dns load_rc_hook qos load_rc_hook segments load_rc_hook trunk load_rc_hook network_segment_range load_conf_hook vlan_provider load_conf_hook osprofiler load_conf_hook availability_zone load_conf_hook tunnel_types load_rc_hook log # bug 1743463 load_conf_hook openvswitch_type_drivers if [[ "$VENV" =~ "dsvm-scenario" ]]; then load_rc_hook ubuntu_image fi if [[ "$VENV" =~ "pecan" ]]; then load_conf_hook pecan fi if [[ "$FLAVOR" = "dvrskip" ]]; then load_conf_hook disable_dvr fi if [[ "$VENV" =~ "dsvm-scenario-ovs" ]]; then load_conf_hook dvr fi ;; "rally") load_rc_for_rally ;; *) echo "Unrecognized environment $VENV". exit 1 esac export DEVSTACK_LOCALCONF=$(cat $LOCAL_CONF) $BASE/new/devstack-gate/devstack-vm-gate.sh ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3870454 neutron-16.0.0.0b2.dev214/neutron/tests/contrib/hooks/0000755000175000017500000000000000000000000022562 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/contrib/hooks/api_all_extensions0000644000175000017500000000614700000000000026375 0ustar00coreycorey00000000000000# Keep entries alphabetically # NOTE: The first entry should not use '+=' and a comma. 
NETWORK_API_EXTENSIONS="address-scope" NETWORK_API_EXTENSIONS+=",agent" NETWORK_API_EXTENSIONS+=",allowed-address-pairs" NETWORK_API_EXTENSIONS+=",auto-allocated-topology" NETWORK_API_EXTENSIONS+=",availability_zone" NETWORK_API_EXTENSIONS+=",availability_zone_filter" NETWORK_API_EXTENSIONS+=",binding" NETWORK_API_EXTENSIONS+=",binding-extended" NETWORK_API_EXTENSIONS+=",default-subnetpools" NETWORK_API_EXTENSIONS+=",dhcp_agent_scheduler" NETWORK_API_EXTENSIONS+=",dns-integration" NETWORK_API_EXTENSIONS+=",dvr" NETWORK_API_EXTENSIONS+=",empty-string-filtering" NETWORK_API_EXTENSIONS+=",ext-gw-mode" NETWORK_API_EXTENSIONS+=",external-net" NETWORK_API_EXTENSIONS+=",extra_dhcp_opt" NETWORK_API_EXTENSIONS+=",extraroute" NETWORK_API_EXTENSIONS+=",filter-validation" NETWORK_API_EXTENSIONS+=",fip-port-details" NETWORK_API_EXTENSIONS+=",flavors" NETWORK_API_EXTENSIONS+=",floatingip-pools" NETWORK_API_EXTENSIONS+=",ip-substring-filtering" NETWORK_API_EXTENSIONS+=",l3-conntrack-helper" NETWORK_API_EXTENSIONS+=",l3-flavors" NETWORK_API_EXTENSIONS+=",l3-ha" NETWORK_API_EXTENSIONS+=",l3_agent_scheduler" NETWORK_API_EXTENSIONS+=",l3-port-ip-change-not-allowed" NETWORK_API_EXTENSIONS+=",logging" NETWORK_API_EXTENSIONS+=",metering" NETWORK_API_EXTENSIONS+=",multi-provider" NETWORK_API_EXTENSIONS+=",net-mtu" NETWORK_API_EXTENSIONS+=",net-mtu-writable" NETWORK_API_EXTENSIONS+=",network-ip-availability" NETWORK_API_EXTENSIONS+=",network_availability_zone" NETWORK_API_EXTENSIONS+=",network-segment-range" NETWORK_API_EXTENSIONS+=",pagination" NETWORK_API_EXTENSIONS+=",port-security" NETWORK_API_EXTENSIONS+=",project-id" NETWORK_API_EXTENSIONS+=",provider" NETWORK_API_EXTENSIONS+=",qos" NETWORK_API_EXTENSIONS+=",qos-fip" NETWORK_API_EXTENSIONS+=",qos-gateway-ip" NETWORK_API_EXTENSIONS+=",quotas" NETWORK_API_EXTENSIONS+=",quota_details" NETWORK_API_EXTENSIONS+=",rbac-policies" NETWORK_API_EXTENSIONS+=",rbac-address-scope"" NETWORK_API_EXTENSIONS+=",rbac-security-groups"" NETWORK_API_EXTENSIONS+=",rbac-subnetpool"" NETWORK_API_EXTENSIONS+=",router" NETWORK_API_EXTENSIONS+=",router-admin-state-down-before-update" NETWORK_API_EXTENSIONS+=",router_availability_zone" NETWORK_API_EXTENSIONS+=",security-group" NETWORK_API_EXTENSIONS+=",port-mac-address-regenerate" NETWORK_API_EXTENSIONS+=",port-security-groups-filtering" NETWORK_API_EXTENSIONS+=",segment" NETWORK_API_EXTENSIONS+=",segments-peer-subnet-host-routes" NETWORK_API_EXTENSIONS+=",service-type" NETWORK_API_EXTENSIONS+=",sorting" NETWORK_API_EXTENSIONS+=",standard-attr-description" NETWORK_API_EXTENSIONS+=",standard-attr-revisions" NETWORK_API_EXTENSIONS+=",standard-attr-segment" NETWORK_API_EXTENSIONS+=",standard-attr-timestamp" NETWORK_API_EXTENSIONS+=",standard-attr-tag" NETWORK_API_EXTENSIONS+=",stateful-security-group" NETWORK_API_EXTENSIONS+=",subnet_allocation" NETWORK_API_EXTENSIONS+=",subnet-dns-publish-fixed-ip" NETWORK_API_EXTENSIONS+=",tag-ports-during-bulk-creation" NETWORK_API_EXTENSIONS+=",trunk" NETWORK_API_EXTENSIONS+=",trunk-details" NETWORK_API_EXTENSIONS+=",uplink-status-propagation" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/contrib/hooks/availability_zone0000644000175000017500000000034500000000000026214 0ustar00coreycorey00000000000000[[test-config|$TEMPEST_CONFIG]] [neutron_plugin_options] agent_availability_zone = nova [[post-config|/$NEUTRON_L3_CONF]] [agent] availability_zone = nova [[post-config|/$NEUTRON_DHCP_CONF]] [agent] 
availability_zone = nova ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/contrib/hooks/disable_dvr0000644000175000017500000000007300000000000024763 0ustar00coreycorey00000000000000[[post-config|/$NEUTRON_CONF]] [DEFAULT] enable_dvr=False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/contrib/hooks/disable_dvr_tests0000644000175000017500000000004500000000000026204 0ustar00coreycorey00000000000000DISABLE_NETWORK_API_EXTENSIONS="dvr" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/contrib/hooks/dns0000644000175000017500000000003300000000000023265 0ustar00coreycorey00000000000000enable_service neutron-dns ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/contrib/hooks/dvr0000644000175000017500000000022700000000000023301 0ustar00coreycorey00000000000000[[test-config|$TEMPEST_CONFIG]] [neutron_plugin_options] l3_agent_mode = dvr_snat [[post-config|/$NEUTRON_L3_CONF]] [DEFAULT] agent_mode = dvr_snat ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/contrib/hooks/log0000644000175000017500000000020000000000000023256 0ustar00coreycorey00000000000000enable_service neutron-log [[post-config|/$NEUTRON_CORE_PLUGIN_CONF]] [network_log] local_output_log_base = /tmp/test_log.log ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/contrib/hooks/network_segment_range0000644000175000017500000000005400000000000027073 0ustar00coreycorey00000000000000enable_service neutron-network-segment-range././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/contrib/hooks/openvswitch_type_drivers0000644000175000017500000000044300000000000027656 0ustar00coreycorey00000000000000[[test-config|$TEMPEST_CONFIG]] [neutron_plugin_options] available_type_drivers=flat,geneve,vlan,gre,local,vxlan [[post-config|/$NEUTRON_CORE_PLUGIN_CONF]] [ml2] type_drivers=flat,geneve,vlan,gre,local,vxlan [ml2_type_vxlan] vni_ranges = 1:2000 [ml2_type_gre] tunnel_id_ranges = 1:1000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/contrib/hooks/osprofiler0000644000175000017500000000045400000000000024674 0ustar00coreycorey00000000000000[[post-config|/etc/neutron/api-paste.ini]] [composite:neutronapi_v2_0] use = call:neutron.auth:pipeline_factory noauth = cors request_id catch_errors osprofiler extensions neutronapiapp_v2_0 keystone = cors request_id catch_errors osprofiler authtoken keystonecontext extensions neutronapiapp_v2_0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/contrib/hooks/qos0000644000175000017500000000012700000000000023307 0ustar00coreycorey00000000000000enable_plugin neutron https://opendev.org/openstack/neutron enable_service neutron-qos ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 
neutron-16.0.0.0b2.dev214/neutron/tests/contrib/hooks/quotas0000644000175000017500000000026700000000000024026 0ustar00coreycorey00000000000000[[post-config|$NEUTRON_CONF]] [quotas] # x10 of default quotas (at the time of writing) quota_router=100 quota_floatingip=500 quota_security_group=100 quota_security_group_rule=1000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/contrib/hooks/segments0000644000175000017500000000004000000000000024324 0ustar00coreycorey00000000000000enable_service neutron-segments ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/contrib/hooks/trunk0000644000175000017500000000003500000000000023646 0ustar00coreycorey00000000000000enable_service neutron-trunk ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/contrib/hooks/tunnel_types0000644000175000017500000000026400000000000025240 0ustar00coreycorey00000000000000# ideally we would configure it in openvswitch_agent.ini but devstack doesn't # load it for its l2 agent [[post-config|/$NEUTRON_CORE_PLUGIN_CONF]] [AGENT] tunnel_types=gre,vxlan ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/contrib/hooks/ubuntu_image0000644000175000017500000000046600000000000025177 0ustar00coreycorey00000000000000DOWNLOAD_DEFAULT_IMAGES=False IMAGE_URLS="http://cloud-images.ubuntu.com/releases/16.04/release-20180424/ubuntu-16.04-server-cloudimg-amd64-disk1.img," DEFAULT_INSTANCE_TYPE=ds512M DEFAULT_INSTANCE_USER=ubuntu BUILD_TIMEOUT=784 [[test-config|$TEMPEST_CONFIG]] [neutron_plugin_options] image_is_advanced=True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/contrib/hooks/uplink_status_propagation0000644000175000017500000000006100000000000030012 0ustar00coreycorey00000000000000enable_service neutron-uplink-status-propagation ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/contrib/hooks/vlan_provider0000644000175000017500000000025200000000000025356 0ustar00coreycorey00000000000000[[test-config|$TEMPEST_CONFIG]] [neutron_plugin_options] provider_vlans=foo, [[post-config|/$NEUTRON_CORE_PLUGIN_CONF]] [ml2_type_vlan] network_vlan_ranges = foo:1:10 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/contrib/testing.filters0000644000175000017500000000464500000000000024517 0ustar00coreycorey00000000000000# neutron-rootwrap command filters to support functional testing. It # is NOT intended to be used outside of a test environment. 
# # This file should be owned by (and writable only by) the root user [Filters] # enable ping from namespace ping_filter: CommandFilter, ping, root ping6_filter: CommandFilter, ping6, root ping_kill: KillFilter, root, ping, -2 # enable curl from namespace curl_filter: RegExpFilter, /usr/bin/curl, root, curl, --max-time, \d+, -D-, http://[0-9a-z:./-]+ ncat_filter: CommandFilter, ncat, root ncat_kill: KillFilter, root, ncat, -9 ss_filter: CommandFilter, ss, root # enable neutron-linuxbridge-cleanup from namespace lb_cleanup_filter: RegExpFilter, neutron-linuxbridge-cleanup, root, neutron-linuxbridge-cleanup, --config-file, .* # enable dhclient from namespace dhclient_filter: CommandFilter, dhclient, root dhclient_kill: KillFilter, root, dhclient, -9 # dhclient is used to test the DHCP agent and runs in the dhcp-agent # namespace. If resolv.conf does not exist in that namespace, dhclient # will override the system /etc/resolv.conf. # The filters below restrict mkdir, rm and touch # to creating and deleting the resolv.conf file in that namespace mkdir_filter: RegExpFilter, /bin/mkdir, root, mkdir, -p, /etc/netns/qdhcp-[0-9a-z./-]+ rm_filter: RegExpFilter, /bin/rm, root, rm, -r, /etc/netns/qdhcp-[0-9a-z./-]+ touch_filter: RegExpFilter, /bin/touch, root, touch, /etc/netns/qdhcp-[0-9a-z./-]+/resolv.conf touch_filter2: RegExpFilter, /usr/bin/touch, root, touch, /etc/netns/qdhcp-[0-9a-z./-]+/resolv.conf # needed by test_ovs_flows which runs ovs-appctl ofproto/trace ovstrace_filter: RegExpFilter, ovs-appctl, root, ovs-appctl, ofproto/trace, .*, .* # needed for TestGetRootHelperChildPid bash_filter: RegExpFilter, /bin/bash, root, bash, -c, \(sleep 100\) sleep_kill: KillFilter, root, sleep, -9 # needed by test_netns_cleanup process_spawn: EnvFilter, env, root, PATH=, python ip_exec: IpNetnsExecFilter, ip, root ps: CommandFilter, ps, root pid_kill: RegExpFilter, kill, root, kill, -\d+, .* # needed to set up fullstack 'multinode' environment rabbitmqctl: CommandFilter, rabbitmqctl, root linuxbridge_agent: CommandFilter, neutron-linuxbridge-agent, root dhcp_agent: CommandFilter, dhcp_agent.py, root ovs_agent: CommandFilter, ovs_agent.py, root l3_agent: CommandFilter, l3_agent.py, root # needed to capture and analyze traffic in fullstack tests (e.g. 
in DSCP scenarios) tcpdump: CommandFilter, tcpdump, root ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3870454 neutron-16.0.0.0b2.dev214/neutron/tests/etc/0000755000175000017500000000000000000000000020552 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/etc/api-paste.ini0000644000175000017500000000266700000000000023151 0ustar00coreycorey00000000000000[composite:neutron] use = egg:Paste#urlmap /: neutronversions_composite /v2.0: neutronapi_v2_0 [composite:neutronapi_v2_0] use = call:neutron.auth:pipeline_factory noauth = cors http_proxy_to_wsgi request_id catch_errors extensions neutronapiapp_v2_0 keystone = cors http_proxy_to_wsgi request_id catch_errors authtoken keystonecontext extensions neutronapiapp_v2_0 [composite:neutronversions_composite] use = call:neutron.auth:pipeline_factory noauth = cors http_proxy_to_wsgi neutronversions keystone = cors http_proxy_to_wsgi neutronversions [filter:request_id] paste.filter_factory = oslo_middleware:RequestId.factory [filter:catch_errors] paste.filter_factory = oslo_middleware:CatchErrors.factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = neutron [filter:http_proxy_to_wsgi] paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory [filter:keystonecontext] paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory [filter:authtoken] paste.filter_factory = keystonemiddleware.auth_token:filter_factory [filter:extensions] paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory [app:neutronversions] paste.app_factory = neutron.pecan_wsgi.app:versions_factory [app:neutronapiapp_v2_0] paste.app_factory = neutron.api.v2.router:APIRouter.factory [filter:osprofiler] paste.filter_factory = osprofiler.web:WsgiMiddleware.factory ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/etc/api-paste.ini.test0000644000175000017500000000044200000000000024114 0ustar00coreycorey00000000000000[pipeline:extensions_app_with_filter] pipeline = extensions extensions_test_app [filter:extensions] paste.filter_factory = neutron.common.extensions:plugin_aware_extension_middleware_factory [app:extensions_test_app] paste.app_factory = neutron.tests.unit.api.test_extensions:app_factory ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/etc/neutron.conf0000644000175000017500000000073700000000000023122 0ustar00coreycorey00000000000000[DEFAULT] # Show more verbose log output (sets DEBUG log level output) default_log_levels = neutron=DEBUG # Show debugging output in logs (sets DEBUG log level output) debug = False # Address to bind the API server bind_host = 0.0.0.0 # Port the bind the API server to bind_port = 9696 # Paste configuration file api_paste_config = api-paste.ini.test # The messaging transport URL transport_url = 'fake:/' lock_path = $state_path/lock [database] connection = 'sqlite://' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/etc/neutron_test.conf0000644000175000017500000000007600000000000024155 0ustar00coreycorey00000000000000[service_providers] service_provider=foo 
service_provider=bar ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/etc/neutron_test2.conf.example0000644000175000017500000000005100000000000025662 0ustar00coreycorey00000000000000[service_providers] service_provider=zzz ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3910453 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/0000755000175000017500000000000000000000000021767 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/README0000644000175000017500000000003700000000000022647 0ustar00coreycorey00000000000000Please see neutron/TESTING.rst.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/__init__.py0000644000175000017500000000115400000000000024101 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.common import eventlet_utils eventlet_utils.monkey_patch() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3910453 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/agents/0000755000175000017500000000000000000000000023250 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/agents/__init__.py0000644000175000017500000000000000000000000025347 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/agents/dhcp_agent.py0000755000175000017500000000474400000000000025732 0ustar00coreycorey00000000000000#!/usr/bin/env python # Copyright 2016 OVH SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import os import sys from oslo_config import cfg from oslo_utils import uuidutils from neutron.agent.linux import dhcp as linux_dhcp from neutron.cmd.eventlet.agents import dhcp as dhcp_agent OPTS = [ cfg.StrOpt('test_namespace_suffix', default='testprefix', help="Suffix to append to all DHCP namespace names."), ] def _get_namespace_name(id_, suffix=None): suffix = suffix or cfg.CONF.test_namespace_suffix return "%s%s%s" % (linux_dhcp.NS_PREFIX, id_, suffix) def NetModel_init(self, d): super(linux_dhcp.NetModel, self).__init__(d) self._ns_name = _get_namespace_name(self.id) @classmethod def existing_dhcp_networks(cls, conf): """Return a list of existing networks ids that we have configs for.""" confs_dir = cls.get_confs_dir(conf) networks = [] try: for c in os.listdir(confs_dir): c = c.replace(cfg.CONF.test_namespace_suffix, "") if uuidutils.is_uuid_like(c): networks.append(c) except OSError: pass return networks def monkeypatch_dhcplocalprocess_init(): original_init = linux_dhcp.DhcpLocalProcess.__init__ def new_init(self, conf, network, process_monitor, version=None, plugin=None): network_copy = copy.deepcopy(network) network_copy.id = "%s%s" % (network.id, cfg.CONF.test_namespace_suffix) original_init( self, conf, network_copy, process_monitor, version, plugin) self.network = network linux_dhcp.DhcpLocalProcess.__init__ = new_init def monkeypatch_linux_dhcp(): linux_dhcp.NetModel.__init__ = NetModel_init linux_dhcp.Dnsmasq.existing_dhcp_networks = existing_dhcp_networks monkeypatch_dhcplocalprocess_init() def main(): cfg.CONF.register_opts(OPTS) monkeypatch_linux_dhcp() dhcp_agent.main() if __name__ == "__main__": sys.exit(main()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/agents/l3_agent.py0000755000175000017500000000146200000000000025324 0ustar00coreycorey00000000000000#!/usr/bin/env python # Copyright 2017 Eayun, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from neutron.common import eventlet_utils from neutron.tests.common.agents import l3_agent eventlet_utils.monkey_patch() if __name__ == "__main__": sys.exit(l3_agent.main()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/agents/ovs_agent.py0000755000175000017500000000402200000000000025610 0ustar00coreycorey00000000000000#!/usr/bin/env python # Copyright 2017 OVH SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import sys import mock from oslo_config import cfg from neutron.agent.common import ovs_lib from neutron.agent.l2.extensions import qos as qos_extension from neutron.services.trunk.drivers.openvswitch.agent \ import driver as trunk_driver from neutron.tests.common.agents import ovs_agent def monkeypatch_init_handler(): original_handler = trunk_driver.init_handler def new_init_handler(resource, event, trigger, payload=None): # NOTE(slaweq): make this setup conditional based on server-side # capabilities; for fullstack tests we can assume that server-side # and agent-side conf are in sync if "trunk" not in cfg.CONF.service_plugins: return original_handler(resource, event, trigger, payload) trunk_driver.init_handler = new_init_handler def monkeypatch_qos(): mock.patch.object(ovs_lib.OVSBridge, 'clear_minimum_bandwidth_qos').start() if "qos" in cfg.CONF.service_plugins: mock.patch.object(qos_extension.QosAgentExtension, '_process_reset_port').start() def main(): # TODO(slaweq): this monkeypatch will not be necessary once # https://review.opendev.org/#/c/506722/ is merged and the ovsdb-server and # ovs-vswitchd processes for each test are isolated in a separate # namespace monkeypatch_init_handler() monkeypatch_qos() ovs_agent.main() if __name__ == "__main__": sys.exit(main()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/base.py0000644000175000017500000001506200000000000023257 0ustar00coreycorey00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from concurrent import futures import itertools import os import random import netaddr from neutron_lib.tests import tools from oslo_config import cfg from oslo_log import log as logging from neutron.agent.linux import ip_lib from neutron.common import utils as common_utils from neutron.conf.agent import common as config from neutron.tests import base as tests_base from neutron.tests.common import helpers from neutron.tests.common import machine_fixtures from neutron.tests.common import net_helpers from neutron.tests.fullstack.resources import client as client_resource from neutron.tests.unit import testlib_api # This is the directory from which infra fetches log files for fullstack tests DEFAULT_LOG_DIR = os.path.join(helpers.get_test_log_path(), 'dsvm-fullstack-logs') ROOTDIR = os.path.dirname(__file__) LOG = logging.getLogger(__name__) class BaseFullStackTestCase(testlib_api.MySQLTestCaseMixin, testlib_api.SqlTestCase): """Base test class for full-stack tests.""" BUILD_WITH_MIGRATIONS = True def setUp(self, environment): super(BaseFullStackTestCase, self).setUp() tests_base.setup_test_logging( cfg.CONF, DEFAULT_LOG_DIR, '%s.txt' % self.get_name()) # NOTE(zzzeek): the opportunistic DB fixtures have built for # us a per-test (or per-process) database. 
Set the URL of this # database in CONF as the full stack tests need to actually run a # neutron server against this database. _orig_db_url = cfg.CONF.database.connection cfg.CONF.set_override( 'connection', str(self.engine.url), group='database') self.addCleanup( cfg.CONF.set_override, "connection", _orig_db_url, group="database" ) # NOTE(ihrachys): seed should be reset before environment fixture below # since the latter starts services that may rely on generated port # numbers tools.reset_random_seed() # configure test runner to use rootwrap self.setup_rootwrap() config.setup_privsep() self.environment = environment self.environment.test_name = self.get_name() self.useFixture(self.environment) self.client = self.environment.neutron_server.client self.safe_client = self.useFixture( client_resource.ClientFixture(self.client)) def get_name(self): class_name, test_name = self.id().split(".")[-2:] return "%s.%s" % (class_name, test_name) def _wait_until_agent_up(self, agent_id): def _agent_up(): agent = self.client.show_agent(agent_id)['agent'] return agent.get('alive') common_utils.wait_until_true(_agent_up) def _wait_until_agent_down(self, agent_id): def _agent_down(): agent = self.client.show_agent(agent_id)['agent'] return not agent.get('alive') common_utils.wait_until_true(_agent_down) def _assert_ping_during_agents_restart( self, agents, src_namespace, ips, restart_timeout=10, ping_timeout=1, count=10): with net_helpers.async_ping( src_namespace, ips, timeout=ping_timeout, count=count) as done: LOG.debug("Restarting agents") executor = futures.ThreadPoolExecutor(max_workers=len(agents)) restarts = [agent.restart(executor=executor) for agent in agents] futures.wait(restarts, timeout=restart_timeout) self.assertTrue(all([r.done() for r in restarts])) LOG.debug("Restarting agents - done") # It is necessary to give agents time to initialize # because some crucial steps (e.g. setting up bridge flows) # happen only after RPC is established agent_names = ', '.join({agent.process_fixture.process_name for agent in agents}) common_utils.wait_until_true( done, timeout=count * (ping_timeout + 1), exception=RuntimeError("Could not ping the other VM, " "re-starting %s leads to network " "disruption" % agent_names)) def _find_available_ips(self, network, subnet, num): ports = self.safe_client.list_ports(network_id=network['id']) used_ips = netaddr.IPSet( [netaddr.IPAddress(ip['ip_address']) for port in ports for ip in port['fixed_ips']]) used_ips.add(netaddr.IPAddress(subnet['gateway_ip'])) # Note(lajoskatona): Suppose that we have 1 allocation pool for the # subnet, that should be quite good assumption for testing. 
valid_ip_pool = subnet['allocation_pools'][0] valid_ips = netaddr.IPSet(netaddr.IPRange( valid_ip_pool['start'], valid_ip_pool['end']) ) valid_ips = valid_ips.difference(used_ips) if valid_ips.size < num: self.fail("Cannot find enough free IP addresses.") initial = random.randint(0, min(valid_ips.size - num, 1000)) available_ips = itertools.islice(valid_ips, initial, initial + num) return [str(available_ip) for available_ip in available_ips] def _create_external_vm(self, network, subnet): vm = self.useFixture( machine_fixtures.FakeMachine( self.environment.central_bridge, common_utils.ip_to_cidr(subnet['gateway_ip'], 24))) # NOTE(slaweq): as ext_net is 'vlan' network type external_vm needs to # send packets with proper vlan also vm.bridge.set_db_attribute( "Port", vm.port.name, "tag", network.get("provider:segmentation_id")) return vm def assert_namespace_exists(self, ns_name): common_utils.wait_until_true( lambda: ip_lib.network_namespace_exists(ns_name, try_is_ready=True)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3910453 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/resources/0000755000175000017500000000000000000000000024001 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/resources/__init__.py0000644000175000017500000000000000000000000026100 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/resources/client.py0000644000175000017500000003210600000000000025633 0ustar00coreycorey00000000000000# Copyright (c) 2015 Thales Services SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
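# NOTE: illustrative usage sketch, not part of the original module. The
# ClientFixture below pairs every create_* call with the matching delete_*
# registered via addCleanup(), wrapped by _safe_method so that a resource the
# test already removed does not fail cleanup with NotFound. From a fullstack
# test it is typically used like this (the attribute names mirror base.py):
#
#     self.safe_client = self.useFixture(ClientFixture(self.client))
#     network = self.safe_client.create_network(tenant_id)
#     subnet = self.safe_client.create_subnet(
#         tenant_id, network['id'], '20.0.0.0/24')
#     # no explicit teardown: deletions run in reverse order on cleanup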
# import functools import fixtures import netaddr from neutron_lib.api.definitions import portbindings from neutron_lib import constants from neutronclient.common import exceptions from neutron.common import utils def _safe_method(f): @functools.wraps(f) def delete(*args, **kwargs): try: return f(*args, **kwargs) except exceptions.NotFound: pass return delete class ClientFixture(fixtures.Fixture): """Manage and cleanup neutron resources.""" def __init__(self, client): super(ClientFixture, self).__init__() self.client = client def _create_resource(self, resource_type, spec): create = getattr(self.client, 'create_%s' % resource_type) delete = getattr(self.client, 'delete_%s' % resource_type) body = {resource_type: spec} resp = create(body=body) data = resp[resource_type] self.addCleanup(_safe_method(delete), data['id']) return data def _update_resource(self, resource_type, id, spec): update = getattr(self.client, 'update_%s' % resource_type) body = {resource_type: spec} resp = update(id, body=body) return resp[resource_type] def _delete_resource(self, resource_type, id): delete = getattr(self.client, 'delete_%s' % resource_type) return delete(id) def create_router(self, tenant_id, name=None, ha=False, external_network=None, external_subnet=None): resource_type = 'router' name = name or utils.get_rand_name(prefix=resource_type) spec = {'tenant_id': tenant_id, 'name': name, 'ha': ha} if external_network: spec['external_gateway_info'] = {"network_id": external_network} if external_subnet: spec['external_gateway_info']['external_fixed_ips'] = ( [{"subnet_id": external_subnet}]) return self._create_resource(resource_type, spec) def update_router(self, router_id, **kwargs): return self._update_resource('router', router_id, kwargs) def create_network(self, tenant_id, name=None, external=False, network_type=None, segmentation_id=None, physical_network=None, mtu=None): resource_type = 'network' name = name or utils.get_rand_name(prefix=resource_type) spec = {'tenant_id': tenant_id, 'name': name} spec['router:external'] = external if segmentation_id is not None: spec['provider:segmentation_id'] = segmentation_id if network_type is not None: spec['provider:network_type'] = network_type if physical_network is not None: spec['provider:physical_network'] = physical_network if mtu is not None: spec['mtu'] = mtu return self._create_resource(resource_type, spec) def update_network(self, id, **kwargs): return self._update_resource('network', id, kwargs) def delete_network(self, id): return self._delete_resource('network', id) def create_subnet(self, tenant_id, network_id, cidr, gateway_ip=None, name=None, enable_dhcp=True, ipv6_address_mode='slaac', ipv6_ra_mode='slaac', subnetpool_id=None, ip_version=None): resource_type = 'subnet' name = name or utils.get_rand_name(prefix=resource_type) if cidr and not ip_version: ip_version = netaddr.IPNetwork(cidr).version spec = {'tenant_id': tenant_id, 'network_id': network_id, 'name': name, 'enable_dhcp': enable_dhcp, 'ip_version': ip_version} if ip_version == constants.IP_VERSION_6: spec['ipv6_address_mode'] = ipv6_address_mode spec['ipv6_ra_mode'] = ipv6_ra_mode if gateway_ip: spec['gateway_ip'] = gateway_ip if subnetpool_id: spec['subnetpool_id'] = subnetpool_id if cidr: spec['cidr'] = cidr return self._create_resource(resource_type, spec) def list_ports(self, retrieve_all=True, **kwargs): resp = self.client.list_ports(retrieve_all=retrieve_all, **kwargs) return resp['ports'] def create_port(self, tenant_id, network_id, hostname=None, qos_policy_id=None, 
security_groups=None, **kwargs): spec = { 'network_id': network_id, 'tenant_id': tenant_id, } spec.update(kwargs) if hostname is not None: spec[portbindings.HOST_ID] = hostname if qos_policy_id: spec['qos_policy_id'] = qos_policy_id if security_groups: spec['security_groups'] = security_groups return self._create_resource('port', spec) def update_port(self, port_id, **kwargs): return self._update_resource('port', port_id, kwargs) def create_floatingip(self, tenant_id, floating_network_id, fixed_ip_address, port_id, qos_policy_id=None): spec = { 'floating_network_id': floating_network_id, 'tenant_id': tenant_id, 'fixed_ip_address': fixed_ip_address, 'port_id': port_id } if qos_policy_id: spec['qos_policy_id'] = qos_policy_id return self._create_resource('floatingip', spec) def add_router_interface(self, router_id, subnet_id): body = {'subnet_id': subnet_id} router_interface_info = self.client.add_interface_router( router=router_id, body=body) self.addCleanup(_safe_method(self.client.remove_interface_router), router=router_id, body=body) return router_interface_info def create_qos_policy(self, tenant_id, name, description, shared, is_default): policy = self.client.create_qos_policy( body={'policy': {'name': name, 'description': description, 'shared': shared, 'tenant_id': tenant_id, 'is_default': is_default}}) def detach_and_delete_policy(): qos_policy_id = policy['policy']['id'] ports_with_policy = self.client.list_ports()['ports'] for port in ports_with_policy: if qos_policy_id == port['qos_policy_id']: self.client.update_port( port['id'], body={'port': {'qos_policy_id': None}}) self.client.delete_qos_policy(qos_policy_id) # NOTE: We'll need to add support for detaching from network once # create_network() supports qos_policy_id. self.addCleanup(_safe_method(detach_and_delete_policy)) return policy['policy'] def create_bandwidth_limit_rule(self, tenant_id, qos_policy_id, limit=None, burst=None, direction=None): rule = {'tenant_id': tenant_id} if limit: rule['max_kbps'] = limit if burst: rule['max_burst_kbps'] = burst if direction: rule['direction'] = direction rule = self.client.create_bandwidth_limit_rule( policy=qos_policy_id, body={'bandwidth_limit_rule': rule}) self.addCleanup(_safe_method(self.client.delete_bandwidth_limit_rule), rule['bandwidth_limit_rule']['id'], qos_policy_id) return rule['bandwidth_limit_rule'] def create_minimum_bandwidth_rule(self, tenant_id, qos_policy_id, min_bw, direction=None): rule = {'tenant_id': tenant_id, 'min_kbps': min_bw} if direction: rule['direction'] = direction rule = self.client.create_minimum_bandwidth_rule( policy=qos_policy_id, body={'minimum_bandwidth_rule': rule}) self.addCleanup(_safe_method( self.client.delete_minimum_bandwidth_rule), rule['minimum_bandwidth_rule']['id'], qos_policy_id) return rule['minimum_bandwidth_rule'] def create_dscp_marking_rule(self, tenant_id, qos_policy_id, dscp_mark=0): rule = {'tenant_id': tenant_id} if dscp_mark: rule['dscp_mark'] = dscp_mark rule = self.client.create_dscp_marking_rule( policy=qos_policy_id, body={'dscp_marking_rule': rule}) self.addCleanup(_safe_method(self.client.delete_dscp_marking_rule), rule['dscp_marking_rule']['id'], qos_policy_id) return rule['dscp_marking_rule'] def create_trunk(self, tenant_id, port_id, name=None, admin_state_up=None, sub_ports=None): """Create a trunk via API. :param tenant_id: ID of the tenant. :param port_id: Parent port of trunk. :param name: Name of the trunk. :param admin_state_up: Admin state of the trunk. 
:param sub_ports: List of subport dictionaries in format {'port_id': , 'segmentation_type': 'vlan', 'segmentation_id': } :return: Dictionary with trunk's data returned from Neutron API. """ spec = { 'port_id': port_id, 'tenant_id': tenant_id, } if name is not None: spec['name'] = name if sub_ports is not None: spec['sub_ports'] = sub_ports if admin_state_up is not None: spec['admin_state_up'] = admin_state_up trunk = self.client.create_trunk({'trunk': spec})['trunk'] if sub_ports: self.addCleanup( _safe_method(self.trunk_remove_subports), tenant_id, trunk['id'], trunk['sub_ports']) self.addCleanup(_safe_method(self.client.delete_trunk), trunk['id']) return trunk def trunk_add_subports(self, tenant_id, trunk_id, sub_ports): """Add subports to the trunk. :param tenant_id: ID of the tenant. :param trunk_id: ID of the trunk. :param sub_ports: List of subport dictionaries to be added in format {'port_id': , 'segmentation_type': 'vlan', 'segmentation_id': } """ spec = { 'tenant_id': tenant_id, 'sub_ports': sub_ports, } trunk = self.client.trunk_add_subports(trunk_id, spec) sub_ports_to_remove = [ sub_port for sub_port in trunk['sub_ports'] if sub_port in sub_ports] self.addCleanup( _safe_method(self.trunk_remove_subports), tenant_id, trunk_id, sub_ports_to_remove) def trunk_remove_subports(self, tenant_id, trunk_id, sub_ports): """Remove subports from the trunk. :param trunk_id: ID of the trunk. :param sub_ports: List of subport port IDs. """ spec = { 'tenant_id': tenant_id, 'sub_ports': sub_ports, } return self.client.trunk_remove_subports(trunk_id, spec) def create_security_group(self, tenant_id, name=None, stateful=True): resource_type = 'security_group' name = name or utils.get_rand_name(prefix=resource_type) spec = {'tenant_id': tenant_id, 'name': name, 'stateful': stateful} return self._create_resource(resource_type, spec) def update_security_group(self, security_group_id, **kwargs): return self._update_resource('security_group', security_group_id, kwargs) def create_security_group_rule(self, tenant_id, security_group_id, **kwargs): resource_type = 'security_group_rule' spec = {'tenant_id': tenant_id, 'security_group_id': security_group_id} spec.update(kwargs) return self._create_resource(resource_type, spec) def create_network_log(self, tenant_id, resource_type, enabled=True, **kwargs): spec = {'project_id': tenant_id, 'resource_type': resource_type, 'enabled': enabled} spec.update(kwargs) net_log = self.client.create_network_log({'log': spec}) self.addCleanup( _safe_method(self.client.delete_network_log), net_log['log']['id']) return net_log def update_quota(self, project_id, tracked_resource, quota): self._update_resource('quota', project_id, {tracked_resource: quota}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/resources/config.py0000644000175000017500000004334300000000000025627 0ustar00coreycorey00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import shutil import tempfile from neutron_lib import constants from neutron.common import utils from neutron.plugins.ml2.extensions import qos as qos_ext from neutron.tests import base from neutron.tests.common import config_fixtures from neutron.tests.common.exclusive_resources import port from neutron.tests.common import helpers as c_helpers from neutron.tests.common import net_helpers from neutron.tests.fullstack import base as fullstack_base PHYSICAL_NETWORK_NAME = "physnet1" MINIMUM_BANDWIDTH_INGRESS_KBPS = 1000 MINIMUM_BANDWIDTH_EGRESS_KBPS = 1000 NEUTRON_SERVER_PORT_START = 10000 NEUTRON_SERVER_PORT_END = 20000 OVS_OF_PORT_LISTEN_START = 20001 OVS_OF_PORT_LISTEN_END = 30000 CLIENT_CONN_PORT_START = 30001 CLIENT_CONN_PORT_END = 65000 class ConfigFixture(config_fixtures.ConfigFileFixture): """A fixture that holds an actual Neutron configuration. Note that 'self.config' is intended to only be updated once, during the constructor, so if this fixture is re-used (setUp is called twice), then the dynamic configuration values won't change. The correct usage is initializing a new instance of the class. """ def __init__(self, env_desc, host_desc, temp_dir, base_filename): super(ConfigFixture, self).__init__( base_filename, config_fixtures.ConfigDict(), temp_dir) self.env_desc = env_desc self.host_desc = host_desc def _generate_namespace_suffix(self): return utils.get_rand_name(prefix='test') class NeutronConfigFixture(ConfigFixture): def __init__(self, env_desc, host_desc, temp_dir, connection, rabbitmq_environment, use_local_apipaste=True): super(NeutronConfigFixture, self).__init__( env_desc, host_desc, temp_dir, base_filename='neutron.conf') self.config.update({ 'DEFAULT': { 'host': self._generate_host(), 'state_path': self._generate_state_path(self.temp_dir), 'core_plugin': 'ml2', 'service_plugins': env_desc.service_plugins, 'auth_strategy': 'noauth', 'debug': 'True', 'global_physnet_mtu': str(env_desc.global_mtu), 'agent_down_time': str(env_desc.agent_down_time), 'transport_url': 'rabbit://%(user)s:%(password)s@%(host)s:5672/%(vhost)s' % {'user': rabbitmq_environment.user, 'password': rabbitmq_environment.password, 'host': rabbitmq_environment.host, 'vhost': rabbitmq_environment.vhost}, 'api_workers': '2', }, 'database': { 'connection': connection, }, 'oslo_concurrency': { 'lock_path': '$state_path/lock', }, 'agent': { 'report_interval': str(env_desc.agent_down_time // 2), 'log_agent_heartbeats': 'True', }, }) if use_local_apipaste: self.config['DEFAULT']['api_paste_config'] = ( self._generate_api_paste()) policy_file = self._generate_policy_json() if policy_file: self.config['oslo_policy'] = {'policy_file': policy_file} # Set root_helper/root_helper_daemon only when env var is set root_helper = os.environ.get('OS_ROOTWRAP_CMD') if root_helper: self.config['agent']['root_helper'] = root_helper root_helper_daemon = os.environ.get('OS_ROOTWRAP_DAEMON_CMD') if root_helper_daemon: self.config['agent']['root_helper_daemon'] = root_helper_daemon if env_desc.router_scheduler: self.config['DEFAULT']['router_scheduler_driver'] = ( env_desc.router_scheduler) if env_desc.has_placement: service_plugins = self.config['DEFAULT']['service_plugins'] self.config['DEFAULT']['service_plugins'] = ( '%s,%s' % (service_plugins, 'placement') ) self.config.update({ 'placement': { 'auth_type': 'noauth', 'auth_section': 'http://127.0.0.1:%s/placement' % env_desc.placement_port } }) if env_desc.dhcp_scheduler_class: self.config['DEFAULT']['dhcp_agents_per_network'] = '1' 
self.config['DEFAULT']['network_scheduler_driver'] = ( env_desc.dhcp_scheduler_class) net_helpers.set_local_port_range(CLIENT_CONN_PORT_START, CLIENT_CONN_PORT_END) def _setUp(self): self.config['DEFAULT'].update({ 'bind_port': self.useFixture( port.ExclusivePort(constants.PROTO_NAME_TCP, start=NEUTRON_SERVER_PORT_START, end=NEUTRON_SERVER_PORT_END)).port }) super(NeutronConfigFixture, self)._setUp() def _generate_host(self): return utils.get_rand_name(prefix='host-') def _generate_state_path(self, temp_dir): # Assume that temp_dir will be removed by the caller self.state_path = tempfile.mkdtemp(prefix='state_path', dir=temp_dir) return self.state_path def _generate_api_paste(self): return c_helpers.find_sample_file('api-paste.ini') def _generate_policy_json(self): return c_helpers.find_sample_file('policy.json') def get_host(self): return self.config['DEFAULT']['host'] class ML2ConfigFixture(ConfigFixture): def __init__(self, env_desc, host_desc, temp_dir, tenant_network_types): super(ML2ConfigFixture, self).__init__( env_desc, host_desc, temp_dir, base_filename='ml2_conf.ini') mechanism_drivers = self.env_desc.mech_drivers if self.env_desc.l2_pop: mechanism_drivers += ',l2population' self.config.update({ 'ml2': { 'tenant_network_types': tenant_network_types, 'mechanism_drivers': mechanism_drivers, }, 'ml2_type_vlan': { 'network_vlan_ranges': PHYSICAL_NETWORK_NAME + ':1000:1029', }, 'ml2_type_gre': { 'tunnel_id_ranges': '1:30', }, 'ml2_type_vxlan': { 'vni_ranges': '1001:1030', }, }) extension_drivers = ['port_security'] if env_desc.qos: extension_drivers.append(qos_ext.QOS_EXT_DRIVER_ALIAS) self.config['ml2']['extension_drivers'] = ','.join(extension_drivers) class OVSConfigFixture(ConfigFixture): def __init__(self, env_desc, host_desc, temp_dir, local_ip, **kwargs): super(OVSConfigFixture, self).__init__( env_desc, host_desc, temp_dir, base_filename='openvswitch_agent.ini') self.tunneling_enabled = self.env_desc.tunneling_enabled ext_dev = utils.get_rand_device_name(prefix='br-eth') self.config.update({ 'ovs': { 'local_ip': local_ip, 'integration_bridge': self._generate_integration_bridge(), 'bridge_mappings': '%s:%s' % (PHYSICAL_NETWORK_NAME, ext_dev), 'of_inactivity_probe': '0', 'ovsdb_debug': 'True', }, 'securitygroup': { 'firewall_driver': host_desc.firewall_driver, }, 'agent': { 'l2_population': str(self.env_desc.l2_pop), 'arp_responder': str(self.env_desc.arp_responder), 'debug_iptables_rules': str(env_desc.debug_iptables), 'use_helper_for_ns_read': 'False', } }) if self.tunneling_enabled: self.config['agent'].update({ 'tunnel_types': self.env_desc.network_type}) self.config['ovs'].update({ 'tunnel_bridge': self._generate_tunnel_bridge(), 'int_peer_patch_port': self._generate_int_peer(), 'tun_peer_patch_port': self._generate_tun_peer()}) else: if env_desc.report_bandwidths: self.config['ovs'][constants.RP_BANDWIDTHS] = \ '%s:%s:%s' % (ext_dev, MINIMUM_BANDWIDTH_EGRESS_KBPS, MINIMUM_BANDWIDTH_INGRESS_KBPS) if env_desc.qos: self.config['agent']['extensions'] = 'qos' if env_desc.log: self.config['agent']['extensions'] = 'log' test_name = kwargs.get("test_name") test_name = base.sanitize_log_path(test_name) self.config.update({ 'network_log': { 'local_output_log_base': self._generate_temp_log_file(test_name)} }) def _setUp(self): self.config['ovs'].update({ 'of_listen_port': self.useFixture( port.ExclusivePort(constants.PROTO_NAME_TCP, start=OVS_OF_PORT_LISTEN_START, end=OVS_OF_PORT_LISTEN_END)).port }) super(OVSConfigFixture, self)._setUp() def _generate_integration_bridge(self): 
return utils.get_rand_device_name(prefix='br-int') def _generate_tunnel_bridge(self): return utils.get_rand_device_name(prefix='br-tun') def _generate_int_peer(self): return utils.get_rand_device_name(prefix='patch-tun') def _generate_tun_peer(self): return utils.get_rand_device_name(prefix='patch-int') def _generate_temp_log_file(self, test_name): log_dir_path = os.path.join(fullstack_base.DEFAULT_LOG_DIR, test_name) if not os.path.exists(log_dir_path): os.mkdir(log_dir_path, 0o755) return '%s/%s.log' % (log_dir_path, utils.get_rand_name(prefix="test-sg-")) def get_br_int_name(self): return self.config.ovs.integration_bridge def get_br_phys_name(self): return self.config.ovs.bridge_mappings.split(':')[1] def get_br_tun_name(self): return self.config.ovs.tunnel_bridge class SRIOVConfigFixture(ConfigFixture): def __init__(self, env_desc, host_desc, temp_dir, local_ip): super(SRIOVConfigFixture, self).__init__( env_desc, host_desc, temp_dir, base_filename='sriov_agent.ini') device1 = utils.get_rand_device_name(prefix='ens5') device2 = utils.get_rand_device_name(prefix='ens6') phys_dev_mapping = '%s:%s,%s:%s' % (PHYSICAL_NETWORK_NAME, device1, PHYSICAL_NETWORK_NAME, device2) rp_bandwidths = '%s:%s:%s,%s:%s:%s' % (device1, MINIMUM_BANDWIDTH_EGRESS_KBPS, MINIMUM_BANDWIDTH_INGRESS_KBPS, device2, MINIMUM_BANDWIDTH_EGRESS_KBPS, MINIMUM_BANDWIDTH_INGRESS_KBPS) self.config.update({ 'sriov_nic': { 'physical_device_mappings': phys_dev_mapping, 'resource_provider_bandwidths': rp_bandwidths, } }) def _setUp(self): super(SRIOVConfigFixture, self)._setUp() class PlacementConfigFixture(ConfigFixture): def __init__(self, env_desc, host_desc, temp_dir): super(PlacementConfigFixture, self).__init__( env_desc, host_desc, temp_dir, base_filename='placement.ini') self.config.update({ 'DEFAULT': { 'debug': 'True', 'placement_port': self.env_desc.placement_port } }) def _setUp(self): super(PlacementConfigFixture, self)._setUp() class LinuxBridgeConfigFixture(ConfigFixture): def __init__(self, env_desc, host_desc, temp_dir, local_ip, physical_device_name): super(LinuxBridgeConfigFixture, self).__init__( env_desc, host_desc, temp_dir, base_filename="linuxbridge_agent.ini" ) self.config.update({ 'VXLAN': { 'enable_vxlan': str(self.env_desc.tunneling_enabled), 'local_ip': local_ip, 'l2_population': str(self.env_desc.l2_pop), }, 'securitygroup': { 'firewall_driver': host_desc.firewall_driver, }, 'AGENT': { 'debug_iptables_rules': str(env_desc.debug_iptables), 'use_helper_for_ns_read': 'False', } }) if env_desc.qos: self.config.update({ 'AGENT': { 'extensions': 'qos' } }) if self.env_desc.tunneling_enabled: self.config.update({ 'LINUX_BRIDGE': { 'bridge_mappings': self._generate_bridge_mappings( physical_device_name ) } }) else: self.config.update({ 'LINUX_BRIDGE': { 'physical_interface_mappings': self._generate_bridge_mappings( physical_device_name ) } }) def _generate_bridge_mappings(self, device_name): return '%s:%s' % (PHYSICAL_NETWORK_NAME, device_name) class L3ConfigFixture(ConfigFixture): def __init__(self, env_desc, host_desc, temp_dir, integration_bridge=None): super(L3ConfigFixture, self).__init__( env_desc, host_desc, temp_dir, base_filename='l3_agent.ini') if host_desc.l2_agent_type == constants.AGENT_TYPE_OVS: self._prepare_config_with_ovs_agent(integration_bridge) elif host_desc.l2_agent_type == constants.AGENT_TYPE_LINUXBRIDGE: self._prepare_config_with_linuxbridge_agent() if host_desc.l3_agent_mode: self.config['DEFAULT'].update({ 'agent_mode': host_desc.l3_agent_mode}) self.config['DEFAULT'].update({ 
'debug': 'True', 'test_namespace_suffix': self._generate_namespace_suffix(), }) self.config.update({ 'agent': {'use_helper_for_ns_read': 'False'} }) if host_desc.availability_zone: self.config['agent'].update({ 'availability_zone': host_desc.availability_zone }) if host_desc.l3_agent_extensions: self.config['agent'].update({ 'extensions': host_desc.l3_agent_extensions }) def _prepare_config_with_ovs_agent(self, integration_bridge): self.config.update({ 'DEFAULT': { 'interface_driver': ('neutron.agent.linux.interface.' 'OVSInterfaceDriver'), }, 'OVS': { 'integration_bridge': integration_bridge, } }) def _prepare_config_with_linuxbridge_agent(self): self.config.update({ 'DEFAULT': { 'interface_driver': ('neutron.agent.linux.interface.' 'BridgeInterfaceDriver'), } }) class DhcpConfigFixture(ConfigFixture): def __init__(self, env_desc, host_desc, temp_dir, integration_bridge=None): super(DhcpConfigFixture, self).__init__( env_desc, host_desc, temp_dir, base_filename='dhcp_agent.ini') if host_desc.l2_agent_type == constants.AGENT_TYPE_OVS: self._prepare_config_with_ovs_agent(integration_bridge) elif host_desc.l2_agent_type == constants.AGENT_TYPE_LINUXBRIDGE: self._prepare_config_with_linuxbridge_agent() self.config['DEFAULT'].update({ 'debug': 'True', 'dhcp_confs': self._generate_dhcp_path(), 'test_namespace_suffix': self._generate_namespace_suffix() }) self.config.update({ 'AGENT': {'use_helper_for_ns_read': 'False'} }) if host_desc.availability_zone: self.config['AGENT'].update({ 'availability_zone': host_desc.availability_zone }) def _setUp(self): super(DhcpConfigFixture, self)._setUp() self.addCleanup(self._clean_dhcp_path) def _prepare_config_with_ovs_agent(self, integration_bridge): self.config.update({ 'DEFAULT': { 'interface_driver': 'openvswitch', }, 'OVS': { 'integration_bridge': integration_bridge, } }) def _prepare_config_with_linuxbridge_agent(self): self.config.update({ 'DEFAULT': { 'interface_driver': 'linuxbridge', } }) def _generate_dhcp_path(self): # NOTE(slaweq): the dhcp_confs path needs to be a directory with read # permission for everyone, otherwise the dnsmasq process will not be # able to read its config files self.dhcp_path = tempfile.mkdtemp(prefix="dhcp_configs_", dir="/tmp/") os.chmod(self.dhcp_path, 0o755) return self.dhcp_path def _clean_dhcp_path(self): shutil.rmtree(self.dhcp_path, ignore_errors=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/resources/environment.py0000644000175000017500000004215200000000000026723 0ustar00coreycorey00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
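# NOTE: illustrative usage sketch, not part of the original module. A
# fullstack test composes the fixtures defined below and hands the result to
# BaseFullStackTestCase.setUp(), roughly like this ('TestSomething' is a
# hypothetical test class):
#
#     env = Environment(
#         EnvironmentDescription(network_type='vlan', l2_pop=False),
#         [HostDescription(l3_agent=True, dhcp_agent=True),
#          HostDescription()])
#     super(TestSomething, self).setUp(env)
#
# Each HostDescription becomes one Host fixture (a hostname plus its set of
# agents), and Environment blocks until every spawned agent reports alive.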
import fixtures from neutron_lib import constants from neutronclient.common import exceptions as nc_exc from oslo_config import cfg from neutron.agent.linux import ip_lib from neutron.common import utils as common_utils from neutron.plugins.ml2.drivers.linuxbridge.agent import \ linuxbridge_neutron_agent as lb_agent from neutron.tests.common.exclusive_resources import ip_address from neutron.tests.common.exclusive_resources import ip_network from neutron.tests.common import net_helpers from neutron.tests.fullstack.resources import config from neutron.tests.fullstack.resources import process class EnvironmentDescription(object): """A set of characteristics of an environment setup. Does the setup, as a whole, support tunneling? How about l2pop? """ def __init__(self, network_type='vxlan', l2_pop=True, qos=False, mech_drivers='openvswitch,linuxbridge', service_plugins='router', arp_responder=False, agent_down_time=75, router_scheduler=None, global_mtu=constants.DEFAULT_NETWORK_MTU, debug_iptables=False, log=False, report_bandwidths=False, has_placement=False, placement_port=None, dhcp_scheduler_class=None,): self.network_type = network_type self.l2_pop = l2_pop self.qos = qos self.log = log self.network_range = None self.mech_drivers = mech_drivers self.arp_responder = arp_responder self.agent_down_time = agent_down_time self.router_scheduler = router_scheduler self.global_mtu = global_mtu self.service_plugins = service_plugins self.debug_iptables = debug_iptables self.report_bandwidths = report_bandwidths self.has_placement = has_placement self.placement_port = placement_port self.dhcp_scheduler_class = dhcp_scheduler_class if self.qos: self.service_plugins += ',qos' if self.log: self.service_plugins += ',log' @property def tunneling_enabled(self): return self.network_type in ('vxlan', 'gre') class HostDescription(object): """A set of characteristics of an environment Host. What agents should the host spawn? What mode should each agent operate under? """ def __init__(self, l3_agent=False, dhcp_agent=False, l2_agent_type=constants.AGENT_TYPE_OVS, firewall_driver='noop', availability_zone=None, l3_agent_mode=None, l3_agent_extensions=None): self.l2_agent_type = l2_agent_type self.l3_agent = l3_agent self.dhcp_agent = dhcp_agent self.firewall_driver = firewall_driver self.availability_zone = availability_zone self.l3_agent_mode = l3_agent_mode self.l3_agent_extensions = l3_agent_extensions class Host(fixtures.Fixture): """The Host class models a physical host running agents, all reporting with the same hostname. OpenStack installers or administrators connect compute nodes to the physical tenant network by connecting the provider bridges to their respective physical NICs. Or, if using tunneling, by configuring an IP address on the appropriate physical NIC. The Host class does the same with the connect_* methods. TODO(amuller): Add start/stop/restart methods that will start/stop/restart all of the agents on this host. Add a kill method that stops all agents and disconnects the host from other hosts. 
""" def __init__(self, env_desc, host_desc, test_name, neutron_config, central_bridge): self.env_desc = env_desc self.host_desc = host_desc self.test_name = test_name self.neutron_config = neutron_config self.central_bridge = central_bridge self.host_namespace = None self.agents = {} # we need to cache already created "per network" bridges if linuxbridge # agent is used on host: self.network_bridges = {} def _setUp(self): self.local_ip = self.allocate_local_ip() if self.host_desc.l2_agent_type == constants.AGENT_TYPE_OVS: self.setup_host_with_ovs_agent() elif self.host_desc.l2_agent_type == constants.AGENT_TYPE_NIC_SWITCH: self.setup_host_with_sriov_agent() elif self.host_desc.l2_agent_type == constants.AGENT_TYPE_LINUXBRIDGE: self.setup_host_with_linuxbridge_agent() if self.host_desc.l3_agent: self.l3_agent = self.useFixture( process.L3AgentFixture( self.env_desc, self.host_desc, self.test_name, self.neutron_config, self.l3_agent_cfg_fixture)) if self.host_desc.dhcp_agent: self.dhcp_agent = self.useFixture( process.DhcpAgentFixture( self.env_desc, self.host_desc, self.test_name, self.neutron_config, self.dhcp_agent_cfg_fixture, namespace=self.host_namespace)) def setup_host_with_ovs_agent(self): agent_cfg_fixture = config.OVSConfigFixture( self.env_desc, self.host_desc, self.neutron_config.temp_dir, self.local_ip, test_name=self.test_name) self.useFixture(agent_cfg_fixture) self.br_phys = self.useFixture( net_helpers.OVSBridgeFixture( agent_cfg_fixture.get_br_phys_name())).bridge if self.env_desc.tunneling_enabled: self.useFixture( net_helpers.OVSBridgeFixture( agent_cfg_fixture.get_br_tun_name())).bridge self.connect_to_central_network_via_tunneling() else: self.connect_to_central_network_via_vlans(self.br_phys) self.ovs_agent = self.useFixture( process.OVSAgentFixture( self.env_desc, self.host_desc, self.test_name, self.neutron_config, agent_cfg_fixture)) if self.host_desc.l3_agent: self.l3_agent_cfg_fixture = self.useFixture( config.L3ConfigFixture( self.env_desc, self.host_desc, self.neutron_config.temp_dir, self.ovs_agent.agent_cfg_fixture.get_br_int_name())) if self.host_desc.dhcp_agent: self.dhcp_agent_cfg_fixture = self.useFixture( config.DhcpConfigFixture( self.env_desc, self.host_desc, self.neutron_config.temp_dir, self.ovs_agent.agent_cfg_fixture.get_br_int_name())) def setup_host_with_sriov_agent(self): agent_cfg_fixture = config.SRIOVConfigFixture( self.env_desc, self.host_desc, self.neutron_config.temp_dir, self.local_ip) self.useFixture(agent_cfg_fixture) self.sriov_agent = self.useFixture( process.SRIOVAgentFixture( self.env_desc, self.host_desc, self.test_name, self.neutron_config, agent_cfg_fixture)) def setup_host_with_linuxbridge_agent(self): # First we need to provide connectivity for agent to prepare proper # bridge mappings in agent's config: self.host_namespace = self.useFixture( net_helpers.NamespaceFixture(prefix="host-") ).name self.connect_namespace_to_control_network() agent_cfg_fixture = config.LinuxBridgeConfigFixture( self.env_desc, self.host_desc, self.neutron_config.temp_dir, self.local_ip, physical_device_name=self.host_port.name ) self.useFixture(agent_cfg_fixture) self.linuxbridge_agent = self.useFixture( process.LinuxBridgeAgentFixture( self.env_desc, self.host_desc, self.test_name, self.neutron_config, agent_cfg_fixture, namespace=self.host_namespace ) ) if self.host_desc.l3_agent: self.l3_agent_cfg_fixture = self.useFixture( config.L3ConfigFixture( self.env_desc, self.host_desc, self.neutron_config.temp_dir)) if self.host_desc.dhcp_agent: 
self.dhcp_agent_cfg_fixture = self.useFixture( config.DhcpConfigFixture( self.env_desc, self.host_desc, self.neutron_config.temp_dir)) def _connect_ovs_port(self, cidr_address): ovs_device = self.useFixture( net_helpers.OVSPortFixture( bridge=self.central_bridge, namespace=self.host_namespace)).port # NOTE: This sets an IP address on the host's root namespace # which is cleaned up when the device is deleted. ovs_device.addr.add(cidr_address) return ovs_device def connect_namespace_to_control_network(self): self.host_port = self._connect_ovs_port( common_utils.ip_to_cidr(self.local_ip, 24) ) self.host_port.link.set_up() def connect_to_central_network_via_tunneling(self): veth_1, veth_2 = self.useFixture( net_helpers.VethFixture()).ports # NOTE: This sets an IP address on the host's root namespace # which is cleaned up when the device is deleted. veth_1.addr.add(common_utils.ip_to_cidr(self.local_ip, 32)) veth_1.link.set_up() veth_2.link.set_up() def connect_to_central_network_via_vlans(self, host_data_bridge): # If using VLANs as a segmentation device, it's needed to connect # a provider bridge to a centralized, shared bridge. net_helpers.create_patch_ports( self.central_bridge, host_data_bridge) def allocate_local_ip(self): if not self.env_desc.network_range: return str(self.useFixture( ip_address.ExclusiveIPAddress( '240.0.0.1', '240.255.255.254')).address) return str(self.useFixture( ip_address.ExclusiveIPAddress( str(self.env_desc.network_range[2]), str(self.env_desc.network_range[-2]))).address) def get_bridge(self, network_id): if "ovs" in self.agents.keys(): return self.ovs_agent.br_int elif "linuxbridge" in self.agents.keys(): bridge = self.network_bridges.get(network_id, None) if not bridge: br_prefix = lb_agent.LinuxBridgeManager.get_bridge_name( network_id) bridge = self.useFixture( net_helpers.LinuxBridgeFixture( prefix=br_prefix, namespace=self.host_namespace, prefix_is_full_name=True)).bridge self.network_bridges[network_id] = bridge return bridge @property def hostname(self): return self.neutron_config.config.DEFAULT.host @property def l3_agent(self): return self.agents['l3'] @l3_agent.setter def l3_agent(self, agent): self.agents['l3'] = agent @property def dhcp_agent(self): return self.agents['dhcp'] @dhcp_agent.setter def dhcp_agent(self, agent): self.agents['dhcp'] = agent @property def ovs_agent(self): return self.agents['ovs'] @ovs_agent.setter def ovs_agent(self, agent): self.agents['ovs'] = agent @property def sriov_agent(self): return self.agents['sriov'] @sriov_agent.setter def sriov_agent(self, agent): self.agents['sriov'] = agent @property def linuxbridge_agent(self): return self.agents['linuxbridge'] @linuxbridge_agent.setter def linuxbridge_agent(self, agent): self.agents['linuxbridge'] = agent @property def l2_agent(self): if self.host_desc.l2_agent_type == constants.AGENT_TYPE_LINUXBRIDGE: return self.linuxbridge_agent elif self.host_desc.l2_agent_type == constants.AGENT_TYPE_OVS: return self.ovs_agent elif self.host_desc.l2_agent_type == constants.AGENT_TYPE_NIC_SWITCH: return self.sriov_agent class Environment(fixtures.Fixture): """Represents a deployment topology. Environment is a collection of hosts. It starts a Neutron server and a parametrized number of Hosts, each a collection of agents. The Environment accepts a collection of HostDescription, each describing the type of Host to create. """ def __init__(self, env_desc, hosts_desc): """Initialize Environment :param env_desc: An EnvironmentDescription instance. 
:param hosts_desc: A list of HostDescription instances. """ super(Environment, self).__init__() self.env_desc = env_desc self.hosts_desc = hosts_desc self.hosts = [] def wait_until_env_is_up(self): common_utils.wait_until_true( self._processes_are_ready, timeout=180, sleep=10) def _processes_are_ready(self): try: running_agents = self.neutron_server.client.list_agents()['agents'] agents_count = sum(len(host.agents) for host in self.hosts) return len(running_agents) == agents_count except nc_exc.NeutronClientException: return False def _create_host(self, host_desc): temp_dir = self.useFixture(fixtures.TempDir()).path neutron_config = config.NeutronConfigFixture( self.env_desc, host_desc, temp_dir, cfg.CONF.database.connection, self.rabbitmq_environment) self.useFixture(neutron_config) return self.useFixture( Host(self.env_desc, host_desc, self.test_name, neutron_config, self.central_bridge)) def _setUp(self): self.temp_dir = self.useFixture(fixtures.TempDir()).path # we need this bridge before the rabbit and neutron services start self.central_bridge = self.useFixture( net_helpers.OVSBridgeFixture('cnt-data')).bridge # Get rabbitmq address (and cnt-data network) rabbitmq_ip_address = self._configure_port_for_rabbitmq() self.rabbitmq_environment = self.useFixture( process.RabbitmqEnvironmentFixture(host=rabbitmq_ip_address) ) plugin_cfg_fixture = self.useFixture( config.ML2ConfigFixture( self.env_desc, self.hosts_desc, self.temp_dir, self.env_desc.network_type)) neutron_cfg_fixture = self.useFixture( config.NeutronConfigFixture( self.env_desc, None, self.temp_dir, cfg.CONF.database.connection, self.rabbitmq_environment)) self.neutron_server = self.useFixture( process.NeutronServerFixture( self.env_desc, None, self.test_name, neutron_cfg_fixture, plugin_cfg_fixture)) if self.env_desc.has_placement: placement_cfg_fixture = self.useFixture( config.PlacementConfigFixture(self.env_desc, self.hosts_desc, self.temp_dir) ) self.placement = self.useFixture( process.PlacementFixture( self.env_desc, self.hosts_desc, self.test_name, placement_cfg_fixture) ) self.hosts = [self._create_host(desc) for desc in self.hosts_desc] self.wait_until_env_is_up() def _configure_port_for_rabbitmq(self): self.env_desc.network_range = self._get_network_range() if not self.env_desc.network_range: return "127.0.0.1" rabbitmq_ip = str(self.env_desc.network_range[1]) rabbitmq_port = ip_lib.IPDevice(self.central_bridge.br_name) rabbitmq_port.addr.add(common_utils.ip_to_cidr(rabbitmq_ip, 24)) rabbitmq_port.link.set_up() return rabbitmq_ip def _get_network_range(self): # NOTE(slaweq): We need to choose an IP address on which rabbitmq will # be available because LinuxBridge agents are spawned in their own # namespaces and need to know where the rabbitmq server is listening. # For the ovs agent it is not necessary because agents are spawned in # the global scope together with the rabbitmq server, so the default # localhost address is fine for them for desc in self.hosts_desc: if desc.l2_agent_type == constants.AGENT_TYPE_LINUXBRIDGE: return self.useFixture( ip_network.ExclusiveIPNetwork( "240.0.0.0", "240.255.255.255", "24")).network ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/resources/machine.py0000644000175000017500000002117500000000000025765 0ustar00coreycorey00000000000000# Copyright 2015 Red Hat, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from distutils import spawn import itertools import netaddr from oslo_log import log as logging from neutron_lib.api.definitions import portbindings as pbs from neutron_lib import constants from neutron.agent.common import async_process from neutron.agent.linux import ip_lib from neutron.common import utils from neutron.tests.common import machine_fixtures from neutron.tests.common import net_helpers FULLSTACK_DHCLIENT_SCRIPT = 'fullstack-dhclient-script' LOG = logging.getLogger(__name__) class FakeFullstackMachinesList(list): """A list of items implementing the FakeFullstackMachine interface.""" def block_until_all_boot(self): for vm in self: vm.block_until_boot() def ping_all(self): # Generate an iterable of all unique pairs. For example: # itertools.permutations(range(3), 2) results in: # ((0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)) for vm_1, vm_2 in itertools.permutations(self, 2): vm_1.block_until_ping(vm_2.ip) class FakeFullstackMachine(machine_fixtures.FakeMachineBase): NO_RESOLV_CONF_DHCLIENT_SCRIPT_PATH = ( spawn.find_executable(FULLSTACK_DHCLIENT_SCRIPT)) def __init__(self, host, network_id, tenant_id, safe_client, neutron_port=None, bridge_name=None, use_dhcp=False): super(FakeFullstackMachine, self).__init__() self.host = host self.tenant_id = tenant_id self.network_id = network_id self.safe_client = safe_client self.neutron_port = neutron_port self.bridge_name = bridge_name self.use_dhcp = use_dhcp self.dhclient_async = None def _setUp(self): super(FakeFullstackMachine, self)._setUp() self.bridge = self._get_bridge() if not self.neutron_port: self.neutron_port = self.safe_client.create_port( network_id=self.network_id, tenant_id=self.tenant_id, hostname=self.host.hostname) mac_address = self.neutron_port['mac_address'] hybrid_plug = self.neutron_port[pbs.VIF_DETAILS].get( pbs.OVS_HYBRID_PLUG, False) self.bind_port_if_needed() self.port = self.useFixture( net_helpers.PortFixture.get( self.bridge, self.namespace, mac_address, self.neutron_port['id'], hybrid_plug)).port for fixed_ip in self.neutron_port['fixed_ips']: self._configure_ipaddress(fixed_ip) def bind_port_if_needed(self): if self.neutron_port[pbs.VIF_TYPE] == pbs.VIF_TYPE_UNBOUND: self.safe_client.client.update_port( self.neutron_port['id'], {'port': {pbs.HOST_ID: self.host.hostname}}) self.addCleanup(self.safe_client.client.update_port, self.neutron_port['id'], {'port': {pbs.HOST_ID: ''}}) def _get_bridge(self): if self.bridge_name is None: return self.host.get_bridge(self.network_id) agent_type = self.host.host_desc.l2_agent_type if agent_type == constants.AGENT_TYPE_OVS: new_bridge = self.useFixture( net_helpers.OVSTrunkBridgeFixture(self.bridge_name)).bridge else: raise NotImplementedError( "Support for %s agent is not implemented." 
                % agent_type)
        return new_bridge

    def _configure_ipaddress(self, fixed_ip):
        subnet_id = fixed_ip['subnet_id']
        subnet = self.safe_client.client.show_subnet(subnet_id)
        if (netaddr.IPAddress(fixed_ip['ip_address']).version ==
                constants.IP_VERSION_6):
            # v6Address/default_route is auto-configured.
            self._ipv6 = fixed_ip['ip_address']
            self.gateway_ipv6 = subnet['subnet']['gateway_ip']
        else:
            self._ip = fixed_ip['ip_address']
            prefixlen = netaddr.IPNetwork(subnet['subnet']['cidr']).prefixlen
            self._ip_cidr = '%s/%s' % (self._ip, prefixlen)
            self.gateway_ip = subnet['subnet']['gateway_ip']

            if self.use_dhcp:
                self._configure_ipaddress_via_dhcp()
            else:
                self._configure_static_ipaddress()

    def _configure_static_ipaddress(self):
        self.port.addr.add(self.ip_cidr)
        if self.gateway_ip:
            net_helpers.set_namespace_gateway(self.port, self.gateway_ip)

    def _configure_ipaddress_via_dhcp(self):
        self._start_async_dhclient()
        self.addCleanup(self._stop_async_dhclient)

    def _start_async_dhclient(self):
        cmd = ["dhclient", '-sf', self.NO_RESOLV_CONF_DHCLIENT_SCRIPT_PATH,
               '--no-pid', '-d', self.port.name]
        self.dhclient_async = async_process.AsyncProcess(
            cmd, run_as_root=True, respawn_interval=5,
            namespace=self.namespace)
        self.dhclient_async.start()

    def _stop_async_dhclient(self):
        if not self.dhclient_async:
            return
        try:
            self.dhclient_async.stop()
            cmd = self.dhclient_async.cmd
            stdout = list(self.dhclient_async.iter_stdout())
            stderr = list(self.dhclient_async.iter_stderr())
            LOG.debug('Stopping async dhclient [%(cmd)s]. stdout: '
                      '[%(stdout)s] - stderr: [%(stderr)s]',
                      {'cmd': cmd,
                       'stdout': stdout,
                       'stderr': stderr})
        except async_process.AsyncProcessException:
            # If it was already stopped, then we don't care about it.
            pass

    @property
    def ipv6(self):
        return self._ipv6

    @property
    def ip(self):
        return self._ip

    @property
    def ip_cidr(self):
        return self._ip_cidr

    def ip_configured(self):
        for port_ip in self.port.addr.list(ip_version=constants.IP_VERSION_4):
            if port_ip.get('cidr') == self.ip_cidr:
                return True
        return False

    def gateway_configured(self):
        gateway_info = self.port.route.get_gateway()
        if not gateway_info:
            return False
        return gateway_info.get('via') == self.gateway_ip

    def block_until_boot(self):
        utils.wait_until_true(
            lambda: (self.safe_client.client.show_port(
                self.neutron_port['id'])['port']['status'] == 'ACTIVE'),
            sleep=3)

    def block_until_dhcp_config_done(self):
        utils.wait_until_true(
            lambda: self.ip_configured() and self.gateway_configured(),
            exception=machine_fixtures.FakeMachineException(
                "Address %s or gateway %s not configured properly on "
                "port %s" % (self.ip_cidr, self.gateway_ip, self.port.name)))

    def destroy(self):
        """Destroy this fake machine.

        This should simulate deletion of a vm. It doesn't call cleanUp().
        """
        self.safe_client.client.update_port(
            self.neutron_port['id'],
            {'port': {pbs.HOST_ID: ''}})
        # All associated VLAN interfaces are deleted too.
        # If the VM is connected to a Linux bridge, the bridge has no
        # "delete_port" method and there is no need to delete the tap port
        # connected to it: it is a veth pair and is removed together with
        # the VM namespace.
        if hasattr(self.bridge, "delete_port"):
            self.bridge.delete_port(self.port.name)

        ip_lib.delete_network_namespace(self.namespace)


class FakeFullstackTrunkMachine(FakeFullstackMachine):
    def __init__(self, trunk, *args, **kwargs):
        super(FakeFullstackTrunkMachine, self).__init__(*args, **kwargs)
        self.trunk = trunk

    def add_vlan_interface(self, mac_address, ip_address, segmentation_id):
        """Add VLAN interface to VM's namespace.
:param mac_address: MAC address to be set on VLAN interface. :param ip_address: The IPNetwork instance containing IP address assigned to the interface. :param segmentation_id: VLAN tag added to the interface. """ net_helpers.create_vlan_interface( self.namespace, self.port.name, mac_address, ip_address, segmentation_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/resources/process.py0000644000175000017500000003455600000000000026046 0ustar00coreycorey00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from distutils import spawn import os import re import signal import fixtures from neutronclient.common import exceptions as nc_exc from neutronclient.v2_0 import client from oslo_log import log as logging from oslo_utils import fileutils from neutron.agent.common import async_process from neutron.agent.linux import ip_lib from neutron.agent.linux import utils from neutron.common import utils as common_utils from neutron.tests import base from neutron.tests.common import net_helpers from neutron.tests.fullstack import base as fullstack_base LOG = logging.getLogger(__name__) CMD_FOLDER = 'agents' class ProcessFixture(fixtures.Fixture): def __init__(self, test_name, process_name, exec_name, config_filenames, namespace=None, kill_signal=signal.SIGKILL): super(ProcessFixture, self).__init__() self.test_name = test_name self.process_name = process_name self.exec_name = exec_name self.config_filenames = config_filenames self.process = None self.kill_signal = kill_signal self.namespace = namespace def _setUp(self): self.start() self.addCleanup(self.stop) def start(self): test_name = base.sanitize_log_path(self.test_name) log_dir = os.path.join(fullstack_base.DEFAULT_LOG_DIR, test_name) fileutils.ensure_tree(log_dir, mode=0o755) timestamp = datetime.datetime.now().strftime("%Y-%m-%d--%H-%M-%S-%f") log_file = "%s--%s.log" % (self.process_name, timestamp) run_as_root = bool(self.namespace) exec_name = (self.exec_name if run_as_root else spawn.find_executable(self.exec_name)) cmd = [exec_name, '--log-dir', log_dir, '--log-file', log_file] for filename in self.config_filenames: cmd += ['--config-file', filename] self.process = async_process.AsyncProcess( cmd, run_as_root=run_as_root, namespace=self.namespace ) self.process.start(block=True) LOG.debug("Process started: %s", self.process_name) def stop(self, kill_signal=None): kill_signal = kill_signal or self.kill_signal try: self.process.stop( block=True, kill_signal=kill_signal, kill_timeout=15) except async_process.AsyncProcessException as e: if "Process is not running" not in str(e): raise LOG.debug("Process stopped: %s", self.process_name) def restart(self, executor=None): def _restart(): self.stop() self.start() LOG.debug("Restarting process: %s", self.process_name) if executor is None: _restart() else: return executor.submit(_restart) class RabbitmqEnvironmentFixture(fixtures.Fixture): def 
__init__(self, host="127.0.0.1"): super(RabbitmqEnvironmentFixture, self).__init__() self.host = host def _setUp(self): self.user = common_utils.get_rand_name(prefix='user') self.password = common_utils.get_rand_name(prefix='pass') self.vhost = common_utils.get_rand_name(prefix='vhost') self._execute('add_user', self.user, self.password) self.addCleanup(self._execute, 'delete_user', self.user) self._execute('add_vhost', self.vhost) self.addCleanup(self._execute, 'delete_vhost', self.vhost) self._execute('set_permissions', '-p', self.vhost, self.user, '.*', '.*', '.*') def _execute(self, *args): cmd = ['rabbitmqctl'] cmd.extend(args) utils.execute(cmd, run_as_root=True) class ServiceFixture(fixtures.Fixture): def restart(self, executor=None): return self.process_fixture.restart(executor=executor) def start(self): return self.process_fixture.start() def stop(self, kill_signal=None): return self.process_fixture.stop(kill_signal=kill_signal) class NeutronServerFixture(ServiceFixture): NEUTRON_SERVER = "neutron-server" def __init__(self, env_desc, host_desc, test_name, neutron_cfg_fixture, plugin_cfg_fixture, service_cfg_fixtures=None): super(NeutronServerFixture, self).__init__() self.env_desc = env_desc self.host_desc = host_desc self.test_name = test_name self.neutron_cfg_fixture = neutron_cfg_fixture self.plugin_cfg_fixture = plugin_cfg_fixture self.service_cfg_fixtures = service_cfg_fixtures def _setUp(self): config_filenames = [self.neutron_cfg_fixture.filename, self.plugin_cfg_fixture.filename] if self.service_cfg_fixtures: config_filenames.extend( [scf.filename for scf in self.service_cfg_fixtures]) self.process_fixture = self.useFixture(ProcessFixture( test_name=self.test_name, process_name=self.NEUTRON_SERVER, exec_name=self.NEUTRON_SERVER, config_filenames=config_filenames, kill_signal=signal.SIGTERM)) common_utils.wait_until_true(self.server_is_live) def server_is_live(self): try: self.client.list_networks() return True except nc_exc.NeutronClientException: return False @property def client(self): url = ("http://127.0.0.1:%s" % self.neutron_cfg_fixture.config.DEFAULT.bind_port) return client.Client(auth_strategy="noauth", endpoint_url=url) class OVSAgentFixture(ServiceFixture): NEUTRON_OVS_AGENT = "neutron-openvswitch-agent" def __init__(self, env_desc, host_desc, test_name, neutron_cfg_fixture, agent_cfg_fixture): super(OVSAgentFixture, self).__init__() self.env_desc = env_desc self.host_desc = host_desc self.test_name = test_name self.neutron_cfg_fixture = neutron_cfg_fixture self.neutron_config = self.neutron_cfg_fixture.config self.agent_cfg_fixture = agent_cfg_fixture self.agent_config = agent_cfg_fixture.config def _setUp(self): self.br_int = self.useFixture( net_helpers.OVSBridgeFixture( self.agent_cfg_fixture.get_br_int_name())).bridge config_filenames = [self.neutron_cfg_fixture.filename, self.agent_cfg_fixture.filename] self.process_fixture = self.useFixture(ProcessFixture( test_name=self.test_name, process_name=self.NEUTRON_OVS_AGENT, exec_name=spawn.find_executable( 'ovs_agent.py', path=os.path.join(fullstack_base.ROOTDIR, CMD_FOLDER)), config_filenames=config_filenames, kill_signal=signal.SIGTERM)) class PlacementFixture(fixtures.Fixture): def __init__(self, env_desc, host_desc, test_name, placement_cfg_fixture): super(PlacementFixture, self).__init__() self.env_desc = env_desc self.host_desc = host_desc self.test_name = test_name self.placement_cfg_fixture = placement_cfg_fixture self.placement_config = self.placement_cfg_fixture.config def _setUp(self): 
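        # NOTE: A rough sketch of what the ProcessFixture below ends up
        # executing for the fake placement server; the paths are
        # illustrative, not taken from a real run (see ProcessFixture.start
        # in this module for the actual command construction):
        #
        #   placement.py --log-dir <DEFAULT_LOG_DIR>/<test_name> \
        #                --log-file placement--<timestamp>.log \
        #                --config-file <placement_cfg_fixture.filename>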
self.process_fixture = self.useFixture(ProcessFixture( test_name=self.test_name, process_name='placement', exec_name=spawn.find_executable( 'placement.py', path=os.path.join(fullstack_base.ROOTDIR, 'servers') ), config_filenames=[self.placement_cfg_fixture.filename], kill_signal=signal.SIGTERM)) class SRIOVAgentFixture(ServiceFixture): NEUTRON_SRIOV_AGENT = "neutron-sriov-nic-agent" def __init__(self, env_desc, host_desc, test_name, neutron_cfg_fixture, agent_cfg_fixture): super(SRIOVAgentFixture, self).__init__() self.env_desc = env_desc self.host_desc = host_desc self.test_name = test_name self.neutron_cfg_fixture = neutron_cfg_fixture self.neutron_config = self.neutron_cfg_fixture.config self.agent_cfg_fixture = agent_cfg_fixture self.agent_config = agent_cfg_fixture.config def _setUp(self): config_filenames = [self.neutron_cfg_fixture.filename, self.agent_cfg_fixture.filename] self.process_fixture = self.useFixture(ProcessFixture( test_name=self.test_name, process_name=self.NEUTRON_SRIOV_AGENT, exec_name=self.NEUTRON_SRIOV_AGENT, config_filenames=config_filenames, kill_signal=signal.SIGTERM)) class LinuxBridgeAgentFixture(ServiceFixture): NEUTRON_LINUXBRIDGE_AGENT = "neutron-linuxbridge-agent" def __init__(self, env_desc, host_desc, test_name, neutron_cfg_fixture, agent_cfg_fixture, namespace=None): super(LinuxBridgeAgentFixture, self).__init__() self.env_desc = env_desc self.host_desc = host_desc self.test_name = test_name self.neutron_cfg_fixture = neutron_cfg_fixture self.neutron_config = self.neutron_cfg_fixture.config self.agent_cfg_fixture = agent_cfg_fixture self.agent_config = agent_cfg_fixture.config self.namespace = namespace def _setUp(self): config_filenames = [self.neutron_cfg_fixture.filename, self.agent_cfg_fixture.filename] self.process_fixture = self.useFixture( ProcessFixture( test_name=self.test_name, process_name=self.NEUTRON_LINUXBRIDGE_AGENT, exec_name=self.NEUTRON_LINUXBRIDGE_AGENT, config_filenames=config_filenames, namespace=self.namespace ) ) class L3AgentFixture(ServiceFixture): NEUTRON_L3_AGENT = "neutron-l3-agent" def __init__(self, env_desc, host_desc, test_name, neutron_cfg_fixture, l3_agent_cfg_fixture, namespace=None): super(L3AgentFixture, self).__init__() self.env_desc = env_desc self.host_desc = host_desc self.test_name = test_name self.neutron_cfg_fixture = neutron_cfg_fixture self.l3_agent_cfg_fixture = l3_agent_cfg_fixture self.namespace = namespace def _setUp(self): self.plugin_config = self.l3_agent_cfg_fixture.config config_filenames = [self.neutron_cfg_fixture.filename, self.l3_agent_cfg_fixture.filename] # if we execute in namespace as root, then allow rootwrap to find the # executable, otherwise construct full path ourselves if self.namespace: exec_name = 'l3_agent.py' else: exec_name = spawn.find_executable( 'l3_agent.py', path=os.path.join(fullstack_base.ROOTDIR, CMD_FOLDER)) self.process_fixture = self.useFixture( ProcessFixture( test_name=self.test_name, process_name=self.NEUTRON_L3_AGENT, exec_name=exec_name, config_filenames=config_filenames, namespace=self.namespace ) ) def get_namespace_suffix(self): return self.plugin_config.DEFAULT.test_namespace_suffix class DhcpAgentFixture(fixtures.Fixture): NEUTRON_DHCP_AGENT = "neutron-dhcp-agent" def __init__(self, env_desc, host_desc, test_name, neutron_cfg_fixture, agent_cfg_fixture, namespace=None): super(DhcpAgentFixture, self).__init__() self.env_desc = env_desc self.host_desc = host_desc self.test_name = test_name self.neutron_cfg_fixture = neutron_cfg_fixture self.agent_cfg_fixture = 
            agent_cfg_fixture
        self.namespace = namespace

    def _setUp(self):
        self.plugin_config = self.agent_cfg_fixture.config

        config_filenames = [self.neutron_cfg_fixture.filename,
                            self.agent_cfg_fixture.filename]

        # If we execute in a namespace as root, then allow rootwrap to find
        # the executable; otherwise construct the full path ourselves.
        if self.namespace:
            exec_name = 'dhcp_agent.py'
        else:
            exec_name = spawn.find_executable(
                'dhcp_agent.py',
                path=os.path.join(fullstack_base.ROOTDIR, CMD_FOLDER))

        self.process_fixture = self.useFixture(
            ProcessFixture(
                test_name=self.test_name,
                process_name=self.NEUTRON_DHCP_AGENT,
                exec_name=exec_name,
                config_filenames=config_filenames,
                namespace=self.namespace
            )
        )
        self.dhcp_namespace_pattern = re.compile(
            r"qdhcp-[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}%s" %
            self.get_namespace_suffix())
        self.addCleanup(self.clean_dhcp_namespaces)

    def get_agent_hostname(self):
        return self.neutron_cfg_fixture.config['DEFAULT']['host']

    def get_namespace_suffix(self):
        return self.plugin_config.DEFAULT.test_namespace_suffix

    def kill(self):
        self.process_fixture.stop()
        self.clean_dhcp_namespaces()

    def clean_dhcp_namespaces(self):
        """Delete all DHCP namespaces created by the DHCP agent.

        In some DHCP agent HA tests, agents are killed while they are
        handling the DHCP service for network(s). In such a case the DHCP
        namespace is not deleted by the agent itself; leftover namespaces
        are found via the agent's namespace suffix and deleted here.
        """
        for namespace in ip_lib.list_network_namespaces():
            if self.dhcp_namespace_pattern.match(namespace):
                try:
                    ip_lib.delete_network_namespace(namespace)
                except RuntimeError:
                    # Continue cleaning even if a namespace deletion fails
                    pass
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3910453
neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/schedulers/0000755000175000017500000000000000000000000024130 5ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/schedulers/__init__.py0000644000175000017500000000000000000000000026227 0ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/schedulers/dhcp.py0000644000175000017500000000426500000000000025427 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import time

from neutron.scheduler import base_scheduler
from neutron.scheduler import dhcp_agent_scheduler


class AlwaysTheOtherAgentScheduler(base_scheduler.BaseChanceScheduler,
                                   dhcp_agent_scheduler.AutoScheduler):
    """Always choose agents other than the ones selected previously.

    This DHCP agent scheduler is intended for use only in fullstack tests.
    The goal is to ensure that concurrently running scheduling operations
    select different agents, so that over-scheduling becomes visible in the
    number of agents scheduled to the network.

    To use this scheduler, initialize your EnvironmentDescription with
    dhcp_scheduler_class='neutron.tests.fullstack.schedulers.dhcp.'
                         'AlwaysTheOtherAgentScheduler'
    """

    def __init__(self):
        self.last_selected_agent_ids = []
        super(AlwaysTheOtherAgentScheduler, self).__init__(
            dhcp_agent_scheduler.DhcpFilter())

    def select(self, plugin, context, resource_hostable_agents,
               resource_hosted_agents, num_agents_needed):
        possible_agents = []
        for agent in resource_hostable_agents:
            if agent.id in self.last_selected_agent_ids:
                continue
            else:
                possible_agents.append(agent)
        num_agents = min(len(possible_agents), num_agents_needed)
        self.last_selected_agent_ids = [
            ag.id for ag in possible_agents[0:num_agents]]
        # NOTE(lajoskatona): To make the race window big enough, let's
        # delay the actual scheduling.
        time.sleep(5)
        return possible_agents[0:num_agents_needed]
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3910453
neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/servers/0000755000175000017500000000000000000000000023460 5ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/servers/__init__.py0000644000175000017500000000000000000000000025557 0ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/servers/placement.py0000755000175000017500000001145300000000000026011 0ustar00coreycorey00000000000000#!/usr/bin/env python

# Copyright (c) 2019 Ericsson
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
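# NOTE: Routing in the FakePlacement class below is purely name based: the
# HTTP verb plus the non-UUID, non-'placement'/'CUSTOM' path chunks are
# concatenated into a handler method name. A hand-traced sketch (the UUID
# is made up for illustration):
#
#   PUT /placement/resource_providers/0b0b96ab-.../inventories
#     -> 'put' + '_resource_providers' + '_inventories'
#     -> self.put_resource_providers_inventories(id=<uuid>, body=<json>)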
import copy
import sys
import uuid
from wsgiref import simple_server as wsgi_simple_server

from oslo_config import cfg
from oslo_config import types
from oslo_log import log as logging
from oslo_serialization import jsonutils
from six.moves import urllib

from neutron.common import config as common_config

LOG = logging.getLogger(__name__)

PortType = types.Integer(1, 65535)
placement_opts = [
    cfg.Opt('placement_port', type=PortType)
]
cfg.CONF.register_opts(placement_opts)


class FakePlacement(object):

    rp_template = {
        "uuid": None,
        "generation": 0,
        "parent_provider_uuid": None,
        "name": None,
    }

    def __init__(self):
        self.host_rp_uuid = str(uuid.uuid4())
        host_rp = copy.deepcopy(self.rp_template)
        host_rp['uuid'] = self.host_rp_uuid
        self.resource_providers = {
            self.host_rp_uuid: host_rp
        }

    def get_resource_providers(self, **kwargs):
        id = kwargs.get('id', None)
        if not id:
            return jsonutils.dumps(
                {'resource_providers':
                    [self.resource_providers[self.host_rp_uuid]]})
        else:
            return jsonutils.dumps(self.resource_providers[id])

    def put_traits(self, **kwargs):
        # Return an empty string, otherwise wsgiref goes mad
        return ''

    def put_resource_providers(self, **kwargs):
        id = kwargs.get('id', None)
        req_body = kwargs.get('body', None)
        if id:
            rp_dict = copy.deepcopy(self.rp_template)
            rp_dict['uuid'] = id
            rp_dict['parent_provider_uuid'] = req_body['parent_provider_uuid']
            rp_dict['name'] = req_body['name']
            self.resource_providers[rp_dict['uuid']] = rp_dict
            return jsonutils.dumps(rp_dict)

    def put_resource_providers_traits(self, **kwargs):
        resp = kwargs['body']
        resp['resource_provider_generation'] += 1
        return jsonutils.dumps(resp)

    def put_resource_providers_inventories(self, **kwargs):
        resp = kwargs['body']
        resp['resource_provider_generation'] += 1
        return jsonutils.dumps(resp)

    def build_method_name(self, action, path_info):
        path_info = urllib.parse.urlparse(path_info).path
        path_info_list = path_info.strip('/').split('/')
        method_name = action.lower()
        for path_chunk in path_info_list:
            if any(s in path_chunk for s in ['placement', 'CUSTOM']):
                continue
            # If there is a UUID in the path, it should be thrown out
            try:
                uuid.UUID(path_chunk)
            except ValueError:
                method_name += '_' + path_chunk
        return method_name

    def wsgi_app(self, env, start_response):
        response_headers = [('Content-Type', 'application/json')]
        http_status = '200 OK'

        # Poor man's routing
        meth_name = self.build_method_name(env['REQUEST_METHOD'],
                                           env['PATH_INFO'])

        params = {}
        # Fetch params from URL
        try:
            params['id'] = env['PATH_INFO'].split('/')[3]
        except IndexError:
            pass
        # Fetch body
        try:
            request_body_size = int(env.get('CONTENT_LENGTH', 0))
            if request_body_size > 0:
                req_body = env['wsgi.input'].read(request_body_size)
                params['body'] = jsonutils.loads(req_body.decode('utf-8'))
        except ValueError:
            pass

        LOG.debug('Request on %s (%s) with body: %s',
                  env['PATH_INFO'], env['REQUEST_METHOD'], params)
        response = getattr(self, meth_name)(**params)
        LOG.debug('Response from %s: %s', meth_name, response)
        response = response.encode('utf-8')

        start_response(http_status, response_headers)
        return [response]


if __name__ == "__main__":
    common_config.init(sys.argv[1:])
    common_config.setup_logging()
    placement_port = cfg.CONF.placement_port
    LOG.info("Placement fixture started on port: %s", placement_port)
    mock_placement = FakePlacement()
    wsgi_simple_server.make_server(
        '', placement_port, mock_placement.wsgi_app).serve_forever()
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0
neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/test_agent_bandwidth_report.py0000644000175000017500000002302300000000000030115 0ustar00coreycorey00000000000000# Copyright 2018 Ericsson # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools from neutron_lib import constants from neutron.common import utils from neutron.tests.common import net_helpers from neutron.tests.fullstack import base from neutron.tests.fullstack.resources import config as f_const from neutron.tests.fullstack.resources import environment from neutron.tests.unit import testlib_api load_tests = testlib_api.module_load_tests BR_MAPPINGS = 'bridge_mappings' DEV_MAPPINGS = 'device_mappings' def _get_physnet_names_from_mapping(mapping): physnets = [] for pair in mapping.split(','): physnets.append(pair.split(':')[0]) return physnets def _add_new_device_to_agent_config(l2_agent_config, mapping_key_name, new_dev): old_bw = l2_agent_config[constants.RP_BANDWIDTHS] old_mappings = l2_agent_config[mapping_key_name] if new_dev in old_bw or new_dev in old_mappings: return new_mappings = 'physnetnew:%s' % new_dev new_bw = '%s:%s:%s' % (new_dev, f_const.MINIMUM_BANDWIDTH_EGRESS_KBPS, f_const.MINIMUM_BANDWIDTH_INGRESS_KBPS) l2_agent_config[mapping_key_name] = '%s,%s' % ( old_mappings, new_mappings) l2_agent_config[constants.RP_BANDWIDTHS] = '%s,%s' % ( old_bw, new_bw) def _change_agent_conf(l2_agent_config, l2_agent, mapping_key_name, new_dev): _add_new_device_to_agent_config(l2_agent_config, mapping_key_name, new_dev) l2_agent.agent_cfg_fixture.write_config_to_configfile() def _add_new_bridge_and_restart_agent(host): l2_agent = host.l2_agent l2_agent_config = l2_agent.agent_cfg_fixture.config if 'ovs' in host.agents: new_dev = utils.get_rand_device_name(prefix='br-new') _change_agent_conf( l2_agent_config['ovs'], l2_agent, BR_MAPPINGS, new_dev) physnets = _get_physnet_names_from_mapping( l2_agent_config['ovs'][BR_MAPPINGS]) br_phys_new = host.useFixture( net_helpers.OVSBridgeFixture(new_dev)).bridge host.connect_to_central_network_via_vlans(br_phys_new) elif 'sriov' in host.agents: new_dev = utils.get_rand_device_name(prefix='ens7') _change_agent_conf( l2_agent_config['sriov_nic'], l2_agent, 'physical_device_mappings', new_dev) physnets = _get_physnet_names_from_mapping( l2_agent_config['sriov_nic']['physical_device_mappings']) l2_agent.restart() return physnets class TestAgentBandwidthReport(base.BaseFullStackTestCase): scenarios = [ (constants.AGENT_TYPE_OVS, {'l2_agent_type': constants.AGENT_TYPE_OVS}), (constants.AGENT_TYPE_NIC_SWITCH, {'l2_agent_type': constants.AGENT_TYPE_NIC_SWITCH}) ] def setUp(self, env=None): if not env: host_desc = [environment.HostDescription( l3_agent=False, l2_agent_type=self.l2_agent_type)] env_desc = environment.EnvironmentDescription( network_type='vlan', l2_pop=False, report_bandwidths=True, ) env = environment.Environment(env_desc, host_desc) super(TestAgentBandwidthReport, self).setUp(env) def _check_agent_configurations(self, agent_id, expected_physnets): agent = 
self.client.show_agent(agent_id)['agent'] agent_configurations = agent['configurations'] if 'Open vSwitch' in agent['agent_type']: mapping_key = BR_MAPPINGS elif 'NIC Switch' in agent['agent_type']: mapping_key = DEV_MAPPINGS else: return False for physnet in expected_physnets: if physnet not in agent_configurations[mapping_key]: return False bridge_or_devices = agent_configurations[mapping_key][physnet] if (constants.RP_BANDWIDTHS not in agent_configurations or constants.RP_INVENTORY_DEFAULTS not in agent_configurations): return False if mapping_key == BR_MAPPINGS: if (bridge_or_devices not in agent_configurations[constants.RP_BANDWIDTHS]): return False else: for device in bridge_or_devices: if (device not in agent_configurations[constants.RP_BANDWIDTHS]): return False for device in agent_configurations[constants.RP_BANDWIDTHS]: conf_device = agent_configurations[constants.RP_BANDWIDTHS][device] if (f_const.MINIMUM_BANDWIDTH_INGRESS_KBPS != conf_device['ingress'] and f_const.MINIMUM_BANDWIDTH_EGRESS_KBPS != conf_device[device]['egress']): return False return True def test_agent_configurations(self): agents = self.client.list_agents() self.assertEqual(1, len(agents['agents'])) self.assertTrue(agents['agents'][0]['alive']) agent_config = self.environment.hosts[0].l2_agent.agent_config if 'ovs' in self.environment.hosts[0].agents: physnets = _get_physnet_names_from_mapping( agent_config['ovs'][BR_MAPPINGS]) elif 'sriov' in self.environment.hosts[0].agents: physnets = _get_physnet_names_from_mapping( agent_config['sriov_nic']['physical_device_mappings']) self.assertTrue( self._check_agent_configurations(agents['agents'][0]['id'], physnets)) # Add new physnet with bandwidth value to agent config and check # if after agent restart and report_interval wait it is visible in # the configurations field. 
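        # A sketch of what _add_new_device_to_agent_config (defined above)
        # does to the l2 agent options; the device and physnet names here
        # are illustrative only:
        #
        #   bridge_mappings: 'physnet1:br-eth1'
        #       -> 'physnet1:br-eth1,physnetnew:br-new42'
        #   resource_provider_bandwidths: 'br-eth1:EGRESS:INGRESS'
        #       -> 'br-eth1:EGRESS:INGRESS,br-new42:EGRESS:INGRESS'
        #
        # where EGRESS/INGRESS stand for the fullstack
        # MINIMUM_BANDWIDTH_{EGRESS,INGRESS}_KBPS constants.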
physnets = _add_new_bridge_and_restart_agent(self.environment.hosts[0]) agents = self.client.list_agents() l2_agent = agents['agents'][0] neutron_config = self.environment.hosts[0].l2_agent.neutron_config report_interval = neutron_config['agent']['report_interval'] check_agent_alive = functools.partial(self._check_agent_configurations, l2_agent['id'], physnets) utils.wait_until_true( predicate=check_agent_alive, timeout=float(report_interval) + 10, sleep=5) class TestPlacementBandwidthReport(base.BaseFullStackTestCase): scenarios = [ (constants.AGENT_TYPE_OVS, {'l2_agent_type': constants.AGENT_TYPE_OVS, 'mech_drivers': 'openvswitch,linuxbridge', 'placement_port': '8080'}), (constants.AGENT_TYPE_NIC_SWITCH, {'l2_agent_type': constants.AGENT_TYPE_NIC_SWITCH, 'mech_drivers': 'sriovnicswitch', 'placement_port': '8081'}) ] def setUp(self): host_desc = [environment.HostDescription( l3_agent=False, l2_agent_type=self.l2_agent_type)] env_desc = environment.EnvironmentDescription( network_type='vlan', l2_pop=False, mech_drivers=self.mech_drivers, report_bandwidths=True, has_placement=True, placement_port=self.placement_port ) env = environment.Environment(env_desc, host_desc) super(TestPlacementBandwidthReport, self).setUp(env) def _check_agent_not_synced(self): return not self._check_agent_synced() def _check_agent_synced(self): agents = self.client.list_agents(agent_type=self.l2_agent_type) for agent in agents['agents']: if (agent['id'] == self.original_agent_id and agent['resources_synced']): return True return False def test_configurations_are_synced_towards_placement(self): neutron_config = self.environment.hosts[0].l2_agent.neutron_config report_interval = int(neutron_config['agent']['report_interval']) agents = self.client.list_agents(agent_type=self.l2_agent_type) self.assertEqual(1, len(agents['agents'])) self.original_agent_id = agents['agents'][0]['id'] check_agent_synced = functools.partial(self._check_agent_synced) utils.wait_until_true( predicate=check_agent_synced, timeout=report_interval + 10, sleep=1) self.environment.placement.process_fixture.stop() _add_new_bridge_and_restart_agent(self.environment.hosts[0]) check_agent_not_synced = functools.partial( self._check_agent_not_synced) utils.wait_until_true( predicate=check_agent_not_synced, timeout=report_interval + 10, sleep=1) self.environment.placement.process_fixture.start() check_agent_synced = functools.partial(self._check_agent_synced) utils.wait_until_true( predicate=check_agent_synced, timeout=report_interval + 10, sleep=1) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/test_connectivity.py0000644000175000017500000002064300000000000026123 0ustar00coreycorey00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
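# NOTE: The test classes in this module are parametrized through `scenarios`
# lists that testscenarios expands (load_tests hooks this up). A minimal
# sketch of the multiplication used further down, with names shortened for
# illustration:
#
#   import testscenarios
#   agents = [('OVS', {'agent': 'ovs'})]
#   nets = [('VLANs', {'net': 'vlan'}), ('VXLAN', {'net': 'vxlan'})]
#   testscenarios.multiply_scenarios(agents, nets)
#   # -> [('OVS,VLANs', {'agent': 'ovs', 'net': 'vlan'}),
#   #     ('OVS,VXLAN', {'agent': 'ovs', 'net': 'vxlan'})]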
import signal

from neutron_lib import constants
from oslo_log import log as logging
from oslo_utils import uuidutils
import testscenarios

from neutron.common import utils as common_utils
from neutron.tests.common import net_helpers
from neutron.tests.fullstack import base
from neutron.tests.fullstack.resources import config
from neutron.tests.fullstack.resources import environment
from neutron.tests.fullstack.resources import machine
from neutron.tests.unit import testlib_api

load_tests = testlib_api.module_load_tests

SEGMENTATION_ID = 1234

LOG = logging.getLogger(__name__)


class BaseConnectivitySameNetworkTest(base.BaseFullStackTestCase):

    arp_responder = False
    use_dhcp = True
    num_hosts = 3

    def setUp(self):
        host_descriptions = [
            # There's value in enabling L3 agents registration when l2pop
            # is enabled, because l2pop code makes assumptions about the
            # agent types present on machines.
            environment.HostDescription(
                l3_agent=self.l2_pop,
                l2_agent_type=self.l2_agent_type,
                dhcp_agent=self.use_dhcp,
            ) for _ in range(self.num_hosts)]
        env = environment.Environment(
            environment.EnvironmentDescription(
                network_type=self.network_type,
                l2_pop=self.l2_pop,
                arp_responder=self.arp_responder),
            host_descriptions)
        super(BaseConnectivitySameNetworkTest, self).setUp(env)

    def _prepare_network(self, tenant_uuid):
        net_args = {'network_type': self.network_type}
        if self.network_type in ['flat', 'vlan']:
            net_args['physical_network'] = config.PHYSICAL_NETWORK_NAME
        if self.network_type in ['vlan', 'gre', 'vxlan']:
            net_args['segmentation_id'] = SEGMENTATION_ID
        network = self.safe_client.create_network(tenant_uuid, **net_args)
        self.safe_client.create_subnet(
            tenant_uuid, network['id'], '20.0.0.0/24',
            enable_dhcp=self.use_dhcp)
        return network

    def _prepare_vms_in_net(self, tenant_uuid, network):
        vms = machine.FakeFullstackMachinesList(
            self.useFixture(
                machine.FakeFullstackMachine(
                    host,
                    network['id'],
                    tenant_uuid,
                    self.safe_client,
                    use_dhcp=self.use_dhcp))
            for host in self.environment.hosts)
        vms.block_until_all_boot()
        return vms

    def _prepare_vms_in_single_network(self):
        tenant_uuid = uuidutils.generate_uuid()
        network = self._prepare_network(tenant_uuid)
        return self._prepare_vms_in_net(tenant_uuid, network)

    def _test_connectivity(self):
        vms = self._prepare_vms_in_single_network()
        vms.ping_all()


class TestOvsConnectivitySameNetwork(BaseConnectivitySameNetworkTest):

    l2_agent_type = constants.AGENT_TYPE_OVS
    scenarios = [
        ('VXLAN', {'network_type': 'vxlan', 'l2_pop': False}),
        ('GRE-l2pop-arp_responder', {'network_type': 'gre', 'l2_pop': True,
                                     'arp_responder': True}),
        ('VLANs', {'network_type': 'vlan', 'l2_pop': False})]

    def test_connectivity(self):
        self._test_connectivity()


class TestOvsConnectivitySameNetworkOnOvsBridgeControllerStop(
        BaseConnectivitySameNetworkTest):

    num_hosts = 2
    l2_agent_type = constants.AGENT_TYPE_OVS
    scenarios = [
        ('VXLAN', {'network_type': 'vxlan', 'l2_pop': False}),
        ('GRE and l2pop', {'network_type': 'gre', 'l2_pop': True}),
        ('VLANs', {'network_type': 'vlan', 'l2_pop': False})]

    def _test_controller_timeout_does_not_break_connectivity(
            self, kill_signal=None):
        # Environment preparation is effectively the same as the
        # connectivity test
        vms = self._prepare_vms_in_single_network()
        vms.ping_all()

        ns0 = vms[0].namespace
        ip1 = vms[1].ip

        LOG.debug("Stopping agents (hence also OVS bridge controllers)")
        for host in self.environment.hosts:
            if kill_signal is not None:
                host.l2_agent.stop(kill_signal=kill_signal)
            else:
                host.l2_agent.stop()

        # Ping to make sure that 3 x 5 seconds is overcome even under a high
        # load.
The time was chosen to match three times inactivity_probe time, # which is the time after which the OVS vswitchd # treats the controller as dead and starts managing the bridge # by itself when the fail type settings is not set to secure (see # ovs-vsctl man page for further details) with net_helpers.async_ping(ns0, [ip1], timeout=2, count=25) as done: common_utils.wait_until_true( done, exception=RuntimeError("Networking interrupted after " "controllers have vanished")) def test_controller_timeout_does_not_break_connectivity_sigterm(self): self._test_controller_timeout_does_not_break_connectivity() def test_controller_timeout_does_not_break_connectivity_sigkill(self): self._test_controller_timeout_does_not_break_connectivity( signal.SIGKILL) class TestLinuxBridgeConnectivitySameNetwork(BaseConnectivitySameNetworkTest): l2_agent_type = constants.AGENT_TYPE_LINUXBRIDGE scenarios = [ ('VXLAN', {'network_type': 'vxlan', 'l2_pop': False}), ('VLANs', {'network_type': 'vlan', 'l2_pop': False}), ('VXLAN and l2pop', {'network_type': 'vxlan', 'l2_pop': True}) ] def test_connectivity(self): self._test_connectivity() class TestConnectivitySameNetworkNoDhcp(BaseConnectivitySameNetworkTest): scenarios = [ (constants.AGENT_TYPE_OVS, {'l2_agent_type': constants.AGENT_TYPE_OVS}), (constants.AGENT_TYPE_LINUXBRIDGE, {'l2_agent_type': constants.AGENT_TYPE_LINUXBRIDGE}) ] use_dhcp = False network_type = 'vxlan' l2_pop = False def test_connectivity(self): self._test_connectivity() class TestUninterruptedConnectivityOnL2AgentRestart( BaseConnectivitySameNetworkTest): num_hosts = 2 ovs_agent_scenario = [('OVS', {'l2_agent_type': constants.AGENT_TYPE_OVS})] lb_agent_scenario = [('LB', {'l2_agent_type': constants.AGENT_TYPE_LINUXBRIDGE})] network_scenarios = [ ('Flat network', {'network_type': 'flat', 'l2_pop': False}), ('VLANs', {'network_type': 'vlan', 'l2_pop': False}), ('VXLAN', {'network_type': 'vxlan', 'l2_pop': False}), ] scenarios = ( testscenarios.multiply_scenarios(ovs_agent_scenario, network_scenarios) + testscenarios.multiply_scenarios(lb_agent_scenario, network_scenarios) ) def test_l2_agent_restart(self, agent_restart_timeout=20): # Environment preparation is effectively the same as connectivity test vms = self._prepare_vms_in_single_network() vms.ping_all() ns0 = vms[0].namespace ip1 = vms[1].ip agents = [host.l2_agent for host in self.environment.hosts] # Restart agents on all nodes simultaneously while pinging across # the hosts. The ping has to cross int and phys bridges and travels # via central bridge as the vms are on separate hosts. self._assert_ping_during_agents_restart( agents, ns0, [ip1], restart_timeout=agent_restart_timeout, ping_timeout=2, count=agent_restart_timeout) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/test_dhcp_agent.py0000644000175000017500000002043000000000000025473 0ustar00coreycorey00000000000000# Copyright 2016 OVH SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
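# NOTE: TestDhcpAgentHARaceCondition (at the bottom of this module) injects
# the fullstack-only scheduler from schedulers/dhcp.py. A minimal sketch of
# the wiring, assuming EnvironmentDescription simply forwards the dotted
# class path into the neutron-server configuration:
#
#   env_desc = environment.EnvironmentDescription(
#       dhcp_scheduler_class='neutron.tests.fullstack.schedulers.dhcp.'
#                            'AlwaysTheOtherAgentScheduler')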
import random from neutron_lib import constants from oslo_utils import uuidutils from neutron.agent.linux import ip_lib from neutron.common import utils as common_utils from neutron.tests.fullstack.agents import dhcp_agent from neutron.tests.fullstack import base from neutron.tests.fullstack.resources import environment from neutron.tests.fullstack.resources import machine from neutron.tests.unit import testlib_api load_tests = testlib_api.module_load_tests class BaseDhcpAgentTest(base.BaseFullStackTestCase): scenarios = [ (constants.AGENT_TYPE_OVS, {'l2_agent_type': constants.AGENT_TYPE_OVS}), (constants.AGENT_TYPE_LINUXBRIDGE, {'l2_agent_type': constants.AGENT_TYPE_LINUXBRIDGE}) ] boot_vm_for_test = True dhcp_scheduler_class = None def setUp(self): host_descriptions = [ environment.HostDescription( dhcp_agent=True, l2_agent_type=self.l2_agent_type ) for _ in range(self.number_of_hosts)] env = environment.Environment( environment.EnvironmentDescription( l2_pop=False, arp_responder=False, agent_down_time=self.agent_down_time, dhcp_scheduler_class=self.dhcp_scheduler_class, ), host_descriptions) super(BaseDhcpAgentTest, self).setUp(env) self.project_id = uuidutils.generate_uuid() if self.boot_vm_for_test: self._create_network_subnet_and_vm() def _spawn_vm(self): host = random.choice(self.environment.hosts) vm = self.useFixture( machine.FakeFullstackMachine( host, self.network['id'], self.project_id, self.safe_client, use_dhcp=True)) vm.block_until_boot() return vm def _create_network_subnet_and_vm(self): self.network = self.safe_client.create_network(self.project_id) self.subnet = self.safe_client.create_subnet( self.project_id, self.network['id'], cidr='10.0.0.0/24', gateway_ip='10.0.0.1', name='subnet-test', enable_dhcp=True) self.vm = self._spawn_vm() class TestDhcpAgentNoHA(BaseDhcpAgentTest): number_of_hosts = 1 agent_down_time = 60 def test_dhcp_assignment(self): # First check if network was scheduled to one DHCP agent dhcp_agents = self.client.list_dhcp_agent_hosting_networks( self.network['id']) self.assertEqual(1, len(dhcp_agents['agents'])) # And check if IP and gateway config is fine on FakeMachine self.vm.block_until_dhcp_config_done() def test_mtu_update(self): # The test case needs access to devices in nested namespaces. ip_lib # doesn't support it, and it's probably unsafe to touch the library for # testing matters. 
# TODO(jlibosva) revisit when ip_lib supports nested namespaces if self.environment.hosts[0].dhcp_agent.namespace is not None: self.skipTest("ip_lib doesn't support nested namespaces") self.vm.block_until_dhcp_config_done() namespace = dhcp_agent._get_namespace_name( self.network['id'], suffix=self.environment.hosts[0].dhcp_agent.get_namespace_suffix()) self.assert_namespace_exists(namespace) ip = ip_lib.IPWrapper(namespace) devices = ip.get_devices() self.assertEqual(1, len(devices)) dhcp_dev = devices[0] mtu = dhcp_dev.link.mtu self.assertEqual(1450, mtu) mtu -= 1 self.safe_client.update_network(self.network['id'], mtu=mtu) common_utils.wait_until_true(lambda: dhcp_dev.link.mtu == mtu) class TestDhcpAgentHA(BaseDhcpAgentTest): number_of_hosts = 2 agent_down_time = 30 def _wait_until_network_rescheduled(self, old_agent): def _agent_rescheduled(): network_agents = self.client.list_dhcp_agent_hosting_networks( self.network['id'])['agents'] if network_agents: return network_agents[0]['id'] != old_agent['id'] return False common_utils.wait_until_true(_agent_rescheduled, timeout=120) def _kill_dhcp_agent(self, agent): for host in self.environment.hosts: hostname = host.dhcp_agent.get_agent_hostname() if hostname == agent['host']: host.dhcp_agent.kill() self._wait_until_agent_down(agent['id']) break def _add_network_to_new_agent(self): dhcp_agents = self.client.list_agents( agent_type=constants.AGENT_TYPE_DHCP)['agents'] dhcp_agents_ids = [agent['id'] for agent in dhcp_agents] current_agents = self.client.list_dhcp_agent_hosting_networks( self.network['id'])['agents'] current_agents_ids = [agent['id'] for agent in current_agents] new_agents_ids = list(set(dhcp_agents_ids) - set(current_agents_ids)) if new_agents_ids: new_agent_id = random.choice(new_agents_ids) self.client.add_network_to_dhcp_agent( new_agent_id, {'network_id': self.network['id']}) def test_reschedule_network_on_new_agent(self): network_dhcp_agents = self.client.list_dhcp_agent_hosting_networks( self.network['id'])['agents'] self.assertEqual(1, len(network_dhcp_agents)) self._kill_dhcp_agent(network_dhcp_agents[0]) self._wait_until_network_rescheduled(network_dhcp_agents[0]) # ensure that only one agent is handling DHCP for this network new_network_dhcp_agents = self.client.list_dhcp_agent_hosting_networks( self.network['id'])['agents'] self.assertEqual(1, len(new_network_dhcp_agents)) # check if new vm will get IP from new DHCP agent new_vm = self._spawn_vm() new_vm.block_until_dhcp_config_done() def test_multiple_agents_for_network(self): network_dhcp_agents = self.client.list_dhcp_agent_hosting_networks( self.network['id'])['agents'] self.assertEqual(1, len(network_dhcp_agents)) self._add_network_to_new_agent() # ensure that two agents are handling DHCP for this network network_dhcp_agents = self.client.list_dhcp_agent_hosting_networks( self.network['id'])['agents'] self.assertEqual(2, len(network_dhcp_agents)) self._kill_dhcp_agent(network_dhcp_agents[0]) # check if new vm will get IP from DHCP agent which is still alive new_vm = self._spawn_vm() new_vm.block_until_dhcp_config_done() class TestDhcpAgentHARaceCondition(BaseDhcpAgentTest): agent_down_time = 30 number_of_hosts = 2 boot_vm_for_test = False dhcp_scheduler_class = ('neutron.tests.fullstack.schedulers.dhcp.' 
'AlwaysTheOtherAgentScheduler') def setUp(self): super(TestDhcpAgentHARaceCondition, self).setUp() self._create_network_with_multiple_subnets() def _create_network_with_multiple_subnets(self): self.network = self.safe_client.create_network(self.project_id) funcs = [] args = [] for i in range(4): funcs.append(self.safe_client.create_subnet) args.append(( self.project_id, self.network['id'], '10.0.%s.0/24' % i, '10.0.%s.1' % i, 'subnet-test-%s' % i, True )) self._simulate_concurrent_requests_process_and_raise(funcs, args) def test_dhcp_agent_ha_with_race_condition(self): network_dhcp_agents = self.client.list_dhcp_agent_hosting_networks( self.network['id'])['agents'] self.assertEqual(1, len(network_dhcp_agents)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/test_firewall.py0000644000175000017500000001343200000000000025210 0ustar00coreycorey00000000000000# Copyright 2018 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools from neutron_lib import constants from oslo_log import log as logging from oslo_utils import uuidutils from neutron.agent.common import ovs_lib from neutron.agent.linux import iptables_firewall from neutron.agent.linux import iptables_manager from neutron.agent.linux.openvswitch_firewall import iptables as ovs_iptables from neutron.common import utils from neutron.tests.common import machine_fixtures from neutron.tests.fullstack import base from neutron.tests.fullstack.resources import environment from neutron.tests.fullstack.resources import machine LOG = logging.getLogger(__name__) class IptablesNotConfiguredException(Exception): pass class VmsUnreachableException(Exception): pass class FirewallMigrationTestCase(base.BaseFullStackTestCase): def setUp(self): host_descriptions = [ environment.HostDescription( l3_agent=False, l2_agent_type=constants.AGENT_TYPE_OVS, firewall_driver='iptables_hybrid', dhcp_agent=False, )] env = environment.Environment( environment.EnvironmentDescription(), host_descriptions) super(FirewallMigrationTestCase, self).setUp(env) # fullstack doesn't separate nodes running ovs agent so iptables rules # are implemented in root namespace self.iptables_manager = iptables_manager.IptablesManager() def _prepare_resources(self): self.tenant_uuid = uuidutils.generate_uuid() network = self.safe_client.create_network(self.tenant_uuid) self.safe_client.create_subnet( self.tenant_uuid, network['id'], '20.0.0.0/24', enable_dhcp=False) vms = machine.FakeFullstackMachinesList( self.useFixture( machine.FakeFullstackMachine( self.environment.hosts[0], network['id'], self.tenant_uuid, self.safe_client, use_dhcp=False)) for i in range(2)) vms.block_until_all_boot() for vm in vms: self._add_icmp_security_group_rule(vm) return vms def _add_icmp_security_group_rule(self, vm): sg_id = self.safe_client.create_security_group(self.tenant_uuid)['id'] self.safe_client.create_security_group_rule( self.tenant_uuid, sg_id, 
direction=constants.INGRESS_DIRECTION, ethertype=constants.IPv4, protocol=constants.PROTO_NAME_ICMP) self.safe_client.client.update_port( vm.neutron_port['id'], body={'port': {'security_groups': [sg_id]}}) self.addCleanup( self.safe_client.client.update_port, vm.neutron_port['id'], body={'port': {'security_groups': []}}) def _validate_iptables_rules(self, vms): """Check if rules from iptables firewall are configured. Raises IptablesNotConfiguredException exception if no rules are found. """ for vm in vms: vm_tap_device = iptables_firewall.get_hybrid_port_name( vm.neutron_port['id']) filter_rules = self.iptables_manager.get_rules_for_table('filter') if not any(vm_tap_device in line for line in filter_rules): raise IptablesNotConfiguredException( "There are no iptables rules configured for interface %s" % vm_tap_device) def _switch_firewall(self, firewall_driver): """Switch firewall_driver to given driver and restart the agent.""" l2_agent = self.environment.hosts[0].l2_agent l2_agent_config = l2_agent.agent_cfg_fixture.config l2_agent_config['securitygroup']['firewall_driver'] = firewall_driver l2_agent.agent_cfg_fixture.write_config_to_configfile() l2_agent.restart() int_bridge = ovs_lib.OVSBridge( l2_agent_config['ovs']['integration_bridge']) predicate = functools.partial( ovs_iptables.is_bridge_cleaned, int_bridge) utils.wait_until_true( predicate, exception=RuntimeError( "Bridge %s hasn't been marked as clean." % int_bridge.br_name)) def test_migration(self): vms = self._prepare_resources() # Make sure ICMP packets can get through with iptables firewall vms.ping_all() self._validate_iptables_rules(vms) self._switch_firewall('openvswitch') # Make sure security groups still work after migration vms.ping_all() self.assertRaises( IptablesNotConfiguredException, self._validate_iptables_rules, vms) # Remove security groups so traffic cannot get through for vm in vms: self.safe_client.client.update_port( vm.neutron_port['id'], body={'port': {'security_groups': []}}) # TODO(jlibosva): Test all permutations and don't fail on the first one self.assertRaises(machine_fixtures.FakeMachineException, vms.ping_all) # Add back some security groups allowing ICMP and test traffic can now # get through for vm in vms: self._add_icmp_security_group_rule(vm) vms.ping_all() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/test_l3_agent.py0000644000175000017500000005543400000000000025107 0ustar00coreycorey00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
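# NOTE: The tests below converge on the expected state by polling with
# common_utils.wait_until_true; a minimal usage sketch (the predicate and
# timings are illustrative):
#
#   common_utils.wait_until_true(
#       lambda: dev.link.mtu == expected_mtu,
#       timeout=60, sleep=1,
#       exception=RuntimeError('MTU was not applied'))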
import functools import os import time import netaddr from neutron_lib import constants from oslo_utils import uuidutils from neutron.agent.l3 import ha_router from neutron.agent.l3 import namespaces from neutron.agent.linux import ip_lib from neutron.agent.linux import l3_tc_lib from neutron.common import utils as common_utils from neutron.tests import base as tests_base from neutron.tests.common.exclusive_resources import ip_network from neutron.tests.fullstack import base from neutron.tests.fullstack.resources import environment from neutron.tests.fullstack.resources import machine from neutron.tests.unit import testlib_api load_tests = testlib_api.module_load_tests class TestL3Agent(base.BaseFullStackTestCase): def _create_external_network_and_subnet(self, tenant_id): network = self.safe_client.create_network( tenant_id, name='public', external=True) subnet = self._create_external_subnet(tenant_id, network['id']) return network, subnet def _create_external_subnet(self, tenant_id, network_id): cidr = self.useFixture( ip_network.ExclusiveIPNetwork( "240.0.0.0", "240.255.255.255", "24")).network subnet = self.safe_client.create_subnet(tenant_id, network_id, cidr) return subnet def block_until_port_status_active(self, port_id): def is_port_status_active(): port = self.client.show_port(port_id) return port['port']['status'] == 'ACTIVE' common_utils.wait_until_true(lambda: is_port_status_active(), sleep=1) def _create_and_attach_subnet( self, tenant_id, subnet_cidr, network_id, router_id): subnet = self.safe_client.create_subnet( tenant_id, network_id, subnet_cidr) router_interface_info = self.safe_client.add_router_interface( router_id, subnet['id']) self.block_until_port_status_active( router_interface_info['port_id']) def _boot_fake_vm_in_network(self, host, tenant_id, network_id, wait=True): vm = self.useFixture( machine.FakeFullstackMachine( host, network_id, tenant_id, self.safe_client, use_dhcp=True)) if wait: vm.block_until_boot() return vm def _create_net_subnet_and_vm(self, tenant_id, subnet_cidrs, host, router): network = self.safe_client.create_network(tenant_id) for cidr in subnet_cidrs: self._create_and_attach_subnet( tenant_id, cidr, network['id'], router['id']) return self._boot_fake_vm_in_network(host, tenant_id, network['id']) def _test_gateway_ip_changed(self): tenant_id = uuidutils.generate_uuid() ext_net, ext_sub = self._create_external_network_and_subnet(tenant_id) external_vm = self._create_external_vm(ext_net, ext_sub) router = self.safe_client.create_router(tenant_id, external_network=ext_net['id']) vm = self._create_net_subnet_and_vm( tenant_id, ['20.0.0.0/24', '2001:db8:aaaa::/64'], self.environment.hosts[1], router) # ping external vm to test snat vm.block_until_ping(external_vm.ip) fip = self.safe_client.create_floatingip( tenant_id, ext_net['id'], vm.ip, vm.neutron_port['id']) # ping floating ip from external vm external_vm.block_until_ping(fip['floating_ip_address']) # ping router gateway IP old_gw_ip = router['external_gateway_info'][ 'external_fixed_ips'][0]['ip_address'] external_vm.block_until_ping(old_gw_ip) gateway_port = self.safe_client.list_ports( device_id=router['id'], device_owner=constants.DEVICE_OWNER_ROUTER_GW)[0] ip_1, ip_2 = self._find_available_ips(ext_net, ext_sub, 2) self.safe_client.update_port(gateway_port['id'], fixed_ips=[ {'ip_address': ip_1}, {'ip_address': ip_2}]) # ping router gateway new IPs external_vm.block_until_ping(ip_1) external_vm.block_until_ping(ip_2) # ping router old gateway IP, should fail now 
external_vm.block_until_no_ping(old_gw_ip) def _test_external_subnet_changed(self): tenant_id = uuidutils.generate_uuid() ext_net, ext_sub = self._create_external_network_and_subnet(tenant_id) external_vm = self._create_external_vm(ext_net, ext_sub) router = self.safe_client.create_router(tenant_id, external_network=ext_net['id']) vm = self._create_net_subnet_and_vm( tenant_id, ['20.0.0.0/24', '2001:db8:aaaa::/64'], self.environment.hosts[1], router) # ping external vm to test snat vm.block_until_ping(external_vm.ip) # ping router gateway IP gw_ip = router['external_gateway_info'][ 'external_fixed_ips'][0]['ip_address'] external_vm.block_until_ping(gw_ip) # create second external subnet and external vm on it ext_sub_2 = self._create_external_subnet(tenant_id, ext_net['id']) external_vm_2 = self._create_external_vm(ext_net, ext_sub_2) # move original router gateway IP to be on second subnet ip_1, ip_2 = self._find_available_ips(ext_net, ext_sub_2, 2) ext_info = { 'network_id': ext_net['id'], 'external_fixed_ips': [{'ip_address': ip_2, 'subnet_id': ext_sub_2['id']}]} self.safe_client.update_router(router['id'], external_gateway_info=ext_info) # ping external vm_2 to test snat vm.block_until_ping(external_vm_2.ip) # ping router gateway new IP external_vm_2.block_until_ping(ip_2) # ping original router old gateway IP, should fail now external_vm.block_until_no_ping(gw_ip) # clear the external gateway so ext_sub_2 can be deleted self.safe_client.update_router(router['id'], external_gateway_info={}) def _get_namespace(self, router_id, agent=None): namespace = namespaces.build_ns_name(namespaces.NS_PREFIX, router_id) if agent: suffix = agent.get_namespace_suffix() else: suffix = self.environment.hosts[0].l3_agent.get_namespace_suffix() return "%s@%s" % (namespace, suffix) def _get_l3_agents_with_ha_state( self, l3_agents, router_id, ha_state=None): found_agents = [] agents_hosting_router = self.client.list_l3_agent_hosting_routers( router_id)['agents'] for agent in l3_agents: agent_host = agent.neutron_cfg_fixture.get_host() for agent_hosting_router in agents_hosting_router: if (agent_hosting_router['host'] == agent_host and ((ha_state is None) or ( agent_hosting_router['ha_state'] == ha_state))): found_agents.append(agent) break return found_agents def _router_fip_qos_after_admin_state_down_up(self, ha=False): def get_router_gw_interface(): devices = ip.get_devices() return [dev.name for dev in devices if dev.name.startswith('qg-')] tenant_id = uuidutils.generate_uuid() ext_net, ext_sub = self._create_external_network_and_subnet(tenant_id) external_vm = self._create_external_vm(ext_net, ext_sub) router = self.safe_client.create_router(tenant_id, ha=ha, external_network=ext_net['id']) vm = self._create_net_subnet_and_vm( tenant_id, ['20.0.0.0/24', '2001:db8:aaaa::/64'], self.environment.hosts[1], router) # ping external vm to test snat vm.block_until_ping(external_vm.ip) qos_policy = self.safe_client.create_qos_policy( tenant_id, 'fs_policy', 'Fullstack testing policy', shared='False', is_default='False') self.safe_client.create_bandwidth_limit_rule( tenant_id, qos_policy['id'], 1111, 2222, constants.INGRESS_DIRECTION) self.safe_client.create_bandwidth_limit_rule( tenant_id, qos_policy['id'], 3333, 4444, constants.EGRESS_DIRECTION) fip = self.safe_client.create_floatingip( tenant_id, ext_net['id'], vm.ip, vm.neutron_port['id'], qos_policy_id=qos_policy['id']) # ping floating ip from external vm external_vm.block_until_ping(fip['floating_ip_address']) self.safe_client.update_router(router['id'], 
admin_state_up=False) external_vm.block_until_no_ping(fip['floating_ip_address']) self.safe_client.update_router(router['id'], admin_state_up=True) external_vm.block_until_ping(fip['floating_ip_address']) if ha: l3_agents = [host.agents['l3'] for host in self.environment.hosts] router_agent = self._get_l3_agents_with_ha_state( l3_agents, router['id'])[0] qrouter_ns = self._get_namespace( router['id'], router_agent) else: qrouter_ns = self._get_namespace(router['id']) ip = ip_lib.IPWrapper(qrouter_ns) try: common_utils.wait_until_true(get_router_gw_interface) except common_utils.WaitTimeout: self.fail('Router gateway interface "qg-*" not found') interface_name = get_router_gw_interface()[0] tc_wrapper = l3_tc_lib.FloatingIPTcCommand( interface_name, namespace=qrouter_ns) common_utils.wait_until_true( functools.partial( self._wait_until_filters_set, tc_wrapper), timeout=60) def _wait_until_filters_set(self, tc_wrapper): def _is_filter_set(direction): filter_ids = tc_wrapper.get_existing_filter_ids( direction) if not filter_ids: return False return 1 == len(filter_ids) return (_is_filter_set(constants.INGRESS_DIRECTION) and _is_filter_set(constants.EGRESS_DIRECTION)) class TestLegacyL3Agent(TestL3Agent): def setUp(self): host_descriptions = [ environment.HostDescription(l3_agent=True, dhcp_agent=True, l3_agent_extensions="fip_qos"), environment.HostDescription()] env = environment.Environment( environment.EnvironmentDescription( network_type='vlan', l2_pop=False, qos=True), host_descriptions) super(TestLegacyL3Agent, self).setUp(env) def test_namespace_exists(self): tenant_id = uuidutils.generate_uuid() router = self.safe_client.create_router(tenant_id) network = self.safe_client.create_network(tenant_id) subnet = self.safe_client.create_subnet( tenant_id, network['id'], '20.0.0.0/24', gateway_ip='20.0.0.1') self.safe_client.add_router_interface(router['id'], subnet['id']) namespace = self._get_namespace(router['id']) self.assert_namespace_exists(namespace) def test_mtu_update(self): tenant_id = uuidutils.generate_uuid() router = self.safe_client.create_router(tenant_id) network = self.safe_client.create_network(tenant_id) subnet = self.safe_client.create_subnet( tenant_id, network['id'], '20.0.0.0/24', gateway_ip='20.0.0.1') self.safe_client.add_router_interface(router['id'], subnet['id']) namespace = self._get_namespace(router['id']) self.assert_namespace_exists(namespace) ip = ip_lib.IPWrapper(namespace) common_utils.wait_until_true(lambda: ip.get_devices()) devices = ip.get_devices() self.assertEqual(1, len(devices)) ri_dev = devices[0] mtu = ri_dev.link.mtu self.assertEqual(1500, mtu) mtu -= 1 network = self.safe_client.update_network(network['id'], mtu=mtu) common_utils.wait_until_true(lambda: ri_dev.link.mtu == mtu) def test_east_west_traffic(self): tenant_id = uuidutils.generate_uuid() router = self.safe_client.create_router(tenant_id) vm1 = self._create_net_subnet_and_vm( tenant_id, ['20.0.0.0/24', '2001:db8:aaaa::/64'], self.environment.hosts[0], router) vm2 = self._create_net_subnet_and_vm( tenant_id, ['21.0.0.0/24', '2001:db8:bbbb::/64'], self.environment.hosts[1], router) vm1.block_until_ping(vm2.ip) # Verify ping6 from vm2 to vm1 IPv6 Address vm2.block_until_ping(vm1.ipv6) def test_north_south_traffic(self): # This function creates an external network which is connected to # central_bridge and spawns an external_vm on it. # The external_vm is configured with the gateway_ip (both v4 & v6 # addresses) of external subnet. 
Later, it creates a tenant router, # a tenant network and two tenant subnets (v4 and v6). The tenant # router is associated with tenant network and external network to # provide north-south connectivity to the VMs. # We validate the following in this testcase. # 1. SNAT support: using ping from tenant VM to external_vm # 2. Floating IP support: using ping from external_vm to VM floating ip # 3. IPv6 ext connectivity: using ping6 from tenant vm to external_vm. tenant_id = uuidutils.generate_uuid() ext_net, ext_sub = self._create_external_network_and_subnet(tenant_id) external_vm = self._create_external_vm(ext_net, ext_sub) # Create an IPv6 subnet in the external network v6network = self.useFixture( ip_network.ExclusiveIPNetwork( "2001:db8:1234::1", "2001:db8:1234::10", "64")).network ext_v6sub = self.safe_client.create_subnet( tenant_id, ext_net['id'], v6network) router = self.safe_client.create_router(tenant_id, external_network=ext_net['id']) # Configure the gateway_ip of external v6subnet on the external_vm. external_vm.ipv6_cidr = common_utils.ip_to_cidr( ext_v6sub['gateway_ip'], 64) # Configure an IPv6 downstream route to the v6Address of router gw port for fixed_ip in router['external_gateway_info']['external_fixed_ips']: if netaddr.IPNetwork(fixed_ip['ip_address']).version == 6: external_vm.set_default_gateway(fixed_ip['ip_address']) vm = self._create_net_subnet_and_vm( tenant_id, ['20.0.0.0/24', '2001:db8:aaaa::/64'], self.environment.hosts[1], router) # ping external vm to test snat vm.block_until_ping(external_vm.ip) fip = self.safe_client.create_floatingip( tenant_id, ext_net['id'], vm.ip, vm.neutron_port['id']) # ping floating ip from external vm external_vm.block_until_ping(fip['floating_ip_address']) # Verify VM is able to reach the router interface. vm.block_until_ping(vm.gateway_ipv6) # Verify north-south connectivity using ping6 to external_vm. vm.block_until_ping(external_vm.ipv6) # Now let's remove and create again phys bridge and check connectivity # once again br_phys = self.environment.hosts[0].br_phys br_phys.destroy() br_phys.create() self.environment.hosts[0].connect_to_central_network_via_vlans( br_phys) # ping floating ip from external vm external_vm.block_until_ping(fip['floating_ip_address']) # Verify VM is able to reach the router interface. vm.block_until_ping(vm.gateway_ipv6) # Verify north-south connectivity using ping6 to external_vm. 
vm.block_until_ping(external_vm.ipv6) def test_gateway_ip_changed(self): self._test_gateway_ip_changed() def test_external_subnet_changed(self): self._test_external_subnet_changed() def test_router_fip_qos_after_admin_state_down_up(self): self._router_fip_qos_after_admin_state_down_up() class TestHAL3Agent(TestL3Agent): def setUp(self): host_descriptions = [ environment.HostDescription(l3_agent=True, dhcp_agent=True, l3_agent_extensions="fip_qos") for _ in range(2)] env = environment.Environment( environment.EnvironmentDescription( network_type='vlan', l2_pop=True, qos=True), host_descriptions) super(TestHAL3Agent, self).setUp(env) def _is_ha_router_active_on_one_agent(self, router_id): agents = self.client.list_l3_agent_hosting_routers(router_id) return ( agents['agents'][0]['ha_state'] != agents['agents'][1]['ha_state']) def test_ha_router(self): # TODO(amuller): Test external connectivity before and after a # failover, see: https://review.opendev.org/#/c/196393/ tenant_id = uuidutils.generate_uuid() router = self.safe_client.create_router(tenant_id, ha=True) common_utils.wait_until_true( lambda: len(self.client.list_l3_agent_hosting_routers( router['id'])['agents']) == 2, timeout=90) common_utils.wait_until_true( functools.partial( self._is_ha_router_active_on_one_agent, router['id']), timeout=90) def _get_keepalived_state(self, keepalived_state_file): with open(keepalived_state_file, "r") as fd: return fd.read() def _get_state_file_for_master_agent(self, router_id): for host in self.environment.hosts: keepalived_state_file = os.path.join( host.neutron_config.state_path, "ha_confs", router_id, "state") if self._get_keepalived_state(keepalived_state_file) == "master": return keepalived_state_file def test_keepalived_multiple_sighups_does_not_forfeit_mastership(self): """Setup a complete "Neutron stack" - both an internal and an external network+subnet, and a router connected to both. """ tenant_id = uuidutils.generate_uuid() ext_net, ext_sub = self._create_external_network_and_subnet(tenant_id) router = self.safe_client.create_router(tenant_id, ha=True, external_network=ext_net['id']) common_utils.wait_until_true( lambda: len(self.client.list_l3_agent_hosting_routers( router['id'])['agents']) == 2, timeout=90) common_utils.wait_until_true( functools.partial( self._is_ha_router_active_on_one_agent, router['id']), timeout=90) keepalived_state_file = self._get_state_file_for_master_agent( router['id']) self.assertIsNotNone(keepalived_state_file) network = self.safe_client.create_network(tenant_id) self._create_and_attach_subnet( tenant_id, '13.37.0.0/24', network['id'], router['id']) # Create 10 fake VMs, each with a floating ip. Each floating ip # association should send a SIGHUP to the keepalived's parent process, # unless the Throttler works. host = self.environment.hosts[0] vms = [self._boot_fake_vm_in_network(host, tenant_id, network['id'], wait=False) for i in range(10)] for vm in vms: self.safe_client.create_floatingip( tenant_id, ext_net['id'], vm.ip, vm.neutron_port['id']) # Check that the keepalived's state file has not changed and is still # master. This will indicate that the Throttler works. We want to check # for ha_vrrp_advert_int (the default is 2 seconds), plus a bit more. 
time_to_stop = (time.time() + (common_utils.DEFAULT_THROTTLER_VALUE * ha_router.THROTTLER_MULTIPLIER * 1.3)) while True: if time.time() > time_to_stop: break self.assertEqual( "master", self._get_keepalived_state(keepalived_state_file)) @tests_base.unstable_test("bug 1798475") def test_ha_router_restart_agents_no_packet_lost(self): tenant_id = uuidutils.generate_uuid() ext_net, ext_sub = self._create_external_network_and_subnet(tenant_id) router = self.safe_client.create_router(tenant_id, ha=True, external_network=ext_net['id']) external_vm = self._create_external_vm(ext_net, ext_sub) common_utils.wait_until_true( lambda: len(self.client.list_l3_agent_hosting_routers( router['id'])['agents']) == 2, timeout=90) common_utils.wait_until_true( functools.partial( self._is_ha_router_active_on_one_agent, router['id']), timeout=90) router_ip = router['external_gateway_info'][ 'external_fixed_ips'][0]['ip_address'] l3_agents = [host.agents['l3'] for host in self.environment.hosts] l3_standby_agents = self._get_l3_agents_with_ha_state( l3_agents, router['id'], 'standby') l3_active_agents = self._get_l3_agents_with_ha_state( l3_agents, router['id'], 'active') self.assertEqual(1, len(l3_active_agents)) # Let's check first if connectivity from external_vm to router's # external gateway IP is possible before we restart agents external_vm.block_until_ping(router_ip) self._assert_ping_during_agents_restart( l3_standby_agents, external_vm.namespace, [router_ip], count=60) self._assert_ping_during_agents_restart( l3_active_agents, external_vm.namespace, [router_ip], count=60) def test_gateway_ip_changed(self): self._test_gateway_ip_changed() def test_external_subnet_changed(self): self._test_external_subnet_changed() def test_router_fip_qos_after_admin_state_down_up(self): self._router_fip_qos_after_admin_state_down_up(ha=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/test_logging.py0000644000175000017500000001520500000000000025031 0ustar00coreycorey00000000000000# Copyright 2018 Fujitsu Limited # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
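# test_logging.py below verifies security-group logging by grepping the
# agent's log output with a regular expression built from the action and
# the log resource id.  The following self-contained sketch shows that
# matching logic in isolation; the sample log line is made up for
# illustration and is not real agent output.
import re


def is_log_event(log_lines, log_id, action, regex_str=None):
    # Anchor on the action and the log_id, optionally narrowing the match
    # with an extra caller-supplied regex fragment (e.g. "dst_port=22").
    pattern = r"^.*action=%s.* log_resource_ids=\[[^\]]*%s" % (action, log_id)
    if regex_str:
        pattern += ".*" + regex_str
    regex_p = re.compile(pattern)
    return any(regex_p.match(line) for line in log_lines)


sample = ["action=DROP ... log_resource_ids=['abc-123'] ... dst_port=22"]
assert is_log_event(sample, 'abc-123', 'DROP', regex_str="dst_port=22")
assert not is_log_event(sample, 'abc-123', 'ACCEPT')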
import re

from neutron_lib import constants
from oslo_utils import uuidutils

from neutron.common import utils
from neutron.plugins.ml2.drivers.openvswitch.agent.common import (
    constants as ovs_const)
from neutron.tests.common import net_helpers
from neutron.tests.fullstack import base
from neutron.tests.fullstack.resources import environment
from neutron.tests.fullstack.resources import machine


class BaseLoggingTestCase(base.BaseFullStackTestCase):
    number_of_hosts = 1

    def setUp(self):
        host_desc = [
            environment.HostDescription(
                l2_agent_type=constants.AGENT_TYPE_OVS,
                firewall_driver='openvswitch',
                dhcp_agent=True) for _ in range(self.number_of_hosts)]
        env_desc = environment.EnvironmentDescription(
            mech_drivers='openvswitch', log=True)
        env = environment.Environment(env_desc, host_desc)
        super(BaseLoggingTestCase, self).setUp(env)
        self.tenant_id = uuidutils.generate_uuid()
        self.network = self.safe_client.create_network(
            self.tenant_id, 'network-test')
        self.subnet = self.safe_client.create_subnet(
            self.tenant_id, self.network['id'],
            cidr='10.0.0.0/24',
            gateway_ip='10.0.0.1',
            name='subnet-test')

    def assert_no_connection(self, *args, **kwargs):
        netcat = net_helpers.NetcatTester(*args, **kwargs)
        try:
            utils.wait_until_true(netcat.test_no_connectivity)
        finally:
            netcat.stop_processes()

    def _wait_for_log_rules_applied(self, vm, table, actions):
        def _is_log_flow_set(table, actions):
            flows = vm.bridge.dump_flows_for_table(table)
            flows_list = flows.splitlines()
            pattern = re.compile(
                r"^.* table=%s.* actions=%s" % (table, actions))
            for flow in flows_list:
                if pattern.match(flow.strip()):
                    return True
            return False

        utils.wait_until_true(lambda: _is_log_flow_set(table, actions))

    def _check_log(self, log_id, action, regex_str=None):
        config = self.environment.hosts[0].ovs_agent.agent_config

        def _is_log_event(log_id, action, regex_str):
            # Build the pattern first and append the optional regex_str
            # afterwards; folding this into a single conditional expression
            # would, due to operator precedence, collapse the whole pattern
            # to an empty (match-everything) regex whenever regex_str is
            # not given.
            pattern = r"^.*action=%s.* log_resource_ids=\[[^\]]*%s" % (
                action, log_id)
            if regex_str:
                pattern += ".*" + regex_str
            regex_p = re.compile(pattern)
            with open(config.network_log.local_output_log_base) as f:
                for line in f.readlines():
                    if regex_p.match(line):
                        return True
            return False

        utils.wait_until_true(
            lambda: _is_log_event(log_id, action, regex_str))


class TestLogging(BaseLoggingTestCase):

    def _create_network_log(self, resource_type,
                            resource_id=None,
                            target_id=None):
        return self.safe_client.create_network_log(
            tenant_id=self.tenant_id,
            name='test-log',
            resource_type=resource_type,
            resource_id=resource_id,
            target_id=target_id)

    def _prepare_vms(self):
        sgs = [self.safe_client.create_security_group(self.tenant_id)
               for i in range(2)]
        port1 = self.safe_client.create_port(
            self.tenant_id, self.network['id'],
            self.environment.hosts[0].hostname,
            security_groups=[sgs[0]['id']])
        port2 = self.safe_client.create_port(
            self.tenant_id, self.network['id'],
            self.environment.hosts[0].hostname,
            security_groups=[sgs[1]['id']])
        # insert security-group-rules allow icmp
        self.safe_client.create_security_group_rule(
            self.tenant_id, sgs[0]['id'],
            direction=constants.INGRESS_DIRECTION,
            ethertype=constants.IPv4,
            protocol=constants.PROTO_NAME_ICMP)
        # insert security-group-rules allow icmp
        self.safe_client.create_security_group_rule(
            self.tenant_id, sgs[1]['id'],
            direction=constants.INGRESS_DIRECTION,
            ethertype=constants.IPv4,
            protocol=constants.PROTO_NAME_ICMP)
        vm1 = self.useFixture(
            machine.FakeFullstackMachine(
                self.environment.hosts[0],
                self.network['id'],
                self.tenant_id,
                self.safe_client,
                neutron_port=port1))
        vm2 = self.useFixture(
            machine.FakeFullstackMachine(
                self.environment.hosts[0],
                self.network['id'],
self.tenant_id, self.safe_client, neutron_port=port2)) return machine.FakeFullstackMachinesList([vm1, vm2]) def test_logging(self): vms = self._prepare_vms() vms.block_until_all_boot() sg_log = self._create_network_log(resource_type='security_group') log_id = sg_log['log']['id'] for vm in vms: self._wait_for_log_rules_applied( vm, ovs_const.ACCEPTED_EGRESS_TRAFFIC_TABLE, actions=r"resubmit\(,%d\),CONTROLLER:65535" % ( ovs_const.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE)) self._wait_for_log_rules_applied( vm, ovs_const.ACCEPTED_INGRESS_TRAFFIC_TABLE, actions="CONTROLLER:65535") self._wait_for_log_rules_applied( vm, ovs_const.DROPPED_TRAFFIC_TABLE, actions="CONTROLLER:65535") # ping all vm vms.ping_all() # check log accept packets for icmp self._check_log(log_id=log_id, action='ACCEPT') # Try to connect from VM1 to VM2 via ssh self.assert_no_connection( vms[0].namespace, vms[1].namespace, vms[1].ip, 22, net_helpers.NetcatTester.TCP) # Try to ssh from VM2 to VM1 via ssh self.assert_no_connection( vms[1].namespace, vms[0].namespace, vms[0].ip, 22, net_helpers.NetcatTester.TCP) # check log drop packets for ssh self._check_log(log_id=log_id, action='DROP', regex_str="dst_port=22") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/test_mtu.py0000644000175000017500000000534600000000000024215 0ustar00coreycorey00000000000000# Copyright 2017 NEC India # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
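# test_mtu.py below exercises network MTU updates.  Neutron rejects an MTU
# larger than the underlying physical network can carry: for an overlay
# (e.g. VXLAN over IPv4) network the ceiling is global_physnet_mtu minus
# the encapsulation overhead (50 bytes for VXLAN/IPv4).  The arithmetic
# below is a simplified sketch of that relationship, not the server-side
# validation code itself; it only motivates why 1450 keeps appearing in
# these tests while an update to 9000 fails.
VXLAN_IPV4_OVERHEAD = 50  # outer IPv4 (20) + UDP (8) + VXLAN (8) + margin


def max_overlay_mtu(global_physnet_mtu, overhead=VXLAN_IPV4_OVERHEAD):
    # The largest tenant-visible MTU that still fits in one physnet frame.
    return global_physnet_mtu - overhead


assert max_overlay_mtu(1500) == 1450
assert max_overlay_mtu(1400) == 1350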
from neutronclient.common import exceptions
from oslo_utils import uuidutils

from neutron.tests.fullstack import base
from neutron.tests.fullstack.resources import environment
from neutron.tests.unit import testlib_api

load_tests = testlib_api.module_load_tests


class MTUNetworkTestSetup(base.BaseFullStackTestCase):
    host_desc = []  # No need to register agents for this test case

    def setUp(self):
        env = environment.Environment(
            environment.EnvironmentDescription(), self.host_desc)
        super(MTUNetworkTestSetup, self).setUp(env)
        self.tenant_id = uuidutils.generate_uuid()

    def _restart_neutron_server(self, global_mtu):
        env = environment.Environment(
            environment.EnvironmentDescription(global_mtu=global_mtu),
            self.host_desc)
        env.test_name = self.get_name()
        self.useFixture(env)
        env.neutron_server.restart()


class TestMTUScenarios(MTUNetworkTestSetup):
    def test_mtu_update_network_neg(self):
        network = self.safe_client.create_network(self.tenant_id,
                                                  name='mtu-test-network',
                                                  mtu=1450)
        self.assertRaises(exceptions.BadRequest,
                          self.safe_client.update_network,
                          network['id'], mtu=9000)

    def test_mtu_update_delete_network(self):
        network = self.safe_client.create_network(self.tenant_id,
                                                  name='mtu-test-network',
                                                  mtu=1200)
        self.safe_client.update_network(network['id'], mtu=1450)
        res = self.safe_client.delete_network(network['id'])
        self.assertEqual((), res)

    def test_global_physnet_mtu_update_delete_network(self):
        network = self.safe_client.create_network(self.tenant_id,
                                                  name='mtu-test-network',
                                                  mtu=1450)
        self._restart_neutron_server(1400)
        res = self.safe_client.delete_network(network['id'])
        self.assertEqual((), res)

neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/test_port_shut_down.py

# Copyright 2017 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools

from neutron.common import utils
from neutron.tests.fullstack import base
from neutron.tests.fullstack.resources import environment
from neutron.tests.unit import testlib_api
from neutron_lib import constants
from oslo_utils import uuidutils

load_tests = testlib_api.module_load_tests


class PortShutDownTest(base.BaseFullStackTestCase):
    # This is a test to confirm the port status
    # on shutting down the port administratively.
# The port status should no longer be ACTIVE # and go to DOWN use_dhcp = True l2_pop = False arp_responder = False num_hosts = 1 scenarios = [ (constants.AGENT_TYPE_LINUXBRIDGE, {'l2_agent_type': constants.AGENT_TYPE_LINUXBRIDGE}), (constants.AGENT_TYPE_OVS, {'l2_agent_type': constants.AGENT_TYPE_OVS}) ] def setUp(self): host_descriptions = [ environment.HostDescription( l2_agent_type=self.l2_agent_type, dhcp_agent=self.use_dhcp, ) for _ in range(self.num_hosts)] env = environment.Environment( environment.EnvironmentDescription( l2_pop=self.l2_pop, arp_responder=self.arp_responder), host_descriptions) super(PortShutDownTest, self).setUp(env) def _create_external_network_and_subnet(self, tenant_id): # This test is not exclusive for the external networks. # It is only used here to implicitly create a dhcp port # on the network creation. network = self.safe_client.create_network( tenant_id, name='test-public', external=True, network_type='local') self.safe_client.create_subnet(tenant_id, network['id'], '240.0.0.0/8', gateway_ip='240.0.0.2') return network def _get_network_dhcp_ports(self, network_id): return self.client.list_ports(network_id=network_id, device_owner=constants.DEVICE_OWNER_DHCP)['ports'] def _is_port_active(self, port_id): port = self.client.show_port(port_id)['port'] return port['status'] == constants.PORT_STATUS_ACTIVE def _is_port_down(self, port_id): port = self.client.show_port(port_id)['port'] return port['status'] == constants.PORT_STATUS_DOWN def test_port_shut_down(self): tenant_id = uuidutils.generate_uuid() # Create an external network network = self._create_external_network_and_subnet(tenant_id) # Check if the DHCP port is created port_created = functools.partial(self._get_network_dhcp_ports, network['id']) utils.wait_until_true(port_created) # Get the DHCP port port = self._get_network_dhcp_ports(network['id'])[0] # Wait till the changes are reflected to DB port_status_active_predicate = functools.partial( self._is_port_active, port['id']) utils.wait_until_true(port_status_active_predicate) # Shut down the port self.safe_client.update_port(port['id'], admin_state_up=False) port_status_down_predicate = functools.partial( self._is_port_down, port['id']) utils.wait_until_true(port_status_down_predicate) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/test_ports_binding.py0000644000175000017500000001047600000000000026251 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
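# test_ports_binding.py below distinguishes binding outcomes through the
# port's binding:vif_type attribute.  Here is a minimal sketch of the check
# the tests poll for; the literal values match the neutron_lib portbindings
# definitions, while the helper itself and the sample ports are only
# illustrative.
VIF_TYPE_UNBOUND = 'unbound'
VIF_TYPE_BINDING_FAILED = 'binding_failed'


def is_port_bound(port):
    # A port counts as bound once its vif_type is neither "unbound" nor
    # "binding_failed" (e.g. "ovs" after a successful OVS agent binding).
    return port['binding:vif_type'] not in (VIF_TYPE_UNBOUND,
                                            VIF_TYPE_BINDING_FAILED)


assert not is_port_bound({'binding:vif_type': 'binding_failed'})
assert not is_port_bound({'binding:vif_type': 'unbound'})
assert is_port_bound({'binding:vif_type': 'ovs'})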
from neutron_lib.api.definitions import portbindings from neutron_lib import constants from oslo_utils import uuidutils from neutron.common import utils as common_utils from neutron.tests.fullstack import base from neutron.tests.fullstack.resources import environment from neutron.tests.unit import testlib_api load_tests = testlib_api.module_load_tests class TestPortsBinding(base.BaseFullStackTestCase): scenarios = [ ('Open vSwitch Agent', {'l2_agent_type': constants.AGENT_TYPE_OVS})] def setUp(self): host_descriptions = [ environment.HostDescription( l2_agent_type=self.l2_agent_type, l3_agent=False)] env = environment.Environment( environment.EnvironmentDescription( agent_down_time=10), host_descriptions) super(TestPortsBinding, self).setUp(env) self.l2_agent_process = self.environment.hosts[0].l2_agent self.l2_agent = self.safe_client.client.list_agents( agent_type=self.l2_agent_type)['agents'][0] self.tenant_id = uuidutils.generate_uuid() self.network = self.safe_client.create_network(self.tenant_id) self.subnet = self.safe_client.create_subnet( self.tenant_id, self.network['id'], '20.0.0.0/24') def _ensure_port_bound(self, port_id): def port_bound(): port = self.safe_client.client.show_port(port_id)['port'] return (port[portbindings.VIF_TYPE] not in [portbindings.VIF_TYPE_UNBOUND, portbindings.VIF_TYPE_BINDING_FAILED]) common_utils.wait_until_true(port_bound) def _ensure_port_binding_failed(self, port_id): def port_binding_failed(): port = self.safe_client.client.show_port(port_id)['port'] return (port[portbindings.VIF_TYPE] == portbindings.VIF_TYPE_BINDING_FAILED) common_utils.wait_until_true(port_binding_failed) def test_smartnic_port_binding(self): """Test scenario 1. Create SmartNIC port which will not be properly bound to host because OVS agent doesn't bind SmartNIC ports by default 2. Validate port's bound status to be bound failed 3. Stop L2 agent and wait until it will be DEAD 4. Set `baremetal_smartnic=True` in agent config 5. Start L2 agent and wait until it is Alive 6. Create SmartNIC port which will be properly bound to host 7. Validate port's bound status """ smartnic_port = self.safe_client.create_port( self.tenant_id, self.network['id'], self.environment.hosts[0].hostname, **{"binding:vnic_type": "smart-nic", "binding:profile": { "local_link_information": [ { "port_id": "port1", "hostname": self.environment.hosts[0].hostname, } ] } } ) self._ensure_port_binding_failed(smartnic_port['id']) # configure neutron agent to bind SmartNIC ports self.l2_agent_process = self.environment.hosts[0].l2_agent self.l2_agent = self.safe_client.client.list_agents( agent_type=self.l2_agent_type)['agents'][0] self.l2_agent_process.stop() self._wait_until_agent_down(self.l2_agent['id']) l2_agent_config = self.l2_agent_process.agent_cfg_fixture.config l2_agent_config['agent']['baremetal_smartnic'] = 'True' self.l2_agent_process.agent_cfg_fixture.write_config_to_configfile() self.l2_agent_process.restart() self._wait_until_agent_up(self.l2_agent['id']) self._ensure_port_bound(smartnic_port['id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/test_ports_rebind.py0000644000175000017500000001505300000000000026076 0ustar00coreycorey00000000000000# Copyright 2018 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import portbindings from neutron_lib import constants from oslo_utils import uuidutils from neutron.common import utils as common_utils from neutron.tests.common.exclusive_resources import ip_network from neutron.tests.fullstack import base from neutron.tests.fullstack.resources import environment from neutron.tests.fullstack.resources import machine from neutron.tests.unit import testlib_api load_tests = testlib_api.module_load_tests class TestPortsRebind(base.BaseFullStackTestCase): scenarios = [ ('Open vSwitch Agent', {'l2_agent_type': constants.AGENT_TYPE_OVS}), ('Linux Bridge Agent', { 'l2_agent_type': constants.AGENT_TYPE_LINUXBRIDGE})] def setUp(self): host_descriptions = [ environment.HostDescription( l2_agent_type=self.l2_agent_type, l3_agent=self.use_l3_agent)] env = environment.Environment( environment.EnvironmentDescription( agent_down_time=10), host_descriptions) super(TestPortsRebind, self).setUp(env) self.l2_agent_process = self.environment.hosts[0].l2_agent self.l2_agent = self.safe_client.client.list_agents( agent_type=self.l2_agent_type)['agents'][0] self.tenant_id = uuidutils.generate_uuid() self.network = self.safe_client.create_network(self.tenant_id) self.subnet = self.safe_client.create_subnet( self.tenant_id, self.network['id'], '20.0.0.0/24') def _ensure_port_bound(self, port_id): def port_bound(): port = self.safe_client.client.show_port(port_id)['port'] return ( port[portbindings.VIF_TYPE] not in [portbindings.VIF_TYPE_UNBOUND, portbindings.VIF_TYPE_BINDING_FAILED]) common_utils.wait_until_true(port_bound) def _ensure_port_binding_failed(self, port_id): def port_binding_failed(): port = self.safe_client.client.show_port(port_id)['port'] return (port[portbindings.VIF_TYPE] == portbindings.VIF_TYPE_BINDING_FAILED) common_utils.wait_until_true(port_binding_failed) class TestVMPortRebind(TestPortsRebind): use_l3_agent = False def test_vm_port_rebound_when_L2_agent_revived(self): """Test scenario 1. Create port which will be properly bound to host 2. Stop L2 agent and wait until it will be DEAD 3. Create another port - it should have "binding_failed" 4. Turn on L2 agent 5. 
Port from p.3 should be bound properly after L2 agent will be UP """ vm_1 = self.useFixture( machine.FakeFullstackMachine( self.environment.hosts[0], self.network['id'], self.tenant_id, self.safe_client)) vm_1.block_until_boot() self._ensure_port_bound(vm_1.neutron_port['id']) vm_1_port = self.safe_client.client.show_port( vm_1.neutron_port['id'])['port'] self.l2_agent_process = self.environment.hosts[0].l2_agent self.l2_agent = self.safe_client.client.list_agents( agent_type=self.l2_agent_type)['agents'][0] self.l2_agent_process.stop() self._wait_until_agent_down(self.l2_agent['id']) vm_2 = self.useFixture( machine.FakeFullstackMachine( self.environment.hosts[0], self.network['id'], self.tenant_id, self.safe_client)) self._ensure_port_binding_failed(vm_2.neutron_port['id']) vm_2_port = self.safe_client.client.show_port( vm_2.neutron_port['id'])['port'] # check if vm_1 port is still bound as it was before self._ensure_port_bound(vm_1.neutron_port['id']) # and that revision number of vm_1's port wasn't changed self.assertEqual( vm_1_port['revision_number'], self.safe_client.client.show_port( vm_1_port['id'])['port']['revision_number']) self.l2_agent_process.start() self._wait_until_agent_up(self.l2_agent['id']) self._ensure_port_bound(vm_2_port['id']) class TestRouterPortRebind(TestPortsRebind): use_l3_agent = True def setUp(self): super(TestRouterPortRebind, self).setUp() self.tenant_id = uuidutils.generate_uuid() self.ext_net = self.safe_client.create_network( self.tenant_id, external=True) ext_cidr = self.useFixture( ip_network.ExclusiveIPNetwork( "240.0.0.0", "240.255.255.255", "24")).network self.safe_client.create_subnet( self.tenant_id, self.ext_net['id'], ext_cidr) self.router = self.safe_client.create_router( self.tenant_id, external_network=self.ext_net['id']) def test_vm_port_rebound_when_L2_agent_revived(self): """Test scenario 1. Ensure that router gateway port is bound properly 2. Stop L2 agent and wait until it will be DEAD 3. Create router interface and check that it's port is "binding_failed" 4. Turn on L2 agent 5. Router's port created in p.3 should be now bound properly """ if self.l2_agent_type == constants.AGENT_TYPE_LINUXBRIDGE: self.skipTest("Bug 1798085") gw_port = self.safe_client.client.list_ports( device_id=self.router['id'], device_owner=constants.DEVICE_OWNER_ROUTER_GW)['ports'][0] self._ensure_port_bound(gw_port['id']) self.l2_agent_process.stop() self._wait_until_agent_down(self.l2_agent['id']) router_interface_info = self.safe_client.add_router_interface( self.router['id'], self.subnet['id']) self._ensure_port_binding_failed(router_interface_info['port_id']) self.l2_agent_process.start() self._wait_until_agent_up(self.l2_agent['id']) self._ensure_port_bound(router_interface_info['port_id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/test_qos.py0000644000175000017500000007427100000000000024215 0ustar00coreycorey00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import functools from neutron_lib import constants from neutron_lib.services.qos import constants as qos_consts from neutronclient.common import exceptions from oslo_utils import uuidutils from neutron.agent.common import ovs_lib from neutron.agent.linux import tc_lib from neutron.common import utils from neutron.tests.common.agents import l2_extensions from neutron.tests.fullstack import base from neutron.tests.fullstack.resources import config as fullstack_config from neutron.tests.fullstack.resources import environment from neutron.tests.fullstack.resources import machine from neutron.tests.unit import testlib_api from neutron.conf.plugins.ml2.drivers import linuxbridge as \ linuxbridge_agent_config from neutron.plugins.ml2.drivers.linuxbridge.agent import \ linuxbridge_neutron_agent as linuxbridge_agent from neutron.services.qos.drivers.linuxbridge import driver as lb_drv from neutron.services.qos.drivers.openvswitch import driver as ovs_drv load_tests = testlib_api.module_load_tests BANDWIDTH_BURST = 100 BANDWIDTH_LIMIT = 500 MIN_BANDWIDTH = 300 DSCP_MARK = 16 class BaseQoSRuleTestCase(object): number_of_hosts = 1 physical_network = None @property def reverse_direction(self): if self.direction == constants.INGRESS_DIRECTION: return constants.EGRESS_DIRECTION elif self.direction == constants.EGRESS_DIRECTION: return constants.INGRESS_DIRECTION def setUp(self): host_desc = [ environment.HostDescription( l3_agent=False, l2_agent_type=self.l2_agent_type ) for _ in range(self.number_of_hosts)] env_desc = environment.EnvironmentDescription( agent_down_time=10, qos=True) env = environment.Environment(env_desc, host_desc) super(BaseQoSRuleTestCase, self).setUp(env) self.l2_agent_process = self.environment.hosts[0].l2_agent self.l2_agent = self.safe_client.client.list_agents( agent_type=self.l2_agent_type)['agents'][0] self.tenant_id = uuidutils.generate_uuid() network_args = {} if self.physical_network: network_args = {'physical_network': self.physical_network, 'network_type': 'vlan'} self.network = self.safe_client.create_network( self.tenant_id, name='network-test', **network_args) self.subnet = self.safe_client.create_subnet( self.tenant_id, self.network['id'], cidr='10.0.0.0/24', gateway_ip='10.0.0.1', name='subnet-test', enable_dhcp=False) def _create_qos_policy(self): return self.safe_client.create_qos_policy( self.tenant_id, 'fs_policy', 'Fullstack testing policy', shared='False', is_default='False') def _prepare_vm_with_qos_policy(self, rule_add_functions): if rule_add_functions: qos_policy = self._create_qos_policy() qos_policy_id = qos_policy['id'] for rule_add in rule_add_functions: rule_add(qos_policy) else: qos_policy_id = qos_policy = None port = self.safe_client.create_port( self.tenant_id, self.network['id'], self.environment.hosts[0].hostname, qos_policy_id) vm = self.useFixture( machine.FakeFullstackMachine( self.environment.hosts[0], self.network['id'], self.tenant_id, self.safe_client, neutron_port=port)) return vm, qos_policy class _TestBwLimitQoS(BaseQoSRuleTestCase): number_of_hosts = 1 @staticmethod def _get_expected_egress_burst_value(limit): return int( limit * qos_consts.DEFAULT_BURST_RATE ) def _wait_for_bw_rule_removed(self, vm, direction): # No values are provided when port doesn't have qos policy self._wait_for_bw_rule_applied(vm, None, None, direction) def _add_bw_limit_rule(self, limit, burst, direction, qos_policy): qos_policy_id = qos_policy['id'] rule = 
self.safe_client.create_bandwidth_limit_rule( self.tenant_id, qos_policy_id, limit, burst, direction) # Make it consistent with GET reply rule['type'] = qos_consts.RULE_TYPE_BANDWIDTH_LIMIT rule['qos_policy_id'] = qos_policy_id qos_policy['rules'].append(rule) def _create_vm_with_limit_rules(self): # Create port with qos policy attached, with different direction vm, qos_policy = self._prepare_vm_with_qos_policy( [functools.partial( self._add_bw_limit_rule, BANDWIDTH_LIMIT, BANDWIDTH_BURST, self.direction), functools.partial( self._add_bw_limit_rule, BANDWIDTH_LIMIT, BANDWIDTH_BURST, self.reverse_direction)]) self._wait_for_bw_rule_applied( vm, BANDWIDTH_LIMIT, BANDWIDTH_BURST, self.direction) self._wait_for_bw_rule_applied( vm, BANDWIDTH_LIMIT, BANDWIDTH_BURST, self.reverse_direction) return vm, qos_policy def _restart_agent_and_check_rules_applied(self, policy_id, vm, final_rules, add_rules=None, update_rules=None, delete_rules=None): # final_rules: the last valid rule after all operations # (clear/update/reset rules during the l2-agent stop) are completed. # add_rules: rules that need to be added during the l2-agent stop. # update_rules: rules that need to be updated during the l2-agent stop. # delete_rules:rules that need to be deleted during the l2-agent stop. add_rules = list() if not add_rules else add_rules update_rules = list() if not update_rules else update_rules delete_rules = list() if not delete_rules else delete_rules # Stop l2_agent and clear/update/reset the port qos rules self.l2_agent_process.stop() self._wait_until_agent_down(self.l2_agent['id']) for rule in delete_rules: self.client.delete_bandwidth_limit_rule(rule['id'], policy_id) for rule in add_rules: self.safe_client.create_bandwidth_limit_rule( self.tenant_id, policy_id, rule.get('limit'), rule.get('burst'), rule['direction']) for rule in update_rules: self.client.update_bandwidth_limit_rule( rule['id'], policy_id, body={'bandwidth_limit_rule': {'max_kbps': rule.get('limit'), 'max_burst_kbps': rule.get('burst'), 'direction': rule.get('direction')}}) # Start l2_agent to check if these rules is cleared self.l2_agent_process.start() self._wait_until_agent_up(self.l2_agent['id']) all_directions = set([self.direction, self.reverse_direction]) for final_rule in final_rules: all_directions -= set([final_rule['direction']]) self._wait_for_bw_rule_applied( vm, final_rule.get('limit'), final_rule.get('burst'), final_rule['direction']) # Make sure there are no other rules. 
for direction in list(all_directions): self._wait_for_bw_rule_applied(vm, None, None, direction) def test_bw_limit_qos_policy_rule_lifecycle(self): new_limit = BANDWIDTH_LIMIT + 100 # Create port with qos policy attached vm, qos_policy = self._prepare_vm_with_qos_policy( [functools.partial( self._add_bw_limit_rule, BANDWIDTH_LIMIT, BANDWIDTH_BURST, self.direction)]) bw_rule = qos_policy['rules'][0] self._wait_for_bw_rule_applied( vm, BANDWIDTH_LIMIT, BANDWIDTH_BURST, self.direction) qos_policy_id = qos_policy['id'] self.client.delete_bandwidth_limit_rule(bw_rule['id'], qos_policy_id) self._wait_for_bw_rule_removed(vm, self.direction) # Create new rule with no given burst value, in such case ovs and lb # agent should apply burst value as # bandwidth_limit * qos_consts.DEFAULT_BURST_RATE new_expected_burst = self._get_expected_burst_value(new_limit, self.direction) new_rule = self.safe_client.create_bandwidth_limit_rule( self.tenant_id, qos_policy_id, new_limit, direction=self.direction) self._wait_for_bw_rule_applied( vm, new_limit, new_expected_burst, self.direction) # Update qos policy rule id self.client.update_bandwidth_limit_rule( new_rule['id'], qos_policy_id, body={'bandwidth_limit_rule': {'max_kbps': BANDWIDTH_LIMIT, 'max_burst_kbps': BANDWIDTH_BURST}}) self._wait_for_bw_rule_applied( vm, BANDWIDTH_LIMIT, BANDWIDTH_BURST, self.direction) # Remove qos policy from port self.client.update_port( vm.neutron_port['id'], body={'port': {'qos_policy_id': None}}) self._wait_for_bw_rule_removed(vm, self.direction) def test_bw_limit_direction_change(self): # Create port with qos policy attached, with rule self.direction vm, qos_policy = self._prepare_vm_with_qos_policy( [functools.partial( self._add_bw_limit_rule, BANDWIDTH_LIMIT, BANDWIDTH_BURST, self.direction)]) bw_rule = qos_policy['rules'][0] self._wait_for_bw_rule_applied( vm, BANDWIDTH_LIMIT, BANDWIDTH_BURST, self.direction) # Update rule by changing direction to opposite then it was before self.client.update_bandwidth_limit_rule( bw_rule['id'], qos_policy['id'], body={'bandwidth_limit_rule': { 'direction': self.reverse_direction}}) self._wait_for_bw_rule_removed(vm, self.direction) self._wait_for_bw_rule_applied( vm, BANDWIDTH_LIMIT, BANDWIDTH_BURST, self.reverse_direction) def test_bw_limit_qos_no_rules_l2_agent_restart(self): vm, qos_policy = self._create_vm_with_limit_rules() bw_rule_1 = qos_policy['rules'][0] bw_rule_2 = qos_policy['rules'][1] qos_policy_id = qos_policy['id'] # final_rules indicates the last valid rule after all operations # (clear/update/reset rules during the l2-agent stop) are completed final_rules = [{'direction': self.direction, 'limit': None}, {'direction': self.reverse_direction, 'limit': None}] self._restart_agent_and_check_rules_applied( qos_policy_id, vm, final_rules=final_rules, delete_rules=[bw_rule_1, bw_rule_2]) def test_bw_limit_qos_rules_deleted_l2_agent_restart(self): vm, qos_policy = self._create_vm_with_limit_rules() bw_rule_1 = qos_policy['rules'][0] qos_policy_id = qos_policy['id'] # final_rules indicates the last valid rule after all operations # (clear/update/reset rules during the l2-agent stop) are completed final_rules = [{'direction': self.direction, 'limit': None}, {'direction': self.reverse_direction, 'limit': BANDWIDTH_LIMIT, 'burst': BANDWIDTH_BURST}] self._restart_agent_and_check_rules_applied( qos_policy_id, vm, final_rules=final_rules, delete_rules=[bw_rule_1]) def test_bw_limit_qos_rules_changed_l2_agent_restart(self): vm, qos_policy = self._create_vm_with_limit_rules() bw_rule_1 = 
qos_policy['rules'][0] bw_rule_2 = qos_policy['rules'][1] qos_policy_id = qos_policy['id'] add_rules = [{'direction': self.direction, 'limit': BANDWIDTH_LIMIT * 2, 'burst': BANDWIDTH_BURST * 2}, {'direction': self.reverse_direction, 'limit': BANDWIDTH_LIMIT * 2, 'burst': BANDWIDTH_BURST * 2}] self._restart_agent_and_check_rules_applied( qos_policy_id, vm, final_rules=add_rules, add_rules=add_rules, delete_rules=[bw_rule_1, bw_rule_2]) def test_bw_limit_qos_rules_updated_l2_agent_restart(self): vm, qos_policy = self._create_vm_with_limit_rules() bw_rule_1 = qos_policy['rules'][0] bw_rule_2 = qos_policy['rules'][1] qos_policy_id = qos_policy['id'] update_rules = [{'id': bw_rule_1['id'], 'direction': bw_rule_1['direction'], 'limit': BANDWIDTH_LIMIT * 2, 'burst': BANDWIDTH_BURST * 2}, {'id': bw_rule_2['id'], 'direction': bw_rule_2['direction'], 'limit': BANDWIDTH_LIMIT * 2, 'burst': BANDWIDTH_BURST * 2}] self._restart_agent_and_check_rules_applied( qos_policy_id, vm, final_rules=update_rules, update_rules=update_rules) class TestBwLimitQoSOvs(_TestBwLimitQoS, base.BaseFullStackTestCase): l2_agent_type = constants.AGENT_TYPE_OVS scenarios = [ ('ingress', {'direction': constants.INGRESS_DIRECTION}), ('egress', {'direction': constants.EGRESS_DIRECTION}) ] @staticmethod def _get_expected_burst_value(limit, direction): # For egress bandwidth limit this value should be calculated as # bandwidth_limit * qos_consts.DEFAULT_BURST_RATE if direction == constants.EGRESS_DIRECTION: return TestBwLimitQoSOvs._get_expected_egress_burst_value(limit) else: return 0 def _wait_for_bw_rule_applied(self, vm, limit, burst, direction): if direction == constants.EGRESS_DIRECTION: utils.wait_until_true( lambda: vm.bridge.get_egress_bw_limit_for_port( vm.port.name) == (limit, burst)) elif direction == constants.INGRESS_DIRECTION: utils.wait_until_true( lambda: vm.bridge.get_ingress_bw_limit_for_port( vm.port.name) == (limit, burst)) def test_bw_limit_qos_port_removed(self): """Test if rate limit config is properly removed when whole port is removed. 
""" # Create port with qos policy attached vm, qos_policy = self._prepare_vm_with_qos_policy( [functools.partial( self._add_bw_limit_rule, BANDWIDTH_LIMIT, BANDWIDTH_BURST, self.direction)]) self._wait_for_bw_rule_applied( vm, BANDWIDTH_LIMIT, BANDWIDTH_BURST, self.direction) # Delete port with qos policy attached vm.destroy() self._wait_for_bw_rule_removed(vm, self.direction) self.assertIsNone(vm.bridge.find_qos(vm.port.name)) self.assertIsNone(vm.bridge.find_queue(vm.port.name, ovs_lib.QOS_DEFAULT_QUEUE)) class TestBwLimitQoSLinuxbridge(_TestBwLimitQoS, base.BaseFullStackTestCase): l2_agent_type = constants.AGENT_TYPE_LINUXBRIDGE scenarios = [ ('egress', {'direction': constants.EGRESS_DIRECTION}), ('ingress', {'direction': constants.INGRESS_DIRECTION}), ] @staticmethod def _get_expected_burst_value(limit, direction): # For egress bandwidth limit this value should be calculated as # bandwidth_limit * qos_consts.DEFAULT_BURST_RATE if direction == constants.EGRESS_DIRECTION: return TestBwLimitQoSLinuxbridge._get_expected_egress_burst_value( limit) else: return TestBwLimitQoSLinuxbridge._get_expected_ingress_burst_value( limit) @staticmethod def _get_expected_ingress_burst_value(limit): return int( float(limit) / float(linuxbridge_agent_config.DEFAULT_KERNEL_HZ_VALUE)) def _wait_for_bw_rule_applied(self, vm, limit, burst, direction): port_name = linuxbridge_agent.LinuxBridgeManager.get_tap_device_name( vm.neutron_port['id']) tc = tc_lib.TcCommand( port_name, linuxbridge_agent_config.DEFAULT_KERNEL_HZ_VALUE, namespace=vm.host.host_namespace ) if direction == constants.EGRESS_DIRECTION: utils.wait_until_true( lambda: tc.get_filters_bw_limits() == (limit, burst)) elif direction == constants.INGRESS_DIRECTION: utils.wait_until_true( lambda: tc.get_tbf_bw_limits() == (limit, burst)) class _TestDscpMarkingQoS(BaseQoSRuleTestCase): number_of_hosts = 2 def _wait_for_dscp_marking_rule_removed(self, vm): self._wait_for_dscp_marking_rule_applied(vm, None) def _add_dscp_rule(self, dscp_mark, qos_policy): qos_policy_id = qos_policy['id'] rule = self.safe_client.create_dscp_marking_rule( self.tenant_id, qos_policy_id, dscp_mark) # Make it consistent with GET reply rule['type'] = qos_consts.RULE_TYPE_DSCP_MARKING rule['qos_policy_id'] = qos_policy_id qos_policy['rules'].append(rule) def test_dscp_qos_policy_rule_lifecycle(self): new_dscp_mark = DSCP_MARK + 8 # Create port with qos policy attached vm, qos_policy = self._prepare_vm_with_qos_policy( [functools.partial(self._add_dscp_rule, DSCP_MARK)]) dscp_rule = qos_policy['rules'][0] self._wait_for_dscp_marking_rule_applied(vm, DSCP_MARK) qos_policy_id = qos_policy['id'] self.client.delete_dscp_marking_rule(dscp_rule['id'], qos_policy_id) self._wait_for_dscp_marking_rule_removed(vm) # Create new rule new_rule = self.safe_client.create_dscp_marking_rule( self.tenant_id, qos_policy_id, new_dscp_mark) self._wait_for_dscp_marking_rule_applied(vm, new_dscp_mark) # Update qos policy rule id self.client.update_dscp_marking_rule( new_rule['id'], qos_policy_id, body={'dscp_marking_rule': {'dscp_mark': DSCP_MARK}}) self._wait_for_dscp_marking_rule_applied(vm, DSCP_MARK) # Remove qos policy from port self.client.update_port( vm.neutron_port['id'], body={'port': {'qos_policy_id': None}}) self._wait_for_dscp_marking_rule_removed(vm) def test_dscp_marking_packets(self): # Create port (vm) which will be used to received and test packets receiver_port = self.safe_client.create_port( self.tenant_id, self.network['id'], self.environment.hosts[1].hostname) receiver = 
self.useFixture( machine.FakeFullstackMachine( self.environment.hosts[1], self.network['id'], self.tenant_id, self.safe_client, neutron_port=receiver_port)) # Create port with qos policy attached sender, qos_policy = self._prepare_vm_with_qos_policy( [functools.partial(self._add_dscp_rule, DSCP_MARK)]) sender.block_until_boot() receiver.block_until_boot() self._wait_for_dscp_marking_rule_applied(sender, DSCP_MARK) l2_extensions.wait_for_dscp_marked_packet( sender, receiver, DSCP_MARK) def test_dscp_marking_clean_port_removed(self): """Test if DSCP marking OpenFlow/iptables rules are removed when whole port is removed. """ # Create port with qos policy attached vm, qos_policy = self._prepare_vm_with_qos_policy( [functools.partial(self._add_dscp_rule, DSCP_MARK)]) self._wait_for_dscp_marking_rule_applied(vm, DSCP_MARK) # Delete port with qos policy attached vm.destroy() self._wait_for_dscp_marking_rule_removed(vm) class TestDscpMarkingQoSOvs(_TestDscpMarkingQoS, base.BaseFullStackTestCase): l2_agent_type = constants.AGENT_TYPE_OVS def _wait_for_dscp_marking_rule_applied(self, vm, dscp_mark): l2_extensions.wait_until_dscp_marking_rule_applied_ovs( vm.bridge, vm.port.name, dscp_mark) class TestDscpMarkingQoSLinuxbridge(_TestDscpMarkingQoS, base.BaseFullStackTestCase): l2_agent_type = constants.AGENT_TYPE_LINUXBRIDGE def _wait_for_dscp_marking_rule_applied(self, vm, dscp_mark): l2_extensions.wait_until_dscp_marking_rule_applied_linuxbridge( vm.host.host_namespace, vm.port.name, dscp_mark) class TestQoSWithL2Population(base.BaseFullStackTestCase): scenarios = [ (constants.AGENT_TYPE_OVS, {'mech_drivers': 'openvswitch', 'supported_rules': ovs_drv.SUPPORTED_RULES}), (constants.AGENT_TYPE_LINUXBRIDGE, {'mech_drivers': 'linuxbridge', 'supported_rules': lb_drv.SUPPORTED_RULES}) ] def setUp(self): host_desc = [] # No need to register agents for this test case env_desc = environment.EnvironmentDescription( qos=True, l2_pop=True, mech_drivers=self.mech_drivers) env = environment.Environment(env_desc, host_desc) super(TestQoSWithL2Population, self).setUp(env) def test_supported_qos_rule_types(self): res = self.client.list_qos_rule_types() rule_types = {t['type'] for t in res['rule_types']} expected_rules = set(self.supported_rules) self.assertEqual(expected_rules, rule_types) class TestQoSPolicyIsDefault(base.BaseFullStackTestCase): NAME = 'fs_policy' DESCRIPTION = 'Fullstack testing policy' SHARED = True def setUp(self): host_desc = [] # No need to register agents for this test case env_desc = environment.EnvironmentDescription(qos=True) env = environment.Environment(env_desc, host_desc) super(TestQoSPolicyIsDefault, self).setUp(env) def _create_qos_policy(self, project_id, is_default): return self.safe_client.create_qos_policy( project_id, self.NAME, self.DESCRIPTION, shared=self.SHARED, is_default=is_default) def _update_qos_policy(self, qos_policy_id, is_default): return self.client.update_qos_policy( qos_policy_id, body={'policy': {'is_default': is_default}}) def test_create_one_default_qos_policy_per_project(self): project_ids = [uuidutils.generate_uuid(), uuidutils.generate_uuid()] for project_id in project_ids: qos_policy = self._create_qos_policy(project_id, True) self.assertTrue(qos_policy['is_default']) self.assertEqual(project_id, qos_policy['project_id']) qos_policy = self._create_qos_policy(project_id, False) self.assertFalse(qos_policy['is_default']) self.assertEqual(project_id, qos_policy['project_id']) def test_create_two_default_qos_policies_per_project(self): project_id = 
uuidutils.generate_uuid() qos_policy = self._create_qos_policy(project_id, True) self.assertTrue(qos_policy['is_default']) self.assertEqual(project_id, qos_policy['project_id']) self.assertRaises(exceptions.Conflict, self._create_qos_policy, project_id, True) def test_update_default_status(self): project_ids = [uuidutils.generate_uuid(), uuidutils.generate_uuid()] for project_id in project_ids: qos_policy = self._create_qos_policy(project_id, True) self.assertTrue(qos_policy['is_default']) qos_policy = self._update_qos_policy(qos_policy['id'], False) self.assertFalse(qos_policy['policy']['is_default']) def test_update_default_status_conflict(self): project_id = uuidutils.generate_uuid() qos_policy_1 = self._create_qos_policy(project_id, True) self.assertTrue(qos_policy_1['is_default']) qos_policy_2 = self._create_qos_policy(project_id, False) self.assertFalse(qos_policy_2['is_default']) self.assertRaises(exceptions.Conflict, self._update_qos_policy, qos_policy_2['id'], True) class _TestMinBwQoS(BaseQoSRuleTestCase): number_of_hosts = 1 physical_network = fullstack_config.PHYSICAL_NETWORK_NAME def _wait_for_min_bw_rule_removed(self, vm, direction): # No values are provided when port doesn't have qos policy self._wait_for_min_bw_rule_applied(vm, None, direction) def _add_min_bw_rule(self, min_bw, direction, qos_policy): qos_policy_id = qos_policy['id'] rule = self.safe_client.create_minimum_bandwidth_rule( self.tenant_id, qos_policy_id, min_bw, direction) # Make it consistent with GET reply rule['type'] = qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH rule['qos_policy_id'] = qos_policy_id qos_policy['rules'].append(rule) def test_min_bw_qos_policy_rule_lifecycle(self): new_limit = MIN_BANDWIDTH - 100 # Create port with qos policy attached vm, qos_policy = self._prepare_vm_with_qos_policy( [functools.partial( self._add_min_bw_rule, MIN_BANDWIDTH, self.direction)]) bw_rule = qos_policy['rules'][0] self._wait_for_min_bw_rule_applied(vm, MIN_BANDWIDTH, self.direction) qos_policy_id = qos_policy['id'] self.client.delete_minimum_bandwidth_rule(bw_rule['id'], qos_policy_id) self._wait_for_min_bw_rule_removed(vm, self.direction) new_rule = self.safe_client.create_minimum_bandwidth_rule( self.tenant_id, qos_policy_id, new_limit, direction=self.direction) self._wait_for_min_bw_rule_applied(vm, new_limit, self.direction) # Update qos policy rule id self.client.update_minimum_bandwidth_rule( new_rule['id'], qos_policy_id, body={'minimum_bandwidth_rule': {'min_kbps': MIN_BANDWIDTH}}) self._wait_for_min_bw_rule_applied(vm, MIN_BANDWIDTH, self.direction) # Remove qos policy from port self.client.update_port( vm.neutron_port['id'], body={'port': {'qos_policy_id': None}}) self._wait_for_min_bw_rule_removed(vm, self.direction) class TestMinBwQoSOvs(_TestMinBwQoS, base.BaseFullStackTestCase): l2_agent_type = constants.AGENT_TYPE_OVS scenarios = [ ('egress', {'direction': constants.EGRESS_DIRECTION}) ] def _wait_for_min_bw_rule_applied(self, vm, min_bw, direction): if direction == constants.EGRESS_DIRECTION: utils.wait_until_true( lambda: vm.bridge.get_egress_min_bw_for_port( vm.neutron_port['id']) == min_bw) elif direction == constants.INGRESS_DIRECTION: self.fail('"%s" direction not implemented' % constants.INGRESS_DIRECTION) def _find_agent_qos_and_queue(self, vm): # NOTE(ralonsoh): the "_min_bw_qos_id" in vm.bridge is not the same as # the ID in the agent br_int instance. 
We need first to find the QoS # register and the Queue assigned to vm.neutron_port['id'] data = {'qos': None, 'qos_queue': None, 'queue_num': None} def check_qos_and_queue(): queue = vm.bridge._find_queue(vm.neutron_port['id']) data['queue_num'] = int(queue['external_ids']['queue-num']) qoses = vm.bridge._list_qos() for qos in qoses: qos_queue = qos['queues'].get(data['queue_num']) if qos_queue and qos_queue.uuid == queue['_uuid']: data['qos'] = qos data['qos_queue'] = qos_queue return True try: utils.wait_until_true(check_qos_and_queue, timeout=10) return data['qos'], data['qos_queue'] except utils.WaitTimeout: qoses = vm.bridge._list_qos() queues = vm.bridge._list_queues() queuenum = ('QoS register not found with queue-num %s' % data['queue_num']) qoses = '\nList of OVS QoS registers:\n%s' % '\n'.join(qoses) queues = '\nList of OVS Queue registers:\n%s' % '\n'.join(queues) self.fail(queuenum + qoses + queues) def test_min_bw_qos_port_removed(self): """Test if min BW limit config is properly removed when port removed. In case another port is added without a QoS policy, the L2 agent QoS extension will call "handle_port" and then it will force the reset of this port (self._process_reset_port(port)). This test will check that if the port is not present in the agent QoS cache, the policy is not removed. """ # Create port without qos policy attached vm_noqos, _ = self._prepare_vm_with_qos_policy(None) # Create port with qos policy attached vm_qos, qos_policy = self._prepare_vm_with_qos_policy( [functools.partial( self._add_min_bw_rule, MIN_BANDWIDTH, self.direction)]) self._wait_for_min_bw_rule_applied( vm_qos, MIN_BANDWIDTH, self.direction) # Check QoS policy and Queue rule. qos, queue = self._find_agent_qos_and_queue(vm_qos) self.assertEqual({'min-rate': str(MIN_BANDWIDTH * 1000)}, queue.other_config) queues = vm_qos.bridge._list_queues(port=vm_qos.neutron_port['id']) self.assertEqual(1, len(queues)) self.assertEqual(queue.uuid, queues[0]['_uuid']) # Delete port with qos policy attached vm_qos.destroy() self._wait_for_min_bw_rule_removed(vm_qos, self.direction) self.assertEqual( [], vm_qos.bridge._list_queues(port=vm_qos.neutron_port['id'])) vm_noqos.destroy() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/test_securitygroup.py0000644000175000017500000006736200000000000026342 0ustar00coreycorey00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
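# NOTE: The tests in this module drive real L2 agents end to end and
# assert (lack of) connectivity with net_helpers.NetcatTester, polled
# through common_utils.wait_until_true: each check is retried until the
# security group change has propagated from the API through the agent
# down to the firewall, or until the timeout fires.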
from neutron_lib import constants from neutronclient.common import exceptions as nc_exc from oslo_utils import uuidutils from neutron.agent.linux import iptables_firewall from neutron.agent.linux import iptables_manager from neutron.cmd.sanity import checks from neutron.common import utils as common_utils from neutron.tests.common import net_helpers from neutron.tests.fullstack import base from neutron.tests.fullstack.resources import environment from neutron.tests.fullstack.resources import machine from neutron.tests.unit import testlib_api load_tests = testlib_api.module_load_tests class StatelessRulesNotConfiguredException(Exception): pass class OVSVersionChecker(object): conntrack_supported = None @classmethod def supports_ovsfirewall(cls): if cls.conntrack_supported is None: cls.conntrack_supported = checks.ovs_conntrack_supported() return cls.conntrack_supported class BaseSecurityGroupsSameNetworkTest(base.BaseFullStackTestCase): def setUp(self): debug_iptables = self.firewall_driver.startswith("iptables") host_descriptions = [ environment.HostDescription( l2_agent_type=self.l2_agent_type, firewall_driver=self.firewall_driver, dhcp_agent=True) for _ in range(self.num_hosts)] env = environment.Environment( environment.EnvironmentDescription( network_type=self.network_type, debug_iptables=debug_iptables), host_descriptions) super(BaseSecurityGroupsSameNetworkTest, self).setUp(env) if (self.firewall_driver == 'openvswitch' and not OVSVersionChecker.supports_ovsfirewall()): self.skipTest("Open vSwitch firewall_driver doesn't work " "with this version of ovs.") def assert_connection(self, *args, **kwargs): netcat = net_helpers.NetcatTester(*args, **kwargs) def test_connectivity(): try: return netcat.test_connectivity() except RuntimeError: return False try: common_utils.wait_until_true(test_connectivity) finally: netcat.stop_processes() def assert_no_connection(self, *args, **kwargs): netcat = net_helpers.NetcatTester(*args, **kwargs) try: common_utils.wait_until_true(netcat.test_no_connectivity) finally: netcat.stop_processes() class TestSecurityGroupsSameNetwork(BaseSecurityGroupsSameNetworkTest): network_type = 'vxlan' scenarios = [ # TODO(njohnston): Re-add the linuxbridge scenario once it is stable # The iptables_hybrid driver lacks isolation between agents and # because of that using only one host is enough ('ovs-hybrid', { 'firewall_driver': 'iptables_hybrid', 'l2_agent_type': constants.AGENT_TYPE_OVS, 'num_hosts': 1}), ('ovs-openflow', { 'firewall_driver': 'openvswitch', 'l2_agent_type': constants.AGENT_TYPE_OVS, 'num_hosts': 2})] index_to_sg = [0, 0, 1, 2] # NOTE(toshii): As a firewall_driver can interfere with others, # the recommended way to add test is to expand this method, not # adding another. def test_securitygroup(self): """Tests if a security group rules are working, by confirming that 0. traffic is allowed when port security is disabled, 1. connection from outside of allowed security group is blocked 2. connection from allowed security group is permitted 3. traffic not explicitly allowed (eg. ICMP) is blocked, 4. a security group update takes effect, 5. a security group update for entire port range works 6. a remote security group member addition works, and 7. an established connection stops by deleting a SG rule. 8. multiple overlapping remote rules work, 9. test other protocol functionality by using SCTP protocol 10. test two vms with same mac on the same host in different networks 11. test using multiple security groups 12. 
test stateless security groups when firewall driver is iptables or iptables_hybrid. """ tenant_uuid = uuidutils.generate_uuid() subnet_cidr = '20.0.0.0/24' vms, ports, sgs, network, index_to_host = self._create_resources( tenant_uuid, subnet_cidr) # 0. check that traffic is allowed when port security is disabled self.assert_connection( vms[1].namespace, vms[0].namespace, vms[0].ip, 3333, net_helpers.NetcatTester.TCP) self.assert_connection( vms[2].namespace, vms[0].namespace, vms[0].ip, 3333, net_helpers.NetcatTester.TCP) vms[0].block_until_ping(vms[1].ip) vms[0].block_until_ping(vms[2].ip) vms[1].block_until_ping(vms[2].ip) # Apply security groups to the ports for port, sg in zip(ports, self.index_to_sg): self.safe_client.client.update_port( port['id'], body={'port': {'port_security_enabled': True, 'security_groups': [sgs[sg]['id']]}}) # 1. connection from outside of allowed security group is blocked netcat = net_helpers.NetcatTester( vms[2].namespace, vms[0].namespace, vms[0].ip, 3333, net_helpers.NetcatTester.TCP) # Wait until port update takes effect on the ports common_utils.wait_until_true( netcat.test_no_connectivity, exception=AssertionError( "Still can connect to the VM from different host.") ) netcat.stop_processes() # 2. check if connection from allowed security group is permitted self.assert_connection( vms[1].namespace, vms[0].namespace, vms[0].ip, 3333, net_helpers.NetcatTester.TCP) # 3. check if traffic not explicitly allowed (eg. ICMP) is blocked vms[0].block_until_no_ping(vms[1].ip) vms[0].block_until_no_ping(vms[2].ip) vms[1].block_until_no_ping(vms[2].ip) # 4. check if a security group update takes effect self.assert_no_connection( vms[1].namespace, vms[0].namespace, vms[0].ip, 3344, net_helpers.NetcatTester.TCP) rule1 = self.safe_client.create_security_group_rule( tenant_uuid, sgs[0]['id'], remote_group_id=sgs[0]['id'], direction='ingress', ethertype=constants.IPv4, protocol=constants.PROTO_NAME_TCP, port_range_min=3344, port_range_max=3344) self.assert_connection( vms[1].namespace, vms[0].namespace, vms[0].ip, 3344, net_helpers.NetcatTester.TCP) # 5. check if a security group update for entire port range works self.client.delete_security_group_rule(rule1['id']) self.assert_no_connection( vms[1].namespace, vms[0].namespace, vms[0].ip, 3344, net_helpers.NetcatTester.TCP) self.assert_no_connection( vms[1].namespace, vms[0].namespace, vms[0].ip, 3366, net_helpers.NetcatTester.TCP) rule1 = self.safe_client.create_security_group_rule( tenant_uuid, sgs[0]['id'], remote_group_id=sgs[0]['id'], direction='ingress', ethertype=constants.IPv4, protocol=constants.PROTO_NAME_TCP, port_range_min=1, port_range_max=65535) self.assert_connection( vms[1].namespace, vms[0].namespace, vms[0].ip, 3344, net_helpers.NetcatTester.TCP) self.assert_connection( vms[1].namespace, vms[0].namespace, vms[0].ip, 3366, net_helpers.NetcatTester.TCP) self.client.delete_security_group_rule(rule1['id']) # 6. check if a remote security group member addition works rule2 = self.safe_client.create_security_group_rule( tenant_uuid, sgs[0]['id'], remote_group_id=sgs[1]['id'], direction='ingress', ethertype=constants.IPv4, protocol=constants.PROTO_NAME_TCP, port_range_min=3355, port_range_max=3355) self.assert_connection( vms[2].namespace, vms[0].namespace, vms[0].ip, 3355, net_helpers.NetcatTester.TCP) # 7. check if an established connection stops by deleting # the supporting SG rule. 
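# NOTE: A sketch of what happens in step 7 (the conntrack behaviour is
# an expectation, not copied from the agent code): a fifth VM is created
# below so that a TCP session on port 3355 is already established before
# rule2 is deleted; with the conntrack-based drivers, removing the
# supporting SG rule should also clean up the related conntrack entries,
# which is what cuts the established session (hence the longer sleep
# passed to wait_until_true further down).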
index_to_host.append(index_to_host[2]) self.index_to_sg.append(1) ports.append( self.safe_client.create_port(tenant_uuid, network['id'], self.environment.hosts[ index_to_host[-1]].hostname, security_groups=[sgs[1]['id']])) vms.append( self.useFixture( machine.FakeFullstackMachine( self.environment.hosts[index_to_host[-1]], network['id'], tenant_uuid, self.safe_client, neutron_port=ports[-1], use_dhcp=True))) self.assertEqual(5, len(vms)) vms[4].block_until_boot() netcat = net_helpers.NetcatTester(vms[4].namespace, vms[0].namespace, vms[0].ip, 3355, net_helpers.NetcatTester.TCP) self.addCleanup(netcat.stop_processes) self.assertTrue(netcat.test_connectivity()) self.client.delete_security_group_rule(rule2['id']) common_utils.wait_until_true(lambda: netcat.test_no_connectivity(), sleep=8) netcat.stop_processes() # 8. check if multiple overlapping remote rules work self.safe_client.create_security_group_rule( tenant_uuid, sgs[0]['id'], remote_group_id=sgs[1]['id'], direction='ingress', ethertype=constants.IPv4, protocol=constants.PROTO_NAME_TCP, port_range_min=3333, port_range_max=3333) self.safe_client.create_security_group_rule( tenant_uuid, sgs[0]['id'], remote_group_id=sgs[2]['id'], direction='ingress', ethertype=constants.IPv4) for i in range(2): self.assert_connection( vms[0].namespace, vms[1].namespace, vms[1].ip, 3333, net_helpers.NetcatTester.TCP) self.assert_connection( vms[2].namespace, vms[1].namespace, vms[1].ip, 3333, net_helpers.NetcatTester.TCP) self.assert_connection( vms[3].namespace, vms[0].namespace, vms[0].ip, 8080, net_helpers.NetcatTester.TCP) # 9. check SCTP is supported by security group self.assert_no_connection( vms[1].namespace, vms[0].namespace, vms[0].ip, 3366, net_helpers.NetcatTester.SCTP) self.safe_client.create_security_group_rule( tenant_uuid, sgs[0]['id'], remote_group_id=sgs[0]['id'], direction='ingress', ethertype=constants.IPv4, protocol=constants.PROTO_NUM_SCTP, port_range_min=3366, port_range_max=3366) self.assert_connection( vms[1].namespace, vms[0].namespace, vms[0].ip, 3366, net_helpers.NetcatTester.SCTP) # 10. test two vms with same mac on the same host in different networks self._test_overlapping_mac_addresses() # 11. Check using multiple security groups self._test_using_multiple_security_groups() # 12. test stateless security groups when firewall driver is iptables # or iptables_hybrid. # TODO(njohnston): Re-add the iptables here once it is stable if self.firewall_driver == 'iptables_hybrid': self._test_stateless_security_groups() def _test_using_multiple_security_groups(self): """Test using multiple security groups. This test will do following things: 1. Create three vms with two security groups. vm0, vm1 in sg0; vm2 in sg1. 2. Add SSH and ICMP rules in sg0. vm0 and vm1 can ping and ssh for each other, but can not access between vm0 and vm2. 3. Using multiple security groups(sg0, sg1) for vm0, and sg1 have rules allowed sg0 access(ICMP), so vm0 and vm1 can ping vm2. 4. Then remove sg0 from vm0, we removed ICMP and SSH rules. vm0 and vm1 can not ping and ssh for each other. """ tenant_uuid = uuidutils.generate_uuid() subnet_cidr = '30.0.0.0/24' vms, ports, sgs, _, _ = self._create_resources(tenant_uuid, subnet_cidr) # Apply security groups to the ports for port, sg in zip(ports, self.index_to_sg): self.safe_client.client.update_port( port['id'], body={'port': {'port_security_enabled': True, 'security_groups': [sgs[sg]['id']]}}) # Traffic not explicitly allowed (eg. 
SSH, ICMP) is blocked self.verify_no_connectivity_between_vms( vms[1], vms[0], net_helpers.NetcatTester.TCP, 22) vms[0].block_until_no_ping(vms[1].ip) vms[0].block_until_no_ping(vms[2].ip) vms[1].block_until_no_ping(vms[2].ip) # Add SSH and ICMP allowed in the same security group self.safe_client.create_security_group_rule( tenant_uuid, sgs[0]['id'], remote_group_id=sgs[0]['id'], direction='ingress', ethertype=constants.IPv4, protocol=constants.PROTO_NAME_TCP, port_range_min=22, port_range_max=22) self.verify_connectivity_between_vms( vms[1], vms[0], net_helpers.NetcatTester.TCP, 22) self.verify_no_connectivity_between_vms( vms[2], vms[0], net_helpers.NetcatTester.TCP, 22) self.safe_client.create_security_group_rule( tenant_uuid, sgs[0]['id'], remote_group_id=sgs[0]['id'], direction='ingress', ethertype=constants.IPv4, protocol=constants.PROTO_NAME_ICMP) vms[1].block_until_ping(vms[0].ip) vms[2].block_until_no_ping(vms[0].ip) # Update vm0 to use two security groups # Add security group rules(ICMP) in another security group self.safe_client.client.update_port( ports[0]['id'], body={'port': {'security_groups': [sgs[0]['id'], sgs[1]['id']]}}) self.safe_client.create_security_group_rule( tenant_uuid, sgs[1]['id'], remote_group_id=sgs[0]['id'], direction='ingress', ethertype=constants.IPv4, protocol=constants.PROTO_NAME_ICMP) vms[0].block_until_ping(vms[2].ip) vms[1].block_until_ping(vms[2].ip) vms[2].block_until_no_ping(vms[0].ip) vms[2].block_until_no_ping(vms[1].ip) self.verify_connectivity_between_vms( vms[1], vms[0], net_helpers.NetcatTester.TCP, 22) self.verify_no_connectivity_between_vms( vms[2], vms[0], net_helpers.NetcatTester.TCP, 22) # Remove first security group from port self.safe_client.client.update_port( ports[0]['id'], body={'port': {'security_groups': [sgs[1]['id']]}}) vms[0].block_until_ping(vms[2].ip) vms[1].block_until_ping(vms[2].ip) vms[2].block_until_no_ping(vms[0].ip) vms[2].block_until_no_ping(vms[1].ip) self.verify_no_connectivity_between_vms( vms[1], vms[0], net_helpers.NetcatTester.TCP, 22) def _test_stateless_security_groups(self): """Test stateless security groups. This test will run basic tests for stateless security groups 1. Check the stateless rules are configured for vm interfaces when firewall driver is iptables or iptables_hybrid. 2. Check connection is blocked for stateless security group. 3. Check if connection from allowed security group is permitted. 4. Check if traffic not explicitly allowed (eg. ICMP) is blocked. 5. Check if a security group update for entire port range works. 6. Check conflict when adding stateful and stateless security groups to the same port """ tenant_uuid = uuidutils.generate_uuid() subnet_cidr = '40.0.0.0/24' vms, ports, sgs, _, _ = self._create_resources( tenant_uuid, subnet_cidr, stateful=False) # Apply security groups to the ports for port in ports[:2]: self.safe_client.client.update_port( port['id'], body={'port': {'port_security_enabled': True, 'security_groups': [sgs[0]['id']]}}) # Check the stateless rules are configured for vm interfaces when # firewall driver is iptables or iptables_hybrid. 
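# NOTE: For illustration only (the exact rule shape is an assumption,
# not copied from the agent): with the iptables_hybrid driver the
# stateless rules live in the 'raw' table and disable connection
# tracking for the VM's hybrid device, roughly along the lines of
#   -m physdev --physdev-in tapXXXXXXXX-XX -j CT --notrack
# _validate_stateless_rules() below only requires that the device name
# and the '--notrack' token appear in the same raw-table rule.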
self._validate_stateless_rules(vms[:2]) # Connection is blocked for stateless security group self.assert_no_connection( vms[1].namespace, vms[0].namespace, vms[0].ip, 3333, net_helpers.NetcatTester.TCP) self.assert_no_connection( vms[1].namespace, vms[0].namespace, vms[0].ip, 3333, net_helpers.NetcatTester.TCP, src_port=3344) rule1 = self.safe_client.create_security_group_rule( tenant_uuid, sgs[0]['id'], remote_group_id=sgs[0]['id'], direction='ingress', ethertype=constants.IPv4, protocol=constants.PROTO_NAME_TCP, port_range_min=3344, port_range_max=3344) # Check if connection from allowed security group is permitted self.assert_connection( vms[1].namespace, vms[0].namespace, vms[0].ip, 3333, net_helpers.NetcatTester.TCP, src_port=3344) # Check if traffic not explicitly allowed (eg. ICMP) is blocked vms[0].block_until_no_ping(vms[1].ip) vms[0].block_until_no_ping(vms[2].ip) vms[1].block_until_no_ping(vms[2].ip) # Check if a security group update for entire port range works self.client.delete_security_group_rule(rule1['id']) self.assert_no_connection( vms[1].namespace, vms[0].namespace, vms[0].ip, 3344, net_helpers.NetcatTester.TCP) self.assert_no_connection( vms[1].namespace, vms[0].namespace, vms[0].ip, 3366, net_helpers.NetcatTester.TCP) self.safe_client.create_security_group_rule( tenant_uuid, sgs[0]['id'], remote_group_id=sgs[0]['id'], direction='ingress', ethertype=constants.IPv4, protocol=constants.PROTO_NAME_TCP, port_range_min=1, port_range_max=65535) self.assert_connection( vms[1].namespace, vms[0].namespace, vms[0].ip, 3344, net_helpers.NetcatTester.TCP) self.assert_connection( vms[1].namespace, vms[0].namespace, vms[0].ip, 3366, net_helpers.NetcatTester.TCP) # Update sg1 to be stateful self.safe_client.update_security_group(sgs[1]['id'], **{'stateful': True}) # Check conflict when adding stateful and stateless security groups # to the same port self.assertRaises(nc_exc.Conflict, self.safe_client.client.update_port, ports[0]['id'], body={'port': { 'security_groups': [sgs[0]['id'], sgs[1]['id']]}}) def _validate_stateless_rules(self, vms): """Check if stateless rules from iptables firewall are configured. Raises StatelessRulesNotConfiguredException if no stateless rules are found. """ for vm in vms: iptables = iptables_manager.IptablesManager( namespace=vm.host.host_namespace) vm_tap_device = iptables_firewall.get_hybrid_port_name( vm.neutron_port['id']) common_utils.wait_until_true( lambda: self._is_stateless_configured(iptables, vm_tap_device), exception=StatelessRulesNotConfiguredException( "There are no stateless rules configured for " "interface %s" % vm_tap_device)) @staticmethod def _is_stateless_configured(iptables, vm_tap_device): filter_rules = iptables.get_rules_for_table('raw') # Both the device name and the '--notrack' token must appear in the # same raw-table rule. return any(vm_tap_device in line and '--notrack' in line for line in filter_rules) # NOTE: This can be reused after refactoring the other tests to # one scenario per test. 
def _create_resources(self, tenant_uuid, subnet_cidr, stateful=True): if self.firewall_driver == 'iptables_hybrid': # The iptables_hybrid driver lacks isolation between agents index_to_host = [0] * 4 else: index_to_host = [0, 1, 1, 0] network = self.safe_client.create_network(tenant_uuid) self.safe_client.create_subnet( tenant_uuid, network['id'], subnet_cidr) sgs = [self.safe_client.create_security_group(tenant_uuid, stateful=stateful) for i in range(3)] ports = [ self.safe_client.create_port(tenant_uuid, network['id'], self.environment.hosts[host].hostname, security_groups=[], port_security_enabled=False) for host in index_to_host] self.safe_client.create_security_group_rule( tenant_uuid, sgs[0]['id'], remote_group_id=sgs[0]['id'], direction='ingress', ethertype=constants.IPv4, protocol=constants.PROTO_NAME_TCP, port_range_min=3333, port_range_max=3333) vms = [ self.useFixture( machine.FakeFullstackMachine( self.environment.hosts[host], network['id'], tenant_uuid, self.safe_client, neutron_port=ports[port], use_dhcp=True)) for port, host in enumerate(index_to_host)] # map() is lazy on Python 3 and would never run the lambdas, so # iterate explicitly to actually block. for vm in vms: vm.block_until_boot() for vm in vms: vm.block_until_dhcp_config_done() return vms, ports, sgs, network, index_to_host def _create_vm_on_host( self, project_id, network_id, sg_id, host, mac_address=None): if mac_address: port = self.safe_client.create_port( project_id, network_id, host.hostname, security_groups=[sg_id], mac_address=mac_address) else: port = self.safe_client.create_port( project_id, network_id, host.hostname, security_groups=[sg_id]) return self.useFixture( machine.FakeFullstackMachine( host, network_id, project_id, self.safe_client, neutron_port=port)) def _create_three_vms_first_has_static_mac( self, project_id, allowed_port, subnet_cidr): """Create three VMs. The first VM has a static MAC address and is placed on the first host. The second VM is also placed on the first host and the third VM is placed on the second host. 
""" network = self.safe_client.create_network(project_id) self.safe_client.create_subnet( project_id, network['id'], subnet_cidr) sg = self.safe_client.create_security_group(project_id) self.safe_client.create_security_group_rule( project_id, sg['id'], direction='ingress', ethertype=constants.IPv4, protocol=constants.PROTO_NAME_TCP, port_range_min=allowed_port, port_range_max=allowed_port) vms = [self._create_vm_on_host( project_id, network['id'], sg['id'], self.environment.hosts[0], mac_address="fa:16:3e:de:ad:fe")] if self.firewall_driver == 'iptables_hybrid': # iptables lack isolation between agents, use only a single host vms.extend([ self._create_vm_on_host( project_id, network['id'], sg['id'], self.environment.hosts[0]) for _ in range(2)]) else: vms.extend([ self._create_vm_on_host( project_id, network['id'], sg['id'], host) for host in self.environment.hosts[:2]]) map(lambda vm: vm.block_until_boot(), vms) return vms def verify_connectivity_between_vms(self, src_vm, dst_vm, protocol, port): self.assert_connection( src_vm.namespace, dst_vm.namespace, dst_vm.ip, port, protocol) def verify_no_connectivity_between_vms( self, src_vm, dst_vm, protocol, port): self.assert_no_connection( src_vm.namespace, dst_vm.namespace, dst_vm.ip, port, protocol) def _test_overlapping_mac_addresses(self): project1 = uuidutils.generate_uuid() p1_allowed = 4444 project2 = uuidutils.generate_uuid() p2_allowed = 4445 p1_vms = self._create_three_vms_first_has_static_mac( project1, p1_allowed, '20.0.2.0/24') p2_vms = self._create_three_vms_first_has_static_mac( project2, p2_allowed, '20.0.3.0/24') have_connectivity = [ (p1_vms[0], p1_vms[1], p1_allowed), (p1_vms[1], p1_vms[2], p1_allowed), (p2_vms[0], p2_vms[1], p2_allowed), (p2_vms[1], p2_vms[2], p2_allowed), ] for vm1, vm2, port in have_connectivity: self.verify_connectivity_between_vms( vm1, vm2, net_helpers.NetcatTester.TCP, port) self.verify_connectivity_between_vms( vm2, vm1, net_helpers.NetcatTester.TCP, port) self.verify_no_connectivity_between_vms( vm1, vm2, net_helpers.NetcatTester.TCP, port + 1) self.verify_no_connectivity_between_vms( vm2, vm1, net_helpers.NetcatTester.TCP, port + 1) class SecurityGroupRulesTest(base.BaseFullStackTestCase): def setUp(self): host_descriptions = [environment.HostDescription()] env = environment.Environment(environment.EnvironmentDescription(), host_descriptions) super(SecurityGroupRulesTest, self).setUp(env) def test_security_group_rule_quota(self): project_id = uuidutils.generate_uuid() quota = self.client.show_quota_details(project_id) sg_rules_used = quota['quota']['security_group_rule']['used'] self.assertEqual(0, sg_rules_used) self.safe_client.create_security_group(project_id) quota = self.client.show_quota_details(project_id) sg_rules_used = quota['quota']['security_group_rule']['used'] self.safe_client.update_quota(project_id, 'security_group_rule', sg_rules_used) self.assertRaises(nc_exc.OverQuotaClient, self.safe_client.create_security_group, project_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/test_segmentation_id.py0000644000175000017500000001007700000000000026556 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants from neutronclient.common import exceptions from oslo_utils import uuidutils from neutron.tests.fullstack import base from neutron.tests.fullstack.resources import config from neutron.tests.fullstack.resources import environment from neutron.tests.unit import testlib_api load_tests = testlib_api.module_load_tests class TestSegmentationId(base.BaseFullStackTestCase): scenarios = [ ('Open vSwitch Agent', {'l2_agent_type': constants.AGENT_TYPE_OVS}), ('Linux Bridge Agent', { 'l2_agent_type': constants.AGENT_TYPE_LINUXBRIDGE})] def setUp(self): hosts_description = [ environment.HostDescription( l2_agent_type=self.l2_agent_type, l3_agent=False)] env = environment.Environment( environment.EnvironmentDescription(), hosts_description) super(TestSegmentationId, self).setUp(env) self.tenant_id = uuidutils.generate_uuid() def _create_network(self): seg_id = 100 network = self.safe_client.create_network( self.tenant_id, network_type="vlan", segmentation_id=seg_id, physical_network=config.PHYSICAL_NETWORK_NAME) self.assertEqual(seg_id, network['provider:segmentation_id']) # Ensure that segmentation_id is really set properly in DB network = self.safe_client.client.show_network( network['id'])['network'] self.assertEqual(seg_id, network['provider:segmentation_id']) return network def _update_segmentation_id(self, network): # Now change segmentation_id to some other value new_seg_id = network['provider:segmentation_id'] + 1 new_net_args = {'provider:segmentation_id': new_seg_id} network = self.safe_client.update_network( network['id'], **new_net_args) self.assertEqual( new_seg_id, network['provider:segmentation_id']) # Ensure that segmentation_id was really changed network = self.safe_client.client.show_network( network['id'])['network'] self.assertEqual(new_seg_id, network['provider:segmentation_id']) def test_change_segmentation_id_no_ports_in_network(self): network = self._create_network() self._update_segmentation_id(network) def test_change_segmentation_id_with_unbound_ports_in_network(self): network = self._create_network() self.safe_client.create_subnet( self.tenant_id, network['id'], '20.0.0.0/24') # Unbound port self.safe_client.create_port(self.tenant_id, network['id']) # Port failed to bind self.safe_client.create_port(self.tenant_id, network['id'], "non-existing-host") self._update_segmentation_id(network) def test_change_segmentation_id_with_bound_ports_in_network(self): network = self._create_network() self.safe_client.create_subnet( self.tenant_id, network['id'], '20.0.0.0/24') self.safe_client.create_port(self.tenant_id, network['id'], self.environment.hosts[0].hostname) if self.l2_agent_type == constants.AGENT_TYPE_LINUXBRIDGE: # The Linux Bridge agent doesn't support updating segmentation_id # for the network, so this should raise an exception self.assertRaises(exceptions.BadRequest, self._update_segmentation_id, network) else: self._update_segmentation_id(network) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 
neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/test_subnet.py0000644000175000017500000000711300000000000024702 0ustar00coreycorey00000000000000# Copyright 2019 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from neutron_lib import constants from oslo_utils import uuidutils from neutron.tests.common.exclusive_resources import ip_network from neutron.tests.fullstack import base from neutron.tests.fullstack.resources import environment class TestSubnet(base.BaseFullStackTestCase): def setUp(self): host_descriptions = [ environment.HostDescription(l3_agent=True, dhcp_agent=True), environment.HostDescription()] env = environment.Environment( environment.EnvironmentDescription(network_type='vlan', l2_pop=False), host_descriptions) super(TestSubnet, self).setUp(env) self._project_id = uuidutils.generate_uuid() self._network = self._create_network(self._project_id) def _create_network(self, project_id, name='test_network'): return self.safe_client.create_network(project_id, name=name) def _create_subnet(self, project_id, network_id, cidr, ipv6_address_mode=None, ipv6_ra_mode=None, subnetpool_id=None): ip_version = None if ipv6_address_mode or ipv6_ra_mode: ip_version = constants.IP_VERSION_6 return self.safe_client.create_subnet( project_id, network_id, cidr, enable_dhcp=True, ipv6_address_mode=ipv6_address_mode, ipv6_ra_mode=ipv6_ra_mode, subnetpool_id=subnetpool_id, ip_version=ip_version) def _show_subnet(self, subnet_id): return self.client.show_subnet(subnet_id) def test_create_subnet_ipv4(self): cidr = self.useFixture( ip_network.ExclusiveIPNetwork( '240.0.0.0', '240.255.255.255', '24')).network subnet = self._create_subnet(self._project_id, self._network['id'], cidr) subnet = self._show_subnet(subnet['id']) self.assertEqual(subnet['subnet']['gateway_ip'], str(netaddr.IPNetwork(cidr).network + 1)) def test_create_subnet_ipv6_slaac(self): cidr = self.useFixture( ip_network.ExclusiveIPNetwork( '2001:db8::', '2001:db8::ffff', '64')).network subnet = self._create_subnet(self._project_id, self._network['id'], cidr, ipv6_address_mode='slaac', ipv6_ra_mode='slaac') subnet = self._show_subnet(subnet['id']) self.assertEqual(subnet['subnet']['gateway_ip'], str(netaddr.IPNetwork(cidr).network)) def test_create_subnet_ipv6_prefix_delegation(self): subnet = self._create_subnet(self._project_id, self._network['id'], None, ipv6_address_mode='slaac', ipv6_ra_mode='slaac', subnetpool_id='prefix_delegation') subnet = self._show_subnet(subnet['id']) self.assertIsNone(subnet['subnet']['gateway_ip']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/fullstack/test_trunk.py0000644000175000017500000002743100000000000024552 0ustar00coreycorey00000000000000# Copyright 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import netaddr from neutron_lib import constants from oslo_utils import uuidutils from neutron.common import utils from neutron.services.trunk.drivers.openvswitch.agent import ovsdb_handler from neutron.services.trunk.drivers.openvswitch.agent import trunk_manager from neutron.services.trunk.drivers.openvswitch import utils as trunk_ovs_utils from neutron.tests.fullstack import base from neutron.tests.fullstack.resources import environment from neutron.tests.fullstack.resources import machine def trunk_bridge_does_not_exist(trunk_id): """Return True if the trunk bridge for the given ID does not exist.""" bridge = trunk_manager.TrunkBridge(trunk_id) return not bridge.exists() def make_ip_network(port, network): """Make an IPNetwork object from port and network. Returns an IPNetwork object containing the fixed IP address from the port dictionary, with the prefixlen taken from the network object. :param port: Port dictionary returned by Neutron API :param network: IPNetwork object in which the port's IP will be assigned. """ ip_address = netaddr.IPAddress( port['fixed_ips'][0]['ip_address']) return netaddr.IPNetwork( (ip_address.value, network.prefixlen)) class TrunkTestException(Exception): pass class Network(object): """A helper class to keep persistent info about assigned addresses.""" def __init__(self, prefix, network_cidr, tag=None): self.prefix = prefix self.network = netaddr.IPNetwork(network_cidr) self.neutron_network = None self.neutron_subnet = None self.tag = tag # Currently, only vlan is supported. Pass via __init__ once more are # supported. self.segmentation_type = 'vlan' @property def cidr(self): return str(self.network.cidr) @property def gateway(self): """Return the lowest usable IP address in the given subnet.""" return str(netaddr.IPAddress(self.network.first + 1)) @property def id(self): return self.neutron_network['id'] @property def name(self): return "%s-network" % self.prefix @property def subnet_name(self): return "%s-subnet" % self.prefix class TestTrunkPlugin(base.BaseFullStackTestCase): def setUp(self): host_desc = [environment.HostDescription( l3_agent=False, l2_agent_type=constants.AGENT_TYPE_OVS)] env_desc = environment.EnvironmentDescription(service_plugins='trunk') env = environment.Environment(env_desc, host_desc) super(TestTrunkPlugin, self).setUp(env) self.tenant_id = uuidutils.generate_uuid() self.trunk_network = Network('trunk', '10.0.0.0/24') self.vlan1_network = Network('vlan1', '192.168.0.0/24', tag=10) self.vlan2_network = Network('vlan2', '192.168.1.0/24', tag=20) self.host = self.environment.hosts[0] for network in ( self.trunk_network, self.vlan1_network, self.vlan2_network): self.create_network_and_subnet(network) def create_network_and_subnet(self, network): """Create network and subnet resources in Neutron based on network object. The resource names will be <prefix>-network and <prefix>-subnet, where prefix is taken from the network object. :param network: Network object from this module. 
""" network.neutron_network = self.safe_client.create_network( self.tenant_id, network.name) network.neutron_subnet = self.safe_client.create_subnet( self.tenant_id, network.id, cidr=network.cidr, gateway_ip=network.gateway, name=network.subnet_name, enable_dhcp=False) def create_vlan_aware_vm(self, trunk_network, vlan_networks): """Create a fake machine with one untagged port and subports according vlan_networks parameter. :param trunk_network: Instance of Network where trunk port should be created. :param vlan_networks: List of Network instances where subports should be created. """ trunk_parent_port = self.safe_client.create_port( self.tenant_id, trunk_network.id) vlan_subports = [ self.safe_client.create_port(self.tenant_id, vlan_network.id, mac_address=trunk_parent_port['mac_address']) for vlan_network in vlan_networks] trunk = self.safe_client.create_trunk( self.tenant_id, name='mytrunk', port_id=trunk_parent_port['id'], sub_ports=[ {'port_id': vlan_subport['id'], 'segmentation_type': 'vlan', 'segmentation_id': vlan_network.tag} for vlan_subport, vlan_network in zip(vlan_subports, vlan_networks) ], ) vm = self.useFixture( machine.FakeFullstackTrunkMachine( trunk, self.host, trunk_network.id, self.tenant_id, self.safe_client, neutron_port=trunk_parent_port, bridge_name=trunk_ovs_utils.gen_trunk_br_name(trunk['id']))) for port, vlan_network in zip(vlan_subports, vlan_networks): ip_network = make_ip_network(port, vlan_network.network) vm.add_vlan_interface( port['mac_address'], ip_network, vlan_network.tag) vm.block_until_boot() return vm def create_vm_in_network(self, network): """Create a fake machine in given network.""" return self.useFixture( machine.FakeFullstackMachine( self.host, network.id, self.tenant_id, self.safe_client ) ) def add_subport_to_vm(self, vm, subport_network): """Add subport from subport_network to given vm. :param vm: FakeFullstackMachine instance to with subport should be added. :param subport_network: Network object representing network containing port for subport. """ subport = self.safe_client.create_port( self.tenant_id, subport_network.id, mac_address=vm.neutron_port['mac_address']) subport_spec = { 'port_id': subport['id'], 'segmentation_type': subport_network.segmentation_type, 'segmentation_id': subport_network.tag } self.safe_client.trunk_add_subports( self.tenant_id, vm.trunk['id'], [subport_spec]) ip_network = make_ip_network(subport, subport_network.network) vm.add_vlan_interface( subport['mac_address'], ip_network, subport_network.tag) # NOTE(slaweq): As is described in bug # https://bugs.launchpad.net/neutron/+bug/1687709 when more than one # different ovs-agent with enabled trunk driver is running at a time it # might lead to race contitions between them. # Because of that ovs_agent used for fullstack tests is monkeypatched and # loads trunk driver only if trunk service plugin is enabled. # That makes restriction that only a single set of tests with trunk-enabled # services will run at the same time. def test_trunk_lifecycle(self): """Test life-cycle of a fake VM with trunk port. This test uses 4 fake machines: - vlan_aware_vm (A) that is at the beginning connected to a trunk network and a vlan1 network. - trunk_network_vm (B) that is connected to the trunk network. - vlan1_network_vm (C) that is connected to the vlan1 network. - vlan2_network_vm (D) that is connected to a vlan2 network. 
Scenario steps: - all the vms from above are created - A can talk with B (over the trunk network) - A can talk with C (over the vlan1 network) - A can not talk with D (no leg on the vlan2 network) - subport from the vlan2 network is added to A - A can now talk with D (over the vlan2 network) - subport from the vlan1 network is removed from A - A can talk with B (over the trunk network) - A can not talk with C (no leg on the vlan1 network) - A can talk with D (over the vlan2 network) - A is deleted which leads to removal of trunk bridge - no leftovers like patch ports to the trunk bridge should remain on an integration bridge """ vlan_aware_vm = self.create_vlan_aware_vm( self.trunk_network, [self.vlan1_network] ) trunk_id = vlan_aware_vm.trunk['id'] # Create helper vms with different networks trunk_network_vm = self.create_vm_in_network(self.trunk_network) vlan1_network_vm = self.create_vm_in_network(self.vlan1_network) vlan2_network_vm = self.create_vm_in_network(self.vlan2_network) for vm in trunk_network_vm, vlan1_network_vm, vlan2_network_vm: vm.block_until_boot() # Test connectivity to trunk and subport vlan_aware_vm.block_until_ping(trunk_network_vm.ip) vlan_aware_vm.block_until_ping(vlan1_network_vm.ip) # Subport for vlan2 hasn't been added yet vlan_aware_vm.block_until_no_ping(vlan2_network_vm.ip) # Add another subport and test self.add_subport_to_vm(vlan_aware_vm, self.vlan2_network) vlan_aware_vm.block_until_ping(vlan2_network_vm.ip) # Remove the first subport self.safe_client.trunk_remove_subports( self.tenant_id, trunk_id, [vlan_aware_vm.trunk['sub_ports'][0]]) # vlan1_network_vm now shouldn't be able to talk to vlan_aware_vm vlan_aware_vm.block_until_no_ping(vlan1_network_vm.ip) # but trunk and vlan2 should be able to ping vlan_aware_vm.block_until_ping(trunk_network_vm.ip) vlan_aware_vm.block_until_ping(vlan2_network_vm.ip) # Delete vm and check that patch ports and trunk bridge are gone vlan_aware_vm.destroy() bridge_doesnt_exist_predicate = functools.partial( trunk_bridge_does_not_exist, trunk_id) utils.wait_until_true( bridge_doesnt_exist_predicate, exception=TrunkTestException( 'Trunk bridge with ID %s has not been removed' % trunk_id) ) integration_bridge = self.host.get_bridge(None) no_patch_ports_predicate = functools.partial( lambda bridge: not ovsdb_handler.bridge_has_service_port(bridge), integration_bridge, ) try: utils.wait_until_true(no_patch_ports_predicate) except utils.WaitTimeout: # Create exception object after timeout to provide up-to-date list # of interfaces raise TrunkTestException( "Integration bridge %s still has following ports while some of" " them are patch ports for trunk that were supposed to be " "removed: %s" % ( integration_bridge.br_name, integration_bridge.get_iface_name_list() ) ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3950453 neutron-16.0.0.0b2.dev214/neutron/tests/functional/0000755000175000017500000000000000000000000022141 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/__init__.py0000644000175000017500000000202200000000000024246 0ustar00coreycorey00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ In order to save gate resources, test paths that have similar environmental requirements to the functional path are marked for discovery. """ import os.path from neutron.common import eventlet_utils eventlet_utils.monkey_patch() def load_tests(loader, tests, pattern): this_dir = os.path.dirname(__file__) new_tests = loader.discover(start_dir=this_dir, pattern=pattern) tests.addTests(new_tests) return tests ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3950453 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/0000755000175000017500000000000000000000000023237 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/__init__.py0000644000175000017500000000000000000000000025336 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3950453 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/common/0000755000175000017500000000000000000000000024527 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/common/__init__.py0000644000175000017500000000000000000000000026626 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/common/test_ovs_lib.py0000644000175000017500000004762700000000000027615 0ustar00coreycorey00000000000000# Copyright (c) 2018 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
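# NOTE: Background for the QoS assertions below: OVS models a
# minimum-bandwidth guarantee as a QoS record of type 'linux-htb' whose
# 'queues' column maps queue numbers to Queue records, and each Queue
# carries 'min-rate'/'max-rate'/'burst' values in bits per second in its
# other_config column; that is why the defaults defined below are
# expressed in bps and compared as stringified values.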
import functools import mock from neutron_lib.services.qos import constants as qos_constants from oslo_utils import uuidutils import six from neutron.agent.common import ovs_lib from neutron.agent.linux import ip_lib from neutron.common import utils as common_utils from neutron.plugins.ml2.drivers.openvswitch.agent.common \ import constants as ovs_constants from neutron.tests.functional import base MIN_RATE_DEFAULT = 1000000 MAX_RATE_DEFAULT = 3000000 BURST_DEFAULT = 2000000 QUEUE_NUM_DEFAULT = 'queue_num' OTHER_CONFIG_DEFAULT = {six.u('max-rate'): six.u(str(MAX_RATE_DEFAULT)), six.u('burst'): six.u(str(BURST_DEFAULT)), six.u('min-rate'): six.u(str(MIN_RATE_DEFAULT))} class BaseOVSTestCase(base.BaseSudoTestCase): def setUp(self): super(BaseOVSTestCase, self).setUp() self.br_name = ('br-' + uuidutils.generate_uuid())[:10] self.port_id = ('port-' + uuidutils.generate_uuid())[:8] self.ovs = ovs_lib.OVSBridge(self.br_name) self.elements_to_clean = {'bridges': [], 'devices': [], 'qoses': [], 'queues': []} self.addCleanup(self._clean_system) def _clean_system(self): # NOTE(ralonsoh): the deletion order is important. First we need to # delete any bridge (and the ports attached); then the physical devices # created. QoS registers can be deleted if no port has those rules # assigned. Queues registers can be deleted if no QoS register refers # to those Queues. for bridge in self.elements_to_clean['bridges']: self.ovs.ovsdb.del_br(bridge).execute() for device in self.elements_to_clean['devices']: ip_lib.IPDevice(device).link.delete() for qos in self.elements_to_clean['qoses']: self.ovs.ovsdb.db_destroy('QoS', qos).execute() for queue in self.elements_to_clean['queues']: self.ovs.ovsdb.db_destroy('Queue', queue).execute() def _list_queues(self, queue_id=None): queues = self.ovs.ovsdb.db_list( 'Queue', columns=('_uuid', 'other_config', 'external_ids')).execute() if queue_id: for queue in (queue for queue in queues if queue['_uuid'] == queue_id): return queue else: return None return queues def _create_queue(self, max_kbps=int(MAX_RATE_DEFAULT / 1000), max_burst_kbps=int(BURST_DEFAULT / 1000), min_kbps=int(MIN_RATE_DEFAULT / 1000), neutron_port_id=None, queue_num=None): neutron_port_id = (('port-' + uuidutils.generate_uuid())[:13] if not neutron_port_id else neutron_port_id) queue_num = QUEUE_NUM_DEFAULT if not queue_num else queue_num queue_id = self.ovs._update_queue(neutron_port_id, queue_num, max_kbps=max_kbps, max_burst_kbps=max_burst_kbps, min_kbps=min_kbps) self.elements_to_clean['queues'].append(queue_id) return queue_id, neutron_port_id def _create_qos(self, qos_id=None, queues=None): qos_id = self.ovs._update_qos(qos_id=qos_id, queues=queues) self.elements_to_clean['qoses'].append(qos_id) return qos_id def _list_qos(self, qos_id=None): qoses = self.ovs.ovsdb.db_list( 'QoS', columns=('_uuid', 'queues', 'external_ids', 'type')).execute() if qos_id: for qos in (qos for qos in qoses if qos['_uuid'] == qos_id): return qos else: return None return qoses def _create_bridge(self): self.ovs.ovsdb.add_br(self.br_name).execute() self.elements_to_clean['bridges'].append(self.br_name) def _create_port(self, port_name): self.ovs.ovsdb.add_port(self.br_name, port_name).execute() def _find_port_uuid(self, port_name): return self.ovs.ovsdb.db_get('Port', port_name, '_uuid').execute() def _find_port_qos(self, port_name): return self.ovs.ovsdb.db_get('Port', port_name, 'qos').execute() def _create_dummy_device(self): device_name = ('dummy-' + uuidutils.generate_uuid())[:12] 
ip_lib.IPWrapper().add_dummy(device_name) self.elements_to_clean['devices'].append(device_name) return device_name def _check_value(self, expected_value, retrieve_fn, *args, **kwargs): def check_value(ret, keys_to_check): ret[0] = retrieve_fn(*args, **kwargs) if keys_to_check and isinstance(expected_value, dict): for key in keys_to_check: if ret[0][key] != expected_value[key]: return False return True return ret[0] == expected_value ret = [None] keys_to_check = kwargs.pop('keys_to_check', None) part_check_value = functools.partial(check_value, ret, keys_to_check) try: common_utils.wait_until_true(part_check_value, timeout=5, sleep=1) except common_utils.WaitTimeout: self.fail('Expected value: %s, retrieved value: %s' % (expected_value, ret[0])) def test__update_queue_new(self): queue_id, neutron_port_id = self._create_queue() self.assertIsNotNone(queue_id) external_ids = {six.u('port'): six.u(neutron_port_id), six.u('queue-num'): six.u('queue_num'), six.u('type'): six.u(qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH)} expected = {'_uuid': queue_id, 'other_config': OTHER_CONFIG_DEFAULT, 'external_ids': external_ids} self._check_value(expected, self._list_queues, queue_id) def test__update_queue_update(self): queue_id, neutron_port_id = self._create_queue() self.assertIsNotNone(queue_id) other_config = {six.u('max-rate'): six.u('6000000'), six.u('burst'): six.u('5000000'), six.u('min-rate'): six.u('4000000')} external_ids = {six.u('port'): six.u(neutron_port_id), six.u('queue-num'): six.u('queue_num'), six.u('type'): six.u(qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH)} queue = self._list_queues(queue_id) self.assertIsNotNone(queue) queue_id, _ = self._create_queue(max_kbps=6000, max_burst_kbps=5000, min_kbps=4000, queue_num=queue_id, neutron_port_id=neutron_port_id) self.assertIsNotNone(queue_id) expected = {'_uuid': queue_id, 'other_config': other_config, 'external_ids': external_ids} self._check_value(expected, self._list_queues, queue_id) def test__find_queue(self): queue_id, neutron_port_id = self._create_queue() external_ids = {six.u('port'): six.u(neutron_port_id), six.u('type'): six.u( qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH), six.u('queue-num'): six.u('queue_num')} expected = {'_uuid': queue_id, 'external_ids': external_ids, 'other_config': OTHER_CONFIG_DEFAULT} self._check_value(expected, self.ovs._find_queue, neutron_port_id) def test__list_queues(self): ports = [] queue_ids = [] for _ in range(5): queue_id, neutron_port_id = self._create_queue() queue_ids.append(queue_id) ports.append(neutron_port_id) for idx, port in enumerate(ports): external_ids = {six.u('port'): six.u(ports[idx]), six.u('type'): six.u( qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH), six.u('queue-num'): six.u('queue_num')} expected = {'_uuid': queue_ids[idx], 'external_ids': external_ids, 'other_config': OTHER_CONFIG_DEFAULT} self._check_value([expected], self.ovs._list_queues, port=port, _type=qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH) self._check_value([], self.ovs._list_queues, port=port, _type='other_type') def test__delete_queue(self): queue_id, port_id = self._create_queue() external_ids = {six.u('port'): six.u(port_id), six.u('type'): six.u( qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH), six.u('queue-num'): six.u('queue_num')} expected = {'_uuid': queue_id, 'external_ids': external_ids, 'other_config': OTHER_CONFIG_DEFAULT} self._check_value(expected, self._list_queues, queue_id=queue_id) self.ovs._delete_queue(queue_id) self._check_value(None, self._list_queues, queue_id=queue_id) def 
test__delete_queue_still_used_in_a_qos(self): queue_id, port_id = self._create_queue() queues = {1: queue_id} qos_id_1 = self._create_qos(queues=queues) self.ovs._min_bw_qos_id = uuidutils.generate_uuid() qos_id_2 = self._create_qos(queues=queues) with mock.patch.object(ovs_lib.LOG, 'error') as mock_error: self.assertRaises(RuntimeError, self.ovs._delete_queue, queue_id) qoses = ', '.join(sorted([str(qos_id_1), str(qos_id_2)])) msg = ('Queue %(queue)s was still in use by the following QoS rules: ' '%(qoses)s') mock_error.assert_called_once_with( msg, {'queue': str(queue_id), 'qoses': qoses}) def test__update_qos_new(self): queue_id, port_id = self._create_queue() queues = {1: queue_id} qos_id = self._create_qos(queues=queues) external_ids = {six.u('id'): six.u(self.ovs._min_bw_qos_id), six.u('_type'): six.u( qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH)} expected = {'_uuid': qos_id, 'type': 'linux-htb', 'external_ids': external_ids} self._check_value(expected, self._list_qos, qos_id, keys_to_check=['_uuid', 'type', 'external_ids']) qos = self._list_qos(qos_id) self.assertEqual(queues[1], qos['queues'][1].uuid) def test__update_qos_update(self): queue_id_1, _ = self._create_queue() queues = {1: queue_id_1} qos_id = self._create_qos(queues=queues) external_ids = {six.u('id'): six.u(self.ovs._min_bw_qos_id), six.u('_type'): six.u( qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH)} expected = {'_uuid': qos_id, 'type': 'linux-htb', 'external_ids': external_ids} self._check_value(expected, self._list_qos, qos_id, keys_to_check=['_uuid', 'type', 'external_ids']) qos = self._list_qos(qos_id) self.assertEqual(queues[1], qos['queues'][1].uuid) queue_id_2, _ = self._create_queue() queues[2] = queue_id_2 self._create_qos(qos_id=qos_id, queues=queues) self._check_value(expected, self._list_qos, qos_id, keys_to_check=['_uuid', 'type', 'external_ids']) qos = self._list_qos(qos_id) self.assertEqual(2, len(qos['queues'])) self.assertEqual(queues[1], qos['queues'][1].uuid) self.assertEqual(queues[2], qos['queues'][2].uuid) def test__find_qos(self): queue_id, _ = self._create_queue() queues = {1: queue_id} qos_id = self._create_qos(queues=queues) self._check_value((qos_id, queues), self.ovs._find_qos) def test__set_port_qos(self): port_name = 'test_port' self._create_bridge() self._create_port(port_name) self._check_value([], self._find_port_qos, port_name) qos_id = self._create_qos() self.ovs._set_port_qos(port_name, qos_id=qos_id) self._check_value(qos_id, self._find_port_qos, port_name) self.ovs._set_port_qos(port_name) self._check_value([], self._find_port_qos, port_name) def test_get_bridge_ports(self): self._create_bridge() device_names = [] for _ in range(5): device_name = self._create_dummy_device() device_names.append(device_name) self._create_port(device_name) bridge_ports = self.ovs.get_bridge_ports('') device_names.sort() bridge_ports.sort() self.assertEqual(device_names, bridge_ports) def test__set_queue_for_minimum_bandwidth(self): self._create_bridge() self.ovs._set_queue_for_minimum_bandwidth(1234) flows = self.ovs.dump_flows_for_table(ovs_constants.LOCAL_SWITCHING) expected = 'priority=200,reg4=0,in_port=1234 actions=set_queue:1234,' \ 'load:0x1->NXM_NX_REG4[0],resubmit(,0)' self.assertIn(expected, flows) def test__unset_queue_for_minimum_bandwidth(self): self.test__set_queue_for_minimum_bandwidth() self.ovs._unset_queue_for_minimum_bandwidth(1234) flows = self.ovs.dump_flows_for_table(ovs_constants.LOCAL_SWITCHING) expected = 'in_port=1234' self.assertNotIn(expected, flows) def 
test_update_minimum_bandwidth_queue(self): port_name = 'test_output_port_1' self._create_bridge() self._create_port(port_name) queue_num = 1 queue_id, port_id = self._create_queue(neutron_port_id=self.port_id) queues = {queue_num: queue_id} qos_id = self._create_qos(queues=queues) self.ovs.update_minimum_bandwidth_queue(self.port_id, [port_name], queue_num, 1800) self._check_value(qos_id, self._find_port_qos, port_name) external_ids = {six.u('port'): six.u(port_id), six.u('type'): six.u( qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH), six.u('queue-num'): six.u('queue_num')} other_config = {six.u('max-rate'): six.u(str(MAX_RATE_DEFAULT)), six.u('burst'): six.u(str(BURST_DEFAULT)), six.u('min-rate'): six.u('1800000')} expected = {'_uuid': queue_id, 'external_ids': external_ids, 'other_config': other_config} self._check_value(expected, self._list_queues, queue_id) def test_update_minimum_bandwidth_queue_no_qos_no_queue(self): port_name = 'test_output_port_2' self._create_bridge() self._create_port(port_name) queue_num = 1 self.ovs.update_minimum_bandwidth_queue(self.port_id, [port_name], queue_num, 1700) qos_id = self._find_port_qos(port_name) qos = self._list_qos(qos_id) queue_id = qos['queues'][1].uuid external_ids = {six.u('port'): six.u(self.port_id), six.u('type'): six.u( qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH), six.u('queue-num'): six.u(str(queue_num))} other_config = {six.u('min-rate'): six.u('1700000')} expected = {'_uuid': queue_id, 'external_ids': external_ids, 'other_config': other_config} self._check_value(expected, self._list_queues, queue_id) self.elements_to_clean['qoses'].append(qos_id) self.elements_to_clean['queues'].append(queue_id) def test_delete_minimum_bandwidth_queue(self): queue_id_1, neutron_port_id_1 = self._create_queue(queue_num=1) queue_id_2, neutron_port_id_2 = self._create_queue(queue_num=2) queues = {1: queue_id_1, 2: queue_id_2} qos_id = self._create_qos(queues=queues) self._check_value({'_uuid': qos_id}, self._list_qos, qos_id, keys_to_check=['_uuid']) qos = self._list_qos(qos_id) self.assertEqual(queue_id_1, qos['queues'][1].uuid) self.assertEqual(queue_id_2, qos['queues'][2].uuid) self.ovs.delete_minimum_bandwidth_queue(neutron_port_id_2) self._check_value({'_uuid': qos_id}, self._list_qos, qos_id, keys_to_check=['_uuid']) qos = self._list_qos(qos_id) self.assertEqual(1, len(qos['queues'])) self.assertEqual(queue_id_1, qos['queues'][1].uuid) self.ovs.delete_minimum_bandwidth_queue(neutron_port_id_1) self._check_value({'_uuid': qos_id}, self._list_qos, qos_id, keys_to_check=['_uuid']) qos = self._list_qos(qos_id) self.assertEqual(0, len(qos['queues'])) def test_delete_minimum_bandwidth_queue_no_qos_found(self): queue_id, neutron_port_id = self._create_queue(queue_num=1) self.addCleanup(self.ovs._delete_queue, queue_id) # Check that it will not raise any exception even if there is no # qos with associated queues self.ovs.delete_minimum_bandwidth_queue(neutron_port_id) # And verify that this queue wasn't in fact deleted as there was no # qos found queue = self._list_queues(queue_id) self.assertEqual(queue_id, queue['_uuid']) def test_clear_minimum_bandwidth_qos(self): queue_id_1, _ = self._create_queue(queue_num=1) queue_id_2, _ = self._create_queue(queue_num=2) queue_id_3, port_id_3 = self._create_queue() queues = {1: queue_id_1, 2: queue_id_2} qos_id = self._create_qos(queues=queues) # NOTE(ralonsoh): we need to clean only the QoS rule created in this # test in order to avoid any interference with other tests. 
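# clear_minimum_bandwidth_qos() removes every QoS register returned by
# _list_qos(), so the registers are pre-fetched filtered by this test's
# _min_bw_qos_id and replayed through the mock; the cleanup is thereby
# scoped to this test's registers, while queue_id_3, which no QoS
# register references here, is expected to survive untouched.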
        qoses = self.ovs._list_qos(_id=self.ovs._min_bw_qos_id)
        with mock.patch.object(self.ovs, '_list_qos') as mock_list_qos:
            mock_list_qos.return_value = qoses
            self.ovs.clear_minimum_bandwidth_qos()
        self._check_value(None, self._list_qos, qos_id=qos_id)
        self._check_value(None, self._list_queues, queue_id=queue_id_1)
        self._check_value(None, self._list_queues, queue_id=queue_id_2)
        external_ids = {six.u('port'): six.u(port_id_3),
                        six.u('type'): six.u(
                            qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH),
                        six.u('queue-num'): six.u('queue_num')}
        expected = {'_uuid': queue_id_3,
                    'external_ids': external_ids,
                    'other_config': OTHER_CONFIG_DEFAULT}
        self._check_value(expected, self._list_queues, queue_id=queue_id_3)

    def test_get_egress_min_bw_for_port(self):
        self.ovs.update_minimum_bandwidth_queue(self.port_id, [], 1, 2800)
        self._check_value(2800, self.ovs.get_egress_min_bw_for_port,
                          port_id=self.port_id)

    def test_set_controllers_inactivity_probe(self):
        self._create_bridge()
        self.ovs.set_controller(['tcp:127.0.0.1:6633'])
        self.ovs.set_controllers_inactivity_probe(8)
        self.assertEqual(8000,
                         self.ovs.db_get_val('Controller', self.br_name,
                                             'inactivity_probe'))

# --- package: neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/l2/ (empty __init__.py) ---
# --- file: neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/l2/base.py ---
# Copyright (c) 2015 Red Hat, Inc.
# Copyright (c) 2015 SUSE Linux Products GmbH
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
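# A minimal, self-contained sketch of the eventlet handshake that
# OVSOFControllerHelper below relies on: a spawned green thread signals
# readiness through one Event and parks on a second Event until cleanup
# releases it. Only the eventlet calls themselves are real; every name in
# this sketch is illustrative, not part of the module below.
import eventlet
import eventlet.event

_init_done_ev = eventlet.event.Event()
_main_ev = eventlet.event.Event()


def _sketch_main():
    _init_done_ev.send()   # signal: "controller is up"
    _main_ev.wait()        # park until the test tears us down


_thread = eventlet.spawn(_sketch_main)
_init_done_ev.wait()       # setUp() side: block until ready
_main_ev.send()            # cleanup side: release the thread
_thread.wait()             # join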
import random import eventlet import fixtures import mock from neutron_lib import constants as n_const from neutron_lib.utils import net from oslo_config import cfg from oslo_utils import uuidutils from neutron.agent.common import ovs_lib from neutron.agent.common import polling from neutron.agent.l2 import l2_agent_extensions_manager as ext_manager from neutron.agent.linux import interface from neutron.common import utils from neutron.conf.agent import common as agent_config from neutron.conf.agent import ovs_conf as ovs_agent_config from neutron.conf import common as common_config from neutron.conf.plugins.ml2.drivers import agent from neutron.conf.plugins.ml2.drivers import ovs_conf from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.plugins.ml2.drivers.openvswitch.agent.extension_drivers \ import qos_driver as ovs_qos_driver from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import main as main_mod from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_neutron_agent \ as ovs_agent from neutron.tests.common import net_helpers from neutron.tests.functional.agent.linux import base class OVSOFControllerHelper(object): """Helper class that runs os-ken openflow controller.""" def start_of_controller(self, conf): self.br_int_cls = None self.br_tun_cls = None self.br_phys_cls = None self.init_done = False self.init_done_ev = eventlet.event.Event() self.main_ev = eventlet.event.Event() self.addCleanup(self._kill_main) retry_count = 3 while True: # Try a few different ports as a port conflict # causes the test to fail. conf.set_override('of_listen_port', net_helpers.get_free_namespace_port( n_const.PROTO_NAME_TCP), group='OVS') cfg.CONF.set_override('of_listen_port', conf.OVS.of_listen_port, group='OVS') main_mod.init_config() self._main_thread = eventlet.spawn(self._kick_main) # Wait for _kick_main -> openflow main -> _agent_main # NOTE(yamamoto): This complexity came from how we run openflow # controller. Main routine blocks while running the embedded # openflow controller. In that case, the agent rpc_loop runs in # another thread. However, for FT we need to run setUp() and # test_xxx() in the same thread. So I made this run openflow main # in a separate thread instead. try: while not self.init_done: self.init_done_ev.wait() break except fixtures.TimeoutException: self._kill_main() retry_count -= 1 if retry_count < 0: raise Exception('port allocation failed') def _kick_main(self): with mock.patch.object(ovs_agent, 'main', self._agent_main): main_mod.main() def _kill_main(self): self.main_ev.send() self._main_thread.wait() def _agent_main(self, bridge_classes): self.br_int_cls = bridge_classes['br_int'] self.br_phys_cls = bridge_classes['br_phys'] self.br_tun_cls = bridge_classes['br_tun'] # signal to setUp() self.init_done = True self.init_done_ev.send() self.main_ev.wait() class OVSAgentTestFramework(base.BaseOVSLinuxTestCase, OVSOFControllerHelper): def setUp(self): super(OVSAgentTestFramework, self).setUp() agent_rpc = ('neutron.plugins.ml2.drivers.openvswitch.agent.' 
'ovs_neutron_agent.OVSPluginApi') mock.patch(agent_rpc).start() mock.patch('neutron.agent.rpc.PluginReportStateAPI').start() self.br_int = utils.get_rand_name(n_const.DEVICE_NAME_MAX_LEN, prefix='br-int') self.br_tun = utils.get_rand_name(n_const.DEVICE_NAME_MAX_LEN, prefix='br-tun') self.br_phys = utils.get_rand_name(n_const.DEVICE_NAME_MAX_LEN, prefix='br-phys') patch_name_len = n_const.DEVICE_NAME_MAX_LEN - len("-patch-tun") self.patch_tun = "%s-patch-tun" % self.br_int[patch_name_len:] self.patch_int = "%s-patch-int" % self.br_tun[patch_name_len:] self.ovs = ovs_lib.BaseOVS() self.config = self._configure_agent() self.driver = interface.OVSInterfaceDriver(self.config) self.namespace = self.useFixture(net_helpers.NamespaceFixture()).name self.start_of_controller(self.config) def _get_config_opts(self): config = cfg.ConfigOpts() config.register_opts(common_config.core_opts) agent.register_agent_opts(config) ovs_conf.register_ovs_agent_opts(config) agent_config.register_interface_opts(config) agent_config.register_interface_driver_opts_helper(config) agent_config.register_agent_state_opts_helper(config) ovs_agent_config.register_ovs_agent_opts(config) ext_manager.register_opts(config) return config def _configure_agent(self): config = self._get_config_opts() config.set_override( 'interface_driver', 'neutron.agent.linux.interface.OVSInterfaceDriver') config.set_override('integration_bridge', self.br_int, "OVS") config.set_override('tunnel_bridge', self.br_tun, "OVS") config.set_override('int_peer_patch_port', self.patch_tun, "OVS") config.set_override('tun_peer_patch_port', self.patch_int, "OVS") config.set_override('host', 'ovs-agent') return config def _bridge_classes(self): return { 'br_int': self.br_int_cls, 'br_phys': self.br_phys_cls, 'br_tun': self.br_tun_cls } def create_agent(self, create_tunnels=True, ancillary_bridge=None, local_ip='192.168.10.1'): if create_tunnels: tunnel_types = [n_const.TYPE_VXLAN] else: tunnel_types = None bridge_mappings = ['physnet:%s' % self.br_phys] self.config.set_override('tunnel_types', tunnel_types, "AGENT") self.config.set_override('polling_interval', 1, "AGENT") self.config.set_override('local_ip', local_ip, "OVS") self.config.set_override('bridge_mappings', bridge_mappings, "OVS") # Physical bridges should be created prior to running self._bridge_classes()['br_phys'](self.br_phys).create() ext_mgr = ext_manager.L2AgentExtensionsManager(self.config) with mock.patch.object(ovs_qos_driver.QosOVSAgentDriver, '_minimum_bandwidth_initialize'): agent = ovs_agent.OVSNeutronAgent(self._bridge_classes(), ext_mgr, self.config) self.addCleanup(self.ovs.delete_bridge, self.br_int) if tunnel_types: self.addCleanup(self.ovs.delete_bridge, self.br_tun) self.addCleanup(self.ovs.delete_bridge, self.br_phys) agent.sg_agent = mock.Mock() agent.ancillary_brs = [] if ancillary_bridge: agent.ancillary_brs.append(ancillary_bridge) return agent def _mock_get_events(self, agent, polling_manager, ports): get_events = polling_manager.get_events p_ids = [p['id'] for p in ports] def filter_events(): events = get_events() filtered_ports = [] for dev in events['added']: iface_id = agent.int_br.portid_from_external_ids( dev.get('external_ids', [])) if iface_id in p_ids: # if the event is not about a port that was created by # this test, we filter the event out. Since these tests are # not run in isolation processing all the events might make # some test fail ( e.g. 
the agent might keep resycing # because it keeps finding not ready ports that are created # by other tests) filtered_ports.append(dev) return {'added': filtered_ports, 'removed': events['removed'], 'modified': []} polling_manager.get_events = mock.Mock(side_effect=filter_events) def stop_agent(self, agent, rpc_loop_thread): agent.run_daemon_loop = False rpc_loop_thread.wait() def start_agent(self, agent, ports=None, unplug_ports=None): if unplug_ports is None: unplug_ports = [] if ports is None: ports = [] self.setup_agent_rpc_mocks(agent, unplug_ports) polling_manager = polling.InterfacePollingMinimizer() self._mock_get_events(agent, polling_manager, ports) self.addCleanup(polling_manager.stop) polling_manager.start() utils.wait_until_true( polling_manager._monitor.is_active) agent.check_ovs_status = mock.Mock( return_value=constants.OVS_NORMAL) self.agent_thread = eventlet.spawn(agent.rpc_loop, polling_manager) self.addCleanup(self.stop_agent, agent, self.agent_thread) return polling_manager def _create_test_port_dict(self): return {'id': uuidutils.generate_uuid(), 'mac_address': net.get_random_mac( 'fa:16:3e:00:00:00'.split(':')), 'fixed_ips': [{ 'ip_address': '10.%d.%d.%d' % ( random.randint(3, 254), random.randint(3, 254), random.randint(3, 254))}], 'vif_name': utils.get_rand_name( self.driver.DEV_NAME_LEN, self.driver.DEV_NAME_PREFIX)} def _create_test_network_dict(self): return {'id': uuidutils.generate_uuid(), 'tenant_id': uuidutils.generate_uuid()} def _plug_ports(self, network, ports, agent, bridge=None, namespace=None): if namespace is None: namespace = self.namespace for port in ports: bridge = bridge or agent.int_br self.driver.plug( network.get('id'), port.get('id'), port.get('vif_name'), port.get('mac_address'), bridge.br_name, namespace=namespace) ip_cidrs = ["%s/8" % (port.get('fixed_ips')[0][ 'ip_address'])] self.driver.init_l3(port.get('vif_name'), ip_cidrs, namespace=namespace) def _unplug_ports(self, ports, agent): for port in ports: self.driver.unplug( port.get('vif_name'), agent.int_br.br_name, self.namespace) def _get_device_details(self, port, network): dev = {'device': port['id'], 'port_id': port['id'], 'network_id': network['id'], 'network_type': network.get('network_type', 'vlan'), 'physical_network': network.get('physical_network', 'physnet'), 'segmentation_id': network.get('segmentation_id', 1), 'fixed_ips': port['fixed_ips'], 'device_owner': n_const.DEVICE_OWNER_COMPUTE_PREFIX, 'admin_state_up': True} return dev def assert_bridge(self, br, exists=True): self.assertEqual(exists, self.ovs.bridge_exists(br)) def assert_patch_ports(self, agent): def get_peer(port): return agent.int_br.db_get_val( 'Interface', port, 'options', check_error=True) utils.wait_until_true( lambda: get_peer(self.patch_int) == {'peer': self.patch_tun}) utils.wait_until_true( lambda: get_peer(self.patch_tun) == {'peer': self.patch_int}) def assert_bridge_ports(self): for port in [self.patch_tun, self.patch_int]: self.assertTrue(self.ovs.port_exists(port)) def assert_vlan_tags(self, ports, agent): for port in ports: res = agent.int_br.db_get_val('Port', port.get('vif_name'), 'tag') self.assertTrue(res) def _expected_plugin_rpc_call(self, call, expected_devices, is_up=True): """Helper to check expected rpc call are received :param call: The call to check :param expected_devices: The device for which call is expected :param is_up: True if expected_devices are devices that are set up, False if expected_devices are devices that are set down """ if is_up: rpc_devices = [ dev for args in 
call.call_args_list for dev in args[0][1]] else: rpc_devices = [ dev for args in call.call_args_list for dev in args[0][2]] for dev in rpc_devices: if dev in expected_devices: expected_devices.remove(dev) # reset mock otherwise if the mock is called again the same call param # will be processed again call.reset_mock() return not expected_devices def create_test_ports(self, amount=3, **kwargs): ports = [] for x in range(amount): ports.append(self._create_test_port_dict(**kwargs)) return ports def _mock_update_device(self, context, devices_up, devices_down, agent_id, host=None, refresh_tunnels=False): dev_up = [] dev_down = [] for port in self.ports: if devices_up and port['id'] in devices_up: dev_up.append(port['id']) if devices_down and port['id'] in devices_down: dev_down.append({'device': port['id'], 'exists': True}) return {'devices_up': dev_up, 'failed_devices_up': [], 'devices_down': dev_down, 'failed_devices_down': []} def setup_agent_rpc_mocks(self, agent, unplug_ports): def mock_device_details(context, devices, agent_id, host=None, agent_restarted=False): details = [] for port in self.ports: if port['id'] in devices: dev = self._get_device_details( port, self.network) details.append(dev) ports_to_unplug = [x for x in unplug_ports if x['id'] in devices] if ports_to_unplug: self._unplug_ports(ports_to_unplug, self.agent) return {'devices': details, 'failed_devices': []} (agent.plugin_rpc.get_devices_details_list_and_failed_devices. side_effect) = mock_device_details agent.plugin_rpc.update_device_list.side_effect = ( self._mock_update_device) def _prepare_resync_trigger(self, agent): def mock_device_raise_exception(context, devices_up, devices_down, agent_id, host=None): agent.plugin_rpc.update_device_list.side_effect = ( self._mock_update_device) raise Exception('Exception to trigger resync') self.agent.plugin_rpc.update_device_list.side_effect = ( mock_device_raise_exception) def _prepare_failed_dev_up_trigger(self, agent): def mock_failed_devices_up(context, devices_up, devices_down, agent_id, host=None, refresh_tunnels=False): failed_devices = [] devices = list(devices_up) # first port fails if self.ports[0]['id'] in devices_up: # reassign side_effect so that next RPC call will succeed agent.plugin_rpc.update_device_list.side_effect = ( self._mock_update_device) devices.remove(self.ports[0]['id']) failed_devices.append(self.ports[0]['id']) return {'devices_up': devices, 'failed_devices_up': failed_devices, 'devices_down': [], 'failed_devices_down': []} self.agent.plugin_rpc.update_device_list.side_effect = ( mock_failed_devices_up) def _prepare_failed_dev_down_trigger(self, agent): def mock_failed_devices_down(context, devices_up, devices_down, agent_id, host=None, refresh_tunnels=False): # first port fails failed_port_id = self.ports[0]['id'] failed_devices_down = [] dev_down = [ {'device': p['id'], 'exists': True} for p in self.ports if p['id'] in devices_down and ( p['id'] != failed_port_id)] # check if it's the call to set devices down and if the device # that is supposed to fail is in the call then modify the # side_effect so that next RPC call will succeed. 
            if devices_down and failed_port_id in devices_down:
                agent.plugin_rpc.update_device_list.side_effect = (
                    self._mock_update_device)
                failed_devices_down.append(failed_port_id)
            return {'devices_up': devices_up,
                    'failed_devices_up': [],
                    'devices_down': dev_down,
                    'failed_devices_down': failed_devices_down}

        self.agent.plugin_rpc.update_device_list.side_effect = (
            mock_failed_devices_down)

    def wait_until_ports_state(self, ports, up, timeout=60):
        port_ids = [p['id'] for p in ports]
        utils.wait_until_true(
            lambda: self._expected_plugin_rpc_call(
                self.agent.plugin_rpc.update_device_list, port_ids, up),
            timeout=timeout)

    def setup_agent_and_ports(self, port_dicts, create_tunnels=True,
                              ancillary_bridge=None, trigger_resync=False,
                              failed_dev_up=False, failed_dev_down=False,
                              network=None):
        self.ports = port_dicts
        self.agent = self.create_agent(create_tunnels=create_tunnels,
                                       ancillary_bridge=ancillary_bridge)
        self.agent.iter_num += 1
        self.polling_manager = self.start_agent(self.agent, ports=self.ports)
        self.network = network or self._create_test_network_dict()
        if trigger_resync:
            self._prepare_resync_trigger(self.agent)
        elif failed_dev_up:
            self._prepare_failed_dev_up_trigger(self.agent)
        elif failed_dev_down:
            self._prepare_failed_dev_down_trigger(self.agent)
        self._plug_ports(self.network, self.ports, self.agent,
                         bridge=ancillary_bridge)

    def plug_ports_to_phys_br(self, network, ports, namespace=None):
        physical_network = network.get('physical_network', 'physnet')
        phys_segmentation_id = network.get('segmentation_id', None)
        network_type = network.get('network_type', 'flat')
        phys_br = self.agent.phys_brs[physical_network]
        self._plug_ports(network, ports, self.agent, bridge=phys_br,
                         namespace=namespace)
        if network_type == 'flat':
            # NOTE(slaweq): for OVS implementations remove the DEAD VLAN tag
            # on ports that belong to a flat network. The DEAD VLAN tag is
            # added to each newly created port. This is related to lp#1767422
            for port in ports:
                phys_br.clear_db_attribute("Port", port['vif_name'], "tag")
        elif phys_segmentation_id and network_type == 'vlan':
            for port in ports:
                phys_br.set_db_attribute(
                    "Port", port['vif_name'], "tag", phys_segmentation_id)

# --- package: neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/l2/extensions/ (empty __init__.py) ---
# --- file: neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/l2/extensions/test_ovs_agent_qos_extension.py ---
# Copyright (c) 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import mock from neutron_lib import constants from oslo_utils import uuidutils import testscenarios from neutron.api.rpc.callbacks.consumer import registry as consumer_reg from neutron.api.rpc.callbacks import events from neutron.api.rpc.callbacks import resources from neutron.objects.qos import policy from neutron.objects.qos import rule from neutron.tests.common.agents import l2_extensions from neutron.tests.functional.agent.l2 import base load_tests = testscenarios.load_tests_apply_scenarios TEST_POLICY_ID1 = "a2d72369-4246-4f19-bd3c-af51ec8d70cd" TEST_POLICY_ID2 = "46ebaec0-0570-43ac-82f6-60d2b03168c5" TEST_DSCP_MARK_1 = 14 TEST_DSCP_MARK_2 = 30 class OVSAgentQoSExtensionTestFramework(base.OVSAgentTestFramework): test_dscp_marking_rule_1 = rule.QosDscpMarkingRule( context=None, qos_policy_id=TEST_POLICY_ID1, id="9f126d84-551a-4dcf-bb01-0e9c0df0c793", dscp_mark=TEST_DSCP_MARK_1) test_dscp_marking_rule_2 = rule.QosDscpMarkingRule( context=None, qos_policy_id=TEST_POLICY_ID2, id="7f126d84-551a-4dcf-bb01-0e9c0df0c793", dscp_mark=TEST_DSCP_MARK_2) test_bw_limit_rule_1 = rule.QosBandwidthLimitRule( context=None, qos_policy_id=TEST_POLICY_ID1, id="5f126d84-551a-4dcf-bb01-0e9c0df0c793", max_kbps=1000, max_burst_kbps=10) test_bw_limit_rule_2 = rule.QosBandwidthLimitRule( context=None, qos_policy_id=TEST_POLICY_ID2, id="fa9128d9-44af-49b2-99bb-96548378ad42", max_kbps=900, max_burst_kbps=9) def setUp(self): super(OVSAgentQoSExtensionTestFramework, self).setUp() self.config.set_override('extensions', ['qos'], 'agent') self._set_pull_mock() self.set_test_qos_rules(TEST_POLICY_ID1, [self.test_bw_limit_rule_1, self.test_dscp_marking_rule_1]) self.set_test_qos_rules(TEST_POLICY_ID2, [self.test_bw_limit_rule_2, self.test_dscp_marking_rule_2]) def _set_pull_mock(self): self.qos_policies = {} def _pull_mock(context, resource_type, resource_id): return self.qos_policies[resource_id] self.pull = mock.patch( 'neutron.api.rpc.handlers.resources_rpc.' 
'ResourcesPullRpcApi.pull').start() self.pull.side_effect = _pull_mock def set_test_qos_rules(self, policy_id, policy_rules): """This function sets the policy test rules to be exposed.""" qos_policy = policy.QosPolicy( context=None, project_id=uuidutils.generate_uuid(), id=policy_id, name="Test Policy Name", description="This is a policy for testing purposes", shared=False, rules=policy_rules) qos_policy.obj_reset_changes() self.qos_policies[policy_id] = qos_policy def _create_test_port_dict(self, policy_id=None): port_dict = super(OVSAgentQoSExtensionTestFramework, self)._create_test_port_dict() port_dict['qos_policy_id'] = policy_id port_dict['network_qos_policy_id'] = None return port_dict def _get_device_details(self, port, network): dev = super(OVSAgentQoSExtensionTestFramework, self)._get_device_details(port, network) dev['qos_policy_id'] = port['qos_policy_id'] return dev def _assert_bandwidth_limit_rule_is_set(self, port, rule): if rule.direction == constants.INGRESS_DIRECTION: max_rate, burst = ( self.agent.int_br.get_ingress_bw_limit_for_port( port['vif_name'])) else: max_rate, burst = ( self.agent.int_br.get_egress_bw_limit_for_port( port['vif_name'])) self.assertEqual(max_rate, rule.max_kbps) self.assertEqual(burst, rule.max_burst_kbps) def _assert_bandwidth_limit_rule_not_set(self, port, rule_direction): if rule_direction == constants.INGRESS_DIRECTION: max_rate, burst = ( self.agent.int_br.get_ingress_bw_limit_for_port( port['vif_name'])) else: max_rate, burst = ( self.agent.int_br.get_egress_bw_limit_for_port( port['vif_name'])) self.assertIsNone(max_rate) self.assertIsNone(burst) def wait_until_bandwidth_limit_rule_applied(self, port, rule): if rule and rule.direction == constants.INGRESS_DIRECTION: l2_extensions.wait_until_ingress_bandwidth_limit_rule_applied( self.agent.int_br, port['vif_name'], rule) else: l2_extensions.wait_until_egress_bandwidth_limit_rule_applied( self.agent.int_br, port['vif_name'], rule) def _assert_dscp_marking_rule_is_set(self, port, dscp_rule): port_num = self.agent.int_br._get_port_val(port['vif_name'], 'ofport') flows = self.agent.int_br.dump_flows_for(table='0', in_port=str(port_num)) tos_mark = l2_extensions.extract_mod_nw_tos_action(flows) self.assertEqual(dscp_rule.dscp_mark << 2, tos_mark) def _assert_dscp_marking_rule_not_set(self, port): port_num = self.agent.int_br._get_port_val(port['vif_name'], 'ofport') flows = self.agent.int_br.dump_flows_for(table='0', in_port=str(port_num)) tos_mark = l2_extensions.extract_mod_nw_tos_action(flows) self.assertIsNone(tos_mark) def wait_until_dscp_marking_rule_applied(self, port, dscp_mark): l2_extensions.wait_until_dscp_marking_rule_applied_ovs( self.agent.int_br, port['vif_name'], dscp_mark) def _create_port_with_qos(self): port_dict = self._create_test_port_dict() port_dict['qos_policy_id'] = TEST_POLICY_ID1 self.setup_agent_and_ports([port_dict]) self.wait_until_ports_state(self.ports, up=True) self.wait_until_bandwidth_limit_rule_applied(port_dict, self.test_bw_limit_rule_1) return port_dict class TestOVSAgentQosExtension(OVSAgentQoSExtensionTestFramework): scenarios = [ ('ingress', {'direction': constants.INGRESS_DIRECTION}), ('egress', {'direction': constants.EGRESS_DIRECTION}) ] def setUp(self): super(TestOVSAgentQosExtension, self).setUp() self.test_bw_limit_rule_1.direction = self.direction self.test_bw_limit_rule_2.direction = self.direction @property def reverse_direction(self): if self.direction == constants.INGRESS_DIRECTION: return constants.EGRESS_DIRECTION elif self.direction == 
constants.EGRESS_DIRECTION: return constants.INGRESS_DIRECTION def test_port_creation_with_bandwidth_limit(self): """Make sure bandwidth limit rules are set in low level to ports.""" self.setup_agent_and_ports( port_dicts=self.create_test_ports(amount=1, policy_id=TEST_POLICY_ID1)) self.wait_until_ports_state(self.ports, up=True) for port in self.ports: self._assert_bandwidth_limit_rule_is_set( port, self.test_bw_limit_rule_1) def test_port_creation_with_bandwidth_limits_both_directions(self): """Make sure bandwidth limit rules are set in low level to ports. This test is checking applying rules for both possible directions at once """ reverse_direction_bw_limit_rule = copy.deepcopy( self.test_bw_limit_rule_1) reverse_direction_bw_limit_rule.direction = self.reverse_direction self.qos_policies[TEST_POLICY_ID1].rules.append( reverse_direction_bw_limit_rule) self.setup_agent_and_ports( port_dicts=self.create_test_ports(amount=1, policy_id=TEST_POLICY_ID1)) self.wait_until_ports_state(self.ports, up=True) for port in self.ports: self._assert_bandwidth_limit_rule_is_set( port, self.test_bw_limit_rule_1) self._assert_bandwidth_limit_rule_is_set( port, reverse_direction_bw_limit_rule) def test_port_creation_with_different_bandwidth_limits(self): """Make sure different types of policies end on the right ports.""" port_dicts = self.create_test_ports(amount=3) port_dicts[0]['qos_policy_id'] = TEST_POLICY_ID1 port_dicts[1]['qos_policy_id'] = TEST_POLICY_ID2 self.setup_agent_and_ports(port_dicts) self.wait_until_ports_state(self.ports, up=True) self._assert_bandwidth_limit_rule_is_set(self.ports[0], self.test_bw_limit_rule_1) self._assert_bandwidth_limit_rule_is_set(self.ports[1], self.test_bw_limit_rule_2) self._assert_bandwidth_limit_rule_not_set(self.ports[2], self.direction) def test_port_creation_with_dscp_marking(self): """Make sure dscp marking rules are set in low level to ports.""" self.setup_agent_and_ports( port_dicts=self.create_test_ports(amount=1, policy_id=TEST_POLICY_ID1)) self.wait_until_ports_state(self.ports, up=True) for port in self.ports: self._assert_dscp_marking_rule_is_set( port, self.test_dscp_marking_rule_1) def test_port_creation_with_different_dscp_markings(self): """Make sure different types of policies end on the right ports.""" port_dicts = self.create_test_ports(amount=3) port_dicts[0]['qos_policy_id'] = TEST_POLICY_ID1 port_dicts[1]['qos_policy_id'] = TEST_POLICY_ID2 self.setup_agent_and_ports(port_dicts) self.wait_until_ports_state(self.ports, up=True) self._assert_dscp_marking_rule_is_set(self.ports[0], self.test_dscp_marking_rule_1) self._assert_dscp_marking_rule_is_set(self.ports[1], self.test_dscp_marking_rule_2) self._assert_dscp_marking_rule_not_set(self.ports[2]) def test_simple_port_policy_update(self): self.setup_agent_and_ports( port_dicts=self.create_test_ports(amount=1, policy_id=TEST_POLICY_ID1)) self.wait_until_ports_state(self.ports, up=True) self._assert_dscp_marking_rule_is_set(self.ports[0], self.test_dscp_marking_rule_1) policy_copy = copy.deepcopy(self.qos_policies[TEST_POLICY_ID1]) policy_copy.rules[0].max_kbps = 500 policy_copy.rules[0].max_burst_kbps = 5 policy_copy.rules[1].dscp_mark = TEST_DSCP_MARK_2 context = mock.Mock() consumer_reg.push(context, resources.QOS_POLICY, [policy_copy], events.UPDATED) self.wait_until_bandwidth_limit_rule_applied(self.ports[0], policy_copy.rules[0]) self._assert_bandwidth_limit_rule_is_set(self.ports[0], policy_copy.rules[0]) self._assert_dscp_marking_rule_is_set(self.ports[0], self.test_dscp_marking_rule_2) 
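    # A minimal, standard-library sketch of the "pull mock" pattern used by
    # these frameworks (see _set_pull_mock above): the resources RPC pull is
    # replaced by a side_effect that serves objects from a local dict keyed
    # by resource id, so no server is needed. All names here are
    # illustrative, not part of the framework itself.
    from unittest import mock as _sketch_mock

    _sketch_policies = {'policy-1': {'id': 'policy-1', 'rules': []}}

    def _sketch_pull(context, resource_type, resource_id):
        # Serve the "pulled" resource from the local registry.
        return _sketch_policies[resource_id]

    _sketch_pull_rpc = _sketch_mock.Mock(side_effect=_sketch_pull)
    assert _sketch_pull_rpc(None, 'qos-policy', 'policy-1')['id'] == 'policy-1'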
    def test_simple_port_policy_update_change_bw_limit_direction(self):
        self.setup_agent_and_ports(
            port_dicts=self.create_test_ports(amount=1,
                                              policy_id=TEST_POLICY_ID1))
        self.wait_until_ports_state(self.ports, up=True)
        self._assert_bandwidth_limit_rule_is_set(self.ports[0],
                                                 self.test_bw_limit_rule_1)
        self._assert_bandwidth_limit_rule_not_set(self.ports[0],
                                                  self.reverse_direction)
        policy_copy = copy.deepcopy(self.qos_policies[TEST_POLICY_ID1])
        policy_copy.rules[0].direction = self.reverse_direction
        context = mock.Mock()
        consumer_reg.push(context, resources.QOS_POLICY, [policy_copy],
                          events.UPDATED)
        self.wait_until_bandwidth_limit_rule_applied(self.ports[0],
                                                     policy_copy.rules[0])
        self._assert_bandwidth_limit_rule_not_set(self.ports[0],
                                                  self.direction)
        self._assert_bandwidth_limit_rule_is_set(self.ports[0],
                                                 policy_copy.rules[0])

    def test_port_qos_disassociation(self):
        """Test that qos_policy_id set to None will remove all qos rules
        from given port.
        """
        port_dict = self._create_port_with_qos()
        port_dict['qos_policy_id'] = None
        self.agent.port_update(None, port=port_dict)
        self.wait_until_bandwidth_limit_rule_applied(port_dict, None)

    def test_port_qos_update_policy_id(self):
        """Test that change of qos policy id on given port refreshes all
        its rules.
        """
        port_dict = self._create_port_with_qos()
        port_dict['qos_policy_id'] = TEST_POLICY_ID2
        self.agent.port_update(None, port=port_dict)
        self.wait_until_bandwidth_limit_rule_applied(
            port_dict, self.test_bw_limit_rule_2)

    def test_policy_rule_delete(self):
        port_dict = self._create_port_with_qos()
        policy_copy = copy.deepcopy(self.qos_policies[TEST_POLICY_ID1])
        policy_copy.rules = list()
        context = mock.Mock()
        consumer_reg.push(context, resources.QOS_POLICY, [policy_copy],
                          events.UPDATED)
        self.wait_until_bandwidth_limit_rule_applied(port_dict, None)

# --- package: neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/l3/ (empty __init__.py) ---
# --- package: neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/l3/bin/ (empty __init__.py) ---
# --- file: neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/l3/bin/cmd_keepalived_state_change.py ---
#!/usr/bin/python
# Copyright (c) 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sys

from neutron.agent.l3.keepalived_state_change import main


if __name__ == "__main__":
    sys.exit(main())

# --- package: neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/l3/extensions/ (empty __init__.py) ---
# --- package: neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/l3/extensions/qos/ (empty __init__.py) ---
# --- file: neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/l3/extensions/qos/test_fip_qos_extension.py ---
# Copyright 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
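# A minimal, standard-library sketch of the polling-assert helper the tests
# below lean on (common_utils.wait_until_true): retry a predicate until it
# returns something truthy or a timeout expires. Illustrative only; the
# real helper lives in neutron.common.utils and is eventlet-based.
import time


def _sketch_wait_until_true(predicate, timeout=10, sleep=0.5):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return
        time.sleep(sleep)
    raise RuntimeError('timed out waiting for predicate')


_sketch_wait_until_true(lambda: True)  # returns as soon as predicate holds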
import mock from neutron_lib import constants from neutron_lib import exceptions from oslo_utils import uuidutils from neutron.agent.l3 import agent as neutron_l3_agent from neutron.agent.l3.extensions.qos import fip as fip_qos from neutron.agent.linux import ip_lib from neutron.common import utils as common_utils from neutron.objects.qos import policy from neutron.objects.qos import rule from neutron.tests.functional.agent.l3 import framework from neutron.tests.functional.agent.l3 import test_dvr_router _uuid = uuidutils.generate_uuid TEST_POLICY_ID1 = _uuid() TEST_POLICY_ID2 = _uuid() TEST_POLICY_ID3 = _uuid() class L3AgentFipQoSExtensionTestFramework(framework.L3AgentTestFramework): test_bw_limit_rule_1 = rule.QosBandwidthLimitRule( context=None, qos_policy_id=TEST_POLICY_ID1, id=_uuid(), max_kbps=111, max_burst_kbps=222, direction=constants.INGRESS_DIRECTION) test_bw_limit_rule_2 = rule.QosBandwidthLimitRule( context=None, qos_policy_id=TEST_POLICY_ID1, id=_uuid(), max_kbps=333, max_burst_kbps=444, direction=constants.EGRESS_DIRECTION) test_bw_limit_rule_3 = rule.QosBandwidthLimitRule( context=None, qos_policy_id=TEST_POLICY_ID2, id=_uuid(), max_kbps=555, max_burst_kbps=666, direction=constants.INGRESS_DIRECTION) test_bw_limit_rule_4 = rule.QosBandwidthLimitRule( context=None, qos_policy_id=TEST_POLICY_ID3, id=_uuid(), max_kbps=777, max_burst_kbps=888, direction=constants.EGRESS_DIRECTION) def setUp(self): super(L3AgentFipQoSExtensionTestFramework, self).setUp() self.conf.set_override('extensions', ['fip_qos'], 'agent') self.agent = neutron_l3_agent.L3NATAgentWithStateReport('agent1', self.conf) self._set_pull_mock() self.set_test_qos_rules(TEST_POLICY_ID1, [self.test_bw_limit_rule_1, self.test_bw_limit_rule_2]) self.set_test_qos_rules(TEST_POLICY_ID2, [self.test_bw_limit_rule_3]) self.set_test_qos_rules(TEST_POLICY_ID3, [self.test_bw_limit_rule_4]) self.fip_qos_ext = fip_qos.FipQosAgentExtension() def _set_pull_mock(self): self.qos_policies = {} def _pull_mock(context, resource_type, resource_id): return self.qos_policies[resource_id] self.pull = mock.patch( 'neutron.api.rpc.handlers.resources_rpc.' 
'ResourcesPullRpcApi.pull').start() self.pull.side_effect = _pull_mock def set_test_qos_rules(self, policy_id, policy_rules): """This function sets the policy test rules to be exposed.""" qos_policy = policy.QosPolicy( context=None, project_id=_uuid(), id=policy_id, name="Test Policy Name", description="This is a policy for testing purposes", shared=False, rules=policy_rules) qos_policy.obj_reset_changes() self.qos_policies[policy_id] = qos_policy def _assert_bandwidth_limit_rule_is_set(self, router, ip, rule): device = self.fip_qos_ext._get_rate_limit_ip_device(router) tc_wrapper = self.fip_qos_ext._get_tc_wrapper(device) def get_filter_id(): try: return tc_wrapper.get_filter_id_for_ip(rule.direction, ip) except exceptions.FilterIDForIPNotFound: pass common_utils.wait_until_true(get_filter_id) def _assert_bandwidth_limit_rule_not_set(self, router, ip, rule, dvr_no_external=False): device = self.fip_qos_ext._get_rate_limit_ip_device(router) if dvr_no_external: self.assertIsNone(device) else: tc_wrapper = self.fip_qos_ext._get_tc_wrapper(device) filter_id = tc_wrapper.get_filter_id_for_ip(rule.direction, ip) self.assertIsNone(filter_id) class TestL3AgentFipQosExtension(L3AgentFipQoSExtensionTestFramework): def _test_centralized_routers(self, enable_ha=False, ingress=True, egress=True): qos_policy_id = TEST_POLICY_ID1 if ingress and not egress: qos_policy_id = TEST_POLICY_ID2 elif egress and not ingress: qos_policy_id = TEST_POLICY_ID3 router_info = self.generate_router_info( enable_ha=enable_ha, qos_policy_id=qos_policy_id) ri = self.manage_router(self.agent, router_info) if qos_policy_id == TEST_POLICY_ID1: self._assert_bandwidth_limit_rule_is_set( ri, '19.4.4.2', self.test_bw_limit_rule_1) self._assert_bandwidth_limit_rule_is_set( ri, '19.4.4.2', self.test_bw_limit_rule_2) elif qos_policy_id == TEST_POLICY_ID2: self._assert_bandwidth_limit_rule_is_set( ri, '19.4.4.2', self.test_bw_limit_rule_3) elif qos_policy_id == TEST_POLICY_ID3: self._assert_bandwidth_limit_rule_is_set( ri, '19.4.4.2', self.test_bw_limit_rule_4) def test_legacy_router_fip_qos(self): self._test_centralized_routers() def test_legacy_router_fip_qos_ingress(self): self._test_centralized_routers(ingress=True, egress=False) def test_legacy_router_fip_qos_egress(self): self._test_centralized_routers(ingress=False, egress=True) def test_ha_router_fip_qos(self): self._test_centralized_routers(enable_ha=True) def test_ha_router_fip_qos_ingress(self): self._test_centralized_routers(enable_ha=True, ingress=True, egress=False) def test_ha_router_fip_qos_egress(self): self._test_centralized_routers(enable_ha=True, ingress=False, egress=True) def _test_router_with_pf_fips_qos(self, enable_ha): router_info = self.generate_router_info( enable_ha=enable_ha, enable_pf_floating_ip=True, qos_policy_id=TEST_POLICY_ID1) ri = self.manage_router(self.agent, router_info) self._assert_bandwidth_limit_rule_is_set( ri, '19.4.4.4', self.test_bw_limit_rule_1) self._assert_bandwidth_limit_rule_is_set( ri, '19.4.4.4', self.test_bw_limit_rule_2) def test_ha_router_with_pf_fips_qos(self): self._test_router_with_pf_fips_qos(enable_ha=True) def test_legacy_router_with_pf_fips_qos(self): self._test_router_with_pf_fips_qos(enable_ha=False) class TestL3AgentFipQosExtensionDVR( test_dvr_router.TestDvrRouter, L3AgentFipQoSExtensionTestFramework): def test_dvr_local_router_no_fip(self): self.agent.conf.agent_mode = constants.L3_AGENT_MODE_DVR router_info = self.generate_dvr_router_info( enable_floating_ip=False) ri = self.manage_router(self.agent, router_info) 
        self._assert_bandwidth_limit_rule_not_set(
            ri, '19.4.4.2', self.test_bw_limit_rule_1)
        self._assert_bandwidth_limit_rule_not_set(
            ri, '19.4.4.2', self.test_bw_limit_rule_2)

    def _test_dvr_fip_qos(self, enable_ha=False):
        self.agent.conf.agent_mode = constants.L3_AGENT_MODE_DVR
        router_info = self.generate_dvr_router_info(
            enable_ha=enable_ha, enable_gw=True,
            qos_policy_id=TEST_POLICY_ID1)
        ri = self.manage_router(self.agent, router_info)
        self._assert_bandwidth_limit_rule_is_set(
            ri, '19.4.4.2', self.test_bw_limit_rule_1)
        self._assert_bandwidth_limit_rule_is_set(
            ri, '19.4.4.2', self.test_bw_limit_rule_2)

    def test_dvr_local_router_fip_qos(self):
        self._test_dvr_fip_qos()

    def test_ha_dvr_local_router_fip_qos(self):
        self._test_dvr_fip_qos(enable_ha=True)

    def _test_agent_mode_dvr_no_external(self, enable_ha=False):
        self.agent.conf.agent_mode = constants.L3_AGENT_MODE_DVR_NO_EXTERNAL
        router_info = self.generate_dvr_router_info(
            enable_ha=enable_ha, enable_floating_ip=True,
            enable_centralized_fip=True, enable_snat=True,
            snat_bound_fip=True, qos_policy_id=TEST_POLICY_ID1)
        ri = self.manage_router(self.agent, router_info)
        self._assert_bandwidth_limit_rule_not_set(
            ri, '19.4.4.2', self.test_bw_limit_rule_1, dvr_no_external=True)
        self._assert_bandwidth_limit_rule_not_set(
            ri, '19.4.4.2', self.test_bw_limit_rule_2, dvr_no_external=True)

    def test_dvr_no_external_no_qos(self):
        self._test_agent_mode_dvr_no_external()

    def test_ha_dvr_no_external_no_qos(self):
        self._test_agent_mode_dvr_no_external(enable_ha=True)

    def _test_dvr_fip_snat_bound_agent_mode_dvr_snat(self, enable_ha=False):
        self.agent.conf.agent_mode = constants.L3_AGENT_MODE_DVR_SNAT
        router_info = self.generate_dvr_router_info(
            enable_ha=enable_ha, snat_bound_fip=True, enable_gw=True,
            qos_policy_id=TEST_POLICY_ID1)
        ri = self.manage_router(self.agent, router_info)
        self._assert_bandwidth_limit_rule_is_set(
            ri, '19.4.4.2', self.test_bw_limit_rule_1)
        self._assert_bandwidth_limit_rule_is_set(
            ri, '19.4.4.2', self.test_bw_limit_rule_2)

    def test_dvr_dvr_fip_snat_qos(self):
        self._test_dvr_fip_snat_bound_agent_mode_dvr_snat()

    def test_ha_dvr_dvr_fip_snat_qos(self):
        self._test_dvr_fip_snat_bound_agent_mode_dvr_snat(enable_ha=True)

    def _assert_dvr_snat_qrouter_ns_rule_is_set(self, device, ip, rule):
        tc_wrapper = self.fip_qos_ext._get_tc_wrapper(device)

        def get_filter_id():
            try:
                return tc_wrapper.get_filter_id_for_ip(rule.direction, ip)
            except exceptions.FilterIDForIPNotFound:
                pass

        common_utils.wait_until_true(get_filter_id)

    def test_dvr_snat_qos_rules_set_in_qrouter(self):
        self.agent.conf.agent_mode = constants.L3_AGENT_MODE_DVR_SNAT
        router_info = self.generate_dvr_router_info(
            enable_snat=True, enable_gw=True, enable_floating_ip=True,
            qos_policy_id=TEST_POLICY_ID1)
        ri = self.manage_router(self.agent, router_info)
        gw_port = ri.get_ex_gw_port()
        rfp_dev_name = ri.get_external_device_interface_name(gw_port)
        if ri.router_namespace.exists():
            dvr_fip_device = ip_lib.IPDevice(
                rfp_dev_name, namespace=ri.ns_name)
            self._assert_dvr_snat_qrouter_ns_rule_is_set(
                dvr_fip_device, '19.4.4.2', self.test_bw_limit_rule_1)
            self._assert_dvr_snat_qrouter_ns_rule_is_set(
                dvr_fip_device, '19.4.4.2', self.test_bw_limit_rule_2)


class LinuxBridgeL3AgentFipQosExtensionTestCase(TestL3AgentFipQosExtension):
    INTERFACE_DRIVER = 'neutron.agent.linux.interface.BridgeInterfaceDriver'

# --- file: neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/l3/extensions/test_conntrack_helper_extension.py ---
# Copyright (c) 2019 Red Hat Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections

import mock
from neutron_lib import constants
from oslo_utils import uuidutils

from neutron.agent.l3 import agent as neutron_l3_agent
from neutron.agent.l3.extensions import conntrack_helper
from neutron.agent.linux import iptables_manager as iptable_mng
from neutron.common import utils as common_utils
from neutron.objects import conntrack_helper as cth_obj
from neutron.tests.functional.agent.l3 import framework
from neutron.tests.functional.agent.l3 import test_dvr_router


class L3AgentConntrackHelperExtensionTestFramework(
        framework.L3AgentTestFramework):

    def setUp(self):
        super(L3AgentConntrackHelperExtensionTestFramework, self).setUp()
        self.conf.set_override('extensions', ['conntrack_helper'], 'agent')
        self.agent = neutron_l3_agent.L3NATAgentWithStateReport('agent1',
                                                                self.conf)
        self.cth_ext = conntrack_helper.ConntrackHelperAgentExtension()
        self.router_id1 = uuidutils.generate_uuid()
        self.router_id2 = uuidutils.generate_uuid()
        self.conntrackhelper1 = cth_obj.ConntrackHelper(
            context=None, id=uuidutils.generate_uuid(), protocol='udp',
            port=69, helper='tftp', router_id=self.router_id1)
        self.conntrackhelper2 = cth_obj.ConntrackHelper(
            context=None, id=uuidutils.generate_uuid(), protocol='tcp',
            port=21, helper='ftp', router_id=self.router_id2)
        self.conntrack_helpers = [self.conntrackhelper1,
                                  self.conntrackhelper2]
        self.managed_cths = {}
        self.managed_cths[self.conntrackhelper1.id] = self.conntrackhelper1
        self.managed_cths[self.conntrackhelper2.id] = self.conntrackhelper2
        self.router_cth_map = collections.defaultdict(set)
        self.router_cth_map[self.router_id1].add(self.conntrackhelper1.id)
        self.router_cth_map[self.router_id2].add(self.conntrackhelper2.id)
        self._set_bulk_poll_mock()

    def _set_bulk_poll_mock(self):

        def _bulk_pull_mock(context, resource_type, filter_kwargs=None):
            if 'router_id' in filter_kwargs:
                result = []
                for cthobj in self.conntrack_helpers:
                    if cthobj.router_id in filter_kwargs['router_id']:
                        result.append(cthobj)
                return result
            return self.conntrack_helpers

        self.bulk_pull = mock.patch('neutron.api.rpc.handlers.resources_rpc.'
                                    'ResourcesPullRpcApi.bulk_pull').start()
        self.bulk_pull.side_effect = _bulk_pull_mock

    def _assert_conntrack_helper_iptables_is_set(self, router_info, cth):
        iptables_manager = self.cth_ext._get_iptables_manager(router_info)
        tag = conntrack_helper.CONNTRACK_HELPER_PREFIX + cth.id
        chain_name = (conntrack_helper.CONNTRACK_HELPER_CHAIN_PREFIX +
                      cth.id)[:constants.MAX_IPTABLES_CHAIN_LEN_WRAP]
        rule = ('-p %s --dport %s -j CT --helper %s' %
                (cth.protocol, cth.port, cth.helper))
        rule_obj = iptable_mng.IptablesRule(chain_name, rule, True, False,
                                            iptables_manager.wrap_name, tag,
                                            None)

        def check_chain_rules_set():
            existing_ipv4_chains = iptables_manager.ipv4['raw'].chains
            existing_ipv6_chains = iptables_manager.ipv6['raw'].chains
            if (chain_name not in existing_ipv4_chains or
                    chain_name not in existing_ipv6_chains):
                return False
            existing_ipv4_rules = iptables_manager.ipv4['raw'].rules
            existing_ipv6_rules = iptables_manager.ipv6['raw'].rules
            return (rule_obj in existing_ipv4_rules and
                    rule_obj in existing_ipv6_rules)

        common_utils.wait_until_true(check_chain_rules_set)

    def _test_centralized_routers(self, router_info):
        router_id = router_info['id']
        for cthobj in self.conntrack_helpers:
            cthobj.router_id = router_id
        router_info['managed_conntrack_helpers'] = self.managed_cths
        router_info['router_conntrack_helper_mapping'] = self.router_cth_map
        ri = self.manage_router(self.agent, router_info)
        for cthobj in self.conntrack_helpers:
            self._assert_conntrack_helper_iptables_is_set(ri, cthobj)


class TestL3AgentConntrackHelperExtension(
        test_dvr_router.DvrRouterTestFramework,
        L3AgentConntrackHelperExtensionTestFramework):

    def test_legacy_router_conntrack_helper(self):
        router_info = self.generate_router_info(enable_ha=False)
        self._test_centralized_routers(router_info)

    def test_ha_router_conntrack_helper(self):
        router_info = self.generate_router_info(enable_ha=True)
        self._test_centralized_routers(router_info)

    def test_dvr_edge_router(self):
        self.agent.conf.agent_mode = constants.L3_AGENT_MODE_DVR_SNAT
        router_info = self.generate_dvr_router_info(enable_ha=False)
        self._test_centralized_routers(router_info)

    def test_dvr_ha_router(self):
        self.agent.conf.agent_mode = constants.L3_AGENT_MODE_DVR_SNAT
        router_info = self.generate_dvr_router_info(enable_ha=True)
        self._test_centralized_routers(router_info)

# --- file: neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/l3/extensions/test_gateway_ip_qos_extension.py ---
# Copyright 2018 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
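# The conntrack_helper assertion in the tests above expects a raw-table
# iptables rule of the form '-p <protocol> --dport <port> -j CT --helper
# <helper>'. A tiny self-contained rendering of that exact format, using
# the tftp helper from the fixtures (local names only):
_proto, _port, _helper = 'udp', 69, 'tftp'
_rule = '-p %s --dport %s -j CT --helper %s' % (_proto, _port, _helper)
assert _rule == '-p udp --dport 69 -j CT --helper tftp'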
import mock from neutron_lib import constants from neutron_lib import exceptions from oslo_utils import uuidutils from neutron.agent.l3 import agent as neutron_l3_agent from neutron.agent.l3.extensions.qos import gateway_ip as gateway_ip_qos from neutron.common import utils as common_utils from neutron.objects.qos import policy from neutron.objects.qos import rule from neutron.tests.functional.agent.l3 import framework from neutron.tests.functional.agent.l3 import test_dvr_router _uuid = uuidutils.generate_uuid INGRESS_EGRESS_POLICY_ID = _uuid() INGRESS_POLICY_ID = _uuid() EGRESS_POLICY_ID = _uuid() class RouterGatewayIPQosAgentExtensionTestFramework( framework.L3AgentTestFramework): test_bw_limit_rule_1 = rule.QosBandwidthLimitRule( context=None, qos_policy_id=INGRESS_EGRESS_POLICY_ID, id=_uuid(), max_kbps=111, max_burst_kbps=222, direction=constants.INGRESS_DIRECTION) test_bw_limit_rule_2 = rule.QosBandwidthLimitRule( context=None, qos_policy_id=INGRESS_EGRESS_POLICY_ID, id=_uuid(), max_kbps=333, max_burst_kbps=444, direction=constants.EGRESS_DIRECTION) test_bw_limit_rule_3 = rule.QosBandwidthLimitRule( context=None, qos_policy_id=INGRESS_POLICY_ID, id=_uuid(), max_kbps=555, max_burst_kbps=666, direction=constants.INGRESS_DIRECTION) test_bw_limit_rule_4 = rule.QosBandwidthLimitRule( context=None, qos_policy_id=EGRESS_POLICY_ID, id=_uuid(), max_kbps=777, max_burst_kbps=888, direction=constants.EGRESS_DIRECTION) def setUp(self): super(RouterGatewayIPQosAgentExtensionTestFramework, self).setUp() self.conf.set_override('extensions', ['gateway_ip_qos'], 'agent') self.agent = neutron_l3_agent.L3NATAgentWithStateReport('agent1', self.conf) self._set_pull_mock() self.set_test_qos_rules(INGRESS_EGRESS_POLICY_ID, [self.test_bw_limit_rule_1, self.test_bw_limit_rule_2]) self.set_test_qos_rules(INGRESS_POLICY_ID, [self.test_bw_limit_rule_3]) self.set_test_qos_rules(EGRESS_POLICY_ID, [self.test_bw_limit_rule_4]) self.gateway_ip_qos_ext = ( gateway_ip_qos.RouterGatewayIPQosAgentExtension()) def _set_pull_mock(self): self.qos_policies = {} def _pull_mock(context, resource_type, resource_id): return self.qos_policies[resource_id] self.pull = mock.patch( 'neutron.api.rpc.handlers.resources_rpc.' 
            'ResourcesPullRpcApi.pull').start()
        self.pull.side_effect = _pull_mock

    def set_test_qos_rules(self, policy_id, policy_rules):
        """This function sets the policy test rules to be exposed."""
        qos_policy = policy.QosPolicy(
            context=None,
            project_id=_uuid(),
            id=policy_id,
            name="Test Policy Name",
            description="This is a policy for testing purposes",
            shared=False,
            rules=policy_rules)
        qos_policy.obj_reset_changes()
        self.qos_policies[policy_id] = qos_policy

    def _assert_bandwidth_limit_rule_is_set(self, router, ip, rule):
        ex_gw_port = router.get_ex_gw_port()
        interface_name = router.get_external_device_name(ex_gw_port['id'])
        device = self.gateway_ip_qos_ext._get_gateway_tc_rule_device(
            router, interface_name)
        tc_wrapper = self.gateway_ip_qos_ext._get_tc_wrapper(device)

        def get_filter_id():
            try:
                return tc_wrapper.get_filter_id_for_ip(rule.direction, ip)
            except exceptions.FilterIDForIPNotFound:
                pass

        common_utils.wait_until_true(get_filter_id)


class TestRouterGatewayIPQosAgentExtension(
        RouterGatewayIPQosAgentExtensionTestFramework):

    def _test_centralized_routers(self, enable_ha=False,
                                  ingress=True, egress=True):
        qos_policy_id = INGRESS_EGRESS_POLICY_ID
        if ingress and not egress:
            qos_policy_id = INGRESS_POLICY_ID
        elif egress and not ingress:
            qos_policy_id = EGRESS_POLICY_ID
        router_info = self.generate_router_info(
            enable_ha=enable_ha, qos_policy_id=qos_policy_id)
        ri = self.manage_router(self.agent, router_info)
        if qos_policy_id == INGRESS_EGRESS_POLICY_ID:
            self._assert_bandwidth_limit_rule_is_set(
                ri, '19.4.4.4', self.test_bw_limit_rule_1)
            self._assert_bandwidth_limit_rule_is_set(
                ri, '19.4.4.4', self.test_bw_limit_rule_2)
        elif qos_policy_id == INGRESS_POLICY_ID:
            self._assert_bandwidth_limit_rule_is_set(
                ri, '19.4.4.4', self.test_bw_limit_rule_3)
        elif qos_policy_id == EGRESS_POLICY_ID:
            self._assert_bandwidth_limit_rule_is_set(
                ri, '19.4.4.4', self.test_bw_limit_rule_4)

    def test_legacy_router_gateway_ip_qos(self):
        self._test_centralized_routers()

    def test_legacy_router_gateway_ip_qos_ingress(self):
        self._test_centralized_routers(ingress=True, egress=False)

    def test_legacy_router_gateway_ip_qos_egress(self):
        self._test_centralized_routers(ingress=False, egress=True)

    def test_ha_router_gateway_ip_qos(self):
        self._test_centralized_routers(enable_ha=True)

    def test_ha_router_gateway_ip_qos_ingress(self):
        self._test_centralized_routers(enable_ha=True, ingress=True,
                                       egress=False)

    def test_ha_router_gateway_ip_qos_egress(self):
        self._test_centralized_routers(enable_ha=True, ingress=False,
                                       egress=True)


class TestRouterGatewayIPQosAgentExtensionDVR(
        test_dvr_router.TestDvrRouter,
        RouterGatewayIPQosAgentExtensionTestFramework):

    def _test_dvr_gateway_ip_qos(self, enable_ha=False):
        self.agent.conf.agent_mode = constants.L3_AGENT_MODE_DVR_SNAT
        router_info = self.generate_dvr_router_info(
            enable_ha=enable_ha, enable_snat=True, enable_gw=True,
            qos_policy_id=INGRESS_EGRESS_POLICY_ID)
        ri = self.manage_router(self.agent, router_info)
        self._assert_bandwidth_limit_rule_is_set(
            ri, '19.4.4.4', self.test_bw_limit_rule_1)
        self._assert_bandwidth_limit_rule_is_set(
            ri, '19.4.4.4', self.test_bw_limit_rule_2)

    def test_dvr_edge_router_gateway_ip_qos(self):
        self._test_dvr_gateway_ip_qos()

    def test_ha_dvr_edge_router_gateway_ip_qos(self):
        self._test_dvr_gateway_ip_qos(enable_ha=True)


class LinuxBridgeRouterGatewayIPQosAgentExtensionTestCase(
        TestRouterGatewayIPQosAgentExtension):
    INTERFACE_DRIVER = 'neutron.agent.linux.interface.BridgeInterfaceDriver'
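# _test_centralized_routers above maps the (ingress, egress) flags to one
# of three QoS policies. The same selection logic, shown standalone with
# illustrative ids (not part of the test modules):
def _sketch_pick_policy(ingress, egress, both_id, ingress_id, egress_id):
    if ingress and not egress:
        return ingress_id
    if egress and not ingress:
        return egress_id
    # both directions requested (or neither flag narrows the choice)
    return both_id


assert _sketch_pick_policy(True, False, 'both', 'in', 'out') == 'in'
assert _sketch_pick_policy(True, True, 'both', 'in', 'out') == 'both'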
# --- file: neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/l3/extensions/test_port_forwarding_extension.py ---
# Copyright 2018 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import re

from neutron_lib import constants
from oslo_utils import uuidutils

import mock

from neutron.agent.l3 import agent as neutron_l3_agent
from neutron.agent.l3.extensions import port_forwarding as pf
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_manager as iptable_mng
from neutron.agent.linux import utils
from neutron.common import utils as common_utils
from neutron.objects import port_forwarding as pf_obj
from neutron.tests.functional.agent.l3 import framework
from neutron.tests.functional.agent.l3 import test_dvr_router

_uuid = uuidutils.generate_uuid


class L3AgentFipPortForwardingExtensionTestFramework(
        framework.L3AgentTestFramework):

    def setUp(self):
        super(L3AgentFipPortForwardingExtensionTestFramework, self).setUp()
        self.conf.set_override('extensions', ['port_forwarding'], 'agent')
        self.agent = neutron_l3_agent.L3NATAgentWithStateReport('agent1',
                                                                self.conf)
        self.fip_pf_ext = pf.PortForwardingAgentExtension()
        self.fip_id1 = _uuid()
        self.fip_id2 = _uuid()
        self.fip_id3 = _uuid()
        self.portforwarding1 = pf_obj.PortForwarding(
            context=None, id=_uuid(), floatingip_id=self.fip_id1,
            external_port=1111, protocol='tcp', internal_port_id=_uuid(),
            internal_ip_address='1.1.1.1', internal_port=11111,
            floating_ip_address='111.111.111.111', router_id=_uuid())
        self.portforwarding2 = pf_obj.PortForwarding(
            context=None, id=_uuid(), floatingip_id=self.fip_id1,
            external_port=1112, protocol='tcp', internal_port_id=_uuid(),
            internal_ip_address='1.1.1.2', internal_port=11112,
            floating_ip_address='111.111.111.111', router_id=_uuid())
        self.portforwarding3 = pf_obj.PortForwarding(
            context=None, id=_uuid(), floatingip_id=self.fip_id2,
            external_port=1113, protocol='tcp', internal_port_id=_uuid(),
            internal_ip_address='1.1.1.3', internal_port=11113,
            floating_ip_address='111.222.111.222', router_id=_uuid())
        self.portforwarding4 = pf_obj.PortForwarding(
            context=None, id=_uuid(), floatingip_id=self.fip_id3,
            external_port=2222, protocol='tcp', internal_port_id=_uuid(),
            internal_ip_address='2.2.2.2', internal_port=22222,
            floating_ip_address='222.222.222.222', router_id=_uuid())
        self.port_forwardings = [self.portforwarding1, self.portforwarding2,
                                 self.portforwarding3, self.portforwarding4]
        self._set_bulk_pull_mock()
        self.managed_fips = [self.fip_id1, self.fip_id2, self.fip_id3]
        self.fip_list_for_pf = ['111.111.111.111/32', '111.222.111.222/32',
                                '222.222.222.222/32']

    def _set_bulk_pull_mock(self):

        def _bulk_pull_mock(context, resource_type, filter_kwargs=None):
            if 'floatingip_id' in filter_kwargs:
                result = []
                for pfobj in self.port_forwardings:
                    if
pfobj.floatingip_id in filter_kwargs['floatingip_id']: result.append(pfobj) return result return self.port_forwardings self.bulk_pull = mock.patch( 'neutron.api.rpc.handlers.resources_rpc.' 'ResourcesPullRpcApi.bulk_pull').start() self.bulk_pull.side_effect = _bulk_pull_mock def _assert_port_forwarding_fip_is_set(self, router_info, pf_fip): (interface_name, namespace, iptables_manager) = self.fip_pf_ext._get_resource_by_router( router_info) device = ip_lib.IPDevice(interface_name, namespace=namespace) pf_fip_cidr = str(pf_fip) + '/32' def check_existing_cidrs(): existing_cidrs = router_info.get_router_cidrs(device) return pf_fip_cidr in existing_cidrs common_utils.wait_until_true(check_existing_cidrs) def _assert_port_forwarding_iptables_is_set(self, router_info, pf): (interface_name, namespace, iptables_manager) = self.fip_pf_ext._get_resource_by_router( router_info) chain_rule = self.fip_pf_ext._get_fip_rules( pf, iptables_manager.wrap_name)[1] chain_name = chain_rule[0] rule = chain_rule[1] rule_tag = 'fip_portforwarding-' + pf.id rule_obj = iptable_mng.IptablesRule( chain_name, rule, True, False, iptables_manager.wrap_name, rule_tag, None) def check_chain_rules_set(): existing_chains = iptables_manager.ipv4['nat'].chains if chain_name not in existing_chains: return False existing_rules = iptables_manager.ipv4['nat'].rules return rule_obj in existing_rules common_utils.wait_until_true(check_chain_rules_set) def _assert_harouter_fip_is_set(self, router_info, fip_pf): (interface_name, namespace, iptables_manager) = self.fip_pf_ext._get_resource_by_router( router_info) keepalived_pm = router_info.keepalived_manager.get_process() utils.get_conf_file_name(keepalived_pm.pids_path, keepalived_pm.uuid, keepalived_pm.service_pid_fname) conf_path = os.path.join(keepalived_pm.pids_path, keepalived_pm.uuid, 'keepalived.conf') regex = "%s dev %s" % (fip_pf, interface_name) pattern = re.compile(regex) def check_harouter_fip_is_set(): if re.findall(pattern, utils.get_value_from_file(conf_path)): return True return False common_utils.wait_until_true(check_harouter_fip_is_set) def _test_centralized_routers(self, router_info, enable_ha=False): router_id = router_info['id'] for pfobj in self.port_forwardings: pfobj.router_id = router_id router_info['fip_managed_by_port_forwardings'] = self.managed_fips router_info['port_forwardings_fip_set'] = set(self.fip_list_for_pf) ri = self.manage_router(self.agent, router_info) for pfobj in self.port_forwardings: self._assert_port_forwarding_fip_is_set(ri, pfobj.floating_ip_address) self._assert_port_forwarding_iptables_is_set(ri, pfobj) if enable_ha: for fip_pf in self.fip_list_for_pf: self._assert_harouter_fip_is_set(ri, fip_pf) class TestL3AgentFipPortForwardingExtension( L3AgentFipPortForwardingExtensionTestFramework): def test_legacy_router_fip_portforwarding(self): router_info = self.generate_router_info(enable_ha=False) self._test_centralized_routers(router_info, enable_ha=False) def test_ha_router_fip_portforwarding(self): router_info = self.generate_router_info(enable_ha=True) self._test_centralized_routers(router_info, enable_ha=True) class TestL3AgentFipPortForwardingExtensionDVR( test_dvr_router.TestDvrRouter, L3AgentFipPortForwardingExtensionTestFramework): def test_dvr_edge_router(self): self.agent.conf.agent_mode = constants.L3_AGENT_MODE_DVR_SNAT router_info = self.generate_dvr_router_info(enable_ha=False) self._test_centralized_routers(router_info, enable_ha=False) def test_dvr_ha_router(self): self.agent.conf.agent_mode = 
constants.L3_AGENT_MODE_DVR_SNAT router_info = self.generate_dvr_router_info(enable_ha=True) self._test_centralized_routers(router_info, enable_ha=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/l3/framework.py0000644000175000017500000007470600000000000026142 0ustar00coreycorey00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import functools import textwrap import mock import netaddr from neutron_lib import constants from oslo_config import cfg from oslo_log import log as logging from oslo_utils import uuidutils import testtools from neutron.agent.common import ovs_lib from neutron.agent.l3 import agent as neutron_l3_agent from neutron.agent.l3 import namespaces from neutron.agent.l3 import router_info as l3_router_info from neutron.agent import l3_agent as l3_agent_main from neutron.agent.linux import external_process from neutron.agent.linux import interface from neutron.agent.linux import ip_lib from neutron.agent.linux import keepalived from neutron.agent.metadata import driver as metadata_driver from neutron.common import utils as common_utils from neutron.conf.agent import common as agent_config from neutron.conf import common as common_config from neutron.tests.common import l3_test_common from neutron.tests.common import net_helpers from neutron.tests.functional import base _uuid = uuidutils.generate_uuid OVS_INTERFACE_DRIVER = 'neutron.agent.linux.interface.OVSInterfaceDriver' def get_ovs_bridge(br_name): return ovs_lib.OVSBridge(br_name) class L3AgentTestFramework(base.BaseSudoTestCase): INTERFACE_DRIVER = OVS_INTERFACE_DRIVER NESTED_NAMESPACE_SEPARATOR = '@' def setUp(self): super(L3AgentTestFramework, self).setUp() self.mock_plugin_api = mock.patch( 'neutron.agent.l3.agent.L3PluginApi').start().return_value mock.patch('neutron.agent.rpc.PluginReportStateAPI').start() self.conf = self._configure_agent('agent1') self.agent = neutron_l3_agent.L3NATAgentWithStateReport('agent1', self.conf) def _get_config_opts(self): config = cfg.ConfigOpts() config.register_opts(common_config.core_opts) config.register_opts(common_config.core_cli_opts) logging.register_options(config) agent_config.register_process_monitor_opts(config) agent_config.register_root_helper(config) return config def _configure_agent(self, host, agent_mode='dvr_snat'): conf = self._get_config_opts() l3_agent_main.register_opts(conf) conf.set_override('interface_driver', self.INTERFACE_DRIVER) br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge conf.set_override('integration_bridge', br_int.br_name, 'OVS') temp_dir = self.get_new_temp_dir() get_temp_file_path = functools.partial(self.get_temp_file_path, root=temp_dir) conf.set_override('state_path', temp_dir.path) conf.set_override('log_file', get_temp_file_path('log_file')) conf.set_override('metadata_proxy_socket', get_temp_file_path('metadata_proxy')) 
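# (Editor's note, illustrative, not part of the original file: every
#  path the agent writes to, such as state, logs, the metadata proxy
#  socket, HA configs and external pids, is redirected into a per-test
#  temporary directory by the overrides in this block, so parallel
#  functional tests cannot collide on shared files.)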
conf.set_override('ha_confs_path', get_temp_file_path('ha_confs')) conf.set_override('external_pids', get_temp_file_path('external/pids')) conf.set_override('host', host) conf.set_override('agent_mode', agent_mode) return conf def _get_agent_ovs_integration_bridge(self, agent): return get_ovs_bridge(agent.conf.OVS.integration_bridge) def generate_router_info(self, enable_ha, ip_version=constants.IP_VERSION_4, extra_routes=True, enable_fip=True, enable_snat=True, num_internal_ports=1, dual_stack=False, enable_gw=True, v6_ext_gw_with_sub=True, enable_pf_floating_ip=False, qos_policy_id=None): if ip_version == constants.IP_VERSION_6 and not dual_stack: enable_snat = False enable_fip = False extra_routes = False return l3_test_common.prepare_router_data(ip_version=ip_version, enable_snat=enable_snat, num_internal_ports=( num_internal_ports), enable_floating_ip=( enable_fip), enable_ha=enable_ha, extra_routes=extra_routes, dual_stack=dual_stack, enable_gw=enable_gw, v6_ext_gw_with_sub=( v6_ext_gw_with_sub), enable_pf_floating_ip=( enable_pf_floating_ip), qos_policy_id=qos_policy_id) def change_router_state(self, router_id, state): ri = self.agent.router_info.get(router_id) if not ri: self.fail('Router %s is not present in the L3 agent' % router_id) ri.ha_state = state def _test_conntrack_disassociate_fip(self, ha): '''Test that conntrack immediately drops stateful connection that uses floating IP once it's disassociated. ''' router_info = self.generate_router_info(enable_ha=ha) router = self.manage_router(self.agent, router_info) port = net_helpers.get_free_namespace_port( constants.PROTO_NAME_TCP, router.ns_name) client_address = '19.4.4.3' server_address = '35.4.0.4' def clean_fips(router): router.router[constants.FLOATINGIP_KEY] = [] clean_fips(router) self._add_fip(router, client_address, fixed_address=server_address) router.process() router_ns = ip_lib.IPWrapper(namespace=router.ns_name) netcat = net_helpers.NetcatTester( router.ns_name, router.ns_name, client_address, port, protocol=net_helpers.NetcatTester.TCP) self.addCleanup(netcat.stop_processes) def assert_num_of_conntrack_rules(n): out = router_ns.netns.execute(["conntrack", "-L", "--orig-src", client_address]) self.assertEqual( n, len([line for line in out.strip().split('\n') if line])) if ha: common_utils.wait_until_true(lambda: router.ha_state == 'master') with self.assert_max_execution_time(100): assert_num_of_conntrack_rules(0) self.assertTrue(netcat.test_connectivity()) assert_num_of_conntrack_rules(1) clean_fips(router) router.process() assert_num_of_conntrack_rules(0) with testtools.ExpectedException(RuntimeError): netcat.test_connectivity() def _test_update_floatingip_statuses(self, router_info): router = self.manage_router(self.agent, router_info) rpc = self.agent.plugin_rpc.update_floatingip_statuses self.assertTrue(rpc.called) # Assert that every defined FIP is updated via RPC expected_fips = set([ (fip['id'], constants.FLOATINGIP_STATUS_ACTIVE) for fip in router.router[constants.FLOATINGIP_KEY]]) call = [args[0] for args in rpc.call_args_list][0] actual_fips = set( [(fip_id, status) for fip_id, status in call[2].items()]) self.assertEqual(expected_fips, actual_fips) def _gateway_check(self, gateway_ip, external_device): expected_gateway = gateway_ip ip_vers = netaddr.IPAddress(expected_gateway).version existing_gateway = external_device.route.get_gateway( ip_version=ip_vers).get('via') self.assertEqual(expected_gateway, existing_gateway) def _assert_ha_device(self, router): def ha_router_dev_name_getter(not_used): 
return router.get_ha_device_name() self.assertTrue(self.device_exists_with_ips_and_mac( router.router[constants.HA_INTERFACE_KEY], ha_router_dev_name_getter, router.ns_name)) def _assert_gateway(self, router, v6_ext_gw_with_sub=True): external_port = router.get_ex_gw_port() external_device_name = router.get_external_device_name( external_port['id']) external_device = ip_lib.IPDevice(external_device_name, namespace=router.ns_name) for subnet in external_port['subnets']: self._gateway_check(subnet['gateway_ip'], external_device) if not v6_ext_gw_with_sub: self._gateway_check(self.agent.conf.ipv6_gateway, external_device) def _check_external_device(self, router): external_port = router.get_ex_gw_port() return (self.device_exists_with_ips_and_mac( external_port, router.get_external_device_name, router.ns_name)) def _assert_external_device(self, router): self.assertTrue(self._check_external_device(router)) def _wait_until_ipv6_accept_ra_has_state( self, ns_name, device_name, enabled): ip_wrapper = ip_lib.IPWrapper(namespace=ns_name) def _ipv6_accept_ra_state(): ra_state = ip_wrapper.netns.execute(['sysctl', '-b', 'net.ipv6.conf.%s.accept_ra' % device_name]) return ( enabled == (int(ra_state) != constants.ACCEPT_RA_DISABLED)) common_utils.wait_until_true(_ipv6_accept_ra_state) def _assert_ipv6_accept_ra(self, router, enabled=True): external_port = router.get_ex_gw_port() external_device_name = router.get_external_device_name( external_port['id']) self._wait_until_ipv6_accept_ra_has_state( router.ns_name, external_device_name, enabled) def _wait_until_ipv6_forwarding_has_state(self, ns_name, dev_name, state): def _ipv6_forwarding_has_state(): return ip_lib.get_ipv6_forwarding( device=dev_name, namespace=ns_name) == state common_utils.wait_until_true(_ipv6_forwarding_has_state) def _assert_ipv6_forwarding(self, router, enabled=True, all_enabled=True): external_port = router.get_ex_gw_port() external_device_name = router.get_external_device_name( external_port['id']) self._wait_until_ipv6_forwarding_has_state( router.ns_name, external_device_name, int(enabled)) self._wait_until_ipv6_forwarding_has_state( router.ns_name, 'all', int(all_enabled)) def _router_lifecycle(self, enable_ha, ip_version=constants.IP_VERSION_4, dual_stack=False, v6_ext_gw_with_sub=True, router_info=None): router_info = router_info or self.generate_router_info( enable_ha, ip_version, dual_stack=dual_stack, v6_ext_gw_with_sub=(v6_ext_gw_with_sub)) return_copy = copy.deepcopy(router_info) router = self.manage_router(self.agent, router_info) # Add multiple-IPv6-prefix internal router port slaac = constants.IPV6_SLAAC slaac_mode = {'ra_mode': slaac, 'address_mode': slaac} subnet_modes = [slaac_mode] * 2 self._add_internal_interface_by_subnet(router.router, count=2, ip_version=constants.IP_VERSION_6, ipv6_subnet_modes=subnet_modes) router.process() if enable_ha: common_utils.wait_until_true(lambda: router.ha_state == 'master') # Keepalived notifies of a state transition when it starts, # not when it ends. Thus, we have to wait until keepalived finishes # configuring everything. We verify this by waiting until the last # device has an IP address. 
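# --- Editor's sketch (illustrative, not part of the original test):
# the polling idiom described in the comment above, as a minimal
# stand-alone equivalent of common_utils.wait_until_true using only the
# standard library. Names with a _sketch suffix are hypothetical.
import time as _time_sketch

def _wait_until_true_sketch(predicate, timeout=60, sleep=1):
    # Poll predicate() until it returns True or the deadline passes.
    deadline = _time_sketch.time() + timeout
    while not predicate():
        if _time_sketch.time() > deadline:
            raise RuntimeError('condition not met within %ss' % timeout)
        _time_sketch.sleep(sleep)
# Usage mirrors the real helper, e.g.:
#   _wait_until_true_sketch(lambda: router.ha_state == 'master')
# --- end of editor's sketch ---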
device = router.router[constants.INTERFACE_KEY][-1] device_exists = functools.partial( self.device_exists_with_ips_and_mac, device, router.get_internal_device_name, router.ns_name) common_utils.wait_until_true(device_exists) self.assertTrue(self._namespace_exists(router.ns_name)) common_utils.wait_until_true( lambda: self._metadata_proxy_exists(self.agent.conf, router)) self._assert_internal_devices(router) self._assert_external_device(router) if not (enable_ha and (ip_version == constants.IP_VERSION_6 or dual_stack)): # Note(SridharG): enable the assert_gateway for IPv6 once # keepalived on Ubuntu14.04 (i.e., check-neutron-dsvm-functional # platform) is updated to 1.2.10 (or above). # For more details: https://review.opendev.org/#/c/151284/ self._assert_gateway(router, v6_ext_gw_with_sub) self.assertTrue(self.floating_ips_configured(router)) self._assert_snat_chains(router) self._assert_floating_ip_chains(router) self._assert_iptables_rules_converged(router) self._assert_extra_routes(router) if (ip_version == constants.IP_VERSION_6 or dual_stack): ip_versions = [constants.IP_VERSION_4, constants.IP_VERSION_6] else: ip_versions = [constants.IP_VERSION_4] self._assert_onlink_subnet_routes(router, ip_versions) self._assert_metadata_chains(router) # Verify router gateway interface is configured to receive Router Advts # when IPv6 is enabled and no IPv6 gateway is configured. if router.use_ipv6 and not v6_ext_gw_with_sub: if not self.agent.conf.ipv6_gateway: self._assert_ipv6_accept_ra(router) if enable_ha: self._assert_ha_device(router) common_utils.wait_until_true( lambda: router.keepalived_manager.get_process().active, timeout=15) self._delete_router(self.agent, router.router_id) self._assert_interfaces_deleted_from_ovs() self._assert_router_does_not_exist(router) if enable_ha: common_utils.wait_until_true( lambda: not router.keepalived_manager.get_process().active, timeout=15) return return_copy def manage_router(self, agent, router): self.addCleanup(agent._safe_router_removed, router['id']) # NOTE(mangelajo): Neutron functional for l3 don't rely on openvswitch # agent tagging ports, and all ports remain untagged # during test execution. # Workaround related to lp#1767422 plugs new ports as # dead vlan (4095) to avoid issues, we need to remove # such tag during functional l3 testing. 
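# (Editor's note, illustrative: what the wrapper below automates could
#  be done by hand, for a hypothetical port qr-deadbeef-aa, with
#  "ovs-vsctl clear Port qr-deadbeef-aa tag"; clearing the OVSDB 'tag'
#  column lifts the dead-VLAN 4095 tag so traffic can flow during the
#  test.)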
original_plug_new = interface.OVSInterfaceDriver.plug_new def new_ovs_plug(self, *args, **kwargs): original_plug_new(self, *args, **kwargs) bridge = (kwargs.get('bridge') or args[4] or self.conf.OVS.integration_bridge) device_name = kwargs.get('device_name') or args[2] ovsbr = ovs_lib.OVSBridge(bridge) ovsbr.clear_db_attribute('Port', device_name, 'tag') with mock.patch(OVS_INTERFACE_DRIVER + '.plug_new', autospec=True) as ( ovs_plug): ovs_plug.side_effect = new_ovs_plug agent._process_added_router(router) return agent.router_info[router['id']] def _delete_router(self, agent, router_id): agent._safe_router_removed(router_id) def _add_fip(self, router, fip_address, fixed_address='10.0.0.2', host=None, fixed_ip_address_scope=None): fip = {'id': _uuid(), 'port_id': _uuid(), 'floating_ip_address': fip_address, 'fixed_ip_address': fixed_address, 'host': host, 'fixed_ip_address_scope': fixed_ip_address_scope} router.router[constants.FLOATINGIP_KEY].append(fip) def _add_internal_interface_by_subnet(self, router, count=1, ip_version=constants.IP_VERSION_4, ipv6_subnet_modes=None, interface_id=None): return l3_test_common.router_append_subnet(router, count, ip_version, ipv6_subnet_modes, interface_id) def _namespace_exists(self, namespace): return ip_lib.network_namespace_exists(namespace) def _metadata_proxy_exists(self, conf, router): pm = external_process.ProcessManager( conf, router.router_id, router.ns_name, service=metadata_driver.HAPROXY_SERVICE) return pm.active def device_exists_with_ips_and_mac(self, expected_device, name_getter, namespace): ip_cidrs = common_utils.fixed_ip_cidrs(expected_device['fixed_ips']) return ip_lib.device_exists_with_ips_and_mac( name_getter(expected_device['id']), ip_cidrs, expected_device['mac_address'], namespace) @staticmethod def _port_first_ip_cidr(port): fixed_ip = port['fixed_ips'][0] return common_utils.ip_to_cidr(fixed_ip['ip_address'], fixed_ip['prefixlen']) def get_device_mtu(self, target_device, name_getter, namespace): device = ip_lib.IPDevice(name_getter(target_device), namespace) return device.link.mtu def get_expected_keepalive_configuration(self, router): ha_device_name = router.get_ha_device_name() external_port = router.get_ex_gw_port() ex_port_ipv6 = ip_lib.get_ipv6_lladdr(external_port['mac_address']) ex_device_name = router.get_external_device_name( external_port['id']) external_device_cidr = self._port_first_ip_cidr(external_port) internal_port = router.router[constants.INTERFACE_KEY][0] int_port_ipv6 = ip_lib.get_ipv6_lladdr(internal_port['mac_address']) internal_device_name = router.get_internal_device_name( internal_port['id']) internal_device_cidr = self._port_first_ip_cidr(internal_port) floating_ip_cidr = common_utils.ip_to_cidr( router.get_floating_ips()[0]['floating_ip_address']) default_gateway_ip = external_port['subnets'][0].get('gateway_ip') extra_subnet_cidr = external_port['extra_subnets'][0].get('cidr') return textwrap.dedent("""\ global_defs { notification_email_from %(email_from)s router_id %(router_id)s } vrrp_instance VR_1 { state BACKUP interface %(ha_device_name)s virtual_router_id 1 priority 50 garp_master_delay 60 nopreempt advert_int 2 track_interface { %(ha_device_name)s } virtual_ipaddress { 169.254.0.1/24 dev %(ha_device_name)s } virtual_ipaddress_excluded { %(floating_ip_cidr)s dev %(ex_device_name)s %(external_device_cidr)s dev %(ex_device_name)s %(internal_device_cidr)s dev %(internal_device_name)s %(ex_port_ipv6)s dev %(ex_device_name)s scope link %(int_port_ipv6)s dev %(internal_device_name)s scope link } 
virtual_routes { 0.0.0.0/0 via %(default_gateway_ip)s dev %(ex_device_name)s 8.8.8.0/24 via 19.4.4.4 %(extra_subnet_cidr)s dev %(ex_device_name)s scope link } }""") % { 'email_from': keepalived.KEEPALIVED_EMAIL_FROM, 'router_id': keepalived.KEEPALIVED_ROUTER_ID, 'ha_device_name': ha_device_name, 'ex_device_name': ex_device_name, 'external_device_cidr': external_device_cidr, 'internal_device_name': internal_device_name, 'internal_device_cidr': internal_device_cidr, 'floating_ip_cidr': floating_ip_cidr, 'default_gateway_ip': default_gateway_ip, 'int_port_ipv6': int_port_ipv6, 'ex_port_ipv6': ex_port_ipv6, 'extra_subnet_cidr': extra_subnet_cidr, } def _get_rule(self, iptables_manager, table, chain, predicate): rules = iptables_manager.get_chain(table, chain) result = next(rule for rule in rules if predicate(rule)) return result def _assert_router_does_not_exist(self, router): # If the namespace assertion succeeds # then the devices and iptable rules have also been deleted, # so there's no need to check that explicitly. self.assertFalse(self._namespace_exists(router.ns_name)) common_utils.wait_until_true( lambda: not self._metadata_proxy_exists(self.agent.conf, router), timeout=10) def _assert_snat_chains(self, router): self.assertFalse(router.iptables_manager.is_chain_empty( 'nat', 'snat')) self.assertFalse(router.iptables_manager.is_chain_empty( 'nat', 'POSTROUTING')) def _assert_floating_ip_chains(self, router, snat_bound_fip=False): if snat_bound_fip: self.assertFalse(router.snat_iptables_manager.is_chain_empty( 'nat', 'float-snat')) self.assertFalse(router.iptables_manager.is_chain_empty( 'nat', 'float-snat')) def _assert_iptables_rules_converged(self, router): # if your code is failing on this line, it means you are not generating # your iptables rules in the same format that iptables-save returns # them. 
run iptables-save to see the format they should be in self.assertFalse(router.iptables_manager.apply()) def _assert_metadata_chains(self, router): metadata_port_filter = lambda rule: ( str(self.agent.conf.metadata_port) in rule.rule) self.assertTrue(self._get_rule(router.iptables_manager, 'nat', 'PREROUTING', metadata_port_filter)) self.assertTrue(self._get_rule(router.iptables_manager, 'filter', 'INPUT', metadata_port_filter)) def _assert_internal_devices(self, router): internal_devices = router.router[constants.INTERFACE_KEY] self.assertGreater(len(internal_devices), 0) for device in internal_devices: self.assertTrue(self.device_exists_with_ips_and_mac( device, router.get_internal_device_name, router.ns_name)) def _assert_extra_routes(self, router, namespace=None): if namespace is None: namespace = router.ns_name routes = ip_lib.get_routing_table(4, namespace=namespace) routes = [{'nexthop': route['nexthop'], 'destination': route['destination']} for route in routes] for extra_route in router.router['routes']: self.assertIn(extra_route, routes) def _assert_onlink_subnet_routes( self, router, ip_versions, namespace=None): ns_name = namespace or router.ns_name routes = [] for ip_version in ip_versions: _routes = ip_lib.get_routing_table(ip_version, namespace=ns_name) routes.extend(_routes) routes = set(route['destination'] for route in routes) extra_subnets = router.get_ex_gw_port()['extra_subnets'] for extra_subnet in (route['cidr'] for route in extra_subnets): self.assertIn(extra_subnet, routes) def _assert_interfaces_deleted_from_ovs(self): bridge = ovs_lib.OVSBridge(self.agent.conf.OVS.integration_bridge) self.assertFalse(bridge.get_port_name_list()) def floating_ips_configured(self, router): floating_ips = router.router[constants.FLOATINGIP_KEY] external_port = router.get_ex_gw_port() return len(floating_ips) and all( ip_lib.device_exists_with_ips_and_mac( router.get_external_device_name(external_port['id']), ['%s/32' % fip['floating_ip_address']], external_port['mac_address'], namespace=router.ns_name) for fip in floating_ips) def _create_router(self, router_info, agent): ns_name = "%s%s%s" % ( 'qrouter-' + router_info['id'], self.NESTED_NAMESPACE_SEPARATOR, agent.host) ext_name = "qg-%s-%s" % (agent.host, _uuid()[-4:]) int_name = "qr-%s-%s" % (agent.host, _uuid()[-4:]) get_ns_name = mock.patch.object( namespaces.RouterNamespace, '_get_ns_name').start() get_ns_name.return_value = ns_name get_ext_name = mock.patch.object(l3_router_info.RouterInfo, 'get_external_device_name').start() get_ext_name.return_value = ext_name get_int_name = mock.patch.object(l3_router_info.RouterInfo, 'get_internal_device_name').start() get_int_name.return_value = int_name router = self.manage_router(agent, router_info) router_ext_name = mock.patch.object(router, 'get_external_device_name').start() router_ext_name.return_value = get_ext_name.return_value router_int_name = mock.patch.object(router, 'get_internal_device_name').start() router_int_name.return_value = get_int_name.return_value return router def create_ha_routers(self): router_info = self.generate_router_info(enable_ha=True) router1 = self._create_router(router_info, self.agent) self._add_fip(router1, '192.168.111.12') r1_br = ip_lib.IPDevice(router1.driver.conf.OVS.integration_bridge) r1_br.addr.add('19.4.4.1/24') r1_br.link.set_up() router_info_2 = copy.deepcopy(router_info) router_info_2[constants.HA_INTERFACE_KEY] = ( l3_test_common.get_ha_interface(ip='169.254.192.2', mac='22:22:22:22:22:22')) router2 = self._create_router(router_info_2, 
self.failover_agent) r2_br = ip_lib.IPDevice(router2.driver.conf.OVS.integration_bridge) r2_br.addr.add('19.4.4.1/24') r2_br.link.set_up() return (router1, router2) def _get_master_and_slave_routers(self, router1, router2, check_external_device=True): try: common_utils.wait_until_true( lambda: router1.ha_state == 'master') if check_external_device: common_utils.wait_until_true( lambda: self._check_external_device(router1)) master_router = router1 slave_router = router2 except common_utils.WaitTimeout: common_utils.wait_until_true( lambda: router2.ha_state == 'master') if check_external_device: common_utils.wait_until_true( lambda: self._check_external_device(router2)) master_router = router2 slave_router = router1 common_utils.wait_until_true( lambda: master_router.ha_state == 'master') if check_external_device: common_utils.wait_until_true( lambda: self._check_external_device(master_router)) common_utils.wait_until_true( lambda: slave_router.ha_state == 'backup') return master_router, slave_router def fail_ha_router(self, router): device_name = router.get_ha_device_name() ha_device = ip_lib.IPDevice(device_name, router.ha_namespace) ha_device.link.set_down() @staticmethod def fail_gw_router_port(router): r_br = ip_lib.IPDevice(router.driver.conf.OVS.integration_bridge) r_br.link.set_down() @staticmethod def restore_gw_router_port(router): r_br = ip_lib.IPDevice(router.driver.conf.OVS.integration_bridge) r_br.link.set_up() @classmethod def _get_addresses_on_device(cls, namespace, interface): return [address['cidr'] for address in ip_lib.IPDevice(interface, namespace=namespace).addr.list()] def _assert_no_ip_addresses_on_interface(self, namespace, interface): self.assertEqual( [], self._get_addresses_on_device(namespace, interface)) def _assert_ip_addresses_on_interface(self, namespace, interface, ip_addresses): for ip_address in ip_addresses: self._assert_ip_address_on_interface(namespace, interface, ip_address) def _assert_ip_address_not_on_interface(self, namespace, interface, ip_address): self.assertNotIn( ip_address, self._get_addresses_on_device(namespace, interface)) def _assert_ip_address_on_interface(self, namespace, interface, ip_address): self.assertIn( ip_address, self._get_addresses_on_device(namespace, interface)) def _assert_ping_reply_from_expected_address(self, ping_result, expected_address): ping_results = ping_result.split('\n') self.assertGreater( len(ping_results), 1, "The result from ping should be multiple lines") self.assertIn( expected_address, ping_results[1], ("Expect to see %s in the reply of ping, but failed" % expected_address)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/l3/test_dvr_router.py0000644000175000017500000031363500000000000027374 0ustar00coreycorey00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import functools import mock import netaddr from neutron_lib.api.definitions import portbindings from neutron_lib import constants as lib_constants from neutron_lib.exceptions import l3 as l3_exc import six import testtools from neutron.agent.l3 import agent as neutron_l3_agent from neutron.agent.l3 import dvr_edge_ha_router as dvr_ha_router from neutron.agent.l3 import dvr_edge_router from neutron.agent.l3 import dvr_fip_ns from neutron.agent.l3 import dvr_local_router from neutron.agent.l3 import dvr_snat_ns from neutron.agent.l3 import namespaces from neutron.agent.linux import ip_lib from neutron.agent.linux import iptables_manager from neutron.common import utils from neutron.tests.common import l3_test_common from neutron.tests.common import machine_fixtures from neutron.tests.common import net_helpers from neutron.tests.functional.agent.l3 import framework DEVICE_OWNER_COMPUTE = lib_constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' class DvrRouterTestFramework(framework.L3AgentTestFramework): def generate_dvr_router_info(self, enable_ha=False, enable_snat=False, enable_gw=True, snat_bound_fip=False, agent=None, extra_routes=False, enable_floating_ip=True, enable_centralized_fip=False, vrrp_id=None, **kwargs): if not agent: agent = self.agent router = l3_test_common.prepare_router_data( enable_snat=enable_snat, enable_floating_ip=enable_floating_ip, enable_ha=enable_ha, extra_routes=extra_routes, num_internal_ports=2, enable_gw=enable_gw, snat_bound_fip=snat_bound_fip, vrrp_id=vrrp_id, **kwargs) internal_ports = router.get(lib_constants.INTERFACE_KEY, []) router['distributed'] = True router['gw_port_host'] = agent.conf.host if enable_floating_ip: for floating_ip in router[lib_constants.FLOATINGIP_KEY]: floating_ip['host'] = agent.conf.host if enable_floating_ip and enable_centralized_fip: # For centralizing the fip, we are emulating the legacy # router behavior where the fip dict does not contain any # host information. router[lib_constants.FLOATINGIP_KEY][0]['host'] = None # In order to test the mixed dvr_snat and compute scenario, we create # two floating IPs: one distributed, one centralized. # The distributed floating IP should keep its host, which was # just set to None above, so we set it back. The centralized # floating IP has host None, and this IP will be used to test # migration from centralized to distributed. if snat_bound_fip: router[lib_constants.FLOATINGIP_KEY][0]['host'] = agent.conf.host router[lib_constants.FLOATINGIP_KEY][1][ lib_constants.DVR_SNAT_BOUND] = True router[lib_constants.FLOATINGIP_KEY][1]['host'] = None if enable_gw: external_gw_port = router['gw_port'] router['gw_port'][portbindings.HOST_ID] = agent.conf.host self._add_snat_port_info_to_router(router, internal_ports) # FIP has a dependency on the external gateway, so we need to create # the snat_port info and fip_agent_gw_port_info irrespective of # the agent type the dvr supports. The namespace creation is # dependent on the agent_type. if enable_floating_ip: for index, floating_ip in enumerate(router['_floatingips']): floating_ip['floating_network_id'] = ( external_gw_port['network_id']) floating_ip['port_id'] = internal_ports[index]['id'] floating_ip['status'] = 'ACTIVE' self._add_fip_agent_gw_port_info_to_router(router, external_gw_port) # Router creation is delegated to router_factory. We have to # re-register here so that the factory can pick up the overridden # agent mode.
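# (Editor's note, illustrative: the factory selects the RouterInfo
#  subclass, e.g. a dvr local versus a dvr edge router, from the agent
#  mode, and these tests flip agent_mode on an agent that was already
#  constructed, hence the explicit re-registration below.)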
self.agent._register_router_cls(self.agent.router_factory) return router def _add_fip_agent_gw_port_info_to_router(self, router, external_gw_port): # Add fip agent gateway port information to the router_info fip_gw_port_list = router.get( lib_constants.FLOATINGIP_AGENT_INTF_KEY, []) if not fip_gw_port_list and external_gw_port: # Get values from external gateway port fixed_ip = external_gw_port['fixed_ips'][0] float_subnet = external_gw_port['subnets'][0] port_ip = fixed_ip['ip_address'] # Pick an ip address which is not the same as port_ip fip_gw_port_ip = str(netaddr.IPAddress(port_ip) + 5) # Add floatingip agent gateway port info to router prefixlen = netaddr.IPNetwork(float_subnet['cidr']).prefixlen router[lib_constants.FLOATINGIP_AGENT_INTF_KEY] = [ {'subnets': [ {'cidr': float_subnet['cidr'], 'gateway_ip': float_subnet['gateway_ip'], 'id': fixed_ip['subnet_id']}], 'extra_subnets': external_gw_port['extra_subnets'], 'network_id': external_gw_port['network_id'], 'device_owner': lib_constants.DEVICE_OWNER_AGENT_GW, 'mac_address': 'fa:16:3e:80:8d:89', portbindings.HOST_ID: self.agent.conf.host, 'fixed_ips': [{'subnet_id': fixed_ip['subnet_id'], 'ip_address': fip_gw_port_ip, 'prefixlen': prefixlen}], 'id': framework._uuid(), 'device_id': framework._uuid()} ] def _add_snat_port_info_to_router(self, router, internal_ports): # Add snat port information to the router snat_port_list = router.get(lib_constants.SNAT_ROUTER_INTF_KEY, []) if not snat_port_list and internal_ports: router[lib_constants.SNAT_ROUTER_INTF_KEY] = [] for port in internal_ports: # Get values from internal port fixed_ip = port['fixed_ips'][0] snat_subnet = port['subnets'][0] port_ip = fixed_ip['ip_address'] # Pick an ip address which is not the same as port_ip snat_ip = str(netaddr.IPAddress(port_ip) + 5) # Add the info to router as the first snat port # in the list of snat ports prefixlen = netaddr.IPNetwork(snat_subnet['cidr']).prefixlen snat_router_port = { 'subnets': [ {'cidr': snat_subnet['cidr'], 'gateway_ip': snat_subnet['gateway_ip'], 'id': fixed_ip['subnet_id']}], 'network_id': port['network_id'], 'device_owner': lib_constants.DEVICE_OWNER_ROUTER_SNAT, 'mac_address': 'fa:16:3e:80:8d:89', 'fixed_ips': [{'subnet_id': fixed_ip['subnet_id'], 'ip_address': snat_ip, 'prefixlen': prefixlen}], 'id': framework._uuid(), 'device_id': framework._uuid()} # Get the address scope if there is any if 'address_scopes' in port: snat_router_port['address_scopes'] = port['address_scopes'] router[lib_constants.SNAT_ROUTER_INTF_KEY].append( snat_router_port) class TestDvrRouter(DvrRouterTestFramework, framework.L3AgentTestFramework): def manage_router(self, agent, router): def _safe_fipnamespace_delete_on_ext_net(ext_net_id): try: agent.fipnamespace_delete_on_ext_net(None, ext_net_id) except RuntimeError: pass if router['gw_port']: self.addCleanup( _safe_fipnamespace_delete_on_ext_net, router['gw_port']['network_id']) return super(TestDvrRouter, self).manage_router(agent, router) def test_dvr_update_floatingip_statuses(self): self.agent.conf.agent_mode = 'dvr' self._test_update_floatingip_statuses(self.generate_dvr_router_info()) def test_dvr_router_lifecycle_without_ha_without_snat_with_fips(self): self._dvr_router_lifecycle(enable_ha=False, enable_snat=False) def test_dvr_router_lifecycle_without_ha_with_snat_with_fips(self): self._dvr_router_lifecycle(enable_ha=False, enable_snat=True) def test_dvr_router_lifecycle_ha_with_snat_with_fips(self): self._dvr_router_lifecycle(enable_ha=True, enable_snat=True) def 
test_dvr_lifecycle_no_ha_with_snat_with_fips_with_cent_fips(self): self._dvr_router_lifecycle(enable_ha=False, enable_snat=True, snat_bound_fip=True) def test_dvr_lifecycle_ha_with_snat_with_fips_with_cent_fips(self): self._dvr_router_lifecycle(enable_ha=True, enable_snat=True, snat_bound_fip=True) def _check_routes(self, expected_routes, actual_routes): actual_routes = [{key: route[key] for key in expected_routes[0].keys()} for route in actual_routes] self.assertEqual(expected_routes, actual_routes) def _helper_create_dvr_router_fips_for_ext_network( self, agent_mode, **dvr_router_kwargs): self.agent.conf.agent_mode = agent_mode router_info = self.generate_dvr_router_info(**dvr_router_kwargs) router = self.manage_router(self.agent, router_info) fip_ns = router.fip_ns.get_name() return router, fip_ns def _validate_fips_for_external_network(self, router, fip_ns): self.assertTrue(self._namespace_exists(router.ns_name)) self.assertTrue(self._namespace_exists(fip_ns)) self._assert_dvr_floating_ips(router) self._assert_snat_namespace_does_not_exist(router) def test_dvr_gateway_move_does_not_remove_redirect_rules(self): """Test to validate snat redirect rules not cleared with snat move.""" self.agent.conf.agent_mode = 'dvr_snat' router_info = self.generate_dvr_router_info(enable_snat=True) router1 = self.manage_router(self.agent, router_info) router1.router['gw_port_host'] = "" self.agent._process_updated_router(router1.router) router_updated = self.agent.router_info[router1.router['id']] self.assertTrue(self._namespace_exists(router_updated.ns_name)) ip4_rules_list = ip_lib.list_ip_rules(router1.ns_name, lib_constants.IP_VERSION_4) self.assertEqual(6, len(ip4_rules_list)) # list_ip_rules list should have 6 entries. # Three entries from 'default', 'main' and 'local' table. # One rule for the floatingip. # The remaining 2 is for the two router interfaces(csnat ports). default_rules_list_count = 0 interface_rules_list_count = 0 for ip_rule in ip4_rules_list: tbl_index = ip_rule['table'] if tbl_index in ['local', 'default', 'main', str(dvr_fip_ns.FIP_RT_TBL)]: default_rules_list_count = default_rules_list_count + 1 else: interface_rules_list_count = interface_rules_list_count + 1 self.assertEqual(4, default_rules_list_count) self.assertEqual(2, interface_rules_list_count) def test_dvr_update_gateway_port_no_fip_fg_port_recovers_itself_with_fpr( self): self.agent.conf.agent_mode = 'dvr' # Create the router with external net router_info = self.generate_dvr_router_info() external_gw_port = router_info['gw_port'] router = self.manage_router(self.agent, router_info) fg_port = router.fip_ns.agent_gateway_port fg_port_name = router.fip_ns.get_ext_device_name(fg_port['id']) fg_device = ip_lib.IPDevice(fg_port_name, namespace=router.fip_ns.name) fip_2_rtr_name = router.fip_ns.get_int_device_name(router.router_id) fpr_device = ip_lib.IPDevice(fip_2_rtr_name, namespace=router.fip_ns.name) # Now validate if the gateway is properly configured. rtr_2_fip, fip_2_rtr = router.rtr_fip_subnet.get_pair() tbl_index = router._get_snat_idx(fip_2_rtr) self.assertIn('via', fg_device.route.get_gateway(table=tbl_index)) self._validate_fips_for_external_network( router, router.fip_ns.get_name()) # Now delete the fg- port that was created router.fip_ns.driver.unplug(fg_port_name, namespace=router.fip_ns.name, prefix=dvr_fip_ns.FIP_EXT_DEV_PREFIX) # Now check if the fg- port is missing. self.assertFalse(fg_device.exists()) fpr_device.link.set_down() # Now change the gateway ip for the router and do an update. 
router.ex_gw_port = copy.deepcopy(router.ex_gw_port) new_fg_port = copy.deepcopy(fg_port) for subnet in new_fg_port['subnets']: subnet['gateway_ip'] = '19.4.4.2' router.router[lib_constants.FLOATINGIP_AGENT_INTF_KEY] = [new_fg_port] self.assertRaises(l3_exc.FloatingIpSetupException, self.agent._process_updated_router, router.router) self.agent._process_updated_router(router.router) self.assertTrue(fg_device.exists()) self.assertTrue(fpr_device.exists()) updated_route = fg_device.route.list_routes( ip_version=lib_constants.IP_VERSION_4, table=tbl_index) expected_route = [{'cidr': '0.0.0.0/0', 'device': fg_port_name, 'table': tbl_index, 'via': '19.4.4.2'}] self._check_routes(expected_route, updated_route) self._validate_fips_for_external_network( router, router.fip_ns.get_name()) self._delete_router(self.agent, router.router_id) self._assert_fip_namespace_deleted(external_gw_port) def test_dvr_update_gateway_port_with_no_gw_port_in_namespace(self): self.agent.conf.agent_mode = 'dvr' # Create the router with external net router_info = self.generate_dvr_router_info() external_gw_port = router_info['gw_port'] router = self.manage_router(self.agent, router_info) fg_port = router.fip_ns.agent_gateway_port fg_port_name = router.fip_ns.get_ext_device_name(fg_port['id']) fg_device = ip_lib.IPDevice(fg_port_name, namespace=router.fip_ns.name) # Now validate if the gateway is properly configured. rtr_2_fip, fip_2_rtr = router.rtr_fip_subnet.get_pair() tbl_index = router._get_snat_idx(fip_2_rtr) self.assertIn('via', fg_device.route.get_gateway(table=tbl_index)) self._validate_fips_for_external_network( router, router.fip_ns.get_name()) # Now delete the fg- port that was created router.fip_ns.driver.unplug(fg_port_name, namespace=router.fip_ns.name, prefix=dvr_fip_ns.FIP_EXT_DEV_PREFIX) # Now check if the fg- port is missing. self.assertFalse(fg_device.exists()) # Now change the gateway ip for the router and do an update. 
router.ex_gw_port = copy.deepcopy(router.ex_gw_port) new_fg_port = copy.deepcopy(fg_port) for subnet in new_fg_port['subnets']: subnet['gateway_ip'] = '19.4.4.2' router.router[lib_constants.FLOATINGIP_AGENT_INTF_KEY] = [new_fg_port] self.assertRaises(l3_exc.FloatingIpSetupException, self.manage_router, self.agent, router.router) router = self.manage_router(self.agent, router.router) self.assertTrue(fg_device.exists()) updated_route = fg_device.route.list_routes( ip_version=lib_constants.IP_VERSION_4, table=tbl_index) expected_route = [{'cidr': '0.0.0.0/0', 'device': fg_port_name, 'table': tbl_index, 'via': '19.4.4.2'}] self._check_routes(expected_route, updated_route) self._validate_fips_for_external_network( router, router.fip_ns.get_name()) self._delete_router(self.agent, router.router_id) self._assert_fip_namespace_deleted(external_gw_port) @mock.patch.object(dvr_fip_ns.FipNamespace, 'subscribe') def test_dvr_process_fips_with_no_gw_port_in_namespace(self, fip_subscribe): self.agent.conf.agent_mode = 'dvr' # Create the router with external net router_info = self.generate_dvr_router_info() external_gw_port = router_info['gw_port'] ext_net_id = router_info['_floatingips'][0]['floating_network_id'] # Create the fip namespace up front fip_ns = dvr_fip_ns.FipNamespace(ext_net_id, self.agent.conf, self.agent.driver, self.agent.use_ipv6) fip_ns.create() # Create the router with the fip, this shouldn't allow the # update_gateway_port to be called without the fg- port fip_subscribe.return_value = False fip_ns.agent_gateway_port = ( router_info[lib_constants.FLOATINGIP_AGENT_INTF_KEY]) # This will raise the exception and will also clear # subscription for the ext_net_id self.assertRaises(l3_exc.FloatingIpSetupException, self.manage_router, self.agent, router_info) fip_subscribe.return_value = True self.manage_router(self.agent, router_info) # Now update the router again router = self.manage_router(self.agent, router_info) fg_port = router.fip_ns.agent_gateway_port fg_port_name = router.fip_ns.get_ext_device_name(fg_port['id']) fg_device = ip_lib.IPDevice(fg_port_name, namespace=router.fip_ns.name) rtr_2_fip, fip_2_rtr = router.rtr_fip_subnet.get_pair() tbl_index = router._get_snat_idx(fip_2_rtr) # Now validate if the gateway is properly configured. 
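# (Editor's note, illustrative: the same check done by hand would be
#  "ip netns exec <fip-namespace> ip route show table <tbl_index>",
#  which should print a default route such as
#  "default via 19.4.4.1 dev fg-...". The 'via' key asserted below is
#  ip_lib's parsed form of that next hop.)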
self.assertIn('via', fg_device.route.get_gateway(table=tbl_index)) self._validate_fips_for_external_network( router, router.fip_ns.get_name()) self._delete_router(self.agent, router.router_id) self._assert_fip_namespace_deleted(external_gw_port) def test_dvr_router_fips_stale_gw_port(self): self.agent.conf.agent_mode = 'dvr' # Create the router with external net dvr_router_kwargs = {'ip_address': '19.4.4.3', 'subnet_cidr': '19.4.4.0/24', 'gateway_ip': '19.4.4.1', 'gateway_mac': 'ca:fe:de:ab:cd:ef'} router_info = self.generate_dvr_router_info(**dvr_router_kwargs) external_gw_port = router_info['gw_port'] ext_net_id = router_info['_floatingips'][0]['floating_network_id'] # Create the fip namespace up front stale_fip_ns = dvr_fip_ns.FipNamespace(ext_net_id, self.agent.conf, self.agent.driver, self.agent.use_ipv6) stale_fip_ns.create() # Add a stale fg port to the namespace fixed_ip = external_gw_port['fixed_ips'][0] float_subnet = external_gw_port['subnets'][0] fip_gw_port_ip = str(netaddr.IPAddress(fixed_ip['ip_address']) + 10) prefixlen = netaddr.IPNetwork(float_subnet['cidr']).prefixlen stale_agent_gw_port = { 'subnets': [{'cidr': float_subnet['cidr'], 'gateway_ip': float_subnet['gateway_ip'], 'id': fixed_ip['subnet_id']}], 'network_id': external_gw_port['network_id'], 'device_owner': lib_constants.DEVICE_OWNER_AGENT_GW, 'mac_address': 'fa:16:3e:80:8f:89', portbindings.HOST_ID: self.agent.conf.host, 'fixed_ips': [{'subnet_id': fixed_ip['subnet_id'], 'ip_address': fip_gw_port_ip, 'prefixlen': prefixlen}], 'id': framework._uuid(), 'device_id': framework._uuid()} stale_fip_ns.create_or_update_gateway_port(stale_agent_gw_port) stale_dev_exists = self.device_exists_with_ips_and_mac( stale_agent_gw_port, stale_fip_ns.get_ext_device_name, stale_fip_ns.get_name()) self.assertTrue(stale_dev_exists) # Create the router, this shouldn't allow the duplicate port to stay router = self.manage_router(self.agent, router_info) # Assert the device no longer exists stale_dev_exists = self.device_exists_with_ips_and_mac( stale_agent_gw_port, stale_fip_ns.get_ext_device_name, stale_fip_ns.get_name()) self.assertFalse(stale_dev_exists) # Validate things are looking good and clean up self._validate_fips_for_external_network( router, router.fip_ns.get_name()) ext_gateway_port = router_info['gw_port'] self._delete_router(self.agent, router.router_id) self._assert_fip_namespace_deleted(ext_gateway_port) def test_dvr_router_gateway_redirect_cleanup_on_agent_restart(self): """Test to validate the router namespace gateway redirect rule cleanup. This test checks for the non existence of the gateway redirect rules in the router namespace after the agent restarts while the gateway is removed for the router. 
""" self.agent.conf.agent_mode = 'dvr_snat' router_info = self.generate_dvr_router_info() router1 = self.manage_router(self.agent, router_info) self._assert_snat_namespace_exists(router1) self.assertTrue(self._namespace_exists(router1.ns_name)) restarted_agent = neutron_l3_agent.L3NATAgentWithStateReport( self.agent.host, self.agent.conf) router1.router['gw_port'] = "" router1.router['gw_port_host'] = "" router1.router['external_gateway_info'] = "" restarted_router = self.manage_router(restarted_agent, router1.router) self.assertTrue(self._namespace_exists(restarted_router.ns_name)) ip4_rules_list = ip_lib.list_ip_rules(router1.ns_name, lib_constants.IP_VERSION_4) ip6_rules_list = ip_lib.list_ip_rules(router1.ns_name, lib_constants.IP_VERSION_6) # Just make sure the basic set of rules are there in the router # namespace self.assertEqual(3, len(ip4_rules_list)) self.assertEqual(2, len(ip6_rules_list)) def test_dvr_unused_snat_ns_deleted_when_agent_restarts_after_move(self): """Test to validate the stale snat namespace delete with snat move. This test validates the stale snat namespace cleanup when the agent restarts after the gateway port has been moved from the agent. """ self.agent.conf.agent_mode = 'dvr_snat' router_info = self.generate_dvr_router_info() router1 = self.manage_router(self.agent, router_info) self._assert_snat_namespace_exists(router1) restarted_agent = neutron_l3_agent.L3NATAgentWithStateReport( self.agent.host, self.agent.conf) router1.router['gw_port_host'] = "my-new-host" restarted_router = self.manage_router(restarted_agent, router1.router) self._assert_snat_namespace_does_not_exist(restarted_router) def test_dvr_router_fips_for_multiple_ext_networks(self): agent_mode = 'dvr' # Create the first router fip with external net1 dvr_router1_kwargs = {'ip_address': '19.4.4.3', 'subnet_cidr': '19.4.4.0/24', 'gateway_ip': '19.4.4.1', 'gateway_mac': 'ca:fe:de:ab:cd:ef'} router1, fip1_ns = ( self._helper_create_dvr_router_fips_for_ext_network( agent_mode, **dvr_router1_kwargs)) # Validate the fip with external net1 self._validate_fips_for_external_network(router1, fip1_ns) # Create the second router fip with external net2 dvr_router2_kwargs = {'ip_address': '19.4.5.3', 'subnet_cidr': '19.4.5.0/24', 'gateway_ip': '19.4.5.1', 'gateway_mac': 'ca:fe:de:ab:cd:fe'} router2, fip2_ns = ( self._helper_create_dvr_router_fips_for_ext_network( agent_mode, **dvr_router2_kwargs)) # Validate the fip with external net2 self._validate_fips_for_external_network(router2, fip2_ns) def _dvr_router_lifecycle(self, enable_ha=False, enable_snat=False, custom_mtu=2000, ip_version=lib_constants.IP_VERSION_4, dual_stack=False, snat_bound_fip=False): '''Test dvr router lifecycle :param enable_ha: sets the ha value for the router. :param enable_snat: the value of enable_snat is used to set the agent_mode. ''' # The value of agent_mode can be dvr, dvr_snat, or legacy. 
# Since by definition this is a dvr (distributed = true) # only dvr and dvr_snat are applicable self.agent.conf.agent_mode = 'dvr_snat' if enable_snat else 'dvr' # We get the router info particular to a dvr router router_info = self.generate_dvr_router_info( enable_ha, enable_snat, extra_routes=True, snat_bound_fip=snat_bound_fip) for key in ('_interfaces', '_snat_router_interfaces', '_floatingip_agent_interfaces'): for port in router_info[key]: port['mtu'] = custom_mtu router_info['gw_port']['mtu'] = custom_mtu if enable_ha: router_info['_ha_interface']['mtu'] = custom_mtu # We need to mock the get_agent_gateway_port return value # because the whole L3PluginApi is mocked and we need the port # gateway_port information before the l3_agent will create it. # The port returned needs to have the same information as # router_info['gw_port'] fip_agent_gw_port = self._get_fip_agent_gw_port_for_router( router_info['gw_port']) self.mock_plugin_api.get_agent_gateway_port.return_value = ( fip_agent_gw_port) # With all that set we can now ask the l3_agent to # manage the router (create it, create namespaces, # attach interfaces, etc...) router = self.manage_router(self.agent, router_info) if enable_ha: port = router.get_ex_gw_port() interface_name = router.get_external_device_name(port['id']) self._assert_no_ip_addresses_on_interface(router.ha_namespace, interface_name) utils.wait_until_true(lambda: router.ha_state == 'master') # Keepalived notifies of a state transition when it starts, # not when it ends. Thus, we have to wait until keepalived finishes # configuring everything. We verify this by waiting until the last # device has an IP address. device = router.router[lib_constants.INTERFACE_KEY][-1] device_exists = functools.partial( self.device_exists_with_ips_and_mac, device, router.get_internal_device_name, router.ns_name) utils.wait_until_true(device_exists) name = router.get_internal_device_name(device['id']) self.assertEqual(custom_mtu, ip_lib.IPDevice(name, router.ns_name).link.mtu) ext_gateway_port = router_info['gw_port'] self.assertTrue(self._namespace_exists(router.ns_name)) utils.wait_until_true( lambda: self._metadata_proxy_exists(self.agent.conf, router)) self._assert_internal_devices(router) self._assert_dvr_external_device(router) self._assert_dvr_gateway(router) self._assert_dvr_floating_ips(router, snat_bound_fip=snat_bound_fip) self._assert_snat_chains(router) self._assert_floating_ip_chains(router, snat_bound_fip=snat_bound_fip) self._assert_metadata_chains(router) self._assert_rfp_fpr_mtu(router, custom_mtu) if enable_snat: if (ip_version == lib_constants.IP_VERSION_6 or dual_stack): ip_versions = [lib_constants.IP_VERSION_4, lib_constants.IP_VERSION_6] else: ip_versions = [lib_constants.IP_VERSION_4] snat_ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name( router.router_id) self._assert_onlink_subnet_routes( router, ip_versions, snat_ns_name) self._assert_extra_routes(router, namespace=snat_ns_name) # During normal operation, a router-gateway-clear followed by # a router delete results in two notifications to the agent. This # code flow simulates the exceptional case where the notification of # the clearing of the gateway has been missed, so we are checking # that the L3 agent is robust enough to handle that case and delete # the router correctly.
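# --- Editor's sketch (illustrative, not part of the original test):
# the robustness property exercised here is that removal behaves as
# idempotent cleanup, tolerating state the agent never saw updated
# rather than aborting. A generic sketch of that pattern
# (_safe_remove_sketch is hypothetical):
def _safe_remove_sketch(remove_callback, router_id):
    # Swallow errors caused by already-missing state during teardown.
    try:
        remove_callback(router_id)
    except Exception:
        return False
    return True
# --- end of editor's sketch ---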
self._delete_router(self.agent, router.router_id) self._assert_fip_namespace_deleted(ext_gateway_port) self._assert_router_does_not_exist(router) self._assert_snat_namespace_does_not_exist(router) def _get_fip_agent_gw_port_for_router(self, external_gw_port): # Add fip agent gateway port information to the router_info if external_gw_port: # Get values from external gateway port fixed_ip = external_gw_port['fixed_ips'][0] float_subnet = external_gw_port['subnets'][0] port_ip = fixed_ip['ip_address'] # Pick an ip address which is not the same as port_ip fip_gw_port_ip = str(netaddr.IPAddress(port_ip) + 5) # Add floatingip agent gateway port info to router prefixlen = netaddr.IPNetwork(float_subnet['cidr']).prefixlen fip_agent_gw_port_info = { 'subnets': [ {'cidr': float_subnet['cidr'], 'gateway_ip': float_subnet['gateway_ip'], 'id': fixed_ip['subnet_id']}], 'extra_subnets': external_gw_port['extra_subnets'], 'network_id': external_gw_port['network_id'], 'device_owner': lib_constants.DEVICE_OWNER_AGENT_GW, 'mac_address': 'fa:16:3e:80:8d:89', portbindings.HOST_ID: self.agent.conf.host, 'fixed_ips': [{'subnet_id': fixed_ip['subnet_id'], 'ip_address': fip_gw_port_ip, 'prefixlen': prefixlen}], 'id': framework._uuid(), 'device_id': framework._uuid() } return fip_agent_gw_port_info def _assert_dvr_external_device(self, router): external_port = router.get_ex_gw_port() snat_ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name( router.router_id) # if the agent is in dvr_snat mode, then we have to check # that the correct ports and ip addresses exist in the # snat_ns_name namespace if self.agent.conf.agent_mode == 'dvr_snat': device_exists = functools.partial( self.device_exists_with_ips_and_mac, external_port, router.get_external_device_name, snat_ns_name) utils.wait_until_true(device_exists) # if the agent is in dvr mode then the snat_ns_name namespace # should not be present at all: elif self.agent.conf.agent_mode == 'dvr': self.assertFalse( self._namespace_exists(snat_ns_name), "namespace %s was found but agent is in dvr mode not dvr_snat" % (str(snat_ns_name)) ) # if the agent is anything else the test is misconfigured # we force a test failure with message else: self.fail("Agent not configured for dvr or dvr_snat") def _assert_dvr_gateway(self, router): gateway_expected_in_snat_namespace = ( self.agent.conf.agent_mode == 'dvr_snat' ) if gateway_expected_in_snat_namespace: self._assert_dvr_snat_gateway(router) self._assert_removal_of_already_deleted_gateway_device(router) snat_namespace_should_not_exist = ( self.agent.conf.agent_mode == 'dvr' ) if snat_namespace_should_not_exist: self._assert_snat_namespace_does_not_exist(router) def _assert_dvr_snat_gateway(self, router): namespace = dvr_snat_ns.SnatNamespace.get_snat_ns_name( router.router_id) external_port = router.get_ex_gw_port() external_device_name = router.get_external_device_name( external_port['id']) external_device = ip_lib.IPDevice(external_device_name, namespace=namespace) existing_gateway = ( external_device.route.get_gateway().get('via')) expected_gateway = external_port['subnets'][0]['gateway_ip'] self.assertEqual(expected_gateway, existing_gateway) def _assert_removal_of_already_deleted_gateway_device(self, router): namespace = dvr_snat_ns.SnatNamespace.get_snat_ns_name( router.router_id) device = ip_lib.IPDevice("fakedevice", namespace=namespace) # Assert that no exception is thrown for this case self.assertIsNone(router._delete_gateway_device_if_exists( device, "192.168.0.1", 0)) def _assert_snat_namespace_does_not_exist(self, 
router): namespace = dvr_snat_ns.SnatNamespace.get_snat_ns_name( router.router_id) self.assertFalse(self._namespace_exists(namespace)) def _assert_dvr_floating_ips(self, router, snat_bound_fip=False): # in the fip namespace: # Check that the fg- (floatingip_agent_gateway) # is created with the ip address of the external gateway port floating_ips = router.router[lib_constants.FLOATINGIP_KEY] self.assertTrue(floating_ips) # We need to fetch the floatingip agent gateway port info # from the router_info floating_agent_gw_port = ( router.router[lib_constants.FLOATINGIP_AGENT_INTF_KEY]) self.assertTrue(floating_agent_gw_port) external_gw_port = floating_agent_gw_port[0] fip_ns = self.agent.get_fip_ns(floating_ips[0]['floating_network_id']) fip_ns_name = fip_ns.get_name() fg_port_created_successfully = ip_lib.device_exists_with_ips_and_mac( fip_ns.get_ext_device_name(external_gw_port['id']), [self._port_first_ip_cidr(external_gw_port)], external_gw_port['mac_address'], namespace=fip_ns_name) self.assertTrue(fg_port_created_successfully) # Check fpr-router device has been created device_name = fip_ns.get_int_device_name(router.router_id) fpr_router_device_created_successfully = ip_lib.device_exists( device_name, namespace=fip_ns_name) self.assertTrue(fpr_router_device_created_successfully) # In the router namespace # Check rfp- is created correctly for fip in floating_ips: device_name = fip_ns.get_rtr_ext_device_name(router.router_id) self.assertTrue(ip_lib.device_exists( device_name, namespace=router.ns_name)) # In the router namespace, check the iptables rules are set # correctly for fip in floating_ips: expected_rules = router.floating_forward_rules(fip) if fip.get(lib_constants.DVR_SNAT_BOUND): iptables_mgr = router.snat_iptables_manager else: iptables_mgr = router.iptables_manager self._assert_iptables_rules_exist( iptables_mgr, 'nat', expected_rules) def test_dvr_router_with_ha_for_fip_disassociation(self): """Test to validate the fip rules are deleted in dvr_snat_ha router. This test validates the fip rules are getting deleted in a router namespace when the router has ha and snat enabled after the floatingip is disassociated. 
""" self.agent.conf.agent_mode = 'dvr_snat' router_info = self.generate_dvr_router_info( enable_snat=True, enable_ha=True, enable_gw=True) fip_agent_gw_port = router_info[ lib_constants.FLOATINGIP_AGENT_INTF_KEY] self.mock_plugin_api.get_agent_gateway_port.return_value = ( fip_agent_gw_port[0]) router1 = self.manage_router(self.agent, router_info) fip_ns_name = router1.fip_ns.get_name() self.assertTrue(self._namespace_exists(router1.ns_name)) self.assertTrue(self._namespace_exists(fip_ns_name)) self._assert_snat_namespace_exists(router1) ip4_rules_list_with_fip = ip_lib.list_ip_rules( router1.ns_name, lib_constants.IP_VERSION_4) # The rules_list should have 6 entries: # 3 default rules (local, main and default) # 1 Fip forward rule # 2 interface rules to redirect to snat self.assertEqual(6, len(ip4_rules_list_with_fip)) rfp_device_name = router1.fip_ns.get_rtr_ext_device_name( router1.router_id) rfp_device = ip_lib.IPDevice(rfp_device_name, namespace=router1.ns_name) rtr_2_fip, fip_2_rtr = router1.rtr_fip_subnet.get_pair() self._assert_default_gateway( fip_2_rtr, rfp_device, rfp_device_name) router1.router[lib_constants.FLOATINGIP_KEY] = [] self.agent._process_updated_router(router1.router) router_updated = self.agent.router_info[router1.router['id']] self.assertTrue(self._namespace_exists(router_updated.ns_name)) self._assert_snat_namespace_exists(router1) ip4_rules_list = ip_lib.list_ip_rules(router1.ns_name, lib_constants.IP_VERSION_4) self.assertEqual(5, len(ip4_rules_list)) interface_rules_list_count = 0 fip_rule_count = 0 for ip_rule in ip4_rules_list: tbl_index = ip_rule['table'] if tbl_index not in ['local', 'default', 'main']: interface_rules_list_count += 1 if tbl_index == dvr_fip_ns.FIP_RT_TBL: fip_rule_count += 1 self.assertEqual(2, interface_rules_list_count) self.assertEqual(0, fip_rule_count) def _assert_default_gateway(self, fip_2_rtr, rfp_device, device_name): expected_gateway = [{'device': device_name, 'cidr': '0.0.0.0/0', 'via': str(fip_2_rtr.ip), 'table': dvr_fip_ns.FIP_RT_TBL}] listed_routes = rfp_device.route.list_routes( ip_version=lib_constants.IP_VERSION_4, table=dvr_fip_ns.FIP_RT_TBL, via=str(fip_2_rtr.ip)) self._check_routes(expected_gateway, listed_routes) def test_dvr_router_rem_fips_on_restarted_agent(self): self.agent.conf.agent_mode = 'dvr_snat' router_info = self.generate_dvr_router_info() router1 = self.manage_router(self.agent, router_info) fip_ns = router1.fip_ns.get_name() self.assertTrue(self._namespace_exists(fip_ns)) restarted_agent = neutron_l3_agent.L3NATAgentWithStateReport( self.agent.host, self.agent.conf) router1.router[lib_constants.FLOATINGIP_KEY] = [] self.manage_router(restarted_agent, router1.router) self._assert_dvr_snat_gateway(router1) self.assertTrue(self._namespace_exists(fip_ns)) def test_dvr_router_update_on_restarted_agent_sets_rtr_fip_connect(self): self.agent.conf.agent_mode = 'dvr_snat' router_info = self.generate_dvr_router_info() router1 = self.manage_router(self.agent, router_info) self.assertTrue(router1.rtr_fip_connect) fip_ns = router1.fip_ns.get_name() self.assertTrue(self._namespace_exists(fip_ns)) restarted_agent = neutron_l3_agent.L3NATAgentWithStateReport( self.agent.host, self.agent.conf) router_updated = self.manage_router(restarted_agent, router1.router) self.assertTrue(router_updated.rtr_fip_connect) def test_dvr_router_add_fips_on_restarted_agent(self): self.agent.conf.agent_mode = 'dvr' router_info = self.generate_dvr_router_info() router = self.manage_router(self.agent, router_info) floating_ips = 
router.router[lib_constants.FLOATINGIP_KEY] router_ns = router.ns_name fip_rule_prio_1 = self._get_fixed_ip_rule_priority( router_ns, floating_ips[0]['fixed_ip_address']) restarted_agent = neutron_l3_agent.L3NATAgent( self.agent.host, self.agent.conf) floating_ips[0]['floating_ip_address'] = '21.4.4.2' floating_ips[0]['fixed_ip_address'] = '10.0.0.2' self.manage_router(restarted_agent, router_info) fip_rule_prio_2 = self._get_fixed_ip_rule_priority( router_ns, floating_ips[0]['fixed_ip_address']) self.assertNotEqual(fip_rule_prio_1, fip_rule_prio_2) def test_dvr_router_floating_ip_moved(self): self.agent.conf.agent_mode = 'dvr' router_info = self.generate_dvr_router_info() router = self.manage_router(self.agent, router_info) floating_ips = router.router[lib_constants.FLOATINGIP_KEY] router_ns = router.ns_name fixed_ip = floating_ips[0]['fixed_ip_address'] self.assertTrue(self._fixed_ip_rule_exists(router_ns, fixed_ip)) # Floating IP reassigned to another fixed IP new_fixed_ip = '10.0.0.2' self.assertNotEqual(new_fixed_ip, fixed_ip) floating_ips[0]['fixed_ip_address'] = new_fixed_ip self.agent._process_updated_router(router.router) self.assertFalse(self._fixed_ip_rule_exists(router_ns, fixed_ip)) self.assertTrue(self._fixed_ip_rule_exists(router_ns, new_fixed_ip)) def _assert_iptables_rules_exist(self, router_iptables_manager, table_name, expected_rules): rules = router_iptables_manager.get_rules_for_table(table_name) for rule in expected_rules: self.assertIn( str(iptables_manager.IptablesRule(rule[0], rule[1])), rules) return True def _assert_iptables_rules_not_exist(self, router_iptables_manager, table_name, expected_rules): rules = router_iptables_manager.get_rules_for_table(table_name) for rule in expected_rules: self.assertNotIn( str(iptables_manager.IptablesRule(rule[0], rule[1])), rules) return True def test_prevent_snat_rule_exist_on_restarted_agent(self): self.agent.conf.agent_mode = 'dvr_snat' router_info = self.generate_dvr_router_info() router = self.manage_router(self.agent, router_info) ext_port = router.get_ex_gw_port() rfp_devicename = router.get_external_device_interface_name(ext_port) prevent_snat_rule = router._prevent_snat_for_internal_traffic_rule( rfp_devicename) self._assert_iptables_rules_exist( router.iptables_manager, 'nat', [prevent_snat_rule]) restarted_agent = neutron_l3_agent.L3NATAgentWithStateReport( self.agent.host, self.agent.conf) restarted_router = self.manage_router(restarted_agent, router_info) self._assert_iptables_rules_exist( restarted_router.iptables_manager, 'nat', [prevent_snat_rule]) def _get_fixed_ip_rule_priority(self, namespace, fip): ipv4_rules = ip_lib.list_ip_rules(namespace, 4) for rule in (rule for rule in ipv4_rules if utils.cidr_to_ip(rule['from']) == fip): return rule['priority'] def _fixed_ip_rule_exists(self, namespace, ip): ipv4_rules = ip_lib.list_ip_rules(namespace, 4) for _ in (rule for rule in ipv4_rules if utils.cidr_to_ip(rule['from']) == ip): return True return False def test_dvr_router_add_internal_network_set_arp_cache(self): # Check that, when the router is set up and there are # existing ports on the uplinked subnet, the ARP # cache is properly populated. 
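
# The test below feeds one port on the attached subnet to the agent and
# expects a matching neighbor entry. As a rough, self-contained sketch
# (dict shapes mirror the get_ports_by_subnet() payload used below; the
# helper name is hypothetical), the agent's ARP pre-population boils
# down to collecting (ip, mac) pairs per port:
def _demo_expected_arp_entries(ports):
    """Collect the (ip, mac) pairs a router would turn into ARP entries."""
    return [(fixed_ip['ip_address'], port['mac_address'])
            for port in ports
            for fixed_ip in port['fixed_ips']]

_demo_ports = [{'fixed_ips': [{'ip_address': '35.4.1.10'}],
                'mac_address': 'fa:3e:aa:bb:cc:dd'}]
assert _demo_expected_arp_entries(_demo_ports) == [
    ('35.4.1.10', 'fa:3e:aa:bb:cc:dd')]
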
self.agent.conf.agent_mode = 'dvr_snat' router_info = self.generate_dvr_router_info(enable_snat=True) expected_neighbor = '35.4.1.10' port_data = { 'fixed_ips': [{'ip_address': expected_neighbor}], 'mac_address': 'fa:3e:aa:bb:cc:dd', 'device_owner': DEVICE_OWNER_COMPUTE } self.agent.plugin_rpc.get_ports_by_subnet.return_value = [port_data] router1 = self.manage_router(self.agent, router_info) internal_device = router1.get_internal_device_name( router_info['_interfaces'][0]['id']) neighbor = ip_lib.dump_neigh_entries(4, internal_device, router1.ns_name, dst=expected_neighbor) self.assertNotEqual([], neighbor) self.assertEqual(expected_neighbor, neighbor[0]['dst']) def _assert_rfp_fpr_mtu(self, router, expected_mtu=1500): dev_mtu = self.get_device_mtu( router.router_id, router.fip_ns.get_rtr_ext_device_name, router.ns_name) self.assertEqual(expected_mtu, dev_mtu) dev_mtu = self.get_device_mtu( router.router_id, router.fip_ns.get_int_device_name, router.fip_ns.get_name()) self.assertEqual(expected_mtu, dev_mtu) def test_dvr_router_fip_agent_mismatch(self): """Test to validate the floatingip agent mismatch. This test validates the condition where floatingip agent gateway port host mismatches with the agent and so the binding will not be there. """ self.agent.conf.agent_mode = 'dvr' router_info = self.generate_dvr_router_info() floating_ip = router_info['_floatingips'][0] floating_ip['host'] = 'my_new_host' # In this case the floatingip binding is different and so it # should not create the floatingip namespace on the given agent. # This is also like there is no current binding. router1 = self.manage_router(self.agent, router_info) fip_ns = router1.fip_ns.get_name() self.assertTrue(self._namespace_exists(router1.ns_name)) # FIP Namespace creation does not depend on the floatingip's # anymore and will be created on each agent when there is # a valid gateway. self.assertTrue(self._namespace_exists(fip_ns)) self._assert_snat_namespace_does_not_exist(router1) def test_dvr_router_fip_create_for_migrating_port(self): """Test to validate the floatingip create on port migrate. This test validates the condition where floatingip host mismatches with the agent, but the 'dest_host' variable matches with the agent host, due to port pre-migrate phase. """ self.agent.conf.agent_mode = 'dvr' router_info = self.generate_dvr_router_info() floating_ip = router_info['_floatingips'][0] floating_ip['host'] = 'my_new_host' floating_ip['dest_host'] = self.agent.host # Now we have the floatingip 'host' pointing to host that # does not match to the 'agent.host' and the floatingip # 'dest_host' matches with the agent.host in the case # of live migration due to the port_profile update from # nova. router1 = self.manage_router(self.agent, router_info) fip_ns = router1.fip_ns.get_name() self.assertTrue(self._namespace_exists(router1.ns_name)) self.assertTrue(self._namespace_exists(fip_ns)) def test_dvr_router_fip_late_binding(self): """Test to validate the floatingip migration or latebinding. This test validates the condition where floatingip private port changes while migration or when the private port host binding is done later after floatingip association. """ self.agent.conf.agent_mode = 'dvr' router_info = self.generate_dvr_router_info() fip_agent_gw_port = router_info[ lib_constants.FLOATINGIP_AGENT_INTF_KEY] # Now let us not pass the FLOATINGIP_AGENT_INTF_KEY, to emulate # that the server did not create the port, since there was no valid # host binding. 
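
# The MTU helper above resolves device names like rfp-<router_id> and
# fpr-<router_id>. A minimal sketch of that naming, assuming the
# interface driver truncates names to 14 characters (the prefixes are
# the ones used by the fip namespace; the exact length is an assumption
# for illustration):
_DEV_NAME_LEN = 14

def _demo_veth_pair_names(router_id):
    return (('rfp-' + router_id)[:_DEV_NAME_LEN],
            ('fpr-' + router_id)[:_DEV_NAME_LEN])

assert _demo_veth_pair_names('dd9ecbba-31f8-4bd1-a162-ca8ab4b6f9c8') == (
    'rfp-dd9ecbba-3', 'fpr-dd9ecbba-3')
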
router_info[lib_constants.FLOATINGIP_AGENT_INTF_KEY] = [] self.mock_plugin_api.get_agent_gateway_port.return_value = ( fip_agent_gw_port[0]) router1 = self.manage_router(self.agent, router_info) fip_ns = router1.fip_ns.get_name() self.assertTrue(self._namespace_exists(router1.ns_name)) self.assertTrue(self._namespace_exists(fip_ns)) self._assert_snat_namespace_does_not_exist(router1) def test_dvr_router_fip_namespace_create_without_floatingip(self): """Test to validate the floatingip namespace creation without fip. This test validates the condition where floatingip namespace gets created on the agent when the gateway is added and without floatingip configured for the router. """ self.agent.conf.agent_mode = 'dvr' router_info = self.generate_dvr_router_info(enable_floating_ip=False) fip_agent_gw_port = self._get_fip_agent_gw_port_for_router( router_info['gw_port']) self.mock_plugin_api.get_agent_gateway_port.return_value = ( fip_agent_gw_port) router1 = self.manage_router(self.agent, router_info) fip_ns = router1.fip_ns.get_name() self.assertTrue(self._namespace_exists(router1.ns_name)) self.assertTrue(self._namespace_exists(fip_ns)) self.assertTrue(router1.rtr_fip_connect) self._assert_snat_namespace_does_not_exist(router1) def _assert_snat_namespace_exists(self, router): namespace = dvr_snat_ns.SnatNamespace.get_snat_ns_name( router.router_id) self.assertTrue(self._namespace_exists(namespace)) def _get_dvr_snat_namespace_device_status(self, router, internal_dev_name=None): """Function returns the internal and external device status.""" snat_ns = dvr_snat_ns.SnatNamespace.get_snat_ns_name( router.router_id) external_port = router.get_ex_gw_port() external_device_name = router.get_external_device_name( external_port['id']) qg_device_created_successfully = ip_lib.device_exists( external_device_name, namespace=snat_ns) sg_device_created_successfully = ip_lib.device_exists( internal_dev_name, namespace=snat_ns) return qg_device_created_successfully, sg_device_created_successfully def test_snat_bound_floating_ip(self): """Test to validate the snat bound floatingip lifecycle.""" self.agent.conf.agent_mode = lib_constants.L3_AGENT_MODE_DVR_SNAT router_info = self.generate_dvr_router_info(snat_bound_fip=True) router1 = self.manage_router(self.agent, router_info) snat_bound_floatingips = router_info[lib_constants.FLOATINGIP_KEY] self._assert_snat_namespace_exists(router1) # In the snat namespace, check the iptables rules are set correctly for fip in snat_bound_floatingips: expected_rules = router1.floating_forward_rules(fip) if fip.get(lib_constants.DVR_SNAT_BOUND): self._assert_iptables_rules_exist( router1.snat_iptables_manager, 'nat', expected_rules) def test_floating_ip_migrate_when_unbound_port_is_bound_to_a_host(self): """Test to check floating ips migrate from unbound to bound host.""" self.agent.conf.agent_mode = lib_constants.L3_AGENT_MODE_DVR_SNAT router_info = self.generate_dvr_router_info( enable_floating_ip=True, enable_centralized_fip=True, enable_snat=True, snat_bound_fip=True) router1 = self.manage_router(self.agent, router_info) floatingips = router_info[lib_constants.FLOATINGIP_KEY] distributed_fip = floatingips[0] centralized_floatingip = floatingips[1] # For private ports hosted in dvr_no_fip agent, the floatingip # dict will contain the fip['host'] key, but the value will always # be None to emulate the legacy router. 
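
# test_snat_bound_floating_ip above relies on the 'dvr_snat_bound' flag
# to decide where a floating IP's NAT rules live. A tiny sketch of that
# dispatch (return values are illustrative labels, not agent API):
def _demo_nat_rule_location(fip):
    """Pick the namespace whose iptables manager should hold the rules."""
    return 'snat' if fip.get('dvr_snat_bound') else 'qrouter'

assert _demo_nat_rule_location({'dvr_snat_bound': True}) == 'snat'
assert _demo_nat_rule_location({'host': 'compute-1'}) == 'qrouter'
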
self.assertIsNone(centralized_floatingip['host']) self.assertTrue(self._namespace_exists(router1.ns_name)) fip_ns = router1.fip_ns.get_name() self.assertTrue(self._namespace_exists(fip_ns)) self._assert_snat_namespace_exists(router1) # If fips are centralized then, the DNAT rules are only # configured in the SNAT Namespace and not in the router-ns. expected_rules = router1.floating_forward_rules(distributed_fip) self.assertTrue(self._assert_iptables_rules_exist( router1.iptables_manager, 'nat', expected_rules)) expected_rules = router1._centralized_floating_forward_rules( centralized_floatingip['floating_ip_address'], centralized_floatingip['fixed_ip_address']) self.assertTrue(self._assert_iptables_rules_exist( router1.snat_iptables_manager, 'nat', expected_rules)) qrouter_ns = router1.ns_name fixed_ip_dist = distributed_fip['fixed_ip_address'] snat_ns = router1.snat_namespace.name fixed_ip_cent = centralized_floatingip['fixed_ip_address'] self.assertFalse(self._fixed_ip_rule_exists(qrouter_ns, fixed_ip_cent)) self.assertTrue(self._fixed_ip_rule_exists(qrouter_ns, fixed_ip_dist)) self.assertFalse(self._fixed_ip_rule_exists(snat_ns, fixed_ip_dist)) self.assertFalse(self._fixed_ip_rule_exists(snat_ns, fixed_ip_cent)) # Now let us edit the centralized floatingIP info with 'host' # and remove the 'dvr_snat_bound' router1.router[lib_constants.FLOATINGIP_KEY][1]['host'] = ( self.agent.conf.host) del router1.router[lib_constants.FLOATINGIP_KEY][1]['dvr_snat_bound'] self.agent._process_updated_router(router1.router) router_updated = self.agent.router_info[router_info['id']] qrouter_ns = router_updated.ns_name fixed_ip_dist = distributed_fip['fixed_ip_address'] self._assert_snat_namespace_exists(router_updated) snat_ns = router_updated.snat_namespace.name fixed_ip_cent = centralized_floatingip['fixed_ip_address'] router_updated.get_centralized_fip_cidr_set = mock.Mock( return_value=set(["19.4.4.3/32"])) self.assertTrue(self._assert_iptables_rules_not_exist( router_updated.snat_iptables_manager, 'nat', expected_rules)) port = router_updated.get_ex_gw_port() interface_name = router_updated.get_external_device_name(port['id']) self._assert_ip_address_not_on_interface( snat_ns, interface_name, centralized_floatingip['floating_ip_address']) self.assertTrue(self._fixed_ip_rule_exists(qrouter_ns, fixed_ip_dist)) self.assertFalse(self._fixed_ip_rule_exists(snat_ns, fixed_ip_dist)) self.assertTrue(self._fixed_ip_rule_exists(qrouter_ns, fixed_ip_cent)) self.assertFalse(self._fixed_ip_rule_exists(snat_ns, fixed_ip_cent)) self.assertTrue(self._namespace_exists(fip_ns)) def _test_get_centralized_fip_cidr_set(self, router_info, expected_result_empty): self.agent.conf.agent_mode = lib_constants.L3_AGENT_MODE_DVR_SNAT self.manage_router(self.agent, router_info) router = self.agent.router_info[router_info['id']] centralized_fips = router.get_centralized_fip_cidr_set() if expected_result_empty: self.assertEqual(set([]), centralized_fips) else: self.assertNotEqual(set([]), centralized_fips) def test_get_centralized_fip_cidr_set(self): router_info = self.generate_dvr_router_info( enable_floating_ip=True, enable_centralized_fip=True, enable_snat=True, snat_bound_fip=True) self._test_get_centralized_fip_cidr_set(router_info, False) def test_get_centralized_fip_cidr_set_not_snat_host(self): router_info = self.generate_dvr_router_info( enable_floating_ip=True, enable_centralized_fip=True, enable_snat=True, snat_bound_fip=True) router_info['gw_port_host'] = 'some-other-host' 
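
# get_centralized_fip_cidr_set, exercised by the helper above, reports
# the /32 CIDRs held centrally on the snat host. A pure-python sketch of
# building such a set from floating IP dicts (shape assumed; the real
# method reads addresses from the gateway device instead):
def _demo_centralized_fip_cidrs(floating_ips):
    return set('%s/32' % fip['floating_ip_address']
               for fip in floating_ips if fip.get('dvr_snat_bound'))

assert _demo_centralized_fip_cidrs(
    [{'floating_ip_address': '19.4.4.3', 'dvr_snat_bound': True},
     {'floating_ip_address': '19.4.4.10'}]) == {'19.4.4.3/32'}
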
        self._test_get_centralized_fip_cidr_set(router_info, True)

    def test_get_centralized_fip_cidr_set_no_ex_gw_port(self):
        self.agent.conf.agent_mode = lib_constants.L3_AGENT_MODE_DVR_SNAT
        router_info = self.generate_dvr_router_info(
            enable_floating_ip=True, enable_centralized_fip=True,
            enable_snat=True, snat_bound_fip=True)
        router_info['gw_port'] = {}
        self._test_get_centralized_fip_cidr_set(router_info, True)

    def test_floating_ip_not_deployed_on_dvr_no_external_agent(self):
        """Test that floating IPs are not configured for dvr_no_external."""
        self.agent.conf.agent_mode = (
            lib_constants.L3_AGENT_MODE_DVR_NO_EXTERNAL)
        router_info = self.generate_dvr_router_info(
            enable_floating_ip=True, enable_centralized_fip=True)
        router1 = self.manage_router(self.agent, router_info)
        centralized_floatingips = router_info[lib_constants.FLOATINGIP_KEY]
        # For private ports hosted on a dvr_no_external agent, the
        # floatingip dict will contain the fip['host'] key, but the value
        # will always be None to emulate the legacy router.
        self.assertIsNone(centralized_floatingips[0]['host'])
        self.assertTrue(self._namespace_exists(router1.ns_name))
        fip_ns = router1.fip_ns.get_name()
        self.assertFalse(self._namespace_exists(fip_ns))
        # If fips are centralized, the DNAT rules are only configured in
        # the SNAT namespace and not in the router namespace.
        for fip in centralized_floatingips:
            expected_rules = router1.floating_forward_rules(fip)
            self.assertEqual(0, len(expected_rules))

    def test_floating_ip_create_does_not_raise_keyerror_on_missing_host(self):
        """Test that configuring floating IPs does not raise a KeyError."""
        self.agent.conf.agent_mode = 'dvr'
        router_info = self.generate_dvr_router_info(
            enable_floating_ip=True)
        del router_info[lib_constants.FLOATINGIP_KEY][0]['host']
        centralized_floatingips = router_info[lib_constants.FLOATINGIP_KEY][0]
        self.assertIsNone(centralized_floatingips.get('host'))
        # No KeyError should be raised when calling manage_router
        self.manage_router(self.agent, router_info)

    def test_dvr_router_snat_namespace_with_interface_remove(self):
        """Test to validate the snat namespace with interface remove.

        This test validates the snat namespace for all the external and
        internal devices. It also validates that the internal device
        corresponding to the router interface is removed when the router
        interface is deleted.
        """
        self.agent.conf.agent_mode = 'dvr_snat'
        router_info = self.generate_dvr_router_info()
        snat_internal_port = router_info[lib_constants.SNAT_ROUTER_INTF_KEY]
        router1 = self.manage_router(self.agent, router_info)
        csnat_internal_port = (
            router1.router[lib_constants.SNAT_ROUTER_INTF_KEY])
        # Save the internal device name to verify later
        internal_device_name = router1._get_snat_int_device_name(
            csnat_internal_port[0]['id'])
        self._assert_snat_namespace_exists(router1)
        qg_device, sg_device = self._get_dvr_snat_namespace_device_status(
            router1, internal_dev_name=internal_device_name)
        self.assertTrue(qg_device)
        self.assertTrue(sg_device)
        self.assertEqual(router1.snat_ports, snat_internal_port)
        # Do not pass INTERFACE_KEY, to emulate that the interface has
        # been removed.
        router1.router[lib_constants.INTERFACE_KEY] = []
        # Do not pass SNAT_ROUTER_INTF_KEY either, to emulate that the
        # server did not send it, since the interface has been removed.
router1.router[lib_constants.SNAT_ROUTER_INTF_KEY] = [] self.agent._process_updated_router(router1.router) router_updated = self.agent.router_info[router_info['id']] self._assert_snat_namespace_exists(router_updated) qg_device, sg_device = self._get_dvr_snat_namespace_device_status( router_updated, internal_dev_name=internal_device_name) self.assertFalse(sg_device) self.assertTrue(qg_device) def _mocked_dvr_ha_router(self, agent, enable_ha=True, enable_gw=True, enable_centralized_fip=False, snat_bound_fip=False, vrrp_id=None, **kwargs): r_info = self.generate_dvr_router_info( enable_ha=enable_ha, enable_snat=True, agent=agent, enable_gw=enable_gw, enable_centralized_fip=enable_centralized_fip, snat_bound_fip=snat_bound_fip, vrrp_id=vrrp_id, **kwargs) r_snat_ns_name = namespaces.build_ns_name(dvr_snat_ns.SNAT_NS_PREFIX, r_info['id']) mocked_r_snat_ns_name = r_snat_ns_name + '@' + agent.host r_ns_name = namespaces.build_ns_name(namespaces.NS_PREFIX, r_info['id']) mocked_r_ns_name = r_ns_name + '@' + agent.host return r_info, mocked_r_ns_name, mocked_r_snat_ns_name def _setup_dvr_ha_agents(self): self.agent.conf.agent_mode = 'dvr_snat' conf = self._configure_agent('agent2') self.failover_agent = neutron_l3_agent.L3NATAgentWithStateReport( 'agent2', conf) self.failover_agent.conf.agent_mode = 'dvr_snat' def _setup_dvr_ha_bridges(self): br_int_1 = self._get_agent_ovs_integration_bridge(self.agent) br_int_2 = self._get_agent_ovs_integration_bridge(self.failover_agent) veth1, veth2 = self.useFixture(net_helpers.VethFixture()).ports veth1.link.set_up() veth2.link.set_up() br_int_1.add_port(veth1.name) br_int_2.add_port(veth2.name) def _create_dvr_ha_router(self, agent, enable_gw=True, enable_centralized_fip=False, snat_bound_fip=False, ha_interface=True, vrrp_id=None, **kwargs): get_ns_name = mock.patch.object(namespaces.RouterNamespace, '_get_ns_name').start() get_snat_ns_name = mock.patch.object(dvr_snat_ns.SnatNamespace, 'get_snat_ns_name').start() (r_info, mocked_r_ns_name, mocked_r_snat_ns_name) = self._mocked_dvr_ha_router( agent, ha_interface, enable_gw, enable_centralized_fip, snat_bound_fip, vrrp_id=vrrp_id, **kwargs) if not ha_interface: r_info['ha'] = True get_ns_name.return_value = mocked_r_ns_name get_snat_ns_name.return_value = mocked_r_snat_ns_name router = self.manage_router(agent, r_info) return router def _assert_ip_addresses_in_dvr_ha_snat_namespace_with_fip(self, router): namespace = router.ha_namespace ex_gw_port = router.get_ex_gw_port() snat_ports = router.get_snat_interfaces() if not snat_ports: return if router.is_router_master(): centralized_floatingips = ( router.router[lib_constants.FLOATINGIP_KEY]) for fip in centralized_floatingips: expected_rules = router.floating_forward_rules(fip) self.assertFalse(self._assert_iptables_rules_exist( router.snat_iptables_manager, 'nat', expected_rules)) snat_port = snat_ports[0] ex_gw_port_name = router.get_external_device_name( ex_gw_port['id']) snat_port_name = router._get_snat_int_device_name( snat_port['id']) ex_gw_port_cidrs = utils.fixed_ip_cidrs(ex_gw_port["fixed_ips"]) snat_port_cidrs = utils.fixed_ip_cidrs(snat_port["fixed_ips"]) self._assert_ip_addresses_on_interface(namespace, ex_gw_port_name, ex_gw_port_cidrs) self._assert_ip_addresses_on_interface(namespace, snat_port_name, snat_port_cidrs) def _assert_no_ip_addresses_in_dvr_ha_snat_namespace_with_fip(self, router): namespace = router.ha_namespace ex_gw_port = router.get_ex_gw_port() snat_ports = router.get_snat_interfaces() if not snat_ports: return snat_port = 
snat_ports[0] ex_gw_port_name = router.get_external_device_name( ex_gw_port['id']) snat_port_name = router._get_snat_int_device_name( snat_port['id']) self._assert_no_ip_addresses_on_interface(namespace, snat_port_name) self._assert_no_ip_addresses_on_interface(namespace, ex_gw_port_name) def _assert_ip_addresses_in_dvr_ha_snat_namespace(self, router): namespace = router.ha_namespace ex_gw_port = router.get_ex_gw_port() snat_ports = router.get_snat_interfaces() if not snat_ports: return snat_port = snat_ports[0] ex_gw_port_name = router.get_external_device_name( ex_gw_port['id']) snat_port_name = router._get_snat_int_device_name( snat_port['id']) ip = ex_gw_port["fixed_ips"][0]['ip_address'] prefix_len = ex_gw_port["fixed_ips"][0]['prefixlen'] ex_gw_port_cidr = ip + "/" + str(prefix_len) ip = snat_port["fixed_ips"][0]['ip_address'] prefix_len = snat_port["fixed_ips"][0]['prefixlen'] snat_port_cidr = ip + "/" + str(prefix_len) self._assert_ip_address_on_interface(namespace, ex_gw_port_name, ex_gw_port_cidr) self._assert_ip_address_on_interface(namespace, snat_port_name, snat_port_cidr) def _assert_no_ip_addresses_in_dvr_ha_snat_namespace(self, router): namespace = router.ha_namespace ex_gw_port = router.get_ex_gw_port() snat_ports = router.get_snat_interfaces() if not snat_ports: return snat_port = snat_ports[0] ex_gw_port_name = router.get_external_device_name( ex_gw_port['id']) snat_port_name = router._get_snat_int_device_name( snat_port['id']) self._assert_no_ip_addresses_on_interface(namespace, snat_port_name) self._assert_no_ip_addresses_on_interface(namespace, ex_gw_port_name) @mock.patch.object(dvr_local_router.DvrLocalRouter, 'connect_rtr_2_fip') @mock.patch.object( dvr_ha_router.DvrEdgeHaRouter, 'get_centralized_fip_cidr_set') def test_dvr_ha_router_with_centralized_fip_calls_keepalived_cidr( self, connect_rtr_2_fip_mock, fip_cidr_centralized_mock): self._setup_dvr_ha_agents() self._setup_dvr_ha_bridges() router1 = self._create_dvr_ha_router( self.agent, enable_gw=True, enable_centralized_fip=True, snat_bound_fip=True) self.assertTrue(fip_cidr_centralized_mock.called) restarted_agent = neutron_l3_agent.L3NATAgentWithStateReport( self.agent.host, self.agent.conf) self.manage_router(restarted_agent, router1.router) self.assertTrue(fip_cidr_centralized_mock.called) @mock.patch.object(dvr_local_router.DvrLocalRouter, 'connect_rtr_2_fip') @mock.patch.object( dvr_edge_router.DvrEdgeRouter, 'get_centralized_fip_cidr_set') def test_dvr_router_with_centralized_fip_calls_keepalived_cidr( self, connect_rtr_2_fip_mock, fip_cidr_centralized_mock): router_info = self.generate_dvr_router_info( enable_gw=True, enable_centralized_fip=True, snat_bound_fip=True) router1 = self.manage_router(self.agent, router_info) self.assertTrue(fip_cidr_centralized_mock.called) restarted_agent = neutron_l3_agent.L3NATAgentWithStateReport( self.agent.host, self.agent.conf) self.manage_router(restarted_agent, router1.router) self.assertTrue(fip_cidr_centralized_mock.called) def test_dvr_ha_router_unbound_from_agents(self): self._setup_dvr_ha_agents() self._setup_dvr_ha_bridges() router1 = self._create_dvr_ha_router( self.agent, enable_gw=True, vrrp_id=14, ha_port_ip="169.254.192.106", ha_port_mac="12:34:56:78:3a:aa") router2 = self._create_dvr_ha_router( self.failover_agent, enable_gw=True, vrrp_id=14, ha_port_ip="169.254.192.107", ha_port_mac="12:34:56:78:3a:bb") master, backup = self._get_master_and_slave_routers( router1, router2, check_external_device=False) 
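
# The two decorated tests above stack mock.patch.object decorators.
# Decorators apply bottom-up, so the mock made by the decorator closest
# to the function is passed first; a runnable sketch with stand-in
# targets (_Demo is hypothetical, defined only for this illustration):
class _Demo(object):
    def first(self):
        pass

    def second(self):
        pass

@mock.patch.object(_Demo, 'first')
@mock.patch.object(_Demo, 'second')
def _demo_mock_order(second_mock, first_mock):
    # 'second' is patched by the inner decorator, so its mock arrives
    # as the first positional argument.
    return second_mock, first_mock
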
self._assert_ip_addresses_in_dvr_ha_snat_namespace(master) self._assert_no_ip_addresses_in_dvr_ha_snat_namespace(backup) master_ha_device = master.get_ha_device_name() backup_ha_device = backup.get_ha_device_name() self.assertTrue( ip_lib.device_exists(master_ha_device, master.ha_namespace)) self.assertTrue( ip_lib.device_exists(backup_ha_device, backup.ha_namespace)) new_master_router = copy.deepcopy(master.router) new_master_router['_ha_interface'] = None self.agent._process_updated_router(new_master_router) router_updated = self.agent.router_info[master.router_id] self.assertTrue(self._namespace_exists(router_updated.ns_name)) self._assert_snat_namespace_exists(router_updated) snat_namespace_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name( router_updated.router_id) self.assertFalse( ip_lib.device_exists(master_ha_device, snat_namespace_name)) utils.wait_until_true(lambda: backup.ha_state == 'master') self._assert_ip_addresses_in_dvr_ha_snat_namespace(backup) self.assertTrue( ip_lib.device_exists(backup_ha_device, backup.ha_namespace)) def _test_dvr_ha_router_failover_with_gw_and_fip(self, enable_gw, enable_centralized_fip, snat_bound_fip, vrrp_id=None): self._setup_dvr_ha_agents() self._setup_dvr_ha_bridges() router1 = self._create_dvr_ha_router( self.agent, enable_gw=enable_gw, enable_centralized_fip=enable_centralized_fip, snat_bound_fip=snat_bound_fip, vrrp_id=vrrp_id, ha_port_ip="169.254.192.100", ha_port_mac="12:34:56:78:2b:aa") router2 = self._create_dvr_ha_router( self.failover_agent, enable_gw=enable_gw, enable_centralized_fip=enable_centralized_fip, snat_bound_fip=snat_bound_fip, vrrp_id=vrrp_id, ha_port_ip="169.254.192.101", ha_port_mac="12:34:56:78:2b:bb") master, backup = self._get_master_and_slave_routers( router1, router2, check_external_device=False) self._assert_ip_addresses_in_dvr_ha_snat_namespace_with_fip(master) self._assert_no_ip_addresses_in_dvr_ha_snat_namespace_with_fip(backup) self.fail_ha_router(master) utils.wait_until_true(lambda: backup.ha_state == 'master') utils.wait_until_true(lambda: master.ha_state == 'backup') self._assert_ip_addresses_in_dvr_ha_snat_namespace_with_fip(backup) self._assert_no_ip_addresses_in_dvr_ha_snat_namespace_with_fip(master) def _test_dvr_ha_router_failover(self, enable_gw, vrrp_id=None): self._setup_dvr_ha_agents() self._setup_dvr_ha_bridges() router1 = self._create_dvr_ha_router( self.agent, enable_gw=enable_gw, vrrp_id=vrrp_id, ha_port_ip="169.254.192.102", ha_port_mac="12:34:56:78:2b:cc") router2 = self._create_dvr_ha_router( self.failover_agent, enable_gw, vrrp_id=vrrp_id, ha_port_ip="169.254.192.103", ha_port_mac="12:34:56:78:2b:dd") master, backup = self._get_master_and_slave_routers( router1, router2, check_external_device=False) self._assert_ip_addresses_in_dvr_ha_snat_namespace(master) self._assert_no_ip_addresses_in_dvr_ha_snat_namespace(backup) self.fail_ha_router(master) utils.wait_until_true(lambda: backup.ha_state == 'master') utils.wait_until_true(lambda: master.ha_state == 'backup') self._assert_ip_addresses_in_dvr_ha_snat_namespace(backup) self._assert_no_ip_addresses_in_dvr_ha_snat_namespace(master) def test_dvr_ha_router_failover_with_gw(self): self._test_dvr_ha_router_failover(enable_gw=True, vrrp_id=10) def test_dvr_ha_router_failover_with_gw_and_floatingip(self): self._test_dvr_ha_router_failover_with_gw_and_fip( enable_gw=True, enable_centralized_fip=True, snat_bound_fip=True, vrrp_id=11) def test_dvr_ha_router_failover_without_gw(self): self._test_dvr_ha_router_failover(enable_gw=False, vrrp_id=12) def 
test_dvr_non_ha_router_update(self): self._setup_dvr_ha_agents() self._setup_dvr_ha_bridges() router1 = self._create_dvr_ha_router( self.agent, vrrp_id=13, ha_port_ip="169.254.192.104", ha_port_mac="12:34:56:78:2b:ee") router2 = self._create_dvr_ha_router( self.failover_agent, ha_interface=False, vrrp_id=13, ha_port_ip="169.254.192.105", ha_port_mac="12:34:56:78:2b:ff") r1_chsfr = mock.patch.object(self.agent, 'check_ha_state_for_router').start() r2_chsfr = mock.patch.object(self.failover_agent, 'check_ha_state_for_router').start() utils.wait_until_true(lambda: router1.ha_state == 'master') self.agent._process_updated_router(router1.router) self.assertTrue(r1_chsfr.called) self.failover_agent._process_updated_router(router2.router) self.assertFalse(r2_chsfr.called) def _setup_dvr_router_static_routes(self, router_namespace=True, check_fpr_int_rule_delete=False, enable_ha=False): """Test to validate the extra routes on dvr routers.""" self.agent.conf.agent_mode = 'dvr_snat' router_info = self.generate_dvr_router_info( enable_snat=True, enable_ha=enable_ha) router1 = self.manage_router(self.agent, router_info) self.assertTrue(self._namespace_exists(router1.ns_name)) self._assert_snat_namespace_exists(router1) fip_ns_name = router1.fip_ns.get_name() self.assertTrue(self._namespace_exists(fip_ns_name)) snat_ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name( router1.router_id) if router_namespace: router1.router['routes'] = [{'destination': '8.8.4.0/24', 'nexthop': '35.4.0.20'}] else: router1.router['routes'] = [{'destination': '8.8.4.0/24', 'nexthop': '19.4.4.10'}] self.agent._process_updated_router(router1.router) router_updated = self.agent.router_info[router_info['id']] if router_namespace: self._assert_extra_routes(router_updated) if not enable_ha: self._assert_extra_routes(router_updated, namespace=snat_ns_name) else: rtr_2_fip, fip_2_rtr = router_updated.rtr_fip_subnet.get_pair() # Now get the table index based on the fpr-interface ip. 
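
# _get_snat_idx, called right below, maps the rtr<->fip link subnet to a
# per-router routing table number. For IPv4 the idea is to reuse the
# link address's 32-bit integer value as the table id (a sketch under
# that assumption; the production helper handles IPv6 via a hash):
def _demo_snat_idx(ip_cidr):
    import netaddr  # already a dependency of this module
    return int(netaddr.IPNetwork(ip_cidr).ip)

assert _demo_snat_idx('169.254.31.28/31') == 2852003612
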
router_fip_table_idx = router_updated._get_snat_idx(fip_2_rtr) self._assert_extra_routes_for_fipns( router_updated, router_fip_table_idx) self._assert_extra_routes(router_updated, namespace=snat_ns_name) if check_fpr_int_rule_delete: router_updated.router[lib_constants.FLOATINGIP_KEY] = [] router_updated.router['gw_port'] = "" router_updated.router['gw_port_host'] = "" router_updated.router['external_gateway_info'] = "" self.agent._process_updated_router(router_updated.router) new_router_info = self.agent.router_info[router_updated.router_id] self.assertTrue(self._namespace_exists(fip_ns_name)) self._assert_extra_routes_for_fipns( new_router_info, router_fip_table_idx, check_fpr_int_rule_delete=check_fpr_int_rule_delete) def _assert_extra_routes_for_fipns(self, router, router_fip_table_idx, check_fpr_int_rule_delete=False): fip_ns_name = router.fip_ns.get_name() self.assertTrue(self._namespace_exists(fip_ns_name)) fg_port = router.fip_ns.agent_gateway_port fg_port_name = router.fip_ns.get_ext_device_name(fg_port['id']) fip_ns_int_name = router.fip_ns.get_int_device_name(router.router_id) fg_device = ip_lib.IPDevice(fg_port_name, namespace=fip_ns_name) if not check_fpr_int_rule_delete: self.assertIn('via', fg_device.route.get_gateway( table=router_fip_table_idx)) else: self.assertIsNone(fg_device.route.get_gateway( table=router_fip_table_idx)) ext_net_fw_rules_list = ip_lib.list_ip_rules( fip_ns_name, lib_constants.IP_VERSION_4) if not check_fpr_int_rule_delete: # When floatingip are associated, make sure that the # corresponding rules and routes in route table are created # for the router. expected_rule = {u'from': '0.0.0.0/0', u'iif': fip_ns_int_name, 'priority': str(router_fip_table_idx), 'table': str(router_fip_table_idx), 'type': 'unicast'} for rule in ext_net_fw_rules_list: rule_tbl = rule['table'] if rule_tbl in ['default', 'local', 'main']: continue if rule_tbl == str(router_fip_table_idx): self.assertEqual(expected_rule, rule) # Now check the routes in the table. destination = router.router['routes'][0]['destination'] next_hop = router.router['routes'][0]['nexthop'] actual_routes = fg_device.route.list_routes( ip_version=lib_constants.IP_VERSION_4, table=router_fip_table_idx, via=str(next_hop)) expected_extra_route = [{'cidr': six.u(destination), 'device': fg_port_name, 'table': router_fip_table_idx, 'via': next_hop}] self._check_routes(expected_extra_route, actual_routes) else: # When floatingip are deleted or disassociated, make sure that the # corresponding rules and routes are cleared from the table # corresponding to the router. 
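
# Several assertions in this file count policy-routing rules after
# filtering out the three tables every namespace has by default
# (local, main, default). A standalone sketch of that bookkeeping with
# hypothetical parsed-rule dicts:
def _demo_count_custom_rules(rules):
    return sum(1 for rule in rules
               if rule['table'] not in ('local', 'main', 'default'))

_demo_rules = [{'table': 'local'}, {'table': 'main'}, {'table': 'default'},
               {'table': '2852003612'}]
assert _demo_count_custom_rules(_demo_rules) == 1
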
self.assertEqual(3, len(ext_net_fw_rules_list)) rule_exist = False for rule in ext_net_fw_rules_list: rule_tbl = rule['table'] if rule_tbl not in ['default', 'local', 'main']: rule_exist = True self.assertFalse(rule_exist) tbl_routes = fg_device.route.list_routes( ip_version=lib_constants.IP_VERSION_4, table=router_fip_table_idx) self.assertEqual([], tbl_routes) def test_dvr_router_static_routes_in_fip_and_snat_namespace(self): self._setup_dvr_router_static_routes(router_namespace=False) def test_dvr_router_static_routes_in_snat_namespace_and_router_namespace( self): self._setup_dvr_router_static_routes() def test_dvr_ha_rtr_static_routes_in_rtr_namespace(self): self._setup_dvr_router_static_routes(enable_ha=True) def test_dvr_router_rule_and_route_table_cleared_when_fip_removed(self): self._setup_dvr_router_static_routes( router_namespace=False, check_fpr_int_rule_delete=True) def _assert_fip_namespace_interface_static_routes(self, address_scopes, fpr_device, router_info, rtr_2_fip, fpr_device_name): fixed_ips_1 = router_info[lib_constants.INTERFACE_KEY][0]['fixed_ips'] fixed_ips_2 = router_info[lib_constants.INTERFACE_KEY][1]['fixed_ips'] actual_routes = fpr_device.route.list_routes( ip_version=lib_constants.IP_VERSION_4, table='main', via=str(rtr_2_fip.ip)) if not address_scopes: self.assertEqual([], actual_routes) if address_scopes: cidr1 = ( str(fixed_ips_1[0]['ip_address']) + '/' + str(fixed_ips_1[0]['prefixlen'])) cidr2 = ( str(fixed_ips_2[0]['ip_address']) + '/' + str(fixed_ips_2[0]['prefixlen'])) net_addr_1 = netaddr.IPNetwork(cidr1).network net_addr_2 = netaddr.IPNetwork(cidr2).network route_cidr_1 = ( str(net_addr_1) + '/' + str(fixed_ips_1[0]['prefixlen'])) route_cidr_2 = ( str(net_addr_2) + '/' + str(fixed_ips_2[0]['prefixlen'])) expected_routes = [{'device': fpr_device_name, 'cidr': six.u(route_cidr_1), 'via': str(rtr_2_fip.ip), 'table': 'main'}, {'device': fpr_device_name, 'cidr': six.u(route_cidr_2), 'via': str(rtr_2_fip.ip), 'table': 'main'}] # Comparing the static routes for both internal interfaces on the # main table. self._check_routes(expected_routes, actual_routes) else: self.assertEqual([], actual_routes) def _assert_interface_rules_on_gateway_remove(self, router, agent, address_scopes, agent_gw_port, rfp_device, fpr_device, no_external=False): router.router[lib_constants.SNAT_ROUTER_INTF_KEY] = [] router.router['gw_port'] = "" router.router['gw_port_host'] = "" self.agent._process_updated_router(router.router) router_updated = self.agent.router_info[router.router['id']] self.assertTrue(self._namespace_exists(router_updated.ns_name)) if not no_external: self.assertFalse(rfp_device.exists()) self.assertFalse(fpr_device.exists()) self._assert_fip_namespace_deleted( agent_gw_port, assert_ovs_interface=False) if not address_scopes or no_external: ip4_rules_list = ip_lib.list_ip_rules(router_updated.ns_name, lib_constants.IP_VERSION_4) ip6_rules_list = ip_lib.list_ip_rules(router_updated.ns_name, lib_constants.IP_VERSION_6) self.assertEqual(3, len(ip4_rules_list)) self.assertEqual(2, len(ip6_rules_list)) def _setup_dvr_router_for_fast_path_exit(self, address_scopes=True): """Test to validate the fip and router namespace routes. This test validates the fip and router namespace routes that are based on the address scopes. If the address scopes of internal network and external network matches, the traffic will be forwarded to the fip namespace and the reverse traffic to the private network is forwarded to the router namespace. 
""" self.agent.conf.agent_mode = 'dvr' router_info = self.generate_dvr_router_info( enable_snat=True, enable_gw=True, enable_floating_ip=True) router_info[lib_constants.FLOATINGIP_KEY] = [] if address_scopes: address_scope1 = { str(lib_constants.IP_VERSION_4): 'scope1'} address_scope2 = { str(lib_constants.IP_VERSION_4): 'scope1'} else: address_scope1 = { str(lib_constants.IP_VERSION_4): 'scope2'} address_scope2 = { str(lib_constants.IP_VERSION_4): 'scope2'} router_info['gw_port']['address_scopes'] = { str(lib_constants.IP_VERSION_4): 'scope1'} router_info[lib_constants.INTERFACE_KEY][0]['address_scopes'] = ( address_scope1) router_info[lib_constants.INTERFACE_KEY][1]['address_scopes'] = ( address_scope2) # should have the same address_scopes as gw_port fip_agent_gw_ports = router_info[ lib_constants.FLOATINGIP_AGENT_INTF_KEY] fip_agent_gw_ports[0]['address_scopes'] = ( router_info['gw_port']['address_scopes']) self.mock_plugin_api.get_agent_gateway_port.return_value = ( fip_agent_gw_ports[0]) router1 = self.manage_router(self.agent, router_info) fip_ns_name = router1.fip_ns.get_name() self.assertTrue(self._namespace_exists(router1.ns_name)) self.assertTrue(self._namespace_exists(fip_ns_name)) # Check the router namespace for default route. rfp_device_name = router1.fip_ns.get_rtr_ext_device_name( router1.router_id) rfp_device = ip_lib.IPDevice(rfp_device_name, namespace=router1.ns_name) fpr_device_name = router1.fip_ns.get_int_device_name(router1.router_id) fpr_device = ip_lib.IPDevice(fpr_device_name, namespace=fip_ns_name) rtr_2_fip, fip_2_rtr = router1.rtr_fip_subnet.get_pair() self._assert_default_gateway( fip_2_rtr, rfp_device, rfp_device_name) # Check if any snat redirect rules in the router namespace exist. ip4_rules_list = ip_lib.list_ip_rules(router1.ns_name, lib_constants.IP_VERSION_4) ip6_rules_list = ip_lib.list_ip_rules(router1.ns_name, lib_constants.IP_VERSION_6) # Just make sure the basic set of rules are there in the router # namespace self.assertEqual(5, len(ip4_rules_list)) self.assertEqual(2, len(ip6_rules_list)) # Now check the fip namespace static routes for reaching the private # network. self._assert_fip_namespace_interface_static_routes( address_scopes, fpr_device, router_info, rtr_2_fip, fpr_device_name) # Now remove the gateway and validate if the respective interface # routes in router namespace is deleted respectively. self. _assert_interface_rules_on_gateway_remove( router1, self.agent, address_scopes, fip_agent_gw_ports[0], rfp_device, fpr_device) def test_dvr_fip_and_router_namespace_rules_with_address_scopes_match( self): self._setup_dvr_router_for_fast_path_exit(address_scopes=True) def test_dvr_fip_and_router_namespace_rules_with_address_scopes_mismatch( self): self._setup_dvr_router_for_fast_path_exit(address_scopes=False) @mock.patch.object(dvr_local_router.DvrLocalRouter, '_add_interface_routing_rule_to_router_ns') @mock.patch.object(dvr_local_router.DvrLocalRouter, '_add_interface_route_to_fip_ns') def test_dvr_no_external_router_namespace_rules_with_address_scopes_match( self, mock_add_interface_route_rule, mock_add_fip_interface_route_rule): """Test to validate the router namespace routes. This test validates the router namespace routes that are based on the address scopes. If the address scopes of internal network and external network matches, the traffic will be forwarded to SNAT namespace for agents that don't have external connectivity or configured as DVR_NO_EXTERNAL. 
""" self.agent.conf.agent_mode = ( lib_constants.L3_AGENT_MODE_DVR_NO_EXTERNAL) router_info = self.generate_dvr_router_info( enable_snat=True, enable_gw=True, enable_floating_ip=True) router_info[lib_constants.FLOATINGIP_KEY] = [] address_scope1 = { str(lib_constants.IP_VERSION_4): 'scope1'} address_scope2 = { str(lib_constants.IP_VERSION_4): 'scope1'} router_info['gw_port']['address_scopes'] = { str(lib_constants.IP_VERSION_4): 'scope1'} router_info[lib_constants.INTERFACE_KEY][0]['address_scopes'] = ( address_scope1) router_info[lib_constants.INTERFACE_KEY][1]['address_scopes'] = ( address_scope2) router1 = self.manage_router(self.agent, router_info) self.assertTrue(self._namespace_exists(router1.ns_name)) self.assertFalse(mock_add_interface_route_rule.called) self.assertFalse(mock_add_fip_interface_route_rule.called) # Check if any snat redirect rules in the router namespace exist. ip4_rules_list = ip_lib.list_ip_rules(router1.ns_name, lib_constants.IP_VERSION_4) ip6_rules_list = ip_lib.list_ip_rules(router1.ns_name, lib_constants.IP_VERSION_6) # Just make sure the basic set of rules are there in the router # namespace self.assertEqual(5, len(ip4_rules_list)) self.assertEqual(2, len(ip6_rules_list)) # Now remove the gateway and validate if the respective interface # routes in router namespace is deleted respectively. self. _assert_interface_rules_on_gateway_remove( router1, self.agent, True, mock.ANY, mock.ANY, mock.ANY, True) def test_dvr_router_gateway_update_to_none(self): self.agent.conf.agent_mode = 'dvr_snat' router_info = self.generate_dvr_router_info(enable_snat=True) router = self.manage_router(self.agent, router_info) gw_port = router.get_ex_gw_port() ex_gw_port_name = router.get_external_device_name(gw_port['id']) ex_gw_device = ip_lib.IPDevice(ex_gw_port_name, namespace=router.snat_namespace.name) fg_port = router.fip_ns.agent_gateway_port fg_port_name = router.fip_ns.get_ext_device_name(fg_port['id']) fg_device = ip_lib.IPDevice(fg_port_name, namespace=router.fip_ns.name) rtr_2_fip, fip_2_rtr = router.rtr_fip_subnet.get_pair() tbl_index = router._get_snat_idx(fip_2_rtr) self.assertIn('via', ex_gw_device.route.get_gateway()) self.assertIn('via', fg_device.route.get_gateway(table=tbl_index)) # Make this copy to make agent think gw_port changed. 
        router.ex_gw_port = copy.deepcopy(router.ex_gw_port)
        for subnet in gw_port['subnets']:
            subnet['gateway_ip'] = None
        new_fg_port = copy.deepcopy(fg_port)
        for subnet in new_fg_port['subnets']:
            subnet['gateway_ip'] = None
        router.router[lib_constants.FLOATINGIP_AGENT_INTF_KEY] = [new_fg_port]
        router.process()
        self.assertIsNone(ex_gw_device.route.get_gateway())
        self.assertIsNone(fg_device.route.get_gateway())

    def _assert_fip_namespace_deleted(self, ext_gateway_port,
                                      assert_ovs_interface=True):
        ext_net_id = ext_gateway_port['network_id']
        fip_ns = self.agent.get_fip_ns(ext_net_id)
        fip_ns.unsubscribe = mock.Mock()
        self.agent.fipnamespace_delete_on_ext_net(
            self.agent.context, ext_net_id)
        if assert_ovs_interface:
            self._assert_interfaces_deleted_from_ovs()
        fip_ns_name = fip_ns.get_name()
        self.assertFalse(self._namespace_exists(fip_ns_name))
        self.assertTrue(fip_ns.destroyed)
        self.assertTrue(fip_ns.unsubscribe.called)

    def _setup_address_scope(self, internal_address_scope1,
                             internal_address_scope2, gw_address_scope=None):
        router_info = self.generate_dvr_router_info(enable_snat=True)
        address_scope1 = {
            str(lib_constants.IP_VERSION_4): internal_address_scope1}
        address_scope2 = {
            str(lib_constants.IP_VERSION_4): internal_address_scope2}
        if gw_address_scope:
            router_info['gw_port']['address_scopes'] = {
                str(lib_constants.IP_VERSION_4): gw_address_scope}
        fip_agent_gw_ports = router_info[
            lib_constants.FLOATINGIP_AGENT_INTF_KEY]
        fip_agent_gw_ports[0]['address_scopes'] = router_info['gw_port'][
            'address_scopes']
        router_info[lib_constants.INTERFACE_KEY][0]['address_scopes'] = (
            address_scope1)
        router_info[lib_constants.INTERFACE_KEY][1]['address_scopes'] = (
            address_scope2)
        # Renew the address scope
        router_info[lib_constants.SNAT_ROUTER_INTF_KEY] = []
        self._add_snat_port_info_to_router(
            router_info, router_info[lib_constants.INTERFACE_KEY])
        router = self.manage_router(self.agent, router_info)
        router_ip_cidr1 = self._port_first_ip_cidr(router.internal_ports[0])
        router_ip1 = router_ip_cidr1.partition('/')[0]
        router_ip_cidr2 = self._port_first_ip_cidr(router.internal_ports[1])
        router_ip2 = router_ip_cidr2.partition('/')[0]
        br_int = framework.get_ovs_bridge(
            self.agent.conf.OVS.integration_bridge)
        test_machine1 = self.useFixture(
            machine_fixtures.FakeMachine(
                br_int,
                net_helpers.increment_ip_cidr(router_ip_cidr1, 10),
                router_ip1))
        test_machine2 = self.useFixture(
            machine_fixtures.FakeMachine(
                br_int,
                net_helpers.increment_ip_cidr(router_ip_cidr2, 10),
                router_ip2))
        return test_machine1, test_machine2, router

    def test_connection_from_same_address_scope(self):
        self.agent.conf.agent_mode = 'dvr_snat'
        test_machine1, test_machine2, _ = self._setup_address_scope(
            'scope1', 'scope1')
        # Internal networks that are in the same address scope can
        # connect to each other
        net_helpers.assert_ping(test_machine1.namespace, test_machine2.ip)
        net_helpers.assert_ping(test_machine2.namespace, test_machine1.ip)

    def test_connection_from_diff_address_scope(self):
        self.agent.conf.agent_mode = 'dvr_snat'
        test_machine1, test_machine2, _ = self._setup_address_scope(
            'scope1', 'scope2')
        # Internal networks that are not in the same address scope should
        # not reach each other
        test_machine1.assert_no_ping(test_machine2.ip)
        test_machine2.assert_no_ping(test_machine1.ip)

    @testtools.skip('bug/1543885')
    def test_fip_connection_for_address_scope(self):
        self.agent.conf.agent_mode = 'dvr_snat'
        (machine_same_scope, machine_diff_scope,
         router) = self._setup_address_scope('scope1', 'scope2', 'scope1')
        router.router[lib_constants.FLOATINGIP_KEY] = []
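
# The address-scope fixtures above key scopes by str(ip_version). The
# fast-path decision the agent makes reduces to this comparison (a
# sketch; the helper name is invented for illustration):
def _demo_same_address_scope(scopes_a, scopes_b, ip_version=4):
    key = str(ip_version)
    scope = scopes_a.get(key)
    return scope is not None and scope == scopes_b.get(key)

assert _demo_same_address_scope({'4': 'scope1'}, {'4': 'scope1'})
assert not _demo_same_address_scope({'4': 'scope1'}, {'4': 'scope2'})
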
fip_same_scope = '19.4.4.10' self._add_fip(router, fip_same_scope, fixed_address=machine_same_scope.ip, host=self.agent.conf.host, fixed_ip_address_scope='scope1') fip_diff_scope = '19.4.4.11' self._add_fip(router, fip_diff_scope, fixed_address=machine_diff_scope.ip, host=self.agent.conf.host, fixed_ip_address_scope='scope2') router.process() br_int = framework.get_ovs_bridge( self.agent.conf.OVS.integration_bridge) src_machine = self.useFixture( machine_fixtures.FakeMachine(br_int, '19.4.4.12/24')) # Floating ip should work no matter of address scope net_helpers.assert_ping(src_machine.namespace, fip_same_scope) net_helpers.assert_ping(src_machine.namespace, fip_diff_scope) def test_direct_route_for_address_scope(self): self.agent.conf.agent_mode = 'dvr_snat' (machine_same_scope, machine_diff_scope, router) = self._setup_address_scope('scope1', 'scope2', 'scope1') gw_port = router.get_ex_gw_port() gw_ip = self._port_first_ip_cidr(gw_port).partition('/')[0] br_int = framework.get_ovs_bridge( self.agent.conf.OVS.integration_bridge) src_machine = self.useFixture( machine_fixtures.FakeMachine(br_int, '19.4.4.12/24', gw_ip)) # For the internal networks that are in the same address scope as # external network, they can directly route to external network net_helpers.assert_ping(src_machine.namespace, machine_same_scope.ip) # For the internal networks that are not in the same address scope as # external networks. SNAT will be used. Direct route will not work # here. src_machine.assert_no_ping(machine_diff_scope.ip) def test_dvr_snat_namespace_has_ip_nonlocal_bind_disabled(self): self.agent.conf.agent_mode = 'dvr_snat' router_info = self.generate_dvr_router_info( enable_ha=True, enable_snat=True) router = self.manage_router(self.agent, router_info) try: ip_nonlocal_bind_value = ip_lib.get_ip_nonlocal_bind( router.snat_namespace.name) except RuntimeError as rte: stat_message = 'cannot stat /proc/sys/net/ipv4/ip_nonlocal_bind' if stat_message in str(rte): raise self.skipException( "This kernel doesn't support %s in network namespaces." 
                % (ip_lib.IP_NONLOCAL_BIND))
            raise
        self.assertEqual(0, ip_nonlocal_bind_value)

    def test_dvr_router_fip_namespace_routes(self):
        """Test to validate the floatingip namespace subnets routes."""
        self.agent.conf.agent_mode = 'dvr'
        router_info = self.generate_dvr_router_info(enable_floating_ip=False)
        fip_agent_gw_port = self._get_fip_agent_gw_port_for_router(
            router_info['gw_port'])
        self.mock_plugin_api.get_agent_gateway_port.return_value = (
            fip_agent_gw_port)
        router1 = self.manage_router(self.agent, router_info)
        fip_namespace = router1.fip_ns.get_name()
        ip_wrapper = ip_lib.IPWrapper(namespace=fip_namespace)
        interfaces = ip_wrapper.get_devices()
        fg_interface_name = next(
            interface.name for interface in interfaces
            if interface.name.startswith(dvr_fip_ns.FIP_EXT_DEV_PREFIX))
        ip_device = ip_lib.IPDevice(fg_interface_name,
                                    namespace=fip_namespace)
        routes = ip_device.route.list_onlink_routes(
            lib_constants.IP_VERSION_4)
        self.assertGreater(len(routes), 0)
        self.assertEqual(len(fip_agent_gw_port['extra_subnets']), len(routes))
        extra_subnet_cidr = set(extra_subnet['cidr'] for extra_subnet
                                in fip_agent_gw_port['extra_subnets'])
        routes_cidr = set(route['cidr'] for route in routes)
        self.assertEqual(extra_subnet_cidr, routes_cidr)

neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/l3/test_ha_router.py

# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy import mock from neutron_lib import constants from oslo_utils import netutils import testtools from neutron.agent.common import ovs_lib from neutron.agent.l3 import agent as neutron_l3_agent from neutron.agent.linux import ip_lib from neutron.common import utils as common_utils from neutron.tests.common import l3_test_common from neutron.tests.common import net_helpers from neutron.tests.functional.agent.l3 import framework class L3HATestCase(framework.L3AgentTestFramework): def test_ha_router_update_floatingip_statuses(self): self._test_update_floatingip_statuses( self.generate_router_info(enable_ha=True)) def test_keepalived_state_change_notification(self): enqueue_mock = mock.patch.object( self.agent, 'enqueue_state_change', side_effect=self.change_router_state).start() router_info = self.generate_router_info(enable_ha=True) router = self.manage_router(self.agent, router_info) common_utils.wait_until_true(lambda: router.ha_state == 'master') self.fail_ha_router(router) common_utils.wait_until_true(lambda: router.ha_state == 'backup') common_utils.wait_until_true(lambda: (enqueue_mock.call_count == 3 or enqueue_mock.call_count == 4)) calls = [args[0] for args in enqueue_mock.call_args_list] self.assertEqual((router.router_id, 'backup'), calls[0]) self.assertEqual((router.router_id, 'master'), calls[1]) self.assertEqual((router.router_id, 'backup'), calls[-1]) def _expected_rpc_report(self, expected): calls = (args[0][1] for args in self.agent.plugin_rpc.update_ha_routers_states.call_args_list) # Get the last state reported for each router actual_router_states = {} for call in calls: for router_id, state in call.items(): actual_router_states[router_id] = state return actual_router_states == expected def test_keepalived_state_change_bulk_rpc(self): router_info = self.generate_router_info(enable_ha=True) router1 = self.manage_router(self.agent, router_info) self.fail_ha_router(router1) router_info = self.generate_router_info(enable_ha=True) router2 = self.manage_router(self.agent, router_info) common_utils.wait_until_true(lambda: router1.ha_state == 'backup') common_utils.wait_until_true(lambda: router2.ha_state == 'master') common_utils.wait_until_true( lambda: self._expected_rpc_report( {router1.router_id: 'standby', router2.router_id: 'active'})) def test_ha_router_lifecycle(self): router_info = self._router_lifecycle(enable_ha=True) # ensure everything was cleaned up self._router_lifecycle(enable_ha=True, router_info=router_info) def test_conntrack_disassociate_fip_ha_router(self): self._test_conntrack_disassociate_fip(ha=True) def test_ipv6_ha_router_lifecycle(self): self._router_lifecycle(enable_ha=True, ip_version=constants.IP_VERSION_6) def test_ipv6_ha_router_lifecycle_with_no_gw_subnet(self): self.agent.conf.set_override('ipv6_gateway', 'fe80::f816:3eff:fe2e:1') self._router_lifecycle(enable_ha=True, ip_version=constants.IP_VERSION_6, v6_ext_gw_with_sub=False) def test_ipv6_ha_router_lifecycle_with_no_gw_subnet_for_router_advts(self): # Verify that router gw interface is configured to receive Router # Advts from upstream router when no external gateway is configured. self._router_lifecycle(enable_ha=True, dual_stack=True, v6_ext_gw_with_sub=False) def _test_ipv6_router_advts_and_fwd_helper(self, state, enable_v6_gw, expected_ra, expected_forwarding): # Schedule router to l3 agent, and then add router gateway. Verify # that router gw interface is configured to receive Router Advts and # IPv6 forwarding is enabled. 
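
# test_keepalived_state_change_bulk_rpc above expects keepalived's
# 'master'/'backup' states to surface server-side as 'active'/'standby'.
# A sketch of that translation (the mapping is inferred from the test's
# expectations, not taken from the agent code):
_KEEPALIVED_TO_SERVER_STATE = {'master': 'active', 'backup': 'standby'}

def _demo_report_states(ha_states):
    return {router_id: _KEEPALIVED_TO_SERVER_STATE[state]
            for router_id, state in ha_states.items()}

assert _demo_report_states({'r1': 'backup', 'r2': 'master'}) == {
    'r1': 'standby', 'r2': 'active'}
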
router_info = l3_test_common.prepare_router_data( enable_snat=True, enable_ha=True, dual_stack=True, enable_gw=False) router = self.manage_router(self.agent, router_info) common_utils.wait_until_true(lambda: router.ha_state == 'master') if state == 'backup': self.fail_ha_router(router) common_utils.wait_until_true(lambda: router.ha_state == 'backup') _ext_dev_name, ex_port = l3_test_common.prepare_ext_gw_test( mock.Mock(), router, dual_stack=enable_v6_gw) router_info['gw_port'] = ex_port router.process() self._assert_ipv6_accept_ra(router, expected_ra) # As router is going first to master and than to backup mode, # ipv6_forwarding should be enabled on "all" interface always after # that transition self._assert_ipv6_forwarding(router, expected_forwarding, True) @testtools.skipUnless(netutils.is_ipv6_enabled(), "IPv6 is not enabled") def test_ipv6_router_advts_and_fwd_after_router_state_change_master(self): # Check that RA and forwarding are enabled when there's no IPv6 # gateway. self._test_ipv6_router_advts_and_fwd_helper('master', enable_v6_gw=False, expected_ra=True, expected_forwarding=True) # Check that RA is disabled and forwarding is enabled when an IPv6 # gateway is configured. self._test_ipv6_router_advts_and_fwd_helper('master', enable_v6_gw=True, expected_ra=False, expected_forwarding=True) @testtools.skipUnless(netutils.is_ipv6_enabled(), "IPv6 is not enabled") def test_ipv6_router_advts_and_fwd_after_router_state_change_backup(self): # Check that both RA and forwarding are disabled on backup instances self._test_ipv6_router_advts_and_fwd_helper('backup', enable_v6_gw=False, expected_ra=False, expected_forwarding=False) self._test_ipv6_router_advts_and_fwd_helper('backup', enable_v6_gw=True, expected_ra=False, expected_forwarding=False) def test_keepalived_configuration(self): router_info = self.generate_router_info(enable_ha=True) router = self.manage_router(self.agent, router_info) expected = self.get_expected_keepalive_configuration(router) self.assertEqual(expected, router.keepalived_manager.get_conf_on_disk()) # Add a new FIP and change the GW IP address router.router = copy.deepcopy(router.router) existing_fip = '19.4.4.2' new_fip = '19.4.4.3' self._add_fip(router, new_fip) subnet_id = framework._uuid() fixed_ips = [{'ip_address': '19.4.4.10', 'prefixlen': 24, 'subnet_id': subnet_id}] subnets = [{'id': subnet_id, 'cidr': '19.4.4.0/24', 'gateway_ip': '19.4.4.5'}] router.router['gw_port']['subnets'] = subnets router.router['gw_port']['fixed_ips'] = fixed_ips router.process() # Get the updated configuration and assert that both FIPs are in, # and that the GW IP address was updated. 
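
# The keepalived assertions below match raw config substrings. A sketch
# of how the expected default-route and device-address lines are
# composed (formats taken from the assertions themselves):
def _demo_expected_config_lines(gw_ip, device_ip, prefixlen, device):
    return ('0.0.0.0/0 via %s' % gw_ip,
            '%s/%s dev %s' % (device_ip, prefixlen, device))

assert _demo_expected_config_lines('19.4.4.5', '19.4.4.10', 24, 'qg-x') == (
    '0.0.0.0/0 via 19.4.4.5', '19.4.4.10/24 dev qg-x')
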
new_config = router.keepalived_manager.config.get_config_str() old_gw = '0.0.0.0/0 via 19.4.4.1' new_gw = '0.0.0.0/0 via 19.4.4.5' old_external_device_ip = '19.4.4.4' new_external_device_ip = '19.4.4.10' self.assertIn(existing_fip, new_config) self.assertIn(new_fip, new_config) self.assertNotIn(old_gw, new_config) self.assertIn(new_gw, new_config) external_port = router.get_ex_gw_port() external_device_name = router.get_external_device_name( external_port['id']) self.assertNotIn('%s/24 dev %s' % (old_external_device_ip, external_device_name), new_config) self.assertIn('%s/24 dev %s' % (new_external_device_ip, external_device_name), new_config) def test_ha_router_conf_on_restarted_agent(self): router_info = self.generate_router_info(enable_ha=True) router1 = self.manage_router(self.agent, router_info) self._add_fip(router1, '192.168.111.12') restarted_agent = neutron_l3_agent.L3NATAgentWithStateReport( self.agent.host, self.agent.conf) self.manage_router(restarted_agent, router1.router) common_utils.wait_until_true( lambda: self.floating_ips_configured(router1)) self.assertIn( router1._get_primary_vip(), self._get_addresses_on_device( router1.ns_name, router1.get_ha_device_name())) def test_ha_router_ipv6_radvd_status(self): router_info = self.generate_router_info( ip_version=constants.IP_VERSION_6, enable_ha=True) router1 = self.manage_router(self.agent, router_info) common_utils.wait_until_true(lambda: router1.ha_state == 'master') common_utils.wait_until_true(lambda: router1.radvd.enabled) def _check_lla_status(router, expected): internal_devices = router.router[constants.INTERFACE_KEY] for device in internal_devices: lladdr = ip_lib.get_ipv6_lladdr(device['mac_address']) exists = ip_lib.device_exists_with_ips_and_mac( router.get_internal_device_name(device['id']), [lladdr], device['mac_address'], router.ns_name) self.assertEqual(expected, exists) _check_lla_status(router1, True) device_name = router1.get_ha_device_name() ha_device = ip_lib.IPDevice(device_name, namespace=router1.ns_name) ha_device.link.set_down() common_utils.wait_until_true(lambda: router1.ha_state == 'backup') common_utils.wait_until_true( lambda: not router1.radvd.enabled, timeout=10) _check_lla_status(router1, False) def test_ha_router_process_ipv6_subnets_to_existing_port(self): router_info = self.generate_router_info(enable_ha=True, ip_version=constants.IP_VERSION_6) router = self.manage_router(self.agent, router_info) def verify_ip_in_keepalived_config(router, iface): config = router.keepalived_manager.config.get_config_str() ip_cidrs = common_utils.fixed_ip_cidrs(iface['fixed_ips']) for ip_addr in ip_cidrs: self.assertIn(ip_addr, config) interface_id = router.router[constants.INTERFACE_KEY][0]['id'] slaac = constants.IPV6_SLAAC slaac_mode = {'ra_mode': slaac, 'address_mode': slaac} # Add a second IPv6 subnet to the router internal interface. self._add_internal_interface_by_subnet(router.router, count=1, ip_version=constants.IP_VERSION_6, ipv6_subnet_modes=[slaac_mode], interface_id=interface_id) router.process() common_utils.wait_until_true(lambda: router.ha_state == 'master') # Verify that router internal interface is present and is configured # with IP address from both the subnets. internal_iface = router.router[constants.INTERFACE_KEY][0] self.assertEqual(2, len(internal_iface['fixed_ips'])) self._assert_internal_devices(router) # Verify that keepalived config is properly updated. 
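# Every fixed IP of an HA router is expected to be tracked as a
# keepalived VIP; if the second (SLAAC) subnet were missing from the
# config, a failover would bring the interface up on the new master
# without that address.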
verify_ip_in_keepalived_config(router, internal_iface) # Remove one subnet from the router internal iface interfaces = copy.deepcopy(router.router.get( constants.INTERFACE_KEY, [])) fixed_ips, subnets = [], [] fixed_ips.append(interfaces[0]['fixed_ips'][0]) subnets.append(interfaces[0]['subnets'][0]) interfaces[0].update({'fixed_ips': fixed_ips, 'subnets': subnets}) router.router[constants.INTERFACE_KEY] = interfaces router.process() # Verify that router internal interface has a single ipaddress internal_iface = router.router[constants.INTERFACE_KEY][0] self.assertEqual(1, len(internal_iface['fixed_ips'])) self._assert_internal_devices(router) # Verify that keepalived config is properly updated. verify_ip_in_keepalived_config(router, internal_iface) def test_delete_external_gateway_on_standby_router(self): router_info = self.generate_router_info(enable_ha=True) router = self.manage_router(self.agent, router_info) self.fail_ha_router(router) common_utils.wait_until_true(lambda: router.ha_state == 'backup') # The purpose of the test is to simply make sure no exception is raised port = router.get_ex_gw_port() interface_name = router.get_external_device_name(port['id']) router.external_gateway_removed(port, interface_name) def test_removing_floatingip_immediately(self): router_info = self.generate_router_info(enable_ha=True) router = self.manage_router(self.agent, router_info) ex_gw_port = router.get_ex_gw_port() interface_name = router.get_external_device_interface_name(ex_gw_port) common_utils.wait_until_true(lambda: router.ha_state == 'master') self._add_fip(router, '172.168.1.20', fixed_address='10.0.0.3') router.process() router.router[constants.FLOATINGIP_KEY] = [] # The purpose of the test is to simply make sure no exception is raised # Because router.process will consume the FloatingIpSetupException, # call the configure_fip_addresses directly here router.configure_fip_addresses(interface_name) def test_ha_port_status_update(self): router_info = self.generate_router_info(enable_ha=True) router_info[constants.HA_INTERFACE_KEY]['status'] = ( constants.PORT_STATUS_DOWN) router1 = self.manage_router(self.agent, router_info) common_utils.wait_until_true(lambda: router1.ha_state == 'backup') router1.router[constants.HA_INTERFACE_KEY]['status'] = ( constants.PORT_STATUS_ACTIVE) self.agent._process_updated_router(router1.router) common_utils.wait_until_true(lambda: router1.ha_state == 'master') def test_ha_router_namespace_has_ip_nonlocal_bind_disabled(self): router_info = self.generate_router_info(enable_ha=True) router = self.manage_router(self.agent, router_info) try: ip_nonlocal_bind_value = ip_lib.get_ip_nonlocal_bind( router.router_namespace.name) except RuntimeError as rte: stat_message = 'cannot stat /proc/sys/net/ipv4/ip_nonlocal_bind' if stat_message in str(rte): raise self.skipException( "This kernel doesn't support %s in network namespaces." 
% ( ip_lib.IP_NONLOCAL_BIND)) raise self.assertEqual(0, ip_nonlocal_bind_value) @testtools.skipUnless(netutils.is_ipv6_enabled(), "IPv6 is not enabled") def test_ha_router_namespace_has_ipv6_forwarding_disabled(self): router_info = self.generate_router_info(enable_ha=True) router_info[constants.HA_INTERFACE_KEY]['status'] = ( constants.PORT_STATUS_DOWN) router = self.manage_router(self.agent, router_info) external_port = router.get_ex_gw_port() external_device_name = router.get_external_device_name( external_port['id']) common_utils.wait_until_true(lambda: router.ha_state == 'backup') self._wait_until_ipv6_forwarding_has_state( router.ns_name, external_device_name, 0) router.router[constants.HA_INTERFACE_KEY]['status'] = ( constants.PORT_STATUS_ACTIVE) self.agent._process_updated_router(router.router) common_utils.wait_until_true(lambda: router.ha_state == 'master') self._wait_until_ipv6_forwarding_has_state( router.ns_name, external_device_name, 1) @testtools.skipUnless(netutils.is_ipv6_enabled(), "IPv6 is not enabled") def test_ha_router_without_gw_ipv6_forwarding_state(self): router_info = self.generate_router_info( enable_ha=True, enable_gw=False) router_info[constants.HA_INTERFACE_KEY]['status'] = ( constants.PORT_STATUS_DOWN) router = self.manage_router(self.agent, router_info) common_utils.wait_until_true(lambda: router.ha_state == 'backup') self._wait_until_ipv6_forwarding_has_state(router.ns_name, 'all', 0) router.router[constants.HA_INTERFACE_KEY]['status'] = ( constants.PORT_STATUS_ACTIVE) self.agent._process_updated_router(router.router) common_utils.wait_until_true(lambda: router.ha_state == 'master') self._wait_until_ipv6_forwarding_has_state(router.ns_name, 'all', 1) class L3HATestFailover(framework.L3AgentTestFramework): def setUp(self): super(L3HATestFailover, self).setUp() conf = self._configure_agent('agent2') self.failover_agent = neutron_l3_agent.L3NATAgentWithStateReport( 'agent2', conf) br_int_1 = self._get_agent_ovs_integration_bridge(self.agent) br_int_2 = self._get_agent_ovs_integration_bridge(self.failover_agent) veth1, veth2 = self.useFixture(net_helpers.VethFixture()).ports veth1.link.set_up() veth2.link.set_up() br_int_1.add_port(veth1.name) br_int_2.add_port(veth2.name) @staticmethod def fail_gw_router_port(router): # NOTE(slaweq): in HA failover tests there are two integration bridges # connected with veth pair to each other. 
To stop traffic from router's # namespace to gw ip (19.4.4.1) it needs to be blocked by openflow rule # as simple setting ovs_integration_bridge device DOWN will not be # enough because same IP address is also configured on # ovs_integration_bridge device from second router and it will still # respond to ping r_br = ovs_lib.OVSBridge(router.driver.conf.OVS.integration_bridge) external_port = router.get_ex_gw_port() for subnet in external_port['subnets']: r_br.add_flow( proto='ip', nw_dst=subnet['gateway_ip'], actions='drop') @staticmethod def restore_gw_router_port(router): r_br = ovs_lib.OVSBridge(router.driver.conf.OVS.integration_bridge) external_port = router.get_ex_gw_port() for subnet in external_port['subnets']: r_br.delete_flows(proto='ip', nw_dst=subnet['gateway_ip']) def test_ha_router_failover(self): router1, router2 = self.create_ha_routers() master_router, slave_router = self._get_master_and_slave_routers( router1, router2) self._assert_ipv6_accept_ra(master_router, True) self._assert_ipv6_forwarding(master_router, True, True) self._assert_ipv6_accept_ra(slave_router, False) self._assert_ipv6_forwarding(slave_router, False, False) self.fail_ha_router(router1) # NOTE: passing slave_router as first argument, because we expect # that this router should be the master new_master, new_slave = self._get_master_and_slave_routers( slave_router, master_router) self.assertEqual(master_router, new_slave) self.assertEqual(slave_router, new_master) self._assert_ipv6_accept_ra(new_master, True) self._assert_ipv6_forwarding(new_master, True, True) self._assert_ipv6_accept_ra(new_slave, False) # after transition from master -> slave, 'all' IPv6 forwarding should # be enabled self._assert_ipv6_forwarding(new_slave, False, True) def test_ha_router_lost_gw_connection(self): self.agent.conf.set_override( 'ha_vrrp_health_check_interval', 5) self.failover_agent.conf.set_override( 'ha_vrrp_health_check_interval', 5) router1, router2 = self.create_ha_routers() master_router, slave_router = self._get_master_and_slave_routers( router1, router2) self.fail_gw_router_port(master_router) # NOTE: passing slave_router as first argument, because we expect # that this router should be the master new_master, new_slave = self._get_master_and_slave_routers( slave_router, master_router) self.assertEqual(master_router, new_slave) self.assertEqual(slave_router, new_master) def test_both_ha_router_lost_gw_connection(self): self.agent.conf.set_override( 'ha_vrrp_health_check_interval', 5) self.failover_agent.conf.set_override( 'ha_vrrp_health_check_interval', 5) router1, router2 = self.create_ha_routers() master_router, slave_router = self._get_master_and_slave_routers( router1, router2) self.fail_gw_router_port(master_router) self.fail_gw_router_port(slave_router) common_utils.wait_until_true( lambda: master_router.ha_state == 'master') common_utils.wait_until_true( lambda: slave_router.ha_state == 'master') self.restore_gw_router_port(master_router) new_master, new_slave = self._get_master_and_slave_routers( master_router, slave_router) self.assertEqual(master_router, new_master) self.assertEqual(slave_router, new_slave) class LinuxBridgeL3HATestCase(L3HATestCase): INTERFACE_DRIVER = 'neutron.agent.linux.interface.BridgeInterfaceDriver' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/l3/test_keepalived_state_change.py0000644000175000017500000001745400000000000032017 0ustar00coreycorey00000000000000# 
Copyright (c) 2015 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import os import eventlet import mock import netaddr from oslo_utils import uuidutils from neutron.agent.l3 import ha from neutron.agent.l3 import ha_router from neutron.agent.linux import external_process from neutron.agent.linux import ip_lib from neutron.agent.linux import utils as linux_utils from neutron.common import utils from neutron.tests.common import machine_fixtures as mf from neutron.tests.common import net_helpers from neutron.tests.functional import base def has_expected_arp_entry(device_name, namespace, ip, mac): ip_version = utils.get_ip_version(ip) entry = ip_lib.dump_neigh_entries(ip_version, device_name, namespace, dst=ip, lladdr=mac) return entry != [] class TestMonitorDaemon(base.BaseLoggingTestCase): def setUp(self): super(TestMonitorDaemon, self).setUp() self.conf_dir = self.get_default_temp_dir().path self.pid_file = os.path.join(self.conf_dir, 'pid_file') self.log_file = os.path.join(self.conf_dir, 'log_file') self.state_file = os.path.join(self.conf_dir, 'keepalived-state-change') self.cidr = '169.254.151.1/24' bridge = self.useFixture(net_helpers.OVSBridgeFixture()).bridge self.machines = self.useFixture(mf.PeerMachines(bridge)) self.router, self.peer = self.machines.machines[:2] self.router_id = uuidutils.generate_uuid() self._generate_cmd_opts() self.ext_process = external_process.ProcessManager( None, '%s.monitor' % self.pid_file, None, service='test_ip_mon', pids_path=self.conf_dir, default_cmd_callback=self._callback, run_as_root=True) server = linux_utils.UnixDomainWSGIServer( 'neutron-keepalived-state-change', num_threads=1) server.start(ha.KeepalivedStateChangeHandler(mock.Mock()), self.state_file, workers=0, backlog=ha.KEEPALIVED_STATE_CHANGE_SERVER_BACKLOG) self.addCleanup(server.stop) def _run_monitor(self): self.ext_process.enable() self.addCleanup(self.ext_process.disable) eventlet.sleep(5) def _callback(self, *args): return self.cmd_opts def _generate_cmd_opts(self, monitor_interface=None, cidr=None): monitor_interface = monitor_interface or self.router.port.name cidr = cidr or self.cidr self.cmd_opts = [ ha_router.STATE_CHANGE_PROC_NAME, '--router_id=%s' % self.router_id, '--namespace=%s' % self.router.namespace, '--conf_dir=%s' % self.conf_dir, '--log-file=%s' % self.log_file, '--monitor_interface=%s' % monitor_interface, '--monitor_cidr=%s' % cidr, '--pid_file=%s' % self.pid_file, '--state_path=%s' % self.conf_dir, '--user=%s' % os.geteuid(), '--group=%s' % os.getegid()] def _search_in_file(self, file_name, text): def text_in_file(): try: return text in open(file_name).read() except FileNotFoundError: return False try: utils.wait_until_true(text_in_file, timeout=15) except utils.WaitTimeout: devices = {} for dev in ip_lib.IPWrapper( namespace=self.router.namespace).get_devices(): devices[dev.name] = [addr['cidr'] for addr in dev.addr.list()] # NOTE: we need to read here the content of the file. 
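# Embedding the device addresses and the file content in the failure
# message is the only post-mortem aid available: the namespace and the
# temporary log file are discarded as soon as the test case finishes.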
self.fail( 'Text not found in file %(file_name)s: "%(text)s".\nDevice ' 'addresses: %(devices)s.\nFile content:\n%(file_content)s' % {'file_name': file_name, 'text': text, 'devices': devices, 'file_content': open(file_name).read()}) def test_new_fip_sends_garp(self): ns_ip_wrapper = ip_lib.IPWrapper(self.router.namespace) new_interface = ns_ip_wrapper.add_dummy('new_interface') new_interface_cidr = '169.254.152.1/24' new_interface.link.set_up() new_interface.addr.add(new_interface_cidr) self._generate_cmd_opts(monitor_interface='new_interface', cidr=new_interface_cidr) self._run_monitor() next_ip_cidr = net_helpers.increment_ip_cidr(self.machines.ip_cidr, 2) expected_ip = str(netaddr.IPNetwork(next_ip_cidr).ip) # Create incomplete ARP entry self.peer.assert_no_ping(expected_ip) # Wait for ping expiration eventlet.sleep(1) has_entry = has_expected_arp_entry( self.peer.port.name, self.peer.namespace, expected_ip, self.router.port.link.address) self.assertFalse(has_entry) self.router.port.addr.add(next_ip_cidr) has_arp_entry_predicate = functools.partial( has_expected_arp_entry, self.peer.port.name, self.peer.namespace, expected_ip, self.router.port.link.address, ) exc = RuntimeError( "No ARP entry in %s namespace containing IP address %s and MAC " "address %s" % ( self.peer.namespace, expected_ip, self.router.port.link.address)) utils.wait_until_true(has_arp_entry_predicate, timeout=15, exception=exc) msg = ('Sent GARP to %(cidr)s from %(device)s' % {'cidr': expected_ip, 'device': self.router.port.name}) self._search_in_file(self.log_file, msg) def test_read_queue_change_state(self): self._run_monitor() msg = 'Wrote router %s state %s' self.router.port.addr.add(self.cidr) self._search_in_file(self.log_file, msg % (self.router_id, 'master')) self.router.port.addr.delete(self.cidr) self._search_in_file(self.log_file, msg % (self.router_id, 'backup')) def test_read_queue_send_garp(self): self._run_monitor() dev_dummy = 'dev_dummy' ip_wrapper = ip_lib.IPWrapper(namespace=self.router.namespace) ip_wrapper.add_dummy(dev_dummy) ip_device = ip_lib.IPDevice(dev_dummy, namespace=self.router.namespace) ip_device.link.set_up() msg = 'Sent GARP to %(ip_address)s from %(device_name)s' for idx in range(2, 20): next_cidr = net_helpers.increment_ip_cidr(self.cidr, idx) ip_device.addr.add(next_cidr) msg_args = {'ip_address': str(netaddr.IPNetwork(next_cidr).ip), 'device_name': dev_dummy} self._search_in_file(self.log_file, msg % msg_args) ip_device.addr.delete(next_cidr) def test_handle_initial_state_backup(self): # No tracked IP (self.cidr) is configured in the monitored interface # (self.router.port) self._run_monitor() msg = 'Initial status of router %s is %s' % (self.router_id, 'backup') self._search_in_file(self.log_file, msg) def test_handle_initial_state_master(self): self.router.port.addr.add(self.cidr) self._run_monitor() msg = 'Initial status of router %s is %s' % (self.router_id, 'master') self._search_in_file(self.log_file, msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/l3/test_legacy_router.py0000644000175000017500000004737500000000000030052 0ustar00coreycorey00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import mock from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as lib_constants from neutron.agent.l3 import agent as l3_agent from neutron.agent.l3 import namespace_manager from neutron.agent.l3 import namespaces from neutron.agent.l3 import router_info from neutron.agent.linux import ip_lib from neutron.common import utils from neutron.tests.common import machine_fixtures from neutron.tests.common import net_helpers from neutron.tests.functional.agent.l3 import framework class L3AgentTestCase(framework.L3AgentTestFramework): def _test_agent_notifications_for_router_events(self, enable_ha=False): """Test notifications for router create, update, and delete. Make sure that when the agent sends notifications of router events for router create, update, and delete, that the correct handler is called with the right resource, event, and router information. """ event_handler = mock.Mock() registry.subscribe(event_handler, resources.ROUTER, events.BEFORE_CREATE) registry.subscribe(event_handler, resources.ROUTER, events.AFTER_CREATE) registry.subscribe(event_handler, resources.ROUTER, events.BEFORE_UPDATE) registry.subscribe(event_handler, resources.ROUTER, events.AFTER_UPDATE) registry.subscribe(event_handler, resources.ROUTER, events.BEFORE_DELETE) registry.subscribe(event_handler, resources.ROUTER, events.AFTER_DELETE) router_info = self.generate_router_info(enable_ha=enable_ha) router = self.manage_router(self.agent, router_info) with mock.patch.object(self.agent, 'check_ha_state_for_router') as check: self.agent._process_updated_router(router.router) self._delete_router(self.agent, router.router_id) if enable_ha: check.assert_called_once_with(router.router_id, None) expected_calls = [ mock.call('router', 'before_create', self.agent, router=router), mock.call('router', 'after_create', self.agent, router=router), mock.call('router', 'before_update', self.agent, router=router), mock.call('router', 'after_update', self.agent, router=router), mock.call('router', 'before_delete', self.agent, payload=mock.ANY), mock.call('router', 'after_delete', self.agent, router=router)] event_handler.assert_has_calls(expected_calls) def test_agent_notifications_for_router_events(self): self._test_agent_notifications_for_router_events() def test_agent_notifications_for_router_events_ha(self): self._test_agent_notifications_for_router_events(enable_ha=True) def test_legacy_router_update_floatingip_statuses(self): self._test_update_floatingip_statuses( self.generate_router_info(enable_ha=False)) def test_legacy_router_lifecycle(self): self._router_lifecycle(enable_ha=False, dual_stack=True) def test_legacy_router_lifecycle_with_no_gateway_subnet(self): self.agent.conf.set_override('ipv6_gateway', 'fe80::f816:3eff:fe2e:1') self._router_lifecycle(enable_ha=False, dual_stack=True, v6_ext_gw_with_sub=False) def test_legacy_router_gateway_update_to_none(self): router_info = self.generate_router_info(False) router = self.manage_router(self.agent, router_info) gw_port = router.get_ex_gw_port() 
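# Clearing gateway_ip on every external subnet should cause the agent
# to remove the default route from the router namespace while leaving
# the external gateway device itself in place.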
interface_name = router.get_external_device_name(gw_port['id']) device = ip_lib.IPDevice(interface_name, namespace=router.ns_name) self.assertIn('via', device.route.get_gateway()) # Make this copy, so that the agent will think there is change in # external gateway port. router.ex_gw_port = copy.deepcopy(router.ex_gw_port) for subnet in gw_port['subnets']: subnet['gateway_ip'] = None router.process() self.assertIsNone(device.route.get_gateway()) def test_router_processing_pool_size(self): mock.patch.object(router_info.RouterInfo, 'initialize').start() mock.patch.object(router_info.RouterInfo, 'process').start() self.agent.l3_ext_manager = mock.Mock() mock.patch.object(router_info.RouterInfo, 'delete').start() mock.patch.object(registry, 'notify').start() router_info_1 = self.generate_router_info(False) r1 = self.manage_router(self.agent, router_info_1) self.assertEqual(l3_agent.ROUTER_PROCESS_GREENLET_MIN, self.agent._pool.size) router_info_2 = self.generate_router_info(False) r2 = self.manage_router(self.agent, router_info_2) self.assertEqual(l3_agent.ROUTER_PROCESS_GREENLET_MIN, self.agent._pool.size) router_info_list = [r1, r2] for _i in range(l3_agent.ROUTER_PROCESS_GREENLET_MAX + 1): ri = self.generate_router_info(False) rtr = self.manage_router(self.agent, ri) router_info_list.append(rtr) self.assertEqual(l3_agent.ROUTER_PROCESS_GREENLET_MAX, self.agent._pool.size) for router in router_info_list: self.agent._safe_router_removed(router.router_id) agent_router_info_len = len(self.agent.router_info) if agent_router_info_len < l3_agent.ROUTER_PROCESS_GREENLET_MIN: self.assertEqual(l3_agent.ROUTER_PROCESS_GREENLET_MIN, self.agent._pool.size) elif (l3_agent.ROUTER_PROCESS_GREENLET_MIN <= agent_router_info_len <= l3_agent.ROUTER_PROCESS_GREENLET_MAX): self.assertEqual(agent_router_info_len, self.agent._pool.size) else: self.assertEqual(l3_agent.ROUTER_PROCESS_GREENLET_MAX, self.agent._pool.size) def _make_bridge(self): bridge = framework.get_ovs_bridge(utils.get_rand_name()) bridge.create() self.addCleanup(bridge.destroy) return bridge def test_legacy_router_ns_rebuild(self): router_info = self.generate_router_info(False) router = self.manage_router(self.agent, router_info) gw_port = router.router['gw_port'] gw_inf_name = router.get_external_device_name(gw_port['id']) gw_device = ip_lib.IPDevice(gw_inf_name, namespace=router.ns_name) router_ports = [gw_device] for i_port in router_info.get(lib_constants.INTERFACE_KEY, []): interface_name = router.get_internal_device_name(i_port['id']) router_ports.append( ip_lib.IPDevice(interface_name, namespace=router.ns_name)) namespaces.Namespace.delete(router.router_namespace) # l3 agent should be able to rebuild the ns when it is deleted self.manage_router(self.agent, router_info) # Assert the router ports are there in namespace self.assertTrue(all([port.exists() for port in router_ports])) self._delete_router(self.agent, router.router_id) def test_conntrack_disassociate_fip_legacy_router(self): self._test_conntrack_disassociate_fip(ha=False) def _test_periodic_sync_routers_task(self, routers_to_keep, routers_deleted, routers_deleted_during_resync): ns_names_to_retrieve = set() deleted_routers_info = [] for r in routers_to_keep: ri = self.manage_router(self.agent, r) ns_names_to_retrieve.add(ri.ns_name) for r in routers_deleted + routers_deleted_during_resync: ri = self.manage_router(self.agent, r) deleted_routers_info.append(ri) ns_names_to_retrieve.add(ri.ns_name) mocked_get_router_ids = self.mock_plugin_api.get_router_ids 
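# The mocked plugin RPC reports only the routers that are meant to
# survive the resync; any router the agent still hosts beyond that
# list has to be treated as stale and torn down by
# periodic_sync_routers_task().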
mocked_get_router_ids.return_value = [r['id'] for r in routers_to_keep + routers_deleted_during_resync] mocked_get_routers = self.mock_plugin_api.get_routers mocked_get_routers.return_value = (routers_to_keep + routers_deleted_during_resync) # clear agent router_info as it will be after restart self.agent.router_info = {} # Synchronize the agent with the plug-in with mock.patch.object(namespace_manager.NamespaceManager, 'list_all', return_value=ns_names_to_retrieve): self.agent.periodic_sync_routers_task(self.agent.context) # Mock the plugin RPC API so a known external network id is returned # when the router updates are processed by the agent external_network_id = framework._uuid() # Plug external_gateway_info in the routers that are not going to be # deleted by the agent when it processes the updates. Otherwise, # _process_router_if_compatible in the agent fails for r in routers_to_keep: r['external_gateway_info'] = {'network_id': external_network_id} # while sync updates are still in the queue, higher priority # router_deleted events may be added there as well for r in routers_deleted_during_resync: self.agent.router_deleted(self.agent.context, r['id']) # make sure all events are processed while not self.agent._queue._queue.empty(): self.agent._process_router_update() for r in routers_to_keep: self.assertIn(r['id'], self.agent.router_info) self.assertTrue(self._namespace_exists(namespaces.NS_PREFIX + r['id'])) for ri in deleted_routers_info: self.assertNotIn(ri.router_id, self.agent.router_info) self._assert_router_does_not_exist(ri) def test_periodic_sync_routers_task(self): routers_to_keep = [] for i in range(2): routers_to_keep.append(self.generate_router_info(False)) self._test_periodic_sync_routers_task(routers_to_keep, routers_deleted=[], routers_deleted_during_resync=[]) def test_periodic_sync_routers_task_routers_deleted_while_agent_down(self): routers_to_keep = [] routers_deleted = [] for i in range(2): routers_to_keep.append(self.generate_router_info(False)) for i in range(2): routers_deleted.append(self.generate_router_info(False)) self._test_periodic_sync_routers_task(routers_to_keep, routers_deleted, routers_deleted_during_resync=[]) def test_periodic_sync_routers_task_routers_deleted_while_agent_sync(self): routers_to_keep = [] routers_deleted_during_resync = [] for i in range(2): routers_to_keep.append(self.generate_router_info(False)) for i in range(2): routers_deleted_during_resync.append( self.generate_router_info(False)) self._test_periodic_sync_routers_task( routers_to_keep, routers_deleted=[], routers_deleted_during_resync=routers_deleted_during_resync) def _setup_fip_with_fixed_ip_from_same_subnet(self, enable_snat): """Setup 2 FakeMachines from same subnet, one with floatingip associated. 
""" router_info = self.generate_router_info(enable_ha=False, enable_snat=enable_snat) router = self.manage_router(self.agent, router_info) router_ip_cidr = self._port_first_ip_cidr(router.internal_ports[0]) router_ip = router_ip_cidr.partition('/')[0] br_int = framework.get_ovs_bridge( self.agent.conf.OVS.integration_bridge) src_machine, dst_machine = self.useFixture( machine_fixtures.PeerMachines( br_int, net_helpers.increment_ip_cidr(router_ip_cidr), router_ip)).machines dst_fip = '19.4.4.10' router.router[lib_constants.FLOATINGIP_KEY] = [] self._add_fip(router, dst_fip, fixed_address=dst_machine.ip) router.process() return src_machine, dst_machine, dst_fip def test_fip_connection_from_same_subnet(self): '''Test connection to floatingip which is associated with fixed_ip on the same subnet of the source fixed_ip. In other words it confirms that return packets surely go through the router. ''' src_machine, dst_machine, dst_fip = ( self._setup_fip_with_fixed_ip_from_same_subnet(enable_snat=True)) protocol_port = net_helpers.get_free_namespace_port( lib_constants.PROTO_NAME_TCP, dst_machine.namespace) # client sends to fip netcat = net_helpers.NetcatTester( src_machine.namespace, dst_machine.namespace, dst_fip, protocol_port, protocol=net_helpers.NetcatTester.TCP) self.addCleanup(netcat.stop_processes) self.assertTrue(netcat.test_connectivity()) def test_ping_floatingip_reply_with_floatingip(self): src_machine, _, dst_fip = ( self._setup_fip_with_fixed_ip_from_same_subnet(enable_snat=False)) # Verify that the ping replys with fip ns_ip_wrapper = ip_lib.IPWrapper(src_machine.namespace) result = ns_ip_wrapper.netns.execute( ['ping', '-W', 5, '-c', 1, dst_fip]) self._assert_ping_reply_from_expected_address(result, dst_fip) def _setup_address_scope(self, internal_address_scope1, internal_address_scope2, gw_address_scope=None): router_info = self.generate_router_info(enable_ha=False, num_internal_ports=2) address_scope1 = { str(lib_constants.IP_VERSION_4): internal_address_scope1} address_scope2 = { str(lib_constants.IP_VERSION_4): internal_address_scope2} if gw_address_scope: router_info['gw_port']['address_scopes'] = { str(lib_constants.IP_VERSION_4): gw_address_scope} router_info[lib_constants.INTERFACE_KEY][0]['address_scopes'] = ( address_scope1) router_info[lib_constants.INTERFACE_KEY][1]['address_scopes'] = ( address_scope2) router = self.manage_router(self.agent, router_info) router_ip_cidr1 = self._port_first_ip_cidr(router.internal_ports[0]) router_ip1 = router_ip_cidr1.partition('/')[0] router_ip_cidr2 = self._port_first_ip_cidr(router.internal_ports[1]) router_ip2 = router_ip_cidr2.partition('/')[0] br_int = framework.get_ovs_bridge( self.agent.conf.OVS.integration_bridge) test_machine1 = self.useFixture( machine_fixtures.FakeMachine( br_int, net_helpers.increment_ip_cidr(router_ip_cidr1), router_ip1)) test_machine2 = self.useFixture( machine_fixtures.FakeMachine( br_int, net_helpers.increment_ip_cidr(router_ip_cidr2), router_ip2)) return test_machine1, test_machine2, router def test_connection_from_same_address_scope(self): test_machine1, test_machine2, _ = self._setup_address_scope( 'scope1', 'scope1') # Internal networks that are in the same address scope can connected # each other net_helpers.assert_ping(test_machine1.namespace, test_machine2.ip) net_helpers.assert_ping(test_machine2.namespace, test_machine1.ip) def test_connection_from_diff_address_scope(self): test_machine1, test_machine2, _ = self._setup_address_scope( 'scope1', 'scope2') # Internal networks that are not in the 
same address scope should # not reach each other test_machine1.assert_no_ping(test_machine2.ip) test_machine2.assert_no_ping(test_machine1.ip) def test_fip_connection_for_address_scope(self): (machine_same_scope, machine_diff_scope, router) = self._setup_address_scope('scope1', 'scope2', 'scope1') router.router[lib_constants.FLOATINGIP_KEY] = [] fip_same_scope = '19.4.4.10' self._add_fip(router, fip_same_scope, fixed_address=machine_same_scope.ip, fixed_ip_address_scope='scope1') fip_diff_scope = '19.4.4.11' self._add_fip(router, fip_diff_scope, fixed_address=machine_diff_scope.ip, fixed_ip_address_scope='scope2') router.process() br_int = framework.get_ovs_bridge( self.agent.conf.OVS.integration_bridge) src_machine = self.useFixture( machine_fixtures.FakeMachine(br_int, '19.4.4.12/24')) # Floating ip should work no matter of address scope net_helpers.assert_ping(src_machine.namespace, fip_same_scope) net_helpers.assert_ping(src_machine.namespace, fip_diff_scope) def test_direct_route_for_address_scope(self): (machine_same_scope, machine_diff_scope, router) = self._setup_address_scope('scope1', 'scope2', 'scope1') gw_port = router.get_ex_gw_port() gw_ip = self._port_first_ip_cidr(gw_port).partition('/')[0] br_int = framework.get_ovs_bridge( self.agent.conf.OVS.integration_bridge) src_machine = self.useFixture( machine_fixtures.FakeMachine(br_int, '19.4.4.12/24', gw_ip)) # For the internal networks that are in the same address scope as # external network, they can directly route to external network net_helpers.assert_ping(src_machine.namespace, machine_same_scope.ip) # For the internal networks that are not in the same address scope as # external networks. SNAT will be used. Direct route will not work # here. src_machine.assert_no_ping(machine_diff_scope.ip) def test_connection_from_diff_address_scope_with_fip(self): (machine_same_scope, machine_diff_scope, router) = self._setup_address_scope('scope1', 'scope2', 'scope1') router.router[lib_constants.FLOATINGIP_KEY] = [] fip = '19.4.4.11' self._add_fip(router, fip, fixed_address=machine_diff_scope.ip, fixed_ip_address_scope='scope2') router.process() # For the internal networks that are in the same address scope as # external network, they should be able to reach the floating ip net_helpers.assert_ping(machine_same_scope.namespace, fip) # For the port with fip, it should be able to reach the internal # networks that are in the same address scope as external network net_helpers.assert_ping(machine_diff_scope.namespace, machine_same_scope.ip) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/l3/test_metadata_proxy.py0000644000175000017500000001272700000000000030220 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
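# These tests exercise the l3-agent metadata path end to end: a fake
# "vm" namespace curls http://169.254.169.254:80, the metadata proxy
# spawned in the router namespace relays the request over a unix
# domain socket, and a minimal WSGI handler below stands in for the
# real metadata service. The first response line checked by the tests
# should therefore look roughly like "HTTP/1.1 200 OK".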
import os.path import time import webob import webob.dec import webob.exc from neutron.agent.linux import dhcp from neutron.agent.linux import utils from neutron.tests.common import machine_fixtures from neutron.tests.common import net_helpers from neutron.tests.functional.agent.l3 import framework from neutron.tests.functional.agent.linux import helpers METADATA_REQUEST_TIMEOUT = 60 METADATA_REQUEST_SLEEP = 5 class MetadataFakeProxyHandler(object): def __init__(self, status): self.status = status @webob.dec.wsgify() def __call__(self, req): return webob.Response(status=self.status) class MetadataL3AgentTestCase(framework.L3AgentTestFramework): SOCKET_MODE = 0o644 def _create_metadata_fake_server(self, status): server = utils.UnixDomainWSGIServer('metadata-fake-server') self.addCleanup(server.stop) # NOTE(cbrandily): TempDir fixture creates a folder with 0o700 # permissions but metadata_proxy_socket folder must be readable by all # users self.useFixture( helpers.RecursivePermDirFixture( os.path.dirname(self.agent.conf.metadata_proxy_socket), 0o555)) server.start(MetadataFakeProxyHandler(status), self.agent.conf.metadata_proxy_socket, workers=0, backlog=4096, mode=self.SOCKET_MODE) def _query_metadata_proxy(self, machine): url = 'http://%(host)s:%(port)s' % {'host': dhcp.METADATA_DEFAULT_IP, 'port': dhcp.METADATA_PORT} cmd = 'curl', '--max-time', METADATA_REQUEST_TIMEOUT, '-D-', url i = 0 CONNECTION_REFUSED_TIMEOUT = METADATA_REQUEST_TIMEOUT // 2 while i <= CONNECTION_REFUSED_TIMEOUT: try: raw_headers = machine.execute(cmd) break except RuntimeError as e: if 'Connection refused' in str(e): time.sleep(METADATA_REQUEST_SLEEP) i += METADATA_REQUEST_SLEEP else: self.fail('metadata proxy unreachable ' 'on %s before timeout' % url) if i > CONNECTION_REFUSED_TIMEOUT: self.fail('Timed out waiting metadata proxy to become available') return raw_headers.splitlines()[0] def test_access_to_metadata_proxy(self): """Test access to the l3-agent metadata proxy. The test creates: * A l3-agent metadata service: * A router (which creates a metadata proxy in the router namespace), * A fake metadata server * A "client" namespace (simulating a vm) with a port on router internal subnet. The test queries from the "client" namespace the metadata proxy on http://169.254.169.254 and asserts that the metadata proxy added the X-Forwarded-For and X-Neutron-Router-Id headers to the request and forwarded the http request to the fake metadata server and the response to the "client" namespace. """ router_info = self.generate_router_info(enable_ha=False) router = self.manage_router(self.agent, router_info) self._create_metadata_fake_server(webob.exc.HTTPOk.code) # Create and configure client namespace router_ip_cidr = self._port_first_ip_cidr(router.internal_ports[0]) br_int = framework.get_ovs_bridge( self.agent.conf.OVS.integration_bridge) machine = self.useFixture( machine_fixtures.FakeMachine( br_int, net_helpers.increment_ip_cidr(router_ip_cidr), router_ip_cidr.partition('/')[0])) # Query metadata proxy firstline = self._query_metadata_proxy(machine) # Check status code self.assertIn(str(webob.exc.HTTPOk.code), firstline.split()) class UnprivilegedUserMetadataL3AgentTestCase(MetadataL3AgentTestCase): """Test metadata proxy with least privileged user. The least privileged user has uid=65534 and is commonly named 'nobody' but not always, that's why we use its uid. 
""" SOCKET_MODE = 0o664 def setUp(self): super(UnprivilegedUserMetadataL3AgentTestCase, self).setUp() self.agent.conf.set_override('metadata_proxy_user', '65534') class UnprivilegedUserGroupMetadataL3AgentTestCase(MetadataL3AgentTestCase): """Test metadata proxy with least privileged user/group. The least privileged user has uid=65534 and is commonly named 'nobody' but not always, that's why we use its uid. Its group has gid=65534 and is commonly named 'nobody' or 'nogroup', that's why we use its gid. """ SOCKET_MODE = 0o666 def setUp(self): super(UnprivilegedUserGroupMetadataL3AgentTestCase, self).setUp() self.agent.conf.set_override('metadata_proxy_user', '65534') self.agent.conf.set_override('metadata_proxy_group', '65534') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/l3/test_namespace_manager.py0000644000175000017500000000677700000000000030635 0ustar00coreycorey00000000000000# Copyright (c) 2015 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from oslo_utils import uuidutils from neutron.agent.l3 import dvr_snat_ns from neutron.agent.l3 import namespace_manager from neutron.agent.l3 import namespaces from neutron.agent.linux import ip_lib from neutron.tests.functional import base _uuid = uuidutils.generate_uuid class NamespaceManagerTestFramework(base.BaseSudoTestCase): def setUp(self): super(NamespaceManagerTestFramework, self).setUp() self.agent_conf = cfg.CONF self.metadata_driver_mock = mock.Mock() self.namespace_manager = namespace_manager.NamespaceManager( self.agent_conf, driver=None, metadata_driver=self.metadata_driver_mock) def _create_namespace(self, router_id, ns_class): namespace = ns_class(router_id, self.agent_conf, driver=None, use_ipv6=False) namespace.create() self.addCleanup(self._delete_namespace, namespace) return namespace.name def _delete_namespace(self, namespace): try: namespace.delete() except RuntimeError as e: # If the namespace didn't exist when delete was attempted, mission # accomplished. 
Otherwise, re-raise the exception if 'No such file or directory' not in str(e): raise e def _namespace_exists(self, namespace): return ip_lib.network_namespace_exists(namespace) class NamespaceManagerTestCase(NamespaceManagerTestFramework): def test_namespace_manager(self): router_id = _uuid() router_id_to_delete = _uuid() to_keep = set() to_delete = set() to_retrieve = set() to_keep.add(self._create_namespace(router_id, namespaces.RouterNamespace)) to_keep.add(self._create_namespace(router_id, dvr_snat_ns.SnatNamespace)) to_delete.add(self._create_namespace(router_id_to_delete, dvr_snat_ns.SnatNamespace)) to_retrieve = to_keep | to_delete with mock.patch.object(namespace_manager.NamespaceManager, 'list_all', return_value=to_retrieve): with self.namespace_manager as ns_manager: for ns_name in to_keep: id_to_keep = ns_manager.get_prefix_and_id(ns_name)[1] ns_manager.keep_router(id_to_keep) for ns_name in to_keep: self.assertTrue(self._namespace_exists(ns_name)) for ns_name in to_delete: (self.metadata_driver_mock.destroy_monitored_metadata_proxy. assert_called_once_with(mock.ANY, router_id_to_delete, self.agent_conf, ns_name)) self.assertFalse(self._namespace_exists(ns_name)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3990455 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/0000755000175000017500000000000000000000000024376 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/__init__.py0000644000175000017500000000000000000000000026475 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/base.py0000644000175000017500000000242500000000000025665 0ustar00coreycorey00000000000000# Copyright 2014 Cisco Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.tests.common.exclusive_resources import ip_address from neutron.tests.functional import base MARK_VALUE = '0x1' MARK_MASK = '0xffffffff' ICMP_MARK_RULE = ('-j MARK --set-xmark %(value)s/%(mask)s' % {'value': MARK_VALUE, 'mask': MARK_MASK}) MARKED_BLOCK_RULE = '-m mark --mark %s -j DROP' % MARK_VALUE ICMP_BLOCK_RULE = '-p icmp -j DROP' class BaseOVSLinuxTestCase(base.BaseSudoTestCase): def get_test_net_address(self, block): """Return exclusive address based on RFC 5737. 
:param block: One of constants 1, 2 or 3 """ return str(self.useFixture( ip_address.get_test_net_address_fixture(block)).address) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3990455 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/bin/0000755000175000017500000000000000000000000025146 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/bin/__init__.py0000644000175000017500000000000000000000000027245 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/bin/ip_monitor.py0000755000175000017500000000407400000000000027707 0ustar00coreycorey00000000000000#! /usr/bin/env python # Copyright (c) 2019 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import signal import sys import threading from oslo_serialization import jsonutils from six.moves import queue from neutron.agent.linux import ip_lib EVENT_STOP = threading.Event() EVENT_STARTED = threading.Event() IP_MONITOR = None READ_QUEUE = None def sigterm_handler(_signo, _stack_frame): global EVENT_STOP global IP_MONITOR global READ_QUEUE EVENT_STOP.set() IP_MONITOR.join() READ_QUEUE.join() exit(0) signal.signal(signal.SIGTERM, sigterm_handler) def read_queue(temp_file, _queue, event_stop, event_started): event_started.wait() with open(temp_file, 'w') as f: f.write('') while not event_stop.is_set(): try: retval = _queue.get(timeout=1) except queue.Empty: retval = None if retval: with open(temp_file, 'a+') as f: f.write(jsonutils.dumps(retval) + '\n') def main(temp_file, namespace): global IP_MONITOR global READ_QUEUE namespace = None if namespace == 'None' else namespace _queue = queue.Queue() IP_MONITOR = threading.Thread( target=ip_lib.ip_monitor, args=(namespace, _queue, EVENT_STOP, EVENT_STARTED)) IP_MONITOR.start() READ_QUEUE = threading.Thread( target=read_queue, args=(temp_file, _queue, EVENT_STOP, EVENT_STARTED)) READ_QUEUE.start() READ_QUEUE.join() if __name__ == "__main__": main(sys.argv[1], sys.argv[2]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/bin/ipt_binname.py0000755000175000017500000000232100000000000030006 0ustar00coreycorey00000000000000#! /usr/bin/env python # Copyright (C) 2014 VA Linux Systems Japan K.K. # Copyright (C) 2014 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import print_function import sys import eventlet def print_binary_name(): # NOTE(yamamoto): Don't move this import to module-level. # The aim is to test importing from eventlet non-main thread. # See Bug #1367075 for details. from neutron.agent.linux import iptables_manager print(iptables_manager.binary_name) if __name__ == "__main__": if 'spawn' in sys.argv: eventlet.spawn(print_binary_name).wait() else: print_binary_name() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/helpers.py0000644000175000017500000000550100000000000026413 0ustar00coreycorey00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import multiprocessing import os import time import fixtures from neutron.agent.linux import utils from neutron.tests import tools class RecursivePermDirFixture(fixtures.Fixture): """Ensure at least perms permissions on directory and ancestors.""" def __init__(self, directory, perms): super(RecursivePermDirFixture, self).__init__() self.directory = directory self.least_perms = perms def _setUp(self): previous_directory = None current_directory = self.directory while previous_directory != current_directory: perms = os.stat(current_directory).st_mode if perms & self.least_perms != self.least_perms: os.chmod(current_directory, perms | self.least_perms) previous_directory = current_directory current_directory = os.path.dirname(current_directory) class AdminDirFixture(fixtures.Fixture): """Handle directory create/delete with admin permissions required""" def __init__(self, directory): super(AdminDirFixture, self).__init__() self.directory = directory def _setUp(self): # NOTE(cbrandily): Ensure we will not delete a directory existing # before test run during cleanup. 
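# Failing fast instead of adopting a pre-existing directory is
# deliberate: the cleanup registered below runs "rm -r" as root, so
# reusing an operator's directory could destroy real data.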
if os.path.exists(self.directory): tools.fail('%s already exists' % self.directory) create_cmd = ['mkdir', '-p', self.directory] delete_cmd = ['rm', '-r', self.directory] utils.execute(create_cmd, run_as_root=True) self.addCleanup(utils.execute, delete_cmd, run_as_root=True) class SleepyProcessFixture(fixtures.Fixture): """Process fixture to perform time.sleep for a given number of seconds.""" def __init__(self, timeout=60): super(SleepyProcessFixture, self).__init__() self.timeout = timeout @staticmethod def yawn(seconds): time.sleep(seconds) def _setUp(self): self.process = multiprocessing.Process(target=self.yawn, args=[self.timeout]) self.process.start() self.addCleanup(self.destroy) def destroy(self): self.process.terminate() @property def pid(self): return self.process.pid ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3990455 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/openvswitch_firewall/0000755000175000017500000000000000000000000030634 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/openvswitch_firewall/__init__.py0000644000175000017500000000000000000000000032733 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/openvswitch_firewall/test_firewall.py0000644000175000017500000000444400000000000034060 0ustar00coreycorey00000000000000# Copyright 2016, Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import testtools from neutron.agent.linux.openvswitch_firewall import exceptions from neutron.agent.linux.openvswitch_firewall import firewall from neutron.tests.common import net_helpers from neutron.tests.functional import base class TestGetTagFromOtherConfig(base.BaseSudoTestCase): def setUp(self): super(TestGetTagFromOtherConfig, self).setUp() self.bridge = self.useFixture(net_helpers.OVSBridgeFixture()).bridge def set_port_tag(self, port_name, tag): self.bridge.set_db_attribute( 'Port', port_name, 'other_config', {'tag': str(tag)}) def test_correct_tag_is_returned(self): port_number = 42 port = self.useFixture(net_helpers.OVSPortFixture(self.bridge)).port self.set_port_tag(port.name, port_number) observed = firewall.get_tag_from_other_config(self.bridge, port.name) self.assertEqual(port_number, observed) def test_not_existing_name_raises_exception(self): with testtools.ExpectedException(exceptions.OVSFWTagNotFound): firewall.get_tag_from_other_config(self.bridge, 'foo') def test_bad_tag_value_raises_exception(self): port = self.useFixture(net_helpers.OVSPortFixture(self.bridge)).port self.set_port_tag(port.name, 'foo') with testtools.ExpectedException(exceptions.OVSFWTagNotFound): firewall.get_tag_from_other_config(self.bridge, port.name) def test_no_value_set_for_other_config_raises_exception(self): port = self.useFixture(net_helpers.OVSPortFixture(self.bridge)).port with testtools.ExpectedException(exceptions.OVSFWTagNotFound): firewall.get_tag_from_other_config(self.bridge, port.name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/openvswitch_firewall/test_iptables.py0000644000175000017500000001044300000000000034052 0ustar00coreycorey00000000000000# Copyright 2017 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants from oslo_utils import uuidutils from neutron.agent.linux import iptables_firewall import neutron.agent.linux.openvswitch_firewall.firewall as ovs_fw_mod import neutron.agent.linux.openvswitch_firewall.iptables as iptables_helper from neutron.tests.common import conn_testers from neutron.tests.common import net_helpers from neutron.tests.functional.agent import test_firewall from neutron.tests.functional import base class TestHelper(base.BaseSudoTestCase): def setUp(self): super(TestHelper, self).setUp() self.bridge = self.useFixture(net_helpers.OVSBridgeFixture()).bridge self.namespace = self.useFixture(net_helpers.NamespaceFixture()).name self.iptables_firewall = ( iptables_firewall.OVSHybridIptablesFirewallDriver(self.namespace)) def add_sg_rules(self, port, security_group_rules): """Add security group rules to given port. Method creates a security group for isolated firewall use. Adds passed rules to it and calls to prepare_port_filter() to the firewall driver. Method returns port description. 
""" sg_id = uuidutils.generate_uuid() self.iptables_firewall.update_security_group_rules( sg_id, security_group_rules) description = { 'admin_state_up': True, 'device': port.port_id, 'device_owner': test_firewall.DEVICE_OWNER_COMPUTE, 'fixed_ips': ['192.168.0.1'], 'mac_address': port.port.link.address, 'port_security_enabled': True, 'security_groups': [sg_id], 'status': 'ACTIVE', 'network_id': uuidutils.generate_uuid()} self.iptables_firewall.prepare_port_filter(description) return description def _set_vlan_tag_on_port(self, port, tag): qvo_dev_name = iptables_helper.get_device_port_name(port.port_id) conn_testers.OVSBaseConnectionTester.set_tag( qvo_dev_name, self.bridge, tag) def _prepare_port_and_description(self, security_group_rules): hybrid_port = self.useFixture( net_helpers.OVSPortFixture( self.bridge, self.namespace, hybrid_plug=True)) self._set_vlan_tag_on_port(hybrid_port, 1) description = self.add_sg_rules(hybrid_port, security_group_rules) return hybrid_port, description def _check_no_iptables_rules_for_port(self, port): tap_name = self.iptables_firewall._get_device_name( {'device': port.port_id}) iptables_rules = ( self.iptables_firewall.iptables.get_rules_for_table('filter')) for line in iptables_rules: if tap_name in line: raise Exception("port %s still has iptables rules in %s" % ( tap_name, line)) def test_migration(self): sg_rules = [{'ethertype': constants.IPv4, 'direction': constants.INGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_ICMP}, {'ethertype': constants.IPv4, 'direction': constants.EGRESS_DIRECTION}] port, desc = self._prepare_port_and_description(sg_rules) ovs_firewall = ovs_fw_mod.OVSFirewallDriver(self.bridge) # Check that iptables driver was set and replace it with the one that # has access to namespace if isinstance( ovs_firewall.iptables_helper.iptables_driver, iptables_firewall.OVSHybridIptablesFirewallDriver): ovs_firewall.iptables_helper.iptables_driver = ( self.iptables_firewall) ovs_firewall.prepare_port_filter(desc) self._check_no_iptables_rules_for_port(port) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/simple_daemon.py0000644000175000017500000000336300000000000027571 0ustar00coreycorey00000000000000# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from oslo_config import cfg from neutron.agent.linux import daemon def main(): class SimpleDaemon(daemon.Daemon): """The purpose of this daemon is to serve as an example, and also as a dummy daemon, which can be invoked by functional testing, it does nothing but setting the pid file, and staying detached in the background. 
""" def run(self): while True: time.sleep(10) opts = [ cfg.StrOpt('uuid', help='uuid provided from the command line ' 'so external_process can track us via /proc/' 'cmdline interface.', required=True), cfg.StrOpt('pid_file', help='Location of pid file of this process.', required=True) ] cfg.CONF.register_cli_opts(opts) # Don't get the default configuration file cfg.CONF(project='neutron', default_config_files=[]) simple_daemon = SimpleDaemon(cfg.CONF.pid_file, uuid=cfg.CONF.uuid) simple_daemon.start() if __name__ == "__main__": main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/test_async_process.py0000644000175000017500000000557400000000000030675 0ustar00coreycorey00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import eventlet import six from neutron._i18n import _ from neutron.agent.common import async_process from neutron.agent.linux import utils from neutron.common import utils as common_utils from neutron.tests.functional import base class AsyncProcessTestFramework(base.BaseLoggingTestCase): def setUp(self): super(AsyncProcessTestFramework, self).setUp() self.test_file_path = self.get_temp_file_path('test_async_process.tmp') self.data = [six.text_type(x) for x in range(4)] with open(self.test_file_path, 'w') as f: f.writelines('%s\n' % item for item in self.data) def _check_stdout(self, proc): # Ensure that all the output from the file is read output = [] while output != self.data: new_output = list(proc.iter_stdout()) if new_output: output += new_output eventlet.sleep(0.01) class TestAsyncProcess(AsyncProcessTestFramework): def _safe_stop(self, proc): try: proc.stop() except async_process.AsyncProcessException: pass def test_stopping_async_process_lifecycle(self): proc = async_process.AsyncProcess(['tail', '-f', self.test_file_path]) self.addCleanup(self._safe_stop, proc) proc.start(block=True) self._check_stdout(proc) proc.stop(block=True) # Ensure that the process and greenthreads have stopped proc._process.wait() self.assertEqual(proc._process.returncode, -9) for watcher in proc._watchers: watcher.wait() def test_async_process_respawns(self): proc = async_process.AsyncProcess(['tail', '-f', self.test_file_path], respawn_interval=0) self.addCleanup(self._safe_stop, proc) proc.start() # Ensure that the same output is read twice self._check_stdout(proc) pid = proc.pid utils.execute(['kill', '-9', pid]) common_utils.wait_until_true( lambda: proc.is_active() and pid != proc.pid, timeout=5, sleep=0.01, exception=RuntimeError(_("Async process didn't respawn"))) self._check_stdout(proc) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/test_bridge_lib.py0000644000175000017500000002005600000000000030074 0ustar00coreycorey00000000000000# Copyright (c) 2015 Thales Services SAS # # Licensed under the Apache License, 
Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import random import re import netaddr from neutron_lib import constants from oslo_utils import uuidutils import testscenarios from neutron.agent.linux import bridge_lib from neutron.agent.linux import ip_lib from neutron.privileged.agent.linux import ip_lib as priv_ip_lib from neutron.tests.common import net_helpers from neutron.tests.functional import base class BridgeLibTestCase(base.BaseSudoTestCase): def setUp(self): super(BridgeLibTestCase, self).setUp() self.bridge, self.port_fixture = self.create_bridge_port_fixture() def create_bridge_port_fixture(self): bridge = self.useFixture( net_helpers.LinuxBridgeFixture(namespace=None)).bridge port_fixture = self.useFixture( net_helpers.LinuxBridgePortFixture( bridge, port_id=uuidutils.generate_uuid())) return bridge, port_fixture def test_is_bridged_interface(self): self.assertTrue( bridge_lib.is_bridged_interface(self.port_fixture.br_port.name)) def test_is_not_bridged_interface(self): self.assertFalse( bridge_lib.is_bridged_interface(self.port_fixture.port.name)) def test_get_bridge_names(self): self.assertIn(self.bridge.name, bridge_lib.get_bridge_names()) def test_get_interface_ifindex(self): port = self.port_fixture.br_port t1 = bridge_lib.get_interface_ifindex(str(port)) self.port_fixture.veth_fixture.destroy() self.port_fixture.veth_fixture._setUp() t2 = bridge_lib.get_interface_ifindex(str(port)) self.assertIsNotNone(t1) self.assertIsNotNone(t2) self.assertGreaterEqual(t2, t1) def test_get_interface_bridge(self): bridge = bridge_lib.BridgeDevice.get_interface_bridge( self.port_fixture.br_port.name) self.assertEqual(self.bridge.name, bridge.name) def test_get_interface_no_bridge(self): bridge = bridge_lib.BridgeDevice.get_interface_bridge( self.port_fixture.port.name) self.assertIsNone(bridge) def test_get_interfaces(self): self.assertEqual( [self.port_fixture.br_port.name], self.bridge.get_interfaces()) def test_get_interfaces_no_bridge(self): bridge = bridge_lib.BridgeDevice('--fake--') self.assertEqual([], bridge.get_interfaces()) def test_disable_ipv6(self): sysfs_path = ("/proc/sys/net/ipv6/conf/%s/disable_ipv6" % self.bridge.name) # first, make sure it's enabled with open(sysfs_path, 'r') as sysfs_disable_ipv6_file: sysfs_disable_ipv6 = sysfs_disable_ipv6_file.read() self.assertEqual("0\n", sysfs_disable_ipv6) self.assertEqual(0, self.bridge.disable_ipv6()) with open(sysfs_path, 'r') as sysfs_disable_ipv6_file: sysfs_disable_ipv6 = sysfs_disable_ipv6_file.read() self.assertEqual("1\n", sysfs_disable_ipv6) class FdbInterfaceTestCase(testscenarios.WithScenarios, base.BaseSudoTestCase): MAC1 = 'ca:fe:ca:fe:ca:fe' MAC2 = 'ca:fe:ca:fe:ca:01' RULE_PATTERN = (r"^(?P([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})) " r"(dst (?P\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b))*") scenarios = [ ('namespace', {'namespace': 'ns_' + uuidutils.generate_uuid()}), ('no_namespace', {'namespace': None}) ] def setUp(self): super(FdbInterfaceTestCase, self).setUp() _uuid = uuidutils.generate_uuid() self.device = ('int_' + _uuid)[:constants.DEVICE_NAME_MAX_LEN] 
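        # NOTE: both interface names below are truncated to
        # constants.DEVICE_NAME_MAX_LEN so the randomly generated,
        # UUID-based names stay within the kernel's IFNAMSIZ limit for
        # network device names.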
self.device_vxlan = ('vxlan_' + _uuid)[:constants.DEVICE_NAME_MAX_LEN] self.ip = '10.220.0.1/24' self.ip_vxlan = '10.221.0.1/24' if self.namespace: priv_ip_lib.create_netns(self.namespace) else: self._cleanup() self.addCleanup(self._cleanup) vni = random.randint(1, 2**24 - 1) ip_wrapper = ip_lib.IPWrapper(self.namespace) ip_wrapper.add_dummy(self.device) ip_wrapper.add_vxlan(self.device_vxlan, vni, dev=self.device) ip_device = ip_lib.IPDevice(self.device, self.namespace) ip_device.link.set_up() ip_device.addr.add(self.ip) ip_device_vxlan = ip_lib.IPDevice(self.device_vxlan, self.namespace) ip_device_vxlan.link.set_up() ip_device_vxlan.addr.add(self.ip_vxlan) def _cleanup(self): if self.namespace: priv_ip_lib.remove_netns(self.namespace) else: for device in (self.device_vxlan, self.device): try: priv_ip_lib.delete_interface(device, None) except priv_ip_lib.NetworkInterfaceNotFound: pass def _list_fdb_rules(self, device): output = bridge_lib.FdbInterface.show(dev=device, namespace=self.namespace) rules = re.finditer(self.RULE_PATTERN, output, flags=re.MULTILINE) ret = {} for rule in rules: ret[rule.groupdict()['mac']] = rule.groupdict()['ip'] return ret def test_add_delete(self): self.assertNotIn(self.MAC1, self._list_fdb_rules(self.device)) bridge_lib.FdbInterface.add(self.MAC1, self.device, namespace=self.namespace) self.assertIn(self.MAC1, self._list_fdb_rules(self.device)) bridge_lib.FdbInterface.delete(self.MAC1, self.device, namespace=self.namespace) self.assertNotIn(self.MAC1, self._list_fdb_rules(self.device)) def test_add_delete_dst(self): self.assertNotIn(self.MAC1, self._list_fdb_rules(self.device_vxlan)) bridge_lib.FdbInterface.add( self.MAC1, self.device_vxlan, namespace=self.namespace, ip_dst=str(netaddr.IPNetwork(self.ip).ip)) rules = self._list_fdb_rules(self.device_vxlan) self.assertEqual(str(netaddr.IPNetwork(self.ip).ip), rules[self.MAC1]) bridge_lib.FdbInterface.delete( self.MAC1, self.device_vxlan, namespace=self.namespace, ip_dst=str(netaddr.IPNetwork(self.ip).ip)) self.assertNotIn(self.MAC1, self._list_fdb_rules(self.device_vxlan)) def test_append(self): self.assertNotIn(self.MAC1, self._list_fdb_rules(self.device)) bridge_lib.FdbInterface.append(self.MAC1, self.device, namespace=self.namespace) self.assertIn(self.MAC1, self._list_fdb_rules(self.device)) def test_append_dst(self): self.assertNotIn(self.MAC1, self._list_fdb_rules(self.device_vxlan)) bridge_lib.FdbInterface.append( self.MAC1, self.device_vxlan, namespace=self.namespace, ip_dst=str(netaddr.IPNetwork(self.ip).ip)) rules = self._list_fdb_rules(self.device_vxlan) self.assertEqual(str(netaddr.IPNetwork(self.ip).ip), rules[self.MAC1]) def test_replace(self): self.assertNotIn(self.MAC1, self._list_fdb_rules(self.device)) bridge_lib.FdbInterface.add( self.MAC1, self.device_vxlan, namespace=self.namespace, ip_dst=str(netaddr.IPNetwork(self.ip).ip)) rules = self._list_fdb_rules(self.device_vxlan) self.assertEqual(str(netaddr.IPNetwork(self.ip).ip), rules[self.MAC1]) bridge_lib.FdbInterface.replace( self.MAC1, self.device_vxlan, namespace=self.namespace, ip_dst='1.1.1.1') rules = self._list_fdb_rules(self.device_vxlan) self.assertEqual('1.1.1.1', rules[self.MAC1]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/test_dhcp.py0000644000175000017500000001007700000000000026732 0ustar00coreycorey00000000000000# Copyright (c) 2015 Mirantis, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib import constants from oslo_config import cfg from neutron.agent.linux import dhcp from neutron.agent.linux import ip_lib from neutron.common import utils as common_utils from neutron.conf.agent import common as config from neutron.conf.agent import dhcp as dhcp_conf from neutron.conf import common as common_conf from neutron.conf.plugins.ml2.drivers import ovs_conf from neutron.tests import base as tests_base from neutron.tests.common import net_helpers from neutron.tests.functional import base as functional_base class TestDhcp(functional_base.BaseSudoTestCase): def setUp(self): super(TestDhcp, self).setUp() conf = cfg.ConfigOpts() config.register_interface_driver_opts_helper(conf) config.register_interface_opts(conf) conf.register_opts(common_conf.core_opts) conf.register_opts(dhcp_conf.DHCP_AGENT_OPTS) ovs_conf.register_ovs_opts(conf) conf.set_override('interface_driver', 'openvswitch') conf.set_override('host', 'foo-host') self.conf = conf br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge self.conf.set_override('integration_bridge', br_int.br_name, 'OVS') def test_cleanup_stale_devices(self): plugin = mock.MagicMock() dev_mgr = dhcp.DeviceManager(self.conf, plugin) network = { 'id': 'foo_id', 'tenant_id': 'foo_tenant', 'namespace': 'qdhcp-foo_id', 'ports': [], 'subnets': [tests_base.AttributeDict({'id': 'subnet_foo_id', 'enable_dhcp': True, 'ipv6_address_mode': None, 'ipv6_ra_mode': None, 'cidr': '10.0.0.0/24', 'ip_version': constants.IP_VERSION_4, 'gateway_ip': '10.0.0.1'})]} dhcp_port = { 'id': 'foo_port_id', 'mac_address': '10:22:33:44:55:67', 'fixed_ips': [tests_base.AttributeDict( {'subnet_id': 'subnet_foo_id', 'ip_address': '10.0.0.1'})] } plugin.create_dhcp_port.return_value = tests_base.AttributeDict( dhcp_port) dev_mgr.driver.plug("foo_id", "foo_id2", "tapfoo_id2", "10:22:33:44:55:68", namespace="qdhcp-foo_id") dev_mgr.driver.plug("foo_id", "foo_id3", "tapfoo_id3", "10:22:33:44:55:69", namespace="qdhcp-foo_id") ipw = ip_lib.IPWrapper(namespace="qdhcp-foo_id") devices = ipw.get_devices() self.addCleanup(ipw.netns.delete, 'qdhcp-foo_id') self.assertEqual(sorted(["tapfoo_id2", "tapfoo_id3"]), sorted(map(str, devices))) # setting up dhcp for the network dev_mgr.setup(tests_base.AttributeDict(network)) common_utils.wait_until_true( lambda: 1 == len(ipw.get_devices()), timeout=5, sleep=0.1, exception=RuntimeError("only one non-loopback device must remain")) devices = ipw.get_devices() self.assertEqual("tapfoo_port_id", devices[0].name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/test_interface.py0000644000175000017500000001302000000000000027743 0ustar00coreycorey00000000000000# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools from neutron_lib import exceptions from neutron_lib.utils import net from oslo_config import cfg from oslo_utils import uuidutils import testtools from neutron.agent.linux import interface from neutron.agent.linux import ip_lib from neutron.common import utils from neutron.conf.agent import common as config from neutron.tests.common import net_helpers from neutron.tests.functional.agent.linux import base as linux_base from neutron.tests.functional import base class InterfaceDriverTestCaseMixin(object): def _test_mtu_set_after_action(self, device_name, br_name, namespace, action=None): mac_address = net.get_random_mac('fa:16:3e:00:00:00'.split(':')) plug = functools.partial( self.interface.plug, network_id=uuidutils.generate_uuid(), port_id=uuidutils.generate_uuid(), device_name=device_name, mac_address=mac_address, bridge=self.bridge_name, namespace=namespace) plug(mtu=1500) self.assertTrue(ip_lib.device_exists(device_name, namespace)) action = action or plug for mtu in (1450, 1500, 9000, 9000, 1450): action(mtu=mtu) self.assertEqual( mtu, ip_lib.IPDevice(device_name, namespace=namespace).link.mtu) def test_plug_multiple_calls_update_mtu(self): device_name = utils.get_rand_name() namespace = self.useFixture(net_helpers.NamespaceFixture()).name self._test_mtu_set_after_action( device_name, self.bridge_name, namespace) def test_set_mtu(self): device_name = utils.get_rand_name() namespace = self.useFixture(net_helpers.NamespaceFixture()).name self._test_mtu_set_after_action( device_name, self.bridge_name, namespace, functools.partial( self.interface.set_mtu, device_name=device_name, namespace=namespace)) class OVSInterfaceDriverTestCase(linux_base.BaseOVSLinuxTestCase, InterfaceDriverTestCaseMixin): def setUp(self): super(OVSInterfaceDriverTestCase, self).setUp() conf = cfg.ConfigOpts() config.register_interface_opts(conf) self.interface = interface.OVSInterfaceDriver(conf) self.bridge = self.useFixture(net_helpers.OVSBridgeFixture()).bridge @property def bridge_name(self): return self.bridge.br_name def test_plug_checks_if_bridge_exists(self): with testtools.ExpectedException(exceptions.BridgeDoesNotExist): self.interface.plug(network_id=42, port_id=71, device_name='not_a_device', mac_address='', bridge='not_a_bridge', namespace='not_a_namespace') def test_plug_succeeds(self): device_name = utils.get_rand_name() mac_address = net.get_random_mac('fa:16:3e:00:00:00'.split(':')) namespace = self.useFixture(net_helpers.NamespaceFixture()).name self.assertFalse(self.bridge.get_port_name_list()) self.interface.plug(network_id=uuidutils.generate_uuid(), port_id=uuidutils.generate_uuid(), device_name=device_name, mac_address=mac_address, bridge=self.bridge.br_name, namespace=namespace) self.assertIn(device_name, self.bridge.get_port_name_list()) self.assertTrue(ip_lib.device_exists(device_name, namespace)) def test_plug_with_namespace_sets_mtu_higher_than_bridge(self): # First, add a new linuxbridge port with reduced MTU to OVS bridge lb_bridge = self.useFixture( net_helpers.LinuxBridgeFixture()).bridge lb_bridge_port = self.useFixture( net_helpers.LinuxBridgePortFixture(lb_bridge)) 
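        # The port MTU is lowered below every value exercised by
        # _test_mtu_set_after_action() (1450-9000), so the test proves the
        # plugged device's MTU is set explicitly rather than clamped to
        # the lowest MTU present on the bridge.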
lb_bridge_port.port.link.set_mtu(1400) self.bridge.add_port(lb_bridge_port.port.name) device_name = utils.get_rand_name() namespace = self.useFixture(net_helpers.NamespaceFixture()).name # Now plug a device with intended MTU that is higher than for the port # above and validate that its MTU is not reduced to the least MTU on # the bridge self._test_mtu_set_after_action( device_name, self.bridge_name, namespace) class BridgeInterfaceDriverTestCase(base.BaseSudoTestCase, InterfaceDriverTestCaseMixin): def setUp(self): super(BridgeInterfaceDriverTestCase, self).setUp() conf = cfg.ConfigOpts() config.register_interface_opts(conf) self.interface = interface.BridgeInterfaceDriver(conf) self.bridge = self.useFixture(net_helpers.LinuxBridgeFixture()).bridge @property def bridge_name(self): return self.bridge.name ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/test_ip_lib.py0000644000175000017500000011643300000000000027255 0ustar00coreycorey00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import itertools import signal import netaddr from neutron_lib import constants from neutron_lib.utils import net from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import importutils from oslo_utils import uuidutils from pyroute2.iproute import linux as iproute_linux import testscenarios import testtools from neutron.agent.common import async_process from neutron.agent.linux import ip_lib from neutron.common import utils from neutron.conf.agent import common as config from neutron.privileged.agent.linux import ip_lib as priv_ip_lib from neutron.tests.common import net_helpers from neutron.tests.functional.agent.linux.bin import ip_monitor from neutron.tests.functional import base as functional_base LOG = logging.getLogger(__name__) Device = collections.namedtuple('Device', 'name ip_cidrs mac_address namespace') WRONG_IP = '0.0.0.0' TEST_IP = '240.0.0.1' TEST_IP_NEIGH = '240.0.0.2' TEST_IP_SECONDARY = '240.0.0.3' class IpLibTestFramework(functional_base.BaseSudoTestCase): def setUp(self): super(IpLibTestFramework, self).setUp() self._configure() def _configure(self): config.register_interface_driver_opts_helper(cfg.CONF) cfg.CONF.set_override( 'interface_driver', 'neutron.agent.linux.interface.OVSInterfaceDriver') config.register_interface_opts() self.driver = importutils.import_object(cfg.CONF.interface_driver, cfg.CONF) def generate_device_details(self, name=None, ip_cidrs=None, mac_address=None, namespace=None): if ip_cidrs is None: ip_cidrs = ["%s/24" % TEST_IP] return Device(name or utils.get_rand_name(), ip_cidrs, mac_address or net.get_random_mac('fa:16:3e:00:00:00'.split(':')), namespace or utils.get_rand_name()) def _safe_delete_device(self, device): try: device.link.delete() except RuntimeError: LOG.debug('Could not delete %s, was it already 
deleted?', device) def manage_device(self, attr): """Create a tuntap with the specified attributes. The device is cleaned up at the end of the test. :param attr: A Device namedtuple :return: A tuntap ip_lib.IPDevice """ ip = ip_lib.IPWrapper(namespace=attr.namespace) if attr.namespace: ip.netns.add(attr.namespace) self.addCleanup(ip.netns.delete, attr.namespace) tap_device = ip.add_tuntap(attr.name) self.addCleanup(self._safe_delete_device, tap_device) tap_device.link.set_address(attr.mac_address) self.driver.init_l3(attr.name, attr.ip_cidrs, namespace=attr.namespace) tap_device.link.set_up() return tap_device class IpLibTestCase(IpLibTestFramework): def _check_routes(self, expected_routes, actual_routes): actual_routes = [{key: route[key] for key in expected_routes[0].keys()} for route in actual_routes] self.assertEqual(expected_routes, actual_routes) def test_rules_lifecycle(self): PRIORITY = 32768 TABLE = 16 attr = self.generate_device_details() device = self.manage_device(attr) test_cases = { constants.IP_VERSION_4: [ { 'ip': '1.1.1.1', 'to': '8.8.8.0/24' }, { 'ip': '1.1.1.1', 'iif': device.name, 'to': '7.7.7.0/24' } ], constants.IP_VERSION_6: [ { 'ip': 'abcd::1', 'to': '1234::/64' }, { 'ip': 'abcd::1', 'iif': device.name, 'to': '4567::/64' } ] } expected_rules = { constants.IP_VERSION_4: [ { 'from': '1.1.1.1', 'to': '8.8.8.0/24', 'priority': str(PRIORITY), 'table': str(TABLE), 'type': 'unicast' }, { 'from': '0.0.0.0/0', 'to': '7.7.7.0/24', 'iif': device.name, 'priority': str(PRIORITY), 'table': str(TABLE), 'type': 'unicast' } ], constants.IP_VERSION_6: [ { 'from': 'abcd::1', 'to': '1234::/64', 'priority': str(PRIORITY), 'table': str(TABLE), 'type': 'unicast' }, { 'from': '::/0', 'to': '4567::/64', 'iif': device.name, 'priority': str(PRIORITY), 'table': str(TABLE), 'type': 'unicast', } ] } for ip_version, test_case in test_cases.items(): for rule in test_case: ip_lib.add_ip_rule(namespace=device.namespace, table=TABLE, priority=PRIORITY, **rule) rules = ip_lib.list_ip_rules(device.namespace, ip_version) for expected_rule in expected_rules[ip_version]: self.assertIn(expected_rule, rules) for rule in test_case: ip_lib.delete_ip_rule(device.namespace, table=TABLE, priority=PRIORITY, **rule) rules = priv_ip_lib.list_ip_rules(device.namespace, ip_version) for expected_rule in expected_rules[ip_version]: self.assertNotIn(expected_rule, rules) def test_device_exists(self): attr = self.generate_device_details() self.assertFalse( ip_lib.device_exists(attr.name, namespace=attr.namespace)) device = self.manage_device(attr) self.assertTrue( ip_lib.device_exists(device.name, namespace=attr.namespace)) self.assertFalse( ip_lib.device_exists(attr.name, namespace='wrong_namespace')) device.link.delete() self.assertFalse( ip_lib.device_exists(attr.name, namespace=attr.namespace)) def test_ipdevice_exists(self): attr = self.generate_device_details() device = self.manage_device(attr) self.assertTrue(device.exists()) device.link.delete() self.assertFalse(device.exists()) def test_vlan_exists(self): attr = self.generate_device_details() ip = ip_lib.IPWrapper(namespace=attr.namespace) ip.netns.add(attr.namespace) self.addCleanup(ip.netns.delete, attr.namespace) priv_ip_lib.create_interface(attr.name, attr.namespace, 'dummy') self.assertFalse(ip_lib.vlan_in_use(1999, namespace=attr.namespace)) device = ip.add_vlan('vlan1999', attr.name, 1999) self.assertTrue(ip_lib.vlan_in_use(1999, namespace=attr.namespace)) device.link.delete() self.assertFalse(ip_lib.vlan_in_use(1999, namespace=attr.namespace)) def 
test_vxlan_exists(self): attr = self.generate_device_details() ip = ip_lib.IPWrapper(namespace=attr.namespace) ip.netns.add(attr.namespace) self.addCleanup(ip.netns.delete, attr.namespace) self.assertFalse(ip_lib.vxlan_in_use(9999, namespace=attr.namespace)) device = ip.add_vxlan(attr.name, 9999) self.addCleanup(self._safe_delete_device, device) self.assertTrue(ip_lib.vxlan_in_use(9999, namespace=attr.namespace)) device.link.delete() self.assertFalse(ip_lib.vxlan_in_use(9999, namespace=attr.namespace)) def test_ipwrapper_get_device_by_ip_None(self): ip_wrapper = ip_lib.IPWrapper(namespace=None) self.assertIsNone(ip_wrapper.get_device_by_ip(ip=None)) def test_ipwrapper_get_device_by_ip(self): # We need to pass both IP and cidr values to get_device_by_ip() # to make sure it filters correctly. test_ip = "%s/24" % TEST_IP test_ip_secondary = "%s/24" % TEST_IP_SECONDARY attr = self.generate_device_details( ip_cidrs=[test_ip, test_ip_secondary] ) self.manage_device(attr) ip_wrapper = ip_lib.IPWrapper(namespace=attr.namespace) self.assertEqual(attr.name, ip_wrapper.get_device_by_ip(TEST_IP).name) self.assertEqual(attr.name, ip_wrapper.get_device_by_ip(TEST_IP_SECONDARY).name) self.assertIsNone(ip_wrapper.get_device_by_ip(TEST_IP_NEIGH)) # this is in the same subnet, so will match if we pass as cidr test_ip_neigh = "%s/24" % TEST_IP_NEIGH self.assertEqual(attr.name, ip_wrapper.get_device_by_ip(test_ip_neigh).name) self.assertIsNone(ip_wrapper.get_device_by_ip(WRONG_IP)) def test_device_exists_with_ips_and_mac(self): attr = self.generate_device_details() device = self.manage_device(attr) self.assertTrue( ip_lib.device_exists_with_ips_and_mac(*attr)) wrong_ip_cidr = '10.0.0.1/8' wrong_mac_address = 'aa:aa:aa:aa:aa:aa' attr = self.generate_device_details(name='wrong_name') self.assertFalse( ip_lib.device_exists_with_ips_and_mac(*attr)) attr = self.generate_device_details(ip_cidrs=[wrong_ip_cidr]) self.assertFalse(ip_lib.device_exists_with_ips_and_mac(*attr)) attr = self.generate_device_details(mac_address=wrong_mac_address) self.assertFalse(ip_lib.device_exists_with_ips_and_mac(*attr)) attr = self.generate_device_details(namespace='wrong_namespace') self.assertFalse(ip_lib.device_exists_with_ips_and_mac(*attr)) device.link.delete() def test_get_device_mac(self): attr = self.generate_device_details() device = self.manage_device(attr) mac_address = ip_lib.get_device_mac(attr.name, namespace=attr.namespace) self.assertEqual(attr.mac_address, mac_address) device.link.delete() def test_get_device_mac_too_long_name(self): name = utils.get_rand_name( max_length=constants.DEVICE_NAME_MAX_LEN + 5) attr = self.generate_device_details(name=name) device = self.manage_device(attr) mac_address = ip_lib.get_device_mac(attr.name, namespace=attr.namespace) self.assertEqual(attr.mac_address, mac_address) device.link.delete() def test_gateway_lifecycle(self): attr = self.generate_device_details( ip_cidrs=["%s/24" % TEST_IP, "fd00::1/64"] ) metric = 1000 device = self.manage_device(attr) gateways = { constants.IP_VERSION_4: attr.ip_cidrs[0].split('/')[0], constants.IP_VERSION_6: "fd00::ff" } expected_gateways = { constants.IP_VERSION_4: { 'metric': metric, 'via': gateways[constants.IP_VERSION_4]}, constants.IP_VERSION_6: { 'metric': metric, 'via': gateways[constants.IP_VERSION_6]}} for ip_version, gateway_ip in gateways.items(): device.route.add_gateway(gateway_ip, metric) self._check_routes( [expected_gateways[ip_version]], [device.route.get_gateway(ip_version=ip_version)]) device.route.delete_gateway(gateway_ip) 
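            # Once deleted, the per-IP-version gateway lookup must come
            # back empty, confirming delete_gateway() removed the route
            # added at the top of this loop iteration.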
self.assertIsNone( device.route.get_gateway(ip_version=ip_version)) def test_gateway_flush(self): attr = self.generate_device_details( ip_cidrs=["%s/24" % TEST_IP, "fd00::1/64"] ) device = self.manage_device(attr) gateways = { constants.IP_VERSION_4: attr.ip_cidrs[0].split('/')[0], constants.IP_VERSION_6: "fd00::ff" } for ip_version, gateway_ip in gateways.items(): # Ensure that there is no gateway configured self.assertIsNone( device.route.get_gateway(ip_version=ip_version)) # Now lets add gateway device.route.add_gateway(gateway_ip, table="main") self.assertIsNotNone( device.route.get_gateway(ip_version=ip_version)) # Flush gateway and check that there is no any gateway configured device.route.flush(ip_version, table="main") self.assertIsNone( device.route.get_gateway(ip_version=ip_version)) def test_get_routing_table(self): attr = self.generate_device_details( ip_cidrs=["%s/24" % TEST_IP, "fd00::1/64"] ) device = self.manage_device(attr) device_ip = attr.ip_cidrs[0].split('/')[0] destination = '8.8.8.0/24' device.route.add_route(destination, device_ip) destination6 = 'fd01::/64' device.route.add_route(destination6, "fd00::2") expected_routes = [{'nexthop': device_ip, 'device': attr.name, 'destination': destination, 'scope': 'universe'}, {'nexthop': None, 'device': attr.name, 'destination': str( netaddr.IPNetwork(attr.ip_cidrs[0]).cidr), 'scope': 'link'}] routes = ip_lib.get_routing_table(4, namespace=attr.namespace) self.assertItemsEqual(expected_routes, routes) self.assertIsInstance(routes, list) expected_routes6 = [{'nexthop': "fd00::2", 'device': attr.name, 'destination': destination6, 'scope': 'universe'}, {'nexthop': None, 'device': attr.name, 'destination': str( netaddr.IPNetwork(attr.ip_cidrs[1]).cidr), 'scope': 'universe'}] routes6 = ip_lib.get_routing_table(6, namespace=attr.namespace) self.assertItemsEqual(expected_routes6, routes6) self.assertIsInstance(routes6, list) def test_get_routing_table_no_namespace(self): with testtools.ExpectedException(ip_lib.NetworkNamespaceNotFound): ip_lib.get_routing_table(4, namespace="nonexistent-netns") def test_get_neigh_entries(self): attr = self.generate_device_details( ip_cidrs=["%s/24" % TEST_IP, "fd00::1/64"] ) mac_address = net.get_random_mac('fa:16:3e:00:00:00'.split(':')) device = self.manage_device(attr) device.neigh.add(TEST_IP_NEIGH, mac_address) expected_neighs = [{'dst': TEST_IP_NEIGH, 'lladdr': mac_address, 'device': attr.name}] neighs = device.neigh.dump(4) self.assertItemsEqual(expected_neighs, neighs) self.assertIsInstance(neighs, list) device.neigh.delete(TEST_IP_NEIGH, mac_address) neighs = device.neigh.dump(4, dst=TEST_IP_NEIGH, lladdr=mac_address) self.assertEqual([], neighs) def test_get_neigh_entries_no_namespace(self): with testtools.ExpectedException(ip_lib.NetworkNamespaceNotFound): ip_lib.dump_neigh_entries(4, namespace="nonexistent-netns") def test_get_neigh_entries_no_interface(self): attr = self.generate_device_details( ip_cidrs=["%s/24" % TEST_IP, "fd00::1/64"] ) self.manage_device(attr) with testtools.ExpectedException(ip_lib.NetworkInterfaceNotFound): ip_lib.dump_neigh_entries(4, device="nosuchdevice", namespace=attr.namespace) def test_delete_neigh_entries(self): attr = self.generate_device_details( ip_cidrs=["%s/24" % TEST_IP, "fd00::1/64"] ) mac_address = net.get_random_mac('fa:16:3e:00:00:00'.split(':')) device = self.manage_device(attr) # trying to delete a non-existent entry shouldn't raise an error device.neigh.delete(TEST_IP_NEIGH, mac_address) def _check_for_device_name(self, ip, name, 
should_exist): exist = any(d for d in ip.get_devices() if d.name == name) self.assertEqual(should_exist, exist) def test_veth_exists(self): namespace1 = self.useFixture(net_helpers.NamespaceFixture()) namespace2 = self.useFixture(net_helpers.NamespaceFixture()) dev_name1 = utils.get_rand_name() dev_name2 = utils.get_rand_name() device1, device2 = namespace1.ip_wrapper.add_veth( dev_name1, dev_name2, namespace2.name) self.addCleanup(self._safe_delete_device, device1) self.addCleanup(self._safe_delete_device, device2) self._check_for_device_name(namespace1.ip_wrapper, dev_name1, True) self._check_for_device_name(namespace2.ip_wrapper, dev_name2, True) self._check_for_device_name(namespace1.ip_wrapper, dev_name2, False) self._check_for_device_name(namespace2.ip_wrapper, dev_name1, False) # As it is veth pair, remove of device1 should be enough to remove # both devices device1.link.delete() self._check_for_device_name(namespace1.ip_wrapper, dev_name1, False) self._check_for_device_name(namespace2.ip_wrapper, dev_name2, False) def test_macvtap_exists(self): namespace = self.useFixture(net_helpers.NamespaceFixture()) src_dev_name = utils.get_rand_name() src_dev = namespace.ip_wrapper.add_dummy(src_dev_name) self.addCleanup(self._safe_delete_device, src_dev) dev_name = utils.get_rand_name() device = namespace.ip_wrapper.add_macvtap(dev_name, src_dev_name) self.addCleanup(self._safe_delete_device, device) self._check_for_device_name(namespace.ip_wrapper, dev_name, True) device.link.delete() self._check_for_device_name(namespace.ip_wrapper, dev_name, False) def test_dummy_exists(self): namespace = self.useFixture(net_helpers.NamespaceFixture()) dev_name = utils.get_rand_name() device = namespace.ip_wrapper.add_dummy(dev_name) self.addCleanup(self._safe_delete_device, device) self._check_for_device_name(namespace.ip_wrapper, dev_name, True) device.link.delete() self._check_for_device_name(namespace.ip_wrapper, dev_name, False) def test_set_link_mtu(self): attr = self.generate_device_details() device = self.manage_device(attr) device.link.set_mtu(1450) self.assertEqual(1450, device.link.mtu) # Check if proper exception will be raised when wrong MTU value is # provided self.assertRaises(ip_lib.InvalidArgument, device.link.set_mtu, 1) def test_set_link_allmulticast_on(self): attr = self.generate_device_details() device = self.manage_device(attr) self.assertFalse(device.link.allmulticast) device.link.set_allmulticast_on() self.assertTrue(device.link.allmulticast) def test_set_link_netns(self): attr = self.generate_device_details() device = self.manage_device(attr) original_namespace = device.namespace original_ip_wrapper = ip_lib.IPWrapper(namespace=original_namespace) new_namespace = self.useFixture(net_helpers.NamespaceFixture()) device.link.set_netns(new_namespace.name) self.assertEqual(new_namespace.name, device.namespace) self._check_for_device_name( new_namespace.ip_wrapper, device.name, True) self._check_for_device_name( original_ip_wrapper, device.name, False) def test_set_link_name(self): attr = self.generate_device_details() device = self.manage_device(attr) ip_wrapper = ip_lib.IPWrapper(namespace=device.namespace) original_name = device.name new_name = utils.get_rand_name() # device has to be DOWN to rename it device.link.set_down() device.link.set_name(new_name) self.assertEqual(new_name, device.name) self._check_for_device_name(ip_wrapper, new_name, True) self._check_for_device_name(ip_wrapper, original_name, False) def test_set_link_alias(self): attr = self.generate_device_details() device 
= self.manage_device(attr) alias = utils.get_rand_name() device.link.set_alias(alias) self.assertEqual(alias, device.link.alias) def _add_and_check_ips(self, device, ip_addresses): for cidr, scope, expected_broadcast in ip_addresses: # For IPv4 address add_broadcast flag will be set to True only # if expected_broadcast is given. # For IPv6 add_broadcast flag can be set to True always but # broadcast address will not be set, so expected_broadcast for # IPv6 should be always given as None. add_broadcast = True if cidr.version == constants.IP_VERSION_4: add_broadcast = bool(expected_broadcast) device.addr.add(str(cidr), scope, add_broadcast) device_ips_info = [ (netaddr.IPNetwork(ip_info['cidr']), ip_info['scope'], ip_info['broadcast']) for ip_info in device.addr.list()] self.assertItemsEqual(ip_addresses, device_ips_info) def _flush_ips(self, device, ip_version): device.addr.flush(ip_version) for ip_address in device.addr.list(): cidr = netaddr.IPNetwork(ip_address['cidr']) self.assertNotEqual(ip_version, cidr.version) def test_add_ip_address(self): ip_addresses = [ (netaddr.IPNetwork("10.10.10.10/30"), "global", '10.10.10.11'), (netaddr.IPNetwork("11.11.11.11/28"), "link", None), (netaddr.IPNetwork("2801::1/120"), "global", None), (netaddr.IPNetwork("fe80::/64"), "link", None)] attr = self.generate_device_details(ip_cidrs=[]) device = self.manage_device(attr) self._add_and_check_ips(device, ip_addresses) # Now let's check if adding already existing IP address will raise # RuntimeError ip_address = ip_addresses[0] self.assertRaises(RuntimeError, device.addr.add, str(ip_address[0]), ip_address[1]) def test_delete_ip_address(self): attr = self.generate_device_details() cidr = attr.ip_cidrs[0] device = self.manage_device(attr) device_cidrs = [ip_info['cidr'] for ip_info in device.addr.list()] self.assertIn(cidr, device_cidrs) device.addr.delete(cidr) device_cidrs = [ip_info['cidr'] for ip_info in device.addr.list()] self.assertNotIn(cidr, device_cidrs) # Try to delete not existing IP address, it should be just fine and # finish without any error raised device.addr.delete(cidr) def test_flush_ip_addresses(self): ip_addresses = [ (netaddr.IPNetwork("10.10.10.10/30"), "global", '10.10.10.11'), (netaddr.IPNetwork("11.11.11.11/28"), "link", None), (netaddr.IPNetwork("2801::1/120"), "global", None), (netaddr.IPNetwork("fe80::/64"), "link", None)] attr = self.generate_device_details(ip_cidrs=[]) device = self.manage_device(attr) self._add_and_check_ips(device, ip_addresses) self._flush_ips(device, constants.IP_VERSION_4) self._flush_ips(device, constants.IP_VERSION_6) class TestSetIpNonlocalBind(functional_base.BaseSudoTestCase): def test_assigned_value(self): namespace = self.useFixture(net_helpers.NamespaceFixture()) for expected in (0, 1): failed = ip_lib.set_ip_nonlocal_bind(expected, namespace.name) try: observed = ip_lib.get_ip_nonlocal_bind(namespace.name) except RuntimeError as rte: stat_message = ( 'cannot stat /proc/sys/net/ipv4/ip_nonlocal_bind') if stat_message in str(rte): raise self.skipException( "This kernel doesn't support %s in network " "namespaces." 
% ip_lib.IP_NONLOCAL_BIND) raise self.assertFalse(failed) self.assertEqual(expected, observed) class NamespaceTestCase(functional_base.BaseSudoTestCase): def setUp(self): super(NamespaceTestCase, self).setUp() self.namespace = 'test_ns_' + uuidutils.generate_uuid() ip_lib.create_network_namespace(self.namespace) self.addCleanup(self._delete_namespace) def _delete_namespace(self): ip_lib.delete_network_namespace(self.namespace) def test_network_namespace_exists_ns_exists(self): self.assertTrue(ip_lib.network_namespace_exists(self.namespace)) def test_network_namespace_exists_ns_doesnt_exists(self): self.assertFalse(ip_lib.network_namespace_exists('another_ns')) def test_network_namespace_exists_ns_exists_try_is_ready(self): self.assertTrue(ip_lib.network_namespace_exists(self.namespace, try_is_ready=True)) def test_network_namespace_exists_ns_doesnt_exists_try_is_ready(self): self.assertFalse(ip_lib.network_namespace_exists('another_ns', try_is_ready=True)) class IpMonitorTestCase(testscenarios.WithScenarios, functional_base.BaseLoggingTestCase): scenarios = [ ('namespace', {'namespace': 'ns_' + uuidutils.generate_uuid()}), ('no_namespace', {'namespace': None}) ] def setUp(self): super(IpMonitorTestCase, self).setUp() self.addCleanup(self._cleanup) if self.namespace: priv_ip_lib.create_netns(self.namespace) self.devices = [('int_' + uuidutils.generate_uuid())[ :constants.DEVICE_NAME_MAX_LEN] for _ in range(5)] self.ip_wrapper = ip_lib.IPWrapper(self.namespace) self.temp_file = self.get_temp_file_path('out_' + self.devices[0] + '.tmp') self.proc = self._run_ip_monitor(ip_monitor) def _cleanup(self): self.proc.stop(kill_timeout=10, kill_signal=signal.SIGTERM) if self.namespace: priv_ip_lib.remove_netns(self.namespace) else: for device in self.devices: try: priv_ip_lib.delete_interface(device, self.namespace) except priv_ip_lib.NetworkInterfaceNotFound: pass @staticmethod def _normalize_module_name(name): for suf in ['.pyc', '.pyo']: if name.endswith(suf): return name[:-len(suf)] + '.py' return name def _run_ip_monitor(self, module): executable = self._normalize_module_name(module.__file__) proc = async_process.AsyncProcess( [executable, self.temp_file, str(self.namespace)], run_as_root=True) proc.start(block=True) return proc def _read_file(self, ip_addresses): try: registers = [] with open(self.temp_file, 'r') as f: data = f.read() for line in data.splitlines(): register = jsonutils.loads(line) registers.append({'name': register['name'], 'cidr': register['cidr'], 'event': register['event']}) for ip_address in ip_addresses: if ip_address not in registers: return False return True except (OSError, IOError): return False def _check_read_file(self, ip_addresses): try: utils.wait_until_true(lambda: self._read_file(ip_addresses), timeout=30) except utils.WaitTimeout: with open(self.temp_file, 'r') as f: registers = f.read() self.fail('Defined IP addresses: %s, IP addresses registered: %s' % (ip_addresses, registers)) def _handle_ip_addresses(self, event, ip_addresses): for ip_address in (_ip for _ip in ip_addresses if _ip['event'] == event): ip_device = ip_lib.IPDevice(ip_address['name'], self.namespace) if event == 'removed': ip_device.addr.delete(ip_address['cidr']) if event == 'added': ip_device.addr.add(ip_address['cidr']) def test_add_remove_ip_address_and_interface(self): for device in self.devices: self.ip_wrapper.add_dummy(device) utils.wait_until_true(lambda: self._read_file({}), timeout=30) ip_addresses = [ {'cidr': '192.168.250.1/24', 'event': 'added', 'name': self.devices[0]}, {'cidr': 
'192.168.250.2/24', 'event': 'added', 'name': self.devices[1]}, {'cidr': '192.168.250.3/24', 'event': 'added', 'name': self.devices[2]}, {'cidr': '192.168.250.10/24', 'event': 'added', 'name': self.devices[3]}, {'cidr': '192.168.250.10/24', 'event': 'removed', 'name': self.devices[3]}, {'cidr': '2001:db8::1/64', 'event': 'added', 'name': self.devices[4]}, {'cidr': '2001:db8::2/64', 'event': 'added', 'name': self.devices[4]}] self._handle_ip_addresses('added', ip_addresses) self._handle_ip_addresses('removed', ip_addresses) self._check_read_file(ip_addresses) ip_device = ip_lib.IPDevice(self.devices[4], self.namespace) ip_device.link.delete() ip_addresses = [ {'cidr': '2001:db8::1/64', 'event': 'removed', 'name': self.devices[4]}, {'cidr': '2001:db8::2/64', 'event': 'removed', 'name': self.devices[4]}] self._check_read_file(ip_addresses) def test_interface_added_after_initilization(self): for device in self.devices[:len(self.devices) - 1]: self.ip_wrapper.add_dummy(device) utils.wait_until_true(lambda: self._read_file({}), timeout=30) ip_addresses = [ {'cidr': '192.168.251.21/24', 'event': 'added', 'name': self.devices[0]}, {'cidr': '192.168.251.22/24', 'event': 'added', 'name': self.devices[1]}] self._handle_ip_addresses('added', ip_addresses) self._check_read_file(ip_addresses) self.ip_wrapper.add_dummy(self.devices[-1]) ip_addresses.append({'cidr': '192.168.251.23/24', 'event': 'added', 'name': self.devices[-1]}) self._handle_ip_addresses('added', [ip_addresses[-1]]) self._check_read_file(ip_addresses) def test_add_and_remove_multiple_ips(self): # NOTE(ralonsoh): testing [1], adding multiple IPs. # [1] https://bugs.launchpad.net/neutron/+bug/1832307 utils.wait_until_true(lambda: self._read_file({}), timeout=30) self.ip_wrapper.add_dummy(self.devices[0]) ip_addresses = [] for i in range(100): _cidr = str(netaddr.IPNetwork('192.168.252.1/32').ip + i) + '/32' ip_addresses.append({'cidr': _cidr, 'event': 'added', 'name': self.devices[0]}) self._handle_ip_addresses('added', ip_addresses) self._check_read_file(ip_addresses) for i in range(100): _cidr = str(netaddr.IPNetwork('192.168.252.1/32').ip + i) + '/32' ip_addresses.append({'cidr': _cidr, 'event': 'removed', 'name': self.devices[0]}) self._handle_ip_addresses('removed', ip_addresses) self._check_read_file(ip_addresses) class IpRouteCommandTestCase(functional_base.BaseSudoTestCase): def setUp(self): super(IpRouteCommandTestCase, self).setUp() self.namespace = self.useFixture(net_helpers.NamespaceFixture()).name ip_lib.IPWrapper(self.namespace).add_dummy('test_device') self.device = ip_lib.IPDevice('test_device', namespace=self.namespace) self.device.link.set_up() self.device_cidr_ipv4 = '192.168.100.1/24' self.device_cidr_ipv6 = '2020::1/64' self.device.addr.add(self.device_cidr_ipv4) self.device.addr.add(self.device_cidr_ipv6) self.cidrs = ['192.168.0.0/24', '10.0.0.0/8', '2001::/64', 'faaa::/96'] def _assert_route(self, ip_version, table=None, source_prefix=None, cidr=None, scope=None, via=None, metric=None, not_in=False): if not_in: fn = lambda: cmp not in self.device.route.list_routes(ip_version, table=table) msg = 'Route found: %s' else: fn = lambda: cmp in self.device.route.list_routes(ip_version, table=table) msg = 'Route not found: %s' if cidr: ip_version = utils.get_ip_version(cidr) else: ip_version = utils.get_ip_version(via) cidr = constants.IP_ANY[ip_version] if constants.IP_VERSION_6 == ip_version: scope = ip_lib.IP_ADDRESS_SCOPE[0] elif not scope: scope = 'global' if via else 'link' if not metric: metric = 
ip_lib.IP_ROUTE_METRIC_DEFAULT[ip_version] table = table or iproute_linux.DEFAULT_TABLE table = ip_lib.IP_RULE_TABLES_NAMES.get(table, table) cmp = {'table': table, 'cidr': cidr, 'source_prefix': source_prefix, 'scope': scope, 'device': 'test_device', 'via': via, 'metric': metric} try: utils.wait_until_true(fn, timeout=5) except utils.WaitTimeout: raise self.fail(msg % cmp) def test_add_route_table(self): tables = (None, 1, 253, 254, 255) for cidr in self.cidrs: for table in tables: self.device.route.add_route(cidr, table=table) ip_version = utils.get_ip_version(cidr) self._assert_route(ip_version, cidr=cidr, table=table) def test_add_route_via(self): gateway_ipv4 = str(netaddr.IPNetwork(self.device_cidr_ipv4).ip) gateway_ipv6 = str(netaddr.IPNetwork(self.device_cidr_ipv6).ip + 1) for cidr in self.cidrs: ip_version = utils.get_ip_version(cidr) gateway = (gateway_ipv4 if ip_version == constants.IP_VERSION_4 else gateway_ipv6) self.device.route.add_route(cidr, via=gateway) self._assert_route(ip_version, cidr=cidr, via=gateway) def test_add_route_metric(self): metrics = (None, 1, 10, 255) for cidr in self.cidrs: for metric in metrics: self.device.route.add_route(cidr, metric=metric) ip_version = utils.get_ip_version(cidr) self._assert_route(ip_version, cidr=cidr, metric=metric) def test_add_route_scope(self): for cidr in self.cidrs: for scope in ip_lib.IP_ADDRESS_SCOPE_NAME: self.device.route.add_route(cidr, scope=scope) ip_version = utils.get_ip_version(cidr) self._assert_route(ip_version, cidr=cidr, scope=scope) def test_add_route_gateway(self): gateways = (str(netaddr.IPNetwork(self.device_cidr_ipv4).ip), str(netaddr.IPNetwork(self.device_cidr_ipv6).ip + 1)) for gateway in gateways: ip_version = utils.get_ip_version(gateway) self.device.route.add_gateway(gateway) self._assert_route(ip_version, cidr=None, via=gateway, scope='global') def test_list_onlink_routes_ipv4(self): cidr_ipv4 = [] for cidr in self.cidrs: if utils.get_ip_version(cidr) == constants.IP_VERSION_4: cidr_ipv4.append(cidr) self.device.route.add_onlink_route(cidr) for cidr in cidr_ipv4: self._assert_route(constants.IP_VERSION_4, cidr=cidr) routes = self.device.route.list_onlink_routes(constants.IP_VERSION_4) self.assertEqual(len(cidr_ipv4), len(routes)) def test_get_and_delete_gateway(self): gateways = (str(netaddr.IPNetwork(self.device_cidr_ipv4).ip), str(netaddr.IPNetwork(self.device_cidr_ipv6).ip + 1)) scopes = ('global', 'site', 'link') metrics = (None, 1, 255) tables = (None, 1, 254, 255) for gateway, scope, metric, table in itertools.product( gateways, scopes, metrics, tables): ip_version = utils.get_ip_version(gateway) self.device.route.add_gateway(gateway, scope=scope, metric=metric, table=table) self._assert_route(ip_version, cidr=None, via=gateway, scope=scope, metric=metric, table=table) self.assertEqual(gateway, self.device.route.get_gateway( ip_version=ip_version, table=table)['via']) self.device.route.delete_gateway(gateway, table=table, scope=scope) self.assertIsNone(self.device.route.get_gateway( ip_version=ip_version, table=table)) def test_delete_route(self): scopes = ('global', 'site', 'link') tables = (None, 1, 254, 255) for cidr, scope, table in itertools.product( self.cidrs, scopes, tables): ip_version = utils.get_ip_version(cidr) self.device.route.add_route(cidr, table=table, scope=scope) self._assert_route(ip_version, cidr=cidr, scope=scope, table=table) self.device.route.delete_route(cidr, table=table, scope=scope) self._assert_route(ip_version, cidr=cidr, scope=scope, table=table, not_in=True) def 
test_flush(self): tables = (None, 1, 200) ip_versions = (constants.IP_VERSION_4, constants.IP_VERSION_6) for cidr, table in itertools.product(self.cidrs, tables): self.device.route.add_route(cidr, table=table) for ip_version, table in itertools.product(ip_versions, tables): routes = self.device.route.list_routes(ip_version, table=table) self.assertGreater(len(routes), 0) self.device.route.flush(ip_version, table=table) routes = self.device.route.list_routes(ip_version, table=table) self.assertEqual([], routes) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/test_ipset.py0000644000175000017500000000762600000000000027146 0ustar00coreycorey00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent.linux import ip_lib from neutron.agent.linux import ipset_manager from neutron.agent.linux import iptables_manager from neutron.common import utils from neutron.tests.common import machine_fixtures from neutron.tests.common import net_helpers from neutron.tests.functional.agent.linux import base from neutron.tests.functional import base as functional_base MAX_IPSET_NAME_LENGTH = 28 IPSET_ETHERTYPE = 'IPv4' UNRELATED_IP = '1.1.1.1' class IpsetBase(functional_base.BaseSudoTestCase): def setUp(self): super(IpsetBase, self).setUp() bridge = self.useFixture(net_helpers.VethBridgeFixture()).bridge self.source, self.destination = self.useFixture( machine_fixtures.PeerMachines(bridge)).machines self.ipset_name = utils.get_rand_name(MAX_IPSET_NAME_LENGTH, 'set-') self.icmp_accept_rule = ('-p icmp -m set --match-set %s src -j ACCEPT' % self.ipset_name) self.ipset = self._create_ipset_manager_and_set( ip_lib.IPWrapper(self.destination.namespace), self.ipset_name) self.addCleanup(self.ipset._destroy, self.ipset_name) self.dst_iptables = iptables_manager.IptablesManager( namespace=self.destination.namespace) self._add_iptables_ipset_rules() self.addCleanup(self._remove_iptables_ipset_rules) def _create_ipset_manager_and_set(self, dst_ns, set_name): ipset = ipset_manager.IpsetManager( namespace=dst_ns.namespace) ipset._create_set(set_name, IPSET_ETHERTYPE) return ipset def _remove_iptables_ipset_rules(self): self.dst_iptables.ipv4['filter'].remove_rule( 'INPUT', base.ICMP_BLOCK_RULE) self.dst_iptables.ipv4['filter'].remove_rule( 'INPUT', self.icmp_accept_rule) self.dst_iptables.apply() def _add_iptables_ipset_rules(self): self.dst_iptables.ipv4['filter'].add_rule( 'INPUT', self.icmp_accept_rule) self.dst_iptables.ipv4['filter'].add_rule( 'INPUT', base.ICMP_BLOCK_RULE) self.dst_iptables.apply() class IpsetManagerTestCase(IpsetBase): def test_add_member_allows_ping(self): self.source.assert_no_ping(self.destination.ip) self.ipset._add_member_to_set(self.ipset_name, self.source.ip) self.source.assert_ping(self.destination.ip) def test_del_member_denies_ping(self): self.ipset._add_member_to_set(self.ipset_name, self.source.ip) 
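        # The INPUT chain pairs an ACCEPT rule matching this ipset with a
        # blanket ICMP block, so pings succeed only while the source IP
        # remains a member of the set.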
self.source.assert_ping(self.destination.ip) self.ipset._del_member_from_set(self.ipset_name, self.source.ip) self.source.assert_no_ping(self.destination.ip) def test_refresh_ipset_allows_ping(self): self.ipset._refresh_set( self.ipset_name, [UNRELATED_IP], IPSET_ETHERTYPE) self.source.assert_no_ping(self.destination.ip) self.ipset._refresh_set( self.ipset_name, [UNRELATED_IP, self.source.ip], IPSET_ETHERTYPE) self.source.assert_ping(self.destination.ip) self.ipset._refresh_set( self.ipset_name, [self.source.ip, UNRELATED_IP], IPSET_ETHERTYPE) self.source.assert_ping(self.destination.ip) def test_destroy_ipset_set(self): self._remove_iptables_ipset_rules() self.ipset._destroy(self.ipset_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/test_iptables.py0000644000175000017500000002004500000000000027613 0ustar00coreycorey00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os.path from neutron_lib import constants import testtools from neutron.agent.linux import iptables_manager from neutron.agent.linux import utils from neutron.tests import base from neutron.tests.common import machine_fixtures from neutron.tests.common import net_helpers from neutron.tests.functional.agent.linux import base as linux_base from neutron.tests.functional.agent.linux.bin import ipt_binname from neutron.tests.functional import base as functional_base class IptablesManagerTestCase(functional_base.BaseSudoTestCase): DIRECTION_CHAIN_MAPPER = {'ingress': 'INPUT', 'egress': 'OUTPUT'} PROTOCOL_BLOCK_RULE = '-p %s -j DROP' PROTOCOL_PORT_BLOCK_RULE = ('-p %(protocol)s -m %(protocol)s ' '--dport %(port)d -j DROP') def setUp(self): super(IptablesManagerTestCase, self).setUp() bridge = self.useFixture(net_helpers.VethBridgeFixture()).bridge self.client, self.server = self.useFixture( machine_fixtures.PeerMachines(bridge)).machines self.client_fw, self.server_fw = self.create_firewalls() # The port is used in isolated namespace that precludes possibility of # port conflicts self.port = net_helpers.get_free_namespace_port( constants.PROTO_NAME_TCP, self.server.namespace) def create_firewalls(self): client_iptables = iptables_manager.IptablesManager( namespace=self.client.namespace) server_iptables = iptables_manager.IptablesManager( namespace=self.server.namespace) return client_iptables, server_iptables def filter_add_rule(self, fw_manager, address, direction, protocol, port): self._ipv4_filter_execute(fw_manager, 'add_rule', direction, protocol, port) def filter_remove_rule(self, fw_manager, address, direction, protocol, port): self._ipv4_filter_execute(fw_manager, 'remove_rule', direction, protocol, port) def _ipv4_filter_execute(self, fw_manager, method, direction, protocol, port): chain, rule = self._get_chain_and_rule(direction, protocol, port) method = getattr(fw_manager.ipv4['filter'], method) method(chain, rule) fw_manager.apply() def 
_get_chain_and_rule(self, direction, protocol, port): chain = self.DIRECTION_CHAIN_MAPPER[direction] if port: rule = self.PROTOCOL_PORT_BLOCK_RULE % {'protocol': protocol, 'port': port} else: rule = self.PROTOCOL_BLOCK_RULE % protocol return chain, rule def _test_with_nc(self, fw_manager, direction, port, protocol): netcat = net_helpers.NetcatTester( self.client.namespace, self.server.namespace, self.server.ip, self.port, protocol) self.addCleanup(netcat.stop_processes) filter_params = 'direction %s, port %s and protocol %s' % ( direction, port, protocol) self.assertTrue(netcat.test_connectivity(), 'Failed connectivity check before applying a filter ' 'with %s' % filter_params) # REVISIT(jlibosva): Make sure we have ASSURED conntrack entry for # given connection self.filter_add_rule( fw_manager, self.server.ip, direction, protocol, port) with testtools.ExpectedException( RuntimeError, msg='Wrongfully passed a connectivity check after applying ' 'a filter with %s' % filter_params): netcat.test_connectivity() self.filter_remove_rule( fw_manager, self.server.ip, direction, protocol, port) # With TCP packets will get through after firewall was removed, so we # would get old data on socket and with UDP process died, so we need to # respawn processes to have clean sockets self.assertTrue(netcat.test_connectivity(True), 'Failed connectivity check after removing a filter ' 'with %s' % filter_params) def test_icmp(self): self.client.assert_ping(self.server.ip) self.server_fw.ipv4['filter'].add_rule('INPUT', linux_base.ICMP_BLOCK_RULE) self.server_fw.apply() self.client.assert_no_ping(self.server.ip) self.server_fw.ipv4['filter'].remove_rule('INPUT', linux_base.ICMP_BLOCK_RULE) self.server_fw.apply() self.client.assert_ping(self.server.ip) def test_mangle_icmp(self): self.client.assert_ping(self.server.ip) self.server_fw.ipv4['mangle'].add_rule('INPUT', linux_base.ICMP_MARK_RULE) self.server_fw.ipv4['filter'].add_rule('INPUT', linux_base.MARKED_BLOCK_RULE) self.server_fw.apply() self.client.assert_no_ping(self.server.ip) self.server_fw.ipv4['mangle'].remove_rule('INPUT', linux_base.ICMP_MARK_RULE) self.server_fw.ipv4['filter'].remove_rule('INPUT', linux_base.MARKED_BLOCK_RULE) self.server_fw.apply() self.client.assert_ping(self.server.ip) def test_tcp_input_port(self): self._test_with_nc(self.server_fw, 'ingress', self.port, protocol=net_helpers.NetcatTester.TCP) def test_tcp_output_port(self): self._test_with_nc(self.client_fw, 'egress', self.port, protocol=net_helpers.NetcatTester.TCP) def test_tcp_input(self): self._test_with_nc(self.server_fw, 'ingress', port=None, protocol=net_helpers.NetcatTester.TCP) def test_tcp_output(self): self._test_with_nc(self.client_fw, 'egress', port=None, protocol=net_helpers.NetcatTester.TCP) def test_udp_input_port(self): self._test_with_nc(self.server_fw, 'ingress', self.port, protocol=net_helpers.NetcatTester.UDP) def test_udp_output_port(self): self._test_with_nc(self.client_fw, 'egress', self.port, protocol=net_helpers.NetcatTester.UDP) def test_udp_input(self): self._test_with_nc(self.server_fw, 'ingress', port=None, protocol=net_helpers.NetcatTester.UDP) def test_udp_output(self): self._test_with_nc(self.client_fw, 'egress', port=None, protocol=net_helpers.NetcatTester.UDP) class IptablesManagerNonRootTestCase(base.BaseTestCase): @staticmethod def _normalize_module_name(name): for suf in ['.pyc', '.pyo']: if name.endswith(suf): return name[:-len(suf)] + '.py' return name def _test_binary_name(self, module, *extra_options): executable = 
self._normalize_module_name(module.__file__) expected = os.path.basename(executable)[:16] observed = utils.execute([executable] + list(extra_options)).rstrip() self.assertEqual(expected, observed) def test_binary_name(self): self._test_binary_name(ipt_binname) def test_binary_name_eventlet_spawn(self): self._test_binary_name(ipt_binname, 'spawn') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/test_keepalived.py0000644000175000017500000001207500000000000030125 0ustar00coreycorey00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import signal from oslo_config import cfg from neutron._i18n import _ from neutron.agent.linux import external_process from neutron.agent.linux import ip_lib from neutron.agent.linux import keepalived from neutron.common import utils as common_utils from neutron.tests.common import net_helpers from neutron.tests.functional.agent.linux import helpers from neutron.tests.functional import base from neutron.tests.unit.agent.linux import test_keepalived class KeepalivedManagerTestCase(base.BaseSudoTestCase, test_keepalived.KeepalivedConfBaseMixin): def setUp(self): super(KeepalivedManagerTestCase, self).setUp() cfg.CONF.set_override('check_child_processes_interval', 1, 'AGENT') self.expected_config = self._get_config() self.process_monitor = external_process.ProcessMonitor(cfg.CONF, 'router') self.namespace = self.useFixture(net_helpers.NamespaceFixture()).name self.ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace) self._prepare_devices() self.manager = keepalived.KeepalivedManager( 'router1', self.expected_config, self.process_monitor, conf_path=cfg.CONF.state_path, namespace=self.namespace) self.addCleanup(self._stop_keepalived_manager) def _stop_keepalived_manager(self): self.manager.disable() try: common_utils.wait_until_true( lambda: not self.manager.get_process().active, timeout=5) except common_utils.WaitTimeout: self.manager.get_process().disable(sig=signal.SIGKILL) def _prepare_devices(self): # NOTE(slaweq): those are devices used in keepalived config file, # prepared by self._get_config() method which is defined in # neutron.tests.unit.agent.linux.test_keepalived module dev_names = ['eth0', 'eth1', 'eth2', 'eth4', 'eth6', 'eth10'] for name in dev_names: ip_device = self.ip_wrapper.add_dummy(name) if name == 'eth0': ip_device.link.set_up() ip_device.addr.add('169.254.201.1/16') def _spawn_keepalived(self, keepalived_manager): keepalived_manager.spawn() process = keepalived_manager.get_process() common_utils.wait_until_true( lambda: process.active, timeout=5, sleep=0.01, exception=RuntimeError(_("Keepalived didn't spawn"))) return process def test_keepalived_spawn(self): self._spawn_keepalived(self.manager) self.assertEqual(self.expected_config.get_config_str(), self.manager.get_conf_on_disk()) def _test_keepalived_respawns(self, normal_exit=True): process = 
self._spawn_keepalived(self.manager) pid = process.pid exit_code = '-15' if normal_exit else '-9' # Exit the process, and see that when it comes back # It's indeed a different process self.ip_wrapper.netns.execute(['kill', exit_code, pid]) common_utils.wait_until_true( lambda: process.active and pid != process.pid, timeout=5, sleep=0.01, exception=RuntimeError(_("Keepalived didn't respawn"))) def test_keepalived_respawns(self): self._test_keepalived_respawns() def test_keepalived_respawn_with_unexpected_exit(self): self._test_keepalived_respawns(False) def _test_keepalived_spawns_conflicting_pid(self, process, pid_file): # Test the situation when keepalived PID file contains PID of an # existing non-keepalived process. This situation can happen e.g. # after hard node reset. spawn_process = helpers.SleepyProcessFixture() self.useFixture(spawn_process) with open(pid_file, "w") as f_pid_file: f_pid_file.write("%s" % spawn_process.pid) self._spawn_keepalived(self.manager) def test_keepalived_spawns_conflicting_pid_base_process(self): process = self.manager.get_process() pid_file = process.get_pid_file_name() self._test_keepalived_spawns_conflicting_pid(process, pid_file) def test_keepalived_spawns_conflicting_pid_vrrp_subprocess(self): process = self.manager.get_process() pid_file = process.get_pid_file_name() self._test_keepalived_spawns_conflicting_pid( process, self.manager.get_vrrp_pid_file_name(pid_file)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/test_l3_tc_lib.py0000644000175000017500000001457700000000000027657 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
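# The tests below exercise l3_tc_lib.FloatingIPTcCommand, which manages
# per-floating-IP rate limits through "tc" filters. As a hedged sketch of
# the kind of commands involved (illustrative only, not the library's
# literal invocations), limiting ingress traffic for one IP looks like:
#
#     tc qdisc add dev test_device ingress
#     tc filter add dev test_device parent ffff: protocol ip prio 1 u32 \
#         match ip dst 2.2.2.2/32 police rate 1024kbit burst 512kbit drop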
import mock from neutron_lib import constants as common_constants from neutron_lib import exceptions from oslo_utils import uuidutils from neutron.agent.l3 import namespaces from neutron.agent.linux import ip_lib from neutron.agent.linux import l3_tc_lib from neutron.tests.functional import base as functional_base RATE_LIMIT = 1024 BURST_LIMIT = 512 DEV_NAME = "test_device" class TcLibTestCase(functional_base.BaseSudoTestCase): def create_tc_wrapper_with_namespace_and_device(self): ns_name = uuidutils.generate_uuid() namespace = namespaces.Namespace( ns_name, None, mock.Mock(), False) namespace.create() self.addCleanup(namespace.delete) ip_wrapper = ip_lib.IPWrapper(namespace=ns_name) tc_device = ip_wrapper.add_tuntap(DEV_NAME) tc_device.link.set_up() return l3_tc_lib.FloatingIPTcCommand( DEV_NAME, namespace=ns_name) def test_clear_all_filters(self): ip_addr = "2.2.2.2" l3_tc = self.create_tc_wrapper_with_namespace_and_device() l3_tc.set_ip_rate_limit(common_constants.INGRESS_DIRECTION, ip_addr, RATE_LIMIT, BURST_LIMIT) l3_tc.set_ip_rate_limit(common_constants.EGRESS_DIRECTION, ip_addr, RATE_LIMIT, BURST_LIMIT) l3_tc.clear_all_filters(common_constants.INGRESS_DIRECTION) self.assertRaises(exceptions.FilterIDForIPNotFound, l3_tc.get_filter_id_for_ip, common_constants.INGRESS_DIRECTION, ip_addr) l3_tc.clear_all_filters(common_constants.EGRESS_DIRECTION) self.assertRaises(exceptions.FilterIDForIPNotFound, l3_tc.get_filter_id_for_ip, common_constants.EGRESS_DIRECTION, ip_addr) def test_get_filter_id_for_ip(self): ip_addr = "3.3.3.3" l3_tc = self.create_tc_wrapper_with_namespace_and_device() l3_tc.set_ip_rate_limit(common_constants.INGRESS_DIRECTION, ip_addr, RATE_LIMIT, BURST_LIMIT) l3_tc.set_ip_rate_limit(common_constants.EGRESS_DIRECTION, ip_addr, RATE_LIMIT, BURST_LIMIT) self.assertIsNotNone( l3_tc.get_filter_id_for_ip(common_constants.INGRESS_DIRECTION, ip_addr)) self.assertIsNotNone( l3_tc.get_filter_id_for_ip(common_constants.EGRESS_DIRECTION, ip_addr)) # testing: IP filter does not exist self.assertRaises(exceptions.FilterIDForIPNotFound, l3_tc.get_filter_id_for_ip, common_constants.EGRESS_DIRECTION, '33.33.33.33') def test_get_existing_filter_ids(self): ip_addr = "4.4.4.4" l3_tc = self.create_tc_wrapper_with_namespace_and_device() l3_tc.set_ip_rate_limit(common_constants.INGRESS_DIRECTION, ip_addr, RATE_LIMIT, BURST_LIMIT) l3_tc.set_ip_rate_limit(common_constants.EGRESS_DIRECTION, ip_addr, RATE_LIMIT, BURST_LIMIT) filter_ids = l3_tc.get_existing_filter_ids( common_constants.INGRESS_DIRECTION) self.assertNotEqual(0, len(filter_ids)) filter_ids = l3_tc.get_existing_filter_ids( common_constants.EGRESS_DIRECTION) self.assertNotEqual(0, len(filter_ids)) def test_delete_filter_ids(self): ip_addr1 = "5.5.5.5" ip_addr2 = "6.6.6.6" l3_tc = self.create_tc_wrapper_with_namespace_and_device() l3_tc.set_ip_rate_limit(common_constants.INGRESS_DIRECTION, ip_addr1, RATE_LIMIT, BURST_LIMIT) l3_tc.set_ip_rate_limit(common_constants.INGRESS_DIRECTION, ip_addr2, RATE_LIMIT, BURST_LIMIT) filter_ids = l3_tc.get_existing_filter_ids( common_constants.INGRESS_DIRECTION) self.assertEqual(2, len(filter_ids)) l3_tc.delete_filter_ids(common_constants.INGRESS_DIRECTION, filter_ids) filter_ids = l3_tc.get_existing_filter_ids( common_constants.INGRESS_DIRECTION) self.assertEqual(0, len(filter_ids)) def test_set_ip_rate_limit(self): ip_addr = "7.7.7.7" l3_tc = self.create_tc_wrapper_with_namespace_and_device() # Set it multiple times l3_tc.set_ip_rate_limit(common_constants.INGRESS_DIRECTION, ip_addr, RATE_LIMIT, 
BURST_LIMIT) l3_tc.set_ip_rate_limit(common_constants.INGRESS_DIRECTION, ip_addr, RATE_LIMIT, BURST_LIMIT) l3_tc.set_ip_rate_limit(common_constants.INGRESS_DIRECTION, ip_addr, RATE_LIMIT, BURST_LIMIT) # Get only one and no exception filter_id = l3_tc.get_filter_id_for_ip( common_constants.INGRESS_DIRECTION, ip_addr) self.assertIsNotNone(filter_id) def test_clear_ip_rate_limit(self): ip_addr = "8.8.8.8" l3_tc = self.create_tc_wrapper_with_namespace_and_device() l3_tc.set_ip_rate_limit(common_constants.INGRESS_DIRECTION, ip_addr, RATE_LIMIT, BURST_LIMIT) filter_id = l3_tc.get_filter_id_for_ip( common_constants.INGRESS_DIRECTION, ip_addr) self.assertIsNotNone(filter_id) filter_id = l3_tc.clear_ip_rate_limit( common_constants.INGRESS_DIRECTION, ip_addr) self.assertIsNone(filter_id) # testing: IP filter does not exist l3_tc.clear_ip_rate_limit( common_constants.INGRESS_DIRECTION, "88.88.88.88") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/test_linuxbridge_arp_protect.py0000644000175000017500000001566500000000000032742 0ustar00coreycorey00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
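# The tests below exercise arp_protect, which installs ebtables rules so
# that a port may only emit ARP messages for its own fixed IPs or allowed
# address pairs. A rough sketch of the rule shape (the chain name and exact
# options here are assumptions for illustration, not the module's literal
# rules):
#
#     ebtables -A neutronARP-tap1234 -p ARP --arp-ip-src 192.168.0.5 \
#         -j ACCEPT
#     ebtables -A neutronARP-tap1234 -p ARP -j DROP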
from neutron_lib import constants from neutron_lib.utils import net from neutron.plugins.ml2.drivers.linuxbridge.agent import arp_protect from neutron.tests.common import machine_fixtures from neutron.tests.common import net_helpers from neutron.tests.functional import base as functional_base no_arping = net_helpers.assert_no_arping arping = net_helpers.assert_arping class LinuxBridgeARPSpoofTestCase(functional_base.BaseSudoTestCase): def setUp(self): super(LinuxBridgeARPSpoofTestCase, self).setUp() lbfixture = self.useFixture(net_helpers.LinuxBridgeFixture()) self.addCleanup(setattr, arp_protect, 'NAMESPACE', None) arp_protect.NAMESPACE = lbfixture.namespace bridge = lbfixture.bridge self.source, self.destination, self.observer = self.useFixture( machine_fixtures.PeerMachines(bridge, amount=3)).machines self.addCleanup(self._ensure_rules_cleaned) def _ensure_rules_cleaned(self): rules = [r for r in arp_protect.ebtables(['-L']).splitlines() if r and 'Bridge' not in r] self.assertEqual([], rules, 'Test leaked ebtables rules') def _add_arp_protection(self, machine, addresses, extra_port_dict=None): port_dict = {'fixed_ips': [{'ip_address': a} for a in addresses], 'device_owner': 'nobody', 'mac_address': machine.port.link.address} if extra_port_dict: port_dict.update(extra_port_dict) name = net_helpers.VethFixture.get_peer_name(machine.port.name) arp_protect.setup_arp_spoofing_protection(name, port_dict) self.addCleanup(arp_protect.delete_arp_spoofing_protection, [name]) def test_arp_no_protection(self): arping(self.source.namespace, self.destination.ip) arping(self.destination.namespace, self.source.ip) def test_arp_correct_protection(self): self._add_arp_protection(self.source, [self.source.ip]) self._add_arp_protection(self.destination, [self.destination.ip]) arping(self.source.namespace, self.destination.ip) arping(self.destination.namespace, self.source.ip) def test_arp_correct_protection_allowed_address_pairs(self): smac = self.source.port.link.address port = {'mac_address': '00:11:22:33:44:55', 'allowed_address_pairs': [{'mac_address': smac, 'ip_address': self.source.ip}]} # make sure a large number of allowed address pairs works for i in range(100000): port['allowed_address_pairs'].append( {'mac_address': net.get_random_mac( 'fa:16:3e:00:00:00'.split(':')), 'ip_address': '10.10.10.10'}) self._add_arp_protection(self.source, ['1.2.2.2'], port) self._add_arp_protection(self.destination, [self.destination.ip]) arping(self.source.namespace, self.destination.ip) arping(self.destination.namespace, self.source.ip) def test_arp_fails_incorrect_protection(self): self._add_arp_protection(self.source, ['1.1.1.1']) self._add_arp_protection(self.destination, ['2.2.2.2']) no_arping(self.source.namespace, self.destination.ip) no_arping(self.destination.namespace, self.source.ip) def test_arp_fails_incorrect_mac_protection(self): # a bad mac filter on the source will prevent any traffic from it self._add_arp_protection(self.source, [self.source.ip], {'mac_address': '00:11:22:33:44:55'}) no_arping(self.source.namespace, self.destination.ip) no_arping(self.destination.namespace, self.source.ip) # correcting it should make it work self._add_arp_protection(self.source, [self.source.ip]) arping(self.source.namespace, self.destination.ip) def test_arp_protection_removal(self): self._add_arp_protection(self.source, ['1.1.1.1']) self._add_arp_protection(self.destination, ['2.2.2.2']) no_arping(self.observer.namespace, self.destination.ip) no_arping(self.observer.namespace, self.source.ip) name = 
net_helpers.VethFixture.get_peer_name(self.source.port.name) arp_protect.delete_arp_spoofing_protection([name]) # spoofing should have been removed from source, but not dest arping(self.observer.namespace, self.source.ip) no_arping(self.observer.namespace, self.destination.ip) def test_arp_protection_update(self): self._add_arp_protection(self.source, ['1.1.1.1']) self._add_arp_protection(self.destination, ['2.2.2.2']) no_arping(self.observer.namespace, self.destination.ip) no_arping(self.observer.namespace, self.source.ip) self._add_arp_protection(self.source, ['192.0.0.0/1']) # spoofing should have been updated on source, but not dest arping(self.observer.namespace, self.source.ip) no_arping(self.observer.namespace, self.destination.ip) def test_arp_protection_port_security_disabled(self): self._add_arp_protection(self.source, ['1.1.1.1']) no_arping(self.observer.namespace, self.source.ip) self._add_arp_protection(self.source, ['1.1.1.1'], {'port_security_enabled': False}) arping(self.observer.namespace, self.source.ip) def test_arp_protection_network_owner(self): self._add_arp_protection(self.source, ['1.1.1.1']) no_arping(self.observer.namespace, self.source.ip) self._add_arp_protection(self.source, ['1.1.1.1'], {'device_owner': constants.DEVICE_OWNER_ROUTER_GW}) arping(self.observer.namespace, self.source.ip) def test_arp_protection_dead_reference_removal(self): self._add_arp_protection(self.source, ['1.1.1.1']) self._add_arp_protection(self.destination, ['2.2.2.2']) no_arping(self.observer.namespace, self.destination.ip) no_arping(self.observer.namespace, self.source.ip) name = net_helpers.VethFixture.get_peer_name(self.source.port.name) # This should remove all arp protect rules that aren't source port arp_protect.delete_unreferenced_arp_protection([name]) no_arping(self.observer.namespace, self.source.ip) arping(self.observer.namespace, self.destination.ip) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/test_netlink_lib.py0000644000175000017500000001264500000000000030311 0ustar00coreycorey00000000000000# Copyright (c) 2017 Fujitsu Limited # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent.linux import utils as linux_utils from neutron.privileged.agent.linux import netlink_lib as nl_lib from neutron.tests.functional import base as functional_base class NetlinkLibTestCase(functional_base.BaseSudoTestCase): """Functional test for netlink_lib: List, delete, flush conntrack entries. For each function, first we add a specific namespace, then create real conntrack entries. netlink_lib function will do list, delete and flush these entries. This class will test this netlink_lib function work as expected. 
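
    The entries asserted on below are tuples such as
    (4, 'tcp', 1, 2, '1.1.1.1', '2.2.2.2', zone), i.e.
    (ip version, protocol, sport, dport, source ip, destination ip, zone);
    ICMP entries such as (4, 'icmp', 8, 0, '1.1.1.1', '2.2.2.2', 3333, zone)
    carry the ICMP type, code and id instead of ports.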
""" def _create_entries(self, zone): conntrack_cmds = ( ['conntrack', '-I', '-p', 'tcp', '-s', '1.1.1.1', '-d', '2.2.2.2', '--sport', '1', '--dport', '2', '--state', 'ESTABLISHED', '--timeout', '1234', '-w', zone], ['conntrack', '-I', '-p', 'udp', '-s', '1.1.1.1', '-d', '2.2.2.2', '--sport', '4', '--dport', '5', '--timeout', '1234', '-w', zone], ['conntrack', '-I', '-p', 'icmp', '-s', '1.1.1.1', '-d', '2.2.2.2', '--icmp-type', '8', '--icmp-code', '0', '--icmp-id', '3333', '--timeout', '1234', '-w', zone], ) for cmd in conntrack_cmds: try: linux_utils.execute(cmd, run_as_root=True, check_exit_code=True, extra_ok_codes=[1]) except RuntimeError: raise Exception('Error while creating entry') def _delete_entry(self, delete_entries, remain_entries, zone): nl_lib.delete_entries(entries=delete_entries) entries_list = nl_lib.list_entries(zone=zone) for delete_entry in delete_entries: self.assertNotIn(delete_entry, entries_list) for remain_entry in remain_entries: self.assertIn(remain_entry, entries_list) @staticmethod def _find_unused_zone_id(start, end): """Find unused zone ID starting from a specified ID""" while start <= end: cmd = ['conntrack', '-L', '-w', start] try: current_entries = linux_utils.execute(cmd, run_as_root=True, check_exit_code=True, extra_ok_codes=[1]) except RuntimeError: raise Exception('Error while listing entries') if not current_entries: return start start += 1 raise Exception("Can not find usable zone_id") def test_list_entries(self): _zone = self._find_unused_zone_id(10, 30) self._create_entries(zone=_zone) expected = ( (4, 'icmp', 8, 0, '1.1.1.1', '2.2.2.2', 3333, _zone), (4, 'tcp', 1, 2, '1.1.1.1', '2.2.2.2', _zone), (4, 'udp', 4, 5, '1.1.1.1', '2.2.2.2', _zone) ) entries_list = nl_lib.list_entries(zone=_zone) for entry in expected: self.assertIn(entry, entries_list) def test_delete_icmp_entry(self): _zone = self._find_unused_zone_id(31, 50) self._create_entries(zone=_zone) icmp_entry = [(4, 'icmp', 8, 0, '1.1.1.1', '2.2.2.2', 3333, _zone)] remain_entries = ( (4, 'tcp', 1, 2, '1.1.1.1', '2.2.2.2', _zone), (4, 'udp', 4, 5, '1.1.1.1', '2.2.2.2', _zone), ) self._delete_entry(icmp_entry, remain_entries, _zone) def test_delete_tcp_entry(self): _zone = self._find_unused_zone_id(51, 70) self._create_entries(zone=_zone) tcp_entry = [(4, 'tcp', 1, 2, '1.1.1.1', '2.2.2.2', _zone)] remain_entries = ( (4, 'icmp', 8, 0, '1.1.1.1', '2.2.2.2', 3333, _zone), (4, 'udp', 4, 5, '1.1.1.1', '2.2.2.2', _zone) ) self._delete_entry(tcp_entry, remain_entries, _zone) def test_delete_udp_entry(self): _zone = self._find_unused_zone_id(71, 90) self._create_entries(zone=_zone) udp_entry = [(4, 'udp', 4, 5, '1.1.1.1', '2.2.2.2', _zone)] remain_entries = ( (4, 'icmp', 8, 0, '1.1.1.1', '2.2.2.2', 3333, _zone), (4, 'tcp', 1, 2, '1.1.1.1', '2.2.2.2', _zone) ) self._delete_entry(udp_entry, remain_entries, _zone) def test_delete_multiple_entries(self): _zone = self._find_unused_zone_id(91, 110) self._create_entries(zone=_zone) delete_entries = ( (4, 'icmp', 8, 0, '1.1.1.1', '2.2.2.2', 3333, _zone), (4, 'tcp', 1, 2, '1.1.1.1', '2.2.2.2', _zone), (4, 'udp', 4, 5, '1.1.1.1', '2.2.2.2', _zone) ) remain_entries = () self._delete_entry(delete_entries, remain_entries, _zone) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/test_of_monitor.py0000644000175000017500000000734000000000000030166 0ustar00coreycorey00000000000000# Copyright 2019 Red Hat, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent.linux import of_monitor from neutron.common import utils from neutron.tests.common import net_helpers from neutron.tests.functional import base as functional_base class OFMonitorTestCase(functional_base.BaseSudoTestCase): DEFAULT_FLOW = {'table': 0, 'cookie': '0', 'actions': 'NORMAL'} def setUp(self): super(OFMonitorTestCase, self).setUp() self.bridge = self.useFixture(net_helpers.OVSBridgeFixture()).bridge self.of_monitor = of_monitor.OFMonitor(self.bridge.br_name, start=False) self.addCleanup(self.of_monitor.stop) def _format_flow(self, flow, event_type): deleted = '' if event_type == 'DELETED': deleted = 'reason=delete' table = 'table=%s' % flow['table'] cookie = flow.get('cookie') or hex(self.bridge._default_cookie) # NOTE(ralonsoh): remove PY2 "L" suffix in longs cookie = 'cookie=' + cookie.rstrip('L') filters = [] if flow.get('in_port'): filters.append('in_port=%s' % flow.get('in_port')) if flow.get('dl_vlan'): filters.append('dl_vlan=%s' % flow.get('dl_vlan')) if flow.get('dl_src'): filters.append('dl_src=%s' % flow.get('dl_src')) filters = ','.join(filters) actions = '' if flow.get('actions'): actions += 'actions=%s' % flow.get('actions') flow_sections = [section for section in (deleted, table, cookie, filters, actions) if section] return ' '.join(flow_sections) def _check_flow(self, reference_flow, event_type): def _read_and_check(): event = self.of_monitor.of_events if len(event) == 1: events_container.append(event[0]) return True return False events_container = [] try: utils.wait_until_true(_read_and_check, timeout=5) except utils.WaitTimeout: self.fail('Flow "%s" with action %s not found' % (reference_flow, event_type)) event = events_container.pop() self.assertEqual(event_type, event.event_type) self.assertEqual(self._format_flow(reference_flow, event_type), event.flow) def test_of_events(self): self.of_monitor.start() self._check_flow(self.DEFAULT_FLOW, 'ADDED') flow = {'table': 10, 'in_port': 20, 'dl_vlan': 30, 'dl_src': '00:00:00:00:00:01', 'actions': 'NORMAL'} self.bridge.add_flow(**flow) self._check_flow(flow, 'ADDED') flow['table'] = 50 self.bridge.add_flow(**flow) self._check_flow(flow, 'ADDED') flow['actions'] = 'resubmit:100' self.bridge.mod_flow(**flow) self._check_flow(flow, 'MODIFIED') flow['table'] = 10 flow['actions'] = 'NORMAL' flow_to_delete = flow.copy() flow_to_delete.pop('actions') self.bridge.delete_flows(**flow_to_delete) self._check_flow(flow, 'DELETED') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/test_ovsdb_monitor.py0000644000175000017500000001347700000000000030707 0ustar00coreycorey00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests in this module will be skipped unless: - ovsdb-client is installed - ovsdb-client can be invoked password-less via the configured root helper - sudo testing is enabled (see neutron.tests.functional.base for details) """ from oslo_config import cfg from neutron.agent.common import ovs_lib from neutron.agent.common import ovsdb_monitor from neutron.common import utils from neutron.tests import base from neutron.tests.common import net_helpers from neutron.tests.functional.agent.linux import base as linux_base class BaseMonitorTest(linux_base.BaseOVSLinuxTestCase): def setUp(self): super(BaseMonitorTest, self).setUp() rootwrap_not_configured = (cfg.CONF.AGENT.root_helper == base.SUDO_CMD) if rootwrap_not_configured: # The monitor tests require a nested invocation that has # to be emulated by double sudo if rootwrap is not # configured. self.config(group='AGENT', root_helper=" ".join([base.SUDO_CMD] * 2)) self._check_test_requirements() # ovsdb-client monitor needs to have a bridge to make any output self.useFixture(net_helpers.OVSBridgeFixture()) def _check_test_requirements(self): self.check_command(['ovsdb-client', 'list-dbs'], 'Exit code: 1', 'password-less sudo not granted for ovsdb-client', run_as_root=True) class TestOvsdbMonitor(BaseMonitorTest): def setUp(self): super(TestOvsdbMonitor, self).setUp() self.monitor = ovsdb_monitor.OvsdbMonitor('Bridge') self.addCleanup(self.monitor.stop) self.monitor.start() def collect_monitor_output(self): output = list(self.monitor.iter_stdout()) if output: # Output[0] is header row with spaces for column separation. # Use 'other_config' as an indication of the table header. self.assertIn('other_config', output[0]) return True def test_monitor_generates_initial_output(self): utils.wait_until_true(self.collect_monitor_output, timeout=30) class TestSimpleInterfaceMonitor(BaseMonitorTest): def setUp(self): super(TestSimpleInterfaceMonitor, self).setUp() self.monitor = ovsdb_monitor.SimpleInterfaceMonitor() self.addCleanup(self.monitor.stop) self.monitor.start(block=True, timeout=60) def test_has_updates(self): utils.wait_until_true(lambda: self.monitor.has_updates) # clear the event list self.monitor.get_events() self.useFixture(net_helpers.OVSPortFixture()) # has_updates after port addition should become True utils.wait_until_true(lambda: self.monitor.has_updates is True) def _expected_devices_events(self, devices, state): """Helper to check that events are received for expected devices. :param devices: The list of expected devices. 
WARNING: This list is modified by this method :param state: The state of the devices (added or removed) """ events = self.monitor.get_events() event_devices = [ (dev['name'], dev['external_ids']) for dev in events.get(state)] for dev in event_devices: if dev[0] in devices: devices.remove(dev[0]) self.assertEqual(dev[1].get('iface-status'), 'active') if not devices: return True def test_get_events(self): utils.wait_until_true(lambda: self.monitor.has_updates) devices = self.monitor.get_events() self.assertTrue(devices.get('added'), 'Initial call should always be true') br = self.useFixture(net_helpers.OVSBridgeFixture()) p1 = self.useFixture(net_helpers.OVSPortFixture(br.bridge)) p2 = self.useFixture(net_helpers.OVSPortFixture(br.bridge)) added_devices = [p1.port.name, p2.port.name] utils.wait_until_true( lambda: self._expected_devices_events(added_devices, 'added')) br.bridge.delete_port(p1.port.name) br.bridge.delete_port(p2.port.name) removed_devices = [p1.port.name, p2.port.name] utils.wait_until_true( lambda: self._expected_devices_events(removed_devices, 'removed')) # restart self.monitor.stop(block=True) self.monitor.start(block=True, timeout=60) try: utils.wait_until_true( lambda: self.monitor.get_events().get('added')) except utils.WaitTimeout: raise AssertionError('Initial call should always be true') def test_get_events_includes_ofport(self): utils.wait_until_true(lambda: self.monitor.has_updates) self.monitor.get_events() # clear initial events br = self.useFixture(net_helpers.OVSBridgeFixture()) p1 = self.useFixture(net_helpers.OVSPortFixture(br.bridge)) def p1_event_has_ofport(): if not self.monitor.has_updates: return for e in self.monitor.new_events['added']: if (e['name'] == p1.port.name and e['ofport'] != ovs_lib.UNASSIGNED_OFPORT): return True utils.wait_until_true(p1_event_has_ofport) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/test_process_monitor.py0000644000175000017500000000742100000000000031240 0ustar00coreycorey00000000000000# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import sys from oslo_config import cfg from six import moves from neutron.agent.linux import external_process from neutron.common import utils from neutron.tests.functional.agent.linux import simple_daemon from neutron.tests.functional import base UUID_FORMAT = "test-uuid-%d" SERVICE_NAME = "service" class BaseTestProcessMonitor(base.BaseLoggingTestCase): def setUp(self): super(BaseTestProcessMonitor, self).setUp() cfg.CONF.set_override('check_child_processes_interval', 1, 'AGENT') self._child_processes = [] self._process_monitor = None self.create_child_processes_manager('respawn') self.addCleanup(self.cleanup_spawned_children) def create_child_processes_manager(self, action): cfg.CONF.set_override('check_child_processes_action', action, 'AGENT') self._process_monitor = self.build_process_monitor() def build_process_monitor(self): return external_process.ProcessMonitor( config=cfg.CONF, resource_type='test') def _make_cmdline_callback(self, uuid): def _cmdline_callback(pidfile): cmdline = [sys.executable, simple_daemon.__file__, "--uuid=%s" % uuid, "--pid_file=%s" % pidfile] return cmdline return _cmdline_callback def spawn_n_children(self, n, service=None): self._child_processes = [] for child_number in moves.range(n): uuid = self._child_uuid(child_number) _callback = self._make_cmdline_callback(uuid) pm = external_process.ProcessManager( conf=cfg.CONF, uuid=uuid, default_cmd_callback=_callback, service=service) pm.enable() self._process_monitor.register(uuid, SERVICE_NAME, pm) self._child_processes.append(pm) @staticmethod def _child_uuid(child_number): return UUID_FORMAT % child_number def _kill_last_child(self): self._child_processes[-1].disable() def wait_for_all_children_spawned(self): def all_children_active(): return all(pm.active for pm in self._child_processes) for pm in self._child_processes: directory = os.path.dirname(pm.get_pid_file_name()) self.assertEqual(0o755, os.stat(directory).st_mode & 0o777) # we need to allow extra_time for the check process to happen # and properly execute action over the gone processes under # high load conditions max_wait_time = ( cfg.CONF.AGENT.check_child_processes_interval + 5) utils.wait_until_true( all_children_active, timeout=max_wait_time, sleep=0.01, exception=RuntimeError('Not all children (re)spawned.')) def cleanup_spawned_children(self): self._process_monitor.stop() for pm in self._child_processes: pm.disable() class TestProcessMonitor(BaseTestProcessMonitor): def test_respawn_handler(self): self.spawn_n_children(2) self.wait_for_all_children_spawned() self._kill_last_child() self.wait_for_all_children_spawned() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/test_tc_lib.py0000644000175000017500000003014600000000000027247 0ustar00coreycorey00000000000000# Copyright (c) 2016 OVH SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
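# The TBF cases in this module configure a token bucket filter on a tuntap
# device. Roughly, and with the constant values used below (an illustrative
# command line, not the library's exact invocation), that corresponds to:
#
#     tc qdisc add dev test_tap_tbf root tbf rate 1024kbit burst 512kb \
#         latency 50ms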
import random import mock import netaddr from neutron_lib.services.qos import constants as qos_consts from oslo_utils import uuidutils from neutron.agent.linux import bridge_lib from neutron.agent.linux import ip_lib from neutron.agent.linux import tc_lib from neutron.privileged.agent.linux import ip_lib as priv_ip_lib from neutron.tests.common import net_helpers from neutron.tests.functional import base as functional_base TEST_HZ_VALUE = 250 LATENCY = 50 BW_LIMIT = 1024 BURST = 512 BASE_DEV_NAME = "test_tap" class TcLibTestCase(functional_base.BaseSudoTestCase): def create_device(self, name): """Create a tuntap with the specified name. The device is cleaned up at the end of the test. """ ip = ip_lib.IPWrapper() tap_device = ip.add_tuntap(name) self.addCleanup(tap_device.link.delete) tap_device.link.set_up() def test_filters_bandwidth_limit(self): device_name = "%s_filters" % BASE_DEV_NAME self.create_device(device_name) tc = tc_lib.TcCommand(device_name, TEST_HZ_VALUE) tc.set_filters_bw_limit(BW_LIMIT, BURST) bw_limit, burst = tc.get_filters_bw_limits() self.assertEqual(BW_LIMIT, bw_limit) self.assertEqual(BURST, burst) new_bw_limit = BW_LIMIT + 500 new_burst = BURST + 50 tc.update_filters_bw_limit(new_bw_limit, new_burst) bw_limit, burst = tc.get_filters_bw_limits() self.assertEqual(new_bw_limit, bw_limit) self.assertEqual(new_burst, burst) tc.delete_filters_bw_limit() bw_limit, burst = tc.get_filters_bw_limits() self.assertIsNone(bw_limit) self.assertIsNone(burst) def test_tbf_bandwidth_limit(self): device_name = "%s_tbf" % BASE_DEV_NAME self.create_device(device_name) tc = tc_lib.TcCommand(device_name, TEST_HZ_VALUE) tc.set_tbf_bw_limit(BW_LIMIT, BURST, LATENCY) bw_limit, burst = tc.get_tbf_bw_limits() self.assertEqual(BW_LIMIT, bw_limit) self.assertEqual(BURST, burst) new_bw_limit = BW_LIMIT + 500 new_burst = BURST + 50 tc.set_tbf_bw_limit(new_bw_limit, new_burst, LATENCY) bw_limit, burst = tc.get_tbf_bw_limits() self.assertEqual(new_bw_limit, bw_limit) self.assertEqual(new_burst, burst) tc.delete_tbf_bw_limit() bw_limit, burst = tc.get_tbf_bw_limits() self.assertIsNone(bw_limit) self.assertIsNone(burst) class TcPolicyClassTestCase(functional_base.BaseSudoTestCase): def _remove_ns(self, namespace): priv_ip_lib.remove_netns(namespace) def _create_two_namespaces(self): self.ns = ['ns1_' + uuidutils.generate_uuid(), 'ns2_' + uuidutils.generate_uuid()] self.device = ['int1', 'int2'] self.mac = [] self.ip = ['10.100.0.1/24', '10.100.0.2/24'] list(map(priv_ip_lib.create_netns, self.ns)) ip_wrapper = ip_lib.IPWrapper(self.ns[0]) ip_wrapper.add_veth(self.device[0], self.device[1], self.ns[1]) for i in range(2): self.addCleanup(self._remove_ns, self.ns[i]) ip_device = ip_lib.IPDevice(self.device[i], self.ns[i]) self.mac.append(ip_device.link.address) ip_device.link.set_up() ip_device.addr.add(self.ip[i]) def test_list_tc_policy_class_retrieve_statistics(self): statistics = {'bytes', 'packets', 'drop', 'overlimits', 'bps', 'pps', 'qlen', 'backlog'} self._create_two_namespaces() tc_lib.add_tc_qdisc(self.device[0], 'htb', parent='root', handle='1:', namespace=self.ns[0]) tc_lib.add_tc_policy_class(self.device[0], '1:', '1:10', max_kbps=1000, burst_kb=900, min_kbps=500, namespace=self.ns[0]) tc_lib.add_tc_filter_match_mac(self.device[0], '1:', '1:10', self.mac[1], namespace=self.ns[0]) tc_classes = tc_lib.list_tc_policy_class(self.device[0], namespace=self.ns[0]) self.assertEqual(1, len(tc_classes)) self.assertEqual(statistics, set(tc_classes[0]['stats'])) bytes = tc_classes[0]['stats']['bytes'] 
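        # 'bytes' and 'packets' snapshot the class counters before any
        # traffic is sent; a stats dict looks roughly like
        # {'bytes': 1300, 'packets': 14, 'drop': 0, ...} (values here are
        # illustrative). The ping below must increase both counters.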
packets = tc_classes[0]['stats']['packets'] net_helpers.assert_ping(self.ns[1], netaddr.IPNetwork(self.ip[0]).ip, count=1) tc_classes = tc_lib.list_tc_policy_class(self.device[0], namespace=self.ns[0]) self.assertGreater(tc_classes[0]['stats']['bytes'], bytes) self.assertGreater(tc_classes[0]['stats']['packets'], packets) def test_add_tc_policy_class_check_min_kbps_values(self): def warning_args(rate, min_rate): return ('TC HTB class policy rate %(rate)s (bytes/second) is ' 'lower than the minimum accepted %(min_rate)s ' '(bytes/second), for device %(device)s, qdisc ' '%(qdisc)s and classid %(classid)s', {'rate': rate, 'min_rate': min_rate, 'classid': '1:10', 'device': self.device[0], 'qdisc': '1:'}) self._create_two_namespaces() tc_lib.add_tc_qdisc(self.device[0], 'htb', parent='root', handle='1:', namespace=self.ns[0]) with mock.patch.object(tc_lib, 'LOG') as mock_log: # rate > min_rate: OK tc_lib.add_tc_policy_class(self.device[0], '1:', '1:10', max_kbps=2000, burst_kb=1000, min_kbps=4, namespace=self.ns[0]) mock_log.warning.assert_not_called() # rate < min_rate: min_rate = 466 with burst = 128000 tc_lib.add_tc_policy_class(self.device[0], '1:', '1:10', max_kbps=2000, burst_kb=1000, min_kbps=3, namespace=self.ns[0]) mock_log.warning.assert_called_once_with( *warning_args(3 * 128, tc_lib._calc_min_rate(1000 * 128))) # rate < min_rate: min_rate = 466 with burst = 0.8 ceil = 256000 mock_log.reset_mock() tc_lib.add_tc_policy_class(self.device[0], '1:', '1:10', max_kbps=2000, burst_kb=None, min_kbps=5, namespace=self.ns[0]) min_rate = tc_lib._calc_min_rate(qos_consts.DEFAULT_BURST_RATE * 2000 * 128) mock_log.warning.assert_called_once_with( *warning_args(5 * 128, min_rate)) class TcFiltersTestCase(functional_base.BaseSudoTestCase): def _remove_ns(self, namespace): priv_ip_lib.remove_netns(namespace) def _create_two_namespaces_connected_using_vxlan(self): """Create two namespaces connected with a veth pair and VXLAN --------------------------------- ---------------------------------- (ns1) | | (ns2) int1: 10.0.100.1/24 <-----------|----|------------> int2: 10.0.100.2/24 | | | | |> int1_vxlan1: 10.0.200.1/24 | | int1_vxlan2: 10.0.200.2/24 <| --------------------------------- ---------------------------------- """ self.vxlan_id = 100 self.ns = ['ns1_' + uuidutils.generate_uuid(), 'ns2_' + uuidutils.generate_uuid()] self.device = ['int1', 'int2'] self.device_vxlan = ['int_vxlan1', 'int_vxlan2'] self.mac_vxlan = [] self.ip = ['10.100.0.1/24', '10.100.0.2/24'] self.ip_vxlan = ['10.200.0.1/24', '10.200.0.2/24'] for i in range(len(self.ns)): priv_ip_lib.create_netns(self.ns[i]) self.addCleanup(self._remove_ns, self.ns[i]) ip_wrapper = ip_lib.IPWrapper(self.ns[i]) if i == 0: ip_wrapper.add_veth(self.device[0], self.device[1], self.ns[1]) ip_wrapper.add_vxlan(self.device_vxlan[i], self.vxlan_id, dev=self.device[i]) ip_device = ip_lib.IPDevice(self.device[i], self.ns[i]) ip_device.link.set_up() ip_device.addr.add(self.ip[i]) ip_device_vxlan = ip_lib.IPDevice(self.device_vxlan[i], self.ns[i]) self.mac_vxlan.append(ip_device_vxlan.link.address) ip_device_vxlan.link.set_up() ip_device_vxlan.addr.add(self.ip_vxlan[i]) bridge_lib.FdbInterface.append( '00:00:00:00:00:00', self.device_vxlan[0], namespace=self.ns[0], ip_dst=str(netaddr.IPNetwork(self.ip[1]).ip)) bridge_lib.FdbInterface.append( '00:00:00:00:00:00', self.device_vxlan[1], namespace=self.ns[1], ip_dst=str(netaddr.IPNetwork(self.ip[0]).ip)) def test_add_tc_filter_vxlan(self): # The traffic control is applied on the veth pair device of the first # 
namespace (self.ns[0]). The traffic created from the VXLAN interface # when replying to the ping (sent from the other namespace), is # encapsulated in a VXLAN frame and goes through the veth pair # interface. self._create_two_namespaces_connected_using_vxlan() tc_lib.add_tc_qdisc(self.device[0], 'htb', parent='root', handle='1:', namespace=self.ns[0]) classes = tc_lib.list_tc_policy_class(self.device[0], namespace=self.ns[0]) self.assertEqual(0, len(classes)) class_ids = [] for i in range(1, 10): class_id = '1:%s' % i class_ids.append(class_id) tc_lib.add_tc_policy_class( self.device[0], '1:', class_id, namespace=self.ns[0], min_kbps=1000, max_kbps=2000, burst_kb=1600) # Add a filter for a randomly chosen created class, in the first # namespace veth pair device, with the VXLAN MAC address. The traffic # from the VXLAN device must go through this chosen class. chosen_class_id = random.choice(class_ids) tc_lib.add_tc_filter_vxlan( self.device[0], '1:', chosen_class_id, self.mac_vxlan[0], self.vxlan_id, namespace=self.ns[0]) tc_classes = tc_lib.list_tc_policy_class(self.device[0], namespace=self.ns[0]) for tc_class in (c for c in tc_classes if c['classid'] == chosen_class_id): bytes = tc_class['stats']['bytes'] packets = tc_class['stats']['packets'] break else: self.fail('TC class %(class_id)s is not present in the device ' '%(device)s' % {'class_id': chosen_class_id, 'device': self.device[0]}) net_helpers.assert_ping( self.ns[1], netaddr.IPNetwork(self.ip_vxlan[0]).ip, count=1) tc_classes = tc_lib.list_tc_policy_class(self.device[0], namespace=self.ns[0]) for tc_class in tc_classes: if tc_class['classid'] == chosen_class_id: self.assertGreater(tc_class['stats']['bytes'], bytes) self.assertGreater(tc_class['stats']['packets'], packets) else: self.assertEqual(0, tc_class['stats']['bytes']) self.assertEqual(0, tc_class['stats']['packets']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/linux/test_utils.py0000644000175000017500000000704100000000000027151 0ustar00coreycorey00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import functools

from neutron.agent.common import async_process
from neutron.agent.linux import utils
from neutron.common import utils as common_utils
from neutron.tests.functional.agent.linux import test_async_process
from neutron.tests.functional import base as functional_base


class TestPIDHelpers(test_async_process.AsyncProcessTestFramework):
    def test_get_cmdline_from_pid_and_pid_invoked_with_cmdline(self):
        cmd = ['tail', '-f', self.test_file_path]
        proc = async_process.AsyncProcess(cmd)
        proc.start(block=True)
        self.addCleanup(proc.stop)
        pid = proc.pid
        self.assertEqual(cmd, utils.get_cmdline_from_pid(pid))
        self.assertTrue(utils.pid_invoked_with_cmdline(pid, cmd))
        self.assertEqual([], utils.get_cmdline_from_pid(-1))


class TestGetRootHelperChildPid(functional_base.BaseSudoTestCase):
    def _addcleanup_sleep_process(self, parent_pid):
        sleep_pid = utils.execute(
            ['ps', '--ppid', parent_pid, '-o', 'pid=']).strip()
        self.addCleanup(
            utils.execute,
            ['kill', '-9', sleep_pid],
            check_exit_code=False,
            run_as_root=True)

    def test_get_root_helper_child_pid_returns_first_child(self):
        """Test that the first child, not the lowest child pid, is returned.

        The test creates the following process tree:

            sudo
              |
              +--rootwrap
                    |
                    +--bash
                         |
                         +--sleep 100

        and verifies that the pid of the `bash' command is returned.
        """
        def wait_for_sleep_is_spawned(parent_pid):
            proc_tree = utils.execute(
                ['pstree', parent_pid], check_exit_code=False)
            processes = [command.strip()
                         for command in proc_tree.split('---') if command]
            if processes:
                return 'sleep' == processes[-1]

        cmd = ['bash', '-c', '(sleep 100)']
        proc = async_process.AsyncProcess(cmd, run_as_root=True)
        proc.start()
        # Root helpers spawn their child processes asynchronously, and we
        # don't want to use proc.start(block=True) as that uses
        # get_root_helper_child_pid (the method under test) internally.
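        # Illustrative pstree output once the whole tree is spawned (the
        # '---' separators are what wait_for_sleep_is_spawned splits on):
        #     sudo---rootwrap---bash---sleep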
sudo_pid = proc._process.pid common_utils.wait_until_true( functools.partial( wait_for_sleep_is_spawned, sudo_pid), sleep=0.1) child_pid = utils.get_root_helper_child_pid( sudo_pid, cmd, run_as_root=True) self.assertIsNotNone( child_pid, "get_root_helper_child_pid is expected to return the pid of the " "bash process") self._addcleanup_sleep_process(child_pid) with open('/proc/%s/cmdline' % child_pid, 'r') as f_proc_cmdline: cmdline = f_proc_cmdline.readline().split('\0')[0] self.assertIn('bash', cmdline) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3990455 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/ovn/0000755000175000017500000000000000000000000024041 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/ovn/__init__.py0000644000175000017500000000000000000000000026140 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3990455 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/ovn/metadata/0000755000175000017500000000000000000000000025621 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/ovn/metadata/__init__.py0000644000175000017500000000000000000000000027720 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/ovn/metadata/test_metadata_agent.py0000644000175000017500000002712100000000000032173 0ustar00coreycorey00000000000000# Copyright 2020 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslo_config import fixture as fixture_config from oslo_utils import uuidutils from ovsdbapp.backend.ovs_idl import event from ovsdbapp.backend.ovs_idl import idlutils from ovsdbapp.tests.functional.schema.ovn_southbound import event as test_event from neutron.agent.ovn.metadata import agent from neutron.agent.ovn.metadata import ovsdb from neutron.agent.ovn.metadata import server as metadata_server from neutron.common.ovn import constants as ovn_const from neutron.common import utils as n_utils from neutron.conf.agent.metadata import config as meta_config from neutron.conf.agent.ovn.metadata import config as meta_config_ovn from neutron.tests.functional import base class MetadataAgentHealthEvent(event.WaitEvent): event_name = 'MetadataAgentHealthEvent' def __init__(self, chassis, sb_cfg, timeout=5): self.chassis = chassis self.sb_cfg = sb_cfg super(MetadataAgentHealthEvent, self).__init__( (self.ROW_UPDATE,), 'Chassis', (('name', '=', self.chassis),), timeout=timeout) def matches(self, event, row, old=None): if not super(MetadataAgentHealthEvent, self).matches(event, row, old): return False return int(row.external_ids.get( ovn_const.OVN_AGENT_METADATA_SB_CFG_KEY, 0)) >= self.sb_cfg class TestMetadataAgent(base.TestOVNFunctionalBase): OVN_BRIDGE = 'br-int' FAKE_CHASSIS_HOST = 'ovn-host-fake' def setUp(self): super(TestMetadataAgent, self).setUp() self.handler = self.sb_api.idl.notify_handler # We only have OVN NB and OVN SB running for functional tests mock.patch.object(ovsdb, 'MetadataAgentOvsIdl').start() self._mock_get_ovn_br = mock.patch.object( agent.MetadataAgent, '_get_ovn_bridge', return_value=self.OVN_BRIDGE).start() self.agent = self._start_metadata_agent() def _start_metadata_agent(self): conf = self.useFixture(fixture_config.Config()).conf conf.register_opts(meta_config.SHARED_OPTS) conf.register_opts(meta_config.UNIX_DOMAIN_METADATA_PROXY_OPTS) conf.register_opts(meta_config.METADATA_PROXY_HANDLER_OPTS) conf.register_opts(meta_config_ovn.OVS_OPTS, group='ovs') meta_config_ovn.setup_privsep() ovn_sb_db = self.ovsdb_server_mgr.get_ovsdb_connection_path('sb') conf.set_override('ovn_sb_connection', ovn_sb_db, group='ovn') # We don't need the HA proxy server running for now p = mock.patch.object(metadata_server, 'UnixDomainMetadataProxy') p.start() self.addCleanup(p.stop) self.chassis_name = self.add_fake_chassis(self.FAKE_CHASSIS_HOST) mock.patch.object(agent.MetadataAgent, '_get_own_chassis_name', return_value=self.chassis_name).start() agt = agent.MetadataAgent(conf) agt.start() # Metadata agent will open connections to OVS and SB databases. # Close connections to them when the test ends, self.addCleanup(agt.ovs_idl.ovsdb_connection.stop) self.addCleanup(agt.sb_idl.ovsdb_connection.stop) return agt def test_metadata_agent_healthcheck(self): chassis_row = self.sb_api.db_find( 'Chassis', ('name', '=', self.chassis_name)).execute( check_error=True)[0] # Assert that, prior to creating a resource the metadata agent # didn't populate the external_ids from the Chassis self.assertNotIn(ovn_const.OVN_AGENT_METADATA_SB_CFG_KEY, chassis_row['external_ids']) # Let's list the agents to force the nb_cfg to be bumped on NB # db, which will automatically increment the nb_cfg counter on # NB_Global and make ovn-controller copy it over to SB_Global. Upon # this event, Metadata agent will update the external_ids on its # Chassis row to signal that it's healthy. 
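        # After that round trip the Chassis row ends up with something like
        # external_ids = {'neutron:ovn-metadata-sb-cfg': '1'} (the key shown
        # here is an assumption for illustration; the code uses the
        # OVN_AGENT_METADATA_SB_CFG_KEY constant).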
row_event = MetadataAgentHealthEvent(self.chassis_name, 1) self.handler.watch_event(row_event) self.new_list_request('agents').get_response(self.api) # If we do not time out waiting for the event, then we are assured # that the metadata agent has populated the external_ids from the # chassis with the nb_cfg, 1 revisions when listing the agents. self.assertTrue(row_event.wait()) def _create_metadata_port(self, txn, lswitch_name): mdt_port_name = 'ovn-mdt-' + uuidutils.generate_uuid() txn.add( self.nb_api.lsp_add( lswitch_name, mdt_port_name, type='localport', addresses='AA:AA:AA:AA:AA:AA 192.168.122.123', external_ids={ ovn_const.OVN_CIDRS_EXT_ID_KEY: '192.168.122.123/24'})) def _create_logical_switch_port(self): lswitch_name = 'ovn-' + uuidutils.generate_uuid() lswitchport_name = 'ovn-port-' + uuidutils.generate_uuid() # It may take some time to ovn-northd to translate from OVN NB DB to # the OVN SB DB. Wait for port binding event to happen before binding # the port to chassis. pb_event = test_event.WaitForPortBindingEvent(lswitchport_name) self.handler.watch_event(pb_event) with self.nb_api.transaction(check_error=True, log_errors=True) as txn: txn.add( self.nb_api.ls_add(lswitch_name)) txn.add( self.nb_api.create_lswitch_port( lswitchport_name, lswitch_name)) self._create_metadata_port(txn, lswitch_name) self.assertTrue(pb_event.wait()) return lswitchport_name @mock.patch.object(agent.PortBindingChassisCreatedEvent, 'run') def test_agent_resync_on_non_existing_bridge(self, mock_pbinding): # The agent has initialized with br-int and above list_br doesn't # return it, hence the agent should trigger reconfiguration and store # new br-new value to its attribute. self.assertEqual(self.OVN_BRIDGE, self.agent.ovn_bridge) lswitchport_name = self._create_logical_switch_port() # Trigger PortBindingChassisCreatedEvent self.sb_api.lsp_bind(lswitchport_name, self.chassis_name).execute( check_error=True, log_errors=True) exc = Exception('PortBindingChassisCreatedEvent was not called') def check_mock_pbinding(): if mock_pbinding.call_count < 1: return False args = mock_pbinding.call_args[0] self.assertEqual('update', args[0]) self.assertEqual(lswitchport_name, args[1].logical_port) self.assertEqual(self.chassis_name, args[1].chassis[0].name) return True n_utils.wait_until_true(check_mock_pbinding, timeout=10, exception=exc) @mock.patch.object(agent.PortBindingChassisDeletedEvent, 'run') @mock.patch.object(agent.PortBindingChassisCreatedEvent, 'run') def test_agent_events(self, m_pb_created, m_pb_deleted): lswitchport_name = self._create_logical_switch_port() self.sb_api.lsp_bind(lswitchport_name, self.chassis_name).execute( check_error=True, log_errors=True) def pb_created(): if m_pb_created.call_count < 1: return False args = m_pb_created.call_args[0] self.assertEqual('update', args[0]) self.assertEqual(self.chassis_name, args[1].chassis[0].name) self.assertFalse(args[2].chassis) return True n_utils.wait_until_true( pb_created, timeout=10, exception=Exception( "PortBindingChassisCreatedEvent didn't happen on port " "binding.")) self.sb_api.lsp_unbind(lswitchport_name).execute( check_error=True, log_errors=True) def pb_deleted(): if m_pb_deleted.call_count < 1: return False args = m_pb_deleted.call_args[0] self.assertEqual('update', args[0]) self.assertFalse(args[1].chassis) self.assertEqual(self.chassis_name, args[2].chassis[0].name) return True n_utils.wait_until_true( pb_deleted, timeout=10, exception=Exception( "PortBindingChassisDeletedEvent didn't happen on port" "unbind.")) def 
test_agent_registration_at_chassis_create_event(self): chassis = self.sb_api.lookup('Chassis', self.chassis_name) self.assertIn(ovn_const.OVN_AGENT_METADATA_ID_KEY, chassis.external_ids) # Delete Chassis and assert self.del_fake_chassis(chassis.name) self.assertRaises(idlutils.RowNotFound, self.sb_api.lookup, 'Chassis', self.chassis_name) # Re-add the Chassis self.add_fake_chassis(self.FAKE_CHASSIS_HOST, name=self.chassis_name) def check_for_metadata(): chassis = self.sb_api.lookup('Chassis', self.chassis_name) return ovn_const.OVN_AGENT_METADATA_ID_KEY in chassis.external_ids exc = Exception('Agent metadata failed to re-register itself ' 'after the Chassis %s was re-created' % self.chassis_name) # Check if metadata agent was re-registered chassis = self.sb_api.lookup('Chassis', self.chassis_name) n_utils.wait_until_true( check_for_metadata, timeout=10, exception=exc) def test_metadata_agent_only_monitors_own_chassis(self): # We already have the fake chassis which we should be monitoring, so # create an event looking for a change to another chassis other_name = uuidutils.generate_uuid() other_chassis = self.add_fake_chassis(self.FAKE_CHASSIS_HOST, name=other_name) self.assertEqual(other_chassis, other_name) event = MetadataAgentHealthEvent(chassis=other_name, sb_cfg=-1, timeout=0) # Use the agent's sb_idl to watch for the event since it has condition self.agent.sb_idl.idl.notify_handler.watch_event(event) # Use the test sb_api to set other_chassis values since shouldn't exist # on agent's sb_idl self.sb_api.db_set( 'Chassis', other_chassis, ('external_ids', {'test': 'value'})).execute(check_error=True) event2 = MetadataAgentHealthEvent(chassis=self.chassis_name, sb_cfg=-1) self.agent.sb_idl.idl.notify_handler.watch_event(event2) # Use the test's sb_api again to send a command so we can see if it # completes and short-circuit the need to wait for a timeout to pass # the test. If we get the result to this, we would have gotten the # previous result as well. self.sb_api.db_set( 'Chassis', self.chassis_name, ('external_ids', {'test': 'value'})).execute(check_error=True) self.assertTrue(event2.wait()) self.assertFalse(event.wait()) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3990455 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/ovsdb/0000755000175000017500000000000000000000000024354 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/ovsdb/__init__.py0000644000175000017500000000000000000000000026453 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3990455 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/ovsdb/native/0000755000175000017500000000000000000000000025642 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/ovsdb/native/__init__.py0000644000175000017500000000000000000000000027741 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/ovsdb/native/test_connection.py0000644000175000017500000000455700000000000031425 0ustar00coreycorey00000000000000# Copyright (c) 2018 Red Hat, Inc. 
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import threading from ovsdbapp import event from neutron.agent.common import ovs_lib from neutron.tests.functional import base class WaitForBridgesEvent(event.RowEvent): event_name = 'WaitForBridgesEvent' ONETIME = True def __init__(self, bridges, timeout=5): self.bridges_not_seen = set(bridges) self.timeout = timeout self.event = threading.Event() super(WaitForBridgesEvent, self).__init__( (self.ROW_CREATE,), 'Bridge', None) def matches(self, event, row, old=None): if event not in self.events or row._table.name != self.table: return False self.bridges_not_seen.discard(row.name) return not self.bridges_not_seen def run(self, event, row, old): self.event.set() def wait(self): return self.event.wait(self.timeout) class BridgeMonitorTestCase(base.BaseSudoTestCase): def _delete_bridges(self, bridges): for bridge in bridges: self.ovs.delete_bridge(bridge) def test_create_bridges(self): bridges_to_monitor = ['br01', 'br02', 'br03'] bridges_to_create = ['br01', 'br02', 'br03', 'br04', 'br05'] self.ovs = ovs_lib.BaseOVS() self.ovs.ovsdb.idl_monitor.start_bridge_monitor(bridges_to_monitor) self.addCleanup(self._delete_bridges, bridges_to_create) event = WaitForBridgesEvent(bridges_to_monitor) self.ovs.ovsdb.idl_monitor.notify_handler.watch_event(event) for bridge in bridges_to_create: self.ovs.add_bridge(bridge) self.assertTrue(event.wait()) self.assertEqual(bridges_to_monitor, self.ovs.ovsdb.idl_monitor.bridges_added) # reading bridges_added drains the monitor's list, so a second read # is expected to return an empty list self.assertEqual([], self.ovs.ovsdb.idl_monitor.bridges_added)
neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/test_dhcp_agent.py
# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
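# NOTE: the WaitForBridgesEvent class above follows a common ovsdbapp
# pattern: a RowEvent subclass pairs matches() with a threading.Event so a
# test can block until the OVSDB change it cares about arrives. A minimal
# standalone sketch of that pattern follows; the class name and the
# assumption that the monitored table has a 'name' column are illustrative,
# not part of any neutron or ovsdbapp API.
import threading

from ovsdbapp import event


class WaitForRowByNameEvent(event.RowEvent):
    """One-shot event that fires when a row with a given name is created."""
    ONETIME = True

    def __init__(self, table, name, timeout=5):
        self.name = name
        self.timeout = timeout
        self.event = threading.Event()
        super(WaitForRowByNameEvent, self).__init__(
            (self.ROW_CREATE,), table, None)

    def matches(self, event, row, old=None):
        return (event in self.events and
                row._table.name == self.table and
                row.name == self.name)

    def run(self, event, row, old):
        # Called by the notify handler once matches() returns True.
        self.event.set()

    def wait(self):
        # False means the row never showed up within the timeout.
        return self.event.wait(self.timeout)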
import copy import os.path import eventlet import fixtures import mock import netaddr from neutron_lib import constants as lib_const from oslo_config import fixture as fixture_config from oslo_utils import uuidutils from neutron.agent.common import ovs_lib from neutron.agent.dhcp import agent from neutron.agent import dhcp_agent from neutron.agent.linux import dhcp from neutron.agent.linux import external_process from neutron.agent.linux import interface from neutron.agent.linux import ip_lib from neutron.agent.linux import utils from neutron.agent.metadata import driver as metadata_driver from neutron.common import utils as common_utils from neutron.conf.agent import common as config from neutron.tests.common import net_helpers from neutron.tests.functional.agent.linux import helpers from neutron.tests.functional import base class DHCPAgentOVSTestFramework(base.BaseSudoTestCase): _DHCP_PORT_MAC_ADDRESS = netaddr.EUI("24:77:03:7d:00:4c") _DHCP_PORT_MAC_ADDRESS.dialect = netaddr.mac_unix _TENANT_PORT_MAC_ADDRESS = netaddr.EUI("24:77:03:7d:00:3a") _TENANT_PORT_MAC_ADDRESS.dialect = netaddr.mac_unix _IP_ADDRS = { 4: {'addr': '192.168.10.11', 'cidr': '192.168.10.0/24', 'gateway': '192.168.10.1'}, 6: {'addr': '2001:db8:0:1::c0a8:a0b', 'cidr': '2001:db8:0:1::c0a8:a00/120', 'gateway': '2001:db8:0:1::c0a8:a01'}, } def setUp(self): super(DHCPAgentOVSTestFramework, self).setUp() config.setup_logging() self.conf_fixture = self.useFixture(fixture_config.Config()) self.conf = self.conf_fixture.conf dhcp_agent.register_options(self.conf) # NOTE(cbrandily): TempDir fixture creates a folder with 0o700 # permissions but agent dir must be readable by dnsmasq user (nobody) agent_config_dir = self.useFixture(fixtures.TempDir()).path self.useFixture( helpers.RecursivePermDirFixture(agent_config_dir, 0o555)) self.conf.set_override("dhcp_confs", agent_config_dir) self.conf.set_override( 'interface_driver', 'neutron.agent.linux.interface.OVSInterfaceDriver') self.conf.set_override('report_interval', 0, 'AGENT') br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge self.conf.set_override('integration_bridge', br_int.br_name, 'OVS') self.mock_plugin_api = mock.patch( 'neutron.agent.dhcp.agent.DhcpPluginApi').start().return_value mock.patch('neutron.agent.rpc.PluginReportStateAPI').start() self.agent = agent.DhcpAgentWithStateReport('localhost') self.ovs_driver = interface.OVSInterfaceDriver(self.conf) self.conf.set_override('check_child_processes_interval', 1, 'AGENT') def network_dict_for_dhcp(self, dhcp_enabled=True, ip_version=lib_const.IP_VERSION_4, prefix_override=None): net_id = uuidutils.generate_uuid() subnet_dict = self.create_subnet_dict( net_id, dhcp_enabled, ip_version, prefix_override) port_dict = self.create_port_dict( net_id, subnet_dict.id, mac_address=str(self._DHCP_PORT_MAC_ADDRESS), ip_version=ip_version) port_dict.device_id = common_utils.get_dhcp_agent_device_id( net_id, self.conf.host) net_dict = self.create_network_dict( net_id, [subnet_dict], [port_dict]) return net_dict def create_subnet_dict(self, net_id, dhcp_enabled=True, ip_version=lib_const.IP_VERSION_4, prefix_override=None): cidr = self._IP_ADDRS[ip_version]['cidr'] if prefix_override is not None: cidr = '/'.join((cidr.split('/')[0], str(prefix_override))) sn_dict = dhcp.DictModel( id=uuidutils.generate_uuid(), network_id=net_id, ip_version=ip_version, cidr=cidr, gateway_ip=self._IP_ADDRS[ip_version]['gateway'], enable_dhcp=dhcp_enabled, dns_nameservers=[], host_routes=[], ipv6_ra_mode=None, ipv6_address_mode=None) if 
ip_version == lib_const.IP_VERSION_6: sn_dict['ipv6_address_mode'] = lib_const.DHCPV6_STATEFUL return sn_dict def create_port_dict(self, network_id, subnet_id, mac_address, ip_version=lib_const.IP_VERSION_4, ip_address=None): ip_address = (self._IP_ADDRS[ip_version]['addr'] if not ip_address else ip_address) port_dict = dhcp.DictModel(id=uuidutils.generate_uuid(), name="foo", mac_address=mac_address, network_id=network_id, admin_state_up=True, device_id=uuidutils.generate_uuid(), device_owner="foo", fixed_ips=[{"subnet_id": subnet_id, "ip_address": ip_address}]) return port_dict def create_network_dict(self, net_id, subnets=None, ports=None, non_local_subnets=None): subnets = [] if not subnets else subnets ports = [] if not ports else ports non_local_subnets = [] if not non_local_subnets else non_local_subnets net_dict = dhcp.NetModel(id=net_id, subnets=subnets, non_local_subnets=non_local_subnets, ports=ports, admin_state_up=True, tenant_id=uuidutils.generate_uuid()) return net_dict def get_interface_name(self, network, port): device_manager = dhcp.DeviceManager(conf=self.conf, plugin=mock.Mock()) return device_manager.get_interface_name(network, port) def configure_dhcp_for_network(self, network, dhcp_enabled=True): self.agent.configure_dhcp_for_network(network) self.addCleanup(self._cleanup_network, network, dhcp_enabled) def _cleanup_network(self, network, dhcp_enabled): self.mock_plugin_api.release_dhcp_port.return_value = None if dhcp_enabled: self.agent.call_driver('disable', network) def assert_dhcp_resources(self, network, dhcp_enabled): ovs = ovs_lib.BaseOVS() port = network.ports[0] iface_name = self.get_interface_name(network, port) self.assertEqual(dhcp_enabled, ovs.port_exists(iface_name)) self.assert_dhcp_namespace(network.namespace, dhcp_enabled) self.assert_accept_ra_disabled(network.namespace) self.assert_dhcp_device(network.namespace, iface_name, dhcp_enabled) def assert_dhcp_namespace(self, namespace, dhcp_enabled): self.assertEqual(dhcp_enabled, ip_lib.network_namespace_exists(namespace)) def assert_accept_ra_disabled(self, namespace): actual = ip_lib.IPWrapper(namespace=namespace).netns.execute( ['sysctl', '-b', 'net.ipv6.conf.default.accept_ra']) self.assertEqual('0', actual) def assert_dhcp_device(self, namespace, dhcp_iface_name, dhcp_enabled): dev = ip_lib.IPDevice(dhcp_iface_name, namespace) self.assertEqual(dhcp_enabled, ip_lib.device_exists( dhcp_iface_name, namespace)) if dhcp_enabled: self.assertEqual(self._DHCP_PORT_MAC_ADDRESS, dev.link.address) def _plug_port_for_dhcp_request(self, network, port): namespace = network.namespace vif_name = self.get_interface_name(network.id, port) self.ovs_driver.plug(network.id, port.id, vif_name, port.mac_address, self.conf.OVS.integration_bridge, namespace=namespace) def _ip_list_for_vif(self, vif_name, namespace): ip_device = ip_lib.IPDevice(vif_name, namespace) return ip_device.addr.list(ip_version=lib_const.IP_VERSION_4) def _get_network_port_for_allocation_test(self): network = self.network_dict_for_dhcp() ip_addr = netaddr.IPNetwork(network.subnets[0].cidr)[1] port = self.create_port_dict( network.id, network.subnets[0].id, mac_address=str(self._TENANT_PORT_MAC_ADDRESS), ip_address=str(ip_addr)) return network, port def assert_good_allocation_for_port(self, network, port): vif_name = self.get_interface_name(network.id, port) self._run_dhclient(vif_name, network) predicate = lambda: len( self._ip_list_for_vif(vif_name, network.namespace)) common_utils.wait_until_true(predicate, 10) ip_list = 
self._ip_list_for_vif(vif_name, network.namespace) cidr = ip_list[0].get('cidr') ip_addr = str(netaddr.IPNetwork(cidr).ip) self.assertEqual(port.fixed_ips[0].ip_address, ip_addr) def assert_bad_allocation_for_port(self, network, port): vif_name = self.get_interface_name(network.id, port) self._run_dhclient(vif_name, network) # we need to wait some time (10 seconds is enough) and check that # dhclient has not configured an IP address for the interface eventlet.sleep(10) ip_list = self._ip_list_for_vif(vif_name, network.namespace) self.assertEqual([], ip_list) def _run_dhclient(self, vif_name, network): # NOTE: Before running dhclient we should create a resolv.conf file in # the namespace where dhclient will run to test address allocation for # the port; otherwise, dhclient will override the system # /etc/resolv.conf. By default, the folder for the dhcp-agent's # namespace doesn't exist, which is why we use AdminDirFixture to # create a directory with admin permissions in /etc/netns/ and touch # resolv.conf in it. etc_dir = '/etc/netns/%s' % network.namespace self.useFixture(helpers.AdminDirFixture(etc_dir)) cmd = ['touch', os.path.join(etc_dir, 'resolv.conf')] utils.execute(cmd, run_as_root=True) dhclient_cmd = ['dhclient', '--no-pid', '-d', '-1', vif_name] proc = net_helpers.RootHelperProcess( cmd=dhclient_cmd, namespace=network.namespace) self.addCleanup(proc.wait) self.addCleanup(proc.kill) def _get_metadata_proxy_process(self, network): return external_process.ProcessManager( self.conf, network.id, network.namespace, service=metadata_driver.HAPROXY_SERVICE) class DHCPAgentOVSTestCase(DHCPAgentOVSTestFramework): def test_create_subnet_with_dhcp(self): dhcp_enabled = True for version in [4, 6]: network = self.network_dict_for_dhcp( dhcp_enabled, ip_version=version) self.configure_dhcp_for_network(network=network, dhcp_enabled=dhcp_enabled) self.assert_dhcp_resources(network, dhcp_enabled) def test_create_subnet_with_non64_ipv6_cidrs(self): # the agent should not throw exceptions on weird prefixes dhcp_enabled = True version = 6 for i in (0, 1, 41, 81, 121, 127, 128): network = self.network_dict_for_dhcp( dhcp_enabled, ip_version=version, prefix_override=i) self.configure_dhcp_for_network(network=network, dhcp_enabled=dhcp_enabled) self.assertFalse(self.agent.needs_resync_reasons[network.id], msg="prefix size of %s triggered resync" % i) def test_agent_mtu_set_on_interface_driver(self): network = self.network_dict_for_dhcp() network["mtu"] = 789 self.configure_dhcp_for_network(network=network) port = network.ports[0] iface_name = self.get_interface_name(network, port) dev = ip_lib.IPDevice(iface_name, network.namespace) self.assertEqual(789, dev.link.mtu) def test_good_address_allocation(self): network, port = self._get_network_port_for_allocation_test() network.ports.append(port) self.configure_dhcp_for_network(network=network) self._plug_port_for_dhcp_request(network, port) self.assert_good_allocation_for_port(network, port) def test_bad_address_allocation(self): network, port = self._get_network_port_for_allocation_test() network.ports.append(port) self.configure_dhcp_for_network(network=network) bad_mac_address = netaddr.EUI(self._TENANT_PORT_MAC_ADDRESS.value + 1) bad_mac_address.dialect = netaddr.mac_unix port.mac_address = str(bad_mac_address) self._plug_port_for_dhcp_request(network, port) self.assert_bad_allocation_for_port(network, port) def _spawn_network_metadata_proxy(self): network = self.network_dict_for_dhcp() self.conf.set_override('enable_isolated_metadata', True) 
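# NOTE: as an aside, common_utils.wait_until_true, used throughout these
# tests, just polls a predicate until it holds and raises when the timeout
# expires. The real helper is eventlet-based; the plain-time sketch below
# only illustrates the semantics and is not the actual neutron
# implementation.
import time


def wait_until_true_sketch(predicate, timeout=60, sleep=1, exception=None):
    deadline = time.time() + timeout
    while not predicate():
        if time.time() > deadline:
            # Raise the caller-supplied exception, or a generic one.
            raise exception or RuntimeError('Timed out waiting on predicate')
        time.sleep(sleep)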
self.addCleanup(self.agent.disable_isolated_metadata_proxy, network) self.configure_dhcp_for_network(network=network) pm = self._get_metadata_proxy_process(network) common_utils.wait_until_true( lambda: pm.active, timeout=5, sleep=0.01, exception=RuntimeError("Metadata proxy didn't spawn")) return (pm, network) def test_metadata_proxy_respawned(self): pm, network = self._spawn_network_metadata_proxy() old_pid = pm.pid utils.execute(['kill', '-9', old_pid], run_as_root=True) common_utils.wait_until_true( lambda: pm.active and pm.pid != old_pid, timeout=5, sleep=0.1, exception=RuntimeError("Metadata proxy didn't respawn")) def test_stale_metadata_proxy_killed(self): pm, network = self._spawn_network_metadata_proxy() self.conf.set_override('enable_isolated_metadata', False) self.configure_dhcp_for_network(network=network) common_utils.wait_until_true( lambda: not pm.active, timeout=5, sleep=0.1, exception=RuntimeError("Stale metadata proxy didn't get killed")) def _test_metadata_proxy_spawn_kill_with_subnet_create_delete(self): network = self.network_dict_for_dhcp(ip_version=lib_const.IP_VERSION_6) self.configure_dhcp_for_network(network=network) pm = self._get_metadata_proxy_process(network) # A newly created network with an IPv6 subnet will not have a metadata # proxy self.assertFalse(pm.active) new_network = copy.deepcopy(network) dhcp_enabled_ipv4_subnet = self.create_subnet_dict(network.id) new_network.subnets.append(dhcp_enabled_ipv4_subnet) self.mock_plugin_api.get_network_info.return_value = new_network self.agent.refresh_dhcp_helper(network.id) # Metadata proxy should be spawned for the newly added subnet common_utils.wait_until_true( lambda: pm.active, timeout=5, sleep=0.1, exception=RuntimeError("Metadata proxy didn't spawn")) self.mock_plugin_api.get_network_info.return_value = network self.agent.refresh_dhcp_helper(network.id) # Metadata proxy should be killed because the network doesn't need it. 
common_utils.wait_until_true( lambda: not pm.active, timeout=5, sleep=0.1, exception=RuntimeError("Metadata proxy didn't get killed")) def test_enable_isolated_metadata_for_subnet_create_delete(self): self.conf.set_override('force_metadata', False) self.conf.set_override('enable_isolated_metadata', True) self._test_metadata_proxy_spawn_kill_with_subnet_create_delete() def test_force_metadata_for_subnet_create_delete(self): self.conf.set_override('force_metadata', True) self.conf.set_override('enable_isolated_metadata', False) self._test_metadata_proxy_spawn_kill_with_subnet_create_delete() def test_notify_port_ready_after_enable_dhcp(self): network = self.network_dict_for_dhcp() dhcp_port = self.create_port_dict( network.id, network.subnets[0].id, '24:77:03:7d:00:4d', ip_address='192.168.10.11') dhcp_port.device_owner = lib_const.DEVICE_OWNER_DHCP network.ports.append(dhcp_port) self.agent.start_ready_ports_loop() self.configure_dhcp_for_network(network) ports_to_send = {p.id for p in network.ports} common_utils.wait_until_true( lambda: self.mock_plugin_api.dhcp_ready_on_ports.called, timeout=1, sleep=0.1, exception=RuntimeError("'dhcp_ready_on_ports' was not called")) self.mock_plugin_api.dhcp_ready_on_ports.assert_called_with( ports_to_send) def test_dhcp_processing_pool_size(self): mock.patch.object(self.agent, 'call_driver').start().return_value = ( True) self.agent.update_isolated_metadata_proxy = mock.Mock() self.agent.disable_isolated_metadata_proxy = mock.Mock() network_info_1 = self.network_dict_for_dhcp() self.configure_dhcp_for_network(network=network_info_1) self.assertEqual(agent.DHCP_PROCESS_GREENLET_MIN, self.agent._pool.size) network_info_2 = self.network_dict_for_dhcp() self.configure_dhcp_for_network(network=network_info_2) self.assertEqual(agent.DHCP_PROCESS_GREENLET_MIN, self.agent._pool.size) network_info_list = [network_info_1, network_info_2] for _i in range(agent.DHCP_PROCESS_GREENLET_MAX + 1): ni = self.network_dict_for_dhcp() self.configure_dhcp_for_network(network=ni) network_info_list.append(ni) self.assertEqual(agent.DHCP_PROCESS_GREENLET_MAX, self.agent._pool.size) for network in network_info_list: self.agent.disable_dhcp_helper(network.id) agent_network_info_len = len(self.agent.cache.get_network_ids()) if agent_network_info_len < agent.DHCP_PROCESS_GREENLET_MIN: self.assertEqual(agent.DHCP_PROCESS_GREENLET_MIN, self.agent._pool.size) elif (agent.DHCP_PROCESS_GREENLET_MIN <= agent_network_info_len <= agent.DHCP_PROCESS_GREENLET_MAX): self.assertEqual(agent_network_info_len, self.agent._pool.size) else: self.assertEqual(agent.DHCP_PROCESS_GREENLET_MAX, self.agent._pool.size)
neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/test_firewall.py
# Copyright 2015 Intel Corporation. # Copyright 2015 Isaku Yamahata # # Copyright 2015 Red Hat, Inc. # All Rights Reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import copy import functools import netaddr from neutron_lib import constants from oslo_config import cfg from oslo_log import log as logging from oslo_utils import uuidutils import testscenarios from neutron.agent.linux import iptables_firewall from neutron.agent.linux import openvswitch_firewall from neutron.agent.linux.openvswitch_firewall import constants as ovsfw_consts from neutron.cmd.sanity import checks from neutron.conf.agent import securitygroups_rpc as security_config from neutron.tests.common import conn_testers from neutron.tests.common import helpers from neutron.tests.functional.agent.l2 import base as l2_base from neutron.tests.functional.agent.linux import base as linux_base from neutron.tests.functional import constants as test_constants LOG = logging.getLogger(__name__) load_tests = testscenarios.load_tests_apply_scenarios reverse_direction = { conn_testers.ConnectionTester.INGRESS: conn_testers.ConnectionTester.EGRESS, conn_testers.ConnectionTester.EGRESS: conn_testers.ConnectionTester.INGRESS} reverse_transport_protocol = { conn_testers.ConnectionTester.TCP: conn_testers.ConnectionTester.UDP, conn_testers.ConnectionTester.UDP: conn_testers.ConnectionTester.TCP} DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' def skip_if_firewall(firewall_name): def outter(f): @functools.wraps(f) def wrap(self, *args, **kwargs): if self.firewall_name == firewall_name: self.skipTest("This test doesn't use %s firewall" % firewall_name) return f(self, *args, **kwargs) return wrap return outter def _add_rule(sg_rules, base, port_range_min=None, port_range_max=None): rule = copy.copy(base) if port_range_min: rule['port_range_min'] = port_range_min if port_range_max: rule['port_range_max'] = port_range_max sg_rules.append(rule) class BaseFirewallTestCase(linux_base.BaseOVSLinuxTestCase): FAKE_SECURITY_GROUP_ID = uuidutils.generate_uuid() MAC_SPOOFED = "fa:16:3e:9a:2f:48" scenarios_iptables = testscenarios.multiply_scenarios( [('IptablesFirewallDriver', {'initialize': 'initialize_iptables', 'firewall_name': 'iptables'})], [('with ipset', {'enable_ipset': True}), ('without ipset', {'enable_ipset': False})]) scenarios_ovs_fw_interfaces = testscenarios.multiply_scenarios( [('OVS Firewall Driver', {'initialize': 'initialize_ovs', 'firewall_name': 'openvswitch'})]) scenarios = scenarios_iptables + scenarios_ovs_fw_interfaces ip_cidr = None vlan_range = set(range(1, test_constants.VLAN_COUNT - 1)) def setUp(self): security_config.register_securitygroups_opts() self.net_id = uuidutils.generate_uuid() super(BaseFirewallTestCase, self).setUp() self.tester, self.firewall = getattr(self, self.initialize)() if self.firewall_name == "openvswitch": self.assign_vlan_to_peers() self.src_port_desc = self._create_port_description( self.tester.vm_port_id, [self.tester.vm_ip_address], self.tester.vm_mac_address, [self.FAKE_SECURITY_GROUP_ID], self.net_id) # FIXME(jlibosva): We should consider calling prepare_port_filter with # a deferred bridge, depending on its performance self.firewall.prepare_port_filter(self.src_port_desc) # Traffic coming from patch-port is always VLAN tagged self.tester.set_peer_port_as_patch_port() def initialize_iptables(self): cfg.CONF.set_override('enable_ipset', self.enable_ipset, 'SECURITYGROUP') br_name = ('brq' + self.net_id)[:constants.LINUX_DEV_LEN] tester = self.useFixture( conn_testers.LinuxBridgeConnectionTester(self.ip_cidr, bridge_name=br_name)) 
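# NOTE: the scenarios_* definitions above use testscenarios: the
# multiply_scenarios call builds the cross product of the given scenario
# lists, and load_tests_apply_scenarios (set at module level) clones every
# test once per resulting scenario, injecting the dict entries as
# attributes. A tiny self-contained illustration, with made-up scenario
# names:
import testscenarios

drivers = [('iptables', {'initialize': 'initialize_iptables'})]
ipset = [('with ipset', {'enable_ipset': True}),
         ('without ipset', {'enable_ipset': False})]
# Produces two scenarios, named 'iptables,with ipset' and
# 'iptables,without ipset', each carrying the merged attribute dicts.
combined = testscenarios.multiply_scenarios(drivers, ipset)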
firewall_drv = iptables_firewall.IptablesFirewallDriver( namespace=tester.bridge_namespace) return tester, firewall_drv def initialize_ovs(self): # Tests for OVS require kernel >= 4.3 and OVS >= 2.5 if not checks.ovs_conntrack_supported(): self.skipTest("Open vSwitch with conntrack is not installed " "on this machine. To run tests for OVS/CT firewall," " please meet the requirements (kernel>=4.3, " "OVS>=2.5). More info at " "https://github.com/openvswitch/ovs/blob/master/" "FAQ.md") self.of_helper = l2_base.OVSOFControllerHelper() self.of_helper.addCleanup = self.addCleanup self.of_helper.start_of_controller(cfg.CONF) tester = self.useFixture( conn_testers.OVSConnectionTester(self.ip_cidr, self.of_helper.br_int_cls)) firewall_drv = openvswitch_firewall.OVSFirewallDriver(tester.bridge) return tester, firewall_drv def assign_vlan_to_peers(self): vlan = helpers.get_not_used_vlan(self.firewall.int_br.br, self.vlan_range) LOG.debug("Using %d vlan tag for this test", vlan) self.tester.set_vm_tag(vlan) self.tester.set_peer_tag(vlan) @staticmethod def _create_port_description(port_id, ip_addresses, mac_address, sg_ids, net_id): return {'admin_state_up': True, 'device': port_id, 'device_owner': DEVICE_OWNER_COMPUTE, 'fixed_ips': ip_addresses, 'mac_address': mac_address, 'port_security_enabled': True, 'security_groups': sg_ids, 'status': 'ACTIVE', 'network_id': net_id} def _apply_security_group_rules(self, sg_id, sg_rules): with self.firewall.defer_apply(): self.firewall.update_security_group_rules(sg_id, sg_rules) self.firewall.update_port_filter(self.src_port_desc) def _apply_security_group_members(self, sg_id, members): with self.firewall.defer_apply(): self.firewall.update_security_group_members(sg_id, members) self.firewall.update_port_filter(self.src_port_desc) class FirewallTestCase(BaseFirewallTestCase): ip_cidr = '192.168.0.1/24' @skip_if_firewall('openvswitch') def test_rule_application_converges(self): sg_rules = [{'ethertype': 'IPv4', 'direction': 'egress'}, {'ethertype': 'IPv6', 'direction': 'egress'}, {'ethertype': 'IPv4', 'direction': 'ingress', 'source_ip_prefix': '0.0.0.0/0', 'protocol': 'icmp'}, {'ethertype': 'IPv6', 'direction': 'ingress', 'source_ip_prefix': '0::0/0', 'protocol': 'ipv6-icmp'}] # make sure port ranges converge on all protocols with and without # port ranges (prevents regression of bug 1502924) for proto in ('tcp', 'udp', 'icmp'): for version in ('IPv4', 'IPv6'): if proto == 'icmp' and version == 'IPv6': proto = 'ipv6-icmp' base = {'ethertype': version, 'direction': 'ingress', 'protocol': proto} sg_rules.append(copy.copy(base)) _add_rule(sg_rules, base, port_range_min=50, port_range_max=50) _add_rule(sg_rules, base, port_range_max=55) _add_rule(sg_rules, base, port_range_min=60, port_range_max=60) _add_rule(sg_rules, base, port_range_max=65) # add some single-host rules to prevent regression of bug 1502917 sg_rules.append({'ethertype': 'IPv4', 'direction': 'ingress', 'source_ip_prefix': '77.77.77.77/32'}) sg_rules.append({'ethertype': 'IPv6', 'direction': 'ingress', 'source_ip_prefix': 'fe80::1/128'}) self.firewall.update_security_group_rules( self.FAKE_SECURITY_GROUP_ID, sg_rules) self.firewall.prepare_port_filter(self.src_port_desc) # after one prepare call, another apply should be a NOOP self.assertEqual([], self.firewall.iptables._apply()) orig_sg_rules = copy.copy(sg_rules) for proto in ('tcp', 'udp', 'icmp'): for version in ('IPv4', 'IPv6'): if proto == 'icmp' and version == 'IPv6': proto = 'ipv6-icmp' # make sure the firewall is in a converged state 
self.firewall.update_security_group_rules( self.FAKE_SECURITY_GROUP_ID, orig_sg_rules) self.firewall.update_port_filter(self.src_port_desc) sg_rules = copy.copy(orig_sg_rules) # remove one rule and add another to make sure it results in # exactly one delete and insert sg_rules.pop(0 if version == 'IPv4' else 1) sg_rules.append({'ethertype': version, 'direction': 'egress', 'protocol': proto}) self.firewall.update_security_group_rules( self.FAKE_SECURITY_GROUP_ID, sg_rules) result = self.firewall.update_port_filter(self.src_port_desc) deletes = [r for r in result if r.startswith('-D ')] creates = [r for r in result if r.startswith('-I ')] self.assertEqual(1, len(deletes)) self.assertEqual(1, len(creates)) # quick sanity check to make sure the insert was for the # correct proto self.assertIn('-p %s' % proto, creates[0]) # another apply should be a NOOP if the right rule was removed # and the new one was inserted in the correct position self.assertEqual([], self.firewall.iptables._apply()) @skip_if_firewall('openvswitch') def test_rule_ordering_correct(self): sg_rules = [ {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'tcp', 'port_range_min': i, 'port_range_max': i} for i in range(50, 61) ] self.firewall.update_security_group_rules( self.FAKE_SECURITY_GROUP_ID, sg_rules) self.firewall.prepare_port_filter(self.src_port_desc) self._assert_sg_out_tcp_rules_appear_in_order(sg_rules) # remove a rule and add a new one sg_rules.pop(5) sg_rules.insert(8, {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'tcp', 'port_range_min': 400, 'port_range_max': 400}) self.firewall.update_security_group_rules( self.FAKE_SECURITY_GROUP_ID, sg_rules) self.firewall.prepare_port_filter(self.src_port_desc) self._assert_sg_out_tcp_rules_appear_in_order(sg_rules) # reverse all of the rules (requires lots of deletes and inserts) sg_rules = list(reversed(sg_rules)) self.firewall.update_security_group_rules( self.FAKE_SECURITY_GROUP_ID, sg_rules) self.firewall.prepare_port_filter(self.src_port_desc) self._assert_sg_out_tcp_rules_appear_in_order(sg_rules) def _assert_sg_out_tcp_rules_appear_in_order(self, sg_rules): outgoing_rule_pref = '-A %s-o%s' % (self.firewall.iptables.wrap_name, self.src_port_desc['device'][3:13]) rules = [ r for r in self.firewall.iptables.get_rules_for_table('filter') if r.startswith(outgoing_rule_pref) ] # we want to ensure the rules went in in the same order we sent indexes = [rules.index('%s -p tcp -m tcp --dport %s -j RETURN' % (outgoing_rule_pref, i['port_range_min'])) for i in sg_rules] # all indexes should be in order with no unexpected rules in between self.assertEqual(list(range(indexes[0], indexes[-1] + 1)), indexes) def test_ingress_icmp_secgroup(self): # update the sg_group to make ping pass sg_rules = [{'ethertype': constants.IPv4, 'direction': constants.INGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_ICMP}, {'ethertype': constants.IPv4, 'direction': constants.EGRESS_DIRECTION}] self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) def test_mac_spoofing(self): sg_rules = [{'ethertype': constants.IPv4, 'direction': constants.INGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_ICMP}, {'ethertype': constants.IPv4, 'direction': constants.EGRESS_DIRECTION}] self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) 
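# NOTE: negative tests in this suite (e.g. test_bad_address_allocation in
# test_dhcp_agent.py above, and the MAC_SPOOFED constant here) rely on
# deliberately mismatched MAC addresses. netaddr makes deriving one a
# one-liner, since an EUI exposes its integer value. A short sketch with
# arbitrary example addresses:
import netaddr

mac = netaddr.EUI('24:77:03:7d:00:3a')
spoofed = netaddr.EUI(mac.value + 1)  # the numerically adjacent MAC
spoofed.dialect = netaddr.mac_unix    # renders as '24:77:3:7d:0:3b'
assert str(spoofed) != str(mac)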
self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.vm_mac_address = self.MAC_SPOOFED self.tester.flush_arp_tables() self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.EGRESS) @skip_if_firewall('openvswitch') def test_mac_spoofing_works_without_port_security_enabled(self): self.src_port_desc['port_security_enabled'] = False self.firewall.update_port_filter(self.src_port_desc) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.vm_mac_address = self.MAC_SPOOFED self.tester.flush_arp_tables() self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.EGRESS) def test_port_security_enabled_set_to_false(self): self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.src_port_desc['port_security_enabled'] = False self.firewall.update_port_filter(self.src_port_desc) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) def test_dhcp_requests_from_vm(self): # DHCPv4 uses source port 67, destination port 68 self.tester.assert_connection(direction=self.tester.EGRESS, protocol=self.tester.UDP, src_port=68, dst_port=67) def test_dhcp_server_forbidden_on_vm(self): self.tester.assert_no_connection(direction=self.tester.EGRESS, protocol=self.tester.UDP, src_port=67, dst_port=68) self.tester.assert_no_connection(direction=self.tester.INGRESS, protocol=self.tester.UDP, src_port=68, dst_port=67) def test_ip_spoofing(self): sg_rules = [{'ethertype': constants.IPv4, 'direction': constants.INGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_ICMP}] self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) not_allowed_ip = "%s/24" % ( netaddr.IPAddress(self.tester.vm_ip_address) + 1) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.vm_ip_cidr = not_allowed_ip self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.EGRESS) self.tester.assert_no_connection(protocol=self.tester.UDP, src_port=68, dst_port=67, direction=self.tester.EGRESS) @skip_if_firewall('openvswitch') def test_ip_spoofing_works_without_port_security_enabled(self): self.src_port_desc['port_security_enabled'] = False self.firewall.update_port_filter(self.src_port_desc) sg_rules = [{'ethertype': constants.IPv4, 'direction': constants.INGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_ICMP}] self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) not_allowed_ip = "%s/24" % ( netaddr.IPAddress(self.tester.vm_ip_address) + 1) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.vm_ip_cidr = not_allowed_ip self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.EGRESS) def test_allowed_address_pairs(self): sg_rules = [{'ethertype': constants.IPv4, 'direction': constants.INGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_ICMP}, {'ethertype': constants.IPv4, 'direction': constants.EGRESS_DIRECTION}] self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) port_mac = self.tester.vm_mac_address 
allowed_ip = netaddr.IPAddress(self.tester.vm_ip_address) + 1 not_allowed_ip = "%s/24" % (allowed_ip + 1) allowed_mac = 'fa:16:3e:8c:84:13' not_allowed_mac = 'fa:16:3e:8c:84:14' self.src_port_desc['allowed_address_pairs'] = [ {'mac_address': port_mac, 'ip_address': "%s/32" % allowed_ip}, {'mac_address': allowed_mac, 'ip_address': "%s/32" % allowed_ip}] allowed_ip = "%s/24" % allowed_ip self.firewall.update_port_filter(self.src_port_desc) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.EGRESS) self.tester.vm_ip_cidr = allowed_ip self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.EGRESS) self.tester.vm_ip_cidr = not_allowed_ip self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.EGRESS) self.tester.vm_mac_address = allowed_mac self.tester.vm_ip_cidr = allowed_ip self.tester.flush_arp_tables() self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.EGRESS) self.tester.vm_mac_address = allowed_mac self.tester.vm_ip_cidr = not_allowed_ip self.tester.flush_arp_tables() self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.EGRESS) self.tester.vm_mac_address = not_allowed_mac self.tester.vm_ip_cidr = allowed_ip self.tester.flush_arp_tables() self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.EGRESS) def test_arp_is_allowed(self): self.tester.assert_connection(protocol=self.tester.ARP, direction=self.tester.EGRESS) self.tester.assert_connection(protocol=self.tester.ARP, direction=self.tester.INGRESS) def _test_rule(self, direction, protocol): sg_rules = [{'ethertype': constants.IPv4, 'direction': direction, 'protocol': protocol}] self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) not_allowed_direction = reverse_direction[direction] not_allowed_protocol = reverse_transport_protocol[protocol] self.tester.assert_connection(protocol=protocol, direction=direction) self.tester.assert_no_connection(protocol=not_allowed_protocol, direction=direction) self.tester.assert_no_connection(protocol=protocol, direction=not_allowed_direction) def test_ingress_tcp_rule(self): self._test_rule(self.tester.INGRESS, self.tester.TCP) def test_next_port_closed(self): # https://bugs.launchpad.net/neutron/+bug/1611991 was caused by wrong # masking in rules which allow traffic to a port with even port number port = 42 for direction in (self.tester.EGRESS, self.tester.INGRESS): sg_rules = [{'ethertype': constants.IPv4, 'direction': direction, 'protocol': constants.PROTO_NAME_TCP, 'source_port_range_min': port, 'source_port_range_max': port}] self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self.tester.assert_connection(protocol=self.tester.TCP, direction=direction, src_port=port) self.tester.assert_no_connection(protocol=self.tester.TCP, direction=direction, src_port=port + 1) def test_ingress_udp_rule(self): self._test_rule(self.tester.INGRESS, self.tester.UDP) def 
test_egress_tcp_rule(self): self._test_rule(self.tester.EGRESS, self.tester.TCP) def test_egress_udp_rule(self): self._test_rule(self.tester.EGRESS, self.tester.UDP) def test_connection_with_destination_port_range(self): port_min = 12345 port_max = 12346 sg_rules = [{'ethertype': constants.IPv4, 'direction': constants.INGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_TCP, 'port_range_min': port_min, 'port_range_max': port_max}] self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self.tester.assert_connection(protocol=self.tester.TCP, direction=self.tester.INGRESS, dst_port=port_min) self.tester.assert_connection(protocol=self.tester.TCP, direction=self.tester.INGRESS, dst_port=port_max) self.tester.assert_no_connection(protocol=self.tester.TCP, direction=self.tester.INGRESS, dst_port=port_min - 1) self.tester.assert_no_connection(protocol=self.tester.TCP, direction=self.tester.INGRESS, dst_port=port_max + 1) def test_connection_with_source_port_range(self): source_port_min = 12345 source_port_max = 12346 sg_rules = [{'ethertype': constants.IPv4, 'direction': constants.EGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_TCP, 'source_port_range_min': source_port_min, 'source_port_range_max': source_port_max}] self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self.tester.assert_connection(protocol=self.tester.TCP, direction=self.tester.EGRESS, src_port=source_port_min) self.tester.assert_connection(protocol=self.tester.TCP, direction=self.tester.EGRESS, src_port=source_port_max) self.tester.assert_no_connection(protocol=self.tester.TCP, direction=self.tester.EGRESS, src_port=source_port_min - 1) self.tester.assert_no_connection(protocol=self.tester.TCP, direction=self.tester.EGRESS, src_port=source_port_max + 1) @skip_if_firewall('iptables') def test_established_connection_is_cut(self): port = 12345 sg_rules = [{'ethertype': constants.IPv4, 'direction': constants.INGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_TCP, 'port_range_min': port, 'port_range_max': port}] connection = {'protocol': self.tester.TCP, 'direction': self.tester.INGRESS, 'dst_port': port} self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self.tester.establish_connection(**connection) self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, list()) self.tester.assert_no_established_connection(**connection) def test_preventing_firewall_blink(self): direction = self.tester.INGRESS sg_rules = [{'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'tcp'}] self.tester.start_sending_icmp(direction) self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, {}) self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self.tester.stop_sending_icmp(direction) packets_sent = self.tester.get_sent_icmp_packets(direction) packets_received = self.tester.get_received_icmp_packets(direction) self.assertGreater(packets_sent, 0) self.assertEqual(packets_received, 0) def test_remote_security_groups(self): self.tester.set_peer_port_as_vm_port() remote_sg_id = 'remote_sg_id' peer_port_desc = self._create_port_description( self.tester.peer_port_id, [self.tester.peer_ip_address], self.tester.peer_mac_address, [remote_sg_id], self.net_id) vm_sg_members = {'IPv4': [self.tester.peer_ip_address]} peer_sg_rules = [{'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'icmp'}] self.firewall.update_security_group_rules(remote_sg_id, peer_sg_rules) 
self.firewall.update_security_group_members(remote_sg_id, vm_sg_members) self.firewall.prepare_port_filter(peer_port_desc) vm_sg_rules = [{'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'icmp', 'remote_group_id': remote_sg_id}] self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, vm_sg_rules) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.assert_no_connection(protocol=self.tester.TCP, direction=self.tester.INGRESS) self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.EGRESS) def test_related_connection(self): """Test that ICMP net-unreachable packets get back. When the destination address of IP traffic is not reachable, ICMP packets are returned. These packets are marked as RELATED traffic by conntrack, and this test case validates that such packets are not dropped by the firewall, since ingress ICMP packets are not allowed in this test case. The address 1.2.3.4 used below is outside of the subnet used in the tester object. """ # Enable ip forwarding on the interface in order to reply with # destination net unreachable self.tester._peer.execute([ 'sysctl', '-w', 'net.ipv4.conf.%s.forwarding=1' % self.tester._peer.port.name]) self.tester.set_vm_default_gateway(self.tester.peer_ip_address) vm_sg_rules = [{'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'icmp'}] self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, vm_sg_rules) self.tester.assert_net_unreachable(self.tester.EGRESS, '1.2.3.4') @skip_if_firewall('iptables') def test_tracked_connection(self): # put an openflow rule in place to perform a CT lookup so the packet # will carry conntrack information self.tester.bridge.add_flow( table=0, priority=200, dl_type="0x0800", ct_state=ovsfw_consts.OF_STATE_NOT_TRACKED, actions="ct(table=0)" ) # update the sg_group to make ping pass sg_rules = [{'ethertype': constants.IPv4, 'direction': constants.INGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_ICMP}, {'ethertype': constants.IPv4, 'direction': constants.EGRESS_DIRECTION}] self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) class FirewallTestCaseIPv6(BaseFirewallTestCase): scenarios = BaseFirewallTestCase.scenarios_ovs_fw_interfaces ip_cidr = '2001:db8:aaaa::1/64' def test_icmp_from_specific_address(self): sg_rules = [{'ethertype': constants.IPv6, 'direction': constants.INGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_ICMP, 'source_ip_prefix': self.tester.peer_ip_address}] self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) def test_icmp_to_specific_address(self): sg_rules = [{'ethertype': constants.IPv6, 'direction': constants.EGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_ICMP, 'destination_ip_prefix': self.tester.peer_ip_address}] self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.EGRESS) self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.EGRESS) def test_tcp_from_specific_address(self): sg_rules = [{'ethertype': constants.IPv6, 'direction': constants.INGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_TCP, 
'source_ip_prefix': self.tester.peer_ip_address}] self.tester.assert_no_connection(protocol=self.tester.TCP, direction=self.tester.INGRESS) self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self.tester.assert_connection(protocol=self.tester.TCP, direction=self.tester.INGRESS) self.tester.assert_no_connection(protocol=self.tester.UDP, direction=self.tester.INGRESS) self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) def test_tcp_to_specific_address(self): sg_rules = [{'ethertype': constants.IPv6, 'direction': constants.EGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_TCP, 'destination_ip_prefix': self.tester.peer_ip_address}] self.tester.assert_no_connection(protocol=self.tester.TCP, direction=self.tester.EGRESS) self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self.tester.assert_connection(protocol=self.tester.TCP, direction=self.tester.EGRESS) self.tester.assert_no_connection(protocol=self.tester.UDP, direction=self.tester.EGRESS) self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.EGRESS) def test_udp_from_specific_address(self): sg_rules = [{'ethertype': constants.IPv6, 'direction': constants.INGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_UDP, 'source_ip_prefix': self.tester.peer_ip_address}] self.tester.assert_no_connection(protocol=self.tester.UDP, direction=self.tester.INGRESS) self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self.tester.assert_connection(protocol=self.tester.UDP, direction=self.tester.INGRESS) self.tester.assert_no_connection(protocol=self.tester.TCP, direction=self.tester.INGRESS) self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) def test_udp_to_specific_address(self): sg_rules = [{'ethertype': constants.IPv6, 'direction': constants.EGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_UDP, 'destination_ip_prefix': self.tester.peer_ip_address}] self.tester.assert_no_connection(protocol=self.tester.UDP, direction=self.tester.EGRESS) self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) self.tester.assert_connection(protocol=self.tester.UDP, direction=self.tester.EGRESS) self.tester.assert_no_connection(protocol=self.tester.TCP, direction=self.tester.EGRESS) self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.EGRESS) @skip_if_firewall('openvswitch') def test_ip_spoofing(self): sg_rules = [{'ethertype': constants.IPv6, 'direction': constants.INGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_ICMP}] self._apply_security_group_rules(self.FAKE_SECURITY_GROUP_ID, sg_rules) not_allowed_ip = "%s/64" % ( netaddr.IPAddress(self.tester.vm_ip_address) + 1) self.tester.assert_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.vm_ip_cidr = not_allowed_ip self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.INGRESS) self.tester.assert_no_connection(protocol=self.tester.ICMP, direction=self.tester.EGRESS) self.tester.assert_no_connection(protocol=self.tester.UDP, src_port=546, dst_port=547, direction=self.tester.EGRESS)
neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/test_l2_lb_agent.py
# All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from oslo_utils import uuidutils import testtools from neutron.agent.linux import ip_lib from neutron.objects import trunk from neutron.plugins.ml2.drivers.linuxbridge.agent import \ linuxbridge_neutron_agent from neutron.services.trunk.drivers.linuxbridge.agent import trunk_plumber from neutron.tests.functional.agent.linux import test_ip_lib lba = linuxbridge_neutron_agent class LinuxBridgeAgentTests(test_ip_lib.IpLibTestFramework): def setUp(self): super(LinuxBridgeAgentTests, self).setUp() agent_rpc = ('neutron.agent.rpc.PluginApi') mock.patch(agent_rpc).start() mock.patch('neutron.agent.rpc.PluginReportStateAPI').start() cfg.CONF.set_override('enable_vxlan', False, 'VXLAN') def test_validate_interface_mappings(self): mappings = {'physnet1': 'int1', 'physnet2': 'int2'} with testtools.ExpectedException(SystemExit): lba.LinuxBridgeManager({}, mappings) self.manage_device( self.generate_device_details()._replace(namespace=None, name='int1')) with testtools.ExpectedException(SystemExit): lba.LinuxBridgeManager({}, mappings) self.manage_device( self.generate_device_details()._replace(namespace=None, name='int2')) lba.LinuxBridgeManager({}, mappings) def test_validate_bridge_mappings(self): mappings = {'physnet1': 'br-eth1'} with testtools.ExpectedException(SystemExit): lba.LinuxBridgeManager(mappings, {}) self.manage_device( self.generate_device_details()._replace(namespace=None, name='br-eth1')) lba.LinuxBridgeManager(mappings, {}) def test_vlan_subinterfaces(self): attr = self.generate_device_details() device = self.manage_device(attr) devname = device.name plumber = trunk_plumber.Plumber(namespace=attr.namespace) for i in range(20): subname = 'vtest-%s' % i plumber._create_vlan_subint(devname, subname, i) # ensure no addresses were assigned (e.g. ipv6) vlan_int = ip_lib.IPDevice(subname, namespace=attr.namespace) self.assertFalse(vlan_int.addr.list()) children = plumber._get_vlan_children(devname) expected = {('vtest-%s' % i, i) for i in range(20)} self.assertEqual(expected, children) # delete one plumber._safe_delete_device('vtest-19') children = plumber._get_vlan_children(devname) expected = {('vtest-%s' % i, i) for i in range(19)} self.assertEqual(expected, children) # ensure they are removed by parent removal self._safe_delete_device(device) self.assertFalse(plumber._get_vlan_children(devname)) def test_vlan_QinQ_subinterfaces(self): # the trunk model does not support this right now, but this is to # ensure the plumber on the agent side doesn't explode in their # presence in case an operator does something fancy or we have a # race where a trunk's parent port is converted to a subport while # the agent is offline. 
attr = self.generate_device_details() device = self.manage_device(attr) devname = device.name plumber = trunk_plumber.Plumber(namespace=attr.namespace) for i in range(20): plumber._create_vlan_subint(devname, 'vtest-%s' % i, i) plumber._create_vlan_subint('vtest-%s' % i, 'qinq-%s' % i, 2) top_level = {('vtest-%s' % i, i) for i in range(20)} for i in range(20): # as we iterate, we delete a vlan from each dev and ensure it # didn't break the top-level vlans self.assertEqual({('qinq-%s' % i, 2)}, plumber._get_vlan_children('vtest-%s' % i)) plumber._safe_delete_device('qinq-%s' % i) self.assertEqual(set(), plumber._get_vlan_children('vtest-%i' % i)) self.assertEqual(top_level, plumber._get_vlan_children(devname)) def test_ensure_trunk_subports(self): attr = self.generate_device_details() device = self.manage_device(attr) devname = device.name plumber = trunk_plumber.Plumber(namespace=attr.namespace) plumber._trunk_device_name = lambda x: devname trunk_obj = self._gen_trunk() plumber.ensure_trunk_subports(trunk_obj) # ensure no mutation the second time with mock.patch.object(plumber, '_safe_delete_device', side_effect=RuntimeError()): plumber.ensure_trunk_subports(trunk_obj) while trunk_obj.sub_ports: # drain down the sub-ports and make sure it keeps # them equal trunk_obj.sub_ports.pop() plumber.ensure_trunk_subports(trunk_obj) expected = {(plumber._get_tap_device_name(sp.port_id), sp.segmentation_id) for sp in trunk_obj.sub_ports} wired = plumber._get_vlan_children(devname) self.assertEqual(expected, wired) def _gen_trunk(self): trunk_obj = trunk.Trunk(id=uuidutils.generate_uuid(), port_id=uuidutils.generate_uuid(), project_id=uuidutils.generate_uuid()) subports = [trunk.SubPort(id=uuidutils.generate_uuid(), port_id=uuidutils.generate_uuid(), segmentation_type='vlan', trunk_id=trunk_obj.id, segmentation_id=i) for i in range(20, 40)] trunk_obj.sub_ports = subports return trunk_obj
neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/test_l2_ovs_agent.py
# Copyright (c) 2015 Red Hat, Inc. # Copyright (c) 2015 SUSE Linux Products GmbH # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
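# NOTE: test_ovs_restarted_event further down in this module leans on the
# neutron_lib callbacks registry. A minimal sketch of the subscribe/publish
# flow follows; the resource/event pair mirrors that test, while the
# handler function and the bare publish call are illustrative assumptions,
# not the agent's actual wiring.
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources


def _on_ovs_restarted(resource, event, trigger, payload=None):
    # Invoked once something publishes AGENT/OVS_RESTARTED.
    print('OVS restarted, resyncing flows')


registry.subscribe(_on_ovs_restarted, resources.AGENT, events.OVS_RESTARTED)
# The publishing side fires the event when it detects the restart:
registry.publish(resources.AGENT, events.OVS_RESTARTED, None)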
import time import mock from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron.common import utils from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.tests.common import net_helpers from neutron.tests.functional.agent.l2 import base class TestOVSAgent(base.OVSAgentTestFramework): def test_port_creation_and_deletion(self): self.setup_agent_and_ports( port_dicts=self.create_test_ports()) self.wait_until_ports_state(self.ports, up=True) for port in self.ports: self.agent.int_br.delete_port(port['vif_name']) self.wait_until_ports_state(self.ports, up=False) def test_no_stale_flows_after_port_delete(self): def find_drop_flow(ofport, flows): for flow in flows: # flow.instructions == [] means actions=drop if (not flow.instructions and ('in_port', ofport) in flow.match.items()): return True return False def num_ports_with_drop_flows(ofports, flows): count = 0 for ofport in ofports: if find_drop_flow(ofport, flows): count = count + 1 return count # setup self.setup_agent_and_ports( port_dicts=self.create_test_ports()) self.wait_until_ports_state(self.ports, up=True) # call port_delete first for port in self.ports: self.agent.port_delete([], port_id=port['id']) portnames = [port["vif_name"] for port in self.ports] ofports = [port.ofport for port in self.agent.int_br.get_vif_ports() if port.port_name in portnames] # wait until ports are marked dead, with drop flow utils.wait_until_true( lambda: num_ports_with_drop_flows( ofports, self.agent.int_br.dump_flows( constants.LOCAL_SWITCHING )) == len(ofports)) # delete the ports on bridge for port in self.ports: self.agent.int_br.delete_port(port['vif_name']) self.wait_until_ports_state(self.ports, up=False) # verify no stale drop flows self.assertEqual(0, num_ports_with_drop_flows( ofports, self.agent.int_br.dump_flows( constants.LOCAL_SWITCHING ) )) def _check_datapath_type_netdev(self, expected, default=False): if not default: self.config.set_override('datapath_type', expected, "OVS") agent = self.create_agent() self.start_agent(agent) for br_name in (getattr(self, br) for br in ('br_int', 'br_tun', 'br_phys')): actual = self.ovs.db_get_val('Bridge', br_name, 'datapath_type') self.assertEqual(expected, actual) self.stop_agent(agent, self.agent_thread) def test_datapath_type_change(self): self._check_datapath_type_netdev('system') self._check_datapath_type_netdev('netdev') def test_datapath_type_netdev(self): self._check_datapath_type_netdev( constants.OVS_DATAPATH_NETDEV) def test_datapath_type_system(self): self._check_datapath_type_netdev( constants.OVS_DATAPATH_SYSTEM) def test_datapath_type_default(self): self._check_datapath_type_netdev( constants.OVS_DATAPATH_SYSTEM, default=True) def test_resync_devices_set_up_after_exception(self): self.setup_agent_and_ports( port_dicts=self.create_test_ports(), trigger_resync=True) self.wait_until_ports_state(self.ports, up=True) def test_reprocess_port_when_ovs_restarts(self): self.setup_agent_and_ports( port_dicts=self.create_test_ports()) self.wait_until_ports_state(self.ports, up=True) self.agent.check_ovs_status.return_value = constants.OVS_RESTARTED # OVS restarted, the agent should reprocess all the ports self.agent.plugin_rpc.update_device_list.reset_mock() self.wait_until_ports_state(self.ports, up=True) def test_resync_dev_up_after_failure(self): self.setup_agent_and_ports( port_dicts=self.create_test_ports(), failed_dev_up=True) # in the RPC mock the first port fails and should # 
be re-synced expected_ports = self.ports + [self.ports[0]] self.wait_until_ports_state(expected_ports, up=True) def test_resync_dev_down_after_failure(self): self.setup_agent_and_ports( port_dicts=self.create_test_ports(), failed_dev_down=True) self.wait_until_ports_state(self.ports, up=True) for port in self.ports: self.agent.int_br.delete_port(port['vif_name']) # in the RPC mock the first port fails and should # be re-synced expected_ports = self.ports + [self.ports[0]] self.wait_until_ports_state(expected_ports, up=False) def test_ancillary_port_creation_and_deletion(self): external_bridge = self.useFixture( net_helpers.OVSBridgeFixture()).bridge self.setup_agent_and_ports( port_dicts=self.create_test_ports(), ancillary_bridge=external_bridge) self.wait_until_ports_state(self.ports, up=True) for port in self.ports: external_bridge.delete_port(port['vif_name']) self.wait_until_ports_state(self.ports, up=False) def test_resync_ancillary_devices(self): external_bridge = self.useFixture( net_helpers.OVSBridgeFixture()).bridge self.setup_agent_and_ports( port_dicts=self.create_test_ports(), ancillary_bridge=external_bridge, trigger_resync=True) self.wait_until_ports_state(self.ports, up=True) def test_resync_ancillary_dev_up_after_failure(self): external_bridge = self.useFixture( net_helpers.OVSBridgeFixture()).bridge self.setup_agent_and_ports( port_dicts=self.create_test_ports(), ancillary_bridge=external_bridge, failed_dev_up=True) # in the RPC mock the first port fails and should # be re-synced expected_ports = self.ports + [self.ports[0]] self.wait_until_ports_state(expected_ports, up=True) def test_resync_ancillary_dev_down_after_failure(self): external_bridge = self.useFixture( net_helpers.OVSBridgeFixture()).bridge self.setup_agent_and_ports( port_dicts=self.create_test_ports(), ancillary_bridge=external_bridge, failed_dev_down=True) self.wait_until_ports_state(self.ports, up=True) for port in self.ports: external_bridge.delete_port(port['vif_name']) # in the RPC mock the first port fails and should # be re-synced expected_ports = self.ports + [self.ports[0]] self.wait_until_ports_state(expected_ports, up=False) def test_port_vlan_tags(self): self.setup_agent_and_ports( port_dicts=self.create_test_ports(), trigger_resync=True) self.wait_until_ports_state(self.ports, up=True) self.assert_vlan_tags(self.ports, self.agent) def _test_assert_bridges_ports_vxlan(self, local_ip=None): agent = self.create_agent(local_ip=local_ip) self.assertTrue(self.ovs.bridge_exists(self.br_int)) self.assertTrue(self.ovs.bridge_exists(self.br_tun)) self.assert_bridge_ports() self.assert_patch_ports(agent) def test_assert_bridges_ports_vxlan_ipv4(self): self._test_assert_bridges_ports_vxlan() def test_assert_bridges_ports_vxlan_ipv6(self): self._test_assert_bridges_ports_vxlan(local_ip='2001:db8:100::1') def test_assert_bridges_ports_no_tunnel(self): self.create_agent(create_tunnels=False) self.assertTrue(self.ovs.bridge_exists(self.br_int)) self.assertFalse(self.ovs.bridge_exists(self.br_tun)) def test_assert_pings_during_br_int_setup_not_lost(self): self.setup_agent_and_ports(port_dicts=self.create_test_ports(), create_tunnels=False) self.wait_until_ports_state(self.ports, up=True) ips = [port['fixed_ips'][0]['ip_address'] for port in self.ports] with net_helpers.async_ping(self.namespace, ips) as done: while not done(): self.agent.setup_integration_br() time.sleep(0.25) def test_assert_br_int_patch_port_ofports_dont_change(self): # When the integration bridge is setup, it should reuse the existing # 
patch ports between br-int and br-tun. self.setup_agent_and_ports(port_dicts=[], create_tunnels=True) patch_int_ofport_before = self.agent.patch_int_ofport patch_tun_ofport_before = self.agent.patch_tun_ofport self.stop_agent(self.agent, self.agent_thread) self.setup_agent_and_ports(port_dicts=[], create_tunnels=True) self.assertEqual(patch_int_ofport_before, self.agent.patch_int_ofport) self.assertEqual(patch_tun_ofport_before, self.agent.patch_tun_ofport) def test_assert_br_phys_patch_port_ofports_dont_change(self): # When the integration bridge is setup, it should reuse the existing # patch ports between br-int and br-phys. self.setup_agent_and_ports(port_dicts=[]) patch_int_ofport_before = self.agent.int_ofports['physnet'] patch_phys_ofport_before = self.agent.phys_ofports['physnet'] self.stop_agent(self.agent, self.agent_thread) self.setup_agent_and_ports(port_dicts=[]) self.assertEqual(patch_int_ofport_before, self.agent.int_ofports['physnet']) self.assertEqual(patch_phys_ofport_before, self.agent.phys_ofports['physnet']) def test_assert_pings_during_br_phys_setup_not_lost_in_vlan_to_flat(self): provider_net = self._create_test_network_dict() provider_net['network_type'] = 'flat' self._test_assert_pings_during_br_phys_setup_not_lost(provider_net) def test_assert_pings_during_br_phys_setup_not_lost_in_vlan_to_vlan(self): provider_net = self._create_test_network_dict() provider_net['network_type'] = 'vlan' provider_net['segmentation_id'] = 876 self._test_assert_pings_during_br_phys_setup_not_lost(provider_net) def _test_assert_pings_during_br_phys_setup_not_lost(self, provider_net): # Separate namespace is needed when pinging from one port to another, # otherwise Linux ping uses loopback instead for sending and receiving # ping, hence ignoring flow setup. ns_phys = self.useFixture(net_helpers.NamespaceFixture()).name ports = self.create_test_ports(amount=2) port_int = ports[0] port_phys = ports[1] ip_int = port_int['fixed_ips'][0]['ip_address'] ip_phys = port_phys['fixed_ips'][0]['ip_address'] self.setup_agent_and_ports(port_dicts=[port_int], create_tunnels=False, network=provider_net) self.plug_ports_to_phys_br(provider_net, [port_phys], namespace=ns_phys) # The OVS agent doesn't monitor the physical bridges, no notification # is sent when a port is up on a physical bridge, hence waiting only # for the ports connected to br-int self.wait_until_ports_state([port_int], up=True) # sanity pings before we start net_helpers.assert_ping(ns_phys, ip_int) net_helpers.assert_ping(self.namespace, ip_phys) with net_helpers.async_ping(ns_phys, [ip_int]) as done: while not done(): self.agent.setup_physical_bridges(self.agent.bridge_mappings) time.sleep(0.25) with net_helpers.async_ping(self.namespace, [ip_phys]) as done: while not done(): self.agent.setup_physical_bridges(self.agent.bridge_mappings) time.sleep(0.25) def test_noresync_after_port_gone(self): '''This will test the scenario where a port is removed after listing it but before getting vif info about it. 
''' self.ports = self.create_test_ports(amount=2) self.agent = self.create_agent(create_tunnels=False) self.network = self._create_test_network_dict() self._plug_ports(self.network, self.ports, self.agent) self.start_agent(self.agent, ports=self.ports, unplug_ports=[self.ports[1]]) self.wait_until_ports_state([self.ports[0]], up=True) self.assertRaises( utils.WaitTimeout, self.wait_until_ports_state, [self.ports[1]], up=True, timeout=10) def test_ovs_restarted_event(self): callback = mock.Mock() self.setup_agent_and_ports( port_dicts=self.create_test_ports()) registry.subscribe(callback, resources.AGENT, events.OVS_RESTARTED) self.agent.check_ovs_status.return_value = constants.OVS_RESTARTED utils.wait_until_true(lambda: callback.call_count, timeout=10) callback.assert_called_with(resources.AGENT, events.OVS_RESTARTED, mock.ANY, payload=None) class TestOVSAgentExtensionConfig(base.OVSAgentTestFramework): def setUp(self): super(TestOVSAgentExtensionConfig, self).setUp() self.config.set_override('extensions', ['qos'], 'agent') self.agent = self.create_agent(create_tunnels=False) def test_report_loaded_extension(self): self.agent._report_state() agent_state = self.agent.state_rpc.report_state.call_args[0][1] self.assertEqual(['qos'], agent_state['configurations']['extensions']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/test_ovs_flows.py0000644000175000017500000004663700000000000026711 0ustar00coreycorey00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as n_const from oslo_config import cfg from oslo_serialization import jsonutils from testtools.content import text_content from neutron.agent.common import ovs_lib from neutron.agent.common import utils from neutron.agent.linux import ip_lib from neutron.cmd.sanity import checks from neutron.common import utils as common_utils from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.plugins.ml2.drivers.openvswitch.agent \ import ovs_neutron_agent as ovsagt from neutron.tests.common import base as common_base from neutron.tests.common import helpers from neutron.tests.common import net_helpers from neutron.tests.functional.agent.l2 import base as l2_base from neutron.tests.functional.agent import test_ovs_lib from neutron.tests.functional import base from neutron.tests import tools OVS_TRACE_FINAL_FLOW = 'Final flow' OVS_TRACE_DATAPATH_ACTIONS = 'Datapath actions' cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.' 
'common.config') class OVSAgentTestBase(test_ovs_lib.OVSBridgeTestBase, base.BaseSudoTestCase, l2_base.OVSOFControllerHelper): def setUp(self): super(OVSAgentTestBase, self).setUp() self.br = self.useFixture(net_helpers.OVSBridgeFixture()).bridge self.start_of_controller(cfg.CONF) self.br_int = self.br_int_cls(self.br.br_name) self.br_int.set_secure_mode() self.br_int.setup_controllers(cfg.CONF) self.br_int.setup_default_table() def _run_trace(self, brname, spec): required_keys = [OVS_TRACE_FINAL_FLOW, OVS_TRACE_DATAPATH_ACTIONS] t = utils.execute(["ovs-appctl", "ofproto/trace", brname, spec], run_as_root=True) trace = {} trace_lines = t.splitlines() for line in trace_lines: (l, sep, r) = line.partition(':') if not sep: continue elif l in required_keys: trace[l] = r for k in required_keys: if k not in trace: self.fail("%s not found in trace %s" % (k, trace_lines)) return trace class ARPSpoofTestCase(OVSAgentTestBase): def setUp(self): # NOTE(kevinbenton): it would be way cooler to use scapy for # these but scapy requires the python process to be running as # root to bind to the ports. super(ARPSpoofTestCase, self).setUp() self.skip_without_arp_support() self.src_addr = '192.168.0.1' self.dst_addr = '192.168.0.2' self.src_namespace = self.useFixture( net_helpers.NamespaceFixture()).name self.dst_namespace = self.useFixture( net_helpers.NamespaceFixture()).name self.src_p = self.useFixture( net_helpers.OVSPortFixture(self.br, self.src_namespace)).port self.dst_p = self.useFixture( net_helpers.OVSPortFixture(self.br, self.dst_namespace)).port # wait to add IPs until after anti-spoof rules to ensure ARP doesn't # happen before self.addOnException(self.collect_flows_and_ports) def collect_flows_and_ports(self, exc_info): nicevif = lambda x: ['%s=%s' % (k, getattr(x, k)) for k in ['ofport', 'port_name', 'switch', 'vif_id', 'vif_mac']] nicedev = lambda x: ['%s=%s' % (k, getattr(x, k)) for k in ['name', 'namespace']] + x.addr.list() details = {'flows': self.br.dump_all_flows(), 'vifs': map(nicevif, self.br.get_vif_ports()), 'src_ip': self.src_addr, 'dest_ip': self.dst_addr, 'sourt_port': nicedev(self.src_p), 'dest_port': nicedev(self.dst_p)} self.addDetail('arp-test-state', text_content(jsonutils.dumps(details, indent=5))) @common_base.no_skip_on_missing_deps def skip_without_arp_support(self): if not checks.arp_header_match_supported(): self.skipTest("ARP header matching not supported") def test_arp_spoof_doesnt_block_normal_traffic(self): self._setup_arp_spoof_for_port(self.src_p.name, [self.src_addr]) self._setup_arp_spoof_for_port(self.dst_p.name, [self.dst_addr]) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) net_helpers.assert_ping(self.src_namespace, self.dst_addr) def test_mac_spoof_blocks_wrong_mac(self): self._setup_arp_spoof_for_port(self.src_p.name, [self.src_addr]) self._setup_arp_spoof_for_port(self.dst_p.name, [self.dst_addr]) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) net_helpers.assert_ping(self.src_namespace, self.dst_addr) # changing the allowed mac should stop the port from working self._setup_arp_spoof_for_port(self.src_p.name, [self.src_addr], mac='00:11:22:33:44:55') net_helpers.assert_no_ping(self.src_namespace, self.dst_addr) def test_arp_spoof_doesnt_block_ipv6(self): self.src_addr = '2000::1' self.dst_addr = '2000::2' self._setup_arp_spoof_for_port(self.src_p.name, [self.src_addr]) self._setup_arp_spoof_for_port(self.dst_p.name, [self.dst_addr]) self.src_p.addr.add('%s/64' % 
self.src_addr) self.dst_p.addr.add('%s/64' % self.dst_addr) # make sure the IPv6 addresses are ready before pinging self.src_p.addr.wait_until_address_ready(self.src_addr) self.dst_p.addr.wait_until_address_ready(self.dst_addr) net_helpers.assert_ping(self.src_namespace, self.dst_addr) def test_arp_spoof_blocks_response(self): # this will prevent the destination from responding to the ARP # request for it's own address self._setup_arp_spoof_for_port(self.dst_p.name, ['192.168.0.3']) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) net_helpers.assert_no_ping(self.src_namespace, self.dst_addr, count=2) def test_arp_spoof_blocks_icmpv6_neigh_advt(self): self.src_addr = '2000::1' self.dst_addr = '2000::2' # this will prevent the destination from responding (i.e., icmpv6 # neighbour advertisement) to the icmpv6 neighbour solicitation # request for it's own address (2000::2) as spoofing rules added # below only allow '2000::3'. self._setup_arp_spoof_for_port(self.dst_p.name, ['2000::3']) self.src_p.addr.add('%s/64' % self.src_addr) self.dst_p.addr.add('%s/64' % self.dst_addr) # make sure the IPv6 addresses are ready before pinging self.src_p.addr.wait_until_address_ready(self.src_addr) self.dst_p.addr.wait_until_address_ready(self.dst_addr) net_helpers.assert_no_ping(self.src_namespace, self.dst_addr, count=2) def test_arp_spoof_blocks_request(self): # this will prevent the source from sending an ARP # request with its own address self._setup_arp_spoof_for_port(self.src_p.name, ['192.168.0.3']) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) ns_ip_wrapper = ip_lib.IPWrapper(self.src_namespace) try: ns_ip_wrapper.netns.execute(['arping', '-I', self.src_p.name, '-c1', self.dst_addr]) tools.fail("arping should have failed. 
The arp request should " "have been blocked.") except RuntimeError: pass def test_arp_spoof_allowed_address_pairs(self): self._setup_arp_spoof_for_port(self.dst_p.name, ['192.168.0.3', self.dst_addr]) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) net_helpers.assert_ping(self.src_namespace, self.dst_addr) def test_arp_spoof_icmpv6_neigh_advt_allowed_address_pairs(self): self.src_addr = '2000::1' self.dst_addr = '2000::2' self._setup_arp_spoof_for_port(self.dst_p.name, ['2000::3', self.dst_addr]) self.src_p.addr.add('%s/64' % self.src_addr) self.dst_p.addr.add('%s/64' % self.dst_addr) # make sure the IPv6 addresses are ready before pinging self.src_p.addr.wait_until_address_ready(self.src_addr) self.dst_p.addr.wait_until_address_ready(self.dst_addr) net_helpers.assert_ping(self.src_namespace, self.dst_addr) def test_arp_spoof_allowed_address_pairs_0cidr(self): self._setup_arp_spoof_for_port(self.dst_p.name, ['9.9.9.9/0', '1.2.3.4']) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) net_helpers.assert_ping(self.src_namespace, self.dst_addr) def test_arp_spoof_disable_port_security(self): # block first and then disable port security to make sure old rules # are cleared self._setup_arp_spoof_for_port(self.dst_p.name, ['192.168.0.3']) self._setup_arp_spoof_for_port(self.dst_p.name, ['192.168.0.3'], psec=False) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) net_helpers.assert_ping(self.src_namespace, self.dst_addr) def test_arp_spoof_disable_network_port(self): # block first and then disable port security to make sure old rules # are cleared self._setup_arp_spoof_for_port(self.dst_p.name, ['192.168.0.3']) self._setup_arp_spoof_for_port( self.dst_p.name, ['192.168.0.3'], device_owner=n_const.DEVICE_OWNER_ROUTER_GW) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) net_helpers.assert_ping(self.src_namespace, self.dst_addr) def _setup_arp_spoof_for_port(self, port, addrs, psec=True, device_owner='nobody', mac=None): vif = next( vif for vif in self.br.get_vif_ports() if vif.port_name == port) ip_addr = addrs.pop() details = {'port_security_enabled': psec, 'fixed_ips': [{'ip_address': ip_addr}], 'device_owner': device_owner, 'allowed_address_pairs': [ dict(ip_address=ip) for ip in addrs]} if mac: vif.vif_mac = mac ovsagt.OVSNeutronAgent.setup_arp_spoofing_protection( self.br_int, vif, details) class CanaryTableTestCase(OVSAgentTestBase): def test_canary_table(self): self.br_int.uninstall_flows(cookie=ovs_lib.COOKIE_ANY) self.assertEqual(constants.OVS_RESTARTED, self.br_int.check_canary_table()) self.br_int.setup_canary_table() self.assertEqual(constants.OVS_NORMAL, self.br_int.check_canary_table()) class DeleteFlowsTestCase(OVSAgentTestBase): def test_delete_flows_bridge_cookie_only(self): PORT = 1 self.br_int.add_flow(in_port=PORT, ip=True, nw_dst="1.1.1.1", actions="output:11") self.br_int.add_flow(in_port=PORT, ip=True, nw_dst="2.2.2.2", cookie=42, actions="output:42") # delete (should only delete flows with the bridge cookie) self.br_int.delete_flows(in_port=PORT) flows = self.br_int.dump_flows_for(in_port=PORT, cookie=self.br_int._default_cookie) flows42 = self.br_int.dump_flows_for(in_port=PORT, cookie=42) # check that only flows with cookie 42 remain self.assertFalse(flows) self.assertTrue(flows42) def test_delete_flows_all(self): PORT = 1 self.br_int.add_flow(in_port=PORT, ip=True, nw_dst="1.1.1.1", actions="output:11") 
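# NOTE(editor): the two flows added around this note differ only in their
# cookie: the first gets the bridge's per-instance default cookie, the
# second an explicit cookie of 42, so delete_flows() without a cookie
# argument should touch only the former. A minimal standalone sketch of
# that scoping idea (a hedged illustration, not the exact ovs_lib
# internals; 'default_cookie' mirrors the _default_cookie read below):
def _sketch_scoped_flow_delete(bridge, default_cookie, **match):
    # Narrow a wildcard delete to this bridge's own cookie unless the
    # caller explicitly widened it (e.g. with ovs_lib.COOKIE_ANY).
    match.setdefault('cookie', default_cookie)
    bridge.delete_flows(**match)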
self.br_int.add_flow(in_port=PORT, ip=True, nw_dst="2.2.2.2", cookie=42, actions="output:42") # delete both flows self.br_int.delete_flows(in_port=PORT, cookie=ovs_lib.COOKIE_ANY) # check that no flow remains flows = self.br_int.dump_flows_for(in_port=PORT) self.assertFalse(flows) class OVSFlowTestCase(OVSAgentTestBase): """Tests defined in this class use ovs-appctl ofproto/trace commands, which simulate processing of imaginary packets, to check desired actions are correctly set up by OVS flows. """ def setUp(self): cfg.CONF.set_override('enable_distributed_routing', True, group='AGENT') super(OVSFlowTestCase, self).setUp() self.phys_br = self.useFixture(net_helpers.OVSBridgeFixture()).bridge self.br_phys = self.br_phys_cls(self.phys_br.br_name) self.br_phys.set_secure_mode() self.br_phys.setup_controllers(cfg.CONF) self.router_addr = '192.168.0.1/24' self.namespace = self.useFixture( net_helpers.NamespaceFixture()).name self.phys_p = self.useFixture( net_helpers.OVSPortFixture(self.br_phys, self.namespace)).port self.tun_br = self.useFixture(net_helpers.OVSBridgeFixture()).bridge self.br_tun = self.br_tun_cls(self.tun_br.br_name) self.br_tun.set_secure_mode() self.br_tun.setup_controllers(cfg.CONF) self.tun_p = self.br_tun.add_patch_port( common_utils.get_rand_device_name( prefix=cfg.CONF.OVS.tun_peer_patch_port), common_utils.get_rand_device_name( prefix=cfg.CONF.OVS.int_peer_patch_port)) self.br_tun.setup_default_table(self.tun_p, True) def test_provision_local_vlan(self): kwargs = {'port': 123, 'lvid': 888, 'segmentation_id': 777} self.br_phys.provision_local_vlan(distributed=False, **kwargs) trace = self._run_trace(self.phys_br.br_name, "in_port=%(port)d,dl_src=12:34:56:78:aa:bb," "dl_dst=24:12:56:78:aa:bb,dl_type=0x0800," "nw_src=192.168.0.1,nw_dst=192.168.0.2," "nw_proto=1,nw_tos=0,nw_ttl=128," "icmp_type=8,icmp_code=0,dl_vlan=%(lvid)d" % kwargs) self.assertIn("dl_vlan=%(segmentation_id)d" % kwargs, trace["Final flow"]) def test_install_dvr_to_src_mac(self): other_dvr_mac = 'fa:16:3f:01:de:ad' other_dvr_port = 333 kwargs = {'vlan_tag': 888, 'gateway_mac': '12:34:56:78:aa:bb', 'dst_mac': '12:34:56:78:cc:dd', 'dst_port': 123} self.br_int.install_dvr_to_src_mac(network_type='vlan', **kwargs) self.br_int.add_dvr_mac_vlan(mac=other_dvr_mac, port=other_dvr_port) trace = self._run_trace(self.br.br_name, "in_port=%d," % other_dvr_port + "dl_src=" + other_dvr_mac + "," + "dl_dst=%(dst_mac)s,dl_type=0x0800," "nw_src=192.168.0.1,nw_dst=192.168.0.2," "nw_proto=1,nw_tos=0,nw_ttl=128," "icmp_type=8,icmp_code=0," "dl_vlan=%(vlan_tag)d" % kwargs) self.assertIn("vlan_tci=0x0000", trace["Final flow"]) self.assertIn(("dl_src=%(gateway_mac)s" % kwargs), trace["Final flow"]) @helpers.skip_if_ovs_older_than("2.5.1") def test_install_flood_to_tun(self): attrs = { 'remote_ip': self.get_test_net_address(1), 'local_ip': self.get_test_net_address(2), } kwargs = {'vlan': 777, 'tun_id': 888} port_name = common_utils.get_rand_device_name(net_helpers.PORT_PREFIX) ofport = self.br_tun.add_tunnel_port(port_name, attrs['remote_ip'], attrs['local_ip']) self.br_tun.install_flood_to_tun(ports=[ofport], **kwargs) test_packet = ("icmp,in_port=%d," % self.tun_p + "dl_src=12:34:56:ab:cd:ef,dl_dst=12:34:56:78:cc:dd," "nw_src=192.168.0.1,nw_dst=192.168.0.2,nw_ecn=0," "nw_tos=0,nw_ttl=128,icmp_type=8,icmp_code=0," "dl_vlan=%(vlan)d,dl_vlan_pcp=0" % kwargs) trace = self._run_trace(self.tun_br.br_name, test_packet) self.assertIn(("tun_id=0x%(tun_id)x" % kwargs), trace["Final flow"]) self.assertIn("vlan_tci=0x0000,", 
trace["Final flow"]) self.br_tun.delete_flood_to_tun(kwargs['vlan']) trace = self._run_trace(self.tun_br.br_name, test_packet) self.assertEqual(" unchanged", trace["Final flow"]) self.assertIn("drop", trace["Datapath actions"]) def test_install_instructions_str(self): kwargs = {'in_port': 345, 'vlan_tci': 0x1123} dst_p = self.useFixture( net_helpers.OVSPortFixture(self.br_tun, self.namespace)).port dst_ofp = self.br_tun.get_port_ofport(dst_p.name) self.br_tun.install_instructions("pop_vlan,output:%d" % dst_ofp, priority=10, **kwargs) trace = self._run_trace(self.br_tun.br_name, "in_port=%(in_port)d,dl_src=12:34:56:78:aa:bb," "dl_dst=24:12:56:78:aa:bb,dl_type=0x0800," "nw_src=192.168.0.1,nw_dst=192.168.0.2," "nw_proto=1,nw_tos=0,nw_ttl=128," "icmp_type=8,icmp_code=0,vlan_tci=%(vlan_tci)d" % kwargs) self.assertIn("pop_vlan,", trace["Datapath actions"]) def test_bundled_install(self): kwargs = {'in_port': 345, 'vlan_tci': 0x1321} dst_p = self.useFixture( net_helpers.OVSPortFixture(self.br_tun, self.namespace)).port dst_ofp = self.br_tun.get_port_ofport(dst_p.name) with self.br_tun.bundled() as br: br.install_instructions("pop_vlan,output:%d" % dst_ofp, priority=10, **kwargs) trace = self._run_trace(self.br_tun.br_name, "in_port=%(in_port)d,dl_src=12:34:56:78:aa:bb," "dl_dst=24:12:56:78:aa:bb,dl_type=0x0800," "nw_src=192.168.0.1,nw_dst=192.168.0.2," "nw_proto=1,nw_tos=0,nw_ttl=128," "icmp_type=8,icmp_code=0,vlan_tci=%(vlan_tci)d" % kwargs) self.assertIn("pop_vlan,", trace["Datapath actions"]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/test_ovs_lib.py0000644000175000017500000007366400000000000026325 0ustar00coreycorey00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import uuid import mock from neutron_lib import constants as const from oslo_config import cfg from ovsdbapp.backend.ovs_idl import idlutils from neutron.agent.common import ovs_lib from neutron.agent.linux import ip_lib from neutron.common import utils from neutron.plugins.ml2.drivers.openvswitch.agent.common import ( constants as agent_const) from neutron.tests.common.exclusive_resources import port from neutron.tests.common import net_helpers from neutron.tests.functional.agent.linux import base class OVSBridgeTestBase(base.BaseOVSLinuxTestCase): # TODO(twilson) So far, only ovsdb-related tests are written. 
It would be # good to also add the openflow-related functions def setUp(self): super(OVSBridgeTestBase, self).setUp() self.ovs = ovs_lib.BaseOVS() self.br = self.useFixture(net_helpers.OVSBridgeFixture()).bridge def create_ovs_port(self, *interface_attrs): # Convert ((a, b), (c, d)) to {a: b, c: d} and add 'type' by default attrs = collections.OrderedDict(interface_attrs) attrs.setdefault('type', 'internal') port_name = utils.get_rand_device_name(net_helpers.PORT_PREFIX) return (port_name, self.br.add_port(port_name, *attrs.items())) def create_ovs_vif_port(self, iface_id=None, mac=None, iface_field='iface-id'): if iface_id is None: iface_id = utils.get_rand_name() if mac is None: mac = utils.get_rand_name() attrs = ('external_ids', {iface_field: iface_id, 'attached-mac': mac}) port_name, ofport = self.create_ovs_port(attrs) return ovs_lib.VifPort(port_name, ofport, iface_id, mac, self.br) class OVSBridgeTestCase(OVSBridgeTestBase): def test_port_lifecycle(self): (port_name, ofport) = self.create_ovs_port(('type', 'internal')) # ofport should always be an integer string with value -1 or > 0. self.assertTrue(int(ofport)) self.assertTrue(int(self.br.get_port_ofport(port_name))) self.assertTrue(self.br.port_exists(port_name)) self.assertEqual(self.br.br_name, self.br.get_bridge_for_iface(port_name)) self.br.delete_port(port_name) self.assertFalse(self.br.port_exists(port_name)) def test_duplicate_port_may_exist_false(self): port_name, ofport = self.create_ovs_port(('type', 'internal')) cmd = self.br.ovsdb.add_port(self.br.br_name, port_name, may_exist=False) self.assertRaises(RuntimeError, cmd.execute, check_error=True) def test_delete_port_if_exists_false(self): cmd = self.br.ovsdb.del_port('nonexistantport', if_exists=False) self.assertRaises(RuntimeError, cmd.execute, check_error=True) def test_replace_port(self): port_name = utils.get_rand_device_name(net_helpers.PORT_PREFIX) self.br.replace_port(port_name, ('type', 'internal')) self.assertTrue(self.br.port_exists(port_name)) self.assertEqual('internal', self.br.db_get_val('Interface', port_name, 'type')) self.br.replace_port(port_name, ('type', 'internal'), ('external_ids', {'test': 'test'})) self.assertTrue(self.br.port_exists(port_name)) self.assertEqual('test', self.br.db_get_val('Interface', port_name, 'external_ids')['test']) self.assertEqual(agent_const.DEAD_VLAN_TAG, self.br.db_get_val('Port', port_name, 'tag')) def test_attribute_lifecycle(self): (port_name, ofport) = self.create_ovs_port() tag = 42 self.ovs.set_db_attribute('Port', port_name, 'tag', tag) self.assertEqual(tag, self.ovs.db_get_val('Port', port_name, 'tag')) self.assertEqual(tag, self.br.get_port_tag_dict()[port_name]) self.ovs.clear_db_attribute('Port', port_name, 'tag') self.assertEqual([], self.ovs.db_get_val('Port', port_name, 'tag')) self.assertEqual([], self.br.get_port_tag_dict()[port_name]) def test_attribute_map_handling(self): (pname, ofport) = self.create_ovs_port() expected = {'a': 'b'} self.ovs.set_db_attribute('Port', pname, 'other_config', expected) self.assertEqual(expected, self.ovs.db_get_val('Port', pname, 'other_config')) other = {'c': 'd'} expected.update(other) self.ovs.set_db_attribute('Port', pname, 'other_config', other) self.assertEqual(expected, self.ovs.db_get_val('Port', pname, 'other_config')) other = {'a': 'x'} expected.update(other) self.ovs.set_db_attribute('Port', pname, 'other_config', other) self.assertEqual(expected, self.ovs.db_get_val('Port', pname, 'other_config')) def test_get_bridge_external_bridge_id(self): 
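# NOTE(editor): the map-column assertions above rely on OVSDB merging
# keys into an existing map column rather than replacing the whole map.
# A tiny pure-Python illustration of that expected merge semantics
# ('current' and 'update' are hypothetical values, no OVSDB involved):
def _sketch_map_column_merge(current, update):
    # OVSDB-style set on a map column: existing keys are overwritten,
    # new keys are added, and unrelated keys are preserved.
    merged = dict(current)
    merged.update(update)
    return merged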
self.ovs.set_db_attribute('Bridge', self.br.br_name, 'external_ids', {'bridge-id': self.br.br_name}) self.assertEqual( self.br.br_name, self.ovs.get_bridge_external_bridge_id(self.br.br_name)) def test_controller_lifecycle(self): controllers = {'tcp:127.0.0.1:6633', 'tcp:172.17.16.10:55'} self.br.set_controller(controllers) self.assertSetEqual(controllers, set(self.br.get_controller())) self.br.del_controller() self.assertEqual([], self.br.get_controller()) def test_non_index_queries(self): controllers = ['tcp:127.0.0.1:6633'] self.br.set_controller(controllers) cmd = self.br.ovsdb.db_set('Controller', self.br.br_name, ('connection_mode', 'out-of-band')) cmd.execute(check_error=True) self.assertEqual('out-of-band', self.br.db_get_val('Controller', self.br.br_name, 'connection_mode')) def test_set_fail_mode_secure(self): self.br.set_secure_mode() self._assert_br_fail_mode(ovs_lib.FAILMODE_SECURE) def test_set_fail_mode_standalone(self): self.br.set_standalone_mode() self._assert_br_fail_mode(ovs_lib.FAILMODE_STANDALONE) def _assert_br_fail_mode(self, fail_mode): self.assertEqual( self.br.db_get_val('Bridge', self.br.br_name, 'fail_mode'), fail_mode) def test_add_protocols_start_with_one(self): self.br.set_db_attribute('Bridge', self.br.br_name, 'protocols', ['OpenFlow10'], check_error=True) self.br.add_protocols('OpenFlow13') self.assertEqual( self.br.db_get_val('Bridge', self.br.br_name, 'protocols'), ['OpenFlow10', 'OpenFlow13']) def test_add_protocols_start_with_two_add_two(self): self.br.set_db_attribute('Bridge', self.br.br_name, 'protocols', ['OpenFlow10', 'OpenFlow12'], check_error=True) self.br.add_protocols('OpenFlow13', 'OpenFlow14') self.assertEqual( self.br.db_get_val('Bridge', self.br.br_name, 'protocols'), ['OpenFlow10', 'OpenFlow12', 'OpenFlow13', 'OpenFlow14']) def test_add_protocols_add_existing(self): self.br.set_db_attribute('Bridge', self.br.br_name, 'protocols', ['OpenFlow10', 'OpenFlow12', 'OpenFlow13'], check_error=True) self.br.add_protocols('OpenFlow13') self.assertEqual( self.br.db_get_val('Bridge', self.br.br_name, 'protocols'), ['OpenFlow10', 'OpenFlow12', 'OpenFlow13']) def _test_set_igmp_snooping_state(self, state): self.br.set_igmp_snooping_state(state) self.assertEqual( state, self.br.db_get_val( 'Bridge', self.br.br_name, 'mcast_snooping_enable')) br_other_config = self.ovs.ovsdb.db_find( 'Bridge', ('name', '=', self.br.br_name), columns=['other_config'] ).execute()[0]['other_config'] self.assertEqual( str(state), br_other_config['mcast-snooping-disable-flood-unregistered']) def test_set_igmp_snooping_enabled(self): self._test_set_igmp_snooping_state(True) def test_set_igmp_snooping_disabled(self): self._test_set_igmp_snooping_state(False) def test_get_datapath_id(self): brdev = ip_lib.IPDevice(self.br.br_name) dpid = brdev.link.attributes['link/ether'].replace(':', '') self.br.set_db_attribute('Bridge', self.br.br_name, 'datapath_id', dpid) self.assertIn(dpid, self.br.get_datapath_id()) def _test_add_tunnel_port(self, attrs): port_name = utils.get_rand_device_name(net_helpers.PORT_PREFIX) self.br.add_tunnel_port(port_name, attrs['remote_ip'], attrs['local_ip']) self.assertEqual('gre', self.ovs.db_get_val('Interface', port_name, 'type')) options = self.ovs.db_get_val('Interface', port_name, 'options') for attr, val in attrs.items(): self.assertEqual(val, options[attr]) def test_add_tunnel_port_ipv4(self): attrs = { 'remote_ip': self.get_test_net_address(1), 'local_ip': self.get_test_net_address(2), } self._test_add_tunnel_port(attrs) def 
test_add_tunnel_port_ipv6(self): attrs = { 'remote_ip': '2001:db8:200::1', 'local_ip': '2001:db8:100::1', } self._test_add_tunnel_port(attrs) def test_add_tunnel_port_custom_port(self): port_name = utils.get_rand_device_name(net_helpers.PORT_PREFIX) self.br.add_tunnel_port( port_name, self.get_test_net_address(1), self.get_test_net_address(2), tunnel_type=const.TYPE_VXLAN, vxlan_udp_port=12345) options = self.ovs.db_get_val('Interface', port_name, 'options') self.assertEqual("12345", options['dst_port']) def test_add_tunnel_port_tos(self): attrs = { 'remote_ip': self.get_test_net_address(1), 'local_ip': self.get_test_net_address(2), 'tos': 'inherit', } port_name = utils.get_rand_device_name(net_helpers.PORT_PREFIX) self.br.add_tunnel_port(port_name, attrs['remote_ip'], attrs['local_ip'], tos=attrs['tos']) self.assertEqual('gre', self.ovs.db_get_val('Interface', port_name, 'type')) options = self.ovs.db_get_val('Interface', port_name, 'options') for attr, val in attrs.items(): self.assertEqual(val, options[attr]) def test_add_patch_port(self): local = utils.get_rand_device_name(net_helpers.PORT_PREFIX) peer = 'remotepeer' self.br.add_patch_port(local, peer) self.assertEqual(self.ovs.db_get_val('Interface', local, 'type'), 'patch') options = self.ovs.db_get_val('Interface', local, 'options') self.assertEqual(peer, options['peer']) def test_get_port_name_list(self): # Note that ovs-vsctl's list-ports does not include the port created # with the same name as the bridge ports = {self.create_ovs_port()[0] for i in range(5)} self.assertSetEqual(ports, set(self.br.get_port_name_list())) def test_get_iface_name_list(self): ifaces = {self.create_ovs_port()[0] for i in range(5)} self.assertSetEqual(ifaces, set(self.br.get_iface_name_list())) def test_get_port_stats(self): # Nothing seems to use this function? 
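# NOTE(editor): a compact sketch of what the tunnel-port tests above read
# back from the Interface 'options' column after add_tunnel_port(); this
# is an illustrative helper, not part of ovs_lib:
def _sketch_expected_tunnel_options(remote_ip, local_ip, tos=None):
    # remote_ip/local_ip always land in 'options'; 'tos' appears only
    # when it was passed explicitly, matching the assertions above.
    options = {'remote_ip': remote_ip, 'local_ip': local_ip}
    if tos is not None:
        options['tos'] = tos
    return options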
(port_name, ofport) = self.create_ovs_port() stats = set(self.br.get_port_stats(port_name).keys()) self.assertTrue(set(['rx_packets', 'tx_packets']).issubset(stats)) def test_get_vif_ports(self): for i in range(2): self.create_ovs_port() vif_ports = [self.create_ovs_vif_port() for i in range(3)] ports = self.br.get_vif_ports() self.assertEqual(3, len(ports)) self.assertTrue(all([isinstance(x, ovs_lib.VifPort) for x in ports])) self.assertEqual(sorted([x.port_name for x in vif_ports]), sorted([x.port_name for x in ports])) def test_get_vif_ports_with_bond(self): for i in range(2): self.create_ovs_port() vif_ports = [self.create_ovs_vif_port() for i in range(3)] # bond ports don't have records in the Interface table but they do in # the Port table orig = self.br.get_port_name_list new_port_name_list = lambda: orig() + ['bondport'] mock.patch.object(self.br, 'get_port_name_list', new=new_port_name_list).start() ports = self.br.get_vif_ports() self.assertEqual(3, len(ports)) self.assertTrue(all([isinstance(x, ovs_lib.VifPort) for x in ports])) self.assertEqual(sorted([x.port_name for x in vif_ports]), sorted([x.port_name for x in ports])) def test_get_vif_port_set(self): for i in range(2): self.create_ovs_port() vif_ports = [self.create_ovs_vif_port() for i in range(2)] ports = self.br.get_vif_port_set() expected = set([x.vif_id for x in vif_ports]) self.assertEqual(expected, ports) def test_get_vif_port_set_with_missing_port(self): self.create_ovs_port() vif_ports = [self.create_ovs_vif_port()] # return an extra port to make sure the db list ignores it orig = self.br.get_port_name_list new_port_name_list = lambda: orig() + ['anotherport'] mock.patch.object(self.br, 'get_port_name_list', new=new_port_name_list).start() ports = self.br.get_vif_port_set() expected = set([vif_ports[0].vif_id]) self.assertEqual(expected, ports) def test_get_vif_port_set_on_empty_bridge_returns_empty_set(self): # Create a port on self.br self.create_ovs_vif_port() # Create another, empty bridge br_2 = self.useFixture(net_helpers.OVSBridgeFixture()).bridge # Assert that get_vif_port_set on an empty bridge returns an empty set, # and does not return the other bridge's ports. 
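# NOTE(editor): the bond and missing-port tests above verify that VIF
# enumeration tolerates Port rows that have no matching Interface record.
# A minimal pure-Python sketch of that tolerant join (both arguments are
# hypothetical stand-ins for the OVSDB lookups):
def _sketch_join_ports_to_interfaces(port_names, interfaces_by_name):
    # Skip names (e.g. bond members) without an Interface row instead
    # of failing the whole listing.
    return [interfaces_by_name[name] for name in port_names
            if name in interfaces_by_name]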
self.assertEqual(set(), br_2.get_vif_port_set()) def test_get_ports_attributes(self): port_names = [self.create_ovs_port()[0], self.create_ovs_port()[0]] db_ports = self.br.get_ports_attributes('Interface', columns=['name']) db_ports_names = [p['name'] for p in db_ports] self.assertEqual(sorted(port_names), sorted(db_ports_names)) def test_get_port_tag_dict(self): # Simple case tested in port test_set_get_clear_db_val pass def test_get_vif_port_by_id(self): for i in range(2): self.create_ovs_port() vif_ports = [self.create_ovs_vif_port() for i in range(3)] for vif in vif_ports: self.assertEqual(self.br.get_vif_port_by_id(vif.vif_id).vif_id, vif.vif_id) def test_get_vifs_by_ids(self): for i in range(2): self.create_ovs_port() vif_ports = [self.create_ovs_vif_port() for i in range(3)] by_id = self.br.get_vifs_by_ids([v.vif_id for v in vif_ports]) # convert to str for comparison of VifPorts by_id = {vid: str(vport) for vid, vport in by_id.items()} self.assertEqual({v.vif_id: str(v) for v in vif_ports}, by_id) def test_delete_ports(self): # TODO(twilson) I intensely dislike the current delete_ports function # as the default behavior is really delete_vif_ports(), then it acts # more like a delete_ports() seems like it should if all_ports=True is # passed # Create 2 non-vif ports and 2 vif ports nonvifs = {self.create_ovs_port()[0] for i in range(2)} vifs = {self.create_ovs_vif_port().port_name for i in range(2)} self.assertSetEqual(nonvifs.union(vifs), set(self.br.get_port_name_list())) self.br.delete_ports() self.assertSetEqual(nonvifs, set(self.br.get_port_name_list())) self.br.delete_ports(all_ports=True) self.assertEqual(len(self.br.get_port_name_list()), 0) def test_set_controller_connection_mode(self): controllers = ['tcp:192.0.2.0:6633'] self._set_controllers_connection_mode(controllers) def test_set_multi_controllers_connection_mode(self): controllers = ['tcp:192.0.2.0:6633', 'tcp:192.0.2.1:55'] self._set_controllers_connection_mode(controllers) def _set_controllers_connection_mode(self, controllers): self.br.set_controller(controllers) self.assertEqual(sorted(controllers), sorted(self.br.get_controller())) self.br.set_controllers_connection_mode('out-of-band') self._assert_controllers_connection_mode('out-of-band') self.br.del_controller() self.assertEqual([], self.br.get_controller()) def _assert_controllers_connection_mode(self, connection_mode): controllers = self.br.db_get_val('Bridge', self.br.br_name, 'controller') controllers = [controllers] if isinstance( controllers, uuid.UUID) else controllers for controller in controllers: self.assertEqual(connection_mode, self.br.db_get_val('Controller', controller, 'connection_mode')) def test_egress_bw_limit(self): port_name, _ = self.create_ovs_port() self.br.create_egress_bw_limit_for_port(port_name, 700, 70) max_rate, burst = self.br.get_egress_bw_limit_for_port(port_name) self.assertEqual(700, max_rate) self.assertEqual(70, burst) self.br.delete_egress_bw_limit_for_port(port_name) max_rate, burst = self.br.get_egress_bw_limit_for_port(port_name) self.assertIsNone(max_rate) self.assertIsNone(burst) def test_ingress_bw_limit(self): port_name, _ = self.create_ovs_port() self.br.update_ingress_bw_limit_for_port(port_name, 700, 70) max_rate, burst = self.br.get_ingress_bw_limit_for_port(port_name) self.assertEqual(700, max_rate) self.assertEqual(70, burst) self.br.update_ingress_bw_limit_for_port(port_name, 750, 100) max_rate, burst = self.br.get_ingress_bw_limit_for_port(port_name) self.assertEqual(750, max_rate) self.assertEqual(100, 
burst) self.br.delete_ingress_bw_limit_for_port(port_name) max_rate, burst = self.br.get_ingress_bw_limit_for_port(port_name) self.assertIsNone(max_rate) self.assertIsNone(burst) def test_ingress_bw_limit_dpdk_port(self): port_name, _ = self.create_ovs_port( ('type', agent_const.OVS_DPDK_VHOST_USER)) self.br.update_ingress_bw_limit_for_port(port_name, 700, 70) max_rate, burst = self.br.get_ingress_bw_limit_for_dpdk_port( port_name) self.assertEqual(700, max_rate) self.assertEqual(70, burst) self.br.update_ingress_bw_limit_for_port(port_name, 750, 100) max_rate, burst = self.br.get_ingress_bw_limit_for_dpdk_port( port_name) self.assertEqual(750, max_rate) self.assertEqual(100, burst) self.br.delete_ingress_bw_limit_for_port(port_name) max_rate, burst = self.br.get_ingress_bw_limit_for_dpdk_port( port_name) self.assertIsNone(max_rate) self.assertIsNone(burst) def test_db_create_references(self): with self.ovs.ovsdb.transaction(check_error=True) as txn: queue = txn.add(self.ovs.ovsdb.db_create("Queue", other_config={'a': '1'})) qos = txn.add(self.ovs.ovsdb.db_create("QoS", queues={0: queue})) txn.add(self.ovs.ovsdb.db_set("Port", self.br.br_name, ('qos', qos))) def cleanup(): with self.ovs.ovsdb.transaction() as t: t.add(self.ovs.ovsdb.db_destroy("QoS", qos.result)) t.add(self.ovs.ovsdb.db_destroy("Queue", queue.result)) t.add(self.ovs.ovsdb.db_clear("Port", self.br.br_name, 'qos')) self.addCleanup(cleanup) val = self.ovs.ovsdb.db_get("Port", self.br.br_name, 'qos').execute() self.assertEqual(qos.result, val) def test_db_add_set(self): protocols = ["OpenFlow10", "OpenFlow11"] expected = self.br.initial_protocols.union(protocols) self.br.ovsdb.db_add("Bridge", self.br.br_name, "protocols", *protocols).execute(check_error=True) self.assertItemsEqual(expected, self.br.db_get_val('Bridge', self.br.br_name, "protocols")) def test_db_add_map(self): key = "testdata" data = {key: "testvalue"} self.br.ovsdb.db_add("Bridge", self.br.br_name, "external_ids", data).execute(check_error=True) self.assertEqual(data, self.br.db_get_val('Bridge', self.br.br_name, 'external_ids')) self.br.ovsdb.db_add("Bridge", self.br.br_name, "external_ids", {key: "newdata"}).execute(check_error=True) self.assertEqual(data, self.br.db_get_val('Bridge', self.br.br_name, 'external_ids')) def test_db_add_map_multiple_one_dict(self): data = {"one": "1", "two": "2", "three": "3"} self.br.ovsdb.db_add("Bridge", self.br.br_name, "external_ids", data).execute(check_error=True) self.assertEqual(data, self.br.db_get_val('Bridge', self.br.br_name, 'external_ids')) def test_db_add_map_multiple_dicts(self): data = ({"one": "1"}, {"two": "2"}, {"three": "3"}) self.br.ovsdb.db_add("Bridge", self.br.br_name, "external_ids", *data).execute(check_error=True) combined = {k: v for a in data for k, v in a.items()} self.assertEqual(combined, self.br.db_get_val('Bridge', self.br.br_name, 'external_ids')) def test_db_add_ref(self): ovsdb = self.ovs.ovsdb brname = utils.get_rand_name(prefix=net_helpers.BR_PREFIX) br = ovs_lib.OVSBridge(brname) # doesn't create self.addCleanup(br.destroy) with ovsdb.transaction(check_error=True) as txn: br = txn.add(ovsdb.db_create('Bridge', name=brname)) txn.add(ovsdb.db_add('Open_vSwitch', '.', 'bridges', br)) self.assertIn(brname, self.ovs.get_bridges()) def test_db_add_to_new_object(self): ovsdb = self.ovs.ovsdb brname = utils.get_rand_name(prefix=net_helpers.BR_PREFIX) br = ovs_lib.OVSBridge(brname) # doesn't create self.addCleanup(br.destroy) with ovsdb.transaction(check_error=True) as txn: 
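# NOTE(editor): commands queued on an ovsdbapp transaction only resolve
# their .result after commit, which is why test_db_create_references
# above wires the Queue/QoS rows together inside a single transaction.
# A hedged sketch of that pattern (table and column values illustrative):
def _sketch_create_with_reference(ovsdb):
    with ovsdb.transaction(check_error=True) as txn:
        queue = txn.add(ovsdb.db_create('Queue', other_config={'k': 'v'}))
        # 'queue' may be passed to later commands in the same transaction
        # even though queue.result is not populated until commit.
        txn.add(ovsdb.db_create('QoS', queues={0: queue}))
    return queue.result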
txn.add(ovsdb.add_br(brname)) txn.add(ovsdb.db_add('Bridge', brname, 'protocols', 'OpenFlow10')) def test_cascading_del_in_txn(self): ovsdb = self.ovs.ovsdb port_name, _ = self.create_ovs_port() def del_port_mod_iface(): with ovsdb.transaction(check_error=True) as txn: txn.add(ovsdb.del_port(port_name, self.br.br_name, if_exists=False)) txn.add(ovsdb.db_set('Interface', port_name, ('type', 'internal'))) self.assertRaises((RuntimeError, idlutils.RowNotFound), del_port_mod_iface) def test_delete_flows_all(self): self.br.add_flow(in_port=1, actions="output:2") self.br.delete_flows(cookie=ovs_lib.COOKIE_ANY) self.assertEqual([], self.br.dump_all_flows()) def test_delete_flows_strict(self): self.br.delete_flows(cookie=ovs_lib.COOKIE_ANY) # remove NORMAL action self.br.add_flow(in_port=1, actions="output:2") self.br.add_flow(in_port=1, priority=100, actions="output:3") self.assertEqual(2, len(self.br.dump_all_flows())) self.br.delete_flows(in_port=1, priority=100, strict=True) self.assertEqual(1, len(self.br.dump_all_flows())) class OVSLibTestCase(base.BaseOVSLinuxTestCase): def setUp(self): super(OVSLibTestCase, self).setUp() self.ovs = ovs_lib.BaseOVS() def test_add_manager_appends(self): port1 = self.useFixture(port.ExclusivePort(const.PROTO_NAME_TCP, start=net_helpers.OVS_MANAGER_TEST_PORT_FIRST, end=net_helpers.OVS_MANAGER_TEST_PORT_LAST)).port port2 = self.useFixture(port.ExclusivePort(const.PROTO_NAME_TCP, start=net_helpers.OVS_MANAGER_TEST_PORT_FIRST, end=net_helpers.OVS_MANAGER_TEST_PORT_LAST)).port manager_list = ["ptcp:%s:127.0.0.1" % port1, "ptcp:%s:127.0.0.1" % port2] # Verify that add_manager does not override the existing manager expected_manager_list = list() for conn_uri in manager_list: self.ovs.add_manager(conn_uri) self.addCleanup(self.ovs.remove_manager, conn_uri) self.assertIn(conn_uri, self.ovs.get_manager()) expected_manager_list.append(conn_uri) # Verify that switch is configured with both the managers for manager_uri in expected_manager_list: self.assertIn(manager_uri, manager_list) def test_add_manager_lifecycle_baseovs(self): port1 = self.useFixture(port.ExclusivePort(const.PROTO_NAME_TCP, start=net_helpers.OVS_MANAGER_TEST_PORT_FIRST, end=net_helpers.OVS_MANAGER_TEST_PORT_LAST)).port conn_uri = "ptcp:%s:127.0.0.1" % port1 self.addCleanup(self.ovs.remove_manager, conn_uri) self.ovs.add_manager(conn_uri) self.assertIn(conn_uri, self.ovs.get_manager()) self.assertEqual(self.ovs.db_get_val('Manager', conn_uri, 'inactivity_probe'), self.ovs.ovsdb_timeout * 1000) self.ovs.remove_manager(conn_uri) self.assertNotIn(conn_uri, self.ovs.get_manager()) def test_bridge_lifecycle_baseovs(self): name = utils.get_rand_name(prefix=net_helpers.BR_PREFIX) self.addCleanup(self.ovs.delete_bridge, name) br = self.ovs.add_bridge(name) self.assertEqual(br.br_name, name) self.assertTrue(self.ovs.bridge_exists(name)) self.ovs.delete_bridge(name) self.assertFalse(self.ovs.bridge_exists(name)) def test_get_bridges(self): bridges = { self.useFixture(net_helpers.OVSBridgeFixture()).bridge.br_name for i in range(5)} self.assertTrue(set(self.ovs.get_bridges()).issuperset(bridges)) def test_bridge_lifecycle_ovsbridge(self): name = utils.get_rand_name(prefix=net_helpers.BR_PREFIX) mac_table_size = 12345 cfg.CONF.set_override( 'bridge_mac_table_size', mac_table_size, group='OVS') br = ovs_lib.OVSBridge(name) self.assertEqual(br.br_name, name) # Make sure that instantiating an OVSBridge does not actually create self.assertFalse(self.ovs.bridge_exists(name)) self.addCleanup(self.ovs.delete_bridge, name) 
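# NOTE(editor): (aside on test_delete_flows_strict above) strict deletion
# matches priority exactly instead of treating the spec as a wildcard. A
# short sketch of the intended call shape (flow values illustrative):
def _sketch_strict_delete(br):
    br.add_flow(in_port=1, actions='output:2')
    br.add_flow(in_port=1, priority=100, actions='output:3')
    # Only the priority-100 flow is removed; the default-priority flow
    # with the same match survives.
    br.delete_flows(in_port=1, priority=100, strict=True)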
br.create() self.assertTrue(self.ovs.bridge_exists(name)) br_other_config = self.ovs.ovsdb.db_find( 'Bridge', ('name', '=', name), columns=['other_config'] ).execute()[0]['other_config'] self.assertEqual(str(mac_table_size), br_other_config['mac-table-size']) br.destroy() self.assertFalse(self.ovs.bridge_exists(name)) def test_db_find_column_type_list(self): """Fixate output for vsctl/native ovsdb_interface. Makes sure that db_find search queries give the same result for both implementations. """ bridge_name = utils.get_rand_name(prefix=net_helpers.BR_PREFIX) self.addCleanup(self.ovs.delete_bridge, bridge_name) br = self.ovs.add_bridge(bridge_name) port_name = utils.get_rand_name(prefix=net_helpers.PORT_PREFIX) br.add_port(port_name) self.ovs.set_db_attribute('Port', port_name, 'tag', 42) # wrap list/find in transaction so we get a single isolated snapshot with self.ovs.ovsdb.transaction(check_error=True) as txn: tags = txn.add(self.ovs.ovsdb.db_list('Port', columns=['tag'])) len_0_list = txn.add(self.ovs.ovsdb.db_find( 'Port', ('tag', '!=', []), columns=['tag'])) single_value = txn.add(self.ovs.ovsdb.db_find( 'Port', ('tag', '=', 42), columns=['tag'])) # Make sure that there is data to query. # It should be, but let's be a little paranoid here as otherwise # the test has no sense tags_present = [t for t in tags.result if t['tag'] != []] self.assertTrue(tags_present) tags_42 = [t for t in tags_present if t['tag'] == 42] self.assertEqual(tags_42, single_value.result) self.assertItemsEqual(len_0_list.result, tags_present) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.3990455 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/windows/0000755000175000017500000000000000000000000024731 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/windows/__init__.py0000644000175000017500000000000000000000000027030 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/agent/windows/test_ip_lib.py0000644000175000017500000000322400000000000027601 0ustar00coreycorey00000000000000# Copyright 2016 Cloudbase Solutions. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
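# NOTE(editor): (aside on test_db_find_column_type_list, which closes the
# previous file) wrapping several read commands in one ovsdbapp
# transaction yields a single consistent snapshot of the database. A
# minimal sketch of that read pattern (table/column names illustrative):
def _sketch_consistent_reads(ovsdb):
    with ovsdb.transaction(check_error=True) as txn:
        all_tags = txn.add(ovsdb.db_list('Port', columns=['tag']))
        tagged = txn.add(ovsdb.db_find('Port', ('tag', '!=', []),
                                       columns=['tag']))
    # Results are only valid once the transaction context has exited.
    return all_tags.result, tagged.result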
from neutron.agent.windows import ip_lib
from neutron.tests.functional import base

WRONG_IP = '0.0.0.0'
TEST_IP = '127.0.0.1'
TEST_MAC = '00:00:00:00:00:00'


class IpLibTestCase(base.BaseLoggingTestCase):

    def test_ipwrapper_get_device_by_ip_None(self):
        self.assertIsNone(ip_lib.IPWrapper().get_device_by_ip(WRONG_IP))

    def test_ipwrapper_get_device_by_ip(self):
        ip_dev = ip_lib.IPWrapper().get_device_by_ip(TEST_IP)
        self.assertEqual('lo', ip_dev.name)

    def test_device_has_ip(self):
        not_a_device = ip_lib.IPDevice('#!#._not_a_device_bleargh!!@@@')
        self.assertFalse(not_a_device.device_has_ip(TEST_IP))

    def test_ip_link_read_mac_address(self):
        ip_dev = ip_lib.IPWrapper().get_device_by_ip(TEST_IP)
        self.assertEqual([TEST_MAC], ip_lib.IPLink(ip_dev).address)

    def test_ip_link_read_mac_address_wrong(self):
        not_a_device = ip_lib.IPDevice('#!#._not_a_device_bleargh!!@@@')
        mac_addr = ip_lib.IPLink(not_a_device).address
        self.assertFalse(mac_addr)
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4030454 neutron-16.0.0.0b2.dev214/neutron/tests/functional/api/0000755000175000017500000000000000000000000022712 5ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/api/__init__.py0000644000175000017500000000000000000000000025011 0ustar00coreycorey00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/api/test_policies.py0000644000175000017500000000765600000000000026140 0ustar00coreycorey00000000000000# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os.path

from neutron_lib.api import attributes
from neutron_lib import context
from neutron_lib import fixture

from neutron.api import extensions
from neutron.conf import policies
from neutron import policy
from neutron.tests.functional import base

TEST_PATH = os.path.dirname(os.path.abspath(__file__))


class APIPolicyTestCase(base.BaseLoggingTestCase):
    """Base class for API policy tests

    Tests for REST API policy checks. Ideally this would be done against
    an environment with an instantiated plugin, but there appear to be
    problems with instantiating a plugin against an sqlite environment
    and, as yet, there is no precedent for running a functional test
    against an actual database backend.
""" api_version = "2.0" def setUp(self): super(APIPolicyTestCase, self).setUp() self.useFixture(fixture.APIDefinitionFixture()) self.extension_path = os.path.abspath(os.path.join( TEST_PATH, "../../../extensions")) self.addCleanup(policy.reset) def _network_definition(self): return {'name': 'test_network', 'ports': [], 'subnets': [], 'status': 'up', 'admin_state_up': True, 'shared': False, 'tenant_id': 'admin', 'id': 'test_network', 'router:external': True} def _check_external_router_policy(self, context): return policy.check(context, 'get_network', self._network_definition()) def test_premature_loading(self): """Test premature policy loading Verifies that loading policies by way of admin context before populating extensions and extending the resource map results in networks with router:external is true being invisible to regular tenants. """ extension_manager = extensions.ExtensionManager(self.extension_path) admin_context = context.get_admin_context() tenant_context = context.Context('test_user', 'test_tenant_id', False) extension_manager.extend_resources(self.api_version, attributes.RESOURCES) self.assertTrue(self._check_external_router_policy(admin_context)) self.assertFalse(self._check_external_router_policy(tenant_context)) def test_proper_load_order(self): """Test proper policy load order Verifies that loading policies by way of admin context after populating extensions and extending the resource map results in networks with router:external are visible to regular tenants. """ policy.reset() extension_manager = extensions.ExtensionManager(self.extension_path) extension_manager.extend_resources(self.api_version, attributes.RESOURCES) # TODO(amotoki): Consider this should be part of # neutron.policy.reset (or refresh), but as of now # this is only required for unit testing. policies.reload_default_policies() policy.init() admin_context = context.get_admin_context() tenant_context = context.Context('test_user', 'test_tenant_id', False) self.assertTrue(self._check_external_router_policy(admin_context)) self.assertTrue(self._check_external_router_policy(tenant_context)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/base.py0000644000175000017500000003432000000000000023427 0ustar00coreycorey00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from datetime import datetime import errno import os import shutil import warnings import fixtures import mock from neutron_lib import fixture from neutron_lib.plugins import constants from neutron_lib.plugins import directory from oslo_concurrency import lockutils from oslo_config import cfg from oslo_db import exception as os_db_exc from oslo_db.sqlalchemy import provision from oslo_log import log from oslo_utils import uuidutils from neutron.agent.linux import utils from neutron.api import extensions as exts from neutron.conf.agent import common as config from neutron.conf.agent import ovs_conf from neutron.conf.plugins.ml2 import config as ml2_config from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf # Load all the models to register them into SQLAlchemy metadata before using # the SqlFixture from neutron.db import models # noqa from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import worker from neutron.plugins.ml2.drivers import type_geneve # noqa from neutron.tests import base from neutron.tests.common import base as common_base from neutron.tests.common import helpers from neutron.tests.functional.resources import process from neutron.tests.unit.plugins.ml2 import test_plugin LOG = log.getLogger(__name__) # This is the directory from which infra fetches log files for functional tests DEFAULT_LOG_DIR = os.path.join(helpers.get_test_log_path(), 'dsvm-functional-logs') SQL_FIXTURE_LOCK = 'sql_fixture_lock' def config_decorator(method_to_decorate, config_tuples): def wrapper(*args, **kwargs): method_to_decorate(*args, **kwargs) for config_tuple in config_tuples: cfg.CONF.set_override(*config_tuple) return wrapper class BaseLoggingTestCase(base.BaseTestCase): def setUp(self): super(BaseLoggingTestCase, self).setUp() # NOTE(slaweq): Because of issue with stestr and Python3, we need # to avoid too much output to be produced during tests, so we will # ignore python warnings here warnings.simplefilter("ignore") base.setup_test_logging( cfg.CONF, DEFAULT_LOG_DIR, "%s.txt" % self.id()) cfg.CONF.set_override('use_helper_for_ns_read', False, group='AGENT') class BaseSudoTestCase(BaseLoggingTestCase): """Base class for tests requiring invocation of commands via a root helper. This class skips (during setUp) its tests unless sudo is enabled, ie: OS_SUDO_TESTING is set to '1' or 'True' in the test execution environment. This is intended to allow developers to run the functional suite (e.g. tox -e functional) without test failures if sudo invocations are not allowed. Running sudo tests in the upstream gate jobs (*-neutron-dsvm-functional) requires the additional step of setting OS_ROOTWRAP_CMD to the rootwrap command configured by devstack, e.g. sudo /usr/local/bin/neutron-rootwrap /etc/neutron/rootwrap.conf Gate jobs do not allow invocations of sudo without rootwrap to ensure that rootwrap configuration gets as much testing as possible. """ def setUp(self): super(BaseSudoTestCase, self).setUp() if not base.bool_from_env('OS_SUDO_TESTING'): self.skipTest('Testing with sudo is not enabled') self.setup_rootwrap() config.setup_privsep() self._override_default_config() @common_base.no_skip_on_missing_deps def check_command(self, cmd, error_text, skip_msg, run_as_root=False): try: utils.execute(cmd, run_as_root=run_as_root) except RuntimeError as e: if error_text in str(e): self.skipTest(skip_msg) raise @staticmethod def _override_default_config(): # NOTE(ralonsoh): once https://review.opendev.org/#/c/641681/ is # merged, we should increase the default value of those new parameters. 
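# NOTE(editor): a small usage sketch of the config_decorator helper
# defined earlier in this module, matching how it is applied to the
# override tuples directly below (the option tuple here is illustrative):
def _sketch_config_decorator_usage(register_fn):
    decorated = config_decorator(register_fn,
                                 [('ovsdb_timeout', 30, 'OVS')])
    # Calling 'decorated' first registers the options (so they exist on
    # cfg.CONF) and only then applies each set_override tuple.
    return decorated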
ovs_agent_opts = [('ovsdb_timeout', 30, 'OVS'), ('ovsdb_debug', True, 'OVS'), ] ovs_agent_decorator = config_decorator( ovs_conf.register_ovs_agent_opts, ovs_agent_opts) mock.patch.object(ovs_conf, 'register_ovs_agent_opts', new=ovs_agent_decorator).start() class OVNSqlFixture(fixture.StaticSqlFixture): @classmethod @lockutils.synchronized(SQL_FIXTURE_LOCK) def _init_resources(cls): cls.schema_resource = provision.SchemaResource( provision.DatabaseResource("sqlite"), cls._generate_schema, teardown=False) dependency_resources = {} for name, resource in cls.schema_resource.resources: dependency_resources[name] = resource.getResource() cls.schema_resource.make(dependency_resources) cls.engine = dependency_resources['database'].engine def _delete_from_schema(self, engine): try: super(OVNSqlFixture, self)._delete_from_schema(engine) except os_db_exc.DBNonExistentTable: pass class TestOVNFunctionalBase(test_plugin.Ml2PluginV2TestCase, BaseLoggingTestCase): OVS_DISTRIBUTION = 'openvswitch' OVN_DISTRIBUTION = 'ovn' OVN_SCHEMA_FILES = ['ovn-nb.ovsschema', 'ovn-sb.ovsschema'] _mechanism_drivers = ['logger', 'ovn'] _extension_drivers = ['port_security'] _counter = 0 l3_plugin = 'neutron.services.ovn_l3.plugin.OVNL3RouterPlugin' def setUp(self, maintenance_worker=False): ml2_config.cfg.CONF.set_override('extension_drivers', self._extension_drivers, group='ml2') ml2_config.cfg.CONF.set_override('tenant_network_types', ['geneve'], group='ml2') ml2_config.cfg.CONF.set_override('vni_ranges', ['1:65536'], group='ml2_type_geneve') ovn_conf.cfg.CONF.set_override('dns_servers', ['10.10.10.10'], group='ovn') self.addCleanup(exts.PluginAwareExtensionManager.clear_instance) super(TestOVNFunctionalBase, self).setUp() self.test_log_dir = os.path.join(DEFAULT_LOG_DIR, self.id()) base.setup_test_logging( cfg.CONF, self.test_log_dir, "testrun.txt") mm = directory.get_plugin().mechanism_manager self.mech_driver = mm.mech_drivers['ovn'].obj self.l3_plugin = directory.get_plugin(constants.L3) self.ovsdb_server_mgr = None self.ovn_northd_mgr = None self.maintenance_worker = maintenance_worker self.temp_dir = self.useFixture(fixtures.TempDir()).path self._start_ovsdb_server_and_idls() self._start_ovn_northd() def _get_install_share_path(self): lookup_paths = set() for installation in ['local', '']: for distribution in [self.OVN_DISTRIBUTION, self.OVS_DISTRIBUTION]: exists = True for ovn_file in self.OVN_SCHEMA_FILES: path = os.path.join(os.path.sep, 'usr', installation, 'share', distribution, ovn_file) exists &= os.path.isfile(path) lookup_paths.add(os.path.dirname(path)) if exists: return os.path.dirname(path) msg = 'Either ovn-nb.ovsschema and/or ovn-sb.ovsschema are missing. ' msg += 'Looked for schemas in paths:' + ', '.join(sorted(lookup_paths)) raise FileNotFoundError( errno.ENOENT, os.strerror(errno.ENOENT), msg) # FIXME(lucasagomes): Workaround for # https://bugs.launchpad.net/networking-ovn/+bug/1808146. We should # investigate and properly fix the problem. This method is just a # workaround to alleviate the gate for now and should not be considered # a proper fix. 
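# NOTE(editor): (editorial aside, related to the FIXME above) the
# workaround below installs OVNSqlFixture, whose _delete_from_schema
# tolerates already-missing tables. A condensed sketch of that tolerant
# teardown shape ('delete_from_schema' is a hypothetical callable):
def _sketch_tolerant_teardown(delete_from_schema, engine):
    from oslo_db import exception as os_db_exc
    try:
        delete_from_schema(engine)
    except os_db_exc.DBNonExistentTable:
        # The table is already gone; acceptable during teardown.
        pass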
    def _setup_database_fixtures(self):
        fixture = OVNSqlFixture()
        self.useFixture(fixture)
        self.engine = fixture.engine

    def get_additional_service_plugins(self):
        p = super(TestOVNFunctionalBase,
                  self).get_additional_service_plugins()
        p.update({'revision_plugin_name': 'revisions'})
        return p

    @property
    def _ovsdb_protocol(self):
        return self.get_ovsdb_server_protocol()

    def get_ovsdb_server_protocol(self):
        return 'unix'

    def _start_ovn_northd(self):
        if not self.ovsdb_server_mgr:
            return
        ovn_nb_db = self.ovsdb_server_mgr.get_ovsdb_connection_path('nb')
        ovn_sb_db = self.ovsdb_server_mgr.get_ovsdb_connection_path('sb')
        self.ovn_northd_mgr = self.useFixture(
            process.OvnNorthd(self.temp_dir,
                              ovn_nb_db, ovn_sb_db,
                              protocol=self._ovsdb_protocol))

    def _start_ovsdb_server_and_idls(self):
        # Start two ovsdb-servers, one each for the OVN NB DB and the OVN SB
        # DB. The ovsdb-server with the OVN SB DB can be used to test the
        # chassis up/down events.
        install_share_path = self._get_install_share_path()
        self.ovsdb_server_mgr = self.useFixture(
            process.OvsdbServer(self.temp_dir, install_share_path,
                                ovn_nb_db=True, ovn_sb_db=True,
                                protocol=self._ovsdb_protocol))
        set_cfg = cfg.CONF.set_override
        set_cfg('ovn_nb_connection',
                self.ovsdb_server_mgr.get_ovsdb_connection_path(), 'ovn')
        set_cfg('ovn_sb_connection',
                self.ovsdb_server_mgr.get_ovsdb_connection_path(
                    db_type='sb'), 'ovn')
        set_cfg('ovn_nb_private_key', self.ovsdb_server_mgr.private_key,
                'ovn')
        set_cfg('ovn_nb_certificate', self.ovsdb_server_mgr.certificate,
                'ovn')
        set_cfg('ovn_nb_ca_cert', self.ovsdb_server_mgr.ca_cert, 'ovn')
        set_cfg('ovn_sb_private_key', self.ovsdb_server_mgr.private_key,
                'ovn')
        set_cfg('ovn_sb_certificate', self.ovsdb_server_mgr.certificate,
                'ovn')
        set_cfg('ovn_sb_ca_cert', self.ovsdb_server_mgr.ca_cert, 'ovn')

        # NOTE(mjozefcz): We can see occasional functional test failures
        # because of a low timeout value - set it to 15 seconds, which
        # should be enough.
        # More info: bug 1868110.
        cfg.CONF.set_override(
            'ovsdb_connection_timeout', 15, 'ovn')

        class TriggerCls(mock.MagicMock):
            def trigger(self):
                pass

        trigger_cls = TriggerCls()
        if self.maintenance_worker:
            trigger_cls.trigger.__self__.__class__ = worker.MaintenanceWorker
            cfg.CONF.set_override('neutron_sync_mode', 'off', 'ovn')

        self.addCleanup(self._collect_processes_logs)
        self.addCleanup(self.stop)
        # mech_driver.post_fork_initialize creates the IDL connections
        self.mech_driver.post_fork_initialize(
            mock.ANY, mock.ANY, trigger_cls.trigger)
        self.nb_api = self.mech_driver._nb_ovn
        self.sb_api = self.mech_driver._sb_ovn

    def _collect_processes_logs(self):
        for database in ("nb", "sb"):
            for file_suffix in ("log", "db"):
                src_filename = "ovn_%(db)s.%(suffix)s" % {
                    'db': database,
                    'suffix': file_suffix
                }
                dst_filename = "ovn_%(db)s-%(timestamp)s.%(suffix)s" % {
                    'db': database,
                    'suffix': file_suffix,
                    'timestamp': datetime.now().strftime('%y-%m-%d_%H-%M-%S'),
                }
                filepath = os.path.join(self.temp_dir, src_filename)
                shutil.copyfile(
                    filepath, os.path.join(self.test_log_dir, dst_filename))

    def stop(self):
        if self.maintenance_worker:
            self.mech_driver.nb_synchronizer.stop()
            self.mech_driver.sb_synchronizer.stop()
        self.mech_driver._nb_ovn.ovsdb_connection.stop()
        self.mech_driver._sb_ovn.ovsdb_connection.stop()

    def restart(self):
        self.stop()
        if self.ovsdb_server_mgr:
            self.ovsdb_server_mgr.stop()
        if self.ovn_northd_mgr:
            self.ovn_northd_mgr.stop()
        self.ovsdb_server_mgr.delete_dbs()
        self._start_ovsdb_server_and_idls()
        self._start_ovn_northd()

    def add_fake_chassis(self, host, physical_nets=None, external_ids=None,
                         name=None):
        physical_nets = physical_nets or []
        external_ids = external_ids or {}
        bridge_mapping = ",".join(["%s:br-provider%s" % (phys_net, i)
                                   for i, phys_net in
                                   enumerate(physical_nets)])
        if name is None:
            name = uuidutils.generate_uuid()
        external_ids['ovn-bridge-mappings'] = bridge_mapping
        # We use a different IP address for the Encap of every fake chassis,
        # because as of OVS 2.11 the SB schema does not allow two entries
        # with the same (ip, type) pair. This should have no impact, as the
        # tunnels won't get created anyway since ovn-controller is not
        # running. Ideally we shouldn't create more than 255 fake chassis,
        # but from the SB DB's point of view the 'ip' column can be any
        # string, so we could add entries such as ip='172.24.4.1000'.
        self._counter += 1
        self.sb_api.chassis_add(
            name, ['geneve'], '172.24.4.%d' % self._counter,
            external_ids=external_ids,
            hostname=host).execute(check_error=True)
        return name

    def del_fake_chassis(self, chassis, if_exists=True):
        self.sb_api.chassis_del(
            chassis, if_exists=if_exists).execute(check_error=True)

neutron-16.0.0.0b2.dev214/neutron/tests/functional/cmd/
neutron-16.0.0.0b2.dev214/neutron/tests/functional/cmd/__init__.py
neutron-16.0.0.0b2.dev214/neutron/tests/functional/cmd/process_spawn.py
# Copyright 2016 Red Hat, Inc.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import random import signal import socket import sys import time from neutron_lib import constants as n_const from oslo_config import cfg from neutron.agent.linux import daemon UNIX_FAMILY = 'UNIX' OPTS = [ cfg.IntOpt('num_children', short='n', default=0, help='Number of children to spawn', required=False), cfg.StrOpt('family', short='f', default=n_const.IPv4, choices=[n_const.IPv4, n_const.IPv6, UNIX_FAMILY], help='Listen socket family (%(v4)s, %(v6)s or %(unix)s)' % { 'v4': n_const.IPv4, 'v6': n_const.IPv6, 'unix': UNIX_FAMILY }, required=False), cfg.StrOpt('proto', short='p', default=n_const.PROTO_NAME_TCP, choices=[n_const.PROTO_NAME_TCP, n_const.PROTO_NAME_UDP], help='Protocol (%(tcp)s or %(udp)s)' % { 'tcp': n_const.PROTO_NAME_TCP, 'udp': n_const.PROTO_NAME_UDP }, required=False), cfg.BoolOpt('parent_listen', short='pl', default=True, help='Parent process must listen too', required=False), cfg.BoolOpt('ignore_sigterm', short='i', default=False, help='Ignore SIGTERM', required=False) ] class ProcessSpawn(daemon.Daemon): """This class is part of the functional test of the netns_cleanup module. It allows spawning processes that listen on random ports either on tcp(6), udp(6) or unix sockets. Also it allows handling or ignoring SIGTERM to check whether the cleanup works as expected by getting rid of the spawned processes. 
""" MAX_BIND_RETRIES = 64 DCT_FAMILY = { n_const.IPv4: socket.AF_INET, n_const.IPv6: socket.AF_INET6, UNIX_FAMILY: socket.AF_UNIX } DCT_PROTO = { n_const.PROTO_NAME_TCP: socket.SOCK_STREAM, n_const.PROTO_NAME_UDP: socket.SOCK_DGRAM, } def __init__(self, pidfile=None, family=n_const.IPv4, proto=n_const.PROTO_NAME_TCP, ignore_sigterm=False, num_children=0, parent_must_listen=True): self.family = family self.proto = proto self.ignore_sigterm = ignore_sigterm self.num_children = num_children self.listen_socket = None self.parent_must_listen = parent_must_listen self.child_pids = [] super(ProcessSpawn, self).__init__(pidfile) def start_listening(self): socket_family = self.DCT_FAMILY[self.family] socket_type = self.DCT_PROTO[self.proto] self.listen_socket = socket.socket(socket_family, socket_type) # Set a different seed per process to increase randomness random.seed(os.getpid()) # Try to listen in a random port which is not currently in use retries = 0 while retries < ProcessSpawn.MAX_BIND_RETRIES: # NOTE(dalvarez): not finding a free port on a freshly created # namespace is very unlikely but if problems show up, retries can # be increased to avoid tests failing try: if self.family == UNIX_FAMILY: self.listen_socket.bind('') else: # Pick a non privileged port port = random.randint(1024, 65535) self.listen_socket.bind(('', port)) except socket.error: retries += 1 else: if n_const.PROTO_NAME_TCP in self.proto: self.listen_socket.listen(0) break def do_sleep(self): while True: time.sleep(10) def run(self): # Spawn as many children as requested children = [] while len(children) != self.num_children: child_pid = os.fork() if child_pid == 0: # Listen and do nothing else self.start_listening() self.do_sleep() return children.append(child_pid) # Install a SIGTERM handler if requested handler = ( signal.SIG_IGN if self.ignore_sigterm else self.sigterm_handler) signal.signal(signal.SIGTERM, handler) self.child_pids = children if self.parent_must_listen: self.start_listening() self.do_sleep() def sigterm_handler(self, signum, frame): if self.listen_socket: self.listen_socket.close() for child in self.child_pids: try: os.kill(child, signal.SIGTERM) except OSError: pass sys.exit(0) def main(): cfg.CONF.register_cli_opts(OPTS) cfg.CONF(project='neutron', default_config_files=[]) proc_spawn = ProcessSpawn(num_children=cfg.CONF.num_children, family=cfg.CONF.family, proto=cfg.CONF.proto, parent_must_listen=cfg.CONF.parent_listen, ignore_sigterm=cfg.CONF.ignore_sigterm) proc_spawn.start() if __name__ == "__main__": main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/cmd/test_ipset_cleanup.py0000644000175000017500000000233100000000000027147 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.agent.linux import ipset_manager from neutron.cmd import ipset_cleanup from neutron.conf.agent import cmd from neutron.tests.functional import base class TestIPSetCLIConfig(base.BaseLoggingTestCase): def setup_config(self, args=None): self.conf = ipset_cleanup.setup_conf() super(TestIPSetCLIConfig, self).setup_config(args=args) def test_ipset_opts_registration(self): self.assertFalse(self.conf.allsets) self.assertFalse(self.conf.force) self.assertEqual(ipset_manager.NET_PREFIX, self.conf.prefix) # to unregister opts self.conf.reset() self.conf.unregister_opts(cmd.ip_opts) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/cmd/test_linuxbridge_cleanup.py0000644000175000017500000000730600000000000030346 0ustar00coreycorey00000000000000# Copyright (c) 2015 Thales Services SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures import mock from neutron_lib import constants from neutron.agent.linux import ip_lib from neutron.plugins.ml2.drivers.linuxbridge.agent import \ linuxbridge_neutron_agent as lb_agent from neutron.tests import base as tests_base from neutron.tests.common import config_fixtures from neutron.tests.common import net_helpers from neutron.tests.functional import base from neutron.tests import tools class LinuxbridgeCleanupTest(base.BaseSudoTestCase): def _test_linuxbridge_cleanup(self, bridge_exists, callback): br_fixture = self.useFixture( tools.SafeCleanupFixture( net_helpers.LinuxBridgeFixture( prefix=lb_agent.BRIDGE_NAME_PREFIX))).fixture config = callback(br_fixture) # NOTE(slaweq): use of oslo.privsep inside neutron-linuxbridge-cleanup # script requires rootwrap helper to be configured in this script's # config config.update({ 'AGENT': { 'root_helper': tests_base.get_rootwrap_cmd(), 'root_helper_daemon': tests_base.get_rootwrap_daemon_cmd() } }) config.update({'VXLAN': {'enable_vxlan': 'False'}}) temp_dir = self.useFixture(fixtures.TempDir()).path conf = self.useFixture(config_fixtures.ConfigFileFixture( base_filename='neutron.conf', config=config, temp_dir=temp_dir)) cmd = 'neutron-linuxbridge-cleanup', '--config-file', conf.filename ip_wrapper = ip_lib.IPWrapper(br_fixture.namespace) ip_wrapper.netns.execute(cmd) self.assertEqual(bridge_exists, ip_lib.device_exists( br_fixture.bridge.name, br_fixture.namespace)) def test_cleanup_empty_bridge(self): def callback(br_fixture): return config_fixtures.ConfigDict() self._test_linuxbridge_cleanup(False, callback) def test_no_cleanup_bridge_with_tap(self): def callback(br_fixture): # TODO(cbrandily): refactor net_helpers to avoid mocking it mock.patch.object( net_helpers, 'VETH0_PREFIX', new_callable=mock.PropertyMock( return_value=constants.TAP_DEVICE_PREFIX + '0')).start() mock.patch.object( net_helpers, 'VETH1_PREFIX', new_callable=mock.PropertyMock( return_value=constants.TAP_DEVICE_PREFIX + '1')).start() self.useFixture( tools.SafeCleanupFixture( net_helpers.LinuxBridgePortFixture( 
br_fixture.bridge, br_fixture.namespace))) return config_fixtures.ConfigDict() self._test_linuxbridge_cleanup(True, callback) def test_no_cleanup_bridge_in_bridge_mappings(self): def callback(br_fixture): br_name = br_fixture.bridge.name conf = config_fixtures.ConfigDict() conf.update( {'LINUX_BRIDGE': {'bridge_mappings': 'physnet:%s' % br_name}}) return conf self._test_linuxbridge_cleanup(True, callback) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/cmd/test_netns_cleanup.py0000644000175000017500000001543400000000000027162 0ustar00coreycorey00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import sys import eventlet import mock from neutron_lib import constants as n_const from neutron.agent.l3 import namespaces from neutron.agent.linux import dhcp from neutron.agent.linux import ip_lib from neutron.agent.linux import utils from neutron.cmd import netns_cleanup from neutron.common import utils as common_utils from neutron.conf.agent import cmd from neutron.tests import base as basetest from neutron.tests.common import net_helpers from neutron.tests.functional import base from neutron.tests.functional.cmd import process_spawn GET_NAMESPACES = 'neutron.agent.linux.ip_lib.list_network_namespaces' TEST_INTERFACE_DRIVER = 'neutron.agent.linux.interface.OVSInterfaceDriver' NUM_SUBPROCESSES = 6 class NetnsCleanupTest(base.BaseSudoTestCase): def setUp(self): super(NetnsCleanupTest, self).setUp() self.get_namespaces_p = mock.patch(GET_NAMESPACES) self.get_namespaces = self.get_namespaces_p.start() def setup_config(self, args=None): if args is None: args = [] # force option enabled to make sure non-empty namespaces are # cleaned up and deleted args.append('--force') self.conf = netns_cleanup.setup_conf() self.conf.set_override('interface_driver', TEST_INTERFACE_DRIVER) self.config_parse(conf=self.conf, args=args) def test_cleanup_network_namespaces_cleans_dhcp_and_l3_namespaces(self): dhcp_namespace = self.useFixture( net_helpers.NamespaceFixture(dhcp.NS_PREFIX)).name l3_namespace = self.useFixture( net_helpers.NamespaceFixture(namespaces.NS_PREFIX)).name bridge = self.useFixture( net_helpers.VethPortFixture(namespace=dhcp_namespace)).bridge self.useFixture( net_helpers.VethPortFixture(bridge, l3_namespace)) # we scope the get_namespaces to our own ones not to affect other # tests, as otherwise cleanup will kill them all self.get_namespaces.return_value = [l3_namespace, dhcp_namespace] # launch processes in each namespace to make sure they're # killed during cleanup procs_launched = self._launch_processes([l3_namespace, dhcp_namespace]) self.assertIsNot(procs_launched, 0) try: common_utils.wait_until_true( lambda: self._get_num_spawned_procs() == procs_launched, timeout=15) except eventlet.Timeout: num_spawned_procs = self._get_num_spawned_procs() err_str = ("Expected number/spawned number: {0}/{1}\nProcess " 
"information:\n".format(num_spawned_procs, procs_launched)) cmd = ['ps', '-f', '-u', 'root'] err_str += utils.execute(cmd, run_as_root=True) raise Exception(err_str) netns_cleanup.cleanup_network_namespaces(self.conf) self.get_namespaces_p.stop() namespaces_now = ip_lib.list_network_namespaces() procs_after = self._get_num_spawned_procs() self.assertEqual(procs_after, 0) self.assertNotIn(l3_namespace, namespaces_now) self.assertNotIn(dhcp_namespace, namespaces_now) @staticmethod def _launch_processes(namespaces): """Launch processes in the specified namespaces. This function will spawn processes inside the given namespaces: - 6 processes listening on tcp ports (parent + 5 children) - 1 process + 5 subprocesses listening on unix sockets - 1 process + 5 subprocesses listening on udp6 sockets First two sets of processes will process SIGTERM so when the parent gets killed, it will kill all spawned children The last set of processes will ignore SIGTERM. This will allow us to test the cleanup functionality which will issue a SIGKILL to all remaining processes after the SIGTERM attempt """ python_exec = os.path.basename(sys.executable) commands = [[python_exec, process_spawn.__file__, '-n', NUM_SUBPROCESSES, '-f', n_const.IPv4, '-p', n_const.PROTO_NAME_TCP, '--noignore_sigterm', '--parent_listen'], [python_exec, process_spawn.__file__, '-n', NUM_SUBPROCESSES, '-f', process_spawn.UNIX_FAMILY, '-p', n_const.PROTO_NAME_TCP, '--noignore_sigterm', '--noparent_listen'], [python_exec, process_spawn.__file__, '-n', NUM_SUBPROCESSES, '-f', n_const.IPv4, '-p', n_const.PROTO_NAME_UDP, '--ignore_sigterm', '--noparent_listen']] proc_count = 0 for netns in namespaces: ip = ip_lib.IPWrapper(namespace=netns) for command in commands: # The total amount of processes per command is # the process itself plus the number of subprocesses spawned by # it proc_count += (1 + NUM_SUBPROCESSES) # We need to pass the PATH env variable so that python # interpreter runs under the same virtual environment. # Otherwise, it won't find the necessary packages such as # oslo_config ip.netns.execute(command, addl_env={'PATH': os.environ.get('PATH')}) return proc_count @staticmethod def _get_num_spawned_procs(): cmd = ['ps', '-f', '-u', 'root'] out = utils.execute(cmd, run_as_root=True) return sum([1 for line in out.splitlines() if 'process_spawn' in line]) class TestNETNSCLIConfig(basetest.BaseTestCase): def setup_config(self, args=None): self.conf = netns_cleanup.setup_conf() super(TestNETNSCLIConfig, self).setup_config(args=args) def test_netns_opts_registration(self): self.assertFalse(self.conf.force) # to unregister opts self.conf.reset() self.conf.unregister_opts(cmd.netns_opts) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/cmd/test_ovs_cleanup.py0000644000175000017500000000517200000000000026640 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections from neutron.cmd import ovs_cleanup from neutron.common import utils from neutron.conf.agent import cmd from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.tests.common import net_helpers from neutron.tests.functional.agent.linux import base class TestOVSCLIConfig(base.BaseOVSLinuxTestCase): def setup_config(self, args=None): self.conf = ovs_cleanup.setup_conf() super(TestOVSCLIConfig, self).setup_config(args=args) def test_ovs_opts_registration(self): self.assertFalse(self.conf.ovs_all_ports) # to unregister opts self.conf.reset() self.conf.unregister_opts(cmd.ovs_opts) def test_do_main_default_options(self): int_br = self.useFixture(net_helpers.OVSBridgeFixture()).bridge self.conf.set_override("integration_bridge", int_br.br_name, 'OVS') self.conf.set_override("ovs_all_ports", False) noskip = collections.defaultdict(list) skip = collections.defaultdict(list) # add two vifs, one skipped, and a non-vif port to int_br for collection in (noskip, skip): collection[int_br].append( self.useFixture(net_helpers.OVSPortFixture(int_br)).port.name) # set skippable vif to be skipped int_br.ovsdb.db_set( 'Interface', skip[int_br][0], ('external_ids', {constants.SKIP_CLEANUP: "True"}) ).execute(check_error=True) device_name = utils.get_rand_name() skip[int_br].append(device_name) int_br.add_port(device_name, ('type', 'internal')) # sanity check for collection in (noskip, skip): for bridge, ports in collection.items(): port_list = bridge.get_port_name_list() for port in ports: self.assertIn(port, port_list) ovs_cleanup.do_main(self.conf) ports = int_br.get_port_name_list() for vif in noskip[int_br]: self.assertNotIn(vif, ports) for port in skip[int_br]: self.assertIn(port, ports) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/cmd/test_status.py0000644000175000017500000000544400000000000025647 0ustar00coreycorey00000000000000# Copyright (c) 2019 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions from oslo_upgradecheck import upgradecheck from neutron.agent.linux import utils from neutron.tests.functional import base class StatusTest(base.BaseLoggingTestCase): def test_neutron_status_cli(self): """This test runs "neutron-status upgrade check" command and check if stdout contains header "Upgrade Check Results". It also checks if stderr is empty. Example output from this CLI tool looks like: +----------------------------------------------------------------+ | Upgrade Check Results | +----------------------------------------------------------------+ | Check: Worker counts configured | | Result: Warning | | Details: The default number of workers has changed. Please see | | release notes for the new values, but it is strongly | | encouraged for deployers to manually set the values for | | api_workers and rpc_workers. 
        |
        +----------------------------------------------------------------+

        Error codes which might be returned by this command:
        - Code.SUCCESS,
        - Code.WARNING,
        - Code.FAILURE
        are all accepted, as we don't want to test here whether there are
        any potential problems with the upgrade or everything is fine;
        that depends on the deployment's configuration.
        """
        expected_result_title = "Upgrade Check Results"
        try:
            stdout, stderr = utils.execute(
                cmd=["neutron-status", "upgrade", "check"],
                extra_ok_codes=[upgradecheck.Code.SUCCESS,
                                upgradecheck.Code.WARNING,
                                upgradecheck.Code.FAILURE],
                return_stderr=True)
            self.assertEqual('', stderr)
            self.assertTrue(expected_result_title in stdout)
        except exceptions.ProcessExecutionError as error:
            self.fail("neutron-status upgrade check command failed to run. "
                      "Error: %s" % error)

neutron-16.0.0.0b2.dev214/neutron/tests/functional/common/
neutron-16.0.0.0b2.dev214/neutron/tests/functional/common/__init__.py
neutron-16.0.0.0b2.dev214/neutron/tests/functional/common/test_utils.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import testtools

from neutron.common import utils
from neutron.tests.functional import base


class TestWaitUntilTrue(base.BaseLoggingTestCase):
    def test_wait_until_true_predicate_succeeds(self):
        utils.wait_until_true(lambda: True)

    def test_wait_until_true_predicate_fails(self):
        with testtools.ExpectedException(utils.WaitTimeout):
            utils.wait_until_true(lambda: False, 2)

neutron-16.0.0.0b2.dev214/neutron/tests/functional/constants.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
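# NOTE(editor): wait_until_true, exercised by TestWaitUntilTrue above, is
# essentially a polling loop with a deadline. A minimal stand-alone sketch
# of the idiom (standard library only, Python 3; the real helper lives in
# neutron.common.utils, accepts (predicate, timeout=60, sleep=1) and raises
# its own WaitTimeout exception):

import time


class WaitTimeoutSketch(Exception):
    """Raised when the predicate never became true in time."""


def wait_until_true_sketch(predicate, timeout=60, sleep=1):
    # Poll the predicate until it returns True or the deadline passes.
    deadline = time.monotonic() + timeout
    while not predicate():
        if time.monotonic() > deadline:
            raise WaitTimeoutSketch('timed out after %s seconds' % timeout)
        time.sleep(sleep)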
VLAN_COUNT = 4096 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4030454 neutron-16.0.0.0b2.dev214/neutron/tests/functional/db/0000755000175000017500000000000000000000000022526 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/db/__init__.py0000644000175000017500000000000000000000000024625 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4030454 neutron-16.0.0.0b2.dev214/neutron/tests/functional/db/migrations/0000755000175000017500000000000000000000000024702 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/db/migrations/__init__.py0000644000175000017500000000000000000000000027001 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000023700000000000011457 xustar0000000000000000137 path=neutron-16.0.0.0b2.dev214/neutron/tests/functional/db/migrations/test_2e0d7a8a1586_add_binding_index_to_routerl3agentbinding.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/db/migrations/test_2e0d7a8a1586_add_binding_index0000644000175000017500000000726000000000000033121 0ustar00coreycorey00000000000000# Copyright 2016 Business Cat is Very Serious 13.37 # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import collections from oslo_db.sqlalchemy import utils as db_utils from oslo_utils import uuidutils from neutron.tests.functional.db import test_migrations class HARouterPortMigrationMixin(object): """Validates binding_index for RouterL3AgentBinding migration.""" def _create_so(self, o_type, values): """create standard attr object.""" stan = db_utils.get_table(self.engine, 'standardattributes') # find next available id taking into account existing records rec_ids = [r.id for r in self.engine.execute(stan.select()).fetchall()] next_id = max([0] + rec_ids) + 1 self.engine.execute(stan.insert().values({'id': next_id, 'resource_type': o_type})) values['standard_attr_id'] = next_id return self._create_rec(o_type, values) def _create_rec(self, o_type, values): otable = db_utils.get_table(self.engine, o_type) self.engine.execute(otable.insert().values(values)) def _make_router_agents_and_bindings(self, router_id): self._create_so('routers', {'id': router_id}) # each router gets a couple of agents for _ in range(2): agent_id = uuidutils.generate_uuid() timestamp = '2000-04-06T14:34:23' self._create_rec('agents', {'id': agent_id, 'topic': 'x', 'agent_type': 'L3', 'binary': 'x', 'host': agent_id, 'created_at': timestamp, 'started_at': timestamp, 'heartbeat_timestamp': timestamp, 'configurations': ''}) self._create_rec('routerl3agentbindings', {'router_id': router_id, 'l3_agent_id': agent_id}) def _create_ha_routers(self, engine): for rid in [uuidutils.generate_uuid() for i in range(10)]: self._make_router_agents_and_bindings(rid) def _pre_upgrade_2e0d7a8a1586(self, engine): self._create_ha_routers(engine) return True # return True so check function is invoked after migrate def _check_2e0d7a8a1586(self, engine, data): bindings_table = db_utils.get_table(engine, 'routerl3agentbindings') rows = engine.execute(bindings_table.select()).fetchall() routers_to_bindings = collections.defaultdict(list) for router_id, agent_id, binding_index in rows: routers_to_bindings[router_id].append(binding_index) for binding_indices in routers_to_bindings.values(): self.assertEqual(list(range(1, 3)), sorted(binding_indices)) class TestHARouterPortMigrationMysql(HARouterPortMigrationMixin, test_migrations.TestWalkMigrationsMysql): pass class TestHARouterPortMigrationPsql(HARouterPortMigrationMixin, test_migrations.TestWalkMigrationsPsql): pass ././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=neutron-16.0.0.0b2.dev214/neutron/tests/functional/db/migrations/test_3b935b28e7a0_migrate_to_pluggable_ipam.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/db/migrations/test_3b935b28e7a0_migrate_to_plugga0000644000175000017500000001457500000000000033207 0ustar00coreycorey00000000000000# Copyright 2016 Infoblox Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from neutron_lib import constants from oslo_db.sqlalchemy import utils as db_utils from oslo_utils import uuidutils from neutron.tests.functional.db import test_migrations class MigrationToPluggableIpamMixin(object): """Validates data migration to Pluggable IPAM.""" _standard_attribute_id = 0 def _gen_attr_id(self, engine, type): self._standard_attribute_id += 1 standardattributes = db_utils.get_table(engine, 'standardattributes') engine.execute(standardattributes.insert().values({ 'id': self._standard_attribute_id, 'resource_type': type})) return self._standard_attribute_id def _create_subnets(self, engine, data): """Create subnets and saves subnet id in data""" networks = db_utils.get_table(engine, 'networks') subnets = db_utils.get_table(engine, 'subnets') pools = db_utils.get_table(engine, 'ipallocationpools') allocations = db_utils.get_table(engine, 'ipallocations') for cidr in data: ip_version = (constants.IP_VERSION_6 if ':' in cidr else constants.IP_VERSION_4) # Save generated id in incoming dict to simplify validations network_id = uuidutils.generate_uuid() network_dict = dict( id=network_id, standard_attr_id=self._gen_attr_id(engine, 'networks')) engine.execute(networks.insert().values(network_dict)) data[cidr]['id'] = uuidutils.generate_uuid() subnet_dict = dict(id=data[cidr]['id'], cidr=cidr, ip_version=ip_version, standard_attr_id=self._gen_attr_id(engine, 'subnets'), network_id=network_id) engine.execute(subnets.insert().values(subnet_dict)) if data[cidr].get('pools'): for pool in data[cidr]['pools']: pool_dict = dict(id=uuidutils.generate_uuid(), first_ip=pool['first_ip'], last_ip=pool['last_ip'], subnet_id=data[cidr]['id']) engine.execute(pools.insert().values(pool_dict)) if data[cidr].get('allocations'): for ip in data[cidr]['allocations']: ip_dict = dict(ip_address=ip, subnet_id=data[cidr]['id'], network_id=network_id) engine.execute(allocations.insert().values(ip_dict)) def _pre_upgrade_3b935b28e7a0(self, engine): data = { '172.23.0.0/16': { 'pools': [{'first_ip': '172.23.0.2', 'last_ip': '172.23.255.254'}], 'allocations': ('172.23.0.2', '172.23.245.2')}, '192.168.40.0/24': { 'pools': [{'first_ip': '192.168.40.2', 'last_ip': '192.168.40.100'}, {'first_ip': '192.168.40.105', 'last_ip': '192.168.40.150'}, {'first_ip': '192.168.40.155', 'last_ip': '192.168.40.157'}, ], 'allocations': ('192.168.40.2', '192.168.40.3', '192.168.40.15', '192.168.40.60')}, 'fafc:babc::/64': { 'pools': [{'first_ip': 'fafc:babc::2', 'last_ip': 'fafc:babc::6:fe00', }], 'allocations': ('fafc:babc::3',)}} self._create_subnets(engine, data) return data def _check_3b935b28e7a0(self, engine, data): subnets = db_utils.get_table(engine, 'ipamsubnets') pools = db_utils.get_table(engine, 'ipamallocationpools') allocations = db_utils.get_table(engine, 'ipamallocations') ipam_subnets = engine.execute(subnets.select()).fetchall() # Count of ipam subnets should match count of usual subnets self.assertEqual(len(data), len(ipam_subnets)) neutron_to_ipam_id = {subnet.neutron_subnet_id: subnet.id for subnet in ipam_subnets} for cidr in data: self.assertIn(data[cidr]['id'], neutron_to_ipam_id) ipam_subnet_id = neutron_to_ipam_id[data[cidr]['id']] # Validate ip allocations are migrated correctly ipam_allocations = engine.execute(allocations.select().where( allocations.c.ipam_subnet_id == ipam_subnet_id)).fetchall() for ipam_allocation in ipam_allocations: self.assertIn(ipam_allocation.ip_address, data[cidr]['allocations']) self.assertEqual(len(data[cidr]['allocations']), len(ipam_allocations)) # Validate allocation 
pools are migrated correctly.
            ipam_pools = engine.execute(pools.select().where(
                pools.c.ipam_subnet_id == ipam_subnet_id)).fetchall()
            # Convert to a dict for easier lookup
            pool_dict = {pool.first_ip: pool.last_ip for pool in ipam_pools}
            for p in data[cidr]['pools']:
                self.assertIn(p['first_ip'], pool_dict)
                self.assertEqual(p['last_ip'], pool_dict[p['first_ip']])
            self.assertEqual(len(data[cidr]['pools']), len(ipam_pools))


class TestMigrationToPluggableIpamMysql(
        MigrationToPluggableIpamMixin,
        test_migrations.TestWalkMigrationsMysql):
    pass


class TestMigrationToPluggableIpamPsql(
        MigrationToPluggableIpamMixin,
        test_migrations.TestWalkMigrationsPsql):
    pass

neutron-16.0.0.0b2.dev214/neutron/tests/functional/db/migrations/test_97c25b0d2353_add_name_desc.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_db.sqlalchemy import utils as db_utils
from oslo_utils import uuidutils

from neutron.tests.functional.db import test_migrations


def _create_record_with_sa(engine, resource_type, attributes):
    """Create a record with standard attributes."""
    sa_table = db_utils.get_table(engine, 'standardattributes')
    sa_record = engine.execute(sa_table.insert().values(
        {'resource_type': resource_type}))
    attributes['standard_attr_id'] = sa_record.inserted_primary_key[0]
    resource_table = db_utils.get_table(engine, resource_type)
    engine.execute(resource_table.insert().values(attributes))


class NetworkSegmentNameAndDescriptionMixin(object):
    """Validates the migration that adds name and description to segments."""

    def _pre_upgrade_97c25b0d2353(self, engine):
        # Create a network for segments to belong to
        net_id = uuidutils.generate_uuid()
        _create_record_with_sa(engine, 'networks', {
            'id': net_id, 'name': '97c25b0d2353'})
        # Create some segments with the old model
        ns_table = db_utils.get_table(engine, 'networksegments')
        for s in range(5):
            engine.execute(ns_table.insert().values({
                'id': uuidutils.generate_uuid(),
                'network_id': net_id,
                'network_type': 'flat'}))
        return True  # Return True so check function is invoked after migrate

    def _check_97c25b0d2353(self, engine, data):
        ns_table = db_utils.get_table(engine, 'networksegments')
        sa_table = db_utils.get_table(engine, 'standardattributes')
        for segment in engine.execute(ns_table.select()).fetchall():
            # Ensure a stdattr record was created for this old segment
            standard_id = segment.standard_attr_id
            rows = engine.execute(sa_table.select().where(
                sa_table.c.id == standard_id)).fetchall()
            self.assertEqual(1, len(rows))
            # Ensure this old segment can now be named
            engine.execute(ns_table.update().values(name='Zeus').where(
                ns_table.c.standard_attr_id == standard_id))


class TestNetworkSegmentNameDescMySql(
        NetworkSegmentNameAndDescriptionMixin,
        test_migrations.TestWalkMigrationsMysql):
    pass


class TestNetworkSegmentNameDescPsql(
        NetworkSegmentNameAndDescriptionMixin,
        test_migrations.TestWalkMigrationsPsql):
    pass
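# NOTE(editor): every migration test in this directory follows the same
# naming convention: the TestWalkMigrations* base classes (built on
# oslo.db's test_migrations helpers, as the imports suggest) look up
# _pre_upgrade_<revision> and _check_<revision> hooks by name, run the
# former to seed data before alembic applies the revision, and run the
# latter afterwards whenever the former returned a truthy value. A
# skeleton for a hypothetical revision 'deadbeef1234' (a sketch, not a
# real neutron revision):


class _MigrationHookSketchMixin(object):
    """Illustrates the _pre_upgrade/_check hook pair used above."""

    def _pre_upgrade_deadbeef1234(self, engine):
        # Seed rows against the pre-migration schema here, e.g. with
        # oslo_db.sqlalchemy.utils.get_table() and engine.execute().
        data = {'seeded': True}
        # A truthy return value tells the walker to invoke the matching
        # _check hook once the migration has been applied.
        return data

    def _check_deadbeef1234(self, engine, data):
        # Inspect the migrated schema/rows and assert on the result.
        assert data['seeded']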
././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=neutron-16.0.0.0b2.dev214/neutron/tests/functional/db/migrations/test_a8b517cff8ab_add_routerport_bindings_for_ha.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/db/migrations/test_a8b517cff8ab_add_routerport_bi0000644000175000017500000001136700000000000033444 0ustar00coreycorey00000000000000# Copyright 2016 Business Cat is Very Serious # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron_lib import constants from oslo_db.sqlalchemy import utils as db_utils from oslo_utils import uuidutils from neutron.tests.functional.db import test_migrations class HARouterPortMigrationMixin(object): """Validates HA port to router port migration.""" def _create_so(self, o_type, values): """create standard attr object.""" stan = db_utils.get_table(self.engine, 'standardattributes') # find next available id taking into account existing records rec_ids = [r.id for r in self.engine.execute(stan.select()).fetchall()] next_id = max([0] + rec_ids) + 1 self.engine.execute(stan.insert().values({'id': next_id, 'resource_type': o_type})) values['standard_attr_id'] = next_id return self._create_rec(o_type, values) def _create_rec(self, o_type, values): otable = db_utils.get_table(self.engine, o_type) self.engine.execute(otable.insert().values(values)) def _make_router_agents_and_ports(self, router_id, network_id, add_binding): self._create_so('routers', {'id': router_id}) # each router gets a couple of agents for _ in range(2): port_id = uuidutils.generate_uuid() self._create_so('ports', {'id': port_id, 'network_id': network_id, 'mac_address': port_id[0:31], 'admin_state_up': True, 'device_id': router_id, 'device_owner': 'network', 'status': 'ACTIVE'}) agent_id = uuidutils.generate_uuid() timestamp = '2000-04-06T14:34:23' self._create_rec('agents', {'id': agent_id, 'topic': 'x', 'agent_type': 'L3', 'binary': 'x', 'host': agent_id, 'created_at': timestamp, 'started_at': timestamp, 'heartbeat_timestamp': timestamp, 'configurations': ''}) self._create_rec('ha_router_agent_port_bindings', {'port_id': port_id, 'router_id': router_id, 'l3_agent_id': agent_id}) if add_binding: ptype = constants.DEVICE_OWNER_ROUTER_HA_INTF self._create_rec('routerports', {'router_id': router_id, 'port_id': port_id, 'port_type': ptype}) def _create_ha_routers_with_ports(self, engine): network_id = uuidutils.generate_uuid() self._create_so('networks', {'id': network_id}) unpatched_router_ids = [uuidutils.generate_uuid() for i in range(10)] for rid in unpatched_router_ids: self._make_router_agents_and_ports(rid, network_id, False) # make half of the routers already have routerport bindings to simulate # a back-port of Ifd3e007aaf2a2ed8123275aa3a9f540838e3c003 patched_router_ids = [uuidutils.generate_uuid() for i in range(10)] for rid in patched_router_ids: self._make_router_agents_and_ports(rid, network_id, True) def _pre_upgrade_a8b517cff8ab(self, engine): self._create_ha_routers_with_ports(engine) return True # return True so 
check function is invoked after migrate def _check_a8b517cff8ab(self, engine, data): rp = db_utils.get_table(engine, 'routerports') # just ensuring the correct count of routerport records is enough. # 20 routers * 2 ports per router self.assertEqual(40, len(engine.execute(rp.select()).fetchall())) class TestHARouterPortMigrationMysql(HARouterPortMigrationMixin, test_migrations.TestWalkMigrationsMysql): pass class TestHARouterPortMigrationPsql(HARouterPortMigrationMixin, test_migrations.TestWalkMigrationsPsql): pass ././@PaxHeader0000000000000000000000000000022600000000000011455 xustar0000000000000000128 path=neutron-16.0.0.0b2.dev214/neutron/tests/functional/db/migrations/test_b12a3ef66e62_add_standardattr_to_qos_policies.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/db/migrations/test_b12a3ef66e62_add_standardattr_0000644000175000017500000000445200000000000033227 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_db.sqlalchemy import utils as db_utils from oslo_utils import uuidutils from neutron.tests.functional.db import test_migrations class QosStandardAttrMixin(object): """Validates qos standard attr migration.""" def _create_qos_pol(self, pol_id, description): otable = db_utils.get_table(self.engine, 'qos_policies') values = {'id': pol_id, 'description': description} self.engine.execute(otable.insert().values(values)) def _create_policies_with_descriptions(self, engine): for i in range(10): pol_id = uuidutils.generate_uuid() self._create_qos_pol(pol_id, 'description-%s' % pol_id) def _pre_upgrade_b12a3ef66e62(self, engine): self._create_policies_with_descriptions(engine) return True # return True so check function is invoked after migrate def _check_b12a3ef66e62(self, engine, data): qp = db_utils.get_table(engine, 'qos_policies') sa = db_utils.get_table(engine, 'standardattributes') for qos_pol in engine.execute(qp.select()).fetchall(): # ensure standard attributes model was created standard_id = qos_pol.standard_attr_id rows = engine.execute( sa.select().where(sa.c.id == standard_id)).fetchall() self.assertEqual(1, len(rows)) # ensure description got moved over self.assertEqual('description-%s' % qos_pol.id, rows[0].description) class TestQosStandardAttrMysql(QosStandardAttrMixin, test_migrations.TestWalkMigrationsMysql): pass class TestQosStandardAttrPsql(QosStandardAttrMixin, test_migrations.TestWalkMigrationsPsql): pass ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=neutron-16.0.0.0b2.dev214/neutron/tests/functional/db/migrations/test_c3e9d13c4367_add_binding_index_to_.py 22 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/db/migrations/test_c3e9d13c4367_add_binding_index0000644000175000017500000000752400000000000033127 0ustar00coreycorey00000000000000# Copyright 2017 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import collections from oslo_db.sqlalchemy import utils as db_utils from oslo_utils import uuidutils from neutron.tests.functional.db import test_migrations class NetworkDhcpAgentBindingMigrationMixin(object): """Validates binding_index for NetworkDhcpAgentBinding migration.""" def _create_so(self, o_type, values): """create standard attr object.""" stan = db_utils.get_table(self.engine, 'standardattributes') # find next available id taking into account existing records rec_ids = [r.id for r in self.engine.execute(stan.select()).fetchall()] next_id = max([0] + rec_ids) + 1 self.engine.execute(stan.insert().values({'id': next_id, 'resource_type': o_type})) values['standard_attr_id'] = next_id return self._create_rec(o_type, values) def _create_rec(self, o_type, values): otable = db_utils.get_table(self.engine, o_type) self.engine.execute(otable.insert().values(values)) def _make_network_agents_and_bindings(self, network_id): self._create_so('networks', {'id': network_id}) # each network gets a couple of agents for _ in range(2): agent_id = uuidutils.generate_uuid() timestamp = '2000-04-06T14:34:23' self._create_rec('agents', {'id': agent_id, 'topic': 'x', 'agent_type': 'L3', 'binary': 'x', 'host': agent_id, 'created_at': timestamp, 'started_at': timestamp, 'heartbeat_timestamp': timestamp, 'configurations': ''}) self._create_rec('networkdhcpagentbindings', {'network_id': network_id, 'dhcp_agent_id': agent_id}) def _create_networks(self, engine): for nid in [uuidutils.generate_uuid() for i in range(10)]: self._make_network_agents_and_bindings(nid) def _pre_upgrade_c3e9d13c4367(self, engine): self._create_networks(engine) return True # return True so check function is invoked after migrate def _check_c3e9d13c4367(self, engine, data): bindings_table = db_utils.get_table(engine, 'networkdhcpagentbindings') rows = engine.execute(bindings_table.select()).fetchall() networks_to_bindings = collections.defaultdict(list) for network_id, agent_id, binding_index in rows: networks_to_bindings[network_id].append(binding_index) for binding_indices in networks_to_bindings.values(): self.assertEqual(list(range(1, 3)), sorted(binding_indices)) class TestNetworkDhcpAgentBindingMigrationMysql( NetworkDhcpAgentBindingMigrationMixin, test_migrations.TestWalkMigrationsMysql): pass class TestNetworkDhcpAgentBindingMigrationPsql( NetworkDhcpAgentBindingMigrationMixin, test_migrations.TestWalkMigrationsPsql): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/db/test_ipam.py0000644000175000017500000001422200000000000025066 0ustar00coreycorey00000000000000# Copyright 2015 SUSE Linux Products GmbH # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from neutron_lib import constants from neutron_lib import context from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_utils import uuidutils import testtools from neutron.db import db_base_plugin_v2 as base_plugin from neutron.objects import ports as port_obj from neutron.objects import subnet as subnet_obj from neutron.tests.unit import testlib_api # required in order for testresources to optimize same-backend # tests together load_tests = testlib_api.module_load_tests # FIXME(zzzeek): needs to be provided by oslo.db, current version # is not working # load_tests = test_base.optimize_db_test_loader(__file__) class IpamTestCase(testlib_api.SqlTestCase): """Base class for tests that aim to test ip allocation.""" def setUp(self): super(IpamTestCase, self).setUp() cfg.CONF.set_override('notify_nova_on_port_status_changes', False) DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' self.setup_coreplugin(DB_PLUGIN_KLASS) self.plugin = base_plugin.NeutronDbPluginV2() self.cxt = context.Context(user_id=None, tenant_id=None, is_admin=True, overwrite=False) self.tenant_id = uuidutils.generate_uuid() self.network_id = uuidutils.generate_uuid() self.subnet_id = uuidutils.generate_uuid() self.port_id = uuidutils.generate_uuid() self._create_network() self._create_subnet() def result_set_to_dicts(self, resultset, keys): dicts = [] for item in resultset: item_dict = dict((x, item[x]) for x in keys) dicts.append(item_dict) return dicts def assert_ip_alloc_matches(self, expected): result_set = port_obj.IPAllocation.get_objects(self.cxt) keys = ['port_id', 'ip_address', 'subnet_id', 'network_id'] actual = self.result_set_to_dicts(result_set, keys) self.assertEqual(expected, actual) def assert_ip_alloc_pool_matches(self, expected): result_set = subnet_obj.IPAllocationPool.get_objects(self.cxt) keys = ['start', 'end', 'subnet_id'] actual = self.result_set_to_dicts(result_set, keys) self.assertEqual(expected, actual) def _create_network(self): network = {'tenant_id': self.tenant_id, 'id': self.network_id, 'name': 'test-net', 'admin_state_up': True, 'shared': False, 'status': constants.NET_STATUS_ACTIVE} return self.plugin.create_network(self.cxt, {'network': network}) def _create_subnet(self): subnet = {'tenant_id': self.tenant_id, 'id': self.subnet_id, 'name': 'test_sub', 'network_id': self.network_id, 'ip_version': constants.IP_VERSION_4, 'cidr': '10.10.10.0/29', 'enable_dhcp': False, 'gateway_ip': '10.10.10.1', 'shared': False, 'allocation_pools': constants.ATTR_NOT_SPECIFIED, 'dns_nameservers': constants.ATTR_NOT_SPECIFIED, 'host_routes': constants.ATTR_NOT_SPECIFIED} return self.plugin.create_subnet(self.cxt, {'subnet': subnet}) def _create_port(self, port_id, fixed_ips=None): port_fixed_ips = (fixed_ips if fixed_ips else constants.ATTR_NOT_SPECIFIED) port = {'tenant_id': self.tenant_id, 'name': 'test_port', 'id': port_id, 'network_id': self.network_id, 'mac_address': constants.ATTR_NOT_SPECIFIED, 'admin_state_up': True, 'status': constants.PORT_STATUS_ACTIVE, 'device_id': 'test_dev_id', 'device_owner': constants.DEVICE_OWNER_COMPUTE_PREFIX, 
'fixed_ips': port_fixed_ips} self.plugin.create_port(self.cxt, {'port': port}) def test_allocate_fixed_ip(self): fixed_ip = [{'ip_address': "10.10.10.3", 'subnet_id': self.subnet_id}] self._create_port(self.port_id, fixed_ip) ip_alloc_expected = [{'port_id': self.port_id, 'ip_address': netaddr.IPAddress( fixed_ip[0].get('ip_address')), 'subnet_id': self.subnet_id, 'network_id': self.network_id}] ip_alloc_pool_expected = [{'start': netaddr.IPAddress('10.10.10.2'), 'end': netaddr.IPAddress('10.10.10.6'), 'subnet_id': self.subnet_id}] self.assert_ip_alloc_matches(ip_alloc_expected) self.assert_ip_alloc_pool_matches(ip_alloc_pool_expected) def test_allocate_ip_exausted_pool(self): # available from .2 up to .6 -> 5 for i in range(1, 6): self._create_port(uuidutils.generate_uuid()) ip_alloc_pool_expected = [{'start': netaddr.IPAddress('10.10.10.2'), 'end': netaddr.IPAddress('10.10.10.6'), 'subnet_id': self.subnet_id}] self.assert_ip_alloc_pool_matches(ip_alloc_pool_expected) with testtools.ExpectedException(n_exc.IpAddressGenerationFailure): self._create_port(self.port_id) class TestIpamMySql(testlib_api.MySQLTestCaseMixin, IpamTestCase): pass class TestIpamPsql(testlib_api.PostgreSQLTestCaseMixin, IpamTestCase): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/db/test_migrations.py0000644000175000017500000006474600000000000026334 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
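# NOTE(editor): a quick sanity check of the allocation-pool arithmetic the
# IPAM tests above rely on (a sketch; netaddr is already a neutron
# dependency). For 10.10.10.0/29 with gateway .1, the default pool is
# exactly .2 through .6, i.e. the five addresses that
# test_allocate_ip_exausted_pool exhausts before expecting
# IpAddressGenerationFailure.


def _pool_arithmetic_sketch():
    import netaddr

    cidr = netaddr.IPNetwork('10.10.10.0/29')
    # iter_hosts() skips the network (.0) and broadcast (.7) addresses;
    # dropping the gateway (.1) leaves the pool asserted by the tests.
    usable = [str(ip) for ip in cidr.iter_hosts()
              if str(ip) != '10.10.10.1']
    assert usable == ['10.10.10.2', '10.10.10.3', '10.10.10.4',
                      '10.10.10.5', '10.10.10.6']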
import collections from contextlib import contextmanager import subprocess from alembic.ddl import base as alembic_ddl from alembic import script as alembic_script from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_db.sqlalchemy import test_migrations from oslo_log import log as logging from oslotest import base as oslotest_base import six import sqlalchemy from sqlalchemy import event # noqa from sqlalchemy.sql import ddl as sqla_ddl from neutron.db import migration as migration_root from neutron.db.migration.alembic_migrations import external from neutron.db.migration import cli as migration from neutron.db.migration.models import head as head_models from neutron.tests import base as test_base from neutron.tests.functional import base as functional_base from neutron.tests.unit import testlib_api cfg.CONF.import_opt('core_plugin', 'neutron.conf.common') CREATION_OPERATIONS = { 'sqla': (sqla_ddl.CreateIndex, sqla_ddl.CreateTable, sqla_ddl.CreateColumn, ), 'alembic': (alembic_ddl.AddColumn, ) } DROP_OPERATIONS = { 'sqla': (sqla_ddl.DropConstraint, sqla_ddl.DropIndex, sqla_ddl.DropTable, ), 'alembic': (alembic_ddl.DropColumn, ) } LOG = logging.getLogger(__name__) # NOTE(slaweq): replace alembic_util logging functions used normally with # olso_log logger to limit output on stdout migration.log_error = LOG.error migration.log_warning = LOG.warning migration.log_info = LOG.info def upgrade(engine, alembic_config, branch_name='heads'): cfg.CONF.set_override('connection', engine.url, group='database') migration.do_alembic_command(alembic_config, 'upgrade', branch_name) class _TestModelsMigrations(test_migrations.ModelsMigrationsSync): '''Test for checking of equality models state and migrations. For the opportunistic testing you need to set up a db named 'openstack_citest' with user 'openstack_citest' and password 'openstack_citest' on localhost. The test will then use that db and user/password combo to run the tests. For PostgreSQL on Ubuntu this can be done with the following commands:: sudo -u postgres psql postgres=# create user openstack_citest with createdb login password 'openstack_citest'; postgres=# create database openstack_citest with owner openstack_citest; For MySQL on Ubuntu this can be done with the following commands:: mysql -u root >create database openstack_citest; >grant all privileges on openstack_citest.* to openstack_citest@localhost identified by 'openstack_citest'; Output is a list that contains information about differences between db and models. Output example:: [('add_table', Table('bat', MetaData(bind=None), Column('info', String(), table=), schema=None)), ('remove_table', Table(u'bar', MetaData(bind=None), Column(u'data', VARCHAR(), table=), schema=None)), ('add_column', None, 'foo', Column('data', Integer(), table=)), ('remove_column', None, 'foo', Column(u'old_data', VARCHAR(), table=None)), [('modify_nullable', None, 'foo', u'x', {'existing_server_default': None, 'existing_type': INTEGER()}, True, False)]] * ``remove_*`` means that there is extra table/column/constraint in db; * ``add_*`` means that it is missing in db; * ``modify_*`` means that on column in db is set wrong type/nullable/server_default. Element contains information: - what should be modified, - schema, - table, - column, - existing correct column parameters, - right value, - wrong value. This class also contains tests for branches, like that correct operations are used in contract and expand branches. 
''' BUILD_SCHEMA = False TIMEOUT_SCALING_FACTOR = 4 def setUp(self): super(_TestModelsMigrations, self).setUp() self.cfg = self.useFixture(config_fixture.Config()) self.cfg.config(core_plugin='ml2') self.alembic_config = migration.get_neutron_config() self.alembic_config.neutron_config = cfg.CONF # Migration tests can take a long time self.useFixture(test_base.Timeout(scaling=self.TIMEOUT_SCALING_FACTOR)) def db_sync(self, engine): upgrade(engine, self.alembic_config) def get_engine(self): return self.engine def get_metadata(self): return head_models.get_metadata() def include_object(self, object_, name, type_, reflected, compare_to): if type_ == 'table' and (name == 'alembic_version' or name in external.TABLES): return False return super(_TestModelsMigrations, self).include_object( object_, name, type_, reflected, compare_to) def filter_metadata_diff(self, diff): return list(filter(self.remove_unrelated_errors, diff)) # Remove some differences that are not mistakes, just dialect-specific # quirks, etc. def remove_unrelated_errors(self, element): insp = sqlalchemy.engine.reflection.Inspector.from_engine( self.get_engine()) dialect = self.get_engine().dialect.name if isinstance(element, tuple): if dialect == 'mysql' and element[0] == 'remove_index': table_name = element[1].table.name for fk in insp.get_foreign_keys(table_name): if fk['name'] == element[1].name: return False cols = [c.name for c in element[1].expressions] for col in cols: if col in insp.get_pk_constraint( table_name)['constrained_columns']: return False else: for modified, _, table, column, _, _, new in element: if modified == 'modify_default' and dialect == 'mysql': constrained = insp.get_pk_constraint(table) if column in constrained['constrained_columns']: return False return True def test_upgrade_expand_branch(self): # Verify that the command "neutron-db-manage upgrade --expand" works # without errors. Check this for both MySQL and PostgreSQL. upgrade(self.engine, self.alembic_config, branch_name='%s@head' % migration.EXPAND_BRANCH) def test_upgrade_contract_branch(self): # Verify that the command "neutron-db-manage upgrade --contract" works # without errors. Check this for both MySQL and PostgreSQL. upgrade(self.engine, self.alembic_config, branch_name='%s@head' % migration.CONTRACT_BRANCH) @contextmanager def _listener(self, engine, listener_func): try: event.listen(engine, 'before_execute', listener_func) yield finally: event.remove(engine, 'before_execute', listener_func) def test_branches(self): drop_exceptions = collections.defaultdict(list) creation_exceptions = collections.defaultdict(list) def find_migration_exceptions(): # Due to some misunderstandings and some conscious decisions, # there may be some expand migrations which drop elements and # some contract migrations which create elements. These excepted # elements must be returned by a method in the script itself. # The name of the method must be 'contract_creation_exceptions' # or 'expand_drop_exceptions'. The method must have a docstring # explaining the reason for the exception. # # Here we build lists of the excepted elements and verify that # they are documented. 
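# For illustration only (names made up), a hypothetical expand migration # that needed to drop an element would declare it like this; the returned # dict is keyed by the sqlalchemy type of the dropped element, which is # exactly how it is consumed below: # # def expand_drop_exceptions(): # """Drop of 'foo_table' is allowed because it was never used.""" # return {sqlalchemy.Table: ['foo_table']} 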
script = alembic_script.ScriptDirectory.from_config( self.alembic_config) for m in list(script.walk_revisions(base='base', head='heads')): branches = m.branch_labels or [] if migration.CONTRACT_BRANCH in branches: method_name = 'contract_creation_exceptions' exceptions_dict = creation_exceptions elif migration.EXPAND_BRANCH in branches: method_name = 'expand_drop_exceptions' exceptions_dict = drop_exceptions else: continue get_excepted_elements = getattr(m.module, method_name, None) if not get_excepted_elements: continue explanation = getattr(get_excepted_elements, '__doc__', "") if len(explanation) < 1: self.fail("%s() requires docstring with explanation" % '.'.join([m.module.__name__, get_excepted_elements.__name__])) for sa_type, elements in get_excepted_elements().items(): exceptions_dict[sa_type].extend(elements) def is_excepted_sqla(clauseelement, exceptions): """Identify excepted operations that are allowed for the branch.""" element = clauseelement.element element_name = element.name if isinstance(element, sqlalchemy.Index): element_name = element.table.name for sa_type_, excepted_names in exceptions.items(): if isinstance(element, sa_type_): if element_name in excepted_names: return True def is_excepted_alembic(clauseelement, exceptions): """Identify excepted operations that are allowed for the branch.""" # For alembic the clause is AddColumn or DropColumn column = clauseelement.column.name table = clauseelement.column.table.name element_name = '.'.join([table, column]) for alembic_type, excepted_names in exceptions.items(): if alembic_type == sqlalchemy.Column: if element_name in excepted_names: return True def is_allowed(clauseelement, exceptions, disallowed_ops): if (isinstance(clauseelement, disallowed_ops['sqla']) and hasattr(clauseelement, 'element')): return is_excepted_sqla(clauseelement, exceptions) if isinstance(clauseelement, disallowed_ops['alembic']): return is_excepted_alembic(clauseelement, exceptions) return True def check_expand_branch(conn, clauseelement, multiparams, params): if not is_allowed(clauseelement, drop_exceptions, DROP_OPERATIONS): self.fail("Migration in expand branch contains drop command") def check_contract_branch(conn, clauseelement, multiparams, params): if not is_allowed(clauseelement, creation_exceptions, CREATION_OPERATIONS): self.fail("Migration in contract branch contains create " "command") find_migration_exceptions() engine = self.engine cfg.CONF.set_override('connection', engine.url, group='database') with engine.begin() as connection: self.alembic_config.attributes['connection'] = connection # upgrade to latest release first; --expand users are expected to # apply all alembic scripts from previous releases before applying # the new ones for release in migration_root.NEUTRON_MILESTONES: release_revisions = migration._find_milestone_revisions( self.alembic_config, release) for rev in release_revisions: migration.do_alembic_command( self.alembic_config, 'upgrade', rev[0]) with self._listener(engine, check_expand_branch): migration.do_alembic_command( self.alembic_config, 'upgrade', '%s@head' % migration.EXPAND_BRANCH) with self._listener(engine, check_contract_branch): migration.do_alembic_command( self.alembic_config, 'upgrade', '%s@head' % migration.CONTRACT_BRANCH) def _test_has_offline_migrations(self, revision, expected): engine = self.get_engine() cfg.CONF.set_override('connection', engine.url, group='database') migration.do_alembic_command(self.alembic_config, 'upgrade', revision) self.assertEqual(expected, 
migration.has_offline_migrations(self.alembic_config, 'unused')) def test_has_offline_migrations_pending_contract_scripts(self): self._test_has_offline_migrations('kilo', True) def test_has_offline_migrations_all_heads_upgraded(self): self._test_has_offline_migrations('heads', False) # NOTE(ihrachys): if this test fails for you, it probably means that you # attempt to add an unsafe contract migration script, that is in # contradiction to blueprint online-upgrades # TODO(ihrachys): revisit later in Pike+ where some contract scripts may be # safe again def test_forbid_offline_migrations_starting_newton(self): engine = self.get_engine() cfg.CONF.set_override('connection', engine.url, group='database') # the following revisions are Newton heads for revision in ('5cd92597d11d', '5c85685d616d'): migration.do_alembic_command( self.alembic_config, 'upgrade', revision) self.assertFalse(migration.has_offline_migrations( self.alembic_config, 'unused'), msg='Offline contract migration scripts are forbidden for Ocata+') class TestModelsMigrationsMysql(testlib_api.MySQLTestCaseMixin, _TestModelsMigrations, testlib_api.SqlTestCaseLight, functional_base.BaseLoggingTestCase): @test_base.skip_if_timeout("bug 1687027") def test_forbid_offline_migrations_starting_newton(self): super(TestModelsMigrationsMysql, self).test_forbid_offline_migrations_starting_newton() @test_base.skip_if_timeout("bug 1687027") def test_check_mysql_engine(self): engine = self.get_engine() cfg.CONF.set_override('connection', engine.url, group='database') with engine.begin() as connection: self.alembic_config.attributes['connection'] = connection migration.do_alembic_command(self.alembic_config, 'upgrade', 'heads') insp = sqlalchemy.engine.reflection.Inspector.from_engine(engine) # Test that table creation on MySQL only builds InnoDB tables tables = insp.get_table_names() self.assertGreater(len(tables), 0, "No tables found. 
Wrong schema?") res = [table for table in tables if insp.get_table_options(table)['mysql_engine'] != 'InnoDB' and table != 'alembic_version'] self.assertEqual(0, len(res), "%s non InnoDB tables created" % res) @test_base.skip_if_timeout("bug 1687027") def test_upgrade_expand_branch(self): super(TestModelsMigrationsMysql, self).test_upgrade_expand_branch() @test_base.skip_if_timeout("bug 1687027") def test_upgrade_contract_branch(self): super(TestModelsMigrationsMysql, self).test_upgrade_contract_branch() @test_base.skip_if_timeout("bug 1687027") def test_branches(self): super(TestModelsMigrationsMysql, self).test_branches() @test_base.skip_if_timeout("bug 1687027") def test_has_offline_migrations_pending_contract_scripts(self): super(TestModelsMigrationsMysql, self).test_has_offline_migrations_pending_contract_scripts() @test_base.skip_if_timeout("bug 1687027") def test_has_offline_migrations_all_heads_upgraded(self): super(TestModelsMigrationsMysql, self).test_has_offline_migrations_all_heads_upgraded() @test_base.skip_if_timeout("bug 1687027") def test_models_sync(self): super(TestModelsMigrationsMysql, self).test_models_sync() class TestModelsMigrationsPsql(testlib_api.PostgreSQLTestCaseMixin, _TestModelsMigrations, testlib_api.SqlTestCaseLight): pass class TestSanityCheck(testlib_api.SqlTestCaseLight): BUILD_SCHEMA = False def setUp(self): super(TestSanityCheck, self).setUp() self.alembic_config = migration.get_neutron_config() self.alembic_config.neutron_config = cfg.CONF def _drop_table(self, table): with self.engine.begin() as conn: table.drop(conn) def test_check_sanity_1df244e556f5(self): ha_router_agent_port_bindings = sqlalchemy.Table( 'ha_router_agent_port_bindings', sqlalchemy.MetaData(), sqlalchemy.Column('port_id', sqlalchemy.String(36)), sqlalchemy.Column('router_id', sqlalchemy.String(36)), sqlalchemy.Column('l3_agent_id', sqlalchemy.String(36))) with self.engine.connect() as conn: ha_router_agent_port_bindings.create(conn) self.addCleanup(self._drop_table, ha_router_agent_port_bindings) # NOTE(haleyb): without this disabled, pylint complains # about a missing 'dml' argument. # pylint: disable=no-value-for-parameter conn.execute(ha_router_agent_port_bindings.insert(), [ {'port_id': '1234', 'router_id': '12345', 'l3_agent_id': '123'}, {'port_id': '12343', 'router_id': '12345', 'l3_agent_id': '123'} ]) script_dir = alembic_script.ScriptDirectory.from_config( self.alembic_config) script = script_dir.get_revision("1df244e556f5").module self.assertRaises(script.DuplicateL3HARouterAgentPortBinding, script.check_sanity, conn) def test_check_sanity_030a959ceafa(self): routerports = sqlalchemy.Table( 'routerports', sqlalchemy.MetaData(), sqlalchemy.Column('router_id', sqlalchemy.String(36)), sqlalchemy.Column('port_id', sqlalchemy.String(36)), sqlalchemy.Column('port_type', sqlalchemy.String(255))) with self.engine.connect() as conn: routerports.create(conn) self.addCleanup(self._drop_table, routerports) # NOTE(haleyb): without this disabled, pylint complains # about a missing 'dml' argument. 
# pylint: disable=no-value-for-parameter conn.execute(routerports.insert(), [ {'router_id': '1234', 'port_id': '12345', 'port_type': '123'}, {'router_id': '12343', 'port_id': '12345', 'port_type': '1232'} ]) script_dir = alembic_script.ScriptDirectory.from_config( self.alembic_config) script = script_dir.get_revision("030a959ceafa").module self.assertRaises(script.DuplicatePortRecordinRouterPortdatabase, script.check_sanity, conn) def test_check_sanity_6b461a21bcfc_dup_on_fixed_ip(self): floatingips = sqlalchemy.Table( 'floatingips', sqlalchemy.MetaData(), sqlalchemy.Column('floating_network_id', sqlalchemy.String(36)), sqlalchemy.Column('fixed_port_id', sqlalchemy.String(36)), sqlalchemy.Column('fixed_ip_address', sqlalchemy.String(64))) with self.engine.connect() as conn: floatingips.create(conn) self.addCleanup(self._drop_table, floatingips) # NOTE(haleyb): without this disabled, pylint complains # about a missing 'dml' argument. # pylint: disable=no-value-for-parameter conn.execute(floatingips.insert(), [ {'floating_network_id': '12345', 'fixed_port_id': '1234567', 'fixed_ip_address': '12345678'}, {'floating_network_id': '12345', 'fixed_port_id': '1234567', 'fixed_ip_address': '12345678'} ]) script_dir = alembic_script.ScriptDirectory.from_config( self.alembic_config) script = script_dir.get_revision("6b461a21bcfc").module self.assertRaises(script.DuplicateFloatingIPforOneFixedIP, script.check_sanity, conn) def test_check_sanity_6b461a21bcfc_dup_on_no_fixed_ip(self): floatingips = sqlalchemy.Table( 'floatingips', sqlalchemy.MetaData(), sqlalchemy.Column('floating_network_id', sqlalchemy.String(36)), sqlalchemy.Column('fixed_port_id', sqlalchemy.String(36)), sqlalchemy.Column('fixed_ip_address', sqlalchemy.String(64))) with self.engine.connect() as conn: floatingips.create(conn) self.addCleanup(self._drop_table, floatingips) # NOTE(haleyb): without this disabled, pylint complains # about a missing 'dml' argument. # pylint: disable=no-value-for-parameter conn.execute(floatingips.insert(), [ {'floating_network_id': '12345', 'fixed_port_id': '1234567', 'fixed_ip_address': None}, {'floating_network_id': '12345', 'fixed_port_id': '1234567', 'fixed_ip_address': None} ]) script_dir = alembic_script.ScriptDirectory.from_config( self.alembic_config) script = script_dir.get_revision("6b461a21bcfc").module self.assertIsNone(script.check_sanity(conn)) class TestWalkDowngrade(oslotest_base.BaseTestCase): def setUp(self): super(TestWalkDowngrade, self).setUp() self.alembic_config = migration.get_neutron_config() self.alembic_config.neutron_config = cfg.CONF def test_no_downgrade(self): script_dir = alembic_script.ScriptDirectory.from_config( self.alembic_config) versions = [v for v in script_dir.walk_revisions(base='base', head='heads')] failed_revisions = [] for version in versions: if hasattr(version.module, 'downgrade'): failed_revisions.append(version.revision) if failed_revisions: self.fail('Migrations %s have a downgrade method' % failed_revisions) return True class _TestWalkMigrations(object): '''Framework for testing schema migrations against different backends. 
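Revisions are applied one by one, from base to heads. For a revision <rev> a subclass may define optional hooks named _pre_upgrade_<rev>(engine) and _check_<rev>(engine, data), used to seed data before that migration runs and to verify it afterwards (see _migrate_up below). 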
''' BUILD_SCHEMA = False def execute_cmd(self, cmd=None): proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True) output = proc.communicate()[0] self.assertEqual(0, proc.returncode, 'Command failed with ' 'output:\n%s' % output) def _get_alembic_config(self, uri): db_config = migration.get_neutron_config() self.script_dir = alembic_script.ScriptDirectory.from_config(db_config) db_config.neutron_config = cfg.CONF db_config.neutron_config.set_override('connection', six.text_type(uri), group='database') return db_config def _revisions(self): """Provide revisions and their parent revisions. :return: List of tuples. Every tuple contains a revision and its parent revision. """ revisions = list(self.script_dir.walk_revisions("base", "heads")) revisions = list(reversed(revisions)) for rev in revisions: # Destination, current yield rev.revision, rev.down_revision def _migrate_up(self, config, engine, dest, curr, with_data=False): if with_data: data = None pre_upgrade = getattr( self, "_pre_upgrade_%s" % dest, None) if pre_upgrade: data = pre_upgrade(engine) migration.do_alembic_command(config, 'upgrade', dest) if with_data: check = getattr(self, "_check_%s" % dest, None) if check and data: check(engine, data) def test_walk_versions(self): """Walk all migration revisions and apply them one by one.""" engine = self.engine config = self._get_alembic_config(engine.url) revisions = self._revisions() for dest, curr in revisions: self._migrate_up(config, engine, dest, curr, with_data=True) class TestWalkMigrationsMysql(testlib_api.MySQLTestCaseMixin, _TestWalkMigrations, testlib_api.SqlTestCaseLight): # NOTE(slaweq): this workaround is taken from Manila patch: # https://review.opendev.org/#/c/291397/ # Set a 5 minute timeout in case this runs on very slow nodes/VMs. # Note that this test becomes slower with each addition of a new DB # migration. On fast nodes it can take about 5-10 secs with the Mitaka # set of migrations. 'pymysql' is much slower on slow nodes than # 'psycopg2', so this increased timeout is only required for testing # with the 'mysql' backend. @test_base.set_timeout(600) @test_base.skip_if_timeout("bug 1687027") def test_walk_versions(self): super(TestWalkMigrationsMysql, self).test_walk_versions() class TestWalkMigrationsPsql(testlib_api.PostgreSQLTestCaseMixin, _TestWalkMigrations, testlib_api.SqlTestCaseLight): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/db/test_models.py0000644000175000017500000000234600000000000025427 0ustar00coreycorey00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy from neutron.tests.functional import base class TestDBCreation(base.BaseLoggingTestCase): """Check that the database schema can be created without conflicts. A SQLite in-memory database is created for each test case. 
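The schema is built directly from the model metadata via metadata.create_all(), without running the alembic migrations. 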
""" def setUp(self): super(TestDBCreation, self).setUp() self.engine = sqlalchemy.create_engine('sqlite://') def _test_creation(self, module): metadata = module.get_metadata() metadata.create_all(self.engine) def test_head_creation(self): from neutron.db.migration.models import head self._test_creation(head) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/db/test_network.py0000644000175000017500000002523300000000000025635 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib import constants from neutron_lib import context from neutron_lib.db import resource_extend from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_utils import uuidutils from neutron.objects import network as network_obj from neutron.plugins.ml2 import plugin as ml2_plugin from neutron import quota from neutron.tests.unit import testlib_api class NetworkRBACTestCase(testlib_api.SqlTestCase): """Base class to test network RBAC policies""" def setUp(self): super(NetworkRBACTestCase, self).setUp() cfg.CONF.set_override('notify_nova_on_port_status_changes', False) DB_PLUGIN_KLASS = 'neutron.plugins.ml2.plugin.Ml2Plugin' self.setup_coreplugin(DB_PLUGIN_KLASS) self.plugin = ml2_plugin.Ml2Plugin() self.cxt = context.Context(user_id=None, tenant_id=None, is_admin=True, overwrite=False) self.tenant_1 = uuidutils.generate_uuid() self.tenant_2 = uuidutils.generate_uuid() self.network_id = uuidutils.generate_uuid() self.subnet_1_id = uuidutils.generate_uuid() self.subnet_2_id = uuidutils.generate_uuid() self.port_id = uuidutils.generate_uuid() make_res = mock.patch.object(quota.QuotaEngine, 'make_reservation') self.mock_quota_make_res = make_res.start() commit_res = mock.patch.object(quota.QuotaEngine, 'commit_reservation') self.mock_quota_commit_res = commit_res.start() def _create_network(self, tenant_id, network_id, shared, external=False): network = {'tenant_id': tenant_id, 'id': network_id, 'name': 'test-net', 'admin_state_up': True, 'shared': shared, extnet_apidef.EXTERNAL: external, 'status': constants.NET_STATUS_ACTIVE} return self.plugin.create_network(self.cxt, {'network': network}) def _update_network(self, network_id, network): return self.plugin.update_network(self.cxt, network_id, {'network': network}) def _create_subnet(self, tenant_id, subnet_id, shared, cidr=None): cidr = cidr if cidr else '10.10.10.0/24' subnet = {'tenant_id': tenant_id, 'id': subnet_id, 'name': 'test_sub', 'network_id': self.network_id, 'ip_version': constants.IP_VERSION_4, 'cidr': cidr, 'enable_dhcp': False, 'gateway_ip': constants.ATTR_NOT_SPECIFIED, 'shared': shared, 'allocation_pools': constants.ATTR_NOT_SPECIFIED, 'dns_nameservers': constants.ATTR_NOT_SPECIFIED, 'host_routes': constants.ATTR_NOT_SPECIFIED} return self.plugin.create_subnet(self.cxt, {'subnet': subnet}) def _create_port(self, 
tenant_id, network_id, port_id): port = {'tenant_id': tenant_id, 'name': 'test_port', 'id': port_id, 'network_id': network_id, 'mac_address': constants.ATTR_NOT_SPECIFIED, 'admin_state_up': True, 'status': constants.PORT_STATUS_ACTIVE, 'device_id': 'test_dev_id', 'device_owner': constants.DEVICE_OWNER_COMPUTE_PREFIX, 'fixed_ips': constants.ATTR_NOT_SPECIFIED} return self.plugin.create_port(self.cxt, {'port': port}) def _check_rbac(self, network_id, is_none, external): if external: action = 'access_as_external' else: action = 'access_as_shared' rbac = network_obj.NetworkRBAC.get_object( self.cxt, object_id=network_id, action=action, target_tenant='*') if is_none: self.assertIsNone(rbac) else: self.assertIsNotNone(rbac) def test_create_network_shared(self): self._create_network(self.tenant_1, self.network_id, True) self._check_rbac(self.network_id, is_none=False, external=False) def test_create_network_not_shared(self): self._create_network(self.tenant_1, self.network_id, False) self._check_rbac(self.network_id, is_none=True, external=False) def test_create_network_not_shared_external(self): with mock.patch.object(resource_extend, 'apply_funcs'): self._create_network(self.tenant_1, self.network_id, False, external=True) self._check_rbac(self.network_id, is_none=False, external=True) def test_update_network_to_shared(self): self._create_network(self.tenant_1, self.network_id, False) self._check_rbac(self.network_id, is_none=True, external=False) network_data = {'shared': True} self._update_network(self.network_id, network_data) self._check_rbac(self.network_id, is_none=False, external=False) def test_update_network_to_no_shared_no_subnets(self): self._create_network(self.tenant_1, self.network_id, True) self._check_rbac(self.network_id, is_none=False, external=False) network_data = {'shared': False} self._update_network(self.network_id, network_data) self._check_rbac(self.network_id, is_none=True, external=False) def test_update_network_shared_to_external(self): self._create_network(self.tenant_1, self.network_id, True) self._check_rbac(self.network_id, is_none=False, external=False) self._check_rbac(self.network_id, is_none=True, external=True) network_data = {extnet_apidef.EXTERNAL: True} self._update_network(self.network_id, network_data) self._check_rbac(self.network_id, is_none=False, external=False) self._check_rbac(self.network_id, is_none=False, external=True) def test_update_network_shared_to_internal(self): self._create_network(self.tenant_1, self.network_id, True, external=True) self._check_rbac(self.network_id, is_none=False, external=False) self._check_rbac(self.network_id, is_none=False, external=True) network_data = {extnet_apidef.EXTERNAL: False} self._update_network(self.network_id, network_data) self._check_rbac(self.network_id, is_none=False, external=False) self._check_rbac(self.network_id, is_none=True, external=True) def test_update_network_to_no_shared_tenant_subnet(self): self._create_network(self.tenant_1, self.network_id, True) self._check_rbac(self.network_id, is_none=False, external=False) self._create_subnet(self.tenant_1, self.subnet_1_id, True) network_data = {'shared': False} self._update_network(self.network_id, network_data) self._check_rbac(self.network_id, is_none=True, external=False) def test_update_network_to_no_shared_no_tenant_subnet(self): self._create_network(self.tenant_1, self.network_id, True) self._check_rbac(self.network_id, is_none=False, external=False) self._create_subnet(self.tenant_1, self.subnet_1_id, True) 
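# The subnet created below belongs to a different tenant; that is what # makes un-sharing the network invalid in the assertion that follows. 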
self._create_subnet(self.tenant_2, self.subnet_2_id, True, cidr='10.10.20.0/24') network_data = {'shared': False} self.assertRaises(n_exc.InvalidSharedSetting, self._update_network, self.network_id, network_data) def test_ensure_no_port_in_asterisk(self): self._create_network(self.tenant_1, self.network_id, True) self.plugin.ensure_no_tenant_ports_on_network( self.network_id, self.tenant_1, '*') def test_ensure_no_port_in_tenant_1(self): self._create_network(self.tenant_1, self.network_id, True) self.plugin.ensure_no_tenant_ports_on_network( self.network_id, self.tenant_1, self.tenant_1) def test_ensure_no_port_in_tenant_2(self): self._create_network(self.tenant_1, self.network_id, True) self.plugin.ensure_no_tenant_ports_on_network( self.network_id, self.tenant_1, self.tenant_2) def test_ensure_port_tenant_1_in_asterisk(self): self._create_network(self.tenant_1, self.network_id, True) self._create_subnet(self.tenant_1, self.subnet_1_id, True) self._create_port(self.tenant_1, self.network_id, self.port_id) self.plugin.ensure_no_tenant_ports_on_network( self.network_id, self.tenant_1, '*') def test_ensure_port_tenant_2_in_asterisk(self): self._create_network(self.tenant_1, self.network_id, True) self._create_subnet(self.tenant_1, self.subnet_1_id, True) self._create_port(self.tenant_2, self.network_id, self.port_id) self.assertRaises(n_exc.InvalidSharedSetting, self.plugin.ensure_no_tenant_ports_on_network, self.network_id, self.tenant_1, '*') def test_ensure_port_tenant_1_in_tenant_1(self): self._create_network(self.tenant_1, self.network_id, True) self._create_subnet(self.tenant_1, self.subnet_1_id, True) self._create_port(self.tenant_1, self.network_id, self.port_id) self.plugin.ensure_no_tenant_ports_on_network( self.network_id, self.tenant_1, self.tenant_1) def test_ensure_no_share_port_tenant_2_in_tenant_1(self): self._create_network(self.tenant_1, self.network_id, False) self._create_subnet(self.tenant_1, self.subnet_1_id, True) self._create_port(self.tenant_2, self.network_id, self.port_id) self.plugin.ensure_no_tenant_ports_on_network( self.network_id, self.tenant_1, self.tenant_1) def test_ensure_no_share_port_tenant_2_in_tenant_2(self): self._create_network(self.tenant_1, self.network_id, False) self._create_subnet(self.tenant_1, self.subnet_1_id, True) self._create_port(self.tenant_2, self.network_id, self.port_id) self.assertRaises(n_exc.InvalidSharedSetting, self.plugin.ensure_no_tenant_ports_on_network, self.network_id, self.tenant_1, self.tenant_2) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/db/test_ovn_revision_numbers_db.py0000644000175000017500000002011200000000000031053 0ustar00coreycorey00000000000000# Copyright 2020 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
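# These tests verify that a resource's Neutron revision_number is mirrored # into the external_ids column of the matching OVN northbound row. As an # illustration (values made up; this assumes ovn_const.OVN_REV_NUM_EXT_ID_KEY # resolves to 'neutron:revision_number'), a Logical_Switch row would carry # something like: external_ids : {"neutron:revision_number"="2", ...} 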
from neutron.common.ovn import constants as ovn_const from neutron.tests.functional import base class TestRevisionNumbers(base.TestOVNFunctionalBase): def _create_network(self, name): data = {'network': {'name': name, 'tenant_id': self._tenant_id}} req = self.new_create_request('networks', data, self.fmt) res = req.get_response(self.api) return self.deserialize(self.fmt, res)['network'] def _update_network_name(self, net_id, new_name): data = {'network': {'name': new_name}} req = self.new_update_request('networks', data, net_id, self.fmt) res = req.get_response(self.api) return self.deserialize(self.fmt, res)['network'] def _find_network_row_by_name(self, name): for row in self.nb_api._tables['Logical_Switch'].rows.values(): if (row.external_ids.get( ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY) == name): return row def _create_port(self, name, net_id): data = {'port': {'name': name, 'tenant_id': self._tenant_id, 'network_id': net_id}} req = self.new_create_request('ports', data, self.fmt) res = req.get_response(self.api) return self.deserialize(self.fmt, res)['port'] def _update_port_name(self, port_id, new_name): data = {'port': {'name': new_name}} req = self.new_update_request('ports', data, port_id, self.fmt) res = req.get_response(self.api) return self.deserialize(self.fmt, res)['port'] def _find_port_row_by_name(self, name): for row in self.nb_api._tables['Logical_Switch_Port'].rows.values(): if (row.external_ids.get( ovn_const.OVN_PORT_NAME_EXT_ID_KEY) == name): return row def _create_router(self, name): data = {'router': {'name': name, 'tenant_id': self._tenant_id}} req = self.new_create_request('routers', data, self.fmt) res = req.get_response(self.api) return self.deserialize(self.fmt, res)['router'] def _update_router_name(self, net_id, new_name): data = {'router': {'name': new_name}} req = self.new_update_request('routers', data, net_id, self.fmt) res = req.get_response(self.api) return self.deserialize(self.fmt, res)['router'] def _find_router_row_by_name(self, name): for row in self.nb_api._tables['Logical_Router'].rows.values(): if (row.external_ids.get( ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY) == name): return row def _create_subnet(self, net_id, cidr, name='subnet1'): data = {'subnet': {'name': name, 'tenant_id': self._tenant_id, 'network_id': net_id, 'cidr': cidr, 'ip_version': 4, 'enable_dhcp': True}} req = self.new_create_request('subnets', data, self.fmt) res = req.get_response(self.api) return self.deserialize(self.fmt, res)['subnet'] def _update_subnet_name(self, subnet_id, new_name): data = {'subnet': {'name': new_name}} req = self.new_update_request('subnets', data, subnet_id, self.fmt) res = req.get_response(self.api) return self.deserialize(self.fmt, res)['subnet'] def _find_subnet_row_by_id(self, subnet_id): for row in self.nb_api._tables['DHCP_Options'].rows.values(): if (row.external_ids.get('subnet_id') == subnet_id and not row.external_ids.get('port_id')): return row def test_create_network(self): name = 'net1' neutron_net = self._create_network(name) ovn_net = self._find_network_row_by_name(name) ovn_revision = ovn_net.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY] # Assert it matches with the newest returned by neutron API self.assertEqual(str(neutron_net['revision_number']), ovn_revision) def test_update_network(self): new_name = 'netnew1' neutron_net = self._create_network('net1') updated_net = self._update_network_name(neutron_net['id'], new_name) ovn_net = self._find_network_row_by_name(new_name) ovn_revision = 
ovn_net.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY] # Assert it matches with the newest returned by neutron API self.assertEqual(str(updated_net['revision_number']), ovn_revision) def test_create_port(self): name = 'port1' neutron_net = self._create_network('net1') neutron_port = self._create_port(name, neutron_net['id']) ovn_port = self._find_port_row_by_name(name) ovn_revision = ovn_port.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY] # Assert it matches with the newest returned by neutron API self.assertEqual(str(neutron_port['revision_number']), ovn_revision) def test_update_port(self): new_name = 'portnew1' neutron_net = self._create_network('net1') neutron_port = self._create_port('port1', neutron_net['id']) updated_port = self._update_port_name(neutron_port['id'], new_name) ovn_port = self._find_port_row_by_name(new_name) ovn_revision = ovn_port.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY] # Assert it matches with the newest returned by neutron API self.assertEqual(str(updated_port['revision_number']), ovn_revision) def test_create_router(self): name = 'router1' neutron_router = self._create_router(name) ovn_router = self._find_router_row_by_name(name) ovn_revision = ovn_router.external_ids[ ovn_const.OVN_REV_NUM_EXT_ID_KEY] # Assert it matches with the newest returned by neutron API self.assertEqual(str(neutron_router['revision_number']), ovn_revision) def test_update_router(self): new_name = 'newrouter' neutron_router = self._create_router('router1') updated_router = self._update_router_name(neutron_router['id'], new_name) ovn_router = self._find_router_row_by_name(new_name) ovn_revision = ovn_router.external_ids[ ovn_const.OVN_REV_NUM_EXT_ID_KEY] # Assert it matches with the newest returned by neutron API self.assertEqual(str(updated_router['revision_number']), ovn_revision) def test_create_subnet(self): neutron_net = self._create_network('net1') neutron_subnet = self._create_subnet(neutron_net['id'], '10.0.0.0/24') ovn_subnet = self._find_subnet_row_by_id(neutron_subnet['id']) ovn_revision = ovn_subnet.external_ids[ ovn_const.OVN_REV_NUM_EXT_ID_KEY] # Assert it matches with the newest returned by neutron API self.assertEqual(str(neutron_subnet['revision_number']), ovn_revision) def test_update_subnet(self): neutron_net = self._create_network('net1') neutron_subnet = self._create_subnet(neutron_net['id'], '10.0.0.0/24') updated_subnet = self._update_subnet_name( neutron_subnet['id'], 'newsubnet') ovn_subnet = self._find_subnet_row_by_id(neutron_subnet['id']) ovn_revision = ovn_subnet.external_ids[ ovn_const.OVN_REV_NUM_EXT_ID_KEY] # Assert it matches with the newest returned by neutron API self.assertEqual(str(updated_subnet['revision_number']), ovn_revision) # TODO(lucasagomes): Add a test for floating IPs here when we get # the router stuff done. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4030454 neutron-16.0.0.0b2.dev214/neutron/tests/functional/pecan_wsgi/0000755000175000017500000000000000000000000024260 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/pecan_wsgi/__init__.py0000644000175000017500000000223400000000000026372 0ustar00coreycorey00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import unittest from pecan import set_config from pecan.testing import load_test_app __all__ = ['FunctionalTest'] class FunctionalTest(unittest.TestCase): """Pecan wsgi functional test base class Used for functional tests where you need to test your literal application and its integration with the framework. """ def setUp(self): self.app = load_test_app(os.path.join( os.path.dirname(__file__), 'config.py' )) self.addCleanup(set_config, {}, overwrite=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/pecan_wsgi/config.py0000644000175000017500000000165100000000000026102 0ustar00coreycorey00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # use main app settings except for the port number so testing doesn't need to # listen on the main neutron port app = { 'root': 'neutron.pecan_wsgi.controllers.root.RootController', 'modules': ['neutron.pecan_wsgi'], 'errors': { 400: '/error', '__force_dict__': True } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/pecan_wsgi/test_controllers.py0000644000175000017500000014532100000000000030245 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from neutron_lib import constants as n_const from neutron_lib import context from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from oslo_config import cfg from oslo_db import exception as db_exc from oslo_policy import policy as oslo_policy from oslo_serialization import jsonutils from oslo_utils import uuidutils import pecan from pecan import request from neutron.api import extensions from neutron.conf import quota as qconf from neutron import manager from neutron.pecan_wsgi.controllers import root as controllers from neutron.pecan_wsgi.controllers import utils as controller_utils from neutron import policy from neutron.tests.common import helpers from neutron.tests.functional.pecan_wsgi import test_functional from neutron.tests.functional.pecan_wsgi import utils as pecan_utils from neutron.tests.unit import dummy_plugin _SERVICE_PLUGIN_RESOURCE = 'serviceplugin' _SERVICE_PLUGIN_COLLECTION = _SERVICE_PLUGIN_RESOURCE + 's' _SERVICE_PLUGIN_INDEX_BODY = {_SERVICE_PLUGIN_COLLECTION: []} class FakeServicePluginController(controller_utils.NeutronPecanController): resource = _SERVICE_PLUGIN_RESOURCE collection = _SERVICE_PLUGIN_COLLECTION @pecan.expose(generic=True, content_type='application/json', template='json') def index(self): return _SERVICE_PLUGIN_INDEX_BODY class TestRootController(test_functional.PecanFunctionalTest): """Test version listing on root URI.""" base_url = '/' def setUp(self): super(TestRootController, self).setUp() self.setup_service_plugin() self.plugin = directory.get_plugin() self.ctx = context.get_admin_context() def setup_service_plugin(self): manager.NeutronManager.set_controller_for_resource( _SERVICE_PLUGIN_COLLECTION, FakeServicePluginController(_SERVICE_PLUGIN_COLLECTION, _SERVICE_PLUGIN_RESOURCE, resource_info={'foo': {}})) def _test_method_returns_code(self, method, code=200): api_method = getattr(self.app, method) response = api_method(self.base_url, expect_errors=True) self.assertEqual(response.status_int, code) def test_get(self): response = self.app.get(self.base_url) self.assertEqual(response.status_int, 200) json_body = jsonutils.loads(response.body) versions = json_body.get('versions') self.assertEqual(1, len(versions)) for (attr, value) in controllers.V2Controller.version_info.items(): self.assertIn(attr, versions[0]) self.assertEqual(value, versions[0][attr]) def test_methods(self): self._test_method_returns_code('post', 405) self._test_method_returns_code('patch', 405) self._test_method_returns_code('delete', 405) self._test_method_returns_code('head', 405) self._test_method_returns_code('put', 405) class TestV2Controller(TestRootController): base_url = '/v2.0/' def test_get(self): """Verify that the current version info is returned.""" response = self.app.get(self.base_url) self.assertEqual(response.status_int, 200) json_body = jsonutils.loads(response.body) self.assertIn('resources', json_body) self.assertIsInstance(json_body['resources'], list) for r in json_body['resources']: self.assertIn("links", r) self.assertIn("name", r) self.assertIn("collection", r) self.assertIn(self.base_url, r['links'][0]['href']) def test_get_no_trailing_slash(self): response = self.app.get(self.base_url[:-1], expect_errors=True) self.assertEqual(response.status_int, 404) def test_routing_success(self): """Test dispatch to controller for existing resource.""" response = self.app.get('%sports.json' % self.base_url) self.assertEqual(response.status_int, 200) def test_routing_failure(self): """Test dispatch to 
controller for non-existing resource.""" response = self.app.get('%sidonotexist.json' % self.base_url, expect_errors=True) self.assertEqual(response.status_int, 404) def test_methods(self): self._test_method_returns_code('post', 405) self._test_method_returns_code('put', 405) self._test_method_returns_code('patch', 405) self._test_method_returns_code('delete', 405) self._test_method_returns_code('head', 405) class TestExtensionsController(TestRootController): """Test extension listing and detail reporting.""" base_url = '/v2.0/extensions' def _get_supported_extensions(self): ext_mgr = extensions.PluginAwareExtensionManager.get_instance() return ext_mgr.get_supported_extension_aliases() def test_index(self): response = self.app.get(self.base_url) self.assertEqual(response.status_int, 200) json_body = jsonutils.loads(response.body) returned_aliases = [ext['alias'] for ext in json_body['extensions']] supported_extensions = self._get_supported_extensions() self.assertEqual(supported_extensions, set(returned_aliases)) def test_get(self): # Fetch any extension supported by plugins test_alias = self._get_supported_extensions().pop() response = self.app.get('%s/%s' % (self.base_url, test_alias)) self.assertEqual(response.status_int, 200) json_body = jsonutils.loads(response.body) self.assertEqual(test_alias, json_body['extension']['alias']) def test_methods(self): self._test_method_returns_code('post', 404) self._test_method_returns_code('put', 404) self._test_method_returns_code('patch', 404) self._test_method_returns_code('delete', 404) self._test_method_returns_code('head', 404) class TestQuotasController(test_functional.PecanFunctionalTest): """Test quota management API controller.""" base_url = '/v2.0/quotas' default_expected_limits = { 'network': qconf.DEFAULT_QUOTA_NETWORK, 'port': qconf.DEFAULT_QUOTA_PORT, 'subnet': qconf.DEFAULT_QUOTA_SUBNET} def _verify_limits(self, response, limits): for resource, limit in limits.items(): self.assertEqual(limit, response['quota'][resource]) def _verify_default_limits(self, response): self._verify_limits(response, self.default_expected_limits) def _verify_after_update(self, response, updated_limits): expected_limits = self.default_expected_limits.copy() expected_limits.update(updated_limits) self._verify_limits(response, expected_limits) def test_index_admin(self): # NOTE(salv-orlando): The quota controller has a hardcoded check for # admin-ness for this operation, which is supposed to return quotas for # all tenants. 
Such check is "vestigial" from the home-grown WSGI and # shall be removed response = self.app.get('%s.json' % self.base_url, headers={'X-Project-Id': 'admin', 'X-Roles': 'admin'}) self.assertEqual(200, response.status_int) def test_index(self): response = self.app.get('%s.json' % self.base_url, expect_errors=True) self.assertEqual(403, response.status_int) def test_get_admin(self): response = self.app.get('%s/foo.json' % self.base_url, headers={'X-Project-Id': 'admin', 'X-Roles': 'admin'}) self.assertEqual(200, response.status_int) # As quota limits have not been updated, expect default values json_body = jsonutils.loads(response.body) self._verify_default_limits(json_body) def test_get(self): # It is not ok to access another tenant's limits url = '%s/foo.json' % self.base_url response = self.app.get(url, expect_errors=True) self.assertEqual(403, response.status_int) # It is however ok to retrieve your own limits response = self.app.get(url, headers={'X-Project-Id': 'foo'}) self.assertEqual(200, response.status_int) json_body = jsonutils.loads(response.body) self._verify_default_limits(json_body) def test_put_get_delete(self): # PUT and DELETE actions are in the same test as a meaningful DELETE # test would require a put anyway url = '%s/foo.json' % self.base_url response = self.app.put_json(url, params={'quota': {'network': 99}}, headers={'X-Project-Id': 'admin', 'X-Roles': 'admin'}) self.assertEqual(200, response.status_int) json_body = jsonutils.loads(response.body) self._verify_after_update(json_body, {'network': 99}) response = self.app.get(url, headers={'X-Project-Id': 'foo'}) self.assertEqual(200, response.status_int) json_body = jsonutils.loads(response.body) self._verify_after_update(json_body, {'network': 99}) response = self.app.delete(url, headers={'X-Project-Id': 'admin', 'X-Roles': 'admin'}) self.assertEqual(204, response.status_int) self.assertFalse(response.body) # As DELETE does not return a body we need another GET response = self.app.get(url, headers={'X-Project-Id': 'foo'}) self.assertEqual(200, response.status_int) json_body = jsonutils.loads(response.body) self._verify_default_limits(json_body) def test_update_list_delete(self): # PUT and DELETE actions are in the same test as a meaningful DELETE # test would require a put anyway url = '%s/foo.json' % self.base_url response = self.app.put_json(url, params={'quota': {'network': 99}}, headers={'X-Project-Id': 'admin', 'X-Roles': 'admin'}) self.assertEqual(200, response.status_int) json_body = jsonutils.loads(response.body) self._verify_after_update(json_body, {'network': 99}) response = self.app.get(self.base_url, headers={'X-Project-Id': 'admin', 'X-Roles': 'admin'}) self.assertEqual(200, response.status_int) json_body = jsonutils.loads(response.body) found = False for qs in json_body['quotas']: if qs['tenant_id'] == 'foo': found = True self.assertTrue(found) response = self.app.delete(url, headers={'X-Project-Id': 'admin', 'X-Roles': 'admin'}) self.assertEqual(204, response.status_int) self.assertFalse(response.body) response = self.app.get(self.base_url, headers={'X-Project-Id': 'admin', 'X-Roles': 'admin'}) self.assertEqual(200, response.status_int) json_body = jsonutils.loads(response.body) for qs in json_body['quotas']: self.assertNotEqual('foo', qs['tenant_id']) def test_quotas_get_defaults(self): response = self.app.get('%s/foo/default.json' % self.base_url, headers={'X-Project-Id': 'admin', 'X-Roles': 'admin'}) self.assertEqual(200, response.status_int) # As quota limits have not been updated, expect default 
values json_body = jsonutils.loads(response.body) self._verify_default_limits(json_body) def test_get_tenant_info(self): response = self.app.get('%s/tenant.json' % self.base_url, headers={'X-Project-Id': 'admin', 'X-Roles': 'admin'}) self.assertEqual(200, response.status_int) json_body = jsonutils.loads(response.body) self.assertEqual('admin', json_body['tenant']['tenant_id']) class TestResourceController(TestRootController): """Test generic controller""" # TODO(salv-orlando): This test case must not explicitly test the 'port' # resource. Also it should validate correct plugin/resource association base_url = '/v2.0' def setUp(self): super(TestResourceController, self).setUp() policy.init() self.addCleanup(policy.reset) self._gen_port() def _gen_port(self): network_id = self.plugin.create_network(context.get_admin_context(), { 'network': {'name': 'pecannet', 'tenant_id': 'tenid', 'shared': False, 'admin_state_up': True, 'status': 'ACTIVE'}})['id'] self.port = self.plugin.create_port(context.get_admin_context(), { 'port': {'tenant_id': 'tenid', 'network_id': network_id, 'fixed_ips': n_const.ATTR_NOT_SPECIFIED, 'mac_address': '00:11:22:33:44:55', 'admin_state_up': True, 'device_id': 'FF', 'device_owner': 'pecan', 'name': 'pecan'}}) def test_get(self): response = self.app.get('/v2.0/ports.json') self.assertEqual(response.status_int, 200) def _check_item(self, expected, item): for attribute in expected: self.assertIn(attribute, item) self.assertEqual(len(expected), len(item)) def _test_get_collection_with_fields_selector(self, fields=None): fields = fields or [] query_params = ['fields=%s' % field for field in fields] url = '/v2.0/ports.json' if query_params: url = '%s?%s' % (url, '&'.join(query_params)) list_resp = self.app.get(url, headers={'X-Project-Id': 'tenid'}) self.assertEqual(200, list_resp.status_int) for item in jsonutils.loads(list_resp.body).get('ports', []): for field in fields: self.assertIn(field, item) if fields: self.assertEqual(len(fields), len(item)) else: for field in ('id', 'name', 'device_owner'): self.assertIn(field, item) def test_get_collection_with_multiple_fields_selector(self): self._test_get_collection_with_fields_selector(fields=['id', 'name']) def test_get_collection_with_single_fields_selector(self): self._test_get_collection_with_fields_selector(fields=['name']) def test_get_collection_without_fields_selector(self): self._test_get_collection_with_fields_selector(fields=[]) def test_project_id_in_mandatory_fields(self): # ports only specifies that tenant_id is mandatory, but project_id # should still be passed to the plugin. mock_get = mock.patch.object(self.plugin, 'get_ports', return_value=[]).start() self.app.get( '/v2.0/ports.json?fields=id', headers={'X-Project-Id': 'tenid'} ) self.assertIn('project_id', mock_get.mock_calls[-1][2]['fields']) def test_get_item_with_fields_selector(self): item_resp = self.app.get( '/v2.0/ports/%s.json?fields=id&fields=name' % self.port['id'], headers={'X-Project-Id': 'tenid'}) self.assertEqual(200, item_resp.status_int) self._check_item(['id', 'name'], jsonutils.loads(item_resp.body)['port']) # Explicitly require an attribute which is also 'required_by_policy'. 
# The attribute should not be stripped while generating the response item_resp = self.app.get( '/v2.0/ports/%s.json?fields=id&fields=tenant_id' % self.port['id'], headers={'X-Project-Id': 'tenid'}) self.assertEqual(200, item_resp.status_int) self._check_item(['id', 'tenant_id'], jsonutils.loads(item_resp.body)['port']) def test_duped_and_empty_fields_stripped(self): mock_get = mock.patch.object(self.plugin, 'get_ports', return_value=[]).start() self.app.get( '/v2.0/ports.json?fields=id&fields=name&fields=&fields=name', headers={'X-Project-Id': 'tenid'} ) received = mock_get.mock_calls[-1][2]['fields'] self.assertNotIn('', received) self.assertEqual(len(received), len(set(received))) def test_post(self): response = self.app.post_json( '/v2.0/ports.json', params={'port': {'network_id': self.port['network_id'], 'admin_state_up': True, 'tenant_id': 'tenid'}}, headers={'X-Project-Id': 'tenid'}) self.assertEqual(response.status_int, 201) def test_post_with_retry(self): self._create_failed = False orig = self.plugin.create_port def new_create(*args, **kwargs): if not self._create_failed: self._create_failed = True raise db_exc.RetryRequest(ValueError()) return orig(*args, **kwargs) with mock.patch.object(self.plugin, 'create_port', new=new_create): response = self.app.post_json( '/v2.0/ports.json', params={'port': {'network_id': self.port['network_id'], 'admin_state_up': True, 'tenant_id': 'tenid'}}, headers={'X-Project-Id': 'tenid'}) self.assertEqual(201, response.status_int) def test_put(self): response = self.app.put_json('/v2.0/ports/%s.json' % self.port['id'], params={'port': {'name': 'test'}}, headers={'X-Project-Id': 'tenid'}) self.assertEqual(response.status_int, 200) json_body = jsonutils.loads(response.body) self.assertEqual(1, len(json_body)) self.assertIn('port', json_body) self.assertEqual('test', json_body['port']['name']) self.assertEqual('tenid', json_body['port']['tenant_id']) def test_delete(self): response = self.app.delete('/v2.0/ports/%s.json' % self.port['id'], headers={'X-Project-Id': 'tenid'}) self.assertEqual(response.status_int, 204) self.assertFalse(response.body) def test_delete_disallows_body(self): response = self.app.delete_json( '/v2.0/ports/%s.json' % self.port['id'], params={'port': {'name': 'test'}}, headers={'X-Project-Id': 'tenid'}, expect_errors=True) self.assertEqual(response.status_int, 400) def test_plugin_initialized(self): self.assertIsNotNone(manager.NeutronManager._instance) def test_methods(self): self._test_method_returns_code('post', 405) self._test_method_returns_code('put', 405) self._test_method_returns_code('patch', 405) self._test_method_returns_code('delete', 405) self._test_method_returns_code('head', 405) self._test_method_returns_code('delete', 405) def test_post_with_empty_body(self): response = self.app.post_json( '/v2.0/ports.json', headers={'X-Project-Id': 'tenid'}, params={}, expect_errors=True) self.assertEqual(response.status_int, 400) def test_post_with_unsupported_json_type(self): response = self.app.post_json( '/v2.0/ports.json', headers={'X-Project-Id': 'tenid'}, params=[1, 2, 3], expect_errors=True) self.assertEqual(response.status_int, 400) def test_bulk_create(self): response = self.app.post_json( '/v2.0/ports.json', params={'ports': [{'network_id': self.port['network_id'], 'admin_state_up': True, 'tenant_id': 'tenid'}, {'network_id': self.port['network_id'], 'admin_state_up': True, 'tenant_id': 'tenid'}] }, headers={'X-Project-Id': 'tenid'}) self.assertEqual(201, response.status_int) json_body = jsonutils.loads(response.body) 
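# No security groups were specified in the bulk request, so each port is # presumably given a single security group (the tenant's default), which # is what the assertions below rely on. 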
self.assertIn('ports', json_body) ports = json_body['ports'] self.assertEqual(2, len(ports)) for port in ports: self.assertEqual(1, len(port['security_groups'])) def test_bulk_create_with_sg(self): sg_response = self.app.post_json( '/v2.0/security-groups.json', params={'security_group': { "name": "functest", "description": "Functional test"}}, headers={'X-Project-Id': 'tenid'}) self.assertEqual(201, sg_response.status_int) sg_json_body = jsonutils.loads(sg_response.body) self.assertIn('security_group', sg_json_body) sg_id = sg_json_body['security_group']['id'] port_response = self.app.post_json( '/v2.0/ports.json', params={'ports': [{'network_id': self.port['network_id'], 'admin_state_up': True, 'security_groups': [sg_id], 'tenant_id': 'tenid'}, {'network_id': self.port['network_id'], 'admin_state_up': True, 'security_groups': [sg_id], 'tenant_id': 'tenid'}] }, headers={'X-Project-Id': 'tenid'}) self.assertEqual(201, port_response.status_int) json_body = jsonutils.loads(port_response.body) self.assertIn('ports', json_body) ports = json_body['ports'] self.assertEqual(2, len(ports)) for port in ports: self.assertEqual(1, len(port['security_groups'])) def test_emulated_bulk_create(self): self.plugin._FORCE_EMULATED_BULK = True response = self.app.post_json( '/v2.0/ports.json', params={'ports': [{'network_id': self.port['network_id'], 'admin_state_up': True, 'tenant_id': 'tenid'}, {'network_id': self.port['network_id'], 'admin_state_up': True, 'tenant_id': 'tenid'}] }, headers={'X-Project-Id': 'tenid'}) self.assertEqual(response.status_int, 201) json_body = jsonutils.loads(response.body) self.assertIn('ports', json_body) self.assertEqual(2, len(json_body['ports'])) def test_emulated_bulk_create_rollback(self): self.plugin._FORCE_EMULATED_BULK = True response = self.app.post_json( '/v2.0/ports.json', params={'ports': [{'network_id': self.port['network_id'], 'admin_state_up': True, 'tenant_id': 'tenid'}, {'network_id': self.port['network_id'], 'admin_state_up': True, 'tenant_id': 'tenid'}, {'network_id': 'bad_net_id', 'admin_state_up': True, 'tenant_id': 'tenid'}] }, headers={'X-Project-Id': 'tenid'}, expect_errors=True) self.assertEqual(response.status_int, 400) response = self.app.get( '/v2.0/ports.json', headers={'X-Project-Id': 'tenid'}) # all ports should be rolled back from above so we are just left # with the one created in setup self.assertEqual(1, len(jsonutils.loads(response.body)['ports'])) def test_bulk_create_one_item(self): response = self.app.post_json( '/v2.0/ports.json', params={'ports': [{'network_id': self.port['network_id'], 'admin_state_up': True, 'tenant_id': 'tenid'}] }, headers={'X-Project-Id': 'tenid'}) self.assertEqual(response.status_int, 201) json_body = jsonutils.loads(response.body) self.assertIn('ports', json_body) self.assertEqual(1, len(json_body['ports'])) class TestPaginationAndSorting(test_functional.PecanFunctionalTest): RESOURCE_COUNT = 6 def setUp(self): super(TestPaginationAndSorting, self).setUp() policy.init() self.addCleanup(policy.reset) self.plugin = directory.get_plugin() self.ctx = context.get_admin_context() self._create_networks(self.RESOURCE_COUNT) self.networks = self._get_collection()['networks'] def _create_networks(self, count=1): network_ids = [] for index in range(count): network = {'name': 'pecannet-%d' % index, 'tenant_id': 'tenid', 'shared': False, 'admin_state_up': True, 'status': 'ACTIVE'} network_id = self.plugin.create_network( self.ctx, {'network': network})['id'] network_ids.append(network_id) return network_ids def 
_get_collection(self, collection=None, limit=None, marker=None, fields=None, page_reverse=False, sort_key=None, sort_dir=None): collection = collection or 'networks' fields = fields or [] query_params = [] if limit: query_params.append('limit=%d' % limit) if marker: query_params.append('marker=%s' % marker) if page_reverse: query_params.append('page_reverse=True') if sort_key: query_params.append('sort_key=%s' % sort_key) if sort_dir: query_params.append('sort_dir=%s' % sort_dir) query_params.extend(['%s%s' % ('fields=', field) for field in fields]) url = '/v2.0/%s.json' % collection if query_params: url = '%s?%s' % (url, '&'.join(query_params)) list_resp = self.app.get(url, headers={'X-Project-Id': 'tenid'}) self.assertEqual(200, list_resp.status_int) return list_resp.json def _test_get_collection_with_pagination(self, expected_list, collection=None, limit=None, marker=None, fields=None, page_reverse=False, sort_key=None, sort_dir=None): expected_list = expected_list or [] collection = collection or 'networks' list_resp = self._get_collection(collection=collection, limit=limit, marker=marker, fields=fields, page_reverse=page_reverse, sort_key=sort_key, sort_dir=sort_dir) if limit and marker: links_key = '%s_links' % collection self.assertIn(links_key, list_resp) if not fields or 'id' in fields: list_resp_ids = [item['id'] for item in list_resp[collection]] self.assertEqual(expected_list, list_resp_ids) if fields: for item in list_resp[collection]: for field in fields: self.assertIn(field, item) def test_get_collection_with_pagination_limit(self): self._test_get_collection_with_pagination([self.networks[0]['id']], limit=1) def test_get_collection_with_pagination_fields_no_pk(self): self._test_get_collection_with_pagination([self.networks[0]['id']], limit=1, fields=['name']) def test_get_collection_with_pagination_limit_over_count(self): expected_ids = [network['id'] for network in self.networks] self._test_get_collection_with_pagination( expected_ids, limit=self.RESOURCE_COUNT + 1) def test_get_collection_with_pagination_marker(self): marker = self.networks[2]['id'] expected_ids = [network['id'] for network in self.networks[3:]] self._test_get_collection_with_pagination(expected_ids, limit=3, marker=marker) def test_get_collection_with_pagination_marker_without_limit(self): marker = self.networks[2]['id'] expected_ids = [network['id'] for network in self.networks] self._test_get_collection_with_pagination(expected_ids, marker=marker) def test_get_collection_with_pagination_and_fields(self): expected_ids = [network['id'] for network in self.networks[:2]] self._test_get_collection_with_pagination( expected_ids, limit=2, fields=['id', 'name']) def test_get_collection_with_pagination_page_reverse(self): marker = self.networks[2]['id'] expected_ids = [network['id'] for network in self.networks[:2]] self._test_get_collection_with_pagination(expected_ids, limit=3, marker=marker, page_reverse=True) def test_get_collection_with_sorting_desc(self): nets = sorted(self.networks, key=lambda net: net['name'], reverse=True) expected_ids = [network['id'] for network in nets] self._test_get_collection_with_pagination(expected_ids, sort_key='name', sort_dir='desc') def test_get_collection_with_sorting_asc(self): nets = sorted(self.networks, key=lambda net: net['name']) expected_ids = [network['id'] for network in nets] self._test_get_collection_with_pagination(expected_ids, sort_key='name', sort_dir='asc') class TestRequestProcessing(TestRootController): def setUp(self): super(TestRequestProcessing, 
self).setUp() mock.patch('neutron.pecan_wsgi.hooks.notifier.registry').start() # request.context is thread-local storage so it has to be accessed by # the controller. We capture it here so we can assert on it after # the request finishes. def capture_request_details(*args, **kwargs): self.captured_context = request.context self.request_params = kwargs mock.patch('neutron.pecan_wsgi.controllers.resource.' 'CollectionsController.get', side_effect=capture_request_details).start() mock.patch('neutron.pecan_wsgi.controllers.resource.' 'CollectionsController.create', side_effect=capture_request_details).start() mock.patch('neutron.pecan_wsgi.controllers.resource.' 'ItemController.get', side_effect=capture_request_details).start() # TODO(kevinbenton): add context tests for X-Roles etc def test_context_set_in_request(self): self.app.get('/v2.0/ports.json', headers={'X-Project-Id': 'tenant_id'}) self.assertEqual('tenant_id', self.captured_context['neutron_context'].tenant_id) def test_core_resource_identified(self): self.app.get('/v2.0/ports.json') self.assertEqual('port', self.captured_context['resource']) self.assertEqual('ports', self.captured_context['collection']) def test_lookup_identifies_resource_id(self): # We know this will return a 404 but that's not the point as it is # mocked self.app.get('/v2.0/ports/reina.json') self.assertEqual('port', self.captured_context['resource']) self.assertEqual('ports', self.captured_context['collection']) self.assertEqual('reina', self.captured_context['resource_id']) def test_resource_processing_post(self): self.app.post_json( '/v2.0/networks.json', params={'network': {'name': 'the_net', 'admin_state_up': True}}, headers={'X-Project-Id': 'tenid'}) self.assertEqual('network', self.captured_context['resource']) self.assertEqual('networks', self.captured_context['collection']) resources = self.captured_context['resources'] is_bulk = self.captured_context['is_bulk'] self.assertEqual(1, len(resources)) self.assertEqual('the_net', resources[0]['name']) self.assertTrue(resources[0]['admin_state_up']) self.assertFalse(is_bulk) def test_resource_processing_post_bulk(self): self.app.post_json( '/v2.0/networks.json', params={'networks': [{'name': 'the_net_1', 'admin_state_up': True}, {'name': 'the_net_2', 'admin_state_up': False}]}, headers={'X-Project-Id': 'tenid'}) resources = self.captured_context['resources'] is_bulk = self.captured_context['is_bulk'] self.assertEqual(2, len(resources)) self.assertTrue(resources[0]['admin_state_up']) self.assertEqual('the_net_1', resources[0]['name']) self.assertFalse(resources[1]['admin_state_up']) self.assertEqual('the_net_2', resources[1]['name']) self.assertTrue(is_bulk) def test_resource_processing_post_bulk_one_item(self): self.app.post_json( '/v2.0/networks.json', params={'networks': [{'name': 'the_net_1', 'admin_state_up': True}]}, headers={'X-Project-Id': 'tenid'}) resources = self.captured_context['resources'] is_bulk = self.captured_context['is_bulk'] self.assertEqual(1, len(resources)) self.assertTrue(is_bulk) def test_resource_processing_post_unknown_attribute_returns_400(self): response = self.app.post_json( '/v2.0/networks.json', params={'network': {'name': 'the_net', 'alien': 'E.T.', 'admin_state_up': True}}, headers={'X-Project-Id': 'tenid'}, expect_errors=True) self.assertEqual(400, response.status_int) def test_resource_processing_post_validation_error_returns_400(self): response = self.app.post_json( '/v2.0/networks.json', params={'network': {'name': 'the_net', 'admin_state_up': 'invalid_value'}}, 
headers={'X-Project-Id': 'tenid'}, expect_errors=True) self.assertEqual(400, response.status_int) def test_service_plugin_identified(self): # TODO(kevinbenton): fix the unit test setup to include an l3 plugin self.skipTest("A dummy l3 plugin needs to be set up") self.app.get('/v2.0/routers.json') self.assertEqual('router', self.req_stash['resource_type']) # make sure the L3 plugin was identified as the handler for routers self.assertEqual( directory.get_plugin(plugin_constants.L3), self.req_stash['plugin']) def test_service_plugin_uri(self): nm = manager.NeutronManager.get_instance() nm.path_prefix_resource_mappings[dummy_plugin.RESOURCE_NAME] = [ _SERVICE_PLUGIN_COLLECTION] response = self.do_request('/v2.0/dummy/serviceplugins.json') self.assertEqual(200, response.status_int) self.assertEqual(_SERVICE_PLUGIN_INDEX_BODY, response.json_body) class TestRouterController(TestResourceController): """Specialized tests for the router resource controller This test class adds tests specific to the router controller in order to verify the 'member_action' functionality, which this controller uses for adding and removing router interfaces. """ def setUp(self): cfg.CONF.set_override( 'service_plugins', ['neutron.services.l3_router.l3_router_plugin.L3RouterPlugin', 'neutron.services.flavors.flavors_plugin.FlavorsPlugin']) super(TestRouterController, self).setUp() policy.init() self.addCleanup(policy.reset) plugin = directory.get_plugin() ctx = context.get_admin_context() l3_plugin = directory.get_plugin(plugin_constants.L3) network_id = pecan_utils.create_network(ctx, plugin)['id'] self.subnet = pecan_utils.create_subnet(ctx, plugin, network_id) self.router = pecan_utils.create_router(ctx, l3_plugin) def test_member_actions_processing(self): response = self.app.put_json( '/v2.0/routers/%s/add_router_interface.json' % self.router['id'], params={'subnet_id': self.subnet['id']}, headers={'X-Project-Id': 'tenid'}) self.assertEqual(200, response.status_int) def test_non_existing_member_action_returns_404(self): response = self.app.put_json( '/v2.0/routers/%s/do_meh.json' % self.router['id'], params={'subnet_id': 'doesitevenmatter'}, headers={'X-Project-Id': 'tenid'}, expect_errors=True) self.assertEqual(404, response.status_int) def test_unsupported_method_member_action(self): response = self.app.post_json( '/v2.0/routers/%s/add_router_interface.json' % self.router['id'], params={'subnet_id': self.subnet['id']}, headers={'X-Project-Id': 'tenid'}, expect_errors=True) self.assertEqual(405, response.status_int) response = self.app.get( '/v2.0/routers/%s/add_router_interface.json' % self.router['id'], headers={'X-Project-Id': 'tenid'}, expect_errors=True) self.assertEqual(405, response.status_int) class TestDHCPAgentShimControllers(test_functional.PecanFunctionalTest): def setUp(self): super(TestDHCPAgentShimControllers, self).setUp() policy.init() policy._ENFORCER.set_rules( oslo_policy.Rules.from_dict( {'get_dhcp-agents': 'role:admin', 'get_dhcp-networks': 'role:admin', 'create_dhcp-networks': 'role:admin', 'delete_dhcp-networks': 'role:admin'}), overwrite=False) plugin = directory.get_plugin() ctx = context.get_admin_context() self.network = pecan_utils.create_network(ctx, plugin) self.agent = helpers.register_dhcp_agent() # NOTE(blogan): Not sending notifications because this test is for # testing the shim controllers plugin.agent_notifiers[n_const.AGENT_TYPE_DHCP] = None def test_list_dhcp_agents_hosting_network(self): response = self.app.get( '/v2.0/networks/%s/dhcp-agents.json' % self.network['id'], 
headers={'X-Roles': 'admin'}) self.assertEqual(200, response.status_int) def test_list_networks_on_dhcp_agent(self): response = self.app.get( '/v2.0/agents/%s/dhcp-networks.json' % self.agent.id, headers={'X-Project-Id': 'tenid', 'X-Roles': 'admin'}) self.assertEqual(200, response.status_int) def test_add_remove_dhcp_agent(self): headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'} self.app.post_json( '/v2.0/agents/%s/dhcp-networks.json' % self.agent.id, headers=headers, params={'network_id': self.network['id']}) response = self.app.get( '/v2.0/networks/%s/dhcp-agents.json' % self.network['id'], headers=headers) self.assertIn(self.agent.id, [a['id'] for a in response.json['agents']]) self.app.delete('/v2.0/agents/%(a)s/dhcp-networks/%(n)s.json' % { 'a': self.agent.id, 'n': self.network['id']}, headers=headers) response = self.app.get( '/v2.0/networks/%s/dhcp-agents.json' % self.network['id'], headers=headers) self.assertNotIn(self.agent.id, [a['id'] for a in response.json['agents']]) class TestL3AgentShimControllers(test_functional.PecanFunctionalTest): def setUp(self): cfg.CONF.set_override( 'service_plugins', ['neutron.services.l3_router.l3_router_plugin.L3RouterPlugin', 'neutron.services.flavors.flavors_plugin.FlavorsPlugin']) super(TestL3AgentShimControllers, self).setUp() policy.init() policy._ENFORCER.set_rules( oslo_policy.Rules.from_dict( {'get_l3-agents': 'role:admin', 'get_l3-routers': 'role:admin'}), overwrite=False) ctx = context.get_admin_context() l3_plugin = directory.get_plugin(plugin_constants.L3) self.router = pecan_utils.create_router(ctx, l3_plugin) self.agent = helpers.register_l3_agent() # NOTE(blogan): Not sending notifications because this test is for # testing the shim controllers l3_plugin.agent_notifiers[n_const.AGENT_TYPE_L3] = None def test_list_l3_agents_hosting_router(self): response = self.app.get( '/v2.0/routers/%s/l3-agents.json' % self.router['id'], headers={'X-Roles': 'admin'}) self.assertEqual(200, response.status_int) def test_list_routers_on_l3_agent(self): response = self.app.get( '/v2.0/agents/%s/l3-routers.json' % self.agent.id, headers={'X-Roles': 'admin'}) self.assertEqual(200, response.status_int) def test_add_remove_l3_agent(self): headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'} response = self.app.post_json( '/v2.0/agents/%s/l3-routers.json' % self.agent.id, headers=headers, params={'router_id': self.router['id']}) self.assertEqual(201, response.status_int) response = self.app.get( '/v2.0/routers/%s/l3-agents.json' % self.router['id'], headers=headers) self.assertIn(self.agent.id, [a['id'] for a in response.json['agents']]) response = self.app.delete( '/v2.0/agents/%(a)s/l3-routers/%(n)s.json' % { 'a': self.agent.id, 'n': self.router['id']}, headers=headers) self.assertEqual(204, response.status_int) self.assertFalse(response.body) response = self.app.get( '/v2.0/routers/%s/l3-agents.json' % self.router['id'], headers=headers) self.assertNotIn(self.agent.id, [a['id'] for a in response.json['agents']]) class TestShimControllers(test_functional.PecanFunctionalTest): def setUp(self): fake_ext = pecan_utils.FakeExtension() fake_plugin = pecan_utils.FakePlugin() plugins = {pecan_utils.FakePlugin.PLUGIN_TYPE: fake_plugin} new_extensions = {fake_ext.get_alias(): fake_ext} super(TestShimControllers, self).setUp( service_plugins=plugins, extensions=new_extensions) policy.init() policy._ENFORCER.set_rules( oslo_policy.Rules.from_dict( {'get_meh_meh': '', 'get_meh_mehs': '', 'get_fake_subresources': ''}), overwrite=False) 
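# NOTE: editorial comment for orientation: an empty policy string in the
# rules above parses to an always-passing check in oslo.policy, so the
# fake 'meh_meh' resources are readable by any caller in the tests below.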
self.addCleanup(policy.reset) def test_hyphenated_resource_controller_not_shimmed(self): collection = pecan_utils.FakeExtension.HYPHENATED_COLLECTION.replace( '_', '-') resource = pecan_utils.FakeExtension.HYPHENATED_RESOURCE url = '/v2.0/{}/something.json'.format(collection) resp = self.app.get(url) self.assertEqual(200, resp.status_int) self.assertEqual({resource: {'fake': 'something'}}, resp.json) def test_hyphenated_collection_controller_not_shimmed(self): body_collection = pecan_utils.FakeExtension.HYPHENATED_COLLECTION uri_collection = body_collection.replace('_', '-') url = '/v2.0/{}.json'.format(uri_collection) resp = self.app.get(url) self.assertEqual(200, resp.status_int) self.assertEqual({body_collection: [{'fake': 'fake'}]}, resp.json) def test_hyphenated_collection_subresource_controller_not_shimmed(self): body_collection = pecan_utils.FakeExtension.HYPHENATED_COLLECTION uri_collection = body_collection.replace('_', '-') # there is only one subresource so far sub_resource_collection = ( pecan_utils.FakeExtension.FAKE_SUB_RESOURCE_COLLECTION) temp_id = uuidutils.generate_uuid() url = '/v2.0/{0}/{1}/{2}'.format( uri_collection, temp_id, sub_resource_collection.replace('_', '-')) resp = self.app.get(url) self.assertEqual(200, resp.status_int) self.assertEqual({sub_resource_collection: {'foo': temp_id}}, resp.json) class TestMemberActionController(test_functional.PecanFunctionalTest): def setUp(self): fake_ext = pecan_utils.FakeExtension() fake_plugin = pecan_utils.FakePlugin() plugins = {pecan_utils.FakePlugin.PLUGIN_TYPE: fake_plugin} new_extensions = {fake_ext.get_alias(): fake_ext} super(TestMemberActionController, self).setUp( service_plugins=plugins, extensions=new_extensions) hyphen_collection = pecan_utils.FakeExtension.HYPHENATED_COLLECTION self.collection = hyphen_collection.replace('_', '-') def test_get_member_action_controller(self): url = '/v2.0/{}/something/boo_meh.json'.format(self.collection) resp = self.app.get(url) self.assertEqual(200, resp.status_int) self.assertEqual({'boo_yah': 'something'}, resp.json) def test_put_member_action_controller(self): url = '/v2.0/{}/something/put_meh.json'.format(self.collection) resp = self.app.put_json(url, params={'it_matters_not': 'ok'}) self.assertEqual(200, resp.status_int) self.assertEqual({'poo_yah': 'something'}, resp.json) def test_get_member_action_does_not_exist(self): url = '/v2.0/{}/something/are_you_still_there.json'.format( self.collection) resp = self.app.get(url, expect_errors=True) self.assertEqual(404, resp.status_int) def test_put_member_action_does_not_exist(self): url = '/v2.0/{}/something/are_you_still_there.json'.format( self.collection) resp = self.app.put_json(url, params={'it_matters_not': 'ok'}, expect_errors=True) self.assertEqual(404, resp.status_int) def test_put_on_get_member_action(self): url = '/v2.0/{}/something/boo_meh.json'.format(self.collection) resp = self.app.put_json(url, params={'it_matters_not': 'ok'}, expect_errors=True) self.assertEqual(405, resp.status_int) def test_get_on_put_member_action(self): url = '/v2.0/{}/something/put_meh.json'.format(self.collection) resp = self.app.get(url, expect_errors=True) self.assertEqual(405, resp.status_int) class TestParentSubresourceController(test_functional.PecanFunctionalTest): def setUp(self): fake_ext = pecan_utils.FakeExtension() fake_plugin = pecan_utils.FakePlugin() plugins = {pecan_utils.FakePlugin.PLUGIN_TYPE: fake_plugin} new_extensions = {fake_ext.get_alias(): fake_ext} super(TestParentSubresourceController, self).setUp( 
service_plugins=plugins, extensions=new_extensions) policy.init() policy._ENFORCER.set_rules( oslo_policy.Rules.from_dict( {'get_fake_duplicate': '', 'get_meh_meh_fake_duplicate': ''}), overwrite=False) self.addCleanup(policy.reset) hyphen_collection = pecan_utils.FakeExtension.HYPHENATED_COLLECTION self.collection = hyphen_collection.replace('_', '-') self.fake_collection = (pecan_utils.FakeExtension. FAKE_PARENT_SUBRESOURCE_COLLECTION) def test_get_duplicate_parent_resource(self): url = '/v2.0/{}'.format(self.fake_collection) resp = self.app.get(url) self.assertEqual(200, resp.status_int) self.assertEqual({'fake_duplicates': [{'fake': 'fakeduplicates'}]}, resp.json) def test_get_duplicate_parent_resource_item(self): url = '/v2.0/{}/something'.format(self.fake_collection) resp = self.app.get(url) self.assertEqual(200, resp.status_int) self.assertEqual({'fake_duplicate': {'fake': 'something'}}, resp.json) def test_get_parent_resource_and_duplicate_subresources(self): url = '/v2.0/{0}/something/{1}'.format(self.collection, self.fake_collection) resp = self.app.get(url) self.assertEqual(200, resp.status_int) self.assertEqual({'fake_duplicates': [{'fake': 'something'}]}, resp.json) def test_get_child_resource_policy_check(self): policy.reset() policy.init() policy._ENFORCER.set_rules( oslo_policy.Rules.from_dict( {'get_meh_meh_fake_duplicate': ''} ) ) url = '/v2.0/{0}/something/{1}'.format(self.collection, self.fake_collection) resp = self.app.get(url) self.assertEqual(200, resp.status_int) self.assertEqual({'fake_duplicates': [{'fake': 'something'}]}, resp.json) class TestExcludeAttributePolicy(test_functional.PecanFunctionalTest): def setUp(self): super(TestExcludeAttributePolicy, self).setUp() policy.init() self.addCleanup(policy.reset) plugin = directory.get_plugin() ctx = context.get_admin_context() self.network_id = pecan_utils.create_network(ctx, plugin)['id'] mock.patch('neutron.pecan_wsgi.controllers.resource.' 'CollectionsController.get').start() def test_get_networks(self): response = self.app.get('/v2.0/networks/%s.json' % self.network_id, headers={'X-Project-Id': 'tenid'}) json_body = jsonutils.loads(response.body) self.assertEqual(response.status_int, 200) self.assertEqual('tenid', json_body['network']['project_id']) self.assertEqual('tenid', json_body['network']['tenant_id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/pecan_wsgi/test_functional.py0000644000175000017500000001416500000000000030042 0ustar00coreycorey00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
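# NOTE: editorial sketch (illustrative only, not part of the suite): the
# harness in this module builds the Pecan app from the paste config and
# drives it through webtest, passing Keystone-style identity headers that
# the InjectContext middleware below turns into a neutron context. A
# minimal, hypothetical usage under those assumptions:
#
#   app = create_test_app()
#   resp = app.get('/v2.0/networks.json',
#                  headers={'X-Project-Id': 'tenid', 'X-Roles': 'admin'})
#   assert resp.status_int == 200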
import os import mock from neutron_lib import context from neutron_lib import exceptions as n_exc from neutron_lib.plugins import constants from oslo_config import cfg from oslo_middleware import base from oslo_service import wsgi from oslo_utils import uuidutils import testtools import webob.dec import webtest from neutron.api import extensions as exts from neutron import manager from neutron import tests from neutron.tests.functional import base as functional_base from neutron.tests.unit import testlib_api class InjectContext(base.ConfigurableMiddleware): @webob.dec.wsgify def __call__(self, req): user_id = req.headers.get('X_USER_ID', '') # Determine the tenant tenant_id = req.headers.get('X_PROJECT_ID') # Suck out the roles roles = [r.strip() for r in req.headers.get('X_ROLES', '').split(',')] # Human-friendly names tenant_name = req.headers.get('X_PROJECT_NAME') user_name = req.headers.get('X_USER_NAME') # Create a context with the authentication data ctx = context.Context(user_id, tenant_id, roles=roles, user_name=user_name, tenant_name=tenant_name) req.environ['neutron.context'] = ctx return self.application def create_test_app(): paste_config_loc = os.path.join(os.path.dirname(tests.__file__), 'etc', 'api-paste.ini') paste_config_loc = os.path.abspath(paste_config_loc) cfg.CONF.set_override('api_paste_config', paste_config_loc) loader = wsgi.Loader(cfg.CONF) app = loader.load_app('neutron') app = InjectContext(app) return webtest.TestApp(app) class PecanFunctionalTest(testlib_api.SqlTestCase, functional_base.BaseLoggingTestCase): def setUp(self, service_plugins=None, extensions=None): self.setup_coreplugin('ml2', load_plugins=False) super(PecanFunctionalTest, self).setUp() self.addCleanup(exts.PluginAwareExtensionManager.clear_instance) self.set_config_overrides() manager.init() ext_mgr = exts.PluginAwareExtensionManager.get_instance() if extensions: ext_mgr.extensions = extensions if service_plugins: service_plugins[constants.CORE] = ext_mgr.plugins.get( constants.CORE) ext_mgr.plugins = service_plugins self.app = create_test_app() def set_config_overrides(self): cfg.CONF.set_override('auth_strategy', 'noauth') def do_request(self, url, tenant_id=None, admin=False, expect_errors=False): if admin: if not tenant_id: tenant_id = 'admin' headers = {'X-Tenant-Id': tenant_id, 'X-Roles': 'admin'} else: headers = {'X-Tenant-ID': tenant_id or ''} return self.app.get(url, headers=headers, expect_errors=expect_errors) class TestErrors(PecanFunctionalTest): def test_404(self): response = self.app.get('/assert_called_once', expect_errors=True) self.assertEqual(response.status_int, 404) def test_bad_method(self): response = self.app.patch('/v2.0/ports/44.json', expect_errors=True) self.assertEqual(response.status_int, 405) class TestRequestID(PecanFunctionalTest): def test_request_id(self): response = self.app.get('/v2.0/') self.assertIn('x-openstack-request-id', response.headers) self.assertTrue( response.headers['x-openstack-request-id'].startswith('req-')) id_part = response.headers['x-openstack-request-id'].split('req-')[1] self.assertTrue(uuidutils.is_uuid_like(id_part)) class TestKeystoneAuth(PecanFunctionalTest): def set_config_overrides(self): # default auth strategy is keystone so we pass pass def test_auth_enforced(self): response = self.app.get('/v2.0/', expect_errors=True) self.assertEqual(response.status_int, 401) class TestInvalidAuth(PecanFunctionalTest): def setup_app(self): # disable normal app setup since it will fail pass def test_invalid_auth_strategy(self): 
cfg.CONF.set_override('auth_strategy', 'badvalue') # NOTE(blogan): the auth.pipeline_factory will throw a KeyError # with a bad value because that value is not in the paste config. # This KeyError is translated to a LookupError, which the oslo wsgi # code translates into PasteAppNotFound. with testtools.ExpectedException(wsgi.PasteAppNotFound): create_test_app() class TestExceptionTranslationHook(PecanFunctionalTest): def test_neutron_notfound_to_webob_exception(self): # this endpoint raises a Neutron NotFound exception. Make sure it gets # translated into a 404 error with mock.patch( 'neutron.pecan_wsgi.controllers.resource.' 'CollectionsController.get', side_effect=n_exc.NotFound() ): response = self.app.get('/v2.0/ports.json', expect_errors=True) self.assertEqual(response.status_int, 404) def test_unexpected_exception(self): with mock.patch( 'neutron.pecan_wsgi.controllers.resource.' 'CollectionsController.get', side_effect=ValueError('secretpassword') ): response = self.app.get('/v2.0/ports.json', expect_errors=True) self.assertNotIn(b'secretpassword', response.body) self.assertEqual(response.status_int, 500) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/pecan_wsgi/test_hooks.py0000644000175000017500000005331600000000000027024 0ustar00coreycorey00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
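# NOTE: orientation sketch for the hook tests below (illustrative only):
# the pecan_wsgi hooks run before and after each controller call. The
# If-Match tests, for example, exercise the 'revision_number' constraint:
# with the 'revisions' service plugin loaded, a stale number yields
# HTTP 412, while without the plugin the constraint itself is rejected
# with HTTP 400, e.g.:
#
#   app.put_json('/v2.0/networks/%s.json' % net_id,
#                params={'network': {'name': 'cat'}},
#                headers={'X-Project-Id': 'tenid',
#                         'If-Match': 'revision_number=0'})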
import mock from neutron_lib.api import attributes from neutron_lib.callbacks import events from neutron_lib import context from neutron_lib.db import constants as db_const from neutron_lib.plugins import directory from oslo_config import cfg from oslo_policy import policy as oslo_policy from oslo_serialization import jsonutils from neutron.db.quota import driver as quota_driver from neutron import manager from neutron.pecan_wsgi.controllers import resource from neutron import policy from neutron.tests.functional.pecan_wsgi import test_functional class TestOwnershipHook(test_functional.PecanFunctionalTest): def test_network_ownership_check(self): net_response = self.app.post_json( '/v2.0/networks.json', params={'network': {'name': 'meh'}}, headers={'X-Project-Id': 'tenid'}) network_id = jsonutils.loads(net_response.body)['network']['id'] port_response = self.app.post_json( '/v2.0/ports.json', params={'port': {'network_id': network_id, 'admin_state_up': True}}, headers={'X-Project-Id': 'tenid'}) self.assertEqual(201, port_response.status_int) class TestQueryParametersHook(test_functional.PecanFunctionalTest): def test_if_match_on_update(self): net_response = jsonutils.loads(self.app.post_json( '/v2.0/networks.json', params={'network': {'name': 'meh'}}, headers={'X-Project-Id': 'tenid'}).body) network_id = net_response['network']['id'] response = self.app.put_json('/v2.0/networks/%s.json' % network_id, params={'network': {'name': 'cat'}}, headers={'X-Project-Id': 'tenid', 'If-Match': 'revision_number=0'}, expect_errors=True) # revision plugin not supported by default, so badrequest self.assertEqual(400, response.status_int) class TestQueryParametersHookWithRevision(test_functional.PecanFunctionalTest): def setUp(self): cfg.CONF.set_override('service_plugins', ['revisions']) super(TestQueryParametersHookWithRevision, self).setUp() def test_if_match_on_update(self): net_response = jsonutils.loads(self.app.post_json( '/v2.0/networks.json', params={'network': {'name': 'meh'}}, headers={'X-Project-Id': 'tenid'}).body) network_id = net_response['network']['id'] rev = net_response['network']['revision_number'] stale = rev - 1 response = self.app.put_json( '/v2.0/networks/%s.json' % network_id, params={'network': {'name': 'cat'}}, headers={'X-Project-Id': 'tenid', 'If-Match': 'revision_number=%s' % stale}, expect_errors=True) self.assertEqual(412, response.status_int) self.app.put_json('/v2.0/networks/%s.json' % network_id, params={'network': {'name': 'cat'}}, headers={'X-Project-Id': 'tenid', 'If-Match': 'revision_number=%s' % rev}) class TestQuotaEnforcementHook(test_functional.PecanFunctionalTest): def test_quota_enforcement_single(self): ctx = context.get_admin_context() quota_driver.DbQuotaDriver.update_quota_limit( ctx, 'tenid', 'network', 1) # There is enough headroom for creating a network response = self.app.post_json( '/v2.0/networks.json', params={'network': {'name': 'meh'}}, headers={'X-Project-Id': 'tenid'}) self.assertEqual(response.status_int, 201) # But a second request will fail response = self.app.post_json( '/v2.0/networks.json', params={'network': {'name': 'meh-2'}}, headers={'X-Project-Id': 'tenid'}, expect_errors=True) self.assertEqual(response.status_int, 409) def test_quota_enforcement_bulk_request(self): ctx = context.get_admin_context() quota_driver.DbQuotaDriver.update_quota_limit( ctx, 'tenid', 'network', 3) # There is enough headroom for a bulk request creating 2 networks response = self.app.post_json( '/v2.0/networks.json', params={'networks': [ {'name': 'meh1'}, 
{'name': 'meh2'}]}, headers={'X-Project-Id': 'tenid'}) self.assertEqual(response.status_int, 201) # But it won't be possible to create 2 more networks... response = self.app.post_json( '/v2.0/networks.json', params={'networks': [ {'name': 'meh3'}, {'name': 'meh4'}]}, headers={'X-Project-Id': 'tenid'}, expect_errors=True) self.assertEqual(response.status_int, 409) class TestPolicyEnforcementHook(test_functional.PecanFunctionalTest): FAKE_RESOURCE = { 'mehs': { 'id': {'allow_post': False, 'allow_put': False, 'is_visible': True, 'primary_key': True}, 'attr': {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': ''}, 'restricted_attr': {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': ''}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': db_const.PROJECT_ID_FIELD_SIZE}, 'is_visible': True} } } def setUp(self): # Create a controller for a fake resource. This will make the tests # independent from the evolution of the API (so if one changes the API # or the default policies there won't be any risk of breaking these # tests, or at least I hope so) super(TestPolicyEnforcementHook, self).setUp() self.mock_plugin = mock.Mock() attributes.RESOURCES.update(self.FAKE_RESOURCE) manager.NeutronManager.set_plugin_for_resource('mehs', self.mock_plugin) fake_controller = resource.CollectionsController('mehs', 'meh') manager.NeutronManager.set_controller_for_resource( 'mehs', fake_controller) # Inject policies for the fake resource policy.init() policy._ENFORCER.set_rules( oslo_policy.Rules.from_dict( {'create_meh': '', 'update_meh': 'rule:admin_only', 'delete_meh': 'rule:admin_only', 'get_meh': 'rule:admin_only or field:mehs:id=xxx', 'get_meh:restricted_attr': 'rule:admin_only'}), overwrite=False) def test_before_on_create_authorized(self): # Mock a return value for a hypothetical create operation self.mock_plugin.create_meh.return_value = { 'id': 'xxx', 'attr': 'meh', 'restricted_attr': '', 'tenant_id': 'tenid'} response = self.app.post_json('/v2.0/mehs.json', params={'meh': {'attr': 'meh'}}, headers={'X-Project-Id': 'tenid'}) # We expect this operation to succeed self.assertEqual(201, response.status_int) self.assertEqual(0, self.mock_plugin.get_meh.call_count) self.assertEqual(1, self.mock_plugin.create_meh.call_count) def test_before_on_put_not_authorized(self): # The policy hook here should load the resource, and therefore we must # mock a get response self.mock_plugin.get_meh.return_value = { 'id': 'xxx', 'attr': 'meh', 'restricted_attr': '', 'tenant_id': 'tenid'} # The policy engine should trigger an exception in 'before', and the # plugin method should not be called at all response = self.app.put_json('/v2.0/mehs/xxx.json', params={'meh': {'attr': 'meh'}}, headers={'X-Project-Id': 'tenid'}, expect_errors=True) self.assertEqual(403, response.status_int) self.assertEqual(1, self.mock_plugin.get_meh.call_count) self.assertEqual(0, self.mock_plugin.update_meh.call_count) def test_before_on_put_not_found_when_not_authorized_to_get(self): # the user won't even have permission to view this resource # so the error on unauthorized updates should be translated into # a 404 self.mock_plugin.get_meh.return_value = { 'id': 'yyy', 'attr': 'meh', 'restricted_attr': '', 'tenant_id': 'tenid'} response = self.app.put_json('/v2.0/mehs/yyy.json', params={'meh': {'attr': 'meh'}}, headers={'X-Project-Id': 'tenid'}, expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual(1, 
self.mock_plugin.get_meh.call_count) self.assertEqual(0, self.mock_plugin.update_meh.call_count) def test_before_on_delete_not_authorized(self): # The policy hook here should load the resource, and therefore we must # mock a get response self.mock_plugin.delete_meh.return_value = None self.mock_plugin.get_meh.return_value = { 'id': 'xxx', 'attr': 'meh', 'restricted_attr': '', 'tenant_id': 'tenid'} # The policy engine should trigger an exception in 'before', and the # plugin method should not be called response = self.app.delete_json('/v2.0/mehs/xxx.json', headers={'X-Project-Id': 'tenid'}, expect_errors=True) self.assertEqual(403, response.status_int) self.assertEqual(1, self.mock_plugin.get_meh.call_count) self.assertEqual(0, self.mock_plugin.delete_meh.call_count) def test_after_on_get_not_found(self): # The GET test policy will deny access to anything whose id is not # 'xxx', so the following request should be forbidden and presented # to the user as an HTTPNotFound self.mock_plugin.get_meh.return_value = { 'id': 'yyy', 'attr': 'meh', 'restricted_attr': '', 'tenant_id': 'tenid'} # The policy engine should trigger an exception in 'after', and the # plugin method should be called response = self.app.get('/v2.0/mehs/yyy.json', headers={'X-Project-Id': 'tenid'}, expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual(1, self.mock_plugin.get_meh.call_count) def test_after_on_get_excludes_admin_attribute(self): self.mock_plugin.get_meh.return_value = { 'id': 'xxx', 'attr': 'meh', 'restricted_attr': '', 'tenant_id': 'tenid'} response = self.app.get('/v2.0/mehs/xxx.json', headers={'X-Project-Id': 'tenid'}) self.assertEqual(200, response.status_int) json_response = jsonutils.loads(response.body) self.assertNotIn('restricted_attr', json_response['meh']) def test_after_on_list_excludes_admin_attribute(self): self.mock_plugin.get_mehs.return_value = [{ 'id': 'xxx', 'attr': 'meh', 'restricted_attr': '', 'tenant_id': 'tenid'}] response = self.app.get('/v2.0/mehs', headers={'X-Project-Id': 'tenid'}) self.assertEqual(200, response.status_int) json_response = jsonutils.loads(response.body) self.assertNotIn('restricted_attr', json_response['mehs'][0]) def test_after_inits_policy(self): self.mock_plugin.get_mehs.return_value = [{ 'id': 'xxx', 'attr': 'meh', 'restricted_attr': '', 'tenant_id': 'tenid'}] policy.reset() response = self.app.get('/v2.0/mehs', headers={'X-Project-Id': 'tenid'}) self.assertEqual(200, response.status_int) class TestMetricsNotifierHook(test_functional.PecanFunctionalTest): def setUp(self): patcher = mock.patch('neutron.pecan_wsgi.hooks.notifier.NotifierHook.' 
'_notifier') self.mock_notifier = patcher.start().info super(TestMetricsNotifierHook, self).setUp() def test_post_put_delete_triggers_notification(self): req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'} payload = {'network': {'name': 'meh'}} response = self.app.post_json( '/v2.0/networks.json', params=payload, headers=req_headers) self.assertEqual(201, response.status_int) json_body = jsonutils.loads(response.body) self.assertEqual( [mock.call(mock.ANY, 'network.create.start', payload), mock.call(mock.ANY, 'network.create.end', json_body)], self.mock_notifier.mock_calls) self.mock_notifier.reset_mock() network_id = json_body['network']['id'] payload = {'network': {'name': 'meh-2'}} response = self.app.put_json( '/v2.0/networks/%s.json' % network_id, params=payload, headers=req_headers) self.assertEqual(200, response.status_int) json_body = jsonutils.loads(response.body) # id should be in payload sent to notifier payload['id'] = network_id self.assertEqual( [mock.call(mock.ANY, 'network.update.start', payload), mock.call(mock.ANY, 'network.update.end', json_body)], self.mock_notifier.mock_calls) self.mock_notifier.reset_mock() before_payload = {'network_id': network_id} after_payload = before_payload.copy() after_payload['network'] = directory.get_plugin().get_network( context.get_admin_context(), network_id) response = self.app.delete( '/v2.0/networks/%s.json' % network_id, headers=req_headers) self.assertEqual(204, response.status_int) self.assertEqual( [mock.call(mock.ANY, 'network.delete.start', before_payload), mock.call(mock.ANY, 'network.delete.end', after_payload)], self.mock_notifier.mock_calls) def test_bulk_create_triggers_notification(self): req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'} payload = {'networks': [{'name': 'meh_1'}, {'name': 'meh_2'}]} response = self.app.post_json( '/v2.0/networks.json', params=payload, headers=req_headers) self.assertEqual(201, response.status_int) json_body = jsonutils.loads(response.body) self.assertEqual(2, self.mock_notifier.call_count) self.mock_notifier.assert_has_calls( [mock.call(mock.ANY, 'network.create.start', payload), mock.call(mock.ANY, 'network.create.end', json_body)]) def test_bad_create_doesnt_emit_end(self): req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'} payload = {'network': {'name': 'meh'}} plugin = directory.get_plugin() with mock.patch.object(plugin, 'create_network', side_effect=ValueError): response = self.app.post_json( '/v2.0/networks.json', params=payload, headers=req_headers, expect_errors=True) self.assertEqual(500, response.status_int) self.assertEqual( [mock.call(mock.ANY, 'network.create.start', mock.ANY)], self.mock_notifier.mock_calls) def test_bad_update_doesnt_emit_end(self): req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'} payload = {'network': {'name': 'meh'}} response = self.app.post_json( '/v2.0/networks.json', params=payload, headers=req_headers, expect_errors=True) self.assertEqual(201, response.status_int) json_body = jsonutils.loads(response.body) self.mock_notifier.reset_mock() plugin = directory.get_plugin() with mock.patch.object(plugin, 'update_network', side_effect=ValueError): response = self.app.put_json( '/v2.0/networks/%s.json' % json_body['network']['id'], params=payload, headers=req_headers, expect_errors=True) self.assertEqual(500, response.status_int) self.assertEqual( [mock.call(mock.ANY, 'network.update.start', mock.ANY)], self.mock_notifier.mock_calls) def test_bad_delete_doesnt_emit_end(self): req_headers = {'X-Project-Id': 'tenid', 
'X-Roles': 'admin'} payload = {'network': {'name': 'meh'}} response = self.app.post_json( '/v2.0/networks.json', params=payload, headers=req_headers, expect_errors=True) self.assertEqual(201, response.status_int) json_body = jsonutils.loads(response.body) self.mock_notifier.reset_mock() plugin = directory.get_plugin() with mock.patch.object(plugin, 'delete_network', side_effect=ValueError): response = self.app.delete( '/v2.0/networks/%s.json' % json_body['network']['id'], headers=req_headers, expect_errors=True) self.assertEqual(500, response.status_int) self.assertEqual( [mock.call(mock.ANY, 'network.delete.start', mock.ANY)], self.mock_notifier.mock_calls) class TestCallbackRegistryNotifier(test_functional.PecanFunctionalTest): def setUp(self): super(TestCallbackRegistryNotifier, self).setUp() patcher = mock.patch('neutron.pecan_wsgi.hooks.notifier.registry') self.mock_notifier = patcher.start().publish def _create(self, bulk=False): if bulk: body = {'networks': [{'name': 'meh-1'}, {'name': 'meh-2'}]} else: body = {'network': {'name': 'meh-1'}} response = self.app.post_json( '/v2.0/networks.json', params=body, headers={'X-Project-Id': 'tenid'}) return response.json def test_create(self): self._create() self.mock_notifier.assert_called_once_with( 'network', events.BEFORE_RESPONSE, mock.ANY, payload=mock.ANY) payload = self.mock_notifier.call_args[1]['payload'] self.assertEqual('network.create.end', payload.method_name) self.assertEqual('create_network', payload.action) self.assertEqual('networks', payload.collection_name) actual = payload.latest_state self.assertEqual('meh-1', actual['network']['name']) def test_create_bulk(self): self._create(bulk=True) self.mock_notifier.assert_called_once_with( 'network', events.BEFORE_RESPONSE, mock.ANY, payload=mock.ANY) payload = self.mock_notifier.call_args[1]['payload'] self.assertEqual('network.create.end', payload.method_name) self.assertEqual('create_network', payload.action) self.assertEqual('networks', payload.collection_name) actual = payload.latest_state self.assertEqual(2, len(actual['networks'])) self.assertEqual('meh-1', actual['networks'][0]['name']) self.assertEqual('meh-2', actual['networks'][1]['name']) def test_update(self): network_id = self._create()['network']['id'] self.mock_notifier.reset_mock() self.app.put_json('/v2.0/networks/%s.json' % network_id, params={'network': {'name': 'new-meh'}}, headers={'X-Project-Id': 'tenid'}) self.mock_notifier.assert_called_once_with( 'network', events.BEFORE_RESPONSE, mock.ANY, payload=mock.ANY) payload = self.mock_notifier.call_args[1]['payload'] self.assertEqual('network.update.end', payload.method_name) self.assertEqual('update_network', payload.action) self.assertEqual('networks', payload.collection_name) actual_new = payload.latest_state self.assertEqual('new-meh', actual_new['network']['name']) actual_original = payload.states[0] self.assertEqual(network_id, actual_original['id']) def test_delete(self): network_id = self._create()['network']['id'] self.mock_notifier.reset_mock() self.app.delete( '/v2.0/networks/%s.json' % network_id, headers={'X-Project-Id': 'tenid'}) self.mock_notifier.assert_called_once_with( 'network', events.BEFORE_RESPONSE, mock.ANY, payload=mock.ANY) payload = self.mock_notifier.call_args[1]['payload'] self.assertEqual('network.delete.end', payload.method_name) self.assertEqual('delete_network', payload.action) self.assertEqual('networks', payload.collection_name) actual = payload.latest_state self.assertEqual(network_id, actual['network']['id']) 
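# NOTE: illustrative sketch only (not executed by this module): the registry
# notifications asserted in TestCallbackRegistryNotifier above are what an
# ordinary subscriber would receive. A minimal, hypothetical consumer under
# those assumptions:
#
#   from neutron_lib.callbacks import events, registry, resources
#
#   def log_network_response(resource, event, trigger, payload=None):
#       # payload.method_name is e.g. 'network.create.end' and
#       # payload.latest_state holds the API response body.
#       print(payload.method_name, payload.latest_state)
#
#   registry.subscribe(log_network_response, resources.NETWORK,
#                      events.BEFORE_RESPONSE)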
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/pecan_wsgi/utils.py0000644000175000017500000001614100000000000025775 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api import extensions as api_extensions from neutron_lib import constants from neutron.api import extensions from neutron.api.v2 import base from neutron.pecan_wsgi import controllers from neutron.pecan_wsgi.controllers import utils as pecan_utils class FakeSingularCollectionExtension(api_extensions.ExtensionDescriptor): COLLECTION = 'topologies' RESOURCE = 'topology' RAM = { COLLECTION: { 'fake': {'is_visible': True} } } @classmethod def get_name(cls): return "" @classmethod def get_alias(cls): return "fake-sc" @classmethod def get_description(cls): return "" @classmethod def get_updated(cls): return "2099-07-23T10:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return self.RAM else: return {} def get_pecan_controllers(self): ctrllr = controllers.CollectionsController( self.RESOURCE, self.RESOURCE) return [pecan_utils.PecanResourceExtension(self.RESOURCE, ctrllr)] class FakeSingularCollectionPlugin(object): supported_extension_aliases = ['fake-sc'] def get_topology(self, context, id_, fields=None): return {'fake': id_} def get_topologies(self, context, filters=None, fields=None): return [{'fake': 'fake'}] def create_network(context, plugin): return plugin.create_network( context, {'network': {'name': 'pecannet', 'tenant_id': 'tenid', 'shared': False, 'admin_state_up': True, 'status': 'ACTIVE'}}) def create_subnet(context, plugin, network_id): return plugin.create_subnet( context, {'subnet': {'tenant_id': 'tenid', 'network_id': network_id, 'name': 'pecansub', 'ip_version': constants.IP_VERSION_4, 'cidr': '10.20.30.0/24', 'gateway_ip': '10.20.30.1', 'enable_dhcp': True, 'allocation_pools': [ {'start': '10.20.30.2', 'end': '10.20.30.254'}], 'dns_nameservers': [], 'host_routes': []}}) def create_router(context, l3_plugin): return l3_plugin.create_router( context, {'router': {'name': 'pecanrtr', 'tenant_id': 'tenid', 'admin_state_up': True}}) class FakeExtension(api_extensions.ExtensionDescriptor): HYPHENATED_RESOURCE = 'meh_meh' HYPHENATED_COLLECTION = HYPHENATED_RESOURCE + 's' FAKE_PARENT_SUBRESOURCE_COLLECTION = 'fake_duplicates' FAKE_SUB_RESOURCE_COLLECTION = 'fake_subresources' RESOURCE_ATTRIBUTE_MAP = { 'meh_mehs': { 'fake': {'is_visible': True} }, 'fake_duplicates': { 'fake': {'is_visible': True} } } SUB_RESOURCE_ATTRIBUTE_MAP = { 'fake_subresources': { 'parent': { 'collection_name': ( 'meh_mehs'), 'member_name': 'meh_meh'}, 'parameters': {'foo': {'is_visible': True}, 'bar': {'is_visible': True} } }, 'fake_duplicates': { 'parent': { 'collection_name': ( 'meh_mehs'), 'member_name': 'meh_meh'}, 'parameters': {'fake': {'is_visible': True} } } } @classmethod def get_name(cls): return "fake-ext" @classmethod def get_alias(cls): return "fake-ext" @classmethod def 
get_description(cls): return "" @classmethod def get_updated(cls): return "meh" def get_resources(self): """Returns Ext Resources.""" resources = [] fake_plugin = FakePlugin() for collection_name in self.RESOURCE_ATTRIBUTE_MAP: resource_name = collection_name[:-1] params = self.RESOURCE_ATTRIBUTE_MAP.get(collection_name, {}) member_actions = {'put_meh': 'PUT', 'boo_meh': 'GET'} if collection_name == self.HYPHENATED_COLLECTION: collection_name = collection_name.replace('_', '-') controller = base.create_resource( collection_name, resource_name, fake_plugin, params, allow_bulk=True, allow_pagination=True, allow_sorting=True, member_actions=member_actions) resource = extensions.ResourceExtension( collection_name, controller, attr_map=params) resources.append(resource) for collection_name in self.SUB_RESOURCE_ATTRIBUTE_MAP: resource_name = collection_name[:-1] parent = self.SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get( 'parent') params = self.SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get( 'parameters') controller = base.create_resource(collection_name, resource_name, fake_plugin, params, allow_bulk=True, parent=parent) resource = extensions.ResourceExtension( collection_name, controller, parent, path_prefix="", attr_map=params) resources.append(resource) return resources def get_extended_resources(self, version): if version == "2.0": return self.RESOURCE_ATTRIBUTE_MAP else: return {} class FakePlugin(object): PLUGIN_TYPE = 'fake-ext-plugin' supported_extension_aliases = ['fake-ext'] @classmethod def get_plugin_type(cls): return cls.PLUGIN_TYPE def get_meh_meh(self, context, id_, fields=None): return {'fake': id_} def get_meh_mehs(self, context, filters=None, fields=None): return [{'fake': 'fake'}] def get_fake_duplicate(self, context, id_, fields=None): return {'fake': id_} def get_fake_duplicates(self, context, filters=None, fields=None): return [{'fake': 'fakeduplicates'}] def get_meh_meh_fake_duplicates(self, context, id_, fields=None, filters=None): return [{'fake': id_}] def get_meh_meh_fake_subresources(self, context, id_, fields=None, filters=None): return {'foo': id_} def put_meh(self, context, id_, data): return {'poo_yah': id_} def boo_meh(self, context, id_): return {'boo_yah': id_} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4030454 neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/0000755000175000017500000000000000000000000023622 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982281.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/__init__.py0000644000175000017500000000000000000000000025721 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4030454 neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/ml2/0000755000175000017500000000000000000000000024314 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/ml2/__init__.py0000644000175000017500000000000000000000000026413 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4070456 neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/ml2/drivers/0000755000175000017500000000000000000000000025772 
5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/ml2/drivers/__init__.py0000644000175000017500000000000000000000000030071 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4070456 neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/ml2/drivers/macvtap/0000755000175000017500000000000000000000000027425 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/ml2/drivers/macvtap/__init__.py0000644000175000017500000000000000000000000031524 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4070456 neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/ml2/drivers/macvtap/agent/0000755000175000017500000000000000000000000030523 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/ml2/drivers/macvtap/agent/__init__.py0000644000175000017500000000000000000000000032622 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/ml2/drivers/macvtap/agent/test_macvtap_neutron_agent.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/ml2/drivers/macvtap/agent/test_macvtap_ne0000644000175000017500000000300400000000000033617 0ustar00coreycorey00000000000000# Copyright (c) 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants from neutron.plugins.ml2.drivers.macvtap.agent import macvtap_neutron_agent from neutron.tests.common import net_helpers from neutron.tests.functional import base as functional_base class MacvtapAgentTestCase(functional_base.BaseSudoTestCase): def setUp(self): super(MacvtapAgentTestCase, self).setUp() self.mgr = macvtap_neutron_agent.MacvtapManager({}) def test_get_all_devices(self): # Veth is simulating the host's eth device. 
In this test it is used as # src_dev for the macvtap veth1, veth2 = self.useFixture(net_helpers.VethFixture()).ports macvtap = self.useFixture(net_helpers.MacvtapFixture( src_dev=veth1.name, mode='bridge', prefix=constants.MACVTAP_DEVICE_PREFIX)).ip_dev self.assertEqual(set([macvtap.link.address]), self.mgr.get_all_devices()) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4070456 neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/ml2/drivers/ovn/0000755000175000017500000000000000000000000026574 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/ml2/drivers/ovn/__init__.py0000644000175000017500000000000000000000000030673 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4070456 neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/0000755000175000017500000000000000000000000031063 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/__init__.py0000644000175000017500000000000000000000000033162 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4070456 neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/0000755000175000017500000000000000000000000032200 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/__init__.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/__init_0000644000175000017500000000000000000000000033511 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_impl_idl.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_im0000644000175000017500000001504700000000000033576 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import uuid from ovsdbapp import event as ovsdb_event from ovsdbapp.tests.functional import base from ovsdbapp.tests import utils from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb \ import impl_idl_ovn as impl from neutron.tests.functional import base as n_base from neutron.tests.functional.resources.ovsdb import events class TestSbApi(base.FunctionalTestCase, n_base.BaseLoggingTestCase): schemas = ['OVN_Southbound', 'OVN_Northbound'] def setUp(self): super(TestSbApi, self).setUp() self.data = { 'chassis': [ {'external_ids': {'ovn-bridge-mappings': 'public:br-ex,private:br-0'}}, {'external_ids': {'ovn-bridge-mappings': 'public:br-ex,public2:br-ex'}}, {'external_ids': {'ovn-bridge-mappings': 'public:br-ex'}}, ] } self.api = impl.OvsdbSbOvnIdl(self.connection['OVN_Southbound']) self.nbapi = impl.OvsdbNbOvnIdl(self.connection['OVN_Northbound']) self.load_test_data() self.handler = ovsdb_event.RowEventHandler() self.api.idl.notify = self.handler.notify def load_test_data(self): with self.api.transaction(check_error=True) as txn: for chassis in self.data['chassis']: chassis['name'] = utils.get_rand_device_name('chassis') chassis['hostname'] = '%s.localdomain.com' % chassis['name'] txn.add(self.api.chassis_add( chassis['name'], ['geneve'], chassis['hostname'], hostname=chassis['hostname'], external_ids=chassis['external_ids'])) def test_get_chassis_hostname_and_physnets(self): mapping = self.api.get_chassis_hostname_and_physnets() self.assertLessEqual(len(self.data['chassis']), len(mapping)) self.assertGreaterEqual(set(mapping.keys()), {c['hostname'] for c in self.data['chassis']}) def test_get_all_chassis(self): chassis_list = set(self.api.get_all_chassis()) our_chassis = {c['name'] for c in self.data['chassis']} self.assertLessEqual(our_chassis, chassis_list) def test_get_chassis_data_for_ml2_bind_port(self): host = self.data['chassis'][0]['hostname'] dp, iface, phys = self.api.get_chassis_data_for_ml2_bind_port(host) self.assertEqual('', dp) self.assertEqual('', iface) self.assertItemsEqual(phys, ['private', 'public']) def test_chassis_exists(self): self.assertTrue(self.api.chassis_exists( self.data['chassis'][0]['hostname'])) self.assertFalse(self.api.chassis_exists("nochassishere")) def test_get_chassis_and_physnets(self): mapping = self.api.get_chassis_and_physnets() self.assertLessEqual(len(self.data['chassis']), len(mapping)) self.assertGreaterEqual(set(mapping.keys()), {c['name'] for c in self.data['chassis']}) def _add_switch_port(self, chassis_name, type='localport'): sname, pname = (utils.get_rand_device_name(prefix=p) for p in ('switch', 'port')) chassis = self.api.lookup('Chassis', chassis_name) row_event = events.WaitForCreatePortBindingEvent(pname) self.handler.watch_event(row_event) with self.nbapi.transaction(check_error=True) as txn: switch = txn.add(self.nbapi.ls_add(sname)) port = txn.add(self.nbapi.lsp_add(sname, pname, type=type)) row_event.wait() return chassis, switch.result, port.result, row_event.row def test_get_metadata_port_network(self): chassis, switch, port, binding = self._add_switch_port( self.data['chassis'][0]['name']) result = self.api.get_metadata_port_network(str(binding.datapath.uuid)) self.assertEqual(binding, result) self.assertEqual(binding.datapath.external_ids['logical-switch'], str(switch.uuid)) def test_get_metadata_port_network_missing(self): val = str(uuid.uuid4()) self.assertIsNone(self.api.get_metadata_port_network(val)) def test_set_get_chassis_metadata_networks(self): name = self.data['chassis'][0]['name'] nets = [str(uuid.uuid4()) for 
                _ in range(3)]
        self.api.set_chassis_metadata_networks(name, nets).execute(
            check_error=True)
        self.assertEqual(nets,
                         self.api.get_chassis_metadata_networks(name))

    def test_get_network_port_bindings_by_ip(self):
        chassis, switch, port, binding = self._add_switch_port(
            self.data['chassis'][0]['name'])
        mac = 'de:ad:be:ef:4d:ad'
        ipaddr = '192.0.2.1'
        mac_ip = '%s %s' % (mac, ipaddr)
        pb_update_event = events.WaitForUpdatePortBindingEvent(
            port.name, mac=[mac_ip])
        self.handler.watch_event(pb_update_event)
        self.nbapi.lsp_set_addresses(
            port.name, [mac_ip]).execute(check_error=True)
        self.assertTrue(pb_update_event.wait())
        self.api.lsp_bind(port.name, chassis.name).execute(check_error=True)
        result = self.api.get_network_port_bindings_by_ip(
            str(binding.datapath.uuid), ipaddr)
        self.assertIn(binding, result)

    def test_get_ports_on_chassis(self):
        chassis, switch, port, binding = self._add_switch_port(
            self.data['chassis'][0]['name'])
        self.api.lsp_bind(port.name, chassis.name).execute(check_error=True)
        self.assertEqual([binding],
                         self.api.get_ports_on_chassis(chassis.name))

    def test_get_logical_port_chassis_and_datapath(self):
        chassis, switch, port, binding = self._add_switch_port(
            self.data['chassis'][0]['name'])
        self.api.lsp_bind(port.name, chassis.name).execute(check_error=True)
        self.assertEqual(
            (chassis.name, str(binding.datapath.uuid)),
            self.api.get_logical_port_chassis_and_datapath(port.name))

neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py

# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
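# Reader aid (added; nothing below calls it): the DBInconsistenciesPeriodics
# task exercised by these tests flags an object as inconsistent by comparing
# its Neutron revision_number with the copy kept in the OVN row's
# external_ids. A minimal sketch of that comparison, assuming the
# 'neutron:revision_number' key that the assertions below reference via
# ovn_const.OVN_REV_NUM_EXT_ID_KEY (the helper name and the -1 default are
# illustrative only):
def _revisions_out_of_sync(neutron_obj, ovn_external_ids,
                           rev_key='neutron:revision_number'):
    # A missing key reads as -1, which always compares as out-of-sync.
    ovn_rev = int(ovn_external_ids.get(rev_key, -1))
    return ovn_rev != neutron_obj['revision_number']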
import mock from oslo_config import cfg from futurist import periodics from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib import constants as n_const from neutron_lib import context as n_context from neutron.common.ovn import constants as ovn_const from neutron.common.ovn import utils from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf as ovn_config from neutron.db import ovn_revision_numbers_db as db_rev from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import maintenance from neutron.tests.functional import base from neutron.tests.unit.api import test_extensions from neutron.tests.unit.extensions import test_extraroute class _TestMaintenanceHelper(base.TestOVNFunctionalBase): """A helper class to keep the code more organized.""" def setUp(self): super(_TestMaintenanceHelper, self).setUp() self._ovn_client = self.mech_driver._ovn_client self._l3_ovn_client = self.l3_plugin._ovn_client ext_mgr = test_extraroute.ExtraRouteTestExtensionManager() self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr) self.maint = maintenance.DBInconsistenciesPeriodics(self._ovn_client) self.context = n_context.get_admin_context() # Always verify inconsistencies for all objects. db_rev.INCONSISTENCIES_OLDER_THAN = -1 def _find_network_row_by_name(self, name): for row in self.nb_api._tables['Logical_Switch'].rows.values(): if (row.external_ids.get( ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY) == name): return row def _create_network(self, name, external=False): data = {'network': {'name': name, 'tenant_id': self._tenant_id, extnet_apidef.EXTERNAL: external}} req = self.new_create_request('networks', data, self.fmt) res = req.get_response(self.api) return self.deserialize(self.fmt, res)['network'] def _update_network_name(self, net_id, new_name): data = {'network': {'name': new_name}} req = self.new_update_request('networks', data, net_id, self.fmt) res = req.get_response(self.api) return self.deserialize(self.fmt, res)['network'] def _create_port(self, name, net_id, security_groups=None, device_owner=None): data = {'port': {'name': name, 'tenant_id': self._tenant_id, 'network_id': net_id}} if security_groups is not None: data['port']['security_groups'] = security_groups if device_owner is not None: data['port']['device_owner'] = device_owner req = self.new_create_request('ports', data, self.fmt) res = req.get_response(self.api) return self.deserialize(self.fmt, res)['port'] def _update_port_name(self, port_id, new_name): data = {'port': {'name': new_name}} req = self.new_update_request('ports', data, port_id, self.fmt) res = req.get_response(self.api) return self.deserialize(self.fmt, res)['port'] def _find_port_row_by_name(self, name): for row in self.nb_api._tables['Logical_Switch_Port'].rows.values(): if (row.external_ids.get( ovn_const.OVN_PORT_NAME_EXT_ID_KEY) == name): return row def _set_global_dhcp_opts(self, ip_version, opts): opt_string = ','.join(['{0}:{1}'.format(key, value) for key, value in opts.items()]) if ip_version == 6: ovn_config.cfg.CONF.set_override('ovn_dhcp6_global_options', opt_string, group='ovn') if ip_version == 4: ovn_config.cfg.CONF.set_override('ovn_dhcp4_global_options', opt_string, group='ovn') def _unset_global_dhcp_opts(self, ip_version): if ip_version == 6: ovn_config.cfg.CONF.clear_override('ovn_dhcp6_global_options', group='ovn') if ip_version == 4: ovn_config.cfg.CONF.clear_override('ovn_dhcp4_global_options', group='ovn') def _create_subnet(self, name, net_id, ip_version=4): data = {'subnet': {'name': name, 
'tenant_id': self._tenant_id, 'network_id': net_id, 'ip_version': ip_version, 'enable_dhcp': True}} if ip_version == 4: data['subnet']['cidr'] = '10.0.0.0/24' else: data['subnet']['cidr'] = 'eef0::/64' req = self.new_create_request('subnets', data, self.fmt) res = req.get_response(self.api) return self.deserialize(self.fmt, res)['subnet'] def _update_subnet_enable_dhcp(self, subnet_id, value): data = {'subnet': {'enable_dhcp': value}} req = self.new_update_request('subnets', data, subnet_id, self.fmt) res = req.get_response(self.api) return self.deserialize(self.fmt, res)['subnet'] def _find_subnet_row_by_id(self, subnet_id): for row in self.nb_api._tables['DHCP_Options'].rows.values(): if (row.external_ids.get('subnet_id') == subnet_id and not row.external_ids.get('port_id')): return row def _create_router(self, name, external_gateway_info=None): data = {'router': {'name': name, 'tenant_id': self._tenant_id}} if external_gateway_info is not None: data['router']['external_gateway_info'] = external_gateway_info req = self.new_create_request('routers', data, self.fmt) res = req.get_response(self.api) return self.deserialize(self.fmt, res)['router'] def _update_router_name(self, net_id, new_name): data = {'router': {'name': new_name}} req = self.new_update_request('routers', data, net_id, self.fmt) res = req.get_response(self.api) return self.deserialize(self.fmt, res)['router'] def _find_router_row_by_name(self, name): for row in self.nb_api._tables['Logical_Router'].rows.values(): if (row.external_ids.get( ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY) == name): return row def _create_security_group(self): data = {'security_group': {'name': 'sgtest', 'tenant_id': self._tenant_id, 'description': 'SpongeBob Rocks!'}} req = self.new_create_request('security-groups', data, self.fmt) res = req.get_response(self.api) return self.deserialize(self.fmt, res)['security_group'] def _find_security_group_row_by_id(self, sg_id): if self.nb_api.is_port_groups_supported(): for row in self.nb_api._tables['Port_Group'].rows.values(): if row.name == utils.ovn_port_group_name(sg_id): return row else: for row in self.nb_api._tables['Address_Set'].rows.values(): if (row.external_ids.get( ovn_const.OVN_SG_EXT_ID_KEY) == sg_id): return row def _create_security_group_rule(self, sg_id): data = {'security_group_rule': {'security_group_id': sg_id, 'direction': 'ingress', 'protocol': n_const.PROTO_NAME_TCP, 'ethertype': n_const.IPv4, 'port_range_min': 22, 'port_range_max': 22, 'tenant_id': self._tenant_id}} req = self.new_create_request('security-group-rules', data, self.fmt) res = req.get_response(self.api) return self.deserialize(self.fmt, res)['security_group_rule'] def _find_security_group_rule_row_by_id(self, sgr_id): for row in self.nb_api._tables['ACL'].rows.values(): if (row.external_ids.get( ovn_const.OVN_SG_RULE_EXT_ID_KEY) == sgr_id): return row def _process_router_interface(self, action, router_id, subnet_id): req = self.new_action_request( 'routers', {'subnet_id': subnet_id}, router_id, '%s_router_interface' % action) res = req.get_response(self.api) return self.deserialize(self.fmt, res) def _add_router_interface(self, router_id, subnet_id): return self._process_router_interface('add', router_id, subnet_id) def _remove_router_interface(self, router_id, subnet_id): return self._process_router_interface('remove', router_id, subnet_id) def _find_router_port_row_by_port_id(self, port_id): for row in self.nb_api._tables['Logical_Router_Port'].rows.values(): if row.name == utils.ovn_lrouter_port_name(port_id): return 
row class TestMaintenance(_TestMaintenanceHelper): def test_network(self): net_name = 'networktest' with mock.patch.object(self._ovn_client, 'create_network'): neutron_obj = self._create_network(net_name) # Assert the network doesn't exist in OVN self.assertIsNone(self._find_network_row_by_name(net_name)) # Call the maintenance thread to fix the problem self.maint.check_for_inconsistencies() # Assert the network was now created ovn_obj = self._find_network_row_by_name(net_name) self.assertIsNotNone(ovn_obj) self.assertEqual( neutron_obj['revision_number'], int(ovn_obj.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY])) # > Update new_obj_name = 'networktest_updated' with mock.patch.object(self._ovn_client, 'update_network'): new_neutron_obj = self._update_network_name(neutron_obj['id'], new_obj_name) # Assert the revision numbers are out-of-sync ovn_obj = self._find_network_row_by_name(net_name) self.assertNotEqual( new_neutron_obj['revision_number'], int(ovn_obj.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY])) # Call the maintenance thread to fix the problem self.maint.check_for_inconsistencies() # Assert the old name doesn't exist anymore in the OVNDB self.assertIsNone(self._find_network_row_by_name(net_name)) # Assert the network is now in sync ovn_obj = self._find_network_row_by_name(new_obj_name) self.assertEqual( new_neutron_obj['revision_number'], int(ovn_obj.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY])) # > Delete with mock.patch.object(self._ovn_client, 'delete_network'): self._delete('networks', new_neutron_obj['id']) # Assert the network still exists in OVNDB self.assertIsNotNone(self._find_network_row_by_name(new_obj_name)) # Call the maintenance thread to fix the problem self.maint.check_for_inconsistencies() # Assert the network is now deleted from OVNDB self.assertIsNone(self._find_network_row_by_name(new_obj_name)) # Assert the revision number no longer exists self.assertIsNone(db_rev.get_revision_row( self.context, new_neutron_obj['id'])) def test_port(self): obj_name = 'porttest' neutron_net = self._create_network('network1') with mock.patch.object(self._ovn_client, 'create_port'): neutron_obj = self._create_port(obj_name, neutron_net['id']) # Assert the port doesn't exist in OVN self.assertIsNone(self._find_port_row_by_name(obj_name)) # Call the maintenance thread to fix the problem self.maint.check_for_inconsistencies() # Assert the port was now created ovn_obj = self._find_port_row_by_name(obj_name) self.assertIsNotNone(ovn_obj) self.assertEqual( neutron_obj['revision_number'], int(ovn_obj.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY])) # > Update new_obj_name = 'porttest_updated' with mock.patch.object(self._ovn_client, 'update_port'): new_neutron_obj = self._update_port_name(neutron_obj['id'], new_obj_name) # Assert the revision numbers are out-of-sync ovn_obj = self._find_port_row_by_name(obj_name) self.assertNotEqual( new_neutron_obj['revision_number'], int(ovn_obj.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY])) # Call the maintenance thread to fix the problem self.maint.check_for_inconsistencies() # Assert the old name doesn't exist anymore in the OVNDB self.assertIsNone(self._find_port_row_by_name(obj_name)) # Assert the port is now in sync. Note that for ports we are # fetching it again from the Neutron database prior to comparison # because of the monitor code that can update the ports again upon # changes to it. 
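        # (For example, the OVN monitor may bump the port's revision_number
        # right after the update above lands, so comparing against the now
        # stale API response could flag a false mismatch.)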
ovn_obj = self._find_port_row_by_name(new_obj_name) new_neutron_obj = self._ovn_client._plugin.get_port( self.context, neutron_obj['id']) self.assertEqual( new_neutron_obj['revision_number'], int(ovn_obj.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY])) # > Delete with mock.patch.object(self._ovn_client, 'delete_port'): self._delete('ports', new_neutron_obj['id']) # Assert the port still exists in OVNDB self.assertIsNotNone(self._find_port_row_by_name(new_obj_name)) # Call the maintenance thread to fix the problem self.maint.check_for_inconsistencies() # Assert the port is now deleted from OVNDB self.assertIsNone(self._find_port_row_by_name(new_obj_name)) # Assert the revision number no longer exists self.assertIsNone(db_rev.get_revision_row( self.context, neutron_obj['id'])) def test_subnet_global_dhcp4_opts(self): obj_name = 'globaltestsubnet' options = {'ntp_server': '1.2.3.4'} neutron_net = self._create_network('network1') # Create a subnet without global options neutron_sub = self._create_subnet(obj_name, neutron_net['id']) # Assert that the option is not set ovn_obj = self._find_subnet_row_by_id(neutron_sub['id']) self.assertIsNone(ovn_obj.options.get('ntp_server', None)) # Set some global DHCP Options self._set_global_dhcp_opts(ip_version=4, opts=options) # Run the maintenance task to add the new options self.assertRaises(periodics.NeverAgain, self.maint.check_global_dhcp_opts) # Assert that the option was added ovn_obj = self._find_subnet_row_by_id(neutron_sub['id']) self.assertEqual( ovn_obj.options.get('ntp_server', None), '1.2.3.4') # Change the global option new_options = {'ntp_server': '4.3.2.1'} self._set_global_dhcp_opts(ip_version=4, opts=new_options) # Run the maintenance task to update the options self.assertRaises(periodics.NeverAgain, self.maint.check_global_dhcp_opts) # Assert that the option was changed ovn_obj = self._find_subnet_row_by_id(neutron_sub['id']) self.assertEqual( ovn_obj.options.get('ntp_server', None), '4.3.2.1') # Change the global option to null new_options = {'ntp_server': ''} self._set_global_dhcp_opts(ip_version=4, opts=new_options) # Run the maintenance task to update the options self.assertRaises(periodics.NeverAgain, self.maint.check_global_dhcp_opts) # Assert that the option was removed ovn_obj = self._find_subnet_row_by_id(neutron_sub['id']) self.assertIsNone(ovn_obj.options.get('ntp_server', None)) def test_subnet_global_dhcp6_opts(self): obj_name = 'globaltestsubnet' options = {'ntp_server': '1.2.3.4'} neutron_net = self._create_network('network1') # Create a subnet without global options neutron_sub = self._create_subnet(obj_name, neutron_net['id'], 6) # Assert that the option is not set ovn_obj = self._find_subnet_row_by_id(neutron_sub['id']) self.assertIsNone(ovn_obj.options.get('ntp_server', None)) # Set some global DHCP Options self._set_global_dhcp_opts(ip_version=6, opts=options) # Run the maintenance task to add the new options self.assertRaises(periodics.NeverAgain, self.maint.check_global_dhcp_opts) # Assert that the option was added ovn_obj = self._find_subnet_row_by_id(neutron_sub['id']) self.assertEqual( ovn_obj.options.get('ntp_server', None), '1.2.3.4') # Change the global option new_options = {'ntp_server': '4.3.2.1'} self._set_global_dhcp_opts(ip_version=6, opts=new_options) # Run the maintenance task to update the options self.assertRaises(periodics.NeverAgain, self.maint.check_global_dhcp_opts) # Assert that the option was changed ovn_obj = self._find_subnet_row_by_id(neutron_sub['id']) self.assertEqual( 
            ovn_obj.options.get('ntp_server', None), '4.3.2.1')

        # Change the global option to null
        new_options = {'ntp_server': ''}
        self._set_global_dhcp_opts(ip_version=6, opts=new_options)

        # Run the maintenance task to update the options
        self.assertRaises(periodics.NeverAgain,
                          self.maint.check_global_dhcp_opts)

        # Assert that the option was removed
        ovn_obj = self._find_subnet_row_by_id(neutron_sub['id'])
        self.assertIsNone(ovn_obj.options.get('ntp_server', None))

    def test_subnet(self):
        obj_name = 'subnettest'
        neutron_net = self._create_network('network1')

        with mock.patch.object(self._ovn_client, 'create_subnet'):
            neutron_obj = self._create_subnet(obj_name, neutron_net['id'])

        # Assert the subnet doesn't exist in OVN
        self.assertIsNone(self._find_subnet_row_by_id(neutron_obj['id']))

        # Call the maintenance thread to fix the problem
        self.maint.check_for_inconsistencies()

        # Assert the subnet was now created
        ovn_obj = self._find_subnet_row_by_id(neutron_obj['id'])
        self.assertIsNotNone(ovn_obj)
        self.assertEqual(
            neutron_obj['revision_number'],
            int(ovn_obj.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY]))

        # > Update
        with mock.patch.object(self._ovn_client, 'update_subnet'):
            neutron_obj = self._update_subnet_enable_dhcp(
                neutron_obj['id'], False)

        # Assert the revision numbers are out-of-sync
        ovn_obj = self._find_subnet_row_by_id(neutron_obj['id'])
        self.assertNotEqual(
            neutron_obj['revision_number'],
            int(ovn_obj.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY]))

        # Call the maintenance thread to fix the problem
        self.maint.check_for_inconsistencies()

        # Assert the subnet row doesn't exist anymore in the OVNDB. When
        # the subnet's enable_dhcp is set to False, OVN will remove the
        # DHCP_Options entry related to that subnet.
        self.assertIsNone(self._find_subnet_row_by_id(neutron_obj['id']))

        # Re-enable the DHCP for the subnet and check if the maintenance
        # thread will re-create it in OVN
        with mock.patch.object(self._ovn_client, 'update_subnet'):
            neutron_obj = self._update_subnet_enable_dhcp(
                neutron_obj['id'], True)

        # Assert the DHCP_Options still doesn't exist in OVNDB
        self.assertIsNone(self._find_subnet_row_by_id(neutron_obj['id']))

        # Call the maintenance thread to fix the problem
        self.maint.check_for_inconsistencies()

        # Assert the subnet is now in sync
        ovn_obj = self._find_subnet_row_by_id(neutron_obj['id'])
        self.assertEqual(
            neutron_obj['revision_number'],
            int(ovn_obj.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY]))

        # > Delete
        with mock.patch.object(self._ovn_client, 'delete_subnet'):
            self._delete('subnets', neutron_obj['id'])

        # Assert the subnet still exists in OVNDB
        self.assertIsNotNone(self._find_subnet_row_by_id(neutron_obj['id']))

        # Call the maintenance thread to fix the problem
        self.maint.check_for_inconsistencies()

        # Assert the subnet is now deleted from OVNDB
        self.assertIsNone(self._find_subnet_row_by_id(neutron_obj['id']))

        # Assert the revision number no longer exists
        self.assertIsNone(db_rev.get_revision_row(
            self.context, neutron_obj['id']))

    def test_router(self):
        obj_name = 'routertest'
        with mock.patch.object(self._l3_ovn_client, 'create_router'):
            neutron_obj = self._create_router(obj_name)

        # Assert the router doesn't exist in OVN
        self.assertIsNone(self._find_router_row_by_name(obj_name))

        # Call the maintenance thread to fix the problem
        self.maint.check_for_inconsistencies()

        # Assert the router was now created
        ovn_obj = self._find_router_row_by_name(obj_name)
        self.assertIsNotNone(ovn_obj)
        self.assertEqual(
            neutron_obj['revision_number'],
            int(ovn_obj.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY]))

        # >
Update new_obj_name = 'routertest_updated' with mock.patch.object(self._l3_ovn_client, 'update_router'): new_neutron_obj = self._update_router_name(neutron_obj['id'], new_obj_name) # Assert the revision numbers are out-of-sync ovn_obj = self._find_router_row_by_name(obj_name) self.assertNotEqual( new_neutron_obj['revision_number'], int(ovn_obj.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY])) # Call the maintenance thread to fix the problem self.maint.check_for_inconsistencies() # Assert the old name doesn't exist anymore in the OVNDB self.assertIsNone(self._find_router_row_by_name(obj_name)) # Assert the router is now in sync ovn_obj = self._find_router_row_by_name(new_obj_name) self.assertEqual( new_neutron_obj['revision_number'], int(ovn_obj.external_ids[ovn_const.OVN_REV_NUM_EXT_ID_KEY])) # > Delete with mock.patch.object(self._l3_ovn_client, 'delete_router'): self._delete('routers', new_neutron_obj['id']) # Assert the router still exists in OVNDB self.assertIsNotNone(self._find_router_row_by_name(new_obj_name)) # Call the maintenance thread to fix the problem self.maint.check_for_inconsistencies() # Assert the router is now deleted from OVNDB self.assertIsNone(self._find_router_row_by_name(new_obj_name)) # Assert the revision number no longer exists self.assertIsNone(db_rev.get_revision_row( self.context, new_neutron_obj['id'])) def test_security_group(self): with mock.patch.object(self._ovn_client, 'create_security_group'): neutron_obj = self._create_security_group() # Assert the sg doesn't exist in OVN self.assertIsNone( self._find_security_group_row_by_id(neutron_obj['id'])) # Call the maintenance thread to fix the problem self.maint.check_for_inconsistencies() # Assert the sg was now created. We don't save the revision number # in the Security Group because OVN doesn't support updating it, # all we care about is whether it exists or not. self.assertIsNotNone( self._find_security_group_row_by_id(neutron_obj['id'])) # > Delete with mock.patch.object(self._ovn_client, 'delete_security_group'): self._delete('security-groups', neutron_obj['id']) # Assert the sg still exists in OVNDB self.assertIsNotNone( self._find_security_group_row_by_id(neutron_obj['id'])) # Call the maintenance thread to fix the problem self.maint.check_for_inconsistencies() # Assert the sg is now deleted from OVNDB self.assertIsNone( self._find_security_group_row_by_id(neutron_obj['id'])) # Assert the revision number no longer exists self.assertIsNone(db_rev.get_revision_row( self.context, neutron_obj['id'])) def test_security_group_rule(self): neutron_sg = self._create_security_group() neutron_net = self._create_network('network1') self._create_port('portsgtest', neutron_net['id'], security_groups=[neutron_sg['id']]) with mock.patch.object(self._ovn_client, 'create_security_group_rule'): neutron_obj = self._create_security_group_rule(neutron_sg['id']) # Assert the sg rule doesn't exist in OVN self.assertIsNone( self._find_security_group_rule_row_by_id(neutron_obj['id'])) # Call the maintenance thread to fix the problem self.maint.check_for_inconsistencies() # Assert the sg rule was now created. We don't save the revision number # in the Security Group because OVN doesn't support updating it, # all we care about is whether it exists or not. 
self.assertIsNotNone( self._find_security_group_rule_row_by_id(neutron_obj['id'])) # > Delete # FIXME(lucasagomes): Maintenance thread fixing deleted # security group rules is currently broken due to: # https://bugs.launchpad.net/networking-ovn/+bug/1756123 def test_router_port(self): neutron_net = self._create_network('networktest', external=True) neutron_subnet = self._create_subnet('subnettest', neutron_net['id']) neutron_router = self._create_router('routertest') with mock.patch.object(self._l3_ovn_client, 'create_router_port'): with mock.patch('neutron.db.ovn_revision_numbers_db.' 'bump_revision'): neutron_obj = self._add_router_interface(neutron_router['id'], neutron_subnet['id']) # Assert the router port doesn't exist in OVN self.assertIsNone( self._find_router_port_row_by_port_id(neutron_obj['port_id'])) # Call the maintenance thread to fix the problem self.maint.check_for_inconsistencies() # Assert the router port was now created self.assertIsNotNone( self._find_router_port_row_by_port_id(neutron_obj['port_id'])) # > Delete with mock.patch.object(self._l3_ovn_client, 'delete_router_port'): self._remove_router_interface(neutron_router['id'], neutron_subnet['id']) # Assert the router port still exists in OVNDB self.assertIsNotNone( self._find_router_port_row_by_port_id(neutron_obj['port_id'])) # Call the maintenance thread to fix the problem self.maint.check_for_inconsistencies() # Assert the router port is now deleted from OVNDB self.assertIsNone( self._find_router_port_row_by_port_id(neutron_obj['port_id'])) # Assert the revision number no longer exists self.assertIsNone(db_rev.get_revision_row( self.context, neutron_obj['port_id'])) def test_check_metadata_ports(self): ovn_config.cfg.CONF.set_override('ovn_metadata_enabled', True, group='ovn') neutron_net = self._create_network('network1') metadata_port = self._ovn_client._find_metadata_port( self.context, neutron_net['id']) # Assert the metadata port exists self.assertIsNotNone(metadata_port) # Delete the metadata port self._delete('ports', metadata_port['id']) metadata_port = self._ovn_client._find_metadata_port( self.context, neutron_net['id']) # Assert the metadata port is gone self.assertIsNone(metadata_port) # Call the maintenance thread to fix the problem, it will raise # NeverAgain so that the job only runs once at startup self.assertRaises(periodics.NeverAgain, self.maint.check_metadata_ports) metadata_port = self._ovn_client._find_metadata_port( self.context, neutron_net['id']) # Assert the metadata port was re-created self.assertIsNotNone(metadata_port) def test_check_metadata_ports_not_enabled(self): ovn_config.cfg.CONF.set_override('ovn_metadata_enabled', False, group='ovn') with mock.patch.object(self._ovn_client, 'create_metadata_port') as mock_create_port: self.assertRaises(periodics.NeverAgain, self.maint.check_metadata_ports) # Assert create_metadata_port() wasn't called since metadata # is not enabled self.assertFalse(mock_create_port.called) def test_check_for_port_security_unknown_address(self): neutron_net = self._create_network('network1') neutron_port = self._create_port('port1', neutron_net['id']) # Let's force disabling port security for the LSP self.nb_api.lsp_set_port_security(neutron_port['id'], []).execute( check_error=True) ovn_port = self.nb_api.db_find( 'Logical_Switch_Port', ('name', '=', neutron_port['id'])).execute( check_error=True)[0] # Assert that port security is now disabled but the 'unknown' # is not set in the addresses column self.assertFalse(ovn_port['port_security']) 
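        # In OVN, the special 'unknown' entry in a Logical_Switch_Port's
        # addresses column is what allows the port to receive traffic for
        # MAC addresses the switch does not know, i.e. the behaviour a port
        # with port security disabled needs. The maintenance task is
        # expected to keep that entry in step with the port_security column.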
        self.assertNotIn('unknown', ovn_port['addresses'])

        # Call the maintenance task to fix the problem. Note that
        # NeverAgain is raised so it only runs once at startup
        self.assertRaises(periodics.NeverAgain,
                          self.maint.check_for_port_security_unknown_address)

        ovn_port = self.nb_api.db_find(
            'Logical_Switch_Port',
            ('name', '=', neutron_port['id'])).execute(
            check_error=True)[0]

        # Assert that 'unknown' was set in the addresses column for
        # the port
        self.assertFalse(ovn_port['port_security'])
        self.assertIn('unknown', ovn_port['addresses'])

        # Now the other way around, let's set port_security in the OVN
        # table while the 'unknown' address is set in the addresses column
        self.nb_api.lsp_set_port_security(
            neutron_port['id'], ovn_port['addresses']).execute(
            check_error=True)

        ovn_port = self.nb_api.db_find(
            'Logical_Switch_Port',
            ('name', '=', neutron_port['id'])).execute(
            check_error=True)[0]
        self.assertTrue(ovn_port['port_security'])
        self.assertIn('unknown', ovn_port['addresses'])

        # Call the maintenance task to fix the problem. Note that
        # NeverAgain is raised so it only runs once at startup
        self.assertRaises(periodics.NeverAgain,
                          self.maint.check_for_port_security_unknown_address)

        ovn_port = self.nb_api.db_find(
            'Logical_Switch_Port',
            ('name', '=', neutron_port['id'])).execute(
            check_error=True)[0]

        # Assert that 'unknown' was removed from the addresses column
        # for the port
        self.assertTrue(ovn_port['port_security'])
        self.assertNotIn('unknown', ovn_port['addresses'])

    def test_check_for_igmp_snooping_enabled(self):
        cfg.CONF.set_override('igmp_snooping_enable', False, group='OVS')
        net = self._create_network('net')
        ls = self.nb_api.db_find(
            'Logical_Switch',
            ('name', '=', utils.ovn_name(net['id']))).execute(
            check_error=True)[0]

        self.assertEqual('false', ls['other_config'][ovn_const.MCAST_SNOOP])
        self.assertEqual(
            'false', ls['other_config'][ovn_const.MCAST_FLOOD_UNREGISTERED])

        # Change the value of the configuration
        cfg.CONF.set_override('igmp_snooping_enable', True, group='OVS')

        # Call the maintenance task and check that the value has been
        # updated in the Logical Switch
        self.assertRaises(periodics.NeverAgain,
                          self.maint.check_for_igmp_snoop_support)
        ls = self.nb_api.db_find(
            'Logical_Switch',
            ('name', '=', utils.ovn_name(net['id']))).execute(
            check_error=True)[0]

        self.assertEqual('true', ls['other_config'][ovn_const.MCAST_SNOOP])
        self.assertEqual(
            'true', ls['other_config'][ovn_const.MCAST_FLOOD_UNREGISTERED])

neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_resources.py

# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
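# Reader aid (added; nothing below uses these): the assertions in this module
# compare OVN DHCP_Options rows against dicts of the following two shapes.
# All values are illustrative; only the 'subnet_id'/'port_id' external_ids
# keys mirror what the tests build.
#
# Subnet-level row: one per DHCP-enabled subnet.
EXAMPLE_SUBNET_DHCP_ROW = {
    'cidr': '10.0.0.0/24',
    'external_ids': {'subnet_id': '<subnet-uuid>'},
    'options': {'server_id': '10.0.0.1', 'lease_time': '43200'},
}
# Port-level row: created only when a port defines extra DHCP options. It
# carries both keys so the row can be traced back to its subnet and port.
EXAMPLE_PORT_DHCP_ROW = {
    'cidr': '10.0.0.0/24',
    'external_ids': {'subnet_id': '<subnet-uuid>', 'port_id': '<port-uuid>'},
    'options': {'server_id': '10.0.0.1', 'mtu': '1100'},
}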
import mock import netaddr from neutron_lib.api.definitions import dns as dns_apidef from neutron_lib.utils import net as n_net from oslo_config import cfg from ovsdbapp.backend.ovs_idl import idlutils from neutron.common.ovn import constants as ovn_const from neutron.common.ovn import utils from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf as ovn_config from neutron.tests.functional import base class TestNBDbResources(base.TestOVNFunctionalBase): _extension_drivers = ['dns'] def setUp(self): super(TestNBDbResources, self).setUp() self.orig_get_random_mac = n_net.get_random_mac cfg.CONF.set_override('quota_subnet', -1, group='QUOTAS') ovn_config.cfg.CONF.set_override('ovn_metadata_enabled', False, group='ovn') ovn_config.cfg.CONF.set_override('dns_domain', 'ovn.test') # FIXME(lucasagomes): Map the revision numbers properly instead # of stripping them out. Currently, tests like test_dhcp_options() # are quite complex making it difficult to map the exact the revision # number that the DHCP Option will be at assertion time, we need to # refactor it a little to make it easier for mapping these updates. def _strip_revision_number(self, ext_ids): ext_ids.pop(ovn_const.OVN_REV_NUM_EXT_ID_KEY, None) return ext_ids def _verify_dhcp_option_rows(self, expected_dhcp_options_rows): expected_dhcp_options_rows = list(expected_dhcp_options_rows.values()) observed_dhcp_options_rows = [] for row in self.nb_api.tables['DHCP_Options'].rows.values(): ext_ids = self._strip_revision_number(row.external_ids) observed_dhcp_options_rows.append({ 'cidr': row.cidr, 'external_ids': ext_ids, 'options': row.options}) self.assertItemsEqual(expected_dhcp_options_rows, observed_dhcp_options_rows) def _verify_dhcp_option_row_for_port(self, port_id, expected_lsp_dhcpv4_options, expected_lsp_dhcpv6_options=None): lsp = idlutils.row_by_value(self.nb_api.idl, 'Logical_Switch_Port', 'name', port_id, None) if lsp.dhcpv4_options: ext_ids = self._strip_revision_number( lsp.dhcpv4_options[0].external_ids) observed_lsp_dhcpv4_options = { 'cidr': lsp.dhcpv4_options[0].cidr, 'external_ids': ext_ids, 'options': lsp.dhcpv4_options[0].options} else: observed_lsp_dhcpv4_options = {} if lsp.dhcpv6_options: ext_ids = self._strip_revision_number( lsp.dhcpv6_options[0].external_ids) observed_lsp_dhcpv6_options = { 'cidr': lsp.dhcpv6_options[0].cidr, 'external_ids': ext_ids, 'options': lsp.dhcpv6_options[0].options} else: observed_lsp_dhcpv6_options = {} if expected_lsp_dhcpv6_options is None: expected_lsp_dhcpv6_options = {} self.assertEqual(expected_lsp_dhcpv4_options, observed_lsp_dhcpv4_options) self.assertEqual(expected_lsp_dhcpv6_options, observed_lsp_dhcpv6_options) def _get_subnet_dhcp_mac(self, subnet): mac_key = 'server_id' if subnet['ip_version'] == 6 else 'server_mac' dhcp_options = self.mech_driver._nb_ovn.get_subnet_dhcp_options( subnet['id'])['subnet'] return dhcp_options.get('options', {}).get( mac_key) if dhcp_options else None def test_dhcp_options(self): """Test for DHCP_Options table rows When a new subnet is created, a new row has to be created in the DHCP_Options table for this subnet with the dhcp options stored in the DHCP_Options.options column. When ports are created for this subnet (with IPv4 address set and DHCP enabled in the subnet), the Logical_Switch_Port.dhcpv4_options column should refer to the appropriate row of DHCP_Options. 
        In cases where a port has extra DHCPv4 options defined, a new
        row in the DHCP_Options table should be created for this port and
        the Logical_Switch_Port.dhcpv4_options column should refer to this
        row.

        In order to map the DHCP_Options row to the subnet (and to a port),
        subnet_id is stored in the DHCP_Options.external_ids column. For a
        DHCP_Options row which belongs to a port, port_id is also stored
        in DHCP_Options.external_ids along with the subnet_id.
        (Illustrative shapes of both row kinds are sketched near the top
        of this module.)
        """
        n1 = self._make_network(self.fmt, 'n1', True)
        created_subnets = {}
        expected_dhcp_options_rows = {}
        dhcp_mac = {}
        for cidr in ['10.0.0.0/24', '20.0.0.0/24', '30.0.0.0/24',
                     '40.0.0.0/24', 'aef0::/64', 'bef0::/64']:
            ip_version = netaddr.IPNetwork(cidr).ip.version
            res = self._create_subnet(self.fmt, n1['network']['id'], cidr,
                                      ip_version=ip_version)
            subnet = self.deserialize(self.fmt, res)['subnet']
            created_subnets[cidr] = subnet
            dhcp_mac[subnet['id']] = self._get_subnet_dhcp_mac(subnet)
            if ip_version == 4:
                options = {'server_id': cidr.replace('0/24', '1'),
                           'server_mac': dhcp_mac[subnet['id']],
                           'lease_time': str(12 * 60 * 60),
                           'domain_name': '"%s"' % cfg.CONF.dns_domain,
                           'dns_server': '{10.10.10.10}',
                           'mtu': str(n1['network']['mtu']),
                           'router': subnet['gateway_ip']}
            else:
                options = {'server_id': dhcp_mac[subnet['id']]}

            expected_dhcp_options_rows[subnet['id']] = {
                'cidr': cidr,
                'external_ids': {'subnet_id': subnet['id']},
                'options': options}

        for (cidr, enable_dhcp, gateway_ip) in [
                ('50.0.0.0/24', False, '50.0.0.1'),
                ('60.0.0.0/24', True, None),
                ('cef0::/64', False, 'cef0::1'),
                ('def0::/64', True, None)]:
            ip_version = netaddr.IPNetwork(cidr).ip.version
            res = self._create_subnet(self.fmt, n1['network']['id'], cidr,
                                      ip_version=ip_version,
                                      enable_dhcp=enable_dhcp,
                                      gateway_ip=gateway_ip)
            subnet = self.deserialize(self.fmt, res)['subnet']
            created_subnets[cidr] = subnet
            dhcp_mac[subnet['id']] = self._get_subnet_dhcp_mac(subnet)
            if enable_dhcp:
                if ip_version == 4:
                    options = {}
                else:
                    options = {'server_id': dhcp_mac[subnet['id']]}
                expected_dhcp_options_rows[subnet['id']] = {
                    'cidr': cidr,
                    'external_ids': {'subnet_id': subnet['id']},
                    'options': options}

        # create a subnet with dns nameservers and host routes
        n2 = self._make_network(self.fmt, 'n2', True)
        res = self._create_subnet(
            self.fmt, n2['network']['id'], '10.0.0.0/24',
            dns_nameservers=['7.7.7.7', '8.8.8.8'],
            host_routes=[{'destination': '30.0.0.0/24',
                          'nexthop': '10.0.0.4'},
                         {'destination': '40.0.0.0/24',
                          'nexthop': '10.0.0.8'}])
        subnet = self.deserialize(self.fmt, res)['subnet']
        dhcp_mac[subnet['id']] = self._get_subnet_dhcp_mac(subnet)

        static_routes = ('{30.0.0.0/24,10.0.0.4, 40.0.0.0/24,'
                         '10.0.0.8, 0.0.0.0/0,10.0.0.1}')
        expected_dhcp_options_rows[subnet['id']] = {
            'cidr': '10.0.0.0/24',
            'external_ids': {'subnet_id': subnet['id']},
            'options': {'server_id': '10.0.0.1',
                        'server_mac': dhcp_mac[subnet['id']],
                        'lease_time': str(12 * 60 * 60),
                        'mtu': str(n2['network']['mtu']),
                        'router': subnet['gateway_ip'],
                        'domain_name': '"%s"' % cfg.CONF.dns_domain,
                        'dns_server': '{7.7.7.7, 8.8.8.8}',
                        'classless_static_route': static_routes}}

        # create an IPv6 subnet with dns nameservers
        res = self._create_subnet(
            self.fmt, n2['network']['id'], 'ae10::/64', ip_version=6,
            dns_nameservers=['be10::7', 'be10::8'])
        subnet = self.deserialize(self.fmt, res)['subnet']
        dhcp_mac[subnet['id']] = self._get_subnet_dhcp_mac(subnet)
        expected_dhcp_options_rows[subnet['id']] = {
            'cidr': 'ae10::/64',
            'external_ids': {'subnet_id': subnet['id']},
            'options': {'server_id': dhcp_mac[subnet['id']],
                        'dns_server': '{be10::7, be10::8}'}}
        # Verify
that DHCP_Options rows are created for these subnets or not self._verify_dhcp_option_rows(expected_dhcp_options_rows) for cidr in ['20.0.0.0/24', 'aef0::/64']: subnet = created_subnets[cidr] # Disable dhcp in subnet and verify DHCP_Options data = {'subnet': {'enable_dhcp': False}} req = self.new_update_request('subnets', data, subnet['id']) req.get_response(self.api) options = expected_dhcp_options_rows.pop(subnet['id']) self._verify_dhcp_option_rows(expected_dhcp_options_rows) # Re-enable dhcp in subnet and verify DHCP_Options n_net.get_random_mac = mock.Mock() n_net.get_random_mac.return_value = dhcp_mac[subnet['id']] data = {'subnet': {'enable_dhcp': True}} req = self.new_update_request('subnets', data, subnet['id']) req.get_response(self.api) expected_dhcp_options_rows[subnet['id']] = options self._verify_dhcp_option_rows(expected_dhcp_options_rows) n_net.get_random_mac = self.orig_get_random_mac # Create a port and verify if Logical_Switch_Port.dhcpv4_options # is properly set or not subnet = created_subnets['40.0.0.0/24'] subnet_v6 = created_subnets['aef0::/64'] p = self._make_port( self.fmt, n1['network']['id'], fixed_ips=[ {'subnet_id': subnet['id']}, {'subnet_id': subnet_v6['id']}]) self._verify_dhcp_option_row_for_port( p['port']['id'], expected_dhcp_options_rows[subnet['id']], expected_dhcp_options_rows[subnet_v6['id']]) self._verify_dhcp_option_rows(expected_dhcp_options_rows) # create a port with dhcp disabled subnet subnet = created_subnets['50.0.0.0/24'] p = self._make_port(self.fmt, n1['network']['id'], fixed_ips=[{'subnet_id': subnet['id']}]) self._verify_dhcp_option_row_for_port(p['port']['id'], {}) self._verify_dhcp_option_rows(expected_dhcp_options_rows) # Delete the first subnet created subnet = created_subnets['10.0.0.0/24'] req = self.new_delete_request('subnets', subnet['id']) req.get_response(self.api) # Verify that DHCP_Options rows are deleted or not del expected_dhcp_options_rows[subnet['id']] self._verify_dhcp_option_rows(expected_dhcp_options_rows) def test_port_dhcp_options(self): dhcp_mac = {} n1 = self._make_network(self.fmt, 'n1', True) res = self._create_subnet(self.fmt, n1['network']['id'], '10.0.0.0/24') subnet = self.deserialize(self.fmt, res)['subnet'] dhcp_mac[subnet['id']] = self._get_subnet_dhcp_mac(subnet) res = self._create_subnet(self.fmt, n1['network']['id'], 'aef0::/64', ip_version=6) subnet_v6 = self.deserialize(self.fmt, res)['subnet'] dhcp_mac[subnet_v6['id']] = self._get_subnet_dhcp_mac(subnet_v6) expected_dhcp_options_rows = { subnet['id']: { 'cidr': '10.0.0.0/24', 'external_ids': {'subnet_id': subnet['id']}, 'options': {'server_id': '10.0.0.1', 'server_mac': dhcp_mac[subnet['id']], 'lease_time': str(12 * 60 * 60), 'domain_name': '"%s"' % cfg.CONF.dns_domain, 'dns_server': '{10.10.10.10}', 'mtu': str(n1['network']['mtu']), 'router': subnet['gateway_ip']}}, subnet_v6['id']: { 'cidr': 'aef0::/64', 'external_ids': {'subnet_id': subnet_v6['id']}, 'options': {'server_id': dhcp_mac[subnet_v6['id']]}}} expected_dhcp_v4_options_rows = { subnet['id']: expected_dhcp_options_rows[subnet['id']]} expected_dhcp_v6_options_rows = { subnet_v6['id']: expected_dhcp_options_rows[subnet_v6['id']]} data = { 'port': {'network_id': n1['network']['id'], 'tenant_id': self._tenant_id, 'device_owner': 'compute:None', 'fixed_ips': [{'subnet_id': subnet['id']}], 'extra_dhcp_opts': [{'ip_version': 4, 'opt_name': 'mtu', 'opt_value': '1100'}, {'ip_version': 4, 'opt_name': 'ntp-server', 'opt_value': '8.8.8.8'}]}} port_req = self.new_create_request('ports', data, 
self.fmt) port_res = port_req.get_response(self.api) p1 = self.deserialize(self.fmt, port_res) expected_dhcp_options_rows['v4-' + p1['port']['id']] = { 'cidr': '10.0.0.0/24', 'external_ids': {'subnet_id': subnet['id'], 'port_id': p1['port']['id']}, 'options': {'server_id': '10.0.0.1', 'server_mac': dhcp_mac[subnet['id']], 'lease_time': str(12 * 60 * 60), 'domain_name': '"%s"' % cfg.CONF.dns_domain, 'dns_server': '{10.10.10.10}', 'mtu': '1100', 'router': subnet['gateway_ip'], 'ntp_server': '8.8.8.8'}} expected_dhcp_v4_options_rows['v4-' + p1['port']['id']] = \ expected_dhcp_options_rows['v4-' + p1['port']['id']] data = { 'port': {'network_id': n1['network']['id'], 'tenant_id': self._tenant_id, 'device_owner': 'compute:None', 'fixed_ips': [{'subnet_id': subnet['id']}], 'extra_dhcp_opts': [{'ip_version': 4, 'opt_name': 'ip-forward-enable', 'opt_value': '1'}, {'ip_version': 4, 'opt_name': 'tftp-server', 'opt_value': '10.0.0.100'}, {'ip_version': 4, 'opt_name': 'dns-server', 'opt_value': '20.20.20.20'}]}} port_req = self.new_create_request('ports', data, self.fmt) port_res = port_req.get_response(self.api) p2 = self.deserialize(self.fmt, port_res) expected_dhcp_options_rows['v4-' + p2['port']['id']] = { 'cidr': '10.0.0.0/24', 'external_ids': {'subnet_id': subnet['id'], 'port_id': p2['port']['id']}, 'options': {'server_id': '10.0.0.1', 'server_mac': dhcp_mac[subnet['id']], 'lease_time': str(12 * 60 * 60), 'mtu': str(n1['network']['mtu']), 'router': subnet['gateway_ip'], 'ip_forward_enable': '1', 'tftp_server': '10.0.0.100', 'domain_name': '"%s"' % cfg.CONF.dns_domain, 'dns_server': '20.20.20.20'}} expected_dhcp_v4_options_rows['v4-' + p2['port']['id']] = \ expected_dhcp_options_rows['v4-' + p2['port']['id']] data = { 'port': {'network_id': n1['network']['id'], 'tenant_id': self._tenant_id, 'device_owner': 'compute:None', 'fixed_ips': [{'subnet_id': subnet_v6['id']}], 'extra_dhcp_opts': [{'ip_version': 6, 'opt_name': 'dns-server', 'opt_value': 'aef0::1'}, {'ip_version': 6, 'opt_name': 'domain-search', 'opt_value': 'foo-domain'}]}} port_req = self.new_create_request('ports', data, self.fmt) port_res = port_req.get_response(self.api) p3 = self.deserialize(self.fmt, port_res) expected_dhcp_options_rows['v6-' + p3['port']['id']] = { 'cidr': 'aef0::/64', 'external_ids': {'subnet_id': subnet_v6['id'], 'port_id': p3['port']['id']}, 'options': {'server_id': dhcp_mac[subnet_v6['id']], 'dns_server': 'aef0::1', 'domain_search': 'foo-domain'}} expected_dhcp_v6_options_rows['v6-' + p3['port']['id']] = \ expected_dhcp_options_rows['v6-' + p3['port']['id']] data = { 'port': {'network_id': n1['network']['id'], 'tenant_id': self._tenant_id, 'device_owner': 'compute:None', 'fixed_ips': [{'subnet_id': subnet['id']}, {'subnet_id': subnet_v6['id']}], 'extra_dhcp_opts': [{'ip_version': 4, 'opt_name': 'tftp-server', 'opt_value': '100.0.0.100'}, {'ip_version': 6, 'opt_name': 'dns-server', 'opt_value': 'aef0::100'}, {'ip_version': 6, 'opt_name': 'domain-search', 'opt_value': 'bar-domain'}]}} port_req = self.new_create_request('ports', data, self.fmt) port_res = port_req.get_response(self.api) p4 = self.deserialize(self.fmt, port_res) expected_dhcp_options_rows['v6-' + p4['port']['id']] = { 'cidr': 'aef0::/64', 'external_ids': {'subnet_id': subnet_v6['id'], 'port_id': p4['port']['id']}, 'options': {'server_id': dhcp_mac[subnet_v6['id']], 'dns_server': 'aef0::100', 'domain_search': 'bar-domain'}} expected_dhcp_options_rows['v4-' + p4['port']['id']] = { 'cidr': '10.0.0.0/24', 'external_ids': {'subnet_id': subnet['id'], 
                             'port_id': p4['port']['id']},
            'options': {'server_id': '10.0.0.1',
                        'server_mac': dhcp_mac[subnet['id']],
                        'lease_time': str(12 * 60 * 60),
                        'domain_name': '"%s"' % cfg.CONF.dns_domain,
                        'dns_server': '{10.10.10.10}',
                        'mtu': str(n1['network']['mtu']),
                        'router': subnet['gateway_ip'],
                        'tftp_server': '100.0.0.100'}}
        expected_dhcp_v4_options_rows['v4-' + p4['port']['id']] = \
            expected_dhcp_options_rows['v4-' + p4['port']['id']]
        expected_dhcp_v6_options_rows['v6-' + p4['port']['id']] = \
            expected_dhcp_options_rows['v6-' + p4['port']['id']]

        # test port without extra_dhcp_opts but using subnet DHCP options
        data = {
            'port': {'network_id': n1['network']['id'],
                     'tenant_id': self._tenant_id,
                     'device_owner': 'compute:None',
                     'fixed_ips': [{'subnet_id': subnet['id']},
                                   {'subnet_id': subnet_v6['id']}]}}
        port_req = self.new_create_request('ports', data, self.fmt)
        port_res = port_req.get_response(self.api)
        p5 = self.deserialize(self.fmt, port_res)

        self._verify_dhcp_option_rows(expected_dhcp_options_rows)
        self._verify_dhcp_option_row_for_port(
            p1['port']['id'],
            expected_dhcp_options_rows['v4-' + p1['port']['id']])
        self._verify_dhcp_option_row_for_port(
            p2['port']['id'],
            expected_dhcp_options_rows['v4-' + p2['port']['id']])
        self._verify_dhcp_option_row_for_port(
            p3['port']['id'], {},
            expected_lsp_dhcpv6_options=expected_dhcp_options_rows[
                'v6-' + p3['port']['id']])
        self._verify_dhcp_option_row_for_port(
            p4['port']['id'],
            expected_dhcp_options_rows['v4-' + p4['port']['id']],
            expected_lsp_dhcpv6_options=expected_dhcp_options_rows[
                'v6-' + p4['port']['id']])
        self._verify_dhcp_option_row_for_port(
            p5['port']['id'],
            expected_dhcp_options_rows[subnet['id']],
            expected_lsp_dhcpv6_options=expected_dhcp_options_rows[
                subnet_v6['id']])

        # Update the subnet with dns_server. It should get propagated
        # to the DHCP options of p1. Note that it should not get
        # propagated to the DHCP options of port p2 because p2 has
        # overridden dns-server in its extra DHCP options.
        data = {'subnet': {'dns_nameservers': ['7.7.7.7', '8.8.8.8']}}
        req = self.new_update_request('subnets', data, subnet['id'])
        req.get_response(self.api)
        for i in [subnet['id'], 'v4-' + p1['port']['id'],
                  'v4-' + p4['port']['id']]:
            expected_dhcp_options_rows[i]['options']['dns_server'] = (
                '{7.7.7.7, 8.8.8.8}')
        self._verify_dhcp_option_rows(expected_dhcp_options_rows)

        # Update the port p2 by removing dns-server and tftp-server from
        # the extra DHCP options. The dns-server option from the subnet
        # DHCP options should then be reflected in the p2 DHCP options.
        data = {'port': {'extra_dhcp_opts': [{'ip_version': 4,
                                              'opt_name': 'ip-forward-enable',
                                              'opt_value': '0'},
                                             {'ip_version': 4,
                                              'opt_name': 'tftp-server',
                                              'opt_value': None},
                                             {'ip_version': 4,
                                              'opt_name': 'dns-server',
                                              'opt_value': None}]}}
        port_req = self.new_update_request('ports', data, p2['port']['id'])
        port_req.get_response(self.api)
        p2_expected = expected_dhcp_options_rows['v4-' + p2['port']['id']]
        p2_expected['options']['dns_server'] = '{7.7.7.7, 8.8.8.8}'
        p2_expected['options']['ip_forward_enable'] = '0'
        del p2_expected['options']['tftp_server']
        self._verify_dhcp_option_rows(expected_dhcp_options_rows)

        # Test subnet DHCP disabling and enabling
        for (subnet_id, expect_subnet_rows_disabled,
             expect_port_row_disabled) in [
                (subnet['id'], expected_dhcp_v6_options_rows,
                 [(p4, {},
                   expected_dhcp_options_rows['v6-' + p4['port']['id']]),
                  (p5, {}, expected_dhcp_options_rows[subnet_v6['id']])]),
                (subnet_v6['id'], expected_dhcp_v4_options_rows,
                 [(p4, expected_dhcp_options_rows['v4-' + p4['port']['id']],
                   {}),
                  (p5, expected_dhcp_options_rows[subnet['id']], {})])]:
            # Disable the subnet's DHCP and verify DHCP_Options
            data = {'subnet': {'enable_dhcp': False}}
            req = self.new_update_request('subnets', data, subnet_id)
            req.get_response(self.api)
            # DHCP_Options belonging to the subnet or its ports should all
            # be removed; the current DHCP_Options should be equal to
            # expect_subnet_rows_disabled
            self._verify_dhcp_option_rows(expect_subnet_rows_disabled)
            # Verify that the corresponding port DHCP options were cleared
            # and the others were not affected.
            for p in expect_port_row_disabled:
                self._verify_dhcp_option_row_for_port(
                    p[0]['port']['id'], p[1], p[2])

            # Re-enable dhcp in the subnet and verify DHCP_Options
            n_net.get_random_mac = mock.Mock()
            n_net.get_random_mac.return_value = dhcp_mac[subnet_id]
            data = {'subnet': {'enable_dhcp': True}}
            req = self.new_update_request('subnets', data, subnet_id)
            req.get_response(self.api)
            self._verify_dhcp_option_rows(expected_dhcp_options_rows)
            self._verify_dhcp_option_row_for_port(
                p4['port']['id'],
                expected_dhcp_options_rows['v4-' + p4['port']['id']],
                expected_dhcp_options_rows['v6-' + p4['port']['id']])
            self._verify_dhcp_option_row_for_port(
                p5['port']['id'],
                expected_dhcp_options_rows[subnet['id']],
                expected_lsp_dhcpv6_options=expected_dhcp_options_rows[
                    subnet_v6['id']])
            n_net.get_random_mac = self.orig_get_random_mac

        # Disable dhcp in p2
        data = {'port': {'extra_dhcp_opts': [{'ip_version': 4,
                                              'opt_name': 'dhcp_disabled',
                                              'opt_value': 'true'}]}}
        port_req = self.new_update_request('ports', data, p2['port']['id'])
        port_req.get_response(self.api)
        del expected_dhcp_options_rows['v4-' + p2['port']['id']]
        self._verify_dhcp_option_rows(expected_dhcp_options_rows)

        # delete port p1.
        port_req = self.new_delete_request('ports', p1['port']['id'])
        port_req.get_response(self.api)
        del expected_dhcp_options_rows['v4-' + p1['port']['id']]
        self._verify_dhcp_option_rows(expected_dhcp_options_rows)

        # delete the IPv6 extra DHCP options for p4
        data = {'port': {'extra_dhcp_opts': [{'ip_version': 6,
                                              'opt_name': 'dns-server',
                                              'opt_value': None},
                                             {'ip_version': 6,
                                              'opt_name': 'domain-search',
                                              'opt_value': None}]}}
        port_req = self.new_update_request('ports', data, p4['port']['id'])
        port_req.get_response(self.api)
        del expected_dhcp_options_rows['v6-' + p4['port']['id']]
        self._verify_dhcp_option_rows(expected_dhcp_options_rows)

    def test_port_dhcp_opts_add_and_remove_extra_dhcp_opts(self):
        """Orphaned DHCP_Options row.
In this test case a port is created with extra DHCP options. Since it has extra DHCP options a new row in the DHCP_Options is created for this port. Next the port is updated to delete the extra DHCP options. After the update, the Logical_Switch_Port.dhcpv4_options for this port should refer to the subnet DHCP_Options and the DHCP_Options row created for this port earlier should be deleted. """ dhcp_mac = {} n1 = self._make_network(self.fmt, 'n1', True) res = self._create_subnet(self.fmt, n1['network']['id'], '10.0.0.0/24') subnet = self.deserialize(self.fmt, res)['subnet'] dhcp_mac[subnet['id']] = self._get_subnet_dhcp_mac(subnet) res = self._create_subnet(self.fmt, n1['network']['id'], 'aef0::/64', ip_version=6) subnet_v6 = self.deserialize(self.fmt, res)['subnet'] dhcp_mac[subnet_v6['id']] = self._get_subnet_dhcp_mac(subnet_v6) expected_dhcp_options_rows = { subnet['id']: { 'cidr': '10.0.0.0/24', 'external_ids': {'subnet_id': subnet['id']}, 'options': {'server_id': '10.0.0.1', 'server_mac': dhcp_mac[subnet['id']], 'lease_time': str(12 * 60 * 60), 'domain_name': '"%s"' % cfg.CONF.dns_domain, 'dns_server': '{10.10.10.10}', 'mtu': str(n1['network']['mtu']), 'router': subnet['gateway_ip']}}, subnet_v6['id']: { 'cidr': 'aef0::/64', 'external_ids': {'subnet_id': subnet_v6['id']}, 'options': {'server_id': dhcp_mac[subnet_v6['id']]}}} data = { 'port': {'network_id': n1['network']['id'], 'tenant_id': self._tenant_id, 'device_owner': 'compute:None', 'extra_dhcp_opts': [{'ip_version': 4, 'opt_name': 'mtu', 'opt_value': '1100'}, {'ip_version': 4, 'opt_name': 'ntp-server', 'opt_value': '8.8.8.8'}, {'ip_version': 6, 'opt_name': 'dns-server', 'opt_value': 'aef0::100'}]}} port_req = self.new_create_request('ports', data, self.fmt) port_res = port_req.get_response(self.api) p1 = self.deserialize(self.fmt, port_res)['port'] expected_dhcp_options_rows['v4-' + p1['id']] = { 'cidr': '10.0.0.0/24', 'external_ids': {'subnet_id': subnet['id'], 'port_id': p1['id']}, 'options': {'server_id': '10.0.0.1', 'server_mac': dhcp_mac[subnet['id']], 'lease_time': str(12 * 60 * 60), 'domain_name': '"%s"' % cfg.CONF.dns_domain, 'dns_server': '{10.10.10.10}', 'mtu': '1100', 'router': subnet['gateway_ip'], 'ntp_server': '8.8.8.8'}} expected_dhcp_options_rows['v6-' + p1['id']] = { 'cidr': 'aef0::/64', 'external_ids': {'subnet_id': subnet_v6['id'], 'port_id': p1['id']}, 'options': {'server_id': dhcp_mac[subnet_v6['id']], 'dns_server': 'aef0::100'}} self._verify_dhcp_option_rows(expected_dhcp_options_rows) # The Logical_Switch_Port.dhcp(v4/v6)_options should refer to the # port DHCP options. self._verify_dhcp_option_row_for_port( p1['id'], expected_dhcp_options_rows['v4-' + p1['id']], expected_dhcp_options_rows['v6-' + p1['id']]) # Now update the port to delete the extra DHCP options data = {'port': {'extra_dhcp_opts': [{'ip_version': 4, 'opt_name': 'mtu', 'opt_value': None}, {'ip_version': 4, 'opt_name': 'ntp-server', 'opt_value': None}]}} port_req = self.new_update_request('ports', data, p1['id']) port_req.get_response(self.api) # DHCP_Options row created for the port earlier should have been # deleted. del expected_dhcp_options_rows['v4-' + p1['id']] self._verify_dhcp_option_rows(expected_dhcp_options_rows) # The Logical_Switch_Port.dhcpv4_options for this port should refer to # the subnet DHCP options. self._verify_dhcp_option_row_for_port( p1['id'], expected_dhcp_options_rows[subnet['id']], expected_dhcp_options_rows['v6-' + p1['id']]) # update the port again with extra DHCP options. 
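        # Re-adding extra options should recreate a port-specific
        # DHCP_Options row whose options are the subnet defaults overlaid
        # with the port's own values (mtu and tftp_server below).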
        data = {'port': {'extra_dhcp_opts': [{'ip_version': 4,
                                              'opt_name': 'mtu',
                                              'opt_value': '1200'},
                                             {'ip_version': 4,
                                              'opt_name': 'tftp-server',
                                              'opt_value': '8.8.8.8'}]}}
        port_req = self.new_update_request('ports', data, p1['id'])
        port_req.get_response(self.api)
        expected_dhcp_options_rows['v4-' + p1['id']] = {
            'cidr': '10.0.0.0/24',
            'external_ids': {'subnet_id': subnet['id'],
                             'port_id': p1['id']},
            'options': {'server_id': '10.0.0.1',
                        'server_mac': dhcp_mac[subnet['id']],
                        'lease_time': str(12 * 60 * 60),
                        'domain_name': '"%s"' % cfg.CONF.dns_domain,
                        'dns_server': '{10.10.10.10}',
                        'mtu': '1200',
                        'router': subnet['gateway_ip'],
                        'tftp_server': '8.8.8.8'}}
        self._verify_dhcp_option_rows(expected_dhcp_options_rows)
        self._verify_dhcp_option_row_for_port(
            p1['id'], expected_dhcp_options_rows['v4-' + p1['id']],
            expected_dhcp_options_rows['v6-' + p1['id']])

        # Disable DHCPv4 for this port. The DHCP_Options row created for
        # this port should get deleted.
        data = {'port': {'extra_dhcp_opts': [{'ip_version': 4,
                                              'opt_name': 'dhcp_disabled',
                                              'opt_value': 'true'}]}}
        port_req = self.new_update_request('ports', data, p1['id'])
        port_req.get_response(self.api)
        del expected_dhcp_options_rows['v4-' + p1['id']]
        self._verify_dhcp_option_rows(expected_dhcp_options_rows)
        # The Logical_Switch_Port.dhcpv4_options for this port should be
        # empty.
        self._verify_dhcp_option_row_for_port(
            p1['id'], {}, expected_dhcp_options_rows['v6-' + p1['id']])

        # Disable DHCPv6 for this port. The DHCP_Options row created for
        # this port should get deleted.
        data = {'port': {'extra_dhcp_opts': [{'ip_version': 6,
                                              'opt_name': 'dhcp_disabled',
                                              'opt_value': 'true'}]}}
        port_req = self.new_update_request('ports', data, p1['id'])
        port_req.get_response(self.api)
        del expected_dhcp_options_rows['v6-' + p1['id']]
        self._verify_dhcp_option_rows(expected_dhcp_options_rows)
        # The Logical_Switch_Port.dhcpv6_options for this port should now
        # be empty as well.
        self._verify_dhcp_option_row_for_port(p1['id'], {})

    def test_dhcp_options_domain_name(self):
        """Test for DHCP_Options domain name option

        This test needs the dns extension_driver to be enabled. The
        test_dhcp_options* tests are too complex, so this case has been
        moved into a separate one.
        """
        cidr = '10.0.0.0/24'
        data = {
            'network': {'name': 'foo',
                        'dns_domain': 'foo.com.',
                        'tenant_id': self._tenant_id}}
        req = self.new_create_request('networks', data, self.fmt)
        res = req.get_response(self.api)
        net = self.deserialize(self.fmt, res)['network']
        res = self._create_subnet(self.fmt, net['id'], cidr)
        subnet = self.deserialize(self.fmt, res)['subnet']
        dhcp_mac = self._get_subnet_dhcp_mac(subnet)
        p = self._make_port(
            self.fmt, net['id'],
            fixed_ips=[{'subnet_id': subnet['id']}])
        # Ensure that 'foo' taken from the network
        # is not configured as domain_name.
        # The parameter taken from configuration
        # should be set instead.
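        # Concretely: setUp() overrides cfg.CONF.dns_domain to 'ovn.test',
        # so the expected row below carries domain_name '"ovn.test"' rather
        # than the network's own dns_domain 'foo.com.'.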
expected_dhcp_options_rows = { 'cidr': '10.0.0.0/24', 'external_ids': {'subnet_id': subnet['id']}, 'options': {'dns_server': '{10.10.10.10}', 'domain_name': '"%s"' % cfg.CONF.dns_domain, 'lease_time': '43200', 'mtu': '1450', 'router': '10.0.0.1', 'server_id': '10.0.0.1', 'server_mac': dhcp_mac}} self._verify_dhcp_option_row_for_port( p['port']['id'], expected_dhcp_options_rows) def test_dhcp_options_domain_name_not_set(self): ovn_config.cfg.CONF.set_override('dns_domain', '') n1 = self._make_network(self.fmt, 'n1', True) res = self._create_subnet(self.fmt, n1['network']['id'], '10.0.0.0/24') subnet = self.deserialize(self.fmt, res)['subnet'] p = self._make_port(self.fmt, n1['network']['id'], fixed_ips=[{'subnet_id': subnet['id']}]) dhcp_mac = self._get_subnet_dhcp_mac(subnet) # Make sure that domain_name is not included. expected_dhcp_options_rows = { 'cidr': '10.0.0.0/24', 'external_ids': {'subnet_id': subnet['id']}, 'options': {'dns_server': '{10.10.10.10}', 'lease_time': '43200', 'mtu': '1450', 'router': '10.0.0.1', 'server_id': '10.0.0.1', 'server_mac': dhcp_mac}} self._verify_dhcp_option_row_for_port( p['port']['id'], expected_dhcp_options_rows) class TestPortSecurity(base.TestOVNFunctionalBase): def _get_port_related_acls(self, port_id): ovn_port = self.nb_api.lookup('Logical_Switch_Port', port_id) port_acls = [] for pg in self.nb_api.tables['Port_Group'].rows.values(): for p in pg.ports: if ovn_port.uuid != p.uuid: continue for a in pg.acls: port_acls.append({'match': a.match, 'action': a.action, 'priority': a.priority, 'direction': a.direction}) return port_acls def _get_port_related_acls_port_group_not_supported(self, port_id): port_acls = [] for acl in self.nb_api.tables['ACL'].rows.values(): ext_ids = getattr(acl, 'external_ids', {}) if ext_ids.get('neutron:lport') == port_id: port_acls.append({'match': acl.match, 'action': acl.action, 'priority': acl.priority, 'direction': acl.direction}) return port_acls def _verify_port_acls(self, port_id, expected_acls): if self.nb_api.is_port_groups_supported(): port_acls = self._get_port_related_acls(port_id) else: port_acls = self._get_port_related_acls_port_group_not_supported( port_id) self.assertItemsEqual(expected_acls, port_acls) @mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.' 
'impl_idl_ovn.OvsdbNbOvnIdl.is_port_groups_supported', lambda *args: False) def test_port_security_port_group_not_supported(self): n1 = self._make_network(self.fmt, 'n1', True) res = self._create_subnet(self.fmt, n1['network']['id'], '10.0.0.0/24') subnet = self.deserialize(self.fmt, res)['subnet'] p = self._make_port(self.fmt, n1['network']['id'], fixed_ips=[{'subnet_id': subnet['id']}]) port_id = p['port']['id'] sg_id = p['port']['security_groups'][0].replace('-', '_') expected_acls_with_sg_ps_enabled = [ {'match': 'inport == "' + str(port_id) + '" && ip', 'action': 'drop', 'priority': 1001, 'direction': 'from-lport'}, {'match': 'outport == "' + str(port_id) + '" && ip', 'action': 'drop', 'priority': 1001, 'direction': 'to-lport'}, {'match': 'inport == "' + str(port_id) + '" && ip4 && ip4.dst == ' '{255.255.255.255, 10.0.0.0/24} && udp && udp.src == 68 ' '&& udp.dst == 67', 'action': 'allow', 'priority': 1002, 'direction': 'from-lport'}, {'match': 'inport == "' + str(port_id) + '" && ip6', 'action': 'allow-related', 'priority': 1002, 'direction': 'from-lport'}, {'match': 'inport == "' + str(port_id) + '" && ip4', 'action': 'allow-related', 'priority': 1002, 'direction': 'from-lport'}, {'match': 'outport == "' + str(port_id) + '" && ip4 && ' 'ip4.src == $as_ip4_' + str(sg_id), 'action': 'allow-related', 'priority': 1002, 'direction': 'to-lport'}, {'match': 'outport == "' + str(port_id) + '" && ip6 && ' 'ip6.src == $as_ip6_' + str(sg_id), 'action': 'allow-related', 'priority': 1002, 'direction': 'to-lport'}, ] self._verify_port_acls(port_id, expected_acls_with_sg_ps_enabled) # Clear the security groups. data = {'port': {'security_groups': []}} port_req = self.new_update_request('ports', data, p['port']['id']) port_req.get_response(self.api) # No security groups and port security enabled -> ACLs should be # added to drop the packets. expected_acls_with_no_sg_ps_enabled = [ {'match': 'inport == "' + str(port_id) + '" && ip', 'action': 'drop', 'priority': 1001, 'direction': 'from-lport'}, {'match': 'outport == "' + str(port_id) + '" && ip', 'action': 'drop', 'priority': 1001, 'direction': 'to-lport'}, ] self._verify_port_acls(port_id, expected_acls_with_no_sg_ps_enabled) # Disable port security. data = {'port': {'port_security_enabled': False}} port_req = self.new_update_request('ports', data, p['port']['id']) port_req.get_response(self.api) # No security groups and port security disabled -> no ACLs should be # added (allowing all the traffic). self._verify_port_acls(port_id, []) # Enable port security again with no security groups -> ACLs should # be added back to drop the packets.
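# NOTE: A minimal sketch of the pattern behind the expectations in this
# test: without Port_Group support, each ACL is rendered per port by
# splicing the literal port UUID into the match string, and address-set
# references use the security group id with '-' replaced by '_'
# ($as_ip4_<sg_id> / $as_ip6_<sg_id>). Hypothetical helper for the
# default drop rules asserted above:
#
#     def drop_acls(port_id):
#         return [{'match': 'inport == "%s" && ip' % port_id,
#                  'action': 'drop', 'priority': 1001,
#                  'direction': 'from-lport'},
#                 {'match': 'outport == "%s" && ip' % port_id,
#                  'action': 'drop', 'priority': 1001,
#                  'direction': 'to-lport'}]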
data = {'port': {'port_security_enabled': True}} port_req = self.new_update_request('ports', data, p['port']['id']) port_req.get_response(self.api) self._verify_port_acls(port_id, expected_acls_with_no_sg_ps_enabled) # Set the security groups back. data = {'port': {'security_groups': p['port']['security_groups']}} port_req = self.new_update_request('ports', data, p['port']['id']) port_req.get_response(self.api) self._verify_port_acls(port_id, expected_acls_with_sg_ps_enabled) def test_port_security_port_group(self): if not self.nb_api.is_port_groups_supported(): self.skipTest('Port groups are not supported') n1 = self._make_network(self.fmt, 'n1', True) res = self._create_subnet(self.fmt, n1['network']['id'], '10.0.0.0/24') subnet = self.deserialize(self.fmt, res)['subnet'] p = self._make_port(self.fmt, n1['network']['id'], fixed_ips=[{'subnet_id': subnet['id']}]) port_id = p['port']['id'] sg_id = p['port']['security_groups'][0].replace('-', '_') pg_name = utils.ovn_port_group_name(sg_id) expected_acls_with_sg_ps_enabled = [ {'match': 'inport == @neutron_pg_drop && ip', 'action': 'drop', 'priority': 1001, 'direction': 'from-lport'}, {'match': 'outport == @neutron_pg_drop && ip', 'action': 'drop', 'priority': 1001, 'direction': 'to-lport'}, {'match': 'inport == @' + pg_name + ' && ip6', 'action': 'allow-related', 'priority': 1002, 'direction': 'from-lport'}, {'match': 'inport == @' + pg_name + ' && ip4', 'action': 'allow-related', 'priority': 1002, 'direction': 'from-lport'}, {'match': 'outport == @' + pg_name + ' && ip4 && ' 'ip4.src == $' + pg_name + '_ip4', 'action': 'allow-related', 'priority': 1002, 'direction': 'to-lport'}, {'match': 'outport == @' + pg_name + ' && ip6 && ' 'ip6.src == $' + pg_name + '_ip6', 'action': 'allow-related', 'priority': 1002, 'direction': 'to-lport'}, ] self._verify_port_acls(port_id, expected_acls_with_sg_ps_enabled) # Clear the security groups. data = {'port': {'security_groups': []}} port_req = self.new_update_request('ports', data, p['port']['id']) port_req.get_response(self.api) # No security groups and port security enabled -> ACLs should be # added to drop the packets. expected_acls_with_no_sg_ps_enabled = [ {'match': 'inport == @neutron_pg_drop && ip', 'action': 'drop', 'priority': 1001, 'direction': 'from-lport'}, {'match': 'outport == @neutron_pg_drop && ip', 'action': 'drop', 'priority': 1001, 'direction': 'to-lport'}, ] self._verify_port_acls(port_id, expected_acls_with_no_sg_ps_enabled) # Disable port security. data = {'port': {'port_security_enabled': False}} port_req = self.new_update_request('ports', data, p['port']['id']) port_req.get_response(self.api) # No security groups and port security disabled -> no ACLs should be # added (allowing all the traffic). self._verify_port_acls(port_id, []) # Enable port security again with no security groups -> ACLs should # be added back to drop the packets.
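# NOTE: For contrast with the per-port variant above, a minimal sketch of
# the Port_Group-based pattern this test asserts: matches reference the
# group by name (@<pg>) and the per-group address sets ($<pg>_ip4 /
# $<pg>_ip6), so a single ACL covers every port in the group.
# Illustration only, reusing pg_name as computed in the test:
#
#     match = 'outport == @%(pg)s && ip4 && ip4.src == $%(pg)s_ip4' % {
#         'pg': pg_name}
#     allow_v4_reply = {'match': match, 'action': 'allow-related',
#                       'priority': 1002, 'direction': 'to-lport'}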
data = {'port': {'port_security_enabled': True}} port_req = self.new_update_request('ports', data, p['port']['id']) port_req.get_response(self.api) self._verify_port_acls(port_id, expected_acls_with_no_sg_ps_enabled) # Set the security groups back. data = {'port': {'security_groups': p['port']['security_groups']}} port_req = self.new_update_request('ports', data, p['port']['id']) port_req.get_response(self.api) self._verify_port_acls(port_id, expected_acls_with_sg_ps_enabled) class TestDNSRecords(base.TestOVNFunctionalBase): _extension_drivers = ['port_security', 'dns'] def _validate_dns_records(self, expected_dns_records): observed_dns_records = [] for dns_row in self.nb_api.tables['DNS'].rows.values(): observed_dns_records.append( {'external_ids': dns_row.external_ids, 'records': dns_row.records}) self.assertItemsEqual(expected_dns_records, observed_dns_records) def _validate_ls_dns_records(self, lswitch_name, expected_dns_records): ls = idlutils.row_by_value(self.nb_api.idl, 'Logical_Switch', 'name', lswitch_name) observed_dns_records = [] for dns_row in ls.dns_records: observed_dns_records.append( {'external_ids': dns_row.external_ids, 'records': dns_row.records}) self.assertItemsEqual(expected_dns_records, observed_dns_records) def setUp(self): ovn_config.cfg.CONF.set_override('dns_domain', 'ovn.test') super(TestDNSRecords, self).setUp() def test_dns_records(self): expected_dns_records = [] nets = [] for n, cidr in [('n1', '10.0.0.0/24'), ('n2', '20.0.0.0/24')]: net_kwargs = {} if n == 'n1': net_kwargs = {dns_apidef.DNSDOMAIN: 'net-' + n + '.'} net_kwargs['arg_list'] = (dns_apidef.DNSDOMAIN,) res = self._create_network(self.fmt, n, True, **net_kwargs) net = self.deserialize(self.fmt, res) nets.append(net) res = self._create_subnet(self.fmt, net['network']['id'], cidr) self.deserialize(self.fmt, res) # At this point no DNS records should have been created. n1_lswitch_name = utils.ovn_name(nets[0]['network']['id']) n2_lswitch_name = utils.ovn_name(nets[1]['network']['id']) self._validate_dns_records(expected_dns_records) self._validate_ls_dns_records(n1_lswitch_name, expected_dns_records) self._validate_ls_dns_records(n2_lswitch_name, expected_dns_records) port_kwargs = {'arg_list': (dns_apidef.DNSNAME,), dns_apidef.DNSNAME: 'n1p1'} res = self._create_port(self.fmt, nets[0]['network']['id'], device_id='n1p1', **port_kwargs) n1p1 = self.deserialize(self.fmt, res) port_ips = " ".join([f['ip_address'] for f in n1p1['port']['fixed_ips']]) expected_dns_records = [ {'external_ids': {'ls_name': n1_lswitch_name}, 'records': {'n1p1': port_ips, 'n1p1.ovn.test': port_ips, 'n1p1.net-n1': port_ips}} ] self._validate_dns_records(expected_dns_records) self._validate_ls_dns_records(n1_lswitch_name, [expected_dns_records[0]]) self._validate_ls_dns_records(n2_lswitch_name, []) # Create another port, but don't set dns_name. The DNS record should # not be updated. res = self._create_port(self.fmt, nets[1]['network']['id'], device_id='n2p1') n2p1 = self.deserialize(self.fmt, res) self._validate_dns_records(expected_dns_records) # Update port n2p1 with dns_name. The DNS record should be updated.
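# NOTE: A minimal sketch of the record layout asserted in this test: each
# OVN DNS row maps the bare dns_name, the name qualified with the global
# dns_domain ('ovn.test'), and, when the network carries its own
# dns_domain, the name qualified with that domain (trailing dot dropped),
# all to the port's fixed IPs joined by spaces. Illustration only:
#
#     ips = " ".join(f['ip_address'] for f in n2p1['port']['fixed_ips'])
#     records = {'n2p1': ips, 'n2p1.ovn.test': ips}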
body = {'dns_name': 'n2p1'} data = {'port': body} req = self.new_update_request('ports', data, n2p1['port']['id']) res = req.get_response(self.api) self.assertEqual(200, res.status_int) port_ips = " ".join([f['ip_address'] for f in n2p1['port']['fixed_ips']]) expected_dns_records.append( {'external_ids': {'ls_name': n2_lswitch_name}, 'records': {'n2p1': port_ips, 'n2p1.ovn.test': port_ips}}) self._validate_dns_records(expected_dns_records) self._validate_ls_dns_records(n1_lswitch_name, [expected_dns_records[0]]) self._validate_ls_dns_records(n2_lswitch_name, [expected_dns_records[1]]) # Create n1p2 port_kwargs = {'arg_list': (dns_apidef.DNSNAME,), dns_apidef.DNSNAME: 'n1p2'} res = self._create_port(self.fmt, nets[0]['network']['id'], device_id='n1p1', **port_kwargs) n1p2 = self.deserialize(self.fmt, res) port_ips = " ".join([f['ip_address'] for f in n1p2['port']['fixed_ips']]) expected_dns_records[0]['records']['n1p2'] = port_ips expected_dns_records[0]['records']['n1p2.ovn.test'] = port_ips expected_dns_records[0]['records']['n1p2.net-n1'] = port_ips self._validate_dns_records(expected_dns_records) self._validate_ls_dns_records(n1_lswitch_name, [expected_dns_records[0]]) self._validate_ls_dns_records(n2_lswitch_name, [expected_dns_records[1]]) # Remove device_id from n1p1 body = {'device_id': ''} data = {'port': body} req = self.new_update_request('ports', data, n1p1['port']['id']) res = req.get_response(self.api) self.assertEqual(200, res.status_int) expected_dns_records[0]['records'].pop('n1p1') expected_dns_records[0]['records'].pop('n1p1.ovn.test') expected_dns_records[0]['records'].pop('n1p1.net-n1') self._validate_dns_records(expected_dns_records) self._validate_ls_dns_records(n1_lswitch_name, [expected_dns_records[0]]) self._validate_ls_dns_records(n2_lswitch_name, [expected_dns_records[1]]) # Delete n2p1 self._delete('ports', n2p1['port']['id']) expected_dns_records[1]['records'] = {} self._validate_dns_records(expected_dns_records) self._validate_ls_dns_records(n1_lswitch_name, [expected_dns_records[0]]) self._validate_ls_dns_records(n2_lswitch_name, [expected_dns_records[1]]) # Delete n2 self._delete('networks', nets[1]['network']['id']) del expected_dns_records[1] self._validate_dns_records(expected_dns_records) self._validate_ls_dns_records(n1_lswitch_name, [expected_dns_records[0]]) # Delete n1p1 and n1p2 and n1 self._delete('ports', n1p1['port']['id']) self._delete('ports', n1p2['port']['id']) self._delete('networks', nets[0]['network']['id']) self._validate_dns_records([]) class TestNBDbResourcesOverTcp(TestNBDbResources): def get_ovsdb_server_protocol(self): return 'tcp' class TestNBDbResourcesOverSsl(TestNBDbResources): def get_ovsdb_server_protocol(self): return 'ssl'

neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py

# Copyright 2020 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.common.ovn import acl as acl_utils from neutron.common.ovn import constants as ovn_const from neutron.common.ovn import utils from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf as ovn_config from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_db_sync from neutron.services.segments import db as segments_db from neutron.tests.functional import base from neutron.tests.unit.api import test_extensions from neutron.tests.unit.extensions import test_extraroute from neutron.tests.unit.extensions import test_securitygroup from neutron_lib.api.definitions import dns as dns_apidef from neutron_lib.api.definitions import l3 from neutron_lib.api.definitions import port_security as ps from neutron_lib import constants from neutron_lib import context from neutron_lib.plugins import directory from oslo_utils import uuidutils from ovsdbapp.backend.ovs_idl import idlutils class TestOvnNbSync(base.TestOVNFunctionalBase): _extension_drivers = ['port_security', 'dns'] def setUp(self): ovn_config.cfg.CONF.set_override('dns_domain', 'ovn.test') super(TestOvnNbSync, self).setUp(maintenance_worker=True) ext_mgr = test_extraroute.ExtraRouteTestExtensionManager() self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr) sg_mgr = test_securitygroup.SecurityGroupTestExtensionManager() self._sg_api = test_extensions.setup_extensions_middleware(sg_mgr) self.create_lswitches = [] self.create_lswitch_ports = [] self.create_lrouters = [] self.create_lrouter_ports = [] self.create_lrouter_routes = [] self.create_lrouter_nats = [] self.update_lrouter_ports = [] self.create_acls = [] self.delete_lswitches = [] self.delete_lswitch_ports = [] self.delete_lrouters = [] self.delete_lrouter_ports = [] self.delete_lrouter_routes = [] self.delete_lrouter_nats = [] self.delete_acls = [] self.create_address_sets = [] self.delete_address_sets = [] self.update_address_sets = [] self.create_port_groups = [] self.delete_port_groups = [] self.expected_dhcp_options_rows = [] self.reset_lport_dhcpv4_options = [] self.reset_lport_dhcpv6_options = [] self.stale_lport_dhcpv4_options = [] self.stale_lport_dhcpv6_options = [] self.orphaned_lport_dhcp_options = [] self.lport_dhcpv4_disabled = {} self.lport_dhcpv6_disabled = {} self.missed_dhcp_options = [] self.dirty_dhcp_options = [] self.lport_dhcp_ignored = [] self.match_old_mac_dhcp_subnets = [] self.expected_dns_records = [] self.expected_ports_with_unknown_addr = [] self.ctx = context.get_admin_context() ovn_config.cfg.CONF.set_override('ovn_metadata_enabled', True, group='ovn') ovn_config.cfg.CONF.set_override( 'enable_distributed_floating_ip', True, group='ovn') def _api_for_resource(self, resource): if resource in ['security-groups']: return self._sg_api else: return super(TestOvnNbSync, self)._api_for_resource(resource) def _create_resources(self, restart_ovsdb_processes=False): net_kwargs = {dns_apidef.DNSDOMAIN: 'ovn.test.'} net_kwargs['arg_list'] = (dns_apidef.DNSDOMAIN,) res = self._create_network(self.fmt, 'n1', True, **net_kwargs) n1 = self.deserialize(self.fmt, res) self.expected_dns_records = [ {'external_ids': {'ls_name': 
utils.ovn_name(n1['network']['id'])}, 'records': {}} ] res = self._create_subnet(self.fmt, n1['network']['id'], '10.0.0.0/24') n1_s1 = self.deserialize(self.fmt, res) res = self._create_subnet(self.fmt, n1['network']['id'], '2001:dba::/64', ip_version=6, enable_dhcp=True) n1_s2 = self.deserialize(self.fmt, res) res = self._create_subnet(self.fmt, n1['network']['id'], '2001:dbb::/64', ip_version=6, ipv6_address_mode='slaac', ipv6_ra_mode='slaac') n1_s3 = self.deserialize(self.fmt, res) self.expected_dhcp_options_rows.append({ 'cidr': '10.0.0.0/24', 'external_ids': {'subnet_id': n1_s1['subnet']['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '0'}, 'options': {'classless_static_route': '{169.254.169.254/32,10.0.0.2, 0.0.0.0/0,10.0.0.1}', 'server_id': '10.0.0.1', 'server_mac': '01:02:03:04:05:06', 'dns_server': '{10.10.10.10}', 'lease_time': str(12 * 60 * 60), 'mtu': str(n1['network']['mtu']), 'domain_name': '"ovn.test"', 'router': n1_s1['subnet']['gateway_ip']}}) self.expected_dhcp_options_rows.append({ 'cidr': '2001:dba::/64', 'external_ids': {'subnet_id': n1_s2['subnet']['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '0'}, 'options': {'server_id': '01:02:03:04:05:06'}}) n1_s1_dhcp_options_uuid = ( self.mech_driver._nb_ovn.get_subnet_dhcp_options( n1_s1['subnet']['id'])['subnet']['uuid']) n1_s2_dhcpv6_options_uuid = ( self.mech_driver._nb_ovn.get_subnet_dhcp_options( n1_s2['subnet']['id'])['subnet']['uuid']) update_port_ids_v4 = [] update_port_ids_v6 = [] n1_port_dict = {} for p in ['p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7']: if p in ['p1', 'p5']: port_kwargs = { 'arg_list': (dns_apidef.DNSNAME, ps.PORTSECURITY), dns_apidef.DNSNAME: 'n1-' + p, ps.PORTSECURITY: 'False', 'device_id': 'n1-' + p} else: port_kwargs = {} res = self._create_port(self.fmt, n1['network']['id'], name='n1-' + p, device_owner='compute:None', **port_kwargs) port = self.deserialize(self.fmt, res) n1_port_dict[p] = port['port']['id'] lport_name = port['port']['id'] lswitch_name = 'neutron-' + n1['network']['id'] if p in ['p1', 'p5']: port_ips = " ".join([f['ip_address'] for f in port['port']['fixed_ips']]) hname = 'n1-' + p self.expected_dns_records[0]['records'][hname] = port_ips hname = 'n1-' + p + '.ovn.test' self.expected_dns_records[0]['records'][hname] = port_ips self.expected_ports_with_unknown_addr.append(lport_name) if p == 'p1': fake_subnet = {'cidr': '11.11.11.11/24'} dhcp_acls = acl_utils.add_acl_dhcp(port['port'], fake_subnet) for dhcp_acl in dhcp_acls: self.create_acls.append(dhcp_acl) elif p == 'p2': self.delete_lswitch_ports.append((lport_name, lswitch_name)) update_port_ids_v4.append(port['port']['id']) update_port_ids_v6.append(port['port']['id']) self.expected_dhcp_options_rows.append({ 'cidr': '10.0.0.0/24', 'external_ids': {'subnet_id': n1_s1['subnet']['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '0', 'port_id': port['port']['id']}, 'options': { 'classless_static_route': '{169.254.169.254/32,10.0.0.2, 0.0.0.0/0,10.0.0.1}', 'server_id': '10.0.0.1', 'server_mac': '01:02:03:04:05:06', 'lease_time': str(12 * 60 * 60), 'mtu': str(n1['network']['mtu']), 'router': n1_s1['subnet']['gateway_ip'], 'tftp_server': '20.0.0.20', 'domain_name': '"ovn.test"', 'dns_server': '8.8.8.8'}}) self.expected_dhcp_options_rows.append({ 'cidr': '2001:dba::/64', 'external_ids': {'subnet_id': n1_s2['subnet']['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '0', 'port_id': port['port']['id']}, 'options': {'server_id': '01:02:03:04:05:06', 'domain_search': 'foo-domain'}}) self.dirty_dhcp_options.append({ 'subnet_id': n1_s1['subnet']['id'], 'port_id': 
lport_name}) self.dirty_dhcp_options.append({ 'subnet_id': n1_s2['subnet']['id'], 'port_id': lport_name}) elif p == 'p3': self.delete_acls.append((lport_name, lswitch_name)) self.reset_lport_dhcpv4_options.append(lport_name) self.lport_dhcpv6_disabled.update({ lport_name: n1_s2_dhcpv6_options_uuid}) data = {'port': { 'extra_dhcp_opts': [{'ip_version': 6, 'opt_name': 'dhcp_disabled', 'opt_value': 'True'}]}} port_req = self.new_update_request('ports', data, lport_name) port_req.get_response(self.api) elif p == 'p4': self.lport_dhcpv4_disabled.update({ lport_name: n1_s1_dhcp_options_uuid}) data = {'port': { 'extra_dhcp_opts': [{'ip_version': 4, 'opt_name': 'dhcp_disabled', 'opt_value': 'True'}]}} port_req = self.new_update_request('ports', data, lport_name) port_req.get_response(self.api) self.reset_lport_dhcpv6_options.append(lport_name) elif p == 'p5': self.stale_lport_dhcpv4_options.append({ 'subnet_id': n1_s1['subnet']['id'], 'port_id': port['port']['id'], 'cidr': '10.0.0.0/24', 'options': {'server_id': '10.0.0.254', 'server_mac': '01:02:03:04:05:06', 'lease_time': str(3 * 60 * 60), 'mtu': str(n1['network']['mtu'] / 2), 'router': '10.0.0.254', 'tftp_server': '20.0.0.234', 'domain_name': '"ovn.test"', 'dns_server': '8.8.8.8'}, 'external_ids': {'subnet_id': n1_s1['subnet']['id'], 'port_id': port['port']['id']}}) elif p == 'p6': self.delete_lswitch_ports.append((lport_name, lswitch_name)) elif p == 'p7': update_port_ids_v4.append(port['port']['id']) update_port_ids_v6.append(port['port']['id']) self.expected_dhcp_options_rows.append({ 'cidr': '10.0.0.0/24', 'external_ids': {'subnet_id': n1_s1['subnet']['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '0', 'port_id': port['port']['id']}, 'options': { 'classless_static_route': '{169.254.169.254/32,10.0.0.2, 0.0.0.0/0,10.0.0.1}', 'server_id': '10.0.0.1', 'server_mac': '01:02:03:04:05:06', 'lease_time': str(12 * 60 * 60), 'mtu': str(n1['network']['mtu']), 'router': n1_s1['subnet']['gateway_ip'], 'tftp_server': '20.0.0.20', 'domain_name': '"ovn.test"', 'dns_server': '8.8.8.8'}}) self.expected_dhcp_options_rows.append({ 'cidr': '2001:dba::/64', 'external_ids': {'subnet_id': n1_s2['subnet']['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '0', 'port_id': port['port']['id']}, 'options': {'server_id': '01:02:03:04:05:06', 'domain_search': 'foo-domain'}}) self.reset_lport_dhcpv4_options.append(lport_name) self.reset_lport_dhcpv6_options.append(lport_name) self.dirty_dhcp_options.append({'subnet_id': n1_s1['subnet']['id']}) self.dirty_dhcp_options.append({'subnet_id': n1_s2['subnet']['id']}) res = self._create_network(self.fmt, 'n2', True, **net_kwargs) n2 = self.deserialize(self.fmt, res) res = self._create_subnet(self.fmt, n2['network']['id'], '20.0.0.0/24') n2_s1 = self.deserialize(self.fmt, res) res = self._create_subnet(self.fmt, n2['network']['id'], '2001:dbd::/64', ip_version=6) n2_s2 = self.deserialize(self.fmt, res) self.expected_dhcp_options_rows.append({ 'cidr': '20.0.0.0/24', 'external_ids': {'subnet_id': n2_s1['subnet']['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '0'}, 'options': {'classless_static_route': '{169.254.169.254/32,20.0.0.2, 0.0.0.0/0,20.0.0.1}', 'server_id': '20.0.0.1', 'server_mac': '01:02:03:04:05:06', 'dns_server': '{10.10.10.10}', 'lease_time': str(12 * 60 * 60), 'mtu': str(n2['network']['mtu']), 'domain_name': '"ovn.test"', 'router': n2_s1['subnet']['gateway_ip']}}) self.expected_dhcp_options_rows.append({ 'cidr': '2001:dbd::/64', 'external_ids': {'subnet_id': n2_s2['subnet']['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '0'}, 'options': 
{'server_id': '01:02:03:04:05:06'}}) for p in ['p1', 'p2']: port = self._make_port(self.fmt, n2['network']['id'], name='n2-' + p, device_owner='compute:None') if p == 'p1': update_port_ids_v4.append(port['port']['id']) self.expected_dhcp_options_rows.append({ 'cidr': '20.0.0.0/24', 'external_ids': {'subnet_id': n2_s1['subnet']['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '0', 'port_id': port['port']['id']}, 'options': { 'classless_static_route': '{169.254.169.254/32,20.0.0.2, 0.0.0.0/0,20.0.0.1}', 'server_id': '20.0.0.1', 'server_mac': '01:02:03:04:05:06', 'lease_time': str(12 * 60 * 60), 'mtu': str(n1['network']['mtu']), 'router': n2_s1['subnet']['gateway_ip'], 'tftp_server': '20.0.0.20', 'domain_name': '"ovn.test"', 'dns_server': '8.8.8.8'}}) self.missed_dhcp_options.extend([ opts['uuid'] for opts in self.mech_driver._nb_ovn.get_subnets_dhcp_options( [n2_s1['subnet']['id'], n2_s2['subnet']['id']])]) for port_id in update_port_ids_v4: data = {'port': {'extra_dhcp_opts': [{'ip_version': 4, 'opt_name': 'tftp-server', 'opt_value': '20.0.0.20'}, {'ip_version': 4, 'opt_name': 'dns-server', 'opt_value': '8.8.8.8'}]}} port_req = self.new_update_request('ports', data, port_id) port_req.get_response(self.api) for port_id in update_port_ids_v6: data = {'port': {'extra_dhcp_opts': [{'ip_version': 6, 'opt_name': 'domain-search', 'opt_value': 'foo-domain'}]}} port_req = self.new_update_request('ports', data, port_id) port_req.get_response(self.api) # External network and subnet e1 = self._make_network(self.fmt, 'e1', True, arg_list=('router:external', 'provider:network_type', 'provider:physical_network'), **{'router:external': True, 'provider:network_type': 'flat', 'provider:physical_network': 'public'}) self.assertTrue(e1['network']['router:external']) self.assertEqual('flat', e1['network']['provider:network_type']) self.assertEqual('public', e1['network']['provider:physical_network']) res = self._create_subnet(self.fmt, e1['network']['id'], '100.0.0.0/24', gateway_ip='100.0.0.254', allocation_pools=[{'start': '100.0.0.2', 'end': '100.0.0.253'}], enable_dhcp=False) e1_s1 = self.deserialize(self.fmt, res) res = self._create_subnet(self.fmt, e1['network']['id'], '2001:db8::/64', gateway_ip='fd05:59e4:ef16::1', ip_version=constants.IP_VERSION_6, enable_dhcp=False) e1_s2 = self.deserialize(self.fmt, res) self.create_lswitches.append('neutron-' + uuidutils.generate_uuid()) self.create_lswitch_ports.append(('neutron-' + uuidutils.generate_uuid(), 'neutron-' + n1['network']['id'])) self.create_lswitch_ports.append(('neutron-' + uuidutils.generate_uuid(), 'neutron-' + n1['network']['id'])) self.delete_lswitches.append('neutron-' + n2['network']['id']) self.delete_lswitch_ports.append( (utils.ovn_provnet_port_name(e1['network']['id']), utils.ovn_name(e1['network']['id']))) r1 = self.l3_plugin.create_router( self.context, {'router': { 'name': 'r1', 'admin_state_up': True, 'tenant_id': self._tenant_id, 'external_gateway_info': { 'enable_snat': True, 'network_id': e1['network']['id'], 'external_fixed_ips': [ {'ip_address': '100.0.0.2', 'subnet_id': e1_s1['subnet']['id']}, {'ip_address': '2001:db8::23a', 'subnet_id': e1_s2['subnet']['id']}]}}}) self.l3_plugin.add_router_interface( self.context, r1['id'], {'subnet_id': n1_s1['subnet']['id']}) r1_p2 = self.l3_plugin.add_router_interface( self.context, r1['id'], {'subnet_id': n1_s2['subnet']['id']}) self.l3_plugin.add_router_interface( self.context, r1['id'], {'subnet_id': n1_s3['subnet']['id']}) r1_p3 = self.l3_plugin.add_router_interface( self.context, r1['id'], 
{'subnet_id': n2_s1['subnet']['id']}) self.update_lrouter_ports.append(('lrp-' + r1_p2['port_id'], 'neutron-' + r1['id'], n1_s2['subnet']['gateway_ip'])) self.delete_lrouter_ports.append(('lrp-' + r1_p3['port_id'], 'neutron-' + r1['id'])) self.delete_lrouter_ports.append(('lrp-' + r1['gw_port_id'], 'neutron-' + r1['id'])) self.l3_plugin.update_router( self.context, r1['id'], {'router': {'routes': [{'destination': '10.10.0.0/24', 'nexthop': '20.0.0.10'}, {'destination': '10.11.0.0/24', 'nexthop': '20.0.0.11'}]}}) r1_f1 = self.l3_plugin.create_floatingip( self.context, {'floatingip': { 'tenant_id': self._tenant_id, 'floating_network_id': e1['network']['id'], 'floating_ip_address': '100.0.0.20', 'subnet_id': None, 'port_id': n1_port_dict['p1']}}) r1_f2 = self.l3_plugin.create_floatingip( self.context, {'floatingip': { 'tenant_id': self._tenant_id, 'floating_network_id': e1['network']['id'], 'subnet_id': None, 'floating_ip_address': '100.0.0.21'}}) self.l3_plugin.update_floatingip( self.context, r1_f2['id'], {'floatingip': { 'port_id': n1_port_dict['p2']}}) # update External subnet gateway ip to test function _subnet_update # of L3 OVN plugin. data = {'subnet': {'gateway_ip': '100.0.0.1'}} subnet_req = self.new_update_request( 'subnets', data, e1_s1['subnet']['id']) subnet_req.get_response(self.api) # Static routes self.create_lrouter_routes.append(('neutron-' + r1['id'], '10.12.0.0/24', '20.0.0.12')) self.create_lrouter_routes.append(('neutron-' + r1['id'], '10.13.0.0/24', '20.0.0.13')) self.delete_lrouter_routes.append(('neutron-' + r1['id'], '10.10.0.0/24', '20.0.0.10')) # Gateway default route self.delete_lrouter_routes.append(('neutron-' + r1['id'], '0.0.0.0/0', '100.0.0.1')) # Gateway sNATs self.create_lrouter_nats.append(('neutron-' + r1['id'], {'external_ip': '100.0.0.100', 'logical_ip': '200.0.0.0/24', 'type': 'snat'})) self.delete_lrouter_nats.append(('neutron-' + r1['id'], {'external_ip': '100.0.0.2', 'logical_ip': '10.0.0.0/24', 'type': 'snat'})) # Floating IPs self.create_lrouter_nats.append(('neutron-' + r1['id'], {'external_ip': '100.0.0.200', 'logical_ip': '200.0.0.200', 'type': 'dnat_and_snat'})) self.create_lrouter_nats.append(('neutron-' + r1['id'], {'external_ip': '100.0.0.201', 'logical_ip': '200.0.0.201', 'type': 'dnat_and_snat', 'external_mac': '01:02:03:04:05:06', 'logical_port': 'vm1' })) self.delete_lrouter_nats.append(('neutron-' + r1['id'], {'external_ip': r1_f1['floating_ip_address'], 'logical_ip': r1_f1['fixed_ip_address'], 'type': 'dnat_and_snat'})) res = self._create_network(self.fmt, 'n4', True, **net_kwargs) n4 = self.deserialize(self.fmt, res) res = self._create_subnet(self.fmt, n4['network']['id'], '40.0.0.0/24', enable_dhcp=False) self.expected_dns_records.append( {'external_ids': {'ls_name': utils.ovn_name(n4['network']['id'])}, 'records': {}} ) n4_s1 = self.deserialize(self.fmt, res) n4_port_dict = {} for p in ['p1', 'p2', 'p3']: if p in ['p1', 'p2']: port_kwargs = {'arg_list': (dns_apidef.DNSNAME,), dns_apidef.DNSNAME: 'n4-' + p, 'device_id': 'n4-' + p} else: port_kwargs = {} res = self._create_port(self.fmt, n4['network']['id'], name='n4-' + p, device_owner='compute:None', **port_kwargs) port = self.deserialize(self.fmt, res) if p in ['p1', 'p2']: port_ips = " ".join([f['ip_address'] for f in port['port']['fixed_ips']]) hname = 'n4-' + p self.expected_dns_records[1]['records'][hname] = port_ips hname = 'n4-' + p + '.ovn.test' self.expected_dns_records[1]['records'][hname] = port_ips n4_port_dict[p] = port['port']['id'] 
self.lport_dhcp_ignored.append(port['port']['id']) r2 = self.l3_plugin.create_router( self.context, {'router': {'name': 'r2', 'admin_state_up': True, 'tenant_id': self._tenant_id}}) n1_prtr = self._make_port(self.fmt, n1['network']['id'], name='n1-p-rtr') self.l3_plugin.add_router_interface( self.context, r2['id'], {'port_id': n1_prtr['port']['id']}) self.l3_plugin.add_router_interface( self.context, r2['id'], {'subnet_id': n4_s1['subnet']['id']}) self.l3_plugin.update_router( self.context, r2['id'], # FIXME(lucasagomes): Add "routes" back, it has been # removed to avoid a race condition that was happening from # time to time. The error was: "Invalid format for routes: # [{'destination': '10.20.0.0/24', 'nexthop': '10.0.0.20'}], # the nexthop is used by route". It seems to be a race within # the tests itself, running the functional tests without # any concurrency doesn't fail when the "routes" are set. # # {'router': {'routes': [{'destination': '10.20.0.0/24', # 'nexthop': '10.0.0.20'}], # ... {'router': {'external_gateway_info': { 'enable_snat': False, 'network_id': e1['network']['id'], 'external_fixed_ips': [ {'ip_address': '100.0.0.3', 'subnet_id': e1_s1['subnet']['id']}]}}}) self.l3_plugin.create_floatingip( self.context, {'floatingip': { 'tenant_id': self._tenant_id, 'floating_network_id': e1['network']['id'], 'floating_ip_address': '100.0.0.30', 'subnet_id': None, 'port_id': n4_port_dict['p1']}}) self.l3_plugin.create_floatingip( self.context, {'floatingip': { 'tenant_id': self._tenant_id, 'floating_network_id': e1['network']['id'], 'floating_ip_address': '100.0.0.31', 'subnet_id': None, 'port_id': n4_port_dict['p2']}}) # To test l3_plugin.disassociate_floatingips, associating floating IP # to port p3 and then deleting p3. self.l3_plugin.create_floatingip( self.context, {'floatingip': { 'tenant_id': self._tenant_id, 'floating_network_id': e1['network']['id'], 'floating_ip_address': '100.0.0.32', 'subnet_id': None, 'port_id': n4_port_dict['p3']}}) self._delete('ports', n4_port_dict['p3']) self.create_lrouters.append('neutron-' + uuidutils.generate_uuid()) self.create_lrouter_ports.append(('lrp-' + uuidutils.generate_uuid(), 'neutron-' + r1['id'])) self.create_lrouter_ports.append(('lrp-' + uuidutils.generate_uuid(), 'neutron-' + r1['id'])) self.delete_lrouters.append('neutron-' + r2['id']) address_set_name = n1_prtr['port']['security_groups'][0] self.create_address_sets.extend([('fake_sg', 'ip4'), ('fake_sg', 'ip6')]) self.delete_address_sets.append((address_set_name, 'ip6')) address_adds = ['10.0.0.101', '10.0.0.102'] address_dels = [] for address in n1_prtr['port']['fixed_ips']: address_dels.append(address['ip_address']) self.update_address_sets.append((address_set_name, 'ip4', address_adds, address_dels)) self.create_port_groups.extend([{'name': 'pg1', 'acls': []}, {'name': 'pg2', 'acls': []}]) self.delete_port_groups.append( utils.ovn_port_group_name(n1_prtr['port']['security_groups'][0])) # Create a network and subnet with orphaned OVN resources. n3 = self._make_network(self.fmt, 'n3', True) res = self._create_subnet(self.fmt, n3['network']['id'], '30.0.0.0/24') n3_s1 = self.deserialize(self.fmt, res) res = self._create_subnet(self.fmt, n3['network']['id'], '2001:dbc::/64', ip_version=6) n3_s2 = self.deserialize(self.fmt, res) if not restart_ovsdb_processes: # Test using original mac when syncing. 
dhcp_mac_v4 = (self.mech_driver._nb_ovn.get_subnet_dhcp_options( n3_s1['subnet']['id'])['subnet'].get('options', {}) .get('server_mac')) dhcp_mac_v6 = (self.mech_driver._nb_ovn.get_subnet_dhcp_options( n3_s2['subnet']['id'])['subnet'].get('options', {}) .get('server_id')) self.assertTrue(dhcp_mac_v4 is not None) self.assertTrue(dhcp_mac_v6 is not None) self.match_old_mac_dhcp_subnets.append(n3_s1['subnet']['id']) self.match_old_mac_dhcp_subnets.append(n3_s2['subnet']['id']) else: dhcp_mac_v4 = '01:02:03:04:05:06' dhcp_mac_v6 = '01:02:03:04:05:06' self.expected_dhcp_options_rows.append({ 'cidr': '30.0.0.0/24', 'external_ids': {'subnet_id': n3_s1['subnet']['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '0'}, 'options': {'classless_static_route': '{169.254.169.254/32,30.0.0.2, 0.0.0.0/0,30.0.0.1}', 'server_id': '30.0.0.1', 'domain_name': '"ovn.test"', 'dns_server': '{10.10.10.10}', 'server_mac': dhcp_mac_v4, 'lease_time': str(12 * 60 * 60), 'mtu': str(n3['network']['mtu']), 'router': n3_s1['subnet']['gateway_ip']}}) self.expected_dhcp_options_rows.append({ 'cidr': '2001:dbc::/64', 'external_ids': {'subnet_id': n3_s2['subnet']['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '0'}, 'options': {'server_id': dhcp_mac_v6}}) fake_port_id1 = uuidutils.generate_uuid() fake_port_id2 = uuidutils.generate_uuid() self.create_lswitch_ports.append(('neutron-' + fake_port_id1, 'neutron-' + n3['network']['id'])) self.create_lswitch_ports.append(('neutron-' + fake_port_id2, 'neutron-' + n3['network']['id'])) stale_dhcpv4_options1 = { 'subnet_id': n3_s1['subnet']['id'], 'port_id': fake_port_id1, 'cidr': '30.0.0.0/24', 'options': {'server_id': '30.0.0.254', 'server_mac': dhcp_mac_v4, 'lease_time': str(3 * 60 * 60), 'mtu': str(n3['network']['mtu'] / 2), 'router': '30.0.0.254', 'tftp_server': '30.0.0.234', 'dns_server': '8.8.8.8'}, 'external_ids': {'subnet_id': n3_s1['subnet']['id'], 'port_id': fake_port_id1}} self.stale_lport_dhcpv4_options.append(stale_dhcpv4_options1) stale_dhcpv4_options2 = stale_dhcpv4_options1.copy() stale_dhcpv4_options2.update({ 'port_id': fake_port_id2, 'external_ids': {'subnet_id': n3_s1['subnet']['id'], 'port_id': fake_port_id2}}) self.stale_lport_dhcpv4_options.append(stale_dhcpv4_options2) self.orphaned_lport_dhcp_options.append(fake_port_id2) stale_dhcpv6_options1 = { 'subnet_id': n3_s2['subnet']['id'], 'port_id': fake_port_id1, 'cidr': '2001:dbc::/64', 'options': {'server_id': dhcp_mac_v6, 'domain-search': 'foo-domain'}, 'external_ids': {'subnet_id': n3_s2['subnet']['id'], 'port_id': fake_port_id1}} self.stale_lport_dhcpv6_options.append(stale_dhcpv6_options1) stale_dhcpv6_options2 = stale_dhcpv6_options1.copy() stale_dhcpv6_options2.update({ 'port_id': fake_port_id2, 'external_ids': {'subnet_id': n3_s2['subnet']['id'], 'port_id': fake_port_id2}}) self.stale_lport_dhcpv6_options.append(stale_dhcpv6_options2) fake_port = {'id': fake_port_id1, 'network_id': n3['network']['id']} dhcp_acls = acl_utils.add_acl_dhcp(fake_port, n3_s1['subnet']) for dhcp_acl in dhcp_acls: self.create_acls.append(dhcp_acl) columns = list(self.nb_api.tables['ACL'].columns) if not (('name' in columns) and ('severity' in columns)): for acl in self.create_acls: acl.pop('name') acl.pop('severity') def _modify_resources_in_nb_db(self): self._delete_metadata_ports() with self.nb_api.transaction(check_error=True) as txn: for lswitch_name in self.create_lswitches: external_ids = {ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY: lswitch_name} txn.add(self.nb_api.ls_add(lswitch_name, True, external_ids=external_ids)) for lswitch_name in 
self.delete_lswitches: txn.add(self.nb_api.ls_del(lswitch_name, True)) for lport_name, lswitch_name in self.create_lswitch_ports: external_ids = {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: lport_name} txn.add(self.nb_api.create_lswitch_port( lport_name, lswitch_name, True, external_ids=external_ids)) for lport_name, lswitch_name in self.delete_lswitch_ports: txn.add(self.nb_api.delete_lswitch_port(lport_name, lswitch_name, True)) for lrouter_name in self.create_lrouters: external_ids = {ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: lrouter_name} txn.add(self.nb_api.create_lrouter(lrouter_name, True, external_ids=external_ids)) for lrouter_name in self.delete_lrouters: txn.add(self.nb_api.delete_lrouter(lrouter_name, True)) for lrport, lrouter_name in self.create_lrouter_ports: txn.add(self.nb_api.add_lrouter_port(lrport, lrouter_name)) for lrport, lrouter_name, networks in self.update_lrouter_ports: txn.add(self.nb_api.update_lrouter_port( lrport, True, **{'networks': [networks], 'ipv6_ra_configs': {'foo': 'bar'}})) for lrport, lrouter_name in self.delete_lrouter_ports: txn.add(self.nb_api.delete_lrouter_port(lrport, lrouter_name, True)) for lrouter_name, ip_prefix, nexthop in self.create_lrouter_routes: txn.add(self.nb_api.add_static_route(lrouter_name, ip_prefix=ip_prefix, nexthop=nexthop)) for lrouter_name, ip_prefix, nexthop in self.delete_lrouter_routes: txn.add(self.nb_api.delete_static_route(lrouter_name, ip_prefix, nexthop, True)) for lrouter_name, nat_dict in( self.create_lrouter_nats): txn.add(self.nb_api.add_nat_rule_in_lrouter( lrouter_name, **nat_dict)) for lrouter_name, nat_dict in( self.delete_lrouter_nats): txn.add(self.nb_api.delete_nat_rule_in_lrouter( lrouter_name, if_exists=True, **nat_dict)) for acl in self.create_acls: txn.add(self.nb_api.add_acl(**acl)) for lport_name, lswitch_name in self.delete_acls: txn.add(self.nb_api.delete_acl(lswitch_name, lport_name, True)) for name, ip_version in self.create_address_sets: ovn_name = utils.ovn_addrset_name(name, ip_version) external_ids = {ovn_const.OVN_SG_EXT_ID_KEY: name} txn.add(self.nb_api.create_address_set( ovn_name, True, external_ids=external_ids)) for name, ip_version in self.delete_address_sets: ovn_name = utils.ovn_addrset_name(name, ip_version) txn.add(self.nb_api.delete_address_set(ovn_name, True)) for name, ip_version, ip_adds, ip_dels in self.update_address_sets: ovn_name = utils.ovn_addrset_name(name, ip_version) txn.add(self.nb_api.update_address_set(ovn_name, ip_adds, ip_dels, True)) if self.nb_api.is_port_groups_supported(): for pg in self.create_port_groups: txn.add(self.nb_api.pg_add(**pg)) for pg in self.delete_port_groups: txn.add(self.nb_api.pg_del(pg)) for lport_name in self.reset_lport_dhcpv4_options: txn.add(self.nb_api.set_lswitch_port(lport_name, True, dhcpv4_options=[])) for lport_name in self.reset_lport_dhcpv6_options: txn.add(self.nb_api.set_lswitch_port(lport_name, True, dhcpv6_options=[])) for dhcp_opts in self.stale_lport_dhcpv4_options: dhcpv4_opts = txn.add(self.nb_api.add_dhcp_options( dhcp_opts['subnet_id'], port_id=dhcp_opts['port_id'], cidr=dhcp_opts['cidr'], options=dhcp_opts['options'], external_ids=dhcp_opts['external_ids'], may_exist=False)) if dhcp_opts['port_id'] in self.orphaned_lport_dhcp_options: continue txn.add(self.nb_api.set_lswitch_port( lport_name, True, dhcpv4_options=dhcpv4_opts)) for dhcp_opts in self.stale_lport_dhcpv6_options: dhcpv6_opts = txn.add(self.nb_api.add_dhcp_options( dhcp_opts['subnet_id'], port_id=dhcp_opts['port_id'], cidr=dhcp_opts['cidr'], 
options=dhcp_opts['options'], external_ids=dhcp_opts['external_ids'], may_exist=False)) if dhcp_opts['port_id'] in self.orphaned_lport_dhcp_options: continue txn.add(self.nb_api.set_lswitch_port( lport_name, True, dhcpv6_options=dhcpv6_opts)) for row_uuid in self.missed_dhcp_options: txn.add(self.nb_api.delete_dhcp_options(row_uuid)) for dhcp_opts in self.dirty_dhcp_options: external_ids = {'subnet_id': dhcp_opts['subnet_id']} if dhcp_opts.get('port_id'): external_ids['port_id'] = dhcp_opts['port_id'] txn.add(self.nb_api.add_dhcp_options( dhcp_opts['subnet_id'], port_id=dhcp_opts.get('port_id'), external_ids=external_ids, options={'foo': 'bar'})) for port_id in self.lport_dhcpv4_disabled: txn.add(self.nb_api.set_lswitch_port( port_id, True, dhcpv4_options=[self.lport_dhcpv4_disabled[port_id]])) for port_id in self.lport_dhcpv6_disabled: txn.add(self.nb_api.set_lswitch_port( port_id, True, dhcpv6_options=[self.lport_dhcpv6_disabled[port_id]])) # Delete the first DNS record and clear the second row records i = 0 for dns_row in self.nb_api.tables['DNS'].rows.values(): if i == 0: txn.add(self.nb_api.dns_del(dns_row.uuid)) else: txn.add(self.nb_api.dns_set_records(dns_row.uuid, **{})) i += 1 def _validate_networks(self, should_match=True): db_networks = self._list('networks') db_net_ids = [net['id'] for net in db_networks['networks']] db_provnet_ports = [utils.ovn_provnet_port_name(net['id']) for net in db_networks['networks'] if net.get('provider:physical_network')] # Get the list of lswitch ids stored in the OVN plugin IDL _plugin_nb_ovn = self.mech_driver._nb_ovn plugin_lswitch_ids = [ row.name.replace('neutron-', '') for row in ( _plugin_nb_ovn._tables['Logical_Switch'].rows.values())] # Get the list of lswitch ids stored in the monitor IDL connection monitor_lswitch_ids = [ row.name.replace('neutron-', '') for row in ( self.nb_api.tables['Logical_Switch'].rows.values())] # Get the list of provnet ports stored in the OVN plugin IDL plugin_provnet_ports = [row.name for row in ( _plugin_nb_ovn._tables['Logical_Switch_Port'].rows.values()) if row.name.startswith(ovn_const.OVN_PROVNET_PORT_NAME_PREFIX)] # Get the list of provnet ports stored in the monitor IDL connection monitor_provnet_ports = [row.name for row in ( self.nb_api.tables['Logical_Switch_Port'].rows.values()) if row.name.startswith(ovn_const.OVN_PROVNET_PORT_NAME_PREFIX)] if should_match: self.assertItemsEqual(db_net_ids, plugin_lswitch_ids) self.assertItemsEqual(db_net_ids, monitor_lswitch_ids) self.assertItemsEqual(db_provnet_ports, plugin_provnet_ports) self.assertItemsEqual(db_provnet_ports, monitor_provnet_ports) else: self.assertRaises( AssertionError, self.assertItemsEqual, db_net_ids, plugin_lswitch_ids) self.assertRaises( AssertionError, self.assertItemsEqual, db_net_ids, monitor_lswitch_ids) self.assertRaises( AssertionError, self.assertItemsEqual, db_provnet_ports, plugin_provnet_ports) self.assertRaises( AssertionError, self.assertItemsEqual, db_provnet_ports, monitor_provnet_ports) def _validate_metadata_ports(self, should_match=True): """Validate metadata ports. This method will check that all networks have one and only one metadata port and that every metadata port in Neutron also exists in OVN. 
""" db_ports = self._list('ports') db_metadata_ports_ids = set() db_metadata_ports_nets = set() for port in db_ports['ports']: if (port['device_owner'] == constants.DEVICE_OWNER_DHCP and port['device_id'].startswith('ovnmeta')): db_metadata_ports_ids.add(port['id']) db_metadata_ports_nets.add(port['network_id']) db_networks = self._list('networks') db_net_ids = {net['id'] for net in db_networks['networks']} # Retrieve all localports in OVN _plugin_nb_ovn = self.mech_driver._nb_ovn plugin_metadata_ports = [row.name for row in ( _plugin_nb_ovn._tables['Logical_Switch_Port'].rows.values()) if row.type == 'localport'] if should_match: # Check that metadata ports exist in both Neutron and OVN dbs. self.assertItemsEqual(db_metadata_ports_ids, plugin_metadata_ports) # Check that all networks have one and only one metadata port. self.assertItemsEqual(db_metadata_ports_nets, db_net_ids) else: metadata_sync = (sorted(db_metadata_ports_ids) == sorted(plugin_metadata_ports)) metadata_unique = (sorted(db_net_ids) == sorted(db_metadata_ports_nets)) self.assertFalse(metadata_sync and metadata_unique) def _validate_ports(self, should_match=True): db_ports = self._list('ports') db_port_ids = [port['id'] for port in db_ports['ports'] if not utils.is_lsp_ignored(port)] db_port_ids_dhcp_valid = set( port['id'] for port in db_ports['ports'] if not utils.is_network_device_port(port) and port['id'] not in self.lport_dhcp_ignored) _plugin_nb_ovn = self.mech_driver._nb_ovn plugin_lport_ids = [ row.name for row in ( _plugin_nb_ovn._tables['Logical_Switch_Port'].rows.values()) if ovn_const.OVN_PORT_NAME_EXT_ID_KEY in row.external_ids] plugin_lport_ids_dhcpv4_enabled = [ row.name for row in ( _plugin_nb_ovn._tables['Logical_Switch_Port'].rows.values()) if row.dhcpv4_options] plugin_lport_ids_dhcpv6_enabled = [ row.name for row in ( _plugin_nb_ovn._tables['Logical_Switch_Port'].rows.values()) if row.dhcpv6_options] monitor_lport_ids = [ row.name for row in ( self.nb_api.tables['Logical_Switch_Port']. rows.values()) if ovn_const.OVN_PORT_NAME_EXT_ID_KEY in row.external_ids] monitor_lport_ids_dhcpv4_enabled = [ row.name for row in ( _plugin_nb_ovn._tables['Logical_Switch_Port'].rows.values()) if row.dhcpv4_options] monitor_lport_ids_dhcpv6_enabled = [ row.name for row in ( _plugin_nb_ovn._tables['Logical_Switch_Port'].rows.values()) if row.dhcpv6_options] if should_match: self.assertItemsEqual(db_port_ids, plugin_lport_ids) self.assertItemsEqual(db_port_ids, monitor_lport_ids) expected_dhcpv4_options_ports_ids = ( db_port_ids_dhcp_valid.difference( set(self.lport_dhcpv4_disabled.keys()))) self.assertItemsEqual(expected_dhcpv4_options_ports_ids, plugin_lport_ids_dhcpv4_enabled) self.assertItemsEqual(expected_dhcpv4_options_ports_ids, monitor_lport_ids_dhcpv4_enabled) expected_dhcpv6_options_ports_ids = ( db_port_ids_dhcp_valid.difference( set(self.lport_dhcpv6_disabled.keys()))) self.assertItemsEqual(expected_dhcpv6_options_ports_ids, plugin_lport_ids_dhcpv6_enabled) self.assertItemsEqual(expected_dhcpv6_options_ports_ids, monitor_lport_ids_dhcpv6_enabled) # Check if unknow address is set for the expected lports. 
for row in ( self.nb_api.tables['Logical_Switch_Port'].rows.values()): if row.name in self.expected_ports_with_unknown_addr: self.assertIn('unknown', row.addresses) else: self.assertRaises( AssertionError, self.assertItemsEqual, db_port_ids, plugin_lport_ids) self.assertRaises( AssertionError, self.assertItemsEqual, db_port_ids, monitor_lport_ids) self.assertRaises( AssertionError, self.assertItemsEqual, db_port_ids, plugin_lport_ids_dhcpv4_enabled) self.assertRaises( AssertionError, self.assertItemsEqual, db_port_ids, monitor_lport_ids_dhcpv4_enabled) @staticmethod def _build_acl_for_pgs(priority, direction, log, name, action, severity, match, port_group, **kwargs): return { 'priority': priority, 'direction': direction, 'log': log, 'name': name, 'action': action, 'severity': severity, 'match': match, 'external_ids': kwargs} def _validate_dhcp_opts(self, should_match=True): observed_plugin_dhcp_options_rows = [] _plugin_nb_ovn = self.mech_driver._nb_ovn for row in _plugin_nb_ovn._tables['DHCP_Options'].rows.values(): opts = dict(row.options) ids = dict(row.external_ids) if ids.get('subnet_id') not in self.match_old_mac_dhcp_subnets: if 'server_mac' in opts: opts['server_mac'] = '01:02:03:04:05:06' else: opts['server_id'] = '01:02:03:04:05:06' observed_plugin_dhcp_options_rows.append({ 'cidr': row.cidr, 'external_ids': row.external_ids, 'options': opts}) observed_monitor_dhcp_options_rows = [] for row in self.nb_api.tables['DHCP_Options'].rows.values(): opts = dict(row.options) ids = dict(row.external_ids) if ids.get('subnet_id') not in self.match_old_mac_dhcp_subnets: if 'server_mac' in opts: opts['server_mac'] = '01:02:03:04:05:06' else: opts['server_id'] = '01:02:03:04:05:06' observed_monitor_dhcp_options_rows.append({ 'cidr': row.cidr, 'external_ids': row.external_ids, 'options': opts}) if should_match: self.assertItemsEqual(self.expected_dhcp_options_rows, observed_plugin_dhcp_options_rows) self.assertItemsEqual(self.expected_dhcp_options_rows, observed_monitor_dhcp_options_rows) else: self.assertRaises( AssertionError, self.assertItemsEqual, self.expected_dhcp_options_rows, observed_plugin_dhcp_options_rows) self.assertRaises( AssertionError, self.assertItemsEqual, self.expected_dhcp_options_rows, observed_monitor_dhcp_options_rows) def _build_acl_to_compare(self, acl, extra_fields=None): acl_to_compare = {} for acl_key in getattr(acl, "_data", {}): try: acl_to_compare[acl_key] = getattr(acl, acl_key) except AttributeError: pass return acl_utils.filter_acl_dict(acl_to_compare, extra_fields) def _validate_acls(self, should_match=True): # Get the neutron DB ACLs. db_acls = [] sg_cache = {} subnet_cache = {} _plugin_nb_ovn = self.mech_driver._nb_ovn if not _plugin_nb_ovn.is_port_groups_supported(): for db_port in self._list('ports')['ports']: acls = acl_utils.add_acls(self.plugin, context.get_admin_context(), db_port, sg_cache, subnet_cache, self.mech_driver._nb_ovn) for acl in acls: db_acls.append(acl_utils.filter_acl_dict(acl)) else: # ACLs due to SGs and default drop port group for sg in self._list('security-groups')['security_groups']: for sgr in sg['security_group_rules']: acl = acl_utils._add_sg_rule_acl_for_port_group( utils.ovn_port_group_name(sg['id']), sgr, self.mech_driver._nb_ovn) db_acls.append(TestOvnNbSync._build_acl_for_pgs(**acl)) for acl in acl_utils.add_acls_for_drop_port_group( ovn_const.OVN_DROP_PORT_GROUP_NAME): db_acls.append(TestOvnNbSync._build_acl_for_pgs(**acl)) # Get the list of ACLs stored in the OVN plugin IDL. 
plugin_acls = [] for row in _plugin_nb_ovn._tables['Logical_Switch'].rows.values(): for acl in getattr(row, 'acls', []): plugin_acls.append(self._build_acl_to_compare(acl)) if self.nb_api.is_port_groups_supported(): for row in _plugin_nb_ovn._tables['Port_Group'].rows.values(): for acl in getattr(row, 'acls', []): plugin_acls.append( self._build_acl_to_compare( acl, extra_fields=['external_ids'])) # Get the list of ACLs stored in the OVN monitor IDL. monitor_acls = [] for row in self.nb_api.tables['Logical_Switch'].rows.values(): for acl in getattr(row, 'acls', []): monitor_acls.append(self._build_acl_to_compare(acl)) if _plugin_nb_ovn.is_port_groups_supported(): for row in self.nb_api.tables['Port_Group'].rows.values(): for acl in getattr(row, 'acls', []): monitor_acls.append(self._build_acl_to_compare(acl)) if should_match: self.assertItemsEqual(db_acls, plugin_acls) self.assertItemsEqual(db_acls, monitor_acls) else: self.assertRaises( AssertionError, self.assertItemsEqual, db_acls, plugin_acls) self.assertRaises( AssertionError, self.assertItemsEqual, db_acls, monitor_acls) def _validate_routers_and_router_ports(self, should_match=True): db_routers = self._list('routers') db_router_ids = [] db_routes = {} db_nats = {} for db_router in db_routers['routers']: db_router_ids.append(db_router['id']) db_routes[db_router['id']] = [db_route['destination'] + db_route['nexthop'] for db_route in db_router['routes']] db_nats[db_router['id']] = [] if db_router.get(l3.EXTERNAL_GW_INFO): gateways = self.l3_plugin._ovn_client._get_gw_info( self.context, db_router) for gw_info in gateways: # Add gateway default route and snats if gw_info.gateway_ip: db_routes[db_router['id']].append(gw_info.ip_prefix + gw_info.gateway_ip) if (gw_info.ip_version == constants.IP_VERSION_4 and gw_info.router_ip and utils.is_snat_enabled(db_router)): networks = self.l3_plugin._ovn_client.\ _get_v4_network_of_all_router_ports( self.context, db_router['id']) db_nats[db_router['id']].extend( [gw_info.router_ip + network + 'snat' for network in networks]) fips = self._list('floatingips') fip_macs = {} if ovn_config.is_ovn_distributed_floating_ip(): params = 'device_owner=%s' % constants.DEVICE_OWNER_FLOATINGIP fports = self._list('ports', query_params=params)['ports'] fip_macs = {p['device_id']: p['mac_address'] for p in fports if p['device_id']} for fip in fips['floatingips']: if fip['router_id']: mac_address = '' fip_port = '' if fip['id'] in fip_macs: fip_port = fip['port_id'] db_nats[fip['router_id']].append( fip['floating_ip_address'] + fip['fixed_ip_address'] + 'dnat_and_snat' + mac_address + fip_port) _plugin_nb_ovn = self.mech_driver._nb_ovn plugin_lrouter_ids = [ row.name.replace('neutron-', '') for row in ( _plugin_nb_ovn._tables['Logical_Router'].rows.values())] monitor_lrouter_ids = [ row.name.replace('neutron-', '') for row in ( self.nb_api.tables['Logical_Router'].rows.values())] if should_match: self.assertItemsEqual(db_router_ids, plugin_lrouter_ids) self.assertItemsEqual(db_router_ids, monitor_lrouter_ids) else: self.assertRaises( AssertionError, self.assertItemsEqual, db_router_ids, plugin_lrouter_ids) self.assertRaises( AssertionError, self.assertItemsEqual, db_router_ids, monitor_lrouter_ids) def _get_networks_for_router_port(port_fixed_ips): _ovn_client = self.l3_plugin._ovn_client networks, _ = ( _ovn_client._get_nets_and_ipv6_ra_confs_for_router_port( self.ctx, port_fixed_ips)) return networks def _get_ipv6_ra_configs_for_router_port(port_fixed_ips): _ovn_client = self.l3_plugin._ovn_client networks, 
ipv6_ra_configs = ( _ovn_client._get_nets_and_ipv6_ra_confs_for_router_port( self.ctx, port_fixed_ips)) return ipv6_ra_configs for router_id in db_router_ids: r_ports = self._list('ports', query_params='device_id=%s' % (router_id)) r_port_ids = [p['id'] for p in r_ports['ports']] r_port_networks = { p['id']: _get_networks_for_router_port(p['fixed_ips']) for p in r_ports['ports']} r_port_ipv6_ra_configs = { p['id']: _get_ipv6_ra_configs_for_router_port(p['fixed_ips']) for p in r_ports['ports']} r_routes = db_routes[router_id] r_nats = db_nats[router_id] try: lrouter = idlutils.row_by_value( self.mech_driver._nb_ovn.idl, 'Logical_Router', 'name', 'neutron-' + str(router_id), None) lports = getattr(lrouter, 'ports', []) plugin_lrouter_port_ids = [lport.name.replace('lrp-', '') for lport in lports] plugin_lport_networks = { lport.name.replace('lrp-', ''): lport.networks for lport in lports} plugin_lport_ra_configs = { lport.name.replace('lrp-', ''): lport.ipv6_ra_configs for lport in lports} sroutes = getattr(lrouter, 'static_routes', []) plugin_routes = [sroute.ip_prefix + sroute.nexthop for sroute in sroutes] nats = getattr(lrouter, 'nat', []) plugin_nats = [ nat.external_ip + nat.logical_ip + nat.type + (nat.external_mac[0] if nat.external_mac else '') + (nat.logical_port[0] if nat.logical_port else '') for nat in nats] except idlutils.RowNotFound: plugin_lrouter_port_ids = [] plugin_routes = [] plugin_nats = [] try: lrouter = idlutils.row_by_value( self.nb_api.idl, 'Logical_Router', 'name', 'neutron-' + router_id, None) lports = getattr(lrouter, 'ports', []) monitor_lrouter_port_ids = [lport.name.replace('lrp-', '') for lport in lports] monitor_lport_networks = { lport.name.replace('lrp-', ''): lport.networks for lport in lports} monitor_lport_ra_configs = { lport.name.replace('lrp-', ''): lport.ipv6_ra_configs for lport in lports} sroutes = getattr(lrouter, 'static_routes', []) monitor_routes = [sroute.ip_prefix + sroute.nexthop for sroute in sroutes] nats = getattr(lrouter, 'nat', []) monitor_nats = [ nat.external_ip + nat.logical_ip + nat.type + (nat.external_mac[0] if nat.external_mac else '') + (nat.logical_port[0] if nat.logical_port else '') for nat in nats] except idlutils.RowNotFound: monitor_lrouter_port_ids = [] monitor_routes = [] monitor_nats = [] if should_match: self.assertItemsEqual(r_port_ids, plugin_lrouter_port_ids) self.assertItemsEqual(r_port_ids, monitor_lrouter_port_ids) for p in plugin_lport_networks: self.assertItemsEqual(r_port_networks[p], plugin_lport_networks[p]) self.assertItemsEqual(r_port_ipv6_ra_configs[p], plugin_lport_ra_configs[p]) for p in monitor_lport_networks: self.assertItemsEqual(r_port_networks[p], monitor_lport_networks[p]) self.assertItemsEqual(r_port_ipv6_ra_configs[p], monitor_lport_ra_configs[p]) self.assertItemsEqual(r_routes, plugin_routes) self.assertItemsEqual(r_routes, monitor_routes) self.assertItemsEqual(r_nats, plugin_nats) self.assertItemsEqual(r_nats, monitor_nats) else: self.assertRaises( AssertionError, self.assertItemsEqual, r_port_ids, plugin_lrouter_port_ids) self.assertRaises( AssertionError, self.assertItemsEqual, r_port_ids, monitor_lrouter_port_ids) for _p in self.update_lrouter_ports: p = _p[0].replace('lrp-', '') if p in plugin_lport_networks: self.assertRaises( AssertionError, self.assertItemsEqual, r_port_networks[p], plugin_lport_networks[p]) self.assertRaises( AssertionError, self.assertItemsEqual, r_port_ipv6_ra_configs[p], plugin_lport_ra_configs[p]) if p in monitor_lport_networks: self.assertRaises( AssertionError, 
self.assertItemsEqual, r_port_networks[p], monitor_lport_networks[p]) self.assertRaises( AssertionError, self.assertItemsEqual, r_port_ipv6_ra_configs[p], monitor_lport_ra_configs[p]) self.assertRaises( AssertionError, self.assertItemsEqual, r_routes, plugin_routes) self.assertRaises( AssertionError, self.assertItemsEqual, r_routes, monitor_routes) self.assertRaises( AssertionError, self.assertItemsEqual, r_nats, plugin_nats) self.assertRaises( AssertionError, self.assertItemsEqual, r_nats, monitor_nats) def _validate_address_sets(self, should_match=True): _plugin_nb_ovn = self.mech_driver._nb_ovn if _plugin_nb_ovn.is_port_groups_supported(): # If Port Groups are supported, no Address Sets are expected. # This validation is still useful as we expect existing ones to # be deleted after the sync. db_sgs = [] else: db_ports = self._list('ports')['ports'] sgs = self._list('security-groups')['security_groups'] db_sgs = {} for sg in sgs: for ip_version in ['ip4', 'ip6']: name = utils.ovn_addrset_name(sg['id'], ip_version) db_sgs[name] = [] for port in db_ports: sg_ids = utils.get_lsp_security_groups(port) addresses = acl_utils.acl_port_ips(port) for sg_id in sg_ids: for ip_version in addresses: name = utils.ovn_addrset_name(sg_id, ip_version) db_sgs[name].extend(addresses[ip_version]) nb_address_sets = _plugin_nb_ovn.get_address_sets() nb_sgs = {} for nb_sgid, nb_values in nb_address_sets.items(): nb_sgs[nb_sgid] = nb_values['addresses'] mn_sgs = {} for row in self.nb_api.tables['Address_Set'].rows.values(): mn_sgs[getattr(row, 'name')] = getattr(row, 'addresses') if should_match: self.assertItemsEqual(nb_sgs, db_sgs) self.assertItemsEqual(mn_sgs, db_sgs) else: # This condition is to cover the case when we use Port Groups # and we completely deleted the NB DB. At this point, the expected # number of Address Sets is 0 and the observed number in NB is # also 0 so we can't have the asserts below as both will be empty. if _plugin_nb_ovn.is_port_groups_supported() and nb_sgs: self.assertRaises(AssertionError, self.assertItemsEqual, nb_sgs, db_sgs) self.assertRaises(AssertionError, self.assertItemsEqual, mn_sgs, db_sgs) def _validate_port_groups(self, should_match=True): _plugin_nb_ovn = self.mech_driver._nb_ovn if not _plugin_nb_ovn.is_port_groups_supported(): return db_pgs = [] for sg in self._list('security-groups')['security_groups']: db_pgs.append(utils.ovn_port_group_name(sg['id'])) db_pgs.append(ovn_const.OVN_DROP_PORT_GROUP_NAME) nb_pgs = _plugin_nb_ovn.get_port_groups() mn_pgs = [] for row in self.nb_api.tables['Port_Group'].rows.values(): mn_pgs.append(getattr(row, 'name', '')) if should_match: self.assertItemsEqual(nb_pgs, db_pgs) self.assertItemsEqual(mn_pgs, db_pgs) else: self.assertRaises(AssertionError, self.assertItemsEqual, nb_pgs, db_pgs) self.assertRaises(AssertionError, self.assertItemsEqual, mn_pgs, db_pgs) def _delete_metadata_ports(self): """Delete some metadata ports. This method will delete one half of the metadata ports from Neutron and the remaining ones only from OVN. This way we can exercise the metadata sync completely: i.e., that metadata ports are recreated in Neutron when missing and that the corresponding OVN localports are also created.
""" db_ports = self._list('ports') db_metadata_ports = [port for port in db_ports['ports'] if port['device_owner'] == constants.DEVICE_OWNER_DHCP and port['device_id'].startswith('ovnmeta')] lswitches = {} ports_to_delete = len(db_metadata_ports) // 2 for port in db_metadata_ports: lswitches[port['id']] = 'neutron-' + port['network_id'] if ports_to_delete: self._delete('ports', port['id']) ports_to_delete -= 1 _plugin_nb_ovn = self.mech_driver._nb_ovn plugin_metadata_ports = [row.name for row in ( _plugin_nb_ovn._tables['Logical_Switch_Port'].rows.values()) if row.type == 'localport'] with self.nb_api.transaction(check_error=True) as txn: for port in plugin_metadata_ports: txn.add(self.nb_api.delete_lswitch_port(port, lswitches[port], True)) def _validate_dns_records(self, should_match=True): observed_dns_records = [] for dns_row in self.nb_api.tables['DNS'].rows.values(): observed_dns_records.append( {'external_ids': dns_row.external_ids, 'records': dns_row.records}) if should_match: self.assertItemsEqual(self.expected_dns_records, observed_dns_records) else: self.assertRaises(AssertionError, self.assertItemsEqual, self.expected_dns_records, observed_dns_records) def _validate_resources(self, should_match=True): self._validate_networks(should_match=should_match) self._validate_metadata_ports(should_match=should_match) self._validate_ports(should_match=should_match) self._validate_dhcp_opts(should_match=should_match) self._validate_acls(should_match=should_match) self._validate_routers_and_router_ports(should_match=should_match) self._validate_address_sets(should_match=should_match) self._validate_port_groups(should_match=should_match) self._validate_dns_records(should_match=should_match) def _sync_resources(self, mode): nb_synchronizer = ovn_db_sync.OvnNbSynchronizer( self.plugin, self.mech_driver._nb_ovn, self.mech_driver._sb_ovn, mode, self.mech_driver) self.addCleanup(nb_synchronizer.stop) nb_synchronizer.do_sync() def _test_ovn_nb_sync_helper(self, mode, modify_resources=True, restart_ovsdb_processes=False, should_match_after_sync=True): self._create_resources(restart_ovsdb_processes) self._validate_resources(should_match=True) if modify_resources: self._modify_resources_in_nb_db() if restart_ovsdb_processes: # Restart the ovsdb-server and plugin idl. # This causes a new ovsdb-server to be started with empty # OVN NB DB self.restart() if modify_resources or restart_ovsdb_processes: self._validate_resources(should_match=False) self._sync_resources(mode) self._validate_resources(should_match=should_match_after_sync) def test_ovn_nb_sync_repair(self): self._test_ovn_nb_sync_helper('repair') def test_ovn_nb_sync_repair_delete_ovn_nb_db(self): # In this test case, the ovsdb-server for OVN NB DB is restarted # with empty OVN NB DB.
self._test_ovn_nb_sync_helper('repair', modify_resources=False, restart_ovsdb_processes=True) def test_ovn_nb_sync_log(self): self._test_ovn_nb_sync_helper('log', should_match_after_sync=False) def test_ovn_nb_sync_off(self): self._test_ovn_nb_sync_helper('off', should_match_after_sync=False) class TestOvnSbSync(base.TestOVNFunctionalBase): def setUp(self): super(TestOvnSbSync, self).setUp(maintenance_worker=True) self.segments_plugin = directory.get_plugin('segments') self.sb_synchronizer = ovn_db_sync.OvnSbSynchronizer( self.plugin, self.mech_driver._sb_ovn, self.mech_driver) self.addCleanup(self.sb_synchronizer.stop) self.ctx = context.get_admin_context() def get_additional_service_plugins(self): p = super(TestOvnSbSync, self).get_additional_service_plugins() p.update({'segments': 'neutron.services.segments.plugin.Plugin'}) return p def _sync_resources(self): self.sb_synchronizer.sync_hostname_and_physical_networks(self.ctx) def create_segment(self, network_id, physical_network, segmentation_id): segment_data = {'network_id': network_id, 'physical_network': physical_network, 'segmentation_id': segmentation_id, 'network_type': 'vlan', 'name': constants.ATTR_NOT_SPECIFIED, 'description': constants.ATTR_NOT_SPECIFIED} return self.segments_plugin.create_segment( self.ctx, segment={'segment': segment_data}) def test_ovn_sb_sync_add_new_host(self): with self.network() as network: network_id = network['network']['id'] self.create_segment(network_id, 'physnet1', 50) self.add_fake_chassis('host1', ['physnet1']) segment_hosts = segments_db.get_hosts_mapped_with_segments(self.ctx) self.assertFalse(segment_hosts) self._sync_resources() segment_hosts = segments_db.get_hosts_mapped_with_segments(self.ctx) self.assertEqual({'host1'}, segment_hosts) def test_ovn_sb_sync_update_existing_host(self): with self.network() as network: network_id = network['network']['id'] segment = self.create_segment(network_id, 'physnet1', 50) segments_db.update_segment_host_mapping( self.ctx, 'host1', {segment['id']}) segment_hosts = segments_db.get_hosts_mapped_with_segments(self.ctx) self.assertEqual({'host1'}, segment_hosts) self.add_fake_chassis('host1', ['physnet2']) self._sync_resources() segment_hosts = segments_db.get_hosts_mapped_with_segments(self.ctx) self.assertFalse(segment_hosts) def test_ovn_sb_sync_delete_stale_host(self): with self.network() as network: network_id = network['network']['id'] segment = self.create_segment(network_id, 'physnet1', 50) segments_db.update_segment_host_mapping( self.ctx, 'host1', {segment['id']}) segment_hosts = segments_db.get_hosts_mapped_with_segments(self.ctx) self.assertEqual({'host1'}, segment_hosts) # Since there is no chassis in the sb DB, host1 is the stale host # recorded in neutron DB. It should be deleted after sync. 
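# sync_hostname_and_physical_networks() rebuilds the host/segment # mappings from the chassis present in the OVN SB DB, so a host with no # backing chassis should be dropped from the Neutron DB.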
self._sync_resources() segment_hosts = segments_db.get_hosts_mapped_with_segments(self.ctx) self.assertFalse(segment_hosts) def test_ovn_sb_sync(self): with self.network() as network: network_id = network['network']['id'] seg1 = self.create_segment(network_id, 'physnet1', 50) self.create_segment(network_id, 'physnet2', 51) segments_db.update_segment_host_mapping( self.ctx, 'host1', {seg1['id']}) segments_db.update_segment_host_mapping( self.ctx, 'host2', {seg1['id']}) segments_db.update_segment_host_mapping( self.ctx, 'host3', {seg1['id']}) segment_hosts = segments_db.get_hosts_mapped_with_segments(self.ctx) self.assertEqual({'host1', 'host2', 'host3'}, segment_hosts) self.add_fake_chassis('host2', ['physnet2']) self.add_fake_chassis('host3', ['physnet3']) self.add_fake_chassis('host4', ['physnet1']) self._sync_resources() segment_hosts = segments_db.get_hosts_mapped_with_segments(self.ctx) # host1 should be cleared since it is not in the chassis DB. host3 # should be cleared since there is no segment for mapping. self.assertEqual({'host2', 'host4'}, segment_hosts) class TestOvnNbSyncOverTcp(TestOvnNbSync): def get_ovsdb_server_protocol(self): return 'tcp' class TestOvnSbSyncOverTcp(TestOvnSbSync): def get_ovsdb_server_protocol(self): return 'tcp' class TestOvnNbSyncOverSsl(TestOvnNbSync): def get_ovsdb_server_protocol(self): return 'ssl' class TestOvnSbSyncOverSsl(TestOvnSbSync): def get_ovsdb_server_protocol(self): return 'ssl' @mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.impl_idl_ovn.' 'OvsdbNbOvnIdl.is_port_groups_supported', lambda *args: False) class TestOvnNbSyncNoPgs(TestOvnNbSync): pass ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovsdb_monitor.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ov0000644000175000017500000002313500000000000033612 0ustar00coreycorey00000000000000# Copyright 2020 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
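# These functional tests exercise the OVN OVSDB monitor: cleanup of # stale MAC_Binding rows when floating IPs are created or deleted, port # status transitions driven by Port_Binding events, and distribution of # events across workers via the OVN hash ring.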
import mock from oslo_utils import uuidutils from neutron.common.ovn import constants as ovn_const from neutron.common import utils as n_utils from neutron.db import ovn_hash_ring_db as db_hash_ring from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovsdb_monitor from neutron.tests.functional import base from neutron.tests.functional.resources.ovsdb import fixtures from neutron_lib.api.definitions import portbindings from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from ovsdbapp.backend.ovs_idl import event class WaitForMACBindingDeleteEvent(event.WaitEvent): event_name = 'WaitForMACBindingDeleteEvent' def __init__(self, entry): table = 'MAC_Binding' events = (self.ROW_DELETE,) conditions = (('_uuid', '=', entry),) super(WaitForMACBindingDeleteEvent, self).__init__( events, table, conditions, timeout=15) class DistributedLockTestEvent(event.WaitEvent): ONETIME = False COUNTER = 0 def __init__(self): table = 'Logical_Switch_Port' events = (self.ROW_CREATE,) super(DistributedLockTestEvent, self).__init__( events, table, (), timeout=15) self.event_name = 'DistributedLockTestEvent' def run(self, event, row, old): self.COUNTER += 1 self.event.set() class TestNBDbMonitor(base.TestOVNFunctionalBase): def setUp(self): super(TestNBDbMonitor, self).setUp() self.chassis = self.add_fake_chassis('ovs-host1') self.l3_plugin = directory.get_plugin(plugin_constants.L3) def create_port(self): net = self._make_network(self.fmt, 'net1', True) self._make_subnet(self.fmt, net, '20.0.0.1', '20.0.0.0/24', ip_version=4) arg_list = ('device_owner', 'device_id', portbindings.HOST_ID) host_arg = {'device_owner': 'compute:nova', 'device_id': uuidutils.generate_uuid(), portbindings.HOST_ID: 'ovs-host1'} port_res = self._create_port(self.fmt, net['network']['id'], arg_list=arg_list, **host_arg) port = self.deserialize(self.fmt, port_res)['port'] return port def _create_fip(self, port, fip_address): e1 = self._make_network(self.fmt, 'e1', True, arg_list=('router:external', 'provider:network_type', 'provider:physical_network'), **{'router:external': True, 'provider:network_type': 'flat', 'provider:physical_network': 'public'}) res = self._create_subnet(self.fmt, e1['network']['id'], '100.0.0.0/24', gateway_ip='100.0.0.254', allocation_pools=[{'start': '100.0.0.2', 'end': '100.0.0.253'}], enable_dhcp=False) e1_s1 = self.deserialize(self.fmt, res) r1 = self.l3_plugin.create_router( self.context, {'router': { 'name': 'r1', 'admin_state_up': True, 'tenant_id': self._tenant_id, 'external_gateway_info': { 'enable_snat': True, 'network_id': e1['network']['id'], 'external_fixed_ips': [ {'ip_address': '100.0.0.2', 'subnet_id': e1_s1['subnet']['id']}]}}}) self.l3_plugin.add_router_interface( self.context, r1['id'], {'subnet_id': port['fixed_ips'][0]['subnet_id']}) r1_f2 = self.l3_plugin.create_floatingip( self.context, {'floatingip': { 'tenant_id': self._tenant_id, 'floating_network_id': e1['network']['id'], 'subnet_id': None, 'floating_ip_address': fip_address, 'port_id': port['id']}}) return r1_f2 def test_floatingip_mac_bindings(self): """Check that MAC_Binding entries are cleared on FIP add/removal This test will: * Create a MAC_Binding entry for an IP address on the 'network1' datapath. * Create a FIP with that same IP address on an external network and associate it to a Neutron port on a private network. * Check that the MAC_Binding entry gets deleted. * Create a new MAC_Binding entry for the same IP address. * Delete the FIP.
* Check that the MAC_Binding entry gets deleted. """ self._make_network(self.fmt, 'network1', True) dp = self.sb_api.db_find( 'Datapath_Binding', ('external_ids', '=', {'name2': 'network1'})).execute() macb_id = self.sb_api.db_create('MAC_Binding', datapath=dp[0]['_uuid'], ip='100.0.0.21').execute() port = self.create_port() # Ensure that the MAC_Binding entry gets deleted after creating a FIP row_event = WaitForMACBindingDeleteEvent(macb_id) self.mech_driver._sb_ovn.idl.notify_handler.watch_event(row_event) fip = self._create_fip(port, '100.0.0.21') self.assertTrue(row_event.wait()) # Now that the FIP is created, add a new MAC_Binding entry with the # same IP address macb_id = self.sb_api.db_create('MAC_Binding', datapath=dp[0]['_uuid'], ip='100.0.0.21').execute() # Ensure that the MAC_Binding entry gets deleted after deleting the FIP row_event = WaitForMACBindingDeleteEvent(macb_id) self.mech_driver._sb_ovn.idl.notify_handler.watch_event(row_event) self.l3_plugin.delete_floatingip(self.context, fip['id']) self.assertTrue(row_event.wait()) def _test_port_binding_and_status(self, port_id, action, status): # This function binds or unbinds port to chassis and # checks if port status matches with input status core_plugin = directory.get_plugin() self.sb_api.check_for_row_by_value_and_retry( 'Port_Binding', 'logical_port', port_id) def check_port_status(status): port = core_plugin.get_ports( self.context, filters={'id': [port_id]})[0] return port['status'] == status if action == 'bind': self.sb_api.lsp_bind(port_id, self.chassis, may_exist=True).execute(check_error=True) else: self.sb_api.lsp_unbind(port_id).execute(check_error=True) n_utils.wait_until_true(lambda: check_port_status(status)) def test_port_up_down_events(self): """Test the port up down events. This test case creates a port, binds the port to chassis, tests if the ovsdb monitor calls mech_driver to set port status to 'ACTIVE'. 
Then unbinds the port and checks if the port status is set to 'DOWN' """ port = self.create_port() self._test_port_binding_and_status(port['id'], 'bind', 'ACTIVE') self._test_port_binding_and_status(port['id'], 'unbind', 'DOWN') def test_distributed_lock(self): row_event = DistributedLockTestEvent() self.mech_driver._nb_ovn.idl.notify_handler.watch_event(row_event) worker_list = [self.mech_driver._nb_ovn, ] # Create 10 fake workers for _ in range(10): node_uuid = uuidutils.generate_uuid() db_hash_ring.add_node( self.context, ovn_const.HASH_RING_ML2_GROUP, node_uuid) fake_driver = mock.MagicMock( node_uuid=node_uuid, hash_ring_group=ovn_const.HASH_RING_ML2_GROUP) _idl = ovsdb_monitor.OvnNbIdl.from_server( self.ovsdb_server_mgr.get_ovsdb_connection_path(), 'OVN_Northbound', fake_driver) worker = self.useFixture( fixtures.OVNIdlConnectionFixture( idl=_idl, timeout=10)).connection worker.idl.notify_handler.watch_event(row_event) worker.start() worker_list.append(worker) # Refresh the hash rings just in case [worker.idl._hash_ring.refresh() for worker in worker_list] # Assert we have 11 active workers in the ring self.assertEqual( 11, len(db_hash_ring.get_active_nodes( self.context, interval=ovn_const.HASH_RING_NODES_TIMEOUT, group_name=ovn_const.HASH_RING_ML2_GROUP))) # Trigger the event self.create_port() # Wait for the event to complete self.assertTrue(row_event.wait()) # Assert that only one worker handled the event self.assertEqual(1, row_event.COUNTER) class TestNBDbMonitorOverTcp(TestNBDbMonitor): def get_ovsdb_server_protocol(self): return 'tcp' class TestNBDbMonitorOverSsl(TestNBDbMonitor): def get_ovsdb_server_protocol(self): return 'ssl' ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/test_mech_dri0000644000175000017500000005702000000000000033623 0ustar00coreycorey00000000000000# Copyright 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
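# These functional tests cover the OVN ML2 mechanism driver: port # binding for OVS and vhost-user (DPDK) chassis, DHCP option updates on # network MTU changes, and the lifecycle of virtual and external logical # switch ports.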
import functools import mock from neutron_lib.api.definitions import portbindings from oslo_config import cfg from oslo_utils import uuidutils from neutron.common.ovn import constants as ovn_const from neutron.common.ovn import utils from neutron.common import utils as n_utils from neutron.db import ovn_revision_numbers_db as db_rev from neutron.tests import base as tests_base from neutron.tests.functional import base class TestPortBinding(base.TestOVNFunctionalBase): def setUp(self): super(TestPortBinding, self).setUp() self.ovs_host = 'ovs-host' self.dpdk_host = 'dpdk-host' self.invalid_dpdk_host = 'invalid-host' self.vhu_mode = 'server' self.add_fake_chassis(self.ovs_host) self.add_fake_chassis( self.dpdk_host, external_ids={'datapath-type': 'netdev', 'iface-types': 'dummy,dummy-internal,dpdkvhostuser'}) self.add_fake_chassis( self.invalid_dpdk_host, external_ids={'datapath-type': 'netdev', 'iface-types': 'dummy,dummy-internal,geneve,vxlan'}) self.n1 = self._make_network(self.fmt, 'n1', True) res = self._create_subnet(self.fmt, self.n1['network']['id'], '10.0.0.0/24') self.deserialize(self.fmt, res) def _create_or_update_port(self, port_id=None, hostname=None): if port_id is None: port_data = { 'port': {'network_id': self.n1['network']['id'], 'tenant_id': self._tenant_id}} if hostname: port_data['port']['device_id'] = uuidutils.generate_uuid() port_data['port']['device_owner'] = 'compute:None' port_data['port']['binding:host_id'] = hostname port_req = self.new_create_request('ports', port_data, self.fmt) port_res = port_req.get_response(self.api) p = self.deserialize(self.fmt, port_res) port_id = p['port']['id'] else: port_data = { 'port': {'device_id': uuidutils.generate_uuid(), 'device_owner': 'compute:None', 'binding:host_id': hostname}} port_req = self.new_update_request('ports', port_data, port_id, self.fmt) port_res = port_req.get_response(self.api) self.deserialize(self.fmt, port_res) return port_id def _verify_vif_details(self, port_id, expected_host_name, expected_vif_type, expected_vif_details): port_req = self.new_show_request('ports', port_id) port_res = port_req.get_response(self.api) p = self.deserialize(self.fmt, port_res) self.assertEqual(expected_host_name, p['port']['binding:host_id']) self.assertEqual(expected_vif_type, p['port']['binding:vif_type']) self.assertEqual(expected_vif_details, p['port']['binding:vif_details']) def test_port_binding_create_port(self): port_id = self._create_or_update_port(hostname=self.ovs_host) self._verify_vif_details(port_id, self.ovs_host, 'ovs', {'port_filter': True}) port_id = self._create_or_update_port(hostname=self.dpdk_host) expected_vif_details = {'port_filter': False, 'vhostuser_mode': self.vhu_mode, 'vhostuser_ovs_plug': True} expected_vif_details['vhostuser_socket'] = ( utils.ovn_vhu_sockpath(cfg.CONF.ovn.vhost_sock_dir, port_id)) self._verify_vif_details(port_id, self.dpdk_host, 'vhostuser', expected_vif_details) port_id = self._create_or_update_port(hostname=self.invalid_dpdk_host) self._verify_vif_details(port_id, self.invalid_dpdk_host, 'ovs', {'port_filter': True}) def test_port_binding_update_port(self): port_id = self._create_or_update_port() self._verify_vif_details(port_id, '', 'unbound', {}) port_id = self._create_or_update_port(port_id=port_id, hostname=self.ovs_host) self._verify_vif_details(port_id, self.ovs_host, 'ovs', {'port_filter': True}) port_id = self._create_or_update_port(port_id=port_id, hostname=self.dpdk_host) expected_vif_details = {'port_filter': False, 'vhostuser_mode': self.vhu_mode, 
'vhostuser_ovs_plug': True} expected_vif_details['vhostuser_socket'] = ( utils.ovn_vhu_sockpath(cfg.CONF.ovn.vhost_sock_dir, port_id)) self._verify_vif_details(port_id, self.dpdk_host, 'vhostuser', expected_vif_details) port_id = self._create_or_update_port(port_id=port_id, hostname=self.invalid_dpdk_host) self._verify_vif_details(port_id, self.invalid_dpdk_host, 'ovs', {'port_filter': True}) class TestPortBindingOverTcp(TestPortBinding): def get_ovsdb_server_protocol(self): return 'tcp' # TODO(mjozefcz): This test class hangs during execution. class TestPortBindingOverSsl(TestPortBinding): def get_ovsdb_server_protocol(self): return 'ssl' class TestNetworkMTUUpdate(base.TestOVNFunctionalBase): def setUp(self): super(TestNetworkMTUUpdate, self).setUp() self._ovn_client = self.mech_driver._ovn_client self.n1 = self._make_network(self.fmt, 'n1', True) res = self._create_subnet(self.fmt, self.n1['network']['id'], '10.0.0.0/24') self.sub = self.deserialize(self.fmt, res) def test_update_network_mtu(self): mtu_value = self.n1['network']['mtu'] - 100 dhcp_options = ( self.mech_driver._ovn_client._nb_idl.get_subnet_dhcp_options( self.sub['subnet']['id']) ) self.assertNotEqual( int(dhcp_options['subnet']['options']['mtu']), mtu_value) data = {'network': {'mtu': mtu_value}} req = self.new_update_request( 'networks', data, self.n1['network']['id'], self.fmt) req.get_response(self.api) dhcp_options = ( self.mech_driver._ovn_client._nb_idl.get_subnet_dhcp_options( self.sub['subnet']['id']) ) self.assertEqual( int(dhcp_options['subnet']['options']['mtu']), mtu_value) def test_no_update_network_mtu(self): mtu_value = self.n1['network']['mtu'] base_revision = db_rev.get_revision_row( self.context, self.sub['subnet']['id']) data = {'network': {'mtu': mtu_value}} req = self.new_update_request( 'networks', data, self.n1['network']['id'], self.fmt) req.get_response(self.api) second_revision = db_rev.get_revision_row( self.context, self.sub['subnet']['id']) self.assertEqual( base_revision.updated_at, second_revision.updated_at) @mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.' 
'ovsdb.ovn_client.OVNClient._is_virtual_port_supported', lambda *args: True) class TestVirtualPorts(base.TestOVNFunctionalBase): def setUp(self): super(TestVirtualPorts, self).setUp() self._ovn_client = self.mech_driver._ovn_client self.n1 = self._make_network(self.fmt, 'n1', True) res = self._create_subnet(self.fmt, self.n1['network']['id'], '10.0.0.0/24') self.sub = self.deserialize(self.fmt, res) def _create_port(self, fixed_ip=None, allowed_address=None): port_data = { 'port': {'network_id': self.n1['network']['id'], 'tenant_id': self._tenant_id}} if fixed_ip: port_data['port']['fixed_ips'] = [{'ip_address': fixed_ip}] if allowed_address: port_data['port']['allowed_address_pairs'] = [ {'ip_address': allowed_address}] port_req = self.new_create_request('ports', port_data, self.fmt) port_res = port_req.get_response(self.api) self.assertEqual(201, port_res.status_int) return self.deserialize(self.fmt, port_res)['port'] def _update_allowed_address_pair(self, port_id, data): port_data = { 'port': {'allowed_address_pairs': data}} port_req = self.new_update_request('ports', port_data, port_id, self.fmt) port_res = port_req.get_response(self.api) self.assertEqual(200, port_res.status_int) return self.deserialize(self.fmt, port_res)['port'] def _set_allowed_address_pair(self, port_id, ip): return self._update_allowed_address_pair(port_id, [{'ip_address': ip}]) def _unset_allowed_address_pair(self, port_id): return self._update_allowed_address_pair(port_id, []) def _find_port_row(self, port_id): cmd = self.nb_api.db_find_rows( 'Logical_Switch_Port', ('name', '=', port_id)) rows = cmd.execute(check_error=True) return rows[0] if rows else None def _is_ovn_port_type(self, port_id, port_type): ovn_vport = self._find_port_row(port_id) return port_type == ovn_vport.type def _check_port_type(self, port_id, type): check = functools.partial(self._is_ovn_port_type, port_id, type) n_utils.wait_until_true(check, timeout=10) @tests_base.unstable_test("bug 1865453") def test_virtual_port_created_before(self): virt_port = self._create_port() virt_ip = virt_port['fixed_ips'][0]['ip_address'] # Create the master port with the VIP address already set in # the allowed_address_pairs field master = self._create_port(allowed_address=virt_ip) # Assert the virt port has the type virtual and master is set # as parent self._check_port_type(virt_port['id'], ovn_const.LSP_TYPE_VIRTUAL) ovn_vport = self._find_port_row(virt_port['id']) self.assertEqual( virt_ip, ovn_vport.options[ovn_const.LSP_OPTIONS_VIRTUAL_IP_KEY]) self.assertEqual( master['id'], ovn_vport.options[ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY]) # Create the backup parent port backup = self._create_port(allowed_address=virt_ip) # Assert the virt port now also includes the backup port as a parent self._check_port_type(virt_port['id'], ovn_const.LSP_TYPE_VIRTUAL) ovn_vport = self._find_port_row(virt_port['id']) self.assertEqual( virt_ip, ovn_vport.options[ovn_const.LSP_OPTIONS_VIRTUAL_IP_KEY]) self.assertIn( master['id'], ovn_vport.options[ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY]) self.assertIn( backup['id'], ovn_vport.options[ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY]) @tests_base.unstable_test("bug 1865453") def test_virtual_port_update_address_pairs(self): master = self._create_port() backup = self._create_port() virt_port = self._create_port() virt_ip = virt_port['fixed_ips'][0]['ip_address'] # Assert the virt port does not yet have the type virtual (no # address pairs were set yet) self._check_port_type(virt_port['id'], '') ovn_vport =
self._find_port_row(virt_port['id']) self.assertNotIn(ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY, ovn_vport.options) self.assertNotIn(ovn_const.LSP_OPTIONS_VIRTUAL_IP_KEY, ovn_vport.options) # Set the virt IP to the allowed address pairs of the master port self._set_allowed_address_pair(master['id'], virt_ip) # Assert the virt port is now updated self._check_port_type(virt_port['id'], ovn_const.LSP_TYPE_VIRTUAL) ovn_vport = self._find_port_row(virt_port['id']) self.assertEqual( virt_ip, ovn_vport.options[ovn_const.LSP_OPTIONS_VIRTUAL_IP_KEY]) self.assertEqual( master['id'], ovn_vport.options[ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY]) # Set the virt IP to the allowed address pairs of the backup port self._set_allowed_address_pair(backup['id'], virt_ip) # Assert the virt port now includes the backup port as a parent self._check_port_type(virt_port['id'], ovn_const.LSP_TYPE_VIRTUAL) ovn_vport = self._find_port_row(virt_port['id']) self.assertEqual( virt_ip, ovn_vport.options[ovn_const.LSP_OPTIONS_VIRTUAL_IP_KEY]) self.assertIn( master['id'], ovn_vport.options[ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY]) self.assertIn( backup['id'], ovn_vport.options[ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY]) # Remove the address pairs from the master port self._unset_allowed_address_pair(master['id']) # Assert the virt port now only has the backup port as a parent self._check_port_type(virt_port['id'], ovn_const.LSP_TYPE_VIRTUAL) ovn_vport = self._find_port_row(virt_port['id']) self.assertEqual( virt_ip, ovn_vport.options[ovn_const.LSP_OPTIONS_VIRTUAL_IP_KEY]) self.assertEqual( backup['id'], ovn_vport.options[ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY]) # Remove the address pairs from the backup port self._unset_allowed_address_pair(backup['id']) # Assert the virt port is not type virtual anymore and the virtual # port options are cleared self._check_port_type(virt_port['id'], '') ovn_vport = self._find_port_row(virt_port['id']) self.assertNotIn(ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY, ovn_vport.options) self.assertNotIn(ovn_const.LSP_OPTIONS_VIRTUAL_IP_KEY, ovn_vport.options) @tests_base.unstable_test("bug 1865453") def test_virtual_port_created_after(self): master = self._create_port(fixed_ip='10.0.0.11') backup = self._create_port(fixed_ip='10.0.0.12') virt_ip = '10.0.0.55' # Set the virt IP to the master and backup ports *before* creating # the virtual port self._set_allowed_address_pair(master['id'], virt_ip) self._set_allowed_address_pair(backup['id'], virt_ip) virt_port = self._create_port(fixed_ip=virt_ip) # Assert the virtual port has been created with the # right type and parents ovn_vport = self._find_port_row(virt_port['id']) self.assertEqual(ovn_const.LSP_TYPE_VIRTUAL, ovn_vport.type) self.assertEqual( virt_ip, ovn_vport.options[ovn_const.LSP_OPTIONS_VIRTUAL_IP_KEY]) self.assertIn( master['id'], ovn_vport.options[ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY]) self.assertIn( backup['id'], ovn_vport.options[ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY]) @tests_base.unstable_test("bug 1865453") def test_virtual_port_delete_parents(self): master = self._create_port() backup = self._create_port() virt_port = self._create_port() virt_ip = virt_port['fixed_ips'][0]['ip_address'] # Assert the virt port does not yet have the type virtual (no # address pairs were set yet) ovn_vport = self._find_port_row(virt_port['id']) self.assertEqual("", ovn_vport.type) self.assertNotIn(ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY, ovn_vport.options) self.assertNotIn(ovn_const.LSP_OPTIONS_VIRTUAL_IP_KEY, ovn_vport.options)
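# Adding the VIP to a port's allowed_address_pairs is what should drive # the OVN client to flip the matching logical switch port to type # 'virtual' and record the VIP and its parent ports in the LSP options, # as the assertions below verify.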
# Set allowed address pairs to the master and backup ports self._set_allowed_address_pair(master['id'], virt_ip) self._set_allowed_address_pair(backup['id'], virt_ip) # Assert the virtual port is correct ovn_vport = self._find_port_row(virt_port['id']) self.assertEqual(ovn_const.LSP_TYPE_VIRTUAL, ovn_vport.type) self.assertEqual( virt_ip, ovn_vport.options[ovn_const.LSP_OPTIONS_VIRTUAL_IP_KEY]) self.assertIn( master['id'], ovn_vport.options[ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY]) self.assertIn( backup['id'], ovn_vport.options[ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY]) # Delete the backup port self._delete('ports', backup['id']) # Assert the virt port now only has the master port as a parent ovn_vport = self._find_port_row(virt_port['id']) self.assertEqual(ovn_const.LSP_TYPE_VIRTUAL, ovn_vport.type) self.assertEqual( virt_ip, ovn_vport.options[ovn_const.LSP_OPTIONS_VIRTUAL_IP_KEY]) self.assertEqual( master['id'], ovn_vport.options[ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY]) # Delete the master port self._delete('ports', master['id']) # Assert the virt port is not type virtual anymore and the virtual # port options are cleared ovn_vport = self._find_port_row(virt_port['id']) self.assertEqual("", ovn_vport.type) self.assertNotIn(ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY, ovn_vport.options) self.assertNotIn(ovn_const.LSP_OPTIONS_VIRTUAL_IP_KEY, ovn_vport.options) class TestExternalPorts(base.TestOVNFunctionalBase): def setUp(self): super(TestExternalPorts, self).setUp() self._ovn_client = self.mech_driver._ovn_client self.n1 = self._make_network(self.fmt, 'n1', True) res = self._create_subnet(self.fmt, self.n1['network']['id'], '10.0.0.0/24') self.sub = self.deserialize(self.fmt, res) # The default group will be created by the maintenance task ( # which is disabled in the functional jobs).
So let's add it self.default_ch_grp = self.nb_api.ha_chassis_group_add( ovn_const.HA_CHASSIS_GROUP_DEFAULT_NAME).execute(check_error=True) def _find_port_row_by_name(self, name): cmd = self.nb_api.db_find_rows( 'Logical_Switch_Port', ('name', '=', name)) rows = cmd.execute(check_error=True) return rows[0] if rows else None def test_external_port_create(self): port_data = { 'port': {'network_id': self.n1['network']['id'], 'tenant_id': self._tenant_id, portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT}} port_req = self.new_create_request('ports', port_data, self.fmt) port_res = port_req.get_response(self.api) port = self.deserialize(self.fmt, port_res)['port'] ovn_port = self._find_port_row_by_name(port['id']) self.assertEqual(ovn_const.LSP_TYPE_EXTERNAL, ovn_port.type) self.assertEqual(1, len(ovn_port.ha_chassis_group)) self.assertEqual(str(self.default_ch_grp.uuid), str(ovn_port.ha_chassis_group[0].uuid)) def test_external_port_update(self): port_data = { 'port': {'network_id': self.n1['network']['id'], 'tenant_id': self._tenant_id}} port_req = self.new_create_request('ports', port_data, self.fmt) port_res = port_req.get_response(self.api) port = self.deserialize(self.fmt, port_res)['port'] ovn_port = self._find_port_row_by_name(port['id']) self.assertEqual('', ovn_port.type) self.assertEqual([], ovn_port.ha_chassis_group) port_upt_data = { 'port': {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT}} port_req = self.new_update_request( 'ports', port_upt_data, port['id'], self.fmt) port_res = port_req.get_response(self.api) port = self.deserialize(self.fmt, port_res)['port'] ovn_port = self._find_port_row_by_name(port['id']) self.assertEqual(ovn_const.LSP_TYPE_EXTERNAL, ovn_port.type) self.assertEqual(1, len(ovn_port.ha_chassis_group)) self.assertEqual(str(self.default_ch_grp.uuid), str(ovn_port.ha_chassis_group[0].uuid)) def test_external_port_create_switchdev(self): port_data = { 'port': {'network_id': self.n1['network']['id'], 'tenant_id': self._tenant_id, portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT, ovn_const.OVN_PORT_BINDING_PROFILE: { 'capabilities': [ovn_const.PORT_CAP_SWITCHDEV]}}} port_req = self.new_create_request('ports', port_data, self.fmt) port_res = port_req.get_response(self.api) port = self.deserialize(self.fmt, port_res)['port'] ovn_port = self._find_port_row_by_name(port['id']) # When "switchdev" is set, we should treat it as a normal # port instead of "external" type self.assertEqual("", ovn_port.type) # Assert the port hasn't been added to any HA Chassis Group either self.assertEqual(0, len(ovn_port.ha_chassis_group)) def test_external_port_update_switchdev(self): port_data = { 'port': {'network_id': self.n1['network']['id'], 'tenant_id': self._tenant_id, portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT}} # Create a VNIC_DIRECT type port without the "switchdev" # capability and assert that it's an "external" port port_req = self.new_create_request('ports', port_data, self.fmt) port_res = port_req.get_response(self.api) port = self.deserialize(self.fmt, port_res)['port'] ovn_port = self._find_port_row_by_name(port['id']) self.assertEqual(ovn_const.LSP_TYPE_EXTERNAL, ovn_port.type) self.assertEqual(1, len(ovn_port.ha_chassis_group)) self.assertEqual(str(self.default_ch_grp.uuid), str(ovn_port.ha_chassis_group[0].uuid)) # Now, update the port to add a "switchdev" capability and make # sure it's not treated as an "external" port anymore nor included # in an HA Chassis Group port_upt_data = { 'port': {ovn_const.OVN_PORT_BINDING_PROFILE: { 'capabilities':
[ovn_const.PORT_CAP_SWITCHDEV]}}} port_req = self.new_update_request( 'ports', port_upt_data, port['id'], self.fmt) port_res = port_req.get_response(self.api) port = self.deserialize(self.fmt, port_res)['port'] ovn_port = self._find_port_row_by_name(port['id']) # When "switchdev" is set, we should treat it as a normal # port instead of "external" type self.assertEqual("", ovn_port.type) # Assert the port hasn't been added to any HA Chassis Group either self.assertEqual(0, len(ovn_port.ha_chassis_group)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/plugins/ml2/test_plugin.py0000644000175000017500000000665600000000000027230 0ustar00coreycorey00000000000000# Copyright (c) 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import portbindings from neutron_lib import constants from neutron_lib import context from neutron.db import agents_db from neutron.tests.common import helpers from neutron.tests.unit.plugins.ml2 import base as ml2_test_base DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' class TestMl2PortBinding(ml2_test_base.ML2TestFramework, agents_db.AgentDbMixin): def setUp(self): super(TestMl2PortBinding, self).setUp() self.admin_context = context.get_admin_context() self.host_args = {portbindings.HOST_ID: helpers.HOST, 'admin_state_up': True} def test_port_bind_successfully(self): helpers.register_ovs_agent(host=helpers.HOST) with self.network() as network: with self.subnet(network=network) as subnet: with self.port( subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID, 'admin_state_up',), **self.host_args) as port: # Note: Port creation invokes _bind_port_if_needed(), # therefore it is all we need in order to test a successful # binding self.assertEqual(port['port']['binding:vif_type'], portbindings.VIF_TYPE_OVS) def test_port_bind_retry(self): agent = helpers.register_ovs_agent(host=helpers.HOST) helpers.kill_agent(agent_id=agent.id) with self.network() as network: with self.subnet(network=network) as subnet: with self.port( subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID, 'admin_state_up',), **self.host_args) as port: # Since the agent is dead, expect binding to fail self.assertEqual(port['port']['binding:vif_type'], portbindings.VIF_TYPE_BINDING_FAILED) helpers.revive_agent(agent.id) # When an agent starts, the RPC call get_device_details() # will invoke get_bound_port_context() which eventually uses # _bind_port_if_needed() bound_context = self.plugin.get_bound_port_context( self.admin_context, port['port']['id'], helpers.HOST) # Since the agent is back online, expect binding to succeed self.assertEqual(bound_context.vif_type, portbindings.VIF_TYPE_OVS) self.assertEqual(bound_context.current['binding:vif_type'], portbindings.VIF_TYPE_OVS) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028
mtime=1586982291.4070456 neutron-16.0.0.0b2.dev214/neutron/tests/functional/privileged/0000755000175000017500000000000000000000000024273 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/privileged/__init__.py0000644000175000017500000000000000000000000026372 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4070456 neutron-16.0.0.0b2.dev214/neutron/tests/functional/privileged/agent/0000755000175000017500000000000000000000000025371 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/privileged/agent/__init__.py0000644000175000017500000000000000000000000027470 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4070456 neutron-16.0.0.0b2.dev214/neutron/tests/functional/privileged/agent/linux/0000755000175000017500000000000000000000000026530 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/privileged/agent/linux/__init__.py0000644000175000017500000000000000000000000030627 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/privileged/agent/linux/test_ip_lib.py0000644000175000017500000007131100000000000031402 0ustar00coreycorey00000000000000# Copyright 2018 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
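# These functional tests exercise the privileged ip_lib helpers built on # pyroute2: namespace and device management, link attributes for dummy, # VLAN, VXLAN and veth devices, and IP rule, address and route handling.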
import functools import random import threading import netaddr from neutron_lib import constants as n_cons from oslo_utils import uuidutils from pyroute2.ipdb import routes as ipdb_routes from pyroute2.iproute import linux as iproute_linux import testtools from neutron.agent.linux import ip_lib from neutron.common import utils as common_utils from neutron.privileged.agent.linux import ip_lib as priv_ip_lib from neutron.tests.common import net_helpers from neutron.tests.functional import base as functional_base class GetDeviceNamesTestCase(functional_base.BaseSudoTestCase): def _remove_ns(self, namespace): priv_ip_lib.remove_netns(namespace) def test_get_device_names(self): namespace = 'ns_test-' + uuidutils.generate_uuid() priv_ip_lib.create_netns(namespace) self.addCleanup(self._remove_ns, namespace) interfaces = ['int_01', 'int_02', 'int_03', 'int_04', 'int_05'] interfaces_to_check = (interfaces + ip_lib.FB_TUNNEL_DEVICE_NAMES + [ip_lib.LOOPBACK_DEVNAME]) for interface in interfaces: priv_ip_lib.create_interface(interface, namespace, 'dummy') device_names = priv_ip_lib.get_device_names(namespace) self.assertGreater(len(device_names), 0) for name in device_names: self.assertIn(name, interfaces_to_check) for interface in interfaces: priv_ip_lib.delete_interface(interface, namespace) device_names = priv_ip_lib.get_device_names(namespace) self.assertGreater(len(device_names), 0) for name in device_names: self.assertNotIn(name, interfaces) class GetDevicesInfoTestCase(functional_base.BaseSudoTestCase): def setUp(self): super(GetDevicesInfoTestCase, self).setUp() self.namespace = 'ns_test-' + uuidutils.generate_uuid() priv_ip_lib.create_netns(self.namespace) self.addCleanup(self._remove_ns, self.namespace) self.interfaces = ['int_01', 'int_02'] self.interfaces_to_exclude = (ip_lib.FB_TUNNEL_DEVICE_NAMES + [ip_lib.LOOPBACK_DEVNAME]) def _remove_ns(self, namespace): priv_ip_lib.remove_netns(namespace) def test_get_devices_info_lo(self): devices = priv_ip_lib.get_link_devices(self.namespace) self.assertGreater(len(devices), 0) for device in devices: if ip_lib.get_attr(device, 'IFLA_IFNAME') != 'lo': continue self.assertIsNone(ip_lib.get_attr(device, 'IFLA_LINKINFO')) break else: self.fail('Device "lo" not found') def test_get_devices_info_dummy(self): interfaces_tested = [] for interface in self.interfaces: priv_ip_lib.create_interface(interface, self.namespace, 'dummy') devices = priv_ip_lib.get_link_devices(self.namespace) self.assertGreater(len(devices), 0) for device in devices: name = ip_lib.get_attr(device, 'IFLA_IFNAME') if name in self.interfaces_to_exclude: continue self.assertIn(name, self.interfaces) ifla_linkinfo = ip_lib.get_attr(device, 'IFLA_LINKINFO') self.assertEqual(ip_lib.get_attr(ifla_linkinfo, 'IFLA_INFO_KIND'), 'dummy') interfaces_tested.append(name) self.assertEqual(sorted(interfaces_tested), sorted(self.interfaces)) def test_get_devices_info_vlan(self): interfaces_tested = [] vlan_interfaces = [] vlan_id = 1000 for interface in self.interfaces: priv_ip_lib.create_interface(interface, self.namespace, 'dummy') vlan_interface = interface + '_' + str(vlan_id) vlan_interfaces.append(vlan_interface) priv_ip_lib.create_interface( vlan_interface, self.namespace, 'vlan', physical_interface=interface, vlan_id=vlan_id) vlan_id += 1 devices = priv_ip_lib.get_link_devices(self.namespace) self.assertGreater(len(devices), 0) device_name_index = {} for device in devices: name = ip_lib.get_attr(device, 'IFLA_IFNAME') device_name_index[name] = device['index'] for device in devices: name = 
ip_lib.get_attr(device, 'IFLA_IFNAME') if name in self.interfaces_to_exclude: continue self.assertIn(name, self.interfaces + vlan_interfaces) ifla_linkinfo = ip_lib.get_attr(device, 'IFLA_LINKINFO') if name in vlan_interfaces: self.assertEqual( ip_lib.get_attr(ifla_linkinfo, 'IFLA_INFO_KIND'), 'vlan') ifla_infodata = ip_lib.get_attr(ifla_linkinfo, 'IFLA_INFO_DATA') vlan_id = int(name.split('_')[-1]) self.assertEqual( ip_lib.get_attr(ifla_infodata, 'IFLA_VLAN_ID'), vlan_id) vlan_link_name = self.interfaces[vlan_interfaces.index(name)] vlan_link_index = device_name_index[vlan_link_name] self.assertEqual(vlan_link_index, ip_lib.get_attr(device, 'IFLA_LINK')) interfaces_tested.append(name) self.assertEqual(sorted(interfaces_tested), sorted(self.interfaces + vlan_interfaces)) def test_get_devices_info_vxlan(self): interfaces_tested = [] vxlan_interfaces = [] vxlan_id = 1000 for interface in self.interfaces: priv_ip_lib.create_interface(interface, self.namespace, 'dummy') vxlan_interface = interface + '_' + str(vxlan_id) vxlan_interfaces.append(vxlan_interface) priv_ip_lib.create_interface( vxlan_interface, self.namespace, 'vxlan', physical_interface=interface, vxlan_id=vxlan_id, vxlan_group='239.1.1.1') vxlan_id += 1 devices = priv_ip_lib.get_link_devices(self.namespace) self.assertGreater(len(devices), 0) device_name_index = {} for device in devices: name = ip_lib.get_attr(device, 'IFLA_IFNAME') device_name_index[name] = device['index'] for device in devices: name = ip_lib.get_attr(device, 'IFLA_IFNAME') if name in self.interfaces_to_exclude: continue self.assertIn(name, self.interfaces + vxlan_interfaces) ifla_linkinfo = ip_lib.get_attr(device, 'IFLA_LINKINFO') if name in vxlan_interfaces: self.assertEqual( ip_lib.get_attr(ifla_linkinfo, 'IFLA_INFO_KIND'), 'vxlan') ifla_infodata = ip_lib.get_attr(ifla_linkinfo, 'IFLA_INFO_DATA') vxlan_id = int(name.split('_')[-1]) self.assertEqual( ip_lib.get_attr(ifla_infodata, 'IFLA_VXLAN_ID'), vxlan_id) self.assertEqual( ip_lib.get_attr(ifla_infodata, 'IFLA_VXLAN_GROUP'), '239.1.1.1') vxlan_link_name = self.interfaces[vxlan_interfaces.index(name)] vxlan_link_index = device_name_index[vxlan_link_name] self.assertEqual( vxlan_link_index, ip_lib.get_attr(ifla_infodata, 'IFLA_VXLAN_LINK')) interfaces_tested.append(name) self.assertEqual(sorted(interfaces_tested), sorted(self.interfaces + vxlan_interfaces)) def _retrieve_interface(self, interface_name, namespace): for device in priv_ip_lib.get_link_devices(namespace): if interface_name == ip_lib.get_attr(device, 'IFLA_IFNAME'): return device else: self.fail('Interface "%s" not found' % interface_name) def test_get_devices_info_veth_different_namespaces(self): namespace2 = 'ns_test-' + uuidutils.generate_uuid() priv_ip_lib.create_netns(namespace2) self.addCleanup(self._remove_ns, namespace2) # Create a random number of dummy interfaces in namespace2, in order # to increase the 'veth1_2' interface index in its namespace. 
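# A random device count makes it very unlikely that 'veth1_2' ends up # with the same ifindex in both namespaces, so a spurious IFLA_LINK # match in the assertion below is improbable.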
for idx in range(5, random.randint(15, 20)): priv_ip_lib.create_interface('int_%s' % idx, namespace2, 'dummy') ip_wrapper = ip_lib.IPWrapper(self.namespace) ip_wrapper.add_veth('veth1_1', 'veth1_2', namespace2) veth1_1 = self._retrieve_interface('veth1_1', self.namespace) veth1_2 = self._retrieve_interface('veth1_2', namespace2) ifla_linkinfo = ip_lib.get_attr(veth1_1, 'IFLA_LINKINFO') self.assertEqual(ip_lib.get_attr(ifla_linkinfo, 'IFLA_INFO_KIND'), 'veth') # NOTE(ralonsoh): since kernel_version=4.15.0-60-generic, iproute2 # provides the veth pair index, even if the pair interface is in other # namespace. In previous versions, the parameter 'IFLA_LINK' was not # present. We need to handle both cases. self.assertIn(ip_lib.get_attr(veth1_1, 'IFLA_LINK'), [None, veth1_2['index']]) def test_get_devices_info_veth_same_namespaces(self): ip_wrapper = ip_lib.IPWrapper(self.namespace) ip_wrapper.add_veth('veth1_1', 'veth1_2') veth1_1 = self._retrieve_interface('veth1_1', self.namespace) veth1_2 = self._retrieve_interface('veth1_2', self.namespace) veth1_1_link = ip_lib.get_attr(veth1_1, 'IFLA_LINK') veth1_2_link = ip_lib.get_attr(veth1_2, 'IFLA_LINK') self.assertEqual(veth1_1['index'], veth1_2_link) self.assertEqual(veth1_2['index'], veth1_1_link) class ListIpRulesTestCase(functional_base.BaseSudoTestCase): RULE_TABLES = {'default': 253, 'main': 254, 'local': 255} def setUp(self): super(ListIpRulesTestCase, self).setUp() self.namespace = 'ns_test-' + uuidutils.generate_uuid() self.ns = priv_ip_lib.create_netns(self.namespace) self.addCleanup(self._remove_ns) def _remove_ns(self): priv_ip_lib.remove_netns(self.namespace) def test_list_default_rules_ipv4(self): rules_ipv4 = priv_ip_lib.list_ip_rules(self.namespace, 4) self.assertEqual(3, len(rules_ipv4)) rule_tables = list(self.RULE_TABLES.values()) for rule in rules_ipv4: rule_tables.remove(rule['table']) self.assertEqual(0, len(rule_tables)) def test_list_rules_ipv4(self): ip_lib.add_ip_rule(self.namespace, '192.168.0.1/24', table=10) rules_ipv4 = priv_ip_lib.list_ip_rules(self.namespace, 4) for rule in rules_ipv4: if rule['table'] == 10: self.assertEqual('192.168.0.1', rule['attrs']['FRA_SRC']) self.assertEqual(24, rule['src_len']) break else: self.fail('Rule added (192.168.0.1/24, table 10) not found') def test_list_default_rules_ipv6(self): rules_ipv6 = priv_ip_lib.list_ip_rules(self.namespace, 6) self.assertEqual(2, len(rules_ipv6)) rule_tables = [255, 254] for rule in rules_ipv6: rule_tables.remove(rule['table']) self.assertEqual(0, len(rule_tables)) def test_list_rules_ipv6(self): ip_lib.add_ip_rule(self.namespace, '2001:db8::1/64', table=20) rules_ipv6 = priv_ip_lib.list_ip_rules(self.namespace, 6) for rule in rules_ipv6: if rule['table'] == 20: self.assertEqual('2001:db8::1', rule['attrs']['FRA_SRC']) self.assertEqual(64, rule['src_len']) break else: self.fail('Rule added (2001:db8::1/64, table 20) not found') class RuleTestCase(functional_base.BaseSudoTestCase): def setUp(self): super(RuleTestCase, self).setUp() self.namespace = 'ns_test-' + uuidutils.generate_uuid() self.ns = priv_ip_lib.create_netns(self.namespace) self.addCleanup(self._remove_ns) def _remove_ns(self): priv_ip_lib.remove_netns(self.namespace) def _check_rules(self, rules, parameters, values, exception_string=None, raise_exception=True): for rule in rules: if all(rule.get(parameter) == value for parameter, value in zip(parameters, values)): return True else: if raise_exception: self.fail('Rule with %s was expected' % exception_string) else: return False def 
test_add_rule_ip(self): ip_addresses = ['192.168.200.250', '2001::250'] for ip_address in ip_addresses: ip_version = common_utils.get_ip_version(ip_address) ip_length = common_utils.get_network_length(ip_version) ip_family = common_utils.get_socket_address_family(ip_version) priv_ip_lib.add_ip_rule(self.namespace, src=ip_address, src_len=ip_length, family=ip_family) rules = ip_lib.list_ip_rules(self.namespace, ip_version) self._check_rules(rules, ['from'], [ip_address], '"from" IP address %s' % ip_address) priv_ip_lib.delete_ip_rule(self.namespace, family=ip_family, src=ip_address, src_len=ip_length) rules = ip_lib.list_ip_rules(self.namespace, ip_version) self.assertFalse( self._check_rules(rules, ['from'], [ip_address], raise_exception=False)) def test_add_rule_iif(self): iif = 'iif_device_1' priv_ip_lib.create_interface(iif, self.namespace, 'dummy') priv_ip_lib.add_ip_rule(self.namespace, iifname=iif) rules = ip_lib.list_ip_rules(self.namespace, 4) self._check_rules(rules, ['iif'], [iif], 'iif name %s' % iif) priv_ip_lib.delete_ip_rule(self.namespace, iifname=iif) rules = ip_lib.list_ip_rules(self.namespace, 4) self.assertFalse( self._check_rules(rules, ['iif'], [iif], raise_exception=False)) def test_add_rule_table(self): table = 212 ip_addresses = ['192.168.200.251', '2001::251'] for ip_address in ip_addresses: ip_version = common_utils.get_ip_version(ip_address) ip_length = common_utils.get_network_length(ip_version) ip_family = common_utils.get_socket_address_family(ip_version) priv_ip_lib.add_ip_rule(self.namespace, table=table, src=ip_address, src_len=ip_length, family=ip_family) rules = ip_lib.list_ip_rules(self.namespace, ip_version) self._check_rules( rules, ['table', 'from'], [str(table), ip_address], 'table %s and "from" IP address %s' % (table, ip_address)) priv_ip_lib.delete_ip_rule(self.namespace, table=table, src=ip_address, src_len=ip_length, family=ip_family) rules = ip_lib.list_ip_rules(self.namespace, ip_version) self.assertFalse( self._check_rules(rules, ['table', 'from'], [str(table), ip_address], raise_exception=False)) def test_add_rule_priority(self): priority = 12345 ip_addresses = ['192.168.200.252', '2001::252'] for ip_address in ip_addresses: ip_version = common_utils.get_ip_version(ip_address) ip_length = common_utils.get_network_length(ip_version) ip_family = common_utils.get_socket_address_family(ip_version) priv_ip_lib.add_ip_rule(self.namespace, priority=priority, src=ip_address, src_len=ip_length, family=ip_family) rules = ip_lib.list_ip_rules(self.namespace, ip_version) self._check_rules( rules, ['priority', 'from'], [str(priority), ip_address], 'priority %s and "from" IP address %s' % (priority, ip_address)) priv_ip_lib.delete_ip_rule(self.namespace, priority=priority, src=ip_address, src_len=ip_length, family=ip_family) rules = ip_lib.list_ip_rules(self.namespace, ip_version) self.assertFalse( self._check_rules(rules, ['priority', 'from'], [str(priority), ip_address], raise_exception=False)) def test_add_rule_priority_table_iif(self): table = 213 priority = 12346 iif = 'iif_device_2' priv_ip_lib.create_interface(iif, self.namespace, 'dummy') priv_ip_lib.add_ip_rule(self.namespace, priority=priority, iifname=iif, table=table) rules = ip_lib.list_ip_rules(self.namespace, 4) self._check_rules( rules, ['priority', 'iif', 'table'], [str(priority), iif, str(table)], 'priority %s, table %s and iif name %s' % (priority, table, iif)) priv_ip_lib.delete_ip_rule(self.namespace, priority=priority, iifname=iif, table=table) rules =
ip_lib.list_ip_rules(self.namespace, 4) self.assertFalse( self._check_rules(rules, ['priority', 'iif', 'table'], [str(priority), iif, str(table)], raise_exception=False)) @testtools.skip('https://github.com/svinota/pyroute2/issues/566') def test_add_rule_exists(self): iif = 'iif_device_1' priv_ip_lib.create_interface(iif, self.namespace, 'dummy') priv_ip_lib.add_ip_rule(self.namespace, iifname=iif) rules = ip_lib.list_ip_rules(self.namespace, 4) self._check_rules(rules, ['iif'], [iif], 'iif name %s' % iif) self.assertEqual(4, len(rules)) # pyroute2.netlink.exceptions.NetlinkError(17, 'File exists') # exception is caught. priv_ip_lib.add_ip_rule(self.namespace, iifname=iif) rules = ip_lib.list_ip_rules(self.namespace, 4) self._check_rules(rules, ['iif'], [iif], 'iif name %s' % iif) self.assertEqual(4, len(rules)) class GetIpAddressesTestCase(functional_base.BaseSudoTestCase): def _remove_ns(self, namespace): priv_ip_lib.remove_netns(namespace) def test_get_ip_addresses(self): namespace = 'ns_test-' + uuidutils.generate_uuid() priv_ip_lib.create_netns(namespace) self.addCleanup(self._remove_ns, namespace) interfaces = { '20': {'cidr': '192.168.10.20/24', 'scope': 'link', 'add_broadcast': True}, '30': {'cidr': '2001::1/64', 'scope': 'global', 'add_broadcast': False}} for int_name, int_parameters in interfaces.items(): priv_ip_lib.create_interface(int_name, namespace, 'dummy', index=int(int_name)) ip_lib.add_ip_address( int_parameters['cidr'], int_name, namespace, int_parameters['scope'], int_parameters['add_broadcast']) ip_addresses = priv_ip_lib.get_ip_addresses(namespace) for ip_address in ip_addresses: int_name = str(ip_address['index']) ip = ip_lib.get_attr(ip_address, 'IFA_ADDRESS') mask = ip_address['prefixlen'] cidr = common_utils.ip_to_cidr(ip, mask) self.assertEqual(interfaces[int_name]['cidr'], cidr) self.assertEqual(interfaces[int_name]['scope'], ip_lib.IP_ADDRESS_SCOPE[ip_address['scope']]) class RouteTestCase(functional_base.BaseSudoTestCase): def setUp(self): super(RouteTestCase, self).setUp() self.namespace = self.useFixture(net_helpers.NamespaceFixture()).name self.device_name = 'test_device' ip_lib.IPWrapper(self.namespace).add_dummy(self.device_name) self.device = ip_lib.IPDevice(self.device_name, self.namespace) self.device.link.set_up() def _check_routes(self, cidrs, table=None, gateway=None, metric=None, scope=None): table = table or iproute_linux.DEFAULT_TABLE if not scope: scope = 'universe' if gateway else 'link' scope = priv_ip_lib._get_scope_name(scope) for cidr in cidrs: ip_version = common_utils.get_ip_version(cidr) if ip_version == n_cons.IP_VERSION_6 and not metric:
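# NOTE: IPv6 routes created without an explicit metric are assigned
# the kernel's default user-space route priority (IP6_RT_PRIO_USER,
# i.e. 1024), which is why the comparison below falls back to that
# pyroute2 constant when the caller did not pass a metric. Stated
# here as background; the authoritative value is the kernel define.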
metric = ipdb_routes.IP6_RT_PRIO_USER scope = 0 routes = priv_ip_lib.list_ip_routes(self.namespace, ip_version) for route in routes: if not (ip_lib.get_attr(route, 'RTA_GATEWAY') == gateway): continue self.assertEqual(table, route['table']) self.assertEqual( priv_ip_lib._IP_VERSION_FAMILY_MAP[ip_version], route['family']) self.assertEqual(gateway, ip_lib.get_attr(route, 'RTA_GATEWAY')) self.assertEqual(scope, route['scope']) self.assertEqual(0, route['dst_len']) self.assertEqual(metric, ip_lib.get_attr(route, 'RTA_PRIORITY')) break else: self.fail('Default gateway %s not found in the list of routes' % gateway) def _add_route_device_and_check(self, table=None, metric=None, scope='link'): cidrs = ['192.168.0.0/24', '172.90.0.0/16', '10.0.0.0/8', '2001:db8::/64'] for cidr in cidrs: ip_version = common_utils.get_ip_version(cidr) priv_ip_lib.add_ip_route(self.namespace, cidr, ip_version, device=self.device_name, table=table, metric=metric, scope=scope) self._check_routes(cidrs, table=table, metric=metric, scope=scope) def test_add_route_device(self): self._add_route_device_and_check(table=None) def test_add_route_device_table(self): self._add_route_device_and_check(table=100) def test_add_route_device_metric(self): self._add_route_device_and_check(metric=50) def test_add_route_device_table_metric(self): self._add_route_device_and_check(table=200, metric=30) def test_add_route_device_scope_global(self): self._add_route_device_and_check(scope='global') def test_add_route_device_scope_site(self): self._add_route_device_and_check(scope='site') def test_add_route_device_scope_host(self): self._add_route_device_and_check(scope='host') def test_add_route_via_ipv4(self): cidrs = ['192.168.0.0/24', '172.90.0.0/16', '10.0.0.0/8'] int_cidr = '192.168.20.1/24' int_ip_address = str(netaddr.IPNetwork(int_cidr).ip) self.device.addr.add(int_cidr) for cidr in cidrs: ip_version = common_utils.get_ip_version(cidr) priv_ip_lib.add_ip_route(self.namespace, cidr, ip_version, via=int_ip_address) self._check_routes(cidrs, gateway=int_ip_address) def test_add_route_via_ipv6(self): cidrs = ['2001:db8::/64', 'faaa::/96'] int_cidr = 'fd00::1/64' via_ip = 'fd00::2' self.device.addr.add(int_cidr) for cidr in cidrs: ip_version = common_utils.get_ip_version(cidr) priv_ip_lib.add_ip_route(self.namespace, cidr, ip_version, via=via_ip) self._check_routes(cidrs, gateway=via_ip) def test_add_default(self): ip_addresses = ['192.168.0.1/24', '172.90.0.1/16', '10.0.0.1/8', '2001:db8::1/64', 'faaa::1/96'] for ip_address in ip_addresses: ip_version = common_utils.get_ip_version(ip_address) if ip_version == n_cons.IP_VERSION_4: _ip = str(netaddr.IPNetwork(ip_address).ip) else: _ip = str(netaddr.IPNetwork(ip_address).ip + 1) self.device.addr.add(ip_address) priv_ip_lib.add_ip_route(self.namespace, None, ip_version, device=self.device_name, via=_ip) self._check_gateway(_ip) class GetLinkAttributesTestCase(functional_base.BaseSudoTestCase): def setUp(self): super(GetLinkAttributesTestCase, self).setUp() self.namespace = self.useFixture(net_helpers.NamespaceFixture()).name self.device_name = 'test_device' ip_lib.IPWrapper(self.namespace).add_dummy(self.device_name) self.device = ip_lib.IPDevice(self.device_name, self.namespace) self.pyroute_dev = priv_ip_lib.get_link_devices( self.namespace, ifname=self.device_name) self.assertEqual(1, len(self.pyroute_dev)) self.pyroute_dev = self.pyroute_dev[0] def test_get_link_attribute_kind(self): ifla_linkinfo = ip_lib.get_attr(self.pyroute_dev, 'IFLA_LINKINFO') ifla_link_kind = 
ip_lib.get_attr(ifla_linkinfo, 'IFLA_INFO_KIND') self.assertEqual('dummy', ifla_link_kind) self.assertEqual(ifla_link_kind, self.device.link.link_kind) def test_get_link_attributes(self): expected_attr = ['mtu', 'qlen', 'state', 'qdisc', 'brd', 'link/ether', 'alias', 'allmulticast', 'link_kind'] attr = self.device.link.attributes self.assertSetEqual(set(expected_attr), set(attr.keys())) class ListNamespacePids(functional_base.BaseSudoTestCase): def setUp(self): super(ListNamespacePids, self).setUp() self.namespace = self.useFixture(net_helpers.NamespaceFixture()).name self.timeout = 3 @staticmethod def _run_sleep(namespace, timeout): ip_wrapper = ip_lib.IPWrapper(namespace=namespace) ip_wrapper.netns.execute(['sleep', timeout], check_exit_code=False) def _check_pids(self, num_pids, namespace=None): namespace = self.namespace if not namespace else namespace self.pids = priv_ip_lib.list_ns_pids(namespace) return len(self.pids) == num_pids def test_list_namespace_pids(self): thread = threading.Thread(target=self._run_sleep, args=(self.namespace, self.timeout)) thread.start() try: check_pids = functools.partial(self._check_pids, 1) common_utils.wait_until_true(check_pids, timeout=self.timeout) except common_utils.WaitTimeout: self.fail('Process not found in namespace %s' % self.namespace) thread.join(timeout=self.timeout) def test_list_namespace_pids_nothing_running_inside(self): self.assertTrue(self._check_pids(0)) def test_list_namespace_not_created(self): self.assertTrue(self._check_pids(0, namespace='othernamespace')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/privileged/agent/linux/test_tc_lib.py0000644000175000017500000003204300000000000031377 0ustar00coreycorey00000000000000# Copyright 2018 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
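# NOTE: the tests in this module compare handle strings such as '5:' or
# '1:' against the 32-bit integers reported by the kernel: the major
# number sits in the upper 16 bits and the minor in the lower 16 bits,
# both hexadecimal as printed by iproute2. A minimal illustrative
# sketch of that encoding (helper name hypothetical, this is not the
# tc_lib API):


def _example_handle_to_int(handle):
    """Encode a 'major:minor' tc handle string as a 32-bit integer."""
    major, _, minor = handle.partition(':')
    return (int(major or '0', 16) << 16) | int(minor or '0', 16)


# _example_handle_to_int('5:')   -> 0x50000 (as asserted below)
# _example_handle_to_int('1:10') -> 0x10010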
import errno from oslo_utils import uuidutils import pyroute2 from pyroute2.netlink import rtnl from neutron.agent.linux import tc_lib from neutron.privileged.agent.linux import ip_lib as priv_ip_lib from neutron.privileged.agent.linux import tc_lib as priv_tc_lib from neutron.tests.functional import base as functional_base class TcQdiscTestCase(functional_base.BaseSudoTestCase): def setUp(self): super(TcQdiscTestCase, self).setUp() self.namespace = 'ns_test-' + uuidutils.generate_uuid() priv_ip_lib.create_netns(self.namespace) self.addCleanup(self._remove_ns, self.namespace) self.device = 'int_dummy' priv_ip_lib.create_interface(self.device, self.namespace, 'dummy') def _remove_ns(self, namespace): priv_ip_lib.remove_netns(namespace) def test_add_tc_qdisc_htb(self): priv_tc_lib.add_tc_qdisc( self.device, parent=rtnl.TC_H_ROOT, kind='htb', handle='5:', namespace=self.namespace) qdiscs = priv_tc_lib.list_tc_qdiscs(self.device, namespace=self.namespace) self.assertEqual(1, len(qdiscs)) self.assertEqual(rtnl.TC_H_ROOT, qdiscs[0]['parent']) self.assertEqual(0x50000, qdiscs[0]['handle']) self.assertEqual('htb', tc_lib._get_attr(qdiscs[0], 'TCA_KIND')) priv_tc_lib.delete_tc_qdisc(self.device, rtnl.TC_H_ROOT, namespace=self.namespace) qdiscs = priv_tc_lib.list_tc_qdiscs(self.device, namespace=self.namespace) self.assertEqual(0, len(qdiscs)) def test_add_tc_qdisc_htb_no_handle(self): priv_tc_lib.add_tc_qdisc( self.device, parent=rtnl.TC_H_ROOT, kind='htb', namespace=self.namespace) qdiscs = priv_tc_lib.list_tc_qdiscs(self.device, namespace=self.namespace) self.assertEqual(1, len(qdiscs)) self.assertEqual(rtnl.TC_H_ROOT, qdiscs[0]['parent']) self.assertEqual(0, qdiscs[0]['handle'] & 0xFFFF) self.assertEqual('htb', tc_lib._get_attr(qdiscs[0], 'TCA_KIND')) priv_tc_lib.delete_tc_qdisc(self.device, parent=rtnl.TC_H_ROOT, namespace=self.namespace) qdiscs = priv_tc_lib.list_tc_qdiscs(self.device, namespace=self.namespace) self.assertEqual(0, len(qdiscs)) def test_add_tc_qdisc_tbf(self): burst = 192000 rate = 320000 latency = 50000 priv_tc_lib.add_tc_qdisc( self.device, parent=rtnl.TC_H_ROOT, kind='tbf', burst=burst, rate=rate, latency=latency, namespace=self.namespace) qdiscs = priv_tc_lib.list_tc_qdiscs(self.device, namespace=self.namespace) self.assertEqual(1, len(qdiscs)) self.assertEqual(rtnl.TC_H_ROOT, qdiscs[0]['parent']) self.assertEqual('tbf', tc_lib._get_attr(qdiscs[0], 'TCA_KIND')) tca_options = tc_lib._get_attr(qdiscs[0], 'TCA_OPTIONS') tca_tbf_parms = tc_lib._get_attr(tca_options, 'TCA_TBF_PARMS') self.assertEqual(rate, tca_tbf_parms['rate']) self.assertEqual(burst, tc_lib._calc_burst(tca_tbf_parms['rate'], tca_tbf_parms['buffer'])) self.assertEqual(latency, tc_lib._calc_latency_ms( tca_tbf_parms['limit'], burst, tca_tbf_parms['rate']) * 1000) priv_tc_lib.delete_tc_qdisc(self.device, parent=rtnl.TC_H_ROOT, namespace=self.namespace) qdiscs = priv_tc_lib.list_tc_qdiscs(self.device, namespace=self.namespace) self.assertEqual(0, len(qdiscs)) def test_add_tc_qdisc_ingress(self): priv_tc_lib.add_tc_qdisc(self.device, kind='ingress', namespace=self.namespace) qdiscs = priv_tc_lib.list_tc_qdiscs(self.device, namespace=self.namespace) self.assertEqual(1, len(qdiscs)) self.assertEqual('ingress', tc_lib._get_attr(qdiscs[0], 'TCA_KIND')) self.assertEqual(rtnl.TC_H_INGRESS, qdiscs[0]['parent']) self.assertEqual(0xffff0000, qdiscs[0]['handle']) priv_tc_lib.delete_tc_qdisc(self.device, kind='ingress', namespace=self.namespace) qdiscs = priv_tc_lib.list_tc_qdiscs(self.device, namespace=self.namespace) 
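# NOTE: the 'ingress' qdisc is a per-device singleton with the fixed
# handle ffff: (0xffff0000) and the pseudo-parent TC_H_INGRESS, which
# is why the add/delete calls in this test identify it by kind alone
# instead of by parent/handle (kernel convention, noted as background).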
self.assertEqual(0, len(qdiscs)) def test_delete_tc_qdisc_no_device(self): self.assertRaises( priv_ip_lib.NetworkInterfaceNotFound, priv_tc_lib.delete_tc_qdisc, 'other_device', rtnl.TC_H_ROOT, namespace=self.namespace) def test_delete_tc_qdisc_no_device_no_exception(self): self.assertIsNone(priv_tc_lib.delete_tc_qdisc( 'other_device', rtnl.TC_H_ROOT, namespace=self.namespace, raise_interface_not_found=False)) def test_delete_tc_qdisc_no_qdisc(self): self.assertRaises( pyroute2.NetlinkError, priv_tc_lib.delete_tc_qdisc, self.device, rtnl.TC_H_ROOT, namespace=self.namespace) def test_delete_tc_qdisc_no_qdisc_no_exception(self): self.assertEqual(2, priv_tc_lib.delete_tc_qdisc( self.device, rtnl.TC_H_ROOT, namespace=self.namespace, raise_qdisc_not_found=False)) def test_delete_tc_qdisc_ingress_twice(self): priv_tc_lib.add_tc_qdisc(self.device, kind='ingress', namespace=self.namespace) qdiscs = priv_tc_lib.list_tc_qdiscs(self.device, namespace=self.namespace) self.assertEqual(1, len(qdiscs)) self.assertEqual('ingress', tc_lib._get_attr(qdiscs[0], 'TCA_KIND')) self.assertIsNone( priv_tc_lib.delete_tc_qdisc(self.device, kind='ingress', namespace=self.namespace)) qdiscs = priv_tc_lib.list_tc_qdiscs(self.device, namespace=self.namespace) self.assertEqual(0, len(qdiscs)) self.assertEqual( errno.EINVAL, priv_tc_lib.delete_tc_qdisc(self.device, kind='ingress', namespace=self.namespace, raise_qdisc_not_found=False)) class TcPolicyClassTestCase(functional_base.BaseSudoTestCase): CLASSES = {'1:1': {'rate': 10000, 'ceil': 20000, 'burst': 1500}, '1:3': {'rate': 20000, 'ceil': 50000, 'burst': 1600}, '1:5': {'rate': 30000, 'ceil': 90000, 'burst': 1700}, '1:7': {'rate': 35001, 'ceil': 90000, 'burst': 1701}} def setUp(self): super(TcPolicyClassTestCase, self).setUp() self.namespace = 'ns_test-' + uuidutils.generate_uuid() priv_ip_lib.create_netns(self.namespace) self.addCleanup(self._remove_ns, self.namespace) self.device = 'int_dummy' priv_ip_lib.create_interface('int_dummy', self.namespace, 'dummy') def _remove_ns(self, namespace): priv_ip_lib.remove_netns(namespace) def test_add_tc_policy_class_htb(self): priv_tc_lib.add_tc_qdisc( self.device, parent=rtnl.TC_H_ROOT, kind='htb', handle='1:', namespace=self.namespace) for classid, rates in self.CLASSES.items(): priv_tc_lib.add_tc_policy_class( self.device, '1:', classid, 'htb', namespace=self.namespace, **rates) tc_classes = priv_tc_lib.list_tc_policy_classes( self.device, namespace=self.namespace) self.assertEqual(len(self.CLASSES), len(tc_classes)) for tc_class in tc_classes: handle = tc_lib._handle_from_hex_to_string(tc_class['handle']) tca_options = tc_lib._get_attr(tc_class, 'TCA_OPTIONS') tca_htb_params = tc_lib._get_attr(tca_options, 'TCA_HTB_PARMS') self.assertEqual(self.CLASSES[handle]['rate'], tca_htb_params['rate']) self.assertEqual(self.CLASSES[handle]['ceil'], tca_htb_params['ceil']) burst = tc_lib._calc_burst(self.CLASSES[handle]['rate'], tca_htb_params['buffer']) self.assertEqual(self.CLASSES[handle]['burst'], burst) def test_delete_tc_policy_class_htb(self): priv_tc_lib.add_tc_qdisc( self.device, parent=rtnl.TC_H_ROOT, kind='htb', handle='1:', namespace=self.namespace) for classid, rates in self.CLASSES.items(): priv_tc_lib.add_tc_policy_class( self.device, '1:', classid, 'htb', namespace=self.namespace, **rates) tc_classes = priv_tc_lib.list_tc_policy_classes( self.device, namespace=self.namespace) self.assertEqual(len(self.CLASSES), len(tc_classes)) for classid in self.CLASSES: priv_tc_lib.delete_tc_policy_class( self.device, '1:', classid, 
namespace=self.namespace) tc_classes = priv_tc_lib.list_tc_policy_classes( self.device, namespace=self.namespace) for tc_class in tc_classes: handle = tc_lib._handle_from_hex_to_string(tc_class['handle']) self.assertIsNot(classid, handle) tc_classes = priv_tc_lib.list_tc_policy_classes( self.device, namespace=self.namespace) self.assertEqual(0, len(tc_classes)) def test_delete_tc_policy_class_no_namespace(self): self.assertRaises( priv_ip_lib.NetworkNamespaceNotFound, priv_tc_lib.delete_tc_policy_class, 'device', 'parent', 'classid', namespace='non_existing_namespace') def test_delete_tc_policy_class_no_class(self): self.assertRaises( priv_tc_lib.TrafficControlClassNotFound, priv_tc_lib.delete_tc_policy_class, self.device, '1:', '1:1000', namespace=self.namespace) class TcFilterClassTestCase(functional_base.BaseSudoTestCase): CLASSES = {'1:1': {'rate': 10000, 'ceil': 20000, 'burst': 1500}, '1:3': {'rate': 20000, 'ceil': 50000, 'burst': 1600}, '1:5': {'rate': 30000, 'ceil': 90000, 'burst': 1700}, '1:7': {'rate': 35001, 'ceil': 90000, 'burst': 1701}} def setUp(self): super(TcFilterClassTestCase, self).setUp() self.namespace = 'ns_test-' + uuidutils.generate_uuid() priv_ip_lib.create_netns(self.namespace) self.addCleanup(self._remove_ns, self.namespace) self.device = 'int_dummy' priv_ip_lib.create_interface('int_dummy', self.namespace, 'dummy') def _remove_ns(self, namespace): priv_ip_lib.remove_netns(namespace) def test_add_tc_filter_match32(self): priv_tc_lib.add_tc_qdisc( self.device, parent=rtnl.TC_H_ROOT, kind='htb', handle='1:', namespace=self.namespace) priv_tc_lib.add_tc_policy_class( self.device, '1:', '1:10', 'htb', namespace=self.namespace, rate=10000) keys = tc_lib._mac_to_pyroute2_keys('7a:8c:f9:1f:e5:cb', 41) priv_tc_lib.add_tc_filter_match32( self.device, '1:0', 10, '1:10', [keys[0]['key'], keys[1]['key']], namespace=self.namespace) filters = tc_lib.list_tc_filters( self.device, '1:0', namespace=self.namespace) self.assertEqual(1, len(filters)) filter_keys = filters[0]['keys'] self.assertEqual(len(keys), len(filter_keys)) for index, value in enumerate(keys): value.pop('key') self.assertEqual(value, filter_keys[index]) def test_add_tc_filter_policy(self): priv_tc_lib.add_tc_qdisc( self.device, parent=rtnl.TC_H_ROOT, kind='ingress', namespace=self.namespace) # NOTE(ralonsoh): # - rate: 320000 bytes/sec (pyroute2 units) = 2500 kbits/sec (OS units) # - burst: 192000 bytes/sec = 1500 kbits/sec priv_tc_lib.add_tc_filter_policy( self.device, 'ffff:', 49, 320000, 192000, 1200, 'drop', namespace=self.namespace) filters = tc_lib.list_tc_filters( self.device, 'ffff:', namespace=self.namespace) self.assertEqual(1, len(filters)) self.assertEqual(2500, filters[0]['rate_kbps']) self.assertEqual(1500, filters[0]['burst_kb']) self.assertEqual(1200, filters[0]['mtu']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/requirements.txt0000644000175000017500000000047700000000000025435 0ustar00coreycorey00000000000000# Additional requirements for functional tests # The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
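# A requirement line combines a package name with optional version
# bounds and a trailing comment, for example (hypothetical package,
# shown for illustration only, not an actual dependency):
# examplepkg>=1.0,<2.0 # BSD License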
psycopg2 psutil>=1.1.1,<3.2.2 PyMySQL>=0.6.2 # MIT License ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4070456 neutron-16.0.0.0b2.dev214/neutron/tests/functional/resources/0000755000175000017500000000000000000000000024153 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/resources/__init__.py0000644000175000017500000000000000000000000026252 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4070456 neutron-16.0.0.0b2.dev214/neutron/tests/functional/resources/ovsdb/0000755000175000017500000000000000000000000025270 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/resources/ovsdb/__init__.py0000644000175000017500000000000000000000000027367 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/resources/ovsdb/events.py0000644000175000017500000000445700000000000027160 0ustar00coreycorey00000000000000# Copyright 2019 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import threading from ovsdbapp.backend.ovs_idl import event from ovsdbapp.tests.functional.schema.ovn_southbound import event as test_event class WaitForCrLrpPortBindingEvent(event.RowEvent): event_name = 'WaitForCrLrpPortBindingEvent' PREFIX = 'cr-lrp-' TABLE = 'Port_Binding' def __init__(self, timeout=5): self.logical_port_events = collections.defaultdict(threading.Event) self.timeout = timeout super(WaitForCrLrpPortBindingEvent, self).__init__( (self.ROW_CREATE,), 'Port_Binding', None) def match_fn(self, event, row, old=None): return row.logical_port.startswith(self.PREFIX) def run(self, event, row, old): self.logical_port_events[row.logical_port].set() def wait(self, logical_port_name): wait_val = self.logical_port_events[logical_port_name].wait( self.timeout) del self.logical_port_events[logical_port_name] return wait_val class WaitForCreatePortBindingEvent(test_event.WaitForPortBindingEvent): event_name = 'WaitForCreatePortBindingEvent' def run(self, event, row, old): self.row = row super(WaitForCreatePortBindingEvent, self).run(event, row, old) class WaitForUpdatePortBindingEvent(test_event.WaitForPortBindingEvent): event_name = 'WaitForUpdatePortBindingEvent' def __init__(self, port, mac, timeout=5): # Call the super of the superclass to avoid passing CREATE event type # to the superclass. 
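        # NOTE: a minimal sketch of this pattern (class names
        # hypothetical): calling super(B, self).__init__() from C(B)
        # starts the MRO lookup *after* B, so A.__init__ runs directly
        # and B's hard-coded arguments (here, the CREATE event type)
        # are never applied:
        #
        #     class A(object):
        #         def __init__(self, events):
        #             self.events = events
        #
        #     class B(A):
        #         def __init__(self):
        #             super(B, self).__init__(('create',))
        #
        #     class C(B):
        #         def __init__(self):
        #             super(B, self).__init__(('update',))  # skips B
        #
        #     C().events  # ('update',)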
super(test_event.WaitForPortBindingEvent, self).__init__( (self.ROW_UPDATE,), 'Port_Binding', (('logical_port', '=', port), ('mac', '=', mac)), timeout=timeout) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/resources/ovsdb/fixtures.py0000644000175000017500000000225400000000000027516 0ustar00coreycorey00000000000000# Copyright 2020 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures from ovsdbapp.backend.ovs_idl import connection from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovsdb_monitor class OVNIdlConnectionFixture(fixtures.Fixture): def __init__(self, idl=None, constr=None, schema=None, timeout=60): self.idl = idl or ovsdb_monitor.BaseOvnIdl.from_server( constr, schema) self.connection = connection.Connection( idl=self.idl, timeout=timeout) def _setUp(self): self.addCleanup(self.stop) self.connection.start() def stop(self): self.connection.stop() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/resources/process.py0000644000175000017500000002367300000000000026216 0ustar00coreycorey00000000000000# Copyright 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
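# NOTE: a minimal usage sketch for the fixtures defined in this module
# (the OVS schema directory and attribute names are illustrative); both
# are meant to be driven through fixtures' useFixture(), which runs
# _setUp() and registers the corresponding stop() cleanup:
#
#     ovsdb = self.useFixture(OvsdbServer(
#         self.temp_dir, '/usr/share/ovn', ovn_nb_db=True,
#         ovn_sb_db=True, protocol='unix'))
#     self.useFixture(OvnNorthd(
#         self.temp_dir,
#         ovsdb.get_ovsdb_connection_path('nb'),
#         ovsdb.get_ovsdb_connection_path('sb')))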
from distutils import spawn import os import fixtures import psutil import tenacity from neutron.agent.linux import utils class DaemonProcessFixture(fixtures.Fixture): def __init__(self, temp_dir): super(DaemonProcessFixture, self).__init__() self.temp_dir = temp_dir def _get_pid_from_pidfile(self, pidfile): with open(os.path.join(self.temp_dir, pidfile), 'r') as pidfile_f: pid = pidfile_f.read().strip() try: return int(pid) except ValueError: raise RuntimeError( "Pidfile %(pidfile)s contains %(pid)s that " "is not a pid" % {'pidfile': pidfile, 'pid': pid} ) class OvnNorthd(DaemonProcessFixture): def __init__(self, temp_dir, ovn_nb_db, ovn_sb_db, protocol='unix', debug=True): super(OvnNorthd, self).__init__(temp_dir) self.ovn_nb_db = ovn_nb_db self.ovn_sb_db = ovn_sb_db self.protocol = protocol self.unixctl_path = os.path.join(self.temp_dir, 'ovn_northd.ctl') self.log_file_path = os.path.join(self.temp_dir, 'ovn_northd.log') self.debug = debug if self.protocol == 'ssl': self.private_key = os.path.join(self.temp_dir, 'ovn-privkey.pem') self.certificate = os.path.join(self.temp_dir, 'ovn-cert.pem') self.ca_cert = os.path.join(self.temp_dir, 'controllerca', 'cacert.pem') def _setUp(self): self.addCleanup(self.stop) self.start() def start(self): # start the ovn-northd ovn_northd_cmd = [ spawn.find_executable('ovn-northd'), '-vconsole:off', '--detach', '--ovnnb-db=%s' % self.ovn_nb_db, '--ovnsb-db=%s' % self.ovn_sb_db, '--no-chdir', '--unixctl=%s' % self.unixctl_path, '--log-file=%s' % (self.log_file_path)] if self.protocol == 'ssl': ovn_northd_cmd.append('--private-key=%s' % self.private_key) ovn_northd_cmd.append('--certificate=%s' % self.certificate) ovn_northd_cmd.append('--ca-cert=%s' % self.ca_cert) if self.debug: ovn_northd_cmd.append('--verbose') obj, _ = utils.create_process(ovn_northd_cmd) obj.communicate() def stop(self): try: stop_cmd = ['ovs-appctl', '-t', self.unixctl_path, 'exit'] utils.execute(stop_cmd) except Exception: pass class OvsdbServer(DaemonProcessFixture): def __init__(self, temp_dir, ovs_dir, ovn_nb_db=True, ovn_sb_db=False, protocol='unix', debug=True): super(OvsdbServer, self).__init__(temp_dir) self.ovs_dir = ovs_dir self.ovn_nb_db = ovn_nb_db self.ovn_sb_db = ovn_sb_db # The value of the protocol must be unix or tcp or ssl self.protocol = protocol self.ovsdb_server_processes = [] self.private_key = os.path.join(self.temp_dir, 'ovn-privkey.pem') self.certificate = os.path.join(self.temp_dir, 'ovn-cert.pem') self.ca_cert = os.path.join(self.temp_dir, 'controllerca', 'cacert.pem') self.debug = debug def _setUp(self): if self.ovn_nb_db: self.ovsdb_server_processes.append( {'db_path': os.path.join(self.temp_dir, 'ovn_nb.db'), 'schema_path': os.path.join(self.ovs_dir, 'ovn-nb.ovsschema'), 'remote_path': os.path.join(self.temp_dir, 'ovnnb_db.sock'), 'protocol': self.protocol, 'remote_ip': '127.0.0.1', 'remote_port': '0', 'pidfile': 'ovn-nb.pid', 'unixctl_path': os.path.join(self.temp_dir, 'ovnnb_db.ctl'), 'log_file_path': os.path.join(self.temp_dir, 'ovn_nb.log'), 'db_type': 'nb', 'connection': 'db:OVN_Northbound,NB_Global,connections', 'ctl_cmd': 'ovn-nbctl'}) if self.ovn_sb_db: self.ovsdb_server_processes.append( {'db_path': os.path.join(self.temp_dir, 'ovn_sb.db'), 'schema_path': os.path.join(self.ovs_dir, 'ovn-sb.ovsschema'), 'remote_path': os.path.join(self.temp_dir, 'ovnsb_db.sock'), 'protocol': self.protocol, 'remote_ip': '127.0.0.1', 'remote_port': '0', 'pidfile': 'ovn-sb.pid', 'unixctl_path': os.path.join(self.temp_dir, 'ovnsb_db.ctl'), 'log_file_path': 
os.path.join(self.temp_dir, 'ovn_sb.log'), 'db_type': 'sb', 'connection': 'db:OVN_Southbound,SB_Global,connections', 'ctl_cmd': 'ovn-sbctl'}) self.addCleanup(self.stop) self.start() def _init_ovsdb_pki(self): os.chdir(self.temp_dir) pki_init_cmd = [spawn.find_executable('ovs-pki'), 'init', '-d', self.temp_dir, '-l', os.path.join(self.temp_dir, 'pki.log'), '--force'] utils.execute(pki_init_cmd) pki_req_sign = [spawn.find_executable('ovs-pki'), 'req+sign', 'ovn', 'controller', '-d', self.temp_dir, '-l', os.path.join(self.temp_dir, 'pki.log'), '--force'] utils.execute(pki_req_sign) def delete_dbs(self): for ovsdb in self.ovsdb_server_processes: try: os.remove(ovsdb['db_path']) except OSError: pass def start(self): pki_done = False for ovsdb_process in self.ovsdb_server_processes: # create the db from the schema using ovsdb-tool ovsdb_tool_cmd = [spawn.find_executable('ovsdb-tool'), 'create', ovsdb_process['db_path'], ovsdb_process['schema_path']] utils.execute(ovsdb_tool_cmd) # start the ovsdb-server ovsdb_server_cmd = [ spawn.find_executable('ovsdb-server'), '-vconsole:off', '--detach', '--pidfile=%s' % os.path.join( self.temp_dir, ovsdb_process['pidfile']), '--log-file=%s' % (ovsdb_process['log_file_path']), '--remote=punix:%s' % (ovsdb_process['remote_path']), '--remote=%s' % (ovsdb_process['connection']), '--unixctl=%s' % (ovsdb_process['unixctl_path']), '--detach'] if ovsdb_process['protocol'] == 'ssl': if not pki_done: pki_done = True self._init_ovsdb_pki() ovsdb_server_cmd.append('--private-key=%s' % self.private_key) ovsdb_server_cmd.append('--certificate=%s' % self.certificate) ovsdb_server_cmd.append('--ca-cert=%s' % self.ca_cert) ovsdb_server_cmd.append(ovsdb_process['db_path']) if self.debug: ovsdb_server_cmd.append('--verbose') obj, _ = utils.create_process(ovsdb_server_cmd) obj.communicate() conn_cmd = [spawn.find_executable(ovsdb_process['ctl_cmd']), '--db=unix:%s' % ovsdb_process['remote_path'], 'set-connection', 'p%s:%s:%s' % (ovsdb_process['protocol'], ovsdb_process['remote_port'], ovsdb_process['remote_ip']), '--', 'set', 'connection', '.', 'inactivity_probe=60000'] @tenacity.retry(wait=tenacity.wait_exponential(multiplier=0.1), stop=tenacity.stop_after_delay(3), reraise=True) def _set_connection(): utils.execute(conn_cmd) @tenacity.retry( wait=tenacity.wait_exponential(multiplier=0.1), stop=tenacity.stop_after_delay(10), reraise=True) def get_ovsdb_remote_port_retry(pid): process = psutil.Process(pid) for connect in process.connections(): if connect.status == 'LISTEN': return connect.laddr[1] raise Exception(_("Could not find LISTEN port.")) if ovsdb_process['protocol'] != 'unix': _set_connection() pid = self._get_pid_from_pidfile(ovsdb_process['pidfile']) ovsdb_process['remote_port'] = \ get_ovsdb_remote_port_retry(pid) def stop(self): for ovsdb_process in self.ovsdb_server_processes: try: stop_cmd = ['ovs-appctl', '-t', ovsdb_process['unixctl_path'], 'exit'] utils.execute(stop_cmd) except Exception: pass def get_ovsdb_connection_path(self, db_type='nb'): for ovsdb_process in self.ovsdb_server_processes: if ovsdb_process['db_type'] == db_type: if ovsdb_process['protocol'] == 'unix': return 'unix:' + ovsdb_process['remote_path'] else: return '%s:%s:%s' % (ovsdb_process['protocol'], ovsdb_process['remote_ip'], ovsdb_process['remote_port']) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4070456 neutron-16.0.0.0b2.dev214/neutron/tests/functional/sanity/0000755000175000017500000000000000000000000023450 
5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/sanity/__init__.py0000644000175000017500000000000000000000000025547 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/sanity/test_sanity.py0000644000175000017500000000620400000000000026372 0ustar00coreycorey00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron.cmd.sanity import checks from neutron.conf.agent import dhcp as dhcp_conf from neutron.tests.functional import base class SanityTestCase(base.BaseLoggingTestCase): """Sanity checks that do not require root access. Tests that just call checks.some_function() are to ensure that neutron-sanity-check runs without throwing an exception, as in the case where someone modifies the API without updating the check script. """ def setUp(self): super(SanityTestCase, self).setUp() # needed for test_dnsmasq_version() cfg.CONF.register_opts(dhcp_conf.DNSMASQ_OPTS) def test_nova_notify_runs(self): checks.nova_notify_supported() def test_dnsmasq_version(self): checks.dnsmasq_version_supported() def test_dibbler_version(self): checks.dibbler_version_supported() def test_ipset_support(self): checks.ipset_supported() def test_ip6tables_support(self): checks.ip6tables_supported() class SanityTestCaseRoot(base.BaseSudoTestCase): """Sanity checks that require root access. Tests that just call checks.some_function() are to ensure that neutron-sanity-check runs without throwing an exception, as in the case where someone modifies the API without updating the check script. 
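    For example (check name hypothetical), a newly added sanity check
    only needs a one-line test in this style:

        def test_new_feature_runs(self):
            checks.new_feature_supported()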
""" def test_ovs_vxlan_support_runs(self): checks.ovs_vxlan_supported() def test_ovs_geneve_support_runs(self): checks.ovs_geneve_supported() def test_iproute2_vxlan_support_runs(self): checks.iproute2_vxlan_supported() def test_ovs_patch_support_runs(self): checks.patch_supported() def test_arp_responder_runs(self): checks.arp_responder_supported() def test_arp_header_match_runs(self): checks.arp_header_match_supported() def test_icmpv6_header_match_runs(self): checks.icmpv6_header_match_supported() def test_vf_management_runs(self): checks.vf_management_supported() def test_vf_extended_management_runs(self): checks.vf_extended_management_supported() def test_namespace_root_read_detection_runs(self): checks.netns_read_requires_helper() def test_ovsdb_native_supported_runs(self): checks.ovsdb_native_supported() def test_keepalived_ipv6_support(self): checks.keepalived_ipv6_supported() def test_bridge_firewalling_enabled(self): checks.bridge_firewalling_enabled() def test_ip_nonlocal_bind(self): checks.ip_nonlocal_bind() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4110456 neutron-16.0.0.0b2.dev214/neutron/tests/functional/scheduler/0000755000175000017500000000000000000000000024117 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/scheduler/__init__.py0000644000175000017500000000000000000000000026216 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/scheduler/test_dhcp_agent_scheduler.py0000644000175000017500000006163700000000000031677 0ustar00coreycorey00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections from operator import attrgetter from neutron_lib.api.definitions import provider_net as providernet from neutron_lib import constants from neutron_lib import context from oslo_utils import uuidutils import testscenarios from neutron.db import agents_db from neutron.db import agentschedulers_db from neutron.objects import network from neutron.scheduler import dhcp_agent_scheduler from neutron.tests.common import helpers from neutron.tests.unit.plugins.ml2 import test_plugin from neutron.tests.unit.scheduler import (test_dhcp_agent_scheduler as test_dhcp_sch) # Required to generate tests from scenarios. Not compatible with nose. load_tests = testscenarios.load_tests_apply_scenarios class BaseTestScheduleNetwork(object): """Base class which defines scenarios for schedulers. agent_count Number of dhcp agents (also number of hosts). max_agents_per_network Maximum DHCP Agents that can be scheduled for a network. 
scheduled_agent_count Number of agents the network has previously scheduled down_agent_count Number of dhcp agents which are down expected_scheduled_agent_count Number of scheduled agents the schedule() should return or 'None' if the schedule() cannot schedule the network. """ scenarios = [ ('No agents scheduled if no agents are present', dict(agent_count=0, max_agents_per_network=1, scheduled_agent_count=0, down_agent_count=0, expected_scheduled_agent_count=None)), ('No agents scheduled if network already hosted and' ' max_agents_per_network reached', dict(agent_count=1, max_agents_per_network=1, scheduled_agent_count=1, down_agent_count=0, expected_scheduled_agent_count=None)), ('No agents scheduled if all agents are down', dict(agent_count=2, max_agents_per_network=1, scheduled_agent_count=0, down_agent_count=2, expected_scheduled_agent_count=None)), ('Agent scheduled to the network if network is not yet hosted', dict(agent_count=1, max_agents_per_network=1, scheduled_agent_count=0, down_agent_count=0, expected_scheduled_agent_count=1)), ('Additional Agents scheduled to the network if max_agents_per_network' ' is not yet reached', dict(agent_count=3, max_agents_per_network=3, scheduled_agent_count=1, down_agent_count=0, expected_scheduled_agent_count=2)), ('No agent scheduled if agent is dead', dict(agent_count=3, max_agents_per_network=3, scheduled_agent_count=1, down_agent_count=1, expected_scheduled_agent_count=1)), ] class TestChanceScheduleNetwork(test_dhcp_sch.TestDhcpSchedulerBaseTestCase, agentschedulers_db.DhcpAgentSchedulerDbMixin, agents_db.AgentDbMixin, BaseTestScheduleNetwork): """Test various scenarios for ChanceScheduler.schedule.""" def test_schedule_network(self): self.config(dhcp_agents_per_network=self.max_agents_per_network) scheduler = dhcp_agent_scheduler.ChanceScheduler() # create dhcp agents hosts = ['host-%s' % i for i in range(self.agent_count)] dhcp_agents = self._create_and_set_agents_down( hosts, down_agent_count=self.down_agent_count) active_agents = dhcp_agents[self.down_agent_count:] # schedule some agents before calling schedule if self.scheduled_agent_count: # schedule the network schedule_agents = active_agents[:self.scheduled_agent_count] scheduler.resource_filter.bind(self.ctx, schedule_agents, self.network_id) actual_scheduled_agents = scheduler.schedule(self, self.ctx, self.network) if self.expected_scheduled_agent_count: self.assertEqual(self.expected_scheduled_agent_count, len(actual_scheduled_agents)) hosted_agents = self.list_dhcp_agents_hosting_network( self.ctx, self.network_id) self.assertEqual(self.scheduled_agent_count + len(actual_scheduled_agents), len(hosted_agents['agents'])) else: self.assertEqual([], actual_scheduled_agents) class TestWeightScheduleNetwork(test_dhcp_sch.TestDhcpSchedulerBaseTestCase, agentschedulers_db.DhcpAgentSchedulerDbMixin, agents_db.AgentDbMixin, BaseTestScheduleNetwork): """Test various scenarios for WeightScheduler.schedule.""" def test_weight_schedule_network(self): self.config(dhcp_agents_per_network=self.max_agents_per_network) scheduler = dhcp_agent_scheduler.WeightScheduler() # create dhcp agents hosts = ['host-%s' % i for i in range(self.agent_count)] dhcp_agents = self._create_and_set_agents_down( hosts, down_agent_count=self.down_agent_count) active_agents = dhcp_agents[self.down_agent_count:] unscheduled_active_agents = list(active_agents) # schedule some agents before calling schedule if self.scheduled_agent_count: # schedule the network schedule_agents = active_agents[:self.scheduled_agent_count] 
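            # NOTE: worked example of this set-up (values taken from the
            # 'No agent scheduled if agent is dead' scenario above): with
            # agent_count=3, down_agent_count=1 and scheduled_agent_count=1,
            # dhcp_agents[1:] are the active agents and active_agents[:1]
            # is pre-bound here, so schedule() below starts with one hosted
            # agent and one remaining active candidate.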
scheduler.resource_filter.bind(self.ctx, schedule_agents, self.network_id) for agent in schedule_agents: unscheduled_active_agents.remove(agent) actual_scheduled_agents = scheduler.schedule(self, self.ctx, self.network) if self.expected_scheduled_agent_count: sorted_unscheduled_active_agents = sorted( unscheduled_active_agents, key=attrgetter('load'))[0:self.expected_scheduled_agent_count] self.assertItemsEqual( (agent['id'] for agent in actual_scheduled_agents), (agent['id'] for agent in sorted_unscheduled_active_agents)) self.assertEqual(self.expected_scheduled_agent_count, len(actual_scheduled_agents)) hosted_agents = self.list_dhcp_agents_hosting_network( self.ctx, self.network_id) self.assertEqual(self.scheduled_agent_count + len(actual_scheduled_agents), len(hosted_agents['agents'])) else: self.assertEqual([], actual_scheduled_agents) class TestAutoSchedule(test_dhcp_sch.TestDhcpSchedulerBaseTestCase, agentschedulers_db.DhcpAgentSchedulerDbMixin, agents_db.AgentDbMixin): """Test various scenarios for ChanceScheduler.auto_schedule_networks. Below is the brief description of the scenario variables -------------------------------------------------------- agent_count number of DHCP agents (also number of hosts). max_agents_per_network Maximum DHCP Agents that can be scheduled for a network. network_count Number of networks. networks_with_dhcp_disabled List of networks with dhcp disabled hosted_networks A mapping of agent id to the ids of the networks that they should be initially hosting. expected_auto_schedule_return_value Expected return value of 'auto_schedule_networks'. expected_hosted_networks This stores the expected networks that should have been scheduled (or that could have already been scheduled) for each agent after the 'auto_schedule_networks' function is called. no_network_with_az_match If this parameter is True, there is no unscheduled network with availability_zone_hints matches to an availability_zone of agents to be scheduled. The default is False. 
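    Example, taken from the scenario 'Additional agents scheduled to the
    networks if max_agents_per_network is not yet reached' below: 'agent-1'
    initially hosts only 'network-0'; with max_agents_per_network=3 it is
    expected to also pick up 'network-1' and 'network-3' once auto
    scheduling has run on every host.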
""" scenarios = [ ('Agent scheduled to the network if network is not yet hosted', dict(agent_count=1, max_agents_per_network=1, network_count=1, networks_with_dhcp_disabled=[], hosted_networks={}, expected_auto_schedule_return_value=True, expected_hosted_networks={'agent-0': ['network-0']})), ('No agent scheduled if no networks are present', dict(agent_count=1, max_agents_per_network=1, network_count=0, networks_with_dhcp_disabled=[], hosted_networks={}, expected_auto_schedule_return_value=False, expected_hosted_networks={'agent-0': []})), ('Agents scheduled to the networks if networks are not yet hosted', dict(agent_count=2, max_agents_per_network=3, network_count=2, networks_with_dhcp_disabled=[], hosted_networks={}, expected_auto_schedule_return_value=True, expected_hosted_networks={'agent-0': ['network-0', 'network-1'], 'agent-1': ['network-0', 'network-1']})), ('No new agents scheduled if networks are already hosted', dict(agent_count=2, max_agents_per_network=3, network_count=2, networks_with_dhcp_disabled=[], hosted_networks={'agent-0': ['network-0', 'network-1'], 'agent-1': ['network-0', 'network-1']}, expected_auto_schedule_return_value=True, expected_hosted_networks={'agent-0': ['network-0', 'network-1'], 'agent-1': ['network-0', 'network-1']})), ('Additional agents scheduled to the networks if' ' max_agents_per_network is not yet reached', dict(agent_count=4, max_agents_per_network=3, network_count=4, networks_with_dhcp_disabled=[], hosted_networks={'agent-0': ['network-0', 'network-1'], 'agent-1': ['network-0'], 'agent-2': ['network-2'], 'agent-3': ['network-0', 'network-2']}, expected_auto_schedule_return_value=True, expected_hosted_networks={'agent-0': ['network-0', 'network-1', 'network-2', 'network-3'], 'agent-1': ['network-0', 'network-1', 'network-3'], 'agent-2': ['network-1', 'network-2', 'network-3'], 'agent-3': ['network-0', 'network-2']})), ('No agents scheduled if networks already hosted and' ' max_agents_per_network reached', dict(agent_count=4, max_agents_per_network=1, network_count=4, networks_with_dhcp_disabled=[], hosted_networks={'agent-0': ['network-0'], 'agent-1': ['network-2'], 'agent-2': ['network-1'], 'agent-3': ['network-3']}, expected_auto_schedule_return_value=True, expected_hosted_networks={'agent-0': ['network-0'], 'agent-1': ['network-2'], 'agent-2': ['network-1'], 'agent-3': ['network-3']})), ('No agents scheduled to the network with dhcp disabled', dict(agent_count=2, max_agents_per_network=3, network_count=2, networks_with_dhcp_disabled=['network-1'], hosted_networks={}, expected_auto_schedule_return_value=True, expected_hosted_networks={'agent-0': ['network-0'], 'agent-1': ['network-0']})), ('No agents scheduled if all networks have dhcp disabled', dict(agent_count=2, max_agents_per_network=3, network_count=2, networks_with_dhcp_disabled=['network-0', 'network-1'], hosted_networks={}, expected_auto_schedule_return_value=False, expected_hosted_networks={'agent-0': [], 'agent-1': []})), ('No agents scheduled if unscheduled network does not match AZ', dict(agent_count=1, max_agents_per_network=1, network_count=1, networks_with_dhcp_disabled=[], hosted_networks={}, expected_auto_schedule_return_value=True, expected_hosted_networks={'agent-0': []}, no_network_with_az_match=True)), ] def _extract_index(self, name): """Extracts the index number and returns. Eg. 
if name = 'agent-3', then 3 is returned """ return int(name.split('-')[-1]) def get_subnets(self, context, fields=None): subnets = [] for net in self._networks: enable_dhcp = (net['name'] not in self.networks_with_dhcp_disabled) subnets.append({'network_id': net.id, 'enable_dhcp': enable_dhcp, 'segment_id': None}) return subnets def get_network(self, context, net_id): az_hints = [] if getattr(self, 'no_network_with_az_match', False): az_hints = ['not-match'] return {'availability_zone_hints': az_hints} def _get_hosted_networks_on_dhcp_agent(self, agent_id): binding_objs = network.NetworkDhcpAgentBinding.get_objects( self.ctx, dhcp_agent_id=agent_id) return [item.network_id for item in binding_objs] def test_auto_schedule(self): self.config(dhcp_agents_per_network=self.max_agents_per_network) scheduler = dhcp_agent_scheduler.ChanceScheduler() self.ctx = context.get_admin_context() # create dhcp agents hosts = ['agent-%s' % i for i in range(self.agent_count)] dhcp_agents = self._create_and_set_agents_down(hosts) # create networks self._networks = [ network.Network( self.ctx, id=uuidutils.generate_uuid(), name='network-%s' % i) for i in range(self.network_count) ] for i in range(len(self._networks)): self._networks[i].create() network_ids = [net.id for net in self._networks] # pre schedule the networks to the agents defined in # self.hosted_networks before calling auto_schedule_network for agent, networks in self.hosted_networks.items(): agent_index = self._extract_index(agent) for net in networks: net_index = self._extract_index(net) scheduler.resource_filter.bind(self.ctx, [dhcp_agents[agent_index]], network_ids[net_index]) for host_index in range(self.agent_count): msg = 'host_index = %s' % host_index retval = scheduler.auto_schedule_networks(self, self.ctx, hosts[host_index]) self.assertEqual(self.expected_auto_schedule_return_value, retval, message=msg) agent_id = dhcp_agents[host_index].id hosted_net_ids = self._get_hosted_networks_on_dhcp_agent(agent_id) hosted_net_names = [ net['name'] for net in network.Network.get_objects(self.ctx, id=hosted_net_ids)] expected_hosted_networks = self.expected_hosted_networks[ 'agent-%s' % host_index] self.assertItemsEqual( hosted_net_names, expected_hosted_networks, msg) class TestAZAwareWeightScheduler(test_dhcp_sch.TestDhcpSchedulerBaseTestCase, agentschedulers_db.DhcpAgentSchedulerDbMixin, agents_db.AgentDbMixin): """Test various scenarios for AZAwareWeightScheduler.schedule. az_count Number of AZs. network_az_hints Number of AZs in availability_zone_hints of the network. agent_count[each az] Number of dhcp agents (also number of hosts). max_agents_per_network Maximum DHCP Agents that can be scheduled for a network. scheduled_agent_count[each az] Number of agents the network has previously scheduled down_agent_count[each az] Number of dhcp agents which are down expected_scheduled_agent_count[each az] Number of scheduled agents the schedule() should return or 'None' if the schedule() cannot schedule the network. 
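    Example, taken from the scenario 'Single hint, Multi agents, Scheduled
    agents of the specified AZ' below: a network whose single AZ hint is
    'az0' gets both required agents from az0 (an
    expected_scheduled_agent_count of [2, 0]) even though an agent is also
    available and up in az1.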
""" scenarios = [ ('Single hint, Single agent, Scheduled an agent of the specified AZ', dict(az_count=2, network_az_hints=1, agent_count=[1, 1], max_agents_per_network=1, scheduled_agent_count=[0, 0], down_agent_count=[0, 0], expected_scheduled_agent_count=[1, 0])), ('Multi hints, Multi agents Scheduled agents of the specified AZs', dict(az_count=3, network_az_hints=2, agent_count=[1, 1, 1], max_agents_per_network=2, scheduled_agent_count=[0, 0, 0], down_agent_count=[0, 0, 0], expected_scheduled_agent_count=[1, 1, 0])), ('Single hint, Multi agents, Scheduled agents of the specified AZ', dict(az_count=2, network_az_hints=1, agent_count=[2, 1], max_agents_per_network=2, scheduled_agent_count=[0, 0], down_agent_count=[0, 0], expected_scheduled_agent_count=[2, 0])), ('Multi hints, Multi agents, Only single AZ available', dict(az_count=2, network_az_hints=2, agent_count=[2, 1], max_agents_per_network=2, scheduled_agent_count=[0, 0], down_agent_count=[0, 1], expected_scheduled_agent_count=[2, 0])), ('Multi hints, Multi agents, Not enough agents', dict(az_count=3, network_az_hints=3, agent_count=[1, 1, 1], max_agents_per_network=3, scheduled_agent_count=[0, 0, 0], down_agent_count=[0, 1, 0], expected_scheduled_agent_count=[1, 0, 1])), ('Multi hints, Multi agents, Partially scheduled, Another AZ selected', dict(az_count=3, network_az_hints=2, agent_count=[1, 1, 1], max_agents_per_network=2, scheduled_agent_count=[1, 0, 0], down_agent_count=[0, 0, 0], expected_scheduled_agent_count=[0, 1, 0])), ('No hint, Scheduled independent to AZ', dict(az_count=3, network_az_hints=0, agent_count=[1, 1, 1], max_agents_per_network=3, scheduled_agent_count=[0, 0, 0], down_agent_count=[0, 0, 0], expected_scheduled_agent_count=[1, 1, 1])), ] def _set_network_az_hints(self): self.network['availability_zone_hints'] = [] for i in range(self.network_az_hints): self.network['availability_zone_hints'].append('az%s' % i) def test_schedule_network(self): self.config(dhcp_agents_per_network=self.max_agents_per_network) scheduler = dhcp_agent_scheduler.AZAwareWeightScheduler() self._set_network_az_hints() # create dhcp agents for i in range(self.az_count): az = 'az%s' % i hosts = ['%s-host-%s' % (az, j) for j in range(self.agent_count[i])] dhcp_agents = self._create_and_set_agents_down( hosts, down_agent_count=self.down_agent_count[i], az=az) active_agents = dhcp_agents[self.down_agent_count[i]:] # schedule some agents before calling schedule if self.scheduled_agent_count[i]: # schedule the network schedule_agents = active_agents[:self.scheduled_agent_count[i]] scheduler.resource_filter.bind( self.ctx, schedule_agents, self.network_id) actual_scheduled_agents = scheduler.schedule(self, self.ctx, self.network) scheduled_azs = collections.defaultdict(int) for agent in actual_scheduled_agents: scheduled_azs[agent['availability_zone']] += 1 hosted_agents = self.list_dhcp_agents_hosting_network( self.ctx, self.network_id) hosted_azs = collections.defaultdict(int) for agent in hosted_agents['agents']: hosted_azs[agent['availability_zone']] += 1 for i in range(self.az_count): self.assertEqual(self.expected_scheduled_agent_count[i], scheduled_azs.get('az%s' % i, 0)) self.assertEqual(self.scheduled_agent_count[i] + scheduled_azs.get('az%s' % i, 0), hosted_azs.get('az%s' % i, 0)) class TestDHCPSchedulerWithNetworkAccessibility( test_plugin.Ml2PluginV2TestCase): _mechanism_drivers = ['openvswitch'] def test_dhcp_scheduler_filters_hosts_without_network_access(self): dhcp_agent1 = helpers.register_dhcp_agent(host='host1') dhcp_agent2 = 
helpers.register_dhcp_agent(host='host2') dhcp_agent3 = helpers.register_dhcp_agent(host='host3') dhcp_agents = [dhcp_agent1, dhcp_agent2, dhcp_agent3] helpers.register_ovs_agent( host='host1', bridge_mappings={'physnet1': 'br-eth-1'}) helpers.register_ovs_agent( host='host2', bridge_mappings={'physnet2': 'br-eth-1'}) helpers.register_ovs_agent( host='host3', bridge_mappings={'physnet2': 'br-eth-1'}) admin_context = context.get_admin_context() net = self.driver.create_network( admin_context, {'network': {'name': 'net1', providernet.NETWORK_TYPE: 'vlan', providernet.PHYSICAL_NETWORK: 'physnet1', providernet.SEGMENTATION_ID: 1, 'tenant_id': 'tenant_one', 'admin_state_up': True, 'shared': True}}) self.driver.create_subnet( admin_context, {'subnet': {'name': 'name', 'ip_version': constants.IP_VERSION_4, 'network_id': net['id'], 'cidr': '10.0.0.0/24', 'gateway_ip': constants.ATTR_NOT_SPECIFIED, 'allocation_pools': constants.ATTR_NOT_SPECIFIED, 'dns_nameservers': constants.ATTR_NOT_SPECIFIED, 'host_routes': constants.ATTR_NOT_SPECIFIED, 'tenant_id': 'tenant_one', 'enable_dhcp': True}}) self.plugin.schedule_network(admin_context, net) dhcp_agents = self.driver.get_dhcp_agents_hosting_networks( admin_context, [net['id']]) self.assertEqual(1, len(dhcp_agents)) self.assertEqual('host1', dhcp_agents[0]['host']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/scheduler/test_l3_agent_scheduler.py0000644000175000017500000007523000000000000031271 0ustar00coreycorey00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import random from neutron_lib import constants from neutron_lib import context from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from oslo_utils import uuidutils import testscenarios from neutron.objects import network as net_obj from neutron.scheduler import l3_agent_scheduler from neutron.services.l3_router import l3_router_plugin from neutron.tests.common import helpers from neutron.tests.unit.db import test_db_base_plugin_v2 _uuid = uuidutils.generate_uuid PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin' # Required to generate tests from scenarios. Not compatible with nose. load_tests = testscenarios.load_tests_apply_scenarios class L3SchedulerBaseTest(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): """Base class for functional test of L3 schedulers. Provides basic setup and utility functions. 
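    A typical subclass drives these helpers as, e.g.:

        self._create_legacy_agents(agent_count=2, down_agent_count=1)
        routers = self._create_routers(1, 1)
        hosting = self._pre_scheduler_routers(self.scheduler, 1)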
""" def setUp(self): super(L3SchedulerBaseTest, self).setUp(PLUGIN_NAME) self.l3_plugin = l3_router_plugin.L3RouterPlugin() directory.add_plugin(plugin_constants.L3, self.l3_plugin) self.adminContext = context.get_admin_context() self.adminContext.tenant_id = _uuid() def _create_l3_agent(self, host, context, agent_mode='legacy', state=True, ext_net_id=''): agent = helpers.register_l3_agent(host, agent_mode) helpers.set_agent_admin_state(agent.id, state) return agent def _create_router(self, name): router = {'name': name, 'admin_state_up': True, 'tenant_id': self.adminContext.tenant_id} return self.l3_plugin.create_router( self.adminContext, {'router': router}) def _create_legacy_agents(self, agent_count, down_agent_count): # Creates legacy l3 agents and sets admin state based on # down agent count. self.hosts = ['host-%s' % i for i in range(agent_count)] self.l3_agents = [self._create_l3_agent(self.hosts[i], self.adminContext, 'legacy', (i >= down_agent_count)) for i in range(agent_count)] def _create_routers(self, scheduled_router_count, expected_scheduled_router_count): routers = [] if (scheduled_router_count + expected_scheduled_router_count): for i in range(scheduled_router_count + expected_scheduled_router_count): router = self._create_router('schd_rtr' + str(i)) routers.append(router) else: # create at least one router to test scheduling routers.append(self._create_router('schd_rtr0')) return routers def _pre_scheduler_routers(self, scheduler, count): hosting_agents = [] # schedule routers before calling schedule: for i in range(count): router = self.routers[i] agent = random.choice(self.l3_agents) scheduler.bind_router(self.l3_plugin, self.adminContext, router['id'], agent.id) hosting_agents.append(agent) return hosting_agents def _test_auto_schedule(self, expected_count): router_ids = [rtr['id'] for rtr in self.routers] hosting_before = self.l3_plugin.get_l3_agents_hosting_routers( self.adminContext, router_ids) # Try scheduling on each host for host in self.hosts: self.scheduler.auto_schedule_routers( self.l3_plugin, self.adminContext, host) hosting_after = self.l3_plugin.get_l3_agents_hosting_routers( self.adminContext, router_ids) if expected_count: self.assertNotEqual(hosting_before, hosting_after, 'Failed to schedule agent') else: self.assertEqual(hosting_before, hosting_after, 'Agent scheduled, not expected') class L3ChanceSchedulerTestCase(L3SchedulerBaseTest): """Test various scenarios for chance scheduler. agent_count Number of l3 agents (also number of hosts). down_agent_count Number of l3 agents which are down. scheduled_router_count Number of routers that have been previously scheduled. expected_scheduled_router_count Number of newly scheduled routers. 
""" scenarios = [ ('No routers scheduled if no agents are present', dict(agent_count=0, down_agent_count=0, scheduled_router_count=0, expected_scheduled_router_count=0)), ('No routers scheduled if it is already hosted', dict(agent_count=1, down_agent_count=0, scheduled_router_count=1, expected_scheduled_router_count=0)), ('No routers scheduled if all agents are down', dict(agent_count=2, down_agent_count=2, scheduled_router_count=0, expected_scheduled_router_count=0)), ('Router scheduled to the agent if router is not yet hosted', dict(agent_count=1, down_agent_count=0, scheduled_router_count=0, expected_scheduled_router_count=1)), ('Router scheduled to the agent even if it already hosts a router', dict(agent_count=1, down_agent_count=0, scheduled_router_count=1, expected_scheduled_router_count=1)), ] def setUp(self): super(L3ChanceSchedulerTestCase, self).setUp() self._create_legacy_agents(self.agent_count, self.down_agent_count) self.routers = self._create_routers(self.scheduled_router_count, self.expected_scheduled_router_count) self.scheduler = l3_agent_scheduler.ChanceScheduler() def test_chance_schedule_router(self): # Pre schedule routers self._pre_scheduler_routers(self.scheduler, self.scheduled_router_count) # schedule: actual_scheduled_agent = self.scheduler.schedule( self.l3_plugin, self.adminContext, self.routers[-1]['id']) if self.expected_scheduled_router_count: self.assertIsNotNone(actual_scheduled_agent, message='Failed to schedule agent') else: self.assertIsNone(actual_scheduled_agent, message='Agent scheduled but not expected') def test_auto_schedule_routers(self): # Pre schedule routers self._pre_scheduler_routers(self.scheduler, self.scheduled_router_count) # The test self._test_auto_schedule(self.expected_scheduled_router_count) class L3LeastRoutersSchedulerTestCase(L3SchedulerBaseTest): """Test various scenarios for least router scheduler. agent_count Number of l3 agents (also number of hosts). down_agent_count Number of l3 agents which are down. 
scheduled_router_count Number of routers that have been previously scheduled expected_scheduled_router_count Number of newly scheduled routers """ scenarios = [ ('No routers scheduled if no agents are present', dict(agent_count=0, down_agent_count=0, scheduled_router_count=0, expected_scheduled_router_count=0)), ('No routers scheduled if it is already hosted', dict(agent_count=1, down_agent_count=0, scheduled_router_count=1, expected_scheduled_router_count=1)), ('No routers scheduled if all agents are down', dict(agent_count=2, down_agent_count=2, scheduled_router_count=0, expected_scheduled_router_count=0)), ('Router scheduled to the agent if router is not yet hosted', dict(agent_count=1, down_agent_count=0, scheduled_router_count=0, expected_scheduled_router_count=1)), ('Router scheduled to the agent even if it already hosts a router', dict(agent_count=1, down_agent_count=0, scheduled_router_count=1, expected_scheduled_router_count=1)), ('Router is scheduled to agent hosting least routers', dict(agent_count=2, down_agent_count=0, scheduled_router_count=1, expected_scheduled_router_count=1)), ] def setUp(self): super(L3LeastRoutersSchedulerTestCase, self).setUp() self._create_legacy_agents(self.agent_count, self.down_agent_count) self.routers = self._create_routers(self.scheduled_router_count, self.expected_scheduled_router_count) self.scheduler = l3_agent_scheduler.LeastRoutersScheduler() def test_least_routers_schedule(self): # Pre schedule routers hosting_agents = self._pre_scheduler_routers(self.scheduler, self.scheduled_router_count) actual_scheduled_agent = self.scheduler.schedule( self.l3_plugin, self.adminContext, self.routers[-1]['id']) if self.expected_scheduled_router_count: # For case where there is just one agent: if self.agent_count == 1: self.assertEqual(actual_scheduled_agent.id, self.l3_agents[0].id) else: self.assertNotIn(actual_scheduled_agent.id, [x.id for x in hosting_agents], message='The expected agent was not scheduled') else: self.assertIsNone(actual_scheduled_agent, message='Expected no agent to be scheduled,' ' but it got scheduled') def test_auto_schedule_routers(self): # Pre schedule routers self._pre_scheduler_routers(self.scheduler, self.scheduled_router_count) # The test self._test_auto_schedule(self.expected_scheduled_router_count) class L3AZSchedulerBaseTest(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def setUp(self): super(L3AZSchedulerBaseTest, self).setUp(plugin='ml2') self.l3_plugin = l3_router_plugin.L3RouterPlugin() directory.add_plugin(plugin_constants.L3, self.l3_plugin) self.l3_plugin.router_scheduler = None directory.add_plugin(plugin_constants.L3, self.l3_plugin) self.adminContext = context.get_admin_context() self.adminContext.tenant_id = '_func_test_tenant_' def _create_l3_agent(self, host, context, agent_mode='legacy', plugin=None, state=True, az='nova'): agent = helpers.register_l3_agent(host, agent_mode, az=az) helpers.set_agent_admin_state(agent.id, state) return agent def _create_legacy_agents(self, agent_count, down_agent_count, az): # Creates legacy l3 agents and sets admin state based on # down agent count. 
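        # (Agents with index < down_agent_count are registered but then
        # marked admin-down via set_agent_admin_state(), so the schedulers
        # under test are expected to skip them.)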
hosts = ['%s-host-%s' % (az, i) for i in range(agent_count)] l3_agents = [ self._create_l3_agent(hosts[i], self.adminContext, 'legacy', self.l3_plugin, (i >= down_agent_count), az=az) for i in range(agent_count)] return l3_agents def _create_router(self, az_hints, ha): router = {'name': 'router1', 'admin_state_up': True, 'availability_zone_hints': az_hints, 'tenant_id': self._tenant_id} if ha: router['ha'] = True return self.l3_plugin.create_router( self.adminContext, {'router': router}) class L3AZLeastRoutersSchedulerTestCase(L3AZSchedulerBaseTest): """Test various scenarios for AZ router scheduler. az_count Number of AZs. router_az_hints Number of AZs in availability_zone_hints of the router. agent_count[each az] Number of l3 agents (also number of hosts). max_l3_agents_per_router Maximum number of agents on which a router will be scheduled. 0 means test for regular router. down_agent_count[each az] Number of l3 agents which are down. expected_scheduled_agent_count[each az] Number of newly scheduled l3 agents. """ scenarios = [ ('Regular router, Scheduled specified AZ', dict(az_count=2, router_az_hints=1, agent_count=[1, 1], max_l3_agents_per_router=0, down_agent_count=[0, 0], expected_scheduled_agent_count=[1, 0])), ('HA router, Scheduled specified AZs', dict(az_count=3, router_az_hints=2, agent_count=[1, 1, 1], max_l3_agents_per_router=2, down_agent_count=[0, 0, 0], expected_scheduled_agent_count=[1, 1, 0])), ('HA router, max_l3_agents_per_routers > az_hints', dict(az_count=2, router_az_hints=2, agent_count=[2, 1], max_l3_agents_per_router=3, down_agent_count=[0, 0], expected_scheduled_agent_count=[2, 1])), ] def setUp(self): super(L3AZLeastRoutersSchedulerTestCase, self).setUp() self.scheduler = l3_agent_scheduler.AZLeastRoutersScheduler() self.l3_plugin.router_scheduler = self.scheduler def test_schedule_router(self): ha = False if self.max_l3_agents_per_router: self.config(max_l3_agents_per_router=self.max_l3_agents_per_router) ha = True # create l3 agents for i in range(self.az_count): az = 'az%s' % i self._create_legacy_agents(self.agent_count[i], self.down_agent_count[i], az) # create router. # note that ha-router needs enough agents beforehand. az_hints = ['az%s' % i for i in range(self.router_az_hints)] router = self._create_router(az_hints, ha) self.scheduler.schedule(self.l3_plugin, self.adminContext, router['id']) # schedule returns only one agent. so get all agents scheduled. scheduled_agents = self.l3_plugin.get_l3_agents_hosting_routers( self.adminContext, [router['id']]) scheduled_azs = collections.defaultdict(int) for agent in scheduled_agents: scheduled_azs[agent['availability_zone']] += 1 for i in range(self.az_count): self.assertEqual(self.expected_scheduled_agent_count[i], scheduled_azs.get('az%s' % i, 0)) class L3AZAutoScheduleTestCaseBase(L3AZSchedulerBaseTest): """Test various scenarios for AZ router scheduler. az_count Number of AZs. router_az_hints Number of AZs in availability_zone_hints of the router. agent_az AZ of newly activated l3 agent. agent_count[each az] Number of l3 agents (also number of hosts). max_l3_agents_per_router Maximum number of agents on which a router will be scheduled. 0 means test for regular router. down_agent_count[each az] Number of l3 agents which are down. 
scheduled_agent_count[each az] Number of l3 agents that have been previously scheduled expected_scheduled_agent_count[each az] Number of newly scheduled l3 agents """ scenarios = [ ('Regular router, not scheduled, agent in specified AZ activated', dict(az_count=2, router_az_hints=1, agent_az='az0', agent_count=[1, 1], max_l3_agents_per_router=0, down_agent_count=[1, 1], scheduled_agent_count=[0, 0], expected_scheduled_agent_count=[1, 0])), ('Regular router, not scheduled, agent not in specified AZ activated', dict(az_count=2, router_az_hints=1, agent_az='az1', agent_count=[1, 1], max_l3_agents_per_router=0, down_agent_count=[1, 1], scheduled_agent_count=[0, 0], expected_scheduled_agent_count=[0, 0])), ('HA router, not scheduled, agent in specified AZ activated', dict(az_count=3, router_az_hints=2, agent_az='az1', agent_count=[1, 1, 1], max_l3_agents_per_router=2, down_agent_count=[0, 1, 0], scheduled_agent_count=[0, 0, 0], expected_scheduled_agent_count=[0, 1, 0])), ('HA router, not scheduled, agent not in specified AZ activated', dict(az_count=3, router_az_hints=2, agent_az='az2', agent_count=[1, 1, 1], max_l3_agents_per_router=2, down_agent_count=[0, 0, 1], scheduled_agent_count=[0, 0, 0], expected_scheduled_agent_count=[0, 0, 0])), ] def test_auto_schedule_router(self): scheduler = l3_agent_scheduler.AZLeastRoutersScheduler() ha = False if self.max_l3_agents_per_router: self.config(max_l3_agents_per_router=self.max_l3_agents_per_router) ha = True # create l3 agents l3_agents = {} for i in range(self.az_count): az = 'az%s' % i l3_agents[az] = self._create_legacy_agents( self.agent_count[i], self.down_agent_count[i], az) # create router. # note that ha-router needs enough agents beforehand. az_hints = ['az%s' % i for i in range(self.router_az_hints)] router = self._create_router(az_hints, ha) # schedule some agents before calling auto schedule for i in range(self.az_count): az = 'az%s' % i for j in range(self.scheduled_agent_count[i]): agent = l3_agents[az][j + self.down_agent_count[i]] scheduler.bind_router(self.l3_plugin, self.adminContext, router['id'], agent.id) # activate down agent and call auto_schedule_routers activate_agent = l3_agents[self.agent_az][0] helpers.set_agent_admin_state(activate_agent['id'], admin_state_up=True) scheduler.auto_schedule_routers(self.l3_plugin, self.adminContext, activate_agent['host']) scheduled_agents = self.l3_plugin.get_l3_agents_hosting_routers( self.adminContext, [router['id']]) scheduled_azs = collections.defaultdict(int) for agent in scheduled_agents: scheduled_azs[agent['availability_zone']] += 1 for i in range(self.az_count): self.assertEqual(self.expected_scheduled_agent_count[i], scheduled_azs.get('az%s' % i, 0)) class L3DVRSchedulerBaseTest(L3SchedulerBaseTest): """Base class for functional test of DVR L3 schedulers. Provides basic setup and utility functions. 
""" def setUp(self): super(L3DVRSchedulerBaseTest, self).setUp() self.default_ext_net_id = _uuid() self.default_ext_subnet_id = _uuid() self.router_ext_net_id = _uuid() self.router_ext_subnet_id = _uuid() def _create_router(self, name, distributed, ext_net_id=None): router = {'name': name, 'admin_state_up': True, 'tenant_id': self.adminContext.tenant_id, 'distributed': distributed} if ext_net_id: router['external_gateway_info'] = {'network_id': ext_net_id} return self.l3_plugin.create_router(self.adminContext, {'router': router}) def _create_network(self, net_id, name=None, external=False): network_dict = {'tenant_id': self.adminContext.tenant_id, 'id': net_id, 'name': name, 'admin_state_up': True, 'shared': False, 'status': constants.NET_STATUS_ACTIVE} network = self.plugin.create_network(self.adminContext, {'network': network_dict}) if external: network = net_obj.ExternalNetwork( self.adminContext, network_id=net_id) network.create() return network def _create_subnet(self, sub_id, network_id, cidr, gw_ip, name='test_sub'): subnet = {'tenant_id': self.adminContext.tenant_id, 'id': sub_id, 'name': name, 'network_id': network_id, 'ip_version': constants.IP_VERSION_4, 'cidr': cidr, 'enable_dhcp': False, 'gateway_ip': gw_ip, 'shared': False, 'allocation_pools': constants.ATTR_NOT_SPECIFIED, 'dns_nameservers': constants.ATTR_NOT_SPECIFIED, 'host_routes': constants.ATTR_NOT_SPECIFIED} return self.plugin.create_subnet(self.adminContext, {'subnet': subnet}) class L3DVRSchedulerTestCase(L3DVRSchedulerBaseTest): """Test various scenarios for L3 DVR schedulers: agent_mode L3 agent mode. second_agent_mode Second L3 agent mode for scenarios with two agents. agent_has_ext_network Is there external network on the host. router_is_distributed Is router distributed. router_already_hosted Is router already hosted. router_has_ext_gw Does router have external gateway. router_agent_have_same_ext_net Do router and agent have the same external network. expected_router_scheduled To verify do we expect router to get scheduled. 
""" def get_scenario(agent_mode=constants.L3_AGENT_MODE_DVR_SNAT, second_agent_mode=None, agent_has_ext_network=False, router_is_distributed=False, router_already_hosted=False, router_has_ext_gw=False, router_agent_have_same_ext_net=False, expected_router_scheduled=False): return dict(agent_mode=agent_mode, second_agent_mode=second_agent_mode, agent_has_ext_network=agent_has_ext_network, router_is_distributed=router_is_distributed, router_already_hosted=router_already_hosted, router_has_ext_gw=router_has_ext_gw, router_agent_have_same_ext_net=router_agent_have_same_ext_net, expected_router_scheduled=expected_router_scheduled) scenarios = [ ('Legacy router not scheduled on dvr agent', get_scenario(agent_mode=constants.L3_AGENT_MODE_DVR)), ('Legacy router scheduled on dvr_snat agent', get_scenario(expected_router_scheduled=True)), ('Distributed router not scheduled on legacy agent', get_scenario(agent_mode=constants.L3_AGENT_MODE_LEGACY, router_is_distributed=True)), ('Distributed router not scheduled on dvr agent', get_scenario(agent_mode=constants.L3_AGENT_MODE_DVR, router_is_distributed=True)), ('Distributed router scheduled on dvr_snat agent', get_scenario(router_is_distributed=True, expected_router_scheduled=True)), ('Already hosted legacy router not scheduled on dvr agent', get_scenario(agent_mode=constants.L3_AGENT_MODE_DVR, router_already_hosted=True)), ('Already hosted legacy router not scheduled on dvr_snat agent', get_scenario(router_already_hosted=True)), ('Already hosted distributed router not scheduled on legacy agent', get_scenario(agent_mode=constants.L3_AGENT_MODE_LEGACY, router_already_hosted=True, router_is_distributed=True)), ('Already hosted distributed router not scheduled on dvr agent', get_scenario(agent_mode=constants.L3_AGENT_MODE_DVR, router_is_distributed=True, router_already_hosted=True)), ('Already hosted distributed router not scheduled on dvr_snat agent', get_scenario(router_is_distributed=True, router_already_hosted=True)), ('Already hosted legacy router not scheduled on additional dvr agent', get_scenario(agent_mode=constants.L3_AGENT_MODE_LEGACY, second_agent_mode=constants.L3_AGENT_MODE_DVR_SNAT, router_already_hosted=True)), ('Distributed router not scheduled if it is on a different ' 'external network than the dvr_snat agent', get_scenario(agent_has_ext_network=True, router_is_distributed=True, router_has_ext_gw=True, router_agent_have_same_ext_net=False)), ] def setUp(self): super(L3DVRSchedulerTestCase, self).setUp() agent_cnt = 2 if self.second_agent_mode else 1 # create hosts for each agent self.hosts = ['host-%s' % i for i in range(agent_cnt)] # create default external network self._create_network(self.default_ext_net_id, name='_test-ext-net', external=True) self._create_subnet(self.default_ext_subnet_id, self.default_ext_net_id, '10.10.9.0/24', '10.10.9.1', '_test-ext-net-subnet') if self.router_has_ext_gw and not self.router_agent_have_same_ext_net: # for the test cases in which router and agent are not on same # external network, we create an external network for router self._create_network(self.router_ext_net_id, name='_test-ext-net2', external=True) self._create_subnet(self.router_ext_subnet_id, self.router_ext_net_id, '10.10.8.0/24', '10.10.8.1', '_test-ext-net2-subnet') # create agents: self.l3_agents = [self._create_l3_agent(self.hosts[0], self.adminContext, self.agent_mode, True, self.default_ext_net_id if self.agent_has_ext_network else '')] if self.second_agent_mode: self.l3_agents.append(self._create_l3_agent(self.hosts[1], 
                self.adminContext, self.second_agent_mode, True,
                self.default_ext_net_id if self.agent_has_ext_network else ''))

        # The router to schedule:
        self.router_to_schedule = self._create_router_to_schedule()

    def _create_router_to_schedule(self):
        router_to_schedule = None

        if self.router_has_ext_gw:
            if self.router_agent_have_same_ext_net:
                router_to_schedule = self._create_router(
                    'schd_rtr', self.router_is_distributed,
                    self.default_ext_net_id)
            else:
                router_to_schedule = self._create_router(
                    'schd_rtr', self.router_is_distributed,
                    self.router_ext_net_id)
        else:
            router_to_schedule = self._create_router(
                'schd_rtr', self.router_is_distributed)

        return router_to_schedule

    def _test_schedule_router(self):
        if self.router_already_hosted:
            self.scheduler.bind_router(self.l3_plugin, self.adminContext,
                                       self.router_to_schedule['id'],
                                       self.l3_agents[0].id)

        # schedule:
        actual_scheduled_agent = self.scheduler.schedule(
            self.l3_plugin, self.adminContext,
            self.router_to_schedule['id'])

        # check for router scheduling:
        self.assertEqual(self.expected_router_scheduled,
                         bool(actual_scheduled_agent),
                         message='Failed to schedule agent')

    def _test_auto_schedule_routers(self):
        if self.router_already_hosted:
            self.scheduler.bind_router(self.l3_plugin, self.adminContext,
                                       self.router_to_schedule['id'],
                                       self.l3_agents[0].id)
        # schedule:
        hosting_before = self.l3_plugin.get_l3_agents_hosting_routers(
            self.adminContext, [self.router_to_schedule['id']])

        for host in self.hosts:
            self.scheduler.auto_schedule_routers(
                self.l3_plugin, self.adminContext, host)

        hosting_after = self.l3_plugin.get_l3_agents_hosting_routers(
            self.adminContext, [self.router_to_schedule['id']])

        if self.router_already_hosted:
            self.assertEqual(hosting_before, hosting_after,
                             'Agent pre scheduled, yet no binding found!')
        elif self.expected_router_scheduled:
            self.assertNotEqual(hosting_before, hosting_after,
                                'Agent not scheduled, not expected')
        else:
            self.assertEqual(hosting_before, hosting_after,
                             'Agent scheduled, not expected')

    def test_least_routers_schedule_router(self):
        self.scheduler = l3_agent_scheduler.LeastRoutersScheduler()
        self._test_schedule_router()

    def test_least_routers_auto_schedule_routers(self):
        self.scheduler = l3_agent_scheduler.LeastRoutersScheduler()
        self._test_auto_schedule_routers()

    def test_chance_schedule_router(self):
        self.scheduler = l3_agent_scheduler.ChanceScheduler()
        self._test_schedule_router()

    def test_chance_auto_schedule_routers(self):
        self.scheduler = l3_agent_scheduler.ChanceScheduler()
        self._test_auto_schedule_routers()
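
# A minimal illustration (not part of the original module) of how
# `load_tests = testscenarios.load_tests_apply_scenarios` expands the
# scenario lists above; the names in this sketch are hypothetical:
#
#     import testscenarios
#     import unittest
#
#     class FakeSchedulerTest(testscenarios.WithScenarios,
#                             unittest.TestCase):
#         scenarios = [('one agent', dict(agent_count=1)),
#                      ('two agents', dict(agent_count=2))]
#
#         def test_agent_count(self):
#             self.assertGreater(self.agent_count, 0)
#
# Each (name, attrs) pair yields a distinct test case with the dict's keys
# bound as instance attributes, so test_agent_count runs once per scenario.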

neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/__init__.py

neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/conntrack_helper/__init__.py

neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/conntrack_helper/test_conntrack_helper.py

# Copyright (c) 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from neutron_lib.api.definitions import l3_conntrack_helper as apidef
from neutron_lib import exceptions as lib_exc
from neutron_lib.exceptions import l3 as lib_l3_exc
from neutron_lib.plugins import directory
from oslo_utils import uuidutils

from neutron.services.conntrack_helper.common import exceptions as cth_exc
from neutron.services.conntrack_helper import plugin as cth_plugin
from neutron.tests.functional import base as functional_base
from neutron.tests.unit.plugins.ml2 import base as ml2_test_base

INVALID_ID = uuidutils.generate_uuid()


class ConntrackHelperTestCase(ml2_test_base.ML2TestFramework,
                              functional_base.BaseLoggingTestCase):
    def setUp(self):
        super(ConntrackHelperTestCase, self).setUp()
        self.cth_plugin = cth_plugin.Plugin()
        directory.add_plugin("CONNTRACKHELPER", self.cth_plugin)
        self.router = self._create_router(distributed=True)
        self.conntrack_helper = {
            apidef.RESOURCE_NAME: {apidef.PROTOCOL: 'udp',
                                   apidef.PORT: 69,
                                   apidef.HELPER: 'tftp'}
        }

    def test_create_conntrack_helper(self):
        res = self.cth_plugin.create_router_conntrack_helper(
            self.context, self.router['id'], self.conntrack_helper)
        expected = {
            'id': mock.ANY,
            'protocol': 'udp',
            'port': 69,
            'helper': 'tftp',
            'router_id': self.router['id']
        }
        self.assertEqual(expected, res)

    def test_negative_duplicate_create_conntrack_helper(self):
        self.cth_plugin.create_router_conntrack_helper(
            self.context, self.router['id'], self.conntrack_helper)
        self.assertRaises(lib_exc.BadRequest,
                          self.cth_plugin.create_router_conntrack_helper,
                          self.context, self.router['id'],
                          self.conntrack_helper)

    def test_negative_create_conntrack_helper(self):
        self.assertRaises(lib_l3_exc.RouterNotFound,
                          self.cth_plugin.create_router_conntrack_helper,
                          self.context, INVALID_ID, self.conntrack_helper)

    def test_update_conntrack_helper(self):
        res = self.cth_plugin.create_router_conntrack_helper(
            self.context, self.router['id'], self.conntrack_helper)
        new_conntrack_helper = {
            apidef.RESOURCE_NAME: {apidef.PROTOCOL: 'udp',
                                   apidef.PORT: 6969,
                                   apidef.HELPER: 'tftp'}
        }
        update = self.cth_plugin.update_router_conntrack_helper(
            self.context, res['id'], self.router['id'],
            new_conntrack_helper)
        expected = {
            'id': res['id'],
            'protocol': 'udp',
            'port': 6969,
            'helper': 'tftp',
            'router_id': self.router['id']
        }
        self.assertEqual(expected, update)

    def test_negative_update_conntrack_helper(self):
        self.assertRaises(cth_exc.ConntrackHelperNotFound,
                          self.cth_plugin.update_router_conntrack_helper,
                          self.context, INVALID_ID, self.router['id'], {})
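    # (The next test updates a second helper so that it collides with an
    # existing one on the same router; the plugin is expected to reject the
    # update with BadRequest, mirroring the duplicate-create case above.)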
    def test_negative_duplicate_update_conntrack_helper(self):
        self.cth_plugin.create_router_conntrack_helper(
            self.context, self.router['id'], self.conntrack_helper)
        new_conntrack_helper = {
            apidef.RESOURCE_NAME: {apidef.PROTOCOL: 'udp',
                                   apidef.PORT: 6969,
                                   apidef.HELPER: 'tftp'}
        }
        res = self.cth_plugin.create_router_conntrack_helper(
            self.context, self.router['id'], new_conntrack_helper)
        new_conntrack_helper[apidef.RESOURCE_NAME][apidef.PORT] = 69
        self.assertRaises(lib_exc.BadRequest,
                          self.cth_plugin.update_router_conntrack_helper,
                          self.context, res['id'], self.router['id'],
                          new_conntrack_helper)

    def test_delete_conntrack_helper(self):
        res = self.cth_plugin.create_router_conntrack_helper(
            self.context, self.router['id'], self.conntrack_helper)
        delete = self.cth_plugin.delete_router_conntrack_helper(
            self.context, res['id'], self.router['id'])
        self.assertIsNone(delete)

    def test_negative_delete_conntrack_helper(self):
        self.assertRaises(cth_exc.ConntrackHelperNotFound,
                          self.cth_plugin.delete_router_conntrack_helper,
                          self.context, INVALID_ID, self.router['id'])

neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/l3_router/__init__.py

neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/l3_router/test_l3_dvr_ha_router_plugin.py

# Copyright (c) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from neutron_lib.agent import topics
from neutron_lib.api.definitions import external_net as extnet_apidef
from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants

from neutron.tests.common import helpers
from neutron.tests.functional.services.l3_router import \
    test_l3_dvr_router_plugin

DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'


class L3DvrHATestCase(test_l3_dvr_router_plugin.L3DvrTestCase):
    def setUp(self):
        super(L3DvrHATestCase, self).setUp()
        self.l3_agent_2 = helpers.register_l3_agent(
            host="standby", agent_mode=constants.L3_AGENT_MODE_DVR_SNAT)

    def _create_router(self, distributed=True, ha=True,
                       admin_state_up=True):
        return (super(L3DvrHATestCase, self).
_create_router(distributed=distributed, ha=ha, admin_state_up=admin_state_up)) def test_update_router_db_cvr_to_dvrha(self): router = self._create_router(distributed=False, ha=False, admin_state_up=False) self.l3_plugin.update_router( self.context, router['id'], {'router': {'distributed': True, 'ha': True}}) router = self.l3_plugin.get_router(self.context, router['id']) self.assertTrue(router['distributed']) self.assertTrue(router['ha']) def test_update_router_db_dvrha_to_cvr(self): router = self._create_router(distributed=True, ha=True, admin_state_up=False) self.l3_plugin.update_router( self.context, router['id'], {'router': {'distributed': False, 'ha': False}}) router = self.l3_plugin.get_router(self.context, router['id']) self.assertFalse(router['distributed']) self.assertFalse(router['ha']) def test_update_router_db_dvrha_to_dvr(self): router = self._create_router(distributed=True, ha=True, admin_state_up=False) self.l3_plugin.update_router( self.context, router['id'], {'router': {'admin_state_up': False}}) self.l3_plugin.update_router( self.context, router['id'], {'router': {'distributed': True, 'ha': False}}) router = self.l3_plugin.get_router(self.context, router['id']) self.assertTrue(router['distributed']) self.assertFalse(router['ha']) def test_update_router_db_dvrha_to_cvrha(self): router = self._create_router(distributed=True, ha=True, admin_state_up=False) self.l3_plugin.update_router( self.context, router['id'], {'router': {'distributed': False, 'ha': True}}) router = self.l3_plugin.get_router(self.context, router['id']) self.assertFalse(router['distributed']) self.assertTrue(router['ha']) def test_update_router_db_dvr_to_dvrha(self): router = self._create_router(distributed=True, ha=False, admin_state_up=False) self.l3_plugin.update_router( self.context, router['id'], {'router': {'distributed': True, 'ha': True}}) router = self.l3_plugin.get_router(self.context, router['id']) self.assertTrue(router['distributed']) self.assertTrue(router['ha']) def test_update_router_db_cvrha_to_dvrha(self): router = self._create_router(distributed=False, ha=True, admin_state_up=False) self.l3_plugin.update_router( self.context, router['id'], {'router': {'distributed': True, 'ha': True}}) router = self.l3_plugin.get_router(self.context, router['id']) self.assertTrue(router['distributed']) self.assertTrue(router['ha']) def _assert_router_is_hosted_on_both_dvr_snat_agents(self, router): agents = self.l3_plugin.list_l3_agents_hosting_router( self.context, router['id']) self.assertEqual(2, len(agents['agents'])) dvr_snat_agents = self.l3_plugin.get_ha_router_port_bindings( self.context, [router['id']]) dvr_snat_agent_ids = [a.l3_agent_id for a in dvr_snat_agents] self.assertIn(self.l3_agent['id'], dvr_snat_agent_ids) self.assertIn(self.l3_agent_2['id'], dvr_snat_agent_ids) def test_router_notifications(self): """Check that notifications go to the right hosts in different conditions """ # register l3 agents in dvr mode in addition to existing dvr_snat agent HOST1, HOST2, HOST3 = 'host1', 'host2', 'host3' for host in [HOST1, HOST2, HOST3]: helpers.register_l3_agent( host=host, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router(distributed=True, ha=True) arg_list = (portbindings.HOST_ID,) with self.subnet() as ext_subnet, \ self.subnet(cidr='20.0.0.0/24') as subnet1, \ self.subnet(cidr='30.0.0.0/24') as subnet2, \ self.subnet(cidr='40.0.0.0/24') as subnet3, \ self.port(subnet=subnet1, device_owner=DEVICE_OWNER_COMPUTE, arg_list=arg_list, **{portbindings.HOST_ID: HOST1}), \ 
self.port(subnet=subnet2, device_owner=constants.DEVICE_OWNER_DHCP, arg_list=arg_list, **{portbindings.HOST_ID: HOST2}), \ self.port(subnet=subnet3, device_owner=constants.DEVICE_OWNER_NEUTRON_PREFIX, arg_list=arg_list, **{portbindings.HOST_ID: HOST3}): # make net external ext_net_id = ext_subnet['subnet']['network_id'] self._update('networks', ext_net_id, {'network': {extnet_apidef.EXTERNAL: True}}) with mock.patch.object(self.l3_plugin.l3_rpc_notifier.client, 'prepare') as mock_prepare: # add external gateway to router self.l3_plugin.update_router( self.context, router['id'], {'router': { 'external_gateway_info': {'network_id': ext_net_id}}}) # router has no interfaces so notification goes # to only dvr_snat agents (self.l3_agent and self.l3_agent_2) self.assertEqual(2, mock_prepare.call_count) expected = [mock.call(server=self.l3_agent['host'], topic=topics.L3_AGENT, version='1.1'), mock.call(server=self.l3_agent_2['host'], topic=topics.L3_AGENT, version='1.1')] mock_prepare.assert_has_calls(expected, any_order=True) mock_prepare.reset_mock() self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet1['subnet']['id']}) self.assertEqual(3, mock_prepare.call_count) expected = [mock.call(server=self.l3_agent['host'], topic=topics.L3_AGENT, version='1.1'), mock.call(server=self.l3_agent_2['host'], topic=topics.L3_AGENT, version='1.1'), mock.call(server=HOST1, topic=topics.L3_AGENT, version='1.1')] mock_prepare.assert_has_calls(expected, any_order=True) mock_prepare.reset_mock() self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet2['subnet']['id']}) self.assertEqual(4, mock_prepare.call_count) expected = [mock.call(server=self.l3_agent['host'], topic=topics.L3_AGENT, version='1.1'), mock.call(server=self.l3_agent_2['host'], topic=topics.L3_AGENT, version='1.1'), mock.call(server=HOST1, topic=topics.L3_AGENT, version='1.1'), mock.call(server=HOST2, topic=topics.L3_AGENT, version='1.1')] mock_prepare.assert_has_calls(expected, any_order=True) mock_prepare.reset_mock() self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet3['subnet']['id']}) # there are no dvr serviceable ports on HOST3, so notification # goes to the same hosts self.assertEqual(4, mock_prepare.call_count) expected = [mock.call(server=self.l3_agent['host'], topic=topics.L3_AGENT, version='1.1'), mock.call(server=self.l3_agent_2['host'], topic=topics.L3_AGENT, version='1.1'), mock.call(server=HOST1, topic=topics.L3_AGENT, version='1.1'), mock.call(server=HOST2, topic=topics.L3_AGENT, version='1.1')] mock_prepare.assert_has_calls(expected, any_order=True) def test_router_is_not_removed_from_snat_agent_on_interface_removal(self): """Check that dvr router is not removed from dvr_snat l3 agents on router interface removal """ router = self._create_router(distributed=True, ha=True) kwargs = {'arg_list': (extnet_apidef.EXTERNAL,), extnet_apidef.EXTERNAL: True} with self.subnet() as subnet, \ self.network(**kwargs) as ext_net, \ self.subnet(network=ext_net, cidr='20.0.0.0/24'): gw_info = {'network_id': ext_net['network']['id']} self.l3_plugin.update_router( self.context, router['id'], {'router': {l3_apidef.EXTERNAL_GW_INFO: gw_info}}) self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet['subnet']['id']}) self._assert_router_is_hosted_on_both_dvr_snat_agents(router) with mock.patch.object(self.l3_plugin, '_l3_rpc_notifier') as l3_notifier: self.l3_plugin.remove_router_interface( self.context, router['id'], 
{'subnet_id': subnet['subnet']['id']}) self._assert_router_is_hosted_on_both_dvr_snat_agents(router) self.assertFalse(l3_notifier.router_removed_from_agent.called) def test_router_is_not_removed_from_snat_agent_on_dhcp_port_deletion(self): """Check that dvr router is not removed from l3 agent hosting SNAT for it on DHCP port removal """ router = self._create_router(distributed=True, ha=True) kwargs = {'arg_list': (extnet_apidef.EXTERNAL,), extnet_apidef.EXTERNAL: True} with self.network(**kwargs) as ext_net, \ self.subnet(network=ext_net), \ self.subnet(cidr='20.0.0.0/24') as subnet, \ self.port(subnet=subnet, device_owner=constants.DEVICE_OWNER_DHCP) as port: self.core_plugin.update_port( self.context, port['port']['id'], {'port': {'binding:host_id': self.l3_agent['host']}}) gw_info = {'network_id': ext_net['network']['id']} self.l3_plugin.update_router( self.context, router['id'], {'router': {l3_apidef.EXTERNAL_GW_INFO: gw_info}}) self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet['subnet']['id']}) # router should be scheduled to both dvr_snat l3 agents self._assert_router_is_hosted_on_both_dvr_snat_agents(router) notifier = self.l3_plugin.agent_notifiers[ constants.AGENT_TYPE_L3] with mock.patch.object( notifier, 'router_removed_from_agent', side_effect=Exception("BOOOOOOM!")) as remove_mock: self._delete('ports', port['port']['id']) # now when port is deleted the router still has external # gateway and should still be scheduled to the snat agent remove_mock.assert_not_called() self._assert_router_is_hosted_on_both_dvr_snat_agents(router) def _get_ha_interface_list_for_router(self, router): return self.l3_plugin.get_ha_router_port_bindings(self.context, [router['id']]) def _delete_router(self, router): self.l3_plugin.delete_router(self.context, router['id']) def _check_dvr_ha_interfaces_presence(self, rtr, int_cnt): self.assertEqual(int_cnt, len(self._get_ha_interface_list_for_router(rtr))) def _create_external_network(self): kwargs = {'arg_list': (extnet_apidef.EXTERNAL,), extnet_apidef.EXTERNAL: True} ext_net = self._make_network(self.fmt, 'ext_net', True, **kwargs) self._make_subnet( self.fmt, ext_net, '10.0.0.1', '10.0.0.0/24', ip_version=constants.IP_VERSION_4, enable_dhcp=True) self._make_subnet( self.fmt, ext_net, '2001:db8::1', '2001:db8::/64', ip_version=constants.IP_VERSION_6, enable_dhcp=True) return ext_net def _set_external_gateway(self, router, ext_net): gw_info = {'network_id': ext_net['network']['id']} self.l3_plugin.update_router( self.context, router['id'], {'router': {l3_apidef.EXTERNAL_GW_INFO: gw_info}}) def _clear_external_gateway(self, router): self.l3_plugin.update_router( self.context, router['id'], {'router': {l3_apidef.EXTERNAL_GW_INFO: {}}}) def _remove_interface_from_router(self, router, subnet): self.l3_plugin.remove_router_interface( self.context, router['id'], {'subnet_id': subnet['subnet']['id']}) def _check_snat_external_gateway_presence(self, ext_net, router, gw_count): ext_net_id = ext_net['network']['id'] gw_port = (self.l3_plugin._core_plugin. 
                   _get_router_gw_ports_by_network(self.context,
                                                   ext_net_id))
        self.assertEqual(gw_count, len(gw_port))
        if gw_count > 1:
            self.assertEqual(router['id'], gw_port[0].device_id)

    def _check_snat_internal_gateways_presence(self, router, subnet,
                                               int_cnt):
        snat_router_intfs = self.l3_plugin._get_snat_sync_interfaces(
            self.context, [router['id']])
        if int_cnt == 0:
            self.assertEqual(0, len(snat_router_intfs))
        else:
            snat_interfaces = snat_router_intfs[router['id']]
            self.assertEqual(1, len(snat_interfaces))
            self.assertEqual(subnet['subnet']['id'],
                             snat_interfaces[0]['fixed_ips'][0]['subnet_id'])

    def _check_internal_subnet_interface_presence(self, router, subnet,
                                                  int_cnt):
        router_ints = self.l3_plugin._get_sync_interfaces(
            self.context, [router['id']],
            device_owners=constants.ROUTER_INTERFACE_OWNERS)
        self.assertEqual(int_cnt, len(router_ints))
        if int_cnt > 1:
            self.assertEqual(subnet['subnet']['id'],
                             router_ints[0]['fixed_ips'][0]['subnet_id'])

    def _add_internal_subnet_to_router(self, router):
        int_net = self._make_network(self.fmt, 'int_net', True)
        int_subnet = self._make_subnet(
            self.fmt, int_net, '10.1.0.1', '10.1.0.0/24', enable_dhcp=True)
        self.l3_plugin.add_router_interface(
            self.context, router['id'],
            {'subnet_id': int_subnet['subnet']['id']})
        return int_subnet

    def _create_dvrha_router(self):
        router = self._create_router(distributed=True, ha=True)
        self.assertTrue(router['distributed'])
        self.assertTrue(router['ha'])
        return router

    def test_dvr_ha_router_create_attach_internal_external_detach_delete(
            self):
        """DVRHA Attach internal subnet followed by attach external"""
        # create router
        router = self._create_dvrha_router()
        self._check_dvr_ha_interfaces_presence(router, 2)

        # add subnet interface to router
        int_subnet = self._add_internal_subnet_to_router(router)
        self._check_internal_subnet_interface_presence(router, int_subnet, 1)

        # set router external gateway
        ext_net = self._create_external_network()
        self._set_external_gateway(router, ext_net)
        self._check_dvr_ha_interfaces_presence(router, 2)
        self._check_snat_external_gateway_presence(ext_net, router, 1)
        self._check_internal_subnet_interface_presence(router, int_subnet, 1)
        self._check_snat_internal_gateways_presence(router, int_subnet, 1)

        # clear router external gateway
        self._clear_external_gateway(router)
        self._check_dvr_ha_interfaces_presence(router, 2)
        self._check_snat_external_gateway_presence(ext_net, router, 0)
        self._check_internal_subnet_interface_presence(router, int_subnet, 1)
        self._check_snat_internal_gateways_presence(router, int_subnet, 0)

        # remove subnet interface from router
        self._remove_interface_from_router(router, int_subnet)
        self._check_internal_subnet_interface_presence(router, int_subnet, 0)

        # delete router
        self._delete_router(router)
        self._check_dvr_ha_interfaces_presence(router, 0)

    def test_get_device_owner_centralized(self):
        self.skipTest('Valid for DVR-only routers')

    def test_update_router_db_centralized_to_distributed(self):
        self.skipTest('Valid for DVR-only routers')

    def test__get_router_ids_for_agent(self):
        self.skipTest('Valid for DVR-only routers')

    def test_router_auto_scheduling(self):
        self.skipTest('Valid for DVR-only routers')

neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py

# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from neutron_lib.agent import topics
from neutron_lib.api.definitions import external_net as extnet_apidef
from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib.api.definitions import portbindings
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants
from neutron_lib import context

from neutron.api.rpc.handlers import l3_rpc
from neutron.tests.common import helpers
from neutron.tests.functional import base as functional_base
from neutron.tests.unit.plugins.ml2 import base as ml2_test_base

DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'


class L3DvrTestCaseBase(ml2_test_base.ML2TestFramework,
                        functional_base.BaseLoggingTestCase):
    def setUp(self):
        super(L3DvrTestCaseBase, self).setUp()
        self.l3_agent = helpers.register_l3_agent(
            host="host0", agent_mode=constants.L3_AGENT_MODE_DVR_SNAT)
        # register OVS agents to avoid time wasted on committing
        # port binding failures on every port update
        helpers.register_ovs_agent(host='host1')
        helpers.register_ovs_agent(host='host2')

    def _create_router(self, distributed=True, ha=False,
                       admin_state_up=True):
        return (super(L3DvrTestCaseBase, self).
_create_router(distributed=distributed, ha=ha, admin_state_up=admin_state_up)) class MultipleL3PluginTestCase(L3DvrTestCaseBase): def get_additional_service_plugins(self): p = super(MultipleL3PluginTestCase, self).get_additional_service_plugins() p.update({'l3_plugin_name_1': self.l3_plugin, 'l3_plugin_name_2': self.l3_plugin}) return p def test_create_router(self): router = self._create_router() self.assertEqual( constants.DEVICE_OWNER_DVR_INTERFACE, self.l3_plugin._get_device_owner(self.context, router)) class L3DvrTestCase(L3DvrTestCaseBase): def test_update_router_db_centralized_to_distributed(self): router = self._create_router(distributed=False) # router needs to be in admin state down in order to be upgraded to DVR self.l3_plugin.update_router( self.context, router['id'], {'router': {'admin_state_up': False}}) self.assertFalse(router['distributed']) self.l3_plugin.update_router( self.context, router['id'], {'router': {'distributed': True}}) router = self.l3_plugin.get_router(self.context, router['id']) self.assertTrue(router['distributed']) def test_get_device_owner_distributed_router_object(self): router = self._create_router() self.assertEqual( constants.DEVICE_OWNER_DVR_INTERFACE, self.l3_plugin._get_device_owner(self.context, router)) def test_get_device_owner_distributed_router_id(self): router = self._create_router() self.assertEqual( constants.DEVICE_OWNER_DVR_INTERFACE, self.l3_plugin._get_device_owner(self.context, router['id'])) def test_get_device_owner_centralized(self): router = self._create_router(distributed=False) self.assertEqual( constants.DEVICE_OWNER_ROUTER_INTF, self.l3_plugin._get_device_owner(self.context, router['id'])) def test_get_agent_gw_ports_exist_for_network_no_port(self): self.assertIsNone( self.l3_plugin._get_agent_gw_ports_exist_for_network( self.context, 'network_id', 'host', 'agent_id')) def test_csnat_ports_are_created_and_deleted_based_on_router_subnet(self): kwargs = {'arg_list': (extnet_apidef.EXTERNAL,), extnet_apidef.EXTERNAL: True} net1 = self._make_network(self.fmt, 'net1', True) subnet1 = self._make_subnet( self.fmt, net1, '10.1.0.1', '10.1.0.0/24', enable_dhcp=True) subnet2 = self._make_subnet( self.fmt, net1, '10.2.0.1', '10.2.0.0/24', enable_dhcp=True) ext_net = self._make_network(self.fmt, 'ext_net', True, **kwargs) self._make_subnet( self.fmt, ext_net, '20.0.0.1', '20.0.0.0/24', enable_dhcp=True) # Create first router and add an interface router1 = self._create_router() ext_net_id = ext_net['network']['id'] net1_id = net1['network']['id'] # Set gateway to router self.l3_plugin._update_router_gw_info( self.context, router1['id'], {'network_id': ext_net_id}) # Now add router interface (subnet1) from net1 to router self.l3_plugin.add_router_interface( self.context, router1['id'], {'subnet_id': subnet1['subnet']['id']}) # Now add router interface (subnet2) from net1 to router self.l3_plugin.add_router_interface( self.context, router1['id'], {'subnet_id': subnet2['subnet']['id']}) # Now check the valid snat interfaces passed to the agent snat_router_intfs = self.l3_plugin._get_snat_sync_interfaces( self.context, [router1['id']]) self.assertEqual(2, len(snat_router_intfs[router1['id']])) # Also make sure that there are no csnat ports created and # left over. 
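        # (A DVR router gets one centralized-SNAT port per attached internal
        # subnet, so two csnat ports are expected for the two subnets above.)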
csnat_ports = self.core_plugin.get_ports( self.context, filters={ 'network_id': [net1_id], 'device_owner': [constants.DEVICE_OWNER_ROUTER_SNAT]}) self.assertEqual(2, len(csnat_ports)) # Now remove router interface (subnet1) from net1 to router self.l3_plugin.remove_router_interface( self.context, router1['id'], {'subnet_id': subnet1['subnet']['id']}) # Now remove router interface (subnet2) from net1 to router self.l3_plugin.remove_router_interface( self.context, router1['id'], {'subnet_id': subnet2['subnet']['id']}) snat_router_intfs = self.l3_plugin._get_snat_sync_interfaces( self.context, [router1['id']]) self.assertEqual(0, len(snat_router_intfs[router1['id']])) # Also make sure that there are no csnat ports created and # left over. csnat_ports = self.core_plugin.get_ports( self.context, filters={ 'network_id': [net1_id], 'device_owner': [constants.DEVICE_OWNER_ROUTER_SNAT]}) self.assertEqual(0, len(csnat_ports)) def _test_remove_router_interface_leaves_snat_intact(self, by_subnet): with self.subnet() as subnet1, \ self.subnet(cidr='20.0.0.0/24') as subnet2: kwargs = {'arg_list': (extnet_apidef.EXTERNAL,), extnet_apidef.EXTERNAL: True} with self.network(**kwargs) as ext_net, \ self.subnet(network=ext_net, cidr='30.0.0.0/24'): router = self._create_router() self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet1['subnet']['id']}) self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet2['subnet']['id']}) gw_info = {'network_id': ext_net['network']['id']} self.l3_plugin.update_router( self.context, router['id'], {'router': {l3_apidef.EXTERNAL_GW_INFO: gw_info}}) snat_router_intfs = self.l3_plugin._get_snat_sync_interfaces( self.context, [router['id']]) self.assertEqual( 2, len(snat_router_intfs[router['id']])) if by_subnet: self.l3_plugin.remove_router_interface( self.context, router['id'], {'subnet_id': subnet1['subnet']['id']}) else: port = self.core_plugin.get_ports( self.context, filters={ 'network_id': [subnet1['subnet']['network_id']], 'device_owner': [constants.DEVICE_OWNER_DVR_INTERFACE]})[0] self.l3_plugin.remove_router_interface( self.context, router['id'], {'port_id': port['id']}) self.assertEqual( 1, len(self.l3_plugin._get_snat_sync_interfaces( self.context, [router['id']]))) def test_remove_router_interface_by_subnet_leaves_snat_intact(self): self._test_remove_router_interface_leaves_snat_intact(by_subnet=True) def test_remove_router_interface_by_port_leaves_snat_intact(self): self._test_remove_router_interface_leaves_snat_intact( by_subnet=False) def setup_create_agent_gw_port_for_network(self, network=None): if not network: network = self._make_network(self.fmt, '', True) network_id = network['network']['id'] port = self.core_plugin.create_port( self.context, {'port': {'tenant_id': '', 'network_id': network_id, 'mac_address': constants.ATTR_NOT_SPECIFIED, 'fixed_ips': constants.ATTR_NOT_SPECIFIED, 'device_id': self.l3_agent['id'], 'device_owner': constants.DEVICE_OWNER_AGENT_GW, portbindings.HOST_ID: '', 'admin_state_up': True, 'name': ''}}) return network_id, port def test_get_agent_gw_port_for_network(self): network_id, port = ( self.setup_create_agent_gw_port_for_network()) self.assertEqual( port['id'], self.l3_plugin._get_agent_gw_ports_exist_for_network( self.context, network_id, None, self.l3_agent['id'])['id']) def test_delete_agent_gw_port_for_network(self): network_id, port = ( self.setup_create_agent_gw_port_for_network()) self.l3_plugin.delete_floatingip_agent_gateway_port( self.context, "", network_id) 
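        # After the agent gateway port is deleted, the lookup below is
        # expected to return None for this network/agent pair.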
self.assertIsNone( self.l3_plugin._get_agent_gw_ports_exist_for_network( self.context, network_id, "", self.l3_agent['id'])) def test_get_fip_agent_gw_ports(self): self.setup_create_agent_gw_port_for_network() self.assertEqual( 1, len(self.l3_plugin._get_fip_agent_gw_ports( self.context, self.l3_agent['id']))) def test_process_routers(self): router = self._create_router() if not router.get('gw_port_id'): router['gw_port_id'] = 'fake_gw_id' self.l3_plugin._get_fip_agent_gw_ports = mock.Mock( return_value='fip_interface') self.l3_plugin._get_snat_sync_interfaces = mock.Mock( return_value={router['id']: 'snat_interface'}) result = self.l3_plugin._process_routers(self.context, [router], self.l3_agent) self.assertEqual( router['id'], result[router['id']]['id']) self.assertIn(constants.FLOATINGIP_AGENT_INTF_KEY, result[router['id']]) self.l3_plugin._get_fip_agent_gw_ports.assert_called_once_with( self.context, self.l3_agent['id']) self.l3_plugin._get_snat_sync_interfaces.assert_called_once_with( self.context, [router['id']]) def test_agent_gw_port_delete_when_last_gateway_for_ext_net_removed(self): kwargs = {'arg_list': (extnet_apidef.EXTERNAL,), extnet_apidef.EXTERNAL: True} net1 = self._make_network(self.fmt, 'net1', True) net2 = self._make_network(self.fmt, 'net2', True) subnet1 = self._make_subnet( self.fmt, net1, '10.1.0.1', '10.1.0.0/24', enable_dhcp=True) subnet2 = self._make_subnet( self.fmt, net2, '10.1.0.1', '10.1.0.0/24', enable_dhcp=True) ext_net = self._make_network(self.fmt, 'ext_net', True, **kwargs) self._make_subnet( self.fmt, ext_net, '20.0.0.1', '20.0.0.0/24', enable_dhcp=True) # Create first router and add an interface router1 = self._create_router() ext_net_id = ext_net['network']['id'] self.l3_plugin.add_router_interface( self.context, router1['id'], {'subnet_id': subnet1['subnet']['id']}) # Set gateway to first router self.l3_plugin._update_router_gw_info( self.context, router1['id'], {'network_id': ext_net_id}) # Create second router and add an interface router2 = self._create_router() self.l3_plugin.add_router_interface( self.context, router2['id'], {'subnet_id': subnet2['subnet']['id']}) # Set gateway to second router self.l3_plugin._update_router_gw_info( self.context, router2['id'], {'network_id': ext_net_id}) # Create an agent gateway port for the external network net_id, agent_gw_port = ( self.setup_create_agent_gw_port_for_network(network=ext_net)) # Check for agent gateway ports self.assertIsNotNone( self.l3_plugin._get_agent_gw_ports_exist_for_network( self.context, ext_net_id, "", self.l3_agent['id'])) self.l3_plugin._update_router_gw_info( self.context, router1['id'], {}) # Check for agent gateway port after deleting one of the gw self.assertIsNotNone( self.l3_plugin._get_agent_gw_ports_exist_for_network( self.context, ext_net_id, "", self.l3_agent['id'])) self.l3_plugin._update_router_gw_info( self.context, router2['id'], {}) # Check for agent gateway port after deleting last gw self.assertIsNone( self.l3_plugin._get_agent_gw_ports_exist_for_network( self.context, ext_net_id, "", self.l3_agent['id'])) def test_create_floating_ip_with_no_dvr_agents(self): self._test_create_floating_ip_agent_notification( test_agent_mode=None) def _test_create_floating_ip_agent_notification( self, dvr=True, test_agent_mode=constants.L3_AGENT_MODE_DVR): with self.subnet() as ext_subnet,\ self.subnet(cidr='20.0.0.0/24') as int_subnet,\ self.port(subnet=int_subnet, device_owner=DEVICE_OWNER_COMPUTE) as int_port: self.core_plugin.update_port( self.context, int_port['port']['id'], 
{'port': {portbindings.HOST_ID: 'host1'}}) # and create l3 agents on corresponding hosts if test_agent_mode is not None: helpers.register_l3_agent(host='host1', agent_mode=test_agent_mode) # make net external ext_net_id = ext_subnet['subnet']['network_id'] self._update('networks', ext_net_id, {'network': {extnet_apidef.EXTERNAL: True}}) router = self._create_router(distributed=dvr) self.l3_plugin.update_router( self.context, router['id'], {'router': { 'external_gateway_info': {'network_id': ext_net_id}}}) self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': int_subnet['subnet']['id']}) if test_agent_mode is None: self.assertIsNone( self.l3_plugin.create_fip_agent_gw_port_if_not_exists( self.context, ext_net_id, 'host1')) return floating_ip = {'floating_network_id': ext_net_id, 'router_id': router['id'], 'port_id': int_port['port']['id'], 'tenant_id': int_port['port']['tenant_id'], 'dns_name': '', 'dns_domain': ''} with mock.patch.object( self.l3_plugin, '_l3_rpc_notifier') as l3_notif: self.l3_plugin.create_floatingip( self.context, {'floatingip': floating_ip}) if dvr: if (test_agent_mode == constants.L3_AGENT_MODE_DVR_NO_EXTERNAL): if router['ha']: expected_calls = [ mock.call(self.context, [router['id']], 'host0'), mock.call(self.context, [router['id']], 'standby')] l3_notif.routers_updated_on_host.assert_has_calls( expected_calls, any_order=True) self.assertFalse(l3_notif.routers_updated.called) if not router['ha']: l3_notif.routers_updated_on_host.\ assert_called_once_with( self.context, [router['id']], 'host0') self.assertFalse(l3_notif.routers_updated.called) else: l3_notif.routers_updated_on_host.\ assert_called_once_with( self.context, [router['id']], 'host1') self.assertFalse(l3_notif.routers_updated.called) else: l3_notif.routers_updated.assert_called_once_with( self.context, [router['id']], None) self.assertFalse( l3_notif.routers_updated_on_host.called) def test_create_floating_ip_agent_notification(self): self._test_create_floating_ip_agent_notification() def test_create_floating_ip_agent_notification_for_dvr_no_external_agent( self): agent_mode = constants.L3_AGENT_MODE_DVR_NO_EXTERNAL self._test_create_floating_ip_agent_notification( test_agent_mode=agent_mode) def test_create_floating_ip_agent_notification_non_dvr(self): self._test_create_floating_ip_agent_notification(dvr=False) def _test_update_floating_ip_agent_notification( self, dvr=True, test_agent_mode=constants.L3_AGENT_MODE_DVR): with self.subnet() as ext_subnet,\ self.subnet(cidr='20.0.0.0/24') as int_subnet1,\ self.subnet(cidr='30.0.0.0/24') as int_subnet2,\ self.port(subnet=int_subnet1, device_owner=DEVICE_OWNER_COMPUTE) as int_port1,\ self.port(subnet=int_subnet2, device_owner=DEVICE_OWNER_COMPUTE) as int_port2: # locate internal ports on different hosts self.core_plugin.update_port( self.context, int_port1['port']['id'], {'port': {portbindings.HOST_ID: 'host1'}}) self.core_plugin.update_port( self.context, int_port2['port']['id'], {'port': {portbindings.HOST_ID: 'host2'}}) # and create l3 agents on corresponding hosts helpers.register_l3_agent(host='host1', agent_mode=test_agent_mode) helpers.register_l3_agent(host='host2', agent_mode=test_agent_mode) # make net external ext_net_id = ext_subnet['subnet']['network_id'] self._update('networks', ext_net_id, {'network': {extnet_apidef.EXTERNAL: True}}) router1 = self._create_router(distributed=dvr) router2 = self._create_router(distributed=dvr) for router in (router1, router2): self.l3_plugin.update_router( self.context, router['id'], 
{'router': { 'external_gateway_info': {'network_id': ext_net_id}}}) self.l3_plugin.add_router_interface( self.context, router1['id'], {'subnet_id': int_subnet1['subnet']['id']}) self.l3_plugin.add_router_interface( self.context, router2['id'], {'subnet_id': int_subnet2['subnet']['id']}) floating_ip = {'floating_network_id': ext_net_id, 'router_id': router1['id'], 'port_id': int_port1['port']['id'], 'tenant_id': int_port1['port']['tenant_id'], 'dns_name': '', 'dns_domain': ''} floating_ip = self.l3_plugin.create_floatingip( self.context, {'floatingip': floating_ip}) with mock.patch.object( self.l3_plugin, '_l3_rpc_notifier') as l3_notif: updated_floating_ip = {'router_id': router2['id'], 'port_id': int_port2['port']['id']} self.l3_plugin.update_floatingip( self.context, floating_ip['id'], {'floatingip': updated_floating_ip}) if dvr: if (test_agent_mode == constants.L3_AGENT_MODE_DVR_NO_EXTERNAL): if router1['ha'] and router2['ha']: self.assertEqual( 4, l3_notif.routers_updated_on_host.call_count) expected_calls = [ mock.call(self.context, [router1['id']], 'host0'), mock.call(self.context, [router1['id']], 'standby'), mock.call(self.context, [router2['id']], 'host0'), mock.call(self.context, [router2['id']], 'standby')] l3_notif.routers_updated_on_host.assert_has_calls( expected_calls, any_order=True) self.assertFalse(l3_notif.routers_updated.called) else: self.assertEqual( 2, l3_notif.routers_updated_on_host.call_count) expected_calls = [ mock.call(self.context, [router1['id']], 'host0'), mock.call(self.context, [router2['id']], 'host0')] l3_notif.routers_updated_on_host.assert_has_calls( expected_calls) self.assertFalse(l3_notif.routers_updated.called) else: self.assertEqual( 2, l3_notif.routers_updated_on_host.call_count) expected_calls = [ mock.call(self.context, [router1['id']], 'host1'), mock.call(self.context, [router2['id']], 'host2')] l3_notif.routers_updated_on_host.assert_has_calls( expected_calls) self.assertFalse(l3_notif.routers_updated.called) else: self.assertEqual( 2, l3_notif.routers_updated.call_count) expected_calls = [ mock.call(self.context, [router1['id']], None), mock.call(self.context, [router2['id']], None)] l3_notif.routers_updated.assert_has_calls( expected_calls) self.assertFalse(l3_notif.routers_updated_on_host.called) def test_update_floating_ip_agent_notification(self): self._test_update_floating_ip_agent_notification() def test_update_floating_ip_agent_notification_with_dvr_no_external_agents( self): agent_mode = constants.L3_AGENT_MODE_DVR_NO_EXTERNAL self._test_update_floating_ip_agent_notification( test_agent_mode=agent_mode) def test_update_floating_ip_agent_notification_non_dvr(self): self._test_update_floating_ip_agent_notification(dvr=False) def test_delete_floating_ip_with_no_agents(self): self._test_delete_floating_ip_agent_notification( test_agent_mode=None) def _test_delete_floating_ip_agent_notification( self, dvr=True, test_agent_mode=constants.L3_AGENT_MODE_DVR): with self.subnet() as ext_subnet,\ self.subnet(cidr='20.0.0.0/24') as int_subnet,\ self.port(subnet=int_subnet, device_owner=DEVICE_OWNER_COMPUTE) as int_port: self.core_plugin.update_port( self.context, int_port['port']['id'], {'port': {portbindings.HOST_ID: 'host1'}}) # and create l3 agents on corresponding hosts helpers.register_l3_agent(host='host1', agent_mode=test_agent_mode) # make net external ext_net_id = ext_subnet['subnet']['network_id'] self._update('networks', ext_net_id, {'network': {extnet_apidef.EXTERNAL: True}}) router = self._create_router(distributed=dvr) 
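            # Sketch of the expectation matrix exercised below (informal,
            # derived from the assertions rather than from any plugin API):
            #   dvr + 'dvr' agent          -> routers_updated_on_host(host1)
            #   dvr + 'dvr_no_external'    -> routers_updated_on_host(snat node)
            #   legacy router (dvr=False)  -> routers_updated(..., None)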
self.l3_plugin.update_router( self.context, router['id'], {'router': { 'external_gateway_info': {'network_id': ext_net_id}}}) self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': int_subnet['subnet']['id']}) floating_ip = {'floating_network_id': ext_net_id, 'router_id': router['id'], 'port_id': int_port['port']['id'], 'tenant_id': int_port['port']['tenant_id'], 'dns_name': '', 'dns_domain': ''} floating_ip = self.l3_plugin.create_floatingip( self.context, {'floatingip': floating_ip}) if test_agent_mode is None: with mock.patch.object( self.l3_plugin, 'get_dvr_agent_on_host') as a_mock,\ mock.patch.object(self.l3_plugin, '_l3_rpc_notifier') as l3_notif: a_mock.return_value = None self.l3_plugin.delete_floatingip( self.context, floating_ip['id']) self.assertFalse( l3_notif.routers_updated_on_host.called) return with mock.patch.object( self.l3_plugin, '_l3_rpc_notifier') as l3_notif: self.l3_plugin.delete_floatingip( self.context, floating_ip['id']) if dvr: if test_agent_mode == ( constants.L3_AGENT_MODE_DVR_NO_EXTERNAL): if router['ha']: expected_calls = [ mock.call(self.context, [router['id']], 'host0'), mock.call(self.context, [router['id']], 'standby')] l3_notif.routers_updated_on_host.assert_has_calls( expected_calls, any_order=True) self.assertFalse(l3_notif.routers_updated.called) else: l3_notif.routers_updated_on_host.\ assert_called_once_with( self.context, [router['id']], 'host0') self.assertFalse(l3_notif.routers_updated.called) if test_agent_mode == ( constants.L3_AGENT_MODE_DVR): l3_notif.routers_updated_on_host.\ assert_called_once_with( self.context, [router['id']], 'host1') self.assertFalse(l3_notif.routers_updated.called) else: l3_notif.routers_updated.assert_called_once_with( self.context, [router['id']], None) self.assertFalse( l3_notif.routers_updated_on_host.called) def test_delete_floating_ip_agent_notification(self): self._test_delete_floating_ip_agent_notification() def test_delete_floating_ip_agent_notification_with_dvr_no_external_agents( self): agent_mode = constants.L3_AGENT_MODE_DVR_NO_EXTERNAL self._test_delete_floating_ip_agent_notification( test_agent_mode=agent_mode) def test_delete_floating_ip_agent_notification_non_dvr(self): self._test_delete_floating_ip_agent_notification(dvr=False) def test_router_with_ipv4_and_multiple_ipv6_on_same_network(self): kwargs = {'arg_list': (extnet_apidef.EXTERNAL,), extnet_apidef.EXTERNAL: True} ext_net = self._make_network(self.fmt, '', True, **kwargs) self._make_subnet( self.fmt, ext_net, '10.0.0.1', '10.0.0.0/24', ip_version=constants.IP_VERSION_4, enable_dhcp=True) self._make_subnet( self.fmt, ext_net, '2001:db8::1', '2001:db8::/64', ip_version=constants.IP_VERSION_6, enable_dhcp=True) router1 = self._create_router() self.l3_plugin._update_router_gw_info( self.context, router1['id'], {'network_id': ext_net['network']['id']}) snat_router_intfs = self.l3_plugin._get_snat_sync_interfaces( self.context, [router1['id']]) self.assertEqual(0, len(snat_router_intfs[router1['id']])) private_net1 = self._make_network(self.fmt, 'net1', True) private_ipv6_subnet1 = self._make_subnet(self.fmt, private_net1, 'fd00::1', cidr='fd00::1/64', ip_version=constants.IP_VERSION_6, ipv6_ra_mode='slaac', ipv6_address_mode='slaac') private_ipv6_subnet2 = self._make_subnet(self.fmt, private_net1, 'fd01::1', cidr='fd01::1/64', ip_version=constants.IP_VERSION_6, ipv6_ra_mode='slaac', ipv6_address_mode='slaac') # Add the first IPv6 subnet to the router self.l3_plugin.add_router_interface( self.context, router1['id'], 
{'subnet_id': private_ipv6_subnet1['subnet']['id']}) # Check for the internal snat port interfaces snat_router_intfs = self.l3_plugin._get_snat_sync_interfaces( self.context, [router1['id']]) self.assertEqual(1, len(snat_router_intfs[router1['id']])) # Add the second IPv6 subnet to the router self.l3_plugin.add_router_interface( self.context, router1['id'], {'subnet_id': private_ipv6_subnet2['subnet']['id']}) # Check for the internal snat port interfaces snat_router_intfs = self.l3_plugin._get_snat_sync_interfaces( self.context, [router1['id']]) snat_intf_list = snat_router_intfs[router1['id']] fixed_ips = snat_intf_list[0]['fixed_ips'] self.assertEqual(1, len(snat_router_intfs[router1['id']])) self.assertEqual(2, len(fixed_ips)) # Now delete the router interface and it should update the # SNAT port with the right fixed_ips instead of deleting it. self.l3_plugin.remove_router_interface( self.context, router1['id'], {'subnet_id': private_ipv6_subnet2['subnet']['id']}) # Check for the internal snat port interfaces snat_router_intfs = self.l3_plugin._get_snat_sync_interfaces( self.context, [router1['id']]) snat_intf_list = snat_router_intfs[router1['id']] fixed_ips = snat_intf_list[0]['fixed_ips'] self.assertEqual(1, len(snat_router_intfs[router1['id']])) self.assertEqual(1, len(fixed_ips)) def test_unbound_allowed_addr_pairs_fip_with_multiple_active_vms(self): HOST1 = 'host1' helpers.register_l3_agent( host=HOST1, agent_mode=constants.L3_AGENT_MODE_DVR) HOST2 = 'host2' helpers.register_l3_agent( host=HOST2, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router(ha=False) private_net1 = self._make_network(self.fmt, 'net1', True) test_allocation_pools = [{'start': '10.1.0.2', 'end': '10.1.0.20'}] fixed_vrrp_ip = [{'ip_address': '10.1.0.201'}] kwargs = {'arg_list': (extnet_apidef.EXTERNAL,), extnet_apidef.EXTERNAL: True} ext_net = self._make_network(self.fmt, '', True, **kwargs) self._make_subnet( self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24', ip_version=constants.IP_VERSION_4, enable_dhcp=True) self.l3_plugin.schedule_router(self.context, router['id'], candidates=[self.l3_agent]) # Set gateway to router self.l3_plugin._update_router_gw_info( self.context, router['id'], {'network_id': ext_net['network']['id']}) private_subnet1 = self._make_subnet( self.fmt, private_net1, '10.1.0.1', cidr='10.1.0.0/24', ip_version=constants.IP_VERSION_4, allocation_pools=test_allocation_pools, enable_dhcp=True) vrrp_port = self._make_port( self.fmt, private_net1['network']['id'], device_owner='', fixed_ips=fixed_vrrp_ip) allowed_address_pairs = [ {'ip_address': '10.1.0.201', 'mac_address': vrrp_port['port']['mac_address']}] with self.port( subnet=private_subnet1, device_owner=DEVICE_OWNER_COMPUTE) as int_port1,\ self.port( subnet=private_subnet1, device_owner=DEVICE_OWNER_COMPUTE) as int_port2: self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': private_subnet1['subnet']['id']}) router_handle = ( self.l3_plugin.list_active_sync_routers_on_active_l3_agent( self.context, self.l3_agent['host'], [router['id']])) self.assertEqual(self.l3_agent['host'], router_handle[0]['gw_port_host']) with mock.patch.object(self.l3_plugin, '_l3_rpc_notifier') as l3_notifier: vm_port1 = self.core_plugin.update_port( self.context, int_port1['port']['id'], {'port': {portbindings.HOST_ID: HOST1}}) vm_port2 = self.core_plugin.update_port( self.context, int_port2['port']['id'], {'port': {portbindings.HOST_ID: HOST2}}) vrrp_port_db = self.core_plugin.get_port( self.context, vrrp_port['port']['id']) 
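                # Informal context for the checks below: 10.1.0.201 is a
                # VRRP address shared by both VMs through
                # allowed_address_pairs, while the port that owns it stays
                # unbound; a FIP on such a port has no single compute host
                # to live on, so it should be served centrally and both
                # hosting nodes should be notified.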
# Make sure that the VRRP port is not bound to any host self.assertNotEqual(vrrp_port_db[portbindings.HOST_ID], HOST1) self.assertNotEqual(vrrp_port_db[portbindings.HOST_ID], HOST2) self.assertNotEqual( vrrp_port_db[portbindings.HOST_ID], self.l3_agent['host']) # Now update both the VM ports with the allowed_address_pair ip self.core_plugin.update_port( self.context, vm_port1['id'], {'port': { 'allowed_address_pairs': allowed_address_pairs}}) updated_vm_port1 = self.core_plugin.get_port( self.context, vm_port1['id']) expected_allowed_address_pairs1 = updated_vm_port1.get( 'allowed_address_pairs') self.assertEqual(expected_allowed_address_pairs1, allowed_address_pairs) self.core_plugin.update_port( self.context, vm_port2['id'], {'port': { 'allowed_address_pairs': allowed_address_pairs}}) updated_vm_port2 = self.core_plugin.get_port( self.context, vm_port2['id']) expected_allowed_address_pairs2 = updated_vm_port2.get( 'allowed_address_pairs') self.assertEqual(expected_allowed_address_pairs2, allowed_address_pairs) # Now let us assign the floatingip to the vrrp port that is # unbound to any host. floating_ip = {'floating_network_id': ext_net['network']['id'], 'router_id': router['id'], 'port_id': vrrp_port['port']['id'], 'tenant_id': vrrp_port['port']['tenant_id']} floating_ip = self.l3_plugin.create_floatingip( self.context, {'floatingip': floating_ip}) expected_routers_updated_calls = [ mock.call(self.context, mock.ANY, 'host0'), mock.call(self.context, mock.ANY, HOST1), mock.call(self.context, mock.ANY, HOST2)] l3_notifier.routers_updated_on_host.assert_has_calls( expected_routers_updated_calls, any_order=True) self.assertFalse(l3_notifier.routers_updated.called) router_info = ( self.l3_plugin.list_active_sync_routers_on_active_l3_agent( self.context, self.l3_agent['host'], [router['id']])) floatingips = router_info[0][constants.FLOATINGIP_KEY] self.assertTrue(floatingips[0][constants.DVR_SNAT_BOUND]) def test_dvr_process_floatingips_for_dvr_on_full_sync(self): HOST1 = 'host1' helpers.register_l3_agent( host=HOST1, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router(ha=False) private_net1 = self._make_network(self.fmt, 'net1', True) kwargs = {'arg_list': (extnet_apidef.EXTERNAL,), extnet_apidef.EXTERNAL: True} ext_net = self._make_network(self.fmt, '', True, **kwargs) self._make_subnet( self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24', ip_version=constants.IP_VERSION_4, enable_dhcp=True) # Schedule the router to the dvr_snat node self.l3_plugin.schedule_router(self.context, router['id'], candidates=[self.l3_agent]) # Set gateway to router self.l3_plugin._update_router_gw_info( self.context, router['id'], {'network_id': ext_net['network']['id']}) private_subnet1 = self._make_subnet( self.fmt, private_net1, '10.1.0.1', cidr='10.1.0.0/24', ip_version=constants.IP_VERSION_4, enable_dhcp=True) with self.port( subnet=private_subnet1, device_owner=DEVICE_OWNER_COMPUTE) as int_port1,\ self.port( subnet=private_subnet1) as int_port2: self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': private_subnet1['subnet']['id']}) router_handle = ( self.l3_plugin.list_active_sync_routers_on_active_l3_agent( self.context, self.l3_agent['host'], [router['id']])) self.assertEqual(self.l3_agent['host'], router_handle[0]['gw_port_host']) with mock.patch.object(self.l3_plugin, '_l3_rpc_notifier') as l3_notifier: self.core_plugin.update_port( self.context, int_port1['port']['id'], {'port': {portbindings.HOST_ID: HOST1}}) # Now let us assign the floatingip to the bound port # 
and unbound port. fip1 = {'floating_network_id': ext_net['network']['id'], 'router_id': router['id'], 'port_id': int_port1['port']['id'], 'tenant_id': int_port1['port']['tenant_id']} self.l3_plugin.create_floatingip( self.context, {'floatingip': fip1}) expected_routers_updated_calls = [ mock.call(self.context, mock.ANY, HOST1)] l3_notifier.routers_updated_on_host.assert_has_calls( expected_routers_updated_calls) self.assertFalse(l3_notifier.routers_updated.called) fip2 = {'floating_network_id': ext_net['network']['id'], 'router_id': router['id'], 'port_id': int_port2['port']['id'], 'tenant_id': int_port2['port']['tenant_id']} self.l3_plugin.create_floatingip( self.context, {'floatingip': fip2}) router_info = ( self.l3_plugin.list_active_sync_routers_on_active_l3_agent( self.context, self.l3_agent['host'], [router['id']])) floatingips = router_info[0][constants.FLOATINGIP_KEY] self.assertEqual(1, len(floatingips)) self.assertTrue(floatingips[0][constants.DVR_SNAT_BOUND]) self.assertEqual(constants.FLOATING_IP_HOST_NEEDS_BINDING, floatingips[0]['host']) router1_info = ( self.l3_plugin.list_active_sync_routers_on_active_l3_agent( self.context, HOST1, [router['id']])) floatingips = router1_info[0][constants.FLOATINGIP_KEY] self.assertEqual(1, len(floatingips)) self.assertEqual(HOST1, floatingips[0]['host']) self.assertIsNone(floatingips[0]['dest_host']) def test_dvr_router_unbound_floating_ip_migrate_to_bound_host(self): HOST1 = 'host1' helpers.register_l3_agent( host=HOST1, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router(ha=False) private_net1 = self._make_network(self.fmt, 'net1', True) kwargs = {'arg_list': (extnet_apidef.EXTERNAL,), extnet_apidef.EXTERNAL: True} ext_net = self._make_network(self.fmt, '', True, **kwargs) self._make_subnet( self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24', ip_version=constants.IP_VERSION_4, enable_dhcp=True) self.l3_plugin.schedule_router(self.context, router['id'], candidates=[self.l3_agent]) # Set gateway to router self.l3_plugin._update_router_gw_info( self.context, router['id'], {'network_id': ext_net['network']['id']}) private_subnet1 = self._make_subnet( self.fmt, private_net1, '10.1.0.1', cidr='10.1.0.0/24', ip_version=constants.IP_VERSION_4, enable_dhcp=True) with self.port( subnet=private_subnet1, device_owner=DEVICE_OWNER_COMPUTE) as int_port1: self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': private_subnet1['subnet']['id']}) router_handle = ( self.l3_plugin.list_active_sync_routers_on_active_l3_agent( self.context, self.l3_agent['host'], [router['id']])) self.assertEqual(self.l3_agent['host'], router_handle[0]['gw_port_host']) with mock.patch.object(self.l3_plugin, '_l3_rpc_notifier') as l3_notifier: # Next we can try to associate the floatingip to the # VM port floating_ip = {'floating_network_id': ext_net['network']['id'], 'router_id': router['id'], 'port_id': int_port1['port']['id'], 'tenant_id': int_port1['port']['tenant_id']} floating_ip = self.l3_plugin.create_floatingip( self.context, {'floatingip': floating_ip}) expected_routers_updated_calls = [ mock.call(self.context, mock.ANY, 'host0')] l3_notifier.routers_updated_on_host.assert_has_calls( expected_routers_updated_calls) self.assertFalse(l3_notifier.routers_updated.called) router_info = ( self.l3_plugin.list_active_sync_routers_on_active_l3_agent( self.context, self.l3_agent['host'], [router['id']])) floatingips = router_info[0][constants.FLOATINGIP_KEY] self.assertTrue(floatingips[0][constants.DVR_SNAT_BOUND]) # Now do the host binding 
to the fip port self.core_plugin.update_port( self.context, int_port1['port']['id'], {'port': {portbindings.HOST_ID: HOST1}}) expected_routers_updated_calls = [ mock.call(self.context, mock.ANY, 'host0'), mock.call(self.context, mock.ANY, HOST1)] l3_notifier.routers_updated_on_host.assert_has_calls( expected_routers_updated_calls) updated_router_info = ( self.l3_plugin.list_active_sync_routers_on_active_l3_agent( self.context, HOST1, [router['id']])) floatingips = updated_router_info[0][constants.FLOATINGIP_KEY] self.assertFalse(floatingips[0].get(constants.DVR_SNAT_BOUND)) self.assertEqual(HOST1, floatingips[0]['host']) def test_dvr_router_centralized_floating_ip(self): HOST1 = 'host1' helpers.register_l3_agent( host=HOST1, agent_mode=constants.L3_AGENT_MODE_DVR_NO_EXTERNAL) router = self._create_router(ha=False) private_net1 = self._make_network(self.fmt, 'net1', True) kwargs = {'arg_list': (extnet_apidef.EXTERNAL,), extnet_apidef.EXTERNAL: True} ext_net = self._make_network(self.fmt, '', True, **kwargs) self._make_subnet( self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24', ip_version=constants.IP_VERSION_4, enable_dhcp=True) self.l3_plugin.schedule_router(self.context, router['id'], candidates=[self.l3_agent]) # Set gateway to router self.l3_plugin._update_router_gw_info( self.context, router['id'], {'network_id': ext_net['network']['id']}) private_subnet1 = self._make_subnet( self.fmt, private_net1, '10.1.0.1', cidr='10.1.0.0/24', ip_version=constants.IP_VERSION_4, enable_dhcp=True) with self.port( subnet=private_subnet1, device_owner=DEVICE_OWNER_COMPUTE) as int_port1: self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': private_subnet1['subnet']['id']}) router_handle = ( self.l3_plugin.list_active_sync_routers_on_active_l3_agent( self.context, self.l3_agent['host'], [router['id']])) self.assertEqual(self.l3_agent['host'], router_handle[0]['gw_port_host']) with mock.patch.object(self.l3_plugin, '_l3_rpc_notifier') as l3_notifier: vm_port = self.core_plugin.update_port( self.context, int_port1['port']['id'], {'port': {portbindings.HOST_ID: HOST1}}) self.assertEqual( 1, l3_notifier.routers_updated_on_host.call_count) # Next we can try to associate the floatingip to the # VM port floating_ip = {'floating_network_id': ext_net['network']['id'], 'router_id': router['id'], 'port_id': vm_port['id'], 'tenant_id': vm_port['tenant_id']} floating_ip = self.l3_plugin.create_floatingip( self.context, {'floatingip': floating_ip}) expected_routers_updated_calls = [ mock.call(self.context, mock.ANY, HOST1), mock.call(self.context, mock.ANY, 'host0')] l3_notifier.routers_updated_on_host.assert_has_calls( expected_routers_updated_calls) self.assertFalse(l3_notifier.routers_updated.called) router_info = ( self.l3_plugin.list_active_sync_routers_on_active_l3_agent( self.context, self.l3_agent['host'], [router['id']])) floatingips = router_info[0][constants.FLOATINGIP_KEY] self.assertTrue(floatingips[0][constants.DVR_SNAT_BOUND]) # Test case to make sure when an agent in this case # dvr_no_external restarts and does a full sync, we need # to make sure that the returned router_info has # DVR_SNAT_BOUND flag enabled, otherwise the floating IP # state would error out. 
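                # (Informal corollary: under dvr_no_external the FIP is
                # realized on the SNAT node, so the flag has to survive a
                # full resync of that agent.)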
router_sync_info = ( self.l3_plugin.list_active_sync_routers_on_active_l3_agent( self.context, HOST1, [router['id']])) floatingips = router_sync_info[0][constants.FLOATINGIP_KEY] self.assertTrue(floatingips[0][constants.DVR_SNAT_BOUND]) def test_dvr_gateway_host_binding_is_set(self): router = self._create_router(ha=False) private_net1 = self._make_network(self.fmt, 'net1', True) kwargs = {'arg_list': (extnet_apidef.EXTERNAL,), extnet_apidef.EXTERNAL: True} ext_net = self._make_network(self.fmt, '', True, **kwargs) self._make_subnet( self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24', ip_version=constants.IP_VERSION_4, enable_dhcp=True) self.l3_plugin.schedule_router(self.context, router['id'], candidates=[self.l3_agent]) # Set gateway to router self.l3_plugin._update_router_gw_info( self.context, router['id'], {'network_id': ext_net['network']['id']}) private_subnet1 = self._make_subnet( self.fmt, private_net1, '10.1.0.1', cidr='10.1.0.0/24', ip_version=constants.IP_VERSION_4, enable_dhcp=True) self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': private_subnet1['subnet']['id']}) # Check for the gw_port_host in the router dict to make # sure that the _build_routers_list in l3_dvr_db is called. router_handle = ( self.l3_plugin.list_active_sync_routers_on_active_l3_agent( self.context, self.l3_agent['host'], [router['id']])) self.assertEqual(self.l3_agent['host'], router_handle[0]['gw_port_host']) def test_update_vm_port_host_router_update(self): # register l3 agents in dvr mode in addition to existing dvr_snat agent HOST1 = 'host1' helpers.register_l3_agent( host=HOST1, agent_mode=constants.L3_AGENT_MODE_DVR) HOST2 = 'host2' helpers.register_l3_agent( host=HOST2, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router() with self.subnet() as subnet: self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet['subnet']['id']}) with mock.patch.object(self.l3_plugin, '_l3_rpc_notifier') as l3_notifier,\ self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE) as port: self.l3_plugin.agent_notifiers[ constants.AGENT_TYPE_L3] = l3_notifier self.core_plugin.update_port( self.context, port['port']['id'], {'port': {portbindings.HOST_ID: HOST1}}) l3_notifier.routers_updated_on_host.assert_called_once_with( self.context, {router['id']}, HOST1) self.assertFalse(l3_notifier.routers_updated.called) # updating port's host (instance migration) l3_notifier.reset_mock() self.core_plugin.update_port( self.context, port['port']['id'], {'port': {portbindings.HOST_ID: HOST2}}) l3_notifier.routers_updated_on_host.assert_called_once_with( self.context, {router['id']}, HOST2) l3_notifier.router_removed_from_agent.assert_called_once_with( mock.ANY, router['id'], HOST1) def test_dvr_router_manual_rescheduling_removes_router(self): router = self._create_router() kwargs = {'arg_list': (extnet_apidef.EXTERNAL,), extnet_apidef.EXTERNAL: True} with self.network(**kwargs) as ext_net,\ self.subnet(network=ext_net),\ self.subnet(cidr='20.0.0.0/24') as subnet,\ self.port(subnet=subnet): self.l3_plugin._update_router_gw_info( self.context, router['id'], {'network_id': ext_net['network']['id']}) self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet['subnet']['id']}) self.l3_plugin.schedule_router(self.context, router['id'], candidates=[self.l3_agent]) # Now the VM should be also scheduled on the node notifier = self.l3_plugin.agent_notifiers[ constants.AGENT_TYPE_L3] with mock.patch.object( notifier, 'router_removed_from_agent') as 
rtr_remove_mock: self.l3_plugin.remove_router_from_l3_agent( self.context, self.l3_agent['id'], router['id']) rtr_remove_mock.assert_called_once_with( self.context, router['id'], self.l3_agent['host']) def test_dvr_router_manual_rescheduling_updates_router(self): router = self._create_router() kwargs = {'arg_list': (extnet_apidef.EXTERNAL,), extnet_apidef.EXTERNAL: True} with self.network(**kwargs) as ext_net,\ self.subnet(network=ext_net),\ self.subnet(cidr='20.0.0.0/24') as subnet,\ self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE) as port: self.core_plugin.update_port( self.context, port['port']['id'], {'port': {'binding:host_id': self.l3_agent['host']}}) self.l3_plugin._update_router_gw_info( self.context, router['id'], {'network_id': ext_net['network']['id']}) self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet['subnet']['id']}) self.l3_plugin.schedule_router(self.context, router['id'], candidates=[self.l3_agent]) # Now the VM should be also scheduled on the node notifier = self.l3_plugin.agent_notifiers[ constants.AGENT_TYPE_L3] with mock.patch.object( notifier, 'routers_updated_on_host') as rtr_update_mock: self.l3_plugin.remove_router_from_l3_agent( self.context, self.l3_agent['id'], router['id']) rtr_update_mock.assert_called_once_with( self.context, [router['id']], self.l3_agent['host']) def _test_router_remove_from_agent_on_vm_port_deletion( self, non_admin_port=False): # register l3 agent in dvr mode in addition to existing dvr_snat agent HOST = 'host1' non_admin_tenant = 'tenant1' helpers.register_l3_agent( host=HOST, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router() with self.network(shared=True) as net,\ self.subnet(network=net) as subnet,\ self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, tenant_id=non_admin_tenant, set_context=non_admin_port) as port: self.core_plugin.update_port( self.context, port['port']['id'], {'port': {portbindings.HOST_ID: HOST}}) self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet['subnet']['id']}) with mock.patch.object(self.l3_plugin.l3_rpc_notifier, 'router_removed_from_agent') as remove_mock: ctx = context.Context( '', non_admin_tenant) if non_admin_port else self.context self._delete('ports', port['port']['id'], neutron_context=ctx) remove_mock.assert_called_once_with( mock.ANY, router['id'], HOST) def test_router_remove_from_agent_on_vm_port_deletion(self): self._test_router_remove_from_agent_on_vm_port_deletion() def test_admin_router_remove_from_agent_on_vm_port_deletion(self): self._test_router_remove_from_agent_on_vm_port_deletion( non_admin_port=True) def test_dvr_router_notifications_for_live_migration_with_fip(self): self._dvr_router_notifications_for_live_migration( with_floatingip=True) def test_dvr_router_notifications_for_live_migration_without_fip(self): self._dvr_router_notifications_for_live_migration() def _dvr_router_notifications_for_live_migration( self, with_floatingip=False): """Check the router notifications go to the right hosts with live migration without hostbinding on the port. 
""" # register l3 agents in dvr mode in addition to existing dvr_snat agent HOST1, HOST2 = 'host1', 'host2' for host in [HOST1, HOST2]: helpers.register_l3_agent( host=host, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router() arg_list = (portbindings.HOST_ID,) with self.subnet() as ext_subnet,\ self.subnet(cidr='20.0.0.0/24') as subnet1,\ self.port(subnet=subnet1, device_owner=DEVICE_OWNER_COMPUTE, arg_list=arg_list, **{portbindings.HOST_ID: HOST1}) as vm_port: # make net external ext_net_id = ext_subnet['subnet']['network_id'] self._update('networks', ext_net_id, {'network': {extnet_apidef.EXTERNAL: True}}) # add external gateway to router self.l3_plugin.update_router( self.context, router['id'], {'router': { 'external_gateway_info': {'network_id': ext_net_id}}}) self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet1['subnet']['id']}) if with_floatingip: floating_ip = {'floating_network_id': ext_net_id, 'router_id': router['id'], 'port_id': vm_port['port']['id'], 'tenant_id': vm_port['port']['tenant_id'], 'dns_name': '', 'dns_domain': ''} floating_ip = self.l3_plugin.create_floatingip( self.context, {'floatingip': floating_ip}) with mock.patch.object(self.l3_plugin, '_l3_rpc_notifier') as l3_notifier,\ mock.patch.object( self.l3_plugin, 'create_fip_agent_gw_port_if_not_exists' ) as fip_agent: live_migration_port_profile = { 'migrating_to': HOST2 } # Update the VM Port with Migration porbinding Profile. # With this change, it should trigger a notification to # the Destination host to create a Router ahead of time # before the VM Port binding has changed to HOST2. updated_port = self.core_plugin.update_port( self.context, vm_port['port']['id'], {'port': { portbindings.PROFILE: live_migration_port_profile}}) # this will be called twice, once for port update, and once # for new binding l3_notifier.routers_updated_on_host.assert_any_call( self.context, {router['id']}, HOST2) # Check the port-binding is still with the old HOST1, but # the router update notification has been sent to the new # host 'HOST2' based on the live migration profile change. self.assertEqual(updated_port[portbindings.HOST_ID], HOST1) self.assertNotEqual(updated_port[portbindings.HOST_ID], HOST2) if with_floatingip: fip_agent.return_value = True # Since we have already created the floatingip for the # port, it should be creating the floatingip agent gw # port for the new host if it does not exist. 
fip_agent.assert_any_call( mock.ANY, floating_ip['floating_network_id'], HOST2) def test_router_notifications(self): """Check that notifications go to the right hosts in different conditions """ # register l3 agents in dvr mode in addition to existing dvr_snat agent HOST1, HOST2, HOST3 = 'host1', 'host2', 'host3' for host in [HOST1, HOST2, HOST3]: helpers.register_l3_agent( host=host, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router() arg_list = (portbindings.HOST_ID,) with self.subnet() as ext_subnet,\ self.subnet(cidr='20.0.0.0/24') as subnet1,\ self.subnet(cidr='30.0.0.0/24') as subnet2,\ self.subnet(cidr='40.0.0.0/24') as subnet3,\ self.port(subnet=subnet1, device_owner=DEVICE_OWNER_COMPUTE, arg_list=arg_list, **{portbindings.HOST_ID: HOST1}),\ self.port(subnet=subnet2, device_owner=constants.DEVICE_OWNER_DHCP, arg_list=arg_list, **{portbindings.HOST_ID: HOST2}),\ self.port(subnet=subnet3, device_owner=constants.DEVICE_OWNER_NEUTRON_PREFIX, arg_list=arg_list, **{portbindings.HOST_ID: HOST3}): # make net external ext_net_id = ext_subnet['subnet']['network_id'] self._update('networks', ext_net_id, {'network': {extnet_apidef.EXTERNAL: True}}) with mock.patch.object(self.l3_plugin.l3_rpc_notifier.client, 'prepare') as mock_prepare: # add external gateway to router self.l3_plugin.update_router( self.context, router['id'], {'router': { 'external_gateway_info': {'network_id': ext_net_id}}}) # router has no interfaces so notification goes # to only dvr_snat agent mock_prepare.assert_called_once_with( server=self.l3_agent['host'], topic=topics.L3_AGENT, version='1.1') mock_prepare.reset_mock() self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet1['subnet']['id']}) self.assertEqual(2, mock_prepare.call_count) expected = [mock.call(server=self.l3_agent['host'], topic=topics.L3_AGENT, version='1.1'), mock.call(server=HOST1, topic=topics.L3_AGENT, version='1.1')] mock_prepare.assert_has_calls(expected, any_order=True) mock_prepare.reset_mock() self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet2['subnet']['id']}) self.assertEqual(3, mock_prepare.call_count) expected = [mock.call(server=self.l3_agent['host'], topic=topics.L3_AGENT, version='1.1'), mock.call(server=HOST1, topic=topics.L3_AGENT, version='1.1'), mock.call(server=HOST2, topic=topics.L3_AGENT, version='1.1')] mock_prepare.assert_has_calls(expected, any_order=True) mock_prepare.reset_mock() self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet3['subnet']['id']}) # there are no dvr serviceable ports on HOST3, so notification # goes to the same hosts self.assertEqual(3, mock_prepare.call_count) expected = [mock.call(server=self.l3_agent['host'], topic=topics.L3_AGENT, version='1.1'), mock.call(server=HOST1, topic=topics.L3_AGENT, version='1.1'), mock.call(server=HOST2, topic=topics.L3_AGENT, version='1.1')] mock_prepare.assert_has_calls(expected, any_order=True) def test_router_is_not_removed_from_snat_agent_on_interface_removal(self): """Check that dvr router is not removed from l3 agent hosting SNAT for it on router interface removal """ router = self._create_router() kwargs = {'arg_list': (extnet_apidef.EXTERNAL,), extnet_apidef.EXTERNAL: True} with self.subnet() as subnet,\ self.network(**kwargs) as ext_net,\ self.subnet(network=ext_net, cidr='20.0.0.0/24'): self.l3_plugin._update_router_gw_info( self.context, router['id'], {'network_id': ext_net['network']['id']}) self.l3_plugin.add_router_interface( self.context, 
router['id'], {'subnet_id': subnet['subnet']['id']}) agents = self.l3_plugin.list_l3_agents_hosting_router( self.context, router['id']) self.assertEqual(1, len(agents['agents'])) with mock.patch.object(self.l3_plugin, '_l3_rpc_notifier') as l3_notifier: self.l3_plugin.remove_router_interface( self.context, router['id'], {'subnet_id': subnet['subnet']['id']}) agents = self.l3_plugin.list_l3_agents_hosting_router( self.context, router['id']) self.assertEqual(1, len(agents['agents'])) self.assertFalse(l3_notifier.router_removed_from_agent.called) def test_router_is_not_removed_from_snat_agent_on_dhcp_port_deletion(self): """Check that dvr router is not removed from l3 agent hosting SNAT for it on DHCP port removal """ router = self._create_router() kwargs = {'arg_list': (extnet_apidef.EXTERNAL,), extnet_apidef.EXTERNAL: True} with self.network(**kwargs) as ext_net,\ self.subnet(network=ext_net),\ self.subnet(cidr='20.0.0.0/24') as subnet,\ self.port(subnet=subnet, device_owner=constants.DEVICE_OWNER_DHCP) as port: self.core_plugin.update_port( self.context, port['port']['id'], {'port': {'binding:host_id': self.l3_agent['host']}}) self.l3_plugin._update_router_gw_info( self.context, router['id'], {'network_id': ext_net['network']['id']}) self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet['subnet']['id']}) # router should be scheduled to the dvr_snat l3 agent agents = self.l3_plugin.list_l3_agents_hosting_router( self.context, router['id']) self.assertEqual(1, len(agents['agents'])) self.assertEqual(self.l3_agent['id'], agents['agents'][0]['id']) notifier = self.l3_plugin.agent_notifiers[ constants.AGENT_TYPE_L3] with mock.patch.object( notifier, 'router_removed_from_agent') as remove_mock: self._delete('ports', port['port']['id']) # now when port is deleted the router still has external # gateway and should still be scheduled to the snat agent agents = self.l3_plugin.list_l3_agents_hosting_router( self.context, router['id']) self.assertEqual(1, len(agents['agents'])) self.assertEqual(self.l3_agent['id'], agents['agents'][0]['id']) self.assertFalse(remove_mock.called) def test__get_dvr_subnet_ids_on_host_query(self): with self.subnet(cidr='20.0.0.0/24') as subnet1,\ self.subnet(cidr='30.0.0.0/24') as subnet2,\ self.subnet(cidr='40.0.0.0/24') as subnet3,\ self.port(subnet=subnet1, device_owner=DEVICE_OWNER_COMPUTE) as p1,\ self.port(subnet=subnet2, device_owner=constants.DEVICE_OWNER_DHCP) as p2,\ self.port(subnet=subnet3, device_owner=constants.DEVICE_OWNER_NEUTRON_PREFIX)\ as p3,\ self.port(subnet=subnet3, device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX)\ as p4: host = 'host1' subnet_ids = [item[0] for item in self.l3_plugin._get_dvr_subnet_ids_on_host_query( self.context, host)] self.assertEqual([], subnet_ids) self.core_plugin.update_port( self.context, p1['port']['id'], {'port': {portbindings.HOST_ID: host}}) expected = {subnet1['subnet']['id']} subnet_ids = [item[0] for item in self.l3_plugin._get_dvr_subnet_ids_on_host_query( self.context, host)] self.assertEqual(expected, set(subnet_ids)) self.core_plugin.update_port( self.context, p2['port']['id'], {'port': {portbindings.HOST_ID: host}}) expected.add(subnet2['subnet']['id']) subnet_ids = [item[0] for item in self.l3_plugin._get_dvr_subnet_ids_on_host_query( self.context, host)] self.assertEqual(expected, set(subnet_ids)) self.core_plugin.update_port( self.context, p3['port']['id'], {'port': {portbindings.HOST_ID: host}}) # p3 is non dvr serviceable so no subnet3 expected subnet_ids = [item[0] for 
item in self.l3_plugin._get_dvr_subnet_ids_on_host_query( self.context, host)] self.assertEqual(expected, set(subnet_ids)) other_host = 'other' + host self.core_plugin.update_port( self.context, p4['port']['id'], {'port': {portbindings.HOST_ID: other_host}}) # p4 is on other host so no subnet3 expected subnet_ids = [item[0] for item in self.l3_plugin._get_dvr_subnet_ids_on_host_query( self.context, host)] self.assertEqual(expected, set(subnet_ids)) self.core_plugin.update_port( self.context, p4['port']['id'], {'port': {portbindings.HOST_ID: host}}) # finally p4 is on the right host so subnet3 is expected expected.add(subnet3['subnet']['id']) subnet_ids = [item[0] for item in self.l3_plugin._get_dvr_subnet_ids_on_host_query( self.context, host)] self.assertEqual(expected, set(subnet_ids)) def test__get_dvr_router_ids_for_host(self): router1 = self._create_router() router2 = self._create_router() host = 'host1' arg_list = (portbindings.HOST_ID,) with self.subnet(cidr='20.0.0.0/24') as subnet1,\ self.subnet(cidr='30.0.0.0/24') as subnet2,\ self.port(subnet=subnet1, device_owner=DEVICE_OWNER_COMPUTE, arg_list=arg_list, **{portbindings.HOST_ID: host}),\ self.port(subnet=subnet2, device_owner=constants.DEVICE_OWNER_DHCP, arg_list=arg_list, **{portbindings.HOST_ID: host}): router_ids = self.l3_plugin._get_dvr_router_ids_for_host( self.context, host) self.assertEqual([], router_ids) self.l3_plugin.add_router_interface( self.context, router1['id'], {'subnet_id': subnet1['subnet']['id']}) router_ids = self.l3_plugin._get_dvr_router_ids_for_host( self.context, host) expected = {router1['id']} self.assertEqual(expected, set(router_ids)) self.l3_plugin.add_router_interface( self.context, router2['id'], {'subnet_id': subnet2['subnet']['id']}) router_ids = self.l3_plugin._get_dvr_router_ids_for_host( self.context, host) expected.add(router2['id']) self.assertEqual(expected, set(router_ids)) def test__get_router_ids_for_agent(self): router1 = self._create_router() router2 = self._create_router() router3 = self._create_router() arg_list = (portbindings.HOST_ID,) host = self.l3_agent['host'] with self.subnet() as ext_subnet,\ self.subnet(cidr='20.0.0.0/24') as subnet1,\ self.subnet(cidr='30.0.0.0/24') as subnet2,\ self.port(subnet=subnet1, device_owner=DEVICE_OWNER_COMPUTE, arg_list=arg_list, **{portbindings.HOST_ID: host}),\ self.port(subnet=subnet2, device_owner=constants.DEVICE_OWNER_DHCP, arg_list=arg_list, **{portbindings.HOST_ID: host}): ids = self.l3_plugin._get_router_ids_for_agent( self.context, self.l3_agent, []) self.assertEqual([], ids) ids = self.l3_plugin._get_router_ids_for_agent( self.context, self.l3_agent, [router1['id'], router2['id']]) self.assertEqual([], ids) self.l3_plugin.add_router_interface( self.context, router1['id'], {'subnet_id': subnet1['subnet']['id']}) ids = self.l3_plugin._get_router_ids_for_agent( self.context, self.l3_agent, []) self.assertEqual([router1['id']], ids) ids = self.l3_plugin._get_router_ids_for_agent( self.context, self.l3_agent, [router1['id']]) self.assertEqual([router1['id']], ids) ids = self.l3_plugin._get_router_ids_for_agent( self.context, self.l3_agent, [router1['id'], router2['id']]) self.assertEqual([router1['id']], ids) ids = self.l3_plugin._get_router_ids_for_agent( self.context, self.l3_agent, [router2['id']]) self.assertEqual([], ids) self.l3_plugin.add_router_interface( self.context, router2['id'], {'subnet_id': subnet2['subnet']['id']}) ids = self.l3_plugin._get_router_ids_for_agent( self.context, self.l3_agent, []) 
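            # An empty router_ids argument acts like a full sync: the agent
            # should now report every router it hosts (informal reading,
            # based on the surrounding assertions).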
self.assertEqual({router1['id'], router2['id']}, set(ids)) ids = self.l3_plugin._get_router_ids_for_agent( self.context, self.l3_agent, [router1['id']]) self.assertEqual([router1['id']], ids) ids = self.l3_plugin._get_router_ids_for_agent( self.context, self.l3_agent, [router1['id'], router2['id']]) self.assertEqual({router1['id'], router2['id']}, set(ids)) ids = self.l3_plugin._get_router_ids_for_agent( self.context, self.l3_agent, [router2['id']]) self.assertEqual([router2['id']], ids) # make net external ext_net_id = ext_subnet['subnet']['network_id'] self._update('networks', ext_net_id, {'network': {extnet_apidef.EXTERNAL: True}}) # add external gateway to router self.l3_plugin.update_router( self.context, router3['id'], {'router': { 'external_gateway_info': {'network_id': ext_net_id}}}) ids = self.l3_plugin._get_router_ids_for_agent( self.context, self.l3_agent, []) self.assertEqual({router1['id'], router2['id'], router3['id']}, set(ids)) ids = self.l3_plugin._get_router_ids_for_agent( self.context, self.l3_agent, [router3['id']]) self.assertEqual([router3['id']], ids) ids = self.l3_plugin._get_router_ids_for_agent( self.context, self.l3_agent, [router1['id'], router3['id']]) self.assertEqual({router1['id'], router3['id']}, set(ids)) def test_remove_router_interface(self): HOST1 = 'host1' helpers.register_l3_agent( host=HOST1, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router() arg_list = (portbindings.HOST_ID,) with self.subnet() as subnet,\ self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=arg_list, **{portbindings.HOST_ID: HOST1}): l3_notifier = mock.Mock() self.l3_plugin.l3_rpc_notifier = l3_notifier self.l3_plugin.agent_notifiers[ constants.AGENT_TYPE_L3] = l3_notifier self.l3_plugin.add_router_interface( self.context, router['id'], {'subnet_id': subnet['subnet']['id']}) self.l3_plugin.schedule_router(self.context, router['id']) self.l3_plugin.remove_router_interface( self.context, router['id'], {'subnet_id': subnet['subnet']['id']}) l3_notifier.router_removed_from_agent.assert_called_once_with( mock.ANY, router['id'], HOST1) def test_router_auto_scheduling(self): router = self._create_router() agents = self.l3_plugin.list_l3_agents_hosting_router( self.context, router['id']) # router is not scheduled yet self.assertEqual([], agents['agents']) l3_rpc_handler = l3_rpc.L3RpcCallback() # router should be auto scheduled once l3 agent requests router ids l3_rpc_handler.get_router_ids(self.context, self.l3_agent['host']) agents = self.l3_plugin.list_l3_agents_hosting_router( self.context, router['id']) self.assertEqual(1, len(agents['agents'])) self.assertEqual(self.l3_agent['id'], agents['agents'][0]['id']) def test_add_router_interface_by_subnet_notifications(self): notif_handler_before = mock.Mock() notif_handler_after = mock.Mock() registry.subscribe(notif_handler_before.callback, resources.ROUTER_INTERFACE, events.BEFORE_CREATE) registry.subscribe(notif_handler_after.callback, resources.ROUTER_INTERFACE, events.AFTER_CREATE) router = self._create_router() with self.network() as net, \ self.subnet(network=net) as subnet: interface_info = {'subnet_id': subnet['subnet']['id']} self.l3_plugin.add_router_interface( self.context, router['id'], interface_info) kwargs = {'context': self.context, 'router_id': router['id'], 'network_id': net['network']['id'], 'router_db': mock.ANY, 'port': mock.ANY, 'interface_info': interface_info} notif_handler_before.callback.assert_called_once_with( resources.ROUTER_INTERFACE, events.BEFORE_CREATE, mock.ANY, 
                **kwargs)
            kwargs_after = {'cidrs': mock.ANY,
                            'context': mock.ANY,
                            'gateway_ips': mock.ANY,
                            'interface_info': mock.ANY,
                            'network_id': None,
                            'port': mock.ANY,
                            'new_interface': True,
                            'subnets': mock.ANY,
                            'port_id': mock.ANY,
                            'router_id': router['id']}
            notif_handler_after.callback.assert_called_once_with(
                resources.ROUTER_INTERFACE, events.AFTER_CREATE, mock.ANY,
                **kwargs_after)

    def test_add_router_interface_by_port_notifications(self):
        notif_handler_before = mock.Mock()
        notif_handler_after = mock.Mock()
        registry.subscribe(notif_handler_before.callback,
                           resources.ROUTER_INTERFACE,
                           events.BEFORE_CREATE)
        registry.subscribe(notif_handler_after.callback,
                           resources.ROUTER_INTERFACE,
                           events.AFTER_CREATE)
        router = self._create_router()
        with self.network() as net, \
                self.subnet(network=net) as subnet, \
                self.port(subnet=subnet) as port:
            interface_info = {'port_id': port['port']['id']}
            self.l3_plugin.add_router_interface(
                self.context, router['id'], interface_info)
            kwargs = {'context': self.context, 'router_id': router['id'],
                      'network_id': net['network']['id'],
                      'router_db': mock.ANY, 'port': mock.ANY,
                      'interface_info': interface_info}
            notif_handler_before.callback.assert_called_once_with(
                resources.ROUTER_INTERFACE, events.BEFORE_CREATE, mock.ANY,
                **kwargs)
            kwargs_after = {'cidrs': mock.ANY,
                            'context': mock.ANY,
                            'gateway_ips': mock.ANY,
                            'interface_info': mock.ANY,
                            'network_id': None,
                            'port': mock.ANY,
                            'new_interface': True,
                            'subnets': mock.ANY,
                            'port_id': port['port']['id'],
                            'router_id': router['id']}
            notif_handler_after.callback.assert_called_once_with(
                resources.ROUTER_INTERFACE, events.AFTER_CREATE, mock.ANY,
                **kwargs_after)


class L3DvrTestCaseMigration(L3DvrTestCaseBase):
    def test_update_router_db_centralized_to_distributed_with_ports(self):
        with self.subnet() as subnet1:
            kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
                      extnet_apidef.EXTERNAL: True}
            with self.network(**kwargs) as ext_net, \
                    self.subnet(network=ext_net, cidr='30.0.0.0/24'):
                router = self._create_router(distributed=False)
                self.l3_plugin.add_router_interface(
                    self.context, router['id'],
                    {'subnet_id': subnet1['subnet']['id']})
                self.l3_plugin._update_router_gw_info(
                    self.context, router['id'],
                    {'network_id': ext_net['network']['id']})
                self.assertEqual(
                    0, len(self.l3_plugin._get_snat_sync_interfaces(
                        self.context, [router['id']])))

                # router needs to be in admin state down in order to be
                # upgraded to DVR
                self.l3_plugin.update_router(
                    self.context, router['id'],
                    {'router': {'admin_state_up': False}})
                self.assertFalse(router['distributed'])
                self.l3_plugin.update_router(
                    self.context, router['id'],
                    {'router': {'distributed': True}})
                router = self.l3_plugin.get_router(self.context,
                                                   router['id'])
                self.assertTrue(router['distributed'])
                self.assertEqual(
                    1, len(self.l3_plugin._get_snat_sync_interfaces(
                        self.context, [router['id']])))
neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/logapi/
neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/logapi/__init__.py
neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/logapi/test_logging.py
# Copyright (c) 2017 Fujitsu Limited
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import re

import mock
from neutron_lib import constants
from neutron_lib import context as neutron_context
from oslo_config import cfg
from oslo_log import log as logging
import testscenarios

from neutron.objects.logapi import logging_resource as log_object
from neutron.plugins.ml2.drivers.openvswitch.agent import (
    ovs_agent_extension_api as ovs_ext_api)
from neutron.plugins.ml2.drivers.openvswitch.agent.common import (
    constants as ovs_consts)
from neutron.services.logapi.drivers.openvswitch import (
    ovs_firewall_log as ovs_fw_log)
from neutron.tests.functional.agent import test_firewall

LOG = logging.getLogger(__name__)

load_tests = testscenarios.load_tests_apply_scenarios

FAKE_LOG_ID = 'a2d72369-4246-4f19-bd3c-af51ec8d70cd'
FAKE_PROJECT_ID = 'fake_project'

log_object_dict = {
    'id': FAKE_LOG_ID,
    'resource_type': 'security_group',
    'project_id': FAKE_PROJECT_ID,
    'event': 'ALL'
}
FAKE_LOG_OBJECT = log_object.Log(**log_object_dict)


class LoggingExtensionTestFramework(test_firewall.BaseFirewallTestCase):

    def setUp(self):
        super(LoggingExtensionTestFramework, self).setUp()
        cfg.CONF.set_override('extensions', ['log'], group='agent')
        self.context = neutron_context.get_admin_context_without_session()
        self._set_resource_rpc_mock()
        if self.firewall_name != 'openvswitch':
            self.skipTest("Logging extension doesn't support firewall "
                          "driver %s at this time" % self.firewall_name)
        self.log_driver = self.initialize_ovs_fw_log()

    def initialize_ovs_fw_log(self):
        self.int_br = ovs_ext_api.OVSCookieBridge(
            self.of_helper.br_int_cls(self.tester.bridge.br_name))
        self.tun_br = self.of_helper.br_tun_cls('br-tun')
        agent_api = ovs_ext_api.OVSAgentExtensionAPI(
            self.int_br, self.tun_br,
            {'physnet1': self.of_helper.br_phys_cls('br-physnet1')})
        log_driver = ovs_fw_log.OVSFirewallLoggingDriver(agent_api)
        log_driver.initialize(self.resource_rpc)
        return log_driver

    def _set_resource_rpc_mock(self):
        self.log_info = []

        def _get_sg_info_mock(context, **kwargs):
            return self.log_info

        self.resource_rpc = mock.patch(
            'neutron.services.logapi.rpc.agent.LoggingApiStub').start()
        self.resource_rpc.get_sg_log_info_for_log_resources.side_effect = (
            _get_sg_info_mock)

    def _set_ports_log(self, sg_rules):
        fake_sg_log_info = [
            {
                'id': FAKE_LOG_ID,
                'ports_log': [
                    {'port_id': self.src_port_desc['device'],
                     'security_group_rules': sg_rules}],
                'event': 'ALL',
                'project_id': FAKE_PROJECT_ID
            }]
        self.log_info = fake_sg_log_info


class TestLoggingExtension(LoggingExtensionTestFramework):

    ip_cidr = '192.168.0.1/24'

    def _is_log_flow_set(self, table, actions):
        flows = self.log_driver.int_br.br.dump_flows_for_table(table)
        pattern = re.compile(
            r"^.* table=%s.* actions=%s" % (table, actions)
        )
        for flow in flows.splitlines():
            if pattern.match(flow.strip()):
                return True
        return False

    def _assert_logging_flows_set(self):
        self.assertTrue(self._is_log_flow_set(
            table=ovs_consts.ACCEPTED_EGRESS_TRAFFIC_TABLE,
            actions=r"resubmit\(,%d\),CONTROLLER:65535" % (
                ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE)))
        self.assertTrue(self._is_log_flow_set(
            table=ovs_consts.ACCEPTED_INGRESS_TRAFFIC_TABLE,
            actions="CONTROLLER:65535"))
        self.assertTrue(self._is_log_flow_set(
            table=ovs_consts.DROPPED_TRAFFIC_TABLE,
            actions="CONTROLLER:65535"))

    def _assert_logging_flows_not_set(self):
        self.assertFalse(self._is_log_flow_set(
            table=ovs_consts.ACCEPTED_EGRESS_TRAFFIC_TABLE,
            actions=r"resubmit\(,%d\),CONTROLLER:65535" % (
                ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE)))
        self.assertFalse(self._is_log_flow_set(
            table=ovs_consts.ACCEPTED_INGRESS_TRAFFIC_TABLE,
            actions="CONTROLLER:65535"))
        self.assertFalse(self._is_log_flow_set(
            table=ovs_consts.DROPPED_TRAFFIC_TABLE,
            actions="CONTROLLER:65535"))

    def test_log_lifecycle(self):
        sg_rules = [{'ethertype': constants.IPv4,
                     'direction': constants.INGRESS_DIRECTION,
                     'protocol': constants.PROTO_NAME_ICMP,
                     'security_group_id': self.FAKE_SECURITY_GROUP_ID},
                    {'ethertype': constants.IPv4,
                     'direction': constants.EGRESS_DIRECTION,
                     'security_group_id': self.FAKE_SECURITY_GROUP_ID},
                    {'ethertype': constants.IPv6,
                     'protocol': constants.PROTO_NAME_TCP,
                     'port_range_min': 22,
                     'port_range_max': 22,
                     'remote_group_id': 2,
                     'direction': constants.EGRESS_DIRECTION,
                     'security_group_id': self.FAKE_SECURITY_GROUP_ID},
                    ]
        self.firewall.update_security_group_rules(
            self.FAKE_SECURITY_GROUP_ID, sg_rules)
        self.firewall.update_port_filter(self.src_port_desc)
        self._set_ports_log(sg_rules)
        # start log
        self.log_driver.start_logging(
            self.context, log_resources=[FAKE_LOG_OBJECT])
        self._assert_logging_flows_set()
        # stop log
        self.log_driver.stop_logging(
            self.context, log_resources=[FAKE_LOG_OBJECT])
        self._assert_logging_flows_not_set()
neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/ovn_l3/
neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/ovn_l3/__init__.py
neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/ovn_l3/test_plugin.py
# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
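# NOTE: the tests below exercise OVN L3 gateway scheduling: which chassis
# qualify as gateway candidates (bridge mappings vs. the
# 'enable-chassis-as-gw' CMS option) and how gateway ports are spread
# across chassis, with and without L3 HA support in the schema.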
import mock from neutron.common.ovn import constants as ovn_const from neutron.common.ovn import utils as ovn_utils from neutron.common import utils as n_utils from neutron.scheduler import l3_ovn_scheduler as l3_sched from neutron.tests.functional import base from neutron.tests.functional.resources.ovsdb import events from neutron_lib.api.definitions import external_net from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.api.definitions import portbindings from neutron_lib.api.definitions import provider_net as pnet from neutron_lib import constants as n_consts from neutron_lib.plugins import directory from ovsdbapp.backend.ovs_idl import idlutils class TestRouter(base.TestOVNFunctionalBase): def setUp(self): super(TestRouter, self).setUp() self.chassis1 = self.add_fake_chassis( 'ovs-host1', physical_nets=['physnet1', 'physnet3']) self.chassis2 = self.add_fake_chassis( 'ovs-host2', physical_nets=['physnet2', 'physnet3']) self.cr_lrp_pb_event = events.WaitForCrLrpPortBindingEvent() self.sb_api.idl.notify_handler.watch_event(self.cr_lrp_pb_event) def _create_router(self, name, gw_info=None): router = {'router': {'name': name, 'admin_state_up': True, 'tenant_id': self._tenant_id}} if gw_info: router['router']['external_gateway_info'] = gw_info return self.l3_plugin.create_router(self.context, router) def _create_ext_network(self, name, net_type, physnet, seg, gateway, cidr): arg_list = (pnet.NETWORK_TYPE, external_net.EXTERNAL,) net_arg = {pnet.NETWORK_TYPE: net_type, external_net.EXTERNAL: True} if seg: arg_list = arg_list + (pnet.SEGMENTATION_ID,) net_arg[pnet.SEGMENTATION_ID] = seg if physnet: arg_list = arg_list + (pnet.PHYSICAL_NETWORK,) net_arg[pnet.PHYSICAL_NETWORK] = physnet network = self._make_network(self.fmt, name, True, arg_list=arg_list, **net_arg) if cidr: self._make_subnet(self.fmt, network, gateway, cidr, ip_version=n_consts.IP_VERSION_4) return network def _set_redirect_chassis_to_invalid_chassis(self, ovn_client): with ovn_client._nb_idl.transaction(check_error=True) as txn: for lrp in self.nb_api.tables[ 'Logical_Router_Port'].rows.values(): txn.add(ovn_client._nb_idl.update_lrouter_port( lrp.name, gateway_chassis=[ovn_const.OVN_GATEWAY_INVALID_CHASSIS])) def test_gateway_chassis_on_router_gateway_port(self): ext2 = self._create_ext_network( 'ext2', 'flat', 'physnet3', None, "20.0.0.1", "20.0.0.0/24") gw_info = {'network_id': ext2['network']['id']} self._create_router('router1', gw_info=gw_info) expected = [row.name for row in self.sb_api.tables['Chassis'].rows.values()] for row in self.nb_api.tables[ 'Logical_Router_Port'].rows.values(): if self._l3_ha_supported(): chassis = [gwc.chassis_name for gwc in row.gateway_chassis] self.assertItemsEqual(expected, chassis) else: rc = row.options.get(ovn_const.OVN_GATEWAY_CHASSIS_KEY) self.assertIn(rc, expected) def _check_gateway_chassis_candidates(self, candidates): # In this test, fake_select() is called once from _create_router() # and later from schedule_unhosted_gateways() ovn_client = self.l3_plugin._ovn_client ext1 = self._create_ext_network( 'ext1', 'vlan', 'physnet1', 1, "10.0.0.1", "10.0.0.0/24") # mock select function and check if it is called with expected # candidates. 
        def fake_select(*args, **kwargs):
            self.assertItemsEqual(candidates, kwargs['candidates'])
            # We are not interested in further processing, let us return
            # INVALID_CHASSIS to avoid errors
            return [ovn_const.OVN_GATEWAY_INVALID_CHASSIS]

        with mock.patch.object(ovn_client._ovn_scheduler, 'select',
                               side_effect=fake_select) as client_select,\
                mock.patch.object(self.l3_plugin.scheduler, 'select',
                                  side_effect=fake_select) as plugin_select:
            gw_info = {'network_id': ext1['network']['id']}
            self._create_router('router1', gw_info=gw_info)
            self.assertFalse(plugin_select.called)
            self.assertTrue(client_select.called)
            client_select.reset_mock()
            plugin_select.reset_mock()
            # set redirect-chassis to neutron-ovn-invalid-chassis, so
            # that schedule_unhosted_gateways will try to schedule it
            self._set_redirect_chassis_to_invalid_chassis(ovn_client)
            self.l3_plugin.schedule_unhosted_gateways()
            self.assertFalse(client_select.called)
            self.assertTrue(plugin_select.called)

    def test_gateway_chassis_with_cms_and_bridge_mappings(self):
        # Both chassis1 and chassis3 have proper bridge mappings,
        # but only chassis3 has enable-chassis-as-gw set.
        # Verify that only chassis3 is selected as a candidate.
        self.chassis3 = self.add_fake_chassis(
            'ovs-host3', physical_nets=['physnet1'],
            external_ids={'ovn-cms-options': 'enable-chassis-as-gw'})
        self._check_gateway_chassis_candidates([self.chassis3])

    def test_gateway_chassis_with_cms_and_no_bridge_mappings(self):
        # chassis1 has proper bridge mappings.
        # chassis3 has enable-chassis-as-gw set, but no bridge mappings.
        self.chassis3 = self.add_fake_chassis(
            'ovs-host3',
            external_ids={'ovn-cms-options': 'enable-chassis-as-gw'})
        ovn_client = self.l3_plugin._ovn_client
        ext1 = self._create_ext_network(
            'ext1', 'vlan', 'physnet1', 1, "10.0.0.1", "10.0.0.0/24")
        # There are gateway chassis in the system, but none with the
        # required bridge mappings, so no gateway should be scheduled
        # at all.
        self._set_redirect_chassis_to_invalid_chassis(ovn_client)
        with mock.patch.object(ovn_client._ovn_scheduler, 'select',
                               return_value=[self.chassis1]), \
                mock.patch.object(self.l3_plugin.scheduler, 'select',
                                  side_effect=[self.chassis1]):
            gw_info = {'network_id': ext1['network']['id']}
            self._create_router('router1', gw_info=gw_info)
        with mock.patch.object(
                ovn_client._nb_idl, 'update_lrouter_port') as ulrp:
            self.l3_plugin.schedule_unhosted_gateways()
            # Make sure that we don't schedule on chassis3
            # and do not update the lrp port.
            ulrp.assert_not_called()

    def test_gateway_chassis_with_bridge_mappings_and_no_cms(self):
        # chassis1 is configured with proper bridge mappings,
        # but none of the chassis has enable-chassis-as-gw set.
        # Verify that chassis1 is selected as a candidate.
        self._check_gateway_chassis_candidates([self.chassis1])

    def _l3_ha_supported(self):
        # If the Gateway_Chassis table exists in the NB database, then it
        # means that L3 HA is supported.
        return self.nb_api.tables.get('Gateway_Chassis')

    def test_gateway_chassis_least_loaded_scheduler(self):
        # This test will create 4 routers each with its own gateway.
        # Using the least loaded policy for scheduling gateway ports, we
        # expect that they are equally distributed across the two available
        # chassis.
        ovn_client = self.l3_plugin._ovn_client
        ovn_client._ovn_scheduler = l3_sched.OVNGatewayLeastLoadedScheduler()
        ext1 = self._create_ext_network(
            'ext1', 'flat', 'physnet3', None, "20.0.0.1", "20.0.0.0/24")
        gw_info = {'network_id': ext1['network']['id']}
        # Create 4 routers with a gateway. Since we're using physnet3, the
        # chassis candidates will be chassis1 and chassis2.
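        # For example, with L3 HA a balanced result for the 4 routers could
        # look like this (an illustration only, the actual ordering of the
        # chassis may vary):
        #   lrp-r1: [(chassis1, prio 2), (chassis2, prio 1)]
        #   lrp-r2: [(chassis2, prio 2), (chassis1, prio 1)]
        #   lrp-r3: [(chassis1, prio 2), (chassis2, prio 1)]
        #   lrp-r4: [(chassis2, prio 2), (chassis1, prio 1)]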
for i in range(1, 5): self._create_router('router%d' % i, gw_info=gw_info) # At this point we expect two gateways to be present in chassis1 # and two in chassis2. If schema supports L3 HA, we expect each # chassis to host 2 priority 2 gateways and 2 priority 1 ones. if self._l3_ha_supported(): # Each chassis contains a dict of (priority, # of ports hosted). # {1: 2, 2: 2} means that this chassis hosts 2 ports of prio 1 # and two ports of prio 2. expected = {self.chassis1: {1: 2, 2: 2}, self.chassis2: {1: 2, 2: 2}} else: # For non L3 HA, each chassis should contain two gateway ports. expected = {self.chassis1: 2, self.chassis2: 2} sched_info = {} for row in self.nb_api.tables[ 'Logical_Router_Port'].rows.values(): if self._l3_ha_supported(): for gwc in row.gateway_chassis: chassis = sched_info.setdefault(gwc.chassis_name, {}) chassis[gwc.priority] = chassis.get(gwc.priority, 0) + 1 else: rc = row.options.get(ovn_const.OVN_GATEWAY_CHASSIS_KEY) sched_info[rc] = sched_info.get(rc, 0) + 1 self.assertEqual(expected, sched_info) def _get_gw_port(self, router_id): router = self.l3_plugin._get_router(self.context, router_id) gw_port_id = router.get('gw_port_id', '') for row in self.nb_api.tables['Logical_Router_Port'].rows.values(): if row.name == 'lrp-%s' % gw_port_id: return row def test_gateway_chassis_with_subnet_changes(self): """Launchpad bug #1843485: logical router port is getting lost Test cases when subnets are added to an external network after router has been configured to use that network via "set --external-gateway" """ ovn_client = self.l3_plugin._ovn_client with mock.patch.object( ovn_client._ovn_scheduler, 'select', return_value=[ovn_const.OVN_GATEWAY_INVALID_CHASSIS]) as \ client_select: router1 = self._create_router('router1', gw_info=None) router_id = router1['id'] self.assertIsNone(self._get_gw_port(router_id), "router logical port unexpected before ext net") # Create external network with no subnets and assign it to router ext1 = self._create_ext_network( 'ext1', 'flat', 'physnet3', None, gateway=None, cidr=None) net_id = ext1['network']['id'] gw_info = {'network_id': ext1['network']['id']} self.l3_plugin.update_router( self.context, router_id, {'router': {l3_apidef.EXTERNAL_GW_INFO: gw_info}}) self.assertIsNotNone(self._get_gw_port(router_id), "router logical port must exist after gw add") # Add subnets to external network. 
        # This should percolate into l3_plugin.update_router().
        kwargs = {'ip_version': n_consts.IP_VERSION_4,
                  'gateway_ip': '10.0.0.1', 'cidr': '10.0.0.0/24'}
        subnet4_res = self._create_subnet(
            self.fmt, net_id, **kwargs)
        subnet4 = self.deserialize(self.fmt, subnet4_res).get('subnet')
        self.assertIsNotNone(self._get_gw_port(router_id),
                             "router logical port must exist after v4 add")
        kwargs = {'ip_version': n_consts.IP_VERSION_6,
                  'gateway_ip': 'fe81::1', 'cidr': 'fe81::/64',
                  'ipv6_ra_mode': n_consts.IPV6_SLAAC,
                  'ipv6_address_mode': n_consts.IPV6_SLAAC}
        subnet6_res = self._create_subnet(
            self.fmt, net_id, **kwargs)
        subnet6 = self.deserialize(self.fmt, subnet6_res).get('subnet')
        self.assertIsNotNone(self._get_gw_port(router_id),
                             "router logical port must exist after v6 add")
        self.assertGreaterEqual(client_select.call_count, 3)

        # Verify that the gateway port got a fixed IP from each new subnet
        kwargs = {'device_owner': n_consts.DEVICE_OWNER_ROUTER_GW}
        ports_res = self._list_ports(self.fmt, net_id=net_id, **kwargs)
        ports = self.deserialize(self.fmt, ports_res).get('ports')
        subnet4_ip = None
        subnet6_ip = None
        for port in ports:
            for fixed_ip in port.get('fixed_ips', []):
                if fixed_ip.get('subnet_id') == subnet4['id']:
                    subnet4_ip = fixed_ip.get('ip_address')
                if fixed_ip.get('subnet_id') == subnet6['id']:
                    subnet6_ip = fixed_ip.get('ip_address')
        self.assertIsNotNone(subnet4_ip)
        self.assertIsNotNone(subnet6_ip)

        # Verify that the logical router port is properly configured
        gw_port = self._get_gw_port(router_id)
        self.assertIsNotNone(gw_port)
        expected_networks = ['%s/24' % subnet4_ip, '%s/64' % subnet6_ip]
        self.assertItemsEqual(
            expected_networks, gw_port.networks,
            'networks in ovn port must match fixed_ips in neutron')

    def test_logical_router_port_creation(self):
        """Launchpad bug #1844652: Verify creation and removal of lrp

        This test verifies that the logical router port is created and
        removed when the external network is attached to and detached
        from a router.
        """
        router = self._create_router('router1', gw_info=None)
        router_id = router['id']
        self.assertIsNone(self._get_gw_port(router_id),
                          "router logical port unexpected before ext net")

        # Create external network and assign it to router
        ext1 = self._create_ext_network(
            'ext1', 'flat', 'physnet3', None, gateway=None, cidr=None)
        gw_info = {'network_id': ext1['network']['id']}
        self.l3_plugin.update_router(
            self.context, router_id,
            {'router': {l3_apidef.EXTERNAL_GW_INFO: gw_info}})
        self.assertIsNotNone(self._get_gw_port(router_id),
                             "router logical port missing after ext net add")

        # Un-assign external network from router
        self.l3_plugin.update_router(
            self.context, router_id,
            {'router': {l3_apidef.EXTERNAL_GW_INFO: None}})
        self.assertIsNone(self._get_gw_port(router_id),
                          "router logical port exists after ext net removal")

    def test_gateway_chassis_with_bridge_mappings(self):
        """Check the selected OVN chassis based on the external network

        This test sets different gateway values to ensure that the proper
        chassis are candidates, based on the physical network mappings.
        """
        ovn_client = self.l3_plugin._ovn_client
        # Create external networks with vlan, flat and geneve network types
        ext1 = self._create_ext_network(
            'ext1', 'vlan', 'physnet1', 1, "10.0.0.1", "10.0.0.0/24")
        ext2 = self._create_ext_network(
            'ext2', 'flat', 'physnet3', None, "20.0.0.1", "20.0.0.0/24")
        ext3 = self._create_ext_network(
            'ext3', 'geneve', None, 10, "30.0.0.1", "30.0.0.0/24")
        # mock select function and check if it is called with expected
        # candidates.
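        # Expected candidates per external network, given the bridge
        # mappings configured in setUp():
        #   ext1 (vlan, physnet1)     -> [chassis1]
        #   ext2 (flat, physnet3)     -> [chassis1, chassis2]
        #   ext3 (geneve, no physnet) -> []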
        self.candidates = []

        def fake_select(*args, **kwargs):
            self.assertItemsEqual(self.candidates, kwargs['candidates'])
            # We are not interested in further processing, let us return
            # INVALID_CHASSIS to avoid errors
            return [ovn_const.OVN_GATEWAY_INVALID_CHASSIS]

        with mock.patch.object(ovn_client._ovn_scheduler, 'select',
                               side_effect=fake_select) as client_select,\
                mock.patch.object(self.l3_plugin.scheduler, 'select',
                                  side_effect=fake_select) as plugin_select:
            self.candidates = [self.chassis1]
            gw_info = {'network_id': ext1['network']['id']}
            router1 = self._create_router('router1', gw_info=gw_info)
            # set redirect-chassis to neutron-ovn-invalid-chassis, so
            # that schedule_unhosted_gateways will try to schedule it
            self._set_redirect_chassis_to_invalid_chassis(ovn_client)
            self.l3_plugin.schedule_unhosted_gateways()

            self.candidates = [self.chassis1, self.chassis2]
            gw_info = {'network_id': ext2['network']['id']}
            self.l3_plugin.update_router(
                self.context, router1['id'],
                {'router': {l3_apidef.EXTERNAL_GW_INFO: gw_info}})
            self._set_redirect_chassis_to_invalid_chassis(ovn_client)
            self.l3_plugin.schedule_unhosted_gateways()

            self.candidates = []
            gw_info = {'network_id': ext3['network']['id']}
            self.l3_plugin.update_router(
                self.context, router1['id'],
                {'router': {l3_apidef.EXTERNAL_GW_INFO: gw_info}})
            self._set_redirect_chassis_to_invalid_chassis(ovn_client)
            self.l3_plugin.schedule_unhosted_gateways()

            # We can't test call_count for these mocks, as we have disabled
            # the maintenance_worker, which would trigger chassis events
            # and eventually call schedule_unhosted_gateways.
            # However, we know for sure that these mocks must have been
            # called at least 3 times because that is the number of times
            # this test invokes them: 1x create_router + 2x update_router
            # for the client_select mock; and 3x schedule_unhosted_gateways
            # for the plugin_select mock.
            self.assertGreaterEqual(client_select.call_count, 3)
            self.assertGreaterEqual(plugin_select.call_count, 3)

    def test_router_gateway_port_binding_host_id(self):
        # Test that setting the chassis on a chassisredirect port in the
        # Port_Binding table updates the host_id of the corresponding
        # router gateway port with this chassis.
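        # Roughly, the SB Port_Binding row involved here looks like
        # {'logical_port': 'cr-lrp-<gw_port_id>', 'chassis': <chassis row>};
        # once bound, the Neutron gateway port's binding:host_id should
        # follow the chassis hostname.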
chassis = idlutils.row_by_value(self.sb_api.idl, 'Chassis', 'name', self.chassis1) host_id = chassis.hostname ext = self._create_ext_network( 'ext1', 'vlan', 'physnet1', 1, "10.0.0.1", "10.0.0.0/24") gw_info = {'network_id': ext['network']['id']} router = self._create_router('router1', gw_info=gw_info) core_plugin = directory.get_plugin() gw_port_id = router.get('gw_port_id') # Set chassis on chassisredirect port in Port_Binding table logical_port = 'cr-lrp-%s' % gw_port_id self.assertTrue(self.cr_lrp_pb_event.wait(logical_port)) self.sb_api.lsp_bind(logical_port, self.chassis1, may_exist=True).execute(check_error=True) def check_port_binding_host_id(port_id): port = core_plugin.get_ports( self.context, filters={'id': [port_id]})[0] return port[portbindings.HOST_ID] == host_id # Test if router gateway port updated with this chassis n_utils.wait_until_true(lambda: check_port_binding_host_id( gw_port_id)) def _validate_router_ipv6_ra_configs(self, lrp_name, expected_ra_confs): lrp = idlutils.row_by_value(self.nb_api.idl, 'Logical_Router_Port', 'name', lrp_name) self.assertEqual(expected_ra_confs, lrp.ipv6_ra_configs) def _test_router_port_ipv6_ra_configs_helper( self, cidr='aef0::/64', ip_version=6, address_mode=n_consts.IPV6_SLAAC,): router1 = self._create_router('router1') n1 = self._make_network(self.fmt, 'n1', True) if ip_version == 6: kwargs = {'ip_version': 6, 'cidr': 'aef0::/64', 'ipv6_address_mode': address_mode, 'ipv6_ra_mode': address_mode} else: kwargs = {'ip_version': 4, 'cidr': '10.0.0.0/24'} res = self._create_subnet(self.fmt, n1['network']['id'], **kwargs) n1_s1 = self.deserialize(self.fmt, res) n1_s1_id = n1_s1['subnet']['id'] router_iface_info = self.l3_plugin.add_router_interface( self.context, router1['id'], {'subnet_id': n1_s1_id}) lrp_name = ovn_utils.ovn_lrouter_port_name( router_iface_info['port_id']) if ip_version == 6: expected_ra_configs = { 'address_mode': ovn_utils.get_ovn_ipv6_address_mode( address_mode), 'send_periodic': 'true', 'mtu': '1450'} else: expected_ra_configs = {} self._validate_router_ipv6_ra_configs(lrp_name, expected_ra_configs) def test_router_port_ipv6_ra_configs_addr_mode_slaac(self): self._test_router_port_ipv6_ra_configs_helper() def test_router_port_ipv6_ra_configs_addr_mode_dhcpv6_stateful(self): self._test_router_port_ipv6_ra_configs_helper( address_mode=n_consts.DHCPV6_STATEFUL) def test_router_port_ipv6_ra_configs_addr_mode_dhcpv6_stateless(self): self._test_router_port_ipv6_ra_configs_helper( address_mode=n_consts.DHCPV6_STATELESS) def test_router_port_ipv6_ra_configs_ipv4(self): self._test_router_port_ipv6_ra_configs_helper( ip_version=4) def test_gateway_chassis_rebalance(self): def _get_result_dict(): sched_info = {} for row in self.nb_api.tables[ 'Logical_Router_Port'].rows.values(): for gwc in row.gateway_chassis: chassis = sched_info.setdefault(gwc.chassis_name, {}) chassis[gwc.priority] = chassis.get(gwc.priority, 0) + 1 return sched_info if not self._l3_ha_supported(): self.skipTest('L3 HA not supported') ovn_client = self.l3_plugin._ovn_client chassis4 = self.add_fake_chassis( 'ovs-host4', physical_nets=['physnet4'], external_ids={ 'ovn-cms-options': 'enable-chassis-as-gw'}) ovn_client._ovn_scheduler = l3_sched.OVNGatewayLeastLoadedScheduler() ext1 = self._create_ext_network( 'ext1', 'flat', 'physnet4', None, "30.0.0.1", "30.0.0.0/24") gw_info = {'network_id': ext1['network']['id']} # Create 20 routers with a gateway. Since we're using physnet4, the # chassis candidates will be chassis4 initially. 
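        # Each gateway port is represented in the SB database by a
        # chassisredirect port named 'cr-lrp-<gw_port_id>'; binding it to
        # chassis4 below mimics what ovn-controller would do on that host.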
        for i in range(20):
            router = self._create_router('router%d' % i, gw_info=gw_info)
            gw_port_id = router.get('gw_port_id')
            logical_port = 'cr-lrp-%s' % gw_port_id
            self.assertTrue(self.cr_lrp_pb_event.wait(logical_port))
            self.sb_api.lsp_bind(logical_port, chassis4,
                                 may_exist=True).execute(check_error=True)
        self.l3_plugin.schedule_unhosted_gateways()
        expected = {chassis4: {1: 20}}
        self.assertEqual(expected, _get_result_dict())

        # Add another chassis as a gateway chassis
        chassis5 = self.add_fake_chassis(
            'ovs-host5', physical_nets=['physnet4'], external_ids={
                'ovn-cms-options': 'enable-chassis-as-gw'})
        # Add a node as a compute node. A compute node won't be used to
        # schedule the router gateway ports, so the priority values won't
        # change and chassis4 will still have priority 2.
        self.add_fake_chassis('ovs-host6', physical_nets=['physnet4'])

        # Chassis4 should have all ports at Priority 2
        self.l3_plugin.schedule_unhosted_gateways()
        self.assertEqual({2: 20}, _get_result_dict()[chassis4])
        # Chassis5 should have all ports at Priority 1
        self.assertEqual({1: 20}, _get_result_dict()[chassis5])

        # delete the chassis that hosts all the gateways
        self.del_fake_chassis(chassis4)
        self.l3_plugin.schedule_unhosted_gateways()

        # As chassis4 has been removed, all gateways that were hosted there
        # are now mastered on chassis5 and have priority 1.
        self.assertEqual({1: 20}, _get_result_dict()[chassis5])

    def test_gateway_chassis_rebalance_max_chassis(self):
        chassis_list = []
        # Spawn MAX_GW_CHASSIS + 1 chassis and check that the port gets
        # only MAX_GW_CHASSIS candidates.
        for i in range(0, ovn_const.MAX_GW_CHASSIS + 1):
            chassis_list.append(
                self.add_fake_chassis(
                    'ovs-host%s' % i, physical_nets=['physnet1'],
                    external_ids={
                        'ovn-cms-options': 'enable-chassis-as-gw'}))
        ext1 = self._create_ext_network(
            'ext1', 'vlan', 'physnet1', 1, "10.0.0.1", "10.0.0.0/24")
        gw_info = {'network_id': ext1['network']['id']}
        router = self._create_router('router', gw_info=gw_info)
        gw_port_id = router.get('gw_port_id')
        logical_port = 'cr-lrp-%s' % gw_port_id
        self.assertTrue(self.cr_lrp_pb_event.wait(logical_port))
        self.sb_api.lsp_bind(logical_port, chassis_list[0],
                             may_exist=True).execute(check_error=True)
        self.l3_plugin.schedule_unhosted_gateways()
        for row in self.nb_api.tables[
                'Logical_Router_Port'].rows.values():
            self.assertEqual(ovn_const.MAX_GW_CHASSIS,
                             len(row.gateway_chassis))
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4110456 neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/portforwarding/0000755000175000017500000000000000000000000027033 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/portforwarding/__init__.py0000644000175000017500000000000000000000000031132 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/portforwarding/test_port_forwarding.py0000644000175000017500000005251100000000000033656 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib.api.definitions import fip_pf_description as ext_apidef from neutron_lib.api.definitions import floating_ip_port_forwarding as apidef from neutron_lib.callbacks import exceptions as c_exc from neutron_lib import exceptions as lib_exc from neutron_lib.exceptions import l3 as lib_l3_exc from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from oslo_utils import uuidutils from neutron.services.portforwarding.common import exceptions as pf_exc from neutron.services.portforwarding import pf_plugin from neutron.tests.functional import base as functional_base from neutron.tests.unit.plugins.ml2 import base as ml2_test_base class PortForwardingTestCaseBase(ml2_test_base.ML2TestFramework, functional_base.BaseLoggingTestCase): def setUp(self): super(PortForwardingTestCaseBase, self).setUp() self.pf_plugin = pf_plugin.PortForwardingPlugin() directory.add_plugin(plugin_constants.PORTFORWARDING, self.pf_plugin) def _create_floatingip(self, network_id, port_id=None, fixed_ip_address=None): body = {"floating_network_id": network_id, "port_id": port_id, "fixed_ip_address": fixed_ip_address, "tenant_id": self._tenant_id, "project_id": self._tenant_id} return self.l3_plugin.create_floatingip( self.context, {"floatingip": body}) def _get_floatingip(self, floatingip_id): return self.l3_plugin.get_floatingip(self.context, floatingip_id) def _update_floatingip(self, fip_id, update_info): return self.l3_plugin.update_floatingip( self.context, fip_id, {"floatingip": update_info}) def _delete_floatingip(self, fip_id): return self.l3_plugin.delete_floatingip(self.context, fip_id) def _get_ports(self, filters): return self.core_plugin.get_ports(self.context, filters=filters) def _update_port(self, port_id, update_info): return self.core_plugin.update_port( self.context, port_id, {'port': update_info}) def _delete_port(self, port_id): return self.core_plugin.delete_port(self.context, port_id) def _add_router_interface(self, router_id, subnet_id): interface_info = {"subnet_id": subnet_id} self.l3_plugin.add_router_interface( self.context, router_id, interface_info=interface_info) def _remove_router_interface(self, router_id, subnet_id): interface_info = {"subnet_id": subnet_id} self.l3_plugin.remove_router_interface( self.context, router_id, interface_info=interface_info) def _set_router_gw(self, router_id, ext_net_id): body = { 'router': {'external_gateway_info': {'network_id': ext_net_id}}} self.l3_plugin.update_router(self.context, router_id, body) class PortForwardingTestCase(PortForwardingTestCaseBase): def setUp(self): super(PortForwardingTestCase, self).setUp() self._prepare_env() def _get_network_port_ips(self): net_ports = self._get_ports( filters={"network_id": [self.net['id']]}) net_port_ips = [ p['fixed_ips'][0]['ip_address'] for p in net_ports] return net_port_ips def _prepare_env(self): self.router = self._create_router(distributed=True) self.ext_net = self._create_network( self.fmt, 'ext-net', True, arg_list=("router:external",), **{"router:external": True}).json['network'] self.ext_subnet = self._create_subnet( self.fmt, 
self.ext_net['id'], '172.24.2.0/24').json['subnet']
        self.net = self._create_network(self.fmt, 'private', True).json[
            'network']
        self.subnet = self._create_subnet(
            self.fmt, self.net['id'], '10.0.0.0/24',
            enable_dhcp=False).json['subnet']
        self._set_router_gw(self.router['id'], self.ext_net['id'])
        self._add_router_interface(self.router['id'], self.subnet['id'])
        self.fip = self._create_floatingip(self.ext_net['id'])
        self.port_ip = self._find_ip_address(
            self.subnet, exclude=self._get_network_port_ips(),
            is_random=True)
        self.port = self._create_port(
            self.fmt, self.net['id'],
            fixed_ips=[{'subnet_id': self.subnet['id'],
                        'ip_address': self.port_ip}]).json['port']
        self.port_forwarding = {
            apidef.RESOURCE_NAME:
                {apidef.EXTERNAL_PORT: 2225,
                 apidef.INTERNAL_PORT: 25,
                 apidef.INTERNAL_PORT_ID: self.port['id'],
                 apidef.PROTOCOL: "tcp",
                 ext_apidef.DESCRIPTION_FIELD: 'Some description',
                 apidef.INTERNAL_IP_ADDRESS:
                     self.port['fixed_ips'][0]['ip_address']}}

    def test_create_floatingip_port_forwarding_and_remove_subnets(self):
        subnet_2 = self._create_subnet(self.fmt, self.net['id'],
                                       '10.0.2.0/24').json['subnet']
        self._add_router_interface(self.router['id'], subnet_2['id'])
        subnet_3 = self._create_subnet(self.fmt, self.net['id'],
                                       '10.0.3.0/24').json['subnet']
        self._add_router_interface(self.router['id'], subnet_3['id'])
        res = self.pf_plugin.create_floatingip_port_forwarding(
            self.context, self.fip['id'], self.port_forwarding)
        expect = {
            "external_port": 2225,
            "internal_port": 25,
            "internal_port_id": self.port['id'],
            "protocol": "tcp",
            "internal_ip_address": self.port['fixed_ips'][0]['ip_address'],
            'id': mock.ANY,
            'router_id': self.router['id'],
            'floating_ip_address': self.fip['floating_ip_address'],
            'description': 'Some description',
            'revision_number': 0,
            'created_at': mock.ANY,
            'floatingip_id': self.fip['id']}
        self.assertEqual(expect, res)
        self.assertRaises(lib_l3_exc.RouterInterfaceInUseByFloatingIP,
                          self._remove_router_interface,
                          self.router['id'], self.subnet['id'])
        self._remove_router_interface(self.router['id'], subnet_2['id'])
        self._remove_router_interface(self.router['id'], subnet_3['id'])

    def test_create_floatingip_port_forwarding_external_port_0(self):
        self.port_forwarding[apidef.RESOURCE_NAME][apidef.EXTERNAL_PORT] = 0
        self.assertRaises(ValueError,
                          self.pf_plugin.create_floatingip_port_forwarding,
                          self.context, self.fip['id'], self.port_forwarding)

    def test_create_floatingip_port_forwarding_internal_port_0(self):
        self.port_forwarding[apidef.RESOURCE_NAME][apidef.INTERNAL_PORT] = 0
        self.assertRaises(ValueError,
                          self.pf_plugin.create_floatingip_port_forwarding,
                          self.context, self.fip['id'], self.port_forwarding)

    def test_negative_create_floatingip_port_forwarding(self):
        self.pf_plugin.create_floatingip_port_forwarding(
            self.context, self.fip['id'], self.port_forwarding)
        # This will fail with the same params
        self.assertRaises(lib_exc.BadRequest,
                          self.pf_plugin.create_floatingip_port_forwarding,
                          self.context, self.fip['id'], self.port_forwarding)

    def test_create_port_forwarding_port_in_use_by_fip(self):
        normal_fip = self._create_floatingip(self.ext_net['id'])
        self._update_floatingip(normal_fip['id'],
                                {'port_id': self.port['id']})
        self.assertRaises(
            pf_exc.PortHasBindingFloatingIP,
            self.pf_plugin.create_floatingip_port_forwarding,
            self.context, self.fip['id'], self.port_forwarding)

    def test_update_port_forwarding_port_in_use_by_fip(self):
        normal_fip = self._create_floatingip(self.ext_net['id'])
        normal_port = self._create_port(
            self.fmt, self.net['id']).json['port']
        self._update_floatingip(
normal_fip['id'], {'port_id': normal_port['id']})
        res = self.pf_plugin.create_floatingip_port_forwarding(
            self.context, self.fip['id'], self.port_forwarding)
        expect = {
            "external_port": 2225,
            "internal_port": 25,
            "internal_port_id": self.port['id'],
            "protocol": "tcp",
            "internal_ip_address": self.port['fixed_ips'][0]['ip_address'],
            'id': mock.ANY,
            'router_id': self.router['id'],
            'floating_ip_address': self.fip['floating_ip_address'],
            'description': 'Some description',
            'revision_number': 0,
            'created_at': mock.ANY,
            'floatingip_id': self.fip['id']}
        self.assertEqual(expect, res)
        # Directly update the port forwarding to a port which already has
        # a bound floating IP.
        self.port_forwarding[apidef.RESOURCE_NAME].update(
            {apidef.INTERNAL_PORT_ID: normal_port['id'],
             apidef.INTERNAL_IP_ADDRESS:
                 normal_port['fixed_ips'][0]['ip_address']})
        self.assertRaises(
            pf_exc.PortHasBindingFloatingIP,
            self.pf_plugin.update_floatingip_port_forwarding,
            self.context, res['id'], self.fip['id'], self.port_forwarding)

    def test_update_floatingip_port_forwarding(self):
        # create a test port forwarding
        res = self.pf_plugin.create_floatingip_port_forwarding(
            self.context, self.fip['id'], self.port_forwarding)

        # update the socket ports only
        update_body = {
            apidef.RESOURCE_NAME: {
                "external_port": 2226,
                "internal_port": 26,
                "protocol": "udp"
            }
        }
        update_res = self.pf_plugin.update_floatingip_port_forwarding(
            self.context, res['id'], self.fip['id'], update_body)
        expect = {
            "external_port": 2226,
            "internal_port": 26,
            "internal_port_id": self.port['id'],
            "protocol": "udp",
            "internal_ip_address": self.port['fixed_ips'][0]['ip_address'],
            'id': res['id'],
            'router_id': self.router['id'],
            'floating_ip_address': self.fip['floating_ip_address'],
            'description': 'Some description',
            'revision_number': 0,
            'created_at': mock.ANY,
            'floatingip_id': self.fip['id']}
        self.assertEqual(expect, update_res)

        # update the neutron port as well and succeed
        new_port = self._create_port(self.fmt, self.net['id']).json['port']
        update_body = {
            apidef.RESOURCE_NAME: {
                "external_port": 2227,
                "internal_port": 27,
                "protocol": "tcp",
                "internal_port_id": new_port['id'],
                "internal_ip_address": new_port['fixed_ips'][0]['ip_address']
            }
        }
        update_res = self.pf_plugin.update_floatingip_port_forwarding(
            self.context, res['id'], self.fip['id'], update_body)
        expect = {
            "external_port": 2227,
            "internal_port": 27,
            "internal_port_id": new_port['id'],
            "protocol": "tcp",
            "internal_ip_address": new_port['fixed_ips'][0]['ip_address'],
            'id': res['id'],
            'router_id': self.router['id'],
            'floating_ip_address': self.fip['floating_ip_address'],
            'description': 'Some description',
            'revision_number': 0,
            'created_at': mock.ANY,
            'floatingip_id': self.fip['id']}
        self.assertEqual(expect, update_res)

    def test_negative_update_floatingip_port_forwarding(self):
        # prepare a port forwarding
        res = self.pf_plugin.create_floatingip_port_forwarding(
            self.context, self.fip['id'], self.port_forwarding)
        # prepare another subnet whose gateway is set on a different router
        new_router = self._create_router()
        new_subnet = self._create_subnet(self.fmt, self.net['id'],
                                         '11.0.0.0/24').json['subnet']
        self._set_router_gw(new_router['id'], self.ext_net['id'])
        self._add_router_interface(new_router['id'], new_subnet['id'])
        # create a port based on the new subnet
        new_port = self._create_port(
            self.fmt, self.net['id'],
            fixed_ips=[{'subnet_id': new_subnet['id']}]).json['port']
        update_body = {
            apidef.RESOURCE_NAME: {
                "external_port": 2227,
                "internal_port": 27,
                "protocol": "tcp",
                "internal_port_id": new_port['id'],
                "internal_ip_address":
new_port['fixed_ips'][0]['ip_address']
            }
        }
        # This will fail, as the router_id found for new_port does not
        # match.
        self.assertRaises(lib_exc.BadRequest,
                          self.pf_plugin.update_floatingip_port_forwarding,
                          self.context, res['id'], self.fip['id'],
                          update_body)
        # There is already a port forwarding. We create another port
        # forwarding with the new_port, and update the new one with the
        # same params as the existing one.
        new_port = self._create_port(self.fmt, self.net['id']).json['port']
        self.port_forwarding[apidef.RESOURCE_NAME].update({
            'internal_port_id': new_port['id'],
            'internal_ip_address': new_port['fixed_ips'][0]['ip_address'],
            'external_port': self.port_forwarding[
                apidef.RESOURCE_NAME]['external_port'] + 1
        })
        new_res = self.pf_plugin.create_floatingip_port_forwarding(
            self.context, self.fip['id'], self.port_forwarding)
        self.port_forwarding[apidef.RESOURCE_NAME].update({
            'internal_port_id': self.port['id'],
            'internal_ip_address': self.port['fixed_ips'][0]['ip_address'],
            'external_port': self.port_forwarding[
                apidef.RESOURCE_NAME]['external_port'] - 1
        })
        # This will fail because of the duplicate record.
        self.assertRaises(lib_exc.BadRequest,
                          self.pf_plugin.update_floatingip_port_forwarding,
                          self.context, new_res['id'], self.fip['id'],
                          update_body)

    def test_delete_floatingip_port_forwarding(self):
        # create two port forwardings for a floatingip
        pf_1 = self.pf_plugin.create_floatingip_port_forwarding(
            self.context, self.fip['id'], self.port_forwarding)
        new_port = self._create_port(self.fmt, self.net['id']).json['port']
        self.port_forwarding[apidef.RESOURCE_NAME].update({
            'external_port': 2226,
            'internal_port_id': new_port['id'],
            'internal_ip_address': new_port['fixed_ips'][0]['ip_address']
        })
        pf_2 = self.pf_plugin.create_floatingip_port_forwarding(
            self.context, self.fip['id'], self.port_forwarding)
        floatingip = self._get_floatingip(self.fip['id'])
        self.assertEqual(self.router['id'], floatingip['router_id'])
        # delete pf_1, check that the router_id of the floatingip does not
        # change.
        self.pf_plugin.delete_floatingip_port_forwarding(
            self.context, pf_1['id'], self.fip['id'])
        exist_pfs = self.pf_plugin.get_floatingip_port_forwardings(
            self.context, floatingip_id=self.fip['id'])
        self.assertEqual(1, len(exist_pfs))
        self.assertEqual(pf_2['id'], exist_pfs[0]['id'])
        # delete pf_2, it's the last port forwarding of the floatingip.
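        # Removing the last port forwarding is expected to also detach the
        # floating IP from the router (its router_id goes back to None).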
self.pf_plugin.delete_floatingip_port_forwarding( self.context, pf_2['id'], self.fip['id']) exist_pfs = self.pf_plugin.get_floatingip_port_forwardings( self.context, floatingip_id=self.fip['id']) self.assertEqual(0, len(exist_pfs)) floatingip = self._get_floatingip(self.fip['id']) self.assertIsNone(floatingip['router_id']) def test_negative_delete_floatingip_port_forwarding(self): # prepare a good port forwarding res = self.pf_plugin.create_floatingip_port_forwarding( self.context, self.fip['id'], self.port_forwarding) # pass non-existing port forwarding id self.assertRaises(pf_exc.PortForwardingNotFound, self.pf_plugin.delete_floatingip_port_forwarding, self.context, uuidutils.generate_uuid(), self.fip['id']) # pass existing port forwarding but non-existing floatingip_id self.assertRaises(pf_exc.PortForwardingNotFound, self.pf_plugin.delete_floatingip_port_forwarding, self.context, res['id'], uuidutils.generate_uuid()) def test_concurrent_create_port_forwarding_delete_fip(self): func1 = self.pf_plugin.create_floatingip_port_forwarding func2 = self._delete_floatingip funcs = [func1, func2] args_list = [(self.context, self.fip['id'], self.port_forwarding), (self.fip['id'],)] self.assertRaises(c_exc.CallbackFailure, self._simulate_concurrent_requests_process_and_raise, funcs, args_list) port_forwardings = self.pf_plugin.get_floatingip_port_forwardings( self.context, floatingip_id=self.fip['id'], fields=['id']) self.pf_plugin.delete_floatingip_port_forwarding( self.context, port_forwardings[0][apidef.ID], floatingip_id=self.fip['id']) funcs.reverse() args_list.reverse() self.assertRaises(lib_l3_exc.FloatingIPNotFound, self._simulate_concurrent_requests_process_and_raise, funcs, args_list) def test_concurrent_create_port_forwarding_update_fip(self): newport = self._create_port(self.fmt, self.net['id']).json['port'] func1 = self.pf_plugin.create_floatingip_port_forwarding func2 = self._update_floatingip funcs = [func1, func2] args_list = [(self.context, self.fip['id'], self.port_forwarding), (self.fip['id'], {'port_id': newport['id']})] self.assertRaises(c_exc.CallbackFailure, self._simulate_concurrent_requests_process_and_raise, funcs, args_list) funcs.reverse() args_list.reverse() self.assertRaises(c_exc.CallbackFailure, self._simulate_concurrent_requests_process_and_raise, funcs, args_list) def test_concurrent_create_port_forwarding_update_port(self): new_ip = self._find_ip_address( self.subnet, exclude=self._get_network_port_ips(), is_random=True) funcs = [self.pf_plugin.create_floatingip_port_forwarding, self._update_port] args_list = [(self.context, self.fip['id'], self.port_forwarding), (self.port['id'], { 'fixed_ips': [{'subnet_id': self.subnet['id'], 'ip_address': new_ip}]})] self._simulate_concurrent_requests_process_and_raise(funcs, args_list) self.assertEqual([], self.pf_plugin.get_floatingip_port_forwardings( self.context, floatingip_id=self.fip['id'])) def test_concurrent_create_port_forwarding_delete_port(self): funcs = [self.pf_plugin.create_floatingip_port_forwarding, self._delete_port] args_list = [(self.context, self.fip['id'], self.port_forwarding), (self.port['id'],)] self._simulate_concurrent_requests_process_and_raise(funcs, args_list) self.assertEqual([], self.pf_plugin.get_floatingip_port_forwardings( self.context, floatingip_id=self.fip['id'])) def test_create_floatingip_port_forwarding_port_in_use(self): res = self.pf_plugin.create_floatingip_port_forwarding( self.context, self.fip['id'], self.port_forwarding) expected = { "external_port": 2225, "internal_port": 25, 
"internal_port_id": self.port['id'], "protocol": "tcp", "internal_ip_address": self.port['fixed_ips'][0]['ip_address'], 'id': mock.ANY, 'router_id': self.router['id'], 'floating_ip_address': self.fip['floating_ip_address'], 'description': 'Some description', 'revision_number': 0, 'created_at': mock.ANY, 'floatingip_id': self.fip['id']} self.assertEqual(expected, res) fip_2 = self._create_floatingip(self.ext_net['id']) self.assertRaises( pf_exc.PortHasPortForwarding, self._update_floatingip, fip_2['id'], {'port_id': self.port['id']}) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4110456 neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/trunk/0000755000175000017500000000000000000000000025127 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/trunk/__init__.py0000644000175000017500000000000000000000000027226 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4110456 neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/trunk/drivers/0000755000175000017500000000000000000000000026605 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/trunk/drivers/__init__.py0000644000175000017500000000000000000000000030704 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4110456 neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/trunk/drivers/openvswitch/0000755000175000017500000000000000000000000031156 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/trunk/drivers/openvswitch/__init__.py0000644000175000017500000000000000000000000033255 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4110456 neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/trunk/drivers/openvswitch/agent/0000755000175000017500000000000000000000000032254 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/trunk/drivers/openvswitch/agent/__init__.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/trunk/drivers/openvswitch/agent/__init__0000644000175000017500000000000000000000000033724 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/trunk/drivers/openvswitch/agent/test_ovsdb_handler.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/trunk/drivers/openvswitch/agent/test_ovs0000644000175000017500000002117400000000000034052 0ustar00coreycorey00000000000000# Copyright (c) 2016 SUSE Linux Products GmbH # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib import constants as n_consts from neutron_lib.utils import helpers from neutron_lib.utils import net from oslo_utils import uuidutils from neutron.agent.common import ovs_lib from neutron.common import utils as common_utils from neutron.objects import trunk as trunk_obj from neutron.services.trunk.drivers.openvswitch.agent import ovsdb_handler from neutron.services.trunk.drivers.openvswitch.agent import trunk_manager from neutron.tests.functional.agent.l2 import base def generate_tap_device_name(): return n_consts.TAP_DEVICE_PREFIX + helpers.get_random_string( n_consts.DEVICE_NAME_MAX_LEN - len(n_consts.TAP_DEVICE_PREFIX)) class OVSDBHandlerTestCase(base.OVSAgentTestFramework): """Test functionality of OVSDBHandler. This suite aims for interaction between events coming from OVSDB monitor, agent and wiring ports via trunk bridge to integration bridge. """ def setUp(self): """Prepare resources. Set up trunk_dict representing incoming data from Neutron-server when fetching for trunk details. Another resource trunk_br represents the trunk bridge which its creation is simulated when creating a port in l2 agent framework. """ super(OVSDBHandlerTestCase, self).setUp() trunk_id = uuidutils.generate_uuid() self.trunk_dict = { 'id': trunk_id, 'mac_address': net.get_random_mac('fa:16:3e:00:00:00'.split(':')), 'sub_ports': []} self.trunk_port_name = generate_tap_device_name() self.trunk_br = trunk_manager.TrunkBridge(trunk_id) self.ovsdb_handler = self._prepare_mocked_ovsdb_handler() def _prepare_mocked_ovsdb_handler(self): handler = ovsdb_handler.OVSDBHandler( trunk_manager.TrunkManager(ovs_lib.OVSBridge(self.br_int))) mock.patch.object(handler, 'trunk_rpc').start() handler.trunk_rpc.get_trunk_details.side_effect = ( self._mock_get_trunk_details) handler.trunk_rpc.update_subport_bindings.side_effect = ( self._mock_update_subport_binding) return handler def _mock_get_trunk_details(self, context, parent_port_id): if parent_port_id == self.trunk_dict['port_id']: return trunk_obj.Trunk(**self.trunk_dict) def _mock_update_subport_binding(self, context, subports): return {self.trunk_dict['id']: [ {'id': subport['port_id'], 'mac_address': subport['mac_address']} for subport in subports] } def _plug_ports(self, network, ports, agent, bridge=None, namespace=None): # creates only the trunk, the sub_port will be plugged by the # trunk manager if not self.trunk_br.exists(): self.trunk_br.create() self.addCleanup(self.trunk_br.destroy) self.driver.plug( network['id'], self.trunk_dict['port_id'], self.trunk_port_name, self.trunk_dict['mac_address'], self.trunk_br.br_name) def _mock_get_events(self, agent, polling_manager, ports): get_events = polling_manager.get_events p_ids = [p['id'] for p in ports] def filter_events(): events = get_events() filtered_events = { 'added': [], 'removed': [], 'modified': [] } for event_type in filtered_events: for dev in events[event_type]: iface_id = agent.int_br.portid_from_external_ids( dev.get('external_ids', [])) is_for_this_test = ( iface_id in p_ids or iface_id == self.trunk_dict['port_id'] or dev['name'] == self.trunk_br.br_name) if 
is_for_this_test:
                        # If the event is not about a port that was created
                        # by this test, we filter the event out. Since these
                        # tests are not run in isolation, processing all the
                        # events might make some tests fail (e.g. the agent
                        # might keep resyncing because it keeps finding not
                        # ready ports that were created by other tests).
                        filtered_events[event_type].append(dev)
            return filtered_events
        mock.patch.object(polling_manager, 'get_events',
                          side_effect=filter_events).start()

    def _fill_trunk_dict(self, num=3):
        ports = self.create_test_ports(amount=num)
        self.trunk_dict['port_id'] = ports[0]['id']
        self.trunk_dict['sub_ports'] = [trunk_obj.SubPort(
            id=uuidutils.generate_uuid(),
            port_id=ports[i]['id'],
            mac_address=ports[i]['mac_address'],
            segmentation_id=i,
            trunk_id=self.trunk_dict['id'])
            for i in range(1, num)]
        return ports

    def _test_trunk_creation_helper(self, ports):
        self.setup_agent_and_ports(port_dicts=ports)
        self.wait_until_ports_state(self.ports, up=True)
        self.trunk_br.delete_port(self.trunk_port_name)
        self.wait_until_ports_state(self.ports, up=False)
        common_utils.wait_until_true(
            lambda: not self.trunk_br.bridge_exists(self.trunk_br.br_name))

    def test_trunk_creation_with_subports(self):
        ports = self._fill_trunk_dict()
        self._test_trunk_creation_helper(ports[:1])

    def test_trunk_creation_with_no_subports(self):
        ports = self.create_test_ports(amount=1)
        self.trunk_dict['port_id'] = ports[0]['id']
        self._test_trunk_creation_helper(ports)

    def test_resync(self):
        ports = self._fill_trunk_dict()
        self.setup_agent_and_ports(port_dicts=ports)
        self.wait_until_ports_state(self.ports, up=True)
        self.agent.fullsync = True
        self.wait_until_ports_state(self.ports, up=True)

    def test_restart_subport_events(self):
        ports = self._fill_trunk_dict()
        self.setup_agent_and_ports(port_dicts=ports)
        self.wait_until_ports_state(self.ports, up=True)
        # restart and simulate a subport delete
        deleted_port = self.ports[2]
        deleted_sp = trunk_manager.SubPort(
            self.trunk_dict['id'], deleted_port['id'])
        self.stop_agent(self.agent, self.agent_thread)
        self.polling_manager.stop()
        self.trunk_dict['sub_ports'] = self.trunk_dict['sub_ports'][:1]
        self.setup_agent_and_ports(port_dicts=ports[:2])
        # NOTE: the port_dicts passed to setup_agent_and_ports are stored
        # in self.ports, so we are waiting here only for ports[:2]
        self.wait_until_ports_state(self.ports, up=True)
        common_utils.wait_until_true(
            lambda: (deleted_sp.patch_port_trunk_name not in
                     self.trunk_br.get_port_name_list()))

    def test_cleanup_on_vm_delete(self):
        with mock.patch.object(self.ovsdb_handler, 'handle_trunk_remove'):
            br_int = ovs_lib.OVSBridge(self.br_int)
            ports = self._fill_trunk_dict()
            self.setup_agent_and_ports(port_dicts=ports[:1])
            self.wait_until_ports_state(self.ports, up=True)
            self.trunk_br.delete_port(self.trunk_port_name)
            # We do not expect any instance port to show up on the trunk
            # bridge so we can set a much more aggressive timeout and
            # fail fast(er).
            self.ovsdb_handler.timeout = 1
            self.ovsdb_handler.handle_trunk_add(self.trunk_br.br_name)
            # Check no resources are left behind.
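            # Neither the trunk bridge itself nor any service (patch) port
            # on the integration bridge should remain.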
self.assertFalse(self.trunk_br.exists()) self.assertFalse(ovsdb_handler.bridge_has_service_port(br_int)) def test_do_not_delete_trunk_bridge_with_instance_ports(self): ports = self._fill_trunk_dict() self.setup_agent_and_ports(port_dicts=ports) self.wait_until_ports_state(self.ports, up=True) self.ovsdb_handler.handle_trunk_remove(self.trunk_br.br_name, ports.pop()) self.assertTrue(self.trunk_br.exists()) ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/trunk/drivers/openvswitch/agent/test_trunk_manager.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/trunk/drivers/openvswitch/agent/test_tru0000644000175000017500000002534400000000000034060 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib.utils import net from oslo_log import log as logging from oslo_utils import uuidutils import testtools from neutron.services.trunk.drivers.openvswitch.agent import trunk_manager from neutron.services.trunk.drivers.openvswitch import utils from neutron.tests.common import conn_testers from neutron.tests.common import helpers from neutron.tests.common import net_helpers from neutron.tests.functional import base from neutron.tests.functional import constants as test_constants LOG = logging.getLogger(__name__) VLAN_RANGE = set(range(1, test_constants.VLAN_COUNT - 1)) class FakeOVSDBException(Exception): pass class TrunkParentPortTestCase(base.BaseSudoTestCase): def setUp(self): super(TrunkParentPortTestCase, self).setUp() trunk_id = uuidutils.generate_uuid() port_id = uuidutils.generate_uuid() port_mac = net.get_random_mac('fa:16:3e:00:00:00'.split(':')) self.trunk = trunk_manager.TrunkParentPort(trunk_id, port_id, port_mac) self.trunk.bridge = self.useFixture( net_helpers.OVSTrunkBridgeFixture( self.trunk.bridge.br_name)).bridge self.br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge def test_plug(self): self.trunk.plug(self.br_int) self.assertIn(self.trunk.patch_port_trunk_name, self.trunk.bridge.get_port_name_list()) self.assertIn(self.trunk.patch_port_int_name, self.br_int.get_port_name_list()) def test_plug_failure_doesnt_create_ports(self): with mock.patch.object( self.trunk.bridge.ovsdb, 'db_set', side_effect=FakeOVSDBException): with testtools.ExpectedException(FakeOVSDBException): self.trunk.plug(self.br_int) self.assertNotIn(self.trunk.patch_port_trunk_name, self.trunk.bridge.get_port_name_list()) self.assertNotIn(self.trunk.patch_port_int_name, self.br_int.get_port_name_list()) def test_unplug(self): self.trunk.plug(self.br_int) self.trunk.unplug(self.br_int) self.assertFalse( self.trunk.bridge.bridge_exists(self.trunk.bridge.br_name)) self.assertNotIn(self.trunk.patch_port_int_name, self.br_int.get_port_name_list()) def test_unplug_failure_doesnt_delete_bridge(self): self.trunk.plug(self.br_int) with mock.patch.object( self.trunk.bridge.ovsdb, 'del_port', 
side_effect=FakeOVSDBException): with testtools.ExpectedException(FakeOVSDBException): self.trunk.unplug(self.br_int) self.assertTrue( self.trunk.bridge.bridge_exists(self.trunk.bridge.br_name)) self.assertIn(self.trunk.patch_port_trunk_name, self.trunk.bridge.get_port_name_list()) self.assertIn(self.trunk.patch_port_int_name, self.br_int.get_port_name_list()) class SubPortTestCase(base.BaseSudoTestCase): def setUp(self): super(SubPortTestCase, self).setUp() trunk_id = uuidutils.generate_uuid() port_id = uuidutils.generate_uuid() port_mac = net.get_random_mac('fa:16:3e:00:00:00'.split(':')) trunk_bridge_name = utils.gen_trunk_br_name(trunk_id) trunk_bridge = self.useFixture( net_helpers.OVSTrunkBridgeFixture(trunk_bridge_name)).bridge segmentation_id = helpers.get_not_used_vlan( trunk_bridge, VLAN_RANGE) self.subport = trunk_manager.SubPort( trunk_id, port_id, port_mac, segmentation_id) self.subport.bridge = trunk_bridge self.br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge def test_plug(self): self.subport.plug(self.br_int) self.assertIn(self.subport.patch_port_trunk_name, self.subport.bridge.get_port_name_list()) self.assertIn(self.subport.patch_port_int_name, self.br_int.get_port_name_list()) self.assertEqual( self.subport.segmentation_id, self.subport.bridge.db_get_val( 'Port', self.subport.patch_port_trunk_name, 'tag')) def test_plug_failure_doesnt_create_ports(self): with mock.patch.object( self.subport.bridge.ovsdb, 'db_set', side_effect=FakeOVSDBException): with testtools.ExpectedException(FakeOVSDBException): self.subport.plug(self.br_int) self.assertNotIn(self.subport.patch_port_trunk_name, self.subport.bridge.get_port_name_list()) self.assertNotIn(self.subport.patch_port_int_name, self.br_int.get_port_name_list()) def test_unplug(self): self.subport.plug(self.br_int) self.subport.unplug(self.br_int) self.assertNotIn(self.subport.patch_port_trunk_name, self.subport.bridge.get_port_name_list()) self.assertNotIn(self.subport.patch_port_int_name, self.br_int.get_port_name_list()) def test_unplug_failure(self): self.subport.plug(self.br_int) with mock.patch.object( self.subport.bridge.ovsdb, 'del_port', side_effect=FakeOVSDBException): with testtools.ExpectedException(FakeOVSDBException): self.subport.unplug(self.br_int) self.assertIn(self.subport.patch_port_trunk_name, self.subport.bridge.get_port_name_list()) self.assertIn(self.subport.patch_port_int_name, self.br_int.get_port_name_list()) class TrunkManagerTestCase(base.BaseSudoTestCase): net1_cidr = '192.178.0.1/24' net2_cidr = '192.168.0.1/24' def setUp(self): super(TrunkManagerTestCase, self).setUp() trunk_id = uuidutils.generate_uuid() self.tester = self.useFixture( conn_testers.OVSTrunkConnectionTester( self.net1_cidr, utils.gen_trunk_br_name(trunk_id))) self.trunk_manager = trunk_manager.TrunkManager( self.tester.bridge) self.trunk = trunk_manager.TrunkParentPort( trunk_id, uuidutils.generate_uuid()) def test_connectivity(self): """Test connectivity with trunk and sub ports. In this test we create a vm that has a trunk on net1 and a vm peer on the same network. We check connectivity between the peer and the vm. We create a sub port on net2 and a peer, check connectivity again. 
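        Finally, we remove the sub port and then the trunk, and check that
        connectivity is lost in both cases.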
""" vlan_net1 = helpers.get_not_used_vlan(self.tester.bridge, VLAN_RANGE) vlan_net2 = helpers.get_not_used_vlan(self.tester.bridge, VLAN_RANGE) trunk_mac = net.get_random_mac('fa:16:3e:00:00:00'.split(':')) sub_port_mac = net.get_random_mac('fa:16:3e:00:00:00'.split(':')) sub_port_segmentation_id = helpers.get_not_used_vlan( self.tester.bridge, VLAN_RANGE) LOG.debug("Using %(n1)d vlan tag as local vlan ID for net1 and %(n2)d " "for local vlan ID for net2", { 'n1': vlan_net1, 'n2': vlan_net2}) self.tester.set_peer_tag(vlan_net1) self.trunk_manager.create_trunk(self.trunk.trunk_id, self.trunk.port_id, trunk_mac) # tag the patch port, this should be done by the ovs agent but we mock # it for this test conn_testers.OVSBaseConnectionTester.set_tag( self.trunk.patch_port_int_name, self.tester.bridge, vlan_net1) self.tester.wait_for_connection(self.tester.INGRESS) self.tester.wait_for_connection(self.tester.EGRESS) self.tester.add_vlan_interface_and_peer(sub_port_segmentation_id, self.net2_cidr) conn_testers.OVSBaseConnectionTester.set_tag( self.tester._peer2.port.name, self.tester.bridge, vlan_net2) sub_port = trunk_manager.SubPort(self.trunk.trunk_id, uuidutils.generate_uuid(), sub_port_mac, sub_port_segmentation_id) self.trunk_manager.add_sub_port(sub_port.trunk_id, sub_port.port_id, sub_port.port_mac, sub_port.segmentation_id) # tag the patch port, this should be done by the ovs agent but we mock # it for this test conn_testers.OVSBaseConnectionTester.set_tag( sub_port.patch_port_int_name, self.tester.bridge, vlan_net2) self.tester.wait_for_sub_port_connectivity(self.tester.INGRESS) self.tester.wait_for_sub_port_connectivity(self.tester.EGRESS) self.trunk_manager.remove_sub_port(sub_port.trunk_id, sub_port.port_id) self.tester.wait_for_sub_port_no_connectivity(self.tester.INGRESS) self.tester.wait_for_sub_port_no_connectivity(self.tester.EGRESS) self.trunk_manager.remove_trunk(self.trunk.trunk_id, self.trunk.port_id) self.tester.wait_for_no_connection(self.tester.INGRESS) class TrunkManagerDisposeTrunkTestCase(base.BaseSudoTestCase): def setUp(self): super(TrunkManagerDisposeTrunkTestCase, self).setUp() trunk_id = uuidutils.generate_uuid() self.trunk = trunk_manager.TrunkParentPort( trunk_id, uuidutils.generate_uuid()) self.trunk.bridge = self.useFixture( net_helpers.OVSTrunkBridgeFixture( self.trunk.bridge.br_name)).bridge self.br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge self.trunk_manager = trunk_manager.TrunkManager( self.br_int) def test_dispose_trunk(self): self.trunk.plug(self.br_int) self.trunk_manager.dispose_trunk(self.trunk.bridge) self.assertFalse( self.trunk.bridge.bridge_exists(self.trunk.bridge.br_name)) self.assertNotIn(self.trunk.patch_port_int_name, self.br_int.get_port_name_list()) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4110456 neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/trunk/drivers/ovn/0000755000175000017500000000000000000000000027407 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/trunk/drivers/ovn/__init__.py0000644000175000017500000000000000000000000031506 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 
neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/trunk/drivers/ovn/test_trunk_driver.py0000644000175000017500000001202700000000000033540 0ustar00coreycorey00000000000000# Copyright 2017 DT Dream Technology Co.,Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import contextlib

from neutron.services.trunk import plugin as trunk_plugin
from neutron.tests.functional import base
from neutron_lib import constants as n_consts
from neutron_lib.objects import registry as obj_reg
from neutron_lib.plugins import utils
from neutron_lib.services.trunk import constants as trunk_consts
from oslo_utils import uuidutils


class TestOVNTrunkDriver(base.TestOVNFunctionalBase):

    def setUp(self):
        super(TestOVNTrunkDriver, self).setUp()
        self.trunk_plugin = trunk_plugin.TrunkPlugin()
        self.trunk_plugin.add_segmentation_type(
            trunk_consts.SEGMENTATION_TYPE_VLAN,
            utils.is_valid_vlan_tag)

    @contextlib.contextmanager
    def trunk(self, sub_ports=None):
        sub_ports = sub_ports or []
        with self.network() as network:
            with self.subnet(network=network) as subnet:
                with self.port(subnet=subnet) as parent_port:
                    tenant_id = uuidutils.generate_uuid()
                    trunk = {'trunk': {
                        'port_id': parent_port['port']['id'],
                        'tenant_id': tenant_id, 'project_id': tenant_id,
                        'admin_state_up': True,
                        'name': 'trunk', 'sub_ports': sub_ports}}
                    trunk = self.trunk_plugin.create_trunk(self.context,
                                                           trunk)
                    yield trunk

    @contextlib.contextmanager
    def subport(self):
        with self.port() as port:
            sub_port = {'segmentation_type': 'vlan',
                        'segmentation_id': 1000,
                        'port_id': port['port']['id']}
            yield sub_port

    def _get_ovn_trunk_info(self):
        ovn_trunk_info = []
        for row in self.nb_api.tables[
                'Logical_Switch_Port'].rows.values():
            if row.parent_name and row.tag:
                ovn_trunk_info.append({'port_id': row.name,
                                       'parent_port_id': row.parent_name,
                                       'tag': row.tag})
        return ovn_trunk_info

    def _verify_trunk_info(self, trunk, has_items):
        ovn_subports_info = self._get_ovn_trunk_info()
        neutron_subports_info = []
        for subport in trunk.get('sub_ports', []):
            neutron_subports_info.append({'port_id': subport['port_id'],
                                          'parent_port_id':
                                              [trunk['port_id']],
                                          'tag':
                                              [subport['segmentation_id']]})
            # Check that the subport's port binding is active.
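            # In OVN, a trunk subport is modeled on the parent's
            # Logical_Switch_Port via 'parent_name' and 'tag' (see
            # _get_ovn_trunk_info() above); here we additionally check the
            # Neutron-side port binding status.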
            binding = obj_reg.load_class('PortBinding').get_object(
                self.context, port_id=subport['port_id'], host='')
            self.assertEqual(n_consts.PORT_STATUS_ACTIVE, binding['status'])

        self.assertItemsEqual(ovn_subports_info, neutron_subports_info)
        self.assertEqual(has_items, len(neutron_subports_info) != 0)

        if trunk.get('status'):
            self.assertEqual(trunk_consts.TRUNK_ACTIVE_STATUS,
                             trunk['status'])

    def test_trunk_create(self):
        with self.trunk() as trunk:
            self._verify_trunk_info(trunk, has_items=False)

    def test_trunk_create_with_subports(self):
        with self.subport() as subport:
            with self.trunk([subport]) as trunk:
                self._verify_trunk_info(trunk, has_items=True)

    def test_subport_add(self):
        with self.subport() as subport:
            with self.trunk() as trunk:
                self.trunk_plugin.add_subports(self.context, trunk['id'],
                                               {'sub_ports': [subport]})
                new_trunk = self.trunk_plugin.get_trunk(self.context,
                                                        trunk['id'])
                self._verify_trunk_info(new_trunk, has_items=True)

    def test_subport_delete(self):
        with self.subport() as subport:
            with self.trunk([subport]) as trunk:
                self.trunk_plugin.remove_subports(self.context, trunk['id'],
                                                  {'sub_ports': [subport]})
                new_trunk = self.trunk_plugin.get_trunk(self.context,
                                                        trunk['id'])
                self._verify_trunk_info(new_trunk, has_items=False)

    def test_trunk_delete(self):
        with self.trunk() as trunk:
            self.trunk_plugin.delete_trunk(self.context, trunk['id'])
            self._verify_trunk_info({}, has_items=False)
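The trunk()/subport() helpers above use contextlib.contextmanager to build nestable fixtures whose teardown runs when the with-block exits. A self-contained sketch of the same technique, with illustrative names and payloads:

import contextlib

@contextlib.contextmanager
def network():
    net = {'id': 'net-1'}
    yield net  # any teardown code would go after the yield

@contextlib.contextmanager
def port(net):
    yield {'id': 'port-1', 'network_id': net['id']}

with network() as net:
    with port(net) as parent_port:
        print(parent_port)  # {'id': 'port-1', 'network_id': 'net-1'}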
neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/trunk/rpc/__init__.py
neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/trunk/rpc/test_server.py

# (c) Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from neutron_lib.services.trunk import constants

from neutron.services.trunk import plugin as trunk_plugin
from neutron.tests.common import helpers
from neutron.tests.unit.plugins.ml2 import base as ml2_test_base


class TrunkSkeletonTestCase(ml2_test_base.ML2TestFramework):
    def setUp(self):
        super(TrunkSkeletonTestCase, self).setUp()
        self.trunk_plugin = trunk_plugin.TrunkPlugin()

    def test__handle_port_binding_set_device_owner(self):
        helpers.register_ovs_agent(host=helpers.HOST)
        with self.port() as subport:
            port = (
                self.trunk_plugin.
                _rpc_backend._skeleton._handle_port_binding(
                    self.context, subport['port']['id'], mock.ANY,
                    helpers.HOST))
            self.assertEqual(
                constants.TRUNK_SUBPORT_OWNER, port['device_owner'])

neutron-16.0.0.0b2.dev214/neutron/tests/functional/services/trunk/test_plugin.py

# (c) Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib.api.definitions import portbindings as pb

from neutron.services.trunk.drivers.openvswitch import utils
from neutron.services.trunk import plugin as trunk_plugin
from neutron.tests.common import helpers
from neutron.tests.unit.plugins.ml2 import base as ml2_test_base


class TestTrunkServicePlugin(ml2_test_base.ML2TestFramework):

    def setUp(self):
        super(TestTrunkServicePlugin, self).setUp()
        self.trunk_plugin = trunk_plugin.TrunkPlugin()

    def test_ovs_bridge_name_set_when_trunk_bound(self):
        helpers.register_ovs_agent(host=helpers.HOST)
        with self.port() as port:
            trunk_port_id = port['port']['id']
            trunk_req = {'port_id': trunk_port_id,
                         'tenant_id': 'test_tenant',
                         'sub_ports': []}
            trunk_res = self.trunk_plugin.create_trunk(self.context,
                                                       {'trunk': trunk_req})
            port['port'][pb.HOST_ID] = helpers.HOST
            bound_port = self.core_plugin.update_port(self.context,
                                                      trunk_port_id, port)
            self.assertEqual(
                utils.gen_trunk_br_name(trunk_res['id']),
                bound_port[pb.VIF_DETAILS][pb.VIF_DETAILS_BRIDGE_NAME])

    def test_ovs_bridge_name_set_to_integration_bridge_when_not_trunk(self):
        helpers.register_ovs_agent(host=helpers.HOST,
                                   integration_bridge='br-fake')
        with self.port() as port:
            port['port'][pb.HOST_ID] = helpers.HOST
            bound_port = self.core_plugin.update_port(self.context,
                                                      port['port']['id'],
                                                      port)
            self.assertEqual(
                'br-fake',
                bound_port[pb.VIF_DETAILS].get(pb.VIF_DETAILS_BRIDGE_NAME))

    def test_ovs_bridge_name_not_set_if_integration_bridge_not_set(self):
        """This can only happen if a Stein or later ml2 driver is binding
        an interface for a pre-Stein ml2 agent.
        """
        helpers.register_ovs_agent(host=helpers.HOST)
        with self.port() as port:
            port['port'][pb.HOST_ID] = helpers.HOST
            bound_port = self.core_plugin.update_port(self.context,
                                                      port['port']['id'],
                                                      port)
            self.assertIsNone(
                bound_port[pb.VIF_DETAILS].get(pb.VIF_DETAILS_BRIDGE_NAME))
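test_plugin.py asserts that a bound trunk parent port advertises utils.gen_trunk_br_name(trunk_id) in its VIF details. A plausible sketch of such a helper follows; the 'tbr-' prefix matches what the assertions above expect, but the exact truncation length is an assumption for illustration, not the real implementation:

def gen_trunk_br_name(trunk_id, max_len=15):
    # Linux interface/bridge names are capped (IFNAMSIZ), so the UUID is
    # truncated; max_len=15 is an assumed cap for this sketch.
    return ('tbr-' + trunk_id)[:max_len]

print(gen_trunk_br_name('8d24b1a0-19f1-4d5f-a419-1e3c04a08cd2'))  # tbr-8d24b1a0-19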
neutron-16.0.0.0b2.dev214/neutron/tests/functional/test_server.py

# Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import signal
import socket
import time
import traceback

import httplib2
import mock
from neutron_lib import worker as neutron_worker
from oslo_config import cfg
import psutil

from neutron.common import utils
from neutron import manager
from neutron import service
from neutron.tests.functional import base
from neutron import wsgi

CONF = cfg.CONF

# Those messages will be written to temporary file each time
# start/reset methods are called.
FAKE_START_MSG = b"start"
FAKE_RESET_MSG = b"reset"

TARGET_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin'


class TestNeutronServer(base.BaseLoggingTestCase):
    def setUp(self):
        super(TestNeutronServer, self).setUp()
        self.service_pid = None
        self.workers = None
        self.temp_file = self.get_temp_file_path("test_server.tmp")
        self.health_checker = self._check_active
        self.pipein, self.pipeout = os.pipe()
        self.addCleanup(self._destroy_workers)

    def _destroy_workers(self):
        if self.service_pid:
            # Make sure all processes are stopped
            os.kill(self.service_pid, signal.SIGKILL)

    def _start_server(self, callback, workers):
        """Run a given service.

        :param callback: callback that will start the required service
        :param workers: number of service workers
        :returns: list of spawned workers' pids
        """
        self.workers = workers

        # Fork a new process in which server will be started
        pid = os.fork()
        if pid == 0:
            status = 0
            try:
                callback(workers)
            except SystemExit as exc:
                status = exc.code
            except BaseException:
                traceback.print_exc()
                status = 2

            # Really exit
            os._exit(status)

        self.service_pid = pid

        # If number of workers is 1 it is assumed that we run
        # a service in the current process.
        if self.workers > 1:
            # Wait at most 10 seconds to spawn workers
            condition = lambda: self.workers == len(self._get_workers())

            utils.wait_until_true(
                condition, timeout=10, sleep=0.1,
                exception=RuntimeError(
                    "Failed to start %d workers." % self.workers))

            workers = self._get_workers()
            self.assertEqual(len(workers), self.workers)
            return workers

        # Wait for a service to start.
        utils.wait_until_true(self.health_checker, timeout=10, sleep=0.1,
                              exception=RuntimeError(
                                  "Failed to start service."))

        return [self.service_pid]

    def _get_workers(self):
        """Get the list of processes in which WSGI server is running."""

        def safe_ppid(proc):
            try:
                return proc.ppid()
            except psutil.NoSuchProcess:
                return None

        if self.workers > 1:
            return [proc.pid for proc in psutil.process_iter()
                    if safe_ppid(proc) == self.service_pid]
        else:
            return [proc.pid for proc in psutil.process_iter()
                    if proc.pid == self.service_pid]

    def _check_active(self):
        """Dummy service activity check."""
        time.sleep(5)
        return True

    def _fake_start(self):
        with open(self.temp_file, 'ab') as f:
            f.write(FAKE_START_MSG)

    def _fake_reset(self):
        with open(self.temp_file, 'ab') as f:
            f.write(FAKE_RESET_MSG)

    def _test_restart_service_on_sighup(self, service, workers=1):
        """Test that a service correctly (re)starts on receiving SIGHUP.

        1. Start a service with a given number of workers.
        2. Send SIGHUP to the service.
        3. Wait for workers (if any) to (re)start.
""" self._start_server(callback=service, workers=workers) os.kill(self.service_pid, signal.SIGHUP) # After sending SIGHUP it is expected that there will be as many # FAKE_RESET_MSG as number of workers + one additional for main # process expected_msg = ( FAKE_START_MSG * workers + FAKE_RESET_MSG * (workers + 1)) # Wait for temp file to be created and its size reaching the expected # value expected_size = len(expected_msg) condition = lambda: (os.path.isfile(self.temp_file) and os.stat(self.temp_file).st_size == expected_size) try: utils.wait_until_true(condition, timeout=5, sleep=1) except utils.TimerTimeout: if not os.path.isfile(self.temp_file): raise RuntimeError( "Timed out waiting for file %(filename)s to be created" % {'filename': self.temp_file}) else: raise RuntimeError( "Expected size for file %(filename)s: %(size)s, current " "size: %(current_size)s" % {'filename': self.temp_file, 'size': expected_size, 'current_size': os.stat(self.temp_file).st_size}) # Verify that start has been called twice for each worker (one for # initial start, and the second one on SIGHUP after children were # terminated). with open(self.temp_file, 'rb') as f: res = f.readline() self.assertEqual(expected_msg, res) class TestWsgiServer(TestNeutronServer): """Tests for neutron.wsgi.Server.""" def setUp(self): super(TestWsgiServer, self).setUp() self.health_checker = self._check_active self.port = None @staticmethod def application(environ, start_response): """A primitive test application.""" response_body = 'Response' status = '200 OK' response_headers = [('Content-Type', 'text/plain'), ('Content-Length', str(len(response_body)))] start_response(status, response_headers) return [response_body] def _check_active(self): """Check a wsgi service is active by making a GET request.""" port = int(os.read(self.pipein, 5)) conn = httplib2.HTTPConnectionWithTimeout("localhost", port) try: conn.request("GET", "/") resp = conn.getresponse() return resp.status == 200 except socket.error: return False def _run_wsgi(self, workers=1): """Start WSGI server with a test application.""" # Mock start method to check that children are started again on # receiving SIGHUP. with mock.patch("neutron.wsgi.WorkerService.start") as start_method,\ mock.patch("neutron.wsgi.WorkerService.reset") as reset_method: start_method.side_effect = self._fake_start reset_method.side_effect = self._fake_reset server = wsgi.Server("Test") server.start(self.application, 0, "0.0.0.0", workers=workers) # Memorize a port that was chosen for the service self.port = server.port os.write(self.pipeout, bytes(self.port)) server.wait() def test_restart_wsgi_on_sighup_multiple_workers(self): self._test_restart_service_on_sighup(service=self._run_wsgi, workers=2) class TestRPCServer(TestNeutronServer): """Tests for neutron RPC server.""" def setUp(self): super(TestRPCServer, self).setUp() self.setup_coreplugin('ml2', load_plugins=False) self._plugin_patcher = mock.patch(TARGET_PLUGIN, autospec=True) self.plugin = self._plugin_patcher.start() self.plugin.return_value.rpc_workers_supported = True def _serve_rpc(self, workers=1): """Start RPC server with a given number of workers.""" # Mock start method to check that children are started again on # receiving SIGHUP. 
with mock.patch("neutron.service.RpcWorker.start") as start_method,\ mock.patch( "neutron.service.RpcWorker.reset") as reset_method,\ mock.patch( "neutron_lib.plugins.directory.get_plugin") as get_plugin: start_method.side_effect = self._fake_start reset_method.side_effect = self._fake_reset get_plugin.return_value = self.plugin CONF.set_override("rpc_workers", workers) # not interested in state report workers specifically CONF.set_override("rpc_state_report_workers", 0) rpc_workers_launcher = service.start_rpc_workers() rpc_workers_launcher.wait() def test_restart_rpc_on_sighup_multiple_workers(self): self._test_restart_service_on_sighup(service=self._serve_rpc, workers=2) class TestPluginWorker(TestNeutronServer): """Ensure that a plugin returning Workers spawns workers""" def setUp(self): super(TestPluginWorker, self).setUp() self.setup_coreplugin('ml2', load_plugins=False) self._plugin_patcher = mock.patch(TARGET_PLUGIN, autospec=True) self.plugin = self._plugin_patcher.start() manager.init() def _start_plugin(self, workers=1): with mock.patch('neutron_lib.plugins.directory.get_plugin') as gp: gp.return_value = self.plugin plugin_workers_launcher = service.start_plugins_workers() plugin_workers_launcher.wait() def test_start(self): class FakeWorker(neutron_worker.BaseWorker): def start(self): pass def wait(self): pass def stop(self): pass def reset(self): pass # Make both ABC happy and ensure 'self' is correct FakeWorker.start = self._fake_start FakeWorker.reset = self._fake_reset workers = [FakeWorker()] self.plugin.return_value.get_workers.return_value = workers self._test_restart_service_on_sighup(service=self._start_plugin, workers=len(workers)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/functional/test_service.py0000644000175000017500000000322700000000000025216 0ustar00coreycorey00000000000000# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_concurrency import processutils from oslo_config import cfg from oslo_service import service from neutron import service as neutron_service from neutron.tests.functional import base from neutron.tests.functional import test_server class TestService(base.BaseLoggingTestCase): def test_api_workers_default(self): # This value may end being scaled downward based on available RAM. 
neutron-16.0.0.0b2.dev214/neutron/tests/functional/test_service.py

# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_service import service

from neutron import service as neutron_service
from neutron.tests.functional import base
from neutron.tests.functional import test_server


class TestService(base.BaseLoggingTestCase):

    def test_api_workers_default(self):
        # This value may end up being scaled downward based on available RAM.
        self.assertGreaterEqual(processutils.get_worker_count(),
                                neutron_service._get_api_workers())

    def test_api_workers_from_config(self):
        cfg.CONF.set_override('api_workers', 1234)
        self.assertEqual(1234,
                         neutron_service._get_api_workers())


class TestServiceRestart(test_server.TestNeutronServer):

    def _start_service(self, host, binary, topic, manager, workers,
                       *args, **kwargs):
        server = neutron_service.Service(host, binary, topic, manager,
                                         *args, **kwargs)
        service.launch(cfg.CONF, server, workers).wait()
neutron-16.0.0.0b2.dev214/neutron/tests/functional/tests/__init__.py
neutron-16.0.0.0b2.dev214/neutron/tests/functional/tests/common/__init__.py
neutron-16.0.0.0b2.dev214/neutron/tests/functional/tests/common/exclusive_resources/__init__.py
neutron-16.0.0.0b2.dev214/neutron/tests/functional/tests/common/exclusive_resources/test_ip_address.py

# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import netaddr

from neutron.tests.common.exclusive_resources import ip_address
from neutron.tests.functional import base


class TestExclusiveIPAddress(base.BaseLoggingTestCase):

    def test_ip_address(self):
        address_1 = self.useFixture(
            ip_address.ExclusiveIPAddress('10.0.0.1', '10.0.0.2')).address
        address_2 = self.useFixture(
            ip_address.ExclusiveIPAddress('10.0.0.1', '10.0.0.2')).address
        self.assertIsInstance(address_1, netaddr.IPAddress)
        self.assertNotEqual(address_1, address_2)

neutron-16.0.0.0b2.dev214/neutron/tests/functional/tests/common/exclusive_resources/test_ip_network.py

# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import netaddr

from neutron.tests.common.exclusive_resources import ip_network
from neutron.tests.functional import base


class TestExclusiveIPNetwork(base.BaseLoggingTestCase):

    def test_ip_network(self):
        network_1 = self.useFixture(
            ip_network.ExclusiveIPNetwork(
                '240.0.0.1', '240.255.255.254', '24')).network
        network_2 = self.useFixture(
            ip_network.ExclusiveIPNetwork(
                '240.0.0.1', '240.255.255.254', '24')).network
        self.assertIsInstance(network_1, netaddr.IPNetwork)
        self.assertEqual(network_1.cidr, network_1)
        self.assertNotEqual(network_1, network_2)
neutron-16.0.0.0b2.dev214/neutron/tests/functional/tests/common/exclusive_resources/test_port.py

# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib import constants

from neutron.tests.common.exclusive_resources import port
from neutron.tests.functional import base


class TestExclusivePort(base.BaseSudoTestCase):

    def test_port(self):
        port_1 = self.useFixture(port.ExclusivePort(
            constants.PROTO_NAME_TCP)).port
        port_2 = self.useFixture(port.ExclusivePort(
            constants.PROTO_NAME_TCP)).port
        self.assertIsInstance(port_1, str)
        self.assertNotEqual(port_1, port_2)

neutron-16.0.0.0b2.dev214/neutron/tests/functional/tests/common/exclusive_resources/test_resource_allocator.py

# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from neutron_lib.utils import helpers
import testtools

from neutron.tests.common.exclusive_resources import resource_allocator
from neutron.tests.functional import base


def safe_remove_file(file_path):
    try:
        os.remove(file_path)
    except OSError:
        pass


class TestResourceAllocator(base.BaseLoggingTestCase):
    def setUp(self):
        super(TestResourceAllocator, self).setUp()
        self.ra = resource_allocator.ResourceAllocator(
            helpers.get_random_string(6), lambda: 42)
        self.addCleanup(safe_remove_file, self.ra._state_file_path)

    def test_allocate_and_release(self):
        # Assert that we can allocate a resource
        resource = self.ra.allocate()
        self.assertEqual('42', resource)

        # Assert that we cannot allocate any more resources, since we're
        # using an allocator that always returns the same value
        with testtools.ExpectedException(ValueError):
            self.ra.allocate()

        # Assert that releasing the resource and allocating again works
        self.ra.release(resource)
        resource = self.ra.allocate()
        self.assertEqual('42', resource)

    def test_file_manipulation(self):
        # The file should not be created until the first allocation
        self.assertFalse(os.path.exists(self.ra._state_file_path))
        resource = self.ra.allocate()
        self.assertTrue(os.path.exists(self.ra._state_file_path))

        # Releasing the last resource should delete the file
        self.ra.release(resource)
        self.assertFalse(os.path.exists(self.ra._state_file_path))
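A compact sketch of the allocate/release contract these tests exercise: a file-backed registry that refuses duplicates and deletes its state file when the last resource is released. This is a simplified stand-in, not the real ResourceAllocator from neutron.tests.common.exclusive_resources:

import os
import tempfile

class TinyAllocator(object):
    def __init__(self, allocator_func):
        self._allocate_func = allocator_func
        self._state_file_path = os.path.join(tempfile.gettempdir(),
                                             'tiny_alloc')

    def allocate(self):
        resource = str(self._allocate_func())
        allocated = set()
        if os.path.exists(self._state_file_path):
            with open(self._state_file_path) as f:
                allocated = set(f.read().split())
        if resource in allocated:
            # Mirrors the ValueError the test above expects on a duplicate.
            raise ValueError('resource %s already allocated' % resource)
        allocated.add(resource)
        with open(self._state_file_path, 'w') as f:
            f.write(' '.join(allocated))
        return resource

    def release(self, resource):
        with open(self._state_file_path) as f:
            allocated = set(f.read().split()) - {resource}
        if allocated:
            with open(self._state_file_path, 'w') as f:
                f.write(' '.join(allocated))
        else:
            os.remove(self._state_file_path)  # last release deletes the file

ra = TinyAllocator(lambda: 42)
r = ra.allocate()
ra.release(r)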
neutron-16.0.0.0b2.dev214/neutron/tests/functional/tests/common/test_net_helpers.py

# Copyright 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import signal

import mock
from neutron_lib import exceptions as n_exc

from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import utils as n_utils
from neutron.tests import base as test_base
from neutron.tests.common import net_helpers
from neutron.tests.functional import base


class NetcatTesterTestCase(base.BaseSudoTestCase):

    def setUp(self):
        super(NetcatTesterTestCase, self).setUp()
        self.ns1 = self.useFixture(net_helpers.NamespaceFixture('nc-')).name
        self.ns2 = self.useFixture(net_helpers.NamespaceFixture('nc-')).name
        self.nc = net_helpers.NetcatTester(self.ns1, self.ns2, '10.30.0.2',
                                           '1234', 'tcp')
        ip_wrapper = ip_lib.IPWrapper(namespace=self.ns1)
        veth1, veth2 = ip_wrapper.add_veth('veth1', 'veth2', self.ns2)
        veth1.link.set_up()
        veth1.addr.add('10.30.0.1/24')
        veth2.link.set_up()
        veth2.addr.add('10.30.0.2/24')

    @test_base.unstable_test("bug 1862927")
    def test_stop_process(self):
        self.nc.test_connectivity()
        server_pid = self.nc.server_process.child_pid
        client_pid = self.nc.client_process.child_pid
        self.assertTrue(utils.process_is_running(server_pid))
        self.assertTrue(utils.process_is_running(client_pid))
        self.nc.stop_processes()
        self.assertFalse(utils.process_is_running(server_pid))
        self.assertFalse(utils.process_is_running(client_pid))

    @test_base.unstable_test("bug 1862927")
    def test_stop_process_no_process(self):
        self.nc.test_connectivity()
        client_pid = self.nc.client_process.child_pid
        utils.execute(['kill', '-%d' % signal.SIGKILL, client_pid],
                      run_as_root=True)
        n_utils.wait_until_true(
            lambda: not utils.process_is_running(client_pid),
            timeout=5)
        with mock.patch.object(net_helpers.RootHelperProcess, 'poll',
                               return_value=None):
            self.assertRaises(n_exc.ProcessExecutionError,
                              self.nc.stop_processes,
                              skip_errors=[])

    @test_base.unstable_test("bug 1862927")
    def test_stop_process_no_process_skip_no_process_exception(self):
        self.nc.test_connectivity()
        server_pid = self.nc.server_process.child_pid
        utils.execute(['kill', '-%d' % signal.SIGKILL, server_pid],
                      run_as_root=True)
        with mock.patch.object(net_helpers.RootHelperProcess, 'poll',
                               side_effect=[None, True, None, True]):
            self.nc.stop_processes()
neutron-16.0.0.0b2.dev214/neutron/tests/post_mortem_debug.py

# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools
import traceback


def get_exception_handler(debugger_name):
    debugger = _get_debugger(debugger_name)
    return functools.partial(_exception_handler, debugger)


def _get_debugger(debugger_name):
    try:
        debugger = __import__(debugger_name)
    except ImportError:
        raise ValueError("can't import %s module as a post mortem debugger" %
                         debugger_name)
    if 'post_mortem' in dir(debugger):
        return debugger
    else:
        raise ValueError("%s is not a supported post mortem debugger" %
                         debugger_name)


def _exception_handler(debugger, exc_info):
    """Exception handler enabling post-mortem debugging.

    A class extending testtools.TestCase can add this handler in setUp():

        self.addOnException(post_mortem_debug.exception_handler)

    When an exception occurs, the user will be dropped into a debugger
    session in the execution environment of the failure.

    Frames associated with the testing framework are excluded so that
    the post-mortem session for an assertion failure will start at the
    assertion call (e.g. self.assertTrue) rather than the framework code
    that raises the failure exception (e.g. the assertTrue method).
    """
    tb = exc_info[2]
    ignored_traceback = get_ignored_traceback(tb)
    if ignored_traceback:
        tb = FilteredTraceback(tb, ignored_traceback)
    traceback.print_exception(exc_info[0], exc_info[1], tb)
    debugger.post_mortem(tb)


def get_ignored_traceback(tb):
    """Retrieve the first traceback of an ignored trailing chain.

    Given an initial traceback, find the first traceback of a trailing
    chain of tracebacks that should be ignored.  The criterion for
    whether a traceback should be ignored is whether its frame's
    globals include the __unittest marker variable.  This criterion is
    culled from:

        unittest.TestResult._is_relevant_tb_level

    For example:

        tb.tb_next => tb0.tb_next => tb1.tb_next

    - If no tracebacks were to be ignored, None would be returned.
    - If only tb1 was to be ignored, tb1 would be returned.
    - If tb0 and tb1 were to be ignored, tb0 would be returned.
    - If either of only tb or only tb0 was to be ignored, None would
      be returned because neither tb nor tb0 would be part of a
      trailing chain of ignored tracebacks.
    """
    # Turn the traceback chain into a list
    tb_list = []
    while tb:
        tb_list.append(tb)
        tb = tb.tb_next

    # Find all members of an ignored trailing chain
    ignored_tracebacks = []
    for tb in reversed(tb_list):
        if '__unittest' in tb.tb_frame.f_globals:
            ignored_tracebacks.append(tb)
        else:
            break

    # Return the first member of the ignored trailing chain
    if ignored_tracebacks:
        return ignored_tracebacks[-1]


class FilteredTraceback(object):
    """Wraps a traceback to filter unwanted frames."""

    def __init__(self, tb, filtered_traceback):
        """Constructor.

        :param tb: The start of the traceback chain to filter.
        :param filtered_traceback: The first traceback of a trailing
               chain that is to be filtered.
        """
        self._tb = tb
        self.tb_lasti = self._tb.tb_lasti
        self.tb_lineno = self._tb.tb_lineno
        self.tb_frame = self._tb.tb_frame
        self._filtered_traceback = filtered_traceback

    @property
    def tb_next(self):
        tb_next = self._tb.tb_next
        if tb_next and tb_next != self._filtered_traceback:
            return FilteredTraceback(tb_next, self._filtered_traceback)
neutron-16.0.0.0b2.dev214/neutron/tests/tools.py

# Copyright (c) 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime
import random
import unittest

import fixtures
import netaddr
from neutron_lib import constants
from neutron_lib.services.logapi import constants as log_const
from neutron_lib.utils import helpers
from neutron_lib.utils import net
from oslo_utils import netutils
from oslo_utils import timeutils

# NOTE(yamahata): from neutron-lib 1.9.1, callback priority was added and
# priority_group module was added for constants of priority.
# test the existence of the module of priority_group to check if
# callback priority is supported or not.
_CALLBACK_PRIORITY_SUPPORTED = True
try:
    from neutron_lib.callbacks import priority_group  # noqa
except ImportError:
    _CALLBACK_PRIORITY_SUPPORTED = False


class SafeCleanupFixture(fixtures.Fixture):
    """Catch errors in daughter fixture cleanup."""

    def __init__(self, fixture):
        self.fixture = fixture

    def _setUp(self):

        def cleanUp():
            try:
                self.fixture.cleanUp()
            except Exception:
                pass

        self.fixture.setUp()
        self.addCleanup(cleanUp)


def setup_mock_calls(mocked_call, expected_calls_and_values):
    """A convenient method to set up a sequence of mock calls.

    expected_calls_and_values is a list of (expected_call, return_value):

        expected_calls_and_values = [
            (mock.call(["ovs-vsctl", self.TO, '--', "--may-exist", "add-port",
                        self.BR_NAME, pname]),
             None),
            (mock.call(["ovs-vsctl", self.TO, "set", "Interface",
                        pname, "type=gre"]),
             None),
            ....
        ]

    * expected_call should be mock.call(expected_arg, ....)
    * return_value is passed to side_effect of a mocked call.
      A return value or an exception can be specified.
    """
    return_values = [call[1] for call in expected_calls_and_values]
    mocked_call.side_effect = return_values


def verify_mock_calls(mocked_call, expected_calls_and_values,
                      any_order=False):
    """A convenient method to verify a sequence of mock calls.

    expected_calls_and_values is a list of (expected_call, return_value):

        expected_calls_and_values = [
            (mock.call(["ovs-vsctl", self.TO, '--', "--may-exist", "add-port",
                        self.BR_NAME, pname]),
             None),
            (mock.call(["ovs-vsctl", self.TO, "set", "Interface",
                        pname, "type=gre"]),
             None),
            ....
        ]

    * expected_call should be mock.call(expected_arg, ....)
    * return_value is passed to side_effect of a mocked call.
      A return value or an exception can be specified.
    """
    expected_calls = [call[0] for call in expected_calls_and_values]
    mocked_call.assert_has_calls(expected_calls, any_order=any_order)


def _make_magic_method(method_mock):
    # NOTE(yamahata): a new environment needs to be created to keep the
    # actual method_mock for each callable.
    def __call__(*args, **kwargs):
        value_mock = method_mock._orig___call__(*args, **kwargs)
        value_mock.__json__ = lambda: {}
        return value_mock

    def _get_child_mock(**kwargs):
        value_mock = method_mock._orig__get_child_mock(**kwargs)
        value_mock.__json__ = lambda: {}
        return value_mock

    return __call__, _get_child_mock
def make_mock_plugin_json_encodable(plugin_instance_mock):
    # NOTE(yamahata): Make the return value of a plugin method json
    # encodable, e.g. the return value of plugin_instance.create_network()
    # needs to be json encodable
    # plugin instance        -> method          -> return value
    #   Mock                    MagicMock          Mock
    #   plugin_instance_mock    method_mock        value_mock
    #
    # From v1.3 of pecan, pecan.jsonify uses json.Encoder unconditionally.
    # pecan v1.2 uses simplejson.Encoder which accidentally encodes
    # Mock as {} due to check of '_asdict' attributes.
    # pecan.jsonify uses __json__ magic method for encoding when
    # it's defined, so add __json__ method to return {}
    for method_mock in plugin_instance_mock._mock_children.values():
        if not callable(method_mock):
            continue

        method_mock._orig___call__ = method_mock.__call__
        method_mock._orig__get_child_mock = method_mock._get_child_mock
        __call__, _get_child_mock = _make_magic_method(method_mock)
        method_mock.__call__ = __call__
        method_mock._get_child_mock = _get_child_mock


def get_subscribe_args(*args):
    # NOTE(yamahata): from neutron-lib 1.9.1, callback priority was added.
    # old signature: (callback, resource, event)
    # new signature: (callback, resource, event, priority=PRIORITY_DEFAULT)
    if len(args) == 3 and _CALLBACK_PRIORITY_SUPPORTED:
        args = list(args)  # don't modify original list
        args.append(priority_group.PRIORITY_DEFAULT)
    return args


def fail(msg=None):
    """Fail immediately, with the given message.

    This method is equivalent to TestCase.fail without requiring a
    testcase instance (useful for reducing coupling).
    """
    raise unittest.TestCase.failureException(msg)


def get_random_string_list(i=3, n=5):
    return [helpers.get_random_string(n) for _ in range(0, i)]


def get_random_boolean():
    return bool(random.getrandbits(1))


def get_random_datetime(start_time=None, end_time=None):
    start_time = start_time or timeutils.utcnow()
    end_time = end_time or (start_time + datetime.timedelta(days=1))
    # calculate the seconds difference between start and end time
    delta_seconds_difference = int(timeutils.delta_seconds(start_time,
                                                           end_time))
    # get a random time_delta_seconds between 0 and
    # delta_seconds_difference
    random_time_delta = random.randint(0, delta_seconds_difference)
    # generate a random datetime between start and end time
    return start_time + datetime.timedelta(seconds=random_time_delta)


def get_random_integer(range_begin=0, range_end=1000):
    return random.randint(range_begin, range_end)


def get_random_prefixlen(version=4):
    maxlen = constants.IPv4_BITS
    if version == 6:
        maxlen = constants.IPv6_BITS
    return random.randint(0, maxlen)


def get_random_port(start=constants.PORT_RANGE_MIN):
    return random.randint(start, constants.PORT_RANGE_MAX)


def get_random_vlan():
    return random.randint(constants.MIN_VLAN_TAG, constants.MAX_VLAN_TAG)


def get_random_ip_version():
    return random.choice(constants.IP_ALLOWED_VERSIONS)


def get_random_ip_address(version=4):
    if version == 4:
        ip_string = '10.%d.%d.%d' % (random.randint(3, 254),
                                     random.randint(3, 254),
                                     random.randint(3, 254))
        return netaddr.IPAddress(ip_string)
    else:
        ip = netutils.get_ipv6_addr_by_EUI64(
            '2001:db8::/64',
            net.get_random_mac(['fe', '16', '3e', '00', '00', '00'])
        )
        return ip


def get_random_router_status():
    return random.choice(constants.VALID_ROUTER_STATUS)


def get_random_floatingip_status():
    return random.choice(constants.VALID_FLOATINGIP_STATUS)


def get_random_flow_direction():
    return random.choice(constants.VALID_DIRECTIONS)


def get_random_ha_states():
    return random.choice(constants.VALID_HA_STATES)


def get_random_ether_type():
    return random.choice(constants.VALID_ETHERTYPES)


def get_random_ipam_status():
    return random.choice(constants.VALID_IPAM_ALLOCATION_STATUSES)


def get_random_ip_protocol():
    return random.choice(list(constants.IP_PROTOCOL_MAP.keys()))


def get_random_port_binding_statuses():
    return random.choice(constants.PORT_BINDING_STATUSES)


def get_random_network_segment_range_network_type():
    return random.choice([constants.TYPE_VLAN,
                          constants.TYPE_VXLAN,
                          constants.TYPE_GRE,
                          constants.TYPE_GENEVE])


def get_random_ipv6_mode():
    return random.choice(constants.IPV6_MODES)


def get_random_security_event():
    return random.choice(log_const.LOG_EVENTS)
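A short usage sketch for the setup_mock_calls/verify_mock_calls helpers defined above, showing how both return values and an exception are wired through side_effect (the OVS-flavored arguments are illustrative):

import mock

mocked = mock.Mock()
calls_and_values = [
    (mock.call('add-port', 'br-int', 'tap0'), None),
    (mock.call('set', 'Interface', 'tap0'), RuntimeError('ovsdb error')),
]
setup_mock_calls(mocked, calls_and_values)  # the helper from tools.py above

assert mocked('add-port', 'br-int', 'tap0') is None
try:
    mocked('set', 'Interface', 'tap0')  # second member raises the exception
except RuntimeError:
    pass
verify_mock_calls(mocked, calls_and_values)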
neutron-16.0.0.0b2.dev214/neutron/tests/unit/__init__.py

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.common import eventlet_utils

eventlet_utils.monkey_patch()

neutron-16.0.0.0b2.dev214/neutron/tests/unit/_test_extension_portbindings.py

# Copyright 2013 NEC Corporation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib.api.definitions import portbindings
from neutron_lib import context
from neutron_lib.plugins import directory
from oslo_config import cfg
from six.moves import http_client as httplib
from webob import exc

from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit import dummy_plugin


class PortBindingsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):

    # VIF_TYPE must be overridden according to plugin vif_type
    VIF_TYPE = portbindings.VIF_TYPE_OTHER
    # VIF_DETAILS must be overridden according to plugin vif_details
    VIF_DETAILS = None

    def _check_response_portbindings(self, port):
        self.assertEqual(port[portbindings.VIF_TYPE], self.VIF_TYPE)

        # REVISIT(rkukura): Consider reworking tests to enable ML2 to bind
        if self.VIF_TYPE not in [portbindings.VIF_TYPE_UNBOUND,
                                 portbindings.VIF_TYPE_BINDING_FAILED]:
            # NOTE(r-mibu): The following six lines are just for backward
            # compatibility. In this class, HAS_PORT_FILTER has been
            # replaced by VIF_DETAILS, which can be set to the expected
            # vif_details to check, but the replacement of HAS_PORT_FILTER
            # has not yet been completed in all successors.
            if self.VIF_DETAILS is None:
                expected = getattr(self, 'HAS_PORT_FILTER', False)
                vif_details = port[portbindings.VIF_DETAILS]
                port_filter = vif_details[portbindings.CAP_PORT_FILTER]
                self.assertEqual(expected, port_filter)
                return
            self.assertEqual(self.VIF_DETAILS, port[portbindings.VIF_DETAILS])

    def _check_response_no_portbindings(self, port):
        self.assertIn('status', port)
        self.assertNotIn(portbindings.VIF_TYPE, port)
        self.assertNotIn(portbindings.VIF_DETAILS, port)

    def _get_non_admin_context(self):
        return context.Context(user_id=None,
                               tenant_id=self._tenant_id,
                               is_admin=False)

    def test_port_vif_details(self):
        with self.port(name='name') as port:
            port_id = port['port']['id']
            # Check a response of create_port
            self._check_response_portbindings(port['port'])
            # Check a response of get_port
            ctx = context.get_admin_context()
            port = self._show('ports', port_id, neutron_context=ctx)['port']
            self._check_response_portbindings(port)
            # By default user is admin - now test non admin user
            ctx = self._get_non_admin_context()
            non_admin_port = self._show(
                'ports', port_id, neutron_context=ctx)['port']
            self._check_response_no_portbindings(non_admin_port)

    def test_ports_vif_details(self):
        plugin = directory.get_plugin()
        cfg.CONF.set_default('allow_overlapping_ips', True)
        with self.port(), self.port():
            ctx = context.get_admin_context()
            ports = plugin.get_ports(ctx)
            self.assertEqual(len(ports), 2)
            for port in ports:
                self._check_response_portbindings(port)
            # By default user is admin - now test non admin user
            ctx = self._get_non_admin_context()
            ports = self._list('ports', neutron_context=ctx)['ports']
            self.assertEqual(len(ports), 2)
            for non_admin_port in ports:
                self._check_response_no_portbindings(non_admin_port)

    def _check_port_binding_profile(self, port, profile=None):
        # For plugins which do not use the binding:profile attr,
        # we just check that an operation on the port succeeds.
        self.assertIn('id', port)

    def _test_create_port_binding_profile(self, profile):
        profile_arg = {portbindings.PROFILE: profile}
        with self.port(arg_list=(portbindings.PROFILE,),
                       **profile_arg) as port:
            port_id = port['port']['id']
            self._check_port_binding_profile(port['port'], profile)
            port = self._show('ports', port_id)
            self._check_port_binding_profile(port['port'], profile)

    def test_create_port_binding_profile_none(self):
        self._test_create_port_binding_profile(None)

    def test_create_port_binding_profile_with_empty_dict(self):
        self._test_create_port_binding_profile({})

    def _test_update_port_binding_profile(self, profile):
        profile_arg = {portbindings.PROFILE: profile}
        with self.port() as port:
            self._check_port_binding_profile(port['port'])
            port_id = port['port']['id']
            ctx = context.get_admin_context()
            port = self._update('ports', port_id, {'port': profile_arg},
                                neutron_context=ctx)['port']
            self._check_port_binding_profile(port, profile)
            port = self._show('ports', port_id)['port']
            self._check_port_binding_profile(port, profile)

    def test_update_port_binding_profile_none(self):
        self._test_update_port_binding_profile(None)

    def test_update_port_binding_profile_with_empty_dict(self):
        self._test_update_port_binding_profile({})

    def test_port_create_portinfo_non_admin(self):
        profile_arg = {portbindings.PROFILE:
                       {dummy_plugin.RESOURCE_NAME: dummy_plugin.RESOURCE_NAME}}
        with self.network(set_context=True, tenant_id='test') as net1:
            with self.subnet(network=net1) as subnet1:
                # succeed without binding:profile
                with self.port(subnet=subnet1,
                               set_context=True, tenant_id='test'):
                    pass
                # fail with binding:profile
                try:
                    with self.port(subnet=subnet1,
                                   expected_res_status=403,
                                   arg_list=(portbindings.PROFILE,),
                                   set_context=True, tenant_id='test',
                                   **profile_arg):
                        pass
                except exc.HTTPClientError:
                    pass

    def test_port_update_portinfo_non_admin(self):
        profile_arg = {portbindings.PROFILE:
                       {dummy_plugin.RESOURCE_NAME: dummy_plugin.RESOURCE_NAME}}
        with self.network() as net1:
            with self.subnet(network=net1) as subnet1:
                with self.port(subnet=subnet1) as port:
                    # By default user is admin - now test non admin user
                    port_id = port['port']['id']
                    ctx = self._get_non_admin_context()
                    port = self._update('ports', port_id,
                                        {'port': profile_arg},
                                        expected_code=exc.HTTPForbidden.code,
                                        neutron_context=ctx)


class PortBindingsHostTestCaseMixin(object):
    fmt = 'json'
    hostname = 'testhost'

    def _check_response_portbindings_host(self, port):
        self.assertEqual(port[portbindings.HOST_ID], self.hostname)

    def _check_response_no_portbindings_host(self, port):
        self.assertIn('status', port)
        self.assertNotIn(portbindings.HOST_ID, port)

    def test_port_vif_non_admin(self):
        with self.network(set_context=True,
                          tenant_id='test') as net1:
            with self.subnet(network=net1) as subnet1:
                host_arg = {portbindings.HOST_ID: self.hostname}
                try:
                    with self.port(subnet=subnet1,
                                   expected_res_status=403,
                                   arg_list=(portbindings.HOST_ID,),
                                   set_context=True, tenant_id='test',
                                   **host_arg):
                        pass
                except exc.HTTPClientError:
                    pass

    def test_port_vif_host(self):
        host_arg = {portbindings.HOST_ID: self.hostname}
        with self.port(name='name', arg_list=(portbindings.HOST_ID,),
                       **host_arg) as port:
            port_id = port['port']['id']
            # Check a response of create_port
            self._check_response_portbindings_host(port['port'])
            # Check a response of get_port
            ctx = context.get_admin_context()
            port = self._show('ports', port_id, neutron_context=ctx)['port']
            self._check_response_portbindings_host(port)
            # By default user is admin - now test non admin user
            ctx = context.Context(user_id=None,
                                  tenant_id=self._tenant_id,
                                  is_admin=False)
            non_admin_port = self._show(
                'ports', port_id, neutron_context=ctx)['port']
            self._check_response_no_portbindings_host(non_admin_port)

    def test_ports_vif_host(self):
        cfg.CONF.set_default('allow_overlapping_ips', True)
        host_arg = {portbindings.HOST_ID: self.hostname}
        with self.port(name='name1',
                       arg_list=(portbindings.HOST_ID,),
                       **host_arg), self.port(name='name2'):
            ctx = context.get_admin_context()
            ports = self._list('ports', neutron_context=ctx)['ports']
            self.assertEqual(2, len(ports))
            for port in ports:
                if port['name'] == 'name1':
                    self._check_response_portbindings_host(port)
                else:
                    self.assertFalse(port[portbindings.HOST_ID])
            # By default user is admin - now test non admin user
            ctx = context.Context(user_id=None,
                                  tenant_id=self._tenant_id,
                                  is_admin=False)
            ports = self._list('ports', neutron_context=ctx)['ports']
            self.assertEqual(2, len(ports))
            for non_admin_port in ports:
                self._check_response_no_portbindings_host(non_admin_port)

    def test_ports_vif_host_update(self):
        cfg.CONF.set_default('allow_overlapping_ips', True)
        host_arg = {portbindings.HOST_ID: self.hostname}
        with self.port(name='name1',
                       arg_list=(portbindings.HOST_ID,),
                       **host_arg) as port1, self.port(name='name2') as port2:
            data = {'port': {portbindings.HOST_ID: 'testhosttemp'}}
            req = self.new_update_request('ports', data, port1['port']['id'])
            req.get_response(self.api)
            req = self.new_update_request('ports', data, port2['port']['id'])
            ctx = context.get_admin_context()
            req.get_response(self.api)
            ports = self._list('ports', neutron_context=ctx)['ports']
        self.assertEqual(2, len(ports))
        for port in ports:
            self.assertEqual('testhosttemp', port[portbindings.HOST_ID])

    def test_ports_vif_non_host_update(self):
        host_arg = {portbindings.HOST_ID: self.hostname}
        with self.port(name='name', arg_list=(portbindings.HOST_ID,),
                       **host_arg) as port:
            data = {'port': {'admin_state_up': False}}
            req = self.new_update_request('ports', data, port['port']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(port['port'][portbindings.HOST_ID],
                             res['port'][portbindings.HOST_ID])

    def test_ports_vif_non_host_update_when_host_null(self):
        with self.port() as port:
            data = {'port': {'admin_state_up': False}}
            req = self.new_update_request('ports', data, port['port']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(port['port'][portbindings.HOST_ID],
                             res['port'][portbindings.HOST_ID])

    def test_ports_vif_host_list(self):
        cfg.CONF.set_default('allow_overlapping_ips', True)
        host_arg = {portbindings.HOST_ID: self.hostname}
        with self.port(name='name1',
                       arg_list=(portbindings.HOST_ID,),
                       **host_arg) as port1,\
                self.port(name='name2'),\
                self.port(name='name3',
                          arg_list=(portbindings.HOST_ID,),
                          **host_arg) as port3:
            self._test_list_resources(
                'port', (port1, port3),
                query_params='%s=%s' % (portbindings.HOST_ID, self.hostname))


class PortBindingsVnicTestCaseMixin(object):
    fmt = 'json'
    vnic_type = portbindings.VNIC_NORMAL

    def _check_response_portbindings_vnic_type(self, port):
        self.assertIn('status', port)
        self.assertEqual(port[portbindings.VNIC_TYPE], self.vnic_type)

    def test_port_vnic_type_non_admin(self):
        with self.network(set_context=True,
                          tenant_id='test') as net1:
            with self.subnet(network=net1) as subnet1:
                vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type}
                with self.port(subnet=subnet1,
                               expected_res_status=httplib.CREATED,
                               arg_list=(portbindings.VNIC_TYPE,),
                               set_context=True, tenant_id='test',
                               **vnic_arg) as port:
                    # Check a response of create_port
                    self._check_response_portbindings_vnic_type(port['port'])

    def test_port_vnic_type(self):
        vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type}
        with self.port(name='name', arg_list=(portbindings.VNIC_TYPE,),
                       **vnic_arg) as port:
            port_id = port['port']['id']
            # Check a response of create_port
            self._check_response_portbindings_vnic_type(port['port'])
            # Check a response of get_port
            ctx = context.get_admin_context()
            port = self._show('ports', port_id, neutron_context=ctx)['port']
            self._check_response_portbindings_vnic_type(port)
            # By default user is admin - now test non admin user
            ctx = context.Context(user_id=None,
                                  tenant_id=self._tenant_id,
                                  is_admin=False)
            non_admin_port = self._show(
                'ports', port_id, neutron_context=ctx)['port']
            self._check_response_portbindings_vnic_type(non_admin_port)

    def test_ports_vnic_type(self):
        cfg.CONF.set_default('allow_overlapping_ips', True)
        vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type}
        with self.port(name='name1',
                       arg_list=(portbindings.VNIC_TYPE,),
                       **vnic_arg), self.port(name='name2'):
            ctx = context.get_admin_context()
            ports = self._list('ports', neutron_context=ctx)['ports']
            self.assertEqual(2, len(ports))
            for port in ports:
                if port['name'] == 'name1':
                    self._check_response_portbindings_vnic_type(port)
                else:
                    self.assertEqual(portbindings.VNIC_NORMAL,
                                     port[portbindings.VNIC_TYPE])
            # By default user is admin - now test non admin user
            ctx = context.Context(user_id=None,
                                  tenant_id=self._tenant_id,
                                  is_admin=False)
            ports = self._list('ports', neutron_context=ctx)['ports']
            self.assertEqual(2, len(ports))
            for non_admin_port in ports:
                self._check_response_portbindings_vnic_type(non_admin_port)
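For orientation, this is how a concrete plugin test suite is expected to consume PortBindingsTestCase: override the class attributes and inherit the checks. A hedged sketch; the subclass name is a placeholder, and real plugin tests would set whatever vif type/details their mechanism driver binds with:

from neutron_lib.api.definitions import portbindings

class MyPluginPortBindingsTestCase(PortBindingsTestCase):
    # Placeholder values for illustration only.
    VIF_TYPE = portbindings.VIF_TYPE_OVS
    VIF_DETAILS = {portbindings.CAP_PORT_FILTER: True}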
neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/__init__.py
neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/common/__init__.py
neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/common/test_async_process.py

# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import signal
import sys

import eventlet.event
from eventlet.green import subprocess
import eventlet.queue
import mock
import testtools

from neutron.agent.common import async_process
from neutron.agent.common import utils
from neutron.tests import base
from neutron.tests.unit.agent.linux import failing_process


class TestAsyncProcess(base.BaseTestCase):

    def setUp(self):
        super(TestAsyncProcess, self).setUp()
        self.proc = async_process.AsyncProcess(['fake'])

    def test_constructor_raises_exception_for_negative_respawn_interval(self):
        with testtools.ExpectedException(ValueError):
            async_process.AsyncProcess(['fake'], respawn_interval=-1)

    def test__spawn(self):
        expected_process = 'Foo'
        proc = self.proc
        with mock.patch.object(utils, 'create_process') as mock_create_process:
            mock_create_process.return_value = [expected_process, None]
            with mock.patch('eventlet.spawn') as mock_spawn:
                proc._spawn()

        self.assertTrue(self.proc._is_running)
        self.assertIsInstance(proc._kill_event, eventlet.event.Event)
        self.assertEqual(proc._process, expected_process)
        mock_spawn.assert_has_calls([
            mock.call(proc._watch_process,
                      proc._read_stdout,
                      proc._kill_event),
            mock.call(proc._watch_process,
                      proc._read_stderr,
                      proc._kill_event),
        ])
        self.assertEqual(len(proc._watchers), 2)

    def test__pid_none(self):
        pid = 1
        self.proc._pid = None
        with mock.patch.object(self.proc, '_process') as _process:
            with mock.patch.object(utils,
                                   'get_root_helper_child_pid') as func:
                func.return_value = pid
                self.assertEqual(self.proc.pid, pid)
                func.assert_called_once_with(_process.pid, ['fake'],
                                             run_as_root=False)
                self.assertEqual(self.proc._pid, pid)

    def test__pid_not_none(self):
        self.proc._pid = 1
        with mock.patch.object(self.proc, '_process'),\
                mock.patch.object(utils, 'get_root_helper_child_pid') as func:
            self.assertEqual(self.proc.pid, 1)
            func.assert_not_called()

    def test__handle_process_error_kills_with_respawn(self):
        with mock.patch.object(self.proc, '_kill') as kill:
            self.proc._handle_process_error()

        kill.assert_has_calls([mock.call(signal.SIGKILL)])

    def test__handle_process_error_kills_without_respawn(self):
        self.proc.respawn_interval = 1
        with mock.patch.object(self.proc, '_kill') as kill:
            with mock.patch.object(self.proc, '_spawn') as spawn:
                with mock.patch('eventlet.sleep') as sleep:
                    self.proc._handle_process_error()

        kill.assert_has_calls([mock.call(signal.SIGKILL)])
        sleep.assert_has_calls([mock.call(self.proc.respawn_interval)])
        spawn.assert_called_once_with()

    def test__handle_process_error_no_crash_if_started(self):
        self.proc._is_running = True
        with mock.patch.object(self.proc, '_kill'):
            with mock.patch.object(self.proc, '_spawn') as mock_spawn:
                self.proc._handle_process_error()
                mock_spawn.assert_not_called()

    def _watch_process_exception(self):
        raise Exception('Error!')

    def _test__watch_process(self, callback, kill_event):
        self.proc._is_running = True
        self.proc._kill_event = kill_event
        # Ensure the test times out eventually if the watcher loops endlessly
        with self.assert_max_execution_time():
            with mock.patch.object(self.proc,
                                   '_handle_process_error') as func:
                self.proc._watch_process(callback, kill_event)

        if not kill_event.ready():
            func.assert_called_once_with()

    def test__watch_process_exits_on_callback_failure(self):
        self._test__watch_process(lambda: None, eventlet.event.Event())

    def test__watch_process_exits_on_exception(self):
        self._test__watch_process(self._watch_process_exception,
                                  eventlet.event.Event())
        with mock.patch.object(self.proc, '_handle_process_error') as func:
            self.proc._watch_process(self._watch_process_exception,
self.proc._kill_event) func.assert_not_called() def test__watch_process_exits_on_sent_kill_event(self): kill_event = eventlet.event.Event() kill_event.send() self._test__watch_process(None, kill_event) def _test_read_output_queues_and_returns_result(self, output): queue = eventlet.queue.LightQueue() mock_stream = mock.Mock() with mock.patch.object(mock_stream, 'readline') as mock_readline: mock_readline.return_value = output result = self.proc._read(mock_stream, queue) if output: self.assertEqual(output, result) self.assertEqual(output, queue.get_nowait()) else: self.assertFalse(result) self.assertTrue(queue.empty()) def test__read_queues_and_returns_output(self): self._test_read_output_queues_and_returns_result('foo') def test__read_returns_none_for_missing_output(self): self._test_read_output_queues_and_returns_result('') def test_start_raises_exception_if_process_already_started(self): self.proc._is_running = True with testtools.ExpectedException(async_process.AsyncProcessException): self.proc.start() def test_start_invokes__spawn(self): with mock.patch.object(self.proc, '_spawn') as mock_start: self.proc.start() mock_start.assert_called_once_with() def test__iter_queue_returns_empty_list_for_empty_queue(self): result = list(self.proc._iter_queue(eventlet.queue.LightQueue(), False)) self.assertEqual([], result) def test__iter_queue_returns_queued_data(self): queue = eventlet.queue.LightQueue() queue.put('foo') result = list(self.proc._iter_queue(queue, False)) self.assertEqual(result, ['foo']) def _test_iter_output_calls_iter_queue_on_output_queue(self, output_type): expected_value = 'foo' with mock.patch.object(self.proc, '_iter_queue') as mock_iter_queue: mock_iter_queue.return_value = expected_value target_func = getattr(self.proc, 'iter_%s' % output_type, None) value = target_func() self.assertEqual(value, expected_value) queue = getattr(self.proc, '_%s_lines' % output_type, None) mock_iter_queue.assert_called_with(queue, False) def test_iter_stdout(self): self._test_iter_output_calls_iter_queue_on_output_queue('stdout') def test_iter_stderr(self): self._test_iter_output_calls_iter_queue_on_output_queue('stderr') def test__kill_targets_process_for_pid(self): pid = 1 with mock.patch.object(self.proc, '_kill_event' ) as mock_kill_event,\ mock.patch.object(utils, 'get_root_helper_child_pid', return_value=pid),\ mock.patch.object(self.proc, '_kill_process_and_wait' ) as mock_kill_process_and_wait,\ mock.patch.object(self.proc, '_process'): self.proc._kill(signal.SIGKILL) self.assertIsNone(self.proc._kill_event) self.assertFalse(self.proc._is_running) self.assertIsNone(self.proc._pid) mock_kill_event.send.assert_called_once_with() if pid: mock_kill_process_and_wait.assert_called_once_with( pid, signal.SIGKILL, None) def _test__kill_process_and_wait(self, pid, expected, exception_message=None, kill_signal=signal.SIGKILL): self.proc.run_as_root = True if exception_message: exc = RuntimeError(exception_message) else: exc = None with mock.patch.object(utils, 'kill_process', side_effect=exc) as mock_kill_process: actual = self.proc._kill_process(pid, kill_signal) self.assertEqual(expected, actual) mock_kill_process.assert_called_with(pid, kill_signal, self.proc.run_as_root) def test__kill_process_and_wait_returns_true_for_valid_pid(self): self._test__kill_process_and_wait('1', True) def test__kill_process_and_wait_returns_false_for_execute_exception(self): self._test__kill_process_and_wait('1', False, 'Invalid') def test_kill_process_and_wait_with_different_signal(self): 
self._test__kill_process_and_wait( '1', True, kill_signal=signal.SIGTERM) def test__kill_process_timeout_reached(self): self.proc.run_as_root = True kill_timeout = 5 pid = '1' with mock.patch.object(utils, 'kill_process') as mock_kill_process, \ mock.patch.object(self.proc, '_process') as process_mock: process_mock.wait.side_effect = subprocess.TimeoutExpired( self.proc.cmd, kill_timeout) self.assertTrue( self.proc._kill_process_and_wait( pid, signal.SIGTERM, kill_timeout)) process_mock.wait.assert_called_once_with(kill_timeout) mock_kill_process.assert_has_calls([ mock.call(pid, signal.SIGTERM, self.proc.run_as_root), mock.call(pid, signal.SIGKILL, self.proc.run_as_root)]) def test_stop_calls_kill_with_provided_signal_number(self): self.proc._is_running = True with mock.patch.object(self.proc, '_kill') as mock_kill: self.proc.stop(kill_signal=signal.SIGTERM) mock_kill.assert_called_once_with(signal.SIGTERM, None) def test_stop_raises_exception_if_already_started(self): with testtools.ExpectedException(async_process.AsyncProcessException): self.proc.stop() def test_cmd(self): for expected, cmd in (('ls -l file', ['ls', '-l', 'file']), ('fake', ['fake'])): proc = async_process.AsyncProcess(cmd) self.assertEqual(expected, proc.cmd) class TestAsyncProcessLogging(base.BaseTestCase): def setUp(self): super(TestAsyncProcessLogging, self).setUp() self.log_mock = mock.patch.object(async_process, 'LOG').start() def _test__read_stdout_logging(self, enable): proc = async_process.AsyncProcess(['fakecmd'], log_output=enable) with mock.patch.object(proc, '_read', return_value='fakedata'),\ mock.patch.object(proc, '_process'): proc._read_stdout() self.assertEqual(enable, self.log_mock.debug.called) def _test__read_stderr_logging(self, enable): proc = async_process.AsyncProcess(['fake'], log_output=enable) with mock.patch.object(proc, '_read', return_value='fakedata'),\ mock.patch.object(proc, '_process'): proc._read_stderr() self.assertEqual(enable, self.log_mock.error.called) def test__read_stdout_logging_enabled(self): self._test__read_stdout_logging(enable=True) def test__read_stdout_logging_disabled(self): self._test__read_stdout_logging(enable=False) def test__read_stderr_logging_enabled(self): self._test__read_stderr_logging(enable=True) def test__read_stderr_logging_disabled(self): self._test__read_stderr_logging(enable=False) class TestAsyncProcessDieOnError(base.BaseTestCase): def test__read_stderr_returns_none_on_error(self): proc = async_process.AsyncProcess(['fakecmd'], die_on_error=True) with mock.patch.object(proc, '_read', return_value='fakedata'),\ mock.patch.object(proc, '_process'): self.assertIsNone(proc._read_stderr()) class TestFailingAsyncProcess(base.BaseTestCase): def setUp(self): super(TestFailingAsyncProcess, self).setUp() path = self.get_temp_file_path('async.tmp', self.get_new_temp_dir()) self.process = async_process.AsyncProcess([sys.executable, failing_process.__file__, path], respawn_interval=0) def test_failing_async_process_handle_error_once(self): with mock.patch.object(self.process, '_handle_process_error')\ as handle_error_mock: self.process.start() self.process._process.wait() # Wait for the monitor process to complete for thread in self.process._watchers: thread.wait() self.assertEqual(1, handle_error_mock.call_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/common/test_ovs_lib.py0000644000175000017500000007221500000000000026421 
0ustar00coreycorey00000000000000# Copyright 2012, VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import mock from neutron_lib import exceptions from oslo_serialization import jsonutils from oslo_utils import uuidutils import tenacity import testtools from neutron.agent.common import ovs_lib from neutron.agent.common import utils from neutron.plugins.ml2.drivers.openvswitch.agent.common \ import constants as p_const from neutron.tests import base class OFCTLParamListMatcher(object): def _parse(self, params): actions_pos = params.find('actions') return set(params[:actions_pos].split(',')), params[actions_pos:] def __init__(self, params): self.expected = self._parse(params) def __eq__(self, other): return self.expected == self._parse(other) def __str__(self): return 'ovs-ofctl parameters: %s, "%s"' % self.expected __repr__ = __str__ class StringSetMatcher(object): """A helper object for unordered CSV strings Will compare equal if both strings, when read as a comma-separated set of values, represent the same set. Example: "a,b,45" == "b,45,a" """ def __init__(self, string, separator=','): self.separator = separator self.set = set(string.split(self.separator)) def __eq__(self, other): return self.set == set(other.split(self.separator)) def __ne__(self, other): return self.set != set(other.split(self.separator)) def __repr__(self): sep = '' if self.separator == ',' else " on %s" % self.separator return '' % (self.set, sep) class OVS_Lib_Test(base.BaseTestCase): """A test suite to exercise the OVS libraries shared by Neutron agents. Note: these tests do not actually execute ovs-* utilities, and thus can run on any system. That does, however, limit their scope. 
""" def setUp(self): super(OVS_Lib_Test, self).setUp() self.BR_NAME = "br-int" # Don't attempt to connect to ovsdb mock.patch('neutron.agent.ovsdb.impl_idl.api_factory').start() self.br = ovs_lib.OVSBridge(self.BR_NAME) self.execute = mock.patch.object( utils, "execute", spec=utils.execute).start() def test_vifport(self): """Create and stringify vif port, confirm no exceptions.""" pname = "vif1.0" ofport = 5 vif_id = uuidutils.generate_uuid() mac = "ca:fe:de:ad:be:ef" # test __init__ port = ovs_lib.VifPort(pname, ofport, vif_id, mac, self.br) self.assertEqual(port.port_name, pname) self.assertEqual(port.ofport, ofport) self.assertEqual(port.vif_id, vif_id) self.assertEqual(port.vif_mac, mac) self.assertEqual(port.switch.br_name, self.BR_NAME) # test __str__ str(port) def test_add_flow(self): ofport = "99" vid = 4000 lsw_id = 18 cidr = '192.168.1.0/24' flow_dict_1 = collections.OrderedDict([ ('cookie', 1234), ('priority', 2), ('dl_src', 'ca:fe:de:ad:be:ef'), ('actions', 'strip_vlan,output:0')]) flow_dict_2 = collections.OrderedDict([ ('cookie', 1254), ('priority', 1), ('actions', 'normal')]) flow_dict_3 = collections.OrderedDict([ ('cookie', 1257), ('priority', 2), ('actions', 'drop')]) flow_dict_4 = collections.OrderedDict([ ('cookie', 1274), ('priority', 2), ('in_port', ofport), ('actions', 'drop')]) flow_dict_5 = collections.OrderedDict([ ('cookie', 1284), ('priority', 4), ('in_port', ofport), ('dl_vlan', vid), ('actions', "strip_vlan,set_tunnel:%s,normal" % (lsw_id))]) flow_dict_6 = collections.OrderedDict([ ('cookie', 1754), ('priority', 3), ('tun_id', lsw_id), ('actions', "mod_vlan_vid:%s,output:%s" % (vid, ofport))]) flow_dict_7 = collections.OrderedDict([ ('cookie', 1256), ('priority', 4), ('nw_src', cidr), ('proto', 'arp'), ('actions', 'drop')]) self.br.add_flow(**flow_dict_1) self.br.add_flow(**flow_dict_2) self.br.add_flow(**flow_dict_3) self.br.add_flow(**flow_dict_4) self.br.add_flow(**flow_dict_5) self.br.add_flow(**flow_dict_6) self.br.add_flow(**flow_dict_7) expected_calls = [ self._ofctl_mock("add-flows", self.BR_NAME, '-', process_input=OFCTLParamListMatcher( "hard_timeout=0,idle_timeout=0,cookie=1234," "priority=2,dl_src=ca:fe:de:ad:be:ef," "actions=strip_vlan,output:0")), self._ofctl_mock("add-flows", self.BR_NAME, '-', process_input=OFCTLParamListMatcher( "hard_timeout=0,idle_timeout=0,cookie=1254," "priority=1,actions=normal")), self._ofctl_mock("add-flows", self.BR_NAME, '-', process_input=OFCTLParamListMatcher( "hard_timeout=0,idle_timeout=0,cookie=1257," "priority=2,actions=drop")), self._ofctl_mock("add-flows", self.BR_NAME, '-', process_input=OFCTLParamListMatcher( "hard_timeout=0,idle_timeout=0,cookie=1274," "priority=2,in_port=%s,actions=drop" % ofport )), self._ofctl_mock("add-flows", self.BR_NAME, '-', process_input=OFCTLParamListMatcher( "hard_timeout=0,idle_timeout=0,cookie=1284," "priority=4,dl_vlan=%s,in_port=%s," "actions=strip_vlan,set_tunnel:%s,normal" % (vid, ofport, lsw_id))), self._ofctl_mock("add-flows", self.BR_NAME, '-', process_input=OFCTLParamListMatcher( "hard_timeout=0,idle_timeout=0,cookie=1754," "priority=3," "tun_id=%s,actions=mod_vlan_vid:%s,output:%s" % (lsw_id, vid, ofport))), self._ofctl_mock("add-flows", self.BR_NAME, '-', process_input=OFCTLParamListMatcher( "hard_timeout=0,idle_timeout=0,cookie=1256," "priority=4,nw_src=%s,arp,actions=drop" % cidr)), ] self.execute.assert_has_calls(expected_calls) def _ofctl_args(self, cmd, *args): cmd = ['ovs-ofctl', cmd, '-O', self.br._highest_protocol_needed] cmd += args return cmd def 
_ofctl_mock(self, cmd, *args, **kwargs): cmd = self._ofctl_args(cmd, *args) return mock.call(cmd, run_as_root=True, **kwargs) def _verify_ofctl_mock(self, cmd, *args, **kwargs): cmd = self._ofctl_args(cmd, *args) return self.execute.assert_called_once_with(cmd, run_as_root=True, **kwargs) def test_add_flow_timeout_set(self): flow_dict = collections.OrderedDict([ ('cookie', 1234), ('priority', 1), ('hard_timeout', 1000), ('idle_timeout', 2000), ('actions', 'normal')]) self.br.add_flow(**flow_dict) self._verify_ofctl_mock( "add-flows", self.BR_NAME, '-', process_input="hard_timeout=1000,idle_timeout=2000," "priority=1,cookie=1234,actions=normal") def test_add_flow_default_priority(self): flow_dict = collections.OrderedDict([('actions', 'normal'), ('cookie', 1234)]) self.br.add_flow(**flow_dict) self._verify_ofctl_mock( "add-flows", self.BR_NAME, '-', process_input="hard_timeout=0,idle_timeout=0,priority=1," "cookie=1234,actions=normal") def test_default_datapath(self): # verify kernel datapath is default expected = p_const.OVS_DATAPATH_SYSTEM self.assertEqual(expected, self.br.datapath_type) def test_non_default_datapath(self): expected = p_const.OVS_DATAPATH_NETDEV self.br = ovs_lib.OVSBridge(self.BR_NAME, datapath_type=expected) br2 = self.br.add_bridge('another-br', datapath_type=expected) self.assertEqual(expected, self.br.datapath_type) self.assertEqual(expected, br2.datapath_type) def test_count_flows(self): self.execute.return_value = 'ignore\nflow-1\n' # counts the number of flows as total lines of output - 2 self.assertEqual(self.br.count_flows(), 1) self._verify_ofctl_mock("dump-flows", self.BR_NAME, process_input=None) def test_delete_flow(self): ofport = 5 lsw_id = 40 vid = 39 self.br.delete_flows(in_port=ofport) self.br.delete_flows(tun_id=lsw_id) self.br.delete_flows(dl_vlan=vid) self.br.delete_flows() cookie_spec = "cookie=%s/-1" % self.br._default_cookie expected_calls = [ self._ofctl_mock("del-flows", self.BR_NAME, '-', process_input=StringSetMatcher( "%s,in_port=%d" % (cookie_spec, ofport))), self._ofctl_mock("del-flows", self.BR_NAME, '-', process_input=StringSetMatcher( "%s,tun_id=%s" % (cookie_spec, lsw_id))), self._ofctl_mock("del-flows", self.BR_NAME, '-', process_input=StringSetMatcher( "%s,dl_vlan=%s" % (cookie_spec, vid))), self._ofctl_mock("del-flows", self.BR_NAME, '-', process_input="%s" % cookie_spec), ] self.execute.assert_has_calls(expected_calls) def test_delete_flows_cookie_nomask(self): self.br.delete_flows(cookie=42) self.execute.assert_has_calls([ self._ofctl_mock("del-flows", self.BR_NAME, '-', process_input="cookie=42/-1"), ]) def test_do_action_flows_delete_flows(self): # test what the deferred bridge implementation calls, in the case of a # delete_flows(cookie=ovs_lib.COOKIE_ANY) among calls to # delete_flows(foo=bar) self.br.do_action_flows('del', [{'in_port': 5}, {'cookie': ovs_lib.COOKIE_ANY}]) expected_calls = [ self._ofctl_mock("del-flows", self.BR_NAME, process_input=None), ] self.execute.assert_has_calls(expected_calls) def test_delete_flows_any_cookie(self): self.br.delete_flows(in_port=5, cookie=ovs_lib.COOKIE_ANY) self.br.delete_flows(cookie=ovs_lib.COOKIE_ANY) expected_calls = [ self._ofctl_mock("del-flows", self.BR_NAME, '-', process_input="in_port=5"), self._ofctl_mock("del-flows", self.BR_NAME, process_input=None), ] self.execute.assert_has_calls(expected_calls) def test_mod_delete_flows_strict(self): self.br.delete_flows(in_port=5, priority=1, strict=True) self.br.mod_flow(in_port=5, priority=1, strict=True, actions='drop') cookie_spec 
= "cookie=%s" % self.br._default_cookie expected_calls = [ self._ofctl_mock("del-flows", self.BR_NAME, '--strict', '-', process_input=StringSetMatcher( "%s/-1,in_port=5,priority=1" % cookie_spec)), self._ofctl_mock("mod-flows", self.BR_NAME, '--strict', '-', process_input=StringSetMatcher( "%s,in_port=5,priority=1,actions=drop" % cookie_spec)), ] self.execute.assert_has_calls(expected_calls) def test_mod_delete_flows_priority_without_strict(self): self.assertRaises(exceptions.InvalidInput, self.br.delete_flows, in_port=5, priority=1) def test_mod_delete_flows_mixed_strict(self): deferred_br = self.br.deferred() deferred_br.delete_flows(in_port=5) deferred_br.delete_flows(in_port=5, priority=1, strict=True) self.assertRaises(exceptions.InvalidInput, deferred_br.apply_flows) def test_dump_flows(self): table = 23 nxst_flow = "NXST_FLOW reply (xid=0x4):" flows = "\n".join([" cookie=0x0, duration=18042.514s, table=0, " "n_packets=6, n_bytes=468, " "priority=2,in_port=1 actions=drop", " cookie=0x0, duration=18027.562s, table=0, " "n_packets=0, n_bytes=0, " "priority=3,in_port=1,dl_vlan=100 " "actions=mod_vlan_vid:1,NORMAL", " cookie=0x0, duration=18044.351s, table=0, " "n_packets=9, n_bytes=594, priority=1 " "actions=NORMAL", " cookie=0x0, " "duration=18044.211s, table=23, n_packets=0, " "n_bytes=0, priority=0 actions=drop"]) flow_args = '\n'.join([nxst_flow, flows]) run_ofctl = mock.patch.object(self.br, 'run_ofctl').start() run_ofctl.side_effect = [flow_args] retflows = self.br.dump_flows_for_table(table) self.assertEqual(flows, retflows) def test_dump_flows_ovs_dead(self): table = 23 run_ofctl = mock.patch.object(self.br, 'run_ofctl').start() run_ofctl.side_effect = [''] retflows = self.br.dump_flows_for_table(table) self.assertIsNone(retflows) def test_mod_flow_with_priority_set(self): params = {'in_port': '1', 'priority': '1'} self.assertRaises(exceptions.InvalidInput, self.br.mod_flow, **params) def test_mod_flow_no_actions_set(self): params = {'in_port': '1'} self.assertRaises(exceptions.InvalidInput, self.br.mod_flow, **params) def test_run_ofctl_retry_on_socket_error(self): err = RuntimeError('failed to connect to socket') self.execute.side_effect = [err] * 5 with mock.patch('time.sleep') as sleep: self.br.run_ofctl('add-flows', []) self.assertEqual(5, sleep.call_count) self.assertEqual(6, self.execute.call_count) # a regular exception fails right away self.execute.side_effect = RuntimeError('garbage') self.execute.reset_mock() with mock.patch('time.sleep') as sleep: self.br.run_ofctl('add-flows', []) self.assertEqual(0, sleep.call_count) self.assertEqual(1, self.execute.call_count) def _encode_ovs_json(self, headings, data): # See man ovs-vsctl(8) for the encoding details. 
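# An illustrative row (values invented for this comment): a port named
# 'qvo1' carrying one external_ids entry would come back from ovs-vsctl
# roughly as
#   {"headings": ["name", "external_ids"],
#    "data": [["qvo1", ["map", [["iface-id", "pid1"]]]]]}
# i.e. maps and sets are encoded as tagged two-element ["map", ...] /
# ["set", ...] lists, which is exactly what the loop below reproduces.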
r = {"data": [], "headings": headings} for row in data: ovs_row = [] r["data"].append(ovs_row) for cell in row: if isinstance(cell, (str, int, list)): ovs_row.append(cell) elif isinstance(cell, dict): ovs_row.append(["map", cell.items()]) elif isinstance(cell, set): ovs_row.append(["set", cell]) else: raise TypeError('%r not int, str, list, set or dict' % type(cell)) return jsonutils.dumps(r) def test_get_vif_ports(self): pname = "tap99" ofport = 6 vif_id = uuidutils.generate_uuid() mac = "ca:fe:de:ad:be:ef" id_field = 'iface-id' external_ids = {"attached-mac": mac, id_field: vif_id} self.br.get_ports_attributes = mock.Mock(return_value=[{ 'name': pname, 'ofport': ofport, 'external_ids': external_ids}]) ports = self.br.get_vif_ports() self.assertEqual(1, len(ports)) self.assertEqual(ports[0].port_name, pname) self.assertEqual(ports[0].ofport, ofport) self.assertEqual(ports[0].vif_id, vif_id) self.assertEqual(ports[0].vif_mac, mac) self.assertEqual(ports[0].switch.br_name, self.BR_NAME) self.br.get_ports_attributes.assert_called_once_with( 'Interface', columns=['name', 'external_ids', 'ofport'], if_exists=True) def test_delete_all_ports(self): with mock.patch.object(self.br, 'get_port_name_list', return_value=['port1']) as get_port: with mock.patch.object(self.br, 'delete_port') as delete_port: self.br.delete_ports(all_ports=True) get_port.assert_called_once_with() delete_port.assert_called_once_with('port1') def test_delete_neutron_ports(self): port1 = ovs_lib.VifPort('tap1234', 1, uuidutils.generate_uuid(), 'ca:fe:de:ad:be:ef', 'br') port2 = ovs_lib.VifPort('tap5678', 2, uuidutils.generate_uuid(), 'ca:ee:de:ad:be:ef', 'br') with mock.patch.object(self.br, 'get_vif_ports', return_value=[port1, port2]) as get_ports: with mock.patch.object(self.br, 'delete_port') as delete_port: self.br.delete_ports(all_ports=False) get_ports.assert_called_once_with() delete_port.assert_has_calls([ mock.call('tap1234'), mock.call('tap5678') ]) def test_get_local_port_mac_succeeds(self): with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand', return_value=mock.Mock(address='foo')): self.assertEqual('foo', self.br.get_local_port_mac()) def test_get_local_port_mac_raises_exception_for_missing_mac(self): with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand', return_value=mock.Mock(address=None)): with testtools.ExpectedException(Exception): self.br.get_local_port_mac() def test_delete_egress_bw_limit_for_port(self): with mock.patch.object( self.br, "_set_egress_bw_limit_for_port" ) as set_egress_mock, mock.patch.object( self.br, "port_exists", return_value=True ) as port_exists_mock: self.br.delete_egress_bw_limit_for_port("test_port") port_exists_mock.assert_called_once_with("test_port") set_egress_mock.assert_called_once_with("test_port", 0, 0) def test_delete_egress_bw_limit_for_port_port_not_exists(self): with mock.patch.object( self.br, "_set_egress_bw_limit_for_port" ) as set_egress_mock, mock.patch.object( self.br, "port_exists", return_value=False ) as port_exists_mock: self.br.delete_egress_bw_limit_for_port("test_port") port_exists_mock.assert_called_once_with("test_port") set_egress_mock.assert_not_called() def test_get_vifs_by_ids(self): db_list_res = [ {'name': 'qvo1', 'ofport': 1, 'external_ids': {'iface-id': 'pid1', 'attached-mac': '11'}}, {'name': 'qvo2', 'ofport': 2, 'external_ids': {'iface-id': 'pid2', 'attached-mac': '22'}}, {'name': 'qvo4', 'ofport': -1, 'external_ids': {'iface-id': 'pid4', 'attached-mac': '44'}}, ] self.br.get_ports_attributes = mock.Mock(return_value=db_list_res) 
self.br.ovsdb = mock.Mock() self.br.ovsdb.list_ports.return_value.execute.return_value = [ 'qvo1', 'qvo2', 'qvo4'] by_id = self.br.get_vifs_by_ids(['pid1', 'pid2', 'pid3', 'pid4']) # pid3 isn't on bridge and pid4 doesn't have a valid ofport self.assertIsNone(by_id['pid3']) self.assertIsNone(by_id['pid4']) self.assertEqual('pid1', by_id['pid1'].vif_id) self.assertEqual('qvo1', by_id['pid1'].port_name) self.assertEqual(1, by_id['pid1'].ofport) self.assertEqual('pid2', by_id['pid2'].vif_id) self.assertEqual('qvo2', by_id['pid2'].port_name) self.assertEqual(2, by_id['pid2'].ofport) self.br.get_ports_attributes.assert_has_calls( [mock.call('Interface', columns=['name', 'external_ids', 'ofport'], if_exists=True)]) def test_get_port_ofport_retry(self): with mock.patch.object( self.br, 'db_get_val', side_effect=[[], [], [], [], 1]): self.assertEqual(1, self.br._get_port_val('1', 'ofport')) def test_get_port_ofport_retry_fails(self): # reduce timeout for faster execution self.br.ovsdb_timeout = 1 # after 7 calls the retry will timeout and raise with mock.patch.object( self.br, 'db_get_val', side_effect=[[] for _ in range(7)]): self.assertRaises(tenacity.RetryError, self.br._get_port_val, '1', 'ofport') def test_set_controller_rate_limit(self): with mock.patch.object( self.br, "set_controller_field" ) as set_ctrl_field_mock: self.br.set_controller_rate_limit(200) set_ctrl_field_mock.assert_called_once_with( 'controller_rate_limit', 200) def test_set_controller_rate_limit_with_value_less_than_min(self): with mock.patch.object( self.br, "set_controller_field" ) as set_ctrl_field_mock: self.br.set_controller_rate_limit(50) set_ctrl_field_mock.assert_called_once_with( 'controller_rate_limit', ovs_lib.CTRL_RATE_LIMIT_MIN) def test_set_controller_burst_limit(self): with mock.patch.object( self.br, "set_controller_field" ) as set_ctrl_field_mock: self.br.set_controller_burst_limit(100) set_ctrl_field_mock.assert_called_once_with( 'controller_burst_limit', 100) def test_set_controller_burst_limit_with_value_less_than_min(self): with mock.patch.object( self.br, "set_controller_field" ) as set_ctrl_field_mock: self.br.set_controller_burst_limit(10) set_ctrl_field_mock.assert_called_once_with( 'controller_burst_limit', ovs_lib.CTRL_BURST_LIMIT_MIN) def test_hw_offload_enabled_false(self): config_mock1 = mock.PropertyMock(return_value={"other_config": {}}) config_mock2 = mock.PropertyMock( return_value={"other_config": {"hw-offload": "false"}}) config_mock3 = mock.PropertyMock( return_value={"other_config": {"hw-offload": "False"}}) for config_mock in (config_mock1, config_mock2, config_mock3): with mock.patch("neutron.agent.common.ovs_lib.OVSBridge.config", new_callable=config_mock): self.assertFalse(self.br.is_hw_offload_enabled) def test_hw_offload_enabled_true(self): config_mock1 = mock.PropertyMock( return_value={"other_config": {"hw-offload": "true"}}) config_mock2 = mock.PropertyMock( return_value={"other_config": {"hw-offload": "True"}}) for config_mock in (config_mock1, config_mock2): with mock.patch("neutron.agent.common.ovs_lib.OVSBridge.config", new_callable=config_mock): self.assertTrue(self.br.is_hw_offload_enabled) class TestDeferredOVSBridge(base.BaseTestCase): def setUp(self): super(TestDeferredOVSBridge, self).setUp() self.br = mock.Mock() self.mocked_do_action_flows = mock.patch.object( self.br, 'do_action_flows').start() self.add_flow_dict1 = dict(in_port=11, actions='drop') self.add_flow_dict2 = dict(in_port=12, actions='drop') self.mod_flow_dict1 = dict(in_port=21, actions='drop') 
self.mod_flow_dict2 = dict(in_port=22, actions='drop') self.del_flow_dict1 = dict(in_port=31) self.del_flow_dict2 = dict(in_port=32) def test_right_allowed_passthroughs(self): expected_passthroughs = ('add_port', 'add_tunnel_port', 'delete_port') self.assertEqual(expected_passthroughs, ovs_lib.DeferredOVSBridge.ALLOWED_PASSTHROUGHS) def _verify_mock_call(self, expected_calls): self.mocked_do_action_flows.assert_has_calls(expected_calls) self.assertEqual(len(expected_calls), len(self.mocked_do_action_flows.mock_calls)) def test_apply_on_exit(self): expected_calls = [ mock.call('add', [self.add_flow_dict1], False), mock.call('mod', [self.mod_flow_dict1], False), mock.call('del', [self.del_flow_dict1], False), ] with ovs_lib.DeferredOVSBridge(self.br) as deferred_br: deferred_br.add_flow(**self.add_flow_dict1) deferred_br.mod_flow(**self.mod_flow_dict1) deferred_br.delete_flows(**self.del_flow_dict1) self._verify_mock_call([]) self._verify_mock_call(expected_calls) def test_apply_on_exit_with_errors(self): try: with ovs_lib.DeferredOVSBridge(self.br) as deferred_br: deferred_br.add_flow(**self.add_flow_dict1) deferred_br.mod_flow(**self.mod_flow_dict1) deferred_br.delete_flows(**self.del_flow_dict1) raise Exception() except Exception: self._verify_mock_call([]) else: self.fail('Exception would be reraised') def test_apply(self): expected_calls = [ mock.call('add', [self.add_flow_dict1], False), mock.call('mod', [self.mod_flow_dict1], False), mock.call('del', [self.del_flow_dict1], False), ] with ovs_lib.DeferredOVSBridge(self.br) as deferred_br: deferred_br.add_flow(**self.add_flow_dict1) deferred_br.mod_flow(**self.mod_flow_dict1) deferred_br.delete_flows(**self.del_flow_dict1) self._verify_mock_call([]) deferred_br.apply_flows() self._verify_mock_call(expected_calls) self._verify_mock_call(expected_calls) def test_apply_order(self): expected_calls = [ mock.call( 'del', [self.del_flow_dict1, self.del_flow_dict2], False), mock.call( 'mod', [self.mod_flow_dict1, self.mod_flow_dict2], False), mock.call( 'add', [self.add_flow_dict1, self.add_flow_dict2], False), ] order = 'del', 'mod', 'add' with ovs_lib.DeferredOVSBridge(self.br, order=order) as deferred_br: deferred_br.add_flow(**self.add_flow_dict1) deferred_br.mod_flow(**self.mod_flow_dict1) deferred_br.delete_flows(**self.del_flow_dict1) deferred_br.delete_flows(**self.del_flow_dict2) deferred_br.add_flow(**self.add_flow_dict2) deferred_br.mod_flow(**self.mod_flow_dict2) self._verify_mock_call(expected_calls) def test_apply_full_ordered(self): expected_calls = [ mock.call('add', [self.add_flow_dict1], False), mock.call('mod', [self.mod_flow_dict1], False), mock.call( 'del', [self.del_flow_dict1, self.del_flow_dict2], False), mock.call('add', [self.add_flow_dict2], False), mock.call('mod', [self.mod_flow_dict2], False), ] with ovs_lib.DeferredOVSBridge(self.br, full_ordered=True) as deferred_br: deferred_br.add_flow(**self.add_flow_dict1) deferred_br.mod_flow(**self.mod_flow_dict1) deferred_br.delete_flows(**self.del_flow_dict1) deferred_br.delete_flows(**self.del_flow_dict2) deferred_br.add_flow(**self.add_flow_dict2) deferred_br.mod_flow(**self.mod_flow_dict2) self._verify_mock_call(expected_calls) def test_getattr_unallowed_attr(self): with ovs_lib.DeferredOVSBridge(self.br) as deferred_br: self.assertEqual(self.br.add_port, deferred_br.add_port) def test_getattr_unallowed_attr_failure(self): with ovs_lib.DeferredOVSBridge(self.br) as deferred_br: self.assertRaises(AttributeError, getattr, deferred_br, 'failure') 
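# The TestDeferredOVSBridge cases above all revolve around one batching
# idea: flow edits are queued per action ('add'/'mod'/'del') and only handed
# to do_action_flows() when apply_flows() runs or the context manager exits
# cleanly -- nothing is flushed if the block raises. A minimal,
# self-contained sketch of that pattern (names invented here; this is not
# the real ovs_lib.DeferredOVSBridge):

class _DeferredFlowBatchSketch(object):

    def __init__(self, bridge, order=('add', 'mod', 'del')):
        self.bridge = bridge  # any object exposing do_action_flows()
        self.order = order    # the flush order exercised by test_apply_order
        self._queue = {action: [] for action in order}

    def add_flow(self, **kwargs):
        self._queue['add'].append(kwargs)

    def mod_flow(self, **kwargs):
        self._queue['mod'].append(kwargs)

    def delete_flows(self, **kwargs):
        self._queue['del'].append(kwargs)

    def apply_flows(self):
        for action in self.order:
            flows, self._queue[action] = self._queue[action], []
            if flows:
                self.bridge.do_action_flows(action, flows)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type is None:
            # Mirrors test_apply_on_exit_with_errors: errors skip the flush.
            self.apply_flows()

# Usage mirrors the tests:
#     with _DeferredFlowBatchSketch(br) as batch:
#         batch.delete_flows(in_port=5)
#         batch.add_flow(in_port=5, actions='normal')
#     # flushed on exit, grouped by action and ordered: add, mod, del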
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/common/test_ovsdb_monitor.py0000644000175000017500000001053200000000000027642 0ustar00coreycorey00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_serialization import jsonutils from neutron.agent.common import async_process from neutron.agent.common import ovs_lib from neutron.agent.common import ovsdb_monitor from neutron.agent.ovsdb.native import helpers from neutron.tests import base class TestOvsdbMonitor(base.BaseTestCase): def setUp(self): super(TestOvsdbMonitor, self).setUp() mock.patch.object(helpers, 'enable_connection_uri').start() def test___init__(self): ovsdb_monitor.OvsdbMonitor('Interface') @mock.patch.object(async_process.AsyncProcess, '__init__') def test___init___with_columns(self, init): columns = ['col1', 'col2'] ovsdb_monitor.OvsdbMonitor('Interface', columns=columns) cmd = init.call_args_list[0][0][0] self.assertEqual('col1,col2', cmd[-1]) @mock.patch.object(async_process.AsyncProcess, '__init__') def test___init___with_format(self, init): ovsdb_monitor.OvsdbMonitor('Interface', format='blob') cmd = init.call_args_list[0][0][0] self.assertEqual('--format=blob', cmd[-1]) @mock.patch.object(async_process.AsyncProcess, '__init__') def test__init__with_connection_columns(self, init): conn_info = 'tcp:10.10.10.10:6640' columns = ['col1', 'col2'] ovsdb_monitor.OvsdbMonitor('Interface', columns=columns, ovsdb_connection=conn_info) cmd_all = init.call_args_list[0][0][0] cmd_expect = ['ovsdb-client', 'monitor', 'tcp:10.10.10.10:6640', 'Interface', 'col1,col2'] self.assertEqual(cmd_expect, cmd_all) class TestSimpleInterfaceMonitor(base.BaseTestCase): def setUp(self): super(TestSimpleInterfaceMonitor, self).setUp() self.monitor = ovsdb_monitor.SimpleInterfaceMonitor() def test_has_updates_is_false_if_active_with_no_output(self): with mock.patch.object(self.monitor, 'is_active', return_value=True): self.assertFalse(self.monitor.has_updates) def test_has_updates_after_calling_get_events_is_false(self): with mock.patch.object( self.monitor, 'process_events') as process_events: self.monitor.new_events = {'added': ['foo'], 'removed': ['foo1'], 'modified': []} self.assertTrue(self.monitor.has_updates) self.monitor.get_events() self.assertTrue(process_events.called) self.assertFalse(self.monitor.has_updates) def _get_event(self, ovs_id='e040fbec-0579-4990-8324-d338da33ae88', action="insert", name="fake_dev", ofport=10, external_ids=None, as_string=True): event = {"data": [[ovs_id, action, name, ["set", [ofport]], ["map", external_ids or []]]]} if as_string: event = jsonutils.dumps(event) return event def process_event_unassigned_of_port(self): output = self._get_event() with mock.patch.object( self.monitor, 'iter_stdout', return_value=[output]): self.monitor.process_events() self.assertEqual(self.monitor.new_events['added'][0]['ofport'], ovs_lib.UNASSIGNED_OFPORT) def 
test_process_changed_of_port(self): event0 = self._get_event(action="old", ofport=-1) event1 = self._get_event(action="new", ofport=10) expected_dev = { 'name': 'fake_dev', 'ofport': [10], 'external_ids': {} } with mock.patch.object( self.monitor, 'iter_stdout', return_value=[event0, event1]): self.monitor.process_events() self.assertIn(expected_dev, self.monitor.new_events['modified']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/common/test_placement_report.py0000644000175000017500000002133000000000000030317 0ustar00coreycorey00000000000000# Copyright 2018 Ericsson # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import uuid import mock from neutron.agent.common import placement_report from neutron.tests import base class DeferredCallTestCase(base.BaseTestCase): def test_defer_not_called(self): func = mock.Mock() placement_report.DeferredCall(func) func.assert_not_called() def test_execute(self): func = mock.Mock() deferred = placement_report.DeferredCall( func, 'some arg', kwarg='some kwarg') deferred.execute() func.assert_called_once_with('some arg', kwarg='some kwarg') def test___str__(self): def func(): pass deferred = placement_report.DeferredCall(func, 42, foo='bar') self.assertEqual("func(42, foo='bar')", str(deferred)) class PlacementStateTestCase(base.BaseTestCase): def setUp(self): super(PlacementStateTestCase, self).setUp() self.client_mock = mock.Mock() self.driver_uuid_namespace = uuid.UUID( '00000000-0000-0000-0000-000000000001') # uuid below generated by the following command: # uuid -v5 '00000000-0000-0000-0000-000000000001' 'fakehost' self.hypervisor1_rp_uuid = uuid.UUID( 'c0b4abe5-516f-54b8-b965-ff94060dcbcc') # uuid below generated by the following command: # uuid -v5 '00000000-0000-0000-0000-000000000001' 'fakehost2' self.hypervisor2_rp_uuid = uuid.UUID( '544155b7-1295-5f10-b5f0-eadc50abc6d4') self.kwargs = { 'rp_bandwidths': {}, 'rp_inventory_defaults': {}, 'driver_uuid_namespace': self.driver_uuid_namespace, 'agent_type': 'fake agent type', 'hypervisor_rps': { 'eth0': {'name': 'fakehost', 'uuid': self.hypervisor1_rp_uuid}, 'eth1': {'name': 'fakehost', 'uuid': self.hypervisor1_rp_uuid}, }, 'device_mappings': {}, 'supported_vnic_types': [], 'client': self.client_mock, } def test__deferred_update_physnet_traits(self): self.kwargs.update({ 'device_mappings': { 'physnet0': ['eth0'], 'physnet1': ['eth1'], }, 'rp_bandwidths': { 'eth0': {'egress': 1, 'ingress': 1}, }, }) state = placement_report.PlacementState(**self.kwargs) for deferred in state._deferred_update_physnet_traits(): deferred.execute() self.client_mock.update_trait.assert_called_with( name='CUSTOM_PHYSNET_PHYSNET0') def test__deferred_update_vnic_type_traits(self): self.kwargs.update({ 'supported_vnic_types': ['direct'], }) state = placement_report.PlacementState(**self.kwargs) for deferred in state._deferred_update_vnic_type_traits(): deferred.execute() self.client_mock.update_trait.assert_any_call( 
name='CUSTOM_VNIC_TYPE_DIRECT') def test__deferred_create_agent_rps(self): state = placement_report.PlacementState(**self.kwargs) for deferred in state._deferred_create_agent_rps(): deferred.execute() self.client_mock.ensure_resource_provider.assert_called_with( resource_provider={ 'name': 'fakehost:fake agent type', # uuid below generated by the following command: # uuid -v5 '00000000-0000-0000-0000-000000000001' 'fakehost' 'uuid': uuid.UUID('c0b4abe5-516f-54b8-b965-ff94060dcbcc'), 'parent_provider_uuid': self.hypervisor1_rp_uuid}) def test__deferred_create_agent_rps_multiple_hypervisors(self): self.kwargs['hypervisor_rps']['eth1'] = { 'name': 'fakehost2', 'uuid': self.hypervisor2_rp_uuid, } state = placement_report.PlacementState(**self.kwargs) for deferred in state._deferred_create_agent_rps(): deferred.execute() self.client_mock.ensure_resource_provider.assert_has_calls( any_order=True, calls=[ mock.call(resource_provider={ 'name': 'fakehost:fake agent type', # uuid below generated by the following command: # uuid -v5 '00000000-0000-0000-0000-000000000001' \ # 'fakehost' 'uuid': uuid.UUID('c0b4abe5-516f-54b8-b965-ff94060dcbcc'), 'parent_provider_uuid': self.hypervisor1_rp_uuid}), mock.call(resource_provider={ 'name': 'fakehost2:fake agent type', # uuid below generated by the following command: # uuid -v5 '00000000-0000-0000-0000-000000000001' \ # 'fakehost2' 'uuid': uuid.UUID('544155b7-1295-5f10-b5f0-eadc50abc6d4'), 'parent_provider_uuid': self.hypervisor2_rp_uuid}), ] ) def test_deferred_create_resource_providers(self): self.kwargs.update({ 'rp_bandwidths': { 'eth0': {'egress': 1, 'ingress': 1}, }, }) state = placement_report.PlacementState(**self.kwargs) for deferred in state.deferred_create_resource_providers(): deferred.execute() self.client_mock.ensure_resource_provider.assert_called_with( {'name': 'fakehost:fake agent type:eth0', # uuid below generated by the following command: # uuid -v5 '00000000-0000-0000-0000-000000000001' # 'fakehost:eth0' 'uuid': uuid.UUID('1ea6f823-bcf2-5dc5-9bee-4ee6177a6451'), # uuid below generated by the following command: # uuid -v5 '00000000-0000-0000-0000-000000000001' 'fakehost' 'parent_provider_uuid': uuid.UUID( 'c0b4abe5-516f-54b8-b965-ff94060dcbcc')}) def test_deferred_update_resource_provider_traits(self): self.kwargs.update({ 'device_mappings': { 'physnet0': ['eth0'], }, 'rp_bandwidths': { 'eth0': {'egress': 1, 'ingress': 1}, }, 'supported_vnic_types': ['normal'], }) state = placement_report.PlacementState(**self.kwargs) for deferred in state.deferred_update_resource_provider_traits(): deferred.execute() self.client_mock.update_resource_provider_traits.assert_called() self.assertEqual( # uuid below generated by the following command: # uuid -v5 '00000000-0000-0000-0000-000000000001' 'fakehost:eth0' uuid.UUID('1ea6f823-bcf2-5dc5-9bee-4ee6177a6451'), self.client_mock.update_resource_provider_traits.call_args[1][ 'resource_provider_uuid']) # NOTE(bence romsics): To avoid testing the _order_ of traits. 
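        # Casting both sides to set() makes the assertion order-insensitive,
        # e.g. set(['A', 'B']) == set(['B', 'A']), where the raw lists would
        # not compare equal.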
self.assertEqual( set(['CUSTOM_PHYSNET_PHYSNET0', 'CUSTOM_VNIC_TYPE_NORMAL']), set(self.client_mock.update_resource_provider_traits.call_args[1][ 'traits'])) def test_deferred_update_resource_provider_inventories(self): self.kwargs.update({ 'device_mappings': { 'physnet0': ['eth0'], }, 'rp_bandwidths': { 'eth0': {'egress': 100, 'ingress': None}, }, 'rp_inventory_defaults': { 'step_size': 10, 'max_unit': 50, }, }) state = placement_report.PlacementState(**self.kwargs) for deferred in state.deferred_update_resource_provider_inventories(): deferred.execute() self.client_mock.\ update_resource_provider_inventories.assert_called_with( # uuid below generated by the following command: # uuid -v5 '00000000-0000-0000-0000-000000000001' \ # 'fakehost:eth0' resource_provider_uuid=uuid.UUID( '1ea6f823-bcf2-5dc5-9bee-4ee6177a6451'), inventories={ 'NET_BW_EGR_KILOBIT_PER_SEC': { 'total': 100, 'step_size': 10, 'max_unit': 50}}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/common/test_polling.py0000644000175000017500000001132200000000000026420 0ustar00coreycorey00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.agent.common import base_polling from neutron.agent.common import polling from neutron.agent.ovsdb.native import helpers from neutron.tests import base class TestBasePollingManager(base.BaseTestCase): def setUp(self): super(TestBasePollingManager, self).setUp() self.pm = base_polling.BasePollingManager() def test__is_polling_required_should_not_be_implemented(self): self.assertRaises(NotImplementedError, self.pm._is_polling_required) def test_force_polling_sets_interval_attribute(self): self.assertFalse(self.pm._force_polling) self.pm.force_polling() self.assertTrue(self.pm._force_polling) def test_polling_completed_sets_interval_attribute(self): self.pm._polling_completed = False self.pm.polling_completed() self.assertTrue(self.pm._polling_completed) def mock_is_polling_required(self, return_value): return mock.patch.object(self.pm, '_is_polling_required', return_value=return_value) def test_is_polling_required_returns_true_when_forced(self): with self.mock_is_polling_required(False): self.pm.force_polling() self.assertTrue(self.pm.is_polling_required) self.assertFalse(self.pm._force_polling) def test_is_polling_required_returns_true_when_polling_not_completed(self): with self.mock_is_polling_required(False): self.pm._polling_completed = False self.assertTrue(self.pm.is_polling_required) def test_is_polling_required_returns_true_when_updates_are_present(self): with self.mock_is_polling_required(True): self.assertTrue(self.pm.is_polling_required) self.assertFalse(self.pm._polling_completed) def test_is_polling_required_returns_false_for_no_updates(self): with self.mock_is_polling_required(False): self.assertFalse(self.pm.is_polling_required) class TestAlwaysPoll(base.BaseTestCase): def test_is_polling_required_always_returns_true(self): 
pm = base_polling.AlwaysPoll() self.assertTrue(pm.is_polling_required) class TestGetPollingManager(base.BaseTestCase): def setUp(self): super(TestGetPollingManager, self).setUp() mock.patch.object(helpers, 'enable_connection_uri').start() def test_return_always_poll_by_default(self): with polling.get_polling_manager() as pm: self.assertEqual(pm.__class__, base_polling.AlwaysPoll) def test_manage_polling_minimizer(self): mock_target = 'neutron.agent.common.polling.InterfacePollingMinimizer' with mock.patch('%s.start' % mock_target) as mock_start: with mock.patch('%s.stop' % mock_target) as mock_stop: with polling.get_polling_manager(minimize_polling=True) as pm: self.assertEqual(pm.__class__, polling.InterfacePollingMinimizer) mock_stop.assert_has_calls([mock.call()]) mock_start.assert_has_calls([mock.call()]) class TestInterfacePollingMinimizer(base.BaseTestCase): def setUp(self): super(TestInterfacePollingMinimizer, self).setUp() mock.patch.object(helpers, 'enable_connection_uri').start() self.pm = polling.InterfacePollingMinimizer() def test_start_calls_monitor_start(self): with mock.patch.object(self.pm._monitor, 'start') as mock_start: self.pm.start() mock_start.assert_called_with(block=True) def test_stop_calls_monitor_stop(self): with mock.patch.object(self.pm._monitor, 'stop') as mock_stop: self.pm.stop() mock_stop.assert_called_with() def mock_has_updates(self, return_value): target = ('neutron.agent.common.ovsdb_monitor.SimpleInterfaceMonitor' '.has_updates') return mock.patch( target, new_callable=mock.PropertyMock(return_value=return_value), ) def test__is_polling_required_returns_when_updates_are_present(self): with self.mock_has_updates(True): self.assertTrue(self.pm._is_polling_required()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/common/test_resource_processing_queue.py0000644000175000017500000001033000000000000032241 0ustar00coreycorey00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
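# The ExclusiveResourceProcessor tests in this module all exercise a single
# invariant: the first processor created for a given resource id registers
# itself as that id's "master", later processors for the same id defer to
# it, and only the master's updates() iterator drains the shared queue.
# Below is a stripped-down sketch of that registry pattern for orientation
# (class and attribute names are invented here; this is not the real
# ExclusiveResourceProcessor):

class _ExclusiveWorkerSketch(object):
    _masters = {}   # resource id -> the worker that got there first
    _queues = {}    # resource id -> updates shared by all workers for the id

    def __init__(self, resource_id):
        self._id = resource_id
        self._masters.setdefault(resource_id, self)
        self._queues.setdefault(resource_id, [])

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if self._i_am_master():
            # Only the master unregisters; exiting duplicates is a no-op.
            del self._masters[self._id]
            del self._queues[self._id]

    def _i_am_master(self):
        return self._masters.get(self._id) is self

    def queue_update(self, update):
        self._queues[self._id].append(update)

    def updates(self):
        # Non-masters yield nothing, mirroring test_updates() below.
        while self._i_am_master() and self._queues[self._id]:
            yield self._queues[self._id].pop(0)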
# import datetime from oslo_utils import uuidutils from neutron.agent.common import resource_processing_queue as queue from neutron.tests import base _uuid = uuidutils.generate_uuid FAKE_ID = _uuid() FAKE_ID_2 = _uuid() PRIORITY_RPC = 0 class TestExclusiveResourceProcessor(base.BaseTestCase): def test_i_am_master(self): master = queue.ExclusiveResourceProcessor(FAKE_ID) not_master = queue.ExclusiveResourceProcessor(FAKE_ID) master_2 = queue.ExclusiveResourceProcessor(FAKE_ID_2) not_master_2 = queue.ExclusiveResourceProcessor(FAKE_ID_2) self.assertTrue(master._i_am_master()) self.assertFalse(not_master._i_am_master()) self.assertTrue(master_2._i_am_master()) self.assertFalse(not_master_2._i_am_master()) master.__exit__(None, None, None) master_2.__exit__(None, None, None) def test_master(self): master = queue.ExclusiveResourceProcessor(FAKE_ID) not_master = queue.ExclusiveResourceProcessor(FAKE_ID) master_2 = queue.ExclusiveResourceProcessor(FAKE_ID_2) not_master_2 = queue.ExclusiveResourceProcessor(FAKE_ID_2) self.assertEqual(master, master._master) self.assertEqual(master, not_master._master) self.assertEqual(master_2, master_2._master) self.assertEqual(master_2, not_master_2._master) master.__exit__(None, None, None) master_2.__exit__(None, None, None) def test__enter__(self): self.assertNotIn(FAKE_ID, queue.ExclusiveResourceProcessor._masters) master = queue.ExclusiveResourceProcessor(FAKE_ID) master.__enter__() self.assertIn(FAKE_ID, queue.ExclusiveResourceProcessor._masters) master.__exit__(None, None, None) def test__exit__(self): master = queue.ExclusiveResourceProcessor(FAKE_ID) not_master = queue.ExclusiveResourceProcessor(FAKE_ID) master.__enter__() self.assertIn(FAKE_ID, queue.ExclusiveResourceProcessor._masters) not_master.__enter__() not_master.__exit__(None, None, None) self.assertIn(FAKE_ID, queue.ExclusiveResourceProcessor._masters) master.__exit__(None, None, None) self.assertNotIn(FAKE_ID, queue.ExclusiveResourceProcessor._masters) def test_data_fetched_since(self): master = queue.ExclusiveResourceProcessor(FAKE_ID) self.assertEqual(datetime.datetime.min, master._get_resource_data_timestamp()) ts1 = datetime.datetime.utcnow() - datetime.timedelta(seconds=10) ts2 = datetime.datetime.utcnow() master.fetched_and_processed(ts2) self.assertEqual(ts2, master._get_resource_data_timestamp()) master.fetched_and_processed(ts1) self.assertEqual(ts2, master._get_resource_data_timestamp()) master.__exit__(None, None, None) def test_updates(self): master = queue.ExclusiveResourceProcessor(FAKE_ID) not_master = queue.ExclusiveResourceProcessor(FAKE_ID) master.queue_update(queue.ResourceUpdate(FAKE_ID, 0)) not_master.queue_update(queue.ResourceUpdate(FAKE_ID, 0)) for update in not_master.updates(): raise Exception("Only the master should process a resource") self.assertEqual(2, len([i for i in master.updates()])) def test_hit_retry_limit(self): tries = 1 rpqueue = queue.ResourceProcessingQueue() update = queue.ResourceUpdate(FAKE_ID, PRIORITY_RPC, tries=tries) rpqueue.add(update) self.assertFalse(update.hit_retry_limit()) rpqueue.add(update) self.assertTrue(update.hit_retry_limit()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/common/test_utils.py0000644000175000017500000000770400000000000026125 0ustar00coreycorey00000000000000# Copyright 2015 Red Hat, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import socket import mock from neutron.agent.common import utils from neutron.agent.linux import interface from neutron.conf.agent import common as config from neutron.tests import base from neutron.tests.unit import testlib_api class TestLoadInterfaceDriver(base.BaseTestCase): def setUp(self): super(TestLoadInterfaceDriver, self).setUp() self.conf = config.setup_conf() config.register_interface_opts(self.conf) config.register_interface_driver_opts_helper(self.conf) def test_load_interface_driver_not_set(self): with testlib_api.ExpectedException(SystemExit): utils.load_interface_driver(self.conf) def test_load_interface_driver_wrong_driver(self): self.conf.set_override('interface_driver', 'neutron.NonExistentDriver') with testlib_api.ExpectedException(SystemExit): utils.load_interface_driver(self.conf) def test_load_interface_driver_does_not_consume_irrelevant_errors(self): self.conf.set_override('interface_driver', 'neutron.agent.linux.interface.NullDriver') with mock.patch('oslo_utils.importutils.import_class', side_effect=RuntimeError()): with testlib_api.ExpectedException(RuntimeError): utils.load_interface_driver(self.conf) def test_load_interface_driver_success(self): self.conf.set_override('interface_driver', 'neutron.agent.linux.interface.NullDriver') self.assertIsInstance(utils.load_interface_driver(self.conf), interface.NullDriver) def test_load_null_interface_driver_success(self): self.conf.set_override('interface_driver', 'null') self.assertIsInstance(utils.load_interface_driver(self.conf), interface.NullDriver) def test_load_linuxbridge_interface_driver_success(self): self.conf.set_override('interface_driver', 'linuxbridge') self.assertIsInstance(utils.load_interface_driver(self.conf), interface.BridgeInterfaceDriver) def test_load_ovs_interface_driver_success(self): self.conf.set_override('interface_driver', 'openvswitch') self.assertIsInstance(utils.load_interface_driver(self.conf), interface.OVSInterfaceDriver) def test_load_interface_driver_as_alias_wrong_driver(self): self.conf.set_override('interface_driver', 'openvswitchXX') with testlib_api.ExpectedException(SystemExit): utils.load_interface_driver(self.conf) # TODO(bence romsics): rehome this to neutron_lib class TestDefaultRpHypervisors(base.BaseTestCase): def test_defaults(self): this_host = socket.gethostname() self.assertEqual( {'eth0': this_host, 'eth1': this_host}, utils.default_rp_hypervisors( hypervisors={}, device_mappings={'physnet0': ['eth0', 'eth1']}, ) ) self.assertEqual( {'eth0': 'thathost', 'eth1': this_host}, utils.default_rp_hypervisors( hypervisors={'eth0': 'thathost'}, device_mappings={'physnet0': ['eth0', 'eth1']}, ) ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4150455 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/dhcp/0000755000175000017500000000000000000000000022772 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/dhcp/__init__.py0000644000175000017500000000000000000000000025071 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/dhcp/test_agent.py0000644000175000017500000031744500000000000025517 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import copy import sys import uuid import eventlet import mock from neutron_lib.agent import constants as agent_consts from neutron_lib import constants as const from neutron_lib import exceptions from oslo_config import cfg import oslo_messaging from oslo_utils import netutils from oslo_utils import timeutils import testtools from neutron.agent.dhcp import agent as dhcp_agent from neutron.agent import dhcp_agent as entry from neutron.agent.linux import dhcp from neutron.agent.linux import interface from neutron.agent.metadata import driver as metadata_driver from neutron.common import config as common_config from neutron.common import utils from neutron.conf.agent import common as config from neutron.conf.agent import dhcp as dhcp_config from neutron.tests import base HOSTNAME = 'hostname' dev_man = dhcp.DeviceManager rpc_api = dhcp_agent.DhcpPluginApi DEVICE_MANAGER = '%s.%s' % (dev_man.__module__, dev_man.__name__) DHCP_PLUGIN = '%s.%s' % (rpc_api.__module__, rpc_api.__name__) FAKE_NETWORK_UUID = '12345678-1234-5678-1234567890ab' FAKE_NETWORK_DHCP_NS = "qdhcp-%s" % FAKE_NETWORK_UUID FAKE_TENANT_ID = 'aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa' FAKE_PRIORITY = 6 fake_subnet1_allocation_pools = dhcp.DictModel(id='', start='172.9.9.2', end='172.9.9.254') fake_subnet1 = dhcp.DictModel(id='bbbbbbbb-bbbb-bbbb-bbbbbbbbbbbb', network_id=FAKE_NETWORK_UUID, cidr='172.9.9.0/24', enable_dhcp=True, name='', tenant_id=FAKE_TENANT_ID, gateway_ip='172.9.9.1', host_routes=[], dns_nameservers=[], ip_version=const.IP_VERSION_4, ipv6_ra_mode=None, ipv6_address_mode=None, allocation_pools=fake_subnet1_allocation_pools) fake_subnet2_allocation_pools = dhcp.DictModel(id='', start='172.9.8.2', end='172.9.8.254') fake_subnet2 = dhcp.DictModel(id='dddddddd-dddd-dddd-dddddddddddd', network_id=FAKE_NETWORK_UUID, cidr='172.9.8.0/24', enable_dhcp=False, name='', tenant_id=FAKE_TENANT_ID, gateway_ip='172.9.8.1', host_routes=[], dns_nameservers=[], ip_version=const.IP_VERSION_4, allocation_pools=fake_subnet2_allocation_pools) fake_subnet3 = dhcp.DictModel(id='bbbbbbbb-1111-2222-bbbbbbbbbbbb', network_id=FAKE_NETWORK_UUID, cidr='192.168.1.1/24', enable_dhcp=True, ip_version=const.IP_VERSION_4) fake_ipv6_subnet = dhcp.DictModel(id='bbbbbbbb-1111-2222-bbbbbbbbbbbb', network_id=FAKE_NETWORK_UUID, cidr='2001:0db8::0/64', enable_dhcp=True, tenant_id=FAKE_TENANT_ID, gateway_ip='2001:0db8::1', ip_version=const.IP_VERSION_6, ipv6_ra_mode='slaac', ipv6_address_mode=None) fake_meta_subnet = 
dhcp.DictModel(dict(id='bbbbbbbb-1111-2222-bbbbbbbbbbbb', network_id=FAKE_NETWORK_UUID, cidr='169.254.169.252/30', gateway_ip='169.254.169.253', enable_dhcp=True, ip_version=const.IP_VERSION_4)) fake_fixed_ip1 = dhcp.DictModel(id='', subnet_id=fake_subnet1.id, ip_address='172.9.9.9') fake_fixed_ip_subnet2 = dhcp.DictModel(id='', subnet_id=fake_subnet2.id, ip_address='172.9.8.9') fake_fixed_ip2 = dhcp.DictModel(id='', subnet_id=fake_subnet1.id, ip_address='172.9.9.10') fake_fixed_ipv6 = dhcp.DictModel(id='', subnet_id=fake_ipv6_subnet.id, ip_address='2001:db8::a8bb:ccff:fedd:ee99') fake_meta_fixed_ip = dhcp.DictModel(id='', subnet=fake_meta_subnet, ip_address='169.254.169.254') fake_allocation_pool_subnet1 = dhcp.DictModel(id='', start='172.9.9.2', end='172.9.9.254') fake_port1 = dhcp.DictModel(id='12345678-1234-aaaa-1234567890ab', device_id='dhcp-12345678-1234-aaaa-1234567890ab', device_owner='', allocation_pools=fake_subnet1_allocation_pools, mac_address='aa:bb:cc:dd:ee:ff', network_id=FAKE_NETWORK_UUID, fixed_ips=[fake_fixed_ip1]) fake_dhcp_port = dhcp.DictModel( id='12345678-1234-aaaa-123456789022', device_id='dhcp-12345678-1234-aaaa-123456789022', device_owner=const.DEVICE_OWNER_DHCP, allocation_pools=fake_subnet1_allocation_pools, mac_address='aa:bb:cc:dd:ee:22', network_id=FAKE_NETWORK_UUID, fixed_ips=[fake_fixed_ip2]) fake_port2 = dhcp.DictModel(id='12345678-1234-aaaa-123456789000', device_id='dhcp-12345678-1234-aaaa-123456789000', device_owner='', mac_address='aa:bb:cc:dd:ee:99', network_id=FAKE_NETWORK_UUID, revision_number=77, fixed_ips=[fake_fixed_ip2]) fake_ipv6_port = dhcp.DictModel(id='12345678-1234-aaaa-123456789000', device_owner='', mac_address='aa:bb:cc:dd:ee:99', network_id=FAKE_NETWORK_UUID, fixed_ips=[fake_fixed_ipv6]) fake_meta_port = dhcp.DictModel(id='12345678-1234-aaaa-1234567890ab', mac_address='aa:bb:cc:dd:ee:ff', network_id=FAKE_NETWORK_UUID, device_owner=const.DEVICE_OWNER_ROUTER_INTF, device_id='forzanapoli', fixed_ips=[fake_meta_fixed_ip]) fake_meta_dvr_port = dhcp.DictModel(fake_meta_port) fake_meta_dvr_port.device_owner = const.DEVICE_OWNER_DVR_INTERFACE fake_dist_port = dhcp.DictModel(id='12345678-1234-aaaa-1234567890ab', mac_address='aa:bb:cc:dd:ee:ff', network_id=FAKE_NETWORK_UUID, device_owner=const.DEVICE_OWNER_DVR_INTERFACE, device_id='forzanapoli', fixed_ips=[fake_meta_fixed_ip]) fake_network = dhcp.NetModel(id=FAKE_NETWORK_UUID, tenant_id=FAKE_TENANT_ID, admin_state_up=True, subnets=[fake_subnet1, fake_subnet2], ports=[fake_port1]) fake_network_ipv6 = dhcp.NetModel(id=FAKE_NETWORK_UUID, tenant_id=FAKE_TENANT_ID, admin_state_up=True, subnets=[fake_ipv6_subnet], ports=[fake_ipv6_port]) fake_network_ipv6_ipv4 = dhcp.NetModel( id=FAKE_NETWORK_UUID, tenant_id=FAKE_TENANT_ID, admin_state_up=True, subnets=[fake_ipv6_subnet, fake_subnet1], ports=[fake_port1]) isolated_network = dhcp.NetModel(id=FAKE_NETWORK_UUID, tenant_id=FAKE_TENANT_ID, admin_state_up=True, subnets=[fake_subnet1], ports=[fake_port1]) nonisolated_dist_network = dhcp.NetModel(id=FAKE_NETWORK_UUID, tenant_id=FAKE_TENANT_ID, admin_state_up=True, subnets=[fake_subnet1], ports=[fake_port1, fake_port2]) empty_network = dhcp.NetModel(id=FAKE_NETWORK_UUID, tenant_id=FAKE_TENANT_ID, admin_state_up=True, subnets=[fake_subnet1], ports=[]) fake_meta_network = dhcp.NetModel(id=FAKE_NETWORK_UUID, tenant_id=FAKE_TENANT_ID, admin_state_up=True, subnets=[fake_meta_subnet], ports=[fake_meta_port]) fake_meta_dvr_network = dhcp.NetModel(fake_meta_network) fake_meta_dvr_network.ports = [fake_meta_dvr_port] 
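# The fixtures above rely on dhcp.DictModel: a dict whose keys are also
# readable and writable as attributes (fake_subnet1.id,
# fake_meta_dvr_port.device_owner = ...), and which can be built either from
# keyword arguments or by copying another model, as fake_meta_dvr_network is
# from fake_meta_network. A rough sketch of that behaviour (illustrative
# only; this is not the real DictModel):

class _AttrDictSketch(dict):

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        self[name] = value

# e.g.:
#     port = _AttrDictSketch(id='p1', device_owner='')
#     port.device_owner = const.DEVICE_OWNER_DVR_INTERFACE
#     # equivalent to port['device_owner'] = const.DEVICE_OWNER_DVR_INTERFACE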
fake_dist_network = dhcp.NetModel(id=FAKE_NETWORK_UUID,
                                  tenant_id=FAKE_TENANT_ID,
                                  admin_state_up=True,
                                  subnets=[fake_meta_subnet],
                                  ports=[fake_meta_port, fake_dist_port])

fake_down_network = dhcp.NetModel(id='12345678-dddd-dddd-1234567890ab',
                                  tenant_id=FAKE_TENANT_ID,
                                  admin_state_up=False,
                                  subnets=[], ports=[])


class TestDhcpAgent(base.BaseTestCase):

    METADATA_DEFAULT_IP = dhcp.METADATA_DEFAULT_IP

    def setUp(self):
        super(TestDhcpAgent, self).setUp()
        entry.register_options(cfg.CONF)
        cfg.CONF.set_override('interface_driver',
                              'neutron.agent.linux.interface.NullDriver')
        # disable setting up periodic state reporting
        cfg.CONF.set_override('report_interval', 0, 'AGENT')

        self.driver_cls_p = mock.patch(
            'neutron.agent.dhcp.agent.importutils.import_class')
        self.driver = mock.Mock(name='driver')
        self.driver.existing_dhcp_networks.return_value = []
        self.driver_cls = self.driver_cls_p.start()
        self.driver_cls.return_value = self.driver
        self.mock_makedirs_p = mock.patch("os.makedirs")
        self.mock_makedirs = self.mock_makedirs_p.start()
        self.mock_create_metadata_proxy_cfg = mock.patch(
            "neutron.agent.metadata.driver.HaproxyConfigurator")
        self.mock_create_metadata_proxy_cfg.start()
        self.mock_ip_wrapper_p = mock.patch("neutron.agent.linux.ip_lib."
                                            "IPWrapper")
        self.mock_ip_wrapper = self.mock_ip_wrapper_p.start()

    def test_init_resync_throttle_conf(self):
        try:
            dhcp_agent.DhcpAgent(HOSTNAME)
        except exceptions.InvalidConfigurationOption:
            self.fail("DHCP agent initialization unexpectedly raised an "
                      "InvalidConfigurationOption exception. No exception is "
                      "expected with the default configurations.")

        # default resync_interval = 5; default resync_throttle = 1
        cfg.CONF.set_override('resync_throttle', 10)
        # resync_throttle must be <= resync_interval; otherwise an
        # InvalidConfigurationOption exception is raised and an error
        # message is logged.
with mock.patch.object(dhcp_agent.LOG, 'exception') as log: with testtools.ExpectedException( exceptions.InvalidConfigurationOption): dhcp_agent.DhcpAgent(HOSTNAME) log.assert_any_call("DHCP agent must have resync_throttle <= " "resync_interval") def test_init_host(self): dhcp = dhcp_agent.DhcpAgent(HOSTNAME) with mock.patch.object(dhcp, 'sync_state') as sync_state: dhcp.init_host() sync_state.assert_called_once_with() def test_dhcp_agent_manager(self): state_rpc_str = 'neutron.agent.rpc.PluginReportStateAPI' # sync_state is needed for this test cfg.CONF.set_override('report_interval', 1, 'AGENT') mock_start_ready = mock.patch.object( dhcp_agent.DhcpAgentWithStateReport, 'start_ready_ports_loop', autospec=True).start() with mock.patch.object(dhcp_agent.DhcpAgentWithStateReport, 'periodic_resync', autospec=True) as mock_periodic_resync: with mock.patch(state_rpc_str) as state_rpc: test_args = [ 'dhcp', '--config-file', base.etcdir('neutron.conf') ] with mock.patch.object(sys, 'argv', test_args): cfg.CONF.register_opts(dhcp_config.DHCP_AGENT_OPTS) config.register_interface_driver_opts_helper(cfg.CONF) config.register_agent_state_opts_helper(cfg.CONF) config.register_interface_opts(cfg.CONF) common_config.init(sys.argv[1:]) agent_mgr = dhcp_agent.DhcpAgentWithStateReport( 'testhost') eventlet.greenthread.sleep(1) agent_mgr.after_start() mock_periodic_resync.assert_called_once_with(agent_mgr) mock_start_ready.assert_called_once_with(agent_mgr) state_rpc.assert_has_calls( [mock.call(mock.ANY), mock.call().report_state(mock.ANY, mock.ANY, mock.ANY)]) def test_run_completes_single_pass(self): with mock.patch(DEVICE_MANAGER): dhcp = dhcp_agent.DhcpAgent(HOSTNAME) attrs_to_mock = dict( (a, mock.DEFAULT) for a in ['periodic_resync', 'start_ready_ports_loop', '_process_loop']) with mock.patch.multiple(dhcp, **attrs_to_mock) as mocks: with mock.patch.object(dhcp_agent.eventlet, 'spawn_n') as spawn_n: dhcp.run() mocks['periodic_resync'].assert_called_once_with() mocks['start_ready_ports_loop'].assert_called_once_with() spawn_n.assert_called_once_with(mocks['_process_loop']) def test_call_driver(self): network = mock.Mock() network.id = '1' dhcp = dhcp_agent.DhcpAgent(cfg.CONF) self.assertTrue(dhcp.call_driver('foo', network)) self.driver.assert_called_once_with(cfg.CONF, mock.ANY, mock.ANY, mock.ANY, mock.ANY) def _test_call_driver_failure(self, exc=None, trace_level='exception', expected_sync=True): network = mock.Mock() network.id = '1' self.driver.return_value.foo.side_effect = exc or Exception dhcp = dhcp_agent.DhcpAgent(HOSTNAME) with mock.patch.object(dhcp, 'schedule_resync') as schedule_resync: self.assertIsNone(dhcp.call_driver('foo', network)) self.driver.assert_called_once_with(cfg.CONF, mock.ANY, mock.ANY, mock.ANY, mock.ANY) self.assertEqual(expected_sync, schedule_resync.called) def test_call_driver_ip_address_generation_failure(self): error = oslo_messaging.RemoteError( exc_type='IpAddressGenerationFailure') self._test_call_driver_failure(exc=error, expected_sync=False) def test_call_driver_failure(self): self._test_call_driver_failure() def test_call_driver_remote_error_net_not_found(self): self._test_call_driver_failure( exc=oslo_messaging.RemoteError(exc_type='NetworkNotFound'), trace_level='warning') def test_call_driver_network_not_found(self): self._test_call_driver_failure( exc=exceptions.NetworkNotFound(net_id='1'), trace_level='warning') def test_call_driver_conflict(self): self._test_call_driver_failure( exc=exceptions.Conflict(), trace_level='warning', expected_sync=False) def 
_test_sync_state_helper(self, known_net_ids, active_net_ids): active_networks = set(mock.Mock(id=netid) for netid in active_net_ids) with mock.patch(DHCP_PLUGIN) as plug: mock_plugin = mock.Mock() mock_plugin.get_active_networks_info.return_value = active_networks plug.return_value = mock_plugin dhcp = dhcp_agent.DhcpAgent(HOSTNAME) attrs_to_mock = dict((a, mock.DEFAULT) for a in ['disable_dhcp_helper', 'cache', 'safe_configure_dhcp_for_network']) with mock.patch.multiple(dhcp, **attrs_to_mock) as mocks: mocks['cache'].get_network_ids.return_value = known_net_ids mocks['cache'].get_port_ids.return_value = range(4) dhcp.sync_state() diff = set(known_net_ids) - set(active_net_ids) exp_disable = [mock.call(net_id) for net_id in diff] mocks['cache'].assert_has_calls([mock.call.get_network_ids()]) mocks['disable_dhcp_helper'].assert_has_calls(exp_disable) self.assertEqual(set(range(4)), dhcp.dhcp_ready_ports) def test_sync_state_initial(self): self._test_sync_state_helper([], ['a']) def test_sync_state_same(self): self._test_sync_state_helper(['a'], ['a']) def test_sync_state_disabled_net(self): self._test_sync_state_helper(['b'], ['a']) def test_sync_state_waitall(self): with mock.patch.object(dhcp_agent.eventlet.GreenPool, 'waitall') as w: active_net_ids = ['1', '2', '3', '4', '5'] known_net_ids = ['1', '2', '3', '4', '5'] self._test_sync_state_helper(known_net_ids, active_net_ids) w.assert_called_once_with() def test_sync_state_for_all_networks_plugin_error(self): with mock.patch(DHCP_PLUGIN) as plug: mock_plugin = mock.Mock() mock_plugin.get_active_networks_info.side_effect = Exception plug.return_value = mock_plugin with mock.patch.object(dhcp_agent.LOG, 'exception') as log: dhcp = dhcp_agent.DhcpAgent(HOSTNAME) with mock.patch.object(dhcp, 'schedule_resync') as schedule_resync: dhcp.sync_state() self.assertTrue(log.called) self.assertTrue(schedule_resync.called) def test_sync_state_for_one_network_plugin_error(self): with mock.patch(DHCP_PLUGIN) as plug: mock_plugin = mock.Mock() exc = Exception() mock_plugin.get_active_networks_info.side_effect = exc plug.return_value = mock_plugin with mock.patch.object(dhcp_agent.LOG, 'exception') as log: dhcp = dhcp_agent.DhcpAgent(HOSTNAME) with mock.patch.object(dhcp, 'schedule_resync') as schedule_resync: dhcp.sync_state(['foo_network']) self.assertTrue(log.called) schedule_resync.assert_called_with(exc, 'foo_network') def test_periodic_resync(self): dhcp = dhcp_agent.DhcpAgent(HOSTNAME) with mock.patch.object(dhcp_agent.eventlet, 'spawn') as spawn: dhcp.periodic_resync() spawn.assert_called_once_with(dhcp._periodic_resync_helper) def test_start_ready_ports_loop(self): dhcp = dhcp_agent.DhcpAgent(HOSTNAME) with mock.patch.object(dhcp_agent.eventlet, 'spawn') as spawn: dhcp.start_ready_ports_loop() spawn.assert_called_once_with(dhcp._dhcp_ready_ports_loop) def test__dhcp_ready_ports_doesnt_log_exception_on_timeout(self): dhcp = dhcp_agent.DhcpAgent(HOSTNAME) dhcp.dhcp_ready_ports = set(range(4)) with mock.patch.object(dhcp.plugin_rpc, 'dhcp_ready_on_ports', side_effect=oslo_messaging.MessagingTimeout): # exit after 2 iterations with mock.patch.object(dhcp_agent.eventlet, 'sleep', side_effect=[0, 0, RuntimeError]): with mock.patch.object(dhcp_agent.LOG, 'exception') as lex: with testtools.ExpectedException(RuntimeError): dhcp._dhcp_ready_ports_loop() self.assertFalse(lex.called) def test__dhcp_ready_ports_loop(self): dhcp = dhcp_agent.DhcpAgent(HOSTNAME) dhcp.dhcp_ready_ports = set(range(4)) with mock.patch.object(dhcp.plugin_rpc, 
'dhcp_ready_on_ports', side_effect=[RuntimeError, 0]) as ready: # exit after 2 iterations with mock.patch.object(dhcp_agent.eventlet, 'sleep', side_effect=[0, 0, RuntimeError]): with testtools.ExpectedException(RuntimeError): dhcp._dhcp_ready_ports_loop() # should have been called with all ports again after the failure ready.assert_has_calls([mock.call(set(range(4)))] * 2) def test_dhcp_ready_ports_loop_with_limit_ports_per_call(self): dhcp = dhcp_agent.DhcpAgent(HOSTNAME) sync_max = dhcp_agent.DHCP_READY_PORTS_SYNC_MAX port_count = sync_max + 1 dhcp.dhcp_ready_ports = set(range(port_count)) with mock.patch.object(dhcp.plugin_rpc, 'dhcp_ready_on_ports') as ready: # exit after 2 iterations with mock.patch.object(dhcp_agent.eventlet, 'sleep', side_effect=[0, 0, RuntimeError]): with testtools.ExpectedException(RuntimeError): dhcp._dhcp_ready_ports_loop() # all ports should have been processed self.assertEqual(set(), dhcp.dhcp_ready_ports) # two calls are expected, one with DHCP_READY_PORTS_SYNC_MAX ports, # second one with one port self.assertEqual(2, ready.call_count) self.assertEqual(sync_max, len(ready.call_args_list[0][0][0])) self.assertEqual(1, len(ready.call_args_list[1][0][0])) # all ports need to be ready ports_ready = (ready.call_args_list[0][0][0] | ready.call_args_list[1][0][0]) self.assertEqual(set(range(port_count)), ports_ready) def test_dhcp_ready_ports_loop_with_limit_ports_per_call_prio(self): dhcp = dhcp_agent.DhcpAgent(HOSTNAME) sync_max = dhcp_agent.DHCP_READY_PORTS_SYNC_MAX port_count = 4 # port set ranges must be unique to differentiate results dhcp.dhcp_prio_ready_ports = set(range(sync_max)) dhcp.dhcp_ready_ports = set(range(sync_max, sync_max + port_count)) with mock.patch.object(dhcp.plugin_rpc, 'dhcp_ready_on_ports') as ready: # exit after 1 iteration with mock.patch.object(dhcp_agent.eventlet, 'sleep', side_effect=[0, RuntimeError]): with testtools.ExpectedException(RuntimeError): dhcp._dhcp_ready_ports_loop() # only priority ports should have been processed self.assertEqual(set(), dhcp.dhcp_prio_ready_ports) self.assertEqual(set(range(sync_max, sync_max + port_count)), dhcp.dhcp_ready_ports) # one call is expected, with DHCP_READY_PORTS_SYNC_MAX ports ready.assert_called_once() self.assertEqual(sync_max, len(ready.call_args_list[0][0][0])) # priority ports need to be ready ports_ready = ready.call_args_list[0][0][0] self.assertEqual(set(range(sync_max)), ports_ready) # add some priority ports, to make sure they are processed dhcp.dhcp_prio_ready_ports = set(range(port_count)) with mock.patch.object(dhcp.plugin_rpc, 'dhcp_ready_on_ports') as ready: # exit after 1 iteration with mock.patch.object(dhcp_agent.eventlet, 'sleep', side_effect=[0, RuntimeError]): with testtools.ExpectedException(RuntimeError): dhcp._dhcp_ready_ports_loop() # all ports should have been processed self.assertEqual(set(), dhcp.dhcp_prio_ready_ports) self.assertEqual(set(), dhcp.dhcp_ready_ports) # one call is expected, with (port_count * 2) ports ready.assert_called_once() self.assertEqual(port_count * 2, len(ready.call_args_list[0][0][0])) # all ports need to be ready ports_ready = ready.call_args_list[0][0][0] all_ports = (set(range(port_count)) | set(range(sync_max, sync_max + port_count))) self.assertEqual(all_ports, ports_ready) def test_dhcp_ready_ports_updates_after_enable_dhcp(self): dhcp = dhcp_agent.DhcpAgent(HOSTNAME) self.assertEqual(set(), dhcp.dhcp_ready_ports) dhcp.configure_dhcp_for_network(fake_network) self.assertEqual({fake_port1.id}, dhcp.dhcp_ready_ports) def 
test_dhcp_metadata_destroy(self): cfg.CONF.set_override('force_metadata', True) cfg.CONF.set_override('enable_isolated_metadata', False) with mock.patch.object(metadata_driver, 'MetadataDriver') as md_cls: dhcp = dhcp_agent.DhcpAgent(HOSTNAME) dhcp.configure_dhcp_for_network(fake_network) md_cls.spawn_monitored_metadata_proxy.assert_called_once_with( mock.ANY, mock.ANY, mock.ANY, mock.ANY, bind_address=self.METADATA_DEFAULT_IP, network_id=fake_network.id) dhcp.disable_dhcp_helper(fake_network.id) md_cls.destroy_monitored_metadata_proxy.assert_called_once_with( mock.ANY, fake_network.id, mock.ANY, fake_network.namespace) def test_report_state_revival_logic(self): dhcp = dhcp_agent.DhcpAgentWithStateReport(HOSTNAME) with mock.patch.object(dhcp.state_rpc, 'report_state') as report_state,\ mock.patch.object(dhcp, "run"): report_state.return_value = agent_consts.AGENT_ALIVE dhcp._report_state() self.assertEqual({}, dhcp.needs_resync_reasons) report_state.return_value = agent_consts.AGENT_REVIVED dhcp._report_state() self.assertEqual(dhcp.needs_resync_reasons[None], ['Agent has just been revived']) def test_periodic_resync_helper(self): dhcp = dhcp_agent.DhcpAgent(HOSTNAME) resync_reasons = collections.OrderedDict( (('a', 'reason1'), ('b', 'reason2'))) dhcp.needs_resync_reasons = resync_reasons with mock.patch.object(dhcp, 'sync_state') as sync_state: sync_state.side_effect = RuntimeError with testtools.ExpectedException(RuntimeError): dhcp._periodic_resync_helper() sync_state.assert_called_once_with(resync_reasons.keys()) self.assertEqual(0, len(dhcp.needs_resync_reasons)) def test_periodic_resync_helper_with_event(self): with mock.patch.object(dhcp_agent.LOG, 'debug') as log: dhcp = dhcp_agent.DhcpAgent(HOSTNAME) dhcp.schedule_resync('reason1', 'a') dhcp.schedule_resync('reason1', 'b') reasons = dhcp.needs_resync_reasons.keys() with mock.patch.object(dhcp, 'sync_state') as sync_state: sync_state.side_effect = RuntimeError with testtools.ExpectedException(RuntimeError): dhcp._periodic_resync_helper() log.assert_any_call("Resync event has been scheduled") sync_state.assert_called_once_with(reasons) self.assertEqual(0, len(dhcp.needs_resync_reasons)) def test_populate_cache_on_start_without_active_networks_support(self): # emul dhcp driver that doesn't support retrieving of active networks self.driver.existing_dhcp_networks.side_effect = NotImplementedError with mock.patch.object(dhcp_agent.LOG, 'debug') as log: dhcp = dhcp_agent.DhcpAgent(HOSTNAME) self.driver.existing_dhcp_networks.assert_called_once_with( dhcp.conf, ) self.assertFalse(dhcp.cache.get_network_ids()) self.assertTrue(log.called) def test_populate_cache_on_start(self): networks = ['aaa', 'bbb'] self.driver.existing_dhcp_networks.return_value = networks dhcp = dhcp_agent.DhcpAgent(HOSTNAME) self.driver.existing_dhcp_networks.assert_called_once_with( dhcp.conf, ) self.assertEqual(set(networks), set(dhcp.cache.get_network_ids())) def test_none_interface_driver(self): cfg.CONF.set_override('interface_driver', None) self.assertRaises(SystemExit, dhcp.DeviceManager, cfg.CONF, mock.Mock()) def test_nonexistent_interface_driver(self): # Temporarily turn off mock, so could use the real import_class # to import interface_driver. 
self.driver_cls_p.stop() self.addCleanup(self.driver_cls_p.start) cfg.CONF.set_override('interface_driver', 'foo.bar') self.assertRaises(SystemExit, dhcp.DeviceManager, cfg.CONF, mock.Mock()) class TestLogArgs(base.BaseTestCase): def test_log_args_without_log_dir_and_file(self): conf_dict = {'debug': True, 'log_dir': None, 'log_file': None, 'use_syslog': True, 'syslog_log_facility': 'LOG_USER'} conf = dhcp.DictModel(conf_dict) expected_args = ['--debug', '--use-syslog', '--syslog-log-facility=LOG_USER'] args = config.get_log_args(conf, 'log_file_name') self.assertEqual(expected_args, args) def test_log_args_without_log_file(self): conf_dict = {'debug': True, 'log_dir': '/etc/tests', 'log_file': None, 'use_syslog': False, 'syslog_log_facility': 'LOG_USER'} conf = dhcp.DictModel(conf_dict) expected_args = ['--debug', '--log-file=log_file_name', '--log-dir=/etc/tests'] args = config.get_log_args(conf, 'log_file_name') self.assertEqual(expected_args, args) def test_log_args_with_log_dir_and_file(self): conf_dict = {'debug': True, 'log_dir': '/etc/tests', 'log_file': 'tests/filelog', 'use_syslog': False, 'syslog_log_facility': 'LOG_USER'} conf = dhcp.DictModel(conf_dict) expected_args = ['--debug', '--log-file=log_file_name', '--log-dir=/etc/tests/tests'] args = config.get_log_args(conf, 'log_file_name') self.assertEqual(expected_args, args) def test_log_args_without_log_dir(self): conf_dict = {'debug': True, 'log_file': 'tests/filelog', 'log_dir': None, 'use_syslog': False, 'syslog_log_facility': 'LOG_USER'} conf = dhcp.DictModel(conf_dict) expected_args = ['--debug', '--log-file=log_file_name', '--log-dir=tests'] args = config.get_log_args(conf, 'log_file_name') self.assertEqual(expected_args, args) def test_log_args_with_filelog_and_syslog(self): conf_dict = {'debug': True, 'log_file': 'tests/filelog', 'log_dir': '/etc/tests', 'use_syslog': True, 'syslog_log_facility': 'LOG_USER'} conf = dhcp.DictModel(conf_dict) expected_args = ['--debug', '--log-file=log_file_name', '--log-dir=/etc/tests/tests'] args = config.get_log_args(conf, 'log_file_name') self.assertEqual(expected_args, args) class TestDhcpAgentEventHandler(base.BaseTestCase): def setUp(self): super(TestDhcpAgentEventHandler, self).setUp() config.register_interface_driver_opts_helper(cfg.CONF) cfg.CONF.set_override('interface_driver', 'neutron.agent.linux.interface.NullDriver') entry.register_options(cfg.CONF) # register all dhcp cfg options self.plugin_p = mock.patch(DHCP_PLUGIN) plugin_cls = self.plugin_p.start() self.plugin = mock.Mock() plugin_cls.return_value = self.plugin self.cache_p = mock.patch('neutron.agent.dhcp.agent.NetworkCache') cache_cls = self.cache_p.start() self.cache = mock.Mock() self.cache.is_port_message_stale.return_value = False cache_cls.return_value = self.cache self.mock_makedirs_p = mock.patch("os.makedirs") self.mock_makedirs = self.mock_makedirs_p.start() self.mock_init_p = mock.patch('neutron.agent.dhcp.agent.' 'DhcpAgent._populate_networks_cache') self.mock_init = self.mock_init_p.start() self.dhcp = dhcp_agent.DhcpAgent(HOSTNAME) self.call_driver_p = mock.patch.object(self.dhcp, 'call_driver') self.call_driver = self.call_driver_p.start() self.schedule_resync_p = mock.patch.object(self.dhcp, 'schedule_resync') self.schedule_resync = self.schedule_resync_p.start() self.external_process_p = mock.patch( 'neutron.agent.linux.external_process.ProcessManager' ) self.external_process = self.external_process_p.start() self.mock_resize_p = mock.patch('neutron.agent.dhcp.agent.' 
'DhcpAgent._resize_process_pool') self.mock_resize = self.mock_resize_p.start() def _process_manager_constructor_call(self, ns=FAKE_NETWORK_DHCP_NS): return mock.call(conf=cfg.CONF, uuid=FAKE_NETWORK_UUID, namespace=ns, service='haproxy', default_cmd_callback=mock.ANY) def _enable_dhcp_helper(self, network, enable_isolated_metadata=False, is_isolated_network=False): self.dhcp._process_monitor = mock.Mock() if enable_isolated_metadata: cfg.CONF.set_override('enable_isolated_metadata', True) self.plugin.get_network_info.return_value = network self.dhcp.enable_dhcp_helper(network.id) self.plugin.assert_has_calls([ mock.call.get_network_info(network.id)]) self.call_driver.assert_called_once_with('enable', network) self.cache.assert_has_calls([mock.call.put(network)]) if is_isolated_network and enable_isolated_metadata: self.external_process.assert_has_calls([ self._process_manager_constructor_call(), mock.call().enable()], any_order=True) else: self.external_process.assert_has_calls([ self._process_manager_constructor_call(), mock.call().disable()]) def test_enable_dhcp_helper_enable_metadata_isolated_network(self): self._enable_dhcp_helper(isolated_network, enable_isolated_metadata=True, is_isolated_network=True) def test_enable_dhcp_helper_enable_metadata_no_gateway(self): isolated_network_no_gateway = copy.deepcopy(isolated_network) isolated_network_no_gateway.subnets[0].gateway_ip = None self._enable_dhcp_helper(isolated_network_no_gateway, enable_isolated_metadata=True, is_isolated_network=True) def test_enable_dhcp_helper_enable_metadata_nonisolated_network(self): nonisolated_network = copy.deepcopy(isolated_network) nonisolated_network.ports[0].device_owner = ( const.DEVICE_OWNER_ROUTER_INTF) nonisolated_network.ports[0].fixed_ips[0].ip_address = '172.9.9.1' self._enable_dhcp_helper(nonisolated_network, enable_isolated_metadata=True, is_isolated_network=False) def test_enable_dhcp_helper_enable_metadata_nonisolated_dist_network(self): nonisolated_dvr_network = copy.deepcopy(nonisolated_dist_network) nonisolated_dvr_network.ports[0].device_owner = ( const.DEVICE_OWNER_ROUTER_INTF) nonisolated_dvr_network.ports[0].fixed_ips[0].ip_address = '172.9.9.1' nonisolated_dvr_network.ports[1].device_owner = ( const.DEVICE_OWNER_DVR_INTERFACE) nonisolated_dvr_network.ports[1].fixed_ips[0].ip_address = '172.9.9.1' self._enable_dhcp_helper(nonisolated_dvr_network, enable_isolated_metadata=True, is_isolated_network=False) def test_enable_dhcp_helper_enable_metadata_empty_network(self): self._enable_dhcp_helper(empty_network, enable_isolated_metadata=True, is_isolated_network=True) def test_enable_dhcp_helper_enable_metadata_ipv6_ipv4_network(self): self._enable_dhcp_helper(fake_network_ipv6_ipv4, enable_isolated_metadata=True, is_isolated_network=True) def test_enable_dhcp_helper_driver_failure_ipv6_ipv4_network(self): self.plugin.get_network_info.return_value = fake_network_ipv6_ipv4 self.call_driver.return_value = False cfg.CONF.set_override('enable_isolated_metadata', True) with mock.patch.object(self.dhcp, 'enable_isolated_metadata_proxy') as enable_metadata: self.dhcp.enable_dhcp_helper(fake_network_ipv6_ipv4.id) self.plugin.assert_has_calls( [mock.call.get_network_info(fake_network_ipv6_ipv4.id)]) self.call_driver.assert_called_once_with('enable', fake_network_ipv6_ipv4) self.assertFalse(self.cache.called) self.assertFalse(enable_metadata.called) self.assertFalse(self.external_process.called) def test_enable_dhcp_helper(self): self._enable_dhcp_helper(fake_network) def 
test_enable_dhcp_helper_ipv6_network(self): self._enable_dhcp_helper(fake_network_ipv6) def test_enable_dhcp_helper_down_network(self): self.plugin.get_network_info.return_value = fake_down_network self.dhcp.enable_dhcp_helper(fake_down_network.id) self.plugin.assert_has_calls( [mock.call.get_network_info(fake_down_network.id)]) self.assertFalse(self.call_driver.called) self.assertFalse(self.cache.called) self.assertFalse(self.external_process.called) def test_enable_dhcp_helper_network_none(self): self.plugin.get_network_info.return_value = None self.dhcp.enable_dhcp_helper('fake_id') self.plugin.assert_has_calls( [mock.call.get_network_info('fake_id')]) self.assertFalse(self.call_driver.called) self.assertFalse(self.dhcp.schedule_resync.called) def test_enable_dhcp_helper_exception_during_rpc(self): self.plugin.get_network_info.side_effect = Exception with mock.patch.object(dhcp_agent.LOG, 'exception') as log: self.dhcp.enable_dhcp_helper(fake_network.id) self.plugin.assert_has_calls( [mock.call.get_network_info(fake_network.id)]) self.assertFalse(self.call_driver.called) self.assertTrue(log.called) self.assertTrue(self.schedule_resync.called) self.assertFalse(self.cache.called) self.assertFalse(self.external_process.called) def test_enable_dhcp_helper_driver_failure(self): self.plugin.get_network_info.return_value = fake_network self.call_driver.return_value = False cfg.CONF.set_override('enable_isolated_metadata', True) self.dhcp.enable_dhcp_helper(fake_network.id) self.plugin.assert_has_calls( [mock.call.get_network_info(fake_network.id)]) self.call_driver.assert_called_once_with('enable', fake_network) self.assertFalse(self.cache.called) self.assertFalse(self.external_process.called) def _disable_dhcp_helper_known_network(self, isolated_metadata=False): if isolated_metadata: cfg.CONF.set_override('enable_isolated_metadata', True) self.cache.get_network_by_id.return_value = fake_network self.dhcp.disable_dhcp_helper(fake_network.id) self.cache.assert_has_calls( [mock.call.get_network_by_id(fake_network.id)]) self.call_driver.assert_called_once_with('disable', fake_network) self.external_process.assert_has_calls([ self._process_manager_constructor_call(), mock.call().disable()]) def test_disable_dhcp_helper_known_network_isolated_metadata(self): self._disable_dhcp_helper_known_network(isolated_metadata=True) def test_disable_dhcp_helper_known_network(self): self._disable_dhcp_helper_known_network() def test_disable_dhcp_helper_unknown_network(self): self.cache.get_network_by_id.return_value = None self.dhcp.disable_dhcp_helper('abcdef') self.cache.assert_has_calls( [mock.call.get_network_by_id('abcdef')]) self.assertEqual(0, self.call_driver.call_count) self.assertFalse(self.external_process.called) def _disable_dhcp_helper_driver_failure(self, isolated_metadata=False): if isolated_metadata: cfg.CONF.set_override('enable_isolated_metadata', True) self.cache.get_network_by_id.return_value = fake_network self.call_driver.return_value = False self.dhcp.disable_dhcp_helper(fake_network.id) self.cache.assert_has_calls( [mock.call.get_network_by_id(fake_network.id)]) self.call_driver.assert_called_once_with('disable', fake_network) self.cache.assert_has_calls( [mock.call.get_network_by_id(fake_network.id)]) self.external_process.assert_has_calls([ self._process_manager_constructor_call(), mock.call().disable() ]) def test_disable_dhcp_helper_driver_failure_isolated_metadata(self): self._disable_dhcp_helper_driver_failure(isolated_metadata=True) def 
test_disable_dhcp_helper_driver_failure(self): self._disable_dhcp_helper_driver_failure() def test_enable_isolated_metadata_proxy(self): self.dhcp._process_monitor = mock.Mock() self.dhcp.enable_isolated_metadata_proxy(fake_network) self.external_process.assert_has_calls([ self._process_manager_constructor_call(), mock.call().enable() ], any_order=True) def test_disable_isolated_metadata_proxy(self): method_path = ('neutron.agent.metadata.driver.MetadataDriver' '.destroy_monitored_metadata_proxy') with mock.patch(method_path) as destroy: self.dhcp.disable_isolated_metadata_proxy(fake_network) destroy.assert_called_once_with(self.dhcp._process_monitor, fake_network.id, cfg.CONF, fake_network.namespace) def _test_enable_isolated_metadata_proxy(self, network): cfg.CONF.set_override('enable_metadata_network', True) cfg.CONF.set_override('debug', True) cfg.CONF.set_override('log_file', 'test.log') method_path = ('neutron.agent.metadata.driver.MetadataDriver' '.spawn_monitored_metadata_proxy') with mock.patch(method_path) as spawn: self.dhcp.enable_isolated_metadata_proxy(network) metadata_ip = dhcp.METADATA_DEFAULT_IP spawn.assert_called_once_with(self.dhcp._process_monitor, network.namespace, dhcp.METADATA_PORT, cfg.CONF, bind_address=metadata_ip, router_id='forzanapoli') def test_enable_isolated_metadata_proxy_with_metadata_network(self): self._test_enable_isolated_metadata_proxy(fake_meta_network) def test_enable_isolated_metadata_proxy_with_metadata_network_dvr(self): self._test_enable_isolated_metadata_proxy(fake_meta_dvr_network) def test_enable_isolated_metadata_proxy_with_dist_network(self): self._test_enable_isolated_metadata_proxy(fake_dist_network) def _test_disable_isolated_metadata_proxy(self, network): cfg.CONF.set_override('enable_metadata_network', True) method_path = ('neutron.agent.metadata.driver.MetadataDriver' '.destroy_monitored_metadata_proxy') with mock.patch(method_path) as destroy: self.dhcp.enable_isolated_metadata_proxy(network) self.dhcp.disable_isolated_metadata_proxy(network) destroy.assert_called_once_with(self.dhcp._process_monitor, 'forzanapoli', cfg.CONF, network.namespace) def test_disable_isolated_metadata_proxy_with_metadata_network(self): self._test_disable_isolated_metadata_proxy(fake_meta_network) def test_disable_isolated_metadata_proxy_with_metadata_network_dvr(self): self._test_disable_isolated_metadata_proxy(fake_meta_dvr_network) def test_disable_isolated_metadata_proxy_with_dist_network(self): self._test_disable_isolated_metadata_proxy(fake_dist_network) def test_network_create_end(self): payload = dict(network=dict(id=fake_network.id), priority=FAKE_PRIORITY) with mock.patch.object(self.dhcp, 'enable_dhcp_helper') as enable: self.dhcp.network_create_end(None, payload) self.dhcp._process_resource_update() enable.assert_called_once_with(fake_network.id) def test_network_update_end_admin_state_up(self): payload = dict(network=dict(id=fake_network.id, admin_state_up=True), priority=FAKE_PRIORITY) with mock.patch.object(self.dhcp, 'enable_dhcp_helper') as enable: self.dhcp.network_update_end(None, payload) self.dhcp._process_resource_update() enable.assert_called_once_with(fake_network.id) def test_network_update_end_admin_state_down(self): payload = dict(network=dict(id=fake_network.id, admin_state_up=False), priority=FAKE_PRIORITY) with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable: self.dhcp.network_update_end(None, payload) self.dhcp._process_resource_update() disable.assert_called_once_with(fake_network.id) def 
test_network_delete_end(self): payload = dict(network_id=fake_network.id, priority=FAKE_PRIORITY) with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable: self.dhcp.network_delete_end(None, payload) self.dhcp._process_resource_update() disable.assert_called_once_with(fake_network.id) def test_refresh_dhcp_helper_no_dhcp_enabled_networks(self): network = dhcp.NetModel(dict(id='net-id', tenant_id=FAKE_TENANT_ID, admin_state_up=True, subnets=[], ports=[])) self.cache.get_network_by_id.return_value = network self.plugin.get_network_info.return_value = network with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable: self.dhcp.refresh_dhcp_helper(network.id) disable.assert_called_once_with(network.id) self.assertFalse(self.cache.called) self.assertFalse(self.call_driver.called) self.cache.assert_has_calls( [mock.call.get_network_by_id('net-id')]) def test_refresh_dhcp_helper_exception_during_rpc(self): network = dhcp.NetModel(dict(id='net-id', tenant_id=FAKE_TENANT_ID, admin_state_up=True, subnets=[], ports=[])) self.cache.get_network_by_id.return_value = network self.plugin.get_network_info.side_effect = Exception with mock.patch.object(dhcp_agent.LOG, 'exception') as log: self.dhcp.refresh_dhcp_helper(network.id) self.assertFalse(self.call_driver.called) self.cache.assert_has_calls( [mock.call.get_network_by_id('net-id')]) self.assertTrue(log.called) self.assertTrue(self.dhcp.schedule_resync.called) def test_subnet_create_end(self): # We should call reload_allocations when subnet's enable_dhcp # attribute isn't True. payload = dict(subnet=dhcp.DictModel( dict(network_id=fake_network.id, enable_dhcp=False, cidr='99.99.99.0/24', ip_version=const.IP_VERSION_4)), priority=FAKE_PRIORITY) self.cache.get_network_by_id.return_value = fake_network new_net = copy.deepcopy(fake_network) new_net.subnets.append(payload['subnet']) self.plugin.get_network_info.return_value = new_net self.dhcp.subnet_create_end(None, payload) self.dhcp._process_resource_update() self.cache.assert_has_calls([mock.call.put(new_net)]) self.call_driver.assert_called_once_with('reload_allocations', new_net) # We should call restart when subnet's enable_dhcp attribute is True. 
self.call_driver.reset_mock() payload = dict(subnet=dhcp.DictModel( dict(network_id=fake_network.id, enable_dhcp=True, cidr='99.99.88.0/24', ip_version=const.IP_VERSION_4)), priority=FAKE_PRIORITY) new_net = copy.deepcopy(fake_network) new_net.subnets.append(payload['subnet']) self.plugin.get_network_info.return_value = new_net self.dhcp.subnet_create_end(None, payload) self.dhcp._process_resource_update() self.cache.assert_has_calls([mock.call.put(new_net)]) self.call_driver.assert_called_once_with('restart', new_net) def test_subnet_update_end(self): payload = dict(subnet=dict(network_id=fake_network.id), priority=FAKE_PRIORITY) self.cache.get_network_by_id.return_value = fake_network self.plugin.get_network_info.return_value = fake_network self.dhcp.subnet_update_end(None, payload) self.dhcp._process_resource_update() self.cache.assert_has_calls([mock.call.put(fake_network)]) self.call_driver.assert_called_once_with('reload_allocations', fake_network) # ensure all ports flagged as ready self.assertEqual({p.id for p in fake_network.ports}, self.dhcp.dhcp_ready_ports) def test_subnet_update_dhcp(self): payload = dict(subnet=dict(network_id=fake_network.id), priority=FAKE_PRIORITY) self.cache.get_network_by_id.return_value = fake_network new_net = copy.deepcopy(fake_network) new_subnet1 = copy.deepcopy(fake_subnet1) new_subnet2 = copy.deepcopy(fake_subnet2) new_subnet2.enable_dhcp = True new_net.subnets = [new_subnet1, new_subnet2] self.plugin.get_network_info.return_value = new_net self.dhcp.subnet_update_end(None, payload) self.dhcp._process_resource_update() self.call_driver.assert_called_once_with('restart', new_net) self.call_driver.reset_mock() self.cache.get_network_by_id.return_value = new_net new_net2 = copy.deepcopy(new_net) new_subnet1 = copy.deepcopy(new_subnet1) new_subnet1.enable_dhcp = False new_subnet2 = copy.deepcopy(new_subnet2) new_net2.subnets = [new_subnet1, new_subnet2] self.plugin.get_network_info.return_value = new_net2 self.dhcp.subnet_update_end(None, payload) self.dhcp._process_resource_update() self.call_driver.assert_called_once_with('restart', new_net2) def test_subnet_update_end_restart(self): new_state = dhcp.NetModel(dict(id=fake_network.id, tenant_id=fake_network.tenant_id, admin_state_up=True, subnets=[fake_subnet1, fake_subnet3], ports=[fake_port1])) payload = dict(subnet=dict(network_id=fake_network.id), priority=FAKE_PRIORITY) self.cache.get_network_by_id.return_value = fake_network self.plugin.get_network_info.return_value = new_state self.dhcp.subnet_update_end(None, payload) self.dhcp._process_resource_update() self.cache.assert_has_calls([mock.call.put(new_state)]) self.call_driver.assert_called_once_with('restart', new_state) def test_subnet_delete_end_no_network_id(self): prev_state = dhcp.NetModel(dict(id=fake_network.id, tenant_id=fake_network.tenant_id, admin_state_up=True, subnets=[fake_subnet1, fake_subnet3], ports=[fake_port1])) payload = dict(subnet_id=fake_subnet1.id, priority=FAKE_PRIORITY) self.cache.get_network_by_subnet_id.return_value = prev_state self.cache.get_network_by_id.return_value = prev_state self.plugin.get_network_info.return_value = fake_network self.dhcp.subnet_delete_end(None, payload) self.dhcp._process_resource_update() self.cache.assert_has_calls([ mock.call.get_network_by_subnet_id( 'bbbbbbbb-bbbb-bbbb-bbbbbbbbbbbb'), mock.call.get_network_by_subnet_id( 'bbbbbbbb-bbbb-bbbb-bbbbbbbbbbbb'), mock.call.get_network_by_id('12345678-1234-5678-1234567890ab'), mock.call.put(fake_network)]) 
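        # NOTE(editor): assert_has_calls() with the default any_order=False
        # only requires the listed calls to appear consecutively somewhere
        # in the mock's call history; extra cache calls before or after
        # this block would still pass the assertion.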
self.call_driver.assert_called_once_with('restart', fake_network) def test_subnet_update_end_delete_payload(self): prev_state = dhcp.NetModel(dict(id=fake_network.id, tenant_id=fake_network.tenant_id, admin_state_up=True, subnets=[fake_subnet1, fake_subnet3], ports=[fake_port1])) payload = dict(subnet_id=fake_subnet1.id, network_id=fake_network.id, priority=FAKE_PRIORITY) self.cache.get_network_by_subnet_id.return_value = prev_state self.cache.get_network_by_id.return_value = prev_state self.plugin.get_network_info.return_value = fake_network self.dhcp.subnet_delete_end(None, payload) self.dhcp._process_resource_update() self.cache.assert_has_calls([ mock.call.get_network_by_subnet_id( 'bbbbbbbb-bbbb-bbbb-bbbbbbbbbbbb'), mock.call.get_network_by_id(FAKE_NETWORK_UUID), mock.call.put(fake_network)]) self.call_driver.assert_called_once_with('restart', fake_network) def test_port_update_end(self): self.reload_allocations_p = mock.patch.object(self.dhcp, 'reload_allocations') self.reload_allocations = self.reload_allocations_p.start() payload = dict(port=copy.deepcopy(fake_port2)) self.cache.get_network_by_id.return_value = fake_network self.dhcp.port_update_end(None, payload) self.dhcp._process_resource_update() self.reload_allocations.assert_called_once_with(fake_port2, fake_network, prio=True) def test_reload_allocations(self): self.cache.get_port_by_id.return_value = fake_port2 with mock.patch.object( self.dhcp, 'update_isolated_metadata_proxy') as ump: self.dhcp.reload_allocations(fake_port2, fake_network) self.cache.assert_has_calls([mock.call.put_port(mock.ANY)]) self.call_driver.assert_called_once_with('reload_allocations', fake_network) self.assertTrue(ump.called) def test_port_create_end(self): self.reload_allocations_p = mock.patch.object(self.dhcp, 'reload_allocations') self.reload_allocations = self.reload_allocations_p.start() payload = dict(port=copy.deepcopy(fake_port2)) self.cache.get_network_by_id.return_value = fake_network self.dhcp.port_create_end(None, payload) self.dhcp._process_resource_update() self.reload_allocations.assert_called_once_with(fake_port2, fake_network, prio=True) def test_port_create_end_no_resync_if_same_port_already_in_cache(self): self.reload_allocations_p = mock.patch.object(self.dhcp, 'reload_allocations') self.reload_allocations = self.reload_allocations_p.start() payload = dict(port=copy.deepcopy(fake_port2)) cached_port = copy.deepcopy(fake_port2) new_fake_network = copy.deepcopy(fake_network) new_fake_network.ports = [cached_port] self.cache.get_network_by_id.return_value = new_fake_network self.dhcp.port_create_end(None, payload) self.dhcp._process_resource_update() self.reload_allocations.assert_called_once_with(fake_port2, new_fake_network, prio=True) self.schedule_resync.assert_not_called() def test_port_update_change_ip_on_port(self): payload = dict(port=fake_port1, priority=FAKE_PRIORITY) self.cache.get_network_by_id.return_value = fake_network updated_fake_port1 = copy.deepcopy(fake_port1) updated_fake_port1.fixed_ips[0].ip_address = '172.9.9.99' self.cache.get_port_by_id.return_value = updated_fake_port1 with mock.patch.object( self.dhcp, 'update_isolated_metadata_proxy') as ump: self.dhcp.port_update_end(None, payload) self.dhcp._process_resource_update() self.cache.assert_has_calls( [mock.call.get_network_by_id(fake_port1.network_id), mock.call.put_port(mock.ANY)]) self.call_driver.assert_has_calls( [mock.call.call_driver('reload_allocations', fake_network)]) self.assertTrue(ump.called) def 
test_port_update_change_subnet_on_dhcp_agents_port(self): self.cache.get_network_by_id.return_value = fake_network self.cache.get_port_by_id.return_value = fake_port1 payload = dict(port=copy.deepcopy(fake_port1), priority=FAKE_PRIORITY) device_id = utils.get_dhcp_agent_device_id( payload['port']['network_id'], self.dhcp.conf.host) payload['port']['fixed_ips'][0]['subnet_id'] = '77777-7777' payload['port']['device_id'] = device_id self.dhcp.port_update_end(None, payload) self.dhcp._process_resource_update() self.assertFalse(self.call_driver.called) def test_port_update_change_ip_on_dhcp_agents_port(self): self.cache.get_network_by_id.return_value = fake_network self.cache.get_port_by_id.return_value = fake_port1 payload = dict(port=copy.deepcopy(fake_port1), priority=FAKE_PRIORITY) device_id = utils.get_dhcp_agent_device_id( payload['port']['network_id'], self.dhcp.conf.host) payload['port']['fixed_ips'][0]['ip_address'] = '172.9.9.99' payload['port']['device_id'] = device_id self.dhcp.port_update_end(None, payload) self.dhcp._process_resource_update() self.call_driver.assert_has_calls( [mock.call.call_driver('restart', fake_network)]) def test_port_update_change_ip_on_dhcp_agents_port_cache_miss(self): self.cache.get_network_by_id.return_value = fake_network self.cache.get_port_by_id.return_value = None payload = dict(port=copy.deepcopy(fake_port1), priority=FAKE_PRIORITY) device_id = utils.get_dhcp_agent_device_id( payload['port']['network_id'], self.dhcp.conf.host) payload['port']['fixed_ips'][0]['ip_address'] = '172.9.9.99' payload['port']['device_id'] = device_id self.dhcp.port_update_end(None, payload) self.dhcp._process_resource_update() self.schedule_resync.assert_called_once_with(mock.ANY, fake_port1.network_id) def test_port_create_duplicate_ip_on_dhcp_agents_same_network(self): self.cache.get_network_by_id.return_value = fake_network payload = dict(port=copy.deepcopy(fake_port2)) duplicate_ip = fake_port1['fixed_ips'][0]['ip_address'] payload['port']['fixed_ips'][0]['ip_address'] = duplicate_ip self.dhcp.port_create_end(None, payload) self.dhcp._process_resource_update() self.schedule_resync.assert_called_once_with(mock.ANY, fake_port2.network_id) def test_port_update_on_dhcp_agents_port_no_ip_change(self): self.cache.get_network_by_id.return_value = fake_network self.cache.get_port_by_id.return_value = fake_port1 payload = dict(port=fake_port1, priority=FAKE_PRIORITY) device_id = utils.get_dhcp_agent_device_id( payload['port']['network_id'], self.dhcp.conf.host) payload['port']['device_id'] = device_id self.dhcp.port_update_end(None, payload) self.dhcp._process_resource_update() self.call_driver.assert_has_calls( [mock.call.call_driver('reload_allocations', fake_network)]) def test_port_delete_end_no_network_id(self): payload = dict(port_id=fake_port2.id, priority=FAKE_PRIORITY) self.cache.get_network_by_id.return_value = fake_network self.cache.get_port_by_id.return_value = fake_port2 with mock.patch.object( self.dhcp, 'update_isolated_metadata_proxy') as ump: self.dhcp.port_delete_end(None, payload) self.dhcp._process_resource_update() self.cache.assert_has_calls( [mock.call.get_port_by_id(fake_port2.id), mock.call.get_port_by_id(fake_port2.id), mock.call.add_to_deleted_ports(fake_port2.id), mock.call.get_network_by_id(fake_network.id), mock.call.remove_port(fake_port2)]) self.call_driver.assert_has_calls( [mock.call.call_driver('reload_allocations', fake_network)]) self.assertTrue(ump.called) def test_port_delete_end(self): payload = dict(port_id=fake_port2.id, 
network_id=fake_network.id, priority=FAKE_PRIORITY) self.cache.get_network_by_id.return_value = fake_network self.cache.get_port_by_id.return_value = fake_port2 with mock.patch.object( self.dhcp, 'update_isolated_metadata_proxy') as ump: self.dhcp.port_delete_end(None, payload) self.dhcp._process_resource_update() self.cache.assert_has_calls( [mock.call.get_port_by_id(fake_port2.id), mock.call.add_to_deleted_ports(fake_port2.id), mock.call.get_network_by_id(fake_network.id), mock.call.remove_port(fake_port2)]) self.call_driver.assert_has_calls( [mock.call.call_driver('reload_allocations', fake_network)]) self.assertTrue(ump.called) def test_port_delete_end_unknown_port(self): payload = dict(port_id='unknown', network_id='unknown', priority=FAKE_PRIORITY) self.cache.get_port_by_id.return_value = None self.dhcp.port_delete_end(None, payload) self.dhcp._process_resource_update() self.cache.assert_has_calls([mock.call.get_port_by_id('unknown')]) self.assertEqual(self.call_driver.call_count, 0) def test_port_delete_end_agents_port(self): port = dhcp.DictModel(copy.deepcopy(fake_port1)) device_id = utils.get_dhcp_agent_device_id( port.network_id, self.dhcp.conf.host) port['device_id'] = device_id self.cache.get_network_by_id.return_value = fake_network self.cache.get_port_by_id.return_value = port self.dhcp.port_delete_end(None, {'port_id': port.id, 'network_id': fake_network.id, 'priority': FAKE_PRIORITY}) self.dhcp._process_resource_update() self.call_driver.assert_has_calls( [mock.call.call_driver('disable', fake_network)]) class TestDhcpPluginApiProxy(base.BaseTestCase): def _test_dhcp_api(self, method, **kwargs): proxy = dhcp_agent.DhcpPluginApi('foo', host='foo') with mock.patch.object(proxy.client, 'call') as rpc_mock,\ mock.patch.object(proxy.client, 'prepare') as prepare_mock: prepare_mock.return_value = proxy.client rpc_mock.return_value = kwargs.pop('return_value', []) prepare_args = {} if 'version' in kwargs: prepare_args['version'] = kwargs.pop('version') retval = getattr(proxy, method)(**kwargs) self.assertEqual(retval, rpc_mock.return_value) prepare_mock.assert_called_once_with(**prepare_args) kwargs['host'] = proxy.host rpc_mock.assert_called_once_with(mock.ANY, method, **kwargs) def test_get_active_networks_info(self): self._test_dhcp_api('get_active_networks_info', version='1.1') def test_get_network_info(self): self._test_dhcp_api('get_network_info', network_id='fake_id', return_value=None) def test_create_dhcp_port(self): self._test_dhcp_api('create_dhcp_port', port='fake_port', return_value=None, version='1.1') def test_update_dhcp_port(self): self._test_dhcp_api('update_dhcp_port', port_id='fake_id', port='fake_port', return_value=None, version='1.1') def test_release_dhcp_port(self): self._test_dhcp_api('release_dhcp_port', network_id='fake_id', device_id='fake_id_2') class TestNetworkCache(base.BaseTestCase): def setUp(self): super(TestNetworkCache, self).setUp() self.nc = dhcp_agent.NetworkCache() def test_update_of_deleted_port_ignored(self): self.nc.put(fake_network) self.nc.add_to_deleted_ports(fake_port2['id']) self.assertTrue(self.nc.is_port_message_stale(fake_port2)) def test_stale_update_ignored(self): self.nc.put(fake_network) self.nc.put_port(fake_port2) stale = copy.copy(fake_port2) stale['revision_number'] = 2 self.assertTrue(self.nc.is_port_message_stale(stale)) def test_put_network(self): self.nc.put(fake_network) self.assertEqual(self.nc.cache, {fake_network.id: fake_network}) self.assertEqual(self.nc.subnet_lookup, {fake_subnet1.id: fake_network.id, 
fake_subnet2.id: fake_network.id}) self.assertEqual(self.nc.port_lookup, {fake_port1.id: fake_network.id}) def test_put_network_existing(self): prev_network_info = mock.Mock() with mock.patch.object(self.nc, 'remove') as remove: self.nc.cache[fake_network.id] = prev_network_info self.nc.put(fake_network) remove.assert_called_once_with(prev_network_info) self.assertEqual(self.nc.cache, {fake_network.id: fake_network}) self.assertEqual(self.nc.subnet_lookup, {fake_subnet1.id: fake_network.id, fake_subnet2.id: fake_network.id}) self.assertEqual(self.nc.port_lookup, {fake_port1.id: fake_network.id}) def test_remove_network(self): self.nc.cache = {fake_network.id: fake_network} self.nc.subnet_lookup = {fake_subnet1.id: fake_network.id, fake_subnet2.id: fake_network.id} self.nc.port_lookup = {fake_port1.id: fake_network.id} self.nc.remove(fake_network) self.assertEqual(0, len(self.nc.cache)) self.assertEqual(0, len(self.nc.subnet_lookup)) self.assertEqual(0, len(self.nc.port_lookup)) def test_get_network_by_id(self): self.nc.put(fake_network) self.assertEqual(self.nc.get_network_by_id(fake_network.id), fake_network) def test_get_network_ids(self): self.nc.put(fake_network) self.assertEqual(list(self.nc.get_network_ids()), [fake_network.id]) def test_get_network_by_subnet_id(self): self.nc.put(fake_network) self.assertEqual(self.nc.get_network_by_subnet_id(fake_subnet1.id), fake_network) def test_get_network_by_port_id(self): self.nc.put(fake_network) self.assertEqual(self.nc.get_network_by_port_id(fake_port1.id), fake_network) def test_get_port_ids(self): fake_net = dhcp.NetModel( dict(id=FAKE_NETWORK_UUID, tenant_id=FAKE_TENANT_ID, subnets=[fake_subnet1], ports=[fake_port1])) self.nc.put(fake_net) self.nc.put_port(fake_port2) self.assertEqual(set([fake_port1['id'], fake_port2['id']]), set(self.nc.get_port_ids())) def test_get_port_ids_limited_nets(self): fake_net = dhcp.NetModel( dict(id=FAKE_NETWORK_UUID, tenant_id=FAKE_TENANT_ID, subnets=[fake_subnet1], ports=[fake_port1])) fake_port2 = copy.deepcopy(fake_port1) fake_port2['id'] = 'fp2' fake_port2['network_id'] = '12345678-1234-5678-1234567890ac' fake_net2 = dhcp.NetModel( dict(id='12345678-1234-5678-1234567890ac', tenant_id=FAKE_TENANT_ID, subnets=[fake_subnet1], ports=[fake_port2])) self.nc.put(fake_net) self.nc.put(fake_net2) self.assertEqual(set([fake_port1['id']]), set(self.nc.get_port_ids([fake_net.id, 'net2']))) self.assertEqual(set(), set(self.nc.get_port_ids(['net2']))) self.assertEqual(set([fake_port2['id']]), set(self.nc.get_port_ids([fake_port2.network_id, 'net2']))) def test_put_port(self): fake_net = dhcp.NetModel( dict(id=FAKE_NETWORK_UUID, tenant_id=FAKE_TENANT_ID, subnets=[fake_subnet1], ports=[fake_port1])) self.nc.put(fake_net) self.nc.put_port(fake_port2) self.assertEqual(2, len(self.nc.port_lookup)) self.assertIn(fake_port2, fake_net.ports) def test_put_port_existing(self): fake_net = dhcp.NetModel( dict(id=FAKE_NETWORK_UUID, tenant_id=FAKE_TENANT_ID, subnets=[fake_subnet1], ports=[fake_port1, fake_port2])) self.nc.put(fake_net) self.nc.put_port(fake_port2) self.assertEqual(2, len(self.nc.port_lookup)) self.assertIn(fake_port2, fake_net.ports) def test_remove_port_existing(self): fake_net = dhcp.NetModel( dict(id=FAKE_NETWORK_UUID, tenant_id=FAKE_TENANT_ID, subnets=[fake_subnet1], ports=[fake_port1, fake_port2])) self.nc.put(fake_net) self.nc.remove_port(fake_port2) self.assertEqual(1, len(self.nc.port_lookup)) self.assertNotIn(fake_port2, fake_net.ports) def test_get_port_by_id(self): self.nc.put(fake_network) 
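        # NOTE(editor): put() also records fake_port1 in the cache's
        # port_lookup index (see test_put_network above), so the by-id
        # lookup below is expected to be a direct dictionary hit rather
        # than a scan of every cached network.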
self.assertEqual(self.nc.get_port_by_id(fake_port1.id), fake_port1) def _reset_deleted_port_max_age(self, old_value): dhcp_agent.DELETED_PORT_MAX_AGE = old_value def test_cleanup_deleted_ports(self): self.addCleanup(self._reset_deleted_port_max_age, dhcp_agent.DELETED_PORT_MAX_AGE) dhcp_agent.DELETED_PORT_MAX_AGE = 10 with mock.patch.object(timeutils, 'utcnow_ts') as mock_utcnow: mock_utcnow.side_effect = [1, 2, 11] self.nc.add_to_deleted_ports(fake_port1.id) self.nc.add_to_deleted_ports(fake_port2.id) self.nc.add_to_deleted_ports(fake_port2.id) self.assertEqual({fake_port1.id, fake_port2.id}, self.nc._deleted_ports) self.assertEqual([(1, fake_port1.id), (2, fake_port2.id)], self.nc._deleted_ports_ts) self.nc.cleanup_deleted_ports() self.assertEqual({fake_port2.id}, self.nc._deleted_ports) self.assertEqual([(2, fake_port2.id)], self.nc._deleted_ports_ts) def test_cleanup_deleted_ports_no_old_ports(self): self.addCleanup(self._reset_deleted_port_max_age, dhcp_agent.DELETED_PORT_MAX_AGE) dhcp_agent.DELETED_PORT_MAX_AGE = 10 with mock.patch.object(timeutils, 'utcnow_ts') as mock_utcnow: mock_utcnow.side_effect = [1, 2, 3] self.nc.add_to_deleted_ports(fake_port1.id) self.nc.add_to_deleted_ports(fake_port2.id) self.assertEqual({fake_port1.id, fake_port2.id}, self.nc._deleted_ports) self.assertEqual([(1, fake_port1.id), (2, fake_port2.id)], self.nc._deleted_ports_ts) self.nc.cleanup_deleted_ports() self.assertEqual({fake_port1.id, fake_port2.id}, self.nc._deleted_ports) self.assertEqual([(1, fake_port1.id), (2, fake_port2.id)], self.nc._deleted_ports_ts) def test_cleanup_deleted_ports_no_ports(self): self.assertEqual(set(), self.nc._deleted_ports) self.assertEqual([], self.nc._deleted_ports_ts) self.nc.cleanup_deleted_ports() self.assertEqual(set(), self.nc._deleted_ports) self.assertEqual([], self.nc._deleted_ports_ts) def test_cleanup_deleted_ports_loop_call(self): self.addCleanup(self._reset_deleted_port_max_age, dhcp_agent.DELETED_PORT_MAX_AGE) dhcp_agent.DELETED_PORT_MAX_AGE = 2 nc = dhcp_agent.NetworkCache() nc.add_to_deleted_ports(fake_port1.id) utils.wait_until_true(lambda: nc._deleted_ports == set(), timeout=7) class FakePort1(object): def __init__(self): self.id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' class FakePort2(object): def __init__(self): self.id = 'ffffffff-ffff-ffff-ffff-ffffffffffff' class FakeV4Subnet(object): def __init__(self): self.id = 'dddddddd-dddd-dddd-dddd-dddddddddddd' self.ip_version = const.IP_VERSION_4 self.cidr = '192.168.0.0/24' self.gateway_ip = '192.168.0.1' self.enable_dhcp = True class FakeV6Subnet(object): def __init__(self): self.id = 'ffffffff-ffff-ffff-ffff-ffffffffffff' self.ip_version = const.IP_VERSION_6 self.cidr = '2001:db8:0:1::/64' self.gateway_ip = '2001:db8:0:1::1' self.enable_dhcp = True class FakeV4SubnetOutsideGateway(FakeV4Subnet): def __init__(self): super(FakeV4SubnetOutsideGateway, self).__init__() self.gateway_ip = '192.168.1.1' class FakeV6SubnetOutsideGateway(FakeV6Subnet): def __init__(self): super(FakeV6SubnetOutsideGateway, self).__init__() self.gateway_ip = '2001:db8:1:1::1' class FakeV4SubnetNoGateway(object): def __init__(self): self.id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' self.ip_version = const.IP_VERSION_4 self.cidr = '192.168.1.0/24' self.gateway_ip = None self.enable_dhcp = True class FakeV6SubnetNoGateway(object): def __init__(self): self.id = 'ffffffff-ffff-ffff-ffff-ffffffffffff' self.ip_version = const.IP_VERSION_6 self.cidr = '2001:db8:1:0::/64' self.gateway_ip = None self.enable_dhcp = True class 
class FakeV4Network(object):
    def __init__(self):
        self.id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
        self.subnets = [FakeV4Subnet()]
        self.ports = [FakePort1()]
        self.namespace = 'qdhcp-aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'


class FakeDualNetwork(object):
    def __init__(self):
        self.id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
        self.subnets = [FakeV4Subnet(), FakeV6Subnet()]
        self.ports = [FakePort1(), FakePort2()]
        self.namespace = 'qdhcp-dddddddd-dddd-dddd-dddd-dddddddddddd'


class FakeV4NetworkOutsideGateway(FakeV4Network):
    def __init__(self):
        super(FakeV4NetworkOutsideGateway, self).__init__()
        self.subnets = [FakeV4SubnetOutsideGateway()]


class FakeDualNetworkOutsideGateway(FakeDualNetwork):
    def __init__(self):
        super(FakeDualNetworkOutsideGateway, self).__init__()
        self.subnets = [FakeV4SubnetOutsideGateway(),
                        FakeV6SubnetOutsideGateway()]


class FakeDualNetworkNoSubnet(object):
    def __init__(self):
        self.id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
        self.subnets = []
        self.ports = []


class FakeDualNetworkNoGateway(object):
    def __init__(self):
        self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
        self.subnets = [FakeV4SubnetNoGateway(), FakeV6SubnetNoGateway()]
        self.ports = [FakePort1(), FakePort2()]


class TestDeviceManager(base.BaseTestCase):
    def setUp(self):
        super(TestDeviceManager, self).setUp()
        config.register_interface_driver_opts_helper(cfg.CONF)
        cfg.CONF.register_opts(dhcp_config.DHCP_AGENT_OPTS)
        cfg.CONF.set_override('interface_driver',
                              'neutron.agent.linux.interface.NullDriver')
        cfg.CONF.set_override('enable_isolated_metadata', True)

        self.ensure_device_is_ready_p = mock.patch(
            'neutron.agent.linux.ip_lib.ensure_device_is_ready')
        self.ensure_device_is_ready = (self.ensure_device_is_ready_p.start())

        self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
        self.iproute_cls_p = mock.patch('neutron.agent.linux.'
                                        'ip_lib.IpRouteCommand')
        driver_cls = self.dvr_cls_p.start()
        iproute_cls = self.iproute_cls_p.start()
        self.mock_driver = mock.MagicMock()
        self.mock_driver.DEV_NAME_LEN = (
            interface.LinuxInterfaceDriver.DEV_NAME_LEN)
        self.mock_driver.use_gateway_ips = False
        self.mock_iproute = mock.MagicMock()
        driver_cls.return_value = self.mock_driver
        iproute_cls.return_value = self.mock_iproute

        iptables_cls_p = mock.patch(
            'neutron.agent.linux.iptables_manager.IptablesManager')
        iptables_cls = iptables_cls_p.start()
        self.iptables_inst = mock.Mock()
        iptables_cls.return_value = self.iptables_inst
        self.mangle_inst_v4 = mock.Mock()
        self.iptables_inst.ipv4 = {'mangle': self.mangle_inst_v4}
        self.mangle_inst_v6 = mock.Mock()
        self.iptables_inst.ipv6 = {'mangle': self.mangle_inst_v6}

        self.mock_ip_wrapper_p = mock.patch("neutron.agent.linux.ip_lib."
                                            "IPWrapper")
        self.mock_ip_wrapper = self.mock_ip_wrapper_p.start()

        self.mock_ipv6_enabled_p = mock.patch.object(netutils,
                                                     'is_ipv6_enabled')
        self.mock_ipv6_enabled = self.mock_ipv6_enabled_p.start()
        self.mock_ipv6_enabled.return_value = True

    def _test_setup_helper(self, device_is_ready, ipv6_enabled=True,
                           net=None, port=None):
        net = net or fake_network
        port = port or fake_port1
        plugin = mock.Mock()
        plugin.create_dhcp_port.return_value = port or fake_port1
        self.ensure_device_is_ready.return_value = device_is_ready
        self.mock_driver.get_device_name.return_value = 'tap12345678-12'

        dh = dhcp.DeviceManager(cfg.CONF, plugin)
        dh._set_default_route = mock.Mock()
        dh._cleanup_stale_devices = mock.Mock()
        interface_name = dh.setup(net)

        self.assertEqual('tap12345678-12', interface_name)

        plugin.assert_has_calls([
            mock.call.create_dhcp_port(
                {'port': {'name': '', 'admin_state_up': True,
                          'network_id': net.id,
                          'tenant_id': net.tenant_id,
                          'fixed_ips': [{'subnet_id':
                                         port.fixed_ips[0].subnet_id}],
                          'device_id': mock.ANY}})])

        if port == fake_ipv6_port:
            expected_ips = ['2001:db8::a8bb:ccff:fedd:ee99/64',
                            '169.254.169.254/16']
        else:
            expected_ips = ['172.9.9.9/24', '169.254.169.254/16']

        expected = [mock.call.get_device_name(port)]

        if ipv6_enabled:
            expected.append(
                mock.call.configure_ipv6_ra(net.namespace, 'default', 0))

        if not device_is_ready:
            expected.append(mock.call.plug(net.id, port.id,
                                           'tap12345678-12',
                                           'aa:bb:cc:dd:ee:ff',
                                           namespace=net.namespace,
                                           mtu=None))
        expected.append(mock.call.init_l3(
            'tap12345678-12', expected_ips, namespace=net.namespace))

        self.mock_driver.assert_has_calls(expected)

        dh._set_default_route.assert_called_once_with(net, 'tap12345678-12')

    def test_setup(self):
        cfg.CONF.set_override('enable_metadata_network', False)
        self._test_setup_helper(False)
        cfg.CONF.set_override('enable_metadata_network', True)
        self._test_setup_helper(False)

    def test_setup_without_ipv6_enabled(self):
        # NOTE(mjozefcz): This test checks if IPv6 RA is *not*
        # configured when host doesn't support IPv6.
        self.mock_ipv6_enabled.return_value = False
        self._test_setup_helper(False, ipv6_enabled=False)

    def test_setup_calls_fill_dhcp_udp_checksums_v4(self):
        self._test_setup_helper(False)
        rule = ('-p udp -m udp --dport %d -j CHECKSUM --checksum-fill'
                % const.DHCP_RESPONSE_PORT)
        expected = [mock.call.add_rule('POSTROUTING', rule)]
        self.mangle_inst_v4.assert_has_calls(expected)

    def test_setup_calls_fill_dhcp_udp_checksums_v6(self):
        self._test_setup_helper(False)
        rule = ('-p udp -m udp --dport %d -j CHECKSUM --checksum-fill'
                % const.DHCPV6_CLIENT_PORT)
        expected = [mock.call.add_rule('POSTROUTING', rule)]
        self.mangle_inst_v6.assert_has_calls(expected)

    def test_setup_dhcp_port_doesnt_orphan_devices(self):
        with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
            plugin = mock.Mock()
            device = mock.Mock()
            mock_IPDevice.return_value = device
            device.route.get_gateway.return_value = None
            net = copy.deepcopy(fake_network)
            plugin.create_dhcp_port.side_effect = exceptions.Conflict()
            dh = dhcp.DeviceManager(cfg.CONF, plugin)
            clean = mock.patch.object(dh, '_cleanup_stale_devices').start()
            with testtools.ExpectedException(exceptions.Conflict):
                dh.setup(net)
            clean.assert_called_once_with(net, dhcp_port=None)

    def test_setup_create_dhcp_port(self):
        with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
            plugin = mock.Mock()
            device = mock.Mock()
            mock_IPDevice.return_value = device
            device.route.get_gateway.return_value = None
            net = copy.deepcopy(fake_network)
            plugin.create_dhcp_port.return_value = fake_dhcp_port
            dh = dhcp.DeviceManager(cfg.CONF, plugin)
            dh.setup(net)

            plugin.assert_has_calls([
                mock.call.create_dhcp_port(
                    {'port': {'name': '', 'admin_state_up': True,
                              'network_id': net.id,
                              'tenant_id': net.tenant_id,
                              'fixed_ips':
                              [{'subnet_id':
                                fake_dhcp_port.fixed_ips[0].subnet_id}],
                              'device_id': mock.ANY}})])
            self.assertIn(fake_dhcp_port, net.ports)

    def test_setup_plug_exception(self):
        plugin = mock.Mock()
        plugin.create_dhcp_port.return_value = fake_dhcp_port
        self.ensure_device_is_ready.return_value = False
        self.mock_driver.get_device_name.return_value = 'tap12345678-12'
        dh = dhcp.DeviceManager(cfg.CONF, plugin)
        dh._set_default_route = mock.Mock()
        dh._cleanup_stale_devices = mock.Mock()
        dh.driver = mock.Mock()
        dh.driver.plug.side_effect = OSError()
        net = copy.deepcopy(fake_network)
        self.assertRaises(OSError, dh.setup, net)
        dh.driver.unplug.assert_called_once_with(mock.ANY,
                                                 namespace=net.namespace)
        plugin.release_dhcp_port.assert_called_once_with(
            net.id, mock.ANY)

    def test_setup_ipv6(self):
        self._test_setup_helper(True, net=fake_network_ipv6,
                                port=fake_ipv6_port)

    def test_setup_device_is_ready(self):
        self._test_setup_helper(True)

    def test_create_dhcp_port_raise_conflict(self):
        plugin = mock.Mock()
        dh = dhcp.DeviceManager(cfg.CONF, plugin)
        plugin.create_dhcp_port.return_value = None
        self.assertRaises(exceptions.Conflict,
                          dh.setup_dhcp_port,
                          fake_network)

    def test_create_dhcp_port_create_new(self):
        plugin = mock.Mock()
        dh = dhcp.DeviceManager(cfg.CONF, plugin)
        plugin.create_dhcp_port.return_value = fake_network.ports[0]
        dh.setup_dhcp_port(fake_network)
        plugin.assert_has_calls([
            mock.call.create_dhcp_port(
                {'port': {'name': '', 'admin_state_up': True,
                          'network_id': fake_network.id,
                          'tenant_id': fake_network.tenant_id,
                          'fixed_ips':
                          [{'subnet_id': fake_fixed_ip1.subnet_id}],
                          'device_id': mock.ANY}})])

    def test__check_dhcp_port_subnet(self):
        # this can go away once bug/1627480 is fixed
        plugin = mock.Mock()
        fake_port_copy = copy.deepcopy(fake_port1)
        fake_port_copy.fixed_ips = [fake_fixed_ip1, fake_fixed_ip_subnet2]
        plugin.get_dhcp_port.return_value = fake_port_copy
        dh = dhcp.DeviceManager(cfg.CONF, plugin)
        fake_network_copy = copy.deepcopy(fake_network)
        fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network)
        fake_network_copy.subnets[1].enable_dhcp = True
        plugin.update_dhcp_port.return_value = fake_network.ports[0]
        dh.setup_dhcp_port(fake_network_copy)
        self.assertEqual(fake_port_copy, fake_network_copy.ports[0])

    def test__check_dhcp_port_subnet_port_missing_subnet(self):
        # this can go away once bug/1627480 is fixed
        plugin = mock.Mock()
        dh = dhcp.DeviceManager(cfg.CONF, plugin)
        fake_network_copy = copy.deepcopy(fake_network)
        fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network)
        fake_network_copy.subnets[1].enable_dhcp = True
        plugin.update_dhcp_port.return_value = fake_network.ports[0]
        plugin.get_dhcp_port.return_value = fake_network_copy.ports[0]
        with testtools.ExpectedException(exceptions.SubnetMismatchForPort):
            dh.setup_dhcp_port(fake_network_copy)

    def test_create_dhcp_port_update_add_subnet(self):
        plugin = mock.Mock()
        dh = dhcp.DeviceManager(cfg.CONF, plugin)
        fake_network_copy = copy.deepcopy(fake_network)
        fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network)
        fake_network_copy.subnets[1].enable_dhcp = True
        updated_port = copy.deepcopy(fake_network_copy.ports[0])
        updated_port.fixed_ips.append(fake_fixed_ip_subnet2)
        plugin.update_dhcp_port.return_value = updated_port
        dh.setup_dhcp_port(fake_network_copy)
        port_body = {'port': {
            'network_id': fake_network.id,
            'fixed_ips': [{'subnet_id': fake_fixed_ip1.subnet_id,
                           'ip_address': fake_fixed_ip1.ip_address},
                          {'subnet_id': fake_subnet2.id}]}}
        plugin.assert_has_calls([
            mock.call.update_dhcp_port(fake_network_copy.ports[0].id,
                                       port_body)])

    def test_update_dhcp_port_raises_conflict(self):
        plugin = mock.Mock()
        dh = dhcp.DeviceManager(cfg.CONF, plugin)
        fake_network_copy = copy.deepcopy(fake_network)
        fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network)
        fake_network_copy.subnets[1].enable_dhcp = True
        plugin.update_dhcp_port.return_value = None
        self.assertRaises(exceptions.Conflict,
                          dh.setup_dhcp_port,
                          fake_network_copy)

    def test_create_dhcp_port_no_update_or_create(self):
        plugin = mock.Mock()
        dh = dhcp.DeviceManager(cfg.CONF, plugin)
        fake_network_copy = copy.deepcopy(fake_network)
        fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network)
        dh.setup_dhcp_port(fake_network_copy)
        self.assertFalse(plugin.setup_dhcp_port.called)
        self.assertFalse(plugin.update_dhcp_port.called)

    def test_setup_dhcp_port_with_non_enable_dhcp_subnet(self):
        plugin = mock.Mock()
        dh = dhcp.DeviceManager(cfg.CONF, plugin)
        fake_network_copy = copy.deepcopy(fake_network)
        fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network)
        plugin.update_dhcp_port.return_value = fake_port1
        self.assertEqual(
            fake_subnet1.id,
            dh.setup_dhcp_port(fake_network_copy).fixed_ips[0].subnet_id)

    def test_destroy(self):
        fake_net = dhcp.NetModel(
            dict(id=FAKE_NETWORK_UUID,
                 tenant_id=FAKE_TENANT_ID))

        with mock.patch('neutron.agent.linux.interface.NullDriver') as dvr_cls:
            mock_driver = mock.MagicMock()
            mock_driver.get_device_name.return_value = 'tap12345678-12'
            dvr_cls.return_value = mock_driver

            plugin = mock.Mock()

            dh = dhcp.DeviceManager(cfg.CONF, plugin)
            dh.destroy(fake_net, 'tap12345678-12')

            dvr_cls.assert_called_once_with(
                cfg.CONF, get_networks_callback=plugin.get_networks)
            mock_driver.assert_has_calls(
                [mock.call.unplug('tap12345678-12',
                                  namespace='qdhcp-' + fake_net.id)])
            plugin.assert_has_calls(
                [mock.call.release_dhcp_port(fake_net.id, mock.ANY)])

    def test_destroy_with_none(self):
        fake_net = dhcp.NetModel(
            dict(id=FAKE_NETWORK_UUID,
                 tenant_id=FAKE_TENANT_ID))

        with mock.patch('neutron.agent.linux.interface.NullDriver') as dvr_cls:
            mock_driver = mock.MagicMock()
            mock_driver.get_device_name.return_value = 'tap12345678-12'
            dvr_cls.return_value = mock_driver

            plugin = mock.Mock()

            dh = dhcp.DeviceManager(cfg.CONF, plugin)
            dh.destroy(fake_net, None)

            dvr_cls.assert_called_once_with(
                cfg.CONF, get_networks_callback=plugin.get_networks)
            plugin.assert_has_calls(
                [mock.call.release_dhcp_port(fake_net.id, mock.ANY)])
            self.assertFalse(mock_driver.called)

    def test_get_interface_name(self):
        fake_net = dhcp.NetModel(
            dict(id=FAKE_NETWORK_UUID,
                 tenant_id=FAKE_TENANT_ID))

        fake_port = dhcp.DictModel(
            dict(id='12345678-1234-aaaa-1234567890ab',
                 mac_address='aa:bb:cc:dd:ee:ff'))

        with mock.patch('neutron.agent.linux.interface.NullDriver') as dvr_cls:
            mock_driver = mock.MagicMock()
            mock_driver.get_device_name.return_value = 'tap12345678-12'
            dvr_cls.return_value = mock_driver

            plugin = mock.Mock()

            dh = dhcp.DeviceManager(cfg.CONF, plugin)
            dh.get_interface_name(fake_net, fake_port)

            dvr_cls.assert_called_once_with(
                cfg.CONF, get_networks_callback=plugin.get_networks)
            mock_driver.assert_has_calls(
                [mock.call.get_device_name(fake_port)])

            self.assertEqual(0, len(plugin.mock_calls))

    def test_get_device_id(self):
        fake_net = dhcp.NetModel(
            dict(id=FAKE_NETWORK_UUID,
                 tenant_id=FAKE_TENANT_ID))
        expected = ('dhcp1ae5f96c-c527-5079-82ea-371a01645457-12345678-1234-'
                    '5678-1234567890ab')
        # the DHCP port name only contains the hostname and not the
        # domain name
        local_hostname = cfg.CONF.host.split('.')[0]
        with mock.patch('uuid.uuid5') as uuid5:
            uuid5.return_value = '1ae5f96c-c527-5079-82ea-371a01645457'

            dh = dhcp.DeviceManager(cfg.CONF, mock.Mock())
            self.assertEqual(expected, dh.get_device_id(fake_net))
            uuid5.assert_called_once_with(uuid.NAMESPACE_DNS, local_hostname)

    def test_update(self):
        # Try with namespaces and no metadata network
        cfg.CONF.set_override('enable_metadata_network', False)
        dh = dhcp.DeviceManager(cfg.CONF, mock.Mock())
        dh._set_default_route = mock.Mock()
        network = mock.Mock()

        dh.update(network, 'ns-12345678-12')

        dh._set_default_route.assert_called_once_with(network,
                                                      'ns-12345678-12')

        # Meta data network enabled, don't interfere with its gateway.
        cfg.CONF.set_override('enable_metadata_network', True)
        dh = dhcp.DeviceManager(cfg.CONF, mock.Mock())
        dh._set_default_route = mock.Mock()

        dh.update(FakeV4Network(), 'ns-12345678-12')

        self.assertTrue(dh._set_default_route.called)

    def test_set_default_route(self):
        dh = dhcp.DeviceManager(cfg.CONF, mock.Mock())
        with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
            device = mock.Mock()
            mock_IPDevice.return_value = device
            device.route.get_gateway.return_value = None
            # Basic one subnet with gateway.
            network = FakeV4Network()
            dh._set_default_route(network, 'tap-name')

        self.assertEqual(2, device.route.get_gateway.call_count)
        self.assertFalse(device.route.delete_gateway.called)
        device.route.add_gateway.assert_called_once_with('192.168.0.1')

    def test_set_default_route_outside_subnet(self):
        dh = dhcp.DeviceManager(cfg.CONF, mock.Mock())
        with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
            device = mock.Mock()
            mock_IPDevice.return_value = device
            device.route.get_gateway.return_value = None
            # Basic one subnet with gateway outside the subnet.
            network = FakeV4NetworkOutsideGateway()
            dh._set_default_route(network, 'tap-name')

        self.assertEqual(2, device.route.get_gateway.call_count)
        self.assertFalse(device.route.delete_gateway.called)
        device.route.add_route.assert_called_once_with('192.168.1.1',
                                                       scope='link')
        device.route.add_gateway.assert_called_once_with('192.168.1.1')

    def test_set_default_route_no_subnet(self):
        dh = dhcp.DeviceManager(cfg.CONF, mock.Mock())
        with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
            device = mock.Mock()
            mock_IPDevice.return_value = device
            device.route.get_gateway.return_value = None
            network = FakeDualNetworkNoSubnet()
            network.namespace = 'qdhcp-1234'
            dh._set_default_route(network, 'tap-name')

        self.assertEqual(2, device.route.get_gateway.call_count)
        self.assertFalse(device.route.delete_gateway.called)
        self.assertFalse(device.route.add_gateway.called)

    def test_set_default_route_no_subnet_delete_gateway(self):
        dh = dhcp.DeviceManager(cfg.CONF, mock.Mock())
        v4_gateway = '192.168.0.1'
        v6_gateway = '2001:db8:0:1::1'
        expected = [mock.call(v4_gateway),
                    mock.call(v6_gateway)]
        with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
            device = mock.Mock()
            mock_IPDevice.return_value = device
            device.route.get_gateway.side_effect = [
                dict(gateway=v4_gateway), dict(gateway=v6_gateway)]
            network = FakeDualNetworkNoSubnet()
            network.namespace = 'qdhcp-1234'
            dh._set_default_route(network, 'tap-name')

        self.assertEqual(2, device.route.get_gateway.call_count)
        self.assertEqual(2, device.route.delete_gateway.call_count)
        device.route.delete_gateway.assert_has_calls(expected)
        self.assertFalse(device.route.add_gateway.called)

    def test_set_default_route_no_gateway(self):
        dh = dhcp.DeviceManager(cfg.CONF, mock.Mock())
        v4_gateway = '192.168.0.1'
        v6_gateway = '2001:db8:0:1::1'
        expected = [mock.call(v4_gateway),
                    mock.call(v6_gateway)]
        with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
            device = mock.Mock()
            mock_IPDevice.return_value = device
            device.route.get_gateway.side_effect = [
                dict(gateway=v4_gateway), dict(gateway=v6_gateway)]
            network = FakeDualNetworkNoGateway()
            network.namespace = 'qdhcp-1234'
            dh._set_default_route(network, 'tap-name')

        self.assertEqual(2, device.route.get_gateway.call_count)
        self.assertEqual(2, device.route.delete_gateway.call_count)
        device.route.delete_gateway.assert_has_calls(expected)
        self.assertFalse(device.route.add_gateway.called)

    def test_set_default_route_do_nothing(self):
        dh = dhcp.DeviceManager(cfg.CONF, mock.Mock())
        v4_gateway = '192.168.0.1'
        v6_gateway = '2001:db8:0:1::1'
        with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
            device = mock.Mock()
            mock_IPDevice.return_value = device
            device.route.get_gateway.side_effect = [
                dict(gateway=v4_gateway), dict(gateway=v6_gateway)]
            network = FakeDualNetwork()
            dh._set_default_route(network, 'tap-name')

        self.assertEqual(2, device.route.get_gateway.call_count)
        self.assertFalse(device.route.delete_gateway.called)
        self.assertFalse(device.route.add_gateway.called)

    def test_set_default_route_change_gateway(self):
        dh = dhcp.DeviceManager(cfg.CONF, mock.Mock())
        v4_gateway = '192.168.0.1'
        old_v4_gateway = '192.168.0.2'
        v6_gateway = '2001:db8:0:1::1'
        old_v6_gateway = '2001:db8:0:1::2'
        expected = [mock.call(v4_gateway),
                    mock.call(v6_gateway)]
        with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
            device = mock.Mock()
            mock_IPDevice.return_value = device
            device.route.get_gateway.side_effect = [
                dict(gateway=old_v4_gateway), dict(gateway=old_v6_gateway)]
            network = FakeDualNetwork()
            dh._set_default_route(network, 'tap-name')

        self.assertEqual(2, device.route.get_gateway.call_count)
        self.assertFalse(device.route.delete_gateway.called)
        device.route.add_gateway.assert_has_calls(expected)

    def test_set_default_route_change_gateway_outside_subnet(self):
        dh = dhcp.DeviceManager(cfg.CONF, mock.Mock())
        v4_gateway = '192.168.1.1'
        old_v4_gateway = '192.168.2.1'
        v6_gateway = '2001:db8:1:1::1'
        old_v6_gateway = '2001:db8:2:0::1'
        add_route_expected = [mock.call(v4_gateway, scope='link'),
                              mock.call(v6_gateway, scope='link')]
        add_gw_expected = [mock.call(v4_gateway),
                           mock.call(v6_gateway)]
        with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
            device = mock.Mock()
            mock_IPDevice.return_value = device
            device.route.list_onlink_routes.side_effect = [
                [{'cidr': old_v4_gateway}], []]
            device.route.get_gateway.side_effect = [
                dict(gateway=old_v4_gateway), dict(gateway=old_v6_gateway)]
            network = FakeDualNetworkOutsideGateway()
            dh._set_default_route(network, 'tap-name')

        self.assertEqual(2, device.route.get_gateway.call_count)
        self.assertEqual(2, device.route.list_onlink_routes.call_count)
        self.assertFalse(device.route.delete_gateway.called)
        device.route.delete_route.assert_called_once_with(old_v4_gateway,
                                                          scope='link')
        device.route.add_route.assert_has_calls(add_route_expected)
        device.route.add_gateway.assert_has_calls(add_gw_expected)

    def test_set_default_route_two_subnets(self):
        # Try two subnets. Should set gateway from the first.
        dh = dhcp.DeviceManager(cfg.CONF, mock.Mock())
        v4_gateway = '192.168.1.1'
        v6_gateway = '2001:db8:1:1::1'
        expected = [mock.call(v4_gateway),
                    mock.call(v6_gateway)]
        with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
            device = mock.Mock()
            mock_IPDevice.return_value = device
            device.route.get_gateway.return_value = None
            network = FakeDualNetwork()
            subnet2 = FakeV4Subnet()
            subnet2.gateway_ip = v4_gateway
            subnet3 = FakeV6Subnet()
            subnet3.gateway_ip = v6_gateway
            network.subnets = [subnet2, FakeV4Subnet(),
                               subnet3, FakeV6Subnet()]
            dh._set_default_route(network, 'tap-name')

        self.assertEqual(2, device.route.get_gateway.call_count)
        self.assertFalse(device.route.delete_gateway.called)
        device.route.add_gateway.assert_has_calls(expected)
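# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original neutron tree): the
# _set_default_route tests above all assert the same recovery pattern for a
# gateway that lies outside the DHCP port's subnet -- add a scope='link'
# host route first, then install the default gateway. A minimal,
# self-contained illustration of that ordering, with mock standing in for a
# real ip_lib.IPDevice (the helper name here is hypothetical):

import mock as _sketch_mock


def _sketch_set_onlink_gateway(device, gateway_ip):
    # Without the on-link route the kernel would reject the default route,
    # since the gateway is not reachable through any configured subnet.
    device.route.add_route(gateway_ip, scope='link')
    device.route.add_gateway(gateway_ip)


_sketch_device = _sketch_mock.Mock()
_sketch_set_onlink_gateway(_sketch_device, '192.168.1.1')
_sketch_device.route.add_route.assert_called_once_with('192.168.1.1',
                                                       scope='link')
_sketch_device.route.add_gateway.assert_called_once_with('192.168.1.1')
# ---------------------------------------------------------------------------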
neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l2/
neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l2/__init__.py
neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l2/extensions/
neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l2/extensions/__init__.py
neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l2/extensions/test_fdb_population.py

# Copyright (c) 2016 Mellanox Technologies, Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy

import mock
from neutron_lib import constants
from neutron_lib.utils import helpers
from oslo_config import cfg
import six

from neutron.agent.l2.extensions.fdb_population import (
    FdbPopulationAgentExtension)
from neutron.agent.linux import ip_lib
from neutron.plugins.ml2.drivers.linuxbridge.agent.common import (
    constants as linux_bridge_constants)
from neutron.plugins.ml2.drivers.openvswitch.agent.common import (
    constants as ovs_constants)
from neutron.tests import base


class FdbPopulationExtensionTestCase(base.BaseTestCase):

    UPDATE_MSG = {u'device_owner': constants.DEVICE_OWNER_ROUTER_INTF,
                  u'physical_network': u'physnet1',
                  u'mac_address': u'fa:16:3e:ba:bc:21',
                  u'port_id': u'17ceda02-43e1-48d8-beb6-35885b20cae6'}
    DELETE_MSG = {u'port_id': u'17ceda02-43e1-48d8-beb6-35885b20cae6'}
    FDB_TABLE = ("aa:aa:aa:aa:aa:aa self permanent\n"
                 "bb:bb:bb:bb:bb:bb self permanent")

    def setUp(self):
        super(FdbPopulationExtensionTestCase, self).setUp()
        cfg.CONF.set_override('shared_physical_device_mappings',
                              ['physnet1:p1p1'], 'FDB')
        self.DEVICE = self._get_existing_device()

    def _get_existing_device(self):
        device_mappings = helpers.parse_mappings(
            cfg.CONF.FDB.shared_physical_device_mappings, unique_keys=False)
        DEVICES = six.next(six.itervalues(device_mappings))
        return DEVICES[0]

    def _get_fdb_extension(self, mock_execute, fdb_table):
        mock_execute.return_value = fdb_table
        fdb_pop = FdbPopulationAgentExtension()
        fdb_pop.initialize(None, ovs_constants.EXTENSION_DRIVER_TYPE)
        return fdb_pop

    @mock.patch('neutron.agent.common.utils.execute')
    def test_initialize(self, mock_execute):
        fdb_extension = FdbPopulationAgentExtension()
        fdb_extension.initialize(None, ovs_constants.EXTENSION_DRIVER_TYPE)
        fdb_extension.initialize(None,
                                 linux_bridge_constants.EXTENSION_DRIVER_TYPE)

    @mock.patch('neutron.agent.common.utils.execute')
    def test_initialize_invalid_agent(self, mock_execute):
        fdb_extension = FdbPopulationAgentExtension()
        self.assertRaises(SystemExit, fdb_extension.initialize, None, 'sriov')

    @mock.patch.object(ip_lib.IpNetnsCommand, 'execute')
    def test_construct_empty_fdb_table(self, mock_execute):
        self._get_fdb_extension(mock_execute, fdb_table='')
        cmd = ['bridge', 'fdb', 'show', 'dev', self.DEVICE]
        mock_execute.assert_called_once_with(cmd, run_as_root=True)

    @mock.patch.object(ip_lib.IpNetnsCommand, 'execute')
    def test_construct_existing_fdb_table(self, mock_execute):
        fdb_extension = self._get_fdb_extension(mock_execute,
                                                fdb_table=self.FDB_TABLE)
        cmd = ['bridge', 'fdb', 'show', 'dev', self.DEVICE]
        mock_execute.assert_called_once_with(cmd, run_as_root=True)
        updated_macs_for_device = (
            fdb_extension.fdb_tracker.device_to_macs.get(self.DEVICE))
        macs = [line.split()[0] for line in self.FDB_TABLE.split('\n')]
        for mac in macs:
            self.assertIn(mac, updated_macs_for_device)

    @mock.patch.object(ip_lib.IpNetnsCommand, 'execute')
    def test_update_port_add_rule(self, mock_execute):
        fdb_extension = self._get_fdb_extension(mock_execute, self.FDB_TABLE)
        mock_execute.reset_mock()
        fdb_extension.handle_port(context=None, details=self.UPDATE_MSG)
        cmd = ['bridge', 'fdb', 'add', self.UPDATE_MSG['mac_address'],
               'dev', self.DEVICE]
        mock_execute.assert_called_once_with(cmd, run_as_root=True)
        updated_macs_for_device = (
            fdb_extension.fdb_tracker.device_to_macs.get(self.DEVICE))
        mac = self.UPDATE_MSG['mac_address']
        self.assertIn(mac, updated_macs_for_device)

    @mock.patch.object(ip_lib.IpNetnsCommand, 'execute')
    def test_update_port_changed_mac(self, mock_execute):
        fdb_extension = self._get_fdb_extension(mock_execute, self.FDB_TABLE)
        mock_execute.reset_mock()
        mac = self.UPDATE_MSG['mac_address']
        updated_mac = 'fa:16:3e:ba:bc:33'
        commands = []
        fdb_extension.handle_port(context=None, details=self.UPDATE_MSG)
        commands.append(['bridge', 'fdb', 'add', mac, 'dev', self.DEVICE])
        self.UPDATE_MSG['mac_address'] = updated_mac
        fdb_extension.handle_port(context=None, details=self.UPDATE_MSG)
        commands.append(['bridge', 'fdb', 'delete', mac, 'dev', self.DEVICE])
        commands.append(['bridge', 'fdb', 'add', updated_mac,
                         'dev', self.DEVICE])
        calls = []
        for cmd in commands:
            calls.append(mock.call(cmd, run_as_root=True))
        mock_execute.assert_has_calls(calls)
        updated_macs_for_device = (
            fdb_extension.fdb_tracker.device_to_macs.get(self.DEVICE))
        self.assertIn(updated_mac, updated_macs_for_device)
        self.assertNotIn(mac, updated_macs_for_device)

    @mock.patch('neutron.agent.common.utils.execute')
    def test_unpermitted_device_owner(self, mock_execute):
        fdb_extension = self._get_fdb_extension(mock_execute, '')
        mock_execute.reset_mock()
        details = copy.deepcopy(self.UPDATE_MSG)
        details['device_owner'] = constants.DEVICE_OWNER_LOADBALANCER
        fdb_extension.handle_port(context=None, details=details)
        self.assertFalse(mock_execute.called)
        updated_macs_for_device = (
            fdb_extension.fdb_tracker.device_to_macs.get(self.DEVICE))
        mac = self.UPDATE_MSG['mac_address']
        self.assertNotIn(mac, updated_macs_for_device)

    @mock.patch('neutron.agent.common.utils.execute')
    def test_catch_init_exception(self, mock_execute):
        mock_execute.side_effect = RuntimeError
        fdb_extension = self._get_fdb_extension(mock_execute, '')
        updated_macs_for_device = (
            fdb_extension.fdb_tracker.device_to_macs.get(self.DEVICE))
        self.assertIsNone(updated_macs_for_device)

    @mock.patch.object(ip_lib.IpNetnsCommand, 'execute')
    def test_catch_update_port_exception(self, mock_execute):
        fdb_extension = self._get_fdb_extension(mock_execute, '')
        mock_execute.side_effect = RuntimeError
        fdb_extension.handle_port(context=None, details=self.UPDATE_MSG)
        updated_macs_for_device = (
            fdb_extension.fdb_tracker.device_to_macs.get(self.DEVICE))
        mac = self.UPDATE_MSG['mac_address']
        self.assertNotIn(mac, updated_macs_for_device)

    @mock.patch.object(ip_lib.IpNetnsCommand, 'execute')
    def test_catch_delete_port_exception(self, mock_execute):
        fdb_extension = self._get_fdb_extension(mock_execute, '')
        fdb_extension.handle_port(context=None, details=self.UPDATE_MSG)
        mock_execute.side_effect = RuntimeError
        fdb_extension.delete_port(context=None, details=self.DELETE_MSG)
        updated_macs_for_device = (
            fdb_extension.fdb_tracker.device_to_macs.get(self.DEVICE))
        mac = self.UPDATE_MSG['mac_address']
        self.assertIn(mac, updated_macs_for_device)

    @mock.patch.object(ip_lib.IpNetnsCommand, 'execute')
    def test_delete_port(self, mock_execute):
        fdb_extension = self._get_fdb_extension(mock_execute, '')
        fdb_extension.handle_port(context=None, details=self.UPDATE_MSG)
        mock_execute.reset_mock()
        fdb_extension.delete_port(context=None, details=self.DELETE_MSG)
        cmd = ['bridge', 'fdb', 'delete', self.UPDATE_MSG['mac_address'],
               'dev', self.DEVICE]
        mock_execute.assert_called_once_with(cmd, run_as_root=True)

    @mock.patch.object(ip_lib.IpNetnsCommand, 'execute')
    def test_multiple_devices(self, mock_execute):
        cfg.CONF.set_override('shared_physical_device_mappings',
                              ['physnet1:p1p1', 'physnet1:p2p2'], 'FDB')
        fdb_extension = self._get_fdb_extension(mock_execute, '')
        fdb_extension.handle_port(context=None, details=self.UPDATE_MSG)
        mac = self.UPDATE_MSG['mac_address']
        calls = []
        cmd = ['bridge', 'fdb', 'add', mac, 'dev', 'p1p1']
        calls.append(mock.call(cmd, run_as_root=True))
        cmd = ['bridge', 'fdb', 'add', mac, 'dev', 'p2p2']
        calls.append(mock.call(cmd, run_as_root=True))
        mock_execute.assert_has_calls(calls, any_order=True)
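# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original neutron tree): every assertion
# in FdbPopulationExtensionTestCase reduces to one of three `bridge fdb`
# argument vectors, which the real extension runs with run_as_root=True.
# A tiny helper reproducing those shapes (the helper itself is
# hypothetical; the command layouts are copied from the assertions above):


def _sketch_fdb_cmd(action, device, mac=None):
    if action == 'show':
        return ['bridge', 'fdb', 'show', 'dev', device]
    # 'add' and 'delete' both take the MAC before the device name.
    return ['bridge', 'fdb', action, mac, 'dev', device]


assert _sketch_fdb_cmd('show', 'p1p1') == [
    'bridge', 'fdb', 'show', 'dev', 'p1p1']
assert _sketch_fdb_cmd('add', 'p1p1', 'fa:16:3e:ba:bc:21') == [
    'bridge', 'fdb', 'add', 'fa:16:3e:ba:bc:21', 'dev', 'p1p1']
# ---------------------------------------------------------------------------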
neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l2/extensions/test_qos.py

# Copyright (c) 2015 Mellanox Technologies, Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from neutron_lib import constants as common_constants
from neutron_lib import context
from neutron_lib.db import constants as db_consts
from neutron_lib.services.qos import constants as qos_consts
from oslo_utils import uuidutils

from neutron.agent.l2.extensions import qos
from neutron.agent.l2.extensions import qos_linux
from neutron.api.rpc.callbacks.consumer import registry
from neutron.api.rpc.callbacks import events
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import resources_rpc
from neutron import manager
from neutron.objects.qos import policy
from neutron.objects.qos import rule
from neutron.plugins.ml2.drivers.openvswitch.agent import (
    ovs_agent_extension_api as ovs_ext_api)
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native import (
    ovs_bridge)
from neutron.tests import base


BASE_TEST_POLICY = {'context': None,
                    'name': 'test1',
                    'id': uuidutils.generate_uuid()}
TEST_POLICY = policy.QosPolicy(**BASE_TEST_POLICY)
TEST_POLICY_DESCR = policy.QosPolicy(description='fake_descr',
                                     **BASE_TEST_POLICY)
TEST_POLICY2 = policy.QosPolicy(context=None,
                                name='test2', id=uuidutils.generate_uuid())

TEST_PORT = {'port_id': 'test_port_id',
             'qos_policy_id': TEST_POLICY.id}
TEST_PORT2 = {'port_id': 'test_port_id_2',
              'qos_policy_id': TEST_POLICY2.id}

FAKE_RULE_ID = uuidutils.generate_uuid()
FAKE_RULE_ID_2 = uuidutils.generate_uuid()
REALLY_FAKE_RULE_ID = uuidutils.generate_uuid()


class FakeDriver(qos_linux.QosLinuxAgentDriver):

    SUPPORTED_RULES = {
        qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: {
            qos_consts.MAX_KBPS: {
                'type:range': [0, db_consts.DB_INTEGER_MAX_VALUE]},
            qos_consts.MAX_BURST: {
                'type:range': [0, db_consts.DB_INTEGER_MAX_VALUE]},
            qos_consts.DIRECTION: {
                'type:values': [common_constants.EGRESS_DIRECTION,
                                common_constants.INGRESS_DIRECTION]}
        },
    }

    def __init__(self):
        super(FakeDriver, self).__init__()

        self.create_bandwidth_limit = mock.Mock()
        self.update_bandwidth_limit = mock.Mock()
        self.delete_bandwidth_limit = mock.Mock()
        self.delete_bandwidth_limit_ingress = mock.Mock()

    def initialize(self):
        pass


class QosFakeRule(rule.QosRule):

    rule_type = 'fake_type'


class QosAgentDriverTestCase(base.BaseTestCase):

    def setUp(self):
        super(QosAgentDriverTestCase, self).setUp()
        self.driver = FakeDriver()
        self.policy = TEST_POLICY
        self.egress_bandwidth_limit_rule = (
            rule.QosBandwidthLimitRule(
                context=None, id=FAKE_RULE_ID,
                qos_policy_id=self.policy.id,
                max_kbps=100, max_burst_kbps=200,
                direction=common_constants.EGRESS_DIRECTION))
        self.ingress_bandwidth_limit_rule = (
            rule.QosBandwidthLimitRule(
                context=None, id=FAKE_RULE_ID_2,
                qos_policy_id=self.policy.id,
                max_kbps=100, max_burst_kbps=200,
                direction=common_constants.INGRESS_DIRECTION))
        self.policy.rules = [self.egress_bandwidth_limit_rule,
                             self.ingress_bandwidth_limit_rule]
        self.port = {'qos_policy_id': None, 'network_qos_policy_id': None,
                     'device_owner': 'random-device-owner'}

        self.fake_rule = QosFakeRule(context=None, id=REALLY_FAKE_RULE_ID,
                                     qos_policy_id=self.policy.id)

    def test_create(self):
        self.driver.create(self.port, self.policy)
        self.driver.create_bandwidth_limit.assert_has_calls([
            mock.call(self.port, self.egress_bandwidth_limit_rule),
            mock.call(self.port, self.ingress_bandwidth_limit_rule)
        ])

    def test_update(self):
        self.driver.update(self.port, self.policy)
        self.driver.update_bandwidth_limit.assert_has_calls([
            mock.call(self.port, self.egress_bandwidth_limit_rule),
            mock.call(self.port, self.ingress_bandwidth_limit_rule)
        ])

    def test_delete(self):
        self.driver.delete(self.port, self.policy)
        self.driver.delete_bandwidth_limit.assert_called_with(self.port)
        self.driver.delete_bandwidth_limit_ingress.assert_called_with(
            self.port)

    def test_delete_no_policy(self):
        self.driver.delete(self.port, qos_policy=None)
        self.driver.delete_bandwidth_limit.assert_called_with(self.port)
        self.driver.delete_bandwidth_limit_ingress.assert_called_with(
            self.port)

    def test__iterate_rules_with_unknown_rule_type(self):
        self.policy.rules.append(self.fake_rule)
        rules = list(self.driver._iterate_rules(self.policy.rules))
        self.assertEqual(2, len(rules))
        self.assertIsInstance(rules[0], rule.QosBandwidthLimitRule)
        self.assertIsInstance(rules[1], rule.QosBandwidthLimitRule)

    def test__handle_update_create_rules_checks_should_apply_to_port(self):
        self.egress_bandwidth_limit_rule.should_apply_to_port = mock.Mock(
            return_value=False)
        self.ingress_bandwidth_limit_rule.should_apply_to_port = mock.Mock(
            return_value=False)
        self.driver.create(self.port, self.policy)
        self.assertFalse(self.driver.create_bandwidth_limit.called)

        self.egress_bandwidth_limit_rule.should_apply_to_port = mock.Mock(
            return_value=True)
        self.ingress_bandwidth_limit_rule.should_apply_to_port = mock.Mock(
            return_value=True)
        self.driver.create(self.port, self.policy)
        self.assertTrue(self.driver.create_bandwidth_limit.called)

    def test__get_max_burst_value(self):
        rule = self.egress_bandwidth_limit_rule
        rule.max_burst_kbps = 0
        expected_burst = rule.max_kbps * qos_consts.DEFAULT_BURST_RATE
        self.assertEqual(
            expected_burst,
            self.driver._get_egress_burst_value(rule)
        )

    def test__rule_type_has_ingress_direction(self):
        self.assertTrue(
            self.driver._rule_type_has_ingress_direction(
                qos_consts.RULE_TYPE_BANDWIDTH_LIMIT))

        # Should return False for rule type other than
        # RULE_TYPE_BANDWIDTH_LIMIT
        supported_rules = {
            qos_consts.RULE_TYPE_DSCP_MARKING: {
                qos_consts.DSCP_MARK: {
                    'type:values': common_constants.VALID_DSCP_MARKS}
            }
        }
        with mock.patch.dict(self.driver.SUPPORTED_RULES, supported_rules):
            self.assertFalse(
                self.driver._rule_type_has_ingress_direction(
                    qos_consts.RULE_TYPE_DSCP_MARKING))

        # Should return False for rule type RULE_TYPE_BANDWIDTH_LIMIT but
        # without INGRESS_DIRECTION in supported values
        supported_rules = {
            qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: {
                'type:values': [common_constants.EGRESS_DIRECTION]
            }
        }
        with mock.patch.dict(self.driver.SUPPORTED_RULES, supported_rules):
            self.assertFalse(
                self.driver._rule_type_has_ingress_direction(
                    qos_consts.RULE_TYPE_BANDWIDTH_LIMIT))

    def test__rule_is_ingress_direction(self):
        self.assertFalse(
            self.driver._rule_is_ingress_direction(
                self.egress_bandwidth_limit_rule))
        self.assertFalse(
            self.driver._rule_is_ingress_direction(
                self.fake_rule))
        self.assertTrue(
            self.driver._rule_is_ingress_direction(
                self.ingress_bandwidth_limit_rule))


class QosExtensionBaseTestCase(base.BaseTestCase):

    def setUp(self):
        super(QosExtensionBaseTestCase, self).setUp()
        conn_patcher = mock.patch(
            'neutron.agent.ovsdb.impl_idl._connection')
        conn_patcher.start()
        self.addCleanup(conn_patcher.stop)
        self.qos_ext = qos.QosAgentExtension()
        self.context = context.get_admin_context()
        self.connection = mock.Mock()
        os_ken_app = mock.Mock()
        self.agent_api = ovs_ext_api.OVSAgentExtensionAPI(
            ovs_bridge.OVSAgentBridge(
                'br-int', os_ken_app=os_ken_app),
            ovs_bridge.OVSAgentBridge(
                'br-tun', os_ken_app=os_ken_app),
            {'phynet1': ovs_bridge.OVSAgentBridge(
                'br-phynet1', os_ken_app=os_ken_app)})
        self.qos_ext.consume_api(self.agent_api)

        # Don't rely on used driver
        mock.patch.object(
            manager.NeutronManager, 'load_class_for_provider',
            return_value=lambda: mock.Mock(
                spec=qos_linux.QosLinuxAgentDriver)).start()

        setattr(TEST_POLICY, 'rules', [])


class QosExtensionRpcTestCase(QosExtensionBaseTestCase):

    def setUp(self):
        super(QosExtensionRpcTestCase, self).setUp()
        self.qos_ext.initialize(
            self.connection, constants.EXTENSION_DRIVER_TYPE)

        self.pull_mock = mock.patch.object(
            self.qos_ext.resource_rpc, 'pull',
            return_value=TEST_POLICY).start()

    def _create_test_port_dict(self, qos_policy_id=None):
        return {'port_id': uuidutils.generate_uuid(),
                'qos_policy_id': qos_policy_id or TEST_POLICY.id}

    def test_handle_port_with_no_policy(self):
        port = self._create_test_port_dict()
        del port['qos_policy_id']
        self.qos_ext._process_reset_port = mock.Mock()
        self.qos_ext.handle_port(self.context, port)
        self.qos_ext._process_reset_port.assert_called_with(port)

    def test_handle_unknown_port(self):
        port = self._create_test_port_dict()
        qos_policy_id = port['qos_policy_id']
        port_id = port['port_id']
        TEST_POLICY.rules = [rule.QosBandwidthLimitRule(
            context=None, id=FAKE_RULE_ID,
            qos_policy_id=TEST_POLICY.id,
            max_kbps=100, max_burst_kbps=200,
            direction=common_constants.EGRESS_DIRECTION)]
        self.qos_ext.handle_port(self.context, port)
        # we make sure the underlying qos driver is called with the
        # right parameters
        self.qos_ext.qos_driver.create.assert_called_once_with(
            port, TEST_POLICY)
        self.assertEqual(
            port,
            self.qos_ext.policy_map.qos_policy_ports[qos_policy_id][port_id])
        self.assertIn(port_id, self.qos_ext.policy_map.port_policies)
        self.assertEqual(
            TEST_POLICY,
            self.qos_ext.policy_map.known_policies[qos_policy_id])

    def test_handle_unknown_port_with_no_rules(self):
        test_policy_with_rules = {'context': None,
                                  'name': 'test1',
                                  'id': uuidutils.generate_uuid()}
        test_policy = policy.QosPolicy(**test_policy_with_rules)
        test_policy.rules = []
        port = self._create_test_port_dict(test_policy.id)
        qos_policy_id = port['qos_policy_id']
        port_id = port['port_id']
        self.pull_mock.return_value = test_policy
        self.qos_ext.handle_port(self.context, port)
        # we make sure the underlying qos driver is called with the
        # right parameters
        self.qos_ext.qos_driver.delete.assert_called_once_with(port, None)
        self.assertEqual(
            port,
            self.qos_ext.policy_map.qos_policy_ports[qos_policy_id][port_id])
        self.assertIn(port_id, self.qos_ext.policy_map.port_policies)
        self.assertEqual(
            test_policy,
            self.qos_ext.policy_map.known_policies[qos_policy_id])

    def test_handle_known_port(self):
        port_obj1 = self._create_test_port_dict()
        port_obj2 = dict(port_obj1)
        self.qos_ext.handle_port(self.context, port_obj1)
        self.qos_ext.qos_driver.reset_mock()
        self.qos_ext.handle_port(self.context, port_obj2)
        self.assertFalse(self.qos_ext.qos_driver.create.called)

    def test_handle_known_port_change_policy_id(self):
        port = self._create_test_port_dict()
        self.qos_ext.handle_port(self.context, port)
        self.qos_ext.resource_rpc.pull.reset_mock()
        test_policy_with_rules = {'context': None,
                                  'name': 'test1',
                                  'id': uuidutils.generate_uuid()}
        test_policy = policy.QosPolicy(**test_policy_with_rules)
        setattr(test_policy, 'rules', [])
        self.pull_mock.return_value = test_policy
        port['qos_policy_id'] = test_policy.id
        self.qos_ext.handle_port(self.context, port)
        self.pull_mock.assert_called_once_with(
            self.context, resources.QOS_POLICY,
            port['qos_policy_id'])

    def test_handle_diff_ports_same_policy_id(self):
        port_obj1 = self._create_test_port_dict()
        port_obj2 = self._create_test_port_dict()

        self.qos_ext.handle_port(self.context, port_obj1)
        self.pull_mock.assert_called_once_with(
            self.context, resources.QOS_POLICY,
            port_obj1['qos_policy_id'])
        self.assertIsNotNone(
            self.qos_ext.policy_map.get_port_policy(port_obj1))
        self.assertIsNone(
            self.qos_ext.policy_map.get_port_policy(port_obj2))

        self.qos_ext.resource_rpc.pull.reset_mock()
        self.qos_ext.handle_port(self.context, port_obj2)
        self.assertFalse(self.pull_mock.called)
        self.assertIsNotNone(
            self.qos_ext.policy_map.get_port_policy(port_obj2))
        self.assertEqual(
            self.qos_ext.policy_map.get_port_policy(port_obj1),
            self.qos_ext.policy_map.get_port_policy(port_obj2))

    def test_delete_known_port(self):
        port = self._create_test_port_dict()
        self.qos_ext.handle_port(self.context, port)
        self.qos_ext.qos_driver.reset_mock()
        self.qos_ext.delete_port(self.context, port)
        self.qos_ext.qos_driver.delete.assert_called_with(port)
        self.assertIsNone(self.qos_ext.policy_map.get_port_policy(port))

    def test_delete_unknown_port(self):
        port = self._create_test_port_dict()
        self.qos_ext.delete_port(self.context, port)
        self.assertTrue(self.qos_ext.qos_driver.delete.called)
        self.assertIsNone(self.qos_ext.policy_map.get_port_policy(port))

    def test__handle_notification_ignores_all_event_types_except_updated(
            self):
        with mock.patch.object(
            self.qos_ext, '_process_update_policy') as update_mock:

            for event_type in set(events.VALID) - {events.UPDATED}:
                self.qos_ext._handle_notification(mock.Mock(), 'QOS',
                                                  object(), event_type)
                self.assertFalse(update_mock.called)

    def test__handle_notification_passes_update_events(self):
        with mock.patch.object(
            self.qos_ext, '_process_update_policy') as update_mock:

            policy_obj = mock.Mock()
            self.qos_ext._handle_notification(mock.Mock(), 'QOS',
                                              [policy_obj], events.UPDATED)
            update_mock.assert_called_with(policy_obj)

    def test__process_update_policy(self):
        port1 = self._create_test_port_dict(qos_policy_id=TEST_POLICY.id)
        port2 = self._create_test_port_dict(qos_policy_id=TEST_POLICY2.id)
        self.qos_ext.policy_map.set_port_policy(port1, TEST_POLICY)
        self.qos_ext.policy_map.set_port_policy(port2, TEST_POLICY2)
        self.qos_ext._policy_rules_modified = mock.Mock(return_value=True)

        policy_obj = mock.Mock()
        policy_obj.id = port1['qos_policy_id']
        self.qos_ext._process_update_policy(policy_obj)
        self.qos_ext.qos_driver.update.assert_called_with(port1, policy_obj)

        self.qos_ext.qos_driver.update.reset_mock()
        policy_obj.id = port2['qos_policy_id']
        self.qos_ext._process_update_policy(policy_obj)
        self.qos_ext.qos_driver.update.assert_called_with(port2, policy_obj)

    def test__process_update_policy_descr_not_propagated_into_driver(self):
        port = self._create_test_port_dict(qos_policy_id=TEST_POLICY.id)
        self.qos_ext.policy_map.set_port_policy(port, TEST_POLICY)
        self.qos_ext._policy_rules_modified = mock.Mock(return_value=False)
        self.qos_ext._process_update_policy(TEST_POLICY_DESCR)
        self.qos_ext._policy_rules_modified.assert_called_with(
            TEST_POLICY, TEST_POLICY_DESCR)
        self.assertFalse(self.qos_ext.qos_driver.delete.called)
        self.assertFalse(self.qos_ext.qos_driver.update.called)
        self.assertEqual(TEST_POLICY_DESCR,
                         self.qos_ext.policy_map.get_policy(TEST_POLICY.id))

    def test__process_update_policy_not_known(self):
        self.qos_ext._policy_rules_modified = mock.Mock()
        self.qos_ext._process_update_policy(TEST_POLICY_DESCR)
        self.assertFalse(self.qos_ext._policy_rules_modified.called)
        self.assertFalse(self.qos_ext.qos_driver.delete.called)
        self.assertFalse(self.qos_ext.qos_driver.update.called)
        self.assertIsNone(self.qos_ext.policy_map.get_policy(
            TEST_POLICY_DESCR.id))

    def test__process_reset_port(self):
        port1 = self._create_test_port_dict(qos_policy_id=TEST_POLICY.id)
        port2 = self._create_test_port_dict(qos_policy_id=TEST_POLICY2.id)
        self.qos_ext.policy_map.set_port_policy(port1, TEST_POLICY)
        self.qos_ext.policy_map.set_port_policy(port2, TEST_POLICY2)

        self.qos_ext._process_reset_port(port1)
        self.qos_ext.qos_driver.delete.assert_called_with(port1)
        self.assertIsNone(self.qos_ext.policy_map.get_port_policy(port1))
        self.assertIsNotNone(self.qos_ext.policy_map.get_port_policy(port2))

        self.qos_ext.qos_driver.delete.reset_mock()
        self.qos_ext._process_reset_port(port2)
        self.qos_ext.qos_driver.delete.assert_called_with(port2)
        self.assertIsNone(self.qos_ext.policy_map.get_port_policy(port2))


class QosExtensionInitializeTestCase(QosExtensionBaseTestCase):

    @mock.patch.object(registry, 'register')
    @mock.patch.object(resources_rpc, 'ResourcesPushRpcCallback')
    def test_initialize_subscribed_to_rpc(self, rpc_mock, subscribe_mock):
        self.qos_ext.initialize(
            self.connection, constants.EXTENSION_DRIVER_TYPE)
        self.connection.create_consumer.assert_has_calls(
            [mock.call(
                 resources_rpc.resource_type_versioned_topic(resource_type),
                 [rpc_mock()],
                 fanout=True)
             for resource_type in self.qos_ext.SUPPORTED_RESOURCE_TYPES]
        )
        subscribe_mock.assert_called_with(mock.ANY, resources.QOS_POLICY)


class QosExtensionReflushRulesTestCase(QosExtensionBaseTestCase):

    def setUp(self):
        super(QosExtensionReflushRulesTestCase, self).setUp()
        self.qos_ext.initialize(
            self.connection, constants.EXTENSION_DRIVER_TYPE)

        self.pull_mock = mock.patch.object(
            self.qos_ext.resource_rpc, 'pull',
            return_value=TEST_POLICY).start()

        self.policy = policy.QosPolicy(**BASE_TEST_POLICY)
        self.rule = (
            rule.QosBandwidthLimitRule(context=None, id=FAKE_RULE_ID,
                                       qos_policy_id=self.policy.id,
                                       max_kbps=100, max_burst_kbps=10))
        self.policy.rules = [self.rule]
        self.port = {'port_id': uuidutils.generate_uuid(),
                     'qos_policy_id': TEST_POLICY.id}
        self.new_policy = policy.QosPolicy(description='descr',
                                           **BASE_TEST_POLICY)

    def test_is_reflush_required_change_policy_descr(self):
        self.qos_ext.policy_map.set_port_policy(self.port, self.policy)
        self.new_policy.rules = [self.rule]
        self.assertFalse(self.qos_ext._policy_rules_modified(
            self.policy, self.new_policy))

    def test_is_reflush_required_change_policy_rule(self):
        self.qos_ext.policy_map.set_port_policy(self.port, self.policy)
        updated_rule = (rule.QosBandwidthLimitRule(
            context=None, id=FAKE_RULE_ID,
            qos_policy_id=self.policy.id,
            max_kbps=200, max_burst_kbps=20))
        self.new_policy.rules = [updated_rule]
        self.assertTrue(self.qos_ext._policy_rules_modified(
            self.policy, self.new_policy))

    def test_is_reflush_required_remove_rules(self):
        self.qos_ext.policy_map.set_port_policy(self.port, self.policy)
        self.new_policy.rules = []
        self.assertTrue(self.qos_ext._policy_rules_modified(
            self.policy, self.new_policy))

    def test_is_reflush_required_add_rules(self):
        self.qos_ext.policy_map.set_port_policy(self.port, self.policy)
        self.new_policy.rules = [self.rule]
        fake_rule = QosFakeRule(context=None, id=REALLY_FAKE_RULE_ID,
                                qos_policy_id=self.policy.id)
        self.new_policy.rules.append(fake_rule)
        self.assertTrue(self.qos_ext._policy_rules_modified(
            self.policy, self.new_policy))


class PortPolicyMapTestCase(base.BaseTestCase):

    def setUp(self):
        super(PortPolicyMapTestCase, self).setUp()
        self.policy_map = qos.PortPolicyMap()

    def test_update_policy(self):
        self.policy_map.update_policy(TEST_POLICY)
        self.assertEqual(TEST_POLICY,
                         self.policy_map.known_policies[TEST_POLICY.id])

    def _set_ports(self):
        self.policy_map.set_port_policy(TEST_PORT, TEST_POLICY)
        self.policy_map.set_port_policy(TEST_PORT2, TEST_POLICY2)

    def test_set_port_policy(self):
        self._set_ports()
        self.assertEqual(TEST_POLICY,
                         self.policy_map.known_policies[TEST_POLICY.id])
        self.assertIn(TEST_PORT['port_id'],
                      self.policy_map.qos_policy_ports[TEST_POLICY.id])

    def test_get_port_policy(self):
        self._set_ports()
        self.assertEqual(TEST_POLICY,
                         self.policy_map.get_port_policy(TEST_PORT))
        self.assertEqual(TEST_POLICY2,
                         self.policy_map.get_port_policy(TEST_PORT2))

    def test_get_ports(self):
        self._set_ports()
        self.assertEqual([TEST_PORT],
                         list(self.policy_map.get_ports(TEST_POLICY)))
        self.assertEqual([TEST_PORT2],
                         list(self.policy_map.get_ports(TEST_POLICY2)))

    def test_clean_by_port(self):
        self._set_ports()
        self.policy_map.clean_by_port(TEST_PORT)
        self.assertNotIn(TEST_POLICY.id, self.policy_map.known_policies)
        self.assertNotIn(TEST_PORT['port_id'],
                         self.policy_map.port_policies)
        self.assertIn(TEST_POLICY2.id, self.policy_map.known_policies)

    def test_clean_by_port_for_unknown_port(self):
        self.policy_map._clean_policy_info = mock.Mock()
        self.policy_map.clean_by_port(TEST_PORT)

        self.policy_map._clean_policy_info.assert_not_called()

    def test_has_policy_changed(self):
        self._set_ports()
        self.assertTrue(
            self.policy_map.has_policy_changed(TEST_PORT, 'a_new_policy_id'))
        self.assertFalse(
            self.policy_map.has_policy_changed(TEST_PORT, TEST_POLICY.id))
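# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original neutron tree): the
# PortPolicyMapTestCase above exercises a three-way mapping -- policy id to
# policy object, policy id to its ports, and port id back to policy id.
# A minimal stand-in with the same bookkeeping, under the assumption
# (consistent with test_clean_by_port) that a policy is forgotten once its
# last port is removed:

import collections as _sketch_collections


class _SketchPolicyMap(object):
    def __init__(self):
        self.known_policies = {}
        self.qos_policy_ports = _sketch_collections.defaultdict(dict)
        self.port_policies = {}

    def set_port_policy(self, port, policy):
        self.known_policies[policy.id] = policy
        self.qos_policy_ports[policy.id][port['port_id']] = port
        self.port_policies[port['port_id']] = policy.id

    def clean_by_port(self, port):
        policy_id = self.port_policies.pop(port['port_id'], None)
        if policy_id is None:
            return  # unknown port: nothing to clean, mirroring the tests
        ports = self.qos_policy_ports[policy_id]
        ports.pop(port['port_id'])
        if not ports:
            del self.qos_policy_ports[policy_id]
            del self.known_policies[policy_id]
# ---------------------------------------------------------------------------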
neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l2/test_l2_agent_extensions_manager.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from oslo_config import cfg

from neutron.agent.l2 import l2_agent_extensions_manager as l2_ext_manager
from neutron.tests import base


class TestL2AgentExtensionsManager(base.BaseTestCase):

    def setUp(self):
        super(TestL2AgentExtensionsManager, self).setUp()
        mock.patch('neutron.agent.l2.extensions.qos.QosAgentExtension',
                   autospec=True).start()
        conf = cfg.CONF
        l2_ext_manager.register_opts(conf)
        cfg.CONF.set_override('extensions', ['qos'], 'agent')
        self.manager = l2_ext_manager.L2AgentExtensionsManager(conf)

    def _get_extension(self):
        return self.manager.extensions[0].obj

    def test_initialize(self):
        connection = object()
        self.manager.initialize(connection, 'fake_driver_type')
        ext = self._get_extension()
        ext.initialize.assert_called_once_with(connection, 'fake_driver_type')

    def test_handle_port(self):
        context = object()
        data = object()
        self.manager.handle_port(context, data)
        ext = self._get_extension()
        ext.handle_port.assert_called_once_with(context, data)

    def test_delete_port(self):
        context = object()
        data = object()
        self.manager.delete_port(context, data)
        ext = self._get_extension()
        ext.delete_port.assert_called_once_with(context, data)

neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l3/
neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l3/__init__.py
neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l3/extensions/
neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l3/extensions/__init__.py
neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l3/extensions/qos/
neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l3/extensions/qos/__init__.py
neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l3/extensions/qos/test_base.py

# Copyright 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from oslo_utils import uuidutils

from neutron.agent.l3.extensions.qos import base as qos_base
from neutron.objects.qos import policy
from neutron.tests import base

_uuid = uuidutils.generate_uuid

TEST_POLICY = policy.QosPolicy(context=None, name='test1', id=_uuid())
TEST_POLICY2 = policy.QosPolicy(context=None, name='test2', id=_uuid())

TEST_RES_1 = "res1"
TEST_RES_2 = "res2"


class RateLimitMapsTestCase(base.BaseTestCase):

    def setUp(self):
        super(RateLimitMapsTestCase, self).setUp()
        self.policy_map = qos_base.RateLimitMaps("cache-lock")

    def test_update_policy(self):
        self.policy_map.update_policy(TEST_POLICY)
        self.assertEqual(TEST_POLICY,
                         self.policy_map.known_policies[TEST_POLICY.id])

    def _set_resources(self):
        self.policy_map.set_resource_policy(TEST_RES_1, TEST_POLICY)
        self.policy_map.set_resource_policy(TEST_RES_2, TEST_POLICY2)

    def test_set_resource_policy(self):
        self._set_resources()
        self.assertEqual(TEST_POLICY,
                         self.policy_map.known_policies[TEST_POLICY.id])
        self.assertIn(TEST_RES_1,
                      self.policy_map.qos_policy_resources[TEST_POLICY.id])

    def test_get_resource_policy(self):
        self._set_resources()
        self.assertEqual(TEST_POLICY,
                         self.policy_map.get_resource_policy(TEST_RES_1))
        self.assertEqual(TEST_POLICY2,
                         self.policy_map.get_resource_policy(TEST_RES_2))

    def test_get_resources(self):
        self._set_resources()
        self.assertEqual([TEST_RES_1],
                         list(self.policy_map.get_resources(TEST_POLICY)))
        self.assertEqual([TEST_RES_2],
                         list(self.policy_map.get_resources(TEST_POLICY2)))

    def test_clean_by_resource(self):
        self._set_resources()
        self.policy_map.clean_by_resource(TEST_RES_1)
        self.assertNotIn(TEST_POLICY.id, self.policy_map.known_policies)
        self.assertNotIn(TEST_RES_1, self.policy_map.resource_policies)
        self.assertIn(TEST_POLICY2.id, self.policy_map.known_policies)

    def test_clean_by_resource_for_unknown_resource(self):
        self.policy_map._clean_policy_info = mock.Mock()
        self.policy_map.clean_by_resource(TEST_RES_1)

        self.policy_map._clean_policy_info.assert_not_called()
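# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original neutron tree): RateLimitMaps is
# the resource-generic counterpart of the L2 PortPolicyMap tested earlier;
# the only L3-specific twist is the lock name passed to the constructor.
# Usage shape, mirroring the assertions above (assumes this tree is on the
# import path; the variable names are illustrative):

import mock as _sketch_mock

from neutron.agent.l3.extensions.qos import base as _sketch_qos_base

_sketch_maps = _sketch_qos_base.RateLimitMaps("cache-lock")
_sketch_policy = _sketch_mock.Mock(id="policy-1")
_sketch_maps.set_resource_policy("res1", _sketch_policy)
assert _sketch_maps.get_resource_policy("res1") == _sketch_policy
_sketch_maps.clean_by_resource("res1")
assert _sketch_policy.id not in _sketch_maps.known_policies
# ---------------------------------------------------------------------------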
import mock from neutron_lib import constants as lib_const from neutron_lib import context from neutron_lib import rpc as n_rpc from neutron_lib.services.qos import constants as qos_consts from oslo_utils import uuidutils from neutron.agent.l3 import agent as l3_agent from neutron.agent.l3.extensions.qos import fip as fip_qos from neutron.agent.l3 import l3_agent_extension_api as l3_ext_api from neutron.agent.l3 import router_info as l3router from neutron.api.rpc.callbacks.consumer import registry from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import resources_rpc from neutron.objects.qos import policy from neutron.objects.qos import rule from neutron.tests import base from neutron.tests.unit.agent.l3 import test_agent _uuid = uuidutils.generate_uuid TEST_QOS_FIP = "3.3.3.3" TEST_FIP = "1.1.1.1" TEST_FIP2 = "2.2.2.2" HOSTNAME = 'myhost' class QosExtensionBaseTestCase(test_agent.BasicRouterOperationsFramework): def setUp(self): super(QosExtensionBaseTestCase, self).setUp() self.fip_qos_ext = fip_qos.FipQosAgentExtension() self.context = context.get_admin_context() self.connection = mock.Mock() self.policy = policy.QosPolicy(context=None, name='test1', id=_uuid()) self.ingress_rule = ( rule.QosBandwidthLimitRule(context=None, id=_uuid(), qos_policy_id=self.policy.id, max_kbps=1111, max_burst_kbps=2222, direction=lib_const.INGRESS_DIRECTION)) self.egress_rule = ( rule.QosBandwidthLimitRule(context=None, id=_uuid(), qos_policy_id=self.policy.id, max_kbps=3333, max_burst_kbps=4444, direction=lib_const.EGRESS_DIRECTION)) self.policy.rules = [self.ingress_rule, self.egress_rule] self.new_ingress_rule = ( rule.QosBandwidthLimitRule(context=None, id=_uuid(), qos_policy_id=self.policy.id, max_kbps=5555, max_burst_kbps=6666, direction=lib_const.INGRESS_DIRECTION)) self.ingress_rule_only_has_max_kbps = ( rule.QosBandwidthLimitRule(context=None, id=_uuid(), qos_policy_id=self.policy.id, max_kbps=5555, max_burst_kbps=0, direction=lib_const.INGRESS_DIRECTION)) self.policy2 = policy.QosPolicy(context=None, name='test2', id=_uuid()) self.policy2.rules = [self.ingress_rule] self.policy3 = policy.QosPolicy(context=None, name='test3', id=_uuid()) self.policy3.rules = [self.egress_rule] self.policy4 = policy.QosPolicy(context=None, name='test4', id=_uuid()) self.dscp = rule.QosDscpMarkingRule(context=None, id=_uuid(), qos_policy_id=self.policy4.id, dscp_mark=32) self.dscp.obj_reset_changes() self.policy4.rules = [self.dscp] self.qos_policies = {self.policy.id: self.policy, self.policy2.id: self.policy2, self.policy3.id: self.policy3, self.policy4.id: self.policy4} self.agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.ex_gw_port = {'id': _uuid()} self.fip = {'id': _uuid(), 'floating_ip_address': TEST_QOS_FIP, 'fixed_ip_address': '192.168.0.1', 'floating_network_id': _uuid(), 'port_id': _uuid(), 'host': HOSTNAME, 'qos_policy_id': self.policy.id} self.router_id = _uuid() self.router = {'id': self.router_id, 'gw_port': self.ex_gw_port, 'ha': False, 'distributed': False, lib_const.FLOATINGIP_KEY: [self.fip]} self.router_info = l3router.RouterInfo(self.agent, self.router_id, self.router, **self.ri_kwargs) self.router_info.ex_gw_port = self.ex_gw_port self.agent.router_info[self.router_id] = self.router_info def _mock_get_router_info(router_id): return self.router_info self.get_router_info = mock.patch( 'neutron.agent.l3.l3_agent_extension_api.' 
'L3AgentExtensionAPI.get_router_info').start() self.get_router_info.side_effect = _mock_get_router_info self.agent_api = l3_ext_api.L3AgentExtensionAPI(None, None) self.fip_qos_ext.consume_api(self.agent_api) class FipQosExtensionInitializeTestCase(QosExtensionBaseTestCase): @mock.patch.object(registry, 'register') @mock.patch.object(resources_rpc, 'ResourcesPushRpcCallback') def test_initialize_subscribed_to_rpc(self, rpc_mock, subscribe_mock): with mock.patch.object(n_rpc, 'Connection', return_value=self.connection) as create_connection: self.fip_qos_ext.initialize( self.connection, lib_const.L3_AGENT_MODE) create_connection.assert_has_calls([mock.call()]) self.connection.create_consumer.assert_has_calls( [mock.call( resources_rpc.resource_type_versioned_topic( resources.QOS_POLICY), [rpc_mock()], fanout=True)] ) subscribe_mock.assert_called_with(mock.ANY, resources.QOS_POLICY) class FipQosExtensionTestCase(QosExtensionBaseTestCase): def setUp(self): super(FipQosExtensionTestCase, self).setUp() self.fip_qos_ext.initialize( self.connection, lib_const.L3_AGENT_MODE) self._set_pull_mock() def _set_pull_mock(self): def _pull_mock(context, resource_type, resource_id): return self.qos_policies[resource_id] self.pull = mock.patch( 'neutron.api.rpc.handlers.resources_rpc.' 'ResourcesPullRpcApi.pull').start() self.pull.side_effect = _pull_mock def _test_new_fip_add(self, func): tc_wrapper = mock.Mock() with mock.patch.object(self.fip_qos_ext, '_get_tc_wrapper', return_value=tc_wrapper): func(self.context, self.router) tc_wrapper.set_ip_rate_limit.assert_has_calls( [mock.call(lib_const.INGRESS_DIRECTION, TEST_QOS_FIP, 1111, 2222), mock.call(lib_const.EGRESS_DIRECTION, TEST_QOS_FIP, 3333, 4444)], any_order=True) def test_add_router(self): self._test_new_fip_add(self.fip_qos_ext.add_router) def test_update_router(self): self._test_new_fip_add(self.fip_qos_ext.update_router) def test_update_router_fip_policy_changed(self): tc_wrapper = mock.Mock() with mock.patch.object(self.fip_qos_ext, '_get_tc_wrapper', return_value=tc_wrapper): self.fip_qos_ext.update_router(self.context, self.router) tc_wrapper.set_ip_rate_limit.assert_has_calls( [mock.call(lib_const.INGRESS_DIRECTION, TEST_QOS_FIP, 1111, 2222), mock.call(lib_const.EGRESS_DIRECTION, TEST_QOS_FIP, 3333, 4444)], any_order=True) # the policy of floating IP has been changed to # which only has one egress rule self.fip[qos_consts.QOS_POLICY_ID] = self.policy3.id self.fip_qos_ext.update_router(self.context, self.router) tc_wrapper.clear_ip_rate_limit.assert_has_calls( [mock.call(lib_const.INGRESS_DIRECTION, TEST_QOS_FIP)]) def test_update_router_fip_policy_changed_to_none(self): tc_wrapper = mock.Mock() with mock.patch.object(self.fip_qos_ext, '_get_tc_wrapper', return_value=tc_wrapper): self.fip_qos_ext.update_router(self.context, self.router) tc_wrapper.set_ip_rate_limit.assert_has_calls( [mock.call(lib_const.INGRESS_DIRECTION, TEST_QOS_FIP, 1111, 2222), mock.call(lib_const.EGRESS_DIRECTION, TEST_QOS_FIP, 3333, 4444)], any_order=True) # floating IP remove the qos_policy bonding self.fip[qos_consts.QOS_POLICY_ID] = None self.fip_qos_ext.update_router(self.context, self.router) tc_wrapper.clear_ip_rate_limit.assert_has_calls( [mock.call(lib_const.INGRESS_DIRECTION, TEST_QOS_FIP), mock.call(lib_const.EGRESS_DIRECTION, TEST_QOS_FIP)], any_order=True) def test__process_update_policy(self): tc_wrapper = mock.Mock() with mock.patch.object(self.fip_qos_ext, '_get_tc_wrapper', return_value=tc_wrapper): self.fip_qos_ext.update_router(self.context, 
                                           self.router)
            tc_wrapper.set_ip_rate_limit.assert_has_calls(
                [mock.call(lib_const.INGRESS_DIRECTION,
                           TEST_QOS_FIP, 1111, 2222),
                 mock.call(lib_const.EGRESS_DIRECTION,
                           TEST_QOS_FIP, 3333, 4444)],
                any_order=True)
            # The rules of the floating IP's policy have been changed.
            self.fip_qos_ext._policy_rules_modified = mock.Mock(
                return_value=True)
            self.policy.rules = [self.new_ingress_rule, self.egress_rule]
            self.fip_qos_ext._process_update_policy(self.policy)
            tc_wrapper.set_ip_rate_limit.assert_has_calls(
                [mock.call(lib_const.INGRESS_DIRECTION,
                           TEST_QOS_FIP, 5555, 6666)])

    def _test_qos_policy_scenarios(self, fip_removed=True,
                                   qos_rules_removed=False):
        tc_wrapper = mock.Mock()
        with mock.patch.object(self.fip_qos_ext, '_get_tc_wrapper',
                               return_value=tc_wrapper):
            self.fip_qos_ext.update_router(self.context, self.router)
            tc_wrapper.set_ip_rate_limit.assert_has_calls(
                [mock.call(lib_const.INGRESS_DIRECTION,
                           TEST_QOS_FIP, 1111, 2222),
                 mock.call(lib_const.EGRESS_DIRECTION,
                           TEST_QOS_FIP, 3333, 4444)],
                any_order=True)
            if fip_removed:
                # The floating IP was disassociated, so it no longer
                # belongs to this router.
                self.router[lib_const.FLOATINGIP_KEY] = []
            if qos_rules_removed:
                self.policy.rules = []
            self.fip_qos_ext.update_router(self.context, self.router)
            tc_wrapper.clear_ip_rate_limit.assert_has_calls(
                [mock.call(lib_const.INGRESS_DIRECTION, TEST_QOS_FIP),
                 mock.call(lib_const.EGRESS_DIRECTION, TEST_QOS_FIP)],
                any_order=True)

    def test_delete_router(self):
        tc_wrapper = mock.Mock()
        with mock.patch.object(self.fip_qos_ext, '_get_tc_wrapper',
                               return_value=tc_wrapper):
            self.fip_qos_ext.update_router(self.context, self.router)
            tc_wrapper.set_ip_rate_limit.assert_has_calls(
                [mock.call(lib_const.INGRESS_DIRECTION,
                           TEST_QOS_FIP, 1111, 2222),
                 mock.call(lib_const.EGRESS_DIRECTION,
                           TEST_QOS_FIP, 3333, 4444)],
                any_order=True)
            self.fip_qos_ext.delete_router(self.context, self.router)
            self.assertIsNone(
                self.fip_qos_ext.fip_qos_map.router_floating_ips.get(
                    self.router_id))
            self.assertIsNone(
                self.fip_qos_ext.fip_qos_map.ingress_ratelimits.get(
                    TEST_QOS_FIP))
            self.assertIsNone(
                self.fip_qos_ext.fip_qos_map.egress_ratelimits.get(
                    TEST_QOS_FIP))
            self.assertIsNone(
                self.fip_qos_ext.fip_qos_map.get_resource_policy(
                    TEST_QOS_FIP))

    def test_update_router_fip_removed(self):
        self._test_qos_policy_scenarios()

    def test_fip_qos_changed_to_none(self):
        self._test_qos_policy_scenarios(qos_rules_removed=True)

    def _test_only_one_direction_rule(self, func, policy, direction):
        tc_wrapper = mock.Mock()
        with mock.patch.object(
                self.fip_qos_ext.resource_rpc, 'pull',
                return_value=policy):
            with mock.patch.object(self.fip_qos_ext, '_get_tc_wrapper',
                                   return_value=tc_wrapper):
                func(self.context, self.router)
                if direction == lib_const.INGRESS_DIRECTION:
                    calls = [mock.call(lib_const.INGRESS_DIRECTION,
                                       TEST_QOS_FIP, 1111, 2222)]
                else:
                    calls = [mock.call(lib_const.EGRESS_DIRECTION,
                                       TEST_QOS_FIP, 3333, 4444)]
                tc_wrapper.set_ip_rate_limit.assert_has_calls(calls)

    def test_add_router_only_ingress(self):
        self._test_only_one_direction_rule(self.fip_qos_ext.add_router,
                                           self.policy2,
                                           lib_const.INGRESS_DIRECTION)

    def test_add_router_only_egress(self):
        self._test_only_one_direction_rule(self.fip_qos_ext.add_router,
                                           self.policy3,
                                           lib_const.EGRESS_DIRECTION)

    def test_update_router_only_ingress(self):
        self._test_only_one_direction_rule(self.fip_qos_ext.update_router,
                                           self.policy2,
                                           lib_const.INGRESS_DIRECTION)

    def test_update_router_only_egress(self):
        self._test_only_one_direction_rule(self.fip_qos_ext.update_router,
                                           self.policy3,
                                           lib_const.EGRESS_DIRECTION)
    def test_rule_only_has_max_kbps(self):
        tc_wrapper = mock.Mock()
        with mock.patch.object(self.fip_qos_ext, '_get_tc_wrapper',
                               return_value=tc_wrapper):
            self.fip_qos_ext.update_router(self.context, self.router)
            tc_wrapper.set_ip_rate_limit.assert_has_calls(
                [mock.call(lib_const.INGRESS_DIRECTION,
                           TEST_QOS_FIP, 1111, 2222),
                 mock.call(lib_const.EGRESS_DIRECTION,
                           TEST_QOS_FIP, 3333, 4444)],
                any_order=True)
            # The policy's ingress rule has been changed to one which
            # only sets a max_kbps value.
            self.policy.rules = [self.ingress_rule_only_has_max_kbps,
                                 self.egress_rule]
            self.fip_qos_ext.update_router(self.context, self.router)
            tc_wrapper.set_ip_rate_limit.assert_has_calls(
                [mock.call(lib_const.INGRESS_DIRECTION,
                           TEST_QOS_FIP, 5555, 0)])

    def test_qos_policy_has_no_bandwidth_limit_rule(self):
        tc_wrapper = mock.Mock()
        with mock.patch.object(self.fip_qos_ext, '_get_tc_wrapper',
                               return_value=tc_wrapper):
            self.fip['qos_policy_id'] = self.policy4.id
            self.fip_qos_ext.add_router(self.context, self.router)
            tc_wrapper.set_ip_rate_limit.assert_not_called()

    def _test_process_ip_rates(self, with_cache):
        rates = {'egress': {'rate': 333, 'burst': 444},
                 'ingress': {'rate': 111, 'burst': 222}}
        fip = '123.123.123.123'
        device = mock.Mock()
        tc_wrapper = mock.Mock()
        with mock.patch.object(
                self.fip_qos_ext, '_get_tc_wrapper',
                return_value=tc_wrapper) as get_tc_wrapper:
            with mock.patch.object(
                    self.fip_qos_ext,
                    'process_ip_rate_limit') as process_ip:
                self.fip_qos_ext.process_ip_rates(
                    fip, device, rates, with_cache=with_cache)
                if with_cache:
                    self.assertEqual(2, process_ip.call_count)
                else:
                    self.assertEqual(2, get_tc_wrapper.call_count)
                    self.assertEqual(
                        2, tc_wrapper.set_ip_rate_limit.call_count)

    def test_process_ip_rates_with_cache(self):
        self._test_process_ip_rates(with_cache=True)

    def test_process_ip_rates_without_cache(self):
        self._test_process_ip_rates(with_cache=False)


class RouterFipRateLimitMapsTestCase(base.BaseTestCase):

    def setUp(self):
        super(RouterFipRateLimitMapsTestCase, self).setUp()
        self.policy_map = fip_qos.RouterFipRateLimitMaps()

    def test_find_fip_router_id(self):
        router_id = _uuid()
        self.policy_map.router_floating_ips[router_id] = set([TEST_FIP,
                                                              TEST_FIP2])
        self.assertIsNone(self.policy_map.find_fip_router_id("8.8.8.8"))
        self.assertEqual(router_id,
                         self.policy_map.find_fip_router_id(TEST_FIP))

    def test_get_router_floating_ips(self):
        router_id = _uuid()
        test_ips = [TEST_FIP, TEST_FIP2]
        self.policy_map.router_floating_ips[router_id] = set([TEST_FIP,
                                                              TEST_FIP2])
        get_ips = self.policy_map.get_router_floating_ips(router_id)
        self.assertEqual(len(test_ips), len(get_ips))

    def test_remove_fip_ratelimit_cache(self):
        fip = "1.1.1.1"
        self.policy_map.set_fip_ratelimit_cache(
            "ingress", fip, 100, 200)
        self.policy_map.set_fip_ratelimit_cache(
            "egress", fip, 100, 200)
        self.policy_map.remove_fip_ratelimit_cache("ingress", fip)
        self.assertIsNone(self.policy_map.ingress_ratelimits.get(fip))
        self.policy_map.remove_fip_ratelimit_cache("egress", fip)
        self.assertIsNone(self.policy_map.egress_ratelimits.get(fip))

    def test_set_fip_ratelimit_cache(self):
        fip = "1.1.1.1"
        self.policy_map.set_fip_ratelimit_cache(
            "ingress", fip, 100, 200)
        self.policy_map.set_fip_ratelimit_cache(
            "egress", fip, 300, 400)
        in_rate, in_burst = self.policy_map.get_fip_ratelimit_cache(
            "ingress", fip)
        self.assertEqual(100, in_rate)
        self.assertEqual(200, in_burst)
        e_rate, e_burst = self.policy_map.get_fip_ratelimit_cache(
            "egress", fip)
        self.assertEqual(300, e_rate)
        self.assertEqual(400, e_burst)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022
mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l3/extensions/qos/test_gateway_ip.py0000644000175000017500000002450200000000000031140 0ustar00coreycorey00000000000000# Copyright 2017 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import mock from neutron_lib import constants as lib_const from neutron_lib import context from oslo_utils import uuidutils from neutron.agent.l3 import agent as l3_agent from neutron.agent.l3.extensions.qos import gateway_ip as gateway_ip_qos from neutron.agent.l3 import l3_agent_extension_api as l3_ext_api from neutron.agent.l3 import router_info as l3router from neutron.api.rpc.callbacks.consumer import registry from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import resources_rpc from neutron.objects.qos import policy from neutron.objects.qos import rule from neutron.tests.unit.agent.l3 import test_agent _uuid = uuidutils.generate_uuid TEST_QOS_GW_IP = "172.24.4.1" HOSTNAME = 'myhost' class QosExtensionBaseTestCase(test_agent.BasicRouterOperationsFramework): def setUp(self): super(QosExtensionBaseTestCase, self).setUp() self.gw_ip_qos_ext = gateway_ip_qos.RouterGatewayIPQosAgentExtension() self.context = context.get_admin_context() self.connection = mock.Mock() self.policy = policy.QosPolicy(context=None, name='test1', id=_uuid()) self.ingress_rule = ( rule.QosBandwidthLimitRule(context=None, id=_uuid(), qos_policy_id=self.policy.id, max_kbps=1111, max_burst_kbps=2222, direction=lib_const.INGRESS_DIRECTION)) self.egress_rule = ( rule.QosBandwidthLimitRule(context=None, id=_uuid(), qos_policy_id=self.policy.id, max_kbps=3333, max_burst_kbps=4444, direction=lib_const.EGRESS_DIRECTION)) self.policy.rules = [self.ingress_rule, self.egress_rule] self.new_ingress_rule = ( rule.QosBandwidthLimitRule(context=None, id=_uuid(), qos_policy_id=self.policy.id, max_kbps=5555, max_burst_kbps=6666, direction=lib_const.INGRESS_DIRECTION)) self.ingress_rule_only_has_max_kbps = ( rule.QosBandwidthLimitRule(context=None, id=_uuid(), qos_policy_id=self.policy.id, max_kbps=5555, max_burst_kbps=0, direction=lib_const.INGRESS_DIRECTION)) self.policy2 = policy.QosPolicy(context=None, name='test2', id=_uuid()) self.policy2.rules = [self.ingress_rule] self.policy3 = policy.QosPolicy(context=None, name='test3', id=_uuid()) self.policy3.rules = [self.egress_rule] self.policy4 = policy.QosPolicy(context=None, name='test4', id=_uuid()) self.dscp = rule.QosDscpMarkingRule(context=None, id=_uuid(), qos_policy_id=self.policy4.id, dscp_mark=32) self.dscp.obj_reset_changes() self.policy4.rules = [self.dscp] self.qos_policies = {self.policy.id: self.policy, self.policy2.id: self.policy2, self.policy3.id: self.policy3, self.policy4.id: self.policy4} self.agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.ex_gw_port = {'id': _uuid(), 'fixed_ips': [ {'ip_address': TEST_QOS_GW_IP}], 'qos_policy_id': self.policy.id, 'enable_snat': True} self.fip = {'id': _uuid(), 'floating_ip_address': '172.24.4.9', 
'fixed_ip_address': '192.168.0.1', 'floating_network_id': _uuid(), 'port_id': _uuid(), 'host': HOSTNAME, 'qos_policy_id': self.policy.id} self.router = {'id': _uuid(), 'gw_port': self.ex_gw_port, 'ha': False, 'distributed': False, lib_const.FLOATINGIP_KEY: [self.fip], 'external_gateway_info': self.ex_gw_port} self.router_info = l3router.RouterInfo(self.agent, self.router['id'], self.router, **self.ri_kwargs) self.router_info.ex_gw_port = self.ex_gw_port self.agent.router_info[self.router['id']] = self.router_info def _mock_get_router_info(router_id): return self.router_info self.get_router_info = mock.patch( 'neutron.agent.l3.l3_agent_extension_api.' 'L3AgentExtensionAPI.get_router_info').start() self.get_router_info.side_effect = _mock_get_router_info self.agent_api = l3_ext_api.L3AgentExtensionAPI(None, None) self.gw_ip_qos_ext.consume_api(self.agent_api) class RouterGatewayIPQosAgentExtensionInitializeTestCase( QosExtensionBaseTestCase): @mock.patch.object(registry, 'register') @mock.patch.object(resources_rpc, 'ResourcesPushRpcCallback') def test_initialize_subscribed_to_rpc(self, rpc_mock, subscribe_mock): call_to_patch = 'neutron_lib.rpc.Connection' with mock.patch(call_to_patch, return_value=self.connection) as create_connection: self.gw_ip_qos_ext.initialize( self.connection, lib_const.L3_AGENT_MODE) create_connection.assert_has_calls([mock.call()]) self.connection.create_consumer.assert_has_calls( [mock.call( resources_rpc.resource_type_versioned_topic( resources.QOS_POLICY), [rpc_mock()], fanout=True)] ) subscribe_mock.assert_called_with(mock.ANY, resources.QOS_POLICY) class RouterGatewayIPQosAgentExtensionTestCase( QosExtensionBaseTestCase): def setUp(self): super(RouterGatewayIPQosAgentExtensionTestCase, self).setUp() self.gw_ip_qos_ext.initialize( self.connection, lib_const.L3_AGENT_MODE) self._set_pull_mock() def _set_pull_mock(self): def _pull_mock(context, resource_type, resource_id): return self.qos_policies[resource_id] self.pull = mock.patch( 'neutron.api.rpc.handlers.resources_rpc.' 
'ResourcesPullRpcApi.pull').start() self.pull.side_effect = _pull_mock def _test_gateway_ip_add(self, func): tc_wrapper = mock.Mock() with mock.patch.object(self.gw_ip_qos_ext, '_get_tc_wrapper', return_value=tc_wrapper): func(self.context, self.router) tc_wrapper.set_ip_rate_limit.assert_has_calls( [mock.call(lib_const.INGRESS_DIRECTION, TEST_QOS_GW_IP, 1111, 2222), mock.call(lib_const.EGRESS_DIRECTION, TEST_QOS_GW_IP, 3333, 4444)], any_order=True) self.assertEqual( {self.router_info.router_id: self.policy.id}, self.gw_ip_qos_ext.gateway_ip_qos_map.resource_policies) def test_add_router(self): self._test_gateway_ip_add(self.gw_ip_qos_ext.add_router) def test_update_router(self): self._test_gateway_ip_add(self.gw_ip_qos_ext.update_router) def test_delete_router(self): tc_wrapper = mock.Mock() with mock.patch.object(self.gw_ip_qos_ext, '_get_tc_wrapper', return_value=tc_wrapper): self.gw_ip_qos_ext.add_router(self.context, self.router) tc_wrapper.set_ip_rate_limit.assert_has_calls( [mock.call(lib_const.INGRESS_DIRECTION, TEST_QOS_GW_IP, 1111, 2222), mock.call(lib_const.EGRESS_DIRECTION, TEST_QOS_GW_IP, 3333, 4444)], any_order=True) self.gw_ip_qos_ext.delete_router(self.context, self.router) self.assertIsNone( self.gw_ip_qos_ext.gateway_ip_qos_map.get_resource_policy( self.router_info.router_id)) def test__process_update_policy(self): tc_wrapper = mock.Mock() with mock.patch.object(self.gw_ip_qos_ext, '_get_tc_wrapper', return_value=tc_wrapper): self.gw_ip_qos_ext.add_router(self.context, self.router) tc_wrapper.set_ip_rate_limit.assert_has_calls( [mock.call(lib_const.INGRESS_DIRECTION, TEST_QOS_GW_IP, 1111, 2222), mock.call(lib_const.EGRESS_DIRECTION, TEST_QOS_GW_IP, 3333, 4444)], any_order=True) new_policy = copy.deepcopy(self.policy) new_policy.rules = [self.new_ingress_rule, self.egress_rule] self.gw_ip_qos_ext._process_update_policy(new_policy) self.assertEqual( {self.router_info.router_id: self.policy.id}, self.gw_ip_qos_ext.gateway_ip_qos_map.resource_policies) tc_wrapper.set_ip_rate_limit.assert_has_calls( [mock.call(lib_const.INGRESS_DIRECTION, TEST_QOS_GW_IP, 5555, 6666), mock.call(lib_const.EGRESS_DIRECTION, TEST_QOS_GW_IP, 3333, 4444)], any_order=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l3/extensions/test_conntrack_helper.py0000644000175000017500000003352300000000000031531 0ustar00coreycorey00000000000000# Copyright (c) 2019 Red Hat Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
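# NOTE: the tests below expect each ConntrackHelper object to be rendered
# as a dedicated iptables chain, reached through a jump from a shared
# default chain, that applies the CT target with the configured helper.
# The function below is an illustrative sketch of that rendering, not
# part of the extension itself; the 'cth-' prefix and the truncation
# length are assumptions standing in for cth.CONNTRACK_HELPER_CHAIN_PREFIX
# and constants.MAX_IPTABLES_CHAIN_LEN_WRAP used in the tests.
def _sketch_cth_chain_and_rule(cth_id, protocol, port, helper,
                               chain_prefix='cth-', max_chain_len=11):
    """Sketch: derive a per-helper chain name and its CT rule."""
    # Chain names are derived from the helper id and truncated so they
    # still fit once the iptables binary-name prefix is prepended.
    chain_name = (chain_prefix + cth_id)[:max_chain_len]
    chain_rule = ('-p %(protocol)s --dport %(dport)s -j CT --helper '
                  '%(helper)s' % {'protocol': protocol,
                                  'dport': port,
                                  'helper': helper})
    return chain_name, chain_rule
# For example, a udp/69 'tftp' helper yields a chain such as 'cth-1234567'
# holding the rule '-p udp --dport 69 -j CT --helper tftp'.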
import mock from neutron_lib import constants from neutron_lib import context from oslo_utils import uuidutils from neutron.agent.l3 import agent as l3_agent from neutron.agent.l3.extensions import conntrack_helper as cth from neutron.agent.l3 import l3_agent_extension_api as l3_ext_api from neutron.agent.l3 import router_info as l3router from neutron.agent.linux import iptables_manager from neutron.api.rpc.callbacks.consumer import registry from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import resources_rpc from neutron.objects import conntrack_helper as cth_obj from neutron.tests import base from neutron.tests.unit.agent.l3 import test_agent BINARY_NAME = iptables_manager.get_binary_name() DEFAULT_RULE = ('PREROUTING', '-j %s-' % BINARY_NAME + cth.DEFAULT_CONNTRACK_HELPER_CHAIN) HOSTNAME = 'testhost' class ConntrackHelperExtensionBaseTestCase( test_agent.BasicRouterOperationsFramework): def setUp(self): super(ConntrackHelperExtensionBaseTestCase, self).setUp() self.cth_ext = cth.ConntrackHelperAgentExtension() self.context = context.get_admin_context() self.connection = mock.Mock() self.router_id = uuidutils.generate_uuid() self.conntrack_helper1 = cth_obj.ConntrackHelper( context=None, id=uuidutils.generate_uuid(), protocol='udp', port=69, helper='tftp', router_id=self.router_id) self.agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.router = {'id': self.router_id, 'ha': False, 'distributed': False} self.router_info = l3router.RouterInfo(self.agent, self.router_id, self.router, **self.ri_kwargs) self.agent.router_info[self.router['id']] = self.router_info self.get_router_info = mock.patch( 'neutron.agent.l3.l3_agent_extension_api.' 'L3AgentExtensionAPI.get_router_info').start() self.get_router_info.return_value = self.router_info self.agent_api = l3_ext_api.L3AgentExtensionAPI(None, None) self.cth_ext.consume_api(self.agent_api) self.conntrack_helpers = [self.conntrack_helper1] class ConntrackHelperExtensionInitializeTestCase( ConntrackHelperExtensionBaseTestCase): @mock.patch.object(registry, 'register') @mock.patch.object(resources_rpc, 'ResourcesPushRpcCallback') def test_initialize_subscribed_to_rpc(self, rpc_mock, subscribe_mock): call_to_patch = 'neutron_lib.rpc.Connection' with mock.patch(call_to_patch, return_value=self.connection) as create_connection: self.cth_ext.initialize( self.connection, constants.L3_AGENT_MODE) create_connection.assert_has_calls([mock.call()]) self.connection.create_consumer.assert_has_calls( [mock.call( resources_rpc.resource_type_versioned_topic( resources.CONNTRACKHELPER), [rpc_mock()], fanout=True)] ) subscribe_mock.assert_called_with( mock.ANY, resources.CONNTRACKHELPER) class ConntrackHelperExtensionTestCase(ConntrackHelperExtensionBaseTestCase): def setUp(self): super(ConntrackHelperExtensionTestCase, self).setUp() self.cth_ext.initialize( self.connection, constants.L3_AGENT_MODE) self._set_bulk_pull_mock() def _set_bulk_pull_mock(self): def _bulk_pull_mock(context, resource_type, filter_kwargs=None): if 'router_id' in filter_kwargs: result = [] for cthobj in self.conntrack_helpers: if cthobj.router_id in filter_kwargs['router_id']: result.append(cthobj) return result return self.conntrack_helpers self.bulk_pull = mock.patch( 'neutron.api.rpc.handlers.resources_rpc.' 
            'ResourcesPullRpcApi.bulk_pull').start()
        self.bulk_pull.side_effect = _bulk_pull_mock

    @mock.patch.object(iptables_manager.IptablesTable, 'add_rule')
    @mock.patch.object(iptables_manager.IptablesTable, 'add_chain')
    def test_create_router(self, mock_add_chain, mock_add_rule):
        self.cth_ext.add_router(self.context, self.router)
        chain_name = (cth.CONNTRACK_HELPER_CHAIN_PREFIX +
                      self.conntrack_helper1.id)[
                          :constants.MAX_IPTABLES_CHAIN_LEN_WRAP]
        chain_rule = ('-p %(protocol)s --dport %(dport)s -j CT --helper '
                      '%(helper)s' %
                      {'protocol': self.conntrack_helper1.protocol,
                       'dport': self.conntrack_helper1.port,
                       'helper': self.conntrack_helper1.helper})
        tag = cth.CONNTRACK_HELPER_PREFIX + self.conntrack_helper1.id
        self.assertEqual(mock_add_chain.call_count, 6)
        self.assertEqual(mock_add_rule.call_count, 6)
        mock_add_chain.assert_has_calls([
            mock.call(cth.DEFAULT_CONNTRACK_HELPER_CHAIN),
            mock.call(cth.DEFAULT_CONNTRACK_HELPER_CHAIN),
            mock.call(cth.DEFAULT_CONNTRACK_HELPER_CHAIN),
            mock.call(chain_name),
            mock.call(chain_name)
        ])
        mock_add_rule.assert_has_calls([
            mock.call(DEFAULT_RULE[0], DEFAULT_RULE[1]),
            mock.call(DEFAULT_RULE[0], DEFAULT_RULE[1]),
            mock.call(cth.DEFAULT_CONNTRACK_HELPER_CHAIN,
                      '-j %s-' % BINARY_NAME + chain_name, tag=tag),
            mock.call(cth.DEFAULT_CONNTRACK_HELPER_CHAIN,
                      '-j %s-' % BINARY_NAME + chain_name, tag=tag),
            mock.call(chain_name, chain_rule, tag=tag),
            mock.call(chain_name, chain_rule, tag=tag)
        ])

    @mock.patch.object(iptables_manager.IptablesTable, 'add_rule')
    @mock.patch.object(iptables_manager.IptablesTable, 'add_chain')
    def test_update_router(self, mock_add_chain, mock_add_rule):
        self.cth_ext.add_router(self.context, self.router)
        mock_add_chain.reset_mock()
        mock_add_rule.reset_mock()
        self.cth_ext.update_router(self.context, self.router)
        mock_add_chain.assert_not_called()
        mock_add_rule.assert_not_called()

    @mock.patch.object(iptables_manager.IptablesTable, 'add_rule')
    @mock.patch.object(iptables_manager.IptablesTable, 'add_chain')
    def test_add_conntrack_helper_update_router(self, mock_add_chain,
                                                mock_add_rule):
        self.cth_ext.add_router(self.context, self.router)
        # Create another conntrack helper with the same router_id.
        mock_add_chain.reset_mock()
        mock_add_rule.reset_mock()
        test_conntrackhelper = cth_obj.ConntrackHelper(
            context=None, id=uuidutils.generate_uuid(), protocol='tcp',
            port=21, helper='ftp',
            router_id=self.conntrack_helper1.router_id)
        self.conntrack_helpers.append(test_conntrackhelper)
        self.cth_ext.update_router(self.context, self.router)
        chain_name = (cth.CONNTRACK_HELPER_CHAIN_PREFIX +
                      test_conntrackhelper.id)[
                          :constants.MAX_IPTABLES_CHAIN_LEN_WRAP]
        chain_rule = ('-p %(protocol)s --dport %(dport)s -j CT --helper '
                      '%(helper)s' %
                      {'protocol': test_conntrackhelper.protocol,
                       'dport': test_conntrackhelper.port,
                       'helper': test_conntrackhelper.helper})
        tag = cth.CONNTRACK_HELPER_PREFIX + test_conntrackhelper.id
        self.assertEqual(mock_add_chain.call_count, 6)
        self.assertEqual(mock_add_rule.call_count, 6)
        mock_add_chain.assert_has_calls([
            mock.call(cth.DEFAULT_CONNTRACK_HELPER_CHAIN),
            mock.call(cth.DEFAULT_CONNTRACK_HELPER_CHAIN),
            mock.call(cth.DEFAULT_CONNTRACK_HELPER_CHAIN),
            mock.call(chain_name),
            mock.call(chain_name)
        ])
        mock_add_rule.assert_has_calls([
            mock.call(DEFAULT_RULE[0], DEFAULT_RULE[1]),
            mock.call(DEFAULT_RULE[0], DEFAULT_RULE[1]),
            mock.call(cth.DEFAULT_CONNTRACK_HELPER_CHAIN,
                      '-j %s-' % BINARY_NAME + chain_name, tag=tag),
            mock.call(cth.DEFAULT_CONNTRACK_HELPER_CHAIN,
                      '-j %s-' % BINARY_NAME + chain_name, tag=tag),
            mock.call(chain_name, chain_rule, tag=tag),
mock.call(chain_name, chain_rule, tag=tag) ]) @mock.patch.object(cth.ConntrackHelperMapping, 'clear_by_router_id') def test_delete_router(self, mock_clear_by_router_id): router_data = {'id': self.router_id, 'ha': False, 'distributed': False} self.cth_ext.delete_router(self.context, router_data) mock_clear_by_router_id.assert_called_with(self.router_id) class ConntrackHelperMappingTestCase(base.BaseTestCase): def setUp(self): super(ConntrackHelperMappingTestCase, self).setUp() self.mapping = cth.ConntrackHelperMapping() self.router1 = uuidutils.generate_uuid() self.router2 = uuidutils.generate_uuid() self.conntrack_helper1 = cth_obj.ConntrackHelper( context=None, id=uuidutils.generate_uuid(), protocol='udp', port=69, helper='tftp', router_id=self.router1) self.conntrack_helper2 = cth_obj.ConntrackHelper( context=None, id=uuidutils.generate_uuid(), protocol='udp', port=69, helper='tftp', router_id=self.router2) self.conntrack_helper3 = cth_obj.ConntrackHelper( context=None, id=uuidutils.generate_uuid(), protocol='udp', port=21, helper='ftp', router_id=self.router1) self.conntrack_helper4 = cth_obj.ConntrackHelper( context=None, id=uuidutils.generate_uuid(), protocol='udp', port=21, helper='ftp', router_id=self.router2) self.conntrack_helper_dict = { self.conntrack_helper1.id: self.conntrack_helper1, self.conntrack_helper2.id: self.conntrack_helper2, self.conntrack_helper3.id: self.conntrack_helper3, self.conntrack_helper4.id: self.conntrack_helper4} def _set_cth(self): self.mapping.set_conntrack_helpers( self.conntrack_helper_dict.values()) def test_set_conntrack_helpers(self): self._set_cth() cth_ids = self.conntrack_helper_dict.keys() managed_cths = self.mapping.get_managed_conntrack_helpers() for cth_id, obj in managed_cths.items(): self.assertIn(cth_id, cth_ids) self.assertEqual(obj, self.conntrack_helper_dict[cth_id]) self.assertEqual( len(cth_ids), len(managed_cths.keys())) def test_update_conntrack_helper(self): self._set_cth() new_conntrack_helper1 = cth_obj.ConntrackHelper( context=None, id=self.conntrack_helper1.id, protocol='udp', port=6969, helper='tftp', router_id=self.router1) self.mapping.update_conntrack_helpers([new_conntrack_helper1]) managed_cths = self.mapping.get_managed_conntrack_helpers() self.assertEqual( new_conntrack_helper1, managed_cths[self.conntrack_helper1.id]) for router_id in self.mapping._router_conntrack_helper_mapping.keys(): self.assertIn(router_id, [self.router1, self.router2]) self.assertEqual( len([self.router1, self.router2]), len(self.mapping._router_conntrack_helper_mapping.keys())) def test_del_conntrack_helper(self): self._set_cth() self.mapping.del_conntrack_helpers([self.conntrack_helper3, self.conntrack_helper2, self.conntrack_helper4]) managed_cths = self.mapping.get_managed_conntrack_helpers() self.assertEqual([self.conntrack_helper1.id], list(managed_cths.keys())) self.assertNotIn(self.conntrack_helper3.id, self.mapping._router_conntrack_helper_mapping[ self.conntrack_helper3.router_id]) self.assertNotIn(self.router2, self.mapping._router_conntrack_helper_mapping.keys()) def test_clear_by_router_id(self): self._set_cth() self.mapping.clear_by_router_id(self.router2) managed_cths = self.mapping.get_managed_conntrack_helpers() self.assertNotIn(self.conntrack_helper2, managed_cths.keys()) self.assertNotIn(self.conntrack_helper4, managed_cths.keys()) def test_check_conntrack_helper_changes(self): self._set_cth() new_cth = cth_obj.ConntrackHelper( context=None, id=self.conntrack_helper1.id, protocol='udp', port=6969, helper='tftp', 
router_id=self.router1) self.assertTrue(self.mapping.check_conntrack_helper_changes(new_cth)) def test_check_conntrack_helper_changes_no_change(self): self._set_cth() new_cth = cth_obj.ConntrackHelper( context=None, id=self.conntrack_helper1.id, protocol='udp', port=69, helper='tftp', router_id=self.router1) self.assertFalse(self.mapping.check_conntrack_helper_changes(new_cth)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l3/extensions/test_port_forwarding.py0000644000175000017500000005040700000000000031416 0ustar00coreycorey00000000000000# Copyright 2018 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib import constants as lib_const from neutron_lib import context from oslo_utils import uuidutils from neutron.agent.l3 import agent as l3_agent from neutron.agent.l3.extensions import port_forwarding as pf from neutron.agent.l3 import l3_agent_extension_api as l3_ext_api from neutron.agent.l3 import router_info as l3router from neutron.agent.linux import iptables_manager from neutron.api.rpc.callbacks.consumer import registry from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import resources_rpc from neutron.objects import port_forwarding as pf_obj from neutron.objects import router from neutron.tests import base from neutron.tests.unit.agent.l3 import test_agent _uuid = uuidutils.generate_uuid TEST_FIP = '10.100.2.45' BINARY_NAME = iptables_manager.get_binary_name() DEFAULT_RULE = ('PREROUTING', '-j %s-fip-pf' % BINARY_NAME) DEFAULT_CHAIN = 'fip-pf' HOSTNAME = 'testhost' class PortForwardingExtensionBaseTestCase( test_agent.BasicRouterOperationsFramework): def setUp(self): super(PortForwardingExtensionBaseTestCase, self).setUp() self.fip_pf_ext = pf.PortForwardingAgentExtension() self.context = context.get_admin_context() self.connection = mock.Mock() self.floatingip2 = router.FloatingIP(context=None, id=_uuid(), floating_ip_address='172.24.6.12', floating_network_id=_uuid(), router_id=_uuid(), status='ACTIVE') self.portforwarding1 = pf_obj.PortForwarding( context=None, id=_uuid(), floatingip_id=self.floatingip2.id, external_port=1111, protocol='tcp', internal_port_id=_uuid(), internal_ip_address='1.1.1.1', internal_port=11111, floating_ip_address=self.floatingip2.floating_ip_address, router_id=self.floatingip2.router_id) self.agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.ex_gw_port = {'id': _uuid()} self.fip = {'id': _uuid(), 'floating_ip_address': TEST_FIP, 'fixed_ip_address': '192.168.0.1', 'floating_network_id': _uuid(), 'port_id': _uuid(), 'host': HOSTNAME} self.router = {'id': self.floatingip2.router_id, 'gw_port': self.ex_gw_port, 'ha': False, 'distributed': False, lib_const.FLOATINGIP_KEY: [self.fip]} self.router_info = l3router.RouterInfo( self.agent, self.floatingip2.router_id, self.router, **self.ri_kwargs) self.centralized_port_forwarding_fip_set = set( 
[str(self.floatingip2.floating_ip_address) + '/32']) self.pf_managed_fips = [self.floatingip2.id] self.router_info.ex_gw_port = self.ex_gw_port self.router_info.fip_managed_by_port_forwardings = self.pf_managed_fips self.agent.router_info[self.router['id']] = self.router_info self.get_router_info = mock.patch( 'neutron.agent.l3.l3_agent_extension_api.' 'L3AgentExtensionAPI.get_router_info').start() self.get_router_info.return_value = self.router_info self.agent_api = l3_ext_api.L3AgentExtensionAPI(None, None) self.fip_pf_ext.consume_api(self.agent_api) self.port_forwardings = [self.portforwarding1] class FipPortForwardingExtensionInitializeTestCase( PortForwardingExtensionBaseTestCase): @mock.patch.object(registry, 'register') @mock.patch.object(resources_rpc, 'ResourcesPushRpcCallback') def test_initialize_subscribed_to_rpc(self, rpc_mock, subscribe_mock): call_to_patch = 'neutron_lib.rpc.Connection' with mock.patch(call_to_patch, return_value=self.connection) as create_connection: self.fip_pf_ext.initialize( self.connection, lib_const.L3_AGENT_MODE) create_connection.assert_has_calls([mock.call()]) self.connection.create_consumer.assert_has_calls( [mock.call( resources_rpc.resource_type_versioned_topic( resources.PORTFORWARDING), [rpc_mock()], fanout=True)] ) subscribe_mock.assert_called_with( mock.ANY, resources.PORTFORWARDING) class FipPortForwardingExtensionTestCase(PortForwardingExtensionBaseTestCase): def setUp(self): super(FipPortForwardingExtensionTestCase, self).setUp() self.fip_pf_ext.initialize( self.connection, lib_const.L3_AGENT_MODE) self._set_bulk_pull_mock() def _set_bulk_pull_mock(self): def _bulk_pull_mock(context, resource_type, filter_kwargs=None): if 'floatingip_id' in filter_kwargs: result = [] for pfobj in self.port_forwardings: if pfobj.floatingip_id in filter_kwargs['floatingip_id']: result.append(pfobj) return result return self.port_forwardings self.bulk_pull = mock.patch( 'neutron.api.rpc.handlers.resources_rpc.' 
'ResourcesPullRpcApi.bulk_pull').start() self.bulk_pull.side_effect = _bulk_pull_mock def _get_chainrule_tag_from_pf_obj(self, target_obj): rule_tag = 'fip_portforwarding-' + target_obj.id chain_name = ( 'pf-' + target_obj.id)[:lib_const.MAX_IPTABLES_CHAIN_LEN_WRAP] chain_rule = (chain_name, '-d %s/32 -p %s -m %s --dport %s ' '-j DNAT --to-destination %s:%s' % ( target_obj.floating_ip_address, target_obj.protocol, target_obj.protocol, target_obj.external_port, target_obj.internal_ip_address, target_obj.internal_port)) return chain_name, chain_rule, rule_tag def _assert_called_iptables_process(self, mock_add_chain, mock_add_rule, mock_add_fip, mock_send_fip_status, target_obj=None): if target_obj: obj = target_obj else: obj = self.portforwarding1 (chain_name, chain_rule, rule_tag) = self._get_chainrule_tag_from_pf_obj(obj) mock_add_chain.assert_has_calls([mock.call('fip-pf'), mock.call(chain_name)]) mock_add_rule.assert_has_calls( [mock.call(DEFAULT_RULE[0], DEFAULT_RULE[1]), mock.call(DEFAULT_CHAIN, ('-j %s-' % BINARY_NAME) + chain_name, tag=rule_tag), mock.call(chain_name, chain_rule[1], tag=rule_tag)]) mock_add_fip.assert_called_once_with( {'floating_ip_address': str(obj.floating_ip_address)}, mock.ANY, mock.ANY) fip_status = { obj.floatingip_id: lib_const.FLOATINGIP_STATUS_ACTIVE} mock_send_fip_status.assert_called_once_with(mock.ANY, fip_status) @mock.patch.object(pf.PortForwardingAgentExtension, '_sending_port_forwarding_fip_status') @mock.patch.object(iptables_manager.IptablesTable, 'add_rule') @mock.patch.object(iptables_manager.IptablesTable, 'add_chain') @mock.patch.object(l3router.RouterInfo, 'add_floating_ip') def test_add_update_router(self, mock_add_fip, mock_add_chain, mock_add_rule, mock_send_fip_status): # simulate the router add and already there is a port forwarding # resource association. 
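        # _assert_called_iptables_process() then verifies the expected
        # effects: the shared 'fip-pf' chain plus a per-forwarding chain
        # holding the DNAT rule, RouterInfo.add_floating_ip() being
        # invoked for the floating IP, and an ACTIVE status being sent
        # back for it.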
mock_add_fip.return_value = lib_const.FLOATINGIP_STATUS_ACTIVE self.fip_pf_ext.add_router(self.context, self.router) self._assert_called_iptables_process( mock_add_chain, mock_add_rule, mock_add_fip, mock_send_fip_status, target_obj=self.portforwarding1) # Then we create another port forwarding with the same fip mock_add_fip.reset_mock() mock_send_fip_status.reset_mock() mock_add_chain.reset_mock() mock_add_rule.reset_mock() test_portforwarding = pf_obj.PortForwarding( context=None, id=_uuid(), floatingip_id=self.floatingip2.id, external_port=2222, protocol='tcp', internal_port_id=_uuid(), internal_ip_address='2.2.2.2', internal_port=22222, floating_ip_address=self.floatingip2.floating_ip_address, router_id=self.floatingip2.router_id) self.pf_managed_fips.append(self.floatingip2.id) self.port_forwardings.append(test_portforwarding) self.fip_pf_ext.update_router(self.context, self.router) self._assert_called_iptables_process( mock_add_chain, mock_add_rule, mock_add_fip, mock_send_fip_status, target_obj=test_portforwarding) @mock.patch.object(iptables_manager.IptablesTable, 'add_rule') @mock.patch.object(iptables_manager.IptablesTable, 'add_chain') @mock.patch('neutron.agent.linux.ip_lib.IPDevice') @mock.patch.object(iptables_manager.IptablesTable, 'remove_chain') def test_add_update_router_port_forwarding_change( self, mock_remove_chain, mock_ip_device, mock_add_chain, mock_add_rule): self.fip_pf_ext.add_router(self.context, self.router) update_portforwarding = pf_obj.PortForwarding( context=None, id=self.portforwarding1.id, floatingip_id=self.portforwarding1.floatingip_id, external_port=2222, protocol='tcp', internal_port_id=_uuid(), internal_ip_address='2.2.2.2', internal_port=22222, floating_ip_address=self.portforwarding1.floating_ip_address, router_id=self.portforwarding1.router_id) self.port_forwardings = [update_portforwarding] mock_delete = mock.Mock() mock_ip_device.return_value = mock_delete self.fip_pf_ext.update_router(self.context, self.router) current_chain = ('pf-' + self.portforwarding1.id)[ :lib_const.MAX_IPTABLES_CHAIN_LEN_WRAP] mock_remove_chain.assert_called_once_with(current_chain) mock_delete.delete_socket_conntrack_state.assert_called_once_with( str(self.portforwarding1.floating_ip_address), self.portforwarding1.external_port, protocol=self.portforwarding1.protocol) (chain_name, chain_rule, rule_tag) = self._get_chainrule_tag_from_pf_obj( update_portforwarding) mock_add_chain.assert_has_calls([mock.call('fip-pf'), mock.call(chain_name)]) mock_add_rule.assert_has_calls( [mock.call(DEFAULT_RULE[0], DEFAULT_RULE[1]), mock.call(DEFAULT_CHAIN, ('-j %s-' % BINARY_NAME) + chain_name, tag=rule_tag), mock.call(chain_name, chain_rule[1], tag=rule_tag)]) @mock.patch.object(pf.PortForwardingAgentExtension, '_sending_port_forwarding_fip_status') @mock.patch('neutron.agent.linux.ip_lib.IPDevice') @mock.patch.object(iptables_manager.IptablesTable, 'remove_chain') def test_add_update_router_port_forwarding_remove( self, mock_remove_chain, mock_ip_device, mock_send_fip_status): self.fip_pf_ext.add_router(self.context, self.router) mock_send_fip_status.reset_mock() self.port_forwardings = [] mock_device = mock.Mock() mock_ip_device.return_value = mock_device self.fip_pf_ext.update_router(self.context, self.router) current_chain = ('pf-' + self.portforwarding1.id)[ :lib_const.MAX_IPTABLES_CHAIN_LEN_WRAP] mock_remove_chain.assert_called_once_with(current_chain) mock_device.delete_socket_conntrack_state.assert_called_once_with( str(self.portforwarding1.floating_ip_address), 
self.portforwarding1.external_port, protocol=self.portforwarding1.protocol) mock_device.delete_addr_and_conntrack_state.assert_called_once_with( str(self.portforwarding1.floating_ip_address)) fip_status = { self.portforwarding1.floatingip_id: lib_const.FLOATINGIP_STATUS_DOWN} mock_send_fip_status.assert_called_once_with(mock.ANY, fip_status) def test_check_if_need_process_no_snat_ns(self): ex_gw_port = {'id': _uuid()} router_id = _uuid() router = {'id': router_id, 'gw_port': ex_gw_port, 'ha': False, 'distributed': True} router_info = l3router.RouterInfo( self.agent, router_id, router, **self.ri_kwargs) router_info.agent_conf.agent_mode = lib_const.L3_AGENT_MODE_DVR_SNAT router_info.fip_managed_by_port_forwardings = True router_info.snat_namespace = mock.Mock() router_info.snat_namespace.exists.return_value = False self.assertFalse(self.fip_pf_ext._check_if_need_process(router_info)) class RouterFipPortForwardingMappingTestCase(base.BaseTestCase): def setUp(self): super(RouterFipPortForwardingMappingTestCase, self).setUp() self.mapping = pf.RouterFipPortForwardingMapping() self.router1 = _uuid() self.router2 = _uuid() self.floatingip1 = _uuid() self.floatingip2 = _uuid() self.floatingip3 = _uuid() self.portforwarding1 = pf_obj.PortForwarding( context=None, id=_uuid(), floatingip_id=self.floatingip1, external_port=1111, protocol='tcp', internal_port_id=_uuid(), internal_ip_address='1.1.1.1', internal_port=11111, floating_ip_address='111.111.111.111', router_id=self.router1, description='Some description') self.portforwarding2 = pf_obj.PortForwarding( context=None, id=_uuid(), floatingip_id=self.floatingip1, external_port=1112, protocol='tcp', internal_port_id=_uuid(), internal_ip_address='1.1.1.2', internal_port=11112, floating_ip_address='111.111.111.111', router_id=self.router1, description='Some description') self.portforwarding3 = pf_obj.PortForwarding( context=None, id=_uuid(), floatingip_id=self.floatingip2, external_port=1113, protocol='tcp', internal_port_id=_uuid(), internal_ip_address='1.1.1.3', internal_port=11113, floating_ip_address='111.222.111.222', router_id=self.router1, description=None) self.portforwarding4 = pf_obj.PortForwarding( context=None, id=_uuid(), floatingip_id=self.floatingip3, external_port=2222, protocol='tcp', internal_port_id=_uuid(), internal_ip_address='2.2.2.2', internal_port=22222, floating_ip_address='222.222.222.222', router_id=self.router2, description='') self.portforwardings_dict = { self.portforwarding1.id: self.portforwarding1, self.portforwarding2.id: self.portforwarding2, self.portforwarding3.id: self.portforwarding3, self.portforwarding4.id: self.portforwarding4} def _set_pf(self): self.mapping.set_port_forwardings(self.portforwardings_dict.values()) def test_set_port_forwardings(self): self._set_pf() pf_ids = self.portforwardings_dict.keys() for pf_id, obj in self.mapping.managed_port_forwardings.items(): self.assertIn(pf_id, pf_ids) self.assertEqual(obj, self.portforwardings_dict[pf_id]) self.assertEqual( len(pf_ids), len(self.mapping.managed_port_forwardings.keys())) fip_pf_set = { self.floatingip1: set( [self.portforwarding1.id, self.portforwarding2.id]), self.floatingip2: set([self.portforwarding3.id]), self.floatingip3: set([self.portforwarding4.id]) } for fip_id, pf_set in self.mapping.fip_port_forwarding.items(): self.assertIn( fip_id, [self.floatingip1, self.floatingip2, self.floatingip3]) self.assertEqual(0, len(pf_set - fip_pf_set[fip_id])) self.assertEqual( len([self.floatingip1, self.floatingip2, self.floatingip3]), 
len(self.mapping.fip_port_forwarding)) router_fip = { self.router1: set([self.floatingip1, self.floatingip2]), self.router2: set([self.floatingip3]) } for router_id, fip_set in self.mapping.router_fip_mapping.items(): self.assertIn(router_id, [self.router1, self.router2]) self.assertEqual(0, len(fip_set - router_fip[router_id])) self.assertEqual( len([self.router1, self.router2]), len(self.mapping.router_fip_mapping.keys())) def test_update_port_forwarding(self): self._set_pf() description = 'Some description' new_pf1 = pf_obj.PortForwarding( context=None, id=self.portforwarding2.id, floatingip_id=self.floatingip1, external_port=11122, protocol='tcp', internal_port_id=self.portforwarding2.internal_port_id, internal_ip_address='1.1.1.22', internal_port=11122, floating_ip_address='111.111.111.111', router_id=self.router1, description=description) self.mapping.update_port_forwardings([new_pf1]) self.assertEqual( new_pf1, self.mapping.managed_port_forwardings[self.portforwarding2.id]) def test_del_port_forwardings(self): self._set_pf() del_pfs = [self.portforwarding3, self.portforwarding2, self.portforwarding4] self.mapping.del_port_forwardings(del_pfs) self.assertEqual( [self.portforwarding1.id], list(self.mapping.managed_port_forwardings.keys())) self.assertEqual({self.floatingip1: set([self.portforwarding1.id])}, self.mapping.fip_port_forwarding) self.assertEqual({self.router1: set([self.floatingip1])}, self.mapping.router_fip_mapping) def test_clear_by_fip(self): self._set_pf() self.mapping.clear_by_fip(self.floatingip1, self.router1) router_fip = { self.router1: set([self.floatingip2]), self.router2: set([self.floatingip3]) } for router_id, fip_set in self.mapping.router_fip_mapping.items(): self.assertIn(router_id, [self.router1, self.router2]) self.assertEqual(0, len(fip_set - router_fip[router_id])) fip_pf_set = { self.floatingip2: set([self.portforwarding3.id]), self.floatingip3: set([self.portforwarding4.id]) } for fip_id, pf_set in self.mapping.fip_port_forwarding.items(): self.assertIn( fip_id, [self.floatingip2, self.floatingip3]) self.assertEqual(0, len(pf_set - fip_pf_set[fip_id])) self.assertEqual( len([self.floatingip2, self.floatingip3]), len(self.mapping.fip_port_forwarding)) pfs_dict = {self.portforwarding3.id: self.portforwarding3, self.portforwarding4.id: self.portforwarding4} for pf_id, obj in self.mapping.managed_port_forwardings.items(): self.assertIn(pf_id, [self.portforwarding3.id, self.portforwarding4.id]) self.assertEqual(obj, pfs_dict[pf_id]) self.assertEqual( len([self.portforwarding3.id, self.portforwarding4.id]), len(self.mapping.managed_port_forwardings.keys())) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l3/extensions/test_snat_log.py0000644000175000017500000000415500000000000030015 0ustar00coreycorey00000000000000# Copyright (c) 2018 Fujitsu Limited # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
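# NOTE: every *InitializeTestCase in these l3-extension test modules
# asserts the same subscription wiring. The helper below is an
# illustrative sketch of that expected behaviour, not the actual
# extension code; collaborators are injected so it could be exercised
# against mocks exactly the way the tests do.
def _sketch_subscribe_extension(connection, resource_type, callback):
    """Sketch: wire an agent extension to resource push notifications."""
    from neutron.api.rpc.callbacks.consumer import registry
    from neutron.api.rpc.handlers import resources_rpc

    # Register a local callback for pushed updates of this resource type.
    registry.register(callback, resource_type)
    # Consume fanout pushes on the resource's versioned topic.
    endpoints = [resources_rpc.ResourcesPushRpcCallback()]
    topic = resources_rpc.resource_type_versioned_topic(resource_type)
    connection.create_consumer(topic, endpoints, fanout=True)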
import mock from neutron_lib import constants as lib_const from neutron.agent.l3.extensions import snat_log from neutron.api.rpc.callbacks.consumer import registry from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import resources_rpc from neutron.tests.unit.services.logapi.agent.l3 import test_base class SnatLogExtensionInitializeTestCase(test_base.L3LoggingExtBaseTestCase): def setUp(self): super(SnatLogExtensionInitializeTestCase, self).setUp() self.snat_log_ext = snat_log.SNATLoggingExtension() self.snat_log_ext.consume_api(self.agent_api) @mock.patch.object(registry, 'register') @mock.patch.object(resources_rpc, 'ResourcesPushRpcCallback') def test_initialize_subscribed_to_rpc(self, rpc_mock, subscribe_mock): call_to_patch = 'neutron_lib.rpc.Connection' with mock.patch(call_to_patch, return_value=self.connection) as create_connection: self.snat_log_ext.initialize( self.connection, lib_const.L3_AGENT_MODE) create_connection.assert_has_calls([mock.call()]) self.connection.create_consumer.assert_has_calls( [mock.call( resources_rpc.resource_type_versioned_topic( resources.LOGGING_RESOURCE), [rpc_mock()], fanout=True)] ) subscribe_mock.assert_called_with( mock.ANY, resources.LOGGING_RESOURCE) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l3/test_agent.py0000644000175000017500000055340200000000000025112 0ustar00coreycorey00000000000000# Copyright 2012 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import functools from itertools import chain as iter_chain from itertools import combinations as iter_combinations import os import pwd import eventlet import fixtures import mock import netaddr from neutron_lib.agent import constants as agent_consts from neutron_lib.api.definitions import portbindings from neutron_lib import constants as lib_constants from oslo_config import cfg from oslo_log import log import oslo_messaging from oslo_utils import netutils from oslo_utils import timeutils from oslo_utils import uuidutils from testtools import matchers from neutron.agent.common import resource_processing_queue from neutron.agent.common import utils as common_utils from neutron.agent.l3 import agent as l3_agent from neutron.agent.l3 import dvr_edge_ha_router from neutron.agent.l3 import dvr_edge_router as dvr_router from neutron.agent.l3 import dvr_local_router from neutron.agent.l3 import dvr_router_base from neutron.agent.l3 import dvr_snat_ns from neutron.agent.l3 import ha_router from neutron.agent.l3 import legacy_router from neutron.agent.l3 import link_local_allocator as lla from neutron.agent.l3 import namespace_manager from neutron.agent.l3 import namespaces from neutron.agent.l3 import router_info as l3router from neutron.agent.linux import dibbler from neutron.agent.linux import interface from neutron.agent.linux import ip_lib from neutron.agent.linux import iptables_manager from neutron.agent.linux import pd from neutron.agent.linux import ra from neutron.agent.metadata import driver as metadata_driver from neutron.agent import rpc as agent_rpc from neutron.conf.agent import common as agent_config from neutron.conf.agent.l3 import config as l3_config from neutron.conf.agent.l3 import ha as ha_conf from neutron.conf import common as base_config from neutron.tests import base from neutron.tests.common import l3_test_common from neutron.tests.unit.agent.linux.test_utils import FakeUser _uuid = uuidutils.generate_uuid HOSTNAME = 'myhost' FAKE_ID = _uuid() FAKE_ID_2 = _uuid() FIP_PRI = 32768 class BasicRouterOperationsFramework(base.BaseTestCase): def setUp(self): super(BasicRouterOperationsFramework, self).setUp() mock.patch('eventlet.spawn').start() self.conf = agent_config.setup_conf() self.conf.register_opts(base_config.core_opts) log.register_options(self.conf) self.conf.register_opts(agent_config.AGENT_STATE_OPTS, 'AGENT') l3_config.register_l3_agent_config_opts(l3_config.OPTS, self.conf) ha_conf.register_l3_agent_ha_opts(self.conf) agent_config.register_interface_driver_opts_helper(self.conf) agent_config.register_process_monitor_opts(self.conf) agent_config.register_availability_zone_opts_helper(self.conf) agent_config.register_interface_opts(self.conf) agent_config.register_external_process_opts(self.conf) agent_config.register_pd_opts(self.conf) agent_config.register_ra_opts(self.conf) self.conf.set_override('interface_driver', 'neutron.agent.linux.interface.NullDriver') self.conf.set_override('state_path', cfg.CONF.state_path) self.conf.set_override('pd_dhcp_driver', '') self.device_exists_p = mock.patch( 'neutron.agent.linux.ip_lib.device_exists') self.device_exists = self.device_exists_p.start() self.list_network_namespaces_p = mock.patch( 'neutron.agent.linux.ip_lib.list_network_namespaces') self.list_network_namespaces = self.list_network_namespaces_p.start() self.ensure_dir = mock.patch( 'oslo_utils.fileutils.ensure_tree').start() mock.patch('neutron.agent.linux.keepalived.KeepalivedManager' '.get_full_config_file_path').start() self.utils_exec_p = 
        mock.patch(
            'neutron.agent.linux.utils.execute')
        self.utils_exec = self.utils_exec_p.start()

        self.utils_replace_file_p = mock.patch(
            'neutron_lib.utils.file.replace_file')
        self.utils_replace_file = self.utils_replace_file_p.start()

        self.external_process_p = mock.patch(
            'neutron.agent.linux.external_process.ProcessManager')
        self.external_process = self.external_process_p.start()
        self.process_monitor = mock.patch(
            'neutron.agent.linux.external_process.ProcessMonitor').start()

        self.send_adv_notif_p = mock.patch(
            'neutron.agent.linux.ip_lib.send_ip_addr_adv_notif')
        self.send_adv_notif = self.send_adv_notif_p.start()

        self.dvr_cls_p = mock.patch(
            'neutron.agent.linux.interface.NullDriver')
        driver_cls = self.dvr_cls_p.start()
        self.mock_driver = mock.MagicMock()
        self.mock_driver.DEV_NAME_LEN = (
            interface.LinuxInterfaceDriver.DEV_NAME_LEN)
        driver_cls.return_value = self.mock_driver

        self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
        ip_cls = self.ip_cls_p.start()
        self.mock_ip = mock.MagicMock()
        ip_cls.return_value = self.mock_ip

        self.mock_add_ip_rule = mock.patch.object(
            ip_lib, 'add_ip_rule').start()
        self.mock_delete_ip_rule = mock.patch.object(
            ip_lib, 'delete_ip_rule').start()

        ip_dev = mock.patch('neutron.agent.linux.ip_lib.IPDevice').start()
        self.mock_ip_dev = mock.MagicMock()
        ip_dev.return_value = self.mock_ip_dev

        self.l3pluginApi_cls_p = mock.patch(
            'neutron.agent.l3.agent.L3PluginApi')
        l3pluginApi_cls = self.l3pluginApi_cls_p.start()
        self.plugin_api = mock.MagicMock()
        l3pluginApi_cls.return_value = self.plugin_api

        self.looping_call_p = mock.patch(
            'oslo_service.loopingcall.FixedIntervalLoopingCall')
        self.looping_call_p.start()

        subnet_id_1 = _uuid()
        subnet_id_2 = _uuid()
        self.snat_ports = [{'subnets': [{'cidr': '152.2.0.0/16',
                                         'gateway_ip': '152.2.0.1',
                                         'id': subnet_id_1}],
                            'mtu': 1500,
                            'network_id': _uuid(),
                            'device_owner':
                                lib_constants.DEVICE_OWNER_ROUTER_SNAT,
                            'mac_address': 'fa:16:3e:80:8d:80',
                            'fixed_ips': [{'subnet_id': subnet_id_1,
                                           'ip_address': '152.2.0.13',
                                           'prefixlen': 16}],
                            'id': _uuid(), 'device_id': _uuid()},
                           {'subnets': [{'cidr': '152.10.0.0/16',
                                         'gateway_ip': '152.10.0.1',
                                         'id': subnet_id_2}],
                            'mtu': 1450,
                            'network_id': _uuid(),
                            'device_owner':
                                lib_constants.DEVICE_OWNER_ROUTER_SNAT,
                            'mac_address': 'fa:16:3e:80:8d:80',
                            'fixed_ips': [{'subnet_id': subnet_id_2,
                                           'ip_address': '152.10.0.13',
                                           'prefixlen': 16}],
                            'id': _uuid(), 'device_id': _uuid()}]

        self.ri_kwargs = {'agent_conf': self.conf,
                          'interface_driver': self.mock_driver}

    def _process_router_instance_for_agent(self, agent, ri, router):
        ri.router = router
        if not ri.radvd:
            ri.radvd = ra.DaemonMonitor(router['id'],
                                        ri.ns_name,
                                        agent.process_monitor,
                                        ri.get_internal_device_name,
                                        self.conf)
        ri.process()


class IptablesFixture(fixtures.Fixture):

    def _setUp(self):
        # We MUST save and restore random_fully because it is a class
        # attribute and could change state in some tests, which can cause
        # the other router test cases to randomly fail due to race
        # conditions.
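        # The fixture relies on fixtures.Fixture semantics: _setUp() runs
        # when a test calls useFixture(), and the _reset() registered via
        # addCleanup() below runs automatically at fixture teardown,
        # restoring the saved class attribute for subsequent tests.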
self.random_fully = iptables_manager.IptablesManager.random_fully iptables_manager.IptablesManager.random_fully = True self.addCleanup(self._reset) def _reset(self): iptables_manager.IptablesManager.random_fully = self.random_fully class TestBasicRouterOperations(BasicRouterOperationsFramework): def setUp(self): super(TestBasicRouterOperations, self).setUp() self.useFixture(IptablesFixture()) def test_request_id_changes(self): a = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.assertNotEqual(a.context.request_id, a.context.request_id) self.useFixture(IptablesFixture()) def test_init_ha_conf(self): with mock.patch('os.path.dirname', return_value='/etc/ha/'): l3_agent.L3NATAgent(HOSTNAME, self.conf) self.ensure_dir.assert_called_once_with('/etc/ha/', mode=0o755) def test_enqueue_state_change_router_not_found(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) non_existent_router = 42 # Make sure the exceptional code path has coverage agent.enqueue_state_change(non_existent_router, 'master') def _enqueue_state_change_transitions(self, transitions, num_called): self.conf.set_override('ha_vrrp_advert_int', 1) agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent._update_transition_state('router_id') with mock.patch.object(agent, '_get_router_info', return_value=None) \ as mock_get_router_info: for state in transitions: agent.enqueue_state_change('router_id', state) eventlet.sleep(0.2) # NOTE(ralonsoh): the wait process should be done inside the mock # context, to allow the spawned thread to call the mocked function # before the context ends. eventlet.sleep(self.conf.ha_vrrp_advert_int + 2) if num_called: mock_get_router_info.assert_has_calls( [mock.call('router_id') for _ in range(num_called)]) else: mock_get_router_info.assert_not_called() def test_enqueue_state_change_from_none_to_master(self): self._enqueue_state_change_transitions(['master'], 1) def test_enqueue_state_change_from_none_to_backup(self): self._enqueue_state_change_transitions(['backup'], 1) def test_enqueue_state_change_from_none_to_master_to_backup(self): self._enqueue_state_change_transitions(['master', 'backup'], 0) def test_enqueue_state_change_from_none_to_backup_to_master(self): self._enqueue_state_change_transitions(['backup', 'master'], 2) def test_enqueue_state_change_metadata_disable(self): self.conf.set_override('enable_metadata_proxy', False) self.conf.set_override('ha_vrrp_advert_int', 1) agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = mock.Mock() router_info = mock.MagicMock() agent.router_info[router.id] = router_info agent._update_metadata_proxy = mock.Mock() agent.enqueue_state_change(router.id, 'master') eventlet.sleep(self.conf.ha_vrrp_advert_int + 2) self.assertFalse(agent._update_metadata_proxy.call_count) def test_enqueue_state_change_l3_extension(self): self.conf.set_override('ha_vrrp_advert_int', 1) agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = mock.Mock() router_info = mock.MagicMock() router_info.agent = agent agent.router_info[router.id] = router_info agent.l3_ext_manager.ha_state_change = mock.Mock() agent.enqueue_state_change(router.id, 'master') eventlet.sleep(self.conf.ha_vrrp_advert_int + 2) agent.l3_ext_manager.ha_state_change.assert_called_once_with( agent.context, {'router_id': router.id, 'state': 'master', 'host': agent.host}) def test_enqueue_state_change_router_active_ha(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = {'distributed': False} router_info = mock.MagicMock(router=router) with mock.patch.object( agent.metadata_driver, 
'spawn_monitored_metadata_proxy' ) as spawn_metadata_proxy, mock.patch.object( agent.metadata_driver, 'destroy_monitored_metadata_proxy' ) as destroy_metadata_proxy: agent._update_metadata_proxy(router_info, "router_id", "master") spawn_metadata_proxy.assert_called() destroy_metadata_proxy.assert_not_called() def test_enqueue_state_change_router_standby_ha(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = {'distributed': False} router_info = mock.MagicMock(router=router) with mock.patch.object( agent.metadata_driver, 'spawn_monitored_metadata_proxy' ) as spawn_metadata_proxy, mock.patch.object( agent.metadata_driver, 'destroy_monitored_metadata_proxy' ) as destroy_metadata_proxy: agent._update_metadata_proxy(router_info, "router_id", "standby") spawn_metadata_proxy.assert_not_called() destroy_metadata_proxy.assert_called() def test_enqueue_state_change_router_standby_ha_dvr(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = {'distributed': True} router_info = mock.MagicMock(router=router) with mock.patch.object( agent.metadata_driver, 'spawn_monitored_metadata_proxy' ) as spawn_metadata_proxy, mock.patch.object( agent.metadata_driver, 'destroy_monitored_metadata_proxy' ) as destroy_metadata_proxy: agent._update_metadata_proxy(router_info, "router_id", "standby") spawn_metadata_proxy.assert_called() destroy_metadata_proxy.assert_not_called() def _test__configure_ipv6_params_helper(self, state, gw_port_id): with mock.patch.object(netutils, 'is_ipv6_enabled', return_value=True): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router_info = l3router.RouterInfo(agent, _uuid(), {}, **self.ri_kwargs) if gw_port_id: router_info.ex_gw_port = {'id': gw_port_id} expected_forwarding_state = state == 'master' with mock.patch.object( router_info.driver, "configure_ipv6_forwarding" ) as configure_ipv6_forwarding, mock.patch.object( router_info, "_configure_ipv6_params_on_gw" ) as configure_ipv6_on_gw: agent._configure_ipv6_params(router_info, state) if state == 'master': configure_ipv6_forwarding.assert_called_once_with( router_info.ns_name, 'all', expected_forwarding_state) else: configure_ipv6_forwarding.assert_not_called() if gw_port_id: interface_name = router_info.get_external_device_name( router_info.ex_gw_port['id']) configure_ipv6_on_gw.assert_called_once_with( router_info.ex_gw_port, router_info.ns_name, interface_name, expected_forwarding_state) else: configure_ipv6_on_gw.assert_not_called() def test__configure_ipv6_params_master(self): self._test__configure_ipv6_params_helper('master', gw_port_id=_uuid()) def test__configure_ipv6_params_backup(self): self._test__configure_ipv6_params_helper('backup', gw_port_id=_uuid()) def test__configure_ipv6_params_master_no_gw_port(self): self._test__configure_ipv6_params_helper('master', gw_port_id=None) def test__configure_ipv6_params_backup_no_gw_port(self): self._test__configure_ipv6_params_helper('backup', gw_port_id=None) def test_check_ha_state_for_router_master_standby(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = mock.Mock() router.id = '1234' router_info = mock.MagicMock() agent.router_info[router.id] = router_info router_info.ha_state = 'master' with mock.patch.object(agent.state_change_notifier, 'queue_event') as queue_event: agent.check_ha_state_for_router( router.id, lib_constants.HA_ROUTER_STATE_STANDBY) queue_event.assert_called_once_with((router.id, 'master')) def test_check_ha_state_for_router_standby_standby(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = mock.Mock() 
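        # The server-side state is STANDBY while the agent's cached
        # ha_state is 'master', so check_ha_state_for_router() is expected
        # to queue a (router_id, 'master') event to re-assert the local
        # state; the standby/standby test below expects no event instead.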
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router = mock.Mock()
        router.id = '1234'
        router_info = mock.MagicMock()
        agent.router_info[router.id] = router_info
        router_info.ha_state = 'backup'
        with mock.patch.object(agent.state_change_notifier,
                               'queue_event') as queue_event:
            agent.check_ha_state_for_router(
                router.id, lib_constants.HA_ROUTER_STATE_STANDBY)
            queue_event.assert_not_called()

    def test_periodic_sync_routers_task_raise_exception(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        self.plugin_api.get_router_ids.return_value = ['fake_id']
        self.plugin_api.get_routers.side_effect = ValueError
        self.assertRaises(ValueError,
                          agent.periodic_sync_routers_task,
                          agent.context)
        self.assertTrue(agent.fullsync)

    def test_l3_initial_report_state_done(self):
        with mock.patch.object(l3_agent.L3NATAgentWithStateReport,
                               'periodic_sync_routers_task'),\
                mock.patch.object(agent_rpc.PluginReportStateAPI,
                                  'report_state') as report_state,\
                mock.patch.object(eventlet, 'spawn_n'):

            agent = l3_agent.L3NATAgentWithStateReport(host=HOSTNAME,
                                                       conf=self.conf)

            self.assertTrue(agent.agent_state['start_flag'])
            agent.after_start()
            report_state.assert_called_once_with(agent.context,
                                                 agent.agent_state,
                                                 True)
            self.assertIsNone(agent.agent_state.get('start_flag'))

    def test_report_state_revival_logic(self):
        with mock.patch.object(agent_rpc.PluginReportStateAPI,
                               'report_state') as report_state:
            agent = l3_agent.L3NATAgentWithStateReport(host=HOSTNAME,
                                                       conf=self.conf)
            report_state.return_value = agent_consts.AGENT_REVIVED
            agent._report_state()
            self.assertTrue(agent.fullsync)

            agent.fullsync = False
            report_state.return_value = agent_consts.AGENT_ALIVE
            agent._report_state()
            self.assertFalse(agent.fullsync)

    def test_periodic_sync_routers_task_call_clean_stale_namespaces(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        self.plugin_api.get_routers.return_value = []
        agent.periodic_sync_routers_task(agent.context)
        self.assertFalse(agent.namespaces_manager._clean_stale)

    def test_periodic_sync_routers_task_call_ensure_snat_cleanup(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        agent.conf.agent_mode = 'dvr_snat'
        dvr_ha_router = {'id': _uuid(),
                         'external_gateway_info': {},
                         'routes': [],
                         'distributed': True,
                         'ha': True}
        dvr_router = {'id': _uuid(),
                      'external_gateway_info': {},
                      'routes': [],
                      'distributed': True,
                      'ha': False}
        routers = [dvr_router, dvr_ha_router]
        self.plugin_api.get_router_ids.return_value = [r['id'] for r
                                                       in routers]
        self.plugin_api.get_routers.return_value = routers
        with mock.patch.object(namespace_manager.NamespaceManager,
                               'ensure_snat_cleanup') as ensure_snat_cleanup:
            agent.periodic_sync_routers_task(agent.context)
            ensure_snat_cleanup.assert_called_once_with(dvr_router['id'])

    def test_periodic_sync_routers_task_call_clean_stale_meta_proxies(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        stale_router_ids = [_uuid(), _uuid()]
        active_routers = [{'id': _uuid()}, {'id': _uuid()}]
        self.plugin_api.get_router_ids.return_value = [r['id'] for r
                                                       in active_routers]
        self.plugin_api.get_routers.return_value = active_routers
        namespace_list = [namespaces.NS_PREFIX + r_id
                          for r_id in stale_router_ids]
        namespace_list += [namespaces.NS_PREFIX + r['id']
                           for r in active_routers]
        self.list_network_namespaces.return_value = namespace_list
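        # Proxies for namespaces that no longer map to an active router are
        # stale and should be destroyed by the periodic sync below.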
        driver = metadata_driver.MetadataDriver
        with mock.patch.object(
                driver, 'destroy_monitored_metadata_proxy') as destroy_proxy:
            agent.periodic_sync_routers_task(agent.context)

            expected_calls = [
                mock.call(mock.ANY, r_id, agent.conf,
                          namespaces.NS_PREFIX + r_id)
                for r_id in stale_router_ids]
            self.assertEqual(len(stale_router_ids),
                             destroy_proxy.call_count)
            destroy_proxy.assert_has_calls(expected_calls, any_order=True)

    def test__create_router_legacy_agent(self):
        router = {'distributed': False, 'ha': False}
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)

        router_info = agent._create_router(_uuid(), router)

        self.assertEqual(legacy_router.LegacyRouter, type(router_info))

    def test__create_router_ha_agent(self):
        router = {'distributed': False, 'ha': True}
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)

        router_info = agent._create_router(_uuid(), router)

        self.assertEqual(ha_router.HaRouter, type(router_info))

    def test__create_router_dvr_agent(self):
        router = {'distributed': True, 'ha': False}
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)

        router_info = agent._create_router(_uuid(), router)

        self.assertEqual(dvr_local_router.DvrLocalRouter, type(router_info))

    def test__create_router_dvr_agent_with_dvr_snat_mode(self):
        router = {'distributed': True, 'ha': False}
        self.conf.set_override('agent_mode',
                               lib_constants.L3_AGENT_MODE_DVR_SNAT)
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)

        router_info = agent._create_router(_uuid(), router)

        self.assertEqual(dvr_router.DvrEdgeRouter, type(router_info))

    def test__create_router_dvr_ha_agent(self):
        router = {'distributed': True, 'ha': True}
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)

        router_info = agent._create_router(_uuid(), router)

        self.assertEqual(dvr_local_router.DvrLocalRouter, type(router_info))

    def test__create_router_dvr_ha_agent_with_dvr_snat_mode(self):
        router = {'distributed': True, 'ha': True,
                  lib_constants.HA_INTERFACE_KEY: None}
        self.conf.set_override('agent_mode',
                               lib_constants.L3_AGENT_MODE_DVR_SNAT)
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)

        router_info = agent._create_router(_uuid(), router)

        self.assertEqual(dvr_router.DvrEdgeRouter, type(router_info))

        router = {'distributed': True, 'ha': True,
                  lib_constants.HA_INTERFACE_KEY: True}
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)

        router_info = agent._create_router(_uuid(), router)

        self.assertEqual(dvr_edge_ha_router.DvrEdgeHaRouter,
                         type(router_info))

    def test_router_info_create(self):
        id = _uuid()
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        ri = l3router.RouterInfo(agent, id, {}, **self.ri_kwargs)

        self.assertTrue(ri.ns_name.endswith(id))

    def test_router_info_create_with_router(self):
        ns_id = _uuid()
        subnet_id = _uuid()
        ex_gw_port = {'id': _uuid(),
                      'network_id': _uuid(),
                      'fixed_ips': [{'ip_address': '19.4.4.4',
                                     'prefixlen': 24,
                                     'subnet_id': subnet_id}],
                      'subnets': [{'id': subnet_id,
                                   'cidr': '19.4.4.0/24',
                                   'gateway_ip': '19.4.4.1'}]}
        router = {
            'id': _uuid(),
            'enable_snat': True,
            'routes': [],
            'gw_port': ex_gw_port}
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        ri = l3router.RouterInfo(agent, ns_id, router, **self.ri_kwargs)
        self.assertTrue(ri.ns_name.endswith(ns_id))
        self.assertEqual(router, ri.router)

    def test_agent_create(self):
        l3_agent.L3NATAgent(HOSTNAME, self.conf)

    def _test_internal_network_action(self, action):
        router = l3_test_common.prepare_router_data(num_internal_ports=2)
        router_id = router['id']
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        ri = l3router.RouterInfo(agent, router_id, router, **self.ri_kwargs)
        port = {'network_id': _uuid(),
                'id': _uuid(),
                'mac_address': 'ca:fe:de:ad:be:ef',
                'mtu': 1500,
                'fixed_ips': [{'subnet_id': _uuid(),
                               'ip_address': '99.0.1.9',
                               'prefixlen': 24}]}

        interface_name = ri.get_internal_device_name(port['id'])

        if action == 'add':
            self.device_exists.return_value = False
            ri.internal_network_added(port)
            self.assertEqual(1, self.mock_driver.plug.call_count)
            self.assertEqual(1,
                             self.mock_driver.init_router_port.call_count)
            self.send_adv_notif.assert_called_once_with(ri.ns_name,
                                                        interface_name,
                                                        '99.0.1.9')
        elif action == 'remove':
            self.device_exists.return_value = True
            ri.internal_network_removed(port)
            self.assertEqual(1, self.mock_driver.unplug.call_count)
        else:
            raise Exception("Invalid action %s" % action)

    @staticmethod
    def _fixed_ip_cidr(fixed_ip):
        return '%s/%s' % (fixed_ip['ip_address'], fixed_ip['prefixlen'])

    def _test_internal_network_action_dist(self, action, scope_match=False):
        router = l3_test_common.prepare_router_data(num_internal_ports=2)
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        self._set_ri_kwargs(agent, router['id'], router)
        ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
        subnet_id = _uuid()
        port = {'network_id': _uuid(),
                'id': _uuid(),
                'mac_address': 'ca:fe:de:ad:be:ef',
                'mtu': 1500,
                'fixed_ips': [{'subnet_id': subnet_id,
                               'ip_address': '99.0.1.9',
                               'prefixlen': 24}],
                'subnets': [{'id': subnet_id}]}

        ri.router['gw_port_host'] = HOSTNAME
        agent.host = HOSTNAME
        agent.conf.agent_mode = 'dvr_snat'
        sn_port = {'fixed_ips': [{'ip_address': '20.0.0.31',
                                  'subnet_id': _uuid()}],
                   'subnets': [{'gateway_ip': '20.0.0.1'}],
                   'extra_subnets': [{'cidr': '172.16.0.0/24'}],
                   'id': _uuid(),
                   'network_id': _uuid(),
                   'mtu': 1500,
                   'mac_address': 'ca:fe:de:ad:be:ef'}
        ex_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
                                     'prefixlen': 24,
                                     'subnet_id': _uuid()}],
                      'subnets': [{'gateway_ip': '20.0.0.1'}],
                      'extra_subnets': [{'cidr': '172.16.0.0/24'}],
                      'id': _uuid(),
                      portbindings.HOST_ID: HOSTNAME,
                      'network_id': _uuid(),
                      'mtu': 1500,
                      'mac_address': 'ca:fe:de:ad:be:ef'}
        ri.snat_ports = sn_port
        ri.ex_gw_port = ex_gw_port
        ri.snat_namespace = mock.Mock()
        if scope_match:
            ri._check_if_address_scopes_match = mock.Mock(return_value=True)
        else:
            ri._check_if_address_scopes_match = mock.Mock(return_value=False)
        if action == 'add':
            self.device_exists.return_value = False
            ri.get_snat_port_for_internal_port = mock.Mock(
                return_value=sn_port)
            ri._snat_redirect_add = mock.Mock()
            ri._set_subnet_arp_info = mock.Mock()
            ri._internal_network_added = mock.Mock()
            ri._set_subnet_arp_info = mock.Mock()
            ri._port_has_ipv6_subnet = mock.Mock(return_value=False)
            ri._add_interface_routing_rule_to_router_ns = mock.Mock()
            ri._add_interface_route_to_fip_ns = mock.Mock()
            ri.internal_network_added(port)
            self.assertEqual(2, ri._internal_network_added.call_count)
            ri._set_subnet_arp_info.assert_called_once_with(subnet_id)
            ri._internal_network_added.assert_called_with(
                dvr_snat_ns.SnatNamespace.get_snat_ns_name(ri.router['id']),
                sn_port['network_id'],
                sn_port['id'],
                sn_port['fixed_ips'],
                sn_port['mac_address'],
                ri._get_snat_int_device_name(sn_port['id']),
                lib_constants.SNAT_INT_DEV_PREFIX,
                mtu=1500)
            self.assertTrue(ri._check_if_address_scopes_match.called)
            if scope_match:
                self.assertTrue(
                    ri._add_interface_routing_rule_to_router_ns.called)
                self.assertTrue(
                    ri._add_interface_route_to_fip_ns.called)
                self.assertEqual(0, ri._snat_redirect_add.call_count)
            else:
                self.assertFalse(
                    ri._add_interface_routing_rule_to_router_ns.called)
                self.assertFalse(
                    ri._add_interface_route_to_fip_ns.called)
                self.assertEqual(1, ri._snat_redirect_add.call_count)
        elif action == 'remove':
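            # Removal path: stub out the DVR helpers and verify which of
            # them fire depending on whether the address scopes match.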
            self.device_exists.return_value = False
            ri.get_snat_port_for_internal_port = mock.Mock(
                return_value=sn_port)
            ri._delete_arp_cache_for_internal_port = mock.Mock()
            ri._snat_redirect_modify = mock.Mock()
            ri._port_has_ipv6_subnet = mock.Mock(return_value=False)
            ri._delete_interface_routing_rule_in_router_ns = mock.Mock()
            ri._delete_interface_route_in_fip_ns = mock.Mock()
            ri.internal_network_removed(port)
            self.assertEqual(
                1, ri._delete_arp_cache_for_internal_port.call_count)
            self.assertTrue(ri._check_if_address_scopes_match.called)
            if scope_match:
                self.assertFalse(ri._snat_redirect_modify.called)
                self.assertTrue(
                    ri._delete_interface_routing_rule_in_router_ns.called)
                self.assertTrue(
                    ri._delete_interface_route_in_fip_ns.called)
            else:
                ri._snat_redirect_modify.assert_called_with(
                    sn_port, port,
                    ri.get_internal_device_name(port['id']),
                    is_add=False)
                self.assertFalse(
                    ri._delete_interface_routing_rule_in_router_ns.called)
                self.assertFalse(
                    ri._delete_interface_route_in_fip_ns.called)

    def test_agent_add_internal_network(self):
        self._test_internal_network_action('add')

    def test_agent_add_internal_network_dist(self):
        self._test_internal_network_action_dist('add')

    def test_agent_add_internal_network_dist_with_addr_scope_match(self):
        self._test_internal_network_action_dist('add', scope_match=True)

    def test_agent_remove_internal_network(self):
        self._test_internal_network_action('remove')

    def test_agent_remove_internal_network_dist_with_addr_scope_mismatch(
            self):
        self._test_internal_network_action_dist('remove', scope_match=True)

    def test_agent_remove_internal_network_dist(self):
        self._test_internal_network_action_dist('remove')

    def _add_external_gateway(self, ri, router, ex_gw_port, interface_name,
                              use_fake_fip=False,
                              no_subnet=False, no_sub_gw=None,
                              dual_stack=False):
        self.device_exists.return_value = False
        if no_sub_gw is None:
            no_sub_gw = []
        if use_fake_fip:
            fake_fip = {'floatingips': [{'id': _uuid(),
                                         'floating_ip_address': '192.168.1.34',
                                         'fixed_ip_address': '192.168.0.1',
                                         'port_id': _uuid()}]}
            router[lib_constants.FLOATINGIP_KEY] = fake_fip['floatingips']
        ri.external_gateway_added(ex_gw_port, interface_name)
        if not router.get('distributed'):
            self.assertEqual(1, self.mock_driver.plug.call_count)
            self.assertEqual(1, self.mock_driver.init_router_port.call_count)
            if no_subnet and not dual_stack:
                self.assertEqual(0, self.send_adv_notif.call_count)
                ip_cidrs = []
                kwargs = {'preserve_ips': [],
                          'namespace': 'qrouter-' + router['id'],
                          'extra_subnets': [],
                          'clean_connections': True}
            else:
                exp_arp_calls = [mock.call(ri.ns_name, interface_name,
                                           '20.0.0.30')]
                if dual_stack and not no_sub_gw:
                    exp_arp_calls += [mock.call(ri.ns_name, interface_name,
                                                '2001:192:168:100::2')]
                self.send_adv_notif.assert_has_calls(exp_arp_calls)
                ip_cidrs = ['20.0.0.30/24']
                if dual_stack:
                    if not no_sub_gw:
                        ip_cidrs.append('2001:192:168:100::2/64')
                kwargs = {'preserve_ips': ['192.168.1.34/32'],
                          'namespace': 'qrouter-' + router['id'],
                          'extra_subnets': [{'cidr': '172.16.0.0/24'}],
                          'clean_connections': True}
            self.mock_driver.init_router_port.assert_called_with(
                interface_name, ip_cidrs, **kwargs)
        else:
            ri._create_dvr_gateway.assert_called_once_with(
                ex_gw_port, interface_name)

    def _set_ri_kwargs(self, agent, router_id, router):
        self.ri_kwargs['agent'] = agent
        self.ri_kwargs['router_id'] = router_id
        self.ri_kwargs['router'] = router

    def _test_external_gateway_action(self, action, router, dual_stack=False):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        ex_net_id = _uuid()
        sn_port = self.snat_ports[1]
        # Special setup for dvr routers
        if router.get('distributed'):
            agent.conf.agent_mode = 'dvr_snat'
            agent.host = HOSTNAME
            self._set_ri_kwargs(agent, router['id'], router)
            ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
            ri._create_dvr_gateway = mock.Mock()
            ri.get_snat_interfaces = mock.Mock(return_value=self.snat_ports)
            ri.snat_ports = self.snat_ports
            ri._create_snat_namespace()
            ri.fip_ns = agent.get_fip_ns(ex_net_id)
            ri.internal_ports = self.snat_ports
        else:
            ri = l3router.RouterInfo(
                agent, router['id'], router, **self.ri_kwargs)

        ri.use_ipv6 = False
        subnet_id = _uuid()
        fixed_ips = [{'subnet_id': subnet_id,
                      'ip_address': '20.0.0.30',
                      'prefixlen': 24}]
        subnets = [{'id': subnet_id,
                    'cidr': '20.0.0.0/24',
                    'gateway_ip': '20.0.0.1'}]
        if dual_stack:
            ri.use_ipv6 = True
            subnet_id_v6 = _uuid()
            fixed_ips.append({'subnet_id': subnet_id_v6,
                              'ip_address': '2001:192:168:100::2',
                              'prefixlen': 64})
            subnets.append({'id': subnet_id_v6,
                            'cidr': '2001:192:168:100::/64',
                            'gateway_ip': '2001:192:168:100::1'})
        ex_gw_port = {'fixed_ips': fixed_ips,
                      'subnets': subnets,
                      'extra_subnets': [{'cidr': '172.16.0.0/24'}],
                      'id': _uuid(),
                      'network_id': ex_net_id,
                      'mtu': 1500,
                      'mac_address': 'ca:fe:de:ad:be:ef'}
        ex_gw_port_no_sub = {'fixed_ips': [],
                             'id': _uuid(),
                             'network_id': ex_net_id,
                             'mtu': 1500,
                             'mac_address': 'ca:fe:de:ad:be:ef'}
        interface_name = ri.get_external_device_name(ex_gw_port['id'])

        if action == 'add':
            self._add_external_gateway(ri, router, ex_gw_port,
                                       interface_name,
                                       use_fake_fip=True,
                                       dual_stack=dual_stack)
        elif action == 'add_no_sub':
            ri.use_ipv6 = True
            self._add_external_gateway(ri, router,
                                       ex_gw_port_no_sub,
                                       interface_name,
                                       no_subnet=True)
        elif action == 'add_no_sub_v6_gw':
            ri.use_ipv6 = True
            self.conf.set_override('ipv6_gateway',
                                   'fe80::f816:3eff:fe2e:1')
            if dual_stack:
                use_fake_fip = True
                # Remove v6 entries
                del ex_gw_port['fixed_ips'][-1]
                del ex_gw_port['subnets'][-1]
            else:
                use_fake_fip = False
                ex_gw_port = ex_gw_port_no_sub
            self._add_external_gateway(ri, router, ex_gw_port,
                                       interface_name, no_subnet=True,
                                       no_sub_gw='fe80::f816:3eff:fe2e:1',
                                       use_fake_fip=use_fake_fip,
                                       dual_stack=dual_stack)
        elif action == 'remove':
            self.device_exists.return_value = True
            ri.get_snat_port_for_internal_port = mock.Mock(
                return_value=sn_port)
            ri._snat_redirect_remove = mock.Mock()
            if router.get('distributed'):
                ri.snat_iptables_manager = iptables_manager.IptablesManager(
                    namespace=ri.snat_namespace.name,
                    use_ipv6=ri.use_ipv6)
                ri.fip_ns.delete_rtr_2_fip_link = mock.Mock()
            ri.router['gw_port'] = ""
            ri.external_gateway_removed(ex_gw_port, interface_name)
            if not router.get('distributed'):
                self.mock_driver.unplug.assert_called_once_with(
                    interface_name, namespace=mock.ANY,
                    prefix=mock.ANY)
            else:
                ri._snat_redirect_remove.assert_called_with(
                    sn_port, sn_port,
                    ri.get_internal_device_name(sn_port['id']))
                ri.get_snat_port_for_internal_port.assert_called_with(
                    mock.ANY, ri.snat_ports)
                self.assertTrue(ri.fip_ns.delete_rtr_2_fip_link.called)
        else:
            raise Exception("Invalid action %s" % action)

    def _test_external_gateway_updated(self, dual_stack=False):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router = l3_test_common.prepare_router_data(num_internal_ports=2)
        ri = l3router.RouterInfo(agent, router['id'], router,
                                 **self.ri_kwargs)
        ri.use_ipv6 = False
        interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(
            self, ri, dual_stack=dual_stack)

        fake_fip = {'floatingips': [{'id': _uuid(),
                                     'floating_ip_address': '192.168.1.34',
                                     'fixed_ip_address': '192.168.0.1',
                                     'port_id': _uuid()}]}
        router[lib_constants.FLOATINGIP_KEY] = fake_fip['floatingips']
        ri.external_gateway_updated(ex_gw_port, interface_name)
        self.assertEqual(1, self.mock_driver.plug.call_count)
        self.assertEqual(1, self.mock_driver.init_router_port.call_count)
        exp_arp_calls = [mock.call(ri.ns_name, interface_name,
                                   '20.0.0.30')]
        if dual_stack:
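            # A dual-stack gateway advertises the IPv6 address as well.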
            ri.use_ipv6 = True
            exp_arp_calls += [mock.call(ri.ns_name, interface_name,
                                        '2001:192:168:100::2')]
        self.send_adv_notif.assert_has_calls(exp_arp_calls)
        ip_cidrs = ['20.0.0.30/24']
        gateway_ips = ['20.0.0.1']
        if dual_stack:
            ip_cidrs.append('2001:192:168:100::2/64')
            gateway_ips.append('2001:192:168:100::1')
        kwargs = {'preserve_ips': ['192.168.1.34/32'],
                  'namespace': 'qrouter-' + router['id'],
                  'extra_subnets': [{'cidr': '172.16.0.0/24'}],
                  'clean_connections': True}
        self.mock_driver.init_router_port.assert_called_with(interface_name,
                                                             ip_cidrs,
                                                             **kwargs)

    def test_external_gateway_updated(self):
        self._test_external_gateway_updated()

    def test_external_gateway_updated_dual_stack(self):
        self._test_external_gateway_updated(dual_stack=True)

    def test_external_gateway_updated_dvr(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        agent.conf.agent_mode = 'dvr_snat'
        agent.host = HOSTNAME
        router = l3_test_common.prepare_router_data(num_internal_ports=2)
        router['distributed'] = True
        router['gw_port_host'] = HOSTNAME
        self._set_ri_kwargs(agent, router['id'], router)
        ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
        ri._create_dvr_gateway = mock.Mock()
        ri.get_snat_interfaces = mock.Mock(return_value=self.snat_ports)
        ri.snat_ports = self.snat_ports
        ri._create_snat_namespace()
        ex_net_id = _uuid()
        ri.fip_ns = agent.get_fip_ns(ex_net_id)
        ri.internal_ports = self.snat_ports
        ri.use_ipv6 = False
        interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(
            self, ri)

        fake_fip = {'floatingips': [{'id': _uuid(),
                                     'floating_ip_address': '192.168.1.34',
                                     'fixed_ip_address': '192.168.0.1',
                                     'port_id': _uuid(),
                                     'dvr_snat_bound': True}]}
        router[lib_constants.FLOATINGIP_KEY] = fake_fip['floatingips']
        ri.external_gateway_updated(ex_gw_port, interface_name)
        self.assertEqual(1, self.mock_driver.plug.call_count)
        self.assertEqual(1, self.mock_driver.init_router_port.call_count)
        exp_arp_calls = [mock.call(ri.snat_namespace.name, interface_name,
                                   '20.0.0.30')]
        self.send_adv_notif.assert_has_calls(exp_arp_calls)
        ip_cidrs = ['20.0.0.30/24']
        kwargs = {'preserve_ips': ['192.168.1.34/32'],
                  'namespace': ri.snat_namespace.name,
                  'extra_subnets': [{'cidr': '172.16.0.0/24'}],
                  'clean_connections': True}
        self.mock_driver.init_router_port.assert_called_with(interface_name,
                                                             ip_cidrs,
                                                             **kwargs)

    def test_dvr_edge_router_init_for_snat_namespace_object(self):
        router = {'id': _uuid()}
        self._set_ri_kwargs(mock.Mock(), router['id'], router)
        ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
        # Make sure that ri.snat_namespace object is created when the
        # router is initialized, and that its name matches the gw
        # namespace name
        self.assertIsNotNone(ri.snat_namespace)
        self.assertEqual(ri.snat_namespace.name, ri.get_gw_ns_name())

    def test_ext_gw_updated_calling_snat_ns_delete_if_gw_port_host_none(
            self):
        """Test to check the impact of snat_namespace object.

        This function specifically checks the impact of the snat
        namespace object value on external_gateway_removed for deleting
        snat_namespace when the gw_port_host mismatches or is None.
        """
""" router = l3_test_common.prepare_router_data(num_internal_ports=2) self._set_ri_kwargs(mock.Mock(), router['id'], router) ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs) with mock.patch.object(dvr_snat_ns.SnatNamespace, 'delete') as snat_ns_delete: interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test( self, ri) router['gw_port_host'] = '' ri._snat_redirect_remove = mock.Mock() ri.external_gateway_updated(ex_gw_port, interface_name) if router['gw_port_host'] != ri.host: self.assertFalse(ri._snat_redirect_remove.called) self.assertEqual(1, snat_ns_delete.call_count) @mock.patch.object(namespaces.Namespace, 'delete') def test_snat_ns_delete_not_called_when_snat_namespace_does_not_exist( self, mock_ns_del): """Test to check the impact of snat_namespace object. This function specifically checks the impact of the snat namespace object initialization without the actual creation of snat_namespace. When deletes are issued to the snat namespace based on the snat namespace object existence, it should be checking for the valid namespace existence before it tries to delete. """ router = l3_test_common.prepare_router_data(num_internal_ports=2) self._set_ri_kwargs(mock.Mock(), router['id'], router) ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs) # Make sure we set a return value to emulate the non existence # of the namespace. self.mock_ip.netns.exists.return_value = False self.assertIsNotNone(ri.snat_namespace) interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(self, ri) ri._external_gateway_removed = mock.Mock() ri.external_gateway_removed(ex_gw_port, interface_name) self.assertFalse(mock_ns_del.called) def _test_ext_gw_updated_dvr_edge_router(self, host_match, snat_hosted_before=True): """Helper to test external gw update for edge router on dvr_snat agent :param host_match: True if new gw host should be the same as agent host :param snat_hosted_before: True if agent has already been hosting snat for the router """ router = l3_test_common.prepare_router_data(num_internal_ports=2) self._set_ri_kwargs(mock.Mock(), router['id'], router) ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs) if snat_hosted_before: ri._create_snat_namespace() snat_ns_name = ri.snat_namespace.name interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(self, ri) ri._external_gateway_added = mock.Mock() router['gw_port_host'] = ri.host if host_match else (ri.host + 'foo') ri.external_gateway_updated(ex_gw_port, interface_name) if not host_match: self.assertFalse(ri._external_gateway_added.called) if snat_hosted_before: # host mismatch means that snat was rescheduled to another # agent, hence need to verify that gw port was unplugged and # snat namespace was deleted self.mock_driver.unplug.assert_called_with( interface_name, namespace=snat_ns_name, prefix=namespaces.EXTERNAL_DEV_PREFIX) else: if not snat_hosted_before: self.assertIsNotNone(ri.snat_namespace) self.assertTrue(ri._external_gateway_added.called) def test_ext_gw_updated_dvr_edge_router(self): self._test_ext_gw_updated_dvr_edge_router(host_match=True) def test_ext_gw_updated_dvr_edge_router_host_mismatch(self): self._test_ext_gw_updated_dvr_edge_router(host_match=False) def test_ext_gw_updated_dvr_edge_router_snat_rescheduled(self): self._test_ext_gw_updated_dvr_edge_router(host_match=True, snat_hosted_before=False) def test_agent_add_external_gateway(self): router = l3_test_common.prepare_router_data(num_internal_ports=2) self._test_external_gateway_action('add', router) def 

    def test_agent_add_external_gateway_dual_stack(self):
        router = l3_test_common.prepare_router_data(num_internal_ports=2)
        self._test_external_gateway_action('add', router, dual_stack=True)

    def test_agent_add_external_gateway_dist(self):
        router = l3_test_common.prepare_router_data(num_internal_ports=2)
        router['distributed'] = True
        router['gw_port_host'] = HOSTNAME
        self._test_external_gateway_action('add', router)

    def test_agent_add_external_gateway_dist_dual_stack(self):
        router = l3_test_common.prepare_router_data(num_internal_ports=2)
        router['distributed'] = True
        router['gw_port_host'] = HOSTNAME
        self._test_external_gateway_action('add', router, dual_stack=True)

    def test_agent_add_external_gateway_no_subnet(self):
        router = l3_test_common.prepare_router_data(num_internal_ports=2,
                                                    v6_ext_gw_with_sub=False)
        self._test_external_gateway_action('add_no_sub', router)

    def test_agent_add_external_gateway_no_subnet_with_ipv6_gw(self):
        router = l3_test_common.prepare_router_data(num_internal_ports=2,
                                                    v6_ext_gw_with_sub=False)
        self._test_external_gateway_action('add_no_sub_v6_gw', router)

    def test_agent_add_external_gateway_dual_stack_no_subnet_w_ipv6_gw(self):
        router = l3_test_common.prepare_router_data(num_internal_ports=2,
                                                    v6_ext_gw_with_sub=False)
        self._test_external_gateway_action('add_no_sub_v6_gw',
                                           router, dual_stack=True)

    def test_agent_remove_external_gateway(self):
        router = l3_test_common.prepare_router_data(num_internal_ports=2)
        self._test_external_gateway_action('remove', router)

    def test_agent_remove_external_gateway_dual_stack(self):
        router = l3_test_common.prepare_router_data(num_internal_ports=2)
        self._test_external_gateway_action('remove', router, dual_stack=True)

    def test_agent_remove_external_gateway_dist(self):
        router = l3_test_common.prepare_router_data(num_internal_ports=2)
        router['distributed'] = True
        router['gw_port_host'] = HOSTNAME
        self._test_external_gateway_action('remove', router)

    def test_agent_remove_external_gateway_dist_dual_stack(self):
        router = l3_test_common.prepare_router_data(num_internal_ports=2)
        router['distributed'] = True
        router['gw_port_host'] = HOSTNAME
        self._test_external_gateway_action('remove', router,
                                           dual_stack=True)

    def _verify_snat_mangle_rules(self, nat_rules, mangle_rules, router,
                                  random_fully, negate=False):
        interfaces = router[lib_constants.INTERFACE_KEY]
        source_cidrs = []
        for iface in interfaces:
            for subnet in iface['subnets']:
                prefix = subnet['cidr'].split('/')[1]
                source_cidr = "%s/%s" % (
                    iface['fixed_ips'][0]['ip_address'], prefix)
                source_cidrs.append(source_cidr)
        source_nat_ip = router['gw_port']['fixed_ips'][0]['ip_address']
        interface_name = ('qg-%s' % router['gw_port']['id'])[:14]
        mask_rule = ('-m mark ! --mark 0x2/%s -m conntrack --ctstate DNAT '
                     '-j SNAT --to-source %s' %
                     (lib_constants.ROUTER_MARK_MASK, source_nat_ip))
        snat_rule = ('-o %s -j SNAT --to-source %s' %
                     (interface_name, source_nat_ip))
        if random_fully:
            mask_rule += ' --random-fully'
            snat_rule += ' --random-fully'
        expected_rules = [
            '! -i %s ! -o %s -m conntrack ! --ctstate DNAT -j ACCEPT' %
            (interface_name, interface_name),
            mask_rule, snat_rule]
        for r in nat_rules:
            if negate:
                self.assertNotIn(r.rule, expected_rules)
            else:
                self.assertIn(r.rule, expected_rules)
        expected_rules = [
            '-i %s -j MARK --set-xmark 0x2/%s' %
            (interface_name, lib_constants.ROUTER_MARK_MASK),
            '-o %s -m connmark --mark 0x0/%s -j CONNMARK '
            '--save-mark --nfmask %s --ctmask %s' %
            (interface_name,
             l3router.ADDRESS_SCOPE_MARK_MASK,
             l3router.ADDRESS_SCOPE_MARK_MASK,
             l3router.ADDRESS_SCOPE_MARK_MASK)]
        for r in mangle_rules:
            if negate:
                self.assertNotIn(r.rule, expected_rules)
            else:
                self.assertIn(r.rule, expected_rules)

    @mock.patch.object(dvr_router_base.LOG, 'error')
    def test_get_snat_port_for_internal_port(self, log_error):
        router = l3_test_common.prepare_router_data(num_internal_ports=4)
        self._set_ri_kwargs(mock.Mock(), router['id'], router)
        ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
        test_port = {
            'mac_address': '00:12:23:34:45:56',
            'fixed_ips': [{'subnet_id': l3_test_common.get_subnet_id(
                router[lib_constants.INTERFACE_KEY][0]),
                'ip_address': '101.12.13.14'}]}
        internal_ports = ri.router.get(lib_constants.INTERFACE_KEY, [])
        # test valid case
        with mock.patch.object(ri, 'get_snat_interfaces') as get_interfaces:
            get_interfaces.return_value = [test_port]
            res_port = ri.get_snat_port_for_internal_port(internal_ports[0])
            self.assertEqual(test_port, res_port)
            # test invalid case
            test_port['fixed_ips'][0]['subnet_id'] = 1234
            res_ip = ri.get_snat_port_for_internal_port(internal_ports[0])
            self.assertNotEqual(test_port, res_ip)
            self.assertIsNone(res_ip)
            self.assertTrue(log_error.called)

    @mock.patch.object(dvr_router_base.LOG, 'error')
    def test_get_snat_port_for_internal_port_ipv6_same_port(self, log_error):
        router = l3_test_common.prepare_router_data(
            ip_version=lib_constants.IP_VERSION_4, enable_snat=True,
            num_internal_ports=1)
        ri = dvr_router.DvrEdgeRouter(mock.sentinel.agent,
                                      HOSTNAME,
                                      router['id'],
                                      router,
                                      **self.ri_kwargs)

        # Add two additional IPv6 prefixes on the same interface
        l3_test_common.router_append_interface(
            router, count=2, ip_version=lib_constants.IP_VERSION_6,
            same_port=True)
        internal_ports = ri.router.get(lib_constants.INTERFACE_KEY, [])
        with mock.patch.object(ri, 'get_snat_interfaces') as get_interfaces:
            get_interfaces.return_value = internal_ports

            # get the second internal interface in the list
            res_port = ri.get_snat_port_for_internal_port(internal_ports[1])
            self.assertEqual(internal_ports[1], res_port)

            # tweak the first subnet_id, should still find port based
            # on second subnet_id
            test_port = copy.deepcopy(res_port)
            test_port['fixed_ips'][0]['subnet_id'] = 1234
            res_ip = ri.get_snat_port_for_internal_port(test_port)
            self.assertEqual(internal_ports[1], res_ip)

            # tweak the second subnet_id, shouldn't match now
            test_port['fixed_ips'][1]['subnet_id'] = 1234
            res_ip = ri.get_snat_port_for_internal_port(test_port)
            self.assertIsNone(res_ip)
            self.assertTrue(log_error.called)

    def test_process_cent_router(self):
        router = l3_test_common.prepare_router_data()
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        ri = l3router.RouterInfo(agent, router['id'],
                                 router, **self.ri_kwargs)
        self._test_process_router(ri, agent)

    def test_process_dist_router(self):
        router = l3_test_common.prepare_router_data()
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        self._set_ri_kwargs(agent, router['id'], router)
        ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
        ri.snat_iptables_manager = iptables_manager.IptablesManager(
            namespace=ri.snat_namespace.name, use_ipv6=ri.use_ipv6)
        subnet_id = l3_test_common.get_subnet_id(
            router[lib_constants.INTERFACE_KEY][0])
        ri.router['distributed'] = True
        ri.router['_snat_router_interfaces'] = [{
            'fixed_ips': [{'subnet_id': subnet_id,
                           'ip_address': '1.2.3.4'}]}]
        ri.router['gw_port_host'] = None
        self._test_process_router(ri, agent, is_dvr_edge=True)

    def _test_process_router(self, ri, agent, is_dvr_edge=False):
        router = ri.router
        agent.host = HOSTNAME
        fake_fip_id = 'fake_fip_id'
        ri.create_dvr_external_gateway_on_agent = mock.Mock()
        ri.process_floating_ip_addresses = mock.Mock()
        ri.process_floating_ip_nat_rules = mock.Mock()
        ri.process_floating_ip_nat_rules_for_centralized_floatingip = (
            mock.Mock())
        ri.process_floating_ip_addresses.return_value = {
            fake_fip_id: 'ACTIVE'}
        ri.external_gateway_added = mock.Mock()
        ri.external_gateway_updated = mock.Mock()
        ri.process_address_scope = mock.Mock()
        fake_floatingips1 = {'floatingips': [
            {'id': fake_fip_id,
             'floating_ip_address': '8.8.8.8',
             'fixed_ip_address': '7.7.7.7',
             'port_id': _uuid(),
             'host': HOSTNAME}]}
        ri.process()
        ri.process_floating_ip_addresses.assert_called_with(mock.ANY)
        ri.process_floating_ip_addresses.reset_mock()
        if not is_dvr_edge:
            ri.process_floating_ip_nat_rules.assert_called_with()
            ri.process_floating_ip_nat_rules.reset_mock()
        elif ri.router.get('gw_port_host') == agent.host:
            ri.process_floating_ip_nat_rules_for_centralized_floatingip.\
                assert_called_with()
            ri.process_floating_ip_nat_rules_for_centralized_floatingip.\
                reset_mock()
        ri.external_gateway_added.reset_mock()

        # remap floating IP to a new fixed ip
        fake_floatingips2 = copy.deepcopy(fake_floatingips1)
        fake_floatingips2['floatingips'][0]['fixed_ip_address'] = '7.7.7.8'
        router[lib_constants.FLOATINGIP_KEY] = fake_floatingips2['floatingips']
        ri.process()
        ri.process_floating_ip_addresses.assert_called_with(mock.ANY)
        ri.process_floating_ip_addresses.reset_mock()
        if not is_dvr_edge:
            ri.process_floating_ip_nat_rules.assert_called_with()
            ri.process_floating_ip_nat_rules.reset_mock()
        elif ri.router.get('gw_port_host') == agent.host:
            ri.process_floating_ip_nat_rules_for_centralized_floatingip.\
                assert_called_with()
            ri.process_floating_ip_nat_rules_for_centralized_floatingip.\
                reset_mock()
        self.assertEqual(0, ri.external_gateway_added.call_count)
        self.assertEqual(0, ri.external_gateway_updated.call_count)
        ri.external_gateway_added.reset_mock()
        ri.external_gateway_updated.reset_mock()

        # change the ex_gw_port a bit to test gateway update
        new_gw_port = copy.deepcopy(ri.router['gw_port'])
        ri.router['gw_port'] = new_gw_port
        old_ip = (netaddr.IPAddress(ri.router['gw_port']
                                    ['fixed_ips'][0]['ip_address']))
        ri.router['gw_port']['fixed_ips'][0]['ip_address'] = str(old_ip + 1)
        ri.process()
        ri.process_floating_ip_addresses.reset_mock()
        ri.process_floating_ip_nat_rules.reset_mock()
        self.assertEqual(0, ri.external_gateway_added.call_count)
        self.assertEqual(1, ri.external_gateway_updated.call_count)

        # remove just the floating ips
        del router[lib_constants.FLOATINGIP_KEY]
        ri.process()
        ri.process_floating_ip_addresses.assert_called_with(mock.ANY)
        ri.process_floating_ip_addresses.reset_mock()
        if not is_dvr_edge:
            ri.process_floating_ip_nat_rules.assert_called_with()
            ri.process_floating_ip_nat_rules.reset_mock()
        elif ri.router.get('gw_port_host') == agent.host:
            ri.process_floating_ip_nat_rules_for_centralized_floatingip.\
                assert_called_with()
            ri.process_floating_ip_nat_rules_for_centralized_floatingip.\
                reset_mock()

        # now no ports so state is torn down
        del router[lib_constants.INTERFACE_KEY]
        del router['gw_port']
        ri.process()
        self.assertEqual(1, self.send_adv_notif.call_count)
        distributed = ri.router.get('distributed', False)
        self.assertEqual(distributed, ri.process_floating_ip_addresses.called)
        self.assertEqual(distributed, ri.process_floating_ip_nat_rules.called)

    def _test_process_floating_ip_addresses_add(self, ri, agent):
        floating_ips = ri.get_floating_ips()
        fip_id = floating_ips[0]['id']

        device = self.mock_ip_dev
        device.addr.list.return_value = []
        ri.iptables_manager.ipv4['nat'] = mock.MagicMock()
        ex_gw_port = {'id': _uuid(), 'network_id': mock.sentinel.ext_net_id}

        ri.add_floating_ip = mock.Mock(
            return_value=lib_constants.FLOATINGIP_STATUS_ACTIVE)
        with mock.patch.object(lla.LinkLocalAllocator, '_write'):
            if ri.router['distributed']:
                ri.fip_ns = agent.get_fip_ns(ex_gw_port['network_id'])
                ri.create_dvr_external_gateway_on_agent(ex_gw_port)
            fip_statuses = ri.process_floating_ip_addresses(
                mock.sentinel.interface_name)
        self.assertEqual({fip_id: lib_constants.FLOATINGIP_STATUS_ACTIVE},
                         fip_statuses)
        ri.add_floating_ip.assert_called_once_with(
            floating_ips[0], mock.sentinel.interface_name, device)

    @mock.patch.object(lla.LinkLocalAllocator, '_write')
    def test_create_dvr_fip_interfaces_if_fipnamespace_exist(self, lla_write):
        fake_network_id = _uuid()
        subnet_id = _uuid()
        fake_floatingips = {'floatingips': [
            {'id': _uuid(),
             'floating_ip_address': '20.0.0.3',
             'fixed_ip_address': '192.168.0.1',
             'floating_network_id': _uuid(),
             'port_id': _uuid(),
             'host': HOSTNAME}]}
        agent_gateway_port = (
            [{'fixed_ips': [
                {'ip_address': '20.0.0.30',
                 'prefixlen': 24,
                 'subnet_id': subnet_id}],
              'subnets': [
                  {'id': subnet_id,
                   'cidr': '20.0.0.0/24',
                   'gateway_ip': '20.0.0.1'}],
              'id': _uuid(),
              'network_id': fake_network_id,
              'mtu': 1500,
              'mac_address': 'ca:fe:de:ad:be:ef'}]
        )
        router = l3_test_common.prepare_router_data(enable_snat=True)
        router[lib_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
        router[lib_constants.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port
        router['distributed'] = True
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        self._set_ri_kwargs(agent, router['id'], router)
        ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
        ext_gw_port = ri.router.get('gw_port')
        ri.fip_ns = agent.get_fip_ns(ext_gw_port['network_id'])
        agent.process_router_add = mock.Mock()
        ri.fip_ns.create_rtr_2_fip_link = mock.Mock()
        with mock.patch.object(ri, 'get_floating_ips') as fips, \
                mock.patch.object(ri.fip_ns, 'create') as create_fip, \
                mock.patch.object(ri, 'get_floating_agent_gw_interface'
                                  ) as fip_gw_port:
            fips.return_value = fake_floatingips
            fip_gw_port.return_value = agent_gateway_port[0]
            ri.create_dvr_external_gateway_on_agent(ext_gw_port)
            ri.connect_rtr_2_fip()
            self.assertTrue(fip_gw_port.called)
            self.assertTrue(create_fip.called)
            self.assertEqual(agent_gateway_port[0],
                             ri.fip_ns.agent_gateway_port)
            self.assertTrue(ri.rtr_fip_connect)
            # Now let us associate the fip to the router
            ri.floating_ip_added_dist(fips, "192.168.0.1/32")
            # Now let us disassociate the fip from the router
            ri.floating_ip_removed_dist("192.168.0.1/32")
            # Call create_dvr_external_gateway_on_agent again to make
            # sure that the fip namespace create is not called again.
            # If create were called again, duplicate rules would be
            # configured in the fip namespace.
            ri.create_dvr_external_gateway_on_agent(ext_gw_port)
            self.assertTrue(fip_gw_port.called)
            create_fip.assert_called_once_with()
            self.assertEqual(1, ri.fip_ns.create_rtr_2_fip_link.call_count)
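
    # A floating IP that carries neither 'host' nor 'dest_host' must not be
    # wired up: no FIP rule is added and no advertisement is sent.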
    @mock.patch.object(lla.LinkLocalAllocator, '_write')
    def test_floating_ip_not_configured_if_no_host_or_dest_host(
            self, lla_write):
        fake_network_id = _uuid()
        subnet_id = _uuid()
        fake_floatingips = {'floatingips': [
            {'id': _uuid(),
             'floating_ip_address': '20.0.0.3',
             'fixed_ip_address': '192.168.0.1',
             'floating_network_id': _uuid(),
             'port_id': _uuid()}]}
        agent_gateway_port = (
            [{'fixed_ips': [
                {'ip_address': '20.0.0.30',
                 'prefixlen': 24,
                 'subnet_id': subnet_id}],
              'subnets': [
                  {'id': subnet_id,
                   'cidr': '20.0.0.0/24',
                   'gateway_ip': '20.0.0.1'}],
              'id': _uuid(),
              'network_id': fake_network_id,
              'mac_address': 'ca:fe:de:ad:be:ef'}]
        )
        router = l3_test_common.prepare_router_data(enable_snat=True)
        router[lib_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
        router[lib_constants.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port
        router['distributed'] = True
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        self._set_ri_kwargs(agent, router['id'], router)
        ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
        ext_gw_port = ri.router.get('gw_port')
        ri.fip_ns = agent.get_fip_ns(ext_gw_port['network_id'])
        agent.process_router_add = mock.Mock()
        ri.fip_ns.create_rtr_2_fip_link = mock.Mock()
        with mock.patch.object(ri, 'get_floating_ips') as fips, \
                mock.patch.object(ri, 'get_floating_agent_gw_interface'
                                  ) as fip_gw_port, \
                mock.patch.object(ri, '_add_floating_ip_rule') as add_rule, \
                mock.patch.object(ri.fip_ns, 'create') as create_fip:
            fips.return_value = fake_floatingips
            fip_gw_port.return_value = agent_gateway_port[0]
            ri.create_dvr_external_gateway_on_agent(ext_gw_port)
            ri.connect_rtr_2_fip()
            self.assertTrue(fip_gw_port.called)
            self.assertTrue(create_fip.called)
            self.assertEqual(agent_gateway_port[0],
                             ri.fip_ns.agent_gateway_port)
            self.assertTrue(ri.rtr_fip_connect)
            # Now let us associate the fip to the router
            status = ri.floating_ip_added_dist(fips, "192.168.0.1/32")
            self.assertIsNone(status)
            self.assertEqual(0, self.send_adv_notif.call_count)
            self.assertFalse(add_rule.called)

    @mock.patch.object(lla.LinkLocalAllocator, '_write')
    def test_floating_ip_centralized(self, lla_write):
        fake_network_id = _uuid()
        subnet_id = _uuid()
        fake_floatingips = {'floatingips': [
            {'id': _uuid(),
             'floating_ip_address': '20.0.0.3',
             'fixed_ip_address': '192.168.0.1',
             'floating_network_id': _uuid(),
             'port_id': _uuid(),
             'dvr_snat_bound': True,
             'host': None},
            {'id': _uuid(),
             'floating_ip_address': '20.0.0.4',
             'fixed_ip_address': '192.168.0.2',
             'floating_network_id': _uuid(),
             'port_id': _uuid(),
             'dvr_snat_bound': True,
             'host': None}]}
        agent_gateway_port = (
            [{'fixed_ips': [
                {'ip_address': '20.0.0.30',
                 'prefixlen': 24,
                 'subnet_id': subnet_id}],
              'subnets': [
                  {'id': subnet_id,
                   'cidr': '20.0.0.0/24',
                   'gateway_ip': '20.0.0.1'}],
              'id': _uuid(),
              'network_id': fake_network_id,
              'mac_address': 'ca:fe:de:ad:be:ef'}]
        )
        router = l3_test_common.prepare_router_data(enable_snat=True)
        router[lib_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
        router[lib_constants.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port
        router['distributed'] = True
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        self._set_ri_kwargs(agent, router['id'], router)
        ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
        ext_gw_port = ri.router.get('gw_port')
        ri.fip_ns = agent.get_fip_ns(ext_gw_port['network_id'])
        agent.process_router_add = mock.Mock()
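        # Stub the rtr-to-fip link creation so no namespace plumbing is
        # attempted; only the centralized FIP bookkeeping is under test.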
        ri.fip_ns.create_rtr_2_fip_link = mock.Mock()
        with mock.patch.object(ri, 'get_floating_ips') as fips, \
                mock.patch.object(ri,
                                  'add_centralized_floatingip') as add_fip, \
                mock.patch.object(ri, 'get_centralized_fip_cidr_set'
                                  ) as get_fip_cidrs, \
                mock.patch.object(ri, 'get_floating_agent_gw_interface'
                                  ) as fip_gw_port, \
                mock.patch.object(ri.fip_ns, 'create') as create_fip, \
                mock.patch.object(ri,
                                  'remove_centralized_floatingip') as rem_fip:
            fips.return_value = fake_floatingips
            fip_gw_port.return_value = agent_gateway_port[0]
            add_fip.return_value = lib_constants.FLOATINGIP_STATUS_ACTIVE
            ri.create_dvr_external_gateway_on_agent(ext_gw_port)
            ri.connect_rtr_2_fip()
            self.assertTrue(fip_gw_port.called)
            self.assertTrue(create_fip.called)
            self.assertEqual(agent_gateway_port[0],
                             ri.fip_ns.agent_gateway_port)
            self.assertTrue(ri.rtr_fip_connect)
            # Now let us associate the fip to the router
            status = ri.floating_ip_added_dist(fips, "192.168.0.1/32")
            add_fip.assert_called_once_with(fips, "192.168.0.1/32")
            self.assertEqual(lib_constants.FLOATINGIP_STATUS_ACTIVE, status)

            # Now let us add the second fip
            status = ri.floating_ip_added_dist(fips, "192.168.0.2/32")
            self.assertEqual(lib_constants.FLOATINGIP_STATUS_ACTIVE, status)
            device = mock.Mock()
            get_fip_cidrs.return_value = set(
                ["192.168.0.2/32", "192.168.0.1/32"])
            self.assertEqual(set(["192.168.0.2/32", "192.168.0.1/32"]),
                             ri.get_router_cidrs(device))
            ri.floating_ip_removed_dist("192.168.0.1/32")
            rem_fip.assert_called_once_with("192.168.0.1/32")
            self.assertTrue(get_fip_cidrs.called)
            get_fip_cidrs.return_value = set(["192.168.0.2/32"])
            self.assertEqual(set(["192.168.0.2/32"]),
                             ri.get_router_cidrs(device))

    @mock.patch.object(lla.LinkLocalAllocator, '_write')
    def test_create_dvr_fip_interfaces_for_late_binding(self, lla_write):
        fake_network_id = _uuid()
        fake_subnet_id = _uuid()
        fake_floatingips = {'floatingips': [
            {'id': _uuid(),
             'floating_ip_address': '20.0.0.3',
             'fixed_ip_address': '192.168.0.1',
             'floating_network_id': _uuid(),
             'port_id': _uuid(),
             'host': HOSTNAME}]}
        agent_gateway_port = (
            {'fixed_ips': [
                {'ip_address': '20.0.0.30',
                 'prefixlen': 24,
                 'subnet_id': fake_subnet_id}],
             'subnets': [
                 {'id': fake_subnet_id,
                  'cidr': '20.0.0.0/24',
                  'gateway_ip': '20.0.0.1'}],
             'id': _uuid(),
             'network_id': fake_network_id,
             'mtu': 1500,
             'mac_address': 'ca:fe:de:ad:be:ef'}
        )
        router = l3_test_common.prepare_router_data(enable_snat=True)
        router[lib_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
        router[lib_constants.FLOATINGIP_AGENT_INTF_KEY] = []
        router['distributed'] = True
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        self._set_ri_kwargs(agent, router['id'], router)
        ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
        ext_gw_port = ri.router.get('gw_port')
        ri.fip_ns = agent.get_fip_ns(ext_gw_port['network_id'])
        ri.fip_ns.subscribe = mock.Mock()
        with mock.patch.object(agent.plugin_rpc,
                               'get_agent_gateway_port') as fip_gw_port:
            fip_gw_port.return_value = agent_gateway_port
            ri.create_dvr_external_gateway_on_agent(ext_gw_port)
            ri.connect_rtr_2_fip()
            self.assertTrue(fip_gw_port.called)
            self.assertTrue(ri.rtr_fip_connect)
            self.assertEqual(agent_gateway_port,
                             ri.fip_ns.agent_gateway_port)

    @mock.patch.object(lla.LinkLocalAllocator, '_write')
    def test_create_dvr_fip_interfaces(self, lla_write):
        fake_network_id = _uuid()
        subnet_id = _uuid()
        fake_floatingips = {'floatingips': [
            {'id': _uuid(),
             'floating_ip_address': '20.0.0.3',
             'fixed_ip_address': '192.168.0.1',
             'floating_network_id': _uuid(),
             'port_id': _uuid(),
             'host': HOSTNAME}]}
        agent_gateway_port = (
            [{'fixed_ips': [
                {'ip_address': '20.0.0.30',
                 'prefixlen': 24,
                 'subnet_id': subnet_id}],
              'subnets': [
                  {'id': subnet_id,
                   'cidr': '20.0.0.0/24',
                   'gateway_ip': '20.0.0.1'}],
              'id': _uuid(),
              'network_id': fake_network_id,
              'mtu': 1500,
              'mac_address': 'ca:fe:de:ad:be:ef'}]
        )
        router = l3_test_common.prepare_router_data(enable_snat=True)
        router[lib_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
        router[lib_constants.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port
        router['distributed'] = True
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        self._set_ri_kwargs(agent, router['id'], router)
        ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
        ext_gw_port = ri.router.get('gw_port')
        ri.fip_ns = agent.get_fip_ns(ext_gw_port['network_id'])
        ri.fip_ns.subscribe = mock.Mock()
        ri.fip_ns.agent_router_gateway = mock.Mock()
        agent.process_router_add = mock.Mock()

        with mock.patch.object(
                ri, 'get_floating_agent_gw_interface') as fip_gw_port:
            fip_gw_port.return_value = agent_gateway_port[0]
            ri.create_dvr_external_gateway_on_agent(ext_gw_port)
            ri.connect_rtr_2_fip()
            self.assertTrue(fip_gw_port.called)
            self.assertEqual(agent_gateway_port[0],
                             ri.fip_ns.agent_gateway_port)
            self.assertTrue(ri.rtr_fip_connect)
            self.assertTrue(ri.rtr_fip_subnet)

    @mock.patch.object(lla.LinkLocalAllocator, '_write')
    def test_create_dvr_fip_interfaces_for_restart_l3agent_case(self,
                                                                lla_write):
        fake_floatingips = {'floatingips': [
            {'id': _uuid(),
             'floating_ip_address': '20.0.0.3',
             'fixed_ip_address': '192.168.0.1',
             'floating_network_id': _uuid(),
             'port_id': _uuid(),
             'host': HOSTNAME}]}
        agent_gateway_port = (
            [{'fixed_ips': [
                {'ip_address': '20.0.0.30',
                 'prefixlen': 24,
                 'subnet_id': 'subnet_id'}],
              'subnets': [
                  {'id': 'subnet_id',
                   'cidr': '20.0.0.0/24',
                   'gateway_ip': '20.0.0.1'}],
              'id': _uuid(),
              'network_id': 'fake_network_id',
              'mtu': 1500,
              'mac_address': 'ca:fe:de:ad:be:ef'}]
        )
        router = l3_test_common.prepare_router_data(enable_snat=True)
        router[lib_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
        router[lib_constants.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port
        router['distributed'] = True
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        self._set_ri_kwargs(agent, router['id'], router)
        ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
        ext_gw_port = ri.router.get('gw_port')
        ri.fip_ns = agent.get_fip_ns(ext_gw_port['network_id'])
        ri.fip_ns.subscribe = mock.Mock(return_value=True)
        ri.fip_ns.agent_router_gateway = mock.Mock()
        ri.rtr_fip_subnet = None

        with mock.patch.object(
                ri, 'get_floating_agent_gw_interface') as fip_gw_port:
            fip_gw_port.return_value = agent_gateway_port[0]
            ri.create_dvr_external_gateway_on_agent(ext_gw_port)
            ri.connect_rtr_2_fip()
            self.assertTrue(fip_gw_port.called)
            self.assertEqual(agent_gateway_port[0],
                             ri.fip_ns.agent_gateway_port)
            self.assertTrue(ri.rtr_fip_subnet)
            self.assertTrue(ri.rtr_fip_connect)

    def test_process_router_cent_floating_ip_add(self):
        fake_floatingips = {'floatingips': [
            {'id': _uuid(),
             'floating_ip_address': '15.1.2.3',
             'fixed_ip_address': '192.168.0.1',
             'status': 'DOWN',
             'floating_network_id': _uuid(),
             'port_id': _uuid(),
             'host': HOSTNAME}]}

        router = l3_test_common.prepare_router_data(enable_snat=True)
        router[lib_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        ri = l3router.RouterInfo(agent, router['id'],
                                 router, **self.ri_kwargs)
        ri.iptables_manager.ipv4['nat'] = mock.MagicMock()
        ri.get_external_device_name = mock.Mock(return_value='exgw')
        self._test_process_floating_ip_addresses_add(ri, agent)

    def _test_process_router_snat_disabled(self, random_fully):
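        # random_fully is a class attribute of IptablesManager; the
        # IptablesFixture applied in setUp() restores its original value
        # after each test.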
        iptables_manager.IptablesManager.random_fully = random_fully
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router = l3_test_common.prepare_router_data(enable_snat=True)
        ri = l3router.RouterInfo(agent, router['id'], router,
                                 **self.ri_kwargs)
        ri.external_gateway_added = mock.Mock()
        # Process with NAT
        ri.process()
        orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
        orig_mangle_rules = ri.iptables_manager.ipv4['mangle'].rules[:]
        # Reprocess without NAT
        router['enable_snat'] = False
        # Reassign the router object to RouterInfo
        ri.router = router
        ri.process()
        # For some reason set logic does not work well with
        # IpTablesRule instances
        nat_rules_delta = [r for r in orig_nat_rules
                           if r not in ri.iptables_manager.ipv4['nat'].rules]
        self.assertEqual(1, len(nat_rules_delta))
        mangle_rules_delta = [
            r for r in orig_mangle_rules
            if r not in ri.iptables_manager.ipv4['mangle'].rules]
        self.assertEqual(1, len(mangle_rules_delta))
        self._verify_snat_mangle_rules(nat_rules_delta, mangle_rules_delta,
                                       router, random_fully)
        self.assertEqual(1, self.send_adv_notif.call_count)

    def test_process_router_snat_disabled_random_fully(self):
        self._test_process_router_snat_disabled(True)

    def test_process_router_snat_disabled_random_fully_false(self):
        self._test_process_router_snat_disabled(False)

    def _test_process_router_snat_enabled(self, random_fully):
        iptables_manager.IptablesManager.random_fully = random_fully
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router = l3_test_common.prepare_router_data(enable_snat=False)
        ri = l3router.RouterInfo(agent, router['id'], router,
                                 **self.ri_kwargs)
        ri.external_gateway_added = mock.Mock()
        # Process without NAT
        ri.process()
        orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
        orig_mangle_rules = ri.iptables_manager.ipv4['mangle'].rules[:]
        # Reprocess with NAT
        router['enable_snat'] = True
        # Reassign the router object to RouterInfo
        ri.router = router
        ri.process()
        # For some reason set logic does not work well with
        # IpTablesRule instances
        nat_rules_delta = [r for r in ri.iptables_manager.ipv4['nat'].rules
                           if r not in orig_nat_rules]
        self.assertEqual(1, len(nat_rules_delta))
        mangle_rules_delta = [
            r for r in ri.iptables_manager.ipv4['mangle'].rules
            if r not in orig_mangle_rules]
        self.assertEqual(1, len(mangle_rules_delta))
        self._verify_snat_mangle_rules(nat_rules_delta, mangle_rules_delta,
                                       router, random_fully)
        self.assertEqual(1, self.send_adv_notif.call_count)

    def test_process_router_snat_enabled_random_fully(self):
        self._test_process_router_snat_enabled(True)

    def test_process_router_snat_enabled_random_fully_false(self):
        self._test_process_router_snat_enabled(False)

    def _test_update_routing_table(self, is_snat_host=True):
        router = l3_test_common.prepare_router_data()
        uuid = router['id']
        s_netns = 'snat-' + uuid
        q_netns = 'qrouter-' + uuid
        fake_route1 = {'destination': '135.207.0.0/16',
                       'nexthop': '19.4.4.200'}
        calls = [mock.call('replace', fake_route1, q_netns)]
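        # A route update always touches the qrouter namespace; the snat
        # namespace is only expected to change when this agent hosts SNAT
        # for the router.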
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        self._set_ri_kwargs(agent, uuid, router)
        ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
        ri._update_routing_table = mock.Mock()

        with mock.patch.object(ri, '_is_this_snat_host') as snat_host:
            snat_host.return_value = is_snat_host
            ri.update_routing_table('replace', fake_route1)
            if is_snat_host:
                ri._update_routing_table('replace', fake_route1, s_netns)
                calls += [mock.call('replace', fake_route1, s_netns)]
            ri._update_routing_table.assert_has_calls(calls, any_order=True)

    def test_process_update_snat_routing_table(self):
        self._test_update_routing_table()

    def test_process_not_update_snat_routing_table(self):
        self._test_update_routing_table(is_snat_host=False)

    def test_process_router_interface_added(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router = l3_test_common.prepare_router_data()
        ri = l3router.RouterInfo(agent, router['id'], router,
                                 **self.ri_kwargs)
        ri.external_gateway_added = mock.Mock()
        # Process with NAT
        ri.process()
        # Add an interface and reprocess
        l3_test_common.router_append_interface(router)
        # Reassign the router object to RouterInfo
        ri.router = router
        ri.process()
        # send_ip_addr_adv_notif is called both times process is called
        self.assertEqual(2, self.send_adv_notif.call_count)

    def _test_process_ipv6_only_or_dual_stack_gw(self, dual_stack=False):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router = l3_test_common.prepare_router_data(
            ip_version=lib_constants.IP_VERSION_6, dual_stack=dual_stack)
        # Get NAT rules without the gw_port
        gw_port = router['gw_port']
        router['gw_port'] = None
        ri = l3router.RouterInfo(agent, router['id'], router,
                                 **self.ri_kwargs)
        ri.external_gateway_added = mock.Mock()
        self._process_router_instance_for_agent(agent, ri, router)
        orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]

        # Get NAT rules with the gw_port
        router['gw_port'] = gw_port
        ri = l3router.RouterInfo(agent, router['id'], router,
                                 **self.ri_kwargs)
        p = ri.external_gateway_nat_fip_rules
        s = ri.external_gateway_nat_snat_rules
        attrs_to_mock = dict(
            (a, mock.DEFAULT) for a in
            ['external_gateway_nat_fip_rules',
             'external_gateway_nat_snat_rules']
        )
        with mock.patch.multiple(ri, **attrs_to_mock) as mocks:
            mocks['external_gateway_nat_fip_rules'].side_effect = p
            mocks['external_gateway_nat_snat_rules'].side_effect = s
            self._process_router_instance_for_agent(agent, ri, router)
            new_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]

            # NAT rules should only change for dual_stack operation
            if dual_stack:
                self.assertTrue(
                    mocks['external_gateway_nat_fip_rules'].called)
                self.assertTrue(
                    mocks['external_gateway_nat_snat_rules'].called)
                self.assertNotEqual(orig_nat_rules, new_nat_rules)
            else:
                self.assertFalse(
                    mocks['external_gateway_nat_fip_rules'].called)
                self.assertFalse(
                    mocks['external_gateway_nat_snat_rules'].called)
                self.assertEqual(orig_nat_rules, new_nat_rules)

    def test_process_ipv6_only_gw(self):
        self._test_process_ipv6_only_or_dual_stack_gw()

    def test_process_dual_stack_gw(self):
        self._test_process_ipv6_only_or_dual_stack_gw(dual_stack=True)

    def _process_router_ipv6_interface_added(
            self, router, ra_mode=None, addr_mode=None):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        ri = l3router.RouterInfo(agent, router['id'], router,
                                 **self.ri_kwargs)
        ri.external_gateway_added = mock.Mock()
        # Process with NAT
        ri.process()
        orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
        # Add an IPv6 interface and reprocess
        l3_test_common.router_append_interface(
            router, count=1, ip_version=lib_constants.IP_VERSION_6,
            ra_mode=ra_mode, addr_mode=addr_mode)
        # Reassign the router object to RouterInfo
        self._process_router_instance_for_agent(agent, ri, router)
        # IPv4 NAT rules should not be changed by adding an IPv6 interface
        nat_rules_delta = [r for r in ri.iptables_manager.ipv4['nat'].rules
                           if r not in orig_nat_rules]
        self.assertFalse(nat_rules_delta)
        return ri

    def _radvd_expected_call_external_process(self, ri, enable=True):
        expected_calls = [mock.call(uuid=ri.router['id'],
                                    service='radvd',
                                    default_cmd_callback=mock.ANY,
                                    namespace=ri.ns_name,
                                    conf=mock.ANY,
                                    run_as_root=True)]
        if enable:
            expected_calls.append(mock.call().enable(reload_cfg=True))
        else:
            expected_calls.append(mock.call().disable())
        return expected_calls

    def _process_router_ipv6_subnet_added(self, router,
                                          ipv6_subnet_modes=None,
                                          dns_nameservers=None,
                                          network_mtu=0):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        ri = l3router.RouterInfo(agent, router['id'], router,
                                 **self.ri_kwargs)
        agent.external_gateway_added = mock.Mock()
        self._process_router_instance_for_agent(agent, ri, router)
        # Add an IPv6 interface with len(ipv6_subnet_modes) subnets
        # and reprocess
        l3_test_common.router_append_subnet(
            router, count=len(ipv6_subnet_modes),
            ip_version=lib_constants.IP_VERSION_6,
            ipv6_subnet_modes=ipv6_subnet_modes,
            dns_nameservers=dns_nameservers,
            network_mtu=network_mtu)
        # Reassign the router object to RouterInfo
        self._process_router_instance_for_agent(agent, ri, router)
        return ri

    def _assert_ri_process_enabled(self, ri):
        """Verify that process was enabled for a router instance."""
        expected_calls = self._radvd_expected_call_external_process(ri)
        self.assertEqual(expected_calls, self.external_process.mock_calls)

    def _assert_ri_process_disabled(self, ri):
        """Verify that process was disabled for a router instance."""
        expected_calls = self._radvd_expected_call_external_process(ri, False)
        self.assertEqual(expected_calls, self.external_process.mock_calls)

    def test_process_router_ipv6_interface_added(self):
        router = l3_test_common.prepare_router_data()
        ri = self._process_router_ipv6_interface_added(router)
        self._assert_ri_process_enabled(ri)
        # Expect radvd configured without prefix
        self.assertNotIn('prefix', self.utils_replace_file.call_args[0][1])

    def test_process_router_ipv6_slaac_interface_added(self):
        router = l3_test_common.prepare_router_data()
        ri = self._process_router_ipv6_interface_added(
            router, ra_mode=lib_constants.IPV6_SLAAC)
        self._assert_ri_process_enabled(ri)
        # Expect radvd configured with prefix
        radvd_config_str = self.utils_replace_file.call_args[0][1]
        self.assertIn('prefix', radvd_config_str)
        self.assertIn('AdvAutonomous on', radvd_config_str)

    def test_process_router_ipv6_dhcpv6_stateful_interface_added(self):
        router = l3_test_common.prepare_router_data()
        ri = self._process_router_ipv6_interface_added(
            router, ra_mode=lib_constants.DHCPV6_STATEFUL)
        self._assert_ri_process_enabled(ri)
        # Expect radvd configured with prefix
        radvd_config_str = self.utils_replace_file.call_args[0][1]
        self.assertIn('prefix', radvd_config_str)
        self.assertIn('AdvAutonomous off', radvd_config_str)

    def test_process_router_ipv6_subnets_added(self):
        router = l3_test_common.prepare_router_data()
        ri = self._process_router_ipv6_subnet_added(
            router,
            ipv6_subnet_modes=[
                {'ra_mode': lib_constants.IPV6_SLAAC,
                 'address_mode': lib_constants.IPV6_SLAAC},
                {'ra_mode': lib_constants.DHCPV6_STATELESS,
                 'address_mode': lib_constants.DHCPV6_STATELESS},
                {'ra_mode': lib_constants.DHCPV6_STATEFUL,
                 'address_mode': lib_constants.DHCPV6_STATEFUL}])
        self._assert_ri_process_enabled(ri)
        radvd_config_str = self.utils_replace_file.call_args[0][1]
        # Assert we have a prefix from IPV6_SLAAC and a prefix from
        # DHCPV6_STATELESS on one interface
        self.assertEqual(3, radvd_config_str.count("prefix"))
        self.assertEqual(1, radvd_config_str.count("interface"))
        self.assertEqual(2, radvd_config_str.count("AdvAutonomous on"))
        self.assertEqual(1, radvd_config_str.count("AdvAutonomous off"))

    def test_process_router_ipv6_subnets_added_to_existing_port(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
    def test_process_router_ipv6_subnets_added_to_existing_port(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router = l3_test_common.prepare_router_data()
        ri = l3router.RouterInfo(agent, router['id'], router,
                                 **self.ri_kwargs)
        agent.external_gateway_added = mock.Mock()
        self._process_router_instance_for_agent(agent, ri, router)

        # Add the first subnet on a new interface
        l3_test_common.router_append_subnet(
            router, count=1,
            ip_version=lib_constants.IP_VERSION_6,
            ipv6_subnet_modes=[
                {'ra_mode': lib_constants.IPV6_SLAAC,
                 'address_mode': lib_constants.IPV6_SLAAC}])
        self._process_router_instance_for_agent(agent, ri, router)
        self._assert_ri_process_enabled(ri)
        radvd_config = self.utils_replace_file.call_args[0][1].split()
        self.assertEqual(1, len(ri.internal_ports[1]['subnets']))
        self.assertEqual(1, len(ri.internal_ports[1]['fixed_ips']))
        self.assertEqual(1, radvd_config.count("prefix"))
        self.assertEqual(1, radvd_config.count("interface"))

        # Reset mocks to verify radvd enabled and configured correctly
        # after second subnet added to interface
        self.external_process.reset_mock()
        self.utils_replace_file.reset_mock()

        # Add the second subnet on the same interface
        interface_id = router[lib_constants.INTERFACE_KEY][1]['id']
        l3_test_common.router_append_subnet(
            router, count=1,
            ip_version=lib_constants.IP_VERSION_6,
            ipv6_subnet_modes=[
                {'ra_mode': lib_constants.IPV6_SLAAC,
                 'address_mode': lib_constants.IPV6_SLAAC}],
            interface_id=interface_id)
        self._process_router_instance_for_agent(agent, ri, router)
        # radvd should have been enabled again and the interface
        # should have two prefixes
        self._assert_ri_process_enabled(ri)
        radvd_config = self.utils_replace_file.call_args[0][1].split()
        self.assertEqual(2, len(ri.internal_ports[1]['subnets']))
        self.assertEqual(2, len(ri.internal_ports[1]['fixed_ips']))
        self.assertEqual(2, radvd_config.count("prefix"))
        self.assertEqual(1, radvd_config.count("interface"))

    def test_process_router_ipv6v4_interface_added(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router = l3_test_common.prepare_router_data()
        ri = l3router.RouterInfo(agent, router['id'],
                                 router, **self.ri_kwargs)
        ri.external_gateway_added = mock.Mock()

        # Process with NAT
        ri.process()

        # Add an IPv4 and IPv6 interface and reprocess
        l3_test_common.router_append_interface(
            router, count=1, ip_version=lib_constants.IP_VERSION_4)
        l3_test_common.router_append_interface(
            router, count=1, ip_version=lib_constants.IP_VERSION_6)
        # Reassign the router object to RouterInfo
        self._process_router_instance_for_agent(agent, ri, router)
        self._assert_ri_process_enabled(ri)

    def test_process_router_interface_removed(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router = l3_test_common.prepare_router_data(num_internal_ports=2)
        ri = l3router.RouterInfo(agent, router['id'],
                                 router, **self.ri_kwargs)
        ri.external_gateway_added = mock.Mock()

        # Process with NAT
        ri.process()

        # Remove an interface and reprocess
        del router[lib_constants.INTERFACE_KEY][1]
        # Reassign the router object to RouterInfo
        ri.router = router
        ri.process()
        # send_ip_addr_adv_notif is called both times process is called
        self.assertEqual(2, self.send_adv_notif.call_count)
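    # Removal paths: deleting the last IPv6 interface must stop radvd
    # (asserted via _assert_ri_process_disabled()), while removing just
    # one of several subnets only regenerates its configuration.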
    def test_process_router_ipv6_interface_removed(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router = l3_test_common.prepare_router_data()
        ri = l3router.RouterInfo(agent, router['id'],
                                 router, **self.ri_kwargs)
        ri.external_gateway_added = mock.Mock()
        self._process_router_instance_for_agent(agent, ri, router)

        # Add an IPv6 interface and reprocess
        l3_test_common.router_append_interface(
            router, count=1, ip_version=lib_constants.IP_VERSION_6)
        self._process_router_instance_for_agent(agent, ri, router)
        self._assert_ri_process_enabled(ri)

        # Reset the calls so we can check for disable radvd
        self.external_process.reset_mock()
        self.process_monitor.reset_mock()

        # Remove the IPv6 interface and reprocess
        del router[lib_constants.INTERFACE_KEY][1]
        self._process_router_instance_for_agent(agent, ri, router)
        self._assert_ri_process_disabled(ri)

    def test_process_router_ipv6_subnet_removed(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router = l3_test_common.prepare_router_data()
        ri = l3router.RouterInfo(agent, router['id'],
                                 router, **self.ri_kwargs)
        agent.external_gateway_added = mock.Mock()
        self._process_router_instance_for_agent(agent, ri, router)

        # Add an IPv6 interface with two subnets and reprocess
        l3_test_common.router_append_subnet(
            router, count=2,
            ip_version=lib_constants.IP_VERSION_6,
            ipv6_subnet_modes=([{'ra_mode': lib_constants.IPV6_SLAAC,
                                 'address_mode': lib_constants.IPV6_SLAAC}]
                               * 2))
        self._process_router_instance_for_agent(agent, ri, router)
        self._assert_ri_process_enabled(ri)

        # Reset mocks to check for modified radvd config
        self.utils_replace_file.reset_mock()
        self.external_process.reset_mock()

        # Remove one subnet from the interface and reprocess
        interfaces = copy.deepcopy(router[lib_constants.INTERFACE_KEY])
        del interfaces[1]['subnets'][0]
        del interfaces[1]['fixed_ips'][0]
        router[lib_constants.INTERFACE_KEY] = interfaces
        self._process_router_instance_for_agent(agent, ri, router)

        # Assert radvd was enabled again and that we only have one
        # prefix on the interface
        self._assert_ri_process_enabled(ri)
        radvd_config = self.utils_replace_file.call_args[0][1].split()
        self.assertEqual(1, len(ri.internal_ports[1]['subnets']))
        self.assertEqual(1, len(ri.internal_ports[1]['fixed_ips']))
        self.assertEqual(1, radvd_config.count("interface"))
        self.assertEqual(1, radvd_config.count("prefix"))

    def test_process_router_internal_network_added_unexpected_error(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router = l3_test_common.prepare_router_data()
        ri = l3router.RouterInfo(agent, router['id'],
                                 router, **self.ri_kwargs)
        ri.external_gateway_added = mock.Mock()
        with mock.patch.object(
                ri, 'internal_network_added') as internal_network_added:
            # raise RuntimeError to simulate that an unexpected exception
            # occurs
            internal_network_added.side_effect = RuntimeError
            self.assertRaises(RuntimeError, ri.process)
            self.assertNotIn(
                router[lib_constants.INTERFACE_KEY][0], ri.internal_ports)

            # The unexpected exception has been fixed manually
            internal_network_added.side_effect = None

            # periodic_sync_routers_task finds out that _rpc_loop failed to
            # process the router last time; it will retry in the next run.
            ri.process()
            # We were able to add the port to ri.internal_ports
            self.assertIn(
                router[lib_constants.INTERFACE_KEY][0], ri.internal_ports)
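    # The same retry semantics apply to the removal path below: a failed
    # internal_network_removed() leaves the port cached in
    # ri.internal_ports, so the next process() pass can retry the removal.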
    def test_process_router_internal_network_removed_unexpected_error(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router = l3_test_common.prepare_router_data()
        ri = l3router.RouterInfo(agent, router['id'],
                                 router, **self.ri_kwargs)
        ri.external_gateway_added = mock.Mock()

        # add an internal port
        ri.process()

        with mock.patch.object(
                ri, 'internal_network_removed') as internal_net_removed:
            # raise RuntimeError to simulate that an unexpected exception
            # occurs
            internal_net_removed.side_effect = RuntimeError
            ri.internal_ports[0]['admin_state_up'] = False
            # The above port is set to down state, remove it.
            self.assertRaises(RuntimeError, ri.process)
            self.assertIn(
                router[lib_constants.INTERFACE_KEY][0], ri.internal_ports)

            # The unexpected exception has been fixed manually
            internal_net_removed.side_effect = None

            # periodic_sync_routers_task finds out that _rpc_loop failed to
            # process the router last time; it will retry in the next run.
            ri.process()
            # We were able to remove the port from ri.internal_ports
            self.assertNotIn(
                router[lib_constants.INTERFACE_KEY][0], ri.internal_ports)

    def test_process_router_floatingip_nochange(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router = l3_test_common.prepare_router_data(num_internal_ports=1)
        fip1 = {'id': _uuid(), 'floating_ip_address': '8.8.8.8',
                'fixed_ip_address': '7.7.7.7', 'status': 'ACTIVE',
                'port_id': router[lib_constants.INTERFACE_KEY][0]['id']}
        fip2 = copy.copy(fip1)
        fip2.update({'id': _uuid(), 'status': 'DOWN',
                     'floating_ip_address': '9.9.9.9'})
        router[lib_constants.FLOATINGIP_KEY] = [fip1, fip2]

        ri = legacy_router.LegacyRouter(agent, router['id'], router,
                                        **self.ri_kwargs)
        ri.external_gateway_added = mock.Mock()
        with mock.patch.object(
            agent.plugin_rpc, 'update_floatingip_statuses'
        ) as mock_update_fip_status,\
                mock.patch.object(
                    ri, 'get_centralized_fip_cidr_set') as cent_cidrs,\
                mock.patch.object(ri, 'get_router_cidrs') as mock_get_cidrs:
            cent_cidrs.return_value = set()
            mock_get_cidrs.return_value = set(
                [fip1['floating_ip_address'] + '/32'])
            ri.process()
            # make sure only the one that wasn't in existing cidrs was sent
            mock_update_fip_status.assert_called_once_with(
                mock.ANY, ri.router_id, {fip2['id']: 'ACTIVE'})

    @mock.patch.object(l3_agent.LOG, 'exception')
    def _retrigger_initialize(self, log_exception, delete_fail=False):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router = {'id': _uuid(),
                  'external_gateway_info': {'network_id': 'aaa'}}
        self.plugin_api.get_routers.return_value = [router]
        update = resource_processing_queue.ResourceUpdate(
            router['id'],
            l3_agent.PRIORITY_SYNC_ROUTERS_TASK,
            resource=router,
            timestamp=timeutils.utcnow())
        agent._queue.add(update)

        ri = legacy_router.LegacyRouter(agent, router['id'], router,
                                        **self.ri_kwargs)
        calls = [mock.call('Error while initializing router %s',
                           router['id'])]
        if delete_fail:
            # if delete fails, then also retrigger initialize
            ri.delete = mock.Mock(side_effect=RuntimeError())
            calls.append(
                mock.call('Error while deleting router %s',
                          router['id']))
        else:
            ri.delete = mock.Mock()
            calls.append(
                mock.call('Failed to process compatible router: %s' %
                          router['id']))
        ri.process = mock.Mock()
        ri.initialize = mock.Mock(side_effect=RuntimeError())
        agent._create_router = mock.Mock(return_value=ri)
        agent._fetch_external_net_id = mock.Mock(
            return_value=router['external_gateway_info']['network_id'])
        agent._process_router_update()
        log_exception.assert_has_calls(calls)

        ri.initialize.side_effect = None
        agent._process_router_update()
        self.assertTrue(ri.delete.called)
        self.assertEqual(2, ri.initialize.call_count)
        self.assertEqual(2, agent._create_router.call_count)
        self.assertEqual(1, ri.process.call_count)
        self.assertIn(ri.router_id, agent.router_info)

    def test_initialize_fail_retrigger_initialize(self):
        self._retrigger_initialize()

    def test_initialize_and_delete_fail_retrigger_initialize(self):
        self._retrigger_initialize(delete_fail=True)
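    # Floating IP status reporting convention: after a process() pass the
    # agent calls update_floatingip_statuses() with only the floating IPs
    # whose status changed on this host (ACTIVE, DOWN or ERROR).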
    def test_process_router_floatingip_status_update_if_processed(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router = l3_test_common.prepare_router_data(num_internal_ports=1)
        fip1 = {'id': _uuid(), 'floating_ip_address': '8.8.8.8',
                'fixed_ip_address': '7.7.7.7', 'status': 'ACTIVE',
                'port_id': router[lib_constants.INTERFACE_KEY][0]['id']}
        fip2 = copy.copy(fip1)
        fip2.update({'id': _uuid(), 'status': 'DOWN', })
        router[lib_constants.FLOATINGIP_KEY] = [fip1, fip2]

        ri = legacy_router.LegacyRouter(agent, router['id'], router,
                                        **self.ri_kwargs)
        ri.external_gateway_added = mock.Mock()
        ri.iptables_manager.ipv4['nat'] = mock.MagicMock()
        with mock.patch.object(
            agent.plugin_rpc, 'update_floatingip_statuses'
        ) as mock_update_fip_status,\
                mock.patch.object(
                    ri, 'get_centralized_fip_cidr_set') as cent_cidrs,\
                mock.patch.object(ri, 'get_router_cidrs') as mock_get_cidrs:
            mock_get_cidrs.return_value = set()
            cent_cidrs.return_value = set()
            ri.process()
            # make sure both were sent since neither existed in the
            # existing cidrs
            mock_update_fip_status.assert_called_once_with(
                mock.ANY, ri.router_id, {fip1['id']: 'ACTIVE',
                                         fip2['id']: 'ACTIVE'})

    def test_process_router_floatingip_disabled(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        with mock.patch.object(
                agent.plugin_rpc,
                'update_floatingip_statuses') as mock_update_fip_status:
            fip_id = _uuid()
            router = l3_test_common.prepare_router_data(num_internal_ports=1)
            router[lib_constants.FLOATINGIP_KEY] = [
                {'id': fip_id,
                 'floating_ip_address': '8.8.8.8',
                 'fixed_ip_address': '7.7.7.7',
                 'status': 'DOWN',
                 'port_id': router[lib_constants.INTERFACE_KEY][0]['id']}]

            ri = legacy_router.LegacyRouter(agent, router['id'],
                                            router, **self.ri_kwargs)
            ri.external_gateway_added = mock.Mock()
            ri.process()
            # Assert the call to set the floating IP to ACTIVE was made
            mock_update_fip_status.assert_called_once_with(
                mock.ANY, ri.router_id,
                {fip_id: lib_constants.FLOATINGIP_STATUS_ACTIVE})
            mock_update_fip_status.reset_mock()
            # Process the router again, this time without floating IPs
            router[lib_constants.FLOATINGIP_KEY] = []
            ri.router = router
            ri.process()
            # Assert the call to set the floating IP to DOWN was made
            mock_update_fip_status.assert_called_once_with(
                mock.ANY, ri.router_id,
                {fip_id: lib_constants.FLOATINGIP_STATUS_DOWN})

    def test_process_router_floatingip_exception(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        with mock.patch.object(
                agent.plugin_rpc,
                'update_floatingip_statuses') as mock_update_fip_status:
            fip_id = _uuid()
            router = l3_test_common.prepare_router_data(num_internal_ports=1)
            router[lib_constants.FLOATINGIP_KEY] = [
                {'id': fip_id,
                 'floating_ip_address': '8.8.8.8',
                 'fixed_ip_address': '7.7.7.7',
                 'port_id': router[lib_constants.INTERFACE_KEY][0]['id']}]

            ri = l3router.RouterInfo(agent, router['id'],
                                     router, **self.ri_kwargs)
            ri.process_floating_ip_addresses = mock.Mock(
                side_effect=RuntimeError)
            ri.external_gateway_added = mock.Mock()
            ri.process()
            # Assert the call to set the floating IP to ERROR was made
            mock_update_fip_status.assert_called_once_with(
                mock.ANY, ri.router_id,
                {fip_id: lib_constants.FLOATINGIP_STATUS_ERROR})

    def test_process_external_iptables_exception(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        with mock.patch.object(
                agent.plugin_rpc,
                'update_floatingip_statuses') as mock_update_fip_status:
            fip_id = _uuid()
            router = l3_test_common.prepare_router_data(num_internal_ports=1)
            router[lib_constants.FLOATINGIP_KEY] = [
                {'id': fip_id,
                 'floating_ip_address': '8.8.8.8',
                 'fixed_ip_address': '7.7.7.7',
                 'port_id': router[lib_constants.INTERFACE_KEY][0]['id']}]

            ri = l3router.RouterInfo(agent, router['id'],
                                     router, **self.ri_kwargs)
            ri.iptables_manager._apply = mock.Mock(side_effect=Exception)
            ri.process_external()
            # Assert the call to set the floating IP to ERROR was made
            mock_update_fip_status.assert_called_once_with(
                mock.ANY, ri.router_id,
                {fip_id: lib_constants.FLOATINGIP_STATUS_ERROR})

            self.assertEqual(1, ri.iptables_manager._apply.call_count)
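    # The SNAT rule tests below build the expected iptables rules by hand
    # (the jump to float-snat, the --to-source SNAT rule and its conntrack
    # DNAT variant) and compare them against what the iptables manager
    # actually rendered.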
    def test_handle_router_snat_rules_distributed_without_snat_manager(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        self._set_ri_kwargs(agent, 'foo_router_id', {})
        ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
        ri.iptables_manager = mock.MagicMock()
        ri._is_this_snat_host = mock.Mock(return_value=True)
        ri.get_ex_gw_port = mock.Mock(return_value=None)

        ri._handle_router_snat_rules(None, mock.ANY)
        self.assertIsNone(ri.snat_iptables_manager)
        self.assertFalse(ri.iptables_manager.called)

    def test_handle_router_snat_rules_add_back_jump(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        ri = l3router.RouterInfo(agent, _uuid(), {}, **self.ri_kwargs)
        ri.iptables_manager = mock.MagicMock()
        port = {'fixed_ips': [{'ip_address': '192.168.1.4'}]}

        ri._handle_router_snat_rules(port, "iface")

        nat = ri.iptables_manager.ipv4['nat']
        nat.empty_chain.assert_any_call('snat')
        nat.add_rule.assert_any_call('snat', '-j $float-snat')
        for call in nat.mock_calls:
            name, args, kwargs = call
            if name == 'add_rule':
                self.assertEqual(('snat', '-j $float-snat'), args)
                self.assertEqual({}, kwargs)
                break

    def test_handle_router_snat_rules_add_rules(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        ri = l3router.RouterInfo(agent, _uuid(), {}, **self.ri_kwargs)
        ex_gw_port = {'fixed_ips': [{'ip_address': '192.168.1.4'}]}
        ri.router = {'distributed': False}
        ri._handle_router_snat_rules(ex_gw_port, "iface")

        nat_rules = list(map(str, ri.iptables_manager.ipv4['nat'].rules))
        wrap_name = ri.iptables_manager.wrap_name

        jump_float_rule = "-A %s-snat -j %s-float-snat" % (wrap_name,
                                                           wrap_name)
        snat_rule1 = ("-A %s-snat -o iface -j SNAT --to-source %s "
                      "--random-fully") % (
            wrap_name, ex_gw_port['fixed_ips'][0]['ip_address'])
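        # snat_rule2 covers DNATed (floating IP) connections: packets that
        # do not carry the router mark (i.e. did not enter through the
        # external interface) but whose conntrack state is DNAT are still
        # source-NATed to the gateway address.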
        snat_rule2 = ("-A %s-snat -m mark ! --mark 0x2/%s "
                      "-m conntrack --ctstate DNAT "
                      "-j SNAT --to-source %s --random-fully") % (
            wrap_name, lib_constants.ROUTER_MARK_MASK,
            ex_gw_port['fixed_ips'][0]['ip_address'])

        self.assertIn(jump_float_rule, nat_rules)

        self.assertIn(snat_rule1, nat_rules)
        self.assertIn(snat_rule2, nat_rules)
        self.assertThat(nat_rules.index(jump_float_rule),
                        matchers.LessThan(nat_rules.index(snat_rule1)))

        mangle_rules = list(
            map(str, ri.iptables_manager.ipv4['mangle'].rules))
        mangle_rule = ("-A %s-mark -i iface "
                       "-j MARK --set-xmark 0x2/%s" %
                       (wrap_name, lib_constants.ROUTER_MARK_MASK))
        self.assertIn(mangle_rule, mangle_rules)

    def test_process_router_delete_stale_internal_devices(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        stale_devlist = [l3_test_common.FakeDev('qr-a1b2c3d4-e5'),
                         l3_test_common.FakeDev('qr-b2c3d4e5-f6')]
        stale_devnames = [dev.name for dev in stale_devlist]

        get_devices_return = []
        get_devices_return.extend(stale_devlist)
        self.mock_ip.get_devices.return_value = get_devices_return

        router = l3_test_common.prepare_router_data(enable_snat=True,
                                                    num_internal_ports=1)
        ri = l3router.RouterInfo(agent, router['id'],
                                 router, **self.ri_kwargs)

        internal_ports = ri.router.get(lib_constants.INTERFACE_KEY, [])
        self.assertEqual(1, len(internal_ports))
        internal_port = internal_ports[0]

        with mock.patch.object(ri, 'internal_network_removed'
                               ) as internal_network_removed,\
                mock.patch.object(ri, 'internal_network_added'
                                  ) as internal_network_added,\
                mock.patch.object(ri, 'external_gateway_removed'
                                  ) as external_gateway_removed,\
                mock.patch.object(ri, 'external_gateway_added'
                                  ) as external_gateway_added:

            ri.process()

            self.assertEqual(1, external_gateway_added.call_count)
            self.assertFalse(external_gateway_removed.called)
            self.assertFalse(internal_network_removed.called)
            internal_network_added.assert_called_once_with(internal_port)
            self.assertEqual(len(stale_devnames),
                             self.mock_driver.unplug.call_count)
            calls = [mock.call(stale_devname,
                               namespace=ri.ns_name,
                               prefix=namespaces.INTERNAL_DEV_PREFIX)
                     for stale_devname in stale_devnames]
            self.mock_driver.unplug.assert_has_calls(calls, any_order=True)

    def test_process_router_delete_stale_external_devices(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        stale_devlist = [l3_test_common.FakeDev('qg-a1b2c3d4-e5')]
        stale_devnames = [dev.name for dev in stale_devlist]

        router = l3_test_common.prepare_router_data(enable_snat=True,
                                                    num_internal_ports=1)
        del router['gw_port']
        ri = l3router.RouterInfo(agent, router['id'],
                                 router, **self.ri_kwargs)

        self.mock_ip.get_devices.return_value = stale_devlist

        ri.process()

        self.mock_driver.unplug.assert_called_with(
            stale_devnames[0],
            namespace=ri.ns_name,
            prefix=namespaces.EXTERNAL_DEV_PREFIX)

    def test_process_dvr_router_delete_stale_external_devices(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        stale_devlist = [l3_test_common.FakeDev('qg-a1b2c3d4-e5')]
        stale_devnames = [dev.name for dev in stale_devlist]

        router = l3_test_common.prepare_router_data(enable_snat=True,
                                                    num_internal_ports=1)
        self._set_ri_kwargs(agent, router['id'], router)
        ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
        ri.snat_iptables_manager = iptables_manager.IptablesManager(
            namespace=ri.snat_namespace.name, use_ipv6=ri.use_ipv6)

        self.mock_ip.get_devices.return_value = stale_devlist

        ri.process()

        self.mock_driver.unplug.assert_called_with(
            stale_devnames[0],
            namespace=ri.snat_namespace.name,
            prefix=namespaces.EXTERNAL_DEV_PREFIX)
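    # With no SNAT namespace present on this host, the stale-device scan
    # must be skipped entirely; the next test asserts get_devices() is
    # never called when the namespace does not exist.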
    def test_process_dvr_router_delete_stale_external_devices_no_snat_ns(
            self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router = l3_test_common.prepare_router_data(enable_gw=False,
                                                    num_internal_ports=1)
        self._set_ri_kwargs(agent, router['id'], router)
        ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
        self.mock_ip.netns.exists.return_value = False

        ri._delete_stale_external_devices('qg-a1b2c3d4-e5')

        self.assertFalse(self.mock_ip.get_devices.called)

    def test_router_deleted(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        agent._queue = mock.Mock()
        agent.router_deleted(None, FAKE_ID)
        self.assertEqual(1, agent._queue.add.call_count)

    def test_routers_updated(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        agent._queue = mock.Mock()
        agent.routers_updated(None, [FAKE_ID])
        self.assertEqual(1, agent._queue.add.call_count)

    def test_removed_from_agent(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        agent._queue = mock.Mock()
        agent.router_removed_from_agent(None, {'router_id': FAKE_ID})
        self.assertEqual(1, agent._queue.add.call_count)

    def test_added_to_agent(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        agent._queue = mock.Mock()
        agent.router_added_to_agent(None, [FAKE_ID])
        self.assertEqual(1, agent._queue.add.call_count)

    def test_network_update_not_called(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        agent._queue = mock.Mock()
        network = {'id': _uuid()}
        agent.network_update(None, network=network)
        self.assertFalse(agent._queue.add.called)

    def test_network_update(self):
        router = l3_test_common.prepare_router_data(num_internal_ports=2)
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        agent._process_added_router(router)
        ri = l3router.RouterInfo(agent, router['id'],
                                 router, **self.ri_kwargs)
        internal_ports = ri.router.get(lib_constants.INTERFACE_KEY, [])
        network_id = internal_ports[0]['network_id']
        agent._queue = mock.Mock()
        network = {'id': network_id}
        agent.network_update(None, network=network)
        self.assertEqual(1, agent._queue.add.call_count)

    def test_create_router_namespace(self):
        self.mock_ip.ensure_namespace.return_value = self.mock_ip
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        ns = namespaces.Namespace(
            'qrouter-bar', self.conf, agent.driver, agent.use_ipv6)
        ns.create()
        calls = [mock.call(['sysctl', '-w', 'net.ipv4.ip_forward=1']),
                 mock.call(['sysctl', '-w',
                            'net.ipv4.conf.all.arp_ignore=1']),
                 mock.call(
                     ['sysctl', '-w', 'net.ipv4.conf.all.arp_announce=2'])]
        if agent.use_ipv6:
            calls.append(mock.call(
                ['sysctl', '-w', 'net.ipv6.conf.all.forwarding=1']))

        self.mock_ip.netns.execute.assert_has_calls(calls)

    def test_destroy_namespace(self):
        namespace = 'qrouter-bar'

        self.list_network_namespaces.return_value = [namespace]
        self.mock_ip.get_devices.return_value = [
            l3_test_common.FakeDev('qr-aaaa'),
            l3_test_common.FakeDev('rfp-aaaa')]

        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)

        ns = namespaces.RouterNamespace(
            'bar', self.conf, agent.driver, agent.use_ipv6)
        ns.create()

        ns.delete()
        self.mock_driver.unplug.assert_called_once_with(
            'qr-aaaa', prefix='qr-', namespace='qrouter-bar')
        self.mock_ip.del_veth.assert_called_once_with('rfp-aaaa')

    def test_destroy_router_namespace(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        ns = namespaces.Namespace(
            'qrouter-bar', self.conf, agent.driver, agent.use_ipv6)
        ns.create()
        ns.delete()
        self.mock_ip.netns.delete.assert_called_once_with("qrouter-bar")

    def test_destroy_snat_namespace(self):
        namespace = 'snat-bar'

        self.list_network_namespaces.return_value = [namespace]
        self.mock_ip.get_devices.return_value = [
            l3_test_common.FakeDev('qg-aaaa'),
            l3_test_common.FakeDev('sg-aaaa')]
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)

        ns = dvr_snat_ns.SnatNamespace(
            'bar', self.conf, agent.driver, agent.use_ipv6)
        ns.create()

        ns.delete()
        calls = [mock.call('qg-aaaa',
                           namespace=namespace,
                           prefix=namespaces.EXTERNAL_DEV_PREFIX),
                 mock.call('sg-aaaa',
                           namespace=namespace,
                           prefix=lib_constants.SNAT_INT_DEV_PREFIX)]
        self.mock_driver.unplug.assert_has_calls(calls, any_order=True)

    def _configure_metadata_proxy(self, enableflag=True):
        if not enableflag:
            self.conf.set_override('enable_metadata_proxy', False)
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router_id = _uuid()
        router = {'id': router_id,
                  'external_gateway_info': {},
                  'routes': [],
                  'distributed': False}
        driver = metadata_driver.MetadataDriver
        with mock.patch.object(
                driver, 'destroy_monitored_metadata_proxy') as destroy_proxy:
            with mock.patch.object(
                    driver, 'spawn_monitored_metadata_proxy') as spawn_proxy:
                agent._process_added_router(router)
                if enableflag:
                    spawn_proxy.assert_called_with(
                        mock.ANY,
                        mock.ANY,
                        self.conf.metadata_port,
                        mock.ANY,
                        router_id=router_id
                    )
                else:
                    self.assertFalse(spawn_proxy.call_count)
                agent._safe_router_removed(router_id)
                if enableflag:
                    destroy_proxy.assert_called_with(mock.ANY,
                                                     router_id,
                                                     mock.ANY,
                                                     'qrouter-' + router_id)
                else:
                    self.assertFalse(destroy_proxy.call_count)

    def test_enable_metadata_proxy(self):
        self._configure_metadata_proxy()

    def test_disable_metadata_proxy_spawn(self):
        self._configure_metadata_proxy(enableflag=False)

    def _test_process_routers_update_rpc_timeout(self, ext_net_call=False,
                                                 ext_net_call_failed=False):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        agent.fullsync = False
        agent._process_router_if_compatible = mock.Mock()
        router_id = _uuid()
        router = {'id': router_id,
                  'external_gateway_info': {'network_id': 'aaa'}}
        self.plugin_api.get_routers.return_value = [router]
        if ext_net_call_failed:
            agent._process_router_if_compatible.side_effect = (
                oslo_messaging.MessagingTimeout)
        agent._queue = mock.Mock()
        agent._resync_router = mock.Mock()
        update = mock.Mock()
        update.id = router_id
        update.resource = None
        agent._queue.each_update_to_next_resource.side_effect = [
            [(None, update)]]
        agent._process_router_update()
        self.assertFalse(agent.fullsync)
        self.assertEqual(ext_net_call,
                         agent._process_router_if_compatible.called)
        agent._resync_router.assert_called_with(update)

    def test_process_routers_update_rpc_timeout_on_get_routers(self):
        self.plugin_api.get_routers.side_effect = (
            oslo_messaging.MessagingTimeout)
        self._test_process_routers_update_rpc_timeout()

    def test_process_routers_update_resyncs_failed_router(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router_id = _uuid()
        router = {'id': router_id}

        # Attempting to configure the router will fail
        agent._process_router_if_compatible = mock.MagicMock()
        agent._process_router_if_compatible.side_effect = RuntimeError()

        # Queue an update from a full sync
        update = resource_processing_queue.ResourceUpdate(
            router_id,
            l3_agent.PRIORITY_SYNC_ROUTERS_TASK,
            resource=router,
            timestamp=timeutils.utcnow())
        agent._queue.add(update)
        agent._process_router_update()

        # The update contained the router object, get_routers won't be called
        self.assertFalse(agent.plugin_rpc.get_routers.called)

        # The update failed, assert that get_routers was called
        agent._process_router_update()
        self.assertTrue(agent.plugin_rpc.get_routers.called)

    def test_process_routers_update_rpc_timeout_on_get_ext_net(self):
        self._test_process_routers_update_rpc_timeout(
            ext_net_call=True, ext_net_call_failed=True)
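    # Deleted routers travel through the same update queue: the helper
    # below drives _process_router_update() with a ROUTER_DELETED action
    # and checks both the success path (router_info dropped, prefix
    # delegation notified via pd.remove_router) and the resync path when
    # removal fails.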
    @mock.patch.object(pd, 'remove_router')
    def _test_process_routers_update_router_deleted(self, remove_router,
                                                    error=False):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        agent._queue = mock.Mock()
        update = mock.Mock()
        update.resource = None
        update.action = 1  # ROUTER_DELETED
        router_info = mock.MagicMock()
        agent.router_info[update.id] = router_info
        router_processor = mock.Mock()
        agent._queue.each_update_to_next_resource.side_effect = [
            [(router_processor, update)]]
        agent._resync_router = mock.Mock()
        if error:
            agent._safe_router_removed = mock.Mock()
            agent._safe_router_removed.return_value = False
        agent._process_router_update()
        if error:
            self.assertFalse(router_processor.fetched_and_processed.called)
            agent._resync_router.assert_called_with(update)
            self.assertFalse(remove_router.called)
        else:
            router_info.delete.assert_called_once_with()
            self.assertFalse(agent.router_info)
            self.assertFalse(agent._resync_router.called)
            router_processor.fetched_and_processed.assert_called_once_with(
                update.timestamp)
            self.assertTrue(remove_router.called)

    def test_process_routers_update_router_deleted_success(self):
        self._test_process_routers_update_router_deleted()

    def test_process_routers_update_router_deleted_error(self):
        self._test_process_routers_update_router_deleted(error=True)

    def test_process_routers_if_compatible(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router = {'id': _uuid()}
        related_router = {'id': _uuid()}
        routers = [router, related_router]
        self.plugin_api.get_routers.return_value = routers
        update = resource_processing_queue.ResourceUpdate(
            router['id'], l3_agent.PRIORITY_RPC, resource=router)

        events_queue = []

        def add_mock(update):
            events_queue.append(update)

        agent._queue = mock.Mock()
        agent._queue.add.side_effect = add_mock

        with mock.patch.object(
            agent, "_process_router_if_compatible"
        ) as process_router_if_compatible, mock.patch.object(
            agent, "_safe_router_removed"
        ) as safe_router_removed:
            self.assertTrue(
                agent._process_routers_if_compatible(routers, update))
            process_router_if_compatible.assert_called_once_with(
                router)
            safe_router_removed.assert_not_called()

        self.assertEqual(1, len(events_queue))
        self.assertEqual(related_router['id'], events_queue[0].id)
        self.assertEqual(l3_agent.PRIORITY_RELATED_ROUTER,
                         events_queue[0].priority)

    def test_process_dvr_routers_ha_on_update_when_router_unbound(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        agent.conf.agent_mode = 'dvr_snat'
        router = mock.Mock()
        router.id = '1234'
        router.distributed = True
        router.ha = True
        router_info = mock.MagicMock()
        agent.router_info[router.id] = router_info
        updated_router = {'id': '1234',
                          'distributed': True, 'ha': True,
                          'external_gateway_info': {}, 'routes': [],
                          'admin_state_up': True}

        self.plugin_api.get_routers.return_value = [updated_router]
        update = resource_processing_queue.ResourceUpdate(
            updated_router['id'], l3_agent.PRIORITY_RPC,
            resource=updated_router)

        with mock.patch.object(agent,
                               "_safe_router_removed") as router_remove,\
                mock.patch.object(agent,
                                  "_process_added_router") as add_router:
            agent._process_routers_if_compatible([updated_router], update)
            router_remove.assert_called_once_with(updated_router['id'])
            add_router.assert_called_once_with(updated_router)
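    # An HA+DVR router whose HA interface is missing is handled like an
    # unbound one: rather than updating in place, the agent removes the
    # router and re-adds it, as the next test asserts.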
    def test_process_dvr_routers_ha_on_update_without_ha_interface(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        agent.conf.agent_mode = 'dvr_snat'
        router = mock.Mock()
        router.id = '1234'
        router.distributed = True
        router._ha_interface = True
        router.ha = True
        router_info = mock.MagicMock()
        agent.router_info[router.id] = router_info
        updated_router = {'id': '1234',
                          'distributed': True, 'ha': True,
                          'external_gateway_info': {}, 'routes': [],
                          'admin_state_up': True}

        self.plugin_api.get_routers.return_value = [updated_router]
        update = resource_processing_queue.ResourceUpdate(
            updated_router['id'], l3_agent.PRIORITY_RPC,
            resource=updated_router)

        with mock.patch.object(agent,
                               "_safe_router_removed") as router_remove,\
                mock.patch.object(agent,
                                  "_process_added_router") as add_router:
            agent._process_routers_if_compatible([updated_router], update)
            router_remove.assert_called_once_with(updated_router['id'])
            add_router.assert_called_once_with(updated_router)

    def test_process_routers_if_compatible_error(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router = {'id': _uuid()}
        self.plugin_api.get_routers.return_value = [router]
        update = resource_processing_queue.ResourceUpdate(
            router['id'], l3_agent.PRIORITY_RPC, resource=router)

        with mock.patch.object(
            agent, "_process_router_if_compatible",
            side_effect=Exception(
                "Test failure during _process_routers_if_compatible")
        ) as process_router_if_compatible, mock.patch.object(
            agent, "_safe_router_removed"
        ) as safe_router_removed:
            self.assertFalse(
                agent._process_routers_if_compatible([router], update))
            process_router_if_compatible.assert_called_once_with(
                router)
            safe_router_removed.assert_not_called()

    def test_process_ha_dvr_router_if_compatible_no_ha_interface(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        agent.conf.agent_mode = 'dvr_snat'
        router = {'id': _uuid(),
                  'distributed': True, 'ha': True,
                  'external_gateway_info': {}, 'routes': [],
                  'admin_state_up': True}
        with mock.patch.object(agent, 'check_ha_state_for_router') as chsfr:
            agent._process_router_if_compatible(router)
            self.assertIn(router['id'], agent.router_info)
            self.assertFalse(chsfr.called)

    def test_process_router_if_compatible(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router = {'id': _uuid(),
                  'routes': [],
                  'admin_state_up': True,
                  'external_gateway_info': {'network_id': 'aaa'}}

        agent._process_router_if_compatible(router)
        self.assertIn(router['id'], agent.router_info)

    def test_nonexistent_interface_driver(self):
        self.conf.set_override('interface_driver', None)
        self.assertRaises(SystemExit, l3_agent.L3NATAgent,
                          HOSTNAME, self.conf)

        self.conf.set_override('interface_driver', 'wrong.driver')
        self.assertRaises(SystemExit, l3_agent.L3NATAgent,
                          HOSTNAME, self.conf)

    @mock.patch.object(namespaces.RouterNamespace, 'delete')
    @mock.patch.object(dvr_snat_ns.SnatNamespace, 'delete')
    def _cleanup_namespace_test(self,
                                stale_namespace_list,
                                router_list,
                                other_namespaces,
                                mock_snat_ns,
                                mock_router_ns):
        good_namespace_list = [namespaces.NS_PREFIX + r['id']
                               for r in router_list]
        good_namespace_list += [dvr_snat_ns.SNAT_NS_PREFIX + r['id']
                                for r in router_list]
        self.list_network_namespaces.return_value = (stale_namespace_list +
                                                     good_namespace_list +
                                                     other_namespaces)

        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)

        self.assertTrue(agent.namespaces_manager._clean_stale)

        pm = self.external_process.return_value
        pm.reset_mock()

        with agent.namespaces_manager as ns_manager:
            for r in router_list:
                ns_manager.keep_router(r['id'])
        qrouters = [n for n in stale_namespace_list
                    if n.startswith(namespaces.NS_PREFIX)]
        self.assertEqual(len(qrouters), mock_router_ns.call_count)
        self.assertEqual(
            len(stale_namespace_list) - len(qrouters),
            mock_snat_ns.call_count)

        self.assertFalse(agent.namespaces_manager._clean_stale)
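    # The namespace sweep must only delete stale qrouter-/snat- namespaces;
    # namespaces belonging to registered router IDs and unrelated ones
    # (e.g. qdhcp-*) survive, as the two tests below assert.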
    def test_cleanup_namespace(self):
        stale_namespaces = [namespaces.NS_PREFIX + 'foo',
                            namespaces.NS_PREFIX + 'bar',
                            dvr_snat_ns.SNAT_NS_PREFIX + 'foo']
        other_namespaces = ['unknown']

        self._cleanup_namespace_test(stale_namespaces,
                                     [],
                                     other_namespaces)

    def test_cleanup_namespace_with_registered_router_ids(self):
        stale_namespaces = [namespaces.NS_PREFIX + 'cccc',
                            namespaces.NS_PREFIX + 'eeeee',
                            dvr_snat_ns.SNAT_NS_PREFIX + 'fffff']
        router_list = [{'id': 'foo', 'distributed': False},
                       {'id': 'aaaa', 'distributed': False}]
        other_namespaces = ['qdhcp-aabbcc', 'unknown']

        self._cleanup_namespace_test(stale_namespaces,
                                     router_list,
                                     other_namespaces)

    def test_create_dvr_gateway(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        router = l3_test_common.prepare_router_data()
        self._set_ri_kwargs(agent, router['id'], router)
        ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)

        port_id = _uuid()
        subnet_id = _uuid()
        dvr_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
                                      'prefixlen': 24,
                                      'subnet_id': subnet_id}],
                       'subnets': [{'id': subnet_id,
                                    'cidr': '20.0.0.0/24',
                                    'gateway_ip': '20.0.0.1'}],
                       'id': port_id,
                       'network_id': _uuid(),
                       'mtu': 1500,
                       'mac_address': 'ca:fe:de:ad:be:ef'}

        interface_name = ri._get_snat_int_device_name(port_id)
        self.device_exists.return_value = False

        with mock.patch.object(ri, 'get_snat_interfaces') as get_interfaces:
            get_interfaces.return_value = self.snat_ports
            ri._create_dvr_gateway(dvr_gw_port, interface_name)

        # check 2 internal ports are plugged
        # check 1 ext-gw-port is plugged
        self.assertEqual(3, self.mock_driver.plug.call_count)
        self.assertEqual(3, self.mock_driver.init_router_port.call_count)

    def test_process_address_scope(self):
        router = l3_test_common.prepare_router_data()
        router['distributed'] = True
        router['gw_port_host'] = HOSTNAME

        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        self._set_ri_kwargs(agent, router['id'], router)
        ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
        ri.get_ex_gw_port = mock.Mock(return_value=None)

        # Make sure the code doesn't crash if ri.snat_iptables_manager
        # is None.
        ri.process_address_scope()

        with mock.patch.object(ri, '_add_address_scope_mark') as mocked_func:
            ri.snat_iptables_manager = iptables_manager.IptablesManager(
                namespace=mock.ANY, use_ipv6=False)
            ri.snat_iptables_manager.defer_apply_off = mock.Mock()

            ri.process_address_scope()
            self.assertEqual(2, mocked_func.call_count)

    def test_get_host_ha_router_count(self):
        self.plugin_api.get_host_ha_router_count.return_value = 1
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        self.assertEqual(1, agent.ha_router_count)
        self.assertTrue(self.plugin_api.get_host_ha_router_count.called)

    def test_get_host_ha_router_count_retried(self):
        raise_timeout = oslo_messaging.MessagingTimeout()
        # Raise a timeout the first time it calls
        # get_host_ha_router_count then return 0
        self.plugin_api.get_host_ha_router_count.side_effect = (
            raise_timeout, 0
        )
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        self.assertEqual(0, agent.ha_router_count)

    def test_external_gateway_removed_ext_gw_port_no_fip_ns(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        agent.conf.agent_mode = 'dvr_snat'
        router = l3_test_common.prepare_router_data(num_internal_ports=2)
        router['gw_port_host'] = HOSTNAME
        self.mock_driver.unplug.reset_mock()

        external_net_id = router['gw_port']['network_id']
        self._set_ri_kwargs(agent, router['id'], router)
        ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
        ri.remove_floating_ip = mock.Mock()
        agent._fetch_external_net_id = mock.Mock(
            return_value=external_net_id)
        ri.ex_gw_port = ri.router['gw_port']
        del ri.router['gw_port']
        ri.external_gateway_added(
            ri.ex_gw_port,
            ri.get_external_device_name(ri.ex_gw_port['id']))
        ri.fip_ns = None
        nat = ri.iptables_manager.ipv4['nat']
        nat.clear_rules_by_tag = mock.Mock()
        nat.add_rule = mock.Mock()
        ri.snat_namespace = mock.Mock()
        ri.external_gateway_removed(
            ri.ex_gw_port,
            ri.get_external_device_name(ri.ex_gw_port['id']))

        self.assertFalse(ri.remove_floating_ip.called)

    @mock.patch.object(os, 'geteuid', return_value=mock.ANY)
    @mock.patch.object(pwd, 'getpwuid')
    def test_spawn_radvd(self, mock_getpwuid, *args):
        router = l3_test_common.prepare_router_data(
            ip_version=lib_constants.IP_VERSION_6)

        conffile = '/fake/radvd.conf'
        pidfile = '/fake/radvd.pid'
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)

        # we don't want the whole process manager to be mocked to be
        # able to catch execute() calls
        self.external_process_p.stop()
        self.ip_cls_p.stop()

        get_conf_file_name = 'neutron.agent.linux.utils.get_conf_file_name'
        get_pid_file_name = ('neutron.agent.linux.external_process.'
                             'ProcessManager.get_pid_file_name')
        utils_execute = 'neutron.agent.common.utils.execute'

        mock.patch(get_conf_file_name).start().return_value = conffile
        mock.patch(get_pid_file_name).start().return_value = pidfile
        execute = mock.patch(utils_execute).start()

        radvd = ra.DaemonMonitor(
            router['id'],
            namespaces.RouterNamespace._get_ns_name(router['id']),
            agent.process_monitor,
            l3_test_common.FakeDev,
            self.conf)

        test_users = [('', 'stack', '-u stack'),
                      ('neutron', mock.ANY, '-u neutron'),
                      ('root', mock.ANY, None)]
        for radvd_user, os_user, to_check in test_users:
            self.conf.set_override('radvd_user', radvd_user)
            mock_getpwuid.return_value = FakeUser(os_user)
            radvd.enable(router['_interfaces'])
            cmd = execute.call_args[0][0]
            _join = lambda *args: ' '.join(args)
            cmd = _join(*cmd)
            self.assertIn('radvd', cmd)
            self.assertIn(_join('-C', conffile), cmd)
            self.assertIn(_join('-p', pidfile), cmd)
            self.assertIn(_join('-m', 'syslog'), cmd)
            if to_check:
                self.assertIn(to_check, cmd)
            else:
                self.assertNotIn('-u', cmd)

    def test_generate_radvd_mtu_conf(self):
        router = l3_test_common.prepare_router_data()
        ipv6_subnet_modes = [{'ra_mode': lib_constants.IPV6_SLAAC,
                              'address_mode': lib_constants.IPV6_SLAAC}]
        network_mtu = '1446'
        ri = self._process_router_ipv6_subnet_added(router,
                                                    ipv6_subnet_modes,
                                                    None,
                                                    network_mtu)
        # Verify that MTU is advertised
        expected = "AdvLinkMTU 1446"
        ri.radvd._generate_radvd_conf(router[lib_constants.INTERFACE_KEY])
        self.assertIn(expected, self.utils_replace_file.call_args[0][1])

    def test_generate_radvd_conf_other_and_managed_flag(self):
        # expected = {ra_mode: (AdvOtherConfigFlag, AdvManagedFlag), ...}
        expected = {lib_constants.IPV6_SLAAC: (False, False),
                    lib_constants.DHCPV6_STATELESS: (True, False),
                    lib_constants.DHCPV6_STATEFUL: (False, True)}

        modes = [lib_constants.IPV6_SLAAC, lib_constants.DHCPV6_STATELESS,
                 lib_constants.DHCPV6_STATEFUL]
        mode_combos = list(iter_chain(
            *[[list(combo) for combo in iter_combinations(modes, i)]
              for i in range(1, len(modes) + 1)]))

        for mode_list in mode_combos:
            ipv6_subnet_modes = [{'ra_mode': mode, 'address_mode': mode}
                                 for mode in mode_list]
            router = l3_test_common.prepare_router_data()
            ri = self._process_router_ipv6_subnet_added(router,
                                                        ipv6_subnet_modes)

            ri.radvd._generate_radvd_conf(router[lib_constants.INTERFACE_KEY])

            def assertFlag(flag):
                return (self.assertIn if flag else self.assertNotIn)

            other_flag, managed_flag = (
                any(expected[mode][0] for mode in mode_list),
                any(expected[mode][1] for mode in mode_list))

            assertFlag(other_flag)('AdvOtherConfigFlag on;',
                                   self.utils_replace_file.call_args[0][1])
            assertFlag(managed_flag)('AdvManagedFlag on;',
                                     self.utils_replace_file.call_args[0][1])

    def test_generate_radvd_intervals(self):
        self.conf.set_override('min_rtr_adv_interval', 22)
        self.conf.set_override('max_rtr_adv_interval', 66)
        router = l3_test_common.prepare_router_data()
        ipv6_subnet_modes = [{'ra_mode': lib_constants.IPV6_SLAAC,
                              'address_mode': lib_constants.IPV6_SLAAC}]
        ri = self._process_router_ipv6_subnet_added(router,
                                                    ipv6_subnet_modes)
        ri.radvd._generate_radvd_conf(router[lib_constants.INTERFACE_KEY])
        self.assertIn("MinRtrAdvInterval 22",
                      self.utils_replace_file.call_args[0][1])
        self.assertIn("MaxRtrAdvInterval 66",
                      self.utils_replace_file.call_args[0][1])

    def test_generate_radvd_rdnss_conf(self):
        router = l3_test_common.prepare_router_data()
        ipv6_subnet_modes = [{'ra_mode': lib_constants.IPV6_SLAAC,
                              'address_mode': lib_constants.IPV6_SLAAC}]
        dns_list = ['fd01:1::100', 'fd01:1::200', 'fd01::300', 'fd01::400']
        ri = self._process_router_ipv6_subnet_added(router,
                                                    ipv6_subnet_modes,
                                                    dns_nameservers=dns_list)
        ri.radvd._generate_radvd_conf(router[lib_constants.INTERFACE_KEY])
        # Verify that radvd configuration file includes RDNSS entries
        expected = "RDNSS "
        for dns in dns_list[0:ra.MAX_RDNSS_ENTRIES]:
            expected += "%s " % dns
        self.assertIn(expected, self.utils_replace_file.call_args[0][1])

    def _pd_expected_call_external_process(self, requestor, ri, enable=True,
                                           ha=False):
        expected_calls = []
        if enable:
            expected_calls.append(mock.call(uuid=requestor,
                                            service='dibbler',
                                            default_cmd_callback=mock.ANY,
                                            namespace=ri.ns_name,
                                            conf=mock.ANY,
                                            pid_file=mock.ANY))
            expected_calls.append(mock.call().enable(reload_cfg=False))
        else:
            expected_calls.append(mock.call(uuid=requestor,
                                            service='dibbler',
                                            namespace=ri.ns_name,
                                            conf=mock.ANY,
                                            pid_file=mock.ANY))
            # in the HA switchover case, disable is called without arguments
            if ha:
                expected_calls.append(mock.call().disable())
            else:
                expected_calls.append(mock.call().disable(
                    get_stop_command=mock.ANY))
        return expected_calls

    def _pd_setup_agent_router(self, enable_ha=False):
        router = l3_test_common.prepare_router_data()
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        agent._router_added(router['id'], router)
        # Make sure radvd monitor is created
        ri = agent.router_info[router['id']]
        ri.iptables_manager.ipv6['mangle'] = mock.MagicMock()
        ri._process_pd_iptables_rules = mock.MagicMock()
        if not ri.radvd:
            ri.radvd = ra.DaemonMonitor(router['id'],
                                        ri.ns_name,
                                        agent.process_monitor,
                                        ri.get_internal_device_name,
                                        self.conf)
        if enable_ha:
            agent.pd.routers[router['id']]['master'] = False
        return agent, router, ri

    def _pd_remove_gw_interface(self, intfs, agent, ri):
        expected_pd_update = {}
        expected_calls = []
        for intf in intfs:
            requestor_id = self._pd_get_requestor_id(intf, ri)
            expected_calls += (self._pd_expected_call_external_process(
                requestor_id, ri, False))
            for subnet in intf['subnets']:
                expected_pd_update[subnet['id']] = (
                    lib_constants.PROVISIONAL_IPV6_PD_PREFIX)

        # Implement the prefix update notifier
        # Keep track of the updated prefix
        self.pd_update = {}

        def pd_notifier(context, prefix_update):
            self.pd_update = prefix_update
            for subnet_id, prefix in prefix_update.items():
                for intf in intfs:
                    for subnet in intf['subnets']:
                        if subnet['id'] == subnet_id:
                            # Update the prefix
                            subnet['cidr'] = prefix
                            break

        # Remove the gateway interface
        agent.pd.notifier = pd_notifier
        agent.pd.remove_gw_interface(ri.router['id'])

        self._pd_assert_dibbler_calls(
            expected_calls,
            self.external_process.mock_calls[-len(expected_calls):])
        self.assertEqual(expected_pd_update, self.pd_update)

    def _pd_remove_interfaces(self, intfs, agent, ri):
        expected_pd_update = []
        expected_calls = []
        for intf in intfs:
            # Remove the router interface
            ri.router[lib_constants.INTERFACE_KEY].remove(intf)
            requestor_id = self._pd_get_requestor_id(intf, ri)
            expected_calls += (self._pd_expected_call_external_process(
                requestor_id, ri, False))
            for subnet in intf['subnets']:
                expected_pd_update += [
                    {subnet['id']:
                     lib_constants.PROVISIONAL_IPV6_PD_PREFIX}]

        # Implement the prefix update notifier
        # Keep track of the updated prefix
        self.pd_update = []

        def pd_notifier(context, prefix_update):
            self.pd_update.append(prefix_update)
            for intf in intfs:
                for subnet in intf['subnets']:
                    if subnet['id'] in prefix_update:
                        # Update the prefix
                        subnet['cidr'] = prefix_update[subnet['id']]

        # Process the router for removed interfaces
        agent.pd.notifier = pd_notifier
        ri.process()

        # The number of external process calls takes radvd into account.
        # This is because there is no ipv6 interface any more after removing
        # the interfaces, and radvd will be killed because of that
        self._pd_assert_dibbler_calls(
            expected_calls,
            self.external_process.mock_calls[-len(expected_calls) - 2:])
        self._pd_assert_radvd_calls(ri, False)
        self.assertEqual(expected_pd_update, self.pd_update)

    def _pd_get_requestor_id(self, intf, ri):
        ifname = ri.get_internal_device_name(intf['id'])
        for subnet in intf['subnets']:
            return dibbler.PDDibbler(ri.router['id'],
                                     subnet['id'], ifname).requestor_id

    def _pd_assert_dibbler_calls(self, expected, actual):
        '''Check the external process calls for dibbler are expected

        in the case of multiple pd-enabled router ports, the exact sequence
        of these calls are not deterministic. It's known, though, that each
        external_process call is followed with either an enable() or
        disable()
        '''
        num_ext_calls = len(expected) // 2
        expected_ext_calls = []
        actual_ext_calls = []
        expected_action_calls = []
        actual_action_calls = []
        for c in range(num_ext_calls):
            expected_ext_calls.append(expected[c * 2])
            actual_ext_calls.append(actual[c * 2])
            expected_action_calls.append(expected[c * 2 + 1])
            actual_action_calls.append(actual[c * 2 + 1])

        self.assertEqual(expected_action_calls, actual_action_calls)
        for exp in expected_ext_calls:
            for act in actual_ext_calls:
                if exp == act:
                    break
            else:
                msg = "Unexpected dibbler external process call."
                self.fail(msg)

    def _pd_assert_radvd_calls(self, ri, enable=True):
        exp_calls = self._radvd_expected_call_external_process(ri, enable)
        self.assertEqual(exp_calls,
                         self.external_process.mock_calls[-len(exp_calls):])

    def _pd_assert_update_subnet_calls(self, router_id, intfs,
                                       mock_pd_update_subnet):
        for intf in intfs:
            mock_pd_update_subnet.assert_any_call(router_id,
                                                  intf['subnets'][0]['id'],
                                                  intf['subnets'][0]['cidr'])

    def _pd_get_prefixes(self, agent, ri, existing_intfs, new_intfs,
                         mock_get_prefix):
        # First generate the prefixes that will be used for each interface
        prefixes = {}
        expected_pd_update = {}
        expected_calls = []
        last_prefix = ''
        for ifno, intf in enumerate(existing_intfs + new_intfs):
            requestor_id = self._pd_get_requestor_id(intf, ri)
            prefixes[requestor_id] = "2001:db8:%d::/64" % ifno
            last_prefix = prefixes[requestor_id]
            if intf in new_intfs:
                subnet_id = (intf['subnets'][0]['id'] if intf['subnets']
                             else None)
                expected_pd_update[subnet_id] = prefixes[requestor_id]
                expected_calls += (
                    self._pd_expected_call_external_process(requestor_id,
                                                            ri))

        # Implement the prefix update notifier
        # Keep track of the updated prefix
        self.pd_update = {}

        def pd_notifier(context, prefix_update):
            self.pd_update = prefix_update
            for subnet_id, prefix in prefix_update.items():
                gateway_ip = '%s1' % netaddr.IPNetwork(prefix).network
                for intf in new_intfs:
                    for fip in intf['fixed_ips']:
                        if fip['subnet_id'] == subnet_id:
                            fip['ip_address'] = gateway_ip
                    for subnet in intf['subnets']:
                        if subnet['id'] == subnet_id:
                            # Update the prefix
                            subnet['cidr'] = prefix
                            subnet['gateway_ip'] = gateway_ip
                            break

        # Start the dibbler client
        agent.pd.notifier = pd_notifier
        agent.pd.process_prefix_update()

        # Get the prefix and check that the neutron server is notified
        def get_prefix(pdo):
            key = '%s:%s:%s' % (pdo.router_id, pdo.subnet_id, pdo.ri_ifname)
            return prefixes[key]
        mock_get_prefix.side_effect = get_prefix
        agent.pd.process_prefix_update()

        # Make sure that the updated prefixes are expected
        self._pd_assert_dibbler_calls(
            expected_calls,
            self.external_process.mock_calls[-len(expected_calls):])
        self.assertEqual(expected_pd_update, self.pd_update)

        return last_prefix
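    # The PD tests drive dibbler through two process_prefix_update()
    # passes: the first starts the client, the second collects the
    # delegated prefix via get_prefix() and pushes it to the server
    # through the pd notifier installed above.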
    def _pd_verify_update_results(self, ri, pd_intfs,
                                  mock_pd_update_subnet):
        # verify router port initialized
        for intf in pd_intfs:
            self.mock_driver.init_router_port.assert_any_call(
                ri.get_internal_device_name(intf['id']),
                ip_cidrs=l3router.common_utils.fixed_ip_cidrs(
                    intf['fixed_ips']),
                namespace=ri.ns_name)
        # verify that subnet is updated in PD
        self._pd_assert_update_subnet_calls(ri.router['id'], pd_intfs,
                                            mock_pd_update_subnet)

        # Check that radvd is started
        self._pd_assert_radvd_calls(ri)

    def _pd_add_gw_interface(self, agent, ri):
        gw_ifname = ri.get_external_device_name(ri.router['gw_port']['id'])
        agent.pd.add_gw_interface(ri.router['id'], gw_ifname)

    @mock.patch.object(pd.PrefixDelegation, 'update_subnet')
    @mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
    @mock.patch.object(dibbler.os, 'getpid', return_value=1234)
    @mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
                       return_value=True)
    @mock.patch.object(dibbler.os, 'chmod')
    @mock.patch.object(dibbler.shutil, 'rmtree')
    @mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
    def test_pd_have_subnet(self, mock1, mock2, mock3, mock4,
                            mock_getpid, mock_get_prefix,
                            mock_pd_update_subnet):
        '''Add one pd-enabled subnet that has already been assigned
        '''
        prefix = '2001:db8:10::/64'

        # Initial setup
        agent, router, ri = self._pd_setup_agent_router()

        # Create one pd-enabled subnet and add router interface
        l3_test_common.router_append_pd_enabled_subnet(router, prefix=prefix)
        ri.process()

        pd_intfs = l3_test_common.get_assigned_pd_interfaces(router)
        subnet_id = pd_intfs[0]['subnets'][0]['id']

        # Check that _process_pd_iptables_rules() is called correctly
        self.assertEqual({subnet_id: prefix}, ri.pd_subnets)
        ri._process_pd_iptables_rules.assert_called_once_with(prefix,
                                                              subnet_id)

    @mock.patch.object(pd.PrefixDelegation, 'update_subnet')
    @mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
    @mock.patch.object(dibbler.os, 'getpid', return_value=1234)
    @mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
                       return_value=True)
    @mock.patch.object(dibbler.os, 'chmod')
    @mock.patch.object(dibbler.shutil, 'rmtree')
    @mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
    def test_pd_add_remove_subnet(self, mock1, mock2, mock3, mock4,
                                  mock_getpid, mock_get_prefix,
                                  mock_pd_update_subnet):
        '''Add and remove one pd-enabled subnet

        Remove the interface by deleting it from the router
        '''
        # Initial setup
        agent, router, ri = self._pd_setup_agent_router()

        # Create one pd-enabled subnet and add router interface
        l3_test_common.router_append_pd_enabled_subnet(router)
        ri.process()

        # Provisional PD prefix on startup, so nothing cached
        self.assertEqual({}, ri.pd_subnets)

        # No client should be started since there is no gateway port
        self.assertFalse(self.external_process.call_count)
        self.assertFalse(mock_get_prefix.call_count)

        # Add the gateway interface
        self._pd_add_gw_interface(agent, ri)
        update_router = copy.deepcopy(router)
        pd_intfs = l3_test_common.get_unassigned_pd_interfaces(update_router)
        subnet_id = pd_intfs[0]['subnets'][0]['id']

        # Get one prefix
        prefix = self._pd_get_prefixes(agent, ri, [], pd_intfs,
                                       mock_get_prefix)

        # Update the router with the new prefix
        ri.router = update_router
        ri.process()

        self._pd_verify_update_results(ri, pd_intfs, mock_pd_update_subnet)

        # Check that _process_pd_iptables_rules() is called correctly
        self.assertEqual({subnet_id: prefix}, ri.pd_subnets)
        ri._process_pd_iptables_rules.assert_called_once_with(prefix,
                                                              subnet_id)

        # Now remove the interface
        self._pd_remove_interfaces(pd_intfs, agent, ri)
        self.assertEqual({}, ri.pd_subnets)

    @mock.patch.object(pd.PrefixDelegation, 'update_subnet')
    @mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
    @mock.patch.object(dibbler.os, 'getpid', return_value=1234)
    @mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
                       return_value=True)
    @mock.patch.object(dibbler.os, 'chmod')
    @mock.patch.object(dibbler.shutil, 'rmtree')
    @mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
    def test_pd_remove_gateway(self, mock1, mock2, mock3, mock4,
                               mock_getpid, mock_get_prefix,
                               mock_pd_update_subnet):
        '''Add one pd-enabled subnet and remove the gateway port

        Remove the gateway port and check the prefix is removed
        '''
        # Initial setup
        agent, router, ri = self._pd_setup_agent_router()

        # Create one pd-enabled subnet and add router interface
        l3_test_common.router_append_pd_enabled_subnet(router)
        ri.process()

        # Add the gateway interface
        self._pd_add_gw_interface(agent, ri)
        update_router = copy.deepcopy(router)
        pd_intfs = l3_test_common.get_unassigned_pd_interfaces(update_router)

        # Get one prefix
        self._pd_get_prefixes(agent, ri, [], pd_intfs, mock_get_prefix)

        # Update the router with the new prefix
        ri.router = update_router
        ri.process()

        self._pd_verify_update_results(ri, pd_intfs, mock_pd_update_subnet)

        # Now remove the gw interface
        self._pd_remove_gw_interface(pd_intfs, agent, ri)

    @mock.patch.object(pd.PrefixDelegation, 'update_subnet')
    @mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
    @mock.patch.object(dibbler.os, 'getpid', return_value=1234)
    @mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
                       return_value=True)
    @mock.patch.object(dibbler.os, 'chmod')
    @mock.patch.object(dibbler.shutil, 'rmtree')
    @mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
    def test_pd_add_remove_2_subnets(self, mock1, mock2, mock3, mock4,
                                     mock_getpid, mock_get_prefix,
                                     mock_pd_update_subnet):
        '''Add and remove two pd-enabled subnets

        Remove the interfaces by deleting them from the router
        '''
        # Initial setup
        agent, router, ri = self._pd_setup_agent_router()

        # Create 2 pd-enabled subnets and add router interfaces
        l3_test_common.router_append_pd_enabled_subnet(router, count=2)
        ri.process()

        # No client should be started
        self.assertFalse(self.external_process.call_count)
        self.assertFalse(mock_get_prefix.call_count)

        # Add the gateway interface
        self._pd_add_gw_interface(agent, ri)
        update_router = copy.deepcopy(router)
        pd_intfs = l3_test_common.get_unassigned_pd_interfaces(update_router)

        # Get prefixes
        self._pd_get_prefixes(agent, ri, [], pd_intfs, mock_get_prefix)

        # Update the router with the new prefix
        ri.router = update_router
        ri.process()

        self._pd_verify_update_results(ri, pd_intfs, mock_pd_update_subnet)

        # Now remove the interface
        self._pd_remove_interfaces(pd_intfs, agent, ri)

    @mock.patch.object(pd.PrefixDelegation, 'update_subnet')
    @mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
    @mock.patch.object(dibbler.os, 'getpid', return_value=1234)
    @mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
                       return_value=True)
    @mock.patch.object(dibbler.os, 'chmod')
    @mock.patch.object(dibbler.shutil, 'rmtree')
    @mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
    def test_pd_remove_gateway_2_subnets(self, mock1, mock2, mock3, mock4,
                                         mock_getpid, mock_get_prefix,
                                         mock_pd_update_subnet):
        '''Add one pd-enabled subnet, followed by adding another one

        Remove the gateway port and check the prefix is removed
        '''
        # Initial setup
        agent, router, ri = self._pd_setup_agent_router()

        # Add the gateway interface
        self._pd_add_gw_interface(agent, ri)
        # Create 1 pd-enabled subnet and add router interface
        l3_test_common.router_append_pd_enabled_subnet(router, count=1)
        ri.process()

        update_router = copy.deepcopy(router)
        pd_intfs = l3_test_common.get_unassigned_pd_interfaces(update_router)

        # Get prefixes
        self._pd_get_prefixes(agent, ri, [], pd_intfs, mock_get_prefix)

        # Update the router with the new prefix
        ri.router = update_router
        ri.process()

        self._pd_verify_update_results(ri, pd_intfs, mock_pd_update_subnet)

        # Now add another interface
        # Create one pd-enabled subnet and add router interface
        l3_test_common.router_append_pd_enabled_subnet(update_router,
                                                       count=1)
        ri.process()

        update_router_2 = copy.deepcopy(update_router)
        pd_intfs1 = l3_test_common.get_unassigned_pd_interfaces(
            update_router_2)

        # Get prefixes
        self._pd_get_prefixes(agent, ri, pd_intfs, pd_intfs1,
                              mock_get_prefix)

        # Update the router with the new prefix
        ri.router = update_router_2
        ri.process()

        self._pd_verify_update_results(ri, pd_intfs1, mock_pd_update_subnet)

        # Now remove the gw interface
        self._pd_remove_gw_interface(pd_intfs + pd_intfs1, agent, ri)

    @mock.patch.object(l3router.RouterInfo, 'enable_radvd')
    @mock.patch.object(pd.PrefixDelegation, '_add_lla')
    @mock.patch.object(pd.PrefixDelegation, 'update_subnet')
    @mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
    @mock.patch.object(dibbler.os, 'getpid', return_value=1234)
    @mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
                       return_value=True)
    @mock.patch.object(dibbler.os, 'chmod')
    @mock.patch.object(dibbler.shutil, 'rmtree')
    @mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
    def test_pd_ha_standby(self, mock1, mock2, mock3, mock4,
                           mock_getpid, mock_get_prefix,
                           mock_pd_update_subnet, mock_add_lla,
                           mock_enable_radvd):
        '''Test HA in the standby router

        The intent is to test the PD code with HA. To avoid unnecessary
        complexities, use the regular router.
        '''
        # Initial setup
        agent, router, ri = self._pd_setup_agent_router(enable_ha=True)

        # Create one pd-enabled subnet and add router interface
        l3_test_common.router_append_pd_enabled_subnet(router)
        self._pd_add_gw_interface(agent, ri)
        ri.process()
        self.assertFalse(mock_add_lla.called)

        # No client should be started since it's standby router
        agent.pd.process_prefix_update()
        self.assertFalse(self.external_process.called)
        self.assertFalse(mock_get_prefix.called)

        update_router = copy.deepcopy(router)
        pd_intfs = l3_test_common.assign_prefix_for_pd_interfaces(
            update_router)

        # Update the router with the new prefix
        ri.router = update_router
        ri.process()

        self._pd_assert_update_subnet_calls(router['id'], pd_intfs,
                                            mock_pd_update_subnet)

        # No client should be started since it's standby router
        agent.pd.process_prefix_update()
        self.assertFalse(self.external_process.called)
        self.assertFalse(mock_get_prefix.called)

    @mock.patch.object(pd.PrefixDelegation, '_add_lla')
    @mock.patch.object(pd.PrefixDelegation, 'update_subnet')
    @mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
    @mock.patch.object(dibbler.os, 'getpid', return_value=1234)
    @mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
                       return_value=True)
    @mock.patch.object(dibbler.os, 'chmod')
    @mock.patch.object(dibbler.shutil, 'rmtree')
    @mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
    def test_pd_ha_active(self, mock1, mock2, mock3, mock4,
                          mock_getpid, mock_get_prefix,
                          mock_pd_update_subnet, mock_add_lla):
        '''Test HA in the active router

        The intent is to test the PD code with HA. To avoid unnecessary
        complexities, use the regular router.
''' # Initial setup agent, router, ri = self._pd_setup_agent_router(enable_ha=True) # Create one pd-enabled subnet and add router interface l3_test_common.router_append_pd_enabled_subnet(router) self._pd_add_gw_interface(agent, ri) ri.process() self.assertFalse(mock_add_lla.called) # No client should be started since it's standby router agent.pd.process_prefix_update() self.assertFalse(self.external_process.called) self.assertFalse(mock_get_prefix.called) update_router = copy.deepcopy(router) pd_intfs = l3_test_common.get_unassigned_pd_interfaces(update_router) # Turn the router to be active agent.pd.process_ha_state(router['id'], True) # Get prefixes self._pd_get_prefixes(agent, ri, [], pd_intfs, mock_get_prefix) # Update the router with the new prefix ri.router = update_router ri.process() self._pd_verify_update_results(ri, pd_intfs, mock_pd_update_subnet) @mock.patch.object(pd.PrefixDelegation, 'update_subnet') @mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True) @mock.patch.object(dibbler.os, 'getpid', return_value=1234) @mock.patch.object(pd.PrefixDelegation, '_is_lla_active', return_value=True) @mock.patch.object(dibbler.os, 'chmod') @mock.patch.object(dibbler.shutil, 'rmtree') @mock.patch.object(pd.PrefixDelegation, '_get_sync_data') def test_pd_ha_switchover(self, mock1, mock2, mock3, mock4, mock_getpid, mock_get_prefix, mock_pd_update_subnet): '''Test HA in the active router The intent is to test the PD code with HA. To avoid unnecessary complexities, use the regular router. ''' # Initial setup agent, router, ri = self._pd_setup_agent_router(enable_ha=True) # Turn the router to be active agent.pd.process_ha_state(router['id'], True) # Create one pd-enabled subnet and add router interface l3_test_common.router_append_pd_enabled_subnet(router) self._pd_add_gw_interface(agent, ri) ri.process() update_router = copy.deepcopy(router) pd_intfs = l3_test_common.get_unassigned_pd_interfaces(update_router) # Get prefixes self._pd_get_prefixes(agent, ri, [], pd_intfs, mock_get_prefix) # Update the router with the new prefix ri.router = update_router ri.process() self._pd_verify_update_results(ri, pd_intfs, mock_pd_update_subnet) # Turn the router to be standby agent.pd.process_ha_state(router['id'], False) expected_calls = [] for intf in pd_intfs: requestor_id = self._pd_get_requestor_id(intf, ri) expected_calls += (self._pd_expected_call_external_process( requestor_id, ri, False, ha=True)) self._pd_assert_dibbler_calls(expected_calls, self.external_process.mock_calls[-len(expected_calls):]) @mock.patch.object(dibbler.os, 'chmod') def test_pd_generate_dibbler_conf(self, mock_chmod): pddib = dibbler.PDDibbler("router_id", "subnet-id", "ifname") pddib._generate_dibbler_conf("ex_gw_ifname", "fe80::f816:3eff:fef5:a04e", None) expected = 'bind-to-address fe80::f816:3eff:fef5:a04e\n'\ '# ask for address\n \n pd 1\n \n}' self.assertIn(expected, self.utils_replace_file.call_args[0][1]) pddib._generate_dibbler_conf("ex_gw_ifname", "fe80::f816:3eff:fef5:a04e", "2001:db8:2c50:2026::/64") expected = 'bind-to-address fe80::f816:3eff:fef5:a04e\n'\ '# ask for address\n \n pd 1 '\ '{\n prefix 2001:db8:2c50:2026::/64\n }\n \n}' self.assertIn(expected, self.utils_replace_file.call_args[0][1]) def _verify_address_scopes_iptables_rule(self, mock_iptables_manager): filter_calls = [mock.call.add_chain('scope'), mock.call.add_rule('FORWARD', '-j $scope')] v6_mangle_calls = [mock.call.add_chain('scope'), mock.call.add_rule('PREROUTING', '-j $scope'), mock.call.add_rule( 'PREROUTING', '-m connmark ! 
--mark 0x0/0xffff0000 ' '-j CONNMARK --restore-mark ' '--nfmask 0xffff0000 --ctmask 0xffff0000')] v4_mangle_calls = (v6_mangle_calls + [mock.call.add_chain('floatingip'), mock.call.add_chain('float-snat'), mock.call.add_rule('PREROUTING', '-j $floatingip'), mock.call.add_rule( 'PREROUTING', '-d 169.254.169.254/32 -i %(interface_name)s ' '-p tcp -m tcp --dport 80 ' '-j MARK --set-xmark %(value)s/%(mask)s' % {'interface_name': namespaces.INTERNAL_DEV_PREFIX + '+', 'value': self.conf.metadata_access_mark, 'mask': lib_constants.ROUTER_MARK_MASK}), mock.call.add_rule( 'float-snat', '-m connmark --mark 0x0/0xffff0000 ' '-j CONNMARK --save-mark ' '--nfmask 0xffff0000 --ctmask 0xffff0000')]) mock_iptables_manager.ipv4['filter'].assert_has_calls(filter_calls) mock_iptables_manager.ipv6['filter'].assert_has_calls(filter_calls) mock_iptables_manager.ipv4['mangle'].assert_has_calls(v4_mangle_calls, any_order=True) mock_iptables_manager.ipv6['mangle'].assert_has_calls(v6_mangle_calls, any_order=True) def test_initialize_address_scope_iptables_rules(self): id = _uuid() agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) with mock.patch('neutron.agent.linux.iptables_manager.' 'IptablesManager'): ri = l3router.RouterInfo(agent, id, {}, **self.ri_kwargs) self._verify_address_scopes_iptables_rule(ri.iptables_manager) def test_initialize_address_scope_iptables_rules_dvr(self): router = l3_test_common.prepare_router_data() with mock.patch('neutron.agent.linux.iptables_manager.' 'IptablesManager'): self._set_ri_kwargs(mock.Mock(), router['id'], router) ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs) self._verify_address_scopes_iptables_rule(ri.iptables_manager) interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test( self, ri) router['gw_port_host'] = ri.host ri._external_gateway_added = mock.Mock() ri._create_dvr_gateway(ex_gw_port, interface_name) self._verify_address_scopes_iptables_rule( ri.snat_iptables_manager) def _verify_metadata_iptables_rule(self, mock_iptables_manager): v4_mangle_calls = ([mock.call.add_rule( 'PREROUTING', '-d 169.254.169.254/32 -i %(interface_name)s ' '-p tcp -m tcp --dport 80 ' '-j MARK --set-xmark %(value)s/%(mask)s' % {'interface_name': namespaces.INTERNAL_DEV_PREFIX + '+', 'value': self.conf.metadata_access_mark, 'mask': lib_constants.ROUTER_MARK_MASK})]) mock_iptables_manager.ipv4['mangle'].assert_has_calls(v4_mangle_calls, any_order=True) def test_initialize_metadata_iptables_rules(self): id = _uuid() agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) with mock.patch('neutron.agent.linux.iptables_manager.' 'IptablesManager'): ri = l3router.RouterInfo(agent, id, {}, **self.ri_kwargs) self._verify_metadata_iptables_rule(ri.iptables_manager) @mock.patch.object(l3router.RouterInfo, 'delete') @mock.patch.object(ha_router.HaRouter, 'destroy_state_change_monitor') def test_delete_ha_router_initialize_fails(self, mock_dscm, mock_delete): router = l3_test_common.prepare_router_data(enable_ha=True) router[lib_constants.HA_INTERFACE_KEY] = None agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) # an early failure of an HA router initialization shouldn't try # to clean up a state change monitor process that was never spawned. # Cannot use self.assertRaises(Exception, ...) as that causes an H202 # pep8 failure.
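# For reference, a minimal sketch of the call form that the H202
# check forbids here (assertRaises against the broad Exception class):
#     self.assertRaises(Exception, agent._router_added,
#                       router['id'], router)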
try: agent._router_added(router['id'], router) raise Exception("agent._router_added() should have raised an " "exception") except Exception: pass self.assertTrue(mock_delete.called) self.assertFalse(mock_dscm.called) @mock.patch.object(lla.LinkLocalAllocator, '_write') @mock.patch.object(l3router.RouterInfo, '_get_gw_ips_cidr') def test_process_floating_ip_addresses_not_care_port_forwarding( self, mock_get_gw_cidr, mock_lla_write): pf_used_fip = [{'cidr': '15.1.2.4/32'}, {'cidr': '15.1.2.5/32'}] gw_cidr = {'cidr': '15.1.2.79/24'} need_to_remove_fip = [{'cidr': '15.1.2.99/32'}] fake_floatingips = {'floatingips': [ {'id': _uuid(), 'floating_ip_address': '15.1.2.3', 'fixed_ip_address': '192.168.0.1', 'status': 'DOWN', 'floating_network_id': _uuid(), 'port_id': _uuid(), 'host': HOSTNAME}]} router = l3_test_common.prepare_router_data(enable_snat=True) router[lib_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips'] agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ri = l3router.RouterInfo(agent, router['id'], router, **self.ri_kwargs) ri.centralized_port_forwarding_fip_set = set( [i['cidr'] for i in pf_used_fip]) ri.iptables_manager.ipv4['nat'] = mock.MagicMock() ri.get_external_device_name = mock.Mock(return_value='exgw') floating_ips = ri.get_floating_ips() fip_id = floating_ips[0]['id'] device = self.mock_ip_dev device.addr.list.return_value = ( pf_used_fip + need_to_remove_fip + [gw_cidr]) ri.iptables_manager.ipv4['nat'] = mock.MagicMock() mock_get_gw_cidr.return_value = set([gw_cidr['cidr']]) ri.add_floating_ip = mock.Mock( return_value=lib_constants.FLOATINGIP_STATUS_ACTIVE) ri.remove_floating_ip = mock.Mock() fip_statuses = ri.process_floating_ip_addresses( mock.sentinel.interface_name) self.assertEqual({fip_id: lib_constants.FLOATINGIP_STATUS_ACTIVE}, fip_statuses) ri.add_floating_ip.assert_called_once_with( floating_ips[0], mock.sentinel.interface_name, device) ri.remove_floating_ip.assert_called_once_with( device, need_to_remove_fip[0]['cidr']) @mock.patch.object(functools, 'partial') @mock.patch.object(common_utils, 'load_interface_driver') def test_interface_driver_init(self, load_driver_mock, funct_partial_mock): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) load_driver_mock.assert_called_once_with( self.conf, get_networks_callback=mock.ANY) funct_partial_mock.assert_called_once_with( self.plugin_api.get_networks, agent.context) def test_stop_no_cleanup(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = mock.Mock() agent.router_info[1] = router agent.stop() self.assertFalse(router.delete.called) def test_stop_cleanup(self): self.conf.set_override('cleanup_on_shutdown', True) agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = mock.Mock() agent.router_info[1] = router self.assertFalse(agent._exiting) agent.stop() self.assertTrue(router.delete.called) self.assertTrue(agent._exiting) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l3/test_dvr_fip_ns.py0000644000175000017500000003742700000000000026151 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import mock from neutron_lib.exceptions import l3 as l3_exc from oslo_config import cfg from oslo_utils import uuidutils from neutron.agent.common import utils from neutron.agent.l3 import dvr_fip_ns from neutron.agent.l3 import link_local_allocator as lla from neutron.agent.l3 import router_info from neutron.agent.linux import ip_lib from neutron.agent.linux import iptables_manager from neutron.common import utils as n_utils from neutron.tests import base _uuid = uuidutils.generate_uuid class TestDvrFipNs(base.BaseTestCase): def setUp(self): super(TestDvrFipNs, self).setUp() self.conf = mock.Mock() self.conf.state_path = cfg.CONF.state_path self.driver = mock.Mock() self.driver.DEV_NAME_LEN = 14 self.net_id = _uuid() self.fip_ns = dvr_fip_ns.FipNamespace(self.net_id, self.conf, self.driver, use_ipv6=True) def test_subscribe(self): is_first = self.fip_ns.subscribe(mock.sentinel.external_net_id) self.assertTrue(is_first) def test_subscribe_not_first(self): self.fip_ns.subscribe(mock.sentinel.external_net_id) is_first = self.fip_ns.subscribe(mock.sentinel.external_net_id2) self.assertFalse(is_first) def test_unsubscribe(self): self.fip_ns.subscribe(mock.sentinel.external_net_id) is_last = self.fip_ns.unsubscribe(mock.sentinel.external_net_id) self.assertTrue(is_last) def test_unsubscribe_not_last(self): self.fip_ns.subscribe(mock.sentinel.external_net_id) self.fip_ns.subscribe(mock.sentinel.external_net_id2) is_last = self.fip_ns.unsubscribe(mock.sentinel.external_net_id2) self.assertFalse(is_last) def test_allocate_rule_priority(self): pr = self.fip_ns.allocate_rule_priority('20.0.0.30') self.assertIn('20.0.0.30', self.fip_ns._rule_priorities.allocations) self.assertNotIn(pr, self.fip_ns._rule_priorities.pool) def test_deallocate_rule_priority(self): pr = self.fip_ns.allocate_rule_priority('20.0.0.30') self.fip_ns.deallocate_rule_priority('20.0.0.30') self.assertNotIn('20.0.0.30', self.fip_ns._rule_priorities.allocations) self.assertIn(pr, self.fip_ns._rule_priorities.pool) def _get_agent_gw_port(self): v4_subnet_id = _uuid() v6_subnet_id = _uuid() agent_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30', 'prefixlen': 24, 'subnet_id': v4_subnet_id}, {'ip_address': 'cafe:dead:beef::3', 'prefixlen': 64, 'subnet_id': v6_subnet_id}], 'subnets': [{'id': v4_subnet_id, 'cidr': '20.0.0.0/24', 'gateway_ip': '20.0.0.1'}, {'id': v6_subnet_id, 'cidr': 'cafe:dead:beef::/64', 'gateway_ip': 'cafe:dead:beef::1'}], 'id': _uuid(), 'network_id': self.net_id, 'mac_address': 'ca:fe:de:ad:be:ef'} return agent_gw_port @mock.patch.object(ip_lib, 'IPWrapper') @mock.patch.object(ip_lib, 'device_exists') @mock.patch.object(dvr_fip_ns.FipNamespace, 'create') def test_create_gateway_port(self, fip_create, device_exists, ip_wrapper): agent_gw_port = self._get_agent_gw_port() device_exists.return_value = False with mock.patch.object(self.fip_ns.driver, 'set_onlink_routes') as \ mock_set_onlink_routes: self.fip_ns.create_or_update_gateway_port(agent_gw_port) self.assertTrue(fip_create.called) self.assertEqual(1, self.driver.plug.call_count) self.assertEqual(1, self.driver.init_l3.call_count) interface_name = 
self.fip_ns.get_ext_device_name(agent_gw_port['id']) gw_cidrs = [sn['cidr'] for sn in agent_gw_port['subnets'] if sn.get('cidr')] mock_set_onlink_routes.assert_called_once_with( interface_name, self.fip_ns.name, [], preserve_ips=gw_cidrs, is_ipv6=False) @mock.patch.object(ip_lib, 'IPDevice') @mock.patch.object(ip_lib, 'send_ip_addr_adv_notif') @mock.patch.object(dvr_fip_ns.FipNamespace, 'subscribe') @mock.patch.object(dvr_fip_ns.FipNamespace, '_add_default_gateway_for_fip') def test_update_gateway_port(self, def_gw, fip_sub, send_adv_notif, IPDevice): fip_sub.return_value = False self.fip_ns._check_for_gateway_ip_change = mock.Mock(return_value=True) agent_gw_port = self._get_agent_gw_port() interface_name = self.fip_ns.get_ext_device_name(agent_gw_port['id']) self.fip_ns.agent_gateway_port = agent_gw_port with mock.patch.object(self.fip_ns.driver, 'set_onlink_routes'): self.fip_ns.create_or_update_gateway_port(agent_gw_port) expected = [ mock.call(self.fip_ns.get_name(), interface_name, agent_gw_port['fixed_ips'][0]['ip_address']), mock.call(self.fip_ns.get_name(), interface_name, agent_gw_port['fixed_ips'][1]['ip_address'])] send_adv_notif.assert_has_calls(expected) self.assertTrue(def_gw.called) @mock.patch.object(ip_lib.IPDevice, 'exists') @mock.patch.object(dvr_fip_ns.FipNamespace, 'subscribe') @mock.patch.object(dvr_fip_ns.FipNamespace, 'delete') @mock.patch.object(dvr_fip_ns.FipNamespace, 'unsubscribe') def test_update_gateway_port_raises_exception(self, fip_unsub, fip_delete, fip_sub, exists): agent_gw_port = self._get_agent_gw_port() self.fip_ns._create_gateway_port = mock.Mock() self.fip_ns.create_or_update_gateway_port(agent_gw_port) exists.return_value = False fip_sub.return_value = False self.fip_ns._check_for_gateway_ip_change = mock.Mock(return_value=True) self.fip_ns.agent_gateway_port = agent_gw_port self.assertRaises(l3_exc.FloatingIpSetupException, self.fip_ns.create_or_update_gateway_port, agent_gw_port) self.assertTrue(fip_unsub.called) self.assertTrue(fip_delete.called) @mock.patch.object(ip_lib, 'IPDevice') @mock.patch.object(ip_lib, 'send_ip_addr_adv_notif') @mock.patch.object(dvr_fip_ns.FipNamespace, 'subscribe') @mock.patch.object(dvr_fip_ns.FipNamespace, '_add_default_gateway_for_fip') def test_update_gateway_port_gateway_outside_subnet_added( self, def_gw, fip_sub, send_adv_notif, IPDevice): fip_sub.return_value = False self.fip_ns.agent_gateway_port = None agent_gw_port = self._get_agent_gw_port() agent_gw_port['subnets'][0]['gateway_ip'] = '20.0.1.1' self.fip_ns._check_for_gateway_ip_change = mock.Mock(return_value=True) self.fip_ns.agent_gateway_port = agent_gw_port with mock.patch.object(self.fip_ns.driver, 'set_onlink_routes'): self.fip_ns.create_or_update_gateway_port(agent_gw_port) IPDevice().route.add_route.assert_called_once_with('20.0.1.1', scope='link') self.assertTrue(def_gw.called) def test_check_gateway_ip_changed_no_change(self): agent_gw_port = self._get_agent_gw_port() self.fip_ns.agent_gateway_port = copy.deepcopy(agent_gw_port) agent_gw_port['mac_address'] = 'aa:bb:cc:dd:ee:ff' self.assertFalse(self.fip_ns._check_for_gateway_ip_change( agent_gw_port)) def test_check_gateway_ip_changed_v4(self): agent_gw_port = self._get_agent_gw_port() self.fip_ns.agent_gateway_port = copy.deepcopy(agent_gw_port) agent_gw_port['subnets'][0]['gateway_ip'] = '20.0.0.2' self.assertTrue(self.fip_ns._check_for_gateway_ip_change( agent_gw_port)) def test_check_gateway_ip_changed_v6(self): agent_gw_port = self._get_agent_gw_port() self.fip_ns.agent_gateway_port = 
copy.deepcopy(agent_gw_port) agent_gw_port['subnets'][1]['gateway_ip'] = 'cafe:dead:beef::2' self.assertTrue(self.fip_ns._check_for_gateway_ip_change( agent_gw_port)) @mock.patch.object(iptables_manager, 'IptablesManager') @mock.patch.object(utils, 'execute') @mock.patch.object(ip_lib.IpNetnsCommand, 'exists') def _test_create(self, old_kernel, exists, execute, IPTables): exists.return_value = True # There are up to six sysctl calls - two to enable forwarding, # two for arp_ignore and arp_announce, and two for ip_nonlocal_bind execute.side_effect = [None, None, None, None, RuntimeError if old_kernel else None, None] self.fip_ns._iptables_manager = IPTables() self.fip_ns.create() ns_name = self.fip_ns.get_name() netns_cmd = ['ip', 'netns', 'exec', ns_name] bind_cmd = ['sysctl', '-w', 'net.ipv4.ip_nonlocal_bind=1'] expected = [mock.call(netns_cmd + bind_cmd, check_exit_code=True, extra_ok_codes=None, log_fail_as_error=False, run_as_root=True)] if old_kernel: expected.append(mock.call(bind_cmd, check_exit_code=True, extra_ok_codes=None, log_fail_as_error=True, run_as_root=True)) execute.assert_has_calls(expected) def test_create_old_kernel(self): self._test_create(True) def test_create_new_kernel(self): self._test_create(False) @mock.patch.object(ip_lib, 'IPWrapper') def test_destroy(self, IPWrapper): ip_wrapper = IPWrapper() dev1 = mock.Mock() dev1.name = 'fpr-aaaa' dev2 = mock.Mock() dev2.name = 'fg-aaaa' ip_wrapper.get_devices.return_value = [dev1, dev2] with mock.patch.object(self.fip_ns.ip_wrapper_root.netns, 'delete') as delete,\ mock.patch.object(self.fip_ns.ip_wrapper_root.netns, 'exists', return_value=True) as exists: self.fip_ns.delete() exists.assert_called_once_with(self.fip_ns.name) delete.assert_called_once_with(self.fip_ns.name) ns_name = self.fip_ns.get_name() self.driver.unplug.assert_called_once_with('fg-aaaa', prefix='fg-', namespace=ns_name) ip_wrapper.del_veth.assert_called_once_with('fpr-aaaa') def test_destroy_no_namespace(self): with mock.patch.object(self.fip_ns.ip_wrapper_root.netns, 'delete') as delete,\ mock.patch.object(self.fip_ns.ip_wrapper_root.netns, 'exists', return_value=False) as exists: self.fip_ns.delete() exists.assert_called_once_with(self.fip_ns.name) self.assertFalse(delete.called) @mock.patch.object(ip_lib, 'IPWrapper') @mock.patch.object(ip_lib, 'IPDevice') def _test_create_rtr_2_fip_link(self, dev_exists, addr_exists, IPDevice, IPWrapper): ri = mock.Mock() ri.router_id = _uuid() ri.rtr_fip_subnet = None ri.ns_name = mock.sentinel.router_ns ri.get_ex_gw_port.return_value = {'mtu': 2000} rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(ri.router_id) fip_2_rtr_name = self.fip_ns.get_int_device_name(ri.router_id) fip_ns_name = self.fip_ns.get_name() self.fip_ns.local_subnets = allocator = mock.Mock() pair = lla.LinkLocalAddressPair('169.254.31.28/31') allocator.allocate.return_value = pair addr_pair = pair.get_pair() ip_wrapper = IPWrapper() ip_wrapper.add_veth.return_value = (IPDevice(), IPDevice()) device = IPDevice() device.exists.return_value = dev_exists device.addr.list.return_value = addr_exists ri._get_snat_idx = mock.Mock() self.fip_ns._add_rtr_ext_route_rule_to_route_table = mock.Mock() self.fip_ns.create_rtr_2_fip_link(ri) if not dev_exists: ip_wrapper.add_veth.assert_called_with(rtr_2_fip_name, fip_2_rtr_name, fip_ns_name) self.assertEqual(2, device.link.set_up.call_count) device.link.set_mtu.assert_called_with(2000) self.assertEqual(2, device.link.set_mtu.call_count) if not addr_exists: expected = [mock.call(str(addr_pair[0]), 
add_broadcast=False), mock.call(str(addr_pair[1]), add_broadcast=False)] device.addr.add.assert_has_calls(expected) self.assertEqual(2, device.addr.add.call_count) expected = [mock.call(n_utils.cidr_to_ip(addr_pair[1]), mock.ANY), mock.call(n_utils.cidr_to_ip(addr_pair[0]), mock.ANY)] device.neigh.add.assert_has_calls(expected) self.assertEqual(2, device.neigh.add.call_count) device.route.add_gateway.assert_called_once_with( '169.254.31.29', table=16) self.assertTrue( self.fip_ns._add_rtr_ext_route_rule_to_route_table.called) def test_create_rtr_2_fip_link(self): self._test_create_rtr_2_fip_link(False, False) def test_create_rtr_2_fip_link_already_exists(self): self._test_create_rtr_2_fip_link(True, False) def test_create_rtr_2_fip_link_and_addr_already_exist(self): self._test_create_rtr_2_fip_link(True, True) @mock.patch.object(router_info.RouterInfo, 'get_router_cidrs') @mock.patch.object(ip_lib, 'IPDevice') def _test_scan_fip_ports(self, ri, ip_list, stale_list, IPDevice, get_router_cidrs): IPDevice.return_value = device = mock.Mock() device.exists.return_value = True ri.get_router_cidrs.return_value = ip_list get_router_cidrs.return_value = stale_list self.fip_ns.get_rtr_ext_device_name = mock.Mock( return_value=mock.sentinel.rtr_ext_device_name) self.fip_ns.scan_fip_ports(ri) if stale_list: device.delete_addr_and_conntrack_state.assert_called_once_with( stale_list[0]) def test_scan_fip_ports_restart_fips(self): ri = mock.Mock() ri.floating_ips_dict = {} ip_list = [{'cidr': '111.2.3.4'}, {'cidr': '111.2.3.5'}] stale_list = ['111.2.3.7/32'] self._test_scan_fip_ports(ri, ip_list, stale_list) self.assertTrue(ri.rtr_fip_connect) def test_scan_fip_ports_restart_none(self): ri = mock.Mock() ri.floating_ips_dict = {} ri.rtr_fip_connect = False self._test_scan_fip_ports(ri, [], []) self.assertFalse(ri.rtr_fip_connect) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l3/test_dvr_local_router.py0000644000175000017500000012426300000000000027360 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
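# A quick, illustrative sketch (not part of the test suite) for the
# SNAT routing-table index that test__get_snat_idx_ipv4 below expects:
# for IPv4 the index appears to be simply the packed 32-bit network
# address, e.g. with netaddr:
#     >>> import netaddr
#     >>> hex(int(netaddr.IPAddress('101.12.13.0')))
#     '0x650c0d00'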
import mock from neutron_lib.api.definitions import portbindings from neutron_lib import constants as lib_constants from oslo_config import cfg from oslo_log import log from oslo_utils import uuidutils from neutron.agent.l3 import agent as l3_agent from neutron.agent.l3 import dvr_edge_ha_router as dvr_edge_ha_rtr from neutron.agent.l3 import dvr_edge_router as dvr_edge_rtr from neutron.agent.l3 import dvr_local_router as dvr_router from neutron.agent.l3 import link_local_allocator as lla from neutron.agent.l3 import router_info from neutron.agent.linux import interface from neutron.agent.linux import ip_lib from neutron.common import utils as common_utils from neutron.conf.agent import common as agent_config from neutron.conf.agent.l3 import config as l3_config from neutron.conf.agent.l3 import ha as ha_conf from neutron.conf import common as base_config from neutron.tests import base from neutron.tests.common import l3_test_common _uuid = uuidutils.generate_uuid FIP_PRI = 32768 HOSTNAME = 'myhost' class TestDvrRouterOperations(base.BaseTestCase): def setUp(self): super(TestDvrRouterOperations, self).setUp() mock.patch('eventlet.spawn').start() self.conf = agent_config.setup_conf() self.conf.register_opts(base_config.core_opts) log.register_options(self.conf) self.conf.register_opts(agent_config.AGENT_STATE_OPTS, 'AGENT') l3_config.register_l3_agent_config_opts(l3_config.OPTS, self.conf) ha_conf.register_l3_agent_ha_opts(self.conf) agent_config.register_interface_driver_opts_helper(self.conf) agent_config.register_process_monitor_opts(self.conf) agent_config.register_interface_opts(self.conf) agent_config.register_external_process_opts(self.conf) self.conf.set_override('interface_driver', 'neutron.agent.linux.interface.NullDriver') self.conf.set_override('state_path', cfg.CONF.state_path) self.device_exists_p = mock.patch( 'neutron.agent.linux.ip_lib.device_exists') self.device_exists = self.device_exists_p.start() self.ensure_dir = mock.patch( 'oslo_utils.fileutils.ensure_tree').start() mock.patch('neutron.agent.linux.keepalived.KeepalivedManager' '.get_full_config_file_path').start() self.utils_exec_p = mock.patch( 'neutron.agent.linux.utils.execute') self.utils_exec = self.utils_exec_p.start() self.utils_replace_file_p = mock.patch( 'neutron_lib.utils.file.replace_file') self.utils_replace_file = self.utils_replace_file_p.start() self.external_process_p = mock.patch( 'neutron.agent.linux.external_process.ProcessManager') self.external_process = self.external_process_p.start() self.process_monitor = mock.patch( 'neutron.agent.linux.external_process.ProcessMonitor').start() self.send_adv_notif_p = mock.patch( 'neutron.agent.linux.ip_lib.send_ip_addr_adv_notif') self.send_adv_notif = self.send_adv_notif_p.start() self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver') driver_cls = self.dvr_cls_p.start() self.mock_driver = mock.MagicMock() self.mock_driver.DEV_NAME_LEN = ( interface.LinuxInterfaceDriver.DEV_NAME_LEN) driver_cls.return_value = self.mock_driver self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper') ip_cls = self.ip_cls_p.start() self.mock_ip = mock.MagicMock() ip_cls.return_value = self.mock_ip self.mock_delete_ip_rule = mock.patch.object(ip_lib, 'delete_ip_rule').start() ip_dev = mock.patch('neutron.agent.linux.ip_lib.IPDevice').start() self.mock_ip_dev = mock.MagicMock() ip_dev.return_value = self.mock_ip_dev self.l3pluginApi_cls_p = mock.patch( 'neutron.agent.l3.agent.L3PluginApi') l3pluginApi_cls = self.l3pluginApi_cls_p.start() 
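# NOTE: mock.patch(...).start(), used throughout this setUp, returns
# the patched-in replacement; wiring its return_value (as done for the
# plugin API just below) is what hands a controllable MagicMock to any
# code that instantiates the patched class. A minimal sketch, with a
# hypothetical patch target:
#     patcher = mock.patch('some.module.Cls')   # hypothetical target
#     cls_mock = patcher.start()
#     cls_mock.return_value = mock.MagicMock()  # the "instance"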
self.plugin_api = mock.MagicMock() l3pluginApi_cls.return_value = self.plugin_api self.looping_call_p = mock.patch( 'oslo_service.loopingcall.FixedIntervalLoopingCall') self.looping_call_p.start() subnet_id_1 = _uuid() subnet_id_2 = _uuid() self.snat_ports = [{'subnets': [{'cidr': '152.2.0.0/16', 'gateway_ip': '152.2.0.1', 'id': subnet_id_1}], 'network_id': _uuid(), 'device_owner': lib_constants.DEVICE_OWNER_ROUTER_SNAT, 'mac_address': 'fa:16:3e:80:8d:80', 'fixed_ips': [{'subnet_id': subnet_id_1, 'ip_address': '152.2.0.13', 'prefixlen': 16}], 'id': _uuid(), 'device_id': _uuid()}, {'subnets': [{'cidr': '152.10.0.0/16', 'gateway_ip': '152.10.0.1', 'id': subnet_id_2}], 'network_id': _uuid(), 'device_owner': lib_constants.DEVICE_OWNER_ROUTER_SNAT, 'mac_address': 'fa:16:3e:80:8d:80', 'fixed_ips': [{'subnet_id': subnet_id_2, 'ip_address': '152.10.0.13', 'prefixlen': 16}], 'id': _uuid(), 'device_id': _uuid()}] self.ri_kwargs = {'agent_conf': self.conf, 'interface_driver': self.mock_driver} def _create_router(self, router=None, **kwargs): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.router_id = _uuid() if not router: router = mock.MagicMock() kwargs['agent'] = agent kwargs['router_id'] = self.router_id kwargs['router'] = router kwargs['agent_conf'] = self.conf kwargs['interface_driver'] = mock.Mock() return dvr_router.DvrLocalRouter(HOSTNAME, **kwargs) def _set_ri_kwargs(self, agent, router_id, router): self.ri_kwargs['agent'] = agent self.ri_kwargs['router_id'] = router_id self.ri_kwargs['router'] = router def test_gw_ns_name(self): ri = self._create_router() self.assertEqual(ri.ns_name, ri.get_gw_ns_name()) def test_create_dvr_fip_interfaces_update(self): ri = self._create_router() fip_agent_port = {'subnets': []} ri.get_floating_agent_gw_interface = mock.Mock( return_value=fip_agent_port) ri.get_floating_ips = mock.Mock(return_value=True) ri.fip_ns = mock.Mock() ri.fip_ns.subscribe.return_value = False ri.rtr_fip_connect = True ex_gw_port = {'network_id': 'fake_net_id'} ri.create_dvr_external_gateway_on_agent(ex_gw_port) ri.fip_ns.create_or_update_gateway_port.assert_called_once_with( fip_agent_port) def test_create_dvr_fip_interfaces_with_matching_address_scope(self): self._setup_create_dvr_fip_interfaces_for_setting_routing_rules( address_scopes_match=True) def test_create_dvr_fip_interfaces_with_address_scope_mismatch(self): self._setup_create_dvr_fip_interfaces_for_setting_routing_rules() def _setup_create_dvr_fip_interfaces_for_setting_routing_rules( self, address_scopes_match=False): ri = self._create_router() ri.get_floating_agent_gw_interface = mock.Mock() ri.fip_ns = mock.Mock() ri._add_interface_routing_rule_to_router_ns = mock.Mock() ri._add_interface_route_to_fip_ns = mock.Mock() ri.fip_ns._create_rtr_2_fip_link = mock.Mock() ri.internal_ports = ['moke_port_1', 'moke_port_2'] if address_scopes_match: ri._check_if_address_scopes_match = mock.Mock( return_value=True) else: ri._check_if_address_scopes_match = mock.Mock( return_value=False) ri.rtr_fip_connect = False ex_gw_port = {'network_id': 'fake_net_id'} ri.create_dvr_external_gateway_on_agent(ex_gw_port) ri._check_rtr_2_fip_connect = mock.Mock() ri.connect_rtr_2_fip() self.assertTrue(ri._check_if_address_scopes_match.called) if address_scopes_match: self.assertTrue( ri.fip_ns.create_rtr_2_fip_link.called) self.assertTrue( ri._add_interface_routing_rule_to_router_ns.called) self.assertTrue( ri._add_interface_route_to_fip_ns.called) else: self.assertFalse( ri._add_interface_routing_rule_to_router_ns.called) 
self.assertFalse( ri._add_interface_route_to_fip_ns.called) self.assertTrue( ri.fip_ns.create_rtr_2_fip_link.called) def test_get_floating_ips_dvr(self): router = mock.MagicMock() router.get.return_value = [{'host': HOSTNAME}, {'host': mock.sentinel.otherhost}] ri = self._create_router(router) fips = ri.get_floating_ips() self.assertEqual( [{'host': HOSTNAME}, {'host': mock.sentinel.otherhost}], fips) def test_floating_forward_rules_no_fip_ns(self): router = mock.MagicMock() router.get.return_value = [{'host': HOSTNAME}, {'host': mock.sentinel.otherhost}] fip = {'id': _uuid()} ri = self._create_router(router) self.assertFalse(ri.floating_forward_rules(fip)) def test_floating_forward_rules(self): router = mock.MagicMock() router.get.return_value = [{'host': HOSTNAME}, {'host': mock.sentinel.otherhost}] ri = self._create_router(router) floating_ip = '15.1.2.3' rtr_2_fip_name = 'fake_router' fixed_ip = '192.168.0.1' fip = {'id': _uuid(), 'fixed_ip_address': '192.168.0.1', 'floating_ip_address': '15.1.2.3'} instance = mock.Mock() instance.get_rtr_ext_device_name = mock.Mock( return_value=rtr_2_fip_name) ri.fip_ns = instance dnat_from_floatingip_to_fixedip = ( 'PREROUTING', '-d %s/32 -i %s -j DNAT --to-destination %s' % ( floating_ip, rtr_2_fip_name, fixed_ip)) to_source = '-s %s/32 -j SNAT --to-source %s' % (fixed_ip, floating_ip) if ri.iptables_manager.random_fully: to_source += ' --random-fully' snat_from_fixedip_to_floatingip = ('float-snat', to_source) actual = ri.floating_forward_rules(fip) expected = [dnat_from_floatingip_to_fixedip, snat_from_fixedip_to_floatingip] self.assertEqual(expected, actual) def test_floating_mangle_rules_no_fip_ns(self): router = mock.MagicMock() router.get.return_value = [{'host': HOSTNAME}, {'host': mock.sentinel.otherhost}] ri = self._create_router(router) floating_ip = mock.Mock() fixed_ip = mock.Mock() internal_mark = mock.Mock() self.assertFalse(ri.floating_mangle_rules(floating_ip, fixed_ip, internal_mark)) def test_floating_mangle_rules(self): router = mock.MagicMock() router.get.return_value = [{'host': HOSTNAME}, {'host': mock.sentinel.otherhost}] ri = self._create_router(router) floating_ip = '15.1.2.3' fixed_ip = '192.168.0.1' internal_mark = 'fake_mark' rtr_2_fip_name = 'fake_router' instance = mock.Mock() instance.get_rtr_ext_device_name = mock.Mock( return_value=rtr_2_fip_name) ri.fip_ns = instance mark_traffic_to_floating_ip = ( 'floatingip', '-d %s/32 -i %s -j MARK --set-xmark %s' % ( floating_ip, rtr_2_fip_name, internal_mark)) mark_traffic_from_fixed_ip = ( 'FORWARD', '-s %s/32 -j $float-snat' % fixed_ip) actual = ri.floating_mangle_rules(floating_ip, fixed_ip, internal_mark) expected = [mark_traffic_to_floating_ip, mark_traffic_from_fixed_ip] self.assertEqual(expected, actual) @mock.patch.object(ip_lib, 'send_ip_addr_adv_notif') @mock.patch.object(ip_lib, 'IPDevice') @mock.patch.object(ip_lib, 'add_ip_rule') def test_floating_ip_added_dist(self, mock_add_ip_rule, mIPDevice, mock_adv_notif): router = mock.MagicMock() ri = self._create_router(router) ri.ex_gw_port = ri.router['gw_port'] ext_net_id = _uuid() subnet_id = _uuid() agent_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30', 'prefixlen': 24, 'subnet_id': subnet_id}], 'subnets': [{'id': subnet_id, 'cidr': '20.0.0.0/24', 'gateway_ip': '20.0.0.1'}], 'id': _uuid(), 'network_id': ext_net_id, 'mac_address': 'ca:fe:de:ad:be:ef'} fip = {'id': _uuid(), 'host': HOSTNAME, 'floating_ip_address': '15.1.2.3', 'fixed_ip_address': '192.168.0.1', 'floating_network_id': ext_net_id, 'port_id': _uuid()} 
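# The add_ip_rule assertions further below encode the DVR fast path:
# traffic sourced from the fixed IP is steered into a dedicated route
# table at the allocated priority, roughly the equivalent of:
#     ip rule add from 192.168.0.1 lookup 16 priority 32768
# (a sketch; 16 is the rtr-2-fip table id used by these tests and
# 32768 is the FIP_PRI constant defined at module level).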
ri.fip_ns = mock.Mock() ri.fip_ns.agent_gateway_port = agent_gw_port ri.create_dvr_external_gateway_on_agent(ri.ex_gw_port) ri._check_rtr_2_fip_connect = mock.Mock() ri.connect_rtr_2_fip() self.assertTrue(ri.rtr_fip_connect) ri.fip_ns.allocate_rule_priority.return_value = FIP_PRI subnet = lla.LinkLocalAddressPair('169.254.30.42/31') ri.rtr_fip_subnet = subnet ri.fip_ns.local_subnets = mock.Mock() ri.fip_ns.local_subnets.allocate.return_value = subnet ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address']) ri.floating_ip_added_dist(fip, ip_cidr) mock_add_ip_rule.assert_called_with( namespace=ri.router_namespace.name, ip='192.168.0.1', table=16, priority=FIP_PRI) ri.fip_ns.local_subnets.allocate.assert_not_called() # Validate that fip_ns.local_subnets is called when # ri.rtr_fip_subnet is None ri.rtr_fip_subnet = None ri.floating_ip_added_dist(fip, ip_cidr) mock_add_ip_rule.assert_called_with( namespace=ri.router_namespace.name, ip='192.168.0.1', table=16, priority=FIP_PRI) ri.fip_ns.local_subnets.allocate.assert_called_once_with(ri.router_id) # TODO(mrsmith): add more asserts @mock.patch.object(ip_lib, 'IPWrapper') @mock.patch.object(ip_lib, 'IPDevice') def test_floating_ip_removed_dist(self, mIPDevice, mIPWrapper): router = mock.MagicMock() ri = self._create_router(router) ri.ex_gw_port = ri.router['gw_port'] subnet_id = _uuid() fixed_ip = '20.0.0.30' agent_gw_port = {'fixed_ips': [{'ip_address': fixed_ip, 'prefixlen': 24, 'subnet_id': subnet_id}], 'subnets': [{'id': subnet_id, 'cidr': '20.0.0.0/24', 'gateway_ip': '20.0.0.1'}], 'id': _uuid(), 'network_id': _uuid(), 'mac_address': 'ca:fe:de:ad:be:ef'} fip_cidr = '11.22.33.44/24' ri.fip_ns = mock.Mock() ri.fip_ns.get_name.return_value = 'fip_ns_name' ri.floating_ips_dict['11.22.33.44'] = (fixed_ip, FIP_PRI) ri.fip_2_rtr = '11.22.33.42' ri.rtr_2_fip = '11.22.33.40' ri.fip_ns.agent_gateway_port = agent_gw_port s = lla.LinkLocalAddressPair('169.254.30.42/31') ri.rtr_fip_subnet = s ri.fip_ns.local_subnets = mock.Mock() ri.floating_ip_removed_dist(fip_cidr) self.mock_delete_ip_rule.assert_called_with( ri.router_namespace.name, ip=fixed_ip, table=16, priority=FIP_PRI) mIPDevice().route.delete_route.assert_called_with(fip_cidr, via=str(s.ip)) ri.fip_ns.local_subnets.allocate.assert_not_called() @mock.patch.object(ip_lib, 'add_ip_rule') def test_floating_ip_moved_dist(self, mock_add_ip_rule): router = mock.MagicMock() ri = self._create_router(router) floating_ip_address = '15.1.2.3' fixed_ip = '192.168.0.1' fip = {'floating_ip_address': floating_ip_address, 'fixed_ip_address': fixed_ip} ri.floating_ips_dict['15.1.2.3'] = (fixed_ip, FIP_PRI) ri.fip_ns = mock.Mock() ri.fip_ns.allocate_rule_priority.return_value = FIP_PRI ri.floating_ip_moved_dist(fip) self.mock_delete_ip_rule.assert_called_once_with( ri.router_namespace.name, ip=fixed_ip, table=16, priority=FIP_PRI) ri.fip_ns.deallocate_rule_priority.assert_called_once_with( floating_ip_address) ri.fip_ns.allocate_rule_priority.assert_called_once_with( floating_ip_address) mock_add_ip_rule.assert_called_with( namespace=ri.router_namespace.name, ip=fixed_ip, table=16, priority=FIP_PRI) def _test_add_floating_ip(self, ri, fip, is_failure=False): if not is_failure: ri.floating_ip_added_dist = mock.Mock( return_value=lib_constants.FLOATINGIP_STATUS_ACTIVE) else: ri.floating_ip_added_dist = mock.Mock( return_value=lib_constants.FLOATINGIP_STATUS_ERROR) result = ri.add_floating_ip(fip, mock.sentinel.interface_name, mock.sentinel.device) ri.floating_ip_added_dist.assert_called_once_with( fip, mock.ANY) 
return result def test_add_floating_ip(self): ri = self._create_router(mock.MagicMock()) ip = '15.1.2.3' fip = {'floating_ip_address': ip} result = self._test_add_floating_ip(ri, fip) ri.floating_ip_added_dist.assert_called_once_with(fip, ip + '/32') self.assertEqual(lib_constants.FLOATINGIP_STATUS_ACTIVE, result) def test_add_floating_ip_failure(self): ri = self._create_router(mock.MagicMock()) ip = '15.1.2.3' fip = {'floating_ip_address': ip} result = self._test_add_floating_ip(ri, fip, True) ri.floating_ip_added_dist.assert_called_once_with(fip, ip + '/32') self.assertEqual(lib_constants.FLOATINGIP_STATUS_ERROR, result) @mock.patch.object(router_info.RouterInfo, 'remove_floating_ip') def test_remove_floating_ip(self, super_remove_floating_ip): ri = self._create_router(mock.MagicMock()) ri.floating_ip_removed_dist = mock.Mock() ri.remove_floating_ip(mock.sentinel.device, mock.sentinel.ip_cidr) self.assertFalse(super_remove_floating_ip.called) ri.floating_ip_removed_dist.assert_called_once_with( mock.sentinel.ip_cidr) def test__get_internal_port(self): ri = self._create_router() port = {'fixed_ips': [{'subnet_id': mock.sentinel.subnet_id}]} router_ports = [port] ri.router.get.return_value = router_ports self.assertEqual(port, ri._get_internal_port(mock.sentinel.subnet_id)) def test__get_internal_port_not_found(self): ri = self._create_router() port = {'fixed_ips': [{'subnet_id': mock.sentinel.subnet_id}]} router_ports = [port] ri.router.get.return_value = router_ports self.assertIsNone(ri._get_internal_port(mock.sentinel.subnet_id2)) def test__get_snat_idx_ipv4(self): ip_cidr = '101.12.13.00/24' ri = self._create_router(mock.MagicMock()) snat_idx = ri._get_snat_idx(ip_cidr) # 0x650C0D00 is numerical value of 101.12.13.00 self.assertEqual(0x650C0D00, snat_idx) def test__get_snat_idx_ipv6(self): ip_cidr = '2620:0:a03:e100::/64' ri = self._create_router(mock.MagicMock()) snat_idx = ri._get_snat_idx(ip_cidr) # 0x3D345705 is 30 bit xor folded crc32 of the ip_cidr self.assertEqual(0x3D345705, snat_idx) def test__get_snat_idx_ipv6_below_32768(self): ip_cidr = 'd488::/30' # crc32 of this ip_cidr is 0x1BD7 ri = self._create_router(mock.MagicMock()) snat_idx = ri._get_snat_idx(ip_cidr) # 0x1BD7 + 0x3FFFFFFF = 0x40001BD6 self.assertEqual(0x40001BD6, snat_idx) def test__set_subnet_arp_info(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = l3_test_common.prepare_router_data(num_internal_ports=2) router['distributed'] = True self._set_ri_kwargs(agent, router['id'], router) ri = dvr_router.DvrLocalRouter(HOSTNAME, **self.ri_kwargs) ports = ri.router.get(lib_constants.INTERFACE_KEY, []) subnet_id = l3_test_common.get_subnet_id(ports[0]) test_ports = [{'mac_address': '00:11:22:33:44:55', 'device_owner': lib_constants.DEVICE_OWNER_DHCP, 'fixed_ips': [{'ip_address': '1.2.3.4', 'prefixlen': 24, 'subnet_id': subnet_id}]}, {'mac_address': '11:22:33:44:55:66', 'device_owner': lib_constants.DEVICE_OWNER_LOADBALANCER, 'fixed_ips': [{'ip_address': '1.2.3.5', 'prefixlen': 24, 'subnet_id': subnet_id}]}, {'mac_address': '22:33:44:55:66:77', 'device_owner': lib_constants.DEVICE_OWNER_LOADBALANCERV2, 'fixed_ips': [{'ip_address': '1.2.3.6', 'prefixlen': 24, 'subnet_id': subnet_id}]}] self.plugin_api.get_ports_by_subnet.return_value = test_ports # Test basic case ports[0]['subnets'] = [{'id': subnet_id, 'cidr': '1.2.3.0/24'}] with mock.patch.object(ri, '_process_arp_cache_for_internal_port') as parp: ri._set_subnet_arp_info(subnet_id) self.assertEqual(1, parp.call_count) 
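# The pair of assertions here brackets the expected behaviour: exactly
# one pass through the (mocked) ARP-cache hook above, and exactly one
# direct neighbor-table entry below, for the DHCP port's '1.2.3.4'.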
self.mock_ip_dev.neigh.add.assert_called_once_with( '1.2.3.4', '00:11:22:33:44:55') # Test negative case router['distributed'] = False ri._set_subnet_arp_info(subnet_id) self.assertEqual( 1, self.mock_ip_dev.neigh.add.call_count) def test_add_arp_entry(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = l3_test_common.prepare_router_data(num_internal_ports=2) router['distributed'] = True subnet_id = l3_test_common.get_subnet_id( router[lib_constants.INTERFACE_KEY][0]) arp_table = {'ip_address': '1.7.23.11', 'mac_address': '00:11:22:33:44:55', 'subnet_id': subnet_id} payload = {'arp_table': arp_table, 'router_id': router['id']} agent._router_added(router['id'], router) agent.add_arp_entry(None, payload) agent.router_deleted(None, router['id']) self.mock_ip_dev.neigh.add.assert_called_once_with( '1.7.23.11', '00:11:22:33:44:55') def test_add_arp_entry_no_routerinfo(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = l3_test_common.prepare_router_data(num_internal_ports=2) subnet_id = l3_test_common.get_subnet_id( router[lib_constants.INTERFACE_KEY][0]) arp_table = {'ip_address': '1.7.23.11', 'mac_address': '00:11:22:33:44:55', 'subnet_id': subnet_id} payload = {'arp_table': arp_table, 'router_id': router['id']} agent.add_arp_entry(None, payload) def test_get_arp_related_dev_no_subnet(self): self._set_ri_kwargs(mock.sentinel.agent, 'foo_router_id', {'distributed': True, 'gw_port_host': HOSTNAME}) ri = dvr_router.DvrLocalRouter(HOSTNAME, **self.ri_kwargs) with mock.patch('neutron.agent.linux.ip_lib.IPDevice') as f: ri.get_arp_related_dev('foo_subnet_id') self.assertFalse(f.call_count) def _setup_test_for_arp_entry_cache(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = l3_test_common.prepare_router_data(num_internal_ports=2) router['distributed'] = True self._set_ri_kwargs(agent, router['id'], router) ri = dvr_router.DvrLocalRouter(HOSTNAME, **self.ri_kwargs) subnet_id = l3_test_common.get_subnet_id( ri.router[lib_constants.INTERFACE_KEY][0]) return ri, subnet_id def test__update_arp_entry_calls_arp_cache_with_no_device(self): ri, subnet_id = self._setup_test_for_arp_entry_cache() state = True with mock.patch('neutron.agent.linux.ip_lib.IPDevice') as rtrdev,\ mock.patch.object(ri, '_cache_arp_entry') as arp_cache: state = ri._update_arp_entry( mock.ANY, mock.ANY, subnet_id, 'add', mock.ANY, device_exists=False) self.assertFalse(state) self.assertTrue(arp_cache.called) arp_cache.assert_called_once_with(mock.ANY, mock.ANY, subnet_id, 'add') self.assertFalse(rtrdev.neigh.add.called) def test__process_arp_cache_for_internal_port(self): ri, subnet_id = self._setup_test_for_arp_entry_cache() ri._cache_arp_entry('1.7.23.11', '00:11:22:33:44:55', subnet_id, 'add') self.assertEqual(1, len(ri._pending_arp_set)) with mock.patch.object(ri, '_update_arp_entry') as update_arp: update_arp.return_value = True ri._process_arp_cache_for_internal_port(subnet_id) self.assertEqual(0, len(ri._pending_arp_set)) def test__delete_arp_cache_for_internal_port(self): ri, subnet_id = self._setup_test_for_arp_entry_cache() ri._cache_arp_entry('1.7.23.11', '00:11:22:33:44:55', subnet_id, 'add') self.assertEqual(1, len(ri._pending_arp_set)) ri._delete_arp_cache_for_internal_port(subnet_id) self.assertEqual(0, len(ri._pending_arp_set)) def test_del_arp_entry(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = l3_test_common.prepare_router_data(num_internal_ports=2) router['distributed'] = True subnet_id = l3_test_common.get_subnet_id( router[lib_constants.INTERFACE_KEY][0]) arp_table = 
{'ip_address': '1.5.25.15', 'mac_address': '00:44:33:22:11:55', 'subnet_id': subnet_id} payload = {'arp_table': arp_table, 'router_id': router['id']} agent._router_added(router['id'], router) # first add the entry agent.add_arp_entry(None, payload) # now delete it agent.del_arp_entry(None, payload) self.mock_ip_dev.neigh.delete.assert_called_once_with( '1.5.25.15', '00:44:33:22:11:55') agent.router_deleted(None, router['id']) def test_get_floating_agent_gw_interfaces(self): fake_network_id = _uuid() subnet_id = _uuid() agent_gateway_port = ( [{'fixed_ips': [{'ip_address': '20.0.0.30', 'prefixlen': 24, 'subnet_id': subnet_id}], 'subnets': [{'id': subnet_id, 'cidr': '20.0.0.0/24', 'gateway_ip': '20.0.0.1'}], 'id': _uuid(), portbindings.HOST_ID: 'myhost', 'device_owner': lib_constants.DEVICE_OWNER_AGENT_GW, 'network_id': fake_network_id, 'mac_address': 'ca:fe:de:ad:be:ef'}] ) router = l3_test_common.prepare_router_data(enable_snat=True) router[lib_constants.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port router['distributed'] = True agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self._set_ri_kwargs(agent, router['id'], router) ri = dvr_router.DvrLocalRouter(HOSTNAME, **self.ri_kwargs) self.assertEqual( agent_gateway_port[0], ri.get_floating_agent_gw_interface(fake_network_id)) def test_process_router_dist_floating_ip_add(self): fake_floatingips = {'floatingips': [ {'id': _uuid(), 'host': HOSTNAME, 'floating_ip_address': '15.1.2.3', 'fixed_ip_address': '192.168.0.1', 'floating_network_id': mock.sentinel.ext_net_id, 'port_id': _uuid()}, {'id': _uuid(), 'host': 'some-other-host', 'floating_ip_address': '15.1.2.4', 'fixed_ip_address': '192.168.0.10', 'floating_network_id': mock.sentinel.ext_net_id, 'port_id': _uuid()}]} router = l3_test_common.prepare_router_data(enable_snat=True) router[lib_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips'] router['distributed'] = True agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self._set_ri_kwargs(agent, router['id'], router) ri = dvr_router.DvrLocalRouter(HOSTNAME, **self.ri_kwargs) ri.iptables_manager.ipv4['nat'] = mock.MagicMock() fip_ns = agent.get_fip_ns(mock.sentinel.ext_net_id) subnet_id = _uuid() fip_ns.agent_gateway_port = ( {'fixed_ips': [{'ip_address': '20.0.0.30', 'subnet_id': subnet_id}], 'subnets': [{'id': subnet_id, 'cidr': '20.0.0.0/24', 'gateway_ip': '20.0.0.1'}], 'id': _uuid(), 'network_id': _uuid(), 'mac_address': 'ca:fe:de:ad:be:ef'} ) def _test_ext_gw_updated_dvr_agent_mode(self, host, agent_mode, expected_call_count): router = l3_test_common.prepare_router_data(num_internal_ports=2) agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self._set_ri_kwargs(agent, router['id'], router) ri = dvr_router.DvrLocalRouter(HOSTNAME, **self.ri_kwargs) interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(self, ri) ri._external_gateway_added = mock.Mock() # test agent mode = dvr (compute node) router['gw_port_host'] = host agent.conf.agent_mode = agent_mode ri.external_gateway_updated(ex_gw_port, interface_name) # no gateway should be added on dvr node self.assertEqual(expected_call_count, ri._external_gateway_added.call_count) def test_ext_gw_updated_dvr_agent_mode(self): # no gateway should be added on dvr node self._test_ext_gw_updated_dvr_agent_mode('any-foo', 'dvr', 0) def test_ext_gw_updated_dvr_agent_mode_host(self): # no gateway should be added on dvr node self._test_ext_gw_updated_dvr_agent_mode(HOSTNAME, 'dvr', 0) def test_external_gateway_removed_ext_gw_port_and_fip(self): agent = l3_agent.L3NATAgent(HOSTNAME, 
self.conf) agent.conf.agent_mode = lib_constants.L3_AGENT_MODE_DVR_SNAT router = l3_test_common.prepare_router_data(num_internal_ports=2) router['gw_port_host'] = HOSTNAME self.mock_driver.unplug.reset_mock() external_net_id = router['gw_port']['network_id'] self._set_ri_kwargs(agent, router['id'], router) ri = dvr_edge_rtr.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs) ri.remove_floating_ip = mock.Mock() agent._fetch_external_net_id = mock.Mock(return_value=external_net_id) ri.ex_gw_port = ri.router['gw_port'] del ri.router['gw_port'] ri.external_gateway_added( ri.ex_gw_port, ri.get_external_device_name(ri.ex_gw_port['id'])) ri.fip_ns = None nat = ri.iptables_manager.ipv4['nat'] nat.clear_rules_by_tag = mock.Mock() nat.add_rule = mock.Mock() ri.fip_ns = agent.get_fip_ns(external_net_id) subnet_id = _uuid() ri.fip_ns.agent_gateway_port = { 'fixed_ips': [{ 'ip_address': '20.0.0.30', 'prefixlen': 24, 'subnet_id': subnet_id }], 'subnets': [{'id': subnet_id, 'cidr': '20.0.0.0/24', 'gateway_ip': '20.0.0.1'}], 'id': _uuid(), 'network_id': external_net_id, 'mac_address': 'ca:fe:de:ad:be:ef'} vm_floating_ip = '19.4.4.2' ri.floating_ips_dict[vm_floating_ip] = FIP_PRI ri.rtr_fip_subnet = ri.fip_ns.local_subnets.allocate(ri.router_id) _, fip_to_rtr = ri.rtr_fip_subnet.get_pair() self.mock_ip.get_devices.return_value = [ l3_test_common.FakeDev(ri.fip_ns.get_ext_device_name(_uuid()))] ri.get_router_cidrs = mock.Mock( return_value={vm_floating_ip + '/32', '19.4.4.1/24'}) self.device_exists.return_value = True ri.external_gateway_removed( ri.ex_gw_port, ri.get_external_device_name(ri.ex_gw_port['id'])) ri.remove_floating_ip.assert_called_once_with(self.mock_ip_dev, '19.4.4.2/32') def test_get_router_cidrs_no_fip_ns(self): router = mock.MagicMock() router.get.return_value = [{'host': HOSTNAME}, {'host': mock.sentinel.otherhost}] ri = self._create_router(router) device = mock.Mock() self.assertFalse(ri.get_router_cidrs(device)) def test_get_router_cidrs_no_device_exists(self): router = mock.MagicMock() router.get.return_value = [{'host': HOSTNAME}, {'host': mock.sentinel.otherhost}] ri = self._create_router(router) fake_fip_ns = mock.Mock(return_value=True) fake_fip_ns.get_name = mock.Mock(return_value=None) fake_fip_ns.get_int_device_name = mock.Mock(return_value=None) ri.fip_ns = fake_fip_ns device = mock.Mock() device.exists = mock.Mock(return_value=False) with mock.patch.object(ip_lib, 'IPDevice', return_value=device): self.assertFalse(ri.get_router_cidrs(device)) @mock.patch.object(router_info.RouterInfo, '_add_snat_rules') @mock.patch.object(dvr_router.DvrLocalRouter, '_handle_router_snat_rules') def test_handle_snat_rule_for_centralized_fip( self, _add_snat_rules, _handle_router_snat_rules): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent.conf.agent_mode = lib_constants.L3_AGENT_MODE_DVR_SNAT self.mock_driver.unplug.reset_mock() router = l3_test_common.prepare_router_data(enable_floating_ip=True) router['gw_port_host'] = HOSTNAME self._set_ri_kwargs(agent, router['id'], router) ri = dvr_edge_rtr.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs) ri.snat_iptables_manager = mock.MagicMock() ipv4_nat = ri.snat_iptables_manager.ipv4['nat'] interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(self, ri) ri._handle_router_snat_rules(ex_gw_port, interface_name) ipv4_nat.add_rule.assert_called_once_with('snat', '-j $float-snat') @mock.patch.object(dvr_edge_rtr.DvrEdgeRouter, 'add_centralized_floatingip') def test_add_centralized_floatingip_dvr_ha( self, super_add_centralized_floatingip): agent = 
l3_agent.L3NATAgent(HOSTNAME, self.conf) agent.conf.agent_mode = lib_constants.L3_AGENT_MODE_DVR_SNAT router = l3_test_common.prepare_router_data( num_internal_ports=2, enable_ha=True) router['gw_port_host'] = HOSTNAME self.mock_driver.unplug.reset_mock() self._set_ri_kwargs(agent, router['id'], router) fip = {'id': _uuid()} fip_cidr = '11.22.33.44/24' ri = dvr_edge_ha_rtr.DvrEdgeHaRouter(HOSTNAME, [], **self.ri_kwargs) ri.is_router_master = mock.Mock(return_value=False) ri._add_vip = mock.Mock() interface_name = ri.get_snat_external_device_interface_name( ri.get_ex_gw_port()) ri.add_centralized_floatingip(fip, fip_cidr) ri._add_vip.assert_called_once_with(fip_cidr, interface_name) super_add_centralized_floatingip.assert_not_called() router[lib_constants.HA_INTERFACE_KEY]['status'] = 'DOWN' self._set_ri_kwargs(agent, router['id'], router) ri_1 = dvr_edge_ha_rtr.DvrEdgeHaRouter(HOSTNAME, [], **self.ri_kwargs) ri_1.is_router_master = mock.Mock(return_value=True) ri_1._add_vip = mock.Mock() interface_name = ri_1.get_snat_external_device_interface_name( ri_1.get_ex_gw_port()) ri_1.add_centralized_floatingip(fip, fip_cidr) ri_1._add_vip.assert_called_once_with(fip_cidr, interface_name) super_add_centralized_floatingip.assert_not_called() router[lib_constants.HA_INTERFACE_KEY]['status'] = 'ACTIVE' self._set_ri_kwargs(agent, router['id'], router) ri_2 = dvr_edge_ha_rtr.DvrEdgeHaRouter(HOSTNAME, [], **self.ri_kwargs) ri_2.is_router_master = mock.Mock(return_value=True) ri_2._add_vip = mock.Mock() interface_name = ri_2.get_snat_external_device_interface_name( ri_2.get_ex_gw_port()) ri_2.add_centralized_floatingip(fip, fip_cidr) ri_2._add_vip.assert_called_once_with(fip_cidr, interface_name) super_add_centralized_floatingip.assert_called_once_with(fip, fip_cidr) @mock.patch.object(dvr_edge_rtr.DvrEdgeRouter, 'remove_centralized_floatingip') def test_remove_centralized_floatingip(self, super_remove_centralized_floatingip): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent.conf.agent_mode = lib_constants.L3_AGENT_MODE_DVR_SNAT router = l3_test_common.prepare_router_data(num_internal_ports=2) router['gw_port_host'] = HOSTNAME self.mock_driver.unplug.reset_mock() self._set_ri_kwargs(agent, router['id'], router) fip_cidr = '11.22.33.44/24' ri = dvr_edge_ha_rtr.DvrEdgeHaRouter(HOSTNAME, [], **self.ri_kwargs) ri.is_router_master = mock.Mock(return_value=False) ri._remove_vip = mock.Mock() ri.remove_centralized_floatingip(fip_cidr) ri._remove_vip.assert_called_once_with(fip_cidr) super_remove_centralized_floatingip.assert_not_called() ri1 = dvr_edge_ha_rtr.DvrEdgeHaRouter(HOSTNAME, [], **self.ri_kwargs) ri1.is_router_master = mock.Mock(return_value=True) ri1._remove_vip = mock.Mock() ri1.remove_centralized_floatingip(fip_cidr) ri1._remove_vip.assert_called_once_with(fip_cidr) super_remove_centralized_floatingip.assert_called_once_with(fip_cidr) def test_initialize_dvr_ha_router_snat_ns_once(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent.conf.agent_mode = lib_constants.L3_AGENT_MODE_DVR_SNAT router = l3_test_common.prepare_router_data( num_internal_ports=2, enable_ha=True) router['gw_port_host'] = HOSTNAME router[lib_constants.HA_INTERFACE_KEY]['status'] = 'ACTIVE' self.mock_driver.unplug.reset_mock() self._set_ri_kwargs(agent, router['id'], router) ri = dvr_edge_ha_rtr.DvrEdgeHaRouter(HOSTNAME, [], **self.ri_kwargs) ri._ha_state_path = self.get_temp_file_path('router_ha_state') ri._create_snat_namespace = mock.Mock() ri.update_initial_state = mock.Mock() ri._plug_external_gateway = 
mock.Mock() ri.initialize(mock.Mock()) ri._create_dvr_gateway(mock.Mock(), mock.Mock()) ri._create_snat_namespace.assert_called_once_with() def test_initialize_dvr_ha_router_reset_state(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent.conf.agent_mode = lib_constants.L3_AGENT_MODE_DVR_SNAT router = l3_test_common.prepare_router_data( num_internal_ports=2, enable_ha=True) router['gw_port_host'] = HOSTNAME router[lib_constants.HA_INTERFACE_KEY]['status'] = 'ACTIVE' self.mock_driver.unplug.reset_mock() self._set_ri_kwargs(agent, router['id'], router) ri = dvr_edge_ha_rtr.DvrEdgeHaRouter(HOSTNAME, [], **self.ri_kwargs) ri._ha_state_path = self.get_temp_file_path('router_ha_state') with open(ri._ha_state_path, "w") as f: f.write("master") ri._create_snat_namespace = mock.Mock() ri.update_initial_state = mock.Mock() ri._plug_external_gateway = mock.Mock() with mock.patch("neutron.agent.linux.keepalived." "KeepalivedManager.check_processes", return_value=False): ri.initialize(mock.Mock()) with open(ri._ha_state_path, "r") as f: state = f.readline() self.assertEqual("backup", state) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l3/test_dvr_snat_ns.py0000644000175000017500000000427100000000000026327 0ustar00coreycorey00000000000000# Copyright (c) 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from oslo_utils import uuidutils from neutron.agent.common import utils from neutron.agent.l3 import dvr_snat_ns from neutron.agent.linux import ip_lib from neutron.tests import base _uuid = uuidutils.generate_uuid class TestDvrSnatNs(base.BaseTestCase): def setUp(self): super(TestDvrSnatNs, self).setUp() self.conf = mock.Mock() self.conf.state_path = cfg.CONF.state_path self.driver = mock.Mock() self.driver.DEV_NAME_LEN = 14 self.router_id = _uuid() self.snat_ns = dvr_snat_ns.SnatNamespace(self.router_id, self.conf, self.driver, use_ipv6=False) @mock.patch('neutron.privileged.agent.linux.ip_lib.set_link_attribute') @mock.patch.object(utils, 'execute') @mock.patch.object(ip_lib, 'create_network_namespace') @mock.patch.object(ip_lib, 'network_namespace_exists') def test_create(self, exists, create, execute, set_link_attr): exists.return_value = False self.snat_ns.create() netns_cmd = ['ip', 'netns', 'exec', self.snat_ns.name] loose_cmd = ['sysctl', '-w', 'net.netfilter.nf_conntrack_tcp_loose=0'] expected = [mock.call(netns_cmd + loose_cmd, check_exit_code=True, extra_ok_codes=None, log_fail_as_error=True, run_as_root=True)] create.assert_called_once_with(self.snat_ns.name) execute.assert_has_calls(expected) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l3/test_fip_rule_priority_allocator.py0000644000175000017500000000435400000000000031617 0ustar00coreycorey00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent.l3 import fip_rule_priority_allocator as frpa from neutron.tests import base class TestFipPriority(base.BaseTestCase): def test__init__(self): test_pr = frpa.FipPriority(10) self.assertEqual(10, test_pr.index) def test__repr__(self): test_pr = frpa.FipPriority(20) self.assertEqual("20", str(test_pr)) def test__eq__(self): left_pr = frpa.FipPriority(10) right_pr = frpa.FipPriority(10) other_pr = frpa.FipPriority(20) self.assertEqual(left_pr, right_pr) self.assertNotEqual(left_pr, other_pr) self.assertNotEqual(right_pr, other_pr) def test__hash__(self): left_pr = frpa.FipPriority(10) right_pr = frpa.FipPriority(10) other_pr = frpa.FipPriority(20) self.assertEqual(hash(left_pr), hash(right_pr)) self.assertNotEqual(hash(left_pr), hash(other_pr)) self.assertNotEqual(hash(other_pr), hash(right_pr)) class TestFipRulePriorityAllocator(base.BaseTestCase): def setUp(self): super(TestFipRulePriorityAllocator, self).setUp() self.priority_rule_start = 100 self.priority_rule_end = 200 self.data_store_path = '/data_store_path_test' def test__init__(self): _frpa = frpa.FipRulePriorityAllocator(self.data_store_path, self.priority_rule_start, self.priority_rule_end) self.assertEqual(self.data_store_path, _frpa.state_file) self.assertEqual(frpa.FipPriority, _frpa.ItemClass) self.assertEqual(100, len(_frpa.pool)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l3/test_ha_router.py0000644000175000017500000002013400000000000025773 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
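# The HA state tests in this module revolve around keepalived persisting
# its current state ("master" or "backup") to a plain-text file. The
# following is a minimal, illustrative sketch of the read side, assuming
# only a file path and the 'unknown' fallback these tests pin down; it is
# not the actual HaRouter property:


def _example_read_ha_state(state_path):
    """Return the persisted keepalived state, or 'unknown'."""
    try:
        with open(state_path) as state_file:
            # An empty file means the state was never written.
            return state_file.read().strip() or 'unknown'
    except (OSError, IOError):
        # A missing or unreadable file also reads as 'unknown'.
        return 'unknown'
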
import copy import signal import mock from neutron_lib import constants as n_consts from neutron_lib import fixture as lib_fixtures from oslo_utils import uuidutils from neutron.agent.l3 import ha_router from neutron.agent.l3 import router_info from neutron.tests import base from neutron.tests.common import l3_test_common _uuid = uuidutils.generate_uuid class TestBasicRouterOperations(base.BaseTestCase): def setUp(self): super(TestBasicRouterOperations, self).setUp() self.device_exists_p = mock.patch( 'neutron.agent.linux.ip_lib.device_exists') self.device_exists = self.device_exists_p.start() def _create_router(self, router=None, **kwargs): if not router: router = mock.MagicMock() self.agent_conf = mock.Mock() self.router_id = _uuid() return ha_router.HaRouter(mock.sentinel.enqueue_state, mock.sentinel.agent, self.router_id, router, self.agent_conf, mock.sentinel.driver, **kwargs) def test_get_router_cidrs_returns_ha_cidrs(self): ri = self._create_router() device = mock.MagicMock() device.name.return_value = 'eth2' addresses = ['15.1.2.2/24', '15.1.2.3/32'] ri._get_cidrs_from_keepalived = mock.MagicMock(return_value=addresses) self.assertEqual(set(addresses), ri.get_router_cidrs(device)) def test__add_default_gw_virtual_route(self): ri = self._create_router() mock_instance = mock.Mock() mock_instance.virtual_routes.gateway_routes = [] ri._get_keepalived_instance = mock.Mock(return_value=mock_instance) subnets = [{'id': _uuid(), 'cidr': '20.0.0.0/24', 'gateway_ip': None}] ex_gw_port = {'fixed_ips': [], 'subnets': subnets, 'extra_subnets': [], 'id': _uuid(), 'network_id': _uuid(), 'mac_address': 'ca:fe:de:ad:be:ef'} # Make sure no exception is raised ri._add_default_gw_virtual_route(ex_gw_port, 'qg-abc') self.assertEqual(0, len(mock_instance.virtual_routes.gateway_routes)) subnets.append({'id': _uuid(), 'cidr': '30.0.0.0/24', 'gateway_ip': '30.0.0.1'}) ri._add_default_gw_virtual_route(ex_gw_port, 'qg-abc') self.assertEqual(1, len(mock_instance.virtual_routes.gateway_routes)) subnets[1]['gateway_ip'] = None ri._add_default_gw_virtual_route(ex_gw_port, 'qg-abc') self.assertEqual(0, len(mock_instance.virtual_routes.gateway_routes)) @mock.patch.object(router_info.RouterInfo, 'remove_floating_ip') def test_remove_floating_ip(self, super_remove_floating_ip): ri = self._create_router(mock.MagicMock()) mock_instance = mock.Mock() ri._get_keepalived_instance = mock.Mock(return_value=mock_instance) device = mock.Mock() fip_cidr = '15.1.2.3/32' ri.remove_floating_ip(device, fip_cidr) self.assertTrue(super_remove_floating_ip.called) def test_destroy_state_change_monitor_ok(self): ri = self._create_router(mock.MagicMock()) # need a port for destroy_state_change_monitor() to call PM code ri.ha_port = {'id': _uuid()} with mock.patch.object(ri, '_get_state_change_monitor_process_manager')\ as m_get_state: mock_pm = m_get_state.return_value mock_pm.active = False ri.destroy_state_change_monitor(mock_pm) mock_pm.disable.assert_called_once_with( sig=str(int(signal.SIGTERM))) def test_destroy_state_change_monitor_force(self): ri = self._create_router(mock.MagicMock()) # need a port for destroy_state_change_monitor() to call PM code ri.ha_port = {'id': _uuid()} with mock.patch.object(ri, '_get_state_change_monitor_process_manager')\ as m_get_state: mock_pm = m_get_state.return_value # keep the process "active" so the SIGTERM wait times out and # the SIGKILL fallback path is exercised mock_pm.active = True with mock.patch.object(ha_router, 'SIGTERM_TIMEOUT', 0): ri.destroy_state_change_monitor(mock_pm) calls = [mock.call(sig=str(int(signal.SIGTERM))), mock.call(sig=str(int(signal.SIGKILL)))] mock_pm.disable.assert_has_calls(calls) def 
_test_ha_state(self, read_return, expected): ri = self._create_router(mock.MagicMock()) ri.keepalived_manager = mock.Mock() ri.keepalived_manager.get_full_config_file_path.return_value = ( 'ha_state') self.mock_open = self.useFixture( lib_fixtures.OpenFixture('ha_state', read_return)).mock_open self.assertEqual(expected, ri.ha_state) def test_ha_state_master(self): self._test_ha_state('master', 'master') def test_ha_state_unknown(self): # an empty state file should yield 'unknown' self._test_ha_state('', 'unknown') def test_ha_state_ioerror(self): # an error reading the state file should yield 'unknown' ri = self._create_router(mock.MagicMock()) ri.keepalived_manager = mock.Mock() ri.keepalived_manager.get_full_config_file_path.return_value = ( 'ha_state') self.mock_open = IOError self.assertEqual('unknown', ri.ha_state) def test_gateway_ports_equal(self): ri = self._create_router(mock.MagicMock()) ri.driver = mock.MagicMock() subnet_id, qos_policy_id = _uuid(), _uuid() _, old_gw_port = l3_test_common.prepare_ext_gw_test( self, ri, True) old_gw_port['qos_policy_id'] = qos_policy_id new_gw_port = copy.deepcopy(old_gw_port) new_gw_port.update({'binding:host_id': 'node02', 'updated_at': '2018-11-02T14:07:00', 'revision_number': 101, 'qos_policy_id': qos_policy_id}) self.assertTrue(ri._gateway_ports_equal(old_gw_port, new_gw_port)) fixed_ip = {'ip_address': '10.10.10.3', 'subnet_id': subnet_id} new_gw_port['fixed_ips'].append(fixed_ip) self.assertFalse(ri._gateway_ports_equal(old_gw_port, new_gw_port)) new_gw_port['fixed_ips'].remove(fixed_ip) new_gw_port['qos_policy_id'] = _uuid() self.assertFalse(ri._gateway_ports_equal(old_gw_port, new_gw_port)) def test_set_ha_port(self): ri = self._create_router() self.assertIsNone(ri.ha_port) ri.router = {} ri.set_ha_port() self.assertIsNone(ri.ha_port) # HA_INTERFACE_KEY from None to some value ri.router = {n_consts.HA_INTERFACE_KEY: {"id": _uuid(), "status": "DOWN"}} ri.set_ha_port() self.assertIsNotNone(ri.ha_port) self.assertEqual('DOWN', ri.ha_port["status"]) # HA port state change ri.router = {n_consts.HA_INTERFACE_KEY: {"id": _uuid(), "status": "ACTIVE"}} ri.set_ha_port() self.assertIsNotNone(ri.ha_port) self.assertEqual('ACTIVE', ri.ha_port["status"]) ri.router = {} ri.set_ha_port() # neutron server return empty HA_INTERFACE_KEY, but # agent side router info should remain the original value. self.assertIsNotNone(ri.ha_port) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l3/test_item_allocator.py0000644000175000017500000001321600000000000027004 0ustar00coreycorey00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
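# The tests in this module pin down the ItemAllocator contract: allocate()
# hands out a free item from the pool (or reuses one remembered from the
# state file), repeated allocate() calls with the same key return the same
# item, and release() returns the item to the pool. Below is a minimal
# in-memory sketch of that contract, assuming a set-based pool and
# ignoring the on-disk persistence the real class also performs:


def _example_allocate(pool, allocations, key):
    """Return the item bound to key, drawing from pool on first use."""
    if key not in allocations:
        allocations[key] = pool.pop()
    return allocations[key]


def _example_release(pool, allocations, key):
    """Return key's item to the pool, tolerating unknown keys."""
    item = allocations.pop(key, None)
    if item is not None:
        pool.add(item)
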
import mock from neutron.agent.l3 import item_allocator as ia from neutron.tests import base class TestObject(object): def __init__(self, value): super(TestObject, self).__init__() self._value = value def __str__(self): return str(self._value) class TestItemAllocator(base.BaseTestCase): def test__init__(self): test_pool = set(TestObject(s) for s in range(32768, 40000)) with mock.patch.object(ia.ItemAllocator, '_write') as write: a = ia.ItemAllocator('/file', TestObject, test_pool) test_object = a.allocate('test') self.assertIn('test', a.allocations) self.assertIn(test_object, a.allocations.values()) self.assertNotIn(test_object, a.pool) self.assertTrue(write.called) def test__init__readfile(self): test_pool = set(TestObject(s) for s in range(32768, 40000)) with mock.patch.object(ia.ItemAllocator, '_read') as read: read.return_value = ["da873ca2,10\n"] a = ia.ItemAllocator('/file', TestObject, test_pool) self.assertIn('da873ca2', a.remembered) self.assertEqual({}, a.allocations) def test__init__readfile_error(self): test_pool = set(TestObject(s) for s in range(32768, 40000)) with mock.patch.object(ia.ItemAllocator, '_read') as read,\ mock.patch.object(ia.ItemAllocator, '_write') as write: read.return_value = ["da873ca2,10\n", "corrupt_entry_no_delimiter\n", "42c9daf7,11\n"] a = ia.ItemAllocator('/file', TestObject, test_pool) self.assertIn('da873ca2', a.remembered) self.assertIn('42c9daf7', a.remembered) self.assertNotIn('corrupt_entry_no_delimiter', a.remembered) self.assertEqual({}, a.allocations) self.assertTrue(write.called) def test_allocate_and_lookup(self): test_pool = set([TestObject(33000), TestObject(33001)]) a = ia.ItemAllocator('/file', TestObject, test_pool) with mock.patch.object(ia.ItemAllocator, '_write') as write: test_object = a.allocate('test') # a lookup should find the same object lookup_object = a.lookup('test') self.assertIn('test', a.allocations) self.assertIn(test_object, a.allocations.values()) self.assertNotIn(test_object, a.pool) self.assertTrue(write.called) self.assertEqual(test_object, lookup_object) def test_allocate_repeated_call_with_same_key(self): test_pool = set([TestObject(33000), TestObject(33001), TestObject(33002), TestObject(33003), TestObject(33004), TestObject(33005)]) a = ia.ItemAllocator('/file', TestObject, test_pool) with mock.patch.object(ia.ItemAllocator, '_write'): test_object = a.allocate('test') test_object1 = a.allocate('test') test_object2 = a.allocate('test') test_object3 = a.allocate('test1') # same value for same key on repeated calls self.assertEqual(test_object, test_object1) self.assertEqual(test_object1, test_object2) # values for different keys should be different self.assertNotEqual(test_object, test_object3) def test_allocate_from_file(self): test_pool = set([TestObject(33000), TestObject(33001)]) with mock.patch.object(ia.ItemAllocator, '_read') as read: read.return_value = ["deadbeef,33000\n"] a = ia.ItemAllocator('/file', TestObject, test_pool) with mock.patch.object(ia.ItemAllocator, '_write') as write: t_obj = a.allocate('deadbeef') self.assertEqual('33000', t_obj._value) self.assertIn('deadbeef', a.allocations) self.assertIn(t_obj, a.allocations.values()) self.assertNotIn(33000, a.pool) self.assertFalse(write.called) def test_allocate_exhausted_pool(self): test_pool = set([TestObject(33000)]) with mock.patch.object(ia.ItemAllocator, '_read') as read: read.return_value = ["deadbeef,33000\n"] a = ia.ItemAllocator('/file', TestObject, test_pool) with mock.patch.object(ia.ItemAllocator, '_write') as write: allocation = 
a.allocate('abcdef12') self.assertNotIn('deadbeef', a.allocations) self.assertNotIn(allocation, a.pool) self.assertTrue(write.called) def test_release(self): test_pool = set([TestObject(33000), TestObject(33001)]) with mock.patch.object(ia.ItemAllocator, '_write') as write: a = ia.ItemAllocator('/file', TestObject, test_pool) allocation = a.allocate('deadbeef') write.reset_mock() a.release('deadbeef') # Just try to release the item again to see if it # throws any error a.release('deadbeef') self.assertNotIn('deadbeef', a.allocations) self.assertIn(allocation, a.pool) self.assertEqual({}, a.allocations) write.assert_called_once_with([]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l3/test_l3_agent_extension_api.py0000644000175000017500000001264000000000000030427 0ustar00coreycorey00000000000000# Copyright 2016 Comcast # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import mock from oslo_utils import uuidutils from neutron.agent.l3 import agent from neutron.agent.l3 import l3_agent_extension_api as l3_agent_api from neutron.agent.l3 import router_info as l3router from neutron.agent.linux import ip_lib from neutron.conf.agent import common as config from neutron.conf.agent.l3 import config as l3_config from neutron.tests import base class TestL3AgentExtensionApi(base.BaseTestCase): def _prepare_router_data(self, ports=None): self.router_id = uuidutils.generate_uuid() self.project_id = uuidutils.generate_uuid() self.conf = config.setup_conf() l3_config.register_l3_agent_config_opts(l3_config.OPTS, self.conf) ri_kwargs = {'router': {'id': self.router_id, 'project_id': self.project_id}, 'agent_conf': self.conf, 'interface_driver': mock.ANY, 'use_ipv6': mock.ANY} ri = l3router.RouterInfo(mock.Mock(), self.router_id, **ri_kwargs) ri.internal_ports = ports return {ri.router_id: ri}, ri def test_get_router_hosting_port_for_router_not_in_ns(self): port_ids = [1, 2] ports = [{'id': pid} for pid in port_ids] router_info, ri = self._prepare_router_data(ports) with mock.patch.object(ip_lib, 'list_network_namespaces') as mock_list_netns: mock_list_netns.return_value = [] api_object = l3_agent_api.L3AgentExtensionAPI(router_info, None) router = api_object.get_router_hosting_port(port_ids[0]) mock_list_netns.assert_called_once_with() self.assertFalse(router) def test_get_router_hosting_port_for_router_in_ns(self): port_ids = [1, 2] ports = [{'id': pid} for pid in port_ids] router_info, ri = self._prepare_router_data(ports) with mock.patch.object(ip_lib, 'list_network_namespaces') as mock_list_netns: mock_list_netns.return_value = [ri.ns_name] api_object = l3_agent_api.L3AgentExtensionAPI(router_info, None) router = api_object.get_router_hosting_port(port_ids[0]) self.assertEqual(ri, router) def test_get_routers_in_project(self): router_info, ri = self._prepare_router_data() with mock.patch.object(ip_lib, 'list_network_namespaces') as mock_list_netns: mock_list_netns.return_value = 
[ri.ns_name] api_object = l3_agent_api.L3AgentExtensionAPI(router_info, None) routers = api_object.get_routers_in_project(self.project_id) self.assertEqual([ri], routers) def test_is_router_in_namespace_for_in_ns(self): router_info, ri = self._prepare_router_data() with mock.patch.object(ip_lib, 'list_network_namespaces') as mock_list_netns: mock_list_netns.return_value = [ri.ns_name] api_object = l3_agent_api.L3AgentExtensionAPI(router_info, None) router_in_ns = api_object.is_router_in_namespace(ri.router_id) self.assertTrue(router_in_ns) def test_is_router_in_namespace_for_not_in_ns(self): router_info, ri = self._prepare_router_data() with mock.patch.object(ip_lib, 'list_network_namespaces') as mock_list_netns: mock_list_netns.return_value = [uuidutils.generate_uuid()] api_object = l3_agent_api.L3AgentExtensionAPI(router_info, None) router_in_ns = api_object.is_router_in_namespace(ri.router_id) self.assertFalse(router_in_ns) def test_get_router_info(self): router_info, ri = self._prepare_router_data() api_object = l3_agent_api.L3AgentExtensionAPI(router_info, None) self.assertEqual(ri, api_object.get_router_info(self.router_id)) def test_get_router_info_nonexistent(self): router_info, ri = self._prepare_router_data() api_object = l3_agent_api.L3AgentExtensionAPI(router_info, None) self.assertIsNone( api_object.get_router_info(uuidutils.generate_uuid())) def test_register_router(self): router_info, ri = self._prepare_router_data() router_info_cls = l3router.BaseRouterInfo router_factory = agent.RouterFactory() api_object = l3_agent_api.L3AgentExtensionAPI(router_info, router_factory) self.assertIsNone( api_object.register_router([], router_info_cls)) self.assertIsNone( api_object.register_router(['ha'], router_info_cls)) self.assertIsNone( api_object.register_router(['distributed'], router_info_cls)) self.assertIsNone( api_object.register_router(['ha', 'distributed'], router_info_cls)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l3/test_legacy_router.py0000644000175000017500000001060100000000000026645 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
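# The floating IP tests below exercise a simple status protocol: when the
# /32 address is successfully added to the gateway device, the router
# advertises it with a gratuitous ARP and reports ACTIVE; when the add
# fails, it reports ERROR and skips the advertisement. This is a rough
# sketch of that control flow only; add_fip and send_garp are
# placeholders standing in for the real helpers:


def _example_add_floating_ip(add_fip, send_garp, ip):
    """Return the floating IP status string for one add attempt."""
    if not add_fip(ip):
        # Never advertise an address that was not actually configured.
        return 'ERROR'
    send_garp(ip)
    return 'ACTIVE'
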
import mock from neutron_lib import constants as lib_constants from oslo_utils import uuidutils from neutron.agent.l3 import legacy_router from neutron.agent.linux import ip_lib from neutron.tests import base _uuid = uuidutils.generate_uuid class BasicRouterTestCaseFramework(base.BaseTestCase): def _create_router(self, router=None, **kwargs): if not router: router = mock.MagicMock() self.agent_conf = mock.Mock() self.driver = mock.Mock() self.router_id = _uuid() return legacy_router.LegacyRouter(mock.Mock(), self.router_id, router, self.agent_conf, self.driver, **kwargs) class TestBasicRouterOperations(BasicRouterTestCaseFramework): def test_remove_floating_ip(self): ri = self._create_router(mock.MagicMock()) device = mock.Mock() cidr = '15.1.2.3/32' ri.remove_floating_ip(device, cidr) device.delete_addr_and_conntrack_state.assert_called_once_with(cidr) def test_remove_external_gateway_ip(self): ri = self._create_router(mock.MagicMock()) device = mock.Mock() cidr = '172.16.0.0/24' ri.remove_external_gateway_ip(device, cidr) device.delete_addr_and_conntrack_state.assert_called_once_with(cidr) @mock.patch.object(ip_lib, 'IPDevice') def test_remove_multiple_external_gateway_ips(self, IPDevice): ri = self._create_router(mock.MagicMock()) IPDevice.return_value = device = mock.Mock() gw_ip_pri = '172.16.5.110' gw_ip_sec = '172.16.5.111' gw_ip6_pri = '2001:db8::1' gw_ip6_sec = '2001:db8::2' v4_prefixlen = 24 v6_prefixlen = 64 ex_gw_port = {'fixed_ips': [ {'ip_address': gw_ip_pri, 'prefixlen': v4_prefixlen}, {'ip_address': gw_ip_sec}, {'ip_address': gw_ip6_pri, 'prefixlen': v6_prefixlen}, {'ip_address': gw_ip6_sec}]} ri.external_gateway_removed(ex_gw_port, "qg-fake-name") cidr_pri = '%s/%s' % (gw_ip_pri, v4_prefixlen) cidr_sec = '%s/%s' % (gw_ip_sec, lib_constants.IPv4_BITS) cidr_v6 = '%s/%s' % (gw_ip6_pri, v6_prefixlen) cidr_v6_sec = '%s/%s' % (gw_ip6_sec, lib_constants.IPv6_BITS) device.delete_addr_and_conntrack_state.assert_has_calls( [mock.call(cidr_pri), mock.call(cidr_sec), mock.call(cidr_v6), mock.call(cidr_v6_sec)]) @mock.patch.object(ip_lib, 'send_ip_addr_adv_notif') class TestAddFloatingIpWithMockGarp(BasicRouterTestCaseFramework): def test_add_floating_ip(self, send_ip_addr_adv_notif): ri = self._create_router() ri._add_fip_addr_to_device = mock.Mock(return_value=True) ip = '15.1.2.3' result = ri.add_floating_ip({'floating_ip_address': ip}, mock.sentinel.interface_name, mock.sentinel.device) ip_lib.send_ip_addr_adv_notif.assert_called_once_with( ri.ns_name, mock.sentinel.interface_name, ip) self.assertEqual(lib_constants.FLOATINGIP_STATUS_ACTIVE, result) def test_add_floating_ip_error(self, send_ip_addr_adv_notif): ri = self._create_router() ri._add_fip_addr_to_device = mock.Mock(return_value=False) result = ri.add_floating_ip({'floating_ip_address': '15.1.2.3'}, mock.sentinel.interface_name, mock.sentinel.device) self.assertFalse(ip_lib.send_ip_addr_adv_notif.called) self.assertEqual(lib_constants.FLOATINGIP_STATUS_ERROR, result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l3/test_link_local_allocator.py0000644000175000017500000000213000000000000030146 0ustar00coreycorey00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from neutron.agent.l3 import link_local_allocator as lla from neutron.tests import base class TestLinkLocalAddrAllocator(base.BaseTestCase): def setUp(self): super(TestLinkLocalAddrAllocator, self).setUp() self.subnet = netaddr.IPNetwork('169.254.31.0/24') def test__init__(self): a = lla.LinkLocalAllocator('/file', self.subnet.cidr) self.assertEqual('/file', a.state_file) self.assertEqual({}, a.allocations) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l3/test_namespace_manager.py0000644000175000017500000001122100000000000027426 0ustar00coreycorey00000000000000# Copyright (c) 2015 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_utils import uuidutils from neutron.agent.l3 import dvr_fip_ns from neutron.agent.l3 import dvr_snat_ns from neutron.agent.l3 import namespace_manager from neutron.agent.l3 import namespaces from neutron.agent.linux import ip_lib from neutron.tests import base _uuid = uuidutils.generate_uuid class NamespaceManagerTestCaseFramework(base.BaseTestCase): def _create_namespace_manager(self): self.agent_conf = mock.Mock() self.driver = mock.Mock() return namespace_manager.NamespaceManager(self.agent_conf, self.driver) class TestNamespaceManager(NamespaceManagerTestCaseFramework): def setUp(self): super(TestNamespaceManager, self).setUp() self.ns_manager = self._create_namespace_manager() def test_get_prefix_and_id(self): router_id = _uuid() ns_prefix, ns_id = self.ns_manager.get_prefix_and_id( namespaces.NS_PREFIX + router_id) self.assertEqual(namespaces.NS_PREFIX, ns_prefix) self.assertEqual(router_id, ns_id) ns_prefix, ns_id = self.ns_manager.get_prefix_and_id( dvr_snat_ns.SNAT_NS_PREFIX + router_id) self.assertEqual(dvr_snat_ns.SNAT_NS_PREFIX, ns_prefix) self.assertEqual(router_id, ns_id) ns_name = 'dhcp-' + router_id self.assertIsNone(self.ns_manager.get_prefix_and_id(ns_name)) def test_is_managed(self): router_id = _uuid() router_ns_name = namespaces.NS_PREFIX + router_id self.assertTrue(self.ns_manager.is_managed(router_ns_name)) router_ns_name = dvr_snat_ns.SNAT_NS_PREFIX + router_id self.assertTrue(self.ns_manager.is_managed(router_ns_name)) ext_net_id = _uuid() router_ns_name = dvr_fip_ns.FIP_NS_PREFIX + ext_net_id self.assertTrue(self.ns_manager.is_managed(router_ns_name)) self.assertFalse(self.ns_manager.is_managed('dhcp-' + router_id)) def test_list_all(self): ns_names = [namespaces.NS_PREFIX + _uuid(), dvr_snat_ns.SNAT_NS_PREFIX + _uuid(), dvr_fip_ns.FIP_NS_PREFIX + _uuid(), 'dhcp-' + _uuid(), ] 
# Test the normal path with mock.patch.object(ip_lib, 'list_network_namespaces', return_value=ns_names): retrieved_ns_names = self.ns_manager.list_all() self.assertEqual(len(ns_names) - 1, len(retrieved_ns_names)) for i in range(len(retrieved_ns_names)): self.assertIn(ns_names[i], retrieved_ns_names) self.assertNotIn(ns_names[-1], retrieved_ns_names) # Test path where list_network_namespaces() raises exception with mock.patch.object(ip_lib, 'list_network_namespaces', side_effect=RuntimeError): retrieved_ns_names = self.ns_manager.list_all() self.assertFalse(retrieved_ns_names) def test_ensure_snat_cleanup(self): router_id = _uuid() with mock.patch.object(self.ns_manager, '_cleanup') as mock_cleanup: self.ns_manager.ensure_snat_cleanup(router_id) mock_cleanup.assert_called_once_with(dvr_snat_ns.SNAT_NS_PREFIX, router_id) def test_ensure_router_cleanup(self): router_id = _uuid() ns_names = [namespaces.NS_PREFIX + _uuid() for _ in range(5)] ns_names += [dvr_snat_ns.SNAT_NS_PREFIX + _uuid() for _ in range(5)] ns_names += [namespaces.NS_PREFIX + router_id, dvr_snat_ns.SNAT_NS_PREFIX + router_id] with mock.patch.object(ip_lib, 'list_network_namespaces', return_value=ns_names), \ mock.patch.object(self.ns_manager, '_cleanup') as mock_cleanup: self.ns_manager.ensure_router_cleanup(router_id) expected = [mock.call(namespaces.NS_PREFIX, router_id), mock.call(dvr_snat_ns.SNAT_NS_PREFIX, router_id)] mock_cleanup.assert_has_calls(expected, any_order=True) self.assertEqual(2, mock_cleanup.call_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/l3/test_router_info.py0000644000175000017500000005341400000000000026345 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
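# The routing table tests below assert on the exact 'ip route' argument
# vectors executed inside the router namespace. As an illustrative sketch
# (not the RouterInfo implementation), such a vector can be assembled from
# a neutron route dict like so, where operation is 'replace' or 'delete':


def _example_route_cmd(operation, route):
    """Build the 'ip route' argv the tests below expect."""
    return ['ip', 'route', operation,
            'to', route['destination'],
            'via', route['nexthop']]
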
import mock from neutron_lib import constants as lib_constants from neutron_lib.exceptions import l3 as l3_exc from oslo_utils import uuidutils from neutron.agent.l3 import router_info from neutron.agent.linux import ip_lib from neutron.conf.agent import common as config from neutron.conf.agent.l3 import config as l3_config from neutron.tests import base _uuid = uuidutils.generate_uuid class TestRouterInfo(base.BaseTestCase): def setUp(self): super(TestRouterInfo, self).setUp() conf = config.setup_conf() l3_config.register_l3_agent_config_opts(l3_config.OPTS, conf) self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper') ip_cls = self.ip_cls_p.start() self.mock_ip = mock.MagicMock() ip_cls.return_value = self.mock_ip self.ri_kwargs = {'agent_conf': conf, 'interface_driver': mock.sentinel.interface_driver} def _check_agent_method_called(self, calls): self.mock_ip.netns.execute.assert_has_calls( [mock.call(call, check_exit_code=False) for call in calls], any_order=True) def test_routing_table_update(self): ri = router_info.RouterInfo(mock.Mock(), _uuid(), {}, **self.ri_kwargs) ri.router = {} fake_route1 = {'destination': '135.207.0.0/16', 'nexthop': '1.2.3.4'} fake_route2 = {'destination': '135.207.111.111/32', 'nexthop': '1.2.3.4'} ri.update_routing_table('replace', fake_route1) expected = [['ip', 'route', 'replace', 'to', '135.207.0.0/16', 'via', '1.2.3.4']] self._check_agent_method_called(expected) ri.update_routing_table('delete', fake_route1) expected = [['ip', 'route', 'delete', 'to', '135.207.0.0/16', 'via', '1.2.3.4']] self._check_agent_method_called(expected) ri.update_routing_table('replace', fake_route2) expected = [['ip', 'route', 'replace', 'to', '135.207.111.111/32', 'via', '1.2.3.4']] self._check_agent_method_called(expected) ri.update_routing_table('delete', fake_route2) expected = [['ip', 'route', 'delete', 'to', '135.207.111.111/32', 'via', '1.2.3.4']] self._check_agent_method_called(expected) def test_update_routing_table(self): # Just verify the correct namespace was used in the call uuid = _uuid() netns = 'qrouter-' + uuid fake_route1 = {'destination': '135.207.0.0/16', 'nexthop': '1.2.3.4'} ri = router_info.RouterInfo(mock.Mock(), uuid, {'id': uuid}, **self.ri_kwargs) ri._update_routing_table = mock.Mock() ri.update_routing_table('replace', fake_route1) ri._update_routing_table.assert_called_once_with('replace', fake_route1, netns) def test_routes_updated(self): ri = router_info.RouterInfo(mock.Mock(), _uuid(), {}, **self.ri_kwargs) ri.router = {} fake_old_routes = [] fake_new_routes = [{'destination': "110.100.31.0/24", 'nexthop': "10.100.10.30"}, {'destination': "110.100.30.0/24", 'nexthop': "10.100.10.30"}] ri.routes = fake_old_routes ri.router['routes'] = fake_new_routes ri.routes_updated(fake_old_routes, fake_new_routes) expected = [['ip', 'route', 'replace', 'to', '110.100.30.0/24', 'via', '10.100.10.30'], ['ip', 'route', 'replace', 'to', '110.100.31.0/24', 'via', '10.100.10.30']] self._check_agent_method_called(expected) ri.routes = fake_new_routes fake_new_routes = [{'destination': "110.100.30.0/24", 'nexthop': "10.100.10.30"}] ri.router['routes'] = fake_new_routes ri.routes_updated(ri.routes, fake_new_routes) expected = [['ip', 'route', 'delete', 'to', '110.100.31.0/24', 'via', '10.100.10.30']] self._check_agent_method_called(expected) fake_new_routes = [] ri.router['routes'] = fake_new_routes ri.routes_updated(ri.routes, fake_new_routes) expected = [['ip', 'route', 'delete', 'to', '110.100.30.0/24', 'via', '10.100.10.30']] 
self._check_agent_method_called(expected) def test__process_pd_iptables_rules(self): subnet_id = _uuid() ex_gw_port = {'id': _uuid()} prefix = '2001:db8:cafe::/64' ri = router_info.RouterInfo(mock.Mock(), _uuid(), {}, **self.ri_kwargs) ipv6_mangle = ri.iptables_manager.ipv6['mangle'] = mock.MagicMock() ri.get_ex_gw_port = mock.Mock(return_value=ex_gw_port) ri.get_external_device_name = mock.Mock(return_value='fake_device') ri.get_address_scope_mark_mask = mock.Mock(return_value='fake_mark') ri._process_pd_iptables_rules(prefix, subnet_id) mangle_rule = '-d %s ' % prefix mangle_rule += ri.address_scope_mangle_rule('fake_device', 'fake_mark') ipv6_mangle.add_rule.assert_called_once_with( 'scope', mangle_rule, tag='prefix_delegation_%s' % subnet_id) def test_add_ports_address_scope_iptables(self): ri = router_info.RouterInfo(mock.Mock(), _uuid(), {}, **self.ri_kwargs) port = { 'id': _uuid(), 'fixed_ips': [{'ip_address': '172.9.9.9'}], 'address_scopes': {lib_constants.IP_VERSION_4: '1234'} } ipv4_mangle = ri.iptables_manager.ipv4['mangle'] = mock.MagicMock() ri.get_address_scope_mark_mask = mock.Mock(return_value='fake_mark') ri.get_internal_device_name = mock.Mock(return_value='fake_device') ri.rt_tables_manager = mock.MagicMock() ri.process_external_port_address_scope_routing = mock.Mock() ri.process_floating_ip_address_scope_rules = mock.Mock() ri.iptables_manager._apply = mock.Mock() ri.router[lib_constants.INTERFACE_KEY] = [port] ri.process_address_scope() ipv4_mangle.add_rule.assert_called_once_with( 'scope', ri.address_scope_mangle_rule('fake_device', 'fake_mark')) def test_address_scope_mark_ids_handling(self): mark_ids = set(range(router_info.ADDRESS_SCOPE_MARK_ID_MIN, router_info.ADDRESS_SCOPE_MARK_ID_MAX)) ri = router_info.RouterInfo(mock.Mock(), _uuid(), {}, **self.ri_kwargs) # first mark id is used for the default address scope scope_to_mark_id = {router_info.DEFAULT_ADDRESS_SCOPE: mark_ids.pop()} self.assertEqual(scope_to_mark_id, ri._address_scope_to_mark_id) self.assertEqual(mark_ids, ri.available_mark_ids) # new id should be used for new address scope ri.get_address_scope_mark_mask('new_scope') scope_to_mark_id['new_scope'] = mark_ids.pop() self.assertEqual(scope_to_mark_id, ri._address_scope_to_mark_id) self.assertEqual(mark_ids, ri.available_mark_ids) # new router should have its own mark ids set new_mark_ids = set(range(router_info.ADDRESS_SCOPE_MARK_ID_MIN, router_info.ADDRESS_SCOPE_MARK_ID_MAX)) new_ri = router_info.RouterInfo(mock.Mock(), _uuid(), {}, **self.ri_kwargs) new_mark_ids.pop() self.assertEqual(new_mark_ids, new_ri.available_mark_ids) self.assertNotEqual(ri.available_mark_ids, new_ri.available_mark_ids) def test_process_delete(self): ri = router_info.RouterInfo(mock.Mock(), _uuid(), {}, **self.ri_kwargs) ri.router = {'id': _uuid()} with mock.patch.object(ri, '_process_internal_ports') as p_i_p,\ mock.patch.object(ri, '_process_external_on_delete') as p_e_o_d: self.mock_ip.netns.exists.return_value = False ri.process_delete() self.assertFalse(p_i_p.called) self.assertFalse(p_e_o_d.called) p_i_p.reset_mock() p_e_o_d.reset_mock() self.mock_ip.netns.exists.return_value = True ri.process_delete() p_i_p.assert_called_once_with() p_e_o_d.assert_called_once_with() def test__update_internal_ports_cache(self): ri = router_info.RouterInfo(mock.Mock(), _uuid(), {}, **self.ri_kwargs) ri.internal_ports = [ {'id': 'port-id-1', 'mtu': 1500}, {'id': 'port-id-2', 'mtu': 2000}] initial_internal_ports = ri.internal_ports[:] # Test add new element to the cache new_port = {'id': 
'new-port-id', 'mtu': 1500} ri._update_internal_ports_cache(new_port) self.assertEqual( initial_internal_ports + [new_port], ri.internal_ports) # Test update existing port in cache updated_port = new_port.copy() updated_port['mtu'] = 2500 ri._update_internal_ports_cache(updated_port) self.assertEqual( initial_internal_ports + [updated_port], ri.internal_ports) class BasicRouterTestCaseFramework(base.BaseTestCase): def _create_router(self, router=None, **kwargs): if not router: router = mock.MagicMock() self.agent_conf = mock.Mock() self.router_id = _uuid() return router_info.RouterInfo(mock.Mock(), self.router_id, router, self.agent_conf, mock.sentinel.interface_driver, **kwargs) class TestBasicRouterOperations(BasicRouterTestCaseFramework): def test_get_floating_ips(self): router = mock.MagicMock() router.get.return_value = [mock.sentinel.floating_ip] ri = self._create_router(router) fips = ri.get_floating_ips() self.assertEqual([mock.sentinel.floating_ip], fips) def test_process_floating_ip_nat_rules(self): ri = self._create_router() fips = [{'fixed_ip_address': mock.sentinel.ip, 'floating_ip_address': mock.sentinel.fip}] ri.get_floating_ips = mock.Mock(return_value=fips) ri.iptables_manager = mock.MagicMock() ipv4_nat = ri.iptables_manager.ipv4['nat'] ri.floating_forward_rules = mock.Mock( return_value=[(mock.sentinel.chain, mock.sentinel.rule)]) ri.process_floating_ip_nat_rules() # Be sure that the rules are cleared first and apply is called last self.assertEqual(mock.call.clear_rules_by_tag('floating_ip'), ipv4_nat.mock_calls[0]) self.assertEqual(mock.call.apply(), ri.iptables_manager.mock_calls[-1]) # Be sure that add_rule is called somewhere in the middle ipv4_nat.add_rule.assert_called_once_with(mock.sentinel.chain, mock.sentinel.rule, tag='floating_ip') def test_process_floating_ip_nat_rules_removed(self): ri = self._create_router() ri.get_floating_ips = mock.Mock(return_value=[]) ri.iptables_manager = mock.MagicMock() ipv4_nat = ri.iptables_manager.ipv4['nat'] ri.process_floating_ip_nat_rules() # Be sure that the rules are cleared first and apply is called last self.assertEqual(mock.call.clear_rules_by_tag('floating_ip'), ipv4_nat.mock_calls[0]) self.assertEqual(mock.call.apply(), ri.iptables_manager.mock_calls[-1]) # Be sure that add_rule is called somewhere in the middle self.assertFalse(ipv4_nat.add_rule.called) def test_process_floating_ip_address_scope_rules_diff_scopes(self): ri = self._create_router() fips = [{'fixed_ip_address': mock.sentinel.ip, 'floating_ip_address': mock.sentinel.fip, 'fixed_ip_address_scope': 'scope1'}] ri.get_floating_ips = mock.Mock(return_value=fips) ri._get_external_address_scope = mock.Mock(return_value='scope2') ipv4_mangle = ri.iptables_manager.ipv4['mangle'] = mock.MagicMock() ri.floating_mangle_rules = mock.Mock( return_value=[(mock.sentinel.chain1, mock.sentinel.rule1)]) ri.get_external_device_name = mock.Mock() ri.process_floating_ip_address_scope_rules() # Be sure that the rules are cleared first self.assertEqual(mock.call.clear_rules_by_tag('floating_ip'), ipv4_mangle.mock_calls[0]) # Be sure that add_rule is called somewhere in the middle self.assertEqual(1, ipv4_mangle.add_rule.call_count) self.assertEqual(mock.call.add_rule(mock.sentinel.chain1, mock.sentinel.rule1, tag='floating_ip'), ipv4_mangle.mock_calls[1]) def test_process_floating_ip_address_scope_rules_same_scopes(self): ri = self._create_router() fips = [{'fixed_ip_address': mock.sentinel.ip, 'floating_ip_address': mock.sentinel.fip, 'fixed_ip_address_scope': 'scope1'}] 
ri.get_floating_ips = mock.Mock(return_value=fips) ri._get_external_address_scope = mock.Mock(return_value='scope1') ipv4_mangle = ri.iptables_manager.ipv4['mangle'] = mock.MagicMock() ri.process_floating_ip_address_scope_rules() # Be sure that the rules are cleared first self.assertEqual(mock.call.clear_rules_by_tag('floating_ip'), ipv4_mangle.mock_calls[0]) # Be sure that add_rule is not called somewhere in the middle self.assertFalse(ipv4_mangle.add_rule.called) def test_process_floating_ip_mangle_rules_removed(self): ri = self._create_router() ri.get_floating_ips = mock.Mock(return_value=[]) ipv4_mangle = ri.iptables_manager.ipv4['mangle'] = mock.MagicMock() ri.process_floating_ip_address_scope_rules() # Be sure that the rules are cleared first self.assertEqual(mock.call.clear_rules_by_tag('floating_ip'), ipv4_mangle.mock_calls[0]) # Be sure that add_rule is not called somewhere in the middle self.assertFalse(ipv4_mangle.add_rule.called) def _test_add_fip_addr_to_device_error(self, device): ri = self._create_router() ip = '15.1.2.3' result = ri._add_fip_addr_to_device( {'id': mock.sentinel.id, 'floating_ip_address': ip}, device) device.addr.add.assert_called_with(ip + '/32') return result def test__add_fip_addr_to_device(self): result = self._test_add_fip_addr_to_device_error(mock.Mock()) self.assertTrue(result) def test__add_fip_addr_to_device_error(self): device = mock.Mock() device.addr.add.side_effect = RuntimeError result = self._test_add_fip_addr_to_device_error(device) self.assertFalse(result) def test_process_snat_dnat_for_fip(self): ri = self._create_router() ri.process_floating_ip_nat_rules = mock.Mock(side_effect=Exception) self.assertRaises(l3_exc.FloatingIpSetupException, ri.process_snat_dnat_for_fip) ri.process_floating_ip_nat_rules.assert_called_once_with() def test_put_fips_in_error_state(self): ri = self._create_router() ri.router = mock.Mock() ri.router.get.return_value = [{'id': mock.sentinel.id1}, {'id': mock.sentinel.id2}] statuses = ri.put_fips_in_error_state() expected = {mock.sentinel.id1: lib_constants.FLOATINGIP_STATUS_ERROR, mock.sentinel.id2: lib_constants.FLOATINGIP_STATUS_ERROR} self.assertEqual(expected, statuses) def test_configure_fip_addresses(self): ri = self._create_router() ri.process_floating_ip_addresses = mock.Mock( side_effect=Exception) self.assertRaises(l3_exc.FloatingIpSetupException, ri.configure_fip_addresses, mock.sentinel.interface_name) ri.process_floating_ip_addresses.assert_called_once_with( mock.sentinel.interface_name) def test_get_router_cidrs_returns_cidrs(self): ri = self._create_router() addresses = ['15.1.2.2/24', '15.1.2.3/32'] device = mock.MagicMock() device.addr.list.return_value = [{'cidr': addresses[0]}, {'cidr': addresses[1]}] self.assertEqual(set(addresses), ri.get_router_cidrs(device)) @mock.patch.object(ip_lib, 'IPDevice') class TestFloatingIpWithMockDevice(BasicRouterTestCaseFramework): def test_process_floating_ip_addresses_remap(self, IPDevice): fip_id = _uuid() fip = { 'id': fip_id, 'port_id': _uuid(), 'floating_ip_address': '15.1.2.3', 'fixed_ip_address': '192.168.0.2', 'status': lib_constants.FLOATINGIP_STATUS_DOWN } IPDevice.return_value = device = mock.Mock() device.addr.list.return_value = [{'cidr': '15.1.2.3/32'}] ri = self._create_router() ri.get_floating_ips = mock.Mock(return_value=[fip]) fip_statuses = ri.process_floating_ip_addresses( mock.sentinel.interface_name) self.assertEqual({fip_id: lib_constants.FLOATINGIP_STATUS_ACTIVE}, fip_statuses) self.assertFalse(device.addr.add.called) 
self.assertFalse(device.addr.delete.called) def test_process_router_with_disabled_floating_ip(self, IPDevice): fip_id = _uuid() fip = { 'id': fip_id, 'port_id': _uuid(), 'floating_ip_address': '15.1.2.3', 'fixed_ip_address': '192.168.0.2' } ri = self._create_router() ri.floating_ips = [fip] ri.get_floating_ips = mock.Mock(return_value=[]) fip_statuses = ri.process_floating_ip_addresses( mock.sentinel.interface_name) self.assertIsNone(fip_statuses.get(fip_id)) def test_process_router_floating_ip_with_device_add_error(self, IPDevice): IPDevice.return_value = device = mock.Mock(side_effect=RuntimeError) device.addr.list.return_value = [] fip_id = _uuid() fip = { 'id': fip_id, 'port_id': _uuid(), 'floating_ip_address': '15.1.2.3', 'fixed_ip_address': '192.168.0.2', 'status': 'DOWN' } ri = self._create_router() ri.add_floating_ip = mock.Mock( return_value=lib_constants.FLOATINGIP_STATUS_ERROR) ri.get_floating_ips = mock.Mock(return_value=[fip]) fip_statuses = ri.process_floating_ip_addresses( mock.sentinel.interface_name) self.assertEqual({fip_id: lib_constants.FLOATINGIP_STATUS_ERROR}, fip_statuses) # TODO(mrsmith): refactor for DVR cases def test_process_floating_ip_addresses_remove(self, IPDevice): IPDevice.return_value = device = mock.Mock() device.addr.list.return_value = [{'cidr': '15.1.2.3/32'}] ri = self._create_router() ri.remove_floating_ip = mock.Mock() ri.router.get = mock.Mock(return_value=[]) fip_statuses = ri.process_floating_ip_addresses( mock.sentinel.interface_name) self.assertEqual({}, fip_statuses) ri.remove_floating_ip.assert_called_once_with(device, '15.1.2.3/32') def test_process_floating_ip_reassignment(self, IPDevice): IPDevice.return_value = device = mock.Mock() device.addr.list.return_value = [{'cidr': '15.1.2.3/32'}] fip_id = _uuid() fip = { 'id': fip_id, 'port_id': _uuid(), 'floating_ip_address': '15.1.2.3', 'fixed_ip_address': '192.168.0.3', 'status': 'DOWN' } ri = self._create_router() ri.get_floating_ips = mock.Mock(return_value=[fip]) ri.move_floating_ip = mock.Mock() ri.fip_map = {'15.1.2.3': '192.168.0.2'} ri.process_floating_ip_addresses(mock.sentinel.interface_name) ri.move_floating_ip.assert_called_once_with(fip) def test_process_floating_ip_addresses_gw_secondary_ip_not_removed( self, IPDevice): IPDevice.return_value = device = mock.Mock() device.addr.list.return_value = [{'cidr': '1.1.1.1/16'}, {'cidr': '2.2.2.2/32'}, {'cidr': '3.3.3.3/32'}, {'cidr': '4.4.4.4/32'}] ri = self._create_router() ri.get_floating_ips = mock.Mock(return_value=[ {'id': _uuid(), 'floating_ip_address': '3.3.3.3', 'status': 'DOWN'}]) ri.add_floating_ip = mock.Mock() ri.get_ex_gw_port = mock.Mock(return_value={ "fixed_ips": [{"ip_address": "1.1.1.1"}, {"ip_address": "2.2.2.2"}]}) ri.remove_floating_ip = mock.Mock() ri.process_floating_ip_addresses("qg-fake-device") ri.remove_floating_ip.assert_called_once_with(device, '4.4.4.4/32') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4270456 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/linux/0000755000175000017500000000000000000000000023213 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/linux/__init__.py0000644000175000017500000000000000000000000025312 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 
neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/linux/failing_process.py0000644000175000017500000000137500000000000026742 0ustar00coreycorey00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import sys def main(): filename = sys.argv[1] if not os.path.exists(filename): sys.exit(1) if __name__ == '__main__': main() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4270456 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/linux/openvswitch_firewall/0000755000175000017500000000000000000000000027451 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/linux/openvswitch_firewall/__init__.py0000644000175000017500000000000000000000000031550 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/linux/openvswitch_firewall/test_firewall.py0000644000175000017500000012241600000000000032675 0ustar00coreycorey00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
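# TestCreateRegNumbers below documents how symbolic register keys in a
# flow dict ('reg_port', 'reg_net', 'reg_remote_group') are rewritten to
# concrete OpenFlow register names. The sketch here assumes the
# symbolic-name-to-register-number mapping is passed in explicitly; the
# real numbers live in the openvswitch_firewall constants module:


def _example_create_reg_numbers(flow, reg_numbers):
    """Replace symbolic reg keys in flow with 'reg<N>' keys, in place."""
    for key, number in reg_numbers.items():
        if key in flow:
            flow['reg{:d}'.format(number)] = flow.pop(key)
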
import mock from neutron_lib.callbacks import events as callbacks_events from neutron_lib.callbacks import registry as callbacks_registry from neutron_lib.callbacks import resources as callbacks_resources from neutron_lib import constants from neutron_lib.utils import helpers from oslo_config import cfg import testtools from neutron.agent.common import ovs_lib from neutron.agent.common import utils from neutron.agent.linux.openvswitch_firewall import constants as ovsfw_consts from neutron.agent.linux.openvswitch_firewall import exceptions from neutron.agent.linux.openvswitch_firewall import firewall as ovsfw from neutron.conf.agent import securitygroups_rpc from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \ as ovs_consts from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import ovs_bridge from neutron.tests import base TESTING_VLAN_TAG = 1 TESTING_SEGMENT = 1000 def create_ofport(port_dict, network_type=None, physical_network=None): ovs_port = mock.Mock(vif_mac='00:00:00:00:00:00', ofport=1, port_name="port-name") return ovsfw.OFPort(port_dict, ovs_port, vlan_tag=TESTING_VLAN_TAG, segment_id=TESTING_SEGMENT, network_type=network_type, physical_network=physical_network) class TestCreateRegNumbers(base.BaseTestCase): def test_no_registers_defined(self): flow = {'foo': 'bar'} ovsfw.create_reg_numbers(flow) self.assertEqual({'foo': 'bar'}, flow) def test_all_registers_defined(self): flow = {'foo': 'bar', 'reg_port': 1, 'reg_net': 2, 'reg_remote_group': 3} expected_flow = {'foo': 'bar', 'reg{:d}'.format(ovsfw_consts.REG_PORT): 1, 'reg{:d}'.format(ovsfw_consts.REG_NET): 2, 'reg{:d}'.format(ovsfw_consts.REG_REMOTE_GROUP): 3} ovsfw.create_reg_numbers(flow) self.assertEqual(expected_flow, flow) class TestSecurityGroup(base.BaseTestCase): def setUp(self): super(TestSecurityGroup, self).setUp() self.sg = ovsfw.SecurityGroup('123') self.sg.members = {'type': [1, 2, 3, 4]} def test_update_rules_split(self): rules = [ {'foo': 'bar', 'rule': 'all'}, {'bar': 'foo'}, {'remote_group_id': '123456', 'foo': 'bar'}] expected_raw_rules = [{'foo': 'bar', 'rule': 'all'}, {'bar': 'foo'}] expected_remote_rules = [{'remote_group_id': '123456', 'foo': 'bar'}] self.sg.update_rules(rules) self.assertEqual(expected_raw_rules, self.sg.raw_rules) self.assertEqual(expected_remote_rules, self.sg.remote_rules) def test_update_rules_protocols(self): rules = [ {'foo': 'bar', 'protocol': constants.PROTO_NAME_ICMP, 'ethertype': constants.IPv4}, {'foo': 'bar', 'protocol': constants.PROTO_NAME_ICMP, 'ethertype': constants.IPv6}, {'foo': 'bar', 'protocol': constants.PROTO_NAME_IPV6_ICMP_LEGACY, 'ethertype': constants.IPv6}, {'foo': 'bar', 'protocol': constants.PROTO_NAME_TCP}, {'foo': 'bar', 'protocol': '94'}, {'foo': 'bar', 'protocol': 'baz'}, {'foo': 'no_proto'}] self.sg.update_rules(rules) self.assertEqual({'foo': 'no_proto'}, self.sg.raw_rules.pop()) protos = [rule['protocol'] for rule in self.sg.raw_rules] self.assertEqual([constants.PROTO_NUM_ICMP, constants.PROTO_NUM_IPV6_ICMP, constants.PROTO_NUM_IPV6_ICMP, constants.PROTO_NUM_TCP, 94, 'baz'], protos) def test_get_ethertype_filtered_addresses(self): addresses = self.sg.get_ethertype_filtered_addresses('type') expected_addresses = [1, 2, 3, 4] self.assertEqual(expected_addresses, addresses) class TestOFPort(base.BaseTestCase): def setUp(self): super(TestOFPort, self).setUp() self.ipv4_addresses = ['10.0.0.1', '192.168.0.1'] self.ipv6_addresses = ['fe80::f816:3eff:fe2e:1'] port_dict = {'device': 1, 'fixed_ips': self.ipv4_addresses + 
self.ipv6_addresses} self.port = create_ofport(port_dict) def test_ipv4_address(self): ipv4_addresses = self.port.ipv4_addresses self.assertEqual(self.ipv4_addresses, ipv4_addresses) def test_ipv6_address(self): ipv6_addresses = self.port.ipv6_addresses self.assertEqual(self.ipv6_addresses, ipv6_addresses) def test__get_allowed_pairs(self): port = { 'allowed_address_pairs': [ {'mac_address': 'foo', 'ip_address': '10.0.0.1'}, {'mac_address': 'bar', 'ip_address': '192.168.0.1'}, {'mac_address': 'qux', 'ip_address': '169.254.0.0/16'}, {'mac_address': 'baz', 'ip_address': '2003::f'}, ]} allowed_pairs_v4 = ovsfw.OFPort._get_allowed_pairs(port, version=4) allowed_pairs_v6 = ovsfw.OFPort._get_allowed_pairs(port, version=6) expected_aap_v4 = {('foo', '10.0.0.1'), ('bar', '192.168.0.1'), ('qux', '169.254.0.0/16')} expected_aap_v6 = {('baz', '2003::f')} self.assertEqual(expected_aap_v4, allowed_pairs_v4) self.assertEqual(expected_aap_v6, allowed_pairs_v6) def test__get_allowed_pairs_empty(self): port = {} allowed_pairs = ovsfw.OFPort._get_allowed_pairs(port, version=4) self.assertFalse(allowed_pairs) def test_update(self): old_port_dict = self.port.neutron_port_dict new_port_dict = old_port_dict.copy() added_ips = [1, 2, 3] new_port_dict.update({ 'fixed_ips': added_ips, 'allowed_address_pairs': [ {'mac_address': '00:00:00:00:00:01', 'ip_address': '192.168.0.1'}, {'mac_address': '00:00:00:00:00:01', 'ip_address': '2003::f'}], }) self.port.update(new_port_dict) self.assertEqual(new_port_dict, self.port.neutron_port_dict) self.assertIsNot(new_port_dict, self.port.neutron_port_dict) self.assertEqual(added_ips, self.port.fixed_ips) self.assertEqual({('00:00:00:00:00:01', '192.168.0.1')}, self.port.allowed_pairs_v4) self.assertIn(('00:00:00:00:00:01', '2003::f'), self.port.allowed_pairs_v6) class TestSGPortMap(base.BaseTestCase): def setUp(self): super(TestSGPortMap, self).setUp() self.map = ovsfw.SGPortMap() def test_get_or_create_sg_existing_sg(self): self.map.sec_groups['id'] = mock.sentinel sg = self.map.get_or_create_sg('id') self.assertIs(mock.sentinel, sg) def test_get_or_create_sg_nonexisting_sg(self): with mock.patch.object(ovsfw, 'SecurityGroup') as sg_mock: sg = self.map.get_or_create_sg('id') self.assertEqual(sg_mock.return_value, sg) def _check_port(self, port_id, expected_sg_ids): port = self.map.ports[port_id] expected_sgs = [self.map.sec_groups[sg_id] for sg_id in expected_sg_ids] self.assertEqual(port.sec_groups, expected_sgs) def _check_sg(self, sg_id, expected_port_ids): sg = self.map.sec_groups[sg_id] expected_ports = {self.map.ports[port_id] for port_id in expected_port_ids} self.assertEqual(sg.ports, expected_ports) def _create_ports_and_sgroups(self): sg_1 = ovsfw.SecurityGroup(1) sg_2 = ovsfw.SecurityGroup(2) sg_3 = ovsfw.SecurityGroup(3) port_a = create_ofport({'device': 'a'}) port_b = create_ofport({'device': 'b'}) self.map.ports = {'a': port_a, 'b': port_b} self.map.sec_groups = {1: sg_1, 2: sg_2, 3: sg_3} port_a.sec_groups = [sg_1, sg_2] port_b.sec_groups = [sg_2, sg_3] sg_1.ports = {port_a} sg_2.ports = {port_a, port_b} sg_3.ports = {port_b} def test_create_port(self): port = create_ofport({'device': 'a'}) sec_groups = ['1', '2'] port_dict = {'security_groups': sec_groups} self.map.create_port(port, port_dict) self._check_port('a', sec_groups) self._check_sg('1', ['a']) self._check_sg('2', ['a']) def test_update_port_sg_added(self): self._create_ports_and_sgroups() port_dict = {'security_groups': [1, 2, 3]} self.map.update_port(self.map.ports['b'], port_dict) 
self._check_port('a', [1, 2]) self._check_port('b', [1, 2, 3]) self._check_sg(1, ['a', 'b']) self._check_sg(2, ['a', 'b']) self._check_sg(3, ['b']) def test_update_port_sg_removed(self): self._create_ports_and_sgroups() port_dict = {'security_groups': [1]} self.map.update_port(self.map.ports['b'], port_dict) self._check_port('a', [1, 2]) self._check_port('b', [1]) self._check_sg(1, ['a', 'b']) self._check_sg(2, ['a']) self._check_sg(3, []) def test_remove_port(self): self._create_ports_and_sgroups() self.map.remove_port(self.map.ports['a']) self._check_port('b', [2, 3]) self._check_sg(1, []) self._check_sg(2, ['b']) self._check_sg(3, ['b']) self.assertNotIn('a', self.map.ports) def test_update_rules(self): """Just make sure it doesn't crash""" self.map.update_rules(1, []) def test_update_members(self): """Just make sure it doesn't crash""" self.map.update_members(1, []) class TestConjIdMap(base.BaseTestCase): def setUp(self): super(TestConjIdMap, self).setUp() self.conj_id_map = ovsfw.ConjIdMap() def test_get_conj_id(self): allocated = [] for direction in [constants.EGRESS_DIRECTION, constants.INGRESS_DIRECTION]: id_ = self.conj_id_map.get_conj_id( 'sg', 'remote', direction, constants.IPv4) allocated.append(id_) self.assertEqual(len(set(allocated)), 2) self.assertEqual(len(self.conj_id_map.id_map), 2) self.assertEqual(self.conj_id_map.get_conj_id( 'sg', 'remote', constants.EGRESS_DIRECTION, constants.IPv4), allocated[0]) def test_get_conj_id_invalid(self): self.assertRaises(ValueError, self.conj_id_map.get_conj_id, 'sg', 'remote', 'invalid-direction', constants.IPv6) def test_delete_sg(self): test_data = [('sg1', 'sg1'), ('sg1', 'sg2')] ids = [] for sg_id, remote_sg_id in test_data: ids.append(self.conj_id_map.get_conj_id( sg_id, remote_sg_id, constants.INGRESS_DIRECTION, constants.IPv6)) result = self.conj_id_map.delete_sg('sg1') self.assertIn(('sg1', ids[0]), result) self.assertIn(('sg2', ids[1]), result) self.assertFalse(self.conj_id_map.id_map) reallocated = self.conj_id_map.get_conj_id( 'sg-foo', 'sg-foo', constants.INGRESS_DIRECTION, constants.IPv6) self.assertIn(reallocated, ids) class TestConjIPFlowManager(base.BaseTestCase): def setUp(self): super(TestConjIPFlowManager, self).setUp() self.driver = mock.Mock() self.manager = ovsfw.ConjIPFlowManager(self.driver) self.vlan_tag = 100 self.conj_id = 16 def test_update_flows_for_vlan_no_members(self): remote_group = self.driver.sg_port_map.get_sg.return_value remote_group.members = {} with mock.patch.object(self.manager.conj_id_map, 'get_conj_id') as get_conj_id_mock: get_conj_id_mock.return_value = self.conj_id self.manager.add(self.vlan_tag, 'sg', 'remote_id', constants.INGRESS_DIRECTION, constants.IPv4, 0) self.manager.update_flows_for_vlan(self.vlan_tag) self.assertFalse(remote_group.get_ethertype_filtered_addresses.called) self.assertFalse(self.driver._add_flow.called) def test_update_flows_for_vlan_no_ports_but_members(self): remote_group = self.driver.sg_port_map.get_sg.return_value remote_group.ports = set() remote_group.members = {constants.IPv4: ['10.22.3.4']} remote_group.get_ethertype_filtered_addresses.return_value = [ '10.22.3.4'] with mock.patch.object(self.manager.conj_id_map, 'get_conj_id') as get_conj_id_mock: get_conj_id_mock.return_value = self.conj_id self.manager.add(self.vlan_tag, 'sg', 'remote_id', constants.INGRESS_DIRECTION, constants.IPv4, 0) self.manager.update_flows_for_vlan(self.vlan_tag) self.assertTrue(remote_group.get_ethertype_filtered_addresses.called) self.assertTrue(self.driver._add_flow.called) def 
test_update_flows_for_vlan(self): remote_group = self.driver.sg_port_map.get_sg.return_value remote_group.get_ethertype_filtered_addresses.return_value = [ '10.22.3.4'] with mock.patch.object(self.manager.conj_id_map, 'get_conj_id') as get_conj_id_mock: get_conj_id_mock.return_value = self.conj_id self.manager.add(self.vlan_tag, 'sg', 'remote_id', constants.INGRESS_DIRECTION, constants.IPv4, 0) self.manager.add(self.vlan_tag, 'sg', 'remote_id', constants.INGRESS_DIRECTION, constants.IPv4, 3) self.manager.update_flows_for_vlan(self.vlan_tag) self.assertEqual(self.driver._add_flow.call_args_list, [mock.call(actions='conjunction(16,1/2)', ct_state='+est-rel-rpl', dl_type=2048, nw_src='10.22.3.4/32', priority=70, reg_net=self.vlan_tag, table=82), mock.call(actions='conjunction(17,1/2)', ct_state='+new-est', dl_type=2048, nw_src='10.22.3.4/32', priority=70, reg_net=self.vlan_tag, table=82), mock.call(actions='conjunction(22,1/2)', ct_state='+est-rel-rpl', dl_type=2048, nw_src='10.22.3.4/32', priority=73, reg_net=self.vlan_tag, table=82), mock.call(actions='conjunction(23,1/2)', ct_state='+new-est', dl_type=2048, nw_src='10.22.3.4/32', priority=73, reg_net=self.vlan_tag, table=82)]) def test_sg_removed(self): with mock.patch.object(self.manager.conj_id_map, 'get_conj_id') as get_id_mock, \ mock.patch.object(self.manager.conj_id_map, 'delete_sg') as delete_sg_mock: get_id_mock.return_value = self.conj_id delete_sg_mock.return_value = [('remote_id', self.conj_id)] self.manager.add(self.vlan_tag, 'sg', 'remote_id', constants.INGRESS_DIRECTION, constants.IPv4, 0) self.manager.flow_state[self.vlan_tag][( constants.INGRESS_DIRECTION, constants.IPv4)] = { '10.22.3.4': [self.conj_id]} self.manager.sg_removed('sg') self.driver._add_flow.assert_not_called() self.driver.delete_flows_for_ip_addresses.assert_called_once_with( {'10.22.3.4'}, constants.INGRESS_DIRECTION, constants.IPv4, self.vlan_tag) class FakeOVSPort(object): def __init__(self, name, port, mac): self.port_name = name self.ofport = port self.vif_mac = mac class TestOVSFirewallDriver(base.BaseTestCase): def setUp(self): super(TestOVSFirewallDriver, self).setUp() mock_bridge = mock.patch.object( ovs_lib, 'OVSBridge', autospec=True).start() securitygroups_rpc.register_securitygroups_opts() self.firewall = ovsfw.OVSFirewallDriver(mock_bridge) self.mock_bridge = self.firewall.int_br self.mock_bridge.reset_mock() self.fake_ovs_port = FakeOVSPort('port', 1, '00:00:00:00:00:00') self.mock_bridge.br.get_vif_port_by_id.return_value = \ self.fake_ovs_port cfg.CONF.set_override('explicitly_egress_direct', True, 'AGENT') def _prepare_security_group(self): security_group_rules = [ {'ethertype': constants.IPv4, 'protocol': constants.PROTO_NAME_TCP, 'direction': constants.INGRESS_DIRECTION, 'port_range_min': 123, 'port_range_max': 123}] self.firewall.update_security_group_rules(1, security_group_rules) security_group_rules = [ {'ethertype': constants.IPv4, 'protocol': constants.PROTO_NAME_UDP, 'direction': constants.EGRESS_DIRECTION}, {'ethertype': constants.IPv6, 'protocol': constants.PROTO_NAME_TCP, 'remote_group_id': 2, 'direction': constants.EGRESS_DIRECTION}] self.firewall.update_security_group_rules(2, security_group_rules) @property def port_ofport(self): return self.mock_bridge.br.get_vif_port_by_id.return_value.ofport @property def port_mac(self): return self.mock_bridge.br.get_vif_port_by_id.return_value.vif_mac def test_callbacks_registered(self): with mock.patch.object(callbacks_registry, "subscribe") as subscribe: firewall = 
ovsfw.OVSFirewallDriver(mock.MagicMock()) subscribe.assert_called_once_with( firewall._init_firewall_callback, callbacks_resources.AGENT, callbacks_events.OVS_RESTARTED) def test_initialize_bridge(self): br = self.firewall.initialize_bridge(self.mock_bridge) self.assertEqual(br, self.mock_bridge.deferred.return_value) def test__add_flow_dl_type_formatted_to_string(self): dl_type = 0x0800 self.firewall._add_flow(dl_type=dl_type) def test__add_flow_registers_are_replaced(self): self.firewall._add_flow(in_port=1, reg_port=1, reg_net=2) expected_calls = {'in_port': 1, 'reg{:d}'.format(ovsfw_consts.REG_PORT): 1, 'reg{:d}'.format(ovsfw_consts.REG_NET): 2} self.mock_bridge.br.add_flow.assert_called_once_with( **expected_calls) def test__drop_all_unmatched_flows(self): self.firewall._drop_all_unmatched_flows() expected_calls = [ mock.call(actions='drop', priority=0, table=ovs_consts.BASE_EGRESS_TABLE), mock.call(actions='drop', priority=0, table=ovs_consts.RULES_EGRESS_TABLE), mock.call(actions='drop', priority=0, table=ovs_consts.ACCEPT_OR_INGRESS_TABLE), mock.call(actions='drop', priority=0, table=ovs_consts.BASE_INGRESS_TABLE), mock.call(actions='drop', priority=0, table=ovs_consts.RULES_INGRESS_TABLE)] actual_calls = self.firewall.int_br.br.add_flow.call_args_list self.assertEqual(expected_calls, actual_calls) def test_get_or_create_ofport_non_existing(self): port_dict = { 'device': 'port-id', 'security_groups': [123, 456]} port = self.firewall.get_or_create_ofport(port_dict) sg1, sg2 = sorted( self.firewall.sg_port_map.sec_groups.values(), key=lambda x: x.id) self.assertIn(port, self.firewall.sg_port_map.ports.values()) self.assertEqual( sorted(port.sec_groups, key=lambda x: x.id), [sg1, sg2]) self.assertIn(port, sg1.ports) self.assertIn(port, sg2.ports) def test_get_or_create_ofport_existing(self): port_dict = { 'device': 'port-id', 'security_groups': [123, 456]} of_port = create_ofport(port_dict) self.firewall.sg_port_map.ports[of_port.id] = of_port port = self.firewall.get_or_create_ofport(port_dict) sg1, sg2 = sorted( self.firewall.sg_port_map.sec_groups.values(), key=lambda x: x.id) self.assertIs(of_port, port) self.assertIn(port, self.firewall.sg_port_map.ports.values()) self.assertEqual( sorted(port.sec_groups, key=lambda x: x.id), [sg1, sg2]) self.assertIn(port, sg1.ports) self.assertIn(port, sg2.ports) def test_get_or_create_ofport_changed(self): port_dict = { 'device': 'port-id', 'security_groups': [123, 456]} of_port = create_ofport(port_dict) self.firewall.sg_port_map.ports[of_port.id] = of_port fake_ovs_port = FakeOVSPort('port', 2, '00:00:00:00:00:00') self.mock_bridge.br.get_vif_port_by_id.return_value = \ fake_ovs_port port = self.firewall.get_or_create_ofport(port_dict) self.assertIn(of_port.id, self.firewall.sg_port_map.ports.keys()) self.assertEqual(port.ofport, 2) def test_get_or_create_ofport_missing(self): port_dict = { 'device': 'port-id', 'security_groups': [123, 456]} self.mock_bridge.br.get_vif_port_by_id.return_value = None with testtools.ExpectedException(exceptions.OVSFWPortNotFound): self.firewall.get_or_create_ofport(port_dict) def test_get_or_create_ofport_missing_nocreate(self): port_dict = { 'device': 'port-id', 'security_groups': [123, 456]} self.mock_bridge.br.get_vif_port_by_id.return_value = None self.assertIsNone(self.firewall.get_ofport(port_dict)) self.assertFalse(self.mock_bridge.br.get_vif_port_by_id.called) def test_is_port_managed_managed_port(self): port_dict = {'device': 'port-id'} self.firewall.sg_port_map.ports[port_dict['device']] = object() 
is_managed = self.firewall.is_port_managed(port_dict) self.assertTrue(is_managed) def test_is_port_managed_not_managed_port(self): port_dict = {'device': 'port-id'} is_managed = self.firewall.is_port_managed(port_dict) self.assertFalse(is_managed) def test_prepare_port_filter(self): port_dict = {'device': 'port-id', 'security_groups': [1], 'fixed_ips': ["10.0.0.1"]} self._prepare_security_group() self.firewall.prepare_port_filter(port_dict) exp_egress_classifier = mock.call( actions='set_field:{:d}->reg5,set_field:{:d}->reg6,' 'resubmit(,{:d})'.format( self.port_ofport, TESTING_VLAN_TAG, ovs_consts.BASE_EGRESS_TABLE), in_port=self.port_ofport, priority=100, table=ovs_consts.TRANSIENT_TABLE) exp_ingress_classifier = mock.call( actions='set_field:{:d}->reg5,set_field:{:d}->reg6,' 'strip_vlan,resubmit(,{:d})'.format( self.port_ofport, TESTING_VLAN_TAG, ovs_consts.BASE_INGRESS_TABLE), dl_dst=self.port_mac, dl_vlan='0x%x' % TESTING_VLAN_TAG, priority=90, table=ovs_consts.TRANSIENT_TABLE) filter_rule = mock.call( actions='ct(commit,zone=NXM_NX_REG6[0..15]),' 'output:{:d},resubmit(,{:d})'.format( self.port_ofport, ovs_consts.ACCEPTED_INGRESS_TRAFFIC_TABLE), dl_type="0x{:04x}".format(constants.ETHERTYPE_IP), nw_proto=constants.PROTO_NUM_TCP, priority=77, reg5=self.port_ofport, ct_state=ovsfw_consts.OF_STATE_NEW_NOT_ESTABLISHED, table=ovs_consts.RULES_INGRESS_TABLE, tcp_dst='0x007b') calls = self.mock_bridge.br.add_flow.call_args_list for call in exp_ingress_classifier, exp_egress_classifier, filter_rule: self.assertIn(call, calls) def test_prepare_port_filter_port_security_disabled(self): port_dict = {'device': 'port-id', 'security_groups': [1], 'port_security_enabled': False} self._prepare_security_group() with mock.patch.object( self.firewall, 'initialize_port_flows') as m_init_flows: self.firewall.prepare_port_filter(port_dict) self.assertFalse(m_init_flows.called) def test_initialize_port_flows_vlan_dvr_conntrack_direct(self): port_dict = { 'device': 'port-id', 'security_groups': [1]} of_port = create_ofport(port_dict, network_type=constants.TYPE_VXLAN) self.firewall.sg_port_map.ports[of_port.id] = of_port port = self.firewall.get_or_create_ofport(port_dict) fake_patch_port = 999 self.mock_bridge.br.get_port_ofport.return_value = fake_patch_port self.firewall.initialize_port_flows(port) call_args1 = { 'table': ovs_consts.TRANSIENT_TABLE, 'priority': 100, 'in_port': port.ofport, 'actions': 'set_field:{:d}->reg{:d},' 'set_field:{:d}->reg{:d},' 'resubmit(,{:d})'.format( port.ofport, ovsfw_consts.REG_PORT, port.vlan_tag, ovsfw_consts.REG_NET, ovs_consts.BASE_EGRESS_TABLE)} egress_flow_call = mock.call(**call_args1) call_args2 = { 'table': ovs_consts.TRANSIENT_TABLE, 'priority': 90, 'dl_dst': port.mac, 'dl_vlan': '0x%x' % port.segment_id, 'actions': 'set_field:{:d}->reg{:d},' 'set_field:{:d}->reg{:d},' 'strip_vlan,resubmit(,{:d})'.format( port.ofport, ovsfw_consts.REG_PORT, port.vlan_tag, ovsfw_consts.REG_NET, ovs_consts.BASE_INGRESS_TABLE)} ingress_flow_call1 = mock.call(**call_args2) call_args3 = { 'table': ovs_consts.TRANSIENT_TABLE, 'priority': 90, 'dl_dst': port.mac, 'dl_vlan': '0x%x' % port.vlan_tag, 'actions': 'set_field:{:d}->reg{:d},' 'set_field:{:d}->reg{:d},' 'strip_vlan,resubmit(,{:d})'.format( port.ofport, ovsfw_consts.REG_PORT, port.vlan_tag, ovsfw_consts.REG_NET, ovs_consts.BASE_INGRESS_TABLE)} ingress_flow_call2 = mock.call(**call_args3) self.mock_bridge.br.add_flow.assert_has_calls( [egress_flow_call, ingress_flow_call1, ingress_flow_call2]) def 
test_initialize_port_flows_vlan_dvr_conntrack_direct_vlan(self): port_dict = { 'device': 'port-id', 'security_groups': [1]} of_port = create_ofport(port_dict, network_type=constants.TYPE_VLAN, physical_network='vlan1') self.firewall.sg_port_map.ports[of_port.id] = of_port port = self.firewall.get_or_create_ofport(port_dict) fake_patch_port = 999 self.mock_bridge.br.get_port_ofport.return_value = fake_patch_port with mock.patch.object(helpers, "parse_mappings", return_value={"vlan1": "br-vlan1"}): self.firewall.initialize_port_flows(port) def test_delete_all_port_flows(self): port_dict = { 'device': 'port-id', 'security_groups': [1]} of_port = create_ofport(port_dict, network_type=constants.TYPE_VXLAN) self.firewall.sg_port_map.ports[of_port.id] = of_port port = self.firewall.get_or_create_ofport(port_dict) self.firewall.delete_all_port_flows(port) call_args1 = {"strict": True, "priority": 90, "table": ovs_consts.TRANSIENT_TABLE, "dl_dst": port.mac, "dl_vlan": port.vlan_tag} flow1 = mock.call(**call_args1) call_args2 = {"strict": True, "priority": 90, "table": ovs_consts.TRANSIENT_TABLE, "dl_dst": port.mac, "dl_vlan": port.segment_id} flow2 = mock.call(**call_args2) call_args3 = {"table": ovs_consts.ACCEPT_OR_INGRESS_TABLE, "dl_dst": port.mac, "reg6": port.vlan_tag} flow3 = mock.call(**call_args3) call_args4 = {"in_port": port.ofport, "strict": True, "priority": 100, "table": ovs_consts.TRANSIENT_TABLE} flow4 = mock.call(**call_args4) call_args5 = {"reg5": port.ofport} flow5 = mock.call(**call_args5) call_args6 = {"table": ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE, "dl_dst": port.mac, "reg6": port.vlan_tag} flow6 = mock.call(**call_args6) call_args7 = {"table": ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE, "dl_src": port.mac, "reg6": port.vlan_tag} flow7 = mock.call(**call_args7) self.mock_bridge.br.delete_flows.assert_has_calls( [flow1, flow2, flow3, flow6, flow7, flow4, flow5]) def test_prepare_port_filter_initialized_port(self): port_dict = {'device': 'port-id', 'security_groups': [1]} self._prepare_security_group() self.firewall.prepare_port_filter(port_dict) self.assertFalse(self.mock_bridge.br.delete_flows.called) self.firewall.prepare_port_filter(port_dict) self.assertTrue(self.mock_bridge.br.delete_flows.called) def test_update_port_filter(self): port_dict = {'device': 'port-id', 'security_groups': [1]} self._prepare_security_group() self.firewall.prepare_port_filter(port_dict) port_dict['security_groups'] = [2] self.mock_bridge.reset_mock() self.firewall.update_port_filter(port_dict) self.assertTrue(self.mock_bridge.br.delete_flows.called) conj_id = self.firewall.conj_ip_manager.conj_id_map.get_conj_id( 2, 2, constants.EGRESS_DIRECTION, constants.IPv6) filter_rules = [mock.call( actions='resubmit(,{:d})'.format( ovs_consts.ACCEPT_OR_INGRESS_TABLE), dl_type="0x{:04x}".format(constants.ETHERTYPE_IP), nw_proto=constants.PROTO_NUM_UDP, priority=77, ct_state=ovsfw_consts.OF_STATE_NEW_NOT_ESTABLISHED, reg5=self.port_ofport, table=ovs_consts.RULES_EGRESS_TABLE), mock.call( actions='conjunction({:d},2/2)'.format(conj_id + 6), ct_state=ovsfw_consts.OF_STATE_ESTABLISHED_NOT_REPLY, dl_type=mock.ANY, nw_proto=6, priority=73, reg5=self.port_ofport, table=ovs_consts.RULES_EGRESS_TABLE)] self.mock_bridge.br.add_flow.assert_has_calls( filter_rules, any_order=True) def test_update_port_filter_create_new_port_if_not_present(self): port_dict = {'device': 'port-id', 'security_groups': [1]} self._prepare_security_group() with mock.patch.object( self.firewall, 'prepare_port_filter' ) as 
prepare_mock, mock.patch.object( self.firewall, 'initialize_port_flows' ) as initialize_port_flows_mock, mock.patch.object( self.firewall, 'add_flows_from_rules' ) as add_flows_from_rules_mock: self.firewall.update_port_filter(port_dict) self.assertFalse(prepare_mock.called) self.assertFalse(self.mock_bridge.br.delete_flows.called) self.assertTrue(initialize_port_flows_mock.called) self.assertTrue(add_flows_from_rules_mock.called) def test_update_port_filter_port_security_disabled(self): port_dict = {'device': 'port-id', 'security_groups': [1]} self._prepare_security_group() self.firewall.prepare_port_filter(port_dict) port_dict['port_security_enabled'] = False self.firewall.update_port_filter(port_dict) self.assertTrue(self.mock_bridge.br.delete_flows.called) def test_update_port_filter_applies_added_flows(self): """Check flows are applied right after _set_flows is called.""" port_dict = {'device': 'port-id', 'security_groups': [1]} self._prepare_security_group() self.firewall.prepare_port_filter(port_dict) with self.firewall.defer_apply(): self.firewall.update_port_filter(port_dict) self.assertEqual(2, self.mock_bridge.apply_flows.call_count) def test_update_port_filter_clean_when_port_not_found(self): """Check flows are cleaned if port is not found in the bridge.""" port_dict = {'device': 'port-id', 'security_groups': [1]} self._prepare_security_group() self.firewall.prepare_port_filter(port_dict) self.mock_bridge.br.get_vif_port_by_id.return_value = None self.firewall.update_port_filter(port_dict) self.assertTrue(self.mock_bridge.br.delete_flows.called) def test_remove_port_filter(self): port_dict = {'device': 'port-id', 'security_groups': [1]} self._prepare_security_group() self.firewall.prepare_port_filter(port_dict) self.firewall.remove_port_filter(port_dict) self.assertTrue(self.mock_bridge.br.delete_flows.called) self.assertIn(1, self.firewall.sg_to_delete) def test_remove_port_filter_port_security_disabled(self): port_dict = {'device': 'port-id', 'security_groups': [1]} self.firewall.remove_port_filter(port_dict) self.assertFalse(self.mock_bridge.br.delete_flows.called) def test_update_security_group_rules(self): """Just make sure it doesn't crash""" new_rules = [ {'ethertype': constants.IPv4, 'direction': constants.INGRESS_DIRECTION, 'protocol': constants.PROTO_NAME_ICMP}, {'ethertype': constants.IPv4, 'direction': constants.EGRESS_DIRECTION, 'remote_group_id': 2}] self.firewall.update_security_group_rules(1, new_rules) def test_update_security_group_members(self): """Just make sure it doesn't crash""" new_members = {constants.IPv4: [1, 2, 3, 4]} self.firewall.update_security_group_members(2, new_members) def test__cleanup_stale_sg(self): self._prepare_security_group() self.firewall.sg_to_delete = {1} with mock.patch.object(self.firewall.conj_ip_manager, 'sg_removed') as sg_removed_mock,\ mock.patch.object(self.firewall.sg_port_map, 'delete_sg') as delete_sg_mock: self.firewall._cleanup_stale_sg() sg_removed_mock.assert_called_once_with(1) delete_sg_mock.assert_called_once_with(1) def test_get_ovs_port(self): ovs_port = self.firewall.get_ovs_port('port_id') self.assertEqual(self.fake_ovs_port, ovs_port) def test_get_ovs_port_non_existent(self): self.mock_bridge.br.get_vif_port_by_id.return_value = None with testtools.ExpectedException(exceptions.OVSFWPortNotFound): self.firewall.get_ovs_port('port_id') def test__initialize_egress_no_port_security_sends_to_egress(self): self.mock_bridge.br.db_get_val.return_value = {'tag': TESTING_VLAN_TAG} 
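        # (What this test and test__initialize_egress_no_port_security_no_tag
        #  below pin down, inferred from their assertions: for a port without
        #  port security the driver installs a single TRANSIENT_TABLE flow
        #  sending the port's traffic straight to ACCEPT_OR_INGRESS_TABLE,
        #  bypassing the per-port rule tables, and it can only do so once the
        #  bridge reports a VLAN tag for the port.)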
self.firewall._initialize_egress_no_port_security('port_id') expected_call = mock.call( table=ovs_consts.TRANSIENT_TABLE, priority=100, in_port=self.fake_ovs_port.ofport, actions='set_field:%d->reg%d,' 'set_field:%d->reg%d,' 'resubmit(,%d)' % ( self.fake_ovs_port.ofport, ovsfw_consts.REG_PORT, TESTING_VLAN_TAG, ovsfw_consts.REG_NET, ovs_consts.ACCEPT_OR_INGRESS_TABLE) ) calls = self.mock_bridge.br.add_flow.call_args_list self.assertIn(expected_call, calls) def test__initialize_egress_no_port_security_no_tag(self): self.mock_bridge.br.db_get_val.return_value = {} self.firewall._initialize_egress_no_port_security('port_id') self.assertFalse(self.mock_bridge.br.add_flow.called) def test__remove_egress_no_port_security_deletes_flow(self): self.mock_bridge.br.db_get_val.return_value = {'tag': TESTING_VLAN_TAG} self.firewall.sg_port_map.unfiltered['port_id'] = ( self.fake_ovs_port, 100) self.firewall._remove_egress_no_port_security('port_id') expected_call = mock.call( table=ovs_consts.TRANSIENT_TABLE, in_port=self.fake_ovs_port.ofport, ) calls = self.mock_bridge.br.delete_flows.call_args_list self.assertIn(expected_call, calls) def test__remove_egress_no_port_security_non_existing_port(self): with testtools.ExpectedException(exceptions.OVSFWPortNotHandled): self.firewall._remove_egress_no_port_security('foo') def test_process_trusted_ports_caches_port_id(self): vif_port = ovs_lib.VifPort('name', 1, 'id', 'mac', mock.ANY) with mock.patch.object(self.firewall.int_br.br, 'get_vifs_by_ids', return_value={'port_id': vif_port}): self.firewall.process_trusted_ports(['port_id']) self.assertEqual(1, len(self.firewall.sg_port_map.unfiltered.keys())) ofport, _ = self.firewall.sg_port_map.unfiltered['port_id'] self.assertEqual(vif_port.ofport, ofport.ofport) def test_process_trusted_ports_port_not_found(self): """Check that exception is not propagated outside.""" with mock.patch.object(self.firewall.int_br.br, 'get_vifs_by_ids', return_value={}): self.firewall.process_trusted_ports(['port_id']) # Processing should have failed so port is not cached self.assertEqual(0, len(self.firewall.sg_port_map.unfiltered)) def test_remove_trusted_ports_clears_cached_port_id(self): self.firewall.sg_port_map.unfiltered['port_id'] = ( self.fake_ovs_port, 100) self.firewall.remove_trusted_ports(['port_id']) self.assertNotIn('port_id', self.firewall.sg_port_map.unfiltered) def test_remove_trusted_ports_not_managed_port(self): """Check that exception is not propagated outside.""" self.firewall.remove_trusted_ports(['port_id']) class TestCookieContext(base.BaseTestCase): def setUp(self): super(TestCookieContext, self).setUp() # Don't attempt to connect to ovsdb mock.patch('neutron.agent.ovsdb.impl_idl.api_factory').start() # Don't trigger iptables -> ovsfw migration mock.patch( 'neutron.agent.linux.openvswitch_firewall.iptables.Helper').start() self.execute = mock.patch.object( utils, "execute", spec=utils.execute).start() bridge = ovs_bridge.OVSAgentBridge('foo', os_ken_app=mock.Mock()) mock.patch.object( ovsfw.OVSFirewallDriver, 'initialize_bridge', return_value=bridge.deferred( full_ordered=True, use_bundle=True)).start() securitygroups_rpc.register_securitygroups_opts() self.firewall = ovsfw.OVSFirewallDriver(bridge) # Remove calls from firewall initialization self.execute.reset_mock() def test_cookie_is_different_in_context(self): default_cookie = self.firewall.int_br.br.default_cookie with self.firewall.update_cookie_context(): self.firewall._add_flow(actions='drop') update_cookie = self.firewall._update_cookie 
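        # (The attribute captured above is the point of the test:
        #  update_cookie_context swaps in a temporary cookie for the duration
        #  of the with block, and the expected_calls built below verify that
        #  the flow added inside the block carried the temporary cookie while
        #  the one added afterwards carried the bridge's default cookie.)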
self.firewall._add_flow(actions='drop') expected_calls = [ mock.call( mock.ANY, process_input='hard_timeout=0,idle_timeout=0,priority=1,' 'cookie=%d,actions=drop' % cookie, run_as_root=mock.ANY, ) for cookie in (update_cookie, default_cookie) ] self.execute.assert_has_calls(expected_calls) def test_context_cookie_is_not_left_as_used(self): with self.firewall.update_cookie_context(): update_cookie = self.firewall._update_cookie self.assertNotIn( update_cookie, self.firewall.int_br.br._reserved_cookies) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/linux/openvswitch_firewall/test_iptables.py0000644000175000017500000001207700000000000032674 0ustar00coreycorey00000000000000# Copyright 2017 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.agent.linux import iptables_firewall from neutron.agent.linux.openvswitch_firewall import iptables from neutron.tests import base class TestHelper(base.BaseTestCase): def setUp(self): super(TestHelper, self).setUp() self.helper = iptables.Helper(mock.Mock()) mock.patch.object(iptables_firewall, 'cfg').start() mock.patch('neutron.agent.linux.ip_conntrack.get_conntrack').start() def test_get_hybrid_ports(self): present_ports = ['tap1234', 'qvo-1234', 'tap9876', 'qvo-fghfhfh'] self.helper.int_br.get_port_name_list.return_value = present_ports expected_hybrid_ports = ['qvo-1234', 'qvo-fghfhfh'] observed = self.helper.get_hybrid_ports() self.assertItemsEqual(expected_hybrid_ports, observed) def test_has_not_been_cleaned_no_value(self): other_config = {'foo': 'bar'} self.helper.int_br.db_get_val.return_value = other_config self.assertTrue(self.helper.has_not_been_cleaned) def test_has_not_been_cleaned_true(self): other_config = {'foo': 'bar', iptables.Helper.CLEANED_METADATA: 'true'} self.helper.int_br.db_get_val.return_value = other_config self.assertFalse(self.helper.has_not_been_cleaned) def test_has_not_been_cleaned_false(self): other_config = {'foo': 'bar', iptables.Helper.CLEANED_METADATA: 'false'} self.helper.int_br.db_get_val.return_value = other_config self.assertTrue(self.helper.has_not_been_cleaned) def test_load_driver_if_needed_no_hybrid_ports(self): self.helper.int_br.get_port_name_list.return_value = [ 'tap1234', 'tap9876'] self.helper.load_driver_if_needed() self.assertIsNone(self.helper.iptables_driver) def test_load_driver_if_needed_hybrid_ports_cleaned(self): """If it was cleaned, the driver shouldn't be loaded.""" self.helper.int_br.get_port_name_list.return_value = [ 'tap1234', 'qvo-1234', 'tap9876', 'qvo-fghfhfh'] self.helper.int_br.db_get_val.return_value = { 'foo': 'bar', iptables.Helper.CLEANED_METADATA: 'true'} self.helper.load_driver_if_needed() self.assertIsNone(self.helper.iptables_driver) def test_load_driver_if_needed_hybrid_ports_not_cleaned(self): """If it hasn't been cleaned, the driver should be loaded.""" self.helper.int_br.get_port_name_list.return_value = [ 'tap1234', 'qvo-1234', 
'tap9876', 'qvo-fghfhfh'] self.helper.int_br.db_get_val.return_value = {'foo': 'bar'} self.helper.load_driver_if_needed() self.assertIsNotNone(self.helper.iptables_driver) def test_get_iptables_driver_instance_has_correct_instance(self): instance = iptables.get_iptables_driver_instance() self.assertIsInstance( instance, iptables_firewall.OVSHybridIptablesFirewallDriver) def test_cleanup_port_last_port_marks_cleaned(self): self.helper.iptables_driver = mock.Mock() self.helper.hybrid_ports = {'qvoport'} with mock.patch.object(self.helper, 'mark_as_cleaned') as mock_mark: self.helper.cleanup_port({'device': 'port'}) self.assertIsNone(self.helper.iptables_driver) self.assertTrue(mock_mark.called) def test_cleanup_port_existing_ports(self): self.helper.iptables_driver = mock.Mock() self.helper.hybrid_ports = {'qvoport', 'qvoanother'} with mock.patch.object(self.helper, 'mark_as_cleaned') as mock_mark: self.helper.cleanup_port({'device': 'port'}) self.assertIsNotNone(self.helper.iptables_driver) self.assertFalse(mock_mark.called) def test_cleanup_port_unknown(self): self.helper.iptables_driver = mock.Mock() self.helper.hybrid_ports = {'qvoanother'} self.helper.cleanup_port({'device': 'port'}) self.assertFalse(self.helper.iptables_driver.remove_port_filter.called) class TestHybridIptablesHelper(base.BaseTestCase): def test_overloaded_remove_conntrack(self): with mock.patch.object(iptables_firewall.IptablesFirewallDriver, '_remove_conntrack_entries_from_port_deleted') as rcefpd, \ mock.patch("neutron.agent.linux.ip_conntrack.IpConntrackManager" "._populate_initial_zone_map"): firewall = iptables.get_iptables_driver_instance() firewall._remove_conntrack_entries_from_port_deleted(None) rcefpd.assert_not_called() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/linux/openvswitch_firewall/test_rules.py0000644000175000017500000005163100000000000032222 0ustar00coreycorey00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
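# A note on the port-range matches asserted later in this module (for
# example tcp_dst='0x0016/0xfffe'): OpenFlow encodes a port range as one
# or more value/mask pairs, where a pair matches every port whose bits
# equal the value wherever the mask has a 1 bit, so 0x0016/0xfffe covers
# ports 22-23 and 0x000a/0xfffe covers ports 10-11.  The helper below is
# only an illustration of how such pairs can be derived for a contiguous
# range; it is not the helper neutron itself uses.
def _illustrate_port_mask_pairs(port_min, port_max):
    """Return (value, mask) hex pairs covering [port_min, port_max]."""
    pairs = []
    while port_min <= port_max:
        # Take the largest aligned power-of-two block that starts at
        # port_min and still fits inside the range.
        size = 1
        while (port_min % (size * 2) == 0 and
               port_min + size * 2 - 1 <= port_max):
            size *= 2
        pairs.append(('0x%04x' % port_min,
                      '0x%04x' % (0xffff - (size - 1))))
        port_min += size
    return pairs

# _illustrate_port_mask_pairs(22, 23) == [('0x0016', '0xfffe')]
# _illustrate_port_mask_pairs(10, 11) == [('0x000a', '0xfffe')]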
import mock from neutron_lib import constants from neutron.agent.linux.openvswitch_firewall import constants as ovsfw_consts from neutron.agent.linux.openvswitch_firewall import firewall as ovsfw from neutron.agent.linux.openvswitch_firewall import rules from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \ as ovs_consts from neutron.tests import base TESTING_VLAN_TAG = 1 class TestIsValidPrefix(base.BaseTestCase): def test_valid_prefix_ipv4(self): is_valid = rules.is_valid_prefix('10.0.0.0/0') self.assertTrue(is_valid) def test_invalid_prefix_ipv4(self): is_valid = rules.is_valid_prefix('0.0.0.0/0') self.assertFalse(is_valid) def test_valid_prefix_ipv6(self): is_valid = rules.is_valid_prefix('ffff::0/0') self.assertTrue(is_valid) def test_invalid_prefix_ipv6(self): is_valid = rules.is_valid_prefix('0000:0::0/0') self.assertFalse(is_valid) is_valid = rules.is_valid_prefix('::0/0') self.assertFalse(is_valid) is_valid = rules.is_valid_prefix('::/0') self.assertFalse(is_valid) class TestCreateFlowsFromRuleAndPort(base.BaseTestCase): def setUp(self): super(TestCreateFlowsFromRuleAndPort, self).setUp() ovs_port = mock.Mock(vif_mac='00:00:00:00:00:00') ovs_port.ofport = 1 port_dict = {'device': 'port_id'} self.port = ovsfw.OFPort( port_dict, ovs_port, vlan_tag=TESTING_VLAN_TAG) self.create_flows_mock = mock.patch.object( rules, 'create_protocol_flows').start() @property def passed_flow_template(self): return self.create_flows_mock.call_args[0][1] def _test_create_flows_from_rule_and_port_helper( self, rule, expected_template): rules.create_flows_from_rule_and_port(rule, self.port) self.assertEqual(expected_template, self.passed_flow_template) def test_create_flows_from_rule_and_port_no_ip_ipv4(self): rule = { 'ethertype': constants.IPv4, 'direction': constants.INGRESS_DIRECTION, } expected_template = { 'priority': 74, 'dl_type': constants.ETHERTYPE_IP, 'reg_port': self.port.ofport, } self._test_create_flows_from_rule_and_port_helper(rule, expected_template) def test_create_flows_from_rule_and_port_src_and_dst_ipv4(self): rule = { 'ethertype': constants.IPv4, 'direction': constants.INGRESS_DIRECTION, 'source_ip_prefix': '192.168.0.0/24', 'dest_ip_prefix': '10.0.0.1/32', } expected_template = { 'priority': 74, 'dl_type': constants.ETHERTYPE_IP, 'reg_port': self.port.ofport, 'nw_src': '192.168.0.0/24', 'nw_dst': '10.0.0.1/32', } self._test_create_flows_from_rule_and_port_helper(rule, expected_template) def test_create_flows_from_rule_and_port_src_and_dst_with_zero_ipv4(self): rule = { 'ethertype': constants.IPv4, 'direction': constants.INGRESS_DIRECTION, 'source_ip_prefix': '192.168.0.0/24', 'dest_ip_prefix': '0.0.0.0/0', } expected_template = { 'priority': 74, 'dl_type': constants.ETHERTYPE_IP, 'reg_port': self.port.ofport, 'nw_src': '192.168.0.0/24', } self._test_create_flows_from_rule_and_port_helper(rule, expected_template) def test_create_flows_from_rule_and_port_no_ip_ipv6(self): rule = { 'ethertype': constants.IPv6, 'direction': constants.INGRESS_DIRECTION, } expected_template = { 'priority': 74, 'dl_type': constants.ETHERTYPE_IPV6, 'reg_port': self.port.ofport, } self._test_create_flows_from_rule_and_port_helper(rule, expected_template) def test_create_flows_from_rule_and_port_src_and_dst_ipv6(self): rule = { 'ethertype': constants.IPv6, 'direction': constants.INGRESS_DIRECTION, 'source_ip_prefix': '2001:db8:bbbb::1/64', 'dest_ip_prefix': '2001:db8:aaaa::1/64', } expected_template = { 'priority': 74, 'dl_type': constants.ETHERTYPE_IPV6, 'reg_port': self.port.ofport, 
'ipv6_src': '2001:db8:bbbb::1/64', 'ipv6_dst': '2001:db8:aaaa::1/64', } self._test_create_flows_from_rule_and_port_helper(rule, expected_template) def test_create_flows_from_rule_and_port_src_and_dst_with_zero_ipv6(self): rule = { 'ethertype': constants.IPv6, 'direction': constants.INGRESS_DIRECTION, 'source_ip_prefix': '2001:db8:bbbb::1/64', 'dest_ip_prefix': '::/0', } expected_template = { 'priority': 74, 'dl_type': constants.ETHERTYPE_IPV6, 'reg_port': self.port.ofport, 'ipv6_src': '2001:db8:bbbb::1/64', } self._test_create_flows_from_rule_and_port_helper(rule, expected_template) class TestCreateProtocolFlows(base.BaseTestCase): def setUp(self): super(TestCreateProtocolFlows, self).setUp() ovs_port = mock.Mock(vif_mac='00:00:00:00:00:00') ovs_port.ofport = 1 port_dict = {'device': 'port_id'} self.port = ovsfw.OFPort( port_dict, ovs_port, vlan_tag=TESTING_VLAN_TAG) def _test_create_protocol_flows_helper(self, direction, rule, expected_flows): flow_template = {'some_settings': 'foo'} for flow in expected_flows: flow.update(flow_template) flows = rules.create_protocol_flows( direction, flow_template, self.port, rule) self.assertEqual(expected_flows, flows) def test_create_protocol_flows_ingress(self): rule = {'protocol': constants.PROTO_NUM_TCP} expected_flows = [{ 'table': ovs_consts.RULES_INGRESS_TABLE, 'actions': 'output:1', 'nw_proto': constants.PROTO_NUM_TCP, }] self._test_create_protocol_flows_helper( constants.INGRESS_DIRECTION, rule, expected_flows) def test_create_protocol_flows_egress(self): rule = {'protocol': constants.PROTO_NUM_TCP} expected_flows = [{ 'table': ovs_consts.RULES_EGRESS_TABLE, 'actions': 'resubmit(,{:d})'.format( ovs_consts.ACCEPT_OR_INGRESS_TABLE), 'nw_proto': constants.PROTO_NUM_TCP, }] self._test_create_protocol_flows_helper( constants.EGRESS_DIRECTION, rule, expected_flows) def test_create_protocol_flows_no_protocol(self): rule = {} expected_flows = [{ 'table': ovs_consts.RULES_EGRESS_TABLE, 'actions': 'resubmit(,{:d})'.format( ovs_consts.ACCEPT_OR_INGRESS_TABLE), }] self._test_create_protocol_flows_helper( constants.EGRESS_DIRECTION, rule, expected_flows) def test_create_protocol_flows_icmp6(self): rule = {'ethertype': constants.IPv6, 'protocol': constants.PROTO_NUM_IPV6_ICMP} expected_flows = [{ 'table': ovs_consts.RULES_EGRESS_TABLE, 'actions': 'resubmit(,{:d})'.format( ovs_consts.ACCEPT_OR_INGRESS_TABLE), 'nw_proto': constants.PROTO_NUM_IPV6_ICMP, }] self._test_create_protocol_flows_helper( constants.EGRESS_DIRECTION, rule, expected_flows) def test_create_protocol_flows_port_range(self): rule = {'ethertype': constants.IPv4, 'protocol': constants.PROTO_NUM_TCP, 'port_range_min': 22, 'port_range_max': 23} expected_flows = [{ 'table': ovs_consts.RULES_EGRESS_TABLE, 'actions': 'resubmit(,{:d})'.format( ovs_consts.ACCEPT_OR_INGRESS_TABLE), 'nw_proto': constants.PROTO_NUM_TCP, 'tcp_dst': '0x0016/0xfffe' }] self._test_create_protocol_flows_helper( constants.EGRESS_DIRECTION, rule, expected_flows) def test_create_protocol_flows_icmp(self): rule = {'ethertype': constants.IPv4, 'protocol': constants.PROTO_NUM_ICMP, 'port_range_min': 0} expected_flows = [{ 'table': ovs_consts.RULES_EGRESS_TABLE, 'actions': 'resubmit(,{:d})'.format( ovs_consts.ACCEPT_OR_INGRESS_TABLE), 'nw_proto': constants.PROTO_NUM_ICMP, 'icmp_type': 0 }] self._test_create_protocol_flows_helper( constants.EGRESS_DIRECTION, rule, expected_flows) def test_create_protocol_flows_ipv6_icmp(self): rule = {'ethertype': constants.IPv6, 'protocol': constants.PROTO_NUM_IPV6_ICMP, 'port_range_min': 5, 
'port_range_max': 0} expected_flows = [{ 'table': ovs_consts.RULES_EGRESS_TABLE, 'actions': 'resubmit(,{:d})'.format( ovs_consts.ACCEPT_OR_INGRESS_TABLE), 'nw_proto': constants.PROTO_NUM_IPV6_ICMP, 'icmp_type': 5, 'icmp_code': 0, }] self._test_create_protocol_flows_helper( constants.EGRESS_DIRECTION, rule, expected_flows) class TestCreatePortRangeFlows(base.BaseTestCase): def _test_create_port_range_flows_helper(self, expected_flows, rule): flow_template = {'some_settings': 'foo'} for flow in expected_flows: flow.update(flow_template) port_range_flows = rules.create_port_range_flows(flow_template, rule) self.assertEqual(expected_flows, port_range_flows) def test_create_port_range_flows_with_source_and_destination(self): rule = { 'protocol': constants.PROTO_NUM_TCP, 'source_port_range_min': 123, 'source_port_range_max': 124, 'port_range_min': 10, 'port_range_max': 11, } expected_flows = [ {'tcp_src': '0x007b', 'tcp_dst': '0x000a/0xfffe'}, {'tcp_src': '0x007c', 'tcp_dst': '0x000a/0xfffe'}, ] self._test_create_port_range_flows_helper(expected_flows, rule) def test_create_port_range_flows_with_source(self): rule = { 'protocol': constants.PROTO_NUM_TCP, 'source_port_range_min': 123, 'source_port_range_max': 124, } expected_flows = [ {'tcp_src': '0x007b'}, {'tcp_src': '0x007c'}, ] self._test_create_port_range_flows_helper(expected_flows, rule) def test_create_port_range_flows_with_destination(self): rule = { 'protocol': constants.PROTO_NUM_TCP, 'port_range_min': 10, 'port_range_max': 11, } expected_flows = [ {'tcp_dst': '0x000a/0xfffe'}, ] self._test_create_port_range_flows_helper(expected_flows, rule) def test_create_port_range_flows_without_port_range(self): rule = { 'protocol': constants.PROTO_NUM_TCP, } expected_flows = [] self._test_create_port_range_flows_helper(expected_flows, rule) def test_create_port_range_with_icmp_protocol(self): # NOTE: such a call is prevented by create_protocol_flows rule = { 'protocol': constants.PROTO_NUM_ICMP, 'port_range_min': 10, 'port_range_max': 11, } expected_flows = [] self._test_create_port_range_flows_helper(expected_flows, rule) class TestCreateFlowsForIpAddress(base.BaseTestCase): def _generate_conjunction_actions(self, conj_ids, offset): return ','.join( ["conjunction(%d,1/2)" % (c + offset) for c in conj_ids]) def test_create_flows_for_ip_address_egress(self): expected_template = { 'table': ovs_consts.RULES_EGRESS_TABLE, 'priority': 72, 'dl_type': constants.ETHERTYPE_IP, 'reg_net': 0x123, 'nw_dst': '192.168.0.1/32' } conj_ids = [12, 20] flows = rules.create_flows_for_ip_address( '192.168.0.1', constants.EGRESS_DIRECTION, constants.IPv4, 0x123, conj_ids) self.assertEqual(2, len(flows)) self.assertEqual(ovsfw_consts.OF_STATE_ESTABLISHED_NOT_REPLY, flows[0]['ct_state']) self.assertEqual(ovsfw_consts.OF_STATE_NEW_NOT_ESTABLISHED, flows[1]['ct_state']) for i in range(2): self.assertEqual(self._generate_conjunction_actions(conj_ids, i), flows[i]['actions']) for f in flows: del f['actions'] del f['ct_state'] self.assertEqual(expected_template, f) class TestCreateConjFlows(base.BaseTestCase): def test_create_conj_flows(self): ovs_port = mock.Mock(ofport=1, vif_mac='00:00:00:00:00:00') port_dict = {'device': 'port_id'} port = ovsfw.OFPort( port_dict, ovs_port, vlan_tag=TESTING_VLAN_TAG) conj_id = 1234 expected_template = { 'table': ovs_consts.RULES_INGRESS_TABLE, 'dl_type': constants.ETHERTYPE_IPV6, 'priority': 71, 'conj_id': conj_id, 'reg_port': port.ofport } flows = rules.create_conj_flows(port, conj_id, constants.INGRESS_DIRECTION, constants.IPv6) 
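        # (What the assertions below pin down: create_conj_flows returns a
        #  pair of flows per conjunction, one matching established not-reply
        #  traffic that simply outputs to the port, and one matching new
        #  not-established traffic that also commits the connection to
        #  conntrack; the pair consume conj_id and conj_id + 1 respectively,
        #  which is why expected_template's conj_id is bumped inside the
        #  final loop.)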
self.assertEqual(ovsfw_consts.OF_STATE_ESTABLISHED_NOT_REPLY, flows[0]['ct_state']) self.assertEqual(ovsfw_consts.OF_STATE_NEW_NOT_ESTABLISHED, flows[1]['ct_state']) self.assertEqual("output:{:d}".format(port.ofport), flows[0]['actions']) self.assertEqual("ct(commit,zone=NXM_NX_REG{:d}[0..15]),{:s}," "resubmit(,{:d})".format( ovsfw_consts.REG_NET, flows[0]['actions'], ovs_consts.ACCEPTED_INGRESS_TRAFFIC_TABLE), flows[1]['actions']) for f in flows: del f['actions'] del f['ct_state'] self.assertEqual(expected_template, f) expected_template['conj_id'] += 1 class TestMergeRules(base.BaseTestCase): def setUp(self): super(TestMergeRules, self).setUp() self.rule_tmpl = [('direction', 'ingress'), ('ethertype', 'IPv4'), ('protocol', 6)] def _test_merge_port_ranges_helper(self, expected, result): """Take a list of (port_range_min, port_range_max, conj_ids) and an output from rules.merge_port_ranges and check if they are identical, ignoring the other rule fields. """ self.assertEqual(len(expected), len(result)) for (range_min, range_max, conj_ids), result1 in zip( expected, result): self.assertEqual(range_min, result1[0].get('port_range_min')) self.assertEqual(range_max, result1[0].get('port_range_max')) self.assertEqual(conj_ids, set(result1[1])) def test__assert_mergeable_rules(self): self.assertRaises(RuntimeError, rules._assert_mergeable_rules, [({'direction': 'ingress', 'ethertype': 'IPv4', 'protocol': 1}, 8), ({'direction': 'ingress', 'ethertype': 'IPv6'}, 16)]) def test_merge_common_rules_single(self): rule_conj_tuple = ({'direction': 'egress', 'ethertype': 'IPv4', 'protocol': 1}, 8) result = rules.merge_common_rules([rule_conj_tuple]) self.assertEqual([(rule_conj_tuple[0], [rule_conj_tuple[1]])], result) def test_merge_common_rules(self): rule_conj_list = [({'direction': 'ingress', 'ethertype': 'IPv4', 'protocol': 1}, 8), ({'direction': 'ingress', 'ethertype': 'IPv4', 'protocol': 1, 'port_range_min': 3}, 16), ({'direction': 'ingress', 'ethertype': 'IPv4', 'protocol': 1, 'port_range_min': 3, 'port_range_max': 0}, 40), ({'direction': 'ingress', 'ethertype': 'IPv4', 'protocol': 1}, 24)] result = rules.merge_common_rules(rule_conj_list) self.assertItemsEqual( [({'direction': 'ingress', 'ethertype': 'IPv4', 'protocol': 1}, [8, 24]), ({'direction': 'ingress', 'ethertype': 'IPv4', 'protocol': 1, 'port_range_min': 3}, [16]), ({'direction': 'ingress', 'ethertype': 'IPv4', 'protocol': 1, 'port_range_min': 3, 'port_range_max': 0}, [40])], result) def test_merge_port_ranges_overlapping(self): result = rules.merge_port_ranges( [(dict([('port_range_min', 20), ('port_range_max', 30)] + self.rule_tmpl), 6), (dict([('port_range_min', 30), ('port_range_max', 40)] + self.rule_tmpl), 14), (dict([('port_range_min', 35), ('port_range_max', 40)] + self.rule_tmpl), 22), (dict([('port_range_min', 20), ('port_range_max', 20)] + self.rule_tmpl), 30)]) self._test_merge_port_ranges_helper([ # port_range_min, port_range_max, conj_ids (20, 20, {6, 30}), (21, 29, {6}), (30, 30, {6, 14}), (31, 34, {14}), (35, 40, {14, 22})], result) def test_merge_port_ranges_no_port_ranges(self): result = rules.merge_port_ranges( [(dict(self.rule_tmpl), 10), (dict(self.rule_tmpl), 12), (dict([('port_range_min', 30), ('port_range_max', 40)] + self.rule_tmpl), 4)]) self._test_merge_port_ranges_helper([ (1, 29, {10, 12}), (30, 40, {10, 12, 4}), (41, 65535, {10, 12})], result) def test_merge_port_ranges_no_port_ranges_same_conj_id(self): result = rules.merge_port_ranges( [(dict(self.rule_tmpl), 10), (dict(self.rule_tmpl), 12), 
(dict([('port_range_min', 30), ('port_range_max', 30)] + self.rule_tmpl), 10)]) self._test_merge_port_ranges_helper([ (None, None, {10, 12})], result) def test_merge_port_ranges_nonoverlapping(self): result = rules.merge_port_ranges( [(dict([('port_range_min', 30), ('port_range_max', 40)] + self.rule_tmpl), 32), (dict([('port_range_min', 100), ('port_range_max', 140)] + self.rule_tmpl), 40)]) self._test_merge_port_ranges_helper( [(30, 40, {32}), (100, 140, {40})], result) class TestFlowPriority(base.BaseTestCase): def test_flow_priority_offset(self): self.assertEqual(0, rules.flow_priority_offset( {'foo': 'bar', 'remote_group_id': 'hoge'})) self.assertEqual(4, rules.flow_priority_offset({'foo': 'bar'})) self.assertEqual(5, rules.flow_priority_offset( {'protocol': constants.PROTO_NUM_ICMP})) self.assertEqual(7, rules.flow_priority_offset( {'protocol': constants.PROTO_NUM_TCP})) self.assertEqual(6, rules.flow_priority_offset( {'protocol': constants.PROTO_NUM_ICMP, 'port_range_min': 0})) self.assertEqual(7, rules.flow_priority_offset( {'protocol': constants.PROTO_NUM_IPV6_ICMP, 'port_range_min': 0, 'port_range_max': 0})) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/linux/test_bridge_lib.py0000644000175000017500000001164400000000000026714 0ustar00coreycorey00000000000000# Copyright 2015 Intel Corporation. # Copyright 2015 Isaku Yamahata # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
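# For orientation, the BridgeDevice operations exercised below map onto
# iproute2 commands run in the device's namespace; the exact argument
# lists are asserted in _test_br.  A hedged usage sketch (the bridge and
# interface names are made up):
#
#     br = bridge_lib.BridgeDevice.addbr('test-br')  # create the bridge
#     br.setfd(0)          # ip link set ... type bridge forward_delay 0
#     br.disable_stp()     # ip link set ... type bridge stp_state 0
#     br.addif('test-if')  # ip link set dev test-if master test-br
#     br.delif('test-if')  # ip link set dev test-if nomaster
#     br.delbr()           # delete the bridge again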
import mock from oslo_utils import netutils from neutron.agent.linux import bridge_lib from neutron.privileged.agent.linux import ip_lib as priv_lib from neutron.tests import base class BridgeLibTest(base.BaseTestCase): """A test suite to exercise the bridge libraries """ _NAMESPACE = 'test-namespace' _BR_NAME = 'test-br' _IF_NAME = 'test-if' def setUp(self): super(BridgeLibTest, self).setUp() mock.patch.object(netutils, 'is_ipv6_enabled', return_value=True).start() ip_wrapper = mock.patch('neutron.agent.linux.ip_lib.IPWrapper').start() self.execute = ip_wrapper.return_value.netns.execute self.create_p = mock.patch.object(priv_lib, 'create_interface') self.create = self.create_p.start() self.delete_p = mock.patch.object(priv_lib, 'delete_interface') self.delete = self.delete_p.start() def _verify_bridge_mock(self, cmd): self.execute.assert_called_once_with(cmd, run_as_root=True) self.execute.reset_mock() def _verify_bridge_sysctl_mock(self, cmd): self.execute.assert_called_once_with(cmd, run_as_root=True, log_fail_as_error=True) self.execute.reset_mock() def test_is_bridged_interface(self): exists = lambda path: path == "/sys/class/net/tapOK/brport" with mock.patch('os.path.exists', side_effect=exists): self.assertTrue(bridge_lib.is_bridged_interface("tapOK")) self.assertFalse(bridge_lib.is_bridged_interface("tapKO")) def test_get_interface_bridge(self): with mock.patch('os.readlink', side_effect=["prefix/br0", OSError()]): br = bridge_lib.BridgeDevice.get_interface_bridge('tap0') self.assertIsInstance(br, bridge_lib.BridgeDevice) self.assertEqual("br0", br.name) br = bridge_lib.BridgeDevice.get_interface_bridge('tap0') self.assertIsNone(br) def _test_br(self, namespace=None): br = bridge_lib.BridgeDevice.addbr(self._BR_NAME, namespace) self.assertEqual(namespace, br.namespace) self.create.assert_called_once_with(self._BR_NAME, br.namespace, 'bridge') br.setfd(0) self._verify_bridge_mock(['ip', 'link', 'set', 'dev', self._BR_NAME, 'type', 'bridge', 'forward_delay', '0']) br.disable_stp() self._verify_bridge_mock(['ip', 'link', 'set', 'dev', self._BR_NAME, 'type', 'bridge', 'stp_state', 0]) br.disable_ipv6() cmd = 'net.ipv6.conf.%s.disable_ipv6=1' % self._BR_NAME self._verify_bridge_sysctl_mock(['sysctl', '-w', cmd]) br.addif(self._IF_NAME) self._verify_bridge_mock( ['ip', 'link', 'set', 'dev', self._IF_NAME, 'master', self._BR_NAME]) br.delif(self._IF_NAME) self._verify_bridge_mock( ['ip', 'link', 'set', 'dev', self._IF_NAME, 'nomaster']) br.delbr() self.delete.assert_called_once_with(self._BR_NAME, br.namespace) def test_addbr_with_namespace(self): self._test_br(self._NAMESPACE) def test_addbr_without_namespace(self): self._test_br() def test_addbr_exists(self): self.create.side_effect = RuntimeError() with mock.patch.object(bridge_lib.BridgeDevice, 'exists', return_value=True): bridge_lib.BridgeDevice.addbr(self._BR_NAME) bridge_lib.BridgeDevice.addbr(self._BR_NAME) def test_owns_interface(self): br = bridge_lib.BridgeDevice('br-int') exists = lambda path: path == "/sys/class/net/br-int/brif/abc" with mock.patch('os.path.exists', side_effect=exists): self.assertTrue(br.owns_interface("abc")) self.assertFalse(br.owns_interface("def")) def test_get_interfaces(self): br = bridge_lib.BridgeDevice('br-int') interfaces = ["tap1", "tap2"] with mock.patch('os.listdir', side_effect=[interfaces, OSError()]): self.assertEqual(interfaces, br.get_interfaces()) self.assertEqual([], br.get_interfaces()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/linux/test_daemon.py0000644000175000017500000002745000000000000026077 0ustar00coreycorey00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from logging import handlers import os import sys import mock from neutron_lib import exceptions from neutron_lib import fixture as lib_fixtures import testtools from neutron.agent.linux import daemon from neutron.tests import base FAKE_FD = 8 class FakeEntry(object): def __init__(self, name, value): setattr(self, name, value) class TestUnwatchLog(base.BaseTestCase): def setUp(self): super(TestUnwatchLog, self).setUp() self.temp_file = self.get_temp_file_path('unwatch_log_temp_file') def test_unwatch_log(self): stream_handler = logging.StreamHandler() logger = logging.Logger('fake') logger.addHandler(stream_handler) logger.addHandler(handlers.WatchedFileHandler(self.temp_file)) with mock.patch('logging.getLogger', return_value=logger): daemon.unwatch_log() self.assertEqual(2, len(logger.handlers)) logger.handlers.remove(stream_handler) observed = logger.handlers[0] self.assertEqual(logging.FileHandler, type(observed)) self.assertEqual(self.temp_file, observed.baseFilename) class TestPrivileges(base.BaseTestCase): def test_setuid_with_name(self): with mock.patch('pwd.getpwnam', return_value=FakeEntry('pw_uid', 123)): with mock.patch('os.setuid') as setuid_mock: daemon.setuid('user') setuid_mock.assert_called_once_with(123) def test_setuid_with_id(self): with mock.patch('os.setuid') as setuid_mock: daemon.setuid('321') setuid_mock.assert_called_once_with(321) def test_setuid_fails(self): with mock.patch('os.setuid', side_effect=OSError()): with mock.patch.object(daemon.LOG, 'critical') as log_critical: self.assertRaises(exceptions.FailToDropPrivilegesExit, daemon.setuid, '321') log_critical.assert_called_once_with(mock.ANY) def test_setgid_with_name(self): with mock.patch('grp.getgrnam', return_value=FakeEntry('gr_gid', 123)): with mock.patch('os.setgid') as setgid_mock: daemon.setgid('group') setgid_mock.assert_called_once_with(123) def test_setgid_with_id(self): with mock.patch('os.setgid') as setgid_mock: daemon.setgid('321') setgid_mock.assert_called_once_with(321) def test_setgid_fails(self): with mock.patch('os.setgid', side_effect=OSError()): with mock.patch.object(daemon.LOG, 'critical') as log_critical: self.assertRaises(exceptions.FailToDropPrivilegesExit, daemon.setgid, '321') log_critical.assert_called_once_with(mock.ANY) @mock.patch.object(os, 'setgroups') @mock.patch.object(daemon, 'setgid') @mock.patch.object(daemon, 'setuid') def test_drop_no_privileges(self, mock_setuid, mock_setgid, mock_setgroups): daemon.drop_privileges() for cursor in (mock_setuid, mock_setgid, mock_setgroups): self.assertFalse(cursor.called) @mock.patch.object(os, 'geteuid', return_value=0) @mock.patch.object(os, 'setgroups') @mock.patch.object(daemon, 'setgid') @mock.patch.object(daemon, 'setuid') def _test_drop_privileges(self, 
setuid, setgid, setgroups, geteuid, user=None, group=None): daemon.drop_privileges(user=user, group=group) if user: setuid.assert_called_once_with(user) else: self.assertFalse(setuid.called) if group: setgroups.assert_called_once_with([]) setgid.assert_called_once_with(group) else: self.assertFalse(setgroups.called) self.assertFalse(setgid.called) def test_drop_user_privileges(self): self._test_drop_privileges(user='user') def test_drop_uid_privileges(self): self._test_drop_privileges(user='321') def test_drop_group_privileges(self): self._test_drop_privileges(group='group') def test_drop_gid_privileges(self): self._test_drop_privileges(group='654') def test_drop_privileges_without_root_permissions(self): with mock.patch('os.geteuid', return_value=1): with mock.patch.object(daemon.LOG, 'critical') as log_critical: self.assertRaises(exceptions.FailToDropPrivilegesExit, daemon.drop_privileges, 'user') log_critical.assert_called_once_with(mock.ANY) class TestPidfile(base.BaseTestCase): def setUp(self): super(TestPidfile, self).setUp() self.os_p = mock.patch.object(daemon, 'os') self.os = self.os_p.start() self.os.open.return_value = FAKE_FD self.fcntl_p = mock.patch.object(daemon, 'fcntl') self.fcntl = self.fcntl_p.start() self.fcntl.flock.return_value = 0 def test_init(self): self.os.O_CREAT = os.O_CREAT self.os.O_RDWR = os.O_RDWR daemon.Pidfile('thefile', sys.executable) self.os.open.assert_called_once_with('thefile', os.O_CREAT | os.O_RDWR) self.fcntl.flock.assert_called_once_with(FAKE_FD, self.fcntl.LOCK_EX | self.fcntl.LOCK_NB) def test_init_open_fail(self): self.os.open.side_effect = IOError with mock.patch.object(daemon.sys, 'stderr'): with testtools.ExpectedException(SystemExit): daemon.Pidfile('thefile', sys.executable) sys.assert_has_calls([ mock.call.stderr.write(mock.ANY), mock.call.exit(1)] ) def test_unlock(self): p = daemon.Pidfile('thefile', sys.executable) p.unlock() self.fcntl.flock.assert_has_calls([ mock.call(FAKE_FD, self.fcntl.LOCK_EX | self.fcntl.LOCK_NB), mock.call(FAKE_FD, self.fcntl.LOCK_UN)] ) def test_write(self): p = daemon.Pidfile('thefile', sys.executable) p.write(34) self.os.assert_has_calls([ mock.call.ftruncate(FAKE_FD, 0), mock.call.write(FAKE_FD, b'34'), mock.call.fsync(FAKE_FD)] ) def test_read(self): self.os.read.return_value = '34' p = daemon.Pidfile('thefile', sys.executable) self.assertEqual(34, p.read()) def test_is_running(self): mock_open = self.useFixture( lib_fixtures.OpenFixture('/proc/34/cmdline', sys.executable)).mock_open p = daemon.Pidfile('thefile', sys.executable) with mock.patch.object(p, 'read') as read: read.return_value = 34 self.assertTrue(p.is_running()) mock_open.assert_called_once_with('/proc/34/cmdline', 'r') def test_is_running_uuid_true(self): mock_open = self.useFixture( lib_fixtures.OpenFixture( '/proc/34/cmdline', '{} 1234'.format( sys.executable))).mock_open p = daemon.Pidfile('thefile', sys.executable, uuid='1234') with mock.patch.object(p, 'read') as read: read.return_value = 34 self.assertTrue(p.is_running()) mock_open.assert_called_once_with('/proc/34/cmdline', 'r') def test_is_running_uuid_false(self): mock_open = self.useFixture( lib_fixtures.OpenFixture( '/proc/34/cmdline', '{} 1234'.format( sys.executable))).mock_open p = daemon.Pidfile('thefile', sys.executable, uuid='6789') with mock.patch.object(p, 'read') as read: read.return_value = 34 self.assertFalse(p.is_running()) mock_open.assert_called_once_with('/proc/34/cmdline', 'r') class TestDaemon(base.BaseTestCase): def setUp(self): super(TestDaemon, self).setUp() 
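        # (These tests stub out the os module and Pidfile wholesale, so
        #  test_daemonize can assert the classic daemonization sequence,
        #  namely forking twice, setsid(), umask(0), chdir('/') and dup2()
        #  onto stdin/stdout/stderr, without ever forking for real.)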
self.os_p = mock.patch.object(daemon, 'os') self.os = self.os_p.start() self.pidfile_p = mock.patch.object(daemon, 'Pidfile') self.pidfile = self.pidfile_p.start() def test_init(self): d = daemon.Daemon('pidfile') self.assertEqual(d.procname, sys.executable) def test_init_nopidfile(self): d = daemon.Daemon(pidfile=None) self.assertEqual(d.procname, sys.executable) self.assertFalse(self.pidfile.called) def test_fork_parent(self): self.os.fork.return_value = 1 d = daemon.Daemon('pidfile') d._fork() self.os._exit.assert_called_once_with(mock.ANY) def test_fork_child(self): self.os.fork.return_value = 0 d = daemon.Daemon('pidfile') self.assertIsNone(d._fork()) def test_fork_error(self): self.os.fork.side_effect = OSError(1) with mock.patch.object(daemon.sys, 'stderr'): with testtools.ExpectedException(SystemExit): d = daemon.Daemon('pidfile', 'stdin') d._fork() def test_daemonize(self): self.os.devnull = '/dev/null' d = daemon.Daemon('pidfile') with mock.patch.object(d, '_fork') as fork: with mock.patch.object(daemon, 'atexit') as atexit: with mock.patch.object(daemon, 'signal') as signal: signal.SIGTERM = 15 with mock.patch.object(daemon, 'sys') as sys: sys.stdin.fileno.return_value = 0 sys.stdout.fileno.return_value = 1 sys.stderr.fileno.return_value = 2 d.daemonize() signal.signal.assert_called_once_with(15, d.handle_sigterm) atexit.register.assert_called_once_with(d.delete_pid) fork.assert_has_calls([mock.call(), mock.call()]) self.os.assert_has_calls([ mock.call.chdir('/'), mock.call.setsid(), mock.call.umask(0), mock.call.dup2(mock.ANY, 0), mock.call.dup2(mock.ANY, 1), mock.call.dup2(mock.ANY, 2), mock.call.getpid()] ) def test_delete_pid(self): self.pidfile.return_value.__str__.return_value = 'pidfile' d = daemon.Daemon('pidfile') d.delete_pid() self.os.remove.assert_called_once_with('pidfile') def test_handle_sigterm(self): d = daemon.Daemon('pidfile') with mock.patch.object(daemon, 'sys') as sys: d.handle_sigterm(15, 1234) sys.exit.assert_called_once_with(0) def test_start(self): self.pidfile.return_value.is_running.return_value = False d = daemon.Daemon('pidfile') with mock.patch.object(d, 'daemonize') as daemonize: with mock.patch.object(d, 'run') as run: d.start() run.assert_called_once_with() daemonize.assert_called_once_with() def test_start_running(self): self.pidfile.return_value.is_running.return_value = True d = daemon.Daemon('pidfile') with mock.patch.object(daemon.sys, 'stderr'): with mock.patch.object(d, 'daemonize') as daemonize: with testtools.ExpectedException(SystemExit): d.start() self.assertFalse(daemonize.called) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/linux/test_dhcp.py0000644000175000017500000042630100000000000025550 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
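# (A note on the Fake* fixtures that fill the first part of this module:
#  they stand in for neutron port and allocation objects so the DHCP
#  agent's configuration handling can be exercised without a database.
#  When a port has no explicit dns_name, FakeDNSAssignment mirrors the
#  agent's convention of deriving a hostname from the address, e.g.
#  '192.168.0.2' becomes 'host-192-168-0-2' with FQDN
#  'host-192-168-0-2.openstacklocal.'.)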
import copy import os import mock import netaddr from neutron_lib.api.definitions import extra_dhcp_opt as edo_ext from neutron_lib import constants from neutron_lib import exceptions from neutron_lib import fixture as lib_fixtures from oslo_config import cfg import oslo_messaging from oslo_utils import fileutils from oslo_utils import uuidutils import testtools from neutron.agent.linux import dhcp from neutron.agent.linux import ip_lib from neutron.conf.agent import common as config from neutron.conf.agent import dhcp as dhcp_config from neutron.conf import common as base_config from neutron.tests import base class FakeIPAllocation(object): def __init__(self, address, subnet_id=None): self.ip_address = address self.subnet_id = subnet_id class FakeDNSAssignment(object): def __init__(self, ip_address, dns_name='', domain='openstacklocal'): if dns_name: self.hostname = dns_name else: self.hostname = 'host-%s' % ip_address.replace( '.', '-').replace(':', '-') self.ip_address = ip_address self.fqdn = self.hostname if domain: self.fqdn = '%s.%s.' % (self.hostname, domain) class DhcpOpt(object): def __init__(self, **kwargs): self.__dict__.update(ip_version=constants.IP_VERSION_4) self.__dict__.update(kwargs) def __str__(self): return str(self.__dict__) # A base class where class attributes can also be accessed by treating # an instance as a dict. class Dictable(object): def __getitem__(self, k): return self.__class__.__dict__.get(k) class FakeDhcpPort(object): def __init__(self): self.id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaa' self.admin_state_up = True self.device_owner = constants.DEVICE_OWNER_DHCP self.fixed_ips = [ FakeIPAllocation('192.168.0.1', 'dddddddd-dddd-dddd-dddd-dddddddddddd')] self.mac_address = '00:00:80:aa:bb:ee' self.device_id = 'fake_dhcp_port' self.extra_dhcp_opts = [] class FakeReservedPort(object): def __init__(self, id='reserved-aaaa-aaaa-aaaa-aaaaaaaaaaa'): self.admin_state_up = True self.device_owner = constants.DEVICE_OWNER_DHCP self.fixed_ips = [ FakeIPAllocation('192.168.0.6', 'dddddddd-dddd-dddd-dddd-dddddddddddd'), FakeIPAllocation('fdca:3ba5:a17a:4ba3::2', 'ffffffff-ffff-ffff-ffff-ffffffffffff')] self.mac_address = '00:00:80:aa:bb:ee' self.device_id = constants.DEVICE_ID_RESERVED_DHCP_PORT self.extra_dhcp_opts = [] self.id = id class FakePort1(object): def __init__(self, domain='openstacklocal'): self.id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' self.admin_state_up = True self.device_owner = 'foo1' self.fixed_ips = [ FakeIPAllocation('192.168.0.2', 'dddddddd-dddd-dddd-dddd-dddddddddddd')] self.mac_address = '00:00:80:aa:bb:cc' self.device_id = 'fake_port1' self.extra_dhcp_opts = [] self.dns_assignment = [FakeDNSAssignment('192.168.0.2', domain=domain)] class FakePort2(object): def __init__(self): self.id = 'ffffffff-ffff-ffff-ffff-ffffffffffff' self.admin_state_up = False self.device_owner = 'foo2' self.fixed_ips = [ FakeIPAllocation('192.168.0.3', 'dddddddd-dddd-dddd-dddd-dddddddddddd')] self.mac_address = '00:00:f3:aa:bb:cc' self.device_id = 'fake_port2' self.dns_assignment = [FakeDNSAssignment('192.168.0.3')] self.extra_dhcp_opts = [] class FakePort3(object): def __init__(self): self.id = '44444444-4444-4444-4444-444444444444' self.admin_state_up = True self.device_owner = 'foo3' self.fixed_ips = [ FakeIPAllocation('192.168.0.4', 'dddddddd-dddd-dddd-dddd-dddddddddddd'), FakeIPAllocation('192.168.1.2', 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee')] self.dns_assignment = [FakeDNSAssignment('192.168.0.4'), FakeDNSAssignment('192.168.1.2')] self.mac_address = 
'00:00:0f:aa:bb:cc' self.device_id = 'fake_port3' self.extra_dhcp_opts = [] class FakePort4(object): def __init__(self): self.id = 'gggggggg-gggg-gggg-gggg-gggggggggggg' self.admin_state_up = False self.device_owner = 'foo3' self.fixed_ips = [ FakeIPAllocation('192.168.0.4', 'dddddddd-dddd-dddd-dddd-dddddddddddd'), FakeIPAllocation('ffda:3ba5:a17a:4ba3:0216:3eff:fec2:771d', 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee')] self.dns_assignment = [ FakeDNSAssignment('192.168.0.4'), FakeDNSAssignment('ffda:3ba5:a17a:4ba3:0216:3eff:fec2:771d')] self.mac_address = '00:16:3E:C2:77:1D' self.device_id = 'fake_port4' self.extra_dhcp_opts = [] class FakePort5(object): def __init__(self): self.id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeee' self.admin_state_up = True self.device_owner = 'foo5' self.fixed_ips = [ FakeIPAllocation('192.168.0.5', 'dddddddd-dddd-dddd-dddd-dddddddddddd')] self.dns_assignment = [FakeDNSAssignment('192.168.0.5')] self.mac_address = '00:00:0f:aa:bb:55' self.device_id = 'fake_port5' self.extra_dhcp_opts = [ DhcpOpt(opt_name=edo_ext.DHCP_OPT_CLIENT_ID, opt_value='test5')] class FakePort6(object): def __init__(self): self.id = 'ccccccccc-cccc-cccc-cccc-ccccccccc' self.admin_state_up = True self.device_owner = 'foo6' self.fixed_ips = [ FakeIPAllocation('192.168.0.6', 'dddddddd-dddd-dddd-dddd-dddddddddddd')] self.dns_assignment = [FakeDNSAssignment('192.168.0.6')] self.mac_address = '00:00:0f:aa:bb:66' self.device_id = 'fake_port6' self.extra_dhcp_opts = [ DhcpOpt(opt_name=edo_ext.DHCP_OPT_CLIENT_ID, opt_value='test6', ip_version=constants.IP_VERSION_4), DhcpOpt(opt_name='dns-server', opt_value='123.123.123.45', ip_version=constants.IP_VERSION_4)] class FakeV6Port(object): def __init__(self, domain='openstacklocal'): self.id = 'hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh' self.admin_state_up = True self.device_owner = 'foo3' self.fixed_ips = [ FakeIPAllocation('fdca:3ba5:a17a:4ba3::2', 'ffffffff-ffff-ffff-ffff-ffffffffffff')] self.mac_address = '00:00:f3:aa:bb:cc' self.device_id = 'fake_port6' self.extra_dhcp_opts = [] self.dns_assignment = [FakeDNSAssignment('fdca:3ba5:a17a:4ba3::2', domain=domain)] class FakeV6PortExtraOpt(object): def __init__(self): self.id = 'hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh' self.admin_state_up = True self.device_owner = 'foo3' self.fixed_ips = [ FakeIPAllocation('ffea:3ba5:a17a:4ba3:0216:3eff:fec2:771d', 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee')] self.dns_assignment = [ FakeDNSAssignment('ffea:3ba5:a17a:4ba3:0216:3eff:fec2:771d')] self.mac_address = '00:16:3e:c2:77:1d' self.device_id = 'fake_port6' self.extra_dhcp_opts = [ DhcpOpt(opt_name='dns-server', opt_value='ffea:3ba5:a17a:4ba3::100', ip_version=constants.IP_VERSION_6)] class FakeV6PortMultipleFixedIpsSameSubnet(object): def __init__(self, domain='openstacklocal'): self.id = 'hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh' self.admin_state_up = True self.device_owner = 'foo3' self.fixed_ips = [ FakeIPAllocation('fdca:3ba5:a17a:4ba3::2', 'ffffffff-ffff-ffff-ffff-ffffffffffff'), FakeIPAllocation('fdca:3ba5:a17a:4ba3::4', 'ffffffff-ffff-ffff-ffff-ffffffffffff')] self.mac_address = '00:00:f3:aa:bb:cc' self.device_id = 'fake_port6' self.extra_dhcp_opts = [] self.dns_assignment = [FakeDNSAssignment('fdca:3ba5:a17a:4ba3::2', domain=domain), FakeDNSAssignment('fdca:3ba5:a17a:4ba3::4', domain=domain)] class FakeDualPortWithV6ExtraOpt(object): def __init__(self): self.id = 'hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh' self.admin_state_up = True self.device_owner = 'foo3' self.fixed_ips = [ FakeIPAllocation('192.168.0.3', 
'dddddddd-dddd-dddd-dddd-dddddddddddd'), FakeIPAllocation('ffea:3ba5:a17a:4ba3:0216:3eff:fec2:771d', 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee')] self.dns_assignment = [ FakeDNSAssignment('192.168.0.3'), FakeDNSAssignment('ffea:3ba5:a17a:4ba3:0216:3eff:fec2:771d')] self.mac_address = '00:16:3e:c2:77:1d' self.device_id = 'fake_port6' self.extra_dhcp_opts = [ DhcpOpt(opt_name='dns-server', opt_value='ffea:3ba5:a17a:4ba3::100', ip_version=constants.IP_VERSION_6)] class FakeDualPort(object): def __init__(self, domain='openstacklocal'): self.id = 'hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh' self.admin_state_up = True self.device_owner = 'foo3' self.fixed_ips = [ FakeIPAllocation('192.168.0.3', 'dddddddd-dddd-dddd-dddd-dddddddddddd'), FakeIPAllocation('fdca:3ba5:a17a:4ba3::3', 'ffffffff-ffff-ffff-ffff-ffffffffffff')] self.mac_address = '00:00:0f:aa:bb:cc' self.device_id = 'fake_dual_port' self.extra_dhcp_opts = [] self.dns_assignment = [FakeDNSAssignment('192.168.0.3', domain=domain), FakeDNSAssignment('fdca:3ba5:a17a:4ba3::3', domain=domain)] class FakeRouterPort(object): def __init__(self, dev_owner=constants.DEVICE_OWNER_ROUTER_INTF, ip_address='192.168.0.1', domain='openstacklocal'): self.id = 'rrrrrrrr-rrrr-rrrr-rrrr-rrrrrrrrrrrr' self.admin_state_up = True self.device_owner = constants.DEVICE_OWNER_ROUTER_INTF self.mac_address = '00:00:0f:rr:rr:rr' self.device_id = 'fake_router_port' self.dns_assignment = [] self.extra_dhcp_opts = [] self.device_owner = dev_owner self.fixed_ips = [FakeIPAllocation( ip_address, 'dddddddd-dddd-dddd-dddd-dddddddddddd')] self.dns_assignment = [FakeDNSAssignment(ip.ip_address, domain=domain) for ip in self.fixed_ips] class FakeRouterPortNoDHCP(object): def __init__(self, dev_owner=constants.DEVICE_OWNER_ROUTER_INTF, ip_address='192.168.0.1', domain='openstacklocal'): self.id = 'ssssssss-ssss-ssss-ssss-ssssssssssss' self.admin_state_up = True self.device_owner = constants.DEVICE_OWNER_ROUTER_INTF self.mac_address = '00:00:0f:rr:rr:rr' self.device_id = 'fake_router_port_no_dhcp' self.dns_assignment = [] self.extra_dhcp_opts = [] self.device_owner = dev_owner self.fixed_ips = [FakeIPAllocation( ip_address, 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee')] self.dns_assignment = [FakeDNSAssignment(ip.ip_address, domain=domain) for ip in self.fixed_ips] class FakeRouterPort2(object): def __init__(self): self.id = 'rrrrrrrr-rrrr-rrrr-rrrr-rrrrrrrrrrrr' self.admin_state_up = True self.device_owner = constants.DEVICE_OWNER_ROUTER_INTF self.fixed_ips = [ FakeIPAllocation('192.168.1.1', 'cccccccc-cccc-cccc-cccc-cccccccccccc')] self.dns_assignment = [FakeDNSAssignment('192.168.1.1')] self.mac_address = '00:00:0f:rr:rr:r2' self.device_id = 'fake_router_port2' self.extra_dhcp_opts = [] class FakeRouterPortSegmentID(object): def __init__(self): self.id = 'qqqqqqqq-qqqq-qqqq-qqqq-qqqqqqqqqqqq' self.admin_state_up = True self.device_owner = constants.DEVICE_OWNER_ROUTER_INTF self.fixed_ips = [ FakeIPAllocation('192.168.2.1', 'iiiiiiii-iiii-iiii-iiii-iiiiiiiiiiii')] self.dns_assignment = [FakeDNSAssignment('192.168.2.1')] self.mac_address = '00:00:0f:rr:rr:r3' self.device_id = 'fake_router_port3' self.extra_dhcp_opts = [] class FakePortMultipleAgents1(object): def __init__(self): self.id = 'rrrrrrrr-rrrr-rrrr-rrrr-rrrrrrrrrrrr' self.admin_state_up = True self.device_owner = constants.DEVICE_OWNER_DHCP self.fixed_ips = [ FakeIPAllocation('192.168.0.5', 'dddddddd-dddd-dddd-dddd-dddddddddddd')] self.dns_assignment = [FakeDNSAssignment('192.168.0.5')] self.mac_address = '00:00:0f:dd:dd:dd' 
self.device_id = 'fake_multiple_agents_port' self.extra_dhcp_opts = [] class FakePortMultipleAgents2(object): def __init__(self): self.id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' self.admin_state_up = True self.device_owner = constants.DEVICE_OWNER_DHCP self.fixed_ips = [ FakeIPAllocation('192.168.0.6', 'dddddddd-dddd-dddd-dddd-dddddddddddd')] self.dns_assignment = [FakeDNSAssignment('192.168.0.6')] self.mac_address = '00:00:0f:ee:ee:ee' self.device_id = 'fake_multiple_agents_port2' self.extra_dhcp_opts = [] class FakePortWithClientIdNum(object): def __init__(self): self.extra_dhcp_opts = [ DhcpOpt(opt_name=dhcp.DHCP_OPT_CLIENT_ID_NUM, opt_value='test_client_id_num')] class FakePortWithClientIdNumStr(object): def __init__(self): self.extra_dhcp_opts = [ DhcpOpt(opt_name=str(dhcp.DHCP_OPT_CLIENT_ID_NUM), opt_value='test_client_id_num')] class FakeV4HostRoute(object): def __init__(self): self.destination = '20.0.0.1/24' self.nexthop = '20.0.0.1' class FakeV4HostRouteGateway(object): def __init__(self): self.destination = constants.IPv4_ANY self.nexthop = '10.0.0.1' class FakeV6HostRoute(object): def __init__(self): self.destination = '2001:0200:feed:7ac0::/64' self.nexthop = '2001:0200:feed:7ac0::1' class FakeV4Subnet(Dictable): def __init__(self): self.id = 'dddddddd-dddd-dddd-dddd-dddddddddddd' self.ip_version = constants.IP_VERSION_4 self.cidr = '192.168.0.0/24' self.gateway_ip = '192.168.0.1' self.enable_dhcp = True self.host_routes = [FakeV4HostRoute()] self.dns_nameservers = ['8.8.8.8'] class FakeV4Subnet2(FakeV4Subnet): def __init__(self): super(FakeV4Subnet2, self).__init__() self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' self.cidr = '192.168.1.0/24' self.gateway_ip = '192.168.1.1' self.host_routes = [] class FakeV4SubnetSegmentID(FakeV4Subnet): def __init__(self): super(FakeV4SubnetSegmentID, self).__init__() self.id = 'iiiiiiii-iiii-iiii-iiii-iiiiiiiiiiii' self.cidr = '192.168.2.0/24' self.gateway_ip = '192.168.2.1' self.host_routes = [] self.segment_id = 1 class FakeV4SubnetSegmentID2(FakeV4Subnet): def __init__(self): super(FakeV4SubnetSegmentID2, self).__init__() self.id = 'jjjjjjjj-jjjj-jjjj-jjjj-jjjjjjjjjjjj' self.host_routes = [] self.segment_id = 2 class FakeV4MetadataSubnet(FakeV4Subnet): def __init__(self): super(FakeV4MetadataSubnet, self).__init__() self.cidr = '169.254.169.254/30' self.gateway_ip = '169.254.169.253' self.host_routes = [] self.dns_nameservers = [] class FakeV4SubnetGatewayRoute(FakeV4Subnet): def __init__(self): super(FakeV4SubnetGatewayRoute, self).__init__() self.host_routes = [FakeV4HostRouteGateway()] class FakeV4SubnetMultipleAgentsWithoutDnsProvided(FakeV4Subnet): def __init__(self): super(FakeV4SubnetMultipleAgentsWithoutDnsProvided, self).__init__() self.dns_nameservers = [] self.host_routes = [] class FakeV4SubnetAgentWithManyDnsProvided(FakeV4Subnet): def __init__(self): super(FakeV4SubnetAgentWithManyDnsProvided, self).__init__() self.dns_nameservers = ['2.2.2.2', '9.9.9.9', '1.1.1.1', '3.3.3.3'] self.host_routes = [] class FakeV4SubnetAgentWithNoDnsProvided(FakeV4Subnet): def __init__(self): super(FakeV4SubnetAgentWithNoDnsProvided, self).__init__() self.dns_nameservers = ['0.0.0.0'] self.host_routes = [] class FakeV4MultipleAgentsWithoutDnsProvided(object): def __init__(self): self.id = 'ffffffff-ffff-ffff-ffff-ffffffffffff' self.subnets = [FakeV4SubnetMultipleAgentsWithoutDnsProvided()] self.ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort(), FakePortMultipleAgents1(), FakePortMultipleAgents2()] self.namespace = 'qdhcp-ns' 
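# NOTE: the Fake* classes in this module are plain duck-typed stand-ins for
# neutron network/subnet/port objects -- the code under test only reads
# attributes, so no real DB objects are required.  The helper below is a
# simplified, hypothetical sketch (not the Dnsmasq driver's actual code) of
# how the v4 subnets of such a fake network map onto the static
# '--dhcp-range' arguments that _test_spawn() reconstructs further down.


def _sketch_v4_dhcp_ranges(network, lease_duration=86400):
    """Build static --dhcp-range options for a fake network's v4 subnets.

    Assumes each subnet exposes .id, .cidr, .ip_version and .enable_dhcp,
    which is the contract the Fake*Subnet classes here satisfy.
    """
    import netaddr
    ranges = []
    for subnet in network.subnets:
        if subnet.ip_version != 4 or not subnet.enable_dhcp:
            continue
        cidr = netaddr.IPNetwork(subnet.cidr)
        ranges.append('--dhcp-range=set:subnet-%s,%s,static,%s,%ss'
                      % (subnet.id, cidr.network, cidr.netmask,
                         lease_duration))
    return ranges


# Hypothetical usage against a fixture defined above, e.g.:
#   _sketch_v4_dhcp_ranges(FakeV4MultipleAgentsWithoutDnsProvided())
# would yield one entry of the form
#   '--dhcp-range=set:subnet-dddddddd-...,192.168.0.0,static,255.255.255.0,86400s'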
class FakeV4AgentWithoutDnsProvided(object): def __init__(self): self.id = 'ffffffff-ffff-ffff-ffff-ffffffffffff' self.subnets = [FakeV4SubnetMultipleAgentsWithoutDnsProvided()] self.ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort(), FakePortMultipleAgents1()] self.namespace = 'qdhcp-ns' class FakeV4AgentWithManyDnsProvided(object): def __init__(self): self.id = 'ffffffff-ffff-ffff-ffff-ffffffffffff' self.subnets = [FakeV4SubnetAgentWithManyDnsProvided()] self.ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort(), FakePortMultipleAgents1()] self.namespace = 'qdhcp-ns' class FakeV4AgentWithNoDnsProvided(object): def __init__(self): self.id = 'ffffffff-ffff-ffff-ffff-ffffffffffff' self.subnets = [FakeV4SubnetAgentWithNoDnsProvided()] self.ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort(), FakePortMultipleAgents1()] self.namespace = 'qdhcp-ns' class FakeV4SubnetMultipleAgentsWithDnsProvided(FakeV4Subnet): def __init__(self): super(FakeV4SubnetMultipleAgentsWithDnsProvided, self).__init__() self.host_routes = [] class FakeV4MultipleAgentsWithDnsProvided(object): def __init__(self): self.id = 'ffffffff-ffff-ffff-ffff-ffffffffffff' self.subnets = [FakeV4SubnetMultipleAgentsWithDnsProvided()] self.ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort(), FakePortMultipleAgents1(), FakePortMultipleAgents2()] self.namespace = 'qdhcp-ns' class FakeV6Subnet(object): def __init__(self): self.id = 'ffffffff-ffff-ffff-ffff-ffffffffffff' self.ip_version = constants.IP_VERSION_6 self.cidr = 'fdca:3ba5:a17a:4ba3::/64' self.gateway_ip = 'fdca:3ba5:a17a:4ba3::1' self.enable_dhcp = True self.host_routes = [FakeV6HostRoute()] self.dns_nameservers = ['2001:0200:feed:7ac0::1'] self.ipv6_ra_mode = None self.ipv6_address_mode = None class FakeV4SubnetNoDHCP(object): def __init__(self): self.id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' self.ip_version = constants.IP_VERSION_4 self.cidr = '192.168.1.0/24' self.gateway_ip = '192.168.1.1' self.enable_dhcp = False self.host_routes = [] self.dns_nameservers = [] class FakeV6SubnetDHCPStateful(Dictable): def __init__(self): self.id = 'ffffffff-ffff-ffff-ffff-ffffffffffff' self.ip_version = constants.IP_VERSION_6 self.cidr = 'fdca:3ba5:a17a:4ba3::/64' self.gateway_ip = 'fdca:3ba5:a17a:4ba3::1' self.enable_dhcp = True self.host_routes = [FakeV6HostRoute()] self.dns_nameservers = ['2001:0200:feed:7ac0::1'] self.ipv6_ra_mode = None self.ipv6_address_mode = constants.DHCPV6_STATEFUL class FakeV6SubnetSlaac(object): def __init__(self): self.id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' self.ip_version = constants.IP_VERSION_6 self.cidr = 'ffda:3ba5:a17a:4ba3::/64' self.gateway_ip = 'ffda:3ba5:a17a:4ba3::1' self.enable_dhcp = True self.host_routes = [FakeV6HostRoute()] self.ipv6_address_mode = constants.IPV6_SLAAC self.ipv6_ra_mode = None class FakeV6SubnetStateless(object): def __init__(self): self.id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' self.ip_version = constants.IP_VERSION_6 self.cidr = 'ffea:3ba5:a17a:4ba3::/64' self.gateway_ip = 'ffea:3ba5:a17a:4ba3::1' self.enable_dhcp = True self.dns_nameservers = [] self.host_routes = [] self.ipv6_address_mode = constants.DHCPV6_STATELESS self.ipv6_ra_mode = None class FakeV6SubnetStatelessNoDnsProvided(object): def __init__(self): self.id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' self.ip_version = constants.IP_VERSION_6 self.cidr = 'ffea:3ba5:a17a:4ba3::/64' self.gateway_ip = 'ffea:3ba5:a17a:4ba3::1' self.enable_dhcp = True self.dns_nameservers = ['::'] self.host_routes = [] 
self.ipv6_address_mode = constants.DHCPV6_STATELESS self.ipv6_ra_mode = None class FakeV6SubnetStatelessBadPrefixLength(object): def __init__(self): self.id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' self.ip_version = constants.IP_VERSION_6 self.cidr = 'ffeb:3ba5:a17a:4ba3::/56' self.gateway_ip = 'ffeb:3ba5:a17a:4ba3::1' self.enable_dhcp = True self.dns_nameservers = [] self.host_routes = [] self.ipv6_address_mode = constants.DHCPV6_STATELESS self.ipv6_ra_mode = None class FakeV4SubnetNoGateway(FakeV4Subnet): def __init__(self): super(FakeV4SubnetNoGateway, self).__init__() self.id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' self.cidr = '192.168.1.0/24' self.gateway_ip = None self.enable_dhcp = True self.host_routes = [] self.dns_nameservers = [] class FakeV4SubnetNoRouter(FakeV4Subnet): def __init__(self): super(FakeV4SubnetNoRouter, self).__init__() self.id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' self.cidr = '192.168.1.0/24' self.gateway_ip = '192.168.1.1' self.host_routes = [] self.dns_nameservers = [] class FakeV4Network(object): def __init__(self): self.id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' self.subnets = [FakeV4Subnet()] self.ports = [FakePort1()] self.namespace = 'qdhcp-ns' class FakeV4NetworkClientId(object): def __init__(self): self.id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' self.subnets = [FakeV4Subnet()] self.ports = [FakePort1(), FakePort5(), FakePort6()] self.namespace = 'qdhcp-ns' class FakeV4NetworkClientIdNum(object): def __init__(self): self.id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' self.subnets = [FakeV4Subnet()] self.ports = [FakePortWithClientIdNum()] self.namespace = 'qdhcp-ns' class FakeV4NetworkClientIdNumStr(object): def __init__(self): self.id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' self.subnets = [FakeV4Subnet()] self.ports = [FakePortWithClientIdNumStr()] self.namespace = 'qdhcp-ns' class FakeV6Network(object): def __init__(self): self.id = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb' self.subnets = [FakeV6Subnet()] self.ports = [FakePort2()] self.namespace = 'qdhcp-ns' class FakeDualNetwork(object): def __init__(self, domain='openstacklocal'): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' self.subnets = [FakeV4Subnet(), FakeV6SubnetDHCPStateful()] self.namespace = 'qdhcp-ns' self.ports = [FakePort1(domain=domain), FakeV6Port(domain=domain), FakeDualPort(domain=domain), FakeRouterPort(domain=domain)] class FakeDeviceManagerNetwork(object): def __init__(self): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' self.subnets = [FakeV4Subnet(), FakeV6SubnetDHCPStateful()] self.ports = [FakePort1(), FakeV6Port(), FakeDualPort(), FakeRouterPort()] self.namespace = 'qdhcp-ns' class FakeDualNetworkReserved(object): def __init__(self): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' self.subnets = [FakeV4Subnet(), FakeV6SubnetDHCPStateful()] self.ports = [FakePort1(), FakeV6Port(), FakeDualPort(), FakeRouterPort(), FakeReservedPort()] self.namespace = 'qdhcp-ns' class FakeDualNetworkReserved2(object): def __init__(self): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' self.subnets = [FakeV4Subnet(), FakeV6SubnetDHCPStateful()] self.ports = [FakePort1(), FakeV6Port(), FakeDualPort(), FakeRouterPort(), FakeReservedPort(), FakeReservedPort(id='reserved-2')] self.namespace = 'qdhcp-ns' class FakeNetworkDhcpPort(object): def __init__(self): self.id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' self.subnets = [FakeV4Subnet()] self.ports = [FakePort1(), FakeDhcpPort()] self.namespace = 'qdhcp-ns' class FakeDualNetworkGatewayRoute(object): def __init__(self): self.id = 
'cccccccc-cccc-cccc-cccc-cccccccccccc' self.subnets = [FakeV4SubnetGatewayRoute(), FakeV6SubnetDHCPStateful()] self.ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort()] self.namespace = 'qdhcp-ns' class FakeDualNetworkSingleDHCP(object): def __init__(self): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' self.subnets = [FakeV4Subnet(), FakeV4SubnetNoDHCP()] self.ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort()] self.namespace = 'qdhcp-ns' class FakeDualNetworkSingleDHCPBothAttaced(object): def __init__(self): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' # dhcp-agent actually can't get the subnet with dhcp disabled self.subnets = [FakeV4Subnet()] self.ports = [FakePort1(), FakeRouterPortNoDHCP(), FakeRouterPort()] self.namespace = 'qdhcp-ns' class FakeDualNetworkDualDHCP(object): def __init__(self): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' self.subnets = [FakeV4Subnet(), FakeV4Subnet2()] self.ports = [FakePort1(), FakeRouterPort(), FakeRouterPort2()] self.namespace = 'qdhcp-ns' class FakeDualNetworkDualDHCPOnLinkSubnetRoutesDisabled(object): def __init__(self): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' self.subnets = [FakeV4Subnet(), FakeV4SubnetSegmentID()] self.ports = [FakePort1(), FakeRouterPort(), FakeRouterPortSegmentID()] self.namespace = 'qdhcp-ns' class FakeNonLocalSubnets(object): def __init__(self): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' self.subnets = [FakeV4SubnetSegmentID2()] self.non_local_subnets = [FakeV4SubnetSegmentID()] self.ports = [FakePort1(), FakeRouterPort(), FakeRouterPortSegmentID()] self.namespace = 'qdhcp-ns' class FakeDualNetworkTriDHCPOneOnLinkSubnetRoute(object): def __init__(self): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' self.subnets = [FakeV4Subnet(), FakeV4Subnet2(), FakeV4SubnetSegmentID()] self.ports = [FakePort1(), FakeRouterPort(), FakeRouterPort2(), FakeRouterPortSegmentID()] self.namespace = 'qdhcp-ns' class FakeV4NoGatewayNetwork(object): def __init__(self): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' self.subnets = [FakeV4SubnetNoGateway()] self.ports = [FakePort1()] class FakeV4NetworkNoRouter(object): def __init__(self): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' self.subnets = [FakeV4SubnetNoRouter()] self.ports = [FakePort1()] class FakeV4MetadataNetwork(object): def __init__(self): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' self.subnets = [FakeV4MetadataSubnet()] self.ports = [FakeRouterPort(ip_address='169.254.169.253')] class FakeV4NetworkDistRouter(object): def __init__(self): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' self.subnets = [FakeV4Subnet()] self.ports = [FakePort1(), FakeRouterPort( dev_owner=constants.DEVICE_OWNER_DVR_INTERFACE)] class FakeDualV4Pxe3Ports(object): def __init__(self, port_detail="portsSame"): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' self.subnets = [FakeV4Subnet(), FakeV4SubnetNoDHCP()] self.ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort()] self.namespace = 'qdhcp-ns' if port_detail == "portsSame": self.ports[0].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')] self.ports[1].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.1.3'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.1.2'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux2.0')] self.ports[2].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', 
opt_value='192.168.1.3'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.1.2'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux3.0')] else: self.ports[0].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.2'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')] self.ports[1].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.5'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.5'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux2.0')] self.ports[2].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.7'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.7'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux3.0')] class FakeV4NetworkPxe2Ports(object): def __init__(self, port_detail="portsSame"): self.id = 'dddddddd-dddd-dddd-dddd-dddddddddddd' self.subnets = [FakeV4Subnet()] self.ports = [FakePort1(), FakePort2(), FakeRouterPort()] self.namespace = 'qdhcp-ns' if port_detail == "portsSame": self.ports[0].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')] self.ports[1].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')] else: self.ports[0].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')] self.ports[1].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.5'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.5'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')] class FakeV4NetworkPxe3Ports(object): def __init__(self, port_detail="portsSame"): self.id = 'dddddddd-dddd-dddd-dddd-dddddddddddd' self.subnets = [FakeV4Subnet()] self.ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort()] self.namespace = 'qdhcp-ns' if port_detail == "portsSame": self.ports[0].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')] self.ports[1].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.1.3'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.1.2'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')] self.ports[2].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.1.3'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.1.2'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')] else: self.ports[0].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')] self.ports[1].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.5'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.5'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux2.0')] self.ports[2].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.7'), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.7'), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux3.0')] class 
FakeV4NetworkPxePort(object): def __init__(self): self.id = 'dddddddd-dddd-dddd-dddd-dddddddddddd' self.subnets = [FakeV4Subnet()] self.ports = [FakePort1()] self.namespace = 'qdhcp-ns' self.ports[0].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3', ip_version=constants.IP_VERSION_4), DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2', ip_version=constants.IP_VERSION_4), DhcpOpt(opt_name='nd98', opt_value='option-nondigit-98', ip_version=constants.IP_VERSION_4), DhcpOpt(opt_name='99', opt_value='option-99', ip_version=constants.IP_VERSION_4), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0', ip_version=constants.IP_VERSION_4)] class FakeV6NetworkPxePort(object): def __init__(self): self.id = 'dddddddd-dddd-dddd-dddd-dddddddddddd' self.subnets = [FakeV6SubnetDHCPStateful()] self.ports = [FakeV6Port()] self.namespace = 'qdhcp-ns' self.ports[0].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='2001:192:168::1', ip_version=constants.IP_VERSION_6), DhcpOpt(opt_name='nd98', opt_value='option-nondigit-98', ip_version=constants.IP_VERSION_6), DhcpOpt(opt_name='99', opt_value='option-99', ip_version=constants.IP_VERSION_6), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0', ip_version=constants.IP_VERSION_6)] class FakeV6NetworkPxePortWrongOptVersion(object): def __init__(self): self.id = 'dddddddd-dddd-dddd-dddd-dddddddddddd' self.subnets = [FakeV6SubnetDHCPStateful()] self.ports = [FakeV6Port()] self.namespace = 'qdhcp-ns' self.ports[0].extra_dhcp_opts = [ DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.7', ip_version=constants.IP_VERSION_4), DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0', ip_version=constants.IP_VERSION_6)] class FakeDualStackNetworkSingleDHCP(object): def __init__(self): self.id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' self.subnets = [FakeV4Subnet(), FakeV6SubnetSlaac()] self.ports = [FakePort1(), FakePort4(), FakeRouterPort()] class FakeDualStackNetworkingSingleDHCPTags(object): def __init__(self): self.id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' self.subnets = [FakeV4Subnet(), FakeV6SubnetSlaac()] self.ports = [FakePort1(), FakePort4(), FakeRouterPort()] for port in self.ports: port.extra_dhcp_opts = [ DhcpOpt(opt_name='tag:ipxe,bootfile-name', opt_value='pxelinux.0')] class FakeV4NetworkMultipleTags(object): def __init__(self): self.id = 'dddddddd-dddd-dddd-dddd-dddddddddddd' self.subnets = [FakeV4Subnet()] self.ports = [FakePort1(), FakeRouterPort()] self.namespace = 'qdhcp-ns' self.ports[0].extra_dhcp_opts = [ DhcpOpt(opt_name='tag:ipxe,bootfile-name', opt_value='pxelinux.0')] class FakeV6NetworkStatelessDHCP(object): def __init__(self): self.id = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb' self.subnets = [FakeV6SubnetStateless()] self.ports = [FakeV6PortExtraOpt()] self.namespace = 'qdhcp-ns' class FakeV6NetworkStatelessDHCPNoDnsProvided(object): def __init__(self): self.id = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb' self.subnets = [FakeV6SubnetStatelessNoDnsProvided()] self.ports = [FakeV6Port()] self.namespace = 'qdhcp-ns' class FakeV6NetworkStatelessDHCPBadPrefixLength(object): def __init__(self): self.id = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb' self.subnets = [FakeV6SubnetStatelessBadPrefixLength()] self.ports = [FakeV6PortExtraOpt()] self.namespace = 'qdhcp-ns' class FakeNetworkWithV6SatelessAndV4DHCPSubnets(object): def __init__(self): self.id = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb' self.subnets = [FakeV6SubnetStateless(), FakeV4Subnet()] self.ports = [FakeDualPortWithV6ExtraOpt(), 
FakeRouterPort()] self.namespace = 'qdhcp-ns' class FakeV6NetworkStatefulDHCPSameSubnetFixedIps(object): def __init__(self): self.id = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb' self.subnets = [FakeV6SubnetDHCPStateful()] self.ports = [FakeV6PortMultipleFixedIpsSameSubnet()] self.namespace = 'qdhcp-ns' class LocalChild(dhcp.DhcpLocalProcess): PORTS = {4: [4], 6: [6]} def __init__(self, *args, **kwargs): self.process_monitor = mock.Mock() kwargs['process_monitor'] = self.process_monitor super(LocalChild, self).__init__(*args, **kwargs) self.called = [] def reload_allocations(self): self.called.append('reload') def spawn_process(self): self.called.append('spawn') class TestConfBase(base.BaseTestCase): def setUp(self): super(TestConfBase, self).setUp() self.conf = config.setup_conf() self.conf.register_opts(base_config.core_opts) self.conf.register_opts(dhcp_config.DHCP_OPTS) self.conf.register_opts(dhcp_config.DNSMASQ_OPTS) config.register_external_process_opts(self.conf) config.register_interface_driver_opts_helper(self.conf) class TestBase(TestConfBase): def setUp(self): super(TestBase, self).setUp() instance = mock.patch("neutron.agent.linux.dhcp.DeviceManager") self.mock_mgr = instance.start() self.conf.register_opt(cfg.BoolOpt('enable_isolated_metadata', default=True)) self.conf.register_opt(cfg.BoolOpt("force_metadata", default=False)) self.conf.register_opt(cfg.BoolOpt('enable_metadata_network', default=False)) self.config_parse(self.conf) self.conf.set_override('state_path', '') self.replace_p = mock.patch('neutron_lib.utils.file.replace_file') self.execute_p = mock.patch('neutron.agent.common.utils.execute') mock.patch('neutron.agent.linux.utils.execute').start() self.safe = self.replace_p.start() self.execute = self.execute_p.start() self.makedirs = mock.patch('os.makedirs').start() self.rmtree = mock.patch('shutil.rmtree').start() self.external_process = mock.patch( 'neutron.agent.linux.external_process.ProcessManager').start() self.mock_mgr.return_value.driver.bridged = True class TestDhcpBase(TestBase): def test_existing_dhcp_networks_abstract_error(self): self.assertRaises(NotImplementedError, dhcp.DhcpBase.existing_dhcp_networks, None) def test_check_version_abstract_error(self): self.assertRaises(NotImplementedError, dhcp.DhcpBase.check_version) def test_base_abc_error(self): self.assertRaises(TypeError, dhcp.DhcpBase, None) def test_restart(self): class SubClass(dhcp.DhcpBase): def __init__(self): dhcp.DhcpBase.__init__(self, cfg.CONF, FakeV4Network(), mock.Mock(), None) self.called = [] def enable(self): self.called.append('enable') def disable(self, retain_port=False, block=False): self.called.append('disable %s %s' % (retain_port, block)) def reload_allocations(self): pass @property def active(self): return True c = SubClass() c.restart() self.assertEqual(c.called, ['disable True True', 'enable']) class TestDhcpLocalProcess(TestBase): def test_get_conf_file_name(self): tpl = '/dhcp/aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa/dev' lp = LocalChild(self.conf, FakeV4Network()) self.assertEqual(lp.get_conf_file_name('dev'), tpl) @mock.patch.object(fileutils, 'ensure_tree') def test_ensure_dir_called(self, ensure_dir): LocalChild(self.conf, FakeV4Network()) ensure_dir.assert_called_once_with( '/dhcp/aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', mode=0o755) def test_enable_already_active(self): with mock.patch.object(LocalChild, 'active') as patched: patched.__get__ = mock.Mock(side_effect=[True, False]) lp = LocalChild(self.conf, FakeV4Network()) with mock.patch.object(ip_lib, 
'delete_network_namespace'): lp.enable() self.assertEqual(lp.called, ['spawn']) self.assertTrue(self.mock_mgr.return_value.setup.called) @mock.patch.object(fileutils, 'ensure_tree') def test_enable(self, ensure_dir): attrs_to_mock = dict( (a, mock.DEFAULT) for a in ['active', 'interface_name', 'spawn_process'] ) with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks: mocks['active'].__get__ = mock.Mock(return_value=False) mocks['interface_name'].__set__ = mock.Mock() mocks['spawn_process'].side_effect = [ exceptions.ProcessExecutionError( returncode=2, message="Test dnsmasq start failed"), None] lp = LocalChild(self.conf, FakeDualNetwork()) lp.enable() self.mock_mgr.assert_has_calls( [mock.call(self.conf, None), mock.call().setup(mock.ANY)]) self.assertEqual(2, mocks['interface_name'].__set__.call_count) ensure_dir.assert_has_calls([ mock.call( '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc', mode=0o755), mock.call( '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc', mode=0o755)]) def _assert_disabled(self, lp): self.assertTrue(lp.process_monitor.unregister.called) self.assertTrue(self.external_process().disable.called) def test_disable_not_active(self): attrs_to_mock = dict((a, mock.DEFAULT) for a in ['active', 'interface_name']) with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks: mocks['active'].__get__ = mock.Mock(return_value=False) mocks['interface_name'].__get__ = mock.Mock(return_value='tap0') network = FakeDualNetwork() lp = LocalChild(self.conf, network) lp.device_manager = mock.Mock() with mock.patch('neutron.agent.linux.ip_lib.' 'delete_network_namespace') as delete_ns: lp.disable() lp.device_manager.destroy.assert_called_once_with( network, 'tap0') self._assert_disabled(lp) delete_ns.assert_called_with('qdhcp-ns') def test_disable_retain_port(self): attrs_to_mock = dict((a, mock.DEFAULT) for a in ['active', 'interface_name']) network = FakeDualNetwork() with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks: mocks['active'].__get__ = mock.Mock(return_value=True) mocks['interface_name'].__get__ = mock.Mock(return_value='tap0') lp = LocalChild(self.conf, network) lp.disable(retain_port=True) self._assert_disabled(lp) def test_disable(self): attrs_to_mock = {'active': mock.DEFAULT} with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks: mocks['active'].__get__ = mock.Mock(return_value=False) lp = LocalChild(self.conf, FakeDualNetwork()) with mock.patch('neutron.agent.linux.ip_lib.' 'delete_network_namespace') as delete_ns: lp.disable() self._assert_disabled(lp) delete_ns.assert_called_with('qdhcp-ns') def test_disable_config_dir_removed_after_destroy(self): parent = mock.MagicMock() parent.attach_mock(self.rmtree, 'rmtree') parent.attach_mock(self.mock_mgr, 'DeviceManager') lp = LocalChild(self.conf, FakeDualNetwork()) with mock.patch('neutron.agent.linux.ip_lib.' 
'delete_network_namespace') as delete_ns: lp.disable(retain_port=False) expected = [mock.call.DeviceManager().destroy(mock.ANY, mock.ANY), mock.call.rmtree(mock.ANY, ignore_errors=True)] parent.assert_has_calls(expected) delete_ns.assert_called_with('qdhcp-ns') def test_get_interface_name(self): net = FakeDualNetwork() path = '/dhcp/%s/interface' % net.id self.useFixture(lib_fixtures.OpenFixture(path, 'tap0')) lp = LocalChild(self.conf, net) self.assertEqual(lp.interface_name, 'tap0') def test_set_interface_name(self): with mock.patch('neutron_lib.utils.file.replace_file') as replace: lp = LocalChild(self.conf, FakeDualNetwork()) with mock.patch.object(lp, 'get_conf_file_name') as conf_file: conf_file.return_value = '/interface' lp.interface_name = 'tap0' conf_file.assert_called_once_with('interface') replace.assert_called_once_with(mock.ANY, 'tap0') class TestDnsmasq(TestBase): def setUp(self): super(TestDnsmasq, self).setUp() self._mock_get_devices_with_ip = mock.patch.object( ip_lib, 'get_devices_with_ip') self.mock_get_devices_with_ip = self._mock_get_devices_with_ip.start() self.addCleanup(self._stop_mocks) def _stop_mocks(self): self._mock_get_devices_with_ip.stop() def _get_dnsmasq(self, network, process_monitor=None): process_monitor = process_monitor or mock.Mock() return dhcp.Dnsmasq(self.conf, network, process_monitor=process_monitor) def _test_spawn(self, extra_options, network=FakeDualNetwork(), max_leases=16777216, lease_duration=86400, has_static=True, no_resolv='--no-resolv', has_stateless=True, dhcp_t1=0, dhcp_t2=0, bridged=True): def mock_get_conf_file_name(kind): return '/dhcp/%s/%s' % (network.id, kind) # if you need to change this path here, think twice, # that means pid files will move around, breaking upgrades # or backwards-compatibility expected_pid_file = '/dhcp/%s/pid' % network.id expected = [ 'dnsmasq', '--no-hosts', no_resolv, '--pid-file=%s' % expected_pid_file, '--dhcp-hostsfile=/dhcp/%s/host' % network.id, '--addn-hosts=/dhcp/%s/addn_hosts' % network.id, '--dhcp-optsfile=/dhcp/%s/opts' % network.id, '--dhcp-leasefile=/dhcp/%s/leases' % network.id, '--dhcp-match=set:ipxe,175', '--dhcp-userclass=set:ipxe6,iPXE', '--local-service', '--bind-dynamic', ] if not bridged: expected += [ '--bridge-interface=tap0,tap*' ] seconds = '' if lease_duration == -1: lease_duration = 'infinite' else: seconds = 's' if has_static: prefix = '--dhcp-range=set:subnet-%s,%s,static,%s,%s%s' prefix6 = '--dhcp-range=set:subnet-%s,%s,static,%s,%s%s' elif has_stateless: prefix = '--dhcp-range=set:subnet-%s,%s,%s,%s%s' prefix6 = '--dhcp-range=set:subnet-%s,%s,%s,%s%s' possible_leases = 0 for s in network.subnets: if (s.ip_version != constants.IP_VERSION_6 or s.ipv6_address_mode == constants.DHCPV6_STATEFUL): if s.ip_version == constants.IP_VERSION_4: expected.extend([prefix % ( s.id, s.cidr.split('/')[0], netaddr.IPNetwork(s.cidr).netmask, lease_duration, seconds)]) else: expected.extend([prefix6 % ( s.id, s.cidr.split('/')[0], s.cidr.split('/')[1], lease_duration, seconds)]) possible_leases += netaddr.IPNetwork(s.cidr).size if hasattr(network, 'mtu'): expected.append( '--dhcp-option-force=option:mtu,%s' % network.mtu) expected.append('--dhcp-lease-max=%d' % min( possible_leases, max_leases)) if dhcp_t1: expected.append('--dhcp-option-force=option:T1,%ds' % dhcp_t1) if dhcp_t2: expected.append('--dhcp-option-force=option:T2,%ds' % dhcp_t2) expected.extend(extra_options) self.execute.return_value = ('', '') attrs_to_mock = dict( (a, mock.DEFAULT) for a in ['_output_opts_file', 
'get_conf_file_name', 'interface_name'] ) test_pm = mock.Mock() with mock.patch.multiple(dhcp.Dnsmasq, **attrs_to_mock) as mocks: mocks['get_conf_file_name'].side_effect = mock_get_conf_file_name mocks['_output_opts_file'].return_value = ( '/dhcp/%s/opts' % network.id ) mocks['interface_name'].__get__ = mock.Mock(return_value='tap0') dm = self._get_dnsmasq(network, test_pm) dm.spawn_process() self.assertTrue(mocks['_output_opts_file'].called) self.assertTrue(test_pm.register.called) self.external_process().enable.assert_called_once_with( ensure_active=True, reload_cfg=False) call_kwargs = self.external_process.mock_calls[0][2] cmd_callback = call_kwargs['default_cmd_callback'] result_cmd = cmd_callback(expected_pid_file) self.assertEqual(expected, result_cmd) def test_spawn(self): self._test_spawn(['--conf-file=', '--domain=openstacklocal']) def test_spawn_not_bridged(self): self.mock_mgr.return_value.driver.bridged = False self._test_spawn(['--conf-file=', '--domain=openstacklocal'], bridged=False) def test_spawn_infinite_lease_duration(self): self.conf.set_override('dhcp_lease_duration', -1) self._test_spawn(['--conf-file=', '--domain=openstacklocal'], FakeDualNetwork(), 16777216, -1) def test_spawn_cfg_config_file(self): self.conf.set_override('dnsmasq_config_file', '/foo') self._test_spawn(['--conf-file=/foo', '--domain=openstacklocal']) def test_spawn_no_dns_domain(self): (exp_host_name, exp_host_data, exp_addn_name, exp_addn_data) = self._test_no_dns_domain_alloc_data self.conf.set_override('dns_domain', '') network = FakeDualNetwork(domain=self.conf.dns_domain) self._test_spawn(['--conf-file='], network=network) self.safe.assert_has_calls([mock.call(exp_host_name, exp_host_data), mock.call(exp_addn_name, exp_addn_data)]) def test_spawn_no_dhcp_range(self): network = FakeV6Network() subnet = FakeV6SubnetSlaac() network.subnets = [subnet] self._test_spawn(['--conf-file=', '--domain=openstacklocal'], network, has_static=False) def test_spawn_no_dhcp_range_bad_prefix_length(self): network = FakeV6NetworkStatelessDHCPBadPrefixLength() subnet = FakeV6SubnetStatelessBadPrefixLength() network.subnets = [subnet] self._test_spawn(['--conf-file=', '--domain=openstacklocal'], network, has_static=False, has_stateless=False) def test_spawn_cfg_dns_server(self): self.conf.set_override('dnsmasq_dns_servers', ['8.8.8.8']) self._test_spawn(['--conf-file=', '--server=8.8.8.8', '--domain=openstacklocal']) def test_spawn_cfg_multiple_dns_server(self): self.conf.set_override('dnsmasq_dns_servers', ['8.8.8.8', '9.9.9.9']) self._test_spawn(['--conf-file=', '--server=8.8.8.8', '--server=9.9.9.9', '--domain=openstacklocal']) def test_spawn_cfg_enable_dnsmasq_log(self): self.conf.set_override('dnsmasq_base_log_dir', '/tmp') network = FakeV4Network() dhcp_dns_log = \ '/tmp/aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa/dhcp_dns_log' self._test_spawn(['--conf-file=', '--domain=openstacklocal', '--log-queries', '--log-dhcp', ('--log-facility=%s' % dhcp_dns_log)], network) self.makedirs.assert_called_with(os.path.join('/tmp', network.id)) def test_spawn_cfg_with_local_resolv(self): self.conf.set_override('dnsmasq_local_resolv', True) self._test_spawn(['--conf-file=', '--domain=openstacklocal'], no_resolv='') def test_spawn_cfg_with_local_resolv_overridden(self): self.conf.set_override('dnsmasq_local_resolv', True) self.conf.set_override('dnsmasq_dns_servers', ['8.8.8.8']) self._test_spawn(['--conf-file=', '--server=8.8.8.8', '--domain=openstacklocal']) def test_spawn_max_leases_is_smaller_than_cap(self): self._test_spawn( 
['--conf-file=', '--domain=openstacklocal'], network=FakeV4Network(), max_leases=256) def test_spawn_cfg_broadcast(self): self.conf.set_override('dhcp_broadcast_reply', True) self._test_spawn(['--conf-file=', '--domain=openstacklocal', '--dhcp-broadcast']) def test_spawn_cfg_advertise_mtu(self): network = FakeV4Network() network.mtu = 1500 self._test_spawn(['--conf-file=', '--domain=openstacklocal'], network) def test_spawn_cfg_advertise_mtu_plugin_doesnt_pass_mtu_value(self): network = FakeV4Network() self._test_spawn(['--conf-file=', '--domain=openstacklocal'], network) def test_spawn_cfg_with_dhcp_timers(self): self.conf.set_override('dhcp_renewal_time', 30) self.conf.set_override('dhcp_rebinding_time', 100) self._test_spawn(['--conf-file=', '--domain=openstacklocal'], dhcp_t1=30, dhcp_t2=100) def _test_output_init_lease_file(self, timestamp): expected = [ '00:00:80:aa:bb:cc 192.168.0.2 * *', '00:00:0f:aa:bb:cc 192.168.0.3 * *', '00:00:0f:rr:rr:rr 192.168.0.1 * *\n'] expected = "\n".join(['%s %s' % (timestamp, l) for l in expected]) with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn: conf_fn.return_value = '/foo/leases' dm = self._get_dnsmasq(FakeDualNetwork()) dm._output_init_lease_file() self.safe.assert_called_once_with('/foo/leases', expected) @mock.patch('time.time') def test_output_init_lease_file(self, tmock): self.conf.set_override('dhcp_lease_duration', 500) tmock.return_value = 1000000 # lease duration should be added to current time timestamp = 1000000 + 500 self._test_output_init_lease_file(timestamp) def test_output_init_lease_file_infinite_duration(self): self.conf.set_override('dhcp_lease_duration', -1) # when duration is infinite, lease db timestamp should be 0 timestamp = 0 self._test_output_init_lease_file(timestamp) def _test_output_opts_file(self, expected, network, ipm_retval=None): with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn: conf_fn.return_value = '/foo/opts' dm = self._get_dnsmasq(network) if ipm_retval: with mock.patch.object( dm, '_make_subnet_interface_ip_map') as ipm: ipm.return_value = ipm_retval dm._output_opts_file() self.assertTrue(ipm.called) else: dm._output_opts_file() self.safe.assert_called_once_with('/foo/opts', expected) def test_output_opts_file(self): fake_v6 = '2001:0200:feed:7ac0::1' expected = ( 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:dns-server,8.8.8.8\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:classless-static-route,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' '249,20.0.0.1/24,20.0.0.1,169.254.169.254/32,192.168.0.1,' '0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:router,192.168.0.1\n' 'tag:subnet-ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option6:dns-server,%s\n' 'tag:subnet-ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option6:domain-search,openstacklocal' ).lstrip() % ('[' + fake_v6 + ']') self._test_output_opts_file(expected, FakeDualNetwork()) def test_output_opts_file_gateway_route(self): fake_v6 = '2001:0200:feed:7ac0::1' expected = ( 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:dns-server,8.8.8.8\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:classless-static-route,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' '249,169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 
'option:router,192.168.0.1\n' 'tag:subnet-ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option6:dns-server,%s\n' 'tag:subnet-ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option6:domain-search,openstacklocal' ).lstrip() % ('[' + fake_v6 + ']') self._test_output_opts_file(expected, FakeDualNetworkGatewayRoute()) def test_output_opts_file_multiple_agents_without_dns_provided(self): expected = ( 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:classless-static-route,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' '249,169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:router,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:dns-server,192.168.0.5,192.168.0.6').lstrip() self._test_output_opts_file(expected, FakeV4MultipleAgentsWithoutDnsProvided()) def test_output_opts_file_agent_dns_provided(self): expected = ( 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:classless-static-route,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' '249,169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:router,192.168.0.1').lstrip() self._test_output_opts_file(expected, FakeV4AgentWithoutDnsProvided()) def test_output_opts_file_agent_with_many_dns_provided(self): expected = ( 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:dns-server,2.2.2.2,9.9.9.9,1.1.1.1,3.3.3.3\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:classless-static-route,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' '249,169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:router,192.168.0.1').lstrip() self._test_output_opts_file(expected, FakeV4AgentWithManyDnsProvided()) def test_output_opts_file_agent_with_no_dns_provided(self): expected = ( 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:dns-server\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:classless-static-route,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' '249,169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:router,192.168.0.1').lstrip() self._test_output_opts_file(expected, FakeV4AgentWithNoDnsProvided()) def test_output_opts_file_multiple_agents_with_dns_provided(self): expected = ( 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:dns-server,8.8.8.8\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:classless-static-route,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' '249,169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:router,192.168.0.1').lstrip() self._test_output_opts_file(expected, FakeV4MultipleAgentsWithDnsProvided()) def test_output_opts_file_single_dhcp(self): expected = ( 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:dns-server,8.8.8.8\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:classless-static-route,192.168.1.0/24,0.0.0.0,' '20.0.0.1/24,20.0.0.1,169.254.169.254/32,192.168.0.1,' '0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' '249,192.168.1.0/24,0.0.0.0,20.0.0.1/24,20.0.0.1,' 
'169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:router,192.168.0.1').lstrip() self._test_output_opts_file(expected, FakeDualNetworkSingleDHCP()) def test_output_opts_file_single_dhcp_both_not_isolated(self): expected = ( 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:dns-server,8.8.8.8\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:classless-static-route,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' '249,20.0.0.1/24,20.0.0.1,169.254.169.254/32,192.168.0.1,' '0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:router,192.168.0.1').lstrip() self._test_output_opts_file(expected, FakeDualNetworkSingleDHCPBothAttaced()) def test_output_opts_file_dual_dhcp_rfc3442(self): expected = ( 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:dns-server,8.8.8.8\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:classless-static-route,192.168.1.0/24,0.0.0.0,' '20.0.0.1/24,20.0.0.1,169.254.169.254/32,192.168.0.1,' '0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' '249,192.168.1.0/24,0.0.0.0,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:router,192.168.0.1\n' 'tag:subnet-cccccccc-cccc-cccc-cccc-cccccccccccc,' 'option:dns-server,8.8.8.8\n' 'tag:subnet-cccccccc-cccc-cccc-cccc-cccccccccccc,' 'option:classless-static-route,192.168.0.0/24,0.0.0.0,' '169.254.169.254/32,192.168.1.1,0.0.0.0/0,192.168.1.1\n' 'tag:subnet-cccccccc-cccc-cccc-cccc-cccccccccccc,' '249,192.168.0.0/24,0.0.0.0,169.254.169.254/32,192.168.1.1,' '0.0.0.0/0,192.168.1.1\n' 'tag:subnet-cccccccc-cccc-cccc-cccc-cccccccccccc,' 'option:router,192.168.1.1').lstrip() self._test_output_opts_file(expected, FakeDualNetworkDualDHCP()) def test_output_opts_file_dual_dhcp_rfc3442_no_on_link_subnet_routes(self): expected = ( 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:dns-server,8.8.8.8\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:classless-static-route,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' '249,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:router,192.168.0.1\n' 'tag:subnet-iiiiiiii-iiii-iiii-iiii-iiiiiiiiiiii,' 'option:dns-server,8.8.8.8\n' 'tag:subnet-iiiiiiii-iiii-iiii-iiii-iiiiiiiiiiii,' 'option:classless-static-route,169.254.169.254/32,192.168.2.1,' '0.0.0.0/0,192.168.2.1\n' 'tag:subnet-iiiiiiii-iiii-iiii-iiii-iiiiiiiiiiii,' '249,169.254.169.254/32,192.168.2.1,0.0.0.0/0,192.168.2.1\n' 'tag:subnet-iiiiiiii-iiii-iiii-iiii-iiiiiiiiiiii,' 'option:router,192.168.2.1').lstrip() self._test_output_opts_file(expected, FakeDualNetworkDualDHCPOnLinkSubnetRoutesDisabled()) def test_output_opts_file_dual_dhcp_rfc3442_one_on_link_subnet_route(self): expected = ( 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:dns-server,8.8.8.8\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:classless-static-route,192.168.1.0/24,0.0.0.0,' '20.0.0.1/24,20.0.0.1,169.254.169.254/32,192.168.0.1,' '0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' '249,192.168.1.0/24,0.0.0.0,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 
'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:router,192.168.0.1\n' 'tag:subnet-cccccccc-cccc-cccc-cccc-cccccccccccc,' 'option:dns-server,8.8.8.8\n' 'tag:subnet-cccccccc-cccc-cccc-cccc-cccccccccccc,' 'option:classless-static-route,192.168.0.0/24,0.0.0.0,' '169.254.169.254/32,192.168.1.1,0.0.0.0/0,192.168.1.1\n' 'tag:subnet-cccccccc-cccc-cccc-cccc-cccccccccccc,' '249,192.168.0.0/24,0.0.0.0,169.254.169.254/32,192.168.1.1,' '0.0.0.0/0,192.168.1.1\n' 'tag:subnet-cccccccc-cccc-cccc-cccc-cccccccccccc,' 'option:router,192.168.1.1\n' 'tag:subnet-iiiiiiii-iiii-iiii-iiii-iiiiiiiiiiii,' 'option:dns-server,8.8.8.8\n' 'tag:subnet-iiiiiiii-iiii-iiii-iiii-iiiiiiiiiiii,' 'option:classless-static-route,169.254.169.254/32,192.168.2.1,' '0.0.0.0/0,192.168.2.1\n' 'tag:subnet-iiiiiiii-iiii-iiii-iiii-iiiiiiiiiiii,' '249,169.254.169.254/32,192.168.2.1,0.0.0.0/0,192.168.2.1\n' 'tag:subnet-iiiiiiii-iiii-iiii-iiii-iiiiiiiiiiii,' 'option:router,192.168.2.1').lstrip() self._test_output_opts_file(expected, FakeDualNetworkTriDHCPOneOnLinkSubnetRoute()) def test_output_opts_file_no_gateway(self): expected = ( 'tag:subnet-eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:classless-static-route,169.254.169.254/32,192.168.1.1\n' 'tag:subnet-eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' '249,169.254.169.254/32,192.168.1.1\n' 'tag:subnet-eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:router').lstrip() ipm_retval = {FakeV4SubnetNoGateway().id: '192.168.1.1'} self._test_output_opts_file(expected, FakeV4NoGatewayNetwork(), ipm_retval=ipm_retval) def test_non_local_subnets(self): expected = ( 'tag:subnet-jjjjjjjj-jjjj-jjjj-jjjj-jjjjjjjjjjjj,' 'option:dns-server,8.8.8.8\n' 'tag:subnet-jjjjjjjj-jjjj-jjjj-jjjj-jjjjjjjjjjjj,' 'option:classless-static-route,169.254.169.254/32,192.168.0.1,' '0.0.0.0/0,192.168.0.1\n' 'tag:subnet-jjjjjjjj-jjjj-jjjj-jjjj-jjjjjjjjjjjj,' '249,169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-jjjjjjjj-jjjj-jjjj-jjjj-jjjjjjjjjjjj,' 'option:router,192.168.0.1\n' 'tag:subnet-iiiiiiii-iiii-iiii-iiii-iiiiiiiiiiii,' 'option:dns-server,8.8.8.8\n' 'tag:subnet-iiiiiiii-iiii-iiii-iiii-iiiiiiiiiiii,' 'option:classless-static-route,169.254.169.254/32,192.168.2.1,' '0.0.0.0/0,192.168.2.1\n' 'tag:subnet-iiiiiiii-iiii-iiii-iiii-iiiiiiiiiiii,' '249,169.254.169.254/32,192.168.2.1,0.0.0.0/0,192.168.2.1\n' 'tag:subnet-iiiiiiii-iiii-iiii-iiii-iiiiiiiiiiii,' 'option:router,192.168.2.1').lstrip() ipm_retval = {FakeV4SubnetSegmentID2().id: '192.168.0.1'} self._test_output_opts_file(expected, FakeNonLocalSubnets(), ipm_retval=ipm_retval) def test_output_opts_file_no_neutron_router_on_subnet(self): expected = ( 'tag:subnet-eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:classless-static-route,' '169.254.169.254/32,192.168.1.2,0.0.0.0/0,192.168.1.1\n' 'tag:subnet-eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' '249,169.254.169.254/32,192.168.1.2,0.0.0.0/0,192.168.1.1\n' 'tag:subnet-eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:router,192.168.1.1').lstrip() ipm_retval = {FakeV4SubnetNoRouter().id: '192.168.1.2'} self._test_output_opts_file(expected, FakeV4NetworkNoRouter(), ipm_retval=ipm_retval) def test_output_opts_file_dist_neutron_router_on_subnet(self): expected = ( 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:dns-server,8.8.8.8\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:classless-static-route,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' '249,20.0.0.1/24,20.0.0.1,169.254.169.254/32,192.168.0.1,' 
'0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:router,192.168.0.1').lstrip() ipm_retval = {FakeV4Subnet().id: '192.168.0.1'} self._test_output_opts_file(expected, FakeV4NetworkDistRouter(), ipm_retval=ipm_retval) def test_output_opts_file_pxe_2port_1net(self): expected = ( 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:dns-server,8.8.8.8\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:classless-static-route,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' '249,20.0.0.1/24,20.0.0.1,169.254.169.254/32,192.168.0.1,' '0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:router,192.168.0.1\n' 'tag:port-eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:tftp-server,192.168.0.3\n' 'tag:port-eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:server-ip-address,192.168.0.2\n' 'tag:port-eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:bootfile-name,pxelinux.0\n' 'tag:port-ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option:tftp-server,192.168.0.3\n' 'tag:port-ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option:server-ip-address,192.168.0.2\n' 'tag:port-ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option:bootfile-name,pxelinux.0').lstrip() self._test_output_opts_file(expected, FakeV4NetworkPxe2Ports()) def test_output_opts_file_pxe_2port_1net_diff_details(self): expected = ( 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:dns-server,8.8.8.8\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:classless-static-route,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' '249,20.0.0.1/24,20.0.0.1,169.254.169.254/32,192.168.0.1,' '0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:router,192.168.0.1\n' 'tag:port-eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:tftp-server,192.168.0.3\n' 'tag:port-eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:server-ip-address,192.168.0.2\n' 'tag:port-eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:bootfile-name,pxelinux.0\n' 'tag:port-ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option:tftp-server,192.168.0.5\n' 'tag:port-ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option:server-ip-address,192.168.0.5\n' 'tag:port-ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option:bootfile-name,pxelinux.0').lstrip() self._test_output_opts_file(expected, FakeV4NetworkPxe2Ports("portsDiff")) def test_output_opts_file_pxe_3port_2net(self): expected = ( 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:dns-server,8.8.8.8\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:classless-static-route,192.168.1.0/24,0.0.0.0,20.0.0.1/24,' '20.0.0.1,169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' '249,192.168.1.0/24,0.0.0.0,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:router,192.168.0.1\n' 'tag:port-eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:tftp-server,192.168.0.3\n' 'tag:port-eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:server-ip-address,192.168.0.2\n' 'tag:port-eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:bootfile-name,pxelinux.0\n' 'tag:port-ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option:tftp-server,192.168.1.3\n' 'tag:port-ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option:server-ip-address,192.168.1.2\n' 'tag:port-ffffffff-ffff-ffff-ffff-ffffffffffff,' 
'option:bootfile-name,pxelinux2.0\n' 'tag:port-44444444-4444-4444-4444-444444444444,' 'option:tftp-server,192.168.1.3\n' 'tag:port-44444444-4444-4444-4444-444444444444,' 'option:server-ip-address,192.168.1.2\n' 'tag:port-44444444-4444-4444-4444-444444444444,' 'option:bootfile-name,pxelinux3.0').lstrip() self._test_output_opts_file(expected, FakeDualV4Pxe3Ports()) def test_output_opts_file_pxe_port(self): expected = ( 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:dns-server,8.8.8.8\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:classless-static-route,20.0.0.1/24,20.0.0.1,' '0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' '249,20.0.0.1/24,20.0.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:router,192.168.0.1\n' 'tag:port-eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:tftp-server,192.168.0.3\n' 'tag:port-eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:server-ip-address,192.168.0.2\n' 'tag:port-eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:nd98,option-nondigit-98\n' 'tag:port-eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' '99,option-99\n' 'tag:port-eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:bootfile-name,pxelinux.0').lstrip() self._test_output_opts_file(expected, FakeV4NetworkPxePort()) def test_output_opts_file_multiple_tags(self): expected = ( 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:dns-server,8.8.8.8\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:classless-static-route,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' '249,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:router,192.168.0.1\n' 'tag:port-eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'tag:ipxe,option:bootfile-name,pxelinux.0') expected = expected.lstrip() with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn: conf_fn.return_value = '/foo/opts' dm = self._get_dnsmasq(FakeV4NetworkMultipleTags()) dm._output_opts_file() self.safe.assert_called_once_with('/foo/opts', expected) @mock.patch('neutron.agent.linux.dhcp.Dnsmasq.get_conf_file_name', return_value='/foo/opts') def test_output_opts_file_pxe_ipv6_port_with_ipv6_opt(self, mock_get_conf_fn): expected = ( 'tag:subnet-ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option6:dns-server,[2001:0200:feed:7ac0::1]\n' 'tag:subnet-ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option6:domain-search,openstacklocal\n' 'tag:port-hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh,' 'option6:tftp-server,2001:192:168::1\n' 'tag:port-hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh,' 'option6:nd98,option-nondigit-98\n' 'tag:port-hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh,' 'option6:99,option-99\n' 'tag:port-hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh,' 'option6:bootfile-name,pxelinux.0') expected = expected.lstrip() dm = self._get_dnsmasq(FakeV6NetworkPxePort()) dm._output_opts_file() self.safe.assert_called_once_with('/foo/opts', expected) @mock.patch('neutron.agent.linux.dhcp.Dnsmasq.get_conf_file_name', return_value='/foo/opts') def test_output_opts_file_pxe_ipv6_port_with_ipv4_opt(self, mock_get_conf_fn): expected = ( 'tag:subnet-ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option6:dns-server,[2001:0200:feed:7ac0::1]\n' 'tag:subnet-ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option6:domain-search,openstacklocal\n' 'tag:port-hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh,' 'option6:bootfile-name,pxelinux.0') expected = expected.lstrip() dm = 
self._get_dnsmasq(FakeV6NetworkPxePortWrongOptVersion()) dm._output_opts_file() self.safe.assert_called_once_with('/foo/opts', expected) def test_output_opts_file_ipv6_address_mode_unset(self): fake_v6 = '2001:0200:feed:7ac0::1' expected = ( 'tag:subnet-ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option6:dns-server,%s\n' 'tag:subnet-ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option6:domain-search,openstacklocal').lstrip() % ( '[' + fake_v6 + ']') self._test_output_opts_file(expected, FakeV6Network()) def test_output_opts_file_ipv6_address_force_metadata(self): fake_v6 = '2001:0200:feed:7ac0::1' expected = ( 'tag:subnet-ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option6:dns-server,%s\n' 'tag:subnet-ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option6:domain-search,openstacklocal').lstrip() % ( '[' + fake_v6 + ']') self.conf.force_metadata = True self._test_output_opts_file(expected, FakeV6Network()) @property def _test_no_dns_domain_alloc_data(self): exp_host_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/host' exp_host_data = ('00:00:80:aa:bb:cc,host-192-168-0-2,' '192.168.0.2\n' '00:00:f3:aa:bb:cc,host-fdca-3ba5-a17a-4ba3--2,' '[fdca:3ba5:a17a:4ba3::2]\n' '00:00:0f:aa:bb:cc,host-192-168-0-3,' '192.168.0.3\n' '00:00:0f:aa:bb:cc,host-fdca-3ba5-a17a-4ba3--3,' '[fdca:3ba5:a17a:4ba3::3]\n' '00:00:0f:rr:rr:rr,host-192-168-0-1,' '192.168.0.1\n').lstrip() exp_addn_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/addn_hosts' exp_addn_data = ( '192.168.0.2\t' 'host-192-168-0-2 host-192-168-0-2\n' 'fdca:3ba5:a17a:4ba3::2\t' 'host-fdca-3ba5-a17a-4ba3--2 ' 'host-fdca-3ba5-a17a-4ba3--2\n' '192.168.0.3\thost-192-168-0-3 ' 'host-192-168-0-3\n' 'fdca:3ba5:a17a:4ba3::3\t' 'host-fdca-3ba5-a17a-4ba3--3 ' 'host-fdca-3ba5-a17a-4ba3--3\n' '192.168.0.1\t' 'host-192-168-0-1 ' 'host-192-168-0-1\n' ).lstrip() return (exp_host_name, exp_host_data, exp_addn_name, exp_addn_data) @property def _test_reload_allocation_data(self): exp_host_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/host' exp_host_data = ('00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal.,' '192.168.0.2\n' '00:00:f3:aa:bb:cc,host-fdca-3ba5-a17a-4ba3--2.' 'openstacklocal.,[fdca:3ba5:a17a:4ba3::2]\n' '00:00:0f:aa:bb:cc,host-192-168-0-3.openstacklocal.,' '192.168.0.3\n' '00:00:0f:aa:bb:cc,host-fdca-3ba5-a17a-4ba3--3.' 'openstacklocal.,[fdca:3ba5:a17a:4ba3::3]\n' '00:00:0f:rr:rr:rr,host-192-168-0-1.openstacklocal.,' '192.168.0.1\n').lstrip() exp_addn_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/addn_hosts' exp_addn_data = ( '192.168.0.2\t' 'host-192-168-0-2.openstacklocal. host-192-168-0-2\n' 'fdca:3ba5:a17a:4ba3::2\t' 'host-fdca-3ba5-a17a-4ba3--2.openstacklocal. ' 'host-fdca-3ba5-a17a-4ba3--2\n' '192.168.0.3\thost-192-168-0-3.openstacklocal. ' 'host-192-168-0-3\n' 'fdca:3ba5:a17a:4ba3::3\t' 'host-fdca-3ba5-a17a-4ba3--3.openstacklocal. ' 'host-fdca-3ba5-a17a-4ba3--3\n' '192.168.0.1\t' 'host-192-168-0-1.openstacklocal. 
' 'host-192-168-0-1\n' ).lstrip() exp_opt_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/opts' fake_v6 = '2001:0200:feed:7ac0::1' exp_opt_data = ( 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:dns-server,8.8.8.8\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:classless-static-route,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' '249,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:router,192.168.0.1\n' 'tag:subnet-ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option6:dns-server,%s\n' 'tag:subnet-ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option6:domain-search,openstacklocal').lstrip() % ( '[' + fake_v6 + ']') return (exp_host_name, exp_host_data, exp_addn_name, exp_addn_data, exp_opt_name, exp_opt_data,) def test_reload_allocations_no_interface(self): net = FakeDualNetwork() ipath = '/dhcp/%s/interface' % net.id self.useFixture(lib_fixtures.OpenFixture(ipath)) test_pm = mock.Mock() dm = self._get_dnsmasq(net, test_pm) dm.reload_allocations() self.assertFalse(test_pm.register.called) def test_reload_allocations(self): (exp_host_name, exp_host_data, exp_addn_name, exp_addn_data, exp_opt_name, exp_opt_data,) = self._test_reload_allocation_data net = FakeDualNetwork() hpath = '/dhcp/%s/host' % net.id ipath = '/dhcp/%s/interface' % net.id self.useFixture(lib_fixtures.OpenFixture(hpath)) self.useFixture(lib_fixtures.OpenFixture(ipath, 'tapdancingmice')) test_pm = mock.Mock() dm = self._get_dnsmasq(net, test_pm) dm.reload_allocations() self.assertTrue(test_pm.register.called) self.external_process().enable.assert_called_once_with( ensure_active=True, reload_cfg=True) self.safe.assert_has_calls([ mock.call(exp_host_name, exp_host_data), mock.call(exp_addn_name, exp_addn_data), mock.call(exp_opt_name, exp_opt_data), ]) def test_release_unused_leases(self): dnsmasq = self._get_dnsmasq(FakeDualNetwork()) ip1 = '192.168.1.2' mac1 = '00:00:80:aa:bb:cc' ip2 = '192.168.1.3' mac2 = '00:00:80:cc:bb:aa' ip3 = '0001:0002:0003:0004:0005:0006:0007:0008' mac3 = '00:00:80:bb:aa:cc' old_leases = {(ip1, mac1, None), (ip2, mac2, None), (ip3, mac3, None)} dnsmasq._read_hosts_file_leases = mock.Mock(return_value=old_leases) # Because the lease release code could fire multiple times, the # second read of the lease file must not have the entries that # would have been released. 
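# --- Illustrative aside (not driver code): the side_effect list mocked
# just below models exactly this release-then-re-read loop, which the
# agent is assumed to run for each stale lease; names and the retry
# bound are placeholders, and the real logic lives in
# neutron.agent.linux.dhcp.Dnsmasq._release_unused_leases. ---
def release_until_gone(ip, mac, client_id, server_id, iaid,
                       release, read_leases, tries=3):
    """Release a lease, then re-read the leases file to confirm it left."""
    for _ in range(tries):
        release(mac, ip, client_id, server_id, iaid)
        if ip not in read_leases():
            return True
    # The entry survived every attempt; the driver logs a warning here,
    # which _test_release_unused_leases_one_lease_mult_times checks below.
    return False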
dnsmasq._read_leases_file_leases = mock.Mock( side_effect=[{ip1: {'iaid': mac1, 'client_id': 'client_id', 'server_id': 'server_id'}, ip2: {'iaid': mac2, 'client_id': 'client_id', 'server_id': 'server_id'}, ip3: {'iaid': 0xff, 'client_id': 'client_id', 'server_id': 'server_id'} }, {}]) dnsmasq._output_hosts_file = mock.Mock() dnsmasq._release_lease = mock.Mock() dnsmasq.network.ports = [] dnsmasq.device_manager.unplug = mock.Mock() dnsmasq._release_unused_leases() dnsmasq._release_lease.assert_has_calls([mock.call(mac1, ip1, constants.IP_VERSION_4, None, 'server_id', mac1), mock.call(mac2, ip2, constants.IP_VERSION_4, None, 'server_id', mac2), mock.call(mac3, ip3, constants.IP_VERSION_6, 'client_id', 'server_id', 0xff), ], any_order=True) def test_release_for_ipv6_lease(self): dnsmasq = self._get_dnsmasq(FakeDualNetwork()) ip1 = 'fdca:3ba5:a17a::1' mac1 = '00:00:80:aa:bb:cc' ip2 = '192.168.1.3' mac2 = '00:00:80:cc:bb:aa' old_leases = set([(ip1, mac1, 'client_id'), (ip2, mac2, None)]) dnsmasq._read_hosts_file_leases = mock.Mock(return_value=old_leases) # Because the lease release code could fire multiple times, the # second read of the lease file must not have the entries that # would have been released. dnsmasq._read_leases_file_leases = mock.Mock( side_effect=[{ip1: {'iaid': 0xff, 'client_id': 'client_id', 'server_id': 'server_id'}, ip2: {'iaid': mac2, 'client_id': None, 'server_id': 'server_id'} }, {}]) ipw = mock.patch( 'neutron.agent.linux.ip_lib.IpNetnsCommand.execute').start() dnsmasq._release_unused_leases() # Verify that dhcp_release is called both for ipv4 and ipv6 addresses. self.assertEqual(2, ipw.call_count) ipw.assert_has_calls([mock.call(['dhcp_release6', '--iface', None, '--ip', ip1, '--client-id', 'client_id', '--server-id', 'server_id', '--iaid', 0xff], run_as_root=True)]) ipw.assert_has_calls([mock.call(['dhcp_release', None, ip2, mac2], run_as_root=True), ]) def test_release_for_ipv6_lease_no_dhcp_release6(self): dnsmasq = self._get_dnsmasq(FakeDualNetwork()) ip1 = 'fdca:3ba5:a17a::1' mac1 = '00:00:80:aa:bb:cc' old_leases = set([(ip1, mac1, None)]) dnsmasq._read_hosts_file_leases = mock.Mock(return_value=old_leases) dnsmasq._read_leases_file_leases = mock.Mock( return_value={'fdca:3ba5:a17a::1': {'iaid': 0xff, 'client_id': 'client_id', 'server_id': 'server_id'} }) ipw = mock.patch( 'neutron.agent.linux.ip_lib.IpNetnsCommand.execute').start() dnsmasq._IS_DHCP_RELEASE6_SUPPORTED = False dnsmasq._release_unused_leases() # Verify that dhcp_release6 is not called when it is not present ipw.assert_not_called() def test_release_unused_leases_with_dhcp_port(self): dnsmasq = self._get_dnsmasq(FakeNetworkDhcpPort()) ip1 = '192.168.1.2' mac1 = '00:00:80:aa:bb:cc' ip2 = '192.168.1.3' mac2 = '00:00:80:cc:bb:aa' ip6 = '2001:0db8:11a3:09d7:1f34:8a2e:07a0:765d' old_leases = set([(ip1, mac1, None), (ip2, mac2, None)]) dnsmasq._read_hosts_file_leases = mock.Mock(return_value=old_leases) dnsmasq._read_leases_file_leases = mock.Mock( return_value={ip6: {'iaid': 0xff, 'client_id': 'client_id', 'server_id': 'server_id'} }) dnsmasq._output_hosts_file = mock.Mock() dnsmasq._release_lease = mock.Mock() dnsmasq.device_manager.get_device_id = mock.Mock( return_value='fake_dhcp_port') dnsmasq._release_unused_leases() self.assertFalse( dnsmasq.device_manager.unplug.called) self.assertFalse( dnsmasq.device_manager.driver.unplug.called) def test_release_unused_leases_with_client_id(self): dnsmasq = self._get_dnsmasq(FakeDualNetwork()) ip1 = '192.168.1.2' mac1 = '00:00:80:aa:bb:cc' client_id1 = 
'client1' ip2 = '192.168.1.3' mac2 = '00:00:80:cc:bb:aa' client_id2 = 'client2' ip6 = '2001:0db8:11a3:09d7:1f34:8a2e:07a0:765d' old_leases = set([(ip1, mac1, client_id1), (ip2, mac2, client_id2)]) dnsmasq._read_hosts_file_leases = mock.Mock(return_value=old_leases) # Because the lease release code could fire multiple times, the # second read of the lease file must not have the entries that # would have been released. dnsmasq._read_leases_file_leases = mock.Mock( side_effect=[{ip6: {'iaid': 0xff, 'client_id': 'client_id', 'server_id': 'server_id'}, ip1: {'iaid': mac1, 'client_id': client_id1, 'server_id': 'server_id'}, ip2: {'iaid': mac2, 'client_id': client_id2, 'server_id': 'server_id'} }, {ip6: {'iaid': 0xff, 'client_id': 'client_id', 'server_id': 'server_id'} }]) dnsmasq._output_hosts_file = mock.Mock() dnsmasq._release_lease = mock.Mock() dnsmasq.network.ports = [] dnsmasq._release_unused_leases() dnsmasq._release_lease.assert_has_calls( [mock.call(mac1, ip1, constants.IP_VERSION_4, client_id1, 'server_id', mac1), mock.call(mac2, ip2, constants.IP_VERSION_4, client_id2, 'server_id', mac2)], any_order=True) def test_release_unused_leases_one_lease(self): dnsmasq = self._get_dnsmasq(FakeDualNetwork()) ip1 = '192.168.0.2' mac1 = '00:00:80:aa:bb:cc' ip2 = '192.168.0.3' mac2 = '00:00:80:cc:bb:aa' ip6 = '2001:0db8:11a3:09d7:1f34:8a2e:07a0:765d' old_leases = set([(ip1, mac1, None), (ip2, mac2, None)]) dnsmasq._read_hosts_file_leases = mock.Mock(return_value=old_leases) # Because the lease release code could fire multiple times, the # second read of the lease file must not have the entries that # would have been released. dnsmasq._read_leases_file_leases = mock.Mock( side_effect=[{ip6: {'iaid': 0xff, 'client_id': 'client_id', 'server_id': 'server_id'}, ip2: {'iaid': mac2, 'client_id': None, 'server_id': 'server_id'} }, {ip6: {'iaid': 0xff, 'client_id': 'client_id', 'server_id': 'server_id'} }]) dnsmasq._output_hosts_file = mock.Mock() dnsmasq._release_lease = mock.Mock() dnsmasq.network.ports = [FakePort1()] dnsmasq._release_unused_leases() dnsmasq._release_lease.assert_called_once_with( mac2, ip2, constants.IP_VERSION_4, None, 'server_id', mac2) def test_release_unused_leases_one_lease_with_client_id(self): dnsmasq = self._get_dnsmasq(FakeDualNetwork()) ip1 = '192.168.0.2' mac1 = '00:00:80:aa:bb:cc' client_id1 = 'client1' ip2 = '192.168.0.5' mac2 = '00:00:0f:aa:bb:55' client_id2 = 'test5' ip6 = '2001:0db8:11a3:09d7:1f34:8a2e:07a0:765d' old_leases = set([(ip1, mac1, client_id1), (ip2, mac2, client_id2)]) dnsmasq._read_hosts_file_leases = mock.Mock(return_value=old_leases) dnsmasq._output_hosts_file = mock.Mock() # Because the lease release code could fire multiple times, the # second read of the lease file must not have the entries that # would have been released. 
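# (Aside, before the side_effect mock that the comment above introduces:
# the two argv shapes asserted in test_release_for_ipv6_lease further up
# can be built as sketched here. This mirrors those assertions only; the
# optional trailing client-id for dhcp_release is an assumption from the
# dnsmasq-utils man page, not something these tests exercise.)
def release_argv(iface, ip, mac=None, client_id=None, server_id=None,
                 iaid=None, is_v6=False):
    if is_v6:
        # dhcp_release6 takes long options, as asserted above
        return ['dhcp_release6', '--iface', iface, '--ip', ip,
                '--client-id', client_id, '--server-id', server_id,
                '--iaid', iaid]
    argv = ['dhcp_release', iface, ip, mac]
    if client_id:
        argv.append(client_id)
    return argv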
dnsmasq._read_leases_file_leases = mock.Mock( side_effect=[{ip6: {'iaid': 0xff, 'client_id': 'client_id', 'server_id': 'server_id'}, ip1: {'iaid': mac1, 'client_id': client_id1, 'server_id': 'server_id'} }, {ip6: {'iaid': 0xff, 'client_id': 'client_id', 'server_id': 'server_id'} }]) dnsmasq._release_lease = mock.Mock() dnsmasq.network.ports = [FakePort5()] dnsmasq._release_unused_leases() dnsmasq._release_lease.assert_called_once_with( mac1, ip1, constants.IP_VERSION_4, client_id1, 'server_id', mac1) def test_release_unused_leases_one_lease_with_client_id_none(self): dnsmasq = self._get_dnsmasq(FakeDualNetwork()) ip1 = '192.168.0.2' mac1 = '00:00:80:aa:bb:cc' client_id1 = 'client1' ip2 = '192.168.0.4' mac2 = '00:16:3E:C2:77:1D' client_id2 = 'test4' ip6 = '2001:0db8:11a3:09d7:1f34:8a2e:07a0:765d' old_leases = set([(ip1, mac1, client_id1), (ip2, mac2, None)]) dnsmasq._read_hosts_file_leases = mock.Mock(return_value=old_leases) dnsmasq._output_hosts_file = mock.Mock() # Because the lease release code could fire multiple times, the # second read of the lease file must not have the entries that # would have been released. dnsmasq._read_leases_file_leases = mock.Mock( side_effect=[{ip6: {'iaid': 0xff, 'client_id': 'client_id', 'server_id': 'server_id'}, ip1: {'iaid': mac1, 'client_id': client_id1, 'server_id': 'server_id'}, ip2: {'iaid': mac2, 'client_id': client_id2, 'server_id': 'server_id'} }, {ip6: {'iaid': 0xff, 'client_id': 'client_id', 'server_id': 'server_id'}, ip2: {'iaid': mac2, 'client_id': client_id2, 'server_id': 'server_id'} }]) dnsmasq._release_lease = mock.Mock() dnsmasq.network.ports = [FakePort4()] dnsmasq._release_unused_leases() dnsmasq._release_lease.assert_called_once_with( mac1, ip1, constants.IP_VERSION_4, client_id1, 'server_id', mac1) def test_release_unused_leases_one_lease_from_leases_file(self): # leases file has a stale entry that is not in the host file dnsmasq = self._get_dnsmasq(FakeDualNetwork()) ip1 = '192.168.0.2' mac1 = '00:00:80:aa:bb:cc' ip2 = '192.168.0.3' mac2 = '00:00:80:cc:bb:aa' ip6 = '2001:0db8:11a3:09d7:1f34:8a2e:07a0:765d' old_leases = set([(ip1, mac1, None)]) dnsmasq._read_hosts_file_leases = mock.Mock(return_value=old_leases) # Because the lease release code could fire multiple times, the # second read of the lease file must not have the entries that # would have been released. 
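# (Aside: the comment above again introduces a side_effect mock. For
# reference, the on-disk dnsmasq leases format that the
# _test_read_leases_file_leases cases further down feed in can be parsed
# along these lines -- an illustrative sketch, not the driver's parser.)
def parse_leases_file(text):
    """Map ip -> {'iaid', 'client_id', 'server_id'} for v4 and v6 rows."""
    leases, server_duid = {}, None
    for line in text.splitlines():
        parts = line.split()
        if not parts:
            continue
        if parts[0] == 'duid':
            server_duid = parts[1]  # applies to the v6 rows that follow
            continue
        if len(parts) != 5:
            continue  # malformed row; the driver logs a warning instead
        _expiry, iaid, ip, _hostname, client_id = parts
        is_v6 = ip.startswith('[')  # IPv6 addresses are bracketed
        leases[ip[1:-1] if is_v6 else ip] = {
            'iaid': iaid,
            'client_id': client_id,
            'server_id': server_duid if is_v6 else None,
        }
    return leases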
dnsmasq._read_leases_file_leases = mock.Mock( side_effect=[{ip6: {'iaid': 0xff, 'client_id': 'client_id', 'server_id': 'server_id'}, ip2: {'iaid': mac2, 'client_id': None, 'server_id': 'server_id'} }, {ip6: {'iaid': 0xff, 'client_id': 'client_id', 'server_id': 'server_id'} }]) dnsmasq._output_hosts_file = mock.Mock() dnsmasq._release_lease = mock.Mock() dnsmasq.network.ports = [FakePort1()] dnsmasq._release_unused_leases() dnsmasq._release_lease.assert_called_once_with( mac2, ip2, constants.IP_VERSION_4, None, 'server_id', mac2) @mock.patch.object(dhcp.LOG, 'warn') def _test_release_unused_leases_one_lease_mult_times(self, mock_log_warn, removed): # Simulate a dhcp_release failure where the lease remains in the # lease file despite multiple dhcp_release calls dnsmasq = self._get_dnsmasq(FakeDualNetwork()) ip1 = '192.168.0.2' mac1 = '00:00:80:aa:bb:cc' ip2 = '192.168.0.3' mac2 = '00:00:80:cc:bb:aa' ip6 = '2001:0db8:11a3:09d7:1f34:8a2e:07a0:765d' old_leases = set([(ip1, mac1, None), (ip2, mac2, None)]) dnsmasq._read_hosts_file_leases = mock.Mock(return_value=old_leases) # Because the lease release code could fire multiple times, the # second and subsequent reads of the lease file must have the # entries that were not released. side_effect = [{ip6: {'iaid': 0xff, 'client_id': 'client_id', 'server_id': 'server_id'}, ip2: {'iaid': mac2, 'client_id': None, 'server_id': 'server_id'} }, {ip6: {'iaid': 0xff, 'client_id': 'client_id', 'server_id': 'server_id'}, ip2: {'iaid': mac2, 'client_id': None, 'server_id': 'server_id'} }, {ip6: {'iaid': 0xff, 'client_id': 'client_id', 'server_id': 'server_id'}, ip2: {'iaid': mac2, 'client_id': None, 'server_id': 'server_id'} }] # entry did/didn't go away after final dhcp_release try if not removed: side_effect.append( {ip6: {'iaid': 0xff, 'client_id': 'client_id', 'server_id': 'server_id'}, ip2: {'iaid': mac2, 'client_id': None, 'server_id': 'server_id'} }) else: side_effect.append({}) dnsmasq._read_leases_file_leases = mock.Mock(side_effect=side_effect) dnsmasq._output_hosts_file = mock.Mock() dnsmasq._release_lease = mock.Mock() dnsmasq.network.ports = [FakePort1()] dnsmasq._release_unused_leases() self.assertEqual(dhcp.DHCP_RELEASE_TRIES, dnsmasq._release_lease.call_count) self.assertEqual(dhcp.DHCP_RELEASE_TRIES + 1, dnsmasq._read_leases_file_leases.call_count) if not removed: self.assertTrue(mock_log_warn.called) def test_release_unused_leases_one_lease_mult_times_not_removed(self): self._test_release_unused_leases_one_lease_mult_times(False) def test_release_unused_leases_one_lease_mult_times_removed(self): self._test_release_unused_leases_one_lease_mult_times(True) def test_read_hosts_file_leases(self): filename = '/path/to/file' lines = ["00:00:80:aa:bb:cc,inst-name,192.168.0.1", "00:00:80:aa:bb:cc,inst-name,[fdca:3ba5:a17a::1]"] mock_open = self.useFixture( lib_fixtures.OpenFixture(filename, '\n'.join(lines))).mock_open dnsmasq = self._get_dnsmasq(FakeDualNetwork()) leases = dnsmasq._read_hosts_file_leases(filename) self.assertEqual(set([("192.168.0.1", "00:00:80:aa:bb:cc", None), ("fdca:3ba5:a17a::1", "00:00:80:aa:bb:cc", None)]), leases) mock_open.assert_called_once_with(filename) def test_read_hosts_file_leases_with_client_id(self): filename = '/path/to/file' lines = ["00:00:80:aa:bb:cc,id:client1,inst-name,192.168.0.1", "00:00:80:aa:bb:cc,id:client2,inst-name," "[fdca:3ba5:a17a::1]"] mock_open = self.useFixture( lib_fixtures.OpenFixture(filename, '\n'.join(lines))).mock_open dnsmasq = self._get_dnsmasq(FakeDualNetwork()) leases = 
dnsmasq._read_hosts_file_leases(filename) self.assertEqual(set([("192.168.0.1", "00:00:80:aa:bb:cc", 'client1'), ("fdca:3ba5:a17a::1", "00:00:80:aa:bb:cc", 'client2')]), leases) mock_open.assert_called_once_with(filename) def test_read_hosts_file_leases_with_stateless_IPv6_tag(self): filename = self.get_temp_file_path('leases') with open(filename, "w") as leasesfile: lines = [ "00:00:80:aa:bb:cc,id:client1,inst-name,192.168.0.1\n", "00:00:80:aa:bb:cc,set:ccccccccc-cccc-cccc-cccc-cccccccc\n", "00:00:80:aa:bb:cc,id:client2,inst-name,[fdca:3ba5:a17a::1]\n"] for line in lines: leasesfile.write(line) dnsmasq = self._get_dnsmasq(FakeDualNetwork()) leases = dnsmasq._read_hosts_file_leases(filename) self.assertEqual(set([("192.168.0.1", "00:00:80:aa:bb:cc", 'client1'), ("fdca:3ba5:a17a::1", "00:00:80:aa:bb:cc", 'client2')]), leases) def _test_read_leases_file_leases(self, ip_version, add_bad_line=False): filename = '/path/to/file' lines = [ "1472673289 aa:bb:cc:00:00:02 192.168.1.2 host-192-168-1-2 *", "1472673289 aa:bb:cc:00:00:03 192.168.1.3 host-192-168-1-3 *", "1472673289 aa:bb:cc:00:00:04 192.168.1.4 host-192-168-1-4 *", "duid 00:01:00:01:02:03:04:05:06:07:08:09:0a:0b", "1472597740 1044800001 [2001:DB8::a] host-2001-db8--a " "00:04:4a:d0:d2:34:19:2b:49:08:84:e8:34:bd:0c:dc:b9:3b", "1472597823 1044800002 [2001:DB8::b] host-2001-db8--b " "00:04:ce:96:53:3d:f2:c2:4c:4c:81:7d:db:c9:8d:d2:74:22:3b:0a", "1472599048 1044800003 [2001:DB8::c] host-2001-db8--c " "00:04:4f:f0:cd:ca:5e:77:41:bc:9d:7f:5c:33:31:37:5d:80:77:b4" ] bad_line = '1472673289 aa:bb:cc:00:00:05 192.168.1.5 host-192.168-1-5' if add_bad_line: lines.append(bad_line) mock_open = self.useFixture( lib_fixtures.OpenFixture(filename, '\n'.join(lines))).mock_open dnsmasq = self._get_dnsmasq(FakeDualNetwork()) with mock.patch('os.path.exists', return_value=True), \ mock.patch.object(dhcp.LOG, 'warning') as mock_log_warn: leases = dnsmasq._read_leases_file_leases(filename, ip_version) server_id = '00:01:00:01:02:03:04:05:06:07:08:09:0a:0b' entry1 = {'iaid': '1044800001', 'client_id': '00:04:4a:d0:d2:34:19:2b:49:08:84:' 'e8:34:bd:0c:dc:b9:3b', 'server_id': server_id } entry2 = {'iaid': '1044800002', 'client_id': '00:04:ce:96:53:3d:f2:c2:4c:4c:81:' '7d:db:c9:8d:d2:74:22:3b:0a', 'server_id': server_id } entry3 = {'iaid': '1044800003', 'client_id': '00:04:4f:f0:cd:ca:5e:77:41:bc:9d:' '7f:5c:33:31:37:5d:80:77:b4', 'server_id': server_id } v6_expected = {'2001:DB8::a': entry1, '2001:DB8::b': entry2, '2001:DB8::c': entry3 } entry4 = {'iaid': 'aa:bb:cc:00:00:02', 'client_id': '*', 'server_id': None } entry5 = {'iaid': 'aa:bb:cc:00:00:03', 'client_id': '*', 'server_id': None } entry6 = {'iaid': 'aa:bb:cc:00:00:04', 'client_id': '*', 'server_id': None } v4_expected = {'192.168.1.2': entry4, '192.168.1.3': entry5, '192.168.1.4': entry6 } expected = {} if not ip_version or ip_version == constants.IP_VERSION_6: expected.update(v6_expected) if not ip_version or ip_version == constants.IP_VERSION_4: expected.update(v4_expected) mock_open.assert_called_once_with(filename) self.assertEqual(expected, leases) if add_bad_line: self.assertTrue(mock_log_warn.called) def test_read_v6_leases_file_leases(self): self._test_read_leases_file_leases(constants.IP_VERSION_6) def test_read_v4_leases_file_leases(self): self._test_read_leases_file_leases(constants.IP_VERSION_4) def test_read_all_leases_file_leases(self): self._test_read_leases_file_leases(None) def test_read_all_leases_file_leases_with_bad_line(self): self._test_read_leases_file_leases(None, True) def 
test_make_subnet_interface_ip_map(self): with mock.patch('neutron.agent.linux.ip_lib.' 'get_devices_with_ip') as list_mock: list_mock.return_value = [{'cidr': '192.168.0.1/24'}] dm = self._get_dnsmasq(FakeDualNetwork()) self.assertEqual( dm._make_subnet_interface_ip_map(), {FakeV4Subnet().id: '192.168.0.1'} ) def test_remove_config_files(self): net = FakeV4Network() path = '/opt/data/neutron/dhcp' self.conf.dhcp_confs = path lp = LocalChild(self.conf, net) lp._remove_config_files() self.rmtree.assert_called_once_with(os.path.join(path, net.id), ignore_errors=True) def test_existing_dhcp_networks(self): path = '/opt/data/neutron/dhcp' self.conf.dhcp_confs = path cases = { # network_uuid --> is_dhcp_alive? 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa': True, 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb': False, 'not_uuid_like_name': True } def active_fake(self, instance, cls): return cases[instance.network.id] with mock.patch('os.listdir') as mock_listdir: with mock.patch.object(dhcp.Dnsmasq, 'active') as mock_active: mock_active.__get__ = active_fake mock_listdir.return_value = list(cases) result = dhcp.Dnsmasq.existing_dhcp_networks(self.conf) mock_listdir.assert_called_once_with(path) self.assertItemsEqual(['aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'], result) def test__output_hosts_file_log_only_twice(self): dm = self._get_dnsmasq(FakeDualStackNetworkSingleDHCP()) with mock.patch.object(dhcp, 'LOG') as logger: logger.process.return_value = ('fake_message', {}) dm._output_hosts_file() # The method logs twice, at the start and at the end. There should be # no other logs, no matter how many hosts there are to dump in the # file. self.assertEqual(2, len(logger.method_calls)) def test_only_populates_dhcp_enabled_subnets(self): exp_host_name = '/dhcp/eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee/host' exp_host_data = ('00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal.,' '192.168.0.2\n' '00:16:3E:C2:77:1D,host-192-168-0-4.openstacklocal.,' '192.168.0.4\n' '00:00:0f:rr:rr:rr,host-192-168-0-1.openstacklocal.,' '192.168.0.1\n').lstrip() dm = self._get_dnsmasq(FakeDualStackNetworkSingleDHCP()) dm._output_hosts_file() self.safe.assert_has_calls([mock.call(exp_host_name, exp_host_data)]) def test_only_populates_dhcp_client_id(self): exp_host_name = '/dhcp/aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa/host' exp_host_data = ( '00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal.,' '192.168.0.2\n' '00:00:0f:aa:bb:55,id:test5,' 'host-192-168-0-5.openstacklocal.,' '192.168.0.5\n' '00:00:0f:aa:bb:66,id:test6,' 'host-192-168-0-6.openstacklocal.,192.168.0.6,' 'set:port-ccccccccc-cccc-cccc-cccc-ccccccccc\n').lstrip() dm = self._get_dnsmasq(FakeV4NetworkClientId()) dm._output_hosts_file() self.safe.assert_has_calls([mock.call(exp_host_name, exp_host_data)]) def test_only_populates_dhcp_enabled_subnet_on_a_network(self): exp_host_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/host' exp_host_data = ('00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal.,' '192.168.0.2\n' '00:00:f3:aa:bb:cc,host-192-168-0-3.openstacklocal.,' '192.168.0.3\n' '00:00:0f:aa:bb:cc,host-192-168-0-4.openstacklocal.,' '192.168.0.4\n' '00:00:0f:rr:rr:rr,host-192-168-0-1.openstacklocal.,' '192.168.0.1\n').lstrip() dm = self._get_dnsmasq(FakeDualNetworkSingleDHCP()) dm._output_hosts_file() self.safe.assert_has_calls([mock.call(exp_host_name, exp_host_data)]) def test_host_and_opts_file_on_stateless_dhcpv6_network(self): exp_host_name = '/dhcp/bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb/host' exp_host_data = ( '00:16:3e:c2:77:1d,' 
'set:port-hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh\n').lstrip() exp_opt_name = '/dhcp/bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb/opts' exp_opt_data = ('tag:subnet-eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option6:domain-search,openstacklocal\n' 'tag:port-hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh,' 'option6:dns-server,ffea:3ba5:a17a:4ba3::100').lstrip() dm = self._get_dnsmasq(FakeV6NetworkStatelessDHCP()) dm._output_hosts_file() dm._output_opts_file() self.safe.assert_has_calls([mock.call(exp_host_name, exp_host_data), mock.call(exp_opt_name, exp_opt_data)]) def test_host_and_opts_file_on_stateful_dhcpv6_same_subnet_fixedips(self): self.conf.set_override('dnsmasq_enable_addr6_list', True) exp_host_name = '/dhcp/bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb/host' exp_host_data = ( '00:00:f3:aa:bb:cc,host-fdca-3ba5-a17a-4ba3--2.openstacklocal.,' '[fdca:3ba5:a17a:4ba3::2],[fdca:3ba5:a17a:4ba3::4]\n'.lstrip()) exp_opt_name = '/dhcp/bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb/opts' exp_opt_data = ('tag:subnet-ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option6:dns-server,[2001:0200:feed:7ac0::1]\n' 'tag:subnet-ffffffff-ffff-ffff-ffff-ffffffffffff,' 'option6:domain-search,openstacklocal').lstrip() dm = self._get_dnsmasq(FakeV6NetworkStatefulDHCPSameSubnetFixedIps()) dm._output_hosts_file() dm._output_opts_file() self.safe.assert_has_calls([mock.call(exp_host_name, exp_host_data), mock.call(exp_opt_name, exp_opt_data)]) def test_host_and_opts_file_on_stateless_dhcpv6_network_no_dns(self): exp_host_name = '/dhcp/bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb/host' exp_opt_name = '/dhcp/bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb/opts' exp_opt_data = ('tag:subnet-eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option6:dns-server\n' 'tag:subnet-eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option6:domain-search,openstacklocal').lstrip() dm = self._get_dnsmasq(FakeV6NetworkStatelessDHCPNoDnsProvided()) dm._output_hosts_file() dm._output_opts_file() self.safe.assert_has_calls([mock.call(exp_host_name, ''), mock.call(exp_opt_name, exp_opt_data)]) def test_host_file_on_net_with_v6_slaac_and_v4(self): exp_host_name = '/dhcp/eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee/host' exp_host_data = ( '00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal.,192.168.0.2,' 'set:port-eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee\n' '00:16:3E:C2:77:1D,host-192-168-0-4.openstacklocal.,192.168.0.4,' 'set:port-gggggggg-gggg-gggg-gggg-gggggggggggg\n00:00:0f:rr:rr:rr,' 'host-192-168-0-1.openstacklocal.,192.168.0.1,' 'set:port-rrrrrrrr-rrrr-rrrr-rrrr-rrrrrrrrrrrr\n').lstrip() dm = self._get_dnsmasq(FakeDualStackNetworkingSingleDHCPTags()) dm._output_hosts_file() self.safe.assert_has_calls([mock.call(exp_host_name, exp_host_data)]) def test_host_and_opts_file_on_net_with_V6_stateless_and_V4_subnets( self): exp_host_name = '/dhcp/bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb/host' exp_host_data = ( '00:16:3e:c2:77:1d,set:port-hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh\n' '00:16:3e:c2:77:1d,host-192-168-0-3.openstacklocal.,' '192.168.0.3,set:port-hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh\n' '00:00:0f:rr:rr:rr,' 'host-192-168-0-1.openstacklocal.,192.168.0.1\n').lstrip() exp_opt_name = '/dhcp/bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb/opts' exp_opt_data = ( 'tag:subnet-eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option6:domain-search,openstacklocal\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:dns-server,8.8.8.8\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:classless-static-route,20.0.0.1/24,20.0.0.1,' '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 
'249,20.0.0.1/24,20.0.0.1,169.254.169.254/32,' '192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:subnet-dddddddd-dddd-dddd-dddd-dddddddddddd,' 'option:router,192.168.0.1\n' 'tag:port-hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh,' 'option6:dns-server,ffea:3ba5:a17a:4ba3::100').lstrip() dm = self._get_dnsmasq(FakeNetworkWithV6SatelessAndV4DHCPSubnets()) dm._output_hosts_file() dm._output_opts_file() self.safe.assert_has_calls([mock.call(exp_host_name, exp_host_data), mock.call(exp_opt_name, exp_opt_data)]) def test_has_metadata_subnet_returns_true(self): self.assertTrue(dhcp.Dnsmasq.has_metadata_subnet( [FakeV4MetadataSubnet()])) def test_has_metadata_subnet_returns_false(self): self.assertFalse(dhcp.Dnsmasq.has_metadata_subnet( [FakeV4Subnet()])) def test_should_enable_metadata_isolated_network_returns_true(self): self.assertTrue(dhcp.Dnsmasq.should_enable_metadata( self.conf, FakeV4NetworkNoRouter())) def test_should_enable_metadata_non_isolated_network_returns_false(self): self.assertFalse(dhcp.Dnsmasq.should_enable_metadata( self.conf, FakeV4NetworkDistRouter())) def test_should_enable_metadata_isolated_meta_disabled_returns_false(self): self.conf.set_override('enable_isolated_metadata', False) self.assertFalse(dhcp.Dnsmasq.should_enable_metadata( self.conf, FakeV4MetadataNetwork())) def test_should_enable_metadata_with_metadata_network_returns_true(self): self.conf.set_override('enable_metadata_network', True) self.assertTrue(dhcp.Dnsmasq.should_enable_metadata( self.conf, FakeV4MetadataNetwork())) def test_should_force_metadata_returns_true(self): self.conf.set_override("force_metadata", True) self.assertTrue(dhcp.Dnsmasq.should_enable_metadata( self.conf, FakeDualNetworkDualDHCP())) def _test__generate_opts_per_subnet_helper( self, config_opts, expected_mdt_ip, network_class=FakeNetworkDhcpPort): for key, value in config_opts.items(): self.conf.set_override(key, value) dm = self._get_dnsmasq(network_class()) with mock.patch('neutron.agent.linux.ip_lib.' 
'get_devices_with_ip') as list_mock: list_mock.return_value = [{'cidr': alloc.ip_address + '/24'} for alloc in FakeDhcpPort().fixed_ips] options, idx_map = dm._generate_opts_per_subnet() contains_metadata_ip = any(['%s/32' % dhcp.METADATA_DEFAULT_IP in line for line in options]) self.assertEqual(expected_mdt_ip, contains_metadata_ip) def test__generate_opts_per_subnet_no_metadata(self): config = {'enable_isolated_metadata': False, 'force_metadata': False} self._test__generate_opts_per_subnet_helper(config, False) def test__generate_opts_per_subnet_isolated_metadata_with_router(self): config = {'enable_isolated_metadata': True, 'force_metadata': False} self._test__generate_opts_per_subnet_helper(config, True) def test__generate_opts_per_subnet_forced_metadata(self): config = {'enable_isolated_metadata': False, 'force_metadata': True} self._test__generate_opts_per_subnet_helper(config, True) def test__generate_opts_per_subnet_forced_metadata_non_local_subnet(self): config = {'enable_isolated_metadata': False, 'force_metadata': True} self._test__generate_opts_per_subnet_helper( config, True, network_class=FakeNonLocalSubnets) def test_client_id_num(self): dm = self._get_dnsmasq(FakeV4NetworkClientIdNum()) self.assertEqual('test_client_id_num', dm._get_client_id(FakePortWithClientIdNum())) def test_client_id_num_str(self): dm = self._get_dnsmasq(FakeV4NetworkClientIdNumStr()) self.assertEqual('test_client_id_num', dm._get_client_id(FakePortWithClientIdNumStr())) class TestDeviceManager(TestConfBase): def setUp(self): super(TestDeviceManager, self).setUp() ip_lib_patcher = mock.patch('neutron.agent.linux.dhcp.ip_lib') load_interface_driver_patcher = mock.patch( 'neutron.agent.linux.dhcp.agent_common_utils.' 'load_interface_driver') self.mock_ip_lib = ip_lib_patcher.start() self.mock_load_interface_driver = load_interface_driver_patcher.start() def _test_setup(self, load_interface_driver, ip_lib, use_gateway_ips): with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: # Create DeviceManager. self.conf.register_opt(cfg.BoolOpt('enable_isolated_metadata', default=False)) self.conf.register_opt(cfg.BoolOpt('force_metadata', default=False)) plugin = mock.Mock() device = mock.Mock() mock_IPDevice.return_value = device device.route.get_gateway.return_value = None mgr = dhcp.DeviceManager(self.conf, plugin) load_interface_driver.assert_called_with( self.conf, get_networks_callback=plugin.get_networks) # Setup with no existing DHCP port - expect a new DHCP port to # be created. 
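# (For orientation: the plugin call stubbed out via mock_create below is
# handed a request body of roughly this shape. Field names follow the
# test's own DictModel usage; the full schema is an assumption here, and
# every value is an illustrative placeholder.)
example_create_dhcp_port_body = {
    'port': {
        'name': '',
        'admin_state_up': True,
        'device_owner': 'network:dhcp',
        'device_id': '<per-host-dhcp-device-id>',
        'network_id': '<network-uuid>',
        'fixed_ips': [{'subnet_id': '<subnet-uuid>'}],
    },
}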
network = FakeDeviceManagerNetwork() network.tenant_id = 'Tenant A' def mock_create(dict): port = dhcp.DictModel(dict['port']) port.id = 'abcd-123456789' port.mac_address = '00-12-34-56-78-90' port.fixed_ips = [ dhcp.DictModel({'subnet_id': ip['subnet_id'], 'ip_address': 'unique-IP-address'}) for ip in port.fixed_ips ] # server rudely gave us an extra address we didn't ask for port.fixed_ips.append(dhcp.DictModel( {'subnet_id': 'ffffffff-6666-6666-6666-ffffffffffff', 'ip_address': '2003::f816:3eff:fe45:e893'})) return port plugin.create_dhcp_port.side_effect = mock_create mgr.driver.get_device_name.return_value = 'ns-XXX' mgr.driver.use_gateway_ips = use_gateway_ips ip_lib.ensure_device_is_ready.return_value = True mgr.setup(network) plugin.create_dhcp_port.assert_called_with(mock.ANY) mgr.driver.init_l3.assert_called_with('ns-XXX', mock.ANY, namespace='qdhcp-ns') cidrs = set(mgr.driver.init_l3.call_args[0][1]) if use_gateway_ips: self.assertEqual(cidrs, set(['%s/%s' % (s.gateway_ip, s.cidr.split('/')[1]) for s in network.subnets])) else: self.assertEqual(cidrs, set(['unique-IP-address/24', 'unique-IP-address/64'])) # Now call setup again. This time we go through the existing # port code path, and the driver's init_l3 method is called # again. plugin.create_dhcp_port.reset_mock() mgr.driver.init_l3.reset_mock() mgr.setup(network) mgr.driver.init_l3.assert_called_with('ns-XXX', mock.ANY, namespace='qdhcp-ns') cidrs = set(mgr.driver.init_l3.call_args[0][1]) if use_gateway_ips: self.assertEqual(cidrs, set(['%s/%s' % (s.gateway_ip, s.cidr.split('/')[1]) for s in network.subnets])) else: self.assertEqual(cidrs, set(['unique-IP-address/24', 'unique-IP-address/64'])) self.assertFalse(plugin.create_dhcp_port.called) def test_setup_device_manager_dhcp_port_without_gateway_ips(self): self._test_setup(self.mock_load_interface_driver, self.mock_ip_lib, use_gateway_ips=False) def test_setup_device_manager_dhcp_port_with_gateway_ips(self): self._test_setup(self.mock_load_interface_driver, self.mock_ip_lib, use_gateway_ips=True) def _test_setup_reserved(self, enable_isolated_metadata=False, force_metadata=False): with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: # Create DeviceManager. self.conf.register_opt( cfg.BoolOpt('enable_isolated_metadata', default=enable_isolated_metadata)) self.conf.register_opt( cfg.BoolOpt('force_metadata', default=force_metadata)) plugin = mock.Mock() device = mock.Mock() mock_IPDevice.return_value = device device.route.get_gateway.return_value = None mgr = dhcp.DeviceManager(self.conf, plugin) self.mock_load_interface_driver.assert_called_with( self.conf, get_networks_callback=plugin.get_networks) # Setup with a reserved DHCP port. 
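# (A reserved DHCP port is a pre-created port that the agent adopts via
# update_dhcp_port rather than creating a new one. The expect_ips list
# built further down follows this rule; sketched here for clarity, with
# the metadata CIDR passed in as a parameter since its value comes from
# dhcp.METADATA_DEFAULT_CIDR in the real module.)
def expected_init_l3_cidrs(port_cidrs, isolated_metadata, force_metadata,
                           metadata_cidr):
    ips = list(port_cidrs)
    if isolated_metadata or force_metadata:
        # metadata is answered from the DHCP namespace, so the device
        # also carries the metadata address
        ips.append(metadata_cidr)
    return ips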
network = FakeDualNetworkReserved() network.tenant_id = 'Tenant A' reserved_port = network.ports[-1] def mock_update(port_id, dict): port = reserved_port port.network_id = dict['port']['network_id'] port.device_id = dict['port']['device_id'] return port plugin.update_dhcp_port.side_effect = mock_update mgr.driver.get_device_name.return_value = 'ns-XXX' mgr.driver.use_gateway_ips = False self.mock_ip_lib.ensure_device_is_ready.return_value = True mgr.setup(network) plugin.update_dhcp_port.assert_called_with(reserved_port.id, mock.ANY) expect_ips = ['192.168.0.6/24', 'fdca:3ba5:a17a:4ba3::2/64'] if enable_isolated_metadata or force_metadata: expect_ips.append(dhcp.METADATA_DEFAULT_CIDR) mgr.driver.init_l3.assert_called_with('ns-XXX', expect_ips, namespace='qdhcp-ns') def test_setup_reserved_and_disable_metadata(self): """Test reserved port case of DeviceManager's DHCP port setup logic which metadata disabled. """ self._test_setup_reserved() def test_setup_reserved_with_isolated_metadata_enable(self): """Test reserved port case of DeviceManager's DHCP port setup logic which isolated_ metadata enabled. """ self._test_setup_reserved(enable_isolated_metadata=True) def test_setup_reserved_with_force_metadata_enable(self): """Test reserved port case of DeviceManager's DHCP port setup logic which force_metadata enabled. """ self._test_setup_reserved(force_metadata=True) def test_setup_reserved_and_enable_metadata(self): """Test reserved port case of DeviceManager's DHCP port setup logic which both isolated_metadata and force_metadata enabled. """ self._test_setup_reserved(enable_isolated_metadata=True, force_metadata=True) def test_setup_reserved_2(self): """Test scenario where a network has two reserved ports, and update_dhcp_port fails for the first of those. """ with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: # Create DeviceManager. self.conf.register_opt( cfg.BoolOpt('enable_isolated_metadata', default=False)) self.conf.register_opt( cfg.BoolOpt('force_metadata', default=False)) plugin = mock.Mock() device = mock.Mock() mock_IPDevice.return_value = device device.route.get_gateway.return_value = None mgr = dhcp.DeviceManager(self.conf, plugin) self.mock_load_interface_driver.assert_called_with( self.conf, get_networks_callback=plugin.get_networks) # Setup with a reserved DHCP port. network = FakeDualNetworkReserved2() network.tenant_id = 'Tenant A' reserved_port_1 = network.ports[-2] reserved_port_2 = network.ports[-1] def mock_update(port_id, dict): if port_id == reserved_port_1.id: return None port = reserved_port_2 port.network_id = dict['port']['network_id'] port.device_id = dict['port']['device_id'] return port plugin.update_dhcp_port.side_effect = mock_update mgr.driver.get_device_name.return_value = 'ns-XXX' mgr.driver.use_gateway_ips = False self.mock_ip_lib.ensure_device_is_ready.return_value = True mgr.setup(network) plugin.update_dhcp_port.assert_called_with(reserved_port_2.id, mock.ANY) mgr.driver.init_l3.assert_called_with( 'ns-XXX', ['192.168.0.6/24', 'fdca:3ba5:a17a:4ba3::2/64'], namespace='qdhcp-ns') def test__setup_reserved_dhcp_port_with_fake_remote_error(self): """Test scenario where a fake_network has two reserved ports, and update_dhcp_port fails for the first of those with a RemoteError. """ # Setup with a reserved DHCP port. 
fake_network = FakeDualNetworkReserved2() fake_network.tenant_id = 'Tenant A' reserved_port_2 = fake_network.ports[-1] mock_plugin = mock.Mock() dh = dhcp.DeviceManager(cfg.CONF, mock_plugin) messaging_error = oslo_messaging.RemoteError( exc_type='FakeRemoteError') mock_plugin.update_dhcp_port.side_effect = [messaging_error, reserved_port_2] with testtools.ExpectedException(oslo_messaging.RemoteError): dh.setup_dhcp_port(fake_network) class TestDictModel(base.BaseTestCase): def setUp(self): super(TestDictModel, self).setUp() self._a = uuidutils.generate_uuid() self._b = uuidutils.generate_uuid() self.dm = dhcp.DictModel(a=self._a, b=self._b) def test_basic_dict(self): d = dict(a=1, b=2) m = dhcp.DictModel(d) self.assertEqual(1, m.a) self.assertEqual(2, m.b) def test_dict_has_sub_dict(self): d = dict(a=dict(b=2)) m = dhcp.DictModel(d) self.assertEqual(2, m.a.b) def test_dict_contains_list(self): d = dict(a=[1, 2]) m = dhcp.DictModel(d) self.assertEqual([1, 2], m.a) def test_dict_contains_list_of_dicts(self): d = dict(a=[dict(b=2), dict(c=3)]) m = dhcp.DictModel(d) self.assertEqual(2, m.a[0].b) self.assertEqual(3, m.a[1].c) def test_string_representation_port(self): port = dhcp.DictModel({'id': 'id', 'network_id': 'net_id'}) self.assertEqual('id=id, network_id=net_id', str(port)) def test_string_representation_network(self): net = dhcp.DictModel({'id': 'id', 'name': 'myname'}) self.assertEqual('id=id, name=myname', str(net)) def test__init_parameters(self): self.assertEqual(self._a, self.dm.a) self.assertEqual(self._b, self.dm.b) def test__init_dictmodel(self): dm2 = dhcp.DictModel(self.dm) self.assertEqual(self._a, dm2.a) self.assertEqual(self._b, dm2.b) dm2.a = 'new_value' self.assertEqual('new_value', dm2.a) self.assertEqual(self._a, self.dm.a) def test__getattr(self): self.assertEqual({'a': self._a, 'b': self._b}, self.dm._dictmodel_internal_storage) try: self.dm.z except AttributeError: pass except Exception: self.fail('Getting a non existing attribute from a DictModel ' 'object should raise AttributeError') def test__setattr(self): self.dm.c = 'c_value' self.assertEqual('c_value', self.dm.c) def test__delattr(self): del self.dm.a self.assertIsNone(self.dm.get('a')) def test__str(self): reference = 'a=%s, b=%s' % (self._a, self._b) self.assertEqual(reference, str(self.dm)) def test__getitem(self): self.assertEqual(self._a, self.dm['a']) self.assertEqual(self._b, self.dm['b']) def test__setitem(self): self.dm['a'] = 'a_new_value' self.assertEqual('a_new_value', self.dm.a) self.assertEqual('a_new_value', self.dm['a']) self.assertEqual(self._b, self.dm.b) def test__iter(self): list_keys = sorted(list(self.dm)) self.assertEqual(['a', 'b'], list_keys) def test__len(self): self.assertEqual(2, len(self.dm)) def test__copy_and_deepcopy(self): for method in (copy.copy, copy.deepcopy): self.dm._tuple = (10, 11) self.dm._list = [20, 21] dm2 = method(self.dm) dm2._tuple = (30, 31) dm2._list[0] = 200 self.assertEqual((10, 11), self.dm._tuple) self.assertEqual([20, 21], self.dm._list) self.assertEqual((30, 31), dm2._tuple) self.assertEqual([200, 21], dm2._list) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/linux/test_external_process.py0000644000175000017500000003750700000000000030220 0ustar00coreycorey00000000000000# Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with 
the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os.path import mock from neutron_lib import fixture as lib_fixtures from oslo_config import cfg from oslo_utils import fileutils import psutil from neutron.agent.linux import external_process as ep from neutron.common import utils as common_utils from neutron.tests import base TEST_UUID = 'test-uuid' TEST_SERVICE = 'testsvc' TEST_PID = 1234 TEST_CMDLINE = 'python foo --router_id=%s' class BaseTestProcessMonitor(base.BaseTestCase): def setUp(self): super(BaseTestProcessMonitor, self).setUp() self.log_patch = mock.patch("neutron.agent.linux.external_process." "LOG.error") self.error_log = self.log_patch.start() self.spawn_patch = mock.patch("eventlet.spawn") self.eventlent_spawn = self.spawn_patch.start() # create a default process monitor self.create_child_process_monitor('respawn') def create_child_process_monitor(self, action): conf = mock.Mock() conf.AGENT.check_child_processes_action = action conf.AGENT.check_child_processes = True self.pmonitor = ep.ProcessMonitor( config=conf, resource_type='test') def get_monitored_process(self, uuid, service=None): monitored_process = mock.Mock() self.pmonitor.register(uuid=uuid, service_name=service, monitored_process=monitored_process) return monitored_process class TestProcessMonitor(BaseTestProcessMonitor): def test_error_logged(self): pm = self.get_monitored_process(TEST_UUID) pm.active = False self.pmonitor._check_child_processes() self.assertTrue(self.error_log.called) def test_exit_handler(self): self.create_child_process_monitor('exit') pm = self.get_monitored_process(TEST_UUID) pm.active = False with mock.patch.object(ep.ProcessMonitor, '_exit_handler') as exit_handler: self.pmonitor._check_child_processes() exit_handler.assert_called_once_with(TEST_UUID, None) def test_register(self): pm = self.get_monitored_process(TEST_UUID) self.assertEqual(len(self.pmonitor._monitored_processes), 1) self.assertIn(pm, self.pmonitor._monitored_processes.values()) def test_register_same_service_twice(self): self.get_monitored_process(TEST_UUID) self.get_monitored_process(TEST_UUID) self.assertEqual(len(self.pmonitor._monitored_processes), 1) def test_register_different_service_types(self): self.get_monitored_process(TEST_UUID) self.get_monitored_process(TEST_UUID, TEST_SERVICE) self.assertEqual(len(self.pmonitor._monitored_processes), 2) def test_unregister(self): self.get_monitored_process(TEST_UUID) self.pmonitor.unregister(TEST_UUID, None) self.assertEqual(len(self.pmonitor._monitored_processes), 0) def test_unregister_unknown_process(self): self.pmonitor.unregister(TEST_UUID, None) self.assertEqual(len(self.pmonitor._monitored_processes), 0) class TestProcessManager(base.BaseTestCase): def setUp(self): super(TestProcessManager, self).setUp() self.execute_p = mock.patch('neutron.agent.common.utils.execute') self.execute = self.execute_p.start() self.delete_if_exists = mock.patch( 'oslo_utils.fileutils.delete_if_exists').start() self.ensure_dir = mock.patch.object( fileutils, 'ensure_tree').start() self.conf = mock.Mock() self.conf.external_pids = '/var/path' def test_processmanager_ensures_pid_dir(self): pid_file = 
os.path.join(self.conf.external_pids, 'pid') ep.ProcessManager(self.conf, 'uuid', pid_file=pid_file) self.ensure_dir.assert_called_once_with(self.conf.external_pids, mode=0o755) def test_enable_no_namespace(self): callback = mock.Mock() callback.return_value = ['the', 'cmd'] with mock.patch.object(ep.ProcessManager, 'get_pid_file_name') as name: name.return_value = 'pidfile' with mock.patch.object(ep.ProcessManager, 'active') as active: active.__get__ = mock.Mock(return_value=False) manager = ep.ProcessManager(self.conf, 'uuid') manager.enable(callback) callback.assert_called_once_with('pidfile') self.execute.assert_called_once_with(['the', 'cmd'], check_exit_code=True, extra_ok_codes=None, run_as_root=False, log_fail_as_error=True) def test_enable_with_namespace(self): callback = mock.Mock() callback.return_value = ['the', 'cmd'] with mock.patch.object(ep.ProcessManager, 'get_pid_file_name') as name: name.return_value = 'pidfile' with mock.patch.object(ep.ProcessManager, 'active') as active: active.__get__ = mock.Mock(return_value=False) manager = ep.ProcessManager(self.conf, 'uuid', namespace='ns') with mock.patch.object(ep, 'ip_lib') as ip_lib: manager.enable(callback) callback.assert_called_once_with('pidfile') ip_lib.assert_has_calls([ mock.call.IPWrapper(namespace='ns'), mock.call.IPWrapper().netns.execute( ['the', 'cmd'], addl_env=None, run_as_root=True)]) def test_enable_with_namespace_process_active(self): callback = mock.Mock() callback.return_value = ['the', 'cmd'] with mock.patch.object(ep.ProcessManager, 'active') as active: active.__get__ = mock.Mock(return_value=True) manager = ep.ProcessManager(self.conf, 'uuid', namespace='ns') with mock.patch.object(ep, 'ip_lib'): manager.enable(callback) self.assertFalse(callback.called) def test_enable_with_ensure_active(self): def _create_cmd(*args): return ['sleep', 0] pm = ep.ProcessManager(self.conf, 'uuid', pid_file='pid_file', default_cmd_callback=_create_cmd) with mock.patch.object(psutil, 'Process') as mock_psutil_process, \ mock.patch.object(ep.ProcessManager, 'pid', new_callable=mock.PropertyMock) as mock_pid: mock_pid.return_value = 'pid_value' mock_process = mock.Mock() mock_process.cmdline.side_effect = [[], ['the', 'cmd', 'uuid']] mock_psutil_process.return_value = mock_process try: pm.enable(ensure_active=True) except common_utils.WaitTimeout: self.fail('ProcessManager.enable() raised WaitTimeout') def test_reload_cfg_without_custom_reload_callback(self): with mock.patch.object(ep.ProcessManager, 'disable') as disable: manager = ep.ProcessManager(self.conf, 'uuid', namespace='ns') manager.reload_cfg() disable.assert_called_once_with('HUP') def test_reload_cfg_with_custom_reload_callback(self): reload_callback = mock.sentinel.callback with mock.patch.object(ep.ProcessManager, 'disable') as disable: manager = ep.ProcessManager( self.conf, 'uuid', namespace='ns', custom_reload_callback=reload_callback) manager.reload_cfg() disable.assert_called_once_with(get_stop_command=reload_callback) def test_disable_get_stop_command(self): cmd = ['the', 'cmd'] reload_callback = mock.Mock(return_value=cmd) with mock.patch.object(ep.ProcessManager, 'pid', mock.PropertyMock(return_value=4)): with mock.patch.object(ep.ProcessManager, 'active', mock.PropertyMock(return_value=True)): manager = ep.ProcessManager( self.conf, 'uuid', custom_reload_callback=reload_callback) manager.disable( get_stop_command=manager.custom_reload_callback) self.assertIn(cmd, self.execute.call_args[0]) def test_disable_no_namespace(self): with 
mock.patch.object(ep.ProcessManager, 'pid') as pid: pid.__get__ = mock.Mock(return_value=4) with mock.patch.object(ep.ProcessManager, 'active') as active: active.__get__ = mock.Mock(return_value=True) manager = ep.ProcessManager(self.conf, 'uuid') with mock.patch.object(ep, 'utils') as utils: manager.disable() utils.assert_has_calls([ mock.call.execute(['kill', '-9', 4], run_as_root=False)]) def test_disable_namespace(self): with mock.patch.object(ep.ProcessManager, 'pid') as pid: pid.__get__ = mock.Mock(return_value=4) with mock.patch.object(ep.ProcessManager, 'active') as active: active.__get__ = mock.Mock(return_value=True) manager = ep.ProcessManager(self.conf, 'uuid', namespace='ns') with mock.patch.object(ep, 'utils') as utils: manager.disable() utils.assert_has_calls([ mock.call.execute(['kill', '-9', 4], run_as_root=True)]) def test_disable_not_active(self): with mock.patch.object(ep.ProcessManager, 'pid') as pid: pid.__get__ = mock.Mock(return_value=4) with mock.patch.object(ep.ProcessManager, 'active') as active: active.__get__ = mock.Mock(return_value=False) with mock.patch.object(ep.LOG, 'debug') as debug: manager = ep.ProcessManager(self.conf, 'uuid') manager.disable() debug.assert_called_once_with(mock.ANY, mock.ANY) def test_disable_no_pid(self): with mock.patch.object(ep.ProcessManager, 'pid') as pid: pid.__get__ = mock.Mock(return_value=None) with mock.patch.object(ep.ProcessManager, 'active') as active: active.__get__ = mock.Mock(return_value=False) with mock.patch.object(ep.LOG, 'debug') as debug: manager = ep.ProcessManager(self.conf, 'uuid') manager.disable() debug.assert_called_once_with(mock.ANY, mock.ANY) def _test_disable_custom_kill_script(self, kill_script_exists, namespace, kill_scripts_path='test-path/'): cfg.CONF.set_override("kill_scripts_path", kill_scripts_path, "AGENT") if kill_script_exists: expected_cmd = ['test-service-kill', '9', 4] else: expected_cmd = ['kill', '-9', 4] with mock.patch.object(ep.ProcessManager, 'pid') as pid: pid.__get__ = mock.Mock(return_value=4) with mock.patch.object(ep.ProcessManager, 'active') as active: active.__get__ = mock.Mock(return_value=True) manager = ep.ProcessManager( self.conf, 'uuid', namespace=namespace, service='test-service') with mock.patch.object(ep, 'utils') as utils, \ mock.patch.object(os.path, 'isfile', return_value=kill_script_exists): manager.disable() utils.execute.assert_called_with( expected_cmd, run_as_root=bool(namespace)) def test_disable_custom_kill_script_no_namespace(self): self._test_disable_custom_kill_script( kill_script_exists=True, namespace=None) def test_disable_custom_kill_script_namespace(self): self._test_disable_custom_kill_script( kill_script_exists=True, namespace="ns") def test_disable_custom_kill_script_no_kill_script_no_namespace(self): self._test_disable_custom_kill_script( kill_script_exists=False, namespace=None) def test_disable_custom_kill_script_no_kill_script_namespace(self): self._test_disable_custom_kill_script( kill_script_exists=False, namespace="ns") def test_disable_custom_kill_script_namespace_no_path(self): self._test_disable_custom_kill_script( kill_script_exists=False, namespace="ns", kill_scripts_path=None) def test_get_pid_file_name_default(self): manager = ep.ProcessManager(self.conf, 'uuid') retval = manager.get_pid_file_name() self.assertEqual(retval, '/var/path/uuid.pid') def test_pid(self): self.useFixture(lib_fixtures.OpenFixture('/var/path/uuid.pid', '5')) manager = ep.ProcessManager(self.conf, 'uuid') self.assertEqual(manager.pid, 5) def 
test_pid_not_an_int(self): self.useFixture(lib_fixtures.OpenFixture('/var/path/uuid.pid', 'foo')) manager = ep.ProcessManager(self.conf, 'uuid') self.assertIsNone(manager.pid) def test_pid_invalid_file(self): with mock.patch.object(ep.ProcessManager, 'get_pid_file_name') as name: name.return_value = '.doesnotexist/pid' manager = ep.ProcessManager(self.conf, 'uuid') self.assertIsNone(manager.pid) def test_active(self): with mock.patch.object(ep.ProcessManager, 'cmdline') as cmdline: cmdline.__get__ = mock.Mock( return_value=TEST_CMDLINE % 'uuid') manager = ep.ProcessManager(self.conf, 'uuid') self.assertTrue(manager.active) def test_active_none(self): with mock.patch.object(ep.ProcessManager, 'cmdline') as cmdline: cmdline.__get__ = mock.Mock(return_value=None) manager = ep.ProcessManager(self.conf, 'uuid') self.assertFalse(manager.active) def test_active_cmd_mismatch(self): with mock.patch.object(ep.ProcessManager, 'cmdline') as cmdline: cmdline.__get__ = mock.Mock( return_value=TEST_CMDLINE % 'anotherid') manager = ep.ProcessManager(self.conf, 'uuid') self.assertFalse(manager.active) def test_cmdline(self): with mock.patch.object(psutil, 'Process') as proc: proc().cmdline.return_value = (TEST_CMDLINE % 'uuid').split(' ') with mock.patch.object(ep.ProcessManager, 'pid') as pid: pid.__get__ = mock.Mock(return_value=4) manager = ep.ProcessManager(self.conf, 'uuid') self.assertEqual(TEST_CMDLINE % 'uuid', manager.cmdline) proc().cmdline.assert_called_once_with() def test_cmdline_none(self): with mock.patch.object(psutil, 'Process') as proc: proc.side_effect = psutil.NoSuchProcess(4) with mock.patch.object(ep.ProcessManager, 'pid') as pid: pid.__get__ = mock.Mock(return_value=4) manager = ep.ProcessManager(self.conf, 'uuid') self.assertIsNone(manager.cmdline) proc.assert_called_once_with(4) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/linux/test_interface.py0000644000175000017500000006543400000000000026600 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
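# ---------------------------------------------------------------------------
# Editor's aside (illustrative sketch, not part of the neutron source): the
# ProcessManager tests above stub read-only properties such as ``pid``,
# ``active`` and ``cmdline`` either by assigning ``__get__`` on the patched
# attribute or by passing ``new_callable=mock.PropertyMock``. A minimal,
# self-contained example of the PropertyMock form, assuming only the ``mock``
# library; the ``Service`` class below is hypothetical:
import mock


class Service(object):
    @property
    def active(self):
        return True  # a real implementation would inspect a pid file


with mock.patch.object(Service, 'active',
                       new_callable=mock.PropertyMock) as active:
    active.return_value = False
    # the patched property is consulted on attribute access
    assert Service().active is False
# ---------------------------------------------------------------------------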
import mock from neutron_lib import constants from oslo_utils import excutils from pyroute2.netlink import exceptions as pyroute2_exc from neutron.agent.common import ovs_lib from neutron.agent.linux import interface from neutron.agent.linux import ip_lib from neutron.common import utils from neutron.conf.agent import common as config from neutron.conf.plugins.ml2.drivers import ovs_conf from neutron.tests import base class BaseChild(interface.LinuxInterfaceDriver): def plug_new(*args): pass def unplug(*args): pass class FakeNetwork(object): id = '12345678-1234-5678-90ab-ba0987654321' class FakeSubnet(object): cidr = '192.168.1.1/24' class FakeAllocation(object): subnet = FakeSubnet() ip_address = '192.168.1.2' ip_version = constants.IP_VERSION_4 class FakePort(object): id = 'abcdef01-1234-5678-90ab-ba0987654321' fixed_ips = [FakeAllocation] device_id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' network = FakeNetwork() network_id = network.id class TestBase(base.BaseTestCase): def setUp(self): super(TestBase, self).setUp() self.conf = config.setup_conf() ovs_conf.register_ovs_opts(self.conf) config.register_interface_opts(self.conf) self.ip_dev_p = mock.patch.object(ip_lib, 'IPDevice') self.ip_dev = self.ip_dev_p.start() self.ip_p = mock.patch.object(ip_lib, 'IPWrapper') self.ip = self.ip_p.start() self.device_exists_p = mock.patch.object(ip_lib, 'device_exists') self.device_exists = self.device_exists_p.start() self.get_devices_with_ip_p = mock.patch.object(ip_lib, 'get_devices_with_ip') self.get_devices_with_ip = self.get_devices_with_ip_p.start() class TestABCDriver(TestBase): def setUp(self): super(TestABCDriver, self).setUp() mock_link_addr = mock.PropertyMock(return_value='aa:bb:cc:dd:ee:ff') type(self.ip_dev().link).address = mock_link_addr def test_get_device_name(self): bc = BaseChild(self.conf) device_name = bc.get_device_name(FakePort()) self.assertEqual('tapabcdef01-12', device_name) def test_init_router_port(self): addresses = [dict(scope='global', dynamic=False, cidr='172.16.77.240/24')] self.ip_dev().addr.list = mock.Mock(return_value=addresses) self.ip_dev().route.list_onlink_routes.return_value = [] bc = BaseChild(self.conf) ns = '12345678-1234-5678-90ab-ba0987654321' bc.init_router_port('tap0', ['192.168.1.2/24'], namespace=ns, extra_subnets=[{'cidr': '172.20.0.0/24'}]) self.ip_dev.assert_has_calls( [mock.call('tap0', namespace=ns), mock.call().addr.list(), mock.call().addr.delete('172.16.77.240/24'), mock.call().addr.add('192.168.1.2/24'), mock.call('tap0', namespace=ns), mock.call().route.list_onlink_routes(constants.IP_VERSION_4), mock.call().route.list_onlink_routes(constants.IP_VERSION_6), mock.call().route.add_onlink_route('172.20.0.0/24')]) def test_init_router_port_delete_onlink_routes(self): addresses = [dict(scope='global', dynamic=False, cidr='172.16.77.240/24')] self.ip_dev().addr.list = mock.Mock(return_value=addresses) self.ip_dev().route.list_onlink_routes.return_value = [ {'cidr': '172.20.0.0/24'}] bc = BaseChild(self.conf) ns = '12345678-1234-5678-90ab-ba0987654321' bc.init_router_port('tap0', ['192.168.1.2/24'], namespace=ns) self.ip_dev.assert_has_calls( [mock.call().route.list_onlink_routes(constants.IP_VERSION_4), mock.call().route.list_onlink_routes(constants.IP_VERSION_6), mock.call().route.delete_onlink_route('172.20.0.0/24')]) def test_l3_init_with_preserve(self): addresses = [dict(scope='global', dynamic=False, cidr='192.168.1.3/32')] self.ip_dev().addr.list = mock.Mock(return_value=addresses) bc = BaseChild(self.conf) ns = 
'12345678-1234-5678-90ab-ba0987654321' bc.init_l3('tap0', ['192.168.1.2/24'], namespace=ns, preserve_ips=['192.168.1.3/32']) self.ip_dev.assert_has_calls( [mock.call('tap0', namespace=ns), mock.call().addr.list(), mock.call().addr.add('192.168.1.2/24')]) self.assertFalse(self.ip_dev().addr.delete.called) self.assertFalse(self.ip_dev().delete_addr_and_conntrack_state.called) def _test_l3_init_clean_connections(self, clean_connections): addresses = [ dict(scope='global', dynamic=False, cidr='10.0.0.1/24'), dict(scope='global', dynamic=False, cidr='10.0.0.3/32')] self.ip_dev().addr.list = mock.Mock(return_value=addresses) bc = BaseChild(self.conf) ns = '12345678-1234-5678-90ab-ba0987654321' bc.init_l3('tap0', ['10.0.0.1/24'], namespace=ns, clean_connections=clean_connections) delete = self.ip_dev().delete_addr_and_conntrack_state if clean_connections: delete.assert_called_once_with('10.0.0.3/32') else: self.assertFalse(delete.called) def test_l3_init_with_clean_connections(self): self._test_l3_init_clean_connections(True) def test_l3_init_without_clean_connections(self): self._test_l3_init_clean_connections(False) def test_init_router_port_ipv6_with_gw_ip(self): addresses = [dict(scope='global', dynamic=False, cidr='2001:db8:a::123/64')] self.ip_dev().addr.list = mock.Mock(return_value=addresses) self.ip_dev().route.list_onlink_routes.return_value = [] bc = BaseChild(self.conf) ns = '12345678-1234-5678-90ab-ba0987654321' new_cidr = '2001:db8:a::124/64' kwargs = {'namespace': ns, 'extra_subnets': [{'cidr': '2001:db8:b::/64'}]} bc.init_router_port('tap0', [new_cidr], **kwargs) expected_calls = ( [mock.call('tap0', namespace=ns), mock.call().addr.list(), mock.call().addr.delete('2001:db8:a::123/64'), mock.call().addr.add('2001:db8:a::124/64')]) expected_calls += ( [mock.call('tap0', namespace=ns), mock.call().route.list_onlink_routes(constants.IP_VERSION_4), mock.call().route.list_onlink_routes(constants.IP_VERSION_6), mock.call().route.add_onlink_route('2001:db8:b::/64')]) self.ip_dev.assert_has_calls(expected_calls) def test_init_router_port_ext_gw_with_dual_stack(self): old_addrs = [dict(ip_version=constants.IP_VERSION_4, scope='global', dynamic=False, cidr='172.16.77.240/24'), dict(ip_version=constants.IP_VERSION_6, scope='global', dynamic=False, cidr='2001:db8:a::123/64')] self.ip_dev().addr.list = mock.Mock(return_value=old_addrs) self.ip_dev().route.list_onlink_routes.return_value = [] bc = BaseChild(self.conf) ns = '12345678-1234-5678-90ab-ba0987654321' new_cidrs = ['192.168.1.2/24', '2001:db8:a::124/64'] bc.init_router_port('tap0', new_cidrs, namespace=ns, extra_subnets=[{'cidr': '172.20.0.0/24'}]) self.ip_dev.assert_has_calls( [mock.call('tap0', namespace=ns), mock.call().addr.list(), mock.call().addr.add('192.168.1.2/24'), mock.call().addr.add('2001:db8:a::124/64'), mock.call().addr.delete('172.16.77.240/24'), mock.call().addr.delete('2001:db8:a::123/64'), mock.call().route.list_onlink_routes(constants.IP_VERSION_4), mock.call().route.list_onlink_routes(constants.IP_VERSION_6), mock.call().route.add_onlink_route('172.20.0.0/24')], any_order=True) def test_init_router_port_with_ipv6_delete_onlink_routes(self): addresses = [dict(scope='global', dynamic=False, cidr='2001:db8:a::123/64')] route = '2001:db8:a::/64' self.ip_dev().addr.list = mock.Mock(return_value=addresses) self.ip_dev().route.list_onlink_routes.return_value = [{'cidr': route}] bc = BaseChild(self.conf) ns = '12345678-1234-5678-90ab-ba0987654321' bc.init_router_port('tap0', ['2001:db8:a::124/64'], namespace=ns) 
self.ip_dev.assert_has_calls( [mock.call().route.list_onlink_routes(constants.IP_VERSION_4), mock.call().route.list_onlink_routes(constants.IP_VERSION_6), mock.call().route.delete_onlink_route(route)]) def test_l3_init_with_duplicated_ipv6(self): addresses = [dict(scope='global', dynamic=False, cidr='2001:db8:a::123/64')] self.ip_dev().addr.list = mock.Mock(return_value=addresses) bc = BaseChild(self.conf) ns = '12345678-1234-5678-90ab-ba0987654321' bc.init_l3('tap0', ['2001:db8:a::123/64'], namespace=ns) self.assertFalse(self.ip_dev().addr.add.called) def test_l3_init_with_duplicated_ipv6_uncompact(self): addresses = [dict(scope='global', dynamic=False, cidr='2001:db8:a::123/64')] self.ip_dev().addr.list = mock.Mock(return_value=addresses) bc = BaseChild(self.conf) ns = '12345678-1234-5678-90ab-ba0987654321' bc.init_l3('tap0', ['2001:db8:a:0000:0000:0000:0000:0123/64'], namespace=ns) self.assertFalse(self.ip_dev().addr.add.called) def test_l3_init_with_duplicated_ipv6_dynamic(self): device_name = 'tap0' cidr = '2001:db8:a::123/64' ns = '12345678-1234-5678-90ab-ba0987654321' addresses = [dict(scope='global', dynamic=True, cidr=cidr)] self.ip_dev().addr.list = mock.Mock(return_value=addresses) bc = BaseChild(self.conf) bc.init_l3(device_name, [cidr], namespace=ns) self.ip_dev.assert_has_calls( [mock.call(device_name, namespace=ns), mock.call().addr.list(), mock.call().addr.delete(cidr), mock.call().addr.add(cidr)]) def test_l3_init_with_duplicated_ipv6_lla(self): device_name = 'tap0' cidr = 'fe80::a8bb:ccff:fedd:eeff/64' ns = '12345678-1234-5678-90ab-ba0987654321' addresses = [dict(scope='link', dynamic=False, cidr=cidr)] self.ip_dev().addr.list = mock.Mock(return_value=addresses) bc = BaseChild(self.conf) bc.init_l3(device_name, [cidr], namespace=ns) self.ip_dev.assert_has_calls( [mock.call(device_name, namespace=ns), mock.call().addr.list()]) # The above assert won't verify there were no extra calls right # after list() self.assertFalse(self.ip_dev().addr.add.called) def test_l3_init_with_not_present_ipv6_lla(self): device_name = 'tap0' cidr = 'fe80::a8bb:ccff:fedd:eeff/64' ns = '12345678-1234-5678-90ab-ba0987654321' self.ip_dev().addr.list = mock.Mock(return_value=[]) bc = BaseChild(self.conf) bc.init_l3(device_name, [cidr], namespace=ns) self.ip_dev.assert_has_calls( [mock.call(device_name, namespace=ns), mock.call().addr.list(), mock.call().addr.add(cidr)]) def test_add_ipv6_addr(self): device_name = 'tap0' cidr = '2001:db8::/64' ns = '12345678-1234-5678-90ab-ba0987654321' bc = BaseChild(self.conf) bc.add_ipv6_addr(device_name, cidr, ns) self.ip_dev.assert_has_calls( [mock.call(device_name, namespace=ns), mock.call().addr.add(cidr, 'global')]) def test_delete_ipv6_addr(self): device_name = 'tap0' cidr = '2001:db8::/64' ns = '12345678-1234-5678-90ab-ba0987654321' bc = BaseChild(self.conf) bc.delete_ipv6_addr(device_name, cidr, ns) self.ip_dev.assert_has_calls( [mock.call(device_name, namespace=ns), mock.call().delete_addr_and_conntrack_state(cidr)]) def test_delete_ipv6_addr_with_prefix(self): device_name = 'tap0' prefix = '2001:db8::/48' in_cidr = '2001:db8::/64' out_cidr = '2001:db7::/64' ns = '12345678-1234-5678-90ab-ba0987654321' in_addresses = [dict(scope='global', dynamic=False, cidr=in_cidr)] out_addresses = [dict(scope='global', dynamic=False, cidr=out_cidr)] # Initially set the address list to be empty self.ip_dev().addr.list = mock.Mock(return_value=[]) bc = BaseChild(self.conf) # Call delete_v6addr_with_prefix when the address list is empty 
bc.delete_ipv6_addr_with_prefix(device_name, prefix, ns) # Assert that delete isn't called self.assertFalse(self.ip_dev().delete_addr_and_conntrack_state.called) # Set the address list to contain only an address outside of the range # of the given prefix self.ip_dev().addr.list = mock.Mock(return_value=out_addresses) bc.delete_ipv6_addr_with_prefix(device_name, prefix, ns) # Assert that delete isn't called self.assertFalse(self.ip_dev().delete_addr_and_conntrack_state.called) # Set the address list to contain only an address inside of the range # of the given prefix self.ip_dev().addr.list = mock.Mock(return_value=in_addresses) bc.delete_ipv6_addr_with_prefix(device_name, prefix, ns) # Assert that delete is called self.ip_dev.assert_has_calls( [mock.call(device_name, namespace=ns), mock.call().addr.list(scope='global', filters=['permanent']), mock.call().delete_addr_and_conntrack_state(in_cidr)]) def test_get_ipv6_llas(self): ns = '12345678-1234-5678-90ab-ba0987654321' addresses = [dict(scope='link', dynamic=False, cidr='fe80:cafe::/64')] self.get_devices_with_ip.return_value = addresses device_name = self.ip_dev().name bc = BaseChild(self.conf) llas = bc.get_ipv6_llas(device_name, ns) self.assertEqual(addresses, llas) kwargs = {'family': utils.get_socket_address_family( constants.IP_VERSION_6), 'scope': 'link'} self.get_devices_with_ip.assert_called_with( ns, name=device_name, **kwargs) def test_set_mtu_logs_once(self): bc = BaseChild(self.conf) with mock.patch('neutron.agent.linux.interface.LOG.warning') as log: bc.set_mtu('dev', 9999) log.assert_called_once_with(mock.ANY) class TestOVSInterfaceDriver(TestBase): def test_get_device_name(self): br = interface.OVSInterfaceDriver(self.conf) device_name = br.get_device_name(FakePort()) self.assertEqual('tapabcdef01-12', device_name) def test_plug_no_ns(self): self._test_plug() def test_plug_with_ns(self): self._test_plug(namespace='01234567-1234-1234-99') def test_plug_alt_bridge(self): self._test_plug(bridge='br-foo') def test_plug_configured_bridge(self): br = 'br-v' self.conf.set_override('ovs_use_veth', False) self.conf.set_override('integration_bridge', br, 'OVS') self.assertEqual(self.conf.OVS.integration_bridge, br) def device_exists(dev, namespace=None): return dev == br ovs = interface.OVSInterfaceDriver(self.conf) with mock.patch.object(ovs, '_ovs_add_port') as add_port: self.device_exists.side_effect = device_exists ovs.plug('01234567-1234-1234-99', 'port-1234', 'tap0', 'aa:bb:cc:dd:ee:ff', bridge=None, namespace=None) add_port.assert_called_once_with('br-v', 'tap0', 'port-1234', 'aa:bb:cc:dd:ee:ff', internal=True) def _test_plug(self, bridge=None, namespace=None): with mock.patch('neutron.agent.ovsdb.impl_idl._connection'): if not bridge: bridge = 'br-int' def device_exists(dev, namespace=None): return dev == bridge with mock.patch.object(ovs_lib.OVSBridge, 'replace_port') as replace: ovs = interface.OVSInterfaceDriver(self.conf) self.device_exists.side_effect = device_exists link = self.ip.return_value.device.return_value.link link.set_address.side_effect = (RuntimeError, None) ovs.plug('01234567-1234-1234-99', 'port-1234', 'tap0', 'aa:bb:cc:dd:ee:ff', bridge=bridge, namespace=namespace, mtu=9000) replace.assert_called_once_with( 'tap0', ('type', 'internal'), ('external_ids', { 'iface-id': 'port-1234', 'iface-status': 'active', 'attached-mac': 'aa:bb:cc:dd:ee:ff'})) expected = [ mock.call(), mock.call().device('tap0'), mock.call().device().link.set_address('aa:bb:cc:dd:ee:ff'), 
mock.call().device().link.set_address('aa:bb:cc:dd:ee:ff')] if namespace: expected.extend( [mock.call().ensure_namespace(namespace), mock.call().ensure_namespace().add_device_to_namespace( mock.ANY)]) expected.extend([ mock.call(namespace=namespace), mock.call().device('tap0'), mock.call().device().link.set_mtu(9000), mock.call().device().link.set_up(), ]) self.ip.assert_has_calls(expected) def test_plug_new(self): with mock.patch('neutron.agent.ovsdb.impl_idl._connection'): bridge = 'br-int' namespace = '01234567-1234-1234-99' with mock.patch.object(ovs_lib.OVSBridge, 'delete_port') as delete_port: with mock.patch.object(ovs_lib.OVSBridge, 'replace_port'): ovs = interface.OVSInterfaceDriver(self.conf) reraise = mock.patch.object( excutils, 'save_and_reraise_exception') reraise.start() ip_wrapper = mock.Mock() for exception in (OSError(), pyroute2_exc.NetlinkError(22)): ip_wrapper.ensure_namespace.side_effect = exception self.ip.return_value = ip_wrapper delete_port.reset_mock() ovs.plug_new( '01234567-1234-1234-99', 'port-1234', 'tap0', 'aa:bb:cc:dd:ee:ff', bridge=bridge, namespace=namespace, prefix='veth', mtu=9000) delete_port.assert_called_once_with('tap0') def test_unplug(self): with mock.patch('neutron.agent.common.ovs_lib.OVSBridge') as ovs_br: ovs = interface.OVSInterfaceDriver(self.conf) ovs.unplug('tap0') ovs_br.assert_has_calls([mock.call('br-int'), mock.call().delete_port('tap0')]) class TestOVSInterfaceDriverWithVeth(TestOVSInterfaceDriver): def setUp(self): super(TestOVSInterfaceDriverWithVeth, self).setUp() self.conf.set_override('ovs_use_veth', True) def test_get_device_name(self): br = interface.OVSInterfaceDriver(self.conf) device_name = br.get_device_name(FakePort()) self.assertEqual('ns-abcdef01-12', device_name) def test_plug_with_prefix(self): self._test_plug(devname='qr-0', prefix='qr-') def _test_plug(self, devname=None, bridge=None, namespace=None, prefix=None): with mock.patch('neutron.agent.ovsdb.impl_idl._connection'): if not devname: devname = 'ns-0' if not bridge: bridge = 'br-int' def device_exists(dev, namespace=None): return dev == bridge ovs = interface.OVSInterfaceDriver(self.conf) self.device_exists.side_effect = device_exists root_dev = mock.Mock() ns_dev = mock.Mock() self.ip().add_veth = mock.Mock(return_value=(root_dev, ns_dev)) mock.patch.object( interface, '_get_veth', return_value=(root_dev, ns_dev)).start() expected = [mock.call(), mock.call().add_veth('tap0', devname, namespace2=namespace)] with mock.patch.object(ovs_lib.OVSBridge, 'replace_port') as replace: ovs.plug('01234567-1234-1234-99', 'port-1234', devname, 'aa:bb:cc:dd:ee:ff', bridge=bridge, namespace=namespace, prefix=prefix, mtu=9000) replace.assert_called_once_with( 'tap0', ('external_ids', { 'iface-id': 'port-1234', 'iface-status': 'active', 'attached-mac': 'aa:bb:cc:dd:ee:ff'})) ns_dev.assert_has_calls( [mock.call.link.set_address('aa:bb:cc:dd:ee:ff')]) ns_dev.assert_has_calls([mock.call.link.set_mtu(9000)]) root_dev.assert_has_calls([mock.call.link.set_mtu(9000)]) self.ip.assert_has_calls(expected) root_dev.assert_has_calls([mock.call.link.set_up()]) ns_dev.assert_has_calls([mock.call.link.set_up()]) def test_plug_new(self): # The parent class (TestOVSInterfaceDriver) implements test_plug_new to # exercise the exception path (exceptions.ProcessExecutionError); the # veth variant never reaches that code, so this override is # intentionally a no-op. pass def test_unplug(self): bridge = 'br-int' with mock.patch('neutron.agent.common.ovs_lib.OVSBridge') as ovs_br: ovs = interface.OVSInterfaceDriver(self.conf) 
ovs.unplug('ns-0', bridge=bridge) ovs_br.assert_has_calls([mock.call(bridge), mock.call().delete_port('tap0')]) self.ip_dev.assert_has_calls([mock.call('ns-0', namespace=None), mock.call().link.delete()]) class TestBridgeInterfaceDriver(TestBase): def test_get_device_name(self): br = interface.BridgeInterfaceDriver(self.conf) device_name = br.get_device_name(FakePort()) self.assertEqual('ns-abcdef01-12', device_name) def test_plug_no_ns(self): self._test_plug() def test_plug_with_ns(self): self._test_plug(namespace='01234567-1234-1234-99') def _test_plug(self, namespace=None): def device_exists(device, namespace=None): return device.startswith('brq') root_veth = mock.Mock() ns_veth = mock.Mock() self.ip().add_veth = mock.Mock(return_value=(root_veth, ns_veth)) mock.patch.object( interface, '_get_veth', return_value=(root_veth, ns_veth)).start() self.device_exists.side_effect = device_exists br = interface.BridgeInterfaceDriver(self.conf) mac_address = 'aa:bb:cc:dd:ee:ff' br.plug('01234567-1234-1234-99', 'port-1234', 'ns-0', mac_address, namespace=namespace, mtu=9000) ip_calls = [mock.call(), mock.call().add_veth('tap0', 'ns-0', namespace2=namespace)] ns_veth.assert_has_calls([mock.call.link.set_address(mac_address)]) ns_veth.assert_has_calls([mock.call.link.set_mtu(9000)]) root_veth.assert_has_calls([mock.call.link.set_mtu(9000)]) self.ip.assert_has_calls(ip_calls) root_veth.assert_has_calls([mock.call.link.set_up()]) ns_veth.assert_has_calls([mock.call.link.set_up()]) def test_plug_dev_exists(self): self.device_exists.return_value = True with mock.patch('neutron.agent.linux.interface.LOG.info') as log: br = interface.BridgeInterfaceDriver(self.conf) br.plug('01234567-1234-1234-99', 'port-1234', 'tap0', 'aa:bb:cc:dd:ee:ff') self.assertFalse(self.ip_dev.called) self.assertEqual(log.call_count, 1) def test_unplug_no_device(self): self.device_exists.return_value = False self.ip_dev().link.delete.side_effect = RuntimeError with mock.patch('neutron.agent.linux.interface.LOG') as log: br = interface.BridgeInterfaceDriver(self.conf) br.unplug('tap0') self.ip_dev.assert_has_calls([mock.call('tap0', namespace=None), mock.call().link.delete()]) self.assertEqual(log.error.call_count, 1) def test_unplug(self): self.device_exists.return_value = True with mock.patch('neutron.agent.linux.interface.LOG.debug') as log: br = interface.BridgeInterfaceDriver(self.conf) br.unplug('tap0') self.assertEqual(log.call_count, 1) self.ip_dev.assert_has_calls([mock.call('tap0', namespace=None), mock.call().link.delete()]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/linux/test_ip_conntrack.py0000644000175000017500000000306200000000000027277 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
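# ---------------------------------------------------------------------------
# Editor's aside (illustrative sketch, not part of the neutron source): the
# interface-driver tests above verify chained constructor-and-attribute usage
# with ``mock.call`` histories, e.g. ``mock.call('tap0', namespace=None)``
# followed by ``mock.call().link.delete()``. A self-contained example of that
# pattern, assuming only the ``mock`` library; ``dev_cls`` is a stand-in for
# a patched class such as ip_lib.IPDevice:
import mock

dev_cls = mock.Mock()
dev = dev_cls('tap0', namespace=None)   # recorded as call('tap0', namespace=None)
dev.link.delete()                       # recorded as call().link.delete()
dev_cls.assert_has_calls([mock.call('tap0', namespace=None),
                          mock.call().link.delete()])
# ---------------------------------------------------------------------------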
import mock from neutron.agent.linux import ip_conntrack from neutron.tests import base class IPConntrackTestCase(base.BaseTestCase): def setUp(self): super(IPConntrackTestCase, self).setUp() self.execute = mock.Mock() self.filtered_port = {} self.unfiltered_port = {} self.mgr = ip_conntrack.IpConntrackManager( self._get_rule_for_table, self.filtered_port, self.unfiltered_port, self.execute, zone_per_port=True) def _get_rule_for_table(self, table): return ['test --physdev-in tapdevice -j CT --zone 100'] def test_delete_conntrack_state_dedupes(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress'} dev_info = {'device': 'tapdevice', 'fixed_ips': ['1.2.3.4']} dev_info_list = [dev_info for _ in range(10)] self.mgr._delete_conntrack_state(dev_info_list, rule) self.assertEqual(1, len(self.execute.mock_calls)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/linux/test_ip_lib.py0000644000175000017500000021410300000000000026063 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import errno import socket import mock import netaddr from neutron_lib import constants from neutron_lib import exceptions from oslo_utils import netutils from oslo_utils import uuidutils import pyroute2 from pyroute2.netlink.rtnl import ifaddrmsg from pyroute2.netlink.rtnl import ifinfmsg from pyroute2.netlink.rtnl import ndmsg from pyroute2 import NetlinkError import testtools from neutron.agent.common import utils # noqa from neutron.agent.linux import ip_lib from neutron.common import utils as common_utils from neutron import privileged from neutron.privileged.agent.linux import ip_lib as priv_lib from neutron.tests import base NETNS_SAMPLE = [ '12345678-1234-5678-abcd-1234567890ab', 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'cccccccc-cccc-cccc-cccc-cccccccccccc'] GATEWAY_SAMPLE1 = (""" default via 10.35.19.254 metric 100 10.35.16.0/22 proto kernel scope link src 10.35.17.97 """) GATEWAY_SAMPLE2 = (""" default via 10.35.19.254 metric 100 """) GATEWAY_SAMPLE3 = (""" 10.35.16.0/22 proto kernel scope link src 10.35.17.97 """) GATEWAY_SAMPLE4 = (""" default via 10.35.19.254 """) GATEWAY_SAMPLE5 = (""" default via 192.168.99.1 proto static """) GATEWAY_SAMPLE6 = (""" default via 192.168.99.1 proto static metric 100 """) GATEWAY_SAMPLE7 = (""" default dev qg-31cd36 metric 1 """) IPv6_GATEWAY_SAMPLE1 = (""" default via 2001:470:9:1224:4508:b885:5fb:740b metric 100 2001:db8::/64 proto kernel scope link src 2001:470:9:1224:dfcc:aaff:feb9:76ce """) IPv6_GATEWAY_SAMPLE2 = (""" default via 2001:470:9:1224:4508:b885:5fb:740b metric 100 """) IPv6_GATEWAY_SAMPLE3 = (""" 2001:db8::/64 proto kernel scope link src 2001:470:9:1224:dfcc:aaff:feb9:76ce """) IPv6_GATEWAY_SAMPLE4 = (""" default via fe80::dfcc:aaff:feb9:76ce """) IPv6_GATEWAY_SAMPLE5 = (""" default via 2001:470:9:1224:4508:b885:5fb:740b metric 1024 """) DEVICE_ROUTE_SAMPLE = 
("10.0.0.0/24 scope link src 10.0.0.2") SUBNET_SAMPLE1 = ("10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1\n" "10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2") SUBNET_SAMPLE2 = ("10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2\n" "10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1") class TestSubProcessBase(base.BaseTestCase): def setUp(self): super(TestSubProcessBase, self).setUp() self.execute_p = mock.patch('neutron.agent.common.utils.execute') self.execute = self.execute_p.start() def test_execute_wrapper(self): base = ip_lib.SubProcessBase() base._execute(['o'], 'link', ('list',), run_as_root=True) self.execute.assert_called_once_with(['ip', '-o', 'link', 'list'], run_as_root=True, log_fail_as_error=True) def test_execute_wrapper_int_options(self): base = ip_lib.SubProcessBase() base._execute([4], 'link', ('list',)) self.execute.assert_called_once_with(['ip', '-4', 'link', 'list'], run_as_root=False, log_fail_as_error=True) def test_execute_wrapper_no_options(self): base = ip_lib.SubProcessBase() base._execute([], 'link', ('list',)) self.execute.assert_called_once_with(['ip', 'link', 'list'], run_as_root=False, log_fail_as_error=True) def test_run_no_namespace(self): base = ip_lib.SubProcessBase() base._run([], 'link', ('list',)) self.execute.assert_called_once_with(['ip', 'link', 'list'], run_as_root=False, log_fail_as_error=True) def test_run_namespace(self): base = ip_lib.SubProcessBase(namespace='ns') base._run([], 'link', ('list',)) self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns', 'ip', 'link', 'list'], run_as_root=True, log_fail_as_error=True) def test_as_root_namespace(self): base = ip_lib.SubProcessBase(namespace='ns') base._as_root([], 'link', ('list',)) self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns', 'ip', 'link', 'list'], run_as_root=True, log_fail_as_error=True) class TestIpWrapper(base.BaseTestCase): def setUp(self): super(TestIpWrapper, self).setUp() self.execute_p = mock.patch.object(ip_lib.IPWrapper, '_execute') self.execute = self.execute_p.start() @mock.patch.object(priv_lib, 'get_device_names') def test_get_devices(self, mock_get_devices): interfaces = ['br01', 'lo', 'gre0'] mock_get_devices.return_value = interfaces devices = ip_lib.IPWrapper(namespace='foo').get_devices() for device in devices: self.assertEqual('br01', device.name) interfaces.remove(device.name) @mock.patch.object(priv_lib, 'get_device_names') def test_get_devices_include_loopback_and_gre(self, mock_get_devices): interfaces = ['br01', 'lo', 'gre0'] mock_get_devices.return_value = interfaces devices = ip_lib.IPWrapper(namespace='foo').get_devices( exclude_loopback=False, exclude_fb_tun_devices=False) for device in devices: self.assertIn(device.name, interfaces) interfaces.remove(device.name) self.assertEqual(0, len(interfaces)) @mock.patch.object(priv_lib, 'get_device_names') def test_get_devices_no_netspace(self, mock_get_devices): mock_get_devices.side_effect = priv_lib.NetworkNamespaceNotFound( netns_name='foo') self.assertEqual([], ip_lib.IPWrapper(namespace='foo').get_devices()) @mock.patch.object(pyroute2.netns, 'listnetns') @mock.patch.object(priv_lib, 'list_netns') def test_get_namespaces_non_root(self, priv_listnetns, listnetns): self.config(group='AGENT', use_helper_for_ns_read=False) listnetns.return_value = NETNS_SAMPLE retval = ip_lib.list_network_namespaces() self.assertEqual(retval, ['12345678-1234-5678-abcd-1234567890ab', 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'cccccccc-cccc-cccc-cccc-cccccccccccc']) self.assertEqual(1, 
listnetns.call_count) self.assertFalse(priv_listnetns.called) @mock.patch.object(pyroute2.netns, 'listnetns') @mock.patch.object(priv_lib, 'list_netns') def test_get_namespaces_root(self, priv_listnetns, listnetns): self.config(group='AGENT', use_helper_for_ns_read=True) priv_listnetns.return_value = NETNS_SAMPLE retval = ip_lib.list_network_namespaces() self.assertEqual(retval, ['12345678-1234-5678-abcd-1234567890ab', 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'cccccccc-cccc-cccc-cccc-cccccccccccc']) self.assertEqual(1, priv_listnetns.call_count) self.assertFalse(listnetns.called) @mock.patch.object(priv_lib, 'create_interface') def test_add_tuntap(self, create): ip_lib.IPWrapper().add_tuntap('tap0') create.assert_called_once_with('tap0', None, 'tuntap', mode='tap') @mock.patch.object(priv_lib, 'create_interface') def test_add_veth(self, create): ip_lib.IPWrapper().add_veth('tap0', 'tap1') create.assert_called_once_with( 'tap0', None, 'veth', peer={'ifname': 'tap1'}) @mock.patch.object(priv_lib, 'create_interface') def test_add_macvtap(self, create): ip_lib.IPWrapper().add_macvtap('macvtap0', 'eth0', 'bridge') create.assert_called_once_with( 'macvtap0', None, 'macvtap', physical_interface='eth0', mode='bridge') @mock.patch.object(priv_lib, 'delete_interface') def test_del_veth(self, delete): ip_lib.IPWrapper().del_veth('fpr-1234') delete.assert_called_once_with('fpr-1234', None) @mock.patch.object(priv_lib, 'create_interface') def test_add_veth_with_namespaces(self, create): ns2 = 'ns2' with mock.patch.object(ip_lib.IPWrapper, 'ensure_namespace') as en: ip_lib.IPWrapper().add_veth('tap0', 'tap1', namespace2=ns2) en.assert_has_calls([mock.call(ns2)]) create.assert_called_once_with( 'tap0', None, 'veth', peer={'ifname': 'tap1', 'net_ns_fd': 'ns2'}) @mock.patch.object(priv_lib, 'create_interface') def test_add_dummy(self, create): ip_lib.IPWrapper().add_dummy('dummy0') create.assert_called_once_with('dummy0', None, 'dummy') def test_get_device(self): dev = ip_lib.IPWrapper(namespace='ns').device('eth0') self.assertEqual(dev.namespace, 'ns') self.assertEqual(dev.name, 'eth0') @mock.patch.object(priv_lib, 'create_netns') def test_ensure_namespace(self, create): with mock.patch.object(ip_lib, 'IPDevice') as ip_dev: ip = ip_lib.IPWrapper() with mock.patch.object(ip.netns, 'exists') as ns_exists: with mock.patch('neutron.agent.common.utils.execute'): ns_exists.return_value = False ip.ensure_namespace('ns') create.assert_called_once_with('ns') ns_exists.assert_called_once_with('ns') ip_dev.assert_has_calls([mock.call('lo', namespace='ns'), mock.call().link.set_up()]) def test_ensure_namespace_existing(self): with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd: ip_ns_cmd.exists.return_value = True ns = ip_lib.IPWrapper().ensure_namespace('ns') self.assertFalse(self.execute.called) self.assertEqual(ns.namespace, 'ns') def test_namespace_is_empty_no_devices(self): ip = ip_lib.IPWrapper(namespace='ns') with mock.patch.object(ip, 'get_devices') as get_devices: get_devices.return_value = [] self.assertTrue(ip.namespace_is_empty()) self.assertTrue(get_devices.called) def test_namespace_is_empty(self): ip = ip_lib.IPWrapper(namespace='ns') with mock.patch.object(ip, 'get_devices') as get_devices: get_devices.return_value = [mock.Mock()] self.assertFalse(ip.namespace_is_empty()) self.assertTrue(get_devices.called) def test_garbage_collect_namespace_does_not_exist(self): with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls: ip_ns_cmd_cls.return_value.exists.return_value = False ip = 
ip_lib.IPWrapper(namespace='ns') with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty: self.assertFalse(ip.garbage_collect_namespace()) ip_ns_cmd_cls.assert_has_calls([mock.call().exists('ns')]) self.assertNotIn(mock.call().delete('ns'), ip_ns_cmd_cls.return_value.mock_calls) self.assertEqual([], mock_is_empty.mock_calls) def test_garbage_collect_namespace_existing_empty_ns(self): with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls: ip_ns_cmd_cls.return_value.exists.return_value = True ip = ip_lib.IPWrapper(namespace='ns') with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty: mock_is_empty.return_value = True self.assertTrue(ip.garbage_collect_namespace()) mock_is_empty.assert_called_once_with() expected = [mock.call().exists('ns'), mock.call().delete('ns')] ip_ns_cmd_cls.assert_has_calls(expected) def test_garbage_collect_namespace_existing_not_empty(self): lo_device = mock.Mock() lo_device.name = 'lo' tap_device = mock.Mock() tap_device.name = 'tap1' with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls: ip_ns_cmd_cls.return_value.exists.return_value = True ip = ip_lib.IPWrapper(namespace='ns') with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty: mock_is_empty.return_value = False self.assertFalse(ip.garbage_collect_namespace()) mock_is_empty.assert_called_once_with() expected = [mock.call(ip), mock.call().exists('ns')] self.assertEqual(ip_ns_cmd_cls.mock_calls, expected) self.assertNotIn(mock.call().delete('ns'), ip_ns_cmd_cls.mock_calls) @mock.patch.object(priv_lib, 'create_interface') def test_add_vlan(self, create): retval = ip_lib.IPWrapper().add_vlan('eth0.1', 'eth0', '1') self.assertIsInstance(retval, ip_lib.IPDevice) self.assertEqual(retval.name, 'eth0.1') create.assert_called_once_with('eth0.1', None, 'vlan', physical_interface='eth0', vlan_id='1') @mock.patch.object(priv_lib, 'create_interface') def test_add_vxlan_valid_srcport_length(self, create): self.call_params = {} def fake_create_interface(ifname, namespace, kind, **kwargs): self.call_params = dict( ifname=ifname, namespace=namespace, kind=kind, **kwargs) create.side_effect = fake_create_interface expected_call_params = { 'ifname': 'vxlan0', 'namespace': None, 'kind': 'vxlan', 'vxlan_id': 'vni0', 'vxlan_group': 'group0', 'physical_interface': 'dev0', 'vxlan_ttl': 'ttl0', 'vxlan_tos': 'tos0', 'vxlan_local': 'local0', 'vxlan_proxy': True, 'vxlan_port_range': ('1', '2')} retval = ip_lib.IPWrapper().add_vxlan('vxlan0', 'vni0', group='group0', dev='dev0', ttl='ttl0', tos='tos0', local='local0', proxy=True, srcport=(1, 2)) self.assertIsInstance(retval, ip_lib.IPDevice) self.assertEqual(retval.name, 'vxlan0') self.assertDictEqual(expected_call_params, self.call_params) def test_add_vxlan_invalid_srcport_length(self): wrapper = ip_lib.IPWrapper() self.assertRaises(exceptions.NetworkVxlanPortRangeError, wrapper.add_vxlan, 'vxlan0', 'vni0', group='group0', dev='dev0', ttl='ttl0', tos='tos0', local='local0', proxy=True, srcport=('1', '2', '3')) def test_add_vxlan_invalid_srcport_range(self): wrapper = ip_lib.IPWrapper() self.assertRaises(exceptions.NetworkVxlanPortRangeError, wrapper.add_vxlan, 'vxlan0', 'vni0', group='group0', dev='dev0', ttl='ttl0', tos='tos0', local='local0', proxy=True, srcport=(2000, 1000)) @mock.patch.object(priv_lib, 'create_interface') def test_add_vxlan_dstport(self, create): self.call_params = {} def fake_create_interface(ifname, namespace, kind, **kwargs): self.call_params = dict( ifname=ifname, namespace=namespace, kind=kind, **kwargs) 
create.side_effect = fake_create_interface expected_call_params = { 'ifname': 'vxlan0', 'namespace': None, 'kind': 'vxlan', 'vxlan_id': 'vni0', 'vxlan_group': 'group0', 'physical_interface': 'dev0', 'vxlan_ttl': 'ttl0', 'vxlan_tos': 'tos0', 'vxlan_local': 'local0', 'vxlan_proxy': True, 'vxlan_port_range': ('1', '2'), 'vxlan_port': 4789} retval = ip_lib.IPWrapper().add_vxlan('vxlan0', 'vni0', group='group0', dev='dev0', ttl='ttl0', tos='tos0', local='local0', proxy=True, srcport=(1, 2), dstport=4789) self.assertIsInstance(retval, ip_lib.IPDevice) self.assertEqual(retval.name, 'vxlan0') self.assertDictEqual(expected_call_params, self.call_params) def test_add_device_to_namespace(self): dev = mock.Mock() ip_lib.IPWrapper(namespace='ns').add_device_to_namespace(dev) dev.assert_has_calls([mock.call.link.set_netns('ns')]) def test_add_device_to_namespace_is_none(self): dev = mock.Mock() ip_lib.IPWrapper().add_device_to_namespace(dev) self.assertEqual([], dev.mock_calls) class TestIPDevice(base.BaseTestCase): def test_eq_same_name(self): dev1 = ip_lib.IPDevice('tap0') dev2 = ip_lib.IPDevice('tap0') self.assertEqual(dev1, dev2) def test_eq_diff_name(self): dev1 = ip_lib.IPDevice('tap0') dev2 = ip_lib.IPDevice('tap1') self.assertNotEqual(dev1, dev2) def test_eq_same_namespace(self): dev1 = ip_lib.IPDevice('tap0', 'ns1') dev2 = ip_lib.IPDevice('tap0', 'ns1') self.assertEqual(dev1, dev2) def test_eq_diff_namespace(self): dev1 = ip_lib.IPDevice('tap0', namespace='ns1') dev2 = ip_lib.IPDevice('tap0', namespace='ns2') self.assertNotEqual(dev1, dev2) def test_eq_other_is_none(self): dev1 = ip_lib.IPDevice('tap0', namespace='ns1') self.assertIsNotNone(dev1) def test_str(self): self.assertEqual(str(ip_lib.IPDevice('tap0')), 'tap0') class TestIPCommandBase(base.BaseTestCase): def setUp(self): super(TestIPCommandBase, self).setUp() self.ip = mock.Mock() self.ip.namespace = 'namespace' self.ip_cmd = ip_lib.IpCommandBase(self.ip) self.ip_cmd.COMMAND = 'foo' def test_run(self): self.ip_cmd._run([], ('link', 'show')) self.ip.assert_has_calls([mock.call._run([], 'foo', ('link', 'show'))]) def test_run_with_options(self): self.ip_cmd._run(['o'], ('link')) self.ip.assert_has_calls([mock.call._run(['o'], 'foo', ('link'))]) def test_as_root_namespace_false(self): self.ip_cmd._as_root([], ('link')) self.ip.assert_has_calls( [mock.call._as_root([], 'foo', ('link'), use_root_namespace=False)]) def test_as_root_namespace_true(self): self.ip_cmd._as_root([], ('link'), use_root_namespace=True) self.ip.assert_has_calls( [mock.call._as_root([], 'foo', ('link'), use_root_namespace=True)]) def test_as_root_namespace_true_with_options(self): self.ip_cmd._as_root('o', 'link', use_root_namespace=True) self.ip.assert_has_calls( [mock.call._as_root('o', 'foo', ('link'), use_root_namespace=True)]) class TestIPDeviceCommandBase(base.BaseTestCase): def setUp(self): super(TestIPDeviceCommandBase, self).setUp() self.ip_dev = mock.Mock() self.ip_dev.name = 'eth0' self.ip_dev._execute = mock.Mock(return_value='executed') self.ip_cmd = ip_lib.IpDeviceCommandBase(self.ip_dev) self.ip_cmd.COMMAND = 'foo' def test_name_property(self): self.assertEqual(self.ip_cmd.name, 'eth0') class TestIPCmdBase(base.BaseTestCase): def setUp(self): super(TestIPCmdBase, self).setUp() self.parent = mock.Mock() self.parent.name = 'eth0' def _assert_call(self, options, args): self.parent._run.assert_has_calls([ mock.call(options, self.command, args)]) def _assert_sudo(self, options, args, use_root_namespace=False): self.parent._as_root.assert_has_calls( 
[mock.call(options, self.command, args, use_root_namespace=use_root_namespace)]) class TestIpRuleCommand(TestIPCmdBase): def setUp(self): super(TestIpRuleCommand, self).setUp() self.parent._as_root.return_value = '' self.ns = uuidutils.generate_uuid() self.parent.namespace = self.ns self.command = 'rule' self._mock_priv_list_ip_rules = mock.patch.object(priv_lib, 'list_ip_rules') self.mock_priv_list_ip_rules = self._mock_priv_list_ip_rules.start() self.addCleanup(self._stop_mock) def _stop_mock(self): self._mock_priv_list_ip_rules.stop() def _test_add_rule(self, ip, iif, table, priority): ip_version = netaddr.IPNetwork(ip).version ip_family = common_utils.get_socket_address_family(ip_version) table_num = ip_lib.IP_RULE_TABLES.get(table) or int(table) cmd_args = {'table': table_num, 'priority': priority, 'family': ip_family} if iif: cmd_args['iifname'] = iif else: cmd_args['src'] = ip cmd_args['src_len'] = common_utils.get_network_length(ip_version) with mock.patch.object(priv_lib, 'add_ip_rule') as mock_add_ip_rule: ip_lib.add_ip_rule('namespace', ip, iif=iif, table=table, priority=priority) mock_add_ip_rule.assert_called_once_with('namespace', **cmd_args) def _test_add_rule_exists(self, ip, table, priority, output): self.parent._as_root.return_value = output with mock.patch.object(ip_lib, '_exist_ip_rule', return_value=True) \ as mock_exists: ip_lib.add_ip_rule(self.ns, ip, table=table, priority=priority) kwargs = {'from': ip, 'priority': str(priority), 'table': str(table), 'type': 'unicast'} mock_exists.assert_called_once_with( common_utils.get_ip_version(ip), **kwargs) def _test_delete_rule(self, ip, table, priority): with mock.patch.object(priv_lib, 'delete_ip_rule') as mock_delete: ip_lib.delete_ip_rule(self.ns, ip, table=table, priority=priority) args = ip_lib._make_pyroute2_args(ip, None, table, priority, None) mock_delete.assert_called_with(self.ns, **args) def test_add_rule_v4(self): self._test_add_rule('192.168.45.100', None, 2, 100) def test_add_rule_v4_iif(self): self._test_add_rule('192.168.45.100', 'iif_name', 2, 100) def test_add_rule_v6(self): self._test_add_rule('2001:db8::1', None, 3, 200) def test_add_rule_table_string(self): self._test_add_rule('2001:db8::1', None, 'default', 200) self._test_add_rule('2001:db8::1', None, 'main', 200) self._test_add_rule('2001:db8::1', None, 'local', 200) self._test_add_rule('2001:db8::1', None, '100', 200) def test_delete_rule_v4(self): self._test_delete_rule('192.168.45.100', 2, 100) def test_delete_rule_v6(self): self._test_delete_rule('2001:db8::1', 3, 200) class TestIpLinkCommand(TestIPCmdBase): def setUp(self): super(TestIpLinkCommand, self).setUp() self.command = 'link' self.link_cmd = ip_lib.IpLinkCommand(self.parent) @mock.patch.object(priv_lib, 'set_link_attribute') def test_set_address(self, set_link_attribute): self.link_cmd.set_address('aa:bb:cc:dd:ee:ff') set_link_attribute.assert_called_once_with( self.parent.name, self.parent.namespace, address='aa:bb:cc:dd:ee:ff') @mock.patch.object(priv_lib, 'set_link_flags') def test_set_allmulticast_on(self, set_link_flags): self.link_cmd.set_allmulticast_on() set_link_flags.assert_called_once_with( self.parent.name, self.parent.namespace, ifinfmsg.IFF_ALLMULTI) @mock.patch.object(priv_lib, 'set_link_attribute') def test_set_mtu(self, set_link_attribute): self.link_cmd.set_mtu(1500) set_link_attribute.assert_called_once_with( self.parent.name, self.parent.namespace, mtu=1500) @mock.patch.object(priv_lib, 'set_link_attribute') def test_set_up(self, set_link_attribute): 
self.link_cmd.set_up() set_link_attribute.assert_called_once_with( self.parent.name, self.parent.namespace, state='up') @mock.patch.object(priv_lib, 'set_link_attribute') def test_set_down(self, set_link_attribute): self.link_cmd.set_down() set_link_attribute.assert_called_once_with( self.parent.name, self.parent.namespace, state='down') @mock.patch.object(priv_lib, 'set_link_attribute') def test_set_netns(self, set_link_attribute): original_namespace = self.parent.namespace self.link_cmd.set_netns('foo') set_link_attribute.assert_called_once_with( 'eth0', original_namespace, net_ns_fd='foo') self.assertEqual(self.parent.namespace, 'foo') @mock.patch.object(priv_lib, 'set_link_attribute') def test_set_name(self, set_link_attribute): original_name = self.parent.name self.link_cmd.set_name('tap1') set_link_attribute.assert_called_once_with( original_name, self.parent.namespace, ifname='tap1') self.assertEqual(self.parent.name, 'tap1') @mock.patch.object(priv_lib, 'set_link_attribute') def test_set_alias(self, set_link_attribute): self.link_cmd.set_alias('openvswitch') set_link_attribute.assert_called_once_with( self.parent.name, self.parent.namespace, ifalias='openvswitch') @mock.patch.object(priv_lib, 'create_interface') def test_create(self, create): self.link_cmd.create() create.assert_called_once_with(self.parent.name, self.parent.namespace, self.parent.kind) @mock.patch.object(priv_lib, 'delete_interface') def test_delete(self, delete): self.link_cmd.delete() delete.assert_called_once_with(self.parent.name, self.parent.namespace) @mock.patch.object(priv_lib, 'get_link_attributes') def test_settings_property(self, get_link_attributes): self.link_cmd.attributes get_link_attributes.assert_called_once_with( self.parent.name, self.parent.namespace) class TestIpAddrCommand(TestIPCmdBase): def setUp(self): super(TestIpAddrCommand, self).setUp() self.parent.name = 'tap0' self.command = 'addr' self.addr_cmd = ip_lib.IpAddrCommand(self.parent) @mock.patch.object(priv_lib, 'add_ip_address') def test_add_address(self, add): self.addr_cmd.add('192.168.45.100/24') add.assert_called_once_with( 4, '192.168.45.100', 24, self.parent.name, self.addr_cmd._parent.namespace, 'global', '192.168.45.255') @mock.patch.object(priv_lib, 'add_ip_address') def test_add_address_scoped(self, add): self.addr_cmd.add('192.168.45.100/24', scope='link') add.assert_called_once_with( 4, '192.168.45.100', 24, self.parent.name, self.addr_cmd._parent.namespace, 'link', '192.168.45.255') @mock.patch.object(priv_lib, 'add_ip_address') def test_add_address_no_broadcast(self, add): self.addr_cmd.add('192.168.45.100/24', add_broadcast=False) add.assert_called_once_with( 4, '192.168.45.100', 24, self.parent.name, self.addr_cmd._parent.namespace, 'global', None) @mock.patch.object(priv_lib, 'delete_ip_address') def test_del_address(self, delete): self.addr_cmd.delete('192.168.45.100/24') delete.assert_called_once_with( 4, '192.168.45.100', 24, self.parent.name, self.addr_cmd._parent.namespace) @mock.patch.object(priv_lib, 'flush_ip_addresses') def test_flush(self, flush): self.addr_cmd.flush(6) flush.assert_called_once_with( 6, self.parent.name, self.addr_cmd._parent.namespace) def test_wait_until_address_ready(self): self.addr_cmd.list = mock.Mock(return_value=[{'tentative': False}]) # this address is not tentative or failed so it should return self.assertIsNone(self.addr_cmd.wait_until_address_ready( '2001:470:9:1224:fd91:272:581e:3a32')) def test_wait_until_address_ready_non_existent_address(self): self.addr_cmd.list = 
mock.Mock(return_value=[]) with testtools.ExpectedException(ip_lib.AddressNotReady): self.addr_cmd.wait_until_address_ready('abcd::1234') def test_wait_until_address_ready_timeout(self): tentative_address = 'fe80::3023:39ff:febc:22ae' self.addr_cmd.list = mock.Mock(return_value=[ dict(scope='link', dadfailed=False, tentative=True, dynamic=False, cidr=tentative_address + '/64')]) with testtools.ExpectedException(ip_lib.AddressNotReady): self.addr_cmd.wait_until_address_ready(tentative_address, wait_time=1) @mock.patch.object(ip_lib, 'get_devices_with_ip') def test_list(self, mock_get_dev_ip): self.addr_cmd._parent.namespace = 'test_ns' self.addr_cmd.list() mock_get_dev_ip.assert_called_once_with('test_ns', name=self.addr_cmd.name) @mock.patch.object(ip_lib, 'get_devices_with_ip') def test_list_scope(self, mock_get_dev_ip): self.addr_cmd._parent.namespace = 'test_ns' self.addr_cmd.list(scope='link') mock_get_dev_ip.assert_called_once_with('test_ns', name=self.addr_cmd.name, scope=253) @mock.patch.object(ip_lib, 'get_devices_with_ip') def test_list_to(self, mock_get_dev_ip): self.addr_cmd._parent.namespace = 'test_ns' cidrs = [{'cidr': '1.2.3.4', 'mask': None}, {'cidr': '1.2.3.4/24', 'mask': 24}, {'cidr': '2001:db8::1', 'mask': None}, {'cidr': '2001:db8::1/64', 'mask': 64}] for cidr in cidrs: self.addr_cmd.list(to=cidr['cidr']) args = {'name': self.addr_cmd.name, 'address': common_utils.cidr_to_ip(cidr['cidr'])} if cidr['mask']: args['mask'] = cidr['mask'] mock_get_dev_ip.assert_called_once_with('test_ns', **args) mock_get_dev_ip.reset_mock() @mock.patch.object(ip_lib, 'get_devices_with_ip') def test_list_ip_version(self, mock_get_dev_ip): self.addr_cmd._parent.namespace = 'test_ns' ip_versions = [ {'ip_version': constants.IP_VERSION_4, 'family': socket.AF_INET}, {'ip_version': constants.IP_VERSION_6, 'family': socket.AF_INET6}] for ip_version in ip_versions: self.addr_cmd.list(ip_version=ip_version['ip_version']) mock_get_dev_ip.assert_called_once_with( 'test_ns', name=self.addr_cmd.name, family=ip_version['family']) mock_get_dev_ip.reset_mock() @mock.patch.object(ip_lib, 'get_devices_with_ip') def test_list_filters_dynamic_permanent(self, mock_get_dev_ip): self.addr_cmd._parent.namespace = 'test_ns' mock_get_dev_ip.return_value = [{'dynamic': True}] retval = self.addr_cmd.list(filters=['dynamic']) self.assertEqual(1, len(retval)) retval = self.addr_cmd.list(filters=['permanent']) self.assertEqual(0, len(retval)) @mock.patch.object(ip_lib, 'get_devices_with_ip') def test_list_filters_tentative_dadfailed(self, mock_get_dev_ip): self.addr_cmd._parent.namespace = 'test_ns' mock_get_dev_ip.return_value = [{'tentative': True, 'dadfailed': False}] retval = self.addr_cmd.list(filters=['tentative']) self.assertEqual(1, len(retval)) retval = self.addr_cmd.list(filters=['tentative', 'dadfailed']) self.assertEqual(0, len(retval)) class TestIpNetnsCommand(TestIPCmdBase): def setUp(self): super(TestIpNetnsCommand, self).setUp() self.command = 'netns' self.netns_cmd = ip_lib.IpNetnsCommand(self.parent) @mock.patch.object(priv_lib, 'create_netns') def test_add_namespace(self, create): with mock.patch('neutron.agent.common.utils.execute') as execute: ns = self.netns_cmd.add('ns') create.assert_called_once_with('ns') self.assertEqual(ns.namespace, 'ns') execute.assert_called_once_with( ['ip', 'netns', 'exec', 'ns', 'sysctl', '-w', 'net.ipv4.conf.all.promote_secondaries=1'], run_as_root=True, check_exit_code=True, extra_ok_codes=None, log_fail_as_error=True) @mock.patch.object(priv_lib, 'remove_netns') def 
test_delete_namespace(self, remove): self.netns_cmd.delete('ns') remove.assert_called_once_with('ns') @mock.patch.object(pyroute2.netns, 'listnetns') @mock.patch.object(priv_lib, 'list_netns') def test_namespace_exists_use_helper(self, priv_listnetns, listnetns): self.config(group='AGENT', use_helper_for_ns_read=True) priv_listnetns.return_value = NETNS_SAMPLE # need another instance to avoid mocking netns_cmd = ip_lib.IpNetnsCommand(ip_lib.SubProcessBase()) self.assertTrue( netns_cmd.exists('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb')) self.assertEqual(1, priv_listnetns.call_count) self.assertFalse(listnetns.called) @mock.patch.object(pyroute2.netns, 'listnetns') @mock.patch.object(priv_lib, 'list_netns') def test_namespace_does_not_exist_no_helper(self, priv_listnetns, listnetns): self.config(group='AGENT', use_helper_for_ns_read=False) listnetns.return_value = NETNS_SAMPLE # need another instance to avoid mocking netns_cmd = ip_lib.IpNetnsCommand(ip_lib.SubProcessBase()) self.assertFalse( netns_cmd.exists('bbbbbbbb-1111-2222-3333-bbbbbbbbbbbb')) self.assertEqual(1, listnetns.call_count) self.assertFalse(priv_listnetns.called) def test_execute(self): self.parent.namespace = 'ns' with mock.patch('neutron.agent.common.utils.execute') as execute: self.netns_cmd.execute(['ip', 'link', 'list']) execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns', 'ip', 'link', 'list'], run_as_root=True, check_exit_code=True, extra_ok_codes=None, log_fail_as_error=True) def test_execute_env_var_prepend(self): self.parent.namespace = 'ns' with mock.patch('neutron.agent.common.utils.execute') as execute: env = dict(FOO=1, BAR=2) self.netns_cmd.execute(['ip', 'link', 'list'], env) execute.assert_called_once_with( ['ip', 'netns', 'exec', 'ns', 'env'] + ['%s=%s' % (k, v) for k, v in env.items()] + ['ip', 'link', 'list'], run_as_root=True, check_exit_code=True, extra_ok_codes=None, log_fail_as_error=True) def test_execute_nosudo_with_no_namespace(self): with mock.patch('neutron.agent.common.utils.execute') as execute: self.parent.namespace = None self.netns_cmd.execute(['test']) execute.assert_called_once_with(['test'], check_exit_code=True, extra_ok_codes=None, run_as_root=False, log_fail_as_error=True) class TestDeviceExists(base.BaseTestCase): def test_ensure_device_is_ready(self): ip_lib_mock = mock.Mock() with mock.patch.object(ip_lib, 'IPDevice', return_value=ip_lib_mock): self.assertTrue(ip_lib.ensure_device_is_ready("eth0")) self.assertTrue(ip_lib_mock.link.set_up.called) ip_lib_mock.reset_mock() # device doesn't exists ip_lib_mock.link.set_up.side_effect = RuntimeError self.assertFalse(ip_lib.ensure_device_is_ready("eth0")) def test_ensure_device_is_ready_no_link_address(self): with mock.patch.object( priv_lib, 'get_link_attributes') as get_link_attributes, \ mock.patch.object(priv_lib, 'set_link_attribute') as \ set_link_attribute, \ mock.patch.object(priv_lib, 'interface_exists', return_value=True): get_link_attributes.return_value = {} self.assertFalse(ip_lib.ensure_device_is_ready("lo")) get_link_attributes.assert_called_once_with("lo", None) set_link_attribute.assert_not_called() def test_ensure_device_is_ready_no_device(self): with mock.patch.object(priv_lib, 'interface_exists', return_value=False): self.assertFalse(ip_lib.ensure_device_is_ready("lo")) class TestGetRoutingTable(base.BaseTestCase): ip_db_interfaces = { 1: { 'family': 0, 'txqlen': 0, 'ipdb_scope': 'system', 'index': 1, 'operstate': 'DOWN', 'num_tx_queues': 1, 'group': 0, 'carrier_changes': 0, 'ipaddr': [], 'neighbours': [], 
'ifname': 'lo', 'promiscuity': 0, 'linkmode': 0, 'broadcast': '00:00:00:00:00:00', 'address': '00:00:00:00:00:00', 'vlans': [], 'ipdb_priority': 0, 'qdisc': 'noop', 'mtu': 65536, 'num_rx_queues': 1, 'carrier': 1, 'flags': 8, 'ifi_type': 772, 'ports': [] }, 2: { 'family': 0, 'txqlen': 500, 'ipdb_scope': 'system', 'index': 2, 'operstate': 'DOWN', 'num_tx_queues': 1, 'group': 0, 'carrier_changes': 1, 'ipaddr': ['1111:1111:1111:1111::3/64', '10.0.0.3/24'], 'neighbours': [], 'ifname': 'tap-1', 'promiscuity': 0, 'linkmode': 0, 'broadcast': 'ff:ff:ff:ff:ff:ff', 'address': 'b6:d5:f6:a8:2e:62', 'vlans': [], 'ipdb_priority': 0, 'kind': 'tun', 'qdisc': 'fq_codel', 'mtu': 1500, 'num_rx_queues': 1, 'carrier': 0, 'flags': 4099, 'ifi_type': 1, 'ports': [] }, 'tap-1': { 'family': 0, 'txqlen': 500, 'ipdb_scope': 'system', 'index': 2, 'operstate': 'DOWN', 'num_tx_queues': 1, 'group': 0, 'carrier_changes': 1, 'ipaddr': ['1111:1111:1111:1111::3/64', '10.0.0.3/24'], 'neighbours': [], 'ifname': 'tap-1', 'promiscuity': 0, 'linkmode': 0, 'broadcast': 'ff:ff:ff:ff:ff:ff', 'address': 'b6:d5:f6:a8:2e:62', 'vlans': [], 'ipdb_priority': 0, 'kind': 'tun', 'qdisc': 'fq_codel', 'mtu': 1500, 'num_rx_queues': 1, 'carrier': 0, 'flags': 4099, 'ifi_type': 1, 'ports': [] }, 'lo': { 'family': 0, 'txqlen': 0, 'ipdb_scope': 'system', 'index': 1, 'operstate': 'DOWN', 'num_tx_queues': 1, 'group': 0, 'carrier_changes': 0, 'ipaddr': [], 'neighbours': [], 'ifname': 'lo', 'promiscuity': 0, 'linkmode': 0, 'broadcast': '00:00:00:00:00:00', 'address': '00:00:00:00:00:00', 'vlans': [], 'ipdb_priority': 0, 'qdisc': 'noop', 'mtu': 65536, 'num_rx_queues': 1, 'carrier': 1, 'flags': 8, 'ifi_type': 772, 'ports': [] } } ip_db_routes = [ { 'oif': 2, 'dst_len': 24, 'family': 2, 'proto': 3, 'tos': 0, 'dst': '10.0.1.0/24', 'flags': 16, 'ipdb_priority': 0, 'metrics': {}, 'scope': 0, 'encap': {}, 'src_len': 0, 'table': 254, 'multipath': [], 'type': 1, 'gateway': '10.0.0.1', 'ipdb_scope': 'system' }, { 'oif': 2, 'type': 1, 'dst_len': 24, 'family': 2, 'proto': 2, 'tos': 0, 'dst': '10.0.0.0/24', 'ipdb_priority': 0, 'metrics': {}, 'flags': 16, 'encap': {}, 'src_len': 0, 'table': 254, 'multipath': [], 'prefsrc': '10.0.0.3', 'scope': 253, 'ipdb_scope': 'system' }, { 'oif': 2, 'dst_len': 0, 'family': 2, 'proto': 3, 'tos': 0, 'dst': 'default', 'flags': 16, 'ipdb_priority': 0, 'metrics': {}, 'scope': 0, 'encap': {}, 'src_len': 0, 'table': 254, 'multipath': [], 'type': 1, 'gateway': '10.0.0.2', 'ipdb_scope': 'system' }, { 'metrics': {}, 'oif': 2, 'dst_len': 64, 'family': socket.AF_INET6, 'proto': 2, 'tos': 0, 'dst': '1111:1111:1111:1111::/64', 'pref': '00', 'ipdb_priority': 0, 'priority': 256, 'flags': 0, 'encap': {}, 'src_len': 0, 'table': 254, 'multipath': [], 'type': 1, 'scope': 0, 'ipdb_scope': 'system' }, { 'metrics': {}, 'oif': 2, 'dst_len': 64, 'family': socket.AF_INET6, 'proto': 3, 'tos': 0, 'dst': '1111:1111:1111:1112::/64', 'pref': '00', 'flags': 0, 'ipdb_priority': 0, 'priority': 1024, 'scope': 0, 'encap': {}, 'src_len': 0, 'table': 254, 'multipath': [], 'type': 1, 'gateway': '1111:1111:1111:1111::1', 'ipdb_scope': 'system' } ] ip_db_multipath_routes = [ { 'dst_len': 24, 'family': socket.AF_INET, 'proto': 3, 'tos': 0, 'dst': '10.0.1.0/24', 'flags': 16, 'ipdb_priority': 0, 'metrics': {}, 'scope': 0, 'encap': {}, 'src_len': 0, 'table': 254, 'multipath': ({'oif': 1, 'family': socket.AF_INET}, {'oif': 2, 'dst_len': 24, 'family': socket.AF_INET, 'proto': 2, 'tos': 0, 'pref': '00', 'priority': 256, 'flags': 0, 'encap': {}, 'src_len': 0, 'table': 254, 
'type': 1, 'scope': 0}), 'type': 1, 'gateway': '10.0.0.1', 'ipdb_scope': 'system' }, { 'metrics': {}, 'dst_len': 64, 'family': socket.AF_INET6, 'proto': 2, 'tos': 0, 'dst': '1111:1111:1111:1111::/64', 'pref': '00', 'ipdb_priority': 0, 'priority': 256, 'flags': 0, 'encap': {}, 'src_len': 0, 'table': 254, 'multipath': ({'oif': 1, 'family': socket.AF_INET6}, {'oif': 2, 'dst_len': 64, 'family': socket.AF_INET6, 'proto': 2, 'tos': 0, 'pref': '00', 'priority': 256, 'flags': 0, 'encap': {}, 'src_len': 0, 'table': 254, 'type': 1, 'scope': 0}), 'type': 1, 'scope': 0, 'ipdb_scope': 'system' } ] def setUp(self): super(TestGetRoutingTable, self).setUp() self.addCleanup(privileged.default.set_client_mode, True) privileged.default.set_client_mode(False) @mock.patch.object(pyroute2, 'IPDB') @mock.patch.object(pyroute2, 'NetNS') def test_get_routing_table_nonexistent_namespace(self, mock_netns, mock_ip_db): mock_netns.side_effect = OSError(errno.ENOENT, None) with testtools.ExpectedException(ip_lib.NetworkNamespaceNotFound): ip_lib.get_routing_table(4, 'ns') @mock.patch.object(pyroute2, 'IPDB') @mock.patch.object(pyroute2, 'NetNS') def test_get_routing_table_other_error(self, mock_netns, mock_ip_db): expected_exception = OSError(errno.EACCES, None) mock_netns.side_effect = expected_exception with testtools.ExpectedException(expected_exception.__class__): ip_lib.get_routing_table(4, 'ns') @mock.patch.object(pyroute2, 'IPDB') @mock.patch.object(pyroute2, 'NetNS') def _test_get_routing_table(self, version, ip_db_routes, expected, mock_netns, mock_ip_db): mock_ip_db_instance = mock_ip_db.return_value mock_ip_db_enter = mock_ip_db_instance.__enter__.return_value mock_ip_db_enter.interfaces = self.ip_db_interfaces mock_ip_db_enter.routes = ip_db_routes self.assertEqual(expected, ip_lib.get_routing_table(version)) def test_get_routing_table_4(self): expected = [{'destination': '10.0.1.0/24', 'nexthop': '10.0.0.1', 'device': 'tap-1', 'scope': 'universe'}, {'destination': '10.0.0.0/24', 'nexthop': None, 'device': 'tap-1', 'scope': 'link'}, {'destination': 'default', 'nexthop': '10.0.0.2', 'device': 'tap-1', 'scope': 'universe'}] self._test_get_routing_table(4, self.ip_db_routes, expected) def test_get_routing_table_6(self): expected = [{'destination': '1111:1111:1111:1111::/64', 'nexthop': None, 'device': 'tap-1', 'scope': 'universe'}, {'destination': '1111:1111:1111:1112::/64', 'nexthop': '1111:1111:1111:1111::1', 'device': 'tap-1', 'scope': 'universe'}] self._test_get_routing_table(6, self.ip_db_routes, expected) def test_get_routing_table_multipath_4(self): expected = [{'destination': '10.0.1.0/24', 'nexthop': '10.0.0.1', 'device': 'lo', 'scope': 'universe'}, {'destination': '10.0.1.0/24', 'nexthop': '10.0.0.1', 'device': 'tap-1', 'scope': 'universe'}] self._test_get_routing_table(4, self.ip_db_multipath_routes, expected) def test_get_routing_table_multipath_6(self): expected = [{'destination': '1111:1111:1111:1111::/64', 'nexthop': None, 'device': 'lo', 'scope': 'universe'}, {'destination': '1111:1111:1111:1111::/64', 'nexthop': None, 'device': 'tap-1', 'scope': 'universe'}] self._test_get_routing_table(6, self.ip_db_multipath_routes, expected) class TestIpNeighCommand(TestIPCmdBase): def setUp(self): super(TestIpNeighCommand, self).setUp() self.parent.name = 'tap0' self.command = 'neigh' self.neigh_cmd = ip_lib.IpNeighCommand(self.parent) self.addCleanup(privileged.default.set_client_mode, True) privileged.default.set_client_mode(False) @mock.patch.object(pyroute2, 'NetNS') def test_add_entry(self, 
mock_netns): mock_netns_instance = mock_netns.return_value mock_netns_enter = mock_netns_instance.__enter__.return_value mock_netns_enter.link_lookup.return_value = [1] self.neigh_cmd.add('192.168.45.100', 'cc:dd:ee:ff:ab:cd') mock_netns_enter.link_lookup.assert_called_once_with(ifname='tap0') mock_netns_enter.neigh.assert_called_once_with( 'replace', dst='192.168.45.100', lladdr='cc:dd:ee:ff:ab:cd', family=2, ifindex=1, state=ndmsg.states['permanent']) @mock.patch.object(pyroute2, 'NetNS') def test_add_entry_nonexistent_namespace(self, mock_netns): mock_netns.side_effect = OSError(errno.ENOENT, None) with testtools.ExpectedException(ip_lib.NetworkNamespaceNotFound): self.neigh_cmd.add('192.168.45.100', 'cc:dd:ee:ff:ab:cd') @mock.patch.object(pyroute2, 'NetNS') def test_add_entry_other_error(self, mock_netns): expected_exception = OSError(errno.EACCES, None) mock_netns.side_effect = expected_exception with testtools.ExpectedException(expected_exception.__class__): self.neigh_cmd.add('192.168.45.100', 'cc:dd:ee:ff:ab:cd') @mock.patch.object(pyroute2, 'NetNS') def test_delete_entry(self, mock_netns): mock_netns_instance = mock_netns.return_value mock_netns_enter = mock_netns_instance.__enter__.return_value mock_netns_enter.link_lookup.return_value = [1] self.neigh_cmd.delete('192.168.45.100', 'cc:dd:ee:ff:ab:cd') mock_netns_enter.link_lookup.assert_called_once_with(ifname='tap0') mock_netns_enter.neigh.assert_called_once_with( 'delete', dst='192.168.45.100', lladdr='cc:dd:ee:ff:ab:cd', family=2, ifindex=1) @mock.patch.object(priv_lib, '_run_iproute_neigh') def test_delete_entry_not_exist(self, mock_run_iproute): # trying to delete a non-existent entry shouldn't raise an error mock_run_iproute.side_effect = NetlinkError(errno.ENOENT, None) self.neigh_cmd.delete('192.168.45.100', 'cc:dd:ee:ff:ab:cd') @mock.patch.object(pyroute2, 'NetNS') def test_dump_entries(self, mock_netns): mock_netns_instance = mock_netns.return_value mock_netns_enter = mock_netns_instance.__enter__.return_value mock_netns_enter.link_lookup.return_value = [1] self.neigh_cmd.dump(4) mock_netns_enter.link_lookup.assert_called_once_with(ifname='tap0') mock_netns_enter.neigh.assert_called_once_with( 'dump', family=2, ifindex=1) def test_flush(self): self.neigh_cmd.flush(4, '192.168.0.1') self._assert_sudo([4], ('flush', 'to', '192.168.0.1')) class TestArpPing(TestIPCmdBase): @mock.patch.object(ip_lib, 'IPWrapper') @mock.patch('eventlet.spawn_n') def test_send_ipv4_addr_adv_notif(self, spawn_n, mIPWrapper): spawn_n.side_effect = lambda f: f() ARPING_COUNT = 3 address = '20.0.0.1' ip_lib.send_ip_addr_adv_notif(mock.sentinel.ns_name, mock.sentinel.iface_name, address, ARPING_COUNT) self.assertTrue(spawn_n.called) mIPWrapper.assert_has_calls([ mock.call(namespace=mock.sentinel.ns_name), mock.call().netns.execute(mock.ANY, extra_ok_codes=[1]), mock.call().netns.execute(mock.ANY, extra_ok_codes=[1]), mock.call().netns.execute(mock.ANY, extra_ok_codes=[1, 2]), mock.call().netns.execute(mock.ANY, extra_ok_codes=[1, 2]), mock.call().netns.execute(mock.ANY, extra_ok_codes=[1, 2]), mock.call().netns.execute(mock.ANY, extra_ok_codes=[1, 2])]) ip_wrapper = mIPWrapper(namespace=mock.sentinel.ns_name) # Just test that arping is called with the right arguments for arg in ('-A', '-U'): arping_cmd = ['arping', arg, '-I', mock.sentinel.iface_name, '-c', 1, '-w', mock.ANY, address] ip_wrapper.netns.execute.assert_any_call(arping_cmd, extra_ok_codes=mock.ANY) @mock.patch.object(ip_lib, 'IPWrapper') @mock.patch('eventlet.spawn_n') def 
test_send_ipv4_addr_adv_notif_nodev(self, spawn_n, mIPWrapper): spawn_n.side_effect = lambda f: f() ip_wrapper = mIPWrapper(namespace=mock.sentinel.ns_name) ip_wrapper.netns.execute.side_effect = RuntimeError ARPING_COUNT = 3 address = '20.0.0.1' with mock.patch.object(ip_lib, 'device_exists_with_ips_and_mac', return_value=False): ip_lib.send_ip_addr_adv_notif(mock.sentinel.ns_name, mock.sentinel.iface_name, address, ARPING_COUNT) # should return early with a single call when ENODEV mIPWrapper.assert_has_calls([ mock.call(namespace=mock.sentinel.ns_name), mock.call().netns.execute(mock.ANY, extra_ok_codes=[1]) ] * 1) @mock.patch('eventlet.spawn_n') def test_no_ipv6_addr_notif(self, spawn_n): ipv6_addr = 'fd00::1' ip_lib.send_ip_addr_adv_notif(mock.sentinel.ns_name, mock.sentinel.iface_name, ipv6_addr, 3) self.assertFalse(spawn_n.called) class TestAddNamespaceToCmd(base.BaseTestCase): def test_add_namespace_to_cmd_with_namespace(self): cmd = ['ping', '8.8.8.8'] self.assertEqual(['ip', 'netns', 'exec', 'tmp'] + cmd, ip_lib.add_namespace_to_cmd(cmd, 'tmp')) def test_add_namespace_to_cmd_without_namespace(self): cmd = ['ping', '8.8.8.8'] self.assertEqual(cmd, ip_lib.add_namespace_to_cmd(cmd, None)) class TestSetIpNonlocalBindForHaNamespace(base.BaseTestCase): def test_setting_failure(self): """Make sure message is formatted correctly.""" with mock.patch.object(ip_lib, 'set_ip_nonlocal_bind', return_value=1): ip_lib.set_ip_nonlocal_bind_for_namespace('foo', value=1) class TestSysctl(base.BaseTestCase): def setUp(self): super(TestSysctl, self).setUp() self.execute_p = mock.patch.object(ip_lib.IpNetnsCommand, 'execute') self.execute = self.execute_p.start() def test_disable_ipv6_when_ipv6_globally_enabled(self): dev = ip_lib.IPDevice('tap0', 'ns1') with mock.patch.object(netutils, 'is_ipv6_enabled', return_value=True): dev.disable_ipv6() self.execute.assert_called_once_with( ['sysctl', '-w', 'net.ipv6.conf.tap0.disable_ipv6=1'], log_fail_as_error=True, run_as_root=True) def test_disable_ipv6_when_ipv6_globally_disabled(self): dev = ip_lib.IPDevice('tap0', 'ns1') with mock.patch.object(netutils, 'is_ipv6_enabled', return_value=False): dev.disable_ipv6() self.assertFalse(self.execute.called) class TestConntrack(base.BaseTestCase): def setUp(self): super(TestConntrack, self).setUp() self.execute_p = mock.patch.object(ip_lib.IpNetnsCommand, 'execute') self.execute = self.execute_p.start() def test_delete_socket_conntrack_state(self): device = ip_lib.IPDevice('tap0', 'ns1') ip_str = '1.1.1.1' dport = '3378' protocol = 'tcp' expect_cmd = ["conntrack", "-D", "-d", ip_str, '-p', protocol, '--dport', dport] device.delete_socket_conntrack_state(ip_str, dport, protocol) self.execute.assert_called_once_with(expect_cmd, check_exit_code=True, extra_ok_codes=[1]) class ParseIpRuleTestCase(base.BaseTestCase): BASE_RULE = { 'family': 2, 'dst_len': 0, 'res2': 0, 'tos': 0, 'res1': 0, 'flags': 0, 'header': { 'pid': 18152, 'length': 44, 'flags': 2, 'error': None, 'type': 32, 'sequence_number': 281}, 'attrs': {'FRA_TABLE': 255, 'FRA_SUPPRESS_PREFIXLEN': 4294967295}, 'table': 255, 'action': 1, 'src_len': 0, 'event': 'RTM_NEWRULE'} def setUp(self): super(ParseIpRuleTestCase, self).setUp() self.rule = copy.deepcopy(self.BASE_RULE) def test_parse_priority(self): self.rule['attrs']['FRA_PRIORITY'] = 1000 parsed_rule = ip_lib._parse_ip_rule(self.rule, 4) self.assertEqual('1000', parsed_rule['priority']) def test_parse_from_ipv4(self): self.rule['attrs']['FRA_SRC'] = '192.168.0.1' self.rule['src_len'] = 24 parsed_rule = 
ip_lib._parse_ip_rule(self.rule, 4) self.assertEqual('192.168.0.1/24', parsed_rule['from']) def test_parse_from_ipv6(self): self.rule['attrs']['FRA_SRC'] = '2001:db8::1' self.rule['src_len'] = 64 parsed_rule = ip_lib._parse_ip_rule(self.rule, 6) self.assertEqual('2001:db8::1/64', parsed_rule['from']) def test_parse_from_any_ipv4(self): parsed_rule = ip_lib._parse_ip_rule(self.rule, 4) self.assertEqual('0.0.0.0/0', parsed_rule['from']) def test_parse_from_any_ipv6(self): parsed_rule = ip_lib._parse_ip_rule(self.rule, 6) self.assertEqual('::/0', parsed_rule['from']) def test_parse_to_ipv4(self): self.rule['attrs']['FRA_DST'] = '192.168.10.1' self.rule['dst_len'] = 24 parsed_rule = ip_lib._parse_ip_rule(self.rule, 4) self.assertEqual('192.168.10.1/24', parsed_rule['to']) def test_parse_to_ipv6(self): self.rule['attrs']['FRA_DST'] = '2001:db8::1' self.rule['dst_len'] = 64 parsed_rule = ip_lib._parse_ip_rule(self.rule, 6) self.assertEqual('2001:db8::1/64', parsed_rule['to']) def test_parse_to_none(self): parsed_rule = ip_lib._parse_ip_rule(self.rule, 4) self.assertIsNone(parsed_rule.get('to')) def test_parse_table(self): self.rule['attrs']['FRA_TABLE'] = 255 parsed_rule = ip_lib._parse_ip_rule(self.rule, 4) self.assertEqual('local', parsed_rule['table']) self.rule['attrs']['FRA_TABLE'] = 254 parsed_rule = ip_lib._parse_ip_rule(self.rule, 4) self.assertEqual('main', parsed_rule['table']) self.rule['attrs']['FRA_TABLE'] = 253 parsed_rule = ip_lib._parse_ip_rule(self.rule, 4) self.assertEqual('default', parsed_rule['table']) self.rule['attrs']['FRA_TABLE'] = 1000 parsed_rule = ip_lib._parse_ip_rule(self.rule, 4) self.assertEqual('1000', parsed_rule['table']) def test_parse_fwmark(self): self.rule['attrs']['FRA_FWMARK'] = 1000 self.rule['attrs']['FRA_FWMASK'] = 10 parsed_rule = ip_lib._parse_ip_rule(self.rule, 4) self.assertEqual('0x3e8/0xa', parsed_rule['fwmark']) def test_parse_fwmark_none(self): parsed_rule = ip_lib._parse_ip_rule(self.rule, 4) self.assertIsNone(parsed_rule.get('fwmark')) def test_parse_iif(self): self.rule['attrs']['FRA_IIFNAME'] = 'input_interface_name' parsed_rule = ip_lib._parse_ip_rule(self.rule, 4) self.assertEqual('input_interface_name', parsed_rule['iif']) def test_parse_iif_none(self): parsed_rule = ip_lib._parse_ip_rule(self.rule, 4) self.assertIsNone(parsed_rule.get('iif')) def test_parse_oif(self): self.rule['attrs']['FRA_OIFNAME'] = 'output_interface_name' parsed_rule = ip_lib._parse_ip_rule(self.rule, 4) self.assertEqual('output_interface_name', parsed_rule['oif']) def test_parse_oif_none(self): parsed_rule = ip_lib._parse_ip_rule(self.rule, 4) self.assertIsNone(parsed_rule.get('oif')) class ListIpRulesTestCase(base.BaseTestCase): def test_list_ip_rules(self): rule1 = {'family': 2, 'src_len': 24, 'action': 1, 'attrs': {'FRA_SRC': '10.0.0.1', 'FRA_TABLE': 100}} rule2 = {'family': 2, 'src_len': 0, 'action': 6, 'attrs': {'FRA_TABLE': 255}} rules = [rule1, rule2] with mock.patch.object(priv_lib, 'list_ip_rules') as mock_list_rules: mock_list_rules.return_value = rules retval = ip_lib.list_ip_rules(mock.ANY, 4) reference = [ {'type': 'unicast', 'from': '10.0.0.1/24', 'priority': '0', 'table': '100'}, {'type': 'blackhole', 'from': '0.0.0.0/0', 'priority': '0', 'table': 'local'}] self.assertEqual(reference, retval) class ParseLinkDeviceTestCase(base.BaseTestCase): def setUp(self): super(ParseLinkDeviceTestCase, self).setUp() self._mock_get_ip_addresses = mock.patch.object(priv_lib, 'get_ip_addresses') self.mock_get_ip_addresses = self._mock_get_ip_addresses.start() 
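# The patcher above is started manually, so the addCleanup() hook on the
# next line is what stops it after each test; without it the mock on
# priv_lib.get_ip_addresses would leak into subsequent tests.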
self.addCleanup(self._stop_mock) def _stop_mock(self): self._mock_get_ip_addresses.stop() def test_parse_link_devices(self): device = ({'index': 1, 'attrs': [['IFLA_IFNAME', 'int_name']]}) self.mock_get_ip_addresses.return_value = [ {'prefixlen': 24, 'scope': 200, 'event': 'RTM_NEWADDR', 'attrs': [ ['IFA_ADDRESS', '192.168.10.20'], ['IFA_FLAGS', ifaddrmsg.IFA_F_PERMANENT]]}, {'prefixlen': 64, 'scope': 200, 'event': 'RTM_DELADDR', 'attrs': [ ['IFA_ADDRESS', '2001:db8::1'], ['IFA_FLAGS', ifaddrmsg.IFA_F_PERMANENT]]}] retval = ip_lib._parse_link_device('namespace', device) expected = [{'scope': 'site', 'cidr': '192.168.10.20/24', 'dynamic': False, 'dadfailed': False, 'name': 'int_name', 'broadcast': None, 'tentative': False, 'event': 'added'}, {'scope': 'site', 'cidr': '2001:db8::1/64', 'dynamic': False, 'dadfailed': False, 'name': 'int_name', 'broadcast': None, 'tentative': False, 'event': 'removed'}] self.assertEqual(expected, retval) class GetDevicesInfoTestCase(base.BaseTestCase): DEVICE_LO = { 'index': 2, 'attrs': (('IFLA_IFNAME', 'lo'), ('IFLA_OPERSTATE', 'UP'), ('IFLA_LINKMODE', 0), ('IFLA_MTU', 1000), ('IFLA_PROMISCUITY', 0), ('IFLA_ADDRESS', '5a:76:ed:cc:ce:90'), ('IFLA_BROADCAST', 'ff:ff:ff:ff:ff:f0'), ) } DEVICE_DUMMY = { 'index': 2, 'attrs': (('IFLA_IFNAME', 'int_01'), ('IFLA_OPERSTATE', 'DOWN'), ('IFLA_LINKMODE', 0), ('IFLA_MTU', 1500), ('IFLA_PROMISCUITY', 0), ('IFLA_ADDRESS', '5a:76:ed:cc:ce:90'), ('IFLA_BROADCAST', 'ff:ff:ff:ff:ff:f0'), ('IFLA_LINKINFO', { 'attrs': (('IFLA_INFO_KIND', 'dummy'), )})) } DEVICE_VLAN = { 'index': 5, 'attrs': (('IFLA_IFNAME', 'int_02'), ('IFLA_OPERSTATE', 'DOWN'), ('IFLA_LINKMODE', 0), ('IFLA_MTU', 1400), ('IFLA_PROMISCUITY', 0), ('IFLA_ADDRESS', '5a:76:ed:cc:ce:91'), ('IFLA_BROADCAST', 'ff:ff:ff:ff:ff:f1'), ('IFLA_LINK', 2), ('IFLA_LINKINFO', {'attrs': ( ('IFLA_INFO_KIND', 'vlan'), ('IFLA_INFO_DATA', {'attrs': (('IFLA_VLAN_ID', 1000), )}) )})) } DEVICE_VXLAN = { 'index': 9, 'attrs': (('IFLA_IFNAME', 'int_03'), ('IFLA_OPERSTATE', 'UP'), ('IFLA_LINKMODE', 0), ('IFLA_MTU', 1300), ('IFLA_PROMISCUITY', 0), ('IFLA_ADDRESS', '5a:76:ed:cc:ce:92'), ('IFLA_BROADCAST', 'ff:ff:ff:ff:ff:f2'), ('IFLA_LINKINFO', {'attrs': ( ('IFLA_INFO_KIND', 'vxlan'), ('IFLA_INFO_DATA', {'attrs': ( ('IFLA_VXLAN_ID', 1001), ('IFLA_VXLAN_GROUP', '239.1.1.1'), ('IFLA_VXLAN_LINK', 2))}) )})) } DEVICE_VETH = { 'index': 11, 'attrs': (('IFLA_IFNAME', 'int_04'), ('IFLA_OPERSTATE', 'UP'), ('IFLA_LINKMODE', 0), ('IFLA_MTU', 900), ('IFLA_PROMISCUITY', 0), ('IFLA_ADDRESS', '5a:76:ed:cc:ce:93'), ('IFLA_BROADCAST', 'ff:ff:ff:ff:ff:f3'), ('IFLA_LINK', 30), ('IFLA_LINKINFO', { 'attrs': (('IFLA_INFO_KIND', 'veth'), )})) } def setUp(self): super(GetDevicesInfoTestCase, self).setUp() self.mock_getdevs = mock.patch.object(priv_lib, 'get_link_devices').start() def test_get_devices_info_lo(self): self.mock_getdevs.return_value = (self.DEVICE_LO, ) ret = ip_lib.get_devices_info('namespace') expected = {'index': 2, 'name': 'lo', 'operstate': 'UP', 'linkmode': 0, 'mtu': 1000, 'promiscuity': 0, 'mac': '5a:76:ed:cc:ce:90', 'broadcast': 'ff:ff:ff:ff:ff:f0'} self.assertEqual(expected, ret[0]) def test_get_devices_info_dummy(self): self.mock_getdevs.return_value = (self.DEVICE_DUMMY, ) ret = ip_lib.get_devices_info('namespace') expected = {'index': 2, 'name': 'int_01', 'operstate': 'DOWN', 'linkmode': 0, 'mtu': 1500, 'promiscuity': 0, 'mac': '5a:76:ed:cc:ce:90', 'broadcast': 'ff:ff:ff:ff:ff:f0', 'kind': 'dummy'} self.assertEqual(expected, ret[0]) def test_get_devices_info_vlan(self): 
self.mock_getdevs.return_value = (self.DEVICE_VLAN, self.DEVICE_DUMMY) ret = ip_lib.get_devices_info('namespace') expected = {'index': 5, 'name': 'int_02', 'operstate': 'DOWN', 'linkmode': 0, 'mtu': 1400, 'promiscuity': 0, 'mac': '5a:76:ed:cc:ce:91', 'broadcast': 'ff:ff:ff:ff:ff:f1', 'kind': 'vlan', 'vlan_id': 1000, 'parent_index': 2, 'parent_name': 'int_01'} for device in (device for device in ret if device['kind'] == 'vlan'): self.assertEqual(expected, device) break else: self.fail('No VLAN device found') def test_get_devices_info_vxlan(self): self.mock_getdevs.return_value = (self.DEVICE_VXLAN, self.DEVICE_DUMMY) ret = ip_lib.get_devices_info('namespace') expected = {'index': 9, 'name': 'int_03', 'operstate': 'UP', 'linkmode': 0, 'mtu': 1300, 'promiscuity': 0, 'mac': '5a:76:ed:cc:ce:92', 'broadcast': 'ff:ff:ff:ff:ff:f2', 'kind': 'vxlan', 'vxlan_id': 1001, 'vxlan_group': '239.1.1.1', 'vxlan_link_index': 2, 'vxlan_link_name': 'int_01'} for device in (device for device in ret if device['kind'] == 'vxlan'): self.assertEqual(expected, device) break else: self.fail('No VXLAN device found') def test_get_devices_info_veth(self): self.mock_getdevs.return_value = (self.DEVICE_VETH, self.DEVICE_DUMMY) ret = ip_lib.get_devices_info('namespace') expected = {'index': 11, 'name': 'int_04', 'operstate': 'UP', 'linkmode': 0, 'mtu': 900, 'promiscuity': 0, 'mac': '5a:76:ed:cc:ce:93', 'broadcast': 'ff:ff:ff:ff:ff:f3', 'kind': 'veth', 'parent_index': 30} for device in (device for device in ret if device['kind'] == 'veth'): self.assertEqual(expected, device) break else: self.fail('No VETH device found') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/linux/test_ip_link_support.py0000644000175000017500000001630400000000000030051 0ustar00coreycorey00000000000000# Copyright 2014 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
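# Illustrative aside, not part of the original module: the test case below
# feeds canned ``ip link help`` output to IpLinkSupport and asserts which
# VF (virtual function) management capabilities are reported. A minimal,
# self-contained sketch of that detection idea follows; the helper names
# (_vf_section, _vf_capability_supported), the sample help text and the
# regexes are assumptions for illustration only -- the real parsing lives
# in neutron/agent/linux/ip_link_support.py.
import re

_SAMPLE_HELP = """Usage: ip link set DEV [ vf NUM [ mac LLADDR ]
                                  [ vlan VLANID [ qos VLAN-QOS ] ]
                                  [ spoofchk { on | off } ] ]
"""

def _vf_section(help_text):
    # Extract the text of the "[ vf NUM ... ]" block, if present.
    match = re.search(r'\[ vf NUM(.*)\] \]', help_text, re.DOTALL)
    return match.group(1) if match else None

def _vf_capability_supported(section, capability, subcapability=None):
    # A capability appears as "[ <name> ..."; a sub-capability must be
    # nested inside its parent capability's bracket block.
    if subcapability is None:
        return re.search(r'\[ %s ' % re.escape(capability),
                         section) is not None
    pattern = r'\[ %s (\S+) \[ %s ' % (re.escape(capability),
                                       re.escape(subcapability))
    return re.search(pattern, section) is not None

section = _vf_section(_SAMPLE_HELP)
assert _vf_capability_supported(section, 'spoofchk')      # supported
assert _vf_capability_supported(section, 'vlan', 'qos')   # nested support
assert not _vf_capability_supported(section, 'state')     # absent above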
import mock from neutron.agent.linux import ip_link_support as ip_link from neutron.tests import base class TestIpLinkSupport(base.BaseTestCase): IP_LINK_HELP = """Usage: ip link add [link DEV] [ name ] NAME [ txqueuelen PACKETS ] [ address LLADDR ] [ broadcast LLADDR ] [ mtu MTU ] [index IDX ] [ numtxqueues QUEUE_COUNT ] [ numrxqueues QUEUE_COUNT ] type TYPE [ ARGS ] ip link delete DEV type TYPE [ ARGS ] ip link set { dev DEVICE | group DEVGROUP } [ { up | down } ] [ arp { on | off } ] [ dynamic { on | off } ] [ multicast { on | off } ] [ allmulticast { on | off } ] [ promisc { on | off } ] [ trailers { on | off } ] [ txqueuelen PACKETS ] [ name NEWNAME ] [ address LLADDR ] [ broadcast LLADDR ] [ mtu MTU ] [ netns PID ] [ netns NAME ] [ alias NAME ] [ vf NUM [ mac LLADDR ] [ vlan VLANID [ qos VLAN-QOS ] ] [ rate TXRATE ] ] [ spoofchk { on | off} ] ] [ state { auto | enable | disable} ] ] [ master DEVICE ] [ nomaster ] ip link show [ DEVICE | group GROUP ] [up] TYPE := { vlan | veth | vcan | dummy | ifb | macvlan | macvtap | can | bridge | bond | ipoib | ip6tnl | ipip | sit | vxlan | gre | gretap | ip6gre | ip6gretap | vti } """ IP_LINK_HELP_NO_STATE = """Usage: ip link add link DEV [ name ] NAME [ txqueuelen PACKETS ] [ address LLADDR ] [ broadcast LLADDR ] [ mtu MTU ] type TYPE [ ARGS ] ip link delete DEV type TYPE [ ARGS ] ip link set DEVICE [ { up | down } ] [ arp { on | off } ] [ dynamic { on | off } ] [ multicast { on | off } ] [ allmulticast { on | off } ] [ promisc { on | off } ] [ trailers { on | off } ] [ txqueuelen PACKETS ] [ name NEWNAME ] [ address LLADDR ] [ broadcast LLADDR ] [ mtu MTU ] [ netns PID ] [ alias NAME ] [ vf NUM [ mac LLADDR ] [ vlan VLANID [ qos VLAN-QOS ] ] [ rate TXRATE ] ] ip link show [ DEVICE ] TYPE := { vlan | veth | vcan | dummy | ifb | macvlan | can } """ IP_LINK_HELP_NO_SPOOFCHK = IP_LINK_HELP_NO_STATE IP_LINK_HELP_NO_VF = """Usage: ip link set DEVICE { up | down | arp { on | off } | dynamic { on | off } | multicast { on | off } | allmulticast { on | off } | promisc { on | off } | trailers { on | off } | txqueuelen PACKETS | name NEWNAME | address LLADDR | broadcast LLADDR | mtu MTU } ip link show [ DEVICE ] """ def _test_capability(self, capability, subcapability=None, expected=True, stdout="", stderr=""): with mock.patch("neutron.agent.linux.utils.execute") as mock_exec: mock_exec.return_value = (stdout, stderr) vf_section = ip_link.IpLinkSupport.get_vf_mgmt_section() capable = ip_link.IpLinkSupport.vf_mgmt_capability_supported( vf_section, capability, subcapability) self.assertEqual(expected, capable) mock_exec.assert_called_once_with(['ip', 'link', 'help'], check_exit_code=False, return_stderr=True, log_fail_as_error=False) def test_vf_mgmt(self): self._test_capability( ip_link.IpLinkConstants.IP_LINK_CAPABILITY_STATE, stderr=self.IP_LINK_HELP) def test_execute_with_stdout(self): self._test_capability( ip_link.IpLinkConstants.IP_LINK_CAPABILITY_STATE, stdout=self.IP_LINK_HELP) def test_vf_mgmt_no_state(self): self._test_capability( ip_link.IpLinkConstants.IP_LINK_CAPABILITY_STATE, expected=False, stderr=self.IP_LINK_HELP_NO_STATE) def test_vf_mgmt_no_spoofchk(self): self._test_capability( ip_link.IpLinkConstants.IP_LINK_CAPABILITY_SPOOFCHK, expected=False, stderr=self.IP_LINK_HELP_NO_SPOOFCHK) def test_vf_mgmt_no_vf(self): self._test_capability( ip_link.IpLinkConstants.IP_LINK_CAPABILITY_STATE, expected=False, stderr=self.IP_LINK_HELP_NO_VF) def test_vf_mgmt_unknown_capability(self): self._test_capability( "state1", expected=False, 
stderr=self.IP_LINK_HELP) def test_vf_mgmt_sub_capability(self): self._test_capability( ip_link.IpLinkConstants.IP_LINK_CAPABILITY_VLAN, ip_link.IpLinkConstants.IP_LINK_SUB_CAPABILITY_QOS, stderr=self.IP_LINK_HELP) def test_vf_mgmt_sub_capability_mismatch(self): self._test_capability( ip_link.IpLinkConstants.IP_LINK_CAPABILITY_STATE, ip_link.IpLinkConstants.IP_LINK_SUB_CAPABILITY_QOS, expected=False, stderr=self.IP_LINK_HELP) def test_vf_mgmt_sub_capability_invalid(self): self._test_capability( ip_link.IpLinkConstants.IP_LINK_CAPABILITY_VLAN, "qos1", expected=False, stderr=self.IP_LINK_HELP) def test_vf_mgmt_error(self): with mock.patch("neutron.agent.linux.utils.execute") as mock_exec: mock_exec.side_effect = Exception() self.assertRaises( ip_link.UnsupportedIpLinkCommand, ip_link.IpLinkSupport.get_vf_mgmt_section) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/linux/test_ipset_manager.py0000644000175000017500000001447700000000000027457 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from neutron.agent.linux import ipset_manager from neutron.tests import base TEST_SET_ID = 'fake_sgid' ETHERTYPE = 'IPv4' TEST_SET_NAME = ipset_manager.IpsetManager.get_name(TEST_SET_ID, ETHERTYPE) TEST_SET_NAME_NEW = TEST_SET_NAME + ipset_manager.SWAP_SUFFIX FAKE_IPS = ['10.0.0.1', '10.0.0.2', '10.0.0.3', '10.0.0.4', '10.0.0.5', '10.0.0.6'] class BaseIpsetManagerTest(base.BaseTestCase): def setUp(self): super(BaseIpsetManagerTest, self).setUp() self.ipset = ipset_manager.IpsetManager() self.execute = mock.patch.object(self.ipset, "execute").start() self.expected_calls = [] self.expect_create() self.force_sorted_get_set_ips() def force_sorted_get_set_ips(self): """Force sorted responses by self.ipset._get_new/deleted_set_ips. _get_new/deleted_set_ips use internally sets and return randomly ordered responses. This method ensures sorted responses from them in order to guarantee call order in self.ipset.set_members. 
""" original_get_new_set_ips = self.ipset._get_new_set_ips original_get_deleted_set_ips = self.ipset._get_deleted_set_ips def sorted_get_new_set_ips(set_name, expected_ips): unsorted = original_get_new_set_ips(set_name, expected_ips) return sorted(unsorted) def sorted_get_deleted_set_ips(set_name, expected_ips): unsorted = original_get_deleted_set_ips(set_name, expected_ips) return sorted(unsorted) mock.patch.object(self.ipset, '_get_new_set_ips', side_effect=sorted_get_new_set_ips).start() mock.patch.object(self.ipset, '_get_deleted_set_ips', side_effect=sorted_get_deleted_set_ips).start() def verify_mock_calls(self): self.execute.assert_has_calls(self.expected_calls, any_order=False) def expect_set(self, addresses): temp_input = ['create %s hash:net family inet' % TEST_SET_NAME_NEW] temp_input.extend('add %s %s' % (TEST_SET_NAME_NEW, ip) for ip in self.ipset._sanitize_addresses(addresses)) input = '\n'.join(temp_input) self.expected_calls.extend([ mock.call(['ipset', 'restore', '-exist'], process_input=input, run_as_root=True, check_exit_code=True), mock.call(['ipset', 'swap', TEST_SET_NAME_NEW, TEST_SET_NAME], process_input=None, run_as_root=True, check_exit_code=True), mock.call(['ipset', 'destroy', TEST_SET_NAME_NEW], process_input=None, run_as_root=True, check_exit_code=False)]) def expect_add(self, addresses): self.expected_calls.extend( mock.call(['ipset', 'add', '-exist', TEST_SET_NAME, ip], process_input=None, run_as_root=True, check_exit_code=True) for ip in self.ipset._sanitize_addresses(addresses)) def expect_del(self, addresses): self.expected_calls.extend( mock.call(['ipset', 'del', TEST_SET_NAME, ip], process_input=None, run_as_root=True, check_exit_code=False) for ip in self.ipset._sanitize_addresses(addresses)) def expect_create(self): self.expected_calls.append( mock.call(['ipset', 'create', '-exist', TEST_SET_NAME, 'hash:net', 'family', 'inet'], process_input=None, run_as_root=True, check_exit_code=True)) def expect_destroy(self): self.expected_calls.append( mock.call(['ipset', 'destroy', TEST_SET_NAME], process_input=None, run_as_root=True, check_exit_code=False)) def add_first_ip(self): self.expect_set([FAKE_IPS[0]]) self.ipset.set_members(TEST_SET_ID, ETHERTYPE, [FAKE_IPS[0]]) def add_all_ips(self): self.expect_set(FAKE_IPS) self.ipset.set_members(TEST_SET_ID, ETHERTYPE, FAKE_IPS) class IpsetManagerTestCase(BaseIpsetManagerTest): def test_set_name_exists(self): self.add_first_ip() self.assertTrue(self.ipset.set_name_exists('N' + ETHERTYPE + TEST_SET_ID)) def test_set_members_with_first_add_member(self): self.add_first_ip() self.verify_mock_calls() def test_set_members_adding_less_than_5(self): self.add_first_ip() self.expect_add(FAKE_IPS[1:5]) self.ipset.set_members(TEST_SET_ID, ETHERTYPE, FAKE_IPS[0:5]) self.verify_mock_calls() def test_set_members_deleting_less_than_5(self): self.add_all_ips() self.expect_del(FAKE_IPS[3:]) self.ipset.set_members(TEST_SET_ID, ETHERTYPE, FAKE_IPS[0:3]) self.verify_mock_calls() def test_set_members_adding_more_than_5(self): self.add_first_ip() self.expect_set(FAKE_IPS) self.ipset.set_members(TEST_SET_ID, ETHERTYPE, FAKE_IPS) self.verify_mock_calls() def test_set_members_adding_all_zero_ipv4(self): self.expect_set(['0.0.0.0/0']) self.ipset.set_members(TEST_SET_ID, ETHERTYPE, ['0.0.0.0/0']) self.verify_mock_calls() def test_set_members_adding_all_zero_ipv6(self): self.expect_set(['::/0']) self.ipset.set_members(TEST_SET_ID, ETHERTYPE, ['::/0']) self.verify_mock_calls() def test_destroy(self): self.add_first_ip() self.expect_destroy() 
self.ipset.destroy(TEST_SET_ID, ETHERTYPE) self.verify_mock_calls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/linux/test_iptables_firewall.py0000644000175000017500000032236200000000000030324 0ustar00coreycorey00000000000000# Copyright 2012, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import mock from neutron_lib import constants from neutron_lib import exceptions from oslo_config import cfg import testtools from neutron.agent import firewall from neutron.agent.linux import ip_conntrack from neutron.agent.linux import ipset_manager from neutron.agent.linux import iptables_comments as ic from neutron.agent.linux import iptables_firewall from neutron.common import utils from neutron.conf.agent import common as agent_config from neutron.conf.agent import securitygroups_rpc as security_config from neutron.tests import base from neutron.tests.unit.api.v2 import test_base _uuid = test_base._uuid # TODO(mangelajo): replace all 'IPv4', 'IPv6' to constants FAKE_PREFIX = {'IPv4': '10.0.0.0/24', 'IPv6': 'fe80::/48'} FAKE_IP = {'IPv4': '10.0.0.1', 'IPv6': 'fe80::1'} # TODO(mangelajo): replace all '*_sgid' strings for the constants FAKE_SGID = 'fake_sgid' OTHER_SGID = 'other_sgid' _IPv6 = constants.IPv6 _IPv4 = constants.IPv4 RAW_TABLE_OUTPUT = """ # Generated by iptables-save v1.4.21 on Fri Jul 31 16:13:28 2015 *raw :PREROUTING ACCEPT [11561:3470468] :OUTPUT ACCEPT [11504:4064044] :neutron-openvswi-OUTPUT - [0:0] :neutron-openvswi-PREROUTING - [0:0] -A PREROUTING -j neutron-openvswi-PREROUTING -A OUTPUT -j neutron-openvswi-OUTPUT -A neutron-openvswi-PREROUTING -m physdev --physdev-in qvbe804433b-61 -j CT --zone 4097 -A neutron-openvswi-PREROUTING -m physdev --physdev-in tape804433b-61 -j CT --zone 4097 -A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb95c24827-02 -j CT --zone 4098 -A neutron-openvswi-PREROUTING -m physdev --physdev-in tap95c24827-02 -j CT --zone 4098 -A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb61634509-31 -j CT --zone 4098 -A neutron-openvswi-PREROUTING -m physdev --physdev-in tap61634509-31 -j CT --zone 4098 -A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb8f46cf18-12 -j CT --zone 4105 -A neutron-openvswi-PREROUTING -m physdev --physdev-in tap8f46cf18-12 -j CT --zone 4105 COMMIT # Completed on Fri Jul 31 16:13:28 2015 """ # noqa class BaseIptablesFirewallTestCase(base.BaseTestCase): def setUp(self): super(BaseIptablesFirewallTestCase, self).setUp() mock.patch('eventlet.spawn_n').start() security_config.register_securitygroups_opts() agent_config.register_root_helper(cfg.CONF) cfg.CONF.set_override('comment_iptables_rules', False, 'AGENT') self.utils_exec_p = mock.patch( 'neutron.agent.linux.utils.execute') self.utils_exec = self.utils_exec_p.start() self.iptables_cls_p = mock.patch( 'neutron.agent.linux.iptables_manager.IptablesManager') iptables_cls = self.iptables_cls_p.start() 
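# IptablesManager is mocked out wholesale: the fake ipv4/ipv6 'filter'
# and 'raw' tables wired up below record every add_chain/add_rule call,
# and the test cases assert on those recorded calls rather than invoking
# iptables itself.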
self.iptables_inst = mock.Mock() self.v4filter_inst = mock.Mock() self.v6filter_inst = mock.Mock() self.iptables_inst.ipv4 = {'filter': self.v4filter_inst, 'raw': self.v4filter_inst } self.iptables_inst.ipv6 = {'filter': self.v6filter_inst, 'raw': self.v6filter_inst } iptables_cls.return_value = self.iptables_inst self.iptables_inst.get_rules_for_table.return_value = ( RAW_TABLE_OUTPUT.splitlines()) self.firewall = iptables_firewall.IptablesFirewallDriver() self.utils_exec.reset_mock() self.firewall.iptables = self.iptables_inst # don't mess with sysctl knobs in unit tests self.firewall._enabled_netfilter_for_bridges = True # initial data has 1, 2, and 9 in use, see RAW_TABLE_OUTPUT above. self._dev_zone_map = {'61634509-31': 4098, '8f46cf18-12': 4105, '95c24827-02': 4098, 'e804433b-61': 4097} get_rules_for_table_func = lambda x: RAW_TABLE_OUTPUT.split('\n') filtered_ports = {port_id: self._fake_port() for port_id in self._dev_zone_map} self.firewall.ipconntrack = ip_conntrack.IpConntrackManager( get_rules_for_table_func, filtered_ports=filtered_ports, unfiltered_ports=dict()) def _fake_port(self): return {'device': 'tapfake_dev', 'mac_address': 'ff:ff:ff:ff:ff:ff', 'network_id': 'fake_net', 'fixed_ips': [FAKE_IP['IPv4'], FAKE_IP['IPv6']]} class IptablesFirewallTestCase(BaseIptablesFirewallTestCase): def test_prepare_port_filter_with_no_sg(self): port = self._fake_port() self.firewall.prepare_port_filter(port) calls = [mock.call.add_chain('sg-fallback'), mock.call.add_rule( 'sg-fallback', '-j DROP', comment=ic.UNMATCH_DROP), mock.call.add_chain('sg-chain'), mock.call.add_rule('PREROUTING', mock.ANY, # zone set comment=None), mock.call.add_rule('PREROUTING', mock.ANY, # zone set comment=None), mock.call.add_rule('PREROUTING', mock.ANY, # zone set comment=None), mock.call.add_chain('ifake_dev'), mock.call.add_rule('FORWARD', '-m physdev --physdev-out tapfake_dev ' '--physdev-is-bridged -j $sg-chain', top=True, comment=ic.VM_INT_SG), mock.call.add_rule('sg-chain', '-m physdev --physdev-out tapfake_dev ' '--physdev-is-bridged -j $ifake_dev', top=False, comment=ic.SG_TO_VM_SG), mock.call.add_rule( 'ifake_dev', '-m state --state RELATED,ESTABLISHED -j RETURN', top=False, comment=None), mock.call.add_rule( 'ifake_dev', '-m state --state INVALID -j DROP', top=False, comment=None), mock.call.add_rule('ifake_dev', '-j $sg-fallback', top=False, comment=None), mock.call.add_chain('ofake_dev'), mock.call.add_rule('FORWARD', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $sg-chain', top=True, comment=ic.VM_INT_SG), mock.call.add_rule('sg-chain', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $ofake_dev', top=False, comment=ic.SG_TO_VM_SG), mock.call.add_rule('INPUT', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $ofake_dev', top=False, comment=ic.INPUT_TO_SG), mock.call.add_chain('sfake_dev'), mock.call.add_rule( 'sfake_dev', '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF ' '-j RETURN', comment=ic.PAIR_ALLOW), mock.call.add_rule( 'sfake_dev', '-j DROP', comment=ic.PAIR_DROP), mock.call.add_rule( 'ofake_dev', '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp ' '--sport 68 --dport 67 -j RETURN', top=False, comment=None), mock.call.add_rule('ofake_dev', '-j $sfake_dev', top=False, comment=None), mock.call.add_rule( 'ofake_dev', '-p udp -m udp --sport 68 --dport 67 -j RETURN', top=False, comment=None), mock.call.add_rule( 'ofake_dev', '-p udp -m udp --sport 67 --dport 68 -j DROP', top=False, comment=None), mock.call.add_rule( 'ofake_dev', '-m state 
--state RELATED,ESTABLISHED -j RETURN', top=False, comment=None), mock.call.add_rule( 'ofake_dev', '-m state --state INVALID -j DROP', top=False, comment=None), mock.call.add_rule( 'ofake_dev', '-j $sg-fallback', top=False, comment=None), mock.call.add_rule('sg-chain', '-j ACCEPT')] self.v4filter_inst.assert_has_calls(calls) def test_filter_ipv4_ingress(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress'} ingress = mock.call.add_rule('ifake_dev', '-j RETURN', top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_prefix(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'source_ip_prefix': prefix} ingress = mock.call.add_rule( 'ifake_dev', '-s %s -j RETURN' % prefix, top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_tcp(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'tcp'} ingress = mock.call.add_rule( 'ifake_dev', '-p tcp -j RETURN', top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_tcp_prefix(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'tcp', 'source_ip_prefix': prefix} ingress = mock.call.add_rule('ifake_dev', '-s %s -p tcp -j RETURN' % prefix, top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_icmp(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'icmp'} ingress = mock.call.add_rule('ifake_dev', '-p icmp -j RETURN', top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_icmp_prefix(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'icmp', 'source_ip_prefix': prefix} ingress = mock.call.add_rule( 'ifake_dev', '-s %s -p icmp -j RETURN' % prefix, top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_tcp_port(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'tcp', 'port_range_min': 10, 'port_range_max': 10} ingress = mock.call.add_rule('ifake_dev', '-p tcp -m tcp --dport 10 -j RETURN', top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_bad_vrrp_with_dport(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'vrrp', 'port_range_min': 10, 'port_range_max': 10} # Dest port isn't supported with VRRP, so don't send it # down to iptables.
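# (Hence the expected ingress rule below matches only on '-p vrrp' and
# carries no '--dport 10' clause.)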
ingress = mock.call.add_rule('ifake_dev', '-p vrrp -j RETURN', top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_tcp_port_by_num(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': '6', 'port_range_min': 10, 'port_range_max': 10} ingress = mock.call.add_rule('ifake_dev', '-p tcp -m tcp --dport 10 -j RETURN', top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_tcp_mport(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'tcp', 'port_range_min': 10, 'port_range_max': 100} ingress = mock.call.add_rule( 'ifake_dev', '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN', top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_tcp_mport_prefix(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'tcp', 'port_range_min': 10, 'port_range_max': 100, 'source_ip_prefix': prefix} ingress = mock.call.add_rule( 'ifake_dev', '-s %s -p tcp -m tcp -m multiport --dports 10:100 ' '-j RETURN' % prefix, top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_udp(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'udp'} ingress = mock.call.add_rule( 'ifake_dev', '-p udp -j RETURN', top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_udp_prefix(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'udp', 'source_ip_prefix': prefix} ingress = mock.call.add_rule('ifake_dev', '-s %s -p udp -j RETURN' % prefix, top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_udp_port(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'udp', 'port_range_min': 10, 'port_range_max': 10} ingress = mock.call.add_rule('ifake_dev', '-p udp -m udp --dport 10 -j RETURN', top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_udp_mport(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'udp', 'port_range_min': 10, 'port_range_max': 100} ingress = mock.call.add_rule( 'ifake_dev', '-p udp -m udp -m multiport --dports 10:100 -j RETURN', top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_udp_mport_prefix(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'udp', 'port_range_min': 10, 'port_range_max': 100, 'source_ip_prefix': prefix} ingress = mock.call.add_rule( 'ifake_dev', '-s %s -p udp -m udp -m multiport --dports 10:100 ' '-j RETURN' % prefix, top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_dccp_port(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'dccp', 'port_range_min': 10, 'port_range_max': 10} ingress = mock.call.add_rule('ifake_dev', '-p dccp -m dccp --dport 10 -j RETURN', top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_sctp_port(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'sctp', 'port_range_min': 10, 'port_range_max': 10} ingress = 
mock.call.add_rule('ifake_dev', '-p sctp -m sctp --dport 10 -j RETURN', top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_udplite_port(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'udplite', 'port_range_min': 10, 'port_range_max': 10} ingress = mock.call.add_rule( 'ifake_dev', '-p udplite -m multiport --dports 10 -j RETURN', top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_udplite_mport(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'udplite', 'port_range_min': 10, 'port_range_max': 100} ingress = mock.call.add_rule( 'ifake_dev', '-p udplite -m multiport --dports 10:100 -j RETURN', top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_protocol_blank(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': ''} ingress = mock.call.add_rule('ifake_dev', '-j RETURN', top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_protocol_zero(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': '0'} ingress = mock.call.add_rule('ifake_dev', '-j RETURN', top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_protocol_encap(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': 'encap'} ingress = mock.call.add_rule('ifake_dev', '-p encap -j RETURN', top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_protocol_encap_by_num(self): rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': '98'} ingress = mock.call.add_rule('ifake_dev', '-p encap -j RETURN', top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_ingress_protocol_999_local(self): # There is no protocol 999, so let's return a mapping # that says there is and make sure the rule is created # using the name and not the number. 
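# The fake mapping is injected below by patching the driver's
# _local_protocol_name_map to return {'999': 'fooproto'}.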
rule = {'ethertype': 'IPv4', 'direction': 'ingress', 'protocol': '999'} ingress = mock.call.add_rule('ifake_dev', '-p fooproto -j RETURN', top=False, comment=None) egress = None with mock.patch.object(self.firewall, '_local_protocol_name_map') as lpnm: lpnm.return_value = {'999': 'fooproto'} self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress(self): rule = {'ethertype': 'IPv4', 'direction': 'egress'} egress = mock.call.add_rule('ofake_dev', '-j RETURN', top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_dest_prefix(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'egress', 'dest_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-d %s -j RETURN' % prefix, top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_source_prefix(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'egress', 'source_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-s %s -j RETURN' % prefix, top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_tcp(self): rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'tcp'} egress = mock.call.add_rule( 'ofake_dev', '-p tcp -j RETURN', top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_tcp_prefix(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'tcp', 'dest_ip_prefix': prefix} egress = mock.call.add_rule('ofake_dev', '-d %s -p tcp -j RETURN' % prefix, top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_icmp(self): rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'icmp'} egress = mock.call.add_rule('ofake_dev', '-p icmp -j RETURN', top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_icmp_prefix(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'icmp', 'dest_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-d %s -p icmp -j RETURN' % prefix, top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_icmp_type(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'icmp', 'port_range_min': 8, 'dest_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-d %s -p icmp -m icmp --icmp-type 8 -j RETURN' % prefix, top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_icmp_type_name(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'icmp', 'port_range_min': 'echo-request', 'dest_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-d %s -p icmp -m icmp --icmp-type echo-request ' '-j RETURN' % prefix, top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_icmp_type_code(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'icmp', 'port_range_min': 8, 'port_range_max': 0, 'dest_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-d %s -p icmp -m icmp --icmp-type 8/0 -j 
RETURN' % prefix, top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_icmp_type_code_protocol_num(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': '1', 'port_range_min': 8, 'port_range_max': 0, 'dest_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-d %s -p icmp -m icmp --icmp-type 8/0 -j RETURN' % prefix, top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_tcp_port(self): rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'tcp', 'port_range_min': 10, 'port_range_max': 10} egress = mock.call.add_rule('ofake_dev', '-p tcp -m tcp --dport 10 -j RETURN', top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_tcp_mport(self): rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'tcp', 'port_range_min': 10, 'port_range_max': 100} egress = mock.call.add_rule( 'ofake_dev', '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN', top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_tcp_mport_prefix(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'tcp', 'port_range_min': 10, 'port_range_max': 100, 'dest_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-d %s -p tcp -m tcp -m multiport --dports 10:100 ' '-j RETURN' % prefix, top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_udp(self): rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'udp'} egress = mock.call.add_rule( 'ofake_dev', '-p udp -j RETURN', top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_udp_prefix(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'udp', 'dest_ip_prefix': prefix} egress = mock.call.add_rule('ofake_dev', '-d %s -p udp -j RETURN' % prefix, top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_udp_port(self): rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'udp', 'port_range_min': 10, 'port_range_max': 10} egress = mock.call.add_rule('ofake_dev', '-p udp -m udp --dport 10 -j RETURN', top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_udp_mport(self): rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'udp', 'port_range_min': 10, 'port_range_max': 100} egress = mock.call.add_rule( 'ofake_dev', '-p udp -m udp -m multiport --dports 10:100 -j RETURN', top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv4_egress_udp_mport_prefix(self): prefix = FAKE_PREFIX['IPv4'] rule = {'ethertype': 'IPv4', 'direction': 'egress', 'protocol': 'udp', 'port_range_min': 10, 'port_range_max': 100, 'dest_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-d %s -p udp -m udp -m multiport --dports 10:100 ' '-j RETURN' % prefix, top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_ingress(self): rule = {'ethertype': 'IPv6', 'direction': 'ingress'} ingress = mock.call.add_rule('ifake_dev', '-j RETURN', top=False, comment=None) 
egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_ingress_prefix(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'ingress', 'source_ip_prefix': prefix} ingress = mock.call.add_rule( 'ifake_dev', '-s %s -j RETURN' % prefix, top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_ingress_tcp(self): rule = {'ethertype': 'IPv6', 'direction': 'ingress', 'protocol': 'tcp'} ingress = mock.call.add_rule( 'ifake_dev', '-p tcp -j RETURN', top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_ingress_tcp_prefix(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'ingress', 'protocol': 'tcp', 'source_ip_prefix': prefix} ingress = mock.call.add_rule('ifake_dev', '-s %s -p tcp -j RETURN' % prefix, top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_ingress_tcp_port(self): rule = {'ethertype': 'IPv6', 'direction': 'ingress', 'protocol': 'tcp', 'port_range_min': 10, 'port_range_max': 10} ingress = mock.call.add_rule('ifake_dev', '-p tcp -m tcp --dport 10 -j RETURN', top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_ingress_icmp(self): rule = {'ethertype': 'IPv6', 'direction': 'ingress', 'protocol': 'icmp'} ingress = mock.call.add_rule( 'ifake_dev', '-p ipv6-icmp -j RETURN', top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_ingress_icmp_prefix(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'ingress', 'protocol': 'icmp', 'source_ip_prefix': prefix} ingress = mock.call.add_rule( 'ifake_dev', '-s %s -p ipv6-icmp -j RETURN' % prefix, top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_ingress_tcp_mport(self): rule = {'ethertype': 'IPv6', 'direction': 'ingress', 'protocol': 'tcp', 'port_range_min': 10, 'port_range_max': 100} ingress = mock.call.add_rule( 'ifake_dev', '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN', top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def _test_filter_ingress_tcp_min_port_0(self, ethertype): rule = {'ethertype': ethertype, 'direction': 'ingress', 'protocol': 'tcp', 'port_range_min': 0, 'port_range_max': 100} ingress = mock.call.add_rule( 'ifake_dev', '-p tcp -m tcp -m multiport --dports 0:100 -j RETURN', top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ingress_tcp_min_port_0_for_ipv4(self): self._test_filter_ingress_tcp_min_port_0('IPv4') def test_filter_ingress_tcp_min_port_0_for_ipv6(self): self._test_filter_ingress_tcp_min_port_0('IPv6') def test_filter_ipv6_ingress_tcp_mport_prefix(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'ingress', 'protocol': 'tcp', 'port_range_min': 10, 'port_range_max': 100, 'source_ip_prefix': prefix} ingress = mock.call.add_rule( 'ifake_dev', '-s %s -p tcp -m tcp -m multiport --dports 10:100 ' '-j RETURN' % prefix, top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_ingress_udp(self): rule = {'ethertype': 'IPv6', 'direction': 'ingress', 'protocol': 'udp'} ingress = mock.call.add_rule( 'ifake_dev', '-p udp -j RETURN', top=False, comment=None) egress = None 
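# Note: for IPv6 rules the driver renders protocol 'icmp' as
# '-p ipv6-icmp', as the two ipv6 icmp test cases above assert.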
self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_ingress_udp_prefix(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'ingress', 'protocol': 'udp', 'source_ip_prefix': prefix} ingress = mock.call.add_rule('ifake_dev', '-s %s -p udp -j RETURN' % prefix, top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_ingress_udp_port(self): rule = {'ethertype': 'IPv6', 'direction': 'ingress', 'protocol': 'udp', 'port_range_min': 10, 'port_range_max': 10} ingress = mock.call.add_rule('ifake_dev', '-p udp -m udp --dport 10 -j RETURN', top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_ingress_udp_mport(self): rule = {'ethertype': 'IPv6', 'direction': 'ingress', 'protocol': 'udp', 'port_range_min': 10, 'port_range_max': 100} ingress = mock.call.add_rule( 'ifake_dev', '-p udp -m udp -m multiport --dports 10:100 -j RETURN', top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_ingress_udp_mport_prefix(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'ingress', 'protocol': 'udp', 'port_range_min': 10, 'port_range_max': 100, 'source_ip_prefix': prefix} ingress = mock.call.add_rule( 'ifake_dev', '-s %s -p udp -m udp -m multiport --dports 10:100 ' '-j RETURN' % prefix, top=False, comment=None) egress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress(self): rule = {'ethertype': 'IPv6', 'direction': 'egress'} egress = mock.call.add_rule('ofake_dev', '-j RETURN', top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_prefix(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'egress', 'dest_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-d %s -j RETURN' % prefix, top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_tcp(self): rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'tcp'} egress = mock.call.add_rule( 'ofake_dev', '-p tcp -j RETURN', top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_tcp_prefix(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'tcp', 'dest_ip_prefix': prefix} egress = mock.call.add_rule('ofake_dev', '-d %s -p tcp -j RETURN' % prefix, top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_icmp(self): rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'icmp'} egress = mock.call.add_rule( 'ofake_dev', '-p ipv6-icmp -j RETURN', top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_icmp_prefix(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'icmp', 'dest_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-d %s -p ipv6-icmp -j RETURN' % prefix, top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_icmp_type(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'icmp', 'port_range_min': 8, 'dest_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-d %s -p 
ipv6-icmp -m icmp6 --icmpv6-type 8 -j RETURN' % prefix, top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_icmp_type_name(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'icmp', 'port_range_min': 'echo-request', 'dest_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-d %s -p ipv6-icmp -m icmp6 --icmpv6-type echo-request ' '-j RETURN' % prefix, top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_icmp_type_code(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'icmp', 'port_range_min': 8, 'port_range_max': 0, 'dest_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-d %s -p ipv6-icmp -m icmp6 --icmpv6-type 8/0 -j RETURN' % prefix, top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_icmp_type_code_protocol_num(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': '58', 'port_range_min': 8, 'port_range_max': 0, 'dest_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-d %s -p ipv6-icmp -m icmp6 --icmpv6-type 8/0 -j RETURN' % prefix, top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_icmp_type_code_protocol_legacy_name(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'icmpv6', 'port_range_min': 8, 'port_range_max': 0, 'dest_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-d %s -p ipv6-icmp -m icmp6 --icmpv6-type 8/0 -j RETURN' % prefix, top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_tcp_port(self): rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'tcp', 'port_range_min': 10, 'port_range_max': 10} egress = mock.call.add_rule('ofake_dev', '-p tcp -m tcp --dport 10 -j RETURN', top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_tcp_mport(self): rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'tcp', 'port_range_min': 10, 'port_range_max': 100} egress = mock.call.add_rule( 'ofake_dev', '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN', top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_tcp_mport_prefix(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'tcp', 'port_range_min': 10, 'port_range_max': 100, 'dest_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-d %s -p tcp -m tcp -m multiport --dports 10:100 ' '-j RETURN' % prefix, top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_udp(self): rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'udp'} egress = mock.call.add_rule( 'ofake_dev', '-p udp -j RETURN', top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_udp_prefix(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'udp', 'dest_ip_prefix': prefix} egress = mock.call.add_rule('ofake_dev', '-d %s -p udp -j RETURN' % prefix, top=False, comment=None) ingress = None 
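# NOTE: the icmp-type cases above show the protocol aliases 'icmp',
# 'icmpv6' and the protocol number '58' all being normalized to
# '-p ipv6-icmp' for IPv6 rules, with port_range_min/port_range_max
# rendered as an ICMPv6 type/code match, e.g. min=8, max=0 ->
# '-m icmp6 --icmpv6-type 8/0'.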
self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_udp_port(self): rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'udp', 'port_range_min': 10, 'port_range_max': 10} egress = mock.call.add_rule('ofake_dev', '-p udp -m udp --dport 10 -j RETURN', top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_udp_mport(self): rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'udp', 'port_range_min': 10, 'port_range_max': 100} egress = mock.call.add_rule( 'ofake_dev', '-p udp -m udp -m multiport --dports 10:100 -j RETURN', top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def test_filter_ipv6_egress_udp_mport_prefix(self): prefix = FAKE_PREFIX['IPv6'] rule = {'ethertype': 'IPv6', 'direction': 'egress', 'protocol': 'udp', 'port_range_min': 10, 'port_range_max': 100, 'dest_ip_prefix': prefix} egress = mock.call.add_rule( 'ofake_dev', '-d %s -p udp -m udp -m multiport --dports 10:100 ' '-j RETURN' % prefix, top=False, comment=None) ingress = None self._test_prepare_port_filter(rule, ingress, egress) def _test_process_trusted_ports(self, configured): port = self._fake_port() port['id'] = 'tapfake_dev' calls = [ mock.call.add_chain('sg-fallback'), mock.call.add_rule('sg-fallback', '-j DROP', comment=ic.UNMATCH_DROP)] if configured: self.firewall.trusted_ports.append(port['id']) else: calls.append( mock.call.add_rule('FORWARD', '-m physdev --physdev-out tapfake_dev ' '--physdev-is-bridged -j ACCEPT', top=False, comment=ic.TRUSTED_ACCEPT)) calls.append( mock.call.add_rule('FORWARD', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j ACCEPT', top=False, comment=ic.TRUSTED_ACCEPT)) self.firewall.process_trusted_ports([port['id']]) for filter_inst in [self.v4filter_inst, self.v6filter_inst]: comb = zip(calls, filter_inst.mock_calls) for (l, r) in comb: self.assertEqual(l, r) filter_inst.assert_has_calls(calls) self.assertIn(port['id'], self.firewall.trusted_ports) def test_process_trusted_ports(self): self._test_process_trusted_ports(False) def test_process_trusted_ports_already_configured(self): self._test_process_trusted_ports(True) def _test_remove_trusted_ports(self, configured): port = self._fake_port() port['id'] = 'tapfake_dev' calls = [ mock.call.add_chain('sg-fallback'), mock.call.add_rule('sg-fallback', '-j DROP', comment=ic.UNMATCH_DROP)] if configured: self.firewall.trusted_ports.append(port['id']) calls.append( mock.call.remove_rule('FORWARD', '-m physdev --physdev-out tapfake_dev ' '--physdev-is-bridged -j ACCEPT')) calls.append( mock.call.remove_rule('FORWARD', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j ACCEPT')) self.firewall.remove_trusted_ports([port['id']]) for filter_inst in [self.v4filter_inst, self.v6filter_inst]: comb = zip(calls, filter_inst.mock_calls) for (l, r) in comb: self.assertEqual(l, r) filter_inst.assert_has_calls(calls) self.assertNotIn(port['id'], self.firewall.trusted_ports) def test_remove_trusted_ports(self): self._test_remove_trusted_ports(True) def test_process_remove_ports_not_configured(self): self._test_remove_trusted_ports(False) def _test_prepare_port_filter(self, rule, ingress_expected_call=None, egress_expected_call=None): port = self._fake_port() ethertype = rule['ethertype'] prefix = utils.ip_to_cidr(FAKE_IP[ethertype]) filter_inst = self.v4filter_inst dhcp_rule = [mock.call.add_rule( 'ofake_dev', '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp ' '--sport 
68 --dport 67 -j RETURN', top=False, comment=None)] if ethertype == 'IPv6': filter_inst = self.v6filter_inst dhcp_rule = [mock.call.add_rule('ofake_dev', '-s ::/128 -d ff02::/16 ' '-p ipv6-icmp -m icmp6 ' '--icmpv6-type %s -j RETURN' % icmp6_type, top=False, comment=None) for icmp6_type in constants.ICMPV6_ALLOWED_UNSPEC_ADDR_TYPES] sg = [rule] port['security_group_rules'] = sg self.firewall.prepare_port_filter(port) calls = [mock.call.add_chain('sg-fallback'), mock.call.add_rule( 'sg-fallback', '-j DROP', comment=ic.UNMATCH_DROP), mock.call.add_chain('sg-chain'), mock.call.add_rule('PREROUTING', mock.ANY, # zone set comment=None), mock.call.add_rule('PREROUTING', mock.ANY, # zone set comment=None), mock.call.add_rule('PREROUTING', mock.ANY, # zone set comment=None), mock.call.add_chain('ifake_dev'), mock.call.add_rule('FORWARD', '-m physdev --physdev-out tapfake_dev ' '--physdev-is-bridged -j $sg-chain', top=True, comment=ic.VM_INT_SG), mock.call.add_rule('sg-chain', '-m physdev --physdev-out tapfake_dev ' '--physdev-is-bridged -j $ifake_dev', top=False, comment=ic.SG_TO_VM_SG) ] if ethertype == 'IPv6': for icmp6_type in firewall.ICMPV6_ALLOWED_INGRESS_TYPES: calls.append( mock.call.add_rule('ifake_dev', '-p ipv6-icmp -m icmp6 --icmpv6-type ' '%s -j RETURN' % icmp6_type, top=False, comment=None)) calls += [ mock.call.add_rule( 'ifake_dev', '-m state --state RELATED,ESTABLISHED -j RETURN', top=False, comment=None ) ] if ingress_expected_call: calls.append(ingress_expected_call) calls += [mock.call.add_rule( 'ifake_dev', '-m state --state INVALID -j DROP', top=False, comment=None), mock.call.add_rule('ifake_dev', '-j $sg-fallback', top=False, comment=None), mock.call.add_chain('ofake_dev'), mock.call.add_rule('FORWARD', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $sg-chain', top=True, comment=ic.VM_INT_SG), mock.call.add_rule('sg-chain', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $ofake_dev', top=False, comment=ic.SG_TO_VM_SG), mock.call.add_rule('INPUT', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $ofake_dev', top=False, comment=ic.INPUT_TO_SG), mock.call.add_chain('sfake_dev'), mock.call.add_rule( 'sfake_dev', '-s %s -m mac --mac-source FF:FF:FF:FF:FF:FF -j RETURN' % prefix, comment=ic.PAIR_ALLOW)] if ethertype == 'IPv6': calls.append(mock.call.add_rule('sfake_dev', '-s fe80::fdff:ffff:feff:ffff/128 -m mac ' '--mac-source FF:FF:FF:FF:FF:FF -j RETURN', comment=ic.PAIR_ALLOW)) calls.append(mock.call.add_rule('sfake_dev', '-j DROP', comment=ic.PAIR_DROP)) calls += dhcp_rule calls.append(mock.call.add_rule('ofake_dev', '-j $sfake_dev', top=False, comment=None)) if ethertype == 'IPv4': calls.append(mock.call.add_rule( 'ofake_dev', '-p udp -m udp --sport 68 --dport 67 -j RETURN', top=False, comment=None)) calls.append(mock.call.add_rule( 'ofake_dev', '-p udp -m udp --sport 67 --dport 68 -j DROP', top=False, comment=None)) if ethertype == 'IPv6': calls.append(mock.call.add_rule('ofake_dev', '-p ipv6-icmp -m icmp6 ' '--icmpv6-type %s -j DROP' % constants.ICMPV6_TYPE_RA, top=False, comment=None)) calls.append(mock.call.add_rule('ofake_dev', '-p ipv6-icmp -j RETURN', top=False, comment=None)) calls.append(mock.call.add_rule('ofake_dev', '-p udp -m udp ' '--sport 546 --dport 547 ' '-j RETURN', top=False, comment=None)) calls.append(mock.call.add_rule( 'ofake_dev', '-p udp -m udp --sport 547 --dport 546 -j DROP', top=False, comment=None)) calls += [ mock.call.add_rule( 'ofake_dev', '-m state --state RELATED,ESTABLISHED -j RETURN', 
top=False, comment=None), ] if egress_expected_call: calls.append(egress_expected_call) calls += [mock.call.add_rule( 'ofake_dev', '-m state --state INVALID -j DROP', top=False, comment=None), mock.call.add_rule('ofake_dev', '-j $sg-fallback', top=False, comment=None), mock.call.add_rule('sg-chain', '-j ACCEPT')] comb = zip(calls, filter_inst.mock_calls) for (l, r) in comb: self.assertEqual(l, r) filter_inst.assert_has_calls(calls) def _test_remove_conntrack_entries(self, ethertype, protocol, direction, ct_zone): port = self._fake_port() port['security_groups'] = 'fake_sg_id' self.firewall.filtered_ports[port['device']] = port self.firewall.updated_rule_sg_ids = set(['fake_sg_id']) self.firewall.sg_rules['fake_sg_id'] = [ {'direction': direction, 'ethertype': ethertype, 'protocol': protocol}] with mock.patch.dict(self.firewall.ipconntrack._device_zone_map, {port['network_id']: ct_zone}): self.firewall.filter_defer_apply_on() self.firewall.sg_rules['fake_sg_id'] = [] self.firewall.filter_defer_apply_off() if not ct_zone: self.assertFalse(self.utils_exec.called) return # process conntrack updates in the queue while not self.firewall.ipconntrack._queue.empty(): self.firewall.ipconntrack._process_queue() cmd = ['conntrack', '-D'] if protocol is not None: if str(protocol) == '0': protocol = 'ip' cmd.extend(['-p', str(protocol)]) if ethertype == 'IPv4': cmd.extend(['-f', 'ipv4']) if direction == 'ingress': cmd.extend(['-d', '10.0.0.1']) else: cmd.extend(['-s', '10.0.0.1']) else: cmd.extend(['-f', 'ipv6']) if direction == 'ingress': cmd.extend(['-d', 'fe80::1']) else: cmd.extend(['-s', 'fe80::1']) cmd.extend(['-w', ct_zone]) calls = [ mock.call(cmd, run_as_root=True, check_exit_code=True, extra_ok_codes=[1])] self.utils_exec.assert_has_calls(calls) def test_remove_conntrack_entries_for_delete_rule_ipv4(self): for direction in ['ingress', 'egress']: for pro in [None, 'ip', 'tcp', 'icmp', 'udp', '0']: self._test_remove_conntrack_entries( 'IPv4', pro, direction, ct_zone=10) def test_remove_conntrack_entries_for_delete_rule_ipv4_by_num(self): for direction in ['ingress', 'egress']: for pro in [None, 0, 6, 1, 17]: self._test_remove_conntrack_entries( 'IPv4', pro, direction, ct_zone=10) def test_remove_conntrack_entries_for_delete_rule_ipv4_no_ct_zone(self): for direction in ['ingress', 'egress']: for pro in [None, 'tcp', 'icmp', 'udp']: self._test_remove_conntrack_entries( 'IPv4', pro, direction, ct_zone=None) def test_remove_conntrack_entries_for_delete_rule_ipv6(self): for direction in ['ingress', 'egress']: for pro in [None, 'tcp', 'icmp', 'udp']: self._test_remove_conntrack_entries( 'IPv6', pro, direction, ct_zone=10) def test_remove_conntrack_entries_for_delete_rule_ipv6_no_ct_zone(self): for direction in ['ingress', 'egress']: for pro in [None, 'tcp', 'icmp', 'udp']: self._test_remove_conntrack_entries( 'IPv6', pro, direction, ct_zone=None) def test_remove_conntrack_entries_for_port_sec_group_change(self): self._test_remove_conntrack_entries_for_port_sec_group_change( ct_zone=10) def test_remove_conntrack_entries_for_port_sec_group_change_no_ct_zone( self): self._test_remove_conntrack_entries_for_port_sec_group_change( ct_zone=None) def _get_expected_conntrack_calls(self, ips, ct_zone): expected_calls = [] for ip_item in ips: proto = ip_item[0] ip = ip_item[1] for direction in ['-d', '-s']: cmd = ['conntrack', '-D', '-f', proto, direction, ip] if ct_zone: cmd.extend(['-w', ct_zone]) expected_calls.append( mock.call(cmd, run_as_root=True, check_exit_code=True, extra_ok_codes=[1])) return 
expected_calls def _test_remove_conntrack_entries_for_port_sec_group_change(self, ct_zone): port = self._fake_port() port['security_groups'] = ['fake_sg_id'] self.firewall.filtered_ports[port['device']] = port self.firewall.updated_sg_members = set(['tapfake_dev']) with mock.patch.dict(self.firewall.ipconntrack._device_zone_map, {port['network_id']: ct_zone}): self.firewall.filter_defer_apply_on() new_port = copy.deepcopy(port) new_port['security_groups'] = ['fake_sg_id2'] self.firewall.filtered_ports[port['device']] = new_port self.firewall.filter_defer_apply_off() if not ct_zone: self.assertFalse(self.utils_exec.called) return # process conntrack updates in the queue while not self.firewall.ipconntrack._queue.empty(): self.firewall.ipconntrack._process_queue() calls = self._get_expected_conntrack_calls( [('ipv4', '10.0.0.1'), ('ipv6', 'fe80::1')], ct_zone) self.utils_exec.assert_has_calls(calls) def test_remove_conntrack_entries_for_sg_member_changed_ipv4(self): for direction in ['ingress', 'egress']: self._test_remove_conntrack_entries_sg_member_changed( 'IPv4', direction, ct_zone=10) def test_remove_conntrack_entries_for_sg_member_changed_ipv4_no_ct_zone( self): for direction in ['ingress', 'egress']: self._test_remove_conntrack_entries_sg_member_changed( 'IPv4', direction, ct_zone=None) def test_remove_conntrack_entries_for_sg_member_changed_ipv6(self): for direction in ['ingress', 'egress']: self._test_remove_conntrack_entries_sg_member_changed( 'IPv6', direction, ct_zone=10) def test_remove_conntrack_entries_for_sg_member_changed_ipv6_no_ct_zone( self): for direction in ['ingress', 'egress']: self._test_remove_conntrack_entries_sg_member_changed( 'IPv6', direction, ct_zone=None) def _test_remove_conntrack_entries_sg_member_changed(self, ethertype, direction, ct_zone): port = self._fake_port() port['security_groups'] = ['fake_sg_id'] port['security_group_source_groups'] = ['fake_sg_id2'] port['security_group_rules'] = [{'security_group_id': 'fake_sg_id', 'direction': direction, 'remote_group_id': 'fake_sg_id2', 'ethertype': ethertype}] self.firewall.filtered_ports = {port['device']: port} if ethertype == "IPv4": ethertype = "ipv4" members_add = {'IPv4': ['10.0.0.2', '10.0.0.3']} members_after_delete = {'IPv4': ['10.0.0.3']} else: ethertype = "ipv6" members_add = {'IPv6': ['fe80::2', 'fe80::3']} members_after_delete = {'IPv6': ['fe80::3']} with mock.patch.dict(self.firewall.ipconntrack._device_zone_map, {port['network_id']: ct_zone}): # add ['10.0.0.2', '10.0.0.3'] or ['fe80::2', 'fe80::3'] self.firewall.security_group_updated('sg_member', ['fake_sg_id2']) self.firewall.update_security_group_members( 'fake_sg_id2', members_add) # delete '10.0.0.2' or 'fe80::2' self.firewall.security_group_updated('sg_member', ['fake_sg_id2']) self.firewall.update_security_group_members( 'fake_sg_id2', members_after_delete) # check conntrack deletion from '10.0.0.1' to '10.0.0.2' or # from 'fe80::1' to 'fe80::2' ips = {"ipv4": ['10.0.0.1', '10.0.0.2'], "ipv6": ['fe80::1', 'fe80::2']} calls = [] # process conntrack updates in the queue while not self.firewall.ipconntrack._queue.empty(): self.firewall.ipconntrack._process_queue() for direction in ['ingress', 'egress']: direction = '-d' if direction == 'ingress' else '-s' remote_ip_direction = '-s' if direction == '-d' else '-d' conntrack_cmd = ['conntrack', '-D', '-f', ethertype, direction, ips[ethertype][0]] if not ct_zone: continue conntrack_cmd.extend(['-w', 10]) conntrack_cmd.extend([remote_ip_direction, ips[ethertype][1]]) 
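# For the IPv4 ingress case with ct_zone=10, the command assembled
# above works out to:
#   conntrack -D -f ipv4 -d 10.0.0.1 -w 10 -s 10.0.0.2
# i.e. delete tracked flows between the port's IP and the removed
# member, scoped to the port's conntrack zone.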
calls.append(mock.call(conntrack_cmd, run_as_root=True, check_exit_code=True, extra_ok_codes=[1])) self.utils_exec.assert_has_calls(calls) def test_user_sg_rules_deduped_before_call_to_iptables_manager(self): port = self._fake_port() port['security_group_rules'] = [{'ethertype': 'IPv4', 'direction': 'ingress'}] * 2 self.firewall.prepare_port_filter(port) rules = [''.join(c[1]) for c in self.v4filter_inst.add_rule.mock_calls] self.assertEqual(len(set(rules)), len(rules)) def test_update_delete_port_filter(self): port = self._fake_port() port['security_group_rules'] = [{'ethertype': 'IPv4', 'direction': 'ingress'}] self.firewall.prepare_port_filter(port) port['security_group_rules'] = [{'ethertype': 'IPv4', 'direction': 'egress'}] self.firewall.update_port_filter(port) self.firewall.update_port_filter({'device': 'no-exist-device'}) self.firewall.remove_port_filter(port) self.firewall.remove_port_filter({'device': 'no-exist-device'}) calls = [mock.call.add_chain('sg-fallback'), mock.call.add_rule( 'sg-fallback', '-j DROP', comment=ic.UNMATCH_DROP), mock.call.add_chain('sg-chain'), mock.call.add_rule('PREROUTING', mock.ANY, comment=None), # zone set mock.call.add_rule('PREROUTING', mock.ANY, comment=None), # zone set mock.call.add_rule('PREROUTING', mock.ANY, comment=None), # zone set mock.call.add_chain('ifake_dev'), mock.call.add_rule( 'FORWARD', '-m physdev --physdev-out tapfake_dev ' '--physdev-is-bridged -j $sg-chain', top=True, comment=ic.VM_INT_SG), mock.call.add_rule( 'sg-chain', '-m physdev --physdev-out tapfake_dev ' '--physdev-is-bridged -j $ifake_dev', top=False, comment=ic.SG_TO_VM_SG), mock.call.add_rule( 'ifake_dev', '-m state --state RELATED,ESTABLISHED -j RETURN', top=False, comment=None), mock.call.add_rule('ifake_dev', '-j RETURN', top=False, comment=None), mock.call.add_rule( 'ifake_dev', '-m state --state INVALID -j DROP', top=False, comment=None), mock.call.add_rule('ifake_dev', '-j $sg-fallback', top=False, comment=None), mock.call.add_chain('ofake_dev'), mock.call.add_rule( 'FORWARD', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $sg-chain', top=True, comment=ic.VM_INT_SG), mock.call.add_rule( 'sg-chain', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $ofake_dev', top=False, comment=ic.SG_TO_VM_SG), mock.call.add_rule( 'INPUT', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $ofake_dev', top=False, comment=ic.INPUT_TO_SG), mock.call.add_chain('sfake_dev'), mock.call.add_rule( 'sfake_dev', '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF ' '-j RETURN', comment=ic.PAIR_ALLOW), mock.call.add_rule( 'sfake_dev', '-j DROP', comment=ic.PAIR_DROP), mock.call.add_rule( 'ofake_dev', '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp ' '--sport 68 --dport 67 -j RETURN', top=False, comment=None), mock.call.add_rule('ofake_dev', '-j $sfake_dev', top=False, comment=None), mock.call.add_rule( 'ofake_dev', '-p udp -m udp --sport 68 --dport 67 -j RETURN', top=False, comment=None), mock.call.add_rule( 'ofake_dev', '-p udp -m udp --sport 67 --dport 68 -j DROP', top=False, comment=None), mock.call.add_rule( 'ofake_dev', '-m state --state RELATED,ESTABLISHED -j RETURN', top=False, comment=None), mock.call.add_rule( 'ofake_dev', '-m state --state INVALID -j DROP', top=False, comment=None), mock.call.add_rule('ofake_dev', '-j $sg-fallback', top=False, comment=None), mock.call.add_rule('sg-chain', '-j ACCEPT'), mock.call.remove_chain('ifake_dev'), mock.call.remove_chain('ofake_dev'), mock.call.remove_chain('sfake_dev'), 
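# update_port_filter first tears down the old per-device chains and
# zone rules, then rebuilds everything for the new egress-only rule: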
mock.call.remove_rule('PREROUTING', mock.ANY), # zone set mock.call.remove_rule('PREROUTING', mock.ANY), # zone set mock.call.remove_rule('PREROUTING', mock.ANY), # zone set mock.call.remove_chain('sg-chain'), mock.call.add_chain('sg-chain'), mock.call.add_rule('PREROUTING', mock.ANY, comment=None), # zone set mock.call.add_rule('PREROUTING', mock.ANY, comment=None), # zone set mock.call.add_rule('PREROUTING', mock.ANY, comment=None), # zone set mock.call.add_chain('ifake_dev'), mock.call.add_rule( 'FORWARD', '-m physdev --physdev-out tapfake_dev ' '--physdev-is-bridged -j $sg-chain', top=True, comment=ic.VM_INT_SG), mock.call.add_rule( 'sg-chain', '-m physdev --physdev-out tapfake_dev ' '--physdev-is-bridged -j $ifake_dev', top=False, comment=ic.SG_TO_VM_SG), mock.call.add_rule( 'ifake_dev', '-m state --state RELATED,ESTABLISHED -j RETURN', top=False, comment=None), mock.call.add_rule( 'ifake_dev', '-m state --state INVALID -j DROP', top=False, comment=None), mock.call.add_rule('ifake_dev', '-j $sg-fallback', top=False, comment=None), mock.call.add_chain('ofake_dev'), mock.call.add_rule( 'FORWARD', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $sg-chain', top=True, comment=ic.VM_INT_SG), mock.call.add_rule( 'sg-chain', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $ofake_dev', top=False, comment=ic.SG_TO_VM_SG), mock.call.add_rule( 'INPUT', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $ofake_dev', top=False, comment=ic.INPUT_TO_SG), mock.call.add_chain('sfake_dev'), mock.call.add_rule( 'sfake_dev', '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF ' '-j RETURN', comment=ic.PAIR_ALLOW), mock.call.add_rule( 'sfake_dev', '-j DROP', comment=ic.PAIR_DROP), mock.call.add_rule( 'ofake_dev', '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp ' '--sport 68 --dport 67 -j RETURN', top=False, comment=None), mock.call.add_rule('ofake_dev', '-j $sfake_dev', top=False, comment=None), mock.call.add_rule( 'ofake_dev', '-p udp -m udp --sport 68 --dport 67 -j RETURN', top=False, comment=None), mock.call.add_rule( 'ofake_dev', '-p udp -m udp --sport 67 --dport 68 -j DROP', top=False, comment=None), mock.call.add_rule( 'ofake_dev', '-m state --state RELATED,ESTABLISHED -j RETURN', top=False, comment=None), mock.call.add_rule('ofake_dev', '-j RETURN', top=False, comment=None), mock.call.add_rule( 'ofake_dev', '-m state --state INVALID -j DROP', top=False, comment=None), mock.call.add_rule('ofake_dev', '-j $sg-fallback', top=False, comment=None), mock.call.add_rule('sg-chain', '-j ACCEPT'), mock.call.remove_chain('ifake_dev'), mock.call.remove_chain('ofake_dev'), mock.call.remove_chain('sfake_dev'), mock.call.remove_rule('PREROUTING', mock.ANY), # zone set mock.call.remove_rule('PREROUTING', mock.ANY), # zone set mock.call.remove_rule('PREROUTING', mock.ANY), # zone set mock.call.remove_chain('sg-chain'), mock.call.add_chain('sg-chain')] self.v4filter_inst.assert_has_calls(calls) def test_delete_conntrack_from_delete_port(self): self._test_delete_conntrack_from_delete_port(ct_zone=10) def test_delete_conntrack_from_delete_port_no_ct_zone(self): self._test_delete_conntrack_from_delete_port(ct_zone=None) def _test_delete_conntrack_from_delete_port(self, ct_zone): port = self._fake_port() port['security_groups'] = ['fake_sg_id'] self.firewall.filtered_ports = {'tapfake_dev': port} self.firewall.devices_with_updated_sg_members['fake_sg_id2' ] = ['tapfake_dev'] new_port = copy.deepcopy(port) new_port['security_groups'] = ['fake_sg_id2'] new_port['device'] = 
['tapfake_dev2'] new_port['fixed_ips'] = ['10.0.0.2', 'fe80::2'] self.firewall.sg_members['fake_sg_id2'] = {'IPv4': ['10.0.0.2'], 'IPv6': ['fe80::2']} mock.patch.object(self.firewall.ipconntrack, 'get_device_zone', return_value=ct_zone).start() self.firewall.remove_port_filter(port) if not ct_zone: self.assertFalse(self.utils_exec.called) return # process conntrack updates in the queue while not self.firewall.ipconntrack._queue.empty(): self.firewall.ipconntrack._process_queue() calls = self._get_expected_conntrack_calls( [('ipv4', '10.0.0.1'), ('ipv6', 'fe80::1')], ct_zone) self.utils_exec.assert_has_calls(calls) def test_remove_unknown_port(self): port = self._fake_port() self.firewall.remove_port_filter(port) # checking no exception occurs self.assertFalse(self.v4filter_inst.called) def test_defer_apply(self): with self.firewall.defer_apply(): pass self.iptables_inst.assert_has_calls([mock.call.defer_apply_on(), mock.call.defer_apply_off()]) def test_filter_defer_with_exception(self): try: with self.firewall.defer_apply(): raise Exception("same exception") except Exception: pass self.iptables_inst.assert_has_calls([mock.call.defer_apply_on(), mock.call.defer_apply_off()]) def _mock_chain_applies(self): class CopyingMock(mock.MagicMock): """Copies arguments so mutable arguments can be asserted on. Copied verbatim from unittest.mock documentation. """ def __call__(self, *args, **kwargs): args = copy.deepcopy(args) kwargs = copy.deepcopy(kwargs) return super(CopyingMock, self).__call__(*args, **kwargs) # Need to use CopyingMock because _{setup,remove}_chains_apply are # usually called with that's modified between calls (i.e., # self.firewall.filtered_ports). chain_applies = CopyingMock() self.firewall._setup_chains_apply = chain_applies.setup self.firewall._remove_chains_apply = chain_applies.remove return chain_applies def test_mock_chain_applies(self): chain_applies = self._mock_chain_applies() port_prepare = {'device': 'd1', 'mac_address': 'prepare', 'network_id': 'fake_net'} port_update = {'device': 'd1', 'mac_address': 'update', 'network_id': 'fake_net'} self.firewall.prepare_port_filter(port_prepare) self.firewall.update_port_filter(port_update) self.firewall.remove_port_filter(port_update) chain_applies.assert_has_calls([ mock.call.setup({'d1': port_prepare}, {}), mock.call.remove({'d1': port_prepare}, {}), mock.call.setup({'d1': port_update}, {}), mock.call.remove({'d1': port_update}, {}), mock.call.setup({}, {})]) def test_defer_chain_apply_need_pre_defer_copy(self): chain_applies = self._mock_chain_applies() port = self._fake_port() device2port = {port['device']: port} self.firewall.prepare_port_filter(port) with self.firewall.defer_apply(): self.firewall.remove_port_filter(port) chain_applies.assert_has_calls([mock.call.setup(device2port, {}), mock.call.remove(device2port, {}), mock.call.setup({}, {})]) def test_defer_chain_apply_coalesce_simple(self): chain_applies = self._mock_chain_applies() port = self._fake_port() with self.firewall.defer_apply(): self.firewall.prepare_port_filter(port) self.firewall.update_port_filter(port) self.firewall.remove_port_filter(port) chain_applies.assert_has_calls([mock.call.remove({}, {}), mock.call.setup({}, {})]) def test_defer_chain_apply_coalesce_multiple_ports(self): chain_applies = self._mock_chain_applies() port1 = {'device': 'd1', 'mac_address': 'mac1', 'network_id': 'net1'} port2 = {'device': 'd2', 'mac_address': 'mac2', 'network_id': 'net1'} device2port = {'d1': port1, 'd2': port2} with self.firewall.defer_apply(): 
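# Both ports are prepared inside one defer_apply() window, so the
# rebuilds are coalesced: a single remove of the (empty) pre-defer
# state and a single setup carrying both ports.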
self.firewall.prepare_port_filter(port1) self.firewall.prepare_port_filter(port2) chain_applies.assert_has_calls([mock.call.remove({}, {}), mock.call.setup(device2port, {})]) def test_ip_spoofing_filter_with_multiple_ips(self): port = {'device': 'tapfake_dev', 'mac_address': 'ff:ff:ff:ff:ff:ff', 'network_id': 'fake_net', 'fixed_ips': ['10.0.0.1', 'fe80::1', '10.0.0.2']} self.firewall.prepare_port_filter(port) calls = [mock.call.add_chain('sg-fallback'), mock.call.add_rule( 'sg-fallback', '-j DROP', comment=ic.UNMATCH_DROP), mock.call.add_chain('sg-chain'), mock.call.add_rule('PREROUTING', mock.ANY, # zone set comment=None), mock.call.add_rule('PREROUTING', mock.ANY, # zone set comment=None), mock.call.add_rule('PREROUTING', mock.ANY, # zone set comment=None), mock.call.add_chain('ifake_dev'), mock.call.add_rule('FORWARD', '-m physdev --physdev-out tapfake_dev ' '--physdev-is-bridged -j $sg-chain', top=True, comment=ic.VM_INT_SG), mock.call.add_rule('sg-chain', '-m physdev --physdev-out tapfake_dev ' '--physdev-is-bridged -j $ifake_dev', top=False, comment=ic.SG_TO_VM_SG), mock.call.add_rule( 'ifake_dev', '-m state --state RELATED,ESTABLISHED -j RETURN', top=False, comment=None), mock.call.add_rule( 'ifake_dev', '-m state --state INVALID -j DROP', top=False, comment=None), mock.call.add_rule('ifake_dev', '-j $sg-fallback', top=False, comment=None), mock.call.add_chain('ofake_dev'), mock.call.add_rule('FORWARD', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $sg-chain', top=True, comment=ic.VM_INT_SG), mock.call.add_rule('sg-chain', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $ofake_dev', top=False, comment=ic.SG_TO_VM_SG), mock.call.add_rule('INPUT', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $ofake_dev', top=False, comment=ic.INPUT_TO_SG), mock.call.add_chain('sfake_dev'), mock.call.add_rule( 'sfake_dev', '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF ' '-j RETURN', comment=ic.PAIR_ALLOW), mock.call.add_rule( 'sfake_dev', '-s 10.0.0.2/32 -m mac --mac-source FF:FF:FF:FF:FF:FF ' '-j RETURN', comment=ic.PAIR_ALLOW), mock.call.add_rule( 'sfake_dev', '-j DROP', comment=ic.PAIR_DROP), mock.call.add_rule( 'ofake_dev', '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp ' '--sport 68 --dport 67 -j RETURN', top=False, comment=None), mock.call.add_rule('ofake_dev', '-j $sfake_dev', top=False, comment=None), mock.call.add_rule( 'ofake_dev', '-p udp -m udp --sport 68 --dport 67 -j RETURN', top=False, comment=None), mock.call.add_rule( 'ofake_dev', '-p udp -m udp --sport 67 --dport 68 -j DROP', top=False, comment=None), mock.call.add_rule( 'ofake_dev', '-m state --state RELATED,ESTABLISHED -j RETURN', top=False, comment=None), mock.call.add_rule( 'ofake_dev', '-m state --state INVALID -j DROP', top=False, comment=None), mock.call.add_rule('ofake_dev', '-j $sg-fallback', top=False, comment=None), mock.call.add_rule('sg-chain', '-j ACCEPT')] self.v4filter_inst.assert_has_calls(calls) def test_ip_spoofing_no_fixed_ips(self): port = {'device': 'tapfake_dev', 'mac_address': 'ff:ff:ff:ff:ff:ff', 'network_id': 'fake_net', 'fixed_ips': []} self.firewall.prepare_port_filter(port) calls = [mock.call.add_chain('sg-fallback'), mock.call.add_rule( 'sg-fallback', '-j DROP', comment=ic.UNMATCH_DROP), mock.call.add_chain('sg-chain'), mock.call.add_rule('PREROUTING', mock.ANY, # zone set comment=None), mock.call.add_rule('PREROUTING', mock.ANY, # zone set comment=None), mock.call.add_rule('PREROUTING', mock.ANY, # zone set comment=None), 
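# With no fixed_ips, the sfake_dev spoofing chain below matches on the
# MAC address alone ('-m mac --mac-source ... -j RETURN'); any source
# IP passes as long as the MAC matches, and everything else is dropped.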
mock.call.add_chain('ifake_dev'), mock.call.add_rule('FORWARD', '-m physdev --physdev-out tapfake_dev ' '--physdev-is-bridged -j $sg-chain', top=True, comment=ic.VM_INT_SG), mock.call.add_rule('sg-chain', '-m physdev --physdev-out tapfake_dev ' '--physdev-is-bridged -j $ifake_dev', top=False, comment=ic.SG_TO_VM_SG), mock.call.add_rule( 'ifake_dev', '-m state --state RELATED,ESTABLISHED -j RETURN', top=False, comment=None), mock.call.add_rule( 'ifake_dev', '-m state --state INVALID -j DROP', top=False, comment=None), mock.call.add_rule('ifake_dev', '-j $sg-fallback', top=False, comment=None), mock.call.add_chain('ofake_dev'), mock.call.add_rule('FORWARD', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $sg-chain', top=True, comment=ic.VM_INT_SG), mock.call.add_rule('sg-chain', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $ofake_dev', top=False, comment=ic.SG_TO_VM_SG), mock.call.add_rule('INPUT', '-m physdev --physdev-in tapfake_dev ' '--physdev-is-bridged -j $ofake_dev', top=False, comment=ic.INPUT_TO_SG), mock.call.add_chain('sfake_dev'), mock.call.add_rule( 'sfake_dev', '-m mac --mac-source FF:FF:FF:FF:FF:FF -j RETURN', comment=ic.PAIR_ALLOW), mock.call.add_rule( 'sfake_dev', '-j DROP', comment=ic.PAIR_DROP), mock.call.add_rule( 'ofake_dev', '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp ' '--sport 68 --dport 67 -j RETURN', top=False, comment=None), mock.call.add_rule('ofake_dev', '-j $sfake_dev', top=False, comment=None), mock.call.add_rule( 'ofake_dev', '-p udp -m udp --sport 68 --dport 67 -j RETURN', top=False, comment=None), mock.call.add_rule( 'ofake_dev', '-p udp -m udp --sport 67 --dport 68 -j DROP', top=False, comment=None), mock.call.add_rule( 'ofake_dev', '-m state --state RELATED,ESTABLISHED -j RETURN', top=False, comment=None), mock.call.add_rule( 'ofake_dev', '-m state --state INVALID -j DROP', top=False, comment=None), mock.call.add_rule('ofake_dev', '-j $sg-fallback', top=False, comment=None), mock.call.add_rule('sg-chain', '-j ACCEPT')] self.v4filter_inst.assert_has_calls(calls) class IptablesFirewallEnhancedIpsetTestCase(BaseIptablesFirewallTestCase): def setUp(self): super(IptablesFirewallEnhancedIpsetTestCase, self).setUp() self.firewall.ipset = mock.Mock() self.firewall.ipset.get_name.side_effect = ( ipset_manager.IpsetManager.get_name) self.firewall.ipset.set_name_exists.return_value = True self.firewall.ipset.set_members = mock.Mock(return_value=([], [])) def _fake_port(self, sg_id=FAKE_SGID): return {'device': 'tapfake_dev', 'mac_address': 'ff:ff:ff:ff:ff:ff', 'network_id': 'fake_net', 'fixed_ips': [FAKE_IP['IPv4'], FAKE_IP['IPv6']], 'security_groups': [sg_id], 'security_group_source_groups': [sg_id]} def _fake_sg_rule_for_ethertype(self, ethertype, remote_group): return {'direction': 'ingress', 'remote_group_id': remote_group, 'ethertype': ethertype} def _fake_sg_rules(self, sg_id=FAKE_SGID, remote_groups=None): remote_groups = remote_groups or {_IPv4: [FAKE_SGID], _IPv6: [FAKE_SGID]} rules = [] for ip_version, remote_group_list in remote_groups.items(): for remote_group in remote_group_list: rules.append(self._fake_sg_rule_for_ethertype(ip_version, remote_group)) return {sg_id: rules} def _fake_sg_members(self, sg_ids=None): return {sg_id: copy.copy(FAKE_IP) for sg_id in (sg_ids or [FAKE_SGID])} def test_update_security_group_members(self): sg_members = {'IPv4': ['10.0.0.1', '10.0.0.2'], 'IPv6': ['fe80::1']} self.firewall.update_security_group_members('fake_sgid', sg_members) calls = [ mock.call.set_members('fake_sgid', 
'IPv4', ['10.0.0.1', '10.0.0.2']), mock.call.set_members('fake_sgid', 'IPv6', ['fe80::1']) ] self.firewall.ipset.assert_has_calls(calls, any_order=True) def _setup_fake_firewall_members_and_rules(self, firewall): firewall.sg_rules = self._fake_sg_rules() firewall.pre_sg_rules = self._fake_sg_rules() firewall.sg_members = self._fake_sg_members() firewall.pre_sg_members = firewall.sg_members def _prepare_rules_and_members_for_removal(self): self._setup_fake_firewall_members_and_rules(self.firewall) self.firewall.pre_sg_members[OTHER_SGID] = ( self.firewall.pre_sg_members[FAKE_SGID]) def test_determine_remote_sgs_to_remove(self): self._prepare_rules_and_members_for_removal() ports = [self._fake_port()] self.assertEqual( {_IPv4: set([OTHER_SGID]), _IPv6: set([OTHER_SGID])}, self.firewall._determine_remote_sgs_to_remove(ports)) def test_determine_remote_sgs_to_remove_ipv6_unreferenced(self): self._prepare_rules_and_members_for_removal() ports = [self._fake_port()] self.firewall.sg_rules = self._fake_sg_rules( remote_groups={_IPv4: [OTHER_SGID, FAKE_SGID], _IPv6: [FAKE_SGID]}) self.assertEqual( {_IPv4: set(), _IPv6: set([OTHER_SGID])}, self.firewall._determine_remote_sgs_to_remove(ports)) def test_get_remote_sg_ids_by_ipversion(self): self.firewall.sg_rules = self._fake_sg_rules( remote_groups={_IPv4: [FAKE_SGID], _IPv6: [OTHER_SGID]}) ports = [self._fake_port()] self.assertEqual( {_IPv4: set([FAKE_SGID]), _IPv6: set([OTHER_SGID])}, self.firewall._get_remote_sg_ids_sets_by_ipversion(ports)) def test_get_remote_sg_ids(self): self.firewall.sg_rules = self._fake_sg_rules( remote_groups={_IPv4: [FAKE_SGID, FAKE_SGID, FAKE_SGID], _IPv6: [OTHER_SGID, OTHER_SGID, OTHER_SGID]}) port = self._fake_port() self.assertEqual( {_IPv4: set([FAKE_SGID]), _IPv6: set([OTHER_SGID])}, self.firewall._get_remote_sg_ids(port)) def test_determine_sg_rules_to_remove(self): self.firewall.pre_sg_rules = self._fake_sg_rules(sg_id=OTHER_SGID) ports = [self._fake_port()] self.assertEqual(set([OTHER_SGID]), self.firewall._determine_sg_rules_to_remove(ports)) def test_get_sg_ids_set_for_ports(self): sg_ids = set([FAKE_SGID, OTHER_SGID]) ports = [self._fake_port(sg_id) for sg_id in sg_ids] self.assertEqual(sg_ids, self.firewall._get_sg_ids_set_for_ports(ports)) def test_remove_sg_members(self): self.firewall.sg_members = self._fake_sg_members([FAKE_SGID, OTHER_SGID]) remote_sgs_to_remove = {_IPv4: set([FAKE_SGID]), _IPv6: set([FAKE_SGID, OTHER_SGID])} self.firewall._remove_sg_members(remote_sgs_to_remove) self.assertIn(OTHER_SGID, self.firewall.sg_members) self.assertNotIn(FAKE_SGID, self.firewall.sg_members) def test_remove_unused_security_group_info_clears_unused_rules(self): self._setup_fake_firewall_members_and_rules(self.firewall) self.firewall.prepare_port_filter(self._fake_port()) # create another SG which won't be referenced by any filtered port fake_sg_rules = self.firewall.sg_rules['fake_sgid'] self.firewall.pre_sg_rules[OTHER_SGID] = fake_sg_rules self.firewall.sg_rules[OTHER_SGID] = fake_sg_rules # call the cleanup function, and check the unused sg_rules are out self.firewall._remove_unused_security_group_info() self.assertNotIn(OTHER_SGID, self.firewall.sg_rules) def test_remove_unused_security_group_info(self): self.firewall.sg_members = {OTHER_SGID: {_IPv4: [], _IPv6: []}} self.firewall.pre_sg_members = self.firewall.sg_members self.firewall.sg_rules = self._fake_sg_rules( remote_groups={_IPv4: [FAKE_SGID], _IPv6: [FAKE_SGID]}) self.firewall.pre_sg_rules = self.firewall.sg_rules port = self._fake_port() 
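# OTHER_SGID has an (empty) member entry but is not referenced as a
# remote group by any filtered port's rules -- only FAKE_SGID is -- so
# the cleanup below is expected to drop it from sg_members.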
self.firewall.filtered_ports['tapfake_dev'] = port self.firewall._remove_unused_security_group_info() self.assertNotIn(OTHER_SGID, self.firewall.sg_members) def test_not_remove_used_security_group_info(self): self.firewall.sg_members = {OTHER_SGID: {_IPv4: [], _IPv6: []}} self.firewall.pre_sg_members = self.firewall.sg_members self.firewall.sg_rules = self._fake_sg_rules( remote_groups={_IPv4: [OTHER_SGID], _IPv6: [OTHER_SGID]}) self.firewall.pre_sg_rules = self.firewall.sg_rules port = self._fake_port() self.firewall.filtered_ports['tapfake_dev'] = port self.firewall._remove_unused_security_group_info() self.assertIn(OTHER_SGID, self.firewall.sg_members) def test_remove_all_unused_info(self): self._setup_fake_firewall_members_and_rules(self.firewall) self.firewall.filtered_ports = {} self.firewall._remove_unused_security_group_info() self.assertFalse(self.firewall.sg_members) self.assertFalse(self.firewall.sg_rules) def test_single_fallback_accept_rule(self): p1, p2 = self._fake_port(), self._fake_port() self.firewall._setup_chains_apply(dict(p1=p1, p2=p2), {}) v4_adds = self.firewall.iptables.ipv4['filter'].add_rule.mock_calls v6_adds = self.firewall.iptables.ipv6['filter'].add_rule.mock_calls sg_chain_v4_accept = [call for call in v4_adds if call == mock.call('sg-chain', '-j ACCEPT')] sg_chain_v6_accept = [call for call in v6_adds if call == mock.call('sg-chain', '-j ACCEPT')] self.assertEqual(1, len(sg_chain_v4_accept)) self.assertEqual(1, len(sg_chain_v6_accept)) def test_remove_port_filter_with_destroy_ipset_chain(self): self.firewall.sg_rules = self._fake_sg_rules() port = self._fake_port() self.firewall.pre_sg_members = {'fake_sgid': { 'IPv4': [], 'IPv6': []}} sg_members = {'IPv4': ['10.0.0.1'], 'IPv6': ['fe80::1']} self.firewall.update_security_group_members('fake_sgid', sg_members) self.firewall.prepare_port_filter(port) self.firewall.filter_defer_apply_on() self.firewall.sg_members = {'fake_sgid': { 'IPv4': [], 'IPv6': []}} self.firewall.pre_sg_members = {'fake_sgid': { 'IPv4': ['10.0.0.1'], 'IPv6': ['fe80::1']}} self.firewall.remove_port_filter(port) self.firewall.filter_defer_apply_off() calls = [ mock.call.set_members('fake_sgid', 'IPv4', ['10.0.0.1']), mock.call.set_members('fake_sgid', 'IPv6', ['fe80::1']), mock.call.get_name('fake_sgid', 'IPv4'), mock.call.set_name_exists('NIPv4fake_sgid'), mock.call.get_name('fake_sgid', 'IPv6'), mock.call.set_name_exists('NIPv6fake_sgid'), mock.call.destroy('fake_sgid', 'IPv4'), mock.call.destroy('fake_sgid', 'IPv6')] self.firewall.ipset.assert_has_calls(calls, any_order=True) def test_filter_defer_apply_off_with_sg_only_ipv6_rule(self): self.firewall.sg_rules = self._fake_sg_rules() self.firewall.pre_sg_rules = self._fake_sg_rules() self.firewall.ipset_chains = {'IPv4fake_sgid': ['10.0.0.2'], 'IPv6fake_sgid': ['fe80::1']} self.firewall.sg_members = {'fake_sgid': { 'IPv4': ['10.0.0.2'], 'IPv6': ['fe80::1']}} self.firewall.pre_sg_members = {'fake_sgid': { 'IPv4': ['10.0.0.2'], 'IPv6': ['fe80::1']}} self.firewall.sg_rules['fake_sgid'].remove( {'direction': 'ingress', 'remote_group_id': 'fake_sgid', 'ethertype': 'IPv4'}) self.firewall.sg_rules.update() self.firewall._defer_apply = True port = self._fake_port() self.firewall.filtered_ports['tapfake_dev'] = port self.firewall._pre_defer_filtered_ports = {} self.firewall._pre_defer_unfiltered_ports = {} self.firewall.filter_defer_apply_off() calls = [mock.call.destroy('fake_sgid', 'IPv4')] self.firewall.ipset.assert_has_calls(calls, True) def test_sg_rule_expansion_with_remote_ips(self): 
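# A rule carrying a remote_group_id is expanded into one concrete rule
# per member IP, skipping the port's own address. Sketched from the
# assertion below:
#   {'direction': 'ingress', 'remote_group_id': FAKE_SGID,
#    'ethertype': 'IPv4'}
#   -> one copy per remote member with 'source_ip_prefix': '<ip>/32'.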
other_ips = ['10.0.0.2', '10.0.0.3', '10.0.0.4'] self.firewall.sg_members = {'fake_sgid': { 'IPv4': [FAKE_IP['IPv4']] + other_ips, 'IPv6': [FAKE_IP['IPv6']]}} port = self._fake_port() rule = self._fake_sg_rule_for_ethertype(_IPv4, FAKE_SGID) rules = self.firewall._expand_sg_rule_with_remote_ips( rule, port, 'ingress') self.assertEqual(list(rules), [dict(list(rule.items()) + [('source_ip_prefix', '%s/32' % ip)]) for ip in other_ips]) def test_build_ipv4v6_mac_ip_list(self): mac_oth = 'ffff-ff0f-ffff' mac_unix = 'FF:FF:FF:0F:FF:FF' ipv4 = FAKE_IP['IPv4'] ipv6 = FAKE_IP['IPv6'] fake_ipv4_pair = [] fake_ipv4_pair.append((mac_unix, ipv4)) fake_ipv6_pair = [] fake_ipv6_pair.append((mac_unix, ipv6)) fake_ipv6_pair.append((mac_unix, 'fe80::fdff:ffff:fe0f:ffff')) mac_ipv4_pairs = [] mac_ipv6_pairs = [] self.firewall._build_ipv4v6_mac_ip_list(mac_oth, ipv4, mac_ipv4_pairs, mac_ipv6_pairs) self.assertEqual(fake_ipv4_pair, mac_ipv4_pairs) self.firewall._build_ipv4v6_mac_ip_list(mac_oth, ipv6, mac_ipv4_pairs, mac_ipv6_pairs) self.assertEqual(fake_ipv6_pair, mac_ipv6_pairs) # ensure that LLA is not added again for another v6 addr ipv62 = 'fe81::1' self.firewall._build_ipv4v6_mac_ip_list(mac_oth, ipv62, mac_ipv4_pairs, mac_ipv6_pairs) fake_ipv6_pair.append((mac_unix, ipv62)) self.assertEqual(fake_ipv6_pair, mac_ipv6_pairs) class OVSHybridIptablesFirewallTestCase(BaseIptablesFirewallTestCase): def test__populate_initial_zone_map(self): self.assertEqual(self._dev_zone_map, self.firewall.ipconntrack._device_zone_map) def test__generate_device_zone(self): # initial data has 4097, 4098, and 4105 in use. # we fill from top up first. self.assertEqual(4106, self.firewall.ipconntrack._generate_device_zone('test')) # once it's maxed out, it scans for gaps self.firewall.ipconntrack._device_zone_map['someport'] = ( ip_conntrack.MAX_CONNTRACK_ZONES) for i in range(4099, 4105): self.assertEqual(i, self.firewall.ipconntrack._generate_device_zone(i)) # 4105 and 4106 are taken so next should be 4107 self.assertEqual(4107, self.firewall.ipconntrack._generate_device_zone('p11')) # take out zone 4097 and make sure it's selected self.firewall.ipconntrack._device_zone_map.pop('e804433b-61') self.assertEqual(4097, self.firewall.ipconntrack._generate_device_zone('p1')) # fill it up and then make sure an extra throws an error for i in range(ip_conntrack.ZONE_START, ip_conntrack.MAX_CONNTRACK_ZONES): self.firewall.ipconntrack._device_zone_map['dev-%s' % i] = i with testtools.ExpectedException(exceptions.CTZoneExhaustedError): self.firewall.ipconntrack._find_open_zone() # with it full, try again, this should trigger a cleanup # and return 4097 self.assertEqual(ip_conntrack.ZONE_START, self.firewall.ipconntrack._generate_device_zone('p12')) self.assertEqual({'p12': ip_conntrack.ZONE_START}, self.firewall.ipconntrack._device_zone_map) def test_get_device_zone(self): dev = {'device': 'tap1234', 'network_id': '12345678901234567'} # initial data has 4097, 4098, and 4105 in use. 
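# Zone allocation fills upward from the highest zone in use
# (4105 -> 4106); only when MAX_CONNTRACK_ZONES is hit does it scan
# for gaps from ZONE_START, and a completely full map raises
# CTZoneExhaustedError (both exercised in test__generate_device_zone
# above).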
self.assertEqual(4106, self.firewall.ipconntrack.get_device_zone(dev)) # should have been truncated to 11 chars self._dev_zone_map.update({'12345678901': 4106}) self.assertEqual(self._dev_zone_map, self.firewall.ipconntrack._device_zone_map) def test_multiple_firewall_with_common_conntrack(self): self.firewall1 = iptables_firewall.OVSHybridIptablesFirewallDriver() self.firewall2 = iptables_firewall.OVSHybridIptablesFirewallDriver() self.assertEqual(id(self.firewall1.ipconntrack), id(self.firewall2.ipconntrack))
neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/linux/test_iptables_manager.py
# Copyright 2012 Locaweb. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import fixtures import mock from neutron_lib import constants from neutron_lib import exceptions from neutron_lib.exceptions import l3 as l3_exc from oslo_config import cfg import testtools from neutron._i18n import _ from neutron.agent.linux import iptables_comments as ic from neutron.agent.linux import iptables_manager from neutron.agent.linux import utils as linux_utils from neutron.tests import base from neutron.tests import tools IPTABLES_ARG = {'bn': iptables_manager.binary_name, 'snat_out_comment': ic.SNAT_OUT, 'filter_rules': '', 'mark': constants.ROUTER_MARK_MASK} NAT_TEMPLATE = ('# Generated by iptables_manager\n' '*nat\n' ':OUTPUT - [0:0]\n' ':POSTROUTING - [0:0]\n' ':PREROUTING - [0:0]\n' ':neutron-postrouting-bottom - [0:0]\n' ':%(bn)s-OUTPUT - [0:0]\n' ':%(bn)s-POSTROUTING - [0:0]\n' ':%(bn)s-PREROUTING - [0:0]\n' ':%(bn)s-float-snat - [0:0]\n' ':%(bn)s-snat - [0:0]\n' '-I OUTPUT 1 -j %(bn)s-OUTPUT\n' '-I POSTROUTING 1 -j %(bn)s-POSTROUTING\n' '-I POSTROUTING 2 -j neutron-postrouting-bottom\n' '-I PREROUTING 1 -j %(bn)s-PREROUTING\n' '-I neutron-postrouting-bottom 1 -j %(bn)s-snat\n' '-I %(bn)s-snat 1 -j ' '%(bn)s-float-snat\n' 'COMMIT\n' '# Completed by iptables_manager\n') NAT_DUMP = NAT_TEMPLATE % IPTABLES_ARG FILTER_TEMPLATE = ('# Generated by iptables_manager\n' '*filter\n' ':FORWARD - [0:0]\n' ':INPUT - [0:0]\n' ':OUTPUT - [0:0]\n' ':neutron-filter-top - [0:0]\n' ':%(bn)s-FORWARD - [0:0]\n' ':%(bn)s-INPUT - [0:0]\n' ':%(bn)s-OUTPUT - [0:0]\n' ':%(bn)s-local - [0:0]\n' '-I FORWARD 1 -j neutron-filter-top\n' '-I FORWARD 2 -j %(bn)s-FORWARD\n' '-I INPUT 1 -j %(bn)s-INPUT\n' '-I OUTPUT 1 -j neutron-filter-top\n' '-I OUTPUT 2 -j %(bn)s-OUTPUT\n' '-I neutron-filter-top 1 -j %(bn)s-local\n' 'COMMIT\n' '# Completed by iptables_manager\n') FILTER_DUMP = FILTER_TEMPLATE % IPTABLES_ARG FILTER_WITH_RULES_TEMPLATE = ( '# Generated by iptables_manager\n' '*filter\n' ':FORWARD - [0:0]\n' ':INPUT - [0:0]\n' ':OUTPUT - [0:0]\n' ':neutron-filter-top - [0:0]\n' ':%(bn)s-FORWARD - [0:0]\n' ':%(bn)s-INPUT - [0:0]\n' ':%(bn)s-OUTPUT - [0:0]\n' ':%(bn)s-filter - [0:0]\n' ':%(bn)s-local - [0:0]\n' '-I FORWARD 1 -j 
neutron-filter-top\n' '-I FORWARD 2 -j %(bn)s-FORWARD\n' '-I INPUT 1 -j %(bn)s-INPUT\n' '-I OUTPUT 1 -j neutron-filter-top\n' '-I OUTPUT 2 -j %(bn)s-OUTPUT\n' '-I neutron-filter-top 1 -j %(bn)s-local\n' '%(filter_rules)s' 'COMMIT\n' '# Completed by iptables_manager\n') COMMENTED_NAT_DUMP = ( '# Generated by iptables_manager\n' '*nat\n' ':OUTPUT - [0:0]\n' ':POSTROUTING - [0:0]\n' ':PREROUTING - [0:0]\n' ':neutron-postrouting-bottom - [0:0]\n' ':%(bn)s-OUTPUT - [0:0]\n' ':%(bn)s-POSTROUTING - [0:0]\n' ':%(bn)s-PREROUTING - [0:0]\n' ':%(bn)s-float-snat - [0:0]\n' ':%(bn)s-snat - [0:0]\n' '-I OUTPUT 1 -j %(bn)s-OUTPUT\n' '-I POSTROUTING 1 -j %(bn)s-POSTROUTING\n' '-I POSTROUTING 2 -j neutron-postrouting-bottom\n' '-I PREROUTING 1 -j %(bn)s-PREROUTING\n' '-I neutron-postrouting-bottom 1 ' '-m comment --comment "%(snat_out_comment)s" -j %(bn)s-snat\n' '-I %(bn)s-snat 1 -j ' '%(bn)s-float-snat\n' 'COMMIT\n' '# Completed by iptables_manager\n' % IPTABLES_ARG) TRAFFIC_COUNTERS_DUMP = ( 'Chain OUTPUT (policy ACCEPT 400 packets, 65901 bytes)\n' ' pkts bytes target prot opt in out source' ' destination \n' ' 400 65901 chain1 all -- * * 0.0.0.0/0' ' 0.0.0.0/0 \n' ' 400 65901 chain2 all -- * * 0.0.0.0/0' ' 0.0.0.0/0 \n') FILTER_RESTORE_DUMP = ('# Generated by iptables_manager\n' '*filter\n' ':FORWARD - [0:0]\n' ':INPUT - [0:0]\n' ':OUTPUT - [0:0]\n' ':neutron-filter-top - [0:0]\n' ':%(bn)s-FORWARD - [0:0]\n' ':%(bn)s-INPUT - [0:0]\n' ':%(bn)s-OUTPUT - [0:0]\n' ':%(bn)s-test-filter - [0:0]\n' ':%(bn)s-local - [0:0]\n' '-A FORWARD -j neutron-filter-top\n' '-A FORWARD -j %(bn)s-FORWARD\n' '-A INPUT -j %(bn)s-INPUT\n' '-A OUTPUT -j neutron-filter-top\n' '-A OUTPUT -j %(bn)s-OUTPUT\n' '-A neutron-filter-top -j %(bn)s-local\n' '%(filter_rules)s' 'COMMIT\n' '# Completed by iptables_manager\n') NAT_RESTORE_TEMPLATE = ('# Generated by iptables_manager\n' '*nat\n' ':OUTPUT - [0:0]\n' ':POSTROUTING - [0:0]\n' ':PREROUTING - [0:0]\n' ':neutron-postrouting-bottom - [0:0]\n' ':%(bn)s-OUTPUT - [0:0]\n' ':%(bn)s-POSTROUTING - [0:0]\n' ':%(bn)s-PREROUTING - [0:0]\n' ':%(bn)s-float-snat - [0:0]\n' ':%(bn)s-snat - [0:0]\n' '-A OUTPUT -j %(bn)s-OUTPUT\n' '-A POSTROUTING -j %(bn)s-POSTROUTING\n' '-A POSTROUTING -j neutron-postrouting-bottom\n' '-A PREROUTING -j %(bn)s-PREROUTING\n' '-A neutron-postrouting-bottom -j %(bn)s-snat\n' '-A %(bn)s-snat -j ' '%(bn)s-float-snat\n' 'COMMIT\n' '# Completed by iptables_manager\n') NAT_RESTORE_DUMP = NAT_RESTORE_TEMPLATE % IPTABLES_ARG class IptablesTestCase(base.BaseTestCase): def test_get_binary_name_in_unittest(self): # Corresponds to sys.argv content when running python -m unittest class with mock.patch('sys.argv', ['python -m unittest', 'class']): binary_name = iptables_manager.get_binary_name() self.assertEqual('python_-m_unitte', binary_name) class IptablesCommentsTestCase(base.BaseTestCase): def setUp(self): super(IptablesCommentsTestCase, self).setUp() cfg.CONF.set_override('comment_iptables_rules', True, 'AGENT') self.iptables = iptables_manager.IptablesManager() self.execute = mock.patch.object(self.iptables, "execute").start() def test_comments_short_enough(self): for attr in dir(ic): if not attr.startswith('__') and len(getattr(ic, attr)) > 255: self.fail("Iptables comment %s is longer than 255 characters." 
% attr) def test_reordering_of_jump_rule_comments(self): # jump at the start self.assertEqual( '-m comment --comment "aloha" -j sg-chain', iptables_manager.comment_rule('-j sg-chain', 'aloha')) # jump in the middle self.assertEqual( '-s source -m comment --comment "aloha" -j sg-chain', iptables_manager.comment_rule('-s source -j sg-chain', 'aloha')) # no jump rule self.assertEqual( '-s source -m comment --comment "aloha"', iptables_manager.comment_rule('-s source', 'aloha')) def test_add_filter_rule(self): iptables_args = {} iptables_args.update(IPTABLES_ARG) filter_rules = ('-I %(bn)s-INPUT 1 -s 0/0 -d 192.168.0.2 -j ' '%(bn)s-filter\n-I %(bn)s-filter 1 -j DROP\n' % iptables_args) iptables_args['filter_rules'] = filter_rules filter_dump_mod = FILTER_WITH_RULES_TEMPLATE % iptables_args raw_dump = _generate_raw_dump(IPTABLES_ARG) mangle_dump = _generate_mangle_dump(IPTABLES_ARG) expected_calls_and_values = [ (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(filter_dump_mod + mangle_dump + COMMENTED_NAT_DUMP + raw_dump), run_as_root=True, log_fail_as_error=False), None), (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(FILTER_DUMP + mangle_dump + COMMENTED_NAT_DUMP + raw_dump), run_as_root=True, log_fail_as_error=False), None), ] tools.setup_mock_calls(self.execute, expected_calls_and_values) self.iptables.ipv4['filter'].add_chain('filter') self.iptables.ipv4['filter'].add_rule('filter', '-j DROP') self.iptables.ipv4['filter'].add_rule('INPUT', '-s 0/0 -d 192.168.0.2 -j' ' %(bn)s-filter' % IPTABLES_ARG) self.iptables.apply() self.iptables.ipv4['filter'].remove_rule('filter', '-j DROP') self.iptables.ipv4['filter'].remove_rule('INPUT', '-s 0/0 -d 192.168.0.2 -j' ' %(bn)s-filter' % IPTABLES_ARG) self.iptables.ipv4['filter'].remove_chain('filter') self.iptables.apply() tools.verify_mock_calls(self.execute, expected_calls_and_values) def _generate_mangle_dump(iptables_args): return ('# Generated by iptables_manager\n' '*mangle\n' ':FORWARD - [0:0]\n' ':INPUT - [0:0]\n' ':OUTPUT - [0:0]\n' ':POSTROUTING - [0:0]\n' ':PREROUTING - [0:0]\n' ':%(bn)s-FORWARD - [0:0]\n' ':%(bn)s-INPUT - [0:0]\n' ':%(bn)s-OUTPUT - [0:0]\n' ':%(bn)s-POSTROUTING - [0:0]\n' ':%(bn)s-PREROUTING - [0:0]\n' ':%(bn)s-mark - [0:0]\n' '-I FORWARD 1 -j %(bn)s-FORWARD\n' '-I INPUT 1 -j %(bn)s-INPUT\n' '-I OUTPUT 1 -j %(bn)s-OUTPUT\n' '-I POSTROUTING 1 -j %(bn)s-POSTROUTING\n' '-I PREROUTING 1 -j %(bn)s-PREROUTING\n' '-I %(bn)s-PREROUTING 1 -j %(bn)s-mark\n' 'COMMIT\n' '# Completed by iptables_manager\n' % iptables_args) def _generate_mangle_dump_v6(iptables_args): return ('# Generated by iptables_manager\n' '*mangle\n' ':FORWARD - [0:0]\n' ':INPUT - [0:0]\n' ':OUTPUT - [0:0]\n' ':POSTROUTING - [0:0]\n' ':PREROUTING - [0:0]\n' ':%(bn)s-FORWARD - [0:0]\n' ':%(bn)s-INPUT - [0:0]\n' ':%(bn)s-OUTPUT - [0:0]\n' ':%(bn)s-POSTROUTING - [0:0]\n' ':%(bn)s-PREROUTING - [0:0]\n' '-I FORWARD 1 -j %(bn)s-FORWARD\n' '-I INPUT 1 -j %(bn)s-INPUT\n' '-I OUTPUT 1 -j %(bn)s-OUTPUT\n' '-I POSTROUTING 1 -j %(bn)s-POSTROUTING\n' '-I PREROUTING 1 -j %(bn)s-PREROUTING\n' 'COMMIT\n' '# Completed by iptables_manager\n' % iptables_args) def _generate_raw_dump(iptables_args): return ('# Generated by iptables_manager\n' '*raw\n' ':OUTPUT - [0:0]\n' ':PREROUTING - [0:0]\n' ':%(bn)s-OUTPUT - [0:0]\n' ':%(bn)s-PREROUTING - [0:0]\n' '-I OUTPUT 1 -j %(bn)s-OUTPUT\n' '-I PREROUTING 1 -j %(bn)s-PREROUTING\n' 'COMMIT\n' '# Completed by 
iptables_manager\n' % iptables_args) def _generate_mangle_restore_dump(iptables_args): return ('# Generated by iptables_manager\n' '*mangle\n' ':FORWARD - [0:0]\n' ':INPUT - [0:0]\n' ':OUTPUT - [0:0]\n' ':POSTROUTING - [0:0]\n' ':PREROUTING - [0:0]\n' ':%(bn)s-FORWARD - [0:0]\n' ':%(bn)s-INPUT - [0:0]\n' ':%(bn)s-OUTPUT - [0:0]\n' ':%(bn)s-POSTROUTING - [0:0]\n' ':%(bn)s-PREROUTING - [0:0]\n' ':%(bn)s-mark - [0:0]\n' '-A FORWARD -j %(bn)s-FORWARD\n' '-A INPUT -j %(bn)s-INPUT\n' '-A OUTPUT -j %(bn)s-OUTPUT\n' '-A POSTROUTING -j %(bn)s-POSTROUTING\n' '-A PREROUTING -j %(bn)s-PREROUTING\n' '-A %(bn)s-PREROUTING -j %(bn)s-mark\n' 'COMMIT\n' '# Completed by iptables_manager\n' % iptables_args) def _generate_raw_restore_dump(iptables_args): return ('# Generated by iptables_manager\n' '*raw\n' ':OUTPUT - [0:0]\n' ':PREROUTING - [0:0]\n' ':%(bn)s-OUTPUT - [0:0]\n' ':%(bn)s-PREROUTING - [0:0]\n' '-A OUTPUT -j %(bn)s-OUTPUT\n' '-A PREROUTING -j %(bn)s-PREROUTING\n' 'COMMIT\n' '# Completed by iptables_manager\n' % iptables_args) MANGLE_DUMP = _generate_mangle_dump(IPTABLES_ARG) MANGLE_DUMP_V6 = _generate_mangle_dump_v6(IPTABLES_ARG) RAW_DUMP = _generate_raw_dump(IPTABLES_ARG) MANGLE_RESTORE_DUMP = _generate_mangle_restore_dump(IPTABLES_ARG) RAW_RESTORE_DUMP = _generate_raw_restore_dump(IPTABLES_ARG) class IptablesFixture(fixtures.Fixture): def _setUp(self): # We MUST save and restore use_table_lock because it is a class # attribute and could change state in some tests, which can cause # the other iptables_manager test cases to randomly fail due to # race conditions. self.use_table_lock = iptables_manager.IptablesManager.use_table_lock iptables_manager.IptablesManager.use_table_lock = False self.addCleanup(self._reset) def _reset(self): iptables_manager.IptablesManager.use_table_lock = self.use_table_lock class IptablesManagerBaseTestCase(base.BaseTestCase): def setUp(self): super(IptablesManagerBaseTestCase, self).setUp() cfg.CONF.set_override('comment_iptables_rules', False, 'AGENT') cfg.CONF.set_override('report_interval', 30, 'AGENT') self.execute = mock.patch.object(linux_utils, "execute").start() self.useFixture(IptablesFixture()) def _extend_with_ip6tables_filter_end(self, expected_calls, filter_dump): expected_calls.extend([ (mock.call(['ip6tables-save'], run_as_root=True), ''), (mock.call(['ip6tables-restore', '-n'], process_input=filter_dump, run_as_root=True, log_fail_as_error=False), None)]) def _extend_with_ip6tables_filter(self, expected_calls, filter_dump): expected_calls.insert(2, ( mock.call(['ip6tables-save'], run_as_root=True), '')) expected_calls.insert(3, ( mock.call(['ip6tables-restore', '-n'], process_input=filter_dump, run_as_root=True, log_fail_as_error=False), None)) self._extend_with_ip6tables_filter_end(expected_calls, filter_dump) class IptablesManagerStateFulTestCase(IptablesManagerBaseTestCase): use_ipv6 = False def setUp(self): super(IptablesManagerStateFulTestCase, self).setUp() self.iptables = iptables_manager.IptablesManager( use_ipv6=self.use_ipv6) def test_binary_name(self): expected = os.path.basename(sys.argv[0])[:16] self.assertEqual(expected, iptables_manager.binary_name) def test_get_chain_name(self): name = '0123456789' * 5 # 28 chars is the maximum length of iptables chain name. self.assertEqual(iptables_manager.get_chain_name(name, wrap=False), name[:28]) # 11 chars is the maximum length of chain name of iptable_manager # if binary_name is prepended. 
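# e.g. with the 16-char binary-name prefix plus the '-' separator,
# 16 + 1 + 11 == 28, the kernel's maximum iptables chain-name length.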
self.assertEqual(iptables_manager.get_chain_name(name, wrap=True), name[:11]) def test_defer_apply_with_exception(self): self.iptables._apply = mock.Mock(side_effect=Exception) with testtools.ExpectedException(l3_exc.IpTablesApplyException): with self.iptables.defer_apply(): pass def test_add_and_remove_chain(self): filter_dump_mod = FILTER_WITH_RULES_TEMPLATE % IPTABLES_ARG expected_calls_and_values = [ (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(filter_dump_mod + MANGLE_DUMP + NAT_DUMP + RAW_DUMP), run_as_root=True, log_fail_as_error=False), None), (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(FILTER_DUMP + MANGLE_DUMP + NAT_DUMP + RAW_DUMP), run_as_root=True, log_fail_as_error=False), None), ] if self.use_ipv6: self._extend_with_ip6tables_filter( expected_calls_and_values, FILTER_DUMP + MANGLE_DUMP_V6 + RAW_DUMP) tools.setup_mock_calls(self.execute, expected_calls_and_values) self.iptables.ipv4['filter'].add_chain('filter') self.iptables.apply() self.iptables.ipv4['filter'].remove_chain('filter') self.iptables.apply() tools.verify_mock_calls(self.execute, expected_calls_and_values) def test_add_filter_rule(self): iptables_args = {} iptables_args.update(IPTABLES_ARG) filter_rules = ('-I %(bn)s-INPUT 1 -s 0/0 -d 192.168.0.2 -j ' '%(bn)s-filter\n-I %(bn)s-filter 1 -j DROP\n' % iptables_args) iptables_args['filter_rules'] = filter_rules filter_dump_mod = FILTER_WITH_RULES_TEMPLATE % iptables_args raw_dump = RAW_DUMP % IPTABLES_ARG expected_calls_and_values = [ (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(filter_dump_mod + MANGLE_DUMP + NAT_DUMP + RAW_DUMP), run_as_root=True, log_fail_as_error=False), None), (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(FILTER_DUMP + MANGLE_DUMP + NAT_DUMP + RAW_DUMP), run_as_root=True, log_fail_as_error=False), None), ] if self.use_ipv6: self._extend_with_ip6tables_filter( expected_calls_and_values, FILTER_DUMP + MANGLE_DUMP_V6 + raw_dump) tools.setup_mock_calls(self.execute, expected_calls_and_values) self.iptables.ipv4['filter'].add_chain('filter') self.iptables.ipv4['filter'].add_rule('filter', '-j DROP') self.iptables.ipv4['filter'].add_rule('INPUT', '-s 0/0 -d 192.168.0.2 -j' ' %(bn)s-filter' % IPTABLES_ARG) self.iptables.apply() self.iptables.ipv4['filter'].remove_rule('filter', '-j DROP') self.iptables.ipv4['filter'].remove_rule('INPUT', '-s 0/0 -d 192.168.0.2 -j' ' %(bn)s-filter' % IPTABLES_ARG) self.iptables.ipv4['filter'].remove_chain('filter') self.iptables.apply() tools.verify_mock_calls(self.execute, expected_calls_and_values) def test_rule_with_wrap_target(self): name = '0123456789' * 5 wrap = "%s-%s" % (iptables_manager.binary_name, iptables_manager.get_chain_name(name)) iptables_args = {'bn': iptables_manager.binary_name, 'wrap': wrap} filter_dump_mod = ('# Generated by iptables_manager\n' '*filter\n' ':FORWARD - [0:0]\n' ':INPUT - [0:0]\n' ':OUTPUT - [0:0]\n' ':neutron-filter-top - [0:0]\n' ':%(wrap)s - [0:0]\n' ':%(bn)s-FORWARD - [0:0]\n' ':%(bn)s-INPUT - [0:0]\n' ':%(bn)s-OUTPUT - [0:0]\n' ':%(bn)s-local - [0:0]\n' '-I FORWARD 1 -j neutron-filter-top\n' '-I FORWARD 2 -j %(bn)s-FORWARD\n' '-I INPUT 1 -j %(bn)s-INPUT\n' '-I OUTPUT 1 -j neutron-filter-top\n' '-I OUTPUT 2 -j %(bn)s-OUTPUT\n' '-I neutron-filter-top 1 -j %(bn)s-local\n' '-I %(bn)s-INPUT 1 -s 0/0 -d 192.168.0.2 -j ' '%(wrap)s\n' 
'COMMIT\n' '# Completed by iptables_manager\n' % iptables_args) raw_dump = RAW_DUMP % IPTABLES_ARG expected_calls_and_values = [ (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(filter_dump_mod + MANGLE_DUMP + NAT_DUMP + RAW_DUMP), run_as_root=True, log_fail_as_error=False), None), (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(FILTER_DUMP + MANGLE_DUMP + NAT_DUMP + RAW_DUMP), run_as_root=True, log_fail_as_error=False), None), ] if self.use_ipv6: self._extend_with_ip6tables_filter( expected_calls_and_values, FILTER_DUMP + MANGLE_DUMP_V6 + raw_dump) tools.setup_mock_calls(self.execute, expected_calls_and_values) self.iptables.ipv4['filter'].add_chain(name) self.iptables.ipv4['filter'].add_rule('INPUT', '-s 0/0 -d 192.168.0.2 -j' ' $%s' % name) self.iptables.apply() self.iptables.ipv4['filter'].remove_rule('INPUT', '-s 0/0 -d 192.168.0.2 -j' ' $%s' % name) self.iptables.ipv4['filter'].remove_chain(name) self.iptables.apply() tools.verify_mock_calls(self.execute, expected_calls_and_values) def test_add_mangle_rule(self): mangle_dump_mod = ( '# Generated by iptables_manager\n' '*mangle\n' ':FORWARD - [0:0]\n' ':INPUT - [0:0]\n' ':OUTPUT - [0:0]\n' ':POSTROUTING - [0:0]\n' ':PREROUTING - [0:0]\n' ':%(bn)s-FORWARD - [0:0]\n' ':%(bn)s-INPUT - [0:0]\n' ':%(bn)s-OUTPUT - [0:0]\n' ':%(bn)s-POSTROUTING - [0:0]\n' ':%(bn)s-PREROUTING - [0:0]\n' ':%(bn)s-mangle - [0:0]\n' ':%(bn)s-mark - [0:0]\n' '-I FORWARD 1 -j %(bn)s-FORWARD\n' '-I INPUT 1 -j %(bn)s-INPUT\n' '-I OUTPUT 1 -j %(bn)s-OUTPUT\n' '-I POSTROUTING 1 -j %(bn)s-POSTROUTING\n' '-I PREROUTING 1 -j %(bn)s-PREROUTING\n' '-I %(bn)s-PREROUTING 1 -j %(bn)s-mark\n' '-I %(bn)s-PREROUTING 2 -j MARK --set-xmark 0x1/%(mark)s\n' 'COMMIT\n' '# Completed by iptables_manager\n' % IPTABLES_ARG) expected_calls_and_values = [ (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(FILTER_DUMP + mangle_dump_mod + NAT_DUMP + RAW_DUMP), run_as_root=True, log_fail_as_error=False), None), (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(FILTER_DUMP + MANGLE_DUMP + NAT_DUMP + RAW_DUMP), run_as_root=True, log_fail_as_error=False), None), ] if self.use_ipv6: self._extend_with_ip6tables_filter( expected_calls_and_values, FILTER_DUMP + MANGLE_DUMP_V6 + RAW_DUMP) tools.setup_mock_calls(self.execute, expected_calls_and_values) self.iptables.ipv4['mangle'].add_chain('mangle') self.iptables.ipv4['mangle'].add_rule( 'PREROUTING', '-j MARK --set-xmark 0x1/%s' % constants.ROUTER_MARK_MASK) self.iptables.apply() self.iptables.ipv4['mangle'].remove_rule( 'PREROUTING', '-j MARK --set-xmark 0x1/%s' % constants.ROUTER_MARK_MASK) self.iptables.ipv4['mangle'].remove_chain('mangle') self.iptables.apply() tools.verify_mock_calls(self.execute, expected_calls_and_values) def test_add_nat_rule(self): nat_dump = NAT_TEMPLATE % IPTABLES_ARG nat_dump_mod = ('# Generated by iptables_manager\n' '*nat\n' ':OUTPUT - [0:0]\n' ':POSTROUTING - [0:0]\n' ':PREROUTING - [0:0]\n' ':neutron-postrouting-bottom - [0:0]\n' ':%(bn)s-OUTPUT - [0:0]\n' ':%(bn)s-POSTROUTING - [0:0]\n' ':%(bn)s-PREROUTING - [0:0]\n' ':%(bn)s-float-snat - [0:0]\n' ':%(bn)s-nat - [0:0]\n' ':%(bn)s-snat - [0:0]\n' '-I OUTPUT 1 -j %(bn)s-OUTPUT\n' '-I POSTROUTING 1 -j %(bn)s-POSTROUTING\n' '-I POSTROUTING 2 -j neutron-postrouting-bottom\n' '-I PREROUTING 1 -j %(bn)s-PREROUTING\n' '-I 
neutron-postrouting-bottom 1 -j %(bn)s-snat\n' '-I %(bn)s-PREROUTING 1 -d 192.168.0.3 -j ' '%(bn)s-nat\n' '-I %(bn)s-nat 1 -p tcp --dport 8080 -j ' 'REDIRECT --to-port 80\n' '-I %(bn)s-snat 1 -j %(bn)s-float-snat\n' 'COMMIT\n' '# Completed by iptables_manager\n' % IPTABLES_ARG) raw_dump = RAW_DUMP % IPTABLES_ARG expected_calls_and_values = [ (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(FILTER_DUMP + MANGLE_DUMP + nat_dump_mod + RAW_DUMP), run_as_root=True, log_fail_as_error=False), None), (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(FILTER_DUMP + MANGLE_DUMP + nat_dump + RAW_DUMP), run_as_root=True, log_fail_as_error=False), None), ] if self.use_ipv6: self._extend_with_ip6tables_filter( expected_calls_and_values, FILTER_DUMP + MANGLE_DUMP_V6 + raw_dump) tools.setup_mock_calls(self.execute, expected_calls_and_values) self.iptables.ipv4['nat'].add_chain('nat') self.iptables.ipv4['nat'].add_rule('PREROUTING', '-d 192.168.0.3 -j ' '%(bn)s-nat' % IPTABLES_ARG) self.iptables.ipv4['nat'].add_rule('nat', '-p tcp --dport 8080' + ' -j REDIRECT --to-port 80') self.iptables.apply() self.iptables.ipv4['nat'].remove_rule('nat', '-p tcp --dport 8080 -j' ' REDIRECT --to-port 80') self.iptables.ipv4['nat'].remove_rule('PREROUTING', '-d 192.168.0.3 -j ' '%(bn)s-nat' % IPTABLES_ARG) self.iptables.ipv4['nat'].remove_chain('nat') self.iptables.apply() tools.verify_mock_calls(self.execute, expected_calls_and_values) def test_add_raw_rule(self): raw_dump_mod = ('# Generated by iptables_manager\n' '*raw\n' ':OUTPUT - [0:0]\n' ':PREROUTING - [0:0]\n' ':%(bn)s-OUTPUT - [0:0]\n' ':%(bn)s-PREROUTING - [0:0]\n' ':%(bn)s-raw - [0:0]\n' '-I OUTPUT 1 -j %(bn)s-OUTPUT\n' '-I PREROUTING 1 -j %(bn)s-PREROUTING\n' '-I %(bn)s-PREROUTING 1 -j CT --notrack\n' 'COMMIT\n' '# Completed by iptables_manager\n' % IPTABLES_ARG) expected_calls_and_values = [ (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(FILTER_DUMP + MANGLE_DUMP + NAT_DUMP + raw_dump_mod), run_as_root=True, log_fail_as_error=False), None), (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(FILTER_DUMP + MANGLE_DUMP + NAT_DUMP + RAW_DUMP), run_as_root=True, log_fail_as_error=False), None), ] if self.use_ipv6: self._extend_with_ip6tables_filter( expected_calls_and_values, FILTER_DUMP + MANGLE_DUMP_V6 + RAW_DUMP) tools.setup_mock_calls(self.execute, expected_calls_and_values) self.iptables.ipv4['raw'].add_chain('raw') self.iptables.ipv4['raw'].add_rule('PREROUTING', '-j CT --notrack') self.iptables.apply() self.iptables.ipv4['raw'].remove_rule('PREROUTING', '-j CT --notrack') self.iptables.ipv4['raw'].remove_chain('raw') self.iptables.apply() tools.verify_mock_calls(self.execute, expected_calls_and_values) def test_add_rule_to_a_nonexistent_chain(self): if self.use_ipv6: add_rule = self.iptables.ipv6['filter'].add_rule else: add_rule = self.iptables.ipv4['filter'].add_rule self.assertRaises(LookupError, add_rule, 'nonexistent', '-j DROP') def test_remove_nonexistent_chain(self): if self.use_ipv6: remove_chain = self.iptables.ipv6['filter'].remove_chain else: remove_chain = self.iptables.ipv4['filter'].remove_chain with mock.patch.object(iptables_manager, "LOG") as log: remove_chain('nonexistent') log.debug.assert_called_once_with( 'Attempted to remove chain %s which does not exist', 'nonexistent') def 
test_remove_nonexistent_rule(self): if self.use_ipv6: remove_rule = self.iptables.ipv6['filter'].remove_rule else: remove_rule = self.iptables.ipv4['filter'].remove_rule with mock.patch.object(iptables_manager, "LOG") as log: remove_rule('nonexistent', '-j DROP') log.warning.assert_called_once_with( 'Tried to remove rule that was not there: ' '%(chain)r %(rule)r %(wrap)r %(top)r', {'wrap': True, 'top': False, 'rule': '-j DROP', 'chain': 'nonexistent'}) def test_iptables__apply_synchronized_no_namespace(self): self.execute.side_effect = RuntimeError # no namespace set so should raise self.assertRaises(RuntimeError, self.iptables._apply_synchronized) self.iptables.namespace = 'test' with mock.patch('neutron.agent.linux.ip_lib.network_namespace_exists', return_value=True): self.assertRaises(RuntimeError, self.iptables._apply_synchronized) with mock.patch('neutron.agent.linux.ip_lib.network_namespace_exists', return_value=False): self.assertEqual([], self.iptables._apply_synchronized()) def test_iptables_failure_with_no_failing_line_number(self): with mock.patch.object(iptables_manager, "LOG") as log: # generate Runtime errors on iptables-restore calls def iptables_restore_failer(*args, **kwargs): if 'iptables-restore' in args[0]: self.input_lines = kwargs['process_input'].split('\n') # don't provide a specific failure message so all lines # are logged raise RuntimeError() return FILTER_DUMP self.execute.side_effect = iptables_restore_failer # _apply_synchronized calls iptables-restore so it should raise # a RuntimeError self.assertRaises(RuntimeError, self.iptables._apply_synchronized) # The RuntimeError should have triggered a log of the input to the # process that it failed to execute. Verify by comparing the log # call to the 'process_input' arg given to the failed iptables-restore # call. # Failure without a specific line number in the error should cause # all lines to be logged with numbers. logged = ['%7d. %s' % (n, l) for n, l in enumerate(self.input_lines, 1)] log.error.assert_called_once_with(_( 'IPTablesManager.apply failed to apply the ' 'following set of iptables rules:\n%s'), '\n'.join(logged) ) def test_iptables_failure(self): with mock.patch.object(iptables_manager, "LOG") as log: # generate Runtime errors on iptables-restore calls def iptables_restore_failer(*args, **kwargs): if 'iptables-restore' in args[0]: self.input_lines = kwargs['process_input'].split('\n') # pretend line 11 failed msg = ("Exit code: 1\nStdout: ''\n" "Stderr: 'iptables-restore: line 11 failed\n'") raise exceptions.ProcessExecutionError( msg, iptables_manager.XTABLES_RESOURCE_PROBLEM_CODE) return FILTER_DUMP self.execute.side_effect = iptables_restore_failer # _apply_synchronized calls iptables-restore so it should raise # a RuntimeError self.assertRaises(RuntimeError, self.iptables._apply_synchronized) # check that we tried with -w when the first attempt failed self.execute.assert_has_calls( [mock.call(['iptables-restore', '-n'], process_input=mock.ANY, run_as_root=True, log_fail_as_error=False), mock.call(['iptables-restore', '-n', '-w', '10', '-W', iptables_manager.XLOCK_WAIT_INTERVAL], process_input=mock.ANY, run_as_root=True)]) # The RuntimeError should have triggered a log of the input to the # process that it failed to execute. Verify by comparing the log # call to the 'process_input' arg given to the failed iptables-restore # call. 
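# Worked example (illustrative; the real width comes from
# iptables_manager.IPTABLES_ERROR_LINES_OF_CONTEXT): with a context of 5
# lines, a failure at line 11 gives log_start = max(0, 11 - 5) = 6 and
# log_end = 11 + 5 = 16, so input_lines[6:16] is logged, numbered 7-16.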
# Line 11 of the input was marked as failing so lines (11 - context) # to (11 + context) should be logged ctx = iptables_manager.IPTABLES_ERROR_LINES_OF_CONTEXT log_start = max(0, 11 - ctx) log_end = 11 + ctx logged = ['%7d. %s' % (n, l) for n, l in enumerate(self.input_lines[log_start:log_end], log_start + 1)] log.error.assert_called_once_with(_( 'IPTablesManager.apply failed to apply the ' 'following set of iptables rules:\n%s'), '\n'.join(logged) ) def test_iptables_use_table_lock(self): # Under normal operation, if we do call iptables-restore with a -w # and it succeeds, the next call will only use -w. PE_error = exceptions.ProcessExecutionError( "", iptables_manager.XTABLES_RESOURCE_PROBLEM_CODE) num_calls = 3 expected_calls_and_values = [ (mock.call(['iptables-save'], run_as_root=True), FILTER_DUMP), (mock.call(['iptables-restore', '-n'], process_input=mock.ANY, run_as_root=True, log_fail_as_error=False), PE_error), (mock.call(['iptables-restore', '-n', '-w', '10', '-W', iptables_manager.XLOCK_WAIT_INTERVAL], process_input=mock.ANY, run_as_root=True), None), ] if self.use_ipv6: num_calls += 2 expected_calls_and_values.append( (mock.call(['ip6tables-save'], run_as_root=True), FILTER_DUMP)) expected_calls_and_values.append( (mock.call(['ip6tables-restore', '-n', '-w', '10', '-W', iptables_manager.XLOCK_WAIT_INTERVAL], process_input=mock.ANY, run_as_root=True), None)) tools.setup_mock_calls(self.execute, expected_calls_and_values) self.iptables._apply_synchronized() self.assertEqual(num_calls, self.execute.call_count) tools.verify_mock_calls(self.execute, expected_calls_and_values) self.execute.reset_mock() num_calls = 2 expected_calls_and_values = [ (mock.call(['iptables-save'], run_as_root=True), FILTER_DUMP), (mock.call(['iptables-restore', '-n', '-w', '10', '-W', iptables_manager.XLOCK_WAIT_INTERVAL], process_input=mock.ANY, run_as_root=True), None), ] if self.use_ipv6: num_calls += 2 expected_calls_and_values.append( (mock.call(['ip6tables-save'], run_as_root=True), FILTER_DUMP)) expected_calls_and_values.append( (mock.call(['ip6tables-restore', '-n', '-w', '10', '-W', iptables_manager.XLOCK_WAIT_INTERVAL], process_input=mock.ANY, run_as_root=True), None)) tools.setup_mock_calls(self.execute, expected_calls_and_values) self.iptables._apply_synchronized() self.assertEqual(num_calls, self.execute.call_count) tools.verify_mock_calls(self.execute, expected_calls_and_values) # Another instance of the class should behave similarly now self.execute.reset_mock() iptm = iptables_manager.IptablesManager(use_ipv6=self.use_ipv6) tools.setup_mock_calls(self.execute, expected_calls_and_values) iptm._apply_synchronized() self.assertEqual(num_calls, self.execute.call_count) tools.verify_mock_calls(self.execute, expected_calls_and_values) def test_get_traffic_counters_chain_notexists(self): with mock.patch.object(iptables_manager, "LOG") as log: acc = self.iptables.get_traffic_counters('chain1') self.assertIsNone(acc) self.assertEqual(0, self.execute.call_count) log.warning.assert_called_once_with( 'Attempted to get traffic counters of chain %s which ' 'does not exist', 'chain1') def test_get_traffic_counters(self): exp_packets = 800 exp_bytes = 131802 expected_calls_and_values = [ (mock.call(['iptables', '-t', 'filter', '-L', 'OUTPUT', '-n', '-v', '-x', '-w', '10'], run_as_root=True), TRAFFIC_COUNTERS_DUMP), (mock.call(['iptables', '-t', 'raw', '-L', 'OUTPUT', '-n', '-v', '-x', '-w', '10'], run_as_root=True), ''), (mock.call(['iptables', '-t', 'mangle', '-L', 'OUTPUT', '-n', '-v', '-x', '-w', 
'10'], run_as_root=True), ''), (mock.call(['iptables', '-t', 'nat', '-L', 'OUTPUT', '-n', '-v', '-x', '-w', '10'], run_as_root=True), ''), ] if self.use_ipv6: expected_calls_and_values.append( (mock.call(['ip6tables', '-t', 'raw', '-L', 'OUTPUT', '-n', '-v', '-x', '-w', '10'], run_as_root=True), '')) expected_calls_and_values.append( (mock.call(['ip6tables', '-t', 'filter', '-L', 'OUTPUT', '-n', '-v', '-x', '-w', '10'], run_as_root=True), TRAFFIC_COUNTERS_DUMP)) expected_calls_and_values.append( (mock.call(['ip6tables', '-t', 'mangle', '-L', 'OUTPUT', '-n', '-v', '-x', '-w', '10'], run_as_root=True), '')) exp_packets *= 2 exp_bytes *= 2 tools.setup_mock_calls(self.execute, expected_calls_and_values) acc = self.iptables.get_traffic_counters('OUTPUT') self.assertEqual(acc['pkts'], exp_packets) self.assertEqual(acc['bytes'], exp_bytes) tools.verify_mock_calls(self.execute, expected_calls_and_values, any_order=True) def test_get_traffic_counters_and_zero(self): exp_packets = 800 exp_bytes = 131802 expected_calls_and_values = [ (mock.call(['iptables', '-t', 'filter', '-L', 'OUTPUT', '-n', '-v', '-x', '-w', '10', '-Z'], run_as_root=True), TRAFFIC_COUNTERS_DUMP), (mock.call(['iptables', '-t', 'raw', '-L', 'OUTPUT', '-n', '-v', '-x', '-w', '10', '-Z'], run_as_root=True), ''), (mock.call(['iptables', '-t', 'mangle', '-L', 'OUTPUT', '-n', '-v', '-x', '-w', '10', '-Z'], run_as_root=True), ''), (mock.call(['iptables', '-t', 'nat', '-L', 'OUTPUT', '-n', '-v', '-x', '-w', '10', '-Z'], run_as_root=True), '') ] if self.use_ipv6: expected_calls_and_values.append( (mock.call(['ip6tables', '-t', 'raw', '-L', 'OUTPUT', '-n', '-v', '-x', '-w', '10', '-Z'], run_as_root=True), '')) expected_calls_and_values.append( (mock.call(['ip6tables', '-t', 'filter', '-L', 'OUTPUT', '-n', '-v', '-x', '-w', '10', '-Z'], run_as_root=True), TRAFFIC_COUNTERS_DUMP)) expected_calls_and_values.append( (mock.call(['ip6tables', '-t', 'mangle', '-L', 'OUTPUT', '-n', '-v', '-x', '-w', '10', '-Z'], run_as_root=True), '')) exp_packets *= 2 exp_bytes *= 2 tools.setup_mock_calls(self.execute, expected_calls_and_values) acc = self.iptables.get_traffic_counters('OUTPUT', zero=True) self.assertEqual(acc['pkts'], exp_packets) self.assertEqual(acc['bytes'], exp_bytes) tools.verify_mock_calls(self.execute, expected_calls_and_values, any_order=True) def test_add_blank_rule(self): iptables_args = {} iptables_args.update(IPTABLES_ARG) filter_rules = ('-A %(bn)s-test-filter\n' % iptables_args) iptables_args['filter_rules'] = filter_rules filter_dump_mod = FILTER_RESTORE_DUMP % iptables_args expected_calls_and_values = [ (mock.call(['iptables-save'], run_as_root=True), (filter_dump_mod + MANGLE_RESTORE_DUMP + NAT_RESTORE_DUMP + RAW_RESTORE_DUMP)), ] if self.use_ipv6: expected_calls_and_values.append( (mock.call(['ip6tables-save'], run_as_root=True), FILTER_DUMP)) expected_calls_and_values.append( (mock.call(['ip6tables-restore', '-n'], process_input=mock.ANY, run_as_root=True, log_fail_as_error=False), None)) tools.setup_mock_calls(self.execute, expected_calls_and_values) self.iptables.ipv4['filter'].add_chain('test-filter') self.iptables.ipv4['filter'].add_rule('test-filter', '') self.iptables.apply() tools.verify_mock_calls(self.execute, expected_calls_and_values) def test_add_rule_exchanged_interface_and_ip(self): iptables_args = {} iptables_args.update(IPTABLES_ARG) filter_rules = ('-A %(bn)s-test-filter -d 192.168.0.2 -i tap-xxx ' '-j ACCEPT\n' % iptables_args) iptables_args['filter_rules'] = filter_rules filter_dump_mod = FILTER_RESTORE_DUMP % 
iptables_args RESTORE_INPUT = ('# Generated by iptables_manager\n' '*filter\n' '-D run.py-test-filter 1\n' '-I run.py-test-filter 1 ' '-i tap-xxx -d 192.168.0.2 -j ACCEPT\n' 'COMMIT\n' '# Completed by iptables_manager\n' % IPTABLES_ARG) expected_calls_and_values = [ (mock.call(['iptables-save'], run_as_root=True), (filter_dump_mod + MANGLE_RESTORE_DUMP + NAT_RESTORE_DUMP + RAW_RESTORE_DUMP)), (mock.call(['iptables-restore', '-n'], process_input=RESTORE_INPUT, run_as_root=True, log_fail_as_error=False), None), ] if self.use_ipv6: self._extend_with_ip6tables_filter_end( expected_calls_and_values, FILTER_DUMP + MANGLE_DUMP_V6 + RAW_DUMP) tools.setup_mock_calls(self.execute, expected_calls_and_values) self.iptables.ipv4['filter'].add_chain('test-filter') self.iptables.ipv4['filter'].add_rule('test-filter', '-i tap-xxx -d 192.168.0.2 ' '-j ACCEPT') self.iptables.apply() tools.verify_mock_calls(self.execute, expected_calls_and_values) class IptablesManagerStateFulTestCaseIPv6(IptablesManagerStateFulTestCase): use_ipv6 = True class IptablesManagerStateFulTestCaseCustomBinaryName( IptablesManagerBaseTestCase): use_ipv6 = False bn = ("xbcdef" * 5) def setUp(self): super(IptablesManagerStateFulTestCaseCustomBinaryName, self).setUp() self.iptables = iptables_manager.IptablesManager( binary_name=self.bn, use_ipv6=self.use_ipv6) def test_add_and_remove_chain_custom_binary_name(self): iptables_args = {'bn': self.bn[:16], 'filter_rules': ''} filter_dump = FILTER_WITH_RULES_TEMPLATE % iptables_args filter_dump_ipv6 = FILTER_TEMPLATE % iptables_args filter_dump_mod = filter_dump nat_dump = NAT_TEMPLATE % iptables_args raw_dump = _generate_raw_dump(iptables_args) mangle_dump = _generate_mangle_dump(iptables_args) expected_calls_and_values = [ (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(filter_dump_mod + mangle_dump + nat_dump + raw_dump), run_as_root=True, log_fail_as_error=False), None), (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(filter_dump + mangle_dump + nat_dump + raw_dump), run_as_root=True, log_fail_as_error=False), None), ] if self.use_ipv6: mangle_dump_v6 = _generate_mangle_dump_v6(iptables_args) self._extend_with_ip6tables_filter( expected_calls_and_values, filter_dump_ipv6 + mangle_dump_v6 + raw_dump) tools.setup_mock_calls(self.execute, expected_calls_and_values) self.iptables.ipv4['filter'].add_chain('filter') self.iptables.apply() self.iptables.ipv4['filter'].empty_chain('filter') self.iptables.apply() tools.verify_mock_calls(self.execute, expected_calls_and_values) class IptablesManagerStateFulTestCaseCustomBinaryNameIPv6( IptablesManagerStateFulTestCaseCustomBinaryName): use_ipv6 = True class IptablesManagerStateFulTestCaseEmptyCustomBinaryName( IptablesManagerBaseTestCase): use_ipv6 = False bn = ("xbcdef" * 5)[:16] def setUp(self): super(IptablesManagerStateFulTestCaseEmptyCustomBinaryName, self).setUp() self.iptables = iptables_manager.IptablesManager( binary_name=self.bn, use_ipv6=self.use_ipv6) def test_empty_chain_custom_binary_name(self): iptables_args = {'bn': self.bn} filter_dump = FILTER_TEMPLATE % iptables_args filter_rules = ('-I %(bn)s-filter 1 -s 0/0 -d 192.168.0.2\n' % iptables_args) iptables_args['filter_rules'] = filter_rules filter_dump_mod = FILTER_WITH_RULES_TEMPLATE % iptables_args nat_dump = NAT_TEMPLATE % iptables_args raw_dump = _generate_raw_dump(iptables_args) mangle_dump = _generate_mangle_dump(iptables_args) expected_calls_and_values = [ 
(mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(filter_dump_mod + mangle_dump + nat_dump + raw_dump), run_as_root=True, log_fail_as_error=False), None), (mock.call(['iptables-save'], run_as_root=True), ''), (mock.call(['iptables-restore', '-n'], process_input=(filter_dump + mangle_dump + nat_dump + raw_dump), run_as_root=True, log_fail_as_error=False), None), ] if self.use_ipv6: mangle_dump_v6 = _generate_mangle_dump_v6(iptables_args) self._extend_with_ip6tables_filter( expected_calls_and_values, filter_dump + mangle_dump_v6 + raw_dump) tools.setup_mock_calls(self.execute, expected_calls_and_values) self.iptables.ipv4['filter'].add_chain('filter') self.iptables.ipv4['filter'].add_rule('filter', '-s 0/0 -d 192.168.0.2') self.iptables.apply() self.iptables.ipv4['filter'].remove_chain('filter') self.iptables.apply() tools.verify_mock_calls(self.execute, expected_calls_and_values) class IptablesManagerStateFulTestCaseEmptyCustomBinaryNameIPv6( IptablesManagerStateFulTestCaseEmptyCustomBinaryName): use_ipv6 = True class IptablesManagerStateLessTestCase(base.BaseTestCase): def setUp(self): super(IptablesManagerStateLessTestCase, self).setUp() cfg.CONF.set_override('comment_iptables_rules', False, 'AGENT') self.iptables = (iptables_manager.IptablesManager(state_less=True)) def test_nat_not_found(self): self.assertNotIn('nat', self.iptables.ipv4) def test_mangle_not_found(self): self.assertNotIn('mangle', self.iptables.ipv4) def test_initialize_mangle_table(self): iptables = iptables_manager.IptablesManager(state_less=True) iptables.initialize_mangle_table() self.assertIn('mangle', iptables.ipv4) self.assertNotIn('nat', iptables.ipv4) def test_initialize_nat_table(self): iptables = iptables_manager.IptablesManager(state_less=True) iptables.initialize_nat_table() self.assertIn('nat', iptables.ipv4) self.assertNotIn('mangle', iptables.ipv4) class IptablesManagerNoNatTestCase(base.BaseTestCase): def setUp(self): super(IptablesManagerNoNatTestCase, self).setUp() cfg.CONF.set_override('comment_iptables_rules', False, 'AGENT') self.iptables = (iptables_manager.IptablesManager(nat=False)) def test_nat_not_found(self): self.assertNotIn('nat', self.iptables.ipv4) def test_mangle_found(self): self.assertIn('mangle', self.iptables.ipv4) def test_initialize_nat_table(self): iptables = iptables_manager.IptablesManager(nat=False) iptables.initialize_nat_table() self.assertIn('nat', iptables.ipv4) self.assertIn('mangle', iptables.ipv4)

# neutron/tests/unit/agent/linux/test_keepalived.py
# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
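# Illustrative sketch (not from the original module): a minimal use of the
# helpers exercised by the tests below, limited to calls that appear in
# this module.
#
#     config = keepalived.KeepalivedConf()
#     instance = keepalived.KeepalivedInstance(
#         'MASTER', 'eth0', 1, ['169.254.192.0/18'])
#     config.add_instance(instance)
#     print(config.get_config_str())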
# import os import textwrap import mock from neutron_lib import constants as n_consts import testtools from neutron.agent.linux import keepalived from neutron.tests import base # Keepalived user guide: # http://www.keepalived.org/pdf/UserGuide.pdf KEEPALIVED_GLOBAL_CONFIG = textwrap.dedent("""\ global_defs { notification_email_from %(email_from)s router_id %(router_id)s }""") % dict( email_from=keepalived.KEEPALIVED_EMAIL_FROM, router_id=keepalived.KEEPALIVED_ROUTER_ID) VRRP_ID = 1 VRRP_INTERVAL = 5 class KeepalivedGetFreeRangeTestCase(base.BaseTestCase): def test_get_free_range(self): free_range = keepalived.get_free_range( parent_range='169.254.0.0/16', excluded_ranges=['169.254.0.0/24', '169.254.1.0/24', '169.254.2.0/24'], size=24) self.assertEqual('169.254.3.0/24', free_range) def test_get_free_range_without_excluded(self): free_range = keepalived.get_free_range( parent_range='169.254.0.0/16', excluded_ranges=[], size=20) self.assertEqual('169.254.0.0/20', free_range) def test_get_free_range_excluded_out_of_parent(self): free_range = keepalived.get_free_range( parent_range='169.254.0.0/16', excluded_ranges=['255.255.255.0/24'], size=24) self.assertEqual('169.254.0.0/24', free_range) def test_get_free_range_not_found(self): tiny_parent_range = '192.168.1.0/24' huge_size = 8 with testtools.ExpectedException(ValueError): keepalived.get_free_range( parent_range=tiny_parent_range, excluded_ranges=[], size=huge_size) class KeepalivedConfBaseMixin(object): def _get_config(self): config = keepalived.KeepalivedConf() instance1 = keepalived.KeepalivedInstance('MASTER', 'eth0', 1, ['169.254.192.0/18'], advert_int=5) instance1.set_authentication('AH', 'pass123') instance1.track_interfaces.append("eth0") vip_address1 = keepalived.KeepalivedVipAddress('192.168.1.0/24', 'eth1') vip_address2 = keepalived.KeepalivedVipAddress('192.168.2.0/24', 'eth2') vip_address3 = keepalived.KeepalivedVipAddress('192.168.3.0/24', 'eth2') vip_address_ex = keepalived.KeepalivedVipAddress('192.168.55.0/24', 'eth10') instance1.vips.append(vip_address1) instance1.vips.append(vip_address2) instance1.vips.append(vip_address3) instance1.vips.append(vip_address_ex) virtual_route = keepalived.KeepalivedVirtualRoute(n_consts.IPv4_ANY, "192.168.1.1", "eth1") instance1.virtual_routes.gateway_routes = [virtual_route] instance2 = keepalived.KeepalivedInstance('MASTER', 'eth4', 2, ['169.254.192.0/18'], mcast_src_ip='224.0.0.1') instance2.track_interfaces.append("eth4") vip_address1 = keepalived.KeepalivedVipAddress('192.168.3.0/24', 'eth6') instance2.vips.append(vip_address1) instance2.vips.append(vip_address2) instance2.vips.append(vip_address_ex) config.add_instance(instance1) config.add_instance(instance2) return config class KeepalivedConfTestCase(base.BaseTestCase, KeepalivedConfBaseMixin): expected = KEEPALIVED_GLOBAL_CONFIG + textwrap.dedent(""" vrrp_instance VR_1 { state MASTER interface eth0 virtual_router_id 1 priority 50 garp_master_delay 60 advert_int 5 authentication { auth_type AH auth_pass pass123 } track_interface { eth0 } virtual_ipaddress { 169.254.0.1/24 dev eth0 } virtual_ipaddress_excluded { 192.168.1.0/24 dev eth1 192.168.2.0/24 dev eth2 192.168.3.0/24 dev eth2 192.168.55.0/24 dev eth10 } virtual_routes { 0.0.0.0/0 via 192.168.1.1 dev eth1 } } vrrp_instance VR_2 { state MASTER interface eth4 virtual_router_id 2 priority 50 garp_master_delay 60 mcast_src_ip 224.0.0.1 track_interface { eth4 } virtual_ipaddress { 169.254.0.2/24 dev eth4 } virtual_ipaddress_excluded { 192.168.2.0/24 dev eth2 192.168.3.0/24 dev eth6 
192.168.55.0/24 dev eth10 } }""") def test_config_generation(self): config = self._get_config() self.assertEqual(self.expected, config.get_config_str()) def test_config_with_reset(self): config = self._get_config() self.assertEqual(self.expected, config.get_config_str()) config.reset() self.assertEqual(KEEPALIVED_GLOBAL_CONFIG, config.get_config_str()) def test_get_existing_vip_ip_addresses_returns_list(self): config = self._get_config() instance = config.get_instance(1) current_vips = sorted(instance.get_existing_vip_ip_addresses('eth2')) self.assertEqual(['192.168.2.0/24', '192.168.3.0/24'], current_vips) class KeepalivedStateExceptionTestCase(base.BaseTestCase): def test_state_exception(self): invalid_vrrp_state = 'a seal walks' self.assertRaises(keepalived.InvalidInstanceStateException, keepalived.KeepalivedInstance, invalid_vrrp_state, 'eth0', 33, ['169.254.192.0/18']) invalid_auth_type = 'into a club' instance = keepalived.KeepalivedInstance('MASTER', 'eth0', 1, ['169.254.192.0/18']) self.assertRaises(keepalived.InvalidAuthenticationTypeException, instance.set_authentication, invalid_auth_type, 'some_password') class KeepalivedInstanceRoutesTestCase(base.BaseTestCase): @classmethod def _get_instance_routes(cls): routes = keepalived.KeepalivedInstanceRoutes() default_gw_eth0 = keepalived.KeepalivedVirtualRoute( '0.0.0.0/0', '1.0.0.254', 'eth0') default_gw_eth1 = keepalived.KeepalivedVirtualRoute( '::/0', 'fe80::3e97:eff:fe26:3bfa/64', 'eth1') routes.gateway_routes = [default_gw_eth0, default_gw_eth1] extra_routes = [ keepalived.KeepalivedVirtualRoute('10.0.0.0/8', '1.0.0.1'), keepalived.KeepalivedVirtualRoute('20.0.0.0/8', '2.0.0.2')] routes.extra_routes = extra_routes extra_subnets = [ keepalived.KeepalivedVirtualRoute( '30.0.0.0/8', None, 'eth0', scope='link')] routes.extra_subnets = extra_subnets return routes def test_routes(self): routes = self._get_instance_routes() self.assertEqual(len(routes.routes), 5) def test_remove_routes_on_interface(self): routes = self._get_instance_routes() routes.remove_routes_on_interface('eth0') self.assertEqual(len(routes.routes), 3) routes.remove_routes_on_interface('eth1') self.assertEqual(len(routes.routes), 2) def test_build_config(self): expected = """ virtual_routes { 0.0.0.0/0 via 1.0.0.254 dev eth0 ::/0 via fe80::3e97:eff:fe26:3bfa/64 dev eth1 10.0.0.0/8 via 1.0.0.1 20.0.0.0/8 via 2.0.0.2 30.0.0.0/8 dev eth0 scope link }""" routes = self._get_instance_routes() self.assertEqual(expected, '\n'.join(routes.build_config())) class KeepalivedInstanceTestCase(base.BaseTestCase, KeepalivedConfBaseMixin): def test_get_primary_vip(self): instance = keepalived.KeepalivedInstance('MASTER', 'ha0', 42, ['169.254.192.0/18']) self.assertEqual('169.254.0.42/24', instance.get_primary_vip()) def test_remove_addresses_by_interface(self): config = self._get_config() instance = config.get_instance(1) instance.remove_vips_vroutes_by_interface('eth2') instance.remove_vips_vroutes_by_interface('eth10') expected = KEEPALIVED_GLOBAL_CONFIG + textwrap.dedent(""" vrrp_instance VR_1 { state MASTER interface eth0 virtual_router_id 1 priority 50 garp_master_delay 60 advert_int 5 authentication { auth_type AH auth_pass pass123 } track_interface { eth0 } virtual_ipaddress { 169.254.0.1/24 dev eth0 } virtual_ipaddress_excluded { 192.168.1.0/24 dev eth1 } virtual_routes { 0.0.0.0/0 via 192.168.1.1 dev eth1 } } vrrp_instance VR_2 { state MASTER interface eth4 virtual_router_id 2 priority 50 garp_master_delay 60 mcast_src_ip 224.0.0.1 track_interface { eth4 } virtual_ipaddress 
{ 169.254.0.2/24 dev eth4 } virtual_ipaddress_excluded { 192.168.2.0/24 dev eth2 192.168.3.0/24 dev eth6 192.168.55.0/24 dev eth10 } }""") self.assertEqual(expected, config.get_config_str()) def test_build_config_no_vips(self): expected = textwrap.dedent("""\ vrrp_instance VR_1 { state MASTER interface eth0 virtual_router_id 1 priority 50 garp_master_delay 60 virtual_ipaddress { 169.254.0.1/24 dev eth0 } }""") instance = keepalived.KeepalivedInstance( 'MASTER', 'eth0', VRRP_ID, ['169.254.192.0/18']) self.assertEqual(expected, os.linesep.join(instance.build_config())) def test_build_config_no_vips_track_script(self): expected = """ vrrp_script ha_health_check_1 { script "/etc/ha_confs/qrouter-x/ha_check_script_1.sh" interval 5 fall 2 rise 2 } vrrp_instance VR_1 { state MASTER interface eth0 virtual_router_id 1 priority 50 garp_master_delay 60 virtual_ipaddress { 169.254.0.1/24 dev eth0 } }""" instance = keepalived.KeepalivedInstance( 'MASTER', 'eth0', VRRP_ID, ['169.254.192.0/18']) instance.track_script = keepalived.KeepalivedTrackScript( VRRP_INTERVAL, '/etc/ha_confs/qrouter-x', VRRP_ID) self.assertEqual(expected, '\n'.join(instance.build_config())) class KeepalivedVipAddressTestCase(base.BaseTestCase): def test_vip_with_scope(self): vip = keepalived.KeepalivedVipAddress('fe80::3e97:eff:fe26:3bfa/64', 'eth1', 'link') self.assertEqual('fe80::3e97:eff:fe26:3bfa/64 dev eth1 scope link', vip.build_config()) def test_add_vip_idempotent(self): instance = keepalived.KeepalivedInstance('MASTER', 'eth0', 1, ['169.254.192.0/18']) instance.add_vip('192.168.222.1/32', 'eth11', None) instance.add_vip('192.168.222.1/32', 'eth12', 'link') self.assertEqual(1, len(instance.vips)) class KeepalivedVirtualRouteTestCase(base.BaseTestCase): def test_virtual_route_with_dev(self): route = keepalived.KeepalivedVirtualRoute(n_consts.IPv4_ANY, '1.2.3.4', 'eth0') self.assertEqual('0.0.0.0/0 via 1.2.3.4 dev eth0', route.build_config()) def test_virtual_route_without_dev(self): route = keepalived.KeepalivedVirtualRoute('50.0.0.0/8', '1.2.3.4') self.assertEqual('50.0.0.0/8 via 1.2.3.4', route.build_config()) class KeepalivedTrackScriptTestCase(base.BaseTestCase): def test_build_config_preamble(self): exp_conf = [ '', 'vrrp_script ha_health_check_1 {', ' script "/etc/ha_confs/qrouter-x/ha_check_script_1.sh"', ' interval 5', ' fall 2', ' rise 2', '}', ''] ts = keepalived.KeepalivedTrackScript( VRRP_INTERVAL, '/etc/ha_confs/qrouter-x', VRRP_ID) self.assertEqual(exp_conf, ts.build_config_preamble()) def test_get_config_str(self): ts = keepalived.KeepalivedTrackScript( VRRP_INTERVAL, '/etc/ha_confs/qrouter-x', VRRP_ID) ts.routes = [ keepalived.KeepalivedVirtualRoute('12.0.0.0/24', '10.0.0.1'), ] self.assertEqual(''' track_script { ha_health_check_1 }''', ts.get_config_str()) def test_get_script_str(self): ts = keepalived.KeepalivedTrackScript( VRRP_INTERVAL, '/etc/ha_confs/qrouter-x', VRRP_ID) ts.routes = [ keepalived.KeepalivedVirtualRoute('12.0.0.0/24', '10.0.0.1'), ] ts.vips = [ keepalived.KeepalivedVipAddress('192.168.0.3/18', 'ha-xxx'), ] self.assertEqual("""#!/bin/bash -eu ip a | grep 192.168.0.3 || exit 0 ping -c 1 -w 1 10.0.0.1 1>/dev/null || exit 1""", ts._get_script_str()) def test_get_script_str_no_routes(self): ts = keepalived.KeepalivedTrackScript( VRRP_INTERVAL, '/etc/ha_confs/qrouter-x', VRRP_ID) self.assertEqual('#!/bin/bash -eu\n', ts._get_script_str()) def test_write_check_script(self): conf_dir = '/etc/ha_confs/qrouter-x' ts = keepalived.KeepalivedTrackScript(VRRP_INTERVAL, conf_dir, VRRP_ID) ts.routes = [ 
keepalived.KeepalivedVirtualRoute('12.0.0.0/24', '10.0.0.1'), keepalived.KeepalivedVirtualRoute('2001:db8::1', '2001:db8::1'), ] with mock.patch.object(keepalived, 'file_utils') as patched_utils: ts.write_check_script() patched_utils.replace_file.assert_called_with( os.path.join(conf_dir, 'ha_check_script_1.sh'), """#!/bin/bash -eu ping -c 1 -w 1 10.0.0.1 1>/dev/null || exit 1 ping6 -c 1 -w 1 2001:db8::1 1>/dev/null || exit 1""", 0o520 ) def test_write_check_script_no_routes(self): conf_dir = '/etc/ha_confs/qrouter-x' ts = keepalived.KeepalivedTrackScript( VRRP_INTERVAL, conf_dir, VRRP_ID) with mock.patch.object(keepalived, 'file_utils') as patched_utils: ts.write_check_script() patched_utils.replace_file.assert_not_called()

# neutron/tests/unit/agent/linux/test_l3_tc_lib.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib import constants from neutron_lib import exceptions from neutron.agent.linux import l3_tc_lib as tc_lib from neutron.agent.linux import tc_lib as base_tc_lib from neutron.tests import base FLOATING_IP_DEVICE_NAME = "qg-device_rfp" FLOATING_IP_ROUTER_NAMESPACE = "qrouter-namespace_snat-namespace" FLOATING_IP_1 = "172.16.5.146" FLOATING_IP_2 = "172.16.10.105" FILETER_ID_1 = "800::800" FILETER_ID_2 = "800::801" TC_INGRESS_FILTERS_BASE = ( 'filter protocol ip u32 \n' 'filter protocol ip u32 %(chain_value)sfh 800: ht divisor 1 \n' 'filter protocol ip u32 %(chain_value)sfh %(filter_id1)s order 2048 key ' 'ht 800 bkt 0 ' 'flowid :1 (rule hit 0 success 0)\n' ' match IP dst %(fip1)s/32 (success 0 ) \n' ' police 0x3 rate 3000Kbit burst 3Mb mtu 64Kb action drop overhead 0b \n' 'ref 1 bind 1\n' '\n' ' Sent 111 bytes 222 pkts (dropped 0, overlimits 0) \n' 'filter protocol ip u32 %(chain_value)sfh %(filter_id2)s order 2049 key ' 'ht 800 bkt 0 ' 'flowid :1 (rule hit 0 success 0)\n' ' match IP dst %(fip2)s/32 (success 0 ) \n' ' police 0x1b rate 22000Kbit burst 22Mb mtu 64Kb action drop ' 'overhead 0b \n' 'ref 1 bind 1\n' '\n' ' Sent 111 bytes 222 pkts (dropped 0, overlimits 0)\n') TC_INGRESS_FILTERS_WITHOUT_CHAIN = TC_INGRESS_FILTERS_BASE % { "chain_value": "", "filter_id1": FILETER_ID_1, "fip1": FLOATING_IP_1, "filter_id2": FILETER_ID_2, "fip2": FLOATING_IP_2} # NOTE(slaweq): in iproute 4.15 chain value was added to filter output TC_INGRESS_FILTERS_WITH_CHAIN = TC_INGRESS_FILTERS_BASE % { "chain_value": "chain 1 ", "filter_id1": FILETER_ID_1, "fip1": FLOATING_IP_1, "filter_id2": FILETER_ID_2, "fip2": FLOATING_IP_2} TC_INGRESS_FILTERS_DUP_WITHOUT_CHAIN = TC_INGRESS_FILTERS_WITHOUT_CHAIN + ( 'filter protocol ip u32 %(chain_value)sfh %(filter_id2)s order 2049 key ' 'ht 800 bkt 0 ' 'flowid :1 (rule hit 0 success 0)\n' ' match IP dst %(fip2)s/32 (success 0 ) \n' ' police 0x1b rate 22000Kbit burst 22Mb mtu 64Kb action drop ' 'overhead 0b \n' 'ref 1 bind 1\n' '\n' ' Sent 
111 bytes 222 pkts (dropped 0, overlimits 0)\n') % { "chain_value": "", "filter_id2": FILETER_ID_2, "fip2": FLOATING_IP_2} TC_INGRESS_FILTERS_DUP_WITH_CHAIN = TC_INGRESS_FILTERS_WITH_CHAIN + ( 'filter protocol ip u32 %(chain_value)sfh %(filter_id2)s order 2049 key ' 'ht 800 bkt 0 ' 'flowid :1 (rule hit 0 success 0)\n' ' match IP dst %(fip2)s/32 (success 0 ) \n' ' police 0x1b rate 22000Kbit burst 22Mb mtu 64Kb action drop ' 'overhead 0b \n' 'ref 1 bind 1\n' '\n' ' Sent 111 bytes 222 pkts (dropped 0, overlimits 0)\n') % { "chain_value": "chain 1 ", "filter_id2": FILETER_ID_2, "fip2": FLOATING_IP_2} TC_EGRESS_FILTERS_BASE = ( 'filter protocol ip u32 \n' 'filter protocol ip u32 %(chain_name)sfh 800: ht divisor 1 \n' 'filter protocol ip u32 %(chain_name)sfh %(filter_id1)s order 2048 key ' 'ht 800 bkt 0 ' 'flowid :1 (rule hit 0 success 0)\n' ' match IP src %(fip1)s/32 (success 0 ) \n' ' police 0x4 rate 3000Kbit burst 3Mb mtu 64Kb action drop overhead 0b \n' 'ref 1 bind 1\n' '\n' ' Sent 111 bytes 222 pkts (dropped 0, overlimits 0) \n' 'filter protocol ip u32 %(chain_name)sfh %(filter_id2)s order 2049 key ' 'ht 800 bkt 0 ' 'flowid :1 (rule hit 0 success 0)\n' ' match IP src %(fip2)s/32 (success 0 ) \n' ' police 0x1c rate 22000Kbit burst 22Mb mtu 64Kb action drop ' 'overhead 0b \n' 'ref 1 bind 1\n' '\n' ' Sent 111 bytes 222 pkts (dropped 0, overlimits 0)\n') TC_EGRESS_FILTERS_WITHOUT_CHAIN = TC_EGRESS_FILTERS_BASE % { "chain_name": "", "filter_id1": FILETER_ID_1, "fip1": FLOATING_IP_1, "filter_id2": FILETER_ID_2, "fip2": FLOATING_IP_2} TC_EGRESS_FILTERS_WITH_CHAIN = TC_EGRESS_FILTERS_BASE % { "chain_name": "chain 1 ", "filter_id1": FILETER_ID_1, "fip1": FLOATING_IP_1, "filter_id2": FILETER_ID_2, "fip2": FLOATING_IP_2} INGRESS_QSIC_ID = "ffff:" EGRESS_QDISC_ID = "1:" QDISC_IDS = {constants.INGRESS_DIRECTION: INGRESS_QSIC_ID, constants.EGRESS_DIRECTION: EGRESS_QDISC_ID} TC_QDISCS = [{'handle': '1:', 'qdisc_type': 'htb', 'parent': 'root'}, {'handle': 'ffff:', 'qdisc_type': 'ingress', 'parent': 'ingress'}] class TestFloatingIPTcCommandBase(base.BaseTestCase): def setUp(self): super(TestFloatingIPTcCommandBase, self).setUp() self.tc = tc_lib.FloatingIPTcCommandBase( FLOATING_IP_DEVICE_NAME, namespace=FLOATING_IP_ROUTER_NAMESPACE) self.execute = mock.patch('neutron.agent.common.utils.execute').start() def test__get_qdisc_id_for_filter(self): with mock.patch.object(base_tc_lib, 'list_tc_qdiscs', return_value=TC_QDISCS): q1 = self.tc._get_qdisc_id_for_filter(constants.INGRESS_DIRECTION) self.assertEqual(INGRESS_QSIC_ID, q1) q2 = self.tc._get_qdisc_id_for_filter(constants.EGRESS_DIRECTION) self.assertEqual(EGRESS_QDISC_ID, q2) @mock.patch.object(base_tc_lib, 'add_tc_qdisc') def test__add_qdisc(self, mock_add_tc_qdisc): self.tc._add_qdisc(constants.INGRESS_DIRECTION) mock_add_tc_qdisc.assert_called_once_with( self.tc.name, 'ingress', namespace=self.tc.namespace) mock_add_tc_qdisc.reset_mock() self.tc._add_qdisc(constants.EGRESS_DIRECTION) mock_add_tc_qdisc.assert_called_once_with( self.tc.name, 'htb', parent='root', namespace=self.tc.namespace) def test__get_filters(self): self.tc._get_filters(INGRESS_QSIC_ID) self.execute.assert_called_with( ['ip', 'netns', 'exec', FLOATING_IP_ROUTER_NAMESPACE, 'tc', '-p', '-s', '-d', 'filter', 'show', 'dev', FLOATING_IP_DEVICE_NAME, 'parent', INGRESS_QSIC_ID, 'prio', 1], run_as_root=True, check_exit_code=True, log_fail_as_error=True, extra_ok_codes=None ) def _test__get_filterid_for_ip(self, filters): with mock.patch.object(tc_lib.FloatingIPTcCommandBase, '_get_filters') 
as get_filters: get_filters.return_value = filters f_id = self.tc._get_filterid_for_ip(INGRESS_QSIC_ID, FLOATING_IP_1) self.assertEqual(FILETER_ID_1, f_id) def test__get_filterid_for_ip_without_chain(self): self._test__get_filterid_for_ip(TC_EGRESS_FILTERS_WITHOUT_CHAIN) def test__get_filterid_for_ip_with_chain(self): self._test__get_filterid_for_ip(TC_EGRESS_FILTERS_WITH_CHAIN) def test__get_filterid_for_ip_no_output(self): with mock.patch.object(tc_lib.FloatingIPTcCommandBase, '_get_filters') as get_filters: get_filters.return_value = "" self.assertRaises(exceptions.FilterIDForIPNotFound, self.tc._get_filterid_for_ip, INGRESS_QSIC_ID, FLOATING_IP_1) def _test__get_filterid_for_ip_duplicated(self, filters): with mock.patch.object(tc_lib.FloatingIPTcCommandBase, '_get_filters') as get_filters: get_filters.return_value = filters self.assertRaises(exceptions.MultipleFilterIDForIPFound, self.tc._get_filterid_for_ip, INGRESS_QSIC_ID, FLOATING_IP_2) def test__get_filterid_for_ip_duplicated_without_chain(self): self._test__get_filterid_for_ip_duplicated( TC_INGRESS_FILTERS_DUP_WITHOUT_CHAIN) def test__get_filterid_for_ip_duplicated_with_chain(self): self._test__get_filterid_for_ip_duplicated( TC_INGRESS_FILTERS_DUP_WITH_CHAIN) def _test__get_filterid_for_ip_not_found(self, filters): with mock.patch.object(tc_lib.FloatingIPTcCommandBase, '_get_filters') as get_filters: get_filters.return_value = filters self.assertRaises(exceptions.FilterIDForIPNotFound, self.tc._get_filterid_for_ip, INGRESS_QSIC_ID, "1.1.1.1") def test__get_filterid_for_ip_not_found_without_chain(self): self._test__get_filterid_for_ip_not_found( TC_EGRESS_FILTERS_WITHOUT_CHAIN) def test__get_filterid_for_ip_not_found_with_chain(self): self._test__get_filterid_for_ip_not_found(TC_EGRESS_FILTERS_WITH_CHAIN) def test__del_filter_by_id(self): self.tc._del_filter_by_id(INGRESS_QSIC_ID, FLOATING_IP_1) self.execute.assert_called_once_with( ['ip', 'netns', 'exec', FLOATING_IP_ROUTER_NAMESPACE, 'tc', 'filter', 'del', 'dev', FLOATING_IP_DEVICE_NAME, 'parent', INGRESS_QSIC_ID, 'prio', 1, 'handle', FLOATING_IP_1, 'u32'], run_as_root=True, check_exit_code=True, log_fail_as_error=True, extra_ok_codes=None ) def _test__get_qdisc_filters(self, filters): with mock.patch.object(tc_lib.FloatingIPTcCommandBase, '_get_filters') as get_filters: get_filters.return_value = filters f_ids = self.tc._get_qdisc_filters(INGRESS_QSIC_ID) self.assertEqual([FILETER_ID_1, FILETER_ID_2], f_ids) def test__get_qdisc_filters_without_chain(self): self._test__get_qdisc_filters(TC_EGRESS_FILTERS_WITHOUT_CHAIN) def test__get_qdisc_filters_with_chain(self): self._test__get_qdisc_filters(TC_EGRESS_FILTERS_WITH_CHAIN) def test__get_qdisc_filters_no_output(self): with mock.patch.object(tc_lib.FloatingIPTcCommandBase, '_get_filters') as get_filters: get_filters.return_value = "" f_ids = self.tc._get_qdisc_filters(INGRESS_QSIC_ID) self.assertEqual(0, len(f_ids)) def test__add_filter(self): protocol = ['protocol', 'ip'] prio = ['prio', 1] match = ['u32', 'match', 'ip', 'dst', FLOATING_IP_1] police = ['police', 'rate', '1kbit', 'burst', '1kbit', 'mtu', '64kb', 'drop', 'flowid', ':1'] args = protocol + prio + match + police cmd = ['tc', 'filter', 'add', 'dev', FLOATING_IP_DEVICE_NAME, 'parent', INGRESS_QSIC_ID] + args self.tc._add_filter(INGRESS_QSIC_ID, constants.INGRESS_DIRECTION, FLOATING_IP_1, 1, 1) self.execute.assert_called_once_with( ['ip', 'netns', 'exec', FLOATING_IP_ROUTER_NAMESPACE] + cmd, run_as_root=True, check_exit_code=True, log_fail_as_error=True, 
extra_ok_codes=None ) def test__get_or_create_qdisc(self): with mock.patch.object(tc_lib.FloatingIPTcCommandBase, '_get_qdisc_id_for_filter') as get_disc1: get_disc1.return_value = None with mock.patch.object(tc_lib.FloatingIPTcCommandBase, '_add_qdisc'): with mock.patch.object( tc_lib.FloatingIPTcCommandBase, '_get_qdisc_id_for_filter') as get_disc2: get_disc2.return_value = INGRESS_QSIC_ID qdisc_id = self.tc._get_or_create_qdisc( constants.INGRESS_DIRECTION) self.assertEqual(INGRESS_QSIC_ID, qdisc_id) def test__get_or_create_qdisc_failed(self): with mock.patch.object(tc_lib.FloatingIPTcCommandBase, '_get_qdisc_id_for_filter') as get_disc1: get_disc1.return_value = None with mock.patch.object(tc_lib.FloatingIPTcCommandBase, '_add_qdisc'): with mock.patch.object( tc_lib.FloatingIPTcCommandBase, '_get_qdisc_id_for_filter') as get_disc2: get_disc2.return_value = None self.assertRaises(exceptions.FailedToAddQdiscToDevice, self.tc._get_or_create_qdisc, constants.INGRESS_DIRECTION) class TestFloatingIPTcCommand(base.BaseTestCase): def setUp(self): super(TestFloatingIPTcCommand, self).setUp() self.tc = tc_lib.FloatingIPTcCommand( FLOATING_IP_DEVICE_NAME, namespace=FLOATING_IP_ROUTER_NAMESPACE) self.execute = mock.patch('neutron.agent.common.utils.execute').start() def _test_clear_all_filters(self, filters): with mock.patch.object(tc_lib.FloatingIPTcCommandBase, '_get_qdisc_id_for_filter') as get_disc: get_disc.return_value = EGRESS_QDISC_ID with mock.patch.object(tc_lib.FloatingIPTcCommandBase, '_get_filters') as get_filters: get_filters.return_value = filters self.tc.clear_all_filters(constants.EGRESS_DIRECTION) self.assertEqual(2, self.execute.call_count) def test_clear_all_filters_without_chain(self): self._test_clear_all_filters(TC_EGRESS_FILTERS_WITHOUT_CHAIN) def test_clear_all_filters_with_chain(self): self._test_clear_all_filters(TC_EGRESS_FILTERS_WITH_CHAIN) def test_set_ip_rate_limit_filter_existed(self): with mock.patch.object(tc_lib.FloatingIPTcCommandBase, '_get_qdisc_id_for_filter') as get_disc: get_disc.return_value = EGRESS_QDISC_ID with mock.patch.object(tc_lib.FloatingIPTcCommandBase, '_get_filterid_for_ip') as get_filter: get_filter.return_value = FILETER_ID_1 with mock.patch.object(tc_lib.FloatingIPTcCommandBase, '_del_filter_by_id') as del_filter: with mock.patch.object(tc_lib.FloatingIPTcCommandBase, '_add_filter') as add_filter: ip = "111.111.111.111" self.tc.set_ip_rate_limit(constants.EGRESS_DIRECTION, ip, 1, 1) del_filter.assert_called_once_with( EGRESS_QDISC_ID, FILETER_ID_1) add_filter.assert_called_once_with( EGRESS_QDISC_ID, constants.EGRESS_DIRECTION, ip, 1, 1) def _test_set_ip_rate_limit_no_qdisc(self, filters): with mock.patch.object(tc_lib.FloatingIPTcCommandBase, '_get_qdisc_id_for_filter') as get_disc: get_disc.return_value = None with mock.patch.object(tc_lib.FloatingIPTcCommandBase, '_add_qdisc'): with mock.patch.object(tc_lib.FloatingIPTcCommandBase, '_get_filters') as get_filters: get_filters.return_value = filters get_disc.return_value = INGRESS_QSIC_ID ip = "111.111.111.111" self.tc.set_ip_rate_limit(constants.INGRESS_DIRECTION, ip, 1, 1) protocol = ['protocol', 'ip'] prio = ['prio', 1] _match = 'dst' match = ['u32', 'match', 'ip', _match, ip] police = ['police', 'rate', '1kbit', 'burst', '1kbit', 'mtu', '64kb', 'drop', 'flowid', ':1'] args = protocol + prio + match + police self.execute.assert_called_once_with( ['ip', 'netns', 'exec', FLOATING_IP_ROUTER_NAMESPACE, 'tc', 'filter', 'add', 'dev', FLOATING_IP_DEVICE_NAME, 'parent', INGRESS_QSIC_ID] + args, 
run_as_root=True, check_exit_code=True, log_fail_as_error=True, extra_ok_codes=None ) def test_set_ip_rate_limit_no_qdisc_without_chain(self): self._test_set_ip_rate_limit_no_qdisc(TC_INGRESS_FILTERS_WITHOUT_CHAIN) def test_set_ip_rate_limit_no_qdisc_with_chain(self): self._test_set_ip_rate_limit_no_qdisc(TC_INGRESS_FILTERS_WITH_CHAIN) def test_clear_ip_rate_limit(self): with mock.patch.object(tc_lib.FloatingIPTcCommandBase, '_get_qdisc_id_for_filter') as get_disc: get_disc.return_value = EGRESS_QDISC_ID with mock.patch.object(tc_lib.FloatingIPTcCommandBase, '_get_filterid_for_ip') as get_filter_id: get_filter_id.return_value = FILETER_ID_1 self.tc.clear_ip_rate_limit(constants.EGRESS_DIRECTION, FLOATING_IP_1) self.execute.assert_called_once_with( ['ip', 'netns', 'exec', FLOATING_IP_ROUTER_NAMESPACE, 'tc', 'filter', 'del', 'dev', FLOATING_IP_DEVICE_NAME, 'parent', EGRESS_QDISC_ID, 'prio', 1, 'handle', FILETER_ID_1, 'u32'], run_as_root=True, check_exit_code=True, log_fail_as_error=True, extra_ok_codes=None ) def test_get_filter_id_for_ip(self): with mock.patch.object(tc_lib.FloatingIPTcCommandBase, '_get_qdisc_id_for_filter') as get_disc: get_disc.return_value = EGRESS_QDISC_ID with mock.patch.object(tc_lib.FloatingIPTcCommandBase, '_get_filterid_for_ip') as get_filter_id: self.tc.get_filter_id_for_ip(constants.EGRESS_DIRECTION, '8.8.8.8') get_filter_id.assert_called_once_with(EGRESS_QDISC_ID, '8.8.8.8') def test_get_existing_filter_ids(self): with mock.patch.object(tc_lib.FloatingIPTcCommandBase, '_get_qdisc_id_for_filter') as get_disc: get_disc.return_value = EGRESS_QDISC_ID with mock.patch.object(tc_lib.FloatingIPTcCommandBase, '_get_qdisc_filters') as get_filter_ids: self.tc.get_existing_filter_ids(constants.EGRESS_DIRECTION) get_filter_ids.assert_called_once_with(EGRESS_QDISC_ID) def test_delete_filter_ids(self): with mock.patch.object(tc_lib.FloatingIPTcCommandBase, '_get_qdisc_id_for_filter') as get_disc: get_disc.return_value = EGRESS_QDISC_ID with mock.patch.object(tc_lib.FloatingIPTcCommandBase, '_del_filter_by_id') as del_filter_id: self.tc.delete_filter_ids(constants.EGRESS_DIRECTION, [FILETER_ID_1, FILETER_ID_2]) del_filter_id.assert_has_calls( [mock.call(EGRESS_QDISC_ID, FILETER_ID_1), mock.call(EGRESS_QDISC_ID, FILETER_ID_2)])

# neutron/tests/unit/agent/linux/test_pd.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
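# Illustrative sketch (not from the original module): the tests below drive
# the prefix-delegation callbacks against a mocked l3 agent, for example:
#
#     l3_agent = mock.Mock()
#     l3_agent.pd.routers = {1: pd.get_router_entry(None, True)}
#     pd.remove_router(None, None, l3_agent, router=FakeRouter(1))
#     assert l3_agent.pd.routers == {}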
import mock from neutron.agent.l3 import dvr_edge_router from neutron.agent.l3 import dvr_local_router from neutron.agent.l3 import legacy_router from neutron.agent.linux import pd from neutron.tests import base as tests_base class FakeRouter(object): def __init__(self, router_id): self.router_id = router_id class TestPrefixDelegation(tests_base.DietTestCase): def test_remove_router(self): l3_agent = mock.Mock() router_id = 1 l3_agent.pd.routers = {router_id: pd.get_router_entry(None, True)} pd.remove_router(None, None, l3_agent, router=FakeRouter(router_id)) self.assertTrue(l3_agent.pd.delete_router_pd.called) self.assertEqual({}, l3_agent.pd.routers) def _test_add_update_pd(self, l3_agent, router, ns_name): # add entry pd.add_router(None, None, l3_agent, router=router) pd_router = l3_agent.pd.routers.get(router.router_id) self.assertEqual(ns_name, pd_router.get('ns_name')) # clear namespace name, update entry pd_router['ns_name'] = None pd.update_router(None, None, l3_agent, router=router) pd_router = l3_agent.pd.routers.get(router.router_id) self.assertEqual(ns_name, pd_router.get('ns_name')) def test_add_update_dvr_edge_router(self): l3_agent = mock.Mock() l3_agent.pd.routers = {} router_id = '1' ri = dvr_edge_router.DvrEdgeRouter(l3_agent, 'host', router_id, mock.Mock(), mock.Mock(), mock.Mock()) ns_name = ri.snat_namespace.name self._test_add_update_pd(l3_agent, ri, ns_name) def test_add_update_dvr_local_router(self): l3_agent = mock.Mock() l3_agent.pd.routers = {} router_id = '1' ri = dvr_local_router.DvrLocalRouter(l3_agent, 'host', router_id, mock.Mock(), mock.Mock(), mock.Mock()) ns_name = ri.ns_name self._test_add_update_pd(l3_agent, ri, ns_name) def test_add_update_legacy_router(self): l3_agent = mock.Mock() l3_agent.pd.routers = {} router_id = '1' ri = legacy_router.LegacyRouter(l3_agent, router_id, mock.Mock(), mock.Mock(), mock.Mock()) ns_name = ri.ns_name self._test_add_update_pd(l3_agent, ri, ns_name) def test_update_no_router_exception(self): l3_agent = mock.Mock() l3_agent.pd.routers = {} router = mock.Mock() router.router_id = '1' with mock.patch.object(pd.LOG, 'exception') as log: pd.update_router(None, None, l3_agent, router=router) self.assertTrue(log.called) def test_remove_stale_ri_ifname(self): pd_info_1 = mock.Mock() pd_info_1.ri_ifname = 'STALE' pd_info_2 = mock.Mock() pd_info_2.ri_ifname = 'NOT_STALE' router = { 'subnets': { 'FAKE_SUBNET_ID1': pd_info_1, 'FAKE_SUBNET_ID2': pd_info_2}} class FakePD(pd.PrefixDelegation): def __init__(self, router): self.routers = {'FAKE_ROUTER_ID': router} fake_pd = FakePD(router) fake_pd._delete_pd = mock.Mock() fake_pd.remove_stale_ri_ifname('FAKE_ROUTER_ID', 'STALE') fake_pd._delete_pd.assert_called_with(router, pd_info_1) self.assertEqual(len(router['subnets'].keys()), 1)

# neutron/tests/unit/agent/linux/test_tc_lib.py
# Copyright 2016 OVH SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib import constants from neutron_lib.exceptions import qos as qos_exc from neutron_lib.services.qos import constants as qos_consts from pyroute2.netlink import rtnl from neutron.agent.linux import tc_lib from neutron.common import utils from neutron.privileged.agent.linux import tc_lib as priv_tc_lib from neutron.tests import base DEVICE_NAME = "tap_device" KERNEL_HZ_VALUE = 1000 BW_LIMIT = 2000 # [kbps] BURST = 100 # [kbit] LATENCY = 50 # [ms] TC_FILTERS_OUTPUT = ( 'filter protocol all pref 49152 u32 \nfilter protocol all pref ' '49152 u32 fh 800: ht divisor 1 \nfilter protocol all pref 49152 u32 fh ' '800::800 order 2048 key ht 800 \n match 00000000/00000000 at 0\n ' 'police 0x1e rate %(bw)skbit burst %(burst)skbit mtu 2Kb action \n' 'drop overhead 0b \n ref 1 bind 1' ) % {'bw': BW_LIMIT, 'burst': BURST} class BaseUnitConversionTest(object): def test_convert_to_kilobits_bare_value(self): value = "1000" expected_value = 8 # kbit self.assertEqual( expected_value, tc_lib.convert_to_kilobits(value, self.base_unit) ) def test_convert_to_kilobits_bytes_value(self): value = "1000b" expected_value = 8 # kbit self.assertEqual( expected_value, tc_lib.convert_to_kilobits(value, self.base_unit) ) def test_convert_to_kilobits_bits_value(self): value = "1000bit" expected_value = utils.bits_to_kilobits(1000, self.base_unit) self.assertEqual( expected_value, tc_lib.convert_to_kilobits(value, self.base_unit) ) def test_convert_to_kilobits_megabytes_value(self): value = "1m" expected_value = utils.bits_to_kilobits( self.base_unit ** 2 * 8, self.base_unit) self.assertEqual( expected_value, tc_lib.convert_to_kilobits(value, self.base_unit) ) def test_convert_to_kilobits_megabits_value(self): value = "1mbit" expected_value = utils.bits_to_kilobits( self.base_unit ** 2, self.base_unit) self.assertEqual( expected_value, tc_lib.convert_to_kilobits(value, self.base_unit) ) def test_convert_to_bytes_wrong_unit(self): value = "1Zbit" self.assertRaises( tc_lib.InvalidUnit, tc_lib.convert_to_kilobits, value, self.base_unit ) class TestSIUnitConversions(BaseUnitConversionTest, base.BaseTestCase): base_unit = constants.SI_BASE class TestIECUnitConversions(BaseUnitConversionTest, base.BaseTestCase): base_unit = constants.IEC_BASE class TestHandleFromHexToString(base.BaseTestCase): def test_run(self): test_cases = [(0x1, '0:1'), (0x2a003f, '2a:3f'), (0xf0000, 'f:0'), (0xffffffff, 'ffff:ffff'), (0x12345678, '1234:5678')] for _in, expected in test_cases: self.assertEqual(expected, tc_lib._handle_from_hex_to_string(_in)) class TestTcCommand(base.BaseTestCase): def setUp(self): super(TestTcCommand, self).setUp() self.tc = tc_lib.TcCommand(DEVICE_NAME, KERNEL_HZ_VALUE) self.mock_list_tc_qdiscs = mock.patch.object(tc_lib, 'list_tc_qdiscs').start() self.mock_add_tc_qdisc = mock.patch.object(tc_lib, 'add_tc_qdisc').start() self.mock_delete_tc_qdisc = mock.patch.object( tc_lib, 'delete_tc_qdisc').start() self.mock_list_tc_filters = mock.patch.object( tc_lib, 'list_tc_filters').start() self.mock_add_tc_filter_policy = mock.patch.object( tc_lib, 'add_tc_filter_policy').start() def test_check_kernel_hz_lower_then_zero(self): self.assertRaises( tc_lib.InvalidKernelHzValue, tc_lib.TcCommand, DEVICE_NAME, 0 ) self.assertRaises( tc_lib.InvalidKernelHzValue, tc_lib.TcCommand, DEVICE_NAME, -100 ) def test_get_filters_bw_limits(self): self.mock_list_tc_filters.return_value = [{'rate_kbps': BW_LIMIT, 'burst_kb': 
BURST}] bw_limit, burst_limit = self.tc.get_filters_bw_limits() self.assertEqual(BW_LIMIT, bw_limit) self.assertEqual(BURST, burst_limit) def test_get_filters_bw_limits_no_filters(self): self.mock_list_tc_filters.return_value = [] bw_limit, burst_limit = self.tc.get_filters_bw_limits() self.assertIsNone(bw_limit) self.assertIsNone(burst_limit) def test_get_filters_bw_limits_no_rate_info(self): self.mock_list_tc_filters.return_value = [{'other_values': 1}] bw_limit, burst_limit = self.tc.get_filters_bw_limits() self.assertIsNone(bw_limit) self.assertIsNone(burst_limit) def test_get_tbf_bw_limits(self): self.mock_list_tc_qdiscs.return_value = [ {'qdisc_type': 'tbf', 'max_kbps': BW_LIMIT, 'burst_kb': BURST}] self.assertEqual((BW_LIMIT, BURST), self.tc.get_tbf_bw_limits()) def test_get_tbf_bw_limits_when_wrong_qdisc(self): self.mock_list_tc_qdiscs.return_value = [{'qdisc_type': 'other_type'}] self.assertEqual((None, None), self.tc.get_tbf_bw_limits()) def test_set_tbf_bw_limit(self): self.tc.set_tbf_bw_limit(BW_LIMIT, BURST, LATENCY) self.mock_add_tc_qdisc.assert_called_once_with( DEVICE_NAME, 'tbf', parent='root', max_kbps=BW_LIMIT, burst_kb=BURST, latency_ms=LATENCY, kernel_hz=self.tc.kernel_hz, namespace=self.tc.namespace) def test_update_filters_bw_limit(self): self.tc.update_filters_bw_limit(BW_LIMIT, BURST) self.mock_add_tc_qdisc.assert_called_once_with( self.tc.name, 'ingress', namespace=self.tc.namespace) self.mock_delete_tc_qdisc.assert_called_once_with( self.tc.name, is_ingress=True, raise_interface_not_found=False, raise_qdisc_not_found=False, namespace=self.tc.namespace) self.mock_add_tc_filter_policy.assert_called_once_with( self.tc.name, tc_lib.INGRESS_QDISC_ID, BW_LIMIT, BURST, tc_lib.MAX_MTU_VALUE, 'drop', priority=49) def test_delete_filters_bw_limit(self): self.tc.delete_filters_bw_limit() self.mock_delete_tc_qdisc.assert_called_once_with( DEVICE_NAME, is_ingress=True, raise_interface_not_found=False, raise_qdisc_not_found=False, namespace=self.tc.namespace) def test_delete_tbf_bw_limit(self): self.tc.delete_tbf_bw_limit() self.mock_delete_tc_qdisc.assert_called_once_with( DEVICE_NAME, parent='root', raise_interface_not_found=False, raise_qdisc_not_found=False, namespace=self.tc.namespace) def test_get_ingress_qdisc_burst_value_burst_not_none(self): self.assertEqual( BURST, self.tc.get_ingress_qdisc_burst_value(BW_LIMIT, BURST) ) def test_get_ingress_qdisc_burst_no_burst_value_given(self): expected_burst = BW_LIMIT * qos_consts.DEFAULT_BURST_RATE self.assertEqual( expected_burst, self.tc.get_ingress_qdisc_burst_value(BW_LIMIT, None) ) def test_get_ingress_qdisc_burst_burst_value_zero(self): expected_burst = BW_LIMIT * qos_consts.DEFAULT_BURST_RATE self.assertEqual( expected_burst, self.tc.get_ingress_qdisc_burst_value(BW_LIMIT, 0) ) class TcTestCase(base.BaseTestCase): def setUp(self): super(TcTestCase, self).setUp() self.mock_add_tc_qdisc = mock.patch.object( priv_tc_lib, 'add_tc_qdisc').start() self.namespace = 'namespace' def test_add_tc_qdisc_htb(self): tc_lib.add_tc_qdisc('device', 'htb', parent='root', handle='1:', namespace=self.namespace) self.mock_add_tc_qdisc.assert_called_once_with( 'device', parent=rtnl.TC_H_ROOT, kind='htb', handle='1:0', namespace=self.namespace) self.mock_add_tc_qdisc.reset_mock() tc_lib.add_tc_qdisc('device', 'htb', parent='root', handle='2', namespace=self.namespace) self.mock_add_tc_qdisc.assert_called_once_with( 'device', parent=rtnl.TC_H_ROOT, kind='htb', handle='2:0', namespace=self.namespace) self.mock_add_tc_qdisc.reset_mock() 
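            # NOTE: add_tc_qdisc() is expected to normalize every handle
            # spelling ('1:', '2', '3:12' or a bare int) to 'major:0',
            # since a qdisc handle's minor number is always zero. The
            # '3:12' case below checks that a non-zero minor is dropped.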
tc_lib.add_tc_qdisc('device', 'htb', parent='root', handle='3:12', namespace=self.namespace) self.mock_add_tc_qdisc.assert_called_once_with( 'device', parent=rtnl.TC_H_ROOT, kind='htb', handle='3:0', namespace=self.namespace) self.mock_add_tc_qdisc.reset_mock() tc_lib.add_tc_qdisc('device', 'htb', parent='root', handle=4, namespace=self.namespace) self.mock_add_tc_qdisc.assert_called_once_with( 'device', parent=rtnl.TC_H_ROOT, kind='htb', handle='4:0', namespace=self.namespace) self.mock_add_tc_qdisc.reset_mock() tc_lib.add_tc_qdisc('device', 'htb', parent='root', namespace=self.namespace) self.mock_add_tc_qdisc.assert_called_once_with( 'device', parent=rtnl.TC_H_ROOT, kind='htb', namespace=self.namespace) self.mock_add_tc_qdisc.reset_mock() tc_lib.add_tc_qdisc('device', 'htb', parent='root', handle=5) self.mock_add_tc_qdisc.assert_called_once_with( 'device', parent=rtnl.TC_H_ROOT, kind='htb', handle='5:0', namespace=None) self.mock_add_tc_qdisc.reset_mock() def test_add_tc_qdisc_tbf(self): tc_lib.add_tc_qdisc('device', 'tbf', parent='root', max_kbps=10000, burst_kb=1500, latency_ms=70, kernel_hz=250, namespace=self.namespace) burst = tc_lib._get_tbf_burst_value(10000, 1500, 70) * 1024 / 8 self.mock_add_tc_qdisc.assert_called_once_with( 'device', parent=rtnl.TC_H_ROOT, kind='tbf', rate=10000 * 128, burst=burst, latency=70000, namespace=self.namespace) def test_add_tc_qdisc_tbf_missing_arguments(self): self.assertRaises( qos_exc.TcLibQdiscNeededArguments, tc_lib.add_tc_qdisc, 'device', 'tbf', parent='root') def test_add_tc_qdisc_wrong_qdisc_type(self): self.assertRaises(qos_exc.TcLibQdiscTypeError, tc_lib.add_tc_qdisc, mock.ANY, 'wrong_qdic_type_name') def test_list_tc_qdiscs_htb(self): qdisc = {'index': 2, 'handle': 327680, 'parent': 4294967295, 'attrs': (('TCA_KIND', 'htb'), )} with mock.patch.object(priv_tc_lib, 'list_tc_qdiscs') as \ mock_list_tc_qdiscs: mock_list_tc_qdiscs.return_value = tuple([qdisc]) qdiscs = tc_lib.list_tc_qdiscs('device', namespace=self.namespace) self.assertEqual(1, len(qdiscs)) self.assertEqual('root', qdiscs[0]['parent']) self.assertEqual('5:0', qdiscs[0]['handle']) self.assertEqual('htb', qdiscs[0]['qdisc_type']) @mock.patch('pyroute2.netlink.rtnl.tcmsg.common.tick_in_usec', 15.625) def test_list_tc_qdiscs_tbf(self): tca_tbf_params = {'buffer': 9375000, 'rate': 320000, 'limit': 208000} qdisc = {'index': 2, 'handle': 327681, 'parent': 4294967295, 'attrs': ( ('TCA_KIND', 'tbf'), ('TCA_OPTIONS', {'attrs': ( ('TCA_TBF_PARMS', tca_tbf_params), )})) } with mock.patch.object(priv_tc_lib, 'list_tc_qdiscs') as \ mock_list_tc_qdiscs: mock_list_tc_qdiscs.return_value = tuple([qdisc]) qdiscs = tc_lib.list_tc_qdiscs('device', namespace=self.namespace) self.assertEqual(1, len(qdiscs)) self.assertEqual('root', qdiscs[0]['parent']) self.assertEqual('5:1', qdiscs[0]['handle']) self.assertEqual('tbf', qdiscs[0]['qdisc_type']) self.assertEqual(2500, qdiscs[0]['max_kbps']) self.assertEqual(1500, qdiscs[0]['burst_kb']) self.assertEqual(50, qdiscs[0]['latency_ms']) def test__get_tbf_burst_value_when_burst_bigger_then_minimal(self): result = tc_lib._get_tbf_burst_value(BW_LIMIT, BURST, KERNEL_HZ_VALUE) self.assertEqual(BURST, result) def test__get_tbf_burst_value_when_burst_smaller_then_minimal(self): result = tc_lib._get_tbf_burst_value(BW_LIMIT, 0, KERNEL_HZ_VALUE) self.assertEqual(2, result) class TcPolicyClassTestCase(base.BaseTestCase): def setUp(self): super(TcPolicyClassTestCase, self).setUp() self.mock_add_tc_policy_class = mock.patch.object( priv_tc_lib, 
'add_tc_policy_class').start() self.mock_list_tc_policy_classes = mock.patch.object( priv_tc_lib, 'list_tc_policy_classes').start() self.namespace = 'namespace' def test_add_tc_policy_class(self): tc_lib.add_tc_policy_class( 'device', 'root', '1:10', min_kbps=1000, max_kbps=2000, burst_kb=1600, namespace=self.namespace) self.mock_add_tc_policy_class.assert_called_once_with( 'device', rtnl.TC_H_ROOT, '1:10', 'htb', rate=1000 * 128, ceil=2000 * 128, burst=1600 * 128, namespace=self.namespace) @mock.patch('pyroute2.netlink.rtnl.tcmsg.common.tick_in_usec', 15.625) def test_list_tc_policy_classes(self): htb_params = {'buffer': 12500000, 'ceil': 256000, 'rate': 192000} self.mock_list_tc_policy_classes.return_value = tuple([ {'index': 3, 'handle': 65537, 'parent': 4294967295, 'attrs': ( ('TCA_KIND', 'htb'), ('TCA_OPTIONS', { 'attrs': tuple([('TCA_HTB_PARMS', htb_params)])})) }]) _class = tc_lib.list_tc_policy_class('device', namespace=self.namespace)[0] reference = {'device': 'device', 'index': 3, 'namespace': self.namespace, 'parent': 'root', 'classid': '1:1', 'qdisc_type': 'htb', 'min_kbps': 1500, 'max_kbps': 2000, 'burst_kb': 1200} self.assertEqual(reference, _class) class TcFilterTestCase(base.BaseTestCase): def test__mac_to_pyroute2_keys(self): mac = '01:23:45:67:89:ab' offset = 10 keys = tc_lib._mac_to_pyroute2_keys(mac, offset) high = {'value': 0x1234567, 'mask': 0xffffffff, 'offset': 10, 'key': '0x1234567/0xffffffff+10'} low = {'value': 0x89ab0000, 'mask': 0xffff0000, 'offset': 14, 'key': '0x89ab0000/0xffff0000+14'} self.assertEqual(high, keys[0]) self.assertEqual(low, keys[1]) @mock.patch.object(priv_tc_lib, 'add_tc_filter_match32') def test_add_tc_filter_vxlan(self, mock_add_filter): tc_lib.add_tc_filter_vxlan('device', 'parent', 'classid', '12:34:56:78:90:ab', 52, namespace='ns') keys = ['0x3400/0xffffff00+32', '0x12345678/0xffffffff+42', '0x90ab0000/0xffff0000+46'] mock_add_filter.assert_called_once_with( 'device', 'parent', 1, 'classid', keys, namespace='ns') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/linux/test_utils.py0000644000175000017500000006160400000000000025773 0ustar00coreycorey00000000000000# Copyright 2012, VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
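# Minimal sketch of the parsing behaviour asserted in TestGetCmdlineFromPid
# below. This is an assumption mirroring the test expectations, not the real
# utils.get_cmdline_from_pid implementation:
def _parse_cmdline_sketch(raw):
    # /proc/<pid>/cmdline is normally NUL-separated ...
    cmdline = raw.rstrip('\0').split('\0')
    if len(cmdline) == 1:
        # ... but a process may rewrite its own cmdline with spaces
        cmdline = cmdline[0].split(' ')
    return cmdline

assert _parse_cmdline_sketch('python3\0test-binary\0test option\0') == [
    'python3', 'test-binary', 'test option']
assert _parse_cmdline_sketch('python3 test-binary test option\0') == [
    'python3', 'test-binary', 'test', 'option']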
import copy import signal import socket import mock import six import testtools from neutron_lib import exceptions from neutron_lib import fixture as lib_fixtures from oslo_config import cfg import oslo_i18n from neutron.agent.linux import utils from neutron.tests import base _marker = object() class AgentUtilsExecuteTest(base.BaseTestCase): def setUp(self): super(AgentUtilsExecuteTest, self).setUp() self.test_file = self.get_temp_file_path('test_execute.tmp') open(self.test_file, 'w').close() self.process = mock.patch('eventlet.green.subprocess.Popen').start() self.process.return_value.returncode = 0 self.mock_popen = self.process.return_value.communicate def test_xenapi_root_helper(self): token = utils.xenapi_root_helper.ROOT_HELPER_DAEMON_TOKEN self.config(group='AGENT', root_helper_daemon=token) with mock.patch( 'neutron.agent.linux.utils.xenapi_root_helper.XenAPIClient')\ as mock_xenapi_class: mock_client = mock_xenapi_class.return_value cmd_client = utils.RootwrapDaemonHelper.get_client() self.assertEqual(cmd_client, mock_client) def test_without_helper(self): expected = "%s\n" % self.test_file self.mock_popen.return_value = [expected, ""] result = utils.execute(["ls", self.test_file]) self.assertEqual(result, expected) def test_with_helper(self): expected = "ls %s\n" % self.test_file self.mock_popen.return_value = [expected, ""] self.config(group='AGENT', root_helper='echo') result = utils.execute(["ls", self.test_file], run_as_root=True) self.assertEqual(result, expected) @mock.patch.object(utils.RootwrapDaemonHelper, 'get_client') def test_with_helper_exception(self, get_client): client_inst = mock.Mock() client_inst.execute.side_effect = RuntimeError get_client.return_value = client_inst self.config(group='AGENT', root_helper_daemon='echo') with mock.patch.object(utils, 'LOG') as log: self.assertRaises(RuntimeError, utils.execute, ['ls'], run_as_root=True) self.assertTrue(log.error.called) def test_stderr_true(self): expected = "%s\n" % self.test_file self.mock_popen.return_value = [expected, ""] out = utils.execute(["ls", self.test_file], return_stderr=True) self.assertIsInstance(out, tuple) self.assertEqual(out, (expected, "")) def test_check_exit_code(self): self.mock_popen.return_value = ["", ""] stdout = utils.execute(["ls", self.test_file[:-1]], check_exit_code=False) self.assertEqual("", stdout) def test_execute_raises(self): self.mock_popen.side_effect = RuntimeError self.assertRaises(RuntimeError, utils.execute, ["ls", self.test_file[:-1]]) def test_process_input(self): expected = "%s\n" % self.test_file[:-1] self.mock_popen.return_value = [expected, ""] result = utils.execute(["cat"], process_input="%s\n" % self.test_file[:-1]) self.assertEqual(result, expected) def test_with_addl_env(self): expected = "%s\n" % self.test_file self.mock_popen.return_value = [expected, ""] result = utils.execute(["ls", self.test_file], addl_env={'foo': 'bar'}) self.assertEqual(result, expected) def test_return_code_log_error_raise_runtime(self): self.mock_popen.return_value = ('', '') self.process.return_value.returncode = 1 with mock.patch.object(utils, 'LOG') as log: self.assertRaises(RuntimeError, utils.execute, ['ls']) self.assertTrue(log.error.called) def test_return_code_log_error_no_raise_runtime(self): self.mock_popen.return_value = ('', '') self.process.return_value.returncode = 1 with mock.patch.object(utils, 'LOG') as log: utils.execute(['ls'], check_exit_code=False) self.assertTrue(log.error.called) def test_return_code_log_debug(self): self.mock_popen.return_value = ('', '') 
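        # setUp() leaves the mocked returncode at 0, so execute() is
        # expected to report the command result at debug level only
        # (contrast with the error-level tests above, which force
        # returncode 1).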
with mock.patch.object(utils, 'LOG') as log: utils.execute(['ls']) self.assertTrue(log.debug.called) def test_return_code_log_error_change_locale(self): ja_output = 'std_out in Japanese' ja_error = 'std_err in Japanese' ja_message_out = oslo_i18n._message.Message(ja_output) ja_message_err = oslo_i18n._message.Message(ja_error) ja_translate_out = oslo_i18n._translate.translate(ja_message_out, 'ja') ja_translate_err = oslo_i18n._translate.translate(ja_message_err, 'ja') self.mock_popen.return_value = (ja_translate_out, ja_translate_err) self.process.return_value.returncode = 1 with mock.patch.object(utils, 'LOG') as log: utils.execute(['ls'], check_exit_code=False) self.assertIn(ja_translate_out, str(log.error.call_args_list)) self.assertIn(ja_translate_err, str(log.error.call_args_list)) def test_return_code_raise_runtime_do_not_log_fail_as_error(self): self.mock_popen.return_value = ('', '') self.process.return_value.returncode = 1 with mock.patch.object(utils, 'LOG') as log: self.assertRaises(exceptions.ProcessExecutionError, utils.execute, ['ls'], log_fail_as_error=False) self.assertFalse(log.error.called) def test_encode_process_input(self): str_idata = "%s\n" % self.test_file[:-1] str_odata = "%s\n" % self.test_file if six.PY3: bytes_idata = str_idata.encode(encoding='utf-8') bytes_odata = str_odata.encode(encoding='utf-8') self.mock_popen.return_value = [bytes_odata, b''] result = utils.execute(['cat'], process_input=str_idata) self.mock_popen.assert_called_once_with(bytes_idata) else: self.mock_popen.return_value = [str_odata, ''] result = utils.execute(['cat'], process_input=str_idata) self.mock_popen.assert_called_once_with(str_idata) self.assertEqual(str_odata, result) def test_return_str_data(self): str_data = "%s\n" % self.test_file self.mock_popen.return_value = [str_data, ''] result = utils.execute(['ls', self.test_file], return_stderr=True) self.assertEqual((str_data, ''), result) def test_surrogateescape_in_decoding_out_data(self): bytes_err_data = b'\xed\xa0\xbd' err_data = bytes_err_data.decode('utf-8', 'surrogateescape') out_data = "%s\n" % self.test_file bytes_out_data = out_data.encode(encoding='utf-8') self.mock_popen.return_value = [bytes_out_data, bytes_err_data] result = utils.execute(['ls', self.test_file], return_stderr=True) self.assertEqual((out_data, err_data), result) class AgentUtilsExecuteEncodeTest(base.BaseTestCase): def setUp(self): super(AgentUtilsExecuteEncodeTest, self).setUp() self.test_file = self.get_temp_file_path('test_execute.tmp') open(self.test_file, 'w').close() def test_decode_return_data(self): str_data = "%s\n" % self.test_file result = utils.execute(['ls', self.test_file], return_stderr=True) self.assertEqual((str_data, ''), result) class TestFindParentPid(base.BaseTestCase): def setUp(self): super(TestFindParentPid, self).setUp() self.m_execute = mock.patch.object(utils, 'execute').start() def test_returns_none_for_no_valid_pid(self): self.m_execute.side_effect = exceptions.ProcessExecutionError( '', returncode=1) self.assertIsNone(utils.find_parent_pid(-1)) def test_returns_parent_id_for_good_ouput(self): self.m_execute.return_value = '123 \n' self.assertEqual(utils.find_parent_pid(-1), '123') def test_raises_exception_returncode_0(self): with testtools.ExpectedException(exceptions.ProcessExecutionError): self.m_execute.side_effect = \ exceptions.ProcessExecutionError('', returncode=0) utils.find_parent_pid(-1) def test_raises_unknown_exception(self): with testtools.ExpectedException(RuntimeError): self.m_execute.side_effect = 
RuntimeError() utils.find_parent_pid(-1) class TestFindForkTopParent(base.BaseTestCase): def _test_find_fork_top_parent(self, expected=_marker, find_parent_pid_retvals=None, pid_invoked_with_cmdline_retvals=None): def _find_parent_pid(x): if find_parent_pid_retvals: return find_parent_pid_retvals.pop(0) pid_invoked_with_cmdline = {} if pid_invoked_with_cmdline_retvals: pid_invoked_with_cmdline['side_effect'] = ( pid_invoked_with_cmdline_retvals) else: pid_invoked_with_cmdline['return_value'] = False with mock.patch.object(utils, 'find_parent_pid', side_effect=_find_parent_pid), \ mock.patch.object(utils, 'pid_invoked_with_cmdline', **pid_invoked_with_cmdline): actual = utils.find_fork_top_parent(_marker) self.assertEqual(expected, actual) def test_returns_own_pid_no_parent(self): self._test_find_fork_top_parent() def test_returns_own_pid_nofork(self): self._test_find_fork_top_parent(find_parent_pid_retvals=['2', '3']) def test_returns_first_parent_pid_fork(self): self._test_find_fork_top_parent( expected='2', find_parent_pid_retvals=['2', '3', '4'], pid_invoked_with_cmdline_retvals=[True, False, False]) def test_returns_top_parent_pid_fork(self): self._test_find_fork_top_parent( expected='4', find_parent_pid_retvals=['2', '3', '4'], pid_invoked_with_cmdline_retvals=[True, True, True]) class TestKillProcess(base.BaseTestCase): def _test_kill_process(self, pid, raise_exception=False, kill_signal=signal.SIGKILL, pid_killed=True): if raise_exception: exc = exceptions.ProcessExecutionError('', returncode=0) else: exc = None with mock.patch.object(utils, 'execute', side_effect=exc) as mock_execute: with mock.patch.object(utils, 'process_is_running', return_value=not pid_killed): utils.kill_process(pid, kill_signal, run_as_root=True) mock_execute.assert_called_with(['kill', '-%d' % kill_signal, pid], run_as_root=True) def test_kill_process_returns_none_for_valid_pid(self): self._test_kill_process('1') def test_kill_process_returns_none_for_stale_pid(self): self._test_kill_process('1', raise_exception=True) def test_kill_process_raises_exception_for_execute_exception(self): with testtools.ExpectedException(exceptions.ProcessExecutionError): # Simulate that the process is running after trying to kill due to # any reason such as, for example, Permission denied self._test_kill_process('1', raise_exception=True, pid_killed=False) def test_kill_process_with_different_signal(self): self._test_kill_process('1', kill_signal=signal.SIGTERM) class TestGetCmdlineFromPid(base.BaseTestCase): def setUp(self): super(TestGetCmdlineFromPid, self).setUp() self.pid = 34 self.process_is_running_mock = mock.patch.object( utils, "process_is_running").start() def _test_cmdline(self, process, expected_cmd): self.process_is_running_mock.return_value = True mock_open = self.useFixture( lib_fixtures.OpenFixture('/proc/%s/cmdline' % self.pid, process) ).mock_open cmdline = utils.get_cmdline_from_pid(self.pid) mock_open.assert_called_once_with('/proc/%s/cmdline' % self.pid, 'r') self.assertEqual(expected_cmd, cmdline) def test_cmdline_separated_with_null_char(self): process_cmd = "python3\0test-binary\0test option\0" expected_cmdline = ["python3", "test-binary", "test option"] self._test_cmdline(process_cmd, expected_cmdline) def test_cmdline_separated_with_space_char(self): process_cmd = "python3 test-binary test option\0" expected_cmdline = ["python3", "test-binary", "test", "option"] self._test_cmdline(process_cmd, expected_cmdline) def test_cmdline_with_single_argument(self): process_cmd = "test-binary\0" expected_cmdline 
= ["test-binary"] self._test_cmdline(process_cmd, expected_cmdline) def test_no_process_running(self): self.process_is_running_mock.return_value = False mock_open = self.useFixture( lib_fixtures.OpenFixture('/proc/%s/cmdline' % self.pid) ).mock_open cmdline = utils.get_cmdline_from_pid(self.pid) mock_open.assert_not_called() self.assertEqual([], cmdline) def test_cmdline_process_disappearing(self): self.process_is_running_mock.return_value = True mock_open = self.useFixture( lib_fixtures.OpenFixture('/proc/%s/cmdline' % self.pid, 'process') ).mock_open mock_open.side_effect = IOError() cmdline = utils.get_cmdline_from_pid(self.pid) mock_open.assert_called_once_with('/proc/%s/cmdline' % self.pid, 'r') self.assertEqual([], cmdline) class TestFindChildPids(base.BaseTestCase): def test_returns_empty_list_for_exit_code_1(self): with mock.patch.object(utils, 'execute', side_effect=exceptions.ProcessExecutionError( '', returncode=1)): self.assertEqual([], utils.find_child_pids(-1)) def test_returns_empty_list_for_no_output(self): with mock.patch.object(utils, 'execute', return_value=''): self.assertEqual([], utils.find_child_pids(-1)) def test_returns_list_of_child_process_ids_for_good_ouput(self): with mock.patch.object(utils, 'execute', return_value=' 123 \n 185\n'): self.assertEqual(utils.find_child_pids(-1), ['123', '185']) def test_returns_list_of_child_process_ids_recursively(self): with mock.patch.object(utils, 'execute', side_effect=[' 123 \n 185\n', ' 40 \n', '\n', '41\n', '\n']): actual = utils.find_child_pids(-1, True) self.assertEqual(actual, ['123', '185', '40', '41']) def test_raises_unknown_exception(self): with testtools.ExpectedException(RuntimeError): with mock.patch.object(utils, 'execute', side_effect=RuntimeError()): utils.find_child_pids(-1) class TestGetRoothelperChildPid(base.BaseTestCase): def _test_get_root_helper_child_pid(self, expected=_marker, run_as_root=False, pids=None, cmds=None): def _find_child_pids(x): if not pids: return [] pids.pop(0) return pids mock_pid = object() pid_invoked_with_cmdline = {} if cmds: pid_invoked_with_cmdline['side_effect'] = cmds else: pid_invoked_with_cmdline['return_value'] = False with mock.patch.object(utils, 'find_child_pids', side_effect=_find_child_pids), \ mock.patch.object(utils, 'pid_invoked_with_cmdline', **pid_invoked_with_cmdline): actual = utils.get_root_helper_child_pid( mock_pid, mock.ANY, run_as_root) if expected is _marker: expected = str(mock_pid) self.assertEqual(expected, actual) def test_returns_process_pid_not_root(self): self._test_get_root_helper_child_pid() def test_returns_child_pid_as_root(self): self._test_get_root_helper_child_pid(expected='2', pids=['1', '2'], run_as_root=True, cmds=[True]) def test_returns_last_child_pid_as_root(self): self._test_get_root_helper_child_pid(expected='3', pids=['1', '2', '3'], run_as_root=True, cmds=[False, True]) def test_returns_first_non_root_helper_child(self): self._test_get_root_helper_child_pid( expected='2', pids=['1', '2', '3'], run_as_root=True, cmds=[True, False]) def test_returns_none_as_root(self): self._test_get_root_helper_child_pid(expected=None, run_as_root=True) class TestPathUtilities(base.BaseTestCase): def test_remove_abs_path(self): self.assertEqual(['ping', '8.8.8.8'], utils.remove_abs_path(['/usr/bin/ping', '8.8.8.8'])) def test_cmd_matches_expected_matches_abs_path(self): cmd = ['/bar/../foo'] self.assertTrue(utils.cmd_matches_expected(cmd, cmd)) def test_cmd_matches_expected_matches_script(self): self.assertTrue(utils.cmd_matches_expected(['python', 
'script'], ['script'])) def test_cmd_matches_expected_doesnt_match(self): self.assertFalse(utils.cmd_matches_expected('foo', 'bar')) class FakeUser(object): def __init__(self, name): self.pw_name = name class FakeGroup(object): def __init__(self, name): self.gr_name = name class TestBaseOSUtils(base.BaseTestCase): EUID = 123 EUNAME = 'user' EGID = 456 EGNAME = 'group' @mock.patch('os.geteuid', return_value=EUID) @mock.patch('pwd.getpwuid', return_value=FakeUser(EUNAME)) def test_is_effective_user_id(self, getpwuid, geteuid): self.assertTrue(utils.is_effective_user(self.EUID)) geteuid.assert_called_once_with() self.assertFalse(getpwuid.called) @mock.patch('os.geteuid', return_value=EUID) @mock.patch('pwd.getpwuid', return_value=FakeUser(EUNAME)) def test_is_effective_user_str_id(self, getpwuid, geteuid): self.assertTrue(utils.is_effective_user(str(self.EUID))) geteuid.assert_called_once_with() self.assertFalse(getpwuid.called) @mock.patch('os.geteuid', return_value=EUID) @mock.patch('pwd.getpwuid', return_value=FakeUser(EUNAME)) def test_is_effective_user_name(self, getpwuid, geteuid): self.assertTrue(utils.is_effective_user(self.EUNAME)) geteuid.assert_called_once_with() getpwuid.assert_called_once_with(self.EUID) @mock.patch('os.geteuid', return_value=EUID) @mock.patch('pwd.getpwuid', return_value=FakeUser(EUNAME)) def test_is_not_effective_user(self, getpwuid, geteuid): self.assertFalse(utils.is_effective_user('wrong')) geteuid.assert_called_once_with() getpwuid.assert_called_once_with(self.EUID) @mock.patch('os.getegid', return_value=EGID) @mock.patch('grp.getgrgid', return_value=FakeGroup(EGNAME)) def test_is_effective_group_id(self, getgrgid, getegid): self.assertTrue(utils.is_effective_group(self.EGID)) getegid.assert_called_once_with() self.assertFalse(getgrgid.called) @mock.patch('os.getegid', return_value=EGID) @mock.patch('grp.getgrgid', return_value=FakeGroup(EGNAME)) def test_is_effective_group_str_id(self, getgrgid, getegid): self.assertTrue(utils.is_effective_group(str(self.EGID))) getegid.assert_called_once_with() self.assertFalse(getgrgid.called) @mock.patch('os.getegid', return_value=EGID) @mock.patch('grp.getgrgid', return_value=FakeGroup(EGNAME)) def test_is_effective_group_name(self, getgrgid, getegid): self.assertTrue(utils.is_effective_group(self.EGNAME)) getegid.assert_called_once_with() getgrgid.assert_called_once_with(self.EGID) @mock.patch('os.getegid', return_value=EGID) @mock.patch('grp.getgrgid', return_value=FakeGroup(EGNAME)) def test_is_not_effective_group(self, getgrgid, getegid): self.assertFalse(utils.is_effective_group('wrong')) getegid.assert_called_once_with() getgrgid.assert_called_once_with(self.EGID) class TestUnixDomainHttpConnection(base.BaseTestCase): def test_connect(self): with mock.patch.object(utils, 'cfg') as cfg: cfg.CONF.metadata_proxy_socket = '/the/path' with mock.patch('socket.socket') as socket_create: conn = utils.UnixDomainHTTPConnection('169.254.169.254', timeout=3) conn.connect() socket_create.assert_has_calls([ mock.call(socket.AF_UNIX, socket.SOCK_STREAM), mock.call().settimeout(3), mock.call().connect('/the/path')] ) self.assertEqual(conn.timeout, 3) class TestUnixDomainHttpProtocol(base.BaseTestCase): def setUp(self): super(TestUnixDomainHttpProtocol, self).setUp() self.ewhi = mock.patch('eventlet.wsgi.HttpProtocol.__init__').start() def test_init_empty_client(self): for addr in ('', b''): utils.UnixDomainHttpProtocol(mock.Mock(), addr, mock.Mock()) self.ewhi.assert_called_once_with(mock.ANY, mock.ANY, ('', 0), mock.ANY) 
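            # both '' and b'' peer addresses must be replaced by the
            # ('', 0) sockaddr tuple before eventlet's HttpProtocol
            # initializer sees them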
self.ewhi.reset_mock() def test_init_with_client(self): utils.UnixDomainHttpProtocol(mock.Mock(), 'foo', mock.Mock()) self.ewhi.assert_called_once_with(mock.ANY, mock.ANY, 'foo', mock.ANY) def test_init_new_style_empty_client(self): conn_state = ['', mock.Mock(), mock.Mock()] # have to make a copy since the init will modify what we pass csc = copy.copy(conn_state) csc[0] = ('', 0) utils.UnixDomainHttpProtocol(conn_state, mock.Mock()) self.ewhi.assert_called_once_with(mock.ANY, csc, mock.ANY) def test_init_new_style_client(self): conn_state = ['foo', mock.Mock(), mock.Mock()] utils.UnixDomainHttpProtocol(conn_state, mock.Mock()) self.ewhi.assert_called_once_with(mock.ANY, conn_state, mock.ANY) def test_init_unknown_client(self): utils.UnixDomainHttpProtocol('foo') self.ewhi.assert_called_once_with(mock.ANY, 'foo') class TestUnixDomainWSGIServer(base.BaseTestCase): def setUp(self): super(TestUnixDomainWSGIServer, self).setUp() self.eventlet_p = mock.patch.object(utils, 'eventlet') self.eventlet = self.eventlet_p.start() def test_start(self): self.server = utils.UnixDomainWSGIServer('test') mock_app = mock.Mock() with mock.patch.object(self.server, '_launch') as launcher: self.server.start(mock_app, '/the/path', workers=5, backlog=128) self.eventlet.assert_has_calls([ mock.call.listen( '/the/path', family=socket.AF_UNIX, backlog=128 )] ) launcher.assert_called_once_with(mock_app, workers=5) def test_run(self): self.server = utils.UnixDomainWSGIServer('test') self.server._run('app', 'sock') self.eventlet.wsgi.server.assert_called_once_with( 'sock', 'app', protocol=utils.UnixDomainHttpProtocol, log=mock.ANY, log_format=cfg.CONF.wsgi_log_format, max_size=self.server.num_threads ) def test_num_threads(self): num_threads = 8 self.server = utils.UnixDomainWSGIServer('test', num_threads=num_threads) self.server._run('app', 'sock') self.eventlet.wsgi.server.assert_called_once_with( 'sock', 'app', protocol=utils.UnixDomainHttpProtocol, log=mock.ANY, log_format=cfg.CONF.wsgi_log_format, max_size=num_threads ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/linux/test_xenapi_root_helper.py0000644000175000017500000000664500000000000030525 0ustar00coreycorey00000000000000# Copyright 2016 Citrix System. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
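# Illustrative sketch (an assumption, not part of this module): the expected
# plugin arguments checked in test_execute below look like plain JSON
# encodings of the command list and of its absent stdin, so the expected
# strings can be derived directly:
import json
assert json.dumps(["ovs-vsctl", "list-ports", "xapi2"]) == (
    '["ovs-vsctl", "list-ports", "xapi2"]')
assert json.dumps(None) == 'null'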
import mock
from oslo_config import cfg
from oslo_rootwrap import cmd as oslo_rootwrap_cmd

from neutron.agent.linux import xenapi_root_helper as helper
from neutron.conf.agent import xenapi_conf
from neutron.tests import base


class TestXenapiRootHelper(base.BaseTestCase):

    def _get_fake_xenapi_client(self):
        class FakeXenapiClient(helper.XenAPIClient):
            def __init__(self):
                self._session = mock.MagicMock()

        return FakeXenapiClient()

    def setUp(self):
        super(TestXenapiRootHelper, self).setUp()
        conf = cfg.CONF
        xenapi_conf.register_xenapi_opts(conf)

    def test_get_return_code_unauthorized(self):
        failure_details = [helper.XENAPI_PLUGIN_FAILURE_ID,
                           'run_command',
                           'PluginError',
                           helper.MSG_UNAUTHORIZED]
        xenapi_client = self._get_fake_xenapi_client()
        rc = xenapi_client._get_return_code(failure_details)
        self.assertEqual(oslo_rootwrap_cmd.RC_UNAUTHORIZED, rc)

    def test_get_return_code_noexecfound(self):
        failure_details = [helper.XENAPI_PLUGIN_FAILURE_ID,
                           'run_command',
                           'PluginError',
                           helper.MSG_NOT_FOUND]
        xenapi_client = self._get_fake_xenapi_client()
        rc = xenapi_client._get_return_code(failure_details)
        self.assertEqual(oslo_rootwrap_cmd.RC_NOEXECFOUND, rc)

    def test_get_return_code_unknown_error(self):
        failure_details = [helper.XENAPI_PLUGIN_FAILURE_ID,
                           'run_command',
                           'PluginError',
                           'Any unknown error']
        xenapi_client = self._get_fake_xenapi_client()
        rc = xenapi_client._get_return_code(failure_details)
        self.assertEqual(helper.RC_UNKNOWN_XENAPI_ERROR, rc)

    def test_execute(self):
        cmd = ["ovs-vsctl", "list-ports", "xapi2"]
        expect_cmd_args = {'cmd': '["ovs-vsctl", "list-ports", "xapi2"]',
                           'cmd_input': 'null'}
        raw_result = '{"returncode": 0, "err": "", "out": "vif158.2"}'
        with mock.patch.object(helper.XenAPIClient, "_call_plugin",
                               return_value=raw_result) as mock_call_plugin:
            xenapi_client = self._get_fake_xenapi_client()
            rc, out, err = xenapi_client.execute(cmd)
            mock_call_plugin.assert_called_once_with(
                'netwrap.py', 'run_command', expect_cmd_args)
            self.assertEqual(0, rc)
            self.assertEqual("vif158.2", out)
            self.assertEqual("", err)

    def test_execute_nocommand(self):
        cmd = []
        xenapi_client = self._get_fake_xenapi_client()
        rc, out, err = xenapi_client.execute(cmd)
        self.assertEqual(oslo_rootwrap_cmd.RC_NOCOMMAND, rc)
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4270456 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/metadata/0000755000175000017500000000000000000000000023634 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/metadata/__init__.py0000644000175000017500000000000000000000000025733 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/metadata/test_agent.py0000644000175000017500000005271400000000000026354 0ustar00coreycorey00000000000000# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib import constants as n_const import testtools import webob from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_utils import fileutils from neutron.agent.linux import utils as agent_utils from neutron.agent.metadata import agent from neutron.agent import metadata_agent from neutron.common import cache_utils as cache from neutron.common import utils from neutron.conf.agent.metadata import config as meta_conf from neutron.tests import base class ConfFixture(config_fixture.Config): def setUp(self): super(ConfFixture, self).setUp() meta_conf.register_meta_conf_opts( meta_conf.METADATA_PROXY_HANDLER_OPTS, self.conf) self.config(auth_ca_cert=None, nova_metadata_host='9.9.9.9', nova_metadata_port=8775, metadata_proxy_shared_secret='secret', nova_metadata_protocol='http', nova_metadata_insecure=True, nova_client_cert='nova_cert', nova_client_priv_key='nova_priv_key') cache.register_oslo_configs(self.conf) class NewCacheConfFixture(ConfFixture): def setUp(self): super(NewCacheConfFixture, self).setUp() self.config( group='cache', enabled=True, backend='oslo_cache.dict', expiration_time=5) class TestMetadataProxyHandlerBase(base.BaseTestCase): fake_conf = cfg.CONF fake_conf_fixture = ConfFixture(fake_conf) def setUp(self): super(TestMetadataProxyHandlerBase, self).setUp() self.useFixture(self.fake_conf_fixture) self.log_p = mock.patch.object(agent, 'LOG') self.log = self.log_p.start() self.handler = agent.MetadataProxyHandler(self.fake_conf) self.handler.plugin_rpc = mock.Mock() self.handler.context = mock.Mock() class TestMetadataProxyHandlerRpc(TestMetadataProxyHandlerBase): def test_get_port_filters(self): router_id = 'test_router_id' ip = '1.2.3.4' networks = ('net_id1', 'net_id2') expected = {'device_id': [router_id], 'device_owner': n_const.ROUTER_INTERFACE_OWNERS, 'network_id': networks, 'fixed_ips': {'ip_address': [ip]}} actual = self.handler._get_port_filters(router_id, ip, networks) self.assertEqual(expected, actual) def test_get_router_networks(self): router_id = 'router-id' expected = ('network_id1', 'network_id2') ports = [{'network_id': 'network_id1', 'something': 42}, {'network_id': 'network_id2', 'something_else': 32}] self.handler.plugin_rpc.get_ports.return_value = ports networks = self.handler._get_router_networks(router_id) self.assertEqual(expected, networks) def test_get_ports_for_remote_address(self): ip = '1.1.1.1' networks = ('network_id1', 'network_id2') expected = [{'port_id': 'port_id1'}, {'port_id': 'port_id2'}] self.handler.plugin_rpc.get_ports.return_value = expected ports = self.handler._get_ports_for_remote_address(ip, networks) self.assertEqual(expected, ports) class _TestMetadataProxyHandlerCacheMixin(object): def test_call(self): req = mock.Mock() with mock.patch.object(self.handler, '_get_instance_and_tenant_id') as get_ids: get_ids.return_value = ('instance_id', 'tenant_id') with mock.patch.object(self.handler, '_proxy_request') as proxy: proxy.return_value = 'value' retval = self.handler(req) self.assertEqual('value', retval) def test_call_skip_cache(self): req = mock.Mock() with mock.patch.object(self.handler, '_get_instance_and_tenant_id') as get_ids: get_ids.return_value = ('instance_id', 'tenant_id') with mock.patch.object(self.handler, '_proxy_request') as proxy: proxy.return_value = webob.exc.HTTPNotFound() self.handler(req) get_ids.assert_called_with(req, skip_cache=True) def 
test_call_no_instance_match(self): req = mock.Mock() with mock.patch.object(self.handler, '_get_instance_and_tenant_id') as get_ids: get_ids.return_value = None, None retval = self.handler(req) self.assertIsInstance(retval, webob.exc.HTTPNotFound) def test_call_internal_server_error(self): req = mock.Mock() with mock.patch.object(self.handler, '_get_instance_and_tenant_id') as get_ids: get_ids.side_effect = Exception retval = self.handler(req) self.assertIsInstance(retval, webob.exc.HTTPInternalServerError) self.assertEqual(2, len(self.log.mock_calls)) def test_get_router_networks(self): router_id = 'router-id' expected = ('network_id1', 'network_id2') ports = [{'network_id': 'network_id1', 'something': 42}, {'network_id': 'network_id2', 'something_else': 32}] mock_get_ports = self.handler.plugin_rpc.get_ports mock_get_ports.return_value = ports networks = self.handler._get_router_networks(router_id) mock_get_ports.assert_called_once_with( mock.ANY, {'device_id': [router_id], 'device_owner': n_const.ROUTER_INTERFACE_OWNERS}) self.assertEqual(expected, networks) def _test_get_router_networks_twice_helper(self): router_id = 'router-id' ports = [{'network_id': 'network_id1', 'something': 42}] expected_networks = ('network_id1',) with mock.patch('oslo_utils.timeutils.utcnow_ts', return_value=0): mock_get_ports = self.handler.plugin_rpc.get_ports mock_get_ports.return_value = ports networks = self.handler._get_router_networks(router_id) mock_get_ports.assert_called_once_with( mock.ANY, {'device_id': [router_id], 'device_owner': n_const.ROUTER_INTERFACE_OWNERS}) self.assertEqual(expected_networks, networks) networks = self.handler._get_router_networks(router_id) def test_get_router_networks_twice(self): self._test_get_router_networks_twice_helper() self.assertEqual( 1, self.handler.plugin_rpc.get_ports.call_count) def _get_ports_for_remote_address_cache_hit_helper(self): remote_address = 'remote_address' networks = ('net1', 'net2') mock_get_ports = self.handler.plugin_rpc.get_ports mock_get_ports.return_value = [{'network_id': 'net1', 'something': 42}] self.handler._get_ports_for_remote_address(remote_address, networks) mock_get_ports.assert_called_once_with( mock.ANY, {'network_id': networks, 'fixed_ips': {'ip_address': [remote_address]}} ) self.assertEqual(1, mock_get_ports.call_count) self.handler._get_ports_for_remote_address(remote_address, networks) def test_get_ports_for_remote_address_cache_hit(self): self._get_ports_for_remote_address_cache_hit_helper() self.assertEqual( 1, self.handler.plugin_rpc.get_ports.call_count) def test_get_ports_network_id(self): network_id = 'network-id' router_id = 'router-id' remote_address = 'remote-address' expected = ['port1'] networks = (network_id,) with mock.patch.object(self.handler, '_get_ports_for_remote_address' ) as mock_get_ip_addr,\ mock.patch.object(self.handler, '_get_router_networks' ) as mock_get_router_networks: mock_get_ip_addr.return_value = expected ports = self.handler._get_ports(remote_address, network_id, router_id) mock_get_ip_addr.assert_called_once_with(remote_address, networks, skip_cache=False) self.assertFalse(mock_get_router_networks.called) self.assertEqual(expected, ports) def test_get_ports_router_id(self): router_id = 'router-id' remote_address = 'remote-address' expected = ['port1'] networks = ('network1', 'network2') with mock.patch.object(self.handler, '_get_ports_for_remote_address', return_value=expected ) as mock_get_ip_addr,\ mock.patch.object(self.handler, '_get_router_networks', return_value=networks ) as 
mock_get_router_networks: ports = self.handler._get_ports(remote_address, router_id=router_id) mock_get_router_networks.assert_called_once_with( router_id, skip_cache=False) mock_get_ip_addr.assert_called_once_with( remote_address, networks, skip_cache=False) self.assertEqual(expected, ports) def test_get_ports_no_id(self): self.assertRaises(TypeError, self.handler._get_ports, 'remote_address') def _get_instance_and_tenant_id_helper(self, headers, list_ports_retval, networks=None, router_id=None): remote_address = '192.168.1.1' headers['X-Forwarded-For'] = remote_address req = mock.Mock(headers=headers) def mock_get_ports(*args, **kwargs): return list_ports_retval.pop(0) self.handler.plugin_rpc.get_ports.side_effect = mock_get_ports instance_id, tenant_id = self.handler._get_instance_and_tenant_id(req) expected = [] if networks and router_id: return (instance_id, tenant_id) if router_id: expected.append( mock.call( mock.ANY, {'device_id': [router_id], 'device_owner': n_const.ROUTER_INTERFACE_OWNERS} ) ) expected.append( mock.call( mock.ANY, {'network_id': networks, 'fixed_ips': {'ip_address': ['192.168.1.1']}} ) ) self.handler.plugin_rpc.get_ports.assert_has_calls(expected) return (instance_id, tenant_id) def test_get_instance_id_router_id(self): router_id = 'the_id' headers = { 'X-Neutron-Router-ID': router_id } networks = ('net1', 'net2') ports = [ [{'network_id': 'net1'}, {'network_id': 'net2'}], [{'device_id': 'device_id', 'tenant_id': 'tenant_id', 'network_id': 'net1'}] ] self.assertEqual( ('device_id', 'tenant_id'), self._get_instance_and_tenant_id_helper(headers, ports, networks=networks, router_id=router_id) ) def test_get_instance_id_router_id_no_match(self): router_id = 'the_id' headers = { 'X-Neutron-Router-ID': router_id } networks = ('net1', 'net2') ports = [ [{'network_id': 'net1'}, {'network_id': 'net2'}], [] ] self.assertEqual( (None, None), self._get_instance_and_tenant_id_helper(headers, ports, networks=networks, router_id=router_id) ) def test_get_instance_id_network_id(self): network_id = 'the_id' headers = { 'X-Neutron-Network-ID': network_id } ports = [ [{'device_id': 'device_id', 'tenant_id': 'tenant_id', 'network_id': 'the_id'}] ] self.assertEqual( ('device_id', 'tenant_id'), self._get_instance_and_tenant_id_helper(headers, ports, networks=('the_id',)) ) def test_get_instance_id_network_id_no_match(self): network_id = 'the_id' headers = { 'X-Neutron-Network-ID': network_id } ports = [[]] self.assertEqual( (None, None), self._get_instance_and_tenant_id_helper(headers, ports, networks=('the_id',)) ) def test_get_instance_id_network_id_and_router_id_invalid(self): network_id = 'the_nid' router_id = 'the_rid' headers = { 'X-Neutron-Network-ID': network_id, 'X-Neutron-Router-ID': router_id } # The call should never do a port lookup, but mock it to verify ports = [ [{'device_id': 'device_id', 'tenant_id': 'tenant_id', 'network_id': network_id}] ] self.assertEqual( (None, None), self._get_instance_and_tenant_id_helper(headers, ports, networks=(network_id,), router_id=router_id) ) def _proxy_request_test_helper(self, response_code=200, method='GET'): hdrs = {'X-Forwarded-For': '8.8.8.8'} body = 'body' req = mock.Mock(path_info='/the_path', query_string='', headers=hdrs, method=method, body=body) resp = mock.MagicMock(status_code=response_code) resp.status.__str__.side_effect = AttributeError resp.content = 'content' req.response = resp with mock.patch.object(self.handler, '_sign_instance_id') as sign: sign.return_value = 'signed' with mock.patch('requests.request') as 
mock_request: resp.headers = {'content-type': 'text/plain'} mock_request.return_value = resp retval = self.handler._proxy_request('the_id', 'tenant_id', req) mock_request.assert_called_once_with( method=method, url='http://9.9.9.9:8775/the_path', headers={ 'X-Forwarded-For': '8.8.8.8', 'X-Instance-ID-Signature': 'signed', 'X-Instance-ID': 'the_id', 'X-Tenant-ID': 'tenant_id' }, data=body, cert=(self.fake_conf.nova_client_cert, self.fake_conf.nova_client_priv_key), verify=False) return retval def test_proxy_request_post(self): response = self._proxy_request_test_helper(method='POST') self.assertEqual('text/plain', response.content_type) self.assertEqual('content', response.body) def test_proxy_request_200(self): response = self._proxy_request_test_helper(200) self.assertEqual('text/plain', response.content_type) self.assertEqual('content', response.body) def test_proxy_request_400(self): self.assertIsInstance(self._proxy_request_test_helper(400), webob.exc.HTTPBadRequest) def test_proxy_request_403(self): self.assertIsInstance(self._proxy_request_test_helper(403), webob.exc.HTTPForbidden) def test_proxy_request_404(self): self.assertIsInstance(self._proxy_request_test_helper(404), webob.exc.HTTPNotFound) def test_proxy_request_409(self): self.assertIsInstance(self._proxy_request_test_helper(409), webob.exc.HTTPConflict) def test_proxy_request_500(self): self.assertIsInstance(self._proxy_request_test_helper(500), webob.exc.HTTPInternalServerError) def test_proxy_request_other_code(self): with testtools.ExpectedException(Exception): self._proxy_request_test_helper(302) def test_sign_instance_id(self): self.assertEqual( '773ba44693c7553d6ee20f61ea5d2757a9a4f4a44d2841ae4e95b52e4cd62db4', self.handler._sign_instance_id('foo') ) class TestMetadataProxyHandlerNewCache(TestMetadataProxyHandlerBase, _TestMetadataProxyHandlerCacheMixin): fake_conf = cfg.CONF fake_conf_fixture = NewCacheConfFixture(fake_conf) class TestUnixDomainMetadataProxy(base.BaseTestCase): def setUp(self): super(TestUnixDomainMetadataProxy, self).setUp() self.cfg_p = mock.patch.object(agent, 'cfg') self.cfg = self.cfg_p.start() looping_call_p = mock.patch( 'oslo_service.loopingcall.FixedIntervalLoopingCall') self.looping_mock = looping_call_p.start() self.cfg.CONF.metadata_proxy_socket = '/the/path' self.cfg.CONF.metadata_workers = 0 self.cfg.CONF.metadata_backlog = 128 self.cfg.CONF.metadata_proxy_socket_mode = meta_conf.USER_MODE @mock.patch.object(fileutils, 'ensure_tree') def test_init_doesnot_exists(self, ensure_dir): agent.UnixDomainMetadataProxy(mock.Mock()) ensure_dir.assert_called_once_with('/the', mode=0o755) def test_init_exists(self): with mock.patch('os.path.isdir') as isdir: with mock.patch('os.unlink') as unlink: isdir.return_value = True agent.UnixDomainMetadataProxy(mock.Mock()) unlink.assert_called_once_with('/the/path') def test_init_exists_unlink_no_file(self): with mock.patch('os.path.isdir') as isdir: with mock.patch('os.unlink') as unlink: with mock.patch('os.path.exists') as exists: isdir.return_value = True exists.return_value = False unlink.side_effect = OSError agent.UnixDomainMetadataProxy(mock.Mock()) unlink.assert_called_once_with('/the/path') def test_init_exists_unlink_fails_file_still_exists(self): with mock.patch('os.path.isdir') as isdir: with mock.patch('os.unlink') as unlink: with mock.patch('os.path.exists') as exists: isdir.return_value = True exists.return_value = True unlink.side_effect = OSError with testtools.ExpectedException(OSError): agent.UnixDomainMetadataProxy(mock.Mock()) 
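                    # the OSError from unlink is tolerated because the
                    # socket path no longer exists; the proxy init must
                    # still have attempted the unlink exactly once: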
unlink.assert_called_once_with('/the/path') @mock.patch.object(agent, 'MetadataProxyHandler') @mock.patch.object(agent_utils, 'UnixDomainWSGIServer') @mock.patch.object(fileutils, 'ensure_tree') def test_run(self, ensure_dir, server, handler): p = agent.UnixDomainMetadataProxy(self.cfg.CONF) p.run() ensure_dir.assert_called_once_with('/the', mode=0o755) server.assert_has_calls([ mock.call('neutron-metadata-agent'), mock.call().start(handler.return_value, '/the/path', workers=0, backlog=128, mode=0o644), mock.call().wait()] ) self.looping_mock.assert_called_once_with(p._report_state) self.looping_mock.return_value.start.assert_called_once_with( interval=mock.ANY) def test_main(self): with mock.patch.object(agent, 'UnixDomainMetadataProxy') as proxy: with mock.patch.object(metadata_agent, 'config') as config: with mock.patch.object(metadata_agent, 'cfg') as cfg: with mock.patch.object(utils, 'cfg'): metadata_agent.main() self.assertTrue(config.setup_logging.called) proxy.assert_has_calls([ mock.call(cfg.CONF), mock.call().run()] ) def test_report_state(self): with mock.patch('neutron.agent.rpc.PluginReportStateAPI') as state_api: with mock.patch('os.makedirs'): proxy = agent.UnixDomainMetadataProxy(self.cfg.CONF) proxy._init_state_reporting() self.assertTrue(proxy.agent_state['start_flag']) proxy._report_state() self.assertNotIn('start_flag', proxy.agent_state) state_api_inst = state_api.return_value state_api_inst.report_state.assert_called_once_with( proxy.context, proxy.agent_state, use_call=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/metadata/test_driver.py0000644000175000017500000002111500000000000026540 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
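# Illustrative sketch (hypothetical helper, not the driver's API): the NAT
# rule asserted in TestMetadataDriverRules below is a fixed iptables
# template parameterized only by the metadata proxy port.
def _metadata_nat_rule_sketch(port):
    return ('PREROUTING',
            '-d 169.254.169.254/32 -i qr-+ '
            '-p tcp -m tcp --dport 80 -j REDIRECT --to-ports %s' % port)

assert _metadata_nat_rule_sketch(9697)[1].endswith('--to-ports 9697')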
import os import mock from neutron_lib import constants from neutron_lib import fixture as lib_fixtures from oslo_config import cfg from oslo_utils import uuidutils from neutron.agent.l3 import agent as l3_agent from neutron.agent.l3 import router_info from neutron.agent.linux import iptables_manager from neutron.agent.metadata import driver as metadata_driver from neutron.conf.agent import common as agent_config from neutron.conf.agent.l3 import config as l3_config from neutron.conf.agent.l3 import ha as ha_conf from neutron.conf.agent.metadata import config as meta_conf from neutron.tests import base from neutron.tests.unit.agent.linux import test_utils _uuid = uuidutils.generate_uuid class TestMetadataDriverRules(base.BaseTestCase): def test_metadata_nat_rules(self): rules = ('PREROUTING', '-d 169.254.169.254/32 -i qr-+ ' '-p tcp -m tcp --dport 80 -j REDIRECT --to-ports 9697') self.assertEqual( [rules], metadata_driver.MetadataDriver.metadata_nat_rules(9697)) def test_metadata_filter_rules(self): rules = [('INPUT', '-m mark --mark 0x1/%s -j ACCEPT' % constants.ROUTER_MARK_MASK), ('INPUT', '-p tcp -m tcp --dport 9697 -j DROP')] self.assertEqual( rules, metadata_driver.MetadataDriver.metadata_filter_rules(9697, '0x1')) class TestMetadataDriverProcess(base.BaseTestCase): EUNAME = 'neutron' EGNAME = 'neutron' METADATA_DEFAULT_IP = '169.254.169.254' METADATA_PORT = 8080 METADATA_SOCKET = '/socket/path' PIDFILE = 'pidfile' def setUp(self): super(TestMetadataDriverProcess, self).setUp() mock.patch('eventlet.spawn').start() agent_config.register_interface_driver_opts_helper(cfg.CONF) cfg.CONF.set_override('interface_driver', 'neutron.agent.linux.interface.NullDriver') mock.patch('neutron.agent.l3.agent.L3PluginApi').start() mock.patch('neutron.agent.l3.ha.AgentMixin' '._init_ha_conf_path').start() l3_config.register_l3_agent_config_opts(l3_config.OPTS, cfg.CONF) ha_conf.register_l3_agent_ha_opts() meta_conf.register_meta_conf_opts(meta_conf.SHARED_OPTS, cfg.CONF) def test_after_router_updated_called_on_agent_process_update(self): with mock.patch.object(metadata_driver, 'after_router_updated') as f,\ mock.patch.object(router_info.RouterInfo, 'process'): agent = l3_agent.L3NATAgent('localhost') router_id = _uuid() router = {'id': router_id} ri = router_info.RouterInfo(mock.Mock(), router_id, router, agent.conf, mock.ANY) agent.router_info[router_id] = ri agent._process_updated_router(router) f.assert_called_once_with( 'router', 'after_update', agent, router=ri) def test_after_router_updated_should_not_call_add_metadata_rules(self): with mock.patch.object(iptables_manager.IptablesTable, 'add_rule') as f,\ mock.patch.object(iptables_manager.IptablesManager, 'apply'),\ mock.patch.object(metadata_driver.MetadataDriver, 'spawn_monitored_metadata_proxy'),\ mock.patch.object(router_info.RouterInfo, 'process'): agent = l3_agent.L3NATAgent('localhost') router_id = _uuid() router = {'id': router_id} ri = router_info.RouterInfo(mock.Mock(), router_id, router, agent.conf, mock.ANY) agent.router_info[router_id] = ri f.reset_mock() agent._process_updated_router(router) f.assert_not_called() def test_spawn_metadata_proxy(self): router_id = _uuid() router_ns = 'qrouter-%s' % router_id ip_class_path = 'neutron.agent.linux.ip_lib.IPWrapper' cfg.CONF.set_override('metadata_proxy_user', self.EUNAME) cfg.CONF.set_override('metadata_proxy_group', self.EGNAME) cfg.CONF.set_override('metadata_proxy_socket', self.METADATA_SOCKET) cfg.CONF.set_override('debug', True) agent = l3_agent.L3NATAgent('localhost') with 
                mock.patch(ip_class_path) as ip_mock,\
                mock.patch(
                    'neutron.agent.linux.external_process.'
                    'ProcessManager.get_pid_file_name',
                    return_value=self.PIDFILE),\
                mock.patch('pwd.getpwnam',
                           return_value=test_utils.FakeUser(self.EUNAME)),\
                mock.patch('grp.getgrnam',
                           return_value=test_utils.FakeGroup(self.EGNAME)),\
                mock.patch('os.makedirs'):
            cfg_file = os.path.join(
                metadata_driver.HaproxyConfigurator.get_config_path(
                    agent.conf.state_path),
                "%s.conf" % router_id)
            mock_open = self.useFixture(
                lib_fixtures.OpenFixture(cfg_file)).mock_open
            agent.metadata_driver.spawn_monitored_metadata_proxy(
                agent.process_monitor,
                router_ns,
                self.METADATA_PORT,
                agent.conf,
                bind_address=self.METADATA_DEFAULT_IP,
                router_id=router_id)

            netns_execute_args = [
                'haproxy',
                '-f', cfg_file]

            log_tag = ("haproxy-" + metadata_driver.METADATA_SERVICE_NAME +
                       "-" + router_id)
            cfg_contents = metadata_driver._HAPROXY_CONFIG_TEMPLATE % {
                'user': self.EUNAME,
                'group': self.EGNAME,
                'host': self.METADATA_DEFAULT_IP,
                'port': self.METADATA_PORT,
                'unix_socket_path': self.METADATA_SOCKET,
                'res_type': 'Router',
                'res_id': router_id,
                'res_type_del': 'Network',
                'pidfile': self.PIDFILE,
                'log_level': 'debug',
                'log_tag': log_tag}

            mock_open.assert_has_calls([
                mock.call(cfg_file, 'w'),
                mock.call().write(cfg_contents)],
                any_order=True)

            ip_mock.assert_has_calls([
                mock.call(namespace=router_ns),
                mock.call().netns.execute(netns_execute_args,
                                          addl_env=None,
                                          run_as_root=True)
            ])

    def test_create_config_file_wrong_user(self):
        with mock.patch('pwd.getpwnam', side_effect=KeyError):
            config = metadata_driver.HaproxyConfigurator(_uuid(), mock.ANY,
                                                         mock.ANY, mock.ANY,
                                                         mock.ANY,
                                                         self.EUNAME,
                                                         self.EGNAME,
                                                         mock.ANY, mock.ANY)
            self.assertRaises(metadata_driver.InvalidUserOrGroupException,
                              config.create_config_file)

    def test_create_config_file_wrong_group(self):
        with mock.patch('grp.getgrnam', side_effect=KeyError),\
                mock.patch('pwd.getpwnam',
                           return_value=test_utils.FakeUser(self.EUNAME)):
            config = metadata_driver.HaproxyConfigurator(_uuid(), mock.ANY,
                                                         mock.ANY, mock.ANY,
                                                         mock.ANY,
                                                         self.EUNAME,
                                                         self.EGNAME,
                                                         mock.ANY, mock.ANY)
            self.assertRaises(metadata_driver.InvalidUserOrGroupException,
                              config.create_config_file)


# ---- neutron/tests/unit/agent/ovn/__init__.py (empty) ----
# ---- neutron/tests/unit/agent/ovn/metadata/__init__.py (empty) ----
# ---- neutron/tests/unit/agent/ovn/metadata/test_agent.py ----
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections

import mock
from oslo_config import cfg
from oslo_config import fixture as config_fixture

from neutron.agent.linux import ip_lib
from neutron.agent.linux.ip_lib import IpAddrCommand as ip_addr
from neutron.agent.linux.ip_lib import IpLinkCommand as ip_link
from neutron.agent.linux.ip_lib import IpNetnsCommand as ip_netns
from neutron.agent.linux.ip_lib import IPWrapper as ip_wrap
from neutron.agent.ovn.metadata import agent
from neutron.agent.ovn.metadata import driver
from neutron.conf.agent.metadata import config as meta_conf
from neutron.conf.agent.ovn.metadata import config as ovn_meta_conf
from neutron.tests import base


OvnPortInfo = collections.namedtuple(
    'OvnPortInfo',
    ['datapath', 'type', 'mac', 'external_ids', 'logical_port'])
DatapathInfo = collections.namedtuple('DatapathInfo', 'uuid')


def makePort(datapath=None, type='', mac=None, external_ids=None,
             logical_port=None):
    return OvnPortInfo(datapath, type, mac, external_ids, logical_port)


class ConfFixture(config_fixture.Config):
    def setUp(self):
        super(ConfFixture, self).setUp()
        ovn_meta_conf.register_meta_conf_opts(meta_conf.SHARED_OPTS, self.conf)
        ovn_meta_conf.register_meta_conf_opts(
            meta_conf.UNIX_DOMAIN_METADATA_PROXY_OPTS, self.conf)
        ovn_meta_conf.register_meta_conf_opts(
            meta_conf.METADATA_PROXY_HANDLER_OPTS, self.conf)
        ovn_meta_conf.register_meta_conf_opts(
            ovn_meta_conf.OVS_OPTS, self.conf, group='ovs')


class TestMetadataAgent(base.BaseTestCase):
    fake_conf = cfg.CONF
    fake_conf_fixture = ConfFixture(fake_conf)

    def setUp(self):
        super(TestMetadataAgent, self).setUp()
        self.useFixture(self.fake_conf_fixture)
        self.log_p = mock.patch.object(agent, 'LOG')
        self.log = self.log_p.start()
        self.agent = agent.MetadataAgent(self.fake_conf)
        self.agent.sb_idl = mock.Mock()
        self.agent.ovs_idl = mock.Mock()
        self.agent.ovs_idl.transaction = mock.MagicMock()
        self.agent.chassis = 'chassis'
        self.agent.ovn_bridge = 'br-int'

    def test_sync(self):
        with mock.patch.object(
                self.agent, 'ensure_all_networks_provisioned') as enp,\
                mock.patch.object(
                    ip_lib, 'list_network_namespaces') as lnn,\
                mock.patch.object(
                    self.agent, 'teardown_datapath') as tdp:
            enp.return_value = ['ovnmeta-1', 'ovnmeta-2']
            lnn.return_value = ['ovnmeta-1', 'ovnmeta-2']

            self.agent.sync()

            enp.assert_called_once_with()
            lnn.assert_called_once_with()
            tdp.assert_not_called()

    def test_sync_teardown_namespace(self):
        """Test that sync tears down unneeded metadata namespaces."""
        with mock.patch.object(
                self.agent, 'ensure_all_networks_provisioned') as enp,\
                mock.patch.object(
                    ip_lib, 'list_network_namespaces') as lnn,\
                mock.patch.object(
                    self.agent, 'teardown_datapath') as tdp:
            enp.return_value = ['ovnmeta-1', 'ovnmeta-2']
            lnn.return_value = ['ovnmeta-1', 'ovnmeta-2', 'ovnmeta-3',
                                'ns1', 'ns2']

            self.agent.sync()

            enp.assert_called_once_with()
            lnn.assert_called_once_with()
            tdp.assert_called_once_with('3')

    def test_ensure_all_networks_provisioned(self):
        """Test networks are provisioned.

        This test simulates that this chassis has the following ports:
            * datapath '0': 1 port
            * datapath '1': 2 ports
            * datapath '2': 1 port
            * datapath '3': 1 port with type 'external'
            * datapath '5': 1 port with type 'unknown'

        It is expected that datapaths '0', '1', '2' and '3' are each
        provisioned exactly once ('external' ports are still served),
        while the datapath whose port has type 'unknown' is skipped.
        """
        ports = []
        for i in range(0, 3):
            ports.append(makePort(datapath=DatapathInfo(uuid=str(i))))
        ports.append(makePort(datapath=DatapathInfo(uuid='1')))
        ports.append(makePort(datapath=DatapathInfo(uuid='3'),
                              type='external'))
        ports.append(makePort(datapath=DatapathInfo(uuid='5'),
                              type='unknown'))

        with mock.patch.object(self.agent, 'provision_datapath',
                               return_value=None) as pdp,\
                mock.patch.object(self.agent.sb_idl, 'get_ports_on_chassis',
                                  return_value=ports):
            self.agent.ensure_all_networks_provisioned()

            expected_calls = [mock.call(str(i)) for i in range(0, 4)]
            self.assertEqual(sorted(expected_calls),
                             sorted(pdp.call_args_list))

    def test_update_datapath_provision(self):
        ports = []
        for i in range(0, 3):
            ports.append(makePort(datapath=DatapathInfo(uuid=str(i))))
        ports.append(makePort(datapath=DatapathInfo(uuid='3'),
                              type='external'))

        with mock.patch.object(self.agent, 'provision_datapath',
                               return_value=None) as pdp,\
                mock.patch.object(self.agent, 'teardown_datapath') as tdp,\
                mock.patch.object(self.agent.sb_idl, 'get_ports_on_chassis',
                                  return_value=ports):
            self.agent.update_datapath('1')
            self.agent.update_datapath('3')
            expected_calls = [mock.call('1'), mock.call('3')]
            pdp.assert_has_calls(expected_calls)
            tdp.assert_not_called()

    def test_update_datapath_teardown(self):
        ports = []
        for i in range(0, 3):
            ports.append(makePort(datapath=DatapathInfo(uuid=str(i))))

        with mock.patch.object(self.agent, 'provision_datapath',
                               return_value=None) as pdp,\
                mock.patch.object(self.agent, 'teardown_datapath') as tdp,\
                mock.patch.object(self.agent.sb_idl, 'get_ports_on_chassis',
                                  return_value=ports):
            self.agent.update_datapath('5')
            tdp.assert_called_once_with('5')
            pdp.assert_not_called()

    def test_teardown_datapath(self):
        """Test teardown datapath.

        Check that the VETH pair, OVS port and namespace associated to this
        datapath are deleted and the metadata proxy is destroyed.
        """
        with mock.patch.object(self.agent,
                               'update_chassis_metadata_networks'),\
                mock.patch.object(
                    ip_netns, 'exists', return_value=True),\
                mock.patch.object(
                    ip_lib, 'device_exists', return_value=True),\
                mock.patch.object(
                    ip_wrap, 'garbage_collect_namespace') as garbage_collect,\
                mock.patch.object(
                    ip_wrap, 'del_veth') as del_veth,\
                mock.patch.object(agent.MetadataAgent, '_get_veth_name',
                                  return_value=['veth_0', 'veth_1']),\
                mock.patch.object(
                    driver.MetadataDriver,
                    'destroy_monitored_metadata_proxy') as destroy_mdp:
            self.agent.teardown_datapath('1')

            destroy_mdp.assert_called_once_with(
                mock.ANY, '1', mock.ANY, 'ovnmeta-1')
            self.agent.ovs_idl.del_port.assert_called_once_with('veth_0')
            del_veth.assert_called_once_with('veth_0')
            garbage_collect.assert_called_once_with()

    def test_provision_datapath(self):
        """Test datapath provisioning.

        Check that the VETH pair, OVS port and namespace associated to this
        datapath are created, that the interface is properly configured with
        the right IP addresses and that the metadata proxy is spawned.
""" metadata_port = makePort(mac=['aa:bb:cc:dd:ee:ff'], external_ids={ 'neutron:cidrs': '10.0.0.1/23 ' '2001:470:9:1224:5595:dd51:6ba2:e788/64'}, logical_port='port') with mock.patch.object(self.agent.sb_idl, 'get_metadata_port_network', return_value=metadata_port),\ mock.patch.object( ip_lib, 'device_exists', return_value=False),\ mock.patch.object( ip_lib.IPDevice, 'exists', return_value=False),\ mock.patch.object(agent.MetadataAgent, '_get_veth_name', return_value=['veth_0', 'veth_1']),\ mock.patch.object(agent.MetadataAgent, '_get_namespace_name', return_value='namespace'),\ mock.patch.object(ip_link, 'set_up') as link_set_up,\ mock.patch.object(ip_link, 'set_address') as link_set_addr,\ mock.patch.object(ip_addr, 'list', return_value=[]),\ mock.patch.object(ip_addr, 'add') as ip_addr_add,\ mock.patch.object( ip_wrap, 'add_veth', return_value=[ip_lib.IPDevice('ip1'), ip_lib.IPDevice('ip2')]) as add_veth,\ mock.patch.object( self.agent, 'update_chassis_metadata_networks') as update_chassis,\ mock.patch.object( driver.MetadataDriver, 'spawn_monitored_metadata_proxy') as spawn_mdp: # Simulate that the VETH pair was already present in 'br-fake'. # We need to assert that it was deleted first. self.agent.ovs_idl.list_br.return_value.execute.return_value = ( ['br-int', 'br-fake']) self.agent.provision_datapath('1') # Check that the port was deleted from br-fake self.agent.ovs_idl.del_port.assert_called_once_with( 'veth_0', bridge='br-fake', if_exists=True) # Check that the VETH pair is created add_veth.assert_called_once_with('veth_0', 'veth_1', 'namespace') # Make sure that the two ends of the VETH pair have been set as up. self.assertEqual(2, link_set_up.call_count) link_set_addr.assert_called_once_with('aa:bb:cc:dd:ee:ff') # Make sure that the port has been added to OVS. self.agent.ovs_idl.add_port.assert_called_once_with( 'br-int', 'veth_0') self.agent.ovs_idl.db_set.assert_called_once_with( 'Interface', 'veth_0', ('external_ids', {'iface-id': 'port'})) # Check that the metadata port has the IP addresses properly # configured and that IPv6 address has been skipped. expected_calls = [mock.call('10.0.0.1/23'), mock.call('169.254.169.254/16')] self.assertEqual(sorted(expected_calls), sorted(ip_addr_add.call_args_list)) # Check that metadata proxy has been spawned spawn_mdp.assert_called_once_with( mock.ANY, 'namespace', 80, mock.ANY, bind_address='169.254.169.254', network_id='1') # Check that the chassis has been updated with the datapath. 
            update_chassis.assert_called_once_with('1')

    def _test_update_chassis_metadata_networks_helper(
            self, dp, remove, expected_dps, txn_called=True):
        current_dps = ['0', '1', '2']

        with mock.patch.object(self.agent.sb_idl,
                               'get_chassis_metadata_networks',
                               return_value=current_dps),\
                mock.patch.object(self.agent.sb_idl,
                                  'set_chassis_metadata_networks',
                                  return_value=True),\
                mock.patch.object(self.agent.sb_idl,
                                  'create_transaction') as create_txn_mock:

            self.agent.update_chassis_metadata_networks(dp, remove=remove)
            updated_dps = self.agent.sb_idl.get_chassis_metadata_networks(
                self.agent.chassis)

            self.assertEqual(updated_dps, expected_dps)
            self.assertEqual(create_txn_mock.called, txn_called)

    def test_update_chassis_metadata_networks_add(self):
        dp = '4'
        remove = False
        expected_dps = ['0', '1', '2', '4']
        self._test_update_chassis_metadata_networks_helper(
            dp, remove, expected_dps)

    def test_update_chassis_metadata_networks_remove(self):
        dp = '2'
        remove = True
        expected_dps = ['0', '1']
        self._test_update_chassis_metadata_networks_helper(
            dp, remove, expected_dps)

    def test_update_chassis_metadata_networks_add_dp_exists(self):
        dp = '2'
        remove = False
        expected_dps = ['0', '1', '2']
        self._test_update_chassis_metadata_networks_helper(
            dp, remove, expected_dps, txn_called=False)

    def test_update_chassis_metadata_networks_remove_no_dp(self):
        dp = '3'
        remove = True
        expected_dps = ['0', '1', '2']
        self._test_update_chassis_metadata_networks_helper(
            dp, remove, expected_dps, txn_called=False)


# ---- neutron/tests/unit/agent/ovn/metadata/test_driver.py ----
# Copyright 2017 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
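# NOTE(editor): this module mirrors the L3-agent driver tests earlier in the
# tree for the OVN metadata agent: the same HaproxyConfigurator is exercised,
# but the haproxy config is rendered with res_type 'Network' (an OVN
# datapath) and the namespace uses the agent's NS_PREFIX rather than
# 'qrouter-'.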
import os

import mock
from neutron_lib import fixture as lib_fixtures
from oslo_config import cfg
from oslo_utils import uuidutils

from neutron.agent.ovn.metadata import agent as metadata_agent
from neutron.agent.ovn.metadata import driver as metadata_driver
from neutron.conf.agent.metadata import config as meta_conf
from neutron.conf.agent.ovn.metadata import config as ovn_meta_conf
from neutron.tests import base
from neutron.tests.unit.agent.linux import test_utils

_uuid = uuidutils.generate_uuid


class TestMetadataDriverProcess(base.BaseTestCase):

    EUNAME = 'neutron'
    EGNAME = 'neutron'
    METADATA_DEFAULT_IP = '169.254.169.254'
    METADATA_PORT = 8080
    METADATA_SOCKET = '/socket/path'
    PIDFILE = 'pidfile'

    def setUp(self):
        super(TestMetadataDriverProcess, self).setUp()
        mock.patch('eventlet.spawn').start()
        ovn_meta_conf.register_meta_conf_opts(meta_conf.SHARED_OPTS, cfg.CONF)

    def test_spawn_metadata_proxy(self):
        datapath_id = _uuid()
        metadata_ns = metadata_agent.NS_PREFIX + datapath_id
        ip_class_path = 'neutron.agent.linux.ip_lib.IPWrapper'

        cfg.CONF.set_override('metadata_proxy_user', self.EUNAME)
        cfg.CONF.set_override('metadata_proxy_group', self.EGNAME)
        cfg.CONF.set_override('metadata_proxy_socket', self.METADATA_SOCKET)
        cfg.CONF.set_override('debug', True)

        agent = metadata_agent.MetadataAgent(cfg.CONF)
        with mock.patch(ip_class_path) as ip_mock,\
                mock.patch(
                    'neutron.agent.linux.external_process.'
                    'ProcessManager.get_pid_file_name',
                    return_value=self.PIDFILE),\
                mock.patch('pwd.getpwnam',
                           return_value=test_utils.FakeUser(self.EUNAME)),\
                mock.patch('grp.getgrnam',
                           return_value=test_utils.FakeGroup(self.EGNAME)),\
                mock.patch('os.makedirs'):
            cfg_file = os.path.join(
                metadata_driver.HaproxyConfigurator.get_config_path(
                    cfg.CONF.state_path),
                "%s.conf" % datapath_id)
            mock_open = self.useFixture(
                lib_fixtures.OpenFixture(cfg_file)).mock_open
            metadata_driver.MetadataDriver.spawn_monitored_metadata_proxy(
                agent._process_monitor,
                metadata_ns,
                self.METADATA_PORT,
                cfg.CONF,
                bind_address=self.METADATA_DEFAULT_IP,
                network_id=datapath_id)

            netns_execute_args = [
                'haproxy',
                '-f', cfg_file]

            cfg_contents = metadata_driver._HAPROXY_CONFIG_TEMPLATE % {
                'user': self.EUNAME,
                'group': self.EGNAME,
                'host': self.METADATA_DEFAULT_IP,
                'port': self.METADATA_PORT,
                'unix_socket_path': self.METADATA_SOCKET,
                'res_type': 'Network',
                'res_id': datapath_id,
                'pidfile': self.PIDFILE,
                'log_level': 'debug'}

            mock_open.assert_has_calls([
                mock.call(cfg_file, 'w'),
                mock.call().write(cfg_contents)],
                any_order=True)

            ip_mock.assert_has_calls([
                mock.call(namespace=metadata_ns),
                mock.call().netns.execute(netns_execute_args,
                                          addl_env=None,
                                          run_as_root=True)
            ])

    def test_create_config_file_wrong_user(self):
        with mock.patch('pwd.getpwnam', side_effect=KeyError):
            config = metadata_driver.HaproxyConfigurator(mock.ANY, mock.ANY,
                                                         mock.ANY, mock.ANY,
                                                         mock.ANY,
                                                         self.EUNAME,
                                                         self.EGNAME,
                                                         mock.ANY, mock.ANY)
            self.assertRaises(metadata_driver.InvalidUserOrGroupException,
                              config.create_config_file)

    def test_create_config_file_wrong_group(self):
        with mock.patch('grp.getgrnam', side_effect=KeyError),\
                mock.patch('pwd.getpwnam',
                           return_value=test_utils.FakeUser(self.EUNAME)):
            config = metadata_driver.HaproxyConfigurator(mock.ANY, mock.ANY,
                                                         mock.ANY, mock.ANY,
                                                         mock.ANY,
                                                         self.EUNAME,
                                                         self.EGNAME,
                                                         mock.ANY, mock.ANY)
            self.assertRaises(metadata_driver.InvalidUserOrGroupException,
                              config.create_config_file)
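
# NOTE(editor): a minimal sketch (hypothetical helper, not used by the tests
# above) of how the asserted haproxy config path is composed from the state
# path and the resource UUID, mirroring the cfg_file expression in
# test_spawn_metadata_proxy.
def _example_haproxy_cfg_path(state_path, resource_id):
    # e.g. ('/tmp/state', '<uuid>') -> '<config dir under state path>/<uuid>.conf'
    return os.path.join(
        metadata_driver.HaproxyConfigurator.get_config_path(state_path),
        "%s.conf" % resource_id)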
# ---- neutron/tests/unit/agent/ovn/metadata/test_server.py ----
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections

import mock
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_utils import fileutils
import testtools
import webob

from neutron.agent.linux import utils as agent_utils
from neutron.agent.ovn.metadata import server as agent
from neutron.conf.agent.metadata import config as meta_conf
from neutron.conf.agent.ovn.metadata import config as ovn_meta_conf
from neutron.tests import base


OvnPortInfo = collections.namedtuple('OvnPortInfo', 'external_ids')


class ConfFixture(config_fixture.Config):
    def setUp(self):
        super(ConfFixture, self).setUp()
        ovn_meta_conf.register_meta_conf_opts(
            meta_conf.METADATA_PROXY_HANDLER_OPTS, self.conf)
        self.config(auth_ca_cert=None,
                    nova_metadata_host='9.9.9.9',
                    nova_metadata_port=8775,
                    metadata_proxy_shared_secret='secret',
                    nova_metadata_protocol='http',
                    nova_metadata_insecure=True,
                    nova_client_cert='nova_cert',
                    nova_client_priv_key='nova_priv_key')


class TestMetadataProxyHandler(base.BaseTestCase):
    fake_conf = cfg.CONF
    fake_conf_fixture = ConfFixture(fake_conf)

    def setUp(self):
        super(TestMetadataProxyHandler, self).setUp()
        self.useFixture(self.fake_conf_fixture)
        self.log_p = mock.patch.object(agent, 'LOG')
        self.log = self.log_p.start()
        self.handler = agent.MetadataProxyHandler(self.fake_conf)
        self.handler.sb_idl = mock.Mock()

    def test_call(self):
        req = mock.Mock()
        with mock.patch.object(self.handler,
                               '_get_instance_and_project_id') as get_ids:
            get_ids.return_value = ('instance_id', 'project_id')
            with mock.patch.object(self.handler, '_proxy_request') as proxy:
                proxy.return_value = 'value'

                retval = self.handler(req)
                self.assertEqual(retval, 'value')

    def test_call_no_instance_match(self):
        req = mock.Mock()
        with mock.patch.object(self.handler,
                               '_get_instance_and_project_id') as get_ids:
            get_ids.return_value = None, None
            retval = self.handler(req)
            self.assertIsInstance(retval, webob.exc.HTTPNotFound)

    def test_call_internal_server_error(self):
        req = mock.Mock()
        with mock.patch.object(self.handler,
                               '_get_instance_and_project_id') as get_ids:
            get_ids.side_effect = Exception
            retval = self.handler(req)
            self.assertIsInstance(retval, webob.exc.HTTPInternalServerError)
            self.assertEqual(len(self.log.mock_calls), 2)

    def _get_instance_and_project_id_helper(self, headers, list_ports_retval,
                                            network=None):
        remote_address = '192.168.1.1'
        headers['X-Forwarded-For'] = remote_address
        req = mock.Mock(headers=headers)

        def mock_get_network_port_bindings_by_ip(*args, **kwargs):
            return list_ports_retval.pop(0)

        self.handler.sb_idl.get_network_port_bindings_by_ip.side_effect = (
            mock_get_network_port_bindings_by_ip)

        instance_id, project_id = (
            self.handler._get_instance_and_project_id(req))

        expected = [mock.call(network, '192.168.1.1')]
        self.handler.sb_idl.get_network_port_bindings_by_ip.assert_has_calls(
            expected)
        return (instance_id, project_id)

    def test_get_instance_id_network_id(self):
        network_id = 'the_id'
        headers = {
            'X-OVN-Network-ID': network_id
        }

        ovn_port = OvnPortInfo(
            external_ids={'neutron:device_id': 'device_id',
                          'neutron:project_id': 'project_id'})
        ports = [[ovn_port]]

        self.assertEqual(
            self._get_instance_and_project_id_helper(headers, ports,
                                                     network='the_id'),
            ('device_id', 'project_id')
        )

    def test_get_instance_id_network_id_no_match(self):
        network_id = 'the_id'
        headers = {
            'X-OVN-Network-ID': network_id
        }

        ports = [[]]

        expected = (None, None)
        observed = self._get_instance_and_project_id_helper(headers, ports,
                                                            network='the_id')
        self.assertEqual(expected, observed)

    def _proxy_request_test_helper(self, response_code=200, method='GET'):
        hdrs = {'X-Forwarded-For': '8.8.8.8'}
        body = 'body'

        req = mock.Mock(path_info='/the_path', query_string='', headers=hdrs,
                        method=method, body=body)
        resp = mock.MagicMock(status_code=response_code)
        resp.status.__str__.side_effect = AttributeError
        resp.content = 'content'
        req.response = resp
        with mock.patch.object(self.handler, '_sign_instance_id') as sign:
            sign.return_value = 'signed'
            with mock.patch('requests.request') as mock_request:
                resp.headers = {'content-type': 'text/plain'}
                mock_request.return_value = resp
                retval = self.handler._proxy_request('the_id', 'tenant_id',
                                                     req)
                mock_request.assert_called_once_with(
                    method=method,
                    url='http://9.9.9.9:8775/the_path',
                    headers={
                        'X-Forwarded-For': '8.8.8.8',
                        'X-Instance-ID-Signature': 'signed',
                        'X-Instance-ID': 'the_id',
                        'X-Tenant-ID': 'tenant_id'
                    },
                    data=body,
                    cert=(self.fake_conf.nova_client_cert,
                          self.fake_conf.nova_client_priv_key),
                    verify=False)

                return retval

    def test_proxy_request_post(self):
        response = self._proxy_request_test_helper(method='POST')
        self.assertEqual(response.content_type, "text/plain")
        self.assertEqual(response.body, 'content')

    def test_proxy_request_200(self):
        response = self._proxy_request_test_helper(200)
        self.assertEqual(response.content_type, "text/plain")
        self.assertEqual(response.body, 'content')

    def test_proxy_request_400(self):
        self.assertIsInstance(self._proxy_request_test_helper(400),
                              webob.exc.HTTPBadRequest)

    def test_proxy_request_403(self):
        self.assertIsInstance(self._proxy_request_test_helper(403),
                              webob.exc.HTTPForbidden)

    def test_proxy_request_404(self):
        self.assertIsInstance(self._proxy_request_test_helper(404),
                              webob.exc.HTTPNotFound)

    def test_proxy_request_409(self):
        self.assertIsInstance(self._proxy_request_test_helper(409),
                              webob.exc.HTTPConflict)

    def test_proxy_request_500(self):
        self.assertIsInstance(self._proxy_request_test_helper(500),
                              webob.exc.HTTPInternalServerError)

    def test_proxy_request_other_code(self):
        with testtools.ExpectedException(Exception):
            self._proxy_request_test_helper(302)

    def test_sign_instance_id(self):
        self.assertEqual(
            self.handler._sign_instance_id('foo'),
            '773ba44693c7553d6ee20f61ea5d2757a9a4f4a44d2841ae4e95b52e4cd62db4'
        )


class TestUnixDomainMetadataProxy(base.BaseTestCase):
    def setUp(self):
        super(TestUnixDomainMetadataProxy, self).setUp()
        self.cfg_p = mock.patch.object(agent, 'cfg')
        self.cfg = self.cfg_p.start()
        self.cfg.CONF.metadata_proxy_socket = '/the/path'
        self.cfg.CONF.metadata_workers = 0
        self.cfg.CONF.metadata_backlog = 128
        self.cfg.CONF.metadata_proxy_socket_mode = meta_conf.USER_MODE

    @mock.patch.object(fileutils, 'ensure_tree')
    def test_init_doesnot_exists(self, ensure_dir):
        agent.UnixDomainMetadataProxy(mock.Mock())
        ensure_dir.assert_called_once_with('/the', mode=0o755)

    def test_init_exists(self):
        with \
                mock.patch('os.path.isdir') as isdir:
            with mock.patch('os.unlink') as unlink:
                isdir.return_value = True
                agent.UnixDomainMetadataProxy(mock.Mock())
                unlink.assert_called_once_with('/the/path')

    def test_init_exists_unlink_no_file(self):
        with mock.patch('os.path.isdir') as isdir:
            with mock.patch('os.unlink') as unlink:
                with mock.patch('os.path.exists') as exists:
                    isdir.return_value = True
                    exists.return_value = False
                    unlink.side_effect = OSError

                    agent.UnixDomainMetadataProxy(mock.Mock())
                    unlink.assert_called_once_with('/the/path')

    def test_init_exists_unlink_fails_file_still_exists(self):
        with mock.patch('os.path.isdir') as isdir:
            with mock.patch('os.unlink') as unlink:
                with mock.patch('os.path.exists') as exists:
                    isdir.return_value = True
                    exists.return_value = True
                    unlink.side_effect = OSError

                    with testtools.ExpectedException(OSError):
                        agent.UnixDomainMetadataProxy(mock.Mock())
                    unlink.assert_called_once_with('/the/path')

    @mock.patch.object(agent, 'MetadataProxyHandler')
    @mock.patch.object(agent_utils, 'UnixDomainWSGIServer')
    @mock.patch.object(fileutils, 'ensure_tree')
    def test_run(self, ensure_dir, server, handler):
        p = agent.UnixDomainMetadataProxy(self.cfg.CONF)
        p.run()

        ensure_dir.assert_called_once_with('/the', mode=0o755)
        server.assert_has_calls([
            mock.call('networking-ovn-metadata-agent'),
            mock.call().start(handler.return_value, '/the/path',
                              workers=0, backlog=128, mode=0o644)]
        )


# ---- neutron/tests/unit/agent/ovsdb/__init__.py (empty) ----
# ---- neutron/tests/unit/agent/ovsdb/native/__init__.py (empty) ----
# ---- neutron/tests/unit/agent/ovsdb/native/test_connection.py ----
# Copyright 2015, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
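# NOTE(editor): the SSL test cases below assert that, when ovsdb_connection
# uses the ssl: scheme, the key, certificate and CA certificate configured
# under [OVS] are pushed into python-ovs' Stream class (via
# ssl_set_private_key_file() and friends) before the IDL connects, and that
# missing files or unset options fail fast with the dedicated exceptions.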
import mock
from ovs.db import idl
from ovs import jsonrpc
from ovsdbapp.backend.ovs_idl import connection
from ovsdbapp import event as ovsdb_event

from neutron.agent.ovsdb.native import connection as native_conn
from neutron.agent.ovsdb.native import exceptions as ovsdb_exc
from neutron.tests import base

SSL_KEY_FILE = '/tmp/dummy.pem'
SSL_CERT_FILE = '/tmp/dummy.crt'
SSL_CA_FILE = '/tmp/ca.crt'

COLUMN_NAME = {'name': {'mutable': False, 'type': 'string'}}
SCHEMA = {'tables': {'Bridge': {'columns': COLUMN_NAME},
                     'Open_vSwitch': {'columns': COLUMN_NAME},
                     'Port': {'columns': COLUMN_NAME},
                     'Interface': {'columns': COLUMN_NAME}},
          'version': '7.15.1', 'name': 'Open_vSwitch',
          'cksum': '3682332033 23608'}


class ConfigureSslConnTestCase(base.BaseTestCase):

    def setUp(self):
        super(ConfigureSslConnTestCase, self).setUp()
        self._mock_cfg = mock.patch.object(native_conn, 'cfg')
        self.mock_cfg = self._mock_cfg.start()
        self._mock_os = mock.patch.object(native_conn, 'os')
        self.mock_os = self._mock_os.start()
        self._mock_stream = mock.patch.object(native_conn, 'Stream')
        self.mock_stream = self._mock_stream.start()
        self._mock_has_ever_connected = mock.patch.object(
            idl.Idl, 'has_ever_connected')
        self.mock_has_ever_connected = self._mock_has_ever_connected.start()
        self.addCleanup(self._clean_mocks)

    def _get_ovs_idl_monitor(self):
        with mock.patch.object(ovsdb_event, 'RowEventHandler'), \
                mock.patch.object(
                    native_conn.OvsIdl, '_get_ovsdb_helper',
                    return_value=idl.SchemaHelper(None, SCHEMA)), \
                mock.patch.object(jsonrpc.Session, 'open'), \
                mock.patch.object(connection.OvsdbIdl, '__init__'):
            return native_conn.OvsIdlMonitor()

    def _clean_mocks(self):
        self._mock_cfg.stop()
        self._mock_os.stop()
        self._mock_stream.stop()
        self._mock_has_ever_connected.stop()

    def test_ssl_connection(self):
        self.mock_os.path.isfile.return_value = True
        self.mock_cfg.CONF.OVS.ovsdb_connection = 'ssl:127.0.0.1:6640'
        self.mock_cfg.CONF.OVS.ssl_key_file = SSL_KEY_FILE
        self.mock_cfg.CONF.OVS.ssl_cert_file = SSL_CERT_FILE
        self.mock_cfg.CONF.OVS.ssl_ca_cert_file = SSL_CA_FILE
        ovs_idl_monitor = self._get_ovs_idl_monitor()
        conn = connection.Connection(idl=ovs_idl_monitor, timeout=1)
        conn.start()
        self.mock_stream.ssl_set_private_key_file.assert_called_once_with(
            SSL_KEY_FILE)
        self.mock_stream.ssl_set_certificate_file.assert_called_once_with(
            SSL_CERT_FILE)
        self.mock_stream.ssl_set_ca_cert_file.assert_called_once_with(
            SSL_CA_FILE)

    def test_ssl_conn_file_missing(self):
        self.mock_cfg.CONF.OVS.ovsdb_connection = 'ssl:127.0.0.1:6640'
        self.mock_cfg.CONF.OVS.ssl_key_file = SSL_KEY_FILE
        self.mock_cfg.CONF.OVS.ssl_cert_file = SSL_CERT_FILE
        self.mock_cfg.CONF.OVS.ssl_ca_cert_file = SSL_CA_FILE
        self.mock_os.path.exists.return_value = False
        self.assertRaises(ovsdb_exc.OvsdbSslConfigNotFound,
                          self._get_ovs_idl_monitor)

    def test_ssl_conn_cfg_missing(self):
        self.mock_cfg.CONF.OVS.ovsdb_connection = 'ssl:127.0.0.1:6640'
        self.mock_cfg.CONF.OVS.ssl_key_file = None
        self.mock_cfg.CONF.OVS.ssl_cert_file = None
        self.mock_cfg.CONF.OVS.ssl_ca_cert_file = None
        self.assertRaises(ovsdb_exc.OvsdbSslRequiredOptError,
                          self._get_ovs_idl_monitor)


class BridgeCreateEventTestCase(base.BaseTestCase):

    class MetadataAgent(object):
        bridges = []

        def add_bridge(self, row_name):
            self.bridges.append(row_name)

    def test_run(self):
        agent = self.MetadataAgent()
        mock_row = mock.Mock()
        mock_row.name = 'row_name'
        bridge_create_event = native_conn.BridgeCreateEvent(agent)
        bridge_create_event.run(mock.ANY, mock_row, mock.ANY)
        self.assertEqual([mock_row.name], agent.bridges)
class OvsIdlMonitorTestCase(base.BaseTestCase):

    def setUp(self):
        super(OvsIdlMonitorTestCase, self).setUp()
        self._mock_get_ovsdb_helper = mock.patch.object(
            native_conn.OvsIdl, '_get_ovsdb_helper')
        self._mock_get_ovsdb_helper.start()
        self._mock_row_event_handler = mock.patch.object(ovsdb_event,
                                                         'RowEventHandler')
        self._mock_row_event_handler.start()
        self._mock_idl = mock.patch.object(idl.Idl, '__init__')
        self._mock_idl.start()
        self.addCleanup(self._stop_mocks)
        self.ovs_idl_monitor = native_conn.OvsIdlMonitor()

    def _stop_mocks(self):
        self._mock_get_ovsdb_helper.stop()
        self._mock_row_event_handler.stop()
        self._mock_idl.stop()

    @mock.patch.object(native_conn, 'BridgeCreateEvent')
    def test_start_bridge_monitor(self, mock_bridge_event):
        mock_bridge_event.return_value = 'bridge_event'
        self.ovs_idl_monitor.start_bridge_monitor(['br01', 'br02'])
        self.assertEqual(['br01', 'br02'],
                         self.ovs_idl_monitor._bridges_to_monitor)
        self.ovs_idl_monitor.notify_handler.\
            watch_event.assert_called_once_with('bridge_event')

    def test_add_bridge(self):
        self.ovs_idl_monitor.start_bridge_monitor(['br01', 'br02'])
        self.ovs_idl_monitor.add_bridge('br01')
        self.ovs_idl_monitor.add_bridge('br02')
        self.ovs_idl_monitor.add_bridge('br03')
        self.assertEqual(['br01', 'br02'],
                         self.ovs_idl_monitor._bridges_added_list)

    def test_bridges_added(self):
        self.ovs_idl_monitor.start_bridge_monitor(['br01', 'br02'])
        self.ovs_idl_monitor.add_bridge('br01')
        self.ovs_idl_monitor.add_bridge('br02')
        self.assertEqual(['br01', 'br02'],
                         self.ovs_idl_monitor.bridges_added)
        # The bridges_added property consumes the pending list, so a second
        # read returns an empty list.
        self.assertEqual([], self.ovs_idl_monitor.bridges_added)


# ---- neutron/tests/unit/agent/ovsdb/test_impl_idl.py ----
# Copyright (c) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
import testtools

from ovsdbapp import exceptions

from neutron.agent.ovsdb import impl_idl
from neutron.tests import base


class TransactionTestCase(base.BaseTestCase):
    def test_commit_raises_exception_on_timeout(self):
        transaction = impl_idl.NeutronOVSDBTransaction(mock.sentinel,
                                                       mock.Mock(), 1)
        with self.assert_max_execution_time(10):
            with testtools.ExpectedException(exceptions.TimeoutException):
                transaction.commit()

    def test_post_commit_does_not_raise_exception(self):
        with mock.patch.object(impl_idl.NeutronOVSDBTransaction,
                               "do_post_commit", side_effect=Exception):
            transaction = impl_idl.NeutronOVSDBTransaction(mock.sentinel,
                                                           mock.Mock(), 0)
            transaction.post_commit(mock.Mock())


# ---- neutron/tests/unit/agent/test_agent_extensions_manager.py ----
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from oslo_config import cfg

from neutron.agent import agent_extensions_manager as ext_manager
from neutron.conf.agent import agent_extensions_manager as ext_manager_config
from neutron.tests import base


class TestAgentExtensionsManager(base.BaseTestCase):

    def setUp(self):
        super(TestAgentExtensionsManager, self).setUp()
        mock.patch('neutron.agent.l2.extensions.qos.QosAgentExtension',
                   autospec=True).start()
        conf = cfg.CONF
        ext_manager_config.register_agent_ext_manager_opts()
        cfg.CONF.set_override('extensions', ['qos'], 'agent')
        namespace = 'neutron.agent.l2.extensions'
        self.manager = ext_manager.AgentExtensionsManager(conf, namespace)

    def _get_extension(self):
        return self.manager.extensions[0].obj

    def test_initialize(self):
        connection = object()
        self.manager.initialize(connection, 'fake_driver_type')
        ext = self._get_extension()
        ext.initialize.assert_called_once_with(connection, 'fake_driver_type')


# ---- neutron/tests/unit/agent/test_resource_cache.py ----
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
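# NOTE(editor): a minimal standalone sketch (hypothetical helper, not used by
# the tests below) of the staleness rule the cache tests exercise: an update
# is discarded when its revision_number is lower than the cached one, and
# anything arriving after a recorded delete is treated as stale.
def _example_is_stale(cached_revision, incoming_revision):
    return incoming_revision < cached_revision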
import mock

from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib import context

from neutron.agent import resource_cache
from neutron.api.rpc.callbacks import events as events_rpc
from neutron.tests import base


class OVOLikeThing(object):
    def __init__(self, id, revision_number=10, **kwargs):
        self.id = id
        self.fields = ['id', 'revision_number']
        self.revision_number = revision_number
        for k, v in kwargs.items():
            self.fields.append(k)
            setattr(self, k, v)

    def to_dict(self):
        return {f: getattr(self, f) for f in self.fields}

    def get(self, k):
        return getattr(self, k, None)


class RemoteResourceCacheTestCase(base.BaseTestCase):
    def setUp(self):
        super(RemoteResourceCacheTestCase, self).setUp()
        rtypes = ['duck', 'goose']
        self.goose = OVOLikeThing(1)
        self.duck = OVOLikeThing(2)
        self.ctx = context.get_admin_context()
        self.rcache = resource_cache.RemoteResourceCache(rtypes)
        self._pullmock = mock.patch.object(self.rcache, '_puller').start()

    def test_get_resource_by_id(self):
        self.rcache.record_resource_update(self.ctx, 'goose', self.goose)
        self.assertEqual(self.goose,
                         self.rcache.get_resource_by_id('goose', 1))
        self.assertIsNone(self.rcache.get_resource_by_id('goose', 2))

    def test__flood_cache_for_query_pulls_once(self):
        resources = [OVOLikeThing(66), OVOLikeThing(67)]
        received_kw = []
        receiver = lambda *a, **k: received_kw.append(k)
        registry.subscribe(receiver, 'goose', events.AFTER_UPDATE)
        self._pullmock.bulk_pull.side_effect = [
            resources,
            [resources[0]],
            [resources[1]],
            [resources[1]]
        ]
        self.rcache._flood_cache_for_query('goose', id=(66, 67),
                                           name=('a', 'b'))
        self._pullmock.bulk_pull.assert_called_once_with(
            mock.ANY, 'goose',
            filter_kwargs={'id': (66, 67), 'name': ('a', 'b')})

        self._pullmock.bulk_pull.reset_mock()
        self.rcache._flood_cache_for_query('goose', id=(66, ), name=('a', ))
        self.assertFalse(self._pullmock.called)
        self.rcache._flood_cache_for_query('goose', id=(67, ), name=('b', ))
        self.assertFalse(self._pullmock.called)

        # querying by just ID should trigger a new call since ID+name is a
        # more specific query
        self.rcache._flood_cache_for_query('goose', id=(67, ))
        self._pullmock.bulk_pull.assert_called_once_with(
            mock.ANY, 'goose', filter_kwargs={'id': (67, )})

        self.assertItemsEqual(
            resources, [rec['updated'] for rec in received_kw])

    def test_bulk_pull_doesnt_wipe_out_newer_data(self):
        self.rcache.record_resource_update(
            self.ctx, 'goose', OVOLikeThing(1, revision_number=5))
        updated = OVOLikeThing(1)
        updated.revision_number = 1  # older revision number
        self._pullmock.bulk_pull.return_value = [updated]
        self.rcache._flood_cache_for_query('goose', id=(1,),)
        self.assertEqual(
            5, self.rcache.get_resource_by_id('goose', 1).revision_number)

    def test_get_resources(self):
        geese = [OVOLikeThing(3, size='large'),
                 OVOLikeThing(5, size='medium'),
                 OVOLikeThing(4, size='large'),
                 OVOLikeThing(6, size='small')]
        for goose in geese:
            self.rcache.record_resource_update(self.ctx, 'goose', goose)
        is_large = {'size': ('large', )}
        is_small = {'size': ('small', )}
        self.assertItemsEqual([geese[0], geese[2]],
                              self.rcache.get_resources('goose', is_large))
        self.assertItemsEqual([geese[3]],
                              self.rcache.get_resources('goose', is_small))

    def test_match_resources_with_func(self):
        geese = [OVOLikeThing(3, size='large'),
                 OVOLikeThing(5, size='medium'),
                 OVOLikeThing(4, size='xlarge'),
                 OVOLikeThing(6, size='small')]
        for goose in geese:
            self.rcache.record_resource_update(self.ctx, 'goose', goose)
        has_large = lambda o: 'large' in o.size
        self.assertItemsEqual([geese[0], geese[2]],
                              self.rcache.match_resources_with_func(
                                  'goose', has_large))

    def test__is_stale(self):
        goose = OVOLikeThing(3, size='large')
        self.rcache.record_resource_update(self.ctx, 'goose', goose)
        # same revision id is not considered stale
        updated = OVOLikeThing(3, size='large')
        self.assertFalse(self.rcache._is_stale('goose', updated))

        updated.revision_number = 0
        self.assertTrue(self.rcache._is_stale('goose', updated))

        updated.revision_number = 200
        self.assertFalse(self.rcache._is_stale('goose', updated))

        # once deleted, all updates are stale
        self.rcache.record_resource_delete(self.ctx, 'goose', 3)
        self.assertTrue(self.rcache._is_stale('goose', updated))

    def test_record_resource_update(self):
        received_kw = []
        receiver = lambda *a, **k: received_kw.append(k)
        registry.subscribe(receiver, 'goose', events.AFTER_UPDATE)
        self.rcache.record_resource_update(self.ctx, 'goose',
                                           OVOLikeThing(3, size='large'))
        self.assertEqual(1, len(received_kw))
        self.assertIsNone(received_kw[0]['existing'])
        # another update with no changed fields results in no callback
        self.rcache.record_resource_update(self.ctx, 'goose',
                                           OVOLikeThing(3, size='large',
                                                        revision_number=100))
        self.assertEqual(1, len(received_kw))
        self.rcache.record_resource_update(self.ctx, 'goose',
                                           OVOLikeThing(3, size='small',
                                                        revision_number=101))
        self.assertEqual(2, len(received_kw))
        self.assertEqual('large', received_kw[1]['existing'].size)
        self.assertEqual('small', received_kw[1]['updated'].size)
        self.assertEqual(set(['size']), received_kw[1]['changed_fields'])

    def test_record_resource_delete(self):
        received_kw = []
        receiver = lambda *a, **k: received_kw.append(k)
        registry.subscribe(receiver, 'goose', events.AFTER_DELETE)
        self.rcache.record_resource_update(self.ctx, 'goose',
                                           OVOLikeThing(3, size='large'))
        self.rcache.record_resource_delete(self.ctx, 'goose', 3)
        self.assertEqual(1, len(received_kw))
        self.assertEqual(3, received_kw[0]['existing'].id)
        self.assertEqual(3, received_kw[0]['resource_id'])
        # deletes of non-existing cache items are still honored
        self.rcache.record_resource_delete(self.ctx, 'goose', 4)
        self.assertEqual(2, len(received_kw))
        self.assertIsNone(received_kw[1]['existing'])
        self.assertEqual(4, received_kw[1]['resource_id'])

    def test_record_resource_delete_ignores_dups(self):
        received_kw = []
        receiver = lambda *a, **k: received_kw.append(k)
        registry.subscribe(receiver, 'goose', events.AFTER_DELETE)
        self.rcache.record_resource_delete(self.ctx, 'goose', 3)
        self.assertEqual(1, len(received_kw))
        self.rcache.record_resource_delete(self.ctx, 'goose', 4)
        self.assertEqual(2, len(received_kw))
        self.rcache.record_resource_delete(self.ctx, 'goose', 3)
        self.assertEqual(2, len(received_kw))

    def test_resource_change_handler(self):
        with mock.patch.object(resource_cache.RemoteResourceWatcher,
                               '_init_rpc_listeners'):
            watch = resource_cache.RemoteResourceWatcher(self.rcache)
        geese = [OVOLikeThing(3, size='large'),
                 OVOLikeThing(5, size='medium'),
                 OVOLikeThing(4, size='large'),
                 OVOLikeThing(6, size='small')]
        watch.resource_change_handler(self.ctx, 'goose', geese,
                                      events_rpc.UPDATED)
        for goose in geese:
            self.assertEqual(
                goose, self.rcache.get_resource_by_id('goose', goose.id))
        watch.resource_change_handler(self.ctx, 'goose', geese,
                                      events_rpc.DELETED)
        for goose in geese:
            self.assertIsNone(
                self.rcache.get_resource_by_id('goose', goose.id))


# ---- neutron/tests/unit/agent/test_rpc.py ----
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime

import mock
import netaddr
from neutron_lib.agent import topics as lib_topics
from neutron_lib.callbacks import events
from neutron_lib.callbacks import resources
from neutron_lib import constants
from neutron_lib import rpc as n_rpc
from oslo_context import context as oslo_context
from oslo_utils import uuidutils

from neutron.agent import rpc
from neutron.objects import network
from neutron.objects import ports
from neutron.tests import base


class AgentRPCPluginApi(base.BaseTestCase):
    def _test_rpc_call(self, method):
        agent = rpc.PluginApi('fake_topic')
        ctxt = oslo_context.RequestContext(user_id='fake_user',
                                           project_id='fake_project')
        expect_val = 'foo'
        with mock.patch.object(agent.client, 'call') as mock_call,\
                mock.patch.object(agent.client, 'prepare') as mock_prepare:
            mock_prepare.return_value = agent.client
            mock_call.return_value = expect_val
            func_obj = getattr(agent, method)
            if method == 'tunnel_sync':
                actual_val = func_obj(ctxt, 'fake_tunnel_ip')
            elif method == 'get_ports_by_vnic_type_and_host':
                actual_val = func_obj(ctxt, 'fake_vnic_type', 'fake_host')
                mock_call.assert_called_once_with(
                    ctxt, 'get_ports_by_vnic_type_and_host',
                    host='fake_host', vnic_type='fake_vnic_type')
            else:
                actual_val = func_obj(ctxt, 'fake_device', 'fake_agent_id')
            self.assertEqual(actual_val, expect_val)

    def test_get_device_details(self):
        self._test_rpc_call('get_device_details')

    def test_get_devices_details_list(self):
        self._test_rpc_call('get_devices_details_list')

    def test_get_network_details(self):
        self._test_rpc_call('get_network_details')

    def test_update_device_down(self):
        self._test_rpc_call('update_device_down')

    def test_tunnel_sync(self):
        self._test_rpc_call('tunnel_sync')

    def test_get_ports_by_vnic_type_and_host(self):
        self._test_rpc_call('get_ports_by_vnic_type_and_host')


class AgentPluginReportState(base.BaseTestCase):
    def test_plugin_report_state_use_call(self):
        topic = 'test'
        reportStateAPI = rpc.PluginReportStateAPI(topic)
        expected_agent_state = {'agent': 'test'}
        with mock.patch.object(reportStateAPI.client, 'call') as mock_call, \
                mock.patch.object(reportStateAPI.client, 'cast'), \
                mock.patch.object(reportStateAPI.client, 'prepare'
                                  ) as mock_prepare:
            mock_prepare.return_value = reportStateAPI.client
            ctxt = oslo_context.RequestContext(user_id='fake_user',
                                               project_id='fake_project')
            reportStateAPI.report_state(ctxt, expected_agent_state,
                                        use_call=True)
            self.assertEqual(mock_call.call_args[0][0], ctxt)
            self.assertEqual(mock_call.call_args[0][1], 'report_state')
            self.assertEqual(mock_call.call_args[1]['agent_state'],
                             {'agent_state': expected_agent_state})
            self.assertIsInstance(mock_call.call_args[1]['time'], str)

    def test_plugin_report_state_cast(self):
        topic = 'test'
        reportStateAPI = rpc.PluginReportStateAPI(topic)
        expected_agent_state = {'agent': 'test'}
        with mock.patch.object(reportStateAPI.client, 'call'), \
                mock.patch.object(reportStateAPI.client,
                                  'cast') as mock_cast, \
                mock.patch.object(reportStateAPI.client, 'prepare'
                                  ) as mock_prepare:
            mock_prepare.return_value = reportStateAPI.client
            ctxt = oslo_context.RequestContext(user_id='fake_user',
                                               project_id='fake_project')
            reportStateAPI.report_state(ctxt, expected_agent_state)
            self.assertEqual(mock_cast.call_args[0][0], ctxt)
            self.assertEqual(mock_cast.call_args[0][1], 'report_state')
            self.assertEqual(mock_cast.call_args[1]['agent_state'],
                             {'agent_state': expected_agent_state})
            self.assertIsInstance(mock_cast.call_args[1]['time'], str)

    def test_plugin_report_state_microsecond_is_0(self):
        topic = 'test'
        expected_time = datetime.datetime(2015, 7, 27, 15, 33, 30, 0)
        expected_time_str = '2015-07-27T15:33:30.000000'
        expected_agent_state = {'agent': 'test'}
        with mock.patch('neutron.agent.rpc.datetime') as mock_datetime:
            reportStateAPI = rpc.PluginReportStateAPI(topic)
            mock_datetime.utcnow.return_value = expected_time
            with mock.patch.object(reportStateAPI.client, 'call'), \
                    mock.patch.object(reportStateAPI.client,
                                      'cast') as mock_cast, \
                    mock.patch.object(reportStateAPI.client, 'prepare'
                                      ) as mock_prepare:
                mock_prepare.return_value = reportStateAPI.client
                ctxt = oslo_context.RequestContext(user_id='fake_user',
                                                   project_id='fake_project')
                reportStateAPI.report_state(ctxt, expected_agent_state)
                self.assertEqual(expected_time_str,
                                 mock_cast.call_args[1]['time'])


class AgentRPCMethods(base.BaseTestCase):

    def _test_create_consumers(self, endpoints, method, expected, topics,
                               listen):
        with mock.patch.object(n_rpc, 'Connection') as create_connection:
            rpc.create_consumers(
                endpoints, method, topics, start_listening=listen)
            create_connection.assert_has_calls(expected)

    def test_create_consumers_start_listening(self):
        endpoints = [mock.Mock()]
        expected = [
            mock.call(),
            mock.call().create_consumer('foo-topic-op', endpoints,
                                        fanout=True),
            mock.call().consume_in_threads()
        ]
        method = 'foo'
        topics = [('topic', 'op')]
        self._test_create_consumers(
            endpoints, method, expected, topics, True)

    def test_create_consumers_do_not_listen(self):
        endpoints = [mock.Mock()]
        expected = [
            mock.call(),
            mock.call().create_consumer('foo-topic-op', endpoints,
                                        fanout=True),
        ]
        method = 'foo'
        topics = [('topic', 'op')]
        self._test_create_consumers(
            endpoints, method, expected, topics, False)

    def test_create_consumers_with_node_name(self):
        endpoints = [mock.Mock()]
        expected = [
            mock.call(),
            mock.call().create_consumer('foo-topic-op', endpoints,
                                        fanout=True),
            mock.call().create_consumer('foo-topic-op.node1', endpoints,
                                        fanout=False),
            mock.call().consume_in_threads()
        ]
        with mock.patch.object(n_rpc, 'Connection') as create_connection:
            rpc.create_consumers(endpoints, 'foo', [('topic', 'op', 'node1')])
            create_connection.assert_has_calls(expected)


class TestCacheBackedPluginApi(base.BaseTestCase):

    def setUp(self):
        super(TestCacheBackedPluginApi, self).setUp()
        self._api = rpc.CacheBackedPluginApi(lib_topics.PLUGIN)
        self._api._legacy_interface = mock.Mock()
        self._api.remote_resource_cache = mock.Mock()
        self._network_id = uuidutils.generate_uuid()
        self._segment_id = uuidutils.generate_uuid()
        self._segment = network.NetworkSegment(
            id=self._segment_id, network_id=self._network_id,
            network_type=constants.TYPE_FLAT)
        self._port_id = uuidutils.generate_uuid()
        self._network = network.Network(id=self._network_id,
                                        segments=[self._segment])
        self._port = ports.Port(
            id=self._port_id, network_id=self._network_id,
            device_id='vm_uuid',
            mac_address=netaddr.EUI('fa:16:3e:ec:c7:d9'),
            admin_state_up=True,
            security_group_ids=set([uuidutils.generate_uuid()]),
            fixed_ips=[], allowed_address_pairs=[],
            device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX,
            bindings=[ports.PortBinding(port_id=self._port_id,
                                        host='host1',
                                        status=constants.ACTIVE,
                                        profile={},
                                        vif_type='vif_type',
                                        vnic_type='vnic_type')],
            binding_levels=[ports.PortBindingLevel(port_id=self._port_id,
                                                   host='host1',
                                                   level=0,
                                                   segment=self._segment)])

    def test__legacy_notifier_resource_delete(self):
        self._api._legacy_notifier(resources.PORT, events.AFTER_DELETE, self,
                                   mock.ANY, resource_id=self._port_id,
                                   existing=self._port)
        self._api._legacy_interface.port_update.assert_not_called()
        self._api._legacy_interface.port_delete.assert_called_once_with(
            mock.ANY, port={'id': self._port_id}, port_id=self._port_id)
        self._api._legacy_interface.binding_deactivate.assert_not_called()

    def test__legacy_notifier_resource_update(self):
        updated_port = ports.Port(id=self._port_id, name='updated_port')
        self._api._legacy_notifier(resources.PORT, events.AFTER_UPDATE, self,
                                   mock.ANY, changed_fields=set(['name']),
                                   resource_id=self._port_id,
                                   existing=self._port, updated=updated_port)
        self._api._legacy_interface.port_delete.assert_not_called()
        self._api._legacy_interface.port_update.assert_called_once_with(
            mock.ANY, port={'id': self._port_id}, port_id=self._port_id)
        self._api._legacy_interface.binding_deactivate.assert_not_called()

    def _test__legacy_notifier_binding_activated(self):
        updated_port = ports.Port(
            id=self._port_id, name='updated_port',
            bindings=[ports.PortBinding(port_id=self._port_id,
                                        host='host2',
                                        status=constants.ACTIVE),
                      ports.PortBinding(port_id=self._port_id,
                                        host='host1',
                                        status=constants.INACTIVE)])
        self._api._legacy_notifier(
            resources.PORT, events.AFTER_UPDATE, self, mock.ANY,
            changed_fields=set(['name', 'bindings']),
            resource_id=self._port_id, existing=self._port,
            updated=updated_port)
        self._api._legacy_interface.port_update.assert_not_called()
        self._api._legacy_interface.port_delete.assert_not_called()

    def test__legacy_notifier_new_binding_activated(self):
        self._test__legacy_notifier_binding_activated()
        self._api._legacy_interface.binding_deactivate.\
            assert_called_once_with(mock.ANY, host='host1',
                                    port_id=self._port_id)
        self._api._legacy_interface.binding_activate.assert_called_once_with(
            mock.ANY, host='host2', port_id=self._port_id)

    def test__legacy_notifier_no_new_binding_activated(self):
        updated_port = ports.Port(
            id=self._port_id, name='updated_port',
            bindings=[ports.PortBinding(port_id=self._port_id,
                                        host='host2',
                                        status=constants.ACTIVE)])
        self._api._legacy_notifier(
            resources.PORT, events.AFTER_UPDATE, self, mock.ANY,
            changed_fields=set(['name', 'bindings']),
            resource_id=self._port_id, existing=self._port,
            updated=updated_port)
        self._api._legacy_interface.port_update.assert_called_once_with(
            mock.ANY, port={'id': self._port_id}, port_id=self._port_id)
        self._api._legacy_interface.port_delete.assert_not_called()
        self._api._legacy_interface.binding_deactivate.assert_not_called()

    def test__legacy_notifier_existing_or_updated_is_none(self):
        self._api._legacy_notifier(resources.PORT, events.AFTER_UPDATE, self,
                                   mock.ANY,
                                   changed_fields=set(['name', 'bindings']),
                                   resource_id=self._port_id,
                                   existing=None, updated=None)
        self._api._legacy_notifier(resources.PORT, events.AFTER_UPDATE, self,
                                   mock.ANY,
                                   changed_fields=set(['name', 'bindings']),
                                   resource_id=self._port_id,
                                   existing=self._port, updated=None)
        call = mock.call(mock.ANY, port={'id': self._port_id},
                         port_id=self._port_id)
        self._api._legacy_interface.port_update.assert_has_calls([call, call])
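        # Without usable existing/updated payloads the notifier can only fall
        # back to a plain port_update for both events.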
        self._api._legacy_interface.port_delete.assert_not_called()
        self._api._legacy_interface.binding_deactivate.assert_not_called()

    def test__legacy_notifier_binding_activated_not_supported(self):
        del self._api._legacy_interface.binding_deactivate
        self._test__legacy_notifier_binding_activated()

    def test_get_device_details_binding_in_host(self):
        self._api.remote_resource_cache.get_resource_by_id.side_effect = [
            self._port, self._network]
        entry = self._api.get_device_details(mock.ANY, self._port_id,
                                             mock.ANY, 'host1')
        self.assertEqual(self._port_id, entry['device'])
        self.assertEqual(self._port_id, entry['port_id'])
        self.assertEqual(self._network_id, entry['network_id'])
        self.assertNotIn(constants.NO_ACTIVE_BINDING, entry)

    def test_get_device_details_binding_not_in_host(self):
        self._api.remote_resource_cache.get_resource_by_id.side_effect = [
            self._port, self._network]
        entry = self._api.get_device_details(mock.ANY, self._port_id,
                                             mock.ANY, 'host2')
        self.assertEqual(self._port_id, entry['device'])
        self.assertNotIn('port_id', entry)
        self.assertNotIn('network_id', entry)
        self.assertIn(constants.NO_ACTIVE_BINDING, entry)

    @mock.patch('neutron.agent.resource_cache.RemoteResourceCache')
    def test_initialization_with_default_resources(self, rcache_class):
        rcache_obj = mock.MagicMock()
        rcache_class.return_value = rcache_obj
        rpc.CacheBackedPluginApi(lib_topics.PLUGIN)
        rcache_class.assert_called_once_with(
            rpc.CacheBackedPluginApi.RESOURCE_TYPES)
        rcache_obj.start_watcher.assert_called_once_with()

    @mock.patch('neutron.agent.resource_cache.RemoteResourceCache')
    def test_initialization_with_custom_resource(self, rcache_class):
        CUSTOM = 'test'
        rcache_obj = mock.MagicMock()
        rcache_class.return_value = rcache_obj

        class CustomCacheBackedPluginApi(rpc.CacheBackedPluginApi):
            RESOURCE_TYPES = [resources.PORT, CUSTOM]

        CustomCacheBackedPluginApi(lib_topics.PLUGIN)
        rcache_class.assert_called_once_with(
            CustomCacheBackedPluginApi.RESOURCE_TYPES)
        rcache_obj.start_watcher.assert_called_once_with()


# ---- neutron/tests/unit/agent/test_securitygroups_rpc.py ----
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
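# NOTE(editor): ingress_address_assignment_rules() below reconstructs the
# implicit rules every port gets for address assignment traffic: ICMPv6
# router advertisements (type 134), DHCP (udp 67 -> 68 for each fixed IPv4
# address plus the broadcast address) and DHCPv6 (udp 547 -> 546 for each
# fixed IPv6 address and for fe80::/64).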
import collections import contextlib import mock import netaddr from neutron_lib.api.definitions import allowedaddresspairs as addr_apidef from neutron_lib import constants as const from neutron_lib import context from neutron_lib.plugins import directory from neutron_lib import rpc as n_rpc from neutron_lib.tests import tools from oslo_config import cfg import oslo_messaging from testtools import matchers import webob.exc from neutron.agent import firewall as firewall_base from neutron.agent.linux import ip_conntrack from neutron.agent.linux import iptables_manager from neutron.agent import securitygroups_rpc as sg_rpc from neutron.api.rpc.handlers import securitygroups_rpc from neutron.db import securitygroups_rpc_base as sg_db_rpc from neutron.extensions import securitygroup as ext_sg from neutron.tests import base from neutron.tests.unit.extensions import test_securitygroup as test_sg FAKE_PREFIX = {const.IPv4: '10.0.0.0/24', const.IPv6: '2001:db8::/64'} FAKE_IP = {const.IPv4: '10.0.0.1', const.IPv6: 'fe80::1', 'IPv6_GLOBAL': '2001:db8::1', 'IPv6_LLA': 'fe80::123', 'IPv6_DHCP': '2001:db8::3'} TEST_PLUGIN_CLASS = ('neutron.tests.unit.agent.test_securitygroups_rpc.' 'SecurityGroupRpcTestPlugin') FIREWALL_BASE_PACKAGE = 'neutron.agent.linux.iptables_firewall.' FIREWALL_IPTABLES_DRIVER = FIREWALL_BASE_PACKAGE + 'IptablesFirewallDriver' FIREWALL_HYBRID_DRIVER = (FIREWALL_BASE_PACKAGE + 'OVSHybridIptablesFirewallDriver') FIREWALL_NOOP_DRIVER = 'neutron.agent.firewall.NoopFirewallDriver'
# ingress address-assignment rules that every port is expected to carry:
# ICMPv6 router advertisements, DHCPv4 replies (per fixed v4 address plus
# broadcast) and DHCPv6 replies (per fixed v6 address plus the link-local
# prefix)
def ingress_address_assignment_rules(port): rules = [] v4_addrs = [ip['ip_address'] for ip in port['port']['fixed_ips'] if netaddr.IPNetwork(ip['ip_address']).version == 4] v6_addrs = [ip['ip_address'] for ip in port['port']['fixed_ips'] if netaddr.IPNetwork(ip['ip_address']).version == 6] if v6_addrs: rules.append({'direction': 'ingress', 'ethertype': 'IPv6', 'protocol': 'ipv6-icmp', 'source_port_range_min': 134}) for dest in v4_addrs + ['255.255.255.255']: rules.append({'direction': 'ingress', 'ethertype': 'IPv4', 'port_range_max': 68, 'port_range_min': 68, 'protocol': 'udp', 'source_port_range_max': 67, 'source_port_range_min': 67, 'dest_ip_prefix': '%s/32' % dest}) for dest in v6_addrs: rules.append({'direction': 'ingress', 'ethertype': 'IPv6', 'port_range_max': 546, 'port_range_min': 546, 'protocol': 'udp', 'source_port_range_max': 547, 'source_port_range_min': 547, 'dest_ip_prefix': '%s/128' % dest}) for dest in ['fe80::/64']: rules.append({'direction': 'ingress', 'ethertype': 'IPv6', 'port_range_max': 546, 'port_range_min': 546, 'protocol': 'udp', 'source_port_range_max': 547, 'source_port_range_min': 547, 'dest_ip_prefix': '%s' % dest}) return rules def set_enable_security_groups(enabled): cfg.CONF.set_override('enable_security_group', enabled, group='SECURITYGROUP') def set_firewall_driver(firewall_driver): cfg.CONF.set_override('firewall_driver', firewall_driver, group='SECURITYGROUP') class FakeFirewallDriver(firewall_base.FirewallDriver): """Fake FirewallDriver FirewallDriver is the base class for the other firewall drivers. To use it in tests, all of its abstract methods must be overridden.
""" def prepare_port_filter(self, port): raise NotImplementedError() def update_port_filter(self, port): raise NotImplementedError() class SecurityGroupRpcTestPlugin(test_sg.SecurityGroupTestPlugin, sg_db_rpc.SecurityGroupServerRpcMixin): def __init__(self): super(SecurityGroupRpcTestPlugin, self).__init__() self.notifier = mock.Mock() self.devices = {} def create_port(self, context, port): result = super(SecurityGroupRpcTestPlugin, self).create_port(context, port) self.devices[result['id']] = result self.notify_security_groups_member_updated(context, result) return result def update_port(self, context, id, port): original_port = self.get_port(context, id) updated_port = super(SecurityGroupRpcTestPlugin, self).update_port(context, id, port) self.devices[id] = updated_port self.update_security_group_on_port( context, id, port, original_port, updated_port) return updated_port def delete_port(self, context, id): port = self.get_port(context, id) super(SecurityGroupRpcTestPlugin, self).delete_port(context, id) self.notify_security_groups_member_updated(context, port) del self.devices[id] def get_port_from_device(self, context, device): device = self.devices.get(device) if device: device['security_group_rules'] = [] device['security_group_source_groups'] = [] device['fixed_ips'] = [ip['ip_address'] for ip in device['fixed_ips']] return device class SGServerRpcCallBackTestCase(test_sg.SecurityGroupDBTestCase): def setUp(self, plugin=None): plugin = plugin or TEST_PLUGIN_CLASS set_firewall_driver(FIREWALL_NOOP_DRIVER) super(SGServerRpcCallBackTestCase, self).setUp(plugin) self.notifier = directory.get_plugin().notifier self.rpc = securitygroups_rpc.SecurityGroupServerRpcCallback() def _test_security_group_port(self, device_owner, gw_ip, cidr, ip_version, ip_address): with self.network() as net: with self.subnet(net, gateway_ip=gw_ip, cidr=cidr, ip_version=ip_version) as subnet: kwargs = { 'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': ip_address}]} if device_owner: kwargs['device_owner'] = device_owner res = self._create_port( self.fmt, net['network']['id'], **kwargs) res = self.deserialize(self.fmt, res) port_id = res['port']['id'] if device_owner in const.ROUTER_INTERFACE_OWNERS: data = {'port': {'fixed_ips': []}} req = self.new_update_request('ports', data, port_id) res = self.deserialize(self.fmt, req.get_response(self.api)) self._delete('ports', port_id) def _test_sg_rules_for_devices_ipv4_ingress_port_range( self, min_port, max_port): fake_prefix = FAKE_PREFIX[const.IPv4] with self.network() as n,\ self.subnet(n),\ self.security_group() as sg1: sg1_id = sg1['security_group']['id'] rule1 = self._build_security_group_rule( sg1_id, 'ingress', const.PROTO_NAME_TCP, str(min_port), str(max_port)) rule2 = self._build_security_group_rule( sg1_id, 'ingress', const.PROTO_NAME_TCP, '23', '23', fake_prefix) rules = { 'security_group_rules': [rule1['security_group_rule'], rule2['security_group_rule']]} res = self._create_security_group_rule(self.fmt, rules) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) res1 = self._create_port( self.fmt, n['network']['id'], security_groups=[sg1_id]) ports_rest1 = self.deserialize(self.fmt, res1) port_id1 = ports_rest1['port']['id'] self.rpc.devices = {port_id1: ports_rest1['port']} devices = [port_id1, 'no_exist_device'] ctx = context.get_admin_context() ports_rpc = self.rpc.security_group_rules_for_devices( ctx, devices=devices) port_rpc = ports_rpc[port_id1] expected = [{'direction': 'egress', 
'ethertype': const.IPv4, 'security_group_id': sg1_id}, {'direction': 'egress', 'ethertype': const.IPv6, 'security_group_id': sg1_id}, {'direction': 'ingress', 'protocol': const.PROTO_NAME_TCP, 'ethertype': const.IPv4, 'port_range_max': max_port, 'security_group_id': sg1_id, 'port_range_min': min_port}, {'direction': 'ingress', 'protocol': const.PROTO_NAME_TCP, 'ethertype': const.IPv4, 'port_range_max': 23, 'security_group_id': sg1_id, 'port_range_min': 23, 'source_ip_prefix': fake_prefix}, ] + ingress_address_assignment_rules(ports_rest1) self.assertEqual(port_rpc['security_group_rules'], expected) self._delete('ports', port_id1) def test_sg_rules_for_devices_ipv4_ingress_port_range_min_port_1(self): self._test_sg_rules_for_devices_ipv4_ingress_port_range(1, 10) def test_security_group_info_for_ports_with_no_rules(self): with self.network() as n,\ self.subnet(n),\ self.security_group() as sg: sg_id = sg['security_group']['id'] self._delete_default_security_group_egress_rules(sg_id) res = self._create_port( self.fmt, n['network']['id'], security_groups=[sg_id]) ports_rest = self.deserialize(self.fmt, res) port_id = ports_rest['port']['id'] self.rpc.devices = {port_id: ports_rest['port']} devices = [port_id] ctx = context.get_admin_context() sg_info = self.rpc.security_group_info_for_devices( ctx, devices=devices) expected = {sg_id: []} self.assertEqual(expected, sg_info['security_groups']) self._delete('ports', port_id) @contextlib.contextmanager def _port_with_addr_pairs_and_security_group(self): plugin_obj = directory.get_plugin() if ('allowed-address-pairs' not in plugin_obj.supported_extension_aliases): self.skipTest("Test depends on allowed-address-pairs extension") fake_prefix = FAKE_PREFIX['IPv4'] with self.network() as n,\ self.subnet(n),\ self.security_group() as sg1: sg1_id = sg1['security_group']['id'] rule1 = self._build_security_group_rule( sg1_id, 'ingress', 'tcp', '22', '22', remote_group_id=sg1_id) rule2 = self._build_security_group_rule( sg1_id, 'ingress', 'tcp', '23', '23', fake_prefix) rules = { 'security_group_rules': [rule1['security_group_rule'], rule2['security_group_rule']]} res = self._create_security_group_rule(self.fmt, rules) self.deserialize(self.fmt, res) self.assertEqual(res.status_int, 201) address_pairs = [{'mac_address': '00:00:00:00:00:01', 'ip_address': '10.0.1.0/24'}, {'mac_address': '00:00:00:00:00:01', 'ip_address': '11.0.0.1'}] res1 = self._create_port( self.fmt, n['network']['id'], security_groups=[sg1_id], arg_list=(addr_apidef.ADDRESS_PAIRS,), allowed_address_pairs=address_pairs) yield self.deserialize(self.fmt, res1) def test_security_group_info_for_devices_ipv4_addr_pair(self): with self._port_with_addr_pairs_and_security_group() as port: port_id = port['port']['id'] sg_id = port['port']['security_groups'][0] devices = [port_id, 'no_exist_device'] ctx = context.get_admin_context() # verify that address pairs are included in remote SG IPs sg_member_ips = self.rpc.security_group_info_for_devices( ctx, devices=devices)['sg_member_ips'] expected_member_ips = [ '10.0.1.0/24', '11.0.0.1', port['port']['fixed_ips'][0]['ip_address']] self.assertEqual(sorted(expected_member_ips), sorted(sg_member_ips[sg_id]['IPv4'])) self._delete('ports', port_id) def test_security_group_rules_for_devices_ipv4_ingress_addr_pair(self): fake_prefix = FAKE_PREFIX[const.IPv4] with self._port_with_addr_pairs_and_security_group() as port: port_id = port['port']['id'] sg_id = port['port']['security_groups'][0] devices = [port_id, 'no_exist_device'] ctx = 
context.get_admin_context() ports_rpc = self.rpc.security_group_rules_for_devices( ctx, devices=devices) port_rpc = ports_rpc[port_id] expected = [{'direction': 'egress', 'ethertype': 'IPv4', 'security_group_id': sg_id}, {'direction': 'egress', 'ethertype': 'IPv6', 'security_group_id': sg_id}, {'direction': 'ingress', 'protocol': 'tcp', 'ethertype': 'IPv4', 'port_range_max': 22, 'remote_group_id': sg_id, 'security_group_id': sg_id, 'source_ip_prefix': '11.0.0.1/32', 'port_range_min': 22}, {'direction': 'ingress', 'protocol': 'tcp', 'ethertype': 'IPv4', 'port_range_max': 22, 'remote_group_id': sg_id, 'security_group_id': sg_id, 'source_ip_prefix': '10.0.1.0/24', 'port_range_min': 22}, {'direction': 'ingress', 'protocol': 'tcp', 'ethertype': 'IPv4', 'port_range_max': 23, 'security_group_id': sg_id, 'port_range_min': 23, 'source_ip_prefix': fake_prefix}, ] + ingress_address_assignment_rules(port) expected = tools.UnorderedList(expected) self.assertEqual(expected, port_rpc['security_group_rules']) self.assertEqual(port['port']['allowed_address_pairs'], port_rpc['allowed_address_pairs']) self._delete('ports', port_id) def test_security_group_rules_for_devices_ipv4_egress(self): fake_prefix = FAKE_PREFIX[const.IPv4] with self.network() as n,\ self.subnet(n),\ self.security_group() as sg1: sg1_id = sg1['security_group']['id'] rule1 = self._build_security_group_rule( sg1_id, 'egress', const.PROTO_NAME_TCP, '22', '22') rule2 = self._build_security_group_rule( sg1_id, 'egress', const.PROTO_NAME_UDP, '23', '23', fake_prefix) rules = { 'security_group_rules': [rule1['security_group_rule'], rule2['security_group_rule']]} res = self._create_security_group_rule(self.fmt, rules) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) res1 = self._create_port( self.fmt, n['network']['id'], security_groups=[sg1_id]) ports_rest1 = self.deserialize(self.fmt, res1) port_id1 = ports_rest1['port']['id'] self.rpc.devices = {port_id1: ports_rest1['port']} devices = [port_id1, 'no_exist_device'] ctx = context.get_admin_context() ports_rpc = self.rpc.security_group_rules_for_devices( ctx, devices=devices) port_rpc = ports_rpc[port_id1] expected = [{'direction': 'egress', 'ethertype': const.IPv4, 'security_group_id': sg1_id}, {'direction': 'egress', 'ethertype': const.IPv6, 'security_group_id': sg1_id}, {'direction': 'egress', 'protocol': const.PROTO_NAME_TCP, 'ethertype': const.IPv4, 'port_range_max': 22, 'security_group_id': sg1_id, 'port_range_min': 22}, {'direction': 'egress', 'protocol': const.PROTO_NAME_UDP, 'ethertype': const.IPv4, 'port_range_max': 23, 'security_group_id': sg1_id, 'port_range_min': 23, 'dest_ip_prefix': fake_prefix}, ] + ingress_address_assignment_rules(ports_rest1) self.assertEqual(port_rpc['security_group_rules'], expected) self._delete('ports', port_id1) def test_security_group_rules_for_devices_ipv4_source_group(self): with self.network() as n,\ self.subnet(n),\ self.security_group() as sg1,\ self.security_group() as sg2: sg1_id = sg1['security_group']['id'] sg2_id = sg2['security_group']['id'] rule1 = self._build_security_group_rule( sg1_id, 'ingress', const.PROTO_NAME_TCP, '24', '25', remote_group_id=sg2['security_group']['id']) rules = { 'security_group_rules': [rule1['security_group_rule']]} res = self._create_security_group_rule(self.fmt, rules) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) res1 = self._create_port( self.fmt, n['network']['id'], security_groups=[sg1_id, sg2_id]) ports_rest1 = 
self.deserialize(self.fmt, res1) port_id1 = ports_rest1['port']['id'] self.rpc.devices = {port_id1: ports_rest1['port']} devices = [port_id1, 'no_exist_device'] res2 = self._create_port( self.fmt, n['network']['id'], security_groups=[sg2_id]) ports_rest2 = self.deserialize(self.fmt, res2) port_id2 = ports_rest2['port']['id'] port_fixed_ip2 = ports_rest2['port']['fixed_ips'][0]['ip_address'] ctx = context.get_admin_context() ports_rpc = self.rpc.security_group_rules_for_devices( ctx, devices=devices) port_rpc = ports_rpc[port_id1] expected = [{'direction': 'egress', 'ethertype': const.IPv4, 'security_group_id': sg1_id}, {'direction': 'egress', 'ethertype': const.IPv6, 'security_group_id': sg1_id}, {'direction': 'egress', 'ethertype': const.IPv4, 'security_group_id': sg2_id}, {'direction': 'egress', 'ethertype': const.IPv6, 'security_group_id': sg2_id}, {'direction': u'ingress', 'source_ip_prefix': port_fixed_ip2 + '/32', 'protocol': const.PROTO_NAME_TCP, 'ethertype': const.IPv4, 'port_range_max': 25, 'port_range_min': 24, 'remote_group_id': sg2_id, 'security_group_id': sg1_id}, ] + ingress_address_assignment_rules(ports_rest1) self.assertEqual(expected, port_rpc['security_group_rules']) self._delete('ports', port_id1) self._delete('ports', port_id2) def test_security_group_info_for_devices_ipv4_source_group(self): with self.network() as n,\ self.subnet(n),\ self.security_group() as sg1,\ self.security_group() as sg2: sg1_id = sg1['security_group']['id'] sg2_id = sg2['security_group']['id'] rule1 = self._build_security_group_rule( sg1_id, 'ingress', const.PROTO_NAME_TCP, '24', '25', remote_group_id=sg2['security_group']['id']) rules = { 'security_group_rules': [rule1['security_group_rule']]} res = self._create_security_group_rule(self.fmt, rules) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) res1 = self._create_port( self.fmt, n['network']['id'], security_groups=[sg1_id]) ports_rest1 = self.deserialize(self.fmt, res1) port_id1 = ports_rest1['port']['id'] self.rpc.devices = {port_id1: ports_rest1['port']} devices = [port_id1, 'no_exist_device'] res2 = self._create_port( self.fmt, n['network']['id'], security_groups=[sg2_id]) ports_rest2 = self.deserialize(self.fmt, res2) port_id2 = ports_rest2['port']['id'] port_ip2 = ports_rest2['port']['fixed_ips'][0]['ip_address'] ctx = context.get_admin_context() ports_rpc = self.rpc.security_group_info_for_devices( ctx, devices=devices) expected = { 'security_groups': {sg1_id: [ {'direction': 'egress', 'ethertype': const.IPv4, 'stateful': True}, {'direction': 'egress', 'ethertype': const.IPv6, 'stateful': True}, {'direction': u'ingress', 'protocol': const.PROTO_NAME_TCP, 'ethertype': const.IPv4, 'port_range_max': 25, 'port_range_min': 24, 'remote_group_id': sg2_id, 'stateful': True} ]}, 'sg_member_ips': {sg2_id: { 'IPv4': set([port_ip2]), 'IPv6': set(), }} } self.assertEqual(expected['security_groups'], ports_rpc['security_groups']) self.assertEqual(expected['sg_member_ips'][sg2_id]['IPv4'], ports_rpc['sg_member_ips'][sg2_id]['IPv4']) self._delete('ports', port_id1) self._delete('ports', port_id2) def test_security_group_rules_for_devices_ipv6_ingress(self): fake_prefix = FAKE_PREFIX[const.IPv6] fake_gateway = FAKE_IP[const.IPv6] with self.network() as n,\ self.subnet(n, gateway_ip=fake_gateway, cidr=fake_prefix, ip_version=const.IP_VERSION_6 ) as subnet_v6,\ self.security_group() as sg1: sg1_id = sg1['security_group']['id'] rule1 = self._build_security_group_rule( sg1_id, 'ingress', const.PROTO_NAME_TCP, 
'22', '22', ethertype=const.IPv6) rule2 = self._build_security_group_rule( sg1_id, 'ingress', const.PROTO_NAME_UDP, '23', '23', fake_prefix, ethertype=const.IPv6) rules = { 'security_group_rules': [rule1['security_group_rule'], rule2['security_group_rule']]} res = self._create_security_group_rule(self.fmt, rules) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) self._create_port( self.fmt, n['network']['id'], fixed_ips=[{'subnet_id': subnet_v6['subnet']['id'], 'ip_address': FAKE_IP['IPv6_DHCP']}], device_owner=const.DEVICE_OWNER_DHCP, security_groups=[sg1_id]) res1 = self._create_port( self.fmt, n['network']['id'], fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}], security_groups=[sg1_id]) ports_rest1 = self.deserialize(self.fmt, res1) port_id1 = ports_rest1['port']['id'] self.rpc.devices = {port_id1: ports_rest1['port']} devices = [port_id1, 'no_exist_device'] ctx = context.get_admin_context() ports_rpc = self.rpc.security_group_rules_for_devices( ctx, devices=devices) port_rpc = ports_rpc[port_id1] source_port, dest_port, ethertype = sg_db_rpc.DHCP_RULE_PORT[6] expected = [{'direction': 'egress', 'ethertype': const.IPv4, 'security_group_id': sg1_id}, {'direction': 'egress', 'ethertype': const.IPv6, 'security_group_id': sg1_id}, {'direction': 'ingress', 'protocol': const.PROTO_NAME_TCP, 'ethertype': const.IPv6, 'port_range_max': 22, 'security_group_id': sg1_id, 'port_range_min': 22}, {'direction': 'ingress', 'protocol': const.PROTO_NAME_UDP, 'ethertype': const.IPv6, 'port_range_max': 23, 'security_group_id': sg1_id, 'port_range_min': 23, 'source_ip_prefix': fake_prefix}, ] + ingress_address_assignment_rules(ports_rest1) self.assertEqual(port_rpc['security_group_rules'], expected) self._delete('ports', port_id1) def test_security_group_info_for_devices_only_ipv6_rule(self): with self.network() as n,\ self.subnet(n),\ self.security_group() as sg1: sg1_id = sg1['security_group']['id'] rule1 = self._build_security_group_rule( sg1_id, 'ingress', const.PROTO_NAME_TCP, '22', '22', remote_group_id=sg1_id, ethertype=const.IPv6) rules = { 'security_group_rules': [rule1['security_group_rule']]} self._make_security_group_rule(self.fmt, rules) res1 = self._create_port( self.fmt, n['network']['id'], security_groups=[sg1_id]) ports_rest1 = self.deserialize(self.fmt, res1) port_id1 = ports_rest1['port']['id'] self.rpc.devices = {port_id1: ports_rest1['port']} devices = [port_id1, 'no_exist_device'] ctx = context.get_admin_context() ports_rpc = self.rpc.security_group_info_for_devices( ctx, devices=devices) expected = { 'security_groups': {sg1_id: [ {'direction': 'egress', 'ethertype': const.IPv4, 'stateful': True}, {'direction': 'egress', 'ethertype': const.IPv6, 'stateful': True}, {'direction': u'ingress', 'protocol': const.PROTO_NAME_TCP, 'ethertype': const.IPv6, 'port_range_max': 22, 'port_range_min': 22, 'remote_group_id': sg1_id, 'stateful': True} ]}, 'sg_member_ips': {sg1_id: { 'IPv6': set(), }} } self.assertEqual(expected['security_groups'], ports_rpc['security_groups']) self.assertEqual(expected['sg_member_ips'][sg1_id]['IPv6'], ports_rpc['sg_member_ips'][sg1_id]['IPv6']) self._delete('ports', port_id1) def test_security_group_rules_for_devices_ipv6_egress(self): fake_prefix = FAKE_PREFIX[const.IPv6] fake_gateway = FAKE_IP[const.IPv6] with self.network() as n,\ self.subnet(n, gateway_ip=fake_gateway, cidr=fake_prefix, ip_version=const.IP_VERSION_6 ) as subnet_v6,\ self.security_group() as sg1: sg1_id = sg1['security_group']['id'] rule1 = 
self._build_security_group_rule( sg1_id, 'egress', const.PROTO_NAME_TCP, '22', '22', ethertype=const.IPv6) rule2 = self._build_security_group_rule( sg1_id, 'egress', const.PROTO_NAME_UDP, '23', '23', fake_prefix, ethertype=const.IPv6) rules = { 'security_group_rules': [rule1['security_group_rule'], rule2['security_group_rule']]} self._make_security_group_rule(self.fmt, rules) ports_rest1 = self._make_port( self.fmt, n['network']['id'], fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}], security_groups=[sg1_id]) port_id1 = ports_rest1['port']['id'] self.rpc.devices = {port_id1: ports_rest1['port']} devices = [port_id1, 'no_exist_device'] ctx = context.get_admin_context() ports_rpc = self.rpc.security_group_rules_for_devices( ctx, devices=devices) port_rpc = ports_rpc[port_id1] expected = [{'direction': 'egress', 'ethertype': const.IPv4, 'security_group_id': sg1_id}, {'direction': 'egress', 'ethertype': const.IPv6, 'security_group_id': sg1_id}, {'direction': 'egress', 'protocol': const.PROTO_NAME_TCP, 'ethertype': const.IPv6, 'port_range_max': 22, 'security_group_id': sg1_id, 'port_range_min': 22}, {'direction': 'egress', 'protocol': const.PROTO_NAME_UDP, 'ethertype': const.IPv6, 'port_range_max': 23, 'security_group_id': sg1_id, 'port_range_min': 23, 'dest_ip_prefix': fake_prefix}, ] + ingress_address_assignment_rules(ports_rest1) self.assertEqual(port_rpc['security_group_rules'], expected) self._delete('ports', port_id1) def test_security_group_rules_for_devices_ipv6_source_group(self): fake_prefix = FAKE_PREFIX[const.IPv6] fake_gateway = FAKE_IP[const.IPv6] with self.network() as n,\ self.subnet(n, gateway_ip=fake_gateway, cidr=fake_prefix, ip_version=const.IP_VERSION_6 ) as subnet_v6,\ self.security_group() as sg1,\ self.security_group() as sg2: sg1_id = sg1['security_group']['id'] sg2_id = sg2['security_group']['id'] rule1 = self._build_security_group_rule( sg1_id, 'ingress', const.PROTO_NAME_TCP, '24', '25', ethertype=const.IPv6, remote_group_id=sg2['security_group']['id']) rules = { 'security_group_rules': [rule1['security_group_rule']]} self._make_security_group_rule(self.fmt, rules) ports_rest1 = self._make_port( self.fmt, n['network']['id'], fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}], security_groups=[sg1_id, sg2_id]) port_id1 = ports_rest1['port']['id'] self.rpc.devices = {port_id1: ports_rest1['port']} devices = [port_id1, 'no_exist_device'] ports_rest2 = self._make_port( self.fmt, n['network']['id'], fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}], security_groups=[sg2_id]) port_id2 = ports_rest2['port']['id'] port_ip2 = ports_rest2['port']['fixed_ips'][0]['ip_address'] ctx = context.get_admin_context() ports_rpc = self.rpc.security_group_rules_for_devices( ctx, devices=devices) port_rpc = ports_rpc[port_id1] expected = [{'direction': 'egress', 'ethertype': const.IPv4, 'security_group_id': sg1_id}, {'direction': 'egress', 'ethertype': const.IPv6, 'security_group_id': sg1_id}, {'direction': 'egress', 'ethertype': const.IPv4, 'security_group_id': sg2_id}, {'direction': 'egress', 'ethertype': const.IPv6, 'security_group_id': sg2_id}, {'direction': 'ingress', 'source_ip_prefix': port_ip2 + '/128', 'protocol': const.PROTO_NAME_TCP, 'ethertype': const.IPv6, 'port_range_max': 25, 'port_range_min': 24, 'remote_group_id': sg2_id, 'security_group_id': sg1_id}, ] + ingress_address_assignment_rules(ports_rest1) self.assertEqual(expected, port_rpc['security_group_rules']) self._delete('ports', port_id1) self._delete('ports', port_id2) class 
SecurityGroupAgentRpcTestCaseForNoneDriver(base.BaseTestCase): def test_init_firewall_with_none_driver(self): set_enable_security_groups(False) agent = sg_rpc.SecurityGroupAgentRpc( context=None, plugin_rpc=mock.Mock()) self.assertEqual(agent.firewall.__class__.__name__, 'NoopFirewallDriver') def test_get_trusted_devices(self): agent = sg_rpc.SecurityGroupAgentRpc( context=None, plugin_rpc=mock.Mock()) device_ids = ['port_1_id', 'tap_2', 'tap_3', 'port_4_id'] devices = { 'port_1_id': {'device': 'tap_1'}, 'port_3_id': {'device': 'tap_3'}, } trusted_devices = agent._get_trusted_devices( device_ids, devices) self.assertEqual(['tap_2', 'port_4_id'], trusted_devices) class BaseSecurityGroupAgentRpcTestCase(base.BaseTestCase): def setUp(self, defer_refresh_firewall=False): super(BaseSecurityGroupAgentRpcTestCase, self).setUp() set_firewall_driver(FIREWALL_NOOP_DRIVER) self.agent = sg_rpc.SecurityGroupAgentRpc( context=None, plugin_rpc=mock.Mock(), defer_refresh_firewall=defer_refresh_firewall) mock.patch('neutron.agent.linux.iptables_manager').start() self.default_firewall = self.agent.firewall self.firewall = mock.Mock() firewall_object = FakeFirewallDriver() self.firewall.defer_apply.side_effect = firewall_object.defer_apply self.agent.firewall = self.firewall self.fake_device = {'device': 'fake_device', 'network_id': 'fake_net', 'security_groups': ['fake_sgid1', 'fake_sgid2'], 'security_group_source_groups': ['fake_sgid2'], 'security_group_rules': [{'security_group_id': 'fake_sgid1', 'remote_group_id': 'fake_sgid2'}]} self.firewall.ports = {'fake_device': self.fake_device} self.firewall.security_group_updated = mock.Mock() class SecurityGroupAgentRpcTestCase(BaseSecurityGroupAgentRpcTestCase): def setUp(self, defer_refresh_firewall=False): super(SecurityGroupAgentRpcTestCase, self).setUp( defer_refresh_firewall) rpc = self.agent.plugin_rpc rpc.security_group_info_for_devices.side_effect = ( oslo_messaging.UnsupportedVersion('1.2')) rpc.security_group_rules_for_devices.return_value = ( self.firewall.ports) def test_prepare_and_remove_devices_filter(self): self.agent.prepare_devices_filter(['fake_device']) self.agent.remove_devices_filter(['fake_device']) # ignore device which is not filtered self.firewall.assert_has_calls([mock.call.defer_apply(), mock.call.prepare_port_filter( self.fake_device), mock.call.process_trusted_ports([]), mock.call.defer_apply(), mock.call.remove_port_filter( self.fake_device), ]) def test_prepare_devices_filter_with_noopfirewall(self): self.agent.firewall = self.default_firewall self.agent.plugin_rpc.security_group_info_for_devices = mock.Mock() self.agent.plugin_rpc.security_group_rules_for_devices = mock.Mock() self.agent.prepare_devices_filter(['fake_device']) self.assertFalse(self.agent.plugin_rpc. security_group_info_for_devices.called) self.assertFalse(self.agent.plugin_rpc. security_group_rules_for_devices.called) def test_prepare_devices_filter_with_firewall_disabled(self): cfg.CONF.set_override('enable_security_group', False, 'SECURITYGROUP') self.agent.plugin_rpc.security_group_info_for_devices = mock.Mock() self.agent.plugin_rpc.security_group_rules_for_devices = mock.Mock() self.agent.prepare_devices_filter(['fake_device']) self.assertFalse(self.agent.plugin_rpc. security_group_info_for_devices.called) self.assertFalse(self.agent.plugin_rpc. 
security_group_rules_for_devices.called) def test_prepare_devices_filter_with_trusted_ports(self): devices_to_filter = {k: {'device': k} for k in range(4, 8)} all_devices = range(10) expected_devices = [0, 1, 2, 3, 8, 9] self.agent._use_enhanced_rpc = True with mock.patch.object( self.agent.plugin_rpc, 'security_group_info_for_devices', return_value={ 'devices': devices_to_filter, 'security_groups': {}, 'sg_member_ips': {}}): with mock.patch.object( self.agent.firewall, 'process_trusted_ports') as m_process: self.agent.prepare_devices_filter(all_devices) m_process.assert_called_once_with(expected_devices) def test_remove_devices_filter_with_trusted_ports(self): all_devices = range(10) firewall_managed_ports = {k: k for k in range(4, 8)} trusted_port_ids = [0, 1, 2, 3, 8, 9] with mock.patch.object(self.agent, 'firewall') as mock_firewall: mock_firewall.ports = firewall_managed_ports self.agent.remove_devices_filter(all_devices) mock_firewall.remove_port_filter.assert_has_calls( [mock.call(i) for i in firewall_managed_ports.keys()]) mock_firewall.remove_trusted_ports.assert_has_calls( [mock.call([i]) for i in trusted_port_ids]) def test_security_groups_rule_updated(self): self.agent.refresh_firewall = mock.Mock() self.agent.prepare_devices_filter(['fake_port_id']) self.agent.security_groups_rule_updated(['fake_sgid1', 'fake_sgid3']) self.agent.refresh_firewall.assert_has_calls( [mock.call.refresh_firewall([self.fake_device['device']])]) self.assertFalse(self.firewall.security_group_updated.called) def test_security_groups_rule_not_updated(self): self.agent.refresh_firewall = mock.Mock() self.agent.prepare_devices_filter(['fake_port_id']) self.agent.security_groups_rule_updated(['fake_sgid3', 'fake_sgid4']) self.assertFalse(self.agent.refresh_firewall.called) self.assertFalse(self.firewall.security_group_updated.called) def test_security_groups_member_updated(self): self.agent.refresh_firewall = mock.Mock() self.agent.prepare_devices_filter(['fake_port_id']) self.agent.security_groups_member_updated(['fake_sgid2', 'fake_sgid3']) self.agent.refresh_firewall.assert_has_calls( [mock.call.refresh_firewall([self.fake_device['device']])]) self.assertFalse(self.firewall.security_group_updated.called) def test_security_groups_member_not_updated(self): self.agent.refresh_firewall = mock.Mock() self.agent.prepare_devices_filter(['fake_port_id']) self.agent.security_groups_member_updated(['fake_sgid3', 'fake_sgid4']) self.assertFalse(self.agent.refresh_firewall.called) self.assertFalse(self.firewall.security_group_updated.called) def test_refresh_firewall(self): self.agent.prepare_devices_filter(['fake_port_id']) self.agent.refresh_firewall() calls = [mock.call.defer_apply(), mock.call.prepare_port_filter(self.fake_device), mock.call.process_trusted_ports(['fake_port_id']), mock.call.defer_apply(), mock.call.update_port_filter(self.fake_device), mock.call.process_trusted_ports([])] self.firewall.assert_has_calls(calls) def test_refresh_firewall_devices(self): self.agent.prepare_devices_filter(['fake_port_id']) self.agent.refresh_firewall([self.fake_device['device']]) calls = [mock.call.defer_apply(), mock.call.prepare_port_filter(self.fake_device), mock.call.process_trusted_ports(['fake_port_id']), mock.call.defer_apply(), mock.call.update_port_filter(self.fake_device), mock.call.process_trusted_ports([])] self.firewall.assert_has_calls(calls) def test_refresh_firewall_none(self): self.agent.refresh_firewall([]) self.assertFalse(self.firewall.called) def test_refresh_firewall_with_firewall_disabled(self):
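# with enable_security_group off, refresh_firewall() should short-circuit:
# neither RPC variant is called and no firewall transaction is started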
cfg.CONF.set_override('enable_security_group', False, 'SECURITYGROUP') self.agent.plugin_rpc.security_group_info_for_devices = mock.Mock() self.agent.plugin_rpc.security_group_rules_for_devices = mock.Mock() self.agent.firewall.defer_apply = mock.Mock() self.agent.refresh_firewall([self.fake_device['device']]) self.assertFalse(self.agent.plugin_rpc. security_group_info_for_devices.called) self.assertFalse(self.agent.plugin_rpc. security_group_rules_for_devices.called) self.assertFalse(self.agent.firewall.defer_apply.called) def test_refresh_firewall_with_noopfirewall(self): self.agent.firewall = self.default_firewall self.agent.plugin_rpc.security_group_info_for_devices = mock.Mock() self.agent.plugin_rpc.security_group_rules_for_devices = mock.Mock() self.agent.firewall.defer_apply = mock.Mock() self.agent.refresh_firewall([self.fake_device]) self.assertFalse(self.agent.plugin_rpc. security_group_info_for_devices.called) self.assertFalse(self.agent.plugin_rpc. security_group_rules_for_devices.called) self.assertFalse(self.agent.firewall.defer_apply.called) class SecurityGroupAgentEnhancedRpcTestCase(BaseSecurityGroupAgentRpcTestCase): def setUp(self, defer_refresh_firewall=False): super(SecurityGroupAgentEnhancedRpcTestCase, self).setUp( defer_refresh_firewall=defer_refresh_firewall) fake_sg_info = { 'security_groups': collections.OrderedDict([ ('fake_sgid2', []), ('fake_sgid1', [{'remote_group_id': 'fake_sgid2'}])]), 'sg_member_ips': {'fake_sgid2': {'IPv4': [], 'IPv6': []}}, 'devices': self.firewall.ports} self.agent.plugin_rpc.security_group_info_for_devices.return_value = ( fake_sg_info) def test_prepare_and_remove_devices_filter_enhanced_rpc(self): self.agent.prepare_devices_filter(['fake_device']) self.agent.remove_devices_filter(['fake_device']) # the two expected calls below are long, so build them as temporary variables first tmp_mock1 = mock.call.update_security_group_rules( 'fake_sgid1', [{'remote_group_id': 'fake_sgid2'}]) tmp_mock2 = mock.call.update_security_group_members( 'fake_sgid2', {'IPv4': [], 'IPv6': []}) # ignore device which is not filtered self.firewall.assert_has_calls([mock.call.defer_apply(), mock.call.update_security_group_rules( 'fake_sgid2', []), tmp_mock1, tmp_mock2, mock.call.prepare_port_filter( self.fake_device), mock.call.process_trusted_ports([]), mock.call.defer_apply(), mock.call.remove_port_filter( self.fake_device), ], any_order=True) def test_security_groups_rule_updated_enhanced_rpc(self): sg_list = ['fake_sgid1', 'fake_sgid3'] self.agent.refresh_firewall = mock.Mock() self.agent.prepare_devices_filter(['fake_port_id']) self.agent.security_groups_rule_updated(sg_list) self.agent.refresh_firewall.assert_called_once_with( [self.fake_device['device']]) self.firewall.security_group_updated.assert_called_once_with( 'sg_rule', set(sg_list)) def test_security_groups_rule_not_updated_enhanced_rpc(self): self.agent.refresh_firewall = mock.Mock() self.agent.prepare_devices_filter(['fake_port_id']) self.agent.security_groups_rule_updated(['fake_sgid3', 'fake_sgid4']) self.assertFalse(self.agent.refresh_firewall.called) self.assertFalse(self.firewall.security_group_updated.called) def test_security_groups_member_updated_enhanced_rpc(self): sg_list = ['fake_sgid2', 'fake_sgid3'] self.agent.refresh_firewall = mock.Mock() self.agent.prepare_devices_filter(['fake_port_id']) self.agent.security_groups_member_updated(sg_list) self.agent.refresh_firewall.assert_called_once_with( [self.fake_device['device']]) self.firewall.security_group_updated.assert_called_once_with( 'sg_member',
set(sg_list)) def test_security_groups_member_not_updated_enhanced_rpc(self): self.agent.refresh_firewall = mock.Mock() self.agent.prepare_devices_filter(['fake_port_id']) self.agent.security_groups_member_updated( ['fake_sgid3', 'fake_sgid4']) self.assertFalse(self.agent.refresh_firewall.called) self.assertFalse(self.firewall.security_group_updated.called) def test_refresh_firewall_enhanced_rpc(self): self.agent.prepare_devices_filter(['fake_port_id']) self.agent.refresh_firewall() calls = [mock.call.defer_apply(), mock.call.update_security_group_rules('fake_sgid2', []), mock.call.update_security_group_rules( 'fake_sgid1', [{'remote_group_id': 'fake_sgid2'}]), mock.call.update_security_group_members( 'fake_sgid2', {'IPv4': [], 'IPv6': []}), mock.call.prepare_port_filter(self.fake_device), mock.call.process_trusted_ports(['fake_port_id']), mock.call.defer_apply(), mock.call.update_security_group_rules('fake_sgid2', []), mock.call.update_security_group_rules( 'fake_sgid1', [{'remote_group_id': 'fake_sgid2'}]), mock.call.update_security_group_members( 'fake_sgid2', {'IPv4': [], 'IPv6': []}), mock.call.update_port_filter(self.fake_device), mock.call.process_trusted_ports([])] self.firewall.assert_has_calls(calls, any_order=True) def test_refresh_firewall_devices_enhanced_rpc(self): self.agent.prepare_devices_filter(['fake_device']) self.agent.refresh_firewall([self.fake_device['device']]) calls = [mock.call.defer_apply(), mock.call.update_security_group_rules('fake_sgid2', []), mock.call.update_security_group_rules('fake_sgid1', [ {'remote_group_id': 'fake_sgid2'}]), mock.call.update_security_group_members('fake_sgid2', { 'IPv4': [], 'IPv6': [] }), mock.call.prepare_port_filter(self.fake_device), mock.call.process_trusted_ports([]), mock.call.defer_apply(), mock.call.update_security_group_rules('fake_sgid2', []), mock.call.update_security_group_rules('fake_sgid1', [ {'remote_group_id': 'fake_sgid2'}]), mock.call.update_security_group_members('fake_sgid2', { 'IPv4': [], 'IPv6': []}), mock.call.update_port_filter(self.fake_device), mock.call.process_trusted_ports([]), ] self.firewall.assert_has_calls(calls, any_order=True) def test_refresh_firewall_none_enhanced_rpc(self): self.agent.refresh_firewall([]) self.assertFalse(self.firewall.called) class SecurityGroupAgentRpcWithDeferredRefreshTestCase( SecurityGroupAgentRpcTestCase): def setUp(self): super(SecurityGroupAgentRpcWithDeferredRefreshTestCase, self).setUp( defer_refresh_firewall=True) @contextlib.contextmanager def add_fake_device(self, device, sec_groups, source_sec_groups=None): fake_device = {'device': device, 'security_groups': sec_groups, 'security_group_source_groups': source_sec_groups or [], 'security_group_rules': [{'security_group_id': 'fake_sgid1', 'remote_group_id': 'fake_sgid2'}]} self.firewall.ports[device] = fake_device yield del self.firewall.ports[device] def test_security_groups_rule_updated(self): self.agent.security_groups_rule_updated(['fake_sgid1', 'fake_sgid3']) self.assertIn('fake_device', self.agent.devices_to_refilter) self.assertFalse(self.firewall.security_group_updated.called) def test_multiple_security_groups_rule_updated_same_port(self): with self.add_fake_device(device='fake_device_2', sec_groups=['fake_sgidX']): self.agent.refresh_firewall = mock.Mock() self.agent.security_groups_rule_updated(['fake_sgid1']) self.agent.security_groups_rule_updated(['fake_sgid2']) self.assertIn('fake_device', self.agent.devices_to_refilter) self.assertNotIn('fake_device_2', self.agent.devices_to_refilter) 
self.assertFalse(self.firewall.security_group_updated.called) def test_security_groups_rule_updated_multiple_ports(self): with self.add_fake_device(device='fake_device_2', sec_groups=['fake_sgid2']): self.agent.refresh_firewall = mock.Mock() self.agent.security_groups_rule_updated(['fake_sgid1', 'fake_sgid2']) self.assertIn('fake_device', self.agent.devices_to_refilter) self.assertIn('fake_device_2', self.agent.devices_to_refilter) self.assertFalse(self.firewall.security_group_updated.called) def test_multiple_security_groups_rule_updated_multiple_ports(self): with self.add_fake_device(device='fake_device_2', sec_groups=['fake_sgid2']): self.agent.refresh_firewall = mock.Mock() self.agent.security_groups_rule_updated(['fake_sgid1']) self.agent.security_groups_rule_updated(['fake_sgid2']) self.assertIn('fake_device', self.agent.devices_to_refilter) self.assertIn('fake_device_2', self.agent.devices_to_refilter) self.assertFalse(self.firewall.security_group_updated.called) def test_security_groups_member_updated(self): self.agent.security_groups_member_updated(['fake_sgid2', 'fake_sgid3']) self.assertIn('fake_device', self.agent.devices_to_refilter) self.assertFalse(self.firewall.security_group_updated.called) def test_multiple_security_groups_member_updated_same_port(self): with self.add_fake_device(device='fake_device_2', sec_groups=['fake_sgid1', 'fake_sgid1B'], source_sec_groups=['fake_sgidX']): self.agent.refresh_firewall = mock.Mock() self.agent.security_groups_member_updated(['fake_sgid1', 'fake_sgid3']) self.agent.security_groups_member_updated(['fake_sgid2', 'fake_sgid3']) self.assertIn('fake_device', self.agent.devices_to_refilter) self.assertNotIn('fake_device_2', self.agent.devices_to_refilter) self.assertFalse(self.firewall.security_group_updated.called) def test_security_groups_member_updated_multiple_ports(self): with self.add_fake_device(device='fake_device_2', sec_groups=['fake_sgid1', 'fake_sgid1B'], source_sec_groups=['fake_sgid2']): self.agent.security_groups_member_updated(['fake_sgid2']) self.assertIn('fake_device', self.agent.devices_to_refilter) self.assertIn('fake_device_2', self.agent.devices_to_refilter) self.assertFalse(self.firewall.security_group_updated.called) def test_multiple_security_groups_member_updated_multiple_ports(self): with self.add_fake_device(device='fake_device_2', sec_groups=['fake_sgid1', 'fake_sgid1B'], source_sec_groups=['fake_sgid1B']): self.agent.security_groups_member_updated(['fake_sgid1B']) self.agent.security_groups_member_updated(['fake_sgid2']) self.assertIn('fake_device', self.agent.devices_to_refilter) self.assertIn('fake_device_2', self.agent.devices_to_refilter) self.assertFalse(self.firewall.security_group_updated.called) def test_setup_port_filters_new_ports_only(self): self.agent.prepare_devices_filter = mock.Mock() self.agent.refresh_firewall = mock.Mock() self.agent.devices_to_refilter = set() self.agent.setup_port_filters(set(['fake_new_device']), set()) self.assertFalse(self.agent.devices_to_refilter) self.agent.prepare_devices_filter.assert_called_once_with( set(['fake_new_device'])) self.assertFalse(self.agent.refresh_firewall.called) self.assertFalse(self.firewall.security_group_updated.called) def test_setup_port_filters_updated_ports_only(self): self.agent.prepare_devices_filter = mock.Mock() self.agent.refresh_firewall = mock.Mock() self.agent.devices_to_refilter = set() self.agent.setup_port_filters(set(), set(['fake_updated_device'])) self.assertFalse(self.agent.devices_to_refilter) 
self.agent.refresh_firewall.assert_called_once_with( set(['fake_updated_device'])) self.assertFalse(self.agent.prepare_devices_filter.called) self.assertFalse(self.firewall.security_group_updated.called) def test_setup_port_filter_new_and_updated_ports(self): self.agent.prepare_devices_filter = mock.Mock() self.agent.refresh_firewall = mock.Mock() self.agent.devices_to_refilter = set() self.agent.setup_port_filters(set(['fake_new_device']), set(['fake_updated_device'])) self.assertFalse(self.agent.devices_to_refilter) self.agent.prepare_devices_filter.assert_called_once_with( set(['fake_new_device'])) self.agent.refresh_firewall.assert_called_once_with( set(['fake_updated_device'])) self.assertFalse(self.firewall.security_group_updated.called) def test_setup_port_filters_sg_updates_only(self): self.agent.prepare_devices_filter = mock.Mock() self.agent.refresh_firewall = mock.Mock() self.agent.devices_to_refilter = set(['fake_device']) self.agent.setup_port_filters(set(), set()) self.assertFalse(self.agent.devices_to_refilter) self.agent.refresh_firewall.assert_called_once_with( set(['fake_device'])) self.assertFalse(self.agent.prepare_devices_filter.called) self.assertFalse(self.firewall.security_group_updated.called) def test_setup_port_filters_sg_updates_and_new_ports(self): self.agent.prepare_devices_filter = mock.Mock() self.agent.refresh_firewall = mock.Mock() self.agent.devices_to_refilter = set(['fake_device']) self.agent.setup_port_filters(set(['fake_new_device']), set()) self.assertFalse(self.agent.devices_to_refilter) self.agent.prepare_devices_filter.assert_called_once_with( set(['fake_new_device'])) self.agent.refresh_firewall.assert_called_once_with( set(['fake_device'])) self.assertFalse(self.firewall.security_group_updated.called) def _test_prepare_devices_filter(self, devices): # simulate an RPC arriving and calling _security_group_updated() self.agent.devices_to_refilter |= set(['fake_new_device']) def test_setup_port_filters_new_port_and_rpc(self): # Make sure that if an RPC arrives and adds a device to # devices_to_refilter while we are in setup_port_filters() # that it is not cleared, and will be processed later. 
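# A minimal sketch, assuming (not quoting) the snapshot-and-clear pattern the
# agent is expected to follow inside setup_port_filters():
#     backlog = self.devices_to_refilter      # snapshot the pending devices
#     self.devices_to_refilter = set()        # concurrent RPCs repopulate this
#     self.refresh_firewall((updated | backlog) - new)
# A device queued by an RPC after the snapshot therefore survives in
# devices_to_refilter, which is what the assertions below verify.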
self.agent.prepare_devices_filter = self._test_prepare_devices_filter self.agent.refresh_firewall = mock.Mock() self.agent.devices_to_refilter = set(['new_device', 'fake_device']) self.agent.setup_port_filters(set(['new_device']), set()) self.assertEqual(self.agent.devices_to_refilter, set(['fake_new_device'])) self.agent.refresh_firewall.assert_called_once_with( set(['fake_device'])) self.assertFalse(self.firewall.security_group_updated.called) def test_setup_port_filters_sg_updates_and_updated_ports(self): self.agent.prepare_devices_filter = mock.Mock() self.agent.refresh_firewall = mock.Mock() self.agent.devices_to_refilter = set(['fake_device', 'fake_device_2']) self.agent.setup_port_filters( set(), set(['fake_device', 'fake_updated_device'])) self.assertFalse(self.agent.devices_to_refilter) self.agent.refresh_firewall.assert_called_once_with( set(['fake_device', 'fake_device_2', 'fake_updated_device'])) self.assertFalse(self.agent.prepare_devices_filter.called) self.assertFalse(self.firewall.security_group_updated.called) def test_setup_port_filters_all_updates(self): self.agent.prepare_devices_filter = mock.Mock() self.agent.refresh_firewall = mock.Mock() self.agent.devices_to_refilter = set(['fake_device', 'fake_device_2']) self.agent.setup_port_filters( set(['fake_new_device']), set(['fake_device', 'fake_updated_device'])) self.assertFalse(self.agent.devices_to_refilter) self.agent.prepare_devices_filter.assert_called_once_with( set(['fake_new_device'])) self.agent.refresh_firewall.assert_called_once_with( set(['fake_device', 'fake_device_2', 'fake_updated_device'])) self.assertFalse(self.firewall.security_group_updated.called) def test_setup_port_filters_no_update(self): self.agent.prepare_devices_filter = mock.Mock() self.agent.refresh_firewall = mock.Mock() self.agent.devices_to_refilter = set() self.agent.setup_port_filters(set(), set()) self.assertFalse(self.agent.devices_to_refilter) self.assertFalse(self.agent.refresh_firewall.called) self.assertFalse(self.agent.prepare_devices_filter.called) self.assertFalse(self.firewall.security_group_updated.called) class FakeSGNotifierAPI(securitygroups_rpc.SecurityGroupAgentRpcApiMixin): def __init__(self): self.topic = 'fake' target = oslo_messaging.Target(topic=self.topic, version='1.0') self.client = n_rpc.get_client(target) class SecurityGroupAgentRpcApiTestCase(base.BaseTestCase): def setUp(self): super(SecurityGroupAgentRpcApiTestCase, self).setUp() self.notifier = FakeSGNotifierAPI() self.mock_prepare = mock.patch.object(self.notifier.client, 'prepare', return_value=self.notifier.client).start() self.mock_cast = mock.patch.object(self.notifier.client, 'cast').start() def test_security_groups_rule_updated(self): self.notifier.security_groups_rule_updated( None, security_groups=['fake_sgid']) self.mock_cast.assert_has_calls( [mock.call(None, 'security_groups_rule_updated', security_groups=['fake_sgid'])]) def test_security_groups_member_updated(self): self.notifier.security_groups_member_updated( None, security_groups=['fake_sgid']) self.mock_cast.assert_has_calls( [mock.call(None, 'security_groups_member_updated', security_groups=['fake_sgid'])]) def test_security_groups_rule_not_updated(self): self.notifier.security_groups_rule_updated( None, security_groups=[]) self.assertFalse(self.mock_cast.called) def test_security_groups_member_not_updated(self): self.notifier.security_groups_member_updated( None, security_groups=[]) self.assertFalse(self.mock_cast.called) # Note(nati) bn -> binary_name # id -> device_id PHYSDEV_MOD = '-m 
physdev' PHYSDEV_IS_BRIDGED = '--physdev-is-bridged' IPTABLES_ARG = {'bn': iptables_manager.binary_name, 'physdev_mod': PHYSDEV_MOD, 'physdev_is_bridged': PHYSDEV_IS_BRIDGED} CHAINS_MANGLE = 'FORWARD|INPUT|OUTPUT|POSTROUTING|PREROUTING|mark' IPTABLES_ARG['chains'] = CHAINS_MANGLE CHAINS_MANGLE_V6 = 'FORWARD|INPUT|OUTPUT|POSTROUTING|PREROUTING' IPTABLES_ARG['chains'] = CHAINS_MANGLE_V6 CHAINS_NAT = 'OUTPUT|POSTROUTING|PREROUTING|float-snat|snat' IPTABLES_ARG['port1'] = 'port1' IPTABLES_ARG['port2'] = 'port2' IPTABLES_ARG['port3'] = 'port3' IPTABLES_ARG['mac1'] = '12:34:56:78:9A:BC' IPTABLES_ARG['mac2'] = '12:34:56:78:9A:BD' IPTABLES_ARG['ip1'] = '10.0.0.3/32' IPTABLES_ARG['ip2'] = '10.0.0.4/32' IPTABLES_ARG['chains'] = CHAINS_NAT IPTABLES_RAW_DEFAULT = """# Generated by iptables_manager *raw :OUTPUT - [0:0] :PREROUTING - [0:0] :%(bn)s-OUTPUT - [0:0] :%(bn)s-PREROUTING - [0:0] -I OUTPUT 1 -j %(bn)s-OUTPUT -I PREROUTING 1 -j %(bn)s-PREROUTING COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_RAW_BRIDGE_NET_1 = """# Generated by iptables_manager *raw :OUTPUT - [0:0] :PREROUTING - [0:0] :%(bn)s-OUTPUT - [0:0] :%(bn)s-PREROUTING - [0:0] -I OUTPUT 1 -j %(bn)s-OUTPUT -I PREROUTING 1 -j %(bn)s-PREROUTING -I %(bn)s-PREROUTING 1 -m physdev --physdev-in brqfakenet1 \ -m comment --comment "Set zone for port1" -j CT --zone 4097 -I %(bn)s-PREROUTING 2 -i brqfakenet1 \ -m comment --comment "Set zone for port1" -j CT --zone 4097 -I %(bn)s-PREROUTING 3 -m physdev --physdev-in tap_port1 \ -m comment --comment "Set zone for port1" -j CT --zone 4097 COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_RAW_BRIDGE_NET_2 = """# Generated by iptables_manager *raw :OUTPUT - [0:0] :PREROUTING - [0:0] :%(bn)s-OUTPUT - [0:0] :%(bn)s-PREROUTING - [0:0] -I OUTPUT 1 -j %(bn)s-OUTPUT -I PREROUTING 1 -j %(bn)s-PREROUTING -I %(bn)s-PREROUTING 1 -m physdev --physdev-in brqfakenet1 \ -m comment --comment "Set zone for port1" -j CT --zone 4097 -I %(bn)s-PREROUTING 2 -i brqfakenet1 \ -m comment --comment "Set zone for port1" -j CT --zone 4097 -I %(bn)s-PREROUTING 3 -m physdev --physdev-in tap_port1 \ -m comment --comment "Set zone for port1" -j CT --zone 4097 -I %(bn)s-PREROUTING 4 -m physdev --physdev-in brqfakenet2 \ -m comment --comment "Set zone for port2" -j CT --zone 4098 -I %(bn)s-PREROUTING 5 -i brqfakenet2 \ -m comment --comment "Set zone for port2" -j CT --zone 4098 -I %(bn)s-PREROUTING 6 -m physdev --physdev-in tap_port2 \ -m comment --comment "Set zone for port2" -j CT --zone 4098 COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_RAW_DEVICE_1 = """# Generated by iptables_manager *raw :OUTPUT - [0:0] :PREROUTING - [0:0] :%(bn)s-OUTPUT - [0:0] :%(bn)s-PREROUTING - [0:0] -I OUTPUT 1 -j %(bn)s-OUTPUT -I PREROUTING 1 -j %(bn)s-PREROUTING -I %(bn)s-PREROUTING 1 -m physdev --physdev-in qvbtap_port1 \ -m comment --comment "Set zone for %(port1)s" -j CT --zone 4097 -I %(bn)s-PREROUTING 2 -i qvbtap_port1 \ -m comment --comment "Set zone for %(port1)s" -j CT --zone 4097 -I %(bn)s-PREROUTING 3 -m physdev --physdev-in tap_port1 \ -m comment --comment "Set zone for %(port1)s" -j CT --zone 4097 COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_RAW_DEVICE_2 = """# Generated by iptables_manager *raw :OUTPUT - [0:0] :PREROUTING - [0:0] :%(bn)s-OUTPUT - [0:0] :%(bn)s-PREROUTING - [0:0] -I OUTPUT 1 -j %(bn)s-OUTPUT -I PREROUTING 1 -j %(bn)s-PREROUTING -I %(bn)s-PREROUTING 1 -m physdev --physdev-in qvbtap_%(port1)s \ -m comment --comment "Set zone for %(port1)s" -j CT 
--zone 4097 -I %(bn)s-PREROUTING 2 -i qvbtap_%(port1)s \ -m comment --comment "Set zone for %(port1)s" -j CT --zone 4097 -I %(bn)s-PREROUTING 3 -m physdev --physdev-in tap_%(port1)s \ -m comment --comment "Set zone for %(port1)s" -j CT --zone 4097 -I %(bn)s-PREROUTING 4 -m physdev --physdev-in qvbtap_%(port2)s \ -m comment --comment "Set zone for %(port2)s" -j CT --zone 4098 -I %(bn)s-PREROUTING 5 -i qvbtap_%(port2)s \ -m comment --comment "Set zone for %(port2)s" -j CT --zone 4098 -I %(bn)s-PREROUTING 6 -m physdev --physdev-in tap_%(port2)s \ -m comment --comment "Set zone for %(port2)s" -j CT --zone 4098 COMMIT # Completed by iptables_manager """ % IPTABLES_ARG CHAINS_RAW = 'OUTPUT|PREROUTING' IPTABLES_ARG['chains'] = CHAINS_RAW IPTABLES_RAW = """# Generated by iptables_manager *raw :OUTPUT - [0:0] :PREROUTING - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I OUTPUT 1 -j %(bn)s-OUTPUT -I PREROUTING 1 -j %(bn)s-PREROUTING COMMIT # Completed by iptables_manager """ % IPTABLES_ARG CHAINS_EMPTY = 'FORWARD|INPUT|OUTPUT|local|sg-chain|sg-fallback' CHAINS_1 = CHAINS_EMPTY + '|i_port1|o_port1|s_port1' CHAINS_2 = CHAINS_1 + '|i_port2|o_port2|s_port2' IPTABLES_ARG['chains'] = CHAINS_1 IPSET_FILTER_1 = """# Generated by iptables_manager *filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-o_port1 -I %(bn)s-i_port1 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_port1 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ --dport 68 -j RETURN -I %(bn)s-i_port1 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_port1 4 -m set --match-set NIPv4security_group1 src -j \ RETURN -I %(bn)s-i_port1 5 -m state --state INVALID -j DROP -I %(bn)s-i_port1 6 -j %(bn)s-sg-fallback -I %(bn)s-o_port1 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_port1 2 -j %(bn)s-s_port1 -I %(bn)s-o_port1 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_port1 4 -p udp -m udp --sport 67 --dport 68 -j DROP -I %(bn)s-o_port1 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_port1 6 -j RETURN -I %(bn)s-o_port1 7 -m state --state INVALID -j DROP -I %(bn)s-o_port1 8 -j %(bn)s-sg-fallback -I %(bn)s-s_port1 1 -s 10.0.0.3/32 -m mac --mac-source 12:34:56:78:9A:BC \ -j RETURN -I %(bn)s-s_port1 2 -j DROP -I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-i_port1 -I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-o_port1 -I %(bn)s-sg-chain 3 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_FILTER_1 = """# Generated by iptables_manager *filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] 
:%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-o_port1 -I %(bn)s-i_port1 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_port1 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ --dport 68 -j RETURN -I %(bn)s-i_port1 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_port1 4 -m state --state INVALID -j DROP -I %(bn)s-i_port1 5 -j %(bn)s-sg-fallback -I %(bn)s-o_port1 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_port1 2 -j %(bn)s-s_port1 -I %(bn)s-o_port1 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_port1 4 -p udp -m udp --sport 67 --dport 68 -j DROP -I %(bn)s-o_port1 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_port1 6 -j RETURN -I %(bn)s-o_port1 7 -m state --state INVALID -j DROP -I %(bn)s-o_port1 8 -j %(bn)s-sg-fallback -I %(bn)s-s_port1 1 -s 10.0.0.3/32 -m mac --mac-source 12:34:56:78:9A:BC \ -j RETURN -I %(bn)s-s_port1 2 -j DROP -I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-i_port1 -I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-o_port1 -I %(bn)s-sg-chain 3 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_FILTER_1_2 = """# Generated by iptables_manager *filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-o_port1 -I %(bn)s-i_port1 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_port1 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ --dport 68 -j RETURN -I %(bn)s-i_port1 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_port1 4 -s 10.0.0.4/32 -j RETURN -I %(bn)s-i_port1 5 -m state --state INVALID -j DROP -I %(bn)s-i_port1 6 -j %(bn)s-sg-fallback -I %(bn)s-o_port1 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_port1 2 -j %(bn)s-s_port1 -I %(bn)s-o_port1 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_port1 4 -p udp -m udp --sport 67 --dport 68 -j DROP -I %(bn)s-o_port1 5 -m state 
--state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_port1 6 -j RETURN -I %(bn)s-o_port1 7 -m state --state INVALID -j DROP -I %(bn)s-o_port1 8 -j %(bn)s-sg-fallback -I %(bn)s-s_port1 1 -s 10.0.0.3/32 -m mac --mac-source 12:34:56:78:9A:BC \ -j RETURN -I %(bn)s-s_port1 2 -j DROP -I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-i_port1 -I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-o_port1 -I %(bn)s-sg-chain 3 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_ARG['chains'] = CHAINS_2 IPSET_FILTER_2 = """# Generated by iptables_manager *filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ --dport 68 -j RETURN -I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_%(port1)s 4 -m set --match-set NIPv4security_group1 src -j RETURN -I %(bn)s-i_%(port1)s 5 -m state --state INVALID -j DROP -I %(bn)s-i_%(port1)s 6 -j %(bn)s-sg-fallback -I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ --dport 68 -j RETURN -I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_%(port2)s 4 -m set --match-set NIPv4security_group1 src -j RETURN -I %(bn)s-i_%(port2)s 5 -m state --state INVALID -j DROP -I %(bn)s-i_%(port2)s 6 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port1)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s -I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port1)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP -I %(bn)s-o_%(port1)s 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port1)s 6 -j RETURN -I %(bn)s-o_%(port1)s 7 -m state --state INVALID -j DROP -I %(bn)s-o_%(port1)s 8 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port2)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s -I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port2)s 4 -p udp -m udp 
--sport 67 --dport 68 -j DROP -I %(bn)s-o_%(port2)s 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port2)s 6 -j RETURN -I %(bn)s-o_%(port2)s 7 -m state --state INVALID -j DROP -I %(bn)s-o_%(port2)s 8 -j %(bn)s-sg-fallback -I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN -I %(bn)s-s_%(port1)s 2 -j DROP -I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN -I %(bn)s-s_%(port2)s 2 -j DROP -I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port1)s -I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port2)s -I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-sg-chain 5 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPSET_FILTER_2_TRUSTED = """# Generated by iptables_manager *filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 5 %(physdev_mod)s --physdev-INGRESS tap_%(port3)s \ %(physdev_is_bridged)s -j ACCEPT -I %(bn)s-FORWARD 6 %(physdev_mod)s --physdev-EGRESS tap_%(port3)s \ %(physdev_is_bridged)s -j ACCEPT -I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ --dport 68 -j RETURN -I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_%(port1)s 4 -m set --match-set NIPv4security_group1 src -j RETURN -I %(bn)s-i_%(port1)s 5 -m state --state INVALID -j DROP -I %(bn)s-i_%(port1)s 6 -j %(bn)s-sg-fallback -I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ --dport 68 -j RETURN -I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_%(port2)s 4 -m set --match-set NIPv4security_group1 src -j RETURN -I %(bn)s-i_%(port2)s 5 -m state --state INVALID -j DROP -I %(bn)s-i_%(port2)s 6 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port1)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s -I 
%(bn)s-o_%(port1)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port1)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP -I %(bn)s-o_%(port1)s 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port1)s 6 -j RETURN -I %(bn)s-o_%(port1)s 7 -m state --state INVALID -j DROP -I %(bn)s-o_%(port1)s 8 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port2)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s -I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port2)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP -I %(bn)s-o_%(port2)s 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port2)s 6 -j RETURN -I %(bn)s-o_%(port2)s 7 -m state --state INVALID -j DROP -I %(bn)s-o_%(port2)s 8 -j %(bn)s-sg-fallback -I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN -I %(bn)s-s_%(port1)s 2 -j DROP -I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN -I %(bn)s-s_%(port2)s 2 -j DROP -I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port1)s -I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port2)s -I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-sg-chain 5 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPSET_FILTER_2_3_TRUSTED = """# Generated by iptables_manager *filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 5 %(physdev_mod)s --physdev-INGRESS tap_%(port3)s \ %(physdev_is_bridged)s -j ACCEPT -I %(bn)s-FORWARD 6 %(physdev_mod)s --physdev-EGRESS tap_%(port3)s \ %(physdev_is_bridged)s -j ACCEPT -I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ --dport 68 -j RETURN -I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_%(port1)s 4 -m set --match-set NIPv4security_group1 src -j RETURN -I %(bn)s-i_%(port1)s 5 -p icmp -j RETURN -I 
%(bn)s-i_%(port1)s 6 -m state --state INVALID -j DROP -I %(bn)s-i_%(port1)s 7 -j %(bn)s-sg-fallback -I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ --dport 68 -j RETURN -I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_%(port2)s 4 -m set --match-set NIPv4security_group1 src -j RETURN -I %(bn)s-i_%(port2)s 5 -p icmp -j RETURN -I %(bn)s-i_%(port2)s 6 -m state --state INVALID -j DROP -I %(bn)s-i_%(port2)s 7 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port1)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s -I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port1)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP -I %(bn)s-o_%(port1)s 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port1)s 6 -j RETURN -I %(bn)s-o_%(port1)s 7 -m state --state INVALID -j DROP -I %(bn)s-o_%(port1)s 8 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port2)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s -I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port2)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP -I %(bn)s-o_%(port2)s 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port2)s 6 -j RETURN -I %(bn)s-o_%(port2)s 7 -m state --state INVALID -j DROP -I %(bn)s-o_%(port2)s 8 -j %(bn)s-sg-fallback -I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN -I %(bn)s-s_%(port1)s 2 -j DROP -I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN -I %(bn)s-s_%(port2)s 2 -j DROP -I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port1)s -I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port2)s -I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-sg-chain 5 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_FILTER_2 = """# Generated by iptables_manager *filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s 
-j %(bn)s-o_%(port1)s -I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ --dport 68 -j RETURN -I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_%(port1)s 4 -s %(ip2)s -j RETURN -I %(bn)s-i_%(port1)s 5 -m state --state INVALID -j DROP -I %(bn)s-i_%(port1)s 6 -j %(bn)s-sg-fallback -I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ --dport 68 -j RETURN -I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_%(port2)s 4 -s %(ip1)s -j RETURN -I %(bn)s-i_%(port2)s 5 -m state --state INVALID -j DROP -I %(bn)s-i_%(port2)s 6 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port1)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s -I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port1)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP -I %(bn)s-o_%(port1)s 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port1)s 6 -j RETURN -I %(bn)s-o_%(port1)s 7 -m state --state INVALID -j DROP -I %(bn)s-o_%(port1)s 8 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port2)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s -I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port2)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP -I %(bn)s-o_%(port2)s 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port2)s 6 -j RETURN -I %(bn)s-o_%(port2)s 7 -m state --state INVALID -j DROP -I %(bn)s-o_%(port2)s 8 -j %(bn)s-sg-fallback -I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN -I %(bn)s-s_%(port1)s 2 -j DROP -I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN -I %(bn)s-s_%(port2)s 2 -j DROP -I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port1)s -I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port2)s -I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-sg-chain 5 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_FILTER_2_TRUSTED = """# Generated by iptables_manager *filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s 
-j %(bn)s-sg-chain -I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 5 %(physdev_mod)s --physdev-INGRESS tap_%(port3)s \ %(physdev_is_bridged)s -j ACCEPT -I %(bn)s-FORWARD 6 %(physdev_mod)s --physdev-EGRESS tap_%(port3)s \ %(physdev_is_bridged)s -j ACCEPT -I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ --dport 68 -j RETURN -I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_%(port1)s 4 -s %(ip2)s -j RETURN -I %(bn)s-i_%(port1)s 5 -m state --state INVALID -j DROP -I %(bn)s-i_%(port1)s 6 -j %(bn)s-sg-fallback -I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ --dport 68 -j RETURN -I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_%(port2)s 4 -s %(ip1)s -j RETURN -I %(bn)s-i_%(port2)s 5 -m state --state INVALID -j DROP -I %(bn)s-i_%(port2)s 6 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port1)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s -I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port1)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP -I %(bn)s-o_%(port1)s 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port1)s 6 -j RETURN -I %(bn)s-o_%(port1)s 7 -m state --state INVALID -j DROP -I %(bn)s-o_%(port1)s 8 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port2)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s -I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port2)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP -I %(bn)s-o_%(port2)s 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port2)s 6 -j RETURN -I %(bn)s-o_%(port2)s 7 -m state --state INVALID -j DROP -I %(bn)s-o_%(port2)s 8 -j %(bn)s-sg-fallback -I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN -I %(bn)s-s_%(port1)s 2 -j DROP -I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN -I %(bn)s-s_%(port2)s 2 -j DROP -I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port1)s -I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port2)s -I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-sg-chain 5 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_FILTER_2_2 = """# Generated by iptables_manager *filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] 
:%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ --dport 68 -j RETURN -I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_%(port1)s 4 -m state --state INVALID -j DROP -I %(bn)s-i_%(port1)s 5 -j %(bn)s-sg-fallback -I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ --dport 68 -j RETURN -I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_%(port2)s 4 -s %(ip1)s -j RETURN -I %(bn)s-i_%(port2)s 5 -m state --state INVALID -j DROP -I %(bn)s-i_%(port2)s 6 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port1)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s -I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port1)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP -I %(bn)s-o_%(port1)s 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port1)s 6 -j RETURN -I %(bn)s-o_%(port1)s 7 -m state --state INVALID -j DROP -I %(bn)s-o_%(port1)s 8 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port2)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s -I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port2)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP -I %(bn)s-o_%(port2)s 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port2)s 6 -j RETURN -I %(bn)s-o_%(port2)s 7 -m state --state INVALID -j DROP -I %(bn)s-o_%(port2)s 8 -j %(bn)s-sg-fallback -I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN -I %(bn)s-s_%(port1)s 2 -j DROP -I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN -I %(bn)s-s_%(port2)s 2 -j DROP -I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port1)s -I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port2)s -I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-sg-chain 5 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_FILTER_2_3 = """# Generated by iptables_manager 
*filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ --dport 68 -j RETURN -I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_%(port1)s 4 -s %(ip2)s -j RETURN -I %(bn)s-i_%(port1)s 5 -p icmp -j RETURN -I %(bn)s-i_%(port1)s 6 -m state --state INVALID -j DROP -I %(bn)s-i_%(port1)s 7 -j %(bn)s-sg-fallback -I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ --dport 68 -j RETURN -I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_%(port2)s 4 -s %(ip1)s -j RETURN -I %(bn)s-i_%(port2)s 5 -p icmp -j RETURN -I %(bn)s-i_%(port2)s 6 -m state --state INVALID -j DROP -I %(bn)s-i_%(port2)s 7 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port1)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s -I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port1)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP -I %(bn)s-o_%(port1)s 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port1)s 6 -j RETURN -I %(bn)s-o_%(port1)s 7 -m state --state INVALID -j DROP -I %(bn)s-o_%(port1)s 8 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port2)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s -I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port2)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP -I %(bn)s-o_%(port2)s 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port2)s 6 -j RETURN -I %(bn)s-o_%(port2)s 7 -m state --state INVALID -j DROP -I %(bn)s-o_%(port2)s 8 -j %(bn)s-sg-fallback -I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN -I %(bn)s-s_%(port1)s 2 -j DROP -I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN -I %(bn)s-s_%(port2)s 2 -j DROP -I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port1)s -I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s 
-I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port2)s -I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-sg-chain 5 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_FILTER_2_3_TRUSTED = """# Generated by iptables_manager *filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 5 %(physdev_mod)s --physdev-INGRESS tap_%(port3)s \ %(physdev_is_bridged)s -j ACCEPT -I %(bn)s-FORWARD 6 %(physdev_mod)s --physdev-EGRESS tap_%(port3)s \ %(physdev_is_bridged)s -j ACCEPT -I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ --dport 68 -j RETURN -I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_%(port1)s 4 -s %(ip2)s -j RETURN -I %(bn)s-i_%(port1)s 5 -p icmp -j RETURN -I %(bn)s-i_%(port1)s 6 -m state --state INVALID -j DROP -I %(bn)s-i_%(port1)s 7 -j %(bn)s-sg-fallback -I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \ --dport 68 -j RETURN -I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN -I %(bn)s-i_%(port2)s 4 -s %(ip1)s -j RETURN -I %(bn)s-i_%(port2)s 5 -p icmp -j RETURN -I %(bn)s-i_%(port2)s 6 -m state --state INVALID -j DROP -I %(bn)s-i_%(port2)s 7 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port1)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s -I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port1)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP -I %(bn)s-o_%(port1)s 5 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port1)s 6 -j RETURN -I %(bn)s-o_%(port1)s 7 -m state --state INVALID -j DROP -I %(bn)s-o_%(port1)s 8 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port2)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \ --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s -I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN -I %(bn)s-o_%(port2)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP -I %(bn)s-o_%(port2)s 5 -m 
state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port2)s 6 -j RETURN -I %(bn)s-o_%(port2)s 7 -m state --state INVALID -j DROP -I %(bn)s-o_%(port2)s 8 -j %(bn)s-sg-fallback -I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN -I %(bn)s-s_%(port1)s 2 -j DROP -I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN -I %(bn)s-s_%(port2)s 2 -j DROP -I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port1)s -I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port2)s -I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-sg-chain 5 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_ARG['chains'] = CHAINS_EMPTY IPTABLES_FILTER_EMPTY = """# Generated by iptables_manager *filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-sg-chain 1 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_ARG['chains'] = CHAINS_1 IPTABLES_FILTER_V6_1 = """# Generated by iptables_manager *filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-o_port1 -I %(bn)s-i_port1 1 -p ipv6-icmp -m icmp6 --icmpv6-type 130 -j RETURN -I %(bn)s-i_port1 2 -p ipv6-icmp -m icmp6 --icmpv6-type 135 -j RETURN -I %(bn)s-i_port1 3 -p ipv6-icmp -m icmp6 --icmpv6-type 136 -j RETURN -I %(bn)s-i_port1 4 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_port1 5 -m state --state INVALID -j DROP -I %(bn)s-i_port1 6 -j %(bn)s-sg-fallback -I %(bn)s-o_port1 1 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \ --icmpv6-type 131 -j RETURN -I %(bn)s-o_port1 2 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \ --icmpv6-type 135 -j RETURN -I %(bn)s-o_port1 3 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \ --icmpv6-type 143 -j RETURN -I %(bn)s-o_port1 4 -p ipv6-icmp -m icmp6 --icmpv6-type 134 -j DROP -I %(bn)s-o_port1 5 -p ipv6-icmp -j RETURN -I %(bn)s-o_port1 6 -p udp -m udp --sport 546 --dport 547 -j RETURN -I %(bn)s-o_port1 7 -p udp -m udp --sport 547 --dport 546 -j DROP -I %(bn)s-o_port1 8 -m state --state RELATED,ESTABLISHED -j RETURN -I 
%(bn)s-o_port1 9 -m state --state INVALID -j DROP -I %(bn)s-o_port1 10 -j %(bn)s-sg-fallback -I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-i_port1 -I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \ %(physdev_is_bridged)s -j %(bn)s-o_port1 -I %(bn)s-sg-chain 3 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_ARG['chains'] = CHAINS_2 IPTABLES_FILTER_V6_2 = """# Generated by iptables_manager *filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-i_%(port1)s 1 -p ipv6-icmp -m icmp6 --icmpv6-type 130 -j RETURN -I %(bn)s-i_%(port1)s 2 -p ipv6-icmp -m icmp6 --icmpv6-type 135 -j RETURN -I %(bn)s-i_%(port1)s 3 -p ipv6-icmp -m icmp6 --icmpv6-type 136 -j RETURN -I %(bn)s-i_%(port1)s 4 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port1)s 5 -m state --state INVALID -j DROP -I %(bn)s-i_%(port1)s 6 -j %(bn)s-sg-fallback -I %(bn)s-i_%(port2)s 1 -p ipv6-icmp -m icmp6 --icmpv6-type 130 -j RETURN -I %(bn)s-i_%(port2)s 2 -p ipv6-icmp -m icmp6 --icmpv6-type 135 -j RETURN -I %(bn)s-i_%(port2)s 3 -p ipv6-icmp -m icmp6 --icmpv6-type 136 -j RETURN -I %(bn)s-i_%(port2)s 4 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port2)s 5 -m state --state INVALID -j DROP -I %(bn)s-i_%(port2)s 6 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port1)s 1 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \ --icmpv6-type 131 -j RETURN -I %(bn)s-o_%(port1)s 2 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \ --icmpv6-type 135 -j RETURN -I %(bn)s-o_%(port1)s 3 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \ --icmpv6-type 143 -j RETURN -I %(bn)s-o_%(port1)s 4 -p ipv6-icmp -m icmp6 --icmpv6-type 134 -j DROP -I %(bn)s-o_%(port1)s 5 -p ipv6-icmp -j RETURN -I %(bn)s-o_%(port1)s 6 -p udp -m udp --sport 546 --dport 547 -j RETURN -I %(bn)s-o_%(port1)s 7 -p udp -m udp --sport 547 --dport 546 -j DROP -I %(bn)s-o_%(port1)s 8 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port1)s 9 -m state --state INVALID -j DROP -I %(bn)s-o_%(port1)s 10 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port2)s 1 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \ --icmpv6-type 131 -j RETURN -I %(bn)s-o_%(port2)s 2 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \ --icmpv6-type 135 -j RETURN -I %(bn)s-o_%(port2)s 3 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \ 
--icmpv6-type 143 -j RETURN -I %(bn)s-o_%(port2)s 4 -p ipv6-icmp -m icmp6 --icmpv6-type 134 -j DROP -I %(bn)s-o_%(port2)s 5 -p ipv6-icmp -j RETURN -I %(bn)s-o_%(port2)s 6 -p udp -m udp --sport 546 --dport 547 -j RETURN -I %(bn)s-o_%(port2)s 7 -p udp -m udp --sport 547 --dport 546 -j DROP -I %(bn)s-o_%(port2)s 8 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port2)s 9 -m state --state INVALID -j DROP -I %(bn)s-o_%(port2)s 10 -j %(bn)s-sg-fallback -I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port1)s -I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port2)s -I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-sg-chain 5 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_FILTER_V6_2_TRUSTED = """# Generated by iptables_manager *filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-sg-chain -I %(bn)s-FORWARD 5 %(physdev_mod)s --physdev-INGRESS tap_%(port3)s \ %(physdev_is_bridged)s -j ACCEPT -I %(bn)s-FORWARD 6 %(physdev_mod)s --physdev-EGRESS tap_%(port3)s \ %(physdev_is_bridged)s -j ACCEPT -I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-i_%(port1)s 1 -p ipv6-icmp -m icmp6 --icmpv6-type 130 -j RETURN -I %(bn)s-i_%(port1)s 2 -p ipv6-icmp -m icmp6 --icmpv6-type 135 -j RETURN -I %(bn)s-i_%(port1)s 3 -p ipv6-icmp -m icmp6 --icmpv6-type 136 -j RETURN -I %(bn)s-i_%(port1)s 4 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port1)s 5 -m state --state INVALID -j DROP -I %(bn)s-i_%(port1)s 6 -j %(bn)s-sg-fallback -I %(bn)s-i_%(port2)s 1 -p ipv6-icmp -m icmp6 --icmpv6-type 130 -j RETURN -I %(bn)s-i_%(port2)s 2 -p ipv6-icmp -m icmp6 --icmpv6-type 135 -j RETURN -I %(bn)s-i_%(port2)s 3 -p ipv6-icmp -m icmp6 --icmpv6-type 136 -j RETURN -I %(bn)s-i_%(port2)s 4 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-i_%(port2)s 5 -m state --state INVALID -j DROP -I %(bn)s-i_%(port2)s 6 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port1)s 1 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \ --icmpv6-type 131 -j RETURN -I %(bn)s-o_%(port1)s 2 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \ --icmpv6-type 135 -j RETURN -I 
%(bn)s-o_%(port1)s 3 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \ --icmpv6-type 143 -j RETURN -I %(bn)s-o_%(port1)s 4 -p ipv6-icmp -m icmp6 --icmpv6-type 134 -j DROP -I %(bn)s-o_%(port1)s 5 -p ipv6-icmp -j RETURN -I %(bn)s-o_%(port1)s 6 -p udp -m udp --sport 546 --dport 547 -j RETURN -I %(bn)s-o_%(port1)s 7 -p udp -m udp --sport 547 --dport 546 -j DROP -I %(bn)s-o_%(port1)s 8 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port1)s 9 -m state --state INVALID -j DROP -I %(bn)s-o_%(port1)s 10 -j %(bn)s-sg-fallback -I %(bn)s-o_%(port2)s 1 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \ --icmpv6-type 131 -j RETURN -I %(bn)s-o_%(port2)s 2 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \ --icmpv6-type 135 -j RETURN -I %(bn)s-o_%(port2)s 3 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \ --icmpv6-type 143 -j RETURN -I %(bn)s-o_%(port2)s 4 -p ipv6-icmp -m icmp6 --icmpv6-type 134 -j DROP -I %(bn)s-o_%(port2)s 5 -p ipv6-icmp -j RETURN -I %(bn)s-o_%(port2)s 6 -p udp -m udp --sport 546 --dport 547 -j RETURN -I %(bn)s-o_%(port2)s 7 -p udp -m udp --sport 547 --dport 546 -j DROP -I %(bn)s-o_%(port2)s 8 -m state --state RELATED,ESTABLISHED -j RETURN -I %(bn)s-o_%(port2)s 9 -m state --state INVALID -j DROP -I %(bn)s-o_%(port2)s 10 -j %(bn)s-sg-fallback -I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port1)s -I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port1)s -I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-i_%(port2)s -I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ %(physdev_is_bridged)s -j %(bn)s-o_%(port2)s -I %(bn)s-sg-chain 5 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP COMMIT # Completed by iptables_manager """ % IPTABLES_ARG IPTABLES_ARG['chains'] = CHAINS_EMPTY IPTABLES_FILTER_V6_EMPTY = """# Generated by iptables_manager *filter :FORWARD - [0:0] :INPUT - [0:0] :OUTPUT - [0:0] :neutron-filter-top - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] :%(bn)s-(%(chains)s) - [0:0] -I FORWARD 1 -j neutron-filter-top -I FORWARD 2 -j %(bn)s-FORWARD -I INPUT 1 -j %(bn)s-INPUT -I OUTPUT 1 -j neutron-filter-top -I OUTPUT 2 -j %(bn)s-OUTPUT -I neutron-filter-top 1 -j %(bn)s-local -I %(bn)s-sg-chain 1 -j ACCEPT -I %(bn)s-sg-fallback 1 -j DROP COMMIT # Completed by iptables_manager """ % IPTABLES_ARG class TestSecurityGroupAgentWithIptables(base.BaseTestCase): FIREWALL_DRIVER = FIREWALL_IPTABLES_DRIVER PHYSDEV_INGRESS = 'physdev-out' PHYSDEV_EGRESS = 'physdev-in' def setUp(self, defer_refresh_firewall=False, test_rpc_v1_1=True): clear_mgrs = lambda: ip_conntrack.CONTRACK_MGRS.clear() self.addCleanup(clear_mgrs) clear_mgrs() # clear before start in case other tests didn't clean up super(TestSecurityGroupAgentWithIptables, self).setUp() set_firewall_driver(self.FIREWALL_DRIVER) cfg.CONF.set_override('enable_ipset', False, group='SECURITYGROUP') cfg.CONF.set_override('comment_iptables_rules', False, group='AGENT') self.utils_exec = mock.patch( 'neutron.agent.linux.utils.execute').start() self.rpc = mock.Mock() self._init_agent(defer_refresh_firewall) if test_rpc_v1_1: self.rpc.security_group_info_for_devices.side_effect = ( oslo_messaging.UnsupportedVersion('1.2')) self.iptables = self.agent.firewall.iptables self.ipconntrack = self.agent.firewall.ipconntrack # TODO(jlibosva) Get rid of mocking iptables execute and 
mock out # firewall instead self.iptables.use_ipv6 = True self.iptables_execute = mock.patch.object(self.iptables, "execute").start() self.iptables_execute_return_values = [] self.expected_call_count = 0 self.expected_calls = [] self.expected_process_inputs = [] self.iptables_execute.side_effect = self.iptables_execute_return_values rule1 = [{'direction': 'ingress', 'protocol': const.PROTO_NAME_UDP, 'ethertype': const.IPv4, 'source_ip_prefix': '10.0.0.2/32', 'source_port_range_min': 67, 'source_port_range_max': 67, 'port_range_min': 68, 'port_range_max': 68}, {'direction': 'ingress', 'protocol': const.PROTO_NAME_TCP, 'ethertype': const.IPv4, 'port_range_min': 22, 'port_range_max': 22}, {'direction': 'egress', 'ethertype': const.IPv4}] rule2 = rule1[:] rule2 += [{'direction': 'ingress', 'source_ip_prefix': '10.0.0.4/32', 'ethertype': const.IPv4}] rule3 = rule2[:] rule3 += [{'direction': 'ingress', 'protocol': const.PROTO_NAME_ICMP, 'ethertype': const.IPv4}] rule4 = rule1[:] rule4 += [{'direction': 'ingress', 'source_ip_prefix': '10.0.0.3/32', 'ethertype': const.IPv4}] rule5 = rule4[:] rule5 += [{'direction': 'ingress', 'protocol': const.PROTO_NAME_ICMP, 'ethertype': const.IPv4}] self.devices1 = {'tap_port1': self._device('tap_port1', '10.0.0.3/32', '12:34:56:78:9a:bc', rule1)} self.devices2 = collections.OrderedDict([ ('tap_port1', self._device('tap_port1', '10.0.0.3/32', '12:34:56:78:9a:bc', rule2)), ('tap_port2', self._device('tap_port2', '10.0.0.4/32', '12:34:56:78:9a:bd', rule4)) ]) self.devices3 = collections.OrderedDict([ ('tap_port1', self._device('tap_port1', '10.0.0.3/32', '12:34:56:78:9a:bc', rule3)), ('tap_port2', self._device('tap_port2', '10.0.0.4/32', '12:34:56:78:9a:bd', rule5)) ]) self.agent.firewall.security_group_updated = mock.Mock() @staticmethod def _enforce_order_in_firewall(firewall): # for the sake of the test, eliminate any order randomness: # it helps to match iptables output against regexps consistently for attr in ('filtered_ports', 'unfiltered_ports'): setattr(firewall, attr, collections.OrderedDict()) def _init_agent(self, defer_refresh_firewall): self.agent = sg_rpc.SecurityGroupAgentRpc( context=None, plugin_rpc=self.rpc, defer_refresh_firewall=defer_refresh_firewall) self._enforce_order_in_firewall(self.agent.firewall) # don't mess with sysctl knobs in unit tests self.agent.firewall._enabled_netfilter_for_bridges = True def _device(self, device, ip, mac_address, rule): return {'device': device, 'network_id': 'fakenet%s' % device[-1:], 'fixed_ips': [ip], 'mac_address': mac_address, 'security_groups': ['security_group1'], 'security_group_rules': rule, 'security_group_source_groups': [ 'security_group1']} def _regex(self, value): value = value.replace('physdev-INGRESS', self.PHYSDEV_INGRESS) value = value.replace('physdev-EGRESS', self.PHYSDEV_EGRESS) value = value.replace('\n', '\\n') value = value.replace('[', r'\[') value = value.replace(']', r'\]') value = value.replace('*', r'\*') return value def _register_mock_call(self, *args, **kwargs): return_value = kwargs.pop('return_value', None) self.iptables_execute_return_values.append(return_value) has_process_input = 'process_input' in kwargs process_input = kwargs.get('process_input') self.expected_process_inputs.append((has_process_input, process_input)) if has_process_input: kwargs['process_input'] = mock.ANY self.expected_calls.append(mock.call(*args, **kwargs)) self.expected_call_count += 1 def _verify_mock_calls(self, exp_fw_sg_updated_call=False): self.assertEqual(self.expected_call_count, 
self.iptables_execute.call_count) self.iptables_execute.assert_has_calls(self.expected_calls) for i, expected in enumerate(self.expected_process_inputs): check, expected_regex = expected if not check: continue # The second and later arguments of self.iptables.execute # are keyword parameters, so the keyword arguments are extracted by [1] kwargs = self.iptables_execute.call_args_list[i][1] self.assertThat(kwargs['process_input'], matchers.MatchesRegex(expected_regex)) self.assertEqual(exp_fw_sg_updated_call, self.agent.firewall.security_group_updated.called) def _replay_iptables(self, v4_filter, v6_filter, raw): self._register_mock_call( ['iptables-save'], run_as_root=True, return_value='') self._register_mock_call( ['iptables-restore', '-n'], process_input=self._regex(v4_filter + raw), run_as_root=True, log_fail_as_error=False, return_value='') self._register_mock_call( ['ip6tables-save'], run_as_root=True, return_value='') self._register_mock_call( ['ip6tables-restore', '-n'], process_input=self._regex(v6_filter + raw), run_as_root=True, log_fail_as_error=False, return_value='') def test_prepare_remove_port(self): self.ipconntrack._device_zone_map = {} self.rpc.security_group_rules_for_devices.return_value = self.devices1 self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1, IPTABLES_RAW_BRIDGE_NET_1) self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY, IPTABLES_RAW_DEFAULT) self.agent.prepare_devices_filter(['tap_port1']) self.agent.remove_devices_filter(['tap_port1']) self._verify_mock_calls() def test_security_group_member_updated(self): self.rpc.security_group_rules_for_devices.return_value = self.devices1 self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1, IPTABLES_RAW_BRIDGE_NET_1) self._replay_iptables(IPTABLES_FILTER_1_2, IPTABLES_FILTER_V6_1, IPTABLES_RAW_BRIDGE_NET_1) self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2, IPTABLES_RAW_BRIDGE_NET_2) self._replay_iptables(IPTABLES_FILTER_2_2, IPTABLES_FILTER_V6_2, IPTABLES_RAW_BRIDGE_NET_2) self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1, IPTABLES_RAW_BRIDGE_NET_1) self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY, IPTABLES_RAW_DEFAULT) self.agent.prepare_devices_filter(['tap_port1']) self.rpc.security_group_rules_for_devices.return_value = self.devices2 self.agent.security_groups_member_updated(['security_group1']) self.agent.prepare_devices_filter(['tap_port2']) self.rpc.security_group_rules_for_devices.return_value = self.devices1 self.agent.security_groups_member_updated(['security_group1']) self.agent.remove_devices_filter(['tap_port2']) self.agent.remove_devices_filter(['tap_port1']) self._verify_mock_calls() def test_security_group_rule_updated(self): self.rpc.security_group_rules_for_devices.return_value = self.devices2 self._replay_iptables( IPTABLES_FILTER_2_TRUSTED, IPTABLES_FILTER_V6_2_TRUSTED, IPTABLES_RAW_BRIDGE_NET_2) self._replay_iptables( IPTABLES_FILTER_2_3_TRUSTED, IPTABLES_FILTER_V6_2_TRUSTED, IPTABLES_RAW_BRIDGE_NET_2) self.agent.prepare_devices_filter(['tap_port1', 'tap_port3']) self.rpc.security_group_rules_for_devices.return_value = self.devices3 self.agent.security_groups_rule_updated(['security_group1']) self._verify_mock_calls() class TestSecurityGroupAgentEnhancedRpcWithIptables( TestSecurityGroupAgentWithIptables): def setUp(self, defer_refresh_firewall=False): super(TestSecurityGroupAgentEnhancedRpcWithIptables, self).setUp( defer_refresh_firewall=defer_refresh_firewall, test_rpc_v1_1=False) self.sg_info =
self.rpc.security_group_info_for_devices rule1 = [{'direction': 'ingress', 'protocol': const.PROTO_NAME_UDP, 'ethertype': const.IPv4, 'source_ip_prefix': '10.0.0.2/32', 'source_port_range_min': 67, 'source_port_range_max': 67, 'port_range_min': 68, 'port_range_max': 68}, {'direction': 'ingress', 'protocol': const.PROTO_NAME_TCP, 'ethertype': const.IPv4, 'port_range_min': 22, 'port_range_max': 22}, {'direction': 'egress', 'ethertype': const.IPv4}, {'direction': 'ingress', 'remote_group_id': 'security_group1', 'ethertype': const.IPv4}] rule2 = rule1[:] rule2 += [{'direction': 'ingress', 'protocol': const.PROTO_NAME_ICMP, 'ethertype': const.IPv4}] devices_info1 = {'tap_port1': self._device('tap_port1', '10.0.0.3/32', '12:34:56:78:9a:bc', [])} self.devices_info1 = {'security_groups': {'security_group1': rule1}, 'sg_member_ips': { 'security_group1': { 'IPv4': ['10.0.0.3/32'], 'IPv6': []}}, 'devices': devices_info1} devices_info2 = collections.OrderedDict([ ('tap_port1', self._device('tap_port1', '10.0.0.3/32', '12:34:56:78:9a:bc', [])), ('tap_port2', self._device('tap_port2', '10.0.0.4/32', '12:34:56:78:9a:bd', [])) ]) self.devices_info2 = {'security_groups': {'security_group1': rule1}, 'sg_member_ips': { 'security_group1': { 'IPv4': ['10.0.0.3/32', '10.0.0.4/32'], 'IPv6': []}}, 'devices': devices_info2} self.devices_info3 = {'security_groups': {'security_group1': rule2}, 'sg_member_ips': { 'security_group1': { 'IPv4': ['10.0.0.3/32', '10.0.0.4/32'], 'IPv6': []}}, 'devices': devices_info2} def test_prepare_remove_port(self): self.ipconntrack._device_zone_map = {} self.sg_info.return_value = self.devices_info1 self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1, IPTABLES_RAW_BRIDGE_NET_1) self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY, IPTABLES_RAW_DEFAULT) self.agent.prepare_devices_filter(['tap_port1']) self.agent.remove_devices_filter(['tap_port1']) self._verify_mock_calls() def test_security_group_member_updated(self): self.sg_info.return_value = self.devices_info1 self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1, IPTABLES_RAW_BRIDGE_NET_1) self._replay_iptables(IPTABLES_FILTER_1_2, IPTABLES_FILTER_V6_1, IPTABLES_RAW_BRIDGE_NET_1) self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2, IPTABLES_RAW_BRIDGE_NET_2) self._replay_iptables(IPTABLES_FILTER_2_2, IPTABLES_FILTER_V6_2, IPTABLES_RAW_BRIDGE_NET_2) self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1, IPTABLES_RAW_BRIDGE_NET_1) self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY, IPTABLES_RAW_DEFAULT) self.agent.prepare_devices_filter(['tap_port1']) self.sg_info.return_value = self.devices_info2 self.agent.security_groups_member_updated(['security_group1']) self.agent.prepare_devices_filter(['tap_port2']) self.sg_info.return_value = self.devices_info1 self.agent.security_groups_member_updated(['security_group1']) self.agent.remove_devices_filter(['tap_port2']) self.agent.remove_devices_filter(['tap_port1']) self._verify_mock_calls(True) self.assertEqual( 2, self.agent.firewall.security_group_updated.call_count) def test_security_group_rule_updated(self): self.sg_info.return_value = self.devices_info2 self._replay_iptables( IPTABLES_FILTER_2_TRUSTED, IPTABLES_FILTER_V6_2_TRUSTED, IPTABLES_RAW_BRIDGE_NET_2) self._replay_iptables( IPTABLES_FILTER_2_3_TRUSTED, IPTABLES_FILTER_V6_2_TRUSTED, IPTABLES_RAW_BRIDGE_NET_2) self.agent.prepare_devices_filter(['tap_port1', 'tap_port3']) self.sg_info.return_value = self.devices_info3 
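# --- Editorial sketch, not part of the original test module ---
# _verify_mock_calls() above matches each recorded iptables-restore
# process_input against a regex built by _regex(), which escapes the
# characters that are regex-significant in iptables output. A minimal
# self-contained illustration of that escaping; the sample text below is
# hypothetical, not taken from the fixtures:
import re

def template_to_regex(value):
    # Encode newlines and escape literal '[', ']' and '*' so the
    # template matches iptables-restore input verbatim.
    value = value.replace('\n', '\\n')
    value = value.replace('[', r'\[')
    value = value.replace(']', r'\]')
    value = value.replace('*', r'\*')
    return value

sample = '*filter\n:FORWARD - [0:0]'
assert re.match(template_to_regex(sample), sample)
# --- End editorial sketch ---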
self.agent.security_groups_rule_updated(['security_group1']) self._verify_mock_calls(True) self.agent.firewall.security_group_updated.assert_called_with( 'sg_rule', set(['security_group1'])) class TestSecurityGroupAgentEnhancedIpsetWithIptables( TestSecurityGroupAgentEnhancedRpcWithIptables): def setUp(self, defer_refresh_firewall=False): super(TestSecurityGroupAgentEnhancedIpsetWithIptables, self).setUp( defer_refresh_firewall) self.agent.firewall.enable_ipset = True self.ipset = self.agent.firewall.ipset self.ipset_execute = mock.patch.object(self.ipset, "execute").start() def test_prepare_remove_port(self): self.ipconntrack._device_zone_map = {} self.sg_info.return_value = self.devices_info1 self._replay_iptables(IPSET_FILTER_1, IPTABLES_FILTER_V6_1, IPTABLES_RAW_BRIDGE_NET_1) self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY, IPTABLES_RAW_DEFAULT) self.agent.prepare_devices_filter(['tap_port1']) self.agent.remove_devices_filter(['tap_port1']) self._verify_mock_calls() def test_security_group_member_updated(self): self.sg_info.return_value = self.devices_info1 self.ipset._get_new_set_ips = mock.Mock(return_value=['10.0.0.3']) self.ipset._get_deleted_set_ips = mock.Mock(return_value=[]) self._replay_iptables(IPSET_FILTER_1, IPTABLES_FILTER_V6_1, IPTABLES_RAW_BRIDGE_NET_1) self._replay_iptables(IPSET_FILTER_1, IPTABLES_FILTER_V6_1, IPTABLES_RAW_BRIDGE_NET_1) self._replay_iptables(IPSET_FILTER_2, IPTABLES_FILTER_V6_2, IPTABLES_RAW_BRIDGE_NET_2) self._replay_iptables(IPSET_FILTER_2, IPTABLES_FILTER_V6_2, IPTABLES_RAW_BRIDGE_NET_2) self._replay_iptables(IPSET_FILTER_1, IPTABLES_FILTER_V6_1, IPTABLES_RAW_BRIDGE_NET_1) self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY, IPTABLES_RAW_DEFAULT) self.agent.prepare_devices_filter(['tap_port1']) self.sg_info.return_value = self.devices_info2 self.agent.security_groups_member_updated(['security_group1']) self.agent.prepare_devices_filter(['tap_port2']) self.sg_info.return_value = self.devices_info1 self.agent.security_groups_member_updated(['security_group1']) self.agent.remove_devices_filter(['tap_port2']) self.agent.remove_devices_filter(['tap_port1']) self._verify_mock_calls(True) self.assertEqual( 2, self.agent.firewall.security_group_updated.call_count) def test_security_group_rule_updated(self): self.ipset._get_new_set_ips = mock.Mock(return_value=['10.0.0.3']) self.ipset._get_deleted_set_ips = mock.Mock(return_value=[]) self.sg_info.return_value = self.devices_info2 self._replay_iptables( IPSET_FILTER_2_TRUSTED, IPTABLES_FILTER_V6_2_TRUSTED, IPTABLES_RAW_BRIDGE_NET_2) self._replay_iptables( IPSET_FILTER_2_3_TRUSTED, IPTABLES_FILTER_V6_2_TRUSTED, IPTABLES_RAW_BRIDGE_NET_2) self.agent.prepare_devices_filter(['tap_port1', 'tap_port3']) self.sg_info.return_value = self.devices_info3 self.agent.security_groups_rule_updated(['security_group1']) self._verify_mock_calls(True) self.agent.firewall.security_group_updated.assert_called_with( 'sg_rule', set(['security_group1'])) class SGNotificationTestMixin(object): def test_security_group_rule_updated(self): name = 'webservers' description = 'my webservers' with self.security_group(name, description) as sg: with self.security_group(name, description): security_group_id = sg['security_group']['id'] rule = self._build_security_group_rule( security_group_id, direction='ingress', proto=const.PROTO_NAME_TCP) security_group_rule = self._make_security_group_rule(self.fmt, rule) self._delete('security-group-rules', security_group_rule['security_group_rule']['id']) 
self.notifier.assert_has_calls( [mock.call.security_groups_rule_updated(mock.ANY, [security_group_id]), mock.call.security_groups_rule_updated(mock.ANY, [security_group_id])]) def test_security_group_member_updated(self): with self.network() as n: with self.subnet(n): with self.security_group() as sg: security_group_id = sg['security_group']['id'] res = self._create_port(self.fmt, n['network']['id']) port = self.deserialize(self.fmt, res) data = {'port': {'fixed_ips': port['port']['fixed_ips'], 'name': port['port']['name'], ext_sg.SECURITYGROUPS: [security_group_id]}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['port'][ext_sg.SECURITYGROUPS][0], security_group_id) self._delete('ports', port['port']['id']) self.notifier.assert_has_calls( [mock.call.security_groups_member_updated( mock.ANY, [mock.ANY])]) class TestSecurityGroupAgentWithOVSIptables( TestSecurityGroupAgentWithIptables): FIREWALL_DRIVER = FIREWALL_HYBRID_DRIVER def setUp(self, defer_refresh_firewall=False, test_rpc_v1_1=True): super(TestSecurityGroupAgentWithOVSIptables, self).setUp( defer_refresh_firewall, test_rpc_v1_1) def _init_agent(self, defer_refresh_firewall): self.agent = sg_rpc.SecurityGroupAgentRpc( context=None, plugin_rpc=self.rpc, defer_refresh_firewall=defer_refresh_firewall) self._enforce_order_in_firewall(self.agent.firewall) # don't mess with sysctl knobs in unit tests self.agent.firewall._enabled_netfilter_for_bridges = True def test_prepare_remove_port(self): self.ipconntrack._device_zone_map = {} self.rpc.security_group_rules_for_devices.return_value = self.devices1 self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1, IPTABLES_RAW_DEVICE_1) self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY, IPTABLES_RAW_DEFAULT) self.agent.prepare_devices_filter(['tap_port1']) self.agent.remove_devices_filter(['tap_port1']) self._verify_mock_calls() def test_prepare_remove_port_no_ct_zone(self): self.ipconntrack.get_device_zone = mock.Mock() self.ipconntrack.get_device_zone.side_effect = [{}, {}] self.rpc.security_group_rules_for_devices.return_value = self.devices1 self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1, IPTABLES_RAW_DEFAULT) self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY, IPTABLES_RAW_DEFAULT) self.agent.prepare_devices_filter(['tap_port1']) self.agent.remove_devices_filter(['tap_port1']) self._verify_mock_calls() def test_security_group_member_updated(self): self.ipconntrack._device_zone_map = {} self.rpc.security_group_rules_for_devices.return_value = self.devices1 self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1, IPTABLES_RAW_DEVICE_1) self._replay_iptables(IPTABLES_FILTER_1_2, IPTABLES_FILTER_V6_1, IPTABLES_RAW_DEVICE_1) self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2, IPTABLES_RAW_DEVICE_2) self._replay_iptables(IPTABLES_FILTER_2_2, IPTABLES_FILTER_V6_2, IPTABLES_RAW_DEVICE_2) self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1, IPTABLES_RAW_DEVICE_1) self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY, IPTABLES_RAW_DEFAULT) self.agent.prepare_devices_filter(['tap_port1']) self.rpc.security_group_rules_for_devices.return_value = self.devices2 self.agent.security_groups_member_updated(['security_group1']) self.agent.prepare_devices_filter(['tap_port2']) self.rpc.security_group_rules_for_devices.return_value = self.devices1 self.agent.security_groups_member_updated(['security_group1']) 
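        # The agent has now been walked through prepare(tap_port1), a member
        # update that grows the group to two ports, prepare(tap_port2), and a
        # member update that shrinks it back; the removals below should land
        # on the final IPTABLES_FILTER_EMPTY / IPTABLES_RAW_DEFAULT replay.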
self.agent.remove_devices_filter(['tap_port2']) self.agent.remove_devices_filter(['tap_port1']) self._verify_mock_calls() def test_security_group_rule_updated(self): self.ipconntrack._device_zone_map = {} self.rpc.security_group_rules_for_devices.return_value = self.devices2 self._replay_iptables( IPTABLES_FILTER_2_TRUSTED, IPTABLES_FILTER_V6_2_TRUSTED, IPTABLES_RAW_DEVICE_2) self._replay_iptables( IPTABLES_FILTER_2_3_TRUSTED, IPTABLES_FILTER_V6_2_TRUSTED, IPTABLES_RAW_DEVICE_2) self.agent.prepare_devices_filter(['tap_port1', 'tap_port3']) self.rpc.security_group_rules_for_devices.return_value = self.devices3 self.agent.security_groups_rule_updated(['security_group1']) self._verify_mock_calls() def _regex(self, value): # Note(nati): tap is prefixed on the device # in the OVSHybridIptablesFirewallDriver value = value.replace('tap_port', 'taptap_port') value = value.replace('qvbtaptap_port', 'qvbtap_port') value = value.replace('o_port', 'otap_port') value = value.replace('i_port', 'itap_port') value = value.replace('s_port', 'stap_port') return super( TestSecurityGroupAgentWithOVSIptables, self)._regex(value) class TestSecurityGroupExtensionControl(base.BaseTestCase): def test_disable_security_group_extension_by_config(self): set_enable_security_groups(False) exp_aliases = ['dummy1', 'dummy2'] ext_aliases = ['dummy1', 'security-group', 'dummy2'] sg_rpc.disable_security_group_extension_by_config(ext_aliases) self.assertEqual(ext_aliases, exp_aliases) def test_enable_security_group_extension_by_config(self): set_enable_security_groups(True) exp_aliases = ['dummy1', 'security-group', 'dummy2'] ext_aliases = ['dummy1', 'security-group', 'dummy2'] sg_rpc.disable_security_group_extension_by_config(ext_aliases) self.assertEqual(ext_aliases, exp_aliases) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4310458 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/windows/0000755000175000017500000000000000000000000023546 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/windows/__init__.py0000644000175000017500000000000000000000000025645 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/windows/test_ip_lib.py0000644000175000017500000001134700000000000026423 0ustar00coreycorey00000000000000# Copyright 2016 Cloudbase Solutions. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
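# Unit tests for the Windows flavour of neutron.agent.windows.ip_lib, which
# is backed by the third-party ``netifaces`` package rather than the
# Linux-only tooling used by the agent on other platforms.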
import mock
import netifaces

from neutron.agent.windows import ip_lib
from neutron.tests import base


class TestIpWrapper(base.BaseTestCase):

    def test_get_device_by_ip_no_ip(self):
        ret = ip_lib.IPWrapper().get_device_by_ip(None)
        self.assertIsNone(ret)

    @mock.patch.object(ip_lib.IPWrapper, 'get_devices')
    def test_get_device_by_ip(self, mock_get_devices):
        mock_dev1 = mock.MagicMock()
        mock_dev2 = mock.MagicMock()
        mock_dev1.device_has_ip.return_value = False
        mock_dev2.device_has_ip.return_value = True
        mock_get_devices.return_value = [mock_dev1, mock_dev2]
        ret = ip_lib.IPWrapper().get_device_by_ip('fake_ip')
        self.assertEqual(mock_dev2, ret)

    @mock.patch('netifaces.interfaces')
    def test_get_devices(self, mock_interfaces):
        mock_interfaces.return_value = [mock.sentinel.dev1,
                                        mock.sentinel.dev2]
        ret = ip_lib.IPWrapper().get_devices()
        self.assertEqual(mock.sentinel.dev1, ret[0].name)
        self.assertEqual(mock.sentinel.dev2, ret[1].name)

    @mock.patch('netifaces.interfaces')
    def test_get_devices_error(self, mock_interfaces):
        mock_interfaces.side_effect = OSError
        ret = ip_lib.IPWrapper().get_devices()
        self.assertEqual([], ret)


class TestIpDevice(base.BaseTestCase):

    @mock.patch('netifaces.ifaddresses')
    def test_read_ifaddresses(self, mock_netifaces):
        mock_address = {'addr': mock.sentinel.fake_addr}
        mock_netifaces.return_value = {netifaces.AF_INET: [mock_address]}
        ret = ip_lib.IPDevice("fake_dev").read_ifaddresses()
        self.assertTrue(ret)

    @mock.patch('netifaces.ifaddresses')
    def test_read_ifaddresses_no_ip(self, mock_netifaces):
        mock_netifaces.return_value = {}
        ret = ip_lib.IPDevice("fake_dev").read_ifaddresses()
        self.assertFalse(ret)

    @mock.patch('netifaces.ifaddresses')
    def test_read_ifaddresses_ip_error(self, mock_netifaces):
        mock_netifaces.side_effect = OSError
        ret = ip_lib.IPDevice("fake_dev").read_ifaddresses()
        self.assertFalse(ret)

    @mock.patch('netifaces.ifaddresses')
    def test_read_ifaddresses_not_found(self, mock_netifaces):
        mock_netifaces.side_effect = ValueError
        ret = ip_lib.IPDevice("fake_dev").read_ifaddresses()
        self.assertFalse(ret)

    def test_device_has_ip(self):
        mock_address = {'addr': mock.sentinel.fake_addr}
        ip_device = ip_lib.IPDevice("fake_dev")
        with mock.patch.object(ip_device, "read_ifaddresses",
                               return_value={netifaces.AF_INET:
                                             [mock_address]}):
            ret = ip_device.device_has_ip(mock.sentinel.fake_addr)
        self.assertTrue(ret)

    def test_device_has_ip_false(self):
        ip_device = ip_lib.IPDevice("fake_dev")
        with mock.patch.object(ip_device, "read_ifaddresses",
                               return_value={}):
            ret = ip_device.device_has_ip(mock.sentinel.fake_addr)
        self.assertFalse(ret)

    def test_device_has_ip_error(self):
        ip_device = ip_lib.IPDevice("fake_dev")
        with mock.patch.object(ip_device, "read_ifaddresses",
                               return_value=None):
            ret = ip_device.device_has_ip(mock.sentinel.fake_addr)
        self.assertFalse(ret)


class TestIPLink(base.BaseTestCase):

    def setUp(self):
        super(TestIPLink, self).setUp()
        parent = ip_lib.IPDevice("fake_dev")
        self.ip_link = ip_lib.IPLink(parent)
        self.ip_link._parent.read_ifaddresses = mock.Mock()

    def test_address(self):
        mock_address = {'addr': mock.sentinel.fake_addr}
        self.ip_link._parent.read_ifaddresses.return_value = {
            netifaces.AF_LINK: [mock_address]}
        self.assertEqual([mock_address['addr']], self.ip_link.address)

    def test_address_no_address(self):
        self.ip_link._parent.read_ifaddresses.return_value = {
            netifaces.AF_LINK: []}
        self.assertEqual([], self.ip_link.address)

    def test_address_error(self):
        self.ip_link._parent.read_ifaddresses.return_value = None
        self.assertFalse(self.ip_link.address)
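# For orientation, a minimal sketch (not part of the neutron source) of the
# netifaces pattern the tests above exercise: ifaddresses() maps address
# families to lists of address dicts, the AF_INET/AF_LINK keys may be absent
# entirely, and unknown devices raise ValueError (or OSError on failure).


def _example_device_has_ip(device_name, ip):
    # Hypothetical helper, shown only to illustrate the expected behaviour.
    try:
        addresses = netifaces.ifaddresses(device_name)
    except (OSError, ValueError):
        # Device disappeared or never existed: treated as "no such IP".
        return False
    return ip in (addr['addr']
                  for addr in addresses.get(netifaces.AF_INET, []))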
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/agent/windows/test_utils.py0000644000175000017500000001751200000000000026325 0ustar00coreycorey00000000000000# Copyright 2018 Cloudbase Solutions. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import io import ddt import eventlet from eventlet import tpool import mock from neutron_lib import exceptions import six from neutron.agent.windows import utils from neutron.tests import base @ddt.ddt class WindowsUtilsTestCase(base.BaseTestCase): @mock.patch('os.environ', {mock.sentinel.key0: mock.sentinel.val0}) @mock.patch.object(utils.subprocess, 'Popen') @mock.patch.object(tpool, 'Proxy') @mock.patch.object(eventlet, 'getcurrent') def test_create_process(self, mock_get_current_gt, mock_tpool_proxy, mock_popen): cmd = ['fake_cmd'] popen_obj, ret_cmd = utils.create_process( cmd, run_as_root=mock.sentinel.run_as_root, addl_env={mock.sentinel.key1: mock.sentinel.val1}, tpool_proxy=True) exp_env = {mock.sentinel.key0: mock.sentinel.val0, mock.sentinel.key1: mock.sentinel.val1} mock_popen.assert_called_once_with( cmd, shell=False, stdin=utils.subprocess.PIPE, stdout=utils.subprocess.PIPE, stderr=utils.subprocess.PIPE, env=exp_env, preexec_fn=None, close_fds=False) file_type = getattr(six.moves.builtins, 'file', io.IOBase) mock_tpool_proxy.assert_called_once_with( mock_popen.return_value, autowrap=(file_type, )) self.assertEqual(mock_tpool_proxy.return_value, popen_obj) self.assertEqual(ret_cmd, cmd) @ddt.data({}, {'pid': None}, {'process_exists': True}) @ddt.unpack @mock.patch.object(utils, 'wmi', create=True) def test_get_wmi_process(self, mock_wmi, pid=mock.sentinel.pid, process_exists=False): mock_conn = mock_wmi.WMI.return_value if not pid: exp_process = None elif process_exists: exp_process = mock.sentinel.wmi_obj mock_conn.Win32_Process.return_value = [exp_process] else: exp_process = None mock_conn.Win32_Process.return_value = [] wmi_obj = utils._get_wmi_process(pid) self.assertEqual(exp_process, wmi_obj) if pid: mock_conn.Win32_Process.assert_called_once_with(ProcessId=pid) @ddt.data(True, False) @mock.patch.object(utils, '_get_wmi_process') def test_kill_process(self, process_exists, mock_get_process): if not process_exists: mock_get_process.return_value = None utils.kill_process(mock.sentinel.pid, mock.sentinel.signal, run_as_root=False) mock_get_process.assert_called_once_with(mock.sentinel.pid) if process_exists: mock_get_process.return_value.Terminate.assert_called_once_with() @ddt.data(True, False) @mock.patch.object(utils, '_get_wmi_process') def test_kill_process_exception(self, process_still_running, mock_get_process): mock_process = mock.Mock() mock_process.Terminate.side_effect = OSError mock_get_process.side_effect = [ mock_process, mock_process if process_still_running else None] if process_still_running: self.assertRaises(OSError, utils.kill_process, mock.sentinel.pid, mock.sentinel.signal) else: 
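            # With the process already gone on the re-check, kill_process()
            # is expected to swallow the OSError from Terminate() rather
            # than raise it.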
utils.kill_process(mock.sentinel.pid, mock.sentinel.signal) @ddt.data({'return_stder': True}, {'returncode': 1, 'check_exit_code': False, 'log_fail_as_error': True}, {'returncode': 1, 'log_fail_as_error': True, 'extra_ok_codes': [1]}, {'returncode': 1, 'log_fail_as_error': True, 'exp_fail': True}) @ddt.unpack @mock.patch.object(utils, 'create_process') @mock.patch.object(utils, 'avoid_blocking_call') def test_execute(self, mock_avoid_blocking_call, mock_create_process, returncode=0, check_exit_code=True, return_stder=True, log_fail_as_error=True, extra_ok_codes=None, exp_fail=False): fake_stdin = 'fake_stdin' fake_stdout = 'fake_stdout' fake_stderr = 'fake_stderr' mock_popen = mock.Mock() mock_popen.communicate.return_value = fake_stdout, fake_stderr mock_popen.returncode = returncode mock_create_process.return_value = mock_popen, mock.sentinel.cmd mock_avoid_blocking_call.side_effect = ( lambda func, *args, **kwargs: func(*args, **kwargs)) args = (mock.sentinel.cmd, fake_stdin, mock.sentinel.env, check_exit_code, return_stder, log_fail_as_error, extra_ok_codes) if exp_fail: self.assertRaises(exceptions.ProcessExecutionError, utils.execute, *args) else: ret_val = utils.execute(*args) if return_stder: exp_ret_val = (fake_stdout, fake_stderr) else: exp_ret_val = fake_stdout self.assertEqual(exp_ret_val, ret_val) mock_create_process.assert_called_once_with( mock.sentinel.cmd, addl_env=mock.sentinel.env, tpool_proxy=False) mock_avoid_blocking_call.assert_called_once_with( mock_popen.communicate, six.b(fake_stdin)) mock_popen.communicate.assert_called_once_with(six.b(fake_stdin)) mock_popen.stdin.close.assert_called_once_with() def test_get_root_helper_child_pid(self): pid = utils.get_root_helper_child_pid( mock.sentinel.pid, mock.sentinel.exp_cmd, run_as_root=False) self.assertEqual(str(mock.sentinel.pid), pid) @ddt.data(True, False) @mock.patch.object(utils, '_get_wmi_process') def test_process_is_running(self, process_running, mock_get_process): mock_get_process.return_value = ( mock.sentinel.wmi_obj if process_running else None) self.assertEqual(process_running, utils.process_is_running(mock.sentinel.pid)) mock_get_process.assert_called_once_with(mock.sentinel.pid) @ddt.data({}, {'process_running': False}, {'command_matches': False}) @ddt.unpack @mock.patch.object(utils, '_get_wmi_process') def test_pid_invoked_with_cmdline(self, mock_get_process, process_running=True, command_matches=False): exp_cmd = 'exp_cmd' mock_process = mock.Mock() mock_get_process.return_value = ( mock_process if process_running else None) mock_process.CommandLine = ( exp_cmd if command_matches else 'unexpected_cmd') exp_result = process_running and command_matches result = utils.pid_invoked_with_cmdline(mock.sentinel.pid, [exp_cmd]) self.assertEqual(exp_result, result) mock_get_process.assert_called_once_with(mock.sentinel.pid) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4310458 neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/0000755000175000017500000000000000000000000021527 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/__init__.py0000644000175000017500000000000000000000000023626 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4310458 
neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/rpc/agentnotifiers/test_dhcp_rpc_agent_api.py
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
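# Tests for DhcpAgentNotifyAPI: how networks get scheduled to DHCP agents,
# which agents count as enabled, and whether a given event is fanned out to
# every agent or cast to specific hosts.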
import copy import datetime import mock from neutron_lib.api import extensions from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib.plugins import directory from oslo_utils import timeutils from oslo_utils import uuidutils from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api from neutron.db.agentschedulers_db import cfg from neutron.objects import agent as agent_obj from neutron.tests import base class TestDhcpAgentNotifyAPI(base.BaseTestCase): def setUp(self): super(TestDhcpAgentNotifyAPI, self).setUp() self.notifier = ( dhcp_rpc_agent_api.DhcpAgentNotifyAPI(plugin=mock.Mock())) mock_util_p = mock.patch.object(extensions, 'is_extension_supported') mock_log_p = mock.patch.object(dhcp_rpc_agent_api, 'LOG') mock_fanout_p = mock.patch.object(self.notifier, '_fanout_message') mock_cast_p = mock.patch.object(self.notifier, '_cast_message') self.mock_util = mock_util_p.start() self.mock_log = mock_log_p.start() self.mock_fanout = mock_fanout_p.start() self.mock_cast = mock_cast_p.start() def _test__schedule_network(self, network, new_agents=None, existing_agents=None, expected_casts=0, expected_warnings=0): self.notifier.plugin.schedule_network.return_value = new_agents agents = self.notifier._schedule_network( mock.ANY, network, existing_agents) if new_agents is None: new_agents = [] self.assertEqual(new_agents + existing_agents, agents) self.assertEqual(expected_casts, self.mock_cast.call_count) self.assertEqual(expected_warnings, self.mock_log.warning.call_count) def test__schedule_network(self): agent = agent_obj.Agent(mock.ANY, id=uuidutils.generate_uuid(), host='host') agent.admin_state_up = True agent.heartbeat_timestamp = timeutils.utcnow() network = {'id': 'foo_net_id'} self._test__schedule_network(network, new_agents=[agent], existing_agents=[], expected_casts=1, expected_warnings=0) def test__schedule_network_no_existing_agents(self): agent = agent_obj.Agent(mock.ANY, id=uuidutils.generate_uuid()) agent.admin_state_up = True agent.heartbeat_timestamp = timeutils.utcnow() network = {'id': 'foo_net_id'} self._test__schedule_network(network, new_agents=None, existing_agents=[agent], expected_casts=0, expected_warnings=0) def test__schedule_network_no_new_agents(self): network = {'id': 'foo_net_id'} self._test__schedule_network(network, new_agents=None, existing_agents=[], expected_casts=0, expected_warnings=1) def _test__get_enabled_agents(self, network, agents=None, port_count=0, expected_warnings=0, expected_errors=0): self.notifier.plugin.get_ports_count.return_value = port_count enabled_agents = self.notifier._get_enabled_agents( mock.ANY, network, agents, mock.ANY, mock.ANY) if not cfg.CONF.enable_services_on_agents_with_admin_state_down: agents = [x for x in agents if x.admin_state_up] self.assertEqual(agents, enabled_agents) self.assertEqual(expected_warnings, self.mock_log.warning.call_count) self.assertEqual(expected_errors, self.mock_log.error.call_count) def test__get_enabled_agents(self): agent1 = agent_obj.Agent(mock.ANY, id=uuidutils.generate_uuid()) agent1.admin_state_up = True agent1.heartbeat_timestamp = timeutils.utcnow() agent2 = agent_obj.Agent(mock.ANY, id=uuidutils.generate_uuid()) agent2.admin_state_up = False agent2.heartbeat_timestamp = timeutils.utcnow() network = {'id': 'foo_network_id'} self._test__get_enabled_agents(network, agents=[agent1]) def test__get_enabled_agents_with_inactive_ones(self): agent1 = 
agent_obj.Agent(mock.ANY, id=uuidutils.generate_uuid()) agent1.admin_state_up = True agent1.heartbeat_timestamp = timeutils.utcnow() agent2 = agent_obj.Agent(mock.ANY, id=uuidutils.generate_uuid()) agent2.admin_state_up = True # This is effectively an inactive agent agent2.heartbeat_timestamp = datetime.datetime(2000, 1, 1, 0, 0) network = {'id': 'foo_network_id'} self._test__get_enabled_agents(network, agents=[agent1, agent2], expected_warnings=1, expected_errors=0) def test__get_enabled_agents_with_notification_required(self): network = {'id': 'foo_network_id', 'subnets': ['foo_subnet_id']} agent = agent_obj.Agent(mock.ANY, id=uuidutils.generate_uuid()) agent.admin_state_up = False agent.heartbeat_timestamp = timeutils.utcnow() self._test__get_enabled_agents(network, [agent], port_count=20, expected_warnings=0, expected_errors=1) def test__get_enabled_agents_with_admin_state_down(self): cfg.CONF.set_override( 'enable_services_on_agents_with_admin_state_down', True) agent1 = agent_obj.Agent(mock.ANY, id=uuidutils.generate_uuid()) agent1.admin_state_up = True agent1.heartbeat_timestamp = timeutils.utcnow() agent2 = agent_obj.Agent(mock.ANY, id=uuidutils.generate_uuid()) agent2.admin_state_up = False agent2.heartbeat_timestamp = timeutils.utcnow() network = {'id': 'foo_network_id'} self._test__get_enabled_agents(network, agents=[agent1, agent2]) def test__notify_agents_allocate_priority(self): mock_context = mock.MagicMock() mock_context.is_admin = True methods = ['network_create_end', 'network_update_end', 'network_delete_end', 'subnet_create_end', 'subnet_update_end', 'subnet_delete_end', 'port_create_end', 'port_update_end', 'port_delete_end'] with mock.patch.object(self.notifier, '_schedule_network') as f: with mock.patch.object(self.notifier, '_get_enabled_agents') as g: for method in methods: f.return_value = [mock.MagicMock()] g.return_value = [mock.MagicMock()] payload = {} if method.startswith('port'): payload['port'] = \ {'device_id': constants.DEVICE_ID_RESERVED_DHCP_PORT} expected_payload = copy.deepcopy(payload) expected_payload['priority'] = \ dhcp_rpc_agent_api.METHOD_PRIORITY_MAP.get(method) self.notifier._notify_agents(mock_context, method, payload, 'fake_network_id') if method == 'network_delete_end': self.mock_fanout.assert_called_with(mock.ANY, method, expected_payload) elif method != 'network_create_end': if method == 'port_create_end': expected_payload['priority'] = \ dhcp_rpc_agent_api.PRIORITY_PORT_CREATE_HIGH self.mock_cast.assert_called_with(mock.ANY, method, expected_payload, mock.ANY, mock.ANY) def test__notify_agents_fanout_required(self): self.notifier._notify_agents(mock.ANY, 'network_delete_end', {}, 'foo_network_id') self.assertEqual(1, self.mock_fanout.call_count) def _test__notify_agents_with_function(self, function, expected_scheduling=0, expected_casts=0): with mock.patch.object(self.notifier, '_schedule_network') as f: with mock.patch.object(self.notifier, '_get_enabled_agents') as g: agent = agent_obj.Agent(mock.ANY, id=uuidutils.generate_uuid(), host='host', topic='topic') agent.admin_state_up = True agent.heartbeat_timestamp = timeutils.utcnow() g.return_value = [agent] function() self.assertEqual(expected_scheduling, f.call_count) self.assertEqual(expected_casts, self.mock_cast.call_count) def _test__notify_agents(self, method, expected_scheduling=0, expected_casts=0, payload=None): payload = payload or {'port': {}} self._test__notify_agents_with_function( lambda: self.notifier._notify_agents( mock.Mock(), method, payload, 'foo_network_id'), 
expected_scheduling, expected_casts) def test__notify_agents_cast_required_with_scheduling(self): self._test__notify_agents('port_create_end', expected_scheduling=1, expected_casts=1) def test__notify_agents_cast_required_wo_scheduling_on_port_update(self): self._test__notify_agents('port_update_end', expected_scheduling=0, expected_casts=1) def test__notify_agents_cast_required_with_scheduling_subnet_create(self): self._test__notify_agents('subnet_create_end', expected_scheduling=1, expected_casts=1, payload={'subnet': {}}) def test__notify_agents_cast_required_with_scheduling_segment(self): network_id = 'foo_network_id' segment_id = 'foo_segment_id' subnet = {'subnet': {'segment_id': segment_id}} segment = {'id': segment_id, 'network_id': network_id, 'hosts': ['host-a']} self.notifier.plugin.get_network.return_value = {'id': network_id} segment_sp = mock.Mock() segment_sp.get_segment.return_value = segment directory.add_plugin('segments', segment_sp) self._test__notify_agents('subnet_create_end', expected_scheduling=1, expected_casts=1, payload=subnet) get_agents = self.notifier.plugin.get_dhcp_agents_hosting_networks get_agents.assert_called_once_with( mock.ANY, [network_id], hosts=segment['hosts']) def test__notify_agents_no_action(self): self._test__notify_agents('network_create_end', expected_scheduling=0, expected_casts=0) def test__notify_agents_with_router_interface_add(self): self._test__notify_agents_with_function( lambda: self.notifier._after_router_interface_created( mock.ANY, mock.ANY, mock.ANY, context=mock.Mock(), port={'id': 'foo_port_id', 'network_id': 'foo_network_id'}), expected_scheduling=1, expected_casts=1) def test__notify_agents_with_router_interface_delete(self): self._test__notify_agents_with_function( lambda: self.notifier._after_router_interface_deleted( mock.ANY, mock.ANY, mock.ANY, context=mock.Mock(), port={'id': 'foo_port_id', 'network_id': 'foo_network_id'}), expected_scheduling=0, expected_casts=1) def test__fanout_message(self): self.notifier._fanout_message(mock.ANY, mock.ANY, mock.ANY) self.assertEqual(1, self.mock_fanout.call_count) def test__cast_message(self): self.notifier._cast_message(mock.ANY, mock.ANY, mock.ANY, mock.ANY) self.assertEqual(1, self.mock_cast.call_count) def test__native_notification_unsubscribes(self): self.assertFalse(self.notifier._unsubscribed_resources) for res in (resources.PORT, resources.NETWORK, resources.SUBNET): self.notifier._unsubscribed_resources = [] kwargs = {res: {}} registry.notify(res, events.AFTER_CREATE, self, context=mock.Mock(), **kwargs) # don't unsubscribe until all three types are observed self.assertEqual([], self.notifier._unsubscribed_resources) registry.notify(res, events.AFTER_UPDATE, self, context=mock.Mock(), **kwargs) self.assertEqual([], self.notifier._unsubscribed_resources) registry.notify(res, events.AFTER_DELETE, self, context=mock.Mock(), **kwargs) self.assertEqual([res], self.notifier._unsubscribed_resources) # after first time, no further unsubscribing should happen registry.notify(res, events.AFTER_CREATE, self, context=mock.Mock(), **kwargs) self.assertEqual([res], self.notifier._unsubscribed_resources) def test__only_status_changed(self): p1 = {'id': 1, 'status': 'DOWN', 'updated_at': '10:00:00', 'revision_number': 1} p2 = dict(p1) p2['status'] = 'ACTIVE' p2['revision_number'] = 2 p2['updated_at'] = '10:00:01' self.assertTrue(self.notifier._only_status_changed(p1, p2)) p2['name'] = 'test' self.assertFalse(self.notifier._only_status_changed(p1, p2)) p1['name'] = 'test' 
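        # With 'name' synced on both sides, only status/updated_at/revision
        # differ again, so _only_status_changed() should be True; the
        # mismatched 'name' set below must flip it back to False.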
self.assertTrue(self.notifier._only_status_changed(p1, p2)) p1['name'] = 'test1' self.assertFalse(self.notifier._only_status_changed(p1, p2)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/rpc/agentnotifiers/test_l3_rpc_agent_api.py0000644000175000017500000000336200000000000032142 0ustar00coreycorey00000000000000# Copyright (c) 2016 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from neutron_lib import rpc from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api from neutron.tests import base class TestL3AgentNotifyAPI(base.BaseTestCase): def setUp(self): super(TestL3AgentNotifyAPI, self).setUp() self.rpc_client_mock = mock.patch.object( rpc, 'get_client').start().return_value self.l3_notifier = l3_rpc_agent_api.L3AgentNotifyAPI() def _test_arp_update(self, method): arp_table = {'ip_address': '1.1.1.1', 'mac_address': '22:f1:6c:9c:79:4a', 'subnet_id': 'subnet_id'} router_id = 'router_id' getattr(self.l3_notifier, method)(mock.Mock(), router_id, arp_table) self.rpc_client_mock.prepare.assert_called_once_with( fanout=True, version='1.2') cctxt = self.rpc_client_mock.prepare.return_value cctxt.cast.assert_called_once_with( mock.ANY, method, payload={'router_id': router_id, 'arp_table': arp_table}) def test_add_arp_entry(self): self._test_arp_update('add_arp_entry') def test_del_arp_entry(self): self._test_arp_update('del_arp_entry') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4310458 neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/rpc/callbacks/0000755000175000017500000000000000000000000024232 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/rpc/callbacks/__init__.py0000644000175000017500000000000000000000000026331 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4310458 neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/rpc/callbacks/consumer/0000755000175000017500000000000000000000000026065 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/rpc/callbacks/consumer/__init__.py0000644000175000017500000000000000000000000030164 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/rpc/callbacks/consumer/test_registry.py0000644000175000017500000000424500000000000031353 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.api.rpc.callbacks.consumer import registry from neutron.tests import base class ConsumerRegistryTestCase(base.BaseTestCase): def test__get_manager_is_singleton(self): self.assertIs(registry._get_manager(), registry._get_manager()) @mock.patch.object(registry, '_get_manager') def test_register(self, manager_mock): callback = lambda: None registry.register(callback, 'TYPE') manager_mock().register.assert_called_with(callback, 'TYPE') @mock.patch.object(registry, '_get_manager') def test_unsubscribe(self, manager_mock): callback = lambda: None registry.unsubscribe(callback, 'TYPE') manager_mock().unregister.assert_called_with(callback, 'TYPE') @mock.patch.object(registry, '_get_manager') def test_clear(self, manager_mock): registry.clear() manager_mock().clear.assert_called_with() @mock.patch.object(registry, '_get_manager') def test_push(self, manager_mock): resource_type_ = object() resource_ = object() event_type_ = object() context = mock.Mock() callback1 = mock.Mock() callback2 = mock.Mock() registry.register(callback1, 'x') registry.register(callback2, 'x') callbacks = {callback1, callback2} manager_mock().get_callbacks.return_value = callbacks registry.push(context, resource_type_, [resource_], event_type_) for callback in (callback1, callback2): callback.assert_called_with(context, resource_type_, [resource_], event_type_) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4310458 neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/rpc/callbacks/producer/0000755000175000017500000000000000000000000026055 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/rpc/callbacks/producer/__init__.py0000644000175000017500000000000000000000000030154 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/rpc/callbacks/producer/test_registry.py0000644000175000017500000000536000000000000031342 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
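# Tests for the producer-side callbacks registry. For orientation, the
# intended usage pattern (a sketch, not part of the module under test):
#
#     registry.provide(callback, resources.QOS_POLICY)   # one per type
#     obj = registry.pull(resources.QOS_POLICY, obj_id)  # may return None
#
# pull() additionally validates that whatever the callback returns is an
# instance of the class registered for that resource type.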
from neutron.api.rpc.callbacks import exceptions from neutron.api.rpc.callbacks.producer import registry from neutron.api.rpc.callbacks import resources from neutron.objects.qos import policy from neutron.tests.unit.services.qos import base class ProducerRegistryTestCase(base.BaseQosTestCase): def test_pull_returns_callback_result(self): policy_obj = policy.QosPolicy(context=None) def _fake_policy_cb(*args, **kwargs): return policy_obj registry.provide(_fake_policy_cb, resources.QOS_POLICY) self.assertEqual( policy_obj, registry.pull(resources.QOS_POLICY, 'fake_id')) def test_pull_does_not_raise_on_none(self): def _none_cb(*args, **kwargs): pass registry.provide(_none_cb, resources.QOS_POLICY) obj = registry.pull(resources.QOS_POLICY, 'fake_id') self.assertIsNone(obj) def test_pull_raises_on_wrong_object_type(self): def _wrong_type_cb(*args, **kwargs): return object() registry.provide(_wrong_type_cb, resources.QOS_POLICY) self.assertRaises( exceptions.CallbackWrongResourceType, registry.pull, resources.QOS_POLICY, 'fake_id') def test_pull_raises_on_callback_not_found(self): self.assertRaises( exceptions.CallbackNotFound, registry.pull, resources.QOS_POLICY, 'fake_id') def test__get_manager_is_singleton(self): self.assertIs(registry._get_manager(), registry._get_manager()) def test_unprovide(self): def _fake_policy_cb(*args, **kwargs): pass registry.provide(_fake_policy_cb, resources.QOS_POLICY) registry.unprovide(_fake_policy_cb, resources.QOS_POLICY) self.assertRaises( exceptions.CallbackNotFound, registry.pull, resources.QOS_POLICY, 'fake_id') def test_clear_unprovides_all_producers(self): def _fake_policy_cb(*args, **kwargs): pass registry.provide(_fake_policy_cb, resources.QOS_POLICY) registry.clear() self.assertRaises( exceptions.CallbackNotFound, registry.pull, resources.QOS_POLICY, 'fake_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/rpc/callbacks/test_resource_manager.py0000644000175000017500000001266300000000000031174 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
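# Producer and consumer callback managers share the behaviour covered by the
# mixin below; the key difference is cardinality: the producer manager allows
# exactly one callback per resource type (a second registration raises
# CallbacksMaxLimitReached), while the consumer manager accepts many.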
import mock from neutron_lib.callbacks import exceptions from neutron.api.rpc.callbacks import exceptions as rpc_exc from neutron.api.rpc.callbacks import resource_manager from neutron.tests.unit.services.qos import base IS_VALID_RESOURCE_TYPE = ( 'neutron.api.rpc.callbacks.resources.is_valid_resource_type') class ResourceCallbacksManagerTestCaseMixin(object): def test_register_fails_on_invalid_type(self): self.assertRaises( exceptions.Invalid, self.mgr.register, lambda: None, 'TYPE') @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) def test_clear_unregisters_all_callbacks(self, *mocks): self.mgr.register(lambda: None, 'TYPE1') self.mgr.register(lambda: None, 'TYPE2') self.mgr.clear() self.assertEqual([], self.mgr.get_subscribed_types()) def test_unregister_fails_on_invalid_type(self): self.assertRaises( exceptions.Invalid, self.mgr.unregister, lambda: None, 'TYPE') @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) def test_unregister_fails_on_unregistered_callback(self, *mocks): self.assertRaises( rpc_exc.CallbackNotFound, self.mgr.unregister, lambda: None, 'TYPE') @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) def test_unregister_unregisters_callback(self, *mocks): callback = lambda: None self.mgr.register(callback, 'TYPE') self.mgr.unregister(callback, 'TYPE') self.assertEqual([], self.mgr.get_subscribed_types()) @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) def test___init___does_not_reset_callbacks(self, *mocks): callback = lambda: None self.mgr.register(callback, 'TYPE') resource_manager.ProducerResourceCallbacksManager() self.assertEqual(['TYPE'], self.mgr.get_subscribed_types()) class ProducerResourceCallbacksManagerTestCase( base.BaseQosTestCase, ResourceCallbacksManagerTestCaseMixin): def setUp(self): super(ProducerResourceCallbacksManagerTestCase, self).setUp() self.mgr = self.prod_mgr @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) def test_register_registers_callback(self, *mocks): callback = lambda: None self.mgr.register(callback, 'TYPE') self.assertEqual(callback, self.mgr.get_callback('TYPE')) @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) def test_register_fails_on_multiple_calls(self, *mocks): self.mgr.register(lambda: None, 'TYPE') self.assertRaises( rpc_exc.CallbacksMaxLimitReached, self.mgr.register, lambda: None, 'TYPE') def test_get_callback_fails_on_invalid_type(self): self.assertRaises( exceptions.Invalid, self.mgr.get_callback, 'TYPE') @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) def test_get_callback_fails_on_unregistered_callback( self, *mocks): self.assertRaises( rpc_exc.CallbackNotFound, self.mgr.get_callback, 'TYPE') @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) def test_get_callback_returns_proper_callback(self, *mocks): callback1 = lambda: None callback2 = lambda: None self.mgr.register(callback1, 'TYPE1') self.mgr.register(callback2, 'TYPE2') self.assertEqual(callback1, self.mgr.get_callback('TYPE1')) self.assertEqual(callback2, self.mgr.get_callback('TYPE2')) class ConsumerResourceCallbacksManagerTestCase( base.BaseQosTestCase, ResourceCallbacksManagerTestCaseMixin): def setUp(self): super(ConsumerResourceCallbacksManagerTestCase, self).setUp() self.mgr = self.cons_mgr @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) def test_register_registers_callback(self, *mocks): callback = lambda: None self.mgr.register(callback, 'TYPE') self.assertEqual({callback}, self.mgr.get_callbacks('TYPE')) @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) def 
test_register_succeeds_on_multiple_calls(self, *mocks): callback1 = lambda: None callback2 = lambda: None self.mgr.register(callback1, 'TYPE') self.mgr.register(callback2, 'TYPE') @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) def test_get_callbacks_fails_on_unregistered_callback(self, *mocks): self.assertRaises( rpc_exc.CallbackNotFound, self.mgr.get_callbacks, 'TYPE') @mock.patch(IS_VALID_RESOURCE_TYPE, return_value=True) def test_get_callbacks_returns_proper_callbacks(self, *mocks): callback1 = lambda: None callback2 = lambda: None self.mgr.register(callback1, 'TYPE1') self.mgr.register(callback2, 'TYPE2') self.assertEqual(set([callback1]), self.mgr.get_callbacks('TYPE1')) self.assertEqual(set([callback2]), self.mgr.get_callbacks('TYPE2')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/rpc/callbacks/test_resources.py0000644000175000017500000000514700000000000027664 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import base as obj_base from neutron.api.rpc.callbacks import resources from neutron.objects.qos import policy from neutron.tests import base class GetResourceTypeTestCase(base.BaseTestCase): def test_get_resource_type_none(self): self.assertIsNone(resources.get_resource_type(None)) def test_get_resource_type_wrong_type(self): self.assertIsNone(resources.get_resource_type(object())) def test_get_resource_type(self): # we could use any other registered NeutronObject type here self.assertEqual(policy.QosPolicy.obj_name(), resources.get_resource_type(policy.QosPolicy())) class IsValidResourceTypeTestCase(base.BaseTestCase): def test_known_type(self): # it could be any other NeutronObject, assuming it's known to RPC # callbacks self.assertTrue(resources.is_valid_resource_type( policy.QosPolicy.obj_name())) def test_unknown_type(self): self.assertFalse( resources.is_valid_resource_type('unknown-resource-type')) class GetResourceClsTestCase(base.BaseTestCase): def test_known_type(self): # it could be any other NeutronObject, assuming it's known to RPC # callbacks self.assertEqual(policy.QosPolicy, resources.get_resource_cls(resources.QOS_POLICY)) def test_unknown_type(self): self.assertIsNone(resources.get_resource_cls('unknown-resource-type')) class RegisterResourceClass(base.BaseTestCase): def test_register_resource_class(self): class DummyOVO(obj_base.VersionedObject): pass self.assertFalse( resources.is_valid_resource_type('DummyOVO')) resources.register_resource_class(DummyOVO) self.assertTrue( resources.is_valid_resource_type('DummyOVO')) def test_register_bogus_resource_class(self): class DummyOVO(object): pass self.assertRaises(ValueError, resources.register_resource_class, DummyOVO) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 
neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/rpc/callbacks/test_version_manager.py0000644000175000017500000001472500000000000031033 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.api.rpc.callbacks import exceptions from neutron.api.rpc.callbacks import resources from neutron.api.rpc.callbacks import version_manager from neutron.db import agents_db from neutron.tests import base TEST_RESOURCE_TYPE = 'TestResourceType' TEST_VERSION_A = '1.11' TEST_VERSION_B = '1.12' TEST_RESOURCE_TYPE_2 = 'AnotherResource' AGENT_HOST_1 = 'host-1' AGENT_HOST_2 = 'host-2' AGENT_TYPE_1 = 'dhcp-agent' AGENT_TYPE_2 = 'openvswitch-agent' CONSUMER_1 = version_manager.AgentConsumer(AGENT_TYPE_1, AGENT_HOST_1) CONSUMER_2 = version_manager.AgentConsumer(AGENT_TYPE_2, AGENT_HOST_2) class ResourceConsumerTrackerTest(base.BaseTestCase): def test_consumer_set_versions(self): cv = version_manager.ResourceConsumerTracker() cv.set_versions(CONSUMER_1, {TEST_RESOURCE_TYPE: TEST_VERSION_A}) self.assertIn(TEST_VERSION_A, cv.get_resource_versions(TEST_RESOURCE_TYPE)) def test_consumer_updates_version(self): cv = version_manager.ResourceConsumerTracker() for version in [TEST_VERSION_A, TEST_VERSION_B]: cv.set_versions(CONSUMER_1, {TEST_RESOURCE_TYPE: version}) self.assertEqual(set([TEST_VERSION_B]), cv.get_resource_versions(TEST_RESOURCE_TYPE)) def test_multiple_consumer_version_update(self): cv = version_manager.ResourceConsumerTracker() cv.set_versions(CONSUMER_1, {TEST_RESOURCE_TYPE: TEST_VERSION_A}) cv.set_versions(CONSUMER_2, {TEST_RESOURCE_TYPE: TEST_VERSION_A}) cv.set_versions(CONSUMER_1, {TEST_RESOURCE_TYPE: TEST_VERSION_B}) self.assertEqual(set([TEST_VERSION_A, TEST_VERSION_B]), cv.get_resource_versions(TEST_RESOURCE_TYPE)) def test_consumer_downgrades_removing_resource(self): cv = version_manager.ResourceConsumerTracker() cv.set_versions(CONSUMER_1, {TEST_RESOURCE_TYPE: TEST_VERSION_B, TEST_RESOURCE_TYPE_2: TEST_VERSION_A}) cv.set_versions(CONSUMER_1, {TEST_RESOURCE_TYPE: TEST_VERSION_A}) self.assertEqual(set(), cv.get_resource_versions(TEST_RESOURCE_TYPE_2)) self.assertEqual(set([TEST_VERSION_A]), cv.get_resource_versions(TEST_RESOURCE_TYPE)) def test_consumer_downgrades_stops_reporting(self): cv = version_manager.ResourceConsumerTracker() cv.set_versions(CONSUMER_1, {TEST_RESOURCE_TYPE: TEST_VERSION_B, TEST_RESOURCE_TYPE_2: TEST_VERSION_A}) cv.set_versions(CONSUMER_1, {}) for resource_type in [TEST_RESOURCE_TYPE, TEST_RESOURCE_TYPE_2]: self.assertEqual(set(), cv.get_resource_versions(resource_type)) def test_different_adds_triggers_recalculation(self): cv = version_manager.ResourceConsumerTracker() for version in [TEST_VERSION_A, TEST_VERSION_B]: cv.set_versions(CONSUMER_1, {TEST_RESOURCE_TYPE: version}) self.assertTrue(cv._needs_recalculation) cv._recalculate_versions = mock.Mock() cv.get_resource_versions(TEST_RESOURCE_TYPE) cv._recalculate_versions.assert_called_once_with() class CachedResourceConsumerTrackerTest(base.BaseTestCase): def setUp(self): 
super(CachedResourceConsumerTrackerTest, self).setUp() self.refreshed = False class _FakePlugin(agents_db.AgentDbMixin): @staticmethod def get_agents_resource_versions(tracker): self.refreshed = True tracker.set_versions(CONSUMER_1, {TEST_RESOURCE_TYPE: TEST_VERSION_A}) self.get_plugin = mock.patch('neutron_lib.plugins.directory' '.get_plugin').start() self.get_plugin.return_value = _FakePlugin() def test_plugin_does_not_implement_agentsdb_exception(self): self.get_plugin.return_value = object() cached_tracker = version_manager.CachedResourceConsumerTracker() self.assertRaises(exceptions.NoAgentDbMixinImplemented, cached_tracker.get_resource_versions, resources.QOS_POLICY) def test_consumer_versions_callback(self): cached_tracker = version_manager.CachedResourceConsumerTracker() self.assertIn(TEST_VERSION_A, cached_tracker.get_resource_versions( TEST_RESOURCE_TYPE)) def test_update_versions(self): cached_tracker = version_manager.CachedResourceConsumerTracker() initial_versions = cached_tracker.get_resource_versions( TEST_RESOURCE_TYPE) initial_versions_2 = cached_tracker.get_resource_versions( TEST_RESOURCE_TYPE_2) cached_tracker.update_versions( CONSUMER_1, {TEST_RESOURCE_TYPE: TEST_VERSION_B, TEST_RESOURCE_TYPE_2: TEST_VERSION_A}) final_versions = cached_tracker.get_resource_versions( TEST_RESOURCE_TYPE) final_versions_2 = cached_tracker.get_resource_versions( TEST_RESOURCE_TYPE_2) self.assertNotEqual(initial_versions, final_versions) self.assertNotEqual(initial_versions_2, final_versions_2) def test_versions_ttl(self): cached_tracker = version_manager.CachedResourceConsumerTracker() with mock.patch('time.time') as time_patch: time_patch.return_value = 1 cached_tracker.get_resource_versions(TEST_RESOURCE_TYPE) self.assertTrue(self.refreshed) self.refreshed = False time_patch.return_value = 2 cached_tracker.get_resource_versions(TEST_RESOURCE_TYPE) self.assertFalse(self.refreshed) time_patch.return_value = 2 + version_manager.VERSIONS_TTL cached_tracker.get_resource_versions(TEST_RESOURCE_TYPE) self.assertTrue(self.refreshed) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4310458 neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/rpc/handlers/0000755000175000017500000000000000000000000024113 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/rpc/handlers/__init__.py0000644000175000017500000000000000000000000026212 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/rpc/handlers/test_dhcp_rpc.py0000644000175000017500000004274400000000000027321 0ustar00coreycorey00000000000000# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
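# DHCP agent RPC callback tests. get_active_networks_info() is expected to
# return, for each network hosted on the requesting agent, roughly:
#
#     {'id': ..., 'subnets': [...], 'non_local_subnets': [...],
#      'ports': [...]}
#
# with segment-aware ("routed") networks splitting their subnets between the
# local and non-local buckets.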
import mock from neutron_lib.api.definitions import portbindings from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib import exceptions from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from oslo_db import exception as db_exc from oslo_messaging.rpc import dispatcher as rpc_dispatcher from neutron.api.rpc.handlers import dhcp_rpc from neutron.common import utils from neutron.db import provisioning_blocks from neutron.tests import base class TestDhcpRpcCallback(base.BaseTestCase): def setUp(self): super(TestDhcpRpcCallback, self).setUp() self.plugin = mock.MagicMock() directory.add_plugin(plugin_constants.CORE, self.plugin) self.callbacks = dhcp_rpc.DhcpRpcCallback() self.log_p = mock.patch('neutron.api.rpc.handlers.dhcp_rpc.LOG') self.log = self.log_p.start() set_dirty_p = mock.patch('neutron.quota.resource_registry.' 'set_resources_dirty') self.mock_set_dirty = set_dirty_p.start() self.utils_p = mock.patch('neutron_lib.plugins.utils.create_port') self.utils = self.utils_p.start() self.agent_hosting_network_p = mock.patch.object(self.callbacks, '_is_dhcp_agent_hosting_network') self.mock_agent_hosting_network = self.agent_hosting_network_p.start() self.mock_agent_hosting_network.return_value = True self.segment_plugin = mock.MagicMock() directory.add_plugin('segments', self.segment_plugin) def test_group_by_network_id(self): port1 = {'network_id': 'a'} port2 = {'network_id': 'b'} port3 = {'network_id': 'a'} grouped_ports = self.callbacks._group_by_network_id( [port1, port2, port3]) expected = {'a': [port1, port3], 'b': [port2]} self.assertEqual(expected, grouped_ports) def test_get_active_networks_info(self): plugin_retval = [{'id': 'a'}, {'id': 'b'}] self.plugin.get_networks.return_value = plugin_retval port = {'network_id': 'a'} subnet = {'network_id': 'b', 'id': 'c'} self.plugin.get_ports.return_value = [port] self.plugin.get_subnets.return_value = [subnet] networks = self.callbacks.get_active_networks_info(mock.Mock(), host='host') expected = [{'id': 'a', 'non_local_subnets': [], 'subnets': [], 'ports': [port]}, {'id': 'b', 'non_local_subnets': [], 'subnets': [subnet], 'ports': []}] self.assertEqual(expected, networks) def test_get_active_networks_info_with_routed_networks(self): plugin_retval = [{'id': 'a'}, {'id': 'b'}] port = {'network_id': 'a'} subnets = [{'network_id': 'b', 'id': 'c', 'segment_id': '1'}, {'network_id': 'a', 'id': 'e'}, {'network_id': 'b', 'id': 'd', 'segment_id': '3'}] self.plugin.get_ports.return_value = [port] self.plugin.get_networks.return_value = plugin_retval hostseg_retval = ['1', '2'] self.segment_plugin.get_segments_by_hosts.return_value = hostseg_retval self.plugin.get_subnets.return_value = subnets networks = self.callbacks.get_active_networks_info(mock.Mock(), host='host') expected = [{'id': 'a', 'non_local_subnets': [], 'subnets': [subnets[1]], 'ports': [port]}, {'id': 'b', 'non_local_subnets': [subnets[2]], 'subnets': [subnets[0]], 'ports': []}] self.assertEqual(expected, networks) def _test_get_active_networks_info_enable_dhcp_filter(self, enable_dhcp_filter): plugin_retval = [{'id': 'a'}, {'id': 'b'}] self.plugin.get_networks.return_value = plugin_retval self.callbacks.get_active_networks_info(mock.Mock(), host='host', enable_dhcp_filter=enable_dhcp_filter) filters = {'network_id': ['a', 'b']} if enable_dhcp_filter: filters['enable_dhcp'] = [True] self.plugin.get_subnets.assert_called_once_with(mock.ANY, filters=filters) def 
test_get_active_networks_info_enable_dhcp_filter_false(self): self._test_get_active_networks_info_enable_dhcp_filter(False) def test_get_active_networks_info_enable_dhcp_filter_true(self): self._test_get_active_networks_info_enable_dhcp_filter(True) def _test__port_action_with_failures(self, exc=None, action=None): port = { 'network_id': 'foo_network_id', 'device_owner': constants.DEVICE_OWNER_DHCP, 'fixed_ips': [{'subnet_id': 'foo_subnet_id'}] } self.plugin.create_port.side_effect = exc self.utils.side_effect = exc self.assertIsNone(self.callbacks._port_action(self.plugin, mock.Mock(), {'port': port}, action)) def _test__port_action_good_action(self, action, port, expected_call): self.callbacks._port_action(self.plugin, mock.Mock(), port, action) if action == 'create_port': self.utils.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY) else: self.plugin.assert_has_calls([expected_call]) def test_port_action_create_port(self): self._test__port_action_good_action( 'create_port', mock.Mock(), mock.call.create_port(mock.ANY, mock.ANY)) def test_port_action_update_port(self): fake_port = {'id': 'foo_port_id', 'port': mock.Mock()} self._test__port_action_good_action( 'update_port', fake_port, mock.call.update_port(mock.ANY, 'foo_port_id', mock.ANY)) def test__port_action_bad_action(self): self.assertRaises( exceptions.Invalid, self._test__port_action_with_failures, exc=None, action='foo_action') def test_create_port_catch_network_not_found(self): self._test__port_action_with_failures( exc=exceptions.NetworkNotFound(net_id='foo_network_id'), action='create_port') def test_create_port_catch_subnet_not_found(self): self._test__port_action_with_failures( exc=exceptions.SubnetNotFound(subnet_id='foo_subnet_id'), action='create_port') def test_create_port_catch_db_reference_error(self): self._test__port_action_with_failures( exc=db_exc.DBReferenceError('a', 'b', 'c', 'd'), action='create_port') def test_create_port_catch_ip_generation_failure_reraise(self): self.assertRaises( exceptions.IpAddressGenerationFailure, self._test__port_action_with_failures, exc=exceptions.IpAddressGenerationFailure(net_id='foo_network_id'), action='create_port') def test_create_port_catch_and_handle_ip_generation_failure(self): self.plugin.get_subnet.side_effect = ( exceptions.SubnetNotFound(subnet_id='foo_subnet_id')) self._test__port_action_with_failures( exc=exceptions.IpAddressGenerationFailure(net_id='foo_network_id'), action='create_port') self._test__port_action_with_failures( exc=exceptions.InvalidInput(error_message='sorry'), action='create_port') def test_update_port_missing_port_on_get(self): self.plugin.get_port.side_effect = exceptions.PortNotFound( port_id='66') self.assertIsNone(self.callbacks.update_dhcp_port( context='ctx', host='host', port_id='66', port={'port': {'network_id': 'a'}})) def test_update_port_missing_port_on_update(self): self.plugin.get_port.return_value = { 'device_id': constants.DEVICE_ID_RESERVED_DHCP_PORT} self.plugin.update_port.side_effect = exceptions.PortNotFound( port_id='66') self.assertIsNone(self.callbacks.update_dhcp_port( context='ctx', host='host', port_id='66', port={'port': {'network_id': 'a'}})) def test_get_network_info_return_none_on_not_found(self): self.plugin.get_network.side_effect = exceptions.NetworkNotFound( net_id='a') retval = self.callbacks.get_network_info(mock.Mock(), network_id='a') self.assertIsNone(retval) def _test_get_network_info(self, segmented_network=False, routed_network=False): network_retval = dict(id='a') if not routed_network: subnet_retval = 
[dict(id='a'), dict(id='c'), dict(id='b')] else: subnet_retval = [dict(id='c', segment_id='1'), dict(id='b', segment_id='2'), dict(id='a', segment_id='1')] port_retval = mock.Mock() self.plugin.get_network.return_value = network_retval self.plugin.get_subnets.return_value = subnet_retval self.plugin.get_ports.return_value = port_retval if segmented_network: self.segment_plugin.get_segments.return_value = [dict(id='1'), dict(id='2')] self.segment_plugin.get_segments_by_hosts.return_value = ['1'] retval = self.callbacks.get_network_info(mock.Mock(), network_id='a') self.assertEqual(retval, network_retval) sorted_nonlocal_subnet_retval = [] if not routed_network: sorted_subnet_retval = [dict(id='a'), dict(id='b'), dict(id='c')] else: sorted_subnet_retval = [dict(id='a', segment_id='1'), dict(id='c', segment_id='1')] sorted_nonlocal_subnet_retval = [dict(id='b', segment_id='2')] self.assertEqual(retval['subnets'], sorted_subnet_retval) self.assertEqual(retval['non_local_subnets'], sorted_nonlocal_subnet_retval) self.assertEqual(retval['ports'], port_retval) def test_get_network_info(self): self._test_get_network_info() def test_get_network_info_with_routed_network(self): self._test_get_network_info(segmented_network=True, routed_network=True) def test_get_network_info_with_segmented_network_but_not_routed(self): self._test_get_network_info(segmented_network=True) def test_get_network_info_with_non_segmented_network(self): self._test_get_network_info() def test_update_dhcp_port_verify_port_action_port_dict(self): port = {'port': {'network_id': 'foo_network_id', 'device_owner': constants.DEVICE_OWNER_DHCP, 'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]} } expected_port = {'port': {'network_id': 'foo_network_id', 'device_owner': constants.DEVICE_OWNER_DHCP, portbindings.HOST_ID: 'foo_host', 'fixed_ips': [{'subnet_id': 'foo_subnet_id'}] }, 'id': 'foo_port_id' } def _fake_port_action(plugin, context, port, action): self.assertEqual(expected_port, port) self.plugin.get_port.return_value = { 'device_id': constants.DEVICE_ID_RESERVED_DHCP_PORT} self.callbacks._port_action = _fake_port_action self.callbacks.update_dhcp_port(mock.Mock(), host='foo_host', port_id='foo_port_id', port=port) def test_update_reserved_dhcp_port(self): port = {'port': {'network_id': 'foo_network_id', 'device_owner': constants.DEVICE_OWNER_DHCP, 'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]} } expected_port = {'port': {'network_id': 'foo_network_id', 'device_owner': constants.DEVICE_OWNER_DHCP, portbindings.HOST_ID: 'foo_host', 'fixed_ips': [{'subnet_id': 'foo_subnet_id'}] }, 'id': 'foo_port_id' } def _fake_port_action(plugin, context, port, action): self.assertEqual(expected_port, port) self.plugin.get_port.return_value = { 'device_id': utils.get_dhcp_agent_device_id('foo_network_id', 'foo_host')} self.callbacks._port_action = _fake_port_action self.callbacks.update_dhcp_port( mock.Mock(), host='foo_host', port_id='foo_port_id', port=port) self.plugin.get_port.return_value = { 'device_id': 'other_id'} res = self.callbacks.update_dhcp_port(mock.Mock(), host='foo_host', port_id='foo_port_id', port=port) self.assertIsNone(res) def test_update_dhcp_port(self): port = {'port': {'network_id': 'foo_network_id', 'device_owner': constants.DEVICE_OWNER_DHCP, 'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]} } expected_port = {'port': {'network_id': 'foo_network_id', 'device_owner': constants.DEVICE_OWNER_DHCP, portbindings.HOST_ID: 'foo_host', 'fixed_ips': [{'subnet_id': 'foo_subnet_id'}] }, 'id': 'foo_port_id' } 
self.plugin.get_port.return_value = { 'device_id': constants.DEVICE_ID_RESERVED_DHCP_PORT} self.callbacks.update_dhcp_port(mock.Mock(), host='foo_host', port_id='foo_port_id', port=port) self.plugin.assert_has_calls([ mock.call.update_port(mock.ANY, 'foo_port_id', expected_port)]) def test_update_dhcp_port_with_agent_not_hosting_network(self): port = {'port': {'network_id': 'foo_network_id', 'device_owner': constants.DEVICE_OWNER_DHCP, 'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]} } self.plugin.get_port.return_value = { 'device_id': constants.DEVICE_ID_RESERVED_DHCP_PORT} self.mock_agent_hosting_network.return_value = False self.assertRaises(rpc_dispatcher.ExpectedException, self.callbacks.update_dhcp_port, mock.Mock(), host='foo_host', port_id='foo_port_id', port=port) def test__is_dhcp_agent_hosting_network(self): self.agent_hosting_network_p.stop() agent = mock.Mock() with mock.patch.object(self.plugin, 'get_dhcp_agents_hosting_networks', return_value=[agent]): ret = self.callbacks._is_dhcp_agent_hosting_network(self.plugin, mock.Mock(), host='foo_host', network_id='foo_network_id') self.assertTrue(ret) def test__is_dhcp_agent_hosting_network_false(self): self.agent_hosting_network_p.stop() with mock.patch.object(self.plugin, 'get_dhcp_agents_hosting_networks', return_value=[]): ret = self.callbacks._is_dhcp_agent_hosting_network(self.plugin, mock.Mock(), host='foo_host', network_id='foo_network_id') self.assertFalse(ret) def test_release_dhcp_port(self): port_retval = dict(id='port_id', fixed_ips=[dict(subnet_id='a')]) self.plugin.get_ports.return_value = [port_retval] self.callbacks.release_dhcp_port(mock.ANY, network_id='netid', device_id='devid') self.plugin.assert_has_calls([ mock.call.delete_ports_by_device_id(mock.ANY, 'devid', 'netid')]) def test_dhcp_ready_on_ports(self): context = mock.Mock() port_ids = range(10) with mock.patch.object(provisioning_blocks, 'provisioning_complete') as pc: self.callbacks.dhcp_ready_on_ports(context, port_ids) calls = [mock.call(context, port_id, resources.PORT, provisioning_blocks.DHCP_ENTITY) for port_id in port_ids] pc.assert_has_calls(calls) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/rpc/handlers/test_dvr_rpc.py0000644000175000017500000000407700000000000027173 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
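The dhcp_ready_on_ports test above patches provisioning_blocks.provisioning_complete and then verifies one call per port with assert_has_calls. A minimal, self-contained sketch of that patch-and-verify pattern follows; the provisioning holder and notify_ready helper are hypothetical stand-ins (and it uses stdlib unittest.mock, while this tree itself imports the external mock package):

from unittest import mock


class provisioning(object):
    """Stand-in for a module exposing provisioning_complete()."""

    @staticmethod
    def provisioning_complete(context, port_id):
        raise RuntimeError('should be patched out in the test')


def notify_ready(context, port_ids):
    # Fan a "ready" notification out once per port, mirroring the
    # shape of the callback under test.
    for port_id in port_ids:
        provisioning.provisioning_complete(context, port_id)


context = mock.Mock()
with mock.patch.object(provisioning, 'provisioning_complete') as pc:
    notify_ready(context, range(3))
# One recorded call per port id, in order.
pc.assert_has_calls([mock.call(context, port_id) for port_id in range(3)])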
import mock from neutron.api.rpc.handlers import dvr_rpc from neutron.tests import base class DVRServerRpcApiTestCase(base.BaseTestCase): def setUp(self): self.client_p = mock.patch.object(dvr_rpc.n_rpc, "get_client") self.client = self.client_p.start() self.rpc = dvr_rpc.DVRServerRpcApi('fake_topic') self.mock_cctxt = self.rpc.client.prepare.return_value self.ctxt = mock.ANY super(DVRServerRpcApiTestCase, self).setUp() def test_get_dvr_mac_address_by_host(self): self.rpc.get_dvr_mac_address_by_host(self.ctxt, 'foo_host') self.mock_cctxt.call.assert_called_with( self.ctxt, 'get_dvr_mac_address_by_host', host='foo_host') def test_get_dvr_mac_address_list(self): self.rpc.get_dvr_mac_address_list(self.ctxt) self.mock_cctxt.call.assert_called_with( self.ctxt, 'get_dvr_mac_address_list') def test_get_ports_on_host_by_subnet(self): self.rpc.get_ports_on_host_by_subnet( self.ctxt, 'foo_host', 'foo_subnet') self.mock_cctxt.call.assert_called_with( self.ctxt, 'get_ports_on_host_by_subnet', host='foo_host', subnet='foo_subnet') def test_get_subnet_for_dvr(self): self.rpc.get_subnet_for_dvr( self.ctxt, 'foo_subnet', fixed_ips='foo_fixed_ips') self.mock_cctxt.call.assert_called_with( self.ctxt, 'get_subnet_for_dvr', subnet='foo_subnet', fixed_ips='foo_fixed_ips') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/rpc/handlers/test_l3_rpc.py0000644000175000017500000000557200000000000026717 0ustar00coreycorey00000000000000# Copyright (c) 2015 Cisco Systems # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
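DVRServerRpcApiTestCase above never touches a message bus: it patches n_rpc.get_client, keeps the prepared call context as a mock, and asserts only on the method name and keyword arguments handed to call(). A compact sketch of that pattern under the same assumptions, with FakeRpcApi as a hypothetical stand-in for the API class:

from unittest import mock


class FakeRpcApi(object):
    """Hypothetical stand-in for an RPC API class such as DVRServerRpcApi."""

    def __init__(self, get_client):
        # In the real class the client comes from n_rpc.get_client(target);
        # in the test that factory is a mock.
        self.client = get_client('fake_topic')

    def get_thing(self, context, host):
        cctxt = self.client.prepare()
        return cctxt.call(context, 'get_thing', host=host)


get_client = mock.Mock()
api = FakeRpcApi(get_client)
api.get_thing(mock.ANY, 'foo_host')
# The prepared context records the method name and keyword arguments,
# which is all these API tests assert on.
get_client.return_value.prepare.return_value.call.assert_called_with(
    mock.ANY, 'get_thing', host='foo_host')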
import netaddr from neutron_lib import constants from neutron_lib import context from neutron_lib.plugins import directory from oslo_config import cfg from neutron.api.rpc.handlers import l3_rpc from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit import testlib_api class TestL3RpcCallback(testlib_api.SqlTestCase): def setUp(self): super(TestL3RpcCallback, self).setUp() self.setup_coreplugin(test_db_base_plugin_v2.DB_PLUGIN_KLASS) self.plugin = directory.get_plugin() self.ctx = context.get_admin_context() cfg.CONF.set_override('ipv6_pd_enabled', True) self.callbacks = l3_rpc.L3RpcCallback() self.network = self._prepare_network() def _prepare_network(self): network = {'network': {'name': 'abc', 'shared': False, 'tenant_id': 'tenant_id', 'admin_state_up': True}} return self.plugin.create_network(self.ctx, network) def _prepare_ipv6_pd_subnet(self): subnet = {'subnet': {'network_id': self.network['id'], 'tenant_id': 'tenant_id', 'cidr': None, 'ip_version': constants.IP_VERSION_6, 'use_default_subnetpool': True, 'name': 'ipv6_pd', 'enable_dhcp': True, 'host_routes': None, 'dns_nameservers': None, 'allocation_pools': None, 'ipv6_ra_mode': constants.IPV6_SLAAC, 'ipv6_address_mode': constants.IPV6_SLAAC}} return self.plugin.create_subnet(self.ctx, subnet) def test_process_prefix_update(self): subnet = self._prepare_ipv6_pd_subnet() data = {subnet['id']: netaddr.IPNetwork('2001:db8::/64')} allocation_pools = [{'start': '2001:db8::2', 'end': '2001:db8::ffff:ffff:ffff:ffff'}] res = self.callbacks.process_prefix_update(self.ctx, subnets=data) updated_subnet = res[0] self.assertEqual(str(data[subnet['id']]), updated_subnet['cidr']) self.assertEqual(updated_subnet['allocation_pools'], allocation_pools) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/rpc/handlers/test_resources_rpc.py0000644000175000017500000003064400000000000030411 0ustar00coreycorey00000000000000# Copyright (c) 2015 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
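test_process_prefix_update above expects the allocation pool for the new /64 to start at ::2 (the expected data holds the first usable address, ::1, back for the gateway) and to run to the last address of the network. That arithmetic can be checked standalone with netaddr:

import netaddr

net = netaddr.IPNetwork('2001:db8::/64')
# net.first is the integer value of 2001:db8:: itself; +1 would be the
# gateway by the convention encoded in the expected pool, so +2 is the
# first pool address.
start = netaddr.IPAddress(net.first + 2)
end = netaddr.IPAddress(net.last)
assert str(start) == '2001:db8::2'
assert str(end) == '2001:db8::ffff:ffff:ffff:ffff'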
import mock from neutron_lib.agent import topics from neutron_lib import context from neutron_lib.objects import common_types from oslo_utils import uuidutils from oslo_versionedobjects import fields as obj_fields import testtools from neutron.api.rpc.callbacks import resources from neutron.api.rpc.callbacks import version_manager from neutron.api.rpc.handlers import resources_rpc from neutron.objects import base as objects_base from neutron.tests import base from neutron.tests.unit.objects import test_base as objects_test_base TEST_EVENT = 'test_event' TEST_VERSION = '1.0' def _create_test_dict(uuid=None): return {'id': uuid or uuidutils.generate_uuid(), 'field': 'foo'} def _create_test_resource(context=None, resource_cls=None): resource_cls = resource_cls or FakeResource resource_dict = _create_test_dict() resource = resource_cls(context, **resource_dict) resource.obj_reset_changes() return resource class BaseFakeResource(objects_base.NeutronObject): @classmethod def get_objects(cls, context, **kwargs): return list() class FakeResource(BaseFakeResource): VERSION = TEST_VERSION fields = { 'id': common_types.UUIDField(), 'field': obj_fields.StringField() } class FakeResource2(BaseFakeResource): VERSION = TEST_VERSION fields = { 'id': common_types.UUIDField(), 'field': obj_fields.StringField() } class ResourcesRpcBaseTestCase(base.BaseTestCase): def setUp(self): super(ResourcesRpcBaseTestCase, self).setUp() self.obj_registry = self.useFixture( objects_test_base.NeutronObjectRegistryFixture()) self.context = context.get_admin_context() mock.patch.object(resources_rpc.resources, 'is_valid_resource_type').start() mock.patch.object(resources_rpc.resources, 'get_resource_cls', side_effect=self._get_resource_cls).start() self.resource_objs = [_create_test_resource(self.context) for _ in range(2)] self.resource_objs2 = [_create_test_resource(self.context, FakeResource2) for _ in range(2)] @staticmethod def _get_resource_cls(resource_type): return {FakeResource.obj_name(): FakeResource, FakeResource2.obj_name(): FakeResource2}.get(resource_type) class _ValidateResourceTypeTestCase(base.BaseTestCase): def setUp(self): super(_ValidateResourceTypeTestCase, self).setUp() self.is_valid_mock = mock.patch.object( resources_rpc.resources, 'is_valid_resource_type').start() def test_valid_type(self): self.is_valid_mock.return_value = True resources_rpc._validate_resource_type('foo') def test_invalid_type(self): self.is_valid_mock.return_value = False with testtools.ExpectedException( resources_rpc.InvalidResourceTypeClass): resources_rpc._validate_resource_type('foo') class _ResourceTypeVersionedTopicTestCase(base.BaseTestCase): @mock.patch.object(resources_rpc, '_validate_resource_type') def test_resource_type_versioned_topic(self, validate_mock): obj_name = FakeResource.obj_name() expected = topics.RESOURCE_TOPIC_PATTERN % { 'resource_type': 'FakeResource', 'version': '1.0'} with mock.patch.object(resources_rpc.resources, 'get_resource_cls', return_value=FakeResource): observed = resources_rpc.resource_type_versioned_topic(obj_name) self.assertEqual(expected, observed) class ResourcesPullRpcApiTestCase(ResourcesRpcBaseTestCase): def setUp(self): super(ResourcesPullRpcApiTestCase, self).setUp() self.rpc = resources_rpc.ResourcesPullRpcApi() mock.patch.object(self.rpc, 'client').start() self.cctxt_mock = self.rpc.client.prepare.return_value def test_is_singleton(self): self.assertIs(self.rpc, resources_rpc.ResourcesPullRpcApi()) def test_pull(self): self.obj_registry.register(FakeResource) expected_obj = 
_create_test_resource(self.context) resource_id = expected_obj.id self.cctxt_mock.call.return_value = expected_obj.obj_to_primitive() result = self.rpc.pull( self.context, FakeResource.obj_name(), resource_id) self.cctxt_mock.call.assert_called_once_with( self.context, 'pull', resource_type='FakeResource', version=TEST_VERSION, resource_id=resource_id) self.assertEqual(expected_obj, result) def test_bulk_pull(self): self.obj_registry.register(FakeResource) expected_objs = [_create_test_resource(self.context), _create_test_resource(self.context)] self.cctxt_mock.call.return_value = [ e.obj_to_primitive() for e in expected_objs] filter_kwargs = {'a': 'b', 'c': 'd'} result = self.rpc.bulk_pull( self.context, FakeResource.obj_name(), filter_kwargs=filter_kwargs) self.cctxt_mock.call.assert_called_once_with( self.context, 'bulk_pull', resource_type='FakeResource', version=TEST_VERSION, filter_kwargs=filter_kwargs) self.assertEqual(expected_objs, result) def test_pull_resource_not_found(self): resource_dict = _create_test_dict() resource_id = resource_dict['id'] self.cctxt_mock.call.return_value = None with testtools.ExpectedException(resources_rpc.ResourceNotFound): self.rpc.pull(self.context, FakeResource.obj_name(), resource_id) class ResourcesPushToServerRpcCallbackTestCase(ResourcesRpcBaseTestCase): def test_report_versions(self): callbacks = resources_rpc.ResourcesPushToServerRpcCallback() with mock.patch('neutron.api.rpc.callbacks.version_manager' '.update_versions') as update_versions: version_map = {'A': '1.0'} callbacks.report_agent_resource_versions(context=mock.ANY, agent_type='DHCP Agent', agent_host='fake-host', version_map=version_map) update_versions.assert_called_once_with(mock.ANY, version_map) class ResourcesPullRpcCallbackTestCase(ResourcesRpcBaseTestCase): def setUp(self): super(ResourcesPullRpcCallbackTestCase, self).setUp() self.obj_registry.register(FakeResource) self.callbacks = resources_rpc.ResourcesPullRpcCallback() self.resource_obj = _create_test_resource(self.context) def test_pull(self): resource_dict = _create_test_dict(uuid=self.resource_obj.id) with mock.patch.object( resources_rpc.prod_registry, 'pull', return_value=self.resource_obj) as registry_mock: primitive = self.callbacks.pull( self.context, resource_type=FakeResource.obj_name(), version=TEST_VERSION, resource_id=self.resource_obj.id) registry_mock.assert_called_once_with( 'FakeResource', self.resource_obj.id, context=self.context) self.assertEqual(resource_dict, primitive['versioned_object.data']) self.assertEqual(self.resource_obj.obj_to_primitive(), primitive) def test_bulk_pull(self): r1 = self.resource_obj r2 = _create_test_resource(self.context) @classmethod def get_objs(*args, **kwargs): if 'id' not in kwargs: return [r1, r2] return [r for r in [r1, r2] if r.id == kwargs['id']] # the bulk interface currently retrieves directly from the registry with mock.patch.object(FakeResource, 'get_objects', new=get_objs): objs = self.callbacks.bulk_pull( self.context, resource_type=FakeResource.obj_name(), version=TEST_VERSION) self.assertItemsEqual([r1.obj_to_primitive(), r2.obj_to_primitive()], objs) objs = self.callbacks.bulk_pull( self.context, resource_type=FakeResource.obj_name(), version=TEST_VERSION, filter_kwargs={'id': r1.id}) self.assertEqual([r1.obj_to_primitive()], objs) @mock.patch.object(FakeResource, 'obj_to_primitive') def test_pull_backports_to_older_version(self, to_prim_mock): with mock.patch.object(resources_rpc.prod_registry, 'pull', return_value=self.resource_obj): self.callbacks.pull( 
self.context, resource_type=FakeResource.obj_name(), version='0.9', # less than initial version 1.0 resource_id=self.resource_obj.id) to_prim_mock.assert_called_with(target_version='0.9') class ResourcesPushRpcApiTestCase(ResourcesRpcBaseTestCase): """Tests the neutron server side of the RPC interface.""" def setUp(self): super(ResourcesPushRpcApiTestCase, self).setUp() mock.patch.object(resources_rpc.n_rpc, 'get_client').start() self.rpc = resources_rpc.ResourcesPushRpcApi() self.cctxt_mock = self.rpc.client.prepare.return_value mock.patch.object(version_manager, 'get_resource_versions', return_value=set([TEST_VERSION])).start() def test__prepare_object_fanout_context(self): expected_topic = topics.RESOURCE_TOPIC_PATTERN % { 'resource_type': resources.get_resource_type( self.resource_objs[0]), 'version': TEST_VERSION} observed = self.rpc._prepare_object_fanout_context( self.resource_objs[0], self.resource_objs[0].VERSION, '1.0') self.rpc.client.prepare.assert_called_once_with( fanout=True, topic=expected_topic, version='1.0') self.assertEqual(self.cctxt_mock, observed) def test_push_single_type(self): self.rpc.push( self.context, self.resource_objs, TEST_EVENT) self.cctxt_mock.cast.assert_called_once_with( self.context, 'push', resource_list=[resource.obj_to_primitive() for resource in self.resource_objs], event_type=TEST_EVENT) def test_push_mixed(self): self.rpc.push( self.context, self.resource_objs + self.resource_objs2, event_type=TEST_EVENT) self.cctxt_mock.cast.assert_any_call( self.context, 'push', resource_list=[resource.obj_to_primitive() for resource in self.resource_objs], event_type=TEST_EVENT) self.cctxt_mock.cast.assert_any_call( self.context, 'push', resource_list=[resource.obj_to_primitive() for resource in self.resource_objs2], event_type=TEST_EVENT) class ResourcesPushRpcCallbackTestCase(ResourcesRpcBaseTestCase): """Tests the agent-side of the RPC interface.""" def setUp(self): super(ResourcesPushRpcCallbackTestCase, self).setUp() self.callbacks = resources_rpc.ResourcesPushRpcCallback() @mock.patch.object(resources_rpc.cons_registry, 'push') def test_push(self, reg_push_mock): self.obj_registry.register(FakeResource) self.callbacks.push(self.context, resource_list=[resource.obj_to_primitive() for resource in self.resource_objs], event_type=TEST_EVENT) reg_push_mock.assert_called_once_with(self.context, self.resource_objs[0].obj_name(), self.resource_objs, TEST_EVENT) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/rpc/handlers/test_securitygroups_rpc.py0000644000175000017500000001602000000000000031476 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
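test_push_mixed above expects one cast per resource type when a mixed list is pushed. A sketch of the grouping step that behaviour implies, using itertools.groupby over the class name (a hypothetical helper, not the neutron implementation):

import itertools


def _type_name(resource):
    return type(resource).__name__


def group_by_type(resources):
    # groupby only merges adjacent items, so sort by type name first.
    ordered = sorted(resources, key=_type_name)
    return {name: list(group)
            for name, group in itertools.groupby(ordered, _type_name)}


class FakeA(object):
    pass


class FakeB(object):
    pass


grouped = group_by_type([FakeA(), FakeB(), FakeA()])
assert sorted(grouped) == ['FakeA', 'FakeB']
assert len(grouped['FakeA']) == 2  # one bucket (hence one cast) per type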
import mock from neutron_lib import context from oslo_utils import uuidutils from neutron.agent import resource_cache from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import securitygroups_rpc from neutron import objects from neutron.objects.port.extensions import port_security as psec from neutron.objects import ports from neutron.objects import securitygroup from neutron.tests import base class SecurityGroupServerRpcApiTestCase(base.BaseTestCase): def test_security_group_rules_for_devices(self): rpcapi = securitygroups_rpc.SecurityGroupServerRpcApi('fake_topic') with mock.patch.object(rpcapi.client, 'call') as rpc_mock,\ mock.patch.object(rpcapi.client, 'prepare') as prepare_mock: prepare_mock.return_value = rpcapi.client rpcapi.security_group_rules_for_devices('context', ['fake_device']) rpc_mock.assert_called_once_with( 'context', 'security_group_rules_for_devices', devices=['fake_device']) class SGAgentRpcCallBackMixinTestCase(base.BaseTestCase): def setUp(self): super(SGAgentRpcCallBackMixinTestCase, self).setUp() self.rpc = securitygroups_rpc.SecurityGroupAgentRpcCallbackMixin() self.rpc.sg_agent = mock.Mock() def test_security_groups_rule_updated(self): self.rpc.security_groups_rule_updated(None, security_groups=['fake_sgid']) self.rpc.sg_agent.assert_has_calls( [mock.call.security_groups_rule_updated(['fake_sgid'])]) def test_security_groups_member_updated(self): self.rpc.security_groups_member_updated(None, security_groups=['fake_sgid']) self.rpc.sg_agent.assert_has_calls( [mock.call.security_groups_member_updated(['fake_sgid'])]) class SecurityGroupServerAPIShimTestCase(base.BaseTestCase): def setUp(self): super(SecurityGroupServerAPIShimTestCase, self).setUp() objects.register_objects() resource_types = [resources.PORT, resources.SECURITYGROUP, resources.SECURITYGROUPRULE] self.rcache = resource_cache.RemoteResourceCache(resource_types) # prevent any server lookup attempts mock.patch.object(self.rcache, '_flood_cache_for_query').start() self.shim = securitygroups_rpc.SecurityGroupServerAPIShim(self.rcache) self.sg_agent = mock.Mock() self.shim.register_legacy_sg_notification_callbacks(self.sg_agent) self.ctx = context.get_admin_context() def _make_port_ovo(self, ip, **kwargs): attrs = {'id': uuidutils.generate_uuid(), 'network_id': uuidutils.generate_uuid(), 'security_group_ids': set(), 'device_owner': 'compute:None', 'allowed_address_pairs': []} attrs['fixed_ips'] = [ports.IPAllocation( port_id=attrs['id'], subnet_id=uuidutils.generate_uuid(), network_id=attrs['network_id'], ip_address=ip)] attrs.update(**kwargs) p = ports.Port(self.ctx, **attrs) self.rcache.record_resource_update(self.ctx, 'Port', p) return p @mock.patch.object(securitygroup.SecurityGroup, 'is_shared_with_tenant', return_value=False) def _make_security_group_ovo(self, *args, **kwargs): attrs = {'id': uuidutils.generate_uuid(), 'revision_number': 1} sg_rule = securitygroup.SecurityGroupRule( id=uuidutils.generate_uuid(), security_group_id=attrs['id'], direction='ingress', ethertype='IPv4', protocol='tcp', port_range_min=400, remote_group_id=attrs['id'], revision_number=1, ) attrs['rules'] = [sg_rule] attrs.update(**kwargs) sg = securitygroup.SecurityGroup(self.ctx, **attrs) self.rcache.record_resource_update(self.ctx, 'SecurityGroup', sg) return sg def test_sg_parent_ops_affect_rules(self): s1 = self._make_security_group_ovo() filters = {'security_group_id': (s1.id, )} self.assertEqual( s1.rules, self.rcache.get_resources('SecurityGroupRule', filters)) 
self.sg_agent.security_groups_rule_updated.assert_called_once_with( [s1.id]) self.sg_agent.security_groups_rule_updated.reset_mock() self.rcache.record_resource_delete(self.ctx, 'SecurityGroup', s1.id) self.assertEqual( [], self.rcache.get_resources('SecurityGroupRule', filters)) self.sg_agent.security_groups_rule_updated.assert_called_once_with( [s1.id]) def test_security_group_info_for_devices(self): s1 = self._make_security_group_ovo() p1 = self._make_port_ovo(ip='1.1.1.1', security_group_ids={s1.id}) p2 = self._make_port_ovo( ip='2.2.2.2', security_group_ids={s1.id}, security=psec.PortSecurity(port_security_enabled=False)) p3 = self._make_port_ovo(ip='3.3.3.3', security_group_ids={s1.id}, device_owner='network:dhcp') ids = [p1.id, p2.id, p3.id] info = self.shim.security_group_info_for_devices(self.ctx, ids) self.assertIn('1.1.1.1', info['sg_member_ips'][s1.id]['IPv4']) self.assertIn('2.2.2.2', info['sg_member_ips'][s1.id]['IPv4']) self.assertIn('3.3.3.3', info['sg_member_ips'][s1.id]['IPv4']) self.assertIn(p1.id, info['devices'].keys()) self.assertIn(p2.id, info['devices'].keys()) # P3 is a trusted port so it doesn't have rules self.assertNotIn(p3.id, info['devices'].keys()) self.assertEqual([s1.id], list(info['security_groups'].keys())) self.assertTrue(info['devices'][p1.id]['port_security_enabled']) self.assertFalse(info['devices'][p2.id]['port_security_enabled']) def test_sg_member_update_events(self): s1 = self._make_security_group_ovo() p1 = self._make_port_ovo(ip='1.1.1.1', security_group_ids={s1.id}) self._make_port_ovo(ip='2.2.2.2', security_group_ids={s1.id}) self.sg_agent.security_groups_member_updated.assert_called_with( {s1.id}) self.sg_agent.security_groups_member_updated.reset_mock() self.rcache.record_resource_delete(self.ctx, 'Port', p1.id) self.sg_agent.security_groups_member_updated.assert_called_with( {s1.id}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/test_api_common.py0000644000175000017500000000512600000000000025265 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
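The SecurityGroupServerAPIShim tests above lean on the resource cache fanning update and delete events out to registered callbacks (security_groups_member_updated and friends fire when a port is recorded or deleted). A minimal observer sketch of that contract, with a hypothetical Cache class rather than neutron's resource_cache:

class Cache(object):
    def __init__(self):
        self._resources = {}
        self._listeners = []

    def subscribe(self, callback):
        self._listeners.append(callback)

    def record_update(self, rid, resource):
        self._resources[rid] = resource
        for cb in self._listeners:
            cb('updated', rid)

    def record_delete(self, rid):
        self._resources.pop(rid, None)
        for cb in self._listeners:
            cb('deleted', rid)


events = []
cache = Cache()
cache.subscribe(lambda event, rid: events.append((event, rid)))
cache.record_update('p1', object())
cache.record_delete('p1')
# Every mutation reached the subscriber, in order.
assert events == [('updated', 'p1'), ('deleted', 'p1')]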
from oslo_config import cfg import webob from neutron.api import api_common from neutron.tests import base class PrepareUrlTestCase(base.BaseTestCase): def test_no_configured_prefix(self): self.assertFalse(cfg.CONF.network_link_prefix) requrl = 'http://neutron.example/sub/ports.json?test=1' # should be unchanged self.assertEqual(requrl, api_common.prepare_url(requrl)) def test_configured_prefix(self): cfg.CONF.set_override('network_link_prefix', 'http://quantum.example') requrl = 'http://neutron.example/sub/ports.json?test=1' expected = 'http://quantum.example/sub/ports.json?test=1' self.assertEqual(expected, api_common.prepare_url(requrl)) class GetPathUrlTestCase(base.BaseTestCase): def test_no_headers(self): base_http_url = 'http://neutron.example/sub/ports.json' base_https_url = 'https://neutron.example/sub/ports.json' path = '' http_req = webob.Request.blank(path, base_url=base_http_url) https_req = webob.Request.blank(path, base_url=base_https_url) # should be unchanged self.assertEqual(base_http_url, api_common.get_path_url(http_req)) self.assertEqual(base_https_url, api_common.get_path_url(https_req)) def test_http_to_https(self): base_url = 'http://neutron.example/sub/ports.json' path = '' request = webob.Request.blank( path, base_url=base_url, headers={'X-Forwarded-Proto': 'https'}) path_url = api_common.get_path_url(request) # should replace http:// with https:// self.assertTrue(path_url.startswith("https://")) def test_https_to_http(self): base_url = 'https://neutron.example/sub/ports.json' path = '' request = webob.Request.blank( path, base_url=base_url, headers={'X-Forwarded-Proto': 'http'}) path_url = api_common.get_path_url(request) # should replace https:// with http:// self.assertTrue(path_url.startswith("http://")) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/test_extensions.py0000644000175000017500000012764300000000000025354 0ustar00coreycorey00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
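The X-Forwarded-Proto tests above pin down a simple scheme rewrite: honour the forwarded protocol by swapping the URL prefix and leave everything else untouched. A standalone sketch of that rewrite (a hypothetical helper, not the api_common.get_path_url implementation):

def apply_forwarded_proto(url, forwarded_proto=None):
    # Swap the scheme only when the proxy reports the opposite protocol;
    # with no header (or a matching one) the URL passes through unchanged.
    if forwarded_proto == 'https' and url.startswith('http://'):
        return 'https://' + url[len('http://'):]
    if forwarded_proto == 'http' and url.startswith('https://'):
        return 'http://' + url[len('https://'):]
    return url


assert apply_forwarded_proto(
    'http://neutron.example/sub/ports.json', 'https').startswith('https://')
assert apply_forwarded_proto(
    'https://neutron.example/sub/ports.json', 'http').startswith('http://')
assert apply_forwarded_proto(
    'http://neutron.example/sub/ports.json') == (
        'http://neutron.example/sub/ports.json')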
import copy import fixtures import mock from neutron_lib import exceptions from neutron_lib.plugins import constants as lib_const from neutron_lib.plugins import directory from neutron_lib.services import base as service_base from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_service import wsgi as base_wsgi import routes import testtools import webob import webob.exc as webexc import webtest import neutron from neutron.api import extensions from neutron.common import config from neutron.plugins.common import constants from neutron import quota from neutron.tests import base from neutron.tests.unit.api.v2 import test_base from neutron.tests.unit import dummy_plugin from neutron.tests.unit import extension_stubs as ext_stubs import neutron.tests.unit.extensions from neutron.tests.unit.extensions import extendedattribute as extattr from neutron.tests.unit import testlib_api from neutron import wsgi LOG = logging.getLogger(__name__) _uuid = test_base._uuid _get_path = test_base._get_path extensions_path = ':'.join(neutron.tests.unit.extensions.__path__) class CustomExtensionCheckMapMemento(fixtures.Fixture): """Create a copy of the custom extension support check map so it can be restored during test cleanup. """ def _setUp(self): self._map_contents_backup = copy.deepcopy( extensions.EXTENSION_SUPPORTED_CHECK_MAP ) self._plugin_agnostic_extensions_backup = set( extensions._PLUGIN_AGNOSTIC_EXTENSIONS ) self.addCleanup(self._restore) def _restore(self): extensions.EXTENSION_SUPPORTED_CHECK_MAP = self._map_contents_backup extensions._PLUGIN_AGNOSTIC_EXTENSIONS = ( self._plugin_agnostic_extensions_backup ) class ExtensionsTestApp(base_wsgi.Router): def __init__(self, options=None): options = options or {} mapper = routes.Mapper() controller = ext_stubs.StubBaseAppController() mapper.resource("dummy_resource", "/dummy_resources", controller=controller) super(ExtensionsTestApp, self).__init__(mapper) class FakePluginWithExtension(service_base.ServicePluginBase): """A fake plugin used only for extension testing in this file.""" supported_extension_aliases = ["FOXNSOX"] def method_to_support_foxnsox_extension(self, context): self._log("method_to_support_foxnsox_extension", context) def get_plugin_type(self): pass def get_plugin_description(self): pass class ExtensionPathTest(base.BaseTestCase): def setUp(self): self.base_path = extensions.get_extensions_path() super(ExtensionPathTest, self).setUp() def test_get_extensions_path_with_plugins(self): cfg.CONF.set_override('api_extensions_path', 'neutron/tests/unit/extensions') path = extensions.get_extensions_path( {lib_const.CORE: FakePluginWithExtension()}) self.assertEqual(path, '%s:neutron/tests/unit/extensions' % self.base_path) def test_get_extensions_path_no_extensions(self): # Reset to default value, as it's overridden by base class cfg.CONF.set_override('api_extensions_path', '') path = extensions.get_extensions_path() self.assertEqual(path, self.base_path) def test_get_extensions_path_single_extension(self): cfg.CONF.set_override('api_extensions_path', 'path1') path = extensions.get_extensions_path() self.assertEqual(path, '%s:path1' % self.base_path) def test_get_extensions_path_multiple_extensions(self): cfg.CONF.set_override('api_extensions_path', 'path1:path2') path = extensions.get_extensions_path() self.assertEqual(path, '%s:path1:path2' % self.base_path) def test_get_extensions_path_duplicate_extensions(self): cfg.CONF.set_override('api_extensions_path', 'path1:path1') path = 
extensions.get_extensions_path() self.assertEqual(path, '%s:path1' % self.base_path) class ResourceExtensionTest(base.BaseTestCase): class ResourceExtensionController(wsgi.Controller): def index(self, request): return "resource index" def show(self, request, id): return {'data': {'id': id}} def notimplemented_function(self, request, id): return webob.exc.HTTPNotImplemented() def custom_member_action(self, request, id): return {'member_action': 'value'} def custom_collection_method(self, request, **kwargs): return {'collection': 'value'} def custom_collection_action(self, request, **kwargs): return {'collection': 'value'} class DummySvcPlugin(wsgi.Controller): @classmethod def get_plugin_type(cls): return dummy_plugin.DUMMY_SERVICE_TYPE def index(self, request, **kwargs): return "resource index" def custom_member_action(self, request, **kwargs): return {'member_action': 'value'} def collection_action(self, request, **kwargs): return {'collection': 'value'} def show(self, request, id): return {'data': {'id': id}} def test_exceptions_notimplemented(self): controller = self.ResourceExtensionController() member = {'notimplemented_function': "GET"} res_ext = extensions.ResourceExtension('tweedles', controller, member_actions=member) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) # Ideally we would check for a 501 code here but webtest doesn't take # anything that is below 200 or above 400 so we can't actually check # it. It throws webtest.AppError instead. try: test_app.get("/tweedles/some_id/notimplemented_function") # Shouldn't be reached self.fail() except webtest.AppError as e: self.assertIn('501', str(e)) def test_resource_can_be_added_as_extension(self): res_ext = extensions.ResourceExtension( 'tweedles', self.ResourceExtensionController()) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) index_response = test_app.get("/tweedles") self.assertEqual(200, index_response.status_int) self.assertEqual(b"resource index", index_response.body) show_response = test_app.get("/tweedles/25266") self.assertEqual({'data': {'id': "25266"}}, show_response.json) def test_resource_gets_prefix_of_plugin(self): class DummySvcPlugin(wsgi.Controller): def index(self, request): return "" @classmethod def get_plugin_type(cls): return dummy_plugin.DUMMY_SERVICE_TYPE res_ext = extensions.ResourceExtension( 'tweedles', DummySvcPlugin(), path_prefix="/dummy_svc") test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) index_response = test_app.get("/dummy_svc/tweedles") self.assertEqual(200, index_response.status_int) def test_resource_extension_with_custom_member_action(self): controller = self.ResourceExtensionController() member = {'custom_member_action': "GET"} res_ext = extensions.ResourceExtension('tweedles', controller, member_actions=member) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.get("/tweedles/some_id/custom_member_action") self.assertEqual(200, response.status_int) self.assertEqual(jsonutils.loads(response.body)['member_action'], "value") def test_resource_ext_with_custom_member_action_gets_plugin_prefix(self): controller = self.DummySvcPlugin() member = {'custom_member_action': "GET"} collections = {'collection_action': "GET"} res_ext = extensions.ResourceExtension('tweedles', controller, path_prefix="/dummy_svc", member_actions=member, collection_actions=collections) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = 
test_app.get("/dummy_svc/tweedles/1/custom_member_action") self.assertEqual(200, response.status_int) self.assertEqual(jsonutils.loads(response.body)['member_action'], "value") response = test_app.get("/dummy_svc/tweedles/collection_action") self.assertEqual(200, response.status_int) self.assertEqual(jsonutils.loads(response.body)['collection'], "value") def test_plugin_prefix_with_parent_resource(self): controller = self.DummySvcPlugin() parent = dict(member_name="tenant", collection_name="tenants") member = {'custom_member_action': "GET"} collections = {'collection_action': "GET"} res_ext = extensions.ResourceExtension('tweedles', controller, parent, path_prefix="/dummy_svc", member_actions=member, collection_actions=collections) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) index_response = test_app.get("/dummy_svc/tenants/1/tweedles") self.assertEqual(200, index_response.status_int) response = test_app.get("/dummy_svc/tenants/1/" "tweedles/1/custom_member_action") self.assertEqual(200, response.status_int) self.assertEqual(jsonutils.loads(response.body)['member_action'], "value") response = test_app.get("/dummy_svc/tenants/2/" "tweedles/collection_action") self.assertEqual(200, response.status_int) self.assertEqual(jsonutils.loads(response.body)['collection'], "value") def test_resource_extension_for_get_custom_collection_action(self): controller = self.ResourceExtensionController() collections = {'custom_collection_action': "GET"} res_ext = extensions.ResourceExtension('tweedles', controller, collection_actions=collections) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.get("/tweedles/custom_collection_action") self.assertEqual(200, response.status_int) LOG.debug(jsonutils.loads(response.body)) self.assertEqual(jsonutils.loads(response.body)['collection'], "value") def test_resource_extension_for_put_custom_collection_action(self): controller = self.ResourceExtensionController() collections = {'custom_collection_action': "PUT"} res_ext = extensions.ResourceExtension('tweedles', controller, collection_actions=collections) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.put("/tweedles/custom_collection_action") self.assertEqual(200, response.status_int) self.assertEqual(jsonutils.loads(response.body)['collection'], 'value') def test_resource_extension_for_post_custom_collection_action(self): controller = self.ResourceExtensionController() collections = {'custom_collection_action': "POST"} res_ext = extensions.ResourceExtension('tweedles', controller, collection_actions=collections) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.post("/tweedles/custom_collection_action") self.assertEqual(200, response.status_int) self.assertEqual(jsonutils.loads(response.body)['collection'], 'value') def test_resource_extension_for_delete_custom_collection_action(self): controller = self.ResourceExtensionController() collections = {'custom_collection_action': "DELETE"} res_ext = extensions.ResourceExtension('tweedles', controller, collection_actions=collections) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.delete("/tweedles/custom_collection_action") self.assertEqual(200, response.status_int) self.assertEqual(jsonutils.loads(response.body)['collection'], 'value') def test_resource_ext_for_formatted_req_on_custom_collection_action(self): controller = self.ResourceExtensionController() collections = 
{'custom_collection_action': "GET"} res_ext = extensions.ResourceExtension('tweedles', controller, collection_actions=collections) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.get("/tweedles/custom_collection_action.json") self.assertEqual(200, response.status_int) self.assertEqual(jsonutils.loads(response.body)['collection'], "value") def test_resource_ext_for_nested_resource_custom_collection_action(self): controller = self.ResourceExtensionController() collections = {'custom_collection_action': "GET"} parent = dict(collection_name='beetles', member_name='beetle') res_ext = extensions.ResourceExtension('tweedles', controller, collection_actions=collections, parent=parent) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.get("/beetles/beetle_id" "/tweedles/custom_collection_action") self.assertEqual(200, response.status_int) self.assertEqual(jsonutils.loads(response.body)['collection'], "value") def test_resource_extension_for_get_custom_collection_method(self): controller = self.ResourceExtensionController() collections = {'custom_collection_method': "GET"} res_ext = extensions.ResourceExtension('tweedles', controller, collection_methods=collections) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.get("/tweedles") self.assertEqual(200, response.status_int) self.assertEqual("value", jsonutils.loads(response.body)['collection']) def test_resource_extension_for_put_custom_collection_method(self): controller = self.ResourceExtensionController() collections = {'custom_collection_method': "PUT"} res_ext = extensions.ResourceExtension('tweedles', controller, collection_methods=collections) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.put("/tweedles") self.assertEqual(200, response.status_int) self.assertEqual('value', jsonutils.loads(response.body)['collection']) def test_resource_extension_for_post_custom_collection_method(self): controller = self.ResourceExtensionController() collections = {'custom_collection_method': "POST"} res_ext = extensions.ResourceExtension('tweedles', controller, collection_methods=collections) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.post("/tweedles") self.assertEqual(200, response.status_int) self.assertEqual('value', jsonutils.loads(response.body)['collection']) def test_resource_extension_for_delete_custom_collection_method(self): controller = self.ResourceExtensionController() collections = {'custom_collection_method': "DELETE"} res_ext = extensions.ResourceExtension('tweedles', controller, collection_methods=collections) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.delete("/tweedles") self.assertEqual(200, response.status_int) self.assertEqual('value', jsonutils.loads(response.body)['collection']) def test_resource_ext_for_formatted_req_on_custom_collection_method(self): controller = self.ResourceExtensionController() collections = {'custom_collection_method': "GET"} res_ext = extensions.ResourceExtension('tweedles', controller, collection_methods=collections) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.get("/tweedles.json") self.assertEqual(200, response.status_int) self.assertEqual("value", jsonutils.loads(response.body)['collection']) def test_resource_ext_for_nested_resource_custom_collection_method(self): controller = 
self.ResourceExtensionController() collections = {'custom_collection_method': "GET"} parent = {'collection_name': 'beetles', 'member_name': 'beetle'} res_ext = extensions.ResourceExtension('tweedles', controller, collection_methods=collections, parent=parent) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.get("/beetles/beetle_id/tweedles") self.assertEqual(200, response.status_int) self.assertEqual("value", jsonutils.loads(response.body)['collection']) def test_resource_extension_with_custom_member_action_and_attr_map(self): controller = self.ResourceExtensionController() member = {'custom_member_action': "GET"} params = { 'tweedles': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'default': '', 'is_visible': True}, } } res_ext = extensions.ResourceExtension('tweedles', controller, member_actions=member, attr_map=params) test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext)) response = test_app.get("/tweedles/some_id/custom_member_action") self.assertEqual(200, response.status_int) self.assertEqual(jsonutils.loads(response.body)['member_action'], "value") def test_returns_404_for_non_existent_extension(self): test_app = _setup_extensions_test_app(SimpleExtensionManager(None)) response = test_app.get("/non_extistant_extension", status='*') self.assertEqual(404, response.status_int) class ActionExtensionTest(base.BaseTestCase): def setUp(self): super(ActionExtensionTest, self).setUp() self.extension_app = _setup_extensions_test_app() def test_extended_action_for_adding_extra_data(self): action_name = 'FOXNSOX:add_tweedle' action_params = dict(name='Beetle') req_body = jsonutils.dumps({action_name: action_params}) response = self.extension_app.post('/dummy_resources/1/action', req_body, content_type='application/json') self.assertEqual(b"Tweedle Beetle Added.", response.body) def test_extended_action_for_deleting_extra_data(self): action_name = 'FOXNSOX:delete_tweedle' action_params = dict(name='Bailey') req_body = jsonutils.dumps({action_name: action_params}) response = self.extension_app.post("/dummy_resources/1/action", req_body, content_type='application/json') self.assertEqual(b"Tweedle Bailey Deleted.", response.body) def test_returns_404_for_non_existent_action(self): non_existent_action = 'blah_action' action_params = dict(name="test") req_body = jsonutils.dumps({non_existent_action: action_params}) response = self.extension_app.post("/dummy_resources/1/action", req_body, content_type='application/json', status='*') self.assertEqual(404, response.status_int) def test_returns_404_for_non_existent_resource(self): action_name = 'add_tweedle' action_params = dict(name='Beetle') req_body = jsonutils.dumps({action_name: action_params}) response = self.extension_app.post("/asdf/1/action", req_body, content_type='application/json', status='*') self.assertEqual(404, response.status_int) class RequestExtensionTest(base.BaseTestCase): def test_headers_can_be_extended(self): def extend_headers(req, res): assert req.headers['X-NEW-REQUEST-HEADER'] == "sox" res.headers['X-NEW-RESPONSE-HEADER'] = "response_header_data" return res app = self._setup_app_with_request_handler(extend_headers, 'GET') response = app.get("/dummy_resources/1", headers={'X-NEW-REQUEST-HEADER': "sox"}) self.assertEqual(response.headers['X-NEW-RESPONSE-HEADER'], "response_header_data") def test_extend_get_resource_response(self): 
def extend_response_data(req, res): data = jsonutils.loads(res.body) data['FOXNSOX:extended_key'] = req.GET.get('extended_key') res.body = jsonutils.dump_as_bytes(data) return res app = self._setup_app_with_request_handler(extend_response_data, 'GET') response = app.get("/dummy_resources/1?extended_key=extended_data") self.assertEqual(200, response.status_int) response_data = jsonutils.loads(response.body) self.assertEqual('extended_data', response_data['FOXNSOX:extended_key']) self.assertEqual('knox', response_data['fort']) def test_get_resources(self): app = _setup_extensions_test_app() response = app.get("/dummy_resources/1?chewing=newblue") response_data = jsonutils.loads(response.body) self.assertEqual('newblue', response_data['FOXNSOX:googoose']) self.assertEqual("Pig Bands!", response_data['FOXNSOX:big_bands']) def test_edit_previously_uneditable_field(self): def _update_handler(req, res): data = jsonutils.loads(res.body) data['uneditable'] = req.params['uneditable'] res.body = jsonutils.dump_as_bytes(data) return res base_app = webtest.TestApp(setup_base_app(self)) response = base_app.put("/dummy_resources/1", {'uneditable': "new_value"}) self.assertEqual(response.json['uneditable'], "original_value") ext_app = self._setup_app_with_request_handler(_update_handler, 'PUT') ext_response = ext_app.put("/dummy_resources/1", {'uneditable': "new_value"}) self.assertEqual(ext_response.json['uneditable'], "new_value") def _setup_app_with_request_handler(self, handler, verb): req_ext = extensions.RequestExtension(verb, '/dummy_resources/:(id)', handler) manager = SimpleExtensionManager(None, None, req_ext) return _setup_extensions_test_app(manager) class ExtensionManagerTest(base.BaseTestCase): def test_optional_extensions_no_error(self): ext_mgr = extensions.ExtensionManager('') attr_map = {} ext_mgr.add_extension(ext_stubs.StubExtension('foo_alias', optional=['cats'])) ext_mgr.extend_resources("2.0", attr_map) self.assertIn('foo_alias', ext_mgr.extensions) def test_missing_required_extensions_raise_error(self): ext_mgr = extensions.ExtensionManager('') attr_map = {} ext_mgr.add_extension(ext_stubs.StubExtensionWithReqs('foo_alias')) self.assertRaises(exceptions.ExtensionsNotFound, ext_mgr.extend_resources, "2.0", attr_map) def test_missing_required_extensions_gracefully_error(self): ext_mgr = extensions.ExtensionManager('') attr_map = {} default_ext = list(constants.DEFAULT_SERVICE_PLUGINS.values())[0] ext_mgr.add_extension(ext_stubs.StubExtensionWithReqs(default_ext)) ext_mgr.extend_resources("2.0", attr_map) # none of the default extensions should be loaded as their # requirements are not satisfied, and yet we do not fail. self.assertFalse(ext_mgr.extensions) def test__check_faulty_extensions_raise_not_default_ext(self): ext_mgr = extensions.ExtensionManager('') with testtools.ExpectedException(exceptions.ExtensionsNotFound): ext_mgr._check_faulty_extensions(set(['foo'])) def test_invalid_extensions_are_not_registered(self): class InvalidExtension(object): """Invalid extension. 
This Extension doesn't implement extension methods : get_name, get_description and get_updated """ def get_alias(self): return "invalid_extension" ext_mgr = extensions.ExtensionManager('') ext_mgr.add_extension(InvalidExtension()) ext_mgr.add_extension(ext_stubs.StubExtension("valid_extension")) self.assertIn('valid_extension', ext_mgr.extensions) self.assertNotIn('invalid_extension', ext_mgr.extensions) def test_assignment_of_attr_map(self): """Unit test for bug 1443342 In this bug, an extension that extended multiple resources with the same dict would cause future extensions to inadvertently modify the resources of all of the resources since they were referencing the same dictionary. """ class MultiResourceExtension(ext_stubs.StubExtension): """Generated Extended Resources. This extension's extended resource will assign to more than one resource. """ def get_extended_resources(self, version): EXTENDED_TIMESTAMP = { 'created_at': {'allow_post': False, 'allow_put': False, 'is_visible': True}} EXTENDED_RESOURCES = ["ext1", "ext2"] attrs = {} for resources in EXTENDED_RESOURCES: attrs[resources] = EXTENDED_TIMESTAMP return attrs class AttrExtension(ext_stubs.StubExtension): def get_extended_resources(self, version): attrs = { self.alias: { '%s-attr' % self.alias: {'allow_post': False, 'allow_put': False, 'is_visible': True}}} return attrs ext_mgr = extensions.ExtensionManager('') attr_map = {} ext_mgr.add_extension(MultiResourceExtension('timestamp')) ext_mgr.extend_resources("2.0", attr_map) ext_mgr.add_extension(AttrExtension("ext1")) ext_mgr.add_extension(AttrExtension("ext2")) ext_mgr.extend_resources("2.0", attr_map) self.assertIn('created_at', attr_map['ext2']) self.assertIn('created_at', attr_map['ext1']) # now we need to make sure the attrextensions didn't leak across self.assertNotIn('ext1-attr', attr_map['ext2']) self.assertNotIn('ext2-attr', attr_map['ext1']) def test_extension_extends_sub_resource(self): """Unit test for bug 1722842 Check that an extension can extend a sub-resource """ RESOURCE = "test_resource" SUB_RESOURCE_NAME = "test_sub_resource" INITIAL_PARAM = "dummy_param1" ADDITIONAL_PARAM = "dummy_param2" SUB_RESOURCE = { 'parent': {'member_name': RESOURCE}, 'parameters': { INITIAL_PARAM: {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True} } } class BaseExtension(ext_stubs.StubExtension): def get_extended_resources(self, version): return { SUB_RESOURCE_NAME: SUB_RESOURCE } class ExtensionExtendingASubresource(ext_stubs.StubExtension): def get_extended_resources(self, version): return { SUB_RESOURCE_NAME: { 'parameters': { ADDITIONAL_PARAM: {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True} } } } def get_required_extensions(self): return ['base_extension'] ext_mgr = extensions.ExtensionManager('') attr_map = {} ext_mgr.add_extension(BaseExtension('base_extension')) ext_mgr.add_extension( ExtensionExtendingASubresource()) ext_mgr.extend_resources("2.0", attr_map) # check that the parent descriptor is untouched self.assertEqual(SUB_RESOURCE['parent'], attr_map[SUB_RESOURCE_NAME]['parent']) # check that the initial attribute is still here self.assertIn(INITIAL_PARAM, attr_map[SUB_RESOURCE_NAME]['parameters']) # check that the new attribute is here as well self.assertIn(ADDITIONAL_PARAM, attr_map[SUB_RESOURCE_NAME]['parameters']) class PluginAwareExtensionManagerTest(base.BaseTestCase): def test_unsupported_extensions_are_not_loaded(self): stub_plugin = 
ext_stubs.StubPlugin(supported_extensions=["e1", "e3"]) plugin_info = {lib_const.CORE: stub_plugin} with mock.patch("neutron.api.extensions.PluginAwareExtensionManager." "check_if_plugin_extensions_loaded"): ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info) ext_mgr.add_extension(ext_stubs.StubExtension("e1")) ext_mgr.add_extension(ext_stubs.StubExtension("e2")) ext_mgr.add_extension(ext_stubs.StubExtension("e3")) self.assertIn("e1", ext_mgr.extensions) self.assertNotIn("e2", ext_mgr.extensions) self.assertIn("e3", ext_mgr.extensions) def test_extensions_are_not_loaded_for_plugins_unaware_of_extensions(self): class ExtensionUnawarePlugin(object): """This plugin does not implement supports_extension method. Extensions will not be loaded when this plugin is used. """ pass plugin_info = {lib_const.CORE: ExtensionUnawarePlugin()} ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info) ext_mgr.add_extension(ext_stubs.StubExtension("e1")) self.assertNotIn("e1", ext_mgr.extensions) def test_extensions_not_loaded_for_plugin_without_expected_interface(self): class PluginWithoutExpectedIface(object): """Does not implement get_foo method as expected by extension.""" supported_extension_aliases = ["supported_extension"] plugin_info = {lib_const.CORE: PluginWithoutExpectedIface()} with mock.patch("neutron.api.extensions.PluginAwareExtensionManager." "check_if_plugin_extensions_loaded"): ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info) ext_mgr.add_extension(ext_stubs.ExtensionExpectingPluginInterface( "supported_extension")) self.assertNotIn("e1", ext_mgr.extensions) def test_extensions_are_loaded_for_plugin_with_expected_interface(self): class PluginWithExpectedInterface(service_base.ServicePluginBase): """Implements get_foo method as expected by extension.""" supported_extension_aliases = ["supported_extension"] def get_foo(self, bar=None): pass def get_plugin_type(self): pass def get_plugin_description(self): pass plugin_info = {lib_const.CORE: PluginWithExpectedInterface()} with mock.patch("neutron.api.extensions.PluginAwareExtensionManager." "check_if_plugin_extensions_loaded"): ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info) ext_mgr.add_extension(ext_stubs.ExtensionExpectingPluginInterface( "supported_extension")) self.assertIn("supported_extension", ext_mgr.extensions) def test_extensions_expecting_neutron_plugin_interface_are_loaded(self): class ExtensionForQuamtumPluginInterface(ext_stubs.StubExtension): """This Extension does not implement get_plugin_interface method. This will work with any plugin implementing NeutronPluginBase """ pass stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1"]) plugin_info = {lib_const.CORE: stub_plugin} with mock.patch("neutron.api.extensions.PluginAwareExtensionManager." "check_if_plugin_extensions_loaded"): ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info) ext_mgr.add_extension(ExtensionForQuamtumPluginInterface("e1")) self.assertIn("e1", ext_mgr.extensions) def test_extensions_without_need_for__plugin_interface_are_loaded(self): class ExtensionWithNoNeedForPluginInterface(ext_stubs.StubExtension): """This Extension does not need any plugin interface. This will work with any plugin implementing NeutronPluginBase """ def get_plugin_interface(self): return None stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1"]) plugin_info = {lib_const.CORE: stub_plugin} with mock.patch("neutron.api.extensions.PluginAwareExtensionManager." 
"check_if_plugin_extensions_loaded"): ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info) ext_mgr.add_extension(ExtensionWithNoNeedForPluginInterface("e1")) self.assertIn("e1", ext_mgr.extensions) def test_extension_loaded_for_non_core_plugin(self): class NonCorePluginExtenstion(ext_stubs.StubExtension): def get_plugin_interface(self): return None stub_plugin = ext_stubs.StubPlugin(supported_extensions=["e1"]) plugin_info = {dummy_plugin.DUMMY_SERVICE_TYPE: stub_plugin} with mock.patch("neutron.api.extensions.PluginAwareExtensionManager." "check_if_plugin_extensions_loaded"): ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info) ext_mgr.add_extension(NonCorePluginExtenstion("e1")) self.assertIn("e1", ext_mgr.extensions) def test_unloaded_supported_extensions_raises_exception(self): stub_plugin = ext_stubs.StubPlugin( supported_extensions=["unloaded_extension"]) plugin_info = {lib_const.CORE: stub_plugin} self.assertRaises(exceptions.ExtensionsNotFound, extensions.PluginAwareExtensionManager, '', plugin_info) def test_custom_supported_implementation(self): self.useFixture(CustomExtensionCheckMapMemento()) class FakePlugin(object): pass class FakeExtension(ext_stubs.StubExtension): extensions.register_custom_supported_check( 'stub_extension', lambda: True, plugin_agnostic=True ) ext = FakeExtension() plugin_info = {lib_const.CORE: FakePlugin()} ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info) ext_mgr.add_extension(ext) self.assertIn("stub_extension", ext_mgr.extensions) extensions.register_custom_supported_check( 'stub_extension', lambda: False, plugin_agnostic=True ) ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info) ext_mgr.add_extension(ext) self.assertNotIn("stub_extension", ext_mgr.extensions) def test_custom_supported_implementation_plugin_specific(self): self.useFixture(CustomExtensionCheckMapMemento()) class FakePlugin(object): pass class FakeExtension(ext_stubs.StubExtension): extensions.register_custom_supported_check( 'stub_plugin_extension', lambda: True, plugin_agnostic=False ) plugin_info = {lib_const.CORE: FakePlugin()} self.assertRaises( exceptions.ExtensionsNotFound, extensions.PluginAwareExtensionManager, '', plugin_info) class ExtensionControllerTest(testlib_api.WebTestCase): def setUp(self): super(ExtensionControllerTest, self).setUp() self.test_app = _setup_extensions_test_app() def test_index_gets_all_registerd_extensions(self): response = self.test_app.get("/extensions." + self.fmt) res_body = self.deserialize(response) foxnsox = res_body["extensions"][0] self.assertEqual(foxnsox["alias"], "FOXNSOX") def test_extension_can_be_accessed_by_alias(self): response = self.test_app.get("/extensions/FOXNSOX." 
    def test_extension_can_be_accessed_by_alias(self):
        response = self.test_app.get("/extensions/FOXNSOX." + self.fmt)
        foxnsox_extension = self.deserialize(response)
        foxnsox_extension = foxnsox_extension['extension']
        self.assertEqual(foxnsox_extension["alias"], "FOXNSOX")

    def test_show_returns_not_found_for_non_existent_extension(self):
        response = self.test_app.get("/extensions/non_existent" + self.fmt,
                                     status="*")

        self.assertEqual(response.status_int, 404)


def app_factory(global_conf, **local_conf):
    conf = global_conf.copy()
    conf.update(local_conf)
    return ExtensionsTestApp(conf)


def setup_base_app(test):
    base.BaseTestCase.config_parse()
    app = config.load_paste_app('extensions_test_app')
    return app


def setup_extensions_middleware(extension_manager=None):
    extension_manager = (extension_manager or
                         extensions.PluginAwareExtensionManager(
                             extensions_path,
                             {lib_const.CORE: FakePluginWithExtension()}))
    base.BaseTestCase.config_parse()
    app = config.load_paste_app('extensions_test_app')
    return extensions.ExtensionMiddleware(app, ext_mgr=extension_manager)


def _setup_extensions_test_app(extension_manager=None):
    return webtest.TestApp(setup_extensions_middleware(extension_manager))


class SimpleExtensionManager(object):

    def __init__(self, resource_ext=None, action_ext=None, request_ext=None):
        self.resource_ext = resource_ext
        self.action_ext = action_ext
        self.request_ext = request_ext

    def get_resources(self):
        resource_exts = []
        if self.resource_ext:
            resource_exts.append(self.resource_ext)
        return resource_exts

    def get_actions(self):
        action_exts = []
        if self.action_ext:
            action_exts.append(self.action_ext)
        return action_exts

    def get_request_extensions(self):
        request_extensions = []
        if self.request_ext:
            request_extensions.append(self.request_ext)
        return request_extensions


class ExtensionExtendedAttributeTestPlugin(object):

    supported_extension_aliases = [
        'ext-obj-test', 'extended-ext-attr'
    ]

    def __init__(self, configfile=None):
        super(ExtensionExtendedAttributeTestPlugin, self).__init__()
        self.objs = []
        self.objh = {}

    def create_ext_test_resource(self, context, ext_test_resource):
        obj = ext_test_resource['ext_test_resource']
        id = _uuid()
        obj['id'] = id
        self.objs.append(obj)
        self.objh.update({id: obj})
        return obj

    def get_ext_test_resources(self, context, filters=None, fields=None):
        return self.objs

    def get_ext_test_resource(self, context, id, fields=None):
        return self.objh[id]


class ExtensionExtendedAttributeTestCase(base.BaseTestCase):

    def setUp(self):
        super(ExtensionExtendedAttributeTestCase, self).setUp()
        plugin = (
            "neutron.tests.unit.api.test_extensions."
"ExtensionExtendedAttributeTestPlugin" ) # point config file to: neutron/tests/etc/neutron.conf self.config_parse() self.setup_coreplugin(plugin) ext_mgr = extensions.PluginAwareExtensionManager( extensions_path, {lib_const.CORE: ExtensionExtendedAttributeTestPlugin()} ) ext_mgr.extend_resources("2.0", {}) extensions.PluginAwareExtensionManager._instance = ext_mgr app = config.load_paste_app('extensions_test_app') self._api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr) self._tenant_id = "8c70909f-b081-452d-872b-df48e6c355d1" self.agentscheduler_dbMinxin = directory.get_plugin() quota.QUOTAS._driver = None cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver', group='QUOTAS') def _do_request(self, method, path, data=None, params=None, action=None): content_type = 'application/json' body = None if data is not None: # empty dict is valid body = wsgi.Serializer().serialize(data, content_type) req = testlib_api.create_request( path, body, content_type, method, query_string=params) res = req.get_response(self._api) if res.status_code >= 400: raise webexc.HTTPClientError(detail=res.body, code=res.status_code) if res.status_code != webexc.HTTPNoContent.code: return res.json def _ext_test_resource_create(self, attr=None): data = { "ext_test_resource": { "tenant_id": self._tenant_id, "name": "test", extattr.EXTENDED_ATTRIBUTE: attr } } res = self._do_request('POST', _get_path('ext_test_resources'), data) return res['ext_test_resource'] def test_ext_test_resource_create(self): ext_test_resource = self._ext_test_resource_create() attr = _uuid() ext_test_resource = self._ext_test_resource_create(attr) self.assertEqual(ext_test_resource[extattr.EXTENDED_ATTRIBUTE], attr) def test_ext_test_resource_get(self): attr = _uuid() obj = self._ext_test_resource_create(attr) obj_id = obj['id'] res = self._do_request('GET', _get_path( 'ext_test_resources/{0}'.format(obj_id))) obj2 = res['ext_test_resource'] self.assertEqual(obj2[extattr.EXTENDED_ATTRIBUTE], attr) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4350457 neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/v2/0000755000175000017500000000000000000000000022056 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/v2/__init__.py0000644000175000017500000000000000000000000024155 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/v2/test_base.py0000644000175000017500000021530300000000000024405 0ustar00coreycorey00000000000000# Copyright (c) 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import mock from neutron_lib.api import attributes from neutron_lib.api import converters from neutron_lib.api.definitions import empty_string_filtering from neutron_lib.api.definitions import filter_validation from neutron_lib.callbacks import registry from neutron_lib import constants from neutron_lib import context from neutron_lib import exceptions as n_exc from neutron_lib import fixture from neutron_lib.plugins import directory from neutron_lib.tests.unit import fake_notifier from oslo_config import cfg from oslo_db import exception as db_exc from oslo_policy import policy as oslo_policy from oslo_utils import uuidutils import six from six.moves import urllib import webob from webob import exc import webtest from neutron.api import api_common from neutron.api import extensions from neutron.api.v2 import base as v2_base from neutron.api.v2 import router from neutron import policy from neutron import quota from neutron.quota import resource_registry from neutron.tests import base from neutron.tests import tools from neutron.tests.unit import dummy_plugin from neutron.tests.unit import testlib_api EXTDIR = os.path.join(base.ROOTDIR, 'unit/extensions') _uuid = uuidutils.generate_uuid def _get_path(resource, id=None, action=None, fmt=None, endpoint=None): path = '/%s' % resource if id is not None: path = path + '/%s' % id if action is not None: path = path + '/%s' % action if endpoint is not None: path = path + '/%s' % endpoint if fmt is not None: path = path + '.%s' % fmt return path class APIv2TestBase(base.BaseTestCase): def setUp(self): super(APIv2TestBase, self).setUp() plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2' # Ensure existing ExtensionManager is not used extensions.PluginAwareExtensionManager._instance = None # Create the default configurations self.config_parse() # Update the plugin self.setup_coreplugin(plugin, load_plugins=False) self._plugin_patcher = mock.patch(plugin, autospec=True) self.plugin = self._plugin_patcher.start() instance = self.plugin.return_value instance.supported_extension_aliases = [empty_string_filtering.ALIAS, filter_validation.ALIAS] instance._NeutronPluginBaseV2__native_pagination_support = True instance._NeutronPluginBaseV2__native_sorting_support = True instance._NeutronPluginBaseV2__filter_validation_support = True tools.make_mock_plugin_json_encodable(instance) api = router.APIRouter() self.api = webtest.TestApp(api) quota.QUOTAS._driver = None cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver', group='QUOTAS') # APIRouter initialization resets policy module, re-initializing it policy.init() class _ArgMatcher(object): """An adapter to assist mock assertions, used to custom compare.""" def __init__(self, cmp, obj): self.cmp = cmp self.obj = obj def __eq__(self, other): return self.cmp(self.obj, other) def _list_cmp(l1, l2): return set(l1) == set(l2) class APIv2TestCase(APIv2TestBase): @staticmethod def _get_policy_attrs(attr_info): policy_attrs = {name for (name, info) in attr_info.items() if info.get('required_by_policy')} if 'tenant_id' in policy_attrs: policy_attrs.add('project_id') return sorted(policy_attrs) def _do_field_list(self, resource, base_fields): attr_info = attributes.RESOURCES[resource] policy_attrs = self._get_policy_attrs(attr_info) for name, info in attr_info.items(): if info.get('primary_key'): policy_attrs.append(name) fields = base_fields fields.extend(policy_attrs) return fields def _get_collection_kwargs(self, skipargs=None, **kwargs): skipargs = skipargs or [] args_list = 
['filters', 'fields', 'sorts', 'limit', 'marker', 'page_reverse'] args_dict = dict( (arg, mock.ANY) for arg in set(args_list) - set(skipargs)) args_dict.update(kwargs) return args_dict def test_fields(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'fields': 'foo'}) fields = self._do_field_list('networks', ['foo']) kwargs = self._get_collection_kwargs(fields=fields) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_fields_multiple(self): instance = self.plugin.return_value instance.get_networks.return_value = [] fields = self._do_field_list('networks', ['bar', 'foo']) self.api.get(_get_path('networks'), {'fields': ['foo', 'bar']}) kwargs = self._get_collection_kwargs(fields=fields) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_fields_multiple_with_empty(self): instance = self.plugin.return_value instance.get_networks.return_value = [] fields = self._do_field_list('networks', ['foo']) self.api.get(_get_path('networks'), {'fields': ['foo', '']}) kwargs = self._get_collection_kwargs(fields=fields) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_fields_empty(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'fields': ''}) kwargs = self._get_collection_kwargs(fields=[]) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_fields_multiple_empty(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'fields': ['', '']}) kwargs = self._get_collection_kwargs(fields=[]) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_filters(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'name': 'bar'}) filters = {'name': ['bar']} kwargs = self._get_collection_kwargs(filters=filters) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_filters_empty(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'name': ''}) filters = {'name': ['']} kwargs = self._get_collection_kwargs(filters=filters) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_filters_multiple_empty(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'name': ['', '']}) filters = {'name': ['', '']} kwargs = self._get_collection_kwargs(filters=filters) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_filters_multiple_with_empty(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'name': ['bar', '']}) filters = {'name': ['bar', '']} kwargs = self._get_collection_kwargs(filters=filters) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_filters_multiple_values(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'name': ['bar', 'bar2']}) filters = {'name': ['bar', 'bar2']} kwargs = self._get_collection_kwargs(filters=filters) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_filters_multiple(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'name': 'bar', 'tenant_id': 'bar2'}) filters = {'name': 
['bar'], 'tenant_id': ['bar2']} kwargs = self._get_collection_kwargs(filters=filters) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_filters_with_fields(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'name': 'bar', 'fields': 'foo'}) filters = {'name': ['bar']} fields = self._do_field_list('networks', ['foo']) kwargs = self._get_collection_kwargs(filters=filters, fields=fields) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_filters_with_convert_to(self): instance = self.plugin.return_value instance.get_ports.return_value = [] self.api.get(_get_path('ports'), {'admin_state_up': 'true'}) filters = {'admin_state_up': [True]} kwargs = self._get_collection_kwargs(filters=filters) instance.get_ports.assert_called_once_with(mock.ANY, **kwargs) def test_filters_with_convert_list_to(self): instance = self.plugin.return_value instance.get_ports.return_value = [] self.api.get(_get_path('ports'), {'fixed_ips': ['ip_address=foo', 'subnet_id=bar']}) filters = {'fixed_ips': {'ip_address': ['foo'], 'subnet_id': ['bar']}} kwargs = self._get_collection_kwargs(filters=filters) instance.get_ports.assert_called_once_with(mock.ANY, **kwargs) def test_limit(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'limit': '10'}) kwargs = self._get_collection_kwargs(limit=10) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_limit_with_great_than_max_limit(self): cfg.CONF.set_default('pagination_max_limit', '1000') instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'limit': '1001'}) kwargs = self._get_collection_kwargs(limit=1000) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_limit_with_zero(self): cfg.CONF.set_default('pagination_max_limit', '1000') instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'limit': '0'}) kwargs = self._get_collection_kwargs(limit=1000) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_limit_with_unspecific(self): cfg.CONF.set_default('pagination_max_limit', '1000') instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks')) kwargs = self._get_collection_kwargs(limit=1000) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_limit_with_negative_value(self): cfg.CONF.set_default('pagination_max_limit', '1000') instance = self.plugin.return_value instance.get_networks.return_value = [] res = self.api.get(_get_path('networks'), {'limit': -1}, expect_errors=True) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_limit_with_non_integer(self): instance = self.plugin.return_value instance.get_networks.return_value = [] res = self.api.get(_get_path('networks'), {'limit': 'abc'}, expect_errors=True) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) self.assertIn('abc', res) def test_limit_with_infinite_pagination_max_limit(self): instance = self.plugin.return_value instance.get_networks.return_value = [] cfg.CONF.set_override('pagination_max_limit', 'Infinite') self.api.get(_get_path('networks')) kwargs = self._get_collection_kwargs(limit=None) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_limit_with_negative_pagination_max_limit(self): instance = self.plugin.return_value 
instance.get_networks.return_value = [] cfg.CONF.set_default('pagination_max_limit', '-1') self.api.get(_get_path('networks')) kwargs = self._get_collection_kwargs(limit=None) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_limit_with_non_integer_pagination_max_limit(self): instance = self.plugin.return_value instance.get_networks.return_value = [] cfg.CONF.set_default('pagination_max_limit', 'abc') self.api.get(_get_path('networks')) kwargs = self._get_collection_kwargs(limit=None) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_marker(self): cfg.CONF.set_override('pagination_max_limit', '1000') instance = self.plugin.return_value instance.get_networks.return_value = [] marker = _uuid() self.api.get(_get_path('networks'), {'marker': marker}) kwargs = self._get_collection_kwargs(limit=1000, marker=marker) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_page_reverse(self): calls = [] instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'page_reverse': 'True'}) kwargs = self._get_collection_kwargs(page_reverse=True) calls.append(mock.call.get_networks(mock.ANY, **kwargs)) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) instance.get_networks.reset_mock() self.api.get(_get_path('networks'), {'page_reverse': 'False'}) kwargs = self._get_collection_kwargs(page_reverse=False) calls.append(mock.call.get_networks(mock.ANY, **kwargs)) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_page_reverse_with_non_bool(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'page_reverse': 'abc'}) kwargs = self._get_collection_kwargs(page_reverse=False) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_page_reverse_with_unspecific(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks')) kwargs = self._get_collection_kwargs(page_reverse=False) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_sort(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'sort_key': ['name', 'admin_state_up'], 'sort_dir': ['desc', 'asc']}) kwargs = self._get_collection_kwargs(sorts=[('name', False), ('admin_state_up', True), ('id', True)]) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_sort_with_primary_key(self): instance = self.plugin.return_value instance.get_networks.return_value = [] self.api.get(_get_path('networks'), {'sort_key': ['name', 'admin_state_up', 'id'], 'sort_dir': ['desc', 'asc', 'desc']}) kwargs = self._get_collection_kwargs(sorts=[('name', False), ('admin_state_up', True), ('id', False)]) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_sort_without_direction(self): instance = self.plugin.return_value instance.get_networks.return_value = [] res = self.api.get(_get_path('networks'), {'sort_key': ['name']}, expect_errors=True) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_sort_with_invalid_attribute(self): instance = self.plugin.return_value instance.get_networks.return_value = [] res = self.api.get(_get_path('networks'), {'sort_key': 'abc', 'sort_dir': 'asc'}, expect_errors=True) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_sort_with_invalid_dirs(self): instance = self.plugin.return_value 
instance.get_networks.return_value = [] res = self.api.get(_get_path('networks'), {'sort_key': 'name', 'sort_dir': 'abc'}, expect_errors=True) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_emulated_sort(self): instance = self.plugin.return_value instance._NeutronPluginBaseV2__native_pagination_support = False instance._NeutronPluginBaseV2__native_sorting_support = False instance.get_networks.return_value = [] api = webtest.TestApp(router.APIRouter()) api.get(_get_path('networks'), {'sort_key': ['name', 'status'], 'sort_dir': ['desc', 'asc']}) kwargs = self._get_collection_kwargs( skipargs=['sorts', 'limit', 'marker', 'page_reverse']) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_emulated_sort_without_sort_field(self): instance = self.plugin.return_value instance._NeutronPluginBaseV2__native_pagination_support = False instance._NeutronPluginBaseV2__native_sorting_support = False instance.get_networks.return_value = [] api = webtest.TestApp(router.APIRouter()) api.get(_get_path('networks'), {'sort_key': ['name', 'status'], 'sort_dir': ['desc', 'asc'], 'fields': ['subnets']}) kwargs = self._get_collection_kwargs( skipargs=['sorts', 'limit', 'marker', 'page_reverse'], fields=_ArgMatcher(_list_cmp, ['name', 'status', 'id', 'subnets', 'shared', 'project_id', 'tenant_id'])) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_emulated_pagination(self): instance = self.plugin.return_value instance._NeutronPluginBaseV2__native_pagination_support = False instance.get_networks.return_value = [] api = webtest.TestApp(router.APIRouter()) api.get(_get_path('networks'), {'limit': 10, 'marker': 'foo', 'page_reverse': False}) kwargs = self._get_collection_kwargs(skipargs=['limit', 'marker', 'page_reverse']) instance.get_networks.assert_called_once_with(mock.ANY, **kwargs) def test_native_pagination_without_native_sorting(self): instance = self.plugin.return_value instance._NeutronPluginBaseV2__native_sorting_support = False self.assertRaises(n_exc.Invalid, router.APIRouter) # Note: since all resources use the same controller and validation # logic, we actually get really good coverage from testing just networks. 
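
# Editor's note: a minimal, self-contained sketch (not part of the original
# module) of the private-name mangling these tests rely on when they set
# attributes such as instance._NeutronPluginBaseV2__native_pagination_support:
# an attribute written as __name inside class C is stored under the mangled
# name _C__name, so test code can toggle it from outside the class. The
# _Plugin class below is illustrative only.
def _name_mangling_sketch():
    class _Plugin(object):
        __native_pagination_support = True  # stored as the mangled name

    # From outside the class the flag is reachable only via the mangled name.
    assert _Plugin._Plugin__native_pagination_support is True
    _Plugin._Plugin__native_pagination_support = False
    assert _Plugin._Plugin__native_pagination_support is False
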
class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase): def _test_list(self, req_tenant_id, real_tenant_id): env = {} if req_tenant_id: env = {'neutron.context': context.Context('', req_tenant_id)} input_dict = {'id': uuidutils.generate_uuid(), 'name': 'net1', 'admin_state_up': True, 'status': "ACTIVE", 'tenant_id': real_tenant_id, 'shared': False, 'subnets': []} return_value = [input_dict] instance = self.plugin.return_value instance.get_networks.return_value = return_value res = self.api.get(_get_path('networks', fmt=self.fmt), extra_environ=env) res = self.deserialize(res) self.assertIn('networks', res) if not req_tenant_id or req_tenant_id == real_tenant_id: # expect full list returned self.assertEqual(1, len(res['networks'])) output_dict = res['networks'][0] input_dict['shared'] = False self.assertEqual(len(input_dict), len(output_dict)) for k, v in input_dict.items(): self.assertEqual(v, output_dict[k]) else: # expect no results self.assertEqual(0, len(res['networks'])) def test_list_noauth(self): self._test_list(None, _uuid()) def test_list_keystone(self): tenant_id = _uuid() self._test_list(tenant_id, tenant_id) def test_list_keystone_bad(self): tenant_id = _uuid() self._test_list(tenant_id + "bad", tenant_id) def test_list_pagination(self): id1 = str(_uuid()) id2 = str(_uuid()) input_dict1 = {'id': id1, 'name': 'net1', 'admin_state_up': True, 'status': "ACTIVE", 'tenant_id': '', 'shared': False, 'subnets': []} input_dict2 = {'id': id2, 'name': 'net2', 'admin_state_up': True, 'status': "ACTIVE", 'tenant_id': '', 'shared': False, 'subnets': []} return_value = [input_dict1, input_dict2] instance = self.plugin.return_value instance.get_networks.return_value = return_value params = {'limit': ['2'], 'marker': [str(_uuid())], 'sort_key': ['name'], 'sort_dir': ['asc']} res = self.api.get(_get_path('networks'), params=params).json self.assertEqual(2, len(res['networks'])) self.assertEqual(sorted([id1, id2]), sorted([res['networks'][0]['id'], res['networks'][1]['id']])) self.assertIn('networks_links', res) next_links = [] previous_links = [] for r in res['networks_links']: if r['rel'] == 'next': next_links.append(r) if r['rel'] == 'previous': previous_links.append(r) self.assertEqual(1, len(next_links)) self.assertEqual(1, len(previous_links)) url = urllib.parse.urlparse(next_links[0]['href']) self.assertEqual(url.path, _get_path('networks')) params['marker'] = [id2] self.assertEqual(params, urllib.parse.parse_qs(url.query)) url = urllib.parse.urlparse(previous_links[0]['href']) self.assertEqual(url.path, _get_path('networks')) params['marker'] = [id1] params['page_reverse'] = ['True'] self.assertEqual(params, urllib.parse.parse_qs(url.query)) def test_list_pagination_with_last_page(self): id = str(_uuid()) input_dict = {'id': id, 'name': 'net1', 'admin_state_up': True, 'status': "ACTIVE", 'tenant_id': '', 'shared': False, 'subnets': []} return_value = [input_dict] instance = self.plugin.return_value instance.get_networks.return_value = return_value params = {'limit': ['2'], 'marker': str(_uuid())} res = self.api.get(_get_path('networks'), params=params).json self.assertEqual(1, len(res['networks'])) self.assertEqual(id, res['networks'][0]['id']) self.assertIn('networks_links', res) previous_links = [] for r in res['networks_links']: self.assertNotEqual(r['rel'], 'next') if r['rel'] == 'previous': previous_links.append(r) self.assertEqual(1, len(previous_links)) url = urllib.parse.urlparse(previous_links[0]['href']) self.assertEqual(url.path, _get_path('networks')) expect_params = 
params.copy() expect_params['marker'] = [id] expect_params['page_reverse'] = ['True'] self.assertEqual(expect_params, urllib.parse.parse_qs(url.query)) def test_list_pagination_with_empty_page(self): return_value = [] instance = self.plugin.return_value instance.get_networks.return_value = return_value params = {'limit': ['2'], 'marker': str(_uuid())} res = self.api.get(_get_path('networks'), params=params).json self.assertEqual([], res['networks']) previous_links = [] if 'networks_links' in res: for r in res['networks_links']: self.assertNotEqual(r['rel'], 'next') if r['rel'] == 'previous': previous_links.append(r) self.assertEqual(1, len(previous_links)) url = urllib.parse.urlparse(previous_links[0]['href']) self.assertEqual(url.path, _get_path('networks')) expect_params = params.copy() del expect_params['marker'] expect_params['page_reverse'] = ['True'] self.assertEqual(expect_params, urllib.parse.parse_qs(url.query)) def test_list_pagination_reverse_with_last_page(self): id = str(_uuid()) input_dict = {'id': id, 'name': 'net1', 'admin_state_up': True, 'status': "ACTIVE", 'tenant_id': '', 'shared': False, 'subnets': []} return_value = [input_dict] instance = self.plugin.return_value instance.get_networks.return_value = return_value params = {'limit': ['2'], 'marker': [str(_uuid())], 'page_reverse': ['True']} res = self.api.get(_get_path('networks'), params=params).json self.assertEqual(len(res['networks']), 1) self.assertEqual(id, res['networks'][0]['id']) self.assertIn('networks_links', res) next_links = [] for r in res['networks_links']: self.assertNotEqual(r['rel'], 'previous') if r['rel'] == 'next': next_links.append(r) self.assertEqual(1, len(next_links)) url = urllib.parse.urlparse(next_links[0]['href']) self.assertEqual(url.path, _get_path('networks')) expected_params = params.copy() del expected_params['page_reverse'] expected_params['marker'] = [id] self.assertEqual(expected_params, urllib.parse.parse_qs(url.query)) def test_list_pagination_reverse_with_empty_page(self): return_value = [] instance = self.plugin.return_value instance.get_networks.return_value = return_value params = {'limit': ['2'], 'marker': [str(_uuid())], 'page_reverse': ['True']} res = self.api.get(_get_path('networks'), params=params).json self.assertEqual([], res['networks']) next_links = [] if 'networks_links' in res: for r in res['networks_links']: self.assertNotEqual(r['rel'], 'previous') if r['rel'] == 'next': next_links.append(r) self.assertEqual(1, len(next_links)) url = urllib.parse.urlparse(next_links[0]['href']) self.assertEqual(url.path, _get_path('networks')) expect_params = params.copy() del expect_params['marker'] del expect_params['page_reverse'] self.assertEqual(expect_params, urllib.parse.parse_qs(url.query)) def test_create(self): net_id = _uuid() data = {'network': {'name': 'net1', 'admin_state_up': True, 'tenant_id': _uuid()}} return_value = {'subnets': [], 'status': "ACTIVE", 'id': net_id} return_value.update(data['network'].copy()) instance = self.plugin.return_value instance.create_network.return_value = return_value instance.get_networks_count.return_value = 0 res = self.api.post(_get_path('networks', fmt=self.fmt), self.serialize(data), content_type='application/' + self.fmt) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('network', res) net = res['network'] self.assertEqual(net_id, net['id']) self.assertEqual("ACTIVE", net['status']) def test_create_use_defaults(self): net_id = _uuid() tenant_id = _uuid() initial_input = {'network': 
{'name': 'net1', 'tenant_id': tenant_id, 'project_id': tenant_id}} full_input = {'network': {'admin_state_up': True, 'shared': False}} full_input['network'].update(initial_input['network']) return_value = {'id': net_id, 'status': "ACTIVE"} return_value.update(full_input['network']) instance = self.plugin.return_value instance.create_network.return_value = return_value instance.get_networks_count.return_value = 0 res = self.api.post(_get_path('networks', fmt=self.fmt), self.serialize(initial_input), content_type='application/' + self.fmt) instance.create_network.assert_called_with(mock.ANY, network=full_input) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('network', res) net = res['network'] self.assertEqual(net_id, net['id']) self.assertTrue(net['admin_state_up']) self.assertEqual("ACTIVE", net['status']) def test_create_no_keystone_env(self): data = {'name': 'net1'} self._test_create_failure_bad_request('networks', data) def test_create_with_keystone_env(self): tenant_id = _uuid() net_id = _uuid() env = {'neutron.context': context.Context('', tenant_id)} # tenant_id should be fetched from env initial_input = {'network': {'name': 'net1'}} full_input = {'network': {'admin_state_up': True, 'shared': False, 'tenant_id': tenant_id, 'project_id': tenant_id}} full_input['network'].update(initial_input['network']) return_value = {'id': net_id, 'status': "ACTIVE"} return_value.update(full_input['network']) instance = self.plugin.return_value instance.create_network.return_value = return_value instance.get_networks_count.return_value = 0 res = self.api.post(_get_path('networks', fmt=self.fmt), self.serialize(initial_input), content_type='application/' + self.fmt, extra_environ=env) instance.create_network.assert_called_with(mock.ANY, network=full_input) self.assertEqual(exc.HTTPCreated.code, res.status_int) def test_create_bad_keystone_tenant(self): tenant_id = _uuid() data = {'network': {'name': 'net1', 'tenant_id': tenant_id}} env = {'neutron.context': context.Context('', tenant_id + "bad")} self._test_create_failure_bad_request('networks', data, extra_environ=env) def test_create_no_body(self): data = {'whoa': None} self._test_create_failure_bad_request('networks', data) def test_create_body_string_not_json(self): data = 'a string' self._test_create_failure_bad_request('networks', data) def test_create_body_boolean_not_json(self): data = True self._test_create_failure_bad_request('networks', data) def test_create_no_resource(self): data = {} self._test_create_failure_bad_request('networks', data) def test_create_object_string_not_json(self): data = {'network': 'a string'} self._test_create_failure_bad_request('networks', data) def test_create_object_boolean_not_json(self): data = {'network': True} self._test_create_failure_bad_request('networks', data) def test_create_missing_attr(self): data = {'port': {'what': 'who', 'tenant_id': _uuid()}} self._test_create_failure_bad_request('ports', data) def test_create_readonly_attr(self): data = {'network': {'name': 'net1', 'tenant_id': _uuid(), 'status': "ACTIVE"}} self._test_create_failure_bad_request('networks', data) def test_create_with_too_long_name(self): data = {'network': {'name': "12345678" * 32, 'admin_state_up': True, 'tenant_id': _uuid()}} res = self.api.post(_get_path('networks', fmt=self.fmt), self.serialize(data), content_type='application/' + self.fmt, expect_errors=True) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_create_bulk(self): data = {'networks': [{'name': 
'net1', 'admin_state_up': True, 'tenant_id': _uuid()}, {'name': 'net2', 'admin_state_up': True, 'tenant_id': _uuid()}]} def side_effect(context, network): net = network.copy() net['network'].update({'subnets': []}) return net['network'] instance = self.plugin.return_value instance.create_network.side_effect = side_effect instance.get_networks_count.return_value = 0 res = self.api.post(_get_path('networks', fmt=self.fmt), self.serialize(data), content_type='application/' + self.fmt) self.assertEqual(exc.HTTPCreated.code, res.status_int) def _test_create_failure_bad_request(self, resource, data, **kwargs): res = self.api.post(_get_path(resource, fmt=self.fmt), self.serialize(data), content_type='application/' + self.fmt, expect_errors=True, **kwargs) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_create_bulk_networks_none(self): self._test_create_failure_bad_request('networks', {'networks': None}) def test_create_bulk_networks_empty_list(self): self._test_create_failure_bad_request('networks', {'networks': []}) def test_create_bulk_missing_attr(self): data = {'ports': [{'what': 'who', 'tenant_id': _uuid()}]} self._test_create_failure_bad_request('ports', data) def test_create_bulk_partial_body(self): data = {'ports': [{'device_id': 'device_1', 'tenant_id': _uuid()}, {'tenant_id': _uuid()}]} self._test_create_failure_bad_request('ports', data) def test_create_attr_not_specified(self): net_id = _uuid() tenant_id = _uuid() device_id = _uuid() initial_input = {'port': {'name': '', 'network_id': net_id, 'tenant_id': tenant_id, 'project_id': tenant_id, 'device_id': device_id, 'admin_state_up': True}} full_input = {'port': {'admin_state_up': True, 'mac_address': constants.ATTR_NOT_SPECIFIED, 'fixed_ips': constants.ATTR_NOT_SPECIFIED, 'device_owner': ''}} full_input['port'].update(initial_input['port']) return_value = {'id': _uuid(), 'status': 'ACTIVE', 'admin_state_up': True, 'mac_address': 'ca:fe:de:ad:be:ef', 'device_id': device_id, 'device_owner': ''} return_value.update(initial_input['port']) instance = self.plugin.return_value instance.get_network.return_value = { 'tenant_id': six.text_type(tenant_id) } instance.get_ports_count.return_value = 1 instance.create_port.return_value = return_value res = self.api.post(_get_path('ports', fmt=self.fmt), self.serialize(initial_input), content_type='application/' + self.fmt) instance.create_port.assert_called_with(mock.ANY, port=full_input) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('port', res) port = res['port'] self.assertEqual(net_id, port['network_id']) self.assertEqual('ca:fe:de:ad:be:ef', port['mac_address']) def test_create_return_extra_attr(self): net_id = _uuid() data = {'network': {'name': 'net1', 'admin_state_up': True, 'tenant_id': _uuid()}} return_value = {'subnets': [], 'status': "ACTIVE", 'id': net_id, 'v2attrs:something': "123"} return_value.update(data['network'].copy()) instance = self.plugin.return_value instance.create_network.return_value = return_value instance.get_networks_count.return_value = 0 res = self.api.post(_get_path('networks', fmt=self.fmt), self.serialize(data), content_type='application/' + self.fmt) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('network', res) net = res['network'] self.assertEqual(net_id, net['id']) self.assertEqual("ACTIVE", net['status']) self.assertNotIn('v2attrs:something', net) def test_fields(self): return_value = {'name': 'net1', 'admin_state_up': True, 'subnets': []} 
instance = self.plugin.return_value instance.get_network.return_value = return_value self.api.get(_get_path('networks', id=uuidutils.generate_uuid(), fmt=self.fmt)) def _test_delete(self, req_tenant_id, real_tenant_id, expected_code, expect_errors=False): env = {} if req_tenant_id: env = {'neutron.context': context.Context('', req_tenant_id)} instance = self.plugin.return_value instance.get_network.return_value = {'tenant_id': real_tenant_id, 'shared': False} instance.delete_network.return_value = None res = self.api.delete(_get_path('networks', id=uuidutils.generate_uuid(), fmt=self.fmt), extra_environ=env, expect_errors=expect_errors) self.assertEqual(expected_code, res.status_int) def test_delete_noauth(self): self._test_delete(None, _uuid(), exc.HTTPNoContent.code) def test_delete_keystone(self): tenant_id = _uuid() self._test_delete(tenant_id, tenant_id, exc.HTTPNoContent.code) def test_delete_keystone_bad_tenant(self): tenant_id = _uuid() self._test_delete(tenant_id + "bad", tenant_id, exc.HTTPNotFound.code, expect_errors=True) def _test_get(self, req_tenant_id, real_tenant_id, expected_code, expect_errors=False): env = {} shared = False if req_tenant_id: env = {'neutron.context': context.Context('', req_tenant_id)} if req_tenant_id.endswith('another'): shared = True env['neutron.context'].roles = ['tenant_admin'] data = {'tenant_id': real_tenant_id, 'shared': shared} instance = self.plugin.return_value instance.get_network.return_value = data res = self.api.get(_get_path('networks', id=uuidutils.generate_uuid(), fmt=self.fmt), extra_environ=env, expect_errors=expect_errors) self.assertEqual(expected_code, res.status_int) return res def test_get_noauth(self): self._test_get(None, _uuid(), 200) def test_get_keystone(self): tenant_id = _uuid() self._test_get(tenant_id, tenant_id, 200) def test_get_keystone_bad_tenant(self): tenant_id = _uuid() self._test_get(tenant_id + "bad", tenant_id, exc.HTTPNotFound.code, expect_errors=True) def test_get_keystone_shared_network(self): tenant_id = _uuid() self._test_get(tenant_id + "another", tenant_id, 200) def test_get_keystone_strip_admin_only_attribute(self): tenant_id = _uuid() # Inject rule in policy engine rules = oslo_policy.Rules.from_dict( {'get_network:name': "rule:admin_only"}) policy.set_rules(rules, overwrite=False) res = self._test_get(tenant_id, tenant_id, 200) res = self.deserialize(res) self.assertNotIn('name', res['network']) def _test_update(self, req_tenant_id, real_tenant_id, expected_code, expect_errors=False): env = {} if req_tenant_id: env = {'neutron.context': context.Context('', req_tenant_id)} # leave out 'name' field intentionally data = {'network': {'admin_state_up': True}} return_value = {'subnets': []} return_value.update(data['network'].copy()) instance = self.plugin.return_value instance.get_network.return_value = {'tenant_id': real_tenant_id, 'shared': False} instance.update_network.return_value = return_value res = self.api.put(_get_path('networks', id=uuidutils.generate_uuid(), fmt=self.fmt), self.serialize(data), extra_environ=env, expect_errors=expect_errors) # Ensure id attribute is included in fields returned by GET call # in update procedure. 
self.assertEqual(1, instance.get_network.call_count) self.assertIn('id', instance.get_network.call_args[1]['fields']) self.assertEqual(res.status_int, expected_code) def test_update_noauth(self): self._test_update(None, _uuid(), 200) def test_update_keystone(self): tenant_id = _uuid() self._test_update(tenant_id, tenant_id, 200) def test_update_keystone_bad_tenant(self): tenant_id = _uuid() self._test_update(tenant_id + "bad", tenant_id, exc.HTTPNotFound.code, expect_errors=True) def test_update_keystone_no_tenant(self): tenant_id = _uuid() self._test_update(tenant_id, None, exc.HTTPNotFound.code, expect_errors=True) def test_update_readonly_field(self): data = {'network': {'status': "NANANA"}} res = self.api.put(_get_path('networks', id=_uuid()), self.serialize(data), content_type='application/' + self.fmt, expect_errors=True) self.assertEqual(400, res.status_int) def test_invalid_attribute_field(self): data = {'network': {'invalid_key1': "foo1", 'invalid_key2': "foo2"}} res = self.api.put(_get_path('networks', id=_uuid()), self.serialize(data), content_type='application/' + self.fmt, expect_errors=True) self.assertEqual(400, res.status_int) def test_retry_on_index(self): instance = self.plugin.return_value instance.get_networks.side_effect = [db_exc.RetryRequest(None), []] api = webtest.TestApp(router.APIRouter()) api.get(_get_path('networks', fmt=self.fmt)) self.assertTrue(instance.get_networks.called) def test_retry_on_show(self): instance = self.plugin.return_value instance.get_network.side_effect = [db_exc.RetryRequest(None), {}] api = webtest.TestApp(router.APIRouter()) api.get(_get_path('networks', _uuid(), fmt=self.fmt)) self.assertTrue(instance.get_network.called) class SubresourceTest(base.BaseTestCase): def setUp(self): super(SubresourceTest, self).setUp() raise self.skipException('this class will be deleted') plugin = 'neutron.tests.unit.api.v2.test_base.TestSubresourcePlugin' extensions.PluginAwareExtensionManager._instance = None self.useFixture(fixture.APIDefinitionFixture()) self.config_parse() self.setup_coreplugin(plugin, load_plugins=False) self._plugin_patcher = mock.patch(plugin, autospec=True) self.plugin = self._plugin_patcher.start() api = router.APIRouter() SUB_RESOURCES = {} RESOURCE_ATTRIBUTE_MAP = {} SUB_RESOURCES[dummy_plugin.RESOURCE_NAME] = { 'collection_name': 'dummies', 'parent': {'collection_name': 'networks', 'member_name': 'network'} } RESOURCE_ATTRIBUTE_MAP['dummies'] = { 'foo': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'default': '', 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': None}, 'required_by_policy': True, 'is_visible': True} } collection_name = SUB_RESOURCES[ dummy_plugin.RESOURCE_NAME].get('collection_name') resource_name = dummy_plugin.RESOURCE_NAME parent = SUB_RESOURCES[dummy_plugin.RESOURCE_NAME].get('parent') params = RESOURCE_ATTRIBUTE_MAP['dummies'] member_actions = {'mactions': 'GET'} _plugin = directory.get_plugin() controller = v2_base.create_resource(collection_name, resource_name, _plugin, params, member_actions=member_actions, parent=parent, allow_bulk=True, allow_pagination=True, allow_sorting=True) path_prefix = "/%s/{%s_id}/%s" % (parent['collection_name'], parent['member_name'], collection_name) mapper_kwargs = dict(controller=controller, path_prefix=path_prefix) api.map.collection(collection_name, resource_name, **mapper_kwargs) api.map.resource(collection_name, collection_name, controller=controller, parent_resource=parent, 
member=member_actions) self.api = webtest.TestApp(api) def test_index_sub_resource(self): instance = self.plugin.return_value self.api.get('/networks/id1/dummies') instance.get_network_dummies.assert_called_once_with(mock.ANY, filters=mock.ANY, fields=mock.ANY, network_id='id1') def test_show_sub_resource(self): instance = self.plugin.return_value dummy_id = _uuid() self.api.get('/networks/id1' + _get_path('dummies', id=dummy_id)) instance.get_network_dummy.assert_called_once_with(mock.ANY, dummy_id, network_id='id1', fields=mock.ANY) def test_create_sub_resource(self): instance = self.plugin.return_value tenant_id = _uuid() body = { dummy_plugin.RESOURCE_NAME: { 'foo': 'bar', 'tenant_id': tenant_id, 'project_id': tenant_id } } self.api.post_json('/networks/id1/dummies', body) instance.create_network_dummy.assert_called_once_with(mock.ANY, network_id='id1', dummy=body) def test_update_sub_resource(self): instance = self.plugin.return_value dummy_id = _uuid() body = {dummy_plugin.RESOURCE_NAME: {'foo': 'bar'}} self.api.put_json('/networks/id1' + _get_path('dummies', id=dummy_id), body) instance.update_network_dummy.assert_called_once_with(mock.ANY, dummy_id, network_id='id1', dummy=body) def test_update_subresource_to_none(self): instance = self.plugin.return_value dummy_id = _uuid() body = {dummy_plugin.RESOURCE_NAME: {}} self.api.put_json('/networks/id1' + _get_path('dummies', id=dummy_id), body) instance.update_network_dummy.assert_called_once_with(mock.ANY, dummy_id, network_id='id1', dummy=body) def test_delete_sub_resource(self): instance = self.plugin.return_value dummy_id = _uuid() self.api.delete('/networks/id1' + _get_path('dummies', id=dummy_id)) instance.delete_network_dummy.assert_called_once_with(mock.ANY, dummy_id, network_id='id1') def test_sub_resource_member_actions(self): instance = self.plugin.return_value dummy_id = _uuid() self.api.get('/networks/id1' + _get_path('dummies', id=dummy_id, action='mactions')) instance.mactions.assert_called_once_with(mock.ANY, dummy_id, network_id='id1') # Note: since all resources use the same controller and validation # logic, we actually get really good coverage from testing just networks. 
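
# Editor's note: a small, self-contained sketch (not part of the original
# module) of how the sub-resource tests above derive their nested URL layout;
# the parent/collection values mirror the SUB_RESOURCES mapping built in
# SubresourceTest.setUp, and the path expression is the same one used there.
def _subresource_path_sketch():
    parent = {'collection_name': 'networks', 'member_name': 'network'}
    collection_name = 'dummies'
    # The parent member id is templated into the prefix, which is why the
    # tests issue requests such as GET /networks/id1/dummies.
    path_prefix = "/%s/{%s_id}/%s" % (parent['collection_name'],
                                      parent['member_name'],
                                      collection_name)
    assert path_prefix == "/networks/{network_id}/dummies"
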
class V2Views(base.BaseTestCase):

    def _view(self, keys, collection, resource):
        data = dict((key, 'value') for key in keys)
        data['fake'] = 'value'
        attr_info = attributes.RESOURCES[collection]
        controller = v2_base.Controller(None, collection, resource, attr_info)
        res = controller._view(context.get_admin_context(), data)
        self.assertNotIn('fake', res)
        for key in keys:
            self.assertIn(key, res)

    def test_network(self):
        keys = ('id', 'name', 'subnets', 'admin_state_up', 'status',
                'tenant_id')
        self._view(keys, 'networks', 'network')

    def test_port(self):
        keys = ('id', 'network_id', 'mac_address', 'fixed_ips', 'device_id',
                'admin_state_up', 'tenant_id', 'status')
        self._view(keys, 'ports', 'port')

    def test_subnet(self):
        keys = ('id', 'network_id', 'tenant_id', 'gateway_ip', 'ip_version',
                'cidr', 'enable_dhcp')
        self._view(keys, 'subnets', 'subnet')


class NotificationTest(APIv2TestBase):

    def setUp(self):
        super(NotificationTest, self).setUp()
        fake_notifier.reset()

    def _resource_op_notifier(self, opname, resource, expected_errors=False):
        initial_input = {resource: {'name': 'myname'}}
        instance = self.plugin.return_value
        instance.get_networks.return_value = initial_input
        instance.get_networks_count.return_value = 0
        expected_code = exc.HTTPCreated.code
        if opname == 'create':
            initial_input[resource]['tenant_id'] = _uuid()
            res = self.api.post_json(
                _get_path('networks'),
                initial_input, expect_errors=expected_errors)
        if opname == 'update':
            res = self.api.put_json(
                _get_path('networks', id=_uuid()),
                initial_input, expect_errors=expected_errors)
            expected_code = exc.HTTPOk.code
        if opname == 'delete':
            initial_input[resource]['tenant_id'] = _uuid()
            res = self.api.delete(
                _get_path('networks', id=_uuid()),
                expect_errors=expected_errors)
            expected_code = exc.HTTPNoContent.code
        expected_events = ('.'.join([resource, opname, "start"]),
                           '.'.join([resource, opname, "end"]))
        self.assertEqual(len(expected_events),
                         len(fake_notifier.NOTIFICATIONS))
        for msg, event in zip(fake_notifier.NOTIFICATIONS, expected_events):
            self.assertEqual('INFO', msg['priority'])
            self.assertEqual(event, msg['event_type'])
            if opname == 'delete' and event == 'network.delete.end':
                self.assertIn('payload', msg)
                resource = msg['payload']
                self.assertIn('network_id', resource)
                self.assertIn('network', resource)
        self.assertEqual(expected_code, res.status_int)

    def test_network_create_notifier(self):
        self._resource_op_notifier('create', 'network')

    def test_network_delete_notifier(self):
        self._resource_op_notifier('delete', 'network')

    def test_network_update_notifier(self):
        self._resource_op_notifier('update', 'network')


class RegistryNotificationTest(APIv2TestBase):

    def setUp(self):
        # This test does not have database support so tracking cannot be used
        cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS')
        super(RegistryNotificationTest, self).setUp()

    def _test_registry_notify(self, opname, resource, initial_input=None):
        instance = self.plugin.return_value
        instance.get_networks.return_value = initial_input
        instance.get_networks_count.return_value = 0
        expected_code = exc.HTTPCreated.code
        with mock.patch.object(registry, 'publish') as notify:
            if opname == 'create':
                res = self.api.post_json(
                    _get_path('networks'), initial_input)
            if opname == 'update':
                res = self.api.put_json(
                    _get_path('networks', id=_uuid()), initial_input)
                expected_code = exc.HTTPOk.code
            if opname == 'delete':
                res = self.api.delete(_get_path('networks', id=_uuid()))
                expected_code = exc.HTTPNoContent.code
            self.assertTrue(notify.called)
        self.assertEqual(expected_code, res.status_int)

    def
test_network_create_registry_notify(self): input = {'network': {'name': 'net', 'tenant_id': _uuid()}} self._test_registry_notify('create', 'network', input) def test_network_delete_registry_notify(self): self._test_registry_notify('delete', 'network') def test_network_update_registry_notify(self): input = {'network': {'name': 'net'}} self._test_registry_notify('update', 'network', input) def test_networks_create_bulk_registry_notify(self): input = {'networks': [{'name': 'net1', 'tenant_id': _uuid()}, {'name': 'net2', 'tenant_id': _uuid()}]} self._test_registry_notify('create', 'network', input) class QuotaTest(APIv2TestBase): def setUp(self): # This test does not have database support so tracking cannot be used cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS') super(QuotaTest, self).setUp() # Use mock to let the API use a different QuotaEngine instance for # unit test in this class. This will ensure resource are registered # again and instantiated with neutron.quota.resource.CountableResource replacement_registry = resource_registry.ResourceRegistry() registry_patcher = mock.patch('neutron.quota.resource_registry.' 'ResourceRegistry.get_instance') mock_registry = registry_patcher.start().return_value mock_registry.get_resource = replacement_registry.get_resource mock_registry.resources = replacement_registry.resources # Register a resource replacement_registry.register_resource_by_name('network') def test_create_network_quota(self): cfg.CONF.set_override('quota_network', 1, group='QUOTAS') initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}} full_input = {'network': {'admin_state_up': True, 'subnets': []}} full_input['network'].update(initial_input['network']) instance = self.plugin.return_value instance.get_networks_count.return_value = 1 res = self.api.post_json( _get_path('networks'), initial_input, expect_errors=True) instance.get_networks_count.assert_called_with(mock.ANY, filters=mock.ANY) self.assertIn("Quota exceeded for resources", res.json['NeutronError']['message']) def test_create_network_quota_no_counts(self): cfg.CONF.set_override('quota_network', 1, group='QUOTAS') initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}} full_input = {'network': {'admin_state_up': True, 'subnets': []}} full_input['network'].update(initial_input['network']) instance = self.plugin.return_value instance.get_networks_count.side_effect = ( NotImplementedError()) instance.get_networks.return_value = ["foo"] res = self.api.post_json( _get_path('networks'), initial_input, expect_errors=True) instance.get_networks_count.assert_called_with(mock.ANY, filters=mock.ANY) self.assertIn("Quota exceeded for resources", res.json['NeutronError']['message']) def test_create_network_quota_without_limit(self): cfg.CONF.set_override('quota_network', -1, group='QUOTAS') initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}} instance = self.plugin.return_value instance.get_networks_count.return_value = 3 res = self.api.post_json( _get_path('networks'), initial_input) self.assertEqual(exc.HTTPCreated.code, res.status_int) class ExtensionTestCase(base.BaseTestCase): def setUp(self): # This test does not have database support so tracking cannot be used cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS') super(ExtensionTestCase, self).setUp() plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2' # Ensure existing ExtensionManager is not used extensions.PluginAwareExtensionManager._instance = None self.useFixture(fixture.APIDefinitionFixture()) 
# Create the default configurations self.config_parse() # Update the plugin and extensions path self.setup_coreplugin(plugin, load_plugins=False) cfg.CONF.set_override('api_extensions_path', EXTDIR) self._plugin_patcher = mock.patch(plugin, autospec=True) self.plugin = self._plugin_patcher.start() # Instantiate mock plugin and enable the V2attributes extension self.plugin.return_value.supported_extension_aliases = ["v2attrs"] api = router.APIRouter() self.api = webtest.TestApp(api) quota.QUOTAS._driver = None cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver', group='QUOTAS') def test_extended_create(self): net_id = _uuid() tenant_id = _uuid() initial_input = {'network': {'name': 'net1', 'tenant_id': tenant_id, 'project_id': tenant_id, 'v2attrs:something_else': "abc"}} data = {'network': {'admin_state_up': True, 'shared': False}} data['network'].update(initial_input['network']) return_value = {'subnets': [], 'status': "ACTIVE", 'id': net_id, 'v2attrs:something': "123"} return_value.update(data['network'].copy()) instance = self.plugin.return_value instance.create_network.return_value = return_value instance.get_networks_count.return_value = 0 res = self.api.post_json(_get_path('networks'), initial_input) instance.create_network.assert_called_with(mock.ANY, network=data) self.assertEqual(exc.HTTPCreated.code, res.status_int) self.assertIn('network', res.json) net = res.json['network'] self.assertEqual(net_id, net['id']) self.assertEqual("ACTIVE", net['status']) self.assertEqual("123", net['v2attrs:something']) self.assertNotIn('v2attrs:something_else', net) class TestSubresourcePlugin(object): def get_network_dummies(self, context, network_id, filters=None, fields=None): return [] def get_network_dummy(self, context, id, network_id, fields=None): return {} def create_network_dummy(self, context, network_id, dummy): return {} def update_network_dummy(self, context, id, network_id, dummy): return {} def delete_network_dummy(self, context, id, network_id): return def mactions(self, context, id, network_id): return class ListArgsTestCase(base.BaseTestCase): def test_list_args(self): path = '/?fields=4&foo=3&fields=2&bar=1' request = webob.Request.blank(path) expect_val = ['2', '4'] actual_val = api_common.list_args(request, 'fields') self.assertEqual(expect_val, sorted(actual_val)) def test_list_args_with_empty(self): path = '/?foo=4&bar=3&baz=2&qux=1' request = webob.Request.blank(path) self.assertEqual([], api_common.list_args(request, 'fields')) class SortingTestCase(base.BaseTestCase): def test_get_sorts(self): path = '/?sort_key=foo&sort_dir=desc&sort_key=bar&sort_dir=asc' request = webob.Request.blank(path) attr_info = { 'foo': {'key': 'val', 'is_sort_key': True}, 'bar': {'key': 'val', 'is_sort_key': True} } expect_val = [('foo', False), ('bar', True)] actual_val = api_common.get_sorts(request, attr_info) self.assertEqual(expect_val, actual_val) def test_get_sorts_with_project_id(self): path = '/?sort_key=project_id&sort_dir=desc' request = webob.Request.blank(path) attr_info = {'tenant_id': {'key': 'val', 'is_sort_key': True}} expect_val = [('project_id', False)] actual_val = api_common.get_sorts(request, attr_info) self.assertEqual(expect_val, actual_val) def test_get_sorts_with_non_sort_key(self): path = '/?sort_key=created_at&sort_dir=desc' request = webob.Request.blank(path) attr_info = { 'foo': {'key': 'val', 'is_sort_key': True}, 'bar': {'key': 'val', 'is_sort_key': True}, 'created_at': {'key': 'val'} } self.assertRaises(exc.HTTPBadRequest, api_common.get_sorts, 
request, attr_info) class FiltersTestCase(base.BaseTestCase): def test_all_skip_args(self): path = '/?fields=4&fields=3&fields=2&fields=1' request = webob.Request.blank(path) self.assertEqual({}, api_common.get_filters(request, {}, ["fields"])) @mock.patch('neutron.api.api_common.is_empty_string_filtering_supported', return_value=False) def test_blank_values(self, mock_is_supported): path = '/?foo=&bar=&baz=&qux=' request = webob.Request.blank(path) self.assertEqual({}, api_common.get_filters(request, {})) @mock.patch('neutron.api.api_common.is_empty_string_filtering_supported', return_value=True) def test_blank_values_with_filtering_supported(self, mock_is_supported): path = '/?foo=&bar=&baz=&qux=' request = webob.Request.blank(path) self.assertEqual({'foo': [''], 'bar': [''], 'baz': [''], 'qux': ['']}, api_common.get_filters(request, {})) def test_no_attr_info(self): path = '/?foo=4&bar=3&baz=2&qux=1' request = webob.Request.blank(path) expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']} actual_val = api_common.get_filters(request, {}) self.assertEqual(expect_val, actual_val) def test_attr_info_with_project_info_populated(self): path = '/?foo=4&bar=3&baz=2&qux=1' request = webob.Request.blank(path) attr_info = {'tenant_id': {'key': 'val'}} expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']} actual_val = api_common.get_filters(request, attr_info) self.assertEqual(expect_val, actual_val) expect_attr_info = {'tenant_id': {'key': 'val'}, 'project_id': {'key': 'val'}} self.assertEqual(expect_attr_info, attr_info) @mock.patch('neutron.api.api_common.is_filter_validation_enabled', return_value=True) def test_attr_info_with_filter_validation(self, mock_validation_enabled): attr_info = {} self._test_attr_info(attr_info) attr_info = {'foo': {}} self._test_attr_info(attr_info) attr_info = {'foo': {'is_filter': False}} self._test_attr_info(attr_info) attr_info = {'foo': {'is_filter': False}, 'bar': {'is_filter': True}, 'baz': {'is_filter': True}, 'qux': {'is_filter': True}} self._test_attr_info(attr_info) attr_info = {'foo': {'is_filter': True}, 'bar': {'is_filter': True}, 'baz': {'is_filter': True}, 'qux': {'is_filter': True}} expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']} self._test_attr_info(attr_info, expect_val) attr_info = {'foo': {'is_filter': True}, 'bar': {'is_filter': True}, 'baz': {'is_filter': True}, 'qux': {'is_filter': True}, 'quz': {}} expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']} self._test_attr_info(attr_info, expect_val) attr_info = {'foo': {'is_filter': True}, 'bar': {'is_filter': True}, 'baz': {'is_filter': True}, 'qux': {'is_filter': True}, 'quz': {'is_filter': False}} expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']} self._test_attr_info(attr_info, expect_val) def _test_attr_info(self, attr_info, expect_val=None): path = '/?foo=4&bar=3&baz=2&qux=1' request = webob.Request.blank(path) if expect_val: actual_val = api_common.get_filters( request, attr_info, is_filter_validation_supported=True) self.assertEqual(expect_val, actual_val) else: self.assertRaises( exc.HTTPBadRequest, api_common.get_filters, request, attr_info, is_filter_validation_supported=True) def test_attr_info_without_conversion(self): path = '/?foo=4&bar=3&baz=2&qux=1' request = webob.Request.blank(path) attr_info = {'foo': {'key': 'val'}} expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']} actual_val = api_common.get_filters(request, attr_info) self.assertEqual(expect_val, actual_val) def 
test_attr_info_with_convert_list_to(self): path = '/?foo=key=4&bar=3&foo=key=2&qux=1' request = webob.Request.blank(path) attr_info = { 'foo': { 'convert_list_to': converters.convert_kvp_list_to_dict, } } expect_val = {'foo': {'key': ['2', '4']}, 'bar': ['3'], 'qux': ['1']} actual_val = api_common.get_filters(request, attr_info) self.assertOrderedEqual(expect_val, actual_val) def test_attr_info_with_convert_to(self): path = '/?foo=4&bar=3&baz=2&qux=1' request = webob.Request.blank(path) attr_info = {'foo': {'convert_to': converters.convert_to_int}} expect_val = {'foo': [4], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']} actual_val = api_common.get_filters(request, attr_info) self.assertEqual(expect_val, actual_val) def test_attr_info_with_base_db_attributes(self): path = '/?__contains__=1&__class__=2' request = webob.Request.blank(path) self.assertEqual({}, api_common.get_filters(request, {})) class CreateResourceTestCase(base.BaseTestCase): def test_resource_creation(self): resource = v2_base.create_resource('fakes', 'fake', None, {}) self.assertIsInstance(resource, webob.dec.wsgify) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/v2/test_resource.py0000644000175000017500000004025700000000000025326 0ustar00coreycorey00000000000000# Copyright (c) 2012 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
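# NOTE: Every test case below drives wsgi_resource.Resource through
# webtest, injecting the dispatched action via the
# 'wsgiorg.routing_args' environ key that the API router would normally
# populate. A minimal, hedged sketch of that pattern (the controller
# and the 'show' action here are illustrative only, mirroring
# test_status_200 further down):
def _resource_dispatch_sketch():
    import mock
    import webtest
    from neutron.api.v2 import resource as wsgi_resource
    controller = mock.MagicMock()
    controller.show = lambda request: {'foo': 'bar'}
    app = webtest.TestApp(wsgi_resource.Resource(controller))
    environ = {'wsgiorg.routing_args': (None, {'action': 'show'})}
    return app.get('', extra_environ=environ).status_int  # expected: 200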
import mock from neutron_lib import context from neutron_lib import exceptions as n_exc import oslo_i18n from webob import exc import webtest from neutron._i18n import _ from neutron.api.v2 import resource as wsgi_resource from neutron.common import utils from neutron.tests import base from neutron import wsgi class RequestTestCase(base.BaseTestCase): def setUp(self): super(RequestTestCase, self).setUp() self.req = wsgi_resource.Request({'foo': 'bar'}) def test_content_type_missing(self): request = wsgi.Request.blank('/tests/123', method='POST') request.body = b"" self.assertIsNone(request.get_content_type()) def test_content_type_with_charset(self): request = wsgi.Request.blank('/tests/123') request.headers["Content-Type"] = "application/json; charset=UTF-8" result = request.get_content_type() self.assertEqual("application/json", result) def test_content_type_with_partial_matched_string(self): request = wsgi.Request.blank('/tests/123') request.headers["Content-Type"] = "application/j" result = request.best_match_content_type() self.assertEqual("application/json", result) def test_content_type_from_accept(self): content_type = 'application/json' request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = content_type result = request.best_match_content_type() self.assertEqual(content_type, result) def test_content_type_from_accept_best(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/json" result = request.best_match_content_type() self.assertEqual("application/json", result) request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = ("application/json; q=0.3, " "application/xml; q=0.9") result = request.best_match_content_type() self.assertEqual("application/json", result) def test_content_type_from_query_extension(self): request = wsgi.Request.blank('/tests/123.json') result = request.best_match_content_type() self.assertEqual("application/json", result) request = wsgi.Request.blank('/tests/123.invalid') result = request.best_match_content_type() self.assertEqual("application/json", result) def test_content_type_accept_and_query_extension(self): request = wsgi.Request.blank('/tests/123.json') request.headers["Accept"] = "application/xml" result = request.best_match_content_type() self.assertEqual("application/json", result) def test_content_type_accept_default(self): request = wsgi.Request.blank('/tests/123.unsupported') request.headers["Accept"] = "application/unsupported1" result = request.best_match_content_type() self.assertEqual("application/json", result) def test_context_with_neutron_context(self): ctxt = context.Context('fake_user', 'fake_tenant') self.req.environ['neutron.context'] = ctxt self.assertEqual(self.req.context, ctxt) def test_context_without_neutron_context(self): self.assertTrue(self.req.context.is_admin) def test_request_context_elevated(self): user_context = context.Context( 'fake_user', 'fake_project', is_admin=False) self.assertFalse(user_context.is_admin) admin_context = user_context.elevated() self.assertFalse(user_context.is_admin) self.assertTrue(admin_context.is_admin) self.assertNotIn('admin', user_context.roles) self.assertIn('admin', admin_context.roles) def test_best_match_language(self): # Test that we are actually invoking language negotiation by webob request = wsgi.Request.blank('/') oslo_i18n.get_available_languages = mock.MagicMock() oslo_i18n.get_available_languages.return_value = ['known-language', 'es', 'zh'] request.headers['Accept-Language'] = 'known-language' language = 
request.best_match_language() self.assertEqual('known-language', language) # If the Accept-Language is an unknown language, missing or empty, # the best match locale should be None request.headers['Accept-Language'] = 'unknown-language' language = request.best_match_language() self.assertIsNone(language) request.headers['Accept-Language'] = '' language = request.best_match_language() self.assertIsNone(language) request.headers.pop('Accept-Language') language = request.best_match_language() self.assertIsNone(language) class ResourceTestCase(base.BaseTestCase): @staticmethod def _get_deserializer(): return wsgi.JSONDeserializer() def test_unmapped_neutron_error_with_json(self): msg = u'\u7f51\u7edc' class TestException(n_exc.NeutronException): message = msg expected_res = {'body': { 'NeutronError': { 'type': 'TestException', 'message': msg, 'detail': ''}}} controller = mock.MagicMock() controller.test.side_effect = TestException() resource = webtest.TestApp(wsgi_resource.Resource(controller)) environ = {'wsgiorg.routing_args': (None, {'action': 'test', 'format': 'json'})} res = resource.get('', extra_environ=environ, expect_errors=True) self.assertEqual(exc.HTTPInternalServerError.code, res.status_int) self.assertEqual(expected_res, wsgi.JSONDeserializer().deserialize(res.body)) @mock.patch('oslo_i18n.translate') def test_unmapped_neutron_error_localized(self, mock_translation): msg_translation = 'Translated error' mock_translation.return_value = msg_translation msg = _('Unmapped error') class TestException(n_exc.NeutronException): message = msg controller = mock.MagicMock() controller.test.side_effect = TestException() resource = webtest.TestApp(wsgi_resource.Resource(controller)) environ = {'wsgiorg.routing_args': (None, {'action': 'test', 'format': 'json'})} res = resource.get('', extra_environ=environ, expect_errors=True) self.assertEqual(exc.HTTPInternalServerError.code, res.status_int) self.assertIn(msg_translation, str(wsgi.JSONDeserializer().deserialize(res.body))) def test_mapped_neutron_error_with_json(self): msg = u'\u7f51\u7edc' class TestException(n_exc.NeutronException): message = msg expected_res = {'body': { 'NeutronError': { 'type': 'TestException', 'message': msg, 'detail': ''}}} controller = mock.MagicMock() controller.test.side_effect = TestException() faults = {TestException: exc.HTTPGatewayTimeout} resource = webtest.TestApp(wsgi_resource.Resource(controller, faults=faults)) environ = {'wsgiorg.routing_args': (None, {'action': 'test', 'format': 'json'})} res = resource.get('', extra_environ=environ, expect_errors=True) self.assertEqual(exc.HTTPGatewayTimeout.code, res.status_int) self.assertEqual(expected_res, wsgi.JSONDeserializer().deserialize(res.body)) @mock.patch('oslo_i18n.translate') def test_mapped_neutron_error_localized(self, mock_translation): msg_translation = 'Translated error' mock_translation.return_value = msg_translation msg = _('Unmapped error') class TestException(n_exc.NeutronException): message = msg controller = mock.MagicMock() controller.test.side_effect = TestException() faults = {TestException: exc.HTTPGatewayTimeout} resource = webtest.TestApp(wsgi_resource.Resource(controller, faults=faults)) environ = {'wsgiorg.routing_args': (None, {'action': 'test', 'format': 'json'})} res = resource.get('', extra_environ=environ, expect_errors=True) self.assertEqual(exc.HTTPGatewayTimeout.code, res.status_int) self.assertIn(msg_translation, str(wsgi.JSONDeserializer().deserialize(res.body))) @staticmethod def _make_request_with_side_effect(side_effect): 
controller = mock.MagicMock() controller.test.side_effect = side_effect resource = webtest.TestApp(wsgi_resource.Resource(controller)) routing_args = {'action': 'test'} environ = {'wsgiorg.routing_args': (None, routing_args)} res = resource.get('', extra_environ=environ, expect_errors=True) return res def test_http_error(self): res = self._make_request_with_side_effect(exc.HTTPGatewayTimeout()) # verify that the exception structure is the one expected # by the python-neutronclient self.assertEqual(exc.HTTPGatewayTimeout().explanation, res.json['NeutronError']['message']) self.assertEqual('HTTPGatewayTimeout', res.json['NeutronError']['type']) self.assertEqual('', res.json['NeutronError']['detail']) self.assertEqual(exc.HTTPGatewayTimeout.code, res.status_int) def test_unhandled_error(self): expected_res = {'body': {'NeutronError': {'detail': '', 'message': _( 'Request Failed: internal server ' 'error while processing your request.'), 'type': 'HTTPInternalServerError'}}} res = self._make_request_with_side_effect(side_effect=Exception()) self.assertEqual(exc.HTTPInternalServerError.code, res.status_int) self.assertEqual(expected_res, self._get_deserializer().deserialize(res.body)) def test_not_implemented_error(self): expected_res = {'body': {'NeutronError': {'detail': '', 'message': _( 'The server has either erred or is ' 'incapable of performing the requested ' 'operation.'), 'type': 'HTTPNotImplemented'}}} res = self._make_request_with_side_effect(exc.HTTPNotImplemented()) self.assertEqual(exc.HTTPNotImplemented.code, res.status_int) self.assertEqual(expected_res, self._get_deserializer().deserialize(res.body)) def test_status_200(self): controller = mock.MagicMock() controller.test = lambda request: {'foo': 'bar'} resource = webtest.TestApp(wsgi_resource.Resource(controller)) environ = {'wsgiorg.routing_args': (None, {'action': 'test'})} res = resource.get('', extra_environ=environ) self.assertEqual(200, res.status_int) def _test_unhandled_error_logs_details(self, e, expected_details): with mock.patch.object(wsgi_resource.LOG, 'exception') as log: self._make_request_with_side_effect(side_effect=e) log.assert_called_with( mock.ANY, {'action': mock.ANY, 'details': expected_details}) def test_unhandled_error_logs_attached_details(self): e = Exception() utils.attach_exc_details(e, 'attached_details') self._test_unhandled_error_logs_details(e, 'attached_details') def test_unhandled_error_logs_no_attached_details(self): e = Exception() self._test_unhandled_error_logs_details(e, 'No details.') def test_status_204(self): controller = mock.MagicMock() controller.test = lambda request: {'foo': 'bar'} resource = webtest.TestApp(wsgi_resource.Resource(controller)) environ = {'wsgiorg.routing_args': (None, {'action': 'delete'})} res = resource.delete('', extra_environ=environ) self.assertEqual(204, res.status_int) def test_action_status(self): controller = mock.MagicMock() controller.test = lambda request: {'foo': 'bar'} action_status = {'test_200': 200, 'test_201': 201, 'test_204': 204} resource = webtest.TestApp( wsgi_resource.Resource(controller, action_status=action_status)) for action in action_status: environ = {'wsgiorg.routing_args': (None, {'action': action})} res = resource.get('', extra_environ=environ) self.assertEqual(action_status[action], res.status_int) def _test_error_log_level(self, expected_webob_exc, expect_log_info=False, use_fault_map=True, exc_raised=None): if not exc_raised: class TestException(n_exc.NeutronException): message = 'Test Exception' exc_raised = TestException 
controller = mock.MagicMock() controller.test.side_effect = exc_raised() faults = {exc_raised: expected_webob_exc} if use_fault_map else {} resource = webtest.TestApp(wsgi_resource.Resource(controller, faults)) environ = {'wsgiorg.routing_args': (None, {'action': 'test'})} with mock.patch.object(wsgi_resource, 'LOG') as log: res = resource.get('', extra_environ=environ, expect_errors=True) self.assertEqual(expected_webob_exc.code, res.status_int) self.assertEqual(expect_log_info, log.info.called) self.assertNotEqual(expect_log_info, log.exception.called) def test_4xx_error_logged_info_level(self): self._test_error_log_level(exc.HTTPNotFound, expect_log_info=True) def test_non_4xx_error_logged_exception_level(self): self._test_error_log_level(exc.HTTPServiceUnavailable, expect_log_info=False) def test_unmapped_error_logged_exception_level(self): self._test_error_log_level(exc.HTTPInternalServerError, expect_log_info=False, use_fault_map=False) def test_webob_4xx_logged_info_level(self): self._test_error_log_level(exc.HTTPNotFound, use_fault_map=False, expect_log_info=True, exc_raised=exc.HTTPNotFound) def test_webob_5xx_logged_info_level(self): self._test_error_log_level(exc.HTTPServiceUnavailable, use_fault_map=False, expect_log_info=False, exc_raised=exc.HTTPServiceUnavailable) def test_no_route_args(self): controller = mock.MagicMock() resource = webtest.TestApp(wsgi_resource.Resource(controller)) environ = {} res = resource.get('', extra_environ=environ, expect_errors=True) self.assertEqual(exc.HTTPInternalServerError.code, res.status_int) def test_post_with_body(self): controller = mock.MagicMock() controller.test = lambda request, body: {'foo': 'bar'} resource = webtest.TestApp(wsgi_resource.Resource(controller)) environ = {'wsgiorg.routing_args': (None, {'action': 'test'})} res = resource.post('', params='{"key": "val"}', extra_environ=environ) self.assertEqual(200, res.status_int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/api/v2/test_router.py0000644000175000017500000000174600000000000025017 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
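# NOTE: TestRouter below stacks two mock.patch decorators. Decorators
# are applied bottom-up, so the innermost patch is handed to the test
# method as the first mock argument (pecan_mock), and the outermost as
# the second (legacy_mock). A hedged, dependency-free sketch of that
# ordering rule (the patched targets here are illustrative only):
def _patch_order_sketch():
    import mock

    @mock.patch('os.getcwd')   # outer decorator -> second argument
    @mock.patch('os.listdir')  # inner decorator -> first argument
    def probe(listdir_mock, getcwd_mock):
        import os
        # Each argument really is the mock installed for its target.
        return os.listdir is listdir_mock and os.getcwd is getcwd_mock

    return probe()  # expected: True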
import mock from neutron.api.v2 import router from neutron.tests import base @mock.patch('neutron.api.v2.router.APIRouter.__init__', return_value=None) @mock.patch('neutron.pecan_wsgi.app.v2_factory') class TestRouter(base.BaseTestCase): def test_pecan_factory(self, pecan_mock, legacy_mock): router.APIRouter.factory({}) pecan_mock.assert_called_once_with({}) legacy_mock.assert_not_called() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4350457 neutron-16.0.0.0b2.dev214/neutron/tests/unit/cmd/0000755000175000017500000000000000000000000021521 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/cmd/__init__.py0000644000175000017500000000000000000000000023620 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/cmd/test_netns_cleanup.py0000644000175000017500000004172000000000000025774 0ustar00coreycorey00000000000000# Copyright (c) 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import signal import mock import testtools from neutron.cmd import netns_cleanup as util from neutron.tests import base NETSTAT_NETNS_OUTPUT = (""" Active Internet connections (only servers) Proto Recv-Q Send-Q Local Address Foreign Address State\ PID/Program name tcp 0 0 0.0.0.0:9697 0.0.0.0:* LISTEN\ 1347/python raw 0 0 0.0.0.0:112 0.0.0.0:* 7\ 1279/keepalived raw 0 0 0.0.0.0:112 0.0.0.0:* 7\ 1279/keepalived raw6 0 0 :::58 :::* 7\ 1349/radvd Active UNIX domain sockets (only servers) Proto RefCnt Flags Type State I-Node PID/Program name\ Path unix 2 [ ACC ] STREAM LISTENING 82039530 1353/python\ /tmp/rootwrap-VKSm8a/rootwrap.sock """) NETSTAT_NO_NAMESPACE = (""" Cannot open network namespace "qrouter-e6f206b2-4e8d-4597-a7e1-c3a20337e9c6":\ No such file or directory """) NETSTAT_NO_LISTEN_PROCS = (""" Active Internet connections (only servers) Proto Recv-Q Send-Q Local Address Foreign Address State\ PID/Program name Active UNIX domain sockets (only servers) Proto RefCnt Flags Type State I-Node PID/Program name\ Path """) class TestNetnsCleanup(base.BaseTestCase): def setUp(self): super(TestNetnsCleanup, self).setUp() conn_patcher = mock.patch( 'neutron.agent.ovsdb.impl_idl._connection') conn_patcher.start() self.addCleanup(conn_patcher.stop) def test_kill_dhcp(self, dhcp_active=True): conf = mock.Mock() conf.dhcp_driver = 'driver' method_to_patch = 'oslo_utils.importutils.import_object' with mock.patch(method_to_patch) as import_object: driver = mock.Mock() driver.active = dhcp_active import_object.return_value = driver util.kill_dhcp(conf, 'ns') expected_params = {'conf': conf, 'network': mock.ANY, 'process_monitor': mock.ANY, 'plugin': mock.ANY} import_object.assert_called_once_with('driver', **expected_params) if dhcp_active: 
driver.assert_has_calls([mock.call.disable()]) else: self.assertFalse(driver.called) def test_kill_dhcp_no_active(self): self.test_kill_dhcp(False) def test_eligible_for_deletion_ns_not_uuid(self): conf = mock.Mock() conf.agent_type = None ns = 'not_a_uuid' self.assertFalse(util.eligible_for_deletion(conf, ns)) def _test_eligible_for_deletion_helper(self, prefix, force, is_empty, expected): ns = prefix + '6e322ac7-ab50-4f53-9cdc-d1d3c1164b6d' conf = mock.Mock() conf.agent_type = None with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap: ip_wrap.return_value.namespace_is_empty.return_value = is_empty self.assertEqual(expected, util.eligible_for_deletion(conf, ns, force)) expected_calls = [mock.call(namespace=ns)] if not force: expected_calls.append(mock.call().namespace_is_empty()) ip_wrap.assert_has_calls(expected_calls) def test_eligible_for_deletion_empty(self): self._test_eligible_for_deletion_helper('qrouter-', False, True, True) def test_eligible_for_deletion_not_empty(self): self._test_eligible_for_deletion_helper('qdhcp-', False, False, False) def test_eligible_for_deletion_not_empty_forced(self): self._test_eligible_for_deletion_helper('qdhcp-', True, False, True) def test_eligible_for_deletion_fip_namespace(self): self._test_eligible_for_deletion_helper('fip-', False, True, True) def test_eligible_for_deletion_snat_namespace(self): self._test_eligible_for_deletion_helper('snat-', False, True, True) def test_eligible_for_deletion_filtered_by_agent_type(self): ns_dhcp = 'qdhcp-' + '6e322ac7-ab50-4f53-9cdc-d1d3c1164b6d' ns_l3 = 'qrouter-' + '6e322ac7-ab50-4f53-9cdc-d1d3c1164b6d' conf = mock.Mock() conf.agent_type = 'dhcp' with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap: ip_wrap.return_value.namespace_is_empty.return_value = True self.assertTrue(util.eligible_for_deletion(conf, ns_dhcp, False)) self.assertFalse(util.eligible_for_deletion(conf, ns_l3, False)) expected_calls = [mock.call(namespace=ns_dhcp), mock.call().namespace_is_empty()] ip_wrap.assert_has_calls(expected_calls) def test_unplug_device_regular_device(self): device = mock.Mock() util.unplug_device(device) device.assert_has_calls([mock.call.link.delete()]) def test_unplug_device_ovs_port(self): device = mock.Mock() device.name = 'tap1' device.link.delete.side_effect = RuntimeError with mock.patch( 'neutron.agent.common.ovs_lib.OVSBridge') as ovs_br_cls: br_patch = mock.patch( 'neutron.agent.common.ovs_lib.BaseOVS.get_bridge_for_iface') with br_patch as mock_get_bridge_for_iface: mock_get_bridge_for_iface.return_value = 'br-int' ovs_bridge = mock.Mock() ovs_br_cls.return_value = ovs_bridge util.unplug_device(device) mock_get_bridge_for_iface.assert_called_once_with('tap1') ovs_br_cls.assert_called_once_with('br-int') ovs_bridge.assert_has_calls( [mock.call.delete_port(device.name)]) def test_unplug_device_cannot_determine_bridge_port(self): device = mock.Mock() device.name = 'tap1' device.link.delete.side_effect = RuntimeError with mock.patch( 'neutron.agent.common.ovs_lib.OVSBridge') as ovs_br_cls: br_patch = mock.patch( 'neutron.agent.common.ovs_lib.BaseOVS.get_bridge_for_iface') with br_patch as mock_get_bridge_for_iface: with mock.patch.object(util.LOG, 'debug') as debug: mock_get_bridge_for_iface.return_value = None ovs_bridge = mock.Mock() ovs_br_cls.return_value = ovs_bridge util.unplug_device(device) mock_get_bridge_for_iface.assert_called_once_with('tap1') self.assertEqual([], ovs_br_cls.mock_calls) self.assertTrue(debug.called) def _test_find_listen_pids_namespace_helper(self, 
expected, netstat_output=None): with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap: ip_wrap.return_value.netns.execute.return_value = netstat_output observed = util.find_listen_pids_namespace(mock.ANY) self.assertEqual(expected, observed) def test_find_listen_pids_namespace_correct_output(self): expected = set(['1347', '1279', '1349', '1353']) self._test_find_listen_pids_namespace_helper(expected, NETSTAT_NETNS_OUTPUT) def test_find_listen_pids_namespace_no_procs(self): expected = set() self._test_find_listen_pids_namespace_helper(expected, NETSTAT_NO_LISTEN_PROCS) def test_find_listen_pids_namespace_no_namespace(self): expected = set() self._test_find_listen_pids_namespace_helper(expected, NETSTAT_NO_NAMESPACE) def _test__kill_listen_processes_helper(self, pids, parents, children, kills_expected, force): def _get_element(dct, x): return dct.get(x, []) def _find_childs(x, recursive): return _get_element(children, x) def _find_parent(x): return _get_element(parents, x) utils_mock = dict( find_fork_top_parent=mock.DEFAULT, find_child_pids=mock.DEFAULT, get_cmdline_from_pid=mock.DEFAULT, kill_process=mock.DEFAULT) self.log_mock = mock.patch.object(util, 'LOG').start() with mock.patch.multiple('neutron.agent.linux.utils', **utils_mock)\ as mocks: mocks['find_fork_top_parent'].side_effect = _find_parent mocks['find_child_pids'].side_effect = _find_childs with mock.patch.object(util, 'find_listen_pids_namespace', return_value=pids): calls = [] for pid, sig in kills_expected: calls.append(mock.call(pid, sig, run_as_root=True)) util._kill_listen_processes(mock.ANY, force=force) mocks['kill_process'].assert_has_calls(calls, any_order=True) def test__kill_listen_processes_only_parents_force_false(self): pids = ['4', '5', '6'] parents = {'4': '1', '5': '5', '6': '2'} children = {} kills_expected = [('1', signal.SIGTERM), ('5', signal.SIGTERM), ('2', signal.SIGTERM)] self._test__kill_listen_processes_helper(pids, parents, children, kills_expected, False) def test__kill_listen_processes_parents_and_childs(self): pids = ['4', '5', '6'] parents = {'4': '1', '5': '2', '6': '3'} children = {'1': ['4'], '2': ['5'], '3': ['6', '8', '7']} kills_expected = [(str(x), signal.SIGKILL) for x in range(1, 9)] self._test__kill_listen_processes_helper(pids, parents, children, kills_expected, True) def test_kill_listen_processes(self): with mock.patch.object(util, '_kill_listen_processes', return_value=1) as mock_kill_listen: with mock.patch.object(util, 'wait_until_no_listen_pids_namespace', side_effect=[util.PidsInNamespaceException, None]): namespace = mock.ANY util.kill_listen_processes(namespace) mock_kill_listen.assert_has_calls( [mock.call(namespace, force=False), mock.call(namespace, force=True)]) def test_kill_listen_processes_still_procs(self): with mock.patch.object(util, '_kill_listen_processes', return_value=1): with mock.patch.object(util, 'wait_until_no_listen_pids_namespace', side_effect=util.PidsInNamespaceException): namespace = mock.ANY with testtools.ExpectedException( util.PidsInNamespaceException): util.kill_listen_processes(namespace) def test_kill_listen_processes_no_procs(self): with mock.patch.object(util, '_kill_listen_processes', return_value=0) as mock_kill_listen: with mock.patch.object(util, 'wait_until_no_listen_pids_namespace')\ as wait_until_mock: namespace = mock.ANY util.kill_listen_processes(namespace) mock_kill_listen.assert_called_once_with(namespace, force=False) self.assertFalse(wait_until_mock.called) def _test_destroy_namespace_helper(self, force, num_devices): 
ns = 'qrouter-6e322ac7-ab50-4f53-9cdc-d1d3c1164b6d' conf = mock.Mock() lo_device = mock.Mock() lo_device.name = 'lo' devices = [lo_device] while num_devices: dev = mock.Mock() dev.name = 'tap%d' % num_devices devices.append(dev) num_devices -= 1 with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap: ip_wrap.return_value.get_devices.return_value = devices ip_wrap.return_value.netns.exists.return_value = True with mock.patch.object(util, 'kill_listen_processes'): with mock.patch.object(util, 'unplug_device') as unplug: with mock.patch.object(util, 'kill_dhcp') as kill_dhcp: util.destroy_namespace(conf, ns, force) expected = [mock.call(namespace=ns)] if force: expected.extend([ mock.call().netns.exists(ns), mock.call().get_devices()]) self.assertTrue(kill_dhcp.called) unplug.assert_has_calls( [mock.call(d) for d in devices[1:]]) expected.append( mock.call().garbage_collect_namespace()) ip_wrap.assert_has_calls(expected) def test_destroy_namespace_empty(self): self._test_destroy_namespace_helper(False, 0) def test_destroy_namespace_not_empty(self): self._test_destroy_namespace_helper(False, 1) def test_destroy_namespace_not_empty_forced(self): self._test_destroy_namespace_helper(True, 2) def test_destroy_namespace_exception(self): ns = 'qrouter-6e322ac7-ab50-4f53-9cdc-d1d3c1164b6d' conf = mock.Mock() with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap: ip_wrap.side_effect = Exception() util.destroy_namespace(conf, ns) def test_main(self): namespaces = ['ns1', 'ns2'] with mock.patch('neutron.agent.linux.ip_lib.' 'list_network_namespaces') as listnetns: listnetns.return_value = namespaces with mock.patch('time.sleep') as time_sleep: conf = mock.Mock() conf.force = False methods_to_mock = dict( eligible_for_deletion=mock.DEFAULT, destroy_namespace=mock.DEFAULT, setup_conf=mock.DEFAULT) with mock.patch.multiple(util, **methods_to_mock) as mocks: mocks['eligible_for_deletion'].return_value = True mocks['setup_conf'].return_value = conf with mock.patch('neutron.common.config.setup_logging'): util.main() mocks['eligible_for_deletion'].assert_has_calls( [mock.call(conf, 'ns1', False), mock.call(conf, 'ns2', False)]) mocks['destroy_namespace'].assert_has_calls( [mock.call(conf, 'ns1', False), mock.call(conf, 'ns2', False)]) self.assertEqual(1, listnetns.call_count) time_sleep.assert_called_once_with(2) def test_main_no_candidates(self): namespaces = ['ns1', 'ns2'] with mock.patch('neutron.agent.linux.ip_lib.' 'list_network_namespaces') as listnetns: listnetns.return_value = namespaces with mock.patch('time.sleep') as time_sleep: conf = mock.Mock() conf.force = False methods_to_mock = dict( eligible_for_deletion=mock.DEFAULT, destroy_namespace=mock.DEFAULT, setup_conf=mock.DEFAULT) with mock.patch.multiple(util, **methods_to_mock) as mocks: mocks['eligible_for_deletion'].return_value = False mocks['setup_conf'].return_value = conf with mock.patch('neutron.common.config.setup_logging'): util.main() self.assertEqual(1, listnetns.call_count) mocks['eligible_for_deletion'].assert_has_calls( [mock.call(conf, 'ns1', False), mock.call(conf, 'ns2', False)]) self.assertFalse(mocks['destroy_namespace'].called) self.assertFalse(time_sleep.called) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/cmd/test_ovs_cleanup.py0000644000175000017500000000274000000000000025453 0ustar00coreycorey00000000000000# Copyright (c) 2012 OpenStack Foundation. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.cmd import ovs_cleanup as util from neutron.tests import base class TestOVSCleanup(base.BaseTestCase): def test_clean_ovs_bridges(self): conf = mock.Mock() conf.ovs_all_ports = True conf.ovs_integration_bridge = 'br-int' conf.external_network_bridge = 'br-ex' bridges = [conf.ovs_integration_bridge, conf.external_network_bridge] with mock.patch('neutron.agent.common.ovs_lib.BaseOVS') as ovs_cls: ovs_base = mock.Mock() ovs_base.get_bridges.return_value = bridges ovs_cls.return_value = ovs_base util.do_main(conf) ovs_base.ovsdb.ovs_cleanup.assert_has_calls( [mock.call(conf.ovs_integration_bridge, True), mock.call(conf.external_network_bridge, True)], any_order=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/cmd/test_sanity_check.py0000644000175000017500000000201500000000000025574 0ustar00coreycorey00000000000000# Copyright 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from neutron.cmd import sanity_check from neutron.tests import base class TestSanityCheck(base.BaseTestCase): def test_setup_conf(self): # verify that configuration can be successfully imported with mock.patch.object(sanity_check.cfg, 'CONF', return_value=cfg.ConfigOpts()): sanity_check.setup_conf() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/cmd/test_status.py0000644000175000017500000000270000000000000024454 0ustar00coreycorey00000000000000# Copyright 2018 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
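# NOTE: load_checks() flattens the get_checks() output of every class
# registered under the upgrade-checks entry-point namespace; classes
# whose get_checks() returns a falsy value (as 'no-checks-class' does
# in the test below) contribute nothing. A hedged sketch of that
# aggregation, with an illustrative helper name that is not neutron's
# implementation:
def _aggregate_checks_sketch(check_classes):
    checks = []
    for checks_class in check_classes:
        result = checks_class().get_checks()
        if result:
            checks.extend(result)
    return tuple(checks)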
import mock from neutron.cmd import status from neutron.tests import base class TestUpgradeChecks(base.BaseTestCase): def test_load_checks(self): checks = [("test check", "test_check_method")] expected_checks = tuple(checks) checks_class_1 = mock.MagicMock() checks_class_1.entry_point.load()().get_checks.return_value = ( checks) checks_class_2 = mock.MagicMock() checks_class_2.entry_point.load()().get_checks.return_value = None with mock.patch( "neutron_lib.utils.runtime.NamespacedPlugins" ) as namespace_plugins_mock: namespace_plugins = namespace_plugins_mock.return_value namespace_plugins._extensions = { "tests": checks_class_1, "no-checks-class": checks_class_2} self.assertEqual(expected_checks, status.load_checks()) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4350457 neutron-16.0.0.0b2.dev214/neutron/tests/unit/cmd/upgrade_checks/0000755000175000017500000000000000000000000024470 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/cmd/upgrade_checks/__init__.py0000644000175000017500000000000000000000000026567 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/cmd/upgrade_checks/test_checks.py0000644000175000017500000001724600000000000027353 0ustar00coreycorey00000000000000# Copyright 2018 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
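# NOTE: Each CoreChecks method exercised below returns an
# oslo_upgradecheck.upgradecheck.Result whose .code is Code.SUCCESS,
# Code.WARNING or Code.FAILURE, and whose .details names the offending
# hosts or networks. A hedged sketch of that contract (the check body
# is illustrative only, loosely modelled on worker_count_check):
from oslo_upgradecheck import upgradecheck


def _worker_count_check_sketch(api_workers, rpc_workers):
    # Warn when either worker count is left unset, succeed otherwise.
    if api_workers is None or rpc_workers is None:
        return upgradecheck.Result(
            upgradecheck.Code.WARNING, 'worker counts are not configured')
    return upgradecheck.Result(upgradecheck.Code.SUCCESS)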
import mock from oslo_config import cfg from oslo_upgradecheck.upgradecheck import Code from neutron.cmd.upgrade_checks import checks from neutron.tests import base class TestChecks(base.BaseTestCase): def setUp(self): super(TestChecks, self).setUp() self.checks = checks.CoreChecks() def test_get_checks_list(self): self.assertIsInstance(self.checks.get_checks(), list) def test_worker_check_good(self): cfg.CONF.set_override("api_workers", 2) cfg.CONF.set_override("rpc_workers", 2) result = checks.CoreChecks.worker_count_check(mock.Mock()) self.assertEqual(Code.SUCCESS, result.code) def test_worker_check_api_missing(self): cfg.CONF.set_override("api_workers", None) cfg.CONF.set_override("rpc_workers", 2) result = checks.CoreChecks.worker_count_check(mock.Mock()) self.assertEqual(Code.WARNING, result.code) def test_worker_check_rpc_missing(self): cfg.CONF.set_override("api_workers", 2) cfg.CONF.set_override("rpc_workers", None) result = checks.CoreChecks.worker_count_check(mock.Mock()) self.assertEqual(Code.WARNING, result.code) def test_worker_check_both_missing(self): cfg.CONF.set_override("api_workers", None) cfg.CONF.set_override("rpc_workers", None) result = checks.CoreChecks.worker_count_check(mock.Mock()) self.assertEqual(Code.WARNING, result.code) def test_external_network_bridge_check_good(self): agents = [ {'host': 'Host A', 'configurations': '{}'}, {'host': 'Host B', 'configurations': '{"external_network_bridge": ""}'} ] with mock.patch.object(checks, "get_l3_agents", return_value=agents): result = checks.CoreChecks.external_network_bridge_check( mock.Mock()) self.assertEqual(Code.SUCCESS, result.code) def test_external_network_bridge_check_bad(self): agents = [ {'host': 'Host A', 'configurations': '{}'}, {'host': 'Host B', 'configurations': '{"external_network_bridge": "br-ex"}'}, {'host': 'Host C', 'configurations': '{"external_network_bridge": ""}'} ] with mock.patch.object(checks, "get_l3_agents", return_value=agents): result = checks.CoreChecks.external_network_bridge_check( mock.Mock()) self.assertEqual(Code.WARNING, result.code) self.assertIn('Host B', result.details) self.assertNotIn('Host A', result.details) self.assertNotIn('Host C', result.details) def test_gateway_external_network_check_good(self): agents = [ {'host': 'Host A', 'configurations': '{}'}, {'host': 'Host B', 'configurations': '{"gateway_external_network_id": ""}'} ] with mock.patch.object(checks, "get_l3_agents", return_value=agents): result = checks.CoreChecks.gateway_external_network_check( mock.Mock()) self.assertEqual(Code.SUCCESS, result.code) def test_gateway_external_network_check_bad(self): agents = [ {'host': 'Host A', 'configurations': '{}'}, {'host': 'Host B', 'configurations': '{"gateway_external_network_id": "net-uuid"}'}, {'host': 'Host C', 'configurations': '{"gateway_external_network_id": ""}'} ] with mock.patch.object(checks, "get_l3_agents", return_value=agents): result = checks.CoreChecks.gateway_external_network_check( mock.Mock()) self.assertEqual(Code.WARNING, result.code) self.assertIn('Host B', result.details) self.assertNotIn('Host A', result.details) self.assertNotIn('Host C', result.details) def test_network_mtu_check_good(self): networks = [ {'id': 'net-uuid-a', 'mtu': 1500}, {'id': 'net-uuid-b', 'mtu': 1450} ] with mock.patch.object(checks, "get_networks", return_value=networks): result = checks.CoreChecks.network_mtu_check( mock.Mock()) self.assertEqual(Code.SUCCESS, result.code) def test_network_mtu_check_bad(self): networks = [ {'id': 'net-uuid-a', 'mtu': None}, {'id': 
'net-uuid-b', 'mtu': 1500}, ] with mock.patch.object(checks, "get_networks", return_value=networks): result = checks.CoreChecks.network_mtu_check( mock.Mock()) self.assertEqual(Code.WARNING, result.code) self.assertIn('net-uuid-a', result.details) self.assertNotIn('net-uuid-b', result.details) def test_ovn_db_revision_check_no_networking_ovn_installed(self): with mock.patch.object(checks, "table_exists", return_value=False),\ mock.patch.object( checks, "get_ovn_db_revisions") as get_ovn_db_revisions: result = checks.CoreChecks.ovn_db_revision_check(mock.Mock()) self.assertEqual(Code.SUCCESS, result.code) get_ovn_db_revisions.assert_not_called() def test_ovn_db_revision_check_networking_ovn_latest_revision(self): revisions = [ checks.LAST_NETWORKING_OVN_EXPAND_HEAD, checks.LAST_NETWORKING_OVN_CONTRACT_HEAD] with mock.patch.object(checks, "table_exists", return_value=True),\ mock.patch.object( checks, "get_ovn_db_revisions", return_value=revisions) as get_ovn_db_revisions: result = checks.CoreChecks.ovn_db_revision_check(mock.Mock()) self.assertEqual(Code.SUCCESS, result.code) get_ovn_db_revisions.assert_called_once_with() def test_ovn_db_revision_check_networking_ovn_not_latest_revision(self): revisions = ["some_older_revision"] with mock.patch.object(checks, "table_exists", return_value=True),\ mock.patch.object( checks, "get_ovn_db_revisions", return_value=revisions) as get_ovn_db_revisions: result = checks.CoreChecks.ovn_db_revision_check(mock.Mock()) self.assertEqual(Code.FAILURE, result.code) get_ovn_db_revisions.assert_called_once_with() def test_nic_switch_agent_min_kernel_check_no_nic_switch_agents(self): with mock.patch.object(checks, "get_nic_switch_agents", return_value=[]): result = checks.CoreChecks.nic_switch_agent_min_kernel_check( mock.Mock()) self.assertEqual(Code.SUCCESS, result.code) def test_nic_switch_agent_min_kernel_check(self): agents = [ {'host': 'Host A'}, {'host': 'Host B'} ] with mock.patch.object(checks, "get_nic_switch_agents", return_value=agents): result = checks.CoreChecks.nic_switch_agent_min_kernel_check( mock.Mock()) self.assertEqual(Code.WARNING, result.code) self.assertIn('Host A', result.details) self.assertIn('Host B', result.details) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4350457 neutron-16.0.0.0b2.dev214/neutron/tests/unit/common/0000755000175000017500000000000000000000000022246 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/common/__init__.py0000644000175000017500000000000000000000000024345 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/common/moved_globals_code1.py0000644000175000017500000000200200000000000026502 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Used by test cases in test__deprecate.py """ from neutron.common import _deprecate from neutron.tests.unit.common import moved_globals_target # a has been moved to moved_globals_target.a b = 'barasingha' # c has been renamed to d d = 'capybara' # e has been moved to moved_globals_target.f g = 'gelada' _deprecate._moved_global('c', new_name='d') _deprecate._moved_global('e', new_name='f', new_module=moved_globals_target) _deprecate._MovedGlobals(moved_globals_target) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/common/moved_globals_code2.py0000644000175000017500000000140700000000000026513 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Used by test cases in test__deprecate.py """ from neutron.common import _deprecate from neutron.tests.unit.common import moved_globals_target global1 = 'foo' _deprecate._MovedGlobals(moved_globals_target) global2 = 'bar' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/common/moved_globals_target.py0000644000175000017500000000116100000000000027002 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Used by test cases in test__deprecate.py """ a = 'aardvark' f = 'echidna' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4350457 neutron-16.0.0.0b2.dev214/neutron/tests/unit/common/ovn/0000755000175000017500000000000000000000000023050 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/common/ovn/__init__.py0000644000175000017500000000000000000000000025147 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/common/ovn/test_acl.py0000644000175000017500000007607600000000000025240 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import mock from neutron_lib import constants as const from oslo_config import cfg from ovsdbapp.backend.ovs_idl import idlutils from neutron.common.ovn import acl as ovn_acl from neutron.common.ovn import constants as ovn_const from neutron.common.ovn import utils as ovn_utils from neutron.conf.agent import securitygroups_rpc from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import commands as cmd from neutron.tests import base from neutron.tests.unit import fake_resources as fakes class TestACLs(base.BaseTestCase): def setUp(self): super(TestACLs, self).setUp() self.driver = mock.Mock() self.driver._nb_ovn = fakes.FakeOvsdbNbOvnIdl() self.plugin = fakes.FakePlugin() self.admin_context = mock.Mock() self.fake_port = fakes.FakePort.create_one_port({ 'id': 'fake_port_id1', 'network_id': 'network_id1', 'fixed_ips': [{'subnet_id': 'subnet_id1', 'ip_address': '1.1.1.1'}], }).info() self.fake_subnet = fakes.FakeSubnet.create_one_subnet({ 'id': 'subnet_id1', 'ip_version': 4, 'cidr': '1.1.1.0/24', }).info() mock_row_by_value = mock.patch.object(idlutils, 'row_by_value') mock_row_by_value.start() self.addCleanup(mock_row_by_value.stop) mock_acl_columns_severity = mock.patch.object( ovn_acl, '_acl_columns_name_severity_supported', return_value=True) mock_acl_columns_severity.start() self.addCleanup(mock_acl_columns_severity.stop) securitygroups_rpc.register_securitygroups_opts() def test_drop_all_ip_traffic_for_port(self): acls = ovn_acl.drop_all_ip_traffic_for_port(self.fake_port) acl_to_lport = {'action': 'drop', 'direction': 'to-lport', 'external_ids': {'neutron:lport': self.fake_port['id']}, 'log': False, 'name': [], 'severity': [], 'lport': self.fake_port['id'], 'lswitch': 'neutron-network_id1', 'match': 'outport == "fake_port_id1" && ip', 'priority': 1001} acl_from_lport = {'action': 'drop', 'direction': 'from-lport', 'external_ids': {'neutron:lport': self.fake_port['id']}, 'log': False, 'name': [], 'severity': [], 'lport': self.fake_port['id'], 'lswitch': 'neutron-network_id1', 'match': 'inport == "fake_port_id1" && ip', 'priority': 1001} for acl in acls: if 'to-lport' in acl.values(): self.assertEqual(acl_to_lport, acl) if 'from-lport' in acl.values(): self.assertEqual(acl_from_lport, acl) def test_add_acl_dhcp(self): ovn_dhcp_acls = ovn_acl.add_acl_dhcp(self.fake_port, self.fake_subnet) other_dhcp_acls = ovn_acl.add_acl_dhcp(self.fake_port, self.fake_subnet, ovn_dhcp=False) expected_match_to_lport = ( 'outport == "%s" && ip4 && ip4.src == %s && udp && udp.src == 67 ' '&& udp.dst == 68') % (self.fake_port['id'], self.fake_subnet['cidr']) acl_to_lport = {'action': 'allow', 'direction': 'to-lport', 'external_ids': {'neutron:lport': 'fake_port_id1'}, 'log': False, 'name': [], 'severity': [], 'lport': 'fake_port_id1', 'lswitch': 'neutron-network_id1', 'match': expected_match_to_lport, 'priority': 1002} expected_match_from_lport = ( 'inport == "%s" && ip4 && ' 'ip4.dst == {255.255.255.255, %s} && ' 'udp && udp.src == 68 && udp.dst == 67' ) % (self.fake_port['id'], self.fake_subnet['cidr']) acl_from_lport = {'action': 'allow', 'direction': 'from-lport', 'external_ids': 
{'neutron:lport': 'fake_port_id1'}, 'log': False, 'name': [], 'severity': [], 'lport': 'fake_port_id1', 'lswitch': 'neutron-network_id1', 'match': expected_match_from_lport, 'priority': 1002} self.assertEqual(1, len(ovn_dhcp_acls)) self.assertEqual(acl_from_lport, ovn_dhcp_acls[0]) self.assertEqual(2, len(other_dhcp_acls)) for acl in other_dhcp_acls: if 'to-lport' in acl.values(): self.assertEqual(acl_to_lport, acl) if 'from-lport' in acl.values(): self.assertEqual(acl_from_lport, acl) def _test_add_sg_rule_acl_for_port(self, sg_rule, direction, match): port = {'id': 'port-id', 'network_id': 'network-id'} acl = ovn_acl.add_sg_rule_acl_for_port(port, sg_rule, match) self.assertEqual({'lswitch': 'neutron-network-id', 'lport': 'port-id', 'priority': ovn_const.ACL_PRIORITY_ALLOW, 'action': ovn_const.ACL_ACTION_ALLOW_RELATED, 'log': False, 'name': [], 'severity': [], 'direction': direction, 'match': match, 'external_ids': { 'neutron:lport': 'port-id', 'neutron:security_group_rule_id': 'sgr_id'}}, acl) def test_add_sg_rule_acl_for_port_remote_ip_prefix(self): sg_rule = {'id': 'sgr_id', 'direction': 'ingress', 'ethertype': 'IPv4', 'remote_group_id': None, 'remote_ip_prefix': '1.1.1.0/24', 'protocol': None} match = 'outport == "port-id" && ip4 && ip4.src == 1.1.1.0/24' self._test_add_sg_rule_acl_for_port(sg_rule, 'to-lport', match) sg_rule['direction'] = 'egress' match = 'inport == "port-id" && ip4 && ip4.dst == 1.1.1.0/24' self._test_add_sg_rule_acl_for_port(sg_rule, 'from-lport', match) def test_add_sg_rule_acl_for_port_remote_group(self): sg_rule = {'id': 'sgr_id', 'direction': 'ingress', 'ethertype': 'IPv4', 'remote_group_id': 'sg1', 'remote_ip_prefix': None, 'protocol': None} match = 'outport == "port-id" && ip4 && (ip4.src == 1.1.1.100' \ ' || ip4.src == 1.1.1.101' \ ' || ip4.src == 1.1.1.102)' self._test_add_sg_rule_acl_for_port(sg_rule, 'to-lport', match) sg_rule['direction'] = 'egress' match = 'inport == "port-id" && ip4 && (ip4.dst == 1.1.1.100' \ ' || ip4.dst == 1.1.1.101' \ ' || ip4.dst == 1.1.1.102)' self._test_add_sg_rule_acl_for_port(sg_rule, 'from-lport', match) def test__update_acls_compute_difference(self): lswitch_name = 'lswitch-1' port1 = {'id': 'port-id1', 'network_id': lswitch_name, 'fixed_ips': [{'subnet_id': 'subnet-id', 'ip_address': '1.1.1.101'}, {'subnet_id': 'subnet-id-v6', 'ip_address': '2001:0db8::1:0:0:1'}]} port2 = {'id': 'port-id2', 'network_id': lswitch_name, 'fixed_ips': [{'subnet_id': 'subnet-id', 'ip_address': '1.1.1.102'}, {'subnet_id': 'subnet-id-v6', 'ip_address': '2001:0db8::1:0:0:2'}]} ports = [port1, port2] # OLD ACLs, allow IPv4 communication aclport1_old1 = {'priority': 1002, 'direction': 'from-lport', 'lport': port1['id'], 'lswitch': lswitch_name, 'match': 'inport == %s && ip4 && (ip.src == %s)' % (port1['id'], port1['fixed_ips'][0]['ip_address'])} aclport1_old2 = {'priority': 1002, 'direction': 'from-lport', 'lport': port1['id'], 'lswitch': lswitch_name, 'match': 'inport == %s && ip6 && (ip.src == %s)' % (port1['id'], port1['fixed_ips'][1]['ip_address'])} aclport1_old3 = {'priority': 1002, 'direction': 'to-lport', 'lport': port1['id'], 'lswitch': lswitch_name, 'match': 'ip4 && (ip.src == %s)' % (port2['fixed_ips'][0]['ip_address'])} port1_acls_old = [aclport1_old1, aclport1_old2, aclport1_old3] aclport2_old1 = {'priority': 1002, 'direction': 'from-lport', 'lport': port2['id'], 'lswitch': lswitch_name, 'match': 'inport == %s && ip4 && (ip.src == %s)' % (port2['id'], port2['fixed_ips'][0]['ip_address'])} aclport2_old2 = {'priority': 1002, 'direction': 
'from-lport', 'lport': port2['id'], 'lswitch': lswitch_name, 'match': 'inport == %s && ip6 && (ip.src == %s)' % (port2['id'], port2['fixed_ips'][1]['ip_address'])} aclport2_old3 = {'priority': 1002, 'direction': 'to-lport', 'lport': port2['id'], 'lswitch': lswitch_name, 'match': 'ip4 && (ip.src == %s)' % (port1['fixed_ips'][0]['ip_address'])} port2_acls_old = [aclport2_old1, aclport2_old2, aclport2_old3] acls_old_dict = {'%s' % (port1['id']): port1_acls_old, '%s' % (port2['id']): port2_acls_old} acl_obj_dict = {str(aclport1_old1): 'row1', str(aclport1_old2): 'row2', str(aclport1_old3): 'row3', str(aclport2_old1): 'row4', str(aclport2_old2): 'row5', str(aclport2_old3): 'row6'} # NEW ACLs, allow IPv6 communication aclport1_new1 = {'priority': 1002, 'direction': 'from-lport', 'lport': port1['id'], 'lswitch': lswitch_name, 'match': 'inport == %s && ip4 && (ip.src == %s)' % (port1['id'], port1['fixed_ips'][0]['ip_address'])} aclport1_new2 = {'priority': 1002, 'direction': 'from-lport', 'lport': port1['id'], 'lswitch': lswitch_name, 'match': 'inport == %s && ip6 && (ip.src == %s)' % (port1['id'], port1['fixed_ips'][1]['ip_address'])} aclport1_new3 = {'priority': 1002, 'direction': 'to-lport', 'lport': port1['id'], 'lswitch': lswitch_name, 'match': 'ip6 && (ip.src == %s)' % (port2['fixed_ips'][1]['ip_address'])} port1_acls_new = [aclport1_new1, aclport1_new2, aclport1_new3] aclport2_new1 = {'priority': 1002, 'direction': 'from-lport', 'lport': port2['id'], 'lswitch': lswitch_name, 'match': 'inport == %s && ip4 && (ip.src == %s)' % (port2['id'], port2['fixed_ips'][0]['ip_address'])} aclport2_new2 = {'priority': 1002, 'direction': 'from-lport', 'lport': port2['id'], 'lswitch': lswitch_name, 'match': 'inport == %s && ip6 && (ip.src == %s)' % (port2['id'], port2['fixed_ips'][1]['ip_address'])} aclport2_new3 = {'priority': 1002, 'direction': 'to-lport', 'lport': port2['id'], 'lswitch': lswitch_name, 'match': 'ip6 && (ip.src == %s)' % (port1['fixed_ips'][1]['ip_address'])} port2_acls_new = [aclport2_new1, aclport2_new2, aclport2_new3] acls_new_dict = {'%s' % (port1['id']): port1_acls_new, '%s' % (port2['id']): port2_acls_new} acls_new_dict_copy = copy.deepcopy(acls_new_dict) # Invoke _compute_acl_differences update_cmd = cmd.UpdateACLsCommand(self.driver._nb_ovn, [lswitch_name], iter(ports), acls_new_dict ) acl_dels, acl_adds =\ update_cmd._compute_acl_differences(iter(ports), acls_old_dict, acls_new_dict, acl_obj_dict) # Expected Difference (Sorted) acl_del_exp = {lswitch_name: ['row3', 'row6']} acl_adds_exp = {lswitch_name: [{'priority': 1002, 'direction': 'to-lport', 'match': 'ip6 && (ip.src == %s)' % (port2['fixed_ips'][1]['ip_address'])}, {'priority': 1002, 'direction': 'to-lport', 'match': 'ip6 && (ip.src == %s)' % (port1['fixed_ips'][1]['ip_address'])}]} self.assertEqual(acl_del_exp, acl_dels) self.assertEqual(acl_adds_exp, acl_adds) # make sure the argument is_add_acl=False has no effect in the # need_compare=True scenario update_cmd_with_acl = cmd.UpdateACLsCommand(self.driver._nb_ovn, [lswitch_name], iter(ports), acls_new_dict_copy, need_compare=True, is_add_acl=False) new_acl_dels, new_acl_adds =\ update_cmd_with_acl._compute_acl_differences(iter(ports), acls_old_dict, acls_new_dict_copy, acl_obj_dict) self.assertEqual(acl_dels, new_acl_dels) self.assertEqual(acl_adds, new_acl_adds) def test__get_update_data_without_compare(self): lswitch_name = 'lswitch-1' port1 = {'id': 'port-id1', 'network_id': lswitch_name, 'fixed_ips': mock.Mock()} port2 = {'id': 'port-id2', 'network_id': lswitch_name, 
'fixed_ips': mock.Mock()} ports = [port1, port2] aclport1_new = {'priority': 1002, 'direction': 'to-lport', 'match': 'outport == %s && ip4 && icmp4' % (port1['id']), 'external_ids': {}} aclport2_new = {'priority': 1002, 'direction': 'to-lport', 'match': 'outport == %s && ip4 && icmp4' % (port2['id']), 'external_ids': {}} acls_new_dict = {'%s' % (port1['id']): aclport1_new, '%s' % (port2['id']): aclport2_new} # test for creating new acls update_cmd_add_acl = cmd.UpdateACLsCommand(self.driver._nb_ovn, [lswitch_name], iter(ports), acls_new_dict, need_compare=False, is_add_acl=True) lswitch_dict, acl_del_dict, acl_add_dict = \ update_cmd_add_acl._get_update_data_without_compare() self.assertIn('neutron-lswitch-1', lswitch_dict) self.assertEqual({}, acl_del_dict) expected_acls = {'neutron-lswitch-1': [aclport1_new, aclport2_new]} self.assertEqual(expected_acls, acl_add_dict) # test for deleting existing acls acl1 = mock.Mock( match='outport == port-id1 && ip4 && icmp4', external_ids={}) acl2 = mock.Mock( match='outport == port-id2 && ip4 && icmp4', external_ids={}) acl3 = mock.Mock( match='outport == port-id1 && ip4 && (ip4.src == fake_ip)', external_ids={}) lswitch_obj = mock.Mock( name='neutron-lswitch-1', acls=[acl1, acl2, acl3]) with mock.patch('ovsdbapp.backend.ovs_idl.idlutils.row_by_value', return_value=lswitch_obj): update_cmd_del_acl = cmd.UpdateACLsCommand(self.driver._nb_ovn, [lswitch_name], iter(ports), acls_new_dict, need_compare=False, is_add_acl=False) lswitch_dict, acl_del_dict, acl_add_dict = \ update_cmd_del_acl._get_update_data_without_compare() self.assertIn('neutron-lswitch-1', lswitch_dict) expected_acls = {'neutron-lswitch-1': [acl1, acl2]} self.assertEqual(expected_acls, acl_del_dict) self.assertEqual({}, acl_add_dict) def test_acl_protocol_and_ports_for_tcp_udp_and_sctp_number(self): sg_rule = {'port_range_min': None, 'port_range_max': None} sg_rule['protocol'] = str(const.PROTO_NUM_TCP) match = ovn_acl.acl_protocol_and_ports(sg_rule, None) self.assertEqual(' && tcp', match) sg_rule['protocol'] = str(const.PROTO_NUM_UDP) match = ovn_acl.acl_protocol_and_ports(sg_rule, None) self.assertEqual(' && udp', match) sg_rule['protocol'] = str(const.PROTO_NUM_SCTP) match = ovn_acl.acl_protocol_and_ports(sg_rule, None) self.assertEqual(' && sctp', match) def test_acl_protocol_and_ports_for_tcp_udp_and_sctp_number_one(self): sg_rule = {'port_range_min': 22, 'port_range_max': 22} sg_rule['protocol'] = str(const.PROTO_NUM_TCP) match = ovn_acl.acl_protocol_and_ports(sg_rule, None) self.assertEqual(' && tcp && tcp.dst == 22', match) sg_rule['protocol'] = str(const.PROTO_NUM_UDP) match = ovn_acl.acl_protocol_and_ports(sg_rule, None) self.assertEqual(' && udp && udp.dst == 22', match) sg_rule['protocol'] = str(const.PROTO_NUM_SCTP) match = ovn_acl.acl_protocol_and_ports(sg_rule, None) self.assertEqual(' && sctp && sctp.dst == 22', match) def test_acl_protocol_and_ports_for_tcp_udp_and_sctp_number_range(self): sg_rule = {'port_range_min': 21, 'port_range_max': 23} sg_rule['protocol'] = str(const.PROTO_NUM_TCP) match = ovn_acl.acl_protocol_and_ports(sg_rule, None) self.assertEqual(' && tcp && tcp.dst >= 21 && tcp.dst <= 23', match) sg_rule['protocol'] = str(const.PROTO_NUM_UDP) match = ovn_acl.acl_protocol_and_ports(sg_rule, None) self.assertEqual(' && udp && udp.dst >= 21 && udp.dst <= 23', match) sg_rule['protocol'] = str(const.PROTO_NUM_SCTP) match = ovn_acl.acl_protocol_and_ports(sg_rule, None) self.assertEqual(' && sctp && sctp.dst >= 21 && sctp.dst <= 23', match) def 
test_acl_protocol_and_ports_for_ipv6_icmp_protocol(self): sg_rule = {'port_range_min': None, 'port_range_max': None} icmp = 'icmp6' expected_match = ' && icmp6' sg_rule['protocol'] = const.PROTO_NAME_ICMP match = ovn_acl.acl_protocol_and_ports(sg_rule, icmp) self.assertEqual(expected_match, match) sg_rule['protocol'] = str(const.PROTO_NUM_ICMP) match = ovn_acl.acl_protocol_and_ports(sg_rule, icmp) self.assertEqual(expected_match, match) sg_rule['protocol'] = const.PROTO_NAME_IPV6_ICMP match = ovn_acl.acl_protocol_and_ports(sg_rule, icmp) self.assertEqual(expected_match, match) sg_rule['protocol'] = const.PROTO_NAME_IPV6_ICMP_LEGACY match = ovn_acl.acl_protocol_and_ports(sg_rule, icmp) self.assertEqual(expected_match, match) sg_rule['protocol'] = str(const.PROTO_NUM_IPV6_ICMP) match = ovn_acl.acl_protocol_and_ports(sg_rule, icmp) self.assertEqual(expected_match, match) def test_acl_protocol_and_ports_for_icmp4_and_icmp6_port_range(self): match_list = [ (None, None, ' && icmp4'), (0, None, ' && icmp4 && icmp4.type == 0'), (0, 0, ' && icmp4 && icmp4.type == 0 && icmp4.code == 0'), (0, 5, ' && icmp4 && icmp4.type == 0 && icmp4.code == 5')] v6_match_list = [ (None, None, ' && icmp6'), (133, None, ' && icmp6 && icmp6.type == 133'), (1, 1, ' && icmp6 && icmp6.type == 1 && icmp6.code == 1'), (138, 1, ' && icmp6 && icmp6.type == 138 && icmp6.code == 1')] sg_rule = {'protocol': const.PROTO_NAME_ICMP} icmp = 'icmp4' for pmin, pmax, expected_match in match_list: sg_rule['port_range_min'] = pmin sg_rule['port_range_max'] = pmax match = ovn_acl.acl_protocol_and_ports(sg_rule, icmp) self.assertEqual(expected_match, match) sg_rule = {'protocol': const.PROTO_NAME_IPV6_ICMP} icmp = 'icmp6' for pmin, pmax, expected_match in v6_match_list: sg_rule['port_range_min'] = pmin sg_rule['port_range_max'] = pmax match = ovn_acl.acl_protocol_and_ports(sg_rule, icmp) self.assertEqual(expected_match, match) def test_acl_protocol_and_ports_protocol_not_supported(self): sg_rule = {'port_range_min': None, 'port_range_max': None} sg_rule['protocol'] = '1234567' self.assertRaises(ovn_acl.ProtocolNotSupported, ovn_acl.acl_protocol_and_ports, sg_rule, None) def test_acl_protocol_and_ports_protocol_range(self): sg_rule = {'port_range_min': None, 'port_range_max': None} # For more common protocols such as TCP, UDP and ICMP, we # prefer to use the protocol name in the match string instead of # the protocol number (e.g: the word "tcp" instead of "ip.proto # == 6"). 
This improves the readability/debuggability when # troubleshooting the ACLs skip_protos = (const.PROTO_NUM_TCP, const.PROTO_NUM_UDP, const.PROTO_NUM_SCTP, const.PROTO_NUM_ICMP, const.PROTO_NUM_IPV6_ICMP) for proto in range(256): if proto in skip_protos: continue sg_rule['protocol'] = str(proto) match = ovn_acl.acl_protocol_and_ports(sg_rule, None) self.assertEqual(' && ip.proto == %s' % proto, match) def test_acl_protocol_and_ports_name_to_number(self): sg_rule = {'port_range_min': None, 'port_range_max': None} sg_rule['protocol'] = str(const.PROTO_NAME_OSPF) match = ovn_acl.acl_protocol_and_ports(sg_rule, None) self.assertEqual(' && ip.proto == 89', match) def test_acl_direction(self): sg_rule = fakes.FakeSecurityGroupRule.create_one_security_group_rule({ 'direction': 'ingress' }).info() match = ovn_acl.acl_direction(sg_rule, self.fake_port) self.assertEqual('outport == "' + self.fake_port['id'] + '"', match) sg_rule['direction'] = 'egress' match = ovn_acl.acl_direction(sg_rule, self.fake_port) self.assertEqual('inport == "' + self.fake_port['id'] + '"', match) def test_acl_ethertype(self): sg_rule = fakes.FakeSecurityGroupRule.create_one_security_group_rule({ 'ethertype': 'IPv4' }).info() match, ip_version, icmp = ovn_acl.acl_ethertype(sg_rule) self.assertEqual(' && ip4', match) self.assertEqual('ip4', ip_version) self.assertEqual('icmp4', icmp) sg_rule['ethertype'] = 'IPv6' match, ip_version, icmp = ovn_acl.acl_ethertype(sg_rule) self.assertEqual(' && ip6', match) self.assertEqual('ip6', ip_version) self.assertEqual('icmp6', icmp) sg_rule['ethertype'] = 'IPv10' match, ip_version, icmp = ovn_acl.acl_ethertype(sg_rule) self.assertEqual('', match) self.assertIsNone(ip_version) self.assertIsNone(icmp) def test_acl_remote_ip_prefix(self): sg_rule = fakes.FakeSecurityGroupRule.create_one_security_group_rule({ 'direction': 'ingress', 'remote_ip_prefix': None }).info() ip_version = 'ip4' remote_ip_prefix = '10.10.0.0/24' match = ovn_acl.acl_remote_ip_prefix(sg_rule, ip_version) self.assertEqual('', match) sg_rule['remote_ip_prefix'] = remote_ip_prefix match = ovn_acl.acl_remote_ip_prefix(sg_rule, ip_version) expected_match = ' && %s.src == %s' % (ip_version, remote_ip_prefix) self.assertEqual(expected_match, match) sg_rule['direction'] = 'egress' match = ovn_acl.acl_remote_ip_prefix(sg_rule, ip_version) expected_match = ' && %s.dst == %s' % (ip_version, remote_ip_prefix) self.assertEqual(expected_match, match) def test_acl_remote_group_id(self): sg_rule = fakes.FakeSecurityGroupRule.create_one_security_group_rule({ 'direction': 'ingress', 'remote_group_id': None }).info() ip_version = 'ip4' sg_id = sg_rule['security_group_id'] addrset_name = ovn_utils.ovn_addrset_name(sg_id, ip_version) match = ovn_acl.acl_remote_group_id(sg_rule, ip_version) self.assertEqual('', match) sg_rule['remote_group_id'] = sg_id match = ovn_acl.acl_remote_group_id(sg_rule, ip_version) self.assertEqual(' && ip4.src == $' + addrset_name, match) sg_rule['direction'] = 'egress' match = ovn_acl.acl_remote_group_id(sg_rule, ip_version) self.assertEqual(' && ip4.dst == $' + addrset_name, match) def _test_update_acls_for_security_group(self, use_cache=True): sg = fakes.FakeSecurityGroup.create_one_security_group().info() remote_sg = fakes.FakeSecurityGroup.create_one_security_group().info() sg_rule = fakes.FakeSecurityGroupRule.create_one_security_group_rule({ 'security_group_id': sg['id'], 'remote_group_id': remote_sg['id'] }).info() port = fakes.FakePort.create_one_port({ 'security_groups': [sg['id']] }).info()
self.plugin.get_ports.return_value = [port] if use_cache: sg_ports_cache = {sg['id']: [{'port_id': port['id']}], remote_sg['id']: []} else: sg_ports_cache = None self.plugin._get_port_security_group_bindings.return_value = \ [{'port_id': port['id']}] # Build ACL for validation. expected_acl = ovn_acl._add_sg_rule_acl_for_port(port, sg_rule) expected_acl.pop('lport') expected_acl.pop('lswitch') # Validate ACLs when port has security groups. ovn_acl.update_acls_for_security_group(self.plugin, self.admin_context, self.driver._nb_ovn, sg['id'], sg_rule, sg_ports_cache=sg_ports_cache) self.driver._nb_ovn.update_acls.assert_called_once_with( [port['network_id']], mock.ANY, {port['id']: expected_acl}, need_compare=False, is_add_acl=True ) def test_update_acls_for_security_group_cache(self): self._test_update_acls_for_security_group(use_cache=True) def test_update_acls_for_security_group_no_cache(self): self._test_update_acls_for_security_group(use_cache=False) def test_acl_port_ips(self): port4 = fakes.FakePort.create_one_port({ 'fixed_ips': [{'subnet_id': 'subnet-ipv4', 'ip_address': '10.0.0.1'}], }).info() port46 = fakes.FakePort.create_one_port({ 'fixed_ips': [{'subnet_id': 'subnet-ipv4', 'ip_address': '10.0.0.2'}, {'subnet_id': 'subnet-ipv6', 'ip_address': 'fde3:d45:df72::1'}], }).info() port6 = fakes.FakePort.create_one_port({ 'fixed_ips': [{'subnet_id': 'subnet-ipv6', 'ip_address': '2001:db8::8'}], }).info() addresses = ovn_acl.acl_port_ips(port4) self.assertEqual({'ip4': [port4['fixed_ips'][0]['ip_address']], 'ip6': []}, addresses) addresses = ovn_acl.acl_port_ips(port46) self.assertEqual({'ip4': [port46['fixed_ips'][0]['ip_address']], 'ip6': [port46['fixed_ips'][1]['ip_address']]}, addresses) addresses = ovn_acl.acl_port_ips(port6) self.assertEqual({'ip4': [], 'ip6': [port6['fixed_ips'][0]['ip_address']]}, addresses) def test_sg_disabled(self): sg = fakes.FakeSecurityGroup.create_one_security_group().info() port = fakes.FakePort.create_one_port({ 'security_groups': [sg['id']] }).info() cfg.CONF.set_override('enable_security_group', 'False', 'SECURITYGROUP') acl_list = ovn_acl.add_acls(self.plugin, self.admin_context, port, {}, {}, self.driver._ovn) self.assertEqual([], acl_list) ovn_acl.update_acls_for_security_group(self.plugin, self.admin_context, self.driver._ovn, sg['id'], None) self.driver._ovn.update_acls.assert_not_called() addresses = ovn_acl.acl_port_ips(port) self.assertEqual({'ip4': [], 'ip6': []}, addresses) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/common/ovn/test_hash_ring_manager.py0000644000175000017500000001317300000000000030122 0ustar00coreycorey00000000000000# Copyright 2019 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
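# A minimal pure-python sketch of the consistent-hashing behaviour the
# TestHashRingManager cases below verify. This stand-in is an assumption
# for illustration only -- the real HashRingManager builds its ring from
# the ovn_hash_ring_db table -- but it shows the property the tests rely
# on: a UUID always maps to the same alive node, and a key only moves
# when the node that owned it leaves the ring.
import hashlib


def _ring_get_node(alive_nodes, uuid_):
    # Pick the alive node whose hash follows the key's hash on the ring.
    def _h(value):
        return int(hashlib.md5(value.encode()).hexdigest(), 16)
    return min(alive_nodes, key=lambda node: (_h(node) - _h(uuid_)) % 2 ** 128)


_nodes = ['node-1', 'node-2', 'node-3']
_owner = _ring_get_node(_nodes, 'fake-uuid')
# Removing a node that does not own the key re-balances nothing for it:
_other = next(n for n in _nodes if n != _owner)
assert _ring_get_node([n for n in _nodes if n != _other], 'fake-uuid') == _owner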
import datetime import mock from neutron_lib import context from oslo_utils import timeutils from neutron.common.ovn import constants from neutron.common.ovn import exceptions from neutron.common.ovn import hash_ring_manager from neutron.db import ovn_hash_ring_db as db_hash_ring from neutron.tests.unit import testlib_api HASH_RING_TEST_GROUP = 'test_group' class TestHashRingManager(testlib_api.SqlTestCaseLight): def setUp(self): super(TestHashRingManager, self).setUp() self.hash_ring_manager = hash_ring_manager.HashRingManager( HASH_RING_TEST_GROUP) self.admin_ctx = context.get_admin_context() def _verify_hashes(self, hash_dict): for uuid_, target_node in hash_dict.items(): self.assertEqual(target_node, self.hash_ring_manager.get_node(uuid_)) def test_get_node(self): # Use pre-defined UUIDs to make the hashes predictable node_1_uuid = db_hash_ring.add_node( self.admin_ctx, HASH_RING_TEST_GROUP, 'node-1') node_2_uuid = db_hash_ring.add_node( self.admin_ctx, HASH_RING_TEST_GROUP, 'node-2') hash_dict_before = {'fake-uuid': node_1_uuid, 'fake-uuid-0': node_2_uuid} self._verify_hashes(hash_dict_before) def test_get_node_no_active_nodes(self): self.assertRaises( exceptions.HashRingIsEmpty, self.hash_ring_manager.get_node, 'fake-uuid') def test_ring_rebalance(self): # Use pre-defined UUIDs to make the hashes predictable node_1_uuid = db_hash_ring.add_node( self.admin_ctx, HASH_RING_TEST_GROUP, 'node-1') node_2_uuid = db_hash_ring.add_node( self.admin_ctx, HASH_RING_TEST_GROUP, 'node-2') # Add another node from a different host with mock.patch.object(db_hash_ring, 'CONF') as mock_conf: mock_conf.host = 'another-host-52359446-c366' another_host_node = db_hash_ring.add_node( self.admin_ctx, HASH_RING_TEST_GROUP, 'another-host') # Assert all nodes are alive in the ring self.hash_ring_manager.refresh() self.assertEqual(3, len(self.hash_ring_manager._hash_ring.nodes)) # Hash certain values against the nodes hash_dict_before = {'fake-uuid': node_1_uuid, 'fake-uuid-0': node_2_uuid, 'fake-uuid-ABCDE': another_host_node} self._verify_hashes(hash_dict_before) # Mock utcnow() as if HASH_RING_NODES_TIMEOUT had already expired, # then touch the nodes from our host fake_utcnow = timeutils.utcnow() - datetime.timedelta( seconds=constants.HASH_RING_NODES_TIMEOUT) with mock.patch.object(timeutils, 'utcnow') as mock_utcnow: mock_utcnow.return_value = fake_utcnow db_hash_ring.touch_nodes_from_host( self.admin_ctx, HASH_RING_TEST_GROUP) # Now assert that the ring was re-balanced and only the node from # another host is marked as alive self.hash_ring_manager.refresh() self.assertEqual([another_host_node], list(self.hash_ring_manager._hash_ring.nodes.keys())) # Now only "another_host_node" is alive, all values should hash to it hash_dict_after_rebalance = {'fake-uuid': another_host_node, 'fake-uuid-0': another_host_node, 'fake-uuid-ABCDE': another_host_node} self._verify_hashes(hash_dict_after_rebalance) # Now touch the nodes so they appear active again db_hash_ring.touch_nodes_from_host( self.admin_ctx, HASH_RING_TEST_GROUP) self.hash_ring_manager.refresh() # The ring should re-balance back to the way it was before self._verify_hashes(hash_dict_before) def test__wait_startup_before_caching(self): db_hash_ring.add_node(self.admin_ctx, HASH_RING_TEST_GROUP, 'node-1') db_hash_ring.add_node(self.admin_ctx, HASH_RING_TEST_GROUP, 'node-2') # Assert it will return True until created_at != updated_at self.assertTrue(self.hash_ring_manager._wait_startup_before_caching) self.assertTrue(self.hash_ring_manager._cache_startup_timeout) #
Touch the nodes (== update the updated_at column) db_hash_ring.touch_nodes_from_host( self.admin_ctx, HASH_RING_TEST_GROUP) # Assert it's now False. Waiting is not needed anymore self.assertFalse(self.hash_ring_manager._wait_startup_before_caching) self.assertFalse(self.hash_ring_manager._cache_startup_timeout) # Now assert that since the _cache_startup_timeout has been # flipped, we will no longer read from the database with mock.patch.object(hash_ring_manager.db_hash_ring, 'get_active_nodes') as get_nodes_mock: self.assertFalse( self.hash_ring_manager._wait_startup_before_caching) self.assertFalse(get_nodes_mock.called) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/common/ovn/test_utils.py0000644000175000017500000001161000000000000025620 0ustar00coreycorey00000000000000# Copyright 2018 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures from neutron.common.ovn import constants from neutron.common.ovn import utils from neutron.tests import base from neutron.tests.unit import fake_resources as fakes RESOLV_CONF_TEMPLATE = """# TEST TEST TEST # Generated by OVN test nameserver 10.0.0.1 #nameserver 10.0.0.2 nameserver 10.0.0.3 nameserver foo 10.0.0.4 nameserver aef0::4 foo 10.0.0.5 """ class TestUtils(base.BaseTestCase): def test_get_system_dns_resolvers(self): tempdir = self.useFixture(fixtures.TempDir()).path resolver_file_name = tempdir + '/resolv.conf' tmp_resolv_file = open(resolver_file_name, 'w') tmp_resolv_file.writelines(RESOLV_CONF_TEMPLATE) tmp_resolv_file.close() expected_dns_resolvers = ['10.0.0.1', '10.0.0.3'] observed_dns_resolvers = utils.get_system_dns_resolvers( resolver_file=resolver_file_name) self.assertEqual(expected_dns_resolvers, observed_dns_resolvers) def test_is_gateway_chassis(self): chassis = fakes.FakeOvsdbRow.create_one_ovsdb_row(attrs={ 'external_ids': {'ovn-cms-options': 'enable-chassis-as-gw'}}) non_gw_chassis_0 = fakes.FakeOvsdbRow.create_one_ovsdb_row(attrs={ 'external_ids': {'ovn-cms-options': ''}}) non_gw_chassis_1 = fakes.FakeOvsdbRow.create_one_ovsdb_row(attrs={}) non_gw_chassis_2 = fakes.FakeOvsdbRow.create_one_ovsdb_row(attrs={ 'external_ids': {}}) self.assertTrue(utils.is_gateway_chassis(chassis)) self.assertFalse(utils.is_gateway_chassis(non_gw_chassis_0)) self.assertFalse(utils.is_gateway_chassis(non_gw_chassis_1)) self.assertFalse(utils.is_gateway_chassis(non_gw_chassis_2)) class TestGateWayChassisValidity(base.BaseTestCase): def setUp(self): super(TestGateWayChassisValidity, self).setUp() self.gw_chassis = ['host1', 'host2'] self.chassis_name = self.gw_chassis[0] self.physnet = 'physical-nw-1' self.chassis_physnets = {self.chassis_name: [self.physnet]} def test_gateway_chassis_valid(self): # Return False, since everything is valid self.assertFalse(utils.is_gateway_chassis_invalid( self.chassis_name, self.gw_chassis, self.physnet, self.chassis_physnets)) def
test_gateway_chassis_due_to_invalid_chassis_name(self): # Return True since chassis is invalid self.chassis_name = constants.OVN_GATEWAY_INVALID_CHASSIS self.assertTrue(utils.is_gateway_chassis_invalid( self.chassis_name, self.gw_chassis, self.physnet, self.chassis_physnets)) def test_gateway_chassis_for_chassis_not_in_chassis_physnets(self): # Return True since chassis is not in chassis_physnets self.chassis_name = 'host-2' self.assertTrue(utils.is_gateway_chassis_invalid( self.chassis_name, self.gw_chassis, self.physnet, self.chassis_physnets)) def test_gateway_chassis_for_undefined_physnet(self): # Return True since physnet is not defined self.chassis_name = 'host-1' self.physnet = None self.assertTrue(utils.is_gateway_chassis_invalid( self.chassis_name, self.gw_chassis, self.physnet, self.chassis_physnets)) def test_gateway_chassis_for_physnet_not_in_chassis_physnets(self): # Return True since physnet is not in chassis_physnets self.physnet = 'physical-nw-2' self.assertTrue(utils.is_gateway_chassis_invalid( self.chassis_name, self.gw_chassis, self.physnet, self.chassis_physnets)) def test_gateway_chassis_for_gw_chassis_empty(self): # Return False if gw_chassis is [] # This condition states that the chassis is valid, has valid # physnets and there are no gw_chassis present in the system. self.gw_chassis = [] self.assertFalse(utils.is_gateway_chassis_invalid( self.chassis_name, self.gw_chassis, self.physnet, self.chassis_physnets)) def test_gateway_chassis_for_chassis_not_in_gw_chassis_list(self): # Return True since chassis_name not in gw_chassis self.gw_chassis = ['host-2'] self.assertTrue(utils.is_gateway_chassis_invalid( self.chassis_name, self.gw_chassis, self.physnet, self.chassis_physnets)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/common/test__deprecate.py0000644000175000017500000000617100000000000025757 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
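# A minimal sketch of the moved-global pattern exercised by the tests
# below. This is an illustrative assumption built on the PEP 562 module
# __getattr__ hook (python >= 3.7), not neutron's actual _deprecate
# machinery, which goes through debtcollector.deprecate(): reading
# old_mod.a warns and transparently forwards to new_mod.a.
import types
import warnings

_new_mod = types.ModuleType('new_mod')
_new_mod.a = 'addax'

_old_mod = types.ModuleType('old_mod')


def _deprecated_getattr(name):
    if name == 'a':
        warnings.warn('old_mod.a moved to new_mod.a', DeprecationWarning)
        return _new_mod.a
    raise AttributeError(name)


# Installing __getattr__ in the module's namespace makes it the fallback
# for attribute lookups that miss the module dict.
_old_mod.__getattr__ = _deprecated_getattr

with warnings.catch_warnings(record=True) as _caught:
    warnings.simplefilter('always')
    assert _old_mod.a == 'addax'
assert _caught and _caught[0].category is DeprecationWarning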
import mock from oslo_utils import importutils from neutron.tests import base from neutron.tests.unit.common import moved_globals_target as new_mod def module_path(code): return 'neutron.tests.unit.common.moved_globals_' + code def import_code(code): return importutils.import_module(module_path(code)) def expect_moved(code, name, new_name=None): old_path = '.'.join([module_path(code), name]) new_path = '.'.join([new_mod.__name__, new_name or name]) message = 'moved to ' + new_path return old_path, message def expect_renamed(code, old_name, new_name): old_path = '.'.join([module_path(code), old_name]) new_path = '.'.join([module_path(code), new_name]) message = 'renamed to ' + new_path return old_path, message class TestMovedGlobals(base.BaseTestCase): def test_moved_global(self): code = 'code1' old_mod = import_code(code) with mock.patch('debtcollector.deprecate') as dc: self.assertEqual(new_mod.a, old_mod.a) old_path, msg = expect_moved(code, 'a') dc.assert_called_once_with(old_path, message=msg, stacklevel=4) def test_moved_global_no_attr(self): mod = import_code('code1') self.assertRaises(AttributeError, lambda: mod.NO_SUCH_ATTRIBUTE) def test_renamed_global(self): code = 'code1' mod = import_code(code) with mock.patch('debtcollector.deprecate') as dc: self.assertEqual(mod.d, mod.c) old_path, msg = expect_renamed(code, 'c', 'd') dc.assert_called_once_with(old_path, message=msg, stacklevel=4) def test_moved_global_renamed(self): code = 'code1' old_mod = import_code(code) with mock.patch('debtcollector.deprecate') as dc: self.assertEqual(new_mod.f, old_mod.e) old_path, msg = expect_moved(code, 'e', new_name='f') dc.assert_called_once_with(old_path, message=msg, stacklevel=4) def test_set_unmoved_global(self): mod = import_code('code1') mod.d = 'dibatag' self.assertEqual('dibatag', mod.d) def test_set_new_global(self): mod = import_code('code1') mod.n = 'nyala' self.assertEqual('nyala', mod.n) def test_delete_unmoved_global(self): mod = import_code('code1') self.assertEqual('gelada', mod.g) def delete_g(): del mod.g delete_g() self.assertRaises(AttributeError, lambda: mod.g) self.assertRaises(AttributeError, delete_g) def test_not_last_line(self): self.assertRaises(SystemExit, import_code, 'code2') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/common/test_cache_utils.py0000644000175000017500000001147200000000000026147 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
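# A condensed sketch of the memoisation contract that TestCachingDecorator
# below pins down. Names and structure here are assumptions for
# illustration, not the real neutron.common.cache_utils code: the key is
# the stringified (function name, *args, *sorted kwargs items) tuple, a
# sentinel separates "not cached" from a cached None, a missing _cache
# raises NotImplementedError, and a falsy _cache disables caching.
import functools

_not_cached = object()


def _cache_method_results(func):
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if not hasattr(self, '_cache'):
            raise NotImplementedError('%r must define _cache' % self)
        if not self._cache:
            return func(self, *args, **kwargs)
        key = str((func.__qualname__,) + args + tuple(sorted(kwargs.items())))
        try:
            result = self._cache.get(key)
        except TypeError:
            # Unhashable arguments: fall back to calling the function.
            return func(self, *args, **kwargs)
        if result is _not_cached:
            result = func(self, *args, **kwargs)
            self._cache.set(key, result)
        return result
    return wrapper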
import mock from oslo_config import cfg from oslo_config import fixture as config_fixture from neutron.common import cache_utils as cache from neutron.tests import base class CacheConfFixture(config_fixture.Config): def setUp(self): super(CacheConfFixture, self).setUp() cache.register_oslo_configs(self.conf) self.config(enabled=True, group='cache') class TestOsloCache(base.BaseTestCase): def setUp(self): super(TestOsloCache, self).setUp() self.memory_conf = cfg.ConfigOpts() memory_conf_fixture = CacheConfFixture(self.memory_conf) self.useFixture(memory_conf_fixture) self.dict_conf = cfg.ConfigOpts() dict_conf_fixture = CacheConfFixture(self.dict_conf) self.useFixture(dict_conf_fixture) dict_conf_fixture.config(expiration_time=60, backend='oslo_cache.dict', group='cache') self.null_cache_conf = cfg.ConfigOpts() null_conf_fixture = CacheConfFixture(self.null_cache_conf) self.useFixture(null_conf_fixture) null_conf_fixture.config(expiration_time=600, backend='dogpile.cache.null', group='cache') def _test_get_cache_region_helper(self, conf): region = cache._get_cache_region(conf) self.assertIsNotNone(region) def test_get_cache_region(self): self._test_get_cache_region_helper(self.dict_conf) self._test_get_cache_region_helper(self.null_cache_conf) @mock.patch('neutron.common.cache_utils._get_cache_region') def test_get_cache(self, mock_get_cache_region): self.assertIsNotNone(cache.get_cache(self.memory_conf)) self.assertIsNotNone(cache.get_cache(self.dict_conf)) self.assertIsNotNone(cache.get_cache(self.null_cache_conf)) mock_get_cache_region.assert_has_calls( [mock.call(self.dict_conf), mock.call(self.null_cache_conf)] ) class _CachingDecorator(object): def __init__(self): self.func_retval = 'bar' self._cache = mock.Mock() @cache.cache_method_results def func(self, *args, **kwargs): return self.func_retval class TestCachingDecorator(base.BaseTestCase): def setUp(self): super(TestCachingDecorator, self).setUp() self.decor = _CachingDecorator() self.func_name = '%(module)s._CachingDecorator.func' % { 'module': self.__module__ } self.not_cached = self.decor.func.func.__self__._not_cached def test_cache_miss(self): expected_key = (self.func_name, 1, 2, ('foo', 'bar')) args = (1, 2) kwargs = {'foo': 'bar'} self.decor._cache.get.return_value = self.not_cached retval = self.decor.func(*args, **kwargs) self.decor._cache.set.assert_called_once_with( str(expected_key), self.decor.func_retval) self.assertEqual(self.decor.func_retval, retval) def test_cache_hit(self): expected_key = (self.func_name, 1, 2, ('foo', 'bar')) args = (1, 2) kwargs = {'foo': 'bar'} retval = self.decor.func(*args, **kwargs) self.assertFalse(self.decor._cache.set.called) self.assertEqual(self.decor._cache.get.return_value, retval) self.decor._cache.get.assert_called_once_with(str(expected_key)) def test_get_unhashable(self): expected_key = (self.func_name, [1], 2) self.decor._cache.get.side_effect = TypeError retval = self.decor.func([1], 2) self.assertFalse(self.decor._cache.set.called) self.assertEqual(self.decor.func_retval, retval) self.decor._cache.get.assert_called_once_with(str(expected_key)) def test_missing_cache(self): delattr(self.decor, '_cache') self.assertRaises(NotImplementedError, self.decor.func, (1, 2)) def test_no_cache(self): self.decor._cache = False retval = self.decor.func((1, 2)) self.assertEqual(self.decor.func_retval, retval) def test_skip_cache(self): self.decor.func(1, 2, skip_cache=True) expected_key = (self.func_name, 1, 2) self.decor._cache.get.assert_not_called() 
self.decor._cache.set.assert_called_once_with(str(expected_key), self.decor.func_retval) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/common/test_coordination.py0000644000175000017500000000216500000000000026353 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_concurrency import lockutils from neutron.common import coordination from neutron.tests import base @mock.patch.object(lockutils, 'lock') class CoordinationTestCase(base.BaseTestCase): def test_synchronized(self, get_lock): @coordination.synchronized('lock-{f_name}-{foo.val}-{bar[val]}') def func(foo, bar): pass foo = mock.Mock() foo.val = 7 bar = mock.MagicMock() bar.__getitem__.return_value = 8 func(foo, bar) get_lock.assert_called_with('lock-func-7-8') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/common/test_ipv6_utils.py0000644000175000017500000000675300000000000025776 0ustar00coreycorey00000000000000# Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
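# A small sketch of the lock-name templating that CoordinationTestCase
# above asserts on: the template is expanded with f_name plus the bound
# call arguments, so 'lock-{f_name}-{foo.val}-{bar[val]}' becomes
# 'lock-func-7-8'. This stand-in only computes the name; it is an
# assumption for illustration and, unlike neutron.common.coordination,
# does not actually take the oslo.concurrency lock.
import functools
import inspect


def _synchronized(template):
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            bound = inspect.signature(f).bind(*args, **kwargs)
            lock_name = template.format(f_name=f.__name__, **bound.arguments)
            wrapper.last_lock_name = lock_name  # exposed for the demo below
            return f(*args, **kwargs)
        return wrapper
    return decorator


class _Foo(object):
    val = 7


@_synchronized('lock-{f_name}-{foo.val}-{bar[val]}')
def func(foo, bar):
    pass


func(_Foo(), {'val': 8})
assert func.last_lock_name == 'lock-func-7-8'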
import collections from neutron_lib import constants from neutron.common import ipv6_utils from neutron.tests import base class TestIsAutoAddressSubnet(base.BaseTestCase): def setUp(self): self.subnet = { 'cidr': '2001:200::/64', 'gateway_ip': '2001:200::1', 'ip_version': constants.IP_VERSION_6, 'ipv6_address_mode': None, 'ipv6_ra_mode': None } super(TestIsAutoAddressSubnet, self).setUp() def test_combinations(self): Mode = collections.namedtuple('Mode', "addr_mode ra_mode " "is_auto_address") subnets = [ Mode(None, None, False), Mode(constants.DHCPV6_STATEFUL, None, False), Mode(constants.DHCPV6_STATELESS, None, True), Mode(constants.IPV6_SLAAC, None, True), Mode(None, constants.DHCPV6_STATEFUL, False), Mode(None, constants.DHCPV6_STATELESS, True), Mode(None, constants.IPV6_SLAAC, True), Mode(constants.DHCPV6_STATEFUL, constants.DHCPV6_STATEFUL, False), Mode(constants.DHCPV6_STATELESS, constants.DHCPV6_STATELESS, True), Mode(constants.IPV6_SLAAC, constants.IPV6_SLAAC, True), ] for subnet in subnets: self.subnet['ipv6_address_mode'] = subnet.addr_mode self.subnet['ipv6_ra_mode'] = subnet.ra_mode self.assertEqual(subnet.is_auto_address, ipv6_utils.is_auto_address_subnet(self.subnet)) class TestIsEui64Address(base.BaseTestCase): def _test_eui_64(self, ips, expected): for ip in ips: self.assertEqual(expected, ipv6_utils.is_eui64_address(ip), "Error on %s" % ip) def test_invalid_eui64_addresses(self): ips = ('192.168.1.1', '192.168.1.0', '255.255.255.255', '0.0.0.0', 'fffe::', 'ff80::1', 'fffe::0cad:12ff:ff44:5566', 'fffe::0cad:12fe:fe44:5566', 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff') self._test_eui_64(ips, False) class TestValidIpv6URL(base.BaseTestCase): def test_valid_ipv6_url(self): host = "::1" port = 443 self.assertEqual("[::1]:443", ipv6_utils.valid_ipv6_url(host, port)) def test_invalid_ipv6_url(self): host = "::1" port = 443 self.assertNotEqual("::1:443", ipv6_utils.valid_ipv6_url(host, port)) def test_valid_ipv4_url(self): host = "192.168.1.2" port = 443 self.assertEqual("192.168.1.2:443", ipv6_utils.valid_ipv6_url(host, port)) def test_valid_hostname_url(self): host = "controller" port = 443 self.assertEqual("controller:443", ipv6_utils.valid_ipv6_url(host, port)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/common/test_utils.py0000644000175000017500000005021600000000000025023 0ustar00coreycorey00000000000000# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
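# A standalone sketch of the EUI-64 check TestIsEui64Address above
# exercises: an IPv6 interface identifier built from a MAC has 0xff, 0xfe
# spliced into its middle, i.e. bytes 11-12 of the packed address. The
# stdlib ipaddress module is used here as an illustrative assumption;
# neutron's ipv6_utils.is_eui64_address does an equivalent mask test with
# netaddr.
import ipaddress


def _is_eui64(ip):
    try:
        packed = ipaddress.IPv6Address(ip).packed
    except ipaddress.AddressValueError:
        return False  # IPv4 and malformed strings are never EUI-64
    return packed[11:13] == b'\xff\xfe'


assert _is_eui64('fe80::0cad:12ff:fe44:5566')
assert not _is_eui64('fffe::0cad:12fe:fe44:5566')
assert not _is_eui64('192.168.1.1')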
import os.path import random import re import sys import time import ddt import eventlet from eventlet import queue import mock import netaddr from neutron_lib import constants from oslo_log import log as logging from osprofiler import profiler import six import testscenarios import testtools from neutron.common import utils from neutron.tests import base from neutron.tests.unit import tests load_tests = testscenarios.load_tests_apply_scenarios class _PortRange(object): """A linked list of port ranges.""" def __init__(self, base, prev_ref=None): self.base = base self.mask = 0xffff self.prev_ref = prev_ref @property def possible_mask_base(self): return self.base & (self.mask << 1) @property def can_merge(self): return (self.prev_ref and self.possible_mask_base == self.prev_ref.possible_mask_base and self.mask == self.prev_ref.mask) def shake(self): """Try to merge ranges created earlier. If previous number in a list can be merged with current item under common mask, it's merged. Then it continues to do the same with the rest of the list. """ while self.can_merge: self.mask <<= 1 self.base = self.prev_ref.base if self.prev_ref: self.prev_ref = self.prev_ref.prev_ref def __str__(self): return _hex_format(self.base, self.mask) def get_list(self): if self.prev_ref: return self.prev_ref.get_list() + [str(self)] return [str(self)] _hex_str = lambda num: format(num, '#06x') def _hex_format(port, mask): if mask != 0xffff: return "%s/%s" % (_hex_str(port), _hex_str(0xffff & mask)) return _hex_str(port) def _port_rule_masking(port_min, port_max): current = None for num in range(port_min, port_max + 1): port_range = _PortRange(num, prev_ref=current) port_range.shake() current = port_range return current.get_list() class TestExceptionLogger(base.BaseTestCase): def test_normal_call(self): result = "Result" @utils.exception_logger() def func(): return result self.assertEqual(result, func()) def test_raise(self): result = "Result" @utils.exception_logger() def func(): raise RuntimeError(result) self.assertRaises(RuntimeError, func) def test_spawn_normal(self): result = "Result" logger = mock.Mock() @utils.exception_logger(logger=logger) def func(): return result gt = eventlet.spawn(func) self.assertEqual(result, gt.wait()) self.assertFalse(logger.called) def test_spawn_raise(self): result = "Result" logger = mock.Mock() @utils.exception_logger(logger=logger) def func(): raise RuntimeError(result) gt = eventlet.spawn(func) self.assertRaises(RuntimeError, gt.wait) self.assertTrue(logger.called) def test_pool_spawn_normal(self): logger = mock.Mock() calls = mock.Mock() @utils.exception_logger(logger=logger) def func(i): calls(i) pool = eventlet.GreenPool(4) for i in range(0, 4): pool.spawn(func, i) pool.waitall() calls.assert_has_calls([mock.call(0), mock.call(1), mock.call(2), mock.call(3)], any_order=True) self.assertFalse(logger.called) def test_pool_spawn_raise(self): logger = mock.Mock() calls = mock.Mock() @utils.exception_logger(logger=logger) def func(i): if i == 2: raise RuntimeError(2) else: calls(i) pool = eventlet.GreenPool(4) for i in range(0, 4): pool.spawn(func, i) pool.waitall() calls.assert_has_calls([mock.call(0), mock.call(1), mock.call(3)], any_order=True) self.assertTrue(logger.called) class TestDvrServices(base.BaseTestCase): def _test_is_dvr_serviced(self, device_owner, expected): self.assertEqual(expected, utils.is_dvr_serviced(device_owner)) def test_is_dvr_serviced_with_lb_port(self): self._test_is_dvr_serviced(constants.DEVICE_OWNER_LOADBALANCER, True) def 
test_is_dvr_serviced_with_lbv2_port(self): self._test_is_dvr_serviced(constants.DEVICE_OWNER_LOADBALANCERV2, True) def test_is_dvr_serviced_with_dhcp_port(self): self._test_is_dvr_serviced(constants.DEVICE_OWNER_DHCP, True) def test_is_dvr_serviced_with_vm_port(self): self._test_is_dvr_serviced(constants.DEVICE_OWNER_COMPUTE_PREFIX, True) class TestFipServices(base.BaseTestCase): def _test_is_fip_serviced(self, device_owner, expected): self.assertEqual(expected, utils.is_fip_serviced(device_owner)) def test_is_fip_serviced_with_lb_port(self): self._test_is_fip_serviced(constants.DEVICE_OWNER_LOADBALANCER, True) def test_is_fip_serviced_with_lbv2_port(self): self._test_is_fip_serviced(constants.DEVICE_OWNER_LOADBALANCERV2, True) def test_is_fip_serviced_with_dhcp_port(self): self._test_is_fip_serviced(constants.DEVICE_OWNER_DHCP, False) def test_is_fip_serviced_with_vm_port(self): self._test_is_fip_serviced(constants.DEVICE_OWNER_COMPUTE_PREFIX, True) class TestIpToCidr(base.BaseTestCase): def test_ip_to_cidr_ipv4_default(self): self.assertEqual('15.1.2.3/32', utils.ip_to_cidr('15.1.2.3')) def test_ip_to_cidr_ipv4_prefix(self): self.assertEqual('15.1.2.3/24', utils.ip_to_cidr('15.1.2.3', 24)) def test_ip_to_cidr_ipv4_netaddr(self): ip_address = netaddr.IPAddress('15.1.2.3') self.assertEqual('15.1.2.3/32', utils.ip_to_cidr(ip_address)) def test_ip_to_cidr_ipv4_bad_prefix(self): self.assertRaises(netaddr.core.AddrFormatError, utils.ip_to_cidr, '15.1.2.3', 33) def test_ip_to_cidr_ipv6_default(self): self.assertEqual('::1/128', utils.ip_to_cidr('::1')) def test_ip_to_cidr_ipv6_prefix(self): self.assertEqual('::1/64', utils.ip_to_cidr('::1', 64)) def test_ip_to_cidr_ipv6_bad_prefix(self): self.assertRaises(netaddr.core.AddrFormatError, utils.ip_to_cidr, '2000::1', 129) class TestCidrIsHost(base.BaseTestCase): def test_is_cidr_host_ipv4(self): self.assertTrue(utils.is_cidr_host('15.1.2.3/32')) def test_is_cidr_host_ipv4_not_cidr(self): self.assertRaises(ValueError, utils.is_cidr_host, '15.1.2.3') def test_is_cidr_host_ipv6(self): self.assertTrue(utils.is_cidr_host('2000::1/128')) def test_is_cidr_host_ipv6_netaddr(self): net = netaddr.IPNetwork("2000::1") self.assertTrue(utils.is_cidr_host(net)) def test_is_cidr_host_ipv6_32(self): self.assertFalse(utils.is_cidr_host('2000::1/32')) def test_is_cidr_host_ipv6_not_cidr(self): self.assertRaises(ValueError, utils.is_cidr_host, '2000::1') def test_is_cidr_host_ipv6_not_cidr_netaddr(self): ip_address = netaddr.IPAddress("2000::3") self.assertRaises(ValueError, utils.is_cidr_host, ip_address) class TestIpVersionFromInt(base.BaseTestCase): def test_ip_version_from_int_ipv4(self): self.assertEqual(constants.IPv4, utils.ip_version_from_int(constants.IP_VERSION_4)) def test_ip_version_from_int_ipv6(self): self.assertEqual(constants.IPv6, utils.ip_version_from_int(constants.IP_VERSION_6)) def test_ip_version_from_int_illegal_int(self): self.assertRaises(ValueError, utils.ip_version_from_int, 8) class TestIsVersionGreaterEqual(base.BaseTestCase): def test_is_version_greater_equal_greater(self): self.assertTrue(utils.is_version_greater_equal('1.6.2', '1.6.0')) def test_is_version_greater_equal_equal(self): self.assertTrue(utils.is_version_greater_equal('1.6.2', '1.6.2')) def test_is_version_greater_equal_less(self): self.assertFalse(utils.is_version_greater_equal('1.6.0', '1.6.2')) class TestDelayedStringRenderer(base.BaseTestCase): def test_call_deferred_until_str(self): my_func = mock.MagicMock(return_value='Brie cheese!') delayed = 
utils.DelayedStringRenderer(my_func, 1, 2, key_arg=44) self.assertFalse(my_func.called) string = "Type: %s" % delayed my_func.assert_called_once_with(1, 2, key_arg=44) self.assertEqual("Type: Brie cheese!", string) def test_not_called_with_low_log_level(self): LOG = logging.getLogger(__name__) # make sure we return logging to previous level current_log_level = LOG.logger.getEffectiveLevel() self.addCleanup(LOG.logger.setLevel, current_log_level) my_func = mock.MagicMock() delayed = utils.DelayedStringRenderer(my_func) # set to warning so we shouldn't be logging debug messages LOG.logger.setLevel(logging.logging.WARNING) LOG.debug("Hello %s", delayed) self.assertFalse(my_func.called) # but it should be called with the debug level LOG.logger.setLevel(logging.logging.DEBUG) LOG.debug("Hello %s", delayed) self.assertTrue(my_func.called) class TestPortRuleMasking(base.BaseTestCase): def test_port_rule_wrong_input(self): with testtools.ExpectedException(ValueError): utils.port_rule_masking(12, 5) def compare_port_ranges_results(self, port_min, port_max): observed = utils.port_rule_masking(port_min, port_max) expected = _port_rule_masking(port_min, port_max) self.assertItemsEqual(expected, observed) def test_port_rule_masking_random_ranges(self): # calling randint a bunch of times is really slow randports = sorted(random.sample(six.moves.range(1, 65536), 2000)) port_max = 0 for i in randports: port_min = port_max port_max = i self.compare_port_ranges_results(port_min, port_max) def test_port_rule_masking_edge_cases(self): # (port_min, port_max) tuples TESTING_DATA = [ (5, 12), (20, 130), (4501, 33057), (0, 65535), (22, 22), (5001, 5001), (0, 7), (8, 15), (1, 127), ] for port_min, port_max in TESTING_DATA: self.compare_port_ranges_results(port_min, port_max) class TestExcDetails(base.BaseTestCase): def test_attach_exc_details(self): e = Exception() utils.attach_exc_details(e, 'details') self.assertEqual('details', utils.extract_exc_details(e)) def test_attach_exc_details_with_interpolation(self): e = Exception() utils.attach_exc_details(e, 'details: %s', 'foo') self.assertEqual('details: foo', utils.extract_exc_details(e)) def test_attach_exc_details_with_None_interpolation(self): e = Exception() utils.attach_exc_details(e, 'details: %s', None) self.assertEqual( 'details: %s' % str(None), utils.extract_exc_details(e)) def test_attach_exc_details_with_multiple_interpolation(self): e = Exception() utils.attach_exc_details( e, 'details: %s, %s', ('foo', 'bar')) self.assertEqual('details: foo, bar', utils.extract_exc_details(e)) def test_attach_exc_details_with_dict_interpolation(self): e = Exception() utils.attach_exc_details( e, 'details: %(foo)s, %(bar)s', {'foo': 'foo', 'bar': 'bar'}) self.assertEqual('details: foo, bar', utils.extract_exc_details(e)) def test_extract_exc_details_no_details_attached(self): self.assertIsInstance( utils.extract_exc_details(Exception()), six.text_type) @ddt.ddt class ImportModulesRecursivelyTestCase(base.BaseTestCase): @ddt.data('/', r'\\') def test_recursion(self, separator): expected_modules = ( 'neutron.tests.unit.tests.example.dir.example_module', 'neutron.tests.unit.tests.example.dir.subdir.example_module', ) for module in expected_modules: sys.modules.pop(module, None) topdir = re.sub(r'[/\\]+', separator, os.path.dirname(tests.__file__)) modules = utils.import_modules_recursively(topdir) for module in expected_modules: self.assertIn(module, modules) self.assertIn(module, sys.modules) class TestThrottler(base.BaseTestCase): def test_throttler(self): threshold = 
1 orig_function = mock.Mock() # Add this magic name as it's required by functools orig_function.__name__ = 'mock_func' throttled_func = utils.throttler(threshold)(orig_function) throttled_func() sleep = utils.eventlet.sleep def sleep_mock(amount_to_sleep): sleep(amount_to_sleep) self.assertGreater(threshold, amount_to_sleep) with mock.patch.object(utils.eventlet, "sleep", side_effect=sleep_mock): throttled_func() self.assertEqual(2, orig_function.call_count) lock_with_timer = six.get_function_closure( throttled_func)[1].cell_contents timestamp = lock_with_timer.timestamp - threshold lock_with_timer.timestamp = timestamp throttled_func() self.assertEqual(3, orig_function.call_count) self.assertLess(timestamp, lock_with_timer.timestamp) def test_method_docstring_is_preserved(self): class Klass(object): @utils.throttler() def method(self): """Docstring""" self.assertEqual("Docstring", Klass.method.__doc__) def test_method_still_callable(self): class Klass(object): @utils.throttler() def method(self): pass obj = Klass() obj.method() class BaseUnitConversionTest(object): def test_bytes_to_bits(self): test_values = [ (0, 0), # 0 bytes should be 0 bits (1, 8) # 1 byte should be 8 bits ] for input_bytes, expected_bits in test_values: self.assertEqual( expected_bits, utils.bytes_to_bits(input_bytes) ) class TestSIUnitConversions(BaseUnitConversionTest, base.BaseTestCase): base_unit = constants.SI_BASE def test_bits_to_kilobits(self): test_values = [ (0, 0), # 0 bits should be 0 kilobits (1, 1), # 1 bit should be 1 kilobit (999, 1), # 999 bits should be 1 kilobit (1000, 1), # 1000 bits should be 1 kilobit (1001, 2) # 1001 bits should be 2 kilobits ] for input_bits, expected_kilobits in test_values: self.assertEqual( expected_kilobits, utils.bits_to_kilobits(input_bits, self.base_unit) ) class TestIECUnitConversions(BaseUnitConversionTest, base.BaseTestCase): base_unit = constants.IEC_BASE def test_bits_to_kilobits(self): test_values = [ (0, 0), # 0 bits should be 0 kilobits (1, 1), # 1 bit should be 1 kilobit (1023, 1), # 1023 bits should be 1 kilobit (1024, 1), # 1024 bits should be 1 kilobit (1025, 2) # 1025 bits should be 2 kilobits ] for input_bits, expected_kilobits in test_values: self.assertEqual( expected_kilobits, utils.bits_to_kilobits(input_bits, self.base_unit) ) class TestRpBandwidthValidator(base.BaseTestCase): def setUp(self): super(TestRpBandwidthValidator, self).setUp() self.device_name_set = {'ens4', 'ens7'} self.valid_rp_bandwidths = { 'ens7': {'egress': 10000, 'ingress': 10000} } self.not_valid_rp_bandwidth = { 'ens8': {'egress': 10000, 'ingress': 10000} } def test_validate_rp_bandwidth_with_device_names(self): try: utils.validate_rp_bandwidth(self.valid_rp_bandwidths, self.device_name_set) except ValueError: self.fail("validate_rp_bandwidth failed to validate %s" % self.valid_rp_bandwidths) self.assertRaises(ValueError, utils.validate_rp_bandwidth, self.not_valid_rp_bandwidth, self.device_name_set) class TimerTestCase(base.BaseTestCase): def test__getattr(self): with utils.Timer() as timer: time.sleep(1) self.assertEqual(1, round(timer.total_seconds())) self.assertEqual(1, timer.delta.seconds) def test__enter_with_timeout(self): with utils.Timer(timeout=10) as timer: time.sleep(1) self.assertEqual(1, round(timer.total_seconds())) def test__enter_with_timeout_exception(self): msg = r'Timer timeout expired after 1 second\(s\).'
with self.assertRaisesRegex(utils.TimerTimeout, msg): with utils.Timer(timeout=1): time.sleep(2) def test__enter_with_timeout_no_exception(self): with utils.Timer(timeout=1, raise_exception=False): time.sleep(2) def test__iter(self): iterations = [] for i in utils.Timer(timeout=2): iterations.append(i) time.sleep(1.1) self.assertEqual(2, len(iterations)) def test_delta_time_sec(self): with utils.Timer() as timer: self.assertIsInstance(timer.delta_time_sec, float) class SpawnWithOrWithoutProfilerTestCase( testscenarios.WithScenarios, base.BaseTestCase): scenarios = [ ('spawn', {'spawn_variant': utils.spawn}), ('spawn_n', {'spawn_variant': utils.spawn_n}), ] def _compare_profilers_in_parent_and_in_child(self, init_profiler): q = queue.Queue() def is_profiler_initialized(where): # Instead of returning a single boolean add information so we can # identify which thread produced the result without depending on # queue order. return {where: bool(profiler.get())} def thread_with_no_leaked_profiler(): if init_profiler: profiler.init(hmac_key='fake secret') self.spawn_variant( lambda: q.put(is_profiler_initialized('in-child'))) q.put(is_profiler_initialized('in-parent')) # Make sure in parent we start with an uninitialized profiler by # eventlet.spawn()-ing a new thread. Otherwise the unit test runner # thread may leak an initialized profiler from one test to another. eventlet.spawn(thread_with_no_leaked_profiler) # In order to have some global protection against leaking initialized # profilers neutron.tests.base.BaseTestCase.setUp() also calls # addCleanup(profiler.clean) # Merge the results independently of queue order. results = {} results.update(q.get()) results.update(q.get()) self.assertEqual( {'in-parent': init_profiler, 'in-child': init_profiler}, results) def test_spawn_with_profiler(self): self._compare_profilers_in_parent_and_in_child(init_profiler=True) def test_spawn_without_profiler(self): self._compare_profilers_in_parent_and_in_child(init_profiler=False) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982290.9950418 neutron-16.0.0.0b2.dev214/neutron/tests/unit/conf/0000755000175000017500000000000000000000000021703 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4350457 neutron-16.0.0.0b2.dev214/neutron/tests/unit/conf/agent/0000755000175000017500000000000000000000000023001 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/conf/agent/__init__.py0000644000175000017500000000000000000000000025100 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/conf/agent/test_common.py0000644000175000017500000000306100000000000025702 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. from neutron.conf.agent import common as config from neutron.tests import base def test_setup_conf(): conf = config.setup_conf() assert conf.state_path.endswith('/var/lib/neutron') class TestRootHelper(base.BaseTestCase): def test_agent_root_helper(self): conf = config.setup_conf() config.register_root_helper(conf) conf.set_override('root_helper', 'my_root_helper', 'AGENT') self.assertEqual(config.get_root_helper(conf), 'my_root_helper') def test_root_default(self): conf = config.setup_conf() config.register_root_helper(conf) self.assertEqual(config.get_root_helper(conf), 'sudo') def test_agent_root_helper_daemon(self): conf = config.setup_conf() config.register_root_helper(conf) rhd = 'my_root_helper_daemon' conf.set_override('root_helper_daemon', rhd, 'AGENT') self.assertEqual(rhd, conf.AGENT.root_helper_daemon) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4350457 neutron-16.0.0.0b2.dev214/neutron/tests/unit/core_extensions/0000755000175000017500000000000000000000000024165 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/core_extensions/__init__.py0000644000175000017500000000000000000000000026264 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/core_extensions/test_qos.py0000644000175000017500000004253400000000000026410 0ustar00coreycorey00000000000000# Copyright (c) 2015 Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
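# A short usage sketch of the oslo.config pattern TestRootHelper above
# depends on: the option lives in the AGENT group and its default applies
# until set_override() replaces it. The rootwrap command string is only
# an illustrative assumption, not a recommended deployment value.
from oslo_config import cfg

_conf = cfg.ConfigOpts()
_conf.register_opts([cfg.StrOpt('root_helper', default='sudo')],
                    group='AGENT')
assert _conf.AGENT.root_helper == 'sudo'
_conf.set_override('root_helper',
                   'sudo neutron-rootwrap /etc/neutron/rootwrap.conf',
                   'AGENT')
assert _conf.AGENT.root_helper.startswith('sudo neutron-rootwrap')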
import itertools import mock from neutron_lib import context from neutron_lib.exceptions import qos as qos_exc from neutron_lib.plugins import constants as plugin_constants from neutron_lib.services.qos import constants as qos_consts from oslo_utils import uuidutils from neutron.core_extensions import base as base_core from neutron.core_extensions import qos as qos_core from neutron.objects.qos import policy from neutron.tests import base def _get_test_dbdata(qos_policy_id, qos_network_policy_id=None): retval = {'id': None, 'qos_policy_binding': {'policy_id': qos_policy_id, 'network_id': 'fake_net_id'}} if qos_network_policy_id: retval['qos_network_policy_binding'] = { 'policy_id': qos_network_policy_id} return retval class QosCoreResourceExtensionTestCase(base.BaseTestCase): def setUp(self): super(QosCoreResourceExtensionTestCase, self).setUp() self.core_extension = qos_core.QosCoreResourceExtension() policy_p = mock.patch('neutron.objects.qos.policy.QosPolicy') self.policy_m = policy_p.start() self.context = context.get_admin_context() self.non_admin_context = context.Context('user_id', 'tenant_id') def test_process_fields_no_qos_policy_id(self): self.core_extension.process_fields( self.context, base_core.PORT, mock.ANY, {}, None) self.assertFalse(self.policy_m.called) def _mock_plugin_loaded(self, plugin_loaded): plugins = {} if plugin_loaded: plugins[plugin_constants.QOS] = None return mock.patch('neutron_lib.plugins.directory.get_plugins', return_value=plugins) def test_process_fields_no_qos_plugin_loaded(self): with self._mock_plugin_loaded(False): self.core_extension.process_fields( self.context, base_core.PORT, mock.ANY, {qos_consts.QOS_POLICY_ID: None}, None) self.assertFalse(self.policy_m.called) def test_process_fields_port_new_policy(self): with self._mock_plugin_loaded(True): qos_policy_id = mock.Mock() actual_port = {'id': mock.Mock(), qos_consts.QOS_POLICY_ID: qos_policy_id} qos_policy = mock.MagicMock() self.policy_m.get_policy_obj = mock.Mock(return_value=qos_policy) self.core_extension.process_fields( self.context, base_core.PORT, base_core.EVENT_UPDATE, {qos_consts.QOS_POLICY_ID: qos_policy_id}, actual_port) qos_policy.attach_port.assert_called_once_with(actual_port['id']) def test_process_fields_port_updated_policy(self): with self._mock_plugin_loaded(True): qos_policy1_id = mock.Mock() qos_policy2_id = mock.Mock() port_id = mock.Mock() actual_port = {'id': port_id, qos_consts.QOS_POLICY_ID: qos_policy1_id} old_qos_policy = mock.MagicMock() self.policy_m.get_port_policy = mock.Mock( return_value=old_qos_policy) new_qos_policy = mock.MagicMock() self.policy_m.get_policy_obj = mock.Mock( return_value=new_qos_policy) self.core_extension.process_fields( self.context, base_core.PORT, base_core.EVENT_UPDATE, {qos_consts.QOS_POLICY_ID: qos_policy2_id}, actual_port) old_qos_policy.detach_port.assert_called_once_with(port_id) new_qos_policy.attach_port.assert_called_once_with(port_id) self.assertEqual(qos_policy2_id, actual_port['qos_policy_id']) def test_process_resource_port_updated_no_policy(self): with self._mock_plugin_loaded(True): port_id = mock.Mock() qos_policy_id = mock.Mock() actual_port = {'id': port_id, qos_consts.QOS_POLICY_ID: qos_policy_id} old_qos_policy = mock.MagicMock() self.policy_m.get_port_policy = mock.Mock( return_value=old_qos_policy) new_qos_policy = mock.MagicMock() self.policy_m.get_object = mock.Mock(return_value=new_qos_policy) self.core_extension.process_fields( self.context, base_core.PORT, base_core.EVENT_UPDATE, {qos_consts.QOS_POLICY_ID: 
None}, actual_port) old_qos_policy.detach_port.assert_called_once_with(port_id) self.assertIsNone(actual_port['qos_policy_id']) def _process_port_updated_policy(self, context, shared, policy_tenant_id): with self._mock_plugin_loaded(True): port_id = mock.sentinel.port_id qos_policy_id = mock.sentinel.policy_id actual_port = {'id': port_id, qos_consts.QOS_POLICY_ID: qos_policy_id} old_qos_policy = mock.MagicMock() old_qos_policy.shared = shared old_qos_policy.tenant_id = policy_tenant_id self.policy_m.get_port_policy = mock.Mock( return_value=old_qos_policy) self.core_extension.process_fields( context, base_core.PORT, base_core.EVENT_UPDATE, {qos_consts.QOS_POLICY_ID: None}, actual_port) old_qos_policy.detach_port.assert_called_once_with(port_id) def test_process_resource_port_updated_remove_own_policy(self): self._process_port_updated_policy( context=self.non_admin_context, shared=False, policy_tenant_id=self.non_admin_context.tenant_id) def test_process_resource_port_updated_admin_remove_provided_policy(self): self._process_port_updated_policy( context=self.context, shared=False, policy_tenant_id=self.non_admin_context.tenant_id) def test_process_resource_port_updated_remove_shared_policy(self): self._process_port_updated_policy( context=self.non_admin_context, shared=True, policy_tenant_id=self.context.tenant_id) def test_process_resource_port_updated_remove_provided_policy(self): self.policy_m.is_accessible.return_value = False self.assertRaises(qos_exc.PolicyRemoveAuthorizationError, self._process_port_updated_policy, context=self.non_admin_context, shared=False, policy_tenant_id=self.context.tenant_id) def test_process_resource_update_network_updated_no_policy(self): with self._mock_plugin_loaded(True): network_id = mock.Mock() qos_policy_id = mock.Mock() actual_network = {'id': network_id, qos_consts.QOS_POLICY_ID: qos_policy_id} old_qos_policy = mock.MagicMock() self.policy_m.get_network_policy = mock.Mock( return_value=old_qos_policy) new_qos_policy = mock.MagicMock() self.policy_m.get_object = mock.Mock(return_value=new_qos_policy) self.core_extension.process_fields( self.context, base_core.NETWORK, base_core.EVENT_UPDATE, {qos_consts.QOS_POLICY_ID: None}, actual_network) old_qos_policy.detach_network.assert_called_once_with(network_id) self.assertIsNone(actual_network['qos_policy_id']) def test_process_fields_update_network_new_policy(self): with self._mock_plugin_loaded(True): qos_policy_id = mock.Mock() actual_network = {'id': mock.Mock(), qos_consts.QOS_POLICY_ID: qos_policy_id} qos_policy = mock.MagicMock() self.policy_m.get_policy_obj = mock.Mock(return_value=qos_policy) self.core_extension.process_fields( self.context, base_core.NETWORK, base_core.EVENT_UPDATE, {qos_consts.QOS_POLICY_ID: qos_policy_id}, actual_network) qos_policy.attach_network.assert_called_once_with( actual_network['id']) def test_process_fields_update_network_updated_policy(self): with self._mock_plugin_loaded(True): qos_policy_id = mock.Mock() network_id = mock.Mock() actual_network = {'id': network_id, qos_consts.QOS_POLICY_ID: qos_policy_id} old_qos_policy = mock.MagicMock() self.policy_m.get_network_policy = mock.Mock( return_value=old_qos_policy) new_qos_policy = mock.MagicMock() self.policy_m.get_policy_obj = mock.Mock( return_value=new_qos_policy) self.core_extension.process_fields( self.context, base_core.NETWORK, base_core.EVENT_UPDATE, {qos_consts.QOS_POLICY_ID: qos_policy_id}, actual_network) old_qos_policy.detach_network.assert_called_once_with(network_id) 
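# Swapping policies on update must detach the old policy before the new one is attached.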
new_qos_policy.attach_network.assert_called_once_with(network_id) def _process_network_updated_policy(self, context, shared, policy_tenant_id): with self._mock_plugin_loaded(True): qos_policy_id = mock.sentinel.policy_id network_id = mock.sentinel.net_id actual_network = {'id': network_id, qos_consts.QOS_POLICY_ID: qos_policy_id} old_qos_policy = mock.MagicMock() old_qos_policy.shared = shared old_qos_policy.tenant_id = policy_tenant_id self.policy_m.get_network_policy.return_value = old_qos_policy self.core_extension.process_fields( context, base_core.NETWORK, base_core.EVENT_UPDATE, {qos_consts.QOS_POLICY_ID: None}, actual_network) old_qos_policy.detach_network.assert_called_once_with(network_id) def test_process_fields_update_network_updated_remove_shared_policy(self): self._process_network_updated_policy( context=self.non_admin_context, shared=True, policy_tenant_id=self.context.tenant_id) def test_process_fields_network_updated_remove_own_policy(self): self._process_network_updated_policy( context=self.non_admin_context, shared=True, policy_tenant_id=self.non_admin_context.tenant_id) def test_process_fields_update_network_admin_remove_provided_policy(self): self._process_network_updated_policy( context=self.context, shared=True, policy_tenant_id=self.non_admin_context.tenant_id) def test_process_fields_update_network_remove_provided_policy(self): self.policy_m.is_accessible.return_value = False self.assertRaises(qos_exc.PolicyRemoveAuthorizationError, self._process_network_updated_policy, context=self.non_admin_context, shared=False, policy_tenant_id=self.context.tenant_id) def test_process_fields_create_network(self): with self._mock_plugin_loaded(True): qos_policy_id = mock.Mock() network_id = mock.Mock() actual_network = {'id': network_id, qos_consts.QOS_POLICY_ID: qos_policy_id} self.policy_m.get_network_policy = mock.Mock( return_value=qos_policy_id) qos_policy = mock.MagicMock() self.policy_m.get_policy_obj = mock.Mock(return_value=qos_policy) self.core_extension.process_fields( self.context, base_core.NETWORK, base_core.EVENT_CREATE, actual_network, actual_network) qos_policy.attach_network.assert_called_once_with(network_id) def test_process_fields_create_network_no_policy(self): with self._mock_plugin_loaded(True): project_id = mock.Mock() network_id = mock.Mock() actual_network = {'project_id': project_id, 'id': network_id, qos_consts.QOS_POLICY_ID: None} qos_policy_id = mock.Mock() qos_policy = mock.MagicMock() with mock.patch.object(policy.QosPolicyDefault, "get_object", return_value=qos_policy_id) as mock_get_default_policy_id: self.policy_m.get_policy_obj = mock.Mock( return_value=qos_policy) self.core_extension.process_fields( self.context, base_core.NETWORK, base_core.EVENT_CREATE, actual_network, actual_network) qos_policy.attach_network.assert_called_once_with(network_id) mock_get_default_policy_id.assert_called_once_with( self.context, project_id=project_id) def test_process_fields_create_network_no_default_policy(self): with self._mock_plugin_loaded(True): project_id = mock.Mock() network_id = mock.Mock() actual_network = {'project_id': project_id, 'id': network_id, qos_consts.QOS_POLICY_ID: None} qos_policy = mock.MagicMock() with mock.patch.object(policy.QosPolicyDefault, "get_object", return_value=None) as mock_get_default_policy_id: self.policy_m.get_object = mock.Mock(return_value=qos_policy) self.core_extension.process_fields( self.context, base_core.NETWORK, base_core.EVENT_CREATE, actual_network, actual_network) qos_policy.attach_network.assert_not_called() 
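# Even though nothing was attached, the default-policy lookup itself is still expected to happen exactly once for the project.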
mock_get_default_policy_id.assert_called_once_with( self.context, project_id=project_id) def test_extract_fields_plugin_not_loaded(self): with self._mock_plugin_loaded(False): fields = self.core_extension.extract_fields(None, None) self.assertEqual({}, fields) def _test_extract_fields_for_port(self, qos_policy_id, qos_network_policy_id=None): with self._mock_plugin_loaded(True): fields = self.core_extension.extract_fields( base_core.PORT, _get_test_dbdata(qos_policy_id, qos_network_policy_id)) expected = { qos_consts.QOS_POLICY_ID: qos_policy_id, qos_consts.QOS_NETWORK_POLICY_ID: qos_network_policy_id} self.assertEqual(expected, fields) def test_extract_fields_for_port(self): port_qos_policies = [None, uuidutils.generate_uuid()] network_qos_policies = [None, uuidutils.generate_uuid()] for port_qos, net_qos in itertools.product(port_qos_policies, network_qos_policies): self._test_extract_fields_for_port(port_qos, net_qos) def _test_extract_fields_for_network(self, qos_policy_id): with self._mock_plugin_loaded(True): fields = self.core_extension.extract_fields( base_core.NETWORK, _get_test_dbdata(qos_policy_id)) self.assertEqual({qos_consts.QOS_POLICY_ID: qos_policy_id}, fields) def test_extract_fields_no_network_policy(self): self._test_extract_fields_for_network(None) def test_extract_fields_network_policy_exists(self): qos_policy_id = mock.Mock() qos_policy = mock.Mock() qos_policy.id = qos_policy_id self._test_extract_fields_for_network(qos_policy_id) def test__create_network_policy(self): default_policy_id = uuidutils.generate_uuid() network_policy_id = uuidutils.generate_uuid() policy_mock = mock.MagicMock(qos_policy_id=default_policy_id) network_changes = mock.Mock() network = {'id': 'dummy_id', 'project_id': 'dummy_project', qos_consts.QOS_POLICY_ID: None} with mock.patch.object(policy.QosPolicyDefault, 'get_object', return_value=policy_mock),\ mock.patch.object(policy.QosPolicy, 'get_object'): # Creating network with policy id network_changes.get.return_value = network_policy_id self.core_extension._create_network_policy( self.context, network, network_changes) self.assertEqual(network_policy_id, network[qos_consts.QOS_POLICY_ID]) # Creating network without policy id network_changes.get.return_value = None self.core_extension._create_network_policy( self.context, network, network_changes) self.assertEqual(default_policy_id, network[qos_consts.QOS_POLICY_ID]) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4390457 neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/0000755000175000017500000000000000000000000021343 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/__init__.py0000644000175000017500000000000000000000000023442 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4390457 neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/metering/0000755000175000017500000000000000000000000023155 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/metering/__init__.py0000644000175000017500000000000000000000000025254 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 
neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/metering/test_metering_db.py0000644000175000017500000003647700000000000027066 0ustar00coreycorey00000000000000# Copyright (C) 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib from neutron_lib.api.definitions import metering as metering_apidef from neutron_lib import constants as n_consts from neutron_lib import context from neutron_lib.db import constants as db_const from neutron_lib.plugins import constants from oslo_utils import uuidutils import webob.exc from neutron.api import extensions from neutron.common import config import neutron.extensions from neutron.services.metering import metering_plugin from neutron.tests.unit.db import test_db_base_plugin_v2 DB_METERING_PLUGIN_KLASS = ( "neutron.services.metering." "metering_plugin.MeteringPlugin" ) extensions_path = ':'.join(neutron.extensions.__path__) _long_description_ok = 'x' * (db_const.LONG_DESCRIPTION_FIELD_SIZE) _long_description_ng = 'x' * (db_const.LONG_DESCRIPTION_FIELD_SIZE + 1) _fake_uuid = uuidutils.generate_uuid class MeteringPluginDbTestCaseMixin(object): def _create_metering_label(self, fmt, name, description, **kwargs): data = {'metering_label': {'name': name, 'tenant_id': kwargs.get('tenant_id', 'test-tenant'), 'shared': kwargs.get('shared', False), 'description': description}} req = self.new_create_request('metering-labels', data, fmt) if kwargs.get('set_context') and 'tenant_id' in kwargs: # create a specific auth context for this request req.environ['neutron.context'] = ( context.Context('', kwargs['tenant_id'], is_admin=kwargs.get('is_admin', True))) return req.get_response(self.ext_api) def _make_metering_label(self, fmt, name, description, **kwargs): res = self._create_metering_label(fmt, name, description, **kwargs) if res.status_int >= 400: raise webob.exc.HTTPClientError(code=res.status_int) return self.deserialize(fmt, res) def _create_metering_label_rule(self, fmt, metering_label_id, direction, remote_ip_prefix, excluded, **kwargs): data = {'metering_label_rule': {'metering_label_id': metering_label_id, 'tenant_id': kwargs.get('tenant_id', 'test-tenant'), 'direction': direction, 'excluded': excluded, 'remote_ip_prefix': remote_ip_prefix}} req = self.new_create_request('metering-label-rules', data, fmt) if kwargs.get('set_context') and 'tenant_id' in kwargs: # create a specific auth context for this request req.environ['neutron.context'] = ( context.Context('', kwargs['tenant_id'])) return req.get_response(self.ext_api) def _make_metering_label_rule(self, fmt, metering_label_id, direction, remote_ip_prefix, excluded, **kwargs): res = self._create_metering_label_rule(fmt, metering_label_id, direction, remote_ip_prefix, excluded, **kwargs) if res.status_int >= 400: raise webob.exc.HTTPClientError(code=res.status_int) return self.deserialize(fmt, res) @contextlib.contextmanager def metering_label(self, name='label', description='desc', fmt=None, **kwargs): if not fmt: fmt = self.fmt metering_label = self._make_metering_label(fmt, name, 
description, **kwargs) yield metering_label @contextlib.contextmanager def metering_label_rule(self, metering_label_id=None, direction='ingress', remote_ip_prefix='10.0.0.0/24', excluded='false', fmt=None): if not fmt: fmt = self.fmt metering_label_rule = self._make_metering_label_rule(fmt, metering_label_id, direction, remote_ip_prefix, excluded) yield metering_label_rule class MeteringPluginDbTestCase( test_db_base_plugin_v2.NeutronDbPluginV2TestCase, MeteringPluginDbTestCaseMixin): fmt = 'json' resource_prefix_map = dict( (k.replace('_', '-'), "/metering") for k in metering_apidef.RESOURCE_ATTRIBUTE_MAP.keys() ) def setUp(self, plugin=None): service_plugins = {'metering_plugin_name': DB_METERING_PLUGIN_KLASS} super(MeteringPluginDbTestCase, self).setUp( plugin=plugin, service_plugins=service_plugins ) self.plugin = metering_plugin.MeteringPlugin() ext_mgr = extensions.PluginAwareExtensionManager( extensions_path, {constants.METERING: self.plugin} ) app = config.load_paste_app('extensions_test_app') self.ext_api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr) class TestMetering(MeteringPluginDbTestCase): def test_create_metering_label(self): name = 'my label' description = 'my metering label' keys = [('name', name,), ('description', description)] with self.metering_label(name, description) as metering_label: for k, v, in keys: self.assertEqual(metering_label['metering_label'][k], v) def test_create_metering_label_shared(self): name = 'my label' description = 'my metering label' shared = True keys = [('name', name,), ('description', description), ('shared', shared)] with self.metering_label(name, description, shared=shared) as metering_label: for k, v, in keys: self.assertEqual(metering_label['metering_label'][k], v) def test_create_metering_label_with_max_description_length(self): res = self._create_metering_label(self.fmt, 'my label', _long_description_ok) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) def test_create_metering_label_with_too_long_description(self): res = self._create_metering_label(self.fmt, 'my label', _long_description_ng) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_update_metering_label(self): name = 'my label' description = 'my metering label' data = {'metering_label': {}} with self.metering_label(name, description) as metering_label: metering_label_id = metering_label['metering_label']['id'] self._update('metering-labels', metering_label_id, data, webob.exc.HTTPNotImplemented.code) def test_delete_metering_label(self): name = 'my label' description = 'my metering label' with self.metering_label(name, description) as metering_label: metering_label_id = metering_label['metering_label']['id'] self._delete('metering-labels', metering_label_id, 204) def test_list_metering_label(self): name = 'my label' description = 'my metering label' with self.metering_label(name, description) as v1,\ self.metering_label(name, description) as v2: metering_label = (v1, v2) self._test_list_resources('metering-label', metering_label) def test_create_metering_label_rule(self): name = 'my label' description = 'my metering label' with self.metering_label(name, description) as metering_label: metering_label_id = metering_label['metering_label']['id'] direction = 'egress' remote_ip_prefix = '192.168.0.0/24' excluded = True keys = [('metering_label_id', metering_label_id), ('direction', direction), ('excluded', excluded), ('remote_ip_prefix', remote_ip_prefix)] with self.metering_label_rule(metering_label_id, direction, remote_ip_prefix, 
excluded) as label_rule: for k, v, in keys: self.assertEqual(label_rule['metering_label_rule'][k], v) def test_create_metering_label_rule_with_non_existent_label(self): direction = 'egress' remote_ip_prefix = '192.168.0.0/24' excluded = True res = self._create_metering_label_rule(self.fmt, _fake_uuid(), direction, remote_ip_prefix, excluded) self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int) def test_update_metering_label_rule(self): name = 'my label' description = 'my metering label' direction = 'egress' remote_ip_prefix = '192.168.0.0/24' data = {'metering_label_rule': {}} with self.metering_label(name, description) as metering_label, \ self.metering_label_rule( metering_label['metering_label']['id'], direction, remote_ip_prefix) as label_rule: rule_id = label_rule['metering_label_rule']['id'] self._update('metering-label-rules', rule_id, data, webob.exc.HTTPNotImplemented.code) def test_delete_metering_label_rule(self): name = 'my label' description = 'my metering label' with self.metering_label(name, description) as metering_label: metering_label_id = metering_label['metering_label']['id'] direction = 'egress' remote_ip_prefix = '192.168.0.0/24' excluded = True with self.metering_label_rule(metering_label_id, direction, remote_ip_prefix, excluded) as label_rule: rule_id = label_rule['metering_label_rule']['id'] self._delete('metering-label-rules', rule_id, 204) def test_list_metering_label_rule(self): name = 'my label' description = 'my metering label' with self.metering_label(name, description) as metering_label: metering_label_id = metering_label['metering_label']['id'] direction = 'egress' remote_ip_prefix = '192.168.0.0/24' excluded = True with self.metering_label_rule(metering_label_id, direction, remote_ip_prefix, excluded) as v1,\ self.metering_label_rule(metering_label_id, 'ingress', remote_ip_prefix, excluded) as v2: metering_label_rule = (v1, v2) self._test_list_resources('metering-label-rule', metering_label_rule) def test_create_metering_label_rules(self): name = 'my label' description = 'my metering label' with self.metering_label(name, description) as metering_label: metering_label_id = metering_label['metering_label']['id'] direction = 'egress' remote_ip_prefix = '192.168.0.0/24' excluded = True with self.metering_label_rule(metering_label_id, direction, remote_ip_prefix, excluded) as v1,\ self.metering_label_rule(metering_label_id, direction, n_consts.IPv4_ANY, False) as v2: metering_label_rule = (v1, v2) self._test_list_resources('metering-label-rule', metering_label_rule) def test_create_overlap_metering_label_rules(self): name = 'my label' description = 'my metering label' with self.metering_label(name, description) as metering_label: metering_label_id = metering_label['metering_label']['id'] direction = 'egress' remote_ip_prefix1 = '192.168.0.0/24' remote_ip_prefix2 = '192.168.0.0/16' excluded = True with self.metering_label_rule(metering_label_id, direction, remote_ip_prefix1, excluded): res = self._create_metering_label_rule(self.fmt, metering_label_id, direction, remote_ip_prefix2, excluded) self.assertEqual(webob.exc.HTTPConflict.code, res.status_int) def test_create_metering_label_rule_two_labels(self): name1 = 'my label 1' name2 = 'my label 2' description = 'my metering label' with self.metering_label(name1, description) as metering_label1: metering_label_id1 = metering_label1['metering_label']['id'] with self.metering_label(name2, description) as metering_label2: metering_label_id2 = metering_label2['metering_label']['id'] direction = 'egress' 
remote_ip_prefix = '192.168.0.0/24' excluded = True with self.metering_label_rule(metering_label_id1, direction, remote_ip_prefix, excluded) as v1,\ self.metering_label_rule(metering_label_id2, direction, remote_ip_prefix, excluded) as v2: metering_label_rule = (v1, v2) self._test_list_resources('metering-label-rule', metering_label_rule) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4390457 neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/quota/0000755000175000017500000000000000000000000022474 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/quota/__init__.py0000644000175000017500000000000000000000000024573 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/quota/test_api.py0000644000175000017500000003626400000000000024671 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import mock from neutron_lib import context from neutron_lib.plugins import constants as const from neutron_lib.plugins import directory from oslo_config import cfg from neutron.db.quota import api as quota_api from neutron.tests.unit.db.quota import test_driver from neutron.tests.unit import testlib_api DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' class TestQuotaDbApi(testlib_api.SqlTestCaseLight): def _set_context(self): self.tenant_id = 'Higuain' self.context = context.Context('Gonzalo', self.tenant_id, is_admin=False, is_advsvc=False) def _create_reservation(self, resource_deltas, tenant_id=None, expiration=None): tenant_id = tenant_id or self.tenant_id return quota_api.create_reservation( self.context, tenant_id, resource_deltas, expiration) def _create_quota_usage(self, resource, used, tenant_id=None): tenant_id = tenant_id or self.tenant_id return quota_api.set_quota_usage(context.get_admin_context(), resource, tenant_id, in_use=used) def _verify_quota_usage(self, usage_info, expected_resource=None, expected_used=None, expected_dirty=None): self.assertEqual(self.tenant_id, usage_info.tenant_id) if expected_resource: self.assertEqual(expected_resource, usage_info.resource) if expected_dirty is not None: self.assertEqual(expected_dirty, usage_info.dirty) if expected_used is not None: self.assertEqual(expected_used, usage_info.used) def setUp(self): super(TestQuotaDbApi, self).setUp() self._set_context() self.plugin = test_driver.FakePlugin() directory.add_plugin(const.CORE, self.plugin) cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) def test_create_quota_usage(self): usage_info = self._create_quota_usage('goals', 26) self._verify_quota_usage(usage_info, expected_resource='goals', expected_used=26) def test_update_quota_usage(self): self._create_quota_usage('goals', 
26) # Higuain scores a double usage_info_1 = quota_api.set_quota_usage( self.context, 'goals', self.tenant_id, in_use=28) self._verify_quota_usage(usage_info_1, expected_used=28) usage_info_2 = quota_api.set_quota_usage( self.context, 'goals', self.tenant_id, in_use=24) self._verify_quota_usage(usage_info_2, expected_used=24) def test_update_quota_usage_with_deltas(self): self._create_quota_usage('goals', 26) # Higuain scores a double usage_info_1 = quota_api.set_quota_usage( self.context, 'goals', self.tenant_id, in_use=2, delta=True) self._verify_quota_usage(usage_info_1, expected_used=28) def test_set_quota_usage_dirty(self): self._create_quota_usage('goals', 26) # Higuain needs a shower after the match self.assertEqual(1, quota_api.set_quota_usage_dirty( self.context, 'goals', self.tenant_id)) usage_info = quota_api.get_quota_usage_by_resource_and_tenant( self.context, 'goals', self.tenant_id) self._verify_quota_usage(usage_info, expected_dirty=True) # Higuain is clean now self.assertEqual(1, quota_api.set_quota_usage_dirty( self.context, 'goals', self.tenant_id, dirty=False)) usage_info = quota_api.get_quota_usage_by_resource_and_tenant( self.context, 'goals', self.tenant_id) self._verify_quota_usage(usage_info, expected_dirty=False) def test_set_dirty_non_existing_quota_usage(self): self.assertEqual(0, quota_api.set_quota_usage_dirty( self.context, 'meh', self.tenant_id)) def test_set_resources_quota_usage_dirty(self): self._create_quota_usage('goals', 26) self._create_quota_usage('assists', 11) self._create_quota_usage('bookings', 3) self.assertEqual(2, quota_api.set_resources_quota_usage_dirty( self.context, ['goals', 'bookings'], self.tenant_id)) usage_info_goals = quota_api.get_quota_usage_by_resource_and_tenant( self.context, 'goals', self.tenant_id) usage_info_assists = quota_api.get_quota_usage_by_resource_and_tenant( self.context, 'assists', self.tenant_id) usage_info_bookings = quota_api.get_quota_usage_by_resource_and_tenant( self.context, 'bookings', self.tenant_id) self._verify_quota_usage(usage_info_goals, expected_dirty=True) self._verify_quota_usage(usage_info_assists, expected_dirty=False) self._verify_quota_usage(usage_info_bookings, expected_dirty=True) def test_set_resources_quota_usage_dirty_with_empty_list(self): self._create_quota_usage('goals', 26) self._create_quota_usage('assists', 11) self._create_quota_usage('bookings', 3) # Expect all the resources for the tenant to be set dirty self.assertEqual(3, quota_api.set_resources_quota_usage_dirty( self.context, [], self.tenant_id)) usage_info_goals = quota_api.get_quota_usage_by_resource_and_tenant( self.context, 'goals', self.tenant_id) usage_info_assists = quota_api.get_quota_usage_by_resource_and_tenant( self.context, 'assists', self.tenant_id) usage_info_bookings = quota_api.get_quota_usage_by_resource_and_tenant( self.context, 'bookings', self.tenant_id) self._verify_quota_usage(usage_info_goals, expected_dirty=True) self._verify_quota_usage(usage_info_assists, expected_dirty=True) self._verify_quota_usage(usage_info_bookings, expected_dirty=True) # Higuain is clean now self.assertEqual(1, quota_api.set_quota_usage_dirty( self.context, 'goals', self.tenant_id, dirty=False)) usage_info = quota_api.get_quota_usage_by_resource_and_tenant( self.context, 'goals', self.tenant_id) self._verify_quota_usage(usage_info, expected_dirty=False) def _test_set_all_quota_usage_dirty(self, expected): self._create_quota_usage('goals', 26) self._create_quota_usage('goals', 12, tenant_id='Callejon') self.assertEqual(expected, 
quota_api.set_all_quota_usage_dirty( self.context, 'goals')) def test_set_all_quota_usage_dirty(self): # All goal scorers need a shower after the match, but since this is not # admin context we can clean only one self._test_set_all_quota_usage_dirty(expected=1) def test_get_quota_usage_by_tenant(self): self._create_quota_usage('goals', 26) self._create_quota_usage('assists', 11) # Create a resource for a different tenant self._create_quota_usage('mehs', 99, tenant_id='buffon') usage_infos = quota_api.get_quota_usage_by_tenant_id( self.context, self.tenant_id) self.assertEqual(2, len(usage_infos)) resources = [info.resource for info in usage_infos] self.assertIn('goals', resources) self.assertIn('assists', resources) def test_get_quota_usage_by_resource(self): self._create_quota_usage('goals', 26) self._create_quota_usage('assists', 11) self._create_quota_usage('goals', 12, tenant_id='Callejon') usage_infos = quota_api.get_quota_usage_by_resource( self.context, 'goals') # Only 1 result expected in tenant context self.assertEqual(1, len(usage_infos)) self._verify_quota_usage(usage_infos[0], expected_resource='goals', expected_used=26) def test_get_quota_usage_by_tenant_and_resource(self): self._create_quota_usage('goals', 26) usage_info = quota_api.get_quota_usage_by_resource_and_tenant( self.context, 'goals', self.tenant_id) self._verify_quota_usage(usage_info, expected_resource='goals', expected_used=26) def test_get_non_existing_quota_usage_returns_none(self): self.assertIsNone(quota_api.get_quota_usage_by_resource_and_tenant( self.context, 'goals', self.tenant_id)) def _verify_reserved_resources(self, expected, actual): for (resource, delta) in actual.items(): self.assertIn(resource, expected) self.assertEqual(delta, expected[resource]) del expected[resource] self.assertFalse(expected) def test_create_reservation(self): resources = {'goals': 2, 'assists': 1} resv = self._create_reservation(resources) self.assertEqual(self.tenant_id, resv.tenant_id) self._verify_reserved_resources(resources, resv.deltas) def test_create_reservation_with_expiration(self): resources = {'goals': 2, 'assists': 1} exp_date = datetime.datetime(2016, 3, 31, 14, 30) resv = self._create_reservation(resources, expiration=exp_date) self.assertEqual(self.tenant_id, resv.tenant_id) self.assertEqual(exp_date, resv.expiration) self._verify_reserved_resources(resources, resv.deltas) def test_remove_non_existent_reservation(self): self.assertIsNone(quota_api.remove_reservation(self.context, 'meh')) def _get_reservations_for_resource_helper(self): # create three reservations, one of them expired resources_1 = {'goals': 2, 'assists': 1} resources_2 = {'goals': 3, 'bookings': 1} resources_3 = {'bookings': 2, 'assists': 2} exp_date_1 = datetime.datetime(2016, 3, 31, 14, 30) exp_date_2 = datetime.datetime(2015, 3, 31, 14, 30) self._create_reservation(resources_1, expiration=exp_date_1) self._create_reservation(resources_2, expiration=exp_date_1) self._create_reservation(resources_3, expiration=exp_date_2) def test_get_reservations_for_resources(self): with mock.patch('neutron.db.quota.api.utcnow') as mock_utcnow: self._get_reservations_for_resource_helper() mock_utcnow.return_value = datetime.datetime( 2015, 5, 20, 0, 0) deltas = quota_api.get_reservations_for_resources( self.context, self.tenant_id, ['goals', 'assists', 'bookings']) self.assertIn('goals', deltas) self.assertEqual(5, deltas['goals']) self.assertIn('assists', deltas) self.assertEqual(1, deltas['assists']) self.assertIn('bookings', deltas) self.assertEqual(1, 
deltas['bookings']) self.assertEqual(3, len(deltas)) def test_get_expired_reservations_for_resources(self): with mock.patch('neutron.db.quota.api.utcnow') as mock_utcnow: mock_utcnow.return_value = datetime.datetime( 2015, 5, 20, 0, 0) self._get_reservations_for_resource_helper() deltas = quota_api.get_reservations_for_resources( self.context, self.tenant_id, ['goals', 'assists', 'bookings'], expired=True) self.assertIn('assists', deltas) self.assertEqual(2, deltas['assists']) self.assertIn('bookings', deltas) self.assertEqual(2, deltas['bookings']) self.assertEqual(2, len(deltas)) def test_get_reservation_for_resources_with_empty_list(self): self.assertIsNone(quota_api.get_reservations_for_resources( self.context, self.tenant_id, [])) def test_remove_expired_reservations(self): with mock.patch('neutron.db.quota.api.utcnow') as mock_utcnow: mock_utcnow.return_value = datetime.datetime( 2015, 5, 20, 0, 0) resources = {'goals': 2, 'assists': 1} exp_date_1 = datetime.datetime(2016, 3, 31, 14, 30) resv_1 = self._create_reservation(resources, expiration=exp_date_1) exp_date_2 = datetime.datetime(2015, 3, 31, 14, 30) resv_2 = self._create_reservation(resources, expiration=exp_date_2) self.assertEqual(1, quota_api.remove_expired_reservations( self.context, self.tenant_id)) self.assertIsNone(quota_api.get_reservation( self.context, resv_2.reservation_id)) self.assertIsNotNone(quota_api.get_reservation( self.context, resv_1.reservation_id)) def test_remove_expired_reservations_no_tenant(self): with mock.patch('neutron.db.quota.api.utcnow') as mock_utcnow: mock_utcnow.return_value = datetime.datetime( 2015, 5, 20, 0, 0) resources = {'goals': 2, 'assists': 1} exp_date_1 = datetime.datetime(2014, 3, 31, 14, 30) resv_1 = self._create_reservation(resources, expiration=exp_date_1) exp_date_2 = datetime.datetime(2015, 3, 31, 14, 30) resv_2 = self._create_reservation(resources, expiration=exp_date_2, tenant_id='Callejon') self.assertEqual(2, quota_api.remove_expired_reservations( context.get_admin_context())) self.assertIsNone(quota_api.get_reservation( self.context, resv_2.reservation_id)) self.assertIsNone(quota_api.get_reservation( self.context, resv_1.reservation_id)) class TestQuotaDbApiAdminContext(TestQuotaDbApi): def _set_context(self): self.tenant_id = 'Higuain' self.context = context.Context('Gonzalo', self.tenant_id, is_admin=True, is_advsvc=True) def test_get_quota_usage_by_resource(self): self._create_quota_usage('goals', 26) self._create_quota_usage('assists', 11) self._create_quota_usage('goals', 12, tenant_id='Callejon') usage_infos = quota_api.get_quota_usage_by_resource( self.context, 'goals') # 2 results expected in admin context self.assertEqual(2, len(usage_infos)) for usage_info in usage_infos: self.assertEqual('goals', usage_info.resource) def test_set_all_quota_usage_dirty(self): # All goal scorers need a shower after the match, and with admin # context we should be able to clean all of them self._test_set_all_quota_usage_dirty(expected=2) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/quota/test_driver.py0000644000175000017500000003127500000000000025410 0ustar00coreycorey00000000000000# Copyright (c) 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from neutron_lib import context from neutron_lib import exceptions from neutron.db import db_base_plugin_v2 as base_plugin from neutron.db.quota import api as quota_api from neutron.db.quota import driver from neutron.objects import quota as quota_obj from neutron.quota import resource from neutron.tests import base from neutron.tests.unit import quota as test_quota from neutron.tests.unit import testlib_api DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' def _count_resource(context, resource, tenant_id): """A fake counting function to determine current used counts""" if resource[-1] == 's': resource = resource[:-1] result = quota_obj.QuotaUsage.get_object_dirty_protected( context, resource=resource) return 0 if not result else result.in_use class FakePlugin(base_plugin.NeutronDbPluginV2, driver.DbQuotaDriver): """A fake plugin class containing all DB methods.""" class TestResource(object): """Describe a test resource for quota checking.""" def __init__(self, name, default, fake_count=0): self.name = name self.quota = default self.fake_count = fake_count @property def default(self): return self.quota def count(self, *args, **kwargs): return self.fake_count class TestTrackedResource(resource.TrackedResource): """Describes a test tracked resource for detailed quota checking""" def __init__(self, name, model_class, flag=None, plural_name=None): super(TestTrackedResource, self).__init__( name, model_class, flag=flag, plural_name=plural_name) @property def default(self): return self.flag class TestCountableResource(resource.CountableResource): """Describes a test countable resource for detailed quota checking""" def __init__(self, name, count, flag=-1, plural_name=None): super(TestCountableResource, self).__init__( name, count, flag=flag, plural_name=plural_name) @property def default(self): return self.flag PROJECT = 'prj_test' RESOURCE = 'res_test' ALT_RESOURCE = 'res_test_meh' class TestDbQuotaDriver(testlib_api.SqlTestCase, base.BaseTestCase): def setUp(self): super(TestDbQuotaDriver, self).setUp() self.plugin = FakePlugin() self.context = context.get_admin_context() self.setup_coreplugin(core_plugin=DB_PLUGIN_KLASS) def test_create_quota_limit(self): defaults = {RESOURCE: TestResource(RESOURCE, 4)} self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) quotas = self.plugin.get_tenant_quotas(self.context, defaults, PROJECT) self.assertEqual(2, quotas[RESOURCE]) def test_update_quota_limit(self): defaults = {RESOURCE: TestResource(RESOURCE, 4)} self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 3) quotas = self.plugin.get_tenant_quotas(self.context, defaults, PROJECT) self.assertEqual(3, quotas[RESOURCE]) def test_delete_tenant_quota_restores_default_limit(self): defaults = {RESOURCE: TestResource(RESOURCE, 4)} self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) self.plugin.delete_tenant_quota(self.context, PROJECT) quotas = self.plugin.get_tenant_quotas(self.context, defaults, PROJECT) self.assertEqual(4, quotas[RESOURCE]) def test_get_default_quotas(self): defaults = {RESOURCE: 
TestResource(RESOURCE, 4)} user_ctx = context.Context(user_id=PROJECT, tenant_id=PROJECT) self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) quotas = self.plugin.get_default_quotas(user_ctx, defaults, PROJECT) self.assertEqual(4, quotas[RESOURCE]) def test_get_tenant_quotas(self): user_ctx = context.Context(user_id=PROJECT, tenant_id=PROJECT) self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) quotas = self.plugin.get_tenant_quotas(user_ctx, {}, PROJECT) self.assertEqual(2, quotas[RESOURCE]) def test_get_tenant_quotas_different_tenant(self): user_ctx = context.Context(user_id=PROJECT, tenant_id='another_project') self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) # It is appropriate to use assertFalse here as the expected return # value is an empty dict (the defaults passed in the statement below # after the request context) self.assertFalse(self.plugin.get_tenant_quotas(user_ctx, {}, PROJECT)) def test_get_all_quotas(self): project_1 = 'prj_test_1' project_2 = 'prj_test_2' resource_1 = 'res_test_1' resource_2 = 'res_test_2' resources = {resource_1: TestResource(resource_1, 3), resource_2: TestResource(resource_2, 5)} self.plugin.update_quota_limit(self.context, project_1, resource_1, 7) self.plugin.update_quota_limit(self.context, project_2, resource_2, 9) quotas = self.plugin.get_all_quotas(self.context, resources) # Expect two tenants' quotas self.assertEqual(2, len(quotas)) # But not quotas for the same tenant twice self.assertNotEqual(quotas[0]['tenant_id'], quotas[1]['tenant_id']) # Check the expected limits. The quotas can be in any order. for quota in quotas: project = quota['tenant_id'] self.assertIn(project, (project_1, project_2)) if project == project_1: expected_limit_r1 = 7 expected_limit_r2 = 5 if project == project_2: expected_limit_r1 = 3 expected_limit_r2 = 9 self.assertEqual(expected_limit_r1, quota[resource_1]) self.assertEqual(expected_limit_r2, quota[resource_2]) def test_limit_check(self): resources = {RESOURCE: TestResource(RESOURCE, 2)} values = {RESOURCE: 1} self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) self.plugin.limit_check(self.context, PROJECT, resources, values) def test_limit_check_over_quota(self): resources = {RESOURCE: TestResource(RESOURCE, 2)} values = {RESOURCE: 3} self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) self.assertRaises(exceptions.OverQuota, self.plugin.limit_check, context.get_admin_context(), PROJECT, resources, values) def test_limit_check_equals_to_quota(self): resources = {RESOURCE: TestResource(RESOURCE, 2)} values = {RESOURCE: 2} self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) self.plugin.limit_check(self.context, PROJECT, resources, values) def test_limit_check_value_lower_than_zero(self): resources = {RESOURCE: TestResource(RESOURCE, 2)} values = {RESOURCE: -1} self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) self.assertRaises(exceptions.InvalidQuotaValue, self.plugin.limit_check, context.get_admin_context(), PROJECT, resources, values) def _test_make_reservation_success(self, quota_driver, resource_name, deltas): resources = {resource_name: TestResource(resource_name, 2)} self.plugin.update_quota_limit(self.context, PROJECT, resource_name, 2) reservation = quota_driver.make_reservation( self.context, self.context.tenant_id, resources, deltas, self.plugin) self.assertIn(resource_name, reservation.deltas) self.assertEqual(deltas[resource_name], reservation.deltas[resource_name]) 
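# A successful reservation is always scoped to the tenant that requested it.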
self.assertEqual(self.context.tenant_id, reservation.tenant_id) def test_make_reservation_single_resource(self): quota_driver = driver.DbQuotaDriver() self._test_make_reservation_success( quota_driver, RESOURCE, {RESOURCE: 1}) def test_make_reservation_fill_quota(self): quota_driver = driver.DbQuotaDriver() self._test_make_reservation_success( quota_driver, RESOURCE, {RESOURCE: 2}) def test_make_reservation_multiple_resources(self): quota_driver = driver.DbQuotaDriver() resources = {RESOURCE: TestResource(RESOURCE, 2), ALT_RESOURCE: TestResource(ALT_RESOURCE, 2)} deltas = {RESOURCE: 1, ALT_RESOURCE: 2} self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) self.plugin.update_quota_limit(self.context, PROJECT, ALT_RESOURCE, 2) reservation = quota_driver.make_reservation( self.context, self.context.tenant_id, resources, deltas, self.plugin) self.assertIn(RESOURCE, reservation.deltas) self.assertIn(ALT_RESOURCE, reservation.deltas) self.assertEqual(1, reservation.deltas[RESOURCE]) self.assertEqual(2, reservation.deltas[ALT_RESOURCE]) self.assertEqual(self.context.tenant_id, reservation.tenant_id) def test_make_reservation_over_quota_fails(self): quota_driver = driver.DbQuotaDriver() resources = {RESOURCE: TestResource(RESOURCE, 2, fake_count=2)} deltas = {RESOURCE: 1} self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2) self.assertRaises(exceptions.OverQuota, quota_driver.make_reservation, self.context, self.context.tenant_id, resources, deltas, self.plugin) def test_get_detailed_tenant_quotas_resource(self): res = {RESOURCE: TestTrackedResource(RESOURCE, test_quota.MehModel)} self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 6) quota_driver = driver.DbQuotaDriver() quota_driver.make_reservation(self.context, PROJECT, res, {RESOURCE: 1}, self.plugin) quota_api.set_quota_usage(self.context, RESOURCE, PROJECT, 2) detailed_quota = self.plugin.get_detailed_tenant_quotas(self.context, res, PROJECT) self.assertEqual(6, detailed_quota[RESOURCE]['limit']) self.assertEqual(2, detailed_quota[RESOURCE]['used']) self.assertEqual(1, detailed_quota[RESOURCE]['reserved']) def test_get_detailed_tenant_quotas_multiple_resource(self): project_1 = 'prj_test_1' resource_1 = 'res_test_1' resource_2 = 'res_test_2' resources = {resource_1: TestTrackedResource(resource_1, test_quota.MehModel), resource_2: TestCountableResource(resource_2, _count_resource)} self.plugin.update_quota_limit(self.context, project_1, resource_1, 6) self.plugin.update_quota_limit(self.context, project_1, resource_2, 9) quota_driver = driver.DbQuotaDriver() quota_driver.make_reservation(self.context, project_1, resources, {resource_1: 1, resource_2: 7}, self.plugin) quota_api.set_quota_usage(self.context, resource_1, project_1, 2) quota_api.set_quota_usage(self.context, resource_2, project_1, 3) detailed_quota = self.plugin.get_detailed_tenant_quotas(self.context, resources, project_1) self.assertEqual(6, detailed_quota[resource_1]['limit']) self.assertEqual(1, detailed_quota[resource_1]['reserved']) self.assertEqual(2, detailed_quota[resource_1]['used']) self.assertEqual(9, detailed_quota[resource_2]['limit']) self.assertEqual(7, detailed_quota[resource_2]['reserved']) self.assertEqual(3, detailed_quota[resource_2]['used']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/test__utils.py0000644000175000017500000000367700000000000024270 0ustar00coreycorey00000000000000# Copyright 2016 # All Rights 
Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib import context from neutron.db import _utils as db_utils from neutron.tests.unit import testlib_api class TestCommonHelpFunctions(testlib_api.SqlTestCase): def setUp(self): super(TestCommonHelpFunctions, self).setUp() self.admin_ctx = context.get_admin_context() def test__safe_creation_create_bindings_fails(self): create_fn = mock.Mock(return_value={'id': 1234}) create_bindings = mock.Mock(side_effect=ValueError) tx_check = lambda i: setattr(self, '_active', self.admin_ctx.session.is_active) delete_fn = mock.Mock(side_effect=tx_check) self.assertRaises(ValueError, db_utils.safe_creation, self.admin_ctx, create_fn, delete_fn, create_bindings) delete_fn.assert_called_once_with(1234) self.assertTrue(self._active) def test__safe_creation_deletion_fails(self): create_fn = mock.Mock(return_value={'id': 1234}) create_bindings = mock.Mock(side_effect=ValueError) delete_fn = mock.Mock(side_effect=EnvironmentError) self.assertRaises(ValueError, db_utils.safe_creation, self.admin_ctx, create_fn, delete_fn, create_bindings) delete_fn.assert_called_once_with(1234) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/test_agents_db.py0000644000175000017500000004031600000000000024706 0ustar00coreycorey00000000000000# pylint: disable=pointless-string-statement # Copyright (c) 2013 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import datetime import mock from neutron_lib import constants from neutron_lib import context from neutron_lib.db import api as db_api from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_db import exception as exc from oslo_utils import timeutils from oslo_utils import uuidutils import testscenarios from neutron.db import agents_db from neutron.db import db_base_plugin_v2 as base_plugin from neutron.objects import agent as agent_obj from neutron.objects import base from neutron.tests.unit import testlib_api # the below code is required for the following reason # (as documented in testscenarios) """Multiply tests depending on their 'scenarios' attribute. This can be assigned to 'load_tests' in any test module to make this automatically work across tests in the module. 
""" load_tests = testscenarios.load_tests_apply_scenarios TEST_RESOURCE_VERSIONS = {"A": "1.0"} AGENT_STATUS = {'agent_type': 'Open vSwitch agent', 'binary': 'neutron-openvswitch-agent', 'host': 'overcloud-notcompute', 'topic': 'N/A', 'resource_versions': TEST_RESOURCE_VERSIONS} TEST_TIME = '2016-02-26T17:08:06.116' class FakePlugin(base_plugin.NeutronDbPluginV2, agents_db.AgentDbMixin): """A fake plugin class containing all DB methods.""" class TestAgentsDbBase(testlib_api.SqlTestCase): def setUp(self): super(TestAgentsDbBase, self).setUp() self.context = context.get_admin_context() self.plugin = FakePlugin() def _get_agents(self, hosts, agent_type): return [ agent_obj.Agent( context=self.context, binary='foo-agent', host=host, agent_type=agent_type, topic='foo_topic', configurations="{}", created_at=timeutils.utcnow(), started_at=timeutils.utcnow(), heartbeat_timestamp=timeutils.utcnow()) for host in hosts ] def _create_and_save_agents(self, hosts, agent_type, down_agents_count=0, down_but_version_considered=0): agents = self._get_agents(hosts, agent_type) # bring down the specified agents for agent in agents[:down_agents_count]: agent['heartbeat_timestamp'] -= datetime.timedelta(minutes=60) # bring down just enough so their version is still considered for agent in agents[down_agents_count:( down_but_version_considered + down_agents_count)]: agent['heartbeat_timestamp'] -= datetime.timedelta( seconds=(cfg.CONF.agent_down_time + 1)) for agent in agents: agent.create() return agents class TestAgentsDbMixin(TestAgentsDbBase): def setUp(self): super(TestAgentsDbMixin, self).setUp() self.agent_status = dict(AGENT_STATUS) def test_get_enabled_agent_on_host_found(self): agents = self._create_and_save_agents(['foo_host'], constants.AGENT_TYPE_L3) expected = self.plugin.get_enabled_agent_on_host( self.context, constants.AGENT_TYPE_L3, 'foo_host') self.assertEqual(expected, agents[0]) def test_get_enabled_agent_on_host_not_found(self): with mock.patch.object(agents_db.LOG, 'debug') as mock_log: agent = self.plugin.get_enabled_agent_on_host( self.context, constants.AGENT_TYPE_L3, 'foo_agent') self.assertIsNone(agent) self.assertTrue(mock_log.called) def _assert_ref_fields_are_equal(self, reference, result): """Compare (key, value) pairs of a reference dict with the result Note: the result MAY have additional keys """ for field, value in reference.items(): self.assertEqual(value, result[field], field) def test_create_or_update_agent_new_entry(self): self.plugin.create_or_update_agent(self.context, self.agent_status, timeutils.utcnow()) agent = self.plugin.get_agents(self.context)[0] self._assert_ref_fields_are_equal(self.agent_status, agent) def test_create_or_update_agent_existing_entry(self): self.plugin.create_or_update_agent(self.context, self.agent_status, timeutils.utcnow()) self.plugin.create_or_update_agent(self.context, self.agent_status, timeutils.utcnow()) self.plugin.create_or_update_agent(self.context, self.agent_status, timeutils.utcnow()) agents = self.plugin.get_agents(self.context) self.assertEqual(len(agents), 1) agent = agents[0] self._assert_ref_fields_are_equal(self.agent_status, agent) def test_create_or_update_agent_logs_heartbeat(self): status = self.agent_status.copy() status['configurations'] = {'log_agent_heartbeats': True} with mock.patch.object(agents_db.LOG, 'info') as info: self.plugin.create_or_update_agent(self.context, status, timeutils.utcnow()) self.assertTrue(info.called) status['configurations'] = {'log_agent_heartbeats': False} info.reset_mock() 
self.plugin.create_or_update_agent(self.context, status, timeutils.utcnow()) self.assertFalse(info.called) def test_create_or_update_agent_concurrent_insert(self): # NOTE(rpodolyaka): emulate violation of the unique constraint caused # by a concurrent insert. Ensure we make another # attempt on fail mock.patch( 'neutron.objects.base.NeutronDbObject.modify_fields_from_db' ).start() counter = {'value': 0} def create_obj_side_effect(obj_cls, context, values, populate_id=True): if counter['value'] < 1: counter['value'] += 1 raise exc.DBDuplicateEntry() obj_cls.id = uuidutils.generate_uuid() with mock.patch('neutron.objects.db.api.create_object') as add_mock: add_mock.side_effect = create_obj_side_effect self.plugin.create_or_update_agent(self.context, self.agent_status, 'any_timestamp') self.assertEqual(add_mock.call_count, 2, "Agent entry creation hasn't been retried") def test_create_or_update_agent_disable_new_agents(self): cfg.CONF.set_override('enable_new_agents', False) self.plugin.create_or_update_agent(self.context, self.agent_status, 'any_timestamp') agent = self.plugin.get_agents(self.context)[0] self.assertFalse(agent['admin_state_up']) def test_agent_health_check(self): agents = [{'agent_type': "DHCP Agent", 'heartbeat_timestamp': '2015-05-06 22:40:40.432295', 'host': 'some.node', 'alive': True}] with mock.patch.object(self.plugin, 'get_agents', return_value=agents),\ mock.patch.object(agents_db.LOG, 'warning') as warn,\ mock.patch.object(agents_db.LOG, 'debug') as debug: self.plugin.agent_health_check() self.assertTrue(debug.called) self.assertFalse(warn.called) agents[0]['alive'] = False self.plugin.agent_health_check() warn.assert_called_once_with( mock.ANY, {'count': 1, 'total': 1, 'data': " Type Last heartbeat host\n" " DHCP Agent 2015-05-06 22:40:40.432295 some.node"} ) def test__get_dict(self): db_obj = mock.Mock(conf1='{"test": "1234"}') conf1 = self.plugin._get_dict(db_obj, 'conf1') self.assertIn('test', conf1) self.assertEqual("1234", conf1['test']) def test__get_dict_missing(self): with mock.patch.object(agents_db.LOG, 'warning') as warn: db_obj = mock.Mock(spec=['agent_type', 'host']) self.plugin._get_dict(db_obj, 'missing_conf') self.assertTrue(warn.called) def test__get_dict_ignore_missing(self): with mock.patch.object(agents_db.LOG, 'warning') as warn: db_obj = mock.Mock(spec=['agent_type', 'host']) missing_conf = self.plugin._get_dict(db_obj, 'missing_conf', ignore_missing=True) self.assertEqual({}, missing_conf) warn.assert_not_called() def test__get_dict_broken(self): with mock.patch.object(agents_db.LOG, 'warning') as warn: db_obj = mock.Mock(conf1='{"test": BROKEN') conf1 = self.plugin._get_dict(db_obj, 'conf1', ignore_missing=True) self.assertEqual({}, conf1) self.assertTrue(warn.called) def test_get_configurations_dict(self): db_obj = mock.Mock(configurations='{"cfg1": "val1"}') cfg = self.plugin.get_configuration_dict(db_obj) self.assertIn('cfg1', cfg) def test_get_agents_resource_versions(self): tracker = mock.Mock() self._create_and_save_agents( ['host-%d' % i for i in range(5)], constants.AGENT_TYPE_L3, down_agents_count=3, down_but_version_considered=2) self.plugin.get_agents_resource_versions(tracker) self.assertEqual(tracker.set_versions.call_count, 2) class TestAgentsDbGetAgents(TestAgentsDbBase): scenarios = [ ('Get all agents', dict(agents=5, down_agents=2, agents_alive=None, expected_agents=5)), ('Get alive agents (True)', dict(agents=5, down_agents=2, agents_alive='True', expected_agents=3)), ('Get down agents (False)', dict(agents=5, down_agents=2, 
agents_alive='False', expected_agents=2)), ('Get alive agents (true)', dict(agents=5, down_agents=2, agents_alive='true', expected_agents=3)), ('Get down agents (false)', dict(agents=5, down_agents=2, agents_alive='false', expected_agents=2)), ('Get agents invalid alive filter', dict(agents=5, down_agents=2, agents_alive='invalid', expected_agents=None)), ] def setUp(self): # ensure that the first scenario will execute with nosetests if not hasattr(self, 'agents'): self.__dict__.update(self.scenarios[0][1]) super(TestAgentsDbGetAgents, self).setUp() def test_get_agents(self): hosts = ['host-%s' % i for i in range(self.agents)] self._create_and_save_agents(hosts, constants.AGENT_TYPE_L3, down_agents_count=self.down_agents) if self.agents_alive == 'invalid': self.assertRaises(n_exc.InvalidInput, self.plugin.get_agents, self.context, filters={'alive': [self.agents_alive]}) else: returned_agents = self.plugin.get_agents( self.context, filters={'alive': [self.agents_alive]} if self.agents_alive else None) self.assertEqual(self.expected_agents, len(returned_agents)) if self.agents_alive: alive = (self.agents_alive == 'True' or self.agents_alive == 'true') for agent in returned_agents: self.assertEqual(alive, agent['alive']) class TestAgentExtRpcCallback(TestAgentsDbBase): def setUp(self): super(TestAgentExtRpcCallback, self).setUp() self.callback = agents_db.AgentExtRpcCallback(self.plugin) self.callback.server_versions_rpc = mock.Mock() self.versions_rpc = self.callback.server_versions_rpc self.callback.START_TIME = datetime.datetime(datetime.MINYEAR, 1, 1) self.update_versions = mock.patch( 'neutron.api.rpc.callbacks.version_manager.' 'update_versions').start() self.agent_state = {'agent_state': dict(AGENT_STATUS)} def test_create_or_update_agent_updates_version_manager(self): self.callback.report_state(self.context, agent_state=self.agent_state, time=TEST_TIME) self.update_versions.assert_called_once_with( mock.ANY, TEST_RESOURCE_VERSIONS) def test_create_or_update_agent_updates_other_servers(self): callback = self.callback callback.report_state(self.context, agent_state=self.agent_state, time=TEST_TIME) report_agent_resource_versions = ( self.versions_rpc.report_agent_resource_versions) report_agent_resource_versions.assert_called_once_with( mock.ANY, mock.ANY, mock.ANY, TEST_RESOURCE_VERSIONS) def test_no_version_updates_on_further_state_reports(self): self.test_create_or_update_agent_updates_version_manager() # agents include resource_versions only in the first report after # start so versions should not be updated on the second report second_agent_state = copy.deepcopy(self.agent_state) second_agent_state['agent_state'].pop('resource_versions') self.update_versions.reset_mock() report_agent_resource_versions = ( self.versions_rpc.report_agent_resource_versions) report_agent_resource_versions.reset_mock() self.callback.report_state(self.context, agent_state=second_agent_state, time=TEST_TIME) self.assertFalse(self.update_versions.called) self.assertFalse(report_agent_resource_versions.called) def test_version_updates_on_agent_revival(self): self.test_create_or_update_agent_updates_version_manager() second_agent_state = copy.deepcopy(self.agent_state) second_agent_state['agent_state'].pop('resource_versions') self._take_down_agent() self.update_versions.reset_mock() report_agent_resource_versions = ( self.versions_rpc.report_agent_resource_versions) report_agent_resource_versions.reset_mock() # agent didn't include resource_versions in report but server will # take them from db for the 
        # revived agent
        self.callback.report_state(self.context,
                                   agent_state=second_agent_state,
                                   time=TEST_TIME)
        self.update_versions.assert_called_once_with(
            mock.ANY, TEST_RESOURCE_VERSIONS)
        report_agent_resource_versions.assert_called_once_with(
            mock.ANY, mock.ANY, mock.ANY, TEST_RESOURCE_VERSIONS)

    def _take_down_agent(self):
        with db_api.CONTEXT_WRITER.using(self.context):
            pager = base.Pager(limit=1)
            agent_objs = agent_obj.Agent.get_objects(self.context,
                                                     _pager=pager)
            agent_objs[0].heartbeat_timestamp = (
                agent_objs[0].heartbeat_timestamp - datetime.timedelta(
                    hours=1))
            agent_objs[0].update()


neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/test_agentschedulers_db.py

# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime

import mock
from neutron_lib.api.definitions import dhcpagentscheduler as das_apidef
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants
from neutron_lib import context
from neutron_lib.db import api as db_api
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from neutron_lib import rpc as n_rpc
from neutron_lib.tests.unit import fake_notifier
from oslo_config import cfg
from oslo_db import exception as db_exc
import oslo_messaging
from oslo_utils import uuidutils
from webob import exc

from neutron.api import extensions
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.api.rpc.handlers import dhcp_rpc
from neutron.api.rpc.handlers import l3_rpc
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db.models import agent as agent_model
from neutron.extensions import l3agentscheduler
from neutron.objects import agent as ag_obj
from neutron.objects import l3agent as rb_obj
from neutron.tests.common import helpers
from neutron.tests.unit.api import test_extensions
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin
from neutron.tests.unit.extensions import test_agent
from neutron.tests.unit.extensions import test_l3
from neutron.tests.unit import testlib_api
from neutron import wsgi

L3_HOSTA = 'hosta'
DHCP_HOSTA = 'hosta'
L3_HOSTB = 'hostb'
DHCP_HOSTC = 'hostc'
DHCP_HOSTD = 'hostd'

DEVICE_OWNER_COMPUTE = ''.join([constants.DEVICE_OWNER_COMPUTE_PREFIX,
                                'test:', DHCP_HOSTA])


class AgentSchedulerTestMixIn(object):

    block_dhcp_notifier = False

    def _request_list(self, path, admin_context=True,
                      expected_code=exc.HTTPOk.code):
        req = self._path_req(path, admin_context=admin_context)
        res = req.get_response(self.ext_api)
        self.assertEqual(expected_code, res.status_int)
        return self.deserialize(self.fmt, res)

    def _path_req(self, path, method='GET', data=None,
                  query_string=None, admin_context=True):
        content_type = 'application/%s' % self.fmt
        body = None
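        # A non-None data payload (an empty dict included) is serialized
        # into the request body below; GET/DELETE requests keep body = None.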
if data is not None: # empty dict is valid body = wsgi.Serializer().serialize(data, content_type) if admin_context: return testlib_api.create_request( path, body, content_type, method, query_string=query_string) else: return testlib_api.create_request( path, body, content_type, method, query_string=query_string, context=context.Context('', 'tenant_id')) def _path_create_request(self, path, data, admin_context=True): return self._path_req(path, method='POST', data=data, admin_context=admin_context) def _path_show_request(self, path, admin_context=True): return self._path_req(path, admin_context=admin_context) def _path_delete_request(self, path, admin_context=True): return self._path_req(path, method='DELETE', admin_context=admin_context) def _path_update_request(self, path, data, admin_context=True): return self._path_req(path, method='PUT', data=data, admin_context=admin_context) def _list_routers_hosted_by_l3_agent(self, agent_id, expected_code=exc.HTTPOk.code, admin_context=True): path = "/agents/%s/%s.%s" % (agent_id, l3agentscheduler.L3_ROUTERS, self.fmt) return self._request_list(path, expected_code=expected_code, admin_context=admin_context) def _list_networks_hosted_by_dhcp_agent(self, agent_id, expected_code=exc.HTTPOk.code, admin_context=True): path = "/agents/%s/%s.%s" % (agent_id, das_apidef.DHCP_NETS, self.fmt) return self._request_list(path, expected_code=expected_code, admin_context=admin_context) def _list_l3_agents_hosting_router(self, router_id, expected_code=exc.HTTPOk.code, admin_context=True): path = "/routers/%s/%s.%s" % (router_id, l3agentscheduler.L3_AGENTS, self.fmt) return self._request_list(path, expected_code=expected_code, admin_context=admin_context) def _list_dhcp_agents_hosting_network(self, network_id, expected_code=exc.HTTPOk.code, admin_context=True): path = "/networks/%s/%s.%s" % (network_id, das_apidef.DHCP_AGENTS, self.fmt) return self._request_list(path, expected_code=expected_code, admin_context=admin_context) def _add_router_to_l3_agent(self, id, router_id, expected_code=exc.HTTPCreated.code, admin_context=True): path = "/agents/%s/%s.%s" % (id, l3agentscheduler.L3_ROUTERS, self.fmt) req = self._path_create_request(path, {'router_id': router_id}, admin_context=admin_context) res = req.get_response(self.ext_api) self.assertEqual(expected_code, res.status_int) def _add_network_to_dhcp_agent(self, id, network_id, expected_code=exc.HTTPCreated.code, admin_context=True): path = "/agents/%s/%s.%s" % (id, das_apidef.DHCP_NETS, self.fmt) req = self._path_create_request(path, {'network_id': network_id}, admin_context=admin_context) res = req.get_response(self.ext_api) self.assertEqual(expected_code, res.status_int) def _remove_network_from_dhcp_agent(self, id, network_id, expected_code=exc.HTTPNoContent.code, admin_context=True): path = "/agents/%s/%s/%s.%s" % (id, das_apidef.DHCP_NETS, network_id, self.fmt) req = self._path_delete_request(path, admin_context=admin_context) res = req.get_response(self.ext_api) self.assertEqual(expected_code, res.status_int) def _remove_router_from_l3_agent(self, id, router_id, expected_code=exc.HTTPNoContent.code, admin_context=True): path = "/agents/%s/%s/%s.%s" % (id, l3agentscheduler.L3_ROUTERS, router_id, self.fmt) req = self._path_delete_request(path, admin_context=admin_context) res = req.get_response(self.ext_api) self.assertEqual(expected_code, res.status_int) def _assert_notify(self, notifications, expected_event_type): event_types = [event['event_type'] for event in notifications] self.assertIn(expected_event_type, 
event_types) def test_agent_registration_bad_timestamp(self): callback = agents_db.AgentExtRpcCallback() delta_time = datetime.datetime.now() - datetime.timedelta(days=1) str_time = delta_time.strftime('%Y-%m-%dT%H:%M:%S.%f') callback.report_state( self.adminContext, agent_state={ 'agent_state': helpers._get_dhcp_agent_dict(DHCP_HOSTA)}, time=str_time) def test_agent_registration_invalid_timestamp_allowed(self): callback = agents_db.AgentExtRpcCallback() utc_time = datetime.datetime.utcnow() delta_time = utc_time - datetime.timedelta(seconds=10) str_time = delta_time.strftime('%Y-%m-%dT%H:%M:%S.%f') callback.report_state( self.adminContext, agent_state={ 'agent_state': helpers._get_dhcp_agent_dict(DHCP_HOSTA)}, time=str_time) def _disable_agent(self, agent_id, admin_state_up=False): new_agent = {} new_agent['agent'] = {} new_agent['agent']['admin_state_up'] = admin_state_up self._update('agents', agent_id, new_agent) def _get_agent_id(self, agent_type, host): agents = self._list_agents() for agent_data in agents['agents']: if (agent_data['agent_type'] == agent_type and agent_data['host'] == host): return agent_data['id'] class OvsAgentSchedulerTestCaseBase(test_l3.L3NatTestCaseMixin, test_agent.AgentDBTestMixIn, AgentSchedulerTestMixIn, test_plugin.NeutronDbPluginV2TestCase): fmt = 'json' l3_plugin = ('neutron.tests.unit.extensions.test_l3.' 'TestL3NatAgentSchedulingServicePlugin') def setUp(self): if self.l3_plugin: service_plugins = { 'l3_plugin_name': self.l3_plugin, 'flavors_plugin_name': 'neutron.services.flavors.' 'flavors_plugin.FlavorsPlugin' } else: service_plugins = None # NOTE(ivasilevskaya) mocking this way allows some control over mocked # client like further method mocking with asserting calls self.client_mock = mock.MagicMock(name="mocked client") mock.patch.object( n_rpc, 'get_client').start().return_value = self.client_mock super(OvsAgentSchedulerTestCaseBase, self).setUp( 'ml2', service_plugins=service_plugins) mock.patch.object( self.plugin, 'filter_hosts_with_network_access', side_effect=lambda context, network_id, hosts: hosts).start() ext_mgr = extensions.PluginAwareExtensionManager.get_instance() self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr) self.adminContext = context.get_admin_context() self.l3plugin = directory.get_plugin(plugin_constants.L3) self.l3_notify_p = mock.patch( 'neutron.extensions.l3agentscheduler.notify') self.patched_l3_notify = self.l3_notify_p.start() self.l3_periodic_p = mock.patch('neutron.db.l3_agentschedulers_db.' 'L3AgentSchedulerDbMixin.' 
'add_periodic_l3_agent_status_check') self.patched_l3_periodic = self.l3_periodic_p.start() self.dhcp_notify_p = mock.patch( 'neutron.extensions.dhcpagentscheduler.notify') self.patched_dhcp_notify = self.dhcp_notify_p.start() class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase): def test_report_states(self): self._register_agent_states() agents = self._list_agents() self.assertEqual(4, len(agents['agents'])) def test_list_router_ids_on_host_no_l3_agent(self): l3_rpc_cb = l3_rpc.L3RpcCallback() self.assertEqual( [], l3_rpc_cb.get_router_ids(self.adminContext, host="fake host")) def test_network_scheduling_on_network_creation(self): self._register_agent_states() with self.network() as net: dhcp_agents = self._list_dhcp_agents_hosting_network( net['network']['id']) self.assertEqual(0, len(dhcp_agents['agents'])) def test_network_auto_schedule_with_disabled(self): cfg.CONF.set_override('allow_overlapping_ips', True) with self.subnet(), self.subnet(): dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback() self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTA) hostc_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTC) self._disable_agent(hosta_id) dhcp_rpc_cb.get_active_networks_info( self.adminContext, host=DHCP_HOSTA) # second agent will host all the networks since first is disabled. dhcp_rpc_cb.get_active_networks_info( self.adminContext, host=DHCP_HOSTC) networks = self._list_networks_hosted_by_dhcp_agent(hostc_id) num_hostc_nets = len(networks['networks']) networks = self._list_networks_hosted_by_dhcp_agent(hosta_id) num_hosta_nets = len(networks['networks']) self.assertEqual(0, num_hosta_nets) self.assertEqual(2, num_hostc_nets) def test_network_auto_schedule_with_no_dhcp(self): cfg.CONF.set_override('allow_overlapping_ips', True) with self.subnet(enable_dhcp=False), self.subnet(enable_dhcp=False): dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback() self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTA) hostc_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTC) self._disable_agent(hosta_id) dhcp_rpc_cb.get_active_networks_info( self.adminContext, host=DHCP_HOSTA) dhcp_rpc_cb.get_active_networks_info( self.adminContext, host=DHCP_HOSTC) networks = self._list_networks_hosted_by_dhcp_agent(hostc_id) num_hostc_nets = len(networks['networks']) networks = self._list_networks_hosted_by_dhcp_agent(hosta_id) num_hosta_nets = len(networks['networks']) self.assertEqual(0, num_hosta_nets) self.assertEqual(0, num_hostc_nets) def test_network_auto_schedule_with_multiple_agents(self): cfg.CONF.set_override('dhcp_agents_per_network', 2) cfg.CONF.set_override('allow_overlapping_ips', True) with self.subnet(), self.subnet(): dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback() self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTA) hostc_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTC) dhcp_rpc_cb.get_active_networks_info( self.adminContext, host=DHCP_HOSTA) dhcp_rpc_cb.get_active_networks_info( self.adminContext, host=DHCP_HOSTC) networks = self._list_networks_hosted_by_dhcp_agent(hostc_id) num_hostc_nets = len(networks['networks']) networks = self._list_networks_hosted_by_dhcp_agent(hosta_id) num_hosta_nets = len(networks['networks']) self.assertEqual(2, num_hosta_nets) self.assertEqual(2, num_hostc_nets) def test_network_auto_schedule_restart_dhcp_agent(self): cfg.CONF.set_override('dhcp_agents_per_network', 2) with self.subnet() as sub1: dhcp_rpc_cb = 
dhcp_rpc.DhcpRpcCallback() self._register_agent_states() dhcp_rpc_cb.get_active_networks_info( self.adminContext, host=DHCP_HOSTA) dhcp_rpc_cb.get_active_networks_info( self.adminContext, host=DHCP_HOSTA) dhcp_agents = self._list_dhcp_agents_hosting_network( sub1['subnet']['network_id']) self.assertEqual(1, len(dhcp_agents['agents'])) def test_network_auto_schedule_with_hosted(self): # one agent hosts all the networks, other hosts none cfg.CONF.set_override('allow_overlapping_ips', True) with self.subnet() as sub1, self.subnet(): dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback() self._register_agent_states() dhcp_rpc_cb.get_active_networks_info( self.adminContext, host=DHCP_HOSTA) # second agent will not host the network since first has got it. dhcp_rpc_cb.get_active_networks_info( self.adminContext, host=DHCP_HOSTC) dhcp_agents = self._list_dhcp_agents_hosting_network( sub1['subnet']['network_id']) hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTA) hostc_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTC) hosta_nets = self._list_networks_hosted_by_dhcp_agent(hosta_id) num_hosta_nets = len(hosta_nets['networks']) hostc_nets = self._list_networks_hosted_by_dhcp_agent(hostc_id) num_hostc_nets = len(hostc_nets['networks']) self.assertEqual(2, num_hosta_nets) self.assertEqual(0, num_hostc_nets) self.assertEqual(1, len(dhcp_agents['agents'])) self.assertEqual(DHCP_HOSTA, dhcp_agents['agents'][0]['host']) def test_network_auto_schedule_with_hosted_2(self): # one agent hosts one network dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback() cfg.CONF.set_override('allow_overlapping_ips', True) with self.subnet() as sub1: helpers.register_dhcp_agent(DHCP_HOSTA) dhcp_rpc_cb.get_active_networks_info( self.adminContext, host=DHCP_HOSTA) hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTA) self._disable_agent(hosta_id, admin_state_up=False) with self.subnet() as sub2: helpers.register_dhcp_agent(DHCP_HOSTC) dhcp_rpc_cb.get_active_networks_info(self.adminContext, host=DHCP_HOSTC) dhcp_agents_1 = self._list_dhcp_agents_hosting_network( sub1['subnet']['network_id']) dhcp_agents_2 = self._list_dhcp_agents_hosting_network( sub2['subnet']['network_id']) hosta_nets = self._list_networks_hosted_by_dhcp_agent(hosta_id) num_hosta_nets = len(hosta_nets['networks']) hostc_id = self._get_agent_id( constants.AGENT_TYPE_DHCP, DHCP_HOSTC) hostc_nets = self._list_networks_hosted_by_dhcp_agent(hostc_id) num_hostc_nets = len(hostc_nets['networks']) self.assertEqual(1, num_hosta_nets) self.assertEqual(1, num_hostc_nets) self.assertEqual(1, len(dhcp_agents_1['agents'])) self.assertEqual(1, len(dhcp_agents_2['agents'])) self.assertEqual(DHCP_HOSTA, dhcp_agents_1['agents'][0]['host']) self.assertEqual(DHCP_HOSTC, dhcp_agents_2['agents'][0]['host']) def test_network_scheduling_on_port_creation(self): with self.subnet() as subnet: dhcp_agents = self._list_dhcp_agents_hosting_network( subnet['subnet']['network_id']) result0 = len(dhcp_agents['agents']) self._register_agent_states() with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE) as port: dhcp_agents = self._list_dhcp_agents_hosting_network( port['port']['network_id']) result1 = len(dhcp_agents['agents']) self.assertEqual(0, result0) self.assertEqual(1, result1) def test_network_ha_scheduling_on_port_creation(self): cfg.CONF.set_override('dhcp_agents_per_network', 2) with self.subnet() as subnet: dhcp_agents = self._list_dhcp_agents_hosting_network( subnet['subnet']['network_id']) result0 = len(dhcp_agents['agents']) 
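            # No DHCP agents are registered at this point, so result0 is
            # expected to be 0; scheduling only happens on port create below.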
self._register_agent_states() with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE) as port: dhcp_agents = self._list_dhcp_agents_hosting_network( port['port']['network_id']) result1 = len(dhcp_agents['agents']) self.assertEqual(0, result0) self.assertEqual(2, result1) def test_network_ha_scheduling_on_port_creation_with_new_agent(self): cfg.CONF.set_override('dhcp_agents_per_network', 3) with self.subnet() as subnet: dhcp_agents = self._list_dhcp_agents_hosting_network( subnet['subnet']['network_id']) result0 = len(dhcp_agents['agents']) self._register_agent_states() with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE) as port: dhcp_agents = self._list_dhcp_agents_hosting_network( port['port']['network_id']) result1 = len(dhcp_agents['agents']) helpers.register_dhcp_agent('host1') with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE) as port: dhcp_agents = self._list_dhcp_agents_hosting_network( port['port']['network_id']) result2 = len(dhcp_agents['agents']) self.assertEqual(0, result0) self.assertEqual(2, result1) self.assertEqual(3, result2) def test_network_scheduler_with_disabled_agent(self): helpers.register_dhcp_agent(DHCP_HOSTA) with self.port() as port1: dhcp_agents = self._list_dhcp_agents_hosting_network( port1['port']['network_id']) self._delete('ports', port1['port']['id']) self._delete('networks', port1['port']['network_id']) self.assertEqual(1, len(dhcp_agents['agents'])) agents = self._list_agents() self._disable_agent(agents['agents'][0]['id']) with self.port() as port2: dhcp_agents = self._list_dhcp_agents_hosting_network( port2['port']['network_id']) self._delete('ports', port2['port']['id']) self.assertEqual(0, len(dhcp_agents['agents'])) def test_is_eligible_agent(self): agent_startup = ('neutron.db.agentschedulers_db.' 'DhcpAgentSchedulerDbMixin.agent_starting_up') is_eligible_agent = ('neutron.db.agentschedulers_db.' 'AgentSchedulerDbMixin.is_eligible_agent') dhcp_mixin = agentschedulers_db.DhcpAgentSchedulerDbMixin() with mock.patch(agent_startup) as startup,\ mock.patch(is_eligible_agent) as elig: tests = [(True, True), (True, False), (False, True), (False, False)] for rv1, rv2 in tests: startup.return_value = rv1 elig.return_value = rv2 self.assertEqual(rv1 or rv2, dhcp_mixin.is_eligible_agent(None, None, None)) def test_network_scheduler_with_down_agent(self): helpers.register_dhcp_agent(DHCP_HOSTA) eligible_agent_str = ('neutron.db.agentschedulers_db.' 
'DhcpAgentSchedulerDbMixin.is_eligible_agent') with mock.patch(eligible_agent_str) as eligible_agent: eligible_agent.return_value = True with self.port() as port: dhcp_agents = self._list_dhcp_agents_hosting_network( port['port']['network_id']) self._delete('ports', port['port']['id']) self._delete('networks', port['port']['network_id']) self.assertEqual(1, len(dhcp_agents['agents'])) with mock.patch(eligible_agent_str) as eligible_agent: eligible_agent.return_value = False with self.port() as port: dhcp_agents = self._list_dhcp_agents_hosting_network( port['port']['network_id']) self._delete('ports', port['port']['id']) self.assertEqual(0, len(dhcp_agents['agents'])) def test_network_scheduler_with_hosted_network(self): plugin = directory.get_plugin() helpers.register_dhcp_agent(DHCP_HOSTA) with self.port() as port1: dhcp_agents = self._list_dhcp_agents_hosting_network( port1['port']['network_id']) self.assertEqual(1, len(dhcp_agents['agents'])) with mock.patch.object(plugin, 'get_dhcp_agents_hosting_networks', autospec=True) as mock_hosting_agents: mock_hosting_agents.return_value = plugin.get_agent_objects( self.adminContext) with self.network('test') as net1: pass with self.subnet(network=net1, cidr='10.0.1.0/24') as subnet1: pass with self.port(subnet=subnet1) as port2: pass dhcp_agents = self._list_dhcp_agents_hosting_network( port2['port']['network_id']) self.assertEqual(0, len(dhcp_agents['agents'])) def test_network_policy(self): with self.network() as net1: self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTA) self._list_networks_hosted_by_dhcp_agent( hosta_id, expected_code=exc.HTTPForbidden.code, admin_context=False) self._add_network_to_dhcp_agent( hosta_id, net1['network']['id'], expected_code=exc.HTTPForbidden.code, admin_context=False) self._add_network_to_dhcp_agent(hosta_id, net1['network']['id']) self._remove_network_from_dhcp_agent( hosta_id, net1['network']['id'], expected_code=exc.HTTPForbidden.code, admin_context=False) self._list_dhcp_agents_hosting_network( net1['network']['id'], expected_code=exc.HTTPForbidden.code, admin_context=False) def _test_network_add_to_dhcp_agent(self, admin_state_up=True): with self.network() as net1: self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTA) if not admin_state_up: self._set_agent_admin_state_up(DHCP_HOSTA, False) num_before_add = len( self._list_networks_hosted_by_dhcp_agent( hosta_id)['networks']) self._add_network_to_dhcp_agent(hosta_id, net1['network']['id']) num_after_add = len( self._list_networks_hosted_by_dhcp_agent( hosta_id)['networks']) self.assertEqual(0, num_before_add) self.assertEqual(1, num_after_add) def test_network_add_to_dhcp_agent(self): self._test_network_add_to_dhcp_agent() def test_network_add_to_dhcp_agent_with_admin_state_down(self): cfg.CONF.set_override( 'enable_services_on_agents_with_admin_state_down', True) self._test_network_add_to_dhcp_agent(admin_state_up=False) def _test_network_remove_from_dhcp_agent(self, concurrent_port_delete=False): agent = helpers.register_dhcp_agent(DHCP_HOSTA) hosta_id = agent.id with self.port(device_owner=constants.DEVICE_OWNER_DHCP, host=DHCP_HOSTA) as port1: num_before_remove = len( self._list_networks_hosted_by_dhcp_agent( hosta_id)['networks']) if concurrent_port_delete: plugin = directory.get_plugin() # Return a foo port to emulate the port not found scenario # caused by a concurrent port deletion during unscheduling port = {'id': 'foo_port_id', 'device_id': 
'foo_device_id'} mock.patch.object(plugin, 'get_ports', return_value=[port]).start() self._remove_network_from_dhcp_agent(hosta_id, port1['port']['network_id']) num_after_remove = len( self._list_networks_hosted_by_dhcp_agent( hosta_id)['networks']) self.assertEqual(1, num_before_remove) self.assertEqual(0, num_after_remove) def test_network_remove_from_dhcp_agent(self): self._test_network_remove_from_dhcp_agent() def test_network_remove_from_dhcp_agent_on_concurrent_port_delete(self): self._test_network_remove_from_dhcp_agent(concurrent_port_delete=True) def test_list_active_networks_on_not_registered_yet_dhcp_agent(self): plugin = directory.get_plugin() nets = plugin.list_active_networks_on_active_dhcp_agent( self.adminContext, host=DHCP_HOSTA) self.assertEqual([], nets) def test_reserved_port_after_network_remove_from_dhcp_agent(self): helpers.register_dhcp_agent(DHCP_HOSTA) hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTA) with self.port(device_owner=constants.DEVICE_OWNER_DHCP, host=DHCP_HOSTA) as port1: self._remove_network_from_dhcp_agent(hosta_id, port1['port']['network_id']) port_res = self._list_ports( 'json', 200, network_id=port1['port']['network_id']) port_list = self.deserialize('json', port_res) self.assertEqual(port_list['ports'][0]['device_id'], constants.DEVICE_ID_RESERVED_DHCP_PORT) def _test_get_active_networks_from_admin_state_down_agent(self, keep_services): if keep_services: cfg.CONF.set_override( 'enable_services_on_agents_with_admin_state_down', True) helpers.register_dhcp_agent(DHCP_HOSTA) dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback() with self.port(): nets = dhcp_rpc_cb.get_active_networks_info(self.adminContext, host=DHCP_HOSTA) self.assertEqual(1, len(nets)) self._set_agent_admin_state_up(DHCP_HOSTA, False) nets = dhcp_rpc_cb.get_active_networks_info(self.adminContext, host=DHCP_HOSTA) if keep_services: self.assertEqual(1, len(nets)) else: self.assertEqual(0, len(nets)) def test_dhcp_agent_keep_services_off(self): self._test_get_active_networks_from_admin_state_down_agent(False) def test_dhcp_agent_keep_services_on(self): self._test_get_active_networks_from_admin_state_down_agent(True) def _take_down_agent_and_run_reschedule(self, host): # take down the agent on host A and ensure B is alive with db_api.CONTEXT_WRITER.using(self.adminContext): query = self.adminContext.session.query(agent_model.Agent) agt = query.filter_by(host=host).first() agt.heartbeat_timestamp = ( agt.heartbeat_timestamp - datetime.timedelta(hours=1)) plugin = directory.get_plugin(plugin_constants.L3) plugin.reschedule_routers_from_down_agents() def _set_agent_admin_state_up(self, host, state): with db_api.CONTEXT_WRITER.using(self.adminContext): query = self.adminContext.session.query(agent_model.Agent) agt_db = query.filter_by(host=host).first() agt_db.admin_state_up = state def test_router_rescheduler_catches_rpc_db_and_reschedule_exceptions(self): with self.router(): l3_rpc_cb = l3_rpc.L3RpcCallback() self._register_agent_states() # schedule the router to host A l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA) plugin = directory.get_plugin(plugin_constants.L3) mock.patch.object( plugin, 'reschedule_router', side_effect=[ db_exc.DBError(), oslo_messaging.RemoteError(), l3agentscheduler.RouterReschedulingFailed(router_id='f', agent_id='f'), ValueError('this raises'), Exception() ]).start() self._take_down_agent_and_run_reschedule(L3_HOSTA) # DBError self._take_down_agent_and_run_reschedule(L3_HOSTA) # RemoteError self._take_down_agent_and_run_reschedule(L3_HOSTA) # 
schedule err self._take_down_agent_and_run_reschedule(L3_HOSTA) # Value error self._take_down_agent_and_run_reschedule(L3_HOSTA) # Exception def test_router_rescheduler_catches_exceptions_on_fetching_bindings(self): with mock.patch('neutron_lib.context.get_admin_context') as get_ctx: mock_ctx = mock.Mock() get_ctx.return_value = mock_ctx mock_ctx.session.query.side_effect = db_exc.DBError() plugin = directory.get_plugin(plugin_constants.L3) # check that no exception is raised plugin.reschedule_routers_from_down_agents() def test_router_rescheduler_iterates_after_reschedule_failure(self): plugin = directory.get_plugin(plugin_constants.L3) l3_rpc_cb = l3_rpc.L3RpcCallback() self._register_agent_states() with self.router() as r1, self.router() as r2: # schedule the routers to host A l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA) rs_mock = mock.patch.object( plugin, 'reschedule_router', side_effect=l3agentscheduler.RouterReschedulingFailed( router_id='f', agent_id='f'), ).start() self._take_down_agent_and_run_reschedule(L3_HOSTA) # make sure both had a reschedule attempt even though first failed rs_mock.assert_has_calls([mock.call(mock.ANY, r1['router']['id']), mock.call(mock.ANY, r2['router']['id'])], any_order=True) def test_router_is_not_rescheduled_from_alive_agent(self): with self.router(): l3_rpc_cb = l3_rpc.L3RpcCallback() self._register_agent_states() # schedule the router to host A l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA) with mock.patch('neutron.db.l3_agentschedulers_db.' 'L3AgentSchedulerDbMixin.reschedule_router') as rr: # take down some unrelated agent and run reschedule check self._take_down_agent_and_run_reschedule(DHCP_HOSTC) self.assertFalse(rr.called) def test_router_is_not_rescheduled_if_agent_is_back_online(self): plugin = directory.get_plugin(plugin_constants.L3) l3_rpc_cb = l3_rpc.L3RpcCallback() agent = helpers.register_l3_agent(host=L3_HOSTA) with self.router(),\ self.router(),\ mock.patch.object(plugin, 'reschedule_router') as rs_mock,\ mock.patch.object(plugin, '_get_agent') as get_agent_mock: # schedule the routers to the agent l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA) self._take_down_agent_and_run_reschedule(L3_HOSTA) # since _get_agent is mocked it will return Mock object and # agent.is_active will return true, so no rescheduling will be done self.assertFalse(rs_mock.called) # should be called only once as for second router alive agent id # will be in cache get_agent_mock.assert_called_once_with(mock.ANY, agent['id']) def test_router_reschedule_from_dead_agent(self): with self.router(): l3_rpc_cb = l3_rpc.L3RpcCallback() self._register_agent_states() # schedule the router to host A ret_a = l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA) self._take_down_agent_and_run_reschedule(L3_HOSTA) # B should now pick up the router ret_b = l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTB) self.assertEqual(ret_b, ret_a) def test_router_no_reschedule_from_dead_admin_down_agent(self): with self.router() as r: l3_rpc_cb = l3_rpc.L3RpcCallback() self._register_agent_states() # schedule the router to host A l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA) self._set_agent_admin_state_up(L3_HOSTA, False) self._take_down_agent_and_run_reschedule(L3_HOSTA) # A should still have it even though it was inactive due to the # admin_state being down bindings = rb_obj.RouterL3AgentBinding.get_objects( self.adminContext, router_id=r['router']['id']) binding = bindings.pop() if bindings else None l3_agent = 
ag_obj.Agent.get_objects( self.adminContext, id=binding.l3_agent_id) self.assertEqual(l3_agent[0].host, L3_HOSTA) # B should not pick up the router ret_b = l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTB) self.assertFalse(ret_b) def test_router_reschedule_succeeded_after_failed_notification(self): l3_rpc_cb = l3_rpc.L3RpcCallback() self._register_agent_states() with self.router() as router: # schedule the router to host A l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA) ctxt_mock = mock.MagicMock() call_mock = mock.MagicMock( side_effect=[oslo_messaging.MessagingTimeout, None]) ctxt_mock.call = call_mock self.client_mock.prepare = mock.MagicMock(return_value=ctxt_mock) self._take_down_agent_and_run_reschedule(L3_HOSTA) self.assertEqual(2, call_mock.call_count) # make sure router was rescheduled even when first attempt # failed to notify l3 agent l3_agents = self._list_l3_agents_hosting_router( router['router']['id'])['agents'] self.assertEqual(1, len(l3_agents)) self.assertEqual(L3_HOSTB, l3_agents[0]['host']) def test_router_reschedule_failed_notification_all_attempts(self): l3_rpc_cb = l3_rpc.L3RpcCallback() self._register_agent_states() with self.router() as router: # schedule the router to host A l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA) # mock client.prepare and context.call ctxt_mock = mock.MagicMock() call_mock = mock.MagicMock( side_effect=oslo_messaging.MessagingTimeout) ctxt_mock.call = call_mock self.client_mock.prepare = mock.MagicMock(return_value=ctxt_mock) # perform operations self._take_down_agent_and_run_reschedule(L3_HOSTA) self.assertEqual( l3_rpc_agent_api.AGENT_NOTIFY_MAX_ATTEMPTS, call_mock.call_count) l3_agents = self._list_l3_agents_hosting_router( router['router']['id'])['agents'] self.assertEqual(0, len(l3_agents)) def test_router_reschedule_no_remove_if_agent_has_dvr_service_ports(self): l3_notifier = self.l3plugin.agent_notifiers[constants.AGENT_TYPE_L3] agent_a = helpers.register_l3_agent( host=L3_HOSTA, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT) agent_b = helpers.register_l3_agent( host=L3_HOSTB, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT) with self.subnet() as s, \ mock.patch.object(l3_notifier.client, 'prepare', return_value=l3_notifier.client) as mock_prepare, \ mock.patch.object(l3_notifier.client, 'cast') as mock_cast, \ mock.patch.object(l3_notifier.client, 'call'): net_id = s['subnet']['network_id'] self._set_net_external(net_id) router = {'name': 'router1', 'external_gateway_info': {'network_id': net_id}, 'tenant_id': 'tenant_id', 'admin_state_up': True, 'distributed': True} r = self.l3plugin.create_router(self.adminContext, {'router': router}) # schedule the dvr to one of the agents self.l3plugin.schedule_router(self.adminContext, r['id']) l3agents = self.l3plugin.list_l3_agents_hosting_router( self.adminContext, r['id']) agent = l3agents['agents'][0] # emulating dvr serviceable ports exist on the host with mock.patch.object( self.l3plugin, '_check_dvr_serviceable_ports_on_host') \ as ports_exist: ports_exist.return_value = True # reschedule the dvr to one of the other agent candidate_agent = (agent_b if agent['host'] == L3_HOSTA else agent_a) self.l3plugin.reschedule_router(self.adminContext, r['id'], candidates=[candidate_agent]) # make sure dvr serviceable ports are checked when rescheduling self.assertTrue(ports_exist.called) # make sure sending update instead of removing for dvr mock_prepare.assert_called_with(server=candidate_agent['host']) mock_cast.assert_called_with( mock.ANY, 'routers_updated', 
routers=[r['id']]) # make sure the rescheduling completes l3agents = self.l3plugin.list_l3_agents_hosting_router( self.adminContext, r['id']) self.assertEqual(1, len(l3agents['agents'])) new_agent_host = l3agents['agents'][0]['host'] self.assertNotEqual(agent['host'], new_agent_host) def test_router_auto_schedule_with_invalid_router(self): with self.router() as router: l3_rpc_cb = l3_rpc.L3RpcCallback() self._register_agent_states() self._delete('routers', router['router']['id']) # deleted router ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA, router_ids=[router['router']['id']]) self.assertFalse(ret_a) # non-existent router ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA, router_ids=[uuidutils.generate_uuid()]) self.assertFalse(ret_a) def test_router_auto_schedule_with_hosted(self): with self.router() as router: l3_rpc_cb = l3_rpc.L3RpcCallback() self._register_agent_states() ret_a = l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA) ret_b = l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTB) l3_agents = self._list_l3_agents_hosting_router( router['router']['id']) self.assertEqual(1, len(ret_a)) self.assertIn(router['router']['id'], ret_a) self.assertFalse(len(ret_b)) self.assertEqual(1, len(l3_agents['agents'])) self.assertEqual(L3_HOSTA, l3_agents['agents'][0]['host']) def test_router_auto_schedule_restart_l3_agent(self): with self.router(): l3_rpc_cb = l3_rpc.L3RpcCallback() self._register_agent_states() l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA) l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA) def test_router_auto_schedule_with_hosted_2(self): # one agent hosts one router l3_rpc_cb = l3_rpc.L3RpcCallback() with self.router() as router1: hosta_id = helpers.register_l3_agent(host=L3_HOSTA).id l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA) self._disable_agent(hosta_id, admin_state_up=False) with self.router() as router2: hostb_id = helpers.register_l3_agent(host=L3_HOSTB).id l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTB) l3_agents_1 = self._list_l3_agents_hosting_router( router1['router']['id']) l3_agents_2 = self._list_l3_agents_hosting_router( router2['router']['id']) hosta_routers = self._list_routers_hosted_by_l3_agent(hosta_id) num_hosta_routers = len(hosta_routers['routers']) hostb_routers = self._list_routers_hosted_by_l3_agent(hostb_id) num_hostb_routers = len(hostb_routers['routers']) self.assertEqual(1, num_hosta_routers) self.assertEqual(1, num_hostb_routers) self.assertEqual(1, len(l3_agents_1['agents'])) self.assertEqual(1, len(l3_agents_2['agents'])) self.assertEqual(L3_HOSTA, l3_agents_1['agents'][0]['host']) self.assertEqual(L3_HOSTB, l3_agents_2['agents'][0]['host']) def test_router_auto_schedule_with_disabled(self): with self.router(), self.router(): l3_rpc_cb = l3_rpc.L3RpcCallback() self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, L3_HOSTA) hostb_id = self._get_agent_id(constants.AGENT_TYPE_L3, L3_HOSTB) self._disable_agent(hosta_id) # first agent will not host router since it is disabled l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA) # second agent will host all the routers since first is disabled. 
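            # get_router_ids doubles as the auto-scheduling trigger in these
            # tests: the call for L3_HOSTB below is what binds both routers.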
l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTB) hostb_routers = self._list_routers_hosted_by_l3_agent(hostb_id) num_hostb_routers = len(hostb_routers['routers']) hosta_routers = self._list_routers_hosted_by_l3_agent(hosta_id) num_hosta_routers = len(hosta_routers['routers']) self.assertEqual(2, num_hostb_routers) self.assertEqual(0, num_hosta_routers) def test_rpc_sync_routers(self): l3_rpc_cb = l3_rpc.L3RpcCallback() self._register_agent_states() # No routers ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA) self.assertEqual(0, len(ret_a)) with self.router() as v1, self.router() as v2, self.router() as v3: routers = (v1, v2, v3) router_ids = [r['router']['id'] for r in routers] # auto schedule routers first l3_rpc_cb.get_router_ids(self.adminContext, host=L3_HOSTA) # Get all routers ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA) self.assertEqual(3, len(ret_a)) self.assertEqual(set(router_ids), set([r['id'] for r in ret_a])) # Get all routers (router_ids=None) ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA, router_ids=None) self.assertEqual(3, len(ret_a)) self.assertEqual(set(router_ids), set([r['id'] for r in ret_a])) # Get router2 only ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA, router_ids=[router_ids[1]]) self.assertEqual(1, len(ret_a)) self.assertIn(router_ids[1], [r['id'] for r in ret_a]) # Get router1 and router3 ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA, router_ids=[router_ids[0], router_ids[2]]) self.assertEqual(2, len(ret_a)) self.assertIn(router_ids[0], [r['id'] for r in ret_a]) self.assertIn(router_ids[2], [r['id'] for r in ret_a]) def test_sync_router(self): l3_rpc_cb = l3_rpc.L3RpcCallback() self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, L3_HOSTA) with self.router() as r1: ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA, router_ids=[r1['router']['id']]) # Not return router to agent if the router is not bound to it. self.assertEqual([], ret_a) host_routers = self._list_routers_hosted_by_l3_agent(hosta_id) # No router will be auto scheduled. self.assertEqual(0, len(host_routers['routers'])) def test_sync_dvr_router(self): l3_rpc_cb = l3_rpc.L3RpcCallback() dvr_agents = self._register_dvr_agents() with self.router() as r1, \ mock.patch.object(self.l3plugin, 'get_subnet_ids_on_router', return_value=['fake_subnet_id']), \ mock.patch.object(self.l3plugin, '_check_dvr_serviceable_ports_on_host', return_value=True): for l3_agent in dvr_agents: host = l3_agent['host'] ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=host, router_ids=[r1['router']['id']]) router_ids = [r['id'] for r in ret_a] # Return router to agent if there is dvr service port in agent. self.assertIn(r1['router']['id'], router_ids) host_routers = self._list_routers_hosted_by_l3_agent( l3_agent['id']) # No router will be auto scheduled. 
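                # (the DVR router is returned over RPC, but no persistent
                # binding to the agent is created)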
self.assertEqual(0, len(host_routers['routers'])) def test_sync_dvr_router_with_fixedip_on_fip_net(self): l3_rpc_cb = l3_rpc.L3RpcCallback() self._register_dvr_agents() with self.subnet() as s: # first create an external network net_id = s['subnet']['network_id'] self._set_net_external(net_id) # create router with external gateway router = {'name': 'router1', 'external_gateway_info': {'network_id': net_id}, 'tenant_id': 'tenant_id', 'admin_state_up': True, 'distributed': True} r = self.l3plugin.create_router(self.adminContext, {'router': router}) self.l3plugin.schedule_router(self.adminContext, r['id']) with self.port(subnet=s, device_owner=DEVICE_OWNER_COMPUTE) as port: # bind port to L3_HOSTB updated_port = { "port": { portbindings.HOST_ID: L3_HOSTB } } self.plugin.update_port( self.adminContext, port['port']['id'], updated_port ) ret_b = l3_rpc_cb.sync_routers( self.adminContext, host=L3_HOSTB, router_ids=[r['id']]) router_ids = [r['id'] for r in ret_b] self.assertEqual(0, len(router_ids)) def test_router_without_l3_agents(self): with self.subnet() as s: self._set_net_external(s['subnet']['network_id']) data = {'router': {'tenant_id': uuidutils.generate_uuid()}} data['router']['name'] = 'router1' data['router']['external_gateway_info'] = { 'network_id': s['subnet']['network_id']} router_req = self.new_create_request('routers', data, self.fmt) res = router_req.get_response(self.ext_api) router = self.deserialize(self.fmt, res) l3agents = ( self.l3plugin.get_l3_agents_hosting_routers( self.adminContext, [router['router']['id']])) self._delete('routers', router['router']['id']) self.assertEqual(0, len(l3agents)) def test_dvr_router_scheduling_to_only_dvr_snat_agent(self): self._register_dvr_agents() with self.subnet() as s: net_id = s['subnet']['network_id'] self._set_net_external(net_id) router = {'name': 'router1', 'external_gateway_info': {'network_id': net_id}, 'tenant_id': 'tenant_id', 'admin_state_up': True, 'distributed': True} r = self.l3plugin.create_router(self.adminContext, {'router': router}) with mock.patch.object( self.l3plugin, '_check_dvr_serviceable_ports_on_host') as ports_exist: # emulating dvr serviceable ports exist on compute node ports_exist.return_value = True self.l3plugin.schedule_router( self.adminContext, r['id']) l3agents = self._list_l3_agents_hosting_router(r['id']) self.assertEqual(1, len(l3agents['agents'])) agent = l3agents['agents'][0] self.assertEqual('dvr_snat', agent['configurations']['agent_mode']) def test_dvr_router_csnat_rescheduling(self): helpers.register_l3_agent( host=L3_HOSTA, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT) helpers.register_l3_agent( host=L3_HOSTB, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT) with self.subnet() as s: net_id = s['subnet']['network_id'] self._set_net_external(net_id) router = {'name': 'router1', 'external_gateway_info': {'network_id': net_id}, 'tenant_id': 'tenant_id', 'admin_state_up': True, 'distributed': True} r = self.l3plugin.create_router(self.adminContext, {'router': router}) self.l3plugin.schedule_router( self.adminContext, r['id']) l3agents = self._list_l3_agents_hosting_router(r['id']) self.assertEqual(1, len(l3agents['agents'])) agent_host = l3agents['agents'][0]['host'] self._take_down_agent_and_run_reschedule(agent_host) l3agents = self._list_l3_agents_hosting_router(r['id']) self.assertEqual(1, len(l3agents['agents'])) new_agent_host = l3agents['agents'][0]['host'] self.assertNotEqual(agent_host, new_agent_host) def test_dvr_router_manual_rescheduling(self): helpers.register_l3_agent( host=L3_HOSTA, 
agent_mode=constants.L3_AGENT_MODE_DVR_SNAT) helpers.register_l3_agent( host=L3_HOSTB, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT) with self.subnet() as s: net_id = s['subnet']['network_id'] self._set_net_external(net_id) router = {'name': 'router1', 'external_gateway_info': {'network_id': net_id}, 'tenant_id': 'tenant_id', 'admin_state_up': True, 'distributed': True} r = self.l3plugin.create_router(self.adminContext, {'router': router}) self.l3plugin.schedule_router( self.adminContext, r['id']) l3agents = self.l3plugin.list_l3_agents_hosting_router( self.adminContext, r['id']) self.assertEqual(1, len(l3agents['agents'])) agent = l3agents['agents'][0] # NOTE: Removing the router from the l3_agent will # remove all the namespace since there is no other # serviceable ports in the node that requires it. self.l3plugin.remove_router_from_l3_agent( self.adminContext, agent['id'], r['id']) l3agents = self.l3plugin.list_l3_agents_hosting_router( self.adminContext, r['id']) self.assertEqual(0, len(l3agents['agents'])) self.l3plugin.add_router_to_l3_agent( self.adminContext, agent['id'], r['id']) l3agents = self.l3plugin.list_l3_agents_hosting_router( self.adminContext, r['id']) self.assertEqual(1, len(l3agents['agents'])) new_agent = l3agents['agents'][0] self.assertEqual(agent['id'], new_agent['id']) def test_router_sync_data(self): with self.subnet() as s1,\ self.subnet(cidr='10.0.2.0/24') as s2,\ self.subnet(cidr='10.0.3.0/24') as s3: self._register_agent_states() self._set_net_external(s1['subnet']['network_id']) data = {'router': {'tenant_id': uuidutils.generate_uuid()}} data['router']['name'] = 'router1' data['router']['external_gateway_info'] = { 'network_id': s1['subnet']['network_id']} router_req = self.new_create_request('routers', data, self.fmt) res = router_req.get_response(self.ext_api) router = self.deserialize(self.fmt, res) self._router_interface_action('add', router['router']['id'], s2['subnet']['id'], None) self._router_interface_action('add', router['router']['id'], s3['subnet']['id'], None) l3agents = self._list_l3_agents_hosting_router( router['router']['id']) self.assertEqual(1, len(l3agents['agents'])) agents = self._list_agents() another_l3_agent_id = None another_l3_agent_host = None default = l3agents['agents'][0]['id'] for com in agents['agents']: if (com['id'] != default and com['agent_type'] == constants.AGENT_TYPE_L3): another_l3_agent_id = com['id'] another_l3_agent_host = com['host'] break self.assertIsNotNone(another_l3_agent_id) self._add_router_to_l3_agent(another_l3_agent_id, router['router']['id'], expected_code=exc.HTTPConflict.code) self._remove_router_from_l3_agent(default, router['router']['id']) self._add_router_to_l3_agent(another_l3_agent_id, router['router']['id']) l3agents = self._list_l3_agents_hosting_router( router['router']['id']) self.assertEqual(another_l3_agent_host, l3agents['agents'][0]['host']) self._remove_router_from_l3_agent(another_l3_agent_id, router['router']['id']) self._router_interface_action('remove', router['router']['id'], s2['subnet']['id'], None) l3agents = self._list_l3_agents_hosting_router( router['router']['id']) self.assertEqual(1, len(l3agents['agents'])) self._router_interface_action('remove', router['router']['id'], s3['subnet']['id'], None) self._delete('routers', router['router']['id']) def _test_router_add_to_l3_agent(self, admin_state_up=True): with self.router() as router1: self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, L3_HOSTA) if not admin_state_up: 
self._set_agent_admin_state_up(L3_HOSTA, False) num_before_add = len( self._list_routers_hosted_by_l3_agent( hosta_id)['routers']) self._add_router_to_l3_agent(hosta_id, router1['router']['id']) hostb_id = self._get_agent_id(constants.AGENT_TYPE_L3, L3_HOSTB) self._add_router_to_l3_agent(hostb_id, router1['router']['id'], expected_code=exc.HTTPConflict.code) num_after_add = len( self._list_routers_hosted_by_l3_agent( hosta_id)['routers']) self.assertEqual(0, num_before_add) self.assertEqual(1, num_after_add) def test_router_add_to_l3_agent(self): self._test_router_add_to_l3_agent() def test_router_add_to_l3_agent_with_admin_state_down(self): cfg.CONF.set_override( 'enable_services_on_agents_with_admin_state_down', True) self._test_router_add_to_l3_agent(admin_state_up=False) def test_router_add_to_l3_agent_two_times(self): with self.router() as router1: self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, L3_HOSTA) self._add_router_to_l3_agent(hosta_id, router1['router']['id']) # scheduling twice on the same agent is fine self._add_router_to_l3_agent(hosta_id, router1['router']['id']) def test_router_add_to_two_l3_agents(self): with self.router() as router1: self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, L3_HOSTA) hostb_id = self._get_agent_id(constants.AGENT_TYPE_L3, L3_HOSTB) self._add_router_to_l3_agent(hosta_id, router1['router']['id']) self._add_router_to_l3_agent(hostb_id, router1['router']['id'], expected_code=exc.HTTPConflict.code) def test_router_policy(self): with self.router() as router1: self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, L3_HOSTA) self._list_routers_hosted_by_l3_agent( hosta_id, expected_code=exc.HTTPForbidden.code, admin_context=False) self._add_router_to_l3_agent( hosta_id, router1['router']['id'], expected_code=exc.HTTPForbidden.code, admin_context=False) self._add_router_to_l3_agent( hosta_id, router1['router']['id']) self._remove_router_from_l3_agent( hosta_id, router1['router']['id'], expected_code=exc.HTTPForbidden.code, admin_context=False) self._list_l3_agents_hosting_router( router1['router']['id'], expected_code=exc.HTTPForbidden.code, admin_context=False) def _test_sync_routers_from_admin_state_down_agent(self, keep_services): if keep_services: cfg.CONF.set_override( 'enable_services_on_agents_with_admin_state_down', True) l3_rpc_cb = l3_rpc.L3RpcCallback() self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, L3_HOSTA) with self.router() as router: self._add_router_to_l3_agent(hosta_id, router['router']['id']) routers = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA) self.assertEqual(1, len(routers)) self._set_agent_admin_state_up(L3_HOSTA, False) routers = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA) if keep_services: self.assertEqual(1, len(routers)) else: self.assertEqual(0, len(routers)) def test_l3_agent_keep_services_off(self): self._test_sync_routers_from_admin_state_down_agent(False) def test_l3_agent_keep_services_on(self): self._test_sync_routers_from_admin_state_down_agent(True) def test_list_routers_hosted_by_l3_agent_with_invalid_agent(self): invalid_agentid = 'non_existing_agent' self._list_routers_hosted_by_l3_agent(invalid_agentid, exc.HTTPNotFound.code) def test_list_networks_hosted_by_dhcp_agent_with_invalid_agent(self): invalid_agentid = 'non_existing_agent' self._list_networks_hosted_by_dhcp_agent(invalid_agentid, exc.HTTPNotFound.code) def test_network_no_reschedule(self): 
cfg.CONF.set_override('allow_overlapping_ips', True) cfg.CONF.set_override('network_auto_schedule', False) with self.subnet() as sb1, self.subnet(): network1_id = sb1['subnet']['network_id'] dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback() self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTA) hostc_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTC) dhcp_rpc_cb.get_active_networks_info( self.adminContext, host=DHCP_HOSTA) dhcp_rpc_cb.get_active_networks_info( self.adminContext, host=DHCP_HOSTC) networks = self._list_networks_hosted_by_dhcp_agent(hostc_id) num_hostc_nets = len(networks['networks']) networks = self._list_networks_hosted_by_dhcp_agent(hosta_id) num_hosta_nets = len(networks['networks']) self.assertEqual(0, num_hosta_nets) self.assertEqual(0, num_hostc_nets) # After this patch, network which requires DHCP # has to be manually mapped self._add_network_to_dhcp_agent(hosta_id, network1_id) networks = self._list_networks_hosted_by_dhcp_agent(hosta_id) num_hosta_nets = len(networks['networks']) self.assertEqual(1, num_hosta_nets) class OvsDhcpAgentNotifierTestCase(test_agent.AgentDBTestMixIn, AgentSchedulerTestMixIn, test_plugin.NeutronDbPluginV2TestCase): def setUp(self): super(OvsDhcpAgentNotifierTestCase, self).setUp('ml2') mock.patch.object( self.plugin, 'filter_hosts_with_network_access', side_effect=lambda context, network_id, hosts: hosts).start() plugin = directory.get_plugin() self.dhcp_notifier = plugin.agent_notifiers[constants.AGENT_TYPE_DHCP] self.dhcp_notifier_cast = mock.patch( 'neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.' 'DhcpAgentNotifyAPI._cast_message').start() ext_mgr = extensions.PluginAwareExtensionManager.get_instance() self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr) self.adminContext = context.get_admin_context() fake_notifier.reset() def test_network_add_to_dhcp_agent_notification(self): with self.network() as net1: network_id = net1['network']['id'] self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTA) self._add_network_to_dhcp_agent(hosta_id, network_id) self.dhcp_notifier_cast.assert_called_with( mock.ANY, 'network_create_end', {'network': {'id': network_id}, 'priority': dhcp_rpc_agent_api.PRIORITY_NETWORK_CREATE}, DHCP_HOSTA) notifications = fake_notifier.NOTIFICATIONS expected_event_type = 'dhcp_agent.network.add' self._assert_notify(notifications, expected_event_type) def test_network_remove_from_dhcp_agent_notification(self): with self.network() as net1: network_id = net1['network']['id'] self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTA) self._add_network_to_dhcp_agent(hosta_id, network_id) self._remove_network_from_dhcp_agent(hosta_id, network_id) self.dhcp_notifier_cast.assert_called_with( mock.ANY, 'network_delete_end', {'network_id': network_id, 'priority': dhcp_rpc_agent_api.PRIORITY_NETWORK_DELETE}, DHCP_HOSTA) notifications = fake_notifier.NOTIFICATIONS expected_event_type = 'dhcp_agent.network.remove' self._assert_notify(notifications, expected_event_type) def test_agent_updated_dhcp_agent_notification(self): self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, DHCP_HOSTA) self._disable_agent(hosta_id, admin_state_up=False) self.dhcp_notifier_cast.assert_called_with( mock.ANY, 'agent_updated', {'admin_state_up': False}, DHCP_HOSTA) def _api_network_port_create( self, hosts, gateway=constants.ATTR_NOT_SPECIFIED, owner=None): for host in hosts: 
helpers.register_dhcp_agent(host) with self.network() as net1: with self.subnet(network=net1, gateway_ip=gateway) as subnet1: if owner: with self.port(subnet=subnet1, device_owner=owner) as port: return [net1, subnet1, port] else: with self.port(subnet=subnet1) as port: return [net1, subnet1, port] def _network_port_create(self, *args, **kwargs): net, sub, port = self._api_network_port_create(*args, **kwargs) dhcp_notifier = self.plugin.agent_notifiers[constants.AGENT_TYPE_DHCP] if (not hasattr(dhcp_notifier, 'uses_native_notifications') or not all(dhcp_notifier.uses_native_notifications[r]['create'] for r in ('port', 'subnet', 'network'))): return net, sub, port # since plugin has native dhcp notifications, the payloads will be the # same as the getter outputs ctx = context.get_admin_context() net['network'] = self.plugin.get_network(ctx, net['network']['id']) sub['subnet'] = self.plugin.get_subnet(ctx, sub['subnet']['id']) sub['priority'] = dhcp_rpc_agent_api.PRIORITY_SUBNET_UPDATE port['port'] = self.plugin.get_port(ctx, port['port']['id']) return net, sub, port def _notification_mocks(self, hosts, net, subnet, port, port_priority): host_calls = {} for host in hosts: expected_calls = [ mock.call( mock.ANY, 'network_create_end', {'priority': dhcp_rpc_agent_api.PRIORITY_NETWORK_CREATE, 'network': {'id': net['network']['id']}}, host), mock.call( mock.ANY, 'subnet_create_end', subnet, host, 'dhcp_agent'), mock.call( mock.ANY, 'port_create_end', {'port': port['port'], 'priority': port_priority}, host, 'dhcp_agent')] host_calls[host] = expected_calls return host_calls def test_network_port_create_notification(self): hosts = [DHCP_HOSTA] net, subnet, port = self._network_port_create(hosts) expected_calls = self._notification_mocks( hosts, net, subnet, port, dhcp_rpc_agent_api.PRIORITY_PORT_CREATE_HIGH) self.assertEqual( expected_calls[DHCP_HOSTA], self.dhcp_notifier_cast.call_args_list) def test_network_ha_port_create_notification(self): cfg.CONF.set_override('dhcp_agents_per_network', 3) hosts = [DHCP_HOSTA, DHCP_HOSTC, DHCP_HOSTD] net, subnet, port = self._network_port_create(hosts) for host_call in self.dhcp_notifier_cast.call_args_list: if ("'priority': " + str( dhcp_rpc_agent_api.PRIORITY_PORT_CREATE_HIGH) in str(host_call)): if DHCP_HOSTA in str(host_call): expected_high_calls = self._notification_mocks( [DHCP_HOSTA], net, subnet, port, dhcp_rpc_agent_api.PRIORITY_PORT_CREATE_HIGH) high_host = DHCP_HOSTA hosts.pop(0) elif DHCP_HOSTC in str(host_call): expected_high_calls = self._notification_mocks( [DHCP_HOSTC], net, subnet, port, dhcp_rpc_agent_api.PRIORITY_PORT_CREATE_HIGH) high_host = DHCP_HOSTC hosts.pop(1) elif DHCP_HOSTD in str(host_call): expected_high_calls = self._notification_mocks( [DHCP_HOSTD], net, subnet, port, dhcp_rpc_agent_api.PRIORITY_PORT_CREATE_HIGH) high_host = DHCP_HOSTD hosts.pop(2) expected_low_calls = self._notification_mocks( hosts, net, subnet, port, dhcp_rpc_agent_api.PRIORITY_PORT_CREATE_LOW) for expected in expected_high_calls[high_host]: self.assertIn(expected, self.dhcp_notifier_cast.call_args_list) for host, low_expecteds in expected_low_calls.items(): for expected in low_expecteds: self.assertIn(expected, self.dhcp_notifier_cast.call_args_list) def _is_schedule_network_called(self, device_id): dhcp_notifier_schedule = mock.patch( 'neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.' 
    def _is_schedule_network_called(self, device_id):
        dhcp_notifier_schedule = mock.patch(
            'neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.'
            'DhcpAgentNotifyAPI._schedule_network').start()
        plugin = directory.get_plugin()
        with self.subnet() as subnet,\
                self.port(subnet=subnet, device_id=device_id),\
                mock.patch.object(plugin,
                                  'get_dhcp_agents_hosting_networks',
                                  return_value=[]):
            return dhcp_notifier_schedule.call_count > 1

    def test_reserved_dhcp_port_creation(self):
        device_id = constants.DEVICE_ID_RESERVED_DHCP_PORT
        self.assertFalse(self._is_schedule_network_called(device_id))

    def test_unreserved_dhcp_port_creation(self):
        device_id = 'not_reserved'
        self.assertTrue(self._is_schedule_network_called(device_id))
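    # NOTE: the two tests above only differ in the device_id: creating a
    # port with constants.DEVICE_ID_RESERVED_DHCP_PORT is expected to skip
    # the extra _schedule_network pass, while any other device_id
    # triggers it.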

class OvsL3AgentNotifierTestCase(test_l3.L3NatTestCaseMixin,
                                 test_agent.AgentDBTestMixIn,
                                 AgentSchedulerTestMixIn,
                                 test_plugin.NeutronDbPluginV2TestCase):
    l3_plugin = ('neutron.tests.unit.extensions.test_l3.'
                 'TestL3NatAgentSchedulingServicePlugin')

    def setUp(self):
        self.dhcp_notifier_cls_p = mock.patch(
            'neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.'
            'DhcpAgentNotifyAPI')
        self.dhcp_notifier = mock.Mock(name='dhcp_notifier')
        self.dhcp_notifier_cls = self.dhcp_notifier_cls_p.start()
        self.dhcp_notifier_cls.return_value = self.dhcp_notifier

        if self.l3_plugin:
            service_plugins = {
                'l3_plugin_name': self.l3_plugin,
                'flavors_plugin_name': 'neutron.services.flavors.'
                                       'flavors_plugin.FlavorsPlugin'
            }
        else:
            service_plugins = None
        super(OvsL3AgentNotifierTestCase, self).setUp(
            'ml2', service_plugins=service_plugins)
        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
        self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
        self.adminContext = context.get_admin_context()
        fake_notifier.reset()

    def test_router_add_to_l3_agent_notification(self):
        l3_plugin = directory.get_plugin(plugin_constants.L3)
        l3_notifier = l3_plugin.agent_notifiers[constants.AGENT_TYPE_L3]
        with mock.patch.object(
                l3_notifier.client, 'prepare',
                return_value=l3_notifier.client) as mock_prepare,\
                mock.patch.object(l3_notifier.client, 'call') as mock_call,\
                self.router() as router1:
            self._register_agent_states()
            hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, L3_HOSTA)
            self._add_router_to_l3_agent(hosta_id, router1['router']['id'])
            routers = [router1['router']['id']]
            mock_prepare.assert_called_with(server='hosta')
            mock_call.assert_called_with(
                mock.ANY, 'router_added_to_agent', payload=routers)
            notifications = fake_notifier.NOTIFICATIONS
            expected_event_type = 'l3_agent.router.add'
            self._assert_notify(notifications, expected_event_type)

    def test_router_remove_from_l3_agent_notification(self):
        l3_plugin = directory.get_plugin(plugin_constants.L3)
        l3_notifier = l3_plugin.agent_notifiers[constants.AGENT_TYPE_L3]
        with mock.patch.object(
                l3_notifier.client, 'prepare',
                return_value=l3_notifier.client) as mock_prepare,\
                mock.patch.object(l3_notifier.client, 'cast') as mock_cast,\
                mock.patch.object(l3_notifier.client, 'call'),\
                self.router() as router1:
            self._register_agent_states()
            hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, L3_HOSTA)
            self._add_router_to_l3_agent(hosta_id, router1['router']['id'])
            self._remove_router_from_l3_agent(hosta_id,
                                              router1['router']['id'])
            mock_prepare.assert_called_with(server='hosta')
            mock_cast.assert_called_with(
                mock.ANY, 'router_removed_from_agent',
                payload={'router_id': router1['router']['id']})
            notifications = fake_notifier.NOTIFICATIONS
            expected_event_type = 'l3_agent.router.remove'
            self._assert_notify(notifications, expected_event_type)

    def test_agent_updated_l3_agent_notification(self):
        l3_plugin = directory.get_plugin(plugin_constants.L3)
        l3_notifier = l3_plugin.agent_notifiers[constants.AGENT_TYPE_L3]
        with mock.patch.object(
                l3_notifier.client, 'prepare',
                return_value=l3_notifier.client) as mock_prepare,\
                mock.patch.object(l3_notifier.client, 'cast') as mock_cast:
            agent_id = helpers.register_l3_agent(L3_HOSTA).id
            self._disable_agent(agent_id, admin_state_up=False)

            mock_prepare.assert_called_with(server=L3_HOSTA)

            mock_cast.assert_called_with(
                mock.ANY, 'agent_updated', payload={'admin_state_up': False})


neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/test_allowedaddresspairs_db.py

# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from neutron_lib.api.definitions import allowedaddresspairs as addr_apidef
from neutron_lib.api.definitions import port_security as psec
from neutron_lib.api import validators
from neutron_lib.db import api as db_api
from neutron_lib.plugins import directory
from oslo_config import cfg
from webob import exc as web_exc

from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import db_base_plugin_v2
from neutron.db import portsecurity_db
from neutron.extensions import securitygroup as secgroup
from neutron.tests.unit.db import test_db_base_plugin_v2


DB_PLUGIN_KLASS = ('neutron.tests.unit.db.test_allowedaddresspairs_db.'
                   'AllowedAddressPairTestPlugin')


class AllowedAddressPairTestCase(
        test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
    def setUp(self, plugin=None, ext_mgr=None):
        super(AllowedAddressPairTestCase, self).setUp(plugin)

        # Check if a plugin supports security groups
        plugin_obj = directory.get_plugin()
        self._skip_port_security = ('port-security' not in
                                    plugin_obj.supported_extension_aliases)
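# NOTE: the plugin and tests below exercise the allowed-address-pairs
# extension with payloads of the following shape (a representative
# example taken from the tests, not an exhaustive schema):
#
#     allowed_address_pairs = [{'mac_address': '00:00:00:00:00:01',
#                               'ip_address': '10.0.0.1'}]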
""" supported_extension_aliases = [addr_apidef.ALIAS] def create_port(self, context, port): p = port['port'] with db_api.CONTEXT_WRITER.using(context): neutron_db = super(AllowedAddressPairTestPlugin, self).create_port( context, port) p.update(neutron_db) if validators.is_attr_set(p.get(addr_apidef.ADDRESS_PAIRS)): self._process_create_allowed_address_pairs( context, p, p[addr_apidef.ADDRESS_PAIRS]) else: p[addr_apidef.ADDRESS_PAIRS] = None return port['port'] def update_port(self, context, id, port): delete_addr_pairs = self._check_update_deletes_allowed_address_pairs( port) has_addr_pairs = self._check_update_has_allowed_address_pairs(port) with db_api.CONTEXT_WRITER.using(context): ret_port = super(AllowedAddressPairTestPlugin, self).update_port( context, id, port) # copy values over - but not fixed_ips port['port'].pop('fixed_ips', None) ret_port.update(port['port']) if (delete_addr_pairs or has_addr_pairs): # delete address pairs and readd them self._delete_allowed_address_pairs(context, id) self._process_create_allowed_address_pairs( context, ret_port, ret_port[addr_apidef.ADDRESS_PAIRS]) return ret_port class AllowedAddressPairDBTestCase(AllowedAddressPairTestCase): def setUp(self, plugin=None, ext_mgr=None): plugin = plugin or DB_PLUGIN_KLASS super(AllowedAddressPairDBTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) class TestAllowedAddressPairs(AllowedAddressPairDBTestCase): def test_create_port_allowed_address_pairs_bad_format(self): with self.network() as net: bad_values = [False, True, 1.1, 1, ['ip_address'], ['mac_address']] for value in bad_values: self._create_port( self.fmt, net['network']['id'], expected_res_status=web_exc.HTTPBadRequest.code, arg_list=(addr_apidef.ADDRESS_PAIRS,), allowed_address_pairs=value) def test_create_port_allowed_address_pairs(self): with self.network() as net: address_pairs = [{'mac_address': '00:00:00:00:00:01', 'ip_address': '10.0.0.1'}] res = self._create_port(self.fmt, net['network']['id'], arg_list=(addr_apidef.ADDRESS_PAIRS,), allowed_address_pairs=address_pairs) port = self.deserialize(self.fmt, res) self.assertEqual(port['port'][addr_apidef.ADDRESS_PAIRS], address_pairs) self._delete('ports', port['port']['id']) def test_create_port_security_true_allowed_address_pairs(self): if self._skip_port_security: self.skipTest("Plugin does not implement port-security extension") with self.network() as net: address_pairs = [{'mac_address': '00:00:00:00:00:01', 'ip_address': '10.0.0.1'}] res = self._create_port(self.fmt, net['network']['id'], arg_list=('port_security_enabled', addr_apidef.ADDRESS_PAIRS,), port_security_enabled=True, allowed_address_pairs=address_pairs) port = self.deserialize(self.fmt, res) self.assertTrue(port['port'][psec.PORTSECURITY]) self.assertEqual(port['port'][addr_apidef.ADDRESS_PAIRS], address_pairs) self._delete('ports', port['port']['id']) def test_create_port_security_false_allowed_address_pairs(self): if self._skip_port_security: self.skipTest("Plugin does not implement port-security extension") with self.network() as net: address_pairs = [{'mac_address': '00:00:00:00:00:01', 'ip_address': '10.0.0.1'}] res = self._create_port(self.fmt, net['network']['id'], arg_list=('port_security_enabled', addr_apidef.ADDRESS_PAIRS,), port_security_enabled=False, allowed_address_pairs=address_pairs) self.deserialize(self.fmt, res) self.assertEqual(409, res.status_int) address_pairs = [] res = self._create_port(self.fmt, net['network']['id'], arg_list=('port_security_enabled', addr_apidef.ADDRESS_PAIRS,), 
    def test_create_port_security_false_allowed_address_pairs(self):
        if self._skip_port_security:
            self.skipTest("Plugin does not implement port-security extension")

        with self.network() as net:
            address_pairs = [{'mac_address': '00:00:00:00:00:01',
                              'ip_address': '10.0.0.1'}]
            res = self._create_port(self.fmt, net['network']['id'],
                                    arg_list=('port_security_enabled',
                                              addr_apidef.ADDRESS_PAIRS,),
                                    port_security_enabled=False,
                                    allowed_address_pairs=address_pairs)
            self.deserialize(self.fmt, res)
            self.assertEqual(409, res.status_int)

            address_pairs = []
            res = self._create_port(self.fmt, net['network']['id'],
                                    arg_list=('port_security_enabled',
                                              addr_apidef.ADDRESS_PAIRS,),
                                    port_security_enabled=False,
                                    allowed_address_pairs=address_pairs)
            port = self.deserialize(self.fmt, res)
            self.assertFalse(port['port'][psec.PORTSECURITY])
            self.assertEqual(port['port'][addr_apidef.ADDRESS_PAIRS],
                             address_pairs)
            self._delete('ports', port['port']['id'])

    def test_create_port_bad_mac(self):
        address_pairs = [{'mac_address': 'invalid_mac',
                          'ip_address': '10.0.0.1'}]
        self._create_port_with_address_pairs(address_pairs, 400)

    def test_create_port_bad_ip(self):
        address_pairs = [{'mac_address': '00:00:00:00:00:01',
                          'ip_address': '10.0.0.1222'}]
        self._create_port_with_address_pairs(address_pairs, 400)

    def test_create_missing_mac_field(self):
        address_pairs = [{'ip_address': '10.0.0.1'}]
        self._create_port_with_address_pairs(address_pairs, 201)

    def test_create_missing_ip_field(self):
        address_pairs = [{'mac_address': '00:00:00:00:00:01'}]
        self._create_port_with_address_pairs(address_pairs, 400)

    def test_create_duplicate_mac_ip(self):
        address_pairs = [{'mac_address': '00:00:00:00:00:01',
                          'ip_address': '10.0.0.1'},
                         {'mac_address': '00:00:00:00:00:01',
                          'ip_address': '10.0.0.1'}]
        self._create_port_with_address_pairs(address_pairs, 400)

    def test_more_than_max_allowed_address_pair(self):
        cfg.CONF.set_default('max_allowed_address_pair', 3)
        address_pairs = [{'mac_address': '00:00:00:00:00:01',
                          'ip_address': '10.0.0.1'},
                         {'mac_address': '00:00:00:00:00:02',
                          'ip_address': '10.0.0.2'},
                         {'mac_address': '00:00:00:00:00:03',
                          'ip_address': '10.0.0.3'},
                         {'mac_address': '00:00:00:00:00:04',
                          'ip_address': '10.0.0.4'}]
        self._create_port_with_address_pairs(address_pairs, 400)

    def test_equal_to_max_allowed_address_pair(self):
        cfg.CONF.set_default('max_allowed_address_pair', 3)
        address_pairs = [{'mac_address': '00:00:00:00:00:01',
                          'ip_address': '10.0.0.1'},
                         {'mac_address': '00:00:00:00:00:02',
                          'ip_address': '10.0.0.2'},
                         {'mac_address': '00:00:00:00:00:03',
                          'ip_address': '10.0.0.3'}]
        self._create_port_with_address_pairs(address_pairs, 201)

    def test_create_overlap_with_fixed_ip(self):
        address_pairs = [{'mac_address': '00:00:00:00:00:01',
                          'ip_address': '10.0.0.2'}]
        with self.network() as network:
            with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
                fixed_ips = [{'subnet_id': subnet['subnet']['id'],
                              'ip_address': '10.0.0.2'}]
                res = self._create_port(self.fmt, network['network']['id'],
                                        arg_list=(addr_apidef.ADDRESS_PAIRS,
                                                  'fixed_ips'),
                                        allowed_address_pairs=address_pairs,
                                        fixed_ips=fixed_ips)
                self.assertEqual(res.status_int, 201)
                port = self.deserialize(self.fmt, res)
                self._delete('ports', port['port']['id'])

    def test_create_port_extra_args(self):
        address_pairs = [{'mac_address': '00:00:00:00:00:01',
                          'ip_address': '10.0.0.1',
                          'icbb': 'agreed'}]
        self._create_port_with_address_pairs(address_pairs, 400)

    def test_create_port_with_unexpected_address_pairs_format(self):
        address_pairs = {'mac_address': '00:00:00:00:00:01',
                         'ip_address': '10.0.0.1'}
        self._create_port_with_address_pairs(address_pairs, 400)

    def _create_port_with_address_pairs(self, address_pairs, ret_code):
        with self.network() as net:
            res = self._create_port(self.fmt, net['network']['id'],
                                    arg_list=(addr_apidef.ADDRESS_PAIRS,),
                                    allowed_address_pairs=address_pairs)
            port = self.deserialize(self.fmt, res)
            self.assertEqual(res.status_int, ret_code)
            if ret_code == 201:
                self._delete('ports', port['port']['id'])
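    # NOTE: _create_port_with_address_pairs above is the shared helper of
    # the negative tests: it POSTs a port with the given pairs, asserts
    # the expected status code and, for 201 responses, deletes the port
    # again.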
    def test_update_port_allowed_address_pairs_bad_format(self):
        with self.network() as net:
            res = self._create_port(self.fmt, net['network']['id'])
            port = self.deserialize(self.fmt, res)
            bad_values = [False, True, 1.1, 1,
                          ['ip_address'], ['mac_address']]
            for value in bad_values:
                update_port = {'port': {addr_apidef.ADDRESS_PAIRS: value}}
                req = self.new_update_request('ports', update_port,
                                              port['port']['id'])
                res = req.get_response(self.api)
                self.assertEqual(400, res.status_int)

    def test_update_add_address_pairs(self):
        with self.network() as net:
            res = self._create_port(self.fmt, net['network']['id'])
            port = self.deserialize(self.fmt, res)
            address_pairs = [{'mac_address': '00:00:00:00:00:01',
                              'ip_address': '10.0.0.1'}]
            update_port = {'port': {addr_apidef.ADDRESS_PAIRS:
                                    address_pairs}}
            req = self.new_update_request('ports', update_port,
                                          port['port']['id'])
            port = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(port['port'][addr_apidef.ADDRESS_PAIRS],
                             address_pairs)
            self._delete('ports', port['port']['id'])

    def test_update_add_address_pairs_with_unexpected_format(self):
        with self.network() as net:
            res = self._create_port(self.fmt, net['network']['id'])
            port = self.deserialize(self.fmt, res)
            address_pairs = {'mac_address': '00:00:00:00:00:01',
                             'ip_address': '10.0.0.1'}
            update_port = {'port': {addr_apidef.ADDRESS_PAIRS:
                                    address_pairs}}
            req = self.new_update_request('ports', update_port,
                                          port['port']['id'])
            res = req.get_response(self.api)
            self.assertEqual(400, res.status_int)

    def test_create_address_gets_port_mac(self):
        with self.network() as net:
            address_pairs = [{'ip_address': '23.23.23.23'}]
            res = self._create_port(self.fmt, net['network']['id'],
                                    arg_list=('port_security_enabled',
                                              addr_apidef.ADDRESS_PAIRS,),
                                    allowed_address_pairs=address_pairs)
            port = self.deserialize(self.fmt, res)['port']
            port_addr_mac = port[addr_apidef.ADDRESS_PAIRS][0]['mac_address']
            self.assertEqual(port_addr_mac,
                             port['mac_address'])
            self._delete('ports', port['id'])

    def test_update_port_security_off_address_pairs(self):
        if self._skip_port_security:
            self.skipTest("Plugin does not implement port-security extension")
        with self.network() as net:
            with self.subnet(network=net) as subnet:
                address_pairs = [{'mac_address': '00:00:00:00:00:01',
                                  'ip_address': '10.0.0.1'}]
                # The port should not have any security-groups associated
                # to it
                with self.port(subnet=subnet,
                               arg_list=(psec.PORTSECURITY,
                                         addr_apidef.ADDRESS_PAIRS,
                                         secgroup.SECURITYGROUPS),
                               port_security_enabled=True,
                               allowed_address_pairs=address_pairs,
                               security_groups=[]) as port:
                    update_port = {'port': {psec.PORTSECURITY: False}}
                    req = self.new_update_request('ports', update_port,
                                                  port['port']['id'])
                    res = req.get_response(self.api)
                    self.assertEqual(409, res.status_int)

    def test_update_with_none_and_own_mac_for_duplicate_ip(self):
        with self.network() as net:
            res = self._create_port(self.fmt, net['network']['id'])
            port = self.deserialize(self.fmt, res)
            mac_address = port['port']['mac_address']
            address_pairs = [{'ip_address': '10.0.0.1'},
                             {'mac_address': mac_address,
                              'ip_address': '10.0.0.1'}]
            update_port = {'port': {addr_apidef.ADDRESS_PAIRS:
                                    address_pairs}}
            req = self.new_update_request('ports', update_port,
                                          port['port']['id'])
            res = req.get_response(self.api)
            self.assertEqual(400, res.status_int)

    def test_create_port_remove_allowed_address_pairs_with_list(self):
        self._test_create_port_remove_allowed_address_pairs([])

    def test_create_port_remove_allowed_address_pairs_with_none(self):
        self._test_create_port_remove_allowed_address_pairs(None)
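    # NOTE: both variants above funnel into the helper below; updating a
    # port with either [] or None for allowed_address_pairs is expected
    # to clear the pairs, and the API reports the cleared value as an
    # empty list in both cases.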
    def _test_create_port_remove_allowed_address_pairs(self, update_value):
        with self.network() as net:
            address_pairs = [{'mac_address': '00:00:00:00:00:01',
                              'ip_address': '10.0.0.1'}]
            res = self._create_port(self.fmt, net['network']['id'],
                                    arg_list=(addr_apidef.ADDRESS_PAIRS,),
                                    allowed_address_pairs=address_pairs)
            port = self.deserialize(self.fmt, res)
            update_port = {'port': {addr_apidef.ADDRESS_PAIRS: update_value}}
            req = self.new_update_request('ports', update_port,
                                          port['port']['id'])
            port = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual([], port['port'][addr_apidef.ADDRESS_PAIRS])
            self._delete('ports', port['port']['id'])


neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/test_db_base_plugin_common.py

# Copyright (c) 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.db import db_base_plugin_common
from neutron.tests import base


class DummyObject(object):
    def __init__(self, **kwargs):
        self.kwargs = kwargs

    def to_dict(self):
        return self.kwargs


class ConvertToDictTestCase(base.BaseTestCase):

    @db_base_plugin_common.convert_result_to_dict
    def method_dict(self, fields=None):
        return DummyObject(one=1, two=2, three=3)

    @db_base_plugin_common.convert_result_to_dict
    def method_list(self):
        return [DummyObject(one=1, two=2, three=3)] * 3

    def test_simple_object(self):
        expected = {'one': 1, 'two': 2, 'three': 3}
        observed = self.method_dict()
        self.assertEqual(expected, observed)

    def test_list_of_objects(self):
        expected = [{'one': 1, 'two': 2, 'three': 3}] * 3
        observed = self.method_list()
        self.assertEqual(expected, observed)
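# NOTE: a sketch of the behaviour exercised by the next test case (names
# here are invented for illustration only): filter_fields trims the
# decorated method's result down to the requested keys, e.g.
#
#     @db_base_plugin_common.filter_fields
#     def get_thing(self, fields=None):
#         return {'one': 1, 'two': 2}
#
#     # get_thing(fields=['two']) -> {'two': 2}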

class FilterFieldsTestCase(base.BaseTestCase):

    @db_base_plugin_common.filter_fields
    def method_dict(self, fields=None):
        return {'one': 1, 'two': 2, 'three': 3}

    @db_base_plugin_common.filter_fields
    def method_list(self, fields=None):
        return [self.method_dict() for _ in range(3)]

    @db_base_plugin_common.filter_fields
    def method_multiple_arguments(self, not_used, fields=None,
                                  also_not_used=None):
        return {'one': 1, 'two': 2, 'three': 3}

    def test_no_fields(self):
        expected = {'one': 1, 'two': 2, 'three': 3}
        observed = self.method_dict()
        self.assertEqual(expected, observed)

    def test_dict(self):
        expected = {'two': 2}
        observed = self.method_dict(['two'])
        self.assertEqual(expected, observed)

    def test_list(self):
        expected = [{'two': 2}, {'two': 2}, {'two': 2}]
        observed = self.method_list(['two'])
        self.assertEqual(expected, observed)

    def test_multiple_arguments_positional(self):
        expected = {'two': 2}
        observed = self.method_multiple_arguments(list(), ['two'])
        self.assertEqual(expected, observed)

    def test_multiple_arguments_positional_and_keywords(self):
        expected = {'two': 2}
        observed = self.method_multiple_arguments(fields=['two'],
                                                  not_used=None)
        self.assertEqual(expected, observed)

    def test_multiple_arguments_keyword(self):
        expected = {'two': 2}
        observed = self.method_multiple_arguments(list(), fields=['two'])
        self.assertEqual(expected, observed)


neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/test_db_base_plugin_v2.py

# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import contextlib
import copy
import functools
import itertools
import random

import eventlet
import mock
import netaddr
from neutron_lib.callbacks import exceptions
from neutron_lib.callbacks import registry
from neutron_lib import constants
from neutron_lib import context
from neutron_lib.db import api as db_api
from neutron_lib import exceptions as lib_exc
from neutron_lib import fixture
from neutron_lib.plugins import directory
from neutron_lib.tests import tools
from neutron_lib.utils import helpers
from neutron_lib.utils import net
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_utils import importutils
from oslo_utils import netutils
from oslo_utils import uuidutils
from sqlalchemy import orm
import testtools
from testtools import matchers
import webob.exc

import neutron
from neutron.api import api_common
from neutron.api import extensions
from neutron.api.v2 import router
from neutron.common import ipv6_utils
from neutron.common import test_lib
from neutron.common import utils
from neutron.conf import policies
from neutron.db import db_base_plugin_common
from neutron.db import ipam_backend_mixin
from neutron.db.models import l3 as l3_models
from neutron.db.models import securitygroup as sg_models
from neutron.db import models_v2
from neutron.db import standard_attr
from neutron.ipam.drivers.neutrondb_ipam import driver as ipam_driver
from neutron.ipam import exceptions as ipam_exc
from neutron.objects import network as network_obj
from neutron.objects import router as l3_obj
from neutron import policy
from neutron.tests import base
from neutron.tests.unit.api import test_extensions
from neutron.tests.unit import testlib_api

DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'

DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
DEVICE_OWNER_NOT_COMPUTE = constants.DEVICE_OWNER_DHCP

TEST_TENANT_ID = '46f70361-ba71-4bd0-9769-3573fd227c4b'


def optional_ctx(obj, fallback, **kwargs):
    if not obj:
        return fallback(**kwargs)

    @contextlib.contextmanager
    def context_wrapper():
        yield obj
    return context_wrapper()
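# NOTE: optional_ctx lets the fixtures below accept either a
# caller-supplied resource or build their own: when obj is falsy the
# fallback context manager creates (and later cleans up) the resource,
# otherwise the existing obj is yielded untouched, e.g.:
#
#     with optional_ctx(subnet, self.subnet) as subnet_to_use:
#         ...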

def _fake_get_pagination_helper(self, request):
    return api_common.PaginationEmulatedHelper(request, self._primary_key)


def _fake_get_sorting_helper(self, request):
    return api_common.SortingEmulatedHelper(request, self._attr_info)


# TODO(banix): Move the following method to ML2 db test module when ML2
# mechanism driver unit tests are corrected to use Ml2PluginV2TestCase
# instead of directly using NeutronDbPluginV2TestCase
def _get_create_db_method(resource):
    ml2_method = '_create_%s_db' % resource
    if hasattr(directory.get_plugin(), ml2_method):
        return ml2_method
    else:
        return 'create_%s' % resource


class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
    fmt = 'json'
    resource_prefix_map = {}
    block_dhcp_notifier = True

    def setUp(self, plugin=None, service_plugins=None,
              ext_mgr=None):
        super(NeutronDbPluginV2TestCase, self).setUp()
        cfg.CONF.set_override('notify_nova_on_port_status_changes', False)
        cfg.CONF.set_override('allow_overlapping_ips', True)
        # Make sure the extensions required by the plugin are loaded anew
        # for each test.
        extensions.PluginAwareExtensionManager._instance = None

        # Save the attributes map in case the plugin will alter it
        # loading extensions
        self.useFixture(fixture.APIDefinitionFixture())
        self._tenant_id = TEST_TENANT_ID

        if not plugin:
            plugin = DB_PLUGIN_KLASS

        if self.block_dhcp_notifier:
            mock.patch('neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.'
                       'DhcpAgentNotifyAPI').start()
        # Update the plugin
        self.setup_coreplugin(plugin, load_plugins=False)
        if isinstance(service_plugins, (list, tuple)):
            # Sometimes we need these test service_plugins to be ordered.
            cfg.CONF.set_override('service_plugins', service_plugins)
        else:
            cfg.CONF.set_override(
                'service_plugins',
                [test_lib.test_config.get(key, default)
                 for key, default in (service_plugins or {}).items()]
            )

        cfg.CONF.set_override('base_mac', "12:34:56:78:00:00")
        cfg.CONF.set_override('max_dns_nameservers', 2)
        cfg.CONF.set_override('max_subnet_host_routes', 2)
        self.api = router.APIRouter()
        # Set the default status
        self.net_create_status = 'ACTIVE'
        self.port_create_status = 'ACTIVE'

        def _is_native_bulk_supported():
            plugin_obj = directory.get_plugin()
            native_bulk_attr_name = ("_%s__native_bulk_support" %
                                     plugin_obj.__class__.__name__)
            return getattr(plugin_obj, native_bulk_attr_name, False)

        self._skip_native_bulk = not _is_native_bulk_supported()

        def _is_native_pagination_support():
            native_pagination_attr_name = (
                "_%s__native_pagination_support" %
                directory.get_plugin().__class__.__name__)
            return getattr(directory.get_plugin(),
                           native_pagination_attr_name, False)

        self._skip_native_pagination = not _is_native_pagination_support()

        def _is_filter_validation_support():
            return 'filter-validation' in (directory.get_plugin().
                                           supported_extension_aliases)

        self._skip_filter_validation = not _is_filter_validation_support()

        def _is_native_sorting_support():
            native_sorting_attr_name = (
                "_%s__native_sorting_support" %
                directory.get_plugin().__class__.__name__)
            return getattr(directory.get_plugin(),
                           native_sorting_attr_name, False)

        self.plugin = directory.get_plugin()
        self._skip_native_sorting = not _is_native_sorting_support()
        if ext_mgr:
            self.ext_api = test_extensions.setup_extensions_middleware(
                ext_mgr)
            # NOTE(amotoki): policy._ENFORCER is initialized in
            # neutron.tests.base.BaseTestCase.setUp() but this is too early
            # and neutron.policy.FieldCheck conv_func does not work
            # because extended resources are not populated to
            # attributes.RESOURCES yet.
            # Thus we need to refresh the default policy rules after loading
            # extensions. Especially it is important to re-instantiate
            # DefaultRule() under neutron.conf.policies. To do this,
            # we need to reload the default policy modules.
            policy.reset()
            # TODO(amotoki): Consider this should be part of
            # neutron.policy.reset (or refresh), but as of now
            # this is only required for unit testing.
            policies.reload_default_policies()
            policy.init()
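    # NOTE: the _is_native_*_support checks in setUp above peek at the
    # name-mangled class attribute ("_<PluginClass>__native_bulk_support"
    # and friends), so a plugin opts in simply by defining
    # __native_bulk_support = True in its own class body.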
    def setup_config(self):
        # Create the default configurations
        args = ['--config-file', base.etcdir('neutron.conf')]
        # If test_config specifies some config-file, use it, as well
        for config_file in test_lib.test_config.get('config_files', []):
            args.extend(['--config-file', config_file])
        super(NeutronDbPluginV2TestCase, self).setup_config(args=args)

    def _req(self, method, resource, data=None, fmt=None, id=None,
             params=None, action=None, subresource=None, sub_id=None,
             context=None, headers=None):
        fmt = fmt or self.fmt

        path = '/%s.%s' % (
            '/'.join(p for p in
                     (resource, id, subresource, sub_id, action) if p),
            fmt
        )

        prefix = self.resource_prefix_map.get(resource)
        if prefix:
            path = prefix + path

        content_type = 'application/%s' % fmt
        body = None
        if data is not None:  # empty dict is valid
            body = self.serialize(data)
        return testlib_api.create_request(path, body, content_type, method,
                                          query_string=params,
                                          context=context, headers=headers)

    def new_create_request(self, resource, data, fmt=None, id=None,
                           subresource=None, context=None):
        return self._req('POST', resource, data, fmt, id=id,
                         subresource=subresource, context=context)

    def new_list_request(self, resource, fmt=None, params=None,
                         subresource=None):
        return self._req(
            'GET', resource, None, fmt, params=params,
            subresource=subresource
        )

    def new_show_request(self, resource, id, fmt=None,
                         subresource=None, fields=None, sub_id=None):
        if fields:
            params = "&".join(["fields=%s" % x for x in fields])
        else:
            params = None
        return self._req('GET', resource, None, fmt, id=id,
                         params=params, subresource=subresource,
                         sub_id=sub_id)

    def new_delete_request(self, resource, id, fmt=None, subresource=None,
                           sub_id=None, data=None, headers=None):
        return self._req(
            'DELETE', resource, data, fmt, id=id,
            subresource=subresource, sub_id=sub_id, headers=headers
        )

    def new_update_request(self, resource, data, id, fmt=None,
                           subresource=None, context=None, sub_id=None,
                           headers=None):
        return self._req(
            'PUT', resource, data, fmt, id=id, subresource=subresource,
            sub_id=sub_id, context=context, headers=headers
        )

    def new_action_request(self, resource, data, id, action, fmt=None,
                           subresource=None, sub_id=None):
        return self._req(
            'PUT', resource, data, fmt, id=id, action=action,
            subresource=subresource, sub_id=sub_id
        )

    def deserialize(self, content_type, response):
        ctype = 'application/%s' % content_type
        data = self._deserializers[ctype].deserialize(response.body)['body']
        return data

    def _find_ip_address(self, subnet, exclude=None, is_random=False):
        network_ports = self._list_ports(
            "json", 200, subnet['network_id']).json['ports']
        used_ips = set()
        if exclude:
            if isinstance(exclude, (list, set, tuple)):
                used_ips = set(exclude)
            else:
                used_ips.add(exclude)
        for port in network_ports:
            for ip in port['fixed_ips']:
                if ip['subnet_id'] == subnet['id']:
                    used_ips.add(ip['ip_address'])

        for pool in subnet['allocation_pools']:
            ips_range = netaddr.IPRange(pool['start'], pool['end'])
            ip_list = [str(ip) for ip in ips_range
                       if str(ip) not in used_ips]
            if ip_list:
                if is_random:
                    return random.choice(ip_list)
                return ip_list[0]
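    # NOTE: _req above assembles the request path from whichever parts are
    # present, so resource='ports' with id=<uuid> and no subresource or
    # action yields '/ports/<uuid>.json' for the default fmt.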
    def _create_bulk_from_list(self, fmt, resource, objects, **kwargs):
        """Creates a bulk request from a list of objects."""
        collection = "%ss" % resource
        req_data = {collection: objects}
        req = self.new_create_request(collection, req_data, fmt)
        if ('set_context' in kwargs and
                kwargs['set_context'] is True and
                'tenant_id' in kwargs):
            # create a specific auth context for this request
            req.environ['neutron.context'] = context.Context(
                '', kwargs['tenant_id'])
        elif 'context' in kwargs:
            req.environ['neutron.context'] = kwargs['context']
        return req.get_response(self.api)

    def _create_bulk(self, fmt, number, resource, data, name='test',
                     **kwargs):
        """Creates a bulk request for any kind of resource."""
        objects = []
        collection = "%ss" % resource
        for i in range(number):
            obj = copy.deepcopy(data)
            obj[resource]['name'] = "%s_%s" % (name, i)
            if 'override' in kwargs and i in kwargs['override']:
                obj[resource].update(kwargs['override'][i])
            objects.append(obj)
        req_data = {collection: objects}
        req = self.new_create_request(collection, req_data, fmt)
        if ('set_context' in kwargs and
                kwargs['set_context'] is True and
                'tenant_id' in kwargs):
            # create a specific auth context for this request
            req.environ['neutron.context'] = context.Context(
                '', kwargs['tenant_id'])
        elif 'context' in kwargs:
            req.environ['neutron.context'] = kwargs['context']
        return req.get_response(self.api)

    def _create_network(self, fmt, name, admin_state_up,
                        arg_list=None, set_context=False, tenant_id=None,
                        **kwargs):
        tenant_id = tenant_id or self._tenant_id
        data = {'network': {'name': name,
                            'admin_state_up': admin_state_up,
                            'tenant_id': tenant_id}}
        for arg in (('admin_state_up', 'tenant_id', 'shared',
                     'vlan_transparent',
                     'availability_zone_hints') + (arg_list or ())):
            # Arg must be present
            if arg in kwargs:
                data['network'][arg] = kwargs[arg]
        network_req = self.new_create_request('networks', data, fmt)
        if set_context and tenant_id:
            # create a specific auth context for this request
            network_req.environ['neutron.context'] = context.Context(
                '', tenant_id)

        return network_req.get_response(self.api)

    def _create_network_bulk(self, fmt, number, name,
                             admin_state_up, **kwargs):
        base_data = {'network': {'admin_state_up': admin_state_up,
                                 'tenant_id': self._tenant_id}}
        return self._create_bulk(fmt, number, 'network', base_data, **kwargs)

    def _create_subnet(self, fmt, net_id, cidr,
                       expected_res_status=None, **kwargs):
        data = {'subnet': {'network_id': net_id,
                           'ip_version': constants.IP_VERSION_4,
                           'tenant_id': self._tenant_id}}
        if cidr:
            data['subnet']['cidr'] = cidr
        for arg in ('ip_version', 'tenant_id', 'subnetpool_id', 'prefixlen',
                    'enable_dhcp', 'allocation_pools', 'segment_id',
                    'dns_nameservers', 'host_routes',
                    'shared', 'ipv6_ra_mode', 'ipv6_address_mode',
                    'service_types'):
            # Arg must be present and not null (but can be false)
            if kwargs.get(arg) is not None:
                data['subnet'][arg] = kwargs[arg]

        if ('gateway_ip' in kwargs and
                kwargs['gateway_ip'] is not constants.ATTR_NOT_SPECIFIED):
            data['subnet']['gateway_ip'] = kwargs['gateway_ip']

        subnet_req = self.new_create_request('subnets', data, fmt)
        if (kwargs.get('set_context') and 'tenant_id' in kwargs):
            # create a specific auth context for this request
            subnet_req.environ['neutron.context'] = context.Context(
                '', kwargs['tenant_id'])

        subnet_res = subnet_req.get_response(self.api)
        if expected_res_status:
            self.assertEqual(expected_res_status, subnet_res.status_int)
        return subnet_res
    def _create_subnet_bulk(self, fmt, number, net_id, name,
                            ip_version=constants.IP_VERSION_4, **kwargs):
        base_data = {'subnet': {'network_id': net_id,
                                'ip_version': ip_version,
                                'tenant_id': self._tenant_id}}
        if 'ipv6_mode' in kwargs:
            base_data['subnet']['ipv6_ra_mode'] = kwargs['ipv6_mode']
            base_data['subnet']['ipv6_address_mode'] = kwargs['ipv6_mode']
        # auto-generate cidrs as they should not overlap
        base_cidr = "10.0.%s.0/24"
        if ip_version == constants.IP_VERSION_6:
            base_cidr = "fd%s::/64"
        overrides = dict((k, v)
                         for (k, v) in zip(range(number),
                                           [{'cidr': base_cidr % num}
                                            for num in range(number)]))
        kwargs.update({'override': overrides})
        return self._create_bulk(fmt, number, 'subnet', base_data, **kwargs)

    def _create_subnetpool(self, fmt, prefixes,
                           expected_res_status=None, admin=False, **kwargs):
        subnetpool = {'subnetpool': {'prefixes': prefixes}}
        for k, v in kwargs.items():
            subnetpool['subnetpool'][k] = str(v)

        api = self._api_for_resource('subnetpools')
        subnetpools_req = self.new_create_request('subnetpools',
                                                  subnetpool, fmt)
        if not admin:
            neutron_context = context.Context('', kwargs['tenant_id'])
            subnetpools_req.environ['neutron.context'] = neutron_context
        subnetpool_res = subnetpools_req.get_response(api)
        if expected_res_status:
            self.assertEqual(expected_res_status, subnetpool_res.status_int)
        return subnetpool_res

    def _create_port(self, fmt, net_id, expected_res_status=None,
                     arg_list=None, set_context=False, tenant_id=None,
                     **kwargs):
        tenant_id = tenant_id or self._tenant_id
        data = {'port': {'network_id': net_id,
                         'tenant_id': tenant_id}}

        for arg in (('admin_state_up', 'device_id',
                     'mac_address', 'name', 'fixed_ips',
                     'tenant_id', 'device_owner', 'security_groups',
                     'propagate_uplink_status') + (arg_list or ())):
            # Arg must be present
            if arg in kwargs:
                data['port'][arg] = kwargs[arg]
        # create a dhcp port device id if one hasn't been supplied
        if ('device_owner' in kwargs and
                kwargs['device_owner'] == constants.DEVICE_OWNER_DHCP and
                'host' in kwargs and
                'device_id' not in kwargs):
            device_id = utils.get_dhcp_agent_device_id(net_id, kwargs['host'])
            data['port']['device_id'] = device_id
        port_req = self.new_create_request('ports', data, fmt)
        if set_context and tenant_id:
            # create a specific auth context for this request
            port_req.environ['neutron.context'] = context.Context(
                '', tenant_id)

        port_res = port_req.get_response(self.api)
        if expected_res_status:
            self.assertEqual(expected_res_status, port_res.status_int)
        return port_res

    def _list_ports(self, fmt, expected_res_status=None,
                    net_id=None, **kwargs):
        query_params = []
        if net_id:
            query_params.append("network_id=%s" % net_id)
        if kwargs.get('device_owner'):
            query_params.append("device_owner=%s" %
                                kwargs.get('device_owner'))
        port_req = self.new_list_request('ports', fmt, '&'.join(query_params))
        if ('set_context' in kwargs and
                kwargs['set_context'] is True and
                'tenant_id' in kwargs):
            # create a specific auth context for this request
            port_req.environ['neutron.context'] = context.Context(
                '', kwargs['tenant_id'])

        port_res = port_req.get_response(self.api)
        if expected_res_status:
            self.assertEqual(expected_res_status, port_res.status_int)
        return port_res

    def _create_port_bulk(self, fmt, number, net_id, name,
                          admin_state_up, **kwargs):
        base_data = {'port': {'network_id': net_id,
                              'admin_state_up': admin_state_up,
                              'tenant_id': self._tenant_id}}
        return self._create_bulk(fmt, number, 'port', base_data, **kwargs)

    def _make_network(self, fmt, name, admin_state_up, **kwargs):
        res = self._create_network(fmt, name, admin_state_up, **kwargs)
        # TODO(salvatore-orlando): do exception handling in this test module
        # in a uniform way (we do it differently for ports, subnets and
        # networks).
        # Things can go wrong - raise HTTP exc with res code only
        # so it can be caught by unit tests
        if res.status_int >= webob.exc.HTTPClientError.code:
            raise webob.exc.HTTPClientError(code=res.status_int)
        return self.deserialize(fmt, res)
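    # NOTE: the _make_* helpers (here and below) deliberately convert any
    # error status into a raised webob.exc.HTTPClientError, so tests can
    # catch failures with assertRaises instead of inspecting raw
    # responses.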
    def _make_subnet(self, fmt, network, gateway, cidr, subnetpool_id=None,
                     allocation_pools=None,
                     ip_version=constants.IP_VERSION_4,
                     enable_dhcp=True,
                     dns_nameservers=None, host_routes=None, shared=None,
                     ipv6_ra_mode=None, ipv6_address_mode=None,
                     tenant_id=None, set_context=False, segment_id=None):
        res = self._create_subnet(fmt,
                                  net_id=network['network']['id'],
                                  cidr=cidr,
                                  subnetpool_id=subnetpool_id,
                                  segment_id=segment_id,
                                  gateway_ip=gateway,
                                  tenant_id=(tenant_id or
                                             network['network']['tenant_id']),
                                  allocation_pools=allocation_pools,
                                  ip_version=ip_version,
                                  enable_dhcp=enable_dhcp,
                                  dns_nameservers=dns_nameservers,
                                  host_routes=host_routes,
                                  shared=shared,
                                  ipv6_ra_mode=ipv6_ra_mode,
                                  ipv6_address_mode=ipv6_address_mode,
                                  set_context=set_context)
        # Things can go wrong - raise HTTP exc with res code only
        # so it can be caught by unit tests
        if res.status_int >= webob.exc.HTTPClientError.code:
            raise webob.exc.HTTPClientError(code=res.status_int)
        return self.deserialize(fmt, res)

    def _make_v6_subnet(self, network, ra_addr_mode, ipv6_pd=False):
        cidr = 'fe80::/64'
        gateway = 'fe80::1'
        subnetpool_id = None
        if ipv6_pd:
            cidr = None
            gateway = None
            subnetpool_id = constants.IPV6_PD_POOL_ID
            cfg.CONF.set_override('ipv6_pd_enabled', True)
        return (self._make_subnet(self.fmt, network, gateway=gateway,
                                  subnetpool_id=subnetpool_id, cidr=cidr,
                                  ip_version=constants.IP_VERSION_6,
                                  ipv6_ra_mode=ra_addr_mode,
                                  ipv6_address_mode=ra_addr_mode))
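    # NOTE: in the prefix-delegation variant above the subnet is created
    # with no cidr or gateway and with
    # subnetpool_id=constants.IPV6_PD_POOL_ID while ipv6_pd_enabled is
    # switched on, mirroring what a PD-backed subnet request looks like
    # on the API.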
    def _make_subnetpool(self, fmt, prefixes, admin=False, **kwargs):
        res = self._create_subnetpool(fmt,
                                      prefixes,
                                      None,
                                      admin,
                                      **kwargs)
        # Things can go wrong - raise HTTP exc with res code only
        # so it can be caught by unit tests
        if res.status_int >= webob.exc.HTTPClientError.code:
            raise webob.exc.HTTPClientError(code=res.status_int)
        return self.deserialize(fmt, res)

    def _make_port(self, fmt, net_id, expected_res_status=None, **kwargs):
        res = self._create_port(fmt, net_id, expected_res_status, **kwargs)
        # Things can go wrong - raise HTTP exc with res code only
        # so it can be caught by unit tests
        if res.status_int >= webob.exc.HTTPClientError.code:
            raise webob.exc.HTTPClientError(code=res.status_int)
        return self.deserialize(fmt, res)

    def _api_for_resource(self, resource):
        if resource in ['networks', 'subnets', 'ports', 'subnetpools',
                        'security-groups']:
            return self.api
        else:
            return self.ext_api

    def _delete(self, collection, id,
                expected_code=webob.exc.HTTPNoContent.code,
                neutron_context=None, headers=None):
        req = self.new_delete_request(collection, id, headers=headers)
        if neutron_context:
            # create a specific auth context for this request
            req.environ['neutron.context'] = neutron_context
        res = req.get_response(self._api_for_resource(collection))
        self.assertEqual(expected_code, res.status_int)

    def _show_response(self, resource, id, neutron_context=None):
        req = self.new_show_request(resource, id)
        if neutron_context:
            # create a specific auth context for this request
            req.environ['neutron.context'] = neutron_context
        elif hasattr(self, 'tenant_id'):
            req.environ['neutron.context'] = context.Context(
                '', self.tenant_id)
        return req.get_response(self._api_for_resource(resource))

    def _show(self, resource, id, expected_code=webob.exc.HTTPOk.code,
              neutron_context=None):
        res = self._show_response(resource, id,
                                  neutron_context=neutron_context)
        self.assertEqual(expected_code, res.status_int)
        return self.deserialize(self.fmt, res)

    def _update(self, resource, id, new_data,
                expected_code=webob.exc.HTTPOk.code,
                neutron_context=None, headers=None):
        req = self.new_update_request(resource, new_data, id, headers=headers)
        if neutron_context:
            # create a specific auth context for this request
            req.environ['neutron.context'] = neutron_context
        res = req.get_response(self._api_for_resource(resource))
        self.assertEqual(expected_code, res.status_int)
        return self.deserialize(self.fmt, res)

    def _list(self, resource, fmt=None, neutron_context=None,
              query_params=None, expected_code=webob.exc.HTTPOk.code):
        fmt = fmt or self.fmt
        req = self.new_list_request(resource, fmt, query_params)
        if neutron_context:
            req.environ['neutron.context'] = neutron_context
        res = req.get_response(self._api_for_resource(resource))
        self.assertEqual(expected_code, res.status_int)
        return self.deserialize(fmt, res)

    def _fail_second_call(self, patched_plugin, orig, *args, **kwargs):
        """Invoked by test cases for injecting failures in plugin."""
        def second_call(*args, **kwargs):
            raise lib_exc.NeutronException(message="_fail_second_call")
        patched_plugin.side_effect = second_call
        return orig(*args, **kwargs)
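    # NOTE: _fail_second_call above implements the fault injection used by
    # the bulk tests: the first plugin call runs the original method and
    # re-arms the mock's side effect to raise, so a bulk request of two
    # resources fails half-way through.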
    def _validate_behavior_on_bulk_failure(
            self, res, collection,
            errcode=webob.exc.HTTPClientError.code):
        self.assertEqual(errcode, res.status_int)
        req = self.new_list_request(collection)
        res = req.get_response(self.api)
        self.assertEqual(webob.exc.HTTPOk.code, res.status_int)
        items = self.deserialize(self.fmt, res)
        self.assertEqual(0, len(items[collection]))

    def _validate_behavior_on_bulk_success(self, res, collection,
                                           names=['test_0', 'test_1']):
        self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
        items = self.deserialize(self.fmt, res)[collection]
        self.assertEqual(len(items), 2)
        self.assertEqual(items[0]['name'], 'test_0')
        self.assertEqual(items[1]['name'], 'test_1')

    def _test_list_resources(self, resource, items, neutron_context=None,
                             query_params=None,
                             expected_code=webob.exc.HTTPOk.code):
        res = self._list('%ss' % resource,
                         neutron_context=neutron_context,
                         query_params=query_params,
                         expected_code=expected_code)
        if expected_code == webob.exc.HTTPOk.code:
            resource = resource.replace('-', '_')
            self.assertItemsEqual([i['id'] for i in res['%ss' % resource]],
                                  [i[resource]['id'] for i in items])

    @contextlib.contextmanager
    def network(self, name='net1',
                admin_state_up=True,
                fmt=None,
                **kwargs):
        network = self._make_network(fmt or self.fmt, name,
                                     admin_state_up, **kwargs)
        yield network

    @contextlib.contextmanager
    def subnet(self, network=None,
               gateway_ip=constants.ATTR_NOT_SPECIFIED,
               cidr='10.0.0.0/24',
               subnetpool_id=None,
               segment_id=None,
               fmt=None,
               ip_version=constants.IP_VERSION_4,
               allocation_pools=None,
               enable_dhcp=True,
               dns_nameservers=None,
               host_routes=None,
               shared=None,
               ipv6_ra_mode=None,
               ipv6_address_mode=None,
               tenant_id=None,
               service_types=None,
               set_context=False):
        cidr = netaddr.IPNetwork(cidr) if cidr else None
        if (gateway_ip is not None and
                gateway_ip != constants.ATTR_NOT_SPECIFIED):
            gateway_ip = netaddr.IPAddress(gateway_ip)

        with optional_ctx(network, self.network,
                          set_context=set_context,
                          tenant_id=tenant_id) as network_to_use:
            subnet = self._make_subnet(fmt or self.fmt,
                                       network_to_use,
                                       gateway_ip,
                                       cidr,
                                       subnetpool_id,
                                       allocation_pools,
                                       ip_version,
                                       enable_dhcp,
                                       dns_nameservers,
                                       host_routes,
                                       segment_id=segment_id,
                                       shared=shared,
                                       ipv6_ra_mode=ipv6_ra_mode,
                                       ipv6_address_mode=ipv6_address_mode,
                                       tenant_id=tenant_id,
                                       set_context=set_context)
            yield subnet

    @contextlib.contextmanager
    def subnetpool(self, prefixes, admin=False, **kwargs):
        subnetpool = self._make_subnetpool(self.fmt,
                                           prefixes,
                                           admin,
                                           **kwargs)
        yield subnetpool

    @contextlib.contextmanager
    def port(self, subnet=None, fmt=None, set_context=False, tenant_id=None,
             **kwargs):
        with optional_ctx(
                subnet, self.subnet,
                set_context=set_context,
                tenant_id=tenant_id) as subnet_to_use:
            net_id = subnet_to_use['subnet']['network_id']
            port = self._make_port(
                fmt or self.fmt, net_id,
                set_context=set_context, tenant_id=tenant_id,
                **kwargs)
            yield port

    def _test_list_with_sort(self, resource,
                             items, sorts, resources=None, query_params=''):
        query_str = query_params
        for key, direction in sorts:
            query_str = query_str + "&sort_key=%s&sort_dir=%s" % (key,
                                                                  direction)
        if not resources:
            resources = '%ss' % resource
        req = self.new_list_request(resources,
                                    params=query_str)
        api = self._api_for_resource(resources)
        res = self.deserialize(self.fmt, req.get_response(api))
        resource = resource.replace('-', '_')
        resources = resources.replace('-', '_')
        expected_res = [item[resource]['id'] for item in items]
        self.assertEqual(expected_res, [n['id'] for n in res[resources]])
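    # NOTE: the helper above encodes each sort as a pair of query
    # parameters, e.g. sorts=[('name', 'asc')] becomes
    # "&sort_key=name&sort_dir=asc" appended to the list request.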
    def _test_list_with_pagination(self, resource, items, sort,
                                   limit, expected_page_num,
                                   resources=None,
                                   query_params='',
                                   verify_key='id'):
        if not resources:
            resources = '%ss' % resource
        query_str = query_params + '&' if query_params else ''
        query_str = query_str + ("limit=%s&sort_key=%s&"
                                 "sort_dir=%s") % (limit, sort[0], sort[1])
        req = self.new_list_request(resources, params=query_str)
        items_res = []
        page_num = 0
        api = self._api_for_resource(resources)
        resource = resource.replace('-', '_')
        resources = resources.replace('-', '_')
        while req:
            page_num = page_num + 1
            res = self.deserialize(self.fmt, req.get_response(api))
            self.assertThat(len(res[resources]),
                            matchers.LessThan(limit + 1))
            items_res = items_res + res[resources]
            req = None
            if '%s_links' % resources in res:
                for link in res['%s_links' % resources]:
                    if link['rel'] == 'next':
                        content_type = 'application/%s' % self.fmt
                        req = testlib_api.create_request(link['href'],
                                                         '', content_type)
                        self.assertEqual(len(res[resources]),
                                         limit)
        self.assertEqual(expected_page_num, page_num)
        self.assertEqual([item[resource][verify_key] for item in items],
                         [n[verify_key] for n in items_res])

    def _test_list_with_pagination_reverse(self, resource, items, sort,
                                           limit, expected_page_num,
                                           resources=None,
                                           query_params=''):
        if not resources:
            resources = '%ss' % resource
        resource = resource.replace('-', '_')
        api = self._api_for_resource(resources)
        marker = items[-1][resource]['id']
        query_str = query_params + '&' if query_params else ''
        query_str = query_str + ("limit=%s&page_reverse=True&"
                                 "sort_key=%s&sort_dir=%s&"
                                 "marker=%s") % (limit, sort[0], sort[1],
                                                 marker)
        req = self.new_list_request(resources, params=query_str)
        item_res = [items[-1][resource]]
        page_num = 0
        resources = resources.replace('-', '_')
        while req:
            page_num = page_num + 1
            res = self.deserialize(self.fmt, req.get_response(api))
            self.assertThat(len(res[resources]),
                            matchers.LessThan(limit + 1))
            res[resources].reverse()
            item_res = item_res + res[resources]
            req = None
            if '%s_links' % resources in res:
                for link in res['%s_links' % resources]:
                    if link['rel'] == 'previous':
                        content_type = 'application/%s' % self.fmt
                        req = testlib_api.create_request(link['href'],
                                                         '', content_type)
                        self.assertEqual(len(res[resources]),
                                         limit)
        self.assertEqual(expected_page_num, page_num)
        expected_res = [item[resource]['id'] for item in items]
        expected_res.reverse()
        self.assertEqual(expected_res, [n['id'] for n in item_res])

    def _compare_resource(self, observed_res, expected_res, res_name):
        '''Compare the observed and expected resources (ie compare subnets)'''
        for k in expected_res:
            self.assertIn(k, observed_res[res_name])
            if isinstance(expected_res[k], list):
                self.assertEqual(sorted(expected_res[k]),
                                 sorted(observed_res[res_name][k]))
            else:
                self.assertEqual(expected_res[k], observed_res[res_name][k])

    def _validate_resource(self, resource, keys, res_name):
        ipv6_zero_gateway = False
        ipv6_null_gateway = False
        if res_name == 'subnet':
            attrs = resource[res_name]
            if not attrs['gateway_ip']:
                ipv6_null_gateway = True
            elif (attrs['ip_version'] is constants.IP_VERSION_6 and
                    attrs['gateway_ip'][-2:] == "::"):
                ipv6_zero_gateway = True
        for k in keys:
            self.assertIn(k, resource[res_name])
            if isinstance(keys[k], list):
                self.assertEqual(
                    sorted(keys[k], key=helpers.safe_sort_key),
                    sorted(resource[res_name][k],
                           key=helpers.safe_sort_key))
            else:
                if not ipv6_null_gateway:
                    if (k == 'gateway_ip' and ipv6_zero_gateway and
                            keys[k][-3:] == "::0"):
                        self.assertEqual(keys[k][:-1],
                                         resource[res_name][k])
                    else:
                        self.assertEqual(keys[k], resource[res_name][k])
""" tenant_id = 'some_tenant' self._create_network(self.fmt, 'some_net', True, tenant_id=tenant_id, set_context=True) req = self.new_list_request('networks', params="fields=tenant_id") req.environ['neutron.context'] = context.Context('', tenant_id) res = req.get_response(self.api) self._check_list_with_fields(res, 'tenant_id') def test_show_returns_200(self): with self.network() as net: req = self.new_show_request('networks', net['network']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPOk.code, res.status_int) def test_delete_returns_204(self): res = self._create_network(self.fmt, 'net1', True) net = self.deserialize(self.fmt, res) req = self.new_delete_request('networks', net['network']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) def test_delete_with_req_body_returns_400(self): res = self._create_network(self.fmt, 'net1', True) net = self.deserialize(self.fmt, res) data = {"network": {"id": net['network']['id']}} req = self.new_delete_request('networks', net['network']['id'], data=data) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_update_returns_200(self): with self.network() as net: req = self.new_update_request('networks', {'network': {'name': 'steve'}}, net['network']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPOk.code, res.status_int) def test_update_invalid_json_400(self): with self.network() as net: req = self.new_update_request('networks', '{{"name": "aaa"}}', net['network']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_bad_route_404(self): req = self.new_list_request('doohickeys') res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int) class TestPortsV2(NeutronDbPluginV2TestCase): def test_create_port_json(self): keys = [('admin_state_up', True), ('status', self.port_create_status)] with self.network(shared=True) as network: with self.subnet(network=network) as subnet: with self.port(name='myname', subnet=subnet) as port: for k, v in keys: self.assertEqual(port['port'][k], v) self.assertIn('mac_address', port['port']) ips = port['port']['fixed_ips'] subnet_ip_net = netaddr.IPNetwork(subnet['subnet']['cidr']) self.assertEqual(1, len(ips)) self.assertIn(netaddr.IPAddress(ips[0]['ip_address']), subnet_ip_net) self.assertEqual('myname', port['port']['name']) def test_create_port_as_admin(self): with self.network() as network: self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPCreated.code, tenant_id='bad_tenant_id', device_id='fake_device', device_owner='fake_owner', fixed_ips=[], set_context=False) def test_create_port_bad_tenant(self): with self.network() as network: self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPNotFound.code, tenant_id='bad_tenant_id', device_id='fake_device', device_owner='fake_owner', fixed_ips=[], set_context=True) def test_create_port_public_network(self): keys = [('admin_state_up', True), ('status', self.port_create_status)] with self.network(shared=True) as network: port_res = self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPCreated.code, tenant_id='another_tenant', set_context=True) port = self.deserialize(self.fmt, port_res) for k, v in keys: self.assertEqual(port['port'][k], v) self.assertIn('mac_address', port['port']) self._delete('ports', port['port']['id']) def test_create_port_None_values(self): with self.network() as 

class TestPortsV2(NeutronDbPluginV2TestCase):

    def test_create_port_json(self):
        keys = [('admin_state_up', True), ('status', self.port_create_status)]
        with self.network(shared=True) as network:
            with self.subnet(network=network) as subnet:
                with self.port(name='myname', subnet=subnet) as port:
                    for k, v in keys:
                        self.assertEqual(port['port'][k], v)
                    self.assertIn('mac_address', port['port'])
                    ips = port['port']['fixed_ips']
                    subnet_ip_net = netaddr.IPNetwork(
                        subnet['subnet']['cidr'])
                    self.assertEqual(1, len(ips))
                    self.assertIn(netaddr.IPAddress(ips[0]['ip_address']),
                                  subnet_ip_net)
                    self.assertEqual('myname', port['port']['name'])

    def test_create_port_as_admin(self):
        with self.network() as network:
            self._create_port(self.fmt,
                              network['network']['id'],
                              webob.exc.HTTPCreated.code,
                              tenant_id='bad_tenant_id',
                              device_id='fake_device',
                              device_owner='fake_owner',
                              fixed_ips=[],
                              set_context=False)

    def test_create_port_bad_tenant(self):
        with self.network() as network:
            self._create_port(self.fmt,
                              network['network']['id'],
                              webob.exc.HTTPNotFound.code,
                              tenant_id='bad_tenant_id',
                              device_id='fake_device',
                              device_owner='fake_owner',
                              fixed_ips=[],
                              set_context=True)

    def test_create_port_public_network(self):
        keys = [('admin_state_up', True), ('status', self.port_create_status)]
        with self.network(shared=True) as network:
            port_res = self._create_port(self.fmt,
                                         network['network']['id'],
                                         webob.exc.HTTPCreated.code,
                                         tenant_id='another_tenant',
                                         set_context=True)
            port = self.deserialize(self.fmt, port_res)
            for k, v in keys:
                self.assertEqual(port['port'][k], v)
            self.assertIn('mac_address', port['port'])
            self._delete('ports', port['port']['id'])

    def test_create_port_None_values(self):
        with self.network() as network:
            keys = ['device_owner', 'name', 'device_id']
            for key in keys:
                # test with each as None and the rest as ''
                kwargs = {k: '' for k in keys}
                kwargs[key] = None
                self._create_port(self.fmt,
                                  network['network']['id'],
                                  webob.exc.HTTPClientError.code,
                                  tenant_id='tenant_id',
                                  fixed_ips=[],
                                  set_context=False,
                                  **kwargs)

    def test_create_port_public_network_with_ip(self):
        with self.network(shared=True) as network:
            ip_net = netaddr.IPNetwork('10.0.0.0/24')
            with self.subnet(network=network, cidr=str(ip_net)):
                keys = [('admin_state_up', True),
                        ('status', self.port_create_status)]
                port_res = self._create_port(self.fmt,
                                             network['network']['id'],
                                             webob.exc.HTTPCreated.code,
                                             tenant_id='another_tenant',
                                             set_context=True)
                port = self.deserialize(self.fmt, port_res)
                for k, v in keys:
                    self.assertEqual(port['port'][k], v)
                port_ip = port['port']['fixed_ips'][0]['ip_address']
                self.assertIn(port_ip, ip_net)
                self.assertIn('mac_address', port['port'])
                self._delete('ports', port['port']['id'])

    def test_create_port_anticipating_allocation(self):
        with self.network(shared=True) as network:
            with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
                fixed_ips = [{'subnet_id': subnet['subnet']['id']},
                             {'subnet_id': subnet['subnet']['id'],
                              'ip_address': '10.0.0.2'}]
                self._create_port(self.fmt, network['network']['id'],
                                  webob.exc.HTTPCreated.code,
                                  fixed_ips=fixed_ips)

    def test_create_port_public_network_with_invalid_ip_no_subnet_id(
            self, expected_error='InvalidIpForNetwork'):
        with self.network(shared=True) as network:
            with self.subnet(network=network, cidr='10.0.0.0/24'):
                ips = [{'ip_address': '1.1.1.1'}]
                res = self._create_port(self.fmt,
                                        network['network']['id'],
                                        webob.exc.HTTPBadRequest.code,
                                        fixed_ips=ips,
                                        set_context=True)
                data = self.deserialize(self.fmt, res)
                msg = str(lib_exc.InvalidIpForNetwork(ip_address='1.1.1.1'))
                self.assertEqual(expected_error, data['NeutronError']['type'])
                self.assertEqual(msg, data['NeutronError']['message'])

    def test_create_port_public_network_with_invalid_ip_and_subnet_id(
            self, expected_error='InvalidIpForSubnet'):
        with self.network(shared=True) as network:
            with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
                ips = [{'subnet_id': subnet['subnet']['id'],
                        'ip_address': '1.1.1.1'}]
                res = self._create_port(self.fmt,
                                        network['network']['id'],
                                        webob.exc.HTTPBadRequest.code,
                                        fixed_ips=ips,
                                        set_context=True)
                data = self.deserialize(self.fmt, res)
                msg = str(lib_exc.InvalidIpForSubnet(ip_address='1.1.1.1'))
                self.assertEqual(expected_error, data['NeutronError']['type'])
                self.assertEqual(msg, data['NeutronError']['message'])

    def test_create_ports_bulk_native(self):
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk port create")
        with self.network() as net:
            res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
                                         'test', True)
            self._validate_behavior_on_bulk_success(res, 'ports')
            for p in self.deserialize(self.fmt, res)['ports']:
                self._delete('ports', p['id'])

    def test_create_ports_bulk_emulated(self):
        real_has_attr = hasattr

        # ensures the API chooses the emulation code path
        def fakehasattr(item, attr):
            if attr.endswith('__native_bulk_support'):
                return False
            return real_has_attr(item, attr)

        with mock.patch('six.moves.builtins.hasattr',
                        new=fakehasattr):
            with self.network() as net:
                res = self._create_port_bulk(self.fmt, 2,
                                             net['network']['id'],
                                             'test', True)
                self._validate_behavior_on_bulk_success(res, 'ports')
                for p in self.deserialize(self.fmt, res)['ports']:
                    self._delete('ports', p['id'])
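    # NOTE: the fakehasattr patch above hides every *__native_bulk_support
    # attribute from the API layer, which is enough to force it down the
    # emulated (one-by-one) bulk code path even when the plugin has
    # native bulk support.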

    def test_create_ports_bulk_wrong_input(self):
        with self.network() as net:
            overrides = {1: {'admin_state_up': 'doh'}}
            res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
                                         'test', True,
                                         override=overrides)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             res.status_int)
            req = self.new_list_request('ports')
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPOk.code, res.status_int)
            ports = self.deserialize(self.fmt, res)
            self.assertEqual(0, len(ports['ports']))

    def test_get_ports_count(self):
        with self.port(), self.port(), self.port(), self.port() as p:
            tenid = p['port']['tenant_id']
            ctx = context.Context(user_id=None, tenant_id=tenid,
                                  is_admin=False)
            pl = directory.get_plugin()
            count = pl.get_ports_count(ctx,
                                       filters={'tenant_id': [tenid]})
            self.assertEqual(4, count)

    def test_create_ports_bulk_emulated_plugin_failure(self):
        real_has_attr = hasattr

        # ensures the API chooses the emulation code path
        def fakehasattr(item, attr):
            if attr.endswith('__native_bulk_support'):
                return False
            return real_has_attr(item, attr)

        with mock.patch('six.moves.builtins.hasattr', new=fakehasattr):
            plugin = directory.get_plugin()
            method_to_patch = '_process_port_binding'
            if real_has_attr(plugin, method_to_patch):
                orig = plugin._process_port_binding
            else:
                method_to_patch = '_make_port_dict'
                orig = plugin._make_port_dict
            with mock.patch.object(plugin,
                                   method_to_patch) as patched_plugin:

                def side_effect(*args, **kwargs):
                    return self._fail_second_call(patched_plugin, orig,
                                                  *args, **kwargs)

                patched_plugin.side_effect = side_effect
                with self.network() as net:
                    res = self._create_port_bulk(self.fmt, 2,
                                                 net['network']['id'],
                                                 'test', True)
                    # We expect a 500 as we injected a fault in the plugin
                    self._validate_behavior_on_bulk_failure(
                        res, 'ports', webob.exc.HTTPServerError.code
                    )

    def test_create_ports_bulk_native_plugin_failure(self):
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk port create")
        ctx = context.get_admin_context()
        with self.network() as net:
            plugin = directory.get_plugin()
            orig = plugin.create_port
            method_to_patch = _get_create_db_method('port_bulk')
            with mock.patch.object(plugin,
                                   method_to_patch) as patched_plugin:

                def side_effect(*args, **kwargs):
                    return self._fail_second_call(patched_plugin, orig,
                                                  *args, **kwargs)

                patched_plugin.side_effect = side_effect
                res = self._create_port_bulk(self.fmt, 2,
                                             net['network']['id'],
                                             'test', True, context=ctx)
                # We expect a 500 as we injected a fault in the plugin
                self._validate_behavior_on_bulk_failure(
                    res, 'ports', webob.exc.HTTPServerError.code)

    def test_list_ports(self):
        # for this test we need to enable overlapping ips
        cfg.CONF.set_default('allow_overlapping_ips', True)
        with self.port() as v1, self.port() as v2, self.port() as v3:
            ports = (v1, v2, v3)
            self._test_list_resources('port', ports)

    def _test_list_ports_filtered_by_fixed_ip(self, **kwargs):
        # for this test we need to enable overlapping ips
        cfg.CONF.set_default('allow_overlapping_ips', True)
        with self.port() as port1, self.port():
            fixed_ips = port1['port']['fixed_ips'][0]
            query_params = """
fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
""".strip() % (fixed_ips['ip_address'],
               '192.168.126.5',
               fixed_ips['subnet_id'])
            extra_params = "&".join(["{}={}".format(k, v)
                                     for k, v in kwargs.items()])
            if extra_params:
                query_params = "{}&{}".format(query_params, extra_params)
            self._test_list_resources('port', [port1],
                                      query_params=query_params)

    def test_list_ports_filtered_by_fixed_ip(self):
        self._test_list_ports_filtered_by_fixed_ip()

    def test_list_ports_filtered_by_fixed_ip_with_limit(self):
        self._test_list_ports_filtered_by_fixed_ip(limit=500)

    def test_list_ports_public_network(self):
        with self.network(shared=True) as network:
            with self.subnet(network) as subnet:
                with self.port(subnet, tenant_id='tenant_1') as port1,\
                        self.port(subnet, tenant_id='tenant_2') as port2:
                    # Admin request - must return both ports
                    self._test_list_resources('port', [port1, port2])
                    # Tenant_1 request - must return single port
                    n_context = context.Context('', 'tenant_1')
                    self._test_list_resources('port', [port1],
                                              neutron_context=n_context)
                    # Tenant_2 request - must return single port
                    n_context = context.Context('', 'tenant_2')
                    self._test_list_resources('port', [port2],
                                              neutron_context=n_context)

    def test_list_ports_for_network_owner(self):
        with self.network(tenant_id='tenant_1') as network:
            with self.subnet(network) as subnet:
                with self.port(subnet, tenant_id='tenant_1') as port1,\
                        self.port(subnet, tenant_id='tenant_2') as port2:
                    # network owner request, should return all ports
                    port_res = self._list_ports(
                        'json', set_context=True, tenant_id='tenant_1')
                    port_list = self.deserialize('json',
                                                 port_res)['ports']
                    port_ids = [p['id'] for p in port_list]
                    self.assertEqual(2, len(port_list))
                    self.assertIn(port1['port']['id'], port_ids)
                    self.assertIn(port2['port']['id'], port_ids)

                    # another tenant's request should only return the
                    # ports belonging to that tenant
                    port_res = self._list_ports(
                        'json', set_context=True, tenant_id='tenant_2')
                    port_list = self.deserialize('json',
                                                 port_res)['ports']
                    port_ids = [p['id'] for p in port_list]
                    self.assertEqual(1, len(port_list))
                    self.assertNotIn(port1['port']['id'], port_ids)
                    self.assertIn(port2['port']['id'], port_ids)

    def test_list_ports_with_sort_native(self):
        if self._skip_native_sorting:
            self.skipTest("Skip test for not implemented sorting feature")
        cfg.CONF.set_default('allow_overlapping_ips', True)
        with self.port(admin_state_up='True',
                       mac_address='00:00:00:00:00:01') as port1,\
                self.port(admin_state_up='False',
                          mac_address='00:00:00:00:00:02') as port2,\
                self.port(admin_state_up='False',
                          mac_address='00:00:00:00:00:03') as port3:
            self._test_list_with_sort('port', (port3, port2, port1),
                                      [('admin_state_up', 'asc'),
                                       ('mac_address', 'desc')])

    def test_list_ports_with_sort_emulated(self):
        helper_patcher = mock.patch(
            'neutron.api.v2.base.Controller._get_sorting_helper',
            new=_fake_get_sorting_helper)
        helper_patcher.start()
        cfg.CONF.set_default('allow_overlapping_ips', True)
        with self.port(admin_state_up='True',
                       mac_address='00:00:00:00:00:01') as port1,\
                self.port(admin_state_up='False',
                          mac_address='00:00:00:00:00:02') as port2,\
                self.port(admin_state_up='False',
                          mac_address='00:00:00:00:00:03') as port3:
            self._test_list_with_sort('port', (port3, port2, port1),
                                      [('admin_state_up', 'asc'),
                                       ('mac_address', 'desc')])

    def test_list_ports_with_pagination_native(self):
        if self._skip_native_pagination:
            self.skipTest("Skip test for not implemented pagination "
                          "feature")
        cfg.CONF.set_default('allow_overlapping_ips', True)
        with self.port(mac_address='00:00:00:00:00:01') as port1,\
                self.port(mac_address='00:00:00:00:00:02') as port2,\
                self.port(mac_address='00:00:00:00:00:03') as port3:
            self._test_list_with_pagination('port',
                                            (port1, port2, port3),
                                            ('mac_address', 'asc'), 2, 2)
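
    # NOTE: in the pagination helpers above and below, the trailing
    # "2, 2" arguments are the page size (limit) and the expected number
    # of pages: three ports listed two at a time should come back as two
    # pages, e.g. [port1, port2] and then [port3] when sorted by
    # mac_address ascending.  (This is a reading of the shared
    # _test_list_with_pagination helper in the base test class.)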

    def test_list_ports_with_pagination_emulated(self):
        helper_patcher = mock.patch(
            'neutron.api.v2.base.Controller._get_pagination_helper',
            new=_fake_get_pagination_helper)
        helper_patcher.start()
        cfg.CONF.set_default('allow_overlapping_ips', True)
        with self.port(mac_address='00:00:00:00:00:01') as port1,\
                self.port(mac_address='00:00:00:00:00:02') as port2,\
                self.port(mac_address='00:00:00:00:00:03') as port3:
            self._test_list_with_pagination('port',
                                            (port1, port2, port3),
                                            ('mac_address', 'asc'), 2, 2)

    def test_list_ports_with_pagination_reverse_native(self):
        if self._skip_native_pagination:
            self.skipTest("Skip test for not implemented pagination "
                          "feature")
        cfg.CONF.set_default('allow_overlapping_ips', True)
        with self.port(mac_address='00:00:00:00:00:01') as port1,\
                self.port(mac_address='00:00:00:00:00:02') as port2,\
                self.port(mac_address='00:00:00:00:00:03') as port3:
            self._test_list_with_pagination_reverse('port',
                                                    (port1, port2, port3),
                                                    ('mac_address', 'asc'),
                                                    2, 2)

    def test_list_ports_with_pagination_reverse_emulated(self):
        helper_patcher = mock.patch(
            'neutron.api.v2.base.Controller._get_pagination_helper',
            new=_fake_get_pagination_helper)
        helper_patcher.start()
        cfg.CONF.set_default('allow_overlapping_ips', True)
        with self.port(mac_address='00:00:00:00:00:01') as port1,\
                self.port(mac_address='00:00:00:00:00:02') as port2,\
                self.port(mac_address='00:00:00:00:00:03') as port3:
            self._test_list_with_pagination_reverse('port',
                                                    (port1, port2, port3),
                                                    ('mac_address', 'asc'),
                                                    2, 2)

    def test_show_port(self):
        with self.port() as port:
            req = self.new_show_request('ports', port['port']['id'],
                                        self.fmt)
            sport = self.deserialize(self.fmt,
                                     req.get_response(self.api))
            self.assertEqual(port['port']['id'], sport['port']['id'])

    def test_delete_port(self):
        with self.port() as port:
            self._delete('ports', port['port']['id'])
            self._show('ports', port['port']['id'],
                       expected_code=webob.exc.HTTPNotFound.code)

    def test_delete_port_public_network(self):
        with self.network(shared=True) as network:
            port_res = self._create_port(self.fmt,
                                         network['network']['id'],
                                         webob.exc.HTTPCreated.code,
                                         tenant_id='another_tenant',
                                         set_context=True)
            port = self.deserialize(self.fmt, port_res)
            self._delete('ports', port['port']['id'])
            self._show('ports', port['port']['id'],
                       expected_code=webob.exc.HTTPNotFound.code)

    def test_delete_port_by_network_owner(self):
        with self.network(tenant_id='tenant_1') as network:
            with self.subnet(network) as subnet:
                with self.port(subnet, tenant_id='tenant_2') as port:
                    self._delete(
                        'ports', port['port']['id'],
                        neutron_context=context.Context('', 'tenant_1'))
                    self._show('ports', port['port']['id'],
                               expected_code=webob.exc.HTTPNotFound.code)

    def test_update_port_with_stale_subnet(self):
        with self.network(shared=True) as network:
            port = self._make_port(self.fmt, network['network']['id'])
            subnet = self._make_subnet(self.fmt, network,
                                       '10.0.0.1', '10.0.0.0/24')
            data = {'port': {'fixed_ips': [{'subnet_id':
                                            subnet['subnet']['id']}]}}
            # mock _get_subnets to return this subnet
            mock.patch.object(ipam_backend_mixin.IpamBackendMixin,
                              '_ipam_get_subnets',
                              return_value=[subnet['subnet']]).start()
            # Delete the subnet so that it is stale by the time the port
            # update looks it up.
            self._delete('subnets', subnet['subnet']['id'])
            self._show('subnets', subnet['subnet']['id'],
                       expected_code=webob.exc.HTTPNotFound.code)
            # Though _get_subnets returns the subnet, it was deleted
            # while IPAM was updating the port, so the port update must
            # fail.
            req = self.new_update_request('ports', data,
                                          port['port']['id'])
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)

    def test_port_update_with_ipam_error(self):
        with self.network() as network,\
                self.subnet(), self.subnet(),\
                self.port(network=network) as port,\
                mock.patch('neutron.ipam.drivers.neutrondb_ipam.'
                           'driver.NeutronDbSubnet.deallocate') as f:
            f.side_effect = [
                ipam_exc.IpAddressAllocationNotFound(
                    ip_address='foo_i', subnet_id='foo_s'),
                None,
            ]
            data = {'port': {'name': 'fool-me'}}
            req = self.new_update_request('ports', data,
                                          port['port']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual('fool-me', res['port']['name'])

    def test_update_port(self):
        with self.port() as port:
            data = {'port': {'admin_state_up': False}}
            req = self.new_update_request('ports', data,
                                          port['port']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(res['port']['admin_state_up'],
                             data['port']['admin_state_up'])

    def update_port_mac(self, port, updated_fixed_ips=None):
        orig_mac = port['mac_address']
        mac = orig_mac.split(':')
        # flip the last octet between '00' and '01' so the new MAC always
        # differs from the original
        mac[5] = '01' if mac[5] != '01' else '00'
        new_mac = ':'.join(mac)
        data = {'port': {'mac_address': new_mac}}
        if updated_fixed_ips:
            data['port']['fixed_ips'] = updated_fixed_ips
        req = self.new_update_request('ports', data, port['id'])
        return req.get_response(self.api), new_mac

    def _verify_ips_after_mac_change(self, orig_port, new_port):
        for fip in orig_port['port']['fixed_ips']:
            subnet = self._show('subnets', fip['subnet_id'])
            if ipv6_utils.is_auto_address_subnet(subnet['subnet']):
                port_mac = new_port['port']['mac_address']
                subnet_cidr = subnet['subnet']['cidr']
                eui_addr = str(netutils.get_ipv6_addr_by_EUI64(
                    subnet_cidr, port_mac))
                fip = {'ip_address': eui_addr,
                       'subnet_id': subnet['subnet']['id']}
            self.assertIn(fip, new_port['port']['fixed_ips'])
        self.assertEqual(len(orig_port['port']['fixed_ips']),
                         len(new_port['port']['fixed_ips']))

    def check_update_port_mac(
            self, expected_status=webob.exc.HTTPOk.code,
            expected_error='StateInvalid', subnet=None,
            device_owner=DEVICE_OWNER_COMPUTE, updated_fixed_ips=None,
            host_arg=None, arg_list=None):
        host_arg = host_arg or {}
        arg_list = arg_list or []
        with self.port(device_owner=device_owner, subnet=subnet,
                       arg_list=arg_list, **host_arg) as port:
            self.assertIn('mac_address', port['port'])
            res, new_mac = self.update_port_mac(
                port['port'], updated_fixed_ips=updated_fixed_ips)
            self.assertEqual(expected_status, res.status_int)
            if expected_status == webob.exc.HTTPOk.code:
                result = self.deserialize(self.fmt, res)
                self.assertIn('port', result)
                self.assertEqual(new_mac, result['port']['mac_address'])
                if updated_fixed_ips is None:
                    self._verify_ips_after_mac_change(port, result)
                else:
                    self.assertEqual(len(updated_fixed_ips),
                                     len(result['port']['fixed_ips']))
            else:
                error = self.deserialize(self.fmt, res)
                self.assertEqual(expected_error,
                                 error['NeutronError']['type'])

    def test_update_port_mac(self):
        self.check_update_port_mac()
        # sub-classes for plugins/drivers that support mac address update
        # override this method

    def test_dhcp_port_ips_prefer_next_available_ip(self):
        # test to check that DHCP ports get the first available IP in the
        # allocation range
        with self.subnet() as subnet:
            port_ips = []
            for _ in range(10):
                with self.port(device_owner=constants.DEVICE_OWNER_DHCP,
                               subnet=subnet) as port:
                    port_ips.append(
                        port['port']['fixed_ips'][0]['ip_address'])
        first_ip = netaddr.IPAddress(port_ips[0])
        expected = [str(first_ip + i) for i in range(10)]
        self.assertEqual(expected, port_ips)

    def test_update_port_mac_ip(self):
        with self.subnet() as subnet:
            updated_fixed_ips = [{'subnet_id': subnet['subnet']['id'],
                                  'ip_address': '10.0.0.3'}]
            self.check_update_port_mac(
                subnet=subnet, updated_fixed_ips=updated_fixed_ips)
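
    # NOTE: the SLAAC checks above and below derive the expected address
    # with netutils.get_ipv6_addr_by_EUI64().  As an illustration (values
    # chosen for this note, not taken from the tests): for prefix
    # 2607:f0d0:1002:51::/64 and MAC 00:16:3e:33:44:55, the U/L bit of
    # the first octet is flipped (00 -> 02) and ff:fe is inserted between
    # the OUI and NIC bytes, giving interface ID 0216:3eff:fe33:4455 and
    # thus the address 2607:f0d0:1002:51:216:3eff:fe33:4455.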

    def test_update_port_mac_v6_slaac(self):
        with self.network() as n:
            pass
        # add a couple of v4 subnets to ensure they aren't interfered with
        with self.subnet(network=n) as v4_1, \
                self.subnet(network=n, cidr='7.0.0.0/24') as v4_2:
            pass
        with self.subnet(network=n,
                         gateway_ip='fe80::1',
                         cidr='2607:f0d0:1002:51::/64',
                         ip_version=constants.IP_VERSION_6,
                         ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
            self.assertTrue(
                ipv6_utils.is_auto_address_subnet(subnet['subnet']))
            fixed_ips_req = {
                'fixed_ips': [{'subnet_id': subnet['subnet']['id']},
                              {'subnet_id': v4_1['subnet']['id']},
                              {'subnet_id': v4_1['subnet']['id']},
                              {'subnet_id': v4_2['subnet']['id']},
                              {'subnet_id': v4_2['subnet']['id']}]
            }
            self.check_update_port_mac(subnet=subnet,
                                       host_arg=fixed_ips_req)

    def test_update_port_mac_bad_owner(self):
        self.check_update_port_mac(
            device_owner=DEVICE_OWNER_NOT_COMPUTE,
            expected_status=webob.exc.HTTPConflict.code,
            expected_error='UnsupportedPortDeviceOwner')

    def check_update_port_mac_used(self, expected_error='MacAddressInUse'):
        with self.subnet() as subnet:
            with self.port(subnet=subnet) as port:
                with self.port(subnet=subnet) as port2:
                    self.assertIn('mac_address', port['port'])
                    new_mac = port2['port']['mac_address']
                    data = {'port': {'mac_address': new_mac}}
                    req = self.new_update_request('ports', data,
                                                  port['port']['id'])
                    res = req.get_response(self.api)
                    self.assertEqual(webob.exc.HTTPConflict.code,
                                     res.status_int)
                    error = self.deserialize(self.fmt, res)
                    self.assertEqual(expected_error,
                                     error['NeutronError']['type'])

    def test_update_port_mac_used(self):
        self.check_update_port_mac_used()

    def test_update_port_not_admin(self):
        res = self._create_network(self.fmt, 'net1', True,
                                   tenant_id='not_admin',
                                   set_context=True)
        net1 = self.deserialize(self.fmt, res)
        res = self._create_port(self.fmt, net1['network']['id'],
                                tenant_id='not_admin', set_context=True)
        port = self.deserialize(self.fmt, res)
        data = {'port': {'admin_state_up': False}}
        neutron_context = context.Context('', 'not_admin')
        port = self._update('ports', port['port']['id'], data,
                            neutron_context=neutron_context)
        self.assertFalse(port['port']['admin_state_up'])

    def test_update_device_id_unchanged(self):
        with self.port() as port:
            data = {'port': {'admin_state_up': True,
                             'device_id': port['port']['device_id']}}
            req = self.new_update_request('ports', data,
                                          port['port']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertTrue(res['port']['admin_state_up'])

    def test_update_device_id_null(self):
        with self.port() as port:
            data = {'port': {'device_id': None}}
            req = self.new_update_request('ports', data,
                                          port['port']['id'])
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             res.status_int)

    def test_delete_network_if_port_exists(self):
        with self.port() as port:
            req = self.new_delete_request('networks',
                                          port['port']['network_id'])
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)

    def test_delete_network_port_exists_owned_by_network(self):
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        network_id = network['network']['id']
        self._create_port(self.fmt, network_id,
                          device_owner=constants.DEVICE_OWNER_DHCP)
        req = self.new_delete_request('networks', network_id)
        res = req.get_response(self.api)
        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)

    def test_delete_network_port_exists_owned_by_network_race(self):
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        network_id = network['network']['id']
        self._create_port(self.fmt, network_id,
                          device_owner=constants.DEVICE_OWNER_DHCP)
        # skip the first port delete to simulate a port created after the
        # auto-cleanup pass
        plugin = directory.get_plugin()
        p = mock.patch.object(plugin, 'delete_port')
        mock_del_port = p.start()
        mock_del_port.side_effect = lambda *a, **k: p.stop()
        req = self.new_delete_request('networks', network_id)
        res = req.get_response(self.api)
        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)

    def test_delete_network_port_exists_owned_by_network_port_not_found(
            self):
        """Tests that we continue to gracefully delete the network even if
        a neutron:dhcp-owned port was deleted concurrently.
        """
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        network_id = network['network']['id']
        self._create_port(self.fmt, network_id,
                          device_owner=constants.DEVICE_OWNER_DHCP)
        # Raise PortNotFound when trying to delete the port to simulate a
        # concurrent delete race; note that we actually have to delete
        # the port "out of band" otherwise deleting the network will fail
        # because of constraints in the data model.
        plugin = directory.get_plugin()
        orig_delete = plugin.delete_port

        def fake_delete_port(context, id):
            # Delete the port for real from the database and then raise
            # PortNotFound to simulate the race.
            self.assertIsNone(orig_delete(context, id))
            raise lib_exc.PortNotFound(port_id=id)

        p = mock.patch.object(plugin, 'delete_port')
        mock_del_port = p.start()
        mock_del_port.side_effect = fake_delete_port
        req = self.new_delete_request('networks', network_id)
        res = req.get_response(self.api)
        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)

    def test_update_port_delete_ip(self):
        with self.subnet() as subnet:
            with self.port(subnet=subnet) as port:
                data = {'port': {'admin_state_up': False,
                                 'fixed_ips': []}}
                req = self.new_update_request('ports', data,
                                              port['port']['id'])
                res = self.deserialize(self.fmt,
                                       req.get_response(self.api))
                self.assertEqual(data['port']['admin_state_up'],
                                 res['port']['admin_state_up'])
                self.assertEqual(data['port']['fixed_ips'],
                                 res['port']['fixed_ips'])

    def test_no_more_port_exception(self):
        with self.subnet(cidr='10.0.0.0/31',
                         enable_dhcp=False) as subnet:
            id = subnet['subnet']['network_id']
            res = self._create_port(self.fmt, id)
            data = self.deserialize(self.fmt, res)
            msg = str(lib_exc.IpAddressGenerationFailure(net_id=id))
            self.assertEqual(data['NeutronError']['message'], msg)
            self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)

    def test_create_ports_native_quotas(self):
        quota = 1
        cfg.CONF.set_override('quota_port', quota, group='QUOTAS')
        with self.network() as network:
            res = self._create_port(self.fmt, network['network']['id'])
            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
            res = self._create_port(self.fmt, network['network']['id'])
            self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)

    def test_create_ports_bulk_native_quotas(self):
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk port create")
        quota = 4
        cfg.CONF.set_override('quota_port', quota, group='QUOTAS')
        with self.network() as network:
            res = self._create_port_bulk(self.fmt, quota + 1,
                                         network['network']['id'],
                                         'test', True)
            self._validate_behavior_on_bulk_failure(
                res, 'ports',
                errcode=webob.exc.HTTPConflict.code)
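
    # NOTE: with quota_port set to 4, the bulk request for 5 ports above
    # is expected to be rejected as a whole with a 409; the quota check
    # applies to the entire request rather than creating 4 ports and
    # failing on the 5th (compare test_create_ports_bulk_wrong_input,
    # which likewise verifies that a failed bulk request leaves no ports
    # behind).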
""" with self.subnet() as subnet: fixed_ip_data = [{'ip_address': '10.0.0.2', 'subnet_id': subnet['subnet']['id']}] with self.port(subnet=subnet, fixed_ips=fixed_ip_data) as port: ips = port['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual('10.0.0.2', ips[0]['ip_address']) self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) data = {'port': {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': "10.0.0.10"}]}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) ips = res['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual('10.0.0.10', ips[0]['ip_address']) self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) def test_update_port_update_ip_address_only(self): with self.subnet() as subnet: ip_address = '10.0.0.2' fixed_ip_data = [{'ip_address': ip_address, 'subnet_id': subnet['subnet']['id']}] with self.port(subnet=subnet, fixed_ips=fixed_ip_data) as port: ips = port['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual(ip_address, ips[0]['ip_address']) self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) data = {'port': {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': "10.0.0.10"}, {'ip_address': ip_address}]}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) ips = res['port']['fixed_ips'] self.assertEqual(2, len(ips)) self.assertIn({'ip_address': ip_address, 'subnet_id': subnet['subnet']['id']}, ips) self.assertIn({'ip_address': '10.0.0.10', 'subnet_id': subnet['subnet']['id']}, ips) def test_update_port_update_ips(self): """Update IP and associate new IP on port. Check a port update with the specified subnet_id's. A IP address will be allocated for each subnet_id. 
""" with self.subnet() as subnet: with self.port(subnet=subnet) as port: data = {'port': {'admin_state_up': False, 'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.3'}]}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(data['port']['admin_state_up'], res['port']['admin_state_up']) ips = res['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual('10.0.0.3', ips[0]['ip_address'], '10.0.0.3') self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) def test_update_port_add_additional_ip(self): """Test update of port with additional IP.""" with self.subnet() as subnet: with self.port(subnet=subnet) as port: data = {'port': {'admin_state_up': False, 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}]}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(data['port']['admin_state_up'], res['port']['admin_state_up']) ips = res['port']['fixed_ips'] self.assertEqual(2, len(ips)) self.assertNotEqual(ips[0]['ip_address'], ips[1]['ip_address']) network_ip_net = netaddr.IPNetwork(subnet['subnet']['cidr']) self.assertIn(ips[0]['ip_address'], network_ip_net) self.assertIn(ips[1]['ip_address'], network_ip_net) def test_update_port_invalid_fixed_ip_address_v6_slaac(self): with self.subnet(cidr='2607:f0d0:1002:51::/64', ip_version=constants.IP_VERSION_6, ipv6_address_mode=constants.IPV6_SLAAC, gateway_ip=constants.ATTR_NOT_SPECIFIED) as subnet: with self.port(subnet=subnet) as port: ips = port['port']['fixed_ips'] ip_address = '2607:f0d0:1002:51::5' self.assertEqual(1, len(ips)) port_mac = port['port']['mac_address'] subnet_id = subnet['subnet']['id'] subnet_cidr = subnet['subnet']['cidr'] eui_addr = str(netutils.get_ipv6_addr_by_EUI64(subnet_cidr, port_mac)) self.assertEqual(ips[0]['ip_address'], eui_addr) self.assertEqual(ips[0]['subnet_id'], subnet_id) data = {'port': {'fixed_ips': [{'subnet_id': subnet_id, 'ip_address': ip_address}]}} req = self.new_update_request('ports', data, port['port']['id']) res = req.get_response(self.api) err = self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) self.assertEqual('AllocationOnAutoAddressSubnet', err['NeutronError']['type']) msg = str(ipam_exc.AllocationOnAutoAddressSubnet( ip=ip_address, subnet_id=subnet_id)) self.assertEqual(err['NeutronError']['message'], msg) def test_requested_duplicate_mac(self): with self.port() as port: mac = port['port']['mac_address'] # check that MAC address matches base MAC base_mac = cfg.CONF.base_mac[0:2] self.assertTrue(mac.startswith(base_mac)) kwargs = {"mac_address": mac} net_id = port['port']['network_id'] res = self._create_port(self.fmt, net_id=net_id, **kwargs) self.assertEqual(webob.exc.HTTPConflict.code, res.status_int) def test_mac_generation(self): cfg.CONF.set_override('base_mac', "12:34:56:00:00:00") with self.port() as port: mac = port['port']['mac_address'] self.assertTrue(mac.startswith("12:34:56")) def test_mac_generation_4octet(self): cfg.CONF.set_override('base_mac', "12:34:56:78:00:00") with self.port() as port: mac = port['port']['mac_address'] self.assertTrue(mac.startswith("12:34:56:78")) def test_duplicate_mac_generation(self): # simulate duplicate mac generation to make sure DBDuplicate is retried responses = ['12:34:56:78:00:00', '12:34:56:78:00:00', '12:34:56:78:00:01'] with mock.patch.object(net, 

    def test_duplicate_mac_generation(self):
        # simulate duplicate mac generation to make sure DBDuplicate is
        # retried
        responses = ['12:34:56:78:00:00',
                     '12:34:56:78:00:00',
                     '12:34:56:78:00:01']
        with mock.patch.object(
                net, 'random_mac_generator',
                return_value=itertools.cycle(responses)) as grand_mac:
            with self.subnet() as s:
                with self.port(subnet=s) as p1, \
                        self.port(subnet=s) as p2:
                    self.assertEqual('12:34:56:78:00:00',
                                     p1['port']['mac_address'])
                    self.assertEqual('12:34:56:78:00:01',
                                     p2['port']['mac_address'])
                    self.assertEqual(3, grand_mac.call_count)

    def test_bad_mac_format(self):
        cfg.CONF.set_override('base_mac', "bad_mac")
        try:
            self.plugin._check_base_mac_format()
        except Exception:
            return
        self.fail("No exception for illegal base_mac format")

    def test_is_mac_in_use(self):
        ctx = context.get_admin_context()
        with self.port() as port:
            net_id = port['port']['network_id']
            mac = port['port']['mac_address']
            self.assertTrue(self.plugin._is_mac_in_use(ctx, net_id, mac))
            mac2 = '00:22:00:44:00:66'  # other mac, same network
            self.assertFalse(
                self.plugin._is_mac_in_use(ctx, net_id, mac2))
            net_id2 = port['port']['id']  # other net uuid, same mac
            self.assertFalse(
                self.plugin._is_mac_in_use(ctx, net_id2, mac))

    def test_requested_duplicate_ip(self):
        with self.subnet() as subnet:
            subnet_ip_net = netaddr.IPNetwork(subnet['subnet']['cidr'])
            with self.port(subnet=subnet) as port:
                ips = port['port']['fixed_ips']
                self.assertEqual(1, len(ips))
                self.assertIn(ips[0]['ip_address'], subnet_ip_net)
                self.assertEqual(ips[0]['subnet_id'],
                                 subnet['subnet']['id'])
                # Check configuring of duplicate IP
                kwargs = {"fixed_ips": [
                    {'subnet_id': subnet['subnet']['id'],
                     'ip_address': ips[0]['ip_address']}]}
                net_id = port['port']['network_id']
                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
                self.assertEqual(webob.exc.HTTPConflict.code,
                                 res.status_int)

    def test_requested_subnet_id(self):
        with self.subnet() as subnet:
            subnet_ip_net = netaddr.IPNetwork(subnet['subnet']['cidr'])
            with self.port(subnet=subnet) as port:
                ips = port['port']['fixed_ips']
                self.assertEqual(1, len(ips))
                self.assertIn(netaddr.IPAddress(ips[0]['ip_address']),
                              netaddr.IPSet(subnet_ip_net))
                self.assertEqual(ips[0]['subnet_id'],
                                 subnet['subnet']['id'])
                # Request an IP from a specific subnet
                kwargs = {"fixed_ips": [
                    {'subnet_id': subnet['subnet']['id']}]}
                net_id = port['port']['network_id']
                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
                port2 = self.deserialize(self.fmt, res)
                ips = port2['port']['fixed_ips']
                self.assertEqual(1, len(ips))
                self.assertIn(ips[0]['ip_address'], subnet_ip_net)
                self.assertEqual(ips[0]['subnet_id'],
                                 subnet['subnet']['id'])
                self._delete('ports', port2['port']['id'])

    def test_requested_subnet_id_not_on_network(self):
        with self.subnet() as subnet:
            with self.port(subnet=subnet) as port:
                # Create new network
                res = self._create_network(fmt=self.fmt, name='net2',
                                           admin_state_up=True)
                network2 = self.deserialize(self.fmt, res)
                subnet2 = self._make_subnet(
                    self.fmt, network2, "1.1.1.1", "1.1.1.0/24",
                    ip_version=constants.IP_VERSION_4)
                # Request an IP from a subnet on the other network
                kwargs = {"fixed_ips": [
                    {'subnet_id': subnet2['subnet']['id']}]}
                net_id = port['port']['network_id']
                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
                self.assertEqual(webob.exc.HTTPClientError.code,
                                 res.status_int)

    def test_overlapping_subnets(self):
        with self.subnet() as subnet:
            tenant_id = subnet['subnet']['tenant_id']
            net_id = subnet['subnet']['network_id']
            res = self._create_subnet(
                self.fmt, tenant_id=tenant_id, net_id=net_id,
                cidr='10.0.0.225/28',
                ip_version=constants.IP_VERSION_4,
                gateway_ip=constants.ATTR_NOT_SPECIFIED)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             res.status_int)

    def test_requested_subnet_id_v4_and_v6(self):
        with self.subnet() as subnet:
            # Get an IPv4 and an IPv6 address
            tenant_id = subnet['subnet']['tenant_id']
            net_id = subnet['subnet']['network_id']
            res = self._create_subnet(
                self.fmt, tenant_id=tenant_id, net_id=net_id,
                cidr='2607:f0d0:1002:51::/124',
                ip_version=constants.IP_VERSION_6,
                gateway_ip=constants.ATTR_NOT_SPECIFIED)
            subnet2 = self.deserialize(self.fmt, res)
            kwargs = {"fixed_ips":
                      [{'subnet_id': subnet['subnet']['id']},
                       {'subnet_id': subnet2['subnet']['id']}]}
            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
            port3 = self.deserialize(self.fmt, res)
            ips = port3['port']['fixed_ips']
            cidr_v4 = subnet['subnet']['cidr']
            cidr_v6 = subnet2['subnet']['cidr']
            self.assertEqual(2, len(ips))
            self._test_requested_port_subnet_ids(
                ips, [subnet['subnet']['id'], subnet2['subnet']['id']])
            self._test_dual_stack_port_ip_addresses_in_subnets(
                ips, cidr_v4, cidr_v6)
            res = self._create_port(self.fmt, net_id=net_id)
            port4 = self.deserialize(self.fmt, res)
            # Check that a v4 and a v6 address are allocated
            ips = port4['port']['fixed_ips']
            self.assertEqual(2, len(ips))
            self._test_requested_port_subnet_ids(
                ips, [subnet['subnet']['id'], subnet2['subnet']['id']])
            self._test_dual_stack_port_ip_addresses_in_subnets(
                ips, cidr_v4, cidr_v6)
            self._delete('ports', port3['port']['id'])
            self._delete('ports', port4['port']['id'])

    def _test_requested_port_subnet_ids(self, ips, expected_subnet_ids):
        self.assertEqual(set(x['subnet_id'] for x in ips),
                         set(expected_subnet_ids))

    def _test_dual_stack_port_ip_addresses_in_subnets(self, ips, cidr_v4,
                                                      cidr_v6):
        ip_net_v4 = netaddr.IPNetwork(cidr_v4)
        ip_net_v6 = netaddr.IPNetwork(cidr_v6)
        for address in ips:
            ip_addr = netaddr.IPAddress(address['ip_address'])
            expected_ip_net = (ip_net_v4 if ip_addr.version == 4
                               else ip_net_v6)
            self.assertIn(ip_addr, expected_ip_net)

    def test_create_port_invalid_fixed_ip_address_v6_pd_slaac(self):
        with self.network(name='net') as network:
            subnet = self._make_v6_subnet(
                network, constants.IPV6_SLAAC, ipv6_pd=True)
            net_id = subnet['subnet']['network_id']
            subnet_id = subnet['subnet']['id']
            # update the subnet with a new prefix
            prefix = '2001::/64'
            data = {'subnet': {'cidr': prefix}}
            self.plugin.update_subnet(context.get_admin_context(),
                                      subnet_id, data)
            kwargs = {"fixed_ips": [{'subnet_id': subnet_id,
                                     'ip_address': '2001::2'}]}
            # pd is an auto-address subnet, so it can't have 2001::2
            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             res.status_int)
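
    # NOTE: in the prefix-delegation (ipv6_pd) tests above and below, the
    # subnet is created with a provisional prefix and then updated to
    # '2001::/64' to stand in for a delegated prefix.  Because PD subnets
    # are SLAAC (auto-address) subnets, an explicitly requested address
    # such as 2001::2 must be refused; only the EUI-64 address derived
    # from the port's MAC is valid on them.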

    def test_update_port_invalid_fixed_ip_address_v6_pd_slaac(self):
        with self.network(name='net') as network:
            subnet = self._make_v6_subnet(
                network, constants.IPV6_SLAAC, ipv6_pd=True)
            net_id = subnet['subnet']['network_id']
            subnet_id = subnet['subnet']['id']
            # update the subnet with a new prefix
            prefix = '2001::/64'
            data = {'subnet': {'cidr': prefix}}
            self.plugin.update_subnet(context.get_admin_context(),
                                      subnet_id, data)
            # create a port and check for an EUI-64 address with the
            # 2001::/64 prefix
            res = self._create_port(self.fmt, net_id=net_id)
            port = self.deserialize(self.fmt, res)
            port_mac = port['port']['mac_address']
            eui_addr = str(netutils.get_ipv6_addr_by_EUI64(prefix,
                                                           port_mac))
            fixedips = [{'subnet_id': subnet_id,
                         'ip_address': eui_addr}]
            self.assertEqual(fixedips, port['port']['fixed_ips'])
            # try to update the port with 2001::2; the update should fail
            # as pd is an auto-address subnet, so it can't have 2001::2
            data = {'port': {"fixed_ips": [{'subnet_id': subnet_id,
                                            'ip_address': '2001::2'}]}}
            req = self.new_update_request('ports', data,
                                          port['port']['id'])
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             res.status_int)

    def test_update_port_invalid_subnet_v6_pd_slaac(self):
        with self.network(name='net') as network:
            subnet = self._make_v6_subnet(
                network, constants.IPV6_SLAAC, ipv6_pd=True)
            subnet_id = subnet['subnet']['id']
            # update the subnet with a new prefix
            prefix = '2001::/64'
            data = {'subnet': {'cidr': prefix}}
            self.plugin.update_subnet(context.get_admin_context(),
                                      subnet_id, data)
            # Create a port on network2
            res = self._create_network(fmt=self.fmt, name='net2',
                                       admin_state_up=True)
            network2 = self.deserialize(self.fmt, res)
            self._make_subnet(self.fmt, network2, "1.1.1.1",
                              "1.1.1.0/24",
                              ip_version=constants.IP_VERSION_4)
            res = self._create_port(self.fmt,
                                    net_id=network2['network']['id'])
            port = self.deserialize(self.fmt, res)
            # try to update the port with the first network's PD subnet
            data = {'port': {"fixed_ips": [{'subnet_id': subnet_id}]}}
            req = self.new_update_request('ports', data,
                                          port['port']['id'])
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             res.status_int)

    def test_requested_invalid_fixed_ip_address_v6_slaac(self):
        with self.subnet(
                gateway_ip='fe80::1',
                cidr='2607:f0d0:1002:51::/64',
                ip_version=constants.IP_VERSION_6,
                ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
            kwargs = {"fixed_ips": [
                {'subnet_id': subnet['subnet']['id'],
                 'ip_address': '2607:f0d0:1002:51::5'}]}
            net_id = subnet['subnet']['network_id']
            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             res.status_int)

    def test_requested_fixed_ip_address_v6_slaac_router_iface(self):
        with self.subnet(
                gateway_ip='fe80::1',
                cidr='fe80::/64',
                ip_version=constants.IP_VERSION_6,
                ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
            kwargs = {"fixed_ips": [
                {'subnet_id': subnet['subnet']['id'],
                 'ip_address': 'fe80::1'}]}
            net_id = subnet['subnet']['network_id']
            device_owner = constants.DEVICE_OWNER_ROUTER_INTF
            res = self._create_port(self.fmt, net_id=net_id,
                                    device_owner=device_owner, **kwargs)
            port = self.deserialize(self.fmt, res)
            self.assertEqual(len(port['port']['fixed_ips']), 1)
            self.assertEqual(port['port']['fixed_ips'][0]['ip_address'],
                             'fe80::1')

    def test_requested_subnet_id_v6_slaac(self):
        with self.subnet(
                gateway_ip='fe80::1',
                cidr='2607:f0d0:1002:51::/64',
                ip_version=constants.IP_VERSION_6,
                ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
            with self.port(
                    subnet,
                    fixed_ips=[{'subnet_id':
                                subnet['subnet']['id']}]) as port:
                port_mac = port['port']['mac_address']
                subnet_cidr = subnet['subnet']['cidr']
                eui_addr = str(netutils.get_ipv6_addr_by_EUI64(
                    subnet_cidr, port_mac))
                self.assertEqual(
                    port['port']['fixed_ips'][0]['ip_address'], eui_addr)

    def test_requested_subnet_id_v4_and_v6_slaac(self):
        with self.network() as network:
            with self.subnet(network) as subnet,\
                    self.subnet(
                        network,
                        cidr='2607:f0d0:1002:51::/64',
                        ip_version=constants.IP_VERSION_6,
                        gateway_ip='fe80::1',
                        ipv6_address_mode=constants.IPV6_SLAAC
                    ) as subnet2:
                with self.port(
                        subnet,
                        fixed_ips=[{'subnet_id':
                                    subnet['subnet']['id']},
                                   {'subnet_id':
                                    subnet2['subnet']['id']}]
                ) as port:
                    ips = port['port']['fixed_ips']
                    subnet1_net = netaddr.IPNetwork(
                        subnet['subnet']['cidr'])
                    subnet2_net = netaddr.IPNetwork(
                        subnet2['subnet']['cidr'])
                    network_ip_set = netaddr.IPSet(subnet1_net)
                    network_ip_set.add(subnet2_net)
                    self.assertEqual(2, len(ips))
                    port_mac = port['port']['mac_address']
                    subnet_cidr = subnet2['subnet']['cidr']
                    eui_addr = str(netutils.get_ipv6_addr_by_EUI64(
                        subnet_cidr, port_mac))
                    self.assertIn(ips[0]['ip_address'], network_ip_set)
                    self.assertIn(ips[1]['ip_address'], network_ip_set)
                    self.assertIn({'ip_address': eui_addr,
                                   'subnet_id':
                                   subnet2['subnet']['id']}, ips)

    def test_create_router_port_ipv4_and_ipv6_slaac_no_fixed_ips(self):
        with self.network() as network:
            # Create an IPv4 and an IPv6 SLAAC subnet on the network
            with self.subnet(network) as subnet_v4,\
                    self.subnet(network,
                                cidr='2607:f0d0:1002:51::/64',
                                ip_version=constants.IP_VERSION_6,
                                gateway_ip='fe80::1',
                                ipv6_address_mode=constants.IPV6_SLAAC):
                subnet_ip_net = netaddr.IPNetwork(
                    subnet_v4['subnet']['cidr'])
                # Create a router port without specifying fixed_ips
                port = self._make_port(
                    self.fmt, network['network']['id'],
                    device_owner=constants.DEVICE_OWNER_ROUTER_INTF)
                # Router port should only have an IPv4 address
                fixed_ips = port['port']['fixed_ips']
                self.assertEqual(1, len(fixed_ips))
                self.assertIn(fixed_ips[0]['ip_address'], subnet_ip_net)

    @staticmethod
    def _calc_ipv6_addr_by_EUI64(port, subnet):
        port_mac = port['port']['mac_address']
        subnet_cidr = subnet['subnet']['cidr']
        return str(netutils.get_ipv6_addr_by_EUI64(subnet_cidr, port_mac))

    def test_ip_allocation_for_ipv6_subnet_slaac_address_mode(self):
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        subnet = self._make_v6_subnet(network, constants.IPV6_SLAAC)
        port = self._make_port(self.fmt, network['network']['id'])
        self.assertEqual(1, len(port['port']['fixed_ips']))
        self.assertEqual(self._calc_ipv6_addr_by_EUI64(port, subnet),
                         port['port']['fixed_ips'][0]['ip_address'])

    def _test_create_port_with_ipv6_subnet_in_fixed_ips(self, addr_mode,
                                                        ipv6_pd=False):
        """Test port create with an IPv6 subnet incl in fixed IPs."""
        with self.network(name='net') as network:
            subnet = self._make_v6_subnet(network, addr_mode, ipv6_pd)
            subnet_id = subnet['subnet']['id']
            fixed_ips = [{'subnet_id': subnet_id}]
            with self.port(subnet=subnet, fixed_ips=fixed_ips) as port:
                port_fixed_ips = port['port']['fixed_ips']
                self.assertEqual(1, len(port_fixed_ips))
                if addr_mode == constants.IPV6_SLAAC:
                    exp_ip_addr = self._calc_ipv6_addr_by_EUI64(port,
                                                                subnet)
                    self.assertEqual(exp_ip_addr,
                                     port_fixed_ips[0]['ip_address'])
                self.assertIn(port_fixed_ips[0]['ip_address'],
                              netaddr.IPNetwork(
                                  subnet['subnet']['cidr']))

    def test_create_port_with_ipv6_slaac_subnet_in_fixed_ips(self):
        self._test_create_port_with_ipv6_subnet_in_fixed_ips(
            addr_mode=constants.IPV6_SLAAC)

    def test_create_port_with_ipv6_pd_subnet_in_fixed_ips(self):
        self._test_create_port_with_ipv6_subnet_in_fixed_ips(
            addr_mode=constants.IPV6_SLAAC, ipv6_pd=True)

    def test_create_port_with_ipv6_dhcp_stateful_subnet_in_fixed_ips(self):
        self._test_create_port_with_ipv6_subnet_in_fixed_ips(
            addr_mode=constants.DHCPV6_STATEFUL)
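
    # NOTE: auto-address (SLAAC) subnets behave differently from IPv4 and
    # DHCPv6-stateful subnets during allocation: every port on the
    # network receives an address on each auto-address subnet, while the
    # other subnets contribute one address only when selected.  That is
    # why the next test expects four fixed IPs from six subnets: one
    # IPv4, one DHCPv6-stateful, and both SLAAC addresses.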

    def test_create_port_with_multiple_ipv4_and_ipv6_subnets(self):
        """Test port create with multiple IPv4, IPv6 DHCP/SLAAC subnets."""
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        sub_dicts = [
            {'gateway': '10.0.0.1', 'cidr': '10.0.0.0/24',
             'ip_version': constants.IP_VERSION_4,
             'ra_addr_mode': None},
            {'gateway': '10.0.1.1', 'cidr': '10.0.1.0/24',
             'ip_version': constants.IP_VERSION_4,
             'ra_addr_mode': None},
            {'gateway': 'fe80::1', 'cidr': 'fe80::/64',
             'ip_version': constants.IP_VERSION_6,
             'ra_addr_mode': constants.IPV6_SLAAC},
            {'gateway': 'fe81::1', 'cidr': 'fe81::/64',
             'ip_version': constants.IP_VERSION_6,
             'ra_addr_mode': constants.IPV6_SLAAC},
            {'gateway': 'fe82::1', 'cidr': 'fe82::/64',
             'ip_version': constants.IP_VERSION_6,
             'ra_addr_mode': constants.DHCPV6_STATEFUL},
            {'gateway': 'fe83::1', 'cidr': 'fe83::/64',
             'ip_version': constants.IP_VERSION_6,
             'ra_addr_mode': constants.DHCPV6_STATEFUL}]
        subnets = {}
        for sub_dict in sub_dicts:
            subnet = self._make_subnet(
                self.fmt, network,
                gateway=sub_dict['gateway'],
                cidr=sub_dict['cidr'],
                ip_version=sub_dict['ip_version'],
                ipv6_ra_mode=sub_dict['ra_addr_mode'],
                ipv6_address_mode=sub_dict['ra_addr_mode'])
            subnets[subnet['subnet']['id']] = sub_dict
        res = self._create_port(self.fmt,
                                net_id=network['network']['id'])
        port = self.deserialize(self.fmt, res)
        # Since the create port request was made without a list of fixed
        # IPs, the port should be associated with addresses for one of
        # the IPv4 subnets, one of the DHCPv6 subnets, and both of the
        # IPv6 SLAAC subnets.
        self.assertEqual(4, len(port['port']['fixed_ips']))
        addr_mode_count = {None: 0, constants.DHCPV6_STATEFUL: 0,
                           constants.IPV6_SLAAC: 0}
        for fixed_ip in port['port']['fixed_ips']:
            subnet_id = fixed_ip['subnet_id']
            if subnet_id in subnets:
                addr_mode_count[subnets[subnet_id]['ra_addr_mode']] += 1
        self.assertEqual(1, addr_mode_count[None])
        self.assertEqual(1, addr_mode_count[constants.DHCPV6_STATEFUL])
        self.assertEqual(2, addr_mode_count[constants.IPV6_SLAAC])

    def test_delete_port_with_ipv6_slaac_address(self):
        """Test that a port with an IPv6 SLAAC address can be deleted."""
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        # Create a port that has an associated IPv6 SLAAC address
        self._make_v6_subnet(network, constants.IPV6_SLAAC)
        res = self._create_port(self.fmt,
                                net_id=network['network']['id'])
        port = self.deserialize(self.fmt, res)
        self.assertEqual(1, len(port['port']['fixed_ips']))
        # Confirm that the port can be deleted
        self._delete('ports', port['port']['id'])
        self._show('ports', port['port']['id'],
                   expected_code=webob.exc.HTTPNotFound.code)

    def test_update_port_with_ipv6_slaac_subnet_in_fixed_ips(self):
        """Test port update with an IPv6 SLAAC subnet in fixed IPs."""
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        # Create a port using an IPv4 subnet and an IPv6 SLAAC subnet
        self._make_subnet(self.fmt, network, gateway='10.0.0.1',
                          cidr='10.0.0.0/24',
                          ip_version=constants.IP_VERSION_4)
        subnet_v6 = self._make_v6_subnet(network, constants.IPV6_SLAAC)
        res = self._create_port(self.fmt,
                                net_id=network['network']['id'])
        port = self.deserialize(self.fmt, res)
        self.assertEqual(2, len(port['port']['fixed_ips']))
        # Update port including only the IPv6 SLAAC subnet
        data = {'port': {'fixed_ips': [
            {'subnet_id': subnet_v6['subnet']['id']}]}}
        req = self.new_update_request('ports', data, port['port']['id'])
        res = self.deserialize(self.fmt, req.get_response(self.api))
        # Port should only have an address corresponding to IPv6 SLAAC
        # subnet
        ips = res['port']['fixed_ips']
        self.assertEqual(1, len(ips))
        self.assertEqual(self._calc_ipv6_addr_by_EUI64(port, subnet_v6),
                         ips[0]['ip_address'])

    def test_update_port_with_new_ipv6_slaac_subnet_in_fixed_ips(self):
        """Test port update with a new IPv6 SLAAC subnet in fixed IPs."""
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        # Create a port using an IPv4 subnet and an IPv6 SLAAC subnet
        subnet_v4 = self._make_subnet(self.fmt, network,
                                      gateway='10.0.0.1',
                                      cidr='10.0.0.0/24',
                                      ip_version=constants.IP_VERSION_4)
        subnet_v6 = self._make_v6_subnet(network, constants.IPV6_SLAAC)
        res = self._create_port(self.fmt,
                                net_id=network['network']['id'])
        port = self.deserialize(self.fmt, res)
        self.assertEqual(2, len(port['port']['fixed_ips']))
        # Update port to have only IPv4 address
        ips = [{'subnet_id': subnet_v4['subnet']['id']},
               {'subnet_id': subnet_v6['subnet']['id'],
                'delete_subnet': True}]
        data = {'port': {'fixed_ips': ips}}
        req = self.new_update_request('ports', data, port['port']['id'])
        res = self.deserialize(self.fmt, req.get_response(self.api))
        # Port should only have an address corresponding to IPv4 subnet
        ips = res['port']['fixed_ips']
        self.assertEqual(1, len(ips))
        self.assertEqual(subnet_v4['subnet']['id'], ips[0]['subnet_id'])
        # Now update port and request an additional address on the IPv6
        # SLAAC subnet.
        ips.append({'subnet_id': subnet_v6['subnet']['id']})
        data = {'port': {'fixed_ips': ips}}
        req = self.new_update_request('ports', data, port['port']['id'])
        res = self.deserialize(self.fmt, req.get_response(self.api))
        ips = res['port']['fixed_ips']
        # Port should have IPs on both IPv4 and IPv6 subnets
        self.assertEqual(2, len(ips))
        self.assertEqual(set([subnet_v4['subnet']['id'],
                              subnet_v6['subnet']['id']]),
                         set([ip['subnet_id'] for ip in ips]))

    def test_update_port_excluding_ipv6_slaac_subnet_from_fixed_ips(self):
        """Test port update excluding IPv6 SLAAC subnet from fixed ips."""
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        # Create a port using an IPv4 subnet and an IPv6 SLAAC subnet
        subnet_v4 = self._make_subnet(self.fmt, network,
                                      gateway='10.0.0.1',
                                      cidr='10.0.0.0/24',
                                      ip_version=constants.IP_VERSION_4)
        subnet_v6 = self._make_v6_subnet(network, constants.IPV6_SLAAC)
        res = self._create_port(self.fmt,
                                net_id=network['network']['id'])
        port = self.deserialize(self.fmt, res)
        self.assertEqual(2, len(port['port']['fixed_ips']))
        # Update port including only the IPv4 subnet
        data = {'port': {'fixed_ips': [
            {'subnet_id': subnet_v4['subnet']['id'],
             'ip_address': "10.0.0.10"}]}}
        req = self.new_update_request('ports', data, port['port']['id'])
        res = self.deserialize(self.fmt, req.get_response(self.api))
        # Port should still have an addr corresponding to IPv6 SLAAC
        # subnet
        ips = res['port']['fixed_ips']
        self.assertEqual(2, len(ips))
        eui_addr = self._calc_ipv6_addr_by_EUI64(port, subnet_v6)
        expected_v6_ip = {'subnet_id': subnet_v6['subnet']['id'],
                          'ip_address': eui_addr}
        self.assertIn(expected_v6_ip, ips)

    def test_ip_allocation_for_ipv6_2_subnet_slaac_mode(self):
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        v6_subnet_1 = self._make_subnet(
            self.fmt, network, gateway='2001:100::1',
            cidr='2001:100::0/64',
            ip_version=constants.IP_VERSION_6,
            ipv6_ra_mode=constants.IPV6_SLAAC)
        v6_subnet_2 = self._make_subnet(
            self.fmt, network, gateway='2001:200::1',
            cidr='2001:200::0/64',
            ip_version=constants.IP_VERSION_6,
            ipv6_ra_mode=constants.IPV6_SLAAC)
        port = self._make_port(self.fmt, network['network']['id'])
        port_mac = port['port']['mac_address']
        cidr_1 = v6_subnet_1['subnet']['cidr']
        cidr_2 = v6_subnet_2['subnet']['cidr']
        eui_addr_1 = str(netutils.get_ipv6_addr_by_EUI64(cidr_1,
                                                         port_mac))
        eui_addr_2 = str(netutils.get_ipv6_addr_by_EUI64(cidr_2,
                                                         port_mac))
        self.assertEqual({eui_addr_1, eui_addr_2},
                         {fixed_ip['ip_address']
                          for fixed_ip in port['port']['fixed_ips']})
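
    # NOTE: the arithmetic behind the next test: a /29 holds 8 addresses,
    # of which the network and broadcast addresses are never allocated,
    # leaving .1-.6.  With the gateway pinned to one of those six
    # (10.0.0.3 in the first block, 11.0.0.6 in the second), exactly five
    # addresses remain for the five requested fixed IPs.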

    def test_range_allocation(self):
        with self.subnet(gateway_ip='10.0.0.3',
                         cidr='10.0.0.0/29') as subnet:
            kwargs = {"fixed_ips":
                      [{'subnet_id': subnet['subnet']['id']},
                       {'subnet_id': subnet['subnet']['id']},
                       {'subnet_id': subnet['subnet']['id']},
                       {'subnet_id': subnet['subnet']['id']},
                       {'subnet_id': subnet['subnet']['id']}]}
            net_id = subnet['subnet']['network_id']
            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
            port = self.deserialize(self.fmt, res)
            ips = port['port']['fixed_ips']
            self.assertEqual(5, len(ips))
            alloc = ['10.0.0.1', '10.0.0.2', '10.0.0.4', '10.0.0.5',
                     '10.0.0.6']
            for ip in ips:
                self.assertIn(ip['ip_address'], alloc)
                self.assertEqual(ip['subnet_id'],
                                 subnet['subnet']['id'])
                alloc.remove(ip['ip_address'])
            self.assertEqual(0, len(alloc))
            self._delete('ports', port['port']['id'])
        with self.subnet(gateway_ip='11.0.0.6',
                         cidr='11.0.0.0/29') as subnet:
            kwargs = {"fixed_ips":
                      [{'subnet_id': subnet['subnet']['id']},
                       {'subnet_id': subnet['subnet']['id']},
                       {'subnet_id': subnet['subnet']['id']},
                       {'subnet_id': subnet['subnet']['id']},
                       {'subnet_id': subnet['subnet']['id']}]}
            net_id = subnet['subnet']['network_id']
            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
            port = self.deserialize(self.fmt, res)
            ips = port['port']['fixed_ips']
            self.assertEqual(5, len(ips))
            alloc = ['11.0.0.1', '11.0.0.2', '11.0.0.3', '11.0.0.4',
                     '11.0.0.5']
            for ip in ips:
                self.assertIn(ip['ip_address'], alloc)
                self.assertEqual(ip['subnet_id'],
                                 subnet['subnet']['id'])
                alloc.remove(ip['ip_address'])
            self.assertEqual(0, len(alloc))
            self._delete('ports', port['port']['id'])

    def test_requested_invalid_fixed_ips(self):
        with self.subnet() as subnet:
            subnet_ip_net = netaddr.IPNetwork(subnet['subnet']['cidr'])
            with self.port(subnet=subnet) as port:
                ips = port['port']['fixed_ips']
                self.assertEqual(1, len(ips))
                self.assertIn(ips[0]['ip_address'], subnet_ip_net)
                self.assertEqual(ips[0]['subnet_id'],
                                 subnet['subnet']['id'])
                # Test an invalid subnet_id
                kwargs = {"fixed_ips":
                          [{'subnet_id': subnet['subnet']['id']},
                           {'subnet_id':
                            '00000000-ffff-ffff-ffff-000000000000'}]}
                net_id = port['port']['network_id']
                res = self._create_port(self.fmt, net_id=net_id,
                                        **kwargs)
                port2 = self.deserialize(self.fmt, res)
                self.assertEqual(webob.exc.HTTPNotFound.code,
                                 res.status_int)

                # Test an invalid IP address on the specified subnet_id
                kwargs = {"fixed_ips":
                          [{'subnet_id': subnet['subnet']['id'],
                            'ip_address': '1.1.1.1'}]}
                net_id = port['port']['network_id']
                res = self._create_port(self.fmt, net_id=net_id,
                                        **kwargs)
                port2 = self.deserialize(self.fmt, res)
                self.assertEqual(webob.exc.HTTPClientError.code,
                                 res.status_int)

                # Test invalid addresses - IPs not on the subnet, or the
                # network address or broadcast address
                bad_ips = ['1.1.1.1', '10.0.0.0', '10.0.0.255']
                net_id = port['port']['network_id']
                for ip in bad_ips:
                    kwargs = {"fixed_ips": [{'ip_address': ip}]}
                    res = self._create_port(self.fmt, net_id=net_id,
                                            **kwargs)
                    port2 = self.deserialize(self.fmt, res)
                    self.assertEqual(webob.exc.HTTPClientError.code,
                                     res.status_int)

                # Enable allocation of the gateway address
                kwargs = {"fixed_ips":
                          [{'subnet_id': subnet['subnet']['id'],
                            'ip_address': '10.0.0.1'}]}
                net_id = port['port']['network_id']
                res = self._create_port(self.fmt, net_id=net_id,
                                        **kwargs)
                port2 = self.deserialize(self.fmt, res)
                ips = port2['port']['fixed_ips']
                self.assertEqual(1, len(ips))
                self.assertEqual('10.0.0.1', ips[0]['ip_address'])
                self.assertEqual(subnet['subnet']['id'],
                                 ips[0]['subnet_id'])
                self._delete('ports', port2['port']['id'])

    def test_invalid_ip(self):
        with self.subnet() as subnet:
            # Allocate a specific (malformed) IP
            kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
                                     'ip_address': '1011.0.0.5'}]}
            net_id = subnet['subnet']['network_id']
            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             res.status_int)

    def test_duplicate_ips(self):
        with self.subnet() as subnet:
            # Allocate the same specific IP twice
            kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
                                     'ip_address': '10.0.0.5'},
                                    {'subnet_id': subnet['subnet']['id'],
                                     'ip_address': '10.0.0.5'}]}
            net_id = subnet['subnet']['network_id']
            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             res.status_int)

    def test_fixed_ip_invalid_subnet_id(self):
        with self.subnet() as subnet:
            # Allocate a specific IP with a bogus subnet_id
            kwargs = {"fixed_ips": [{'subnet_id': 'i am invalid',
                                     'ip_address': '10.0.0.5'}]}
            net_id = subnet['subnet']['network_id']
            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             res.status_int)

    def test_fixed_ip_invalid_ip(self):
        with self.subnet() as subnet:
            # Allocate a specific (malformed) IP
            kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
                                     'ip_address': '10.0.0.55555'}]}
            net_id = subnet['subnet']['network_id']
            res = self._create_port(self.fmt, net_id=net_id, **kwargs)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             res.status_int)

    def test_requested_ips_only(self):
        with self.subnet() as subnet:
            fixed_ip_data = [{'ip_address': '10.0.0.2',
                              'subnet_id': subnet['subnet']['id']}]
            with self.port(subnet=subnet,
                           fixed_ips=fixed_ip_data) as port:
                ips = port['port']['fixed_ips']
                self.assertEqual(1, len(ips))
                self.assertEqual('10.0.0.2', ips[0]['ip_address'])
                self.assertEqual(ips[0]['subnet_id'],
                                 subnet['subnet']['id'])
                ips_only = ['10.0.0.18', '10.0.0.20', '10.0.0.22',
                            '10.0.0.21', '10.0.0.3', '10.0.0.17',
                            '10.0.0.19']
                ports_to_delete = []
                for i in ips_only:
                    kwargs = {"fixed_ips": [{'ip_address': i}]}
                    net_id = port['port']['network_id']
                    res = self._create_port(self.fmt, net_id=net_id,
                                            **kwargs)
                    port = self.deserialize(self.fmt, res)
                    ports_to_delete.append(port)
                    ips = port['port']['fixed_ips']
                    self.assertEqual(1, len(ips))
                    self.assertEqual(i, ips[0]['ip_address'])
                    self.assertEqual(subnet['subnet']['id'],
                                     ips[0]['subnet_id'])
                for p in ports_to_delete:
                    self._delete('ports', p['port']['id'])

    def test_invalid_admin_state(self):
        with self.network() as network:
            data = {'port': {'network_id': network['network']['id'],
                             'tenant_id':
                             network['network']['tenant_id'],
                             'admin_state_up': 7,
                             'fixed_ips': []}}
            port_req = self.new_create_request('ports', data)
            res = port_req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             res.status_int)

    def test_invalid_mac_address(self):
        with self.network() as network:
            data = {'port': {'network_id': network['network']['id'],
                             'tenant_id':
                             network['network']['tenant_id'],
                             'admin_state_up': 1,
                             'mac_address': 'mac',
                             'fixed_ips': []}}
            port_req = self.new_create_request('ports', data)
            res = port_req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             res.status_int)

    def test_delete_ports_by_device_id(self):
        plugin = directory.get_plugin()
        ctx = context.get_admin_context()
        with self.subnet() as subnet:
            with self.port(subnet=subnet, device_id='owner1') as p1,\
                    self.port(subnet=subnet, device_id='owner1') as p2,\
                    self.port(subnet=subnet, device_id='owner2') as p3:
                network_id = subnet['subnet']['network_id']
                plugin.delete_ports_by_device_id(ctx, 'owner1',
                                                 network_id)
                self._show('ports', p1['port']['id'],
                           expected_code=webob.exc.HTTPNotFound.code)
                self._show('ports', p2['port']['id'],
                           expected_code=webob.exc.HTTPNotFound.code)
                self._show('ports', p3['port']['id'],
                           expected_code=webob.exc.HTTPOk.code)
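
    # NOTE: the _fail_second_call() helper used below (defined earlier in
    # this test base class) lets the first patched delete_port call run
    # the real implementation and raises on the second call.  As a
    # result exactly one of the two 'owner1' ports is deleted before the
    # failure, which is why the test accepts either port being gone: it
    # asserts the pair of statuses is {404, 200} rather than a fixed
    # order.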

    def _test_delete_ports_by_device_id_second_call_failure(self, plugin):
        ctx = context.get_admin_context()
        with self.subnet() as subnet:
            with self.port(subnet=subnet, device_id='owner1') as p1,\
                    self.port(subnet=subnet, device_id='owner1') as p2,\
                    self.port(subnet=subnet, device_id='owner2') as p3:
                orig = plugin.delete_port
                with mock.patch.object(plugin, 'delete_port') as del_port:

                    def side_effect(*args, **kwargs):
                        return self._fail_second_call(del_port, orig,
                                                      *args, **kwargs)

                    del_port.side_effect = side_effect
                    network_id = subnet['subnet']['network_id']
                    self.assertRaises(lib_exc.NeutronException,
                                      plugin.delete_ports_by_device_id,
                                      ctx, 'owner1', network_id)
                statuses = {
                    self._show_response('ports',
                                        p['port']['id']).status_int
                    for p in [p1, p2]}
                expected = {webob.exc.HTTPNotFound.code,
                            webob.exc.HTTPOk.code}
                self.assertEqual(expected, statuses)
                self._show('ports', p3['port']['id'],
                           expected_code=webob.exc.HTTPOk.code)

    def test_delete_ports_by_device_id_second_call_failure(self):
        plugin = directory.get_plugin()
        self._test_delete_ports_by_device_id_second_call_failure(plugin)

    def _test_delete_ports_ignores_port_not_found(self, plugin):
        ctx = context.get_admin_context()
        with self.subnet() as subnet:
            with self.port(subnet=subnet, device_id='owner1') as p,\
                    mock.patch.object(plugin, 'delete_port') as del_port:
                del_port.side_effect = lib_exc.PortNotFound(
                    port_id=p['port']['id']
                )
                network_id = subnet['subnet']['network_id']
                try:
                    plugin.delete_ports_by_device_id(ctx, 'owner1',
                                                     network_id)
                except lib_exc.PortNotFound:
                    self.fail("delete_ports_by_device_id unexpectedly "
                              "raised a PortNotFound exception. It "
                              "should ignore this exception because it "
                              "is often called at the same time other "
                              "concurrent operations are deleting some "
                              "of the same ports.")

    def test_delete_ports_ignores_port_not_found(self):
        plugin = directory.get_plugin()
        self._test_delete_ports_ignores_port_not_found(plugin)


class TestNetworksV2(NeutronDbPluginV2TestCase):
    # NOTE(cerberus): successful network update and delete are
    # effectively tested above

    def test_create_network(self):
        name = 'net1'
        keys = [('subnets', []), ('name', name),
                ('admin_state_up', True),
                ('status', self.net_create_status),
                ('shared', False)]
        with self.network(name=name) as net:
            for k, v in keys:
                self.assertEqual(net['network'][k], v)

    def test_create_public_network(self):
        name = 'public_net'
        keys = [('subnets', []), ('name', name),
                ('admin_state_up', True),
                ('status', self.net_create_status),
                ('shared', True)]
        with self.network(name=name, shared=True) as net:
            for k, v in keys:
                self.assertEqual(net['network'][k], v)

    def test_create_public_network_no_admin_tenant(self):
        name = 'public_net'
        with testlib_api.ExpectedException(
                webob.exc.HTTPClientError) as ctx_manager:
            with self.network(name=name, shared=True,
                              tenant_id="another_tenant",
                              set_context=True):
                pass
        self.assertEqual(webob.exc.HTTPForbidden.code,
                         ctx_manager.exception.code)

    def test_update_network(self):
        with self.network() as network:
            data = {'network': {'name': 'a_brand_new_name'}}
            req = self.new_update_request('networks', data,
                                          network['network']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(data['network']['name'],
                             res['network']['name'])

    def test_update_shared_network_noadmin_returns_403(self):
        with self.network(shared=True) as network:
            data = {'network': {'name': 'a_brand_new_name'}}
            req = self.new_update_request('networks', data,
                                          network['network']['id'])
            req.environ['neutron.context'] = context.Context('',
                                                             'somebody')
            res = req.get_response(self.api)
            self.assertEqual(403, res.status_int)

    def test_update_network_set_shared(self):
        with self.network(shared=False) as network:
            data = {'network': {'shared': True}}
            req = self.new_update_request('networks', data,
                                          network['network']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertTrue(res['network']['shared'])

    def test_update_network_set_shared_owner_returns_403(self):
        with self.network(shared=False) as network:
            net_owner = network['network']['tenant_id']
            data = {'network': {'shared': True}}
            req = self.new_update_request('networks', data,
                                          network['network']['id'])
            req.environ['neutron.context'] = context.Context('u',
                                                             net_owner)
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPForbidden.code,
                             res.status_int)

    def test_update_network_with_subnet_set_shared(self):
        with self.network(shared=False) as network:
            with self.subnet(network=network) as subnet:
                data = {'network': {'shared': True}}
                req = self.new_update_request('networks', data,
                                              network['network']['id'])
                res = self.deserialize(self.fmt,
                                       req.get_response(self.api))
                self.assertTrue(res['network']['shared'])
                # must query db to see whether subnet's shared attribute
                # has been updated or not
                ctx = context.Context('', '', is_admin=True)
                subnet_db = directory.get_plugin().get_subnet(
                    ctx, subnet['subnet']['id'])
                self.assertTrue(subnet_db['shared'])
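
    # NOTE: the "set not shared" tests that follow exercise the rule that
    # a network can only be unshared while no other tenant still has
    # ports or subnets on it: the owner's own resources are fine, any
    # foreign port or subnet turns the update into a 409, and RBAC
    # access_as_shared entries can keep selected tenants' access alive
    # even after shared goes False.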
{'network': {'shared': False}} req = self.new_update_request('networks', data, network['network']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertFalse(res['network']['shared']) port1 = self.deserialize(self.fmt, res1) self._delete('ports', port1['port']['id']) def test_update_network_set_not_shared_other_tenant_returns_409(self): with self.network(shared=True) as network: res1 = self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPCreated.code, tenant_id='somebody_else', set_context=True) data = {'network': {'shared': False}} req = self.new_update_request('networks', data, network['network']['id']) self.assertEqual(webob.exc.HTTPConflict.code, req.get_response(self.api).status_int) port1 = self.deserialize(self.fmt, res1) self._delete('ports', port1['port']['id']) def test_update_network_set_not_shared_other_tenant_access_via_rbac(self): with self.network(shared=True) as network: ctx = context.get_admin_context() with db_api.CONTEXT_WRITER.using(ctx): network_obj.NetworkRBAC( ctx, object_id=network['network']['id'], action='access_as_shared', project_id=network['network']['tenant_id'], target_tenant='somebody_else').create() network_obj.NetworkRBAC( ctx, object_id=network['network']['id'], action='access_as_shared', project_id=network['network']['tenant_id'], target_tenant='one_more_somebody_else').create() res1 = self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPCreated.code, tenant_id='somebody_else', set_context=True) data = {'network': {'shared': False}} req = self.new_update_request('networks', data, network['network']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertFalse(res['network']['shared']) port1 = self.deserialize(self.fmt, res1) self._delete('ports', port1['port']['id']) def test_update_network_set_not_shared_multi_tenants_returns_409(self): with self.network(shared=True) as network: res1 = self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPCreated.code, tenant_id='somebody_else', set_context=True) res2 = self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPCreated.code, tenant_id=network['network']['tenant_id'], set_context=True) data = {'network': {'shared': False}} req = self.new_update_request('networks', data, network['network']['id']) self.assertEqual(webob.exc.HTTPConflict.code, req.get_response(self.api).status_int) port1 = self.deserialize(self.fmt, res1) port2 = self.deserialize(self.fmt, res2) self._delete('ports', port1['port']['id']) self._delete('ports', port2['port']['id']) def test_update_network_set_not_shared_multi_tenants2_returns_409(self): with self.network(shared=True) as network: res1 = self._create_port(self.fmt, network['network']['id'], webob.exc.HTTPCreated.code, tenant_id='somebody_else', set_context=True) self._create_subnet(self.fmt, network['network']['id'], '10.0.0.0/24', webob.exc.HTTPCreated.code, tenant_id=network['network']['tenant_id'], set_context=True) data = {'network': {'shared': False}} req = self.new_update_request('networks', data, network['network']['id']) self.assertEqual(webob.exc.HTTPConflict.code, req.get_response(self.api).status_int) port1 = self.deserialize(self.fmt, res1) self._delete('ports', port1['port']['id']) def test_create_networks_bulk_native(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk network create") res = self._create_network_bulk(self.fmt, 2, 'test', True) self._validate_behavior_on_bulk_success(res, 'networks') def 
test_create_networks_native_quotas(self): quota = 1 cfg.CONF.set_override('quota_network', quota, group='QUOTAS') res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) self.assertEqual(webob.exc.HTTPConflict.code, res.status_int) def test_create_networks_bulk_native_quotas(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk network create") quota = 4 cfg.CONF.set_override('quota_network', quota, group='QUOTAS') res = self._create_network_bulk(self.fmt, quota + 1, 'test', True) self._validate_behavior_on_bulk_failure( res, 'networks', errcode=webob.exc.HTTPConflict.code) def test_create_networks_bulk_tenants_and_quotas(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk network create") quota = 2 cfg.CONF.set_override('quota_network', quota, group='QUOTAS') networks = [{'network': {'name': 'n1', 'tenant_id': self._tenant_id}}, {'network': {'name': 'n2', 'tenant_id': self._tenant_id}}, {'network': {'name': 'n1', 'tenant_id': 't1'}}, {'network': {'name': 'n2', 'tenant_id': 't1'}}] res = self._create_bulk_from_list(self.fmt, 'network', networks) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) def test_create_networks_bulk_tenants_and_quotas_fail(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk network create") quota = 2 cfg.CONF.set_override('quota_network', quota, group='QUOTAS') networks = [{'network': {'name': 'n1', 'tenant_id': self._tenant_id}}, {'network': {'name': 'n2', 'tenant_id': self._tenant_id}}, {'network': {'name': 'n1', 'tenant_id': 't1'}}, {'network': {'name': 'n3', 'tenant_id': self._tenant_id}}, {'network': {'name': 'n2', 'tenant_id': 't1'}}] res = self._create_bulk_from_list(self.fmt, 'network', networks) self.assertEqual(webob.exc.HTTPConflict.code, res.status_int) def test_create_networks_bulk_emulated(self): real_has_attr = hasattr # ensures the API choose the emulation code path def fakehasattr(item, attr): if attr.endswith('__native_bulk_support'): return False return real_has_attr(item, attr) with mock.patch('six.moves.builtins.hasattr', new=fakehasattr): res = self._create_network_bulk(self.fmt, 2, 'test', True) self._validate_behavior_on_bulk_success(res, 'networks') def test_create_networks_bulk_wrong_input(self): res = self._create_network_bulk(self.fmt, 2, 'test', True, override={1: {'admin_state_up': 'doh'}}) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) req = self.new_list_request('networks') res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPOk.code, res.status_int) nets = self.deserialize(self.fmt, res) self.assertEqual(0, len(nets['networks'])) def test_create_networks_bulk_emulated_plugin_failure(self): real_has_attr = hasattr def fakehasattr(item, attr): if attr.endswith('__native_bulk_support'): return False return real_has_attr(item, attr) orig = directory.get_plugin().create_network # ensures the API choose the emulation code path with mock.patch('six.moves.builtins.hasattr', new=fakehasattr): method_to_patch = _get_create_db_method('network') with mock.patch.object(directory.get_plugin(), method_to_patch) as patched_plugin: def side_effect(*args, **kwargs): return self._fail_second_call(patched_plugin, orig, *args, **kwargs) patched_plugin.side_effect = side_effect res = self._create_network_bulk(self.fmt, 2, 'test', True) # We expect a 500 as we injected a 
fault in the plugin self._validate_behavior_on_bulk_failure( res, 'networks', webob.exc.HTTPServerError.code ) def test_create_networks_bulk_native_plugin_failure(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk network create") orig = directory.get_plugin().create_network method_to_patch = _get_create_db_method('network') with mock.patch.object(directory.get_plugin(), method_to_patch) as patched_plugin: def side_effect(*args, **kwargs): return self._fail_second_call(patched_plugin, orig, *args, **kwargs) patched_plugin.side_effect = side_effect res = self._create_network_bulk(self.fmt, 2, 'test', True) # We expect a 500 as we injected a fault in the plugin self._validate_behavior_on_bulk_failure( res, 'networks', webob.exc.HTTPServerError.code ) def test_list_networks(self): with self.network() as v1, self.network() as v2, self.network() as v3: networks = (v1, v2, v3) self._test_list_resources('network', networks) def test_list_networks_with_sort_native(self): if self._skip_native_sorting: self.skipTest("Skip test for not implemented sorting feature") with self.network(admin_state_up=True, name='net1') as net1,\ self.network(admin_state_up=False, name='net2') as net2,\ self.network(admin_state_up=False, name='net3') as net3: self._test_list_with_sort('network', (net3, net2, net1), [('admin_state_up', 'asc'), ('name', 'desc')]) def test_list_networks_with_sort_extended_attr_native_returns_400(self): if self._skip_native_sorting: self.skipTest("Skip test for not implemented sorting feature") with self.network(admin_state_up=True, name='net1'),\ self.network(admin_state_up=False, name='net2'),\ self.network(admin_state_up=False, name='net3'): req = self.new_list_request( 'networks', params='sort_key=provider:segmentation_id&sort_dir=asc') res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_list_networks_with_sort_remote_key_native_returns_400(self): if self._skip_native_sorting: self.skipTest("Skip test for not implemented sorting feature") with self.network(admin_state_up=True, name='net1'),\ self.network(admin_state_up=False, name='net2'),\ self.network(admin_state_up=False, name='net3'): req = self.new_list_request( 'networks', params='sort_key=subnets&sort_dir=asc') res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_list_networks_with_sort_emulated(self): helper_patcher = mock.patch( 'neutron.api.v2.base.Controller._get_sorting_helper', new=_fake_get_sorting_helper) helper_patcher.start() with self.network(admin_state_up=True, name='net1') as net1,\ self.network(admin_state_up=False, name='net2') as net2,\ self.network(admin_state_up=False, name='net3') as net3: self._test_list_with_sort('network', (net3, net2, net1), [('admin_state_up', 'asc'), ('name', 'desc')]) def test_list_networks_with_pagination_native(self): if self._skip_native_pagination: self.skipTest("Skip test for not implemented pagination feature") with self.network(name='net1') as net1,\ self.network(name='net2') as net2,\ self.network(name='net3') as net3: self._test_list_with_pagination('network', (net1, net2, net3), ('name', 'asc'), 2, 2) def test_list_networks_with_pagination_emulated(self): helper_patcher = mock.patch( 'neutron.api.v2.base.Controller._get_pagination_helper', new=_fake_get_pagination_helper) helper_patcher.start() with self.network(name='net1') as net1,\ self.network(name='net2') as net2,\ self.network(name='net3') as net3: 
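            # The helper patched in above swaps the controller's native
            # pagination for the emulated (in-memory) code path, i.e. the
            # path used when a plugin lacks native pagination support.
            # A rough sketch of what marker-based emulated pagination
            # amounts to (hypothetical helper, not the controller's code):
            #
            #     def paginate(items, key, limit, marker=None):
            #         items = sorted(items, key=lambda i: i[key])
            #         if marker is not None:
            #             items = [i for i in items if i[key] > marker]
            #         return items[:limit]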
self._test_list_with_pagination('network', (net1, net2, net3), ('name', 'asc'), 2, 2) def test_list_networks_without_pk_in_fields_pagination_emulated(self): helper_patcher = mock.patch( 'neutron.api.v2.base.Controller._get_pagination_helper', new=_fake_get_pagination_helper) helper_patcher.start() with self.network(name='net1', shared=True) as net1,\ self.network(name='net2', shared=False) as net2,\ self.network(name='net3', shared=True) as net3: self._test_list_with_pagination('network', (net1, net2, net3), ('name', 'asc'), 2, 2, query_params="fields=name", verify_key='name') def test_list_networks_without_pk_in_fields_pagination_native(self): if self._skip_native_pagination: self.skipTest("Skip test for not implemented pagination feature") with self.network(name='net1') as net1,\ self.network(name='net2') as net2,\ self.network(name='net3') as net3: self._test_list_with_pagination('network', (net1, net2, net3), ('name', 'asc'), 2, 2, query_params="fields=shared", verify_key='shared') def test_list_networks_with_pagination_reverse_native(self): if self._skip_native_pagination: self.skipTest("Skip test for not implemented pagination feature") with self.network(name='net1') as net1,\ self.network(name='net2') as net2,\ self.network(name='net3') as net3: self._test_list_with_pagination_reverse('network', (net1, net2, net3), ('name', 'asc'), 2, 2) def test_list_networks_with_pagination_reverse_emulated(self): helper_patcher = mock.patch( 'neutron.api.v2.base.Controller._get_pagination_helper', new=_fake_get_pagination_helper) helper_patcher.start() with self.network(name='net1') as net1,\ self.network(name='net2') as net2,\ self.network(name='net3') as net3: self._test_list_with_pagination_reverse('network', (net1, net2, net3), ('name', 'asc'), 2, 2) def test_list_networks_with_parameters(self): with self.network(name='net1', admin_state_up=False) as net1,\ self.network(name='net2') as net2: query_params = 'admin_state_up=False' self._test_list_resources('network', [net1], query_params=query_params) query_params = 'admin_state_up=True' self._test_list_resources('network', [net2], query_params=query_params) def test_list_networks_with_fields(self): with self.network(name='net1'): req = self.new_list_request('networks', params='fields=name') res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(1, len(res['networks'])) net = res['networks'][0] self.assertEqual('net1', net['name']) self.assertNotIn('id', net) self.assertNotIn('tenant_id', net) self.assertNotIn('project_id', net) def test_list_networks_with_parameters_invalid_values(self): with self.network(name='net1', admin_state_up=False),\ self.network(name='net2'): req = self.new_list_request('networks', params='admin_state_up=fake') res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_list_shared_networks_with_non_admin_user(self): with self.network(shared=False, name='net1', tenant_id='tenant1') as net1,\ self.network(shared=True, name='net2', tenant_id='another_tenant') as net2,\ self.network(shared=False, name='net3', tenant_id='another_tenant'): ctx = context.Context(user_id='non_admin', tenant_id='tenant1', is_admin=False) self._test_list_resources('network', (net1, net2), ctx) def test_show_network(self): with self.network(name='net1') as net: req = self.new_show_request('networks', net['network']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['network']['name'], net['network']['name']) def 
test_show_network_with_subnet(self): with self.network(name='net1') as net: with self.subnet(net) as subnet: req = self.new_show_request('networks', net['network']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['network']['subnets'][0], subnet['subnet']['id']) def test_invalid_admin_status(self): value = [[7, False, webob.exc.HTTPClientError.code], [True, True, webob.exc.HTTPCreated.code], ["True", True, webob.exc.HTTPCreated.code], ["true", True, webob.exc.HTTPCreated.code], [1, True, webob.exc.HTTPCreated.code], ["False", False, webob.exc.HTTPCreated.code], [False, False, webob.exc.HTTPCreated.code], ["false", False, webob.exc.HTTPCreated.code], ["7", False, webob.exc.HTTPClientError.code]] for v in value: data = {'network': {'name': 'net', 'admin_state_up': v[0], 'tenant_id': self._tenant_id}} network_req = self.new_create_request('networks', data) req = network_req.get_response(self.api) self.assertEqual(req.status_int, v[2]) if v[2] == webob.exc.HTTPCreated.code: res = self.deserialize(self.fmt, req) self.assertEqual(res['network']['admin_state_up'], v[1]) class TestSubnetsV2(NeutronDbPluginV2TestCase): def _test_create_subnet(self, network=None, expected=None, **kwargs): keys = kwargs.copy() keys.setdefault('cidr', '10.0.0.0/24') keys.setdefault('ip_version', constants.IP_VERSION_4) keys.setdefault('enable_dhcp', True) with self.subnet(network=network, **keys) as subnet: # verify the response has each key with the correct value self._validate_resource(subnet, keys, 'subnet') # verify the configured validations are correct if expected: self._compare_resource(subnet, expected, 'subnet') self._delete('subnets', subnet['subnet']['id']) return subnet def test_create_subnet(self): gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' subnet = self._test_create_subnet(gateway_ip=gateway_ip, cidr=cidr) self.assertEqual(constants.IP_VERSION_4, subnet['subnet']['ip_version']) self.assertIn('name', subnet['subnet']) def test_create_subnet_with_network_different_tenant(self): with self.network(shared=False, tenant_id='tenant1') as network: ctx = context.Context(user_id='non_admin', tenant_id='tenant2', is_admin=False) data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0/24', 'ip_version': constants.IP_VERSION_4, 'gateway_ip': '10.0.2.1'}} req = self.new_create_request('subnets', data, self.fmt, context=ctx) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int) def test_create_two_subnets(self): gateway_ips = ['10.0.0.1', '10.0.1.1'] cidrs = ['10.0.0.0/24', '10.0.1.0/24'] with self.network() as network: with self.subnet(network=network, gateway_ip=gateway_ips[0], cidr=cidrs[0]): with self.subnet(network=network, gateway_ip=gateway_ips[1], cidr=cidrs[1]): net_req = self.new_show_request('networks', network['network']['id']) raw_res = net_req.get_response(self.api) net_res = self.deserialize(self.fmt, raw_res) for subnet_id in net_res['network']['subnets']: sub_req = self.new_show_request('subnets', subnet_id) raw_res = sub_req.get_response(self.api) sub_res = self.deserialize(self.fmt, raw_res) self.assertIn(sub_res['subnet']['cidr'], cidrs) self.assertIn(sub_res['subnet']['gateway_ip'], gateway_ips) def test_create_two_subnets_same_cidr_returns_400(self): gateway_ip_1 = '10.0.0.1' cidr_1 = '10.0.0.0/24' gateway_ip_2 = '10.0.0.10' cidr_2 = '10.0.0.0/24' with self.network() as network: with self.subnet(network=network, gateway_ip=gateway_ip_1, cidr=cidr_1): with testlib_api.ExpectedException( 
webob.exc.HTTPClientError) as ctx_manager: with self.subnet(network=network, gateway_ip=gateway_ip_2, cidr=cidr_2): pass self.assertEqual(webob.exc.HTTPClientError.code, ctx_manager.exception.code) def test_create_subnet_bad_V4_cidr(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0', 'ip_version': constants.IP_VERSION_4, 'tenant_id': network['network']['tenant_id'], 'gateway_ip': '10.0.2.1'}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_invalid_gw_V4_cidr(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.0.0/4', 'ip_version': '4', 'tenant_id': network['network']['tenant_id']}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_invalid_gw_32_V4_cidr(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.0.0/4', 'ip_version': constants.IP_VERSION_4, 'tenant_id': network['network']['tenant_id'], 'gateway_ip': '10.0.0.1/32'}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_with_cidr_and_default_subnetpool(self): """Expect subnet-create to keep semantic with default pools.""" with self.network() as network: tenant_id = network['network']['tenant_id'] subnetpool_prefix = '10.0.0.0/8' with self.subnetpool(prefixes=[subnetpool_prefix], admin=True, name="My subnet pool", tenant_id=tenant_id, min_prefixlen='25', is_default=True): data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.0.0/24', 'ip_version': constants.IP_VERSION_4, 'tenant_id': tenant_id}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) subnet = self.deserialize(self.fmt, res)['subnet'] self.assertIsNone(subnet['subnetpool_id']) def test_create_subnet_no_cidr_and_default_subnetpool(self): """Expect subnet-create to keep semantic with default pools.""" with self.network() as network: tenant_id = network['network']['tenant_id'] subnetpool_prefix = '10.0.0.0/8' with self.subnetpool(prefixes=[subnetpool_prefix], admin=True, name="My subnet pool", tenant_id=tenant_id, min_prefixlen='25', is_default=True): data = {'subnet': {'network_id': network['network']['id'], 'ip_version': constants.IP_VERSION_4, 'tenant_id': tenant_id}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual( webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_no_ip_version(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'tenant_id': network['network']['tenant_id']}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_only_ip_version_v6_no_pool(self): with self.network() as network: tenant_id = network['network']['tenant_id'] cfg.CONF.set_override('ipv6_pd_enabled', False) data = {'subnet': {'network_id': network['network']['id'], 'ip_version': constants.IP_VERSION_6, 'tenant_id': tenant_id}} subnet_req = self.new_create_request('subnets', data) res = 
subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_bad_V4_cidr_prefix_len(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': constants.IPv4_ANY, 'ip_version': constants.IP_VERSION_4, 'tenant_id': network['network']['tenant_id'], 'gateway_ip': '0.0.0.1'}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_bad_V6_cidr(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': 'fe80::', 'ip_version': constants.IP_VERSION_6, 'tenant_id': network['network']['tenant_id'], 'gateway_ip': 'fe80::1'}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_invalid_gw_V6_cidr(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '2001:db8:0:1::/64', 'ip_version': '6', 'tenant_id': network['network']['tenant_id'], 'gateway_ip': '2001:db8::1/64'}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_invalid_gw_128_V6_cidr(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '2001:db8:0:1::/64', 'ip_version': '6', 'tenant_id': network['network']['tenant_id'], 'gateway_ip': '2001:db8:0:1:1/128'}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_V6_slaac_big_prefix(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '2014::/65', 'ip_version': constants.IP_VERSION_6, 'tenant_id': network['network']['tenant_id'], 'gateway_ip': 'fe80::1', 'ipv6_address_mode': 'slaac'}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_2_subnets_overlapping_cidr_allowed_returns_200(self): cidr_1 = '10.0.0.0/23' cidr_2 = '10.0.0.0/24' cfg.CONF.set_override('allow_overlapping_ips', True) with self.subnet(cidr=cidr_1), self.subnet(cidr=cidr_2): pass def test_create_2_subnets_overlapping_cidr_not_allowed_returns_400(self): cidr_1 = '10.0.0.0/23' cidr_2 = '10.0.0.0/24' cfg.CONF.set_override('allow_overlapping_ips', False) with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: with self.subnet(cidr=cidr_1), self.subnet(cidr=cidr_2): pass self.assertEqual(webob.exc.HTTPClientError.code, ctx_manager.exception.code) def test_create_subnets_bulk_native(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk subnet create") with self.network() as net: res = self._create_subnet_bulk(self.fmt, 2, net['network']['id'], 'test') self._validate_behavior_on_bulk_success(res, 'subnets') def test_create_subnets_bulk_native_ipv6(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk subnet create") with self.network() as net: res = self._create_subnet_bulk(self.fmt, 2, net['network']['id'], 'test', ip_version=constants.IP_VERSION_6, ipv6_mode=constants.IPV6_SLAAC) self._validate_behavior_on_bulk_success(res, 
'subnets') def test_create_subnets_bulk_emulated(self): real_has_attr = hasattr # ensures the API choose the emulation code path def fakehasattr(item, attr): if attr.endswith('__native_bulk_support'): return False return real_has_attr(item, attr) with mock.patch('six.moves.builtins.hasattr', new=fakehasattr): with self.network() as net: res = self._create_subnet_bulk(self.fmt, 2, net['network']['id'], 'test') self._validate_behavior_on_bulk_success(res, 'subnets') def test_create_subnets_bulk_emulated_plugin_failure(self): real_has_attr = hasattr # ensures the API choose the emulation code path def fakehasattr(item, attr): if attr.endswith('__native_bulk_support'): return False return real_has_attr(item, attr) with mock.patch('six.moves.builtins.hasattr', new=fakehasattr): orig = directory.get_plugin().create_subnet method_to_patch = _get_create_db_method('subnet') with mock.patch.object(directory.get_plugin(), method_to_patch) as patched_plugin: def side_effect(*args, **kwargs): self._fail_second_call(patched_plugin, orig, *args, **kwargs) patched_plugin.side_effect = side_effect with self.network() as net: res = self._create_subnet_bulk(self.fmt, 2, net['network']['id'], 'test') self._delete('networks', net['network']['id']) # We expect a 500 as we injected a fault in the plugin self._validate_behavior_on_bulk_failure( res, 'subnets', webob.exc.HTTPServerError.code ) def test_create_subnets_bulk_native_plugin_failure(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk subnet create") plugin = directory.get_plugin() orig = plugin.create_subnet method_to_patch = _get_create_db_method('subnet') with mock.patch.object(plugin, method_to_patch) as patched_plugin: def side_effect(*args, **kwargs): return self._fail_second_call(patched_plugin, orig, *args, **kwargs) patched_plugin.side_effect = side_effect with self.network() as net: res = self._create_subnet_bulk(self.fmt, 2, net['network']['id'], 'test') # We expect a 500 as we injected a fault in the plugin self._validate_behavior_on_bulk_failure( res, 'subnets', webob.exc.HTTPServerError.code ) def test_delete_subnet(self): gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' # Create new network res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) subnet = self._make_subnet(self.fmt, network, gateway_ip, cidr, ip_version=constants.IP_VERSION_4) req = self.new_delete_request('subnets', subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) def test_delete_subnet_port_exists_owned_by_network(self): gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' # Create new network res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) subnet = self._make_subnet(self.fmt, network, gateway_ip, cidr, ip_version=constants.IP_VERSION_4) self._create_port(self.fmt, network['network']['id'], device_owner=constants.DEVICE_OWNER_DHCP) req = self.new_delete_request('subnets', subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) def test_delete_subnet_dhcp_port_associated_with_other_subnets(self): res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) subnet1 = self._make_subnet(self.fmt, network, '10.0.0.1', '10.0.0.0/24', ip_version=constants.IP_VERSION_4) subnet2 = self._make_subnet(self.fmt, network, '10.0.1.1', '10.0.1.0/24', 
                                     ip_version=constants.IP_VERSION_4)
        res = self._create_port(self.fmt, network['network']['id'],
                                device_owner=constants.DEVICE_OWNER_DHCP,
                                fixed_ips=[
                                    {'subnet_id': subnet1['subnet']['id']},
                                    {'subnet_id': subnet2['subnet']['id']}
                                ])
        port = self.deserialize(self.fmt, res)
        expected_subnets = [subnet1['subnet']['id'], subnet2['subnet']['id']]
        self.assertEqual(expected_subnets,
                         [s['subnet_id'] for s in port['port']['fixed_ips']])
        req = self.new_delete_request('subnets', subnet1['subnet']['id'])
        res = req.get_response(self.api)
        self.assertEqual(204, res.status_int)
        port = self._show('ports', port['port']['id'])
        expected_subnets = [subnet2['subnet']['id']]
        self.assertEqual(expected_subnets,
                         [s['subnet_id'] for s in port['port']['fixed_ips']])
        req = self.new_delete_request('subnets', subnet2['subnet']['id'])
        res = req.get_response(self.api)
        self.assertEqual(204, res.status_int)
        port = self._show('ports', port['port']['id'])
        self.assertFalse(port['port']['fixed_ips'])

    def test_delete_subnet_port_exists_owned_by_other(self):
        with self.subnet() as subnet:
            with self.port(subnet=subnet):
                id = subnet['subnet']['id']
                req = self.new_delete_request('subnets', id)
                res = req.get_response(self.api)
                data = self.deserialize(self.fmt, res)
                self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
                msg = str(lib_exc.SubnetInUse(subnet_id=id))
                self.assertEqual(data['NeutronError']['message'], msg)

    def test_delete_subnet_with_other_subnet_on_network_still_in_use(self):
        with self.network() as network:
            with self.subnet(network=network) as subnet1,\
                    self.subnet(network=network,
                                cidr='10.0.1.0/24') as subnet2:
                subnet1_id = subnet1['subnet']['id']
                subnet2_id = subnet2['subnet']['id']
                with self.port(subnet=subnet1,
                               fixed_ips=[{'subnet_id': subnet1_id}]):
                    req = self.new_delete_request('subnets', subnet2_id)
                    res = req.get_response(self.api)
                    self.assertEqual(webob.exc.HTTPNoContent.code,
                                     res.status_int)

    def _create_slaac_subnet_and_port(self, port_owner=None):
        # Create an IPv6 SLAAC subnet and a port using that subnet
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        subnet = self._make_subnet(self.fmt, network, gateway='fe80::1',
                                   cidr='fe80::/64',
                                   ip_version=constants.IP_VERSION_6,
                                   ipv6_ra_mode=constants.IPV6_SLAAC,
                                   ipv6_address_mode=constants.IPV6_SLAAC)
        kwargs = {}
        if port_owner:
            kwargs['device_owner'] = port_owner
            if port_owner in constants.ROUTER_INTERFACE_OWNERS:
                kwargs['fixed_ips'] = [{'ip_address': 'fe80::1'}]
        res = self._create_port(self.fmt, net_id=network['network']['id'],
                                **kwargs)
        port = self.deserialize(self.fmt, res)
        self.assertEqual(1, len(port['port']['fixed_ips']))
        # The port should have an address from the subnet.  Reuse the GET
        # response instead of issuing the same request a second time.
        req = self.new_show_request('ports', port['port']['id'], self.fmt)
        res = req.get_response(self.api)
        sport = self.deserialize(self.fmt, res)
        self.assertEqual(1, len(sport['port']['fixed_ips']))
        return subnet, port

    def test_delete_subnet_ipv6_slaac_port_exists(self):
        """Test IPv6 SLAAC subnet delete when a port is still using subnet."""
        subnet, port = self._create_slaac_subnet_and_port()
        # Delete the subnet
        req = self.new_delete_request('subnets', subnet['subnet']['id'])
        res = req.get_response(self.api)
        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
        # The port should no longer have an address from the deleted subnet
        req = self.new_show_request('ports', port['port']['id'], self.fmt)
        res = req.get_response(self.api)
        sport = self.deserialize(self.fmt, res)
        self.assertEqual(0, len(sport['port']['fixed_ips']))

    def test_delete_subnet_ipv6_slaac_router_port_exists(self):
        """Test IPv6 SLAAC subnet delete with a router port using the subnet"""
        subnet, port = self._create_slaac_subnet_and_port(
            constants.DEVICE_OWNER_ROUTER_INTF)
        # Delete the subnet and assert that we get a HTTP 409 error
        req = self.new_delete_request('subnets', subnet['subnet']['id'])
        res = req.get_response(self.api)
        self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
        # The subnet should still exist and the port should still have an
        # address from the subnet
        req = self.new_show_request('subnets', subnet['subnet']['id'],
                                    self.fmt)
        res = req.get_response(self.api)
        ssubnet = self.deserialize(self.fmt, res)
        self.assertIsNotNone(ssubnet)
        req = self.new_show_request('ports', port['port']['id'], self.fmt)
        res = req.get_response(self.api)
        sport = self.deserialize(self.fmt, res)
        self.assertEqual(1, len(sport['port']['fixed_ips']))
        port_subnet_ids = [fip['subnet_id'] for fip in
                           sport['port']['fixed_ips']]
        self.assertIn(subnet['subnet']['id'], port_subnet_ids)

    def test_delete_network(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        # Create new network
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        subnet = self._make_subnet(self.fmt, network, gateway_ip,
                                   cidr, ip_version=constants.IP_VERSION_4)
        req = self.new_delete_request('networks', network['network']['id'])
        res = req.get_response(self.api)
        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
        req = self.new_show_request('subnets', subnet['subnet']['id'])
        res = req.get_response(self.api)
        self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)

    def test_create_subnet_bad_tenant(self):
        with self.network() as network:
            self._create_subnet(self.fmt,
                                network['network']['id'],
                                '10.0.2.0/24',
                                webob.exc.HTTPNotFound.code,
                                ip_version=constants.IP_VERSION_4,
                                tenant_id='bad_tenant_id',
                                gateway_ip='10.0.2.1',
                                device_owner='fake_owner',
                                set_context=True)

    def test_create_subnet_as_admin(self):
        with self.network() as network:
            self._create_subnet(self.fmt,
                                network['network']['id'],
                                '10.0.2.0/24',
                                webob.exc.HTTPCreated.code,
                                ip_version=constants.IP_VERSION_4,
                                tenant_id='bad_tenant_id',
                                gateway_ip='10.0.2.1',
                                device_owner='fake_owner',
                                set_context=False)

    def test_create_subnet_nonzero_cidr(self):
        # Pass None as gateway_ip to prevent ip auto allocation for gw
        # Previously gateway ip was allocated after validations,
        # so no errors were raised if gw ip was out of range.
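        # Quick illustration of the host-bit normalization exercised
        # below; netaddr (already imported in this module) is assumed to
        # perform the same correction the API applies for users:
        self.assertEqual('10.0.0.0/8',
                         str(netaddr.IPNetwork('10.129.122.5/8').cidr))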
with self.subnet(cidr='10.129.122.5/8') as v1,\ self.subnet(cidr='11.129.122.5/15') as v2,\ self.subnet(cidr='12.129.122.5/16') as v3,\ self.subnet(cidr='13.129.122.5/18') as v4,\ self.subnet(cidr='14.129.122.5/22') as v5,\ self.subnet(cidr='15.129.122.5/24') as v6,\ self.subnet(cidr='16.129.122.5/28') as v7,\ self.subnet(cidr='17.129.122.5/32', gateway_ip=None, enable_dhcp=False) as v8: subs = (v1, v2, v3, v4, v5, v6, v7, v8) # the API should accept and correct these for users self.assertEqual('10.0.0.0/8', subs[0]['subnet']['cidr']) self.assertEqual('11.128.0.0/15', subs[1]['subnet']['cidr']) self.assertEqual('12.129.0.0/16', subs[2]['subnet']['cidr']) self.assertEqual('13.129.64.0/18', subs[3]['subnet']['cidr']) self.assertEqual('14.129.120.0/22', subs[4]['subnet']['cidr']) self.assertEqual('15.129.122.0/24', subs[5]['subnet']['cidr']) self.assertEqual('16.129.122.0/28', subs[6]['subnet']['cidr']) self.assertEqual('17.129.122.5/32', subs[7]['subnet']['cidr']) def _test_create_subnet_with_invalid_netmask_returns_400(self, *args): with self.network() as network: for cidr in args: ip_version = netaddr.IPNetwork(cidr).version self._create_subnet(self.fmt, network['network']['id'], cidr, webob.exc.HTTPClientError.code, ip_version=ip_version) def test_create_subnet_with_invalid_netmask_returns_400_ipv4(self): self._test_create_subnet_with_invalid_netmask_returns_400( '10.0.0.0/31', '10.0.0.0/32') def test_create_subnet_with_invalid_netmask_returns_400_ipv6(self): self._test_create_subnet_with_invalid_netmask_returns_400( 'cafe:cafe::/127', 'cafe:cafe::/128') def test_create_subnet_bad_ip_version(self): with self.network() as network: # Check bad IP version data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0/24', 'ip_version': 'abc', 'tenant_id': network['network']['tenant_id'], 'gateway_ip': '10.0.2.1'}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_bad_ip_version_null(self): with self.network() as network: # Check bad IP version data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0/24', 'ip_version': None, 'tenant_id': network['network']['tenant_id'], 'gateway_ip': '10.0.2.1'}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_bad_uuid(self): with self.network() as network: # Check invalid UUID data = {'subnet': {'network_id': None, 'cidr': '10.0.2.0/24', 'ip_version': constants.IP_VERSION_4, 'tenant_id': network['network']['tenant_id'], 'gateway_ip': '10.0.2.1'}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_bad_boolean(self): with self.network() as network: # Check invalid boolean data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0/24', 'ip_version': constants.IP_VERSION_4, 'enable_dhcp': None, 'tenant_id': network['network']['tenant_id'], 'gateway_ip': '10.0.2.1'}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_bad_pools(self): with self.network() as network: # Check allocation pools allocation_pools = [[{'end': '10.0.0.254'}], [{'start': '10.0.0.254'}], [{'start': '1000.0.0.254'}], 
[{'start': '10.0.0.2', 'end': '10.0.0.254'}, {'end': '10.0.0.254'}], None, [{'start': '10.0.0.200', 'end': '10.0.3.20'}], [{'start': '10.0.2.250', 'end': '10.0.3.5'}], [{'start': '10.0.2.10', 'end': '10.0.2.5'}], [{'start': '10.0.0.2', 'end': '10.0.0.3'}, {'start': '10.0.0.2', 'end': '10.0.0.3'}]] tenant_id = network['network']['tenant_id'] for pool in allocation_pools: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0/24', 'ip_version': constants.IP_VERSION_4, 'tenant_id': tenant_id, 'gateway_ip': '10.0.2.1', 'allocation_pools': pool}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_bad_nameserver(self): with self.network() as network: # Check nameservers nameserver_pools = [['1100.0.0.2'], ['1.1.1.2', '1.1000.1.3'], ['1.1.1.2', '1.1.1.2']] tenant_id = network['network']['tenant_id'] for nameservers in nameserver_pools: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0/24', 'ip_version': constants.IP_VERSION_4, 'tenant_id': tenant_id, 'gateway_ip': '10.0.2.1', 'dns_nameservers': nameservers}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_bad_hostroutes(self): with self.network() as network: # Check hostroutes hostroute_pools = [[{'destination': '100.0.0.0/24'}], [{'nexthop': '10.0.2.20'}], [{'nexthop': '10.0.2.20', 'destination': '100.0.0.0/8'}, {'nexthop': '10.0.2.20', 'destination': '100.0.0.0/8'}], [{'destination': '100.1.1.1/8', 'nexthop': '10.0.2.20'}]] tenant_id = network['network']['tenant_id'] for hostroutes in hostroute_pools: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0/24', 'ip_version': constants.IP_VERSION_4, 'tenant_id': tenant_id, 'gateway_ip': '10.0.2.1', 'host_routes': hostroutes}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_defaults(self): gateway = '10.0.0.1' cidr = '10.0.0.0/24' allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}] enable_dhcp = True subnet = self._test_create_subnet() # verify cidr & gw have been correctly generated self.assertEqual(cidr, subnet['subnet']['cidr']) self.assertEqual(gateway, subnet['subnet']['gateway_ip']) self.assertEqual(enable_dhcp, subnet['subnet']['enable_dhcp']) self.assertEqual(allocation_pools, subnet['subnet']['allocation_pools']) def test_create_subnet_gw_values(self): cidr = '10.0.0.0/24' # Gateway is last IP in range gateway = '10.0.0.254' allocation_pools = [{'start': '10.0.0.1', 'end': '10.0.0.253'}] expected = {'gateway_ip': gateway, 'cidr': cidr, 'allocation_pools': allocation_pools} self._test_create_subnet(expected=expected, gateway_ip=gateway) # Gateway is first in subnet gateway = '10.0.0.1' allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}] expected = {'gateway_ip': gateway, 'cidr': cidr, 'allocation_pools': allocation_pools} self._test_create_subnet(expected=expected, gateway_ip=gateway) def test_create_subnet_ipv6_gw_values(self): cidr = '2001::/64' # Gateway is last IP in IPv6 DHCPv6 stateful subnet gateway = '2001::ffff:ffff:ffff:ffff' allocation_pools = [{'start': '2001::1', 'end': '2001::ffff:ffff:ffff:fffe'}] expected = {'gateway_ip': gateway, 'cidr': cidr, 'allocation_pools': allocation_pools} 
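        # Unlike IPv4, IPv6 has no broadcast address, so the last address
        # of the subnet is usable here as the gateway while the allocation
        # pool stops one address short of it at ...:fffe.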
        self._test_create_subnet(expected=expected, gateway_ip=gateway,
                                 cidr=cidr,
                                 ip_version=constants.IP_VERSION_6,
                                 ipv6_ra_mode=constants.DHCPV6_STATEFUL,
                                 ipv6_address_mode=constants.DHCPV6_STATEFUL)
        # Gateway is first IP in IPv6 DHCPv6 stateful subnet
        gateway = '2001::1'
        allocation_pools = [{'start': '2001::2',
                             'end': '2001::ffff:ffff:ffff:ffff'}]
        expected = {'gateway_ip': gateway,
                    'cidr': cidr,
                    'allocation_pools': allocation_pools}
        self._test_create_subnet(expected=expected, gateway_ip=gateway,
                                 cidr=cidr,
                                 ip_version=constants.IP_VERSION_6,
                                 ipv6_ra_mode=constants.DHCPV6_STATEFUL,
                                 ipv6_address_mode=constants.DHCPV6_STATEFUL)
        # If gateway_ip is not specified, allocate first IP from the subnet
        expected = {'gateway_ip': str(netaddr.IPNetwork(cidr).network),
                    'cidr': cidr}
        self._test_create_subnet(expected=expected,
                                 cidr=cidr,
                                 ip_version=constants.IP_VERSION_6,
                                 ipv6_ra_mode=constants.IPV6_SLAAC,
                                 ipv6_address_mode=constants.IPV6_SLAAC)

    @testtools.skipIf(tools.is_bsd(), 'bug/1484837')
    def test_create_subnet_ipv6_pd_gw_values(self):
        cidr = constants.PROVISIONAL_IPV6_PD_PREFIX
        # Gateway is last IP in IPv6 DHCPv6 Stateless subnet
        gateway = '::ffff:ffff:ffff:ffff'
        allocation_pools = [{'start': '::1',
                             'end': '::ffff:ffff:ffff:fffe'}]
        expected = {'gateway_ip': gateway,
                    'cidr': cidr,
                    'allocation_pools': allocation_pools}
        self._test_create_subnet(expected=expected, gateway_ip=gateway,
                                 cidr=cidr,
                                 ip_version=constants.IP_VERSION_6,
                                 ipv6_ra_mode=constants.DHCPV6_STATELESS,
                                 ipv6_address_mode=constants.DHCPV6_STATELESS)
        # Gateway is first IP in IPv6 DHCPv6 Stateless subnet
        gateway = '::1'
        allocation_pools = [{'start': '::2',
                             'end': '::ffff:ffff:ffff:ffff'}]
        expected = {'gateway_ip': gateway,
                    'cidr': cidr,
                    'allocation_pools': allocation_pools}
        self._test_create_subnet(expected=expected, gateway_ip=gateway,
                                 cidr=cidr,
                                 ip_version=constants.IP_VERSION_6,
                                 ipv6_ra_mode=constants.DHCPV6_STATELESS,
                                 ipv6_address_mode=constants.DHCPV6_STATELESS)
        # If gateway_ip is not specified and the subnet is using prefix
        # delegation, until the CIDR is assigned, this value should be "None"
        expected = {'gateway_ip': None,
                    'cidr': cidr}
        self._test_create_subnet(expected=expected,
                                 cidr=cidr,
                                 ip_version=constants.IP_VERSION_6,
                                 ipv6_ra_mode=constants.IPV6_SLAAC,
                                 ipv6_address_mode=constants.IPV6_SLAAC)

    def test_create_subnet_gw_outside_cidr_returns_201(self):
        with self.network() as network:
            self._create_subnet(self.fmt,
                                network['network']['id'],
                                '10.0.0.0/24',
                                webob.exc.HTTPCreated.code,
                                gateway_ip='100.0.0.1')

    def test_create_subnet_gw_is_nw_addr_returns_400(self):
        with self.network() as network:
            self._create_subnet(self.fmt,
                                network['network']['id'],
                                '10.0.0.0/24',
                                webob.exc.HTTPClientError.code,
                                gateway_ip='10.0.0.0')

    def test_create_subnet_gw_is_broadcast_addr_returns_400(self):
        with self.network() as network:
            self._create_subnet(self.fmt,
                                network['network']['id'],
                                '10.0.0.0/24',
                                webob.exc.HTTPClientError.code,
                                gateway_ip='10.0.0.255')

    def test_create_subnet_gw_of_network_returns_400(self):
        with self.network() as network:
            self._create_subnet(self.fmt,
                                network['network']['id'],
                                '10.0.0.0/24',
                                webob.exc.HTTPClientError.code,
                                gateway_ip='10.0.0.0')

    def test_create_subnet_gw_bcast_returns_400(self):
        with self.network() as network:
            self._create_subnet(self.fmt,
                                network['network']['id'],
                                '10.0.0.0/24',
                                webob.exc.HTTPClientError.code,
                                gateway_ip='10.0.0.255')

    def test_create_subnet_with_allocation_pool(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        allocation_pools = [{'start': '10.0.0.2',
                             'end': '10.0.0.100'}]
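        # The default gateway (10.0.0.1) deliberately sits outside the
        # pool; gateway-inside-pool behaviour is covered by the
        # *_returns_409 tests further down.  A hypothetical netaddr check
        # of the same invariant (not the plugin's actual validation code):
        #
        #     pool = netaddr.IPRange('10.0.0.2', '10.0.0.100')
        #     assert netaddr.IPAddress('10.0.0.1') not in pool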
self._test_create_subnet(gateway_ip=gateway_ip, cidr=cidr, allocation_pools=allocation_pools) def test_create_subnet_with_none_gateway(self): cidr = '10.0.0.0/24' self._test_create_subnet(gateway_ip=None, cidr=cidr) def test_create_subnet_with_none_gateway_fully_allocated(self): cidr = '10.0.0.0/24' allocation_pools = [{'start': '10.0.0.1', 'end': '10.0.0.254'}] self._test_create_subnet(gateway_ip=None, cidr=cidr, allocation_pools=allocation_pools) def test_subnet_with_allocation_range(self): with self.network() as network: net_id = network['network']['id'] data = {'subnet': {'network_id': net_id, 'cidr': '10.0.0.0/24', 'ip_version': constants.IP_VERSION_4, 'gateway_ip': '10.0.0.1', 'tenant_id': network['network']['tenant_id'], 'allocation_pools': [{'start': '10.0.0.100', 'end': '10.0.0.120'}]}} subnet_req = self.new_create_request('subnets', data) subnet = self.deserialize(self.fmt, subnet_req.get_response(self.api)) # Check fixed IP not in allocation range kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.10'}]} res = self._create_port(self.fmt, net_id=net_id, **kwargs) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) port = self.deserialize(self.fmt, res) # delete the port self._delete('ports', port['port']['id']) # Check when fixed IP is gateway kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.1'}]} res = self._create_port(self.fmt, net_id=net_id, **kwargs) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) port = self.deserialize(self.fmt, res) # delete the port self._delete('ports', port['port']['id']) def test_create_subnet_with_none_gateway_allocation_pool(self): cidr = '10.0.0.0/24' allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.100'}] self._test_create_subnet(gateway_ip=None, cidr=cidr, allocation_pools=allocation_pools) def test_create_subnet_with_v6_allocation_pool(self): gateway_ip = 'fe80::1' cidr = 'fe80::/80' allocation_pools = [{'start': 'fe80::2', 'end': 'fe80::ffff:fffa:ffff'}] self._test_create_subnet(gateway_ip=gateway_ip, cidr=cidr, ip_version=constants.IP_VERSION_6, allocation_pools=allocation_pools) @testtools.skipIf(tools.is_bsd(), 'bug/1484837') def test_create_subnet_with_v6_pd_allocation_pool(self): gateway_ip = '::1' cidr = constants.PROVISIONAL_IPV6_PD_PREFIX allocation_pools = [{'start': '::2', 'end': '::ffff:ffff:ffff:fffe'}] self._test_create_subnet(gateway_ip=gateway_ip, cidr=cidr, ip_version=constants.IP_VERSION_6, allocation_pools=allocation_pools) def test_create_subnet_with_large_allocation_pool(self): gateway_ip = '10.0.0.1' cidr = '10.0.0.0/8' allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.100'}, {'start': '10.1.0.0', 'end': '10.200.0.100'}] self._test_create_subnet(gateway_ip=gateway_ip, cidr=cidr, allocation_pools=allocation_pools) def test_create_subnet_multiple_allocation_pools(self): gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.100'}, {'start': '10.0.0.110', 'end': '10.0.0.150'}] self._test_create_subnet(gateway_ip=gateway_ip, cidr=cidr, allocation_pools=allocation_pools) def test_create_subnet_with_dhcp_disabled(self): enable_dhcp = False self._test_create_subnet(enable_dhcp=enable_dhcp) def test_create_subnet_default_gw_conflict_allocation_pool_returns_409( self): cidr = '10.0.0.0/24' allocation_pools = [{'start': '10.0.0.1', 'end': '10.0.0.5'}] with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: self._test_create_subnet(cidr=cidr, 
allocation_pools=allocation_pools) self.assertEqual(webob.exc.HTTPConflict.code, ctx_manager.exception.code) def test_create_subnet_gateway_in_allocation_pool_returns_409(self): gateway_ip = '10.0.0.50' cidr = '10.0.0.0/24' allocation_pools = [{'start': '10.0.0.1', 'end': '10.0.0.100'}] with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: self._test_create_subnet(gateway_ip=gateway_ip, cidr=cidr, allocation_pools=allocation_pools) self.assertEqual(webob.exc.HTTPConflict.code, ctx_manager.exception.code) def test_create_subnet_overlapping_allocation_pools_returns_409(self): gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.150'}, {'start': '10.0.0.140', 'end': '10.0.0.180'}] with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: self._test_create_subnet(gateway_ip=gateway_ip, cidr=cidr, allocation_pools=allocation_pools) self.assertEqual(webob.exc.HTTPConflict.code, ctx_manager.exception.code) def test_create_subnet_invalid_allocation_pool_returns_400(self): gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.256'}] with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: self._test_create_subnet(gateway_ip=gateway_ip, cidr=cidr, allocation_pools=allocation_pools) self.assertEqual(webob.exc.HTTPClientError.code, ctx_manager.exception.code) def test_create_subnet_out_of_range_allocation_pool_returns_400(self): gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.1.6'}] with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: self._test_create_subnet(gateway_ip=gateway_ip, cidr=cidr, allocation_pools=allocation_pools) self.assertEqual(webob.exc.HTTPClientError.code, ctx_manager.exception.code) def test_create_subnet_shared_returns_400(self): cidr = '10.0.0.0/24' with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: self._test_create_subnet(cidr=cidr, shared=True) self.assertEqual(webob.exc.HTTPClientError.code, ctx_manager.exception.code) def test_create_subnet_inconsistent_ipv6_cidrv4(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0/24', 'ip_version': constants.IP_VERSION_6, 'tenant_id': network['network']['tenant_id']}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_inconsistent_ipv4_cidrv6(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': 'fe80::0/80', 'ip_version': constants.IP_VERSION_4, 'tenant_id': network['network']['tenant_id']}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_inconsistent_ipv4_gatewayv6(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0/24', 'ip_version': constants.IP_VERSION_4, 'gateway_ip': 'fe80::1', 'tenant_id': network['network']['tenant_id']}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_inconsistent_ipv6_gatewayv4(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 
'cidr': 'fe80::0/80', 'ip_version': constants.IP_VERSION_6, 'gateway_ip': '192.168.0.1', 'tenant_id': network['network']['tenant_id']}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_inconsistent_ipv6_dns_v4(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': 'fe80::0/80', 'ip_version': constants.IP_VERSION_6, 'dns_nameservers': ['192.168.0.1'], 'tenant_id': network['network']['tenant_id']}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_inconsistent_ipv4_hostroute_dst_v6(self): host_routes = [{'destination': 'fe80::0/48', 'nexthop': '10.0.2.20'}] with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0/24', 'ip_version': constants.IP_VERSION_4, 'host_routes': host_routes, 'tenant_id': network['network']['tenant_id']}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_inconsistent_ipv4_hostroute_np_v6(self): host_routes = [{'destination': '172.16.0.0/24', 'nexthop': 'fe80::1'}] with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.2.0/24', 'ip_version': constants.IP_VERSION_4, 'host_routes': host_routes, 'tenant_id': network['network']['tenant_id']}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def _test_validate_subnet_ipv6_modes(self, cur_subnet=None, expect_success=True, **modes): plugin = directory.get_plugin() ctx = context.get_admin_context() new_subnet = {'ip_version': constants.IP_VERSION_6, 'cidr': 'fe80::/64', 'enable_dhcp': True, 'ipv6_address_mode': None, 'ipv6_ra_mode': None} for mode, value in modes.items(): new_subnet[mode] = value if expect_success: plugin._validate_subnet(ctx, new_subnet, cur_subnet) else: self.assertRaises(lib_exc.InvalidInput, plugin._validate_subnet, ctx, new_subnet, cur_subnet) def _test_validate_subnet_ipv6_pd_modes(self, cur_subnet=None, expect_success=True, **modes): plugin = directory.get_plugin() ctx = context.get_admin_context() new_subnet = {'ip_version': constants.IP_VERSION_6, 'cidr': constants.PROVISIONAL_IPV6_PD_PREFIX, 'enable_dhcp': True, 'ipv6_address_mode': None, 'ipv6_ra_mode': None} for mode, value in modes.items(): new_subnet[mode] = value if expect_success: plugin._validate_subnet(ctx, new_subnet, cur_subnet) else: self.assertRaises(lib_exc.InvalidInput, plugin._validate_subnet, ctx, new_subnet, cur_subnet) def test_create_subnet_ipv6_ra_modes(self): # Test all RA modes with no address mode specified for ra_mode in constants.IPV6_MODES: self._test_validate_subnet_ipv6_modes( ipv6_ra_mode=ra_mode) self._test_validate_subnet_ipv6_pd_modes( ipv6_ra_mode=ra_mode) def test_create_subnet_ipv6_addr_modes(self): # Test all address modes with no RA mode specified for addr_mode in constants.IPV6_MODES: self._test_validate_subnet_ipv6_modes( ipv6_address_mode=addr_mode) self._test_validate_subnet_ipv6_pd_modes( ipv6_address_mode=addr_mode) def test_create_subnet_ipv6_same_ra_and_addr_modes(self): # Test all ipv6 modes with ra_mode==addr_mode for ipv6_mode in constants.IPV6_MODES: 
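            # Matching modes (as here) or a single mode with the other
            # unset are the accepted combinations; roughly:
            # valid = (ra_mode is None or addr_mode is None or
            #          ra_mode == addr_mode).
            # The mixed-mode test below expects a failure whenever both
            # are set and differ.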
self._test_validate_subnet_ipv6_modes( ipv6_ra_mode=ipv6_mode, ipv6_address_mode=ipv6_mode) self._test_validate_subnet_ipv6_pd_modes( ipv6_ra_mode=ipv6_mode, ipv6_address_mode=ipv6_mode) def test_create_subnet_ipv6_different_ra_and_addr_modes(self): # Test all ipv6 modes with ra_mode!=addr_mode for ra_mode, addr_mode in itertools.permutations( constants.IPV6_MODES, 2): self._test_validate_subnet_ipv6_modes( expect_success=not (ra_mode and addr_mode), ipv6_ra_mode=ra_mode, ipv6_address_mode=addr_mode) self._test_validate_subnet_ipv6_pd_modes( expect_success=not (ra_mode and addr_mode), ipv6_ra_mode=ra_mode, ipv6_address_mode=addr_mode) def test_create_subnet_ipv6_out_of_cidr_global(self): gateway_ip = '2000::1' cidr = '2001::/64' subnet = self._test_create_subnet( gateway_ip=gateway_ip, cidr=cidr, ip_version=constants.IP_VERSION_6, ipv6_ra_mode=constants.DHCPV6_STATEFUL, ipv6_address_mode=constants.DHCPV6_STATEFUL) self.assertEqual(constants.IP_VERSION_6, subnet['subnet']['ip_version']) self.assertEqual(gateway_ip, subnet['subnet']['gateway_ip']) self.assertEqual(cidr, subnet['subnet']['cidr']) def _create_subnet_ipv6_gw(self, gateway_ip, cidr): subnet = self._test_create_subnet( gateway_ip=gateway_ip, cidr=cidr, ip_version=constants.IP_VERSION_6, ipv6_ra_mode=constants.DHCPV6_STATEFUL, ipv6_address_mode=constants.DHCPV6_STATEFUL) self.assertEqual(constants.IP_VERSION_6, subnet['subnet']['ip_version']) if gateway_ip and gateway_ip[-3:] == '::0': self.assertEqual(gateway_ip[:-1], subnet['subnet']['gateway_ip']) else: self.assertEqual(gateway_ip, subnet['subnet']['gateway_ip']) self.assertEqual(cidr, subnet['subnet']['cidr']) def test_create_subnet_ipv6_gw_is_nw_start_addr(self): gateway_ip = '2001::0' cidr = '2001::/64' self._create_subnet_ipv6_gw(gateway_ip, cidr) def test_create_subnet_ipv6_gw_is_nw_start_addr_canonicalize(self): gateway_ip = '2001::' cidr = '2001::/64' self._create_subnet_ipv6_gw(gateway_ip, cidr) def test_create_subnet_ipv6_gw_is_nw_end_addr(self): gateway_ip = '2001::ffff' cidr = '2001::/112' self._create_subnet_ipv6_gw(gateway_ip, cidr) def test_create_subnet_ipv6_out_of_cidr_lla(self): gateway_ip = 'fe80::1' cidr = '2001::/64' self._test_create_subnet( gateway_ip=gateway_ip, cidr=cidr, ip_version=constants.IP_VERSION_6, ipv6_ra_mode=constants.IPV6_SLAAC, ipv6_address_mode=constants.IPV6_SLAAC) def test_create_subnet_ipv6_first_ip_owned_by_router(self): cidr = '2001::/64' with self.network() as network: net_id = network['network']['id'] with self.subnet(network=network, ip_version=constants.IP_VERSION_6, cidr=cidr) as subnet: fixed_ip = [{'subnet_id': subnet['subnet']['id'], 'ip_address': '2001::'}] kwargs = {'fixed_ips': fixed_ip, 'tenant_id': 'tenant_id', 'device_id': 'fake_device', 'device_owner': constants.DEVICE_OWNER_ROUTER_GW} res = self._create_port(self.fmt, net_id=net_id, **kwargs) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) def test_create_subnet_ipv6_first_ip_owned_by_non_router(self): cidr = '2001::/64' with self.network() as network: net_id = network['network']['id'] with self.subnet(network=network, ip_version=constants.IP_VERSION_6, cidr=cidr) as subnet: fixed_ip = [{'subnet_id': subnet['subnet']['id'], 'ip_address': '2001::'}] kwargs = {'fixed_ips': fixed_ip, 'tenant_id': 'tenant_id', 'device_id': 'fake_device', 'device_owner': 'fake_owner'} res = self._create_port(self.fmt, net_id=net_id, **kwargs) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_create_subnet_ipv6_attributes_no_dhcp_enabled(self): gateway_ip 

    def test_create_subnet_ipv6_attributes_no_dhcp_enabled(self):
        gateway_ip = 'fe80::1'
        cidr = 'fe80::/64'
        with testlib_api.ExpectedException(
                webob.exc.HTTPClientError) as ctx_manager:
            for mode in constants.IPV6_MODES:
                self._test_create_subnet(gateway_ip=gateway_ip,
                                         cidr=cidr,
                                         ip_version=constants.IP_VERSION_6,
                                         enable_dhcp=False,
                                         ipv6_ra_mode=mode,
                                         ipv6_address_mode=mode)
        self.assertEqual(webob.exc.HTTPClientError.code,
                         ctx_manager.exception.code)

    def test_create_subnet_invalid_ipv6_ra_mode(self):
        gateway_ip = 'fe80::1'
        cidr = 'fe80::/80'
        with testlib_api.ExpectedException(
                webob.exc.HTTPClientError) as ctx_manager:
            self._test_create_subnet(gateway_ip=gateway_ip,
                                     cidr=cidr,
                                     ip_version=constants.IP_VERSION_6,
                                     ipv6_ra_mode='foo',
                                     ipv6_address_mode='slaac')
        self.assertEqual(webob.exc.HTTPClientError.code,
                         ctx_manager.exception.code)

    def test_create_subnet_invalid_ipv6_address_mode(self):
        gateway_ip = 'fe80::1'
        cidr = 'fe80::/80'
        with testlib_api.ExpectedException(
                webob.exc.HTTPClientError) as ctx_manager:
            self._test_create_subnet(gateway_ip=gateway_ip,
                                     cidr=cidr,
                                     ip_version=constants.IP_VERSION_6,
                                     ipv6_ra_mode='slaac',
                                     ipv6_address_mode='baz')
        self.assertEqual(webob.exc.HTTPClientError.code,
                         ctx_manager.exception.code)

    def test_create_subnet_ipv6_ra_mode_ip_version_4(self):
        cidr = '10.0.2.0/24'
        with testlib_api.ExpectedException(
                webob.exc.HTTPClientError) as ctx_manager:
            self._test_create_subnet(cidr=cidr,
                                     ip_version=constants.IP_VERSION_4,
                                     ipv6_ra_mode=constants.DHCPV6_STATEFUL)
        self.assertEqual(webob.exc.HTTPClientError.code,
                         ctx_manager.exception.code)

    def test_create_subnet_ipv6_address_mode_ip_version_4(self):
        cidr = '10.0.2.0/24'
        with testlib_api.ExpectedException(
                webob.exc.HTTPClientError) as ctx_manager:
            self._test_create_subnet(
                cidr=cidr, ip_version=constants.IP_VERSION_4,
                ipv6_address_mode=constants.DHCPV6_STATEFUL)
        self.assertEqual(webob.exc.HTTPClientError.code,
                         ctx_manager.exception.code)

    def _test_create_subnet_ipv6_auto_addr_with_port_on_network(
            self, addr_mode, device_owner=DEVICE_OWNER_COMPUTE,
            insert_db_reference_error=False, insert_port_not_found=False,
            insert_address_allocated=False):
        # Create a network with one IPv4 subnet and one port
        with self.network() as network,\
                self.subnet(network=network) as v4_subnet,\
                self.port(subnet=v4_subnet,
                          device_owner=device_owner) as port:
            if insert_db_reference_error:
                orig_fn = orm.Session.add

                def db_ref_err_for_ipalloc(s, instance):
                    if instance.__class__.__name__ == 'IPAllocation':
                        # tweak port_id to cause a FK violation,
                        # thus DBReferenceError
                        instance.port_id = 'nonexistent'
                    return orig_fn(s, instance)

                mock.patch.object(orm.Session, 'add',
                                  side_effect=db_ref_err_for_ipalloc,
                                  autospec=True).start()
                v6_subnet = {'ip_version': constants.IP_VERSION_6,
                             'cidr': 'fe80::/64',
                             'gateway_ip': 'fe80::1',
                             'tenant_id': v4_subnet['subnet']['tenant_id']}
                mock.patch.object(db_base_plugin_common.DbBasePluginCommon,
                                  '_get_subnet',
                                  return_value=v6_subnet).start()
            # Add an IPv6 auto-address subnet to the network
            with mock.patch.object(directory.get_plugin(),
                                   'update_port') as mock_updated_port:
                if insert_port_not_found:
                    mock_updated_port.side_effect = lib_exc.PortNotFound(
                        port_id=port['port']['id'])
                if insert_address_allocated:
                    mock.patch.object(
                        ipam_driver.NeutronDbSubnet, '_verify_ip',
                        side_effect=ipam_exc.IpAddressAlreadyAllocated(
                            subnet_id=mock.ANY,
                            ip=mock.ANY)).start()
                v6_subnet = self._make_subnet(
                    self.fmt, network, 'fe80::1', 'fe80::/64',
                    ip_version=constants.IP_VERSION_6,
                    ipv6_ra_mode=addr_mode,
                    ipv6_address_mode=addr_mode)
            if (insert_db_reference_error or insert_address_allocated or
                    device_owner == constants.DEVICE_OWNER_ROUTER_SNAT or
                    device_owner in constants.ROUTER_INTERFACE_OWNERS):
                # DVR SNAT, router interfaces and DHCP ports should not have
                # been updated with addresses from the new auto-address
                # subnet
                self.assertEqual(1, len(port['port']['fixed_ips']))
            else:
                # Confirm that the port has been updated with an address
                # from the new auto-address subnet
                mock_updated_port.assert_called_with(mock.ANY,
                                                     port['port']['id'],
                                                     mock.ANY)
                req = self.new_show_request('ports', port['port']['id'],
                                            self.fmt)
                sport = self.deserialize(self.fmt,
                                         req.get_response(self.api))
                fixed_ips = sport['port']['fixed_ips']
                self.assertEqual(2, len(fixed_ips))
                self.assertIn(v6_subnet['subnet']['id'],
                              [fixed_ip['subnet_id']
                               for fixed_ip in fixed_ips])

    def test_create_subnet_ipv6_slaac_with_port_on_network(self):
        self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
            constants.IPV6_SLAAC)

    def test_create_subnet_dhcpv6_stateless_with_port_on_network(self):
        self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
            constants.DHCPV6_STATELESS)

    def test_create_subnet_ipv6_slaac_with_dhcp_port_on_network(self):
        self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
            constants.IPV6_SLAAC,
            device_owner=constants.DEVICE_OWNER_DHCP)

    def test_create_subnet_dhcpv6_stateless_with_ip_already_allocated(self):
        self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
            constants.DHCPV6_STATELESS, insert_address_allocated=True)

    def test_create_subnet_ipv6_slaac_with_ip_already_allocated(self):
        self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
            constants.IPV6_SLAAC, insert_address_allocated=True)

    def test_create_subnet_ipv6_slaac_with_router_intf_on_network(self):
        self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
            constants.IPV6_SLAAC,
            device_owner=constants.DEVICE_OWNER_ROUTER_INTF)

    def test_create_subnet_ipv6_slaac_with_snat_intf_on_network(self):
        self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
            constants.IPV6_SLAAC,
            device_owner=constants.DEVICE_OWNER_ROUTER_SNAT)

    def test_create_subnet_ipv6_slaac_with_db_reference_error(self):
        self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
            constants.IPV6_SLAAC, insert_db_reference_error=True)

    def test_create_subnet_ipv6_slaac_with_port_not_found(self):
        self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
            constants.IPV6_SLAAC, insert_port_not_found=True)

    def test_bulk_create_subnet_ipv6_auto_addr_with_port_on_network(self):
        # Create a network with one IPv4 subnet and one port
        with self.network() as network,\
                self.subnet(network=network) as v4_subnet,\
                self.port(subnet=v4_subnet,
                          device_owner=constants.DEVICE_OWNER_DHCP) as port:
            # Add 2 IPv6 auto-address subnets in a bulk request
            self._create_subnet_bulk(
                self.fmt, 2, network['network']['id'], 'test',
                ip_version=constants.IP_VERSION_6,
                ipv6_mode=constants.IPV6_SLAAC)
            # Confirm that the port has been updated with addresses
            # from the new auto-address subnets
            req = self.new_show_request('ports', port['port']['id'],
                                        self.fmt)
            sport = self.deserialize(self.fmt, req.get_response(self.api))
            fixed_ips = sport['port']['fixed_ips']
            self.assertEqual(3, len(fixed_ips))

    def test_update_subnet_no_gateway(self):
        with self.subnet() as subnet:
            data = {'subnet': {'gateway_ip': '10.0.0.1'}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(data['subnet']['gateway_ip'],
                             res['subnet']['gateway_ip'])
            data = {'subnet': {'gateway_ip': None}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertIsNone(res['subnet']['gateway_ip'])

    def test_subnet_usable_after_update(self):
        with self.subnet() as subnet:
            data = {'subnet': {'name': 'newname'}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(data['subnet']['name'], res['subnet']['name'])
            with self.port(subnet=subnet):
                pass

    def test_update_subnet(self):
        with self.subnet() as subnet:
            data = {'subnet': {'gateway_ip': '10.0.0.1'}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(data['subnet']['gateway_ip'],
                             res['subnet']['gateway_ip'])

    def test_update_subnet_adding_additional_host_routes_and_dns(self):
        host_routes = [{'destination': '172.16.0.0/24',
                        'nexthop': '10.0.2.2'}]
        with self.network() as network:
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': '10.0.2.0/24',
                               'ip_version': constants.IP_VERSION_4,
                               'dns_nameservers': ['192.168.0.1'],
                               'host_routes': host_routes,
                               'tenant_id': network['network']['tenant_id']}}
            subnet_req = self.new_create_request('subnets', data)
            res = self.deserialize(self.fmt,
                                   subnet_req.get_response(self.api))

            host_routes = [{'destination': '172.16.0.0/24',
                            'nexthop': '10.0.2.2'},
                           {'destination': '192.168.0.0/24',
                            'nexthop': '10.0.2.3'}]
            dns_nameservers = ['192.168.0.1', '192.168.0.2']
            data = {'subnet': {'host_routes': host_routes,
                               'dns_nameservers': dns_nameservers}}
            req = self.new_update_request('subnets', data,
                                          res['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(
                sorted(res['subnet']['host_routes'],
                       key=helpers.safe_sort_key),
                sorted(host_routes, key=helpers.safe_sort_key))
            self.assertEqual(dns_nameservers,
                             res['subnet']['dns_nameservers'])

    def _test_subnet_update_ipv4_and_ipv6_pd_subnets(self, ra_addr_mode):
        # Test a prefix update of an IPv6 PD subnet on a network that has
        # both IPv4 and IPv6 PD subnets.
        # Two networks are created: the first has an IPv4 and an IPv6 PD
        # subnet, the second has only an IPv4 subnet. A port is created on
        # each network. When update_subnet is called on the PD subnet with a
        # new prefix, the port on the first network should get a new IPv6
        # address while its IPv4 address stays the same. update_port should
        # be called only for this port, with the v4 subnet (including the v4
        # address) and the v6 PD subnet as fixed_ips.
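        #
        # Editor's sketch (assumed values, for illustration only): the new
        # IPv6 address is derived from the port MAC via EUI-64, which
        # inserts ff:fe into the middle of the MAC and flips the
        # universal/local bit, e.g.:
        #
        #     >>> from oslo_utils import netutils
        #     >>> str(netutils.get_ipv6_addr_by_EUI64('2001::/64',
        #     ...                                     'fa:16:3e:00:00:01'))
        #     '2001::f816:3eff:fe00:1'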
        orig_update_port = self.plugin.update_port
        with self.network() as network:
            with self.subnet(network=network), (
                    mock.patch.object(self.plugin,
                                      'update_port')) as update_port:
                # Create a port on the second network
                network2 = self._make_network(self.fmt, 'net2', True)
                self._make_subnet(self.fmt, network2, "1.1.1.1",
                                  "1.1.1.0/24",
                                  ip_version=constants.IP_VERSION_4)
                self._make_port(self.fmt, net_id=network2['network']['id'])

                subnet = self._make_v6_subnet(network, ra_addr_mode,
                                              ipv6_pd=True)
                port = self._make_port(self.fmt,
                                       subnet['subnet']['network_id'])
                port_dict = port['port']

                # When update_port is called, the port should carry the
                # fixed_ips computed below.
                fips = port_dict['fixed_ips']
                for fip in fips:
                    if fip['subnet_id'] == subnet['subnet']['id']:
                        fip.pop('ip_address')

                def mock_update_port(context, id, port):
                    self.assertEqual(port_dict['id'], id)
                    self.assertEqual(fips, port['port']['fixed_ips'])
                    orig_update_port(context, id, port)

                update_port.side_effect = mock_update_port

                # Update the subnet with a new prefix.
                prefix = '2001::/64'
                data = {'subnet': {'cidr': prefix}}
                self.plugin.update_subnet(context.get_admin_context(),
                                          subnet['subnet']['id'], data)

                # Create the expected fixed_ips.
                port_mac = port_dict['mac_address']
                eui_addr = str(netutils.get_ipv6_addr_by_EUI64(prefix,
                                                               port_mac))
                ips = port_dict['fixed_ips']
                for fip in ips:
                    if fip['subnet_id'] == subnet['subnet']['id']:
                        fip['ip_address'] = eui_addr

                # Check that the port got an IPv6 address with the new
                # prefix.
                req = self.new_show_request('ports', port['port']['id'],
                                            self.fmt)
                updated_port = self.deserialize(self.fmt,
                                                req.get_response(self.api))
                new_ips = updated_port['port']['fixed_ips']
                self.assertEqual(2, len(ips))
                self.assertEqual(ips, new_ips)

    def test_subnet_update_ipv4_and_ipv6_pd_slaac_subnets(self):
        self._test_subnet_update_ipv4_and_ipv6_pd_subnets(
            ra_addr_mode=constants.IPV6_SLAAC)

    def test_subnet_update_ipv4_and_ipv6_pd_v6stateless_subnets(self):
        self._test_subnet_update_ipv4_and_ipv6_pd_subnets(
            ra_addr_mode=constants.DHCPV6_STATELESS)

    def test_update_subnet_shared_returns_400(self):
        with self.network(shared=True) as network:
            with self.subnet(network=network) as subnet:
                data = {'subnet': {'shared': True}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPClientError.code,
                                 res.status_int)

    def test_update_subnet_gw_outside_cidr_returns_200(self):
        with self.network() as network:
            with self.subnet(network=network) as subnet:
                data = {'subnet': {'gateway_ip': '100.0.0.1'}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPOk.code,
                                 res.status_int)

    def test_update_subnet_gw_ip_in_use_by_router_returns_409(self):
        with self.network() as network:
            with self.subnet(
                    network=network,
                    allocation_pools=[{'start': '10.0.0.2',
                                       'end': '10.0.0.8'}]) as subnet:
                s = subnet['subnet']
                with self.port(
                        subnet=subnet,
                        fixed_ips=[{'subnet_id': s['id'],
                                    'ip_address': s['gateway_ip']}]
                ) as port:
                    # this protection only applies to router ports so we
                    # need to make this port belong to a router
                    ctx = context.get_admin_context()
                    with db_api.CONTEXT_WRITER.using(ctx):
                        router = l3_models.Router()
                        ctx.session.add(router)
                    rp = l3_obj.RouterPort(ctx, router_id=router.id,
                                           port_id=port['port']['id'])
                    rp.create()

                    data = {'subnet': {'gateway_ip': '10.0.0.99'}}
                    req = self.new_update_request('subnets', data,
                                                  s['id'])
                    res = req.get_response(self.api)
                    self.assertEqual(409, res.status_int)
                    # should work fine if it's not a router port
                    rp.delete()
                    with db_api.CONTEXT_WRITER.using(ctx):
                        ctx.session.delete(router)
                    res = req.get_response(self.api)
                    self.assertEqual(200, res.status_int)

    def test_update_subnet_invalid_gw_V4_cidr(self):
        with self.network() as network:
            with self.subnet(network=network) as subnet:
                data = {'subnet': {'cidr': '10.0.0.0/4'}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPClientError.code,
                                 res.status_int)

    def test_update_subnet_inconsistent_ipv4_gatewayv6(self):
        with self.network() as network:
            with self.subnet(network=network) as subnet:
                data = {'subnet': {'gateway_ip': 'fe80::1'}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPClientError.code,
                                 res.status_int)

    def test_update_subnet_inconsistent_ipv6_gatewayv4(self):
        with self.network() as network:
            with self.subnet(network=network,
                             ip_version=constants.IP_VERSION_6,
                             cidr='fe80::/48') as subnet:
                data = {'subnet': {'gateway_ip': '10.1.1.1'}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPClientError.code,
                                 res.status_int)

    def test_update_subnet_inconsistent_ipv4_dns_v6(self):
        dns_nameservers = ['fe80::1']
        with self.network() as network:
            with self.subnet(network=network) as subnet:
                data = {'subnet': {'dns_nameservers': dns_nameservers}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPClientError.code,
                                 res.status_int)

    def test_update_subnet_inconsistent_ipv6_hostroute_dst_v4(self):
        host_routes = [{'destination': 'fe80::0/48',
                        'nexthop': '10.0.2.20'}]
        with self.network() as network:
            with self.subnet(network=network,
                             ip_version=constants.IP_VERSION_6,
                             cidr='fe80::/48') as subnet:
                data = {'subnet': {'host_routes': host_routes}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPClientError.code,
                                 res.status_int)

    def test_update_subnet_inconsistent_ipv6_hostroute_np_v4(self):
        host_routes = [{'destination': '172.16.0.0/24',
                        'nexthop': 'fe80::1'}]
        with self.network() as network:
            with self.subnet(network=network,
                             ip_version=constants.IP_VERSION_6,
                             cidr='fe80::/48') as subnet:
                data = {'subnet': {'host_routes': host_routes}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPClientError.code,
                                 res.status_int)

    def test_update_subnet_gateway_in_allocation_pool_returns_409(self):
        allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}]
        with self.network() as network:
            with self.subnet(network=network,
                             allocation_pools=allocation_pools,
                             cidr='10.0.0.0/24') as subnet:
                data = {'subnet': {'gateway_ip': '10.0.0.50'}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPConflict.code,
                                 res.status_int)

    def test_update_subnet_ipv6_attributes_fails(self):
        with self.subnet(ip_version=constants.IP_VERSION_6,
                         cidr='fe80::/64',
                         ipv6_ra_mode=constants.IPV6_SLAAC,
                         ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
            data = {'subnet': {'ipv6_ra_mode': constants.DHCPV6_STATEFUL,
                               'ipv6_address_mode':
                                   constants.DHCPV6_STATEFUL}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             res.status_int)

    def test_update_subnet_ipv6_ra_mode_fails(self):
        with self.subnet(ip_version=constants.IP_VERSION_6,
                         cidr='fe80::/64',
                         ipv6_ra_mode=constants.IPV6_SLAAC) as subnet:
            data = {'subnet': {'ipv6_ra_mode': constants.DHCPV6_STATEFUL}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             res.status_int)

    def test_update_subnet_ipv6_address_mode_fails(self):
        with self.subnet(ip_version=constants.IP_VERSION_6,
                         cidr='fe80::/64',
                         ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
            data = {'subnet': {'ipv6_address_mode':
                               constants.DHCPV6_STATEFUL}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             res.status_int)

    def test_update_subnet_ipv6_cannot_disable_dhcp(self):
        with self.subnet(ip_version=constants.IP_VERSION_6,
                         cidr='fe80::/64',
                         ipv6_ra_mode=constants.IPV6_SLAAC,
                         ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
            data = {'subnet': {'enable_dhcp': False}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code,
                             res.status_int)

    def test_update_subnet_ipv6_ra_mode_ip_version_4(self):
        with self.network() as network:
            with self.subnet(network=network) as subnet:
                data = {'subnet': {'ipv6_ra_mode':
                                   constants.DHCPV6_STATEFUL}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPClientError.code,
                                 res.status_int)

    def test_update_subnet_ipv6_address_mode_ip_version_4(self):
        with self.network() as network:
            with self.subnet(network=network) as subnet:
                data = {'subnet': {'ipv6_address_mode':
                                   constants.DHCPV6_STATEFUL}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPClientError.code,
                                 res.status_int)

    def _verify_updated_subnet_allocation_pools(self, res, with_gateway_ip):
        res = self.deserialize(self.fmt, res)
        self.assertEqual(2, len(res['subnet']['allocation_pools']))
        res_vals = (
            list(res['subnet']['allocation_pools'][0].values()) +
            list(res['subnet']['allocation_pools'][1].values())
        )
        for pool_val in ['10', '20', '30', '40']:
            self.assertIn('192.168.0.%s' % pool_val, res_vals)
        if with_gateway_ip:
            self.assertEqual('192.168.0.9',
                             res['subnet']['gateway_ip'])

    def _test_update_subnet_allocation_pools(self, with_gateway_ip=False):
        """Test that we can successfully update with sane params.

        This will create a subnet with the specified allocation_pools,
        then issue an update (PUT) to change them using correct
        (i.e. non-erroneous) params, and finally retrieve the updated
        subnet and verify the result.
        """
        allocation_pools = [{'start': '192.168.0.2', 'end': '192.168.0.254'}]
        with self.network() as network:
            with self.subnet(network=network,
                             allocation_pools=allocation_pools,
                             cidr='192.168.0.0/24') as subnet:
                data = {'subnet': {'allocation_pools': [
                    {'start': '192.168.0.10', 'end': '192.168.0.20'},
                    {'start': '192.168.0.30', 'end': '192.168.0.40'}]}}
                if with_gateway_ip:
                    data['subnet']['gateway_ip'] = '192.168.0.9'
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                # check res code and contents
                res = req.get_response(self.api)
                self.assertEqual(200, res.status_code)
                self._verify_updated_subnet_allocation_pools(res,
                                                             with_gateway_ip)
                # GET subnet to verify DB updated correctly
                req = self.new_show_request('subnets',
                                            subnet['subnet']['id'],
                                            self.fmt)
                res = req.get_response(self.api)
                self._verify_updated_subnet_allocation_pools(res,
                                                             with_gateway_ip)

    def test_update_subnet_allocation_pools(self):
        self._test_update_subnet_allocation_pools()

    def test_update_subnet_allocation_pools_and_gateway_ip(self):
        self._test_update_subnet_allocation_pools(with_gateway_ip=True)

    # updating alloc pool to something outside subnet.cidr
    def test_update_subnet_allocation_pools_invalid_pool_for_cidr(self):
        """Test update alloc pool to something outside subnet.cidr.

        This makes sure that an erroneous allocation_pool specified
        in a subnet update (outside subnet cidr) will result in an error.
        """
        allocation_pools = [{'start': '192.168.0.2', 'end': '192.168.0.254'}]
        with self.network() as network:
            with self.subnet(network=network,
                             allocation_pools=allocation_pools,
                             cidr='192.168.0.0/24') as subnet:
                data = {'subnet': {'allocation_pools': [
                    {'start': '10.0.0.10', 'end': '10.0.0.20'}]}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPClientError.code,
                                 res.status_int)

    # updating alloc pool on top of existing subnet.gateway_ip
    def test_update_subnet_allocation_pools_over_gateway_ip_returns_409(self):
        allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}]
        with self.network() as network:
            with self.subnet(network=network,
                             allocation_pools=allocation_pools,
                             cidr='10.0.0.0/24') as subnet:
                data = {'subnet': {'allocation_pools': [
                    {'start': '10.0.0.1', 'end': '10.0.0.254'}]}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPConflict.code,
                                 res.status_int)

    def test_update_subnet_allocation_pools_invalid_returns_400(self):
        allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}]
        with self.network() as network:
            with self.subnet(network=network,
                             allocation_pools=allocation_pools,
                             cidr='10.0.0.0/24') as subnet:
                # Check allocation pools
                invalid_pools = [[{'end': '10.0.0.254'}],
                                 [{'start': '10.0.0.254'}],
                                 [{'start': '1000.0.0.254'}],
                                 [{'start': '10.0.0.2',
                                   'end': '10.0.0.254'},
                                  {'end': '10.0.0.254'}],
                                 None,
                                 [{'start': '10.0.0.200',
                                   'end': '10.0.3.20'}],
                                 [{'start': '10.0.2.250',
                                   'end': '10.0.3.5'}],
                                 [{'start': '10.0.0.0',
                                   'end': '10.0.0.50'}],
                                 [{'start': '10.0.2.10',
                                   'end': '10.0.2.5'}],
                                 [{'start': 'fe80::2',
                                   'end': 'fe80::ffff'}]]
                for pool in invalid_pools:
                    data = {'subnet': {'allocation_pools': pool}}
                    req = self.new_update_request('subnets', data,
                                                  subnet['subnet']['id'])
                    res = req.get_response(self.api)
                    self.assertEqual(webob.exc.HTTPClientError.code,
                                     res.status_int)

    def test_update_subnet_allocation_pools_overlapping_returns_409(self):
        allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}]
        with self.network() as network:
            with self.subnet(network=network,
                             allocation_pools=allocation_pools,
                             cidr='10.0.0.0/24') as subnet:
                data = {'subnet': {'allocation_pools': [
                    {'start': '10.0.0.20', 'end': '10.0.0.40'},
                    {'start': '10.0.0.30', 'end': '10.0.0.50'}]}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPConflict.code,
                                 res.status_int)

    def test_create_subnets_native_quotas(self):
        quota = 1
        cfg.CONF.set_override('quota_subnet', quota, group='QUOTAS')
        with self.network() as network:
            res = self._create_subnet(
                self.fmt, network['network']['id'], '10.0.0.0/24',
                tenant_id=network['network']['tenant_id'])
            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
            res = self._create_subnet(
                self.fmt, network['network']['id'], '10.1.0.0/24',
                tenant_id=network['network']['tenant_id'])
            self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)

    def test_create_subnets_bulk_native_quotas(self):
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk subnet "
                          "create")
        quota = 4
        cfg.CONF.set_override('quota_subnet', quota, group='QUOTAS')
        with self.network() as network:
            res = self._create_subnet_bulk(self.fmt, quota + 1,
                                           network['network']['id'],
                                           'test')
            self._validate_behavior_on_bulk_failure(
                res, 'subnets',
                errcode=webob.exc.HTTPConflict.code)

    def test_show_subnet(self):
        with self.network() as network:
            with self.subnet(network=network) as subnet:
                req = self.new_show_request('subnets',
                                            subnet['subnet']['id'])
                res = self.deserialize(self.fmt, req.get_response(self.api))
                self.assertEqual(res['subnet']['id'],
                                 subnet['subnet']['id'])
                self.assertEqual(res['subnet']['network_id'],
                                 network['network']['id'])

    def test_get_subnets_count(self):
        with self.network() as network:
            with self.subnet(network=network, gateway_ip='10.0.0.1',
                             cidr='10.0.0.0/24'),\
                    self.subnet(network=network, gateway_ip='10.0.1.1',
                                cidr='10.0.1.0/24'),\
                    self.subnet(network=network, gateway_ip='10.0.2.1',
                                cidr='10.0.2.0/24'):
                project_id = network['network']['project_id']
                ctx = context.Context(user_id=None, tenant_id=project_id,
                                      is_admin=False)
                pl = directory.get_plugin()
                count = pl.get_subnets_count(
                    ctx, filters={'project_id': [project_id]})
                self.assertEqual(3, count)

    def test_get_subnets_count_filter_by_project_id(self):
        project_id = uuidutils.generate_uuid()
        with self.network() as network:
            with self.subnet(network=network, gateway_ip='10.0.0.1',
                             cidr='10.0.0.0/24', tenant_id=project_id),\
                    self.subnet(network=network, gateway_ip='10.0.1.1',
                                cidr='10.0.1.0/24'),\
                    self.subnet(network=network, gateway_ip='10.0.2.1',
                                cidr='10.0.2.0/24'):
                ctx = context.Context(user_id=None, tenant_id=project_id,
                                      is_admin=True)
                pl = directory.get_plugin()
                count = pl.get_subnets_count(
                    ctx, filters={'project_id': [project_id]})
                self.assertEqual(1, count)

                net_project_id = network['network']['project_id']
                count = pl.get_subnets_count(
                    ctx, filters={'project_id': [net_project_id]})
                self.assertEqual(2, count)

    def test_get_subnets_count_filter_by_unknown_filter(self):
        with self.network() as network:
            with self.subnet(network=network, gateway_ip='10.0.0.1',
                             cidr='10.0.0.0/24'),\
                    self.subnet(network=network, gateway_ip='10.0.1.1',
                                cidr='10.0.1.0/24'),\
                    self.subnet(network=network, gateway_ip='10.0.2.1',
                                cidr='10.0.2.0/24'):
                project_id = network['network']['project_id']
                ctx = context.Context(user_id=None, tenant_id=project_id,
                                      is_admin=False)
                pl = directory.get_plugin()
                count = pl.get_subnets_count(
                    ctx, filters={'fake_filter': [True]})
                self.assertEqual(3, count)
                # change the filter value and get the same result
                count = pl.get_subnets_count(
                    ctx, filters={'fake_filter': [False]})
                self.assertEqual(3, count)

    def test_list_subnets(self):
        with self.network() as network:
            with self.subnet(network=network,
                             gateway_ip='10.0.0.1',
                             cidr='10.0.0.0/24') as v1,\
                    self.subnet(network=network,
                                gateway_ip='10.0.1.1',
                                cidr='10.0.1.0/24') as v2,\
                    self.subnet(network=network,
                                gateway_ip='10.0.2.1',
                                cidr='10.0.2.0/24') as v3:
                subnets = (v1, v2, v3)
                self._test_list_resources('subnet', subnets)

    def test_list_subnets_shared(self):
        with self.network(shared=True) as network:
            with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
                with self.subnet(cidr='10.0.1.0/24') as priv_subnet:
                    # normal user should see only 1 subnet
                    req = self.new_list_request('subnets')
                    req.environ['neutron.context'] = context.Context(
                        '', 'some_tenant')
                    res = self.deserialize(self.fmt,
                                           req.get_response(self.api))
                    self.assertEqual(1, len(res['subnets']))
                    self.assertEqual(res['subnets'][0]['cidr'],
                                     subnet['subnet']['cidr'])
                    # admin will see both subnets
                    admin_req = self.new_list_request('subnets')
                    admin_res = self.deserialize(
                        self.fmt, admin_req.get_response(self.api))
                    self.assertEqual(2, len(admin_res['subnets']))
                    cidrs = [sub['cidr'] for sub in admin_res['subnets']]
                    self.assertIn(subnet['subnet']['cidr'], cidrs)
                    self.assertIn(priv_subnet['subnet']['cidr'], cidrs)

    def test_list_subnets_filtering_by_project_id(self):
        with self.network() as network:
            with self.subnet(network=network,
                             gateway_ip='10.0.0.1',
                             cidr='10.0.0.0/24') as v1,\
                    self.subnet(network=network,
                                gateway_ip='10.0.1.1',
                                cidr='10.0.1.0/24') as v2:
                subnets = (v1, v2)
                query_params = ('project_id={0}'.
                                format(network['network']['project_id']))
                self._test_list_resources('subnet', subnets,
                                          query_params=query_params)
                query_params = ('project_id={0}'.
                                format(uuidutils.generate_uuid()))
                self._test_list_resources('subnet', [],
                                          query_params=query_params)

    def test_list_subnets_filtering_by_cidr_used_on_create(self):
        with self.network() as network:
            with self.subnet(network=network,
                             gateway_ip='10.0.0.1',
                             cidr='10.0.0.11/24') as v1,\
                    self.subnet(network=network,
                                gateway_ip='10.0.1.1',
                                cidr='10.0.1.11/24') as v2:
                subnets = (v1, v2)
                query_params = 'cidr=10.0.0.11/24&cidr=10.0.1.11/24'
                self._test_list_resources('subnet', subnets,
                                          query_params=query_params)

    def test_list_subnets_filtering_by_unknown_filter(self):
        if self._skip_filter_validation:
            self.skipTest("Plugin does not support filter validation")
        with self.network() as network:
            with self.subnet(network=network,
                             gateway_ip='10.0.0.1',
                             cidr='10.0.0.0/24') as v1,\
                    self.subnet(network=network,
                                gateway_ip='10.0.1.1',
                                cidr='10.0.1.0/24') as v2:
                subnets = (v1, v2)
                query_params = 'admin_state_up=True'
                self._test_list_resources(
                    'subnet', subnets, query_params=query_params,
                    expected_code=webob.exc.HTTPClientError.code)
                # test with other value to check if we have the same results
                query_params = 'admin_state_up=False'
                self._test_list_resources(
                    'subnet', subnets, query_params=query_params,
                    expected_code=webob.exc.HTTPClientError.code)

    def test_list_subnets_with_parameter(self):
        with self.network() as network:
            with self.subnet(network=network,
                             gateway_ip='10.0.0.1',
                             cidr='10.0.0.0/24') as v1,\
                    self.subnet(network=network,
                                gateway_ip='10.0.1.1',
                                cidr='10.0.1.0/24') as v2:
                subnets = (v1, v2)
                query_params = 'ip_version=%s&ip_version=%s' % (
                    constants.IP_VERSION_4, constants.IP_VERSION_6)
                self._test_list_resources('subnet', subnets,
                                          query_params=query_params)
                query_params = 'ip_version=%s' % constants.IP_VERSION_6
                self._test_list_resources('subnet', [],
                                          query_params=query_params)

    def test_list_subnets_with_sort_native(self):
        if self._skip_native_sorting:
            self.skipTest("Skip test for not implemented sorting feature")
        with self.subnet(enable_dhcp=True, cidr='10.0.0.0/24') as subnet1,\
                self.subnet(enable_dhcp=False,
                            cidr='11.0.0.0/24') as subnet2,\
                self.subnet(enable_dhcp=False,
                            cidr='12.0.0.0/24') as subnet3:
            self._test_list_with_sort('subnet', (subnet3, subnet2, subnet1),
                                      [('enable_dhcp', 'asc'),
                                       ('cidr', 'desc')])

    def test_list_subnets_with_sort_emulated(self):
        helper_patcher = mock.patch(
            'neutron.api.v2.base.Controller._get_sorting_helper',
            new=_fake_get_sorting_helper)
        helper_patcher.start()
        with self.subnet(enable_dhcp=True, cidr='10.0.0.0/24') as subnet1,\
                self.subnet(enable_dhcp=False,
                            cidr='11.0.0.0/24') as subnet2,\
                self.subnet(enable_dhcp=False,
                            cidr='12.0.0.0/24') as subnet3:
            self._test_list_with_sort('subnet', (subnet3, subnet2, subnet1),
                                      [('enable_dhcp', 'asc'),
                                       ('cidr', 'desc')])

    def test_list_subnets_with_pagination_native(self):
        if self._skip_native_pagination:
            self.skipTest("Skip test for not implemented pagination feature")
        with self.subnet(cidr='10.0.0.0/24') as subnet1,\
                self.subnet(cidr='11.0.0.0/24') as subnet2,\
                self.subnet(cidr='12.0.0.0/24') as subnet3:
            self._test_list_with_pagination('subnet',
                                            (subnet1, subnet2, subnet3),
                                            ('cidr', 'asc'), 2, 2)

    def test_list_subnets_with_pagination_emulated(self):
        helper_patcher = mock.patch(
            'neutron.api.v2.base.Controller._get_pagination_helper',
            new=_fake_get_pagination_helper)
        helper_patcher.start()
        with self.subnet(cidr='10.0.0.0/24') as subnet1,\
                self.subnet(cidr='11.0.0.0/24') as subnet2,\
                self.subnet(cidr='12.0.0.0/24') as subnet3:
            self._test_list_with_pagination('subnet',
                                            (subnet1, subnet2, subnet3),
                                            ('cidr', 'asc'), 2, 2)

    def test_list_subnets_with_pagination_reverse_native(self):
        if self._skip_native_pagination:
            self.skipTest("Skip test for not implemented pagination feature")
        with self.subnet(cidr='10.0.0.0/24') as subnet1,\
                self.subnet(cidr='11.0.0.0/24') as subnet2,\
                self.subnet(cidr='12.0.0.0/24') as subnet3:
            self._test_list_with_pagination_reverse('subnet',
                                                    (subnet1, subnet2,
                                                     subnet3),
                                                    ('cidr', 'asc'), 2, 2)

    def test_list_subnets_with_pagination_reverse_emulated(self):
        helper_patcher = mock.patch(
            'neutron.api.v2.base.Controller._get_pagination_helper',
            new=_fake_get_pagination_helper)
        helper_patcher.start()
        with self.subnet(cidr='10.0.0.0/24') as subnet1,\
                self.subnet(cidr='11.0.0.0/24') as subnet2,\
                self.subnet(cidr='12.0.0.0/24') as subnet3:
            self._test_list_with_pagination_reverse('subnet',
                                                    (subnet1, subnet2,
                                                     subnet3),
                                                    ('cidr', 'asc'), 2, 2)

    def test_invalid_ip_version(self):
        with self.network() as network:
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': '10.0.2.0/24',
                               'ip_version': 7,
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': '10.0.2.1'}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def test_invalid_subnet(self):
        with self.network() as network:
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': 'invalid',
                               'ip_version': constants.IP_VERSION_4,
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': '10.0.2.1'}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def _test_unsupported_subnet_cidr(self, subnet_cidr):
        with self.network() as network:
            subnet = {'network_id': network['network']['id'],
                      'cidr': subnet_cidr,
                      'ip_version': constants.IP_VERSION_4,
                      'enable_dhcp': True,
                      'tenant_id': network['network']['tenant_id']}
            plugin = directory.get_plugin()
            if hasattr(plugin, '_validate_subnet'):
                self.assertRaises(lib_exc.InvalidInput,
                                  plugin._validate_subnet,
                                  context.get_admin_context(),
                                  subnet)

    def test_unsupported_subnet_cidr_multicast(self):
        self._test_unsupported_subnet_cidr("224.0.0.1/16")

    def test_unsupported_subnet_cidr_loopback(self):
        self._test_unsupported_subnet_cidr("127.0.0.1/8")

    def test_invalid_ip_address(self):
        with self.network() as network:
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': '10.0.2.0/24',
                               'ip_version': constants.IP_VERSION_4,
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': 'ipaddress'}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def test_invalid_uuid(self):
        with self.network() as network:
            data = {'subnet': {'network_id': 'invalid-uuid',
                               'cidr': '10.0.2.0/24',
                               'ip_version': constants.IP_VERSION_4,
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': '10.0.0.1'}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def test_create_subnet_with_one_dns(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        allocation_pools = [{'start': '10.0.0.2',
                             'end': '10.0.0.100'}]
        dns_nameservers = ['1.2.3.4']
        self._test_create_subnet(gateway_ip=gateway_ip,
                                 cidr=cidr,
                                 allocation_pools=allocation_pools,
                                 dns_nameservers=dns_nameservers)

    def test_create_subnet_with_two_dns(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        allocation_pools = [{'start': '10.0.0.2',
                             'end': '10.0.0.100'}]
        dns_nameservers = ['1.2.3.4', '4.3.2.1']
        self._test_create_subnet(gateway_ip=gateway_ip,
                                 cidr=cidr,
                                 allocation_pools=allocation_pools,
                                 dns_nameservers=dns_nameservers)

    def test_create_subnet_with_too_many_dns(self):
        with self.network() as network:
            dns_list = ['1.1.1.1', '2.2.2.2', '3.3.3.3']
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': '10.0.2.0/24',
                               'ip_version': constants.IP_VERSION_4,
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': '10.0.0.1',
                               'dns_nameservers': dns_list}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def test_create_subnet_with_one_host_route(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        allocation_pools = [{'start': '10.0.0.2',
                             'end': '10.0.0.100'}]
        host_routes = [{'destination': '135.207.0.0/16',
                        'nexthop': '1.2.3.4'}]
        self._test_create_subnet(gateway_ip=gateway_ip,
                                 cidr=cidr,
                                 allocation_pools=allocation_pools,
                                 host_routes=host_routes)

    def test_create_subnet_with_two_host_routes(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        allocation_pools = [{'start': '10.0.0.2',
                             'end': '10.0.0.100'}]
        host_routes = [{'destination': '135.207.0.0/16',
                        'nexthop': '1.2.3.4'},
                       {'destination': '12.0.0.0/8',
                        'nexthop': '4.3.2.1'}]
        self._test_create_subnet(gateway_ip=gateway_ip,
                                 cidr=cidr,
                                 allocation_pools=allocation_pools,
                                 host_routes=host_routes)

    def test_create_subnet_with_too_many_routes(self):
        with self.network() as network:
            host_routes = [{'destination': '135.207.0.0/16',
                            'nexthop': '1.2.3.4'},
                           {'destination': '12.0.0.0/8',
                            'nexthop': '4.3.2.1'},
                           {'destination': '141.212.0.0/16',
                            'nexthop': '2.2.2.2'}]
            data = {'subnet': {'network_id': network['network']['id'],
                               'cidr': '10.0.2.0/24',
                               'ip_version': constants.IP_VERSION_4,
                               'tenant_id': network['network']['tenant_id'],
                               'gateway_ip': '10.0.0.1',
                               'host_routes': host_routes}}
            subnet_req = self.new_create_request('subnets', data)
            res = subnet_req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def test_update_subnet_dns(self):
        with self.subnet() as subnet:
            data = {'subnet': {'dns_nameservers': ['11.0.0.1']}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(data['subnet']['dns_nameservers'],
                             res['subnet']['dns_nameservers'])

    def test_subnet_lifecycle_dns_retains_order(self):
        cfg.CONF.set_override('max_dns_nameservers', 3)
        with self.subnet(dns_nameservers=['1.1.1.1', '2.2.2.2',
                                          '3.3.3.3']) as subnet:
            subnets = self._show('subnets', subnet['subnet']['id'],
                                 expected_code=webob.exc.HTTPOk.code)
            self.assertEqual(['1.1.1.1', '2.2.2.2', '3.3.3.3'],
                             subnets['subnet']['dns_nameservers'])
            data = {'subnet': {'dns_nameservers': ['2.2.2.2', '3.3.3.3',
                                                   '1.1.1.1']}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(data['subnet']['dns_nameservers'],
                             res['subnet']['dns_nameservers'])
            subnets = self._show('subnets', subnet['subnet']['id'],
                                 expected_code=webob.exc.HTTPOk.code)
            self.assertEqual(data['subnet']['dns_nameservers'],
                             subnets['subnet']['dns_nameservers'])

    def test_update_subnet_dns_to_None(self):
        with self.subnet(dns_nameservers=['11.0.0.1']) as subnet:
            data = {'subnet': {'dns_nameservers': None}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual([], res['subnet']['dns_nameservers'])
            data = {'subnet': {'dns_nameservers': ['11.0.0.3']}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(data['subnet']['dns_nameservers'],
                             res['subnet']['dns_nameservers'])

    def test_update_subnet_dns_with_too_many_entries(self):
        with self.subnet() as subnet:
            dns_list = ['1.1.1.1', '2.2.2.2', '3.3.3.3']
            data = {'subnet': {'dns_nameservers': dns_list}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def test_update_subnet_route(self):
        with self.subnet() as subnet:
            data = {'subnet': {'host_routes':
                               [{'destination': '12.0.0.0/8',
                                 'nexthop': '1.2.3.4'}]}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(data['subnet']['host_routes'],
                             res['subnet']['host_routes'])

    def test_update_subnet_route_to_None(self):
        with self.subnet(host_routes=[{'destination': '12.0.0.0/8',
                                       'nexthop': '1.2.3.4'}]) as subnet:
            data = {'subnet': {'host_routes': None}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual([], res['subnet']['host_routes'])
            data = {'subnet': {'host_routes':
                               [{'destination': '12.0.0.0/8',
                                 'nexthop': '1.2.3.4'}]}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(data['subnet']['host_routes'],
                             res['subnet']['host_routes'])

    def _test_update_subnet(self, old_gw=None, new_gw=None,
                            check_gateway=False):
        allocation_pools = [{'start': '192.168.0.16',
                             'end': '192.168.0.254'}]
        with self.network() as network:
            with self.subnet(network=network,
                             gateway_ip=old_gw,
                             allocation_pools=allocation_pools,
                             cidr='192.168.0.0/24') as subnet:
                data = {
                    'subnet': {
                        'allocation_pools': [
                            {'start': '192.168.0.10',
                             'end': '192.168.0.20'},
                            {'start': '192.168.0.30',
                             'end': '192.168.0.40'}],
                        'gateway_ip': new_gw}}
                req = self.new_update_request('subnets', data,
                                              subnet['subnet']['id'])
                res = req.get_response(self.api)
                self.assertEqual(200, res.status_code)
                self._verify_updated_subnet_allocation_pools(
                    res, with_gateway_ip=check_gateway)

    def test_update_subnet_from_no_gw_to_no_gw(self):
        self._test_update_subnet()

    def test_update_subnet_from_gw_to_no_gw(self):
        self._test_update_subnet(old_gw='192.168.0.15')

    def test_update_subnet_from_gw_to_new_gw(self):
        self._test_update_subnet(old_gw='192.168.0.15',
                                 new_gw='192.168.0.9',
                                 check_gateway=True)

    def test_update_subnet_route_with_too_many_entries(self):
        with self.subnet() as subnet:
            data = {'subnet': {'host_routes': [
                {'destination': '12.0.0.0/8', 'nexthop': '1.2.3.4'},
                {'destination': '13.0.0.0/8', 'nexthop': '1.2.3.5'},
                {'destination': '14.0.0.0/8', 'nexthop': '1.2.3.6'}]}}
            req = self.new_update_request('subnets', data,
                                          subnet['subnet']['id'])
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)
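
    # Editor's note: the "too many entries" tests above assume the test
    # fixture lowers the API limits (apparently to 2 here, since three
    # entries are rejected); the equivalent neutron.conf settings would be
    # (values assumed for illustration):
    #
    #     [DEFAULT]
    #     max_dns_nameservers = 2
    #     max_subnet_host_routes = 2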

    def test_delete_subnet_with_dns(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        dns_nameservers = ['1.2.3.4']
        # Create new network
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        subnet = self._make_subnet(self.fmt, network, gateway_ip,
                                   cidr, ip_version=constants.IP_VERSION_4,
                                   dns_nameservers=dns_nameservers)
        req = self.new_delete_request('subnets', subnet['subnet']['id'])
        res = req.get_response(self.api)
        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)

    def test_delete_subnet_with_route(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        host_routes = [{'destination': '135.207.0.0/16',
                        'nexthop': '1.2.3.4'}]
        # Create new network
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        subnet = self._make_subnet(self.fmt, network, gateway_ip,
                                   cidr, ip_version=constants.IP_VERSION_4,
                                   host_routes=host_routes)
        req = self.new_delete_request('subnets', subnet['subnet']['id'])
        res = req.get_response(self.api)
        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)

    def test_delete_subnet_with_dns_and_route(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        dns_nameservers = ['1.2.3.4']
        host_routes = [{'destination': '135.207.0.0/16',
                        'nexthop': '1.2.3.4'}]
        # Create new network
        res = self._create_network(fmt=self.fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(self.fmt, res)
        subnet = self._make_subnet(self.fmt, network, gateway_ip,
                                   cidr, ip_version=constants.IP_VERSION_4,
                                   dns_nameservers=dns_nameservers,
                                   host_routes=host_routes)
        req = self.new_delete_request('subnets', subnet['subnet']['id'])
        res = req.get_response(self.api)
        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)

    def test_delete_subnet_with_callback(self):
        with self.subnet() as subnet,\
                mock.patch.object(registry, 'publish') as publish:
            errors = [
                exceptions.NotificationError(
                    'fake_id', lib_exc.NeutronException()),
            ]
            publish.side_effect = [
                exceptions.CallbackFailure(errors=errors), None
            ]

            # Make sure the delete request fails
            delete_request = self.new_delete_request('subnets',
                                                     subnet['subnet']['id'])
            delete_response = delete_request.get_response(self.api)

            self.assertIn('NeutronError', delete_response.json)
            self.assertEqual('SubnetInUse',
                             delete_response.json['NeutronError']['type'])

            # Make sure the subnet wasn't deleted
            list_request = self.new_list_request(
                'subnets', params="id=%s" % subnet['subnet']['id'])
            list_response = list_request.get_response(self.api)
            self.assertEqual(subnet['subnet']['id'],
                             list_response.json['subnets'][0]['id'])

    def _helper_test_validate_subnet(self, option, exception):
        cfg.CONF.set_override(option, 0)
        with self.network() as network:
            subnet = {'network_id': network['network']['id'],
                      'cidr': '10.0.2.0/24',
                      'ip_version': constants.IP_VERSION_4,
                      'tenant_id': network['network']['tenant_id'],
                      'gateway_ip': '10.0.2.1',
                      'dns_nameservers': ['8.8.8.8'],
                      'host_routes': [{'destination': '135.207.0.0/16',
                                       'nexthop': '1.2.3.4'}]}
            plugin = directory.get_plugin()
            e = self.assertRaises(exception,
                                  plugin._validate_subnet,
                                  context.get_admin_context(),
                                  subnet)
            self.assertThat(
                str(e),
                matchers.Not(matchers.Contains('built-in function id')))

    def test_validate_subnet_dns_nameservers_exhausted(self):
        self._helper_test_validate_subnet(
            'max_dns_nameservers',
            lib_exc.DNSNameServersExhausted)

    def test_validate_subnet_host_routes_exhausted(self):
        self._helper_test_validate_subnet(
            'max_subnet_host_routes',
            lib_exc.HostRoutesExhausted)

    def test_port_prevents_network_deletion(self):
        with self.port() as p:
            self._delete('networks', p['port']['network_id'],
                         expected_code=webob.exc.HTTPConflict.code)

    def test_port_prevents_subnet_deletion(self):
        with self.port() as p:
            self._delete('subnets', p['port']['fixed_ips'][0]['subnet_id'],
                         expected_code=webob.exc.HTTPConflict.code)


class TestSubnetPoolsV2(NeutronDbPluginV2TestCase):
    _POOL_NAME = 'test-pool'

    def _test_create_subnetpool(self, prefixes, expected=None,
                                admin=False, **kwargs):
        keys = kwargs.copy()
        keys.setdefault('tenant_id', self._tenant_id)
        with self.subnetpool(prefixes, admin, **keys) as subnetpool:
            self._validate_resource(subnetpool, keys, 'subnetpool')
            if expected:
                self._compare_resource(subnetpool, expected, 'subnetpool')
        return subnetpool

    def _validate_default_prefix(self, prefix, subnetpool):
        self.assertEqual(subnetpool['subnetpool']['default_prefixlen'],
                         prefix)

    def _validate_min_prefix(self, prefix, subnetpool):
        self.assertEqual(subnetpool['subnetpool']['min_prefixlen'], prefix)

    def _validate_max_prefix(self, prefix, subnetpool):
        self.assertEqual(subnetpool['subnetpool']['max_prefixlen'], prefix)

    def _validate_is_default(self, subnetpool):
        self.assertTrue(subnetpool['subnetpool']['is_default'])

    def test_create_subnetpool_empty_prefix_list(self):
        self.assertRaises(webob.exc.HTTPClientError,
                          self._test_create_subnetpool,
                          [],
                          name=self._POOL_NAME,
                          tenant_id=self._tenant_id,
                          min_prefixlen='21')

    def test_create_default_subnetpools(self):
        for cidr, min_prefixlen in (['fe80::/48', '64'],
                                    ['10.10.10.0/24', '24']):
            pool = self._test_create_subnetpool([cidr],
                                                admin=True,
                                                tenant_id=self._tenant_id,
                                                name=self._POOL_NAME,
                                                min_prefixlen=min_prefixlen,
                                                is_default=True)
            self._validate_is_default(pool)

    def test_cannot_create_multiple_default_subnetpools(self):
        for cidr1, cidr2, min_prefixlen in (
                ['fe80::/48', '2001::/48', '64'],
                ['10.10.10.0/24', '10.10.20.0/24', '24']):
            pool = self._test_create_subnetpool([cidr1],
                                                admin=True,
                                                tenant_id=self._tenant_id,
                                                name=self._POOL_NAME,
                                                min_prefixlen=min_prefixlen,
                                                is_default=True)
            self._validate_is_default(pool)
            self.assertRaises(webob.exc.HTTPClientError,
                              self._test_create_subnetpool,
                              [cidr2],
                              admin=True,
                              tenant_id=self._tenant_id,
                              name=self._POOL_NAME,
                              min_prefixlen=min_prefixlen,
                              is_default=True)

    def test_create_subnetpool_ipv4_24_with_defaults(self):
        subnet = netaddr.IPNetwork('10.10.10.0/24')
        subnetpool = self._test_create_subnetpool([subnet.cidr],
                                                  name=self._POOL_NAME,
                                                  tenant_id=self._tenant_id,
                                                  min_prefixlen='21')
        self._validate_default_prefix('21', subnetpool)
        self._validate_min_prefix('21', subnetpool)

    def test_create_subnetpool_ipv4_21_with_defaults(self):
        subnet = netaddr.IPNetwork('10.10.10.0/21')
        subnetpool = self._test_create_subnetpool([subnet.cidr],
                                                  name=self._POOL_NAME,
                                                  tenant_id=self._tenant_id,
                                                  min_prefixlen='21')
        self._validate_default_prefix('21', subnetpool)
        self._validate_min_prefix('21', subnetpool)

    def test_create_subnetpool_ipv4_default_prefix_too_small(self):
        subnet = netaddr.IPNetwork('10.10.10.0/21')
        self.assertRaises(webob.exc.HTTPClientError,
                          self._test_create_subnetpool,
                          [subnet.cidr],
                          tenant_id=self._tenant_id,
                          name=self._POOL_NAME,
                          min_prefixlen='21',
                          default_prefixlen='20')

    def test_create_subnetpool_ipv4_default_prefix_too_large(self):
        subnet = netaddr.IPNetwork('10.10.10.0/21')
        self.assertRaises(webob.exc.HTTPClientError,
                          self._test_create_subnetpool,
                          [subnet.cidr],
                          tenant_id=self._tenant_id,
                          name=self._POOL_NAME,
                          max_prefixlen=24,
                          default_prefixlen='32')

    def test_create_subnetpool_ipv4_default_prefix_bounds(self):
        subnet = netaddr.IPNetwork('10.10.10.0/21')
        subnetpool = self._test_create_subnetpool(
            [subnet.cidr], tenant_id=self._tenant_id, name=self._POOL_NAME)
        self._validate_min_prefix('8', subnetpool)
        self._validate_default_prefix('8', subnetpool)
        self._validate_max_prefix('32', subnetpool)

    def test_create_subnetpool_ipv6_default_prefix_bounds(self):
        subnet = netaddr.IPNetwork('fe80::/48')
        subnetpool = self._test_create_subnetpool(
            [subnet.cidr], tenant_id=self._tenant_id, name=self._POOL_NAME)
        self._validate_min_prefix('64', subnetpool)
        self._validate_default_prefix('64', subnetpool)
        self._validate_max_prefix('128', subnetpool)

    def test_create_subnetpool_ipv4_supported_default_prefix(self):
        subnet = netaddr.IPNetwork('10.10.10.0/21')
        subnetpool = self._test_create_subnetpool([subnet.cidr],
                                                  tenant_id=self._tenant_id,
                                                  name=self._POOL_NAME,
                                                  min_prefixlen='21',
                                                  default_prefixlen='26')
        self._validate_default_prefix('26', subnetpool)

    def test_create_subnetpool_ipv4_supported_min_prefix(self):
        subnet = netaddr.IPNetwork('10.10.10.0/24')
        subnetpool = self._test_create_subnetpool([subnet.cidr],
                                                  tenant_id=self._tenant_id,
                                                  name=self._POOL_NAME,
                                                  min_prefixlen='26')
        self._validate_min_prefix('26', subnetpool)
        self._validate_default_prefix('26', subnetpool)

    def test_create_subnetpool_ipv4_default_prefix_smaller_than_min(self):
        subnet = netaddr.IPNetwork('10.10.10.0/21')
        self.assertRaises(webob.exc.HTTPClientError,
                          self._test_create_subnetpool,
                          [subnet.cidr],
                          tenant_id=self._tenant_id,
                          name=self._POOL_NAME,
                          default_prefixlen='22',
                          min_prefixlen='23')

    def test_create_subnetpool_mixed_ip_version(self):
        subnet_v4 = netaddr.IPNetwork('10.10.10.0/21')
        subnet_v6 = netaddr.IPNetwork('fe80::/48')
        self.assertRaises(webob.exc.HTTPClientError,
                          self._test_create_subnetpool,
                          [subnet_v4.cidr, subnet_v6.cidr],
                          tenant_id=self._tenant_id,
                          name=self._POOL_NAME,
                          min_prefixlen='21')

    def test_create_subnetpool_ipv6_with_defaults(self):
        subnet = netaddr.IPNetwork('fe80::/48')
        subnetpool = self._test_create_subnetpool([subnet.cidr],
                                                  tenant_id=self._tenant_id,
                                                  name=self._POOL_NAME,
                                                  min_prefixlen='48')
        self._validate_default_prefix('48', subnetpool)
        self._validate_min_prefix('48', subnetpool)

    def test_get_subnetpool(self):
        subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
                                                  tenant_id=self._tenant_id,
                                                  name=self._POOL_NAME,
                                                  min_prefixlen='24')
        req = self.new_show_request('subnetpools',
                                    subnetpool['subnetpool']['id'])
        res = self.deserialize(self.fmt, req.get_response(self.api))
        self.assertEqual(subnetpool['subnetpool']['id'],
                         res['subnetpool']['id'])

    def test_get_subnetpool_different_tenants_not_shared(self):
        subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
                                                  shared=False,
                                                  tenant_id=self._tenant_id,
                                                  name=self._POOL_NAME,
                                                  min_prefixlen='24')
        req = self.new_show_request('subnetpools',
                                    subnetpool['subnetpool']['id'])
        neutron_context = context.Context('', 'not-the-owner')
        req.environ['neutron.context'] = neutron_context
        res = req.get_response(self.api)
        self.assertEqual(404, res.status_int)

    def test_get_subnetpool_different_tenants_shared(self):
        subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
                                                  None, True,
                                                  name=self._POOL_NAME,
                                                  min_prefixlen='24',
                                                  shared=True)
        req = self.new_show_request('subnetpools',
                                    subnetpool['subnetpool']['id'])
        neutron_context = context.Context('', self._tenant_id)
        req.environ['neutron.context'] = neutron_context
        res = self.deserialize(self.fmt, req.get_response(self.api))
        self.assertEqual(subnetpool['subnetpool']['id'],
                         res['subnetpool']['id'])

    def test_list_subnetpools_different_tenants_shared(self):
        self._test_create_subnetpool(['10.10.10.0/24'],
                                     None, True,
                                     name=self._POOL_NAME,
                                     min_prefixlen='24',
                                     shared=True)
        admin_res = self._list('subnetpools')
        mortal_res = self._list(
            'subnetpools',
            neutron_context=context.Context('', 'not-the-owner'))
        self.assertEqual(1, len(admin_res['subnetpools']))
        self.assertEqual(1, len(mortal_res['subnetpools']))

    def test_list_subnetpools_different_tenants_not_shared(self):
        self._test_create_subnetpool(['10.10.10.0/24'],
                                     None, True,
                                     name=self._POOL_NAME,
                                     min_prefixlen='24',
                                     shared=False)
        admin_res = self._list('subnetpools')
        mortal_res = self._list(
            'subnetpools',
            neutron_context=context.Context('', 'not-the-owner'))
        self.assertEqual(1, len(admin_res['subnetpools']))
        self.assertEqual(0, len(mortal_res['subnetpools']))

    def test_list_subnetpools_filters_none(self):
        subnet_pool = self._test_create_subnetpool(['10.10.10.0/24'],
                                                   None, True,
                                                   name=self._POOL_NAME,
                                                   min_prefixlen='24',
                                                   shared=True)
        sp_list = self.plugin.get_subnetpools(
            context.Context('', 'not-the-owner'))
        self.assertEqual(1, len(sp_list))
        self.assertEqual(subnet_pool['subnetpool']['id'], sp_list[0]['id'])

    def test_delete_subnetpool(self):
        subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
                                                  tenant_id=self._tenant_id,
                                                  name=self._POOL_NAME,
                                                  min_prefixlen='24')
        req = self.new_delete_request('subnetpools',
                                      subnetpool['subnetpool']['id'])
        res = req.get_response(self.api)
        self.assertEqual(204, res.status_int)

    def test_delete_nonexistent_subnetpool(self):
        req = self.new_delete_request(
            'subnetpools', 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
        res = req.get_response(self._api_for_resource('subnetpools'))
        self.assertEqual(404, res.status_int)

    def test_update_subnetpool_prefix_list_append(self):
        initial_subnetpool = self._test_create_subnetpool(
            ['10.10.8.0/21'], tenant_id=self._tenant_id,
            name=self._POOL_NAME, min_prefixlen='24')

        data = {'subnetpool': {'prefixes': ['10.10.8.0/21', '3.3.3.0/24',
                                            '2.2.2.0/24']}}
        req = self.new_update_request(
            'subnetpools', data, initial_subnetpool['subnetpool']['id'])
        api = self._api_for_resource('subnetpools')
        res = self.deserialize(self.fmt, req.get_response(api))
        self.assertItemsEqual(res['subnetpool']['prefixes'],
                              ['10.10.8.0/21', '3.3.3.0/24', '2.2.2.0/24'])

    def test_update_subnetpool_prefix_list_compaction(self):
        initial_subnetpool = self._test_create_subnetpool(
            ['10.10.10.0/24'], tenant_id=self._tenant_id,
            name=self._POOL_NAME, min_prefixlen='24')

        data = {'subnetpool': {'prefixes': ['10.10.10.0/24',
                                            '10.10.11.0/24']}}
        req = self.new_update_request(
            'subnetpools', data, initial_subnetpool['subnetpool']['id'])
        api = self._api_for_resource('subnetpools')
        res = self.deserialize(self.fmt, req.get_response(api))
        self.assertItemsEqual(res['subnetpool']['prefixes'],
                              ['10.10.10.0/23'])

    def test_illegal_subnetpool_prefix_list_update(self):
        initial_subnetpool = self._test_create_subnetpool(
            ['10.10.10.0/24'], tenant_id=self._tenant_id,
            name=self._POOL_NAME, min_prefixlen='24')

        data = {'subnetpool': {'prefixes': ['10.10.11.0/24']}}
        req = self.new_update_request(
            'subnetpools', data, initial_subnetpool['subnetpool']['id'])
        api = self._api_for_resource('subnetpools')
        res = req.get_response(api)
        self.assertEqual(400, res.status_int)

    def test_update_subnetpool_default_prefix(self):
        initial_subnetpool = self._test_create_subnetpool(
            ['10.10.8.0/21'], tenant_id=self._tenant_id,
            name=self._POOL_NAME, min_prefixlen='24')

        data = {'subnetpool': {'default_prefixlen': '26'}}
        req = self.new_update_request(
            'subnetpools', data, initial_subnetpool['subnetpool']['id'])
        api = self._api_for_resource('subnetpools')
        res = self.deserialize(self.fmt, req.get_response(api))
        self.assertEqual(26, res['subnetpool']['default_prefixlen'])
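
    # Editor's sketch: the prefix "compaction" asserted above mirrors
    # netaddr's CIDR merging, e.g.:
    #
    #     >>> import netaddr
    #     >>> netaddr.cidr_merge(['10.10.10.0/24', '10.10.11.0/24'])
    #     [IPNetwork('10.10.10.0/23')]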

    def test_update_subnetpool_min_prefix(self):
        initial_subnetpool = self._test_create_subnetpool(
            ['10.10.10.0/24'], tenant_id=self._tenant_id,
            name=self._POOL_NAME, min_prefixlen='24')

        data = {'subnetpool': {'min_prefixlen': '21'}}
        req = self.new_update_request(
            'subnetpools', data, initial_subnetpool['subnetpool']['id'])
        res = self.deserialize(self.fmt, req.get_response(self.api))
        self.assertEqual(21, res['subnetpool']['min_prefixlen'])

    def test_update_subnetpool_min_prefix_larger_than_max(self):
        initial_subnetpool = self._test_create_subnetpool(
            ['10.10.10.0/24'], tenant_id=self._tenant_id,
            name=self._POOL_NAME, min_prefixlen='21', max_prefixlen='24')

        data = {'subnetpool': {'min_prefixlen': '28'}}
        req = self.new_update_request(
            'subnetpools', data, initial_subnetpool['subnetpool']['id'])
        res = req.get_response(self.api)
        self.assertEqual(400, res.status_int)

    def test_update_subnetpool_max_prefix(self):
        initial_subnetpool = self._test_create_subnetpool(
            ['10.10.10.0/24'], tenant_id=self._tenant_id,
            name=self._POOL_NAME, min_prefixlen='21', max_prefixlen='24')

        data = {'subnetpool': {'max_prefixlen': '26'}}
        req = self.new_update_request(
            'subnetpools', data, initial_subnetpool['subnetpool']['id'])
        res = self.deserialize(self.fmt, req.get_response(self.api))
        self.assertEqual(26, res['subnetpool']['max_prefixlen'])

    def test_update_subnetpool_max_prefix_less_than_min(self):
        initial_subnetpool = self._test_create_subnetpool(
            ['10.10.10.0/24'], tenant_id=self._tenant_id,
            name=self._POOL_NAME, min_prefixlen='24')

        data = {'subnetpool': {'max_prefixlen': '21'}}
        req = self.new_update_request(
            'subnetpools', data, initial_subnetpool['subnetpool']['id'])
        res = req.get_response(self.api)
        self.assertEqual(400, res.status_int)

    def test_update_subnetpool_max_prefix_less_than_default(self):
        initial_subnetpool = self._test_create_subnetpool(
            ['10.10.10.0/24'], tenant_id=self._tenant_id,
            name=self._POOL_NAME, min_prefixlen='21',
            default_prefixlen='24')

        data = {'subnetpool': {'max_prefixlen': '22'}}
        req = self.new_update_request(
            'subnetpools', data, initial_subnetpool['subnetpool']['id'])
        res = req.get_response(self.api)
        self.assertEqual(400, res.status_int)

    def test_update_subnetpool_default_prefix_less_than_min(self):
        initial_subnetpool = self._test_create_subnetpool(
            ['10.10.10.0/24'], tenant_id=self._tenant_id,
            name=self._POOL_NAME, min_prefixlen='21')

        data = {'subnetpool': {'default_prefixlen': '20'}}
        req = self.new_update_request(
            'subnetpools', data, initial_subnetpool['subnetpool']['id'])
        res = req.get_response(self.api)
        self.assertEqual(400, res.status_int)

    def test_update_subnetpool_default_prefix_larger_than_max(self):
        initial_subnetpool = self._test_create_subnetpool(
            ['10.10.10.0/24'], tenant_id=self._tenant_id,
            name=self._POOL_NAME, min_prefixlen='21', max_prefixlen='24')

        data = {'subnetpool': {'default_prefixlen': '28'}}
        req = self.new_update_request(
            'subnetpools', data, initial_subnetpool['subnetpool']['id'])
        res = req.get_response(self.api)
        self.assertEqual(400, res.status_int)

    def test_update_subnetpool_prefix_list_mixed_ip_version(self):
        initial_subnetpool = self._test_create_subnetpool(
            ['10.10.10.0/24'], tenant_id=self._tenant_id,
            name=self._POOL_NAME, min_prefixlen='24')

        data = {'subnetpool': {'prefixes': ['fe80::/48']}}
        req = self.new_update_request(
            'subnetpools', data, initial_subnetpool['subnetpool']['id'])
        res = req.get_response(self.api)
        self.assertEqual(400, res.status_int)

    def test_update_subnetpool_default_quota(self):
        initial_subnetpool = self._test_create_subnetpool(
            ['10.10.10.0/24'], tenant_id=self._tenant_id,
            name=self._POOL_NAME, min_prefixlen='24', default_quota=10)

        self.assertEqual(10,
                         initial_subnetpool['subnetpool']['default_quota'])
        data = {'subnetpool': {'default_quota': '1'}}
self.new_update_request('subnetpools', data, initial_subnetpool['subnetpool']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(1, res['subnetpool']['default_quota']) def test_allocate_subnet_bad_gateway(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/8'], tenant_id=self._tenant_id, name=self._POOL_NAME, default_prefixlen='24') # Request a subnet allocation (no CIDR) data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'prefixlen': 32, 'ip_version': constants.IP_VERSION_4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) result = req.get_response(self.api) self.assertEqual(201, result.status_int) def test_allocate_any_subnet_with_prefixlen(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') # Request a subnet allocation (no CIDR) data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'prefixlen': 24, 'ip_version': constants.IP_VERSION_4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = self.deserialize(self.fmt, req.get_response(self.api)) subnet = netaddr.IPNetwork(res['subnet']['cidr']) self.assertEqual(24, subnet.prefixlen) # Assert the allocated subnet CIDR is a subnet of our pool prefix supernet = netaddr.smallest_matching_cidr( subnet, sp['subnetpool']['prefixes']) self.assertEqual(supernet, netaddr.IPNetwork('10.10.0.0/16')) def test_allocate_any_subnet_with_default_prefixlen(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') # Request any subnet allocation using default prefix data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'ip_version': constants.IP_VERSION_4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = self.deserialize(self.fmt, req.get_response(self.api)) subnet = netaddr.IPNetwork(res['subnet']['cidr']) self.assertEqual(subnet.prefixlen, int(sp['subnetpool']['default_prefixlen'])) def test_allocate_specific_subnet_with_mismatch_prefixlen(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '10.10.1.0/24', 'prefixlen': 26, 'ip_version': constants.IP_VERSION_4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_allocate_specific_subnet_with_matching_prefixlen(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '10.10.1.0/24', 'prefixlen': 24, 'ip_version': constants.IP_VERSION_4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_allocate_specific_subnet(self): with self.network() as network: sp = 
self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') # Request a specific subnet allocation data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '10.10.1.0/24', 'ip_version': constants.IP_VERSION_4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = self.deserialize(self.fmt, req.get_response(self.api)) # Assert the allocated subnet CIDR is what we expect subnet = netaddr.IPNetwork(res['subnet']['cidr']) self.assertEqual(netaddr.IPNetwork('10.10.1.0/24'), subnet) def test_allocate_specific_subnet_non_existent_prefix(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') # Request a specific subnet allocation data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '192.168.1.0/24', 'ip_version': constants.IP_VERSION_4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = req.get_response(self.api) self.assertEqual(500, res.status_int) def test_allocate_specific_subnet_already_allocated(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') # Request a specific subnet allocation data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '10.10.10.0/24', 'ip_version': constants.IP_VERSION_4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) # Allocate the subnet res = req.get_response(self.api) self.assertEqual(201, res.status_int) # Attempt to allocate it again res = req.get_response(self.api) # Assert error self.assertEqual(500, res.status_int) def test_allocate_specific_subnet_prefix_too_small(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') # Request a specific subnet allocation data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '10.10.0.0/20', 'ip_version': constants.IP_VERSION_4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_allocate_specific_subnet_prefix_specific_gw(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') # Request a specific subnet allocation data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '10.10.1.0/24', 'gateway_ip': '10.10.1.254', 'ip_version': constants.IP_VERSION_4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual('10.10.1.254', res['subnet']['gateway_ip']) def test_allocate_specific_subnet_prefix_allocation_pools(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') # Request a specific subnet allocation pools = [{'start': '10.10.1.2', 'end': '10.10.1.253'}] data = {'subnet': {'network_id': network['network']['id'], 
'subnetpool_id': sp['subnetpool']['id'], 'cidr': '10.10.1.0/24', 'gateway_ip': '10.10.1.1', 'ip_version': constants.IP_VERSION_4, 'allocation_pools': pools, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(pools[0]['start'], res['subnet']['allocation_pools'][0]['start']) self.assertEqual(pools[0]['end'], res['subnet']['allocation_pools'][0]['end']) def test_allocate_any_subnet_prefix_allocation_pools(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.10.0/24'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') # Request an any subnet allocation pools = [{'start': '10.10.10.1', 'end': '10.10.10.254'}] data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'prefixlen': '24', 'ip_version': constants.IP_VERSION_4, 'allocation_pools': pools, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_allocate_specific_subnet_prefix_too_large(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21', max_prefixlen='21') # Request a specific subnet allocation data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '10.10.0.0/24', 'ip_version': constants.IP_VERSION_4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_delete_subnetpool_existing_allocations(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21') data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'cidr': '10.10.0.0/24', 'ip_version': constants.IP_VERSION_4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) req.get_response(self.api) req = self.new_delete_request('subnetpools', sp['subnetpool']['id']) res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_allocate_subnet_over_quota(self): with self.network() as network: sp = self._test_create_subnetpool(['10.10.0.0/16'], tenant_id=self._tenant_id, name=self._POOL_NAME, min_prefixlen='21', default_quota=2048) # Request a specific subnet allocation data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'ip_version': constants.IP_VERSION_4, 'prefixlen': 21, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) # Allocate a subnet to fill the quota res = req.get_response(self.api) self.assertEqual(201, res.status_int) # Attempt to allocate a /21 again res = req.get_response(self.api) # Assert error self.assertEqual(409, res.status_int) def test_allocate_any_ipv4_subnet_ipv6_pool(self): with self.network() as network: sp = self._test_create_subnetpool(['2001:db8:1:2::/63'], tenant_id=self._tenant_id, name=self._POOL_NAME) # Request a specific subnet allocation data = {'subnet': {'network_id': network['network']['id'], 'subnetpool_id': sp['subnetpool']['id'], 'ip_version': constants.IP_VERSION_4, 'tenant_id': network['network']['tenant_id']}} req = self.new_create_request('subnets', data) res 
= req.get_response(self.api) self.assertEqual(400, res.status_int) class DbModelMixin(object): """DB model tests.""" def test_make_network_dict_outside_engine_facade_manager(self): mock.patch.object(directory, 'get_plugin').start() ctx = context.get_admin_context() with db_api.CONTEXT_WRITER.using(ctx): network = models_v2.Network(name="net_net", status="OK", admin_state_up=True, project_id='fake_project', mtu=1500) ctx.session.add(network) # ensure db rels aren't loaded until commit for network object # by sharing after flush ctx.session.flush() network_obj.NetworkRBAC( ctx, object_id=network.id, action='access_as_shared', project_id=network.project_id, target_tenant='*').create() net2 = models_v2.Network(name="net_net2", status="OK", admin_state_up=True, mtu=1500) ctx.session.add(net2) pl = db_base_plugin_common.DbBasePluginCommon() self.assertTrue(pl._make_network_dict(network, context=ctx)['shared']) self.assertFalse(pl._make_network_dict(net2, context=ctx)['shared']) def test_repr(self): """testing the string representation of 'model' classes.""" network = models_v2.Network(name="net_net", status="OK", admin_state_up=True) actual_repr_output = repr(network) exp_start_with = "<neutron.db.models_v2.Network" exp_middle = "[object at %x]" % id(network) exp_end_with = (" {project_id=None, id=None, name='net_net', status='OK', admin_state_up=True, vlan_transparent=None, availability_zone_hints=None, mtu=None, standard_attr_id=None}>") final_exp = exp_start_with + exp_middle + exp_end_with self.assertEqual(final_exp, actual_repr_output) def _make_security_group_and_rule(self, ctx): with db_api.CONTEXT_WRITER.using(ctx): sg = sg_models.SecurityGroup(name='sg', description='sg') rule = sg_models.SecurityGroupRule( security_group=sg, port_range_min=1, port_range_max=2, protocol='TCP', ethertype='v4', direction='ingress', remote_ip_prefix='0.0.0.0/0') ctx.session.add(sg) ctx.session.add(rule) return sg, rule def _make_floating_ip(self, ctx, port_id): with db_api.CONTEXT_WRITER.using(ctx): flip = l3_obj.FloatingIP( ctx, floating_ip_address=netaddr.IPAddress('1.2.3.4'), floating_network_id=uuidutils.generate_uuid(), floating_port_id=port_id) flip.create() return flip def _make_router(self, ctx): with db_api.CONTEXT_WRITER.using(ctx): router = l3_models.Router() ctx.session.add(router) return router def _get_neutron_attr(self, ctx, attr_id): return ctx.session.query( standard_attr.StandardAttribute).filter( standard_attr.StandardAttribute.id == attr_id).one() def _test_standardattr_removed_on_obj_delete(self, ctx, obj): attr_id = obj.standard_attr_id self.assertEqual( obj.__table__.name, self._get_neutron_attr(ctx, attr_id).resource_type) with db_api.CONTEXT_WRITER.using(ctx): ctx.session.delete(obj) with testtools.ExpectedException(orm.exc.NoResultFound): # we want to make sure that the attr resource was removed self._get_neutron_attr(ctx, attr_id) def test_staledata_error_on_concurrent_object_update_network(self): ctx = context.get_admin_context() network = self._make_network(ctx) self._test_staledata_error_on_concurrent_object_update( models_v2.Network, network['id']) def test_staledata_error_on_concurrent_object_update_port(self): ctx = context.get_admin_context() network = self._make_network(ctx) port = self._make_port(ctx, network.id) self._test_staledata_error_on_concurrent_object_update( models_v2.Port, port['id']) def test_staledata_error_on_concurrent_object_update_subnet(self): ctx = context.get_admin_context() network = self._make_network(ctx) subnet = self._make_subnet(ctx, network.id) self._test_staledata_error_on_concurrent_object_update( models_v2.Subnet, subnet['id']) def test_staledata_error_on_concurrent_object_update_subnetpool(self): ctx = context.get_admin_context() subnetpool = self._make_subnetpool(ctx)
self._test_staledata_error_on_concurrent_object_update( models_v2.SubnetPool, subnetpool['id']) def test_staledata_error_on_concurrent_object_update_router(self): ctx = context.get_admin_context() router = self._make_router(ctx) self._test_staledata_error_on_concurrent_object_update( l3_models.Router, router['id']) def test_staledata_error_on_concurrent_object_update_floatingip(self): ctx = context.get_admin_context() network = self._make_network(ctx) port = self._make_port(ctx, network.id) flip = self._make_floating_ip(ctx, port.id) self._test_staledata_error_on_concurrent_object_update( flip.db_model, flip.id) def test_staledata_error_on_concurrent_object_update_sg(self): ctx = context.get_admin_context() sg, rule = self._make_security_group_and_rule(ctx) self._test_staledata_error_on_concurrent_object_update( sg_models.SecurityGroup, sg['id']) self._test_staledata_error_on_concurrent_object_update( sg_models.SecurityGroupRule, rule['id']) def _test_staledata_error_on_concurrent_object_update(self, model, dbid): """Test revision compare and swap update breaking on concurrent update. In this test we start an update of the name on a model in an eventlet coroutine where it will be blocked before it can commit the results. Then while it is blocked, we will update the description of the model in the foreground and ensure that this results in the coroutine receiving a StaleDataError as expected. """ lock = functools.partial(lockutils.lock, uuidutils.generate_uuid()) self._blocked_on_lock = False def _lock_blocked_name_update(): ctx = context.get_admin_context() with db_api.CONTEXT_WRITER.using(ctx): thing = ctx.session.query(model).filter_by(id=dbid).one() thing.bump_revision() thing.name = 'newname' self._blocked_on_lock = True with lock(): return thing with lock(): coro = eventlet.spawn(_lock_blocked_name_update) # wait for the coroutine to get blocked on the lock before # we proceed to update the record underneath it while not self._blocked_on_lock: eventlet.sleep(0) ctx = context.get_admin_context() with db_api.CONTEXT_WRITER.using(ctx): thing = ctx.session.query(model).filter_by(id=dbid).one() thing.bump_revision() thing.description = 'a description' revision_after_build = thing.revision_number with testtools.ExpectedException(orm.exc.StaleDataError): # the coroutine should have encountered a stale data error because # the foreground description update bumped the revision number # while the coroutine was waiting to commit coro.wait() # another attempt should work fine thing = _lock_blocked_name_update() self.assertEqual('a description', thing.description) self.assertEqual('newname', thing.name) self.assertGreater(thing.revision_number, revision_after_build) def test_standardattr_removed_on_subnet_delete(self): ctx = context.get_admin_context() network = self._make_network(ctx) subnet = self._make_subnet(ctx, network.id) self._test_standardattr_removed_on_obj_delete(ctx, subnet) def test_standardattr_removed_on_network_delete(self): ctx = context.get_admin_context() network = self._make_network(ctx) self._test_standardattr_removed_on_obj_delete(ctx, network) def test_standardattr_removed_on_subnetpool_delete(self): ctx = context.get_admin_context() spool = self._make_subnetpool(ctx) self._test_standardattr_removed_on_obj_delete(ctx, spool) def test_standardattr_removed_on_port_delete(self): ctx = context.get_admin_context() network = self._make_network(ctx) port = self._make_port(ctx, network.id) self._test_standardattr_removed_on_obj_delete(ctx, port) def
test_standardattr_removed_on_sg_delete(self): ctx = context.get_admin_context() sg, rule = self._make_security_group_and_rule(ctx) self._test_standardattr_removed_on_obj_delete(ctx, sg) # make sure the attr entry was wiped out for the rule as well with testtools.ExpectedException(orm.exc.NoResultFound): self._get_neutron_attr(ctx, rule.standard_attr_id) def test_standardattr_removed_on_floating_ip_delete(self): ctx = context.get_admin_context() network = self._make_network(ctx) port = self._make_port(ctx, network.id) flip = self._make_floating_ip(ctx, port.id) # TODO(lujinluo): Change flip.db_obj to flip once all # codes are migrated to use Floating IP OVO object. self._test_standardattr_removed_on_obj_delete(ctx, flip.db_obj) def test_standardattr_removed_on_router_delete(self): ctx = context.get_admin_context() router = self._make_router(ctx) self._test_standardattr_removed_on_obj_delete(ctx, router) def test_resource_type_fields(self): ctx = context.get_admin_context() network = self._make_network(ctx) port = self._make_port(ctx, network.id) subnet = self._make_subnet(ctx, network.id) spool = self._make_subnetpool(ctx) for disc, obj in (('ports', port), ('networks', network), ('subnets', subnet), ('subnetpools', spool)): self.assertEqual( disc, obj.standard_attr.resource_type) class DbModelTenantTestCase(DbModelMixin, testlib_api.SqlTestCase): def _make_network(self, ctx): with db_api.CONTEXT_WRITER.using(ctx): network = models_v2.Network(name="net_net", status="OK", tenant_id='dbcheck', admin_state_up=True) ctx.session.add(network) return network def _make_subnet(self, ctx, network_id): with db_api.CONTEXT_WRITER.using(ctx): subnet = models_v2.Subnet(name="subsub", ip_version=constants.IP_VERSION_4, tenant_id='dbcheck', cidr='turn_down_for_what', network_id=network_id) ctx.session.add(subnet) return subnet def _make_port(self, ctx, network_id): with db_api.CONTEXT_WRITER.using(ctx): port = models_v2.Port(network_id=network_id, mac_address='1', tenant_id='dbcheck', admin_state_up=True, status="COOL", device_id="devid", device_owner="me") ctx.session.add(port) return port def _make_subnetpool(self, ctx): with db_api.CONTEXT_WRITER.using(ctx): subnetpool = models_v2.SubnetPool( ip_version=constants.IP_VERSION_4, default_prefixlen=4, min_prefixlen=4, max_prefixlen=4, default_quota=4, address_scope_id='f', tenant_id='dbcheck', is_default=False ) ctx.session.add(subnetpool) return subnetpool class DbModelProjectTestCase(DbModelMixin, testlib_api.SqlTestCase): def _make_network(self, ctx): with db_api.CONTEXT_WRITER.using(ctx): network = models_v2.Network(name="net_net", status="OK", project_id='dbcheck', admin_state_up=True) ctx.session.add(network) return network def _make_subnet(self, ctx, network_id): with db_api.CONTEXT_WRITER.using(ctx): subnet = models_v2.Subnet(name="subsub", ip_version=constants.IP_VERSION_4, project_id='dbcheck', cidr='turn_down_for_what', network_id=network_id) ctx.session.add(subnet) return subnet def _make_port(self, ctx, network_id): with db_api.CONTEXT_WRITER.using(ctx): port = models_v2.Port(network_id=network_id, mac_address='1', project_id='dbcheck', admin_state_up=True, status="COOL", device_id="devid", device_owner="me") ctx.session.add(port) return port def _make_subnetpool(self, ctx): with db_api.CONTEXT_WRITER.using(ctx): subnetpool = models_v2.SubnetPool( ip_version=constants.IP_VERSION_4, default_prefixlen=4, min_prefixlen=4, max_prefixlen=4, default_quota=4, address_scope_id='f', project_id='dbcheck', is_default=False ) ctx.session.add(subnetpool) 
return subnetpool class NeutronDbPluginV2AsMixinTestCase(NeutronDbPluginV2TestCase, testlib_api.SqlTestCase): """Tests for NeutronDbPluginV2 as Mixin. While NeutronDbPluginV2TestCase checks NeutronDbPlugin and all plugins as a complete plugin, this test case verifies abilities of NeutronDbPlugin which are provided to other plugins (e.g. DB operations). This test case may include tests only for NeutronDbPlugin, so this should not be used in unit tests for other plugins. """ def setUp(self): super(NeutronDbPluginV2AsMixinTestCase, self).setUp() self.plugin = importutils.import_object(DB_PLUGIN_KLASS) self.context = context.get_admin_context() self.net_data = {'network': {'id': 'fake-id', 'name': 'net1', 'admin_state_up': True, 'tenant_id': TEST_TENANT_ID, 'shared': False}} def test_create_network_with_default_status(self): net = self.plugin.create_network(self.context, self.net_data) default_net_create_status = 'ACTIVE' expected = [('id', 'fake-id'), ('name', 'net1'), ('admin_state_up', True), ('tenant_id', TEST_TENANT_ID), ('shared', False), ('status', default_net_create_status)] for k, v in expected: self.assertEqual(net[k], v) def test_create_network_with_status_BUILD(self): self.net_data['network']['status'] = 'BUILD' net = self.plugin.create_network(self.context, self.net_data) self.assertEqual(net['status'], 'BUILD') def test_get_user_allocation_for_dhcp_port_returns_none(self): plugin = directory.get_plugin() with self.network() as net, self.network() as net1: with self.subnet(network=net, cidr='10.0.0.0/24') as subnet,\ self.subnet(network=net1, cidr='10.0.1.0/24') as subnet1: with self.port(subnet=subnet, device_owner=constants.DEVICE_OWNER_DHCP),\ self.port(subnet=subnet1): # check that user allocations on another network don't # affect _subnet_get_user_allocation method res = plugin._subnet_get_user_allocation( context.get_admin_context(), subnet['subnet']['id']) self.assertIsNone(res) def test__validate_network_subnetpools(self): network = models_v2.Network() network.subnets = [models_v2.Subnet(subnetpool_id='test_id', ip_version=constants.IP_VERSION_4)] new_subnetpool_id = None self.assertRaises(lib_exc.NetworkSubnetPoolAffinityError, self.plugin.ipam._validate_network_subnetpools, network, constants.IP_VERSION_4, new_subnetpool_id, None) class TestNetworks(testlib_api.SqlTestCase): def setUp(self): super(TestNetworks, self).setUp() self._tenant_id = TEST_TENANT_ID # Update the plugin self.setup_coreplugin(DB_PLUGIN_KLASS) def _create_network(self, plugin, ctx, shared=True): network = {'network': {'name': 'net', 'shared': shared, 'admin_state_up': True, 'tenant_id': self._tenant_id}} created_network = plugin.create_network(ctx, network) return (network, created_network['id']) def _create_port(self, plugin, ctx, net_id, device_owner, tenant_id): port = {'port': {'name': 'port', 'network_id': net_id, 'mac_address': constants.ATTR_NOT_SPECIFIED, 'fixed_ips': constants.ATTR_NOT_SPECIFIED, 'admin_state_up': True, 'device_id': 'device_id', 'device_owner': device_owner, 'tenant_id': tenant_id}} plugin.create_port(ctx, port) def _test_update_shared_net_used(self, device_owner, expected_exception=None): plugin = directory.get_plugin() ctx = context.get_admin_context() network, net_id = self._create_network(plugin, ctx) self._create_port(plugin, ctx, net_id, device_owner, self._tenant_id + '1') network['network']['shared'] = False if (expected_exception): with testlib_api.ExpectedException(expected_exception): plugin.update_network(ctx, net_id, network) else: plugin.update_network(ctx, 
net_id, network) def test_update_shared_net_used_fails(self): self._test_update_shared_net_used('', lib_exc.InvalidSharedSetting) def test_update_shared_net_used_as_router_gateway(self): self._test_update_shared_net_used( constants.DEVICE_OWNER_ROUTER_GW) def test_update_shared_net_used_by_floating_ip(self): self._test_update_shared_net_used( constants.DEVICE_OWNER_FLOATINGIP) class DbOperationBoundMixin(object): """Mixin to support tests that assert constraints on DB operations.""" admin = True def setUp(self, *args, **kwargs): super(DbOperationBoundMixin, self).setUp(*args, **kwargs) self.useFixture(fixture.APIDefinitionFixture()) self._recorded_statements = [] def _event_incrementer(conn, clauseelement, *args, **kwargs): self._recorded_statements.append(str(clauseelement)) engine = db_api.CONTEXT_WRITER.get_engine() db_api.sqla_listen(engine, 'after_execute', _event_incrementer) def _get_context(self): if self.admin: return context.get_admin_context() return context.Context('', 'fake') def get_api_kwargs(self): context_ = self._get_context() return {'set_context': True, 'tenant_id': context_.project_id} def _list_and_record_queries(self, resource, query_params=None): kwargs = {'neutron_context': self._get_context()} if query_params: kwargs['query_params'] = query_params # list once before tracking to flush out any quota recalculations. # otherwise the first list after a create will be different than # a subsequent list with no create. self._list(resource, **kwargs) self._recorded_statements = [] self.assertNotEqual([], self._list(resource, **kwargs)) # sanity check to make sure queries are being observed self.assertNotEqual(0, len(self._recorded_statements)) return list(self._recorded_statements) def _assert_object_list_queries_constant(self, obj_creator, plural, filters=None): obj_creator() before_queries = self._list_and_record_queries(plural) # one more thing shouldn't change the db query count obj = list(obj_creator().values())[0] after_queries = self._list_and_record_queries(plural) self.assertEqual(len(before_queries), len(after_queries), self._qry_fail_msg(before_queries, after_queries)) # using filters shouldn't change the count either if filters: query_params = "&".join(["%s=%s" % (f, obj[f]) for f in filters]) after_queries = self._list_and_record_queries(plural, query_params) self.assertEqual(len(before_queries), len(after_queries), self._qry_fail_msg(before_queries, after_queries)) def _qry_fail_msg(self, before, after): return "\n".join(["queries before:"] + before + ["queries after:"] + after)
neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/test_dvr_mac_db.py
# Copyright (c) 2014 OpenStack Foundation, all rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License.
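# NOTE: a minimal standalone sketch (not part of the original module) of the
# statement-recording pattern DbOperationBoundMixin uses above: hook the
# engine's ``after_execute`` event and remember every statement emitted, so a
# test can assert that listing more objects does not grow the query count.
# It assumes plain SQLAlchemy with an in-memory SQLite engine; every name in
# the sketch is illustrative rather than taken from the neutron tree.
def _sketch_count_statements():
    import sqlalchemy as sa
    from sqlalchemy import event as sa_event

    engine = sa.create_engine("sqlite://")
    recorded = []

    def _record(conn, clauseelement, *args, **kwargs):
        # same shape as _event_incrementer: remember each executed statement
        recorded.append(str(clauseelement))

    sa_event.listen(engine, "after_execute", _record)
    with engine.connect() as conn:
        conn.execute(sa.text("SELECT 1"))
    return len(recorded)  # -> 1, exactly one statement was observed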
import mock from neutron_lib.api.definitions import portbindings from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib import context from neutron_lib import exceptions as lib_exc from neutron_lib.exceptions import dvr as dvr_exc from neutron_lib import fixture from neutron_lib.plugins import directory from neutron_lib.tests import tools from neutron_lib.utils import net from neutron.db import dvr_mac_db from neutron.objects import router from neutron.tests.unit.plugins.ml2 import test_plugin class DVRDbMixinImpl(dvr_mac_db.DVRDbMixin): def __init__(self, notifier): self.notifier = notifier class DvrDbMixinTestCase(test_plugin.Ml2PluginV2TestCase): def setUp(self): super(DvrDbMixinTestCase, self).setUp() self.ctx = context.get_admin_context() self.mixin = DVRDbMixinImpl(mock.Mock()) def _create_dvr_mac_entry(self, host, mac_address): router.DVRMacAddress( self.ctx, host=host, mac_address=mac_address).create() def test__get_dvr_mac_address_by_host(self): entry = router.DVRMacAddress( self.ctx, host='foo_host', mac_address=tools.get_random_EUI()) entry.create() result = self.mixin._get_dvr_mac_address_by_host(self.ctx, 'foo_host') self.assertEqual(entry.to_dict(), result) def test__get_dvr_mac_address_by_host_not_found(self): self.assertRaises(dvr_exc.DVRMacAddressNotFound, self.mixin._get_dvr_mac_address_by_host, self.ctx, 'foo_host') def test__create_dvr_mac_address_success(self): entry = {'host': 'foo_host', 'mac_address': tools.get_random_EUI()} with mock.patch.object(net, 'get_random_mac') as f: f.return_value = entry['mac_address'] expected = self.mixin._create_dvr_mac_address( self.ctx, entry['host']) self.assertEqual(expected, entry) def test__create_dvr_mac_address_retries_exceeded_retry_logic(self): # limit retries so test doesn't take 40 seconds retry_fixture = fixture.DBRetryErrorsFixture(max_retries=2) retry_fixture.setUp() non_unique_mac = tools.get_random_EUI() self._create_dvr_mac_entry('foo_host_1', non_unique_mac) with mock.patch.object(net, 'get_random_mac') as f: f.return_value = non_unique_mac self.assertRaises(lib_exc.HostMacAddressGenerationFailure, self.mixin._create_dvr_mac_address, self.ctx, "foo_host_2") retry_fixture.cleanUp() def test_mac_not_cleared_on_agent_delete_event_with_remaining_agents(self): plugin = directory.get_plugin() mac_1 = tools.get_random_EUI() mac_2 = tools.get_random_EUI() self._create_dvr_mac_entry('host_1', mac_1) self._create_dvr_mac_entry('host_2', mac_2) agent1 = {'host': 'host_1', 'id': 'a1'} agent2 = {'host': 'host_1', 'id': 'a2'} with mock.patch.object(plugin, 'get_agents', return_value=[agent2]): with mock.patch.object(plugin, 'notifier') as notifier: registry.publish(resources.AGENT, events.BEFORE_DELETE, self, payload=events.DBEventPayload( self.ctx, states=(agent1,))) mac_list = self.mixin.get_dvr_mac_address_list(self.ctx) for mac in mac_list: self.assertIsInstance(mac, dict) self.assertEqual(2, len(mac_list)) self.assertFalse(notifier.dvr_mac_address_update.called) def test_mac_cleared_on_agent_delete_event(self): plugin = directory.get_plugin() mac_1 = tools.get_random_EUI() mac_2 = tools.get_random_EUI() self._create_dvr_mac_entry('host_1', mac_1) self._create_dvr_mac_entry('host_2', mac_2) agent = {'host': 'host_1', 'id': 'a1'} with mock.patch.object(plugin, 'notifier') as notifier: registry.publish(resources.AGENT, events.BEFORE_DELETE, self, payload=events.DBEventPayload( self.ctx, states=(agent,))) 
mac_list = self.mixin.get_dvr_mac_address_list(self.ctx) self.assertEqual(1, len(mac_list)) for mac in mac_list: self.assertIsInstance(mac, dict) self.assertEqual('host_2', mac_list[0]['host']) notifier.dvr_mac_address_update.assert_called_once_with( self.ctx, mac_list) def test_get_dvr_mac_address_list(self): mac_1 = tools.get_random_EUI() mac_2 = tools.get_random_EUI() self._create_dvr_mac_entry('host_1', mac_1) self._create_dvr_mac_entry('host_2', mac_2) mac_list = self.mixin.get_dvr_mac_address_list(self.ctx) self.assertEqual(2, len(mac_list)) for mac in mac_list: self.assertIsInstance(mac, dict) def test_get_dvr_mac_address_by_host_existing_host(self): self._create_dvr_mac_entry('foo_host', tools.get_random_EUI()) with mock.patch.object(self.mixin, '_get_dvr_mac_address_by_host') as f: self.mixin.get_dvr_mac_address_by_host(self.ctx, 'foo_host') self.assertEqual(1, f.call_count) def test_get_dvr_mac_address_by_host_missing_host(self): with mock.patch.object(self.mixin, '_create_dvr_mac_address') as f: self.mixin.get_dvr_mac_address_by_host(self.ctx, 'foo_host') self.assertEqual(1, f.call_count) def test_get_subnet_for_dvr_returns_correct_mac(self): with self.subnet() as subnet,\ self.port(subnet=subnet),\ self.port(subnet=subnet): dvr_subnet = self.mixin.get_subnet_for_dvr(self.ctx, subnet['subnet']['id']) # no gateway port should be found so no info should be returned self.assertEqual({}, dvr_subnet) with self.port( subnet=subnet, fixed_ips=[{'ip_address': subnet['subnet'][ 'gateway_ip']}]) as gw_port: dvr_subnet = self.mixin.get_subnet_for_dvr( self.ctx, subnet['subnet']['id']) self.assertEqual(gw_port['port']['mac_address'], dvr_subnet['gateway_mac']) def test_get_subnet_for_dvr_returns_correct_mac_fixed_ips_passed(self): with self.subnet() as subnet,\ self.port(subnet=subnet, fixed_ips=[{'ip_address': '10.0.0.2'}]),\ self.port(subnet=subnet, fixed_ips=[{'ip_address': '10.0.0.3'}]): fixed_ips = [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.4'}] dvr_subnet = self.mixin.get_subnet_for_dvr( self.ctx, subnet['subnet']['id'], fixed_ips) # no gateway port should be found so no info should be returned self.assertEqual({}, dvr_subnet) with self.port( subnet=subnet, fixed_ips=[{'ip_address': '10.0.0.4'}]) as gw_port: dvr_subnet = self.mixin.get_subnet_for_dvr( self.ctx, subnet['subnet']['id'], fixed_ips) self.assertEqual(gw_port['port']['mac_address'], dvr_subnet['gateway_mac']) def test_get_ports_on_host_by_subnet(self): HOST = 'host1' host_arg = {portbindings.HOST_ID: HOST} arg_list = (portbindings.HOST_ID,) with self.subnet() as subnet,\ self.port(subnet=subnet, device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX, arg_list=arg_list, **host_arg) as compute_port,\ self.port(subnet=subnet, device_owner=constants.DEVICE_OWNER_DHCP, arg_list=arg_list, **host_arg) as dhcp_port,\ self.port(subnet=subnet, device_owner=constants.DEVICE_OWNER_LOADBALANCER, arg_list=arg_list, **host_arg) as lb_port,\ self.port(device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX, arg_list=arg_list, **host_arg),\ self.port(subnet=subnet, device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX, arg_list=arg_list, **{portbindings.HOST_ID: 'other'}),\ self.port(subnet=subnet, device_owner=constants.DEVICE_OWNER_NETWORK_PREFIX, arg_list=arg_list, **host_arg): expected_ids = [port['port']['id'] for port in [compute_port, dhcp_port, lb_port]] dvr_ports = self.mixin.get_ports_on_host_by_subnet( self.ctx, HOST, subnet['subnet']['id']) self.assertEqual(len(expected_ids), len(dvr_ports)) 
self.assertItemsEqual(expected_ids, [port['id'] for port in dvr_ports])
neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/test_extraroute_db.py
# Copyright (c) 2016 Midokura SARL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib import context from neutron_lib.plugins import constants from neutron_lib.plugins import directory from neutron.db import extraroute_db from neutron.tests.unit import testlib_api class _Plugin(extraroute_db.ExtraRoute_dbonly_mixin): pass class TestExtraRouteDb(testlib_api.SqlTestCase): def setUp(self): super(TestExtraRouteDb, self).setUp() self._plugin = _Plugin() directory.add_plugin(constants.CORE, self._plugin) def test_update(self): ctx = context.get_admin_context() create_request = { 'router': { 'name': 'my router', 'tenant_id': 'my tenant', 'admin_state_up': True, } } router = self._plugin.create_router(ctx, create_request) self.assertItemsEqual(router['routes'], []) router_id = router['id'] routes = [ {'destination': '10.0.0.0/24', 'nexthop': '1.1.1.4'}, {'destination': '10.1.0.0/24', 'nexthop': '1.1.1.3'}, {'destination': '10.2.0.0/24', 'nexthop': '1.1.1.2'}, ] self._test_update_routes(ctx, router_id, router, routes) routes = [ {'destination': '10.0.0.0/24', 'nexthop': '1.1.1.4'}, {'destination': '10.2.0.0/24', 'nexthop': '1.1.1.2'}, {'destination': '10.3.0.0/24', 'nexthop': '1.1.1.1'}, ] self._test_update_routes(ctx, router_id, router, routes) def _test_update_routes(self, ctx, router_id, router, routes): router['routes'] = routes update_request = { 'router': router, } with mock.patch.object(registry, "publish") as mock_cb: with mock.patch.object(self._plugin, '_validate_routes'): updated_router = self._plugin.update_router(ctx, router_id, update_request) mock_cb.assert_called_with('router', events.PRECOMMIT_UPDATE, self._plugin, payload=mock.ANY) self.assertItemsEqual(updated_router['routes'], routes) got_router = self._plugin.get_router(ctx, router_id) self.assertItemsEqual(got_router['routes'], routes) def assertEqualRoutes(self, a, b): """Compare a list of routes without caring for the list order.""" return self.assertSetEqual( set(frozenset(r.items()) for r in a), set(frozenset(r.items()) for r in b)) def test_add_extra_routes(self): self.assertEqual( [], self._plugin._add_extra_routes([], []), ) old = [{"destination": "10.0.10.0/24", "nexthop": "10.0.0.10"}] add = [] self.assertEqual(old, self._plugin._add_extra_routes(old, add)) old = [] add = [{"destination": "10.0.10.0/24", "nexthop": "10.0.0.10"}] self.assertEqual(add, self._plugin._add_extra_routes(old, add)) old = [{"destination": "10.0.10.0/24", "nexthop": "10.0.0.10"}] add = [{"destination": "10.0.10.0/24", "nexthop": "10.0.0.10"}] self.assertEqual(old, self._plugin._add_extra_routes(old,
add)) old = [{"destination": "10.0.10.0/24", "nexthop": "10.0.0.10"}] add = [{"destination": "10.0.10.0/24", "nexthop": "10.0.0.11"}] self.assertEqualRoutes( old + add, self._plugin._add_extra_routes(old, add)) def test_remove_extra_routes(self): old = [] remove = [] self.assertEqual(old, self._plugin._remove_extra_routes(old, remove)) old = [{"destination": "10.0.10.0/24", "nexthop": "10.0.0.10"}] remove = [] self.assertEqual(old, self._plugin._remove_extra_routes(old, remove)) old = [] remove = [{"destination": "10.0.10.0/24", "nexthop": "10.0.0.10"}] self.assertEqual(old, self._plugin._remove_extra_routes(old, remove)) old = [{"destination": "10.0.10.0/24", "nexthop": "10.0.0.10"}] remove = [{"destination": "10.0.10.0/24", "nexthop": "10.0.0.11"}] self.assertEqual(old, self._plugin._remove_extra_routes(old, remove)) old = [{"destination": "10.0.10.0/24", "nexthop": "10.0.0.10"}] remove = old self.assertEqual([], self._plugin._remove_extra_routes(old, remove)) old = [ {"destination": "10.0.10.0/24", "nexthop": "10.0.0.10"}, {"destination": "10.0.11.0/24", "nexthop": "10.0.0.11"}, ] remove = old[1:] self.assertEqual( old[:1], self._plugin._remove_extra_routes(old, remove)) old = [ {"destination": "10.0.10.0/24", "nexthop": "10.0.0.10"}, {"destination": "10.0.10.0/24", "nexthop": "10.0.0.11"}, ] remove = old[1:] self.assertEqual( old[:1], self._plugin._remove_extra_routes(old, remove)) old = [] remove = [ {"destination": "10.0.10.0/24", "nexthop": "10.0.0.10"}, {"destination": "10.0.10.0/24", "nexthop": "10.0.0.10"}, ] self.assertEqual([], self._plugin._remove_extra_routes(old, remove)) old = [ {"destination": "10.0.10.0/24", "nexthop": "10.0.0.10"}, ] remove = [ {"destination": "10.0.10.0/24", "nexthop": "10.0.0.10"}, {"destination": "10.0.10.0/24", "nexthop": "10.0.0.10"}, ] self.assertEqual([], self._plugin._remove_extra_routes(old, remove))
neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/test_ipam_backend_mixin.py
# Copyright (c) 2015 Infoblox Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
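# NOTE: a minimal standalone sketch (not part of the original module)
# restating what the extraroute assertions above rely on: route dicts are
# unhashable, so assertEqualRoutes freezes each route to a frozenset of its
# items, which makes the comparison order-insensitive and gives a natural way
# to express the duplicate-free merge _add_extra_routes is expected to
# produce. All names below are illustrative.
def _sketch_route_set_semantics():
    old = [{"destination": "10.0.10.0/24", "nexthop": "10.0.0.10"}]
    add = [{"destination": "10.0.10.0/24", "nexthop": "10.0.0.10"},  # dup
           {"destination": "10.0.11.0/24", "nexthop": "10.0.0.11"}]

    def as_set(routes):
        # freeze each route so it can live in a set
        return {frozenset(r.items()) for r in routes}

    # keep every old route, append only additions not already present
    merged = old + [r for r in add if frozenset(r.items()) not in as_set(old)]
    assert as_set(merged) == as_set(old + add)  # order-insensitive equality
    assert len(merged) == 2  # the duplicate was dropped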
import mock import netaddr from neutron_lib.api.definitions import portbindings from neutron_lib import constants from neutron_lib import exceptions as exc from neutron_lib.exceptions import address_scope as addr_scope_exc from oslo_utils import uuidutils import webob.exc from neutron.db import db_base_plugin_v2 from neutron.db import ipam_backend_mixin from neutron.db import portbindings_db from neutron.objects import subnet as subnet_obj from neutron.tests import base from neutron.tests.unit.db import test_db_base_plugin_v2 class TestIpamBackendMixin(base.BaseTestCase): def setUp(self): super(TestIpamBackendMixin, self).setUp() self.mixin = ipam_backend_mixin.IpamBackendMixin() self.ctx = mock.Mock() self.default_new_ips = (('id-1', '192.168.1.1'), ('id-2', '192.168.1.2')) self.default_original_ips = (('id-1', '192.168.1.1'), ('id-5', '172.20.16.5')) self.owner_non_router = constants.DEVICE_OWNER_DHCP self.owner_router = constants.DEVICE_OWNER_ROUTER_INTF def _prepare_ips(self, ips): results = [] for ip in ips: ip_dict = {'ip_address': ip[1], 'subnet_id': ip[0]} if len(ip) > 2: ip_dict['delete_subnet'] = ip[2] results.append(ip_dict) return results def _mock_slaac_subnet_on(self): slaac_subnet_obj = subnet_obj.Subnet( self.ctx, ipv6_address_mode=constants.IPV6_SLAAC, ipv6_ra_mode=constants.IPV6_SLAAC) self.mixin._get_subnet_object = mock.Mock( return_value=slaac_subnet_obj) def _mock_slaac_subnet_off(self): non_slaac_subnet_obj = subnet_obj.Subnet( self.ctx, ipv6_address_mode=None, ipv6_ra_mode=None) self.mixin._get_subnet_object = mock.Mock( return_value=non_slaac_subnet_obj) def _mock_slaac_for_subnet_ids(self, subnet_ids): """Mock incoming subnets as autoaddressed.""" def _get_subnet_object(context, subnet_id): if subnet_id in subnet_ids: return subnet_obj.Subnet( self.ctx, ipv6_address_mode=constants.IPV6_SLAAC, ipv6_ra_mode=constants.IPV6_SLAAC) else: return subnet_obj.Subnet( self.ctx, ipv6_address_mode=None, ipv6_ra_mode=None) self.mixin._get_subnet_object = mock.Mock( side_effect=_get_subnet_object) def _test_get_changed_ips_for_port(self, expected, original_ips, new_ips, owner): change = self.mixin._get_changed_ips_for_port(self.ctx, original_ips, new_ips, owner) self.assertItemsEqual(expected.add, change.add) self.assertItemsEqual(expected.original, change.original) self.assertItemsEqual(expected.remove, change.remove) def test__get_changed_ips_for_port(self): new_ips = self._prepare_ips(self.default_new_ips) original_ips = self._prepare_ips(self.default_original_ips) expected_change = self.mixin.Changes(add=[new_ips[1]], original=[original_ips[0]], remove=[original_ips[1]]) self._test_get_changed_ips_for_port(expected_change, original_ips, new_ips, self.owner_router) def test__get_changed_ips_for_port_autoaddress(self): new_ips = self._prepare_ips(self.default_new_ips) original = (('id-1', '192.168.1.1'), ('id-5', '2000:1234:5678::12FF:FE34:5678')) original_ips = self._prepare_ips(original) self._mock_slaac_subnet_on() expected_change = self.mixin.Changes(add=[new_ips[1]], original=original_ips, remove=[]) self._test_get_changed_ips_for_port(expected_change, original_ips, new_ips, self.owner_non_router) def test__get_changed_ips_for_port_remove_autoaddress(self): new = (('id-5', '2000:1234:5678::12FF:FE34:5678', True), ('id-1', '192.168.1.1')) new_ips = self._prepare_ips(new) reference_ips = [ip for ip in new_ips if ip['subnet_id'] == 'id-1'] original = (('id-5', '2000:1234:5678::12FF:FE34:5678'),) original_ips = self._prepare_ips(original) # mock ipv6 subnet as auto addressed 
and leave ipv4 as regular self._mock_slaac_for_subnet_ids([new[0][0]]) # Autoaddressed ip allocation has to be removed # if it has 'delete_subnet' flag set to True expected_change = self.mixin.Changes(add=reference_ips, original=[], remove=original_ips) self._test_get_changed_ips_for_port(expected_change, original_ips, new_ips, self.owner_non_router) def test__get_changed_ips_for_port_autoaddress_ipv6_pd_enabled(self): owner_not_router = constants.DEVICE_OWNER_DHCP new_ips = self._prepare_ips(self.default_new_ips) original = (('id-1', '192.168.1.1'), ('id-5', '2000:1234:5678::12FF:FE34:5678')) original_ips = self._prepare_ips(original) # mock to test auto address part pd_subnet_obj = subnet_obj.Subnet( self.ctx, id=uuidutils.generate_uuid(), subnetpool_id=constants.IPV6_PD_POOL_ID, ipv6_address_mode=constants.IPV6_SLAAC, ipv6_ra_mode=constants.IPV6_SLAAC) self.mixin._get_subnet_object = mock.Mock(return_value=pd_subnet_obj) # make a copy of original_ips # since it is changed by _get_changed_ips_for_port expected_change = self.mixin.Changes(add=[new_ips[1]], original=[original_ips[0]], remove=[original_ips[1]]) self._test_get_changed_ips_for_port(expected_change, original_ips, new_ips, owner_not_router) def _test_get_changed_ips_for_port_no_ip_address(self): # IP address should be added if only subnet_id is provided, # independently from auto_address status for subnet new_ips = [{'subnet_id': 'id-3'}] original_ips = [] expected_change = self.mixin.Changes(add=[new_ips[0]], original=[], remove=[]) self._test_get_changed_ips_for_port(expected_change, original_ips, new_ips, self.owner_non_router) def test__get_changed_ips_for_port_no_ip_address_no_slaac(self): self._mock_slaac_subnet_off() self._test_get_changed_ips_for_port_no_ip_address() def test__get_changed_ips_for_port_no_ip_address_slaac(self): self._mock_slaac_subnet_on() self._test_get_changed_ips_for_port_no_ip_address() def test__get_changed_ips_for_port_subnet_id_no_ip(self): # If a subnet is specified without an IP address only allocate a new # address if one doesn't exist self._mock_slaac_subnet_off() new_ips = [{'subnet_id': 'id-3'}] original_ips = [{'subnet_id': 'id-3', 'ip_address': '4.3.2.1'}] expected_change = self.mixin.Changes( add=[], original=[{'subnet_id': 'id-3', 'ip_address': '4.3.2.1'}], remove=[]) self._test_get_changed_ips_for_port(expected_change, original_ips, new_ips, self.owner_non_router) def test__get_changed_ips_for_port_multiple_ips_one_subnet_add_third(self): # If a subnet is specified without an IP address only allocate a new # address if one doesn't exist self._mock_slaac_subnet_off() new_ips = [{'subnet_id': 'id-3', 'ip_address': '4.3.2.1'}, {'subnet_id': 'id-3'}, {'subnet_id': 'id-3', 'ip_address': '4.3.2.10'}] original_ips = [{'subnet_id': 'id-3', 'ip_address': '4.3.2.1'}, {'subnet_id': 'id-3', 'ip_address': '4.3.2.10'}] expected_change = self.mixin.Changes( add=[{'subnet_id': 'id-3'}], original=[{'subnet_id': 'id-3', 'ip_address': '4.3.2.1'}, {'subnet_id': 'id-3', 'ip_address': '4.3.2.10'}], remove=[]) self._test_get_changed_ips_for_port(expected_change, original_ips, new_ips, self.owner_non_router) def test__get_changed_ips_for_port_multiple_ips_one_subnet_noip(self): # If a subnet is specified without an IP address only allocate a new # address if one doesn't exist self._mock_slaac_subnet_off() new_ips = [{'subnet_id': 'id-3'}, {'subnet_id': 'id-3'}] original_ips = [{'subnet_id': 'id-3', 'ip_address': '4.3.2.1'}, {'subnet_id': 'id-3', 'ip_address': '4.3.2.10'}] expected_change = self.mixin.Changes( 
add=[], original=[{'subnet_id': 'id-3', 'ip_address': '4.3.2.1'}, {'subnet_id': 'id-3', 'ip_address': '4.3.2.10'}], remove=[]) self._test_get_changed_ips_for_port(expected_change, original_ips, new_ips, self.owner_non_router) def test__get_changed_ips_for_port_subnet_id_no_ip_ipv6(self): # If a subnet is specified without an IP address only allocate a new # address if one doesn't exist self._mock_slaac_subnet_off() new_ips = [{'subnet_id': 'id-3'}] original_ips = [{'subnet_id': 'id-3', 'ip_address': '2001:db8::8'}] expected_change = self.mixin.Changes( add=[], original=[{'subnet_id': 'id-3', 'ip_address': '2001:db8::8'}], remove=[]) self._test_get_changed_ips_for_port(expected_change, original_ips, new_ips, self.owner_non_router) def test__get_changed_ips_for_port_subnet_id_no_ip_eui64(self): # If a subnet is specified without an IP address allocate a new address # if the address is eui-64. This supports changing prefix when prefix # delegation is in use. self._mock_slaac_subnet_off() new_ips = [{'subnet_id': 'id-3'}] original_ips = [{'subnet_id': 'id-3', 'ip_address': '2001::eeb1:d7ff:fe2c:9c5f'}] expected_change = self.mixin.Changes( add=[{'subnet_id': 'id-3'}], original=[], remove=[{'subnet_id': 'id-3', 'ip_address': '2001::eeb1:d7ff:fe2c:9c5f'}]) self._test_get_changed_ips_for_port(expected_change, original_ips, new_ips, self.owner_non_router) def test__is_ip_required_by_subnet_for_router_port(self): # Owner -> router: # _get_subnet_object should not be called, # expected True self._mock_slaac_subnet_off() result = self.mixin._is_ip_required_by_subnet(self.ctx, 'id', self.owner_router) self.assertTrue(result) self.assertFalse(self.mixin._get_subnet_object.called) def test__is_ip_required_by_subnet_for_non_router_port(self): # Owner -> not router: # _get_subnet_object should be called, # expected True, because subnet is not slaac self._mock_slaac_subnet_off() result = self.mixin._is_ip_required_by_subnet(self.ctx, 'id', self.owner_non_router) self.assertTrue(result) self.assertTrue(self.mixin._get_subnet_object.called) def test__is_ip_required_by_subnet_for_non_router_port_and_slaac(self): # Owner -> not router: # _get_subnet_object should be called, # expected False, because subnet is slaac self._mock_slaac_subnet_on() result = self.mixin._is_ip_required_by_subnet(self.ctx, 'id', self.owner_non_router) self.assertFalse(result) self.assertTrue(self.mixin._get_subnet_object.called) def test__validate_network_subnetpools_mismatch_address_scopes(self): address_scope_id = "dummy-scope" subnetpool = mock.MagicMock() address_scope = mock.MagicMock() subnetpool.address_scope.return_value = address_scope_id address_scope.id.return_value = address_scope_id self.assertRaises(addr_scope_exc.NetworkAddressScopeAffinityError, self.mixin._validate_network_subnetpools, mock.MagicMock(), constants.IP_VERSION_4, subnetpool, address_scope) def test__validate_network_subnetpools_subnetpool_mismatch(self): subnet = mock.MagicMock(ip_version=constants.IP_VERSION_4) subnet.subnetpool_id = 'fake-subnetpool' network = mock.MagicMock(subnets=[subnet]) subnetpool = mock.MagicMock(id=uuidutils.generate_uuid()) subnetpool.ip_version = constants.IP_VERSION_4 self.assertRaises(exc.NetworkSubnetPoolAffinityError, self.mixin._validate_network_subnetpools, network, constants.IP_VERSION_4, subnetpool, None) class TestPlugin(db_base_plugin_v2.NeutronDbPluginV2, portbindings_db.PortBindingMixin): __native_pagination_support = True __native_sorting_support = True supported_extension_aliases = [portbindings.ALIAS] def 
get_plugin_description(self): return "Test Plugin" @classmethod def get_plugin_type(cls): return "test_plugin" def create_port(self, context, port): port_dict = super(TestPlugin, self).create_port(context, port) self._process_portbindings_create_and_update( context, port['port'], port_dict) return port_dict class TestPortUpdateIpam(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def setUp(self, plugin=None): if not plugin: plugin = 'neutron.tests.unit.db.test_ipam_backend_mixin.TestPlugin' super(TestPortUpdateIpam, self).setUp(plugin=plugin) def test_port_update_allocate_from_net_subnet(self): """Tests that a port can get an address by updating fixed_ips""" with self.network() as network: pass # Create a bound port with no IP address (since there is no subnet) response = self._create_port(self.fmt, net_id=network['network']['id'], tenant_id=network['network']['tenant_id'], arg_list=(portbindings.HOST_ID,), **{portbindings.HOST_ID: 'fakehost'}) port = self.deserialize(self.fmt, response) # Create the subnet and try to update the port to get an IP with self.subnet(network=network) as subnet: data = {'port': { 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}]}} port_id = port['port']['id'] port_req = self.new_update_request('ports', data, port_id) response = port_req.get_response(self.api) res = self.deserialize(self.fmt, response) self.assertEqual(webob.exc.HTTPOk.code, response.status_int) self.assertEqual(1, len(res['port']['fixed_ips'])) ip = res['port']['fixed_ips'][0]['ip_address'] ip_net = netaddr.IPNetwork(subnet['subnet']['cidr']) self.assertIn(ip, ip_net) class TestPortUpdateIpamML2(TestPortUpdateIpam): def setUp(self): super(TestPortUpdateIpamML2, self).setUp(plugin='ml2')
neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/test_ipam_pluggable_backend.py
# Copyright (c) 2015 Infoblox Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
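# NOTE: a minimal standalone sketch (not part of the original module) of the
# EUI-64 derivation that the allocation tests below verify through
# oslo.utils' netutils.get_ipv6_addr_by_EUI64: the MAC's universal/local bit
# is flipped and ff:fe is spliced into its middle to form the 64-bit
# interface identifier appended to the subnet prefix. The concrete values
# mirror the ones used in the tests.
def _sketch_eui64_address():
    import netaddr
    from oslo_utils import netutils

    prefix = '2001:470:abcd::/64'
    mac = '6c:62:6d:de:cf:49'
    # U/L bit of the first octet flips (0x6c -> 0x6e), then ff:fe is inserted
    # between the OUI and the NIC half: 6e62:6dff:fede:cf49
    addr = netutils.get_ipv6_addr_by_EUI64(prefix, mac)
    assert addr == netaddr.IPAddress('2001:470:abcd:0:6e62:6dff:fede:cf49')
    return addr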
import copy import mock import netaddr from neutron_lib import constants from neutron_lib import context as ncontext from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_db import exception as db_exc from oslo_utils import netutils from oslo_utils import uuidutils import webob.exc from neutron.db import ipam_backend_mixin from neutron.db import ipam_pluggable_backend from neutron.ipam import requests as ipam_req from neutron.objects import network as network_obj from neutron.objects import ports as port_obj from neutron.objects import subnet as obj_subnet from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db_base class UseIpamMixin(object): def setUp(self): cfg.CONF.set_override("ipam_driver", 'internal') super(UseIpamMixin, self).setUp() class TestIpamHTTPResponse(UseIpamMixin, test_db_base.TestV2HTTPResponse): pass class TestIpamPorts(UseIpamMixin, test_db_base.TestPortsV2): pass class TestIpamNetworks(UseIpamMixin, test_db_base.TestNetworksV2): pass class TestIpamSubnets(UseIpamMixin, test_db_base.TestSubnetsV2): pass class TestIpamSubnetPool(UseIpamMixin, test_db_base.TestSubnetPoolsV2): pass class TestDbBasePluginIpam(test_db_base.NeutronDbPluginV2TestCase): def setUp(self): cfg.CONF.set_override("ipam_driver", 'internal') super(TestDbBasePluginIpam, self).setUp() self.tenant_id = uuidutils.generate_uuid() self.subnet_id = uuidutils.generate_uuid() self.admin_context = ncontext.get_admin_context() def _prepare_mocks(self, address_factory=None, subnet_factory=None): if address_factory is None: address_factory = ipam_req.AddressRequestFactory if subnet_factory is None: subnet_factory = ipam_req.SubnetRequestFactory mocks = { 'driver': mock.Mock(), 'subnet': mock.Mock(), 'subnets': mock.Mock(), 'port': { 'device_owner': constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' }, 'subnet_request': ipam_req.SpecificSubnetRequest( self.tenant_id, self.subnet_id, '10.0.0.0/24', '10.0.0.1', [netaddr.IPRange('10.0.0.2', '10.0.0.254')]), } mocks['driver'].get_subnet.return_value = mocks['subnet'] mocks['driver'].allocate_subnet.return_value = mocks['subnet'] mocks['driver'].get_allocator.return_value = mocks['subnets'] mocks['subnets'].allocate.return_value = ( '127.0.0.1', uuidutils.generate_uuid()) mocks['driver'].get_subnet_request_factory.return_value = ( subnet_factory) mocks['driver'].get_address_request_factory.return_value = ( address_factory) mocks['subnet'].get_details.return_value = mocks['subnet_request'] return mocks def _prepare_ipam(self): mocks = self._prepare_mocks() mocks['ipam'] = ipam_pluggable_backend.IpamPluggableBackend() return mocks def _prepare_mocks_with_pool_mock(self, pool_mock, address_factory=None, subnet_factory=None): mocks = self._prepare_mocks(address_factory=address_factory, subnet_factory=subnet_factory) pool_mock.get_instance.return_value = mocks['driver'] return mocks def _get_allocate_mock(self, subnet_id, auto_ip='10.0.0.2', fail_ip='127.0.0.1', exception=n_exc.InvalidInput( error_message='SomeError')): def allocate_mock(request): if isinstance(request, ipam_req.SpecificAddressRequest): if request.address == netaddr.IPAddress(fail_ip): raise exception else: return str(request.address), subnet_id else: return auto_ip, subnet_id return allocate_mock def _get_deallocate_mock(self, fail_ip='127.0.0.1', exception=n_exc.InvalidInput( error_message='SomeError')): def deallocate_mock(ip): if str(ip) == fail_ip: raise exception return deallocate_mock def _validate_allocate_calls(self, expected_calls, mocks): 
self.assertTrue(mocks['subnets'].allocate.called) actual_calls = mocks['subnets'].allocate.call_args_list self.assertEqual(len(expected_calls), len(actual_calls)) i = 0 for call in expected_calls: if call['ip_address']: self.assertIsInstance(actual_calls[i][0][0], ipam_req.SpecificAddressRequest) self.assertEqual(netaddr.IPAddress(call['ip_address']), actual_calls[i][0][0].address) else: self.assertIsInstance(actual_calls[i][0][0], ipam_req.AnyAddressRequest) i += 1 def _convert_to_ips(self, data): ips = [{'ip_address': ip, 'subnet_id': data[ip][1], 'subnet_cidr': data[ip][0]} for ip in data] return sorted(ips, key=lambda t: t['subnet_cidr']) def _gen_subnet_id(self): return uuidutils.generate_uuid() def test_deallocate_single_ip(self): mocks = self._prepare_ipam() ip = '192.168.12.45' data = {ip: ['192.168.12.0/24', self._gen_subnet_id()]} ips = self._convert_to_ips(data) mocks['ipam']._ipam_deallocate_ips(mock.ANY, mocks['driver'], mock.ANY, ips) mocks['driver'].get_subnet.assert_called_once_with(data[ip][1]) mocks['subnet'].deallocate.assert_called_once_with(ip) def test_deallocate_multiple_ips(self): mocks = self._prepare_ipam() data = {'192.168.43.15': ['192.168.43.0/24', self._gen_subnet_id()], '172.23.158.84': ['172.23.128.0/17', self._gen_subnet_id()], '8.8.8.8': ['8.0.0.0/8', self._gen_subnet_id()]} ips = self._convert_to_ips(data) mocks['ipam']._ipam_deallocate_ips(mock.ANY, mocks['driver'], mock.ANY, ips) get_calls = [mock.call(data[ip][1]) for ip in data] mocks['driver'].get_subnet.assert_has_calls(get_calls, any_order=True) ip_calls = [mock.call(ip) for ip in data] mocks['subnet'].deallocate.assert_has_calls(ip_calls, any_order=True) def _single_ip_allocate_helper(self, mocks, ip, network, subnet): ips = [{'subnet_cidr': network, 'subnet_id': subnet}] if ip: ips[0]['ip_address'] = ip allocated_ips = mocks['ipam']._ipam_allocate_ips( mock.ANY, mocks['driver'], mocks['port'], ips) mocks['driver'].get_allocator.assert_called_once_with([subnet]) self.assertTrue(mocks['subnets'].allocate.called) request = mocks['subnets'].allocate.call_args[0][0] return {'ips': allocated_ips, 'request': request} def test_allocate_single_fixed_ip(self): mocks = self._prepare_ipam() ip = '192.168.15.123' subnet_id = self._gen_subnet_id() mocks['subnets'].allocate.return_value = ip, subnet_id results = self._single_ip_allocate_helper(mocks, ip, '192.168.15.0/24', subnet_id) self.assertIsInstance(results['request'], ipam_req.SpecificAddressRequest) self.assertEqual(netaddr.IPAddress(ip), results['request'].address) self.assertEqual(ip, results['ips'][0]['ip_address'], 'Should allocate the same ip as passed') def test_allocate_single_any_ip(self): mocks = self._prepare_ipam() network = '192.168.15.0/24' ip = '192.168.15.83' subnet_id = self._gen_subnet_id() mocks['subnets'].allocate.return_value = ip, subnet_id results = self._single_ip_allocate_helper(mocks, '', network, subnet_id) self.assertIsInstance(results['request'], ipam_req.AnyAddressRequest) self.assertEqual(ip, results['ips'][0]['ip_address']) def test_allocate_eui64_ip(self): mocks = self._prepare_ipam() ip = {'subnet_id': self._gen_subnet_id(), 'subnet_cidr': '2001:470:abcd::/64', 'mac': '6c:62:6d:de:cf:49', 'eui64_address': True} eui64_ip = netutils.get_ipv6_addr_by_EUI64(ip['subnet_cidr'], ip['mac']) mocks['ipam']._ipam_allocate_ips(mock.ANY, mocks['driver'], mock.ANY, [ip]) request = mocks['subnets'].allocate.call_args[0][0] self.assertIsInstance(request, ipam_req.AutomaticAddressRequest) self.assertEqual(eui64_ip, request.address) def 
test_allocate_multiple_eui64_ips(self): mocks = self._prepare_ipam() ips = [{'subnet_id': self._gen_subnet_id(), 'subnet_cidr': '2001:470:abcd::/64', 'mac': '6c:62:6d:de:cf:49', 'eui64_address': True}, {'subnet_id': self._gen_subnet_id(), 'subnet_cidr': '2001:360:abcd::/64', 'mac': '6c:62:6d:de:cf:49', 'eui64_address': True}] mocks['ipam']._ipam_allocate_ips(mock.ANY, mocks['driver'], mock.ANY, ips) eui64_ips = [] request_ips = [] i = 0 requests = mocks['subnets'].allocate.call_args_list for ip in ips: eui64_ip = netutils.get_ipv6_addr_by_EUI64(ip['subnet_cidr'], ip['mac']) self.assertIsInstance(requests[i][0][0], ipam_req.AutomaticAddressRequest) self.assertEqual(eui64_ip, requests[i][0][0].address) request_ips.append(requests[i][0][0].address) eui64_ips.append(eui64_ip) i += 1 self.assertEqual(request_ips, eui64_ips) def test_allocate_multiple_ips(self): mocks = self._prepare_ipam() subnet_id = self._gen_subnet_id() data = {'': ['172.23.128.0/17', subnet_id], '192.168.43.15': ['192.168.43.0/24', self._gen_subnet_id()], '8.8.8.8': ['8.0.0.0/8', self._gen_subnet_id()]} ips = self._convert_to_ips(data) mocks['subnets'].allocate.side_effect = self._get_allocate_mock( subnet_id, auto_ip='172.23.128.94') mocks['ipam']._ipam_allocate_ips( mock.ANY, mocks['driver'], mocks['port'], ips) get_calls = [mock.call([data[ip][1]]) for ip in data] mocks['driver'].get_allocator.assert_has_calls( get_calls, any_order=True) self._validate_allocate_calls(ips, mocks) def _test_allocate_multiple_ips_with_exception(self, exc_on_deallocate=False): mocks = self._prepare_ipam() fail_ip = '192.168.43.15' auto_ip = '172.23.128.94' subnet_id = self._gen_subnet_id() data = {'': ['172.23.128.0/17', subnet_id], fail_ip: ['192.168.43.0/24', self._gen_subnet_id()], '8.8.8.8': ['8.0.0.0/8', self._gen_subnet_id()]} ips = self._convert_to_ips(data) mocks['subnets'].allocate.side_effect = self._get_allocate_mock( subnet_id, auto_ip=auto_ip, fail_ip=fail_ip, exception=db_exc.DBDeadlock()) # Exception should be raised on attempt to allocate second ip. 
# Revert action should be performed for the already allocated ips.
        # In this test case only one ip should be deallocated
        # and the original error should be reraised
        self.assertRaises(db_exc.DBDeadlock,
                          mocks['ipam']._ipam_allocate_ips,
                          mock.ANY,
                          mocks['driver'],
                          mocks['port'],
                          ips)

        # get_allocator should be called only for the first two networks
        get_calls = [mock.call([data[ip][1]]) for ip in ['', fail_ip]]
        mocks['driver'].get_allocator.assert_has_calls(
            get_calls, any_order=True)

        # Allocate should be called for the first two ips only
        self._validate_allocate_calls(ips[:-1], mocks)

        # Deallocate should be called for the first ip only
        mocks['subnet'].deallocate.assert_called_once_with(auto_ip)

    def test_allocate_multiple_ips_with_exception(self):
        self._test_allocate_multiple_ips_with_exception()

    def test_allocate_multiple_ips_with_exception_on_rollback(self):
        # Validate that the original exception is not replaced with one
        # raised on rollback (during deallocate)
        self._test_allocate_multiple_ips_with_exception(
            exc_on_deallocate=True)

    def test_deallocate_multiple_ips_with_exception(self):
        mocks = self._prepare_ipam()
        fail_ip = '192.168.43.15'
        data = {fail_ip: ['192.168.43.0/24', self._gen_subnet_id()],
                '0.10.8.8': ['0.10.0.0/8', self._gen_subnet_id()]}
        ips = self._convert_to_ips(data)
        mocks['subnet'].deallocate.side_effect = self._get_deallocate_mock(
            fail_ip=fail_ip, exception=db_exc.DBDeadlock())
        mocks['subnet'].allocate.side_effect = ValueError('Some-error')
        # Validate that the exception from deallocate (DBDeadlock) is not
        # replaced by the exception from allocate (ValueError) in the
        # rollback block, so the original exception is not changed
        self.assertRaises(db_exc.DBDeadlock,
                          mocks['ipam']._ipam_deallocate_ips,
                          mock.ANY,
                          mocks['driver'],
                          mock.ANY,
                          ips)
        mocks['subnets'].allocate.assert_called_once_with(mock.ANY)

    def test_test_fixed_ips_for_port_pd_gateway(self):
        context = mock.Mock()
        pluggable_backend = ipam_pluggable_backend.IpamPluggableBackend()
        with self.subnet(cidr=constants.PROVISIONAL_IPV6_PD_PREFIX,
                         ip_version=constants.IP_VERSION_6) as subnet:
            subnet = subnet['subnet']
            fixed_ips = [{'subnet_id': subnet['id'],
                          'ip_address': '::1'}]
            filtered_ips = (pluggable_backend.
                            _test_fixed_ips_for_port(
                                context, subnet['network_id'], fixed_ips,
                                constants.DEVICE_OWNER_ROUTER_INTF,
                                [subnet]))
            # Assert that ports created on prefix delegation subnets
            # will be returned without an ip address. This prevents router
            # interfaces being given the ::1 gateway address.
self.assertEqual(1, len(filtered_ips)) self.assertEqual(subnet['id'], filtered_ips[0]['subnet_id']) self.assertNotIn('ip_address', filtered_ips[0]) @mock.patch('neutron.ipam.driver.Pool') def test_create_subnet_over_ipam(self, pool_mock): mocks = self._prepare_mocks_with_pool_mock(pool_mock) cidr = '192.168.0.0/24' allocation_pools = [{'start': '192.168.0.2', 'end': '192.168.0.254'}] with self.subnet(allocation_pools=allocation_pools, cidr=cidr): pool_mock.get_instance.assert_called_once_with(None, mock.ANY) self.assertTrue(mocks['driver'].allocate_subnet.called) request = mocks['driver'].allocate_subnet.call_args[0][0] self.assertIsInstance(request, ipam_req.SpecificSubnetRequest) self.assertEqual(netaddr.IPNetwork(cidr), request.subnet_cidr) @mock.patch('neutron.ipam.driver.Pool') def test_create_ipv6_pd_subnet_over_ipam(self, pool_mock): mocks = self._prepare_mocks_with_pool_mock(pool_mock) cfg.CONF.set_override('ipv6_pd_enabled', True) cidr = constants.PROVISIONAL_IPV6_PD_PREFIX cidr_network = netaddr.IPNetwork(cidr) allocation_pools = [netaddr.IPRange(cidr_network.ip + 1, cidr_network.last)] with self.subnet(cidr=None, ip_version=constants.IP_VERSION_6, subnetpool_id=constants.IPV6_PD_POOL_ID, ipv6_ra_mode=constants.IPV6_SLAAC, ipv6_address_mode=constants.IPV6_SLAAC): self.assertEqual(2, pool_mock.get_instance.call_count) self.assertTrue(mocks['driver'].allocate_subnet.called) request = mocks['driver'].allocate_subnet.call_args[0][0] self.assertIsInstance(request, ipam_req.SpecificSubnetRequest) self.assertEqual(netaddr.IPNetwork(cidr), request.subnet_cidr) self.assertEqual(allocation_pools, request.allocation_pools) @mock.patch('neutron.ipam.driver.Pool') def test_create_subnet_over_ipam_with_rollback(self, pool_mock): mocks = self._prepare_mocks_with_pool_mock(pool_mock) mocks['driver'].allocate_subnet.side_effect = ValueError cidr = '10.0.2.0/24' with self.network() as network: self._create_subnet(self.fmt, network['network']['id'], cidr, expected_res_status=500) pool_mock.get_instance.assert_called_once_with(None, mock.ANY) self.assertTrue(mocks['driver'].allocate_subnet.called) request = mocks['driver'].allocate_subnet.call_args[0][0] self.assertIsInstance(request, ipam_req.SpecificSubnetRequest) self.assertEqual(netaddr.IPNetwork(cidr), request.subnet_cidr) # Verify no subnet was created for network req = self.new_show_request('networks', network['network']['id']) res = req.get_response(self.api) net = self.deserialize(self.fmt, res) self.assertEqual(0, len(net['network']['subnets'])) def _test_rollback_on_subnet_creation(self, pool_mock, driver_mocks): cidr = '10.0.2.0/24' with mock.patch.object( ipam_backend_mixin.IpamBackendMixin, '_save_subnet', side_effect=ValueError), self.network() as network: self._create_subnet(self.fmt, network['network']['id'], cidr, expected_res_status=500) pool_mock.get_instance.assert_any_call(None, mock.ANY) self.assertEqual(2, pool_mock.get_instance.call_count) self.assertTrue(driver_mocks['driver'].allocate_subnet.called) request = driver_mocks['driver'].allocate_subnet.call_args[0][0] self.assertIsInstance(request, ipam_req.SpecificSubnetRequest) self.assertEqual(netaddr.IPNetwork(cidr), request.subnet_cidr) # Verify remove ipam subnet was called driver_mocks['driver'].remove_subnet.assert_called_once_with( self.subnet_id) @mock.patch('neutron.ipam.driver.Pool') def test_ipam_subnet_deallocated_if_create_fails(self, pool_mock): driver_mocks = self._prepare_mocks_with_pool_mock(pool_mock) self._test_rollback_on_subnet_creation(pool_mock, 
driver_mocks) @mock.patch('neutron.ipam.driver.Pool') def test_ipam_subnet_create_and_rollback_fails(self, pool_mock): driver_mocks = self._prepare_mocks_with_pool_mock(pool_mock) # remove_subnet is called on rollback stage and n_exc.NotFound # typically produces 404 error. Validate that exception from # rollback stage is silenced and main exception (ValueError in this # case) is reraised. So resulting http status should be 500. driver_mocks['driver'].remove_subnet.side_effect = n_exc.NotFound self._test_rollback_on_subnet_creation(pool_mock, driver_mocks) @mock.patch('neutron.ipam.driver.Pool') def test_update_subnet_over_ipam(self, pool_mock): mocks = self._prepare_mocks_with_pool_mock(pool_mock) cidr = '10.0.0.0/24' allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}] with self.subnet(allocation_pools=allocation_pools, cidr=cidr) as subnet: data = {'subnet': {'allocation_pools': [ {'start': '10.0.0.10', 'end': '10.0.0.20'}, {'start': '10.0.0.30', 'end': '10.0.0.40'}]}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(200, res.status_code) pool_mock.get_instance.assert_any_call(None, mock.ANY) self.assertEqual(2, pool_mock.get_instance.call_count) self.assertTrue(mocks['driver'].update_subnet.called) request = mocks['driver'].update_subnet.call_args[0][0] self.assertIsInstance(request, ipam_req.SpecificSubnetRequest) self.assertEqual(netaddr.IPNetwork(cidr), request.subnet_cidr) ip_ranges = [netaddr.IPRange(p['start'], p['end']) for p in data['subnet']['allocation_pools']] self.assertEqual(ip_ranges, request.allocation_pools) @mock.patch('neutron.ipam.driver.Pool') def test_delete_subnet_over_ipam(self, pool_mock): mocks = self._prepare_mocks_with_pool_mock(pool_mock) gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) subnet = self._make_subnet(self.fmt, network, gateway_ip, cidr, ip_version=constants.IP_VERSION_4) req = self.new_delete_request('subnets', subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) pool_mock.get_instance.assert_any_call(None, mock.ANY) self.assertEqual(2, pool_mock.get_instance.call_count) mocks['driver'].remove_subnet.assert_called_once_with( subnet['subnet']['id']) @mock.patch('neutron.ipam.driver.Pool') def test_delete_subnet_over_ipam_with_rollback(self, pool_mock): mocks = self._prepare_mocks_with_pool_mock(pool_mock) mocks['driver'].remove_subnet.side_effect = ValueError gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) subnet = self._make_subnet(self.fmt, network, gateway_ip, cidr, ip_version=constants.IP_VERSION_4) req = self.new_delete_request('subnets', subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPServerError.code, res.status_int) pool_mock.get_instance.assert_any_call(None, mock.ANY) self.assertEqual(2, pool_mock.get_instance.call_count) mocks['driver'].remove_subnet.assert_called_once_with( subnet['subnet']['id']) # Verify subnet was recreated after failed ipam call subnet_req = self.new_show_request('subnets', subnet['subnet']['id']) raw_res = subnet_req.get_response(self.api) sub_res = self.deserialize(self.fmt, raw_res) self.assertIn(sub_res['subnet']['cidr'], cidr) self.assertIn(sub_res['subnet']['gateway_ip'], gateway_ip) 
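# A minimal standalone sketch of the invariant the rollback tests above pin
# down: when reverting partially-applied IPAM work, a failure inside the
# revert must never shadow the exception that triggered it. The callables and
# names here are assumptions standing in for the real driver, not the actual
# backend code.
import logging as _logging_demo

_LOG = _logging_demo.getLogger(__name__)


def _allocate_all_or_revert(allocate, deallocate, requests):
    """Allocate every request, or revert the successful ones and re-raise."""
    done = []
    try:
        for request in requests:
            done.append(allocate(request))
        return done
    except Exception:
        for ip in done:
            try:
                deallocate(ip)
            except Exception:
                # Log and swallow revert errors so the bare `raise` below
                # re-raises the original allocation failure (the
                # DBDeadlock-vs-ValueError behaviour asserted above).
                _LOG.exception('rollback failed for %s', ip)
        raise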
    @mock.patch('neutron.ipam.driver.Pool')
    def test_create_port_ipam(self, pool_mock):
        mocks = self._prepare_mocks_with_pool_mock(pool_mock)
        auto_ip = '10.0.0.2'
        expected_calls = [{'ip_address': ''}]
        with self.subnet() as subnet:
            mocks['subnets'].allocate.side_effect = self._get_allocate_mock(
                subnet['subnet']['id'], auto_ip=auto_ip)
            with self.port(subnet=subnet) as port:
                ips = port['port']['fixed_ips']
                self.assertEqual(1, len(ips))
                self.assertEqual(ips[0]['ip_address'], auto_ip)
                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
                self._validate_allocate_calls(expected_calls, mocks)

    @mock.patch('neutron.ipam.driver.Pool')
    def test_create_port_ipam_with_rollback(self, pool_mock):
        mocks = self._prepare_mocks_with_pool_mock(pool_mock)
        mocks['subnet'].allocate.side_effect = ValueError
        with self.network() as network:
            with self.subnet(network=network):
                net_id = network['network']['id']
                data = {
                    'port': {'network_id': net_id,
                             'tenant_id': network['network']['tenant_id']}}
                port_req = self.new_create_request('ports', data)
                res = port_req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPServerError.code,
                                 res.status_int)

                # verify no port left after failure
                req = self.new_list_request('ports', self.fmt,
                                            "network_id=%s" % net_id)
                res = self.deserialize(self.fmt, req.get_response(self.api))
                self.assertEqual(0, len(res['ports']))

    @mock.patch('neutron.ipam.driver.Pool')
    def test_update_port_ipam(self, pool_mock):
        mocks = self._prepare_mocks_with_pool_mock(pool_mock)
        auto_ip = '10.0.0.2'
        new_ip = '10.0.0.15'
        expected_calls = [{'ip_address': ip} for ip in ['', new_ip]]
        with self.subnet() as subnet:
            mocks['subnets'].allocate.side_effect = self._get_allocate_mock(
                subnet['subnet']['id'], auto_ip=auto_ip)
            with self.port(subnet=subnet) as port:
                ips = port['port']['fixed_ips']
                self.assertEqual(1, len(ips))
                self.assertEqual(auto_ip, ips[0]['ip_address'])
                # Update port with another new ip
                data = {"port": {"fixed_ips": [{
                        'subnet_id': subnet['subnet']['id'],
                        'ip_address': new_ip}]}}
                req = self.new_update_request('ports', data,
                                              port['port']['id'])
                res = self.deserialize(self.fmt,
                                       req.get_response(self.api))
                ips = res['port']['fixed_ips']
                self.assertEqual(1, len(ips))
                self.assertEqual(new_ip, ips[0]['ip_address'])

                # Allocate should be called for the initial ip and the new ip
                self._validate_allocate_calls(expected_calls, mocks)
                # Deallocate should be called for the initial ip only
                mocks['subnet'].deallocate.assert_called_once_with(auto_ip)

    @mock.patch('neutron.ipam.driver.Pool')
    def test_delete_port_ipam(self, pool_mock):
        mocks = self._prepare_mocks_with_pool_mock(pool_mock)
        auto_ip = '10.0.0.2'
        with self.subnet() as subnet:
            mocks['subnets'].allocate.side_effect = self._get_allocate_mock(
                subnet['subnet']['id'], auto_ip=auto_ip)
            with self.port(subnet=subnet) as port:
                ips = port['port']['fixed_ips']
                self.assertEqual(1, len(ips))
                self.assertEqual(auto_ip, ips[0]['ip_address'])
                req = self.new_delete_request('ports', port['port']['id'])
                res = req.get_response(self.api)
                self.assertEqual(webob.exc.HTTPNoContent.code,
                                 res.status_int)
                mocks['subnet'].deallocate.assert_called_once_with(auto_ip)

    def test_recreate_port_ipam(self):
        with self.subnet() as subnet:
            subnet_cidr = subnet['subnet']['cidr']
            with self.port(subnet=subnet) as port:
                ips = port['port']['fixed_ips']
                self.assertEqual(1, len(ips))
                orig_ip = ips[0]['ip_address']
                self.assertIn(netaddr.IPAddress(ips[0]['ip_address']),
                              netaddr.IPSet(netaddr.IPNetwork(subnet_cidr)))
                req = self.new_delete_request('ports', port['port']['id'])
                res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) with self.port(subnet=subnet, fixed_ips=ips) as port: ips = port['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual(orig_ip, ips[0]['ip_address']) def test_recreate_port_ipam_specific_ip(self): with self.subnet() as subnet: ip = '10.0.0.2' fixed_ip_data = [{'subnet_id': subnet['subnet']['id'], 'ip_address': ip}] with self.port(subnet=subnet, fixed_ips=fixed_ip_data) as port: ips = port['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual(ip, ips[0]['ip_address']) req = self.new_delete_request('ports', port['port']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) with self.port(subnet=subnet, fixed_ips=ips) as port: ips = port['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual(ip, ips[0]['ip_address']) @mock.patch('neutron.ipam.driver.Pool') def test_update_ips_for_port_passes_port_dict_to_factory(self, pool_mock): address_factory = mock.Mock() mocks = self._prepare_mocks_with_pool_mock( pool_mock, address_factory=address_factory) context = mock.Mock() new_ips = mock.Mock() original_ips = mock.Mock() mac = mock.Mock() ip_dict = {'ip_address': '192.1.1.10', 'subnet_id': uuidutils.generate_uuid()} changes = ipam_pluggable_backend.IpamPluggableBackend.Changes( add=[ip_dict], original=[], remove=[]) changes_mock = mock.Mock(return_value=changes) fixed_ips_mock = mock.Mock(return_value=changes.add) mocks['ipam'] = ipam_pluggable_backend.IpamPluggableBackend() mocks['ipam']._get_changed_ips_for_port = changes_mock mocks['ipam']._ipam_get_subnets = mock.Mock(return_value=[]) mocks['ipam']._test_fixed_ips_for_port = fixed_ips_mock mocks['ipam']._update_ips_for_pd_subnet = mock.Mock(return_value=[]) port_dict = {'device_owner': uuidutils.generate_uuid(), 'network_id': uuidutils.generate_uuid()} mocks['ipam']._update_ips_for_port(context, port_dict, None, original_ips, new_ips, mac) mocks['driver'].get_address_request_factory.assert_called_once_with() mocks['ipam']._ipam_get_subnets.assert_called_once_with( context, network_id=port_dict['network_id'], fixed_configured=True, fixed_ips=[ip_dict], host=None, service_type=port_dict['device_owner']) # Validate port_dict is passed into address_factory address_factory.get_request.assert_called_once_with(context, port_dict, ip_dict) @mock.patch('neutron.ipam.driver.Pool') def test_update_ips_for_port_passes_port_id_to_factory(self, pool_mock): port_id = uuidutils.generate_uuid() network_id = uuidutils.generate_uuid() address_factory = mock.Mock() mocks = self._prepare_mocks_with_pool_mock( pool_mock, address_factory=address_factory) context = mock.Mock() ip_dict = {'ip_address': '192.1.1.10', 'subnet_id': uuidutils.generate_uuid()} port_dict = {'port': {'device_owner': uuidutils.generate_uuid(), 'network_id': network_id, 'fixed_ips': [ip_dict]}} subnets = [{'id': ip_dict['subnet_id'], 'network_id': network_id, 'cidr': '192.1.1.0/24', 'ip_version': constants.IP_VERSION_4, 'ipv6_address_mode': None, 'ipv6_ra_mode': None}] get_subnets_mock = mock.Mock(return_value=subnets) get_subnet_mock = mock.Mock(return_value=subnets[0]) mocks['ipam'] = ipam_pluggable_backend.IpamPluggableBackend() mocks['ipam']._ipam_get_subnets = get_subnets_mock mocks['ipam']._get_subnet = get_subnet_mock with mock.patch.object(port_obj.IPAllocation, 'create'): mocks['ipam'].allocate_ips_for_port_and_store(context, port_dict, port_id) mocks['driver'].get_address_request_factory.assert_called_once_with() port_dict_with_id = 
port_dict['port'].copy() port_dict_with_id['id'] = port_id # Validate port id is added to port dict before address_factory call ip_dict.pop('device_owner') address_factory.get_request.assert_called_once_with(context, port_dict_with_id, ip_dict) # Verify incoming port dict is not changed ('id' is not added to it) self.assertIsNone(port_dict['port'].get('id')) def _test_update_db_subnet(self, pool_mock, subnet, expected_subnet, old_pools): subnet_factory = mock.Mock() context = self.admin_context if 'cidr' in subnet: subnet['cidr'] = netaddr.IPNetwork(subnet['cidr']) if 'cidr' in expected_subnet: expected_subnet['cidr'] = netaddr.IPNetwork( expected_subnet['cidr']) mocks = self._prepare_mocks_with_pool_mock( pool_mock, subnet_factory=subnet_factory) mocks['ipam'] = ipam_pluggable_backend.IpamPluggableBackend() mocks['ipam'].update_db_subnet( context, subnet['id'], subnet, old_pools) mocks['driver'].get_subnet_request_factory.assert_called_once_with() subnet_factory.get_request.assert_called_once_with(context, expected_subnet, None) @mock.patch('neutron.ipam.driver.Pool') def test_update_db_subnet_unchanged_pools(self, pool_mock): old_pools = [{'start': '192.1.1.2', 'end': '192.1.1.254'}] context = self.admin_context network_id = uuidutils.generate_uuid() network_obj.Network(context, id=network_id).create() subnet = {'id': uuidutils.generate_uuid(), 'ip_version': constants.IP_VERSION_4, 'cidr': netaddr.IPNetwork('192.1.1.0/24'), 'ipv6_address_mode': None, 'ipv6_ra_mode': None, 'network_id': network_id} subnet_with_pools = subnet.copy() subnet_obj = obj_subnet.Subnet(context, **subnet_with_pools) subnet_obj.create() subnet_with_pools['allocation_pools'] = old_pools # if subnet has no allocation pools set, then old pools has to # be added to subnet dict passed to request factory self._test_update_db_subnet(pool_mock, subnet, subnet_with_pools, old_pools) @mock.patch('neutron.ipam.driver.Pool') def test_update_db_subnet_new_pools(self, pool_mock): old_pools = [{'start': '192.1.1.2', 'end': '192.1.1.254'}] context = self.admin_context network_id = uuidutils.generate_uuid() network_obj.Network(context, id=network_id).create() subnet = {'id': uuidutils.generate_uuid(), 'ip_version': constants.IP_VERSION_4, 'cidr': netaddr.IPNetwork('192.1.1.0/24'), 'ipv6_address_mode': None, 'ipv6_ra_mode': None, 'network_id': network_id} # make a copy of subnet for validation, since update_subnet changes # incoming subnet dict expected_subnet = subnet.copy() subnet_obj = obj_subnet.Subnet(context, **subnet) subnet_obj.create() subnet['allocation_pools'] = [ netaddr.IPRange('192.1.1.10', '192.1.1.254')] expected_subnet = subnet.copy() obj_subnet.IPAllocationPool(context, subnet_id=subnet['id'], start='192.1.1.10', end='192.1.1.254').create() # validate that subnet passed to request factory is the same as # incoming one, i.e. 
new pools in it are not overwritten by old pools self._test_update_db_subnet(pool_mock, subnet, expected_subnet, old_pools) @mock.patch('neutron.ipam.driver.Pool') def test_update_db_subnet_new_pools_exception(self, pool_mock): context = mock.Mock() mocks = self._prepare_mocks_with_pool_mock(pool_mock) mocks['ipam'] = ipam_pluggable_backend.IpamPluggableBackend() new_port = {'fixed_ips': [{'ip_address': '192.168.1.20', 'subnet_id': uuidutils.generate_uuid()}, {'ip_address': '192.168.1.50', 'subnet_id': uuidutils.generate_uuid()}]} db_port = port_obj.Port(context, id=uuidutils.generate_uuid(), network_id=uuidutils.generate_uuid()) old_port = {'fixed_ips': [{'ip_address': '192.168.1.10', 'subnet_id': uuidutils.generate_uuid()}, {'ip_address': '192.168.1.50', 'subnet_id': uuidutils.generate_uuid()}]} changes = mocks['ipam'].Changes( add=[{'ip_address': '192.168.1.20', 'subnet_id': uuidutils.generate_uuid()}], original=[{'ip_address': '192.168.1.50', 'subnet_id': uuidutils.generate_uuid()}], remove=[{'ip_address': '192.168.1.10', 'subnet_id': uuidutils.generate_uuid()}]) mocks['ipam']._delete_ip_allocation = mock.Mock() mocks['ipam']._make_port_dict = mock.Mock(return_value=old_port) mocks['ipam']._update_ips_for_port = mock.Mock(return_value=changes) mocks['ipam']._update_db_port = mock.Mock( side_effect=db_exc.DBDeadlock) # emulate raising exception on rollback actions mocks['ipam']._ipam_deallocate_ips = mock.Mock(side_effect=ValueError) mocks['ipam']._ipam_allocate_ips = mock.Mock(side_effect=ValueError) # Validate original exception (DBDeadlock) is not overridden by # exception raised on rollback (ValueError) with mock.patch.object(port_obj.IPAllocation, 'create'): self.assertRaises(db_exc.DBDeadlock, mocks['ipam'].update_port_with_ips, context, None, db_port, new_port, mock.Mock()) mocks['ipam']._ipam_deallocate_ips.assert_called_once_with( context, mocks['driver'], db_port, changes.add, revert_on_fail=False) mocks['ipam']._ipam_allocate_ips.assert_called_once_with( context, mocks['driver'], db_port, changes.remove, revert_on_fail=False) class TestRollback(test_db_base.NeutronDbPluginV2TestCase): def setUp(self): cfg.CONF.set_override('ipam_driver', 'internal') super(TestRollback, self).setUp() def test_ipam_rollback_not_broken_on_session_rollback(self): """Triggers an error that calls rollback on session.""" with self.network() as net: with self.subnet(network=net, cidr='10.0.1.0/24') as subnet1: with self.subnet(network=net, cidr='10.0.2.0/24') as subnet2: pass # If this test fails and this method appears in the server side stack # trace then IPAM rollback was likely tried using a session which had # already been rolled back by the DB exception. def rollback(func, *args, **kwargs): func(*args, **kwargs) # Ensure DBDuplicate exception is raised in the context where IPAM # rollback is triggered. It "breaks" the session because it triggers DB # rollback. Inserting a flush in _store_ip_allocation does this. orig = ipam_pluggable_backend.IpamPluggableBackend._store_ip_allocation def store(context, ip_address, *args, **kwargs): try: return orig(context, ip_address, *args, **kwargs) finally: context.session.flush() # Create a port to conflict with later. Simulates a race for addresses. 
result = self._create_port( self.fmt, net_id=net['network']['id'], fixed_ips=[{'subnet_id': subnet1['subnet']['id']}, {'subnet_id': subnet2['subnet']['id']}]) port = self.deserialize(self.fmt, result) fixed_ips = port['port']['fixed_ips'] # Hands out the same 2nd IP to create conflict and trigger rollback ips = [{'subnet_id': fixed_ips[0]['subnet_id'], 'ip_address': fixed_ips[0]['ip_address']}, {'subnet_id': fixed_ips[1]['subnet_id'], 'ip_address': fixed_ips[1]['ip_address']}] def alloc(*args, **kwargs): def increment_address(a): a['ip_address'] = str(netaddr.IPAddress(a['ip_address']) + 1) # Increment 1st address to return a free address on the first call increment_address(ips[0]) try: return copy.deepcopy(ips) finally: # Increment 2nd address to return free address on the 2nd call increment_address(ips[1]) Backend = ipam_pluggable_backend.IpamPluggableBackend with mock.patch.object(Backend, '_store_ip_allocation', wraps=store),\ mock.patch.object(Backend, '_safe_rollback', wraps=rollback),\ mock.patch.object(Backend, '_allocate_ips_for_port', wraps=alloc): # Create port with two addresses. The wrapper lets one succeed # then simulates race for the second to trigger IPAM rollback. response = self._create_port( self.fmt, net_id=net['network']['id'], fixed_ips=[{'subnet_id': subnet1['subnet']['id']}, {'subnet_id': subnet2['subnet']['id']}]) # When all goes well, retry kicks in and the operation is successful. self.assertEqual(webob.exc.HTTPCreated.code, response.status_int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/test_l3_db.py0000644000175000017500000005756600000000000023762 0ustar00coreycorey00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
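# A minimal standalone sketch of the behaviour the first cases below pin down
# for L3_NAT_dbonly_mixin._each_port_having_fixed_ips: a lazy filter that
# tolerates None and drops ports carrying no fixed IPs. The function name and
# sample data are assumptions for illustration, not the mixin's actual code.
def _each_port_having_fixed_ips_sketch(ports):
    """Yield only ports that have at least one fixed IP; accept None."""
    for port in ports or []:
        if port.get('fixed_ips'):
            yield port


assert list(_each_port_having_fixed_ips_sketch(None)) == []
assert ['a'] == [p['id'] for p in _each_port_having_fixed_ips_sketch(
    [{'id': 'a', 'fixed_ips': ['192.0.2.10']}, {'id': 'b'}])]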
import mock
import netaddr
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants as n_const
from neutron_lib import context
from neutron_lib import exceptions as n_exc
from neutron_lib.exceptions import l3 as l3_exc
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from neutron_lib.plugins import utils as plugin_utils
from oslo_utils import uuidutils
import testtools

from neutron.db import l3_db
from neutron.db.models import l3 as l3_models
from neutron.objects import base as base_obj
from neutron.objects import network as network_obj
from neutron.objects import ports as port_obj
from neutron.objects import router as l3_obj
from neutron.objects import subnet as subnet_obj
from neutron.tests import base
from neutron.tests.unit.db import test_db_base_plugin_v2


class TestL3_NAT_dbonly_mixin(base.BaseTestCase):

    def setUp(self):
        super(TestL3_NAT_dbonly_mixin, self).setUp()
        self.db = l3_db.L3_NAT_dbonly_mixin()

    def test__each_port_having_fixed_ips_none(self):
        """Be sure the method returns an empty list when None is passed"""
        filtered = l3_db.L3_NAT_dbonly_mixin._each_port_having_fixed_ips(None)
        self.assertEqual([], list(filtered))

    def test__new__passes_args(self):
        class T(l3_db.L3_NAT_db_mixin):
            def __init__(self, *args, **kwargs):
                self.args = args
                self.kwargs = kwargs

        t = T(1, 2, a=3)
        self.assertEqual((1, 2), t.args)
        self.assertEqual({'a': 3}, t.kwargs)

    def test__each_port_having_fixed_ips(self):
        """Basic test that ports without fixed ips are filtered out"""
        ports = [{'id': 'a', 'fixed_ips': [mock.sentinel.fixedip]},
                 {'id': 'b'}]
        filtered = l3_db.L3_NAT_dbonly_mixin._each_port_having_fixed_ips(
            ports)
        ids = [p['id'] for p in filtered]
        self.assertEqual(['a'], ids)

    def test__get_subnets_by_network_no_query(self):
        """Basic test that no query is performed if no Ports are passed"""
        context = mock.Mock()
        with mock.patch.object(directory, 'get_plugin') as get_p:
            self.db._get_subnets_by_network_list(context, [])
        self.assertFalse(context.session.query.called)
        self.assertFalse(get_p.called)

    def test__get_subnets_by_network(self):
        """Basic test that the right query is called"""
        context = mock.MagicMock()
        query = context.session.query().outerjoin().filter()
        query.__iter__.return_value = [(mock.sentinel.subnet_db,
                                        mock.sentinel.address_scope_id)]
        with mock.patch.object(directory, 'get_plugin') as get_p:
            get_p()._make_subnet_dict.return_value = {
                'network_id': mock.sentinel.network_id}
            subnets = self.db._get_subnets_by_network_list(
                context, [mock.sentinel.network_id])
        self.assertEqual({
            mock.sentinel.network_id: [{
                'address_scope_id': mock.sentinel.address_scope_id,
                'network_id': mock.sentinel.network_id}]}, subnets)

    def test__get_mtus_by_network_list(self):
        """Basic test that get_networks is queried correctly"""
        network = {'id': mock.sentinel.network_id,
                   'name': mock.sentinel.name,
                   'mtu': mock.sentinel.mtu}
        with mock.patch.object(directory, 'get_plugin') as get_p:
            get_p().get_networks.return_value = [network]
            result = self.db._get_mtus_by_network_list(
                mock.sentinel.context, [mock.sentinel.network_id])
            get_p().get_networks.assert_called_once_with(
                mock.sentinel.context,
                filters={'id': [mock.sentinel.network_id]},
                fields=['id', 'mtu'])
            self.assertEqual({mock.sentinel.network_id: mock.sentinel.mtu},
                             result)

    def test__populate_ports_for_subnets_none(self):
        """Basic test that the method runs correctly with no ports"""
        ports = []
        with
mock.patch.object(directory, 'get_plugin') as get_p: get_p().get_networks.return_value = [] self.db._populate_mtu_and_subnets_for_ports(mock.sentinel.context, ports) self.assertEqual([], ports) @mock.patch.object(l3_db.L3_NAT_dbonly_mixin, '_get_subnets_by_network_list') def test__populate_ports_for_subnets(self, get_subnets_by_network): cidr = "2001:db8::/64" subnet = {'id': mock.sentinel.subnet_id, 'cidr': cidr, 'gateway_ip': mock.sentinel.gateway_ip, 'dns_nameservers': mock.sentinel.dns_nameservers, 'ipv6_ra_mode': mock.sentinel.ipv6_ra_mode, 'subnetpool_id': mock.sentinel.subnetpool_id, 'address_scope_id': mock.sentinel.address_scope_id} get_subnets_by_network.return_value = {'net_id': [subnet]} ports = [{'network_id': 'net_id', 'id': 'port_id', 'fixed_ips': [{'subnet_id': mock.sentinel.subnet_id}]}] with mock.patch.object(directory, 'get_plugin') as get_p: get_p().get_networks.return_value = [{'id': 'net_id', 'mtu': 1446}] self.db._populate_mtu_and_subnets_for_ports(mock.sentinel.context, ports) keys = ('id', 'cidr', 'gateway_ip', 'ipv6_ra_mode', 'subnetpool_id', 'dns_nameservers') address_scopes = {4: None, 6: mock.sentinel.address_scope_id} self.assertEqual([{'extra_subnets': [], 'fixed_ips': [{'subnet_id': mock.sentinel.subnet_id, 'prefixlen': 64}], 'id': 'port_id', 'mtu': 1446, 'network_id': 'net_id', 'subnets': [{k: subnet[k] for k in keys}], 'address_scopes': address_scopes}], ports) def test__get_sync_floating_ips_no_query(self): """Basic test that no query is performed if no router ids are passed""" db = l3_db.L3_NAT_dbonly_mixin() context = mock.Mock() db._get_sync_floating_ips(context, []) self.assertFalse(context.session.query.called) @mock.patch.object(l3_db.L3_NAT_dbonly_mixin, '_make_floatingip_dict') def test__make_floatingip_dict_with_scope(self, make_fip_dict): db = l3_db.L3_NAT_dbonly_mixin() make_fip_dict.return_value = {'id': mock.sentinel.fip_ip} result = db._make_floatingip_dict_with_scope( mock.sentinel.floating_ip_db, mock.sentinel.address_scope_id) self.assertEqual({ 'fixed_ip_address_scope': mock.sentinel.address_scope_id, 'id': mock.sentinel.fip_ip}, result) def test__unique_floatingip_iterator(self): context = mock.MagicMock() query = mock.MagicMock() query.order_by().__iter__.return_value = [ ({'id': 'id1'}, 'scope1'), ({'id': 'id1'}, 'scope1'), ({'id': 'id2'}, 'scope2'), ({'id': 'id2'}, 'scope2'), ({'id': 'id2'}, 'scope2'), ({'id': 'id3'}, 'scope3')] query.reset_mock() with mock.patch.object( l3_obj.FloatingIP, '_load_object', side_effect=({'id': 'id1'}, {'id': 'id2'}, {'id': 'id3'})): result = list( l3_obj.FloatingIP._unique_floatingip_iterator(context, query)) query.order_by.assert_called_once_with(l3_models.FloatingIP.id) self.assertEqual([({'id': 'id1'}, 'scope1'), ({'id': 'id2'}, 'scope2'), ({'id': 'id3'}, 'scope3')], result) @mock.patch.object(directory, 'get_plugin') def test_prevent_l3_port_deletion_port_not_found(self, gp): # port not found doesn't prevent gp.return_value.get_port.side_effect = n_exc.PortNotFound(port_id='1') self.db.prevent_l3_port_deletion(None, None) @mock.patch.object(directory, 'get_plugin') def test_prevent_l3_port_device_owner_not_router(self, gp): # ignores other device owners gp.return_value.get_port.return_value = {'device_owner': 'cat'} self.db.prevent_l3_port_deletion(None, None) @mock.patch.object(directory, 'get_plugin') def test_prevent_l3_port_no_fixed_ips(self, gp): # without fixed IPs is allowed gp.return_value.get_port.return_value = { 'device_owner': n_const.DEVICE_OWNER_ROUTER_INTF, 'fixed_ips': [], 'id': 'f' 
} self.db.prevent_l3_port_deletion(None, None) @mock.patch.object(directory, 'get_plugin') def test_prevent_l3_port_no_router(self, gp): # without router is allowed gp.return_value.get_port.return_value = { 'device_owner': n_const.DEVICE_OWNER_ROUTER_INTF, 'device_id': '44', 'id': 'f', 'fixed_ips': [{'ip_address': '1.1.1.1', 'subnet_id': '4'}]} self.db.get_router = mock.Mock() self.db.get_router.side_effect = l3_exc.RouterNotFound(router_id='44') self.db.prevent_l3_port_deletion(mock.Mock(), None) @mock.patch.object(directory, 'get_plugin') def test_prevent_l3_port_existing_router(self, gp): gp.return_value.get_port.return_value = { 'device_owner': n_const.DEVICE_OWNER_ROUTER_INTF, 'device_id': 'some_router', 'id': 'f', 'fixed_ips': [{'ip_address': '1.1.1.1', 'subnet_id': '4'}]} self.db.get_router = mock.Mock() with testtools.ExpectedException(n_exc.ServicePortInUse): self.db.prevent_l3_port_deletion(mock.Mock(), None) @mock.patch.object(directory, 'get_plugin') def test_prevent_l3_port_existing_floating_ip(self, gp): ctx = context.get_admin_context() gp.return_value.get_port.return_value = { 'device_owner': n_const.DEVICE_OWNER_FLOATINGIP, 'device_id': 'some_flip', 'id': 'f', 'fixed_ips': [{'ip_address': '1.1.1.1', 'subnet_id': '4'}]} with mock.patch.object(l3_obj.FloatingIP, 'objects_exist', return_value=mock.Mock()),\ testtools.ExpectedException(n_exc.ServicePortInUse): self.db.prevent_l3_port_deletion(ctx, None) @mock.patch.object(directory, 'get_plugin') def test_subscribe_address_scope_of_subnetpool(self, gp): l3_db.L3RpcNotifierMixin() registry.publish(resources.SUBNETPOOL_ADDRESS_SCOPE, events.AFTER_UPDATE, mock.ANY, payload=events.DBEventPayload( mock.MagicMock(), resource_id='fake_id')) self.assertTrue(gp.return_value.notify_routers_updated.called) def test__check_and_get_fip_assoc_with_extra_association_no_change(self): fip = {'extra_key': 'value'} context = mock.MagicMock() floatingip_obj = l3_obj.FloatingIP( context, id=uuidutils.generate_uuid(), floating_network_id=uuidutils.generate_uuid(), floating_ip_address=netaddr.IPAddress('8.8.8.8'), fixed_port_id=uuidutils.generate_uuid(), floating_port_id=uuidutils.generate_uuid()) with mock.patch.object( l3_db.L3_NAT_dbonly_mixin, '_get_assoc_data', return_value=('1', '2', '3')) as mock_get_assoc_data: self.db._check_and_get_fip_assoc(context, fip, floatingip_obj) context.session.query.assert_not_called() mock_get_assoc_data.assert_called_once_with( mock.ANY, fip, floatingip_obj) def test__notify_attaching_interface(self): with mock.patch.object(l3_db.registry, 'notify') as mock_notify: context = mock.MagicMock() router_id = 'router_id' net_id = 'net_id' router_db = mock.Mock() router_db.id = router_id port = {'network_id': net_id} intf = {} self.db._notify_attaching_interface(context, router_db, port, intf) kwargs = {'context': context, 'router_id': router_id, 'network_id': net_id, 'interface_info': intf, 'router_db': router_db, 'port': port} mock_notify.assert_called_once_with( resources.ROUTER_INTERFACE, events.BEFORE_CREATE, self.db, **kwargs) def test__create_gw_port(self): router_id = '2afb8434-7380-43a2-913f-ba3a5ad5f349' router = l3_models.Router(id=router_id) new_network_id = 'net-id' ext_ips = [{'subnet_id': 'subnet-id', 'ip_address': '1.1.1.1'}] gw_port = {'fixed_ips': [{'subnet_id': 'subnet-id', 'ip_address': '1.1.1.1'}], 'id': '8742d007-6f05-4b7e-abdb-11818f608959'} ctx = context.get_admin_context() with mock.patch.object(directory, 'get_plugin') as get_p, \ mock.patch.object(get_p(), 'get_subnets_by_network', 
return_value=mock.ANY), \ mock.patch.object(get_p(), '_get_port', return_value=gw_port), \ mock.patch.object(l3_db.L3_NAT_dbonly_mixin, '_check_for_dup_router_subnets') as cfdrs,\ mock.patch.object(plugin_utils, 'create_port', return_value=gw_port), \ mock.patch.object(ctx.session, 'add'), \ mock.patch.object(base_obj.NeutronDbObject, 'create'), \ mock.patch.object(l3_db.registry, 'publish') as mock_notify: self.db._create_gw_port(ctx, router_id=router_id, router=router, new_network_id=new_network_id, ext_ips=ext_ips) expected_gw_ips = ['1.1.1.1'] self.assertTrue(cfdrs.called) mock_notify.assert_called_with( resources.ROUTER_GATEWAY, events.AFTER_CREATE, self.db._create_gw_port, payload=mock.ANY) cb_payload = mock_notify.mock_calls[1][2]['payload'] self.assertEqual(ctx, cb_payload.context) self.assertEqual(expected_gw_ips, cb_payload.metadata.get('gateway_ips')) self.assertEqual(new_network_id, cb_payload.metadata.get('network_id')) self.assertEqual(router_id, cb_payload.resource_id) class L3_NAT_db_mixin(base.BaseTestCase): def setUp(self): super(L3_NAT_db_mixin, self).setUp() self.db = l3_db.L3_NAT_db_mixin() def _test_create_router(self, external_gateway_info=None): router_db = l3_models.Router(id='123') router_dict = {'id': '123', 'tenant_id': '456', 'external_gateway_info': external_gateway_info} # Need to use a copy here as the create_router method pops the gateway # information router_input = {'router': router_dict.copy()} with mock.patch.object(l3_db.L3_NAT_dbonly_mixin, '_create_router_db', return_value=router_db) as crd,\ mock.patch.object(l3_db.L3_NAT_dbonly_mixin, '_make_router_dict', return_value=router_dict),\ mock.patch.object(l3_db.L3_NAT_dbonly_mixin, '_update_router_gw_info') as urgi,\ mock.patch.object(l3_db.L3_NAT_dbonly_mixin, '_get_router', return_value=router_db),\ mock.patch.object(l3_db.L3_NAT_db_mixin, 'notify_router_updated') as nru: self.db.create_router(mock.Mock(), router_input) self.assertTrue(crd.called) if external_gateway_info: self.assertTrue(urgi.called) self.assertTrue(nru.called) else: self.assertFalse(urgi.called) self.assertFalse(nru.called) def test_create_router_no_gateway(self): self._test_create_router() def test_create_router_gateway(self): ext_gateway_info = {'network_id': 'net-id', 'enable_snat': True, 'external_fixed_ips': [ {'subnet_id': 'subnet-id', 'ip_address': 'ip'}]} self._test_create_router(ext_gateway_info) def test_add_router_interface_no_interface_info(self): router_db = l3_models.Router(id='123') with mock.patch.object(l3_db.L3_NAT_dbonly_mixin, '_get_router', return_value=router_db): self.assertRaises( n_exc.BadRequest, self.db.add_router_interface, mock.Mock(), router_db.id) class FakeL3Plugin(l3_db.L3_NAT_dbonly_mixin): pass class L3TestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): GET_PORTS_BY_ROUTER_MSG = ( 'The following ports, assigned to router %(router_id)s, do not have a ' '"routerport" register: %(port_ids)s') def setUp(self, *args, **kwargs): super(L3TestCase, self).setUp(plugin='ml2') self.core_plugin = directory.get_plugin() self.ctx = context.get_admin_context() self.mixin = FakeL3Plugin() directory.add_plugin(plugin_constants.L3, self.mixin) self.network = self.create_network() self.subnets = [] self.subnets.append(self.create_subnet(self.network, '1.1.1.1', '1.1.1.0/24')) self.subnets.append(self.create_subnet(self.network, '1.1.2.1', '1.1.2.0/24')) router = {'router': {'name': 'foo_router', 'admin_state_up': True, 'tenant_id': 'foo_tenant'}} self.router = self.create_router(router) self.ports = [] for 
subnet in self.subnets:
            ipa = str(netaddr.IPNetwork(subnet['subnet']['cidr']).ip + 10)
            fixed_ips = [{'subnet_id': subnet['subnet']['id'],
                          'ip_address': ipa}]
            self.ports.append(self.create_port(
                self.network['network']['id'], {'fixed_ips': fixed_ips}))
        self.addCleanup(self._clean_objs)

    def _clean_objs(self):
        port_obj.Port.delete_objects(
            self.ctx, network_id=self.network['network']['id'])
        subnet_obj.Subnet.delete_objects(
            self.ctx, network_id=self.network['network']['id'])
        network_obj.Network.get_object(
            self.ctx, id=self.network['network']['id']).delete()
        l3_obj.Router.get_object(self.ctx, id=self.router['id']).delete()

    def create_router(self, router):
        with self.ctx.session.begin(subtransactions=True):
            return self.mixin.create_router(self.ctx, router)

    def create_port(self, net_id, port_info):
        with self.ctx.session.begin(subtransactions=True):
            return self._make_port(self.fmt, net_id, **port_info)

    def create_network(self, name=None, **kwargs):
        name = name or 'network1'
        with self.ctx.session.begin(subtransactions=True):
            return self._make_network(self.fmt, name, True, **kwargs)

    def create_subnet(self, network, gateway, cidr, **kwargs):
        with self.ctx.session.begin(subtransactions=True):
            return self._make_subnet(self.fmt, network, gateway, cidr,
                                     **kwargs)

    def _add_router_interfaces(self):
        return [self.mixin.add_router_interface(
            self.ctx, self.router['id'],
            interface_info={'port_id': port['port']['id']})
            for port in self.ports]

    def _check_routerports(self, ri_statuses):
        port_ids = []
        for idx, ri_status in enumerate(ri_statuses):
            rp_obj = l3_obj.RouterPort.get_object(
                self.ctx, port_id=self.ports[idx]['port']['id'],
                router_id=self.router['id'])
            if ri_status:
                self.assertEqual(self.ports[idx]['port']['id'],
                                 rp_obj.port_id)
                port_ids.append(rp_obj.port_id)
            else:
                self.assertIsNone(rp_obj)
        _router_obj = l3_obj.Router.get_object(self.ctx,
                                               id=self.router['id'])
        router_port_ids = [rp.port_id for rp in
                           _router_obj.db_obj.attached_ports]
        self.assertEqual(sorted(port_ids), sorted(router_port_ids))

    @mock.patch.object(port_obj, 'LOG')
    def test_remove_router_interface_by_port(self, mock_log):
        self._add_router_interfaces()
        self._check_routerports((True, True))

        interface_info = {'port_id': self.ports[0]['port']['id']}
        self.mixin.remove_router_interface(self.ctx, self.router['id'],
                                           interface_info)
        mock_log.warning.assert_not_called()
        self._check_routerports((False, True))

    @mock.patch.object(port_obj, 'LOG')
    def test_remove_router_interface_by_port_removed_rport(self, mock_log):
        self._add_router_interfaces()
        self._check_routerports((True, True))

        rp_obj = l3_obj.RouterPort.get_object(
            self.ctx, router_id=self.router['id'],
            port_id=self.ports[0]['port']['id'])
        rp_obj.delete()

        interface_info = {'port_id': self.ports[0]['port']['id']}
        self.mixin.remove_router_interface(self.ctx, self.router['id'],
                                           interface_info)
        msg_vars = {'router_id': self.router['id'],
                    'port_ids': {self.ports[0]['port']['id']}}
        mock_log.warning.assert_called_once_with(self.GET_PORTS_BY_ROUTER_MSG,
                                                 msg_vars)
        self._check_routerports((False, True))

    @mock.patch.object(port_obj, 'LOG')
    def test_remove_router_interface_by_subnet(self, mock_log):
        self._add_router_interfaces()
        self._check_routerports((True, True))

        interface_info = {'subnet_id': self.subnets[1]['subnet']['id']}
        self.mixin.remove_router_interface(self.ctx, self.router['id'],
                                           interface_info)
        mock_log.warning.assert_not_called()
        self._check_routerports((True, False))

    @mock.patch.object(port_obj, 'LOG')
    def test_remove_router_interface_by_subnet_removed_rport(self, mock_log):
self._add_router_interfaces() self._check_routerports((True, True)) rp_obj = l3_obj.RouterPort.get_object( self.ctx, router_id=self.router['id'], port_id=self.ports[0]['port']['id']) rp_obj.delete() interface_info = {'subnet_id': self.subnets[0]['subnet']['id']} self.mixin.remove_router_interface(self.ctx, self.router['id'], interface_info) msg_vars = {'router_id': self.router['id'], 'port_ids': {self.ports[0]['port']['id']}} mock_log.warning.assert_called_once_with(self.GET_PORTS_BY_ROUTER_MSG, msg_vars) self._check_routerports((False, True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/test_l3_dvr_db.py0000644000175000017500000016613300000000000024624 0ustar00coreycorey00000000000000# Copyright (c) 2014 OpenStack Foundation, all rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from neutron_lib.api.definitions import portbindings from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as const from neutron_lib import context from neutron_lib import exceptions from neutron_lib.exceptions import l3 as l3_exc from neutron_lib.objects import exceptions as o_exc from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from neutron_lib.plugins import utils as plugin_utils from oslo_utils import uuidutils from neutron.db import agents_db from neutron.db import l3_dvr_db from neutron.db import l3_dvrscheduler_db from neutron.db.models import l3 as l3_models from neutron.db import models_v2 from neutron.objects import agent as agent_obj from neutron.objects import l3agent as rb_obj from neutron.objects import router as router_obj from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit.extensions import test_l3 _uuid = uuidutils.generate_uuid class FakeL3Plugin(test_l3.TestL3PluginBaseAttributes, l3_dvr_db.L3_NAT_with_dvr_db_mixin, l3_dvrscheduler_db.L3_DVRsch_db_mixin, agents_db.AgentDbMixin): pass class L3DvrTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def setUp(self): super(L3DvrTestCase, self).setUp(plugin='ml2') self.core_plugin = directory.get_plugin() self.ctx = context.get_admin_context() self.mixin = FakeL3Plugin() directory.add_plugin(plugin_constants.L3, self.mixin) def _create_router(self, router): with self.ctx.session.begin(subtransactions=True): return self.mixin._create_router_db(self.ctx, router, 'foo_tenant') def create_port(self, net_id, port_info): with self.ctx.session.begin(subtransactions=True): return self._create_port(self.fmt, net_id, **port_info) def _test__create_router_db(self, expected=False, distributed=None): router = {'name': 'foo_router', 'admin_state_up': True} if distributed is not None: router['distributed'] = distributed result = self._create_router(router) self.assertEqual(expected, result.extra_attributes['distributed']) def 
test_create_router_db_default(self): self._test__create_router_db(expected=False) def test_create_router_db_centralized(self): self._test__create_router_db(expected=False, distributed=False) def test_create_router_db_distributed(self): self._test__create_router_db(expected=True, distributed=True) def _test__validate_router_migration_on_router_update(self, mock_arg): router = { 'name': 'foo_router', 'admin_state_up': True, 'distributed': True } router_db = self._create_router(router) self.assertFalse(self.mixin._validate_router_migration( self.ctx, router_db, {'name': 'foo_router_2'})) # mock the check function to indicate that the variable # _admin_state_down_necessary set to True @mock.patch('neutron.db.l3_dvr_db.is_admin_state_down_necessary', return_value=True) def test__validate_router_migration_on_router_update_mock(self, mock_arg): # call test with admin_state_down_before_update ENABLED self._test__validate_router_migration_on_router_update(mock_arg) # mock the check function to indicate that the variable # _admin_state_down_necessary set to False @mock.patch('neutron.db.l3_dvr_db.is_admin_state_down_necessary', return_value=False) def test__validate_router_migration_on_router_update(self, mock_arg): # call test with admin_state_down_before_update DISABLED self._test__validate_router_migration_on_router_update(mock_arg) def _test__validate_router_migration_raise_error(self): router = { 'name': 'foo_router', 'admin_state_up': True, 'distributed': True } router_db = self._create_router(router) self.assertRaises(exceptions.BadRequest, self.mixin._validate_router_migration, self.ctx, router_db, {'distributed': False}) @mock.patch('neutron.db.l3_dvr_db.is_admin_state_down_necessary', return_value=True) def test__validate_router_migration_raise_error_mocked(self, mock_arg): # call test with admin_state_down_before_update ENABLED self._test__validate_router_migration_raise_error() @mock.patch('neutron.db.l3_dvr_db.is_admin_state_down_necessary', return_value=False) def test__validate_router_migration_raise_error(self, mock_arg): # call test with admin_state_down_before_update DISABLED self._test__validate_router_migration_raise_error() @mock.patch('neutron.db.l3_dvr_db.is_admin_state_down_necessary', return_value=True) def test__validate_router_migration_old_router_up_raise_error(self, mock_arg): # call test with admin_state_down_before_update ENABLED old_router = { 'name': 'bar_router', 'admin_state_up': True, 'distributed': True } new_router = { 'name': 'foo_router', 'admin_state_up': False, 'distributed': False } update = {'distributed': False} router_db = self._create_router(new_router) self.assertRaises(exceptions.BadRequest, self.mixin._validate_router_migration, self.ctx, router_db, update, old_router) def _test_upgrade_inactive_router_to_distributed_validation_success(self): router = {'name': 'foo_router', 'admin_state_up': False, 'distributed': False} router_db = self._create_router(router) update = {'distributed': True} self.assertTrue(self.mixin._validate_router_migration( self.ctx, router_db, update)) @mock.patch('neutron.db.l3_dvr_db.is_admin_state_down_necessary', return_value=True) def test_upgrade_inactive_router_to_distributed_validation_success_mocked( self, mock_arg): # call test with admin_state_down_before_update ENABLED self._test_upgrade_inactive_router_to_distributed_validation_success() @mock.patch('neutron.db.l3_dvr_db.is_admin_state_down_necessary', return_value=False) def test_upgrade_inactive_router_to_distributed_validation_success(self, mock_arg): # call test 
with admin_state_down_before_update DISABLED self._test_upgrade_inactive_router_to_distributed_validation_success() def _test_upgrade_active_router_to_distributed_validation_failure(self): router = {'name': 'foo_router', 'admin_state_up': True, 'distributed': False} router_db = self._create_router(router) update = {'distributed': True} self.assertRaises(exceptions.BadRequest, self.mixin._validate_router_migration, self.ctx, router_db, update) @mock.patch('neutron.db.l3_dvr_db.is_admin_state_down_necessary', return_value=True) def test_upgrade_active_router_to_distributed_validation_failure(self, mock_arg): # call test with admin_state_down_before_update ENABLED self._test_upgrade_active_router_to_distributed_validation_failure() @mock.patch('neutron.db.l3_dvr_db.is_admin_state_down_necessary', return_value=True) def test_downgrade_active_router_to_centralized_validation_failure(self, mock_arg): # call test with admin_state_down_before_update ENABLED router = {'name': 'foo_router', 'admin_state_up': True, 'distributed': True} router_db = self._create_router(router) update = {'distributed': False} self.assertRaises(exceptions.BadRequest, self.mixin._validate_router_migration, self.ctx, router_db, update) def test_update_router_db_centralized_to_distributed(self): router = {'name': 'foo_router', 'admin_state_up': True} agent = {'id': _uuid()} distributed = {'distributed': True} router_db = self._create_router(router) router_id = router_db['id'] self.assertFalse(router_db.extra_attributes.distributed) self.mixin._get_router = mock.Mock(return_value=router_db) self.mixin._validate_router_migration = mock.Mock() self.mixin._migrate_router_ports = mock.Mock() self.mixin.list_l3_agents_hosting_router = mock.Mock( return_value={'agents': [agent]}) self.mixin._unbind_router = mock.Mock() router_db = self.mixin._update_router_db( self.ctx, router_id, distributed) # Assert that the DB value has changed self.assertTrue(router_db.extra_attributes.distributed) self.assertEqual(1, self.mixin._migrate_router_ports.call_count) def test_update_router_db_distributed_to_centralized(self): router = {'name': 'foo_router', 'admin_state_up': True, 'distributed': True} agent = {'id': _uuid(), 'host': 'xyz'} router_db = self._create_router(router) router_id = router_db['id'] self.assertTrue(router_db.extra_attributes.distributed) self.mixin._get_router = mock.Mock(return_value=router_db) self.mixin._validate_router_migration = mock.Mock() self.mixin._migrate_router_ports = mock.Mock() self.mixin._core_plugin.\ delete_distributed_port_bindings_by_router_id = mock.Mock() self.mixin.list_l3_agents_hosting_router = mock.Mock( return_value={'agents': [agent]}) self.mixin._unbind_router = mock.Mock() updated_router = self.mixin.update_router(self.ctx, router_id, {'router': {'distributed': False}}) # Assert that the DB value has changed self.assertFalse(updated_router['distributed']) self.assertEqual(1, self.mixin._migrate_router_ports.call_count) self.assertEqual( 1, self.mixin._core_plugin. 
    def _test_get_device_owner(self, is_distributed=False,
                               expected=const.DEVICE_OWNER_ROUTER_INTF,
                               pass_router_id=True):
        router = {
            'name': 'foo_router',
            'admin_state_up': True,
            'distributed': is_distributed
        }
        router_db = self._create_router(router)
        router_pass = router_db['id'] if pass_router_id else router_db
        with mock.patch.object(self.mixin, '_get_router') as f:
            f.return_value = router_db
            result = self.mixin._get_device_owner(self.ctx, router_pass)
            self.assertEqual(expected, result)

    def test_get_device_owner_by_router_id(self):
        self._test_get_device_owner()

    def test__get_device_owner_centralized(self):
        self._test_get_device_owner(pass_router_id=False)

    def test__get_device_owner_distributed(self):
        self._test_get_device_owner(
            is_distributed=True,
            expected=const.DEVICE_OWNER_DVR_INTERFACE,
            pass_router_id=False)

    def _test__is_distributed_router(self, router, expected):
        result = l3_dvr_db.is_distributed_router(router)
        self.assertEqual(expected, result)

    def test__is_distributed_router_by_db_object(self):
        router = {'name': 'foo_router', 'admin_state_up': True}
        router_db = self._create_router(router)
        self.mixin._get_device_owner(mock.ANY, router_db)

    def test__is_distributed_router_default(self):
        router = {'id': 'foo_router_id'}
        self._test__is_distributed_router(router, False)

    def test__is_distributed_router_centralized(self):
        router = {'id': 'foo_router_id', 'distributed': False}
        self._test__is_distributed_router(router, False)

    def test__is_distributed_router_distributed(self):
        router = {'id': 'foo_router_id', 'distributed': True}
        self._test__is_distributed_router(router, True)

    def test__get_agent_gw_ports_exist_for_network(self):
        plugin = mock.Mock()
        directory.add_plugin(plugin_constants.CORE, plugin)
        plugin.get_ports.return_value = []
        self.mixin._get_agent_gw_ports_exist_for_network(
            self.ctx, 'network_id', 'host', 'agent_id')
        plugin.get_ports.assert_called_with(self.ctx, {
            'network_id': ['network_id'],
            'device_id': ['agent_id'],
            'device_owner': [const.DEVICE_OWNER_AGENT_GW]})

    def _help_check_and_create_fip_gw_port(self, fip=None):
        port = {
            'id': '1234',
            portbindings.HOST_ID: 'myhost',
            'floating_network_id': 'external_net'
        }
        ctxt = mock.Mock()
        with mock.patch.object(
                self.mixin,
                'create_fip_agent_gw_port_if_not_exists') as c_fip,\
                mock.patch.object(router_obj.FloatingIP, 'get_objects',
                                  return_value=[fip] if fip else None):
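            # The handler should create an agent gateway port only when a
            # floating IP actually exists on the external network.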
            (self.mixin.
             check_for_fip_and_create_agent_gw_port_on_host_if_not_exists(
                 ctxt, port, 'host'))
        if fip:
            c_fip.assert_called_once_with(
                ctxt.elevated(), fip['floating_network_id'], 'host')
        else:
            c_fip.assert_not_called()

    def test_check_for_fip_and_create_agent_gw_port_no_fip(self):
        self._help_check_and_create_fip_gw_port()

    def test_check_for_fip_and_create_agent_gw_port_with_dvr_true(self):
        fip = {
            'id': _uuid(),
            'floating_network_id': 'fake_net_id',
            'router_id': 'foo_router_id'
        }
        self._help_check_and_create_fip_gw_port(fip=fip)

    def _test_prepare_direct_delete_dvr_internal_ports(self, port):
        plugin = mock.Mock()
        directory.add_plugin(plugin_constants.CORE, plugin)
        plugin.get_port.return_value = port
        self.mixin._router_exists = mock.Mock(return_value=True)
        self.assertRaises(exceptions.ServicePortInUse,
                          self.mixin.prevent_l3_port_deletion,
                          self.ctx, port['id'])

    def test_prevent_delete_floatingip_agent_gateway_port(self):
        port = {
            'id': 'my_port_id',
            'fixed_ips': mock.ANY,
            'device_id': 'r_id',
            'device_owner': const.DEVICE_OWNER_AGENT_GW
        }
        self._test_prepare_direct_delete_dvr_internal_ports(port)

    def test_prevent_delete_csnat_port(self):
        port = {
            'id': 'my_port_id',
            'fixed_ips': mock.ANY,
            'device_id': 'r_id',
            'device_owner': const.DEVICE_OWNER_ROUTER_SNAT
        }
        self._test_prepare_direct_delete_dvr_internal_ports(port)

    def test__create_gw_port_with_no_gateway(self):
        router = {
            'name': 'foo_router',
            'admin_state_up': True,
            'distributed': True,
        }
        router_db = self._create_router(router)
        router_id = router_db['id']
        self.assertTrue(router_db.extra_attributes.distributed)
        with mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                               '_create_gw_port'),\
                mock.patch.object(
                    self.mixin,
                    '_create_snat_intf_ports_if_not_exists') as cs:
            self.mixin._create_gw_port(
                self.ctx, router_id, router_db, mock.ANY, mock.ANY)
            self.assertFalse(cs.call_count)

    def test_build_routers_list_with_gw_port_mismatch(self):
        routers = [{'gw_port_id': 'foo_gw_port_id', 'id': 'foo_router_id'}]
        gw_ports = {}
        routers = self.mixin._build_routers_list(self.ctx, routers, gw_ports)
        self.assertIsNone(routers[0].get('gw_port'))

    def _helper_delete_floatingip_agent_gateway_port(
            self, port_host, delete_dvr_fip_agent_port_side_effect=None):
        ports = [{
            'id': 'my_port_id',
            portbindings.HOST_ID: 'foo_host',
            'network_id': 'ext_network_id',
            'device_owner': const.DEVICE_OWNER_ROUTER_GW
        }, {
            'id': 'my_new_port_id',
            portbindings.HOST_ID: 'my_foo_host',
            'network_id': 'ext_network_id',
            'device_owner': const.DEVICE_OWNER_ROUTER_GW
        }]
        plugin = mock.Mock()
        directory.add_plugin(plugin_constants.CORE, plugin)
        plugin.get_ports.return_value = ports
        self.mixin._get_agent_by_type_and_host = mock.Mock(
            return_value={'id': uuidutils.generate_uuid()})
        with mock.patch.object(
                router_obj, "DvrFipGatewayPortAgentBinding"
        ) as dvr_fip_agent_port_obj:
            dvr_fip_agent_port_obj_instance = (
                dvr_fip_agent_port_obj.return_value)
            dvr_fip_agent_port_obj_instance.delete.side_effect = (
                delete_dvr_fip_agent_port_side_effect)
            self.mixin.delete_floatingip_agent_gateway_port(
                self.ctx, port_host, 'ext_network_id')
        plugin.get_ports.assert_called_with(self.ctx, filters={
            'network_id': ['ext_network_id'],
            'device_owner': [const.DEVICE_OWNER_AGENT_GW]})
        if port_host:
            plugin.ipam.delete_port.assert_called_once_with(
                self.ctx, 'my_port_id')
            dvr_fip_agent_port_obj_instance.delete.assert_called_once()
        else:
            plugin.ipam.delete_port.assert_called_with(
                self.ctx, 'my_new_port_id')
            dvr_fip_agent_port_obj_instance.delete.assert_called()

    def test_delete_floatingip_agent_gateway_port_without_host_id(self):
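        """With no host given, gateway ports on every host are removed."""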
        self._helper_delete_floatingip_agent_gateway_port(None)

    def test_delete_floatingip_agent_gateway_port_with_host_id(self):
        self._helper_delete_floatingip_agent_gateway_port(
            'foo_host')

    def test_delete_floatingip_agent_gateway_port_no_host_id_fip_gw_not_found(
            self):
        self._helper_delete_floatingip_agent_gateway_port(
            None, exceptions.ObjectNotFound(id='my_port_id'))

    def test_delete_floatingip_agent_gateway_port_host_id_fip_gw_not_found(
            self):
        self._helper_delete_floatingip_agent_gateway_port(
            'foo_host', exceptions.ObjectNotFound(id='my_port_id'))

    def _setup_delete_current_gw_port_deletes_dvr_internal_ports(
            self, port=None, gw_port=True, new_network_id='ext_net_id_2'):
        router_db = {
            'name': 'foo_router',
            'admin_state_up': True,
            'distributed': True
        }
        router = self._create_router(router_db)
        if gw_port:
            with self.subnet(cidr='10.10.10.0/24') as subnet:
                port_dict = {
                    'device_id': router.id,
                    'device_owner': const.DEVICE_OWNER_ROUTER_GW,
                    'admin_state_up': True,
                    'fixed_ips': [{'subnet_id': subnet['subnet']['id'],
                                   'ip_address': '10.10.10.100'}]
                }
                net_id = subnet['subnet']['network_id']
                port_res = self.create_port(net_id, port_dict)
                port_res_dict = self.deserialize(self.fmt, port_res)
                with self.ctx.session.begin(subtransactions=True):
                    port_db = self.ctx.session.query(
                        models_v2.Port).filter_by(
                            id=port_res_dict['port']['id']).one()
                    router.gw_port = port_db
                    router_port = l3_models.RouterPort(
                        router_id=router.id,
                        port_id=port_db.id,
                        port_type=const.DEVICE_OWNER_ROUTER_GW
                    )
                    self.ctx.session.add(router)
                    self.ctx.session.add(router_port)
        else:
            net_id = None
        plugin = mock.Mock()
        directory.add_plugin(plugin_constants.CORE, plugin)
        with mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                               'router_gw_port_has_floating_ips',
                               return_value=False),\
                mock.patch.object(
                    self.mixin, '_get_router') as grtr,\
                mock.patch.object(
                    self.mixin,
                    'delete_csnat_router_interface_ports') as del_csnat_port,\
                mock.patch.object(
                    self.mixin,
                    'delete_floatingip_agent_gateway_port'
                ) as del_agent_gw_port,\
                mock.patch.object(
                    self.mixin.l3_rpc_notifier,
                    'delete_fipnamespace_for_ext_net') as del_fip:
            plugin.get_ports.return_value = port
            grtr.return_value = router
            self.mixin._delete_current_gw_port(
                self.ctx, router['id'], router, new_network_id)
            return router, plugin, net_id, del_csnat_port,\
                del_agent_gw_port, del_fip

    def test_delete_current_gw_port_deletes_fip_agent_gw_port_and_fipnamespace(
            self):
        rtr, plugin, ext_net_id, d_csnat_port, d_agent_gw_port, del_fip = (
            self._setup_delete_current_gw_port_deletes_dvr_internal_ports())
        self.assertFalse(d_csnat_port.called)
        self.assertTrue(d_agent_gw_port.called)
        d_agent_gw_port.assert_called_once_with(mock.ANY, None, ext_net_id)
        del_fip.assert_called_once_with(self.ctx, ext_net_id)

    def test_delete_current_gw_port_never_calls_delete_fip_agent_gw_port(self):
        port = [{
            'id': 'my_port_id',
            'network_id': 'ext_net_id',
            'device_owner': const.DEVICE_OWNER_ROUTER_GW
        }, {
            'id': 'my_new_port_id',
            'network_id': 'ext_net_id',
            'device_owner': const.DEVICE_OWNER_ROUTER_GW
        }]
        rtr, plugin, ext_net_id, d_csnat_port, d_agent_gw_port, del_fip = (
            self._setup_delete_current_gw_port_deletes_dvr_internal_ports(
                port=port))
        self.assertFalse(d_csnat_port.called)
        self.assertFalse(d_agent_gw_port.called)
        self.assertFalse(del_fip.called)
        self.assertIsNotNone(ext_net_id)

    def test_delete_current_gw_port_never_calls_delete_fipnamespace(self):
        rtr, plugin, ext_net_id, d_csnat_port, d_agent_gw_port, del_fip = (
            self._setup_delete_current_gw_port_deletes_dvr_internal_ports(
                gw_port=False))
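        # With no gateway port on the router, none of the DVR cleanup
        # paths should fire.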
        self.assertFalse(d_csnat_port.called)
        self.assertFalse(d_agent_gw_port.called)
        self.assertFalse(del_fip.called)
        self.assertIsNone(ext_net_id)

    def test_delete_current_gw_port_deletes_csnat_port(self):
        rtr, plugin, ext_net_id, d_csnat_port, d_agent_gw_port, del_fip = (
            self._setup_delete_current_gw_port_deletes_dvr_internal_ports(
                new_network_id=None))
        self.assertTrue(d_csnat_port.called)
        self.assertTrue(d_agent_gw_port.called)
        d_csnat_port.assert_called_once_with(mock.ANY, rtr)
        d_agent_gw_port.assert_called_once_with(mock.ANY, None, ext_net_id)
        del_fip.assert_called_once_with(mock.ANY, ext_net_id)

    def _floatingip_on_port_test_setup(self, hostid):
        router = {'id': 'foo_router_id', 'distributed': True}
        floatingip = {
            'id': _uuid(),
            'port_id': _uuid(),
            'router_id': 'foo_router_id',
        }
        if hostid is not None:
            floatingip['host'] = hostid
        else:
            hostid = 'not_my_host_id'
        routers = {
            'foo_router_id': router
        }
        fipagent = {
            'id': _uuid()
        }
        # NOTE: mock.patch is not needed here since self.mixin is created
        # fresh for each test. It doesn't work with some methods since the
        # mixin is tested in isolation (e.g. _get_agent_by_type_and_host).
        self.mixin._get_dvr_service_port_hostid = mock.Mock(
            return_value=hostid)
        self.mixin._get_agent_by_type_and_host = mock.Mock(
            return_value=fipagent)
        self.mixin._get_fip_agent_gw_ports = mock.Mock(
            return_value='fip_interface')
        agent = mock.Mock()
        agent.id = fipagent['id']
        self.mixin._process_floating_ips_dvr(self.ctx, routers, [floatingip],
                                             hostid, agent)
        return (router, floatingip)

    def test_floatingip_on_port_no_host_key(self):
        router, fip = self._floatingip_on_port_test_setup(None)
        self.assertNotIn(const.FLOATINGIP_KEY, router)

    def test_floatingip_on_port_with_host(self):
        router, fip = self._floatingip_on_port_test_setup(_uuid())
        self.assertIn(const.FLOATINGIP_KEY, router)
        self.assertIn(fip, router[const.FLOATINGIP_KEY])

    def _setup_test_create_floatingip(self, fip, floatingip_db, router_db):
        port = {
            'id': '1234',
            portbindings.HOST_ID: 'myhost',
            'network_id': 'external_net'
        }
        with mock.patch.object(self.mixin, 'get_router') as grtr,\
                mock.patch.object(self.mixin,
                                  '_get_dvr_service_port_hostid') as vmp,\
                mock.patch.object(
                    self.mixin,
                    '_get_dvr_migrating_service_port_hostid'
                ) as mvmp,\
                mock.patch.object(
                    self.mixin,
                    'create_fip_agent_gw_port_if_not_exists') as c_fip,\
                mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin,
                                  '_update_fip_assoc'):
            grtr.return_value = router_db
            vmp.return_value = 'my-host'
            mvmp.return_value = 'my-future-host'
            registry.notify(resources.FLOATING_IP, events.AFTER_UPDATE, self,
                            context=mock.Mock(), router_id=router_db['id'],
                            fixed_port_id=port['id'],
                            floating_ip_id=fip['id'],
                            floating_network_id=fip['floating_network_id'],
                            fixed_ip_address='1.2.3.4',
                            association_event=True)
            return c_fip

    def test_create_floatingip_agent_gw_port_with_dvr_router(self):
        floatingip = {
            'id': _uuid(),
            'router_id': 'foo_router_id'
        }
        router = {'id': 'foo_router_id', 'distributed': True}
        fip = {
            'id': _uuid(),
            'floating_network_id': _uuid(),
            'port_id': _uuid()
        }
        create_fip = (
            self._setup_test_create_floatingip(
                fip, floatingip, router))
        self.assertTrue(create_fip.called)

    def test_create_fip_agent_gw_port_if_not_exists_with_l3_agent(self):
        network_id = _uuid()
        fport_db = {'id': _uuid()}
        self.mixin._get_agent_gw_ports_exist_for_network = mock.Mock(
            return_value=fport_db)
        fipagent = agent_obj.Agent(
            self.ctx,
            id=_uuid(),
            binary='foo-agent',
            host='host',
            agent_type='L3 agent',
            topic='foo_topic',
            configurations={"agent_mode": "dvr_no_external"})
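        # An agent running in dvr_no_external mode must not get a FIP
        # agent gateway port.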
        self.mixin._get_agent_by_type_and_host = mock.Mock(
            return_value=fipagent)
        fport = self.mixin.create_fip_agent_gw_port_if_not_exists(
            self.ctx, network_id, 'host')
        self.assertIsNone(fport)

        fipagent = agent_obj.Agent(
            self.ctx,
            id=_uuid(),
            binary='foo-agent',
            host='host',
            agent_type='L3 agent',
            topic='foo_topic',
            configurations={"agent_mode": "dvr"})
        self.mixin._get_agent_by_type_and_host = mock.Mock(
            return_value=fipagent)
        with mock.patch.object(
                router_obj, "DvrFipGatewayPortAgentBinding",
        ) as dvr_fip_agent_port_obj:
            dvr_fip_agent_port_obj_instance = (
                dvr_fip_agent_port_obj.return_value)
            fport = self.mixin.create_fip_agent_gw_port_if_not_exists(
                self.ctx, network_id, 'host')
            dvr_fip_agent_port_obj_instance.create.assert_not_called()
        self.assertIsNotNone(fport)
        dvr_fip_agent_port_obj_instance.delete.assert_not_called()

    def test_create_fip_agent_gw_port_agent_port_not_created(self):
        network_id = _uuid()
        self.mixin._get_agent_gw_ports_exist_for_network = mock.Mock(
            return_value=None)
        fipagent = agent_obj.Agent(
            self.ctx,
            id=_uuid(),
            binary='foo-agent',
            host='host',
            agent_type='L3 agent',
            topic='foo_topic',
            configurations={"agent_mode": "dvr"})
        self.mixin._get_agent_by_type_and_host = mock.Mock(
            return_value=fipagent)
        with mock.patch.object(
                router_obj, "DvrFipGatewayPortAgentBinding",
        ) as dvr_fip_agent_port_obj,\
                mock.patch.object(
                    plugin_utils, "create_port", return_value=None):
            dvr_fip_agent_port_obj_instance = (
                dvr_fip_agent_port_obj.return_value)
            self.assertRaises(
                exceptions.BadRequest,
                self.mixin.create_fip_agent_gw_port_if_not_exists,
                self.ctx, network_id, 'host')
            dvr_fip_agent_port_obj_instance.create.assert_called_once_with()
            self.mixin._get_agent_gw_ports_exist_for_network.\
                assert_called_once_with(
                    self.ctx, network_id, 'host', fipagent['id'])
            dvr_fip_agent_port_obj_instance.delete.assert_called_once_with()

    def test_create_fip_agent_gw_port_if_not_exists_duplicate_port(self):
        network_id = _uuid()
        fport_db = {'id': _uuid()}
        self.mixin._get_agent_gw_ports_exist_for_network = mock.Mock(
            side_effect=[None, fport_db])
        fipagent = agent_obj.Agent(
            self.ctx,
            id=_uuid(),
            binary='foo-agent',
            host='host',
            agent_type='L3 agent',
            topic='foo_topic',
            configurations={"agent_mode": "dvr"})
        self.mixin._get_agent_by_type_and_host = mock.Mock(
            return_value=fipagent)
        with mock.patch.object(
                router_obj.DvrFipGatewayPortAgentBinding, 'create',
                side_effect=o_exc.NeutronDbObjectDuplicateEntry(
                    mock.Mock(), mock.Mock())
        ) as dvr_fip_gateway_port_agent_binding_create:
            fport = self.mixin.create_fip_agent_gw_port_if_not_exists(
                self.ctx, network_id, 'host')
        dvr_fip_gateway_port_agent_binding_create.assert_called_once_with()
        self.mixin._get_agent_gw_ports_exist_for_network.assert_has_calls([
            mock.call(self.ctx, network_id, 'host', fipagent['id']),
            mock.call(self.ctx, network_id, 'host', fipagent['id'])])
        self.assertIsNotNone(fport)

    def test_create_floatingip_agent_gw_port_with_non_dvr_router(self):
        floatingip = {
            'id': _uuid(),
            'router_id': 'foo_router_id'
        }
        router = {'id': 'foo_router_id', 'distributed': False}
        fip = {
            'id': _uuid(),
            'floating_network_id': _uuid(),
            'port_id': _uuid()
        }
        create_fip = (
            self._setup_test_create_floatingip(
                fip, floatingip, router))
        self.assertFalse(create_fip.called)

    def _test_update_router_gw_info_external_network_change(self):
        router_dict = {'name': 'test_router', 'admin_state_up': True,
                       'distributed': True}
        router = self._create_router(router_dict)
        with self.network() as net_ext_1,\
                self.network() as net_ext_2,\
                self.subnet() as subnet:
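            # Attach the gateway to the first external network, then move
            # it to the second; only one csnat port should remain.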
            ext_net_1_id = net_ext_1['network']['id']
            self.core_plugin.update_network(
                self.ctx, ext_net_1_id,
                {'network': {'router:external': True}})
            self.mixin.update_router(
                self.ctx, router['id'],
                {'router': {'external_gateway_info':
                            {'network_id': ext_net_1_id}}})
            self.mixin.add_router_interface(
                self.ctx, router['id'],
                {'subnet_id': subnet['subnet']['id']})

            ext_net_2_id = net_ext_2['network']['id']
            self.core_plugin.update_network(
                self.ctx, ext_net_2_id,
                {'network': {'router:external': True}})
            self.mixin.update_router(
                self.ctx, router['id'],
                {'router': {'external_gateway_info':
                            {'network_id': ext_net_2_id}}})

            csnat_filters = {'device_owner':
                             [const.DEVICE_OWNER_ROUTER_SNAT]}
            csnat_ports = self.core_plugin.get_ports(
                self.ctx, filters=csnat_filters)
            self.assertEqual(1, len(csnat_ports))

    @mock.patch('neutron.db.l3_dvr_db.is_admin_state_down_necessary',
                return_value=True)
    def test_update_router_gw_info_external_network_change_mocked(
            self, mock_arg):
        # call test with admin_state_down_before_update ENABLED
        self._test_update_router_gw_info_external_network_change()

    @mock.patch('neutron.db.l3_dvr_db.is_admin_state_down_necessary',
                return_value=False)
    def test_update_router_gw_info_external_network_change(self, mock_arg):
        # call test with admin_state_down_before_update DISABLED
        self._test_update_router_gw_info_external_network_change()

    def _test_csnat_ports_removal(self, ha=False):
        router_dict = {'name': 'test_router', 'admin_state_up': True,
                       'distributed': True}
        router = self._create_router(router_dict)
        with self.network() as net_ext,\
                self.subnet() as subnet:
            ext_net_id = net_ext['network']['id']
            self.core_plugin.update_network(
                self.ctx, ext_net_id,
                {'network': {'router:external': True}})
            self.mixin.update_router(
                self.ctx, router['id'],
                {'router': {'external_gateway_info':
                            {'network_id': ext_net_id}}})
            self.mixin.add_router_interface(
                self.ctx, router['id'],
                {'subnet_id': subnet['subnet']['id']})

            csnat_filters = {'device_owner':
                             [const.DEVICE_OWNER_ROUTER_SNAT]}
            csnat_ports = self.core_plugin.get_ports(
                self.ctx, filters=csnat_filters)
            self.assertEqual(1, len(csnat_ports))

            self.mixin.update_router(
                self.ctx, router['id'],
                {'router': {'admin_state_up': False}})
            self.mixin.update_router(
                self.ctx, router['id'],
                {'router': {'distributed': False, 'ha': ha}})
            csnat_ports = self.core_plugin.get_ports(
                self.ctx, filters=csnat_filters)
            self.assertEqual(0, len(csnat_ports))

    def test_distributed_to_centralized_csnat_ports_removal(self):
        self._test_csnat_ports_removal()

    def test_distributed_to_ha_csnat_ports_removal(self):
        self._test_csnat_ports_removal(ha=True)

    def test_update_router_gw_info_csnat_ports_add(self):
        router_dict = {'name': 'test_router', 'admin_state_up': True,
                       'distributed': True}
        router = self._create_router(router_dict)
        with self.network() as net_ext,\
                self.network() as net_int,\
                self.subnet(
                    network=net_int,
                    cidr='2001:db8:1::/64',
                    gateway_ip='2001:db8:1::1',
                    ip_version=const.IP_VERSION_6) as v6_subnet1,\
                self.subnet(
                    network=net_int,
                    cidr='2001:db8:2::/64',
                    gateway_ip='2001:db8:2::1',
                    ip_version=const.IP_VERSION_6) as v6_subnet2,\
                self.subnet(
                    network=net_int,
                    cidr='10.10.10.0/24') as v4_subnet:
            self.core_plugin.update_network(
                self.ctx, net_ext['network']['id'],
                {'network': {'router:external': True}})
            # Add router interface, then set router gateway
            self.mixin.add_router_interface(
                self.ctx, router['id'],
                {'subnet_id': v6_subnet1['subnet']['id']})
            self.mixin.add_router_interface(
                self.ctx, router['id'],
                {'subnet_id': v6_subnet2['subnet']['id']})
            self.mixin.add_router_interface(
                self.ctx, router['id'],
                {'subnet_id': v4_subnet['subnet']['id']})
            dvr_filters = {'device_owner':
                           [const.DEVICE_OWNER_DVR_INTERFACE]}
            dvr_ports = self.core_plugin.get_ports(
                self.ctx, filters=dvr_filters)
            # One for IPv4, one for two IPv6 subnets
            self.assertEqual(2, len(dvr_ports))

            self.mixin.update_router(
                self.ctx, router['id'],
                {'router': {'external_gateway_info':
                            {'network_id': net_ext['network']['id']}}})
            csnat_filters = {'device_owner':
                             [const.DEVICE_OWNER_ROUTER_SNAT]}
            csnat_ports = self.core_plugin.get_ports(
                self.ctx, filters=csnat_filters)
            # One for IPv4, one for two IPv6 subnets
            self.assertEqual(2, len(csnat_ports))

            # Remove v4 subnet interface from router
            self.mixin.remove_router_interface(
                self.ctx, router['id'],
                {'subnet_id': v4_subnet['subnet']['id']})
            dvr_ports = self.core_plugin.get_ports(
                self.ctx, filters=dvr_filters)
            self.assertEqual(1, len(dvr_ports))
            csnat_ports = self.core_plugin.get_ports(
                self.ctx, filters=csnat_filters)
            self.assertEqual(1, len(csnat_ports))
            self.assertEqual(2, len(csnat_ports[0]['fixed_ips']))

    def _test_update_router_interface_port_ip_not_allowed(self,
                                                          device_owner):
        router, subnet_v4, subnet_v6 = self._setup_router_with_v4_and_v6()
        device_filter = {'device_owner': [device_owner]}
        ports = self.core_plugin.get_ports(self.ctx, filters=device_filter)
        self.assertRaises(
            exceptions.BadRequest,
            self.core_plugin.update_port,
            self.ctx, ports[0]['id'],
            {'port': {'fixed_ips': [
                {'ip_address': "20.0.0.100",
                 'subnet_id': subnet_v4['subnet']['id']},
                {'ip_address': "20.0.0.101",
                 'subnet_id': subnet_v4['subnet']['id']}]}})

    def test_update_router_centralized_snat_port_ip_not_allowed(self):
        self._test_update_router_interface_port_ip_not_allowed(
            const.DEVICE_OWNER_ROUTER_SNAT)

    def test_update_router_interface_distributed_port_ip_not_allowed(self):
        self._test_update_router_interface_port_ip_not_allowed(
            const.DEVICE_OWNER_DVR_INTERFACE)

    def test_remove_router_interface_csnat_ports_removal(self):
        router_dict = {'name': 'test_router', 'admin_state_up': True,
                       'distributed': True}
        router = self._create_router(router_dict)
        with self.network() as net_ext,\
                self.subnet() as subnet1,\
                self.subnet(cidr='20.0.0.0/24') as subnet2:
            ext_net_id = net_ext['network']['id']
            self.core_plugin.update_network(
                self.ctx, ext_net_id,
                {'network': {'router:external': True}})
            self.mixin.update_router(
                self.ctx, router['id'],
                {'router': {'external_gateway_info':
                            {'network_id': ext_net_id}}})
            self.mixin.add_router_interface(
                self.ctx, router['id'],
                {'subnet_id': subnet1['subnet']['id']})
            self.mixin.add_router_interface(
                self.ctx, router['id'],
                {'subnet_id': subnet2['subnet']['id']})

            csnat_filters = {'device_owner':
                             [const.DEVICE_OWNER_ROUTER_SNAT]}
            csnat_ports = self.core_plugin.get_ports(
                self.ctx, filters=csnat_filters)
            self.assertEqual(2, len(csnat_ports))

            dvr_filters = {'device_owner':
                           [const.DEVICE_OWNER_DVR_INTERFACE]}
            dvr_ports = self.core_plugin.get_ports(
                self.ctx, filters=dvr_filters)
            self.assertEqual(2, len(dvr_ports))

            self.mixin.remove_router_interface(
                self.ctx, router['id'], {'port_id': dvr_ports[0]['id']})
            csnat_ports = self.core_plugin.get_ports(
                self.ctx, filters=csnat_filters)
            self.assertEqual(1, len(csnat_ports))
            self.assertEqual(dvr_ports[1]['fixed_ips'][0]['subnet_id'],
                             csnat_ports[0]['fixed_ips'][0]['subnet_id'])

            dvr_ports = self.core_plugin.get_ports(
                self.ctx, filters=dvr_filters)
            self.assertEqual(1, len(dvr_ports))

    def _setup_router_with_v4_and_v6(self):
        router_dict = {'name': 'test_router', 'admin_state_up': True,
                       'distributed': True}
        router = self._create_router(router_dict)
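        # Give the router an external gateway plus one IPv4 and one IPv6
        # internal interface.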
        with self.network() as net_ext, self.network() as net_int:
            ext_net_id = net_ext['network']['id']
            self.core_plugin.update_network(
                self.ctx, ext_net_id,
                {'network': {'router:external': True}})
            self.mixin.update_router(
                self.ctx, router['id'],
                {'router': {'external_gateway_info':
                            {'network_id': ext_net_id}}})
            with self.subnet(
                    network=net_int, cidr='20.0.0.0/24') as subnet_v4,\
                    self.subnet(network=net_int,
                                cidr='fe80::/64',
                                gateway_ip='fe80::1',
                                ip_version=const.IP_VERSION_6) as subnet_v6:
                self.mixin.add_router_interface(
                    self.ctx, router['id'],
                    {'subnet_id': subnet_v4['subnet']['id']})
                self.mixin.add_router_interface(
                    self.ctx, router['id'],
                    {'subnet_id': subnet_v6['subnet']['id']})
                return router, subnet_v4, subnet_v6

    def test_undo_router_interface_change_on_csnat_error(self):
        self._test_undo_router_interface_change_on_csnat_error(False)

    def test_undo_router_interface_change_on_csnat_error_revert_failure(self):
        self._test_undo_router_interface_change_on_csnat_error(True)

    def _test_undo_router_interface_change_on_csnat_error(self, fail_revert):
        router, subnet_v4, subnet_v6 = self._setup_router_with_v4_and_v6()
        net = {'network': {'id': subnet_v6['subnet']['network_id'],
                           'tenant_id': subnet_v6['subnet']['tenant_id']}}
        orig_update = self.mixin._core_plugin.update_port

        def update_port(*args, **kwargs):
            # 1st port update is the interface, 2nd is csnat, 3rd is revert
            # we want to simulate errors after the 1st
            update_port.calls += 1
            if update_port.calls == 2:
                raise RuntimeError('csnat update failure')
            if update_port.calls == 3 and fail_revert:
                # this is to ensure that if the revert fails, the original
                # exception is raised (not this ValueError)
                raise ValueError('failure from revert')
            return orig_update(*args, **kwargs)
        update_port.calls = 0
        self.mixin._core_plugin.update_port = update_port

        with self.subnet(network=net,
                         cidr='fe81::/64',
                         gateway_ip='fe81::1',
                         ip_version=const.IP_VERSION_6) as subnet2_v6:
            self.mixin.add_router_interface(
                self.ctx, router['id'],
                {'subnet_id': subnet2_v6['subnet']['id']})
            if fail_revert:
                # a revert failure will mean the interface is still added
                # so we can't re-add it
                return
            # starting over should work if first interface was cleaned up
            self.mixin.add_router_interface(
                self.ctx, router['id'],
                {'subnet_id': subnet2_v6['subnet']['id']})

    def test_remove_router_interface_csnat_ports_removal_with_ipv6(self):
        router, subnet_v4, subnet_v6 = self._setup_router_with_v4_and_v6()
        csnat_filters = {'device_owner':
                         [const.DEVICE_OWNER_ROUTER_SNAT]}
        csnat_ports = self.core_plugin.get_ports(
            self.ctx, filters=csnat_filters)
        self.assertEqual(2, len(csnat_ports))
        dvr_filters = {'device_owner':
                       [const.DEVICE_OWNER_DVR_INTERFACE]}
        dvr_ports = self.core_plugin.get_ports(
            self.ctx, filters=dvr_filters)
        self.assertEqual(2, len(dvr_ports))
        self.mixin.remove_router_interface(
            self.ctx, router['id'],
            {'subnet_id': subnet_v4['subnet']['id']})
        csnat_ports = self.core_plugin.get_ports(
            self.ctx, filters=csnat_filters)
        self.assertEqual(1, len(csnat_ports))
        self.assertEqual(
            subnet_v6['subnet']['id'],
            csnat_ports[0]['fixed_ips'][0]['subnet_id'])
        dvr_ports = self.core_plugin.get_ports(
            self.ctx, filters=dvr_filters)
        self.assertEqual(1, len(dvr_ports))

    def _test__validate_router_migration_notify_advanced_services(self):
        router = {'name': 'foo_router', 'admin_state_up': False}
        router_db = self._create_router(router)
        with mock.patch.object(l3_dvr_db.registry, 'notify') as mock_notify:
            self.mixin._validate_router_migration(
                self.ctx, router_db, {'distributed': True})
            kwargs = {'context': self.ctx, 'router': router_db}
            mock_notify.assert_called_once_with(
                'router', 'before_update', self.mixin, **kwargs)

    def test__validate_router_migration_notify_advanced_services_mocked(self):
        # call test with admin_state_down_before_update ENABLED
        self._test__validate_router_migration_notify_advanced_services()

    def test__validate_router_migration_notify_advanced_services(self):
        # call test with admin_state_down_before_update DISABLED
        self._test__validate_router_migration_notify_advanced_services()

    def test_validate_add_router_interface_by_subnet_notify_advanced_services(
            self):
        router = {'name': 'foo_router', 'admin_state_up': False}
        router_db = self._create_router(router)
        with self.network() as net, \
                self.subnet(network={'network': net['network']}) as sub, \
                mock.patch.object(
                    self.mixin,
                    '_notify_attaching_interface') as mock_notify:
            interface_info = {'subnet_id': sub['subnet']['id']}
            self.mixin.add_router_interface(self.ctx, router_db.id,
                                            interface_info)
            mock_notify.assert_called_once_with(self.ctx,
                                                router_db=router_db,
                                                port=mock.ANY,
                                                interface_info=interface_info)

    def test_validate_add_router_interface_by_port_notify_advanced_services(
            self):
        router = {'name': 'foo_router', 'admin_state_up': False}
        router_db = self._create_router(router)
        with self.network() as net, \
                self.subnet(network={'network': net['network']}) as sub, \
                self.port(subnet=sub) as port, \
                mock.patch.object(
                    self.mixin,
                    '_notify_attaching_interface') as mock_notify:
            interface_info = {'port_id': port['port']['id']}
            self.mixin.add_router_interface(self.ctx, router_db.id,
                                            interface_info)
            mock_notify.assert_called_once_with(self.ctx,
                                                router_db=router_db,
                                                port=mock.ANY,
                                                interface_info=interface_info)

    def test_add_router_interface_csnat_ports_failure(self):
        router_dict = {'name': 'test_router', 'admin_state_up': True,
                       'distributed': True}
        router = self._create_router(router_dict)
        with self.network() as net_ext,\
                self.subnet() as subnet:
            ext_net_id = net_ext['network']['id']
            self.core_plugin.update_network(
                self.ctx, ext_net_id,
                {'network': {'router:external': True}})
            self.mixin.update_router(
                self.ctx, router['id'],
                {'router': {'external_gateway_info':
                            {'network_id': ext_net_id}}})
            with mock.patch.object(
                    self.mixin, '_add_csnat_router_interface_port') as f:
                f.side_effect = RuntimeError()
                self.assertRaises(
                    l3_exc.RouterInterfaceAttachmentConflict,
                    self.mixin.add_router_interface,
                    self.ctx, router['id'],
                    {'subnet_id': subnet['subnet']['id']})
                filters = {
                    'device_id': [router['id']],
                }
                router_ports = self.core_plugin.get_ports(self.ctx, filters)
                self.assertEqual(1, len(router_ports))
                self.assertEqual(const.DEVICE_OWNER_ROUTER_GW,
                                 router_ports[0]['device_owner'])

    def test_csnat_port_not_created_on_RouterPort_update_exception(self):
        router_dict = {'name': 'test_router', 'admin_state_up': True,
                       'distributed': True}
        router = self._create_router(router_dict)
        with self.network() as net_ext,\
                self.subnet() as subnet:
            ext_net_id = net_ext['network']['id']
            self.core_plugin.update_network(
                self.ctx, ext_net_id,
                {'network': {'router:external': True}})
            self.mixin.update_router(
                self.ctx, router['id'],
                {'router': {'external_gateway_info':
                            {'network_id': ext_net_id}}})
            net_id = subnet['subnet']['network_id']
            with mock.patch.object(
                    router_obj.RouterPort, 'create') as rtrport_update:
                rtrport_update.side_effect = Exception()
                self.assertRaises(
                    l3_exc.RouterInterfaceAttachmentConflict,
                    self.mixin.add_router_interface,
                    self.ctx, router['id'],
                    {'subnet_id': subnet['subnet']['id']})
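                # The failed attach must not leave a centralized SNAT
                # port behind.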
                filters = {
                    'network_id': [net_id],
                    'device_owner': [const.DEVICE_OWNER_ROUTER_SNAT]
                }
                router_ports = self.core_plugin.get_ports(self.ctx, filters)
                self.assertEqual(0, len(router_ports))

    def test_add_router_interface_by_port_failure(self):
        router_dict = {'name': 'test_router', 'admin_state_up': True,
                       'distributed': True}
        router = self._create_router(router_dict)
        with self.subnet(cidr='10.10.10.0/24') as subnet:
            port_dict = {
                'device_id': '',
                'device_owner': '',
                'admin_state_up': True,
                'fixed_ips': [{'subnet_id': subnet['subnet']['id'],
                               'ip_address': '10.10.10.100'}]
            }
            net_id = subnet['subnet']['network_id']
            port_res = self.create_port(net_id, port_dict)
            port = self.deserialize(self.fmt, port_res)
            self.assertIn('port', port, message='Create port failed.')

            orig_update_port = self.mixin._core_plugin.update_port
            call_info = {'count': 0}

            def _fake_update_port(*args, **kwargs):
                call_info['count'] += 1
                if call_info['count'] == 2:
                    raise RuntimeError()
                else:
                    return orig_update_port(*args, **kwargs)

            # NOTE(trananhkma): update_port() is expected to raise an error
            # only at the 2nd call, i.e. the device owner update that
            # follows the initial port update.
            with mock.patch.object(self.mixin._core_plugin, 'update_port',
                                   side_effect=_fake_update_port):
                self.assertRaises(
                    RuntimeError, self.mixin.add_router_interface, self.ctx,
                    router['id'], {'port_id': port['port']['id']})
            # expire since we are re-using the session which might have
            # stale ports in it
            self.ctx.session.expire_all()
            port_info = self.core_plugin.get_port(self.ctx,
                                                  port['port']['id'])
            self.assertEqual(port_dict['device_id'], port_info['device_id'])
            self.assertEqual(port_dict['device_owner'],
                             port_info['device_owner'])

    def test__get_sync_routers_check_gw_port_host(self):
        router_dict = {'name': 'test_router', 'admin_state_up': True,
                       'distributed': True}
        router = self._create_router(router_dict)
        with self.network() as public,\
                self.subnet() as subnet:
            ext_net_1_id = public['network']['id']
            self.core_plugin.update_network(
                self.ctx, ext_net_1_id,
                {'network': {'router:external': True}})
            self.mixin.update_router(
                self.ctx, router['id'],
                {'router': {'external_gateway_info':
                            {'network_id': ext_net_1_id}}})
            self.mixin.add_router_interface(
                self.ctx, router['id'],
                {'subnet_id': subnet['subnet']['id']})

            routers = self.mixin._get_sync_routers(
                self.ctx, router_ids=[router['id']])
            self.assertIsNone(routers[0]['gw_port_host'])

            agent = mock.Mock()
            agent.host = "fake-host"
            bind = mock.Mock()
            bind.l3_agent_id = "fake-id"
            with mock.patch.object(
                    rb_obj.RouterL3AgentBinding, 'get_objects',
                    return_value=[bind]), mock.patch.object(
                    agent_obj.Agent, 'get_object', return_value=agent):
                routers = self.mixin._get_sync_routers(
                    self.ctx, router_ids=[router['id']])
                self.assertEqual("fake-host", routers[0]['gw_port_host'])

    def test_is_router_distributed(self):
        router_id = 'router_id'
        with mock.patch.object(self.mixin, 'get_router') as \
                mock_get_router:
            mock_get_router.return_value = {'distributed': True}
            self.assertTrue(
                self.mixin.is_router_distributed(self.ctx, router_id))

    @mock.patch.object(l3_dvr_db, "is_port_bound")
    def test_get_ports_under_dvr_connected_subnet(self, is_port_bound_mock):
        router_dict = {'name': 'test_router', 'admin_state_up': True,
                       'distributed': True}
        router = self._create_router(router_dict)
        with self.network() as network,\
                self.subnet(network=network) as subnet:
            fake_bound_ports_ids = []

            def fake_is_port_bound(port):
                return port['id'] in fake_bound_ports_ids
            is_port_bound_mock.side_effect = fake_is_port_bound
            for _ in range(4):
                port_res = self.create_port(
                    network['network']['id'],
                    {'fixed_ips': [{'subnet_id': subnet['subnet']['id']}]})
                port_id = self.deserialize(self.fmt, port_res)['port']['id']
                if len(fake_bound_ports_ids) < 2:
                    fake_bound_ports_ids.append(port_id)

            self.mixin.add_router_interface(
                self.ctx, router['id'],
                {'subnet_id': subnet['subnet']['id']})

            dvr_subnet_ports = (
                self.mixin.get_ports_under_dvr_connected_subnet(
                    self.ctx, subnet['subnet']['id']))
            dvr_subnet_ports_ids = [p['id'] for p in dvr_subnet_ports]
            self.assertItemsEqual(fake_bound_ports_ids,
                                  dvr_subnet_ports_ids)

    @mock.patch.object(plugin_utils, 'can_port_be_bound_to_virtual_bridge',
                       return_value=True)
    def test__get_assoc_data_valid_vnic_type(self, *args):
        with mock.patch.object(self.mixin, '_internal_fip_assoc_data') as \
                mock_fip_assoc_data, \
                mock.patch.object(self.mixin, '_get_router_for_floatingip') \
                as mock_router_fip, \
                mock.patch.object(self.mixin, 'is_router_distributed',
                                  return_value=True):
            port = {portbindings.VNIC_TYPE: portbindings.VNIC_NORMAL}
            mock_fip_assoc_data.return_value = (port, 'subnet_id', 'ip_addr')
            mock_router_fip.return_value = 'router_id'
            fip = {'port_id': 'port_id'}
            self.assertEqual(
                ('port_id', 'ip_addr', 'router_id'),
                self.mixin._get_assoc_data(self.ctx, fip, mock.Mock()))

    @mock.patch.object(plugin_utils, 'can_port_be_bound_to_virtual_bridge',
                       return_value=False)
    def test__get_assoc_data_invalid_vnic_type(self, *args):
        with mock.patch.object(self.mixin, '_internal_fip_assoc_data') as \
                mock_fip_assoc_data, \
                mock.patch.object(self.mixin, '_get_router_for_floatingip') \
                as mock_router_fip, \
                mock.patch.object(self.mixin, 'is_router_distributed',
                                  return_value=True):
            port = {portbindings.VNIC_TYPE: portbindings.VNIC_NORMAL}
            mock_fip_assoc_data.return_value = (port, 'subnet_id', 'ip_addr')
            mock_router_fip.return_value = 'router_id'
            self.assertRaises(
                exceptions.BadRequest,
                self.mixin._get_assoc_data, self.ctx, mock.ANY, mock.Mock())

neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/test_l3_hamode_db.py

# Copyright (C) 2014 eNovance SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib.api.definitions import dvr as dvr_apidef
from neutron_lib.api.definitions import external_net as extnet_apidef
from neutron_lib.api.definitions import l3_ext_ha_mode
from neutron_lib.api.definitions import port as port_def
from neutron_lib.api.definitions import portbindings
from neutron_lib.api.definitions import provider_net as providernet
from neutron_lib.callbacks import events
from neutron_lib.callbacks import exceptions as c_exc
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants
from neutron_lib import context
from neutron_lib.db import api as db_api
from neutron_lib import exceptions as n_exc
from neutron_lib.exceptions import l3 as l3_exc
from neutron_lib.exceptions import l3_ext_ha_mode as l3ha_exc
from neutron_lib.objects import exceptions
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_utils import uuidutils
import sqlalchemy as sa
from sqlalchemy import orm
import testtools

from neutron.agent.common import utils as agent_utils
from neutron.api.rpc.handlers import l3_rpc
from neutron.db import agents_db
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_hamode_db
from neutron.objects import l3_hamode
from neutron import quota
from neutron.scheduler import l3_agent_scheduler
from neutron.services.revisions import revision_plugin
from neutron.tests.common import helpers
from neutron.tests.unit.extensions import test_l3
from neutron.tests.unit import testlib_api

_uuid = uuidutils.generate_uuid


class FakeL3PluginWithAgents(test_l3.TestL3PluginBaseAttributes,
                             l3_hamode_db.L3_HA_NAT_db_mixin,
                             l3_agentschedulers_db.L3AgentSchedulerDbMixin,
                             agents_db.AgentDbMixin):
    pass


class L3HATestFramework(testlib_api.SqlTestCase):

    def setUp(self):
        super(L3HATestFramework, self).setUp()
        self.setup_coreplugin('ml2')
        self.core_plugin = directory.get_plugin()
        notif_p = mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin,
                                    '_notify_router_updated')
        self.notif_m = notif_p.start()
        make_res = mock.patch.object(quota.QuotaEngine, 'make_reservation')
        self.mock_quota_make_res = make_res.start()
        commit_res = mock.patch.object(quota.QuotaEngine,
                                       'commit_reservation')
        self.mock_quota_commit_res = commit_res.start()
        cfg.CONF.set_override('allow_overlapping_ips', True)

        self.plugin = FakeL3PluginWithAgents()
        directory.add_plugin(plugin_constants.L3, self.plugin)
        self.plugin.router_scheduler = l3_agent_scheduler.ChanceScheduler()
        self.agent1 = helpers.register_l3_agent()
        self.agent2 = helpers.register_l3_agent(
            'host_2', constants.L3_AGENT_MODE_DVR_SNAT)

    @property
    def admin_ctx(self):
        # Property generates a new session on each reference so different
        # API calls don't share a session with possible stale objects
        return context.get_admin_context()

    def _create_router(self, ha=True, tenant_id='tenant1', distributed=None,
                       ctx=None, admin_state_up=True):
        if ctx is None:
            ctx = self.admin_ctx
        ctx.tenant_id = tenant_id
        router = {'name': 'router1',
                  'admin_state_up': admin_state_up,
                  'tenant_id': tenant_id}
        if ha is not None:
            router['ha'] = ha
        if distributed is not None:
            router['distributed'] = distributed
        return self.plugin.create_router(ctx, {'router': router})
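    # NOTE: changing the ha flag is only allowed while the router is
    # administratively down (see test_migration_requires_admin_state_down),
    # so this helper wraps the update in an admin_state_up off/on cycle.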
    def _migrate_router(self, router_id, ha):
        self._update_router(router_id, admin_state=False)
        self._update_router(router_id, ha=ha)
        return self._update_router(router_id, admin_state=True)

    def _update_router(self, router_id, ha=None, distributed=None, ctx=None,
                       admin_state=None):
        if ctx is None:
            ctx = self.admin_ctx
        data = {'ha': ha} if ha is not None else {}
        if distributed is not None:
            data['distributed'] = distributed
        if admin_state is not None:
            data['admin_state_up'] = admin_state
        self.plugin.update_router(ctx, router_id, {'router': data})
        return self.plugin._get_router(ctx, router_id)


class L3HATestCase(L3HATestFramework):

    def test_verify_configuration_succeed(self):
        # Default configuration should pass
        self.plugin._verify_configuration()

    def test_verify_configuration_l3_ha_net_cidr_is_not_a_cidr(self):
        cfg.CONF.set_override('l3_ha_net_cidr', 'not a cidr')
        self.assertRaises(
            l3ha_exc.HANetworkCIDRNotValid,
            self.plugin._verify_configuration)

    def test_verify_configuration_l3_ha_net_cidr_is_not_a_subnet(self):
        cfg.CONF.set_override('l3_ha_net_cidr', '10.0.0.1/8')
        self.assertRaises(
            l3ha_exc.HANetworkCIDRNotValid,
            self.plugin._verify_configuration)

    def test_verify_configuration_max_l3_agents_below_0(self):
        cfg.CONF.set_override('max_l3_agents_per_router', -5)
        self.assertRaises(
            l3ha_exc.HAMaximumAgentsNumberNotValid,
            self.plugin._check_num_agents_per_router)

    def test_verify_configuration_max_l3_agents_unlimited(self):
        cfg.CONF.set_override('max_l3_agents_per_router',
                              l3_hamode_db.UNLIMITED_AGENTS_PER_ROUTER)
        self.plugin._check_num_agents_per_router()

    def test_get_ha_router_port_bindings(self):
        router = self._create_router()
        bindings = self.plugin.get_ha_router_port_bindings(
            self.admin_ctx, [router['id']])
        binding_dicts = [{'router_id': binding['router_id'],
                          'l3_agent_id': binding['l3_agent_id']}
                         for binding in bindings]
        self.assertIn({'router_id': router['id'],
                       'l3_agent_id': self.agent1['id']}, binding_dicts)
        self.assertIn({'router_id': router['id'],
                       'l3_agent_id': self.agent2['id']}, binding_dicts)

    def test_get_l3_bindings_hosting_router_with_ha_states_ha_router(self):
        router = self._create_router()
        self.plugin.update_routers_states(
            self.admin_ctx, {router['id']: 'active'}, self.agent1['host'])
        bindings = self.plugin.get_l3_bindings_hosting_router_with_ha_states(
            self.admin_ctx, router['id'])
        agent_ids = [(agent[0]['id'], agent[1]) for agent in bindings]
        self.assertIn((self.agent1['id'], 'active'), agent_ids)
        self.assertIn((self.agent2['id'], 'standby'), agent_ids)

    def test_get_l3_bindings_hosting_router_with_ha_states_not_scheduled(
            self):
        router = self._create_router(ha=False)
        # Check that there are no L3 agents scheduled for this router
        res = l3_hamode.L3HARouterAgentPortBinding.get_objects(
            self.admin_ctx, router_id=router['id'])
        self.assertEqual([], [r.agent for r in res])
        bindings = self.plugin.get_l3_bindings_hosting_router_with_ha_states(
            self.admin_ctx, router['id'])
        self.assertEqual([], bindings)

    def _assert_ha_state_for_agent(self, router, agent, state):
        bindings = (
            self.plugin.get_l3_bindings_hosting_router_with_ha_states(
                self.admin_ctx, router['id']))
        agent_ids = [(a[0]['id'], a[1]) for a in bindings]
        self.assertIn((agent['id'], state), agent_ids)

    def test_get_l3_bindings_hosting_router_with_ha_states_active_and_dead(
            self):
        router = self._create_router()
        self.plugin.update_routers_states(
            self.admin_ctx, {router['id']: constants.HA_ROUTER_STATE_ACTIVE},
            self.agent1['host'])
        self.plugin.update_routers_states(
            self.admin_ctx, {router['id']: constants.HA_ROUTER_STATE_ACTIVE},
            self.agent2['host'])
        with mock.patch.object(agent_utils, 'is_agent_down',
                               return_value=True):
            self._assert_ha_state_for_agent(
                router, self.agent1, constants.HA_ROUTER_STATE_UNKNOWN)
    def test_get_l3_bindings_hosting_router_agents_admin_state_up_is_false(
            self):
        router = self._create_router()
        self.plugin.update_routers_states(
            self.admin_ctx, {router['id']: constants.HA_ROUTER_STATE_ACTIVE},
            self.agent1['host'])
        self.plugin.update_routers_states(
            self.admin_ctx, {router['id']: constants.HA_ROUTER_STATE_ACTIVE},
            self.agent2['host'])
        helpers.set_agent_admin_state(self.agent1['id'],
                                      admin_state_up=False)
        self._assert_ha_state_for_agent(
            router, self.agent1, constants.HA_ROUTER_STATE_STANDBY)

    def test_get_l3_bindings_hosting_router_agents_admin_state_up_is_true(
            self):
        router = self._create_router()
        self.plugin.update_routers_states(
            self.admin_ctx, {router['id']: constants.HA_ROUTER_STATE_ACTIVE},
            self.agent1['host'])
        self.plugin.update_routers_states(
            self.admin_ctx, {router['id']: constants.HA_ROUTER_STATE_ACTIVE},
            self.agent2['host'])
        helpers.set_agent_admin_state(self.agent1['id'],
                                      admin_state_up=True)
        self._assert_ha_state_for_agent(
            router, self.agent1, constants.HA_ROUTER_STATE_ACTIVE)

    def test_get_l3_bindings_hosting_router_with_ha_states_one_dead(self):
        router = self._create_router()
        self.plugin.update_routers_states(
            self.admin_ctx, {router['id']: constants.HA_ROUTER_STATE_ACTIVE},
            self.agent1['host'])
        self.plugin.update_routers_states(
            self.admin_ctx, {router['id']: constants.HA_ROUTER_STATE_STANDBY},
            self.agent2['host'])
        with mock.patch.object(agent_utils, 'is_agent_down',
                               return_value=True):
            # With above mock all agents are in dead state
            # hence router state is Unknown overall.
            self._assert_ha_state_for_agent(
                router, self.agent1, constants.HA_ROUTER_STATE_UNKNOWN)

    def test_ha_router_create(self):
        router = self._create_router()
        self.assertTrue(router['ha'])

    def test_ha_router_create_with_distributed(self):
        helpers.register_l3_agent(
            'host_3', constants.L3_AGENT_MODE_DVR_SNAT)
        router = self._create_router(ha=True, distributed=True)
        self.assertTrue(router['ha'])
        self.assertTrue(router['distributed'])
        ha_network = self.plugin.get_ha_network(self.admin_ctx,
                                                router['tenant_id'])
        self.assertIsNotNone(ha_network)

    def test_no_ha_router_create(self):
        router = self._create_router(ha=False)
        self.assertFalse(router['ha'])

    def test_add_ha_network_settings(self):
        cfg.CONF.set_override('l3_ha_network_type', 'abc')
        cfg.CONF.set_override('l3_ha_network_physical_name', 'def')
        network = {}
        self.plugin._add_ha_network_settings(network)
        self.assertEqual('abc', network[providernet.NETWORK_TYPE])
        self.assertEqual('def', network[providernet.PHYSICAL_NETWORK])

    def test_router_create_with_ha_conf_enabled(self):
        cfg.CONF.set_override('l3_ha', True)
        router = self._create_router(ha=None)
        self.assertTrue(router['ha'])

    def test_ha_interface_concurrent_create_on_delete(self):
        # this test depends on protection from the revision plugin so
        # we have to initialize it
        revision_plugin.RevisionPlugin()
        router = self._create_router(ha=True)

        def jam_in_interface(*args, **kwargs):
            ctx = context.get_admin_context()
            net = self.plugin._ensure_vr_id_and_network(
                ctx, self.plugin._get_router(ctx, router['id']))
            self.plugin.add_ha_port(
                ctx, router['id'], net.network_id, router['tenant_id'])
            registry.unsubscribe(jam_in_interface, resources.ROUTER,
                                 events.PRECOMMIT_DELETE)
        registry.subscribe(jam_in_interface, resources.ROUTER,
                           events.PRECOMMIT_DELETE)
        self.plugin.delete_router(self.admin_ctx, router['id'])

    def test_ha_router_delete_with_distributed(self):
        router = self._create_router(ha=True, distributed=True)
        self.plugin.delete_router(self.admin_ctx, router['id'])
        self.assertRaises(l3_exc.RouterNotFound, self.plugin._get_router,
                          self.admin_ctx, router['id'])

    def test_migration_from_ha(self):
        router = self._create_router()
        self.assertTrue(router['ha'])
        router = self._migrate_router(router['id'], False)
        self.assertFalse(router.extra_attributes['ha'])
        self.assertIsNone(router.extra_attributes['ha_vr_id'])

    def test_migration_to_ha(self):
        router = self._create_router(ha=False)
        self.assertFalse(router['ha'])
        router = self._migrate_router(router['id'], True)
        self.assertTrue(router.extra_attributes['ha'])
        self.assertIsNotNone(router.extra_attributes['ha_vr_id'])

    def test_migration_requires_admin_state_down(self):
        router = self._create_router(ha=False)
        e = self.assertRaises(c_exc.CallbackFailure,
                              self._update_router,
                              router['id'], ha=True)
        self.assertIsInstance(e.inner_exceptions[0], n_exc.BadRequest)

    def test_migrate_ha_router_to_distributed_and_ha(self):
        router = self._create_router(ha=True, admin_state_up=False,
                                     distributed=False)
        self.assertTrue(router['ha'])
        after_update = self._update_router(router['id'],
                                           ha=True, distributed=True)
        self.assertTrue(after_update.extra_attributes.ha)
        self.assertTrue(after_update.extra_attributes.distributed)

    def test_migrate_ha_router_to_distributed_and_not_ha(self):
        router = self._create_router(ha=True, admin_state_up=False,
                                     distributed=False)
        self.assertTrue(router['ha'])
        after_update = self._update_router(router['id'],
                                           ha=False, distributed=True)
        self.assertFalse(after_update.extra_attributes.ha)
        self.assertTrue(after_update.extra_attributes.distributed)

    def test_migrate_dvr_router_to_ha_and_not_dvr(self):
        router = self._create_router(ha=False, admin_state_up=False,
                                     distributed=True)
        self.assertTrue(router['distributed'])
        after_update = self._update_router(router['id'],
                                           ha=True, distributed=False)
        self.assertTrue(after_update.extra_attributes.ha)
        self.assertFalse(after_update.extra_attributes.distributed)

    def test_migrate_dvr_router_to_ha_and_dvr(self):
        router = self._create_router(ha=False, admin_state_up=False,
                                     distributed=True)
        self.assertTrue(router['distributed'])
        after_update = self._update_router(router['id'],
                                           ha=True, distributed=True)
        self.assertTrue(after_update.extra_attributes.ha)
        self.assertTrue(after_update.extra_attributes.distributed)

    def test_migrate_distributed_router_to_ha(self):
        router = self._create_router(ha=False, admin_state_up=False,
                                     distributed=True)
        self.assertFalse(router['ha'])
        self.assertTrue(router['distributed'])
        after_update = self._update_router(router['id'],
                                           ha=True, distributed=False)
        self.assertTrue(after_update.extra_attributes.ha)
        self.assertFalse(after_update.extra_attributes.distributed)

    def test_migrate_legacy_router_to_distributed_and_ha(self):
        router = self._create_router(ha=False, admin_state_up=False,
                                     distributed=False)
        self.assertFalse(router['ha'])
        self.assertFalse(router['distributed'])
        after_update = self._update_router(router['id'],
                                           ha=True, distributed=True)
        self.assertTrue(after_update.extra_attributes.ha)
        self.assertTrue(after_update.extra_attributes.distributed)

    def test_unbind_ha_router(self):
        router = self._create_router()
        bound_agents = self.plugin.get_l3_agents_hosting_routers(
            self.admin_ctx, [router['id']])
        self.assertEqual(2, len(bound_agents))
        self.plugin._unbind_ha_router(self.admin_ctx, router['id'])
        bound_agents = self.plugin.get_l3_agents_hosting_routers(
            self.admin_ctx, [router['id']])
        self.assertEqual(0, len(bound_agents))

    def test_get_ha_sync_data_for_host_with_non_dvr_agent(self):
        with mock.patch.object(self.plugin,
                               '_get_dvr_sync_data') as mock_get_sync:
            self.plugin.supported_extension_aliases = [
                dvr_apidef.ALIAS, l3_ext_ha_mode.ALIAS]
            self.plugin.get_ha_sync_data_for_host(self.admin_ctx,
                                                  self.agent1['host'],
                                                  self.agent1)
            self.assertFalse(mock_get_sync.called)

    def test_get_ha_sync_data_for_host_with_dvr_agent(self):
        with mock.patch.object(self.plugin,
                               '_get_dvr_sync_data') as mock_get_sync:
            self.plugin.supported_extension_aliases = [
                dvr_apidef.ALIAS, l3_ext_ha_mode.ALIAS]
            self.plugin.get_ha_sync_data_for_host(self.admin_ctx,
                                                  self.agent2['host'],
                                                  self.agent2)
            self.assertTrue(mock_get_sync.called)

    def test_l3_agent_routers_query_interface(self):
        router = self._create_router()
        routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx,
                                                        self.agent1['host'],
                                                        self.agent1)
        self.assertEqual(1, len(routers))
        router = routers[0]

        self.assertIsNotNone(router.get('ha'))

        interface = router.get(constants.HA_INTERFACE_KEY)
        self.assertIsNotNone(interface)

        self.assertEqual(constants.DEVICE_OWNER_ROUTER_HA_INTF,
                         interface['device_owner'])

        subnets = interface['subnets']
        self.assertEqual(1, len(subnets))
        self.assertEqual(cfg.CONF.l3_ha_net_cidr, subnets[0]['cidr'])

    def test_l3_agent_routers_query_interface_includes_dvrsnat(self):
        router = self._create_router(distributed=True)
        routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx,
                                                        'a-dvr_snat-host',
                                                        self.agent2)
        self.assertEqual(1, len(routers))
        router = routers[0]

        self.assertTrue(router.get('ha'))

        interface = router.get(constants.HA_INTERFACE_KEY)
        self.assertIsNone(interface)

    def test_unique_ha_network_per_tenant(self):
        tenant1 = _uuid()
        tenant2 = _uuid()
        self._create_router(tenant_id=tenant1)
        self._create_router(tenant_id=tenant2)
        ha_network1 = self.plugin.get_ha_network(self.admin_ctx, tenant1)
        ha_network2 = self.plugin.get_ha_network(self.admin_ctx, tenant2)
        self.assertNotEqual(
            ha_network1['network_id'], ha_network2['network_id'])

    def _deployed_router_change_ha_flag(self, to_ha):
        router1 = self._create_router(ha=not to_ha)
        routers = self.plugin.get_ha_sync_data_for_host(
            self.admin_ctx, self.agent1['host'], self.agent1)
        router = routers[0]
        interface = router.get(constants.HA_INTERFACE_KEY)
        if to_ha:
            self.assertIsNone(interface)
        else:
            self.assertIsNotNone(interface)

        self._migrate_router(router['id'], to_ha)
        self.plugin.schedule_router(self.admin_ctx, router1['id'])
        routers = self.plugin.get_ha_sync_data_for_host(
            self.admin_ctx, self.agent1['host'], self.agent1)
        router = routers[0]
        interface = router.get(constants.HA_INTERFACE_KEY)
        if to_ha:
            self.assertIsNotNone(interface)
        else:
            self.assertIsNone(interface)

    def test_deployed_router_can_have_ha_enabled(self):
        self._deployed_router_change_ha_flag(to_ha=True)

    def test_deployed_router_can_have_ha_disabled(self):
        self._deployed_router_change_ha_flag(to_ha=False)

    def test_create_ha_router_notifies_agent(self):
        self._create_router()
        self.assertTrue(self.notif_m.called)

    def test_update_router_to_ha_notifies_agent(self):
        router = self._create_router(ha=False)
        self.notif_m.reset_mock()
        self._migrate_router(router['id'], True)
        self.assertTrue(self.notif_m.called)

    def test_unique_vr_id_between_routers(self):
        self._create_router()
        self._create_router()
        routers = self.plugin.get_ha_sync_data_for_host(
            self.admin_ctx, self.agent1['host'], self.agent1)
        self.assertEqual(2, len(routers))
        self.assertNotEqual(routers[0]['ha_vr_id'], routers[1]['ha_vr_id'])

    @mock.patch('neutron.db.l3_hamode_db.VR_ID_RANGE', new=set(range(1, 1)))
    def test_vr_id_depleted(self):
        self.assertEqual(constants.ERROR, self._create_router()['status'])
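    # NOTE: VR IDs are allocated per HA network, and each tenant gets its
    # own HA network (see test_unique_ha_network_per_tenant), so even a
    # single-ID range can serve routers in different tenants.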
    @mock.patch('neutron.db.l3_hamode_db.VR_ID_RANGE', new=set(range(1, 2)))
    def test_vr_id_unique_range_per_tenant(self):
        self._create_router()
        self._create_router(tenant_id=_uuid())
        routers = self.plugin.get_ha_sync_data_for_host(
            self.admin_ctx, self.agent1['host'], self.agent1)
        self.assertEqual(2, len(routers))
        self.assertEqual(routers[0]['ha_vr_id'], routers[1]['ha_vr_id'])

    @mock.patch('neutron.db.l3_hamode_db.MAX_ALLOCATION_TRIES', new=2)
    def test_vr_id_allocation_constraint_conflict(self):
        router = self._create_router()
        network = self.plugin.get_ha_network(self.admin_ctx,
                                             router['tenant_id'])
        router_db = self.plugin._get_router(self.admin_ctx, router['id'])
        self.assertIsNone(self.plugin._ensure_vr_id(self.admin_ctx,
                                                    router_db, network))

    def test_vr_id_allocation_delete_router(self):
        router = self._create_router()
        network = self.plugin.get_ha_network(self.admin_ctx,
                                             router['tenant_id'])

        allocs_before = self.plugin._get_allocated_vr_id(self.admin_ctx,
                                                         network.network_id)
        router = self._create_router()
        allocs_current = self.plugin._get_allocated_vr_id(self.admin_ctx,
                                                          network.network_id)
        self.assertNotEqual(allocs_before, allocs_current)

        self.plugin.delete_router(self.admin_ctx, router['id'])
        allocs_after = self.plugin._get_allocated_vr_id(self.admin_ctx,
                                                        network.network_id)
        self.assertEqual(allocs_before, allocs_after)

    def test_vr_id_allocation_router_migration(self):
        router = self._create_router()
        network = self.plugin.get_ha_network(self.admin_ctx,
                                             router['tenant_id'])

        allocs_before = self.plugin._get_allocated_vr_id(self.admin_ctx,
                                                         network.network_id)
        router = self._create_router()
        self._migrate_router(router['id'], False)
        allocs_after = self.plugin._get_allocated_vr_id(self.admin_ctx,
                                                        network.network_id)
        self.assertEqual(allocs_before, allocs_after)

    def test_migration_delete_ha_network_if_last_router(self):
        router = self._create_router()
        self._migrate_router(router['id'], False)
        self.assertIsNone(
            self.plugin.get_ha_network(self.admin_ctx,
                                       router['tenant_id']))

    def test_migration_no_delete_ha_network_if_not_last_router(self):
        router = self._create_router()
        router2 = self._create_router()
        network = self.plugin.get_ha_network(self.admin_ctx,
                                             router['tenant_id'])
        network2 = self.plugin.get_ha_network(self.admin_ctx,
                                              router2['tenant_id'])
        self.assertEqual(network.network_id, network2.network_id)
        self._migrate_router(router['id'], False)
        self.assertIsNotNone(
            self.plugin.get_ha_network(self.admin_ctx,
                                       router2['tenant_id']))

    def test_one_ha_router_one_not(self):
        self._create_router(ha=False)
        self._create_router()
        routers = self.plugin.get_ha_sync_data_for_host(
            self.admin_ctx, self.agent1['host'], self.agent1)

        ha0 = routers[0]['ha']
        ha1 = routers[1]['ha']
        self.assertNotEqual(ha0, ha1)

    def test_add_ha_port_subtransactions_blocked(self):
        ctx = self.admin_ctx
        with db_api.CONTEXT_WRITER.using(ctx):
            self.assertRaises(RuntimeError,
                              self.plugin.add_ha_port,
                              ctx, 'id', 'id', 'id')

    def test_add_ha_port_binding_failure_rolls_back_port(self):
        router = self._create_router()
        device_filter = {'device_id': [router['id']]}
        ports_before = self.core_plugin.get_ports(
            self.admin_ctx, filters=device_filter)
        network = self.plugin.get_ha_network(self.admin_ctx,
                                             router['tenant_id'])

        with mock.patch.object(l3_hamode, 'L3HARouterAgentPortBinding',
                               side_effect=ValueError):
            self.assertRaises(ValueError, self.plugin.add_ha_port,
                              self.admin_ctx, router['id'],
                              network.network_id, router['tenant_id'])

        ports_after = self.core_plugin.get_ports(
            self.admin_ctx, filters=device_filter)
ports_after) def test_create_ha_network_binding_failure_rolls_back_network(self): networks_before = self.core_plugin.get_networks(self.admin_ctx) with mock.patch.object(l3_hamode, 'L3HARouterNetwork', side_effect=ValueError): self.assertRaises(ValueError, self.plugin._create_ha_network, self.admin_ctx, _uuid()) networks_after = self.core_plugin.get_networks(self.admin_ctx) self.assertEqual(networks_before, networks_after) def test_create_ha_network_subnet_failure_rolls_back_network(self): networks_before = self.core_plugin.get_networks(self.admin_ctx) with mock.patch.object(self.plugin, '_create_ha_subnet', side_effect=ValueError): self.assertRaises(ValueError, self.plugin._create_ha_network, self.admin_ctx, _uuid()) networks_after = self.core_plugin.get_networks(self.admin_ctx) self.assertEqual(networks_before, networks_after) def test_update_router_ha_interface_port_ip_not_allow(self): router = self._create_router() network = self.plugin.get_ha_network(self.admin_ctx, router['tenant_id']) self.plugin.add_ha_port( self.admin_ctx, router['id'], network.network_id, router['tenant_id']) device_filter = { 'device_id': [router['id']], 'device_owner': [constants.DEVICE_OWNER_ROUTER_HA_INTF]} ports = self.core_plugin.get_ports( self.admin_ctx, filters=device_filter) port = {"port": {"fixed_ips": [ {"ip_address": "169.254.192.100"}, {"ip_address": "169.254.192.200"}]}} self.assertRaises(n_exc.BadRequest, self.core_plugin.update_port, self.admin_ctx, ports[0]['id'], port) def test_ensure_vr_id_and_network_net_exists(self): router = self._create_router() router_db = self.plugin._get_router(self.admin_ctx, router['id']) with mock.patch.object(self.plugin, '_create_ha_network') as create: self.plugin._ensure_vr_id_and_network( self.admin_ctx, router_db) self.assertFalse(create.called) def test_ensure_vr_id_and_network_concurrent_create(self): # create a non-ha router so we can manually invoke the create ha # interfaces call down below router = self._create_router(ha=False) router_db = self.plugin._get_router(self.admin_ctx, router['id']) orig_create = self.plugin._create_ha_network created_nets = [] def _create_ha_network(*args, **kwargs): # create the network and then raise the error to simulate another # worker creating the network before us. 
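# NOTE: DBDuplicateEntry on the tenant_id column is what the unique
# constraint raises when a second API worker wins the race; the caller
# under test is expected to swallow it and re-read the row the other
# worker committed. Roughly (a sketch of the retry pattern, not the
# exact plugin code):
#
#     try:
#         ha_net = self._create_ha_network(context, tenant_id)
#     except db_exc.DBDuplicateEntry:
#         # another worker won the race; reuse its network
#         ha_net = self.get_ha_network(context, tenant_id)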
created_nets.append(orig_create(*args, **kwargs)) raise db_exc.DBDuplicateEntry(columns=['tenant_id']) with mock.patch.object(self.plugin, '_create_ha_network', new=_create_ha_network): net = self.plugin._ensure_vr_id_and_network( self.admin_ctx, router_db) # ensure that it used the concurrently created network self.assertEqual([net], created_nets) def _test_ensure_with_patched_ensure_vr_id(self, _ensure_vr_id): # create a non-ha router so we can manually invoke the create ha # interfaces call down below router = self._create_router(ha=False) router_db = self.plugin._get_router(self.admin_ctx, router['id']) with mock.patch.object(self.plugin, '_ensure_vr_id', new=_ensure_vr_id): self.plugin._ensure_vr_id_and_network( self.admin_ctx, router_db) self.assertTrue(_ensure_vr_id.called) def test_ensure_vr_id_and_network_interface_failure(self): def _ensure_vr_id(ctx, rdb, ha_net): raise ValueError('broken') with testtools.ExpectedException(ValueError): self._test_ensure_with_patched_ensure_vr_id(_ensure_vr_id) self.assertEqual([], self.core_plugin.get_networks(self.admin_ctx)) def test_ensure_vr_id_and_network_concurrent_delete(self): orig_create = self.plugin._ensure_vr_id def _ensure_vr_id(ctx, rdb, ha_net): # concurrent delete on the first attempt if not getattr(_ensure_vr_id, 'called', False): setattr(_ensure_vr_id, 'called', True) self.core_plugin.delete_network(self.admin_ctx, ha_net['network_id']) return orig_create(ctx, rdb, ha_net) self._test_ensure_with_patched_ensure_vr_id(_ensure_vr_id) def test_ensure_vr_id_and_network_concurrent_swap(self): orig_create = self.plugin._ensure_vr_id def _ensure_vr_id(ctx, rdb, ha_net): # concurrent delete on the first attempt if not getattr(_ensure_vr_id, 'called', False): setattr(_ensure_vr_id, 'called', True) self.core_plugin.delete_network(self.admin_ctx, ha_net['network_id']) self.plugin._create_ha_network(self.admin_ctx, rdb.tenant_id) return orig_create(ctx, rdb, ha_net) self._test_ensure_with_patched_ensure_vr_id(_ensure_vr_id) def test_create_ha_network_tenant_binding_raises_duplicate(self): router = self._create_router() network = self.plugin.get_ha_network(self.admin_ctx, router['tenant_id']) self.plugin._create_ha_network_tenant_binding( self.admin_ctx, 't1', network['network_id']) with testtools.ExpectedException( exceptions.NeutronDbObjectDuplicateEntry): self.plugin._create_ha_network_tenant_binding( self.admin_ctx, 't1', network['network_id']) def test_create_router_db_vr_id_allocation_goes_to_error(self): for method in ('_ensure_vr_id', '_notify_router_updated'): with mock.patch.object(self.plugin, method, side_effect=ValueError): self.assertEqual(constants.ERROR, self._create_router()['status']) def test_get_active_host_for_ha_router(self): router = self._create_router() self.assertIsNone( self.plugin.get_active_host_for_ha_router( self.admin_ctx, router['id'])) self.plugin.update_routers_states( self.admin_ctx, {router['id']: 'active'}, self.agent2['host']) self.assertEqual( self.agent2['host'], self.plugin.get_active_host_for_ha_router( self.admin_ctx, router['id'])) def test_update_routers_states(self): router1 = self._create_router() router2 = self._create_router() routers = self.plugin.get_ha_sync_data_for_host( self.admin_ctx, self.agent1['host'], self.agent1) for router in routers: self.assertEqual('standby', router[constants.HA_ROUTER_STATE_KEY]) states = {router1['id']: 'active', router2['id']: 'standby'} self.plugin.update_routers_states( self.admin_ctx, states, self.agent1['host']) routers = 
self.plugin.get_ha_sync_data_for_host( self.admin_ctx, self.agent1['host'], self.agent1) for router in routers: self.assertEqual(states[router['id']], router[constants.HA_ROUTER_STATE_KEY]) def test_sync_ha_router_info_ha_interface_port_concurrently_deleted(self): ctx = self.admin_ctx router1 = self._create_router() router2 = self._create_router() # retrieve all router ha port bindings bindings = self.plugin.get_ha_router_port_bindings( ctx, [router1['id'], router2['id']]) self.assertEqual(4, len(bindings)) routers = self.plugin.get_ha_sync_data_for_host( self.admin_ctx, self.agent1['host'], self.agent1) self.assertEqual(2, len(routers)) bindings = self.plugin.get_ha_router_port_bindings( ctx, [router1['id'], router2['id']], self.agent1['host']) self.assertEqual(2, len(bindings)) fake_binding = mock.Mock() fake_binding.router_id = bindings[1].router_id fake_binding.port = None with mock.patch.object( self.plugin, "get_ha_router_port_bindings", return_value=[bindings[0], fake_binding]): routers = self.plugin.get_ha_sync_data_for_host( ctx, self.agent1['host'], self.agent1) self.assertEqual(1, len(routers)) self.assertIsNotNone(routers[0].get(constants.HA_INTERFACE_KEY)) def test_sync_ha_router_info_router_concurrently_deleted(self): self._create_router() with mock.patch.object( self.plugin, "get_ha_router_port_bindings", return_value=[]): routers = self.plugin.get_ha_sync_data_for_host( self.admin_ctx, self.agent1['host'], self.agent1) self.assertEqual(0, len(routers)) def test_sync_ha_router_info_router_concurrently_deleted_agent_dvr(self): self._create_router() orig_func = self.plugin._process_sync_ha_data def process_sync_ha_data(context, routers, host, agent_mode): return orig_func(context, routers, host, is_any_dvr_agent=True) with mock.patch.object(self.plugin, '_process_sync_ha_data', side_effect=process_sync_ha_data): routers = self.plugin.get_ha_sync_data_for_host( self.admin_ctx, self.agent1['host'], self.agent1) self.assertEqual(1, len(routers)) def test_set_router_states_handles_concurrently_deleted_router(self): router1 = self._create_router() router2 = self._create_router() ctx = self.admin_ctx bindings = self.plugin.get_ha_router_port_bindings( ctx, [router1['id'], router2['id']]) self.plugin.delete_router(self.admin_ctx, router1['id']) self.plugin._set_router_states( ctx, bindings, {router1['id']: 'active', router2['id']: 'active'}) routers = self.plugin.get_ha_sync_data_for_host( self.admin_ctx, self.agent1['host'], self.agent1) self.assertEqual('active', routers[0][constants.HA_ROUTER_STATE_KEY]) def test_update_routers_states_port_not_found(self): router1 = self._create_router() port = {'id': 'foo', 'device_id': router1['id']} with mock.patch.object(self.core_plugin, 'get_ports', return_value=[port]): with mock.patch.object( self.core_plugin, 'update_port', side_effect=n_exc.PortNotFound(port_id='foo')): states = {router1['id']: 'active'} self.plugin.update_routers_states( self.admin_ctx, states, self.agent1['host']) def test_exclude_dvr_agents_for_ha_candidates(self): """Test dvr agents configured with "dvr" only, as opposed to "dvr_snat", are excluded. This test case tests that when get_number_of_agents_for_scheduling is called, it does not count dvr-only agents. """ # Test setup registers two l3 agents. # Register another l3 agent with dvr mode and assert that # get_number_of_agents_for_scheduling returns 2.
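# NOTE: only 'legacy' and 'dvr_snat' mode l3 agents can host the HA/SNAT
# side of a router, so a 'dvr'-only agent must not increase the
# candidate count. A sketch of the filtering this pair of tests
# exercises, assuming a simplified form of the scheduler query:
#
#     candidates = [agent for agent in l3_agents
#                   if agent.mode in (constants.L3_AGENT_MODE_LEGACY,
#                                     constants.L3_AGENT_MODE_DVR_SNAT)]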
helpers.register_l3_agent('host_3', constants.L3_AGENT_MODE_DVR) num_ha_candidates = self.plugin.get_number_of_agents_for_scheduling( self.admin_ctx) self.assertEqual(2, num_ha_candidates) def test_include_dvr_snat_agents_for_ha_candidates(self): """Test dvr agents configured with "dvr_snat" are included. This test case tests that when get_number_of_agents_for_scheduling is called, it counts dvr_snat agents. """ # Test setup registers two l3 agents. # Register another l3 agent with dvr_snat mode and assert that # get_number_of_agents_for_scheduling returns 3. helpers.register_l3_agent('host_3', constants.L3_AGENT_MODE_DVR_SNAT) num_ha_candidates = self.plugin.get_number_of_agents_for_scheduling( self.admin_ctx) self.assertEqual(3, num_ha_candidates) def test_ha_network_deleted_if_no_ha_router_present_two_tenants(self): # Create two routers in different tenants. router1 = self._create_router() router2 = self._create_router(tenant_id='tenant2') nets_before = [net['name'] for net in self.core_plugin.get_networks(self.admin_ctx)] # Check that HA networks were created for each tenant self.assertIn('HA network tenant %s' % router1['tenant_id'], nets_before) self.assertIn('HA network tenant %s' % router2['tenant_id'], nets_before) # Delete router1 self.plugin.delete_router(self.admin_ctx, router1['id']) nets_after = [net['name'] for net in self.core_plugin.get_networks(self.admin_ctx)] # Check that HA network for tenant1 is deleted and for tenant2 is not. self.assertNotIn('HA network tenant %s' % router1['tenant_id'], nets_after) self.assertIn('HA network tenant %s' % router2['tenant_id'], nets_after) def test_ha_network_is_not_deleted_if_ha_router_is_present(self): # Create 2 routers in one tenant and check that if one is deleted, # the HA network still exists. router1 = self._create_router() router2 = self._create_router() nets_before = [net['name'] for net in self.core_plugin.get_networks(self.admin_ctx)] self.assertIn('HA network tenant %s' % router1['tenant_id'], nets_before) self.plugin.delete_router(self.admin_ctx, router2['id']) nets_after = [net['name'] for net in self.core_plugin.get_networks(self.admin_ctx)] self.assertIn('HA network tenant %s' % router1['tenant_id'], nets_after) def test_ha_network_delete_ha_and_non_ha_router(self): # Create HA and non-HA routers. Check that after the HA router is # deleted, its HA network is deleted too.
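# NOTE: the name assertions in these tests depend on the fixed pattern
# the plugin uses when it creates a tenant's HA network; assuming the
# upstream convention, the expected name is built as:
#
#     name = 'HA network tenant %s' % tenant_id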
router1 = self._create_router(ha=False) router2 = self._create_router() nets_before = [net['name'] for net in self.core_plugin.get_networks(self.admin_ctx)] self.assertIn('HA network tenant %s' % router1['tenant_id'], nets_before) self.plugin.delete_router(self.admin_ctx, router2['id']) nets_after = [net['name'] for net in self.core_plugin.get_networks(self.admin_ctx)] self.assertNotIn('HA network tenant %s' % router1['tenant_id'], nets_after) def _test_ha_network_is_not_deleted_raise_exception(self, exception): router1 = self._create_router() nets_before = [net['name'] for net in self.core_plugin.get_networks(self.admin_ctx)] self.assertIn('HA network tenant %s' % router1['tenant_id'], nets_before) ha_network = self.plugin.get_ha_network(self.admin_ctx, router1['tenant_id']) with mock.patch.object(self.plugin, '_delete_ha_network', side_effect=exception): self.plugin.safe_delete_ha_network(self.admin_ctx, ha_network, router1['tenant_id']) nets_after = [net['name'] for net in self.core_plugin.get_networks(self.admin_ctx)] self.assertIn('HA network tenant %s' % router1['tenant_id'], nets_after) def test_ha_network_is_not_deleted_if_another_ha_router_is_created(self): # If another router was created during deletion of the current router, # _delete_ha_network will fail with InvalidRequestError. Check that the # HA network won't be deleted. self._test_ha_network_is_not_deleted_raise_exception( sa.exc.InvalidRequestError) def test_ha_network_is_not_deleted_if_network_in_use(self): self._test_ha_network_is_not_deleted_raise_exception( n_exc.NetworkInUse(net_id="foo_net_id")) def test_ha_network_is_not_deleted_if_db_deleted_error(self): self._test_ha_network_is_not_deleted_raise_exception( orm.exc.ObjectDeletedError(None)) def test_ha_router_create_failed_no_ha_network_delete(self): tenant_id = "foo_tenant_id" nets_before = self.core_plugin.get_networks(self.admin_ctx) self.assertNotIn('HA network tenant %s' % tenant_id, nets_before) # Unable to create HA network with mock.patch.object(self.core_plugin, 'create_network', side_effect=n_exc.NoNetworkAvailable): e = self.assertRaises(c_exc.CallbackFailure, self._create_router, True, tenant_id) self.assertIsInstance(e.inner_exceptions[0], n_exc.NoNetworkAvailable) nets_after = self.core_plugin.get_networks(self.admin_ctx) self.assertEqual(nets_before, nets_after) self.assertNotIn('HA network tenant %s' % tenant_id, nets_after) def test_update_port_status_port_binding_deleted_concurrently(self): router1 = self._create_router() states = {router1['id']: 'active'} with mock.patch.object(self.plugin, 'get_ha_router_port_bindings'): (l3_hamode.L3HARouterAgentPortBinding.delete_objects( self.admin_ctx, router_id=router1['id'])) self.plugin.update_routers_states( self.admin_ctx, states, self.agent1['host']) class L3HAModeDbTestCase(L3HATestFramework): def _create_network(self, plugin, ctx, name='net', tenant_id='tenant1', external=False): network = {'network': {'name': name, 'shared': False, 'admin_state_up': True, 'tenant_id': tenant_id, extnet_apidef.EXTERNAL: external}} return plugin.create_network(ctx, network)['id'] def _create_subnet(self, plugin, ctx, network_id, cidr='10.0.0.0/8', name='subnet', tenant_id='tenant1'): subnet = {'subnet': {'name': name, 'ip_version': constants.IP_VERSION_4, 'network_id': network_id, 'cidr': cidr, 'gateway_ip': constants.ATTR_NOT_SPECIFIED, 'allocation_pools': constants.ATTR_NOT_SPECIFIED, 'dns_nameservers': constants.ATTR_NOT_SPECIFIED, 'host_routes': constants.ATTR_NOT_SPECIFIED, 'tenant_id': tenant_id, 'enable_dhcp': True,
'ipv6_ra_mode': constants.ATTR_NOT_SPECIFIED}} created_subnet = plugin.create_subnet(ctx, subnet) return created_subnet def _test_device_owner(self, router_id, dvr, ha): if dvr: device_owner = constants.DEVICE_OWNER_DVR_INTERFACE elif ha: device_owner = constants.DEVICE_OWNER_HA_REPLICATED_INT else: device_owner = constants.DEVICE_OWNER_ROUTER_INTF filters = {'device_id': [router_id], 'device_owner': [device_owner]} ports = self.core_plugin.get_ports(self.admin_ctx, filters=filters) self.assertEqual(1, len(ports)) def _test_device_owner_during_router_migration( self, before_ha=False, before_dvr=False, after_ha=False, after_dvr=False): # As HA router is supported only in this test file, # we test all migrations here router = self._create_router( ctx=self.admin_ctx, ha=before_ha, distributed=before_dvr) network_id = self._create_network(self.core_plugin, self.admin_ctx) subnet = self._create_subnet( self.core_plugin, self.admin_ctx, network_id) interface_info = {'subnet_id': subnet['id']} self.plugin.add_router_interface( self.admin_ctx, router['id'], interface_info) self._test_device_owner(router['id'], before_dvr, before_ha) self.plugin.update_router( self.admin_ctx, router['id'], {'router': {'admin_state_up': False}}) self.plugin.update_router( self.admin_ctx, router['id'], {'router': {'distributed': after_dvr, 'ha': after_ha}}) self._test_device_owner(router['id'], after_dvr, after_ha) def test_device_owner_during_router_migration_from_dvr_to_ha(self): self._test_device_owner_during_router_migration( before_dvr=True, after_ha=True) def test_device_owner_during_router_migration_from_dvr_to_dvrha(self): self._test_device_owner_during_router_migration( before_dvr=True, after_ha=True, after_dvr=True) def test_device_owner_during_router_migration_from_dvr_to_legacy(self): self._test_device_owner_during_router_migration(before_dvr=True) def test_device_owner_during_router_migration_from_ha_to_legacy(self): self._test_device_owner_during_router_migration(before_ha=True) def test_device_owner_during_router_migration_from_ha_to_dvr(self): self._test_device_owner_during_router_migration( before_ha=True, after_dvr=True) def test_device_owner_during_router_migration_from_ha_to_dvrha(self): self._test_device_owner_during_router_migration( before_ha=True, after_ha=True, after_dvr=True) def test_device_owner_during_router_migration_from_legacy_to_dvr(self): self._test_device_owner_during_router_migration(after_dvr=True) def test_device_owner_during_router_migration_from_legacy_to_ha(self): self._test_device_owner_during_router_migration(after_ha=True) def test_remove_ha_in_use(self): router = self._create_router(ctx=self.admin_ctx) network_id = self._create_network(self.core_plugin, self.admin_ctx) subnet = self._create_subnet(self.core_plugin, self.admin_ctx, network_id) interface_info = {'subnet_id': subnet['id']} self.plugin.add_router_interface(self.admin_ctx, router['id'], interface_info) self.assertRaises(l3_exc.RouterInUse, self.plugin.delete_router, self.admin_ctx, router['id']) bindings = self.plugin.get_ha_router_port_bindings( self.admin_ctx, [router['id']]) self.assertEqual(2, len(bindings)) def test_update_ha_router_replicated_interface_port_ip_not_allowed(self): router = self._create_router() network_id = self._create_network(self.core_plugin, self.admin_ctx) subnet = self._create_subnet(self.core_plugin, self.admin_ctx, network_id) interface_info = {'subnet_id': subnet['id']} self.plugin.add_router_interface(self.admin_ctx, router['id'], interface_info) filters = {'device_id': 
[router['id']], 'device_owner': [constants.DEVICE_OWNER_HA_REPLICATED_INT]} ports = self.core_plugin.get_ports(self.admin_ctx, filters=filters) port = {'port': {'fixed_ips': [ {'ip_address': '10.0.0.100'}, {'ip_address': '10.0.0.101'}]}} self.assertRaises(n_exc.BadRequest, self.core_plugin.update_port, self.admin_ctx, ports[0]['id'], port) def test_update_router_port_bindings_no_ports(self): self.plugin._update_router_port_bindings( self.admin_ctx, {}, self.agent1['host']) def _get_first_interface(self, router_id): device_filter = {'device_id': [router_id], 'device_owner': [constants.DEVICE_OWNER_HA_REPLICATED_INT]} return self.core_plugin.get_ports( self.admin_ctx, filters=device_filter)[0] def _get_router_port_bindings(self, router_id): device_filter = {'device_id': [router_id], 'device_owner': [constants.DEVICE_OWNER_HA_REPLICATED_INT, constants.DEVICE_OWNER_ROUTER_SNAT, constants.DEVICE_OWNER_ROUTER_GW]} return self.core_plugin.get_ports( self.admin_ctx, filters=device_filter) def test_update_router_port_bindings_updates_host(self): ext_net = self._create_network(self.core_plugin, self.admin_ctx, external=True) network_id = self._create_network(self.core_plugin, self.admin_ctx) subnet = self._create_subnet(self.core_plugin, self.admin_ctx, network_id) interface_info = {'subnet_id': subnet['id']} router = self._create_router() self.plugin._update_router_gw_info(self.admin_ctx, router['id'], {'network_id': ext_net}) self.plugin.add_router_interface(self.admin_ctx, router['id'], interface_info) self.plugin._update_router_port_bindings( self.admin_ctx, {router['id']: 'active'}, self.agent1['host']) for port in self._get_router_port_bindings(router['id']): self.assertEqual(self.agent1['host'], port[portbindings.HOST_ID]) self.plugin._update_router_port_bindings( self.admin_ctx, {router['id']: 'active'}, self.agent2['host']) for port in self._get_router_port_bindings(router['id']): self.assertEqual(self.agent2['host'], port[portbindings.HOST_ID]) def test_update_router_port_bindings_updates_host_only(self): ext_net = self._create_network(self.core_plugin, self.admin_ctx, external=True) network_id = self._create_network(self.core_plugin, self.admin_ctx) subnet = self._create_subnet(self.core_plugin, self.admin_ctx, network_id) interface_info = {'subnet_id': subnet['id']} router = self._create_router() self.plugin._update_router_gw_info(self.admin_ctx, router['id'], {'network_id': ext_net}) iface = self.plugin.add_router_interface(self.admin_ctx, router['id'], interface_info) with mock.patch.object( self.plugin._core_plugin, 'update_port') as update_port_mock: self.plugin._update_router_port_bindings( self.admin_ctx, {router['id']: 'active'}, self.agent1['host']) port_payload = { port_def.RESOURCE_NAME: { portbindings.HOST_ID: self.agent1['host'] } } update_port_mock.assert_called_with( mock.ANY, iface['port_id'], port_payload) def test_update_all_ha_network_port_statuses(self): router = self._create_router(ha=True) callback = l3_rpc.L3RpcCallback() callback._l3plugin = self.plugin host = self.agent1['host'] ctx = self.admin_ctx bindings = self.plugin.get_ha_router_port_bindings( ctx, [router['id']]) binding = [binding for binding in bindings if binding.l3_agent_id == self.agent1['id']][0] port = self.core_plugin.get_port(ctx, binding.port_id) # As network segments are not available, mock bind_port # to avoid binding failures def bind_port(context): binding = context._binding binding.vif_type = portbindings.VIF_TYPE_OVS with mock.patch.object(self.core_plugin.mechanism_manager, 'bind_port', 
side_effect=bind_port): callback._ensure_host_set_on_port( ctx, host, port, router_id=router['id']) # Port status will be DOWN by default as we are not having # l2 agent in test, so update it to ACTIVE. self.core_plugin.update_port_status( ctx, port['id'], constants.PORT_STATUS_ACTIVE, host=host) port = self.core_plugin.get_port(ctx, port['id']) self.assertEqual(constants.PORT_STATUS_ACTIVE, port['status']) callback.update_all_ha_network_port_statuses(ctx, host) port = self.core_plugin.get_port(ctx, port['id']) self.assertEqual(constants.PORT_STATUS_DOWN, port['status']) def test_ensure_host_set_on_ports_dvr_ha_binds_to_active(self): agent3 = helpers.register_l3_agent('host_3', constants.L3_AGENT_MODE_DVR_SNAT) ext_net = self._create_network(self.core_plugin, self.admin_ctx, external=True) int_net = self._create_network(self.core_plugin, self.admin_ctx) subnet = self._create_subnet(self.core_plugin, self.admin_ctx, int_net) interface_info = {'subnet_id': subnet['id']} router = self._create_router(ha=True, distributed=True) self.plugin._update_router_gw_info(self.admin_ctx, router['id'], {'network_id': ext_net}) self.plugin.add_router_interface(self.admin_ctx, router['id'], interface_info) ctx = self.admin_ctx bindings = self.plugin.get_ha_router_port_bindings( ctx, router_ids=[router['id']], host=self.agent2['host']) self.plugin._set_router_states(ctx, bindings, {router['id']: 'active'}) callback = l3_rpc.L3RpcCallback() callback._l3plugin = self.plugin # Get router with interfaces router = self.plugin._get_dvr_sync_data(self.admin_ctx, self.agent2['host'], self.agent2, [router['id']])[0] callback._ensure_host_set_on_ports(self.admin_ctx, agent3['host'], [router]) device_filter = {'device_id': [router['id']], 'device_owner': [constants.DEVICE_OWNER_ROUTER_SNAT] } port = self.core_plugin.get_ports(self.admin_ctx, filters=device_filter)[0] self.assertNotEqual(agent3['host'], port[portbindings.HOST_ID]) callback._ensure_host_set_on_ports(self.admin_ctx, self.agent2['host'], [router]) port = self.core_plugin.get_ports(self.admin_ctx, filters=device_filter)[0] self.assertEqual(self.agent2['host'], port[portbindings.HOST_ID]) def test_ensure_host_set_on_ports_binds_correctly(self): network_id = self._create_network(self.core_plugin, self.admin_ctx) subnet = self._create_subnet(self.core_plugin, self.admin_ctx, network_id) interface_info = {'subnet_id': subnet['id']} router = self._create_router() self.plugin.add_router_interface(self.admin_ctx, router['id'], interface_info) port = self._get_first_interface(router['id']) self.assertEqual('', port[portbindings.HOST_ID]) # Update the router object to include the first interface router = ( self.plugin.list_active_sync_routers_on_active_l3_agent( self.admin_ctx, self.agent1['host'], [router['id']]))[0] # ensure_host_set_on_ports binds an unbound port callback = l3_rpc.L3RpcCallback() callback._l3plugin = self.plugin callback._ensure_host_set_on_ports( self.admin_ctx, self.agent1['host'], [router]) port = self._get_first_interface(router['id']) self.assertEqual(self.agent1['host'], port[portbindings.HOST_ID]) # ensure_host_set_on_ports does not rebind a bound port router = ( self.plugin.list_active_sync_routers_on_active_l3_agent( self.admin_ctx, self.agent1['host'], [router['id']]))[0] callback._ensure_host_set_on_ports( self.admin_ctx, self.agent2['host'], [router]) port = self._get_first_interface(router['id']) self.assertEqual(self.agent1['host'], port[portbindings.HOST_ID]) def test_is_ha_router_port(self): network_id = 
self._create_network(self.core_plugin, self.admin_ctx) subnet = self._create_subnet(self.core_plugin, self.admin_ctx, network_id) interface_info = {'subnet_id': subnet['id']} router = self._create_router() self.plugin.add_router_interface(self.admin_ctx, router['id'], interface_info) port = self._get_first_interface(router['id']) self.assertTrue(l3_hamode_db.is_ha_router_port( self.admin_ctx, port['device_owner'], port['device_id'])) def test_is_ha_router_port_for_normal_port(self): network_id = self._create_network(self.core_plugin, self.admin_ctx) subnet = self._create_subnet(self.core_plugin, self.admin_ctx, network_id) interface_info = {'subnet_id': subnet['id']} router = self._create_router(ha=False) self.plugin.add_router_interface(self.admin_ctx, router['id'], interface_info) device_filter = {'device_id': [router['id']], 'device_owner': [constants.DEVICE_OWNER_ROUTER_INTF]} port = self.core_plugin.get_ports( self.admin_ctx, filters=device_filter)[0] self.assertFalse(l3_hamode_db.is_ha_router_port( self.admin_ctx, port['device_owner'], port['device_id'])) def test_migration_from_ha(self): router = self._create_router() self.assertTrue(router['ha']) network_id = self._create_network(self.core_plugin, self.admin_ctx) subnet = self._create_subnet(self.core_plugin, self.admin_ctx, network_id) interface_info = {'subnet_id': subnet['id']} self.plugin.add_router_interface(self.admin_ctx, router['id'], interface_info) router = self._migrate_router(router['id'], False) self.assertFalse(router.extra_attributes['ha']) for routerport in router.attached_ports: self.assertEqual(constants.DEVICE_OWNER_ROUTER_INTF, routerport.port_type) self.assertEqual(constants.DEVICE_OWNER_ROUTER_INTF, routerport.port.device_owner) def test__get_sync_routers_with_state_change_and_check_gw_port_host(self): ext_net = self._create_network(self.core_plugin, self.admin_ctx, external=True) network_id = self._create_network(self.core_plugin, self.admin_ctx) subnet = self._create_subnet(self.core_plugin, self.admin_ctx, network_id) interface_info = {'subnet_id': subnet['id']} router = self._create_router() self.plugin._update_router_gw_info(self.admin_ctx, router['id'], {'network_id': ext_net}) self.plugin.add_router_interface(self.admin_ctx, router['id'], interface_info) self.plugin.update_routers_states( self.admin_ctx, {router['id']: constants.HA_ROUTER_STATE_ACTIVE}, self.agent1['host']) self.plugin.update_routers_states( self.admin_ctx, {router['id']: constants.HA_ROUTER_STATE_STANDBY}, self.agent2['host']) routers = self.plugin._get_sync_routers(self.admin_ctx, router_ids=[router['id']]) self.assertEqual(self.agent1['host'], routers[0]['gw_port_host']) self.plugin.update_routers_states( self.admin_ctx, {router['id']: constants.HA_ROUTER_STATE_STANDBY}, self.agent1['host']) self.plugin.update_routers_states( self.admin_ctx, {router['id']: constants.HA_ROUTER_STATE_ACTIVE}, self.agent2['host']) routers = self.plugin._get_sync_routers(self.admin_ctx, router_ids=[router['id']]) self.assertEqual(self.agent2['host'], routers[0]['gw_port_host']) class L3HAUserTestCase(L3HATestFramework): def setUp(self): super(L3HAUserTestCase, self).setUp() self.user_ctx = context.Context('', _uuid()) def test_create_ha_router(self): self._create_router(ctx=self.user_ctx) def test_update_router(self): router = self._create_router(ctx=self.user_ctx) with mock.patch.object(registry, 'publish') as mock_cb: self._update_router(router['id'], ctx=self.user_ctx) mock_cb.assert_called_with('router', events.PRECOMMIT_UPDATE, self.plugin, 
payload=mock.ANY) def test_delete_router(self): router = self._create_router(ctx=self.user_ctx) self.plugin.delete_router(self.user_ctx, router['id'])

neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/test_migration.py

# Copyright 2012 New Dream Network, LLC (DreamHost) # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import os import re import sys import textwrap from alembic.autogenerate import api as alembic_ag_api from alembic import config as alembic_config from alembic.operations import ops as alembic_ops from alembic import script as alembic_script import fixtures import mock from neutron_lib import fixture as lib_fixtures from neutron_lib.utils import helpers from oslo_utils import fileutils import pkg_resources import sqlalchemy as sa from testtools import matchers from neutron.conf.db import migration_cli from neutron.db import migration from neutron.db.migration import autogen from neutron.db.migration import cli from neutron.tests import base from neutron.tests import tools from neutron.tests.unit import testlib_api class FakeConfig(object): service = '' class FakeRevision(object): path = 'fakepath' def __init__(self, labels=None, down_revision=None, is_branch_point=False): if not labels: labels = set() self.branch_labels = labels self.down_revision = down_revision self.is_branch_point = is_branch_point self.revision = helpers.get_random_string(10) self.module = mock.MagicMock() class MigrationEntrypointsMemento(fixtures.Fixture): '''Create a copy of the migration entrypoints map so it can be restored during test cleanup.
''' def _setUp(self): self.ep_backup = {} for proj, ep in migration_cli.migration_entrypoints.items(): self.ep_backup[proj] = copy.copy(ep) self.addCleanup(self.restore) def restore(self): migration_cli.migration_entrypoints = self.ep_backup class TestDbMigration(base.BaseTestCase): def setUp(self): super(TestDbMigration, self).setUp() mock.patch('alembic.op.get_bind').start() self.mock_alembic_is_offline = mock.patch( 'alembic.context.is_offline_mode', return_value=False).start() self.mock_alembic_is_offline.return_value = False self.mock_sa_inspector = mock.patch( 'sqlalchemy.engine.reflection.Inspector').start() def _prepare_mocked_sqlalchemy_inspector(self): mock_inspector = mock.MagicMock() mock_inspector.get_table_names.return_value = ['foo', 'bar'] mock_inspector.get_columns.return_value = [{'name': 'foo_column'}, {'name': 'bar_column'}] self.mock_sa_inspector.from_engine.return_value = mock_inspector def test_schema_has_table(self): self._prepare_mocked_sqlalchemy_inspector() self.assertTrue(migration.schema_has_table('foo')) def test_schema_has_table_raises_if_offline(self): self.mock_alembic_is_offline.return_value = True self.assertRaises(RuntimeError, migration.schema_has_table, 'foo') def test_schema_has_column_missing_table(self): self._prepare_mocked_sqlalchemy_inspector() self.assertFalse(migration.schema_has_column('meh', 'meh')) def test_schema_has_column(self): self._prepare_mocked_sqlalchemy_inspector() self.assertTrue(migration.schema_has_column('foo', 'foo_column')) def test_schema_has_column_raises_if_offline(self): self.mock_alembic_is_offline.return_value = True self.assertRaises(RuntimeError, migration.schema_has_column, 'foo', 'foo_col') def test_schema_has_column_missing_column(self): self._prepare_mocked_sqlalchemy_inspector() self.assertFalse(migration.schema_has_column( 'foo', column_name='meh')) class TestCli(base.BaseTestCase): def setUp(self): super(TestCli, self).setUp() self.do_alembic_cmd_p = mock.patch.object(cli, 'do_alembic_command') self.do_alembic_cmd = self.do_alembic_cmd_p.start() self.mock_alembic_err = mock.patch.object(cli, "log_error").start() self.mock_alembic_warn = mock.patch.object(cli, "log_warning").start() self.mock_alembic_err.side_effect = SystemExit def mocked_root_dir(cfg): return os.path.join('/fake/dir', cli._get_project_base(cfg)) mock_root = mock.patch.object(cli, '_get_package_root_dir').start() mock_root.side_effect = mocked_root_dir # Avoid creating fake directories mock.patch('oslo_utils.fileutils.ensure_tree').start() # Set up some configs and entrypoints for tests to chew on self.configs = [] self.projects = ('neutron', 'networking-foo', 'neutron-fwaas') ini = os.path.join(os.path.dirname(cli.__file__), 'alembic.ini') self.useFixture(MigrationEntrypointsMemento()) migration_cli.migration_entrypoints = {} for project in self.projects: config = alembic_config.Config(ini) config.set_main_option('neutron_project', project) module_name = project.replace('-', '_') + '.db.migration' attrs = ('alembic_migrations',) script_location = ':'.join([module_name, attrs[0]]) config.set_main_option('script_location', script_location) self.configs.append(config) entrypoint = pkg_resources.EntryPoint(project, module_name, attrs=attrs) migration_cli.migration_entrypoints[project] = entrypoint def _main_test_helper(self, argv, func_name, exp_kwargs=[{}]): with mock.patch.object(sys, 'argv', argv),\ mock.patch.object(cli, 'run_sanity_checks'),\ mock.patch.object(cli, 'validate_revisions'): cli.main() def _append_version_path(args): args = 
copy.copy(args) if 'autogenerate' in args and not args['autogenerate']: args['version_path'] = mock.ANY return args self.do_alembic_cmd.assert_has_calls( [mock.call(mock.ANY, func_name, **_append_version_path(kwargs)) for kwargs in exp_kwargs] ) def test_stamp(self): self._main_test_helper( ['prog', 'stamp', 'foo'], 'stamp', [{'revision': 'foo', 'sql': False}] ) self._main_test_helper( ['prog', 'stamp', 'foo', '--sql'], 'stamp', [{'revision': 'foo', 'sql': True}] ) def _validate_cmd(self, cmd): self._main_test_helper( ['prog', cmd], cmd, [{'verbose': False}]) self._main_test_helper( ['prog', cmd, '--verbose'], cmd, [{'verbose': True}]) def test_branches(self): self._validate_cmd('branches') def test_current(self): self._validate_cmd('current') def test_history(self): self._validate_cmd('history') def test_heads(self): self._validate_cmd('heads') def test_check_migration(self): with mock.patch.object(cli, 'validate_head_files') as validate: self._main_test_helper(['prog', 'check_migration'], 'branches') self.assertEqual(len(self.projects), validate.call_count) def _test_database_sync_revision(self, separate_branches=True): with mock.patch.object(cli, 'update_head_files') as update: if separate_branches: mock.patch('os.path.exists').start() expected_kwargs = [{ 'message': 'message', 'sql': False, 'autogenerate': True, }] self._main_test_helper( ['prog', 'revision', '--autogenerate', '-m', 'message'], 'revision', expected_kwargs ) self.assertEqual(len(self.projects), update.call_count) update.reset_mock() expected_kwargs = [{ 'message': 'message', 'sql': True, 'autogenerate': False, 'head': cli._get_branch_head(branch) } for branch in cli.MIGRATION_BRANCHES] for kwarg in expected_kwargs: kwarg['autogenerate'] = False kwarg['sql'] = True self._main_test_helper( ['prog', 'revision', '--sql', '-m', 'message'], 'revision', expected_kwargs ) self.assertEqual(len(self.projects), update.call_count) update.reset_mock() expected_kwargs = [{ 'message': 'message', 'sql': False, 'autogenerate': False, 'head': 'expand@head' }] self._main_test_helper( ['prog', 'revision', '-m', 'message', '--expand'], 'revision', expected_kwargs ) self.assertEqual(len(self.projects), update.call_count) update.reset_mock() for kwarg in expected_kwargs: kwarg['head'] = 'contract@head' self._main_test_helper( ['prog', 'revision', '-m', 'message', '--contract'], 'revision', expected_kwargs ) self.assertEqual(len(self.projects), update.call_count) def test_database_sync_revision(self): self._test_database_sync_revision() def test_database_sync_revision_no_branches(self): # Test that old branchless approach is still supported self._test_database_sync_revision(separate_branches=False) def test_upgrade_revision(self): self._main_test_helper( ['prog', 'upgrade', '--sql', 'head'], 'upgrade', [{'desc': None, 'revision': 'heads', 'sql': True}] ) def test_upgrade_delta(self): self._main_test_helper( ['prog', 'upgrade', '--delta', '3'], 'upgrade', [{'desc': None, 'revision': '+3', 'sql': False}] ) def test_upgrade_revision_delta(self): self._main_test_helper( ['prog', 'upgrade', 'kilo', '--delta', '3'], 'upgrade', [{'desc': None, 'revision': 'kilo+3', 'sql': False}] ) def test_upgrade_expand(self): self._main_test_helper( ['prog', 'upgrade', '--expand'], 'upgrade', [{'desc': cli.EXPAND_BRANCH, 'revision': 'expand@head', 'sql': False}] ) def test_upgrade_expand_contract_are_mutually_exclusive(self): with testlib_api.ExpectedException(SystemExit): self._main_test_helper( ['prog', 'upgrade', '--expand --contract'], 'upgrade') def 
_test_upgrade_conflicts_with_revision(self, mode): with testlib_api.ExpectedException(SystemExit): self._main_test_helper( ['prog', 'upgrade', '--%s revision1' % mode], 'upgrade') def _test_upgrade_conflicts_with_delta(self, mode): with testlib_api.ExpectedException(SystemExit): self._main_test_helper( ['prog', 'upgrade', '--%s +3' % mode], 'upgrade') def _test_revision_autogenerate_conflicts_with_branch(self, branch): with testlib_api.ExpectedException(SystemExit): self._main_test_helper( ['prog', 'revision', '--autogenerate', '--%s' % branch], 'revision') def test_revision_autogenerate_conflicts_with_expand(self): self._test_revision_autogenerate_conflicts_with_branch( cli.EXPAND_BRANCH) def test_revision_autogenerate_conflicts_with_contract(self): self._test_revision_autogenerate_conflicts_with_branch( cli.CONTRACT_BRANCH) def test_upgrade_expand_conflicts_with_revision(self): self._test_upgrade_conflicts_with_revision('expand') def test_upgrade_contract_conflicts_with_revision(self): self._test_upgrade_conflicts_with_revision('contract') def test_upgrade_expand_conflicts_with_delta(self): self._test_upgrade_conflicts_with_delta('expand') def test_upgrade_contract_conflicts_with_delta(self): self._test_upgrade_conflicts_with_delta('contract') def test_upgrade_contract(self): self._main_test_helper( ['prog', 'upgrade', '--contract'], 'upgrade', [{'desc': cli.CONTRACT_BRANCH, 'revision': 'contract@head', 'sql': False}] ) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test_upgrade_milestone_expand_before_contract(self, walk_mock): c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)] c_revs[1].module.neutron_milestone = [migration.LIBERTY] e_revs = [FakeRevision(labels={cli.EXPAND_BRANCH}) for r in range(5)] e_revs[3].module.neutron_milestone = [migration.LIBERTY] walk_mock.return_value = c_revs + e_revs self._main_test_helper( ['prog', '--subproject', 'neutron', 'upgrade', 'liberty'], 'upgrade', [{'desc': cli.EXPAND_BRANCH, 'revision': e_revs[3].revision, 'sql': False}, {'desc': cli.CONTRACT_BRANCH, 'revision': c_revs[1].revision, 'sql': False}] ) def assert_command_fails(self, command): # Avoid cluttering stdout with argparse error messages mock.patch('argparse.ArgumentParser._print_message').start() with mock.patch.object(sys, 'argv', command), mock.patch.object( cli, 'run_sanity_checks'): self.assertRaises(SystemExit, cli.main) def test_downgrade_fails(self): self.assert_command_fails(['prog', 'downgrade', '--sql', 'juno']) def test_upgrade_negative_relative_revision_fails(self): self.assert_command_fails(['prog', 'upgrade', '-2']) def test_upgrade_negative_delta_fails(self): self.assert_command_fails(['prog', 'upgrade', '--delta', '-2']) def test_upgrade_rejects_delta_with_relative_revision(self): self.assert_command_fails(['prog', 'upgrade', '+2', '--delta', '3']) def _test_validate_head_files_helper(self, heads, contract_head='', expand_head=''): fake_config = self.configs[0] head_files_not_exist = (contract_head == expand_head == '') with mock.patch('alembic.script.ScriptDirectory.from_config') as fc,\ mock.patch('os.path.exists') as os_mock: if head_files_not_exist: os_mock.return_value = False else: os_mock.return_value = True fc.return_value.get_heads.return_value = heads revs = {heads[0]: FakeRevision(labels=cli.CONTRACT_BRANCH), heads[1]: FakeRevision(labels=cli.EXPAND_BRANCH)} fc.return_value.get_revision.side_effect = revs.__getitem__ mock_open_con = self.useFixture( lib_fixtures.OpenFixture(cli._get_contract_head_file_path( fake_config), 
contract_head + '\n')).mock_open mock_open_ex = self.useFixture( lib_fixtures.OpenFixture(cli._get_expand_head_file_path( fake_config), expand_head + '\n')).mock_open if contract_head in heads and expand_head in heads: cli.validate_head_files(fake_config) elif head_files_not_exist: cli.validate_head_files(fake_config) self.assertTrue(self.mock_alembic_warn.called) else: self.assertRaises( SystemExit, cli.validate_head_files, fake_config ) self.assertTrue(self.mock_alembic_err.called) if contract_head in heads and expand_head in heads: mock_open_ex.assert_called_with( cli._get_expand_head_file_path(fake_config)) mock_open_con.assert_called_with( cli._get_contract_head_file_path(fake_config)) if not head_files_not_exist: fc.assert_called_once_with(fake_config) def test_validate_head_files_success(self): self._test_validate_head_files_helper(['a', 'b'], contract_head='a', expand_head='b') def test_validate_head_files_missing_file(self): self._test_validate_head_files_helper(['a', 'b']) def test_validate_head_files_wrong_contents(self): self._test_validate_head_files_helper(['a', 'b'], contract_head='c', expand_head='d') @mock.patch.object(fileutils, 'delete_if_exists') def test_update_head_files_success(self, *mocks): heads = ['a', 'b'] mock_open_con = self.useFixture( lib_fixtures.OpenFixture(cli._get_contract_head_file_path( self.configs[0]))).mock_open mock_open_ex = self.useFixture( lib_fixtures.OpenFixture(cli._get_expand_head_file_path( self.configs[0]))).mock_open with mock.patch('alembic.script.ScriptDirectory.from_config') as fc: fc.return_value.get_heads.return_value = heads revs = {heads[0]: FakeRevision(labels=cli.CONTRACT_BRANCH), heads[1]: FakeRevision(labels=cli.EXPAND_BRANCH)} fc.return_value.get_revision.side_effect = revs.__getitem__ cli.update_head_files(self.configs[0]) mock_open_con.return_value.write.assert_called_with( heads[0] + '\n') mock_open_ex.return_value.write.assert_called_with(heads[1] + '\n') old_head_file = cli._get_head_file_path( self.configs[0]) old_heads_file = cli._get_heads_file_path( self.configs[0]) delete_if_exists = mocks[0] self.assertIn(mock.call(old_head_file), delete_if_exists.call_args_list) self.assertIn(mock.call(old_heads_file), delete_if_exists.call_args_list) def test_get_project_base(self): config = alembic_config.Config() config.set_main_option('script_location', 'a.b.c:d') proj_base = cli._get_project_base(config) self.assertEqual('a', proj_base) def test_get_root_versions_dir(self): config = alembic_config.Config() config.set_main_option('script_location', 'a.b.c:d') versions_dir = cli._get_root_versions_dir(config) self.assertEqual('/fake/dir/a/a/b/c/d/versions', versions_dir) def test_get_subproject_script_location(self): foo_ep = cli._get_subproject_script_location('networking-foo') expected = 'networking_foo.db.migration:alembic_migrations' self.assertEqual(expected, foo_ep) def test_get_subproject_script_location_not_installed(self): self.assertRaises( SystemExit, cli._get_subproject_script_location, 'not-installed') def test_get_subproject_base_not_installed(self): self.assertRaises( SystemExit, cli._get_subproject_base, 'not-installed') def test__compare_labels_ok(self): labels = {'label1', 'label2'} fake_revision = FakeRevision(labels) cli._compare_labels(fake_revision, {'label1', 'label2'}) def test__compare_labels_fail_unexpected_labels(self): labels = {'label1', 'label2', 'label3'} fake_revision = FakeRevision(labels) self.assertRaises( SystemExit, cli._compare_labels, fake_revision, {'label1', 'label2'}) 
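# NOTE: neutron keeps its migrations on two alembic branches: 'expand'
# for additive schema changes and 'contract' for destructive ones, and
# every revision must carry exactly the labels of the branch it lives
# on. A sketch of the comparison exercised above, assuming a simplified
# body (the real cli._compare_labels reports the offending revision via
# log_error, which setUp() mocks to raise SystemExit):
#
#     def _compare_labels(revision, expected_labels):
#         if set(revision.branch_labels or ()) != expected_labels:
#             log_error('unexpected labels on %s' % revision.revision)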
@mock.patch.object(cli, '_compare_labels') def test__validate_single_revision_labels_branchless_fail_different_labels( self, compare_mock): fake_down_revision = FakeRevision() fake_revision = FakeRevision(down_revision=fake_down_revision) script_dir = mock.Mock() script_dir.get_revision.return_value = fake_down_revision cli._validate_single_revision_labels(script_dir, fake_revision, label=None) expected_labels = set() compare_mock.assert_has_calls( [mock.call(revision, expected_labels) for revision in (fake_revision, fake_down_revision)] ) @mock.patch.object(cli, '_compare_labels') def test__validate_single_revision_labels_branches_fail_different_labels( self, compare_mock): fake_down_revision = FakeRevision() fake_revision = FakeRevision(down_revision=fake_down_revision) script_dir = mock.Mock() script_dir.get_revision.return_value = fake_down_revision cli._validate_single_revision_labels( script_dir, fake_revision, label='fakebranch') expected_labels = {'fakebranch'} compare_mock.assert_has_calls( [mock.call(revision, expected_labels) for revision in (fake_revision, fake_down_revision)] ) @mock.patch.object(cli, '_validate_single_revision_labels') def test__validate_revision_validates_branches(self, validate_mock): script_dir = mock.Mock() fake_revision = FakeRevision() branch = cli.MIGRATION_BRANCHES[0] fake_revision.path = os.path.join('/fake/path', branch) cli._validate_revision(script_dir, fake_revision) validate_mock.assert_called_with( script_dir, fake_revision, label=branch) @mock.patch.object(cli, '_validate_single_revision_labels') def test__validate_revision_validates_branchless_migrations( self, validate_mock): script_dir = mock.Mock() fake_revision = FakeRevision() cli._validate_revision(script_dir, fake_revision) validate_mock.assert_called_with(script_dir, fake_revision) @mock.patch.object(cli, '_validate_revision') @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test_validate_revisions_walks_thru_all_revisions( self, walk_mock, validate_mock): revisions = [FakeRevision() for i in range(10)] walk_mock.return_value = revisions cli.validate_revisions(self.configs[0]) validate_mock.assert_has_calls( [mock.call(mock.ANY, revision) for revision in revisions] ) @mock.patch.object(cli, '_validate_revision') @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test_validate_revisions_fails_on_multiple_branch_points( self, walk_mock, validate_mock): revisions = [FakeRevision(is_branch_point=True) for i in range(2)] walk_mock.return_value = revisions self.assertRaises( SystemExit, cli.validate_revisions, self.configs[0]) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test__get_branch_points(self, walk_mock): revisions = [FakeRevision(is_branch_point=tools.get_random_boolean) for i in range(50)] walk_mock.return_value = revisions script_dir = alembic_script.ScriptDirectory.from_config( self.configs[0]) self.assertEqual(set(rev for rev in revisions if rev.is_branch_point), set(cli._get_branch_points(script_dir))) @mock.patch.object(cli, '_get_version_branch_path') def test_autogen_process_directives(self, get_version_branch_path): get_version_branch_path.side_effect = lambda cfg, release, branch: ( "/foo/expand" if branch == 'expand' else "/foo/contract") migration_script = alembic_ops.MigrationScript( 'eced083f5df', # these directives will be split into separate # expand/contract scripts alembic_ops.UpgradeOps( ops=[ alembic_ops.CreateTableOp( 'organization', [ sa.Column('id', sa.Integer(), primary_key=True), sa.Column('name', 
sa.String(50), nullable=False) ] ), alembic_ops.ModifyTableOps( 'user', ops=[ alembic_ops.AddColumnOp( 'user', sa.Column('organization_id', sa.Integer()) ), alembic_ops.CreateForeignKeyOp( 'org_fk', 'user', 'organization', ['organization_id'], ['id'] ), alembic_ops.DropConstraintOp( 'user', 'uq_user_org' ), alembic_ops.DropColumnOp( 'user', 'organization_name' ) ] ) ] ), # these will be discarded alembic_ops.DowngradeOps( ops=[ alembic_ops.AddColumnOp( 'user', sa.Column( 'organization_name', sa.String(50), nullable=True) ), alembic_ops.CreateUniqueConstraintOp( 'uq_user_org', 'user', ['user_name', 'organization_name'] ), alembic_ops.ModifyTableOps( 'user', ops=[ alembic_ops.DropConstraintOp('org_fk', 'user'), alembic_ops.DropColumnOp('user', 'organization_id') ] ), alembic_ops.DropTableOp('organization') ] ), message='create the organization table and ' 'replace user.organization_name' ) directives = [migration_script] autogen.process_revision_directives( mock.Mock(), mock.Mock(), directives ) expand = directives[0] contract = directives[1] self.assertEqual("/foo/expand", expand.version_path) self.assertEqual("/foo/contract", contract.version_path) self.assertTrue(expand.downgrade_ops.is_empty()) self.assertTrue(contract.downgrade_ops.is_empty()) def _get_regex(s): s = textwrap.dedent(s) s = re.escape(s) # alembic 0.8.9 added additional leading '# ' before comments return s.replace('\\#\\#\\#\\ ', '(# )?### ') expected_regex = ("""\ ### commands auto generated by Alembic - please adjust! ### op.create_table('organization', sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(length=50), nullable=False), sa.PrimaryKeyConstraint('id') ) op.add_column('user', """ """sa.Column('organization_id', sa.Integer(), nullable=True)) op.create_foreign_key('org_fk', 'user', """ """'organization', ['organization_id'], ['id']) ### end Alembic commands ###""") self.assertThat( alembic_ag_api.render_python_code(expand.upgrade_ops), matchers.MatchesRegex(_get_regex(expected_regex))) expected_regex = ("""\ ### commands auto generated by Alembic - please adjust! 
### op.drop_constraint('user', 'uq_user_org', type_=None) op.drop_column('user', 'organization_name') ### end Alembic commands ###""") self.assertThat( alembic_ag_api.render_python_code(contract.upgrade_ops), matchers.MatchesRegex(_get_regex(expected_regex))) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test__find_milestone_revisions_one_branch(self, walk_mock): c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)] c_revs[1].module.neutron_milestone = [migration.LIBERTY] walk_mock.return_value = c_revs m = cli._find_milestone_revisions(self.configs[0], 'liberty', cli.CONTRACT_BRANCH) self.assertEqual(1, len(m)) m = cli._find_milestone_revisions(self.configs[0], 'liberty', cli.EXPAND_BRANCH) self.assertEqual(0, len(m)) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test__find_milestone_revisions_two_branches(self, walk_mock): c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)] c_revs[1].module.neutron_milestone = [migration.LIBERTY] e_revs = [FakeRevision(labels={cli.EXPAND_BRANCH}) for r in range(5)] e_revs[3].module.neutron_milestone = [migration.LIBERTY] walk_mock.return_value = c_revs + e_revs m = cli._find_milestone_revisions(self.configs[0], 'liberty') self.assertEqual(2, len(m)) m = cli._find_milestone_revisions(self.configs[0], 'mitaka') self.assertEqual(0, len(m)) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test__find_milestone_revisions_branchless(self, walk_mock): revisions = [FakeRevision() for r in range(5)] revisions[2].module.neutron_milestone = [migration.LIBERTY] walk_mock.return_value = revisions m = cli._find_milestone_revisions(self.configs[0], 'liberty') self.assertEqual(1, len(m)) m = cli._find_milestone_revisions(self.configs[0], 'mitaka') self.assertEqual(0, len(m)) class TestSafetyChecks(base.BaseTestCase): def test_validate_revisions(self, *mocks): cli.validate_revisions(cli.get_neutron_config())

neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/test_ovn_hash_ring_db.py

# Copyright 2019 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
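# NOTE: the hash ring table registers API workers ("nodes") per host and
# group, and liveness is derived from each row's updated_at timestamp.
# A sketch of the active-node selection these tests exercise, assuming a
# simplified form of neutron.db.ovn_hash_ring_db.get_active_nodes:
#
#     limit = timeutils.utcnow() - datetime.timedelta(seconds=interval)
#     return session.query(ovn_models.OVNHashRing).filter(
#         ovn_models.OVNHashRing.group_name == group_name,
#         ovn_models.OVNHashRing.updated_at >= limit).all()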
import datetime import mock from neutron_lib import context from neutron_lib.db import api as db_api from oslo_utils import timeutils from oslo_utils import uuidutils from sqlalchemy.orm import exc from neutron.db.models import ovn as ovn_models from neutron.db import ovn_hash_ring_db from neutron.tests.unit import testlib_api HASH_RING_TEST_GROUP = 'test_group' class TestHashRing(testlib_api.SqlTestCaseLight): def setUp(self): super(TestHashRing, self).setUp() self.admin_ctx = context.get_admin_context() self.addCleanup(self._delete_objs) def _delete_objs(self): with db_api.CONTEXT_WRITER.using(self.admin_ctx): self.admin_ctx.session.query( ovn_models.OVNRevisionNumbers).delete() def _get_node_row(self, node_uuid): try: with db_api.CONTEXT_WRITER.using(self.admin_ctx): return self.admin_ctx.session.query( ovn_models.OVNHashRing).filter_by( node_uuid=node_uuid).one() except exc.NoResultFound: return def _add_nodes_and_assert_exists(self, count=1, group_name=HASH_RING_TEST_GROUP): nodes = [] for i in range(count): node_uuid = ovn_hash_ring_db.add_node(self.admin_ctx, group_name) self.assertIsNotNone(self._get_node_row(node_uuid)) nodes.append(node_uuid) return nodes def test_add_node(self): self._add_nodes_and_assert_exists() def test_remove_nodes_from_host(self): nodes = self._add_nodes_and_assert_exists(count=3) # Add another node from a different host with mock.patch.object(ovn_hash_ring_db, 'CONF') as mock_conf: mock_conf.host = 'another-host-' + uuidutils.generate_uuid() another_host_node = self._add_nodes_and_assert_exists()[0] ovn_hash_ring_db.remove_nodes_from_host(self.admin_ctx, HASH_RING_TEST_GROUP) # Assert that all nodes from that host have been removed for n in nodes: self.assertIsNone(self._get_node_row(n)) # Assert that the node from another host wasn't removed self.assertIsNotNone(self._get_node_row(another_host_node)) def test_touch_nodes_from_host(self): nodes = self._add_nodes_and_assert_exists(count=3) # Add another node from a different host with mock.patch.object(ovn_hash_ring_db, 'CONF') as mock_conf: mock_conf.host = 'another-host-' + uuidutils.generate_uuid() another_host_node = self._add_nodes_and_assert_exists()[0] # Assert that updated_at isn't updated yet for node in nodes: node_db = self._get_node_row(node) self.assertEqual(node_db.created_at, node_db.updated_at) # Assert the same for the node from another host node_db = self._get_node_row(another_host_node) self.assertEqual(node_db.created_at, node_db.updated_at) # Touch the nodes from our host ovn_hash_ring_db.touch_nodes_from_host(self.admin_ctx, HASH_RING_TEST_GROUP) # Assert that updated_at is now updated for node in nodes: node_db = self._get_node_row(node) self.assertGreater(node_db.updated_at, node_db.created_at) # Assert that the node from another host hasn't been touched # (updated_at is not updated) node_db = self._get_node_row(another_host_node) self.assertEqual(node_db.created_at, node_db.updated_at) def test_active_nodes(self): self._add_nodes_and_assert_exists(count=3) # Add another node from a different host with mock.patch.object(ovn_hash_ring_db, 'CONF') as mock_conf: mock_conf.host = 'another-host-' + uuidutils.generate_uuid() another_host_node = self._add_nodes_and_assert_exists()[0] # Assert all nodes are active (within 60 seconds) self.assertEqual(4, len(ovn_hash_ring_db.get_active_nodes( self.admin_ctx, interval=60, group_name=HASH_RING_TEST_GROUP))) # Subtract 60 seconds from utcnow() and touch the nodes from our host fake_utcnow = timeutils.utcnow() - datetime.timedelta(seconds=60)
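# NOTE: patching timeutils.utcnow() below back-dates the touch, so the
# touched rows already look 60 seconds old and fall outside the
# liveness window checked by get_active_nodes(interval=60).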
with mock.patch.object(timeutils, 'utcnow') as mock_utcnow: mock_utcnow.return_value = fake_utcnow ovn_hash_ring_db.touch_nodes_from_host(self.admin_ctx, HASH_RING_TEST_GROUP) # Now assert that all nodes from our host are seen as offline. # Only the node from another host should be active active_nodes = ovn_hash_ring_db.get_active_nodes( self.admin_ctx, interval=60, group_name=HASH_RING_TEST_GROUP) self.assertEqual(1, len(active_nodes)) self.assertEqual(another_host_node, active_nodes[0].node_uuid) def test_active_nodes_from_host(self): self._add_nodes_and_assert_exists(count=3) # Add another node from a different host another_host_id = 'another-host-52359446-c366' with mock.patch.object(ovn_hash_ring_db, 'CONF') as mock_conf: mock_conf.host = another_host_id self._add_nodes_and_assert_exists() # Assert only the 3 nodes from this host are returned active_nodes = ovn_hash_ring_db.get_active_nodes( self.admin_ctx, interval=60, group_name=HASH_RING_TEST_GROUP, from_host=True) self.assertEqual(3, len(active_nodes)) self.assertNotIn(another_host_id, active_nodes) def test_touch_node(self): nodes = self._add_nodes_and_assert_exists(count=3) # Assert no nodes were updated yet for node in nodes: node_db = self._get_node_row(node) self.assertEqual(node_db.created_at, node_db.updated_at) # Touch one of the nodes ovn_hash_ring_db.touch_node(self.admin_ctx, nodes[0]) # Assert it has been updated node_db = self._get_node_row(nodes[0]) self.assertGreater(node_db.updated_at, node_db.created_at) # Assert the other two nodes haven't been updated for node in nodes[1:]: node_db = self._get_node_row(node) self.assertEqual(node_db.created_at, node_db.updated_at) def test_active_nodes_different_groups(self): another_group = 'another_test_group' self._add_nodes_and_assert_exists(count=3) self._add_nodes_and_assert_exists(count=2, group_name=another_group) active_nodes = ovn_hash_ring_db.get_active_nodes( self.admin_ctx, interval=60, group_name=HASH_RING_TEST_GROUP) self.assertEqual(3, len(active_nodes)) for node in active_nodes: self.assertEqual(HASH_RING_TEST_GROUP, node.group_name) active_nodes = ovn_hash_ring_db.get_active_nodes( self.admin_ctx, interval=60, group_name=another_group) self.assertEqual(2, len(active_nodes)) for node in active_nodes: self.assertEqual(another_group, node.group_name) def test_remove_nodes_from_host_different_groups(self): another_group = 'another_test_group' group1 = self._add_nodes_and_assert_exists(count=3) group2 = self._add_nodes_and_assert_exists( count=2, group_name=another_group) ovn_hash_ring_db.remove_nodes_from_host(self.admin_ctx, HASH_RING_TEST_GROUP) # Assert that all nodes from that group have been removed for node in group1: self.assertIsNone(self._get_node_row(node)) # Assert that all nodes from a different group are intact for node in group2: self.assertIsNotNone(self._get_node_row(node)) def test_touch_nodes_from_host_different_groups(self): another_group = 'another_test_group' group1 = self._add_nodes_and_assert_exists(count=3) group2 = self._add_nodes_and_assert_exists( count=2, group_name=another_group) # Assert that updated_at isn't updated yet for node in group1 + group2: node_db = self._get_node_row(node) self.assertEqual(node_db.created_at, node_db.updated_at) # Touch the nodes from group1 ovn_hash_ring_db.touch_nodes_from_host(self.admin_ctx, HASH_RING_TEST_GROUP) # Assert that updated_at was updated for group1 for node in group1: node_db = self._get_node_row(node) self.assertGreater(node_db.updated_at, node_db.created_at) # Assert that updated_at wasn't
updated for group2 for node in group2: node_db = self._get_node_row(node) self.assertEqual(node_db.created_at, node_db.updated_at) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/test_ovn_revision_numbers_db.py0000644000175000017500000002615400000000000027704 0ustar00coreycorey00000000000000# Copyright 2019 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib import constants as n_const from neutron_lib import context from neutron_lib.db import api as db_api from oslo_db import exception as db_exc from neutron.api import extensions from neutron.common import config from neutron.db.models import ovn as ovn_models from neutron.db import ovn_revision_numbers_db as ovn_rn_db import neutron.extensions from neutron.services.revisions import revision_plugin from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit.extensions import test_l3 from neutron.tests.unit.extensions import test_securitygroup EXTENSIONS_PATH = ':'.join(neutron.extensions.__path__) PLUGIN_CLASS = ( 'neutron.tests.unit.db.test_ovn_revision_numbers_db.TestMaintenancePlugin') class TestRevisionNumber(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def setUp(self): super(TestRevisionNumber, self).setUp() self.ctx = context.get_admin_context() self.addCleanup(self._delete_objs) res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) self.net = self.deserialize(self.fmt, res)['network'] def _delete_objs(self): with db_api.CONTEXT_WRITER.using(self.ctx): self.ctx.session.query( ovn_models.OVNRevisionNumbers).delete() def _create_initial_revision(self, resource_uuid, resource_type, revision_number=ovn_rn_db.INITIAL_REV_NUM, may_exist=False): with self.ctx.session.begin(subtransactions=True): ovn_rn_db.create_initial_revision( self.ctx, resource_uuid, resource_type, revision_number=revision_number, may_exist=may_exist) def test_bump_revision(self): self._create_initial_revision(self.net['id'], ovn_rn_db.TYPE_NETWORKS) self.net['revision_number'] = 123 ovn_rn_db.bump_revision(self.ctx, self.net, ovn_rn_db.TYPE_NETWORKS) row = ovn_rn_db.get_revision_row(self.ctx, self.net['id']) self.assertEqual(123, row.revision_number) def test_bump_older_revision(self): self._create_initial_revision(self.net['id'], ovn_rn_db.TYPE_NETWORKS, revision_number=124) self.net['revision_number'] = 1 ovn_rn_db.bump_revision(self.ctx, self.net, ovn_rn_db.TYPE_NETWORKS) row = ovn_rn_db.get_revision_row(self.ctx, self.net['id']) self.assertEqual(124, row.revision_number) @mock.patch.object(ovn_rn_db.LOG, 'warning') def test_bump_revision_row_not_found(self, mock_log): self.net['revision_number'] = 123 ovn_rn_db.bump_revision(self.ctx, self.net, ovn_rn_db.TYPE_NETWORKS) # Assert the revision number wasn't bumped row = ovn_rn_db.get_revision_row(self.ctx, self.net['id']) self.assertEqual(123, row.revision_number) self.assertIn('No revision row found for', 
mock_log.call_args[0][0]) def test_delete_revision(self): self._create_initial_revision(self.net['id'], ovn_rn_db.TYPE_NETWORKS) ovn_rn_db.delete_revision(self.ctx, self.net['id'], ovn_rn_db.TYPE_NETWORKS) row = ovn_rn_db.get_revision_row(self.ctx, self.net['id']) self.assertIsNone(row) def test_create_initial_revision_may_exist_duplicated_entry(self): args = (self.net['id'], ovn_rn_db.TYPE_NETWORKS) self._create_initial_revision(*args) # Assert DBDuplicateEntry is raised when may_exist is False (default) self.assertRaises(db_exc.DBDuplicateEntry, self._create_initial_revision, *args) try: self._create_initial_revision(*args, may_exist=True) except db_exc.DBDuplicateEntry: self.fail("create_initial_revision shouldn't raise " "DBDuplicateEntry when may_exist is True") class TestMaintenancePlugin(test_securitygroup.SecurityGroupTestPlugin, test_l3.TestL3NatBasePlugin): __native_pagination_support = True __native_sorting_support = True supported_extension_aliases = ['external-net', 'security-group'] class TestRevisionNumberMaintenance(test_securitygroup.SecurityGroupsTestCase, test_l3.L3NatTestCaseMixin): def setUp(self): service_plugins = { 'router': 'neutron.tests.unit.extensions.test_l3.TestL3NatServicePlugin'} l3_plugin = test_l3.TestL3NatServicePlugin() sec_plugin = test_securitygroup.SecurityGroupTestPlugin() ext_mgr = extensions.PluginAwareExtensionManager( EXTENSIONS_PATH, {'router': l3_plugin, 'sec': sec_plugin} ) super(TestRevisionNumberMaintenance, self).setUp( plugin=PLUGIN_CLASS, service_plugins=service_plugins) app = config.load_paste_app('extensions_test_app') self.ext_api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr) self.session = db_api.get_writer_session() revision_plugin.RevisionPlugin() self.net = self._make_network(self.fmt, 'net1', True)['network'] # Mock the default value for INCONSISTENCIES_OLDER_THAN so # tests won't need to wait for the timeout in order to validate # the database inconsistencies self.older_than_mock = mock.patch( 'neutron.db.ovn_revision_numbers_db.INCONSISTENCIES_OLDER_THAN', -1) self.older_than_mock.start() self.addCleanup(self.older_than_mock.stop) self.ctx = context.get_admin_context() def _create_initial_revision(self, resource_uuid, resource_type, revision_number=ovn_rn_db.INITIAL_REV_NUM, may_exist=False): with self.ctx.session.begin(subtransactions=True): ovn_rn_db.create_initial_revision( self.ctx, resource_uuid, resource_type, revision_number=revision_number, may_exist=may_exist) def test_get_inconsistent_resources(self): # Set the initial revision to -1 to force it to be inconsistent self._create_initial_revision( self.net['id'], ovn_rn_db.TYPE_NETWORKS, revision_number=-1) res = ovn_rn_db.get_inconsistent_resources(self.ctx) self.assertEqual(1, len(res)) self.assertEqual(self.net['id'], res[0].resource_uuid) def test_get_inconsistent_resources_older_than(self): # Stop the mock so INCONSISTENCIES_OLDER_THAN will have # its default value self.older_than_mock.stop() self._create_initial_revision( self.net['id'], ovn_rn_db.TYPE_NETWORKS, revision_number=-1) res = ovn_rn_db.get_inconsistent_resources(self.ctx) # Assert that nothing is returned because the entry is not old # enough to be picked as an inconsistency self.assertEqual(0, len(res)) # Start the mock again and make sure it now shows up as an # inconsistency self.older_than_mock.start() res = ovn_rn_db.get_inconsistent_resources(self.ctx) self.assertEqual(1, len(res)) self.assertEqual(self.net['id'], res[0].resource_uuid) def
test_get_inconsistent_resources_consistent(self): # Set the initial revision to 0 which is the initial revision_number # for recently created resources self._create_initial_revision( self.net['id'], ovn_rn_db.TYPE_NETWORKS, revision_number=0) res = ovn_rn_db.get_inconsistent_resources(self.ctx) # Assert nothing is inconsistent self.assertEqual([], res) def test_get_deleted_resources(self): self._create_initial_revision( self.net['id'], ovn_rn_db.TYPE_NETWORKS, revision_number=0) self._delete('networks', self.net['id']) res = ovn_rn_db.get_deleted_resources(self.ctx) self.assertEqual(1, len(res)) self.assertEqual(self.net['id'], res[0].resource_uuid) self.assertIsNone(res[0].standard_attr_id) def _prepare_resources_for_ordering_test(self, delete=False): subnet = self._make_subnet(self.fmt, {'network': self.net}, '10.0.0.1', '10.0.0.0/24')['subnet'] self._set_net_external(self.net['id']) info = {'network_id': self.net['id']} router = self._make_router(self.fmt, None, external_gateway_info=info)['router'] fip = self._make_floatingip(self.fmt, self.net['id'])['floatingip'] port = self._make_port(self.fmt, self.net['id'])['port'] sg = self._make_security_group(self.fmt, 'sg1', '')['security_group'] rule = self._build_security_group_rule( sg['id'], 'ingress', n_const.PROTO_NUM_TCP) sg_rule = self._make_security_group_rule( self.fmt, rule)['security_group_rule'] self._create_initial_revision(router['id'], ovn_rn_db.TYPE_ROUTERS) self._create_initial_revision(subnet['id'], ovn_rn_db.TYPE_SUBNETS) self._create_initial_revision(fip['id'], ovn_rn_db.TYPE_FLOATINGIPS) self._create_initial_revision(port['id'], ovn_rn_db.TYPE_PORTS) self._create_initial_revision(port['id'], ovn_rn_db.TYPE_ROUTER_PORTS) self._create_initial_revision(sg['id'], ovn_rn_db.TYPE_SECURITY_GROUPS) self._create_initial_revision(sg_rule['id'], ovn_rn_db.TYPE_SECURITY_GROUP_RULES) self._create_initial_revision(self.net['id'], ovn_rn_db.TYPE_NETWORKS) if delete: self._delete('security-group-rules', sg_rule['id']) self._delete('floatingips', fip['id']) self._delete('ports', port['id']) self._delete('security-groups', sg['id']) self._delete('routers', router['id']) self._delete('subnets', subnet['id']) self._delete('networks', self.net['id']) def test_get_inconsistent_resources_order(self): self._prepare_resources_for_ordering_test() res = ovn_rn_db.get_inconsistent_resources(self.ctx) actual_order = tuple(r.resource_type for r in res) self.assertEqual(ovn_rn_db._TYPES_PRIORITY_ORDER, actual_order) def test_get_deleted_resources_order(self): self._prepare_resources_for_ordering_test(delete=True) res = ovn_rn_db.get_deleted_resources(self.ctx) actual_order = tuple(r.resource_type for r in res) self.assertEqual(tuple(reversed(ovn_rn_db._TYPES_PRIORITY_ORDER)), actual_order) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/test_portsecurity_db.py0000644000175000017500000000366000000000000026202 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib.api.definitions import port_security from neutron_lib.plugins import constants from neutron_lib.plugins import directory from neutron.db import portsecurity_db as pd from neutron.db import portsecurity_db_common as pdc from neutron.tests import base common = pdc.PortSecurityDbCommon class FakePlugin(pd.PortSecurityDbMixin): supported_extension_aliases = [port_security.ALIAS] class PortSecurityDbMixinTestCase(base.BaseTestCase): def setUp(self): super(PortSecurityDbMixinTestCase, self).setUp() self.plugin = FakePlugin() directory.add_plugin(constants.CORE, self.plugin) @mock.patch.object(common, '_extend_port_security_dict') def test__extend_port_security_dict_relies_on_common(self, extend): response = mock.Mock() dbdata = mock.Mock() self.plugin._extend_port_security_dict(response, dbdata) extend.assert_called_once_with(response, dbdata) @mock.patch.object(common, '_extend_port_security_dict') def test__extend_port_security_dict_ignored_if_extension_disabled(self, extend): response = mock.Mock() dbdata = mock.Mock() self.plugin.supported_extension_aliases = [] self.plugin._extend_port_security_dict(response, dbdata) self.assertFalse(extend.called) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/test_portsecurity_db_common.py0000644000175000017500000000616700000000000027557 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
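# Illustrative sketch of how the PortSecurityDbCommon mixin tested below is
# driven. The class, method and attribute names mirror this file's tests;
# the bare subclass and the dicts are assumptions for illustration only.
#
#     from neutron_lib.api.definitions import port_security as psec
#
#     from neutron.db import portsecurity_db_common as pdc
#
#     class DemoPlugin(pdc.PortSecurityDbCommon):
#         pass
#
#     plugin = DemoPlugin()
#     response = {}
#     # With no port security binding present in the DB data, the extension
#     # defaults the port security attribute to True in the response dict.
#     plugin._extend_port_security_dict(response, {'port_security': None})
#     assert response[psec.PORTSECURITY]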
import mock from neutron_lib.api.definitions import port_security as psec from neutron.db import portsecurity_db_common as pdc from neutron.objects import base as objects_base from neutron.objects import network from neutron.objects.port.extensions import port_security as p_ps from neutron.tests import base class FakePlugin(pdc.PortSecurityDbCommon): pass class PortSecurityDbCommonTestCase(base.BaseTestCase): def setUp(self): super(PortSecurityDbCommonTestCase, self).setUp() self.plugin = FakePlugin() def _test__get_security_binding_no_binding(self, getter): port_sec_enabled = True req = {psec.PORTSECURITY: port_sec_enabled} res = {} with mock.patch.object( objects_base.NeutronDbObject, 'get_object', return_value=None): val = getter(req, res) self.assertEqual(port_sec_enabled, val) def test__get_port_security_binding_no_binding(self): self._test__get_security_binding_no_binding( self.plugin._get_port_security_binding) def test__get_network_security_binding_no_binding(self): self._test__get_security_binding_no_binding( self.plugin._get_network_security_binding) def _test__process_security_update_no_binding(self, res_name, obj_cls, updater): req = {psec.PORTSECURITY: False} res = {'id': 'fake-id'} context = mock.MagicMock() with mock.patch.object( self.plugin, '_process_port_security_create') as creator: with mock.patch.object( objects_base.NeutronDbObject, 'get_object', return_value=None): updater(context, req, res) creator.assert_called_with(context, obj_cls, res_name, req, res) def test__process_port_port_security_update_no_binding(self): self._test__process_security_update_no_binding( 'port', p_ps.PortSecurity, self.plugin._process_port_port_security_update) def test__process_network_port_security_update_no_binding(self): self._test__process_security_update_no_binding( 'network', network.NetworkPortSecurity, self.plugin._process_network_port_security_update) def test__extend_port_security_dict_no_port_security(self): for db_data in ({'port_security': None, 'name': 'net1'}, {}): response_data = {} self.plugin._extend_port_security_dict(response_data, db_data) self.assertTrue(response_data[psec.PORTSECURITY]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/test_provisioning_blocks.py0000644000175000017500000001545200000000000027046 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
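# Illustrative sketch of the provisioning-barrier flow exercised below. The
# calls and the callback signature mirror this file's tests; `port_id`
# stands for an existing port UUID and the component names are assumptions
# for illustration only. The PROVISIONING_COMPLETE event fires only once
# every registered component has reported in.
#
#     from neutron_lib.callbacks import registry
#     from neutron_lib.callbacks import resources
#     from neutron_lib import context
#
#     from neutron.db import provisioning_blocks as pb
#
#     def on_provisioned(resource, event, trigger, payload=None):
#         print('port %s is fully provisioned' % payload.resource_id)
#
#     registry.subscribe(on_provisioned, resources.PORT,
#                        pb.PROVISIONING_COMPLETE)
#     ctx = context.get_admin_context()
#     pb.add_provisioning_component(ctx, port_id, resources.PORT, 'dhcp')
#     pb.add_provisioning_component(ctx, port_id, resources.PORT, 'l2')
#     pb.provisioning_complete(ctx, port_id, resources.PORT, 'dhcp')
#     # Still blocked: the 'l2' component has not reported yet.
#     assert pb.is_object_blocked(ctx, port_id, resources.PORT)
#     pb.provisioning_complete(ctx, port_id, resources.PORT, 'l2')
#     # on_provisioned fired during the call above.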
import mock import netaddr from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import context as n_ctx import testtools from neutron.db import models_v2 from neutron.db import provisioning_blocks as pb from neutron.objects import network as net_obj from neutron.objects import ports as port_obj from neutron.tests.unit import testlib_api CORE_PLUGIN = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' class TestStatusBarriers(testlib_api.SqlTestCase): def setUp(self): super(TestStatusBarriers, self).setUp() self.setup_coreplugin(CORE_PLUGIN) self.ctx = n_ctx.get_admin_context() self.provisioned = mock.Mock() self.port = self._make_port() registry.subscribe(self.provisioned, resources.PORT, pb.PROVISIONING_COMPLETE) def _make_net(self): network_obj = net_obj.Network(self.ctx, name='net_net', status='ACTIVE', project_id='1', admin_state_up=True) network_obj.create() return network_obj def _make_port(self): net = self._make_net() mac_address = netaddr.EUI('1') port = port_obj.Port(self.ctx, network_id=net.id, device_owner='3', project_id='1', admin_state_up=True, status='DOWN', device_id='2', mac_address=mac_address) port.create() return port def test_no_callback_on_missing_object(self): pb.provisioning_complete(self.ctx, 'someid', resources.PORT, 'entity') self.assertFalse(self.provisioned.called) def test_provisioned_with_no_components(self): pb.provisioning_complete(self.ctx, self.port.id, resources.PORT, 'entity') self.assertTrue(self.provisioned.called) def test_provisioned_after_component_finishes(self): pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT, 'entity') pb.provisioning_complete(self.ctx, self.port.id, resources.PORT, 'entity') self.assertTrue(self.provisioned.called) def test_not_provisioned_until_final_component_complete(self): pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT, 'entity1') pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT, 'entity2') pb.provisioning_complete(self.ctx, self.port.id, resources.PORT, 'entity1') self.assertFalse(self.provisioned.called) pb.provisioning_complete(self.ctx, self.port.id, resources.PORT, 'entity2') self.assertTrue(self.provisioned.called) def test_provisioning_of_correct_item(self): port2 = self._make_port() pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT, 'entity1') pb.provisioning_complete(self.ctx, port2.id, resources.PORT, 'entity1') self.provisioned.assert_called_once_with( resources.PORT, pb.PROVISIONING_COMPLETE, mock.ANY, payload=mock.ANY) payload = self.provisioned.mock_calls[0][2]['payload'] self.assertEqual(self.ctx, payload.context) self.assertEqual(port2.id, payload.resource_id) def test_not_provisioned_when_wrong_component_reports(self): pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT, 'entity1') pb.provisioning_complete(self.ctx, self.port.id, resources.PORT, 'entity2') self.assertFalse(self.provisioned.called) def test_is_object_blocked(self): pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT, 'e1') self.assertTrue(pb.is_object_blocked(self.ctx, self.port.id, resources.PORT)) self.assertFalse(pb.is_object_blocked(self.ctx, 'xyz', resources.PORT)) pb.provisioning_complete(self.ctx, self.port.id, resources.PORT, 'e1') self.assertFalse(pb.is_object_blocked(self.ctx, self.port.id, resources.PORT)) def test_remove_provisioning_component(self): pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT, 'e1') pb.add_provisioning_component(self.ctx, 
self.port.id, resources.PORT, 'e2') self.assertTrue(pb.remove_provisioning_component( self.ctx, self.port.id, resources.PORT, 'e1')) self.assertFalse(self.provisioned.called) pb.provisioning_complete(self.ctx, self.port.id, resources.PORT, 'other') self.assertFalse(self.provisioned.called) pb.provisioning_complete(self.ctx, self.port.id, resources.PORT, 'e2') self.assertTrue(self.provisioned.called) def test_adding_component_idempotent(self): for i in range(5): pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT, 'entity1') pb.provisioning_complete(self.ctx, self.port.id, resources.PORT, 'entity1') self.assertTrue(self.provisioned.called) def test_adding_component_for_new_resource_type(self): provisioned = mock.Mock() registry.subscribe(provisioned, 'NETWORK', pb.PROVISIONING_COMPLETE) net = self._make_net() # expect failure because the model was not registered for the type with testtools.ExpectedException(RuntimeError): pb.add_provisioning_component(self.ctx, net.id, 'NETWORK', 'ent') pb.add_model_for_resource('NETWORK', models_v2.Network) pb.add_provisioning_component(self.ctx, net.id, 'NETWORK', 'ent') pb.provisioning_complete(self.ctx, net.id, 'NETWORK', 'ent') self.assertTrue(provisioned.called) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/test_rbac_db_mixin.py0000644000175000017500000003541100000000000025540 0ustar00coreycorey00000000000000# Copyright (c) 2016 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
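# Illustrative sketch of the RBAC policy payloads exercised by the tests
# below. The dict shape and the plugin entry points mirror this file's
# tests; `plugin`, `context`, the ids and the tenant names are assumptions
# for illustration only.
#
#     policy = {'rbac_policy': {'project_id': 'project-a',
#                               'object_id': net_id,
#                               'object_type': 'network',
#                               'action': 'access_as_shared',
#                               'target_tenant': 'project-b'}}
#     created = plugin.create_rbac_policy(context, policy)
#     # Retargeting an existing policy only needs the changed field; '*'
#     # shares the object with every tenant.
#     plugin.update_rbac_policy(context, created['id'],
#                               {'rbac_policy': {'target_tenant': '*'}})
#     plugin.delete_rbac_policy(context, created['id'])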
import mock from neutron_lib.callbacks import events from neutron_lib import constants from neutron_lib import context from oslo_utils import uuidutils import testtools from neutron.db.db_base_plugin_v2 import NeutronDbPluginV2 as db_plugin_v2 from neutron.db import rbac_db_models from neutron.extensions import rbac as ext_rbac from neutron.objects import network as network_obj from neutron.objects.qos import policy as qos_policy_obj from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin class NetworkRbacTestcase(test_plugin.NeutronDbPluginV2TestCase): def setUp(self): self.context = context.get_admin_context() super(NetworkRbacTestcase, self).setUp(plugin='ml2') def _make_networkrbac(self, network, target, action='access_as_shared'): policy = { 'rbac_policy': {'project_id': network['network']['project_id'], 'object_id': network['network']['id'], 'object_type': 'network', 'action': action, 'target_tenant': target}} return policy def _setup_networkrbac_and_port(self, network, target_tenant): policy = self._make_networkrbac(network, target_tenant) netrbac = self.plugin.create_rbac_policy(self.context, policy) test_port = {'port': {'name': 'test-port', 'network_id': network['network']['id'], 'mac_address': constants.ATTR_NOT_SPECIFIED, 'fixed_ips': constants.ATTR_NOT_SPECIFIED, 'admin_state_up': True, 'device_id': 'device_id', 'device_owner': 'device_owner', 'project_id': target_tenant, 'tenant_id': target_tenant}} port = self.plugin.create_port(self.context, test_port) return netrbac, port def _assert_external_net_state(self, net_id, is_external): req = self.new_show_request('networks', net_id) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(is_external, res['network']['router:external']) def test_create_network_rbac_external(self): with self.network() as ext_net: net_id = ext_net['network']['id'] self._assert_external_net_state(net_id, is_external=False) policy = self._make_networkrbac(ext_net, '*', 'access_as_external') self.plugin.create_rbac_policy(self.context, policy) self._assert_external_net_state(net_id, is_external=True) def test_create_network_rbac_shared_existing(self): tenant = 'test-tenant' with self.network() as net: policy = self._make_networkrbac(net, tenant, rbac_db_models.ACCESS_SHARED) self.plugin.create_rbac_policy(self.context, policy) # Give server maximum of 10 seconds to make sure we don't hit DB # retry mechanism when resource already exists with self.assert_max_execution_time(10): with testtools.ExpectedException( ext_rbac.DuplicateRbacPolicy): self.plugin.create_rbac_policy(self.context, policy) def test_update_network_rbac_external_valid(self): orig_target = 'test-tenant-2' new_target = 'test-tenant-3' with self.network() as ext_net: policy = self._make_networkrbac(ext_net, orig_target, 'access_as_external') netrbac = self.plugin.create_rbac_policy(self.context, policy) update_policy = {'rbac_policy': {'target_tenant': new_target}} netrbac2 = self.plugin.update_rbac_policy(self.context, netrbac['id'], update_policy) policy['rbac_policy']['target_tenant'] = new_target for k, v in policy['rbac_policy'].items(): self.assertEqual(netrbac2[k], v) def test_delete_network_rbac_external(self): with self.network() as ext_net: net_id = ext_net['network']['id'] self._assert_external_net_state(net_id, is_external=False) policy = self._make_networkrbac(ext_net, '*', 'access_as_external') net_rbac = self.plugin.create_rbac_policy(self.context, policy) self._assert_external_net_state(net_id, is_external=True) 
self.plugin.delete_rbac_policy(self.context, net_rbac['id']) self._assert_external_net_state(net_id, is_external=False) def test_delete_network_rbac_external_with_multi_rbac_policy(self): with self.network() as ext_net: net_id = ext_net['network']['id'] self._assert_external_net_state(net_id, is_external=False) policy1 = self._make_networkrbac(ext_net, 'test-tenant-1', 'access_as_external') net_rbac1 = self.plugin.create_rbac_policy(self.context, policy1) self._assert_external_net_state(net_id, is_external=True) policy2 = self._make_networkrbac(ext_net, 'test-tenant-2', 'access_as_external') self.plugin.create_rbac_policy(self.context, policy2) self._assert_external_net_state(net_id, is_external=True) self.plugin.delete_rbac_policy(self.context, net_rbac1['id']) self._assert_external_net_state(net_id, is_external=True) def test_delete_external_network_shared_rbac(self): with self.network() as ext_net: net_id = ext_net['network']['id'] self.plugin.update_network( self.context, net_id, {'network': {'router:external': True}}) self._assert_external_net_state(net_id, is_external=True) policy = self._make_networkrbac(ext_net, 'test-tenant-2') net_rbac = self.plugin.create_rbac_policy(self.context, policy) self.plugin.delete_rbac_policy(self.context, net_rbac['id']) # Make sure that the external attribute has not changed. self._assert_external_net_state(net_id, is_external=True) def test_update_networkrbac_valid(self): orig_target = 'test-tenant-2' new_target = 'test-tenant-3' with self.network() as net: policy = self._make_networkrbac(net, orig_target) netrbac = self.plugin.create_rbac_policy(self.context, policy) update_policy = {'rbac_policy': {'target_tenant': new_target}} netrbac2 = self.plugin.update_rbac_policy(self.context, netrbac['id'], update_policy) policy['rbac_policy']['target_tenant'] = new_target for k, v in policy['rbac_policy'].items(): self.assertEqual(netrbac2[k], v) def test_delete_networkrbac_in_use_fail(self): with self.network() as net: netrbac, _ = self._setup_networkrbac_and_port( network=net, target_tenant='test-tenant-2') self.assertRaises(ext_rbac.RbacPolicyInUse, self.plugin.delete_rbac_policy, self.context, netrbac['id']) def test_port_presence_prevents_network_rbac_policy_deletion(self): with self.network() as net: netrbac, port = self._setup_networkrbac_and_port( network=net, target_tenant='alice') self.assertRaises(ext_rbac.RbacPolicyInUse, self.plugin.delete_rbac_policy, self.context, netrbac['id']) # a wildcard policy should allow the specific policy to be deleted # since it allows the remaining port wild_policy = self._make_networkrbac(net, '*') wild_policy = self.plugin.create_rbac_policy(self.context, wild_policy) self.plugin.delete_rbac_policy(self.context, netrbac['id']) # now that the wildcard is the only one remaining, it should be # subject to the same restriction self.assertRaises(ext_rbac.RbacPolicyInUse, self.plugin.delete_rbac_policy, self.context, wild_policy['id']) # similarly, we can't update the policy to a different tenant update_policy = {'rbac_policy': {'target_tenant': 'bob'}} self.assertRaises(ext_rbac.RbacPolicyInUse, self.plugin.update_rbac_policy, self.context, wild_policy['id'], update_policy) # after port anchor is gone, update and delete should pass self.plugin.delete_port(self.context, port['id']) self.plugin.update_rbac_policy( self.context, wild_policy['id'], update_policy) self.plugin.delete_rbac_policy(self.context, wild_policy['id']) # check that policy is indeed gone self.assertRaises(ext_rbac.RbacPolicyNotFound,
self.plugin.get_rbac_policy, self.context, wild_policy['id']) def test_delete_networkrbac_self_share(self): net_id = 'my-network' net_owner = 'my-tenant-id' # NOTE(ralonsoh): keep "tenant_id" for compatibility purposes in # NeutronDbPluginV2.validate_network_rbac_policy_change() net = {'network': {'id': net_id, 'tenant_id': net_owner, 'project_id': net_owner}} policy = self._make_networkrbac(net, net_owner)['rbac_policy'] with mock.patch.object(db_plugin_v2, '_get_network') as get_net,\ mock.patch.object(db_plugin_v2, 'ensure_no_tenant_ports_on_network') as ensure: get_net.return_value = net['network'] payload = events.DBEventPayload( self.context, states=(policy,), metadata={'object_type': 'network'}) self.plugin.validate_network_rbac_policy_change( None, events.BEFORE_DELETE, None, payload=payload) self.assertEqual(0, ensure.call_count) def test_update_self_share_networkrbac(self): net_id = 'my-network' net_owner = 'my-tenant-id' # NOTE(ralonsoh): keep "tenant_id" for compatibility purposes in # NeutronDbPluginV2.validate_network_rbac_policy_change() net = {'network': {'id': net_id, 'tenant_id': net_owner, 'project_id': net_owner}} policy = self._make_networkrbac(net, net_owner)['rbac_policy'] with mock.patch.object(db_plugin_v2, '_get_network') as get_net,\ mock.patch.object(db_plugin_v2, 'ensure_no_tenant_ports_on_network') as ensure: get_net.return_value = net['network'] payload = events.DBEventPayload( self.context, states=(policy,), request_body={'target_tenant': 'new-target-tenant'}, metadata={'object_type': 'network'}) self.plugin.validate_network_rbac_policy_change( None, events.BEFORE_UPDATE, None, payload=payload) self.assertEqual(0, ensure.call_count) def _create_rbac_obj(self, _class): return _class(id=uuidutils.generate_uuid(), project_id='project_id', object_id=uuidutils.generate_uuid(), target_tenant='target_tenant', action=rbac_db_models.ACCESS_SHARED) @mock.patch.object(qos_policy_obj.QosPolicyRBAC, 'get_objects') def test_get_rbac_policies_qos_policy(self, mock_qos_get_objects): qos_policy_rbac = self._create_rbac_obj(qos_policy_obj.QosPolicyRBAC) mock_qos_get_objects.return_value = [qos_policy_rbac] filters = {'object_type': ['qos_policy']} rbac_policies = self.plugin.get_rbac_policies(self.context, filters) self.assertEqual(1, len(rbac_policies)) self.assertEqual(self.plugin._make_rbac_policy_dict(qos_policy_rbac), rbac_policies[0]) @mock.patch.object(network_obj.NetworkRBAC, 'get_objects') def test_get_rbac_policies_network(self, mock_net_get_objects): net_rbac = self._create_rbac_obj(network_obj.NetworkRBAC) mock_net_get_objects.return_value = [net_rbac] filters = {'object_type': ['network']} rbac_policies = self.plugin.get_rbac_policies(self.context, filters) self.assertEqual(1, len(rbac_policies)) self.assertEqual(self.plugin._make_rbac_policy_dict(net_rbac), rbac_policies[0]) @mock.patch.object(qos_policy_obj.QosPolicyRBAC, 'get_objects') @mock.patch.object(network_obj.NetworkRBAC, 'get_objects') def test_get_rbac_policies_all_classes(self, mock_net_get_objects, mock_qos_get_objects): net_rbac = self._create_rbac_obj(network_obj.NetworkRBAC) qos_policy_rbac = self._create_rbac_obj(qos_policy_obj.QosPolicyRBAC) mock_net_get_objects.return_value = [net_rbac] mock_qos_get_objects.return_value = [qos_policy_rbac] rbac_policies = self.plugin.get_rbac_policies(self.context) self.assertEqual(2, len(rbac_policies)) rbac_policies = sorted(rbac_policies, key=lambda k: k['object_type']) self.assertEqual(self.plugin._make_rbac_policy_dict(net_rbac), rbac_policies[0]) 
self.assertEqual(self.plugin._make_rbac_policy_dict(qos_policy_rbac), rbac_policies[1]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/test_securitygroups_db.py0000644000175000017500000006725200000000000026544 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import mock from neutron_lib.callbacks import events from neutron_lib.callbacks import exceptions from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib import context import sqlalchemy import testtools from neutron.db import securitygroups_db from neutron.extensions import securitygroup from neutron import quota from neutron.services.revisions import revision_plugin from neutron.tests.unit import testlib_api FAKE_SECGROUP = { 'security_group': { "tenant_id": 'fake', 'description': 'fake', 'name': 'fake' } } FAKE_SECGROUP_RULE = { 'security_group_rule': { "tenant_id": 'fake', 'description': 'fake', 'name': 'fake', 'port_range_min': '21', 'protocol': 'tcp', 'port_range_max': '23', 'remote_ip_prefix': '10.0.0.1', 'ethertype': 'IPv4', 'remote_group_id': None, 'security_group_id': 'None', 'direction': 'ingress' } } DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' def fake_callback(resource, event, *args, **kwargs): raise KeyError('bar') class SecurityGroupDbMixinImpl(securitygroups_db.SecurityGroupDbMixin): pass class SecurityGroupDbMixinTestCase(testlib_api.SqlTestCase): def setUp(self): super(SecurityGroupDbMixinTestCase, self).setUp() self.setup_coreplugin(core_plugin=DB_PLUGIN_KLASS) self.ctx = context.get_admin_context() self.mixin = SecurityGroupDbMixinImpl() make_res = mock.patch.object(quota.QuotaEngine, 'make_reservation') self.mock_quota_make_res = make_res.start() commit_res = mock.patch.object(quota.QuotaEngine, 'commit_reservation') self.mock_quota_commit_res = commit_res.start() def test_create_security_group_conflict(self): with mock.patch.object(registry, "publish") as mock_publish: mock_publish.side_effect = exceptions.CallbackFailure(Exception()) secgroup = {'security_group': mock.ANY} with testtools.ExpectedException( securitygroup.SecurityGroupConflict): self.mixin.create_security_group(self.ctx, secgroup) def test_delete_security_group_in_use(self): with mock.patch.object(self.mixin, '_get_port_security_group_bindings'),\ mock.patch.object(self.mixin, '_get_security_group'),\ mock.patch.object(registry, "notify") as mock_notify: mock_notify.side_effect = exceptions.CallbackFailure(Exception()) with testtools.ExpectedException(securitygroup.SecurityGroupInUse): self.mixin.delete_security_group(self.ctx, mock.ANY) def test_update_security_group_statefulness_binded_conflict(self): FAKE_SECGROUP['security_group']['stateful'] = mock.ANY sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP) FAKE_SECGROUP['security_group']['stateful'] = not sg_dict['stateful'] with 
mock.patch.object(self.mixin, '_get_port_security_group_bindings'), \ mock.patch.object(registry, "notify") as mock_notify: mock_notify.side_effect = exceptions.CallbackFailure(Exception()) with testtools.ExpectedException(securitygroup.SecurityGroupInUse): self.mixin.update_security_group(self.ctx, sg_dict['id'], FAKE_SECGROUP) def test_update_security_group_conflict(self): with mock.patch.object(registry, "notify") as mock_notify: mock_notify.side_effect = exceptions.CallbackFailure(Exception()) secgroup = {'security_group': FAKE_SECGROUP} with testtools.ExpectedException( securitygroup.SecurityGroupConflict): self.mixin.update_security_group(self.ctx, 'foo_id', secgroup) def test_create_security_group_rule_conflict(self): with mock.patch.object(self.mixin, '_validate_security_group_rule'),\ mock.patch.object(self.mixin, '_check_for_duplicate_rules'),\ mock.patch.object(registry, "notify") as mock_notify: mock_notify.side_effect = exceptions.CallbackFailure(Exception()) with testtools.ExpectedException( securitygroup.SecurityGroupConflict): self.mixin.create_security_group_rule( self.ctx, mock.MagicMock()) def test__check_for_duplicate_rules_does_not_drop_protocol(self): with mock.patch.object(self.mixin, 'get_security_group', return_value=None): context = mock.Mock() rule_dict = { 'security_group_rule': {'protocol': None, 'tenant_id': 'fake', 'security_group_id': 'fake', 'direction': 'fake'} } self.mixin._check_for_duplicate_rules(context, 'fake', [rule_dict]) self.assertIn('protocol', rule_dict['security_group_rule']) def test__check_for_duplicate_rules_ignores_rule_id(self): rules = [{'security_group_rule': {'protocol': 'tcp', 'id': 'fake1'}}, {'security_group_rule': {'protocol': 'tcp', 'id': 'fake2'}}] # NOTE(arosen): the name of this exception is a little misleading # in this case, as this test verifies that the id fields are dropped # while the rules are compared. This covers the case where a plugin # specifies the rule ids itself.
with mock.patch.object(self.mixin, 'get_security_group', return_value=None): self.assertRaises(securitygroup.DuplicateSecurityGroupRuleInPost, self.mixin._check_for_duplicate_rules, context, 'fake', rules) def test_check_for_duplicate_diff_rules_remote_ip_prefix_ipv4(self): fake_secgroup = copy.deepcopy(FAKE_SECGROUP) fake_secgroup['security_group_rules'] = \ [{'id': 'fake', 'tenant_id': 'fake', 'ethertype': 'IPv4', 'direction': 'ingress', 'security_group_id': 'fake', 'remote_ip_prefix': None}] with mock.patch.object(self.mixin, 'get_security_group', return_value=fake_secgroup): context = mock.Mock() rule_dict = { 'security_group_rule': {'id': 'fake2', 'tenant_id': 'fake', 'security_group_id': 'fake', 'ethertype': 'IPv4', 'direction': 'ingress', 'remote_ip_prefix': '0.0.0.0/0'} } self.assertRaises(securitygroup.SecurityGroupRuleExists, self.mixin._check_for_duplicate_rules, context, 'fake', [rule_dict]) def test_check_for_duplicate_diff_rules_remote_ip_prefix_ipv6(self): fake_secgroup = copy.deepcopy(FAKE_SECGROUP) fake_secgroup['security_group_rules'] = \ [{'id': 'fake', 'tenant_id': 'fake', 'ethertype': 'IPv6', 'direction': 'ingress', 'security_group_id': 'fake', 'remote_ip_prefix': None}] with mock.patch.object(self.mixin, 'get_security_group', return_value=fake_secgroup): context = mock.Mock() rule_dict = { 'security_group_rule': {'id': 'fake2', 'tenant_id': 'fake', 'security_group_id': 'fake', 'ethertype': 'IPv6', 'direction': 'ingress', 'remote_ip_prefix': '::/0'} } self.assertRaises(securitygroup.SecurityGroupRuleExists, self.mixin._check_for_duplicate_rules, context, 'fake', [rule_dict]) def test_delete_security_group_rule_in_use(self): with mock.patch.object(registry, "notify") as mock_notify: mock_notify.side_effect = exceptions.CallbackFailure(Exception()) with testtools.ExpectedException( securitygroup.SecurityGroupRuleInUse): self.mixin.delete_security_group_rule(self.ctx, mock.ANY) def test_delete_security_group_rule_raise_error_on_not_found(self): with testtools.ExpectedException( securitygroup.SecurityGroupRuleNotFound): self.mixin.delete_security_group_rule(self.ctx, 'foo_rule') def test_validate_ethertype_and_protocol(self): fake_ipv4_rules = [{'protocol': constants.PROTO_NAME_IPV6_ICMP, 'ethertype': constants.IPv4}, {'protocol': constants.PROTO_NAME_IPV6_ICMP_LEGACY, 'ethertype': constants.IPv4}, {'protocol': constants.PROTO_NAME_IPV6_ENCAP, 'ethertype': constants.IPv4}, {'protocol': constants.PROTO_NAME_IPV6_ROUTE, 'ethertype': constants.IPv4}, {'protocol': constants.PROTO_NAME_IPV6_FRAG, 'ethertype': constants.IPv4}, {'protocol': constants.PROTO_NAME_IPV6_NONXT, 'ethertype': constants.IPv4}, {'protocol': constants.PROTO_NAME_IPV6_OPTS, 'ethertype': constants.IPv4}, {'protocol': str(constants.PROTO_NUM_IPV6_ICMP), 'ethertype': constants.IPv4}, {'protocol': str(constants.PROTO_NUM_IPV6_ENCAP), 'ethertype': constants.IPv4}, {'protocol': str(constants.PROTO_NUM_IPV6_ROUTE), 'ethertype': constants.IPv4}, {'protocol': str(constants.PROTO_NUM_IPV6_FRAG), 'ethertype': constants.IPv4}, {'protocol': str(constants.PROTO_NUM_IPV6_NONXT), 'ethertype': constants.IPv4}, {'protocol': str(constants.PROTO_NUM_IPV6_OPTS), 'ethertype': constants.IPv4}] # test wrong protocols for rule in fake_ipv4_rules: with testtools.ExpectedException( securitygroup.SecurityGroupEthertypeConflictWithProtocol): self.mixin._validate_ethertype_and_protocol(rule) def test_security_group_precommit_create_event_fail(self): registry.subscribe(fake_callback, resources.SECURITY_GROUP, events.PRECOMMIT_CREATE) 
with mock.patch.object(sqlalchemy.orm.session.SessionTransaction, 'rollback') as mock_rollback: self.assertRaises(securitygroup.SecurityGroupConflict, self.mixin.create_security_group, self.ctx, FAKE_SECGROUP) self.assertTrue(mock_rollback.called) def test_security_group_precommit_update_event_fail(self): registry.subscribe(fake_callback, resources.SECURITY_GROUP, events.PRECOMMIT_UPDATE) sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP) with mock.patch.object(sqlalchemy.orm.session.SessionTransaction, 'rollback') as mock_rollback: self.assertRaises(securitygroup.SecurityGroupConflict, self.mixin.update_security_group, self.ctx, sg_dict['id'], FAKE_SECGROUP) self.assertTrue(mock_rollback.called) def test_security_group_precommit_delete_event_fail(self): registry.subscribe(fake_callback, resources.SECURITY_GROUP, events.PRECOMMIT_DELETE) sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP) with mock.patch.object(sqlalchemy.orm.session.SessionTransaction, 'rollback') as mock_rollback: self.assertRaises(securitygroup.SecurityGroupInUse, self.mixin.delete_security_group, self.ctx, sg_dict['id']) self.assertTrue(mock_rollback.called) def _test_security_group_precommit_create_event(self, with_revisions=False): DEFAULT_SECGROUP = { 'tenant_id': FAKE_SECGROUP['security_group']['tenant_id'], 'name': 'default', 'description': 'Default security group', } DEFAULT_SECGROUP_DICT = { 'id': mock.ANY, 'tenant_id': FAKE_SECGROUP['security_group']['tenant_id'], 'project_id': FAKE_SECGROUP['security_group']['tenant_id'], 'name': 'default', 'description': 'Default security group', 'stateful': mock.ANY, 'security_group_rules': [ # Four rules for egress/ingress and ipv4/ipv6 mock.ANY, mock.ANY, mock.ANY, mock.ANY, ], } if with_revisions: DEFAULT_SECGROUP_DICT.update({ 'revision_number': mock.ANY, }) with mock.patch.object(registry, 'publish') as publish, \ mock.patch.object(registry, "notify") as mock_notify: sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP) mock_notify.assert_has_calls([ mock.call('security_group', 'precommit_create', mock.ANY, context=mock.ANY, is_default=True, security_group=DEFAULT_SECGROUP_DICT), mock.call('security_group', 'after_create', mock.ANY, context=mock.ANY, is_default=True, security_group=DEFAULT_SECGROUP_DICT), mock.call('security_group', 'precommit_create', mock.ANY, context=mock.ANY, is_default=False, security_group=sg_dict), mock.call('security_group', 'after_create', mock.ANY, context=mock.ANY, is_default=False, security_group=sg_dict)]) publish.assert_has_calls([ mock.call('security_group', 'before_create', mock.ANY, payload=mock.ANY), mock.call('security_group', 'before_create', mock.ANY, payload=mock.ANY)]) payload = publish.mock_calls[0][2]['payload'] self.assertDictEqual(payload.desired_state, FAKE_SECGROUP['security_group']) payload = publish.mock_calls[1][2]['payload'] self.assertDictEqual(payload.desired_state, DEFAULT_SECGROUP) # Ensure that the result of create is the same as get. # In particular, we want to check the revision number here.
sg_dict_got = self.mixin.get_security_group( self.ctx, sg_dict['id']) self.assertEqual(sg_dict, sg_dict_got) def test_security_group_precommit_create_event_with_revisions(self): revision = revision_plugin.RevisionPlugin() self._test_security_group_precommit_create_event(True) del revision # appease pep8 def test_security_group_precommit_create_event(self): self._test_security_group_precommit_create_event() def test_security_group_precommit_update_event(self): FAKE_SECGROUP['security_group']['stateful'] = mock.ANY original_sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP) sg_id = original_sg_dict['id'] with mock.patch.object(self.mixin, '_get_port_security_group_bindings'), \ mock.patch.object(registry, "publish") as mock_notify: fake_secgroup = copy.deepcopy(FAKE_SECGROUP) fake_secgroup['security_group']['name'] = 'updated_fake' fake_secgroup['security_group']['stateful'] = mock.ANY sg_dict = self.mixin.update_security_group( self.ctx, sg_id, fake_secgroup) mock_notify.assert_has_calls( [mock.call('security_group', 'precommit_update', mock.ANY, payload=mock.ANY)]) payload = mock_notify.call_args[1]['payload'] self.assertEqual(original_sg_dict, payload.states[0]) self.assertEqual(sg_id, payload.resource_id) self.assertEqual(sg_dict, payload.desired_state) def test_security_group_precommit_and_after_delete_event(self): sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP) with mock.patch.object(registry, "notify") as mock_notify: self.mixin.delete_security_group(self.ctx, sg_dict['id']) sg_dict['security_group_rules'] = mock.ANY mock_notify.assert_has_calls( [mock.call('security_group', 'precommit_delete', mock.ANY, context=mock.ANY, security_group=sg_dict, security_group_id=sg_dict['id'], security_group_rule_ids=[mock.ANY, mock.ANY]), mock.call('security_group', 'after_delete', mock.ANY, context=mock.ANY, security_group_id=sg_dict['id'], security_group_rule_ids=[mock.ANY, mock.ANY])]) def test_security_group_rule_precommit_create_event_fail(self): registry.subscribe(fake_callback, resources.SECURITY_GROUP_RULE, events.PRECOMMIT_CREATE) sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP) fake_rule = FAKE_SECGROUP_RULE fake_rule['security_group_rule']['security_group_id'] = sg_dict['id'] with mock.patch.object(sqlalchemy.orm.session.SessionTransaction, 'rollback') as mock_rollback,\ mock.patch.object(self.mixin, '_get_security_group'): self.assertRaises(securitygroup.SecurityGroupConflict, self.mixin.create_security_group_rule, self.ctx, fake_rule) self.assertTrue(mock_rollback.called) def test_security_group_rule_precommit_delete_event_fail(self): registry.subscribe(fake_callback, resources.SECURITY_GROUP_RULE, events.PRECOMMIT_DELETE) sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP) fake_rule = FAKE_SECGROUP_RULE fake_rule['security_group_rule']['security_group_id'] = sg_dict['id'] with mock.patch.object(sqlalchemy.orm.session.SessionTransaction, 'rollback') as mock_rollback,\ mock.patch.object(self.mixin, '_get_security_group'): sg_rule_dict = self.mixin.create_security_group_rule(self.ctx, fake_rule) self.assertRaises(securitygroup.SecurityGroupRuleInUse, self.mixin.delete_security_group_rule, self.ctx, sg_rule_dict['id']) self.assertTrue(mock_rollback.called) def test_security_group_rule_precommit_create_event(self): sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP) fake_rule = FAKE_SECGROUP_RULE fake_rule['security_group_rule']['security_group_id'] = sg_dict['id'] with mock.patch.object(registry, 
"notify") as mock_notify, \ mock.patch.object(self.mixin, '_get_security_group'): mock_notify.assert_has_calls([mock.call('security_group_rule', 'precommit_create', mock.ANY, context=mock.ANY, security_group_rule=self.mixin.create_security_group_rule( self.ctx, fake_rule))]) def test_sg_rule_before_precommit_and_after_delete_event(self): sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP) fake_rule = FAKE_SECGROUP_RULE fake_rule['security_group_rule']['security_group_id'] = sg_dict['id'] with mock.patch.object(registry, "notify") as mock_notify, \ mock.patch.object(self.mixin, '_get_security_group'): sg_rule_dict = self.mixin.create_security_group_rule(self.ctx, fake_rule) self.mixin.delete_security_group_rule(self.ctx, sg_rule_dict['id']) mock_notify.assert_has_calls([mock.call('security_group_rule', 'before_delete', mock.ANY, context=mock.ANY, security_group_rule_id=sg_rule_dict['id'])]) mock_notify.assert_has_calls([mock.call('security_group_rule', 'precommit_delete', mock.ANY, context=mock.ANY, security_group_id=sg_dict['id'], security_group_rule_id=sg_rule_dict['id'])]) mock_notify.assert_has_calls([mock.call('security_group_rule', 'after_delete', mock.ANY, context=mock.ANY, security_group_rule_id=sg_rule_dict['id'], security_group_id=sg_dict['id'])]) def test_get_ip_proto_name_and_num(self): protocols = [constants.PROTO_NAME_UDP, str(constants.PROTO_NUM_TCP), 'blah', '111'] protocol_names_nums = ( [[constants.PROTO_NAME_UDP, str(constants.PROTO_NUM_UDP)], [constants.PROTO_NAME_TCP, str(constants.PROTO_NUM_TCP)], ['blah', 'blah'], ['111', '111']]) for i, protocol in enumerate(protocols): self.assertEqual(protocol_names_nums[i], self.mixin._get_ip_proto_name_and_num(protocol)) def test__validate_port_range_for_icmp_exception(self): states = [(1, 256, securitygroup.SecurityGroupInvalidIcmpValue), (None, 6, securitygroup.SecurityGroupMissingIcmpType), (300, 1, securitygroup.SecurityGroupInvalidIcmpValue)] for protocol in (constants.PROTO_NAME_ICMP, constants.PROTO_NAME_IPV6_ICMP, constants.PROTO_NAME_IPV6_ICMP_LEGACY): for pmin, pmax, exception in states: self.assertRaises(exception, self.mixin._validate_port_range, {'port_range_min': pmin, 'port_range_max': pmax, 'protocol': protocol}) def test__validate_port_range_exception(self): self.assertRaises(securitygroup.SecurityGroupInvalidPortValue, self.mixin._validate_port_range, {'port_range_min': 0, 'port_range_max': None, 'protocol': constants.PROTO_NAME_TCP}) self.assertRaises(securitygroup.SecurityGroupInvalidPortRange, self.mixin._validate_port_range, {'port_range_min': 1, 'port_range_max': None, 'protocol': constants.PROTO_NAME_SCTP}) self.assertRaises(securitygroup.SecurityGroupInvalidPortRange, self.mixin._validate_port_range, {'port_range_min': 1000, 'port_range_max': 1, 'protocol': constants.PROTO_NAME_UDPLITE}) self.assertRaises( securitygroup.SecurityGroupInvalidProtocolForPort, self.mixin._validate_port_range, {'port_range_min': 100, 'port_range_max': 200, 'protocol': '111'}) self.assertRaises( securitygroup.SecurityGroupInvalidProtocolForPort, self.mixin._validate_port_range, {'port_range_min': 100, 'port_range_max': None, 'protocol': constants.PROTO_NAME_VRRP}) self.assertRaises( securitygroup.SecurityGroupInvalidProtocolForPort, self.mixin._validate_port_range, {'port_range_min': None, 'port_range_max': 200, 'protocol': constants.PROTO_NAME_VRRP}) def _create_environment(self): self.sg = copy.deepcopy(FAKE_SECGROUP) self.user_ctx = context.Context(user_id='user1', tenant_id='tenant_1', is_admin=False, 
overwrite=False) self.admin_ctx = context.Context(user_id='user2', tenant_id='tenant_2', is_admin=True, overwrite=False) self.sg_user = self.mixin.create_security_group( self.user_ctx, {'security_group': {'name': 'name', 'tenant_id': 'tenant_1', 'description': 'fake'}}) def test_get_security_group_rules(self): self._create_environment() rules_before = self.mixin.get_security_group_rules(self.user_ctx) rule = copy.deepcopy(FAKE_SECGROUP_RULE) rule['security_group_rule']['security_group_id'] = self.sg_user['id'] rule['security_group_rule']['tenant_id'] = 'tenant_2' self.mixin.create_security_group_rule(self.admin_ctx, rule) rules_after = self.mixin.get_security_group_rules(self.user_ctx) self.assertEqual(len(rules_before) + 1, len(rules_after)) for rule in (rule for rule in rules_after if rule not in rules_before): self.assertEqual('tenant_2', rule['tenant_id']) def test_get_security_group_rules_filters_passed(self): self._create_environment() filters = {'security_group_id': self.sg_user['id']} rules_before = self.mixin.get_security_group_rules(self.user_ctx, filters=filters) default_sg = self.mixin.get_security_groups( self.user_ctx, filters={'name': 'default'})[0] rule = copy.deepcopy(FAKE_SECGROUP_RULE) rule['security_group_rule']['security_group_id'] = default_sg['id'] rule['security_group_rule']['tenant_id'] = 'tenant_1' self.mixin.create_security_group_rule(self.user_ctx, rule) rules_after = self.mixin.get_security_group_rules(self.user_ctx, filters=filters) self.assertEqual(rules_before, rules_after) def test_get_security_group_rules_admin_context(self): self._create_environment() rules_before = self.mixin.get_security_group_rules(self.ctx) rule = copy.deepcopy(FAKE_SECGROUP_RULE) rule['security_group_rule']['security_group_id'] = self.sg_user['id'] rule['security_group_rule']['tenant_id'] = 'tenant_1' self.mixin.create_security_group_rule(self.user_ctx, rule) rules_after = self.mixin.get_security_group_rules(self.ctx) self.assertEqual(len(rules_before) + 1, len(rules_after)) for rule in (rule for rule in rules_after if rule not in rules_before): self.assertEqual('tenant_1', rule['tenant_id']) self.assertEqual(self.sg_user['id'], rule['security_group_id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/db/test_segments_db.py0000644000175000017500000000176300000000000025255 0ustar00coreycorey00000000000000# Copyright (c) 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
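# Illustrative sketch of the segments_db helper exercised below; the call
# mirrors this file's test, while `context` and `net_id` are assumptions
# for illustration only. The helper returns a dict keyed by network id
# and, as the test below asserts, skips the DB query entirely when no
# network ids are passed.
#
#     from neutron.db import segments_db
#
#     net_segs = segments_db.get_networks_segments(context, [net_id])
#     segments_for_net = net_segs.get(net_id, [])
#     assert segments_db.get_networks_segments(context, []) == {}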
import mock

from neutron.db import segments_db
from neutron.tests import base


class TestSegmentsDb(base.BaseTestCase):

    def test_get_networks_segments_with_empty_networks(self):
        context = mock.MagicMock()
        net_segs = segments_db.get_networks_segments(context, [])
        self.assertFalse(context.session.query.called)
        self.assertEqual({}, net_segs)


neutron-16.0.0.0b2.dev214/neutron/tests/unit/debug/
neutron-16.0.0.0b2.dev214/neutron/tests/unit/debug/__init__.py
neutron-16.0.0.0b2.dev214/neutron/tests/unit/debug/test_commands.py

# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import socket

import mock
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants
from oslo_config import cfg

from neutron.agent.linux import interface
from neutron.common import config as common_config
from neutron.conf.agent import common as config
from neutron.debug import commands
from neutron.debug import debug_agent
from neutron.tests import base


class MyApp(object):
    def __init__(self, _stdout):
        self.stdout = _stdout


class TestDebugCommands(base.BaseTestCase):
    def setUp(self):
        super(TestDebugCommands, self).setUp()
        config.register_interface_opts()
        common_config.init([])
        config.register_interface_driver_opts_helper(cfg.CONF)

        device_exists_p = mock.patch(
            'neutron.agent.linux.ip_lib.device_exists', return_value=False)
        device_exists_p.start()
        namespace_e_p = mock.patch(
            'neutron.agent.linux.ip_lib.network_namespace_exists')
        namespace_e_p.start()
        namespace_d_p = mock.patch(
            'neutron.agent.linux.ip_lib.delete_network_namespace')
        namespace_d_p.start()
        ensure_namespace_p = mock.patch(
            'neutron.agent.linux.ip_lib.IPWrapper.ensure_namespace')
        ensure_namespace_p.start()
        dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
        driver_cls = dvr_cls_p.start()
        mock_driver = mock.MagicMock()
        mock_driver.DEV_NAME_LEN = (
            interface.LinuxInterfaceDriver.DEV_NAME_LEN)
        mock_driver.get_device_name.return_value = 'tap12345678-12'
        driver_cls.return_value = mock_driver
        self.driver = mock_driver

        client_cls_p = mock.patch('neutronclient.v2_0.client.Client')
        client_cls = client_cls_p.start()
        client_inst = mock.Mock()
        client_cls.return_value = client_inst

        fake_network = {'network': {'id': 'fake_net',
                                    'tenant_id': 'fake_tenant',
                                    'subnets': ['fake_subnet']}}
        fake_port = {'port':
                     {'id': 'fake_port',
                      'device_owner': 'fake_device',
                      'mac_address': 'aa:bb:cc:dd:ee:ffa',
                      'network_id': 'fake_net',
                      'fixed_ips': [{'subnet_id': 'fake_subnet',
                                     'ip_address': '10.0.0.3'}]
                      }}
        fake_ports = {'ports': [fake_port['port']]}
        self.fake_ports = fake_ports
        allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}]
        fake_subnet_v4 = {'subnet': {'name': 'fake_subnet_v4',
                                     'id': 'fake_subnet',
                                     'network_id': 'fake_net',
                                     'gateway_ip': '10.0.0.1',
                                     'dns_nameservers': ['10.0.0.2'],
                                     'host_routes': [],
                                     'cidr': '10.0.0.0/24',
                                     'allocation_pools': allocation_pools,
                                     'enable_dhcp': True,
                                     'ip_version': constants.IP_VERSION_4}}

        client_inst.list_ports.return_value = fake_ports
        client_inst.create_port.return_value = fake_port
        client_inst.show_port.return_value = fake_port
        client_inst.show_network.return_value = fake_network
        client_inst.show_subnet.return_value = fake_subnet_v4
        self.client = client_inst
        mock_std = mock.Mock()
        self.app = MyApp(mock_std)
        self.app.debug_agent = debug_agent.NeutronDebugAgent(cfg.CONF,
                                                             client_inst,
                                                             mock_driver)

    def _test_create_probe(self, device_owner):
        cmd = commands.CreateProbe(self.app, None)
        cmd_parser = cmd.get_parser('create_probe')
        if device_owner == debug_agent.DEVICE_OWNER_COMPUTE_PROBE:
            args = ['fake_net', '--device-owner', 'compute']
        else:
            args = ['fake_net']
        parsed_args = cmd_parser.parse_args(args)
        cmd.run(parsed_args)
        fake_port = {'port':
                     {'device_owner': device_owner,
                      'admin_state_up': True,
                      'network_id': 'fake_net',
                      'tenant_id': 'fake_tenant',
                      portbindings.HOST_ID: cfg.CONF.host,
                      'fixed_ips': [{'subnet_id': 'fake_subnet'}],
                      'device_id': socket.gethostname()}}
        namespace = 'qprobe-fake_port'
        self.client.assert_has_calls([mock.call.show_network('fake_net'),
                                      mock.call.show_subnet('fake_subnet'),
                                      mock.call.create_port(fake_port),
                                      mock.call.show_subnet('fake_subnet')])
        self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY),
                                      mock.call.plug('fake_net',
                                                     'fake_port',
                                                     'tap12345678-12',
                                                     'aa:bb:cc:dd:ee:ffa',
                                                     namespace=namespace),
                                      mock.call.init_l3('tap12345678-12',
                                                        ['10.0.0.3/24'],
                                                        namespace=namespace
                                                        )])

    def test_create_network_probe(self):
        self._test_create_probe(debug_agent.DEVICE_OWNER_NETWORK_PROBE)

    def test_create_nova_probe(self):
        self._test_create_probe(debug_agent.DEVICE_OWNER_COMPUTE_PROBE)

    def _test_create_probe_external(self, device_owner):
        fake_network = {'network': {'id': 'fake_net',
                                    'tenant_id': 'fake_tenant',
                                    'router:external': True,
                                    'subnets': ['fake_subnet']}}
        self.client.show_network.return_value = fake_network
        cmd = commands.CreateProbe(self.app, None)
        cmd_parser = cmd.get_parser('create_probe')
        if device_owner == debug_agent.DEVICE_OWNER_COMPUTE_PROBE:
            args = ['fake_net', '--device-owner', 'compute']
        else:
            args = ['fake_net']
        parsed_args = cmd_parser.parse_args(args)
        cmd.run(parsed_args)
        fake_port = {'port':
                     {'device_owner': device_owner,
                      'admin_state_up': True,
                      'network_id': 'fake_net',
                      'tenant_id': 'fake_tenant',
                      portbindings.HOST_ID: cfg.CONF.host,
                      'fixed_ips': [{'subnet_id': 'fake_subnet'}],
                      'device_id': socket.gethostname()}}
        namespace = 'qprobe-fake_port'
        self.client.assert_has_calls([mock.call.show_network('fake_net'),
                                      mock.call.show_subnet('fake_subnet'),
                                      mock.call.create_port(fake_port),
                                      mock.call.show_subnet('fake_subnet')])
        self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY),
                                      mock.call.plug('fake_net',
                                                     'fake_port',
                                                     'tap12345678-12',
                                                     'aa:bb:cc:dd:ee:ffa',
                                                     namespace=namespace),
                                      mock.call.init_l3('tap12345678-12',
                                                        ['10.0.0.3/24'],
                                                        namespace=namespace
                                                        )])

    def test_create_network_probe_external(self):
        self._test_create_probe_external(
            debug_agent.DEVICE_OWNER_NETWORK_PROBE)

    def test_create_nova_probe_external(self):
        self._test_create_probe_external(
            debug_agent.DEVICE_OWNER_COMPUTE_PROBE)

    def test_delete_probe(self):
        cmd = commands.DeleteProbe(self.app, None)
        cmd_parser = cmd.get_parser('delete_probe')
        args = ['fake_port']
        parsed_args = cmd_parser.parse_args(args)
        cmd.run(parsed_args)
        namespace = 'qprobe-fake_port'
        self.client.assert_has_calls([mock.call.show_port('fake_port'),
                                      mock.call.delete_port('fake_port')])
        self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY),
                                      mock.call.unplug('tap12345678-12',
                                                       namespace=namespace)])

    def test_delete_probe_external(self):
        fake_network = {'network': {'id': 'fake_net',
                                    'tenant_id': 'fake_tenant',
                                    'router:external': True,
                                    'subnets': ['fake_subnet']}}
        self.client.show_network.return_value = fake_network
        cmd = commands.DeleteProbe(self.app, None)
        cmd_parser = cmd.get_parser('delete_probe')
        args = ['fake_port']
        parsed_args = cmd_parser.parse_args(args)
        cmd.run(parsed_args)
        namespace = 'qprobe-fake_port'
        self.client.assert_has_calls([mock.call.show_port('fake_port'),
                                      mock.call.delete_port('fake_port')])
        self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY),
                                      mock.call.unplug('tap12345678-12',
                                                       namespace=namespace)])

    def test_list_probe(self):
        cmd = commands.ListProbe(self.app, None)
        cmd_parser = cmd.get_parser('list_probe')
        args = []
        parsed_args = cmd_parser.parse_args(args)
        cmd.run(parsed_args)
        self.client.assert_has_calls(
            [mock.call.list_ports(
                device_owner=[debug_agent.DEVICE_OWNER_NETWORK_PROBE,
                              debug_agent.DEVICE_OWNER_COMPUTE_PROBE])])

    def test_exec_command(self):
        cmd = commands.ExecProbe(self.app, None)
        cmd_parser = cmd.get_parser('exec_command')
        args = ['fake_port', 'fake_command']
        parsed_args = cmd_parser.parse_args(args)
        with mock.patch('neutron.agent.linux.ip_lib.IpNetnsCommand') as ns:
            cmd.run(parsed_args)
            ns.assert_has_calls([mock.call.execute(mock.ANY)])
        self.client.assert_has_calls([mock.call.show_port('fake_port')])

    def test_clear_probe(self):
        cmd = commands.ClearProbe(self.app, None)
        cmd_parser = cmd.get_parser('clear_probe')
        args = []
        parsed_args = cmd_parser.parse_args(args)
        cmd.run(parsed_args)
        namespace = 'qprobe-fake_port'
        self.client.assert_has_calls(
            [mock.call.list_ports(
                device_id=socket.gethostname(),
                device_owner=[debug_agent.DEVICE_OWNER_NETWORK_PROBE,
                              debug_agent.DEVICE_OWNER_COMPUTE_PROBE]),
             mock.call.show_port('fake_port'),
             mock.call.delete_port('fake_port')])
        self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY),
                                      mock.call.unplug('tap12345678-12',
                                                       namespace=namespace)])

    def test_ping_all_with_ensure_port(self):
        fake_ports = self.fake_ports

        def fake_port_list(network_id=None, device_owner=None,
                           device_id=None):
            if network_id:
                # In order to test ensure_port, return []
                return {'ports': []}
            return fake_ports
        self.client.list_ports.side_effect = fake_port_list
        cmd = commands.PingAll(self.app, None)
        cmd_parser = cmd.get_parser('ping_all')
        args = []
        parsed_args = cmd_parser.parse_args(args)
        namespace = 'qprobe-fake_port'
        with mock.patch('neutron.agent.linux.ip_lib.IpNetnsCommand') as ns:
            cmd.run(parsed_args)
            ns.assert_has_calls([mock.call.execute(mock.ANY)])
        fake_port = {'port':
                     {'device_owner':
                      debug_agent.DEVICE_OWNER_NETWORK_PROBE,
                      'admin_state_up': True,
                      'network_id': 'fake_net',
                      'tenant_id': 'fake_tenant',
                      portbindings.HOST_ID: cfg.CONF.host,
                      'fixed_ips': [{'subnet_id': 'fake_subnet'}],
                      'device_id': socket.gethostname()}}
        expected = [mock.call.show_network('fake_net'),
                    mock.call.show_subnet('fake_subnet'),
                    mock.call.create_port(fake_port),
                    mock.call.show_subnet('fake_subnet')]
        self.client.assert_has_calls(expected)
        self.driver.assert_has_calls([mock.call.init_l3('tap12345678-12',
                                                        ['10.0.0.3/24'],
                                                        namespace=namespace
                                                        )])

    def test_ping_all(self):
        cmd = commands.PingAll(self.app, None)
        cmd_parser = cmd.get_parser('ping_all')
        args = []
        parsed_args = cmd_parser.parse_args(args)
        with mock.patch('neutron.agent.linux.ip_lib.IpNetnsCommand') as ns:
            cmd.run(parsed_args)
            ns.assert_has_calls([mock.call.execute(mock.ANY)])
        expected = [mock.call.list_ports(),
                    mock.call.list_ports(
                        network_id='fake_net',
                        device_owner=debug_agent.DEVICE_OWNER_NETWORK_PROBE,
                        device_id=socket.gethostname()),
                    mock.call.show_subnet('fake_subnet'),
                    mock.call.show_port('fake_port')]
        self.client.assert_has_calls(expected)

    def test_ping_all_v6(self):
        fake_subnet_v6 = {'subnet': {'name': 'fake_v6',
                                     'ip_version': constants.IP_VERSION_6}}
        self.client.show_subnet.return_value = fake_subnet_v6
        cmd = commands.PingAll(self.app, None)
        cmd_parser = cmd.get_parser('ping_all')
        args = []
        parsed_args = cmd_parser.parse_args(args)
        with mock.patch('neutron.agent.linux.ip_lib.IpNetnsCommand') as ns:
            cmd.run(parsed_args)
            ns.assert_has_calls([mock.call.execute(mock.ANY)])
        self.client.assert_has_calls([mock.call.list_ports()])


neutron-16.0.0.0b2.dev214/neutron/tests/unit/dummy_plugin.py

# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
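# Editor's note (added): a short, hedged usage sketch of the in-memory
# CRUD implemented by DummyServicePlugin below. Kept commented out
# because create_dummy() also bumps the service-type refcount, which
# needs a real context/session; 'ctx' is a stand-in:
#
#     plugin = DummyServicePlugin()
#     d = plugin.create_dummy(ctx, {'dummy': {'service_type': 'DUMMY'}})
#     assert plugin.get_dummy(ctx, d['id'], fields=None) is d
#     plugin.delete_dummy(ctx, d['id'])
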
from neutron_lib.api.definitions import servicetype as svctype_apidef
from neutron_lib import exceptions
from neutron_lib.plugins import directory
from neutron_lib.services import base as service_base
from oslo_utils import uuidutils

from neutron.api import extensions
from neutron.api.v2 import base
from neutron.db import servicetype_db
from neutron import neutron_plugin_base_v2


RESOURCE_NAME = "dummy"
COLLECTION_NAME = "%ss" % RESOURCE_NAME
DUMMY_SERVICE_TYPE = "DUMMY"
DUMMY_SERVICE_WITH_REQUIRE_TYPE = "DUMMY_REQUIRE"

# Attribute Map for dummy resource
RESOURCE_ATTRIBUTE_MAP = {
    COLLECTION_NAME: {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'validate': {'type:string': None},
                 'is_visible': True, 'default': ''},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'is_visible': True},
        'service_type': {'allow_post': True,
                         'allow_put': False,
                         'validate': {'type:servicetype_ref': None},
                         'is_visible': True, 'default': None}
    }
}


class Dummy(object):

    @classmethod
    def get_name(cls):
        return RESOURCE_NAME

    @classmethod
    def get_alias(cls):
        return RESOURCE_NAME

    @classmethod
    def get_description(cls):
        return "Dummy stuff"

    @classmethod
    def get_updated(cls):
        return "2012-11-20T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Extended Resource for dummy management."""
        dummy_inst = directory.get_plugin(DUMMY_SERVICE_TYPE)
        controller = base.create_resource(
            COLLECTION_NAME, RESOURCE_NAME, dummy_inst,
            RESOURCE_ATTRIBUTE_MAP[COLLECTION_NAME])
        return [extensions.ResourceExtension(COLLECTION_NAME, controller)]


class DummyServicePlugin(service_base.ServicePluginBase):
    """A simple plugin managing instances of a fictional 'dummy' service.

    This plugin is provided as a proof-of-concept of how an advanced
    service might leverage the service type extension. Ideally, instances
    of real advanced services, such as firewall or VPN, will adopt a
    similar solution.
    """

    supported_extension_aliases = [RESOURCE_NAME, svctype_apidef.ALIAS]
    path_prefix = "/dummy_svc"
    agent_notifiers = {RESOURCE_NAME: 'dummy_agent_notifier'}

    def __init__(self):
        self.svctype_mgr = servicetype_db.ServiceTypeManager.get_instance()
        self.dummys = {}

    @classmethod
    def get_plugin_type(cls):
        return DUMMY_SERVICE_TYPE

    def get_plugin_description(self):
        return "Neutron Dummy Service Plugin"

    def get_dummys(self, context, filters, fields):
        return self.dummys.values()

    def get_dummy(self, context, id, fields):
        try:
            return self.dummys[id]
        except KeyError:
            raise exceptions.NotFound()

    def create_dummy(self, context, dummy):
        d = dummy[RESOURCE_NAME]
        d['id'] = uuidutils.generate_uuid()
        self.dummys[d['id']] = d
        self.svctype_mgr.increase_service_type_refcount(context,
                                                        d['service_type'])
        return d

    def update_dummy(self, context, id, dummy):
        pass

    def delete_dummy(self, context, id):
        try:
            svc_type_id = self.dummys[id]['service_type']
            del self.dummys[id]
            self.svctype_mgr.decrease_service_type_refcount(context,
                                                            svc_type_id)
        except KeyError:
            raise exceptions.NotFound()


class DummyWithRequireServicePlugin(DummyServicePlugin):

    required_service_plugins = ['dummy']

    @classmethod
    def get_plugin_type(cls):
        return DUMMY_SERVICE_WITH_REQUIRE_TYPE

    def get_plugin_description(self):
        return "Neutron Dummy Service Plugin with requirements"


class DummyCorePluginWithoutDatastore(
        neutron_plugin_base_v2.NeutronPluginBaseV2):

    def create_subnet(self, context, subnet):
        pass

    def update_subnet(self, context, id, subnet):
        pass

    def get_subnet(self, context, id, fields=None):
        pass

    def get_subnets(self, context, filters=None, fields=None, sorts=None,
                    limit=None, marker=None, page_reverse=False):
        pass

    def delete_subnet(self, context, id):
        pass

    def create_network(self, context, network):
        pass

    def update_network(self, context, id, network):
        pass

    def get_network(self, context, id, fields=None):
        pass

    def get_networks(self, context, filters=None, fields=None, sorts=None,
                     limit=None, marker=None, page_reverse=False):
        pass

    def delete_network(self, context, id):
        pass

    def create_port(self, context, port):
        pass

    def update_port(self, context, id, port):
        pass

    def get_port(self, context, id, fields=None):
        pass

    def get_ports(self, context, filters=None, fields=None, sorts=None,
                  limit=None, marker=None, page_reverse=False):
        pass

    def delete_port(self, context, id):
        pass


neutron-16.0.0.0b2.dev214/neutron/tests/unit/extension_stubs.py

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc

from neutron_lib.api import extensions as api_extensions
from neutron_lib.services import base

from neutron import wsgi


class StubExtension(api_extensions.ExtensionDescriptor):

    def __init__(self, alias="stub_extension", optional=None):
        self.alias = alias
        self.optional = optional or []

    def get_name(self):
        return "Stub Extension"

    def get_alias(self):
        return self.alias

    def get_description(self):
        return ""

    def get_updated(self):
        return ""

    def get_optional_extensions(self):
        return self.optional


class StubExtensionWithReqs(StubExtension):

    def get_required_extensions(self):
        return ["foo"]


class StubPlugin(object):

    def __init__(self, supported_extensions=None):
        supported_extensions = supported_extensions or []
        self.supported_extension_aliases = supported_extensions


class ExtensionExpectingPluginInterface(StubExtension):
    """Expect plugin to implement all methods in StubPluginInterface.

    This extension expects plugin to implement all the methods defined
    in StubPluginInterface.
    """

    def get_plugin_interface(self):
        return StubPluginInterface


class StubPluginInterface(base.ServicePluginBase):

    @abc.abstractmethod
    def get_foo(self, bar=None):
        pass

    def get_plugin_type(self):
        pass

    def get_plugin_description(self):
        pass


class StubBaseAppController(wsgi.Controller):

    def index(self, request):
        return "base app index"

    def show(self, request, id):
        return {'fort': 'knox'}

    def update(self, request, id):
        return {'uneditable': 'original_value'}


neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/
neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/__init__.py
neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/base.py

# Copyright 2014 Intel Corporation.
# Copyright 2014 Isaku Yamahata
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
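# Editor's note (added): setup_extension() below fakes double-underscore
# class attributes (__native_pagination_support and friends) on a mock by
# spelling out Python's name mangling. A minimal, runnable illustration
# of the rule it relies on (class name here is hypothetical):
class _ManglingExample(object):
    __flag = True  # actually stored as _ManglingExample__flag

assert getattr(_ManglingExample, '_ManglingExample__flag') is True
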
import mock
from neutron_lib import fixture
from oslo_config import cfg
from oslo_utils import uuidutils
from webob import exc
import webtest

from neutron.api import extensions
from neutron import manager
from neutron import quota
from neutron.tests.unit.api import test_extensions
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit import testlib_api

CORE_PLUGIN = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'


class ExtensionTestCase(testlib_api.WebTestCase):

    def setup_extension(self, plugin, service_type, extension_class,
                        resource_prefix, plural_mappings=None,
                        translate_resource_name=False,
                        allow_pagination=False, allow_sorting=False,
                        supported_extension_aliases=None,
                        use_quota=False):

        self._resource_prefix = resource_prefix
        self._plural_mappings = plural_mappings or {}
        self._translate_resource_name = translate_resource_name

        # Ensure existing ExtensionManager is not used
        extensions.PluginAwareExtensionManager._instance = None

        self.useFixture(fixture.APIDefinitionFixture())

        # Create the default configurations
        self.config_parse()

        core_plugin = CORE_PLUGIN if service_type else plugin
        self.setup_coreplugin(core_plugin, load_plugins=False)
        if service_type:
            cfg.CONF.set_override('service_plugins', [plugin])

        self._plugin_patcher = mock.patch(plugin, autospec=True)
        self.plugin = self._plugin_patcher.start()
        instance = self.plugin.return_value
        if service_type:
            instance.get_plugin_type.return_value = service_type
        manager.init()

        if supported_extension_aliases is not None:
            instance.supported_extension_aliases = (
                supported_extension_aliases)
        if allow_pagination:
            # instance.__native_pagination_support = True
            native_pagination_attr_name = (
                "_%s__native_pagination_support" %
                instance.__class__.__name__)
            setattr(instance, native_pagination_attr_name, True)
        if allow_sorting:
            # instance.__native_sorting_support = True
            native_sorting_attr_name = (
                "_%s__native_sorting_support" %
                instance.__class__.__name__)
            setattr(instance, native_sorting_attr_name, True)
        if use_quota:
            quota.QUOTAS._driver = None
            cfg.CONF.set_override('quota_driver',
                                  'neutron.quota.ConfDriver',
                                  group='QUOTAS')
        setattr(instance, 'path_prefix', resource_prefix)

        class ExtensionTestExtensionManager(object):
            def get_resources(self):
                return extension_class.get_resources()

            def get_actions(self):
                return []

            def get_request_extensions(self):
                return []

        ext_mgr = ExtensionTestExtensionManager()
        self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr)
        self.api = webtest.TestApp(self.ext_mdw)

    def _test_entity_delete(self, entity):
        """Does the entity deletion based on naming convention."""
        entity_id = uuidutils.generate_uuid()
        path = self._resource_prefix + '/' if self._resource_prefix else ''
        path += self._plural_mappings.get(entity, entity + 's')
        if self._translate_resource_name:
            path = path.replace('_', '-')
        res = self.api.delete(
            test_base._get_path(path, id=entity_id, fmt=self.fmt))

        delete_entity = getattr(self.plugin.return_value,
                                "delete_" + entity)
        delete_entity.assert_called_with(mock.ANY, entity_id)
        self.assertEqual(exc.HTTPNoContent.code, res.status_int)


neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/extendedattribute.py

# Copyright 2013 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib.api import extensions

EXTENDED_ATTRIBUTE = 'extended_attribute'

EXTENDED_ATTRIBUTES_2_0 = {
    'ext_test_resources': {
        EXTENDED_ATTRIBUTE: {'allow_post': True,
                             'allow_put': False,
                             'validate': {'type:uuid_or_none': None},
                             'default': None, 'is_visible': True},
    }
}


class Extendedattribute(extensions.ExtensionDescriptor):
    """Extension class supporting extended attribute for router."""

    @classmethod
    def get_name(cls):
        return "Extended Extension Attributes"

    @classmethod
    def get_alias(cls):
        return "extended-ext-attr"

    @classmethod
    def get_description(cls):
        return "Provides extended_attr attribute to router"

    @classmethod
    def get_updated(cls):
        return "2013-02-05T00:00:00-00:00"

    def get_extended_resources(self, version):
        if version == "2.0":
            return EXTENDED_ATTRIBUTES_2_0
        else:
            return {}


neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/extensionattribute.py

# Copyright 2013 VMware, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
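# Editor's note (added): like Extendedattribute above, the descriptor in
# this module only advertises its attribute map for API version "2.0".
# A minimal sketch of that version gate (hypothetical free function, not
# part of the extension API):
def _get_extended_resources_sketch(version, attr_map):
    return attr_map if version == "2.0" else {}
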
import abc

from neutron_lib.api import extensions as api_extensions
from neutron_lib.plugins import directory

from neutron.api import extensions
from neutron.api.v2 import base
from neutron.quota import resource_registry

# Attribute Map
RESOURCE_ATTRIBUTE_MAP = {
    'ext_test_resources': {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'validate': {'type:string': None},
                 'is_visible': True, 'default': ''},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'validate': {'type:string': None},
                      'is_visible': True},
    }
}


class Extensionattribute(api_extensions.ExtensionDescriptor):

    @classmethod
    def get_name(cls):
        return "Extension Test Resource"

    @classmethod
    def get_alias(cls):
        return "ext-obj-test"

    @classmethod
    def get_description(cls):
        return "Extension Test Resource"

    @classmethod
    def get_updated(cls):
        return "2013-02-05T10:00:00-00:00"

    def update_attributes_map(self, attributes):
        super(Extensionattribute, self).update_attributes_map(
            attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        exts = []
        plugin = directory.get_plugin()
        resource_name = 'ext_test_resource'
        collection_name = resource_name + "s"
        params = RESOURCE_ATTRIBUTE_MAP.get(collection_name, dict())

        resource_registry.register_resource_by_name(resource_name)

        controller = base.create_resource(collection_name,
                                          resource_name,
                                          plugin, params,
                                          member_actions={})

        ex = extensions.ResourceExtension(collection_name,
                                          controller,
                                          member_actions={})
        exts.append(ex)

        return exts

    def get_extended_resources(self, version):
        if version == "2.0":
            return RESOURCE_ATTRIBUTE_MAP
        else:
            return {}


class ExtensionObjectTestPluginBase(object):

    @abc.abstractmethod
    def create_ext_test_resource(self, context, router):
        pass

    @abc.abstractmethod
    def get_ext_test_resource(self, context, id, fields=None):
        pass


neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/foxinsocks.py

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc

from neutron_lib.api import extensions as api_extensions
from neutron_lib.services import base
from oslo_serialization import jsonutils
import six

from neutron.api import extensions
from neutron import wsgi


class FoxInSocksController(wsgi.Controller):

    def index(self, request):
        return "Try to say this Mr. Knox, sir..."
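
# Editor's note (added): the request-extension handlers further down
# (_goose_handler, _bands_handler) share one pattern: deserialize the
# JSON response body, add a key, re-serialize. A self-contained sketch of
# that transform using the jsonutils import above (hypothetical helper,
# no webob objects involved):
def _add_key_to_json_body(body, key, value):
    data = jsonutils.loads(body)
    data[key] = value  # inject the extension's extra attribute
    return jsonutils.dump_as_bytes(data)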
@six.add_metaclass(abc.ABCMeta)
class FoxInSocksPluginInterface(base.ServicePluginBase):

    @abc.abstractmethod
    def method_to_support_foxnsox_extension(self):
        pass


class Foxinsocks(api_extensions.ExtensionDescriptor):

    def __init__(self):
        pass

    def get_plugin_interface(self):
        return FoxInSocksPluginInterface

    def get_name(self):
        return "Fox In Socks"

    def get_alias(self):
        return "FOXNSOX"

    def get_description(self):
        return "The Fox In Socks Extension"

    def get_updated(self):
        return "2011-01-22T13:25:27-06:00"

    def get_resources(self):
        resources = []
        resource = extensions.ResourceExtension('foxnsocks',
                                                FoxInSocksController())
        resources.append(resource)
        return resources

    def get_actions(self):
        return [extensions.ActionExtension('dummy_resources',
                                           'FOXNSOX:add_tweedle',
                                           self._add_tweedle_handler),
                extensions.ActionExtension('dummy_resources',
                                           'FOXNSOX:delete_tweedle',
                                           self._delete_tweedle_handler)]

    def get_request_extensions(self):
        request_exts = []

        def _goose_handler(req, res):
            # NOTE: This only handles JSON responses.
            # You can use content type header to test for XML.
            data = jsonutils.loads(res.body)
            data['FOXNSOX:googoose'] = req.GET.get('chewing')
            res.body = jsonutils.dump_as_bytes(data)
            return res

        req_ext1 = extensions.RequestExtension('GET',
                                               '/dummy_resources/:(id)',
                                               _goose_handler)
        request_exts.append(req_ext1)

        def _bands_handler(req, res):
            # NOTE: This only handles JSON responses.
            # You can use content type header to test for XML.
            data = jsonutils.loads(res.body)
            data['FOXNSOX:big_bands'] = 'Pig Bands!'
            res.body = jsonutils.dump_as_bytes(data)
            return res

        req_ext2 = extensions.RequestExtension('GET',
                                               '/dummy_resources/:(id)',
                                               _bands_handler)
        request_exts.append(req_ext2)
        return request_exts

    def _add_tweedle_handler(self, input_dict, req, id):
        return "Tweedle {0} Added.".format(
            input_dict['FOXNSOX:add_tweedle']['name'])

    def _delete_tweedle_handler(self, input_dict, req, id):
        return "Tweedle {0} Deleted.".format(
            input_dict['FOXNSOX:delete_tweedle']['name'])


neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_address_scope.py

# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import contextlib

import mock
import netaddr
from neutron_lib.api.definitions import address_scope as apidef
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants
from neutron_lib import context
from neutron_lib.plugins import directory
import webob.exc

from neutron.db import address_scope_db
from neutron.db import db_base_plugin_v2
from neutron.extensions import address_scope as ext_address_scope
from neutron.tests.unit.db import test_db_base_plugin_v2

DB_PLUGIN_KLASS = ('neutron.tests.unit.extensions.test_address_scope.'
                   'AddressScopeTestPlugin')


class AddressScopeTestExtensionManager(object):

    def get_resources(self):
        return ext_address_scope.Address_scope.get_resources()

    def get_actions(self):
        return []

    def get_request_extensions(self):
        return []


class AddressScopeTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):

    def _create_address_scope(self, fmt, ip_version=constants.IP_VERSION_4,
                              expected_res_status=None, admin=False,
                              **kwargs):
        address_scope = {'address_scope': {}}
        address_scope['address_scope']['ip_version'] = ip_version
        for k, v in kwargs.items():
            address_scope['address_scope'][k] = str(v)

        address_scope_req = self.new_create_request('address-scopes',
                                                    address_scope, fmt)

        if not admin:
            neutron_context = context.Context('',
                                              kwargs.get('tenant_id',
                                                         self._tenant_id))
            address_scope_req.environ['neutron.context'] = neutron_context

        address_scope_res = address_scope_req.get_response(self.ext_api)
        if expected_res_status:
            self.assertEqual(expected_res_status,
                             address_scope_res.status_int)
        return address_scope_res

    def _make_address_scope(self, fmt, ip_version, admin=False, **kwargs):
        res = self._create_address_scope(fmt, ip_version,
                                         admin=admin, **kwargs)
        if res.status_int >= webob.exc.HTTPClientError.code:
            raise webob.exc.HTTPClientError(code=res.status_int)
        return self.deserialize(fmt, res)

    @contextlib.contextmanager
    def address_scope(self, ip_version=constants.IP_VERSION_4,
                      admin=False, **kwargs):
        addr_scope = self._make_address_scope(self.fmt, ip_version,
                                              admin, **kwargs)
        yield addr_scope

    def _test_create_address_scope(self, ip_version=constants.IP_VERSION_4,
                                   admin=False, expected=None, **kwargs):
        keys = kwargs.copy()
        keys.setdefault('tenant_id', self._tenant_id)
        with self.address_scope(ip_version,
                                admin=admin, **keys) as addr_scope:
            keys['ip_version'] = ip_version
            self._validate_resource(addr_scope, keys, 'address_scope')
            if expected:
                self._compare_resource(addr_scope, expected,
                                       'address_scope')
        return addr_scope

    def _test_update_address_scope(self, addr_scope_id, data, admin=False,
                                   expected=None, tenant_id=None):
        update_req = self.new_update_request(
            'address-scopes', data, addr_scope_id)
        update_req.environ['neutron.context'] = context.Context(
            '', tenant_id or self._tenant_id, is_admin=admin)

        update_res = update_req.get_response(self.ext_api)
        if expected:
            addr_scope = self.deserialize(self.fmt, update_res)
            self._compare_resource(addr_scope, expected, 'address_scope')
            return addr_scope

        return update_res


class AddressScopeTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
                             address_scope_db.AddressScopeDbMixin):
    __native_pagination_support = True
    __native_sorting_support = True

    supported_extension_aliases = [apidef.ALIAS]


class TestAddressScope(AddressScopeTestCase):

    def setUp(self):
        plugin = DB_PLUGIN_KLASS
        ext_mgr = AddressScopeTestExtensionManager()
        super(TestAddressScope, self).setUp(plugin=plugin, ext_mgr=ext_mgr)

    def test_create_address_scope_ipv4(self):
        expected_addr_scope = {'name': 'foo-address-scope',
                               'tenant_id': self._tenant_id,
                               'shared': False,
                               'ip_version': constants.IP_VERSION_4}
        self._test_create_address_scope(name='foo-address-scope',
                                        expected=expected_addr_scope)

    def test_create_address_scope_ipv6(self):
        expected_addr_scope = {'name': 'foo-address-scope',
                               'tenant_id': self._tenant_id,
                               'shared': False,
                               'ip_version': constants.IP_VERSION_6}
        self._test_create_address_scope(constants.IP_VERSION_6,
                                        name='foo-address-scope',
                                        expected=expected_addr_scope)

    def test_create_address_scope_empty_name(self):
        expected_addr_scope = {'name': '',
                               'tenant_id': self._tenant_id,
                               'shared': False}
        self._test_create_address_scope(name='',
                                        expected=expected_addr_scope)

        # no name specified
        self._test_create_address_scope(expected=expected_addr_scope)

    def test_create_address_scope_shared_admin(self):
        expected_addr_scope = {'name': 'foo-address-scope', 'shared': True}
        self._test_create_address_scope(name='foo-address-scope',
                                        admin=True, shared=True,
                                        expected=expected_addr_scope)

    def test_created_address_scope_shared_non_admin(self):
        res = self._create_address_scope(self.fmt,
                                         name='foo-address-scope',
                                         tenant_id=self._tenant_id,
                                         admin=False, shared=True)
        self.assertEqual(webob.exc.HTTPForbidden.code, res.status_int)

    def test_created_address_scope_specify_id(self):
        res = self._create_address_scope(self.fmt,
                                         name='foo-address-scope',
                                         id='foo-id')
        self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def test_delete_address_scope(self):
        with self.address_scope(name='foo-address-scope') as addr_scope:
            self._delete('address-scopes',
                         addr_scope['address_scope']['id'])
            self._show('address-scopes',
                       addr_scope['address_scope']['id'],
                       expected_code=webob.exc.HTTPNotFound.code)

    def test_update_address_scope(self):
        addr_scope = self._test_create_address_scope(
            name='foo-address-scope')
        data = {'address_scope': {'name': 'bar-address-scope'}}
        self._test_update_address_scope(addr_scope['address_scope']['id'],
                                        data,
                                        expected=data['address_scope'])

    def test_update_address_scope_shared_true_admin(self):
        addr_scope = self._test_create_address_scope(
            name='foo-address-scope')
        data = {'address_scope': {'shared': True}}
        self._test_update_address_scope(addr_scope['address_scope']['id'],
                                        data, admin=True,
                                        expected=data['address_scope'])

    def test_update_address_scope_shared_true_non_admin(self):
        addr_scope = self._test_create_address_scope(
            name='foo-address-scope')
        data = {'address_scope': {'shared': True}}
        res = self._test_update_address_scope(
            addr_scope['address_scope']['id'], data, admin=False)
        self.assertEqual(webob.exc.HTTPForbidden.code, res.status_int)

    def test_update_address_scope_shared_false_admin(self):
        addr_scope = self._test_create_address_scope(
            name='foo-address-scope', admin=True, shared=True)
        data = {'address_scope': {'shared': False}}
        res = self._test_update_address_scope(
            addr_scope['address_scope']['id'], data, admin=True)
        self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def test_get_address_scope(self):
        addr_scope = self._test_create_address_scope(
            name='foo-address-scope')
        req = self.new_show_request('address-scopes',
                                    addr_scope['address_scope']['id'])
        res = self.deserialize(self.fmt, req.get_response(self.ext_api))
        self.assertEqual(addr_scope['address_scope']['id'],
                         res['address_scope']['id'])

    def test_get_address_scope_different_tenants_not_shared(self):
        addr_scope = self._test_create_address_scope(
            name='foo-address-scope')
        req = self.new_show_request('address-scopes',
                                    addr_scope['address_scope']['id'])
        neutron_context = context.Context('', 'not-the-owner')
        req.environ['neutron.context'] = neutron_context
        res = req.get_response(self.ext_api)
        self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)

    def test_get_address_scope_different_tenants_shared(self):
        addr_scope = self._test_create_address_scope(
            name='foo-address-scope', shared=True, admin=True)
        req = self.new_show_request('address-scopes',
                                    addr_scope['address_scope']['id'])
        neutron_context = context.Context('', 'test-tenant-2')
        req.environ['neutron.context'] = neutron_context
        res = self.deserialize(self.fmt, req.get_response(self.ext_api))
        self.assertEqual(addr_scope['address_scope']['id'],
                         res['address_scope']['id'])

    def test_list_address_scopes(self):
        self._test_create_address_scope(name='foo-address-scope')
        self._test_create_address_scope(constants.IP_VERSION_6,
                                        name='bar-address-scope')
        res = self._list('address-scopes')
        self.assertEqual(2, len(res['address_scopes']))

    def test_list_address_scopes_different_tenants_shared(self):
        self._test_create_address_scope(name='foo-address-scope',
                                        shared=True, admin=True)
        admin_res = self._list('address-scopes')
        mortal_res = self._list(
            'address-scopes',
            neutron_context=context.Context('', 'not-the-owner'))
        self.assertEqual(1, len(admin_res['address_scopes']))
        self.assertEqual(1, len(mortal_res['address_scopes']))

    def test_list_address_scopes_different_tenants_not_shared(self):
        self._test_create_address_scope(constants.IP_VERSION_6,
                                        name='foo-address-scope')
        admin_res = self._list('address-scopes')
        mortal_res = self._list(
            'address-scopes',
            neutron_context=context.Context('', 'not-the-owner'))
        self.assertEqual(1, len(admin_res['address_scopes']))
        self.assertEqual(0, len(mortal_res['address_scopes']))


class TestSubnetPoolsWithAddressScopes(AddressScopeTestCase):

    def setUp(self):
        plugin = DB_PLUGIN_KLASS
        ext_mgr = AddressScopeTestExtensionManager()
        super(TestSubnetPoolsWithAddressScopes, self).setUp(
            plugin=plugin, ext_mgr=ext_mgr)

    def _test_create_subnetpool(self, prefixes, expected=None,
                                admin=False, **kwargs):
        keys = kwargs.copy()
        keys.setdefault('tenant_id', self._tenant_id)
        with self.subnetpool(prefixes, admin, **keys) as subnetpool:
            self._validate_resource(subnetpool, keys, 'subnetpool')
            if expected:
                self._compare_resource(subnetpool, expected, 'subnetpool')
        return subnetpool

    def test_create_subnetpool_associate_address_scope(self):
        with self.address_scope(name='foo-address-scope') as addr_scope:
            address_scope_id = addr_scope['address_scope']['id']
            subnet = netaddr.IPNetwork('10.10.10.0/24')
            expected = {'address_scope_id': address_scope_id}
            self._test_create_subnetpool([subnet.cidr], expected=expected,
                                         name='foo-subnetpool',
                                         min_prefixlen='21',
                                         address_scope_id=address_scope_id)

    def test_create_subnetpool_associate_invalid_address_scope(self):
        self.assertRaises(
            webob.exc.HTTPClientError, self._test_create_subnetpool, [],
            min_prefixlen='21', address_scope_id='foo-addr-scope-id')

    def test_create_subnetpool_assoc_address_scope_with_prefix_intersect(
            self):
        with self.address_scope(name='foo-address-scope') as addr_scope:
            address_scope_id = addr_scope['address_scope']['id']
            subnet = netaddr.IPNetwork('10.10.10.0/24')
            expected = {'address_scope_id': address_scope_id}
            self._test_create_subnetpool([subnet.cidr], expected=expected,
                                         name='foo-subnetpool',
                                         min_prefixlen='21',
                                         address_scope_id=address_scope_id)
            overlap_subnet = netaddr.IPNetwork('10.10.10.10/24')
            self.assertRaises(
                webob.exc.HTTPClientError, self._test_create_subnetpool,
                [overlap_subnet.cidr], min_prefixlen='21',
                address_scope_id=address_scope_id)

    def test_update_subnetpool_associate_address_scope(self):
        subnet = netaddr.IPNetwork('10.10.10.0/24')
        initial_subnetpool = self._test_create_subnetpool([subnet.cidr],
                                                          name='foo-sp',
                                                          min_prefixlen='21')
        with self.address_scope(name='foo-address-scope') as addr_scope:
            address_scope_id = addr_scope['address_scope']['id']
            data = {'subnetpool': {'address_scope_id': address_scope_id}}
            req = self.new_update_request(
                'subnetpools', data,
                initial_subnetpool['subnetpool']['id'])
            api = self._api_for_resource('subnetpools')
            res = self.deserialize(self.fmt, req.get_response(api))
            self._compare_resource(res, data['subnetpool'], 'subnetpool')

    def test_update_subnetpool_associate_invalid_address_scope(self):
        subnet = netaddr.IPNetwork('10.10.10.0/24')
        initial_subnetpool = self._test_create_subnetpool([subnet.cidr],
                                                          name='foo-sp',
                                                          min_prefixlen='21')
        data = {'subnetpool': {'address_scope_id': 'foo-addr-scope-id'}}
        req = self.new_update_request(
            'subnetpools', data, initial_subnetpool['subnetpool']['id'])
        api = self._api_for_resource('subnetpools')
        res = req.get_response(api)
        self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)

    def test_update_subnetpool_disassociate_address_scope(self):
        with self.address_scope(name='foo-address-scope') as addr_scope:
            address_scope_id = addr_scope['address_scope']['id']
            subnet = netaddr.IPNetwork('10.10.10.0/24')
            expected = {'address_scope_id': address_scope_id}
            initial_subnetpool = self._test_create_subnetpool(
                [subnet.cidr], expected=expected, name='foo-sp',
                min_prefixlen='21', address_scope_id=address_scope_id)

            data = {'subnetpool': {'address_scope_id': None}}
            req = self.new_update_request(
                'subnetpools', data,
                initial_subnetpool['subnetpool']['id'])
            api = self._api_for_resource('subnetpools')
            res = self.deserialize(self.fmt, req.get_response(api))
            self._compare_resource(res, data['subnetpool'], 'subnetpool')

    def test_update_subnetpool_associate_another_address_scope(self):
        with self.address_scope(name='foo-address-scope') as addr_scope:
            address_scope_id = addr_scope['address_scope']['id']
            subnet = netaddr.IPNetwork('10.10.10.0/24')
            expected = {'address_scope_id': address_scope_id}
            initial_subnetpool = self._test_create_subnetpool(
                [subnet.cidr], expected=expected, name='foo-sp',
                min_prefixlen='21', address_scope_id=address_scope_id)

            with self.address_scope(name='foo-address-scope') as other_a_s:
                other_a_s_id = other_a_s['address_scope']['id']
                update_data = {'subnetpool': {'address_scope_id':
                                              other_a_s_id}}
                req = self.new_update_request(
                    'subnetpools', update_data,
                    initial_subnetpool['subnetpool']['id'])
                api = self._api_for_resource('subnetpools')
                res = self.deserialize(self.fmt, req.get_response(api))
                self._compare_resource(res, update_data['subnetpool'],
                                       'subnetpool')

    def _test_update_subnetpool_address_scope_notify(self, as_change=True):
        with self.address_scope(name='foo-address-scope') as addr_scope:
            foo_as_id = addr_scope['address_scope']['id']
            subnet = netaddr.IPNetwork('10.10.10.0/24')
            initial_subnetpool = self._test_create_subnetpool(
                [subnet.cidr], name='foo-sp', min_prefixlen='21',
                address_scope_id=foo_as_id)
            subnetpool_id = initial_subnetpool['subnetpool']['id']

            with self.address_scope(name='bar-address-scope') as other_as, \
                    self.network() as network:
                data = {'subnet': {
                        'network_id': network['network']['id'],
                        'subnetpool_id': subnetpool_id,
                        'prefixlen': 24,
                        'ip_version': constants.IP_VERSION_4,
                        'tenant_id': network['network']['tenant_id']}}
                req = self.new_create_request('subnets', data)
                subnet = self.deserialize(self.fmt,
                                          req.get_response(self.api))

                with mock.patch.object(registry, 'publish') as publish:
                    plugin = db_base_plugin_v2.NeutronDbPluginV2()
                    plugin.is_address_scope_owned_by_tenant = mock.Mock(
                        return_value=True)
                    plugin._validate_address_scope_id = mock.Mock()
                    ctx = context.get_admin_context()

                    bar_as_id = other_as['address_scope']['id']
                    data = {'subnetpool': {
                            'name': 'bar-sp'}}
                    if as_change:
                        data['subnetpool']['address_scope_id'] = bar_as_id

                    updated_sp = plugin.update_subnetpool(
                        ctx, subnetpool_id, data)

                    self.assertEqual('bar-sp', updated_sp['name'])
                    if as_change:
                        self.assertEqual(bar_as_id,
                                         updated_sp['address_scope_id'])
                        publish.assert_called_once_with(
                            resources.SUBNETPOOL_ADDRESS_SCOPE,
                            events.AFTER_UPDATE,
                            plugin.update_subnetpool,
                            payload=mock.ANY)
                        payload = publish.mock_calls[0][2]['payload']
                        self.assertEqual(ctx, payload.context)
                        self.assertEqual(subnetpool_id,
                                         payload.resource_id)
                    else:
                        self.assertEqual(foo_as_id,
                                         updated_sp['address_scope_id'])
                        self.assertFalse(publish.called)

    def test_update_subnetpool_address_scope_notify(self):
        self._test_update_subnetpool_address_scope_notify()

    def test_not_update_subnetpool_address_scope_not_notify(self):
        self._test_update_subnetpool_address_scope_notify(False)

    def test_network_create_contain_address_scope_attr(self):
        with self.network() as network:
            result = self._show('networks', network['network']['id'])
            keys = [apidef.IPV4_ADDRESS_SCOPE,
                    apidef.IPV6_ADDRESS_SCOPE]
            for k in keys:
                # Correlated address scopes should initially be None
                self.assertIsNone(result['network'][k])

    def test_correlate_network_with_address_scope(self):
        with self.address_scope(name='v4-as') as v4_addr_scope, \
                self.address_scope(
                    name='v6-as',
                    ip_version=constants.IP_VERSION_6) as v6_addr_scope, \
                self.network() as network:
            v4_as_id = v4_addr_scope['address_scope']['id']
            subnet = netaddr.IPNetwork('10.10.10.0/24')
            v4_subnetpool = self._test_create_subnetpool(
                [subnet.cidr], name='v4-sp',
                min_prefixlen='24', address_scope_id=v4_as_id)
            v4_subnetpool_id = v4_subnetpool['subnetpool']['id']
            v6_as_id = v6_addr_scope['address_scope']['id']
            subnet = netaddr.IPNetwork('fd5c:6ee1:c7ae::/64')
            v6_subnetpool = self._test_create_subnetpool(
                [subnet.cidr], name='v6-sp',
                min_prefixlen='64', address_scope_id=v6_as_id)
            v6_subnetpool_id = v6_subnetpool['subnetpool']['id']

            data = {'subnet': {
                    'network_id': network['network']['id'],
                    'subnetpool_id': v4_subnetpool_id,
                    'ip_version': constants.IP_VERSION_4,
                    'tenant_id': network['network']['tenant_id']}}
            req = self.new_create_request('subnets', data)
            self.deserialize(self.fmt, req.get_response(self.api))
            data['subnet']['subnetpool_id'] = v6_subnetpool_id
            data['subnet']['ip_version'] = constants.IP_VERSION_6
            req = self.new_create_request('subnets', data)
            self.deserialize(self.fmt, req.get_response(self.api))

            result = self._show('networks', network['network']['id'])
            self.assertEqual(
                v4_as_id, result['network'][apidef.IPV4_ADDRESS_SCOPE])
            self.assertEqual(
                v6_as_id, result['network'][apidef.IPV6_ADDRESS_SCOPE])

    def test_delete_address_scope_in_use(self):
        with self.address_scope(name='foo-address-scope') as addr_scope:
            address_scope_id = addr_scope['address_scope']['id']
            subnet = netaddr.IPNetwork('10.10.10.0/24')
            expected = {'address_scope_id': address_scope_id}
            self._test_create_subnetpool([subnet.cidr], expected=expected,
                                         name='foo-subnetpool',
                                         min_prefixlen='21',
                                         address_scope_id=address_scope_id)
            self._delete('address-scopes', address_scope_id,
                         expected_code=webob.exc.HTTPConflict.code)

    def test_add_subnetpool_address_scope_wrong_address_family(self):
        with self.address_scope(constants.IP_VERSION_6,
                                name='foo-address-scope') as addr_scope:
            address_scope_id = addr_scope['address_scope']['id']
            subnet = netaddr.IPNetwork('10.10.10.0/24')
            self.assertRaises(webob.exc.HTTPClientError,
                              self._test_create_subnetpool,
                              [subnet.cidr], name='foo-subnetpool',
                              min_prefixlen='21',
                              address_scope_id=address_scope_id)

    def test_update_subnetpool_associate_address_scope_wrong_family(self):
        with self.address_scope(constants.IP_VERSION_6,
                                name='foo-address-scope') as addr_scope:
            address_scope_id = addr_scope['address_scope']['id']
            subnet = netaddr.IPNetwork('2001:db8::/64')
            expected = {'address_scope_id': address_scope_id}
            initial_subnetpool = self._test_create_subnetpool(
                [subnet.cidr], expected=expected, name='foo-sp',
                min_prefixlen='64', address_scope_id=address_scope_id)

            with self.address_scope(name='foo-address-scope') as other_a_s:
                other_a_s_id = other_a_s['address_scope']['id']
                update_data = {'subnetpool': {'address_scope_id':
                                              other_a_s_id}}
                req = self.new_update_request(
                    'subnetpools', update_data,
                    initial_subnetpool['subnetpool']['id'])
                api = self._api_for_resource('subnetpools')
                res = req.get_response(api)
                self.assertEqual(webob.exc.HTTPBadRequest.code,
                                 res.status_int)

    def test_create_two_subnets_different_subnetpools_same_network(self):
        with self.address_scope(constants.IP_VERSION_4,
                                name='foo-address-scope') as addr_scope:
            addr_scope = addr_scope['address_scope']
            with self.subnetpool(
                    ['10.10.0.0/16'], name='subnetpool_a',
                    tenant_id=addr_scope['tenant_id'],
                    default_prefixlen=24,
                    address_scope_id=addr_scope['id']) as subnetpool_a,\
                    self.subnetpool(
                        ['10.20.0.0/16'], name='subnetpool_b',
                        tenant_id=addr_scope['tenant_id'],
                        default_prefixlen=24,
                        address_scope_id=addr_scope['id']) as subnetpool_b:
                subnetpool_a = subnetpool_a['subnetpool']
                subnetpool_b = subnetpool_b['subnetpool']
                with self.network(
                        tenant_id=addr_scope['tenant_id']) as network:
                    subnet_a = self._make_subnet(
                        self.fmt, network, constants.ATTR_NOT_SPECIFIED,
                        None, subnetpool_id=subnetpool_a['id'],
                        ip_version=constants.IP_VERSION_4,
                        tenant_id=addr_scope['tenant_id'])
                    subnet_b = self._make_subnet(
                        self.fmt, network, constants.ATTR_NOT_SPECIFIED,
                        None, subnetpool_id=subnetpool_b['id'],
                        ip_version=constants.IP_VERSION_4,
                        tenant_id=addr_scope['tenant_id'])
                    # Look up subnet counts and perform assertions
                    ctx = context.Context('', addr_scope['tenant_id'])
                    pl = directory.get_plugin()
                    total_count = pl.get_subnets_count(
                        ctx,
                        filters={'network_id':
                                 [network['network']['id']]})
                    subnets_pool_a_count = pl.get_subnets_count(
                        ctx,
                        filters={'id': [subnet_a['subnet']['id']],
                                 'subnetpool_id': [subnetpool_a['id']],
                                 'network_id':
                                 [network['network']['id']]})
                    subnets_pool_b_count = pl.get_subnets_count(
                        ctx,
                        filters={'id': [subnet_b['subnet']['id']],
                                 'subnetpool_id': [subnetpool_b['id']],
                                 'network_id':
                                 [network['network']['id']]})
                    self.assertEqual(2, total_count)
                    self.assertEqual(1, subnets_pool_a_count)
                    self.assertEqual(1, subnets_pool_b_count)

    def test_block_update_subnetpool_network_affinity(self):
        with self.address_scope(constants.IP_VERSION_4,
                                name='scope-a') as scope_a,\
                self.address_scope(constants.IP_VERSION_4,
                                   name='scope-b') as scope_b:
            scope_a = scope_a['address_scope']
            scope_b = scope_b['address_scope']
            with self.subnetpool(
                    ['10.10.0.0/16'], name='subnetpool_a',
                    tenant_id=scope_a['tenant_id'],
                    default_prefixlen=24,
                    address_scope_id=scope_a['id']) as subnetpool_a,\
                    self.subnetpool(
                        ['10.20.0.0/16'], name='subnetpool_b',
                        tenant_id=scope_a['tenant_id'],
                        default_prefixlen=24,
                        address_scope_id=scope_a['id']) as subnetpool_b:
                subnetpool_a = subnetpool_a['subnetpool']
                subnetpool_b = subnetpool_b['subnetpool']
                with self.network(
                        tenant_id=scope_a['tenant_id']) as network:
                    self._make_subnet(
                        self.fmt, network, constants.ATTR_NOT_SPECIFIED,
                        None, subnetpool_id=subnetpool_a['id'],
                        ip_version=constants.IP_VERSION_4,
                        tenant_id=scope_a['tenant_id'])
                    self._make_subnet(
                        self.fmt, network, constants.ATTR_NOT_SPECIFIED,
                        None, subnetpool_id=subnetpool_b['id'],
                        ip_version=constants.IP_VERSION_4,
                        tenant_id=scope_a['tenant_id'])
                    # Attempt to update subnetpool_b's address scope and
                    # assert failure.
                    data = {'subnetpool':
                            {'address_scope_id': scope_b['id']}}
                    req = self.new_update_request('subnetpools', data,
                                                  subnetpool_b['id'])
                    api = self._api_for_resource('subnetpools')
                    res = req.get_response(api)
                    self.assertEqual(webob.exc.HTTPBadRequest.code,
                                     res.status_int)

    def test_ipv6_pd_add_non_pd_subnet_to_same_network(self):
        with self.address_scope(constants.IP_VERSION_6,
                                name='foo-address-scope') as addr_scope:
            addr_scope = addr_scope['address_scope']
            with self.subnetpool(
                    ['2001:db8:1234::/48'], name='non_pd_pool',
                    tenant_id=addr_scope['tenant_id'],
                    default_prefixlen=64,
                    address_scope_id=addr_scope['id']) as non_pd_pool:
                non_pd_pool = non_pd_pool['subnetpool']
                with self.network(
                        tenant_id=addr_scope['tenant_id']) as network:
                    with self.subnet(
                            cidr=None, network=network,
                            ip_version=constants.IP_VERSION_6,
                            subnetpool_id=constants.IPV6_PD_POOL_ID,
                            ipv6_ra_mode=constants.IPV6_SLAAC,
                            ipv6_address_mode=constants.IPV6_SLAAC):
                        res = self._create_subnet(
                            self.fmt, cidr=None,
                            net_id=network['network']['id'],
                            subnetpool_id=non_pd_pool['id'],
                            tenant_id=addr_scope['tenant_id'],
                            ip_version=constants.IP_VERSION_6)
                        self.assertEqual(webob.exc.HTTPBadRequest.code,
                                         res.status_int)

    def test_ipv6_non_pd_add_pd_subnet_to_same_network(self):
        with self.address_scope(constants.IP_VERSION_6,
                                name='foo-address-scope') as addr_scope:
            addr_scope = addr_scope['address_scope']
            with self.subnetpool(
                    ['2001:db8:1234::/48'], name='non_pd_pool',
                    tenant_id=addr_scope['tenant_id'],
                    default_prefixlen=64,
                    address_scope_id=addr_scope['id']) as non_pd_pool:
                non_pd_pool = non_pd_pool['subnetpool']
                with self.network(
                        tenant_id=addr_scope['tenant_id']) as network:
                    with self.subnet(
                            cidr=None, network=network,
                            ip_version=constants.IP_VERSION_6,
                            subnetpool_id=non_pd_pool['id']):
                        res = self._create_subnet(
                            self.fmt, cidr=None,
                            net_id=network['network']['id'],
                            tenant_id=addr_scope['tenant_id'],
                            subnetpool_id=constants.IPV6_PD_POOL_ID,
                            ip_version=constants.IP_VERSION_6,
                            ipv6_ra_mode=constants.IPV6_SLAAC,
                            ipv6_address_mode=constants.IPV6_SLAAC)
                        self.assertEqual(webob.exc.HTTPBadRequest.code,
                                         res.status_int)


neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_agent.py

# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
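# Editor's note (added): test_dead_agent below lowers agent_down_time and
# sleeps past it, after which the agent must report alive=False. A
# minimal sketch of that liveness rule (hypothetical helper, not the
# AgentDbMixin API; timestamps are seconds since the epoch):
def _is_agent_alive_sketch(last_heartbeat, agent_down_time, now):
    return (now - last_heartbeat) <= agent_down_time
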
import time from neutron_lib.api.definitions import agent as agent_apidef from neutron_lib import constants from neutron_lib import context from oslo_config import cfg from oslo_utils import uuidutils from webob import exc from neutron.db import agents_db from neutron.db import db_base_plugin_v2 from neutron.extensions import agent from neutron.tests.common import helpers from neutron.tests.unit.api.v2 import test_base from neutron.tests.unit.db import test_db_base_plugin_v2 _uuid = uuidutils.generate_uuid _get_path = test_base._get_path L3_HOSTA = 'hosta' DHCP_HOSTA = 'hosta' L3_HOSTB = 'hostb' DHCP_HOSTC = 'hostc' class AgentTestExtensionManager(object): def get_resources(self): return agent.Agent.get_resources() def get_actions(self): return [] def get_request_extensions(self): return [] # This plugin class is just for testing class TestAgentPlugin(db_base_plugin_v2.NeutronDbPluginV2, agents_db.AgentDbMixin): supported_extension_aliases = [agent_apidef.ALIAS] class AgentDBTestMixIn(object): def _list_agents(self, expected_res_status=None, neutron_context=None, query_string=None): agent_res = self._list('agents', neutron_context=neutron_context, query_params=query_string) if expected_res_status: self.assertEqual(expected_res_status, agent_res.status_int) return agent_res def _register_agent_states(self): """Register two L3 agents and two DHCP agents.""" l3_hosta = helpers._get_l3_agent_dict( L3_HOSTA, constants.L3_AGENT_MODE_LEGACY) l3_hostb = helpers._get_l3_agent_dict( L3_HOSTB, constants.L3_AGENT_MODE_LEGACY) dhcp_hosta = helpers._get_dhcp_agent_dict(DHCP_HOSTA) dhcp_hostc = helpers._get_dhcp_agent_dict(DHCP_HOSTC) helpers.register_l3_agent(host=L3_HOSTA) helpers.register_l3_agent(host=L3_HOSTB) helpers.register_dhcp_agent(host=DHCP_HOSTA) helpers.register_dhcp_agent(host=DHCP_HOSTC) return [l3_hosta, l3_hostb, dhcp_hosta, dhcp_hostc] def _register_dvr_agents(self): dvr_snat_agent = helpers.register_l3_agent( host=L3_HOSTA, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT) dvr_agent = helpers.register_l3_agent( host=L3_HOSTB, agent_mode=constants.L3_AGENT_MODE_DVR) return [dvr_snat_agent, dvr_agent] def _register_l3_agent(self, host): helpers.register_l3_agent(host) class AgentDBTestCase(AgentDBTestMixIn, test_db_base_plugin_v2.NeutronDbPluginV2TestCase): fmt = 'json' def setUp(self): plugin = 'neutron.tests.unit.extensions.test_agent.TestAgentPlugin' # for these tests we need to enable overlapping ips cfg.CONF.set_default('allow_overlapping_ips', True) ext_mgr = AgentTestExtensionManager() super(AgentDBTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) self.adminContext = context.get_admin_context() def test_create_agent(self): data = {'agent': {}} _req = self.new_create_request('agents', data, self.fmt) _req.environ['neutron.context'] = context.Context( '', 'tenant_id') res = _req.get_response(self.ext_api) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_list_agent(self): agents = self._register_agent_states() res = self._list('agents') self.assertEqual(len(agents), len(res['agents'])) def test_show_agent(self): self._register_agent_states() agents = self._list_agents( query_string='binary=neutron-l3-agent') self.assertEqual(2, len(agents['agents'])) agent = self._show('agents', agents['agents'][0]['id']) self.assertEqual('neutron-l3-agent', agent['agent']['binary']) def test_update_agent(self): self._register_agent_states() agents = self._list_agents( query_string='binary=neutron-l3-agent&host=' + L3_HOSTB) self.assertEqual(1, len(agents['agents'])) com_id = 
agents['agents'][0]['id'] agent = self._show('agents', com_id) new_agent = {} new_agent['agent'] = {} new_agent['agent']['admin_state_up'] = False new_agent['agent']['description'] = 'description' self._update('agents', com_id, new_agent) agent = self._show('agents', com_id) self.assertFalse(agent['agent']['admin_state_up']) self.assertEqual('description', agent['agent']['description']) def test_dead_agent(self): cfg.CONF.set_override('agent_down_time', 1) self._register_agent_states() time.sleep(1.5) agents = self._list_agents( query_string='binary=neutron-l3-agent&host=' + L3_HOSTB) self.assertFalse(agents['agents'][0]['alive']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_availability_zone.py0000644000175000017500000001674500000000000030310 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import agent as agent_apidef from neutron_lib.api.definitions import availability_zone as az_def from neutron_lib.api.definitions import availability_zone_filter as azf_def from neutron_lib import context from neutron_lib.exceptions import availability_zone as az_exc from neutron.db import agents_db from neutron.db import db_base_plugin_v2 from neutron.extensions import agent from neutron.extensions import availability_zone as az_ext from neutron.tests.common import helpers from neutron.tests.unit.db import test_db_base_plugin_v2 class AZExtensionManager(object): def get_resources(self): agent.Agent().update_attributes_map(az_def.RESOURCE_ATTRIBUTE_MAP) return (az_ext.Availability_zone.get_resources() + agent.Agent.get_resources()) def get_actions(self): return [] def get_request_extensions(self): return [] class AZTestPlugin(db_base_plugin_v2.NeutronDbPluginV2, agents_db.AgentDbMixin): supported_extension_aliases = [agent_apidef.ALIAS, az_def.ALIAS, azf_def.ALIAS] class AZTestCommon(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def _register_azs(self): self.agent1 = helpers.register_dhcp_agent(host='host1', az='nova1') self.agent2 = helpers.register_dhcp_agent(host='host2', az='nova2') self.agent3 = helpers.register_l3_agent(host='host2', az='nova2') self.agent4 = helpers.register_l3_agent(host='host3', az='nova3') self.agent5 = helpers.register_l3_agent(host='host4', az='nova2') class TestAZAgentCase(AZTestCommon): def setUp(self): plugin = ('neutron.tests.unit.extensions.' 
'test_availability_zone.AZTestPlugin') ext_mgr = AZExtensionManager() super(TestAZAgentCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) def test_list_availability_zones(self): self._register_azs() helpers.set_agent_admin_state(self.agent3['id'], admin_state_up=False) helpers.set_agent_admin_state(self.agent4['id'], admin_state_up=False) expected = [ {'name': 'nova1', 'resource': 'network', 'state': 'available'}, {'name': 'nova2', 'resource': 'network', 'state': 'available'}, {'name': 'nova2', 'resource': 'router', 'state': 'available'}, {'name': 'nova3', 'resource': 'router', 'state': 'unavailable'}] res = self._list('availability_zones') azs = res['availability_zones'] self.assertItemsEqual(expected, azs) # not admin case ctx = context.Context('', 'noadmin') res = self._list('availability_zones', neutron_context=ctx) azs = res['availability_zones'] self.assertItemsEqual(expected, azs) def test_list_availability_zones_with_filter(self): self._register_azs() helpers.set_agent_admin_state(self.agent3['id'], admin_state_up=False) helpers.set_agent_admin_state(self.agent4['id'], admin_state_up=False) expected = [ {'name': 'nova1', 'resource': 'network', 'state': 'available'}, {'name': 'nova2', 'resource': 'network', 'state': 'available'}, {'name': 'nova2', 'resource': 'router', 'state': 'available'}, {'name': 'nova3', 'resource': 'router', 'state': 'unavailable'}] res = self._list('availability_zones') azs = res['availability_zones'] self.assertItemsEqual(expected, azs) # list with filter of 'name' res = self._list('availability_zones', query_params="name=nova1") azs = res['availability_zones'] self.assertItemsEqual(expected[:1], azs) # list with filter of 'resource' res = self._list('availability_zones', query_params="resource=router") azs = res['availability_zones'] self.assertItemsEqual(expected[-2:], azs) # list with filter of 'state' as 'available' res = self._list('availability_zones', query_params="state=available") azs = res['availability_zones'] self.assertItemsEqual(expected[:3], azs) # list with filter of 'state' as 'unavailable' res = self._list('availability_zones', query_params="state=unavailable") azs = res['availability_zones'] self.assertItemsEqual(expected[-1:], azs) def test_list_agent_with_az(self): helpers.register_dhcp_agent(host='host1', az='nova1') res = self._list('agents') self.assertEqual('nova1', res['agents'][0]['availability_zone']) def test_validate_availability_zones(self): self._register_azs() ctx = context.Context('', 'tenant_id') self.plugin.validate_availability_zones(ctx, 'network', ['nova1', 'nova2']) self.plugin.validate_availability_zones(ctx, 'router', ['nova2', 'nova3']) self.assertRaises(az_exc.AvailabilityZoneNotFound, self.plugin.validate_availability_zones, ctx, 'router', ['nova1']) class TestAZNetworkCase(AZTestCommon): def setUp(self): ext_mgr = AZExtensionManager() super(TestAZNetworkCase, self).setUp(plugin='ml2', ext_mgr=ext_mgr) def test_availability_zones_in_create_response(self): with self.network() as net: self.assertIn('availability_zone_hints', net['network']) self.assertIn('availability_zones', net['network']) def test_create_network_with_az(self): self._register_azs() az_hints = ['nova1'] with self.network(availability_zone_hints=az_hints) as net: res = self._show('networks', net['network']['id']) self.assertItemsEqual(az_hints, res['network']['availability_zone_hints']) def test_create_network_with_azs(self): self._register_azs() az_hints = ['nova1', 'nova2'] with self.network(availability_zone_hints=az_hints) as net: res = 
self._show('networks', net['network']['id']) self.assertItemsEqual(az_hints, res['network']['availability_zone_hints']) def test_create_network_without_az(self): with self.network() as net: res = self._show('networks', net['network']['id']) self.assertEqual([], res['network']['availability_zone_hints']) def test_create_network_with_empty_az(self): with self.network(availability_zone_hints=[]) as net: res = self._show('networks', net['network']['id']) self.assertEqual([], res['network']['availability_zone_hints']) def test_create_network_with_not_exist_az(self): res = self._create_network(self.fmt, 'net', True, availability_zone_hints=['nova3']) self.assertEqual(404, res.status_int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_data_plane_status.py0000644000175000017500000001335300000000000030266 0ustar00coreycorey00000000000000# Copyright (c) 2017 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from webob import exc as web_exc from neutron_lib.api.definitions import data_plane_status as dps_lib from neutron_lib.api.definitions import port as port_def from neutron_lib import constants from neutron_lib.db import api as db_api from neutron_lib.db import resource_extend from neutron_lib.tests.unit import fake_notifier from neutron.db import data_plane_status_db as dps_db from neutron.db import db_base_plugin_v2 from neutron.extensions import data_plane_status as dps_ext from neutron.tests.unit.db import test_db_base_plugin_v2 class DataPlaneStatusTestExtensionManager(object): def get_resources(self): return [] def get_actions(self): return [] def get_request_extensions(self): return [] def get_extended_resources(self, version): return dps_ext.Data_plane_status.get_extended_resources(version) @resource_extend.has_resource_extenders class DataPlaneStatusExtensionTestPlugin(db_base_plugin_v2.NeutronDbPluginV2, dps_db.DataPlaneStatusMixin): supported_extension_aliases = [dps_lib.ALIAS] @staticmethod @resource_extend.extends([port_def.COLLECTION_NAME]) def _extend_port_data_plane_status(port_res, port_db): return dps_db.DataPlaneStatusMixin._extend_port_data_plane_status( port_res, port_db) def update_port(self, context, id, port): with db_api.CONTEXT_WRITER.using(context): ret_port = super(DataPlaneStatusExtensionTestPlugin, self).update_port(context, id, port) if dps_lib.DATA_PLANE_STATUS in port['port']: self._process_update_port_data_plane_status(context, port['port'], ret_port) return ret_port class DataPlaneStatusExtensionTestCase( test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def setUp(self): plugin = ('neutron.tests.unit.extensions.test_data_plane_status.' 
'DataPlaneStatusExtensionTestPlugin') ext_mgr = DataPlaneStatusTestExtensionManager() super(DataPlaneStatusExtensionTestCase, self).setUp( plugin=plugin, ext_mgr=ext_mgr) def test_update_port_data_plane_status(self): with self.port() as port: data = {'port': {'data_plane_status': constants.ACTIVE}} req = self.new_update_request(port_def.COLLECTION_NAME, data, port['port']['id']) res = req.get_response(self.api) p = self.deserialize(self.fmt, res)['port'] self.assertEqual(200, res.status_code) self.assertEqual(p[dps_lib.DATA_PLANE_STATUS], constants.ACTIVE) def test_port_create_data_plane_status_default_none(self): with self.port(name='port1') as port: req = self.new_show_request( port_def.COLLECTION_NAME, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertIsNone(res['port'][dps_lib.DATA_PLANE_STATUS]) def test_port_create_invalid_attr_data_plane_status(self): kwargs = {dps_lib.DATA_PLANE_STATUS: constants.ACTIVE} with self.network() as network: with self.subnet(network=network): res = self._create_port(self.fmt, network['network']['id'], arg_list=(dps_lib.DATA_PLANE_STATUS,), **kwargs) self.assertEqual(400, res.status_code) def test_port_update_preserves_data_plane_status(self): with self.port(name='port1') as port: res = self._update(port_def.COLLECTION_NAME, port['port']['id'], {'port': {dps_lib.DATA_PLANE_STATUS: constants.ACTIVE}}) res = self._update(port_def.COLLECTION_NAME, port['port']['id'], {'port': {'name': 'port2'}}) self.assertEqual(res['port']['name'], 'port2') self.assertEqual(res['port'][dps_lib.DATA_PLANE_STATUS], constants.ACTIVE) def test_port_update_with_invalid_data_plane_status(self): with self.port(name='port1') as port: self._update(port_def.COLLECTION_NAME, port['port']['id'], {'port': {dps_lib.DATA_PLANE_STATUS: "abc"}}, web_exc.HTTPBadRequest.code) def test_port_update_event_on_data_plane_status(self): expect_notify = set(['port.update.start', 'port.update.end']) with self.port(name='port1') as port: self._update(port_def.COLLECTION_NAME, port['port']['id'], {'port': {dps_lib.DATA_PLANE_STATUS: constants.ACTIVE}}) notify = set(n['event_type'] for n in fake_notifier.NOTIFICATIONS) duplicated_notify = expect_notify & notify self.assertEqual(expect_notify, duplicated_notify) fake_notifier.reset() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_default_subnetpools.py0000644000175000017500000002064100000000000030652 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
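# These tests cover the default_subnetpools extension: subnet creation
# with 'use_default_subnetpool': True, flipping a pool's 'is_default'
# flag through the API, and the IPv6 prefix-delegation fallback modes.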
import netaddr from neutron_lib.api.definitions import default_subnetpools as api_def from neutron_lib import constants from oslo_config import cfg import webob.exc from neutron.db import db_base_plugin_v2 from neutron.extensions import default_subnetpools from neutron.tests.unit.db import test_db_base_plugin_v2 class DefaultSubnetpoolsExtensionManager(object): def get_resources(self): return [] def get_actions(self): return [] def get_request_extensions(self): return [] def get_extended_resources(self, version): extension = default_subnetpools.Default_subnetpools() return extension.get_extended_resources(version) class DefaultSubnetpoolsExtensionTestPlugin( db_base_plugin_v2.NeutronDbPluginV2): """Test plugin to mixin the default subnet pools extension. """ supported_extension_aliases = [api_def.ALIAS, "subnet_allocation"] class DefaultSubnetpoolsExtensionTestCase( test_db_base_plugin_v2.NeutronDbPluginV2TestCase): """Test API extension default_subnetpools attributes. """ def setUp(self): plugin = ('neutron.tests.unit.extensions.test_default_subnetpools.' + 'DefaultSubnetpoolsExtensionTestPlugin') ext_mgr = DefaultSubnetpoolsExtensionManager() super(DefaultSubnetpoolsExtensionTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) def _create_subnet_using_default_subnetpool( self, network_id, tenant_id, ip_version=constants.IP_VERSION_4, **kwargs): data = {'subnet': { 'network_id': network_id, 'ip_version': str(ip_version), 'tenant_id': tenant_id, 'use_default_subnetpool': True}} data['subnet'].update(kwargs) subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) return self.deserialize(self.fmt, res)['subnet'] def _update_subnetpool(self, subnetpool_id, **data): update_req = self.new_update_request( 'subnetpools', {'subnetpool': data}, subnetpool_id) res = update_req.get_response(self.api) return self.deserialize(self.fmt, res)['subnetpool'] def test_create_subnet_only_ip_version_v4(self): with self.network() as network: tenant_id = network['network']['tenant_id'] subnetpool_prefix = '10.0.0.0/8' with self.subnetpool(prefixes=[subnetpool_prefix], admin=True, name="My subnet pool", tenant_id=tenant_id, min_prefixlen='25', is_default=True) as subnetpool: subnetpool_id = subnetpool['subnetpool']['id'] subnet = self._create_subnet_using_default_subnetpool( network['network']['id'], tenant_id, prefixlen='27') ip_net = netaddr.IPNetwork(subnet['cidr']) self.assertIn(ip_net, netaddr.IPNetwork(subnetpool_prefix)) self.assertEqual(27, ip_net.prefixlen) self.assertEqual(subnetpool_id, subnet['subnetpool_id']) def test_convert_subnetpool_to_default_subnetpool(self): with self.network() as network: tenant_id = network['network']['tenant_id'] subnetpool_prefix = '10.0.0.0/8' with self.subnetpool(prefixes=[subnetpool_prefix], admin=True, name="My subnet pool", tenant_id=tenant_id, min_prefixlen='25', is_default=False) as subnetpool: self.assertFalse(subnetpool['subnetpool']['is_default']) subnetpool_id = subnetpool['subnetpool']['id'] updated_subnetpool = self._update_subnetpool( subnetpool_id, is_default=True) self.assertTrue(updated_subnetpool['is_default']) subnet = self._create_subnet_using_default_subnetpool( network['network']['id'], tenant_id) ip_net = netaddr.IPNetwork(subnet['cidr']) self.assertIn(ip_net, netaddr.IPNetwork(subnetpool_prefix)) self.assertEqual(subnetpool_id, subnet['subnetpool_id']) def test_convert_default_subnetpool_to_non_default(self): with self.network() as network: tenant_id = network['network']['tenant_id'] subnetpool_prefix = 
'10.0.0.0/8' with self.subnetpool(prefixes=[subnetpool_prefix], admin=True, name="My subnet pool", tenant_id=tenant_id, min_prefixlen='25', is_default=True) as subnetpool: self.assertTrue(subnetpool['subnetpool']['is_default']) subnetpool_id = subnetpool['subnetpool']['id'] updated_subnetpool = self._update_subnetpool( subnetpool_id, is_default=False) self.assertFalse(updated_subnetpool['is_default']) def test_create_subnet_only_ip_version_v6(self): # this test mirrors its v4 counterpart with self.network() as network: tenant_id = network['network']['tenant_id'] subnetpool_prefix = '2000::/56' with self.subnetpool(prefixes=[subnetpool_prefix], admin=True, name="My ipv6 subnet pool", tenant_id=tenant_id, min_prefixlen='64', is_default=True) as subnetpool: subnetpool_id = subnetpool['subnetpool']['id'] cfg.CONF.set_override('ipv6_pd_enabled', False) subnet = self._create_subnet_using_default_subnetpool( network['network']['id'], tenant_id, ip_version=constants.IP_VERSION_6) self.assertEqual(subnetpool_id, subnet['subnetpool_id']) ip_net = netaddr.IPNetwork(subnet['cidr']) self.assertIn(ip_net, netaddr.IPNetwork(subnetpool_prefix)) self.assertEqual(64, ip_net.prefixlen) def _test_create_subnet_V6_pd_modes(self, ra_addr_mode, expect_fail=False): cfg.CONF.set_override('ipv6_pd_enabled', True) with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'ip_version': constants.IP_VERSION_6, 'tenant_id': network['network']['tenant_id'], 'use_default_subnetpool': True}} if ra_addr_mode: data['subnet']['ipv6_ra_mode'] = ra_addr_mode data['subnet']['ipv6_address_mode'] = ra_addr_mode subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) if expect_fail: self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) else: subnet = self.deserialize(self.fmt, res)['subnet'] self.assertEqual(constants.IPV6_PD_POOL_ID, subnet['subnetpool_id']) def test_create_subnet_V6_pd_slaac(self): self._test_create_subnet_V6_pd_modes('slaac') def test_create_subnet_V6_pd_stateless(self): self._test_create_subnet_V6_pd_modes('dhcpv6-stateless') def test_create_subnet_V6_pd_statefull(self): self._test_create_subnet_V6_pd_modes('dhcpv6-statefull', expect_fail=True) def test_create_subnet_V6_pd_no_mode(self): self._test_create_subnet_V6_pd_modes(None, expect_fail=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_dns.py0000644000175000017500000006135700000000000025366 0ustar00coreycorey00000000000000# Copyright 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
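# These tests cover the DNS integration extension: dns_name handling on
# ports, the generated dns_assignment entries (hostname/FQDN per fixed
# IP), and PQDN/FQDN validation against the configured dns_domain.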
import math import netaddr from neutron_lib.api.definitions import dns as dns_apidef from neutron_lib.api.definitions import l3 as l3_apdef from neutron_lib import constants from neutron_lib import context from neutron_lib.db import constants as db_const from neutron_lib.plugins import directory from oslo_config import cfg from neutron.common import utils from neutron.db import db_base_plugin_v2 from neutron.extensions import dns from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit.plugins.ml2 import test_plugin class DnsExtensionManager(object): def get_resources(self): return [] def get_actions(self): return [] def get_request_extensions(self): return [] def get_extended_resources(self, version): return dns.Dns().get_extended_resources(version) class DnsExtensionTestPlugin(db_base_plugin_v2.NeutronDbPluginV2): """Test plugin to mixin the DNS Integration extensions. """ supported_extension_aliases = [dns_apidef.ALIAS, l3_apdef.ALIAS] class DnsExtensionTestCase(test_plugin.Ml2PluginV2TestCase): """Test API extension dns attributes. """ _extension_drivers = ['dns'] def setUp(self): cfg.CONF.set_override('extension_drivers', self._extension_drivers, group='ml2') super(DnsExtensionTestCase, self).setUp() def _create_network(self, fmt, name, admin_state_up, arg_list=None, set_context=False, tenant_id=None, **kwargs): new_arg_list = ('dns_domain',) if arg_list is not None: new_arg_list = arg_list + new_arg_list return super(DnsExtensionTestCase, self)._create_network(fmt, name, admin_state_up, arg_list=new_arg_list, set_context=set_context, tenant_id=tenant_id, **kwargs) def _create_port(self, fmt, net_id, expected_res_status=None, arg_list=None, set_context=False, tenant_id=None, **kwargs): tenant_id = tenant_id or self._tenant_id data = {'port': {'network_id': net_id, 'tenant_id': tenant_id}} for arg in (('admin_state_up', 'device_id', 'mac_address', 'name', 'fixed_ips', 'tenant_id', 'device_owner', 'security_groups', 'dns_name') + (arg_list or ())): # Arg must be present if arg in kwargs: data['port'][arg] = kwargs[arg] # create a dhcp port device id if one hasn't been supplied if ('device_owner' in kwargs and kwargs['device_owner'] == constants.DEVICE_OWNER_DHCP and 'host' in kwargs and 'device_id' not in kwargs): device_id = utils.get_dhcp_agent_device_id(net_id, kwargs['host']) data['port']['device_id'] = device_id port_req = self.new_create_request('ports', data, fmt) if set_context and tenant_id: # create a specific auth context for this request port_req.environ['neutron.context'] = context.Context( '', tenant_id) port_res = port_req.get_response(self.api) if expected_res_status: self.assertEqual(expected_res_status, port_res.status_int) return port_res def _test_list_resources(self, resource, items, neutron_context=None, query_params=None): res = self._list('%ss' % resource, neutron_context=neutron_context, query_params=query_params) resource = resource.replace('-', '_') self.assertItemsEqual([i['id'] for i in res['%ss' % resource]], [i[resource]['id'] for i in items]) return res def test_create_port_json(self): keys = [('admin_state_up', True), ('status', self.port_create_status)] with self.port(name='myname') as port: for k, v in keys: self.assertEqual(port['port'][k], v) self.assertIn('mac_address', port['port']) ips = port['port']['fixed_ips'] self.assertEqual(1, len(ips)) subnet_db = directory.get_plugin().get_subnet( context.get_admin_context(), ips[0]['subnet_id']) self.assertIn(netaddr.IPAddress(ips[0]['ip_address']), 
netaddr.IPSet(netaddr.IPNetwork(subnet_db['cidr']))) self.assertEqual('myname', port['port']['name']) self._verify_dns_assigment(port['port'], ips_list=[ips[0]['ip_address']]) def test_list_ports(self): # for this test we need to enable overlapping ips cfg.CONF.set_default('allow_overlapping_ips', True) with self.port() as v1, self.port() as v2, self.port() as v3: ports = (v1, v2, v3) res = self._test_list_resources('port', ports) for port in res['ports']: self._verify_dns_assigment( port, ips_list=[port['fixed_ips'][0]['ip_address']]) def test_show_port(self): with self.port() as port: req = self.new_show_request('ports', port['port']['id'], self.fmt) sport = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(port['port']['id'], sport['port']['id']) self._verify_dns_assigment( sport['port'], ips_list=[sport['port']['fixed_ips'][0]['ip_address']]) def test_update_port_non_default_dns_domain_with_dns_name(self): with self.port() as port: port_ip = port['port']['fixed_ips'][0]['ip_address'] cfg.CONF.set_override('dns_domain', 'example.com') data = {'port': {'admin_state_up': False, 'dns_name': 'vm1'}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(data['port']['admin_state_up'], res['port']['admin_state_up']) self._verify_dns_assigment(res['port'], ips_list=[port_ip], dns_name='vm1') def test_update_port_default_dns_domain_with_dns_name(self): with self.port() as port: port_ip = port['port']['fixed_ips'][0]['ip_address'] data = {'port': {'admin_state_up': False, 'dns_name': 'vm1'}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(data['port']['admin_state_up'], res['port']['admin_state_up']) self._verify_dns_assigment(res['port'], ips_list=[port_ip]) def _verify_dns_assigment(self, port, ips_list=None, exp_ips_ipv4=0, exp_ips_ipv6=0, ipv4_cidrs=None, ipv6_cidrs=None, dns_name=''): ips_list = ips_list or [] ipv4_cidrs = ipv4_cidrs or [] ipv6_cidrs = ipv6_cidrs or [] self.assertEqual(dns_name, port['dns_name']) dns_assignment = port['dns_assignment'] if ips_list: self.assertEqual(len(dns_assignment), len(ips_list)) ips_set = set(ips_list) else: self.assertEqual(len(dns_assignment), exp_ips_ipv4 + exp_ips_ipv6) ipv4_count = 0 ipv6_count = 0 subnets_v4 = [netaddr.IPNetwork(cidr) for cidr in ipv4_cidrs] subnets_v6 = [netaddr.IPNetwork(cidr) for cidr in ipv6_cidrs] request_dns_name, request_fqdn = self._get_request_hostname_and_fqdn( dns_name) for assignment in dns_assignment: if ips_list: self.assertIn(assignment['ip_address'], ips_set) ips_set.remove(assignment['ip_address']) else: ip = netaddr.IPAddress(assignment['ip_address']) if ip.version == 4: self.assertTrue(self._verify_ip_in_subnet(ip, subnets_v4)) ipv4_count += 1 else: self.assertTrue(self._verify_ip_in_subnet(ip, subnets_v6)) ipv6_count += 1 hostname, fqdn = self._get_hostname_and_fqdn(request_dns_name, request_fqdn, assignment) self.assertEqual(assignment['hostname'], hostname) self.assertEqual(assignment['fqdn'], fqdn) if ips_list: self.assertFalse(ips_set) else: self.assertEqual(ipv4_count, exp_ips_ipv4) self.assertEqual(ipv6_count, exp_ips_ipv6) def _get_dns_domain(self): if not cfg.CONF.dns_domain: return '' if cfg.CONF.dns_domain.endswith('.'): return cfg.CONF.dns_domain return '%s.' 
% cfg.CONF.dns_domain def _get_request_hostname_and_fqdn(self, dns_name): request_dns_name = '' request_fqdn = '' dns_domain = self._get_dns_domain() if dns_name and dns_domain and dns_domain != 'openstacklocal.': request_dns_name = dns_name request_fqdn = request_dns_name if not request_dns_name.endswith('.'): request_fqdn = '%s.%s' % (dns_name, dns_domain) return request_dns_name, request_fqdn def _get_hostname_and_fqdn(self, request_dns_name, request_fqdn, assignment): dns_domain = self._get_dns_domain() if request_dns_name: hostname = request_dns_name fqdn = request_fqdn else: hostname = 'host-%s' % assignment['ip_address'].replace( '.', '-').replace(':', '-') fqdn = hostname if dns_domain: fqdn = '%s.%s' % (hostname, dns_domain) return hostname, fqdn def _verify_ip_in_subnet(self, ip, subnets_list): for subnet in subnets_list: if ip in subnet: return True return False def test_update_port_update_ip(self): """Test update of port IP. Check that a configured IP 10.0.0.2 is replaced by 10.0.0.10. """ with self.subnet() as subnet: fixed_ip_data = [{'ip_address': '10.0.0.2'}] with self.port(subnet=subnet, fixed_ips=fixed_ip_data) as port: ips = port['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) data = {'port': {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': "10.0.0.10"}]}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) ips = res['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual(ips[0]['ip_address'], '10.0.0.10') self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) self._verify_dns_assigment(res['port'], ips_list=['10.0.0.10']) def test_update_port_update_ip_address_only(self): with self.subnet() as subnet: fixed_ip_data = [{'ip_address': '10.0.0.2'}] with self.port(subnet=subnet, fixed_ips=fixed_ip_data) as port: ips = port['port']['fixed_ips'] self.assertEqual(1, len(ips)) self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) data = {'port': {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': "10.0.0.10"}, {'ip_address': "10.0.0.2"}]}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) ips = res['port']['fixed_ips'] self.assertEqual(2, len(ips)) self.assertIn({'ip_address': '10.0.0.2', 'subnet_id': subnet['subnet']['id']}, ips) self.assertIn({'ip_address': '10.0.0.10', 'subnet_id': subnet['subnet']['id']}, ips) self._verify_dns_assigment(res['port'], ips_list=['10.0.0.10', '10.0.0.2']) def test_create_port_with_multiple_ipv4_and_ipv6_subnets(self): res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets() self.assertEqual(201, res.status_code) def test_create_port_multiple_v4_v6_subnets_pqdn_and_dns_domain_no_period( self): cfg.CONF.set_override('dns_domain', 'example.com') res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets( dns_name='vm1') self.assertEqual(201, res.status_code) def test_create_port_multiple_v4_v6_subnets_pqdn_and_dns_domain_period( self): cfg.CONF.set_override('dns_domain', 'example.com.') res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets( dns_name='vm1') self.assertEqual(201, res.status_code) def test_create_port_multiple_v4_v6_subnets_pqdn_and_no_dns_domain( self): cfg.CONF.set_override('dns_domain', '') res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets() self.assertEqual(201, res.status_code) def 
test_create_port_multiple_v4_v6_subnets_fqdn_and_dns_domain_no_period( self): cfg.CONF.set_override('dns_domain', 'example.com') res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets( dns_name='vm1.example.com.') self.assertEqual(201, res.status_code) def test_create_port_multiple_v4_v6_subnets_fqdn_and_dns_domain_period( self): cfg.CONF.set_override('dns_domain', 'example.com.') res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets( dns_name='vm1.example.com.') self.assertEqual(201, res.status_code) def test_create_port_multiple_v4_v6_subnets_fqdn_default_domain_period( self): cfg.CONF.set_override('dns_domain', 'openstacklocal.') res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets() self.assertEqual(201, res.status_code) def test_create_port_multiple_v4_v6_subnets_bad_fqdn_and_dns_domain(self): cfg.CONF.set_override('dns_domain', 'example.com') res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets( dns_name='vm1.bad-domain.com.') self.assertEqual(400, res.status_code) expected_error = ('The dns_name passed is a FQDN. Its higher level ' 'labels must be equal to the dns_domain option in ' 'neutron.conf') self.assertIn(expected_error, res.text) def test_create_port_multiple_v4_v6_subnets_bad_pqdn_and_dns_domain(self): cfg.CONF.set_override('dns_domain', 'example.com') num_labels = int( math.floor(db_const.FQDN_FIELD_SIZE / constants.DNS_LABEL_MAX_LEN)) filler_len = int( math.floor(db_const.FQDN_FIELD_SIZE % constants.DNS_LABEL_MAX_LEN)) dns_name = (('a' * (constants.DNS_LABEL_MAX_LEN - 1) + '.') * num_labels + 'a' * filler_len) res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets( dns_name=dns_name) self.assertEqual(400, res.status_code) expected_error = ("When the two are concatenated to form a FQDN " "(with a '.' 
at the end), the resulting length " "exceeds the maximum size") self.assertIn(expected_error, res.text) def _test_create_port_with_multiple_ipv4_and_ipv6_subnets(self, dns_name=''): """Test port create with multiple IPv4, IPv6 DHCP/SLAAC subnets.""" res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) sub_dicts = [ {'gateway': '10.0.0.1', 'cidr': '10.0.0.0/24', 'ip_version': constants.IP_VERSION_4, 'ra_addr_mode': None}, {'gateway': '10.0.1.1', 'cidr': '10.0.1.0/24', 'ip_version': constants.IP_VERSION_4, 'ra_addr_mode': None}, {'gateway': 'fe80::1', 'cidr': 'fe80::/64', 'ip_version': constants.IP_VERSION_6, 'ra_addr_mode': constants.IPV6_SLAAC}, {'gateway': 'fe81::1', 'cidr': 'fe81::/64', 'ip_version': constants.IP_VERSION_6, 'ra_addr_mode': constants.IPV6_SLAAC}, {'gateway': 'fe82::1', 'cidr': 'fe82::/64', 'ip_version': constants.IP_VERSION_6, 'ra_addr_mode': constants.DHCPV6_STATEFUL}, {'gateway': 'fe83::1', 'cidr': 'fe83::/64', 'ip_version': constants.IP_VERSION_6, 'ra_addr_mode': constants.DHCPV6_STATEFUL}] subnets = {} for sub_dict in sub_dicts: subnet = self._make_subnet( self.fmt, network, gateway=sub_dict['gateway'], cidr=sub_dict['cidr'], ip_version=sub_dict['ip_version'], ipv6_ra_mode=sub_dict['ra_addr_mode'], ipv6_address_mode=sub_dict['ra_addr_mode']) subnets[subnet['subnet']['id']] = sub_dict res = self._create_port(self.fmt, net_id=network['network']['id'], dns_name=dns_name) if res.status_code != 201: return res port = self.deserialize(self.fmt, res) # Since the create port request was made without a list of fixed IPs, # the port should be associated with addresses for one of the # IPv4 subnets, one of the DHCPv6 subnets, and both of the IPv6 # SLAAC subnets. self.assertEqual(4, len(port['port']['fixed_ips'])) addr_mode_count = {None: 0, constants.DHCPV6_STATEFUL: 0, constants.IPV6_SLAAC: 0} for fixed_ip in port['port']['fixed_ips']: subnet_id = fixed_ip['subnet_id'] if subnet_id in subnets: addr_mode_count[subnets[subnet_id]['ra_addr_mode']] += 1 self.assertEqual(1, addr_mode_count[None]) self.assertEqual(1, addr_mode_count[constants.DHCPV6_STATEFUL]) self.assertEqual(2, addr_mode_count[constants.IPV6_SLAAC]) self._verify_dns_assigment(port['port'], exp_ips_ipv4=1, exp_ips_ipv6=3, ipv4_cidrs=[sub_dicts[0]['cidr'], sub_dicts[1]['cidr']], ipv6_cidrs=[sub_dicts[2]['cidr'], sub_dicts[3]['cidr'], sub_dicts[4]['cidr'], sub_dicts[5]['cidr']], dns_name=dns_name) return res def test_api_extension_validation_with_bad_dns_names(self): num_labels = int( math.floor(db_const.FQDN_FIELD_SIZE / constants.DNS_LABEL_MAX_LEN)) filler_len = int( math.floor(db_const.FQDN_FIELD_SIZE % constants.DNS_LABEL_MAX_LEN)) dns_names = [555, '\f\n\r', '.', '-vm01', '_vm01', 'vm01-', '-vm01.test1', 'vm01.-test1', 'vm01._test1', 'vm01.test1-', 'vm01.te$t1', 'vm0#1.test1.', 'vm01.123.', '-' + 'a' * constants.DNS_LABEL_MAX_LEN, 'a' * (constants.DNS_LABEL_MAX_LEN + 1), ('a' * (constants.DNS_LABEL_MAX_LEN - 1) + '.') * num_labels + 'a' * (filler_len + 1)] res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) sub_dict = {'gateway': '10.0.0.1', 'cidr': '10.0.0.0/24', 'ip_version': constants.IP_VERSION_4, 'ra_addr_mode': None} self._make_subnet(self.fmt, network, gateway=sub_dict['gateway'], cidr=sub_dict['cidr'], ip_version=sub_dict['ip_version'], ipv6_ra_mode=sub_dict['ra_addr_mode'], ipv6_address_mode=sub_dict['ra_addr_mode']) for dns_name in dns_names: res = self._create_port(self.fmt, 
net_id=network['network']['id'], dns_name=dns_name) self.assertEqual(400, res.status_code) error_message = res.json['NeutronError']['message'] is_expected_message = ( 'cannot be converted to lowercase string' in error_message or 'not a valid PQDN or FQDN. Reason:' in error_message or 'must be string type' in error_message) self.assertTrue(is_expected_message) def test_api_extension_validation_with_good_dns_names(self): cfg.CONF.set_override('dns_domain', 'example.com') higher_labels_len = len('example.com.') num_labels = int( math.floor((db_const.FQDN_FIELD_SIZE - higher_labels_len) / constants.DNS_LABEL_MAX_LEN)) filler_len = int( math.floor((db_const.FQDN_FIELD_SIZE - higher_labels_len) % constants.DNS_LABEL_MAX_LEN)) dns_names = ['', 'www.1000.com', 'vM01', 'vm01.example.com.', '8vm01', 'vm-01.example.com.', 'vm01.test', 'vm01.test.example.com.', 'vm01.test-100', 'vm01.test-100.example.com.', 'a' * constants.DNS_LABEL_MAX_LEN, ('a' * constants.DNS_LABEL_MAX_LEN) + '.example.com.', ('a' * (constants.DNS_LABEL_MAX_LEN - 1) + '.') * num_labels + 'a' * (filler_len - 1)] res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) sub_dict = {'gateway': '10.0.0.1', 'cidr': '10.0.0.0/24', 'ip_version': constants.IP_VERSION_4, 'ra_addr_mode': None} self._make_subnet(self.fmt, network, gateway=sub_dict['gateway'], cidr=sub_dict['cidr'], ip_version=sub_dict['ip_version'], ipv6_ra_mode=sub_dict['ra_addr_mode'], ipv6_address_mode=sub_dict['ra_addr_mode']) for dns_name in dns_names: res = self._create_port(self.fmt, net_id=network['network']['id'], dns_name=dns_name) self.assertEqual(201, res.status_code) class DnsExtensionTestNetworkDnsDomain( test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def setUp(self): plugin = ('neutron.tests.unit.extensions.test_dns.' + 'DnsExtensionTestPlugin') ext_mgr = DnsExtensionManager() super(DnsExtensionTestNetworkDnsDomain, self).setUp( plugin=plugin, ext_mgr=ext_mgr) def test_update_network_dns_domain(self): with self.network() as network: data = {'network': {'dns_domain': 'my-domain.org.'}} req = self.new_update_request('networks', data, network['network']['id']) res = req.get_response(self.api) self.assertEqual(200, res.status_code) self.assertNotIn('dns_domain', self.deserialize(self.fmt, res)['network']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_expose_port_forwarding_in_fip.py0000644000175000017500000004315400000000000032712 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
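# These tests verify that floating IP API responses expose the
# port forwarding collection, that forwardings can be created against a
# FIP (including the same external port with different protocols), and
# that a port already used by a forwarding cannot also be associated
# with another floating IP.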
import mock from neutron_lib.api.definitions import dns as dns_apidef from neutron_lib.api.definitions import dvr as dvr_apidef from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib.api.definitions import floating_ip_port_forwarding as apidef from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib import constants from neutron_lib import context from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from oslo_utils import uuidutils from webob import exc from neutron.db import l3_dvr_db from neutron.db import l3_fip_qos from neutron.extensions import floating_ip_port_forwarding as pf_ext from neutron.extensions import l3 from neutron.objects.qos import policy from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit.extensions import test_l3 PF_PLUGIN_NAME = ('neutron.services.portforwarding.' 'pf_plugin.PortForwardingPlugin') L3_PLUGIN = ('neutron.tests.unit.extensions.' 'test_expose_port_forwarding_in_fip.' 'TestL3PorForwardingServicePlugin') CORE_PLUGIN = 'neutron.tests.unit.extensions.test_l3.TestNoL3NatPlugin' class TestL3PorForwardingServicePlugin(test_l3.TestL3NatServicePlugin, l3_fip_qos.FloatingQoSDbMixin): supported_extension_aliases = [l3_apidef.ALIAS, dns_apidef.ALIAS, dvr_apidef.ALIAS, "qos-fip"] def _get_expected(ref): want_fields = [apidef.INTERNAL_IP_ADDRESS, apidef.PROTOCOL, apidef.INTERNAL_PORT, apidef.EXTERNAL_PORT] expect = { key: value for key, value in ref[apidef.RESOURCE_NAME].items() if key in want_fields} return expect class ExtendFipPortForwardingExtensionManager(object): def get_resources(self): return (l3.L3.get_resources() + pf_ext.Floating_ip_port_forwarding.get_resources()) def get_actions(self): return [] def get_request_extensions(self): return [] class TestExtendFipPortForwardingExtension( test_db_base_plugin_v2.NeutronDbPluginV2TestCase, test_l3.L3NatTestCaseMixin): def setUp(self): mock.patch('neutron.api.rpc.handlers.resources_rpc.' 
'ResourcesPushRpcApi').start() svc_plugins = (PF_PLUGIN_NAME, L3_PLUGIN, 'neutron.services.qos.qos_plugin.QoSPlugin') ext_mgr = ExtendFipPortForwardingExtensionManager() super(TestExtendFipPortForwardingExtension, self).setUp( plugin=CORE_PLUGIN, ext_mgr=ext_mgr, service_plugins=svc_plugins) self.l3_plugin = directory.get_plugin(plugin_constants.L3) self.pf_plugin = directory.get_plugin(plugin_constants.PORTFORWARDING) ctx = context.get_admin_context() self.policy_1 = policy.QosPolicy(ctx, id=uuidutils.generate_uuid(), project_id='tenant', name='pol1', rules=[]) self.policy_1.create() self.policy_2 = policy.QosPolicy(ctx, id=uuidutils.generate_uuid(), project_id='tenant', name='pol2', rules=[]) self.policy_2.create() def test_create_floatingip_port_forwarding_same_port_diff_protocol(self): port_forwarding = { apidef.RESOURCE_NAME: {apidef.EXTERNAL_PORT: 2225, apidef.INTERNAL_PORT: 25, apidef.INTERNAL_PORT_ID: None, apidef.PROTOCOL: constants.PROTO_NAME_TCP, apidef.INTERNAL_IP_ADDRESS: None}} ctx = context.get_admin_context() kwargs = {'arg_list': (extnet_apidef.EXTERNAL,), extnet_apidef.EXTERNAL: True} with self.network(**kwargs) as extnet, self.network() as innet: with self.subnet(network=extnet, cidr='200.0.0.0/22'), \ self.subnet(network=innet, cidr='10.0.0.0/24') as insub, \ self.router() as router: fip = self._make_floatingip(self.fmt, extnet['network']['id']) self._add_external_gateway_to_router(router['router']['id'], extnet['network']['id']) self._router_interface_action('add', router['router']['id'], insub['subnet']['id'], None) with self.port(subnet=insub) as port1: update_dict1 = { apidef.INTERNAL_PORT_ID: port1['port']['id'], apidef.INTERNAL_IP_ADDRESS: port1['port']['fixed_ips'][0]['ip_address']} port_forwarding[apidef.RESOURCE_NAME].update(update_dict1) self.pf_plugin.create_floatingip_port_forwarding( ctx, fip['floatingip']['id'], port_forwarding) update_dict2 = { apidef.PROTOCOL: constants.PROTO_NAME_UDP } port_forwarding[apidef.RESOURCE_NAME].update(update_dict2) self.pf_plugin.create_floatingip_port_forwarding( ctx, fip['floatingip']['id'], port_forwarding) def test_get_fip_after_port_forwarding_create(self): port_forwarding = { apidef.RESOURCE_NAME: {apidef.EXTERNAL_PORT: 2225, apidef.INTERNAL_PORT: 25, apidef.INTERNAL_PORT_ID: None, apidef.PROTOCOL: "tcp", apidef.INTERNAL_IP_ADDRESS: None}} ctx = context.get_admin_context() kwargs = {'arg_list': (extnet_apidef.EXTERNAL,), extnet_apidef.EXTERNAL: True} with self.network(**kwargs) as extnet, self.network() as innet: with self.subnet(network=extnet, cidr='200.0.0.0/22'),\ self.subnet(network=innet, cidr='10.0.0.0/24') as insub,\ self.router() as router: fip = self._make_floatingip(self.fmt, extnet['network']['id']) # check the floatingip response contains port_forwarding field self.assertIn(apidef.COLLECTION_NAME, fip['floatingip']) self._add_external_gateway_to_router(router['router']['id'], extnet['network']['id']) self._router_interface_action('add', router['router']['id'], insub['subnet']['id'], None) with self.port(subnet=insub) as port1,\ self.port(subnet=insub) as port2: update_dict1 = { apidef.INTERNAL_PORT_ID: port1['port']['id'], apidef.INTERNAL_IP_ADDRESS: port1['port']['fixed_ips'][0]['ip_address']} port_forwarding[apidef.RESOURCE_NAME].update(update_dict1) self.pf_plugin.create_floatingip_port_forwarding( ctx, fip['floatingip']['id'], port_forwarding) body = self._show('floatingips', fip['floatingip']['id']) self.assertEqual( 1, len(body['floatingip'][apidef.COLLECTION_NAME])) expect_result1 = 
_get_expected(port_forwarding) self.assertEqual( expect_result1, body['floatingip'][apidef.COLLECTION_NAME][0]) update_dict2 = { apidef.EXTERNAL_PORT: 2226, apidef.INTERNAL_PORT_ID: port2['port']['id'], apidef.INTERNAL_IP_ADDRESS: port2['port']['fixed_ips'][0]['ip_address']} port_forwarding[apidef.RESOURCE_NAME].update(update_dict2) self.pf_plugin.create_floatingip_port_forwarding( ctx, fip['floatingip']['id'], port_forwarding) body = self._show('floatingips', fip['floatingip']['id']) self.assertEqual( 2, len(body['floatingip'][apidef.COLLECTION_NAME])) expect_result2 = _get_expected(port_forwarding) expect = [expect_result1, expect_result2] self.assertEqual( expect, body['floatingip'][apidef.COLLECTION_NAME]) router_id = body['floatingip']['router_id'] self.assertIsNotNone(router_id) self.l3_plugin.update_floatingip( ctx, fip['floatingip']['id'], {'floatingip': {}}) body = self._show('floatingips', fip['floatingip']['id']) self.assertEqual(router_id, body['floatingip']['router_id']) self.l3_plugin.update_floatingip( ctx, fip['floatingip']['id'], {'floatingip': {'qos_policy_id': self.policy_1.id}}) body = self._show('floatingips', fip['floatingip']['id']) self.assertEqual(router_id, body['floatingip']['router_id']) self.assertEqual(self.policy_1.id, body['floatingip']['qos_policy_id']) self.l3_plugin.update_floatingip( ctx, fip['floatingip']['id'], {'floatingip': {'qos_policy_id': self.policy_2.id}}) body = self._show('floatingips', fip['floatingip']['id']) self.assertEqual(router_id, body['floatingip']['router_id']) self.assertEqual(self.policy_2.id, body['floatingip']['qos_policy_id']) self.l3_plugin.update_floatingip( ctx, fip['floatingip']['id'], {'floatingip': {'qos_policy_id': None}}) body = self._show('floatingips', fip['floatingip']['id']) self.assertEqual(router_id, body['floatingip']['router_id']) self.assertIsNone(body['floatingip']['qos_policy_id']) def test_create_port_forwarding_and_remove_subnets(self): port_forwarding = { apidef.RESOURCE_NAME: {apidef.EXTERNAL_PORT: 2225, apidef.INTERNAL_PORT: 25, apidef.INTERNAL_PORT_ID: None, apidef.PROTOCOL: "tcp", apidef.INTERNAL_IP_ADDRESS: None}} ctx = context.get_admin_context() kwargs = {'arg_list': (extnet_apidef.EXTERNAL,), extnet_apidef.EXTERNAL: True} with self.network(**kwargs) as extnet, self.network() as innet: with self.subnet(network=extnet, cidr='200.0.0.0/22'),\ self.subnet(network=innet, cidr='10.0.0.0/24') as insub,\ self.subnet(network=innet, cidr='10.0.8.0/24') as insub2,\ self.subnet(network=innet, cidr='10.0.9.0/24') as insub3,\ self.router() as router: fip = self._make_floatingip(self.fmt, extnet['network']['id']) # check the floatingip response contains port_forwarding field self.assertIn(apidef.COLLECTION_NAME, fip['floatingip']) self._add_external_gateway_to_router(router['router']['id'], extnet['network']['id']) self._router_interface_action('add', router['router']['id'], insub['subnet']['id'], None) self._router_interface_action('add', router['router']['id'], insub2['subnet']['id'], None) self._router_interface_action('add', router['router']['id'], insub3['subnet']['id'], None) with self.port(subnet=insub) as port1,\ self.port(subnet=insub) as port2: update_dict1 = { apidef.INTERNAL_PORT_ID: port1['port']['id'], apidef.INTERNAL_IP_ADDRESS: port1['port']['fixed_ips'][0]['ip_address']} port_forwarding[apidef.RESOURCE_NAME].update(update_dict1) self.pf_plugin.create_floatingip_port_forwarding( ctx, fip['floatingip']['id'], port_forwarding) body = self._show('floatingips', fip['floatingip']['id']) 
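# The FIP detail view should now include the forwarding just created.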
self.assertEqual( 1, len(body['floatingip'][apidef.COLLECTION_NAME])) expect_result1 = _get_expected(port_forwarding) self.assertEqual( expect_result1, body['floatingip'][apidef.COLLECTION_NAME][0]) update_dict2 = { apidef.EXTERNAL_PORT: 2226, apidef.INTERNAL_PORT_ID: port2['port']['id'], apidef.INTERNAL_IP_ADDRESS: port2['port']['fixed_ips'][0]['ip_address']} port_forwarding[apidef.RESOURCE_NAME].update(update_dict2) self.pf_plugin.create_floatingip_port_forwarding( ctx, fip['floatingip']['id'], port_forwarding) body = self._show('floatingips', fip['floatingip']['id']) self.assertEqual( 2, len(body['floatingip'][apidef.COLLECTION_NAME])) expect_result2 = _get_expected(port_forwarding) expect = [expect_result1, expect_result2] self.assertEqual( expect, body['floatingip'][apidef.COLLECTION_NAME]) self._router_interface_action( 'remove', router['router']['id'], insub2['subnet']['id'], None) self._router_interface_action( 'remove', router['router']['id'], insub3['subnet']['id'], None) @mock.patch.object(l3_dvr_db.L3_NAT_with_dvr_db_mixin, '_notify_floating_ip_change') @mock.patch.object(l3_dvr_db.DVRResourceOperationHandler, '_create_dvr_floating_gw_port') def test_port_in_used_by_port_forwarding(self, mock_gw_port, mock_notify): port_forwarding = { apidef.RESOURCE_NAME: {apidef.EXTERNAL_PORT: 2225, apidef.INTERNAL_PORT: 25, apidef.INTERNAL_PORT_ID: None, apidef.PROTOCOL: "tcp", apidef.INTERNAL_IP_ADDRESS: None}} ctx = context.get_admin_context() kwargs = {'arg_list': (extnet_apidef.EXTERNAL,), extnet_apidef.EXTERNAL: True} with self.network(**kwargs) as extnet, self.network() as innet: with self.subnet(network=extnet, cidr='200.0.0.0/22'),\ self.subnet(network=innet, cidr='10.0.0.0/24') as insub,\ self.router(distributed=True) as router: fip = self._make_floatingip(self.fmt, extnet['network']['id']) # check the floatingip response contains port_forwarding field self.assertIn(apidef.COLLECTION_NAME, fip['floatingip']) self._add_external_gateway_to_router(router['router']['id'], extnet['network']['id']) self._router_interface_action('add', router['router']['id'], insub['subnet']['id'], None) with self.port(subnet=insub) as port1: update_dict1 = { apidef.INTERNAL_PORT_ID: port1['port']['id'], apidef.INTERNAL_IP_ADDRESS: port1['port']['fixed_ips'][0]['ip_address']} port_forwarding[apidef.RESOURCE_NAME].update(update_dict1) self.pf_plugin.create_floatingip_port_forwarding( ctx, fip['floatingip']['id'], port_forwarding) body = self._show('floatingips', fip['floatingip']['id']) self.assertEqual( 1, len(body['floatingip'][apidef.COLLECTION_NAME])) self._make_floatingip( self.fmt, extnet['network']['id'], port_id=port1['port']['id'], http_status=exc.HTTPBadRequest.code) fip_2 = self._make_floatingip(self.fmt, extnet['network']['id']) self._update( 'floatingips', fip_2['floatingip']['id'], {'floatingip': {'port_id': port1['port']['id']}}, expected_code=exc.HTTPBadRequest.code) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_external_net.py0000644000175000017500000002117200000000000027261 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib import constants from neutron_lib import context from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from oslo_utils import uuidutils import testtools from webob import exc from neutron.db import external_net_db from neutron.db import models_v2 from neutron.tests.unit.api.v2 import test_base from neutron.tests.unit.db import test_db_base_plugin_v2 _uuid = uuidutils.generate_uuid _get_path = test_base._get_path class ExtNetTestExtensionManager(object): def get_resources(self): return [] def get_actions(self): return [] def get_request_extensions(self): return [] class ExtNetDBTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def _create_network(self, fmt, name, admin_state_up, **kwargs): """Override the routine for allowing the router:external attribute.""" # attributes containing a colon should be passed with # a double underscore new_args = dict(zip(map(lambda x: x.replace('__', ':'), kwargs), kwargs.values())) arg_list = new_args.pop('arg_list', ()) + (extnet_apidef.EXTERNAL,) return super(ExtNetDBTestCase, self)._create_network( fmt, name, admin_state_up, arg_list=arg_list, **new_args) def setUp(self): plugin = 'neutron.tests.unit.extensions.test_l3.TestNoL3NatPlugin' ext_mgr = ExtNetTestExtensionManager() super(ExtNetDBTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) def _set_net_external(self, net_id): self._update('networks', net_id, {'network': {extnet_apidef.EXTERNAL: True}}) def test_list_nets_external(self): with self.network() as n1: self._set_net_external(n1['network']['id']) with self.network(): body = self._list('networks') self.assertEqual(2, len(body['networks'])) body = self._list('networks', query_params="%s=True" % extnet_apidef.EXTERNAL) self.assertEqual(1, len(body['networks'])) body = self._list('networks', query_params="%s=False" % extnet_apidef.EXTERNAL) self.assertEqual(1, len(body['networks'])) def test_list_nets_external_pagination(self): if self._skip_native_pagination: self.skipTest("Skip test for not implemented pagination feature") with self.network(name='net1') as n1, self.network(name='net3') as n3: self._set_net_external(n1['network']['id']) self._set_net_external(n3['network']['id']) with self.network(name='net2') as n2: self._test_list_with_pagination( 'network', (n1, n3), ('name', 'asc'), 1, 3, query_params='router:external=True') self._test_list_with_pagination( 'network', (n2, ), ('name', 'asc'), 1, 2, query_params='router:external=False') def test_get_network_succeeds_without_filter(self): plugin = directory.get_plugin() ctx = context.Context(None, None, is_admin=True) result = plugin.get_networks(ctx, filters=None) self.assertEqual([], result) def test_update_network_set_external_non_admin_fails(self): # Assert that a non-admin user cannot update the # router:external attribute with self.network(tenant_id='noadmin') as network: data = {'network': {'router:external': True}} req = self.new_update_request('networks', data, network['network']['id']) req.environ['neutron.context'] = 
context.Context('', 'noadmin') res = req.get_response(self.api) self.assertEqual(exc.HTTPForbidden.code, res.status_int) def test_update_network_external_net_with_ports_set_not_shared(self): with self.network(router__external=True, shared=True) as ext_net,\ self.subnet(network=ext_net) as ext_subnet, \ self.port(subnet=ext_subnet, tenant_id='', device_owner=constants.DEVICE_OWNER_ROUTER_SNAT): data = {'network': {'shared': False}} req = self.new_update_request('networks', data, ext_net['network']['id']) res = req.get_response(self.api) self.assertEqual(exc.HTTPOk.code, res.status_int) ctx = context.Context(None, None, is_admin=True) plugin = directory.get_plugin() result = plugin.get_networks(ctx) self.assertFalse(result[0]['shared']) def test_network_filter_hook_admin_context(self): ctx = context.Context(None, None, is_admin=True) model = models_v2.Network conditions = external_net_db._network_filter_hook(ctx, model, []) self.assertEqual([], conditions) def test_network_filter_hook_nonadmin_context(self): ctx = context.Context('edinson', 'cavani') model = models_v2.Network txt = ("networkrbacs.action = :action_1 AND " "networkrbacs.target_tenant = :target_tenant_1 OR " "networkrbacs.target_tenant = :target_tenant_2") conditions = external_net_db._network_filter_hook(ctx, model, []) self.assertEqual(conditions.__str__(), txt) # Try to concatenate conditions txt2 = (txt.replace('tenant_1', 'tenant_3'). replace('tenant_2', 'tenant_4'). replace('action_1', 'action_2')) conditions = external_net_db._network_filter_hook(ctx, model, conditions) self.assertEqual(conditions.__str__(), "%s OR %s" % (txt, txt2)) def test_create_port_external_network_non_admin_fails(self): with self.network(router__external=True) as ext_net: with self.subnet(network=ext_net) as ext_subnet: with testtools.ExpectedException( exc.HTTPClientError) as ctx_manager: with self.port(subnet=ext_subnet, set_context='True', tenant_id='noadmin'): pass self.assertEqual(403, ctx_manager.exception.code) def test_create_port_external_network_admin_succeeds(self): with self.network(router__external=True) as ext_net: with self.subnet(network=ext_net) as ext_subnet: with self.port(subnet=ext_subnet) as port: self.assertEqual(port['port']['network_id'], ext_net['network']['id']) def test_create_external_network_non_admin_fails(self): with testtools.ExpectedException(exc.HTTPClientError) as ctx_manager: with self.network(router__external=True, set_context='True', tenant_id='noadmin'): pass self.assertEqual(403, ctx_manager.exception.code) def test_create_external_network_admin_succeeds(self): with self.network(router__external=True) as ext_net: self.assertTrue(ext_net['network'][extnet_apidef.EXTERNAL]) def test_delete_network_check_disassociated_floatingips(self): l3_mock = mock.Mock() directory.add_plugin(plugin_constants.L3, l3_mock) with self.network() as net: req = self.new_delete_request('networks', net['network']['id']) res = req.get_response(self.api) self.assertEqual(exc.HTTPNoContent.code, res.status_int) (l3_mock.delete_disassociated_floatingips .assert_called_once_with(mock.ANY, net['network']['id'])) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_extra_dhcp_opt.py0000644000175000017500000003543300000000000027601 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation. 
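# The _create_network() override above maps double underscores to colons so
# that attributes such as router:external can be passed as Python keyword
# arguments. A dict-comprehension sketch of the same translation, equivalent
# to the zip/map construction used there:
def _colonify(kwargs):
    return {k.replace('__', ':'): v for k, v in kwargs.items()}

# _colonify({'router__external': True}) == {'router:external': True}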
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from neutron_lib.api.definitions import extra_dhcp_opt as edo_ext from neutron_lib import constants from neutron_lib.db import api as db_api import webob.exc from neutron.db import db_base_plugin_v2 from neutron.db import extradhcpopt_db as edo_db from neutron.tests.unit.db import test_db_base_plugin_v2 DB_PLUGIN_KLASS = ( 'neutron.tests.unit.extensions.test_extra_dhcp_opt.ExtraDhcpOptTestPlugin') class ExtraDhcpOptTestPlugin(db_base_plugin_v2.NeutronDbPluginV2, edo_db.ExtraDhcpOptMixin): """Test plugin that implements necessary calls on create/delete port for associating ports with extra dhcp options. """ supported_extension_aliases = [edo_ext.ALIAS] def create_port(self, context, port): with db_api.CONTEXT_WRITER.using(context): edos = port['port'].get(edo_ext.EXTRADHCPOPTS, []) new_port = super(ExtraDhcpOptTestPlugin, self).create_port( context, port) self._process_port_create_extra_dhcp_opts(context, new_port, edos) return new_port def update_port(self, context, id, port): with db_api.CONTEXT_WRITER.using(context): rtn_port = super(ExtraDhcpOptTestPlugin, self).update_port( context, id, port) self._update_extra_dhcp_opts_on_port(context, id, port, rtn_port) return rtn_port class ExtraDhcpOptDBTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def setUp(self, plugin=DB_PLUGIN_KLASS): super(ExtraDhcpOptDBTestCase, self).setUp(plugin=plugin) class TestExtraDhcpOpt(ExtraDhcpOptDBTestCase): def _check_opts(self, expected, returned): self.assertEqual(len(expected), len(returned)) for opt in returned: name = opt['opt_name'] for exp in expected: if (name == exp['opt_name'] and opt['ip_version'] == exp.get( 'ip_version', constants.IP_VERSION_4)): val = exp['opt_value'] break self.assertEqual(val, opt['opt_value']) def test_create_port_with_extradhcpopts(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'server-ip-address', 'opt_value': '123.123.123.456'}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123'}] params = {edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: self._check_opts(opt_list, port['port'][edo_ext.EXTRADHCPOPTS]) def test_create_port_with_none_extradhcpopts(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': None}, {'opt_name': 'server-ip-address', 'opt_value': '123.123.123.456'}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123'}] expected = [{'opt_name': 'server-ip-address', 'opt_value': '123.123.123.456'}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123'}] params = {edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: self._check_opts(expected, port['port'][edo_ext.EXTRADHCPOPTS]) def test_create_port_with_empty_router_extradhcpopts(self): opt_list = [{'opt_name': 'router', 'opt_value': ''}, {'opt_name': 'server-ip-address', 'opt_value': '123.123.123.456'}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123'}] params = 
{edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: self._check_opts(opt_list, port['port'][edo_ext.EXTRADHCPOPTS]) def test_create_port_with_extradhcpopts_ipv4_opt_version(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0', 'ip_version': constants.IP_VERSION_4}, {'opt_name': 'server-ip-address', 'opt_value': '123.123.123.456', 'ip_version': constants.IP_VERSION_4}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123', 'ip_version': constants.IP_VERSION_4}] params = {edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: self._check_opts(opt_list, port['port'][edo_ext.EXTRADHCPOPTS]) def test_create_port_with_extradhcpopts_ipv6_opt_version(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0', 'ip_version': constants.IP_VERSION_6}, {'opt_name': 'tftp-server', 'opt_value': '2001:192:168::1', 'ip_version': constants.IP_VERSION_6}] params = {edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: self._check_opts(opt_list, port['port'][edo_ext.EXTRADHCPOPTS]) def _test_update_port_with_extradhcpopts(self, opt_list, upd_opts, expected_opts): params = {edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: update_port = {'port': {edo_ext.EXTRADHCPOPTS: upd_opts}} req = self.new_update_request('ports', update_port, port['port']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPOk.code, res.status_int) port = self.deserialize('json', res) self._check_opts(expected_opts, port['port'][edo_ext.EXTRADHCPOPTS]) def test_update_port_with_extradhcpopts_with_same(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123'}, {'opt_name': 'server-ip-address', 'opt_value': '123.123.123.456'}] upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'changeme.0'}] expected_opts = opt_list[:] for i in expected_opts: if i['opt_name'] == upd_opts[0]['opt_name']: i['opt_value'] = upd_opts[0]['opt_value'] break self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) def test_update_port_with_additional_extradhcpopt(self): opt_list = [{'opt_name': 'tftp-server', 'opt_value': '123.123.123.123'}, {'opt_name': 'server-ip-address', 'opt_value': '123.123.123.456'}] upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'changeme.0'}] expected_opts = copy.deepcopy(opt_list) expected_opts.append(upd_opts[0]) self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) def test_update_port_with_extradhcpopts(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123'}, {'opt_name': 'server-ip-address', 'opt_value': '123.123.123.456'}] upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'changeme.0'}] expected_opts = copy.deepcopy(opt_list) for i in expected_opts: if i['opt_name'] == upd_opts[0]['opt_name']: i['opt_value'] = upd_opts[0]['opt_value'] break self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) def test_update_port_with_extradhcpopt_delete(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123'}, {'opt_name': 'server-ip-address', 'opt_value': '123.123.123.456'}] upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': None}] expected_opts = [] 
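# Update semantics exercised by test_update_port_with_extradhcpopt_delete
# below: sending an option with opt_value None removes it from the port. A
# minimal sketch of the update body, using the same edo_ext constant as the
# tests:
update_port = {'port': {edo_ext.EXTRADHCPOPTS: [
    {'opt_name': 'bootfile-name', 'opt_value': None}]}}
# after this PUT, only the tftp-server and server-ip-address options remain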
expected_opts = [opt for opt in opt_list if opt['opt_name'] != 'bootfile-name'] self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) def test_update_port_without_extradhcpopt_delete(self): opt_list = [] upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': None}] expected_opts = [] self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) def test_update_port_adding_extradhcpopts(self): opt_list = [] upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123'}, {'opt_name': 'server-ip-address', 'opt_value': '123.123.123.456'}] expected_opts = copy.deepcopy(upd_opts) self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) def test_update_port_with_blank_string_extradhcpopt(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123'}, {'opt_name': 'server-ip-address', 'opt_value': '123.123.123.456'}] upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': ' '}] params = {edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: update_port = {'port': {edo_ext.EXTRADHCPOPTS: upd_opts}} req = self.new_update_request('ports', update_port, port['port']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_update_port_with_blank_name_extradhcpopt(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123'}, {'opt_name': 'server-ip-address', 'opt_value': '123.123.123.456'}] upd_opts = [{'opt_name': ' ', 'opt_value': 'pxelinux.0'}] params = {edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: update_port = {'port': {edo_ext.EXTRADHCPOPTS: upd_opts}} req = self.new_update_request('ports', update_port, port['port']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_update_port_with_blank_router_extradhcpopt(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0', 'ip_version': constants.IP_VERSION_4}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123', 'ip_version': constants.IP_VERSION_4}, {'opt_name': 'router', 'opt_value': '123.123.123.1', 'ip_version': constants.IP_VERSION_4}] upd_opts = [{'opt_name': 'router', 'opt_value': '', 'ip_version': constants.IP_VERSION_4}] expected_opts = copy.deepcopy(opt_list) for i in expected_opts: if i['opt_name'] == upd_opts[0]['opt_name']: i['opt_value'] = upd_opts[0]['opt_value'] break self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) def test_update_port_with_extradhcpopts_ipv6_change_value(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0', 'ip_version': constants.IP_VERSION_6}, {'opt_name': 'tftp-server', 'opt_value': '2001:192:168::1', 'ip_version': constants.IP_VERSION_6}] upd_opts = [{'opt_name': 'tftp-server', 'opt_value': '2001:192:168::2', 'ip_version': constants.IP_VERSION_6}] expected_opts = copy.deepcopy(opt_list) for i in expected_opts: if i['opt_name'] == upd_opts[0]['opt_name']: i['opt_value'] = upd_opts[0]['opt_value'] break self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) def test_update_port_with_extradhcpopts_add_another_ver_opt(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0', 'ip_version': constants.IP_VERSION_6}, 
{'opt_name': 'tftp-server', 'opt_value': '2001:192:168::1', 'ip_version': constants.IP_VERSION_6}] upd_opts = [{'opt_name': 'tftp-server', 'opt_value': '123.123.123.123', 'ip_version': constants.IP_VERSION_4}] expected_opts = copy.deepcopy(opt_list) expected_opts.extend(upd_opts) self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_extraroute.py0000644000175000017500000006307700000000000027005 0ustar00coreycorey00000000000000# Copyright 2013, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import external_net as enet_apidef from neutron_lib.api.definitions import extraroute as xroute_apidef from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib import constants from neutron_lib import context from neutron_lib.utils import helpers from oslo_config import cfg from oslo_utils import uuidutils from webob import exc from neutron.db import extraroute_db from neutron.extensions import l3 from neutron.tests.unit.api.v2 import test_base from neutron.tests.unit.extensions import test_l3 _uuid = uuidutils.generate_uuid _get_path = test_base._get_path class ExtraRouteTestExtensionManager(object): def get_resources(self): return l3.L3.get_resources() def get_actions(self): return [] def get_request_extensions(self): return [] # This plugin class is for tests with plugin that integrates L3. 
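# An extra DHCP option is keyed by the pair (opt_name, ip_version), not by
# opt_name alone, which is why the add_another_ver_opt test above ends up
# with two tftp-server entries side by side. A sketch of the resulting pair:
opts = [
    {'opt_name': 'tftp-server', 'opt_value': '2001:192:168::1',
     'ip_version': constants.IP_VERSION_6},
    {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123',
     'ip_version': constants.IP_VERSION_4},
]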
class TestExtraRouteIntPlugin(test_l3.TestL3NatIntPlugin, extraroute_db.ExtraRoute_db_mixin): supported_extension_aliases = [enet_apidef.ALIAS, l3_apidef.ALIAS, xroute_apidef.ALIAS] # A fake l3 service plugin class with extra route capability for # plugins that delegate away L3 routing functionality class TestExtraRouteL3NatServicePlugin(test_l3.TestL3NatServicePlugin, extraroute_db.ExtraRoute_db_mixin): supported_extension_aliases = [l3_apidef.ALIAS, xroute_apidef.ALIAS] class ExtraRouteDBTestCaseBase(object): def _routes_update_prepare( self, router_id, subnet_id, port_id, routes, skip_add=False, tenant_id=None): if not skip_add: self._router_interface_action( 'add', router_id, subnet_id, port_id, tenant_id=None) ctxt = context.Context('', tenant_id) if tenant_id else None self._update('routers', router_id, {'router': {'routes': routes}}, neutron_context=ctxt) return self._show('routers', router_id) def _routes_update_cleanup(self, port_id, subnet_id, router_id, routes): self._update('routers', router_id, {'router': {'routes': routes}}) self._router_interface_action('remove', router_id, subnet_id, port_id) def test_route_update_with_one_route(self): routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}] with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: fixed_ip_data = [{'ip_address': '10.0.1.2'}] with self.port(subnet=s, fixed_ips=fixed_ip_data) as p: body = self._routes_update_prepare(r['router']['id'], None, p['port']['id'], routes) self.assertEqual(routes, body['router']['routes']) self._routes_update_cleanup(p['port']['id'], None, r['router']['id'], []) def test_route_update_with_external_route(self): my_tenant = 'tenant1' with self.subnet(cidr='10.0.1.0/24', tenant_id='notme') as ext_subnet,\ self.port(subnet=ext_subnet) as nexthop_port: nexthop_ip = nexthop_port['port']['fixed_ips'][0]['ip_address'] routes = [{'destination': '135.207.0.0/16', 'nexthop': nexthop_ip}] self._set_net_external(ext_subnet['subnet']['network_id']) ext_info = {'network_id': ext_subnet['subnet']['network_id']} with self.router( external_gateway_info=ext_info, tenant_id=my_tenant) as r: body = self._routes_update_prepare( r['router']['id'], None, None, routes, skip_add=True, tenant_id=my_tenant) self.assertEqual(routes, body['router']['routes']) def test_route_update_with_route_via_another_tenant_subnet(self): my_tenant = 'tenant1' with self.subnet(cidr='10.0.1.0/24', tenant_id='notme') as subnet,\ self.port(subnet=subnet) as nexthop_port: nexthop_ip = nexthop_port['port']['fixed_ips'][0]['ip_address'] routes = [{'destination': '135.207.0.0/16', 'nexthop': nexthop_ip}] with self.router(tenant_id=my_tenant) as r: body = self._routes_update_prepare( r['router']['id'], subnet['subnet']['id'], None, routes, tenant_id=my_tenant) self.assertEqual(routes, body['router']['routes']) def test_route_clear_routes_with_None(self): routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}, {'destination': '12.0.0.0/8', 'nexthop': '10.0.1.4'}, {'destination': '141.212.0.0/16', 'nexthop': '10.0.1.5'}] with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: fixed_ip_data = [{'ip_address': '10.0.1.2'}] with self.port(subnet=s, fixed_ips=fixed_ip_data) as p: self._routes_update_prepare(r['router']['id'], None, p['port']['id'], routes) body = self._update('routers', r['router']['id'], {'router': {'routes': None}}) self.assertEqual([], body['router']['routes']) self._routes_update_cleanup(p['port']['id'], None, r['router']['id'], []) def 
test_router_interface_in_use_by_route(self): routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}] with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: fixed_ip_data = [{'ip_address': '10.0.1.2'}] with self.port(subnet=s, fixed_ips=fixed_ip_data) as p: body = self._routes_update_prepare(r['router']['id'], None, p['port']['id'], routes) self.assertEqual(routes, body['router']['routes']) self._router_interface_action( 'remove', r['router']['id'], None, p['port']['id'], expected_code=exc.HTTPConflict.code) self._routes_update_cleanup(p['port']['id'], None, r['router']['id'], []) def test_route_update_with_multi_routes(self): routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}, {'destination': '12.0.0.0/8', 'nexthop': '10.0.1.4'}, {'destination': '141.212.0.0/16', 'nexthop': '10.0.1.5'}] with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: fixed_ip_data = [{'ip_address': '10.0.1.2'}] with self.port(subnet=s, fixed_ips=fixed_ip_data) as p: body = self._routes_update_prepare(r['router']['id'], None, p['port']['id'], routes) self.assertEqual( sorted(body['router']['routes'], key=helpers.safe_sort_key), sorted(routes, key=helpers.safe_sort_key)) self._routes_update_cleanup(p['port']['id'], None, r['router']['id'], []) def test_routes_update_for_multiple_routers(self): with self.router() as r1,\ self.router() as r2,\ self.subnet(cidr='10.0.0.0/24') as s: with self.port(subnet=s) as p1,\ self.port(subnet=s) as p2: p1_ip = p1['port']['fixed_ips'][0]['ip_address'] p2_ip = p2['port']['fixed_ips'][0]['ip_address'] routes1 = [{'destination': '135.207.0.0/16', 'nexthop': p2_ip}] routes2 = [{'destination': '12.0.0.0/8', 'nexthop': p1_ip}] body = self._routes_update_prepare(r1['router']['id'], None, p1['port']['id'], routes1) self.assertEqual(routes1, body['router']['routes']) body = self._routes_update_prepare(r2['router']['id'], None, p2['port']['id'], routes2) self.assertEqual(routes2, body['router']['routes']) self._routes_update_cleanup(p1['port']['id'], None, r1['router']['id'], []) self._routes_update_cleanup(p2['port']['id'], None, r2['router']['id'], []) def test_router_update_delete_routes(self): routes_orig = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}, {'destination': '12.0.0.0/8', 'nexthop': '10.0.1.4'}, {'destination': '141.212.0.0/16', 'nexthop': '10.0.1.5'}] routes_left = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}, {'destination': '141.212.0.0/16', 'nexthop': '10.0.1.5'}] with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: fixed_ip_data = [{'ip_address': '10.0.1.2'}] with self.port(subnet=s, fixed_ips=fixed_ip_data) as p: body = self._routes_update_prepare(r['router']['id'], None, p['port']['id'], routes_orig) self.assertEqual( sorted(body['router']['routes'], key=helpers.safe_sort_key), sorted(routes_orig, key=helpers.safe_sort_key)) body = self._routes_update_prepare(r['router']['id'], None, p['port']['id'], routes_left, skip_add=True) self.assertEqual( sorted(body['router']['routes'], key=helpers.safe_sort_key), sorted(routes_left, key=helpers.safe_sort_key)) self._routes_update_cleanup(p['port']['id'], None, r['router']['id'], []) def _test_malformed_route(self, routes): with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: with self.port(subnet=s) as p: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) self._update('routers', r['router']['id'], {'router': {'routes': routes}}, expected_code=exc.HTTPBadRequest.code) # clean-up 
self._router_interface_action('remove', r['router']['id'], None, p['port']['id']) def test_no_destination_route(self): self._test_malformed_route([{'nexthop': '10.0.1.6'}]) def test_no_nexthop_route(self): self._test_malformed_route({'destination': '135.207.0.0/16'}) def test_none_destination(self): self._test_malformed_route([{'destination': None, 'nexthop': '10.0.1.3'}]) def test_none_nexthop(self): self._test_malformed_route([{'destination': '135.207.0.0/16', 'nexthop': None}]) def test_nexthop_is_port_ip(self): with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: with self.port(subnet=s) as p: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) port_ip = p['port']['fixed_ips'][0]['ip_address'] routes = [{'destination': '135.207.0.0/16', 'nexthop': port_ip}] self._update('routers', r['router']['id'], {'router': {'routes': routes}}, expected_code=exc.HTTPBadRequest.code) # clean-up self._router_interface_action('remove', r['router']['id'], None, p['port']['id']) def test_router_update_with_too_many_routes(self): with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: with self.port(subnet=s) as p: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}, {'destination': '12.0.0.0/8', 'nexthop': '10.0.1.4'}, {'destination': '141.212.0.0/16', 'nexthop': '10.0.1.5'}, {'destination': '192.168.0.0/16', 'nexthop': '10.0.1.6'}] self._update('routers', r['router']['id'], {'router': {'routes': routes}}, expected_code=exc.HTTPBadRequest.code) # clean-up self._router_interface_action('remove', r['router']['id'], None, p['port']['id']) def test_router_update_with_dup_address(self): with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: with self.port(subnet=s) as p: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}, {'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}] self._update('routers', r['router']['id'], {'router': {'routes': routes}}, expected_code=exc.HTTPBadRequest.code) # clean-up self._router_interface_action('remove', r['router']['id'], None, p['port']['id']) def test_router_update_with_invalid_ip_address(self): with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: with self.port(subnet=s) as p: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) routes = [{'destination': '512.207.0.0/16', 'nexthop': '10.0.1.3'}] self._update('routers', r['router']['id'], {'router': {'routes': routes}}, expected_code=exc.HTTPBadRequest.code) routes = [{'destination': '127.207.0.0/48', 'nexthop': '10.0.1.3'}] self._update('routers', r['router']['id'], {'router': {'routes': routes}}, expected_code=exc.HTTPBadRequest.code) routes = [{'destination': 'invalid_ip_address', 'nexthop': '10.0.1.3'}] self._update('routers', r['router']['id'], {'router': {'routes': routes}}, expected_code=exc.HTTPBadRequest.code) routes = [{'destination': '1.1.1.1/24', 'nexthop': '10.0.1.3'}] self._update('routers', r['router']['id'], {'router': {'routes': routes}}, expected_code=exc.HTTPBadRequest.code) # clean-up self._router_interface_action('remove', r['router']['id'], None, p['port']['id']) def test_router_update_with_invalid_nexthop_ip(self): with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: with self.port(subnet=s) as p: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) routes = 
[{'destination': '127.207.0.0/16', 'nexthop': ' 300.10.10.4'}] self._update('routers', r['router']['id'], {'router': {'routes': routes}}, expected_code=exc.HTTPBadRequest.code) # clean-up self._router_interface_action('remove', r['router']['id'], None, p['port']['id']) def test_router_update_with_nexthop_is_outside_port_subnet(self): with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: with self.port(subnet=s) as p: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) routes = [{'destination': '127.207.0.0/16', 'nexthop': ' 20.10.10.4'}] self._update('routers', r['router']['id'], {'router': {'routes': routes}}, expected_code=exc.HTTPBadRequest.code) # clean-up self._router_interface_action('remove', r['router']['id'], None, p['port']['id']) def test_router_update_on_external_port(self): with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: self._set_net_external(s['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s['subnet']['network_id']) body = self._show('routers', r['router']['id']) net_id = body['router']['external_gateway_info']['network_id'] self.assertEqual(net_id, s['subnet']['network_id']) port_res = self._list_ports( 'json', 200, s['subnet']['network_id'], tenant_id=r['router']['tenant_id'], device_owner=constants.DEVICE_OWNER_ROUTER_GW) port_list = self.deserialize('json', port_res) self.assertEqual(1, len(port_list['ports'])) with self.port(subnet=s) as p: next_hop = p['port']['fixed_ips'][0]['ip_address'] routes = [{'destination': '135.207.0.0/16', 'nexthop': next_hop}] body = self._update('routers', r['router']['id'], {'router': {'routes': routes}}) body = self._show('routers', r['router']['id']) self.assertEqual(routes, body['router']['routes']) self._remove_external_gateway_from_router( r['router']['id'], s['subnet']['network_id']) body = self._show('routers', r['router']['id']) gw_info = body['router']['external_gateway_info'] self.assertIsNone(gw_info) def test_router_list_with_sort(self): with self.router(name='router1') as router1,\ self.router(name='router2') as router2,\ self.router(name='router3') as router3: self._test_list_with_sort('router', (router3, router2, router1), [('name', 'desc')]) def test_router_list_with_pagination(self): with self.router(name='router1') as router1,\ self.router(name='router2') as router2,\ self.router(name='router3') as router3: self._test_list_with_pagination('router', (router1, router2, router3), ('name', 'asc'), 2, 2) def test_router_list_with_pagination_reverse(self): with self.router(name='router1') as router1,\ self.router(name='router2') as router2,\ self.router(name='router3') as router3: self._test_list_with_pagination_reverse('router', (router1, router2, router3), ('name', 'asc'), 2, 2) class ExtraRouteDBIntTestCase(test_l3.L3NatDBIntTestCase, ExtraRouteDBTestCaseBase): def setUp(self, plugin=None, ext_mgr=None): if not plugin: plugin = ('neutron.tests.unit.extensions.test_extraroute.' 
'TestExtraRouteIntPlugin') # for these tests we need to enable overlapping ips cfg.CONF.set_default('allow_overlapping_ips', True) cfg.CONF.set_default('max_routes', 3) ext_mgr = ExtraRouteTestExtensionManager() super(test_l3.L3BaseForIntTests, self).setUp(plugin=plugin, ext_mgr=ext_mgr) self.setup_notification_driver() class ExtraRouteDBSepTestCase(test_l3.L3NatDBSepTestCase, ExtraRouteDBTestCaseBase): def setUp(self): # the plugin without L3 support plugin = 'neutron.tests.unit.extensions.test_l3.TestNoL3NatPlugin' # the L3 service plugin l3_plugin = ('neutron.tests.unit.extensions.test_extraroute.' 'TestExtraRouteL3NatServicePlugin') service_plugins = {'l3_plugin_name': l3_plugin} # for these tests we need to enable overlapping ips cfg.CONF.set_default('allow_overlapping_ips', True) cfg.CONF.set_default('max_routes', 3) ext_mgr = ExtraRouteTestExtensionManager() super(test_l3.L3BaseForSepTests, self).setUp( plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) self.setup_notification_driver() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_fip_port_details.py0000644000175000017500000001331400000000000030117 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
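# Both extraroute setUp() variants above cap routers at three extra routes
# via cfg.CONF.set_default('max_routes', 3). This is the limit that
# test_router_update_with_too_many_routes trips by submitting a fourth route:
routes = [
    {'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'},
    {'destination': '12.0.0.0/8', 'nexthop': '10.0.1.4'},
    {'destination': '141.212.0.0/16', 'nexthop': '10.0.1.5'},
    {'destination': '192.168.0.0/16', 'nexthop': '10.0.1.6'},  # one over the cap
]
# updating the router with these routes is expected to return HTTP 400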
# from neutron_lib.api.definitions import external_net as enet_apidef from neutron_lib.api.definitions import fip_port_details as apidef from neutron_lib.api.definitions import l3 as l3_apidef from oslo_config import cfg from neutron.db import l3_fip_port_details from neutron.extensions import l3 from neutron.tests.unit.extensions import test_l3 class FloatingIPPortDetailsTestExtensionManager(object): def get_resources(self): return l3.L3.get_resources() def get_actions(self): return [] def get_request_extensions(self): return [] class TestFloatingIPPortDetailsIntPlugin( test_l3.TestL3NatIntPlugin, l3_fip_port_details.Fip_port_details_db_mixin): supported_extension_aliases = [enet_apidef.ALIAS, l3_apidef.ALIAS, apidef.ALIAS] class TestFloatingIPPortDetailsL3NatServicePlugin( test_l3.TestL3NatServicePlugin, l3_fip_port_details.Fip_port_details_db_mixin): supported_extension_aliases = [l3_apidef.ALIAS, apidef.ALIAS] class FloatingIPPortDetailsDBTestCaseBase(test_l3.L3NatTestCaseMixin): def _assert_port_details(self, port, port_details): port['name'] = port_details['name'] port['network_id'] = port_details['network_id'] port['mac_address'] = port_details['mac_address'] port['admin_state_up'] = port_details['admin_state_up'] port['status'] = port_details['status'] port['device_id'] = port_details['device_id'] port['device_owner'] = port_details['device_owner'] def test_floatingip_create_with_port_details(self): with self.port() as p: with self.floatingip_with_assoc(port_id=p['port']['id']) as fip: body = self._show('floatingips', fip['floatingip']['id']) self.assertEqual(body['floatingip']['id'], fip['floatingip']['id']) self.assertEqual(body['floatingip']['port_id'], fip['floatingip']['port_id']) self._assert_port_details( p['port'], body['floatingip']['port_details']) def test_floatingip_update_with_port_details(self): with self.port() as p: private_sub = {'subnet': {'id': p['port']['fixed_ips'][0]['subnet_id']}} with self.floatingip_no_assoc(private_sub) as fip: body = self._show('floatingips', fip['floatingip']['id']) self.assertIsNone(body['floatingip']['port_id']) self.assertIsNone(body['floatingip']['port_details']) port_id = p['port']['id'] body = self._update('floatingips', fip['floatingip']['id'], {'floatingip': {'port_id': port_id}}) self.assertEqual(port_id, body['floatingip']['port_id']) self._assert_port_details( p['port'], body['floatingip']['port_details']) def test_floatingip_list_with_port_details(self): with self.port() as p: with self.floatingip_with_assoc(port_id=p['port']['id']) as fip: body = self._list('floatingips') self.assertEqual(body['floatingips'][0]['id'], fip['floatingip']['id']) self.assertEqual(body['floatingips'][0]['port_id'], fip['floatingip']['port_id']) self._assert_port_details( p['port'], body['floatingips'][0]['port_details']) class FloatingIPPortDetailsDBIntTestCase(test_l3.L3BaseForIntTests, FloatingIPPortDetailsDBTestCaseBase): def setUp(self, plugin=None): if not plugin: plugin = ('neutron.tests.unit.extensions.test_fip_port_details.' 
'TestFloatingIPPortDetailsIntPlugin') # for these tests we need to enable overlapping ips cfg.CONF.set_default('allow_overlapping_ips', True) cfg.CONF.set_default('max_routes', 3) ext_mgr = FloatingIPPortDetailsTestExtensionManager() super(test_l3.L3BaseForIntTests, self).setUp( plugin=plugin, ext_mgr=ext_mgr) self.setup_notification_driver() class FloatingIPPortDetailsDBSepTestCase(test_l3.L3BaseForSepTests, FloatingIPPortDetailsDBTestCaseBase): def setUp(self): # the plugin without L3 support plugin = 'neutron.tests.unit.extensions.test_l3.TestNoL3NatPlugin' # the L3 service plugin l3_plugin = ('neutron.tests.unit.extensions.test_fip_port_details.' 'TestFloatingIPPortDetailsL3NatServicePlugin') service_plugins = {'l3_plugin_name': l3_plugin} # for these tests we need to enable overlapping ips cfg.CONF.set_default('allow_overlapping_ips', True) cfg.CONF.set_default('max_routes', 3) ext_mgr = FloatingIPPortDetailsTestExtensionManager() super(test_l3.L3BaseForSepTests, self).setUp( plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) self.setup_notification_driver() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_flavors.py0000644000175000017500000007614100000000000026253 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import copy import fixtures import mock from neutron_lib import context from neutron_lib.db import api as db_api from neutron_lib.db import constants as db_const from neutron_lib.exceptions import flavors as flav_exc from neutron_lib.plugins import constants from oslo_config import cfg from oslo_utils import uuidutils from webob import exc from neutron.db.models import l3 as l3_models from neutron.db import servicetype_db from neutron.extensions import flavors from neutron.objects import flavor as flavor_obj from neutron.services.flavors import flavors_plugin from neutron.services import provider_configuration as provconf from neutron.tests import base from neutron.tests.unit.api.v2 import test_base from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit import dummy_plugin from neutron.tests.unit.extensions import base as extension _uuid = uuidutils.generate_uuid _get_path = test_base._get_path _driver = ('neutron.tests.unit.extensions.test_flavors.' 
'DummyServiceDriver') _provider = dummy_plugin.RESOURCE_NAME _long_name = 'x' * (db_const.NAME_FIELD_SIZE + 1) _long_description = 'x' * (db_const.LONG_DESCRIPTION_FIELD_SIZE + 1) class FlavorExtensionTestCase(extension.ExtensionTestCase): def setUp(self): super(FlavorExtensionTestCase, self).setUp() self.setup_extension( 'neutron.services.flavors.flavors_plugin.FlavorsPlugin', constants.FLAVORS, flavors.Flavors, '', supported_extension_aliases=['flavors']) def test_create_flavor(self): tenant_id = uuidutils.generate_uuid() # Use service_type FLAVORS since plugin must be loaded to validate data = {'flavor': {'name': 'GOLD', 'service_type': constants.FLAVORS, 'description': 'the best flavor', 'tenant_id': tenant_id, 'project_id': tenant_id, 'enabled': True}} expected = copy.deepcopy(data) expected['flavor']['service_profiles'] = [] instance = self.plugin.return_value instance.create_flavor.return_value = expected['flavor'] res = self.api.post(_get_path('flavors', fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) instance.create_flavor.assert_called_with(mock.ANY, flavor=expected) res = self.deserialize(res) self.assertIn('flavor', res) self.assertEqual(expected, res) def test_create_flavor_invalid_service_type(self): tenant_id = uuidutils.generate_uuid() data = {'flavor': {'name': 'GOLD', 'service_type': 'BROKEN', 'description': 'the best flavor', 'tenant_id': tenant_id, 'enabled': True}} self.api.post(_get_path('flavors', fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt, status=exc.HTTPBadRequest.code) def test_create_flavor_too_long_name(self): tenant_id = uuidutils.generate_uuid() data = {'flavor': {'name': _long_name, 'service_type': constants.FLAVORS, 'description': 'the best flavor', 'tenant_id': tenant_id, 'enabled': True}} self.api.post(_get_path('flavors', fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt, status=exc.HTTPBadRequest.code) def test_create_flavor_too_long_description(self): tenant_id = uuidutils.generate_uuid() data = {'flavor': {'name': _long_name, 'service_type': constants.FLAVORS, 'description': _long_description, 'tenant_id': tenant_id, 'enabled': True}} self.api.post(_get_path('flavors', fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt, status=exc.HTTPBadRequest.code) def test_create_flavor_invalid_enabled(self): tenant_id = uuidutils.generate_uuid() data = {'flavor': {'name': _long_name, 'service_type': constants.FLAVORS, 'description': 'the best flavor', 'tenant_id': tenant_id, 'enabled': 'BROKEN'}} self.api.post(_get_path('flavors', fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt, status=exc.HTTPBadRequest.code) def test_update_flavor(self): flavor_id = 'fake_id' data = {'flavor': {'name': 'GOLD', 'description': 'the best flavor', 'enabled': True}} expected = copy.copy(data) expected['flavor']['service_profiles'] = [] instance = self.plugin.return_value instance.update_flavor.return_value = expected['flavor'] res = self.api.put(_get_path('flavors', id=flavor_id, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) instance.update_flavor.assert_called_with(mock.ANY, flavor_id, flavor=expected) res = self.deserialize(res) self.assertIn('flavor', res) self.assertEqual(expected, res) def test_update_flavor_too_long_name(self): flavor_id = 'fake_id' data = {'flavor': {'name': _long_name, 'description': 'the best flavor', 'enabled': True}} self.api.put(_get_path('flavors', id=flavor_id, 
fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt, status=exc.HTTPBadRequest.code) def test_update_flavor_too_long_description(self): flavor_id = 'fake_id' data = {'flavor': {'name': 'GOLD', 'description': _long_description, 'enabled': True}} self.api.put(_get_path('flavors', id=flavor_id, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt, status=exc.HTTPBadRequest.code) def test_update_flavor_invalid_enabled(self): flavor_id = 'fake_id' data = {'flavor': {'name': 'GOLD', 'description': _long_description, 'enabled': 'BROKEN'}} self.api.put(_get_path('flavors', id=flavor_id, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt, status=exc.HTTPBadRequest.code) def test_delete_flavor(self): flavor_id = 'fake_id' instance = self.plugin.return_value self.api.delete(_get_path('flavors', id=flavor_id, fmt=self.fmt), content_type='application/%s' % self.fmt) instance.delete_flavor.assert_called_with(mock.ANY, flavor_id) def test_show_flavor(self): flavor_id = 'fake_id' expected = {'flavor': {'id': flavor_id, 'name': 'GOLD', 'description': 'the best flavor', 'enabled': True, 'service_profiles': ['profile-1']}} instance = self.plugin.return_value instance.get_flavor.return_value = expected['flavor'] res = self.api.get(_get_path('flavors', id=flavor_id, fmt=self.fmt)) instance.get_flavor.assert_called_with(mock.ANY, flavor_id, fields=mock.ANY) res = self.deserialize(res) self.assertEqual(expected, res) def test_get_flavors(self): data = {'flavors': [{'id': 'id1', 'name': 'GOLD', 'description': 'the best flavor', 'enabled': True, 'service_profiles': ['profile-1']}, {'id': 'id2', 'name': 'GOLD', 'description': 'the best flavor', 'enabled': True, 'service_profiles': ['profile-2', 'profile-1']}]} instance = self.plugin.return_value instance.get_flavors.return_value = data['flavors'] res = self.api.get(_get_path('flavors', fmt=self.fmt)) instance.get_flavors.assert_called_with(mock.ANY, fields=mock.ANY, filters=mock.ANY) res = self.deserialize(res) self.assertEqual(data, res) def test_create_service_profile(self): tenant_id = uuidutils.generate_uuid() expected = {'service_profile': {'description': 'the best sp', 'driver': '', 'tenant_id': tenant_id, 'project_id': tenant_id, 'enabled': True, 'metainfo': '{"data": "value"}'}} instance = self.plugin.return_value instance.create_service_profile.return_value = ( expected['service_profile']) res = self.api.post(_get_path('service_profiles', fmt=self.fmt), self.serialize(expected), content_type='application/%s' % self.fmt) instance.create_service_profile.assert_called_with( mock.ANY, service_profile=expected) res = self.deserialize(res) self.assertIn('service_profile', res) self.assertEqual(expected, res) def test_create_service_profile_too_long_description(self): tenant_id = uuidutils.generate_uuid() expected = {'service_profile': {'description': _long_description, 'driver': '', 'tenant_id': tenant_id, 'enabled': True, 'metainfo': '{"data": "value"}'}} self.api.post(_get_path('service_profiles', fmt=self.fmt), self.serialize(expected), content_type='application/%s' % self.fmt, status=exc.HTTPBadRequest.code) def test_create_service_profile_too_long_driver(self): tenant_id = uuidutils.generate_uuid() expected = {'service_profile': {'description': 'the best sp', 'driver': _long_description, 'tenant_id': tenant_id, 'enabled': True, 'metainfo': '{"data": "value"}'}} self.api.post(_get_path('service_profiles', fmt=self.fmt), self.serialize(expected), content_type='application/%s' % 
self.fmt, status=exc.HTTPBadRequest.code) def test_create_service_profile_invalid_enabled(self): tenant_id = uuidutils.generate_uuid() expected = {'service_profile': {'description': 'the best sp', 'driver': '', 'tenant_id': tenant_id, 'enabled': 'BROKEN', 'metainfo': '{"data": "value"}'}} self.api.post(_get_path('service_profiles', fmt=self.fmt), self.serialize(expected), content_type='application/%s' % self.fmt, status=exc.HTTPBadRequest.code) def test_update_service_profile(self): sp_id = "fake_id" expected = {'service_profile': {'description': 'the best sp', 'enabled': False, 'metainfo': '{"data1": "value3"}'}} instance = self.plugin.return_value instance.update_service_profile.return_value = ( expected['service_profile']) res = self.api.put(_get_path('service_profiles', id=sp_id, fmt=self.fmt), self.serialize(expected), content_type='application/%s' % self.fmt) instance.update_service_profile.assert_called_with( mock.ANY, sp_id, service_profile=expected) res = self.deserialize(res) self.assertIn('service_profile', res) self.assertEqual(expected, res) def test_update_service_profile_too_long_description(self): sp_id = "fake_id" expected = {'service_profile': {'description': 'the best sp', 'enabled': 'BROKEN', 'metainfo': '{"data1": "value3"}'}} self.api.put(_get_path('service_profiles', id=sp_id, fmt=self.fmt), self.serialize(expected), content_type='application/%s' % self.fmt, status=exc.HTTPBadRequest.code) def test_update_service_profile_invalid_enabled(self): sp_id = "fake_id" expected = {'service_profile': {'description': 'the best sp', 'enabled': 'BROKEN', 'metainfo': '{"data1": "value3"}'}} self.api.put(_get_path('service_profiles', id=sp_id, fmt=self.fmt), self.serialize(expected), content_type='application/%s' % self.fmt, status=exc.HTTPBadRequest.code) def test_delete_service_profile(self): sp_id = 'fake_id' instance = self.plugin.return_value self.api.delete(_get_path('service_profiles', id=sp_id, fmt=self.fmt), content_type='application/%s' % self.fmt) instance.delete_service_profile.assert_called_with(mock.ANY, sp_id) def test_show_service_profile(self): sp_id = 'fake_id' expected = {'service_profile': {'id': 'id1', 'driver': _driver, 'description': 'desc', 'metainfo': '{}', 'enabled': True}} instance = self.plugin.return_value instance.get_service_profile.return_value = ( expected['service_profile']) res = self.api.get(_get_path('service_profiles', id=sp_id, fmt=self.fmt)) instance.get_service_profile.assert_called_with(mock.ANY, sp_id, fields=mock.ANY) res = self.deserialize(res) self.assertEqual(expected, res) def test_get_service_profiles(self): expected = {'service_profiles': [{'id': 'id1', 'driver': _driver, 'description': 'desc', 'metainfo': '{}', 'enabled': True}, {'id': 'id2', 'driver': _driver, 'description': 'desc', 'metainfo': '{}', 'enabled': True}]} instance = self.plugin.return_value instance.get_service_profiles.return_value = ( expected['service_profiles']) res = self.api.get(_get_path('service_profiles', fmt=self.fmt)) instance.get_service_profiles.assert_called_with(mock.ANY, fields=mock.ANY, filters=mock.ANY) res = self.deserialize(res) self.assertEqual(expected, res) def test_associate_service_profile_with_flavor(self): tenant_id = uuidutils.generate_uuid() expected = {'service_profile': {'id': _uuid(), 'tenant_id': tenant_id, 'project_id': tenant_id}} instance = self.plugin.return_value instance.create_flavor_service_profile.return_value = ( expected['service_profile']) res = self.api.post('/flavors/fl_id/service_profiles', self.serialize(expected), 
content_type='application/%s' % self.fmt) instance.create_flavor_service_profile.assert_called_with( mock.ANY, service_profile=expected, flavor_id='fl_id') res = self.deserialize(res) self.assertEqual(expected, res) def test_disassociate_service_profile_with_flavor(self): instance = self.plugin.return_value instance.delete_flavor_service_profile.return_value = None self.api.delete('/flavors/fl_id/service_profiles/%s' % 'fake_spid', content_type='application/%s' % self.fmt) instance.delete_flavor_service_profile.assert_called_with( mock.ANY, 'fake_spid', flavor_id='fl_id') def test_update_association_error(self): """Confirm that update is not permitted with user error.""" new_id = uuidutils.generate_uuid() data = {'service_profile': {'id': new_id}} self.api.put('/flavors/fl_id/service_profiles/%s' % 'fake_spid', self.serialize(data), content_type='application/%s' % self.fmt, status=exc.HTTPBadRequest.code) class DummyServicePlugin(object): def driver_loaded(self, driver, service_profile): pass @classmethod def get_plugin_type(cls): return dummy_plugin.DUMMY_SERVICE_TYPE def get_plugin_description(self): return "Dummy service plugin, aware of flavors" class DummyServiceDriver(object): @staticmethod def get_service_type(): return dummy_plugin.DUMMY_SERVICE_TYPE def __init__(self, plugin): pass class FlavorPluginTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase, base.PluginFixture): def setUp(self): super(FlavorPluginTestCase, self).setUp() self.config_parse() cfg.CONF.set_override( 'service_plugins', ['neutron.tests.unit.extensions.test_flavors.DummyServicePlugin']) self.useFixture( fixtures.MonkeyPatch('neutron.manager.NeutronManager._instance')) self.plugin = flavors_plugin.FlavorsPlugin() self.ctx = context.get_admin_context() providers = [DummyServiceDriver.get_service_type() + ":" + _provider + ":" + _driver] self.service_manager = servicetype_db.ServiceTypeManager.get_instance() self.service_providers = mock.patch.object( provconf.NeutronModule, 'service_providers').start() self.service_providers.return_value = providers for provider in providers: self.service_manager.add_provider_configuration( provider.split(':')[0], provconf.ProviderConfiguration()) db_api.CONTEXT_WRITER.get_engine() def _create_flavor(self, description=None): flavor = {'flavor': {'name': 'GOLD', 'service_type': dummy_plugin.DUMMY_SERVICE_TYPE, 'description': description or 'the best flavor', 'enabled': True}} return self.plugin.create_flavor(self.ctx, flavor), flavor def test_create_flavor(self): self._create_flavor() res = flavor_obj.Flavor.get_objects(self.ctx) self.assertEqual(1, len(res)) self.assertEqual('GOLD', res[0]['name']) self.assertEqual( dummy_plugin.DUMMY_SERVICE_TYPE, res[0]['service_type']) def test_update_flavor(self): fl, flavor = self._create_flavor() flavor = {'flavor': {'name': 'Silver', 'enabled': False}} self.plugin.update_flavor(self.ctx, fl['id'], flavor) # don't reuse cached models from previous plugin call self.ctx.session.expire_all() res = flavor_obj.Flavor.get_object(self.ctx, id=fl['id']) self.assertEqual('Silver', res['name']) self.assertFalse(res['enabled']) def test_delete_flavor(self): fl, _ = self._create_flavor() self.plugin.delete_flavor(self.ctx, fl['id']) self.assertFalse(flavor_obj.Flavor.objects_exist(self.ctx)) def test_show_flavor(self): fl, _ = self._create_flavor() show_fl = self.plugin.get_flavor(self.ctx, fl['id']) self.assertEqual(fl, show_fl) def test_get_flavors(self): fl, flavor = self._create_flavor() flavor['flavor']['name'] = 'SILVER' 
self.plugin.create_flavor(self.ctx, flavor) show_fl = self.plugin.get_flavors(self.ctx) self.assertEqual(2, len(show_fl)) def _create_service_profile(self, description=None): data = {'service_profile': {'description': description or 'the best sp', 'driver': _driver, 'enabled': True, 'metainfo': '{"data": "value"}'}} sp = self.plugin.create_service_profile(self.ctx, data) return sp, data def test_create_service_profile(self): sp, data = self._create_service_profile() res = flavor_obj.ServiceProfile.get_object(self.ctx, id=sp['id']) self.assertIsNotNone(res) self.assertEqual(data['service_profile']['driver'], res.driver) self.assertEqual(data['service_profile']['metainfo'], res.metainfo) def test_create_service_profile_empty_driver(self): data = {'service_profile': {'description': 'the best sp', 'driver': '', 'enabled': True, 'metainfo': '{"data": "value"}'}} sp = self.plugin.create_service_profile(self.ctx, data) res = flavor_obj.ServiceProfile.get_object(self.ctx, id=sp['id']) self.assertIsNotNone(res) self.assertEqual(data['service_profile']['driver'], res.driver) self.assertEqual(data['service_profile']['metainfo'], res.metainfo) def test_create_service_profile_invalid_driver(self): data = {'service_profile': {'description': 'the best sp', 'driver': "Broken", 'enabled': True, 'metainfo': '{"data": "value"}'}} self.assertRaises(flav_exc.ServiceProfileDriverNotFound, self.plugin.create_service_profile, self.ctx, data) def test_create_service_profile_invalid_empty(self): data = {'service_profile': {'description': '', 'driver': '', 'enabled': True, 'metainfo': ''}} self.assertRaises(flav_exc.ServiceProfileEmpty, self.plugin.create_service_profile, self.ctx, data) def test_update_service_profile(self): sp, data = self._create_service_profile() data['service_profile']['metainfo'] = '{"data": "value1"}' sp = self.plugin.update_service_profile(self.ctx, sp['id'], data) # don't reuse cached models from previous plugin call self.ctx.session.expire_all() res = flavor_obj.ServiceProfile.get_object(self.ctx, id=sp['id']) self.assertEqual(data['service_profile']['metainfo'], res['metainfo']) def test_delete_service_profile(self): sp, data = self._create_service_profile() self.plugin.delete_service_profile(self.ctx, sp['id']) res = flavor_obj.ServiceProfile.get_objects(self.ctx) self.assertFalse(res) def test_show_service_profile(self): sp, data = self._create_service_profile() sp_show = self.plugin.get_service_profile(self.ctx, sp['id']) self.assertEqual(sp, sp_show) def test_get_service_profiles(self): self._create_service_profile() self._create_service_profile(description='another sp') self.assertEqual(2, len(self.plugin.get_service_profiles(self.ctx))) def test_associate_service_profile_with_flavor(self): sp, data = self._create_service_profile() fl, data = self._create_flavor() self.plugin.create_flavor_service_profile( self.ctx, {'service_profile': {'id': sp['id']}}, fl['id']) binding = flavor_obj.FlavorServiceProfileBinding.get_objects( self.ctx)[0] self.assertEqual(fl['id'], binding['flavor_id']) self.assertEqual(sp['id'], binding['service_profile_id']) # don't reuse cached models from previous plugin call self.ctx.session.expire_all() res = self.plugin.get_flavor(self.ctx, fl['id']) self.assertEqual(1, len(res['service_profiles'])) self.assertEqual(sp['id'], res['service_profiles'][0]) res = self.plugin.get_service_profile(self.ctx, sp['id']) self.assertEqual(1, len(res['flavors'])) self.assertEqual(fl['id'], res['flavors'][0]) def test_autodelete_flavor_associations(self): sp, data = 
self._create_service_profile() fl, data = self._create_flavor() self.plugin.create_flavor_service_profile( self.ctx, {'service_profile': {'id': sp['id']}}, fl['id']) self.plugin.delete_flavor(self.ctx, fl['id']) self.assertFalse( flavor_obj.FlavorServiceProfileBinding.objects_exist(self.ctx)) def test_associate_service_profile_with_flavor_exists(self): sp, data = self._create_service_profile() fl, data = self._create_flavor() self.plugin.create_flavor_service_profile( self.ctx, {'service_profile': {'id': sp['id']}}, fl['id']) self.assertRaises(flav_exc.FlavorServiceProfileBindingExists, self.plugin.create_flavor_service_profile, self.ctx, {'service_profile': {'id': sp['id']}}, fl['id']) def test_disassociate_service_profile_with_flavor(self): sp, data = self._create_service_profile() fl, data = self._create_flavor() self.plugin.create_flavor_service_profile( self.ctx, {'service_profile': {'id': sp['id']}}, fl['id']) self.plugin.delete_flavor_service_profile( self.ctx, sp['id'], fl['id']) self.assertFalse( flavor_obj.FlavorServiceProfileBinding.objects_exist(self.ctx)) self.assertRaises( flav_exc.FlavorServiceProfileBindingNotFound, self.plugin.delete_flavor_service_profile, self.ctx, sp['id'], fl['id']) def test_delete_service_profile_in_use(self): sp, data = self._create_service_profile() fl, data = self._create_flavor() self.plugin.create_flavor_service_profile( self.ctx, {'service_profile': {'id': sp['id']}}, fl['id']) self.assertRaises( flav_exc.ServiceProfileInUse, self.plugin.delete_service_profile, self.ctx, sp['id']) def test_delete_flavor_in_use(self): # make use of router since it has a flavor id fl, data = self._create_flavor() with db_api.CONTEXT_WRITER.using(self.ctx): self.ctx.session.add(l3_models.Router(flavor_id=fl['id'])) self.assertRaises( flav_exc.FlavorInUse, self.plugin.delete_flavor, self.ctx, fl['id']) def test_get_flavor_next_provider_no_binding(self): fl, data = self._create_flavor() self.assertRaises( flav_exc.FlavorServiceProfileBindingNotFound, self.plugin.get_flavor_next_provider, self.ctx, fl['id']) def test_get_flavor_next_provider_disabled(self): data = {'service_profile': {'description': 'the best sp', 'driver': _driver, 'enabled': False, 'metainfo': '{"data": "value"}'}} sp = self.plugin.create_service_profile(self.ctx, data) fl, data = self._create_flavor() self.plugin.create_flavor_service_profile( self.ctx, {'service_profile': {'id': sp['id']}}, fl['id']) self.assertRaises( flav_exc.ServiceProfileDisabled, self.plugin.get_flavor_next_provider, self.ctx, fl['id']) def test_get_flavor_next_provider_no_driver(self): data = {'service_profile': {'description': 'the best sp', 'driver': '', 'enabled': True, 'metainfo': '{"data": "value"}'}} sp = self.plugin.create_service_profile(self.ctx, data) fl, data = self._create_flavor() self.plugin.create_flavor_service_profile( self.ctx, {'service_profile': {'id': sp['id']}}, fl['id']) self.assertRaises( flav_exc.ServiceProfileDriverNotFound, self.plugin.get_flavor_next_provider, self.ctx, fl['id']) def test_get_flavor_next_provider(self): sp, data = self._create_service_profile() fl, data = self._create_flavor() self.plugin.create_flavor_service_profile( self.ctx, {'service_profile': {'id': sp['id']}}, fl['id']) providers = self.plugin.get_flavor_next_provider( self.ctx, fl['id']) self.assertEqual(_provider, providers[0].get('provider', None)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 
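# The association flow exercised throughout FlavorPluginTestCase, shown in
# isolation: bind a service profile to a flavor, producing the
# FlavorServiceProfileBinding record that get_flavor_next_provider() walks
# to choose a provider for the flavor.
sp, _ = self._create_service_profile()
fl, _ = self._create_flavor()
self.plugin.create_flavor_service_profile(
    self.ctx, {'service_profile': {'id': sp['id']}}, fl['id'])
providers = self.plugin.get_flavor_next_provider(self.ctx, fl['id'])
# providers[0]['provider'] names the configured provider for this flavor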
neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_floating_ip_port_forwarding.py

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import mock
from neutron_lib import context
from oslo_utils import uuidutils
from webob import exc

from neutron.tests.unit.api import test_extensions
from neutron.tests.unit.extensions import \
    test_expose_port_forwarding_in_fip as test_fip_pf
from neutron.tests.unit.extensions import test_l3

_uuid = uuidutils.generate_uuid


class FloatingIPPortForwardingTestCase(test_l3.L3BaseForIntTests,
                                       test_l3.L3NatTestCaseMixin):
    fmt = 'json'

    def setUp(self):
        mock.patch('neutron.api.rpc.handlers.resources_rpc.'
                   'ResourcesPushRpcApi').start()
        svc_plugins = (test_fip_pf.PF_PLUGIN_NAME, test_fip_pf.L3_PLUGIN,
                       'neutron.services.qos.qos_plugin.QoSPlugin')
        ext_mgr = test_fip_pf.ExtendFipPortForwardingExtensionManager()
        super(FloatingIPPortForwardingTestCase, self).setUp(
            ext_mgr=ext_mgr, service_plugins=svc_plugins)
        self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)

    def _create_fip_port_forwarding(self, fmt, floating_ip_id,
                                    external_port, internal_port,
                                    protocol, internal_ip_address,
                                    internal_port_id,
                                    tenant_id=None, description=None):
        tenant_id = tenant_id or _uuid()
        data = {'port_forwarding': {
            "external_port": external_port,
            "internal_port": internal_port,
            "protocol": protocol,
            "internal_ip_address": internal_ip_address,
            "internal_port_id": internal_port_id}
        }
        if description:
            data['port_forwarding']['description'] = description
        fip_pf_req = self._req(
            'POST', 'floatingips', data,
            fmt or self.fmt, id=floating_ip_id,
            subresource='port_forwardings')
        fip_pf_req.environ['neutron.context'] = context.Context(
            '', tenant_id, is_admin=True)
        return fip_pf_req.get_response(self.ext_api)

    def test_create_floatingip_port_forwarding_with_port_number_0(self):
        with self.network() as ext_net:
            network_id = ext_net['network']['id']
            self._set_net_external(network_id)
            with self.subnet(ext_net, cidr='10.10.10.0/24'), \
                    self.router() as router, \
                    self.subnet(cidr='11.0.0.0/24') as private_subnet, \
                    self.port(private_subnet) as port:
                self._add_external_gateway_to_router(
                    router['router']['id'],
                    network_id)
                self._router_interface_action(
                    'add', router['router']['id'],
                    private_subnet['subnet']['id'],
                    None)
                fip = self._make_floatingip(
                    self.fmt,
                    network_id)
                self.assertIsNone(fip['floatingip'].get('port_id'))
                res = self._create_fip_port_forwarding(
                    self.fmt, fip['floatingip']['id'],
                    2222, 0,
                    'tcp',
                    port['port']['fixed_ips'][0]['ip_address'],
                    port['port']['id'])
                self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
                res = self._create_fip_port_forwarding(
                    self.fmt, fip['floatingip']['id'],
                    0, 22,
                    'tcp',
                    port['port']['fixed_ips'][0]['ip_address'],
                    port['port']['id'])
                self.assertEqual(exc.HTTPBadRequest.code, res.status_int)

    def test_create_floatingip_port_forwarding_with_description(self):
        with self.network() as ext_net:
            network_id = ext_net['network']['id']
            self._set_net_external(network_id)
            with self.subnet(ext_net, cidr='10.10.10.0/24'), \
                    self.router() as router, \
                    self.subnet(cidr='11.0.0.0/24') as private_subnet, \
                    self.port(private_subnet) as port:
                self._add_external_gateway_to_router(
                    router['router']['id'],
                    network_id)
                self._router_interface_action(
                    'add', router['router']['id'],
                    private_subnet['subnet']['id'],
                    None)
                fip = self._make_floatingip(
                    self.fmt,
                    network_id)
                self.assertIsNone(fip['floatingip'].get('port_id'))
                res = self._create_fip_port_forwarding(
                    self.fmt, fip['floatingip']['id'],
                    2222, 22,
                    'tcp',
                    port['port']['fixed_ips'][0]['ip_address'],
                    port['port']['id'],
                    description="blablablabla")
                self.assertEqual(exc.HTTPCreated.code, res.status_int)
                pf_body = self.deserialize(self.fmt, res)
                self.assertEqual(
                    "blablablabla", pf_body['port_forwarding']['description'])


neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_floatingip_pools.py

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import ddt
import mock
from neutron_lib.api.definitions import external_net as enet_apidef
from neutron_lib.api.definitions import floatingip_pools as apidef
from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib import constants as lib_const
from neutron_lib import context
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_utils import uuidutils

from neutron.db import l3_fip_pools_db
from neutron.extensions import l3
from neutron.objects import network as net_obj
from neutron.objects import subnet as subnet_obj
from neutron.tests.unit.extensions import test_l3


class FloatingIPPoolsTestExtensionManager(object):

    def get_resources(self):
        return l3.L3.get_resources()

    def get_actions(self):
        return []

    def get_request_extensions(self):
        return []


class TestFloatingIPPoolsIntPlugin(
        test_l3.TestL3NatIntPlugin,
        l3_fip_pools_db.FloatingIPPoolsDbMixin):
    supported_extension_aliases = [enet_apidef.ALIAS, l3_apidef.ALIAS,
                                   apidef.ALIAS]


class TestFloatingIPPoolsL3NatServicePlugin(
        test_l3.TestL3NatServicePlugin,
        l3_fip_pools_db.FloatingIPPoolsDbMixin):
    supported_extension_aliases = [l3_apidef.ALIAS, apidef.ALIAS]


@ddt.ddt
class FloatingIPPoolsDBTestCaseBase(test_l3.L3NatTestCaseMixin):

    def test_get_floatingip_pools_ipv4(self):
        self._test_get_floatingip_pools(lib_const.IP_VERSION_4, False)

    @ddt.data(True, False)
    def test_get_floatingip_pools_ipv6(self, fake_is_v6_supported):
        self._test_get_floatingip_pools(lib_const.IP_VERSION_6,
                                        fake_is_v6_supported)

    def _test_get_floatingip_pools(self, ip_version, is_v6_supported):
        fake_network_id = uuidutils.generate_uuid()
        fake_subnet_id = uuidutils.generate_uuid()
        fake_ext_network = mock.Mock(network_id=fake_network_id)
        if ip_version == lib_const.IP_VERSION_4:
            fake_cidr = '10.0.0.0/24'
        else:
            fake_cidr = 'fe80:cafe::/64'
        fake_subnet = mock.Mock(id=fake_subnet_id,
                                network_id=fake_network_id,
                                cidr=fake_cidr,
                                ip_version=ip_version,
                                tenant_id='fake_tenant',
                                project_id='fake_tenant')
        fake_subnet.name = 'fake_subnet'
        self.plugin._is_v6_supported = is_v6_supported
        with mock.patch.object(
            subnet_obj.Subnet, 'get_objects',
            return_value=[fake_subnet]
        ) as mock_subnet_get_objects, mock.patch.object(
            net_obj.ExternalNetwork, 'get_objects',
            return_value=[fake_ext_network]
        ) as mock_extnet_get_objects, mock.patch.object(
            self.ctxt, 'elevated',
            return_value=self.admin_ctxt
        ) as mock_context_elevated:
            fip_pools = self.plugin.get_floatingip_pools(self.ctxt)

        expected_fip_pools = []
        if ip_version == lib_const.IP_VERSION_4 or is_v6_supported:
            expected_fip_pools = [{'cidr': fake_cidr,
                                   'subnet_id': fake_subnet_id,
                                   'subnet_name': 'fake_subnet',
                                   'network_id': fake_network_id,
                                   'project_id': 'fake_tenant',
                                   'tenant_id': 'fake_tenant'}]
        self.assertEqual(expected_fip_pools, fip_pools)
        mock_subnet_get_objects.assert_called_once_with(
            self.admin_ctxt, _pager=mock.ANY, network_id=[fake_network_id])
        mock_extnet_get_objects.assert_called_once_with(self.ctxt)
        mock_context_elevated.assert_called_once_with()


class FloatingIPPoolsDBIntTestCase(test_l3.L3BaseForIntTests,
                                   FloatingIPPoolsDBTestCaseBase):

    def setUp(self, plugin=None):
        if not plugin:
            plugin = ('neutron.tests.unit.extensions.test_floatingip_pools.'
                      'TestFloatingIPPoolsIntPlugin')
        # for these tests we need to enable overlapping ips
        cfg.CONF.set_default('allow_overlapping_ips', True)
        cfg.CONF.set_default('max_routes', 3)
        ext_mgr = FloatingIPPoolsTestExtensionManager()
        super(test_l3.L3BaseForIntTests, self).setUp(
            plugin=plugin, ext_mgr=ext_mgr)
        self.setup_notification_driver()
        self.ctxt = context.Context('fake_user', 'fake_tenant')
        self.admin_ctxt = self.ctxt.elevated()


class FloatingIPPoolsDBSepTestCase(test_l3.L3BaseForSepTests,
                                   FloatingIPPoolsDBTestCaseBase):

    def setUp(self):
        # the plugin without L3 support
        plugin = 'neutron.tests.unit.extensions.test_l3.TestNoL3NatPlugin'
        # the L3 service plugin
        l3_plugin = ('neutron.tests.unit.extensions.test_floatingip_pools.'
                     'TestFloatingIPPoolsL3NatServicePlugin')
        service_plugins = {'l3_plugin_name': l3_plugin}
        # for these tests we need to enable overlapping ips
        cfg.CONF.set_default('allow_overlapping_ips', True)
        cfg.CONF.set_default('max_routes', 3)
        ext_mgr = FloatingIPPoolsTestExtensionManager()
        super(test_l3.L3BaseForSepTests, self).setUp(
            plugin=plugin, ext_mgr=ext_mgr,
            service_plugins=service_plugins)
        self.setup_notification_driver()
        self.plugin = directory.get_plugin(plugin_constants.L3)
        self.ctxt = context.Context('fake_user', 'fake_tenant')
        self.admin_ctxt = self.ctxt.elevated()


neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_l3.py

# Copyright 2012 VMware, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import contextlib
import copy

import mock
import netaddr
from neutron_lib.api.definitions import dns as dns_apidef
from neutron_lib.api.definitions import external_net as extnet_apidef
from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib.api.definitions import portbindings
from neutron_lib.callbacks import events
from neutron_lib.callbacks import exceptions
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants as lib_constants
from neutron_lib import context
from neutron_lib.db import api as db_api
from neutron_lib.db import resource_extend
from neutron_lib import exceptions as n_exc
from neutron_lib.exceptions import l3 as l3_exc
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from neutron_lib.tests.unit import fake_notifier
from oslo_config import cfg
from oslo_utils import importutils
from oslo_utils import uuidutils
from sqlalchemy import orm
import testtools
from webob import exc

from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.api.rpc.handlers import l3_rpc
from neutron.db import db_base_plugin_v2
from neutron.db import dns_db
from neutron.db import external_net_db
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_attrs_db
from neutron.db import l3_db
from neutron.db import l3_dvr_db
from neutron.db import l3_dvrscheduler_db
from neutron.db import l3_hamode_db
from neutron.db.models import l3 as l3_models
from neutron.db import models_v2
from neutron.extensions import l3
from neutron.services.revisions import revision_plugin
from neutron.tests import base
from neutron.tests.unit.api import test_extensions
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit.extensions import base as test_extensions_base
from neutron.tests.unit.extensions import test_agent
from neutron.tests.unit.plugins.ml2 import base as ml2_base
from neutron.tests.unit import testlib_api

_uuid = uuidutils.generate_uuid
_get_path = test_base._get_path

DEVICE_OWNER_COMPUTE = lib_constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'


class L3TestExtensionManager(object):

    def get_resources(self):
        return l3.L3.get_resources()

    def get_actions(self):
        return []

    def get_request_extensions(self):
        return []


class L3NatExtensionTestCase(test_extensions_base.ExtensionTestCase):
    fmt = 'json'

    def setUp(self):
        super(L3NatExtensionTestCase, self).setUp()
        self.setup_extension(
            'neutron.services.l3_router.l3_router_plugin.L3RouterPlugin',
            plugin_constants.L3, l3.L3, '',
            allow_pagination=True, allow_sorting=True,
            supported_extension_aliases=['router'],
            use_quota=True)

    def test_router_create(self):
        router_id = _uuid()
        tenant_id = _uuid()
        data = {'router': {'name': 'router1', 'admin_state_up': True,
                           'tenant_id': tenant_id, 'project_id': tenant_id,
                           'external_gateway_info': None}}
        return_value = copy.deepcopy(data['router'])
        return_value.update({'status': "ACTIVE", 'id': router_id})

        instance = self.plugin.return_value
        instance.create_router.return_value = return_value
        instance.get_routers_count.return_value = 0

        res = self.api.post(_get_path('routers', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt)
        instance.create_router.assert_called_with(mock.ANY, router=data)
        self.assertEqual(exc.HTTPCreated.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('router', res)
        router = res['router']
        self.assertEqual(router_id, router['id'])
        self.assertEqual("ACTIVE", router['status'])
        self.assertTrue(router['admin_state_up'])

    def test_router_list(self):
        router_id = _uuid()
        return_value = [{'name': 'router1', 'admin_state_up': True,
                         'tenant_id': _uuid(), 'id': router_id}]

        instance = self.plugin.return_value
        instance.get_routers.return_value = return_value

        res = self.api.get(_get_path('routers', fmt=self.fmt))
        instance.get_routers.assert_called_with(mock.ANY, fields=mock.ANY,
                                                filters=mock.ANY,
                                                sorts=mock.ANY,
                                                limit=mock.ANY,
                                                marker=mock.ANY,
                                                page_reverse=mock.ANY)
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('routers', res)
        self.assertEqual(1, len(res['routers']))
        self.assertEqual(router_id, res['routers'][0]['id'])

    def test_router_update(self):
        router_id = _uuid()
        update_data = {'router': {'admin_state_up': False}}
        return_value = {'name': 'router1', 'admin_state_up': False,
                        'tenant_id': _uuid(), 'status': "ACTIVE",
                        'id': router_id}

        instance = self.plugin.return_value
        instance.update_router.return_value = return_value

        res = self.api.put(_get_path('routers', id=router_id,
                                     fmt=self.fmt),
                           self.serialize(update_data))

        instance.update_router.assert_called_with(mock.ANY, router_id,
                                                  router=update_data)
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('router', res)
        router = res['router']
        self.assertEqual(router_id, router['id'])
        self.assertEqual("ACTIVE", router['status'])
        self.assertFalse(router['admin_state_up'])

    def test_router_get(self):
        router_id = _uuid()
        return_value = {'name': 'router1', 'admin_state_up': False,
                        'tenant_id': _uuid(), 'status': "ACTIVE",
                        'id': router_id}

        instance = self.plugin.return_value
        instance.get_router.return_value = return_value

        res = self.api.get(_get_path('routers', id=router_id,
                                     fmt=self.fmt))

        instance.get_router.assert_called_with(mock.ANY, router_id,
                                               fields=mock.ANY)
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('router', res)
        router = res['router']
        self.assertEqual(router_id, router['id'])
        self.assertEqual("ACTIVE", router['status'])
        self.assertFalse(router['admin_state_up'])

    def test_router_delete(self):
        router_id = _uuid()

        res = self.api.delete(_get_path('routers', id=router_id))

        instance = self.plugin.return_value
        instance.delete_router.assert_called_with(mock.ANY, router_id)
        self.assertEqual(exc.HTTPNoContent.code, res.status_int)

    def test_router_add_interface(self):
        router_id = _uuid()
        subnet_id = _uuid()
        port_id = _uuid()

        interface_data = {'subnet_id': subnet_id}
        return_value = copy.deepcopy(interface_data)
        return_value['port_id'] = port_id

        instance = self.plugin.return_value
        instance.add_router_interface.return_value = return_value

        path = _get_path('routers', id=router_id,
                         action="add_router_interface",
                         fmt=self.fmt)
        res = self.api.put(path, self.serialize(interface_data))

        instance.add_router_interface.assert_called_with(mock.ANY, router_id,
                                                         interface_data)
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        res = self.deserialize(res)
        self.assertIn('port_id', res)
        self.assertEqual(port_id, res['port_id'])
        self.assertEqual(subnet_id, res['subnet_id'])

    def test_router_add_interface_empty_body(self):
        router_id = _uuid()
        instance = self.plugin.return_value
        path = _get_path('routers', id=router_id,
                         action="add_router_interface",
                         fmt=self.fmt)
        res = self.api.put(path)
        self.assertEqual(exc.HTTPOk.code, res.status_int)
        instance.add_router_interface.assert_called_with(mock.ANY, router_id)


class TestL3PluginBaseAttributes(object):

    IP_UPDATE_NOT_ALLOWED_LIST = [
        lib_constants.DEVICE_OWNER_ROUTER_INTF,
        lib_constants.DEVICE_OWNER_ROUTER_HA_INTF,
        lib_constants.DEVICE_OWNER_HA_REPLICATED_INT,
        lib_constants.DEVICE_OWNER_ROUTER_SNAT,
        lib_constants.DEVICE_OWNER_DVR_INTERFACE]

    def router_supports_scheduling(self, context, router_id):
        return True


# This base plugin class is for tests.
class TestL3NatBasePlugin(TestL3PluginBaseAttributes,
                          db_base_plugin_v2.NeutronDbPluginV2,
                          external_net_db.External_net_db_mixin):

    __native_pagination_support = True
    __native_sorting_support = True

    def create_network(self, context, network):
        with db_api.CONTEXT_WRITER.using(context):
            net = super(TestL3NatBasePlugin, self).create_network(context,
                                                                  network)
            self._process_l3_create(context, net, network['network'])
        return net

    def update_network(self, context, id, network):
        with db_api.CONTEXT_WRITER.using(context):
            net = super(TestL3NatBasePlugin, self).update_network(context,
                                                                  id,
                                                                  network)
            self._process_l3_update(context, net, network['network'])
        return net

    def delete_port(self, context, id, l3_port_check=True):
        plugin = directory.get_plugin(plugin_constants.L3)
        if plugin:
            if l3_port_check:
                plugin.prevent_l3_port_deletion(context, id)
            plugin.disassociate_floatingips(context, id)
        return super(TestL3NatBasePlugin, self).delete_port(context, id)

    def update_port(self, context, id, port):
        original_port = self.get_port(context, id)
        session = context.session
        with session.begin(subtransactions=True):
            new_port = super(TestL3NatBasePlugin, self).update_port(
                context, id, port)
        # Notifications must be sent after the above transaction is complete
        kwargs = {
            'context': context,
            'port': new_port,
            'original_port': original_port,
        }
        registry.notify(resources.PORT, events.AFTER_UPDATE, self, **kwargs)
        return new_port


# This plugin class is for tests with plugin that integrates L3.
class TestL3NatIntPlugin(TestL3NatBasePlugin,
                         l3_db.L3_NAT_db_mixin, dns_db.DNSDbMixin):

    __native_pagination_support = True
    __native_sorting_support = True

    supported_extension_aliases = [extnet_apidef.ALIAS, l3_apidef.ALIAS,
                                   dns_apidef.ALIAS]


# This plugin class is for tests with plugin that integrates L3 and L3 agent
# scheduling.
class TestL3NatIntAgentSchedulingPlugin(TestL3NatIntPlugin,
                                        l3_agentschedulers_db.
                                        L3AgentSchedulerDbMixin,
                                        l3_hamode_db.L3_HA_NAT_db_mixin):

    supported_extension_aliases = [extnet_apidef.ALIAS, l3_apidef.ALIAS,
                                   lib_constants.L3_AGENT_SCHEDULER_EXT_ALIAS]

    router_scheduler = importutils.import_object(
        cfg.CONF.router_scheduler_driver)


# This plugin class is for tests with plugin not supporting L3.
class TestNoL3NatPlugin(TestL3NatBasePlugin):

    __native_pagination_support = True
    __native_sorting_support = True

    supported_extension_aliases = [extnet_apidef.ALIAS]


# A L3 routing service plugin class for tests with plugins that
# delegate away L3 routing functionality
class TestL3NatServicePlugin(TestL3PluginBaseAttributes,
                             l3_dvr_db.L3_NAT_with_dvr_db_mixin,
                             l3_db.L3_NAT_db_mixin, dns_db.DNSDbMixin):

    __native_pagination_support = True
    __native_sorting_support = True

    supported_extension_aliases = [l3_apidef.ALIAS, dns_apidef.ALIAS]

    @classmethod
    def get_plugin_type(cls):
        return plugin_constants.L3

    def get_plugin_description(self):
        return "L3 Routing Service Plugin for testing"


# A L3 routing with L3 agent scheduling service plugin class for tests with
# plugins that delegate away L3 routing functionality
class TestL3NatAgentSchedulingServicePlugin(TestL3NatServicePlugin,
                                            l3_dvrscheduler_db.
                                            L3_DVRsch_db_mixin,
                                            l3_hamode_db.L3_HA_NAT_db_mixin):

    supported_extension_aliases = [l3_apidef.ALIAS,
                                   lib_constants.L3_AGENT_SCHEDULER_EXT_ALIAS]

    def __init__(self):
        super(TestL3NatAgentSchedulingServicePlugin, self).__init__()
        self.router_scheduler = importutils.import_object(
            cfg.CONF.router_scheduler_driver)
        self.agent_notifiers.update(
            {lib_constants.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()})


class L3NatTestCaseMixin(object):

    def _create_router(self, fmt, tenant_id, name=None, admin_state_up=None,
                       set_context=False, arg_list=None, **kwargs):
        tenant_id = tenant_id or _uuid()
        data = {'router': {'tenant_id': tenant_id}}
        if name:
            data['router']['name'] = name
        if admin_state_up is not None:
            data['router']['admin_state_up'] = admin_state_up
        flavor_id = kwargs.get('flavor_id', None)
        if flavor_id:
            data['router']['flavor_id'] = flavor_id
        for arg in (('admin_state_up', 'tenant_id',
                     'availability_zone_hints') + (arg_list or ())):
            # Arg must be present and not empty
            if arg in kwargs:
                data['router'][arg] = kwargs[arg]
        if 'distributed' in kwargs:
            data['router']['distributed'] = bool(kwargs['distributed'])
        router_req = self.new_create_request('routers', data, fmt)
        if set_context and tenant_id:
            # create a specific auth context for this request
            router_req.environ['neutron.context'] = context.Context(
                '', tenant_id)

        return router_req.get_response(self.ext_api)

    def _make_router(self, fmt, tenant_id, name=None, admin_state_up=None,
                     external_gateway_info=None, set_context=False,
                     arg_list=None, **kwargs):
        if external_gateway_info:
            arg_list = ('external_gateway_info', ) + (arg_list or ())
        res = self._create_router(fmt, tenant_id, name,
                                  admin_state_up, set_context,
                                  arg_list=arg_list,
                                  external_gateway_info=external_gateway_info,
                                  **kwargs)
        return self.deserialize(fmt, res)

    def _add_external_gateway_to_router(self, router_id, network_id,
                                        expected_code=exc.HTTPOk.code,
                                        neutron_context=None, ext_ips=None,
                                        **kwargs):
        ext_ips = ext_ips or []
        body = {'router':
                {'external_gateway_info': {'network_id': network_id}}}
        if ext_ips:
            body['router']['external_gateway_info'][
                'external_fixed_ips'] = ext_ips
        if 'policy_id' in kwargs:
            body['router']['external_gateway_info'][
                'qos_policy_id'] = kwargs.get('policy_id')
        return self._update('routers', router_id, body,
                            expected_code=expected_code,
                            neutron_context=neutron_context)

    def _remove_external_gateway_from_router(self, router_id, network_id,
                                             expected_code=exc.HTTPOk.code,
                                             external_gw_info=None):
        return self._update('routers', router_id,
                            {'router': {'external_gateway_info':
                                        external_gw_info}},
                            expected_code=expected_code)

    def _router_interface_action(self, action, router_id, subnet_id, port_id,
                                 expected_code=exc.HTTPOk.code,
                                 expected_body=None,
                                 tenant_id=None,
                                 msg=None):
        interface_data = {}
        if subnet_id is not None:
            interface_data.update({'subnet_id': subnet_id})
        if port_id is not None:
            interface_data.update({'port_id': port_id})

        req = self.new_action_request('routers', interface_data, router_id,
                                      "%s_router_interface" % action)
        # if tenant_id was specified, create a tenant context for this request
        if tenant_id:
            req.environ['neutron.context'] = context.Context(
                '', tenant_id)
        res = req.get_response(self.ext_api)
        self.assertEqual(expected_code, res.status_int, msg)
        response = self.deserialize(self.fmt, res)
        if expected_body:
            self.assertEqual(expected_body, response, msg)
        return response

    @contextlib.contextmanager
    def router(self, name='router1', admin_state_up=True,
               fmt=None, tenant_id=None,
               external_gateway_info=None, set_context=False,
               **kwargs):
        router = self._make_router(fmt or self.fmt, tenant_id, name,
                                   admin_state_up, external_gateway_info,
                                   set_context, **kwargs)
        yield router

    def _set_net_external(self, net_id):
        self._update('networks', net_id,
                     {'network': {extnet_apidef.EXTERNAL: True}})

    def _create_floatingip(self, fmt, network_id, port_id=None,
                           fixed_ip=None, set_context=False,
                           floating_ip=None, subnet_id=None,
                           tenant_id=None, **kwargs):
        tenant_id = tenant_id or self._tenant_id
        data = {'floatingip': {'floating_network_id': network_id,
                               'tenant_id': tenant_id}}
        if port_id:
            data['floatingip']['port_id'] = port_id
            if fixed_ip:
                data['floatingip']['fixed_ip_address'] = fixed_ip

        if floating_ip:
            data['floatingip']['floating_ip_address'] = floating_ip

        if subnet_id:
            data['floatingip']['subnet_id'] = subnet_id
        data['floatingip'].update(kwargs)

        floatingip_req = self.new_create_request('floatingips', data, fmt)
        if set_context and tenant_id:
            # create a specific auth context for this request
            floatingip_req.environ['neutron.context'] = context.Context(
                '', tenant_id)
        return floatingip_req.get_response(self.ext_api)

    def _make_floatingip(self, fmt, network_id, port_id=None,
                         fixed_ip=None, set_context=False, tenant_id=None,
                         floating_ip=None, http_status=exc.HTTPCreated.code,
                         **kwargs):
        res = self._create_floatingip(fmt, network_id, port_id,
                                      fixed_ip, set_context, floating_ip,
                                      tenant_id=tenant_id, **kwargs)
        self.assertEqual(http_status, res.status_int)
        return self.deserialize(fmt, res)

    def _validate_floating_ip(self, fip):
        body = self._list('floatingips')
        self.assertEqual(1, len(body['floatingips']))
        self.assertEqual(body['floatingips'][0]['id'],
                         fip['floatingip']['id'])

        body = self._show('floatingips', fip['floatingip']['id'])
        self.assertEqual(body['floatingip']['id'],
                         fip['floatingip']['id'])

    @contextlib.contextmanager
    def floatingip_with_assoc(self, port_id=None, fmt=None, fixed_ip=None,
                              public_cidr='11.0.0.0/24', set_context=False,
                              tenant_id=None, flavor_id=None, **kwargs):
        with self.subnet(cidr=public_cidr,
                         set_context=set_context,
                         tenant_id=tenant_id) as public_sub:
            self._set_net_external(public_sub['subnet']['network_id'])
            args_list = {'set_context': set_context,
                         'tenant_id': tenant_id}
            if flavor_id:
                args_list['flavor_id'] = flavor_id
            private_port = None
            if port_id:
                private_port = self._show('ports', port_id)
            with test_db_base_plugin_v2.optional_ctx(
                    private_port, self.port,
                    set_context=set_context,
                    tenant_id=tenant_id) as private_port:
                with self.router(**args_list) as r:
                    sid = private_port['port']['fixed_ips'][0]['subnet_id']
                    private_sub = {'subnet': {'id': sid}}
                    floatingip = None

                    self._add_external_gateway_to_router(
                        r['router']['id'],
                        public_sub['subnet']['network_id'])
                    self._router_interface_action(
                        'add', r['router']['id'],
                        private_sub['subnet']['id'], None)

                    floatingip = self._make_floatingip(
                        fmt or self.fmt,
                        public_sub['subnet']['network_id'],
                        port_id=private_port['port']['id'],
                        fixed_ip=fixed_ip,
                        tenant_id=tenant_id,
                        set_context=set_context,
                        **kwargs)
                    yield floatingip

                    if floatingip:
                        self._delete('floatingips',
                                     floatingip['floatingip']['id'])

    @contextlib.contextmanager
    def floatingip_no_assoc_with_public_sub(self, private_sub, fmt=None,
                                            set_context=False,
                                            public_sub=None, flavor_id=None,
                                            **kwargs):
        self._set_net_external(public_sub['subnet']['network_id'])
        args_list = {}
        if flavor_id:
            # NOTE(manjeets) Flavor id None is not accepted
            # and return Flavor None not found error. So for
            # neutron testing this argument should not be passed
            # at all to router.
            args_list['flavor_id'] = flavor_id
        with self.router(**args_list) as r:
            floatingip = None
            self._add_external_gateway_to_router(
                r['router']['id'],
                public_sub['subnet']['network_id'])
            self._router_interface_action('add', r['router']['id'],
                                          private_sub['subnet']['id'], None)

            floatingip = self._make_floatingip(
                fmt or self.fmt,
                public_sub['subnet']['network_id'],
                set_context=set_context,
                **kwargs)
            yield floatingip, r

            if floatingip:
                self._delete('floatingips',
                             floatingip['floatingip']['id'])

    @contextlib.contextmanager
    def floatingip_no_assoc(self, private_sub, fmt=None,
                            set_context=False, flavor_id=None, **kwargs):
        with self.subnet(cidr='12.0.0.0/24') as public_sub:
            with self.floatingip_no_assoc_with_public_sub(
                    private_sub, fmt, set_context, public_sub,
                    flavor_id, **kwargs) as (f, r):
                # Yield only the floating ip object
                yield f


class ExtraAttributesMixinTestCase(testlib_api.SqlTestCase):

    def setUp(self):
        super(ExtraAttributesMixinTestCase, self).setUp()
        self.mixin = l3_attrs_db.ExtraAttributesMixin()
        directory.add_plugin(plugin_constants.L3, self.mixin)
        self.ctx = context.get_admin_context()
        self.router = l3_models.Router()
        with db_api.CONTEXT_WRITER.using(self.ctx):
            self.ctx.session.add(self.router)

    def _get_default_api_values(self):
        return {k: v.get('transform_from_db', lambda x: x)(v['default'])
                for k, v in l3_attrs_db.get_attr_info().items()}

    def test_set_extra_attr_key_bad(self):
        with testtools.ExpectedException(RuntimeError):
            with db_api.CONTEXT_WRITER.using(self.ctx):
                self.mixin.set_extra_attr_value(self.ctx, self.router,
                                                'bad', 'value')

    def test_set_attrs_and_extend_no_transaction(self):
        with testtools.ExpectedException(RuntimeError):
            self.mixin.set_extra_attr_value(self.ctx, self.router,
                                            'ha_vr_id', 99)

    def test__extend_extra_router_dict_defaults(self):
        rdict = {}
        self.mixin._extend_extra_router_dict(rdict, self.router)
        self.assertEqual(self._get_default_api_values(), rdict)

    def test_set_attrs_and_extend(self):
        with db_api.CONTEXT_WRITER.using(self.ctx):
            self.mixin.set_extra_attr_value(self.ctx, self.router,
                                            'ha_vr_id', 99)
            self.mixin.set_extra_attr_value(self.ctx, self.router,
                                            'availability_zone_hints',
                                            ['x', 'y', 'z'])
            expected = self._get_default_api_values()
            expected.update({'ha_vr_id': 99,
                             'availability_zone_hints': ['x', 'y', 'z']})
            rdict = {}
            self.mixin._extend_extra_router_dict(rdict, self.router)
            self.assertEqual(expected, rdict)

            self.mixin.set_extra_attr_value(self.ctx, self.router,
                                            'availability_zone_hints',
                                            ['z', 'y', 'z'])
            expected['availability_zone_hints'] = ['z', 'y', 'z']
            self.mixin._extend_extra_router_dict(rdict, self.router)
            self.assertEqual(expected, rdict)


class L3NatTestCaseBase(L3NatTestCaseMixin):

    def test_router_create(self):
        name = 'router1'
        tenant_id = _uuid()
        expected_value = [('name', name), ('tenant_id', tenant_id),
                          ('admin_state_up', True), ('status', 'ACTIVE'),
                          ('external_gateway_info', None)]
        with self.router(name='router1', admin_state_up=True,
                         tenant_id=tenant_id) as router:
            for k, v in expected_value:
                self.assertEqual(router['router'][k], v)

    def test_router_create_call_extensions(self):
        self.extension_called = False

        def _extend_router_dict_test_attr(*args, **kwargs):
            self.extension_called = True

        resource_extend.register_funcs(
            l3_apidef.ROUTERS, [_extend_router_dict_test_attr])
        self.assertFalse(self.extension_called)
        with self.router():
            self.assertTrue(self.extension_called)

    def test_router_create_with_gwinfo(self):
        with self.subnet() as s:
            self._set_net_external(s['subnet']['network_id'])
            data = {'router': {'tenant_id': _uuid()}}
            data['router']['name'] = 'router1'
            data['router']['external_gateway_info'] = {
                'network_id': s['subnet']['network_id']}
            router_req = self.new_create_request('routers', data, self.fmt)
            res = router_req.get_response(self.ext_api)
            router = self.deserialize(self.fmt, res)
            self.assertEqual(
                s['subnet']['network_id'],
                router['router']['external_gateway_info']['network_id'])

    def test_router_create_with_gwinfo_ext_ip(self):
        with self.subnet() as s:
            self._set_net_external(s['subnet']['network_id'])
            ext_info = {
                'network_id': s['subnet']['network_id'],
                'external_fixed_ips': [{'ip_address': '10.0.0.99'}]
            }
            res = self._create_router(
                self.fmt, _uuid(), arg_list=('external_gateway_info',),
                external_gateway_info=ext_info
            )
            router = self.deserialize(self.fmt, res)
            self.assertEqual(
                [{'ip_address': '10.0.0.99', 'subnet_id': s['subnet']['id']}],
                router['router']['external_gateway_info'][
                    'external_fixed_ips'])

    def test_router_create_with_gwinfo_ext_ip_subnet(self):
        with self.network() as n:
            with self.subnet(network=n) as v1,\
                    self.subnet(network=n, cidr='1.0.0.0/24') as v2,\
                    self.subnet(network=n, cidr='2.0.0.0/24') as v3:
                subnets = (v1, v2, v3)
                self._set_net_external(n['network']['id'])
                for s in subnets:
                    ext_info = {
                        'network_id': n['network']['id'],
                        'external_fixed_ips': [
                            {'subnet_id': s['subnet']['id']}]
                    }
                    res = self._create_router(
                        self.fmt, _uuid(),
                        arg_list=('external_gateway_info',),
                        external_gateway_info=ext_info
                    )
                    router = self.deserialize(self.fmt, res)
                    ext_ips = router['router']['external_gateway_info'][
                        'external_fixed_ips']
                    self.assertEqual(
                        [{'subnet_id': s['subnet']['id'],
                          'ip_address': mock.ANY}], ext_ips)

    def test_router_create_with_gwinfo_ext_ip_non_admin(self):
        with self.subnet() as s:
            self._set_net_external(s['subnet']['network_id'])
            ext_info = {
                'network_id': s['subnet']['network_id'],
                'external_fixed_ips': [{'ip_address': '10.0.0.99'}]
            }
            res = self._create_router(
                self.fmt, _uuid(), arg_list=('external_gateway_info',),
                set_context=True, external_gateway_info=ext_info
            )
            self.assertEqual(exc.HTTPForbidden.code, res.status_int)

    def test_create_routers_native_quotas(self):
        tenant_id = _uuid()
        quota = 1
        cfg.CONF.set_override('quota_router', quota, group='QUOTAS')
        res = self._create_router(self.fmt, tenant_id)
        self.assertEqual(exc.HTTPCreated.code, res.status_int)
        res = self._create_router(self.fmt, tenant_id)
        self.assertEqual(exc.HTTPConflict.code, res.status_int)

    def test_router_list(self):
        with self.router() as v1, self.router() as v2, self.router() as v3:
            routers = (v1, v2, v3)
            self._test_list_resources('router', routers)

    def test_router_list_with_parameters(self):
        with self.router(name='router1') as router1,\
                self.router(name='router2') as router2:
            query_params = 'name=router1'
            self._test_list_resources('router', [router1],
                                      query_params=query_params)
            query_params = 'name=router2'
            self._test_list_resources('router', [router2],
                                      query_params=query_params)
            query_params = 'name=router3'
            self._test_list_resources('router', [],
                                      query_params=query_params)

    def test_router_list_with_sort(self):
        with self.router(name='router1') as router1,\
                self.router(name='router2') as router2,\
                self.router(name='router3') as router3:
            self._test_list_with_sort('router', (router3, router2, router1),
                                      [('name', 'desc')])

    def test_router_list_with_pagination(self):
        with self.router(name='router1') as router1,\
                self.router(name='router2') as router2,\
                self.router(name='router3') as router3:
            self._test_list_with_pagination('router',
                                            (router1, router2, router3),
                                            ('name', 'asc'), 2, 2)

    def test_router_list_with_pagination_reverse(self):
        with self.router(name='router1') as router1,\
                self.router(name='router2') as router2,\
                self.router(name='router3') as router3:
            self._test_list_with_pagination_reverse('router',
                                                    (router1, router2,
                                                     router3),
                                                    ('name', 'asc'), 2, 2)

    def test_router_update(self):
        rname1 = "yourrouter"
        rname2 = "nachorouter"
        with self.router(name=rname1) as r:
            body = self._show('routers', r['router']['id'])
            self.assertEqual(body['router']['name'], rname1)

            body = self._update('routers', r['router']['id'],
                                {'router': {'name': rname2}})

            body = self._show('routers', r['router']['id'])
            self.assertEqual(body['router']['name'], rname2)

    def test_router_update_gateway(self):
        with self.router() as r:
            with self.subnet() as s1:
                with self.subnet() as s2:
                    self._set_net_external(s1['subnet']['network_id'])
                    self._add_external_gateway_to_router(
                        r['router']['id'],
                        s1['subnet']['network_id'])
                    body = self._show('routers', r['router']['id'])
                    net_id = (body['router']
                              ['external_gateway_info']['network_id'])
                    self.assertEqual(net_id, s1['subnet']['network_id'])
                    self._set_net_external(s2['subnet']['network_id'])
                    self._add_external_gateway_to_router(
                        r['router']['id'],
                        s2['subnet']['network_id'])
                    body = self._show('routers', r['router']['id'])
                    net_id = (body['router']
                              ['external_gateway_info']['network_id'])
                    self.assertEqual(net_id, s2['subnet']['network_id'])
                    # Validate that we can clear the gateway with
                    # an empty dict, in any other case, we fall back
                    # on None as default value
                    self._remove_external_gateway_from_router(
                        r['router']['id'],
                        s2['subnet']['network_id'],
                        external_gw_info={})

    def test_router_update_gateway_with_external_ip_used_by_gw(self):
        with self.router() as r:
            with self.subnet() as s:
                self._set_net_external(s['subnet']['network_id'])
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    s['subnet']['network_id'],
                    ext_ips=[{'ip_address': s['subnet']['gateway_ip']}],
                    expected_code=exc.HTTPBadRequest.code)

    def test_router_update_gateway_with_invalid_external_ip(self):
        with self.router() as r:
            with self.subnet() as s:
                self._set_net_external(s['subnet']['network_id'])
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    s['subnet']['network_id'],
                    ext_ips=[{'ip_address': '99.99.99.99'}],
                    expected_code=exc.HTTPBadRequest.code)

    def test_router_update_gateway_with_invalid_external_subnet(self):
        with self.subnet() as s1,\
                self.subnet(cidr='1.0.0.0/24') as s2,\
                self.router() as r:
            self._set_net_external(s1['subnet']['network_id'])
            self._add_external_gateway_to_router(
                r['router']['id'],
                s1['subnet']['network_id'],
                # this subnet is not on the same network so this should fail
                ext_ips=[{'subnet_id': s2['subnet']['id']}],
                expected_code=exc.HTTPBadRequest.code)

    def test_router_update_gateway_with_different_external_subnet(self):
        with self.network() as n:
            with self.subnet(network=n) as s1,\
                    self.subnet(network=n, cidr='1.0.0.0/24') as s2,\
                    self.router() as r:
                self._set_net_external(n['network']['id'])
                res1 = self._add_external_gateway_to_router(
                    r['router']['id'],
                    n['network']['id'],
                    ext_ips=[{'subnet_id': s1['subnet']['id']}])
                res2 = self._add_external_gateway_to_router(
                    r['router']['id'],
                    n['network']['id'],
                    ext_ips=[{'subnet_id': s2['subnet']['id']}])
                fip1 = res1['router']['external_gateway_info'][
                    'external_fixed_ips'][0]
                fip2 = res2['router']['external_gateway_info'][
                    'external_fixed_ips'][0]
                self.assertEqual(s1['subnet']['id'], fip1['subnet_id'])
                self.assertEqual(s2['subnet']['id'], fip2['subnet_id'])
                self.assertNotEqual(fip1['subnet_id'], fip2['subnet_id'])
                self.assertNotEqual(fip1['ip_address'], fip2['ip_address'])

    def test_router_update_gateway_with_existed_floatingip(self):
        with self.subnet() as subnet:
            self._set_net_external(subnet['subnet']['network_id'])
            with self.floatingip_with_assoc() as fip:
                self._add_external_gateway_to_router(
                    fip['floatingip']['router_id'],
                    subnet['subnet']['network_id'],
                    expected_code=exc.HTTPConflict.code)

    def test_router_update_gateway_to_empty_with_existed_floatingip(self):
        with self.floatingip_with_assoc() as fip:
            self._remove_external_gateway_from_router(
                fip['floatingip']['router_id'], None,
                expected_code=exc.HTTPConflict.code)

    def test_router_update_gateway_add_multiple_prefixes_ipv6(self):
        with self.network() as n:
            with self.subnet(network=n) as s1, \
                    self.subnet(network=n,
                                ip_version=lib_constants.IP_VERSION_6,
                                cidr='2001:db8::/32') as s2, (
                    self.router()) as r:
                self._set_net_external(n['network']['id'])
                res1 = self._add_external_gateway_to_router(
                    r['router']['id'],
                    n['network']['id'],
                    ext_ips=[{'subnet_id': s1['subnet']['id']}])
                fip1 = (res1['router']['external_gateway_info']
                        ['external_fixed_ips'][0])
                self.assertEqual(s1['subnet']['id'], fip1['subnet_id'])
                res2 = self._add_external_gateway_to_router(
                    r['router']['id'],
                    n['network']['id'],
                    ext_ips=[{'ip_address': fip1['ip_address'],
                              'subnet_id': s1['subnet']['id']},
                             {'subnet_id': s2['subnet']['id']}])
                self.assertEqual(fip1, res2['router']
                                 ['external_gateway_info']
                                 ['external_fixed_ips'][0])
                fip2 = (res2['router']['external_gateway_info']
                        ['external_fixed_ips'][1])
                self.assertEqual(s2['subnet']['id'], fip2['subnet_id'])
                self.assertNotEqual(fip1['subnet_id'], fip2['subnet_id'])
                self.assertNotEqual(fip1['ip_address'], fip2['ip_address'])

    def test_router_concurrent_delete_upon_subnet_create(self):
        with self.network() as n:
            with self.subnet(network=n) as s1, self.router() as r:
                self._set_net_external(n['network']['id'])
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    n['network']['id'],
                    ext_ips=[{'subnet_id': s1['subnet']['id']}])
                plugin = directory.get_plugin(plugin_constants.L3)
                mock.patch.object(
                    plugin, 'update_router',
                    side_effect=l3_exc.RouterNotFound(router_id='1')).start()
                # ensure the router disappearing doesn't interfere with subnet
                # creation
                self._create_subnet(self.fmt, net_id=n['network']['id'],
                                    ip_version=lib_constants.IP_VERSION_6,
                                    cidr='2001:db8::/32',
                                    expected_res_status=(
                                        exc.HTTPCreated.code))

    def test_router_update_gateway_upon_subnet_create_ipv6(self):
        with self.network() as n:
            with self.subnet(network=n) as s1, self.router() as r:
                self._set_net_external(n['network']['id'])
                res1 = self._add_external_gateway_to_router(
                    r['router']['id'],
                    n['network']['id'],
                    ext_ips=[{'subnet_id': s1['subnet']['id']}])
                fip1 = (res1['router']['external_gateway_info']
                        ['external_fixed_ips'][0])
                sres = self._create_subnet(
                    self.fmt, net_id=n['network']['id'],
                    ip_version=lib_constants.IP_VERSION_6,
                    cidr='2001:db8::/32',
                    expected_res_status=(exc.HTTPCreated.code))
                s2 = self.deserialize(self.fmt, sres)
                res2 = self._show('routers', r['router']['id'])
                self.assertEqual(fip1, res2['router']
                                 ['external_gateway_info']
                                 ['external_fixed_ips'][0])
                fip2 = (res2['router']['external_gateway_info']
                        ['external_fixed_ips'][1])
                self.assertEqual(s2['subnet']['id'], fip2['subnet_id'])
                self.assertNotEqual(fip1['subnet_id'], fip2['subnet_id'])
                self.assertNotEqual(fip1['ip_address'], fip2['ip_address'])

    def test_router_update_gateway_upon_subnet_create_max_ips_ipv6(self):
        """Create subnet should not cause excess fixed IPs on router gw

        If a router gateway port has the
        maximum of one IPv4 and one IPv6 fixed IP, create subnet should not
        add any more IP addresses to the port (unless the subnet is a
        SLAAC/DHCPv6-stateless subnet, in which case the addresses are added
        automatically)
        """
        with self.router() as r, self.network() as n:
            with self.subnet(cidr='10.0.0.0/24', network=n) as s1, (
                    self.subnet(ip_version=lib_constants.IP_VERSION_6,
                                cidr='2001:db8::/64', network=n)) as s2:
                self._set_net_external(n['network']['id'])
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    n['network']['id'],
                    ext_ips=[{'subnet_id': s1['subnet']['id']},
                             {'subnet_id': s2['subnet']['id']}],
                    expected_code=exc.HTTPOk.code)
                res1 = self._show('routers', r['router']['id'])
                original_fips = (res1['router']['external_gateway_info']
                                 ['external_fixed_ips'])
                # Add another IPv4 subnet - a fip SHOULD NOT be added
                # to the external gateway port as it already has a v4 address
                self._create_subnet(self.fmt, net_id=n['network']['id'],
                                    cidr='10.0.1.0/24')
                res2 = self._show('routers', r['router']['id'])
                self.assertEqual(original_fips,
                                 res2['router']['external_gateway_info']
                                 ['external_fixed_ips'])
                # Add a SLAAC subnet - a fip from this subnet SHOULD be added
                # to the external gateway port
                s3 = self.deserialize(
                    self.fmt,
                    self._create_subnet(
                        self.fmt,
                        net_id=n['network']['id'],
                        ip_version=lib_constants.IP_VERSION_6,
                        cidr='2001:db8:1::/64',
                        ipv6_ra_mode=lib_constants.IPV6_SLAAC,
                        ipv6_address_mode=lib_constants.IPV6_SLAAC))
                res3 = self._show('routers', r['router']['id'])
                fips = (res3['router']['external_gateway_info']
                        ['external_fixed_ips'])
                fip_subnet_ids = [fip['subnet_id'] for fip in fips]
                self.assertIn(s1['subnet']['id'], fip_subnet_ids)
                self.assertIn(s2['subnet']['id'], fip_subnet_ids)
                self.assertIn(s3['subnet']['id'], fip_subnet_ids)
                self._remove_external_gateway_from_router(
                    r['router']['id'],
                    n['network']['id'])

    def _test_router_add_interface_subnet(self, router, subnet, msg=None):
        exp_notifications = ['router.create.start',
                             'router.create.end',
                             'network.create.start',
                             'network.create.end',
                             'subnet.create.start',
                             'subnet.create.end',
                             'router.interface.create',
                             'router.interface.delete']
        body = self._router_interface_action('add',
                                             router['router']['id'],
                                             subnet['subnet']['id'],
                                             None)
        self.assertIn('port_id', body, msg)

        # fetch port and confirm device_id
        r_port_id = body['port_id']
        port = self._show('ports', r_port_id)
        self.assertEqual(port['port']['device_id'],
                         router['router']['id'], msg)

        self._router_interface_action('remove',
                                      router['router']['id'],
                                      subnet['subnet']['id'],
                                      None)
        self._show('ports', r_port_id,
                   expected_code=exc.HTTPNotFound.code)

        self.assertEqual(
            set(exp_notifications),
            set(n['event_type'] for n in fake_notifier.NOTIFICATIONS), msg)

        for n in fake_notifier.NOTIFICATIONS:
            if n['event_type'].startswith('router.interface.'):
                payload = n['payload']['router_interface']
                self.assertIn('id', payload)
                self.assertEqual(payload['id'], router['router']['id'])
                self.assertIn('tenant_id', payload)
                rtid = router['router']['tenant_id']
                # tolerate subnet tenant deliberately set to '' in the
                # nsx metadata access case
                self.assertIn(payload['tenant_id'], [rtid, ''], msg)

    def test_router_add_interface_bad_values(self):
        with self.router() as r:
            exp_code = exc.HTTPBadRequest.code
            self._router_interface_action('add',
                                          r['router']['id'],
                                          False,
                                          None,
                                          expected_code=exp_code)
            self._router_interface_action('add',
                                          r['router']['id'],
                                          None,
                                          False,
                                          expected_code=exp_code)

    def test_router_add_interface_subnet(self):
        fake_notifier.reset()
        with self.router() as r:
            with self.network() as n:
                with self.subnet(network=n) as s:
                    self._test_router_add_interface_subnet(r, s)

    def test_router_delete_race_with_interface_add(self):
        # this test depends on protection from the revision plugin so
        # we have to initialize it
        revision_plugin.RevisionPlugin()
        with self.router() as r, self.subnet() as s:

            def jam_in_interface(*args, **kwargs):
                self._router_interface_action('add', r['router']['id'],
                                              s['subnet']['id'], None)
                # unsubscribe now that the evil is done
                registry.unsubscribe(jam_in_interface, resources.ROUTER,
                                     events.PRECOMMIT_DELETE)
            registry.subscribe(jam_in_interface, resources.ROUTER,
                               events.PRECOMMIT_DELETE)
            self._delete('routers', r['router']['id'],
                         expected_code=exc.HTTPConflict.code)

    def test_router_add_interface_ipv6_subnet(self):
        """Test router-interface-add for valid ipv6 subnets.

        Verify the valid use-cases of an IPv6 subnet where we
        are allowed to associate to the Neutron Router are successful.
        """
        slaac = lib_constants.IPV6_SLAAC
        stateful = lib_constants.DHCPV6_STATEFUL
        stateless = lib_constants.DHCPV6_STATELESS
        use_cases = [{'msg': 'IPv6 Subnet Modes (slaac, none)',
                      'ra_mode': slaac, 'address_mode': None},
                     {'msg': 'IPv6 Subnet Modes (none, none)',
                      'ra_mode': None, 'address_mode': None},
                     {'msg': 'IPv6 Subnet Modes (dhcpv6-stateful, none)',
                      'ra_mode': stateful, 'address_mode': None},
                     {'msg': 'IPv6 Subnet Modes (dhcpv6-stateless, none)',
                      'ra_mode': stateless, 'address_mode': None},
                     {'msg': 'IPv6 Subnet Modes (slaac, slaac)',
                      'ra_mode': slaac, 'address_mode': slaac},
                     {'msg': 'IPv6 Subnet Modes (dhcpv6-stateful,'
                      'dhcpv6-stateful)',
                      'ra_mode': stateful, 'address_mode': stateful},
                     {'msg': 'IPv6 Subnet Modes (dhcpv6-stateless,'
                      'dhcpv6-stateless)',
                      'ra_mode': stateless, 'address_mode': stateless}]
        for uc in use_cases:
            fake_notifier.reset()
            with self.router() as r, self.network() as n:
                with self.subnet(network=n, cidr='fd00::1/64',
                                 gateway_ip='fd00::1',
                                 ip_version=lib_constants.IP_VERSION_6,
                                 ipv6_ra_mode=uc['ra_mode'],
                                 ipv6_address_mode=uc['address_mode']) as s:
                    self._test_router_add_interface_subnet(r, s, uc['msg'])

    def test_router_add_interface_multiple_ipv4_subnets(self):
        """Test router-interface-add for multiple ipv4 subnets.

        Verify that adding multiple ipv4 subnets from the same network
        to a router places them all on different router interfaces.
        """
        with self.router() as r, self.network() as n:
            with self.subnet(network=n, cidr='10.0.0.0/24') as s1, (
                    self.subnet(network=n, cidr='10.0.1.0/24')) as s2:
                body = self._router_interface_action('add',
                                                     r['router']['id'],
                                                     s1['subnet']['id'],
                                                     None)
                pid1 = body['port_id']
                body = self._router_interface_action('add',
                                                     r['router']['id'],
                                                     s2['subnet']['id'],
                                                     None)
                pid2 = body['port_id']
                self.assertNotEqual(pid1, pid2)
                self._router_interface_action('remove', r['router']['id'],
                                              s1['subnet']['id'], None)
                self._router_interface_action('remove', r['router']['id'],
                                              s2['subnet']['id'], None)

    def test_router_add_interface_multiple_ipv6_subnets_same_net(self):
        """Test router-interface-add for multiple ipv6 subnets on a network.

        Verify that adding multiple ipv6 subnets from the same network
        to a router places them all on the same router interface.
        """
        with self.router() as r, self.network() as n:
            with self.subnet(network=n, cidr='fd00::1/64',
                             ip_version=lib_constants.IP_VERSION_6) as s1, \
                    self.subnet(network=n, cidr='fd01::1/64',
                                ip_version=lib_constants.IP_VERSION_6) as s2:
                body = self._router_interface_action('add',
                                                     r['router']['id'],
                                                     s1['subnet']['id'],
                                                     None)
                pid1 = body['port_id']
                body = self._router_interface_action('add',
                                                     r['router']['id'],
                                                     s2['subnet']['id'],
                                                     None)
                pid2 = body['port_id']
                self.assertEqual(pid1, pid2)
                port = self._show('ports', pid1)
                self.assertEqual(2, len(port['port']['fixed_ips']))
                port_subnet_ids = [fip['subnet_id'] for fip in
                                   port['port']['fixed_ips']]
                self.assertIn(s1['subnet']['id'], port_subnet_ids)
                self.assertIn(s2['subnet']['id'], port_subnet_ids)
                self._router_interface_action('remove', r['router']['id'],
                                              s1['subnet']['id'], None)
                self._router_interface_action('remove', r['router']['id'],
                                              s2['subnet']['id'], None)

    def test_router_add_interface_multiple_ipv6_subnets_different_net(self):
        """Test router-interface-add for ipv6 subnets on different networks.

        Verify that adding multiple ipv6 subnets from different networks
        to a router places them on different router interfaces.
        """
        with self.router() as r, self.network() as n1, self.network() as n2:
            with self.subnet(network=n1, cidr='fd00::1/64',
                             ip_version=lib_constants.IP_VERSION_6) as s1, \
                    self.subnet(network=n2, cidr='fd01::1/64',
                                ip_version=lib_constants.IP_VERSION_6) as s2:
                body = self._router_interface_action('add',
                                                     r['router']['id'],
                                                     s1['subnet']['id'],
                                                     None)
                pid1 = body['port_id']
                body = self._router_interface_action('add',
                                                     r['router']['id'],
                                                     s2['subnet']['id'],
                                                     None)
                pid2 = body['port_id']
                self.assertNotEqual(pid1, pid2)
                self._router_interface_action('remove', r['router']['id'],
                                              s1['subnet']['id'], None)
                self._router_interface_action('remove', r['router']['id'],
                                              s2['subnet']['id'], None)

    def test_router_add_iface_ipv6_ext_ra_subnet_returns_400(self):
        """Test router-interface-add for invalid ipv6 subnets.

        Verify that an appropriate error message is displayed when
        an IPv6 subnet configured to use an external_router for Router
        Advertisements (i.e., ipv6_ra_mode is None and ipv6_address_mode
        is not None) is attempted to associate with a Neutron Router.
        """
        use_cases = [{'msg': 'IPv6 Subnet Modes (none, slaac)',
                      'ra_mode': None,
                      'address_mode': lib_constants.IPV6_SLAAC},
                     {'msg': 'IPv6 Subnet Modes (none, dhcpv6-stateful)',
                      'ra_mode': None,
                      'address_mode': lib_constants.DHCPV6_STATEFUL},
                     {'msg': 'IPv6 Subnet Modes (none, dhcpv6-stateless)',
                      'ra_mode': None,
                      'address_mode': lib_constants.DHCPV6_STATELESS}]
        for uc in use_cases:
            with self.router() as r, self.network() as n:
                with self.subnet(network=n, cidr='fd00::1/64',
                                 gateway_ip='fd00::1',
                                 ip_version=lib_constants.IP_VERSION_6,
                                 ipv6_ra_mode=uc['ra_mode'],
                                 ipv6_address_mode=uc['address_mode']) as s:
                    exp_code = exc.HTTPBadRequest.code
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  s['subnet']['id'],
                                                  None,
                                                  expected_code=exp_code,
                                                  msg=uc['msg'])

    def test_router_add_interface_ipv6_subnet_without_gateway_ip(self):
        with self.router() as r:
            with self.subnet(ip_version=lib_constants.IP_VERSION_6,
                             cidr='fe80::/64',
                             gateway_ip=None) as s:
                error_code = exc.HTTPBadRequest.code
                self._router_interface_action('add',
                                              r['router']['id'],
                                              s['subnet']['id'],
                                              None,
                                              expected_code=error_code)

    def test_router_add_interface_subnet_with_bad_tenant_returns_404(self):
        tenant_id = _uuid()
        with self.router(tenant_id=tenant_id, set_context=True) as r:
            with self.network(tenant_id=tenant_id, set_context=True) as n:
                with self.subnet(network=n, set_context=True) as s:
                    err_code = exc.HTTPNotFound.code
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  s['subnet']['id'],
                                                  None,
                                                  expected_code=err_code,
                                                  tenant_id='bad_tenant')
                    body = self._router_interface_action('add',
                                                         r['router']['id'],
                                                         s['subnet']['id'],
                                                         None)
                    self.assertIn('port_id', body)
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  s['subnet']['id'],
                                                  None,
                                                  expected_code=err_code,
                                                  tenant_id='bad_tenant')

    def test_router_add_interface_by_subnet_other_tenant_subnet_returns_400(
            self):
        router_tenant_id = _uuid()
        with self.router(tenant_id=router_tenant_id, set_context=True) as r:
            with self.network(shared=True) as n:
                with self.subnet(network=n) as s:
                    err_code = exc.HTTPBadRequest.code
                    self._router_interface_action(
                        'add',
                        r['router']['id'],
                        s['subnet']['id'],
                        None,
                        expected_code=err_code,
                        tenant_id=router_tenant_id)

    def _test_router_add_interface_by_port_allocation_pool(
            self, out_of_pool=False, router_action_as_admin=False,
            expected_code=exc.HTTPOk.code):
        router_tenant_id = _uuid()
        with self.router(tenant_id=router_tenant_id, set_context=True) as r:
            with self.network(shared=True) as n:
                with self.subnet(network=n) as s1, (
                        self.subnet(network=n, cidr='fd00::/64',
                                    ip_version=lib_constants.IP_VERSION_6)
                        ) as s2, (
                        self.subnet(network=n, cidr='fd01::/64',
                                    ip_version=lib_constants.IP_VERSION_6)
                        ) as s3:
                    fixed_ips = [{'subnet_id': s1['subnet']['id']},
                                 {'subnet_id': s2['subnet']['id']},
                                 {'subnet_id': s3['subnet']['id']}]
                    if out_of_pool:
                        fixed_ips[1] = {'subnet_id': s2['subnet']['id'],
                                        'ip_address':
                                            s2['subnet']['gateway_ip']}
                    with self.port(subnet=s1, fixed_ips=fixed_ips,
                                   tenant_id=router_tenant_id) as p:
                        kwargs = {'expected_code': expected_code}
                        if not router_action_as_admin:
                            kwargs['tenant_id'] = router_tenant_id
                        self._router_interface_action(
                            'add', r['router']['id'], None, p['port']['id'],
                            **kwargs)

    def test_router_add_interface_by_port_other_tenant_address_in_pool(
            self):
        self._test_router_add_interface_by_port_allocation_pool()

    def test_router_add_interface_by_port_other_tenant_address_out_of_pool(
            self):
        self._test_router_add_interface_by_port_allocation_pool(
            out_of_pool=True, expected_code=exc.HTTPBadRequest.code)

    def test_router_add_interface_by_port_admin_address_out_of_pool(
            self):
        self._test_router_add_interface_by_port_allocation_pool(
            out_of_pool=True, router_action_as_admin=True)

    def test_router_add_interface_subnet_with_port_from_other_tenant(self):
        tenant_id = _uuid()
        other_tenant_id = _uuid()
        with self.router(tenant_id=tenant_id) as r,\
                self.network(tenant_id=tenant_id) as n1,\
                self.network(tenant_id=other_tenant_id) as n2:
            with self.subnet(network=n1, cidr='10.0.0.0/24') as s1,\
                    self.subnet(network=n2, cidr='10.1.0.0/24') as s2:
                body = self._router_interface_action(
                    'add',
                    r['router']['id'],
                    s2['subnet']['id'],
                    None)
                self.assertIn('port_id', body)
                self._router_interface_action(
                    'add',
                    r['router']['id'],
                    s1['subnet']['id'],
                    None,
                    tenant_id=tenant_id)
                self.assertIn('port_id', body)

    def test_router_add_interface_port(self):
        orig_update_port = self.plugin.update_port
        with self.router() as r, (
                self.port()) as p, (
                mock.patch.object(self.plugin, 'update_port')) as update_port:
            update_port.side_effect = orig_update_port
            body = self._router_interface_action('add',
                                                 r['router']['id'],
                                                 None,
                                                 p['port']['id'])
            self.assertIn('port_id', body)
            self.assertEqual(p['port']['id'], body['port_id'])
            expected_port_update = {
                'device_owner': lib_constants.DEVICE_OWNER_ROUTER_INTF,
                'device_id': r['router']['id']}
            update_port.assert_any_call(
                mock.ANY, p['port']['id'], {'port': expected_port_update})
            # fetch port and confirm device_id
            body = self._show('ports', p['port']['id'])
            self.assertEqual(r['router']['id'], body['port']['device_id'])

            # clean-up
            self._router_interface_action('remove',
                                          r['router']['id'],
                                          None,
                                          p['port']['id'])

    def test_update_router_interface_port_ip_not_allowed(self):
        with self.router() as r, self.port() as p:
            body = self._router_interface_action('add',
                                                 r['router']['id'],
                                                 None,
                                                 p['port']['id'])
            self.assertIn('port_id', body)
            self.assertEqual(p['port']['id'], body['port_id'])
            body = self._show('ports', p['port']['id'])
            self.assertEqual(r['router']['id'], body['port']['device_id'])
            data = {'port': {'fixed_ips': [
                {'ip_address': '1.1.1.1'},
                {'ip_address': '2.2.2.2'}]}}
            self._update('ports', p['port']['id'], data,
                         neutron_context=context.get_admin_context(),
                         expected_code=exc.HTTPBadRequest.code)
            self._router_interface_action('remove',
                                          r['router']['id'],
                                          None,
                                          p['port']['id'])

    def test_router_add_interface_delete_port_after_failure(self):
        with self.router() as r, self.subnet(enable_dhcp=False) as s:
            plugin = directory.get_plugin()
            # inject a failure in the update port that happens at the end
            # to ensure the port gets deleted
            with mock.patch.object(
                    plugin, 'update_port',
                    side_effect=n_exc.InvalidInput(error_message='x')):
                self._router_interface_action('add',
                                              r['router']['id'],
                                              s['subnet']['id'],
                                              None,
                                              exc.HTTPBadRequest.code)
                self.assertFalse(plugin.get_ports(
                    context.get_admin_context()))

    def test_router_add_interface_dup_port(self):
        '''This tests that if multiple routers add one port as their
        interface, only the first router's interface is added to the
        port. All later requests return exceptions.
''' with self.router() as r1, self.router() as r2, self.network() as n: with self.subnet(network=n) as s: with self.port(subnet=s) as p: self._router_interface_action('add', r1['router']['id'], None, p['port']['id']) # mock out the sequential check plugin = 'neutron.db.l3_db.L3_NAT_dbonly_mixin' check_p = mock.patch(plugin + '._check_router_port', port_id=p['port']['id'], device_id=r2['router']['id'], return_value=p['port']) checkport = check_p.start() # do regular checkport after first skip checkport.side_effect = check_p.stop() self._router_interface_action('add', r2['router']['id'], None, p['port']['id'], exc.HTTPConflict.code) # clean-up self._router_interface_action('remove', r1['router']['id'], None, p['port']['id']) def _assert_body_port_id_and_update_port(self, body, mock_update_port, port_id, device_id): self.assertNotIn('port_id', body) expected_port_update_before_update = { 'device_owner': lib_constants.DEVICE_OWNER_ROUTER_INTF, 'device_id': device_id} expected_port_update_after_fail = { 'device_owner': '', 'device_id': ''} mock_update_port.assert_has_calls( [mock.call( mock.ANY, port_id, {'port': expected_port_update_before_update}), mock.call( mock.ANY, port_id, {'port': expected_port_update_after_fail})], any_order=False) # fetch port and confirm device_id and device_owner body = self._show('ports', port_id) self.assertEqual('', body['port']['device_owner']) self.assertEqual('', body['port']['device_id']) def test_router_add_interface_multiple_ipv4_subnet_port_returns_400(self): """Test adding router port with multiple IPv4 subnets fails. Multiple IPv4 subnets are not allowed on a single router port. Ensure that adding a port with multiple IPv4 subnets to a router fails. """ with self.network() as n, self.router() as r: with self.subnet(network=n, cidr='10.0.0.0/24') as s1, ( self.subnet(network=n, cidr='10.0.1.0/24')) as s2: fixed_ips = [{'subnet_id': s1['subnet']['id']}, {'subnet_id': s2['subnet']['id']}] orig_update_port = self.plugin.update_port with self.port(subnet=s1, fixed_ips=fixed_ips) as p, ( mock.patch.object(self.plugin, 'update_port')) as update_port: update_port.side_effect = orig_update_port exp_code = exc.HTTPBadRequest.code body = self._router_interface_action( 'add', r['router']['id'], None, p['port']['id'], expected_code=exp_code) self._assert_body_port_id_and_update_port( body, update_port, p['port']['id'], r['router']['id']) def test_router_add_interface_ipv6_port_existing_network_returns_400(self): """Ensure unique IPv6 router ports per network id. Adding a router port containing one or more IPv6 subnets with the same network id as an existing router port should fail. This is so there is no ambiguity regarding on which port to add an IPv6 subnet when executing router-interface-add with a subnet and no port. 
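When only a subnet is given, the plugin extends the single existing IPv6 router port on that network instead of creating a second one.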
""" with self.network() as n, self.router() as r: with self.subnet(network=n, cidr='fd00::/64', ip_version=lib_constants.IP_VERSION_6) as s1, ( self.subnet(network=n, cidr='fd01::/64', ip_version=lib_constants.IP_VERSION_6)) as s2: orig_update_port = self.plugin.update_port with self.port(subnet=s1) as p, ( mock.patch.object(self.plugin, 'update_port')) as update_port: update_port.side_effect = orig_update_port self._router_interface_action('add', r['router']['id'], s2['subnet']['id'], None) exp_code = exc.HTTPBadRequest.code body = self._router_interface_action( 'add', r['router']['id'], None, p['port']['id'], expected_code=exp_code) self._assert_body_port_id_and_update_port( body, update_port, p['port']['id'], r['router']['id']) self._router_interface_action('remove', r['router']['id'], s2['subnet']['id'], None) def test_router_add_interface_multiple_ipv6_subnet_port(self): """A port with multiple IPv6 subnets can be added to a router Create a port with multiple associated IPv6 subnets and attach it to a router. The action should succeed. """ with self.network() as n, self.router() as r: with self.subnet(network=n, cidr='fd00::/64', ip_version=lib_constants.IP_VERSION_6) as s1, ( self.subnet(network=n, cidr='fd01::/64', ip_version=lib_constants.IP_VERSION_6)) as s2: fixed_ips = [{'subnet_id': s1['subnet']['id']}, {'subnet_id': s2['subnet']['id']}] with self.port(subnet=s1, fixed_ips=fixed_ips) as p: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) self._router_interface_action('remove', r['router']['id'], None, p['port']['id']) def test_router_add_interface_empty_port_and_subnet_ids(self): with self.router() as r: self._router_interface_action('add', r['router']['id'], None, None, expected_code=exc. HTTPBadRequest.code) def test_router_add_interface_port_bad_tenant_returns_404(self): tenant_id = _uuid() with self.router(tenant_id=tenant_id, set_context=True) as r: with self.network(tenant_id=tenant_id, set_context=True) as n: with self.subnet(tenant_id=tenant_id, network=n, set_context=True) as s: with self.port(tenant_id=tenant_id, subnet=s, set_context=True) as p: err_code = exc.HTTPNotFound.code self._router_interface_action('add', r['router']['id'], None, p['port']['id'], expected_code=err_code, tenant_id='bad_tenant') self._router_interface_action('add', r['router']['id'], None, p['port']['id'], tenant_id=tenant_id) # clean-up should fail as well self._router_interface_action('remove', r['router']['id'], None, p['port']['id'], expected_code=err_code, tenant_id='bad_tenant') def test_router_add_interface_port_without_ips(self): with self.network() as network, self.router() as r: # Create a router port without ips p = self._make_port(self.fmt, network['network']['id'], device_owner=lib_constants.DEVICE_OWNER_ROUTER_INTF) err_code = exc.HTTPBadRequest.code self._router_interface_action('add', r['router']['id'], None, p['port']['id'], expected_code=err_code) def test_router_add_interface_dup_subnet1_returns_400(self): with self.router() as r: with self.subnet() as s: self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None) self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None, expected_code=exc. 
HTTPBadRequest.code) def test_router_add_interface_dup_subnet2_returns_400(self): with self.router() as r: with self.subnet() as s1, self.subnet(cidr='1.0.0.0/24') as s2: with self.port(subnet=s1) as p1, self.port(subnet=s2) as p2: orig_update_port = self.plugin.update_port with self.port(subnet=s1) as p3, ( mock.patch.object(self.plugin, 'update_port')) as update_port: update_port.side_effect = orig_update_port for p in [p1, p2]: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) body = self._router_interface_action( 'add', r['router']['id'], None, p3['port']['id'], expected_code=exc.HTTPBadRequest.code) self._assert_body_port_id_and_update_port( body, update_port, p3['port']['id'], r['router']['id']) def test_router_add_interface_overlapped_cidr_returns_400(self): with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s1, self.subnet( cidr='10.0.2.0/24') as s2: self._router_interface_action('add', r['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r['router']['id'], s2['subnet']['id'], None) def try_overlapped_cidr(cidr): with self.subnet(cidr=cidr) as s3: self._router_interface_action('add', r['router']['id'], s3['subnet']['id'], None, expected_code=exc. HTTPBadRequest.code) # another subnet with same cidr try_overlapped_cidr('10.0.1.0/24') try_overlapped_cidr('10.0.2.0/24') # another subnet with overlapped cidr including s1 try_overlapped_cidr('10.0.0.0/16') # another subnet with overlapped cidr including s2 try_overlapped_cidr('10.0.2.128/28') def test_router_add_interface_no_data_returns_400(self): with self.router() as r: self._router_interface_action('add', r['router']['id'], None, None, expected_code=exc. HTTPBadRequest.code) def test_router_add_interface_with_both_ids_returns_400(self): with self.router() as r: with self.subnet() as s: with self.port(subnet=s) as p: self._router_interface_action('add', r['router']['id'], s['subnet']['id'], p['port']['id'], expected_code=exc. 
HTTPBadRequest.code) def test_router_add_interface_cidr_overlapped_with_gateway(self): with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s1, self.subnet( cidr='10.0.0.0/16') as s2: self._set_net_external(s2['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s2['subnet']['network_id']) res = self._router_interface_action('add', r['router']['id'], s1['subnet']['id'], None) self.assertIn('port_id', res) def test_router_add_interface_by_port_cidr_overlapped_with_gateway(self): with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s1, self.subnet( cidr='10.0.0.0/16') as s2: with self.port(subnet=s1) as p: self._set_net_external(s2['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s2['subnet']['network_id']) res = self._router_interface_action('add', r['router']['id'], None, p['port']['id']) self.assertIn('port_id', res) def test_router_add_gateway_dup_subnet1_returns_400(self): with self.router() as r: with self.subnet() as s: self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None) self._set_net_external(s['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s['subnet']['network_id'], expected_code=exc.HTTPBadRequest.code) def test_router_add_gateway_dup_subnet2_returns_400(self): with self.router() as r: with self.subnet() as s: self._set_net_external(s['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s['subnet']['network_id']) self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None, expected_code=exc. HTTPBadRequest.code) def test_router_add_gateway_multiple_subnets_ipv6(self): """Ensure external gateway set doesn't add excess IPs on router gw Setting the gateway of a router to an external network with more than one IPv4 and one IPv6 subnet should only add an address from the first IPv4 subnet, an address from the first IPv6-stateful subnet, and an address from each IPv6-stateless (SLAAC and DHCPv6-stateless) subnet """ with self.router() as r, self.network() as n: with self.subnet( cidr='10.0.0.0/24', network=n) as s1, ( self.subnet( cidr='10.0.1.0/24', network=n)) as s2, ( self.subnet( cidr='2001:db8::/64', network=n, ip_version=lib_constants.IP_VERSION_6, ipv6_ra_mode=lib_constants.IPV6_SLAAC, ipv6_address_mode=lib_constants.IPV6_SLAAC)) as s3, ( self.subnet( cidr='2001:db8:1::/64', network=n, ip_version=lib_constants.IP_VERSION_6, ipv6_ra_mode=lib_constants.DHCPV6_STATEFUL, ipv6_address_mode=lib_constants.DHCPV6_STATEFUL)) as s4, ( self.subnet( cidr='2001:db8:2::/64', network=n, ip_version=lib_constants.IP_VERSION_6, ipv6_ra_mode=lib_constants.DHCPV6_STATELESS, ipv6_address_mode=lib_constants.DHCPV6_STATELESS)) as s5: self._set_net_external(n['network']['id']) self._add_external_gateway_to_router( r['router']['id'], n['network']['id']) res = self._show('routers', r['router']['id']) fips = (res['router']['external_gateway_info'] ['external_fixed_ips']) fip_subnet_ids = {fip['subnet_id'] for fip in fips} # one of s1 or s2 should be in the list. 
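# NOTE: the check below accepts either IPv4 subnet because the gateway
# port takes its single IPv4 address from whichever IPv4 subnet the
# allocator happens to pick. A minimal standalone sketch of the
# selection rule described in the docstring above, assuming subnets are
# plain dicts with 'id', 'ip_version' and 'ipv6_address_mode' keys
# (illustrative helper only, not part of the plugin):
def expected_gateway_subnet_ids(subnets):
    ids = set()
    got_v4 = got_stateful = False
    for s in subnets:
        if s['ip_version'] == 4:
            # one address from the first IPv4 subnet only
            if not got_v4:
                ids.add(s['id'])
                got_v4 = True
        elif s.get('ipv6_address_mode') == 'dhcpv6-stateful':
            # one address from the first DHCPv6-stateful subnet
            if not got_stateful:
                ids.add(s['id'])
                got_stateful = True
        else:
            # an address from every SLAAC / DHCPv6-stateless subnet
            ids.add(s['id'])
    return ids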
if s1['subnet']['id'] in fip_subnet_ids: self.assertEqual({s1['subnet']['id'], s3['subnet']['id'], s4['subnet']['id'], s5['subnet']['id']}, fip_subnet_ids) else: self.assertEqual({s2['subnet']['id'], s3['subnet']['id'], s4['subnet']['id'], s5['subnet']['id']}, fip_subnet_ids) self._remove_external_gateway_from_router( r['router']['id'], n['network']['id']) def test_router_add_and_remove_gateway(self): with self.router() as r: with self.subnet() as s: self._set_net_external(s['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s['subnet']['network_id']) body = self._show('routers', r['router']['id']) net_id = body['router']['external_gateway_info']['network_id'] self.assertEqual(net_id, s['subnet']['network_id']) self._remove_external_gateway_from_router( r['router']['id'], s['subnet']['network_id']) body = self._show('routers', r['router']['id']) gw_info = body['router']['external_gateway_info'] self.assertIsNone(gw_info) def test_router_add_and_remove_gateway_tenant_ctx(self): with self.router(tenant_id='noadmin', set_context=True) as r: with self.subnet() as s: self._set_net_external(s['subnet']['network_id']) ctx = context.Context('', 'noadmin') self._add_external_gateway_to_router( r['router']['id'], s['subnet']['network_id'], neutron_context=ctx) body = self._show('routers', r['router']['id']) net_id = body['router']['external_gateway_info']['network_id'] self.assertEqual(net_id, s['subnet']['network_id']) self._remove_external_gateway_from_router( r['router']['id'], s['subnet']['network_id']) body = self._show('routers', r['router']['id']) gw_info = body['router']['external_gateway_info'] self.assertIsNone(gw_info) def test_create_router_port_with_device_id_of_other_tenants_router(self): with self.router() as admin_router: with self.network(tenant_id='tenant_a', set_context=True) as n: with self.subnet(network=n): for device_owner in lib_constants.ROUTER_INTERFACE_OWNERS: self._create_port( self.fmt, n['network']['id'], tenant_id='tenant_a', device_id=admin_router['router']['id'], device_owner=device_owner, set_context=True, expected_res_status=exc.HTTPConflict.code) def test_create_non_router_port_device_id_of_other_tenants_router_update( self): # This tests that HTTPConflict is raised if we create a non-router # port that matches the device_id of another tenants router and then # we change the device_owner to be network:router_interface. 
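# NOTE: a compact sketch of the ownership rule the two tests above
# exercise: a port may only carry a router's id in device_id (together
# with a router-interface device_owner) when the port's tenant owns
# that router. Illustrative predicate only, assuming routers are dicts
# with 'id' and 'tenant_id'; the owner tuple below is a subset of
# lib_constants.ROUTER_INTERFACE_OWNERS, and the real check lives in
# the L3 plugin's port create/update path:
ROUTER_IFACE_OWNERS = ('network:router_interface',
                       'network:router_interface_distributed')

def device_id_conflicts(port_tenant_id, device_id, device_owner, routers):
    if device_owner not in ROUTER_IFACE_OWNERS:
        return False
    return any(r['id'] == device_id and r['tenant_id'] != port_tenant_id
               for r in routers)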
with self.router() as admin_router: with self.network(tenant_id='tenant_a', set_context=True) as n: with self.subnet(network=n): for device_owner in lib_constants.ROUTER_INTERFACE_OWNERS: port_res = self._create_port( self.fmt, n['network']['id'], tenant_id='tenant_a', device_id=admin_router['router']['id'], set_context=True) port = self.deserialize(self.fmt, port_res) neutron_context = context.Context('', 'tenant_a') data = {'port': {'device_owner': device_owner}} self._update('ports', port['port']['id'], data, neutron_context=neutron_context, expected_code=exc.HTTPConflict.code) def test_update_port_device_id_to_different_tenants_router(self): with self.router() as admin_router: with self.router(tenant_id='tenant_a', set_context=True) as tenant_router: with self.network(tenant_id='tenant_a', set_context=True) as n: with self.subnet(network=n) as s: port = self._router_interface_action( 'add', tenant_router['router']['id'], s['subnet']['id'], None, tenant_id='tenant_a') neutron_context = context.Context('', 'tenant_a') data = {'port': {'device_id': admin_router['router']['id']}} self._update('ports', port['port_id'], data, neutron_context=neutron_context, expected_code=exc.HTTPConflict.code) def test_router_add_gateway_invalid_network_returns_400(self): with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], "foobar", expected_code=exc.HTTPBadRequest.code) def test_router_add_gateway_non_existent_network_returns_404(self): with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], _uuid(), expected_code=exc.HTTPNotFound.code) def test_router_add_gateway_net_not_external_returns_400(self): with self.router() as r: with self.subnet() as s: # intentionally do not set net as external self._add_external_gateway_to_router( r['router']['id'], s['subnet']['network_id'], expected_code=exc.HTTPBadRequest.code) def test_router_add_gateway_no_subnet(self): with self.router() as r: with self.network() as n: self._set_net_external(n['network']['id']) self._add_external_gateway_to_router( r['router']['id'], n['network']['id']) body = self._show('routers', r['router']['id']) net_id = body['router']['external_gateway_info']['network_id'] self.assertEqual(net_id, n['network']['id']) self._remove_external_gateway_from_router( r['router']['id'], n['network']['id']) body = self._show('routers', r['router']['id']) gw_info = body['router']['external_gateway_info'] self.assertIsNone(gw_info) def test_router_add_gateway_no_subnet_forbidden(self): with self.router() as r: with self.network() as n: self._set_net_external(n['network']['id']) with mock.patch.object(registry, 'publish') as notify: errors = [ exceptions.NotificationError( 'foo_callback_id', n_exc.InvalidInput(error_message='forbidden')), ] def failing_publish(resource, event, trigger, payload): if (resource == resources.ROUTER_GATEWAY and event == events.BEFORE_CREATE): raise exceptions.CallbackFailure( errors=errors) return mock.DEFAULT notify.side_effect = failing_publish self._add_external_gateway_to_router( r['router']['id'], n['network']['id'], expected_code=exc.HTTPBadRequest.code) notify.assert_any_call( resources.ROUTER_GATEWAY, events.BEFORE_CREATE, mock.ANY, payload=mock.ANY) # Find the call and look at the payload calls = [call for call in notify.mock_calls if call[1][0] == resources.ROUTER_GATEWAY and call[1][1] == events.BEFORE_CREATE] self.assertEqual(1, len(calls)) payload = calls[0][2]['payload'] self.assertEqual(r['router']['id'], payload.resource_id) self.assertEqual(n['network']['id'], 
payload.metadata.get('network_id')) self.assertEqual([], payload.metadata.get('subnets')) def test_router_add_gateway_notifications(self): with self.router() as r: with self.network() as n: with self.subnet(network=n) as s: self._set_net_external(n['network']['id']) with mock.patch.object(registry, 'publish') as notify: res = self._add_external_gateway_to_router( r['router']['id'], n['network']['id'], ext_ips=[{'subnet_id': s['subnet']['id'], 'ip_address': '10.0.0.4'}]) gw_info = res['router']['external_gateway_info'] ext_ips = gw_info['external_fixed_ips'][0] expected_gw_ips = [ext_ips['ip_address']] expected = [mock.call( resources.ROUTER_GATEWAY, events.AFTER_CREATE, mock.ANY, payload=mock.ANY)] notify.assert_has_calls(expected) # Find the call and look at the payload calls = [call for call in notify.mock_calls if call[1][0] == resources.ROUTER_GATEWAY and call[1][1] == events.AFTER_CREATE] self.assertEqual(1, len(calls)) payload = calls[0][2]['payload'] self.assertEqual(r['router']['id'], payload.resource_id) self.assertEqual(n['network']['id'], payload.metadata.get('network_id')) self.assertEqual(expected_gw_ips, payload.metadata.get('gateway_ips')) def test_router_remove_interface_inuse_returns_409(self): with self.router() as r: with self.subnet() as s: self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None) self._delete('routers', r['router']['id'], expected_code=exc.HTTPConflict.code) def test_router_remove_interface_callback_failure_returns_409(self): with self.router() as r,\ self.subnet() as s,\ mock.patch.object(registry, 'notify') as notify: errors = [ exceptions.NotificationError( 'foo_callback_id', n_exc.InUse()), ] self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None) # we fail the first time, but not the second, when # the clean-up takes place notify.side_effect = [ exceptions.CallbackFailure(errors=errors), None ] self._router_interface_action( 'remove', r['router']['id'], s['subnet']['id'], None, exc.HTTPConflict.code) def test_router_clear_gateway_callback_failure_returns_409(self): with self.router() as r,\ self.subnet() as s,\ mock.patch.object(registry, 'notify') as notify: errors = [ exceptions.NotificationError( 'foo_callback_id', n_exc.InUse()), ] self._set_net_external(s['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s['subnet']['network_id']) notify.side_effect = exceptions.CallbackFailure(errors=errors) self._remove_external_gateway_from_router( r['router']['id'], s['subnet']['network_id'], external_gw_info={}, expected_code=exc.HTTPConflict.code) def test_router_remove_interface_wrong_subnet_returns_400(self): with self.router() as r: with self.subnet() as s: with self.port() as p: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) self._router_interface_action('remove', r['router']['id'], s['subnet']['id'], p['port']['id'], exc.HTTPBadRequest.code) def test_router_remove_interface_nothing_returns_400(self): with self.router() as r: with self.subnet() as s: with self.port(subnet=s) as p: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) self._router_interface_action('remove', r['router']['id'], None, None, exc.HTTPBadRequest.code) # remove properly to clean-up self._router_interface_action('remove', r['router']['id'], None, p['port']['id']) def test_router_remove_interface_returns_200(self): with self.router() as r: with self.port() as p: body = self._router_interface_action('add', r['router']['id'], None, 
p['port']['id']) self._router_interface_action('remove', r['router']['id'], None, p['port']['id'], expected_body=body) def test_router_remove_interface_with_both_ids_returns_200(self): with self.router() as r: with self.subnet() as s: with self.port(subnet=s) as p: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) self._router_interface_action('remove', r['router']['id'], s['subnet']['id'], p['port']['id']) def test_router_remove_interface_wrong_port_returns_404(self): with self.router() as r: with self.subnet() as s: with self.port(subnet=s) as p: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) # create another port for testing failure case res = self._create_port(self.fmt, p['port']['network_id']) p2 = self.deserialize(self.fmt, res) self._router_interface_action('remove', r['router']['id'], None, p2['port']['id'], exc.HTTPNotFound.code) def test_router_remove_ipv6_subnet_from_interface(self): """Delete a subnet from a router interface Verify that deleting a subnet with router-interface-delete removes that subnet when there are multiple subnets on the interface and removes the interface when it is the last subnet on the interface. """ with self.router() as r, self.network() as n: with (self.subnet(network=n, cidr='fd00::1/64', ip_version=lib_constants.IP_VERSION_6) ) as s1, self.subnet(network=n, cidr='fd01::1/64', ip_version=lib_constants.IP_VERSION_6 ) as s2: body = self._router_interface_action('add', r['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r['router']['id'], s2['subnet']['id'], None) port = self._show('ports', body['port_id']) self.assertEqual(2, len(port['port']['fixed_ips'])) self._router_interface_action('remove', r['router']['id'], s1['subnet']['id'], None) port = self._show('ports', body['port_id']) self.assertEqual(1, len(port['port']['fixed_ips'])) self._router_interface_action('remove', r['router']['id'], s2['subnet']['id'], None) exp_code = exc.HTTPNotFound.code port = self._show('ports', body['port_id'], expected_code=exp_code) def test_router_delete(self): with self.router() as router: router_id = router['router']['id'] req = self.new_show_request('router', router_id) res = req.get_response(self._api_for_resource('router')) self.assertEqual(404, res.status_int) def test_router_delete_with_port_existed_returns_409(self): with self.subnet() as subnet: res = self._create_router(self.fmt, _uuid()) router = self.deserialize(self.fmt, res) self._router_interface_action('add', router['router']['id'], subnet['subnet']['id'], None) self._delete('routers', router['router']['id'], exc.HTTPConflict.code) def test_router_delete_with_floatingip_existed_returns_409(self): with self.port() as p: private_sub = {'subnet': {'id': p['port']['fixed_ips'][0]['subnet_id']}} with self.subnet(cidr='12.0.0.0/24') as public_sub: self._set_net_external(public_sub['subnet']['network_id']) res = self._create_router(self.fmt, _uuid()) r = self.deserialize(self.fmt, res) self._add_external_gateway_to_router( r['router']['id'], public_sub['subnet']['network_id']) self._router_interface_action('add', r['router']['id'], private_sub['subnet']['id'], None) res = self._create_floatingip( self.fmt, public_sub['subnet']['network_id'], port_id=p['port']['id']) self.assertEqual(exc.HTTPCreated.code, res.status_int) self._delete('routers', r['router']['id'], expected_code=exc.HTTPConflict.code) def test_router_show(self): name = 'router1' tenant_id = _uuid() expected_value = [('name', name), ('tenant_id', 
tenant_id), ('admin_state_up', True), ('status', 'ACTIVE'), ('external_gateway_info', None)] with self.router(name='router1', admin_state_up=True, tenant_id=tenant_id) as router: res = self._show('routers', router['router']['id']) for k, v in expected_value: self.assertEqual(res['router'][k], v) def test_network_update_external_failure(self): with self.router() as r: with self.subnet() as s1: self._set_net_external(s1['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s1['subnet']['network_id']) self._update('networks', s1['subnet']['network_id'], {'network': {extnet_apidef.EXTERNAL: False}}, expected_code=exc.HTTPConflict.code) def test_network_update_external(self): with self.router() as r: with self.network('test_net') as testnet: self._set_net_external(testnet['network']['id']) with self.subnet() as s1: self._set_net_external(s1['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s1['subnet']['network_id']) self._update('networks', testnet['network']['id'], {'network': {extnet_apidef.EXTERNAL: False}}) def test_floatingip_crd_ops(self): with self.floatingip_with_assoc() as fip: self._validate_floating_ip(fip) # post-delete, check that it is really gone body = self._list('floatingips') self.assertEqual(0, len(body['floatingips'])) self._show('floatingips', fip['floatingip']['id'], expected_code=exc.HTTPNotFound.code) def _test_floatingip_with_assoc_fails(self, plugin_method): with self.subnet(cidr='200.0.0.0/24') as public_sub: self._set_net_external(public_sub['subnet']['network_id']) with self.port() as private_port: with self.router() as r: sid = private_port['port']['fixed_ips'][0]['subnet_id'] private_sub = {'subnet': {'id': sid}} self._add_external_gateway_to_router( r['router']['id'], public_sub['subnet']['network_id']) self._router_interface_action('add', r['router']['id'], private_sub['subnet']['id'], None) with mock.patch(plugin_method) as pl: pl.side_effect = n_exc.BadRequest( resource='floatingip', msg='fake_error') res = self._create_floatingip( self.fmt, public_sub['subnet']['network_id'], port_id=private_port['port']['id']) self.assertEqual(400, res.status_int) for p in self._list('ports')['ports']: if (p['device_owner'] == lib_constants.DEVICE_OWNER_FLOATINGIP): self.fail('garbage port is not deleted') def test_floatingip_with_assoc_fails(self): self._test_floatingip_with_assoc_fails( 'neutron.db.l3_db.L3_NAT_dbonly_mixin._check_and_get_fip_assoc') def test_create_floatingip_with_assoc( self, expected_status=lib_constants.FLOATINGIP_STATUS_ACTIVE): with self.floatingip_with_assoc() as fip: body = self._show('floatingips', fip['floatingip']['id']) self.assertEqual(body['floatingip']['id'], fip['floatingip']['id']) self.assertEqual(body['floatingip']['port_id'], fip['floatingip']['port_id']) self.assertEqual(expected_status, body['floatingip']['status']) self.assertIsNotNone(body['floatingip']['fixed_ip_address']) self.assertIsNotNone(body['floatingip']['router_id']) def test_create_floatingip_non_admin_context_agent_notification(self): plugin = directory.get_plugin(plugin_constants.L3) if not hasattr(plugin, 'l3_rpc_notifier'): self.skipTest("Plugin does not support l3_rpc_notifier") with self.subnet(cidr='11.0.0.0/24') as public_sub,\ self.port() as private_port,\ self.router() as r: self._set_net_external(public_sub['subnet']['network_id']) subnet_id = private_port['port']['fixed_ips'][0]['subnet_id'] private_sub = {'subnet': {'id': subnet_id}} self._add_external_gateway_to_router( r['router']['id'], 
public_sub['subnet']['network_id']) self._router_interface_action( 'add', r['router']['id'], private_sub['subnet']['id'], None) with mock.patch.object(plugin.l3_rpc_notifier, 'routers_updated') as agent_notification: self._make_floatingip( self.fmt, public_sub['subnet']['network_id'], port_id=private_port['port']['id'], set_context=False) self.assertTrue(agent_notification.called) def test_floating_port_status_not_applicable(self): with self.floatingip_with_assoc(): port_body = self._list('ports', query_params='device_owner=network:floatingip')['ports'][0] self.assertEqual(lib_constants.PORT_STATUS_NOTAPPLICABLE, port_body['status']) def test_floatingip_update( self, expected_status=lib_constants.FLOATINGIP_STATUS_ACTIVE): with self.port() as p: private_sub = {'subnet': {'id': p['port']['fixed_ips'][0]['subnet_id']}} with self.floatingip_no_assoc(private_sub) as fip: body = self._show('floatingips', fip['floatingip']['id']) self.assertIsNone(body['floatingip']['port_id']) self.assertIsNone(body['floatingip']['fixed_ip_address']) self.assertEqual(expected_status, body['floatingip']['status']) port_id = p['port']['id'] ip_address = p['port']['fixed_ips'][0]['ip_address'] body = self._update('floatingips', fip['floatingip']['id'], {'floatingip': {'port_id': port_id}}) self.assertEqual(port_id, body['floatingip']['port_id']) self.assertEqual(ip_address, body['floatingip']['fixed_ip_address']) def test_floatingip_update_subnet_gateway_disabled( self, expected_status=lib_constants.FLOATINGIP_STATUS_ACTIVE): """Attach a floating IP to an instance Verify that the floating IP can be associated to a port whose subnet's gateway ip is not connected to the external router, but the router has an ip in that subnet. """ with self.subnet(cidr='30.0.0.0/24', gateway_ip=None) as private_sub: with self.port(private_sub) as p: subnet_id = p['port']['fixed_ips'][0]['subnet_id'] private_sub = {'subnet': {'id': subnet_id}} port_id = p['port']['id'] with self.router() as r: self._router_interface_action('add', r['router']['id'], None, port_id) with self.subnet(cidr='12.0.0.0/24') as public_sub: self._set_net_external(public_sub['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], public_sub['subnet']['network_id']) fip = self._make_floatingip(self.fmt, public_sub['subnet']['network_id']) body = self._show('floatingips', fip['floatingip']['id']) self.assertEqual(expected_status, body['floatingip']['status']) body = self._update('floatingips', fip['floatingip']['id'], {'floatingip': {'port_id': port_id}}) self.assertEqual(port_id, body['floatingip']['port_id']) self.assertEqual(p['port']['fixed_ips'][0]['ip_address'], body['floatingip']['fixed_ip_address']) self.assertEqual(r['router']['id'], body['floatingip']['router_id']) def test_floatingip_create_different_fixed_ip_same_port(self): '''This tests that it is possible to delete a port that has multiple floating ip addresses associated with it (each floating address associated with a unique fixed address). 
''' with self.router() as r: with self.subnet(cidr='11.0.0.0/24') as public_sub: self._set_net_external(public_sub['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], public_sub['subnet']['network_id']) with self.subnet() as private_sub: ip_range = list(netaddr.IPNetwork( private_sub['subnet']['cidr'])) fixed_ips = [{'ip_address': str(ip_range[-3])}, {'ip_address': str(ip_range[-2])}] self._router_interface_action( 'add', r['router']['id'], private_sub['subnet']['id'], None) with self.port(subnet=private_sub, fixed_ips=fixed_ips) as p: fip1 = self._make_floatingip( self.fmt, public_sub['subnet']['network_id'], p['port']['id'], fixed_ip=str(ip_range[-2])) fip2 = self._make_floatingip( self.fmt, public_sub['subnet']['network_id'], p['port']['id'], fixed_ip=str(ip_range[-3])) # Test that floating ips are assigned successfully. body = self._show('floatingips', fip1['floatingip']['id']) self.assertEqual( body['floatingip']['port_id'], fip1['floatingip']['port_id']) body = self._show('floatingips', fip2['floatingip']['id']) self.assertEqual( body['floatingip']['port_id'], fip2['floatingip']['port_id']) self._delete('ports', p['port']['id']) # Test that port has been successfully deleted. body = self._show('ports', p['port']['id'], expected_code=exc.HTTPNotFound.code) def test_floatingip_update_different_fixed_ip_same_port(self): with self.subnet() as s: ip_range = list(netaddr.IPNetwork(s['subnet']['cidr'])) fixed_ips = [{'ip_address': str(ip_range[-3])}, {'ip_address': str(ip_range[-2])}] with self.port(subnet=s, fixed_ips=fixed_ips) as p: with self.floatingip_with_assoc( port_id=p['port']['id'], fixed_ip=str(ip_range[-3])) as fip: body = self._show('floatingips', fip['floatingip']['id']) self.assertEqual(fip['floatingip']['id'], body['floatingip']['id']) self.assertEqual(fip['floatingip']['port_id'], body['floatingip']['port_id']) self.assertEqual(str(ip_range[-3]), body['floatingip']['fixed_ip_address']) self.assertIsNotNone(body['floatingip']['router_id']) body_2 = self._update( 'floatingips', fip['floatingip']['id'], {'floatingip': {'port_id': p['port']['id'], 'fixed_ip_address': str(ip_range[-2])} }) self.assertEqual(fip['floatingip']['port_id'], body_2['floatingip']['port_id']) self.assertEqual(str(ip_range[-2]), body_2['floatingip']['fixed_ip_address']) def test_floatingip_update_invalid_fixed_ip(self): with self.subnet() as s: with self.port(subnet=s) as p: with self.floatingip_with_assoc( port_id=p['port']['id']) as fip: self._update( 'floatingips', fip['floatingip']['id'], {'floatingip': {'port_id': p['port']['id'], 'fixed_ip_address': '2001:db8::a'}}, expected_code=exc.HTTPBadRequest.code) def test_floatingip_update_to_same_port_id_twice( self, expected_status=lib_constants.FLOATINGIP_STATUS_ACTIVE): with self.port() as p: private_sub = {'subnet': {'id': p['port']['fixed_ips'][0]['subnet_id']}} with self.floatingip_no_assoc(private_sub) as fip: body = self._show('floatingips', fip['floatingip']['id']) self.assertIsNone(body['floatingip']['port_id']) self.assertIsNone(body['floatingip']['fixed_ip_address']) self.assertEqual(expected_status, body['floatingip']['status']) port_id = p['port']['id'] ip_address = p['port']['fixed_ips'][0]['ip_address'] # 1. Update floating IP with port_id (associate) body = self._update('floatingips', fip['floatingip']['id'], {'floatingip': {'port_id': port_id}}) self.assertEqual(port_id, body['floatingip']['port_id']) self.assertEqual(ip_address, body['floatingip']['fixed_ip_address']) # 2. 
Update floating IP with same port again body = self._update('floatingips', fip['floatingip']['id'], {'floatingip': {'port_id': port_id}}) # No errors, and nothing changed self.assertEqual(port_id, body['floatingip']['port_id']) self.assertEqual(ip_address, body['floatingip']['fixed_ip_address']) def test_floatingip_update_same_fixed_ip_same_port(self): with self.subnet() as private_sub: ip_range = list(netaddr.IPNetwork(private_sub['subnet']['cidr'])) fixed_ip = [{'ip_address': str(ip_range[-3])}] with self.port(subnet=private_sub, fixed_ips=fixed_ip) as p: with self.router() as r: with self.subnet(cidr='11.0.0.0/24') as public_sub: self._set_net_external( public_sub['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], public_sub['subnet']['network_id']) self._router_interface_action( 'add', r['router']['id'], private_sub['subnet']['id'], None) fip1 = self._make_floatingip( self.fmt, public_sub['subnet']['network_id']) fip2 = self._make_floatingip( self.fmt, public_sub['subnet']['network_id']) # 1. Update floating IP 1 with port_id and fixed_ip body_1 = self._update( 'floatingips', fip1['floatingip']['id'], {'floatingip': {'port_id': p['port']['id'], 'fixed_ip_address': str(ip_range[-3])} }) self.assertEqual(str(ip_range[-3]), body_1['floatingip']['fixed_ip_address']) self.assertEqual(p['port']['id'], body_1['floatingip']['port_id']) # 2. Update floating IP 2 with port_id and fixed_ip # mock out the sequential check plugin = 'neutron.db.l3_db.L3_NAT_dbonly_mixin' check_get = mock.patch( plugin + '._check_and_get_fip_assoc', fip=fip2, floating_db=mock.ANY, return_value=(p['port']['id'], str(ip_range[-3]), r['router']['id'])) check_and_get = check_get.start() # do regular _check_and_get_fip_assoc() after skip check_and_get.side_effect = check_get.stop() self._update( 'floatingips', fip2['floatingip']['id'], {'floatingip': {'port_id': p['port']['id'], 'fixed_ip_address': str(ip_range[-3]) }}, exc.HTTPConflict.code) body = self._show('floatingips', fip2['floatingip']['id']) self.assertIsNone( body['floatingip']['fixed_ip_address']) self.assertIsNone( body['floatingip']['port_id']) def test_create_multiple_floatingips_same_fixed_ip_same_port(self): '''This tests that if multiple API requests arrive to create floating IPs on same external network to same port with one fixed ip, the latter API requests would be blocked at database side. ''' with self.router() as r: with self.subnet(cidr='11.0.0.0/24') as public_sub: self._set_net_external(public_sub['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], public_sub['subnet']['network_id']) with self.subnet() as private_sub: ip_range = list(netaddr.IPNetwork( private_sub['subnet']['cidr'])) fixed_ips = [{'ip_address': str(ip_range[-3])}, {'ip_address': str(ip_range[-2])}] self._router_interface_action( 'add', r['router']['id'], private_sub['subnet']['id'], None) with self.port(subnet=private_sub, fixed_ips=fixed_ips) as p: # 1. Create floating IP 1 fip1 = self._make_floatingip( self.fmt, public_sub['subnet']['network_id'], p['port']['id'], fixed_ip=str(ip_range[-3])) # 2. 
Create floating IP 2 # mock out the sequential check plugin = 'neutron.db.l3_db.L3_NAT_dbonly_mixin' check_get = mock.patch( plugin + '._check_and_get_fip_assoc', fip=mock.ANY, floating_db=mock.ANY, return_value=(p['port']['id'], str(ip_range[-3]), r['router']['id'])) check_and_get = check_get.start() # do regular _check_and_get_fip_assoc() after skip check_and_get.side_effect = check_get.stop() self._make_floatingip( self.fmt, public_sub['subnet']['network_id'], p['port']['id'], fixed_ip=str(ip_range[-3]), http_status=exc.HTTPConflict.code) # Test that floating IP 1 is successfully created body = self._show('floatingips', fip1['floatingip']['id']) self.assertEqual( body['floatingip']['port_id'], fip1['floatingip']['port_id']) self._delete('ports', p['port']['id']) # Test that port has been successfully deleted. body = self._show('ports', p['port']['id'], expected_code=exc.HTTPNotFound.code) def test_first_floatingip_associate_notification(self): with self.port() as p: private_sub = {'subnet': {'id': p['port']['fixed_ips'][0]['subnet_id']}} with self.floatingip_no_assoc(private_sub) as fip: port_id = p['port']['id'] ip_address = p['port']['fixed_ips'][0]['ip_address'] with mock.patch.object(registry, 'notify') as notify: body = self._update('floatingips', fip['floatingip']['id'], {'floatingip': {'port_id': port_id}}) fip_addr = fip['floatingip']['floating_ip_address'] fip_network_id = fip['floatingip']['floating_network_id'] fip_id = fip['floatingip']['id'] router_id = body['floatingip']['router_id'] body = self._show('routers', router_id) notify.assert_any_call(resources.FLOATING_IP, events.AFTER_UPDATE, mock.ANY, context=mock.ANY, fixed_ip_address=ip_address, fixed_port_id=port_id, floating_ip_address=fip_addr, floating_network_id=fip_network_id, last_known_router_id=None, floating_ip_id=fip_id, router_id=router_id, association_event=True) def test_floatingip_disassociate_notification(self): with self.port() as p: private_sub = {'subnet': {'id': p['port']['fixed_ips'][0]['subnet_id']}} with self.floatingip_no_assoc(private_sub) as fip: port_id = p['port']['id'] body = self._update('floatingips', fip['floatingip']['id'], {'floatingip': {'port_id': port_id}}) with mock.patch.object(registry, 'notify') as notify: fip_addr = fip['floatingip']['floating_ip_address'] fip_network_id = fip['floatingip']['floating_network_id'] fip_id = fip['floatingip']['id'] router_id = body['floatingip']['router_id'] self._update('floatingips', fip['floatingip']['id'], {'floatingip': {'port_id': None}}) notify.assert_any_call(resources.FLOATING_IP, events.AFTER_UPDATE, mock.ANY, context=mock.ANY, fixed_ip_address=None, fixed_port_id=None, floating_ip_address=fip_addr, floating_network_id=fip_network_id, last_known_router_id=router_id, floating_ip_id=fip_id, router_id=None, association_event=False) def test_floatingip_association_on_unowned_router(self): # create a router owned by one tenant and associate the FIP with a # different tenant, assert that the FIP association succeeds with self.subnet(cidr='11.0.0.0/24') as public_sub: self._set_net_external(public_sub['subnet']['network_id']) with self.port() as private_port: with self.router(tenant_id='router-owner', set_context=True) as r: sid = private_port['port']['fixed_ips'][0]['subnet_id'] private_sub = {'subnet': {'id': sid}} self._add_external_gateway_to_router( r['router']['id'], public_sub['subnet']['network_id']) self._router_interface_action( 'add', r['router']['id'], private_sub['subnet']['id'], None) self._make_floatingip(self.fmt, 
public_sub['subnet']['network_id'], port_id=private_port['port']['id'], fixed_ip=None, set_context=True) def test_floatingip_update_different_router(self): # Create subnet with different CIDRs to account for plugins which # do not support overlapping IPs with self.subnet(cidr='10.0.0.0/24') as s1,\ self.subnet(cidr='10.0.1.0/24') as s2: with self.port(subnet=s1) as p1, self.port(subnet=s2) as p2: private_sub1 = {'subnet': {'id': p1['port']['fixed_ips'][0]['subnet_id']}} private_sub2 = {'subnet': {'id': p2['port']['fixed_ips'][0]['subnet_id']}} with self.subnet(cidr='12.0.0.0/24') as public_sub: with self.floatingip_no_assoc_with_public_sub( private_sub1, public_sub=public_sub) as (fip1, r1),\ self.floatingip_no_assoc_with_public_sub( private_sub2, public_sub=public_sub) as (fip2, r2): def assert_no_assoc(fip): body = self._show('floatingips', fip['floatingip']['id']) self.assertIsNone(body['floatingip']['port_id']) self.assertIsNone( body['floatingip']['fixed_ip_address']) assert_no_assoc(fip1) assert_no_assoc(fip2) def associate_and_assert(fip, port): port_id = port['port']['id'] ip_address = (port['port']['fixed_ips'] [0]['ip_address']) body = self._update( 'floatingips', fip['floatingip']['id'], {'floatingip': {'port_id': port_id}}) self.assertEqual(port_id, body['floatingip']['port_id']) self.assertEqual( ip_address, body['floatingip']['fixed_ip_address']) return body['floatingip']['router_id'] fip1_r1_res = associate_and_assert(fip1, p1) self.assertEqual(fip1_r1_res, r1['router']['id']) # The following operation will associate the floating # ip to a different router fip1_r2_res = associate_and_assert(fip1, p2) self.assertEqual(fip1_r2_res, r2['router']['id']) fip2_r1_res = associate_and_assert(fip2, p1) self.assertEqual(fip2_r1_res, r1['router']['id']) # disassociate fip1 self._update( 'floatingips', fip1['floatingip']['id'], {'floatingip': {'port_id': None}}) fip2_r2_res = associate_and_assert(fip2, p2) self.assertEqual(fip2_r2_res, r2['router']['id']) def test_floatingip_update_different_port_owner_as_admin(self): with self.subnet() as private_sub: with self.floatingip_no_assoc(private_sub) as fip: with self.port(subnet=private_sub, tenant_id='other') as p: body = self._update('floatingips', fip['floatingip']['id'], {'floatingip': {'port_id': p['port']['id']}}) self.assertEqual(p['port']['id'], body['floatingip']['port_id']) def test_floatingip_port_delete(self): with self.subnet() as private_sub: with self.floatingip_no_assoc(private_sub) as fip: with self.port(subnet=private_sub) as p: body = self._update('floatingips', fip['floatingip']['id'], {'floatingip': {'port_id': p['port']['id']}}) # note: once this port goes out of scope, the port will be # deleted, which is what we want to test. 
We want to confirm # that the fields are set back to None self._delete('ports', p['port']['id']) body = self._show('floatingips', fip['floatingip']['id']) self.assertEqual(body['floatingip']['id'], fip['floatingip']['id']) self.assertIsNone(body['floatingip']['port_id']) self.assertIsNone(body['floatingip']['fixed_ip_address']) self.assertIsNone(body['floatingip']['router_id']) def test_two_fips_one_port_invalid_return_409(self): with self.floatingip_with_assoc() as fip1: res = self._create_floatingip( self.fmt, fip1['floatingip']['floating_network_id'], fip1['floatingip']['port_id']) self.assertEqual(exc.HTTPConflict.code, res.status_int) def test_floating_ip_direct_port_delete_returns_409(self): found = False with self.floatingip_with_assoc(): for p in self._list('ports')['ports']: if p['device_owner'] == lib_constants.DEVICE_OWNER_FLOATINGIP: self._delete('ports', p['id'], expected_code=exc.HTTPConflict.code) found = True self.assertTrue(found) def _test_floatingip_with_invalid_create_port(self, plugin_class): with self.port() as p: private_sub = {'subnet': {'id': p['port']['fixed_ips'][0]['subnet_id']}} with self.subnet(cidr='12.0.0.0/24') as public_sub: self._set_net_external(public_sub['subnet']['network_id']) res = self._create_router(self.fmt, _uuid()) r = self.deserialize(self.fmt, res) self._add_external_gateway_to_router( r['router']['id'], public_sub['subnet']['network_id']) self._router_interface_action( 'add', r['router']['id'], private_sub['subnet']['id'], None) with mock.patch(plugin_class + '.create_port') as createport: createport.return_value = {'fixed_ips': [], 'id': '44'} res = self._create_floatingip( self.fmt, public_sub['subnet']['network_id'], port_id=p['port']['id']) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_floatingip_with_invalid_create_port(self): self._test_floatingip_with_invalid_create_port( 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2') def test_create_floatingip_with_subnet_id_non_admin(self): with self.subnet() as public_sub: self._set_net_external(public_sub['subnet']['network_id']) with self.router(): res = self._create_floatingip( self.fmt, public_sub['subnet']['network_id'], subnet_id=public_sub['subnet']['id'], set_context=True) self.assertEqual(exc.HTTPCreated.code, res.status_int) def test_create_floatingip_with_subnet_id_and_fip_address(self): with self.network() as ext_net: self._set_net_external(ext_net['network']['id']) with self.subnet(ext_net, cidr='10.10.10.0/24') as ext_subnet: with self.router(): res = self._create_floatingip( self.fmt, ext_net['network']['id'], subnet_id=ext_subnet['subnet']['id'], floating_ip='10.10.10.100') fip = self.deserialize(self.fmt, res) self.assertEqual(exc.HTTPCreated.code, res.status_int) self.assertEqual('10.10.10.100', fip['floatingip']['floating_ip_address']) def test_create_floatingip_with_subnet_and_invalid_fip_address(self): with self.network() as ext_net: self._set_net_external(ext_net['network']['id']) with self.subnet(ext_net, cidr='10.10.10.0/24') as ext_subnet: with self.router(): res = self._create_floatingip( self.fmt, ext_net['network']['id'], subnet_id=ext_subnet['subnet']['id'], floating_ip='20.20.20.200') data = self.deserialize(self.fmt, res) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) msg = str(n_exc.InvalidIpForSubnet(ip_address='20.20.20.200')) self.assertEqual('InvalidIpForSubnet', data['NeutronError']['type']) self.assertEqual(msg, data['NeutronError']['message']) def test_create_floatingip_with_multisubnet_id(self): with self.network() as 
network: self._set_net_external(network['network']['id']) with self.subnet(network, cidr='10.0.12.0/24') as subnet1: with self.subnet(network, cidr='10.0.13.0/24') as subnet2: with self.router(): res = self._create_floatingip( self.fmt, subnet1['subnet']['network_id'], subnet_id=subnet1['subnet']['id']) fip1 = self.deserialize(self.fmt, res) res = self._create_floatingip( self.fmt, subnet1['subnet']['network_id'], subnet_id=subnet2['subnet']['id']) fip2 = self.deserialize(self.fmt, res) self.assertTrue( fip1['floatingip']['floating_ip_address'].startswith('10.0.12')) self.assertTrue( fip2['floatingip']['floating_ip_address'].startswith('10.0.13')) def test_create_floatingip_with_wrong_subnet_id(self): with self.network() as network1: self._set_net_external(network1['network']['id']) with self.subnet(network1, cidr='10.0.12.0/24') as subnet1: with self.network() as network2: self._set_net_external(network2['network']['id']) with self.subnet(network2, cidr='10.0.13.0/24') as subnet2: with self.router(): res = self._create_floatingip( self.fmt, subnet1['subnet']['network_id'], subnet_id=subnet2['subnet']['id']) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_create_floatingip_no_ext_gateway_return_404(self): with self.subnet() as public_sub: self._set_net_external(public_sub['subnet']['network_id']) with self.port() as private_port: with self.router(): res = self._create_floatingip( self.fmt, public_sub['subnet']['network_id'], port_id=private_port['port']['id']) # this should be some kind of error self.assertEqual(exc.HTTPNotFound.code, res.status_int) def test_create_floating_non_ext_network_returns_400(self): with self.subnet() as public_sub: # normally we would set the network of public_sub to be # external, but the point of this test is to handle when # that is not the case with self.router(): res = self._create_floatingip( self.fmt, public_sub['subnet']['network_id']) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_create_floatingip_no_public_subnet_returns_400(self): with self.network() as public_network: with self.port() as private_port: with self.router() as r: sid = private_port['port']['fixed_ips'][0]['subnet_id'] private_sub = {'subnet': {'id': sid}} self._router_interface_action('add', r['router']['id'], private_sub['subnet']['id'], None) res = self._create_floatingip( self.fmt, public_network['network']['id'], port_id=private_port['port']['id']) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_create_floatingip_invalid_floating_network_id_returns_400(self): # API-level test - no need to create all objects for l3 plugin res = self._create_floatingip(self.fmt, 'iamnotanuuid', uuidutils.generate_uuid(), '192.168.0.1') self.assertEqual(400, res.status_int) def test_create_floatingip_invalid_floating_port_id_returns_400(self): # API-level test - no need to create all objects for l3 plugin res = self._create_floatingip(self.fmt, uuidutils.generate_uuid(), 'iamnotanuuid', '192.168.0.1') self.assertEqual(400, res.status_int) def test_create_floatingip_invalid_fixed_ip_address_returns_400(self): # API-level test - no need to create all objects for l3 plugin res = self._create_floatingip(self.fmt, uuidutils.generate_uuid(), uuidutils.generate_uuid(), 'iamnotnanip') self.assertEqual(400, res.status_int) def test_create_floatingip_invalid_fixed_ipv6_address_returns_400(self): # API-level test - no need to create all objects for l3 plugin res = self._create_floatingip(self.fmt, uuidutils.generate_uuid(), uuidutils.generate_uuid(), 
'2001:db8::a') self.assertEqual(400, res.status_int) def test_floatingip_list_with_sort(self): with self.subnet(cidr="10.0.0.0/24") as s1,\ self.subnet(cidr="11.0.0.0/24") as s2,\ self.subnet(cidr="12.0.0.0/24") as s3: network_id1 = s1['subnet']['network_id'] network_id2 = s2['subnet']['network_id'] network_id3 = s3['subnet']['network_id'] self._set_net_external(network_id1) self._set_net_external(network_id2) self._set_net_external(network_id3) fp1 = self._make_floatingip(self.fmt, network_id1) fp2 = self._make_floatingip(self.fmt, network_id2) fp3 = self._make_floatingip(self.fmt, network_id3) self._test_list_with_sort('floatingip', (fp3, fp2, fp1), [('floating_ip_address', 'desc')]) def test_floatingip_list_with_port_id(self): with self.floatingip_with_assoc() as fip: port_id = fip['floatingip']['port_id'] res = self._list('floatingips', query_params="port_id=%s" % port_id) self.assertEqual(1, len(res['floatingips'])) res = self._list('floatingips', query_params="port_id=aaa") self.assertEqual(0, len(res['floatingips'])) def test_floatingip_list_with_pagination(self): with self.subnet(cidr="10.0.0.0/24") as s1,\ self.subnet(cidr="11.0.0.0/24") as s2,\ self.subnet(cidr="12.0.0.0/24") as s3: network_id1 = s1['subnet']['network_id'] network_id2 = s2['subnet']['network_id'] network_id3 = s3['subnet']['network_id'] self._set_net_external(network_id1) self._set_net_external(network_id2) self._set_net_external(network_id3) fp1 = self._make_floatingip(self.fmt, network_id1) fp2 = self._make_floatingip(self.fmt, network_id2) fp3 = self._make_floatingip(self.fmt, network_id3) self._test_list_with_pagination( 'floatingip', (fp1, fp2, fp3), ('floating_ip_address', 'asc'), 2, 2) def test_floatingip_list_with_pagination_reverse(self): with self.subnet(cidr="10.0.0.0/24") as s1,\ self.subnet(cidr="11.0.0.0/24") as s2,\ self.subnet(cidr="12.0.0.0/24") as s3: network_id1 = s1['subnet']['network_id'] network_id2 = s2['subnet']['network_id'] network_id3 = s3['subnet']['network_id'] self._set_net_external(network_id1) self._set_net_external(network_id2) self._set_net_external(network_id3) fp1 = self._make_floatingip(self.fmt, network_id1) fp2 = self._make_floatingip(self.fmt, network_id2) fp3 = self._make_floatingip(self.fmt, network_id3) self._test_list_with_pagination_reverse( 'floatingip', (fp1, fp2, fp3), ('floating_ip_address', 'asc'), 2, 2) def test_floatingip_multi_external_one_internal(self): with self.subnet(cidr="10.0.0.0/24") as exs1,\ self.subnet(cidr="11.0.0.0/24") as exs2,\ self.subnet(cidr="12.0.0.0/24") as ins1: network_ex_id1 = exs1['subnet']['network_id'] network_ex_id2 = exs2['subnet']['network_id'] self._set_net_external(network_ex_id1) self._set_net_external(network_ex_id2) r2i_fixed_ips = [{'ip_address': '12.0.0.2'}] with self.router() as r1,\ self.router() as r2,\ self.port(subnet=ins1, fixed_ips=r2i_fixed_ips) as r2i_port: self._add_external_gateway_to_router( r1['router']['id'], network_ex_id1) self._router_interface_action('add', r1['router']['id'], ins1['subnet']['id'], None) self._add_external_gateway_to_router( r2['router']['id'], network_ex_id2) self._router_interface_action('add', r2['router']['id'], None, r2i_port['port']['id']) with self.port(subnet=ins1, fixed_ips=[{'ip_address': '12.0.0.3'}] ) as private_port: fp1 = self._make_floatingip(self.fmt, network_ex_id1, private_port['port']['id']) fp2 = self._make_floatingip(self.fmt, network_ex_id2, private_port['port']['id']) self.assertEqual(fp1['floatingip']['router_id'], r1['router']['id']) 
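# NOTE: the surrounding assertions pass because floating IP association
# picks the router whose external gateway sits on the floating IP's
# network. A minimal standalone sketch of that selection, assuming each
# router is a dict with 'id' and 'gw_network_id' and that every
# candidate already reaches the internal subnet (illustrative only):
def pick_router_for_fip(routers, floating_network_id):
    for r in routers:
        if r['gw_network_id'] == floating_network_id:
            return r['id']
    raise LookupError('no router has a gateway on the floating network')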
self.assertEqual(fp2['floatingip']['router_id'], r2['router']['id']) def test_floatingip_same_external_and_internal(self): # Select router with subnet's gateway_ip for floatingip when # routers connected to same subnet and external network. with self.subnet(cidr="10.0.0.0/24") as exs,\ self.subnet(cidr="12.0.0.0/24", gateway_ip="12.0.0.50") as ins: network_ex_id = exs['subnet']['network_id'] self._set_net_external(network_ex_id) r2i_fixed_ips = [{'ip_address': '12.0.0.2'}] with self.router() as r1,\ self.router() as r2,\ self.port(subnet=ins, fixed_ips=r2i_fixed_ips) as r2i_port: self._add_external_gateway_to_router( r1['router']['id'], network_ex_id) self._router_interface_action('add', r2['router']['id'], None, r2i_port['port']['id']) self._router_interface_action('add', r1['router']['id'], ins['subnet']['id'], None) self._add_external_gateway_to_router( r2['router']['id'], network_ex_id) with self.port(subnet=ins, fixed_ips=[{'ip_address': '12.0.0.8'}] ) as private_port: fp = self._make_floatingip(self.fmt, network_ex_id, private_port['port']['id']) self.assertEqual(r1['router']['id'], fp['floatingip']['router_id']) def _test_floatingip_via_router_interface(self, http_status): # NOTE(yamamoto): "exs" subnet is just to provide a gateway port # for the router. Otherwise the test would fail earlier without # reaching the code we want to test. (bug 1556884) with self.subnet(cidr="10.0.0.0/24") as exs, \ self.subnet(cidr="10.0.1.0/24") as ins1, \ self.subnet(cidr="10.0.2.0/24") as ins2: network_ex_id = exs['subnet']['network_id'] self._set_net_external(network_ex_id) network_in2_id = ins2['subnet']['network_id'] self._set_net_external(network_in2_id) with self.router() as r1, self.port(subnet=ins1) as private_port: self._add_external_gateway_to_router(r1['router']['id'], network_ex_id) self._router_interface_action('add', r1['router']['id'], ins1['subnet']['id'], None) self._router_interface_action('add', r1['router']['id'], ins2['subnet']['id'], None) self._make_floatingip(self.fmt, network_id=network_in2_id, port_id=private_port['port']['id'], http_status=http_status) def _get_router_for_floatingip_without_device_owner_check( self, context, internal_port, internal_subnet, external_network_id): gw_port = orm.aliased(models_v2.Port, name="gw_port") routerport_qry = context.session.query( l3_models.RouterPort.router_id, models_v2.IPAllocation.ip_address ).join( l3_models.RouterPort.port, models_v2.Port.fixed_ips ).filter( models_v2.Port.network_id == internal_port['network_id'], l3_models.RouterPort.port_type.in_( lib_constants.ROUTER_INTERFACE_OWNERS ), models_v2.IPAllocation.subnet_id == internal_subnet['id'] ).join( gw_port, gw_port.device_id == l3_models.RouterPort.router_id ).filter( gw_port.network_id == external_network_id, ).distinct() first_router_id = None for router_id, interface_ip in routerport_qry: if interface_ip == internal_subnet['gateway_ip']: return router_id if not first_router_id: first_router_id = router_id if first_router_id: return first_router_id raise l3_exc.ExternalGatewayForFloatingIPNotFound( subnet_id=internal_subnet['id'], external_network_id=external_network_id, port_id=internal_port['id']) def test_floatingip_via_router_interface_returns_404(self): self._test_floatingip_via_router_interface(exc.HTTPNotFound.code) def test_floatingip_via_router_interface_returns_201(self): # Override get_router_for_floatingip, as # networking-midonet's L3 service plugin would do. 
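# NOTE: the override above keeps the essential preference of the query
# it replaces: among routers that have an interface on the internal
# subnet and a gateway on the external network, prefer the one holding
# the subnet's gateway IP, otherwise fall back to the first candidate
# found. A plain-Python sketch of that preference, with candidates as
# (router_id, interface_ip) pairs (illustrative only):
def prefer_gateway_router(candidates, subnet_gateway_ip):
    first_router_id = None
    for router_id, interface_ip in candidates:
        if interface_ip == subnet_gateway_ip:
            return router_id
        if first_router_id is None:
            first_router_id = router_id
    return first_router_id  # None when no candidate exists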
plugin = directory.get_plugin(plugin_constants.L3) with mock.patch.object(plugin, "get_router_for_floatingip", self._get_router_for_floatingip_without_device_owner_check): self._test_floatingip_via_router_interface(exc.HTTPCreated.code) def test_floatingip_delete_router_intf_with_subnet_id_returns_409(self): found = False with self.floatingip_with_assoc(): for p in self._list('ports')['ports']: if p['device_owner'] == lib_constants.DEVICE_OWNER_ROUTER_INTF: subnet_id = p['fixed_ips'][0]['subnet_id'] router_id = p['device_id'] self._router_interface_action( 'remove', router_id, subnet_id, None, expected_code=exc.HTTPConflict.code) found = True break self.assertTrue(found) def test_floatingip_delete_router_intf_with_port_id_returns_409(self): found = False with self.floatingip_with_assoc(): for p in self._list('ports')['ports']: if p['device_owner'] == lib_constants.DEVICE_OWNER_ROUTER_INTF: router_id = p['device_id'] self._router_interface_action( 'remove', router_id, None, p['id'], expected_code=exc.HTTPConflict.code) found = True break self.assertTrue(found) def _test_router_delete_subnet_inuse_returns_409(self, router, subnet): r, s = router, subnet self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None) # subnet cannot be deleted as it's attached to a router self._delete('subnets', s['subnet']['id'], expected_code=exc.HTTPConflict.code) def _ipv6_subnet(self, mode): return self.subnet(cidr='fd00::1/64', gateway_ip='fd00::1', ip_version=lib_constants.IP_VERSION_6, ipv6_ra_mode=mode, ipv6_address_mode=mode) def test_router_delete_subnet_inuse_returns_409(self): with self.router() as r: with self.subnet() as s: self._test_router_delete_subnet_inuse_returns_409(r, s) def test_router_delete_ipv6_slaac_subnet_inuse_returns_409(self): with self.router() as r: with self._ipv6_subnet(lib_constants.IPV6_SLAAC) as s: self._test_router_delete_subnet_inuse_returns_409(r, s) def test_router_delete_dhcpv6_stateless_subnet_inuse_returns_409(self): with self.router() as r: with self._ipv6_subnet(lib_constants.DHCPV6_STATELESS) as s: self._test_router_delete_subnet_inuse_returns_409(r, s) def test_delete_ext_net_with_disassociated_floating_ips(self): with self.network() as net: net_id = net['network']['id'] self._set_net_external(net_id) with self.subnet(network=net): self._make_floatingip(self.fmt, net_id) def test_create_floatingip_with_specific_ip(self): with self.subnet(cidr='10.0.0.0/24') as s: network_id = s['subnet']['network_id'] self._set_net_external(network_id) fp = self._make_floatingip(self.fmt, network_id, floating_ip='10.0.0.10') self.assertEqual('10.0.0.10', fp['floatingip']['floating_ip_address']) def test_create_floatingip_with_specific_ip_out_of_allocation(self): with self.subnet(cidr='10.0.0.0/24', allocation_pools=[ {'start': '10.0.0.10', 'end': '10.0.0.20'}] ) as s: network_id = s['subnet']['network_id'] self._set_net_external(network_id) fp = self._make_floatingip(self.fmt, network_id, floating_ip='10.0.0.30') self.assertEqual('10.0.0.30', fp['floatingip']['floating_ip_address']) def test_create_floatingip_with_specific_ip_non_admin(self): ctx = context.Context('user_id', 'tenant_id') with self.subnet(cidr='10.0.0.0/24') as s: network_id = s['subnet']['network_id'] self._set_net_external(network_id) self._make_floatingip(self.fmt, network_id, set_context=ctx, floating_ip='10.0.0.10', http_status=exc.HTTPForbidden.code) def test_create_floatingip_with_specific_ip_out_of_subnet(self): with self.subnet(cidr='10.0.0.0/24') as s: network_id = 
s['subnet']['network_id'] self._set_net_external(network_id) self._make_floatingip(self.fmt, network_id, floating_ip='10.0.1.10', http_status=exc.HTTPBadRequest.code) def test_create_floatingip_with_duplicated_specific_ip(self): with self.subnet(cidr='10.0.0.0/24') as s: network_id = s['subnet']['network_id'] self._set_net_external(network_id) self._make_floatingip(self.fmt, network_id, floating_ip='10.0.0.10') self._make_floatingip(self.fmt, network_id, floating_ip='10.0.0.10', http_status=exc.HTTPConflict.code) def test_create_floatingips_native_quotas(self): quota = 1 cfg.CONF.set_override('quota_floatingip', quota, group='QUOTAS') with self.subnet() as public_sub: self._set_net_external(public_sub['subnet']['network_id']) res = self._create_floatingip( self.fmt, public_sub['subnet']['network_id'], subnet_id=public_sub['subnet']['id']) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self._create_floatingip( self.fmt, public_sub['subnet']['network_id'], subnet_id=public_sub['subnet']['id']) self.assertEqual(exc.HTTPConflict.code, res.status_int) def test_router_specify_id_backend(self): plugin = directory.get_plugin(plugin_constants.L3) router_req = {'router': {'id': _uuid(), 'name': 'router', 'tenant_id': 'foo', 'admin_state_up': True}} result = plugin.create_router(context.Context('', 'foo'), router_req) self.assertEqual(router_req['router']['id'], result['id']) def test_create_floatingip_ipv6_only_network_returns_400(self): with self.subnet(cidr="2001:db8::/48", ip_version=lib_constants.IP_VERSION_6) as public_sub: self._set_net_external(public_sub['subnet']['network_id']) res = self._create_floatingip( self.fmt, public_sub['subnet']['network_id']) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_create_floatingip_ipv6_and_ipv4_network_creates_ipv4(self): with self.network() as n,\ self.subnet(cidr="2001:db8::/48", ip_version=lib_constants.IP_VERSION_6, network=n),\ self.subnet(cidr="192.168.1.0/24", ip_version=lib_constants.IP_VERSION_4, network=n): self._set_net_external(n['network']['id']) fip = self._make_floatingip(self.fmt, n['network']['id']) fip_set = netaddr.IPSet(netaddr.IPNetwork("192.168.1.0/24")) fip_ip = fip['floatingip']['floating_ip_address'] self.assertIn(netaddr.IPAddress(fip_ip), fip_set) def test_create_floatingip_with_assoc_to_ipv6_subnet(self): with self.subnet() as public_sub: self._set_net_external(public_sub['subnet']['network_id']) with self.subnet(cidr="2001:db8::/48", ip_version=lib_constants.IP_VERSION_6 ) as private_sub: with self.port(subnet=private_sub) as private_port: res = self._create_floatingip( self.fmt, public_sub['subnet']['network_id'], port_id=private_port['port']['id']) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_create_floatingip_with_assoc_to_ipv4_and_ipv6_port(self): with self.network() as n,\ self.subnet(cidr='10.0.0.0/24', network=n) as s4,\ self.subnet(cidr='2001:db8::/64', ip_version=lib_constants.IP_VERSION_6, network=n),\ self.port(subnet=s4) as p: self.assertEqual(2, len(p['port']['fixed_ips'])) ipv4_address = next(i['ip_address'] for i in p['port']['fixed_ips'] if netaddr.IPAddress(i['ip_address']).version == 4) with self.floatingip_with_assoc(port_id=p['port']['id']) as fip: self.assertEqual(fip['floatingip']['fixed_ip_address'], ipv4_address) floating_ip = netaddr.IPAddress( fip['floatingip']['floating_ip_address']) self.assertEqual(4, floating_ip.version) def test_create_router_gateway_fails_nested(self): # Force _update_router_gw_info failure plugin = 
directory.get_plugin(plugin_constants.L3) if not isinstance(plugin, l3_db.L3_NAT_dbonly_mixin): self.skipTest("Plugin is not L3_NAT_dbonly_mixin") ctx = context.Context('', 'foo') data = {'router': { 'name': 'router1', 'admin_state_up': True, 'external_gateway_info': {'network_id': 'some_uuid'}, 'tenant_id': 'some_tenant'}} def mock_fail__update_router_gw_info(ctx, router_id, info, router=None): # Fail with breaking transaction with db_api.CONTEXT_WRITER.using(self.ctx): raise n_exc.NeutronException mock.patch.object(plugin, '_update_router_gw_info', side_effect=mock_fail__update_router_gw_info).start() def create_router_with_transaction(ctx, data): # Emulates what many plugins do with db_api.CONTEXT_WRITER.using(ctx): plugin.create_router(ctx, data) # Verify router doesn't persist on failure self.assertRaises(n_exc.NeutronException, create_router_with_transaction, ctx, data) routers = plugin.get_routers(ctx) self.assertEqual(0, len(routers)) def test_create_router_gateway_fails_nested_delete_router_failed(self): # Force _update_router_gw_info failure plugin = directory.get_plugin(plugin_constants.L3) if not isinstance(plugin, l3_db.L3_NAT_dbonly_mixin): self.skipTest("Plugin is not L3_NAT_dbonly_mixin") ctx = context.Context('', 'foo') data = {'router': { 'name': 'router1', 'admin_state_up': True, 'external_gateway_info': {'network_id': 'some_uuid'}, 'tenant_id': 'some_tenant'}} def mock_fail__update_router_gw_info(ctx, router_id, info, router=None): # Fail with breaking transaction with db_api.CONTEXT_WRITER.using(ctx): raise n_exc.NeutronException def mock_fail_delete_router(ctx, router_id): with db_api.CONTEXT_WRITER.using(ctx): raise Exception() mock.patch.object(plugin, '_update_router_gw_info', side_effect=mock_fail__update_router_gw_info).start() mock.patch.object(plugin, 'delete_router', mock_fail_delete_router).start() def create_router_with_transaction(ctx, data): # Emulates what many plugins do with db_api.CONTEXT_WRITER.using(ctx): plugin.create_router(ctx, data) # Verify router doesn't persist on failure self.assertRaises(n_exc.NeutronException, create_router_with_transaction, ctx, data) routers = plugin.get_routers(ctx) self.assertEqual(0, len(routers)) def test_router_add_interface_by_port_fails_nested(self): # Force _validate_router_port_info failure plugin = directory.get_plugin(plugin_constants.L3) if not isinstance(plugin, l3_db.L3_NAT_dbonly_mixin): self.skipTest("Plugin is not L3_NAT_dbonly_mixin") orig_update_port = self.plugin.update_port def mock_fail__validate_router_port_info(ctx, router, port_id): # Fail with raising BadRequest exception msg = "Failure mocking..." 
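# NOTE: minimal sketch (assumed names, not the plugin's real session code)
# of the transactional behavior the surrounding tests verify: when router
# creation runs inside an outer writer transaction and a nested step raises,
# nothing from the outer scope may persist.
class _FakeWriterSession:
    def __init__(self):
        self.committed = []
        self._pending = []

    def add(self, obj):
        self._pending.append(obj)

    def run_in_transaction(self, fn):
        # Commit the pending work only if fn completes without raising.
        try:
            fn()
            self.committed.extend(self._pending)
        finally:
            self._pending = []

_session = _FakeWriterSession()

def _create_router_then_fail():
    _session.add('router1')
    raise RuntimeError('nested gw info update failed')

try:
    _session.run_in_transaction(_create_router_then_fail)
except RuntimeError:
    pass
assert _session.committed == []  # the router did not persist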
raise n_exc.BadRequest(resource='router', msg=msg) def mock_update_port_with_transaction(ctx, id, port): # Update port within a sub-transaction with db_api.CONTEXT_WRITER.using(ctx): orig_update_port(ctx, id, port) def add_router_interface_with_transaction(ctx, router_id, interface_info): # Call add_router_interface() within a sub-transaction with db_api.CONTEXT_WRITER.using(ctx): plugin.add_router_interface(ctx, router_id, interface_info) tenant_id = _uuid() ctx = context.Context('', tenant_id) with self.network(tenant_id=tenant_id) as network, ( self.router(name='router1', admin_state_up=True, tenant_id=tenant_id)) as router: with self.subnet(network=network, cidr='10.0.0.0/24', tenant_id=tenant_id) as subnet: fixed_ips = [{'subnet_id': subnet['subnet']['id']}] with self.port(subnet=subnet, fixed_ips=fixed_ips, tenant_id=tenant_id) as port: mock.patch.object( self.plugin, 'update_port', side_effect=( mock_update_port_with_transaction)).start() mock.patch.object( plugin, '_validate_router_port_info', side_effect=( mock_fail__validate_router_port_info)).start() self.assertRaises(n_exc.BadRequest, add_router_interface_with_transaction, ctx, router['router']['id'], {'port_id': port['port']['id']}) # fetch port and confirm device_id and device_owner body = self._show('ports', port['port']['id']) self.assertEqual('', body['port']['device_owner']) self.assertEqual('', body['port']['device_id']) def _test__notify_gateway_port_ip_changed_helper(self, gw_ip_change=True): plugin = directory.get_plugin(plugin_constants.L3) if not hasattr(plugin, 'l3_rpc_notifier'): self.skipTest("Plugin does not support l3_rpc_notifier") # make sure the callback is registered. registry.subscribe( l3_db.L3RpcNotifierMixin._notify_gateway_port_ip_changed, resources.PORT, events.AFTER_UPDATE) with mock.patch.object(plugin.l3_rpc_notifier, 'routers_updated') as chk_method: with self.router() as router: with self.subnet(cidr='1.1.1.0/24') as subnet: self._set_net_external(subnet['subnet']['network_id']) router_id = router['router']['id'] self._add_external_gateway_to_router( router_id, subnet['subnet']['network_id']) body = self._show('routers', router_id) gateway_ips = body['router']['external_gateway_info'][ 'external_fixed_ips'] gateway_ip_len = len(gateway_ips) self.assertEqual(1, gateway_ip_len) gw_port_id = None for p in self._list('ports')['ports']: if (p['device_owner'] == lib_constants.DEVICE_OWNER_ROUTER_GW and p['device_id'] == router_id): gw_port_id = p['id'] self.assertIsNotNone(gw_port_id) gw_ip_len = 1 if gw_ip_change: gw_ip_len += 1 data = {'port': {'fixed_ips': [ {'ip_address': '1.1.1.101'}, {'ip_address': '1.1.1.100'}]}} else: gw_ip = gateway_ips[0]['ip_address'] data = {'port': {'fixed_ips': [ {'ip_address': gw_ip}]}} req = self.new_update_request('ports', data, gw_port_id) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(gw_ip_len, len(res['port']['fixed_ips'])) body = self._show('routers', router_id) gateway_ip_len = len( body['router']['external_gateway_info'][ 'external_fixed_ips']) self.assertEqual(gw_ip_len, gateway_ip_len) chk_method.assert_called_with(mock.ANY, [router_id], None) self.assertEqual(gw_ip_len, chk_method.call_count) def test__notify_gateway_port_ip_changed(self): """Test to make sure notification to routers occurs when the gateway ip address changed. 
""" self._test__notify_gateway_port_ip_changed_helper() def test__notify_gateway_port_ip_not_changed(self): """Test to make sure no notification to routers occurs when the gateway ip address is not changed. """ self._test__notify_gateway_port_ip_changed_helper(gw_ip_change=False) def test_update_subnet_gateway_for_external_net(self): """Test to make sure notification to routers occurs when the gateway ip address of a subnet of the external network is changed. """ plugin = directory.get_plugin(plugin_constants.L3) if not hasattr(plugin, 'l3_rpc_notifier'): self.skipTest("Plugin does not support l3_rpc_notifier") # make sure the callback is registered. registry.subscribe( l3_db.L3RpcNotifierMixin._notify_subnet_gateway_ip_update, resources.SUBNET, events.AFTER_UPDATE) with mock.patch.object(plugin.l3_rpc_notifier, 'routers_updated') as chk_method: with self.network() as network: allocation_pools = [{'start': '120.0.0.3', 'end': '120.0.0.254'}] with self.subnet(network=network, gateway_ip='120.0.0.1', allocation_pools=allocation_pools, cidr='120.0.0.0/24') as subnet: kwargs = { 'device_owner': lib_constants.DEVICE_OWNER_ROUTER_GW, 'device_id': 'fake_device'} with self.port(subnet=subnet, **kwargs): data = {'subnet': {'gateway_ip': '120.0.0.2'}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(data['subnet']['gateway_ip'], res['subnet']['gateway_ip']) chk_method.assert_called_with(mock.ANY, ['fake_device'], None) def test__notify_subnetpool_address_scope_update(self): plugin = directory.get_plugin(plugin_constants.L3) tenant_id = _uuid() with mock.patch.object( plugin, 'notify_routers_updated') as chk_method, \ self.subnetpool(prefixes=['10.0.0.0/24'], admin=True, name='sp', tenant_id=tenant_id) as subnetpool, \ self.router(tenant_id=tenant_id) as router, \ self.network(tenant_id=tenant_id) as network: subnetpool_id = subnetpool['subnetpool']['id'] data = {'subnet': { 'network_id': network['network']['id'], 'subnetpool_id': subnetpool_id, 'prefixlen': 24, 'ip_version': lib_constants.IP_VERSION_4, 'tenant_id': tenant_id}} req = self.new_create_request('subnets', data) subnet = self.deserialize(self.fmt, req.get_response(self.api)) admin_ctx = context.get_admin_context() plugin.add_router_interface( admin_ctx, router['router']['id'], {'subnet_id': subnet['subnet']['id']}) l3_db.L3RpcNotifierMixin._notify_subnetpool_address_scope_update( mock.ANY, mock.ANY, mock.ANY, payload=events.DBEventPayload( admin_ctx, resource_id=subnetpool_id)) chk_method.assert_called_with(admin_ctx, [router['router']['id']]) def test_janitor_clears_orphaned_floatingip_port(self): plugin = directory.get_plugin(plugin_constants.L3) with self.network() as n: # floating IP ports are initially created with a device ID of # PENDING and are updated after the floating IP is actually # created. 
port_res = self._create_port( self.fmt, n['network']['id'], tenant_id=n['network']['tenant_id'], device_id='PENDING', device_owner=lib_constants.DEVICE_OWNER_FLOATINGIP) port = self.deserialize(self.fmt, port_res) plugin._clean_garbage() # first call should just have marked it as a candidate so port # should still exist port = self._show('ports', port['port']['id']) self.assertEqual('PENDING', port['port']['device_id']) # second call will delete the port since it has no associated # floating IP plugin._clean_garbage() self._show('ports', port['port']['id'], expected_code=exc.HTTPNotFound.code) def test_janitor_updates_port_device_id(self): # if a server dies after the floating IP is created but before it # updates the floating IP port device ID, the janitor will be # responsible for updating the device ID to the correct value. plugin = directory.get_plugin(plugin_constants.L3) with self.floatingip_with_assoc() as fip: fip_port = self._list('ports', query_params='device_owner=network:floatingip')['ports'][0] # simulate a failed update by just setting the device_id of # the fip port back to PENDING data = {'port': {'device_id': 'PENDING'}} self._update('ports', fip_port['id'], data) plugin._clean_garbage() # first call just marks as candidate, so it shouldn't be changed port = self._show('ports', fip_port['id']) self.assertEqual('PENDING', port['port']['device_id']) # second call updates device ID to fip plugin._clean_garbage() # after the second call the device ID should have been updated to # the floating IP's ID port = self._show('ports', fip_port['id']) self.assertEqual(fip['floatingip']['id'], port['port']['device_id']) def test_janitor_doesnt_delete_if_fixed_in_interim(self): # here we ensure that the janitor doesn't delete the port on the second # call if the conditions have been fixed plugin = directory.get_plugin(plugin_constants.L3) with self.network() as n: port_res = self._create_port( self.fmt, n['network']['id'], tenant_id=n['network']['tenant_id'], device_id='PENDING', device_owner=lib_constants.DEVICE_OWNER_FLOATINGIP) port = self.deserialize(self.fmt, port_res) plugin._clean_garbage() # first call should just have marked it as a candidate so port # should still exist port = self._show('ports', port['port']['id']) self.assertEqual('PENDING', port['port']['device_id']) data = {'port': {'device_id': 'something_else'}} self._update('ports', port['port']['id'], data) # now that the device ID has changed, the janitor shouldn't delete plugin._clean_garbage() self._show('ports', port['port']['id']) def test_router_delete_callback(self): def prevent_router_deletion(*args, **kwargs): # unsubscribe now that we have invoked the callback registry.unsubscribe(prevent_router_deletion, resources.ROUTER, events.BEFORE_DELETE) raise exc.HTTPForbidden registry.subscribe(prevent_router_deletion, resources.ROUTER, events.BEFORE_DELETE) with self.subnet(): res = self._create_router(self.fmt, _uuid()) router = self.deserialize(self.fmt, res) self._delete('routers', router['router']['id'], exc.HTTPForbidden.code) def test_associate_to_dhcp_port_fails(self): with self.subnet(cidr="10.0.0.0/24", ip_version=lib_constants.IP_VERSION_4) as sub: with self.port(subnet=sub, device_owner=lib_constants.DEVICE_OWNER_DHCP) as p: res = self._create_floatingip( self.fmt, sub['subnet']['network_id'], port_id=p['port']['id']) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) class L3AgentDbTestCaseBase(L3NatTestCaseMixin): """Unit tests for methods called by the L3 agent.""" def test_l3_agent_routers_query_interfaces(self): with
self.router() as r: with self.port() as p: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) routers = self.plugin.get_sync_data( context.get_admin_context(), None) self.assertEqual(1, len(routers)) interfaces = routers[0][lib_constants.INTERFACE_KEY] self.assertEqual(1, len(interfaces)) subnets = interfaces[0]['subnets'] self.assertEqual(1, len(subnets)) subnet_id = subnets[0]['id'] wanted_subnetid = p['port']['fixed_ips'][0]['subnet_id'] self.assertEqual(wanted_subnetid, subnet_id) def test_l3_agent_sync_interfaces(self): """Test that the L3 interfaces query returns valid results""" with self.router() as router1, self.router() as router2: with self.port() as port1, self.port() as port2: self._router_interface_action('add', router1['router']['id'], None, port1['port']['id']) self._router_interface_action('add', router2['router']['id'], None, port2['port']['id']) admin_ctx = context.get_admin_context() router1_id = router1['router']['id'] router2_id = router2['router']['id'] # Verify that passing in only router1 returns router1's # interface only ifaces = self.plugin._get_sync_interfaces(admin_ctx, [router1_id]) self.assertEqual(1, len(ifaces)) self.assertEqual(router1_id, ifaces[0]['device_id']) # Verify that passing in router1 and router2 returns both # interfaces ifaces = self.plugin._get_sync_interfaces(admin_ctx, [router1_id, router2_id]) self.assertEqual(2, len(ifaces)) device_list = [i['device_id'] for i in ifaces] self.assertIn(router1_id, device_list) self.assertIn(router2_id, device_list) # Verify that passing in no routers returns an empty list ifaces = self.plugin._get_sync_interfaces(admin_ctx, None) self.assertEqual(0, len(ifaces)) def test_l3_agent_routers_query_ignore_interfaces_with_moreThanOneIp(self): with self.router() as r, self.subnet( cidr='9.0.1.0/24') as subnet, self.port( subnet=subnet, fixed_ips=[{'ip_address': '9.0.1.3'}]) as p1, self.port( subnet=subnet, fixed_ips=[{'ip_address': '9.0.1.100'}, {'ip_address': '9.0.1.101'}]) as p2: # Cannot have multiple IPv4 subnets on a router port, # see neutron.db.l3_db line L752-L754 (illustrated in the # sketch just below).
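# NOTE: hedged sketch of the rule referenced in the comment above (this is
# not the plugin's actual validator): a router interface port may not carry
# more than one IPv4 fixed IP.
import netaddr as _netaddr

def _validate_router_port_ips(fixed_ips):
    v4 = [ip for ip in fixed_ips
          if _netaddr.IPAddress(ip['ip_address']).version == 4]
    if len(v4) > 1:
        raise ValueError('Cannot have multiple IPv4 fixed IPs on a '
                         'router interface port')

_validate_router_port_ips([{'ip_address': '9.0.1.3'}])  # accepted
try:
    _validate_router_port_ips([{'ip_address': '9.0.1.100'},
                               {'ip_address': '9.0.1.101'}])
except ValueError:
    pass  # rejected, matching the 400 the action below expects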
self._router_interface_action( 'add', r['router']['id'], None, p2['port']['id'], expected_code=exc.HTTPBadRequest.code) self._router_interface_action('add', r['router']['id'], None, p1['port']['id']) port = {'port': {'fixed_ips': [{'ip_address': '9.0.1.4', 'subnet_id': subnet['subnet']['id']}, {'ip_address': '9.0.1.5', 'subnet_id': subnet['subnet']['id']}]}} ctx = context.get_admin_context() self.assertRaises( n_exc.BadRequest, self.core_plugin.update_port, ctx, p1['port']['id'], port) routers = self.plugin.get_sync_data(ctx, None) self.assertEqual(1, len(routers)) interfaces = routers[0].get(lib_constants.INTERFACE_KEY, []) self.assertEqual(1, len(interfaces)) self._router_interface_action('remove', r['router']['id'], None, p1['port']['id']) def test_l3_agent_routers_query_gateway(self): with self.router() as r: with self.subnet() as s: self._set_net_external(s['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s['subnet']['network_id']) routers = self.plugin.get_sync_data( context.get_admin_context(), [r['router']['id']]) self.assertEqual(1, len(routers)) gw_port = routers[0]['gw_port'] subnets = gw_port.get('subnets') self.assertEqual(1, len(subnets)) self.assertEqual(s['subnet']['id'], subnets[0]['id']) self._remove_external_gateway_from_router( r['router']['id'], s['subnet']['network_id']) def test_l3_agent_routers_query_floatingips(self): with self.floatingip_with_assoc() as fip: routers = self.plugin.get_sync_data( context.get_admin_context(), [fip['floatingip']['router_id']]) self.assertEqual(1, len(routers)) floatingips = routers[0][lib_constants.FLOATINGIP_KEY] self.assertEqual(1, len(floatingips)) self.assertEqual(floatingips[0]['id'], fip['floatingip']['id']) self.assertEqual(floatingips[0]['port_id'], fip['floatingip']['port_id']) self.assertIsNotNone(floatingips[0]['fixed_ip_address']) self.assertIsNotNone(floatingips[0]['router_id']) def _test_notify_op_agent(self, target_func, *args): l3_rpc_agent_api_str = ( 'neutron.api.rpc.agentnotifiers.l3_rpc_agent_api.L3AgentNotifyAPI') with mock.patch(l3_rpc_agent_api_str): plugin = directory.get_plugin(plugin_constants.L3) notifyApi = plugin.l3_rpc_notifier kargs = [item for item in args] kargs.append(notifyApi) target_func(*kargs) def _test_router_gateway_op_agent(self, notifyApi): with self.router() as r: with self.subnet() as s: self._set_net_external(s['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s['subnet']['network_id']) self._remove_external_gateway_from_router( r['router']['id'], s['subnet']['network_id']) self.assertEqual( 2, notifyApi.routers_updated.call_count) def test_router_gateway_op_agent(self): self._test_notify_op_agent(self._test_router_gateway_op_agent) def _test_interfaces_op_agent(self, r, notifyApi): with self.port() as p: self._router_interface_action('add', r['router']['id'], None, p['port']['id']) # clean-up self._router_interface_action('remove', r['router']['id'], None, p['port']['id']) self.assertEqual(2, notifyApi.routers_updated.call_count) def test_interfaces_op_agent(self): with self.router() as r: self._test_notify_op_agent( self._test_interfaces_op_agent, r) def _test_floatingips_op_agent(self, notifyApi): with self.floatingip_with_assoc(): pass # add gateway, add interface, associate, deletion of floatingip self.assertEqual(4, notifyApi.routers_updated.call_count) def test_floatingips_op_agent(self): self._test_notify_op_agent(self._test_floatingips_op_agent) def test_floatingips_create_precommit_event(self): fake_method = 
mock.Mock() try: registry.subscribe(fake_method, resources.FLOATING_IP, events.PRECOMMIT_CREATE) with self.floatingip_with_assoc() as f: fake_method.assert_called_once_with( resources.FLOATING_IP, events.PRECOMMIT_CREATE, mock.ANY, context=mock.ANY, floatingip=mock.ANY, floatingip_id=f['floatingip']['id'], floatingip_db=mock.ANY) finally: registry.unsubscribe(fake_method, resources.FLOATING_IP, events.PRECOMMIT_CREATE) def test_floatingip_delete_after_event(self): fake_method = mock.Mock() try: registry.subscribe(fake_method, resources.FLOATING_IP, events.AFTER_DELETE) with self.subnet(cidr='11.0.0.0/24') as public_sub: self._set_net_external(public_sub['subnet']['network_id']) f = self._make_floatingip(self.fmt, public_sub['subnet']['network_id'], port_id=None, fixed_ip=None, set_context=True) self._delete('floatingips', f['floatingip']['id']) fake_method.assert_called_once_with( resources.FLOATING_IP, events.AFTER_DELETE, mock.ANY, context=mock.ANY, description=mock.ANY, dns_domain=mock.ANY, dns_name=mock.ANY, fixed_ip_address=f['floatingip']['fixed_ip_address'], floating_ip_address=f['floatingip']['floating_ip_address'], floating_network_id=f['floatingip']['floating_network_id'], id=f['floatingip']['id'], port_id=f['floatingip']['port_id'], project_id=f['floatingip']['project_id'], router_id=f['floatingip']['router_id'], status=f['floatingip']['status'], tenant_id=f['floatingip']['tenant_id']) finally: registry.unsubscribe(fake_method, resources.FLOATING_IP, events.AFTER_DELETE) def test_router_create_precommit_event(self): nset = lambda *a, **k: setattr(k['router_db'], 'name', 'hello') registry.subscribe(nset, resources.ROUTER, events.PRECOMMIT_CREATE) with self.router() as r: self.assertEqual('hello', r['router']['name']) def test_router_create_event_exception_preserved(self): # this exception should be propagated out of the callback and # converted into its API equivalent of 404 e404 = mock.Mock(side_effect=l3_exc.RouterNotFound(router_id='1')) registry.subscribe(e404, resources.ROUTER, events.PRECOMMIT_CREATE) res = self._create_router(self.fmt, 'tenid') self.assertEqual(exc.HTTPNotFound.code, res.status_int) # make sure nothing committed body = self._list('routers') self.assertFalse(body['routers']) def test_router_update_precommit_event(self): def _nset(r, v, s, payload=None): setattr(payload.desired_state, 'name', payload.states[0]['name'] + '_ha!') registry.subscribe(_nset, resources.ROUTER, events.PRECOMMIT_UPDATE) with self.router(name='original') as r: update = self._update('routers', r['router']['id'], {'router': {'name': 'hi'}}) # our rude callback should have changed the name to the original # plus some extra self.assertEqual('original_ha!', update['router']['name']) def test_router_update_event_exception_preserved(self): # this exception should be propagated out of the callback and # converted into its API equivalent of 404 e404 = mock.Mock(side_effect=l3_exc.RouterNotFound(router_id='1')) registry.subscribe(e404, resources.ROUTER, events.PRECOMMIT_UPDATE) with self.router(name='a') as r: self._update('routers', r['router']['id'], {'router': {'name': 'hi'}}, expected_code=exc.HTTPNotFound.code) # ensure it stopped the commit new = self._show('routers', r['router']['id']) self.assertEqual('a', new['router']['name']) def test_router_delete_precommit_event(self): deleted = [] auditor = lambda *a, **k: deleted.append(k['router_id']) registry.subscribe(auditor, resources.ROUTER, events.PRECOMMIT_DELETE) with self.router() as r: self._delete('routers', r['router']['id']) 
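# NOTE: minimal sketch of the publish/subscribe pattern the event tests in
# this block rely on, using a plain dict instead of neutron_lib's registry
# (all names here are illustrative assumptions).
_subscriptions = {}

def _subscribe(fn, resource, event):
    _subscriptions.setdefault((resource, event), []).append(fn)

def _notify(resource, event, trigger=None, **kwargs):
    # A callback may raise to veto the operation, as the PRECOMMIT_* and
    # BEFORE_DELETE tests in this block demonstrate.
    for fn in _subscriptions.get((resource, event), []):
        fn(resource, event, trigger, **kwargs)

_audit = []
_subscribe(lambda *a, **k: _audit.append(k['router_id']),
           'router', 'precommit_delete')
_notify('router', 'precommit_delete', router_id='r1')
assert _audit == ['r1']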
self.assertEqual([r['router']['id']], deleted) def test_router_delete_event_exception_preserved(self): # this exception should be propagated out of the callback and # converted into its API equivalent of 409 e409 = mock.Mock(side_effect=l3_exc.RouterInUse(router_id='1')) registry.subscribe(e409, resources.ROUTER, events.PRECOMMIT_DELETE) with self.router() as r: self._delete('routers', r['router']['id'], expected_code=exc.HTTPConflict.code) # ensure it stopped the commit self.assertTrue(self._show('routers', r['router']['id'])) class L3BaseForIntTests(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def setUp(self, plugin=None, ext_mgr=None, service_plugins=None): if not plugin: plugin = 'neutron.tests.unit.extensions.test_l3.TestL3NatIntPlugin' # for these tests we need to enable overlapping ips cfg.CONF.set_default('allow_overlapping_ips', True) ext_mgr = ext_mgr or L3TestExtensionManager() super(L3BaseForIntTests, self).setUp(plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) self.setup_notification_driver() class L3BaseForSepTests(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def setUp(self, plugin=None, ext_mgr=None): # the plugin without L3 support if not plugin: plugin = 'neutron.tests.unit.extensions.test_l3.TestNoL3NatPlugin' # the L3 service plugin l3_plugin = ('neutron.tests.unit.extensions.test_l3.' 'TestL3NatServicePlugin') service_plugins = {'l3_plugin_name': l3_plugin} # for these tests we need to enable overlapping ips cfg.CONF.set_default('allow_overlapping_ips', True) if not ext_mgr: ext_mgr = L3TestExtensionManager() super(L3BaseForSepTests, self).setUp(plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) self.setup_notification_driver() class L3NatDBIntAgentSchedulingTestCase(L3BaseForIntTests, L3NatTestCaseMixin, test_agent. AgentDBTestMixIn): """Unit tests for core plugin with L3 routing and scheduling integrated.""" def setUp(self, plugin='neutron.tests.unit.extensions.test_l3.' 
'TestL3NatIntAgentSchedulingPlugin', ext_mgr=None, service_plugins=None): super(L3NatDBIntAgentSchedulingTestCase, self).setUp( plugin, ext_mgr, service_plugins) self.adminContext = context.get_admin_context() def _assert_router_on_agent(self, router_id, agent_host): plugin = directory.get_plugin(plugin_constants.L3) agents = plugin.list_l3_agents_hosting_router( self.adminContext, router_id)['agents'] self.assertEqual(1, len(agents)) self.assertEqual(agents[0]['host'], agent_host) def test_router_update_gateway_scheduling_not_supported(self): plugin = directory.get_plugin(plugin_constants.L3) mock.patch.object(plugin, 'router_supports_scheduling', return_value=False).start() with self.router() as r: with self.subnet() as s1: with self.subnet() as s2: self._set_net_external(s1['subnet']['network_id']) self._set_net_external(s2['subnet']['network_id']) # this should pass even though there are multiple # external networks since no scheduling decision needs # to be made self._add_external_gateway_to_router( r['router']['id'], s1['subnet']['network_id']) class L3RpcCallbackTestCase(base.BaseTestCase): def setUp(self): super(L3RpcCallbackTestCase, self).setUp() self.mock_plugin = mock.patch.object( l3_rpc.L3RpcCallback, 'plugin', new_callable=mock.PropertyMock).start() self.mock_l3plugin = mock.patch.object( l3_rpc.L3RpcCallback, 'l3plugin', new_callable=mock.PropertyMock).start() self.l3_rpc_cb = l3_rpc.L3RpcCallback() def test__ensure_host_set_on_port_host_id_none(self): port = {'id': 'id', portbindings.HOST_ID: 'somehost'} self.l3_rpc_cb._ensure_host_set_on_port(None, None, port) self.assertFalse(self.l3_rpc_cb.plugin.update_port.called) def test__ensure_host_set_on_port_bad_bindings(self): for b in (portbindings.VIF_TYPE_BINDING_FAILED, portbindings.VIF_TYPE_UNBOUND): port = {'id': 'id', portbindings.HOST_ID: 'somehost', portbindings.VIF_TYPE: b} self.l3_rpc_cb._ensure_host_set_on_port(None, 'somehost', port) self.assertTrue(self.l3_rpc_cb.plugin.update_port.called) def test__ensure_host_set_on_port_update_on_concurrent_delete(self): port_id = 'foo_port_id' port = { 'id': port_id, 'device_owner': DEVICE_OWNER_COMPUTE, portbindings.HOST_ID: '', portbindings.VIF_TYPE: portbindings.VIF_TYPE_BINDING_FAILED } router_id = 'foo_router_id' self.l3_rpc_cb.plugin.update_port.side_effect = n_exc.PortNotFound( port_id=port_id) with mock.patch.object(l3_rpc.LOG, 'debug') as mock_log: self.l3_rpc_cb._ensure_host_set_on_port( mock.ANY, mock.ANY, port, router_id) self.l3_rpc_cb.plugin.update_port.assert_called_once_with( mock.ANY, port_id, {'port': {portbindings.HOST_ID: mock.ANY}}) self.assertTrue(mock_log.call_count) expected_message = ('Port foo_port_id not found while updating ' 'agent binding for router foo_router_id.') actual_message = mock_log.call_args[0][0] % mock_log.call_args[0][1] self.assertEqual(expected_message, actual_message) def test__ensure_host_set_on_ports_dvr_ha_router_with_gateway(self): context = mock.Mock() host = "fake_host" router_id = 'foo_router_id' router = {"id": router_id, "gw_port_host": host, "gw_port": {"id": "foo_port_id"}, "distributed": True, "ha": True} mock__ensure = mock.Mock() self.l3_rpc_cb._ensure_host_set_on_port = mock__ensure self.l3_rpc_cb._ensure_host_set_on_ports(context, host, [router]) mock__ensure.assert_called_once_with( context, host, router["gw_port"], router_id, ha_router_port=True) class L3AgentDbIntTestCase(L3BaseForIntTests, L3AgentDbTestCaseBase): """Unit tests for methods called by the L3 agent for the case where core plugin implements L3
routing. """ def setUp(self): super(L3AgentDbIntTestCase, self).setUp() self.core_plugin = TestL3NatIntPlugin() self.plugin = self.core_plugin class L3AgentDbSepTestCase(L3BaseForSepTests, L3AgentDbTestCaseBase): """Unit tests for methods called by the L3 agent for the case where separate service plugin implements L3 routing. """ def setUp(self): super(L3AgentDbSepTestCase, self).setUp() self.core_plugin = TestNoL3NatPlugin() self.plugin = TestL3NatServicePlugin() class TestL3DbOperationBounds(test_db_base_plugin_v2.DbOperationBoundMixin, L3NatTestCaseMixin, ml2_base.ML2TestFramework): def setUp(self): super(TestL3DbOperationBounds, self).setUp() ext_mgr = L3TestExtensionManager() self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr) self.kwargs = self.get_api_kwargs() def test_router_list_queries_constant(self): with self.subnet(**self.kwargs) as s: self._set_net_external(s['subnet']['network_id']) def router_maker(): ext_info = {'network_id': s['subnet']['network_id']} res = self._create_router( self.fmt, arg_list=('external_gateway_info',), external_gateway_info=ext_info, **self.kwargs) return self.deserialize(self.fmt, res) self._assert_object_list_queries_constant(router_maker, 'routers') class TestL3DbOperationBoundsTenant(TestL3DbOperationBounds): admin = False class L3NatDBTestCaseMixin(object): """L3_NAT_dbonly_mixin specific test cases.""" def setUp(self): super(L3NatDBTestCaseMixin, self).setUp() plugin = directory.get_plugin(plugin_constants.L3) if not isinstance(plugin, l3_db.L3_NAT_dbonly_mixin): self.skipTest("Plugin is not L3_NAT_dbonly_mixin") def test_create_router_gateway_fails(self): """Force _update_router_gw_info failure and see the exception is propagated. """ plugin = directory.get_plugin(plugin_constants.L3) ctx = context.Context('', 'foo') class MyException(Exception): pass mock.patch.object(plugin, '_update_router_gw_info', side_effect=MyException).start() with self.network() as n: data = {'router': { 'name': 'router1', 'admin_state_up': True, 'tenant_id': ctx.tenant_id, 'external_gateway_info': {'network_id': n['network']['id']}}} self.assertRaises(MyException, plugin.create_router, ctx, data) # Verify router doesn't persist on failure routers = plugin.get_routers(ctx) self.assertEqual(0, len(routers)) class L3NatDBIntTestCase(L3BaseForIntTests, L3NatTestCaseBase, L3NatDBTestCaseMixin): """Unit tests for core plugin with L3 routing integrated.""" pass class L3NatDBSepTestCase(L3BaseForSepTests, L3NatTestCaseBase, L3NatDBTestCaseMixin): """Unit tests for a separate L3 routing service plugin.""" def test_port_deletion_prevention_handles_missing_port(self): pl = directory.get_plugin(plugin_constants.L3) self.assertIsNone( pl.prevent_l3_port_deletion(context.get_admin_context(), 'fakeid') ) class L3TestExtensionManagerWithDNS(L3TestExtensionManager): def get_resources(self): return l3.L3.get_resources() class L3NatDBFloatingIpTestCaseWithDNS(L3BaseForSepTests, L3NatTestCaseMixin): """Unit tests for floating ip with external DNS integration""" fmt = 'json' DNS_NAME = 'test' DNS_DOMAIN = 'test-domain.org.' PUBLIC_CIDR = '11.0.0.0/24' PRIVATE_CIDR = '10.0.0.0/24' mock_client = mock.MagicMock() mock_admin_client = mock.MagicMock() MOCK_PATH = ('neutron.services.externaldns.drivers.' 
'designate.driver.get_clients') mock_config = {'return_value': (mock_client, mock_admin_client)} _extension_drivers = ['dns'] def setUp(self): ext_mgr = L3TestExtensionManagerWithDNS() plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin' cfg.CONF.set_override('extension_drivers', self._extension_drivers, group='ml2') super(L3NatDBFloatingIpTestCaseWithDNS, self).setUp(plugin=plugin, ext_mgr=ext_mgr) cfg.CONF.set_override('external_dns_driver', 'designate') self.mock_client.reset_mock() self.mock_admin_client.reset_mock() def _create_network(self, fmt, name, admin_state_up, arg_list=None, set_context=False, tenant_id=None, **kwargs): new_arg_list = ('dns_domain',) if arg_list is not None: new_arg_list = arg_list + new_arg_list return super(L3NatDBFloatingIpTestCaseWithDNS, self)._create_network(fmt, name, admin_state_up, arg_list=new_arg_list, set_context=set_context, tenant_id=tenant_id, **kwargs) def _create_port(self, fmt, name, admin_state_up, arg_list=None, set_context=False, tenant_id=None, **kwargs): new_arg_list = ('dns_name',) if arg_list is not None: new_arg_list = arg_list + new_arg_list return super(L3NatDBFloatingIpTestCaseWithDNS, self)._create_port(fmt, name, admin_state_up, arg_list=new_arg_list, set_context=set_context, tenant_id=tenant_id, **kwargs) def _create_net_sub_port(self, dns_domain='', dns_name=''): with self.network(dns_domain=dns_domain) as n: with self.subnet(cidr=self.PRIVATE_CIDR, network=n) as private_sub: with self.port(private_sub, dns_name=dns_name) as p: return n, private_sub, p @contextlib.contextmanager def _create_floatingip_with_dns(self, net_dns_domain='', port_dns_name='', flip_dns_domain='', flip_dns_name='', assoc_port=False, private_sub=None): if private_sub is None: n, private_sub, p = self._create_net_sub_port( dns_domain=net_dns_domain, dns_name=port_dns_name) data = {'fmt': self.fmt} data['dns_domain'] = flip_dns_domain data['dns_name'] = flip_dns_name # Set ourselves up to call the right function with # the right arguments for the with block if assoc_port: data['tenant_id'] = n['network']['tenant_id'] data['port_id'] = p['port']['id'] create_floatingip = self.floatingip_with_assoc else: data['private_sub'] = private_sub create_floatingip = self.floatingip_no_assoc with create_floatingip(**data) as flip: yield flip['floatingip'] @contextlib.contextmanager def _create_floatingip_with_dns_on_update(self, net_dns_domain='', port_dns_name='', flip_dns_domain='', flip_dns_name=''): n, private_sub, p = self._create_net_sub_port( dns_domain=net_dns_domain, dns_name=port_dns_name) with self._create_floatingip_with_dns(flip_dns_domain=flip_dns_domain, flip_dns_name=flip_dns_name, private_sub=private_sub) as flip: flip_id = flip['id'] data = {'floatingip': {'port_id': p['port']['id']}} req = self.new_update_request('floatingips', data, flip_id) res = req.get_response(self._api_for_resource('floatingip')) self.assertEqual(200, res.status_code) floatingip = self.deserialize(self.fmt, res)['floatingip'] self.assertEqual(p['port']['id'], floatingip['port_id']) yield flip def _get_in_addr_zone_name(self, in_addr_name): units = self._get_bytes_or_nybles_to_skip(in_addr_name) return '.'.join(in_addr_name.split('.')[int(units):]) def _get_bytes_or_nybles_to_skip(self, in_addr_name): if 'in-addr.arpa' in in_addr_name: return (( 32 - cfg.CONF.designate.ipv4_ptr_zone_prefix_size) / 8) return (128 - cfg.CONF.designate.ipv6_ptr_zone_prefix_size) / 4 def _get_in_addr(self, record): in_addr_name = netaddr.IPAddress(record).reverse_dns in_addr_zone_name = 
self._get_in_addr_zone_name(in_addr_name) return in_addr_name, in_addr_zone_name def _assert_recordset_created(self, floating_ip_address): # The recordsets.create function should be called with: # dns_domain, dns_name, 'A', ip_address ('A' for IPv4, 'AAAA' for IPv6) self.mock_client.recordsets.create.assert_called_with(self.DNS_DOMAIN, self.DNS_NAME, 'A', [floating_ip_address]) in_addr_name, in_addr_zone_name = self._get_in_addr( floating_ip_address) self.mock_admin_client.recordsets.create.assert_called_with( in_addr_zone_name, in_addr_name, 'PTR', ['%s.%s' % (self.DNS_NAME, self.DNS_DOMAIN)]) @mock.patch(MOCK_PATH, **mock_config) def test_floatingip_create(self, mock_args): with self._create_floatingip_with_dns(): pass self.mock_client.recordsets.create.assert_not_called() self.mock_admin_client.recordsets.create.assert_not_called() @mock.patch(MOCK_PATH, **mock_config) def test_floatingip_create_with_flip_dns(self, mock_args): with self._create_floatingip_with_dns(flip_dns_domain=self.DNS_DOMAIN, flip_dns_name=self.DNS_NAME) as flip: floatingip = flip self._assert_recordset_created(floatingip['floating_ip_address']) self.assertEqual(self.DNS_DOMAIN, floatingip['dns_domain']) self.assertEqual(self.DNS_NAME, floatingip['dns_name']) @mock.patch(MOCK_PATH, **mock_config) def test_floatingip_create_with_net_port_dns(self, mock_args): cfg.CONF.set_override('dns_domain', self.DNS_DOMAIN) with self._create_floatingip_with_dns(net_dns_domain=self.DNS_DOMAIN, port_dns_name=self.DNS_NAME, assoc_port=True) as flip: floatingip = flip self._assert_recordset_created(floatingip['floating_ip_address']) @mock.patch(MOCK_PATH, **mock_config) def test_floatingip_create_with_flip_and_net_port_dns(self, mock_args): # If both network+port and the floating ip have dns domain and # dns name, floating ip's information should take priority cfg.CONF.set_override('dns_domain', self.DNS_DOMAIN) with self._create_floatingip_with_dns(net_dns_domain='junkdomain.org.', port_dns_name='junk', flip_dns_domain=self.DNS_DOMAIN, flip_dns_name=self.DNS_NAME, assoc_port=True) as flip: floatingip = flip # External DNS service should have been called with floating ip's # dns information, not the network+port's dns information self._assert_recordset_created(floatingip['floating_ip_address']) self.assertEqual(self.DNS_DOMAIN, floatingip['dns_domain']) self.assertEqual(self.DNS_NAME, floatingip['dns_name']) @mock.patch(MOCK_PATH, **mock_config) def test_floatingip_associate_port(self, mock_args): with self._create_floatingip_with_dns_on_update(): pass self.mock_client.recordsets.create.assert_not_called() self.mock_admin_client.recordsets.create.assert_not_called() @mock.patch(MOCK_PATH, **mock_config) def test_floatingip_associate_port_with_flip_dns(self, mock_args): with self._create_floatingip_with_dns_on_update( flip_dns_domain=self.DNS_DOMAIN, flip_dns_name=self.DNS_NAME) as flip: floatingip = flip self._assert_recordset_created(floatingip['floating_ip_address']) self.assertEqual(self.DNS_DOMAIN, floatingip['dns_domain']) self.assertEqual(self.DNS_NAME, floatingip['dns_name']) @mock.patch(MOCK_PATH, **mock_config) def test_floatingip_associate_port_with_net_port_dns(self, mock_args): cfg.CONF.set_override('dns_domain', self.DNS_DOMAIN) with self._create_floatingip_with_dns_on_update( net_dns_domain=self.DNS_DOMAIN, port_dns_name=self.DNS_NAME) as flip: floatingip = flip self._assert_recordset_created(floatingip['floating_ip_address']) @mock.patch(MOCK_PATH, **mock_config) def 
test_floatingip_associate_port_with_flip_and_net_port_dns(self, mock_args): # If both network+port and the floating ip have dns domain and # dns name, floating ip's information should take priority cfg.CONF.set_override('dns_domain', self.DNS_DOMAIN) with self._create_floatingip_with_dns_on_update( net_dns_domain='junkdomain.org.', port_dns_name='junk', flip_dns_domain=self.DNS_DOMAIN, flip_dns_name=self.DNS_NAME) as flip: floatingip = flip self._assert_recordset_created(floatingip['floating_ip_address']) self.assertEqual(self.DNS_DOMAIN, floatingip['dns_domain']) self.assertEqual(self.DNS_NAME, floatingip['dns_name']) @mock.patch(MOCK_PATH, **mock_config) def test_floatingip_disassociate_port(self, mock_args): cfg.CONF.set_override('dns_domain', self.DNS_DOMAIN) with self._create_floatingip_with_dns(net_dns_domain=self.DNS_DOMAIN, port_dns_name=self.DNS_NAME, assoc_port=True) as flip: fake_recordset = {'id': '', 'records': [flip['floating_ip_address']]} # This method is called during recordset deletion, which # will fail unless the list function call returns something like # this fake value self.mock_client.recordsets.list.return_value = ([fake_recordset]) # Port gets disassociated if port_id is not in the request body data = {'floatingip': {}} req = self.new_update_request('floatingips', data, flip['id']) res = req.get_response(self._api_for_resource('floatingip')) floatingip = self.deserialize(self.fmt, res)['floatingip'] flip_port_id = floatingip['port_id'] self.assertEqual(200, res.status_code) self.assertIsNone(flip_port_id) in_addr_name, in_addr_zone_name = self._get_in_addr( floatingip['floating_ip_address']) self.mock_client.recordsets.delete.assert_called_with( self.DNS_DOMAIN, '') self.mock_admin_client.recordsets.delete.assert_called_with( in_addr_zone_name, in_addr_name) @mock.patch(MOCK_PATH, **mock_config) def test_floatingip_delete(self, mock_args): cfg.CONF.set_override('dns_domain', self.DNS_DOMAIN) with self._create_floatingip_with_dns(flip_dns_domain=self.DNS_DOMAIN, flip_dns_name=self.DNS_NAME) as flip: floatingip = flip # This method is called during recordset deletion, which will # fail unless the list function call returns something like # this fake value fake_recordset = {'id': '', 'records': [floatingip['floating_ip_address']]} self.mock_client.recordsets.list.return_value = [fake_recordset] in_addr_name, in_addr_zone_name = self._get_in_addr( floatingip['floating_ip_address']) self.mock_client.recordsets.delete.assert_called_with( self.DNS_DOMAIN, '') self.mock_admin_client.recordsets.delete.assert_called_with( in_addr_zone_name, in_addr_name) @mock.patch(MOCK_PATH, **mock_config) def test_floatingip_no_PTR_record(self, mock_args): cfg.CONF.set_override('dns_domain', self.DNS_DOMAIN) # Disabling this option should stop the admin client from creating # PTR records. 
So set this option and make sure the admin client # wasn't called to create any records cfg.CONF.set_override('allow_reverse_dns_lookup', False, group='designate') with self._create_floatingip_with_dns(flip_dns_domain=self.DNS_DOMAIN, flip_dns_name=self.DNS_NAME) as flip: floatingip = flip self.mock_client.recordsets.create.assert_called_with(self.DNS_DOMAIN, self.DNS_NAME, 'A', [floatingip['floating_ip_address']]) self.mock_admin_client.recordsets.create.assert_not_called() self.assertEqual(self.DNS_DOMAIN, floatingip['dns_domain']) self.assertEqual(self.DNS_NAME, floatingip['dns_name']) class L3DBFloatingIpTestCaseLogging(L3BaseForSepTests, L3NatTestCaseMixin): def setUp(self, *args, **kwargs): ext_mgr = L3TestExtensionManagerWithDNS() plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin' super(L3DBFloatingIpTestCaseLogging, self).setUp(plugin=plugin, ext_mgr=ext_mgr) self.mock_log = mock.patch.object(l3_db, 'LOG').start() def test_create_floatingip_event_logging_port_assoc(self): with self.floatingip_with_assoc() as fip: msg_vars = {'fip_id': fip['floatingip']['id'], 'ext_ip': fip['floatingip']['floating_ip_address'], 'port_id': fip['floatingip']['port_id'], 'assoc': 'associated'} self.mock_log.info.assert_called_once_with(l3_db.FIP_ASSOC_MSG, msg_vars) def test_update_floatingip_event_logging(self): with self.port() as port: private_subnet = {'subnet': { 'id': port['port']['fixed_ips'][0]['subnet_id']}} with self.floatingip_no_assoc(private_subnet) as fip: self.mock_log.info.assert_not_called() fip_id = fip['floatingip']['id'] data = {'floatingip': {'port_id': port['port']['id']}} req = self.new_update_request('floatingips', data, fip_id) res = req.get_response(self._api_for_resource('floatingip')) self.assertEqual(200, res.status_code) msg_vars = {'fip_id': fip['floatingip']['id'], 'ext_ip': fip['floatingip']['floating_ip_address'], 'port_id': port['port']['id'], 'assoc': 'associated'} self.mock_log.info.assert_called_once_with(l3_db.FIP_ASSOC_MSG, msg_vars) def test_update_floatingip_event_logging_disassociate(self): with self.floatingip_with_assoc() as fip: self.mock_log.reset_mock() fip_id = fip['floatingip']['id'] data = {'floatingip': {'port_id': None}} req = self.new_update_request('floatingips', data, fip_id) res = req.get_response(self._api_for_resource('floatingip')) self.assertEqual(200, res.status_code) msg_vars = {'fip_id': fip['floatingip']['id'], 'ext_ip': fip['floatingip']['floating_ip_address'], 'port_id': fip['floatingip']['port_id'], 'assoc': 'disassociated'} self.mock_log.info.assert_called_once_with(l3_db.FIP_ASSOC_MSG, msg_vars) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_l3_ext_gw_mode.py0000644000175000017500000005551100000000000027474 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
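# NOTE: hedged sketch of the enable_snat defaulting rule that the
# TestGetEnableSnat scenarios below exercise: an explicit 'enable_snat' key
# in the gateway info wins; otherwise the enable_snat_by_default value
# applies. This stand-in takes the default as a parameter instead of
# reading oslo.config, so it is an assumption-level illustration only.
def _get_enable_snat_sketch(info, enable_snat_by_default=True):
    if info and 'enable_snat' in info:
        return info['enable_snat']
    return enable_snat_by_default

assert _get_enable_snat_sketch({'enable_snat': False}) is False
assert _get_enable_snat_sketch({'network_id': 'some-net'}) is True
assert _get_enable_snat_sketch({}, enable_snat_by_default=False) is False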
# import mock import netaddr from neutron_lib.api.definitions import external_net as enet_apidef from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.api.definitions import l3_ext_gw_mode from neutron_lib import constants from neutron_lib import context as nctx from neutron_lib.db import api as db_api from neutron_lib.plugins import directory from neutron_lib.utils import net as net_utils from oslo_config import cfg from oslo_db import exception as db_exc from oslo_serialization import jsonutils from oslo_utils import uuidutils import testscenarios from webob import exc from neutron.db import l3_db from neutron.db import l3_gwmode_db from neutron.db.models import l3 as l3_models from neutron.extensions import l3 from neutron.objects import network as net_obj from neutron.objects import ports as port_obj from neutron.objects import router as l3_obj from neutron.objects import subnet as subnet_obj from neutron.tests import base from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit.extensions import test_l3 from neutron.tests.unit import testlib_api _uuid = uuidutils.generate_uuid FAKE_GW_PORT_ID = _uuid() FAKE_GW_PORT_MAC = 'aa:bb:cc:dd:ee:ff' FAKE_FIP_EXT_PORT_ID = _uuid() FAKE_FIP_EXT_PORT_MAC = '11:22:33:44:55:66' FAKE_FIP_INT_PORT_ID = _uuid() FAKE_FIP_INT_PORT_MAC = 'aa:aa:aa:aa:aa:aa' FAKE_ROUTER_PORT_ID = _uuid() FAKE_ROUTER_PORT_MAC = 'bb:bb:bb:bb:bb:bb' class TestExtensionManager(object): def get_resources(self): return l3.L3.get_resources() def get_actions(self): return [] def get_request_extensions(self): return [] # A simple class for making a concrete class out of the mixin # for the case of a plugin that integrates l3 routing. class TestDbIntPlugin(test_l3.TestL3NatIntPlugin, l3_gwmode_db.L3_NAT_db_mixin): supported_extension_aliases = [enet_apidef.ALIAS, l3_apidef.ALIAS, l3_ext_gw_mode.ALIAS] # A simple class for making a concrete class out of the mixin # for the case of a l3 router service plugin class TestDbSepPlugin(test_l3.TestL3NatServicePlugin, l3_gwmode_db.L3_NAT_db_mixin): supported_extension_aliases = [l3_apidef.ALIAS, l3_ext_gw_mode.ALIAS] class TestGetEnableSnat(testscenarios.WithScenarios, base.BaseTestCase): scenarios = [ ('enabled', {'enable_snat_by_default': True}), ('disabled', {'enable_snat_by_default': False})] def setUp(self): super(TestGetEnableSnat, self).setUp() self.config(enable_snat_by_default=self.enable_snat_by_default) def _test_get_enable_snat(self, expected, info): observed = l3_gwmode_db.L3_NAT_dbonly_mixin._get_enable_snat(info) self.assertEqual(expected, observed) def test_get_enable_snat_without_gw_info(self): self._test_get_enable_snat(self.enable_snat_by_default, {}) def test_get_enable_snat_without_enable_snat(self): info = {'network_id': _uuid()} self._test_get_enable_snat(self.enable_snat_by_default, info) def test_get_enable_snat_with_snat_enabled(self): self._test_get_enable_snat(True, {'enable_snat': True}) def test_get_enable_snat_with_snat_disabled(self): self._test_get_enable_snat(False, {'enable_snat': False}) class TestL3GwModeMixin(testlib_api.SqlTestCase): def setUp(self): super(TestL3GwModeMixin, self).setUp() plugin = __name__ + '.' 
+ TestDbIntPlugin.__name__ self.setup_coreplugin(plugin) self.target_object = TestDbIntPlugin() # Patch the context ctx_patcher = mock.patch('neutron_lib.context', autospec=True) mock_context = ctx_patcher.start() self.context = mock_context.get_admin_context() # This ensure also calls to elevated work in unit tests self.context.elevated.return_value = self.context self.context.session = db_api.get_writer_session() # Create sample data for tests self.ext_net_id = _uuid() self.int_net_id = _uuid() self.int_sub_id = _uuid() self.tenant_id = 'the_tenant' self.network = net_obj.Network( self.context, id=self.ext_net_id, project_id=self.tenant_id, admin_state_up=True, status=constants.NET_STATUS_ACTIVE) self.net_ext = net_obj.ExternalNetwork( self.context, network_id=self.ext_net_id) self.network.create() self.net_ext.create() self.router = l3_models.Router( id=_uuid(), name=None, tenant_id=self.tenant_id, admin_state_up=True, status=constants.NET_STATUS_ACTIVE, enable_snat=True, gw_port_id=None) self.context.session.add(self.router) self.context.session.flush() self.router_gw_port = port_obj.Port( self.context, id=FAKE_GW_PORT_ID, project_id=self.tenant_id, device_id=self.router.id, device_owner=l3_db.DEVICE_OWNER_ROUTER_GW, admin_state_up=True, status=constants.PORT_STATUS_ACTIVE, mac_address=netaddr.EUI(FAKE_GW_PORT_MAC), network_id=self.ext_net_id) self.router_gw_port.create() self.router.gw_port_id = self.router_gw_port.id self.context.session.add(self.router) self.context.session.flush() self.fip_ext_port = port_obj.Port( self.context, id=FAKE_FIP_EXT_PORT_ID, project_id=self.tenant_id, admin_state_up=True, device_id=self.router.id, device_owner=l3_db.DEVICE_OWNER_FLOATINGIP, status=constants.PORT_STATUS_ACTIVE, mac_address=netaddr.EUI(FAKE_FIP_EXT_PORT_MAC), network_id=self.ext_net_id) self.fip_ext_port.create() self.context.session.flush() self.int_net = net_obj.Network( self.context, id=self.int_net_id, project_id=self.tenant_id, admin_state_up=True, status=constants.NET_STATUS_ACTIVE) self.int_sub = subnet_obj.Subnet(self.context, id=self.int_sub_id, project_id=self.tenant_id, ip_version=constants.IP_VERSION_4, cidr=net_utils.AuthenticIPNetwork('3.3.3.0/24'), gateway_ip=netaddr.IPAddress('3.3.3.1'), network_id=self.int_net_id) self.router_port = port_obj.Port( self.context, id=FAKE_ROUTER_PORT_ID, project_id=self.tenant_id, admin_state_up=True, device_id=self.router.id, device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF, status=constants.PORT_STATUS_ACTIVE, mac_address=netaddr.EUI(FAKE_ROUTER_PORT_MAC), network_id=self.int_net_id) self.router_port_ip_info = port_obj.IPAllocation(self.context, port_id=self.router_port.id, network_id=self.int_net.id, subnet_id=self.int_sub_id, ip_address='3.3.3.1') self.int_net.create() self.int_sub.create() self.router_port.create() self.router_port_ip_info.create() self.context.session.flush() self.fip_int_port = port_obj.Port( self.context, id=FAKE_FIP_INT_PORT_ID, project_id=self.tenant_id, admin_state_up=True, device_id='something', device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX + 'nova', status=constants.PORT_STATUS_ACTIVE, mac_address=netaddr.EUI(FAKE_FIP_INT_PORT_MAC), network_id=self.int_net_id) self.fip_int_ip_info = port_obj.IPAllocation(self.context, port_id=self.fip_int_port.id, network_id=self.int_net.id, subnet_id=self.int_sub_id, ip_address='3.3.3.3') self.fip = l3_obj.FloatingIP( self.context, id=_uuid(), floating_ip_address=netaddr.IPAddress('1.1.1.2'), floating_network_id=self.ext_net_id, floating_port_id=FAKE_FIP_EXT_PORT_ID, 
fixed_port_id=None, fixed_ip_address=None, router_id=None) self.fip_int_port.create() self.fip_int_ip_info.create() self.fip.create() self.context.session.flush() self.context.session.expire_all() self.fip_request = {'port_id': FAKE_FIP_INT_PORT_ID, 'tenant_id': self.tenant_id} def _get_gwports_dict(self, gw_ports): return dict((gw_port['id'], gw_port) for gw_port in gw_ports) def _reset_ext_gw(self): # Reset external gateway self.router.gw_port_id = None self.context.session.add(self.router) self.context.session.flush() def _test_update_router_gw(self, current_enable_snat, gw_info=None, expected_enable_snat=True): if not current_enable_snat: previous_gw_info = {'network_id': self.ext_net_id, 'enable_snat': current_enable_snat} self.target_object._update_router_gw_info( self.context, self.router.id, previous_gw_info) self.target_object._update_router_gw_info( self.context, self.router.id, gw_info) router = self.target_object._get_router( self.context, self.router.id) try: self.assertEqual(FAKE_GW_PORT_ID, router.gw_port.id) self.assertEqual(netaddr.EUI(FAKE_GW_PORT_MAC), router.gw_port.mac_address) except AttributeError: self.assertIsNone(router.gw_port) self.assertEqual(expected_enable_snat, router.enable_snat) def test_update_router_gw_with_gw_info_none(self): self._test_update_router_gw(current_enable_snat=True) def test_update_router_gw_without_info_and_snat_disabled_previously(self): self._test_update_router_gw(current_enable_snat=False) def test_update_router_gw_with_network_only(self): info = {'network_id': self.ext_net_id} self._test_update_router_gw(current_enable_snat=True, gw_info=info) def test_update_router_gw_with_network_and_snat_disabled_previously(self): info = {'network_id': self.ext_net_id} self._test_update_router_gw(current_enable_snat=False, gw_info=info) def test_update_router_gw_with_snat_disabled(self): info = {'network_id': self.ext_net_id, 'enable_snat': False} self._test_update_router_gw( current_enable_snat=True, gw_info=info, expected_enable_snat=False) def test_update_router_gw_with_snat_enabled(self): info = {'network_id': self.ext_net_id, 'enable_snat': True} self._test_update_router_gw(current_enable_snat=False, gw_info=info) def test_make_router_dict_no_ext_gw(self): self._reset_ext_gw() router_dict = self.target_object._make_router_dict(self.router) self.assertIsNone(router_dict[l3_apidef.EXTERNAL_GW_INFO]) def test_make_router_dict_with_ext_gw(self): router_dict = self.target_object._make_router_dict(self.router) self.assertEqual({'network_id': self.ext_net_id, 'enable_snat': True, 'external_fixed_ips': []}, router_dict[l3_apidef.EXTERNAL_GW_INFO]) def test_make_router_dict_with_ext_gw_snat_disabled(self): self.router.enable_snat = False router_dict = self.target_object._make_router_dict(self.router) self.assertEqual({'network_id': self.ext_net_id, 'enable_snat': False, 'external_fixed_ips': []}, router_dict[l3_apidef.EXTERNAL_GW_INFO]) def test_build_routers_list_no_ext_gw(self): self._reset_ext_gw() router_dict = self.target_object._make_router_dict(self.router) routers = self.target_object._build_routers_list(self.context, [router_dict], []) self.assertEqual(1, len(routers)) router = routers[0] self.assertIsNone(router.get('gw_port')) self.assertIsNone(router.get('enable_snat')) def test_build_routers_list_with_ext_gw(self): router_dict = self.target_object._make_router_dict(self.router) routers = self.target_object._build_routers_list( self.context, [router_dict], self._get_gwports_dict([self.router.gw_port])) self.assertEqual(1, len(routers)) 
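# NOTE: descriptive comment added for clarity. _build_routers_list() pairs
# each router dict with its gateway port from the gw_ports mapping passed in;
# when a match is found, the resulting entry is expected to expose both
# 'gw_port' and the router's effective 'enable_snat' value, which the
# assertions below verify.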
router = routers[0] self.assertIsNotNone(router.get('gw_port')) self.assertEqual(FAKE_GW_PORT_ID, router['gw_port']['id']) self.assertTrue(router.get('enable_snat')) def test_build_routers_list_with_ext_gw_snat_disabled(self): self.router.enable_snat = False router_dict = self.target_object._make_router_dict(self.router) routers = self.target_object._build_routers_list( self.context, [router_dict], self._get_gwports_dict([self.router.gw_port])) self.assertEqual(1, len(routers)) router = routers[0] self.assertIsNotNone(router.get('gw_port')) self.assertEqual(FAKE_GW_PORT_ID, router['gw_port']['id']) self.assertFalse(router.get('enable_snat')) def test_build_routers_list_with_gw_port_mismatch(self): router_dict = self.target_object._make_router_dict(self.router) routers = self.target_object._build_routers_list( self.context, [router_dict], {}) self.assertEqual(1, len(routers)) router = routers[0] self.assertIsNone(router.get('gw_port')) self.assertIsNone(router.get('enable_snat')) class ExtGwModeIntTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase, test_l3.L3NatTestCaseMixin): def setUp(self, plugin=None, svc_plugins=None, ext_mgr=None): plugin = plugin or ( 'neutron.tests.unit.extensions.test_l3_ext_gw_mode.' 'TestDbIntPlugin') # for these tests we need to enable overlapping ips cfg.CONF.set_default('allow_overlapping_ips', True) ext_mgr = ext_mgr or TestExtensionManager() super(ExtGwModeIntTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr, service_plugins=svc_plugins) def _set_router_external_gateway(self, router_id, network_id, snat_enabled=None, expected_code=exc.HTTPOk.code, neutron_context=None): ext_gw_info = {'network_id': network_id} # Need to set enable_snat also if snat_enabled == False if snat_enabled is not None: ext_gw_info['enable_snat'] = snat_enabled return self._update('routers', router_id, {'router': {'external_gateway_info': ext_gw_info}}, expected_code=expected_code, neutron_context=neutron_context) def test_router_gateway_set_fail_after_port_create(self): with self.router() as r, self.subnet() as s: ext_net_id = s['subnet']['network_id'] self._set_net_external(ext_net_id) plugin = directory.get_plugin() with mock.patch.object(plugin, '_get_port', side_effect=ValueError()): self._set_router_external_gateway(r['router']['id'], ext_net_id, expected_code=500) ports = [p for p in plugin.get_ports(nctx.get_admin_context()) if p['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_GW] self.assertFalse(ports) def test_router_gateway_set_retry(self): with self.router() as r, self.subnet() as s: ext_net_id = s['subnet']['network_id'] self._set_net_external(ext_net_id) with mock.patch.object( l3_db.L3_NAT_dbonly_mixin, '_validate_gw_info', side_effect=[db_exc.RetryRequest(None), ext_net_id]): self._set_router_external_gateway(r['router']['id'], ext_net_id) res = self._show('routers', r['router']['id'])['router'] self.assertEqual(ext_net_id, res['external_gateway_info']['network_id']) def test_router_create_with_gwinfo_invalid_ext_ip(self): with self.subnet() as s: self._set_net_external(s['subnet']['network_id']) ext_info = { 'network_id': s['subnet']['network_id'], 'external_fixed_ips': [{'ip_address': '10.0.0.'}] } error_code = exc.HTTPBadRequest.code res = self._create_router( self.fmt, _uuid(), arg_list=('external_gateway_info',), external_gateway_info=ext_info, expected_code=error_code ) msg = ("Invalid input for external_gateway_info. " "Reason: '10.0.0.' 
is not a valid IP address.") body = jsonutils.loads(res.body) self.assertEqual(msg, body['NeutronError']['message']) def test_router_create_show_no_ext_gwinfo(self): name = 'router1' tenant_id = _uuid() expected_value = [('name', name), ('tenant_id', tenant_id), ('admin_state_up', True), ('status', 'ACTIVE'), ('external_gateway_info', None)] with self.router(name=name, admin_state_up=True, tenant_id=tenant_id) as router: res = self._show('routers', router['router']['id']) for k, v in expected_value: self.assertEqual(res['router'][k], v) def _test_router_create_show_ext_gwinfo(self, snat_input_value, snat_expected_value): name = 'router1' tenant_id = _uuid() with self.subnet() as s: ext_net_id = s['subnet']['network_id'] self._set_net_external(ext_net_id) input_value = {'network_id': ext_net_id} if snat_input_value in (True, False): input_value['enable_snat'] = snat_input_value expected_value = [('name', name), ('tenant_id', tenant_id), ('admin_state_up', True), ('status', 'ACTIVE'), ('external_gateway_info', {'network_id': ext_net_id, 'enable_snat': snat_expected_value, 'external_fixed_ips': [{ 'ip_address': mock.ANY, 'subnet_id': s['subnet']['id']}]})] with self.router(name=name, admin_state_up=True, tenant_id=tenant_id, external_gateway_info=input_value) as router: res = self._show('routers', router['router']['id']) for k, v in expected_value: self.assertEqual(res['router'][k], v) def test_router_create_show_ext_gwinfo_default(self): self._test_router_create_show_ext_gwinfo(None, True) def test_router_create_show_ext_gwinfo_with_snat_enabled(self): self._test_router_create_show_ext_gwinfo(True, True) def test_router_create_show_ext_gwinfo_with_snat_disabled(self): self._test_router_create_show_ext_gwinfo(False, False) def _test_router_update_ext_gwinfo(self, snat_input_value, snat_expected_value=False, expected_http_code=exc.HTTPOk.code): with self.router() as r: with self.subnet() as s: try: ext_net_id = s['subnet']['network_id'] self._set_net_external(ext_net_id) self._set_router_external_gateway( r['router']['id'], ext_net_id, snat_enabled=snat_input_value, expected_code=expected_http_code) if expected_http_code != exc.HTTPOk.code: return body = self._show('routers', r['router']['id']) res_gw_info = body['router']['external_gateway_info'] self.assertEqual(ext_net_id, res_gw_info['network_id']) self.assertEqual(snat_expected_value, res_gw_info['enable_snat']) finally: self._remove_external_gateway_from_router( r['router']['id'], ext_net_id) def test_router_update_ext_gwinfo_default(self): self._test_router_update_ext_gwinfo(None, True) def test_router_update_ext_gwinfo_with_snat_enabled(self): self._test_router_update_ext_gwinfo(True, True) def test_router_update_ext_gwinfo_with_snat_disabled(self): self._test_router_update_ext_gwinfo(False, False) def test_router_update_ext_gwinfo_with_invalid_snat_setting(self): self._test_router_update_ext_gwinfo( 'xxx', None, expected_http_code=exc.HTTPBadRequest.code) class ExtGwModeSepTestCase(ExtGwModeIntTestCase): def setUp(self, plugin=None): # Store l3 resource attribute map as it will be updated self._l3_attribute_map_bk = {} for item in l3_apidef.RESOURCE_ATTRIBUTE_MAP: self._l3_attribute_map_bk[item] = ( l3_apidef.RESOURCE_ATTRIBUTE_MAP[item].copy()) plugin = plugin or ( 'neutron.tests.unit.extensions.test_l3.TestNoL3NatPlugin') # the L3 service plugin l3_plugin = ('neutron.tests.unit.extensions.test_l3_ext_gw_mode.' 
'TestDbSepPlugin') svc_plugins = {'l3_plugin_name': l3_plugin} # for these tests we need to enable overlapping ips cfg.CONF.set_default('allow_overlapping_ips', True) super(ExtGwModeSepTestCase, self).setUp(plugin=plugin, svc_plugins=svc_plugins) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_network_ip_availability.py0000644000175000017500000005203700000000000031510 0ustar00coreycorey00000000000000# Copyright 2016 GoDaddy. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import netaddr from neutron_lib import constants import neutron.api.extensions as api_ext import neutron.common.config as config import neutron.extensions import neutron.services.network_ip_availability.plugin as plugin_module import neutron.tests.unit.db.test_db_base_plugin_v2 as test_db_base_plugin_v2 API_RESOURCE = 'network-ip-availabilities' IP_AVAIL_KEY = 'network_ip_availability' IP_AVAILS_KEY = 'network_ip_availabilities' EXTENSIONS_PATH = ':'.join(neutron.extensions.__path__) PLUGIN_NAME = '%s.%s' % (plugin_module.NetworkIPAvailabilityPlugin.__module__, plugin_module.NetworkIPAvailabilityPlugin.__name__) class TestNetworkIPAvailabilityAPI( test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def setUp(self): svc_plugins = {'plugin_name': PLUGIN_NAME} super(TestNetworkIPAvailabilityAPI, self).setUp( service_plugins=svc_plugins) self.plugin = plugin_module.NetworkIPAvailabilityPlugin() ext_mgr = api_ext.PluginAwareExtensionManager( EXTENSIONS_PATH, {"network-ip-availability": self.plugin} ) app = config.load_paste_app('extensions_test_app') self.ext_api = api_ext.ExtensionMiddleware(app, ext_mgr=ext_mgr) def _validate_availability(self, network, availability, expected_used_ips, expected_total_ips=253): self.assertEqual(network['name'], availability['network_name']) self.assertEqual(network['id'], availability['network_id']) self.assertEqual(expected_used_ips, availability['used_ips']) self.assertEqual(expected_total_ips, availability['total_ips']) def _validate_from_availabilities(self, availabilities, wrapped_network, expected_used_ips, expected_total_ips=253): network = wrapped_network['network'] availability = self._find_availability(availabilities, network['id']) self.assertIsNotNone(availability) self._validate_availability(network, availability, expected_used_ips=expected_used_ips, expected_total_ips=expected_total_ips) def test_usages_query_list_with_fields_total_ips(self): with self.network() as net: with self.subnet(network=net): # list by query fields: total_ips params = 'fields=total_ips' request = self.new_list_request(API_RESOURCE, params=params) response = self.deserialize(self.fmt, request.get_response(self.ext_api)) self.assertIn(IP_AVAILS_KEY, response) self.assertEqual(1, len(response[IP_AVAILS_KEY])) availability = response[IP_AVAILS_KEY][0] self.assertIn('total_ips', availability) self.assertEqual(253, availability['total_ips']) self.assertNotIn('network_id', availability) def 
test_usages_query_show_with_fields_total_ips(self): with self.network() as net: with self.subnet(network=net): network = net['network'] # Show by query fields: total_ips params = ['total_ips'] request = self.new_show_request(API_RESOURCE, network['id'], fields=params) response = self.deserialize( self.fmt, request.get_response(self.ext_api)) self.assertIn(IP_AVAIL_KEY, response) availability = response[IP_AVAIL_KEY] self.assertIn('total_ips', availability) self.assertEqual(253, availability['total_ips']) self.assertNotIn('network_id', availability) @staticmethod def _find_availability(availabilities, net_id): for ip_availability in availabilities: if net_id == ip_availability['network_id']: return ip_availability def test_basic(self): with self.network() as net: with self.subnet(network=net): network = net['network'] # Get ALL request = self.new_list_request(API_RESOURCE, self.fmt) response = self.deserialize(self.fmt, request.get_response(self.ext_api)) self.assertIn(IP_AVAILS_KEY, response) self.assertEqual(1, len(response[IP_AVAILS_KEY])) self._validate_from_availabilities(response[IP_AVAILS_KEY], net, 0) # Get single via id request = self.new_show_request(API_RESOURCE, network['id']) response = self.deserialize( self.fmt, request.get_response(self.ext_api)) self.assertIn(IP_AVAIL_KEY, response) usage = response[IP_AVAIL_KEY] self._validate_availability(network, usage, 0) def test_usages_multi_nets_subnets(self): with self.network(name='net1') as n1,\ self.network(name='net2') as n2,\ self.network(name='net3') as n3: # n1 should have 1 subnet, n2 should have none, n3 has 1 with self.subnet(network=n1) as subnet1_1, \ self.subnet(cidr='40.0.0.0/24', network=n3) as subnet3_1: # Consume 3 ports n1, none n2, 2 ports on n3 with self.port(subnet=subnet1_1),\ self.port(subnet=subnet1_1),\ self.port(subnet=subnet1_1),\ self.port(subnet=subnet3_1),\ self.port(subnet=subnet3_1): # Test get ALL request = self.new_list_request(API_RESOURCE) response = self.deserialize( self.fmt, request.get_response(self.ext_api)) self.assertIn(IP_AVAILS_KEY, response) self.assertEqual(3, len(response[IP_AVAILS_KEY])) data = response[IP_AVAILS_KEY] self._validate_from_availabilities(data, n1, 3, 253) self._validate_from_availabilities(data, n2, 0, 0) self._validate_from_availabilities(data, n3, 2, 253) # Test get single via network id network = n1['network'] request = self.new_show_request(API_RESOURCE, network['id']) response = self.deserialize( self.fmt, request.get_response(self.ext_api)) self.assertIn(IP_AVAIL_KEY, response) self._validate_availability(network, response[IP_AVAIL_KEY], 3, 253) def test_usages_multi_nets_subnets_sums(self): with self.network(name='net1') as n1: # n1 has 2 subnets with self.subnet(network=n1) as subnet1_1, \ self.subnet(cidr='40.0.0.0/24', network=n1) as subnet1_2: # Consume 3 ports n1: 1 on subnet 1 and 2 on subnet 2 with self.port(subnet=subnet1_1),\ self.port(subnet=subnet1_2),\ self.port(subnet=subnet1_2): # Get ALL request = self.new_list_request(API_RESOURCE) response = self.deserialize( self.fmt, request.get_response(self.ext_api)) self.assertIn(IP_AVAILS_KEY, response) self.assertEqual(1, len(response[IP_AVAILS_KEY])) self._validate_from_availabilities(response[IP_AVAILS_KEY], n1, 3, 506) # Get single via network id network = n1['network'] request = self.new_show_request(API_RESOURCE, network['id']) response = self.deserialize( self.fmt, request.get_response(self.ext_api)) self.assertIn(IP_AVAIL_KEY, response) self._validate_availability(network, response[IP_AVAIL_KEY],
3, 506) def test_usages_port_consumed_v4(self): with self.network() as net: with self.subnet(network=net) as subnet: request = self.new_list_request(API_RESOURCE) # Consume 2 ports with self.port(subnet=subnet), self.port(subnet=subnet): response = self.deserialize(self.fmt, request.get_response( self.ext_api)) self._validate_from_availabilities(response[IP_AVAILS_KEY], net, 2) def test_usages_query_ip_version_v4(self): with self.network() as net: with self.subnet(network=net): # Get IPv4 params = 'ip_version=%s' % constants.IP_VERSION_4 request = self.new_list_request(API_RESOURCE, params=params) response = self.deserialize(self.fmt, request.get_response(self.ext_api)) self.assertIn(IP_AVAILS_KEY, response) self.assertEqual(1, len(response[IP_AVAILS_KEY])) self._validate_from_availabilities(response[IP_AVAILS_KEY], net, 0) # Get IPv6 should return empty array params = 'ip_version=%s' % constants.IP_VERSION_6 request = self.new_list_request(API_RESOURCE, params=params) response = self.deserialize(self.fmt, request.get_response(self.ext_api)) self.assertEqual(0, len(response[IP_AVAILS_KEY])) def test_usages_query_ip_version_v6(self): cidr_ipv6 = '2001:db8:1002:51::/64' cidr_ipv6_net = netaddr.IPNetwork(cidr_ipv6) with self.network() as net: with self.subnet( network=net, cidr=cidr_ipv6, ip_version=constants.IP_VERSION_6, ipv6_address_mode=constants.DHCPV6_STATELESS): # Get IPv6 params = 'ip_version=%s' % constants.IP_VERSION_6 request = self.new_list_request(API_RESOURCE, params=params) response = self.deserialize(self.fmt, request.get_response(self.ext_api)) self.assertEqual(1, len(response[IP_AVAILS_KEY])) self._validate_from_availabilities( response[IP_AVAILS_KEY], net, 0, cidr_ipv6_net.size - 1) # Get IPv4 should return empty array params = 'ip_version=%s' % constants.IP_VERSION_4 request = self.new_list_request(API_RESOURCE, params=params) response = self.deserialize(self.fmt, request.get_response(self.ext_api)) self.assertEqual(0, len(response[IP_AVAILS_KEY])) def test_usages_ports_consumed_v6(self): cidr_ipv6 = '2001:db8:1002:51::/64' cidr_ipv6_net = netaddr.IPNetwork(cidr_ipv6) with self.network() as net: with self.subnet( network=net, cidr=cidr_ipv6, ip_version=constants.IP_VERSION_6, ipv6_address_mode=constants.DHCPV6_STATELESS) as subnet: request = self.new_list_request(API_RESOURCE) # Consume 3 ports with self.port(subnet=subnet),\ self.port(subnet=subnet), \ self.port(subnet=subnet): response = self.deserialize( self.fmt, request.get_response(self.ext_api)) self._validate_from_availabilities(response[IP_AVAILS_KEY], net, 3, cidr_ipv6_net.size - 1) def test_usages_query_network_id(self): with self.network() as net: with self.subnet(network=net): network = net['network'] test_id = network['id'] # Get by query param: network_id params = 'network_id=%s' % test_id request = self.new_list_request(API_RESOURCE, params=params) response = self.deserialize(self.fmt, request.get_response(self.ext_api)) self.assertIn(IP_AVAILS_KEY, response) self.assertEqual(1, len(response[IP_AVAILS_KEY])) self._validate_from_availabilities(response[IP_AVAILS_KEY], net, 0) # Get by NON-matching query param: network_id params = 'network_id=clearlywontmatch' request = self.new_list_request(API_RESOURCE, params=params) response = self.deserialize(self.fmt, request.get_response(self.ext_api)) self.assertEqual(0, len(response[IP_AVAILS_KEY])) def test_usages_query_network_name(self): test_name = 'net_name_1' with self.network(name=test_name) as net: with self.subnet(network=net): # Get by query param: 
network_name params = 'network_name=%s' % test_name request = self.new_list_request(API_RESOURCE, params=params) response = self.deserialize(self.fmt, request.get_response(self.ext_api)) self.assertIn(IP_AVAILS_KEY, response) self.assertEqual(1, len(response[IP_AVAILS_KEY])) self._validate_from_availabilities(response[IP_AVAILS_KEY], net, 0) # Get by NON-matching query param: network_name params = 'network_name=clearly-wont-match' request = self.new_list_request(API_RESOURCE, params=params) response = self.deserialize(self.fmt, request.get_response(self.ext_api)) self.assertEqual(0, len(response[IP_AVAILS_KEY])) def test_usages_query_tenant_id(self): test_tenant_id = 'a-unique-test-id' with self.network(tenant_id=test_tenant_id) as net: with self.subnet(network=net): # Get by query param: tenant_id params = 'tenant_id=%s' % test_tenant_id request = self.new_list_request(API_RESOURCE, params=params) response = self.deserialize(self.fmt, request.get_response(self.ext_api)) self.assertIn(IP_AVAILS_KEY, response) self.assertEqual(1, len(response[IP_AVAILS_KEY])) self._validate_from_availabilities(response[IP_AVAILS_KEY], net, 0) for net_avail in response[IP_AVAILS_KEY]: self.assertEqual(test_tenant_id, net_avail['tenant_id']) # Get by NON-matching query param: tenant_id params = 'tenant_id=clearly-wont-match' request = self.new_list_request(API_RESOURCE, params=params) response = self.deserialize(self.fmt, request.get_response(self.ext_api)) self.assertEqual(0, len(response[IP_AVAILS_KEY])) def test_usages_query_project_id(self): test_project_id = 'a-unique-project-id' with self.network(tenant_id=test_project_id) as net: with self.subnet(network=net): # Get by query param: project_id params = 'project_id=%s' % test_project_id request = self.new_list_request(API_RESOURCE, params=params) response = self.deserialize(self.fmt, request.get_response(self.ext_api)) self.assertIn(IP_AVAILS_KEY, response) self.assertEqual(1, len(response[IP_AVAILS_KEY])) self._validate_from_availabilities(response[IP_AVAILS_KEY], net, 0) for net_avail in response[IP_AVAILS_KEY]: self.assertEqual(test_project_id, net_avail['project_id']) # Get by NON-matching query param: project_id params = 'project_id=clearly-wont-match' request = self.new_list_request(API_RESOURCE, params=params) response = self.deserialize(self.fmt, request.get_response(self.ext_api)) self.assertEqual(0, len(response[IP_AVAILS_KEY])) def test_usages_multi_net_multi_subnet_46(self): # Setup mixed v4/v6 networks with IPs consumed on each cidr_ipv6 = '2001:db8:1003:52::/64' cidr_ipv6_net = netaddr.IPNetwork(cidr_ipv6) with self.network(name='net-v6-1') as net_v6_1, \ self.network(name='net-v6-2') as net_v6_2, \ self.network(name='net-v4-1') as net_v4_1, \ self.network(name='net-v4-2') as net_v4_2: with self.subnet(network=net_v6_1, cidr='2001:db8:1002:51::/64', ip_version=constants.IP_VERSION_6) as s61, \ self.subnet(network=net_v6_2, cidr=cidr_ipv6, ip_version=constants.IP_VERSION_6) as s62, \ self.subnet(network=net_v4_1, cidr='10.0.0.0/24') as s41, \ self.subnet(network=net_v4_2, cidr='10.0.1.0/24') as s42: with self.port(subnet=s61),\ self.port(subnet=s62), self.port(subnet=s62), \ self.port(subnet=s41), \ self.port(subnet=s42), self.port(subnet=s42): # Verify consumption across all request = self.new_list_request(API_RESOURCE) response = self.deserialize( self.fmt, request.get_response(self.ext_api)) avails_list = response[IP_AVAILS_KEY] self._validate_from_availabilities( avails_list, net_v6_1, 1, cidr_ipv6_net.size - 1) 
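# NOTE: descriptive comment added for clarity. The expected totals here and
# below assume that 'total_ips' reflects the size of the subnet allocation
# pools: an IPv6 /64 yields cidr_ipv6_net.size - 1 addresses (the gateway
# address is excluded), while an IPv4 /24 yields 253 (network, broadcast and
# gateway addresses excluded), matching the earlier single-network tests.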
self._validate_from_availabilities( avails_list, net_v6_2, 2, cidr_ipv6_net.size - 1) self._validate_from_availabilities( avails_list, net_v4_1, 1, 253) self._validate_from_availabilities( avails_list, net_v4_2, 2, 253) # Query by IP versions. Ensure subnet versions match for ip_ver in [constants.IP_VERSION_4, constants.IP_VERSION_6]: params = 'ip_version=%i' % ip_ver request = self.new_list_request(API_RESOURCE, params=params) response = self.deserialize( self.fmt, request.get_response(self.ext_api)) for net_avail in response[IP_AVAILS_KEY]: for sub in net_avail['subnet_ip_availability']: self.assertEqual(ip_ver, sub['ip_version']) # Verify consumption querying 2 network ids (IN clause) request = self.new_list_request( API_RESOURCE, params='network_id=%s&network_id=%s' % (net_v4_2['network']['id'], net_v6_2['network']['id'])) response = self.deserialize( self.fmt, request.get_response(self.ext_api)) avails_list = response[IP_AVAILS_KEY] self._validate_from_availabilities( avails_list, net_v6_2, 2, cidr_ipv6_net.size - 1) self._validate_from_availabilities( avails_list, net_v4_2, 2, 253) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_network_segment_range.py0000644000175000017500000004017200000000000031161 0ustar00coreycorey00000000000000# Copyright (c) 2019 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib import constants from neutron_lib import context from oslo_config import cfg import webob.exc from neutron.db import db_base_plugin_v2 from neutron.db import segments_db from neutron.extensions import network_segment_range as ext_range from neutron.services.network_segment_range import plugin as plugin_range from neutron.tests.unit.db import test_db_base_plugin_v2 SERVICE_PLUGIN_KLASS = ('neutron.services.network_segment_range.plugin.' 'NetworkSegmentRangePlugin') TEST_PLUGIN_KLASS = ( 'neutron.tests.unit.extensions.test_network_segment_range.' 'NetworkSegmentRangeTestPlugin') TARGET_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin' class NetworkSegmentRangeExtensionManager(object): def get_resources(self): return ext_range.Network_segment_range.get_resources() def get_actions(self): return [] def get_request_extensions(self): return [] class NetworkSegmentRangeTestBase(test_db_base_plugin_v2. 
NeutronDbPluginV2TestCase): def _create_network_segment_range(self, fmt, expected_res_status=None, **kwargs): network_segment_range = {'network_segment_range': {}} for k, v in kwargs.items(): network_segment_range['network_segment_range'][k] = str(v) network_segment_range_req = self.new_create_request( 'network-segment-ranges', network_segment_range, fmt) network_segment_range_res = network_segment_range_req.get_response( self.ext_api) if expected_res_status: self.assertEqual(expected_res_status, network_segment_range_res.status_int) return network_segment_range_res def network_segment_range(self, **kwargs): res = self._create_network_segment_range(self.fmt, **kwargs) if res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError(code=res.status_int) return self.deserialize(self.fmt, res) def _test_create_network_segment_range(self, expected=None, **kwargs): network_segment_range = self.network_segment_range(**kwargs) self._validate_resource(network_segment_range, kwargs, 'network_segment_range') if expected: self._compare_resource(network_segment_range, expected, 'network_segment_range') return network_segment_range def _test_update_network_segment_range(self, range_id, data, expected=None): update_req = self.new_update_request( 'network-segment-ranges', data, range_id) update_res = update_req.get_response(self.ext_api) if expected: network_segment_range = self.deserialize(self.fmt, update_res) self._compare_resource(network_segment_range, expected, 'network_segment_range') return network_segment_range return update_res class NetworkSegmentRangeTestPlugin(db_base_plugin_v2.NeutronDbPluginV2, plugin_range.NetworkSegmentRangePlugin): """Test plugin to mixin the network segment range extension.""" __native_pagination_support = True __native_sorting_support = True __filter_validation_support = True supported_extension_aliases = ["provider", "network-segment-range"] def __init__(self): super(NetworkSegmentRangeTestPlugin, self).__init__() self.type_manager = mock.Mock() class TestNetworkSegmentRange(NetworkSegmentRangeTestBase): def setUp(self, plugin=None): if not plugin: plugin = TEST_PLUGIN_KLASS service_plugins = {'network_segment_range_plugin_name': SERVICE_PLUGIN_KLASS} cfg.CONF.set_override('service_plugins', [SERVICE_PLUGIN_KLASS]) ext_mgr = NetworkSegmentRangeExtensionManager() super(TestNetworkSegmentRange, self).setUp( plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) def _test_create_network_segment_range(self, expected=None, **kwargs): for d in (kwargs, expected): if d is None: continue d.setdefault('name', '') d.setdefault('shared', True) d.setdefault('project_id', None) d.setdefault('network_type', constants.TYPE_VLAN) d.setdefault('physical_network', 'phys_net') d.setdefault('minimum', 200) d.setdefault('maximum', 300) return (super(TestNetworkSegmentRange, self). 
_test_create_network_segment_range(expected, **kwargs)) def test_create_network_segment_range_empty_name(self): expected_range = {'name': '', 'shared': True, 'project_id': None, 'network_type': constants.TYPE_VLAN, 'physical_network': 'phys_net', 'minimum': 200, 'maximum': 300} self._test_create_network_segment_range(expected=expected_range) def test_create_network_segment_range_with_name(self): expected_range = {'name': 'foo-range-name', 'shared': True, 'project_id': None, 'network_type': constants.TYPE_VLAN, 'physical_network': 'phys_net', 'minimum': 200, 'maximum': 300} self._test_create_network_segment_range( name='foo-range-name', expected=expected_range) def test_create_network_segment_range_unsupported_network_type(self): exc = self.assertRaises(webob.exc.HTTPClientError, self._test_create_network_segment_range, network_type='foo-network-type') self.assertEqual(webob.exc.HTTPClientError.code, exc.code) self.assertIn('The server could not comply with the request', exc.explanation) def test_create_network_segment_range_no_physical_network(self): expected_range = {'shared': True, 'project_id': None, 'network_type': constants.TYPE_VXLAN, 'physical_network': None} self._test_create_network_segment_range( network_type=constants.TYPE_VXLAN, physical_network=None, expected=expected_range) def test_create_network_segment_range_tenant_specific(self): expected_range = {'shared': False, 'project_id': test_db_base_plugin_v2.TEST_TENANT_ID, 'network_type': constants.TYPE_VLAN, 'physical_network': 'phys_net', 'minimum': 200, 'maximum': 300} self._test_create_network_segment_range( shared=False, project_id=test_db_base_plugin_v2.TEST_TENANT_ID, network_type=constants.TYPE_VLAN, physical_network='phys_net', expected=expected_range) def test_create_network_segment_ranges_in_certain_order(self): ctx = context.get_admin_context() range1 = self._test_create_network_segment_range( name='foo-range1', physical_network='phys_net1') range2 = self._test_create_network_segment_range( name='foo-range2', physical_network='phys_net2') range3 = self._test_create_network_segment_range( name='foo-range3', physical_network='phys_net3') network_segment_ranges = ( NetworkSegmentRangeTestPlugin.get_network_segment_ranges( NetworkSegmentRangeTestPlugin(), ctx)) self.assertEqual(range1['network_segment_range']['id'], network_segment_ranges[0]['id']) self.assertEqual(range2['network_segment_range']['id'], network_segment_ranges[1]['id']) self.assertEqual(range3['network_segment_range']['id'], network_segment_ranges[2]['id']) def test_create_network_segment_range_failed_with_vlan_minimum_id(self): exc = self.assertRaises(webob.exc.HTTPClientError, self._test_create_network_segment_range, minimum=0) self.assertEqual(webob.exc.HTTPClientError.code, exc.code) self.assertIn('The server could not comply with the request', exc.explanation) def test_create_network_segment_range_failed_with_vlan_maximum_id(self): # 4095 is one above the largest valid VLAN id (4094), so the # request must be rejected with a client error. exc = self.assertRaises(webob.exc.HTTPClientError, self._test_create_network_segment_range, maximum=4095) self.assertEqual(webob.exc.HTTPClientError.code, exc.code) self.assertIn('The server could not comply with the request', exc.explanation) def test_create_network_segment_range_failed_with_tunnel_minimum_id(self): tunnel_type = [constants.TYPE_VXLAN, constants.TYPE_GRE, constants.TYPE_GENEVE] for network_type in tunnel_type: exc = self.assertRaises(webob.exc.HTTPClientError, self._test_create_network_segment_range, network_type=network_type, physical_network=None, minimum=0)
self.assertEqual(webob.exc.HTTPClientError.code, exc.code) self.assertIn('The server could not comply with the request', exc.explanation) def test_create_network_segment_range_failed_with_tunnel_maximum_id(self): # Each maximum below is one past the largest valid segmentation id # for its tunnel type, so every creation attempt must fail with a # client error. expected_res = [(constants.TYPE_VXLAN, 2 ** 24), (constants.TYPE_GRE, 2 ** 32), (constants.TYPE_GENEVE, 2 ** 24)] for network_type, max_id in expected_res: exc = self.assertRaises(webob.exc.HTTPClientError, self._test_create_network_segment_range, network_type=network_type, physical_network=None, maximum=max_id) self.assertEqual(webob.exc.HTTPClientError.code, exc.code) self.assertIn('The server could not comply with the request', exc.explanation) def test_update_network_segment_range_set_name(self): network_segment_range = self._test_create_network_segment_range() with mock.patch.object(segments_db, 'min_max_actual_segments_in_range', return_value=(None, None)): result = self._update( 'network-segment-ranges', network_segment_range['network_segment_range']['id'], {'network_segment_range': {'name': 'foo-name'}}, expected_code=webob.exc.HTTPOk.code) self.assertEqual('foo-name', result['network_segment_range']['name']) def test_update_network_segment_range_set_name_to_empty(self): network_segment_range = self._test_create_network_segment_range( name='foo-range-name') with mock.patch.object(segments_db, 'min_max_actual_segments_in_range', return_value=(None, None)): result = self._update( 'network-segment-ranges', network_segment_range['network_segment_range']['id'], {'network_segment_range': {'name': ''}}, expected_code=webob.exc.HTTPOk.code) self.assertEqual('', result['network_segment_range']['name']) def test_update_network_segment_range_min_max(self): network_segment_range = self._test_create_network_segment_range() with mock.patch.object(segments_db, 'min_max_actual_segments_in_range', return_value=(None, None)): result = self._update( 'network-segment-ranges', network_segment_range['network_segment_range']['id'], {'network_segment_range': {'minimum': 1200, 'maximum': 1300}}, expected_code=webob.exc.HTTPOk.code) self.assertEqual(1200, result['network_segment_range']['minimum']) self.assertEqual(1300, result['network_segment_range']['maximum']) def test_get_network_segment_range(self): network_segment_range = self._test_create_network_segment_range() req = self.new_show_request( 'network-segment-ranges', network_segment_range['network_segment_range']['id']) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) self.assertEqual( network_segment_range['network_segment_range']['id'], res['network_segment_range']['id']) def test_list_network_segment_ranges(self): self._test_create_network_segment_range(name='foo-range1') self._test_create_network_segment_range( name='foo-range2', minimum=400, maximum=500) res = self._list('network-segment-ranges') self.assertEqual(2, len(res['network_segment_ranges'])) def test_list_network_segment_ranges_with_sort(self): range1 = self._test_create_network_segment_range( name='foo-range1', physical_network='phys_net1') range2 = self._test_create_network_segment_range( name='foo-range2', physical_network='phys_net2') self._test_list_with_sort('network-segment-range', (range2, range1), [('name', 'desc')]) def test_list_network_segment_ranges_with_pagination(self): range1 = self._test_create_network_segment_range( name='foo-range1', physical_network='phys_net1') range2 = self._test_create_network_segment_range( name='foo-range2',
physical_network='phys_net2') range3 = self._test_create_network_segment_range( name='foo-range3', physical_network='phys_net3') self._test_list_with_pagination( 'network-segment-range', (range1, range2, range3), ('name', 'asc'), 2, 2) def test_list_network_segment_ranges_with_pagination_reverse(self): range1 = self._test_create_network_segment_range( name='foo-range1', physical_network='phys_net1') range2 = self._test_create_network_segment_range( name='foo-range2', physical_network='phys_net2') range3 = self._test_create_network_segment_range( name='foo-range3', physical_network='phys_net3') self._test_list_with_pagination_reverse( 'network-segment-range', (range1, range2, range3), ('name', 'asc'), 2, 2) def test_delete_network_segment_range(self): network_segment_range = self._test_create_network_segment_range() with mock.patch.object(segments_db, 'network_segments_exist_in_range', return_value=False): self._delete('network-segment-ranges', network_segment_range['network_segment_range']['id']) self._show('network-segment-ranges', network_segment_range['network_segment_range']['id'], expected_code=webob.exc.HTTPNotFound.code) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_portsecurity.py0000644000175000017500000004676300000000000027362 0ustar00coreycorey00000000000000# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from neutron_lib.api.definitions import port_security as psec from neutron_lib.api import validators from neutron_lib import context from neutron_lib.db import api as db_api from neutron_lib.db import utils as db_utils from neutron_lib.exceptions import port_security as psec_exc from neutron_lib.plugins import directory from webob import exc from neutron.db import db_base_plugin_v2 from neutron.db import portsecurity_db from neutron.db import securitygroups_db from neutron.extensions import securitygroup as ext_sg from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit.extensions import test_securitygroup DB_PLUGIN_KLASS = ('neutron.tests.unit.extensions.test_portsecurity.' 
'PortSecurityTestPlugin') class PortSecurityTestCase(test_securitygroup.SecurityGroupsTestCase, test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def setUp(self, plugin=None): self._backup = copy.deepcopy(ext_sg.RESOURCE_ATTRIBUTE_MAP) self.addCleanup(self._restore) ext_mgr = ( test_securitygroup.SecurityGroupTestExtensionManager()) super(PortSecurityTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) # Check if a plugin supports security groups plugin_obj = directory.get_plugin() self._skip_security_group = ('security-group' not in plugin_obj.supported_extension_aliases) def _restore(self): ext_sg.RESOURCE_ATTRIBUTE_MAP = self._backup class PortSecurityTestPlugin(db_base_plugin_v2.NeutronDbPluginV2, securitygroups_db.SecurityGroupDbMixin, portsecurity_db.PortSecurityDbMixin): """Test plugin that implements necessary calls on create/delete port for associating ports with security groups and port security. """ supported_extension_aliases = ["security-group", psec.ALIAS] def create_network(self, context, network): tenant_id = network['network'].get('tenant_id') self._ensure_default_security_group(context, tenant_id) with db_api.CONTEXT_WRITER.using(context): neutron_db = super(PortSecurityTestPlugin, self).create_network( context, network) neutron_db.update(network['network']) self._process_network_port_security_create( context, network['network'], neutron_db) return neutron_db def update_network(self, context, id, network): with db_api.CONTEXT_WRITER.using(context): neutron_db = super(PortSecurityTestPlugin, self).update_network( context, id, network) if psec.PORTSECURITY in network['network']: self._process_network_port_security_update( context, network['network'], neutron_db) return neutron_db def get_network(self, context, id, fields=None): with db_api.CONTEXT_READER.using(context): net = super(PortSecurityTestPlugin, self).get_network( context, id) return db_utils.resource_fields(net, fields) def create_port(self, context, port): p = port['port'] neutron_db = super(PortSecurityTestPlugin, self).create_port( context, port) p.update(neutron_db) (port_security, has_ip) = self._determine_port_security_and_has_ip( context, p) p[psec.PORTSECURITY] = port_security self._process_port_port_security_create(context, p, neutron_db) if (validators.is_attr_set(p.get(ext_sg.SECURITYGROUPS)) and not (port_security and has_ip)): raise psec_exc.PortSecurityAndIPRequiredForSecurityGroups() # Port requires ip and port_security enabled for security group if has_ip and port_security: self._ensure_default_security_group_on_port(context, port) sgs = self._get_security_groups_on_port(context, port) p[ext_sg.SECURITYGROUPS] = [sg['id'] for sg in sgs] if sgs else None if (p.get(ext_sg.SECURITYGROUPS) and p[psec.PORTSECURITY]): self._process_port_create_security_group( context, p, sgs) return port['port'] def update_port(self, context, id, port): delete_security_groups = self._check_update_deletes_security_groups( port) has_security_groups = self._check_update_has_security_groups(port) with db_api.CONTEXT_WRITER.using(context): ret_port = super(PortSecurityTestPlugin, self).update_port( context, id, port) # copy values over - but not fixed_ips port['port'].pop('fixed_ips', None) ret_port.update(port['port']) # populate port_security setting if psec.PORTSECURITY not in ret_port: ret_port[psec.PORTSECURITY] = self._get_port_security_binding( context, id) has_ip = self._ip_on_port(ret_port) # checks if security groups were updated adding/modifying # security groups, port security is set and port has ip if 
(has_security_groups and (not ret_port[psec.PORTSECURITY] or not has_ip)): raise psec_exc.PortSecurityAndIPRequiredForSecurityGroups() # Port security/IP was updated off. Need to check that no security # groups are on port. if ret_port[psec.PORTSECURITY] is not True or not has_ip: if has_security_groups: raise psec_exc.PortSecurityAndIPRequiredForSecurityGroups() # get security groups on port filters = {'port_id': [id]} security_groups = (super(PortSecurityTestPlugin, self). _get_port_security_group_bindings( context, filters)) if security_groups and not delete_security_groups: raise psec_exc.PortSecurityPortHasSecurityGroup() if (delete_security_groups or has_security_groups): # delete the port binding and read it with the new rules. self._delete_port_security_group_bindings(context, id) sgs = self._get_security_groups_on_port(context, port) # process port create sec groups needs port id port['id'] = id self._process_port_create_security_group(context, ret_port, sgs) if psec.PORTSECURITY in port['port']: self._process_port_port_security_update( context, port['port'], ret_port) return ret_port class PortSecurityDBTestCase(PortSecurityTestCase): def setUp(self, plugin=None, service_plugins=None): plugin = plugin or DB_PLUGIN_KLASS super(PortSecurityDBTestCase, self).setUp(plugin) class TestPortSecurity(PortSecurityDBTestCase): def test_create_network_with_portsecurity_mac(self): res = self._create_network('json', 'net1', True) net = self.deserialize('json', res) self.assertTrue(net['network'][psec.PORTSECURITY]) def test_create_network_with_portsecurity_false(self): res = self._create_network('json', 'net1', True, arg_list=('port_security_enabled',), port_security_enabled=False) net = self.deserialize('json', res) self.assertFalse(net['network'][psec.PORTSECURITY]) def test_updating_network_port_security(self): res = self._create_network('json', 'net1', True, port_security_enabled='True') net = self.deserialize('json', res) self.assertTrue(net['network'][psec.PORTSECURITY]) update_net = {'network': {psec.PORTSECURITY: False}} req = self.new_update_request('networks', update_net, net['network']['id']) net = self.deserialize('json', req.get_response(self.api)) self.assertFalse(net['network'][psec.PORTSECURITY]) req = self.new_show_request('networks', net['network']['id']) net = self.deserialize('json', req.get_response(self.api)) self.assertFalse(net['network'][psec.PORTSECURITY]) def test_create_port_default_true(self): with self.network() as net: res = self._create_port('json', net['network']['id']) port = self.deserialize('json', res) self.assertTrue(port['port'][psec.PORTSECURITY]) self._delete('ports', port['port']['id']) def test_create_port_passing_true(self): res = self._create_network('json', 'net1', True, arg_list=('port_security_enabled',), port_security_enabled=True) net = self.deserialize('json', res) res = self._create_port('json', net['network']['id']) port = self.deserialize('json', res) self.assertTrue(port['port'][psec.PORTSECURITY]) self._delete('ports', port['port']['id']) def test_create_port_on_port_security_false_network(self): res = self._create_network('json', 'net1', True, arg_list=('port_security_enabled',), port_security_enabled=False) net = self.deserialize('json', res) res = self._create_port('json', net['network']['id']) port = self.deserialize('json', res) self.assertFalse(port['port'][psec.PORTSECURITY]) self._delete('ports', port['port']['id']) def test_create_port_security_overrides_network_value(self): res = self._create_network('json', 'net1', True, 
arg_list=('port_security_enabled',), port_security_enabled=False) net = self.deserialize('json', res) res = self._create_port('json', net['network']['id'], arg_list=('port_security_enabled',), port_security_enabled=True) port = self.deserialize('json', res) self.assertTrue(port['port'][psec.PORTSECURITY]) self._delete('ports', port['port']['id']) def test_create_port_fails_with_secgroup_and_port_security_false(self): if self._skip_security_group: self.skipTest("Plugin does not support security groups") with self.network() as net: with self.subnet(network=net): security_group = self.deserialize( 'json', self._create_security_group(self.fmt, 'asdf', 'asdf')) security_group_id = security_group['security_group']['id'] res = self._create_port('json', net['network']['id'], arg_list=('security_groups', 'port_security_enabled'), security_groups=[security_group_id], port_security_enabled=False) self.assertEqual(400, res.status_int) def test_create_port_with_default_security_group(self): if self._skip_security_group: self.skipTest("Plugin does not support security groups") with self.network() as net: with self.subnet(network=net): res = self._create_port('json', net['network']['id']) port = self.deserialize('json', res) self.assertTrue(port['port'][psec.PORTSECURITY]) self.assertEqual(1, len(port['port'][ext_sg.SECURITYGROUPS])) self._delete('ports', port['port']['id']) def test_create_port_with_security_group_and_net_sec_false(self): # This tests that port_security_enabled is true when creating # a port on a network that is marked as port_security_enabled=False # that has a subnet and security_groups are passed it. if self._skip_security_group: self.skipTest("Plugin does not support security groups") res = self._create_network('json', 'net1', True, arg_list=('port_security_enabled',), port_security_enabled=False) net = self.deserialize('json', res) self._create_subnet('json', net['network']['id'], '10.0.0.0/24') security_group = self.deserialize( 'json', self._create_security_group(self.fmt, 'asdf', 'asdf')) security_group_id = security_group['security_group']['id'] res = self._create_port('json', net['network']['id'], arg_list=('security_groups', 'port_security_enabled'), port_security_enabled=True, security_groups=[security_group_id]) port = self.deserialize('json', res) self.assertTrue(port['port'][psec.PORTSECURITY]) self.assertEqual(port['port']['security_groups'], [security_group_id]) self._delete('ports', port['port']['id']) def test_create_port_without_security_group_and_net_sec_false(self): res = self._create_network('json', 'net1', True, arg_list=('port_security_enabled',), port_security_enabled=False) net = self.deserialize('json', res) self._create_subnet('json', net['network']['id'], '10.0.0.0/24') res = self._create_port('json', net['network']['id']) port = self.deserialize('json', res) self.assertFalse(port['port'][psec.PORTSECURITY]) self._delete('ports', port['port']['id']) def test_update_port_security_off_with_security_group(self): if self._skip_security_group: self.skipTest("Plugin does not support security groups") with self.network() as net: with self.subnet(network=net): res = self._create_port('json', net['network']['id']) port = self.deserialize('json', res) self.assertTrue(port['port'][psec.PORTSECURITY]) update_port = {'port': {psec.PORTSECURITY: False}} req = self.new_update_request('ports', update_port, port['port']['id']) res = req.get_response(self.api) self.assertEqual(409, res.status_int) # remove security group on port update_port = {'port': {ext_sg.SECURITYGROUPS: 
None}} req = self.new_update_request('ports', update_port, port['port']['id']) self.deserialize('json', req.get_response(self.api)) self._delete('ports', port['port']['id']) def test_update_port_remove_port_security_security_group(self): if self._skip_security_group: self.skipTest("Plugin does not support security groups") with self.network() as net: with self.subnet(network=net): res = self._create_port('json', net['network']['id'], arg_list=('port_security_enabled',), port_security_enabled=True) port = self.deserialize('json', res) self.assertTrue(port['port'][psec.PORTSECURITY]) # remove security group on port update_port = {'port': {ext_sg.SECURITYGROUPS: None, psec.PORTSECURITY: False}} req = self.new_update_request('ports', update_port, port['port']['id']) port = self.deserialize('json', req.get_response(self.api)) self.assertFalse(port['port'][psec.PORTSECURITY]) self.assertEqual(0, len(port['port'][ext_sg.SECURITYGROUPS])) self._delete('ports', port['port']['id']) def test_update_port_remove_port_security_security_group_read(self): if self._skip_security_group: self.skipTest("Plugin does not support security groups") with self.network() as net: with self.subnet(network=net): res = self._create_port('json', net['network']['id'], arg_list=('port_security_enabled',), port_security_enabled=True) port = self.deserialize('json', res) self.assertTrue(port['port'][psec.PORTSECURITY]) # remove security group on port update_port = {'port': {ext_sg.SECURITYGROUPS: None, psec.PORTSECURITY: False}} req = self.new_update_request('ports', update_port, port['port']['id']) self.deserialize('json', req.get_response(self.api)) sg_id = port['port'][ext_sg.SECURITYGROUPS] update_port = {'port': {ext_sg.SECURITYGROUPS: [sg_id[0]], psec.PORTSECURITY: True}} req = self.new_update_request('ports', update_port, port['port']['id']) port = self.deserialize('json', req.get_response(self.api)) self.assertTrue(port['port'][psec.PORTSECURITY]) self.assertEqual(1, len(port['port'][ext_sg.SECURITYGROUPS])) self._delete('ports', port['port']['id']) def test_create_port_security_off_shared_network(self): with self.network(shared=True) as net: with self.subnet(network=net): res = self._create_port('json', net['network']['id'], arg_list=('port_security_enabled',), port_security_enabled=False, tenant_id='not_network_owner', set_context=True) self.deserialize('json', res) self.assertEqual(403, res.status_int) def test_update_port_security_off_shared_network(self): with self.network(shared=True) as net: with self.subnet(network=net): res = self._create_port('json', net['network']['id'], tenant_id='not_network_owner', set_context=True) port = self.deserialize('json', res) # remove security group on port update_port = {'port': {ext_sg.SECURITYGROUPS: None, psec.PORTSECURITY: False}} req = self.new_update_request('ports', update_port, port['port']['id']) req.environ['neutron.context'] = context.Context( '', 'not_network_owner') res = req.get_response(self.api) self.assertEqual(exc.HTTPForbidden.code, res.status_int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_providernet.py0000644000175000017500000001626300000000000027137 0ustar00coreycorey00000000000000# Copyright 2013 VMware # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib.api.definitions import provider_net from neutron_lib import context from neutron_lib import fixture from neutron_lib.plugins import constants from neutron_lib.plugins import directory from oslo_config import cfg from oslo_utils import uuidutils from webob import exc as web_exc import webtest from neutron.api import extensions from neutron.api.v2 import router from neutron.extensions import providernet as pnet from neutron import quota from neutron.tests import tools from neutron.tests.unit.api import test_extensions from neutron.tests.unit.api.v2 import test_base from neutron.tests.unit import testlib_api class ProviderExtensionManager(object): def get_resources(self): return [] def get_actions(self): return [] def get_request_extensions(self): return [] def get_extended_resources(self, version): return pnet.Providernet().get_extended_resources(version) class ProvidernetExtensionTestCase(testlib_api.WebTestCase): fmt = 'json' def setUp(self): super(ProvidernetExtensionTestCase, self).setUp() plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2' # Ensure existing ExtensionManager is not used extensions.PluginAwareExtensionManager._instance = None self.useFixture(fixture.APIDefinitionFixture()) # Update the plugin and extensions path self.setup_coreplugin(plugin, load_plugins=False) self._plugin_patcher = mock.patch(plugin, autospec=True) self.plugin = self._plugin_patcher.start() # Ensure Quota checks never fail because of mock instance = self.plugin.return_value instance.get_networks_count.return_value = 1 # Register mock plugin and enable the 'provider' extension instance.supported_extension_aliases = [provider_net.ALIAS] tools.make_mock_plugin_json_encodable(instance) directory.add_plugin(constants.CORE, instance) ext_mgr = ProviderExtensionManager() self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr) self.addCleanup(self._plugin_patcher.stop) self.api = webtest.TestApp(router.APIRouter()) quota.QUOTAS._driver = None cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver', group='QUOTAS') def _prepare_net_data(self): return {'name': 'net1', provider_net.NETWORK_TYPE: 'sometype', provider_net.PHYSICAL_NETWORK: 'physnet', provider_net.SEGMENTATION_ID: 666} def _put_network_with_provider_attrs(self, ctx, expect_errors=False): data = self._prepare_net_data() env = {'neutron.context': ctx} instance = self.plugin.return_value instance.get_network.return_value = {'tenant_id': ctx.tenant_id, 'shared': False} net_id = uuidutils.generate_uuid() res = self.api.put(test_base._get_path('networks', id=net_id, fmt=self.fmt), self.serialize({'network': data}), extra_environ=env, expect_errors=expect_errors) return res, data, net_id def _post_network_with_provider_attrs(self, ctx, expect_errors=False): data = self._prepare_net_data() env = {'neutron.context': ctx} res = self.api.post(test_base._get_path('networks', fmt=self.fmt), self.serialize({'network': data}), content_type='application/' + self.fmt, extra_environ=env, expect_errors=expect_errors) return res, data def _post_network_with_bad_provider_attrs(self, ctx, bad_data, 
                                              expect_errors=False):
        data = self._prepare_net_data()
        data.update(bad_data)
        env = {'neutron.context': ctx}
        res = self.api.post(test_base._get_path('networks', fmt=self.fmt),
                            self.serialize({'network': data}),
                            content_type='application/' + self.fmt,
                            extra_environ=env,
                            expect_errors=expect_errors)
        return res, data

    def test_network_create_with_provider_attrs(self):
        ctx = context.get_admin_context()
        tenant_id = 'an_admin'
        ctx.tenant_id = tenant_id
        res, data = self._post_network_with_provider_attrs(ctx)
        instance = self.plugin.return_value
        exp_input = {'network': data}
        exp_input['network'].update({'admin_state_up': True,
                                     'tenant_id': tenant_id,
                                     'project_id': tenant_id,
                                     'shared': False})
        instance.create_network.assert_called_with(mock.ANY,
                                                   network=exp_input)
        self.assertEqual(web_exc.HTTPCreated.code, res.status_int)

    def test_network_create_with_bad_provider_attrs_400(self):
        ctx = context.get_admin_context()
        ctx.tenant_id = 'an_admin'
        bad_data = {provider_net.SEGMENTATION_ID: "abc"}
        res, _1 = self._post_network_with_bad_provider_attrs(ctx, bad_data,
                                                             True)
        self.assertEqual(web_exc.HTTPBadRequest.code, res.status_int)

    def test_network_update_with_provider_attrs(self):
        ctx = context.get_admin_context()
        ctx.tenant_id = 'an_admin'
        res, data, net_id = self._put_network_with_provider_attrs(ctx)
        instance = self.plugin.return_value
        exp_input = {'network': data}
        instance.update_network.assert_called_with(mock.ANY,
                                                   net_id,
                                                   network=exp_input)
        self.assertEqual(web_exc.HTTPOk.code, res.status_int)

    def test_network_create_with_provider_attrs_noadmin_returns_403(self):
        tenant_id = 'no_admin'
        ctx = context.Context('', tenant_id, is_admin=False)
        res, _1 = self._post_network_with_provider_attrs(ctx, True)
        self.assertEqual(web_exc.HTTPForbidden.code, res.status_int)

    def test_network_update_with_provider_attrs_noadmin_returns_403(self):
        tenant_id = 'no_admin'
        ctx = context.Context('', tenant_id, is_admin=False)
        res, _1, _2 = self._put_network_with_provider_attrs(ctx, True)
        self.assertEqual(web_exc.HTTPForbidden.code, res.status_int)


# ===== neutron/tests/unit/extensions/test_qos_fip.py =====

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
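# NOTE(editor): The test_qos_fip module below exercises the Floating IP QoS
# extension. As a minimal illustrative sketch (not part of the original
# module), the floating IP payload these tests build via _make_floatingip()
# looks roughly like this, where <net-id> and <policy-id> stand for real
# UUIDs:
#
#     body = {'floatingip': {'floating_network_id': '<net-id>',
#                            'qos_policy_id': '<policy-id>'}}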
# from neutron_lib.api.definitions import external_net as enet_apidef from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib import context from neutron_lib.exceptions import qos as qos_exc from neutron_lib.services.qos import constants as qos_consts from oslo_config import cfg from oslo_utils import uuidutils from neutron.conf.db import extraroute_db from neutron.db import l3_fip_qos from neutron.extensions import l3 from neutron.extensions import qos_fip from neutron.objects.qos import policy from neutron.services.revisions import revision_plugin from neutron.tests.unit.extensions import test_l3 class FloatingIPQoSTestExtensionManager(object): def get_resources(self): return l3.L3.get_resources() def get_actions(self): return [] def get_request_extensions(self): return [] class TestFloatingIPQoSIntPlugin( test_l3.TestL3NatIntPlugin, l3_fip_qos.FloatingQoSDbMixin): supported_extension_aliases = [enet_apidef.ALIAS, l3_apidef.ALIAS, qos_fip.FIP_QOS_ALIAS] class TestFloatingIPQoSL3NatServicePlugin( test_l3.TestL3NatServicePlugin, l3_fip_qos.FloatingQoSDbMixin): supported_extension_aliases = [l3_apidef.ALIAS, qos_fip.FIP_QOS_ALIAS] class FloatingIPQoSDBTestCaseBase(object): def test_create_fip_with_qos_policy_id(self): ctx = context.get_admin_context() policy_obj = policy.QosPolicy(ctx, id=uuidutils.generate_uuid(), project_id='tenant', name='pol1', rules=[]) policy_obj.create() with self.subnet(cidr='11.0.0.0/24') as s: network_id = s['subnet']['network_id'] self._set_net_external(network_id) fip = self._make_floatingip( self.fmt, network_id, qos_policy_id=policy_obj.id) self.assertEqual(policy_obj.id, fip['floatingip'][qos_consts.QOS_POLICY_ID]) def test_fip_has_qos_policy_id_remove_policy(self): ctx = context.get_admin_context() policy_obj = policy.QosPolicy(ctx, id=uuidutils.generate_uuid(), project_id='tenant', name='pol1', rules=[]) policy_obj.create() with self.subnet(cidr='11.0.0.0/24') as s: network_id = s['subnet']['network_id'] self._set_net_external(network_id) fip = self._make_floatingip( self.fmt, network_id, qos_policy_id=policy_obj.id) self.assertEqual(policy_obj.id, fip['floatingip'][qos_consts.QOS_POLICY_ID]) self.assertRaises(qos_exc.QosPolicyInUse, policy_obj.delete) def test_floatingip_update_qos_policy_id(self): ctx = context.get_admin_context() policy_obj_1 = policy.QosPolicy(ctx, id=uuidutils.generate_uuid(), project_id='tenant', name='pol2', rules=[]) policy_obj_1.create() policy_obj_2 = policy.QosPolicy(ctx, id=uuidutils.generate_uuid(), project_id='tenant', name='pol3', rules=[]) policy_obj_2.create() with self.subnet(cidr='11.0.0.0/24') as s: network_id = s['subnet']['network_id'] self._set_net_external(network_id) fip = self._make_floatingip( self.fmt, network_id, qos_policy_id=policy_obj_1.id) self.assertEqual(policy_obj_1.id, fip['floatingip'][qos_consts.QOS_POLICY_ID]) body = self._show('floatingips', fip['floatingip']['id']) self.assertEqual(policy_obj_1.id, body['floatingip'][qos_consts.QOS_POLICY_ID]) body = self._update( 'floatingips', fip['floatingip']['id'], {'floatingip': {qos_consts.QOS_POLICY_ID: policy_obj_2.id}}) self.assertEqual(policy_obj_2.id, body['floatingip'][qos_consts.QOS_POLICY_ID]) updated_revision_number = body['floatingip'].get('revision_number') fip_revision_number = fip['floatingip'].get('revision_number') if updated_revision_number and fip_revision_number: self.assertGreater(updated_revision_number, fip_revision_number) def test_floatingip_adding_qos_policy_id_by_update(self): ctx = context.get_admin_context() 
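        # NOTE(editor): these tests create QoS policies directly as
        # versioned objects (neutron.objects.qos.policy.QosPolicy) and
        # persist them with policy_obj.create(), instead of going through
        # the REST API; the floating IP calls below then reference the
        # resulting policy id.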
policy_obj = policy.QosPolicy(ctx, id=uuidutils.generate_uuid(), project_id='tenant', name='pol4', rules=[]) policy_obj.create() with self.subnet(cidr='11.0.0.0/24') as s: network_id = s['subnet']['network_id'] self._set_net_external(network_id) fip = self._make_floatingip( self.fmt, network_id) self.assertIsNone(fip['floatingip'].get(qos_consts.QOS_POLICY_ID)) body = self._update( 'floatingips', fip['floatingip']['id'], {'floatingip': {qos_consts.QOS_POLICY_ID: policy_obj.id}}) body = self._show('floatingips', body['floatingip']['id']) self.assertEqual(policy_obj.id, body['floatingip'][qos_consts.QOS_POLICY_ID]) def _update_fip_with_port_or_qos_and_verify( self, fip_id, port_id=None, qos_policy_id=None, revision_number=None): update_body = {'floatingip': { 'port_id': port_id, qos_consts.QOS_POLICY_ID: qos_policy_id}} body = self._update('floatingips', fip_id, update_body) body = self._show('floatingips', body['floatingip']['id']) self.assertEqual(port_id, body['floatingip']['port_id']) self.assertEqual(qos_policy_id, body['floatingip'].get(qos_consts.QOS_POLICY_ID)) updated_revision_number = body['floatingip'].get('revision_number') if updated_revision_number and revision_number: self.assertGreater(updated_revision_number, revision_number) def test_floatingip_update_with_port_and_qos(self): ctx = context.get_admin_context() policy_obj = policy.QosPolicy(ctx, id=uuidutils.generate_uuid(), project_id='tenant', name='pol4', rules=[]) policy_obj.create() with self.network() as ext_net: network_id = ext_net['network']['id'] self._set_net_external(network_id) with self.subnet( ext_net, cidr='10.10.10.0/24' ), self.router() as router, self.subnet( cidr='11.0.0.0/24') as private_subnet, self.port( private_subnet) as port: self._add_external_gateway_to_router( router['router']['id'], network_id) self._router_interface_action( 'add', router['router']['id'], private_subnet['subnet']['id'], None) fip = self._make_floatingip( self.fmt, network_id) self.assertIsNone(fip['floatingip'].get('port_id')) self.assertIsNone( fip['floatingip'].get(qos_consts.QOS_POLICY_ID)) self._update_fip_with_port_or_qos_and_verify( fip['floatingip']['id'], port['port']['id'], policy_obj.id, revision_number=fip['floatingip'].get('revision_number')) def test_floatingip_update_with_port_and_qos_scenarios(self): ctx = context.get_admin_context() policy_obj_1 = policy.QosPolicy(ctx, id=uuidutils.generate_uuid(), project_id='tenant', name='pol2', rules=[]) policy_obj_1.create() policy_obj_2 = policy.QosPolicy(ctx, id=uuidutils.generate_uuid(), project_id='tenant', name='pol3', rules=[]) policy_obj_2.create() with self.network() as ext_net: network_id = ext_net['network']['id'] self._set_net_external(network_id) with self.subnet( ext_net, cidr='10.10.10.0/24' ), self.router() as router, self.subnet( cidr='11.0.0.0/24') as private_subnet, self.port( private_subnet) as port_1, self.port( private_subnet) as port_2: self._add_external_gateway_to_router( router['router']['id'], network_id) self._router_interface_action( 'add', router['router']['id'], private_subnet['subnet']['id'], None) fip = self._make_floatingip(self.fmt, network_id) self.assertIsNone(fip['floatingip'].get('port_id')) self.assertIsNone( fip['floatingip'].get(qos_consts.QOS_POLICY_ID)) # update from: {port_id: null, qos_policy_id: null} # to : {port_id: port_id_1, qos_policy_id: null} self._update_fip_with_port_or_qos_and_verify( fip['floatingip']['id'], port_1['port']['id'], None, revision_number=fip['floatingip'].get('revision_number')) # update from: {port_id: 
port_id_1, qos_policy_id: null} # to : {port_id: port_id_1, qos_policy_id: policy_1} self._update_fip_with_port_or_qos_and_verify( fip['floatingip']['id'], port_1['port']['id'], policy_obj_1.id) # update from: {port_id: port_id_1, qos_policy_id: policy_1} # to : {port_id: port_id_2, qos_policy_id: policy_2} self._update_fip_with_port_or_qos_and_verify( fip['floatingip']['id'], port_2['port']['id'], policy_obj_2.id) # update from: {port_id: port_id_2, qos_policy_id: policy_2} # to : {port_id: port_id_1, qos_policy_id: null} self._update_fip_with_port_or_qos_and_verify( fip['floatingip']['id'], port_1['port']['id'], None) # update from: {port_id: port_id_1, qos_policy_id: null} # to : {port_id: null, qos_policy_id: policy_1} self._update_fip_with_port_or_qos_and_verify( fip['floatingip']['id'], None, policy_obj_1.id) # update from: {port_id: null, qos_policy_id: policy_1} # to : {port_id: null, qos_policy_id: null} self._update_fip_with_port_or_qos_and_verify( fip['floatingip']['id']) def test_floatingip_remove_qos_policy_id(self): ctx = context.get_admin_context() policy_obj = policy.QosPolicy(ctx, id=uuidutils.generate_uuid(), project_id='tenant', name='pol5', rules=[]) policy_obj.create() with self.subnet(cidr='11.0.0.0/24') as s: network_id = s['subnet']['network_id'] self._set_net_external(network_id) fip = self._make_floatingip( self.fmt, network_id, qos_policy_id=policy_obj.id) self.assertEqual(policy_obj.id, fip['floatingip'][qos_consts.QOS_POLICY_ID]) self._update( 'floatingips', fip['floatingip']['id'], {'floatingip': {qos_consts.QOS_POLICY_ID: None}}) body = self._show('floatingips', fip['floatingip']['id']) self.assertIsNone( body['floatingip'].get(qos_consts.QOS_POLICY_ID)) def test_floatingip_update_change_nothing(self): ctx = context.get_admin_context() policy_obj = policy.QosPolicy(ctx, id=uuidutils.generate_uuid(), project_id='tenant', name='pol2', rules=[]) policy_obj.create() with self.subnet(cidr='11.0.0.0/24') as s: network_id = s['subnet']['network_id'] self._set_net_external(network_id) fip = self._make_floatingip( self.fmt, network_id) self.assertIsNone(fip['floatingip'].get(qos_consts.QOS_POLICY_ID)) # Updating policy_id from None to None body = self._update( 'floatingips', fip['floatingip']['id'], {'floatingip': {qos_consts.QOS_POLICY_ID: None}}) self.assertIsNone( body['floatingip'].get(qos_consts.QOS_POLICY_ID)) body = self._show('floatingips', fip['floatingip']['id']) self.assertIsNone( body['floatingip'].get(qos_consts.QOS_POLICY_ID)) body = self._update( 'floatingips', fip['floatingip']['id'], {'floatingip': {qos_consts.QOS_POLICY_ID: policy_obj.id}}) self.assertEqual(policy_obj.id, body['floatingip'][qos_consts.QOS_POLICY_ID]) # Updating again with same policy_id body = self._update( 'floatingips', fip['floatingip']['id'], {'floatingip': {qos_consts.QOS_POLICY_ID: policy_obj.id}}) self.assertEqual(policy_obj.id, body['floatingip'][qos_consts.QOS_POLICY_ID]) class FloatingIPQoSDBIntTestCase(test_l3.L3BaseForIntTests, test_l3.L3NatTestCaseMixin, FloatingIPQoSDBTestCaseBase): def setUp(self, plugin=None, service_plugins=None): if not plugin: plugin = ('neutron.tests.unit.extensions.test_qos_fip.' 'TestFloatingIPQoSIntPlugin') if not service_plugins: service_plugins = {'qos': 'neutron.services.qos.qos_plugin.' 
                                        'QoSPlugin'}
        extraroute_db.register_db_extraroute_opts()

        # for these tests we need to enable overlapping ips
        cfg.CONF.set_default('allow_overlapping_ips', True)
        cfg.CONF.set_default('max_routes', 3)

        ext_mgr = FloatingIPQoSTestExtensionManager()
        super(test_l3.L3BaseForIntTests, self).setUp(
            plugin=plugin, ext_mgr=ext_mgr,
            service_plugins=service_plugins)
        self.setup_notification_driver()


class FloatingIPQoSDBSepTestCase(test_l3.L3BaseForSepTests,
                                 test_l3.L3NatTestCaseMixin,
                                 FloatingIPQoSDBTestCaseBase):

    def setUp(self, service_plugins=None):
        # the plugin without L3 support
        plugin = 'neutron.tests.unit.extensions.test_l3.TestNoL3NatPlugin'
        # the L3 service plugin
        if not service_plugins:
            l3_plugin = ('neutron.tests.unit.extensions.test_qos_fip.'
                         'TestFloatingIPQoSL3NatServicePlugin')
            service_plugins = {'l3_plugin_name': l3_plugin,
                               'qos': 'neutron.services.qos.qos_plugin.'
                                      'QoSPlugin'}
        extraroute_db.register_db_extraroute_opts()

        # for these tests we need to enable overlapping ips
        cfg.CONF.set_default('allow_overlapping_ips', True)
        cfg.CONF.set_default('max_routes', 3)

        ext_mgr = FloatingIPQoSTestExtensionManager()
        super(test_l3.L3BaseForSepTests, self).setUp(
            plugin=plugin, ext_mgr=ext_mgr,
            service_plugins=service_plugins)
        self.setup_notification_driver()


class FloatingIPQoSDBWithRevisionIntTestCase(FloatingIPQoSDBIntTestCase):

    def setUp(self, plugin=None):
        service_plugins = {'qos': 'neutron.services.qos.qos_plugin.QoSPlugin',
                           'revision_plugin_name': 'revisions'}
        super(FloatingIPQoSDBWithRevisionIntTestCase, self).setUp(
            service_plugins=service_plugins)
        revision_plugin.RevisionPlugin()


class FloatingIPQoSDBWithRevisionSepTestCase(FloatingIPQoSDBSepTestCase):

    def setUp(self):
        l3_plugin = ('neutron.tests.unit.extensions.test_qos_fip.'
                     'TestFloatingIPQoSL3NatServicePlugin')
        service_plugins = {'l3_plugin_name': l3_plugin,
                           'qos': 'neutron.services.qos.qos_plugin.QoSPlugin',
                           'revision_plugin_name': 'revisions'}
        super(FloatingIPQoSDBWithRevisionSepTestCase, self).setUp(
            service_plugins=service_plugins)
        revision_plugin.RevisionPlugin()


# ===== neutron/tests/unit/extensions/test_qos_gateway_ip.py =====

# Copyright 2018 OpenStack Foundation
# Copyright 2017 Letv Cloud Computing
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
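# NOTE(editor): The test_qos_gateway_ip module below attaches QoS policies
# to a router's external gateway. As a minimal illustrative sketch (not
# part of the original module), the gateway payload asserted on by these
# tests has roughly this shape, where <net-id> and <policy-id> stand for
# real UUIDs:
#
#     body = {'router': {'external_gateway_info': {
#         'network_id': '<net-id>',
#         'qos_policy_id': '<policy-id>'}}}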
# from neutron_lib.api.definitions import external_net as enet_apidef from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.api.definitions import l3_ext_gw_mode from neutron_lib.api.definitions import qos_gateway_ip from neutron_lib import context from neutron_lib.services.qos import constants as qos_consts from oslo_config import cfg from oslo_utils import uuidutils from neutron.conf.db import extraroute_db from neutron.db import l3_gateway_ip_qos from neutron.extensions import l3 from neutron.objects.qos import policy from neutron.tests.unit.extensions import test_l3 class GatewayIPQoSTestExtensionManager(object): def get_resources(self): l3_apidef.RESOURCE_ATTRIBUTE_MAP['routers'].update( qos_gateway_ip.RESOURCE_ATTRIBUTE_MAP['routers']) return l3.L3.get_resources() def get_actions(self): return [] def get_request_extensions(self): return [] class TestGatewayIPQoSIntPlugin( test_l3.TestL3NatIntPlugin, l3_gateway_ip_qos.L3_gw_ip_qos_db_mixin): supported_extension_aliases = [enet_apidef.ALIAS, l3_apidef.ALIAS, l3_ext_gw_mode.ALIAS, qos_gateway_ip.ALIAS] class TestGatewayIPQoSL3NatServicePlugin( test_l3.TestL3NatServicePlugin, l3_gateway_ip_qos.L3_gw_ip_qos_db_mixin): supported_extension_aliases = [l3_apidef.ALIAS, l3_ext_gw_mode.ALIAS, qos_gateway_ip.ALIAS] class GatewayIPQoSDBTestCaseBase(object): def test_create_router_gateway_with_qos_policy(self): ctx = context.get_admin_context() policy_obj = policy.QosPolicy(ctx, id=uuidutils.generate_uuid(), project_id='tenant', name='pol1', rules=[]) policy_obj.create() with self.subnet(cidr='11.0.0.0/24') as public_sub,\ self.router() as r: self._set_net_external(public_sub['subnet']['network_id']) res = self._add_external_gateway_to_router( r['router']['id'], public_sub['subnet']['network_id'], policy_id=policy_obj.id) self.assertEqual( policy_obj.id, res['router']['external_gateway_info'].get( qos_consts.QOS_POLICY_ID)) def test_update_router_gateway_with_qos_policy(self): ctx = context.get_admin_context() policy_obj = policy.QosPolicy(ctx, id=uuidutils.generate_uuid(), project_id='tenant', name='pol1', rules=[]) policy_obj.create() with self.subnet(cidr='11.0.0.0/24') as public_sub,\ self.router() as r: self._set_net_external(public_sub['subnet']['network_id']) res = self._add_external_gateway_to_router( r['router']['id'], public_sub['subnet']['network_id']) self.assertIsNone( res['router']['external_gateway_info'].get( qos_consts.QOS_POLICY_ID)) # update router gateway res = self._add_external_gateway_to_router( r['router']['id'], public_sub['subnet']['network_id'], policy_id=policy_obj.id) self.assertEqual( policy_obj.id, res['router']['external_gateway_info'].get( qos_consts.QOS_POLICY_ID)) def test_clear_router_gateway_and_create_with_old_qos_policy_implicitly( self): ctx = context.get_admin_context() policy_obj = policy.QosPolicy(ctx, id=uuidutils.generate_uuid(), project_id='tenant', name='pol1', rules=[]) policy_obj.create() with self.subnet(cidr='11.0.0.0/24') as public_sub,\ self.router() as r: self._set_net_external(public_sub['subnet']['network_id']) res = self._add_external_gateway_to_router( r['router']['id'], public_sub['subnet']['network_id'], policy_id=policy_obj.id) self.assertEqual( policy_obj.id, res['router']['external_gateway_info'].get( qos_consts.QOS_POLICY_ID)) # Clear router gateway self._remove_external_gateway_from_router( r['router']['id'], public_sub['subnet']['network_id'], external_gw_info={}) # Create router gateway again, then the qos policy binding will be # reused here. 
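            # NOTE(editor): i.e., clearing the gateway does not delete the
            # router's QoS policy binding, so re-adding a gateway without an
            # explicit policy_id is expected to come back with the
            # previously bound policy, which is what the assertion below
            # verifies.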
            res = self._add_external_gateway_to_router(
                r['router']['id'],
                public_sub['subnet']['network_id'])
            self.assertEqual(
                policy_obj.id,
                res['router']['external_gateway_info'].get(
                    qos_consts.QOS_POLICY_ID))

    def test_clear_router_gateway_qos_policy(self):
        ctx = context.get_admin_context()
        policy_obj = policy.QosPolicy(ctx,
                                      id=uuidutils.generate_uuid(),
                                      project_id='tenant', name='pol1',
                                      rules=[])
        policy_obj.create()
        with self.subnet(cidr='11.0.0.0/24') as public_sub,\
                self.router() as r:
            self._set_net_external(public_sub['subnet']['network_id'])
            res = self._add_external_gateway_to_router(
                r['router']['id'],
                public_sub['subnet']['network_id'])
            self.assertIsNone(
                res['router']['external_gateway_info'].get(
                    qos_consts.QOS_POLICY_ID))

            # update router gateway
            res = self._add_external_gateway_to_router(
                r['router']['id'],
                public_sub['subnet']['network_id'],
                policy_id=policy_obj.id)
            self.assertEqual(
                policy_obj.id,
                res['router']['external_gateway_info'].get(
                    qos_consts.QOS_POLICY_ID))

            # Explicitly clear router gateway qos policy binding
            res = self._add_external_gateway_to_router(
                r['router']['id'],
                public_sub['subnet']['network_id'],
                policy_id=None,
                is_remove=True)
            self.assertIsNone(
                res['router']['external_gateway_info'].get(
                    qos_consts.QOS_POLICY_ID))


class GatewayIPQoSDBIntTestCase(test_l3.L3BaseForIntTests,
                                test_l3.L3NatTestCaseMixin,
                                GatewayIPQoSDBTestCaseBase):

    def setUp(self, plugin=None):
        if not plugin:
            plugin = ('neutron.tests.unit.extensions.test_qos_gateway_ip.'
                      'TestGatewayIPQoSIntPlugin')
        service_plugins = {'qos': 'neutron.services.qos.qos_plugin.QoSPlugin'}
        extraroute_db.register_db_extraroute_opts()

        # for these tests we need to enable overlapping ips
        cfg.CONF.set_default('allow_overlapping_ips', True)
        cfg.CONF.set_default('max_routes', 3)

        ext_mgr = GatewayIPQoSTestExtensionManager()
        super(test_l3.L3BaseForIntTests, self).setUp(
            plugin=plugin, ext_mgr=ext_mgr,
            service_plugins=service_plugins)
        self.setup_notification_driver()


class GatewayIPQoSDBSepTestCase(test_l3.L3BaseForSepTests,
                                test_l3.L3NatTestCaseMixin,
                                GatewayIPQoSDBTestCaseBase):

    def setUp(self):
        # the plugin without L3 support
        plugin = 'neutron.tests.unit.extensions.test_l3.TestNoL3NatPlugin'

        # the L3 service plugin
        l3_plugin = ('neutron.tests.unit.extensions.test_qos_gateway_ip.'
                     'TestGatewayIPQoSL3NatServicePlugin')
        service_plugins = {'l3_plugin_name': l3_plugin,
                           'qos': 'neutron.services.qos.qos_plugin.QoSPlugin'}
        extraroute_db.register_db_extraroute_opts()

        # for these tests we need to enable overlapping ips
        cfg.CONF.set_default('allow_overlapping_ips', True)
        cfg.CONF.set_default('max_routes', 3)

        ext_mgr = GatewayIPQoSTestExtensionManager()
        super(test_l3.L3BaseForSepTests, self).setUp(
            plugin=plugin, ext_mgr=ext_mgr,
            service_plugins=service_plugins)
        self.setup_notification_driver()


# ===== neutron/tests/unit/extensions/test_quotasv2.py =====

# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys import mock from neutron_lib import context from neutron_lib.db import constants from neutron_lib import exceptions from neutron_lib import fixture from oslo_config import cfg import testtools from webob import exc import webtest from neutron.api import extensions from neutron.api.v2 import router from neutron.common import config from neutron.conf import quota as qconf from neutron.db.quota import driver from neutron import quota from neutron.quota import resource_registry from neutron.tests import base from neutron.tests.unit.api.v2 import test_base from neutron.tests.unit import testlib_api DEFAULT_QUOTAS_ACTION = 'default' TARGET_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin' _get_path = test_base._get_path class QuotaExtensionTestCase(testlib_api.WebTestCase): def setUp(self): super(QuotaExtensionTestCase, self).setUp() # Ensure existing ExtensionManager is not used extensions.PluginAwareExtensionManager._instance = None self.useFixture(fixture.APIDefinitionFixture()) # Create the default configurations self.config_parse() # Update the plugin and extensions path self.setup_coreplugin('ml2') quota.QUOTAS = quota.QuotaEngine() self._plugin_patcher = mock.patch(TARGET_PLUGIN, autospec=True) self.plugin = self._plugin_patcher.start() self.plugin.return_value.supported_extension_aliases = ['quotas'] # QUOTAS will register the items in conf when starting # extra1 here is added later, so have to do it manually resource_registry.register_resource_by_name('extra1') ext_mgr = extensions.PluginAwareExtensionManager.get_instance() app = config.load_paste_app('extensions_test_app') ext_middleware = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr) self.api = webtest.TestApp(ext_middleware) # Initialize the router for the core API in order to ensure core quota # resources are registered router.APIRouter() def _test_quota_default_values(self, expected_values): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id)} res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env) quota = self.deserialize(res) for resource, expected_value in expected_values.items(): self.assertEqual(expected_value, quota['quota'][resource]) class QuotaExtensionDbTestCase(QuotaExtensionTestCase): fmt = 'json' def setUp(self): cfg.CONF.set_override( 'quota_driver', 'neutron.db.quota.driver.DbQuotaDriver', group='QUOTAS') super(QuotaExtensionDbTestCase, self).setUp() def test_quotas_loaded_right(self): res = self.api.get(_get_path('quotas', fmt=self.fmt)) quota = self.deserialize(res) self.assertEqual([], quota['quotas']) self.assertEqual(200, res.status_int) def test_quotas_default_values(self): self._test_quota_default_values( {'network': qconf.DEFAULT_QUOTA_NETWORK, 'subnet': qconf.DEFAULT_QUOTA_SUBNET, 'port': qconf.DEFAULT_QUOTA_PORT, 'extra1': qconf.DEFAULT_QUOTA}) def test_quotas_negative_default_value(self): cfg.CONF.set_override( 'quota_port', -666, group='QUOTAS') cfg.CONF.set_override( 'quota_network', -10, group='QUOTAS') cfg.CONF.set_override( 'quota_subnet', -50, group='QUOTAS') self._test_quota_default_values( {'network': qconf.DEFAULT_QUOTA, 
'subnet': qconf.DEFAULT_QUOTA, 'port': qconf.DEFAULT_QUOTA, 'extra1': qconf.DEFAULT_QUOTA}) def test_show_default_quotas_with_admin(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id + '2', is_admin=True)} res = self.api.get(_get_path('quotas', id=tenant_id, action=DEFAULT_QUOTAS_ACTION, fmt=self.fmt), extra_environ=env) self.assertEqual(200, res.status_int) quota = self.deserialize(res) self.assertEqual( qconf.DEFAULT_QUOTA_NETWORK, quota['quota']['network']) self.assertEqual( qconf.DEFAULT_QUOTA_SUBNET, quota['quota']['subnet']) self.assertEqual( qconf.DEFAULT_QUOTA_PORT, quota['quota']['port']) def test_show_default_quotas_with_owner_tenant(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=False)} res = self.api.get(_get_path('quotas', id=tenant_id, action=DEFAULT_QUOTAS_ACTION, fmt=self.fmt), extra_environ=env) self.assertEqual(200, res.status_int) quota = self.deserialize(res) self.assertEqual( qconf.DEFAULT_QUOTA_NETWORK, quota['quota']['network']) self.assertEqual( qconf.DEFAULT_QUOTA_SUBNET, quota['quota']['subnet']) self.assertEqual( qconf.DEFAULT_QUOTA_PORT, quota['quota']['port']) def test_show_default_quotas_without_admin_forbidden_returns_403(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id + '2', is_admin=False)} res = self.api.get(_get_path('quotas', id=tenant_id, action=DEFAULT_QUOTAS_ACTION, fmt=self.fmt), extra_environ=env, expect_errors=True) self.assertEqual(403, res.status_int) def test_show_quotas_with_admin(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id + '2', is_admin=True)} res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env) self.assertEqual(200, res.status_int) quota = self.deserialize(res) self.assertEqual( qconf.DEFAULT_QUOTA_NETWORK, quota['quota']['network']) self.assertEqual( qconf.DEFAULT_QUOTA_SUBNET, quota['quota']['subnet']) self.assertEqual( qconf.DEFAULT_QUOTA_PORT, quota['quota']['port']) def test_show_quotas_without_admin_forbidden_returns_403(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id + '2', is_admin=False)} res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env, expect_errors=True) self.assertEqual(403, res.status_int) def test_show_quotas_with_owner_tenant(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=False)} res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env) self.assertEqual(200, res.status_int) quota = self.deserialize(res) self.assertEqual( qconf.DEFAULT_QUOTA_NETWORK, quota['quota']['network']) self.assertEqual( qconf.DEFAULT_QUOTA_SUBNET, quota['quota']['subnet']) self.assertEqual( qconf.DEFAULT_QUOTA_PORT, quota['quota']['port']) def test_list_quotas_with_admin(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=True)} res = self.api.get(_get_path('quotas', fmt=self.fmt), extra_environ=env) self.assertEqual(200, res.status_int) quota = self.deserialize(res) self.assertEqual([], quota['quotas']) def test_list_quotas_without_admin_forbidden_returns_403(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=False)} res = self.api.get(_get_path('quotas', fmt=self.fmt), extra_environ=env, expect_errors=True) self.assertEqual(403, res.status_int) def 
test_update_quotas_without_admin_forbidden_returns_403(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=False)} quotas = {'quota': {'network': 100}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), extra_environ=env, expect_errors=True) self.assertEqual(403, res.status_int) def test_update_quotas_with_non_integer_returns_400(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=True)} quotas = {'quota': {'network': 'abc'}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), extra_environ=env, expect_errors=True) self.assertEqual(400, res.status_int) def test_update_quotas_with_negative_integer_returns_400(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=True)} quotas = {'quota': {'network': -2}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), extra_environ=env, expect_errors=True) self.assertEqual(400, res.status_int) def test_update_quotas_with_out_of_range_integer_returns_400(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=True)} quotas = {'quota': {'network': constants.DB_INTEGER_MAX_VALUE + 1}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), extra_environ=env, expect_errors=True) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_update_quotas_to_unlimited(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=True)} quotas = {'quota': {'network': -1}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), extra_environ=env, expect_errors=False) self.assertEqual(200, res.status_int) def test_update_quotas_exceeding_current_limit(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=True)} quotas = {'quota': {'network': 120}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), extra_environ=env, expect_errors=False) self.assertEqual(200, res.status_int) def test_update_quotas_with_non_support_resource_returns_400(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=True)} quotas = {'quota': {'abc': 100}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), extra_environ=env, expect_errors=True) self.assertEqual(400, res.status_int) def test_update_quotas_with_admin(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id + '2', is_admin=True)} quotas = {'quota': {'network': 100}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), extra_environ=env) self.assertEqual(200, res.status_int) env2 = {'neutron.context': context.Context('', tenant_id)} res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env2) quota = self.deserialize(res) self.assertEqual(100, quota['quota']['network']) self.assertEqual(qconf.DEFAULT_QUOTA_SUBNET, quota['quota']['subnet']) self.assertEqual(qconf.DEFAULT_QUOTA_PORT, quota['quota']['port']) def test_update_attributes(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id + '2', is_admin=True)} quotas = {'quota': {'extra1': 100}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), 
self.serialize(quotas), extra_environ=env) self.assertEqual(200, res.status_int) env2 = {'neutron.context': context.Context('', tenant_id)} res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env2) quota = self.deserialize(res) self.assertEqual(100, quota['quota']['extra1']) def test_delete_quotas_with_admin(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id + '2', is_admin=True)} # Create a quota to ensure we have something to delete quotas = {'quota': {'network': 100}} self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), extra_environ=env) res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env) self.assertEqual(204, res.status_int) def test_delete_quotas_without_admin_forbidden_returns_403(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=False)} res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env, expect_errors=True) self.assertEqual(403, res.status_int) def test_delete_quota_with_unknown_tenant_returns_404(self): tenant_id = 'idnotexist' env = {'neutron.context': context.Context('', tenant_id + '2', is_admin=True)} res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env, expect_errors=True) self.assertEqual(exc.HTTPNotFound.code, res.status_int) def test_quotas_loaded_bad_returns_404(self): try: res = self.api.get(_get_path('quotas'), expect_errors=True) self.assertEqual(404, res.status_int) except Exception: pass def test_quotas_limit_check(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=True)} quotas = {'quota': {'network': 5}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), extra_environ=env) self.assertEqual(200, res.status_int) quota.QUOTAS.limit_check(context.Context('', tenant_id), tenant_id, network=4) def test_quotas_limit_check_with_invalid_quota_value(self): tenant_id = 'tenant_id1' with testtools.ExpectedException(exceptions.InvalidQuotaValue): quota.QUOTAS.limit_check(context.Context('', tenant_id), tenant_id, network=-2) def test_quotas_limit_check_with_not_registered_resource_fails(self): tenant_id = 'tenant_id1' self.assertRaises(exceptions.QuotaResourceUnknown, quota.QUOTAS.limit_check, context.get_admin_context(), tenant_id, foobar=1) def test_quotas_get_tenant_from_request_context(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=True)} res = self.api.get(_get_path('quotas/tenant', fmt=self.fmt), extra_environ=env) self.assertEqual(200, res.status_int) quota = self.deserialize(res) self.assertEqual(quota['tenant']['tenant_id'], tenant_id) def test_quotas_get_tenant_from_empty_request_context_returns_400(self): env = {'neutron.context': context.Context('', '', is_admin=True)} res = self.api.get(_get_path('quotas/tenant', fmt=self.fmt), extra_environ=env, expect_errors=True) self.assertEqual(400, res.status_int) def test_make_reservation_resource_unknown_raises(self): tenant_id = 'tenant_id1' self.assertRaises(exceptions.QuotaResourceUnknown, quota.QUOTAS.make_reservation, context.get_admin_context(), tenant_id, {'foobar': 1}, plugin=None) def test_make_reservation_negative_delta_raises(self): tenant_id = 'tenant_id1' self.assertRaises(exceptions.InvalidQuotaValue, quota.QUOTAS.make_reservation, context.get_admin_context(), tenant_id, {'network': -1}, plugin=None) class 
QuotaExtensionCfgTestCase(QuotaExtensionTestCase): fmt = 'json' def setUp(self): cfg.CONF.set_override( 'quota_driver', 'neutron.quota.ConfDriver', group='QUOTAS') super(QuotaExtensionCfgTestCase, self).setUp() def test_quotas_default_values(self): self._test_quota_default_values( {'network': qconf.DEFAULT_QUOTA_NETWORK, 'subnet': qconf.DEFAULT_QUOTA_SUBNET, 'port': qconf.DEFAULT_QUOTA_PORT, 'extra1': qconf.DEFAULT_QUOTA}) def test_quotas_negative_default_value(self): cfg.CONF.set_override( 'quota_port', -666, group='QUOTAS') self._test_quota_default_values( {'network': qconf.DEFAULT_QUOTA_NETWORK, 'subnet': qconf.DEFAULT_QUOTA_SUBNET, 'port': qconf.DEFAULT_QUOTA, 'extra1': qconf.DEFAULT_QUOTA}) def test_show_quotas_with_admin(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id + '2', is_admin=True)} res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env) self.assertEqual(200, res.status_int) def test_show_quotas_without_admin_forbidden(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id + '2', is_admin=False)} res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env, expect_errors=True) self.assertEqual(403, res.status_int) def test_update_quotas_forbidden(self): tenant_id = 'tenant_id1' quotas = {'quota': {'network': 100}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), expect_errors=True) self.assertEqual(403, res.status_int) def test_delete_quotas_forbidden(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=False)} res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env, expect_errors=True) self.assertEqual(403, res.status_int) class TestDbQuotaDriver(base.BaseTestCase): """Test for neutron.db.quota.driver.DbQuotaDriver.""" def test_get_tenant_quotas_arg(self): """Call neutron.db.quota.driver.DbQuotaDriver._get_quotas.""" quota_driver = driver.DbQuotaDriver() ctx = context.Context('', 'bar') foo_quotas = {'network': 5} default_quotas = {'network': 10} target_tenant = 'foo' with mock.patch.object(driver.DbQuotaDriver, 'get_tenant_quotas', return_value=foo_quotas) as get_tenant_quotas: quotas = quota_driver._get_quotas(ctx, target_tenant, default_quotas) self.assertEqual(quotas, foo_quotas) get_tenant_quotas.assert_called_once_with(ctx, default_quotas, target_tenant) class TestQuotaDriverLoad(base.BaseTestCase): def setUp(self): super(TestQuotaDriverLoad, self).setUp() # Make sure QuotaEngine is reinitialized in each test. 
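        # NOTE(editor): clearing the cached driver forces
        # quota.QUOTAS.get_driver() to re-read the 'quota_driver' config
        # option on its next call; _test_quota_driver() below relies on
        # this, including the fallback to ConfDriver when the DB driver
        # module cannot be imported.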
        quota.QUOTAS._driver = None

    def _test_quota_driver(self, cfg_driver, loaded_driver,
                           with_quota_db_module=True):
        cfg.CONF.set_override('quota_driver', cfg_driver, group='QUOTAS')
        with mock.patch.dict(sys.modules, {}):
            if (not with_quota_db_module and
                    'neutron.db.quota.driver' in sys.modules):
                del sys.modules['neutron.db.quota.driver']
            driver = quota.QUOTAS.get_driver()
            self.assertEqual(loaded_driver, driver.__class__.__name__)

    def test_quota_db_driver_with_quotas_table(self):
        self._test_quota_driver('neutron.db.quota.driver.DbQuotaDriver',
                                'DbQuotaDriver', True)

    def test_quota_db_driver_fallback_conf_driver(self):
        self._test_quota_driver('neutron.db.quota.driver.DbQuotaDriver',
                                'ConfDriver', False)

    def test_quota_conf_driver(self):
        self._test_quota_driver('neutron.quota.ConfDriver',
                                'ConfDriver', True)


# ===== neutron/tests/unit/extensions/test_quotasv2_detail.py =====

# Copyright 2017 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from neutron_lib import context
from neutron_lib import fixture
from oslo_config import cfg
import webtest

from neutron.api import extensions
from neutron.api.v2 import router
from neutron.common import config
from neutron.conf import quota as qconf
from neutron import quota
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit import testlib_api

DEFAULT_QUOTAS_ACTION = 'details'
TARGET_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin'

_get_path = test_base._get_path


class DetailQuotaExtensionTestCase(testlib_api.WebTestCase):

    def setUp(self):
        super(DetailQuotaExtensionTestCase, self).setUp()
        # Ensure existing ExtensionManager is not used
        extensions.PluginAwareExtensionManager._instance = None
        self.useFixture(fixture.APIDefinitionFixture())

        # Create the default configurations
        self.config_parse()

        # Update the plugin and extensions path
        self.setup_coreplugin('ml2')
        quota.QUOTAS = quota.QuotaEngine()
        self._plugin_patcher = mock.patch(TARGET_PLUGIN, autospec=True)
        self.plugin = self._plugin_patcher.start()
        self.plugin.return_value.supported_extension_aliases = \
            ['quotas', 'quota_details']
        # QUOTAS will register the items in conf when starting
        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
        app = config.load_paste_app('extensions_test_app')
        ext_middleware = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
        self.api = webtest.TestApp(ext_middleware)
        # Initialize the router for the core API in order to ensure core
        # quota resources are registered
        router.APIRouter()


class DetailQuotaExtensionDbTestCase(DetailQuotaExtensionTestCase):
    fmt = 'json'

    def test_show_detail_quotas(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id)}
        res = self.api.get(_get_path('quotas', id=tenant_id,
                                     fmt=self.fmt,
                                     endpoint=DEFAULT_QUOTAS_ACTION),
                           extra_environ=env)
        self.assertEqual(200,
                         res.status_int)
        quota = self.deserialize(res)
        self.assertEqual(0, quota['quota']['network']['reserved'])
        self.assertEqual(0, quota['quota']['subnet']['reserved'])
        self.assertEqual(0, quota['quota']['port']['reserved'])
        self.assertEqual(0, quota['quota']['network']['used'])
        self.assertEqual(0, quota['quota']['subnet']['used'])
        self.assertEqual(0, quota['quota']['port']['used'])
        self.assertEqual(qconf.DEFAULT_QUOTA_NETWORK,
                         quota['quota']['network']['limit'])
        self.assertEqual(qconf.DEFAULT_QUOTA_SUBNET,
                         quota['quota']['subnet']['limit'])
        self.assertEqual(qconf.DEFAULT_QUOTA_PORT,
                         quota['quota']['port']['limit'])

    def test_detail_quotas_negative_limit_value(self):
        cfg.CONF.set_override(
            'quota_port', -666, group='QUOTAS')
        cfg.CONF.set_override(
            'quota_network', -10, group='QUOTAS')
        cfg.CONF.set_override(
            'quota_subnet', -50, group='QUOTAS')
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=True)}
        res = self.api.get(_get_path('quotas', id=tenant_id,
                                     fmt=self.fmt,
                                     endpoint=DEFAULT_QUOTAS_ACTION),
                           extra_environ=env)
        self.assertEqual(200, res.status_int)
        quota = self.deserialize(res)
        self.assertEqual(0, quota['quota']['network']['reserved'])
        self.assertEqual(0, quota['quota']['subnet']['reserved'])
        self.assertEqual(0, quota['quota']['port']['reserved'])
        self.assertEqual(0, quota['quota']['network']['used'])
        self.assertEqual(0, quota['quota']['subnet']['used'])
        self.assertEqual(0, quota['quota']['port']['used'])
        self.assertEqual(qconf.DEFAULT_QUOTA,
                         quota['quota']['network']['limit'])
        self.assertEqual(qconf.DEFAULT_QUOTA,
                         quota['quota']['subnet']['limit'])
        self.assertEqual(qconf.DEFAULT_QUOTA,
                         quota['quota']['port']['limit'])

    def test_show_detail_quotas_with_admin(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id + '2',
                                                  is_admin=True)}
        res = self.api.get(_get_path('quotas', id=tenant_id,
                                     fmt=self.fmt,
                                     endpoint=DEFAULT_QUOTAS_ACTION),
                           extra_environ=env)
        self.assertEqual(200, res.status_int)
        quota = self.deserialize(res)
        self.assertEqual(0, quota['quota']['network']['reserved'])
        self.assertEqual(0, quota['quota']['subnet']['reserved'])
        self.assertEqual(0, quota['quota']['port']['reserved'])
        self.assertEqual(0, quota['quota']['network']['used'])
        self.assertEqual(0, quota['quota']['subnet']['used'])
        self.assertEqual(0, quota['quota']['port']['used'])
        self.assertEqual(qconf.DEFAULT_QUOTA_NETWORK,
                         quota['quota']['network']['limit'])
        self.assertEqual(qconf.DEFAULT_QUOTA_SUBNET,
                         quota['quota']['subnet']['limit'])
        self.assertEqual(qconf.DEFAULT_QUOTA_PORT,
                         quota['quota']['port']['limit'])

    def test_detail_quotas_without_admin_forbidden_returns_403(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id + '2',
                                                  is_admin=False)}
        res = self.api.get(_get_path('quotas', id=tenant_id,
                                     fmt=self.fmt,
                                     endpoint=DEFAULT_QUOTAS_ACTION),
                           extra_environ=env, expect_errors=True)
        self.assertEqual(403, res.status_int)


# ===== neutron/tests/unit/extensions/test_router_availability_zone.py =====

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib.api.definitions import router_availability_zone
from neutron_lib import constants as lib_const
from neutron_lib.plugins import constants

from neutron.db.availability_zone import router as router_az_db
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_db
from neutron.extensions import l3
from neutron.tests.unit.extensions import test_availability_zone as test_az
from neutron.tests.unit.extensions import test_l3


class AZL3ExtensionManager(test_az.AZExtensionManager):

    def get_resources(self):
        return (super(AZL3ExtensionManager, self).get_resources() +
                l3.L3.get_resources())


class AZRouterTestPlugin(l3_db.L3_NAT_db_mixin,
                         router_az_db.RouterAvailabilityZoneMixin,
                         l3_agentschedulers_db.AZL3AgentSchedulerDbMixin):
    supported_extension_aliases = [l3_apidef.ALIAS,
                                   lib_const.L3_AGENT_SCHEDULER_EXT_ALIAS,
                                   router_availability_zone.ALIAS]

    @classmethod
    def get_plugin_type(cls):
        return constants.L3

    def get_plugin_description(self):
        return "L3 Routing Service Plugin for testing"


class TestAZRouterCase(test_az.AZTestCommon, test_l3.L3NatTestCaseMixin):
    def setUp(self):
        plugin = ('neutron.tests.unit.extensions.'
                  'test_availability_zone.AZTestPlugin')
        l3_plugin = ('neutron.tests.unit.extensions.'
                     'test_router_availability_zone.AZRouterTestPlugin')
        service_plugins = {'l3_plugin_name': l3_plugin}
        ext_mgr = AZL3ExtensionManager()
        super(TestAZRouterCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr,
                                            service_plugins=service_plugins)

    def test_create_router_with_az(self):
        self._register_azs()
        az_hints = ['nova2']
        with self.router(availability_zone_hints=az_hints) as router:
            res = self._show('routers', router['router']['id'])
            self.assertItemsEqual(az_hints,
                                  res['router']['availability_zone_hints'])

    def test_create_router_with_azs(self):
        self._register_azs()
        az_hints = ['nova2', 'nova3']
        with self.router(availability_zone_hints=az_hints) as router:
            res = self._show('routers', router['router']['id'])
            self.assertItemsEqual(az_hints,
                                  res['router']['availability_zone_hints'])

    def test_create_router_without_az(self):
        with self.router() as router:
            res = self._show('routers', router['router']['id'])
            self.assertEqual([], res['router']['availability_zone_hints'])

    def test_create_router_with_empty_az(self):
        with self.router(availability_zone_hints=[]) as router:
            res = self._show('routers', router['router']['id'])
            self.assertEqual([], res['router']['availability_zone_hints'])

    def test_create_router_with_none_existing_az(self):
        res = self._create_router(self.fmt, 'tenant_id',
                                  availability_zone_hints=['nova4'])
        self.assertEqual(404, res.status_int)


# ===== neutron/tests/unit/extensions/test_securitygroup.py =====

# Copyright (c) 2012 OpenStack Foundation.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
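# NOTE(editor): The security-group tests below build rule payloads through
# _build_security_group_rule(). As a minimal illustrative sketch (not part
# of the original module), a resulting request body looks roughly like
# this, where <sg-id> and <tenant-id> stand for real identifiers:
#
#     body = {'security_group_rule': {'security_group_id': '<sg-id>',
#                                     'direction': 'ingress',
#                                     'protocol': 'tcp',
#                                     'ethertype': 'IPv4',
#                                     'port_range_min': '22',
#                                     'port_range_max': '22',
#                                     'tenant_id': '<tenant-id>'}}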
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import copy import mock from neutron_lib.api import validators from neutron_lib import constants as const from neutron_lib import context from neutron_lib.db import api as db_api from neutron_lib.db import constants as db_const from neutron_lib import exceptions from neutron_lib.plugins import directory from oslo_config import cfg import oslo_db.exception as exc import testtools import webob.exc from neutron.db import db_base_plugin_v2 from neutron.db import securitygroups_db from neutron.extensions import securitygroup as ext_sg from neutron.extensions import standardattrdescription from neutron.tests import base from neutron.tests.unit.db import test_db_base_plugin_v2 DB_PLUGIN_KLASS = ('neutron.tests.unit.extensions.test_securitygroup.' 'SecurityGroupTestPlugin') LONG_NAME_OK = 'x' * (db_const.NAME_FIELD_SIZE) LONG_NAME_NG = 'x' * (db_const.NAME_FIELD_SIZE + 1) class SecurityGroupTestExtensionManager(object): def get_resources(self): # The description of security_group_rules will be added by extending # standardattrdescription. But as API router will not be initialized # in test code, manually add it. ext_res = (standardattrdescription.Standardattrdescription(). get_extended_resources("2.0")) if ext_sg.SECURITYGROUPRULES in ext_res: existing_sg_rule_attr_map = ( ext_sg.RESOURCE_ATTRIBUTE_MAP[ext_sg.SECURITYGROUPRULES]) sg_rule_attr_desc = ext_res[ext_sg.SECURITYGROUPRULES] existing_sg_rule_attr_map.update(sg_rule_attr_desc) if ext_sg.SECURITYGROUPS in ext_res: existing_sg_attr_map = ( ext_sg.RESOURCE_ATTRIBUTE_MAP[ext_sg.SECURITYGROUPS]) sg_attr_desc = ext_res[ext_sg.SECURITYGROUPS] existing_sg_attr_map.update(sg_attr_desc) return ext_sg.Securitygroup.get_resources() def get_actions(self): return [] def get_request_extensions(self): return [] class SecurityGroupsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def _build_security_group(self, name, description, **kwargs): data = { 'security_group': { 'name': name, 'tenant_id': kwargs.get( 'tenant_id', test_db_base_plugin_v2.TEST_TENANT_ID), 'description': description}} return data def _create_security_group_response(self, fmt, data, **kwargs): security_group_req = self.new_create_request('security-groups', data, fmt) if (kwargs.get('set_context') and 'tenant_id' in kwargs): # create a specific auth context for this request security_group_req.environ['neutron.context'] = ( context.Context('', kwargs['tenant_id'])) return security_group_req.get_response(self.ext_api) def _create_security_group(self, fmt, name, description, **kwargs): data = self._build_security_group(name, description, **kwargs) return self._create_security_group_response(fmt, data, **kwargs) def _build_security_group_rule( self, security_group_id, direction, proto, port_range_min=None, port_range_max=None, remote_ip_prefix=None, remote_group_id=None, tenant_id=test_db_base_plugin_v2.TEST_TENANT_ID, ethertype=const.IPv4): data = {'security_group_rule': {'security_group_id': security_group_id, 'direction': direction, 'protocol': proto, 'ethertype': ethertype, 'tenant_id': tenant_id}} if port_range_min: 
data['security_group_rule']['port_range_min'] = port_range_min if port_range_max: data['security_group_rule']['port_range_max'] = port_range_max if remote_ip_prefix: data['security_group_rule']['remote_ip_prefix'] = remote_ip_prefix if remote_group_id: data['security_group_rule']['remote_group_id'] = remote_group_id return data def _create_security_group_rule(self, fmt, rules, **kwargs): security_group_rule_req = self.new_create_request( 'security-group-rules', rules, fmt) if (kwargs.get('set_context') and 'tenant_id' in kwargs): # create a specific auth context for this request security_group_rule_req.environ['neutron.context'] = ( context.Context('', kwargs['tenant_id'])) elif kwargs.get('admin_context'): security_group_rule_req.environ['neutron.context'] = ( context.Context(user_id='admin', tenant_id='admin-tenant', is_admin=True)) return security_group_rule_req.get_response(self.ext_api) def _make_security_group(self, fmt, name, description, **kwargs): res = self._create_security_group(fmt, name, description, **kwargs) if res.status_int >= webob.exc.HTTPBadRequest.code: raise webob.exc.HTTPClientError(code=res.status_int) return self.deserialize(fmt, res) def _make_security_group_rule(self, fmt, rules, **kwargs): res = self._create_security_group_rule(self.fmt, rules) if res.status_int >= webob.exc.HTTPBadRequest.code: raise webob.exc.HTTPClientError(code=res.status_int) return self.deserialize(fmt, res) @contextlib.contextmanager def security_group(self, name='webservers', description='webservers', fmt=None): if not fmt: fmt = self.fmt security_group = self._make_security_group(fmt, name, description) yield security_group @contextlib.contextmanager def security_group_rule(self, security_group_id='4cd70774-cc67-4a87-9b39-7' 'd1db38eb087', direction='ingress', protocol=const.PROTO_NAME_TCP, port_range_min='22', port_range_max='22', remote_ip_prefix=None, remote_group_id=None, fmt=None, ethertype=const.IPv4): if not fmt: fmt = self.fmt rule = self._build_security_group_rule(security_group_id, direction, protocol, port_range_min, port_range_max, remote_ip_prefix, remote_group_id, ethertype=ethertype) security_group_rule = self._make_security_group_rule(self.fmt, rule) yield security_group_rule def _delete_default_security_group_egress_rules(self, security_group_id): """Deletes default egress rules given a security group ID.""" res = self._list( 'security-group-rules', query_params='security_group_id=%s' % security_group_id) for r in res['security_group_rules']: if (r['direction'] == 'egress' and not r['port_range_max'] and not r['port_range_min'] and not r['protocol'] and not r['remote_ip_prefix']): self._delete('security-group-rules', r['id']) def _assert_sg_rule_has_kvs(self, security_group_rule, expected_kvs): """Asserts that the sg rule has expected key/value pairs passed in as expected_kvs dictionary """ for k, v in expected_kvs.items(): self.assertEqual(v, security_group_rule[k]) class SecurityGroupTestPlugin(db_base_plugin_v2.NeutronDbPluginV2, securitygroups_db.SecurityGroupDbMixin): """Test plugin that implements necessary calls on create/delete port for associating ports with security groups. 
""" __native_pagination_support = True __native_sorting_support = True supported_extension_aliases = ["security-group"] def create_port(self, context, port): tenant_id = port['port']['tenant_id'] default_sg = self._ensure_default_security_group(context, tenant_id) if not validators.is_attr_set(port['port'].get(ext_sg.SECURITYGROUPS)): port['port'][ext_sg.SECURITYGROUPS] = [default_sg] with db_api.CONTEXT_WRITER.using(context): sgs = self._get_security_groups_on_port(context, port) port = super(SecurityGroupTestPlugin, self).create_port(context, port) self._process_port_create_security_group(context, port, sgs) return port def update_port(self, context, id, port): with db_api.CONTEXT_WRITER.using(context): if ext_sg.SECURITYGROUPS in port['port']: sgs = self._get_security_groups_on_port(context, port) port['port'][ext_sg.SECURITYGROUPS] = [ sg['id'] for sg in sgs] if sgs else None # delete the port binding and read it with the new rules self._delete_port_security_group_bindings(context, id) port['port']['id'] = id self._process_port_create_security_group( context, port['port'], sgs) port = super(SecurityGroupTestPlugin, self).update_port( context, id, port) return port def create_network(self, context, network): self._ensure_default_security_group(context, network['network']['tenant_id']) return super(SecurityGroupTestPlugin, self).create_network(context, network) def get_ports(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): sorts = sorts or [] neutron_lports = super(SecurityGroupTestPlugin, self).get_ports( context, filters, sorts=sorts, limit=limit, marker=marker, page_reverse=page_reverse) return neutron_lports class SecurityGroupDBTestCase(SecurityGroupsTestCase): def setUp(self, plugin=None, ext_mgr=None): self._backup = copy.deepcopy(ext_sg.RESOURCE_ATTRIBUTE_MAP) self.addCleanup(self._restore) plugin = plugin or DB_PLUGIN_KLASS ext_mgr = ext_mgr or SecurityGroupTestExtensionManager() super(SecurityGroupDBTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) def _restore(self): ext_sg.RESOURCE_ATTRIBUTE_MAP = self._backup class TestSecurityGroups(SecurityGroupDBTestCase): def test_create_security_group(self): name = 'webservers' description = 'my webservers' keys = [('name', name,), ('description', description)] with self.security_group(name, description) as security_group: for k, v, in keys: self.assertEqual(v, security_group['security_group'][k]) # Verify that default egress rules have been created sg_rules = security_group['security_group']['security_group_rules'] self.assertEqual(2, len(sg_rules)) v4_rules = [r for r in sg_rules if r['ethertype'] == const.IPv4] self.assertEqual(1, len(v4_rules)) v4_rule = v4_rules[0] expected = {'direction': 'egress', 'ethertype': const.IPv4, 'remote_group_id': None, 'remote_ip_prefix': None, 'protocol': None, 'port_range_max': None, 'port_range_min': None} self._assert_sg_rule_has_kvs(v4_rule, expected) v6_rules = [r for r in sg_rules if r['ethertype'] == const.IPv6] self.assertEqual(1, len(v6_rules)) v6_rule = v6_rules[0] expected = {'direction': 'egress', 'ethertype': const.IPv6, 'remote_group_id': None, 'remote_ip_prefix': None, 'protocol': None, 'port_range_max': None, 'port_range_min': None} self._assert_sg_rule_has_kvs(v6_rule, expected) def test_create_security_group_bulk(self): rule1 = self._build_security_group("sg_1", "sec_grp_1") rule2 = self._build_security_group("sg_2", "sec_grp_2") rules = {'security_groups': [rule1['security_group'], rule2['security_group']]} res = 
    def test_skip_duplicate_default_sg_error(self):
        num_called = [0]
        original_func = self.plugin.create_security_group

        def side_effect(context, security_group, default_sg):
            # can't always raise, or create_security_group will hang
            self.assertTrue(default_sg)
            self.assertLess(num_called[0], 2)
            num_called[0] += 1
            ret = original_func(context, security_group, default_sg)
            if num_called[0] == 1:
                return ret
            # make another call to cause an exception.
            # NOTE(yamamoto): raising the exception by ourselves
            # doesn't update the session state appropriately.
            self.assertRaises(exc.DBDuplicateEntry,
                              original_func, context, security_group,
                              default_sg)

        with mock.patch.object(SecurityGroupTestPlugin,
                               'create_security_group',
                               side_effect=side_effect):
            self.plugin.create_network(
                context.get_admin_context(),
                {'network': {'name': 'foo',
                             'admin_state_up': True,
                             'shared': False,
                             'tenant_id': 'bar'}})

    def test_update_security_group(self):
        with self.security_group() as sg:
            data = {'security_group': {'name': 'new_name',
                                       'description': 'new_desc'}}
            req = self.new_update_request('security-groups',
                                          data,
                                          sg['security_group']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
            self.assertEqual(data['security_group']['name'],
                             res['security_group']['name'])
            self.assertEqual(data['security_group']['description'],
                             res['security_group']['description'])

    def test_update_security_group_name_to_default_fail(self):
        with self.security_group() as sg:
            data = {'security_group': {'name': 'default',
                                       'description': 'new_desc'}}
            req = self.new_update_request('security-groups',
                                          data,
                                          sg['security_group']['id'])
            req.environ['neutron.context'] = context.Context('', 'somebody')
            res = req.get_response(self.ext_api)
            self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)

    def test_update_default_security_group_name_fail(self):
        with self.network():
            res = self.new_list_request('security-groups')
            sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
            data = {'security_group': {'name': 'new_name',
                                       'description': 'new_desc'}}
            req = self.new_update_request('security-groups',
                                          data,
                                          sg['security_groups'][0]['id'])
            req.environ['neutron.context'] = context.Context('', 'somebody')
            res = req.get_response(self.ext_api)
            self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)

    def test_update_default_security_group_with_description(self):
        with self.network():
            res = self.new_list_request('security-groups')
            sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
            data = {'security_group': {'description': 'new_desc'}}
            req = self.new_update_request('security-groups',
                                          data,
                                          sg['security_groups'][0]['id'])
            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
            self.assertEqual(data['security_group']['description'],
                             res['security_group']['description'])

    def test_update_security_group_with_max_name_length(self):
        with self.security_group() as sg:
            data = {'security_group': {'name': LONG_NAME_OK,
                                       'description': 'new_desc'}}
            req = self.new_update_request('security-groups',
                                          data,
                                          sg['security_group']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
            self.assertEqual(data['security_group']['name'],
                             res['security_group']['name'])
            self.assertEqual(data['security_group']['description'],
                             res['security_group']['description'])
    def test_update_security_group_with_too_long_name(self):
        with self.security_group() as sg:
            data = {'security_group': {'name': LONG_NAME_NG,
                                       'description': 'new_desc'}}
            req = self.new_update_request('security-groups',
                                          data,
                                          sg['security_group']['id'])
            res = req.get_response(self.ext_api)
            self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)

    def test_update_security_group_with_boolean_type_name(self):
        with self.security_group() as sg:
            data = {'security_group': {'name': True,
                                       'description': 'new_desc'}}
            req = self.new_update_request('security-groups',
                                          data,
                                          sg['security_group']['id'])
            res = req.get_response(self.ext_api)
            self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)

    def test_check_default_security_group_description(self):
        with self.network():
            res = self.new_list_request('security-groups')
            sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
            self.assertEqual('Default security group',
                             sg['security_groups'][0]['description'])

    def test_default_security_group(self):
        with self.network():
            res = self.new_list_request('security-groups')
            groups = self.deserialize(self.fmt,
                                      res.get_response(self.ext_api))
            self.assertEqual(1, len(groups['security_groups']))

    def test_create_default_security_group_fail(self):
        name = 'default'
        description = 'my webservers'
        res = self._create_security_group(self.fmt, name, description)
        self.deserialize(self.fmt, res)
        self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)

    def test_create_default_security_group_check_case_insensitive(self):
        name = 'DEFAULT'
        description = 'my webservers'
        res = self._create_security_group(self.fmt, name, description)
        self.deserialize(self.fmt, res)
        self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)

    def test_create_security_group_with_max_name_length(self):
        description = 'my webservers'
        res = self._create_security_group(self.fmt, LONG_NAME_OK, description)
        self.deserialize(self.fmt, res)
        self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)

    def test_create_security_group_with_too_long_name(self):
        description = 'my webservers'
        res = self._create_security_group(self.fmt, LONG_NAME_NG, description)
        self.deserialize(self.fmt, res)
        self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)

    def test_create_security_group_with_boolean_type_name(self):
        description = 'my webservers'
        res = self._create_security_group(self.fmt, True, description)
        self.deserialize(self.fmt, res)
        self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)

    def test_list_security_groups(self):
        with self.security_group(name='sg1', description='sg') as v1,\
                self.security_group(name='sg2', description='sg') as v2,\
                self.security_group(name='sg3', description='sg') as v3:
            security_groups = (v1, v2, v3)
            self._test_list_resources('security-group',
                                      security_groups,
                                      query_params='description=sg')

    def test_list_security_groups_with_sort(self):
        with self.security_group(name='sg1', description='sg') as sg1,\
                self.security_group(name='sg2', description='sg') as sg2,\
                self.security_group(name='sg3', description='sg') as sg3:
            self._test_list_with_sort('security-group',
                                      (sg3, sg2, sg1),
                                      [('name', 'desc')],
                                      query_params='description=sg')

    def test_list_security_groups_with_pagination(self):
        with self.security_group(name='sg1', description='sg') as sg1,\
                self.security_group(name='sg2', description='sg') as sg2,\
                self.security_group(name='sg3', description='sg') as sg3:
            self._test_list_with_pagination('security-group',
                                            (sg1, sg2, sg3),
                                            ('name', 'asc'), 2, 2,
                                            query_params='description=sg')
    def test_list_security_groups_with_pagination_reverse(self):
        with self.security_group(name='sg1', description='sg') as sg1,\
                self.security_group(name='sg2', description='sg') as sg2,\
                self.security_group(name='sg3', description='sg') as sg3:
            self._test_list_with_pagination_reverse(
                'security-group', (sg1, sg2, sg3), ('name', 'asc'), 2, 2,
                query_params='description=sg')

    def test_create_security_group_rule_ethertype_invalid_as_number(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            ethertype = 2
            rule = self._build_security_group_rule(
                security_group_id, 'ingress', const.PROTO_NAME_TCP, '22',
                '22', None, None, ethertype=ethertype)
            res = self._create_security_group_rule(self.fmt, rule)
            self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)

    def test_create_security_group_rule_ethertype_invalid_for_protocol(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            rule = self._build_security_group_rule(
                security_group_id, 'ingress', const.PROTO_NAME_IPV6_FRAG)
            res = self._create_security_group_rule(self.fmt, rule)
            self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)

    def test_create_security_group_rule_invalid_ip_prefix(self):
        name = 'webservers'
        description = 'my webservers'
        for bad_prefix in ['bad_ip', 256, "2001:db8:a::123/129",
                           '172.30./24']:
            with self.security_group(name, description) as sg:
                sg_id = sg['security_group']['id']
                remote_ip_prefix = bad_prefix
                rule = self._build_security_group_rule(
                    sg_id,
                    'ingress',
                    const.PROTO_NAME_TCP,
                    '22', '22',
                    remote_ip_prefix)
                res = self._create_security_group_rule(self.fmt, rule)
                self.assertEqual(webob.exc.HTTPBadRequest.code,
                                 res.status_int)

    def test_create_security_group_rule_invalid_ethertype_for_prefix(self):
        name = 'webservers'
        description = 'my webservers'
        test_addr = {'192.168.1.1/24': 'IPv6',
                     '2001:db8:1234::/48': 'IPv4',
                     '192.168.2.1/24': 'BadEthertype'}
        for remote_ip_prefix, ethertype in test_addr.items():
            with self.security_group(name, description) as sg:
                sg_id = sg['security_group']['id']
                rule = self._build_security_group_rule(
                    sg_id,
                    'ingress',
                    const.PROTO_NAME_TCP,
                    '22', '22',
                    remote_ip_prefix,
                    None,
                    ethertype=ethertype)
                res = self._create_security_group_rule(self.fmt, rule)
                self.assertEqual(webob.exc.HTTPBadRequest.code,
                                 res.status_int)

    def test_create_security_group_rule_with_unmasked_prefix(self):
        name = 'webservers'
        description = 'my webservers'
        addr = {'10.1.2.3': {'mask': '32', 'ethertype': 'IPv4'},
                'fe80::2677:3ff:fe7d:4c': {'mask': '128',
                                           'ethertype': 'IPv6'}}
        for ip in addr:
            with self.security_group(name, description) as sg:
                sg_id = sg['security_group']['id']
                ethertype = addr[ip]['ethertype']
                remote_ip_prefix = ip
                rule = self._build_security_group_rule(
                    sg_id,
                    'ingress',
                    const.PROTO_NAME_TCP,
                    '22', '22',
                    remote_ip_prefix,
                    None,
                    ethertype=ethertype)
                res = self._create_security_group_rule(self.fmt, rule)
                self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
                res_sg = self.deserialize(self.fmt, res)
                prefix = res_sg['security_group_rule']['remote_ip_prefix']
                self.assertEqual('%s/%s' % (ip, addr[ip]['mask']), prefix)

    def test_create_security_group_rule_tcp_protocol_as_number(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            protocol = const.PROTO_NUM_TCP  # TCP
            rule = self._build_security_group_rule(
                security_group_id, 'ingress', protocol, '22', '22')
            res = self._create_security_group_rule(self.fmt, rule)
            self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
    def test_create_security_group_rule_protocol_as_number(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            protocol = 2
            rule = self._build_security_group_rule(
                security_group_id, 'ingress', protocol)
            res = self._create_security_group_rule(self.fmt, rule)
            self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)

    def test_create_security_group_rule_protocol_as_number_with_port_bad(
            self):
        # When specifying ports, neither can be None
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            protocol = 6
            rule = self._build_security_group_rule(
                security_group_id, 'ingress', protocol, '70', None)
            res = self._create_security_group_rule(self.fmt, rule)
            self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)

    def test_create_security_group_rule_protocol_as_number_range(self):
        # This is a SG rule with a port range, but treated as a single
        # port since min/max are the same.
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            protocol = 6
            rule = self._build_security_group_rule(
                security_group_id, 'ingress', protocol, '70', '70')
            res = self._create_security_group_rule(self.fmt, rule)
            self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)

    def test_create_security_group_rule_protocol_as_number_port_bad(self):
        # Only certain protocols support a SG rule with a port
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            protocol = 111
            rule = self._build_security_group_rule(
                security_group_id, 'ingress', protocol, '70', '70')
            res = self._create_security_group_rule(self.fmt, rule)
            self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)

    def test_create_security_group_rule_case_insensitive(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            direction = "ingress"
            remote_ip_prefix = "10.0.0.0/24"
            protocol = 'TCP'
            port_range_min = 22
            port_range_max = 22
            ethertype = 'ipV4'
            with self.security_group_rule(security_group_id, direction,
                                          protocol, port_range_min,
                                          port_range_max,
                                          remote_ip_prefix,
                                          ethertype=ethertype) as rule:
                # the lower-case value will be returned
                self.assertEqual(protocol.lower(),
                                 rule['security_group_rule']['protocol'])
                self.assertEqual(const.IPv4,
                                 rule['security_group_rule']['ethertype'])
    def test_get_security_group(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            remote_group_id = sg['security_group']['id']
            res = self.new_show_request('security-groups', remote_group_id)
            security_group_id = sg['security_group']['id']
            direction = "ingress"
            remote_ip_prefix = "10.0.0.0/24"
            protocol = const.PROTO_NAME_TCP
            port_range_min = 22
            port_range_max = 22
            keys = [('remote_ip_prefix', remote_ip_prefix),
                    ('security_group_id', security_group_id),
                    ('direction', direction),
                    ('protocol', protocol),
                    ('port_range_min', port_range_min),
                    ('port_range_max', port_range_max)]
            with self.security_group_rule(security_group_id, direction,
                                          protocol, port_range_min,
                                          port_range_max,
                                          remote_ip_prefix):
                group = self.deserialize(
                    self.fmt, res.get_response(self.ext_api))
                sg_rule = group['security_group']['security_group_rules']
                self.assertEqual(remote_group_id,
                                 group['security_group']['id'])
                self.assertEqual(3, len(sg_rule))
                sg_rule = [r for r in sg_rule if r['direction'] == 'ingress']
                for k, v, in keys:
                    self.assertEqual(v, sg_rule[0][k])

    def test_get_security_group_empty_rules(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            remote_group_id = sg['security_group']['id']
            self._delete_default_security_group_egress_rules(
                remote_group_id)
            res = self.new_show_request('security-groups', remote_group_id)
            group = self.deserialize(
                self.fmt, res.get_response(self.ext_api))
            sg_rule = group['security_group']['security_group_rules']
            self.assertEqual(remote_group_id, group['security_group']['id'])
            self.assertEqual(0, len(sg_rule))

    def test_get_security_group_empty_rules_id_only(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            remote_group_id = sg['security_group']['id']
            self._delete_default_security_group_egress_rules(
                remote_group_id)
            res = self.new_show_request('security-groups', remote_group_id,
                                        fields=['id'])
            group = self.deserialize(
                self.fmt, res.get_response(self.ext_api))
            secgroup = group['security_group']
            self.assertFalse('security_group_rules' in secgroup)
            self.assertEqual(remote_group_id, group['security_group']['id'])

    # This test case checks that admins from a different tenant can add
    # rules as themselves. This is odd behavior, with some weird GET
    # semantics, but this test is checking that we don't break that old
    # behavior, at least until we make a conscious choice to do so.
    def test_create_security_group_rules_admin_tenant(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            # Add a couple normal rules
            rule = self._build_security_group_rule(
                sg['security_group']['id'], "ingress", const.PROTO_NAME_TCP,
                port_range_min=22, port_range_max=22,
                remote_ip_prefix="10.0.0.0/24",
                ethertype=const.IPv4)
            self._make_security_group_rule(self.fmt, rule)

            rule = self._build_security_group_rule(
                sg['security_group']['id'], "ingress", const.PROTO_NAME_TCP,
                port_range_min=22, port_range_max=22,
                remote_ip_prefix="10.0.1.0/24",
                ethertype=const.IPv4)
            self._make_security_group_rule(self.fmt, rule)

            # Let's add a rule as admin, with a different tenant_id. The
            # results of this call are arguably a bug, but it is past
            # behavior.
            rule = self._build_security_group_rule(
                sg['security_group']['id'], "ingress", const.PROTO_NAME_TCP,
                port_range_min=22, port_range_max=22,
                remote_ip_prefix="10.0.2.0/24",
                ethertype=const.IPv4,
                tenant_id='admin-tenant')
            self._make_security_group_rule(self.fmt, rule,
                                           admin_context=True)

            # Now, let's make sure all the rules are there, with their odd
            # tenant_id behavior.
            res = self.new_list_request('security-groups')
            sgs = self.deserialize(self.fmt, res.get_response(self.ext_api))
            for sg in sgs['security_groups']:
                if sg['name'] == "webservers":
                    rules = sg['security_group_rules']
                    self.assertEqual(5, len(rules))
                    self.assertNotEqual('admin-tenant',
                                        rules[3]['tenant_id'])
                    self.assertEqual('admin-tenant', rules[4]['tenant_id'])

    def test_get_security_group_on_port_from_wrong_tenant(self):
        plugin = directory.get_plugin()
        if not hasattr(plugin, '_get_security_groups_on_port'):
            self.skipTest("plugin doesn't use the mixin with this method")
        neutron_context = context.get_admin_context()
        res = self._create_security_group(self.fmt, 'webservers',
                                          'webservers',
                                          tenant_id='bad_tenant')
        sg1 = self.deserialize(self.fmt, res)
        with testtools.ExpectedException(ext_sg.SecurityGroupNotFound):
            plugin._get_security_groups_on_port(
                neutron_context,
                {'port': {'security_groups': [sg1['security_group']['id']],
                          'tenant_id': 'tenant'}}
            )

    def test_delete_security_group(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            remote_group_id = sg['security_group']['id']
            self._delete('security-groups', remote_group_id,
                         webob.exc.HTTPNoContent.code)

    def test_delete_default_security_group_admin(self):
        with self.network():
            res = self.new_list_request('security-groups')
            sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
            self._delete('security-groups', sg['security_groups'][0]['id'],
                         webob.exc.HTTPNoContent.code)

    def test_delete_default_security_group_nonadmin(self):
        with self.network():
            res = self.new_list_request('security-groups')
            sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
            neutron_context = context.Context(
                '', test_db_base_plugin_v2.TEST_TENANT_ID)
            self._delete('security-groups', sg['security_groups'][0]['id'],
                         webob.exc.HTTPConflict.code,
                         neutron_context=neutron_context)

    def test_security_group_list_creates_default_security_group(self):
        neutron_context = context.Context(
            '', test_db_base_plugin_v2.TEST_TENANT_ID)
        sg = self._list(
            'security-groups',
            neutron_context=neutron_context).get('security_groups')
        self.assertEqual(1, len(sg))

    def test_security_group_port_create_creates_default_security_group(self):
        res = self._create_network(self.fmt, 'net1', True,
                                   tenant_id='not_admin',
                                   set_context=True)
        net1 = self.deserialize(self.fmt, res)
        res = self._create_port(self.fmt, net1['network']['id'],
                                tenant_id='not_admin', set_context=True)
        sg = self._list('security-groups').get('security_groups')
        self.assertEqual(1, len(sg))
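    # The next test walks the four rules a fresh default security group is
    # expected to carry: an allow-all egress rule and an ingress rule whose
    # remote_group_id points back at the group itself, once each for IPv4
    # and IPv6.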
    def test_default_security_group_rules(self):
        with self.network():
            res = self.new_list_request('security-groups')
            groups = self.deserialize(self.fmt,
                                      res.get_response(self.ext_api))
            self.assertEqual(1, len(groups['security_groups']))
            security_group_id = groups['security_groups'][0]['id']
            res = self.new_list_request('security-group-rules')
            rules = self.deserialize(self.fmt,
                                     res.get_response(self.ext_api))
            self.assertEqual(4, len(rules['security_group_rules']))

            # Verify default rule for v4 egress
            sg_rules = rules['security_group_rules']
            rules = [
                r for r in sg_rules
                if r['direction'] == 'egress' and
                r['ethertype'] == const.IPv4
            ]
            self.assertEqual(1, len(rules))
            v4_egress = rules[0]

            expected = {'direction': 'egress',
                        'ethertype': const.IPv4,
                        'remote_group_id': None,
                        'remote_ip_prefix': None,
                        'protocol': None,
                        'port_range_max': None,
                        'port_range_min': None}
            self._assert_sg_rule_has_kvs(v4_egress, expected)

            # Verify default rule for v6 egress
            rules = [
                r for r in sg_rules
                if r['direction'] == 'egress' and
                r['ethertype'] == const.IPv6
            ]
            self.assertEqual(1, len(rules))
            v6_egress = rules[0]

            expected = {'direction': 'egress',
                        'ethertype': const.IPv6,
                        'remote_group_id': None,
                        'remote_ip_prefix': None,
                        'protocol': None,
                        'port_range_max': None,
                        'port_range_min': None}
            self._assert_sg_rule_has_kvs(v6_egress, expected)

            # Verify default rule for v4 ingress
            rules = [
                r for r in sg_rules
                if r['direction'] == 'ingress' and
                r['ethertype'] == const.IPv4
            ]
            self.assertEqual(1, len(rules))
            v4_ingress = rules[0]

            expected = {'direction': 'ingress',
                        'ethertype': const.IPv4,
                        'remote_group_id': security_group_id,
                        'remote_ip_prefix': None,
                        'protocol': None,
                        'port_range_max': None,
                        'port_range_min': None}
            self._assert_sg_rule_has_kvs(v4_ingress, expected)

            # Verify default rule for v6 ingress
            rules = [
                r for r in sg_rules
                if r['direction'] == 'ingress' and
                r['ethertype'] == const.IPv6
            ]
            self.assertEqual(1, len(rules))
            v6_ingress = rules[0]

            expected = {'direction': 'ingress',
                        'ethertype': const.IPv6,
                        'remote_group_id': security_group_id,
                        'remote_ip_prefix': None,
                        'protocol': None,
                        'port_range_max': None,
                        'port_range_min': None}
            self._assert_sg_rule_has_kvs(v6_ingress, expected)

    def test_create_security_group_rule_remote_ip_prefix(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            direction = "ingress"
            remote_ip_prefix = "10.0.0.0/24"
            protocol = const.PROTO_NAME_TCP
            port_range_min = 22
            port_range_max = 22
            keys = [('remote_ip_prefix', remote_ip_prefix),
                    ('security_group_id', security_group_id),
                    ('direction', direction),
                    ('protocol', protocol),
                    ('port_range_min', port_range_min),
                    ('port_range_max', port_range_max)]
            with self.security_group_rule(security_group_id, direction,
                                          protocol, port_range_min,
                                          port_range_max,
                                          remote_ip_prefix) as rule:
                for k, v, in keys:
                    self.assertEqual(v, rule['security_group_rule'][k])

    def test_create_security_group_rule_group_id(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            with self.security_group(name, description) as sg2:
                security_group_id = sg['security_group']['id']
                direction = "ingress"
                remote_group_id = sg2['security_group']['id']
                protocol = const.PROTO_NAME_TCP
                port_range_min = 22
                port_range_max = 22
                keys = [('remote_group_id', remote_group_id),
                        ('security_group_id', security_group_id),
                        ('direction', direction),
                        ('protocol', protocol),
                        ('port_range_min', port_range_min),
                        ('port_range_max', port_range_max)]
                with self.security_group_rule(security_group_id, direction,
                                              protocol, port_range_min,
                                              port_range_max,
                                              remote_group_id=remote_group_id
                                              ) as rule:
                    for k, v, in keys:
                        self.assertEqual(v, rule['security_group_rule'][k])

    def test_create_security_group_rule_port_range_min_max_limits(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            direction = "ingress"
            protocol = const.PROTO_NAME_TCP
            port_range_min = const.PORT_RANGE_MIN
            port_range_max = const.PORT_RANGE_MAX
            # The returned rule should have port range min/max as None
            keys = [('security_group_id', security_group_id),
                    ('direction', direction),
                    ('protocol', protocol),
                    ('port_range_min', None),
                    ('port_range_max', None)]
            with self.security_group_rule(security_group_id, direction,
                                          protocol, port_range_min,
                                          port_range_max) as rule:
                for k, v, in keys:
                    self.assertEqual(v, rule['security_group_rule'][k])
    def test_create_security_group_rule_icmp_with_type_and_code(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            direction = "ingress"
            remote_ip_prefix = "10.0.0.0/24"
            protocol = const.PROTO_NAME_ICMP
            # port_range_min (ICMP type) is greater than port_range_max
            # (ICMP code) in order to confirm min <= max port check is
            # not called for ICMP.
            port_range_min = 8
            port_range_max = 5
            keys = [('remote_ip_prefix', remote_ip_prefix),
                    ('security_group_id', security_group_id),
                    ('direction', direction),
                    ('protocol', protocol),
                    ('port_range_min', port_range_min),
                    ('port_range_max', port_range_max)]
            with self.security_group_rule(security_group_id, direction,
                                          protocol, port_range_min,
                                          port_range_max,
                                          remote_ip_prefix) as rule:
                for k, v, in keys:
                    self.assertEqual(v, rule['security_group_rule'][k])

    def test_create_security_group_rule_icmp_with_type_only(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            direction = "ingress"
            remote_ip_prefix = "10.0.0.0/24"
            protocol = const.PROTO_NAME_ICMP
            # ICMP type
            port_range_min = 8
            # ICMP code
            port_range_max = None
            keys = [('remote_ip_prefix', remote_ip_prefix),
                    ('security_group_id', security_group_id),
                    ('direction', direction),
                    ('protocol', protocol),
                    ('port_range_min', port_range_min),
                    ('port_range_max', port_range_max)]
            with self.security_group_rule(security_group_id, direction,
                                          protocol, port_range_min,
                                          port_range_max,
                                          remote_ip_prefix) as rule:
                for k, v, in keys:
                    self.assertEqual(v, rule['security_group_rule'][k])

    def test_create_security_group_rule_icmpv6_with_type_only(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            direction = "ingress"
            ethertype = const.IPv6
            remote_ip_prefix = "2001::f401:56ff:fefe:d3dc/128"
            protocol = const.PROTO_NAME_IPV6_ICMP
            # ICMPV6 type
            port_range_min = const.ICMPV6_TYPE_RA
            # ICMPV6 code
            port_range_max = None
            keys = [('remote_ip_prefix', remote_ip_prefix),
                    ('security_group_id', security_group_id),
                    ('direction', direction),
                    ('ethertype', ethertype),
                    ('protocol', protocol),
                    ('port_range_min', port_range_min),
                    ('port_range_max', port_range_max)]
            with self.security_group_rule(security_group_id, direction,
                                          protocol, port_range_min,
                                          port_range_max,
                                          remote_ip_prefix,
                                          None, None,
                                          ethertype) as rule:
                for k, v, in keys:
                    self.assertEqual(v, rule['security_group_rule'][k])

    def _test_create_security_group_rule_legacy_protocol_name(self, protocol):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            direction = "ingress"
            ethertype = const.IPv6
            remote_ip_prefix = "2001::f401:56ff:fefe:d3dc/128"
            keys = [('remote_ip_prefix', remote_ip_prefix),
                    ('security_group_id', security_group_id),
                    ('direction', direction),
                    ('ethertype', ethertype),
                    ('protocol', protocol)]
            with self.security_group_rule(security_group_id, direction,
                                          protocol, None, None,
                                          remote_ip_prefix,
                                          None, None,
                                          ethertype) as rule:
                for k, v, in keys:
                    # IPv6 ICMP protocol will always be 'ipv6-icmp'
                    if k == 'protocol':
                        v = const.PROTO_NAME_IPV6_ICMP
                    self.assertEqual(v, rule['security_group_rule'][k])

    def test_create_security_group_rule_ipv6_icmp_legacy_protocol_name(self):
        protocol = const.PROTO_NAME_ICMP
        self._test_create_security_group_rule_legacy_protocol_name(protocol)

    def test_create_security_group_rule_icmpv6_legacy_protocol_name(self):
        protocol = const.PROTO_NAME_IPV6_ICMP_LEGACY
        self._test_create_security_group_rule_legacy_protocol_name(protocol)
    def _test_create_security_group_rule_legacy_protocol_num(self, protocol):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            direction = "ingress"
            ethertype = const.IPv6
            remote_ip_prefix = "2001::f401:56ff:fefe:d3dc/128"
            keys = [('remote_ip_prefix', remote_ip_prefix),
                    ('security_group_id', security_group_id),
                    ('direction', direction),
                    ('ethertype', ethertype),
                    ('protocol', protocol)]
            with self.security_group_rule(security_group_id, direction,
                                          protocol, None, None,
                                          remote_ip_prefix,
                                          None, None,
                                          ethertype) as rule:
                for k, v, in keys:
                    # IPv6 ICMP protocol will always be '58'
                    if k == 'protocol':
                        v = str(const.PROTO_NUM_IPV6_ICMP)
                    self.assertEqual(v, rule['security_group_rule'][k])

    def test_create_security_group_rule_ipv6_icmp_legacy_protocol_num(self):
        protocol = const.PROTO_NUM_ICMP
        self._test_create_security_group_rule_legacy_protocol_num(protocol)

    def test_create_security_group_rule_ipv6_icmp_protocol_num(self):
        protocol = const.PROTO_NUM_IPV6_ICMP
        self._test_create_security_group_rule_legacy_protocol_num(protocol)

    def test_create_security_group_source_group_ip_and_ip_prefix(self):
        security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
        direction = "ingress"
        remote_ip_prefix = "10.0.0.0/24"
        protocol = const.PROTO_NAME_TCP
        port_range_min = 22
        port_range_max = 22
        remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
        rule = self._build_security_group_rule(security_group_id, direction,
                                               protocol, port_range_min,
                                               port_range_max,
                                               remote_ip_prefix,
                                               remote_group_id)
        res = self._create_security_group_rule(self.fmt, rule)
        self.deserialize(self.fmt, res)
        self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)

    def test_create_security_group_rule_bad_security_group_id(self):
        security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
        direction = "ingress"
        remote_ip_prefix = "10.0.0.0/24"
        protocol = const.PROTO_NAME_TCP
        port_range_min = 22
        port_range_max = 22
        rule = self._build_security_group_rule(security_group_id, direction,
                                               protocol, port_range_min,
                                               port_range_max,
                                               remote_ip_prefix)
        res = self._create_security_group_rule(self.fmt, rule)
        self.deserialize(self.fmt, res)
        self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)

    def test_create_security_group_rule_bad_tenant(self):
        with self.security_group() as sg:
            rule = {'security_group_rule':
                    {'security_group_id': sg['security_group']['id'],
                     'direction': 'ingress',
                     'protocol': const.PROTO_NAME_TCP,
                     'port_range_min': '22',
                     'port_range_max': '22',
                     'tenant_id': "bad_tenant"}}

            res = self._create_security_group_rule(self.fmt, rule,
                                                   tenant_id='bad_tenant',
                                                   set_context=True)
            self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)

    def test_create_security_group_rule_bad_tenant_remote_group_id(self):
        with self.security_group() as sg:
            res = self._create_security_group(self.fmt, 'webservers',
                                              'webservers',
                                              tenant_id='bad_tenant')
            sg2 = self.deserialize(self.fmt, res)
            rule = {'security_group_rule':
                    {'security_group_id': sg2['security_group']['id'],
                     'direction': 'ingress',
                     'protocol': const.PROTO_NAME_TCP,
                     'port_range_min': '22',
                     'port_range_max': '22',
                     'tenant_id': 'bad_tenant',
                     'remote_group_id': sg['security_group']['id']}}

            res = self._create_security_group_rule(self.fmt, rule,
                                                   tenant_id='bad_tenant',
                                                   set_context=True)
            self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)
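    # The "bad tenant" cases above and below all expect 404: a security
    # group that belongs to another tenant is simply not visible from the
    # requesting context, so the API behaves as if it does not exist rather
    # than revealing it.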
    def test_create_security_group_rule_bad_tenant_security_group_rule(self):
        with self.security_group() as sg:
            res = self._create_security_group(self.fmt, 'webservers',
                                              'webservers',
                                              tenant_id='bad_tenant')
            self.deserialize(self.fmt, res)
            rule = {'security_group_rule':
                    {'security_group_id': sg['security_group']['id'],
                     'direction': 'ingress',
                     'protocol': const.PROTO_NAME_TCP,
                     'port_range_min': '22',
                     'port_range_max': '22',
                     'tenant_id': 'bad_tenant'}}

            res = self._create_security_group_rule(self.fmt, rule,
                                                   tenant_id='bad_tenant',
                                                   set_context=True)
            self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)

    def test_create_security_group_rule_bad_remote_group_id(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            remote_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
            direction = "ingress"
            protocol = const.PROTO_NAME_TCP
            port_range_min = 22
            port_range_max = 22
            rule = self._build_security_group_rule(
                security_group_id, direction, protocol, port_range_min,
                port_range_max, remote_group_id=remote_group_id)
            res = self._create_security_group_rule(self.fmt, rule)
            self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)

    def test_create_security_group_rule_duplicate_rules(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            with self.security_group_rule(security_group_id) as sgr:
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress',
                    const.PROTO_NAME_TCP, '22', '22')
                res = self._create_security_group_rule(self.fmt, rule)
                self.deserialize(self.fmt, res)
                self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
                self.assertIn(sgr['security_group_rule']['id'],
                              res.json['NeutronError']['message'])

    def test_create_security_group_rule_duplicate_rules_diff_desc(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            with self.security_group_rule(security_group_id) as sgr:
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress',
                    const.PROTO_NAME_TCP, '22', '22')
                rule['security_group_rule']['description'] = "description"
                res = self._create_security_group_rule(self.fmt, rule)
                self.deserialize(self.fmt, res)
                self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
                self.assertIn(sgr['security_group_rule']['id'],
                              res.json['NeutronError']['message'])

    def test_create_security_group_rule_duplicate_rules_proto_name_num(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            with self.security_group_rule(security_group_id):
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress',
                    const.PROTO_NAME_TCP, '22', '22')
                self._create_security_group_rule(self.fmt, rule)
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress',
                    const.PROTO_NUM_TCP, '22', '22')
                res = self._create_security_group_rule(self.fmt, rule)
                self.deserialize(self.fmt, res)
                self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)

    def test_create_security_group_rule_duplicate_rules_proto_num_name(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            with self.security_group_rule(security_group_id):
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress',
                    const.PROTO_NUM_UDP, '50', '100')
                self._create_security_group_rule(self.fmt, rule)
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress',
                    const.PROTO_NAME_UDP, '50', '100')
                res = self._create_security_group_rule(self.fmt, rule)
                self.deserialize(self.fmt, res)
                self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)

    def test_create_security_group_rule_min_port_greater_max(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            with self.security_group_rule(security_group_id):
                for protocol in [const.PROTO_NAME_TCP, const.PROTO_NAME_UDP,
                                 const.PROTO_NUM_TCP, const.PROTO_NUM_UDP]:
                    rule = self._build_security_group_rule(
                        sg['security_group']['id'],
                        'ingress', protocol, '50', '22')
                    res = self._create_security_group_rule(self.fmt, rule)
                    self.deserialize(self.fmt, res)
                    self.assertEqual(webob.exc.HTTPBadRequest.code,
                                     res.status_int)

    def test_create_security_group_rule_ports_but_no_protocol(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            with self.security_group_rule(security_group_id):
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress', None, '22', '22')
                res = self._create_security_group_rule(self.fmt, rule)
                self.deserialize(self.fmt, res)
                self.assertEqual(webob.exc.HTTPBadRequest.code,
                                 res.status_int)

    def test_create_security_group_rule_port_range_min_only(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            with self.security_group_rule(security_group_id):
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress',
                    const.PROTO_NAME_TCP, '22', None)
                res = self._create_security_group_rule(self.fmt, rule)
                self.deserialize(self.fmt, res)
                self.assertEqual(webob.exc.HTTPBadRequest.code,
                                 res.status_int)

    def test_create_security_group_rule_port_range_max_only(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            with self.security_group_rule(security_group_id):
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress',
                    const.PROTO_NAME_TCP, None, '22')
                res = self._create_security_group_rule(self.fmt, rule)
                self.deserialize(self.fmt, res)
                self.assertEqual(webob.exc.HTTPBadRequest.code,
                                 res.status_int)
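    # For ICMP rules, port_range_min carries the ICMP type and
    # port_range_max the ICMP code (see the icmp_with_type_and_code test
    # above), so the next two tests bound both fields at 255.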
    def test_create_security_group_rule_icmp_type_too_big(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            with self.security_group_rule(security_group_id):
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress',
                    const.PROTO_NAME_ICMP, '256', None)
                res = self._create_security_group_rule(self.fmt, rule)
                self.deserialize(self.fmt, res)
                self.assertEqual(webob.exc.HTTPBadRequest.code,
                                 res.status_int)

    def test_create_security_group_rule_icmp_code_too_big(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            with self.security_group_rule(security_group_id):
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress',
                    const.PROTO_NAME_ICMP, '8', '256')
                res = self._create_security_group_rule(self.fmt, rule)
                self.deserialize(self.fmt, res)
                self.assertEqual(webob.exc.HTTPBadRequest.code,
                                 res.status_int)

    def test_create_security_group_rule_icmp_with_code_only(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            with self.security_group_rule(security_group_id):
                for code in ['2', '0']:
                    rule = self._build_security_group_rule(
                        sg['security_group']['id'], 'ingress',
                        const.PROTO_NAME_ICMP, None, code)
                    res = self._create_security_group_rule(self.fmt, rule)
                    self.deserialize(self.fmt, res)
                    self.assertEqual(webob.exc.HTTPBadRequest.code,
                                     res.status_int)

    def test_list_ports_security_group(self):
        with self.network() as n:
            with self.subnet(n):
                self._create_port(self.fmt, n['network']['id'])
                req = self.new_list_request('ports')
                res = req.get_response(self.api)
                ports = self.deserialize(self.fmt, res)
                port = ports['ports'][0]
                self.assertEqual(1, len(port[ext_sg.SECURITYGROUPS]))
                self._delete('ports', port['id'])

    def test_list_security_group_rules(self):
        with self.security_group(name='sg') as sg:
            security_group_id = sg['security_group']['id']
            with self.security_group_rule(security_group_id,
                                          direction='egress',
                                          port_range_min=22,
                                          port_range_max=22) as sgr1,\
                    self.security_group_rule(security_group_id,
                                             direction='egress',
                                             port_range_min=23,
                                             port_range_max=23) as sgr2,\
                    self.security_group_rule(security_group_id,
                                             direction='egress',
                                             port_range_min=24,
                                             port_range_max=24) as sgr3:
                # Delete default rules as they would fail the following
                # assertion at the end.
                self._delete_default_security_group_egress_rules(
                    security_group_id)
                q = ('direction=egress&security_group_id=' +
                     security_group_id)
                self._test_list_resources('security-group-rule',
                                          [sgr1, sgr2, sgr3],
                                          query_params=q)

    def test_list_security_group_rules_with_sort(self):
        with self.security_group(name='sg') as sg:
            security_group_id = sg['security_group']['id']
            with self.security_group_rule(security_group_id,
                                          direction='egress',
                                          port_range_min=22,
                                          port_range_max=22) as sgr1,\
                    self.security_group_rule(security_group_id,
                                             direction='egress',
                                             port_range_min=23,
                                             port_range_max=23) as sgr2,\
                    self.security_group_rule(security_group_id,
                                             direction='egress',
                                             port_range_min=24,
                                             port_range_max=24) as sgr3:
                # Delete default rules as they would fail the following
                # assertion at the end.
                self._delete_default_security_group_egress_rules(
                    security_group_id)
                q = ('direction=egress&security_group_id=' +
                     security_group_id)
                self._test_list_with_sort('security-group-rule',
                                          (sgr3, sgr2, sgr1),
                                          [('port_range_max', 'desc')],
                                          query_params=q)

    def test_list_security_group_rules_with_pagination(self):
        with self.security_group(name='sg') as sg:
            security_group_id = sg['security_group']['id']
            with self.security_group_rule(security_group_id,
                                          direction='egress',
                                          port_range_min=22,
                                          port_range_max=22) as sgr1,\
                    self.security_group_rule(security_group_id,
                                             direction='egress',
                                             port_range_min=23,
                                             port_range_max=23) as sgr2,\
                    self.security_group_rule(security_group_id,
                                             direction='egress',
                                             port_range_min=24,
                                             port_range_max=24) as sgr3:
                # Delete default rules as they would fail the following
                # assertion at the end.
                self._delete_default_security_group_egress_rules(
                    security_group_id)
                q = ('direction=egress&security_group_id=' +
                     security_group_id)
                self._test_list_with_pagination(
                    'security-group-rule', (sgr3, sgr2, sgr1),
                    ('port_range_max', 'desc'), 2, 2,
                    query_params=q)

    def test_list_security_group_rules_with_pagination_reverse(self):
        with self.security_group(name='sg') as sg:
            security_group_id = sg['security_group']['id']
            with self.security_group_rule(security_group_id,
                                          direction='egress',
                                          port_range_min=22,
                                          port_range_max=22) as sgr1,\
                    self.security_group_rule(security_group_id,
                                             direction='egress',
                                             port_range_min=23,
                                             port_range_max=23) as sgr2,\
                    self.security_group_rule(security_group_id,
                                             direction='egress',
                                             port_range_min=24,
                                             port_range_max=24) as sgr3:
                self._test_list_with_pagination_reverse(
                    'security-group-rule', (sgr3, sgr2, sgr1),
                    ('port_range_max', 'desc'), 2, 2,
                    query_params='direction=egress')

    def test_create_port_with_multiple_security_groups(self):
        with self.network() as n:
            with self.subnet(n):
                with self.security_group() as sg1:
                    with self.security_group() as sg2:
                        res = self._create_port(
                            self.fmt, n['network']['id'],
                            security_groups=[sg1['security_group']['id'],
                                             sg2['security_group']['id']])
                        port = self.deserialize(self.fmt, res)
                        self.assertEqual(2, len(
                            port['port'][ext_sg.SECURITYGROUPS]))
                        self._delete('ports', port['port']['id'])

    def test_create_port_with_no_security_groups(self):
        with self.network() as n:
            with self.subnet(n):
                res = self._create_port(self.fmt, n['network']['id'],
                                        security_groups=[])
                port = self.deserialize(self.fmt, res)
                self.assertEqual([], port['port'][ext_sg.SECURITYGROUPS])

    def test_update_port_with_security_group(self):
        with self.network() as n:
            with self.subnet(n):
                with self.security_group() as sg:
                    res = self._create_port(self.fmt, n['network']['id'])
                    port = self.deserialize(self.fmt, res)

                    data = {'port': {'fixed_ips': port['port']['fixed_ips'],
                                     'name': port['port']['name'],
                                     ext_sg.SECURITYGROUPS:
                                     [sg['security_group']['id']]}}

                    req = self.new_update_request('ports', data,
                                                  port['port']['id'])
                    res = self.deserialize(self.fmt,
                                           req.get_response(self.api))
                    self.assertEqual(sg['security_group']['id'],
                                     res['port'][ext_sg.SECURITYGROUPS][0])

                    # Test update port without security group
                    data = {'port': {'fixed_ips': port['port']['fixed_ips'],
                                     'name': port['port']['name']}}

                    req = self.new_update_request('ports', data,
                                                  port['port']['id'])
                    res = self.deserialize(self.fmt,
                                           req.get_response(self.api))
                    self.assertEqual(sg['security_group']['id'],
                                     res['port'][ext_sg.SECURITYGROUPS][0])

                    self._delete('ports', port['port']['id'])

    def test_update_port_with_multiple_security_groups(self):
        with self.network() as n:
            with self.subnet(n) as s:
                with self.port(s) as port:
                    with self.security_group() as sg1:
                        with self.security_group() as sg2:
                            data = {'port': {ext_sg.SECURITYGROUPS:
                                             [sg1['security_group']['id'],
                                              sg2['security_group']['id']]}}
                            req = self.new_update_request(
                                'ports', data, port['port']['id'])
                            port = self.deserialize(
                                self.fmt, req.get_response(self.api))
                            self.assertEqual(
                                2, len(port['port'][ext_sg.SECURITYGROUPS]))

    def test_update_port_remove_security_group_empty_list(self):
        with self.network() as n:
            with self.subnet(n):
                with self.security_group() as sg:
                    res = self._create_port(self.fmt, n['network']['id'],
                                            security_groups=(
                                                [sg['security_group']['id']]))
                    port = self.deserialize(self.fmt, res)

                    data = {'port': {'fixed_ips': port['port']['fixed_ips'],
                                     'name': port['port']['name'],
                                     'security_groups': []}}
                    req = self.new_update_request('ports', data,
                                                  port['port']['id'])
                    res = self.deserialize(self.fmt,
                                           req.get_response(self.api))
                    self.assertEqual([], res['port'].get(
                        ext_sg.SECURITYGROUPS))
                    self._delete('ports', port['port']['id'])
    def test_update_port_remove_security_group_none(self):
        with self.network() as n:
            with self.subnet(n):
                with self.security_group() as sg:
                    res = self._create_port(self.fmt, n['network']['id'],
                                            security_groups=(
                                                [sg['security_group']['id']]))
                    port = self.deserialize(self.fmt, res)

                    data = {'port': {'fixed_ips': port['port']['fixed_ips'],
                                     'name': port['port']['name'],
                                     'security_groups': None}}
                    req = self.new_update_request('ports', data,
                                                  port['port']['id'])
                    res = self.deserialize(self.fmt,
                                           req.get_response(self.api))
                    self.assertEqual([], res['port'].get(
                        ext_sg.SECURITYGROUPS))
                    self._delete('ports', port['port']['id'])

    def test_update_port_with_invalid_type_in_security_groups_param(self):
        with self.network() as n:
            with self.subnet(n):
                with self.security_group() as sg:
                    res = self._create_port(self.fmt, n['network']['id'],
                                            security_groups=(
                                                [sg['security_group']['id']]))
                    port = self.deserialize(self.fmt, res)

                    data = {'port': {'fixed_ips': port['port']['fixed_ips'],
                                     'name': port['port']['name'],
                                     'security_groups': True}}
                    req = self.new_update_request('ports', data,
                                                  port['port']['id'])
                    res = req.get_response(self.api)
                    self.assertEqual(webob.exc.HTTPBadRequest.code,
                                     res.status_int)

    def test_create_port_with_bad_security_group(self):
        with self.network() as n:
            with self.subnet(n):
                res = self._create_port(self.fmt, n['network']['id'],
                                        security_groups=['bad_id'])
                self.deserialize(self.fmt, res)
                self.assertEqual(webob.exc.HTTPBadRequest.code,
                                 res.status_int)

    def test_create_port_with_invalid_type_in_security_groups_param(self):
        with self.network() as n:
            with self.subnet(n):
                res = self._create_port(self.fmt, n['network']['id'],
                                        security_groups=True)
                self.assertEqual(webob.exc.HTTPBadRequest.code,
                                 res.status_int)

    def test_create_delete_security_group_port_in_use(self):
        with self.network() as n:
            with self.subnet(n):
                with self.security_group() as sg:
                    res = self._create_port(self.fmt, n['network']['id'],
                                            security_groups=(
                                                [sg['security_group']['id']]))
                    port = self.deserialize(self.fmt, res)
                    self.assertEqual(sg['security_group']['id'],
                                     port['port'][ext_sg.SECURITYGROUPS][0])
                    # try to delete security group that's in use
                    self._delete('security-groups',
                                 sg['security_group']['id'],
                                 webob.exc.HTTPConflict.code)
                    # delete the blocking port
                    self._delete('ports', port['port']['id'])

    def test_create_security_group_rule_bulk_native(self):
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk "
                          "security_group_rule create")
        with self.security_group() as sg:
            rule1 = self._build_security_group_rule(
                sg['security_group']['id'], 'ingress',
                const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
            rule2 = self._build_security_group_rule(
                sg['security_group']['id'], 'ingress',
                const.PROTO_NAME_TCP, '23', '23', '10.0.0.1/24')
            rules = {'security_group_rules': [rule1['security_group_rule'],
                                              rule2['security_group_rule']]}
            res = self._create_security_group_rule(self.fmt, rules)
            ret = self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
            self.assertEqual(2, len(ret['security_group_rules']))
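    # The emulated-bulk tests below patch hasattr() so that the API layer
    # cannot see the plugin's __native_bulk_support attribute; the request
    # is then served by the generic code path that emulates a bulk create
    # with a sequence of single creates.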
    def test_create_security_group_rule_bulk_emulated(self):
        real_has_attr = hasattr

        # ensures the API chooses the emulation code path
        def fakehasattr(item, attr):
            if attr.endswith('__native_bulk_support'):
                return False
            return real_has_attr(item, attr)

        with mock.patch('six.moves.builtins.hasattr',
                        new=fakehasattr):
            with self.security_group() as sg:
                rule1 = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress',
                    const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
                rule2 = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress',
                    const.PROTO_NAME_TCP, '23', '23', '10.0.0.1/24')
                rules = {'security_group_rules':
                         [rule1['security_group_rule'],
                          rule2['security_group_rule']]}
                res = self._create_security_group_rule(self.fmt, rules)
                self.deserialize(self.fmt, res)
                self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)

    def test_create_security_group_rule_allow_all_ipv4(self):
        with self.security_group() as sg:
            rule = {'security_group_id': sg['security_group']['id'],
                    'direction': 'ingress',
                    'ethertype': const.IPv4,
                    'tenant_id': test_db_base_plugin_v2.TEST_TENANT_ID}

            res = self._create_security_group_rule(
                self.fmt, {'security_group_rule': rule})
            rule = self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)

    def test_create_security_group_rule_allow_all_ipv4_v6_bulk(self):
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk "
                          "security_group_rule create")
        with self.security_group() as sg:
            rule_v4 = {'security_group_id': sg['security_group']['id'],
                       'direction': 'ingress',
                       'ethertype': const.IPv4,
                       'tenant_id': test_db_base_plugin_v2.TEST_TENANT_ID}
            rule_v6 = {'security_group_id': sg['security_group']['id'],
                       'direction': 'ingress',
                       'ethertype': const.IPv6,
                       'tenant_id': test_db_base_plugin_v2.TEST_TENANT_ID}

            rules = {'security_group_rules': [rule_v4, rule_v6]}
            res = self._create_security_group_rule(self.fmt, rules)
            self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)

    def test_create_security_group_rule_duplicate_rule_in_post(self):
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk "
                          "security_group_rule create")
        with self.security_group() as sg:
            rule = self._build_security_group_rule(
                sg['security_group']['id'], 'ingress',
                const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
            rules = {'security_group_rules': [rule['security_group_rule'],
                                              rule['security_group_rule']]}
            res = self._create_security_group_rule(self.fmt, rules)
            rule = self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)

    def test_create_security_group_rule_duplicate_rule_in_post_emulated(self):
        real_has_attr = hasattr

        # ensures the API chooses the emulation code path
        def fakehasattr(item, attr):
            if attr.endswith('__native_bulk_support'):
                return False
            return real_has_attr(item, attr)

        with mock.patch('six.moves.builtins.hasattr',
                        new=fakehasattr):
            with self.security_group() as sg:
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress',
                    const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
                rules = {'security_group_rules':
                         [rule['security_group_rule'],
                          rule['security_group_rule']]}
                res = self._create_security_group_rule(self.fmt, rules)
                rule = self.deserialize(self.fmt, res)
                self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)

    def test_create_security_group_rule_duplicate_rule_db(self):
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk "
                          "security_group_rule create")
        with self.security_group() as sg:
            rule = self._build_security_group_rule(
                sg['security_group']['id'], 'ingress',
                const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
            rules = {'security_group_rules': [rule]}
            self._create_security_group_rule(self.fmt, rules)
            res = self._create_security_group_rule(self.fmt, rules)
            rule = self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
    def test_create_security_group_rule_duplicate_rule_db_emulated(self):
        real_has_attr = hasattr

        # ensures the API chooses the emulation code path
        def fakehasattr(item, attr):
            if attr.endswith('__native_bulk_support'):
                return False
            return real_has_attr(item, attr)

        with mock.patch('six.moves.builtins.hasattr',
                        new=fakehasattr):
            with self.security_group() as sg:
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress',
                    const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
                rules = {'security_group_rules': [rule]}
                self._create_security_group_rule(self.fmt, rules)
                res = self._create_security_group_rule(self.fmt, rule)
                self.deserialize(self.fmt, res)
                self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)

    def test_create_security_groups_native_quotas(self):
        quota = 1
        cfg.CONF.set_override('quota_security_group', quota, group='QUOTAS')
        name = 'quota_test'
        description = 'quota_test'
        res = self._create_security_group(self.fmt, name, description)
        self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
        res = self._create_security_group(self.fmt, name, description)
        self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)

    def test_create_security_group_rules_native_quotas(self):
        name = 'quota_test'
        description = 'quota_test'
        with self.security_group(name, description) as sg:
            # avoid the number of default security group rules
            sgr = self._list('security-group-rules').get(
                'security_group_rules')
            quota = len(sgr) + 1
            cfg.CONF.set_override(
                'quota_security_group_rule', quota, group='QUOTAS')
            security_group_id = sg['security_group']['id']
            rule = self._build_security_group_rule(
                security_group_id, 'ingress', const.PROTO_NAME_TCP,
                '22', '22')
            res = self._create_security_group_rule(self.fmt, rule)
            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
            rule = self._build_security_group_rule(
                security_group_id, 'egress', const.PROTO_NAME_TCP,
                '22', '22')
            res = self._create_security_group_rule(self.fmt, rule)
            self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)

    def test_create_security_group_rule_different_security_group_ids(self):
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk "
                          "security_group_rule create")
        with self.security_group() as sg1:
            with self.security_group() as sg2:
                rule1 = self._build_security_group_rule(
                    sg1['security_group']['id'], 'ingress',
                    const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
                rule2 = self._build_security_group_rule(
                    sg2['security_group']['id'], 'ingress',
                    const.PROTO_NAME_TCP, '23', '23', '10.0.0.1/24')

                rules = {'security_group_rules':
                         [rule1['security_group_rule'],
                          rule2['security_group_rule']]}
                res = self._create_security_group_rule(self.fmt, rules)
                self.deserialize(self.fmt, res)
                self.assertEqual(webob.exc.HTTPBadRequest.code,
                                 res.status_int)

    def test_create_security_group_rule_with_invalid_ethertype(self):
        security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
        direction = "ingress"
        remote_ip_prefix = "10.0.0.0/24"
        protocol = const.PROTO_NAME_TCP
        port_range_min = 22
        port_range_max = 22
        remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
        rule = self._build_security_group_rule(security_group_id, direction,
                                               protocol, port_range_min,
                                               port_range_max,
                                               remote_ip_prefix,
                                               remote_group_id,
                                               ethertype='IPv5')
        res = self._create_security_group_rule(self.fmt, rule)
        self.deserialize(self.fmt, res)
        self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)

    def test_create_security_group_rule_with_invalid_protocol(self):
        security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
"4cd70774-cc67-4a87-9b39-7d1db38eb087" direction = "ingress" remote_ip_prefix = "10.0.0.0/24" protocol = 'tcp/ip' port_range_min = 22 port_range_max = 22 remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087" rule = self._build_security_group_rule(security_group_id, direction, protocol, port_range_min, port_range_max, remote_ip_prefix, remote_group_id) res = self._create_security_group_rule(self.fmt, rule) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_create_security_group_rule_with_invalid_tcp_or_udp_protocol(self): security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087" direction = "ingress" remote_ip_prefix = "10.0.0.0/24" protocol = 'tcp' port_range_min = 0 port_range_max = 80 remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087" rule = self._build_security_group_rule(security_group_id, direction, protocol, port_range_min, port_range_max, remote_ip_prefix, remote_group_id) res = self._create_security_group_rule(self.fmt, rule) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_create_port_with_non_uuid(self): with self.network() as n: with self.subnet(n): res = self._create_port(self.fmt, n['network']['id'], security_groups=['not_valid']) self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_create_security_group_rule_with_specific_id(self): neutron_context = context.Context( '', test_db_base_plugin_v2.TEST_TENANT_ID) specified_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087" with self.security_group() as sg: rule = self._build_security_group_rule( sg['security_group']['id'], 'ingress', const.PROTO_NUM_TCP) rule['security_group_rule'].update({'id': specified_id, 'port_range_min': None, 'port_range_max': None, 'remote_ip_prefix': None, 'remote_group_id': None}) result = self.plugin.create_security_group_rule( neutron_context, rule) self.assertEqual(specified_id, result['id']) class TestConvertIPPrefixToCIDR(base.BaseTestCase): def test_convert_bad_ip_prefix_to_cidr(self): for val in ['bad_ip', 256, "2001:db8:a::123/129"]: self.assertRaises(exceptions.InvalidCIDR, ext_sg.convert_ip_prefix_to_cidr, val) self.assertIsNone(ext_sg.convert_ip_prefix_to_cidr(None)) def test_convert_ip_prefix_no_netmask_to_cidr(self): addr = {'10.1.2.3': '32', 'fe80::2677:3ff:fe7d:4c': '128'} for k, v in addr.items(): self.assertEqual('%s/%s' % (k, v), ext_sg.convert_ip_prefix_to_cidr(k)) def test_convert_ip_prefix_with_netmask_to_cidr(self): addresses = ['10.1.0.0/16', '10.1.2.3/32', '2001:db8:1234::/48'] for addr in addresses: self.assertEqual(addr, ext_sg.convert_ip_prefix_to_cidr(addr)) class TestConvertProtocol(base.BaseTestCase): def test_convert_numeric_protocol(self): self.assertIsInstance(ext_sg.convert_protocol('2'), str) def test_convert_bad_protocol(self): for val in ['bad', '256', '-1']: self.assertRaises(ext_sg.SecurityGroupRuleInvalidProtocol, ext_sg.convert_protocol, val) def test_convert_numeric_protocol_to_string(self): self.assertIsInstance(ext_sg.convert_protocol(2), str) class TestConvertEtherType(base.BaseTestCase): def test_convert_unsupported_ethertype(self): for val in ['ip', 'ip4', 'ip6', '']: self.assertRaises(ext_sg.SecurityGroupRuleInvalidEtherType, ext_sg.convert_ethertype_to_case_insensitive, val) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 
neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_segment.py

# Copyright (c) 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy

import mock
import netaddr
from neutron_lib.api.definitions import ip_allocation as ipalloc_apidef
from neutron_lib.api.definitions import l2_adjacency as l2adj_apidef
from neutron_lib.api.definitions import port as port_apidef
from neutron_lib.api.definitions import portbindings
from neutron_lib.api.definitions import segment as seg_apidef
from neutron_lib.callbacks import events
from neutron_lib.callbacks import exceptions
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants
from neutron_lib import context
from neutron_lib import exceptions as n_exc
from neutron_lib.exceptions import placement as placement_exc
from neutron_lib.plugins import directory
from novaclient import exceptions as nova_exc
from oslo_config import cfg
from oslo_utils import uuidutils
import webob.exc

from neutron.conf.plugins.ml2 import config as ml2_config
from neutron.conf.plugins.ml2.drivers import driver_type
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import db_base_plugin_v2
from neutron.db import portbindings_db
from neutron.db import segments_db
from neutron.extensions import segment as ext_segment
from neutron.extensions import standardattrdescription as ext_stddesc
from neutron.objects import network
from neutron.services.segments import db
from neutron.services.segments import exceptions as segment_exc
from neutron.services.segments import plugin as seg_plugin
from neutron.tests.common import helpers
from neutron.tests.unit.db import test_db_base_plugin_v2

SERVICE_PLUGIN_KLASS = 'neutron.services.segments.plugin.Plugin'
TEST_PLUGIN_KLASS = (
    'neutron.tests.unit.extensions.test_segment.SegmentTestPlugin')
DHCP_HOSTA = 'dhcp-host-a'
DHCP_HOSTB = 'dhcp-host-b'
HTTP_NOT_FOUND = 404


class SegmentTestExtensionManager(object):

    def get_resources(self):
        ext_segment.Segment().update_attributes_map(
            {ext_segment.SEGMENTS: ext_stddesc.DESCRIPTION_BODY})
        return ext_segment.Segment.get_resources()

    def get_actions(self):
        return []

    def get_request_extensions(self):
        return []


class SegmentTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):

    def setUp(self, plugin=None):
        # Remove MissingAuthPlugin exception from logs
        self.patch_notifier = mock.patch(
            'neutron.notifiers.batch_notifier.BatchNotifier._notify')
        self.patch_notifier.start()
        if not plugin:
            plugin = TEST_PLUGIN_KLASS
        service_plugins = {'segments_plugin_name': SERVICE_PLUGIN_KLASS}
        cfg.CONF.set_override('service_plugins', [SERVICE_PLUGIN_KLASS])
        ext_mgr = SegmentTestExtensionManager()
        super(SegmentTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr,
                                           service_plugins=service_plugins)
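    # The helpers below follow the usual create/make pattern of these
    # unit tests: _create_segment returns the raw API response,
    # _make_segment deserializes it and raises HTTPClientError on
    # failure, and segment() fills in a default network_type.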
    def _create_segment(self, fmt, expected_res_status=None, **kwargs):
        segment = {'segment': {}}
        for k, v in kwargs.items():
            segment['segment'][k] = None if v is None else str(v)

        segment_req = self.new_create_request(
            'segments', segment, fmt)
        segment_res = segment_req.get_response(self.ext_api)
        if expected_res_status:
            self.assertEqual(segment_res.status_int, expected_res_status)
        return segment_res

    def _make_segment(self, fmt, **kwargs):
        res = self._create_segment(fmt, **kwargs)
        if res.status_int >= webob.exc.HTTPClientError.code:
            res.charset = 'utf8'
            raise webob.exc.HTTPClientError(
                code=res.status_int, explanation=str(res))
        return self.deserialize(fmt, res)

    def segment(self, **kwargs):
        kwargs.setdefault('network_type', 'net_type')
        return self._make_segment(
            self.fmt, tenant_id=self._tenant_id, **kwargs)

    def _test_create_segment(self, expected=None, **kwargs):
        keys = kwargs.copy()
        segment = self.segment(**keys)
        self._validate_resource(segment, keys, 'segment')
        if expected:
            self._compare_resource(segment, expected, 'segment')
        return segment


class SegmentTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
                        portbindings_db.PortBindingMixin,
                        db.SegmentDbMixin):
    __native_pagination_support = True
    __native_sorting_support = True

    supported_extension_aliases = [seg_apidef.ALIAS, portbindings.ALIAS,
                                   ipalloc_apidef.ALIAS]

    def get_plugin_description(self):
        return "Network Segments"

    @classmethod
    def get_plugin_type(cls):
        return "segments"

    def create_port(self, context, port):
        port_dict = super(SegmentTestPlugin, self).create_port(context, port)
        self._process_portbindings_create_and_update(
            context, port['port'], port_dict)
        return port_dict

    def update_port(self, context, id, port):
        port_dict = super(SegmentTestPlugin, self).update_port(
            context, id, port)
        self._process_portbindings_create_and_update(
            context, port['port'], port_dict)
        return port_dict


class TestSegmentNameDescription(SegmentTestCase):

    def setUp(self):
        super(TestSegmentNameDescription, self).setUp()
        with self.network() as network:
            self.network = network['network']

    def _test_create_segment(self, expected=None, **kwargs):
        for d in (kwargs, expected):
            if d is None:
                continue
            d.setdefault('network_id', self.network['id'])
            d.setdefault('name', None)
            d.setdefault('description', 'desc')
            d.setdefault('physical_network', 'phys_net')
            d.setdefault('network_type', 'net_type')
            d.setdefault('segmentation_id', 200)
        return super(TestSegmentNameDescription, self)._test_create_segment(
            expected, **kwargs)

    def test_create_segment_no_name(self):
        self._test_create_segment(expected={})

    def test_create_segment_with_name(self):
        expected_segment = {'name': 'segment_name'}
        self._test_create_segment(name='segment_name',
                                  expected=expected_segment)

    def test_create_segment_with_description(self):
        expected_segment = {'description': 'A segment'}
        self._test_create_segment(description='A segment',
                                  expected=expected_segment)

    def test_update_segment_set_name(self):
        segment = self._test_create_segment()
        result = self._update('segments',
                              segment['segment']['id'],
                              {'segment': {'name': 'Segment name'}},
                              expected_code=webob.exc.HTTPOk.code)
        self.assertEqual('Segment name', result['segment']['name'])

    def test_update_segment_set_description(self):
        segment = self._test_create_segment()
        result = self._update('segments',
                              segment['segment']['id'],
                              {'segment': {'description': 'Segment desc'}},
                              expected_code=webob.exc.HTTPOk.code)
        self.assertEqual('Segment desc', result['segment']['description'])
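    # name is a nullable segment attribute while description is not;
    # the two tests below pin that asymmetry down.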
    def test_update_segment_set_name_to_none(self):
        segment = self._test_create_segment(
            description='A segment', name='segment')
        result = self._update('segments',
                              segment['segment']['id'],
                              {'segment': {'name': None}},
                              expected_code=webob.exc.HTTPOk.code)
        self.assertIsNone(result['segment']['name'])

    def test_update_segment_set_description_to_none(self):
        segment = self._test_create_segment(
            description='A segment', name='segment')
        self._update('segments',
                     segment['segment']['id'],
                     {'segment': {'description': None}},
                     expected_code=webob.exc.HTTPBadRequest.code)


class TestSegment(SegmentTestCase):

    def test_create_segment(self):
        with self.network() as network:
            network = network['network']
        expected_segment = {'network_id': network['id'],
                            'physical_network': 'phys_net',
                            'network_type': 'net_type',
                            'segmentation_id': 200}
        self._test_create_segment(network_id=network['id'],
                                  physical_network='phys_net',
                                  segmentation_id=200,
                                  expected=expected_segment)

    def test_create_segment_non_existent_network(self):
        exc = self.assertRaises(webob.exc.HTTPClientError,
                                self._test_create_segment,
                                network_id=uuidutils.generate_uuid(),
                                physical_network='phys_net',
                                segmentation_id=200)
        self.assertEqual(HTTP_NOT_FOUND, exc.code)
        self.assertIn('NetworkNotFound', exc.explanation)

    def test_create_segment_no_phys_net(self):
        with self.network() as network:
            network = network['network']
        expected_segment = {'network_id': network['id'],
                            'physical_network': None,
                            'network_type': 'net_type',
                            'segmentation_id': 200}
        self._test_create_segment(network_id=network['id'],
                                  segmentation_id=200,
                                  expected=expected_segment)

    def test_create_segment_no_segmentation_id(self):

        def _mock_reserve_segmentation_id(rtype, event, trigger,
                                          context, segment):
            if not segment.get('segmentation_id'):
                segment['segmentation_id'] = 200

        with self.network() as network:
            network = network['network']
        registry.subscribe(_mock_reserve_segmentation_id, resources.SEGMENT,
                           events.PRECOMMIT_CREATE)
        expected_segment = {'network_id': network['id'],
                            'physical_network': 'phys_net',
                            'network_type': 'net_type',
                            'segmentation_id': 200}
        self._test_create_segment(network_id=network['id'],
                                  physical_network='phys_net',
                                  expected=expected_segment)

    def test_create_segment_with_exception_in_core_plugin(self):
        cxt = context.get_admin_context()
        with self.network() as network:
            network = network['network']
        with mock.patch.object(registry, 'notify') as notify:
            notify.side_effect = exceptions.CallbackFailure(errors=Exception)
            self.assertRaises(webob.exc.HTTPClientError,
                              self.segment,
                              network_id=network['id'],
                              segmentation_id=200)
        network_segments = segments_db.get_network_segments(cxt,
                                                            network['id'])
        self.assertEqual([], network_segments)

    def test_create_segments_in_certain_order(self):
        cxt = context.get_admin_context()
        with self.network() as network:
            network = network['network']
            segment1 = self.segment(
                network_id=network['id'], segmentation_id=200)
            segment2 = self.segment(
                network_id=network['id'], segmentation_id=201)
            segment3 = self.segment(
                network_id=network['id'], segmentation_id=202)
            network_segments = segments_db.get_network_segments(
                cxt, network['id'])
            self.assertEqual(segment1['segment']['id'],
                             network_segments[0]['id'])
            self.assertEqual(segment2['segment']['id'],
                             network_segments[1]['id'])
            self.assertEqual(segment3['segment']['id'],
                             network_segments[2]['id'])

    def test_delete_segment(self):
        with self.network() as network:
            network = network['network']
        self.segment(network_id=network['id'], segmentation_id=200)
        segment = self.segment(network_id=network['id'], segmentation_id=201)
        self._delete('segments', segment['segment']['id'])
        self._show('segments', segment['segment']['id'],
                   expected_code=webob.exc.HTTPNotFound.code)
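    # Deleting a segment that still has a subnet attached must fail with
    # a 409 and leave the segment in place.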
    def test_delete_segment_failed_with_subnet_associated(self):
        with self.network() as network:
            net = network['network']
            segment = self._test_create_segment(network_id=net['id'],
                                                segmentation_id=200)
            segment_id = segment['segment']['id']
            with self.subnet(network=network, segment_id=segment_id):
                self._delete('segments', segment_id,
                             expected_code=webob.exc.HTTPConflict.code)
                exist_segment = self._show('segments', segment_id)
                self.assertEqual(segment_id, exist_segment['segment']['id'])

    def test_get_segment(self):
        with self.network() as network:
            network = network['network']
        segment = self._test_create_segment(network_id=network['id'],
                                            physical_network='phys_net',
                                            segmentation_id=200)
        req = self.new_show_request('segments', segment['segment']['id'])
        res = self.deserialize(self.fmt, req.get_response(self.ext_api))
        self.assertEqual(segment['segment']['id'], res['segment']['id'])

    def test_list_segments(self):
        with self.network() as network:
            network = network['network']
        self._test_create_segment(network_id=network['id'],
                                  physical_network='phys_net1',
                                  segmentation_id=200)
        self._test_create_segment(network_id=network['id'],
                                  physical_network='phys_net2',
                                  segmentation_id=201)
        res = self._list('segments')
        self.assertEqual(2, len(res['segments']))

    def test_list_segments_with_sort(self):
        with self.network() as network:
            network = network['network']
        s1 = self._test_create_segment(network_id=network['id'],
                                       physical_network='phys_net1',
                                       segmentation_id=200)
        s2 = self._test_create_segment(network_id=network['id'],
                                       physical_network='phys_net2',
                                       segmentation_id=201)
        self._test_list_with_sort('segment',
                                  (s2, s1),
                                  [('physical_network', 'desc')],
                                  query_params='network_id=%s' %
                                  network['id'])

    def test_list_segments_with_pagination(self):
        with self.network() as network:
            network = network['network']
        s1 = self._test_create_segment(network_id=network['id'],
                                       physical_network='phys_net1',
                                       segmentation_id=200)
        s2 = self._test_create_segment(network_id=network['id'],
                                       physical_network='phys_net2',
                                       segmentation_id=201)
        s3 = self._test_create_segment(network_id=network['id'],
                                       physical_network='phys_net3',
                                       segmentation_id=202)
        self._test_list_with_pagination(
            'segment',
            (s1, s2, s3),
            ('physical_network', 'asc'), 2, 2,
            query_params='network_id=%s' % network['id'])

    def test_list_segments_with_pagination_reverse(self):
        with self.network() as network:
            network = network['network']
        s1 = self._test_create_segment(network_id=network['id'],
                                       physical_network='phys_net1',
                                       segmentation_id=200)
        s2 = self._test_create_segment(network_id=network['id'],
                                       physical_network='phys_net2',
                                       segmentation_id=201)
        s3 = self._test_create_segment(network_id=network['id'],
                                       physical_network='phys_net3',
                                       segmentation_id=202)
        self._test_list_with_pagination_reverse(
            'segment',
            (s1, s2, s3),
            ('physical_network', 'asc'), 2, 2,
            query_params='network_id=%s' % network['id'])

    def test_update_segments(self):
        with self.network() as network:
            net = network['network']
            segment = self._test_create_segment(network_id=net['id'],
                                                segmentation_id=200)
            segment['segment']['segmentation_id'] = '201'
            self._update('segments', segment['segment']['id'], segment,
                         expected_code=webob.exc.HTTPClientError.code)

    def test_segment_notification_on_delete_network(self):
        with mock.patch.object(db, '_delete_segments_for_network') as dsn:
            db.subscribe()
            with self.network() as network:
                network = network['network']
            self._delete('networks', network['id'])
        dsn.assert_called_with(resources.NETWORK,
                               events.PRECOMMIT_DELETE,
                               mock.ANY,
                               context=mock.ANY,
                               network_id=mock.ANY)
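# With ml2 as the core plugin a network implicitly gets its first
# segment, so the PRECOMMIT_CREATE segment callback must fire even when
# nothing was created through the segments API itself.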
class TestSegmentML2(SegmentTestCase):

    def setUp(self):
        super(TestSegmentML2, self).setUp(plugin='ml2')

    def test_segment_notification_on_create_network(self):
        with mock.patch.object(registry, 'notify') as notify:
            with self.network():
                pass
        notify.assert_any_call(resources.SEGMENT,
                               events.PRECOMMIT_CREATE,
                               context=mock.ANY,
                               segment=mock.ANY,
                               trigger=mock.ANY)


class TestSegmentSubnetAssociation(SegmentTestCase):

    def test_basic_association(self):
        with self.network() as network:
            net = network['network']
            segment = self._test_create_segment(network_id=net['id'],
                                                segmentation_id=200)
            segment_id = segment['segment']['id']
            with self.subnet(network=network,
                             segment_id=segment_id) as subnet:
                subnet = subnet['subnet']
                request = self.new_show_request('subnets', subnet['id'])
                response = request.get_response(self.api)
                res = self.deserialize(self.fmt, response)
                self.assertEqual(segment_id, res['subnet']['segment_id'])

    def test_association_network_mismatch(self):
        with self.network() as network1:
            with self.network() as network2:
                net = network1['network']
                segment = self._test_create_segment(network_id=net['id'],
                                                    segmentation_id=200)
                res = self._create_subnet(
                    self.fmt,
                    net_id=network2['network']['id'],
                    tenant_id=network2['network']['tenant_id'],
                    gateway_ip=constants.ATTR_NOT_SPECIFIED,
                    cidr='10.0.0.0/24',
                    segment_id=segment['segment']['id'])
                self.assertEqual(webob.exc.HTTPBadRequest.code,
                                 res.status_int)

    def test_association_segment_not_found(self):
        with self.network() as network:
            net = network['network']
            segment_id = uuidutils.generate_uuid()
            res = self._create_subnet(self.fmt,
                                      net_id=net['id'],
                                      tenant_id=net['tenant_id'],
                                      gateway_ip=constants.ATTR_NOT_SPECIFIED,
                                      cidr='10.0.0.0/24',
                                      segment_id=segment_id)
            self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)

    def test_only_some_subnets_associated_not_allowed(self):
        with self.network() as network:
            with self.subnet(network=network):
                net = network['network']
                segment = self._test_create_segment(network_id=net['id'],
                                                    segmentation_id=200)
                res = self._create_subnet(
                    self.fmt,
                    net_id=net['id'],
                    tenant_id=net['tenant_id'],
                    gateway_ip=constants.ATTR_NOT_SPECIFIED,
                    cidr='10.0.1.0/24',
                    segment_id=segment['segment']['id'])
                self.assertEqual(webob.exc.HTTPBadRequest.code,
                                 res.status_int)

    def test_association_to_dynamic_segment_not_allowed(self):
        cxt = context.get_admin_context()
        with self.network() as network:
            net = network['network']
            # Can't create a dynamic segment through the API
            segment = {segments_db.NETWORK_TYPE: 'phys_net',
                       segments_db.PHYSICAL_NETWORK: 'net_type',
                       segments_db.SEGMENTATION_ID: 200}
            segments_db.add_network_segment(cxt,
                                            network_id=net['id'],
                                            segment=segment,
                                            is_dynamic=True)
            res = self._create_subnet(self.fmt,
                                      net_id=net['id'],
                                      tenant_id=net['tenant_id'],
                                      gateway_ip=constants.ATTR_NOT_SPECIFIED,
                                      cidr='10.0.0.0/24',
                                      segment_id=segment['id'])
            self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)

    def test_associate_existing_subnet_with_segment(self):
        with self.network() as network:
            net = network['network']
            segment = self._test_create_segment(
                network_id=net['id'], physical_network='phys_net',
                segmentation_id=200)['segment']
            with self.subnet(network=network, segment_id=None) as subnet:
                subnet = subnet['subnet']
                data = {'subnet': {'segment_id': segment['id']}}
                request = self.new_update_request(
                    'subnets', data, subnet['id'])
                response = request.get_response(self.api)
                res = self.deserialize(self.fmt, response)
                self.assertEqual(webob.exc.HTTPOk.code, response.status_int)
                self.assertEqual(res['subnet']['segment_id'], segment['id'])
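    # Re-submitting a subnet's current segment_id is a no-op update and
    # must succeed even when the network carries other segments.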
    def test_update_subnet_with_current_segment_id(self):
        with self.network() as network:
            net = network['network']
            segment1 = self._test_create_segment(
                network_id=net['id'], physical_network='phys_net1',
                segmentation_id=200)['segment']
            self._test_create_segment(
                network_id=net['id'], physical_network='phys_net2',
                segmentation_id=200)['segment']
            with self.subnet(network=network,
                             segment_id=segment1['id']) as subnet:
                subnet = subnet['subnet']
                data = {'subnet': {'segment_id': segment1['id']}}
                request = self.new_update_request(
                    'subnets', data, subnet['id'])
                response = request.get_response(self.api)
                res = self.deserialize(self.fmt, response)
                self.assertEqual(webob.exc.HTTPOk.code, response.status_int)
                self.assertEqual(res['subnet']['segment_id'], segment1['id'])

    def test_associate_existing_subnet_fail_if_multiple_segments(self):
        with self.network() as network:
            net = network['network']
            segment1 = self._test_create_segment(
                network_id=net['id'], physical_network='phys_net1',
                segmentation_id=201)['segment']
            self._test_create_segment(
                network_id=net['id'], physical_network='phys_net2',
                segmentation_id=202)['segment']
            with self.subnet(network=network, segment_id=None) as subnet:
                subnet = subnet['subnet']
                data = {'subnet': {'segment_id': segment1['id']}}
                request = self.new_update_request(
                    'subnets', data, subnet['id'])
                response = request.get_response(self.api)
                self.assertEqual(webob.exc.HTTPBadRequest.code,
                                 response.status_int)

    def test_associate_existing_subnet_fail_if_multiple_subnets(self):
        with self.network() as network:
            net = network['network']
            segment1 = self._test_create_segment(
                network_id=net['id'], physical_network='phys_net1',
                segmentation_id=201)['segment']
            with self.subnet(network=network, segment_id=None,
                             ip_version=constants.IP_VERSION_4,
                             cidr='10.0.0.0/24') as subnet1, \
                    self.subnet(network=network, segment_id=None,
                                ip_version=constants.IP_VERSION_4,
                                cidr='10.0.1.0/24') as subnet2:
                subnet1 = subnet1['subnet']
                subnet2 = subnet2['subnet']
                data = {'subnet': {'segment_id': segment1['id']}}
                request = self.new_update_request(
                    'subnets', data, subnet1['id'])
                response = request.get_response(self.api)
                self.assertEqual(webob.exc.HTTPBadRequest.code,
                                 response.status_int)

    def test_change_existing_subnet_segment_association_not_allowed(self):
        with self.network() as network:
            net = network['network']
            segment1 = self._test_create_segment(
                network_id=net['id'], physical_network='phys_net2',
                segmentation_id=201)['segment']
            with self.subnet(network=network,
                             segment_id=segment1['id']) as subnet:
                subnet = subnet['subnet']
                segment2 = self._test_create_segment(
                    network_id=net['id'], physical_network='phys_net2',
                    segmentation_id=202)['segment']
                data = {'subnet': {'segment_id': segment2['id']}}
                request = self.new_update_request(
                    'subnets', data, subnet['id'])
                response = request.get_response(self.api)
                self.assertEqual(webob.exc.HTTPBadRequest.code,
                                 response.status_int)
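# Host-to-segment mappings are what make routed networks schedulable:
# agents report their bridge/interface mappings and the segments service
# plugin records a SegmentHostMapping row for every (segment, host) pair
# whose physical network the agent can reach.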
class HostSegmentMappingTestCase(SegmentTestCase):
    _mechanism_drivers = ['logger']

    def setUp(self, plugin=None):
        cfg.CONF.set_override('mechanism_drivers',
                              self._mechanism_drivers,
                              group='ml2')
        # NOTE(dasm): ml2_type_vlan requires to be registered before used.
        # This piece was refactored and removed from .config, so it causes
        # a problem, when tests are executed with pdb.
        # There is no problem when tests are running without debugger.
        driver_type.register_ml2_drivers_vlan_opts()
        cfg.CONF.set_override('network_vlan_ranges',
                              ['phys_net1', 'phys_net2'],
                              group='ml2_type_vlan')
        if not plugin:
            plugin = 'ml2'
        super(HostSegmentMappingTestCase, self).setUp(plugin=plugin)
        db.subscribe()

    def _get_segments_for_host(self, host):
        ctx = context.get_admin_context()
        segment_host_mapping = network.SegmentHostMapping.get_objects(
            ctx, host=host)
        return {seg_host['segment_id']: seg_host
                for seg_host in segment_host_mapping}

    def _register_agent(self, host, mappings=None, plugin=None,
                        start_flag=True):
        helpers.register_ovs_agent(host=host, bridge_mappings=mappings,
                                   plugin=self.plugin, start_flag=start_flag)

    def _test_one_segment_one_host(self, host):
        physical_network = 'phys_net1'
        with self.network() as network:
            network = network['network']
        segment = self._test_create_segment(
            network_id=network['id'], physical_network=physical_network,
            segmentation_id=200, network_type=constants.TYPE_VLAN)['segment']
        self._register_agent(host, mappings={physical_network: 'br-eth-1'},
                             plugin=self.plugin)
        segments_host_db = self._get_segments_for_host(host)
        self.assertEqual(1, len(segments_host_db))
        self.assertEqual(segment['id'],
                         segments_host_db[segment['id']]['segment_id'])
        self.assertEqual(host, segments_host_db[segment['id']]['host'])
        return segment


class TestMl2HostSegmentMappingNoAgent(HostSegmentMappingTestCase):

    def setUp(self, plugin=None):
        if not plugin:
            plugin = TEST_PLUGIN_KLASS
        super(TestMl2HostSegmentMappingNoAgent, self).setUp(plugin=plugin)

    def test_update_segment_host_mapping(self):
        ctx = context.get_admin_context()
        host = 'host1'
        physnets = ['phys_net1']
        with self.network() as network:
            network = network['network']
        segment = self._test_create_segment(
            network_id=network['id'], physical_network='phys_net1',
            segmentation_id=200, network_type=constants.TYPE_VLAN)['segment']
        self._test_create_segment(
            network_id=network['id'], physical_network='phys_net2',
            segmentation_id=201, network_type=constants.TYPE_VLAN)['segment']
        segments = db.get_segments_with_phys_nets(ctx, physnets)
        segment_ids = {segment['id'] for segment in segments}
        db.update_segment_host_mapping(ctx, host, segment_ids)
        segments_host_db = self._get_segments_for_host(host)
        self.assertEqual(1, len(segments_host_db))
        self.assertEqual(segment['id'],
                         segments_host_db[segment['id']]['segment_id'])
        self.assertEqual(host, segments_host_db[segment['id']]['host'])

    def test_map_segment_to_hosts(self):
        ctx = context.get_admin_context()
        hosts = {'host1', 'host2', 'host3'}
        with self.network() as network:
            network = network['network']
        segment = self._test_create_segment(
            network_id=network['id'], physical_network='phys_net1',
            segmentation_id=200, network_type=constants.TYPE_VLAN)['segment']
        db.map_segment_to_hosts(ctx, segment['id'], hosts)
        updated_segment = self.plugin.get_segment(ctx, segment['id'])
        self.assertEqual(hosts, set(updated_segment['hosts']))

    def test_get_all_hosts_mapped_with_segments(self):
        ctx = context.get_admin_context()
        hosts = set()
        with self.network() as network:
            network_id = network['network']['id']
        for i in range(1, 3):
            host = "host%s" % i
            segment = self._test_create_segment(
                network_id=network_id, physical_network='phys_net%s' % i,
                segmentation_id=200 + i, network_type=constants.TYPE_VLAN)
            db.update_segment_host_mapping(
                ctx, host, {segment['segment']['id']})
            hosts.add(host)

        # Now there are two hosts with segments mapped.
        actual_hosts = db.get_hosts_mapped_with_segments(ctx)
        self.assertEqual(hosts, actual_hosts)
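# A registration like
#     helpers.register_ovs_agent(host='host1',
#                                bridge_mappings={'phys_net1': 'br-eth-1'})
# is expected to map host1 onto every segment whose physical_network is
# phys_net1, and onto nothing else; re-registering with fewer mappings
# should prune the stale rows.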
class TestMl2HostSegmentMappingOVS(HostSegmentMappingTestCase):
    _mechanism_drivers = ['openvswitch', 'logger']
    mock_path = 'neutron.services.segments.db.update_segment_host_mapping'

    def test_new_agent(self):
        host = 'host1'
        self._test_one_segment_one_host(host)

    def test_updated_agent_changed_physical_networks(self):
        host = 'host1'
        physical_networks = ['phys_net1', 'phys_net2']
        networks = []
        segments = []
        for i in range(len(physical_networks)):
            with self.network() as network:
                networks.append(network['network'])
            segments.append(self._test_create_segment(
                network_id=networks[i]['id'],
                physical_network=physical_networks[i],
                segmentation_id=200,
                network_type=constants.TYPE_VLAN)['segment'])
        self._register_agent(host,
                             mappings={physical_networks[0]: 'br-eth-1',
                                       physical_networks[1]: 'br-eth-2'},
                             plugin=self.plugin)
        segments_host_db = self._get_segments_for_host(host)
        self.assertEqual(len(physical_networks), len(segments_host_db))
        for segment in segments:
            self.assertEqual(segment['id'],
                             segments_host_db[segment['id']]['segment_id'])
            self.assertEqual(host, segments_host_db[segment['id']]['host'])
        self._register_agent(host,
                             mappings={physical_networks[0]: 'br-eth-1'},
                             plugin=self.plugin)
        segments_host_db = self._get_segments_for_host(host)
        self.assertEqual(1, len(segments_host_db))
        self.assertEqual(segments[0]['id'],
                         segments_host_db[segments[0]['id']]['segment_id'])
        self.assertEqual(host,
                         segments_host_db[segments[0]['id']]['host'])

    def test_same_segment_two_hosts(self):
        host1 = 'host1'
        host2 = 'host2'
        physical_network = 'phys_net1'
        segment = self._test_one_segment_one_host(host1)
        self._register_agent(host2,
                             mappings={physical_network: 'br-eth-1'},
                             plugin=self.plugin)
        segments_host_db = self._get_segments_for_host(host2)
        self.assertEqual(1, len(segments_host_db))
        self.assertEqual(segment['id'],
                         segments_host_db[segment['id']]['segment_id'])
        self.assertEqual(host2, segments_host_db[segment['id']]['host'])

    def test_update_agent_only_change_agent_host_mapping(self):
        host1 = 'host1'
        host2 = 'host2'
        physical_network = 'phys_net1'
        with self.network() as network:
            network = network['network']
        segment1 = self._test_create_segment(
            network_id=network['id'],
            physical_network=physical_network,
            segmentation_id=200,
            network_type=constants.TYPE_VLAN)['segment']
        self._register_agent(host1,
                             mappings={physical_network: 'br-eth-1'},
                             plugin=self.plugin)
        self._register_agent(host2,
                             mappings={physical_network: 'br-eth-1'},
                             plugin=self.plugin)

        # Update agent at host2 should only change mapping with host2.
        other_phys_net = 'phys_net2'
        segment2 = self._test_create_segment(
            network_id=network['id'],
            physical_network=other_phys_net,
            segmentation_id=201,
            network_type=constants.TYPE_VLAN)['segment']
        self._register_agent(host2,
                             mappings={other_phys_net: 'br-eth-2'},
                             plugin=self.plugin)

        # We should have segment1 map to host1 and segment2 map to host2 now
        segments_host_db1 = self._get_segments_for_host(host1)
        self.assertEqual(1, len(segments_host_db1))
        self.assertEqual(segment1['id'],
                         segments_host_db1[segment1['id']]['segment_id'])
        self.assertEqual(host1, segments_host_db1[segment1['id']]['host'])
        segments_host_db2 = self._get_segments_for_host(host2)
        self.assertEqual(1, len(segments_host_db2))
        self.assertEqual(segment2['id'],
                         segments_host_db2[segment2['id']]['segment_id'])
        self.assertEqual(host2, segments_host_db2[segment2['id']]['host'])
    def test_new_segment_after_host_reg(self):
        host1 = 'host1'
        physical_network = 'phys_net1'
        segment = self._test_one_segment_one_host(host1)
        with self.network() as network:
            network = network['network']
        segment2 = self._test_create_segment(
            network_id=network['id'], physical_network=physical_network,
            segmentation_id=201, network_type=constants.TYPE_VLAN)['segment']
        segments_host_db = self._get_segments_for_host(host1)
        self.assertEqual(set((segment['id'], segment2['id'])),
                         set(segments_host_db))

    def test_segment_deletion_removes_host_mapping(self):
        host = 'host1'
        segment = self._test_one_segment_one_host(host)
        self._delete('segments', segment['id'])
        segments_host_db = self._get_segments_for_host(host)
        self.assertFalse(segments_host_db)

    @mock.patch(mock_path)
    def test_agent_with_no_mappings(self, mock):
        host = 'host1'
        physical_network = 'phys_net1'
        with self.network() as network:
            network = network['network']
        self._test_create_segment(
            network_id=network['id'], physical_network=physical_network,
            segmentation_id=200, network_type=constants.TYPE_VLAN)
        self._register_agent(host, plugin=self.plugin)
        segments_host_db = self._get_segments_for_host(host)
        self.assertFalse(segments_host_db)
        self.assertFalse(mock.mock_calls)


class TestMl2HostSegmentMappingLinuxBridge(TestMl2HostSegmentMappingOVS):
    _mechanism_drivers = ['linuxbridge', 'logger']

    def _register_agent(self, host, mappings=None, plugin=None):
        helpers.register_linuxbridge_agent(host=host,
                                           bridge_mappings=mappings,
                                           plugin=self.plugin)


class TestMl2HostSegmentMappingMacvtap(TestMl2HostSegmentMappingOVS):
    _mechanism_drivers = ['macvtap', 'logger']

    def _register_agent(self, host, mappings=None, plugin=None):
        helpers.register_macvtap_agent(host=host,
                                       interface_mappings=mappings,
                                       plugin=self.plugin)


class TestMl2HostSegmentMappingSriovNicSwitch(TestMl2HostSegmentMappingOVS):
    _mechanism_drivers = ['sriovnicswitch', 'logger']

    def _register_agent(self, host, mappings=None, plugin=None):
        helpers.register_sriovnicswitch_agent(host=host,
                                              device_mappings=mappings,
                                              plugin=self.plugin)


class NoSupportHostSegmentMappingPlugin(db_base_plugin_v2.NeutronDbPluginV2,
                                        db.SegmentDbMixin,
                                        agents_db.AgentDbMixin):
    __native_pagination_support = True
    __native_sorting_support = True

    supported_extension_aliases = []
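# A core plugin that does not advertise the segments extension must
# leave the host mapping table untouched when agents report in.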
class TestHostSegmentMappingNoSupportFromPlugin(HostSegmentMappingTestCase):
    mock_path = 'neutron.services.segments.db.update_segment_host_mapping'

    def setUp(self):
        plugin = ('neutron.tests.unit.extensions.test_segment.'
                  'NoSupportHostSegmentMappingPlugin')
        super(TestHostSegmentMappingNoSupportFromPlugin, self).setUp(
            plugin=plugin)

    @mock.patch(mock_path)
    def test_host_segments_not_updated(self, mock):
        host = 'host1'
        physical_network = 'phys_net1'
        with self.network() as network:
            network = network['network']
        self._test_create_segment(network_id=network['id'],
                                  physical_network=physical_network,
                                  segmentation_id=200,
                                  network_type=constants.TYPE_VLAN)
        self._register_agent(host,
                             mappings={physical_network: 'br-eth-1'},
                             plugin=self.plugin)
        segments_host_db = self._get_segments_for_host(host)
        self.assertFalse(segments_host_db)
        self.assertFalse(mock.mock_calls)


class TestMl2HostSegmentMappingAgentServerSynch(HostSegmentMappingTestCase):
    _mechanism_drivers = ['openvswitch', 'logger']
    mock_path = 'neutron.services.segments.db.update_segment_host_mapping'

    @mock.patch(mock_path)
    def test_starting_server_processes_agents(self, mock_function):
        host = 'agent_updating_starting_server'
        physical_network = 'phys_net1'
        self._register_agent(host,
                             mappings={physical_network: 'br-eth-1'},
                             plugin=self.plugin, start_flag=False)
        self.assertIn(host, db.reported_hosts)
        self.assertEqual(1, mock_function.call_count)
        expected_call = mock.call(mock.ANY, host, set())
        mock_function.assert_has_calls([expected_call])

    @mock.patch(mock_path)
    def test_starting_agent_is_processed(self, mock_function):
        host = 'starting_agent'
        physical_network = 'phys_net1'
        self._register_agent(host,
                             mappings={physical_network: 'br-eth-1'},
                             plugin=self.plugin, start_flag=False)
        self.assertIn(host, db.reported_hosts)
        self._register_agent(host,
                             mappings={physical_network: 'br-eth-1'},
                             plugin=self.plugin, start_flag=True)
        self.assertIn(host, db.reported_hosts)
        self.assertEqual(2, mock_function.call_count)
        expected_call = mock.call(mock.ANY, host, set())
        mock_function.assert_has_calls([expected_call, expected_call])

    @mock.patch(mock_path)
    def test_no_starting_agent_is_not_processed(self, mock_function):
        host = 'agent_with_no_start_update'
        physical_network = 'phys_net1'
        self._register_agent(host,
                             mappings={physical_network: 'br-eth-1'},
                             plugin=self.plugin, start_flag=False)
        self.assertIn(host, db.reported_hosts)
        mock_function.reset_mock()
        self._register_agent(host,
                             mappings={physical_network: 'br-eth-1'},
                             plugin=self.plugin, start_flag=False)
        self.assertIn(host, db.reported_hosts)
        mock_function.assert_not_called()
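# The remaining tests cover segment-aware IPAM on routed networks: once
# any subnet is bound to a segment the network stops being L2-adjacent,
# and a port's IP allocation is deferred until its binding host is known
# so the address can be taken from a subnet on the right segment.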
class SegmentAwareIpamTestCase(SegmentTestCase):

    def _setup_host_mappings(self, mappings=()):
        ctx = context.get_admin_context()
        for segment_id, host in mappings:
            network.SegmentHostMapping(
                ctx, segment_id=segment_id, host=host).create()

    def _create_test_segment_with_subnet(self,
                                         network=None,
                                         cidr='2001:db8:0:0::/64',
                                         physnet='physnet'):
        """Creates one network with one segment and one subnet"""
        network, segment = self._create_test_network_and_segment(network,
                                                                 physnet)
        subnet = self._create_test_subnet_with_segment(network, segment, cidr)
        return network, segment, subnet

    def _create_test_network_and_segment(self, network=None,
                                         physnet='physnet'):
        if not network:
            with self.network() as network:
                pass
        segment = self._test_create_segment(
            network_id=network['network']['id'],
            physical_network=physnet,
            network_type=constants.TYPE_VLAN)
        return network, segment

    def _create_test_subnet_with_segment(self, network, segment,
                                         cidr='2001:db8:0:0::/64',
                                         allocation_pools=None):
        ip_version = netaddr.IPNetwork(cidr).version if cidr else None
        with self.subnet(network=network,
                         segment_id=segment['segment']['id'],
                         ip_version=ip_version,
                         cidr=cidr,
                         allocation_pools=allocation_pools) as subnet:
            self._validate_l2_adjacency(network['network']['id'],
                                        is_adjacent=False)
            return subnet

    def _create_test_slaac_subnet_with_segment(
            self, network, segment, cidr='2001:db8:0:0::/64'):
        with self.subnet(network=network,
                         segment_id=segment['segment']['id'],
                         ip_version=constants.IP_VERSION_6,
                         ipv6_ra_mode=constants.IPV6_SLAAC,
                         ipv6_address_mode=constants.IPV6_SLAAC,
                         cidr=cidr,
                         allocation_pools=None) as subnet:
            self._validate_l2_adjacency(network['network']['id'],
                                        is_adjacent=False)
            return subnet

    def _validate_l2_adjacency(self, network_id, is_adjacent):
        request = self.new_show_request('networks', network_id)
        response = self.deserialize(self.fmt, request.get_response(self.api))
        self.assertEqual(is_adjacent,
                         response['network'][l2adj_apidef.L2_ADJACENCY])


class TestSegmentAwareIpam(SegmentAwareIpamTestCase):

    def _create_test_segments_with_subnets(self, num):
        """Creates one network with num segments and num subnets"""
        with self.network() as network:
            segments, subnets = [], []
            for i in range(num):
                cidr = '2001:db8:0:%s::/64' % i
                physnet = 'physnet%s' % i
                _net, segment, subnet = self._create_test_segment_with_subnet(
                    network=network, cidr=cidr, physnet=physnet)
                segments.append(segment)
                subnets.append(subnet)
            return network, segments, subnets

    def _create_net_two_segments_four_slaac_subnets(self):
        with self.network() as network:
            segment_a = self._test_create_segment(
                network_id=network['network']['id'],
                physical_network='physnet_a',
                network_type=constants.TYPE_FLAT)
            segment_b = self._test_create_segment(
                network_id=network['network']['id'],
                physical_network='physnet_b',
                network_type=constants.TYPE_FLAT)
            subnet_a0 = self._create_test_slaac_subnet_with_segment(
                network, segment_a, '2001:db8:a:0::/64')
            subnet_a1 = self._create_test_slaac_subnet_with_segment(
                network, segment_a, '2001:db8:a:1::/64')
            subnet_b0 = self._create_test_slaac_subnet_with_segment(
                network, segment_b, '2001:db8:b:0::/64')
            subnet_b1 = self._create_test_slaac_subnet_with_segment(
                network, segment_b, '2001:db8:b:1::/64')
            return (network, segment_a, segment_b,
                    subnet_a0, subnet_a1, subnet_b0, subnet_b1)

    def test_port_create_with_segment_subnets(self):
        """No binding information is provided, defer IP allocation"""
        network, segment, subnet = self._create_test_segment_with_subnet()
        response = self._create_port(self.fmt,
                                     net_id=network['network']['id'],
                                     tenant_id=network['network']['tenant_id'])
        res = self.deserialize(self.fmt, response)
        # Don't allocate IPs in this case because we didn't give binding info
        self.assertEqual(0, len(res['port']['fixed_ips']))

    def test_port_create_fixed_ips_with_segment_subnets_no_binding_info(self):
        """Fixed IP provided and no binding info, do not defer IP allocation"""
        network, segment, subnet = self._create_test_segment_with_subnet()
        response = self._create_port(self.fmt,
                                     net_id=network['network']['id'],
                                     tenant_id=network['network']['tenant_id'],
                                     fixed_ips=[
                                         {'subnet_id': subnet['subnet']['id']}
                                     ])
        res = self.deserialize(self.fmt, response)
        # We gave fixed_ips, allocate IPs in this case despite no binding info
        self._validate_immediate_ip_allocation(res['port']['id'])

    def _assert_one_ip_in_subnet(self, response, cidr):
        res = self.deserialize(self.fmt, response)
        self.assertEqual(1, len(res['port']['fixed_ips']))
        ip = res['port']['fixed_ips'][0]['ip_address']
        ip_net = netaddr.IPNetwork(cidr)
        self.assertIn(ip, ip_net)
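    # Binding-time allocation: with the host mapped to exactly one
    # segment, the fixed IP must come from a subnet on that segment.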
    def test_port_create_with_binding_information(self):
        """Binding information is provided, subnets are on segments"""
        network, segments, subnets = self._create_test_segments_with_subnets(
            3)

        # Map the host to the middle segment (by mocking host/segment mapping)
        self._setup_host_mappings([
            (segments[1]['segment']['id'], 'fakehost'),
            (segments[1]['segment']['id'], 'otherhost'),
            (segments[0]['segment']['id'], 'thirdhost')])

        response = self._create_port(self.fmt,
                                     net_id=network['network']['id'],
                                     tenant_id=network['network']['tenant_id'],
                                     arg_list=(portbindings.HOST_ID,),
                                     **{portbindings.HOST_ID: 'fakehost'})
        res = self.deserialize(self.fmt, response)
        self._validate_immediate_ip_allocation(res['port']['id'])

        # Since host mapped to middle segment, IP must come from middle subnet
        self._assert_one_ip_in_subnet(response, subnets[1]['subnet']['cidr'])

    def test_port_create_with_binding_and_no_subnets(self):
        """Binding information is provided, no subnets."""
        with self.network() as network:
            segment = self._test_create_segment(
                network_id=network['network']['id'],
                physical_network='physnet',
                network_type=constants.TYPE_VLAN)

            # Map the host to the segment
            self._setup_host_mappings([(segment['segment']['id'],
                                        'fakehost')])

            response = self._create_port(
                self.fmt,
                net_id=network['network']['id'],
                tenant_id=network['network']['tenant_id'],
                arg_list=(portbindings.HOST_ID,),
                **{portbindings.HOST_ID: 'fakehost'})
            res = self.deserialize(self.fmt, response)

            # No subnets, so no allocation. But, it shouldn't be an error.
            self.assertEqual(0, len(res['port']['fixed_ips']))

    def test_port_create_with_binding_information_fallback(self):
        """Binding information is provided, subnets not on segments"""
        with self.network() as network:
            with self.subnet(network=network,
                             ip_version=constants.IP_VERSION_6,
                             cidr='2001:db8:0:0::/64') as subnet:
                segment = self._test_create_segment(
                    network_id=network['network']['id'],
                    physical_network='physnet',
                    network_type=constants.TYPE_VLAN)

                self._validate_l2_adjacency(network['network']['id'],
                                            is_adjacent=True)

                # Map the host to the segment
                self._setup_host_mappings([(segment['segment']['id'],
                                            'fakehost')])

                response = self._create_port(
                    self.fmt,
                    net_id=network['network']['id'],
                    tenant_id=network['network']['tenant_id'],
                    arg_list=(portbindings.HOST_ID,),
                    **{portbindings.HOST_ID: 'fakehost'})

                res = self.deserialize(self.fmt, response)
                self._validate_immediate_ip_allocation(res['port']['id'])

                # Since the subnet is not on a segment, fall back to it
                self._assert_one_ip_in_subnet(response,
                                              subnet['subnet']['cidr'])

    def test_port_create_on_unconnected_host(self):
        """Binding information provided, host not connected to any segment"""
        network, segment, _subnet = self._create_test_segment_with_subnet()
        response = self._create_port(self.fmt,
                                     net_id=network['network']['id'],
                                     tenant_id=network['network']['tenant_id'],
                                     arg_list=(portbindings.HOST_ID,),
                                     **{portbindings.HOST_ID: 'fakehost'})
        res = self.deserialize(self.fmt, response)

        self.assertEqual(webob.exc.HTTPConflict.code, response.status_int)
        self.assertEqual(segment_exc.HostNotConnectedToAnySegment.__name__,
                         res['NeutronError']['type'])

        # Ensure that mapping the segment to other hosts doesn't trip it up
        self._setup_host_mappings([(segment['segment']['id'], 'otherhost')])
        response = self._create_port(self.fmt,
                                     net_id=network['network']['id'],
                                     tenant_id=network['network']['tenant_id'],
                                     arg_list=(portbindings.HOST_ID,),
                                     **{portbindings.HOST_ID: 'fakehost'})
        res = self.deserialize(self.fmt, response)

        self.assertEqual(webob.exc.HTTPConflict.code, response.status_int)
        self.assertEqual(segment_exc.HostNotConnectedToAnySegment.__name__,
                         res['NeutronError']['type'])
    def test_port_create_on_multiconnected_host(self):
        """Binding information provided, host connected to multiple segments"""
        network, segments, subnets = self._create_test_segments_with_subnets(
            2)

        # This host is bound to multiple segments
        self._setup_host_mappings([(segments[0]['segment']['id'], 'fakehost'),
                                   (segments[1]['segment']['id'], 'fakehost')])

        response = self._create_port(self.fmt,
                                     net_id=network['network']['id'],
                                     tenant_id=network['network']['tenant_id'],
                                     arg_list=(portbindings.HOST_ID,),
                                     **{portbindings.HOST_ID: 'fakehost'})
        res = self.deserialize(self.fmt, response)

        self.assertEqual(webob.exc.HTTPConflict.code, response.status_int)
        self.assertEqual(segment_exc.HostConnectedToMultipleSegments.__name__,
                         res['NeutronError']['type'])

    def test_port_update_with_fixed_ips_ok_if_no_binding_host(self):
        """No binding host information is provided, subnets on segments"""
        with self.network() as network:
            segment = self._test_create_segment(
                network_id=network['network']['id'],
                physical_network='physnet',
                network_type=constants.TYPE_VLAN)

            # Create a port with no IP address (since there is no subnet)
            port = self._create_deferred_ip_port(network)

            # Create the subnet and try to update the port to get an IP
            with self.subnet(network=network,
                             segment_id=segment['segment']['id']) as subnet:
                # Try requesting an IP (but the only subnet is on a segment)
                data = {'port': {
                    'fixed_ips': [{'subnet_id': subnet['subnet']['id']}]}}
                port_id = port['port']['id']
                port_req = self.new_update_request('ports', data, port_id)
                response = port_req.get_response(self.api)

                # The IP is allocated: since there is no binding host
                # info, any subnet can be used for allocation.
                self.assertEqual(webob.exc.HTTPOk.code, response.status_int)

    def test_port_update_with_fixed_ips_fail_if_host_not_on_segment(self):
        """Binding information is provided, subnets on segments. Update to
        subnet on different segment fails.
        """
        network, segments, subnets = self._create_test_segments_with_subnets(
            2)

        # Setup host mappings
        self._setup_host_mappings([(segments[0]['segment']['id'],
                                    'fakehost')])

        # Create a port and validate immediate ip allocation
        res = self._create_port_and_show(network,
                                         arg_list=(portbindings.HOST_ID,),
                                         **{portbindings.HOST_ID: 'fakehost'})
        self._validate_immediate_ip_allocation(res['port']['id'])

        # Try requesting a new IP, but the subnet does not match host segment
        port_id = res['port']['id']
        data = {'port': {
            'fixed_ips': [{'subnet_id': subnets[1]['subnet']['id']}]}}
        port_req = self.new_update_request('ports', data, port_id)
        response = port_req.get_response(self.api)

        # Port update fails.
        self.assertEqual(webob.exc.HTTPBadRequest.code, response.status_int)
    def _create_port_and_show(self, network, **kwargs):
        response = self._create_port(
            self.fmt,
            net_id=network['network']['id'],
            tenant_id=network['network']['tenant_id'],
            **kwargs)
        port = self.deserialize(self.fmt, response)
        request = self.new_show_request('ports', port['port']['id'])
        return self.deserialize(self.fmt, request.get_response(self.api))

    def test_port_create_with_no_fixed_ips_no_ipam_on_routed_network(self):
        """Ports requesting no fixed_ips not deferred, even on routed net"""
        with self.network() as network:
            segment = self._test_create_segment(
                network_id=network['network']['id'],
                physical_network='physnet',
                network_type=constants.TYPE_VLAN)
            with self.subnet(network=network,
                             segment_id=segment['segment']['id']):
                pass

        # Create an unbound port requesting no IP addresses
        response = self._create_port_and_show(network, fixed_ips=[])
        self.assertEqual([], response['port']['fixed_ips'])
        self.assertEqual(ipalloc_apidef.IP_ALLOCATION_NONE,
                         response['port'][ipalloc_apidef.IP_ALLOCATION])

    def test_port_create_with_no_fixed_ips_no_ipam(self):
        """Ports without addresses on non-routed networks are not deferred"""
        with self.network() as network:
            with self.subnet(network=network):
                pass

        # Create an unbound port requesting no IP addresses
        response = self._create_port_and_show(network, fixed_ips=[])
        self.assertEqual([], response['port']['fixed_ips'])
        self.assertEqual(ipalloc_apidef.IP_ALLOCATION_NONE,
                         response['port'][ipalloc_apidef.IP_ALLOCATION])

    def test_port_without_ip_not_deferred(self):
        """Ports without addresses on non-routed networks are not deferred"""
        with self.network() as network:
            pass

        # Create a bound port with no IP address (since there is no subnet)
        response = self._create_port(self.fmt,
                                     net_id=network['network']['id'],
                                     tenant_id=network['network']['tenant_id'],
                                     arg_list=(portbindings.HOST_ID,),
                                     **{portbindings.HOST_ID: 'fakehost'})
        port = self.deserialize(self.fmt, response)
        request = self.new_show_request('ports', port['port']['id'])
        response = self.deserialize(self.fmt, request.get_response(self.api))

        self.assertEqual([], response['port']['fixed_ips'])
        self.assertEqual(ipalloc_apidef.IP_ALLOCATION_IMMEDIATE,
                         response['port'][ipalloc_apidef.IP_ALLOCATION])

    def test_port_without_ip_not_deferred_no_binding(self):
        """Ports without addresses on non-routed networks are not deferred"""
        with self.network() as network:
            pass

        # Create an unbound port with no IP address (since there is no subnet)
        response = self._create_port_and_show(network)
        self.assertEqual([], response['port']['fixed_ips'])
        self.assertEqual(ipalloc_apidef.IP_ALLOCATION_IMMEDIATE,
                         response['port'][ipalloc_apidef.IP_ALLOCATION])
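    # A port that is already bound to a mapped host may pick up an IP
    # later: the update below succeeds because the binding narrows
    # allocation down to the segment's own subnet.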
    def test_port_update_is_host_aware(self):
        """Binding information is provided, subnets on segments"""
        with self.network() as network:
            segment = self._test_create_segment(
                network_id=network['network']['id'],
                physical_network='physnet',
                network_type=constants.TYPE_VLAN)

            # Map the host to the segment
            self._setup_host_mappings([(segment['segment']['id'],
                                        'fakehost')])

            # Create a bound port with no IP address (since there is no
            # subnet)
            response = self._create_port(
                self.fmt,
                net_id=network['network']['id'],
                tenant_id=network['network']['tenant_id'],
                arg_list=(portbindings.HOST_ID,),
                **{portbindings.HOST_ID: 'fakehost'})
            port = self.deserialize(self.fmt, response)

            # Create the subnet and try to update the port to get an IP
            with self.subnet(network=network,
                             segment_id=segment['segment']['id']) as subnet:
                self._validate_l2_adjacency(network['network']['id'],
                                            is_adjacent=False)
                # Try requesting an IP (but the only subnet is on a segment)
                data = {'port': {
                    'fixed_ips': [{'subnet_id': subnet['subnet']['id']}]}}
                port_id = port['port']['id']
                port_req = self.new_update_request('ports', data, port_id)
                response = port_req.get_response(self.api)

                # Since port is bound and there is a mapping to segment, it
                # succeeds.
                self.assertEqual(webob.exc.HTTPOk.code, response.status_int)
                self._assert_one_ip_in_subnet(response,
                                              subnet['subnet']['cidr'])

    def _validate_deferred_ip_allocation(self, port_id):
        request = self.new_show_request('ports', port_id)
        response = self.deserialize(self.fmt, request.get_response(self.api))

        self.assertEqual(ipalloc_apidef.IP_ALLOCATION_DEFERRED,
                         response['port'][ipalloc_apidef.IP_ALLOCATION])
        ips = response['port']['fixed_ips']
        self.assertEqual(0, len(ips))

    def _validate_immediate_ip_allocation(self, port_id):
        request = self.new_show_request('ports', port_id)
        response = self.deserialize(self.fmt, request.get_response(self.api))

        self.assertEqual(ipalloc_apidef.IP_ALLOCATION_IMMEDIATE,
                         response['port'][ipalloc_apidef.IP_ALLOCATION])
        ips = response['port']['fixed_ips']
        self.assertNotEqual(0, len(ips))

    def _create_deferred_ip_port(self, network):
        response = self._create_port(self.fmt,
                                     net_id=network['network']['id'],
                                     tenant_id=network['network']['tenant_id'])
        port = self.deserialize(self.fmt, response)
        ips = port['port']['fixed_ips']
        self.assertEqual(0, len(ips))
        return port

    def test_port_update_deferred_allocation(self):
        """Binding information is provided on update, subnets on segments"""
        network, segment, subnet = self._create_test_segment_with_subnet()

        # Map the host to the segment
        self._setup_host_mappings([(segment['segment']['id'], 'fakehost')])

        port = self._create_deferred_ip_port(network)
        self._validate_deferred_ip_allocation(port['port']['id'])

        # Try requesting an IP (but the only subnet is on a segment)
        data = {'port': {portbindings.HOST_ID: 'fakehost'}}
        port_id = port['port']['id']
        port_req = self.new_update_request('ports', data, port_id)
        response = port_req.get_response(self.api)

        # Port update succeeds and allocates a new IP address.
        self.assertEqual(webob.exc.HTTPOk.code, response.status_int)
        self._assert_one_ip_in_subnet(response, subnet['subnet']['cidr'])

    def test_port_update_deferred_allocation_no_segments(self):
        """Binding information is provided, subnet created after port"""
        with self.network() as network:
            pass

        port = self._create_deferred_ip_port(network)

        # Create the subnet and try to update the port to get an IP
        with self.subnet(network=network):
            data = {'port': {portbindings.HOST_ID: 'fakehost'}}
            port_id = port['port']['id']
            port_req = self.new_update_request('ports', data, port_id)
            response = port_req.get_response(self.api)

        self.assertEqual(webob.exc.HTTPOk.code, response.status_int)
        res = self.deserialize(self.fmt, response)
        self.assertEqual(0, len(res['port']['fixed_ips']))
    def test_port_update_deferred_allocation_no_ipam(self):
        """Binding information is provided on update. Don't allocate."""
        with self.network() as network:
            with self.subnet(network=network):
                pass

        response = self._create_port(self.fmt,
                                     net_id=network['network']['id'],
                                     tenant_id=network['network']['tenant_id'],
                                     fixed_ips=[])
        port = self.deserialize(self.fmt, response)
        ips = port['port']['fixed_ips']
        self.assertEqual(0, len(ips))

        # Update the port with binding info; since no fixed_ips were
        # requested on create, no IP should be allocated.
        data = {'port': {portbindings.HOST_ID: 'fakehost'}}
        port_id = port['port']['id']
        port_req = self.new_update_request('ports', data, port_id)
        response = port_req.get_response(self.api)

        self.assertEqual(webob.exc.HTTPOk.code, response.status_int)
        res = self.deserialize(self.fmt, response)
        self.assertEqual(0, len(res['port']['fixed_ips']))

    def test_port_update_deferred_allocation_no_segments_manual_alloc(self):
        """Binding information is provided, subnet created after port"""
        with self.network() as network:
            pass

        port = self._create_deferred_ip_port(network)

        # Create the subnet and try to update the port to get an IP
        with self.subnet(network=network) as subnet:
            data = {'port': {
                portbindings.HOST_ID: 'fakehost',
                'fixed_ips': [{'subnet_id': subnet['subnet']['id']}]}}
            port_id = port['port']['id']
            port_req = self.new_update_request('ports', data, port_id)
            response = port_req.get_response(self.api)

        self.assertEqual(webob.exc.HTTPOk.code, response.status_int)
        self._assert_one_ip_in_subnet(response, subnet['subnet']['cidr'])

        # Do a show to be sure that only one IP is recorded
        port_req = self.new_show_request('ports', port_id)
        response = port_req.get_response(self.api)
        self.assertEqual(webob.exc.HTTPOk.code, response.status_int)
        self._assert_one_ip_in_subnet(response, subnet['subnet']['cidr'])

    def test_port_update_deferred_allocation_no_segments_empty_alloc(self):
        """Binding information is provided, subnet created after port"""
        with self.network() as network:
            pass

        port = self._create_deferred_ip_port(network)

        # Create the subnet and update the port but specify no IPs
        with self.subnet(network=network):
            data = {'port': {
                portbindings.HOST_ID: 'fakehost',
                'fixed_ips': []}}
            port_id = port['port']['id']
            port_req = self.new_update_request('ports', data, port_id)
            response = port_req.get_response(self.api)

        self.assertEqual(webob.exc.HTTPOk.code, response.status_int)
        res = self.deserialize(self.fmt, response)
        # Since I specifically requested no IP addresses, I shouldn't get one.
        self.assertEqual(0, len(res['port']['fixed_ips']))
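    # Deferred allocation can still fail at binding time: an unmapped
    # host raises HostNotConnectedToAnySegment and an ambiguous one
    # raises HostConnectedToMultipleSegments, both surfacing as 409s.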
    def test_port_update_deferred_allocation_no_host_mapping(self):
        """Binding information is provided on update, subnets on segments"""
        network, segment, subnet = self._create_test_segment_with_subnet()

        port = self._create_deferred_ip_port(network)
        self._validate_deferred_ip_allocation(port['port']['id'])

        # Try requesting an IP (but the only subnet is on a segment)
        data = {'port': {portbindings.HOST_ID: 'fakehost'}}
        port_id = port['port']['id']
        port_req = self.new_update_request('ports', data, port_id)
        response = port_req.get_response(self.api)
        res = self.deserialize(self.fmt, response)

        # Gets conflict because it can't map the host to a segment
        self.assertEqual(webob.exc.HTTPConflict.code, response.status_int)
        self.assertEqual(segment_exc.HostNotConnectedToAnySegment.__name__,
                         res['NeutronError']['type'])

    def test_port_update_deferred_allocation_multiple_host_mapping(self):
        """Binding information is provided on update, subnets on segments"""
        network, segments, _s = self._create_test_segments_with_subnets(2)

        port = self._create_deferred_ip_port(network)
        self._validate_deferred_ip_allocation(port['port']['id'])

        # This host is bound to multiple segments
        self._setup_host_mappings([(segments[0]['segment']['id'], 'fakehost'),
                                   (segments[1]['segment']['id'], 'fakehost')])

        # Try requesting an IP (but the only subnet is on a segment)
        data = {'port': {portbindings.HOST_ID: 'fakehost'}}
        port_id = port['port']['id']
        port_req = self.new_update_request('ports', data, port_id)
        response = port_req.get_response(self.api)
        res = self.deserialize(self.fmt, response)

        # Gets conflict because it can't map the host to a segment
        self.assertEqual(webob.exc.HTTPConflict.code, response.status_int)
        self.assertEqual(segment_exc.HostConnectedToMultipleSegments.__name__,
                         res['NeutronError']['type'])

    def test_port_update_allocate_no_segments(self):
        """Binding information is provided, subnet created after port"""
        with self.network() as network:
            pass

        # Create a bound port with no IP address (since there is no subnet)
        port = self._create_deferred_ip_port(network)

        # Create the subnet and try to update the port to get an IP
        with self.subnet(network=network) as subnet:
            # Try requesting an IP (but the only subnet is on a segment)
            data = {'port': {
                'fixed_ips': [{'subnet_id': subnet['subnet']['id']}]}}
            port_id = port['port']['id']
            port_req = self.new_update_request('ports', data, port_id)
            response = port_req.get_response(self.api)

        # The network has no segments, so the allocation succeeds.
        self.assertEqual(webob.exc.HTTPOk.code, response.status_int)
        self._assert_one_ip_in_subnet(response, subnet['subnet']['cidr'])
    def test_port_update_deferred_allocation_no_ips(self):
        """Binding information is provided on update, subnets on segments"""
        network, segments, subnets = self._create_test_segments_with_subnets(
            2)

        self._setup_host_mappings([(segments[0]['segment']['id'],
                                    'fakehost2'),
                                   (segments[1]['segment']['id'],
                                    'fakehost')])

        port = self._create_deferred_ip_port(network)

        # Update the subnet on the second segment to be out of IPs
        subnet_data = {'subnet': {'allocation_pools': []}}
        subnet_req = self.new_update_request('subnets',
                                             subnet_data,
                                             subnets[1]['subnet']['id'])
        subnet_response = subnet_req.get_response(self.api)
        res = self.deserialize(self.fmt, subnet_response)

        # Try requesting an IP (but the subnet ran out of ips)
        data = {'port': {portbindings.HOST_ID: 'fakehost'}}
        port_id = port['port']['id']
        port_req = self.new_update_request('ports', data, port_id)
        response = port_req.get_response(self.api)
        res = self.deserialize(self.fmt, response)

        # The update fails because the mapped segment's subnet is empty.
        self.assertEqual(webob.exc.HTTPConflict.code, response.status_int)
        self.assertEqual(n_exc.IpAddressGenerationFailure.__name__,
                         res['NeutronError']['type'])

    def test_port_update_fails_if_host_on_wrong_segment(self):
        """Update a port with existing IPs to a host where they don't work"""
        network, segments, subnets = self._create_test_segments_with_subnets(
            2)

        self._setup_host_mappings([(segments[0]['segment']['id'],
                                    'fakehost2'),
                                   (segments[1]['segment']['id'],
                                    'fakehost')])

        # Create a bound port with an IP address
        response = self._create_port(self.fmt,
                                     net_id=network['network']['id'],
                                     tenant_id=network['network']['tenant_id'],
                                     arg_list=(portbindings.HOST_ID,),
                                     **{portbindings.HOST_ID: 'fakehost'})
        self._assert_one_ip_in_subnet(response, subnets[1]['subnet']['cidr'])
        port = self.deserialize(self.fmt, response)

        # Now, try to update binding to a host on the other segment
        data = {'port': {portbindings.HOST_ID: 'fakehost2'}}
        port_req = self.new_update_request('ports', data, port['port']['id'])
        response = port_req.get_response(self.api)

        # It fails since the IP address isn't compatible with the new segment
        self.assertEqual(webob.exc.HTTPConflict.code, response.status_int)

    def test_port_update_fails_if_host_on_good_segment(self):
        """Update a port with existing IPs to another host on its segment"""
        network, segments, subnets = self._create_test_segments_with_subnets(
            2)

        self._setup_host_mappings([(segments[0]['segment']['id'],
                                    'fakehost2'),
                                   (segments[1]['segment']['id'],
                                    'fakehost1'),
                                   (segments[1]['segment']['id'],
                                    'fakehost')])

        # Create a bound port with an IP address
        response = self._create_port(self.fmt,
                                     net_id=network['network']['id'],
                                     tenant_id=network['network']['tenant_id'],
                                     arg_list=(portbindings.HOST_ID,),
                                     **{portbindings.HOST_ID: 'fakehost'})
        self._assert_one_ip_in_subnet(response, subnets[1]['subnet']['cidr'])
        port = self.deserialize(self.fmt, response)

        # Now, try to update binding to another host in same segment
        data = {'port': {portbindings.HOST_ID: 'fakehost1'}}
        port_req = self.new_update_request('ports', data, port['port']['id'])
        response = port_req.get_response(self.api)

        # Since the new host is in the same segment, it succeeds.
        self.assertEqual(webob.exc.HTTPOk.code, response.status_int)
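    # Updating the binding host and the MAC address in one request must
    # not interfere with completing the deferred allocation.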
self.assertEqual(webob.exc.HTTPOk.code, response.status_int) def test_port_update_deferred_allocation_binding_info_and_new_mac(self): """Binding information and a new MAC address are provided on update""" network, segment, subnet = self._create_test_segment_with_subnet() # Map the host to the segment self._setup_host_mappings([(segment['segment']['id'], 'fakehost')]) port = self._create_deferred_ip_port(network) self._validate_deferred_ip_allocation(port['port']['id']) # Try requesting an IP (but the only subnet is on a segment) data = {'port': {portbindings.HOST_ID: 'fakehost', port_apidef.PORT_MAC_ADDRESS: '00:00:00:00:00:01'}} port_id = port['port']['id'] port_req = self.new_update_request('ports', data, port_id) response = port_req.get_response(self.api) # Port update succeeds and allocates a new IP address. self.assertEqual(webob.exc.HTTPOk.code, response.status_int) self._assert_one_ip_in_subnet(response, subnet['subnet']['cidr']) def test_slaac_segment_aware_no_binding_info(self): (network, segment_a, segment_b, subnet_a0, subnet_a1, subnet_b0, subnet_b1) = self._create_net_two_segments_four_slaac_subnets() # Create a port with no binding info, so its IP allocation is deferred port_deferred = self._create_deferred_ip_port(network) self._validate_deferred_ip_allocation(port_deferred['port']['id']) def test_slaac_segment_aware_immediate_fixed_ips_no_binding_info(self): (network, segment_a, segment_b, subnet_a0, subnet_a1, subnet_b0, subnet_b1) = self._create_net_two_segments_four_slaac_subnets() # Create two ports, port_a with subnet_a0 in fixed_ips and port_b # with subnet_b0 in fixed_ips port_a = self._create_port_and_show( network, fixed_ips=[{'subnet_id': subnet_a0['subnet']['id']}]) port_b = self._create_port_and_show( network, fixed_ips=[{'subnet_id': subnet_b0['subnet']['id']}]) self._validate_immediate_ip_allocation(port_a['port']['id']) self._validate_immediate_ip_allocation(port_b['port']['id']) self.assertEqual(2, len(port_a['port']['fixed_ips'])) self.assertEqual(2, len(port_b['port']['fixed_ips'])) port_a_snet_ids = [f['subnet_id'] for f in port_a['port']['fixed_ips']] port_b_snet_ids = [f['subnet_id'] for f in port_b['port']['fixed_ips']] self.assertIn(subnet_a0['subnet']['id'], port_a_snet_ids) self.assertIn(subnet_a1['subnet']['id'], port_a_snet_ids) self.assertIn(subnet_b0['subnet']['id'], port_b_snet_ids) self.assertIn(subnet_b1['subnet']['id'], port_b_snet_ids) self.assertNotIn(subnet_a0['subnet']['id'], port_b_snet_ids) self.assertNotIn(subnet_a1['subnet']['id'], port_b_snet_ids) self.assertNotIn(subnet_b0['subnet']['id'], port_a_snet_ids) self.assertNotIn(subnet_b1['subnet']['id'], port_a_snet_ids) def test_slaac_segment_aware_immediate_with_binding_info(self): (network, segment_a, segment_b, subnet_a0, subnet_a1, subnet_b0, subnet_b1) = self._create_net_two_segments_four_slaac_subnets() self._setup_host_mappings([(segment_a['segment']['id'], 'fakehost_a')]) # Create a port with host ID, validate immediate allocation on subnets # with correct segment_id.
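# Supplying binding info at create time lets IPAM resolve the host's # segment up front, so no deferred allocation is involved here.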
response = self._create_port(self.fmt, net_id=network['network']['id'], tenant_id=network['network']['tenant_id'], arg_list=(portbindings.HOST_ID,), **{portbindings.HOST_ID: 'fakehost_a'}) res = self.deserialize(self.fmt, response) self._validate_immediate_ip_allocation(res['port']['id']) # Since the host is mapped to segment_a, IPs must come from subnets # subnet_a0 and subnet_a1 self.assertEqual(2, len(res['port']['fixed_ips'])) res_subnet_ids = [f['subnet_id'] for f in res['port']['fixed_ips']] self.assertIn(subnet_a0['subnet']['id'], res_subnet_ids) self.assertIn(subnet_a1['subnet']['id'], res_subnet_ids) self.assertNotIn(subnet_b0['subnet']['id'], res_subnet_ids) self.assertNotIn(subnet_b1['subnet']['id'], res_subnet_ids) def test_slaac_segment_aware_add_subnet(self): (network, segment_a, segment_b, subnet_a0, subnet_a1, subnet_b0, subnet_b1) = self._create_net_two_segments_four_slaac_subnets() # Create a port with no IP address (since there is no subnet) port_deferred = self._create_deferred_ip_port(network) self._validate_deferred_ip_allocation(port_deferred['port']['id']) # Create two ports, port_a with subnet_a0 in fixed_ips and port_b # with subnet_b0 in fixed_ips port_a = self._create_port_and_show( network, fixed_ips=[{'subnet_id': subnet_a0['subnet']['id']}]) port_b = self._create_port_and_show( network, fixed_ips=[{'subnet_id': subnet_b0['subnet']['id']}]) self._validate_immediate_ip_allocation(port_a['port']['id']) self._validate_immediate_ip_allocation(port_b['port']['id']) self.assertEqual(2, len(port_a['port']['fixed_ips'])) self.assertEqual(2, len(port_b['port']['fixed_ips'])) # Add another subnet on segment_a subnet_a2 = self._create_test_slaac_subnet_with_segment( network, segment_a, '2001:db8:a:2::/64') # The port with deferred allocation should not have an allocation req = self.new_show_request('ports', port_deferred['port']['id']) res = req.get_response(self.api) port_deferred = self.deserialize(self.fmt, res) self._validate_deferred_ip_allocation(port_deferred['port']['id']) self.assertEqual(0, len(port_deferred['port']['fixed_ips'])) # port_a should get an allocation on the new subnet. # port_b does not get an allocation. req = self.new_show_request('ports', port_a['port']['id']) res = req.get_response(self.api) port_a = self.deserialize(self.fmt, res) req = self.new_show_request('ports', port_b['port']['id']) res = req.get_response(self.api) port_b = self.deserialize(self.fmt, res) self.assertEqual(3, len(port_a['port']['fixed_ips'])) self.assertEqual(2, len(port_b['port']['fixed_ips'])) port_a_snet_ids = [f['subnet_id'] for f in port_a['port']['fixed_ips']] self.assertIn(subnet_a2['subnet']['id'], port_a_snet_ids) def test_slaac_segment_aware_delete_subnet(self): (network, segment_a, segment_b, subnet_a0, subnet_a1, subnet_b0, subnet_b1) = self._create_net_two_segments_four_slaac_subnets() # Create two ports, port_a with subnet_a0 in fixed_ips and port_b # with subnet_b0 in fixed_ips port_a = self._create_port_and_show( network, fixed_ips=[{'subnet_id': subnet_a0['subnet']['id']}]) port_b = self._create_port_and_show( network, fixed_ips=[{'subnet_id': subnet_b0['subnet']['id']}]) self._validate_immediate_ip_allocation(port_a['port']['id']) self._validate_immediate_ip_allocation(port_b['port']['id']) self.assertEqual(2, len(port_a['port']['fixed_ips'])) self.assertEqual(2, len(port_b['port']['fixed_ips'])) # Delete subnet_b1 on segment_b; port_a should keep its allocations. # The allocation for the deleted subnet is removed from port_b.
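# SLAAC allocations are tied to their subnet, so deleting the subnet also # removes the corresponding fixed IP from any port that carried one.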
req = self.new_delete_request('subnets', subnet_b1['subnet']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) req = self.new_show_request('ports', port_a['port']['id']) res = req.get_response(self.api) port_a = self.deserialize(self.fmt, res) req = self.new_show_request('ports', port_b['port']['id']) res = req.get_response(self.api) port_b = self.deserialize(self.fmt, res) self.assertEqual(2, len(port_a['port']['fixed_ips'])) self.assertEqual(1, len(port_b['port']['fixed_ips'])) port_b_snet_ids = [f['subnet_id'] for f in port_b['port']['fixed_ips']] self.assertNotIn(subnet_b1['subnet']['id'], port_b_snet_ids) def test_slaac_segment_aware_delete_last_subnet_on_segment_fails(self): (network, segment_a, segment_b, subnet_a0, subnet_a1, subnet_b0, subnet_b1) = self._create_net_two_segments_four_slaac_subnets() # Create two ports, port_a with subnet_a0 in fixed_ips and port_b # with subnet_b0 in fixed_ips port_a = self._create_port_and_show( network, fixed_ips=[{'subnet_id': subnet_a0['subnet']['id']}]) port_b = self._create_port_and_show( network, fixed_ips=[{'subnet_id': subnet_b0['subnet']['id']}]) self._validate_immediate_ip_allocation(port_a['port']['id']) self._validate_immediate_ip_allocation(port_b['port']['id']) self.assertEqual(2, len(port_a['port']['fixed_ips'])) self.assertEqual(2, len(port_b['port']['fixed_ips'])) # Delete subnet_b1 on segment_b req = self.new_delete_request('subnets', subnet_b1['subnet']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) # Deleting subnet_b0 on segment_b fails with SubnetInUse because # port_b has no other allocation req = self.new_delete_request('subnets', subnet_b0['subnet']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPConflict.code, res.status_int) # Delete port_b req = self.new_delete_request('ports', port_b['port']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) # Try to delete subnet_b0 again; with no ports left it should succeed req = self.new_delete_request('subnets', subnet_b0['subnet']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) class TestSegmentAwareIpamML2(TestSegmentAwareIpam): VLAN_MIN = 200 VLAN_MAX = 209 def setUp(self): # NOTE(mlavalle): ml2_type_vlan options must be registered before they # are used. This setup was refactored out of .config, which breaks # test runs under pdb; runs without the debugger are unaffected.
driver_type.register_ml2_drivers_vlan_opts() cfg.CONF.set_override( 'network_vlan_ranges', ['physnet:%s:%s' % (self.VLAN_MIN, self.VLAN_MAX), 'physnet0:%s:%s' % (self.VLAN_MIN, self.VLAN_MAX), 'physnet1:%s:%s' % (self.VLAN_MIN, self.VLAN_MAX), 'physnet2:%s:%s' % (self.VLAN_MIN, self.VLAN_MAX)], group='ml2_type_vlan') super(TestSegmentAwareIpamML2, self).setUp(plugin='ml2') def test_segmentation_id_stored_in_db(self): network, segment, subnet = self._create_test_segment_with_subnet() self.assertTrue(self.VLAN_MIN <= segment['segment']['segmentation_id'] <= self.VLAN_MAX) retrieved_segment = self._show('segments', segment['segment']['id']) self.assertEqual(segment['segment']['segmentation_id'], retrieved_segment['segment']['segmentation_id']) class TestNovaSegmentNotifier(SegmentAwareIpamTestCase): _mechanism_drivers = ['openvswitch', 'logger'] def setUp(self): ml2_config.register_ml2_plugin_opts() driver_type.register_ml2_drivers_vlan_opts() cfg.CONF.set_override('mechanism_drivers', self._mechanism_drivers, group='ml2') cfg.CONF.set_override('network_vlan_ranges', ['physnet:200:209', 'physnet0:200:209', 'physnet1:200:209', 'physnet2:200:209'], group='ml2_type_vlan') super(TestNovaSegmentNotifier, self).setUp(plugin='ml2') # Need notifier here self.patch_notifier.stop() self._mock_keystone_auth() self.segments_plugin = directory.get_plugin(ext_segment.SEGMENTS) nova_updater = self.segments_plugin.nova_updater nova_updater.p_client = mock.MagicMock() self.mock_p_client = nova_updater.p_client nova_updater.n_client = mock.MagicMock() self.mock_n_client = nova_updater.n_client self.batch_notifier = nova_updater.batch_notifier self.batch_notifier._waiting_to_send = True def _mock_keystone_auth(self): # Use to remove MissingAuthPlugin exception when notifier is needed self.mock_load_auth_p = mock.patch( 'keystoneauth1.loading.load_auth_from_conf_options') self.mock_load_auth = self.mock_load_auth_p.start() self.mock_request_p = mock.patch( 'keystoneauth1.session.Session.request') self.mock_request = self.mock_request_p.start() def _calculate_inventory_total_and_reserved(self, subnet): total = 0 reserved = 0 allocation_pools = subnet.get('allocation_pools') or [] for pool in allocation_pools: total += int(netaddr.IPAddress(pool['end']) - netaddr.IPAddress(pool['start'])) + 1 if total: if subnet.get('gateway_ip'): total += 1 reserved += 1 if subnet.get('enable_dhcp'): reserved += 1 return total, reserved def test__create_nova_inventory_no_microversion(self): network, segment = self._create_test_network_and_segment() segment_id = segment['segment']['id'] aggregate = mock.Mock(spec=["id"]) aggregate.id = 1 self.mock_n_client.aggregates.create.return_value = aggregate with mock.patch.object(seg_plugin.LOG, 'exception') as log: self.assertRaises( AttributeError, self.segments_plugin.nova_updater._create_nova_inventory, segment_id, 63, 2, []) self.assertTrue(log.called) def _assert_inventory_creation(self, segment_id, aggregate, subnet): self.batch_notifier._notify() self.mock_p_client.get_inventory.assert_called_with( segment_id, seg_plugin.IPV4_RESOURCE_CLASS) self.mock_p_client.update_resource_provider_inventory.\ assert_not_called() name = seg_plugin.SEGMENT_NAME_STUB % segment_id resource_provider = {'name': name, 'uuid': segment_id} self.mock_p_client.create_resource_provider.assert_called_with( resource_provider) self.mock_n_client.aggregates.create.assert_called_with(name, None) self.mock_p_client.associate_aggregates.assert_called_with( segment_id, [aggregate.uuid]) 
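# The segment's resource provider is mirrored by a Nova aggregate; hosts # mapped to the segment are added to that aggregate, as asserted next.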
self.mock_n_client.aggregates.add_host.assert_called_with(aggregate.id, 'fakehost') total, reserved = self._calculate_inventory_total_and_reserved( subnet['subnet']) inventory, _ = self._get_inventory(total, reserved) ipv4_classname = seg_plugin.IPV4_RESOURCE_CLASS self.mock_p_client.update_resource_provider_inventories.\ assert_called_with(segment_id, {ipv4_classname: inventory}) self.assertEqual( inventory['total'], self.mock_p_client.update_resource_provider_inventories. call_args[0][1][ipv4_classname]['total']) self.assertEqual( inventory['reserved'], self.mock_p_client.update_resource_provider_inventories. call_args[0][1][ipv4_classname]['reserved']) self.mock_p_client.reset_mock() self.mock_p_client.get_inventory.side_effect = None self.mock_n_client.reset_mock() def _test_first_subnet_association_with_segment(self, cidr='10.0.0.0/24', allocation_pools=None): network, segment = self._create_test_network_and_segment() segment_id = segment['segment']['id'] self._setup_host_mappings([(segment_id, 'fakehost')]) self.mock_p_client.get_inventory.side_effect = ( placement_exc.PlacementResourceProviderNotFound( resource_provider=segment_id, resource_class=seg_plugin.IPV4_RESOURCE_CLASS)) aggregate = mock.MagicMock() aggregate.uuid = uuidutils.generate_uuid() aggregate.id = 1 self.mock_n_client.aggregates.create.return_value = aggregate subnet = self._create_test_subnet_with_segment( network, segment, cidr=cidr, allocation_pools=allocation_pools) self._assert_inventory_creation(segment_id, aggregate, subnet) return network, segment, subnet def test_first_subnet_association_with_segment(self): self._test_first_subnet_association_with_segment() def test_update_subnet_association_with_segment(self, cidr='10.0.0.0/24', allocation_pools=None): with self.network() as network: segment_id = self._list('segments')['segments'][0]['id'] network_id = network['network']['id'] self._setup_host_mappings([(segment_id, 'fakehost')]) self.mock_p_client.get_inventory.side_effect = ( placement_exc.PlacementResourceProviderNotFound( resource_provider=segment_id, resource_class=seg_plugin.IPV4_RESOURCE_CLASS)) aggregate = mock.MagicMock() aggregate.uuid = uuidutils.generate_uuid() aggregate.id = 1 self.mock_n_client.aggregates.create.return_value = aggregate ip_version = netaddr.IPNetwork(cidr).version with self.subnet(network=network, cidr=cidr, ip_version=ip_version, allocation_pools=allocation_pools, segment_id=None) as subnet: self._validate_l2_adjacency(network_id, is_adjacent=True) data = {'subnet': {'segment_id': segment_id}} self.new_update_request( 'subnets', data, subnet['subnet']['id']).get_response(self.api) self._validate_l2_adjacency(network_id, is_adjacent=False) self._assert_inventory_creation(segment_id, aggregate, subnet) def _assert_inventory_update(self, segment_id, inventory, subnet=None, original_subnet=None): self.batch_notifier._notify() self.mock_p_client.get_inventory.assert_called_with( segment_id, seg_plugin.IPV4_RESOURCE_CLASS) original_total = original_reserved = total = reserved = 0 if original_subnet: original_total, original_reserved = ( self._calculate_inventory_total_and_reserved(original_subnet)) if subnet: total, reserved = self._calculate_inventory_total_and_reserved( subnet) inventory['total'] += total - original_total inventory['reserved'] += reserved - original_reserved self.mock_p_client.update_resource_provider_inventory.\ assert_called_with(segment_id, inventory, seg_plugin.IPV4_RESOURCE_CLASS)
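# Verify the exact total and reserved counts sent to Placement, not just # that the update call happened.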
self.assertEqual( inventory['total'], self.mock_p_client.update_resource_provider_inventory. call_args[0][1]['total']) self.assertEqual( inventory['reserved'], self.mock_p_client.update_resource_provider_inventory. call_args[0][1]['reserved']) self.mock_p_client.reset_mock() self.mock_n_client.reset_mock() def _get_inventory(self, total, reserved): inventory = {'total': total, 'reserved': reserved, 'min_unit': 1, 'max_unit': 1, 'step_size': 1, 'allocation_ratio': 1.0} return inventory, copy.deepcopy(inventory) def _test_second_subnet_association_with_segment(self): network, segment, first_subnet = ( self._test_first_subnet_association_with_segment()) segment_id = segment['segment']['id'] # Associate an IPv6 subnet with the segment self._create_test_subnet_with_segment(network, segment) first_total, first_reserved = ( self._calculate_inventory_total_and_reserved( first_subnet['subnet'])) inventory, original_inventory = self._get_inventory(first_total, first_reserved) self.mock_p_client.get_inventory.return_value = inventory second_subnet = self._create_test_subnet_with_segment( network, segment, cidr='10.0.1.0/24') self._assert_inventory_update(segment_id, original_inventory, subnet=second_subnet['subnet']) return segment_id, first_subnet, second_subnet def test_second_subnet_association_with_segment(self): self._test_second_subnet_association_with_segment() def test_delete_last_ipv4_subnet(self): network, segment, subnet = ( self._test_first_subnet_association_with_segment()) # Associate an IPv6 subnet with the segment self._create_test_subnet_with_segment(network, segment) segment_id = segment['segment']['id'] aggregate = mock.MagicMock() aggregate.uuid = uuidutils.generate_uuid() aggregate.id = 1 aggregate.hosts = ['fakehost1'] self.mock_p_client.list_aggregates.return_value = { 'aggregates': [aggregate.uuid]} self.mock_n_client.aggregates.list.return_value = [aggregate] self.mock_n_client.aggregates.get_details.return_value = aggregate self._delete('subnets', subnet['subnet']['id']) self.batch_notifier._notify() self._assert_inventory_delete(segment_id, aggregate) def _assert_inventory_delete(self, segment_id, aggregate): self.mock_p_client.list_aggregates.assert_called_with(segment_id) self.assertEqual(1, self.mock_n_client.aggregates.list.call_count) self.mock_n_client.aggregates.get_details.assert_called_with( aggregate.id) calls = [mock.call(aggregate.id, host) for host in aggregate.hosts] self.mock_n_client.aggregates.remove_host.assert_has_calls(calls) self.mock_n_client.aggregates.delete.assert_called_with(aggregate.id) self.mock_p_client.delete_resource_provider.assert_called_with( segment_id) self.mock_p_client.reset_mock() self.mock_n_client.reset_mock() def test_delete_ipv4_subnet(self): segment_id, first_subnet, second_subnet = ( self._test_second_subnet_association_with_segment()) first_total, first_reserved = ( self._calculate_inventory_total_and_reserved( first_subnet['subnet'])) second_total, second_reserved = ( self._calculate_inventory_total_and_reserved( second_subnet['subnet'])) inventory, original_inventory = self._get_inventory( first_total + second_total, first_reserved + second_reserved) self.mock_p_client.get_inventory.return_value = inventory self._delete('subnets', first_subnet['subnet']['id']) self._assert_inventory_update(segment_id, original_inventory, original_subnet=first_subnet['subnet']) def _test_update_ipv4_subnet_allocation_pools(self, allocation_pools, new_allocation_pools): network, segment, original_subnet = ( 
self._test_first_subnet_association_with_segment( cidr='10.0.0.0/24', allocation_pools=allocation_pools)) segment_id = segment['segment']['id'] self.mock_p_client.reset_mock() self.mock_n_client.reset_mock() total, reserved = self._calculate_inventory_total_and_reserved( original_subnet['subnet']) inventory, original_inventory = self._get_inventory(total, reserved) self.mock_p_client.get_inventory.return_value = inventory subnet_data = {'subnet': {'allocation_pools': new_allocation_pools}} subnet_req = self.new_update_request('subnets', subnet_data, original_subnet['subnet']['id']) subnet = self.deserialize(self.fmt, subnet_req.get_response(self.api)) self._assert_inventory_update( segment_id, original_inventory, subnet=subnet['subnet'], original_subnet=original_subnet['subnet']) def test_update_ipv4_subnet_expand_allocation_pool(self): self._test_update_ipv4_subnet_allocation_pools( [{'start': '10.0.0.2', 'end': '10.0.0.100'}], [{'start': '10.0.0.2', 'end': '10.0.0.254'}]) def test_update_ipv4_subnet_add_allocation_pool(self): self._test_update_ipv4_subnet_allocation_pools( [{'start': '10.0.0.2', 'end': '10.0.0.100'}], [{'start': '10.0.0.2', 'end': '10.0.0.100'}, {'start': '10.0.0.200', 'end': '10.0.0.254'}]) def test_update_ipv4_subnet_contract_allocation_pool(self): self._test_update_ipv4_subnet_allocation_pools( [{'start': '10.0.0.2', 'end': '10.0.0.254'}], [{'start': '10.0.0.2', 'end': '10.0.0.100'}]) def test_update_ipv4_subnet_remove_allocation_pool(self): self._test_update_ipv4_subnet_allocation_pools( [{'start': '10.0.0.2', 'end': '10.0.0.100'}, {'start': '10.0.0.200', 'end': '10.0.0.254'}], [{'start': '10.0.0.2', 'end': '10.0.0.100'}]) def _test_update_ipv4_subnet_delete_allocation_pools(self): segment_id, first_subnet, second_subnet = ( self._test_second_subnet_association_with_segment()) first_total, first_reserved = ( self._calculate_inventory_total_and_reserved( first_subnet['subnet'])) second_total, second_reserved = ( self._calculate_inventory_total_and_reserved( second_subnet['subnet'])) inventory, original_inventory = self._get_inventory( first_total + second_total, first_reserved + second_reserved) self.mock_p_client.get_inventory.return_value = inventory subnet_data = {'subnet': {'allocation_pools': []}} subnet_req = self.new_update_request('subnets', subnet_data, first_subnet['subnet']['id']) subnet_req.get_response(self.api) self._assert_inventory_update(segment_id, original_inventory, original_subnet=first_subnet['subnet']) return segment_id, second_subnet def test_update_ipv4_subnet_delete_allocation_pools(self): self._test_update_ipv4_subnet_delete_allocation_pools() def test_update_ipv4_subnet_delete_restore_last_allocation_pool(self): segment_id, subnet = ( self._test_update_ipv4_subnet_delete_allocation_pools()) self.mock_p_client.reset_mock() self.mock_n_client.reset_mock() allocation_pools = subnet['subnet']['allocation_pools'] aggregate = mock.MagicMock() aggregate.uuid = uuidutils.generate_uuid() aggregate.id = 1 aggregate.hosts = ['fakehost1'] self.mock_p_client.list_aggregates.return_value = { 'aggregates': [aggregate.uuid]} self.mock_n_client.aggregates.list.return_value = [aggregate] self.mock_n_client.aggregates.get_details.return_value = aggregate subnet_data = {'subnet': {'allocation_pools': []}} self._update('subnets', subnet['subnet']['id'], subnet_data) self.batch_notifier._notify() self._assert_inventory_delete(segment_id, aggregate) self.mock_p_client.get_inventory.side_effect = ( placement_exc.PlacementResourceProviderNotFound( 
resource_provider=segment_id, resource_class=seg_plugin.IPV4_RESOURCE_CLASS)) aggregate.hosts = [] self.mock_n_client.aggregates.create.return_value = aggregate subnet_data = {'subnet': {'allocation_pools': allocation_pools}} subnet = self._update('subnets', subnet['subnet']['id'], subnet_data) self._assert_inventory_creation(segment_id, aggregate, subnet) def test_add_host_to_segment_aggregate(self): db.subscribe() network, segment, first_subnet = ( self._test_first_subnet_association_with_segment()) segment_id = segment['segment']['id'] aggregate = mock.MagicMock() aggregate.uuid = uuidutils.generate_uuid() aggregate.id = 1 aggregate.hosts = ['fakehost1'] self.mock_p_client.list_aggregates.return_value = { 'aggregates': [aggregate.uuid]} self.mock_n_client.aggregates.list.return_value = [aggregate] host = 'otherfakehost' helpers.register_ovs_agent(host=host, bridge_mappings={'physnet': 'br-eth-1'}, plugin=self.plugin, start_flag=True) self.batch_notifier._notify() self.mock_p_client.list_aggregates.assert_called_with(segment_id) self.assertEqual(1, self.mock_n_client.aggregates.list.call_count) self.mock_n_client.aggregates.add_host.assert_called_with(aggregate.id, host) def test_add_host_to_non_existent_segment_aggregate(self): self._mock_keystone_auth() db.subscribe() network, segment, first_subnet = ( self._test_first_subnet_association_with_segment()) with mock.patch.object(seg_plugin.LOG, 'info') as log: segment_id = segment['segment']['id'] aggregate = mock.MagicMock() aggregate.uuid = uuidutils.generate_uuid() aggregate.id = 1 aggregate.hosts = ['fakehost1'] self.mock_p_client.list_aggregates.side_effect = ( placement_exc.PlacementAggregateNotFound( resource_provider=segment_id)) self.mock_n_client.aggregates.list.return_value = [aggregate] host = 'otherfakehost' helpers.register_ovs_agent(host=host, bridge_mappings={'physnet': 'br-eth-1'}, plugin=self.plugin, start_flag=True) self.batch_notifier._notify() self.mock_p_client.list_aggregates.assert_called_with(segment_id) self.assertTrue(log.called) self.mock_n_client.aggregates.add_host.assert_not_called() def test_add_host_segment_aggregate_conflict(self): db.subscribe() network, segment, first_subnet = ( self._test_first_subnet_association_with_segment()) with mock.patch.object(seg_plugin.LOG, 'info') as log: segment_id = segment['segment']['id'] aggregate = mock.MagicMock() aggregate.uuid = uuidutils.generate_uuid() aggregate.id = 1 aggregate.hosts = ['fakehost1'] self.mock_p_client.list_aggregates.return_value = { 'aggregates': [aggregate.uuid]} self.mock_n_client.aggregates.add_host.side_effect = ( nova_exc.Conflict(nova_exc.Conflict.http_status)) self.mock_n_client.aggregates.list.return_value = [aggregate] host = 'otherfakehost' helpers.register_ovs_agent(host=host, bridge_mappings={'physnet': 'br-eth-1'}, plugin=self.plugin, start_flag=True) self.batch_notifier._notify() self.mock_p_client.list_aggregates.assert_called_with(segment_id) self.mock_n_client.aggregates.add_host.assert_called_with( aggregate.id, host) self.assertTrue(log.called) def _assert_inventory_update_port(self, segment_id, inventory, num_fixed_ips): inventory['reserved'] += num_fixed_ips self.mock_p_client.get_inventory.assert_called_with( segment_id, seg_plugin.IPV4_RESOURCE_CLASS) self.mock_p_client.update_resource_provider_inventory.\ assert_called_with(segment_id, inventory, seg_plugin.IPV4_RESOURCE_CLASS) self.assertEqual( inventory['total'], self.mock_p_client.update_resource_provider_inventory. 
call_args[0][1]['total']) self.assertEqual( inventory['reserved'], self.mock_p_client.update_resource_provider_inventory. call_args[0][1]['reserved']) self.mock_p_client.reset_mock() self.mock_n_client.reset_mock() def _create_test_port(self, network_id, tenant_id, subnet, **kwargs): port = self._make_port(self.fmt, network_id, tenant_id=tenant_id, arg_list=(portbindings.HOST_ID,), **kwargs) self.batch_notifier._notify() return port def _test_create_port(self, **kwargs): network, segment, subnet = ( self._test_first_subnet_association_with_segment()) total, reserved = self._calculate_inventory_total_and_reserved( subnet['subnet']) inventory, original_inventory = self._get_inventory(total, reserved) self.mock_p_client.get_inventory.return_value = inventory port = self._create_test_port(network['network']['id'], network['network']['tenant_id'], subnet, **kwargs) return segment['segment']['id'], original_inventory, port def test_create_bound_port(self): kwargs = {portbindings.HOST_ID: 'fakehost'} segment_id, original_inventory, _ = self._test_create_port(**kwargs) self._assert_inventory_update_port(segment_id, original_inventory, 1) def test_create_bound_port_compute_owned(self): kwargs = {portbindings.HOST_ID: 'fakehost', 'device_owner': constants.DEVICE_OWNER_COMPUTE_PREFIX} self._test_create_port(**kwargs) self.mock_p_client.get_inventory.assert_not_called() self.mock_p_client.update_resource_provider_inventory.\ assert_not_called() def test_create_bound_port_dhcp_owned(self): kwargs = {portbindings.HOST_ID: 'fakehost', 'device_owner': constants.DEVICE_OWNER_DHCP} self._test_create_port(**kwargs) self.mock_p_client.get_inventory.assert_not_called() self.mock_p_client.update_resource_provider_inventory.\ assert_not_called() def test_create_unbound_port(self): self._test_create_port() self.mock_p_client.get_inventory.assert_not_called() self.mock_p_client.update_resource_provider_inventory.\ assert_not_called() def test_delete_bound_port(self): kwargs = {portbindings.HOST_ID: 'fakehost'} segment_id, before_create_inventory, port = self._test_create_port( **kwargs) self.mock_p_client.reset_mock() inventory, original_inventory = self._get_inventory( before_create_inventory['total'], before_create_inventory['reserved'] + 1) self.mock_p_client.get_inventory.return_value = inventory self._delete('ports', port['port']['id']) self.batch_notifier._notify() self._assert_inventory_update_port(segment_id, original_inventory, -1) def _create_port_for_update_test(self, num_fixed_ips=1, dhcp_owned=False, compute_owned=False): segment_id, first_subnet, second_subnet = ( self._test_second_subnet_association_with_segment()) first_total, first_reserved = ( self._calculate_inventory_total_and_reserved( first_subnet['subnet'])) second_total, second_reserved = ( self._calculate_inventory_total_and_reserved( second_subnet['subnet'])) inventory, original_inventory = self._get_inventory( first_total + second_total, first_reserved + second_reserved) self.mock_p_client.get_inventory.return_value = inventory kwargs = {portbindings.HOST_ID: 'fakehost', 'fixed_ips': [{'subnet_id': first_subnet['subnet']['id']}]} created_fixed_ips = num_fixed_ips if num_fixed_ips > 1: kwargs['fixed_ips'].append( {'subnet_id': second_subnet['subnet']['id']}) if dhcp_owned: kwargs['device_owner'] = constants.DEVICE_OWNER_DHCP if compute_owned: kwargs['device_owner'] = constants.DEVICE_OWNER_COMPUTE_PREFIX port = self._create_test_port(first_subnet['subnet']['network_id'], first_subnet['subnet']['tenant_id'], first_subnet, **kwargs) if 
dhcp_owned or compute_owned: self.mock_p_client.get_inventory.assert_not_called() self.mock_p_client.update_resource_provider_inventory.\ assert_not_called() else: self._assert_inventory_update_port(segment_id, original_inventory, created_fixed_ips) return first_subnet, second_subnet, port def _port_update(self, first_subnet, second_subnet, fixed_ips_subnets, port, reserved_increment_before=1, reserved_increment_after=1, dhcp_owned=False, compute_owned=False): first_total, first_reserved = ( self._calculate_inventory_total_and_reserved( first_subnet['subnet'])) second_total, second_reserved = ( self._calculate_inventory_total_and_reserved( second_subnet['subnet'])) inventory, original_inventory = self._get_inventory( first_total + second_total, first_reserved + second_reserved + reserved_increment_before) self.mock_p_client.get_inventory.return_value = inventory port_data = {'port': {'device_owner': ''}} if fixed_ips_subnets: port_data['port']['fixed_ips'] = [] for subnet in fixed_ips_subnets: port_data['port']['fixed_ips'].append( {'subnet_id': subnet['subnet']['id']}) if dhcp_owned: port_data['port']['device_owner'] = constants.DEVICE_OWNER_DHCP if compute_owned: port_data['port']['device_owner'] = ( constants.DEVICE_OWNER_COMPUTE_PREFIX) self._update('ports', port['port']['id'], port_data) self.batch_notifier._notify() self._assert_inventory_update_port( first_subnet['subnet']['segment_id'], original_inventory, reserved_increment_after) def test_update_port_add_fixed_ip(self): first_subnet, second_subnet, port = self._create_port_for_update_test() self._port_update(first_subnet, second_subnet, [first_subnet, second_subnet], port) def test_update_port_remove_fixed_ip(self): first_subnet, second_subnet, port = self._create_port_for_update_test( num_fixed_ips=2) self._port_update(first_subnet, second_subnet, [first_subnet], port, reserved_increment_before=2, reserved_increment_after=-1) def test_update_port_change_to_dhcp_owned(self): first_subnet, second_subnet, port = self._create_port_for_update_test() self._port_update(first_subnet, second_subnet, [], port, reserved_increment_after=-1, dhcp_owned=True) def test_update_port_change_to_no_dhcp_owned(self): first_subnet, second_subnet, port = self._create_port_for_update_test( dhcp_owned=True) self._port_update(first_subnet, second_subnet, [], port, reserved_increment_before=0, reserved_increment_after=1) def test_update_port_change_to_compute_owned(self): first_subnet, second_subnet, port = self._create_port_for_update_test() self._port_update(first_subnet, second_subnet, [], port, reserved_increment_after=-1, compute_owned=True) def test_update_port_change_to_no_compute_owned(self): first_subnet, second_subnet, port = self._create_port_for_update_test( compute_owned=True) self._port_update(first_subnet, second_subnet, [], port, reserved_increment_before=0, reserved_increment_after=1) def test_placement_api_inventory_update_conflict(self): with mock.patch.object(seg_plugin.LOG, 'debug') as log_debug: with mock.patch.object(seg_plugin.LOG, 'error') as log_error: event = seg_plugin.Event(mock.ANY, mock.ANY, total=1, reserved=0) inventory, original_inventory = self._get_inventory(100, 2) self.mock_p_client.get_inventory.return_value = inventory self.mock_p_client.update_resource_provider_inventory.\ side_effect = ( placement_exc. 
PlacementResourceProviderGenerationConflict( resource_provider=mock.ANY, generation=1)) self.segments_plugin.nova_updater._update_nova_inventory(event) self.assertEqual(seg_plugin.MAX_INVENTORY_UPDATE_RETRIES, self.mock_p_client.get_inventory.call_count) self.assertEqual( seg_plugin.MAX_INVENTORY_UPDATE_RETRIES, self.mock_p_client.update_resource_provider_inventory. call_count) self.assertEqual( seg_plugin.MAX_INVENTORY_UPDATE_RETRIES, log_debug.call_count) self.assertTrue(log_error.called) def test_placement_api_not_available(self): with mock.patch.object(seg_plugin.LOG, 'debug') as log: event = seg_plugin.Event( self.segments_plugin.nova_updater._update_nova_inventory, mock.ANY, total=1, reserved=0) self.mock_p_client.get_inventory.side_effect = ( placement_exc.PlacementEndpointNotFound()) self.segments_plugin.nova_updater._send_notifications([event]) self.assertTrue(log.called) class TestDhcpAgentSegmentScheduling(HostSegmentMappingTestCase): _mechanism_drivers = ['openvswitch', 'logger'] mock_path = 'neutron.services.segments.db.update_segment_host_mapping' block_dhcp_notifier = False def setUp(self): super(TestDhcpAgentSegmentScheduling, self).setUp() self.dhcp_agent_db = agentschedulers_db.DhcpAgentSchedulerDbMixin() self.ctx = context.get_admin_context() def _test_create_network_and_segment(self, phys_net): with self.network() as net: network = net['network'] segment = self._test_create_segment(network_id=network['id'], physical_network=phys_net, segmentation_id=200, network_type='vlan') dhcp_agents = self.dhcp_agent_db.get_dhcp_agents_hosting_networks( self.ctx, [network['id']]) self.assertEqual(0, len(dhcp_agents)) return network, segment['segment'] def _test_create_subnet(self, network, segment, cidr=None, enable_dhcp=True): cidr = cidr or '10.0.0.0/24' ip_version = constants.IP_VERSION_4 with self.subnet(network={'network': network}, segment_id=segment['id'], ip_version=ip_version, cidr=cidr, enable_dhcp=enable_dhcp) as subnet: pass return subnet['subnet'] def _register_dhcp_agents(self, hosts=None): hosts = hosts or [DHCP_HOSTA, DHCP_HOSTB] for host in hosts: helpers.register_dhcp_agent(host) def test_network_scheduling_on_segment_creation(self): self._register_dhcp_agents() self._test_create_network_and_segment('phys_net1') def test_segment_scheduling_no_host_mapping(self): self._register_dhcp_agents() network, segment = self._test_create_network_and_segment('phys_net1') self._test_create_subnet(network, segment) dhcp_agents = self.dhcp_agent_db.get_dhcp_agents_hosting_networks( self.ctx, [network['id']]) self.assertEqual(0, len(dhcp_agents)) def test_segment_scheduling_with_host_mapping(self): phys_net1 = 'phys_net1' self._register_dhcp_agents() network, segment = self._test_create_network_and_segment(phys_net1) self._register_agent(DHCP_HOSTA, mappings={phys_net1: 'br-eth-1'}, plugin=self.plugin) self._test_create_subnet(network, segment) dhcp_agents = self.dhcp_agent_db.get_dhcp_agents_hosting_networks( self.ctx, [network['id']]) self.assertEqual(1, len(dhcp_agents)) self.assertEqual(DHCP_HOSTA, dhcp_agents[0]['host']) def test_segment_scheduling_with_multiple_host_mappings(self): phys_net1 = 'phys_net1' phys_net2 = 'phys_net2' self._register_dhcp_agents([DHCP_HOSTA, DHCP_HOSTB, 'MEHA', 'MEHB']) network, segment1 = self._test_create_network_and_segment(phys_net1) segment2 = self._test_create_segment(network_id=network['id'], physical_network=phys_net2, segmentation_id=200, network_type='vlan')['segment'] self._register_agent(DHCP_HOSTA, mappings={phys_net1: 'br-eth-1'}, 
plugin=self.plugin) self._register_agent(DHCP_HOSTB, mappings={phys_net2: 'br-eth-1'}, plugin=self.plugin) self._test_create_subnet(network, segment1) self._test_create_subnet(network, segment2, cidr='11.0.0.0/24') dhcp_agents = self.dhcp_agent_db.get_dhcp_agents_hosting_networks( self.ctx, [network['id']]) self.assertEqual(2, len(dhcp_agents)) agent_hosts = [agent['host'] for agent in dhcp_agents] self.assertIn(DHCP_HOSTA, agent_hosts) self.assertIn(DHCP_HOSTB, agent_hosts) class TestSegmentHostRoutes(TestSegmentML2): VLAN_MIN = 200 VLAN_MAX = 209 def setUp(self): # NOTE(mlavalle): ml2_type_vlan options must be registered before they # are used. This setup was refactored out of .config, which breaks # test runs under pdb; runs without the debugger are unaffected. driver_type.register_ml2_drivers_vlan_opts() cfg.CONF.set_override( 'network_vlan_ranges', ['physnet:%s:%s' % (self.VLAN_MIN, self.VLAN_MAX), 'physnet0:%s:%s' % (self.VLAN_MIN, self.VLAN_MAX), 'physnet1:%s:%s' % (self.VLAN_MIN, self.VLAN_MAX), 'physnet2:%s:%s' % (self.VLAN_MIN, self.VLAN_MAX)], group='ml2_type_vlan') super(TestSegmentHostRoutes, self).setUp() def _create_subnets_segments(self, gateway_ips, cidrs): with self.network() as network: net = network['network'] segment0 = self._test_create_segment( network_id=net['id'], physical_network='physnet1', network_type=constants.TYPE_VLAN, segmentation_id=201)['segment'] segment1 = self._test_create_segment( network_id=net['id'], physical_network='physnet2', network_type=constants.TYPE_VLAN, segmentation_id=202)['segment'] with self.subnet(network=network, segment_id=segment0['id'], gateway_ip=gateway_ips[0], cidr=cidrs[0]) as subnet0, \ self.subnet(network=network, segment_id=segment1['id'], gateway_ip=gateway_ips[1], cidr=cidrs[1]) as subnet1: pass return net, subnet0['subnet'], subnet1['subnet'] def test_host_routes_two_subnets_with_segments_association(self): """Creates two subnets associated with different segments. Since the two subnets are associated with different segments on the same network, host routes will be created. """ gateway_ips = ['10.0.1.1', '10.0.2.1'] cidrs = ['10.0.1.0/24', '10.0.2.0/24'] host_routes = [{'destination': cidrs[1], 'nexthop': gateway_ips[0]}, {'destination': cidrs[0], 'nexthop': gateway_ips[1]}] net, subnet0, subnet1 = self._create_subnets_segments(gateway_ips, cidrs) net_req = self.new_show_request('networks', net['id']) raw_res = net_req.get_response(self.api) net_res = self.deserialize(self.fmt, raw_res) for subnet_id in net_res['network']['subnets']: sub_req = self.new_show_request('subnets', subnet_id) raw_res = sub_req.get_response(self.api) sub_res = self.deserialize(self.fmt, raw_res)['subnet'] self.assertIn(sub_res['cidr'], cidrs) self.assertIn(sub_res['gateway_ip'], gateway_ips) self.assertIn(sub_res['host_routes'][0], host_routes) def test_host_routes_two_subnets_with_same_segment_association(self): """Creates two subnets associated with the same segment. Since the two subnets are both associated with the same segment, no host routes will be created.
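Connectivity between them is plain L2 adjacency, so no extra routes are needed.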
""" gateway_ips = ['10.0.1.1', '10.0.2.1'] cidrs = ['10.0.1.0/24', '10.0.2.0/24'] with self.network() as network: net = network['network'] segment = self._test_create_segment( network_id=net['id'], physical_network='physnet1', network_type=constants.TYPE_VLAN, segmentation_id=201)['segment'] with self.subnet(network=network, segment_id=segment['id'], gateway_ip=gateway_ips[0], cidr=cidrs[0]) as subnet0, \ self.subnet(network=network, segment_id=segment['id'], gateway_ip=gateway_ips[1], cidr=cidrs[1]) as subnet1: subnet0 = subnet0['subnet'] subnet1 = subnet1['subnet'] req = self.new_show_request('subnets', subnet0['id']) res = req.get_response(self.api) res_subnet0 = self.deserialize(self.fmt, res) req = self.new_show_request('subnets', subnet1['id']) res = req.get_response(self.api) res_subnet1 = self.deserialize(self.fmt, res) self.assertEqual([], res_subnet0['subnet']['host_routes']) self.assertEqual([], res_subnet1['subnet']['host_routes']) def test_host_routes_create_two_subnets_then_delete_one(self): """Delete subnet after creating two subnets associated same segment. Host routes with destination to the subnet that is deleted are removed from the remaining subnets. """ gateway_ips = ['10.0.1.1', '10.0.2.1'] cidrs = ['10.0.1.0/24', '10.0.2.0/24'] net, subnet0, subnet1 = self._create_subnets_segments(gateway_ips, cidrs) sh_req = self.new_show_request('subnets', subnet1['id']) raw_res = sh_req.get_response(self.api) sub_res = self.deserialize(self.fmt, raw_res) self.assertEqual([{'destination': cidrs[0], 'nexthop': gateway_ips[1]}], sub_res['subnet']['host_routes']) del_req = self.new_delete_request('subnets', subnet0['id']) del_req.get_response(self.api) sh_req = self.new_show_request('subnets', subnet1['id']) raw_res = sh_req.get_response(self.api) sub_res = self.deserialize(self.fmt, raw_res) self.assertEqual([], sub_res['subnet']['host_routes']) def test_host_routes_two_subnets_then_change_gateway_ip(self): gateway_ips = ['10.0.1.1', '10.0.2.1'] cidrs = ['10.0.1.0/24', '10.0.2.0/24'] host_routes = [{'destination': cidrs[1], 'nexthop': gateway_ips[0]}, {'destination': cidrs[0], 'nexthop': gateway_ips[1]}] net, subnet0, subnet1 = self._create_subnets_segments(gateway_ips, cidrs) net_req = self.new_show_request('networks', net['id']) raw_res = net_req.get_response(self.api) net_res = self.deserialize(self.fmt, raw_res) for subnet_id in net_res['network']['subnets']: sub_req = self.new_show_request('subnets', subnet_id) raw_res = sub_req.get_response(self.api) sub_res = self.deserialize(self.fmt, raw_res)['subnet'] self.assertIn(sub_res['cidr'], cidrs) self.assertIn(sub_res['gateway_ip'], gateway_ips) self.assertIn(sub_res['host_routes'][0], host_routes) new_gateway_ip = '10.0.1.254' data = {'subnet': {'gateway_ip': new_gateway_ip, 'allocation_pools': [{'start': '10.0.1.1', 'end': '10.0.1.253'}]}} self.new_update_request( 'subnets', data, subnet0['id']).get_response(self.api) sh_req = self.new_show_request('subnets', subnet0['id']) raw_res = sh_req.get_response(self.api) sub_res = self.deserialize(self.fmt, raw_res) self.assertEqual([{'destination': cidrs[1], 'nexthop': new_gateway_ip}], sub_res['subnet']['host_routes']) def test_host_routes_two_subnets_summary_route_in_request(self): gateway_ips = ['10.0.1.1', '10.0.2.1'] cidrs = ['10.0.1.0/24', '10.0.2.0/24'] summary_net = '10.0.0.0/16' host_routes = [{'destination': summary_net, 'nexthop': gateway_ips[0]}, {'destination': summary_net, 'nexthop': gateway_ips[1]}] with self.network() as network: net = network['network'] segment0 = 
self._test_create_segment( network_id=net['id'], physical_network='physnet1', network_type=constants.TYPE_VLAN, segmentation_id=201)['segment'] segment1 = self._test_create_segment( network_id=net['id'], physical_network='physnet2', network_type=constants.TYPE_VLAN, segmentation_id=202)['segment'] self.subnet(network=network, segment_id=segment0['id'], gateway_ip=gateway_ips[0], cidr=cidrs[0], host_routes=[host_routes[0]]) self.subnet(network=network, segment_id=segment1['id'], gateway_ip=gateway_ips[1], cidr=cidrs[1], host_routes=[host_routes[1]]) net_req = self.new_show_request('networks', net['id']) raw_res = net_req.get_response(self.api) net_res = self.deserialize(self.fmt, raw_res) for subnet_id in net_res['network']['subnets']: sub_req = self.new_show_request('subnets', subnet_id) raw_res = sub_req.get_response(self.api) sub_res = self.deserialize(self.fmt, raw_res)['subnet'] self.assertIn(sub_res['cidr'], cidrs) self.assertIn(sub_res['gateway_ip'], gateway_ips) self.assertEqual(len(sub_res['host_routes']), 1) self.assertIn(sub_res['host_routes'][0], host_routes) class TestSegmentHostMappingNoStore( test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def setUp(self): driver_type.register_ml2_drivers_vlan_opts() cfg.CONF.set_override('network_vlan_ranges', ['phys_net1'], group='ml2_type_vlan') cfg.CONF.set_override('service_plugins', []) super(TestSegmentHostMappingNoStore, self).setUp( plugin='neutron.plugins.ml2.plugin.Ml2Plugin') # set to None for simulating server start db._USER_CONFIGURED_SEGMENT_PLUGIN = None db.subscribe() @mock.patch('neutron.services.segments.db.update_segment_host_mapping') @mock.patch('neutron.services.segments.db.map_segment_to_hosts') def test_no_segmenthostmapping_when_disable_segment( self, mock_map_segment_to_hosts, mock_update_segment_mapping): with self.network( arg_list=('provider:network_type', 'provider:physical_network', 'provider:segmentation_id'), **{'provider:network_type': 'vlan', 'provider:physical_network': 'phys_net1', 'provider:segmentation_id': '400'}) as network: network['network'] mock_map_segment_to_hosts.assert_not_called() host1 = 'test_host' physical_network = 'phys_net1' helpers.register_ovs_agent( host=host1, bridge_mappings={physical_network: 'br-eth-1'}, plugin=self.plugin) mock_update_segment_mapping.assert_not_called() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_servicetype.py0000644000175000017500000002504500000000000027136 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
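# Unit tests for the service type manager and the service-providers API # extension.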
import mock from neutron_lib.api.definitions import servicetype as svctype_apidef from neutron_lib import context from neutron_lib import exceptions as n_exc from neutron_lib.plugins import constants from oslo_config import cfg from oslo_utils import uuidutils import webob.exc as webexc import webtest from neutron.api import extensions from neutron.db import servicetype_db as st_db from neutron.extensions import servicetype from neutron.objects import servicetype as servicetype_obj from neutron.services import provider_configuration as provconf from neutron.tests.unit.api import test_extensions from neutron.tests.unit.api.v2 import test_base from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit import dummy_plugin as dp from neutron.tests.unit import testlib_api _uuid = test_base._uuid _get_path = test_base._get_path PLUGIN_NAME = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' class ServiceTypeManagerTestCase(testlib_api.SqlTestCase): def setUp(self): self.service_providers = mock.patch.object( provconf.NeutronModule, 'service_providers').start() super(ServiceTypeManagerTestCase, self).setUp() self.ctx = context.get_admin_context() self.setup_coreplugin(PLUGIN_NAME) def _set_override(self, service_providers): self.service_providers.return_value = service_providers st_db.ServiceTypeManager._instance = None self.manager = st_db.ServiceTypeManager.get_instance() for provider in service_providers: service_type = provider.split(':')[0] self.manager.add_provider_configuration( service_type, provconf.ProviderConfiguration( svc_type=service_type)) def test_service_provider_driver_not_unique(self): self._set_override([constants.FIREWALL + ':fwaas:driver']) prov = {'service_type': constants.FIREWALL, 'name': 'name2', 'driver': 'driver', 'default': False} self.assertRaises( n_exc.Invalid, self.manager.config['FIREWALL'].add_provider, prov) def test_get_service_providers(self): """Test that get_service_providers filters correctly.""" self._set_override( [constants.VPN + ':vpnaas:driver_path1', constants.FIREWALL + ':fwaas:driver_path2']) ctx = context.get_admin_context() res = self.manager.get_service_providers(ctx) self.assertEqual(2, len(res)) res = self.manager.get_service_providers( ctx, filters=dict(service_type=[constants.VPN]) ) self.assertEqual(1, len(res)) res = self.manager.get_service_providers( ctx, filters=dict(service_type=[constants.FIREWALL]) ) self.assertEqual(1, len(res)) def test_multiple_default_providers_specified_for_service(self): self.assertRaises( n_exc.Invalid, self._set_override, [constants.FIREWALL + ':fwaas1:driver_path:default', constants.FIREWALL + ':fwaas2:driver_path:default']) def test_get_default_provider(self): self._set_override([constants.FIREWALL + ':fwaas1:driver_path:default', dp.DUMMY_SERVICE_TYPE + ':fwaas2:driver_path2']) # can pass None as a context p = self.manager.get_default_service_provider(None, constants.FIREWALL) self.assertEqual({'service_type': constants.FIREWALL, 'name': 'fwaas1', 'driver': 'driver_path', 'default': True}, p) self.assertRaises( provconf.DefaultServiceProviderNotFound, self.manager.get_default_service_provider, None, dp.DUMMY_SERVICE_TYPE ) def test_get_provider_names_by_resource_ids(self): self._set_override([dp.DUMMY_SERVICE_TYPE + ':dummy1:driver_path', dp.DUMMY_SERVICE_TYPE + ':dummy2:driver_path2']) ctx = context.get_admin_context() test_data = [{'provider_name': 'dummy1', 'resource_id': uuidutils.generate_uuid()}, {'provider_name': 'dummy1', 'resource_id': uuidutils.generate_uuid()},
{'provider_name': 'dummy2', 'resource_id': uuidutils.generate_uuid()}] self.manager.add_resource_association(ctx, dp.DUMMY_SERVICE_TYPE, **test_data[0]) self.manager.add_resource_association(ctx, dp.DUMMY_SERVICE_TYPE, **test_data[1]) self.manager.add_resource_association(ctx, dp.DUMMY_SERVICE_TYPE, **test_data[2]) names_by_id = self.manager.get_provider_names_by_resource_ids( ctx, [td['resource_id'] for td in test_data]) # unmatched IDs will be excluded from the result self.assertEqual({td['resource_id']: td['provider_name'] for td in test_data}, names_by_id) def test_add_resource_association(self): self._set_override([constants.FIREWALL + ':fwaas1:driver_path:default', dp.DUMMY_SERVICE_TYPE + ':fwaas2:driver_path2']) ctx = context.get_admin_context() self.manager.add_resource_association(ctx, constants.FIREWALL, 'fwaas1', uuidutils.generate_uuid()) self.assertEqual( 1, servicetype_obj.ProviderResourceAssociation.count(ctx)) servicetype_obj.ProviderResourceAssociation.delete_objects(ctx) def test_invalid_resource_association(self): self._set_override([constants.FIREWALL + ':fwaas1:driver_path:default', dp.DUMMY_SERVICE_TYPE + ':fwaas2:driver_path2']) ctx = context.get_admin_context() self.assertRaises(provconf.ServiceProviderNotFound, self.manager.add_resource_association, ctx, 'BLABLA_svc', 'name', '123-123') class TestServiceTypeExtensionManager(object): """Mock extensions manager.""" def get_resources(self): return (servicetype.Servicetype.get_resources() + dp.Dummy.get_resources()) def get_actions(self): return [] def get_request_extensions(self): return [] class ServiceTypeExtensionTestCaseBase(testlib_api.WebTestCase): fmt = 'json' def setUp(self): # This is needed because otherwise a failure will occur due to # nonexisting core_plugin self.setup_coreplugin(test_db_base_plugin_v2.DB_PLUGIN_KLASS) cfg.CONF.set_override('service_plugins', ["%s.%s" % (dp.__name__, dp.DummyServicePlugin.__name__)]) # Ensure existing ExtensionManager is not used extensions.PluginAwareExtensionManager._instance = None ext_mgr = TestServiceTypeExtensionManager() self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr) self.api = webtest.TestApp(self.ext_mdw) self.resource_name = svctype_apidef.RESOURCE_NAME.replace('-', '_') super(ServiceTypeExtensionTestCaseBase, self).setUp() class ServiceTypeExtensionTestCase(ServiceTypeExtensionTestCaseBase): def setUp(self): self._patcher = mock.patch( "neutron.db.servicetype_db.ServiceTypeManager", autospec=True) self.mock_mgr = self._patcher.start() self.mock_mgr.get_instance.return_value = self.mock_mgr.return_value super(ServiceTypeExtensionTestCase, self).setUp() def test_service_provider_list(self): instance = self.mock_mgr.return_value res = self.api.get(_get_path('service-providers', fmt=self.fmt)) instance.get_service_providers.assert_called_with(mock.ANY, filters={}, fields=[]) self.assertEqual(webexc.HTTPOk.code, res.status_int) class ServiceTypeManagerExtTestCase(ServiceTypeExtensionTestCaseBase): """Tests ServiceTypemanager as a public API.""" def setUp(self): self.service_providers = mock.patch.object( provconf.NeutronModule, 'service_providers').start() service_providers = [ constants.FIREWALL + ':fwaas:driver_path', dp.DUMMY_SERVICE_TYPE + ':dummy:dummy_dr' ] self.service_providers.return_value = service_providers # Blank out service type manager instance st_db.ServiceTypeManager._instance = None self.manager = st_db.ServiceTypeManager.get_instance() for provider in service_providers: service_type = provider.split(':')[0] 
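# Provider strings use the <service_type>:<name>:<driver>[:default] # format; the service_type prefix selects which per-service provider # configuration receives the entry.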
self.manager.add_provider_configuration( service_type, provconf.ProviderConfiguration( svc_type=service_type)) super(ServiceTypeManagerExtTestCase, self).setUp() def _list_service_providers(self): return self.api.get(_get_path('service-providers', fmt=self.fmt)) def test_list_service_providers(self): res = self._list_service_providers() self.assertEqual(webexc.HTTPOk.code, res.status_int) data = self.deserialize(res) self.assertIn('service_providers', data) self.assertGreaterEqual(len(data['service_providers']), 2) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_subnet_dns_publish_fixed_ip.py0000644000175000017500000001000200000000000032320 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import dns as dns_apidef from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.api.definitions import subnet_dns_publish_fixed_ip as api_def from neutron_lib import constants from oslo_config import cfg from neutron.db import db_base_plugin_v2 from neutron.extensions import subnet_dns_publish_fixed_ip from neutron.tests.unit.plugins.ml2 import test_plugin class SubnetDNSPublishFixedIPExtensionManager(object): def get_resources(self): return [] def get_actions(self): return [] def get_request_extensions(self): return [] def get_extended_resources(self, version): extension = subnet_dns_publish_fixed_ip.Subnet_dns_publish_fixed_ip() return extension.get_extended_resources(version) class SubnetDNSPublishFixedIPExtensionTestPlugin( db_base_plugin_v2.NeutronDbPluginV2): """Test plugin to mixin the subnet_dns_publish_fixed_ip extension. """ supported_extension_aliases = [api_def.ALIAS, dns_apidef.ALIAS, l3_apidef.ALIAS] class SubnetDNSPublishFixedIPExtensionTestCase( test_plugin.Ml2PluginV2TestCase): """Test API extension subnet_dns_publish_fixed_ip attributes. 
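The extension adds a boolean dns_publish_fixed_ip attribute to subnets; the tests below exercise it both at creation time and via update.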
""" _extension_drivers = ['subnet_dns_publish_fixed_ip'] def setUp(self): cfg.CONF.set_override('extension_drivers', self._extension_drivers, group='ml2') super(SubnetDNSPublishFixedIPExtensionTestCase, self).setUp() def _create_subnet( self, network, ip_version=constants.IP_VERSION_4, cidr=None, **kwargs): cidr = cidr or '192.0.2.0/24' network_id = network['network']['id'] tenant_id = network['network']['tenant_id'] data = {'subnet': { 'network_id': network_id, 'ip_version': str(ip_version), 'tenant_id': tenant_id, 'cidr': cidr}} data['subnet'].update(kwargs) subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) return self.deserialize(self.fmt, res)['subnet'] def test_create_subnet_default(self): with self.network() as network: subnet = self._create_subnet(network) self.assertIn('dns_publish_fixed_ip', subnet) self.assertFalse(subnet['dns_publish_fixed_ip']) data = {'subnet': {'dns_publish_fixed_ip': 'true'}} req = self.new_update_request('subnets', data, subnet['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertTrue(res['subnet']['dns_publish_fixed_ip']) data = {'subnet': {'dns_publish_fixed_ip': 'false'}} req = self.new_update_request('subnets', data, subnet['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertFalse(res['subnet']['dns_publish_fixed_ip']) def test_create_subnet_with_arg(self): with self.network() as network: subnet = self._create_subnet(network, dns_publish_fixed_ip=True) self.assertIn('dns_publish_fixed_ip', subnet) self.assertTrue(subnet['dns_publish_fixed_ip']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_subnet_onboard.py0000644000175000017500000003065100000000000027577 0ustar00coreycorey00000000000000# (c) Copyright 2019 SUSE LLC # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import contextlib import netaddr from neutron_lib.db import api as db_api from neutron_lib import exceptions as exc from oslo_utils import uuidutils from neutron.objects import subnet as subnet_obj from neutron.objects import subnetpool as subnetpool_obj from neutron.tests.unit.plugins.ml2 import test_plugin _uuid = uuidutils.generate_uuid class SubnetOnboardTestsBase(object): @contextlib.contextmanager def address_scope(self, ip_version, prefixes=None, shared=False, admin=True, name='test-scope', is_default_pool=False, tenant_id=None, **kwargs): if not tenant_id: tenant_id = _uuid() scope_data = {'tenant_id': tenant_id, 'ip_version': ip_version, 'shared': shared, 'name': name + '-scope'} with db_api.CONTEXT_WRITER.using(self.context): yield self.driver.create_address_scope( self.context, {'address_scope': scope_data}) @contextlib.contextmanager def subnetpool(self, ip_version, prefixes=None, shared=False, admin=True, name='test-pool', is_default_pool=False, tenant_id=None, address_scope_id=None, **kwargs): if not tenant_id: tenant_id = _uuid() pool_data = {'tenant_id': tenant_id, 'shared': shared, 'name': name, 'address_scope_id': address_scope_id, 'prefixes': prefixes, 'is_default': is_default_pool} for key in kwargs: pool_data[key] = kwargs[key] with db_api.CONTEXT_WRITER.using(self.context): yield self.driver.create_subnetpool(self.context, {'subnetpool': pool_data}) def test_onboard_subnet_no_address_scope(self): with self.subnetpool(self.ip_version, prefixes=self.subnetpool_prefixes) as subnetpool: self._test_onboard_cidr(subnetpool['id'], self.cidr_to_onboard) def test_onboard_subnet_address_scope(self): with self.address_scope(self.ip_version) as addr_scope: with self.subnetpool(self.ip_version, prefixes=self.subnetpool_prefixes, address_scope_id=addr_scope['id']) as subnetpool: self._test_onboard_cidr(subnetpool['id'], self.cidr_to_onboard) def test_onboard_subnet_overlapping_cidr_no_address_scope(self): with self.subnetpool(self.ip_version, prefixes=self.subnetpool_prefixes) as subnetpool: with self.subnet(cidr=self.overlapping_cidr, subnetpool_id=subnetpool['id'], ip_version=self.ip_version): self.assertRaises(exc.IllegalSubnetPoolUpdate, self._test_onboard_cidr, subnetpool['id'], self.overlapping_cidr) def test_onboard_subnet_address_scope_multiple_pools(self): with self.address_scope(self.ip_version) as addr_scope: with self.subnetpool(self.ip_version, prefixes=[self.subnetpool_prefixes[0]], address_scope_id=addr_scope['id']) as onboard_pool,\ self.subnetpool(self.ip_version, prefixes=[self.subnetpool_prefixes[1]], address_scope_id=addr_scope['id']): self._test_onboard_cidr(onboard_pool['id'], self.cidr_to_onboard) def test_onboard_subnet_address_scope_overlap_multiple_pools(self): with self.address_scope(self.ip_version) as addr_scope: with self.subnetpool(self.ip_version, prefixes=[self.subnetpool_prefixes[0]], address_scope_id=addr_scope['id']) as onboard_pool,\ self.subnetpool(self.ip_version, prefixes=[self.subnetpool_prefixes[1]], address_scope_id=addr_scope['id']) as other_pool: self.assertRaises(exc.AddressScopePrefixConflict, self._test_onboard_cidr, onboard_pool['id'], other_pool['prefixes'][0]) def test_onboard_subnet_move_between_pools_same_address_scope(self): with self.address_scope(self.ip_version) as addr_scope: with self.subnetpool(self.ip_version, prefixes=[self.cidr_to_onboard], address_scope_id=addr_scope['id']) as source: with self.subnetpool(self.ip_version, address_scope_id=addr_scope['id'], prefixes=self.subnetpool_prefixes) as target: with 
self.subnet(cidr=self.cidr_to_onboard, ip_version=self.ip_version) as subnet_to_onboard: subnet_to_onboard = subnet_to_onboard['subnet'] # Onboard subnet into an initial subnet pool self._test_onboard_network_subnets( subnet_to_onboard['network_id'], source['id']) source_pool_subnets = subnet_obj.Subnet.get_objects( self.context, subnetpool_id=source['id']) self.assertEqual(1, len(source_pool_subnets)) # Attempt to move the subnet to the target pool self.assertRaises(exc.AddressScopePrefixConflict, self._test_onboard_network_subnets, subnet_to_onboard['network_id'], target['id']) def test_onboard_subnet_move_between_pools(self): with self.subnetpool(self.ip_version, prefixes=self.subnetpool_prefixes) as source: with self.subnetpool(self.ip_version, prefixes=self.subnetpool_prefixes) as target: with self.subnet(cidr=self.cidr_to_onboard, ip_version=self.ip_version) as subnet_to_onboard: subnet_to_onboard = subnet_to_onboard['subnet'] # Onboard subnet into an initial subnet pool self._test_onboard_network_subnets( subnet_to_onboard['network_id'], source['id']) source_pool_subnets = subnet_obj.Subnet.get_objects( self.context, subnetpool_id=source['id']) self.assertEqual(1, len(source_pool_subnets)) # Attempt to onboard subnet into a different pool self._test_onboard_network_subnets( subnet_to_onboard['network_id'], target['id']) source_pool_subnets = subnet_obj.Subnet.get_objects( self.context, subnetpool_id=source['id']) target_pool_subnets = subnet_obj.Subnet.get_objects( self.context, subnetpool_id=target['id']) source_subnetpool = subnetpool_obj.SubnetPool.get_object( self.context, id=source['id']) # Assert that the subnet prefix has not been removed # from the source prefix list. The prefix should # simply be released back to the pool, not removed.
self.assertIn( netaddr.IPNetwork(self.cidr_to_onboard), netaddr.IPSet(source_subnetpool['prefixes'])) # Assert the subnet is associated with the proper pool self.assertEqual(0, len(source_pool_subnets)) self.assertEqual(1, len(target_pool_subnets)) def test_onboard_subnet_invalid_request(self): with self.subnetpool(self.ip_version, prefixes=self.subnetpool_prefixes) as subnetpool: self.assertRaises(exc.InvalidInput, self._test_onboard_subnet_no_network_id, subnetpool['id'], self.cidr_to_onboard) def test_onboard_subnet_network_not_found(self): with self.subnetpool(self.ip_version, prefixes=self.subnetpool_prefixes) as subnetpool: self.assertRaises(exc.NetworkNotFound, self._test_onboard_subnet_non_existing_network, subnetpool['id'], self.cidr_to_onboard) def _test_onboard_subnet_no_network_id(self, subnetpool_id, cidr_to_onboard): with self.subnet(cidr=cidr_to_onboard, ip_version=self.ip_version) as subnet_to_onboard: subnet_to_onboard = subnet_to_onboard['subnet'] self.driver.onboard_network_subnets( self.context, subnetpool_id, {}) def _test_onboard_subnet_non_existing_network(self, subnetpool_id, cidr_to_onboard): with self.subnet(cidr=cidr_to_onboard, ip_version=self.ip_version) as subnet_to_onboard: subnet_to_onboard = subnet_to_onboard['subnet'] self.driver.onboard_network_subnets( self.context, subnetpool_id, {'network_id': _uuid()}) def _test_onboard_network_subnets(self, network_id, subnetpool_id): response = self.driver.onboard_network_subnets( self.context, subnetpool_id, {'network_id': network_id}) subnetpool = subnetpool_obj.SubnetPool.get_object(self.context, id=subnetpool_id) subnetpool_prefixes = netaddr.IPSet(subnetpool.prefixes) for onboarded_subnet in subnet_obj.Subnet.get_objects( self.context, ip_version=self.ip_version, network_id=network_id): onboarded_prefix = netaddr.IPNetwork(onboarded_subnet.cidr) self.assertIn({'id': onboarded_subnet.id, 'cidr': onboarded_subnet.cidr}, response) self.assertEqual(subnetpool_id, onboarded_subnet.subnetpool_id) self.assertIn(onboarded_prefix, subnetpool_prefixes) def _test_onboard_cidr(self, subnetpool_id, cidr_to_onboard): with self.subnet(cidr=cidr_to_onboard, ip_version=self.ip_version) as subnet_to_onboard: subnet_to_onboard = subnet_to_onboard['subnet'] self._test_onboard_network_subnets( subnet_to_onboard['network_id'], subnetpool_id) class SubnetOnboardTestsIpv4(SubnetOnboardTestsBase, test_plugin.Ml2PluginV2TestCase): subnetpool_prefixes = ["192.168.1.0/24", "192.168.2.0/24"] cidr_to_onboard = "10.0.0.0/24" overlapping_cidr = "192.168.1.128/25" default_prefixlen = 24 ip_version = 4 class SubnetOnboardTestsIpv6(SubnetOnboardTestsBase, test_plugin.Ml2PluginV2TestCase): subnetpool_prefixes = ["2001:db8:1234::/48", "2001:db8:1235::/48"] cidr_to_onboard = "2001:db8:4321::/48" overlapping_cidr = "2001:db8:1234:1111::/64" default_prefixlen = 64 ip_version = 6

# ====== neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_subnet_service_types.py ======

# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob.exc from neutron_lib.api.definitions import portbindings from neutron.db import db_base_plugin_v2 from neutron.db import subnet_service_type_mixin from neutron.extensions import subnet_service_types from neutron.tests.unit.db import test_db_base_plugin_v2 class SubnetServiceTypesExtensionManager(object): def get_resources(self): return [] def get_actions(self): return [] def get_request_extensions(self): return [] def get_extended_resources(self, version): extension = subnet_service_types.Subnet_service_types() return extension.get_extended_resources(version) class SubnetServiceTypesExtensionTestPlugin( db_base_plugin_v2.NeutronDbPluginV2, subnet_service_type_mixin.SubnetServiceTypeMixin): """Test plugin to mixin the subnet service_types extension. """ supported_extension_aliases = ["subnet-service-types", portbindings.ALIAS] class SubnetServiceTypesExtensionTestCase( test_db_base_plugin_v2.NeutronDbPluginV2TestCase): """Test API extension subnet_service_types attributes. """ CIDRS = ['10.0.0.0/8', '20.0.0.0/8', '30.0.0.0/8'] IP_VERSION = 4 def setUp(self): plugin = ('neutron.tests.unit.extensions.test_subnet_service_types.' + 'SubnetServiceTypesExtensionTestPlugin') ext_mgr = SubnetServiceTypesExtensionManager() super(SubnetServiceTypesExtensionTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) def _create_service_subnet(self, service_types=None, cidr=None, network=None, enable_dhcp=False): if not network: with self.network() as network: pass network = network['network'] if not cidr: cidr = self.CIDRS[0] args = {'net_id': network['id'], 'tenant_id': network['tenant_id'], 'cidr': cidr, 'ip_version': self.IP_VERSION, 'enable_dhcp': enable_dhcp} if service_types: args['service_types'] = service_types return self._create_subnet(self.fmt, **args) def _test_create_subnet(self, service_types, expect_fail=False): res = self._create_service_subnet(service_types) if expect_fail: self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) else: subnet = self.deserialize('json', res) subnet = subnet['subnet'] self.assertEqual(len(service_types), len(subnet['service_types'])) for service in service_types: self.assertIn(service, subnet['service_types']) def test_create_subnet_blank_type(self): self._test_create_subnet([]) def test_create_subnet_bar_type(self): self._test_create_subnet(['network:bar']) def test_create_subnet_foo_type(self): self._test_create_subnet(['compute:foo']) def test_create_subnet_bar_and_foo_type(self): self._test_create_subnet(['network:bar', 'compute:foo']) def test_create_subnet_invalid_type(self): self._test_create_subnet(['foo'], expect_fail=True) self._test_create_subnet([1], expect_fail=True) def test_create_subnet_no_type(self): res = self._create_service_subnet() subnet = self.deserialize('json', res) subnet = subnet['subnet'] self.assertFalse(subnet['service_types']) def _test_update_subnet(self, subnet, service_types, fail_code=None): data = {'subnet': {'service_types': service_types}} req = self.new_update_request('subnets', data, subnet['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) if fail_code is not None: 
self.assertEqual(fail_code, res['NeutronError']['type']) else: subnet = res['subnet'] self.assertEqual(len(service_types), len(subnet['service_types'])) for service in service_types: self.assertIn(service, subnet['service_types']) def test_update_subnet_zero_to_one(self): service_types = ['network:foo'] # Create a subnet with no service type res = self._create_service_subnet() subnet = self.deserialize('json', res)['subnet'] # Update it with a single service type self._test_update_subnet(subnet, service_types) def test_update_subnet_one_to_two(self): service_types = ['network:foo'] # Create a subnet with one service type res = self._create_service_subnet(service_types) subnet = self.deserialize('json', res)['subnet'] # Update it with two service types service_types.append('compute:bar') self._test_update_subnet(subnet, service_types) def test_update_subnet_two_to_one(self): service_types = ['network:foo', 'compute:bar'] # Create a subnet with two service types res = self._create_service_subnet(service_types) subnet = self.deserialize('json', res)['subnet'] # Update it with one service type service_types = ['network:foo'] self._test_update_subnet(subnet, service_types) def test_update_subnet_one_to_zero(self): service_types = ['network:foo'] # Create a subnet with one service type res = self._create_service_subnet(service_types) subnet = self.deserialize('json', res)['subnet'] # Update it with zero service types service_types = [] self._test_update_subnet(subnet, service_types) def test_update_subnet_invalid_type(self): # Create a subnet with no service type res = self._create_service_subnet() subnet = self.deserialize('json', res)['subnet'] # Update it with invalid service type(s) self._test_update_subnet(subnet, ['foo'], fail_code='InvalidSubnetServiceType') self._test_update_subnet(subnet, [2], fail_code='InvalidInputSubnetServiceType') def _assert_port_res(self, port, service_type, subnet, fallback, error='IpAddressGenerationFailureNoMatchingSubnet'): res = self.deserialize('json', port) if fallback: port = res['port'] self.assertEqual(1, len(port['fixed_ips'])) self.assertEqual(service_type, port['device_owner']) self.assertEqual(subnet['id'], port['fixed_ips'][0]['subnet_id']) else: self.assertEqual(error, res['NeutronError']['type']) def test_create_port_with_matching_service_type(self): with self.network() as network: pass matching_type = 'network:foo' non_matching_type = 'network:bar' # Create a subnet with no service types self._create_service_subnet(network=network) # Create a subnet with a non-matching service type self._create_service_subnet([non_matching_type], cidr=self.CIDRS[2], network=network) # Create a subnet with a service type to match the port device owner res = self._create_service_subnet([matching_type], cidr=self.CIDRS[1], network=network) service_subnet = self.deserialize('json', res)['subnet'] # Create a port with device owner matching the correct service subnet network = network['network'] port = self._create_port(self.fmt, net_id=network['id'], tenant_id=network['tenant_id'], device_owner=matching_type) self._assert_port_res(port, matching_type, service_subnet, True) def test_create_port_without_matching_service_type(self, fallback=True): with self.network() as network: pass subnet = '' matching_type = 'compute:foo' non_matching_type = 'network:foo' if fallback: # Create a subnet with no service types res = self._create_service_subnet(network=network) subnet = self.deserialize('json', res)['subnet'] # Create a subnet with a non-matching service type 
self._create_service_subnet([non_matching_type], cidr=self.CIDRS[1], network=network) # Create a port with device owner not matching the service subnet network = network['network'] port = self._create_port(self.fmt, net_id=network['id'], tenant_id=network['tenant_id'], device_owner=matching_type) self._assert_port_res(port, matching_type, subnet, fallback) def test_create_port_without_matching_service_type_no_fallback(self): self.test_create_port_without_matching_service_type(fallback=False) def test_create_port_no_device_owner(self, fallback=True): with self.network() as network: pass subnet = '' service_type = 'compute:foo' if fallback: # Create a subnet with no service types res = self._create_service_subnet(network=network) subnet = self.deserialize('json', res)['subnet'] # Create a subnet with a service_type self._create_service_subnet([service_type], cidr=self.CIDRS[1], network=network) # Create a port without a device owner network = network['network'] port = self._create_port(self.fmt, net_id=network['id'], tenant_id=network['tenant_id']) self._assert_port_res(port, '', subnet, fallback) def test_create_port_no_device_owner_no_fallback(self): self.test_create_port_no_device_owner(fallback=False) def test_create_port_exhausted_subnet(self, fallback=True): with self.network() as network: pass subnet = '' service_type = 'compute:foo' if fallback: # Create a subnet with no service types res = self._create_service_subnet(network=network) subnet = self.deserialize('json', res)['subnet'] # Create a subnet with a service_type res = self._create_service_subnet([service_type], cidr=self.CIDRS[1], network=network) service_subnet = self.deserialize('json', res)['subnet'] # Update the service subnet with empty allocation pools data = {'subnet': {'allocation_pools': []}} req = self.new_update_request('subnets', data, service_subnet['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) # Create a port with a matching device owner network = network['network'] port = self._create_port(self.fmt, net_id=network['id'], tenant_id=network['tenant_id'], device_owner=service_type) self._assert_port_res(port, service_type, subnet, fallback, error='IpAddressGenerationFailure') def test_create_port_exhausted_subnet_no_fallback(self): self.test_create_port_exhausted_subnet(fallback=False) def test_create_dhcp_port_compute_subnet(self, enable_dhcp=True): with self.network() as network: pass res = self._create_service_subnet(['compute:nova'], network=network, enable_dhcp=enable_dhcp) subnet = self.deserialize('json', res)['subnet'] network = network['network'] port = self._create_port(self.fmt, net_id=network['id'], tenant_id=network['tenant_id'], fixed_ips=[{'subnet_id': subnet['id']}], device_owner='network:dhcp') self._assert_port_res(port, 'network:dhcp', subnet, enable_dhcp) def test_create_dhcp_port_compute_subnet_no_dhcp(self): self.test_create_dhcp_port_compute_subnet(enable_dhcp=False) def test_update_port_fixed_ips(self): with self.network() as network: pass service_type = 'compute:foo' # Create a subnet with a service_type res = self._create_service_subnet([service_type], cidr=self.CIDRS[1], network=network) service_subnet = self.deserialize('json', res)['subnet'] # Create a port with a matching device owner network = network['network'] port = self._create_port(self.fmt, net_id=network['id'], tenant_id=network['tenant_id'], device_owner=service_type) port = self.deserialize('json', port)['port'] # Update the port's fixed_ips. It's ok to reuse the same IP it already # has. 
ip_address = port['fixed_ips'][0]['ip_address'] data = {'port': {'fixed_ips': [{'subnet_id': service_subnet['id'], 'ip_address': ip_address}]}} # self._update will fail with a MismatchError if the update cannot be # applied port = self._update('ports', port['id'], data) def test_update_port_host_binding(self): with self.network() as network: pass service_type = 'compute:foo' # Create a subnet with a service_type self._create_service_subnet([service_type], cidr=self.CIDRS[1], network=network) # Create a port with a matching device owner network = network['network'] port = self._create_port(self.fmt, net_id=network['id'], tenant_id=network['tenant_id'], device_owner=service_type, arg_list=(portbindings.HOST_ID,), **{portbindings.HOST_ID: 'fakehost'}) port = self.deserialize('json', port)['port'] # Update the port's host binding. data = {'port': {portbindings.HOST_ID: 'fakehost2'}} # self._update will fail with a MismatchError if the update cannot be # applied port = self._update('ports', port['id'], data) class SubnetServiceTypesExtensionTestCasev6( SubnetServiceTypesExtensionTestCase): CIDRS = ['2001:db8:2::/64', '2001:db8:3::/64', '2001:db8:4::/64'] IP_VERSION = 6

# ====== neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_subnetpool_prefix_ops.py ======

# (c) Copyright 2019 SUSE LLC # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
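# (Editor's illustrative sketch; not part of the original neutron source.)
# _test_onboard_network_subnets above closes with
# self.assertIn(onboarded_prefix, subnetpool_prefixes): netaddr treats an
# IPNetwork as "in" an IPSet only when every address it covers belongs to
# the set. A self-contained demonstration of that containment check:
import netaddr

def _example_prefix_containment():
    pool = netaddr.IPSet(['192.168.1.0/24', '192.168.2.0/24'])
    # A sub-prefix of a pool member is fully covered...
    assert netaddr.IPNetwork('192.168.1.128/25') in pool
    # ...while an unrelated CIDR is not.
    assert netaddr.IPNetwork('10.0.0.0/24') not in pool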
import contextlib import netaddr from neutron_lib.db import api as db_api from neutron_lib import exceptions as exc from oslo_utils import uuidutils import webob.exc from neutron.objects import subnetpool as subnetpool_obj from neutron.tests.unit.plugins.ml2 import test_plugin _uuid = uuidutils.generate_uuid class SubnetpoolPrefixOpsTestBase(object): @contextlib.contextmanager def address_scope(self, ip_version, prefixes=None, shared=False, admin=True, name='test-scope', is_default_pool=False, tenant_id=None, **kwargs): if not tenant_id: tenant_id = _uuid() scope_data = {'tenant_id': tenant_id, 'ip_version': ip_version, 'shared': shared, 'name': name + '-scope'} with db_api.CONTEXT_WRITER.using(self.context): yield self.driver.create_address_scope( self.context, {'address_scope': scope_data}) @contextlib.contextmanager def subnetpool(self, ip_version, prefixes=None, shared=False, admin=True, name='test-pool', is_default_pool=False, tenant_id=None, address_scope_id=None, **kwargs): if not tenant_id: tenant_id = _uuid() pool_data = {'tenant_id': tenant_id, 'shared': shared, 'name': name, 'address_scope_id': address_scope_id, 'prefixes': prefixes, 'is_default': is_default_pool} for key in kwargs: pool_data[key] = kwargs[key] with db_api.CONTEXT_WRITER.using(self.context): yield self.driver.create_subnetpool(self.context, {'subnetpool': pool_data}) def _make_request_payload(self, prefixes): return {'prefixes': prefixes} def test_add_prefix_no_address_scope(self): with self.subnetpool(self.ip_version, prefixes=self.subnetpool_prefixes) as subnetpool: self.driver.add_prefixes( self.context, subnetpool['id'], self._make_request_payload([self.cidr_to_add])) self._validate_prefix_list(subnetpool['id'], [self.cidr_to_add]) def test_add_prefix_invalid_request_body_structure(self): with self.subnetpool(self.ip_version, prefixes=self.subnetpool_prefixes) as subnetpool: self.assertRaises(webob.exc.HTTPBadRequest, self.driver.add_prefixes, self.context, subnetpool['id'], [self.cidr_to_add]) def test_add_prefix_invalid_request_data(self): with self.subnetpool(self.ip_version, prefixes=self.subnetpool_prefixes) as subnetpool: self.assertRaises(webob.exc.HTTPBadRequest, self.driver.add_prefixes, self.context, subnetpool['id'], ['not a CIDR']) def test_add_prefix_no_address_scope_overlapping_cidr(self): with self.subnetpool(self.ip_version, prefixes=self.subnetpool_prefixes) as subnetpool: prefixes_to_add = [self.cidr_to_add, self.overlapping_cidr] self.driver.add_prefixes( self.context, subnetpool['id'], self._make_request_payload([self.cidr_to_add])) self._validate_prefix_list(subnetpool['id'], prefixes_to_add) def test_add_prefix_with_address_scope_overlapping_cidr(self): with self.address_scope(self.ip_version) as addr_scope: with self.subnetpool(self.ip_version, prefixes=[self.subnetpool_prefixes[0]], address_scope_id=addr_scope['id']) as sp_to_augment,\ self.subnetpool(self.ip_version, prefixes=[self.subnetpool_prefixes[1]], address_scope_id=addr_scope['id']): prefixes_to_add = [self.cidr_to_add] self.driver.add_prefixes( self.context, sp_to_augment['id'], self._make_request_payload([self.cidr_to_add])) self._validate_prefix_list(sp_to_augment['id'], prefixes_to_add) def test_add_prefix_with_address_scope(self): with self.address_scope(self.ip_version) as addr_scope: with self.subnetpool(self.ip_version, prefixes=[self.subnetpool_prefixes[1]], address_scope_id=addr_scope['id']) as sp_to_augment,\ self.subnetpool(self.ip_version, prefixes=[self.subnetpool_prefixes[0]], 
address_scope_id=addr_scope['id']): prefixes_to_add = [self.overlapping_cidr] self.assertRaises(exc.AddressScopePrefixConflict, self.driver.add_prefixes, self.context, sp_to_augment['id'], self._make_request_payload(prefixes_to_add)) def test_remove_prefix(self): with self.subnetpool(self.ip_version, prefixes=self.subnetpool_prefixes) as subnetpool: prefixes_to_remove = [self.subnetpool_prefixes[0]] self.driver.remove_prefixes( self.context, subnetpool['id'], self._make_request_payload(prefixes_to_remove)) self._validate_prefix_list(subnetpool['id'], [self.subnetpool_prefixes[1]], excluded_prefixes=prefixes_to_remove) def test_remove_prefix_invalid_request_body_structure(self): with self.subnetpool(self.ip_version, prefixes=self.subnetpool_prefixes) as subnetpool: self.assertRaises(webob.exc.HTTPBadRequest, self.driver.remove_prefixes, self.context, subnetpool['id'], [self.subnetpool_prefixes[0]]) def test_remove_prefix_invalid_request_data(self): with self.subnetpool(self.ip_version, prefixes=self.subnetpool_prefixes) as subnetpool: self.assertRaises(webob.exc.HTTPBadRequest, self.driver.remove_prefixes, self.context, subnetpool['id'], ['not a CIDR']) def test_remove_prefix_with_allocated_subnet(self): with self.subnetpool(self.ip_version, default_prefixlen=self.default_prefixlen, min_prefixlen=self.default_prefixlen, prefixes=self.subnetpool_prefixes) as subnetpool: with self.subnet(cidr=None, subnetpool_id=subnetpool['id'], ip_version=self.ip_version) as subnet: subnet = subnet['subnet'] prefixes_to_remove = [subnet['cidr']] self.assertRaises( exc.IllegalSubnetPoolPrefixUpdate, self.driver.remove_prefixes, self.context, subnetpool['id'], self._make_request_payload(prefixes_to_remove)) def test_remove_overlapping_prefix_with_allocated_subnet(self): with self.subnetpool( self.ip_version, default_prefixlen=self.default_prefixlen, min_prefixlen=self.default_prefixlen, prefixes=[self.subnetpool_prefixes[0]]) as subnetpool: with self.subnet(cidr=None, subnetpool_id=subnetpool['id'], ip_version=self.ip_version) as subnet: subnet = subnet['subnet'] prefixes_to_remove = [self.overlapping_cidr] self.assertRaises( exc.IllegalSubnetPoolPrefixUpdate, self.driver.remove_prefixes, self.context, subnetpool['id'], self._make_request_payload(prefixes_to_remove)) def _validate_prefix_list(self, subnetpool_id, expected_prefixes, excluded_prefixes=None): if not excluded_prefixes: excluded_prefixes = [] subnetpool = subnetpool_obj.SubnetPool.get_object( self.context, id=subnetpool_id) current_prefix_set = netaddr.IPSet([x for x in subnetpool.prefixes]) expected_prefix_set = netaddr.IPSet(expected_prefixes) excluded_prefix_set = netaddr.IPSet(excluded_prefixes) self.assertTrue(expected_prefix_set.issubset(current_prefix_set)) self.assertTrue(excluded_prefix_set.isdisjoint(current_prefix_set)) class SubnetpoolPrefixOpsTestsIpv4(SubnetpoolPrefixOpsTestBase, test_plugin.Ml2PluginV2TestCase): subnetpool_prefixes = ["192.168.1.0/24", "192.168.2.0/24"] cidr_to_add = "10.0.0.0/24" overlapping_cidr = "192.168.1.128/25" default_prefixlen = 24 ip_version = 4 class SubnetpoolPrefixOpsTestsIpv6(SubnetpoolPrefixOpsTestBase, test_plugin.Ml2PluginV2TestCase): subnetpool_prefixes = ["2001:db8:1234::/48", "2001:db8:1235::/48"] cidr_to_add = "2001:db8:4321::/48" overlapping_cidr = "2001:db8:1234:1111::/64" default_prefixlen = 48 ip_version = 6
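# (Editor's illustrative sketch; not part of the original neutron source.)
# _validate_prefix_list above leans on two IPSet relations: the expected
# prefixes must form a subset of the pool's current prefixes, and any
# removed prefixes must be disjoint from them. The same checks, standalone:
import netaddr

def _example_prefix_validation():
    current = netaddr.IPSet(['192.168.2.0/24'])
    assert netaddr.IPSet(['192.168.2.0/24']).issubset(current)
    assert netaddr.IPSet(['192.168.1.0/24']).isdisjoint(current)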
# ====== neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_timestamp.py ======

# Copyright 2015 HuaWei Technologies. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import mock from neutron_lib import context from neutron_lib.plugins import directory from oslo_utils import timeutils from oslo_utils import uuidutils import six from neutron.common import utils from neutron.db import db_base_plugin_v2 from neutron.extensions import timestamp from neutron import manager from neutron.objects import network as net_obj from neutron.objects import tag as tag_obj from neutron.tests.unit.db import test_db_base_plugin_v2 class TimeStampExtensionManager(object): def get_resources(self): return [] def get_actions(self): return [] def get_request_extensions(self): return [] def get_extended_resources(self, version): return timestamp.Timestamp().get_extended_resources(version) class TimeStampTestPlugin(db_base_plugin_v2.NeutronDbPluginV2): """Just for test with TimeStampPlugin""" class TimeStampChangedsinceTestCase(test_db_base_plugin_v2. NeutronDbPluginV2TestCase): plugin = ('neutron.tests.unit.extensions.test_timestamp.' + 'TimeStampTestPlugin') def setUp(self): ext_mgr = TimeStampExtensionManager() super(TimeStampChangedsinceTestCase, self).setUp(plugin=self.plugin, ext_mgr=ext_mgr) self.addCleanup(manager.NeutronManager.clear_instance) def setup_coreplugin(self, core_plugin=None, load_plugins=True): super(TimeStampChangedsinceTestCase, self).setup_coreplugin( self.plugin, load_plugins=False) self.patched_default_svc_plugins.return_value = ['timestamp'] manager.init() def _get_resp_with_changed_since(self, resource_type, changed_since): query_params = 'changed_since=%s' % changed_since req = self.new_list_request('%ss' % resource_type, self.fmt, query_params) resources = self.deserialize(self.fmt, req.get_response(self.api)) return resources def _return_by_timedelay(self, resource, timedelay): resource_type = six.next(six.iterkeys(resource)) time_create = timeutils.parse_isotime( resource[resource_type]['updated_at']) time_before = datetime.timedelta(seconds=timedelay) addedtime_string = (datetime.datetime.
strftime(time_create + time_before, '%Y-%m-%dT%H:%M:%S')) + 'Z' return self._get_resp_with_changed_since(resource_type, addedtime_string) def _update_test_resource_by_name(self, resource): resource_type = six.next(six.iterkeys(resource)) name = resource[resource_type]['name'] data = {resource_type: {'name': '%s_new' % name}} req = self.new_update_request('%ss' % resource_type, data, resource[resource_type]['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) return res def _set_timestamp_by_show(self, resource, type): req = self.new_show_request('%ss' % type, resource[type]['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) resource[type]['created_at'] = res[type]['created_at'] resource[type]['updated_at'] = res[type]['updated_at'] def _list_resources_with_changed_since(self, resource): # assert list results contain the net info when # changed_since equal with the net updated time. resource_type = six.next(six.iterkeys(resource)) if resource_type in ['network', 'port']: self._set_timestamp_by_show(resource, resource_type) resources = self._get_resp_with_changed_since(resource_type, resource[resource_type][ 'updated_at']) self.assertEqual(resource[resource_type]['id'], resources[resource_type + 's'][0]['id']) # assert list results contain the net info when changed_since # is earlier than the net updated time. resources = self._return_by_timedelay(resource, -1) self.assertEqual(resource[resource_type]['id'], resources[resource_type + 's'][0]['id']) # assert list results is Null when changed_since # is later with the net updated time. resources = self._return_by_timedelay(resource, 1) self.assertEqual([], resources[resource_type + 's']) def _test_list_mutiple_resources_with_changed_since(self, first, second): resource_type = six.next(six.iterkeys(first)) if resource_type in ['network', 'port']: self._set_timestamp_by_show(first, resource_type) self._set_timestamp_by_show(second, resource_type) # update names of second new_second = self._update_test_resource_by_name(second) # now the queue of order by # updated_at is first < new_second # test changed_since < first's updated_at resources = self._return_by_timedelay(first, -1) for resource in [first[resource_type]['id'], new_second[resource_type]['id']]: self.assertIn(resource, [n['id'] for n in resources[resource_type + 's']]) # test changed_since = first's updated_at resources = self._return_by_timedelay(first, 0) for resource in [first[resource_type]['id'], new_second[resource_type]['id']]: self.assertIn(resource, [n['id'] for n in resources[resource_type + 's']]) # test first < changed_since < second resources = self._return_by_timedelay(new_second, -1) self.assertIn(new_second[resource_type]['id'], [n['id'] for n in resources[resource_type + 's']]) # test first < changed_since = second resources = self._return_by_timedelay(new_second, 0) self.assertIn(new_second[resource_type]['id'], [n['id'] for n in resources[resource_type + 's']]) # test first < second < changed_since resources = self._return_by_timedelay(new_second, 3) self.assertEqual({resource_type + 's': []}, resources) def test_list_networks_with_changed_since(self): with self.network('net1') as net: self._list_resources_with_changed_since(net) def test_list_subnets_with_changed_since(self): with self.network('net2') as net: with self.subnet(network=net) as subnet: self._list_resources_with_changed_since(subnet) def test_list_ports_with_changed_since(self): with self.network('net3') as net: with self.subnet(network=net) as subnet: with 
self.port(subnet=subnet) as port: self._list_resources_with_changed_since(port) def test_list_subnetpools_with_changed_since(self): prefixes = ['3.3.3.3/24', '4.4.4.4/24'] with self.subnetpool(prefixes, tenant_id=self._tenant_id, name='sp_test02') as subnetpool: self._list_resources_with_changed_since(subnetpool) def test_list_mutiple_networks_with_changed_since(self): with self.network('net1') as net1, self.network('net2') as net2: self._test_list_mutiple_resources_with_changed_since(net1, net2) def test_list_mutiple_subnets_with_changed_since(self): with self.network('net1') as net1, self.network('net2') as net2: with self.subnet(network=net1) as subnet1, self.subnet( network=net2) as subnet2: self._test_list_mutiple_resources_with_changed_since(subnet1, subnet2) def test_list_mutiple_subnetpools_with_changed_since(self): prefixes1 = ['3.3.3.3/24', '4.4.4.4/24'] prefixes2 = ['5.5.5.5/24', '6.6.6.6/24'] with self.subnetpool(prefixes1, tenant_id=self._tenant_id, name='sp01') as sp1: with self.subnetpool(prefixes2, tenant_id=self._tenant_id, name='sp02') as sp2: self._test_list_mutiple_resources_with_changed_since(sp1, sp2) def test_list_mutiple_ports_with_changed_since(self): with self.network('net') as net: with self.subnet(network=net) as subnet: with self.port(subnet=subnet) as p1, self.port( subnet=subnet) as p2: self._test_list_mutiple_resources_with_changed_since(p1, p2) def test_list_resources_with_invalid_changed_since(self): # check when input --changed-since with no arg, then filters # stored as 'True'. And also check other invalid inputs changed_sinces = ['123', 'True', 'AAAA-BB-CCTDD-EE-FFZ', '9a9b-11-00T99-1a-r3Z', '0000-00-00T00-00-00Z'] for resource in ['network', 'subnet', 'port', 'subnetpool']: for changed_since in changed_sinces: req = self.new_list_request('%ss' % resource, self.fmt, 'changed_since=%s' % changed_since) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(list(res.values())[0]['type'], 'InvalidInput') def test_timestamp_fields_ignored_in_update(self): ctx = context.get_admin_context() with self.port() as port: plugin = directory.get_plugin() port = plugin.get_port(ctx, port['port']['id']) port['name'] = 'updated' port['created_at'] = '2011-04-06T14:34:23' port['updated_at'] = '2012-04-06T15:34:23' updated = plugin.update_port(ctx, port['id'], {'port': port}) self.assertEqual('updated', updated['name']) self.assertNotEqual(port['updated_at'], updated['updated_at']) self.assertNotEqual(port['created_at'], updated['created_at']) class TimeStampDBMixinTestCase(TimeStampChangedsinceTestCase): """Test timestamp_db.TimeStamp_db_mixin()""" def _save_network(self, network_id): ctx = context.get_admin_context() obj = net_obj.Network(ctx, id=network_id) obj.create() return obj # Use tag as non StandardAttribute object def _save_tag(self, tags, standard_attr_id): ctx = context.get_admin_context() ret = [] for tag in tags: _tag_obj = tag_obj.Tag(ctx, standard_attr_id=standard_attr_id, tag=tag) _tag_obj.create() ret.append(_tag_obj) return ret def test_update_timpestamp(self): self._network = None self._tags = [] def save_network(): if self._network: self._network.delete() timenow.reset_mock() self._network = self._save_network(network_id) return 1 == timenow.call_count def save_tag(): for tag in self._tags: tag.delete() timenow.reset_mock() self._tags = self._save_tag(tags, self._network.standard_attr_id) return 0 == timenow.call_count network_id = uuidutils.generate_uuid() tags = ["red", "blue"] with mock.patch.object(timeutils, 'utcnow') as 
timenow: timenow.return_value = datetime.datetime(2016, 3, 11, 0, 0) # Test to update StandardAttribute object utils.wait_until_true(save_network, timeout=5, sleep=0.1) # Test not to update non StandardAttribute object utils.wait_until_true(save_tag, timeout=5, sleep=0.1)

# ====== neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_uplink_status_propagation.py ======

# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from neutron_lib.api.definitions import uplink_status_propagation as apidef from neutron_lib.db import resource_extend from neutron.db import db_base_plugin_v2 from neutron.db import uplink_status_propagation_db as usp_db from neutron.tests.unit.db import test_db_base_plugin_v2 class UplinkStatusPropagationExtensionTestPlugin( db_base_plugin_v2.NeutronDbPluginV2, usp_db.UplinkStatusPropagationMixin): """Test plugin to mixin the uplink status propagation extension. """ supported_extension_aliases = [apidef.ALIAS] @staticmethod @resource_extend.extends([apidef.COLLECTION_NAME]) def _extend_network_project_default(port_res, port_db): return usp_db.UplinkStatusPropagationMixin._extend_port_dict( port_res, port_db) def create_port(self, context, port): with context.session.begin(subtransactions=True): new_port = super(UplinkStatusPropagationExtensionTestPlugin, self).create_port(context, port) # Update the propagate_uplink_status in the database p = port['port'] if 'propagate_uplink_status' not in p: p['propagate_uplink_status'] = False self._process_create_port(context, p, new_port) return new_port @ddt.ddt class UplinkStatusPropagationExtensionTestCase( test_db_base_plugin_v2.NeutronDbPluginV2TestCase): """Test API extension propagate_uplink_status attributes. """ def setUp(self): plugin = ('neutron.tests.unit.extensions.test_uplink_status_' 'propagation.UplinkStatusPropagationExtensionTestPlugin') super(UplinkStatusPropagationExtensionTestCase, self).setUp(plugin=plugin) @ddt.data(True, False) def test_create_port_propagate_uplink_status( self, propagate_uplink_status): name = 'propagate_uplink_status' keys = [('name', name), ('admin_state_up', True), ('status', self.port_create_status), ('propagate_uplink_status', propagate_uplink_status)] with self.port(name=name, propagate_uplink_status=propagate_uplink_status ) as port: for k, v in keys: self.assertEqual(v, port['port'][k])

# ====== neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/test_vlantransparent.py ======

# Copyright (c) 2015 Cisco Systems Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import vlantransparent as vlan_apidef from neutron_lib.db import api as db_api from oslo_config import cfg from webob import exc as web_exc from neutron.db import db_base_plugin_v2 from neutron.db import vlantransparent_db as vlt_db from neutron.extensions import vlantransparent as vlt from neutron import quota from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit import testlib_api class VlanTransparentExtensionManager(object): def get_resources(self): return [] def get_actions(self): return [] def get_request_extensions(self): return [] def get_extended_resources(self, version): return vlt.Vlantransparent.get_extended_resources(version) class VlanTransparentExtensionTestPlugin(db_base_plugin_v2.NeutronDbPluginV2, vlt_db.Vlantransparent_db_mixin): """Test plugin to mixin the VLAN transparent extensions.""" supported_extension_aliases = [vlan_apidef.ALIAS] def create_network(self, context, network): with db_api.CONTEXT_WRITER.using(context): new_net = super(VlanTransparentExtensionTestPlugin, self).create_network(context, network) # Update the vlan_transparent in the database n = network['network'] vlan_transparent = vlan_apidef.get_vlan_transparent(n) network = self._get_network(context, new_net['id']) n['vlan_transparent'] = vlan_transparent network.update(n) return new_net class VlanTransparentExtensionTestCase(test_db_base_plugin_v2.TestNetworksV2): fmt = 'json' def setUp(self): plugin = ('neutron.tests.unit.extensions.test_vlantransparent.' 
'VlanTransparentExtensionTestPlugin') # Update the plugin and extensions path ext_mgr = VlanTransparentExtensionManager() super(VlanTransparentExtensionTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) quota.QUOTAS._driver = None cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver', group='QUOTAS') def test_network_create_with_vlan_transparent_attr(self): vlantrans = {'vlan_transparent': True} with self.network(name='net1', **vlantrans) as net: req = self.new_show_request('networks', net['network']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(net['network']['name'], res['network']['name']) self.assertTrue(res['network'][vlan_apidef.VLANTRANSPARENT]) def test_network_create_with_bad_vlan_transparent_attr(self): vlantrans = {'vlan_transparent': "abc"} with testlib_api.ExpectedException( web_exc.HTTPClientError) as ctx_manager: with self.network(name='net1', **vlantrans): pass self.assertEqual(web_exc.HTTPClientError.code, ctx_manager.exception.code) def test_network_update_with_vlan_transparent_exception(self): with self.network(name='net1') as net: self._update('networks', net['network']['id'], {'network': {vlan_apidef.VLANTRANSPARENT: False}}, web_exc.HTTPBadRequest.code) req = self.new_show_request('networks', net['network']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(net['network']['name'], res['network']['name']) self.assertFalse(res['network'][vlan_apidef.VLANTRANSPARENT])

# ====== neutron-16.0.0.0b2.dev214/neutron/tests/unit/extensions/v2attributes.py ======

# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from neutron_lib.api import extensions EXTENDED_ATTRIBUTES_2_0 = { 'networks': { 'v2attrs:something': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'v2attrs:something_else': {'allow_post': True, 'allow_put': False, 'is_visible': False}, } } class V2attributes(extensions.ExtensionDescriptor): def get_name(self): return "V2 Extended Attributes Example" def get_alias(self): return "v2attrs" def get_description(self): return "Demonstrates extended attributes on V2 core resources" def get_updated(self): return "2012-07-18T10:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {}

# ====== neutron-16.0.0.0b2.dev214/neutron/tests/unit/fake_resources.py ======

# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import collections import copy import mock from neutron_lib.api.definitions import l3 from oslo_utils import uuidutils from neutron.common.ovn import constants as ovn_const from neutron.common.ovn import utils as ovn_utils class FakeOvsdbNbOvnIdl(object): def __init__(self, **kwargs): self.lswitch_table = FakeOvsdbTable.create_one_ovsdb_table() self.lsp_table = FakeOvsdbTable.create_one_ovsdb_table() self.lrouter_table = FakeOvsdbTable.create_one_ovsdb_table() self.lrouter_static_route_table = \ FakeOvsdbTable.create_one_ovsdb_table() self.lrp_table = FakeOvsdbTable.create_one_ovsdb_table() self.addrset_table = FakeOvsdbTable.create_one_ovsdb_table() self.acl_table = FakeOvsdbTable.create_one_ovsdb_table() self.dhcp_options_table = FakeOvsdbTable.create_one_ovsdb_table() self.nat_table = FakeOvsdbTable.create_one_ovsdb_table() self.port_group_table = FakeOvsdbTable.create_one_ovsdb_table() self._tables = {} self._tables['Logical_Switch'] = self.lswitch_table self._tables['Logical_Switch_Port'] = self.lsp_table self._tables['Logical_Router'] = self.lrouter_table self._tables['Logical_Router_Port'] = self.lrp_table self._tables['Logical_Router_Static_Route'] = \ self.lrouter_static_route_table self._tables['ACL'] = self.acl_table self._tables['Address_Set'] = self.addrset_table self._tables['DHCP_Options'] = self.dhcp_options_table self._tables['NAT'] = self.nat_table self._tables['Port_Group'] = self.port_group_table self.transaction = mock.MagicMock() self.create_transaction = mock.MagicMock() self.ls_add = mock.Mock() self.ls_del = mock.Mock() self.create_lswitch_port = mock.Mock() self.set_lswitch_port = mock.Mock() self.delete_lswitch_port = mock.Mock() self.get_acls_for_lswitches = mock.Mock() self.create_lrouter = mock.Mock() self.lrp_del = mock.Mock() self.update_lrouter = mock.Mock() self.delete_lrouter = mock.Mock() self.add_lrouter_port = mock.Mock() self.update_lrouter_port = mock.Mock() self.delete_lrouter_port = mock.Mock() self.set_lrouter_port_in_lswitch_port = mock.Mock() self.add_acl = mock.Mock() self.delete_acl = mock.Mock() self.update_acls = mock.Mock() self.idl = mock.Mock() self.add_static_route = mock.Mock() self.delete_static_route = mock.Mock() self.create_address_set = mock.Mock() self.update_address_set_ext_ids = mock.Mock() self.delete_address_set = mock.Mock() self.update_address_set = mock.Mock() self.get_all_chassis_gateway_bindings = mock.Mock() self.get_chassis_gateways = mock.Mock() self.get_gateway_chassis_binding = mock.Mock() self.get_unhosted_gateways = mock.Mock() self.add_dhcp_options = mock.Mock() self.delete_dhcp_options = mock.Mock() self.get_subnet_dhcp_options = mock.Mock() self.get_subnet_dhcp_options.return_value = { 'subnet': None, 'ports': []} self.get_subnets_dhcp_options = mock.Mock() self.get_subnets_dhcp_options.return_value = [] self.get_all_dhcp_options = mock.Mock() self.get_router_port_options = mock.MagicMock() self.get_router_port_options.return_value = {} self.add_nat_rule_in_lrouter = mock.Mock() self.delete_nat_rule_in_lrouter = mock.Mock() self.get_lrouter_nat_rules = mock.Mock() self.get_lrouter_nat_rules.return_value = [] 
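        # (Editor's note, added for clarity.) Every attribute above is a
        # pre-wired mock: tests substitute this fake for the real OVN
        # northbound API client so they can assert on calls without an
        # OVSDB connection, and the explicit return_value assignments keep
        # lookup-style methods returning empty/None defaults instead of
        # auto-created MagicMock children.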
self.set_nat_rule_in_lrouter = mock.Mock() self.check_for_row_by_value_and_retry = mock.Mock() self.get_parent_port = mock.Mock() self.get_parent_port.return_value = [] self.dns_add = mock.Mock() self.get_lswitch = mock.Mock() fake_ovs_row = FakeOvsdbRow.create_one_ovsdb_row() self.get_lswitch.return_value = fake_ovs_row self.get_lswitch_port = mock.Mock() self.get_lswitch_port.return_value = fake_ovs_row self.get_ls_and_dns_record = mock.Mock() self.get_ls_and_dns_record.return_value = (fake_ovs_row, None) self.ls_set_dns_records = mock.Mock() self.get_floatingip = mock.Mock() self.get_floatingip.return_value = None self.check_revision_number = mock.Mock() self.lookup = mock.MagicMock() # TODO(lucasagomes): The get_floatingip_by_ips() method is part # of a backwards compatibility layer for the Pike -> Queens release, # remove it in the Rocky release. self.get_floatingip_by_ips = mock.Mock() self.get_floatingip_by_ips.return_value = None self.is_col_present = mock.Mock() self.is_col_present.return_value = False self.get_lrouter = mock.Mock() self.get_lrouter.return_value = None self.delete_lrouter_ext_gw = mock.Mock() self.delete_lrouter_ext_gw.return_value = None self.is_port_groups_supported = mock.Mock() # TODO(lucasagomes): Flip this return value to True at some point, # port groups should be the default method used by networking-ovn self.is_port_groups_supported.return_value = False self.get_address_set = mock.Mock() self.get_address_set.return_value = None self.pg_acl_add = mock.Mock() self.pg_acl_del = mock.Mock() self.pg_del = mock.Mock() self.pg_add = mock.Mock() self.get_port_group = mock.Mock() self.pg_add_ports = mock.Mock() self.pg_del_ports = mock.Mock() self.lsp_get_up = mock.Mock() self.nb_global = mock.Mock() self.db_list_rows = mock.Mock() self.lsp_list = mock.MagicMock() self.db_find = mock.Mock() self.db_find_rows = mock.Mock() self.db_set = mock.Mock() self.db_clear = mock.Mock() self.db_remove = mock.Mock() self.set_lswitch_port_to_virtual_type = mock.Mock() self.unset_lswitch_port_to_virtual_type = mock.Mock() self.ls_get = mock.Mock() self.check_liveness = mock.Mock() self.ha_chassis_group_get = mock.Mock() class FakeOvsdbSbOvnIdl(object): def __init__(self, **kwargs): self.chassis_exists = mock.Mock() self.chassis_exists.return_value = True self.get_chassis_hostname_and_physnets = mock.Mock() self.get_chassis_hostname_and_physnets.return_value = {} self.get_all_chassis = mock.Mock() self.get_chassis_data_for_ml2_bind_port = mock.Mock() self.get_chassis_data_for_ml2_bind_port.return_value = \ ('fake', '', ['fake-physnet']) self.get_logical_port_chassis_and_datapath = mock.Mock() self.get_logical_port_chassis_and_datapath.return_value = \ ('fake', 'fake-dp') self.get_chassis_and_physnets = mock.Mock() self.get_gateway_chassis_from_cms_options = mock.Mock() self.is_col_present = mock.Mock() self.is_col_present.return_value = False self.db_set = mock.Mock() class FakeOvsdbTransaction(object): def __init__(self, **kwargs): self.insert = mock.Mock() class FakePlugin(object): def __init__(self, **kwargs): self.get_ports = mock.Mock() self._get_port_security_group_bindings = mock.Mock() class FakeResource(dict): def __init__(self, manager=None, info=None, loaded=False, methods=None): """Set attributes and methods for a resource. 
:param manager: The resource manager :param Dictionary info: A dictionary with all attributes :param bool loaded: True if the resource is loaded in memory :param Dictionary methods: A dictionary with all methods """ info = info or {} super(FakeResource, self).__init__(info) methods = methods or {} self.__name__ = type(self).__name__ self.manager = manager self._info = info self._add_details(info) self._add_methods(methods) self._loaded = loaded # Add a revision number by default setattr(self, 'revision_number', 1) @property def db_obj(self): return self def _add_details(self, info): for (k, v) in info.items(): setattr(self, k, v) def _add_methods(self, methods): """Fake methods with MagicMock objects. For each <@key, @value> pairs in methods, add an callable MagicMock object named @key as an attribute, and set the mock's return_value to @value. When users access the attribute with (), @value will be returned, which looks like a function call. """ for (name, ret) in methods.items(): method = mock.MagicMock(return_value=ret) setattr(self, name, method) def __repr__(self): reprkeys = sorted(k for k in self.__dict__.keys() if k[0] != '_' and k != 'manager') info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys) return "<%s %s>" % (self.__class__.__name__, info) def keys(self): return self._info.keys() def info(self): return self._info def update(self, info): super(FakeResource, self).update(info) self._add_details(info) class FakeNetwork(object): """Fake one or more networks.""" @staticmethod def create_one_network(attrs=None): """Create a fake network. :param Dictionary attrs: A dictionary with all attributes :return: A FakeResource object faking the network """ attrs = attrs or {} # Set default attributes. fake_uuid = uuidutils.generate_uuid() network_attrs = { 'id': 'network-id-' + fake_uuid, 'name': 'network-name-' + fake_uuid, 'status': 'ACTIVE', 'tenant_id': 'project-id-' + fake_uuid, 'admin_state_up': True, 'shared': False, 'subnets': [], 'provider:network_type': 'geneve', 'provider:physical_network': None, 'provider:segmentation_id': 10, 'router:external': False, 'availability_zones': [], 'availability_zone_hints': [], 'is_default': False, } # Overwrite default attributes. network_attrs.update(attrs) return FakeResource(info=copy.deepcopy(network_attrs), loaded=True) class FakeNetworkContext(object): def __init__(self, network, segments): self.fake_network = network self.fake_segments = segments self._plugin_context = mock.MagicMock() @property def current(self): return self.fake_network @property def original(self): return None @property def network_segments(self): return self.fake_segments class FakeSubnetContext(object): def __init__(self, subnet, original_subnet=None, network=None): self.fake_subnet = subnet self.fake_original_subnet = original_subnet self.fake_network = FakeNetworkContext(network, None) self._plugin_context = mock.MagicMock() @property def current(self): return self.fake_subnet @property def original(self): return self.fake_original_subnet @property def network(self): return self.fake_network class FakeOvsdbRow(FakeResource): """Fake one or more OVSDB rows.""" @staticmethod def create_one_ovsdb_row(attrs=None, methods=None): """Create a fake OVSDB row. :param Dictionary attrs: A dictionary with all attributes :param Dictionary methods: A dictionary with all methods :return: A FakeResource object faking the OVSDB row """ attrs = attrs or {} methods = methods or {} # Set default attributes. 
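        # (Editor's note, added for clarity.) The defaults below embed a
        # freshly generated UUID so independently created fake rows never
        # collide on 'uuid' or 'name'.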
fake_uuid = uuidutils.generate_uuid() ovsdb_row_attrs = { 'uuid': fake_uuid, 'name': 'name-' + fake_uuid, 'external_ids': {}, } # Set default methods. ovsdb_row_methods = { 'addvalue': None, 'delete': None, 'delvalue': None, 'verify': None, 'setkey': None, } # Overwrite default attributes and methods. ovsdb_row_attrs.update(attrs) ovsdb_row_methods.update(methods) return FakeResource(info=copy.deepcopy(ovsdb_row_attrs), loaded=True, methods=copy.deepcopy(ovsdb_row_methods)) class FakeOvsdbTable(FakeResource): """Fake one or more OVSDB tables.""" @staticmethod def create_one_ovsdb_table(attrs=None): """Create a fake OVSDB table. :param Dictionary attrs: A dictionary with all attributes :return: A FakeResource object faking the OVSDB table """ attrs = attrs or {} # Set default attributes. ovsdb_table_attrs = { 'rows': {}, 'columns': {}, } # Overwrite default attributes. ovsdb_table_attrs.update(attrs) return FakeResource(info=copy.deepcopy(ovsdb_table_attrs), loaded=True) class FakePort(object): """Fake one or more ports.""" @staticmethod def create_one_port(attrs=None): """Create a fake port. :param Dictionary attrs: A dictionary with all attributes :return: A FakeResource object faking the port """ attrs = attrs or {} # Set default attributes. fake_uuid = uuidutils.generate_uuid() port_attrs = { 'admin_state_up': True, 'allowed_address_pairs': [{}], 'binding:host_id': 'binding-host-id-' + fake_uuid, 'binding:profile': {}, 'binding:vif_details': {}, 'binding:vif_type': 'ovs', 'binding:vnic_type': 'normal', 'device_id': 'device-id-' + fake_uuid, 'device_owner': 'compute:nova', 'dns_assignment': [{}], 'dns_name': 'dns-name-' + fake_uuid, 'extra_dhcp_opts': [{}], 'fixed_ips': [{'subnet_id': 'subnet-id-' + fake_uuid, 'ip_address': '10.10.10.20'}], 'id': 'port-id-' + fake_uuid, 'mac_address': 'fa:16:3e:a9:4e:72', 'name': 'port-name-' + fake_uuid, 'network_id': 'network-id-' + fake_uuid, 'port_security_enabled': True, 'security_groups': [], 'status': 'ACTIVE', 'tenant_id': 'project-id-' + fake_uuid, } # Overwrite default attributes. port_attrs.update(attrs) return FakeResource(info=copy.deepcopy(port_attrs), loaded=True) class FakePortContext(object): def __init__(self, port, host, segments_to_bind): self.fake_port = port self.fake_host = host self.fake_segments_to_bind = segments_to_bind self.set_binding = mock.Mock() @property def current(self): return self.fake_port @property def host(self): return self.fake_host @property def segments_to_bind(self): return self.fake_segments_to_bind class FakeSecurityGroup(object): """Fake one or more security groups.""" @staticmethod def create_one_security_group(attrs=None): """Create a fake security group. :param Dictionary attrs: A dictionary with all attributes :return: A FakeResource object faking the security group """ attrs = attrs or {} # Set default attributes. fake_uuid = uuidutils.generate_uuid() security_group_attrs = { 'id': 'security-group-id-' + fake_uuid, 'name': 'security-group-name-' + fake_uuid, 'description': 'security-group-description-' + fake_uuid, 'tenant_id': 'project-id-' + fake_uuid, 'security_group_rules': [], } # Overwrite default attributes. security_group_attrs.update(attrs) return FakeResource(info=copy.deepcopy(security_group_attrs), loaded=True) class FakeSecurityGroupRule(object): """Fake one or more security group rules.""" @staticmethod def create_one_security_group_rule(attrs=None): """Create a fake security group rule. 
:param Dictionary attrs: A dictionary with all attributes :return: A FakeResource object faking the security group rule """ attrs = attrs or {} # Set default attributes. fake_uuid = uuidutils.generate_uuid() security_group_rule_attrs = { 'direction': 'ingress', 'ethertype': 'IPv4', 'id': 'security-group-rule-id-' + fake_uuid, 'port_range_max': 22, 'port_range_min': 22, 'protocol': 'tcp', 'remote_group_id': None, 'remote_ip_prefix': '0.0.0.0/0', 'security_group_id': 'security-group-id-' + fake_uuid, 'tenant_id': 'project-id-' + fake_uuid, } # Overwrite default attributes. security_group_rule_attrs.update(attrs) return FakeResource(info=copy.deepcopy(security_group_rule_attrs), loaded=True) class FakeSegment(object): """Fake one or more segments.""" @staticmethod def create_one_segment(attrs=None): """Create a fake segment. :param Dictionary attrs: A dictionary with all attributes :return: A FakeResource object faking the segment """ attrs = attrs or {} # Set default attributes. fake_uuid = uuidutils.generate_uuid() segment_attrs = { 'id': 'segment-id-' + fake_uuid, 'network_type': 'geneve', 'physical_network': None, 'segmentation_id': 10, } # Overwrite default attributes. segment_attrs.update(attrs) return FakeResource(info=copy.deepcopy(segment_attrs), loaded=True) class FakeSubnet(object): """Fake one or more subnets.""" @staticmethod def create_one_subnet(attrs=None): """Create a fake subnet. :param Dictionary attrs: A dictionary with all attributes :return: A FakeResource object faking the subnet """ attrs = attrs or {} # Set default attributes. fake_uuid = uuidutils.generate_uuid() subnet_attrs = { 'id': 'subnet-id-' + fake_uuid, 'name': 'subnet-name-' + fake_uuid, 'network_id': 'network-id-' + fake_uuid, 'cidr': '10.10.10.0/24', 'tenant_id': 'project-id-' + fake_uuid, 'enable_dhcp': True, 'dns_nameservers': [], 'allocation_pools': [], 'host_routes': [], 'ip_version': 4, 'gateway_ip': '10.10.10.1', 'ipv6_address_mode': 'None', 'ipv6_ra_mode': 'None', 'subnetpool_id': None, } # Overwrite default attributes. subnet_attrs.update(attrs) return FakeResource(info=copy.deepcopy(subnet_attrs), loaded=True) class FakeFloatingIp(object): """Fake one or more floating ips.""" @staticmethod def create_one_fip(attrs=None): """Create a fake floating ip. :param Dictionary attrs: A dictionary with all attributes :return: A FakeResource object faking the floating ip """ attrs = attrs or {} # Set default attributes. fake_uuid = uuidutils.generate_uuid() fip_attrs = { 'id': 'fip-id-' + fake_uuid, 'tenant_id': '', 'fixed_ip_address': '10.0.0.10', 'fixed_port': FakePort.create_one_port(), 'floating_ip_address': '172.21.0.100', 'router_id': 'router-id', 'port_id': 'port_id', 'fixed_port_id': 'port_id', 'floating_port_id': 'fip-port-id', 'status': 'Active', 'floating_network_id': 'fip-net-id', 'dns': '', 'dns_domain': '', 'dns_name': '', 'project_id': '', } # Overwrite default attributes. fip_attrs.update(attrs) return FakeResource(info=copy.deepcopy(fip_attrs), loaded=True) class FakeOVNPort(object): """Fake one or more ports.""" @staticmethod def create_one_port(attrs=None): """Create a fake ovn port. :param Dictionary attrs: A dictionary with all attributes :return: A FakeResource object faking the port """ attrs = attrs or {} # Set default attributes. 
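        # All the create_one_* factories in this module share one
        # pattern: build a full set of default attributes, then let the
        # caller overwrite only the fields a test cares about. A sketch
        # with an arbitrary override (the 'DOWN' value is illustrative):
        #
        #     >>> fip = FakeFloatingIp.create_one_fip({'status': 'DOWN'})
        #     >>> fip.status
        #     'DOWN'
        #
        # Note that, unlike the FakeResource-based fakes, FakeOVNPort
        # returns a class built with type(), mimicking an ovsdbapp row
        # object rather than a dict-like resource.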
fake_uuid = uuidutils.generate_uuid() port_attrs = { 'addresses': [], 'dhcpv4_options': '', 'dhcpv6_options': [], 'enabled': True, 'external_ids': {}, 'name': fake_uuid, 'options': {}, 'parent_name': [], 'port_security': [], 'tag': [], 'tag_request': [], 'type': '', 'up': False, } # Overwrite default attributes. port_attrs.update(attrs) return type('Logical_Switch_Port', (object, ), port_attrs) @staticmethod def from_neutron_port(port): """Create a fake ovn port based on a neutron port.""" external_ids = { ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY: ovn_utils.ovn_name(port['network_id']), ovn_const.OVN_SG_IDS_EXT_ID_KEY: ' '.join(port['security_groups']), ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY: port.get('device_owner', '')} addresses = [port['mac_address'], ] addresses += [x['ip_address'] for x in port.get('fixed_ips', [])] port_security = ( addresses + [x['ip_address'] for x in port.get('allowed_address_pairs', [])]) return FakeOVNPort.create_one_port( {'external_ids': external_ids, 'addresses': addresses, 'port_security': port_security}) FakeStaticRoute = collections.namedtuple( 'Static_Routes', ['ip_prefix', 'nexthop', 'external_ids']) class FakeOVNRouter(object): @staticmethod def create_one_router(attrs=None): router_attrs = { 'enabled': False, 'external_ids': {}, 'load_balancer': [], 'name': '', 'nat': [], 'options': {}, 'ports': [], 'static_routes': [], } # Overwrite default attributes. router_attrs.update(attrs) return type('Logical_Router', (object, ), router_attrs) @staticmethod def from_neutron_router(router): def _get_subnet_id(gw_info): subnet_id = '' ext_ips = gw_info.get('external_fixed_ips', []) if ext_ips: subnet_id = ext_ips[0]['subnet_id'] return subnet_id external_ids = { ovn_const.OVN_GW_PORT_EXT_ID_KEY: router.get('gw_port_id') or '', ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: router.get('name', 'no_router_name')} # Get the routes routes = [] for r in router.get('routes', []): routes.append(FakeStaticRoute(ip_prefix=r['destination'], nexthop=r['nexthop'], external_ids={})) gw_info = router.get(l3.EXTERNAL_GW_INFO) if gw_info: external_ids = { ovn_const.OVN_ROUTER_IS_EXT_GW: 'true', ovn_const.OVN_SUBNET_EXT_ID_KEY: _get_subnet_id(gw_info)} routes.append(FakeStaticRoute( ip_prefix='0.0.0.0/0', nexthop='', external_ids=external_ids)) return FakeOVNRouter.create_one_router( {'external_ids': external_ids, 'enabled': router.get('admin_state_up') or False, 'name': ovn_utils.ovn_name(router['id']), 'static_routes': routes}) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4470458 neutron-16.0.0.0b2.dev214/neutron/tests/unit/hacking/0000755000175000017500000000000000000000000022362 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/hacking/__init__.py0000644000175000017500000000000000000000000024461 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/hacking/test_checks.py0000644000175000017500000002677300000000000025252 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from flake8 import engine from hacking.tests import test_doctest as hacking_doctest import pkg_resources import pycodestyle import testscenarios import testtools from testtools import content from testtools import matchers from neutron.hacking import checks from neutron.tests import base CREATE_DUMMY_MATCH_OBJECT = re.compile('a') class HackingTestCase(base.BaseTestCase): def assertLinePasses(self, func, line): with testtools.ExpectedException(StopIteration): next(func(line)) def assertLineFails(self, func, line): self.assertIsInstance(next(func(line)), tuple) def test_assert_called_once_with(self): fail_code2 = """ mock = Mock() mock.method(1, 2, 3, test='wow') mock.method.assertCalledOnceWith() """ fail_code3 = """ mock = Mock() mock.method(1, 2, 3, test='wow') mock.method.called_once_with() """ fail_code4 = """ mock = Mock() mock.method(1, 2, 3, test='wow') mock.method.assert_has_called() """ pass_code = """ mock = Mock() mock.method(1, 2, 3, test='wow') mock.method.assert_called_once_with() """ pass_code2 = """ mock = Mock() mock.method(1, 2, 3, test='wow') mock.method.assert_has_calls() """ self.assertEqual( 1, len(list(checks.check_assert_called_once_with(fail_code2, "neutron/tests/test_assert.py")))) self.assertEqual( 1, len(list(checks.check_assert_called_once_with(fail_code3, "neutron/tests/test_assert.py")))) self.assertEqual( 0, len(list(checks.check_assert_called_once_with(pass_code, "neutron/tests/test_assert.py")))) self.assertEqual( 1, len(list(checks.check_assert_called_once_with(fail_code4, "neutron/tests/test_assert.py")))) self.assertEqual( 0, len(list(checks.check_assert_called_once_with(pass_code2, "neutron/tests/test_assert.py")))) def test_asserttruefalse(self): true_fail_code1 = """ test_bool = True self.assertEqual(True, test_bool) """ true_fail_code2 = """ test_bool = True self.assertEqual(test_bool, True) """ true_pass_code = """ test_bool = True self.assertTrue(test_bool) """ false_fail_code1 = """ test_bool = False self.assertEqual(False, test_bool) """ false_fail_code2 = """ test_bool = False self.assertEqual(test_bool, False) """ false_pass_code = """ test_bool = False self.assertFalse(test_bool) """ self.assertEqual( 1, len(list( checks.check_asserttruefalse(true_fail_code1, "neutron/tests/test_assert.py")))) self.assertEqual( 1, len(list( checks.check_asserttruefalse(true_fail_code2, "neutron/tests/test_assert.py")))) self.assertEqual( 0, len(list( checks.check_asserttruefalse(true_pass_code, "neutron/tests/test_assert.py")))) self.assertEqual( 1, len(list( checks.check_asserttruefalse(false_fail_code1, "neutron/tests/test_assert.py")))) self.assertEqual( 1, len(list( checks.check_asserttruefalse(false_fail_code2, "neutron/tests/test_assert.py")))) self.assertFalse( list( checks.check_asserttruefalse(false_pass_code, "neutron/tests/test_assert.py"))) def test_assertempty(self): fail_code = """ test_empty = %s self.assertEqual(test_empty, %s) """ pass_code1 = """ test_empty = %s self.assertEqual(%s, test_empty) """ pass_code2 = """ self.assertEqual(123, foo(abc, %s)) """ empty_cases = ['{}', '[]', '""', "''", '()', 'set()'] for ec in empty_cases: 
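            # (assertLinePasses/assertLineFails above rely on the
            # hacking check contract: a check is a generator that yields
            # nothing for a clean line, so next() raises StopIteration,
            # and yields an (offset, message) tuple for an offending
            # one. A hypothetical check in that style, for illustration
            # only; the N999 code is made up:
            #
            #     def check_no_xrange(logical_line):
            #         if 'xrange(' in logical_line:
            #             yield (0, "N999: do not use xrange")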
            self.assertEqual(
                1, len(list(checks.check_assertempty(
                    fail_code % (ec, ec),
                    "neutron/tests/test_assert.py"))))
            self.assertEqual(
                0, len(list(checks.check_asserttruefalse(
                    pass_code1 % (ec, ec),
                    "neutron/tests/test_assert.py"))))
            self.assertEqual(
                0, len(list(checks.check_asserttruefalse(
                    pass_code2 % ec,
                    "neutron/tests/test_assert.py"))))

    def test_assertisinstance(self):
        fail_code = """
                    self.assertTrue(isinstance(observed, ANY_TYPE))
                    """
        pass_code1 = """
                    self.assertEqual(ANY_TYPE, type(observed))
                    """
        pass_code2 = """
                    self.assertIsInstance(observed, ANY_TYPE)
                    """
        self.assertEqual(
            1, len(list(checks.check_assertisinstance(
                fail_code, "neutron/tests/test_assert.py"))))
        self.assertEqual(
            0, len(list(checks.check_assertisinstance(
                pass_code1, "neutron/tests/test_assert.py"))))
        self.assertEqual(
            0, len(list(checks.check_assertisinstance(
                pass_code2, "neutron/tests/test_assert.py"))))

    def test_assertequal_for_httpcode(self):
        fail_code = """
                    self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
                    """
        pass_code = """
                    self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
                    """
        self.assertEqual(
            1, len(list(checks.check_assertequal_for_httpcode(
                fail_code, "neutron/tests/test_assert.py"))))
        self.assertEqual(
            0, len(list(checks.check_assertequal_for_httpcode(
                pass_code, "neutron/tests/test_assert.py"))))

    def test_check_no_imports_from_tests(self):
        fail_codes = ('from neutron import tests',
                      'from neutron.tests import base',
                      'import neutron.tests.base')
        for fail_code in fail_codes:
            self.assertEqual(
                1, len(list(
                    checks.check_no_imports_from_tests(
                        fail_code, "neutron/common/utils.py", None))))
            self.assertEqual(
                0, len(list(
                    checks.check_no_imports_from_tests(
                        fail_code, "neutron/tests/test_fake.py", None))))

    def test_check_python3_filter(self):
        f = checks.check_python3_no_filter
        self.assertLineFails(f, "filter(lambda obj: test(obj), data)")
        self.assertLinePasses(f, "[obj for obj in data if test(obj)]")
        self.assertLinePasses(f, "filter(function, range(0,10))")
        self.assertLinePasses(f, "lambda x, y: x+y")


# The following is borrowed from hacking/tests/test_doctest.py.
# Tests defined in docstrings are easier to understand in some cases,
# for example, hacking rules which take tokens as an argument.
# TODO(amotoki): Migrate existing unit tests above to docstring tests.
# NOTE(amotoki): It might be better to enhance HackingDocTestCase in the
# hacking repo to pass the filename to pycodestyle.Checker so that we can
# reuse it in this test; I am not sure whether the unit test class is public.
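# SELFTEST_REGEX below is what turns specially formatted docstring lines
# into scenarios: every ``Okay: <source>`` or ``N###(<file>): <source>``
# line (the parenthesised filename is optional) in a check's docstring
# becomes one test case, with Okay expected to pass and the N### code
# expected to be reported. A hypothetical docstring in that format, for
# illustration only (the N999 rule code is made up):
#
#     def check_example(logical_line):
#         """Explain the rule here.
#
#         Okay: result = [x for x in data]
#         N999: result = filter(func, data)
#         """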
SELFTEST_REGEX = re.compile(r'\b(Okay|N\d{3})(\((\S+)\))?:\s(.*)') # Each scenario is (name, dict(filename=..., lines=.., options=..., code=...)) file_cases = [] class HackingDocTestCase(hacking_doctest.HackingTestCase): scenarios = file_cases def test_pycodestyle(self): # NOTE(jecarey): Add tests marked as off_by_default to enable testing turn_on = set(['H106']) if self.options.select: turn_on.update(self.options.select) self.options.select = tuple(turn_on) self.options.ignore = ('N530',) report = pycodestyle.BaseReport(self.options) checker = pycodestyle.Checker(filename=self.filename, lines=self.lines, options=self.options, report=report) checker.check_all() self.addDetail('doctest', content.text_content(self.raw)) if self.code == 'Okay': self.assertThat( len(report.counters), matchers.Not(matchers.GreaterThan( len(self.options.benchmark_keys))), "incorrectly found %s" % ', '.join( [key for key in report.counters if key not in self.options.benchmark_keys])) else: self.addDetail('reason', content.text_content("Failed to trigger rule %s" % self.code)) self.assertIn(self.code, report.counters) def _get_lines(check): for line in check.__doc__.splitlines(): line = line.lstrip() match = SELFTEST_REGEX.match(line) if match is None: continue yield (line, match.groups()) def load_tests(loader, tests, pattern): default_checks = [e.name for e in pkg_resources.iter_entry_points('flake8.extension')] flake8_style = engine.get_style_guide( parse_argv=False, # We are testing neutron-specific hacking rules, so there is no need # to run the checks registered by hacking or other flake8 extensions. ignore=default_checks) options = flake8_style.options for name, check in checks.__dict__.items(): if not hasattr(check, 'name'): continue if check.name != checks.__name__: continue if not check.__doc__: continue for (lineno, (raw, line)) in enumerate(_get_lines(check)): code, __, filename, source = line lines = [part.replace(r'\t', '\t') + '\n' for part in source.split(r'\n')] file_cases.append(("%s-line-%s" % (name, lineno), dict(lines=lines, raw=raw, options=options, code=code, filename=filename))) return testscenarios.load_tests_apply_scenarios(loader, tests, pattern) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4470458 neutron-16.0.0.0b2.dev214/neutron/tests/unit/ipam/0000755000175000017500000000000000000000000021704 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/ipam/__init__.py0000644000175000017500000000000000000000000024003 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4470458 neutron-16.0.0.0b2.dev214/neutron/tests/unit/ipam/drivers/0000755000175000017500000000000000000000000023362 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/ipam/drivers/__init__.py0000644000175000017500000000000000000000000025461 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4470458 neutron-16.0.0.0b2.dev214/neutron/tests/unit/ipam/drivers/neutrondb_ipam/0000755000175000017500000000000000000000000026370 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/ipam/drivers/neutrondb_ipam/__init__.py0000644000175000017500000000000000000000000030467 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_db_api.py0000644000175000017500000001117600000000000031225 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import context from oslo_utils import uuidutils from neutron.ipam.drivers.neutrondb_ipam import db_api from neutron.objects import ipam as ipam_obj from neutron.tests.unit import testlib_api CORE_PLUGIN = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' class TestIpamSubnetManager(testlib_api.SqlTestCase): """Test case for SubnetManager DB helper class""" def setUp(self): super(TestIpamSubnetManager, self).setUp() self.setup_coreplugin(core_plugin=CORE_PLUGIN) self.ctx = context.get_admin_context() self.neutron_subnet_id = uuidutils.generate_uuid() self.ipam_subnet_id = uuidutils.generate_uuid() self.subnet_ip = '1.2.3.4' self.single_pool = ('1.2.3.4', '1.2.3.10') self.multi_pool = (('1.2.3.2', '1.2.3.12'), ('1.2.3.15', '1.2.3.24')) self.subnet_manager = db_api.IpamSubnetManager(self.ipam_subnet_id, self.neutron_subnet_id) self.subnet_manager_id = self.subnet_manager.create(self.ctx) self.ctx.session.flush() def test_create(self): self.assertEqual(self.ipam_subnet_id, self.subnet_manager_id) subnet_count = ipam_obj.IpamSubnet.count( self.ctx, id=self.ipam_subnet_id) self.assertEqual(1, subnet_count) def test_remove(self): count = db_api.IpamSubnetManager.delete(self.ctx, self.neutron_subnet_id) self.assertEqual(1, count) subnet_exists = ipam_obj.IpamSubnet.objects_exist( self.ctx, id=self.ipam_subnet_id) self.assertFalse(subnet_exists) def test_remove_non_existent_subnet(self): count = db_api.IpamSubnetManager.delete(self.ctx, 'non-existent') self.assertEqual(0, count) def _validate_ips(self, pools, db_pool): self.assertTrue( any(pool == (str(db_pool.first_ip), str(db_pool.last_ip)) for pool in pools)) def test_create_pool(self): self.subnet_manager.create_pool(self.ctx, self.single_pool[0], self.single_pool[1]) ipam_pools = ipam_obj.IpamAllocationPool.get_objects( self.ctx, ipam_subnet_id=self.ipam_subnet_id) self._validate_ips([self.single_pool], ipam_pools[0]) def test_check_unique_allocation(self): self.assertTrue(self.subnet_manager.check_unique_allocation( self.ctx, self.subnet_ip)) def test_check_unique_allocation_negative(self): self.subnet_manager.create_allocation(self.ctx, self.subnet_ip) self.assertFalse(self.subnet_manager.check_unique_allocation( self.ctx, self.subnet_ip)) def test_list_allocations(self): ips = ['1.2.3.4', '1.2.3.6', '1.2.3.7'] for ip in ips: self.subnet_manager.create_allocation(self.ctx, ip) allocs = self.subnet_manager.list_allocations(self.ctx) self.assertEqual(len(ips), len(allocs)) for 
allocation in allocs: self.assertIn(str(allocation.ip_address), ips) def _test_create_allocation(self): self.subnet_manager.create_allocation(self.ctx, self.subnet_ip) alloc = ipam_obj.IpamAllocation.get_objects( self.ctx, ipam_subnet_id=self.ipam_subnet_id) self.assertEqual(1, len(alloc)) self.assertEqual(self.subnet_ip, str(alloc[0].ip_address)) return alloc def test_create_allocation(self): self._test_create_allocation() def test_delete_allocation(self): allocs = self._test_create_allocation() self.subnet_manager.delete_allocation(self.ctx, allocs[0].ip_address) alloc_exists = ipam_obj.IpamAllocation.objects_exist( self.ctx, ipam_subnet_id=self.ipam_subnet_id) self.assertFalse(alloc_exists) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_driver.py0000644000175000017500000005307200000000000031303 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import netaddr from neutron_lib import constants from neutron_lib import context from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from oslo_utils import uuidutils from neutron.ipam.drivers.neutrondb_ipam import driver from neutron.ipam import exceptions as ipam_exc from neutron.ipam import requests as ipam_req from neutron.objects import ipam as ipam_obj from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db_plugin from neutron.tests.unit import testlib_api def convert_firstip_to_ipaddress(range_item): return netaddr.IPAddress(range_item['first_ip']) class TestNeutronDbIpamMixin(object): def _create_network(self, plugin, ctx, shared=False): network = {'network': {'name': 'net', 'shared': shared, 'admin_state_up': True, 'tenant_id': self._tenant_id}} created_network = plugin.create_network(ctx, network) return (created_network, created_network['id']) def _create_subnet(self, plugin, ctx, network_id, cidr, ip_version=constants.IP_VERSION_4, v6_address_mode=constants.ATTR_NOT_SPECIFIED, allocation_pools=constants.ATTR_NOT_SPECIFIED): subnet = {'subnet': {'name': 'sub', 'cidr': cidr, 'ip_version': ip_version, 'gateway_ip': constants.ATTR_NOT_SPECIFIED, 'allocation_pools': allocation_pools, 'enable_dhcp': True, 'dns_nameservers': constants.ATTR_NOT_SPECIFIED, 'host_routes': constants.ATTR_NOT_SPECIFIED, 'ipv6_address_mode': v6_address_mode, 'ipv6_ra_mode': constants.ATTR_NOT_SPECIFIED, 'network_id': network_id, 'tenant_id': self._tenant_id}} return plugin.create_subnet(ctx, subnet) class TestNeutronDbIpamPool(testlib_api.SqlTestCase, TestNeutronDbIpamMixin): """Test case for the Neutron's DB IPAM driver subnet pool interface.""" def setUp(self): super(TestNeutronDbIpamPool, self).setUp() self._tenant_id = 'test-tenant' # Configure plugin for tests self.setup_coreplugin(test_db_plugin.DB_PLUGIN_KLASS) # Prepare environment for tests self.plugin = directory.get_plugin() self.ctx = 
context.get_admin_context() self.network, self.net_id = self._create_network(self.plugin, self.ctx) # Allocate IPAM driver self.ipam_pool = driver.NeutronDbPool(None, self.ctx) def _verify_ipam_subnet_details(self, ipam_subnet, cidr=None, tenant_id=None, gateway_ip=None, allocation_pools=None): ipam_subnet_details = ipam_subnet.get_details() gateway_ip_address = None cidr_ip_network = None if gateway_ip: gateway_ip_address = netaddr.IPAddress(gateway_ip) if cidr: cidr_ip_network = netaddr.IPNetwork(cidr) self.assertEqual(tenant_id, ipam_subnet_details.tenant_id) self.assertEqual(gateway_ip_address, ipam_subnet_details.gateway_ip) self.assertEqual(cidr_ip_network, ipam_subnet_details.subnet_cidr) self.assertEqual(allocation_pools, ipam_subnet_details.allocation_pools) def test_allocate_ipam_subnet_no_neutron_subnet_id(self): cidr = '10.0.0.0/24' allocation_pools = [netaddr.IPRange('10.0.0.100', '10.0.0.150'), netaddr.IPRange('10.0.0.200', '10.0.0.250')] subnet_req = ipam_req.SpecificSubnetRequest( self._tenant_id, None, cidr, allocation_pools=allocation_pools, gateway_ip='10.0.0.101') ipam_subnet = self.ipam_pool.allocate_subnet(subnet_req) self._verify_ipam_subnet_details(ipam_subnet, cidr, self._tenant_id, '10.0.0.101', allocation_pools) def _prepare_specific_subnet_request(self, cidr): subnet = self._create_subnet( self.plugin, self.ctx, self.net_id, cidr) subnet_req = ipam_req.SpecificSubnetRequest( self._tenant_id, subnet['id'], cidr, gateway_ip=subnet['gateway_ip']) return subnet, subnet_req def test_allocate_ipam_subnet_with_neutron_subnet_id(self): cidr = '10.0.0.0/24' subnet, subnet_req = self._prepare_specific_subnet_request(cidr) ipam_subnet = self.ipam_pool.allocate_subnet(subnet_req) self._verify_ipam_subnet_details( ipam_subnet, cidr, self._tenant_id, subnet['gateway_ip'], [netaddr.IPRange('10.0.0.2', '10.0.0.254')]) def test_allocate_any_subnet_fails(self): self.assertRaises( ipam_exc.InvalidSubnetRequestType, self.ipam_pool.allocate_subnet, ipam_req.AnySubnetRequest(self._tenant_id, 'meh', constants.IPv4, 24)) def _test_update_subnet_pools(self, allocation_pools, expected_pools=None): if expected_pools is None: expected_pools = allocation_pools cidr = '10.0.0.0/24' subnet, subnet_req = self._prepare_specific_subnet_request(cidr) self.ipam_pool.allocate_subnet(subnet_req) update_subnet_req = ipam_req.SpecificSubnetRequest( self._tenant_id, subnet['id'], cidr, gateway_ip=subnet['gateway_ip'], allocation_pools=allocation_pools) self.ipam_pool.update_subnet(update_subnet_req) ipam_subnet = self.ipam_pool.get_subnet(subnet['id']) self._verify_ipam_subnet_details( ipam_subnet, cidr, self._tenant_id, subnet['gateway_ip'], expected_pools) def test_update_subnet_pools(self): allocation_pools = [netaddr.IPRange('10.0.0.100', '10.0.0.150'), netaddr.IPRange('10.0.0.200', '10.0.0.250')] self._test_update_subnet_pools(allocation_pools) def test_update_subnet_pools_with_blank_pools(self): allocation_pools = [] self._test_update_subnet_pools(allocation_pools) def test_update_subnet_pools_with_none_pools(self): allocation_pools = None expected_pools = [netaddr.IPRange('10.0.0.2', '10.0.0.254')] # Pools should not be changed on update self._test_update_subnet_pools(allocation_pools, expected_pools=expected_pools) def test_get_subnet(self): cidr = '10.0.0.0/24' subnet, subnet_req = self._prepare_specific_subnet_request(cidr) self.ipam_pool.allocate_subnet(subnet_req) # Retrieve the subnet ipam_subnet = self.ipam_pool.get_subnet(subnet['id']) self._verify_ipam_subnet_details( ipam_subnet, cidr, 
self._tenant_id, subnet['gateway_ip'], [netaddr.IPRange('10.0.0.2', '10.0.0.254')]) def test_get_non_existing_subnet_fails(self): self.assertRaises(n_exc.SubnetNotFound, self.ipam_pool.get_subnet, 'boo') def test_remove_ipam_subnet(self): cidr = '10.0.0.0/24' subnet, subnet_req = self._prepare_specific_subnet_request(cidr) self.ipam_pool.allocate_subnet(subnet_req) # Remove ipam subnet by neutron subnet id self.ipam_pool.remove_subnet(subnet['id']) def test_remove_non_existent_subnet_fails(self): self.assertRaises(n_exc.SubnetNotFound, self.ipam_pool.remove_subnet, 'non-existent-id') def test_get_details_for_invalid_subnet_id_fails(self): cidr = '10.0.0.0/24' non_existent_id = uuidutils.generate_uuid() subnet_req = ipam_req.SpecificSubnetRequest( self._tenant_id, non_existent_id, cidr) self.ipam_pool.allocate_subnet(subnet_req) # Neutron subnet does not exist, so get_subnet should fail self.assertRaises(n_exc.SubnetNotFound, self.ipam_pool.get_subnet, 'non-existent-id') class TestNeutronDbIpamSubnet(testlib_api.SqlTestCase, TestNeutronDbIpamMixin): """Test case for Subnet interface for Neutron's DB IPAM driver. This test case exercises the reference IPAM driver. Even if it loads a plugin, the unit tests in this class do not exercise it at all; they simply perform white box testing on the IPAM driver. The plugin is exclusively used to create the neutron objects on which the IPAM driver will operate. """ def _create_and_allocate_ipam_subnet( self, cidr, allocation_pools=constants.ATTR_NOT_SPECIFIED, ip_version=constants.IP_VERSION_4, v6_auto_address=False, tenant_id=None): v6_address_mode = constants.ATTR_NOT_SPECIFIED if v6_auto_address: # set ip version to 6 regardless of what's been passed to the # method ip_version = constants.IP_VERSION_6 v6_address_mode = constants.IPV6_SLAAC subnet = self._create_subnet( self.plugin, self.ctx, self.net_id, cidr, ip_version=ip_version, allocation_pools=allocation_pools, v6_address_mode=v6_address_mode) # Build netaddr.IPRanges from allocation pools since IPAM SubnetRequest # objects are strongly typed allocation_pool_ranges = [netaddr.IPRange( pool['start'], pool['end']) for pool in subnet['allocation_pools']] subnet_req = ipam_req.SpecificSubnetRequest( tenant_id, subnet['id'], cidr, gateway_ip=subnet['gateway_ip'], allocation_pools=allocation_pool_ranges) ipam_subnet = self.ipam_pool.allocate_subnet(subnet_req) return ipam_subnet, subnet def setUp(self): super(TestNeutronDbIpamSubnet, self).setUp() self._tenant_id = 'test-tenant' # Configure plugin for tests self.setup_coreplugin(test_db_plugin.DB_PLUGIN_KLASS) # Prepare environment for tests self.plugin = directory.get_plugin() self.ctx = context.get_admin_context() self.network, self.net_id = self._create_network(self.plugin, self.ctx) # Allocate IPAM driver self.ipam_pool = driver.NeutronDbPool(None, self.ctx) def test__verify_ip_succeeds(self): cidr = '10.0.0.0/24' ipam_subnet = self._create_and_allocate_ipam_subnet(cidr)[0] ipam_subnet._verify_ip(self.ctx, '10.0.0.2') def test__verify_ip_not_in_subnet_fails(self): cidr = '10.0.0.0/24' ipam_subnet = self._create_and_allocate_ipam_subnet(cidr)[0] self.assertRaises(ipam_exc.InvalidIpForSubnet, ipam_subnet._verify_ip, self.ctx, '192.168.0.2') def test__verify_ip_bcast_and_network_fail(self): cidr = '10.0.0.0/24' ipam_subnet = self._create_and_allocate_ipam_subnet(cidr)[0] self.assertRaises(ipam_exc.InvalidIpForSubnet, ipam_subnet._verify_ip, self.ctx, '10.0.0.255') self.assertRaises(ipam_exc.InvalidIpForSubnet, ipam_subnet._verify_ip, self.ctx, 
'10.0.0.0') def _allocate_address(self, cidr, ip_version, address_request): ipam_subnet = self._create_and_allocate_ipam_subnet( cidr, ip_version=ip_version)[0] return ipam_subnet.allocate(address_request) def test_allocate_any_v4_address_succeeds(self): self._test_allocate_any_address_succeeds('10.0.0.0/24', 4) def test_allocate_any_v6_address_succeeds(self): self._test_allocate_any_address_succeeds('fde3:abcd:4321:1::/64', 6) def _test_allocate_any_address_succeeds(self, subnet_cidr, ip_version): ip_address = self._allocate_address( subnet_cidr, ip_version, ipam_req.AnyAddressRequest) self.assertIn(netaddr.IPAddress(ip_address), netaddr.IPSet(netaddr.IPNetwork(subnet_cidr))) def test_allocate_specific_v4_address_succeeds(self): ip_address = self._allocate_address( '10.0.0.0/24', 4, ipam_req.SpecificAddressRequest('10.0.0.33')) self.assertEqual('10.0.0.33', ip_address) def test_allocate_specific_v6_address_succeeds(self): ip_address = self._allocate_address( 'fde3:abcd:4321:1::/64', 6, ipam_req.SpecificAddressRequest('fde3:abcd:4321:1::33')) self.assertEqual('fde3:abcd:4321:1::33', ip_address) def test_allocate_specific_v4_address_out_of_range_fails(self): self.assertRaises(ipam_exc.InvalidIpForSubnet, self._allocate_address, '10.0.0.0/24', 4, ipam_req.SpecificAddressRequest('192.168.0.1')) def test_allocate_specific_v6_address_out_of_range_fails(self): self.assertRaises(ipam_exc.InvalidIpForSubnet, self._allocate_address, 'fde3:abcd:4321:1::/64', 6, ipam_req.SpecificAddressRequest( 'fde3:abcd:eeee:1::33')) def test_allocate_specific_address_in_use_fails(self): ipam_subnet = self._create_and_allocate_ipam_subnet( 'fde3:abcd:4321:1::/64', ip_version=constants.IP_VERSION_6)[0] addr_req = ipam_req.SpecificAddressRequest('fde3:abcd:4321:1::33') ipam_subnet.allocate(addr_req) self.assertRaises(ipam_exc.IpAddressAlreadyAllocated, ipam_subnet.allocate, addr_req) def test_allocate_any_address_exhausted_pools_fails(self): # Same as above, the ranges will be recalculated always ipam_subnet = self._create_and_allocate_ipam_subnet( '192.168.0.0/30', ip_version=constants.IP_VERSION_4)[0] ipam_subnet.allocate(ipam_req.AnyAddressRequest) # The second address generation request on a /30 for v4 net must fail self.assertRaises(ipam_exc.IpAddressGenerationFailure, ipam_subnet.allocate, ipam_req.AnyAddressRequest) def test_bulk_allocate_v4_address(self): target_ip_count = 10 ipam_subnet = self._create_and_allocate_ipam_subnet( '192.168.0.0/28', ip_version=constants.IP_VERSION_4)[0] ip_addresses = ipam_subnet.bulk_allocate( ipam_req.BulkAddressRequest(target_ip_count)) self.assertEqual(target_ip_count, len(ip_addresses)) self.assertRaises(ipam_exc.IpAddressGenerationFailure, ipam_subnet.bulk_allocate, ipam_req.BulkAddressRequest(target_ip_count)) def test_bulk_allocate_v6_address(self): target_ip_count = 10 ipam_subnet = self._create_and_allocate_ipam_subnet( 'fd00::/124', ip_version=constants.IP_VERSION_6)[0] ip_addresses = ipam_subnet.bulk_allocate( ipam_req.BulkAddressRequest(target_ip_count)) self.assertEqual(target_ip_count, len(ip_addresses)) self.assertRaises(ipam_exc.IpAddressGenerationFailure, ipam_subnet.bulk_allocate, ipam_req.BulkAddressRequest(target_ip_count)) def test_bulk_allocate_multiple_address_pools(self): target_ip_count = 10 # 11 addresses available allocation_pools = [{'start': '192.168.0.5', 'end': '192.168.0.9'}, {'start': '192.168.0.15', 'end': '192.168.0.20'}] ipam_subnet = self._create_and_allocate_ipam_subnet( '192.168.0.0/24', allocation_pools=allocation_pools, 
            ip_version=constants.IP_VERSION_4)[0]
        ip_addresses = ipam_subnet.bulk_allocate(
            ipam_req.BulkAddressRequest(target_ip_count))
        self.assertEqual(target_ip_count, len(ip_addresses))
        self.assertRaises(ipam_exc.IpAddressGenerationFailure,
                          ipam_subnet.bulk_allocate,
                          ipam_req.BulkAddressRequest(2))

    def test_prefernext_allocate_multiple_address_pools(self):
        ipam_subnet = self._create_and_allocate_ipam_subnet(
            '192.168.0.0/30', ip_version=constants.IP_VERSION_4)[0]
        ipam_subnet.allocate(ipam_req.PreferNextAddressRequest())
        # The second address generation request on a /30 for a v4 net
        # must fail
        self.assertRaises(ipam_exc.IpAddressGenerationFailure,
                          ipam_subnet.allocate,
                          ipam_req.PreferNextAddressRequest)

    def _test_deallocate_address(self, cidr, ip_version):
        ipam_subnet = self._create_and_allocate_ipam_subnet(
            cidr, ip_version=ip_version)[0]
        ip_address = ipam_subnet.allocate(ipam_req.AnyAddressRequest)
        ipam_subnet.deallocate(ip_address)

    def test_deallocate_v4_address(self):
        self._test_deallocate_address('10.0.0.0/24', 4)

    def test_deallocate_v6_address(self):
        # This test does not really exercise any different code path
        # with respect to test_deallocate_v4_address. It is provided for
        # completeness and for future-proofing in case v6-specific logic
        # is added.
        self._test_deallocate_address('fde3:abcd:4321:1::/64', 6)

    def test_allocate_unallocated_address_fails(self):
        ipam_subnet = self._create_and_allocate_ipam_subnet(
            '10.0.0.0/24', ip_version=constants.IP_VERSION_4)[0]
        self.assertRaises(ipam_exc.IpAddressAllocationNotFound,
                          ipam_subnet.deallocate, '10.0.0.2')

    def test_allocate_all_pool_addresses_triggers_range_recalculation(self):
        # This test might instead be made to pass, but for the wrong
        # reasons!
        pass

    def test_allocate_subnet_for_non_existent_subnet_pass(self):
        # This test should pass because the ipam subnet no longer has a
        # foreign key relationship with the neutron subnet. Creating the
        # ipam subnet before the neutron subnet is a valid case.
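        # In other words, the driver couples the two objects only
        # through the neutron subnet id carried by the request, so
        # allocating with a freshly generated, not-yet-existing id (as
        # done below) is expected to succeed. A sketch of that single
        # coupling point (identifiers are illustrative):
        #
        #     >>> req = ipam_req.SpecificSubnetRequest(
        #     ...     tenant_id, subnet_id, '192.168.0.0/24')
        #     >>> req.subnet_id == subnet_id
        #     True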
tenant_id = uuidutils.generate_uuid() subnet_id = uuidutils.generate_uuid() subnet_req = ipam_req.SpecificSubnetRequest( tenant_id, subnet_id, '192.168.0.0/24') self.ipam_pool.allocate_subnet(subnet_req) def test_update_allocation_pools_with_no_pool_change(self): cidr = '10.0.0.0/24' ipam_subnet = self._create_and_allocate_ipam_subnet( cidr)[0] ipam_subnet.subnet_manager.delete_allocation_pools = mock.Mock() ipam_subnet.create_allocation_pools = mock.Mock() alloc_pools = [netaddr.IPRange('10.0.0.2', '10.0.0.254')] # Make sure allocation pools recreation does not happen in case of # unchanged allocation pools ipam_subnet.update_allocation_pools(alloc_pools, cidr) self.assertFalse( ipam_subnet.subnet_manager.delete_allocation_pools.called) self.assertFalse(ipam_subnet.create_allocation_pools.called) def _test__no_pool_changes(self, new_pools): id = uuidutils.generate_uuid() ipam_subnet = driver.NeutronDbSubnet(id, self.ctx) pools = [ipam_obj.IpamAllocationPool(self.ctx, ipam_subnet_id=id, first_ip='192.168.10.20', last_ip='192.168.10.41'), ipam_obj.IpamAllocationPool(self.ctx, ipam_subnet_id=id, first_ip='192.168.10.50', last_ip='192.168.10.60')] ipam_subnet.subnet_manager.list_pools = mock.Mock(return_value=pools) return ipam_subnet._no_pool_changes(self.ctx, new_pools) def test__no_pool_changes_negative(self): pool_list = [[netaddr.IPRange('192.168.10.2', '192.168.10.254')], [netaddr.IPRange('192.168.10.20', '192.168.10.41')], [netaddr.IPRange('192.168.10.20', '192.168.10.41'), netaddr.IPRange('192.168.10.51', '192.168.10.60')]] for pools in pool_list: self.assertFalse(self._test__no_pool_changes(pools)) def test__no_pool_changes_positive(self): pools = [netaddr.IPRange('192.168.10.20', '192.168.10.41'), netaddr.IPRange('192.168.10.50', '192.168.10.60')] self.assertTrue(self._test__no_pool_changes(pools)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/ipam/fake_driver.py0000644000175000017500000000220000000000000024531 0ustar00coreycorey00000000000000# Copyright (c) 2015 Infoblox Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.ipam import driver class FakeDriver(driver.Pool): """Fake IPAM driver for tests only Just implement IPAM Driver interface without any functionality inside """ def allocate_subnet(self, subnet): return driver.Subnet() def get_subnet(self, cidr): return driver.Subnet() def get_allocator(self, subnet_ids): return driver.SubnetGroup() def update_subnet(self, request): return driver.Subnet() def remove_subnet(self, cidr): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/ipam/test_requests.py0000644000175000017500000004003400000000000025171 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import netaddr from neutron_lib import constants from neutron_lib import context from oslo_config import cfg from oslo_utils import netutils from oslo_utils import uuidutils from neutron.ipam import driver from neutron.ipam import exceptions as ipam_exc from neutron.ipam import requests as ipam_req from neutron import manager from neutron.tests import base from neutron.tests.unit.ipam import fake_driver FAKE_IPAM_CLASS = 'neutron.tests.unit.ipam.fake_driver.FakeDriver' class IpamSubnetRequestTestCase(base.BaseTestCase): def setUp(self): super(IpamSubnetRequestTestCase, self).setUp() self.tenant_id = uuidutils.generate_uuid() self.subnet_id = uuidutils.generate_uuid() class TestIpamSubnetRequests(IpamSubnetRequestTestCase): def test_subnet_request(self): pool = ipam_req.SubnetRequest(self.tenant_id, self.subnet_id) self.assertEqual(self.tenant_id, pool.tenant_id) self.assertEqual(self.subnet_id, pool.subnet_id) self.assertIsNone(pool.gateway_ip) self.assertIsNone(pool.allocation_pools) def test_subnet_request_gateway(self): request = ipam_req.SubnetRequest(self.tenant_id, self.subnet_id, gateway_ip='1.2.3.1') self.assertEqual('1.2.3.1', str(request.gateway_ip)) def test_subnet_request_bad_gateway(self): self.assertRaises(netaddr.core.AddrFormatError, ipam_req.SubnetRequest, self.tenant_id, self.subnet_id, gateway_ip='1.2.3.') def test_subnet_request_with_range(self): allocation_pools = [netaddr.IPRange('1.2.3.4', '1.2.3.5'), netaddr.IPRange('1.2.3.7', '1.2.3.9')] request = ipam_req.SubnetRequest(self.tenant_id, self.subnet_id, allocation_pools=allocation_pools) self.assertEqual(allocation_pools, request.allocation_pools) def test_subnet_request_range_not_list(self): self.assertRaises(TypeError, ipam_req.SubnetRequest, self.tenant_id, self.subnet_id, allocation_pools=1) def test_subnet_request_bad_range(self): self.assertRaises(TypeError, ipam_req.SubnetRequest, self.tenant_id, self.subnet_id, allocation_pools=['1.2.3.4']) def test_subnet_request_different_versions(self): pools = [netaddr.IPRange('0.0.0.1', '0.0.0.2'), netaddr.IPRange('::1', '::2')] self.assertRaises(ValueError, ipam_req.SubnetRequest, self.tenant_id, self.subnet_id, allocation_pools=pools) def test_subnet_request_overlap(self): pools = [netaddr.IPRange('0.0.0.10', '0.0.0.20'), netaddr.IPRange('0.0.0.8', '0.0.0.10')] self.assertRaises(ValueError, ipam_req.SubnetRequest, self.tenant_id, self.subnet_id, allocation_pools=pools) class TestIpamAnySubnetRequest(IpamSubnetRequestTestCase): def test_subnet_request(self): request = ipam_req.AnySubnetRequest(self.tenant_id, self.subnet_id, constants.IPv4, 24, gateway_ip='0.0.0.1') self.assertEqual(24, request.prefixlen) def test_subnet_request_bad_prefix_type(self): self.assertRaises(netaddr.core.AddrFormatError, ipam_req.AnySubnetRequest, self.tenant_id, self.subnet_id, constants.IPv4, 'A') def test_subnet_request_bad_prefix(self): self.assertRaises(netaddr.core.AddrFormatError, ipam_req.AnySubnetRequest, self.tenant_id, self.subnet_id, constants.IPv4, 33) self.assertRaises(netaddr.core.AddrFormatError, ipam_req.AnySubnetRequest, self.tenant_id, self.subnet_id, 
constants.IPv6, 129) def test_subnet_request_gateway(self): request = ipam_req.AnySubnetRequest(self.tenant_id, self.subnet_id, constants.IPv6, 64, gateway_ip='2000::1') self.assertEqual(netaddr.IPAddress('2000::1'), request.gateway_ip) def test_subnet_request_allocation_pool_wrong_version(self): pools = [netaddr.IPRange('0.0.0.4', '0.0.0.5')] self.assertRaises(ipam_exc.IpamValueInvalid, ipam_req.AnySubnetRequest, self.tenant_id, self.subnet_id, constants.IPv6, 64, allocation_pools=pools) def test_subnet_request_allocation_pool_not_in_net(self): pools = [netaddr.IPRange('0.0.0.64', '0.0.0.128')] self.assertRaises(ipam_exc.IpamValueInvalid, ipam_req.AnySubnetRequest, self.tenant_id, self.subnet_id, constants.IPv4, 25, allocation_pools=pools) class TestIpamSpecificSubnetRequest(IpamSubnetRequestTestCase): def test_subnet_request(self): request = ipam_req.SpecificSubnetRequest(self.tenant_id, self.subnet_id, '1.2.3.0/24', gateway_ip='1.2.3.1') self.assertEqual(24, request.prefixlen) self.assertEqual(netaddr.IPAddress('1.2.3.1'), request.gateway_ip) self.assertEqual(netaddr.IPNetwork('1.2.3.0/24'), request.subnet_cidr) def test_subnet_request_gateway(self): request = ipam_req.SpecificSubnetRequest(self.tenant_id, self.subnet_id, '2001::1', gateway_ip='2000::1') self.assertEqual(netaddr.IPAddress('2000::1'), request.gateway_ip) class TestAddressRequest(base.BaseTestCase): # This class doesn't test much. At least running through all of the # constructors may shake out some trivial bugs. EUI64 = ipam_req.AutomaticAddressRequest.EUI64 def test_specific_address_ipv6(self): request = ipam_req.SpecificAddressRequest('2000::45') self.assertEqual(netaddr.IPAddress('2000::45'), request.address) def test_specific_address_ipv4(self): request = ipam_req.SpecificAddressRequest('1.2.3.32') self.assertEqual(netaddr.IPAddress('1.2.3.32'), request.address) def test_any_address(self): ipam_req.AnyAddressRequest() def test_automatic_address_request_eui64(self): subnet_cidr = '2607:f0d0:1002:51::/64' port_mac = 'aa:bb:cc:dd:ee:ff' eui_addr = str(netutils.get_ipv6_addr_by_EUI64(subnet_cidr, port_mac)) request = ipam_req.AutomaticAddressRequest( address_type=self.EUI64, prefix=subnet_cidr, mac=port_mac) self.assertEqual(request.address, netaddr.IPAddress(eui_addr)) def test_automatic_address_request_invalid_address_type_raises(self): self.assertRaises(ipam_exc.InvalidAddressType, ipam_req.AutomaticAddressRequest, address_type='kaboom') def test_automatic_address_request_eui64_no_mac_raises(self): self.assertRaises(ipam_exc.AddressCalculationFailure, ipam_req.AutomaticAddressRequest, address_type=self.EUI64, prefix='meh') def test_automatic_address_request_eui64_alien_param_raises(self): self.assertRaises(ipam_exc.AddressCalculationFailure, ipam_req.AutomaticAddressRequest, address_type=self.EUI64, mac='meh', alien='et', prefix='meh') class TestIpamDriverLoader(base.BaseTestCase): def setUp(self): super(TestIpamDriverLoader, self).setUp() self.ctx = context.get_admin_context() def _verify_fake_ipam_driver_is_loaded(self, driver_name): mgr = manager.NeutronManager ipam_driver = mgr.load_class_for_provider('neutron.ipam_drivers', driver_name) self.assertEqual( fake_driver.FakeDriver, ipam_driver, "loaded ipam driver should be FakeDriver") def _verify_import_error_is_generated(self, driver_name): mgr = manager.NeutronManager self.assertRaises(ImportError, mgr.load_class_for_provider, 'neutron.ipam_drivers', driver_name) def test_ipam_driver_is_loaded_by_class(self): 
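        # load_class_for_provider accepts either a stevedore alias
        # registered under the 'neutron.ipam_drivers' entry-point
        # namespace or a fully qualified class path such as
        # FAKE_IPAM_CLASS; this test and the next one cover both
        # spellings. In deployment terms that maps to a neutron.conf
        # sketch like the following (the 'fake' alias exists only for
        # these tests):
        #
        #     [DEFAULT]
        #     ipam_driver = fake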
self._verify_fake_ipam_driver_is_loaded(FAKE_IPAM_CLASS) def test_ipam_driver_is_loaded_by_name(self): self._verify_fake_ipam_driver_is_loaded('fake') def test_ipam_driver_raises_import_error(self): self._verify_import_error_is_generated( 'neutron.tests.unit.ipam_req.SomeNonExistentClass') def test_ipam_driver_raises_import_error_for_none(self): self._verify_import_error_is_generated(None) def _load_ipam_driver(self, driver_name, subnet_pool_id): cfg.CONF.set_override("ipam_driver", driver_name) return driver.Pool.get_instance(subnet_pool_id, self.ctx) def test_ipam_driver_is_loaded_from_ipam_driver_config_value(self): ipam_driver = self._load_ipam_driver('fake', None) self.assertIsInstance( ipam_driver, fake_driver.FakeDriver, "loaded ipam driver should be of type FakeDriver") @mock.patch(FAKE_IPAM_CLASS) def test_ipam_driver_is_loaded_with_subnet_pool_id(self, ipam_mock): subnet_pool_id = 'SomePoolID' self._load_ipam_driver('fake', subnet_pool_id) ipam_mock.assert_called_once_with(subnet_pool_id, self.ctx) class TestAddressRequestFactory(base.BaseTestCase): def test_specific_address_request_is_loaded(self): for address in ('10.12.0.15', 'fffe::1'): ip = {'ip_address': address} port = {'device_owner': constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None'} self.assertIsInstance( ipam_req.AddressRequestFactory.get_request(None, port, ip), ipam_req.SpecificAddressRequest) def test_any_address_request_is_loaded(self): for addr in [None, '']: ip = {'ip_address': addr} port = {'device_owner': constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None'} self.assertIsInstance( ipam_req.AddressRequestFactory.get_request(None, port, ip), ipam_req.AnyAddressRequest) def test_automatic_address_request_is_loaded(self): ip = {'mac': '6c:62:6d:de:cf:49', 'subnet_cidr': '2001:470:abcd::/64', 'eui64_address': True} port = {'device_owner': constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None'} self.assertIsInstance( ipam_req.AddressRequestFactory.get_request(None, port, ip), ipam_req.AutomaticAddressRequest) def test_prefernext_address_request_on_dhcp_port(self): ip = {} port = {'device_owner': constants.DEVICE_OWNER_DHCP} self.assertIsInstance( ipam_req.AddressRequestFactory.get_request(None, port, ip), ipam_req.PreferNextAddressRequest) class TestSubnetRequestFactory(IpamSubnetRequestTestCase): def _build_subnet_dict(self, id=None, cidr='192.168.1.0/24', prefixlen=8, ip_version=constants.IP_VERSION_4): subnet = {'cidr': cidr, 'prefixlen': prefixlen, 'ip_version': ip_version, 'tenant_id': self.tenant_id, 'gateway_ip': None, 'allocation_pools': None, 'id': id or self.subnet_id} subnetpool = {'ip_version': ip_version, 'default_prefixlen': prefixlen} return subnet, subnetpool def test_specific_subnet_request_is_loaded(self): addresses = [ '10.12.0.15/24', '10.12.0.0/24', 'fffe::1/64', 'fffe::/64'] for address in addresses: subnet, subnetpool = self._build_subnet_dict(cidr=address) self.assertIsInstance( ipam_req.SubnetRequestFactory.get_request(None, subnet, subnetpool), ipam_req.SpecificSubnetRequest) def test_any_address_request_is_loaded_for_ipv4(self): subnet, subnetpool = self._build_subnet_dict( cidr=None, ip_version=constants.IP_VERSION_4) self.assertIsInstance( ipam_req.SubnetRequestFactory.get_request(None, subnet, subnetpool), ipam_req.AnySubnetRequest) def test_any_address_request_is_loaded_for_ipv6(self): subnet, subnetpool = self._build_subnet_dict( cidr=None, ip_version=constants.IP_VERSION_6) self.assertIsInstance( ipam_req.SubnetRequestFactory.get_request(None, subnet, subnetpool), ipam_req.AnySubnetRequest) def 
test_args_are_passed_to_specific_request(self): subnet, subnetpool = self._build_subnet_dict() request = ipam_req.SubnetRequestFactory.get_request(None, subnet, subnetpool) self.assertIsInstance(request, ipam_req.SpecificSubnetRequest) self.assertEqual(self.tenant_id, request.tenant_id) self.assertEqual(self.subnet_id, request.subnet_id) self.assertIsNone(request.gateway_ip) self.assertIsNone(request.allocation_pools) class TestGetRequestFactory(base.BaseTestCase): def setUp(self): super(TestGetRequestFactory, self).setUp() cfg.CONF.set_override('ipam_driver', 'fake') self.driver = driver.Pool.get_instance(None, None) def test_get_subnet_request_factory(self): self.assertEqual( self.driver.get_subnet_request_factory(), ipam_req.SubnetRequestFactory) def test_get_address_request_factory(self): self.assertEqual( self.driver.get_address_request_factory(), ipam_req.AddressRequestFactory) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/ipam/test_subnet_alloc.py0000644000175000017500000002401400000000000025770 0ustar00coreycorey00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
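# These tests exercise neutron.ipam.subnet_alloc.SubnetAllocator, which
# carves subnets out of a subnet pool's prefixes while enforcing prefix
# space and the pool's default quota. The essential call sequence, with
# illustrative values, is:
#
#     >>> sa = subnet_alloc.SubnetAllocator(subnetpool, ctx)
#     >>> req = ipam_req.AnySubnetRequest(
#     ...     tenant_id, uuidutils.generate_uuid(), constants.IPv4, 21)
#     >>> sa.allocate_subnet(req).get_details().prefixlen
#     21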
import mock import netaddr from neutron_lib import constants from neutron_lib import context from neutron_lib.db import api as db_api from neutron_lib import exceptions from neutron_lib.plugins import directory from oslo_config import cfg from oslo_db import exception as db_exc from oslo_utils import uuidutils from neutron.ipam import requests as ipam_req from neutron.ipam import subnet_alloc from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit import testlib_api class TestSubnetAllocation(testlib_api.SqlTestCase): def setUp(self): super(TestSubnetAllocation, self).setUp() self._tenant_id = 'test-tenant' self.setup_coreplugin(test_db_base_plugin_v2.DB_PLUGIN_KLASS) self.plugin = directory.get_plugin() self.ctx = context.get_admin_context() cfg.CONF.set_override('allow_overlapping_ips', True) def _create_subnet_pool(self, plugin, ctx, name, prefix_list, min_prefixlen, ip_version, max_prefixlen=constants.ATTR_NOT_SPECIFIED, default_prefixlen=constants.ATTR_NOT_SPECIFIED, default_quota=constants.ATTR_NOT_SPECIFIED, shared=False, is_default=False): subnetpool = {'subnetpool': {'name': name, 'tenant_id': self._tenant_id, 'prefixes': prefix_list, 'min_prefixlen': min_prefixlen, 'max_prefixlen': max_prefixlen, 'default_prefixlen': default_prefixlen, 'shared': shared, 'is_default': is_default, 'default_quota': default_quota}} return plugin.create_subnetpool(ctx, subnetpool) def _get_subnetpool(self, ctx, plugin, id): return plugin.get_subnetpool(ctx, id) def test_allocate_any_subnet(self): prefix_list = ['10.1.0.0/16', '192.168.1.0/24'] sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp', prefix_list, 21, 4) sp = self.plugin._get_subnetpool(self.ctx, sp['id']) with db_api.CONTEXT_WRITER.using(self.ctx): sa = subnet_alloc.SubnetAllocator(sp, self.ctx) req = ipam_req.AnySubnetRequest(self._tenant_id, uuidutils.generate_uuid(), constants.IPv4, 21) res = sa.allocate_subnet(req) detail = res.get_details() prefix_set = netaddr.IPSet(iterable=prefix_list) allocated_set = netaddr.IPSet(iterable=[detail.subnet_cidr]) self.assertTrue(allocated_set.issubset(prefix_set)) self.assertEqual(21, detail.prefixlen) def test_allocate_specific_subnet(self): sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp', ['10.1.0.0/16', '192.168.1.0/24'], 21, 4) with db_api.CONTEXT_WRITER.using(self.ctx): sp = self.plugin._get_subnetpool(self.ctx, sp['id']) sa = subnet_alloc.SubnetAllocator(sp, self.ctx) req = ipam_req.SpecificSubnetRequest(self._tenant_id, uuidutils.generate_uuid(), '10.1.2.0/24') res = sa.allocate_subnet(req) detail = res.get_details() sp = self._get_subnetpool(self.ctx, self.plugin, sp['id']) self.assertEqual('10.1.2.0/24', str(detail.subnet_cidr)) self.assertEqual(24, detail.prefixlen) def test_insufficient_prefix_space_for_any_allocation(self): sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp', ['10.1.1.0/24', '192.168.1.0/24'], 21, 4) sp = self.plugin._get_subnetpool(self.ctx, sp['id']) sa = subnet_alloc.SubnetAllocator(sp, self.ctx) req = ipam_req.AnySubnetRequest(self._tenant_id, uuidutils.generate_uuid(), constants.IPv4, 21) self.assertRaises(exceptions.SubnetAllocationError, sa.allocate_subnet, req) def test_insufficient_prefix_space_for_specific_allocation(self): sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp', ['10.1.0.0/24'], 21, 4) sp = self.plugin._get_subnetpool(self.ctx, sp['id']) sa = subnet_alloc.SubnetAllocator(sp, self.ctx) req = ipam_req.SpecificSubnetRequest(self._tenant_id, uuidutils.generate_uuid(), 
'10.1.0.0/21') self.assertRaises(exceptions.SubnetAllocationError, sa.allocate_subnet, req) def test_allocate_any_subnet_gateway(self): sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp', ['10.1.0.0/16', '192.168.1.0/24'], 21, 4) sp = self.plugin._get_subnetpool(self.ctx, sp['id']) with db_api.CONTEXT_WRITER.using(self.ctx): sa = subnet_alloc.SubnetAllocator(sp, self.ctx) req = ipam_req.AnySubnetRequest(self._tenant_id, uuidutils.generate_uuid(), constants.IPv4, 21) res = sa.allocate_subnet(req) detail = res.get_details() self.assertEqual(detail.gateway_ip, detail.subnet_cidr.network + 1) def test_allocate_specific_subnet_specific_gateway(self): sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp', ['10.1.0.0/16', '192.168.1.0/24'], 21, 4) sp = self.plugin._get_subnetpool(self.ctx, sp['id']) with db_api.CONTEXT_WRITER.using(self.ctx): sa = subnet_alloc.SubnetAllocator(sp, self.ctx) req = ipam_req.SpecificSubnetRequest(self._tenant_id, uuidutils.generate_uuid(), '10.1.2.0/24', gateway_ip='10.1.2.254') res = sa.allocate_subnet(req) detail = res.get_details() self.assertEqual(netaddr.IPAddress('10.1.2.254'), detail.gateway_ip) def test_allocate_specific_ipv6_subnet_specific_gateway(self): # Same scenario as described in bug #1466322 sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp', ['2210::/64'], 64, 6) sp = self.plugin._get_subnetpool(self.ctx, sp['id']) with db_api.CONTEXT_WRITER.using(self.ctx): sa = subnet_alloc.SubnetAllocator(sp, self.ctx) req = ipam_req.SpecificSubnetRequest(self._tenant_id, uuidutils.generate_uuid(), '2210::/64', '2210::ffff:ffff:ffff:ffff') res = sa.allocate_subnet(req) detail = res.get_details() self.assertEqual(netaddr.IPAddress('2210::ffff:ffff:ffff:ffff'), detail.gateway_ip) def test__allocation_value_for_tenant_no_allocations(self): sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp', ['10.1.0.0/16', '192.168.1.0/24'], 21, 4) sa = subnet_alloc.SubnetAllocator(sp, self.ctx) value = sa._allocations_used_by_tenant(32) self.assertEqual(0, value) def test_subnetpool_default_quota_exceeded(self): sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp', ['fe80::/48'], 48, 6, default_quota=1) sp = self.plugin._get_subnetpool(self.ctx, sp['id']) sa = subnet_alloc.SubnetAllocator(sp, self.ctx) req = ipam_req.SpecificSubnetRequest(self._tenant_id, uuidutils.generate_uuid(), 'fe80::/63') self.assertRaises(exceptions.SubnetPoolQuotaExceeded, sa.allocate_subnet, req) def test_subnetpool_concurrent_allocation_exception(self): sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp', ['fe80::/48'], 48, 6, default_quota=1) sp = self.plugin._get_subnetpool(self.ctx, sp['id']) sa = subnet_alloc.SubnetAllocator(sp, self.ctx) req = ipam_req.SpecificSubnetRequest(self._tenant_id, uuidutils.generate_uuid(), 'fe80::/63') with mock.patch("sqlalchemy.orm.query.Query.update", return_value=0): self.assertRaises(db_exc.RetryRequest, sa.allocate_subnet, req) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/ipam/test_utils.py0000644000175000017500000001275300000000000024465 0ustar00coreycorey00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from neutron_lib import constants from neutron.ipam import utils from neutron.tests import base class TestIpamUtils(base.BaseTestCase): def test_check_subnet_ip_v4_network(self): self.assertFalse(utils.check_subnet_ip('1.1.1.0/24', '1.1.1.0')) def test_check_subnet_ip_v4_broadcast(self): self.assertFalse(utils.check_subnet_ip('1.1.1.0/24', '1.1.1.255')) def test_check_subnet_ip_v4_valid(self): self.assertTrue(utils.check_subnet_ip('1.1.1.0/24', '1.1.1.1')) self.assertTrue(utils.check_subnet_ip('1.1.1.0/24', '1.1.1.254')) def test_check_subnet_ip_v6_owner_router_or_not_defined(self): for port_owner in (constants.ROUTER_PORT_OWNERS + ('', )): # IP address == network self.assertTrue(utils.check_subnet_ip('F111::0/64', 'F111::0', port_owner=port_owner)) # IP address == broadcast self.assertTrue(utils.check_subnet_ip( 'F111::0/64', 'F111::FFFF:FFFF:FFFF:FFFF', port_owner=port_owner)) # IP address in network self.assertTrue(utils.check_subnet_ip('F111::0/64', 'F111::50', port_owner=port_owner)) # IP address not in network self.assertFalse(utils.check_subnet_ip('F111::0/64', 'F112::50', port_owner=port_owner)) def test_check_subnet_ip_v6_owner_not_router(self): port_owner = 'nova:compute' # IP address == network self.assertFalse(utils.check_subnet_ip('F111::0/64', 'F111::0', port_owner=port_owner)) # IP address == broadcast self.assertTrue(utils.check_subnet_ip( 'F111::0/64', 'F111::FFFF:FFFF:FFFF:FFFF', port_owner=port_owner)) # IP address in network self.assertTrue(utils.check_subnet_ip('F111::0/64', 'F111::50', port_owner=port_owner)) # IP address not in network self.assertFalse(utils.check_subnet_ip('F111::0/64', 'F112::50', port_owner=port_owner)) def test_generate_pools_v4_nogateway(self): cidr = '192.168.0.0/24' expected = [netaddr.IPRange('192.168.0.1', '192.168.0.254')] self.assertEqual(expected, utils.generate_pools(cidr, None)) def test_generate_pools_v4_gateway_first(self): cidr = '192.168.0.0/24' gateway = '192.168.0.1' expected = [netaddr.IPRange('192.168.0.2', '192.168.0.254')] self.assertEqual(expected, utils.generate_pools(cidr, gateway)) def test_generate_pools_v4_gateway_last(self): cidr = '192.168.0.0/24' gateway = '192.168.0.254' expected = [netaddr.IPRange('192.168.0.1', '192.168.0.253')] self.assertEqual(expected, utils.generate_pools(cidr, gateway)) def test_generate_pools_v4_32(self): # 32 is special because it should have 1 usable address cidr = '192.168.0.0/32' expected = [netaddr.IPRange('192.168.0.0', '192.168.0.0')] self.assertEqual(expected, utils.generate_pools(cidr, None)) def test_generate_pools_v4_31(self): cidr = '192.168.0.0/31' expected = [] self.assertEqual(expected, utils.generate_pools(cidr, None)) def test_generate_pools_v4_gateway_middle(self): cidr = '192.168.0.0/24' gateway = '192.168.0.128' expected = [netaddr.IPRange('192.168.0.1', '192.168.0.127'), netaddr.IPRange('192.168.0.129', '192.168.0.254')] self.assertEqual(expected, utils.generate_pools(cidr, gateway)) def test_generate_pools_v6_nogateway(self): # other than the difference in the last address, the rest of the # logic is the same as v4 so we only need one test cidr = 
'F111::0/64' expected = [netaddr.IPRange('F111::1', 'F111::FFFF:FFFF:FFFF:FFFF')] self.assertEqual(expected, utils.generate_pools(cidr, None)) def test_generate_pools_v6_empty(self): # We want to be sure the range will begin and end with an IPv6 # address, even if an ambiguous ::/64 cidr is given. cidr = '::/64' expected = [netaddr.IPRange('::1', '::FFFF:FFFF:FFFF:FFFF')] self.assertEqual(expected, utils.generate_pools(cidr, None)) def test_check_gateway_invalid_in_subnet(self): data = [('10.0.0.1', '10.0.0.0/8', False), ('10.255.255.255', '10.0.0.0/8', True), ('10.0.0.0', '10.0.0.0/8', True), ('192.168.100.10', '10.0.0.0/8', False), ('2001:db8::1', '2001:db8::/64', False), ] for gw_ip, network_cidr, result in data: self.assertEqual(result, utils.check_gateway_invalid_in_subnet( network_cidr, gw_ip)) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.451046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/notifiers/0000755000175000017500000000000000000000000022760 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/notifiers/__init__.py0000644000175000017500000000000000000000000025057 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/notifiers/test_batch_notifier.py0000644000175000017500000000612000000000000027350 0ustar00coreycorey00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
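# ---------------------------------------------------------------------------
# Editor's sketch of the batching idea under test (illustrative only, NOT
# neutron.notifiers.batch_notifier, which is eventlet-based): events queued
# within one batch_interval are delivered to the callback as a single list,
# which is why the tests below expect batched callback invocations for 20
# queued events.
import threading


class TinyBatchNotifier(object):
    def __init__(self, batch_interval, callback):
        self.batch_interval = batch_interval
        self.callback = callback
        self._pending = []
        self._timer = None
        self._lock = threading.Lock()

    def queue_event(self, event):
        if not event:
            return
        with self._lock:
            self._pending.append(event)
            if self._timer is None:  # the first event arms a delayed flush
                self._timer = threading.Timer(self.batch_interval,
                                              self._flush)
                self._timer.start()

    def _flush(self):
        with self._lock:
            batch = list(self._pending)
            del self._pending[:]
            self._timer = None
        self.callback(batch)  # one callback per batch, events in order
# ---------------------------------------------------------------------------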
import eventlet
import mock

from neutron.common import utils
from neutron.notifiers import batch_notifier
from neutron.tests import base


class TestBatchNotifier(base.BaseTestCase):
    def setUp(self):
        super(TestBatchNotifier, self).setUp()
        self._received_events = eventlet.Queue()
        self.notifier = batch_notifier.BatchNotifier(2, self._queue_events)
        self.spawn_n_p = mock.patch.object(eventlet, 'spawn_n')

    def _queue_events(self, events):
        for event in events:
            self._received_events.put(event)

    def test_queue_event_no_event(self):
        spawn_n = self.spawn_n_p.start()
        self.notifier.queue_event(None)
        self.assertEqual(0, len(self.notifier._pending_events.queue))
        self.assertEqual(0, spawn_n.call_count)

    def test_queue_event_first_event(self):
        spawn_n = self.spawn_n_p.start()
        self.notifier.queue_event(mock.Mock())
        self.assertEqual(1, len(self.notifier._pending_events.queue))
        self.assertEqual(1, spawn_n.call_count)

    def test_queue_event_multiple_events_notify_method(self):
        def _batch_notifier_dequeue():
            while not self.notifier._pending_events.empty():
                self.notifier._pending_events.get()

        c_mock = mock.patch.object(self.notifier, '_notify',
                                   side_effect=_batch_notifier_dequeue).start()
        events = 20
        for i in range(events):
            self.notifier.queue_event('Event %s' % i)
            eventlet.sleep(0)  # yield to let coro execute

        utils.wait_until_true(self.notifier._pending_events.empty, timeout=5)
        # Called twice: when the first thread calls "synced_send" and then,
        # in the same loop, when self._pending_events is not empty(). All
        # self.notifier.queue_event calls are done in just one
        # "batch_interval" (2 secs).
        self.assertEqual(2, c_mock.call_count)

    def test_queue_event_multiple_events_callback_method(self):
        events = 20
        for i in range(events):
            self.notifier.queue_event('Event %s' % i)
            eventlet.sleep(0)  # yield to let coro execute

        utils.wait_until_true(self.notifier._pending_events.empty, timeout=5)
        expected = ['Event %s' % i for i in range(events)]
        # Check the events have been handled in the same input order.
        self.assertEqual(expected, list(self._received_events.queue))

neutron-16.0.0.0b2.dev214/neutron/tests/unit/notifiers/test_ironic.py

# Copyright (c) 2019 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
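# ---------------------------------------------------------------------------
# Editor's note: every assertion in this file checks the same event payload
# shape. A hypothetical helper showing how such a payload could be derived
# from a port dict (illustrative; the real translation lives in
# neutron.notifiers.ironic):
def port_to_ironic_event(port, event_name):
    payload = {'event': event_name,  # e.g. 'network.bind_port'
               'port_id': port['id'],
               'mac_address': port['mac_address'],
               'status': port.get('status', 'DELETED'),
               'binding:host_id': port['binding:host_id'],
               'binding:vnic_type': port['binding:vnic_type']}
    if port.get('device_id'):  # omitted when empty; see the
        payload['device_id'] = port['device_id']  # empty-uuid-field test
    return payload
# ---------------------------------------------------------------------------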
import eventlet import mock from neutron_lib.api.definitions import portbindings as portbindings_def from neutron_lib import constants as n_const from openstack import connection from openstack import exceptions as os_exc from neutron.notifiers import batch_notifier from neutron.notifiers import ironic from neutron.tests import base DEVICE_OWNER_BAREMETAL = n_const.DEVICE_OWNER_BAREMETAL_PREFIX + 'fake' def get_fake_port(): return {'id': '11111111-aaaa-bbbb-cccc-555555555555', 'binding:host_id': '22222222-aaaa-bbbb-cccc-555555555555', 'binding:vnic_type': portbindings_def.VNIC_BAREMETAL, 'device_id': '22222222-aaaa-bbbb-cccc-555555555555', 'mac_address': 'de:ad:ca:fe:ba:be'} class TestIronicNotifier(base.BaseTestCase): def setUp(self): super(TestIronicNotifier, self).setUp() with mock.patch.object(connection.Connection, 'baremetal', autospec=False): self.ironic_notifier = ironic.Notifier() @mock.patch.object(batch_notifier.BatchNotifier, 'queue_event', autospec=True) def test_process_port_update_event_bind_port(self, mock_queue_event): port = get_fake_port() port.update({'status': n_const.PORT_STATUS_ACTIVE}) original_port = get_fake_port() original_port.update({'status': n_const.PORT_STATUS_DOWN}) self.ironic_notifier.process_port_update_event( 'fake_resource', 'fake_event', 'fake_trigger', original_port=original_port, port=port, **{}) mock_queue_event.assert_called_with( self.ironic_notifier.batch_notifier, {'event': 'network.bind_port', 'binding:host_id': '22222222-aaaa-bbbb-cccc-555555555555', 'binding:vnic_type': portbindings_def.VNIC_BAREMETAL, 'device_id': '22222222-aaaa-bbbb-cccc-555555555555', 'port_id': '11111111-aaaa-bbbb-cccc-555555555555', 'mac_address': 'de:ad:ca:fe:ba:be', 'status': n_const.PORT_STATUS_ACTIVE}) @mock.patch.object(batch_notifier.BatchNotifier, 'queue_event', autospec=True) def test_process_port_update_event_bind_port_err(self, mock_queue_event): port = get_fake_port() port.update({'status': n_const.PORT_STATUS_ERROR}) original_port = get_fake_port() original_port.update({'status': n_const.PORT_STATUS_DOWN}) self.ironic_notifier.process_port_update_event( 'fake_resource', 'fake_event', 'fake_trigger', original_port=original_port, port=port, **{}) mock_queue_event.assert_called_with( self.ironic_notifier.batch_notifier, {'event': 'network.bind_port', 'binding:host_id': '22222222-aaaa-bbbb-cccc-555555555555', 'binding:vnic_type': portbindings_def.VNIC_BAREMETAL, 'device_id': '22222222-aaaa-bbbb-cccc-555555555555', 'port_id': '11111111-aaaa-bbbb-cccc-555555555555', 'mac_address': 'de:ad:ca:fe:ba:be', 'status': n_const.PORT_STATUS_ERROR}) @mock.patch.object(batch_notifier.BatchNotifier, 'queue_event', autospec=True) def test_process_port_update_event_unbind_port(self, mock_queue_event): port = get_fake_port() port.update({'status': n_const.PORT_STATUS_DOWN}) original_port = get_fake_port() original_port.update({'status': n_const.PORT_STATUS_ACTIVE}) self.ironic_notifier.process_port_update_event( 'fake_resource', 'fake_event', 'fake_trigger', original_port=original_port, port=port, **{}) mock_queue_event.assert_called_with( self.ironic_notifier.batch_notifier, {'event': 'network.unbind_port', 'binding:host_id': '22222222-aaaa-bbbb-cccc-555555555555', 'binding:vnic_type': portbindings_def.VNIC_BAREMETAL, 'device_id': '22222222-aaaa-bbbb-cccc-555555555555', 'port_id': '11111111-aaaa-bbbb-cccc-555555555555', 'mac_address': 'de:ad:ca:fe:ba:be', 'status': n_const.PORT_STATUS_DOWN}) @mock.patch.object(batch_notifier.BatchNotifier, 'queue_event', autospec=True) def 
test_process_port_update_event_unbind_port_err(self, mock_queue_event): port = get_fake_port() port.update({'status': n_const.PORT_STATUS_ERROR}) original_port = get_fake_port() original_port.update({'status': n_const.PORT_STATUS_ACTIVE}) self.ironic_notifier.process_port_update_event( 'fake_resource', 'fake_event', 'fake_trigger', original_port=original_port, port=port, **{}) mock_queue_event.assert_called_with( self.ironic_notifier.batch_notifier, {'event': 'network.unbind_port', 'binding:host_id': '22222222-aaaa-bbbb-cccc-555555555555', 'binding:vnic_type': portbindings_def.VNIC_BAREMETAL, 'device_id': '22222222-aaaa-bbbb-cccc-555555555555', 'port_id': '11111111-aaaa-bbbb-cccc-555555555555', 'mac_address': 'de:ad:ca:fe:ba:be', 'status': n_const.PORT_STATUS_ERROR}) @mock.patch.object(batch_notifier.BatchNotifier, 'queue_event', autospec=True) def test_process_port_delete_event(self, mock_queue_event): port = get_fake_port() self.ironic_notifier.process_port_delete_event( 'fake_resource', 'fake_event', 'fake_trigger', original_port=None, port=port, **{}) mock_queue_event.assert_called_with( self.ironic_notifier.batch_notifier, {'event': 'network.delete_port', 'binding:host_id': '22222222-aaaa-bbbb-cccc-555555555555', 'binding:vnic_type': portbindings_def.VNIC_BAREMETAL, 'device_id': '22222222-aaaa-bbbb-cccc-555555555555', 'port_id': '11111111-aaaa-bbbb-cccc-555555555555', 'mac_address': 'de:ad:ca:fe:ba:be', 'status': 'DELETED'}) @mock.patch.object(batch_notifier.BatchNotifier, 'queue_event', autospec=True) def test_process_port_event_empty_uuid_field(self, mock_queue_event): port = get_fake_port() port.update({'device_id': ''}) self.ironic_notifier.process_port_delete_event( 'fake_resource', 'fake_event', 'fake_trigger', original_port=None, port=port, **{}) mock_queue_event.assert_called_with( self.ironic_notifier.batch_notifier, {'event': 'network.delete_port', 'binding:host_id': '22222222-aaaa-bbbb-cccc-555555555555', 'binding:vnic_type': portbindings_def.VNIC_BAREMETAL, 'port_id': '11111111-aaaa-bbbb-cccc-555555555555', 'mac_address': 'de:ad:ca:fe:ba:be', 'status': 'DELETED'}) @mock.patch.object(eventlet, 'spawn_n', autospec=True) def test_queue_events(self, mock_spawn_n): port = get_fake_port() self.ironic_notifier.process_port_delete_event( 'fake_resource', 'fake_event', 'fake_trigger', original_port=None, port=port, **{}) port = get_fake_port() port.update({'status': n_const.PORT_STATUS_ACTIVE}) original_port = get_fake_port() original_port.update({'status': n_const.PORT_STATUS_DOWN}) self.ironic_notifier.process_port_update_event( 'fake_resource', 'fake_event', 'fake_trigger', original_port=original_port, port=port, **{}) self.assertEqual( 2, len(self.ironic_notifier.batch_notifier._pending_events.queue)) self.assertEqual(2, mock_spawn_n.call_count) @mock.patch.object(os_exc, 'raise_from_response', return_value=None) @mock.patch.object(connection.Connection, 'baremetal', autospec=True) def test_send_events(self, mock_client, mock_os_raise_exc): self.ironic_notifier.irclient = mock_client self.ironic_notifier.send_events(['test', 'events']) mock_client.post.assert_called_with( '/events', json={'events': ['test', 'events']}, microversion='1.54') @mock.patch.object(ironic.LOG, 'exception', autospec=True) @mock.patch.object(connection.Connection, 'baremetal', autospec=True) def test_send_event_exception(self, mock_client, mock_log): self.ironic_notifier.irclient = mock_client mock_client.post.side_effect = Exception() self.ironic_notifier.send_events(['test', 'events']) 
self.assertEqual(1, mock_log.call_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/notifiers/test_nova.py0000644000175000017500000004127600000000000025346 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib import constants as n_const from neutron_lib import context as n_ctx from neutron_lib import exceptions as n_exc from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from novaclient import api_versions from novaclient import exceptions as nova_exceptions from oslo_config import cfg from oslo_utils import uuidutils from sqlalchemy.orm import attributes as sql_attr from neutron.notifiers import nova from neutron.objects import ports as port_obj from neutron.tests import base DEVICE_OWNER_COMPUTE = n_const.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' DEVICE_OWNER_BAREMETAL = n_const.DEVICE_OWNER_BAREMETAL_PREFIX + 'fake' class TestNovaNotify(base.BaseTestCase): def setUp(self, plugin=None): super(TestNovaNotify, self).setUp() self.ctx = n_ctx.get_admin_context() self.port_uuid = uuidutils.generate_uuid() class FakePlugin(object): def get_port(self, context, port_id): device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' return {'device_id': device_id, 'device_owner': DEVICE_OWNER_COMPUTE, 'id': port_id} self.nova_notifier = nova.Notifier() directory.add_plugin(plugin_constants.CORE, FakePlugin()) def test_notify_port_status_all_values(self): states = [n_const.PORT_STATUS_ACTIVE, n_const.PORT_STATUS_DOWN, n_const.PORT_STATUS_ERROR, n_const.PORT_STATUS_BUILD, sql_attr.NO_VALUE] device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' # test all combinations for previous_port_status in states: for current_port_status in states: port = port_obj.Port(self.ctx, id=self.port_uuid, device_id=device_id, device_owner=DEVICE_OWNER_COMPUTE, status=current_port_status) self._record_port_status_changed_helper(current_port_status, previous_port_status, port) def test_port_without_uuid_device_id_no_notify(self): port = port_obj.Port(self.ctx, id=self.port_uuid, device_id='compute_probe:', device_owner=DEVICE_OWNER_COMPUTE, status=n_const.PORT_STATUS_ACTIVE) self._record_port_status_changed_helper(n_const.PORT_STATUS_ACTIVE, sql_attr.NO_VALUE, port) def test_port_without_device_owner_no_notify(self): device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' port = port_obj.Port(self.ctx, id=self.port_uuid, device_id=device_id, device_owner="", status=n_const.PORT_STATUS_ACTIVE) self._record_port_status_changed_helper(n_const.PORT_STATUS_ACTIVE, sql_attr.NO_VALUE, port) def test_port_without_device_id_no_notify(self): port = port_obj.Port(self.ctx, id=self.port_uuid, device_id="", device_owner=n_const.DEVICE_OWNER_DHCP, status=n_const.PORT_STATUS_ACTIVE) self._record_port_status_changed_helper(n_const.PORT_STATUS_ACTIVE, sql_attr.NO_VALUE, port) def 
test_non_compute_instances_no_notify(self): device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' port = port_obj.Port(self.ctx, id=self.port_uuid, device_id=device_id, device_owner=n_const.DEVICE_OWNER_DHCP, status=n_const.PORT_STATUS_ACTIVE) self._record_port_status_changed_helper(n_const.PORT_STATUS_ACTIVE, sql_attr.NO_VALUE, port) def _record_port_status_changed_helper(self, current_port_status, previous_port_status, port): if not (port.device_id and port.id and port.device_owner and port.device_owner.startswith( n_const.DEVICE_OWNER_COMPUTE_PREFIX) and uuidutils.is_uuid_like(port.device_id)): return if (previous_port_status == n_const.PORT_STATUS_ACTIVE and current_port_status == n_const.PORT_STATUS_DOWN): event_name = nova.VIF_UNPLUGGED elif (previous_port_status in [sql_attr.NO_VALUE, n_const.PORT_STATUS_DOWN, n_const.PORT_STATUS_BUILD] and current_port_status in [n_const.PORT_STATUS_ACTIVE, n_const.PORT_STATUS_ERROR]): event_name = nova.VIF_PLUGGED else: return status = nova.NEUTRON_NOVA_EVENT_STATUS_MAP.get(current_port_status) self.nova_notifier.record_port_status_changed(port, current_port_status, previous_port_status, None) event = {'server_uuid': port.device_id, 'status': status, 'name': event_name, 'tag': self.port_uuid} self.assertEqual(event, port._notify_event) def test_update_fixed_ip_changed(self): device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' returned_obj = {'port': {'device_owner': DEVICE_OWNER_COMPUTE, 'id': u'bee50827-bcee-4cc8-91c1-a27b0ce54222', 'device_id': device_id}} expected_event = {'server_uuid': device_id, 'name': 'network-changed', 'tag': returned_obj['port']['id']} event = self.nova_notifier.create_port_changed_event('update_port', {}, returned_obj) self.assertEqual(event, expected_event) def test_create_floatingip_notify(self): device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' returned_obj = {'floatingip': {'port_id': u'bee50827-bcee-4cc8-91c1-a27b0ce54222'}} expected_event = {'server_uuid': device_id, 'name': 'network-changed', 'tag': returned_obj['floatingip']['port_id']} event = self.nova_notifier.create_port_changed_event( 'create_floatingip', {}, returned_obj) self.assertEqual(event, expected_event) def test_create_floatingip_no_port_id_no_notify(self): returned_obj = {'floatingip': {'port_id': None}} event = self.nova_notifier.create_port_changed_event( 'create_floatingip', {}, returned_obj) self.assertIsNone(event) def test_delete_floatingip_notify(self): device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' returned_obj = {'floatingip': {'port_id': u'bee50827-bcee-4cc8-91c1-a27b0ce54222'}} expected_event = {'server_uuid': device_id, 'name': 'network-changed', 'tag': returned_obj['floatingip']['port_id']} event = self.nova_notifier.create_port_changed_event( 'delete_floatingip', {}, returned_obj) self.assertEqual(expected_event, event) def test_delete_floatingip_deleted_port_no_notify(self): port_id = 'bee50827-bcee-4cc8-91c1-a27b0ce54222' with mock.patch.object(directory.get_plugin(), 'get_port', side_effect=n_exc.PortNotFound(port_id=port_id)): returned_obj = {'floatingip': {'port_id': port_id}} event = self.nova_notifier.create_port_changed_event( 'delete_floatingip', {}, returned_obj) self.assertIsNone(event) def test_delete_floatingip_no_port_id_no_notify(self): returned_obj = {'floatingip': {'port_id': None}} event = self.nova_notifier.create_port_changed_event( 'delete_floatingip', {}, returned_obj) self.assertIsNone(event) def test_associate_floatingip_notify(self): device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' returned_obj = 
        {'floatingip':
            {'port_id': u'5a39def4-3d3f-473d-9ff4-8e90064b9cc1'}}
        original_obj = {'port_id': None}
        expected_event = {'server_uuid': device_id,
                          'name': 'network-changed',
                          'tag': returned_obj['floatingip']['port_id']}
        event = self.nova_notifier.create_port_changed_event(
            'update_floatingip', original_obj, returned_obj)
        self.assertEqual(expected_event, event)

    def test_disassociate_floatingip_notify(self):
        device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
        returned_obj = {'floatingip': {'port_id': None}}
        original_obj = {'port_id': '5a39def4-3d3f-473d-9ff4-8e90064b9cc1'}
        expected_event = {'server_uuid': device_id,
                          'name': 'network-changed',
                          'tag': original_obj['port_id']}
        event = self.nova_notifier.create_port_changed_event(
            'update_floatingip', original_obj, returned_obj)
        self.assertEqual(expected_event, event)

    def test_no_notification_notify_nova_on_port_data_changes_false(self):
        cfg.CONF.set_override('notify_nova_on_port_data_changes', False)
        with mock.patch.object(self.nova_notifier,
                               'send_events') as send_events:
            self.nova_notifier.send_network_change('update_floatingip',
                                                   {}, {})
            self.assertFalse(send_events.called)

    @mock.patch('novaclient.client.Client')
    def test_nova_send_events_returns_bad_list(self, mock_client):
        mock_client.server_external_events.create.return_value = (
            'i am a string!')
        self.nova_notifier.send_events([])

    @mock.patch('novaclient.client.Client')
    def test_nova_send_event_raises_404(self, mock_client):
        mock_client.server_external_events.create.return_value = (
            nova_exceptions.NotFound)
        self.nova_notifier.send_events([])

    @mock.patch('novaclient.client.Client')
    def test_nova_send_events_raises(self, mock_client):
        mock_client.server_external_events.create.return_value = Exception
        self.nova_notifier.send_events([])

    @mock.patch('novaclient.client.Client')
    def test_nova_send_events_returns_non_200(self, mock_client):
        device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
        mock_client.server_external_events.create.return_value = [
            {'code': 404,
             'name': 'network-changed',
             'server_uuid': device_id}]
        self.nova_notifier.send_events([{'name': 'network-changed',
                                         'server_uuid': device_id}])

    @mock.patch('novaclient.client.Client')
    def test_nova_send_events_return_200(self, mock_client):
        device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
        mock_client.server_external_events.create.return_value = [
            {'code': 200,
             'name': 'network-changed',
             'server_uuid': device_id}]
        self.nova_notifier.send_events([{'name': 'network-changed',
                                         'server_uuid': device_id}])

    @mock.patch('novaclient.client.Client')
    def test_nova_send_events_multiple(self, mock_create):
        device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
        mock_create.server_external_events.create.return_value = [
            {'code': 200,
             'name': 'network-changed',
             'server_uuid': device_id},
            {'code': 200,
             'name': 'network-changed',
             'server_uuid': device_id}]
        self.nova_notifier.send_events([
            {'name': 'network-changed', 'server_uuid': device_id},
            {'name': 'network-changed', 'server_uuid': device_id}])

    def test_reassociate_floatingip_without_disassociate_event(self):
        returned_obj = {'floatingip':
                        {'port_id': 'f5348a16-609a-4971-b0f0-4b8def5235fb'}}
        original_obj = {'port_id': '5a39def4-3d3f-473d-9ff4-8e90064b9cc1'}
        self.nova_notifier._waiting_to_send = True
        self.nova_notifier.send_network_change(
            'update_floatingip', original_obj, returned_obj)
        self.assertEqual(
            2, len(self.nova_notifier.batch_notifier._pending_events.queue))

        returned_obj_non = {'floatingip': {'port_id': None}}
        event_dis = self.nova_notifier.create_port_changed_event(
            'update_floatingip',
original_obj, returned_obj_non) event_assoc = self.nova_notifier.create_port_changed_event( 'update_floatingip', original_obj, returned_obj) self.assertEqual( self.nova_notifier.batch_notifier._pending_events.get(), event_dis) self.assertEqual( self.nova_notifier.batch_notifier._pending_events.get(), event_assoc) def test_delete_port_notify(self): device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' port_id = 'bee50827-bcee-4cc8-91c1-a27b0ce54222' returned_obj = {'port': {'device_owner': DEVICE_OWNER_COMPUTE, 'id': port_id, 'device_id': device_id}} expected_event = {'server_uuid': device_id, 'name': nova.VIF_DELETED, 'tag': port_id} event = self.nova_notifier.create_port_changed_event('delete_port', {}, returned_obj) self.assertEqual(expected_event, event) @mock.patch('novaclient.client.Client') def test_endpoint_types(self, mock_client): device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' batched_events = [{'code': 200, 'name': 'network-changed', 'server_uuid': device_id}] response = [{'name': 'network-changed', 'server_uuid': device_id}] mock_client.server_external_events.create.return_value = ( batched_events) self.nova_notifier.send_events(response) mock_client.assert_called_once_with( api_versions.APIVersion(nova.NOVA_API_VERSION), session=mock.ANY, region_name=cfg.CONF.nova.region_name, endpoint_type='public', extensions=mock.ANY, global_request_id=mock.ANY) mock_client.reset_mock() cfg.CONF.set_override('endpoint_type', 'internal', 'nova') mock_client.server_external_events.create.return_value = ( batched_events) self.nova_notifier.send_events(response) mock_client.assert_called_once_with( api_versions.APIVersion(nova.NOVA_API_VERSION), session=mock.ANY, region_name=cfg.CONF.nova.region_name, endpoint_type='internal', extensions=mock.ANY, global_request_id=mock.ANY) def test_notify_port_active_direct(self): device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' port_id = 'bee50827-bcee-4cc8-91c1-a27b0ce54222' port = port_obj.Port(self.ctx, id=port_id, device_id=device_id, device_owner=DEVICE_OWNER_COMPUTE) expected_event = {'server_uuid': device_id, 'name': nova.VIF_PLUGGED, 'status': 'completed', 'tag': port_id} self.nova_notifier.notify_port_active_direct(port) self.assertEqual( 1, len(self.nova_notifier.batch_notifier._pending_events.queue)) self.assertEqual( expected_event, self.nova_notifier.batch_notifier._pending_events.get()) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.451046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/0000755000175000017500000000000000000000000022407 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/__init__.py0000644000175000017500000000000000000000000024506 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.451046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/db/0000755000175000017500000000000000000000000022774 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/db/__init__.py0000644000175000017500000000000000000000000025073 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 
neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/db/test_api.py0000644000175000017500000002343700000000000025167 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import mock from neutron_lib import context from neutron_lib.db import model_query from neutron_lib import exceptions as n_exc from neutron_lib.objects import utils as obj_utils from neutron.objects import base from neutron.objects.db import api from neutron.objects import network from neutron.tests import base as test_base from neutron.tests.unit import testlib_api class FakeModel(object): def __init__(self, *args, **kwargs): pass class FakeObj(base.NeutronDbObject): db_model = FakeModel class GetObjectsTestCase(test_base.BaseTestCase): def test_get_objects_pass_marker_obj_when_limit_and_marker_passed(self): ctxt = context.get_admin_context() marker = mock.sentinel.marker limit = mock.sentinel.limit pager = base.Pager(marker=marker, limit=limit) with mock.patch.object( model_query, 'get_collection') as get_collection: with mock.patch.object(api, 'get_object') as get_object: api.get_objects(FakeObj, ctxt, _pager=pager) get_object.assert_called_with(FakeObj, ctxt, id=marker) get_collection.assert_called_with( ctxt, FakeObj.db_model, dict_func=None, filters={}, limit=limit, marker_obj=get_object.return_value) class GetValuesTestCase(test_base.BaseTestCase): def test_get_values(self): ctxt = context.get_admin_context() fake_field = 'fake_field' with mock.patch.object( model_query, 'get_values') as get_values: api.get_values(FakeObj, ctxt, fake_field) get_values.assert_called_with( ctxt, FakeObj.db_model, fake_field, filters={}) class CreateObjectTestCase(test_base.BaseTestCase): def test_populate_id(self, populate_id=True): ctxt = context.get_admin_context() values = {'x': 1, 'y': 2, 'z': 3} with mock.patch.object(FakeObj, 'db_model') as db_model_mock: with mock.patch.object(ctxt.__class__, 'session'): api.create_object(FakeObj, ctxt, values, populate_id=populate_id) expected = copy.copy(values) if populate_id: expected['id'] = mock.ANY db_model_mock.assert_called_with(**expected) def test_populate_id_False(self): self.test_populate_id(populate_id=False) class CRUDScenarioTestCase(testlib_api.SqlTestCase): CORE_PLUGIN = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' def setUp(self): super(CRUDScenarioTestCase, self).setUp() # TODO(ihrachys): revisit plugin setup once we decouple # neutron.objects.db.api from core plugin instance self.setup_coreplugin(self.CORE_PLUGIN) # NOTE(ihrachys): nothing specific to networks in this test case, but # we needed to pick some real object, so we picked the network. Any # other object would work as well for our needs here. 
self.obj_cls = network.Network self.ctxt = context.get_admin_context() def test_get_object_with_None_value_in_filters(self): obj = api.create_object(self.obj_cls, self.ctxt, {'name': 'foo'}) new_obj = api.get_object( self.obj_cls, self.ctxt, name='foo', status=None) self.assertEqual(obj, new_obj) def test_get_objects_with_None_value_in_filters(self): obj = api.create_object(self.obj_cls, self.ctxt, {'name': 'foo'}) new_objs = api.get_objects( self.obj_cls, self.ctxt, name='foo', status=None) self.assertEqual(obj, new_objs[0]) def test_get_objects_with_string_matching_filters_contains(self): obj1 = api.create_object( self.obj_cls, self.ctxt, {'name': 'obj_con_1'}) obj2 = api.create_object( self.obj_cls, self.ctxt, {'name': 'obj_con_2'}) obj3 = api.create_object( self.obj_cls, self.ctxt, {'name': 'obj_3'}) objs = api.get_objects( self.obj_cls, self.ctxt, name=obj_utils.StringContains('con')) self.assertEqual(2, len(objs)) self.assertIn(obj1, objs) self.assertIn(obj2, objs) self.assertNotIn(obj3, objs) def test_get_objects_with_string_matching_filters_starts(self): obj1 = api.create_object(self.obj_cls, self.ctxt, {'name': 'pre_obj1'}) obj2 = api.create_object(self.obj_cls, self.ctxt, {'name': 'pre_obj2'}) obj3 = api.create_object(self.obj_cls, self.ctxt, {'name': 'obj_3'}) objs = api.get_objects( self.obj_cls, self.ctxt, name=obj_utils.StringStarts('pre')) self.assertEqual(2, len(objs)) self.assertIn(obj1, objs) self.assertIn(obj2, objs) self.assertNotIn(obj3, objs) def test_get_objects_with_string_matching_filters_ends(self): obj1 = api.create_object(self.obj_cls, self.ctxt, {'name': 'obj1_end'}) obj2 = api.create_object(self.obj_cls, self.ctxt, {'name': 'obj2_end'}) obj3 = api.create_object(self.obj_cls, self.ctxt, {'name': 'obj_3'}) objs = api.get_objects( self.obj_cls, self.ctxt, name=obj_utils.StringEnds('end')) self.assertEqual(2, len(objs)) self.assertIn(obj1, objs) self.assertIn(obj2, objs) self.assertNotIn(obj3, objs) def test_get_values_with_None_value_in_filters(self): api.create_object(self.obj_cls, self.ctxt, {'name': 'foo'}) values = api.get_values( self.obj_cls, self.ctxt, 'name', name='foo', status=None) self.assertEqual('foo', values[0]) def test_get_values_with_string_matching_filters_contains(self): api.create_object( self.obj_cls, self.ctxt, {'name': 'obj_con_1'}) api.create_object( self.obj_cls, self.ctxt, {'name': 'obj_con_2'}) api.create_object( self.obj_cls, self.ctxt, {'name': 'obj_3'}) values = api.get_values( self.obj_cls, self.ctxt, 'name', name=obj_utils.StringContains('con')) self.assertEqual(2, len(values)) self.assertIn('obj_con_1', values) self.assertIn('obj_con_2', values) self.assertNotIn('obj_3', values) def test_get_values_with_string_matching_filters_starts(self): api.create_object(self.obj_cls, self.ctxt, {'name': 'pre_obj1'}) api.create_object(self.obj_cls, self.ctxt, {'name': 'pre_obj2'}) api.create_object(self.obj_cls, self.ctxt, {'name': 'obj_3'}) values = api.get_values( self.obj_cls, self.ctxt, 'name', name=obj_utils.StringStarts('pre')) self.assertEqual(2, len(values)) self.assertIn('pre_obj1', values) self.assertIn('pre_obj2', values) self.assertNotIn('obj_3', values) def test_get_values_with_string_matching_filters_ends(self): api.create_object(self.obj_cls, self.ctxt, {'name': 'obj1_end'}) api.create_object(self.obj_cls, self.ctxt, {'name': 'obj2_end'}) api.create_object(self.obj_cls, self.ctxt, {'name': 'obj_3'}) values = api.get_values( self.obj_cls, self.ctxt, 'name', name=obj_utils.StringEnds('end')) self.assertEqual(2, len(values)) 
        self.assertIn('obj1_end', values)
        self.assertIn('obj2_end', values)
        self.assertNotIn('obj_3', values)

    def test_get_object_create_update_delete(self):
        obj = api.create_object(self.obj_cls, self.ctxt, {'name': 'foo'})

        new_obj = api.get_object(self.obj_cls, self.ctxt, id=obj.id)
        self.assertEqual(obj, new_obj)

        obj = new_obj
        api.update_object(self.obj_cls, self.ctxt, {'name': 'bar'}, id=obj.id)
        new_obj = api.get_object(self.obj_cls, self.ctxt, id=obj.id)
        self.assertEqual(obj, new_obj)

        obj = new_obj
        api.delete_object(self.obj_cls, self.ctxt, id=obj.id)
        new_obj = api.get_object(self.obj_cls, self.ctxt, id=obj.id)
        self.assertIsNone(new_obj)

        # delete_object raises an exception on a missing object
        self.assertRaises(
            n_exc.ObjectNotFound,
            api.delete_object, self.obj_cls, self.ctxt, id=obj.id)

        # but delete_objects does not
        api.delete_objects(self.obj_cls, self.ctxt, id=obj.id)

    def test_delete_objects_removes_all_matching_objects(self):
        # create some objects with identical description
        for i in range(10):
            api.create_object(
                self.obj_cls, self.ctxt,
                {'name': 'foo%d' % i, 'description': 'bar'})
        # create some more objects with a different description
        descriptions = set()
        for i in range(10, 20):
            desc = 'bar%d' % i
            descriptions.add(desc)
            api.create_object(
                self.obj_cls, self.ctxt,
                {'name': 'foo%d' % i, 'description': desc})
        # make sure that all objects are in the database
        self.assertEqual(20, api.count(self.obj_cls, self.ctxt))
        # now delete just those with the 'bar' description
        api.delete_objects(self.obj_cls, self.ctxt, description='bar')
        # check that half of the objects are gone, and the remaining ones
        # have the expected descriptions
        objs = api.get_objects(self.obj_cls, self.ctxt)
        self.assertEqual(10, len(objs))
        self.assertEqual(
            descriptions, {obj.description for obj in objs})

neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/logapi/test_logging_resource.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
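# ---------------------------------------------------------------------------
# Editor's note: the tests below follow the usual versioned-object lifecycle
# -- build with field values, create(), mutate with update_fields(), persist
# with update(), re-read with get_object(). Calling pattern only (names taken
# from the tests below; not a standalone runnable fixture):
#
#     log = log_res.Log(context=ctx, id=uuidutils.generate_uuid(),
#                       name='x', resource_type='security_group',
#                       enabled=False)
#     log.create()
#     log.update_fields({'enabled': True})
#     log.update()
#     assert log_res.Log.get_object(ctx, id=log.id).enabled
# ---------------------------------------------------------------------------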
from oslo_utils import uuidutils

from neutron.objects.logapi import logging_resource as log_res
from neutron.objects import securitygroup
from neutron.tests.unit.objects import test_base
from neutron.tests.unit import testlib_api


class LogObjectTestCase(test_base.BaseObjectIfaceTestCase):
    _test_class = log_res.Log


class LogDBObjectTestCase(test_base.BaseDbObjectTestCase,
                          testlib_api.SqlTestCase):
    _test_class = log_res.Log

    def setUp(self):
        super(LogDBObjectTestCase, self).setUp()
        self._network_id = self._create_test_network_id()
        self._port_id = self._create_test_port_id(network_id=self._network_id)
        self._security_group = self._create_test_security_group()
        self.update_obj_fields({'resource_id': self._security_group['id'],
                                'target_id': self._port_id})

    def _create_test_security_group(self):
        sg_fields = self.get_random_object_fields(securitygroup.SecurityGroup)
        sg_obj = securitygroup.SecurityGroup(self.context, **sg_fields)
        return sg_obj

    def test_create_sg_log_with_secgroup(self):
        sg = self._create_test_security_group()
        sg_log = log_res.Log(context=self.context,
                             id=uuidutils.generate_uuid(),
                             name='test-create',
                             resource_type='security_group',
                             resource_id=sg.id,
                             enabled=False)
        sg_log.create()
        self.assertEqual(sg.id, sg_log.resource_id)

    def test_create_sg_log_with_port(self):
        port_id = self._create_test_port_id(network_id=self._network_id)
        sg_log = log_res.Log(context=self.context,
                             id=uuidutils.generate_uuid(),
                             name='test-create',
                             resource_type='security_group',
                             target_id=port_id,
                             enabled=False)
        sg_log.create()
        self.assertEqual(port_id, sg_log.target_id)

    def test_update_multiple_log_fields(self):
        sg_log = log_res.Log(context=self.context,
                             id=uuidutils.generate_uuid(),
                             name='test-create',
                             description='test-description',
                             resource_type='security_group',
                             enabled=False)
        sg_log.create()
        fields = {'name': 'test-update', 'description': 'test-update-descr',
                  'enabled': True}
        sg_log.update_fields(fields)
        sg_log.update()
        new_sg_log = log_res.Log.get_object(self.context, id=sg_log.id)
        self._assert_attrs(new_sg_log, **fields)

    def _assert_attrs(self, sg_log, **kwargs):
        """Check the values passed in kwargs match the values of the sg log"""
        for k in sg_log.fields:
            if k in kwargs:
                self.assertEqual(kwargs[k], sg_log[k])

neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/plugins/ml2/test_base.py

#
# Copyright (c) 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


class SegmentAllocationDbObjTestCase(object):

    def test_get_unallocated_segments(self):
        self.assertEqual(
            [], self._test_class.get_unallocated_segments(self.context))

        obj = self.objs[0]
        obj.allocated = True
        obj.create()
        self.assertEqual(
            [], self._test_class.get_unallocated_segments(self.context))

        obj = self.objs[1]
        obj.allocated = False
        obj.create()
        allocations = self._test_class.get_unallocated_segments(self.context)
        self.assertEqual(1, len(allocations))
        self.assertEqual(obj.segmentation_id, allocations[0].segmentation_id)

neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/plugins/ml2/test_flatallocation.py

# Copyright (c) 2016 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.objects.plugins.ml2 import flatallocation
from neutron.tests.unit.objects import test_base
from neutron.tests.unit import testlib_api


class FlatAllocationIfaceObjTestCase(test_base.BaseObjectIfaceTestCase):
    _test_class = flatallocation.FlatAllocation


class FlatAllocationDbObjTestCase(test_base.BaseDbObjectTestCase,
                                  testlib_api.SqlTestCase):
    _test_class = flatallocation.FlatAllocation

neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/plugins/ml2/test_geneveallocation.py

# Copyright (c) 2016 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
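# ---------------------------------------------------------------------------
# Editor's note: this file and the gre/vlan/vxlan siblings below all reuse
# SegmentAllocationDbObjTestCase from ml2/test_base.py above as a mixin, so
# each segment type inherits the get_unallocated_segments() coverage for
# free. Sketch of the pattern (Hypothetical* names are placeholders, not
# real neutron classes):
#
#     class HypotheticalAllocationDbObjTestCase(
#             test_base.BaseDbObjectTestCase,
#             testlib_api.SqlTestCase,
#             ml2_test_base.SegmentAllocationDbObjTestCase):
#         _test_class = hypothetical.HypotheticalAllocation
# ---------------------------------------------------------------------------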
from neutron.objects.plugins.ml2 import geneveallocation from neutron.tests.unit.objects.plugins.ml2 import test_base as ml2_test_base from neutron.tests.unit.objects import test_base from neutron.tests.unit import testlib_api class GeneveAllocationIfaceObjTestCase(test_base.BaseObjectIfaceTestCase): _test_class = geneveallocation.GeneveAllocation class GeneveAllocationDbObjTestCase( test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase, ml2_test_base.SegmentAllocationDbObjTestCase): _test_class = geneveallocation.GeneveAllocation class GeneveEndpointIfaceObjTestCase(test_base.BaseObjectIfaceTestCase): _test_class = geneveallocation.GeneveEndpoint class GeneveEndpointDbObjTestCase(test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = geneveallocation.GeneveEndpoint ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/plugins/ml2/test_greallocation.py0000644000175000017500000000265100000000000031022 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.objects.plugins.ml2 import greallocation as gre_object from neutron.tests.unit.objects.plugins.ml2 import test_base as ml2_test_base from neutron.tests.unit.objects import test_base from neutron.tests.unit import testlib_api class GreAllocationIfaceObjTestCase(test_base.BaseObjectIfaceTestCase): _test_class = gre_object.GreAllocation class GreAllocationDbObjTestCase( test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase, ml2_test_base.SegmentAllocationDbObjTestCase): _test_class = gre_object.GreAllocation class GreEndpointIfaceObjTestCase(test_base.BaseObjectIfaceTestCase): _test_class = gre_object.GreEndpoint class GreEndpointDbObjTestCase(test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = gre_object.GreEndpoint ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/plugins/ml2/test_vlanallocation.py0000644000175000017500000000222200000000000031177 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.objects.plugins.ml2 import vlanallocation from neutron.tests.unit.objects.plugins.ml2 import test_base as ml2_test_base from neutron.tests.unit.objects import test_base from neutron.tests.unit import testlib_api class VlanAllocationIfaceObjTestCase(test_base.BaseObjectIfaceTestCase): _test_class = vlanallocation.VlanAllocation class VlanAllocationDbObjTestCase( test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase, ml2_test_base.SegmentAllocationDbObjTestCase): _test_class = vlanallocation.VlanAllocation ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/plugins/ml2/test_vxlanallocation.py0000644000175000017500000000267000000000000031376 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.objects.plugins.ml2 import vxlanallocation as vxlan_obj from neutron.tests.unit.objects.plugins.ml2 import test_base as ml2_test_base from neutron.tests.unit.objects import test_base from neutron.tests.unit import testlib_api class VxlanAllocationIfaceObjTestCase(test_base.BaseObjectIfaceTestCase): _test_class = vxlan_obj.VxlanAllocation class VxlanAllocationDbObjTestCase( test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase, ml2_test_base.SegmentAllocationDbObjTestCase): _test_class = vxlan_obj.VxlanAllocation class VxlanEndpointIfaceObjTestCase(test_base.BaseObjectIfaceTestCase): _test_class = vxlan_obj.VxlanEndpoint class VxlanEndpointDbObjTestCase(test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = vxlan_obj.VxlanEndpoint ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.455046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/port/0000755000175000017500000000000000000000000023373 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/port/__init__.py0000644000175000017500000000000000000000000025472 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.455046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/port/extensions/0000755000175000017500000000000000000000000025572 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/port/extensions/__init__.py0000644000175000017500000000000000000000000027671 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/port/extensions/test_allowedaddresspairs.py0000644000175000017500000000246600000000000033247 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); 
you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.objects.port.extensions import allowedaddresspairs from neutron.tests.unit.objects import test_base as obj_test_base from neutron.tests.unit import testlib_api class AllowedAddrPairsIfaceObjTestCase(obj_test_base.BaseObjectIfaceTestCase): _test_class = allowedaddresspairs.AllowedAddressPair # TODO(mhickey): Add common base db test class specifically for port extensions class AllowedAddrPairsDbObjTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = allowedaddresspairs.AllowedAddressPair def setUp(self): super(AllowedAddrPairsDbObjTestCase, self).setUp() self.update_obj_fields( {'port_id': lambda: self._create_test_port_id()}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/port/extensions/test_data_plane_status.py0000644000175000017500000000255300000000000032703 0ustar00coreycorey00000000000000# Copyright (c) 2017 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.objects.port.extensions import data_plane_status from neutron.tests.unit.objects import test_base as obj_test_base from neutron.tests.unit import testlib_api class DataPlaneStatusIfaceObjTestCase(obj_test_base.BaseObjectIfaceTestCase): _test_class = data_plane_status.PortDataPlaneStatus class DataPlaneStatusDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = data_plane_status.PortDataPlaneStatus def setUp(self): super(DataPlaneStatusDbObjectTestCase, self).setUp() net = self._create_test_network() getter = lambda: self._create_test_port(network_id=net.id).id self.update_obj_fields({'port_id': getter}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/port/extensions/test_extra_dhcp_opt.py0000644000175000017500000000230700000000000032210 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 

neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/port/extensions/test_extra_dhcp_opt.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.objects.port.extensions import extra_dhcp_opt
from neutron.tests.unit.objects import test_base as obj_test_base
from neutron.tests.unit import testlib_api


class ExtraDhcpOptIfaceObjectTestCase(obj_test_base.BaseObjectIfaceTestCase):
    _test_class = extra_dhcp_opt.ExtraDhcpOpt


class ExtraDhcpOptDbObjectTestCase(obj_test_base.BaseDbObjectTestCase,
                                   testlib_api.SqlTestCase):
    _test_class = extra_dhcp_opt.ExtraDhcpOpt

    def setUp(self):
        super(ExtraDhcpOptDbObjectTestCase, self).setUp()
        self.update_obj_fields(
            {'port_id': lambda: self._create_test_port_id()})


neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/port/extensions/test_port_security.py

# Copyright 2013 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.objects.port.extensions import port_security
from neutron.tests.unit.objects import test_base as obj_test_base
from neutron.tests.unit import testlib_api


class PortSecurityIfaceObjTestCase(obj_test_base.BaseObjectIfaceTestCase):
    _test_class = port_security.PortSecurity


class PortSecurityDbObjTestCase(obj_test_base.BaseDbObjectTestCase,
                                testlib_api.SqlTestCase):
    _test_class = port_security.PortSecurity

    def setUp(self):
        super(PortSecurityDbObjTestCase, self).setUp()
        network_id = self._create_test_network_id()
        for obj in self.db_objs:
            self._create_test_port(id=obj['port_id'],
                                   network_id=network_id)

neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/port/extensions/test_uplink_status_propagation.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.objects.port.extensions import uplink_status_propagation
from neutron.tests.unit.objects import test_base as obj_test_base
from neutron.tests.unit import testlib_api


class UplinkStatusPropagationIfaceObjectTestCase(
        obj_test_base.BaseObjectIfaceTestCase):
    _test_class = uplink_status_propagation.PortUplinkStatusPropagation


class UplinkStatusPropagationDbObjectTestCase(
        obj_test_base.BaseDbObjectTestCase,
        testlib_api.SqlTestCase):
    _test_class = uplink_status_propagation.PortUplinkStatusPropagation

    def setUp(self):
        super(UplinkStatusPropagationDbObjectTestCase, self).setUp()
        self.update_obj_fields(
            {'port_id': lambda: self._create_test_port_id()})


neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/qos/
neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/qos/__init__.py
neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/qos/test_binding.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.objects.qos import binding
from neutron.tests.unit.objects import test_base
from neutron.tests.unit import testlib_api


class QosPolicyPortBindingObjectTestCase(test_base.BaseObjectIfaceTestCase):
    _test_class = binding.QosPolicyPortBinding


class QosPolicyPortBindingDbObjectTestCase(test_base.BaseDbObjectTestCase,
                                           testlib_api.SqlTestCase):
    _test_class = binding.QosPolicyPortBinding

    def setUp(self):
        super(QosPolicyPortBindingDbObjectTestCase, self).setUp()
        network_id = self._create_test_network_id()
        for db_obj in self.db_objs:
            self._create_test_qos_policy(id=db_obj['policy_id'])
            self._create_test_port(network_id=network_id,
                                   id=db_obj['port_id'])


class QosPolicyNetworkBindingObjectTestCase(test_base.BaseObjectIfaceTestCase):
    _test_class = binding.QosPolicyNetworkBinding


class QosPolicyNetworkBindingDbObjectTestCase(test_base.BaseDbObjectTestCase,
                                              testlib_api.SqlTestCase):
    _test_class = binding.QosPolicyNetworkBinding

    def setUp(self):
        super(QosPolicyNetworkBindingDbObjectTestCase, self).setUp()
        for db_obj in self.db_objs:
            self._create_test_qos_policy(id=db_obj['policy_id'])
            self._create_test_network(network_id=db_obj['network_id'])


class QosPolicyFloatingIPBindingObjectTestCase(
        test_base.BaseObjectIfaceTestCase):
    _test_class = binding.QosPolicyFloatingIPBinding


class QosPolicyFloatingIPBindingDbObjectTestCase(
        test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase):
    _test_class = binding.QosPolicyFloatingIPBinding

    def setUp(self):
        super(QosPolicyFloatingIPBindingDbObjectTestCase, self).setUp()
        for db_obj in self.db_objs:
            self._create_test_qos_policy(id=db_obj['policy_id'])
            self._create_test_fip_id(fip_id=db_obj['fip_id'])


class QosPolicyRouterGatewayIPBindingObjectTestCase(
        test_base.BaseObjectIfaceTestCase):
    _test_class = binding.QosPolicyRouterGatewayIPBinding


class QosPolicyRouterGatewayIPBindingDbObjectTestCase(
        test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase):
    _test_class = binding.QosPolicyRouterGatewayIPBinding

    def setUp(self):
        super(QosPolicyRouterGatewayIPBindingDbObjectTestCase, self).setUp()
        for db_obj in self.db_objs:
            self._create_test_qos_policy(id=db_obj['policy_id'])
            self._create_test_router_id(router_id=db_obj['router_id'])
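
# (Editorial note, not in the original tree.) All four binding Db cases
# follow one fixture recipe: the base class pre-generates random rows in
# self.db_objs, and setUp() then creates the referenced QoS policy and the
# bound resource with exactly those randomly chosen IDs, so the foreign
# keys resolve when the binding row is inserted. A hypothetical new binding
# type would repeat the pattern (names below are made up):
#
#     class QosPolicyWidgetBindingDbObjectTestCase(
#             test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase):
#         _test_class = binding.QosPolicyWidgetBinding  # hypothetical
#
#         def setUp(self):
#             super(QosPolicyWidgetBindingDbObjectTestCase, self).setUp()
#             for db_obj in self.db_objs:
#                 self._create_test_qos_policy(id=db_obj['policy_id'])
#                 self._create_test_widget_id(widget_id=db_obj['widget_id'])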

neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/qos/test_policy.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import random

import mock
from neutron_lib.exceptions import qos as qos_exc
from neutron_lib.services.qos import constants as qos_consts
from oslo_utils import uuidutils
from oslo_versionedobjects import exception

from neutron.objects.db import api as db_api
from neutron.objects import network as net_obj
from neutron.objects import ports as port_obj
from neutron.objects.qos import binding
from neutron.objects.qos import policy
from neutron.objects.qos import rule
from neutron.tests.unit.objects import test_base
from neutron.tests.unit import testlib_api


RULE_OBJ_CLS = {
    qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: rule.QosBandwidthLimitRule,
    qos_consts.RULE_TYPE_DSCP_MARKING: rule.QosDscpMarkingRule,
    qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH: rule.QosMinimumBandwidthRule,
}


class _QosPolicyRBACBase(object):

    def get_random_object_fields(self, obj_cls=None):
        fields = (super(_QosPolicyRBACBase, self).
                  get_random_object_fields(obj_cls))
        rnd_actions = self._test_class.db_model.get_valid_actions()
        idx = random.randint(0, len(rnd_actions) - 1)
        fields['action'] = rnd_actions[idx]
        return fields


class QosPolicyRBACDbObjectTestCase(_QosPolicyRBACBase,
                                    test_base.BaseDbObjectTestCase,
                                    testlib_api.SqlTestCase):

    _test_class = policy.QosPolicyRBAC

    def setUp(self):
        super(QosPolicyRBACDbObjectTestCase, self).setUp()
        for obj in self.db_objs:
            policy_obj = policy.QosPolicy(self.context,
                                          id=obj['object_id'],
                                          project_id=obj['project_id'])
            policy_obj.create()

    def _create_test_qos_policy_rbac(self):
        self.objs[0].create()
        return self.objs[0]

    def test_object_version_degradation_1_1_to_1_0_no_id_no_project_id(self):
        qos_policy_rbac_obj = self._create_test_qos_policy_rbac()
        qos_policy_rbac_dict = qos_policy_rbac_obj.obj_to_primitive('1.0')
        self.assertNotIn('project_id',
                         qos_policy_rbac_dict['versioned_object.data'])
        self.assertNotIn('id', qos_policy_rbac_dict['versioned_object.data'])


class QosPolicyRBACIfaceObjectTestCase(_QosPolicyRBACBase,
                                       test_base.BaseObjectIfaceTestCase):
    _test_class = policy.QosPolicyRBAC


class QosPolicyObjectTestCase(test_base.BaseObjectIfaceTestCase):

    _test_class = policy.QosPolicy

    def setUp(self):
        super(QosPolicyObjectTestCase, self).setUp()
        mock.patch.object(policy.QosPolicy, 'get_default').start()
        # qos_policy_ids will be incorrect, but we don't care in this test
        self.db_qos_bandwidth_rules = [
            self.get_random_db_fields(rule.QosBandwidthLimitRule)
            for _ in range(3)]
        self.db_qos_dscp_rules = [
            self.get_random_db_fields(rule.QosDscpMarkingRule)
            for _ in range(3)]
        self.db_qos_minimum_bandwidth_rules = [
            self.get_random_db_fields(rule.QosMinimumBandwidthRule)
            for _ in range(3)]
        self.model_map.update({
            self._test_class.db_model: self.db_objs,
            binding.QosPolicyPortBinding.db_model: [],
            binding.QosPolicyNetworkBinding.db_model: [],
            rule.QosBandwidthLimitRule.db_model:
                self.db_qos_bandwidth_rules,
            rule.QosDscpMarkingRule.db_model: self.db_qos_dscp_rules,
            rule.QosMinimumBandwidthRule.db_model:
                self.db_qos_minimum_bandwidth_rules})

    # TODO(ihrachys): stop overriding those test cases, instead base test
    # cases should be expanded if there are missing bits there to support
    # QoS objects
    def test_get_objects(self):
        objs = self._test_class.get_objects(self.context)
        self.get_objects_mock.assert_any_call(
            self._test_class, self.context, _pager=None)
        self.assertItemsEqual(
            [test_base.get_obj_persistent_fields(obj) for obj in self.objs],
            [test_base.get_obj_persistent_fields(obj) for obj in objs])

    def test_get_objects_valid_fields(self):
        with mock.patch.object(
                db_api, 'get_objects',
                return_value=[self.db_objs[0]]) as get_objects_mock:
            objs = self._test_class.get_objects(
                self.context,
                **self.valid_field_filter)
            get_objects_mock.assert_any_call(
                self._test_class, self.context, _pager=None,
                **self.valid_field_filter)
        self._check_equal(self.objs[0], objs[0])

    def test_get_object(self):
        with mock.patch.object(db_api, 'get_object',
                               return_value=self.db_objs[0]) as \
                get_object_mock:
            obj = self._test_class.get_object(self.context, id='fake_id')
            self.assertTrue(self._is_test_class(obj))
            self._check_equal(self.objs[0], obj)
            get_object_mock.assert_called_once_with(
                self._test_class, self.context, id='fake_id')

    def test_to_dict_makes_primitive_field_value(self):
        # is_shared_with_tenant requires DB
        with mock.patch.object(self._test_class, 'is_shared_with_tenant',
                               return_value=False):
            (super(QosPolicyObjectTestCase, self).
             test_to_dict_makes_primitive_field_value())

    def test_get_policy_obj_not_found(self):
        context = self.context.elevated()
        self.assertRaises(qos_exc.QosPolicyNotFound,
                          policy.QosPolicy.get_policy_obj,
                          context, "fake_id")


class QosPolicyDbObjectTestCase(test_base.BaseDbObjectTestCase,
                                testlib_api.SqlTestCase):

    _test_class = policy.QosPolicy

    def setUp(self):
        super(QosPolicyDbObjectTestCase, self).setUp()
        self._network_id = self._create_test_network_id()
        self._port = self._create_test_port(network_id=self._network_id)

    def _create_test_policy(self):
        self.objs[0].create()
        return self.objs[0]

    def _create_test_policy_with_rules(self, rule_type, reload_rules=False,
                                       bwlimit_direction=None):
        policy_obj = self._create_test_policy()
        rules = []
        for obj_cls in (RULE_OBJ_CLS.get(rule_type)
                        for rule_type in rule_type):
            rule_fields = self.get_random_object_fields(obj_cls=obj_cls)
            rule_fields['qos_policy_id'] = policy_obj.id
            if (obj_cls.rule_type == qos_consts.RULE_TYPE_BANDWIDTH_LIMIT and
                    bwlimit_direction is not None):
                rule_fields['direction'] = bwlimit_direction
            rule_obj = obj_cls(self.context, **rule_fields)
            rule_obj.create()
            rules.append(rule_obj)
        if reload_rules:
            policy_obj.obj_load_attr('rules')
        return policy_obj, rules

    def test_attach_network_get_network_policy(self):
        obj = self._create_test_policy()

        policy_obj = policy.QosPolicy.get_network_policy(self.context,
                                                         self._network_id)
        self.assertIsNone(policy_obj)

        # Now attach policy and repeat
        obj.attach_network(self._network_id)

        policy_obj = policy.QosPolicy.get_network_policy(self.context,
                                                         self._network_id)
        self.assertEqual(obj, policy_obj)

    def test_attach_network_nonexistent_network(self):
        obj = self._create_test_policy()
        self.assertRaises(qos_exc.NetworkQosBindingError,
                          obj.attach_network, uuidutils.generate_uuid())

    def test_attach_network_get_policy_network(self):
        obj = self._create_test_policy()

        obj.attach_network(self._network_id)
        networks = obj.get_bound_networks()
        self.assertEqual(1, len(networks))
        self.assertEqual(self._network_id, networks[0])

    def test_attach_and_get_multiple_policy_networks(self):
        net1_id = self._network_id
        net2 = net_obj.Network(self.context,
                               name='test-network2')
        net2.create()
        net2_id = net2['id']

        obj = self._create_test_policy()
        obj.attach_network(net1_id)
        obj.attach_network(net2_id)

        networks = obj.get_bound_networks()
        self.assertEqual(2, len(networks))
        self.assertIn(net1_id, networks)
        self.assertIn(net2_id, networks)

    def test_attach_port_nonexistent_port(self):
        obj = self._create_test_policy()
        self.assertRaises(qos_exc.PortQosBindingError,
                          obj.attach_port, uuidutils.generate_uuid())

    def test_attach_network_nonexistent_policy(self):
        policy_obj = self._make_object(self.obj_fields[0])
        self.assertRaises(qos_exc.NetworkQosBindingError,
                          policy_obj.attach_network, self._network_id)

    def test_attach_port_nonexistent_policy(self):
        policy_obj = self._make_object(self.obj_fields[0])
        self.assertRaises(qos_exc.PortQosBindingError,
                          policy_obj.attach_port, self._port['id'])

    def test_attach_port_get_port_policy(self):
        obj = self._create_test_policy()

        policy_obj = policy.QosPolicy.get_network_policy(self.context,
                                                         self._network_id)
        self.assertIsNone(policy_obj)

        # Now attach policy and repeat
        obj.attach_port(self._port['id'])

        policy_obj = policy.QosPolicy.get_port_policy(self.context,
                                                      self._port['id'])
        self.assertEqual(obj, policy_obj)

    def test_attach_and_get_multiple_policy_ports(self):
        port1_id = self._port['id']
        port2 = db_api.create_object(port_obj.Port, self.context,
                                     {'tenant_id': 'fake_tenant_id',
                                      'name': 'test-port2',
                                      'network_id': self._network_id,
                                      'mac_address': 'fake_mac2',
                                      'admin_state_up': True,
                                      'status': 'ACTIVE',
                                      'device_id': 'fake_device',
                                      'device_owner': 'fake_owner'})
        port2_id = port2['id']

        obj = self._create_test_policy()
        obj.attach_port(port1_id)
        obj.attach_port(port2_id)

        ports = obj.get_bound_ports()
        self.assertEqual(2, len(ports))
        self.assertIn(port1_id, ports)
        self.assertIn(port2_id, ports)

    def test_attach_port_get_policy_port(self):
        obj = self._create_test_policy()

        obj.attach_port(self._port['id'])
        ports = obj.get_bound_ports()
        self.assertEqual(1, len(ports))
        self.assertEqual(self._port['id'], ports[0])

    def test_detach_port(self):
        obj = self._create_test_policy()

        obj.attach_port(self._port['id'])
        obj.detach_port(self._port['id'])

        policy_obj = policy.QosPolicy.get_port_policy(self.context,
                                                      self._port['id'])
        self.assertIsNone(policy_obj)

    def test_detach_network(self):
        obj = self._create_test_policy()

        obj.attach_network(self._network_id)
        obj.detach_network(self._network_id)

        policy_obj = policy.QosPolicy.get_network_policy(self.context,
                                                         self._network_id)
        self.assertIsNone(policy_obj)

    def test_detach_port_nonexistent_port(self):
        obj = self._create_test_policy()
        self.assertRaises(qos_exc.PortQosBindingNotFound,
                          obj.detach_port, 'non-existent-port')

    def test_detach_network_nonexistent_network(self):
        obj = self._create_test_policy()
        self.assertRaises(qos_exc.NetworkQosBindingNotFound,
                          obj.detach_network, 'non-existent-port')

    def test_detach_port_nonexistent_policy(self):
        policy_obj = self._make_object(self.obj_fields[0])
        self.assertRaises(qos_exc.PortQosBindingNotFound,
                          policy_obj.detach_port, self._port['id'])

    def test_detach_network_nonexistent_policy(self):
        policy_obj = self._make_object(self.obj_fields[0])
        self.assertRaises(qos_exc.NetworkQosBindingNotFound,
                          policy_obj.detach_network, self._network_id)

    @mock.patch.object(policy.QosPolicyDefault, 'create')
    def test_set_default_no_default_policy_exists(self, mock_default_create):
        obj = self._create_test_policy()
        with mock.patch.object(obj, 'get_default', return_value=None):
            obj.set_default()
            mock_default_create.assert_called_once_with()

    def test_set_default_default_policy_exists(self):
        obj = self._create_test_policy()
        with mock.patch.object(obj, 'get_default',
                               return_value=mock.Mock()):
            self.assertRaises(qos_exc.QoSPolicyDefaultAlreadyExists,
                              obj.set_default)

    def test_set_default_is_default_policy(self):
        obj = self._create_test_policy()
        with mock.patch.object(obj, 'get_default',
                               return_value=obj.id), \
                mock.patch.object(obj, 'set_default'):
            obj.set_default()

    @mock.patch.object(policy.QosPolicyDefault, 'get_object')
    @mock.patch.object(policy.QosPolicyDefault, 'delete')
    def test_unset_default_default_policy_exists(self, mock_default_delete,
                                                 mock_default_get):
        obj = self._create_test_policy()
        with mock.patch.object(obj, 'get_default', return_value=obj.id):
            mock_default_get.return_value = policy.QosPolicyDefault()
            obj.unset_default()
            mock_default_get.assert_called_once_with(
                obj.obj_context, project_id=obj.project_id)
            mock_default_delete.assert_called_once_with()

    def test_unset_default_no_default_policy_exists(self):
        obj = self._create_test_policy()
        with mock.patch.object(obj, 'get_default', return_value=None):
            obj.unset_default()

    def test_synthetic_rule_fields(self):
        policy_obj, rule_obj = self._create_test_policy_with_rules(
            [qos_consts.RULE_TYPE_BANDWIDTH_LIMIT])
        policy_obj = policy.QosPolicy.get_object(self.context,
                                                 id=policy_obj.id)
        self.assertEqual(rule_obj, policy_obj.rules)

    def test_get_object_fetches_rules_non_lazily(self):
        policy_obj, rule_obj = self._create_test_policy_with_rules(
            [qos_consts.RULE_TYPE_BANDWIDTH_LIMIT])
        policy_obj = policy.QosPolicy.get_object(self.context,
                                                 id=policy_obj.id)
        self.assertEqual(rule_obj, policy_obj.rules)

        primitive = policy_obj.obj_to_primitive()
        self.assertNotEqual([], (primitive['versioned_object.data']['rules']))

    def test_to_dict_returns_rules_as_dicts(self):
        policy_obj, rule_obj = self._create_test_policy_with_rules(
            [qos_consts.RULE_TYPE_BANDWIDTH_LIMIT])
        policy_obj = policy.QosPolicy.get_object(self.context,
                                                 id=policy_obj.id)

        obj_dict = policy_obj.to_dict()
        rule_dict = rule_obj[0].to_dict()

        # first make sure that to_dict() is still sane and does not return
        # objects
        for obj in (rule_dict, obj_dict):
            self.assertIsInstance(obj, dict)

        self.assertEqual(rule_dict, obj_dict['rules'][0])

    def test_shared_default(self):
        obj = self._make_object(self.obj_fields[0])
        self.assertFalse(obj.shared)

    def test_delete_not_allowed_if_policy_in_use_by_port(self):
        obj = self._create_test_policy()
        obj.attach_port(self._port['id'])

        self.assertRaises(qos_exc.QosPolicyInUse, obj.delete)

        obj.detach_port(self._port['id'])
        obj.delete()

    def test_delete_not_allowed_if_policy_in_use_by_network(self):
        obj = self._create_test_policy()
        obj.attach_network(self._network_id)

        self.assertRaises(qos_exc.QosPolicyInUse, obj.delete)

        obj.detach_network(self._network_id)
        obj.delete()

    def test_reload_rules_reloads_rules(self):
        policy_obj, rule_obj = self._create_test_policy_with_rules(
            [qos_consts.RULE_TYPE_BANDWIDTH_LIMIT])
        self.assertEqual([], policy_obj.rules)

        policy_obj._reload_rules()
        self.assertEqual(rule_obj, policy_obj.rules)

    def test_reload_is_default(self):
        policy_obj = self._create_test_policy()
        self.assertFalse(policy_obj.is_default)
        policy_obj.set_default()
        policy_obj._reload_is_default()
        self.assertTrue(policy_obj.is_default)

    def test_get_bound_tenant_ids_returns_set_of_tenant_ids(self):
        obj = self._create_test_policy()
        obj.attach_port(self._port['id'])
        ids = self._test_class.get_bound_tenant_ids(self.context,
                                                    obj['id'])
        self.assertEqual(ids.pop(), self._port.project_id)
        self.assertEqual(len(ids), 0)

        obj.detach_port(self._port['id'])
        obj.delete()

    def test_object_version_degradation_less_than_1_8(self):
        policy_obj = self._create_test_policy()
        self.assertRaises(exception.IncompatibleObjectVersion,
                          policy_obj.obj_to_primitive, '1.7')

    @mock.patch.object(policy.QosPolicy, 'unset_default')
    def test_filter_by_shared(self, *mocks):
        project_id = uuidutils.generate_uuid()
        policy_obj = policy.QosPolicy(
            self.context, name='shared-policy', shared=True,
            project_id=project_id, is_default=False)
        policy_obj.create()

        policy_obj = policy.QosPolicy(
            self.context, name='private-policy', shared=False,
            project_id=project_id)
        policy_obj.create()

        shared_policies = policy.QosPolicy.get_objects(
            self.context, shared=True)
        self.assertEqual(1, len(shared_policies))
        self.assertEqual('shared-policy', shared_policies[0].name)

        private_policies = policy.QosPolicy.get_objects(
            self.context, shared=False)
        self.assertEqual(1, len(private_policies))
        self.assertEqual('private-policy', private_policies[0].name)

    def test_get_objects_queries_constant(self):
        # NOTE(korzen) QosPolicy uses extra queries to reload its rules, so
        # it currently cannot be loaded with a constant number of queries.
        # This can be reworked in a follow-up patch.
        pass


class QosPolicyDefaultObjectTestCase(test_base.BaseObjectIfaceTestCase):
    _test_class = policy.QosPolicyDefault


neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/qos/test_rule.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib import constants
from neutron_lib.services.qos import constants as qos_consts
from oslo_utils import uuidutils
from oslo_versionedobjects import exception

from neutron.objects.qos import policy
from neutron.objects.qos import rule
from neutron.tests import base as neutron_test_base
from neutron.tests.unit.objects import test_base
from neutron.tests.unit import testlib_api

POLICY_ID_A = 'policy-id-a'
POLICY_ID_B = 'policy-id-b'
DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'


class QosRuleObjectTestCase(neutron_test_base.BaseTestCase):

    def _test_should_apply_to_port(self, rule_policy_id, port_policy_id,
                                   device_owner, expected_result):
        test_rule = rule.QosRule(qos_policy_id=rule_policy_id)
        port = {qos_consts.QOS_POLICY_ID: port_policy_id,
                'device_owner': device_owner}
        self.assertEqual(expected_result,
                         test_rule.should_apply_to_port(port))

    def test_should_apply_to_port_with_network_port_and_net_policy(self):
        self._test_should_apply_to_port(
            rule_policy_id=POLICY_ID_B,
            port_policy_id=POLICY_ID_A,
            device_owner=constants.DEVICE_OWNER_ROUTER_INTF,
            expected_result=False)

    def test_should_apply_to_port_with_network_port_and_only_net_policy(self):
        self._test_should_apply_to_port(
            rule_policy_id=POLICY_ID_B,
            port_policy_id=None,
            device_owner=constants.DEVICE_OWNER_ROUTER_INTF,
            expected_result=False)

    def test_should_apply_to_port_with_network_port_and_port_policy(self):
        self._test_should_apply_to_port(
            rule_policy_id=POLICY_ID_A,
            port_policy_id=POLICY_ID_A,
            device_owner=constants.DEVICE_OWNER_ROUTER_INTF,
            expected_result=True)

    def test_should_apply_to_port_with_compute_port_and_net_policy(self):
        # NOTE(ralonsoh): in this case the port has a port QoS policy; the
        # network QoS policy can't be applied.
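        # (Editorial aside, not in the original file.) The truth table the
        # surrounding test methods pin down, written out once:
        #
        #     device owner            port policy  rule belongs to  applies?
        #     ----------------------  -----------  ---------------  --------
        #     network-owned (router/  any or None  network policy   no
        #       agent gateway ports)
        #     network-owned           POLICY_A     POLICY_A         yes
        #     compute:*               POLICY_A     network policy   no
        #     compute:*               None         network policy   yes
        #     compute:*               POLICY_A     POLICY_A         yes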
        self._test_should_apply_to_port(
            rule_policy_id=POLICY_ID_B,
            port_policy_id=POLICY_ID_A,
            device_owner=DEVICE_OWNER_COMPUTE,
            expected_result=False)

    def test_should_apply_to_port_with_compute_port_and_only_net_policy(self):
        self._test_should_apply_to_port(
            rule_policy_id=POLICY_ID_B,
            port_policy_id=None,
            device_owner=DEVICE_OWNER_COMPUTE,
            expected_result=True)

    def test_should_apply_to_port_with_compute_port_and_port_policy(self):
        self._test_should_apply_to_port(
            rule_policy_id=POLICY_ID_A,
            port_policy_id=POLICY_ID_A,
            device_owner=DEVICE_OWNER_COMPUTE,
            expected_result=True)

    def test_should_apply_to_port_with_router_gw_port_and_net_policy(self):
        self._test_should_apply_to_port(
            rule_policy_id=POLICY_ID_B,
            port_policy_id=POLICY_ID_A,
            device_owner=constants.DEVICE_OWNER_ROUTER_GW,
            expected_result=False)

    def test_should_apply_to_port_with_router_gw_port_and_port_policy(self):
        self._test_should_apply_to_port(
            rule_policy_id=POLICY_ID_A,
            port_policy_id=POLICY_ID_A,
            device_owner=constants.DEVICE_OWNER_ROUTER_GW,
            expected_result=True)

    def test_should_apply_to_port_with_agent_gw_port_and_net_policy(self):
        self._test_should_apply_to_port(
            rule_policy_id=POLICY_ID_B,
            port_policy_id=POLICY_ID_A,
            device_owner=constants.DEVICE_OWNER_AGENT_GW,
            expected_result=False)

    def test_should_apply_to_port_with_agent_gw_port_and_port_policy(self):
        self._test_should_apply_to_port(
            rule_policy_id=POLICY_ID_A,
            port_policy_id=POLICY_ID_A,
            device_owner=constants.DEVICE_OWNER_AGENT_GW,
            expected_result=True)


class QosBandwidthLimitRuleObjectTestCase(test_base.BaseObjectIfaceTestCase):

    _test_class = rule.QosBandwidthLimitRule

    def test_to_dict_returns_type(self):
        obj = rule.QosBandwidthLimitRule(self.context, **self.db_objs[0])
        dict_ = obj.to_dict()
        self.assertEqual(qos_consts.RULE_TYPE_BANDWIDTH_LIMIT, dict_['type'])

    def test_duplicate_rules(self):
        policy_id = uuidutils.generate_uuid()
        ingress_rule_1 = rule.QosBandwidthLimitRule(
            self.context, qos_policy_id=policy_id,
            max_kbps=1000, max_burst=500,
            direction=constants.INGRESS_DIRECTION)
        ingress_rule_2 = rule.QosBandwidthLimitRule(
            self.context, qos_policy_id=policy_id,
            max_kbps=2000, max_burst=500,
            direction=constants.INGRESS_DIRECTION)
        egress_rule = rule.QosBandwidthLimitRule(
            self.context, qos_policy_id=policy_id,
            max_kbps=1000, max_burst=500,
            direction=constants.EGRESS_DIRECTION)
        dscp_rule = rule.QosDscpMarkingRule(
            self.context, qos_policy_id=policy_id, dscp_mark=16)
        self.assertTrue(ingress_rule_1.duplicates(ingress_rule_2))
        self.assertFalse(ingress_rule_1.duplicates(egress_rule))
        self.assertFalse(ingress_rule_1.duplicates(dscp_rule))

    def test_object_version_degradation_less_than_1_3(self):
        rule_obj = rule.QosBandwidthLimitRule()
        self.assertRaises(exception.IncompatibleObjectVersion,
                          rule_obj.obj_to_primitive, '1.2')


class QosBandwidthLimitRuleDbObjectTestCase(test_base.BaseDbObjectTestCase,
                                            testlib_api.SqlTestCase):

    _test_class = rule.QosBandwidthLimitRule

    def setUp(self):
        super(QosBandwidthLimitRuleDbObjectTestCase, self).setUp()
        # Prepare policy to be able to insert a rule
        for obj in self.db_objs:
            generated_qos_policy_id = obj['qos_policy_id']
            policy_obj = policy.QosPolicy(
                self.context,
                id=generated_qos_policy_id,
                project_id=uuidutils.generate_uuid())
            policy_obj.create()


class QosDscpMarkingRuleObjectTestCase(test_base.BaseObjectIfaceTestCase):

    _test_class = rule.QosDscpMarkingRule

    def test_object_version_degradation_less_than_1_3(self):
        rule_obj = rule.QosDscpMarkingRule()
        self.assertRaises(exception.IncompatibleObjectVersion,
                          rule_obj.obj_to_primitive, '1.2')

    def test_duplicate_rules(self):
        policy_id = uuidutils.generate_uuid()
        dscp_rule_1 = rule.QosDscpMarkingRule(
            self.context, qos_policy_id=policy_id, dscp_mark=16)
        dscp_rule_2 = rule.QosDscpMarkingRule(
            self.context, qos_policy_id=policy_id, dscp_mark=32)
        bw_limit_rule = rule.QosBandwidthLimitRule(
            self.context, qos_policy_id=policy_id,
            max_kbps=1000, max_burst=500,
            direction=constants.EGRESS_DIRECTION)
        self.assertTrue(dscp_rule_1.duplicates(dscp_rule_2))
        self.assertFalse(dscp_rule_1.duplicates(bw_limit_rule))


class QosDscpMarkingRuleDbObjectTestCase(test_base.BaseDbObjectTestCase,
                                         testlib_api.SqlTestCase):

    _test_class = rule.QosDscpMarkingRule

    def setUp(self):
        super(QosDscpMarkingRuleDbObjectTestCase, self).setUp()
        # Prepare policy to be able to insert a rule
        for obj in self.db_objs:
            generated_qos_policy_id = obj['qos_policy_id']
            policy_obj = policy.QosPolicy(
                self.context,
                id=generated_qos_policy_id,
                project_id=uuidutils.generate_uuid())
            policy_obj.create()


class QosMinimumBandwidthRuleObjectTestCase(test_base.BaseObjectIfaceTestCase):

    _test_class = rule.QosMinimumBandwidthRule

    def test_object_version_degradation_less_than_1_3(self):
        rule_obj = rule.QosMinimumBandwidthRule()
        self.assertRaises(exception.IncompatibleObjectVersion,
                          rule_obj.obj_to_primitive, '1.2')

    def test_duplicate_rules(self):
        policy_id = uuidutils.generate_uuid()
        ingress_rule_1 = rule.QosMinimumBandwidthRule(
            self.context, qos_policy_id=policy_id,
            min_kbps=1000, direction=constants.INGRESS_DIRECTION)
        ingress_rule_2 = rule.QosMinimumBandwidthRule(
            self.context, qos_policy_id=policy_id,
            min_kbps=2000, direction=constants.INGRESS_DIRECTION)
        egress_rule = rule.QosMinimumBandwidthRule(
            self.context, qos_policy_id=policy_id,
            min_kbps=1000, direction=constants.EGRESS_DIRECTION)
        dscp_rule = rule.QosDscpMarkingRule(
            self.context, qos_policy_id=policy_id, dscp_mark=16)
        self.assertTrue(ingress_rule_1.duplicates(ingress_rule_2))
        self.assertFalse(ingress_rule_1.duplicates(egress_rule))
        self.assertFalse(ingress_rule_1.duplicates(dscp_rule))


class QosMinimumBandwidthRuleDbObjectTestCase(test_base.BaseDbObjectTestCase,
                                              testlib_api.SqlTestCase):

    _test_class = rule.QosMinimumBandwidthRule

    def setUp(self):
        super(QosMinimumBandwidthRuleDbObjectTestCase, self).setUp()
        # Prepare policy to be able to insert a rule
        for obj in self.db_objs:
            generated_qos_policy_id = obj['qos_policy_id']
            policy_obj = policy.QosPolicy(
                self.context,
                id=generated_qos_policy_id,
                project_id=uuidutils.generate_uuid())
            policy_obj.create()
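
# (Editorial note, not in the original tree.) The three test_duplicate_rules
# variants above all encode one contract: duplicates() only matches rules of
# the same class, and for directional rules the direction is part of the
# identity while the numeric limits are not. A sketch of those semantics
# (not the actual implementation):
#
#     def duplicates(self, other):
#         return (type(self) is type(other) and
#                 all(getattr(self, f) == getattr(other, f)
#                     for f in self.duplicates_compare_fields))
#
# where duplicates_compare_fields would be, e.g., ['direction'] for the
# bandwidth rules and empty for DSCP marking.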

neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/qos/test_rule_type.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# rule types are so different from other objects that we don't base the test
# class on the common base class for all objects

import mock
from neutron_lib import constants as lib_consts
from neutron_lib.db import constants as db_consts
from neutron_lib.services.qos import constants as qos_consts
from oslo_config import cfg
from oslo_versionedobjects import exception

from neutron import manager
from neutron.objects.qos import rule_type
from neutron.services.qos import qos_plugin
from neutron.tests import base as test_base

DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'

DRIVER_SUPPORTED_PARAMETERS = [
    {
        'parameter_name': qos_consts.MAX_KBPS,
        'parameter_type': lib_consts.VALUES_TYPE_RANGE,
        'parameter_values': {"start": 0,
                             "end": db_consts.DB_INTEGER_MAX_VALUE}
    }, {
        'parameter_name': qos_consts.MAX_BURST,
        'parameter_type': lib_consts.VALUES_TYPE_RANGE,
        'parameter_values': {"start": 0,
                             "end": db_consts.DB_INTEGER_MAX_VALUE}
    }, {
        'parameter_name': qos_consts.DIRECTION,
        'parameter_type': lib_consts.VALUES_TYPE_CHOICES,
        'parameter_values': lib_consts.VALID_DIRECTIONS
    }
]


class QosRuleTypeObjectTestCase(test_base.BaseTestCase):

    def setUp(self):
        super(QosRuleTypeObjectTestCase, self).setUp()
        self.config_parse()
        self.setup_coreplugin(load_plugins=False)
        cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
        cfg.CONF.set_override("service_plugins", ["qos"])
        manager.init()

    def test_get_object(self):
        driver_details = {
            'name': "backend_driver",
            'supported_parameters': DRIVER_SUPPORTED_PARAMETERS
        }
        with mock.patch.object(
            qos_plugin.QoSPlugin, 'supported_rule_type_details',
            return_value=[driver_details]
        ):
            rule_type_details = rule_type.QosRuleType.get_object(
                qos_consts.RULE_TYPE_BANDWIDTH_LIMIT)
            self.assertEqual(
                driver_details['name'], rule_type_details.drivers[0].name)
            self.assertEqual(
                driver_details['supported_parameters'],
                rule_type_details.drivers[0].supported_parameters)
            self.assertEqual(1, len(rule_type_details.drivers))
            self.assertEqual(
                qos_consts.RULE_TYPE_BANDWIDTH_LIMIT, rule_type_details.type)

    def test_get_objects(self):
        rule_types_mock = mock.PropertyMock(
            return_value=set(qos_consts.VALID_RULE_TYPES))
        with mock.patch.object(qos_plugin.QoSPlugin, 'supported_rule_types',
                               new_callable=rule_types_mock):
            types = rule_type.QosRuleType.get_objects()
            self.assertEqual(sorted(qos_consts.VALID_RULE_TYPES),
                             sorted(type_['type'] for type_ in types))

    def test_wrong_type(self):
        self.assertRaises(ValueError, rule_type.QosRuleType, type='bad_type')

    def test_object_version_degradation_less_than_1_3(self):
        qos_rule_type = rule_type.QosRuleType()
        self.assertRaises(exception.IncompatibleObjectVersion,
                          qos_rule_type.obj_to_primitive, '1.2')
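
# (Editorial note, not in the original tree.) DRIVER_SUPPORTED_PARAMETERS is
# the shape a QoS backend driver advertises per rule type: each entry names
# a parameter and constrains it either to a numeric range or to a fixed set
# of choices. A driver supporting only egress bandwidth limiting might, for
# instance, advertise (hypothetical values):
#
#     [{'parameter_name': qos_consts.MAX_KBPS,
#       'parameter_type': lib_consts.VALUES_TYPE_RANGE,
#       'parameter_values': {'start': 0, 'end': 10000000}},
#      {'parameter_name': qos_consts.DIRECTION,
#       'parameter_type': lib_consts.VALUES_TYPE_CHOICES,
#       'parameter_values': [lib_consts.EGRESS_DIRECTION]}]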

neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/test_address_scope.py

# Copyright (c) 2016 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib import constants as lib_constants

from neutron.objects import address_scope
from neutron.tests.unit.objects import test_base
from neutron.tests.unit.objects import test_rbac
from neutron.tests.unit import testlib_api


class AddressScopeIfaceObjectTestCase(test_base.BaseObjectIfaceTestCase):
    _test_class = address_scope.AddressScope


class AddressScopeDbObjectTestCase(test_base.BaseDbObjectTestCase,
                                   testlib_api.SqlTestCase):
    _test_class = address_scope.AddressScope


class AddressScopeRBACDbObjectTestCase(test_rbac.TestRBACObjectMixin,
                                       test_base.BaseDbObjectTestCase,
                                       testlib_api.SqlTestCase):

    _test_class = address_scope.AddressScopeRBAC

    def setUp(self):
        super(AddressScopeRBACDbObjectTestCase, self).setUp()
        for obj in self.db_objs:
            as_obj = address_scope.AddressScope(
                self.context,
                id=obj['object_id'],
                name="test_as_%s_%s" % (obj['object_id'], obj['project_id']),
                project_id=obj['project_id'],
                ip_version=lib_constants.IP_ALLOWED_VERSIONS[0],
            )
            as_obj.create()

    def _create_test_address_scope_rbac(self):
        self.objs[0].create()
        return self.objs[0]


class AddressScopeRBACIfaceObjectTestCase(test_rbac.TestRBACObjectMixin,
                                          test_base.BaseObjectIfaceTestCase):
    _test_class = address_scope.AddressScopeRBAC

neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/test_agent.py

# Copyright (c) 2016 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.objects import agent
from neutron.tests.unit.objects import test_base as obj_test_base
from neutron.tests.unit import testlib_api


class AgentIfaceObjectTestCase(obj_test_base.BaseObjectIfaceTestCase):
    _test_class = agent.Agent


class AgentDbObjectTestCase(obj_test_base.BaseDbObjectTestCase,
                            testlib_api.SqlTestCase):
    _test_class = agent.Agent

    def test_configurations(self):
        obj = self.objs[0]
        obj.create()

        obj.configurations = {}
        obj.update()
        db_fields = obj.modify_fields_to_db(obj)
        self.assertEqual('', db_fields['configurations'])
        obj = agent.Agent.get_object(self.context, id=obj.id)
        self.assertEqual({}, obj.configurations)

        conf = {"tunnel_types": ["vxlan"],
                "tunneling_ip": "20.0.0.1",
                "bridge_mappings": {"phys_net1": "br-eth-1"}}
        obj.configurations = conf
        obj.update()
        obj = agent.Agent.get_object(self.context, id=obj.id)
        self.assertEqual(conf, obj.configurations)

    def test_resource_versions(self):
        obj = self.objs[0]
        versions = {'obj1': 'ver1', 'obj2': 1.1}
        obj.resource_versions = versions
        obj.create()
        obj = agent.Agent.get_object(self.context, id=obj.id)
        self.assertEqual(versions, obj.resource_versions)

        obj.resource_versions = {}
        obj.update()
        db_fields = obj.modify_fields_to_db(obj)
        self.assertIsNone(db_fields['resource_versions'])
        obj = agent.Agent.get_object(self.context, id=obj.id)
        self.assertIsNone(obj.resource_versions)

        obj.resource_versions = None
        obj.update()
        self.assertIsNone(obj.resource_versions)
        db_fields = obj.modify_fields_to_db(obj)
        self.assertIsNone(db_fields['resource_versions'])

    def test_resources_synced_10(self):
        obj = agent.Agent()
        primitive = obj.obj_to_primitive(target_version='1.0')
        self.assertNotIn(
            'resources_synced', primitive['versioned_object.data'])
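
# (Editorial note, not in the original tree.) test_configurations and
# test_resource_versions pin down an asymmetric JSON round-trip for the two
# dict-valued Agent fields: an empty configurations dict is persisted as the
# empty string but reads back as {}, while an empty resource_versions dict
# is collapsed to NULL and stays None after a reload. In short:
#
#     obj.configurations = {}       # stored as ''   -> reloads as {}
#     obj.resource_versions = {}    # stored as NULL -> reloads as None
#
# (a summary of the assertions above, not new behaviour).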

neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/test_auto_allocate.py

# Copyright (c) 2016 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.objects import auto_allocate
from neutron.tests.unit.objects import test_base as obj_test_base
from neutron.tests.unit import testlib_api


class AutoAllocateTopologyIfaceObjectTestCase(
        obj_test_base.BaseObjectIfaceTestCase):
    _test_class = auto_allocate.AutoAllocatedTopology


class AutoAllocateTopologyDbObjectTestCase(obj_test_base.BaseDbObjectTestCase,
                                           testlib_api.SqlTestCase):
    _test_class = auto_allocate.AutoAllocatedTopology

    def setUp(self):
        super(AutoAllocateTopologyDbObjectTestCase, self).setUp()
        self.update_obj_fields({
            'network_id': lambda: self._create_test_network_id(),
            'router_id': lambda: self._create_test_router_id()})


neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/test_base.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections
import copy
import itertools
import random

import fixtures
import mock
import netaddr
from neutron_lib import constants
from neutron_lib import context
from neutron_lib.db import api as db_api
from neutron_lib.db import model_query
from neutron_lib import exceptions as n_exc
from neutron_lib.objects import common_types
from neutron_lib.objects import exceptions as o_exc
from neutron_lib.objects.logapi import event_types
from neutron_lib.objects import utils as obj_utils
from neutron_lib.tests import tools as lib_test_tools
from neutron_lib.utils import helpers
from oslo_db import exception as obj_exc
from oslo_db.sqlalchemy import utils as db_utils
from oslo_utils import uuidutils
from oslo_versionedobjects import base as obj_base
from oslo_versionedobjects import exception
from oslo_versionedobjects import fields as obj_fields
from sqlalchemy import orm
import testtools

from neutron import objects
from neutron.objects import agent
from neutron.objects import base
from neutron.objects.db import api as obj_db_api
from neutron.objects import flavor
from neutron.objects import network as net_obj
from neutron.objects import ports
from neutron.objects.qos import policy as qos_policy
from neutron.objects import rbac_db
from neutron.objects import router
from neutron.objects import securitygroup
from neutron.objects import stdattrs
from neutron.objects import subnet
from neutron.tests import base as test_base
from neutron.tests import tools
from neutron.tests.unit.db import test_db_base_plugin_v2

SQLALCHEMY_COMMIT = 'sqlalchemy.engine.Connection._commit_impl'
SQLALCHEMY_CLOSE = 'sqlalchemy.engine.Connection.close'
OBJECTS_BASE_OBJ_FROM_PRIMITIVE = ('oslo_versionedobjects.base.'
                                   'VersionedObject.obj_from_primitive')
TIMESTAMP_FIELDS = ['created_at', 'updated_at', 'revision_number']


class FakeModel(dict):
    pass


class ObjectFieldsModel(dict):
    pass


@base.NeutronObjectRegistry.register_if(False)
class FakeSmallNeutronObject(base.NeutronDbObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = ObjectFieldsModel

    primary_keys = ['field1']

    foreign_keys = {
        'FakeNeutronObjectCompositePrimaryKeyWithId': {'field1': 'id'},
        'FakeNeutronDbObject': {'field2': 'id'},
        'FakeNeutronObjectUniqueKey': {'field3': 'id'},
    }

    fields = {
        'field1': common_types.UUIDField(),
        'field2': common_types.UUIDField(),
        'field3': common_types.UUIDField(),
    }


@base.NeutronObjectRegistry.register_if(False)
class FakeSmallNeutronObjectNewEngineFacade(base.NeutronDbObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = ObjectFieldsModel

    new_facade = True

    primary_keys = ['field1']

    foreign_keys = {
        'FakeNeutronObjectCompositePrimaryKeyWithId': {'field1': 'id'},
        'FakeNeutronDbObject': {'field2': 'id'},
        'FakeNeutronObjectUniqueKey': {'field3': 'id'},
    }

    fields = {
        'field1': common_types.UUIDField(),
        'field2': common_types.UUIDField(),
        'field3': common_types.UUIDField(),
    }


@base.NeutronObjectRegistry.register_if(False)
class FakeSmallNeutronObjectWithMultipleParents(base.NeutronDbObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = ObjectFieldsModel

    primary_keys = ['field1', 'field2']

    foreign_keys = {
        'FakeParent': {'field1': 'id'},
        'FakeParent2': {'field2': 'id'},
    }

    fields = {
        'field1': common_types.UUIDField(),
        'field2': obj_fields.StringField(),
    }


@base.NeutronObjectRegistry.register_if(False)
class FakeParent(base.NeutronDbObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = ObjectFieldsModel

    primary_keys = ['field1', 'field2']

    fields = {
        'id': common_types.UUIDField(),
        'children': obj_fields.ListOfObjectsField(
            'FakeSmallNeutronObjectWithMultipleParents',
            nullable=True)
    }

    synthetic_fields = ['children']


@base.NeutronObjectRegistry.register_if(False)
class FakeWeirdKeySmallNeutronObject(base.NeutronDbObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = ObjectFieldsModel

    primary_keys = ['field1']

    foreign_keys = {
        'FakeNeutronObjectNonStandardPrimaryKey': {'field1': 'weird_key'},
        'FakeNeutronObjectCompositePrimaryKey': {'field2': 'weird_key'},
    }

    fields = {
        'field1': common_types.UUIDField(),
        'field2': obj_fields.StringField(),
    }


class NeutronObjectRegistryFixture(fixtures.Fixture):
    """Use a NeutronObjectRegistry as a temp registry pattern fixture.

    It is a fixture similar to
    oslo_versionedobjects.fixture.VersionedObjectRegistryFixture,
    but it uses Neutron's base registry class.
    """

    def setUp(self):
        super(NeutronObjectRegistryFixture, self).setUp()
        self._base_test_obj_backup = copy.deepcopy(
            base.NeutronObjectRegistry._registry._obj_classes)
        self.addCleanup(self._restore_obj_registry)

    @staticmethod
    def register(cls_name):
        base.NeutronObjectRegistry.register(cls_name)

    def _restore_obj_registry(self):
        base.NeutronObjectRegistry._registry._obj_classes = \
            self._base_test_obj_backup


@base.NeutronObjectRegistry.register_if(False)
class FakeNeutronDbObject(base.NeutronDbObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = FakeModel

    fields = {
        'id': common_types.UUIDField(),
        'field1': obj_fields.StringField(),
        'obj_field': obj_fields.ObjectField('FakeSmallNeutronObject',
                                            nullable=True)
    }

    primary_keys = ['id']

    fields_no_update = ['field1']

    synthetic_fields = ['obj_field']


@base.NeutronObjectRegistry.register_if(False)
class FakeNeutronObjectNonStandardPrimaryKey(base.NeutronDbObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = FakeModel

    primary_keys = ['weird_key']

    fields = {
        'weird_key': common_types.UUIDField(),
        'field1': obj_fields.StringField(),
        'obj_field': obj_fields.ListOfObjectsField(
            'FakeWeirdKeySmallNeutronObject'),
        'field2': obj_fields.StringField()
    }

    synthetic_fields = ['obj_field', 'field2']


@base.NeutronObjectRegistry.register_if(False)
class FakeNeutronObjectCompositePrimaryKey(base.NeutronDbObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = FakeModel

    primary_keys = ['weird_key', 'field1']

    fields = {
        'weird_key': common_types.UUIDField(),
        'field1': obj_fields.StringField(),
        'obj_field': obj_fields.ListOfObjectsField(
            'FakeWeirdKeySmallNeutronObject')
    }

    synthetic_fields = ['obj_field']


@base.NeutronObjectRegistry.register_if(False)
class FakeNeutronObjectUniqueKey(base.NeutronDbObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = FakeModel

    primary_keys = ['id', 'id2']

    unique_keys = [['unique_key'], ['id2']]

    fields = {
        'id': common_types.UUIDField(),
        'id2': common_types.UUIDField(),
        'unique_key': obj_fields.StringField(),
        'field1': obj_fields.StringField(),
        'obj_field': obj_fields.ObjectField('FakeSmallNeutronObject',
                                            nullable=True)
    }

    synthetic_fields = ['obj_field']


@base.NeutronObjectRegistry.register_if(False)
class FakeNeutronObjectRenamedField(base.NeutronDbObject):
    """Testing renaming the parameter from DB to NeutronDbObject.

    For tests:
        - db fields: id, field_db, field2
        - object: id, field_ovo, field2
    """
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = FakeModel

    primary_keys = ['id']

    fields = {
        'id': common_types.UUIDField(),
        'field_ovo': obj_fields.StringField(),
        'field2': obj_fields.StringField()
    }

    synthetic_fields = ['field2']

    fields_need_translation = {'field_ovo': 'field_db'}


@base.NeutronObjectRegistry.register_if(False)
class FakeNeutronObjectCompositePrimaryKeyWithId(base.NeutronDbObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = FakeModel

    primary_keys = ['id', 'field1']

    fields = {
        'id': common_types.UUIDField(),
        'field1': obj_fields.StringField(),
        'obj_field': obj_fields.ListOfObjectsField('FakeSmallNeutronObject')
    }

    synthetic_fields = ['obj_field']


@base.NeutronObjectRegistry.register_if(False)
class FakeNeutronObjectMultipleForeignKeys(base.NeutronDbObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = ObjectFieldsModel

    foreign_keys = {
        'FakeNeutronObjectSyntheticField': {'field1': 'id',
                                            'field2': 'id'},
    }

    fields = {
        'field1': common_types.UUIDField(),
        'field2': common_types.UUIDField(),
    }


@base.NeutronObjectRegistry.register_if(False)
class FakeNeutronObjectSyntheticField(base.NeutronDbObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = FakeModel

    fields = {
        'id': common_types.UUIDField(),
        'obj_field': obj_fields.ListOfObjectsField(
            'FakeNeutronObjectMultipleForeignKeys')
    }

    synthetic_fields = ['obj_field']


@base.NeutronObjectRegistry.register_if(False)
class FakeNeutronObjectSyntheticField2(base.NeutronDbObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = FakeModel

    fields = {
        'id': common_types.UUIDField(),
        'obj_field': obj_fields.ObjectField('FakeSmallNeutronObject')
    }

    synthetic_fields = ['obj_field']


@base.NeutronObjectRegistry.register_if(False)
class FakeNeutronObjectWithProjectId(base.NeutronDbObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = FakeModel

    fields = {
        'id': common_types.UUIDField(),
        'project_id': obj_fields.StringField(),
        'field2': common_types.UUIDField(),
    }


@base.NeutronObjectRegistry.register_if(False)
class FakeNeutronObject(base.NeutronObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'id': common_types.UUIDField(),
        'project_id': obj_fields.StringField(),
        'field2': common_types.UUIDField(),
    }

    @classmethod
    def get_object(cls, context, **kwargs):
        if not hasattr(cls, '_obj'):
            cls._obj = FakeNeutronObject(id=uuidutils.generate_uuid(),
                                         project_id='fake-id',
                                         field2=uuidutils.generate_uuid())
        return cls._obj

    @classmethod
    def get_objects(cls, context, _pager=None, count=1, **kwargs):
        return [
            cls.get_object(context, **kwargs)
            for i in range(count)
        ]

    @classmethod
    def get_values(cls, context, field, **kwargs):
        return [
            getattr(obj, field)
            for obj in cls.get_objects(**kwargs)
        ]


@base.NeutronObjectRegistry.register_if(False)
class FakeNeutronObjectDictOfMiscValues(base.NeutronDbObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = FakeModel

    fields = {
        'id': common_types.UUIDField(),
        'dict_field': common_types.DictOfMiscValuesField(),
    }


@base.NeutronObjectRegistry.register_if(False)
class FakeNeutronObjectListOfDictOfMiscValues(base.NeutronDbObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = FakeModel

    fields = {
        'id': common_types.UUIDField(),
        'list_of_dicts_field': common_types.ListOfDictOfMiscValuesField(),
    }


def get_random_dscp_mark():
    return random.choice(constants.VALID_DSCP_MARKS)


def get_list_of_random_networks(num=10):
    for i in range(5):
        res = [lib_test_tools.get_random_ip_network() for i in range(num)]
        # make sure there are no duplicates
        if len(set(res)) == num:
            return res
    raise Exception('Failed to generate unique networks')


def get_random_domain_name():
    return '.'.join([
        helpers.get_random_string(62)[:random.choice(range(63))]
        for i in range(4)
    ])


def get_random_dict_of_strings():
    return {
        helpers.get_random_string(10): helpers.get_random_string(10)
        for i in range(10)
    }


def get_random_dict():
    return {
        helpers.get_random_string(6): helpers.get_random_string(6),
        helpers.get_random_string(6): tools.get_random_boolean(),
        helpers.get_random_string(6): tools.get_random_integer(),
        helpers.get_random_string(6): [
            tools.get_random_integer(),
            helpers.get_random_string(6),
            tools.get_random_boolean(),
        ],
        helpers.get_random_string(6): {
            helpers.get_random_string(6): helpers.get_random_string(6)
        }
    }


def get_random_dicts_list():
    return [get_random_dict() for _ in range(5)]


def get_set_of_random_uuids():
    return {
        uuidutils.generate_uuid()
        for i in range(10)
    }


# NOTE: The keys in this dictionary are in alphabetic order.
FIELD_TYPE_VALUE_GENERATOR_MAP = {
    common_types.DictOfMiscValuesField: get_random_dict,
    common_types.ListOfDictOfMiscValuesField: get_random_dicts_list,
    common_types.DomainNameField: get_random_domain_name,
    common_types.DscpMarkField: get_random_dscp_mark,
    common_types.EtherTypeEnumField: tools.get_random_ether_type,
    common_types.FloatingIPStatusEnumField:
        tools.get_random_floatingip_status,
    common_types.FlowDirectionEnumField: tools.get_random_flow_direction,
    common_types.HARouterEnumField: tools.get_random_ha_states,
    common_types.IpamAllocationStatusEnumField: tools.get_random_ipam_status,
    common_types.IPNetworkField: lib_test_tools.get_random_ip_network,
    common_types.IPNetworkPrefixLenField: tools.get_random_prefixlen,
    common_types.IPV6ModeEnumField: tools.get_random_ipv6_mode,
    common_types.IPVersionEnumField: tools.get_random_ip_version,
    common_types.IpProtocolEnumField: tools.get_random_ip_protocol,
    common_types.ListOfIPNetworksField: get_list_of_random_networks,
    common_types.MACAddressField: lib_test_tools.get_random_EUI,
    common_types.NetworkSegmentRangeNetworkTypeEnumField:
        tools.get_random_network_segment_range_network_type,
    common_types.PortBindingStatusEnumField:
        tools.get_random_port_binding_statuses,
    common_types.PortRangeField: tools.get_random_port,
    common_types.PortRangeWith0Field: lambda: tools.get_random_port(0),
    common_types.RouterStatusEnumField: tools.get_random_router_status,
    common_types.SetOfUUIDsField: get_set_of_random_uuids,
    common_types.UUIDField: uuidutils.generate_uuid,
    common_types.VlanIdRangeField: tools.get_random_vlan,
    event_types.SecurityEventField: tools.get_random_security_event,
    obj_fields.BooleanField: tools.get_random_boolean,
    obj_fields.DateTimeField: tools.get_random_datetime,
    obj_fields.DictOfStringsField: get_random_dict_of_strings,
    obj_fields.IPAddressField: tools.get_random_ip_address,
    obj_fields.IPV4AddressField: lambda: tools.get_random_ip_address(
        version=constants.IP_VERSION_4),
    obj_fields.IntegerField: tools.get_random_integer,
    obj_fields.ListOfObjectsField: lambda: [],
    obj_fields.ListOfStringsField: tools.get_random_string_list,
    obj_fields.ObjectField: lambda: None,
    obj_fields.StringField: lambda: helpers.get_random_string(10),
}


def get_obj_persistent_fields(obj):
    return {field: getattr(obj, field)
            for field in obj.fields
            if field not in obj.synthetic_fields
            if field in obj}


def get_value(generator, version):
    if 'version' in generator.__code__.co_varnames:
        return generator(version=version)
    return generator()
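
# (Editorial note, not in the original file.) get_value() lets a generator
# opt in to version awareness purely by naming a parameter 'version'; any
# generator without such a parameter is called with no arguments. A
# hypothetical IP-version-aware generator would be picked up automatically:
#
#     def get_random_cidr(version):
#         # return a v4 or v6 network depending on the ip_version chosen
#         # once per get_random_object_fields() call
#         ...
#
#     get_value(get_random_cidr, 4)          # -> get_random_cidr(version=4)
#     get_value(uuidutils.generate_uuid, 4)  # -> generate_uuid()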

def remove_timestamps_from_fields(obj_fields, cls_fields):
    obj_fields_result = obj_fields.copy()
    for ts_field in TIMESTAMP_FIELDS:
        if ts_field in cls_fields.keys() and cls_fields[ts_field].nullable:
            obj_fields_result.pop(ts_field)
    return obj_fields_result


def get_non_synthetic_fields(objclass, obj_fields):
    return {field: value for field, value in obj_fields.items()
            if not objclass.is_synthetic(field)}


class _BaseObjectTestCase(object):

    _test_class = FakeNeutronDbObject

    def setUp(self):
        super(_BaseObjectTestCase, self).setUp()
        # make sure all objects are loaded and registered in the registry
        objects.register_objects()

        self.context = context.get_admin_context()
        self._unique_tracker = collections.defaultdict(set)
        self.locked_obj_fields = collections.defaultdict(set)
        self.db_objs = [
            self._test_class.db_model(**self.get_random_db_fields())
            for _ in range(3)
        ]
        # TODO(ihrachys) remove obj_fields since they duplicate self.objs
        self.obj_fields = [self._test_class.modify_fields_from_db(db_obj)
                           for db_obj in self.db_objs]
        self.objs = [
            self._test_class(self.context, **fields)
            for fields in self.obj_fields
        ]
        invalid_fields = (
            set(self._test_class.synthetic_fields).union(
                set(TIMESTAMP_FIELDS))
        )
        self.valid_field = [f for f in self._test_class.fields
                            if f not in invalid_fields][0]
        self.valid_field_filter = {
            self.valid_field: self.obj_fields[-1].get(self.valid_field)}
        self.obj_registry = self.useFixture(
            NeutronObjectRegistryFixture())
        self.obj_registry.register(FakeSmallNeutronObject)
        self.obj_registry.register(FakeWeirdKeySmallNeutronObject)
        self.obj_registry.register(FakeNeutronObjectMultipleForeignKeys)
        synthetic_obj_fields = self.get_random_db_fields(
            FakeSmallNeutronObject)
        self.model_map = {
            self._test_class.db_model: self.db_objs,
            ObjectFieldsModel: [ObjectFieldsModel(**synthetic_obj_fields)]}

    def _get_random_update_fields(self):
        return self.get_updatable_fields(
            self.get_random_object_fields(self._test_class))

    def get_random_object_fields(self, obj_cls=None):
        obj_cls = obj_cls or self._test_class
        fields = {}
        ip_version = tools.get_random_ip_version()
        for field, field_obj in obj_cls.fields.items():
            if field not in obj_cls.synthetic_fields:
                generator = FIELD_TYPE_VALUE_GENERATOR_MAP[type(field_obj)]
                fields[field] = get_value(generator, ip_version)
        for k, v in self.locked_obj_fields.items():
            if k in fields:
                fields[k] = v
        for keys in obj_cls.unique_keys:
            keytup = tuple(keys)
            unique_values = tuple(fields[k] for k in keytup)
            if unique_values in self._unique_tracker[keytup]:
                # if you get a recursion depth error here, it means
                # your random generator didn't generate unique values
                return self.get_random_object_fields(obj_cls)
            self._unique_tracker[keytup].add(unique_values)
        return fields

    def get_random_db_fields(self, obj_cls=None):
        obj_cls = obj_cls or self._test_class
        return obj_cls.modify_fields_to_db(
            self.get_random_object_fields(obj_cls))

    def update_obj_fields(self, values_dict,
                          db_objs=None, obj_fields=None, objs=None):
        '''Update values for test objects with specific values.

        The default behaviour is to use random values for all fields of the
        test objects. Sometimes that's not practical, for example when some
        fields, often those referencing other objects, require non-random
        values (None, or UUIDs of valid objects). If that's the case, a test
        subclass may call this method to override some field values for the
        test objects.

        Receives a single ``values_dict`` dict argument where keys are names
        of test class fields, and values are either actual values for the
        keys, or callables that will be used to generate different values
        for each test object.

        Note: if a value is a dict itself, the method will recursively update
        the corresponding embedded objects.
        '''
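        # (Editorial usage sketch, not in the original docstring.) E.g. a
        # subclass whose object references a port might pin the FK field
        # with a callable so every generated object gets its own port:
        #
        #     self.update_obj_fields(
        #         {'port_id': lambda: self._create_test_port_id()})
        #
        # while a literal value pins all generated objects to one value:
        #
        #     self.update_obj_fields({'ip_version': 4})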
''' # TODO(ihrachys) make the method update db_objs to keep generated test # objects unique despite new locked fields for k, v in values_dict.items(): for db_obj, fields, obj in zip( db_objs or self.db_objs, obj_fields or self.obj_fields, objs or self.objs): val = v() if callable(v) else v db_obj_key = obj.fields_need_translation.get(k, k) if isinstance(val, collections.Mapping): self.update_obj_fields( val, db_obj[db_obj_key], fields[k], obj[k]) else: db_obj[db_obj_key] = val fields[k] = val obj[k] = val if k in self.valid_field_filter: self.valid_field_filter[k] = val self.locked_obj_fields[k] = v() if callable(v) else v @classmethod def generate_object_keys(cls, obj_cls, field_names=None): if field_names is None: field_names = obj_cls.primary_keys keys = {} for field in field_names: field_obj = obj_cls.fields[field] generator = FIELD_TYPE_VALUE_GENERATOR_MAP[type(field_obj)] keys[field] = generator() return keys def get_updatable_fields(self, fields): return obj_utils.get_updatable_fields(self._test_class, fields) @classmethod def _is_test_class(cls, obj): return isinstance(obj, cls._test_class) def fake_get_objects(self, obj_cls, context, **kwargs): return self.model_map[obj_cls.db_model] def fake_get_values(self, obj_cls, context, field, **kwargs): return [model.get(field) for model in self.model_map[obj_cls.db_model]] def _get_object_synthetic_fields(self, objclass): return [field for field in objclass.synthetic_fields if objclass.is_object_field(field)] def _get_ovo_object_class(self, objclass, field): try: name = objclass.fields[field].objname return base.NeutronObjectRegistry.obj_classes().get(name)[0] except TypeError: # NOTE(korzen) some synthetic fields are not handled by # this method, for example the ones that have subclasses, see # QosRule return class BaseObjectIfaceTestCase(_BaseObjectTestCase, test_base.BaseTestCase): def setUp(self): super(BaseObjectIfaceTestCase, self).setUp() self.model_map = collections.defaultdict(list) self.model_map[self._test_class.db_model] = self.db_objs self.pager_map = collections.defaultdict(lambda: None) self.extra_fields_not_in_dict = [] self.get_objects_mock = mock.patch.object( obj_db_api, 'get_objects', side_effect=self.fake_get_objects).start() self.get_object_mock = mock.patch.object( obj_db_api, 'get_object', side_effect=self.fake_get_object).start() # NOTE(ihrachys): for matters of basic object behaviour validation, # mock out rbac code accessing database. There are separate tests that # cover RBAC, per object type. 
if self._test_class.rbac_db_cls is not None: if getattr(self._test_class.rbac_db_cls, 'db_model', None): mock.patch.object( rbac_db.RbacNeutronDbObjectMixin, 'is_shared_with_tenant', return_value=False).start() mock.patch.object( rbac_db.RbacNeutronDbObjectMixin, 'get_shared_with_tenant', return_value=False).start() def fake_get_object(self, context, model, **kwargs): objs = self.model_map[model] if not objs: return None return [obj for obj in objs if obj['id'] == kwargs['id']][0] def fake_get_objects(self, obj_cls, context, **kwargs): return self.model_map[obj_cls.db_model] # TODO(ihrachys) document the intent of all common test cases in docstrings def test_get_object(self, context=None): if context is None: context = self.context with mock.patch.object( obj_db_api, 'get_object', return_value=self.db_objs[0]) as get_object_mock: with mock.patch.object(obj_db_api, 'get_objects', side_effect=self.fake_get_objects): obj_keys = self.generate_object_keys(self._test_class) obj = self._test_class.get_object(self.context, **obj_keys) self.assertTrue(self._is_test_class(obj)) self._check_equal(self.objs[0], obj) get_object_mock.assert_called_once_with( self._test_class, context, **self._test_class.modify_fields_to_db(obj_keys)) def test_get_object_missing_object(self): with mock.patch.object(obj_db_api, 'get_object', return_value=None): obj_keys = self.generate_object_keys(self._test_class) obj = self._test_class.get_object(self.context, **obj_keys) self.assertIsNone(obj) def test_get_object_missing_primary_key(self): non_unique_fields = (set(self._test_class.fields.keys()) - set(self._test_class.primary_keys) - set(itertools.chain.from_iterable( self._test_class.unique_keys))) obj_keys = self.generate_object_keys(self._test_class, non_unique_fields) exception = self.assertRaises(o_exc.NeutronPrimaryKeyMissing, self._test_class.get_object, self.context, **obj_keys) self.assertIn(self._test_class.__name__, str(exception)) def test_get_object_unique_key(self): if not self._test_class.unique_keys: self.skipTest('No unique keys found in test class %r' % self._test_class) for unique_keys in self._test_class.unique_keys: with mock.patch.object(obj_db_api, 'get_object', return_value=self.db_objs[0]) \ as get_object_mock: with mock.patch.object(obj_db_api, 'get_objects', side_effect=self.fake_get_objects): obj_keys = self.generate_object_keys(self._test_class, unique_keys) obj = self._test_class.get_object(self.context, **obj_keys) self.assertTrue(self._is_test_class(obj)) self._check_equal(self.objs[0], obj) get_object_mock.assert_called_once_with( self._test_class, mock.ANY, **self._test_class.modify_fields_to_db(obj_keys)) def _get_synthetic_fields_get_objects_calls(self, db_objs): mock_calls = [] for db_obj in db_objs: for field in self._test_class.synthetic_fields: if self._test_class.is_object_field(field): obj_class = self._get_ovo_object_class(self._test_class, field) filter_kwargs = { obj_class.fields_need_translation.get(k, k): db_obj[v] for k, v in obj_class.foreign_keys.get( self._test_class.__name__).items() } mock_calls.append( mock.call( obj_class, self.context, _pager=self.pager_map[obj_class.obj_name()], **filter_kwargs)) return mock_calls def test_get_objects(self, context=None): if context is None: context = self.context '''Test that get_objects fetches data from database.''' with mock.patch.object( obj_db_api, 'get_objects', side_effect=self.fake_get_objects) as get_objects_mock: objs = self._test_class.get_objects(self.context) self.assertItemsEqual( [get_obj_persistent_fields(obj) 
for obj in self.objs], [get_obj_persistent_fields(obj) for obj in objs]) get_objects_mock.assert_any_call( self._test_class, context, _pager=self.pager_map[self._test_class.obj_name()] ) def test_get_objects_valid_fields(self): '''Test that a valid filter does not raise an error.''' with mock.patch.object( obj_db_api, 'get_objects', side_effect=self.fake_get_objects): self._test_class.get_objects(self.context, **self.valid_field_filter) def test_get_objects_mixed_fields(self): synthetic_fields = ( set(self._test_class.synthetic_fields) - self._test_class.extra_filter_names ) if not synthetic_fields: self.skipTest('No synthetic fields that are not extra filters ' 'found in test class %r' % self._test_class) filters = copy.copy(self.valid_field_filter) filters[synthetic_fields.pop()] = 'xxx' with mock.patch.object(obj_db_api, 'get_objects', return_value=self.db_objs): self.assertRaises(n_exc.InvalidInput, self._test_class.get_objects, self.context, **filters) def test_get_objects_synthetic_fields_not_extra_filters(self): synthetic_fields = ( set(self._test_class.synthetic_fields) - self._test_class.extra_filter_names ) if not synthetic_fields: self.skipTest('No synthetic fields that are not extra filters ' 'found in test class %r' % self._test_class) with mock.patch.object(obj_db_api, 'get_objects', side_effect=self.fake_get_objects): self.assertRaises(n_exc.InvalidInput, self._test_class.get_objects, self.context, **{synthetic_fields.pop(): 'xxx'}) def test_get_objects_invalid_fields(self): with mock.patch.object(obj_db_api, 'get_objects', side_effect=self.fake_get_objects): self.assertRaises(n_exc.InvalidInput, self._test_class.get_objects, self.context, fake_field='xxx') def test_get_objects_without_validate_filters(self): with mock.patch.object( obj_db_api, 'get_objects', side_effect=self.fake_get_objects): objs = self._test_class.get_objects(self.context, validate_filters=False, unknown_filter='value') self.assertItemsEqual( [get_obj_persistent_fields(obj) for obj in self.objs], [get_obj_persistent_fields(obj) for obj in objs]) def test_get_values(self): field = self.valid_field db_field = self._test_class.fields_need_translation.get(field, field) with mock.patch.object( obj_db_api, 'get_values', side_effect=self.fake_get_values) as get_values_mock: values = self._test_class.get_values(self.context, field) self.assertItemsEqual( [getattr(obj, field) for obj in self.objs], values) get_values_mock.assert_any_call( self._test_class, self.context, db_field ) def test_get_values_with_validate_filters(self): field = self.valid_field with mock.patch.object( obj_db_api, 'get_values', side_effect=self.fake_get_values): self._test_class.get_values(self.context, field, **self.valid_field_filter) def test_get_values_without_validate_filters(self): field = self.valid_field with mock.patch.object( obj_db_api, 'get_values', side_effect=self.fake_get_values): values = self._test_class.get_values(self.context, field, validate_filters=False, unknown_filter='value') self.assertItemsEqual( [getattr(obj, field) for obj in self.objs], values) def test_get_values_mixed_field(self): synthetic_fields = ( set(self._test_class.synthetic_fields) - self._test_class.extra_filter_names ) if not synthetic_fields: self.skipTest('No synthetic fields that are not extra filters ' 'found in test class %r' % self._test_class) field = synthetic_fields.pop() with mock.patch.object(obj_db_api, 'get_values', side_effect=self.fake_get_values): self.assertRaises(n_exc.InvalidInput, self._test_class.get_values, self.context, 
field) def test_get_values_invalid_field(self): field = 'fake_field' with mock.patch.object(obj_db_api, 'get_values', side_effect=self.fake_get_values): self.assertRaises(n_exc.InvalidInput, self._test_class.get_values, self.context, field) @mock.patch.object(obj_db_api, 'update_object', return_value={}) @mock.patch.object(obj_db_api, 'update_objects', return_value=0) def test_update_objects_valid_fields(self, *mocks): '''Test that a valid filter does not raise an error.''' self._test_class.update_objects( self.context, {}, **self.valid_field_filter) def test_update_objects_invalid_fields(self): with mock.patch.object(obj_db_api, 'update_objects'): self.assertRaises(n_exc.InvalidInput, self._test_class.update_objects, self.context, {}, fake_field='xxx') @mock.patch.object(obj_db_api, 'update_objects') @mock.patch.object(obj_db_api, 'update_object', return_value={}) def test_update_objects_without_validate_filters(self, *mocks): self._test_class.update_objects( self.context, {'unknown_filter': 'new_value'}, validate_filters=False, unknown_filter='value') def _prep_string_field(self): self.filter_string_field = None # find the first string field to use as string matching filter for field in self.obj_fields[0]: if isinstance(field, obj_fields.StringField): self.filter_string_field = field break if self.filter_string_field is None: self.skipTest('There is no string field in this object') def test_get_objects_with_string_matching_filters_contains(self): self._prep_string_field() filter_dict_contains = { self.filter_string_field: obj_utils.StringContains( "random_thing")} with mock.patch.object( obj_db_api, 'get_objects', side_effect=self.fake_get_objects): res = self._test_class.get_objects(self.context, **filter_dict_contains) self.assertEqual([], res) def test_get_objects_with_string_matching_filters_starts(self): self._prep_string_field() filter_dict_starts = { self.filter_string_field: obj_utils.StringStarts( "random_thing") } with mock.patch.object( obj_db_api, 'get_objects', side_effect=self.fake_get_objects): res = self._test_class.get_objects(self.context, **filter_dict_starts) self.assertEqual([], res) def test_get_objects_with_string_matching_filters_ends(self): self._prep_string_field() filter_dict_ends = { self.filter_string_field: obj_utils.StringEnds( "random_thing") } with mock.patch.object( obj_db_api, 'get_objects', side_effect=self.fake_get_objects): res = self._test_class.get_objects(self.context, **filter_dict_ends) self.assertEqual([], res) def test_delete_objects(self): '''Test that delete_objects calls to underlying db_api.''' with mock.patch.object( obj_db_api, 'delete_objects', return_value=0 ) as delete_objects_mock: self.assertEqual(0, self._test_class.delete_objects(self.context)) delete_objects_mock.assert_any_call( self._test_class, self.context) def test_delete_objects_valid_fields(self): '''Test that a valid filter does not raise an error.''' with mock.patch.object(obj_db_api, 'delete_objects', return_value=0): self._test_class.delete_objects(self.context, **self.valid_field_filter) def test_delete_objects_invalid_fields(self): with mock.patch.object(obj_db_api, 'delete_objects'): self.assertRaises(n_exc.InvalidInput, self._test_class.delete_objects, self.context, fake_field='xxx') def test_delete_objects_without_validate_filters(self): with mock.patch.object( obj_db_api, 'delete_objects'): self._test_class.delete_objects(self.context, validate_filters=False, unknown_filter='value') def test_count(self): if not isinstance(self._test_class, base.NeutronDbObject): 
self.skipTest('Class %s does not inherit from NeutronDbObject' % self._test_class) expected = 10 with mock.patch.object(obj_db_api, 'count', return_value=expected): self.assertEqual(expected, self._test_class.count(self.context)) def test_count_invalid_fields(self): self.assertRaises(n_exc.InvalidInput, self._test_class.count, self.context, fake_field='xxx') def _check_equal(self, expected, observed): self.assertItemsEqual(get_obj_persistent_fields(expected), get_obj_persistent_fields(observed)) def test_count_validate_filters_false(self): if not isinstance(self._test_class, base.NeutronDbObject): self.skipTest('Class %s does not inherit from NeutronDbObject' % self._test_class) expected = 10 with mock.patch.object(obj_db_api, 'count', return_value=expected): self.assertEqual(expected, self._test_class.count(self.context, validate_filters=False, fake_field='xxx')) # Adding delete_objects mock because some objects are using delete_objects # while calling create(), Port for example @mock.patch.object(obj_db_api, 'delete_objects') def test_create(self, *mocks): with mock.patch.object(obj_db_api, 'create_object', return_value=self.db_objs[0]) as create_mock: with mock.patch.object(obj_db_api, 'get_objects', side_effect=self.fake_get_objects): obj = self._test_class(self.context, **self.obj_fields[0]) self._check_equal(self.objs[0], obj) obj.create() self._check_equal(self.objs[0], obj) create_mock.assert_called_once_with( obj, self.context, self._test_class.modify_fields_to_db( get_obj_persistent_fields(self.objs[0]))) # Adding delete_objects mock because some objects are using delete_objects # while calling create(), Port for example @mock.patch.object(obj_db_api, 'delete_objects') def test_create_updates_from_db_object(self, *mocks): with mock.patch.object(obj_db_api, 'create_object', return_value=self.db_objs[0]): with mock.patch.object(obj_db_api, 'get_objects', side_effect=self.fake_get_objects): self.objs[1].create() self._check_equal(self.objs[0], self.objs[1]) # Adding delete_objects mock because some objects are using delete_objects # while calling create(), Port for example @mock.patch.object(obj_db_api, 'delete_objects') def test_create_duplicates(self, delete_object): with mock.patch.object(obj_db_api, 'create_object', side_effect=obj_exc.DBDuplicateEntry): obj = self._test_class(self.context, **self.obj_fields[0]) self.assertRaises(o_exc.NeutronDbObjectDuplicateEntry, obj.create) def test_update_fields(self): if not self._test_class.primary_keys: self.skipTest( 'Test class %r has no primary keys' % self._test_class) with mock.patch.object(obj_base.VersionedObject, 'obj_reset_changes'): expected = self._test_class(self.context, **self.obj_fields[0]) for key, val in self.obj_fields[1].items(): if key not in expected.fields_no_update: setattr(expected, key, val) observed = self._test_class(self.context, **self.obj_fields[0]) observed.update_fields(self.obj_fields[1], reset_changes=True) self.assertEqual(expected, observed) self.assertTrue(observed.obj_reset_changes.called) with mock.patch.object(obj_base.VersionedObject, 'obj_reset_changes'): obj = self._test_class(self.context, **self.obj_fields[0]) obj.update_fields(self.obj_fields[1]) self.assertFalse(obj.obj_reset_changes.called) def test_extra_fields(self): if not len(self._test_class.obj_extra_fields): self.skipTest( 'Test class %r has no obj_extra_fields' % self._test_class) obj = self._test_class(self.context, **self.obj_fields[0]) for field in self._test_class.obj_extra_fields: # field is accessible and cannot be set by any 
value getattr(obj, field) if field in self.extra_fields_not_in_dict: self.assertNotIn(field, obj.to_dict().keys()) else: self.assertIn(field, obj.to_dict().keys()) self.assertRaises(AttributeError, setattr, obj, field, "1") def test_to_dict_makes_primitive_field_value(self): obj = self._test_class(self.context, **self.obj_fields[0]) dict_ = obj.to_dict() for k, v in dict_.items(): if k not in obj.fields: continue field = obj.fields[k] self.assertEqual(v, field.to_primitive(obj, k, getattr(obj, k))) def test_to_dict_with_unset_project_id(self): if 'project_id' not in self._test_class.fields: self.skipTest( 'Test class %r has no project_id in fields' % self._test_class) obj_data = copy.copy(self.obj_fields[0]) obj_data.pop('project_id') obj = self._test_class(self.context, **obj_data) dict_ = obj.to_dict() self.assertNotIn('project_id', dict_) self.assertNotIn('tenant_id', dict_) def test_fields_no_update(self): obj = self._test_class(self.context, **self.obj_fields[0]) for field in self._test_class.fields_no_update: self.assertTrue(hasattr(obj, field)) def test_get_tenant_id(self): if not hasattr(self._test_class, 'project_id'): self.skipTest( 'Test class %r has no project_id field' % self._test_class) obj = self._test_class(self.context, **self.obj_fields[0]) project_id = self.obj_fields[0]['project_id'] self.assertEqual(project_id, obj.tenant_id) # Adding delete_objects mock because some objects are using delete_objects # while calling update(), Port for example @mock.patch.object(obj_db_api, 'delete_objects') @mock.patch.object(obj_db_api, 'update_object') def test_update_changes(self, update_mock, del_mock): fields_to_update = self.get_updatable_fields( self._test_class.modify_fields_from_db(self.db_objs[0])) if not fields_to_update: self.skipTest('No updatable fields found in test class %r' % self._test_class) with mock.patch.object(base.NeutronDbObject, '_get_changed_persistent_fields', return_value=fields_to_update): with mock.patch.object(obj_db_api, 'get_objects', side_effect=self.fake_get_objects): obj = self._test_class(self.context, **self.obj_fields[0]) # get new values and fix keys update_mock.return_value = self.db_objs[1] fixed_keys = self._test_class.modify_fields_to_db( obj._get_composite_keys()) for key, value in fixed_keys.items(): update_mock.return_value[key] = value obj.update() update_mock.assert_called_once_with( obj, self.context, self._test_class.modify_fields_to_db(fields_to_update), **fixed_keys) @mock.patch.object(base.NeutronDbObject, '_get_changed_persistent_fields', return_value={'a': 'a', 'b': 'b', 'c': 'c'}) def test_update_changes_forbidden(self, *mocks): with mock.patch.object( self._test_class, 'fields_no_update', new_callable=mock.PropertyMock(return_value=['a', 'c']), create=True): obj = self._test_class(self.context, **self.obj_fields[0]) self.assertRaises(o_exc.NeutronObjectUpdateForbidden, obj.update) # Adding delete_objects mock because some objects are using delete_objects # while calling update(), Port and Network for example @mock.patch.object(obj_db_api, 'delete_objects') def test_update_updates_from_db_object(self, *mocks): with mock.patch.object(obj_db_api, 'update_object', return_value=self.db_objs[0]): with mock.patch.object(obj_db_api, 'get_objects', side_effect=self.fake_get_objects): obj = self._test_class(self.context, **self.obj_fields[1]) fields_to_update = self.get_updatable_fields( self.obj_fields[1]) if not fields_to_update: self.skipTest('No updatable fields found in test ' 'class %r' % self._test_class) with 
mock.patch.object(base.NeutronDbObject, '_get_changed_persistent_fields', return_value=fields_to_update): with mock.patch.object(obj_db_api, 'get_objects', side_effect=self.fake_get_objects): obj.update() self._check_equal(self.objs[0], obj) @mock.patch.object(obj_db_api, 'delete_object') def test_delete(self, delete_mock): obj = self._test_class(self.context, **self.obj_fields[0]) self._check_equal(self.objs[0], obj) obj.delete() self._check_equal(self.objs[0], obj) delete_mock.assert_called_once_with( obj, self.context, **self._test_class.modify_fields_to_db(obj._get_composite_keys())) @mock.patch(OBJECTS_BASE_OBJ_FROM_PRIMITIVE) def test_clean_obj_from_primitive(self, get_prim_m): expected_obj = get_prim_m.return_value observed_obj = self._test_class.clean_obj_from_primitive('foo', 'bar') self.assertIs(expected_obj, observed_obj) self.assertTrue(observed_obj.obj_reset_changes.called) def test_update_primary_key_forbidden_fail(self): obj = self._test_class(self.context, **self.obj_fields[0]) obj.obj_reset_changes() if not self._test_class.primary_keys: self.skipTest( 'All non-updatable fields found in test class %r ' 'are primary keys' % self._test_class) for key, val in self.obj_fields[0].items(): if key in self._test_class.primary_keys: setattr(obj, key, val) self.assertRaises(o_exc.NeutronObjectUpdateForbidden, obj.update) def test_to_dict_synthetic_fields(self): cls_ = self._test_class object_fields = self._get_object_synthetic_fields(cls_) if not object_fields: self.skipTest( 'No object fields found in test class %r' % cls_) for field in object_fields: obj = cls_(self.context, **self.obj_fields[0]) objclass = self._get_ovo_object_class(cls_, field) if not objclass: continue child = objclass( self.context, **objclass.modify_fields_from_db( self.get_random_db_fields(obj_cls=objclass)) ) child_dict = child.to_dict() if isinstance(cls_.fields[field], obj_fields.ListOfObjectsField): setattr(obj, field, [child]) dict_ = obj.to_dict() self.assertEqual([child_dict], dict_[field]) else: setattr(obj, field, child) dict_ = obj.to_dict() self.assertEqual(child_dict, dict_[field]) def test_get_objects_pager_is_passed_through(self): with mock.patch.object(obj_db_api, 'get_objects') as get_objects: pager = base.Pager() self._test_class.get_objects(self.context, _pager=pager) get_objects.assert_called_once_with( self._test_class, mock.ANY, _pager=pager) class BaseDbObjectNonStandardPrimaryKeyTestCase(BaseObjectIfaceTestCase): _test_class = FakeNeutronObjectNonStandardPrimaryKey class BaseDbObjectNewEngineFacade(BaseObjectIfaceTestCase): _test_class = FakeSmallNeutronObjectNewEngineFacade class BaseDbObjectCompositePrimaryKeyTestCase(BaseObjectIfaceTestCase): _test_class = FakeNeutronObjectCompositePrimaryKey class BaseDbObjectUniqueKeysTestCase(BaseObjectIfaceTestCase): _test_class = FakeNeutronObjectUniqueKey class UniqueKeysTestCase(test_base.BaseTestCase): def test_class_creation(self): m_get_unique_keys = mock.patch.object(db_utils, 'get_unique_keys') with m_get_unique_keys as get_unique_keys: get_unique_keys.return_value = [['field1'], ['field2', 'db_field3']] @base.NeutronObjectRegistry.register_if(False) class UniqueKeysTestObject(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = FakeModel primary_keys = ['id'] fields = { 'id': common_types.UUIDField(), 'field1': common_types.UUIDField(), 'field2': common_types.UUIDField(), 'field3': common_types.UUIDField(), } fields_need_translation = {'field3': 'db_field3'} expected = {('field1',), ('field2', 'field3')} 
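        # NOTE: unique_keys are exposed using object field names, so the
        # db_field3 column reported by get_unique_keys() above is expected
        # to come back translated to field3 via fields_need_translation.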
observed = {tuple(sorted(key)) for key in UniqueKeysTestObject.unique_keys} self.assertEqual(expected, observed) class NeutronObjectCountTestCase(test_base.BaseTestCase): def test_count(self): expected = 10 self.assertEqual( expected, FakeNeutronObject.count(None, count=expected)) class BaseDbObjectCompositePrimaryKeyWithIdTestCase(BaseObjectIfaceTestCase): _test_class = FakeNeutronObjectCompositePrimaryKeyWithId class BaseDbObjectRenamedFieldTestCase(BaseObjectIfaceTestCase): _test_class = FakeNeutronObjectRenamedField class BaseObjectIfaceWithProjectIdTestCase(BaseObjectIfaceTestCase): _test_class = FakeNeutronObjectWithProjectId def test_update_fields_using_tenant_id(self): obj = self._test_class(self.context, **self.obj_fields[0]) obj.obj_reset_changes() tenant_id = obj['tenant_id'] new_obj_fields = dict() new_obj_fields['tenant_id'] = uuidutils.generate_uuid() new_obj_fields['field2'] = uuidutils.generate_uuid() obj.update_fields(new_obj_fields) self.assertEqual(set(['field2']), obj.obj_what_changed()) self.assertEqual(tenant_id, obj.project_id) def test_tenant_id_filter_added_when_project_id_present(self): self._test_class.get_objects( self.context, tenant_id=self.obj_fields[0]['project_id']) class BaseDbObjectMultipleForeignKeysTestCase(_BaseObjectTestCase, test_base.BaseTestCase): _test_class = FakeNeutronObjectSyntheticField def test_load_synthetic_db_fields_with_multiple_foreign_keys(self): obj = self._test_class(self.context, **self.obj_fields[0]) self.assertRaises(o_exc.NeutronSyntheticFieldMultipleForeignKeys, obj.load_synthetic_db_fields) class BaseDbObjectForeignKeysNotFoundTestCase(_BaseObjectTestCase, test_base.BaseTestCase): _test_class = FakeNeutronObjectSyntheticField2 def test_load_foreign_keys_not_belong_class(self): obj = self._test_class(self.context, **self.obj_fields[0]) self.assertRaises(o_exc.NeutronSyntheticFieldsForeignKeysNotFound, obj.load_synthetic_db_fields) class BaseDbObjectMultipleParentsForForeignKeysTestCase( _BaseObjectTestCase, test_base.BaseTestCase): _test_class = FakeParent def test_load_synthetic_db_fields_with_multiple_parents(self): child_cls = FakeSmallNeutronObjectWithMultipleParents self.obj_registry.register(child_cls) self.obj_registry.register(FakeParent) obj = self._test_class(self.context, **self.obj_fields[0]) fake_children = [ child_cls( self.context, **child_cls.modify_fields_from_db( self.get_random_db_fields(obj_cls=child_cls)) ) for _ in range(5) ] with mock.patch.object(child_cls, 'get_objects', return_value=fake_children) as get_objects: obj.load_synthetic_db_fields() get_objects.assert_called_once_with(self.context, field1=obj.id) self.assertEqual(fake_children, obj.children) class BaseObjectIfaceDictMiscValuesTestCase(_BaseObjectTestCase, test_base.BaseTestCase): _test_class = FakeNeutronObjectDictOfMiscValues def test_dict_of_misc_values(self): obj_id = uuidutils.generate_uuid() float_value = 1.23 misc_list = [True, float_value] obj_dict = { 'bool': True, 'float': float_value, 'misc_list': misc_list } obj = self._test_class(self.context, id=obj_id, dict_field=obj_dict) self.assertTrue(obj.dict_field['bool']) self.assertEqual(float_value, obj.dict_field['float']) self.assertEqual(misc_list, obj.dict_field['misc_list']) class BaseObjectIfaceListDictMiscValuesTestCase(_BaseObjectTestCase, test_base.BaseTestCase): _test_class = FakeNeutronObjectListOfDictOfMiscValues def test_list_of_dict_of_misc_values(self): obj_id = uuidutils.generate_uuid() float_value = 1.23 misc_list = [True, float_value] obj_dict = { 'bool': True, 
'float': float_value, 'misc_list': misc_list } obj = self._test_class( self.context, id=obj_id, list_of_dicts_field=[obj_dict]) self.assertEqual(1, len(obj.list_of_dicts_field)) self.assertTrue(obj.list_of_dicts_field[0]['bool']) self.assertEqual(float_value, obj.list_of_dicts_field[0]['float']) self.assertEqual(misc_list, obj.list_of_dicts_field[0]['misc_list']) class BaseDbObjectTestCase(_BaseObjectTestCase, test_db_base_plugin_v2.DbOperationBoundMixin): def setUp(self): super(BaseDbObjectTestCase, self).setUp() synthetic_fields = self._get_object_synthetic_fields(self._test_class) for synth_field in synthetic_fields: objclass = self._get_ovo_object_class(self._test_class, synth_field) if not objclass: continue for db_obj in self.db_objs: objclass_fields = self.get_random_db_fields(objclass) if isinstance(self._test_class.fields[synth_field], obj_fields.ObjectField): db_obj[synth_field] = objclass.db_model(**objclass_fields) else: db_obj[synth_field] = [ objclass.db_model(**objclass_fields) ] def _create_test_network(self, name='test-network1', network_id=None, qos_policy_id=None): network_id = (uuidutils.generate_uuid() if network_id is None else network_id) _network = net_obj.Network(self.context, name=name, id=network_id, qos_policy_id=qos_policy_id) _network.create() return _network def _create_test_network_id(self): return self._create_test_network( "test-network-%s" % helpers.get_random_string(4)).id def _create_external_network_id(self): test_network_id = self._create_test_network_id() ext_net = net_obj.ExternalNetwork(self.context, network_id=test_network_id) ext_net.create() return ext_net.network_id def _create_test_fip_id(self, fip_id=None): fake_fip = '172.23.3.0' ext_net_id = self._create_external_network_id() values = { 'floating_ip_address': netaddr.IPAddress(fake_fip), 'floating_network_id': ext_net_id, 'floating_port_id': self._create_test_port_id( network_id=ext_net_id) } if fip_id: values['id'] = fip_id fip_obj = router.FloatingIP(self.context, **values) fip_obj.create() return fip_obj.id def _create_test_subnet_id(self, network_id=None): if not network_id: network_id = self._create_test_network_id() test_subnet = { 'project_id': uuidutils.generate_uuid(), 'name': 'test-subnet1', 'network_id': network_id, 'ip_version': constants.IP_VERSION_4, 'cidr': netaddr.IPNetwork('10.0.0.0/24'), 'gateway_ip': '10.0.0.1', 'enable_dhcp': 1, 'ipv6_ra_mode': None, 'ipv6_address_mode': None } subnet_obj = subnet.Subnet(self.context, **test_subnet) subnet_obj.create() return subnet_obj.id def _create_test_port_id(self, **port_attrs): return self._create_test_port(**port_attrs)['id'] def _create_test_port(self, **port_attrs): if 'network_id' not in port_attrs: port_attrs['network_id'] = self._create_test_network_id() if not hasattr(self, '_mac_address_generator'): self._mac_address_generator = ( netaddr.EUI(":".join(["%02x" % i] * 6)) for i in itertools.count() ) if not hasattr(self, '_port_name_generator'): self._port_name_generator = ("test-port%d" % i for i in itertools.count(1)) attrs = {'project_id': uuidutils.generate_uuid(), 'admin_state_up': True, 'status': 'ACTIVE', 'device_id': 'fake_device', 'device_owner': 'fake_owner'} attrs.update(port_attrs) if 'name' not in attrs: attrs['name'] = next(self._port_name_generator) if 'mac_address' not in attrs: attrs['mac_address'] = next(self._mac_address_generator) port = ports.Port(self.context, **attrs) port.create() return port def _create_test_segment_id(self, network_id=None): attr = 
self.get_random_object_fields(net_obj.NetworkSegment) attr['network_id'] = network_id or self._create_test_network_id() segment = net_obj.NetworkSegment(self.context, **attr) segment.create() return segment.id def _create_test_router_id(self, router_id=None): attrs = { 'name': 'test_router', } if router_id: attrs['id'] = router_id self._router = router.Router(self.context, **attrs) self._router.create() return self._router['id'] def _create_test_security_group_id(self, fields=None): sg_fields = self.get_random_object_fields(securitygroup.SecurityGroup) fields = fields or {} for field, value in ((f, v) for (f, v) in fields.items() if f in sg_fields): sg_fields[field] = value _securitygroup = securitygroup.SecurityGroup( self.context, **sg_fields) _securitygroup.create() return _securitygroup.id def _create_test_agent_id(self): attrs = self.get_random_object_fields(obj_cls=agent.Agent) _agent = agent.Agent(self.context, **attrs) _agent.create() return _agent['id'] def _create_test_standard_attribute_id(self): attrs = { 'resource_type': helpers.get_random_string(4), 'revision_number': tools.get_random_integer() } return obj_db_api.create_object( stdattrs.StandardAttribute, self.context, attrs, populate_id=False)['id'] def _create_test_flavor_id(self): attrs = self.get_random_object_fields(obj_cls=flavor.Flavor) flavor_obj = flavor.Flavor(self.context, **attrs) flavor_obj.create() return flavor_obj.id def _create_test_service_profile_id(self): attrs = self.get_random_object_fields(obj_cls=flavor.ServiceProfile) service_profile_obj = flavor.ServiceProfile(self.context, **attrs) service_profile_obj.create() return service_profile_obj.id def _create_test_qos_policy(self, **qos_policy_attrs): _qos_policy = qos_policy.QosPolicy(self.context, **qos_policy_attrs) _qos_policy.create() return _qos_policy def test_get_standard_attr_id(self): if not self._test_class.has_standard_attributes(): self.skipTest( 'No standard attributes found in test class %r' % self._test_class) obj = self._make_object(self.obj_fields[0]) obj.create() model = self.context.session.query(obj.db_model).filter_by( **obj._get_composite_keys()).one() retrieved_obj = self._test_class.get_object( self.context, **obj._get_composite_keys()) self.assertIsNotNone(retrieved_obj.standard_attr_id) self.assertEqual( model.standard_attr_id, retrieved_obj.standard_attr_id) def _make_object(self, fields): fields = get_non_synthetic_fields(self._test_class, fields) return self._test_class(self.context, **remove_timestamps_from_fields( fields, self._test_class.fields)) def test_downgrade_to_1_0(self): for obj in self.objs: try: obj.obj_to_primitive(target_version='1.0') except exception.IncompatibleObjectVersion: # the only exception we should allow is IncompatibleVersion pass def test_get_object_create_update_delete(self): # Timestamps can't be initialized and multiple objects may use standard # attributes so we need to remove timestamps when creating objects obj = self._make_object(self.obj_fields[0]) obj.create() new = self._test_class.get_object(self.context, **obj._get_composite_keys()) self.assertEqual(obj, new) obj = new for key, val in self.get_updatable_fields(self.obj_fields[1]).items(): setattr(obj, key, val) obj.update() new = self._test_class.get_object(self.context, **obj._get_composite_keys()) self.assertEqual(obj, new) obj = new new.delete() new = self._test_class.get_object(self.context, **obj._get_composite_keys()) self.assertIsNone(new) def test_update_non_existent_object_raises_not_found(self): obj = 
self._make_object(self.obj_fields[0]) obj.obj_reset_changes() fields_to_update = self.get_updatable_fields(self.obj_fields[0]) if not fields_to_update: self.skipTest('No updatable fields found in test class %r' % self._test_class) for key, val in fields_to_update.items(): setattr(obj, key, val) self.assertRaises(n_exc.ObjectNotFound, obj.update) def test_delete_non_existent_object_raises_not_found(self): obj = self._make_object(self.obj_fields[0]) self.assertRaises(n_exc.ObjectNotFound, obj.delete) @mock.patch(SQLALCHEMY_COMMIT) def test_create_single_transaction(self, mock_commit): obj = self._make_object(self.obj_fields[0]) obj.create() self.assertEqual(1, mock_commit.call_count) def test_update_single_transaction(self): obj = self._make_object(self.obj_fields[0]) obj.create() fields_to_update = self.get_updatable_fields(self.obj_fields[1]) if not fields_to_update: self.skipTest('No updatable fields found in test class %r' % self._test_class) for key, val in fields_to_update.items(): setattr(obj, key, val) with mock.patch(SQLALCHEMY_COMMIT) as mock_commit: obj.update() self.assertEqual(1, mock_commit.call_count) def test_delete_single_transaction(self): obj = self._make_object(self.obj_fields[0]) obj.create() with mock.patch(SQLALCHEMY_COMMIT) as mock_commit: obj.delete() self.assertEqual(1, mock_commit.call_count) def _get_ro_txn_exit_func_name(self): # with no engine facade, we didn't have distinction between r/o and # r/w transactions and so we always call commit even for getters when # no facade is used return ( SQLALCHEMY_CLOSE if self._test_class._use_db_facade else SQLALCHEMY_COMMIT) def test_get_objects_single_transaction(self): with mock.patch(self._get_ro_txn_exit_func_name()) as mock_exit: with db_api.autonested_transaction(self.context.session): self._test_class.get_objects(self.context) self.assertEqual(1, mock_exit.call_count) def test_get_objects_single_transaction_enginefacade(self): with mock.patch(self._get_ro_txn_exit_func_name()) as mock_exit: with db_api.CONTEXT_READER.using(self.context): self._test_class.get_objects(self.context) self.assertEqual(1, mock_exit.call_count) def test_get_object_single_transaction(self): obj = self._make_object(self.obj_fields[0]) obj.create() with mock.patch(self._get_ro_txn_exit_func_name()) as mock_exit: with db_api.autonested_transaction(self.context.session): obj = self._test_class.get_object(self.context, **obj._get_composite_keys()) self.assertEqual(1, mock_exit.call_count) def test_get_object_single_transaction_enginefacade(self): obj = self._make_object(self.obj_fields[0]) obj.create() with mock.patch(self._get_ro_txn_exit_func_name()) as mock_exit: with db_api.CONTEXT_READER.using(self.context): obj = self._test_class.get_object(self.context, **obj._get_composite_keys()) self.assertEqual(1, mock_exit.call_count) def test_get_objects_supports_extra_filtername(self): self.filtered_args = None def foo_filter(query, filters): self.filtered_args = filters return query self.obj_registry.register(self._test_class) model_query.register_hook( self._test_class.db_model, 'foo_filter', query_hook=None, filter_hook=None, result_filters=foo_filter) base.register_filter_hook_on_model(self._test_class.db_model, 'foo') self._test_class.get_objects(self.context, foo=42) self.assertEqual({'foo': [42]}, self.filtered_args) def test_filtering_by_fields(self): obj = self._make_object(self.obj_fields[0]) obj.create() for field in get_obj_persistent_fields(obj): if not isinstance(obj[field], list): filters = {field: [obj[field]]} else: filters = 
{field: obj[field]} new = self._test_class.get_objects(self.context, **filters) self.assertItemsEqual( [obj._get_composite_keys()], [obj_._get_composite_keys() for obj_ in new], 'Filtering by %s failed.' % field) def _get_non_synth_fields(self, objclass, db_attrs): fields = objclass.modify_fields_from_db(db_attrs) fields = remove_timestamps_from_fields(fields, objclass.fields) fields = get_non_synthetic_fields(objclass, fields) return fields def _create_object_with_synthetic_fields(self, db_obj): cls_ = self._test_class object_fields = self._get_object_synthetic_fields(cls_) # create base object obj = cls_(self.context, **self._get_non_synth_fields(cls_, db_obj)) obj.create() # create objects that are going to be loaded into the base object # through synthetic fields for field in object_fields: objclass = self._get_ovo_object_class(cls_, field) if not objclass: continue # check that the stored database model does not have non-empty # relationships dbattr = obj.fields_need_translation.get(field, field) # Skipping empty relationships for the following reasons: # 1) db_obj have the related object loaded - In this case we do not # have to create the related objects and the loop can continue. # 2) when the related objects are not loaded - In this # case they need to be created because of the foreign key # relationships. But we still need to check whether the # relationships are loaded or not. That is achieved by the # assertTrue statement after retrieving the dbattr in # this method. if getattr(obj.db_obj, dbattr, None): continue if isinstance(cls_.fields[field], obj_fields.ObjectField): objclass_fields = self._get_non_synth_fields(objclass, db_obj[field]) else: objclass_fields = self._get_non_synth_fields(objclass, db_obj[field][0]) # make sure children point to the base object foreign_keys = objclass.foreign_keys.get(obj.__class__.__name__) for local_field, foreign_key in foreign_keys.items(): objclass_fields[local_field] = obj.get(foreign_key) # remember which fields were nullified so that later we know what # to assert for each child field nullified_fields = set() # cut off more depth levels to simplify object field generation # (for example, nullify segment field for PortBindingLevel objects # to avoid creating a Segment object (and back-linking it to the # original network of the port) for child_field in self._get_object_synthetic_fields(objclass): if objclass.fields[child_field].nullable: objclass_fields[child_field] = None nullified_fields.add(child_field) # initialize the child object synth_field_obj = objclass(self.context, **objclass_fields) # nullify nullable UUID fields since they may otherwise trigger # foreign key violations for field_name in get_obj_persistent_fields(synth_field_obj): child_field = objclass.fields[field_name] if child_field.nullable: if isinstance(child_field, common_types.UUIDField): synth_field_obj[field_name] = None nullified_fields.add(field_name) synth_field_obj.create() # reload the parent object under test obj = cls_.get_object(self.context, **obj._get_composite_keys()) # check that the stored database model now has correct attr values dbattr = obj.fields_need_translation.get(field, field) if field in nullified_fields: self.assertIsNone(getattr(obj.db_obj, dbattr, None)) else: self.assertIsNotNone(getattr(obj.db_obj, dbattr, None)) # reset the object so that we can compare it to other clean objects obj.obj_reset_changes([field]) return obj def _test_get_with_synthetic_fields(self, getter): object_fields = 
self._get_object_synthetic_fields(self._test_class) if not object_fields: self.skipTest( 'No synthetic object fields found ' 'in test class %r' % self._test_class ) obj = self._create_object_with_synthetic_fields(self.db_objs[0]) listed_obj = getter(self.context, **obj._get_composite_keys()) self.assertTrue(listed_obj) self.assertEqual(obj, listed_obj) def test_get_object_with_synthetic_fields(self): self._test_get_with_synthetic_fields(self._test_class.get_object) def test_get_objects_with_synthetic_fields(self): def getter(*args, **kwargs): objs = self._test_class.get_objects(*args, **kwargs) self.assertEqual(1, len(objs)) return objs[0] self._test_get_with_synthetic_fields(getter) # NOTE(korzen) _list method is used in neutron.tests.db.unit.db. # test_db_base_plugin_v2.DbOperationBoundMixin in _list_and_count_queries() # This is used in test_subnet for asserting that number of queries is # constant. It can be used also for port and network objects when ready. def _list(self, resource, neutron_context): cls_ = resource return cls_.get_objects(neutron_context) @test_base.unstable_test("bug 1775220") def test_get_objects_queries_constant(self): iter_db_obj = iter(self.db_objs) def _create(): return self._create_object_with_synthetic_fields(next(iter_db_obj)) self._assert_object_list_queries_constant(_create, self._test_class) def test_count(self): for fields in self.obj_fields: self._make_object(fields).create() self.assertEqual( len(self.obj_fields), self._test_class.count(self.context)) def test_count_validate_filters_false(self): for fields in self.obj_fields: self._make_object(fields).create() self.assertEqual( len(self.obj_fields), self._test_class.count(self.context, validate_filters=False, fake_filter='xxx')) def test_count_invalid_filters(self): for fields in self.obj_fields: self._make_object(fields).create() self.assertRaises(n_exc.InvalidInput, self._test_class.count, self.context, fake_field='xxx') def test_objects_exist(self): for fields in self.obj_fields: self._make_object(fields).create() self.assertTrue(self._test_class.objects_exist(self.context)) def test_objects_exist_false(self): self.assertFalse(self._test_class.objects_exist(self.context)) def test_objects_exist_validate_filters(self): self.assertRaises(n_exc.InvalidInput, self._test_class.objects_exist, self.context, fake_field='xxx') def test_objects_exist_validate_filters_false(self): for fields in self.obj_fields: self._make_object(fields).create() self.assertTrue(self._test_class.objects_exist( self.context, validate_filters=False, fake_filter='xxx')) def test_update_object(self): fields_to_update = self.get_updatable_fields( self.obj_fields[1]) if not fields_to_update: self.skipTest('No updatable fields found in test ' 'class %r' % self._test_class) for fields in self.obj_fields: self._make_object(fields).create() obj = self._test_class.get_objects( self.context, **self.valid_field_filter) for k, v in self.valid_field_filter.items(): self.assertEqual(v, obj[0][k]) new_values = self._get_random_update_fields() keys = self.objs[0]._get_composite_keys() updated_obj = self._test_class.update_object( self.context, new_values, **keys) # Check the correctness of the updated object for k, v in new_values.items(): self.assertEqual(v, updated_obj[k]) def test_update_objects(self): fields_to_update = self.get_updatable_fields( self.obj_fields[1]) if not fields_to_update: self.skipTest('No updatable fields found in test ' 'class %r' % self._test_class) for fields in self.obj_fields: self._make_object(fields).create() objs 
= self._test_class.get_objects( self.context, **self.valid_field_filter) for k, v in self.valid_field_filter.items(): self.assertEqual(v, objs[0][k]) count = self._test_class.update_objects( self.context, {}, **self.valid_field_filter) # we haven't updated anything, but got the number of matching records self.assertEqual(len(objs), count) # and the request hasn't changed the number of matching records new_objs = self._test_class.get_objects( self.context, **self.valid_field_filter) self.assertEqual(len(objs), len(new_objs)) # now update an object with new values new_values = self._get_random_update_fields() keys = self.objs[0]._get_composite_keys() count_updated = self._test_class.update_objects( self.context, new_values, **keys) self.assertEqual(1, count_updated) new_filter = keys.copy() new_filter.update(new_values) # check that we can fetch using new values new_objs = self._test_class.get_objects( self.context, **new_filter) self.assertEqual(1, len(new_objs)) def test_update_objects_nothing_to_update(self): fields_to_update = self.get_updatable_fields( self.obj_fields[1]) if not fields_to_update: self.skipTest('No updatable fields found in test ' 'class %r' % self._test_class) self.assertEqual( 0, self._test_class.update_objects(self.context, {})) def test_delete_objects(self): for fields in self.obj_fields: self._make_object(fields).create() objs = self._test_class.get_objects( self.context, **self.valid_field_filter) for k, v in self.valid_field_filter.items(): self.assertEqual(v, objs[0][k]) count = self._test_class.delete_objects( self.context, **self.valid_field_filter) self.assertEqual(len(objs), count) new_objs = self._test_class.get_objects(self.context) self.assertEqual(len(self.obj_fields) - len(objs), len(new_objs)) for obj in new_objs: for k, v in self.valid_field_filter.items(): self.assertNotEqual(v, obj[k]) def test_delete_objects_nothing_to_delete(self): self.assertEqual( 0, self._test_class.delete_objects(self.context)) def test_db_obj(self): obj = self._make_object(self.obj_fields[0]) self.assertIsNone(obj.db_obj) obj.create() self.assertIsNotNone(obj.db_obj) fields_to_update = self.get_updatable_fields(self.obj_fields[1]) if fields_to_update: old_fields = {} for key, val in fields_to_update.items(): db_model_attr = ( obj.fields_need_translation.get(key, key)) old_fields[db_model_attr] = obj.db_obj[db_model_attr] setattr(obj, key, val) obj.update() self.assertIsNotNone(obj.db_obj) for k, v in obj.modify_fields_to_db(fields_to_update).items(): if isinstance(obj.db_obj[k], orm.dynamic.AppenderQuery): self.assertIsInstance(v, list) else: self.assertEqual(v, obj.db_obj[k], '%s attribute differs' % k) obj.delete() self.assertIsNone(obj.db_obj) class UniqueObjectBase(test_base.BaseTestCase): def setUp(self): super(UniqueObjectBase, self).setUp() obj_registry = self.useFixture( NeutronObjectRegistryFixture()) self.db_model = FakeModel class RegisteredObject(base.NeutronDbObject): db_model = self.db_model self.registered_object = RegisteredObject obj_registry.register(self.registered_object) class GetObjectClassByModelTestCase(UniqueObjectBase): def setUp(self): super(GetObjectClassByModelTestCase, self).setUp() self.not_registered_object = FakeSmallNeutronObject def test_object_found_by_model(self): found_obj = base.get_object_class_by_model( self.registered_object.db_model) self.assertIs(self.registered_object, found_obj) def test_not_registed_object_raises_exception(self): with testtools.ExpectedException(o_exc.NeutronDbObjectNotFoundByModel): 
            base.get_object_class_by_model(
                self.not_registered_object.db_model)


class RegisterFilterHookOnModelTestCase(UniqueObjectBase):

    def test_filtername_is_added(self):
        filter_name = 'foo'
        self.assertNotIn(
            filter_name, self.registered_object.extra_filter_names)
        base.register_filter_hook_on_model(
            FakeNeutronDbObject.db_model, filter_name)
        self.assertIn(filter_name, self.registered_object.extra_filter_names)


class PagerTestCase(test_base.BaseTestCase):

    def test_comparison(self):
        pager = base.Pager(sorts=[('order', True)])
        pager2 = base.Pager(sorts=[('order', True)])
        self.assertEqual(pager, pager2)

        pager3 = base.Pager()
        self.assertNotEqual(pager, pager3)


class OperationOnStringAndJsonTestCase(test_base.BaseTestCase):

    def test_load_empty_string_to_json(self):
        for field_val in ['', None]:
            for default_val in [None, {}]:
                res = base.NeutronDbObject.load_json_from_str(field_val,
                                                              default_val)
                self.assertEqual(res, default_val)

    def test_dump_field_to_string(self):
        for field_val in [{}, None]:
            for default_val in ['', None]:
                res = base.NeutronDbObject.filter_to_json_str(field_val,
                                                              default_val)
                self.assertEqual(default_val, res)


class NeutronObjectValidatorTestCase(test_base.BaseTestCase):

    def test_load_wrong_synthetic_fields(self):
        try:
            @obj_base.VersionedObjectRegistry.register_if(False)
            class FakeNeutronObjectSyntheticFieldWrong(base.NeutronDbObject):
                # Version 1.0: Initial version
                VERSION = '1.0'

                db_model = FakeModel

                fields = {
                    'id': common_types.UUIDField(),
                    'obj_field': common_types.UUIDField()
                }

                synthetic_fields = ['obj_field', 'wrong_synthetic_field_name']
        except o_exc.NeutronObjectValidatorException as exc:
            self.assertIn('wrong_synthetic_field_name', str(exc))


# neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/test_conntrack_helper.py

# Copyright (c) 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.objects import conntrack_helper
from neutron.tests.unit.objects import test_base as obj_test_base
from neutron.tests.unit import testlib_api


class ConntrackHelperObjectTestCase(obj_test_base.BaseObjectIfaceTestCase):

    _test_class = conntrack_helper.ConntrackHelper


class ConntrackHelperDbObjectTestCase(obj_test_base.BaseDbObjectTestCase,
                                      testlib_api.SqlTestCase):

    _test_class = conntrack_helper.ConntrackHelper

    def setUp(self):
        super(ConntrackHelperDbObjectTestCase, self).setUp()
        self.update_obj_fields(
            {'router_id': lambda: self._create_test_router_id()})
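# A minimal, illustrative sketch (not part of the tree) of how a new
# object's unit tests are typically wired on top of the base classes above:
# one iface test case exercising the mocked-out DB layer, and one DB-backed
# test case that uses update_obj_fields() to pin foreign keys to real
# prerequisite objects. ``foo_bar`` and ``FooBar`` are hypothetical names,
# not real neutron modules or classes.
#
#     from neutron.objects import foo_bar
#     from neutron.tests.unit.objects import test_base as obj_test_base
#     from neutron.tests.unit import testlib_api
#
#
#     class FooBarIfaceObjectTestCase(obj_test_base.BaseObjectIfaceTestCase):
#
#         _test_class = foo_bar.FooBar
#
#
#     class FooBarDbObjectTestCase(obj_test_base.BaseDbObjectTestCase,
#                                  testlib_api.SqlTestCase):
#
#         _test_class = foo_bar.FooBar
#
#         def setUp(self):
#             super(FooBarDbObjectTestCase, self).setUp()
#             # a callable is invoked once per generated test object, so
#             # every FooBar points at its own freshly created router
#             self.update_obj_fields(
#                 {'router_id': lambda: self._create_test_router_id()})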
# neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/test_flavor.py

# Copyright 2016 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.objects import flavor
from neutron.tests.unit.objects import test_base as obj_test_base
from neutron.tests.unit import testlib_api


class FlavorServiceProfileBindingIfaceObjectTestCase(
        obj_test_base.BaseObjectIfaceTestCase):

    _test_class = flavor.FlavorServiceProfileBinding


class FlavorServiceProfileBindingDbObjectTestCase(
        obj_test_base.BaseDbObjectTestCase,
        testlib_api.SqlTestCase):

    _test_class = flavor.FlavorServiceProfileBinding

    def setUp(self):
        super(FlavorServiceProfileBindingDbObjectTestCase, self).setUp()
        self.update_obj_fields(
            {'flavor_id': lambda: self._create_test_flavor_id(),
             'service_profile_id':
                 lambda: self._create_test_service_profile_id()})


class ServiceProfileIfaceObjectTestCase(obj_test_base.BaseObjectIfaceTestCase):

    _test_class = flavor.ServiceProfile


class ServiceProfileDbObjectTestCase(obj_test_base.BaseDbObjectTestCase,
                                     testlib_api.SqlTestCase):

    _test_class = flavor.ServiceProfile

    def test_get_objects_queries_constant(self):
        # FIXME(electrocucaracha): there is no lazy loading for the flavors
        # relationship in the ServiceProfile db model, so disable this UT
        # to avoid failing
        pass


class FlavorIfaceObjectTestCase(obj_test_base.BaseObjectIfaceTestCase):

    _test_class = flavor.Flavor


class FlavorDbObjectTestCase(obj_test_base.BaseDbObjectTestCase,
                             testlib_api.SqlTestCase):

    _test_class = flavor.Flavor

    def test_get_objects_queries_constant(self):
        # FIXME(electrocucaracha): there is no lazy loading for the
        # service_profiles relationship in the Flavor db model, so disable
        # this UT to avoid failing
        pass
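# NOTE: overriding test_get_objects_queries_constant with ``pass``, as done
# twice above, is the pattern used to opt a model out of the
# constant-query-count check when one of its relationships is not lazily
# loaded.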
# neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/test_floatingip.py

# Copyright 2016 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.objects import floatingip
from neutron.tests.unit.objects import test_base as obj_test_base
from neutron.tests.unit import testlib_api


class FloatingIPDNSIfaceObjectTestcase(
        obj_test_base.BaseObjectIfaceTestCase):

    _test_class = floatingip.FloatingIPDNS


class FloatingIPDNSDbObjectTestcase(obj_test_base.BaseDbObjectTestCase,
                                    testlib_api.SqlTestCase):

    _test_class = floatingip.FloatingIPDNS

    def setUp(self):
        super(FloatingIPDNSDbObjectTestcase, self).setUp()
        self.update_obj_fields(
            {'floatingip_id': lambda: self._create_test_fip_id()})


# neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/test_ipam.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.objects import ipam
from neutron.tests.unit.objects import test_base as obj_test_base
from neutron.tests.unit import testlib_api


class IpamSubnetObjectIfaceTestCase(obj_test_base.BaseObjectIfaceTestCase):

    _test_class = ipam.IpamSubnet


class IpamSubnetDbObjectTestCase(obj_test_base.BaseDbObjectTestCase,
                                 testlib_api.SqlTestCase):

    _test_class = ipam.IpamSubnet

    def setUp(self):
        super(IpamSubnetDbObjectTestCase, self).setUp()
        self.update_obj_fields(
            {'neutron_subnet_id': lambda: self._create_test_subnet_id()})


class IpamAllocationPoolObjectIfaceTestCase(
        obj_test_base.BaseObjectIfaceTestCase):

    _test_class = ipam.IpamAllocationPool


class IpamAllocationPoolDbObjectTestCase(obj_test_base.BaseDbObjectTestCase,
                                         testlib_api.SqlTestCase):

    _test_class = ipam.IpamAllocationPool

    def setUp(self):
        super(IpamAllocationPoolDbObjectTestCase, self).setUp()
        self._create_test_ipam_subnet()
        self.update_obj_fields({'ipam_subnet_id': self._ipam_subnet['id']})

    def _create_test_ipam_subnet(self):
        attrs = self.get_random_object_fields(obj_cls=ipam.IpamSubnet)
        self._ipam_subnet = ipam.IpamSubnet(self.context, **attrs)
        self._ipam_subnet.create()


class IpamAllocationObjectIfaceTestCase(obj_test_base.BaseObjectIfaceTestCase):

    _test_class = ipam.IpamAllocation


class IpamAllocationDbObjectTestCase(obj_test_base.BaseDbObjectTestCase,
                                     testlib_api.SqlTestCase):

    _test_class = ipam.IpamAllocation

    def setUp(self):
        super(IpamAllocationDbObjectTestCase, self).setUp()
        self._create_test_ipam_subnet()
        self.update_obj_fields({'ipam_subnet_id': self._ipam_subnet['id']})

    def _create_test_ipam_subnet(self):
        attrs = self.get_random_object_fields(obj_cls=ipam.IpamSubnet)
        self._ipam_subnet = ipam.IpamSubnet(self.context, **attrs)
        self._ipam_subnet.create()
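# NOTE: unlike the router/fip helpers provided by BaseDbObjectTestCase, the
# IpamSubnet prerequisite above is created locally in setUp(), and its id is
# passed to update_obj_fields() as a static value rather than a callable, so
# all generated test objects share the same parent subnet.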
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.objects import l3_hamode from neutron.tests.unit.objects import test_base as base from neutron.tests.unit import testlib_api class L3HARouterAgentPortBindingIfaceObjectTestCase( base.BaseObjectIfaceTestCase): _test_class = l3_hamode.L3HARouterAgentPortBinding class L3HARouterAgentPortBindingDbObjectTestCase(base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = l3_hamode.L3HARouterAgentPortBinding def setUp(self): super(L3HARouterAgentPortBindingDbObjectTestCase, self).setUp() _network_id = self._create_test_network_id() def get_port(): return self._create_test_port_id(network_id=_network_id) self.update_obj_fields({'port_id': get_port, 'router_id': self._create_test_router_id, 'l3_agent_id': self._create_test_agent_id}) class L3HARouterNetworkIfaceObjectTestCase(base.BaseObjectIfaceTestCase): _test_class = l3_hamode.L3HARouterNetwork class L3HARouterNetworkDbObjectTestCase(base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = l3_hamode.L3HARouterNetwork def setUp(self): super(L3HARouterNetworkDbObjectTestCase, self).setUp() network = self._create_test_network() self.update_obj_fields({'network_id': network.id}) class L3HARouterVRIdAllocationIfaceObjectTestCase( base.BaseObjectIfaceTestCase): _test_class = l3_hamode.L3HARouterVRIdAllocation class L3HARouterVRIdAllocationDbObjectTestCase(base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = l3_hamode.L3HARouterVRIdAllocation def setUp(self): super(L3HARouterVRIdAllocationDbObjectTestCase, self).setUp() self.update_obj_fields( {'network_id': lambda: self._create_test_network().id}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/test_l3agent.py0000644000175000017500000000263200000000000025360 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
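# NOTE: In the L3HARouterAgentPortBinding setUp() above, get_port() is a
# closure over a single _network_id, so each generated binding gets a
# distinct port while all of those ports share one parent network:
#
#     _network_id = self._create_test_network_id()
#     def get_port():
#         return self._create_test_port_id(network_id=_network_id)
#     self.update_obj_fields({'port_id': get_port, ...})
#
# The same factory style (any zero-argument callable, not only a lambda)
# works for the router_id and l3_agent_id fields passed alongside it.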
from neutron.objects import l3agent from neutron.tests.unit.objects import test_base from neutron.tests.unit import testlib_api class RouterL3AgentBindingIfaceObjTestCase(test_base.BaseObjectIfaceTestCase): _test_class = l3agent.RouterL3AgentBinding class RouterL3AgentBindingDbObjTestCase(test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = l3agent.RouterL3AgentBinding def setUp(self): super(RouterL3AgentBindingDbObjTestCase, self).setUp() router_id = self._create_test_router_id() index = iter(range(1, len(self.objs) + 2)) self.update_obj_fields( {'router_id': router_id, 'binding_index': lambda: next(index), 'l3_agent_id': lambda: self._create_test_agent_id()}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/test_metering.py0000644000175000017500000000344000000000000025633 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.objects import metering from neutron.tests.unit.objects import test_base as obj_test_base from neutron.tests.unit import testlib_api class MeteringLabelRuleObjectTestCase(obj_test_base.BaseObjectIfaceTestCase): _test_class = metering.MeteringLabelRule class MeteringLabelRuleDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = metering.MeteringLabelRule def _create_test_metering_label_id(self, **attrs): attrs = self.get_random_object_fields(metering.MeteringLabel) metering_label = metering.MeteringLabel(self.context, **attrs) metering_label.create() return metering_label.id def setUp(self): super(MeteringLabelRuleDbObjectTestCase, self).setUp() self.update_obj_fields( {'metering_label_id': self._create_test_metering_label_id}) class MeteringLabelObjectTestCase(obj_test_base.BaseObjectIfaceTestCase): _test_class = metering.MeteringLabel class MeteringLabelDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = metering.MeteringLabel ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/test_network.py0000644000175000017500000002476300000000000025525 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
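# NOTE: The RouterL3AgentBinding setUp() above and the
# NetworkDhcpAgentBinding one below share an idiom for unique-per-object
# values: a field factory built around an iterator,
#
#     index = iter(range(1, len(self.objs) + 2))
#     self.update_obj_fields({'binding_index': lambda: next(index)})
#
# Because update_obj_fields() factories are invoked once per generated
# object, every binding receives a distinct, increasing binding_index; the
# range being one element longer than self.objs presumably leaves a spare
# index for tests that create one additional object.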
import mock from neutron.db import rbac_db_models from neutron.objects import base as obj_base from neutron.objects import network from neutron.objects.qos import binding from neutron.objects.qos import policy from neutron.tests.unit.objects import test_base as obj_test_base from neutron.tests.unit.objects import test_rbac from neutron.tests.unit import testlib_api class NetworkRBACDbObjectTestCase(test_rbac.TestRBACObjectMixin, obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = network.NetworkRBAC def setUp(self): self._mock_get_valid_actions = mock.patch.object( rbac_db_models.NetworkRBAC, 'get_valid_actions', return_value=(rbac_db_models.ACCESS_EXTERNAL, rbac_db_models.ACCESS_SHARED)) self.mock_get_valid_actions = self._mock_get_valid_actions.start() super(NetworkRBACDbObjectTestCase, self).setUp() for obj in self.db_objs: net_obj = network.Network(self.context, id=obj['object_id']) net_obj.create() def _create_test_network_rbac(self): self.objs[0].create() return self.objs[0] def test_object_version_degradation_1_1_to_1_0_no_id_no_project_id(self): network_rbac_obj = self._create_test_network_rbac() network_rbac_obj = network_rbac_obj.obj_to_primitive('1.0') self.assertNotIn('project_id', network_rbac_obj['versioned_object.data']) self.assertNotIn('id', network_rbac_obj['versioned_object.data']) class NetworkRBACIfaceOjectTestCase(test_rbac.TestRBACObjectMixin, obj_test_base.BaseObjectIfaceTestCase): _test_class = network.NetworkRBAC def setUp(self): self._mock_get_valid_actions = mock.patch.object( rbac_db_models.NetworkRBAC, 'get_valid_actions', return_value=(rbac_db_models.ACCESS_EXTERNAL, rbac_db_models.ACCESS_SHARED)) self.mock_get_valid_actions = self._mock_get_valid_actions.start() super(NetworkRBACIfaceOjectTestCase, self).setUp() class NetworkDhcpAgentBindingObjectIfaceTestCase( obj_test_base.BaseObjectIfaceTestCase): _test_class = network.NetworkDhcpAgentBinding class NetworkDhcpAgentBindingDbObjectTestCase( obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = network.NetworkDhcpAgentBinding def setUp(self): super(NetworkDhcpAgentBindingDbObjectTestCase, self).setUp() self._network = self._create_test_network() index = iter(range(1, len(self.objs) + 2)) self.update_obj_fields( {'network_id': self._network.id, 'dhcp_agent_id': lambda: self._create_test_agent_id(), 'binding_index': lambda: next(index)}) class NetworkPortSecurityIfaceObjTestCase( obj_test_base.BaseObjectIfaceTestCase): _test_class = network.NetworkPortSecurity class NetworkPortSecurityDbObjTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = network.NetworkPortSecurity def setUp(self): super(NetworkPortSecurityDbObjTestCase, self).setUp() self.update_obj_fields({'id': lambda: self._create_test_network_id()}) class NetworkSegmentIfaceObjTestCase(obj_test_base.BaseObjectIfaceTestCase): _test_class = network.NetworkSegment def setUp(self): super(NetworkSegmentIfaceObjTestCase, self).setUp() # TODO(ihrachys): we should not need to duplicate that in every single # place, instead we should move the default pager into the base class # attribute and pull it from there for testing matters. Leaving it for # a follow up. 
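# Each Pager sort entry below is a (field, ascending) pair -- network_id,
# then segment_index, both ascending -- which the base iface test case
# uses to build the default pager it expects get_objects() to receive.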
self.pager_map[self._test_class.obj_name()] = ( obj_base.Pager( sorts=[('network_id', True), ('segment_index', True)])) class NetworkSegmentDbObjTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = network.NetworkSegment def setUp(self): super(NetworkSegmentDbObjTestCase, self).setUp() self.update_obj_fields( {'network_id': lambda: self._create_test_network_id()}) def test_hosts(self): hosts = ['host1', 'host2'] obj = self._make_object(self.obj_fields[0]) obj.hosts = hosts obj.create() obj = network.NetworkSegment.get_object(self.context, id=obj.id) self.assertEqual(hosts, obj.hosts) obj.hosts = ['host3'] obj.update() obj = network.NetworkSegment.get_object(self.context, id=obj.id) self.assertEqual(['host3'], obj.hosts) obj.hosts = None obj.update() obj = network.NetworkSegment.get_object(self.context, id=obj.id) self.assertFalse(obj.hosts) class NetworkObjectIfaceTestCase(test_rbac.RBACBaseObjectIfaceTestCase): _test_class = network.Network def setUp(self): super(NetworkObjectIfaceTestCase, self).setUp() self.pager_map[network.NetworkSegment.obj_name()] = ( obj_base.Pager( sorts=[('network_id', True), ('segment_index', True)])) class NetworkDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = network.Network @mock.patch.object(policy.QosPolicy, 'unset_default') def test_qos_policy_id(self, *mocks): policy_obj = policy.QosPolicy(self.context) policy_obj.create() obj = self._make_object(self.obj_fields[0]) obj.qos_policy_id = policy_obj.id obj.create() obj = network.Network.get_object(self.context, id=obj.id) self.assertEqual(policy_obj.id, obj.qos_policy_id) policy_obj2 = policy.QosPolicy(self.context) policy_obj2.create() obj.qos_policy_id = policy_obj2.id obj.update() obj = network.Network.get_object(self.context, id=obj.id) self.assertEqual(policy_obj2.id, obj.qos_policy_id) obj.qos_policy_id = None obj.update() obj = network.Network.get_object(self.context, id=obj.id) self.assertIsNone(obj.qos_policy_id) @mock.patch.object(policy.QosPolicy, 'unset_default') def test__attach_qos_policy(self, *mocks): obj = self._make_object(self.obj_fields[0]) obj.create() policy_obj = policy.QosPolicy(self.context) policy_obj.create() obj._attach_qos_policy(policy_obj.id) obj = network.Network.get_object(self.context, id=obj.id) self.assertEqual(policy_obj.id, obj.qos_policy_id) qos_binding_obj = binding.QosPolicyNetworkBinding.get_object( self.context, network_id=obj.id) self.assertEqual(qos_binding_obj.policy_id, obj.qos_policy_id) old_policy_id = policy_obj.id policy_obj2 = policy.QosPolicy(self.context) policy_obj2.create() obj._attach_qos_policy(policy_obj2.id) obj = network.Network.get_object(self.context, id=obj.id) self.assertEqual(policy_obj2.id, obj.qos_policy_id) qos_binding_obj2 = binding.QosPolicyNetworkBinding.get_object( self.context, network_id=obj.id) self.assertEqual(qos_binding_obj2.policy_id, obj.qos_policy_id) qos_binding_obj = binding.QosPolicyNetworkBinding.get_objects( self.context, policy_id=old_policy_id) self.assertEqual(0, len(qos_binding_obj)) def test_dns_domain(self): obj = self._make_object(self.obj_fields[0]) obj.dns_domain = 'foo.com' obj.create() obj = network.Network.get_object(self.context, id=obj.id) self.assertEqual('foo.com', obj.dns_domain) obj.dns_domain = 'bar.com' obj.update() obj = network.Network.get_object(self.context, id=obj.id) self.assertEqual('bar.com', obj.dns_domain) obj.dns_domain = None obj.update() obj = network.Network.get_object(self.context, id=obj.id) 
self.assertIsNone(obj.dns_domain) def test__set_dns_domain(self): obj = self._make_object(self.obj_fields[0]) obj.create() obj._set_dns_domain('foo.com') obj = network.Network.get_object(self.context, id=obj.id) self.assertEqual('foo.com', obj.dns_domain) obj._set_dns_domain('bar.com') obj = network.Network.get_object(self.context, id=obj.id) self.assertEqual('bar.com', obj.dns_domain) class SegmentHostMappingIfaceObjectTestCase( obj_test_base.BaseObjectIfaceTestCase): _test_class = network.SegmentHostMapping class SegmentHostMappingDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = network.SegmentHostMapping def setUp(self): super(SegmentHostMappingDbObjectTestCase, self).setUp() self.update_obj_fields( {'segment_id': lambda: self._create_test_segment_id()}) class NetworkDNSDomainIfaceObjectTestcase( obj_test_base.BaseObjectIfaceTestCase): _test_class = network.NetworkDNSDomain class NetworkDNSDomainDbObjectTestcase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = network.NetworkDNSDomain def setUp(self): super(NetworkDNSDomainDbObjectTestcase, self).setUp() self.update_obj_fields( {'network_id': lambda: self._create_test_network_id()}) class ExternalNetworkIfaceObjectTestCase( obj_test_base.BaseObjectIfaceTestCase): _test_class = network.ExternalNetwork class ExternalNetworkDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = network.ExternalNetwork def setUp(self): super(ExternalNetworkDbObjectTestCase, self).setUp() self.update_obj_fields( {'network_id': lambda: self._create_test_network_id()}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/test_network_segment_range.py0000644000175000017500000003210700000000000030412 0ustar00coreycorey00000000000000# Copyright (c) 2019 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
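# NOTE: A NetworkSegmentRange bounds the segmentation IDs ([minimum,
# maximum]) usable for one network_type/physical_network, either as a
# shared "default" range or as a project-specific one. The test cases
# below build overlapping ranges (e.g. a default [100, 120] with project
# ranges such as [109, 114]) and verify that get_segments_for_project()
# only offers IDs inside the caller's own range, while
# get_segments_shared() excludes every ID claimed by some other project's
# range.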
import itertools import random import mock from neutron_lib import constants from neutron_lib import exceptions as n_exc from neutron_lib.utils import helpers from oslo_utils import uuidutils from neutron.objects import network as net_obj from neutron.objects import network_segment_range from neutron.objects.plugins.ml2 import base as ml2_base from neutron.objects.plugins.ml2 import vlanallocation as vlan_alloc_obj from neutron.tests.unit.objects import test_base as obj_test_base from neutron.tests.unit import testlib_api TEST_TENANT_ID = '46f70361-ba71-4bd0-9769-3573fd227c4b' TEST_PHYSICAL_NETWORK = 'phys_net' NUM_ALLOCATIONS = 3 class NetworkSegmentRangeIfaceObjectTestCase( obj_test_base.BaseObjectIfaceTestCase): _test_class = network_segment_range.NetworkSegmentRange def setUp(self): self._mock_get_available_allocation = mock.patch.object( network_segment_range.NetworkSegmentRange, '_get_available_allocation', return_value=[]) self.mock_get_available_allocation = ( self._mock_get_available_allocation.start()) self._mock_get_used_allocation_mapping = mock.patch.object( network_segment_range.NetworkSegmentRange, '_get_used_allocation_mapping', return_value={}) self.mock_get_used_allocation_mapping = ( self._mock_get_used_allocation_mapping.start()) super(NetworkSegmentRangeIfaceObjectTestCase, self).setUp() # `project_id` and `physical_network` attributes in # network_segment_range are nullable, depending on the value of # `shared` and `network_type` respectively. # Hack to always populate test project_id and physical_network # fields in network segment range Iface object testing so that related # tests like `test_create_updates_from_db_object` and # `test_update_updates_from_db_object` can have those fields. # Alternatives can be skipping those tests when executing # NetworkSegmentRangeIfaceObjectTestCase, or making base test case # adjustments. 
self.update_obj_fields({'project_id': TEST_TENANT_ID, 'physical_network': TEST_PHYSICAL_NETWORK}) self.extra_fields_not_in_dict = ['tenant_id'] class NetworkSegmentRangeDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = network_segment_range.NetworkSegmentRange def _create_allocation(self, allocation_class, segmentation_id=None, physical_network=None, allocated=False): attr = self.get_random_object_fields(allocation_class) attr['allocated'] = allocated allocation_class.update_primary_keys( attr, segmentation_id=segmentation_id, physical_network=physical_network or 'foo') allocation = allocation_class(self.context, **attr) allocation.create() return allocation def _create_test_network(self, name=None, network_id=None): name = "test-network-%s" % helpers.get_random_string(4) network_id = (uuidutils.generate_uuid() if network_id is None else network_id) _network = net_obj.Network(self.context, name=name, id=network_id, project_id=uuidutils.generate_uuid()) _network.create() return _network def _create_segment(self, segmentation_id=None, network_id=None, physical_network=None, network_type=None): attr = self.get_random_object_fields(net_obj.NetworkSegment) attr.update({ 'network_id': network_id or self._create_test_network_id(), 'network_type': network_type or constants.TYPE_VLAN, 'physical_network': physical_network or 'foo', 'segmentation_id': segmentation_id or random.randint( constants.MIN_VLAN_TAG, constants.MAX_VLAN_TAG)}) _segment = net_obj.NetworkSegment(self.context, **attr) _segment.create() return _segment def _create_network_segment_range( self, minimum, maximum, network_type=None, physical_network=None, project_id=None, default=False): kwargs = self.get_random_db_fields() kwargs.update({'network_type': network_type or constants.TYPE_VLAN, 'physical_network': physical_network or 'foo', 'minimum': minimum, 'maximum': maximum, 'default': default, 'shared': default, 'project_id': project_id}) db_obj = self._test_class.db_model(**kwargs) obj_fields = self._test_class.modify_fields_from_db(db_obj) obj = self._test_class(self.context, **obj_fields) return obj def test__get_available_allocation(self): range_minimum = 100 range_maximum = 120 to_alloc = range(range_minimum, range_maximum - 5) not_to_alloc = range(range_maximum - 5, range_maximum + 1) for vlan_id in to_alloc: self._create_allocation(vlan_alloc_obj.VlanAllocation, segmentation_id=vlan_id, allocated=True, physical_network='foo') for vlan_id in not_to_alloc: self._create_allocation(vlan_alloc_obj.VlanAllocation, segmentation_id=vlan_id, allocated=False, physical_network='foo') obj = self._create_network_segment_range(range_minimum, range_maximum) available_alloc = self._test_class._get_available_allocation(obj) self.assertItemsEqual(not_to_alloc, available_alloc) def test__get_used_allocation_mapping(self): alloc_mapping = {} for _ in range(5): network = self._create_test_network() segment = self._create_segment(network_id=network.id) alloc_mapping.update({segment.segmentation_id: network.project_id}) obj = self._create_network_segment_range( minimum=min(list(alloc_mapping.keys())), maximum=max(list(alloc_mapping.keys()))) ret_alloc_mapping = self._test_class._get_used_allocation_mapping(obj) self.assertDictEqual(alloc_mapping, ret_alloc_mapping) def _define_network_segment_range(self, shared=False, remove_project_id=False): attrs = self.get_random_object_fields(obj_cls=self._test_class) obj = self._test_class(self.context, **attrs) obj.shared = shared obj.project_id = None if 
remove_project_id else obj.project_id return obj def test_create_not_shared_with_project_id(self): obj = self._define_network_segment_range() obj.create() def test_create_not_shared_without_project_id(self): obj = self._define_network_segment_range(remove_project_id=True) self.assertRaises(n_exc.ObjectActionError, obj.create) def test_update_not_shared_with_project_id(self): obj = self._define_network_segment_range(shared=True) obj.create() obj.shared = False obj.update() def test_update_not_shared_without_project_id(self): obj = self._define_network_segment_range(shared=True, remove_project_id=True) obj.create() obj.shared = False self.assertRaises(n_exc.ObjectActionError, obj.update) def _create_environment(self): self.projects = [uuidutils.generate_uuid() for _ in range(3)] self.segment_ranges = { 'default': [100, 120], self.projects[0]: [90, 105], self.projects[1]: [109, 114], self.projects[2]: [117, 130]} self.seg_min = self.segment_ranges['default'][0] self.seg_max = self.segment_ranges['default'][1] for subclass in ml2_base.SegmentAllocation.__subclasses__(): # Build segment ranges: default one and project specific ones. for name, ranges in self.segment_ranges.items(): default = True if name == 'default' else False project = name if not default else None self._create_network_segment_range( ranges[0], ranges[1], network_type=subclass.network_type, project_id=project, default=default).create() # Build allocations (non allocated). for segmentation_id in range(self.seg_min, self.seg_max + 1): self._create_allocation(subclass, segmentation_id=segmentation_id) def _default_range_set(self, project_id=None): range_set = set(range(self.segment_ranges['default'][0], self.segment_ranges['default'][1] + 1)) for p_id, ranges in ((p, r) for (p, r) in self.segment_ranges.items() if p not in [project_id, 'default']): pranges = self.segment_ranges.get(p_id, [0, 0]) prange_set = set(range(pranges[0], pranges[1] + 1)) range_set.difference_update(prange_set) return range_set def _allocate_random_allocations(self, allocations, subclass): pk_cols = subclass.db_model.__table__.primary_key.columns primary_keys = [col.name for col in pk_cols] allocated = [] for allocation in random.sample(allocations, k=NUM_ALLOCATIONS): segment = dict((k, allocation[k]) for k in primary_keys) allocated.append(segment) self.assertEqual(1, subclass.allocate(self.context, **segment)) return allocated def test_get_segments_for_project(self): self._create_environment() for project_id, subclass in itertools.product( self.projects, ml2_base.SegmentAllocation.__subclasses__()): allocations = network_segment_range.NetworkSegmentRange. \ get_segments_for_project( self.context, subclass.db_model, subclass.network_type, subclass.get_segmentation_id(), project_id=project_id) project_min = max(self.seg_min, self.segment_ranges[project_id][0]) project_max = min(self.seg_max, self.segment_ranges[project_id][1]) project_segment_ids = list(range(project_min, project_max + 1)) self.assertEqual(len(allocations), len(project_segment_ids)) for allocation in allocations: self.assertFalse(allocation.allocated) self.assertIn(allocation.segmentation_id, project_segment_ids) # Allocate random segments inside the project range. self._allocate_random_allocations(allocations, subclass) allocations = network_segment_range.NetworkSegmentRange. 
\ get_segments_for_project( self.context, subclass.db_model, subclass.network_type, subclass.get_segmentation_id(), project_id=project_id) self.assertEqual(len(allocations), len(project_segment_ids) - NUM_ALLOCATIONS) def test_get_segments_shared(self): self._create_environment() self.projects.append(None) for project_id, subclass in itertools.product( self.projects, ml2_base.SegmentAllocation.__subclasses__()): filters = {'project_id': project_id, 'physical_network': 'foo'} allocations = network_segment_range.NetworkSegmentRange. \ get_segments_shared( self.context, subclass.db_model, subclass.network_type, subclass.get_segmentation_id(), **filters) prange = self._default_range_set(project_id) self.assertEqual(len(prange), len(allocations)) # Allocate random segments inside the project shared range. allocated = self._allocate_random_allocations(allocations, subclass) allocations = network_segment_range.NetworkSegmentRange. \ get_segments_shared( self.context, subclass.db_model, subclass.network_type, subclass.get_segmentation_id(), **filters) self.assertEqual(len(allocations), len(prange) - NUM_ALLOCATIONS) # Deallocate the allocated segments because they can be allocated in # a segmentation ID not belonging to any project. for alloc in allocated: self.assertEqual(1, subclass.deallocate(self.context, **alloc)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/test_objects.py0000644000175000017500000001706100000000000025456 0ustar00coreycorey00000000000000# Copyright 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import pprint from oslo_versionedobjects import fixture from neutron import objects from neutron.objects import base from neutron.tests import base as test_base # NOTE: The hashes in this list should only be changed if they come with a # corresponding version bump in the affected objects. Please keep the list in # alphabetic order. 
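# NOTE: test_versions below recomputes these fingerprints with
# oslo.versionedobjects' ObjectVersionChecker and fails on any mismatch.
# After an intentional object change (with its version bump), the fresh
# hashes can be captured by running this test with the GENERATE_HASHES
# environment variable set, which makes test_versions write the current
# fingerprints to object_hashes.txt for updating this map.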
object_data = { 'AddressScope': '1.1-dd0dfdb67775892d3adc090e28e43bd8', 'AddressScopeRBAC': '1.0-192845c5ed0718e1c54fac36936fcd7d', 'Agent': '1.1-64b670752d57b3c7602cb136e0338507', 'AllowedAddressPair': '1.0-9f9186b6f952fbf31d257b0458b852c0', 'AutoAllocatedTopology': '1.0-74642e58c53bf3610dc224c59f81b242', 'ConntrackHelper': '1.0-b1a50cfe18178db50c7f206e75613f4b', 'DefaultSecurityGroup': '1.0-971520cb2e0ec06d747885a0cf78347f', 'DistributedPortBinding': '1.0-39c0d17b281991dcb66716fee5a8bef2', 'DNSNameServer': '1.0-bf87a85327e2d812d1666ede99d9918b', 'ExternalNetwork': '1.0-53d885e033cb931f9bb3bdd6bbe3f0ce', 'DvrFipGatewayPortAgentBinding': '1.0-ee2af3296265a5463de0bc3695b35b51', 'DVRMacAddress': '1.0-d3c61a8338d20da74db2364d4d6554f2', 'ExtraDhcpOpt': '1.0-632f689cbeb36328995a7aed1d0a78d3', 'FlatAllocation': '1.0-bf666f24f4642b047eeca62311fbcb41', 'Flavor': '1.0-82194de5c9aafce08e8527bb7977f5c6', 'FlavorServiceProfileBinding': '1.0-a2c8731e16cefdac4571f80abf1f8930', 'FloatingIP': '1.0-0205cc99ec79e8089d641ed1b565ddae', 'FloatingIPDNS': '1.0-ee3db848500fa1825235f701828c06d5', 'GeneveAllocation': '1.0-d5f76e8eac60a778914d61dd8e23e90f', 'GeneveEndpoint': '1.0-040f026996b5952e2ae4ccd40ac61ca6', 'GreAllocation': '1.0-9ee1bbc4d999bea84c99425484b11ac5', 'GreEndpoint': '1.0-040f026996b5952e2ae4ccd40ac61ca6', 'IPAllocation': '1.0-47251b4c6d45c3b5feb0297fe5c461f2', 'IPAllocationPool': '1.0-371016a6480ed0b4299319cb46d9215d', 'IpamAllocation': '1.0-ace65431abd0a7be84cc4a5f32d034a3', 'IpamAllocationPool': '1.0-c4fa1460ed1b176022ede7af7d1510d5', 'IpamSubnet': '1.0-713de401682a70f34891e13af645fa08', 'L3HARouterAgentPortBinding': '1.0-d1d7ee13f35d56d7e225def980612ee5', 'L3HARouterNetwork': '1.0-87acea732853f699580179a94d2baf91', 'L3HARouterVRIdAllocation': '1.0-37502aebdbeadc4f9e3bd5e9da714ab9', 'MeteringLabel': '1.0-cc4b620a3425222447cbe459f62de533', 'MeteringLabelRule': '1.0-b5c5717e7bab8d1af1623156012a5842', 'Log': '1.0-6391351c0f34ed34375a19202f361d24', 'Network': '1.1-c3e9ecc0618ee934181d91b143a48901', 'NetworkDhcpAgentBinding': '1.1-d9443c88809ffa4c45a0a5a48134b54a', 'NetworkDNSDomain': '1.0-420db7910294608534c1e2e30d6d8319', 'NetworkPortSecurity': '1.0-b30802391a87945ee9c07582b4ff95e3', 'NetworkRBAC': '1.2-192845c5ed0718e1c54fac36936fcd7d', 'NetworkSegment': '1.0-57b7f2960971e3b95ded20cbc59244a8', 'NetworkSegmentRange': '1.0-bdec1fffc9058ea676089b1f2f2b3cf3', 'NetworkSubnetLock': '1.0-140de39d4b86ae346dc3d70b885bea53', 'Port': '1.5-98f35183d876c9beb188f4bf44d4d886', 'PortBinding': '1.0-3306deeaa6deb01e33af06777d48d578', 'PortBindingLevel': '1.1-50d47f63218f87581b6cd9a62db574e5', 'PortDataPlaneStatus': '1.0-25be74bda46c749653a10357676c0ab2', 'PortDNS': '1.1-c5ca2dc172bdd5fafee3fc986d1d7023', 'PortForwarding': '1.2-f772f03b82a616603c7f3d4497bf577f', 'PortSecurity': '1.0-b30802391a87945ee9c07582b4ff95e3', 'PortUplinkStatusPropagation': '1.0-3cfb3f7da716ca9687e4f04ca72b081d', 'ProviderResourceAssociation': '1.0-05ab2d5a3017e5ce9dd381328f285f34', 'ProvisioningBlock': '1.0-c19d6d05bfa8143533471c1296066125', 'QosBandwidthLimitRule': '1.3-51b662b12a8d1dfa89288d826c6d26d3', 'QosDscpMarkingRule': '1.3-0313c6554b34fd10c753cb63d638256c', 'QosMinimumBandwidthRule': '1.3-314c3419f4799067cc31cc319080adff', 'QosPolicyRBAC': '1.1-192845c5ed0718e1c54fac36936fcd7d', 'QosRuleType': '1.3-7286188edeb3a0386f9cf7979b9700fc', 'QosRuleTypeDriver': '1.0-7d8cb9f0ef661ac03700eae97118e3db', 'QosPolicy': '1.8-4adb0cde3102c10d8970ec9487fd7fe7', 'QosPolicyDefault': '1.0-59e5060eedb1f06dd0935a244d27d11c', 'QosPolicyFloatingIPBinding': 
'1.0-5625df4205a18778cd6aa40f99be024e', 'QosPolicyRouterGatewayIPBinding': '1.0-da064fbfe5ee18c950b905b483bf59e3', 'QosPolicyNetworkBinding': '1.0-df53a1e0f675aab8d27a1ccfed38dc42', 'QosPolicyPortBinding': '1.0-66cb364ac99aa64523ade07f9f868ea6', 'Quota': '1.0-6bb6a0f1bd5d66a2134ffa1a61873097', 'QuotaUsage': '1.0-6fbf820368681aac7c5d664662605cf9', 'Reservation': '1.0-49929fef8e82051660342eed51b48f2a', 'ResourceDelta': '1.0-a980b37e0a52618b5af8db29af18be76', 'Route': '1.0-a9883a63b416126f9e345523ec09483b', 'Router': '1.0-adb984d9b73aa11566d40abbeb790df1', 'RouterExtraAttributes': '1.0-ef8d61ae2864f0ec9af0ab7939cab318', 'RouterL3AgentBinding': '1.0-c5ba6c95e3a4c1236a55f490cd67da82', 'RouterPort': '1.0-c8c8f499bcdd59186fcd83f323106908', 'RouterRoute': '1.0-07fc5337c801fb8c6ccfbcc5afb45907', 'SecurityGroup': '1.2-7b63b834e511856f54a09282d6843ecc', 'SecurityGroupPortBinding': '1.0-6879d5c0af80396ef5a72934b6a6ef20', 'SecurityGroupRBAC': '1.0-192845c5ed0718e1c54fac36936fcd7d', 'SecurityGroupRule': '1.0-e9b8dace9d48b936c62ad40fe1f339d5', 'SegmentHostMapping': '1.0-521597cf82ead26217c3bd10738f00f0', 'ServiceProfile': '1.0-9beafc9e7d081b8258f3c5cb66ac5eed', 'StandardAttribute': '1.0-617d4f46524c4ce734a6fc1cc0ac6a0b', 'Subnet': '1.1-5b7e1789a1732259d1e28b4bd87eb1c2', 'SubnetDNSPublishFixedIP': '1.0-db22af6fa20b143986f0cbe06cbfe0ea', 'SubnetPool': '1.1-a0e03895d1a6e7b9d4ab7b0ca13c3867', 'SubnetPoolPrefix': '1.0-13c15144135eb869faa4a76dc3ee3b6c', 'SubnetPoolRBAC': '1.0-192845c5ed0718e1c54fac36936fcd7d', 'SubnetServiceType': '1.0-05ae4cdb2a9026a697b143926a1add8c', 'SubPort': '1.0-72c8471068db1f0491b5480fe49b52bb', 'Tag': '1.0-1a0d20379920ffa3cebfd3e016d2f7a0', 'Trunk': '1.1-aa3922b39e37fbb89886c2ee8715cf49', 'VlanAllocation': '1.0-72636c1b7d5c8eef987bd09666e64f3e', 'VxlanAllocation': '1.0-934638cd32d00f81d6fbf93c8eb5755a', 'VxlanEndpoint': '1.0-40522eafdcf838758711dfa886cbdb2e', } class TestObjectVersions(test_base.BaseTestCase): def setUp(self): super(TestObjectVersions, self).setUp() # NOTE(ihrachys): seed registry with all objects under neutron.objects # before validating the hashes objects.register_objects() def test_versions(self): checker = fixture.ObjectVersionChecker( base.NeutronObjectRegistry.obj_classes()) fingerprints = checker.get_hashes() if os.getenv('GENERATE_HASHES'): with open('object_hashes.txt', 'w') as hashes_file: hashes_file.write(pprint.pformat(fingerprints)) expected, actual = checker.test_hashes(object_data) self.assertEqual(expected, actual, 'Some objects have changed; please make sure the ' 'versions have been bumped, and then update their ' 'hashes in the object_data map in this test module.') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/test_port_forwarding.py0000644000175000017500000001555400000000000027240 0ustar00coreycorey00000000000000# Copyright (c) 2018 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
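# NOTE: The 'portforwardings' table stores internal_ip_address and
# internal_port packed into one 'socket' column as "<ip>:<port>" -- for
# instance, a forwarding to 10.0.0.10 port 8080 would be persisted as the
# string '10.0.0.10:8080' (illustrative values). The tests below split the
# column on ':' to check both halves, and this packing is also why
# internal_ip_address and internal_port cannot be used as direct DB query
# filters for this object.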
import mock import netaddr from neutron.objects import port_forwarding from neutron.objects import router from neutron.tests import tools from neutron.tests.unit.objects import test_base as obj_test_base from neutron.tests.unit import testlib_api class PortForwardingObjectTestCase(obj_test_base.BaseObjectIfaceTestCase): _test_class = port_forwarding.PortForwarding def setUp(self): super(PortForwardingObjectTestCase, self).setUp() self.fip_db_fields = self.get_random_db_fields(router.FloatingIP) del self.fip_db_fields['floating_ip_address'] # 'portforwardings' table will store the 'internal_ip_address' and # 'internal_port' as a single 'socket' column. # Port forwarding object accepts 'internal_ip_address' and # 'internal_port', but can not filter the records in db, so the # valid filters can not contain them. not_supported_filter_fields = ['internal_ip_address', 'internal_port'] invalid_fields = set( self._test_class.synthetic_fields).union( set(not_supported_filter_fields)) self.valid_field = [f for f in self._test_class.fields if f not in invalid_fields][0] def random_generate_fip_obj(db_fields, **floatingip): if db_fields.get( 'id', None) and floatingip.get( 'id', None) and db_fields.get('id') == floatingip.get('id'): return db_fields db_fields['id'] = floatingip.get('id', None) db_fields['floating_ip_address'] = tools.get_random_ip_address( version=4) return self.fip_db_fields self.mock_fip_obj = mock.patch.object( router.FloatingIP, 'get_object', side_effect=lambda _, **y: router.FloatingIP.db_model( **random_generate_fip_obj(self.fip_db_fields, **y))).start() class PortForwardingDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = port_forwarding.PortForwarding def setUp(self): super(PortForwardingDbObjectTestCase, self).setUp() self.update_obj_fields( {'floatingip_id': lambda: self._create_test_fip_id_for_port_forwarding(), 'internal_port_id': lambda: self._create_test_port_id()}) # 'portforwardings' table will store the 'internal_ip_address' and # 'internal_port' as a single 'socket' column. # Port forwarding object accepts 'internal_ip_address' and # 'internal_port', but can not filter the records in db, so the # valid filters can not contain them. not_supported_filter_fields = ['internal_ip_address', 'internal_port'] invalid_fields = set( self._test_class.synthetic_fields).union( set(not_supported_filter_fields)) self.valid_field = [f for f in self._test_class.fields if f not in invalid_fields][0] self.valid_field_filter = {self.valid_field: self.obj_fields[-1][self.valid_field]} def _create_test_fip_id_for_port_forwarding(self): fake_fip = '172.23.3.0' ext_net_id = self._create_external_network_id() router_id = self._create_test_router_id() values = { 'floating_ip_address': netaddr.IPAddress(fake_fip), 'floating_network_id': ext_net_id, 'floating_port_id': self._create_test_port_id( network_id=ext_net_id), 'router_id': router_id, } fip_obj = router.FloatingIP(self.context, **values) fip_obj.create() return fip_obj.id def test_db_obj(self): # The reason for rewriting this test is: # 1. Currently, the existing test_db_obj test in # obj_test_base.BaseDbObjectTestCase is not suitable for the case, # for example, the db model is not the same with obj fields # definition. # 2. For port forwarding, the db model will store and accept 'socket', # but the obj fields just only support accepting the parameters # generate 'socket', such as 'internal_ip_address' and # 'internal_port'. 
obj = self._make_object(self.obj_fields[0]) self.assertIsNone(obj.db_obj) obj.create() self.assertIsNotNone(obj.db_obj) # Make sure the created obj socket field is correct. created_socket = obj.db_obj.socket.split(":") self.assertEqual(created_socket[0], str(obj.internal_ip_address)) self.assertEqual(created_socket[1], str(obj.internal_port)) fields_to_update = self.get_updatable_fields(self.obj_fields[1]) if fields_to_update: old_fields = {} for key, val in fields_to_update.items(): db_model_attr = ( obj.fields_need_translation.get(key, key)) old_fields[db_model_attr] = obj.db_obj[ db_model_attr] if hasattr( obj.db_obj, db_model_attr) else getattr( obj, db_model_attr) setattr(obj, key, val) obj.update() self.assertIsNotNone(obj.db_obj) # Make sure the updated obj socket field is correct. updated_socket = obj.db_obj.socket.split(":") self.assertEqual(updated_socket[0], str(self.obj_fields[1]['internal_ip_address'])) self.assertEqual(updated_socket[1], str(self.obj_fields[1]['internal_port'])) # Then check all update fields had been updated. for k, v in obj.modify_fields_to_db(fields_to_update).items(): self.assertEqual(v, obj.db_obj[k], '%s attribute differs' % k) obj.delete() self.assertIsNone(obj.db_obj) def test_get_objects_queries_constant(self): # NOTE(bzhao) Port Forwarding uses query FLoatingIP for injecting # floating_ip_address and router_id, not depends on relationship, # so it will cost extra SQL query each time for finding the # associated Floating IP by floatingip_id each time(or each # Port Forwarding Object). Rework this if this customized OVO # needs to be changed. pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/test_ports.py0000644000175000017500000005140100000000000025170 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
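# NOTE: Several cases below exercise oslo.versionedobjects version
# degradation: obj_to_primitive(target_version='1.x') serializes the
# object into a dict whose fields live under 'versioned_object.data' (and
# whose emitted version sits under 'versioned_object.version'), and
# downgrading must drop fields introduced by later versions. The assertion
# pattern, as a sketch:
#
#     port_v1_0 = port_new.obj_to_primitive(target_version='1.0')
#     self.assertNotIn('data_plane_status',
#                      port_v1_0['versioned_object.data'])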
import mock from neutron_lib import constants from neutron_lib.tests import tools from oslo_utils import uuidutils import testscenarios from neutron.objects import base as obj_base from neutron.objects import network from neutron.objects import ports from neutron.objects.qos import binding from neutron.objects.qos import policy from neutron.tests.unit.objects import test_base as obj_test_base from neutron.tests.unit import testlib_api class SecurityGroupPortBindingIfaceObjTestCase( obj_test_base.BaseObjectIfaceTestCase): _test_class = ports.SecurityGroupPortBinding class SecurityGroupPortBindingDbObjectTestCase( obj_test_base.BaseDbObjectTestCase): _test_class = ports.SecurityGroupPortBinding class BasePortBindingDbObjectTestCase(obj_test_base._BaseObjectTestCase, testlib_api.SqlTestCase): def setUp(self): super(BasePortBindingDbObjectTestCase, self).setUp() self.update_obj_fields( {'port_id': lambda: self._create_test_port_id()}) class PortBindingIfaceObjTestCase(obj_test_base.BaseObjectIfaceTestCase): _test_class = ports.PortBinding class PortBindingDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, BasePortBindingDbObjectTestCase): _test_class = ports.PortBinding class DistributedPortBindingIfaceObjTestCase( obj_test_base.BaseObjectIfaceTestCase): _test_class = ports.DistributedPortBinding class DistributedPortBindingDbObjectTestCase( obj_test_base.BaseDbObjectTestCase, BasePortBindingDbObjectTestCase): _test_class = ports.DistributedPortBinding # TODO(ihrachys): this test case copies some functions from the base module. # This is because we currently cannot inherit from the base class that contains # those functions, because that same class provides test cases that we don't # want to execute. Ideally, we would not need to copy-paste, but avoiding that # would require some significant refactoring in the base test classes. Leaving # it for a follow up. 
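# NOTE: The test case below relies on the testscenarios library: each
# (name, attrs) tuple in its scenarios list yields a separate run of every
# test method with those attributes applied, so the vif_details tests
# execute once with _test_class = ports.PortBinding and once with
# _test_class = ports.DistributedPortBinding without duplicating the code.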
class PortBindingVifDetailsTestCase(testscenarios.WithScenarios, obj_test_base._BaseObjectTestCase, testlib_api.SqlTestCase): scenarios = [ (cls.__name__, {'_test_class': cls}) for cls in (ports.PortBinding, ports.DistributedPortBinding) ] def setUp(self): super(PortBindingVifDetailsTestCase, self).setUp() self._create_test_network() getter = lambda: self._create_port(network_id=self._network['id']).id self.update_obj_fields({'port_id': getter}) def _create_port(self, **port_attrs): attrs = {'project_id': uuidutils.generate_uuid(), 'admin_state_up': True, 'status': 'ACTIVE', 'device_id': 'fake_device', 'device_owner': 'fake_owner', 'mac_address': tools.get_random_EUI()} attrs.update(port_attrs) port = ports.Port(self.context, **attrs) port.create() return port def _create_test_network(self): self._network = network.Network(self.context, name='test-network1') self._network.create() def _make_object(self, fields): fields = obj_test_base.get_non_synthetic_fields( self._test_class, fields ) return self._test_class( self.context, **obj_test_base.remove_timestamps_from_fields( fields, self._test_class.fields)) def test_vif_details(self): vif_details = {'item1': 'val1', 'item2': 'val2'} obj = self._make_object(self.obj_fields[0]) obj.vif_details = vif_details obj.create() obj = self._test_class.get_object( self.context, **obj._get_composite_keys()) self.assertEqual(vif_details, obj.vif_details) vif_details['item1'] = 1.23 del vif_details['item2'] vif_details['item3'] = True obj.vif_details = vif_details obj.update() obj = self._test_class.get_object( self.context, **obj._get_composite_keys()) self.assertEqual(vif_details, obj.vif_details) obj.vif_details = None obj.update() # here the obj is reloaded from DB, # so we test if vif_details is still none self.assertIsNone(obj.vif_details) obj = self._test_class.get_object( self.context, **obj._get_composite_keys()) self.assertIsNone(obj.vif_details) def test_null_vif_details_in_db(self): # the null case for vif_details in our db model is an # empty string. 
add that here to simulate it correctly # in the tests kwargs = self.get_random_db_fields() kwargs['vif_details'] = '' db_obj = self._test_class.db_model(**kwargs) obj_fields = self._test_class.modify_fields_from_db(db_obj) obj = self._test_class(self.context, **obj_fields) self.assertIsNone(obj.vif_details) class IPAllocationIfaceObjTestCase(obj_test_base.BaseObjectIfaceTestCase): _test_class = ports.IPAllocation class IPAllocationDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = ports.IPAllocation def setUp(self): super(IPAllocationDbObjectTestCase, self).setUp() network_id = self._create_test_network_id() port_id = self._create_test_port_id(network_id=network_id) self.update_obj_fields( {'port_id': port_id, 'network_id': network_id, 'subnet_id': lambda: self._create_test_subnet_id(network_id)}) class PortDNSIfaceObjTestCase(obj_test_base.BaseObjectIfaceTestCase): _test_class = ports.PortDNS class PortDNSDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = ports.PortDNS def setUp(self): super(PortDNSDbObjectTestCase, self).setUp() self.update_obj_fields( {'port_id': lambda: self._create_test_port_id()}) class PortBindingLevelIfaceObjTestCase( obj_test_base.BaseObjectIfaceTestCase): _test_class = ports.PortBindingLevel def setUp(self): super(PortBindingLevelIfaceObjTestCase, self).setUp() self.pager_map[self._test_class.obj_name()] = ( obj_base.Pager(sorts=[('port_id', True), ('level', True)])) class PortBindingLevelDbObjectTestCase( obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = ports.PortBindingLevel def setUp(self): super(PortBindingLevelDbObjectTestCase, self).setUp() self.update_obj_fields( {'port_id': lambda: self._create_test_port_id(), 'segment_id': lambda: self._create_test_segment_id()}) class PortIfaceObjTestCase(obj_test_base.BaseObjectIfaceTestCase): _test_class = ports.Port def setUp(self): super(PortIfaceObjTestCase, self).setUp() self.pager_map[ports.PortBindingLevel.obj_name()] = ( obj_base.Pager(sorts=[('port_id', True), ('level', True)])) class PortDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = ports.Port def setUp(self): super(PortDbObjectTestCase, self).setUp() network_id = self._create_test_network_id() segment_id = self._create_test_segment_id(network_id) subnet_id = self._create_test_subnet_id(network_id) self.update_obj_fields( {'network_id': network_id, 'fixed_ips': {'subnet_id': subnet_id, 'network_id': network_id}, 'device_owner': 'not_a_router', 'binding_levels': {'segment_id': segment_id}}) def test_security_group_ids(self): groups = [] objs = [] for i in range(2): groups.append(self._create_test_security_group_id()) objs.append(self._make_object(self.obj_fields[i])) objs[i].security_group_ids = {groups[i]} objs[i].create() self.assertEqual([objs[0]], ports.Port.get_objects( self.context, security_group_ids=(groups[0], ))) self.assertEqual([objs[1]], ports.Port.get_objects( self.context, security_group_ids=(groups[1], ))) sg3_id = self._create_test_security_group_id() objs[0].security_group_ids = {sg3_id} objs[0].update() objs[0] = ports.Port.get_object(self.context, id=objs[0].id) self.assertEqual({sg3_id}, objs[0].security_group_ids) objs[0].security_group_ids = set() objs[0].update() objs[0] = ports.Port.get_object(self.context, id=objs[0].id) self.assertFalse(objs[0].security_group_ids) def test_security_group_ids_and_port_id(self): objs = [] group = self._create_test_security_group_id() for i in 
range(2): objs.append(self._make_object(self.obj_fields[i])) objs[i].security_group_ids = {group} objs[i].create() for i in range(2): self.assertEqual( [objs[i]], ports.Port.get_objects( self.context, id=(objs[i].id, ), security_group_ids=(group, ))) def test__attach_security_group(self): obj = self._make_object(self.obj_fields[0]) obj.create() sg_id = self._create_test_security_group_id() obj._attach_security_group(sg_id) obj = ports.Port.get_object(self.context, id=obj.id) self.assertIn(sg_id, obj.security_group_ids) sg2_id = self._create_test_security_group_id() obj._attach_security_group(sg2_id) obj = ports.Port.get_object(self.context, id=obj.id) self.assertIn(sg2_id, obj.security_group_ids) @mock.patch.object(policy.QosPolicy, 'unset_default') def test_qos_policy_id(self, *mocks): policy_obj = policy.QosPolicy(self.context) policy_obj.create() obj = self._make_object(self.obj_fields[0]) obj.qos_policy_id = policy_obj.id obj.create() obj = ports.Port.get_object(self.context, id=obj.id) self.assertEqual(policy_obj.id, obj.qos_policy_id) policy_obj2 = policy.QosPolicy(self.context) policy_obj2.create() obj.qos_policy_id = policy_obj2.id obj.update() obj = ports.Port.get_object(self.context, id=obj.id) self.assertEqual(policy_obj2.id, obj.qos_policy_id) obj.qos_policy_id = None obj.update() obj = ports.Port.get_object(self.context, id=obj.id) self.assertIsNone(obj.qos_policy_id) @mock.patch.object(policy.QosPolicy, 'unset_default') def test__attach_qos_policy(self, *mocks): obj = self._make_object(self.obj_fields[0]) obj.create() policy_obj = policy.QosPolicy(self.context) policy_obj.create() obj._attach_qos_policy(policy_obj.id) obj = ports.Port.get_object(self.context, id=obj.id) self.assertEqual(policy_obj.id, obj.qos_policy_id) qos_binding_obj = binding.QosPolicyPortBinding.get_object( self.context, port_id=obj.id) self.assertEqual(qos_binding_obj.policy_id, obj.qos_policy_id) old_policy_id = policy_obj.id policy_obj2 = policy.QosPolicy(self.context) policy_obj2.create() obj._attach_qos_policy(policy_obj2.id) obj = ports.Port.get_object(self.context, id=obj.id) self.assertEqual(policy_obj2.id, obj.qos_policy_id) qos_binding_obj2 = binding.QosPolicyPortBinding.get_object( self.context, port_id=obj.id) self.assertEqual(qos_binding_obj2.policy_id, obj.qos_policy_id) qos_binding_obj = binding.QosPolicyPortBinding.get_objects( self.context, policy_id=old_policy_id) self.assertEqual(0, len(qos_binding_obj)) @mock.patch.object(policy.QosPolicy, 'unset_default') def test_qos_network_policy_id(self, *mocks): policy_obj = policy.QosPolicy(self.context) policy_obj.create() obj = self._make_object(self.obj_fields[0]) obj.create() obj = ports.Port.get_object(self.context, id=obj.id) self.assertIsNone(obj.qos_network_policy_id) self.assertIsNone(obj.qos_policy_id) network = self._create_test_network(qos_policy_id=policy_obj.id) self.update_obj_fields({'network_id': network.id}) obj = self._make_object(self.obj_fields[1]) obj.create() obj = ports.Port.get_object(self.context, id=obj.id) self.assertEqual(policy_obj.id, obj.qos_network_policy_id) self.assertIsNone(obj.qos_policy_id) def test_get_objects_queries_constant(self): self.skipTest( 'Port object loads segment info without relationships') def test_v1_1_to_v1_0_drops_data_plane_status(self): port_new = self._create_test_port() port_v1_0 = port_new.obj_to_primitive(target_version='1.0') self.assertNotIn('data_plane_status', port_v1_0['versioned_object.data']) def test_v1_2_to_v1_1_drops_segment_id_in_binding_levels(self): port_new = 
self._create_test_port() segment = network.NetworkSegment( self.context, # TODO(ihrachys) we should be able to create a segment object # without explicitly specifying id, but it's currently not working id=uuidutils.generate_uuid(), network_id=port_new.network_id, network_type='vxlan') segment.create() # TODO(ihrachys) we should be able to create / update level objects via # Port object, but it's currently not working binding = ports.PortBindingLevel( self.context, port_id=port_new.id, host='host1', level=0, segment_id=segment.id) binding.create() port_new = ports.Port.get_object(self.context, id=port_new.id) port_v1_1 = port_new.obj_to_primitive(target_version='1.1') lvl = port_v1_1['versioned_object.data']['binding_levels'][0] self.assertNotIn('segment_id', lvl['versioned_object.data']) # check that we also downgraded level object version self.assertEqual('1.0', lvl['versioned_object.version']) # finally, prove that binding primitive is now identical to direct # downgrade of the binding object binding_v1_0 = binding.obj_to_primitive(target_version='1.0') self.assertEqual(binding_v1_0, lvl) def test_v1_3_to_v1_2_unlists_distributed_bindings(self): port_new = self._create_test_port() # empty list transforms into None port_v1_2 = port_new.obj_to_primitive(target_version='1.2') port_data = port_v1_2['versioned_object.data'] self.assertIsNone(port_data['distributed_binding']) # now insert a distributed binding binding = ports.DistributedPortBinding( self.context, host='host1', port_id=port_new.id, status='ACTIVE', vnic_type='vnic_type1', vif_type='vif_type1') binding.create() # refetch port object to include binding port_new = ports.Port.get_object(self.context, id=port_new.id) # new primitive should contain the binding data port_v1_2 = port_new.obj_to_primitive(target_version='1.2') port_data = port_v1_2['versioned_object.data'] binding_data = ( port_data['distributed_binding']['versioned_object.data']) self.assertEqual(binding.host, binding_data['host']) def test_v1_4_to_v1_3_converts_binding_to_portbinding_object(self): port_v1_4 = self._create_test_port() port_v1_3 = port_v1_4.obj_to_primitive(target_version='1.3') # Port has no bindings, so binding attribute should be None self.assertIsNone(port_v1_3['versioned_object.data']['binding']) active_binding = ports.PortBinding(self.context, port_id=port_v1_4.id, host='host1', vif_type='type') inactive_binding = ports.PortBinding( self.context, port_id=port_v1_4.id, host='host2', vif_type='type', status=constants.INACTIVE) active_binding.create() inactive_binding.create() port_v1_4 = ports.Port.get_object(self.context, id=port_v1_4.id) port_v1_3 = port_v1_4.obj_to_primitive(target_version='1.3') binding = port_v1_3['versioned_object.data']['binding'] # Port has active binding, so the binding attribute should point to it self.assertEqual('host1', binding['versioned_object.data']['host']) active_binding.delete() port_v1_4 = ports.Port.get_object(self.context, id=port_v1_4.id) port_v1_3 = port_v1_4.obj_to_primitive(target_version='1.3') # Port has no active bindings, so binding attribute should be None self.assertIsNone(port_v1_3['versioned_object.data']['binding']) # bindings attribute in V1.4 port should have one inactive binding primitive = port_v1_4.obj_to_primitive() self.assertEqual(1, len(primitive['versioned_object.data']['bindings'])) binding = primitive['versioned_object.data']['bindings'][0] self.assertEqual(constants.INACTIVE, binding['versioned_object.data']['status']) # Port with no binding attribute should be handled without raising 
# exception primitive['versioned_object.data'].pop('bindings') port_v1_4_no_binding = port_v1_4.obj_from_primitive(primitive) port_v1_4_no_binding.obj_to_primitive(target_version='1.3') def test_v1_5_to_v1_4_drops_qos_network_policy_id(self): port_new = self._create_test_port() port_v1_4 = port_new.obj_to_primitive(target_version='1.4') self.assertNotIn('qos_network_policy_id', port_v1_4['versioned_object.data']) def test_get_ports_ids_by_security_groups_except_router(self): sg_id = self._create_test_security_group_id() filter_owner = constants.ROUTER_INTERFACE_OWNERS_SNAT obj = self._make_object(self.obj_fields[0]) obj.create() obj.security_group_ids = {sg_id} obj.update() self.assertEqual(1, len( ports.Port.get_ports_ids_by_security_groups( self.context, security_group_ids=(sg_id, ), excluded_device_owners=filter_owner))) obj.device_owner = constants.DEVICE_OWNER_ROUTER_SNAT obj.update() self.assertEqual(0, len( ports.Port.get_ports_ids_by_security_groups( self.context, security_group_ids=(sg_id, ), excluded_device_owners=filter_owner))) def test_get_ports_by_vnic_type_and_host(self): port1 = self._create_test_port() ports.PortBinding( self.context, host='host1', port_id=port1.id, status='ACTIVE', vnic_type='vnic_type1', vif_type='vif_type1').create() port2 = self._create_test_port() ports.PortBinding( self.context, host='host1', port_id=port2.id, status='ACTIVE', vnic_type='vnic_type2', vif_type='vif_type1').create() self.assertEqual(1, len( ports.Port.get_ports_by_vnic_type_and_host( self.context, 'vnic_type1', 'host1'))) def test_check_network_ports_by_binding_types(self): port1 = self._create_test_port() network_id = port1.network_id ports.PortBinding( self.context, host='host1', port_id=port1.id, status='ACTIVE', vnic_type='vnic_type1', vif_type='vif_type1').create() port2 = self._create_test_port(network_id=network_id) ports.PortBinding( self.context, host='host2', port_id=port2.id, status='ACTIVE', vnic_type='vnic_type2', vif_type='vif_type2').create() self.assertTrue( ports.Port.check_network_ports_by_binding_types( self.context, network_id, binding_types=['vif_type1', 'vif_type2'])) self.assertFalse( ports.Port.check_network_ports_by_binding_types( self.context, network_id, binding_types=['vif_type1', 'vif_type2'], negative_search=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/test_provisioning_blocks.py0000644000175000017500000000245000000000000030104 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.objects import provisioning_blocks from neutron.tests.unit.objects import test_base as obj_test_base from neutron.tests.unit import testlib_api class ProvisioningBlockIfaceObjectTestCase( obj_test_base.BaseObjectIfaceTestCase): _test_class = provisioning_blocks.ProvisioningBlock class ProvisioningBlockDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = provisioning_blocks.ProvisioningBlock def setUp(self): super(ProvisioningBlockDbObjectTestCase, self).setUp() self.update_obj_fields( { 'standard_attr_id': lambda: self._create_test_standard_attribute_id() }) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/test_quota.py0000644000175000017500000001054000000000000025151 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from oslo_utils import uuidutils from neutron.objects import quota from neutron.tests.unit.objects import test_base as obj_test_base from neutron.tests.unit import testlib_api class ResourceDeltaObjectIfaceTestCase(obj_test_base.BaseObjectIfaceTestCase): _test_class = quota.ResourceDelta class ResourceDeltaDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = quota.ResourceDelta def setUp(self): super(ResourceDeltaDbObjectTestCase, self).setUp() for obj in self.obj_fields: self._create_test_reservation(res_id=obj['reservation_id']) def _create_test_reservation(self, res_id): self._reservation = quota.Reservation(self.context, id=res_id) self._reservation.create() class ReservationObjectIfaceTestCase(obj_test_base.BaseObjectIfaceTestCase): _test_class = quota.Reservation class ReservationDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = quota.Reservation def _create_test_reservation(self, res=None, exp=None): res_id = uuidutils.generate_uuid() reservation = self._test_class(self.context, id=res_id, resource=res, expiration=exp) reservation.create() return reservation def test_delete_expired(self): dt = datetime.datetime.utcnow() resources = {'goals': 2, 'assists': 1} exp_date1 = datetime.datetime(2016, 3, 31, 14, 30) res1 = self._create_test_reservation(resources, exp_date1) exp_date2 = datetime.datetime(2015, 3, 31, 14, 30) res2 = self._create_test_reservation(resources, exp_date2) self.assertEqual(2, self._test_class.delete_expired( self.context, dt, None)) objs = self._test_class.get_objects(self.context, id=[res1.id, res2.id]) self.assertEqual([], objs) def test_reservation_synthetic_field(self): res = self._create_test_reservation() resource = 'test-res' res_delta = quota.ResourceDelta(self.context, resource=resource, reservation_id=res.id, amount='10') res_delta.create() obj = self._test_class.get_object(self.context, id=res.id) self.assertEqual(res_delta, obj.resource_deltas[0]) res_delta.delete() obj.update() # NOTE(manjeets) update on 
reservation should reflect # changes on synthetic field when it is deleted. obj = self._test_class.get_object(self.context, id=res.id) self.assertEqual([], obj.resource_deltas) class QuotaObjectIfaceTestCase(obj_test_base.BaseObjectIfaceTestCase): _test_class = quota.Quota class QuotaDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = quota.Quota class QuotaUsageObjectIfaceTestCase(obj_test_base.BaseObjectIfaceTestCase): _test_class = quota.QuotaUsage class QuotaUsageDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = quota.QuotaUsage def _test_get_object_dirty_protected(self, obj, dirty=True): obj.create() obj.dirty = dirty obj.update() new = self._test_class.get_object_dirty_protected( self.context, **obj._get_composite_keys()) self.assertEqual(obj, new) self.assertEqual(dirty, new.dirty) def test_get_object_dirty_protected(self): obj = self._make_object(self.obj_fields[0]) obj1 = self._make_object(self.obj_fields[1]) self._test_get_object_dirty_protected(obj, dirty=False) self._test_get_object_dirty_protected(obj1) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/test_rbac.py0000644000175000017500000000417500000000000024736 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import random import mock from neutron.objects import address_scope from neutron.objects import network from neutron.objects.qos import policy from neutron.objects import rbac from neutron.objects import securitygroup from neutron.objects import subnetpool from neutron.tests import base as neutron_test_base from neutron.tests.unit.objects import test_base class TestRBACObjectMixin(object): def get_random_object_fields(self, obj_cls=None): fields = (super(TestRBACObjectMixin, self). 
get_random_object_fields(obj_cls)) rnd_actions = self._test_class.db_model.get_valid_actions() idx = random.randint(0, len(rnd_actions) - 1) fields['action'] = rnd_actions[idx] return fields class RBACBaseObjectTestCase(neutron_test_base.BaseTestCase): def test_get_type_class_map(self): class_map = {'address_scope': address_scope.AddressScopeRBAC, 'qos_policy': policy.QosPolicyRBAC, 'network': network.NetworkRBAC, 'security_group': securitygroup.SecurityGroupRBAC, 'subnetpool': subnetpool.SubnetPoolRBAC} self.assertEqual(class_map, rbac.RBACBaseObject.get_type_class_map()) class RBACBaseObjectIfaceTestCase(test_base.BaseObjectIfaceTestCase): def test_get_object(self, context=None): super(RBACBaseObjectIfaceTestCase, self).test_get_object(context=mock.ANY) def test_get_objects(self, context=None): super(RBACBaseObjectIfaceTestCase, self).test_get_objects(context=mock.ANY) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/test_rbac_db.py0000644000175000017500000003577300000000000025413 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib.callbacks import events from neutron_lib import context as n_context from neutron_lib.db import model_base from neutron_lib import exceptions as n_exc from neutron_lib.objects import common_types from oslo_versionedobjects import fields as obj_fields import sqlalchemy as sa from neutron.db import rbac_db_models from neutron.extensions import rbac as ext_rbac from neutron.objects import base from neutron.objects.db import api as obj_db_api from neutron.objects import rbac_db from neutron.tests.unit.objects import test_rbac from neutron.tests.unit import testlib_api class FakeDbModel(dict): pass class FakeRbacModel(rbac_db_models.RBACColumns, model_base.BASEV2): object_id = sa.Column(sa.String(36), nullable=False) object_type = 'fake_rbac_object' def get_valid_actions(self): return (rbac_db_models.ACCESS_SHARED,) @base.NeutronObjectRegistry.register_if(False) class FakeNeutronRbacObject(base.NeutronDbObject): VERSION = '1.0' db_model = FakeRbacModel fields = { 'object_id': obj_fields.StringField(), 'target_tenant': obj_fields.StringField(), 'action': obj_fields.StringField(), } @base.NeutronObjectRegistry.register_if(False) class FakeNeutronDbObject(rbac_db.NeutronRbacObject): # Version 1.0: Initial version VERSION = '1.0' rbac_db_cls = FakeNeutronRbacObject db_model = FakeDbModel fields = { 'id': common_types.UUIDField(), 'field1': obj_fields.StringField(), 'field2': obj_fields.StringField(), 'shared': obj_fields.BooleanField(default=False), } fields_no_update = ['id'] synthetic_fields = ['field2'] def get_bound_tenant_ids(cls, context, policy_id): pass class RbacNeutronDbObjectTestCase(test_rbac.RBACBaseObjectIfaceTestCase, testlib_api.SqlTestCase): _test_class = FakeNeutronDbObject def setUp(self): super(RbacNeutronDbObjectTestCase, self).setUp() FakeNeutronDbObject.update_post = mock.Mock() 
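    # The FakeRbacModel / FakeNeutronRbacObject / FakeNeutronDbObject trio
    # above stands in for a real RBAC-aware resource so that the generic
    # RbacNeutronDbObject machinery (is_accessible, update_shared, the
    # validate_rbac_policy_* callbacks) can be unit tested in isolation.
    # register_if(False) keeps the fakes out of the global object registry,
    # and update_post is stubbed with a mock in setUp(), presumably so the
    # update paths need no concrete post-update implementation (an
    # assumption about intent, not stated in the code).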
@mock.patch.object(_test_class.rbac_db_cls, 'db_model') def test_get_tenants_with_shared_access_to_db_obj_return_tenant_ids( self, *mocks): ctx = mock.Mock() fake_ids = {'tenant_id_' + str(i) for i in range(10)} ctx.session.query.return_value.filter.return_value = [ (fake_id,) for fake_id in fake_ids] ret_ids = self._test_class._get_tenants_with_shared_access_to_db_obj( ctx, 'fake_db_obj_id') self.assertEqual(fake_ids, ret_ids) def test_is_accessible_for_admin(self): ctx = mock.Mock(is_admin=True, tenant_id='we_dont_care') self.assertTrue(self._test_class.is_accessible(ctx, None)) def test_is_accessible_for_db_object_owner(self): ctx = mock.Mock(is_admin=False, tenant_id='db_object_owner') db_obj = mock.Mock(tenant_id=ctx.tenant_id) self.assertTrue(self._test_class.is_accessible(ctx, db_obj)) @mock.patch.object(_test_class, 'is_shared_with_tenant', return_value=True) def test_is_accessible_if_shared_with_tenant(self, mock_is_shared): ctx = mock.Mock(is_admin=False, tenant_id='db_object_shareholder') db_obj = mock.Mock(tenant_id='db_object_owner') self.assertTrue(self._test_class.is_accessible(ctx, db_obj)) mock_is_shared.assert_called_once_with( mock.ANY, db_obj.id, ctx.tenant_id) @mock.patch.object(_test_class, 'is_shared_with_tenant', return_value=False) def test_is_accessible_fails_for_unauthorized_tenant(self, mock_is_shared): ctx = mock.Mock(is_admin=False, tenant_id='Billy_the_kid') db_obj = mock.Mock(tenant_id='db_object_owner') self.assertFalse(self._test_class.is_accessible(ctx, db_obj)) mock_is_shared.assert_called_once_with( mock.ANY, db_obj.id, ctx.tenant_id) def _rbac_policy_generate_change_events(self, resource, trigger, context, object_type, policy, event_list): for event in event_list: payload = events.DBEventPayload( context, states=(policy,), metadata={'object_type': object_type}) if event == events.BEFORE_CREATE: payload.states = [] payload.request_body = policy self._test_class.validate_rbac_policy_change( resource, event, trigger, payload=payload) @mock.patch.object(_test_class, 'validate_rbac_policy_update') def test_validate_rbac_policy_change_handles_only_object_type( self, mock_validate_rbac_update): self._rbac_policy_generate_change_events( resource=None, trigger='dummy_trigger', context=None, object_type='dummy_object_type', policy=None, event_list=(events.BEFORE_CREATE, events.BEFORE_UPDATE, events.BEFORE_DELETE)) mock_validate_rbac_update.assert_not_called() @mock.patch.object(_test_class, 'validate_rbac_policy_update') @mock.patch.object(obj_db_api, 'get_object', return_value={'tenant_id': 'tyrion_lannister'}) def test_validate_rbac_policy_change_allowed_for_admin_or_owner( self, mock_get_object, mock_validate_update): context = mock.Mock(is_admin=True, tenant_id='db_obj_owner_id') self._rbac_policy_generate_change_events( resource=None, trigger='dummy_trigger', context=context, object_type=self._test_class.rbac_db_cls.db_model.object_type, policy={'object_id': 'fake_object_id'}, event_list=(events.BEFORE_CREATE, events.BEFORE_UPDATE)) self.assertTrue(self._test_class.validate_rbac_policy_update.called) @mock.patch.object(_test_class, 'validate_rbac_policy_update') @mock.patch.object(obj_db_api, 'get_object', return_value={'tenant_id': 'king_beyond_the_wall'}) def test_validate_rbac_policy_change_forbidden_for_outsiders( self, mock_get_object, mock_validate_update): context = mock.Mock(is_admin=False, tenant_id='db_obj_owner_id') self.assertRaises( n_exc.InvalidInput, self._rbac_policy_generate_change_events, resource=mock.Mock(), trigger='dummy_trigger', 
context=context, object_type=self._test_class.rbac_db_cls.db_model.object_type, policy={'object_id': 'fake_object_id'}, event_list=(events.BEFORE_CREATE, events.BEFORE_UPDATE)) self.assertFalse(mock_validate_update.called) @mock.patch.object(_test_class, '_validate_rbac_policy_delete') def _test_validate_rbac_policy_delete_handles_policy( self, policy, mock_validate_delete): payload = events.DBEventPayload( n_context.get_admin_context(), states=(policy,), metadata={ 'object_type': self._test_class.rbac_db_cls.db_model.object_type}) self._test_class.validate_rbac_policy_delete( resource=mock.Mock(), event=events.BEFORE_DELETE, trigger='dummy_trigger', payload=payload) mock_validate_delete.assert_not_called() def test_validate_rbac_policy_delete_handles_shared_action(self): self._test_validate_rbac_policy_delete_handles_policy( {'action': 'unknown_action'}) @mock.patch.object(obj_db_api, 'get_object') def test_validate_rbac_policy_delete_skips_db_object_owner(self, mock_get_object): policy = {'action': rbac_db_models.ACCESS_SHARED, 'target_tenant': 'fake_tenant_id', 'object_id': 'fake_obj_id', 'tenant_id': 'fake_tenant_id'} mock_get_object.return_value.tenant_id = policy['target_tenant'] self._test_validate_rbac_policy_delete_handles_policy(policy) @mock.patch.object(obj_db_api, 'get_object') @mock.patch.object(_test_class, 'get_bound_tenant_ids', return_value='tenant_id_shared_with') def test_validate_rbac_policy_delete_fails_single_tenant_and_in_use( self, get_bound_tenant_ids_mock, mock_get_object): policy = {'action': rbac_db_models.ACCESS_SHARED, 'target_tenant': 'tenant_id_shared_with', 'tenant_id': 'object_owner_tenant_id', 'object_id': 'fake_obj_id'} context = mock.Mock() with mock.patch.object( self._test_class, '_get_db_obj_rbac_entries') as target_tenants_mock: filter_mock = target_tenants_mock.return_value.filter filter_mock.return_value.count.return_value = 0 payload = events.DBEventPayload( context, states=(policy,), metadata={ 'object_type': self._test_class.rbac_db_cls.db_model.object_type}) self.assertRaises( ext_rbac.RbacPolicyInUse, self._test_class.validate_rbac_policy_delete, resource=None, event=events.BEFORE_DELETE, trigger='dummy_trigger', payload=payload) def test_validate_rbac_policy_delete_not_bound_tenant_success(self): context = mock.Mock() with mock.patch.object( self._test_class, 'get_bound_tenant_ids', return_value={'fake_tid2', 'fake_tid3'}), \ mock.patch.object(self._test_class, '_get_db_obj_rbac_entries') as get_rbac_entries_mock, \ mock.patch.object( self._test_class, '_get_tenants_with_shared_access_to_db_obj') as sh_tids: get_rbac_entries_mock.filter.return_value.count.return_value = 0 self._test_class._validate_rbac_policy_delete( context=context, obj_id='fake_obj_id', target_tenant='fake_tid1') sh_tids.assert_not_called() @mock.patch.object(_test_class, '_get_db_obj_rbac_entries') @mock.patch.object(_test_class, '_get_tenants_with_shared_access_to_db_obj', return_value=['some_other_tenant']) @mock.patch.object(_test_class, 'get_bound_tenant_ids', return_value={'fake_id1'}) def test_validate_rbac_policy_delete_fails_single_used_wildcarded( self, get_bound_tenant_ids_mock, mock_tenants_with_shared_access, _get_db_obj_rbac_entries_mock): policy = {'action': rbac_db_models.ACCESS_SHARED, 'target_tenant': '*', 'tenant_id': 'object_owner_tenant_id', 'object_id': 'fake_obj_id'} context = mock.Mock() payload = events.DBEventPayload( context, states=(policy,), metadata={ 'object_type': self._test_class.rbac_db_cls.db_model.object_type}) with 
mock.patch.object(obj_db_api, 'get_object'): self.assertRaises( ext_rbac.RbacPolicyInUse, self._test_class.validate_rbac_policy_delete, resource=mock.Mock(), event=events.BEFORE_DELETE, trigger='dummy_trigger', payload=payload) @mock.patch.object(_test_class, 'attach_rbac') @mock.patch.object(obj_db_api, 'get_object', return_value=['fake_rbac_policy']) @mock.patch.object(_test_class, '_validate_rbac_policy_delete') def test_update_shared_avoid_duplicate_update( self, mock_validate_delete, get_object_mock, attach_rbac_mock): obj_id = 'fake_obj_id' obj = self._test_class(mock.Mock()) obj.update_shared(is_shared_new=True, obj_id=obj_id) get_object_mock.assert_called_with( obj.rbac_db_cls, mock.ANY, object_id=obj_id, target_tenant='*', action=rbac_db_models.ACCESS_SHARED) self.assertFalse(mock_validate_delete.called) self.assertFalse(attach_rbac_mock.called) @mock.patch.object(_test_class, 'attach_rbac') @mock.patch.object(obj_db_api, 'get_object', return_value=[]) @mock.patch.object(_test_class, '_validate_rbac_policy_delete') def test_update_shared_wildcard( self, mock_validate_delete, get_object_mock, attach_rbac_mock): obj_id = 'fake_obj_id' test_neutron_obj = self._test_class(mock.Mock()) test_neutron_obj.update_shared(is_shared_new=True, obj_id=obj_id) get_object_mock.assert_called_with( test_neutron_obj.rbac_db_cls, mock.ANY, object_id=obj_id, target_tenant='*', action=rbac_db_models.ACCESS_SHARED) attach_rbac_mock.assert_called_with( obj_id, test_neutron_obj.obj_context.tenant_id) def test_shared_field_false_without_context(self): test_neutron_obj = self._test_class() self.assertFalse(test_neutron_obj.to_dict()['shared']) @mock.patch.object(_test_class, 'attach_rbac') @mock.patch.object(obj_db_api, 'get_object', return_value=['fake_rbac_policy']) @mock.patch.object(_test_class, '_validate_rbac_policy_delete') def test_update_shared_remove_wildcard_sharing( self, mock_validate_delete, get_object_mock, attach_rbac_mock): obj_id = 'fake_obj_id' obj = self._test_class(mock.Mock()) obj.update_shared(is_shared_new=False, obj_id=obj_id) get_object_mock.assert_called_with( obj.rbac_db_cls, mock.ANY, object_id=obj_id, target_tenant='*', action=rbac_db_models.ACCESS_SHARED) self.assertFalse(attach_rbac_mock.attach_rbac.called) mock_validate_delete.assert_called_with(mock.ANY, obj_id, '*') @mock.patch.object(_test_class, 'create_rbac_policy') def test_attach_rbac_returns_type(self, create_rbac_mock): obj_id = 'fake_obj_id' tenant_id = 'fake_tenant_id' target_tenant = 'fake_target_tenant' self._test_class(mock.Mock()).attach_rbac(obj_id, tenant_id, target_tenant) rbac_pol = create_rbac_mock.call_args_list[0][0][1]['rbac_policy'] self.assertEqual(rbac_pol['object_id'], obj_id) self.assertEqual(rbac_pol['target_tenant'], target_tenant) self.assertEqual(rbac_pol['action'], rbac_db_models.ACCESS_SHARED) self.assertEqual(rbac_pol['object_type'], self._test_class.rbac_db_cls.db_model.object_type) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/test_router.py0000644000175000017500000001355400000000000025350 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import uuidutils from neutron.objects import router from neutron.tests.unit.objects import test_base as obj_test_base from neutron.tests.unit import testlib_api class RouterRouteIfaceObjectTestCase(obj_test_base.BaseObjectIfaceTestCase): _test_class = router.RouterRoute class RouterRouteDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = router.RouterRoute def setUp(self): super(RouterRouteDbObjectTestCase, self).setUp() self.update_obj_fields( {'router_id': lambda: self._create_test_router_id()}) class RouterExtraAttrsIfaceObjTestCase(obj_test_base. BaseObjectIfaceTestCase): _test_class = router.RouterExtraAttributes class RouterExtraAttrsDbObjTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = router.RouterExtraAttributes def setUp(self): super(RouterExtraAttrsDbObjTestCase, self).setUp() self.update_obj_fields( {'router_id': lambda: self._create_test_router_id()}) class RouterIfaceObjectTestCase(obj_test_base.BaseObjectIfaceTestCase): _test_class = router.Router class RouterDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = router.Router def setUp(self): super(RouterDbObjectTestCase, self).setUp() self.update_obj_fields( {'gw_port_id': lambda: self._create_test_port_id(), 'flavor_id': lambda: self._create_test_flavor_id()}) def _create_router(self, router_id, gw_port_id, project_id): r = router.Router(self.context, id=router_id, gw_port_id=gw_port_id, project_id=project_id) r.create() def test_check_routers_not_owned_by_projects(self): for obj in self.obj_fields: self._create_router(router_id=obj['id'], gw_port_id=obj['gw_port_id'], project_id=obj['project_id']) obj = self.obj_fields[0] gw_port = obj['gw_port_id'] project = obj['project_id'] new_project = project gw_port_no_match = uuidutils.generate_uuid() project_no_match = uuidutils.generate_uuid() new_project_no_match = uuidutils.generate_uuid() # Check router match with gw_port BUT no projects router_exist = router.Router.check_routers_not_owned_by_projects( self.context, [gw_port], [project_no_match, new_project_no_match]) self.assertTrue(router_exist) # Check router doesn't match with gw_port router_exist = router.Router.check_routers_not_owned_by_projects( self.context, [gw_port_no_match], [project]) self.assertFalse(router_exist) # Check router match with gw_port AND project router_exist = router.Router.check_routers_not_owned_by_projects( self.context, [gw_port], [project, new_project_no_match]) self.assertFalse(router_exist) # Check router match with gw_port AND new project router_exist = router.Router.check_routers_not_owned_by_projects( self.context, [gw_port], [project_no_match, new_project]) self.assertFalse(router_exist) # Check router match with gw_port AND project AND new project router_exist = router.Router.check_routers_not_owned_by_projects( self.context, [gw_port], [project, new_project]) self.assertFalse(router_exist) class RouterPortIfaceObjectTestCase(obj_test_base.BaseObjectIfaceTestCase): _test_class = router.RouterPort class 
RouterPortDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = router.RouterPort def setUp(self): super(RouterPortDbObjectTestCase, self).setUp() self.update_obj_fields( {'router_id': lambda: self._create_test_router_id(), 'port_id': lambda: self._create_test_port_id()}) class DVRMacAddressIfaceObjectTestCase(obj_test_base.BaseObjectIfaceTestCase): _test_class = router.DVRMacAddress class DVRMacAddressDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = router.DVRMacAddress class FloatingIPIfaceObjectTestCase(obj_test_base.BaseObjectIfaceTestCase): _test_class = router.FloatingIP class FloatingIPDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = router.FloatingIP def setUp(self): super(FloatingIPDbObjectTestCase, self).setUp() self.update_obj_fields( {'floating_port_id': lambda: self._create_test_port_id(), 'fixed_port_id': lambda: self._create_test_port_id(), 'router_id': lambda: self._create_test_router_id()}) class DvrFipGatewayPortAgentBindingTestCase( obj_test_base.BaseObjectIfaceTestCase): _test_class = router.DvrFipGatewayPortAgentBinding ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/test_securitygroup.py0000644000175000017500000002212600000000000026747 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
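# The RBAC test cases below combine test_rbac.TestRBACObjectMixin, which
# randomizes the 'action' field from the model's get_valid_actions(), with
# the usual Iface/Db split. Since each SecurityGroupRBAC row points at a
# security group through its object_id, the DB-backed case pre-creates a
# SecurityGroup per generated entry in setUp() before the RBAC objects
# themselves are written.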
import collections import itertools from oslo_utils import uuidutils from neutron.objects import securitygroup from neutron.tests.unit.objects import test_base from neutron.tests.unit.objects import test_rbac from neutron.tests.unit import testlib_api class SecurityGroupRBACDbObjectTestCase(test_rbac.TestRBACObjectMixin, test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = securitygroup.SecurityGroupRBAC def setUp(self): super(SecurityGroupRBACDbObjectTestCase, self).setUp() for obj in self.db_objs: sg_obj = securitygroup.SecurityGroup(self.context, id=obj['object_id'], project_id=obj['project_id']) sg_obj.create() def _create_test_security_group_rbac(self): self.objs[0].create() return self.objs[0] def test_object_version_degradation_1_1_to_1_0_no_shared(self): security_group_rbac_obj = self._create_test_security_group_rbac() x = security_group_rbac_obj.obj_to_primitive('1.0') security_group_rbac_dict = x self.assertNotIn('shared', security_group_rbac_dict['versioned_object.data']) class SecurityGroupRBACIfaceObjectTestCase(test_rbac.TestRBACObjectMixin, test_base.BaseObjectIfaceTestCase): _test_class = securitygroup.SecurityGroupRBAC class SecurityGroupIfaceObjTestCase(test_rbac.RBACBaseObjectIfaceTestCase): _test_class = securitygroup.SecurityGroup class SecurityGroupDbObjTestCase(test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = securitygroup.SecurityGroup def setUp(self): super(SecurityGroupDbObjTestCase, self).setUp() # TODO(ihrachys): consider refactoring base test class to set None for # all nullable fields for db_obj in self.db_objs: for rule in db_obj['rules']: # we either make it null, or create remote groups for each rule # generated; we picked the former here rule['remote_group_id'] = None def _create_test_security_group(self): self.objs[0].create() return self.objs[0] def test_object_version_degradation_1_2_to_1_1_no_stateful(self): sg_stateful_obj = self._create_test_security_group() sg_no_stateful_obj = sg_stateful_obj.obj_to_primitive('1.1') self.assertNotIn('stateful', sg_no_stateful_obj['versioned_object.data']) def test_is_default_True(self): fields = self.obj_fields[0].copy() sg_obj = self._make_object(fields) sg_obj.is_default = True sg_obj.create() default_sg_obj = securitygroup.DefaultSecurityGroup.get_object( self.context, project_id=sg_obj.project_id, security_group_id=sg_obj.id) self.assertIsNotNone(default_sg_obj) sg_obj = securitygroup.SecurityGroup.get_object( self.context, id=sg_obj.id, project_id=sg_obj.project_id ) self.assertTrue(sg_obj.is_default) def test_is_default_False(self): fields = self.obj_fields[0].copy() sg_obj = self._make_object(fields) sg_obj.is_default = False sg_obj.create() default_sg_obj = securitygroup.DefaultSecurityGroup.get_object( self.context, project_id=sg_obj.project_id, security_group_id=sg_obj.id) self.assertIsNone(default_sg_obj) sg_obj = securitygroup.SecurityGroup.get_object( self.context, id=sg_obj.id, project_id=sg_obj.project_id ) self.assertFalse(sg_obj.is_default) def test_get_object_filter_by_is_default(self): fields = self.obj_fields[0].copy() sg_obj = self._make_object(fields) sg_obj.is_default = True sg_obj.create() listed_obj = securitygroup.SecurityGroup.get_object( self.context, id=sg_obj.id, project_id=sg_obj.project_id, is_default=True ) self.assertIsNotNone(listed_obj) self.assertEqual(sg_obj, listed_obj) def test_get_objects_queries_constant(self): # TODO(electrocucaracha) SecurityGroup is using SecurityGroupRule # object to reload rules, which costs extra SQL query 
each time # is_default field is loaded as part of get_object(s). SecurityGroup # has defined relationship for SecurityGroupRules, so it should be # possible to reuse side loaded values fo this. To be reworked in # follow-up patch. pass def test_get_object_no_synth(self): fields = self.obj_fields[0].copy() sg_obj = self._make_object(fields) sg_obj.is_default = True sg_obj.create() listed_obj = securitygroup.SecurityGroup.get_object( self.context, fields=['id', 'name'], id=sg_obj.id, project_id=sg_obj.project_id ) self.assertIsNotNone(listed_obj) self.assertEqual(len(sg_obj.rules), 0) self.assertIsNone(listed_obj.rules) def test_get_objects_no_synth(self): fields = self.obj_fields[0].copy() sg_obj = self._make_object(fields) sg_obj.is_default = True sg_obj.create() listed_objs = securitygroup.SecurityGroup.get_objects( self.context, fields=['id', 'name'], id=sg_obj.id, project_id=sg_obj.project_id ) self.assertEqual(len(listed_objs), 1) self.assertEqual(len(sg_obj.rules), 0) self.assertIsNone(listed_objs[0].rules) class DefaultSecurityGroupIfaceObjTestCase(test_base.BaseObjectIfaceTestCase): _test_class = securitygroup.DefaultSecurityGroup class DefaultSecurityGroupDbObjTestCase(test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = securitygroup.DefaultSecurityGroup def setUp(self): super(DefaultSecurityGroupDbObjTestCase, self).setUp() self.update_obj_fields( { 'security_group_id': lambda: self._create_test_security_group_id() }) class SecurityGroupRuleIfaceObjTestCase(test_base.BaseObjectIfaceTestCase): _test_class = securitygroup.SecurityGroupRule class SecurityGroupRuleDbObjTestCase(test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = securitygroup.SecurityGroupRule def setUp(self): super(SecurityGroupRuleDbObjTestCase, self).setUp() self.update_obj_fields( { 'security_group_id': lambda: self._create_test_security_group_id(), 'remote_group_id': lambda: self._create_test_security_group_id() }) def test_get_security_group_rule_ids(self): """Retrieve the SG rules associated to a project (see method desc.) SG1 (PROJECT1) SG2 (PROJECT2) rule1a (PROJECT1) rule2a (PROJECT1) rule1b (PROJECT2) rule2b (PROJECT2) query PROJECT1: rule1a, rule1b, rule2a query PROJECT2: rule1b, rule2a, rule2b """ projects = [uuidutils.generate_uuid(), uuidutils.generate_uuid()] sgs = [ self._create_test_security_group_id({'project_id': projects[0]}), self._create_test_security_group_id({'project_id': projects[1]})] rules_per_project = collections.defaultdict(list) rules_per_sg = collections.defaultdict(list) for project, sg in itertools.product(projects, sgs): sgrule_fields = self.get_random_object_fields( securitygroup.SecurityGroupRule) sgrule_fields['project_id'] = project sgrule_fields['security_group_id'] = sg rule = securitygroup.SecurityGroupRule(self.context, **sgrule_fields) rule.create() rules_per_project[project].append(rule.id) rules_per_sg[sg].append(rule.id) for idx in range(2): rule_ids = securitygroup.SecurityGroupRule.\ get_security_group_rule_ids(projects[idx]) rule_ids_ref = set(rules_per_project[projects[idx]]) rule_ids_ref.update(set(rules_per_sg[sgs[idx]])) self.assertEqual(rule_ids_ref, set(rule_ids)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/test_servicetype.py0000644000175000017500000000215400000000000026364 0ustar00coreycorey00000000000000# Copyright (c) 2016 Intel Corporation. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.objects import servicetype from neutron.tests.unit.objects import test_base as obj_test_base from neutron.tests.unit import testlib_api class ProviderResourceAssociationIfaceObjectTestCase( obj_test_base.BaseObjectIfaceTestCase): _test_class = servicetype.ProviderResourceAssociation class ProviderResourceAssociationDbObjectTestCase( obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = servicetype.ProviderResourceAssociation ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/test_subnet.py0000644000175000017500000002750600000000000025332 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
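# Note for the subnet tests below: Subnet has no RBAC table of its own; its
# 'shared' value is derived from RBAC entries on the parent network. That is
# why the Iface test case patches the RbacNeutronDbObjectMixin entry points
# (which would otherwise hit the database), while the DB-backed tests create
# NetworkRBAC rows with target_tenant='*' to make a network, and therefore
# its subnets, visible to every project.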
import mock from neutron_lib import context from oslo_utils import uuidutils from neutron.db import rbac_db_models from neutron.objects import base as obj_base from neutron.objects.db import api as obj_db_api from neutron.objects import network as net_obj from neutron.objects import rbac_db from neutron.objects import subnet from neutron.tests.unit.objects import test_base as obj_test_base from neutron.tests.unit import testlib_api class IPAllocationPoolObjectIfaceTestCase( obj_test_base.BaseObjectIfaceTestCase): _test_class = subnet.IPAllocationPool class IPAllocationPoolDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = subnet.IPAllocationPool def setUp(self): super(IPAllocationPoolDbObjectTestCase, self).setUp() self.update_obj_fields( {'subnet_id': lambda: self._create_test_subnet_id()}) class DNSNameServerObjectIfaceTestCase(obj_test_base.BaseObjectIfaceTestCase): _test_class = subnet.DNSNameServer def setUp(self): super(DNSNameServerObjectIfaceTestCase, self).setUp() self.pager_map[self._test_class.obj_name()] = ( obj_base.Pager(sorts=[('order', True)])) class DNSNameServerDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = subnet.DNSNameServer def setUp(self): super(DNSNameServerDbObjectTestCase, self).setUp() self._subnet_id = self._create_test_subnet_id() self.update_obj_fields({'subnet_id': self._subnet_id}) def _create_dnsnameservers(self): for obj in self.obj_fields: dns = self._make_object(obj) dns.create() def test_get_objects_sort_by_order_asc(self): self._create_dnsnameservers() objs = self._test_class.get_objects(self.context) fields_sorted = sorted([obj['order'] for obj in self.obj_fields]) self.assertEqual(fields_sorted, [obj.order for obj in objs]) def test_get_objects_sort_by_order_desc(self): self._create_dnsnameservers() pager = obj_base.Pager(sorts=[('order', False)]) objs = self._test_class.get_objects(self.context, _pager=pager, subnet_id=self._subnet_id) fields_sorted = sorted([obj['order'] for obj in self.obj_fields], reverse=True) self.assertEqual(fields_sorted, [obj.order for obj in objs]) def test_get_objects_sort_by_address_asc_using_pager(self): self._create_dnsnameservers() pager = obj_base.Pager(sorts=[('address', True)]) objs = self._test_class.get_objects(self.context, _pager=pager) fields_sorted = sorted([obj['address'] for obj in self.obj_fields]) self.assertEqual(fields_sorted, [obj.address for obj in objs]) class RouteObjectIfaceTestCase(obj_test_base.BaseObjectIfaceTestCase): _test_class = subnet.Route class RouteDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = subnet.Route def setUp(self): super(RouteDbObjectTestCase, self).setUp() self.update_obj_fields( {'subnet_id': lambda: self._create_test_subnet_id()}) class SubnetServiceTypeObjectIfaceTestCase( obj_test_base.BaseObjectIfaceTestCase): _test_class = subnet.SubnetServiceType class SubnetServiceTypeDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = subnet.SubnetServiceType def setUp(self): super(SubnetServiceTypeDbObjectTestCase, self).setUp() self.update_obj_fields( {'subnet_id': lambda: self._create_test_subnet_id()}) class SubnetObjectIfaceTestCase(obj_test_base.BaseObjectIfaceTestCase): _test_class = subnet.Subnet def setUp(self): super(SubnetObjectIfaceTestCase, self).setUp() self.pager_map[subnet.DNSNameServer.obj_name()] = ( obj_base.Pager(sorts=[('order', True)])) # Base class will mock those out only when rbac_db_model 
is set for the # object. Since subnets don't have their own models but only derive # shared value from networks, we need to unconditionally mock those # entry points out here, otherwise they will trigger database access, # which is not allowed in 'Iface' test classes. mock.patch.object( rbac_db.RbacNeutronDbObjectMixin, 'is_shared_with_tenant', return_value=False).start() mock.patch.object( rbac_db.RbacNeutronDbObjectMixin, 'get_shared_with_tenant').start() class SubnetDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = subnet.Subnet CORE_PLUGIN = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' def setUp(self): super(SubnetDbObjectTestCase, self).setUp() # set up plugin because some models used here require a plugin # (specifically, rbac models and their get_valid_actions validators) self.setup_coreplugin(self.CORE_PLUGIN) network_id = self._create_test_network_id() self.update_obj_fields( {'network_id': network_id, 'segment_id': lambda: self._create_test_segment_id(network_id)}) def test_get_dns_nameservers_in_order(self): obj = self._make_object(self.obj_fields[0]) obj.create() dns_nameservers = [(2, '1.2.3.4'), (1, '5.6.7.8'), (4, '7.7.7.7')] for order, address in dns_nameservers: dns = subnet.DNSNameServer(self.context, order=order, address=address, subnet_id=obj.id) dns.create() new = self._test_class.get_object(self.context, id=obj.id) self.assertEqual(1, new.dns_nameservers[0].order) self.assertEqual(2, new.dns_nameservers[1].order) self.assertEqual(4, new.dns_nameservers[-1].order) def _create_shared_network_rbac_entry(self, network): attrs = { 'object_id': network['id'], 'target_tenant': '*', 'action': rbac_db_models.ACCESS_SHARED } obj_db_api.create_object(net_obj.NetworkRBAC, self.context, attrs) def test_get_subnet_shared_true(self): network = self._create_test_network() self._create_shared_network_rbac_entry(network) subnet_data = dict(self.obj_fields[0]) subnet_data['network_id'] = network['id'] obj = self._make_object(subnet_data) # check if shared will be load by 'obj_load_attr' and using extra query # by RbacNeutronDbObjectMixin get_shared_with_tenant self.assertTrue(obj.shared) obj.create() # here the shared should be load by is_network_shared self.assertTrue(obj.shared) new = self._test_class.get_object(self.context, **obj._get_composite_keys()) # again, the shared should be load by is_network_shared self.assertTrue(new.shared) def test_filter_by_shared(self): network = self._create_test_network() self._create_shared_network_rbac_entry(network) subnet_data = dict(self.obj_fields[0]) subnet_data['network_id'] = network['id'] obj = self._make_object(subnet_data) obj.create() result = self._test_class.get_objects(self.context, shared=True) self.assertEqual(obj, result[0]) def test_get_shared_subnet_with_another_tenant(self): network_shared = self._create_test_network() self._create_shared_network_rbac_entry(network_shared) subnet_data = dict(self.obj_fields[0]) subnet_data['network_id'] = network_shared['id'] shared_subnet = self._make_object(subnet_data) shared_subnet.create() priv_subnet = self._make_object(self.obj_fields[1]) priv_subnet.create() # Situation here: # - we have one network with a subnet that are private # - shared network with its subnet # creating new context, user should have access to one shared network all_subnets = self._test_class.get_objects(self.context) self.assertEqual(2, len(all_subnets)) # access with new tenant_id, should be able to access to one subnet new_ctx = context.Context('', 
uuidutils.generate_uuid()) public_subnets = self._test_class.get_objects(new_ctx) self.assertEqual([shared_subnet], public_subnets) # test get_object to fetch the private and then the shared subnet fetched_private_subnet = self._test_class.get_object(new_ctx, id=priv_subnet.id) self.assertIsNone(fetched_private_subnet) fetched_public_subnet = ( self._test_class.get_object(new_ctx, id=shared_subnet.id)) self.assertEqual(shared_subnet, fetched_public_subnet) def test_get_service_types(self): obj = self._make_object(self.obj_fields[0]) obj.create() service_type_obj = subnet.SubnetServiceType( self.context, subnet_id=obj.id, service_type='dhcp-agent') service_type_obj.create() listed_obj = subnet.Subnet.get_object(self.context, id=obj.id) self.assertEqual([service_type_obj.service_type], listed_obj.service_types) # Try to load the service_types by obj_load_attr obj1 = self._make_object(self.obj_fields[0]) self.assertEqual([service_type_obj.service_type], obj1.service_types) class NetworkSubnetLockTestCase(obj_test_base.BaseObjectIfaceTestCase): _test_class = subnet.NetworkSubnetLock class NetworkSubnetLockDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = subnet.NetworkSubnetLock def setUp(self): super(NetworkSubnetLockDbObjectTestCase, self).setUp() self.update_obj_fields( {'network_id': lambda: self._create_test_network_id()}) def test_lock_subnet_update(self): obj = self._make_object(self.obj_fields[0]) obj.create() subnet_id = self._create_test_subnet_id(network_id=obj.network_id) subnet.NetworkSubnetLock.lock_subnet(self.context, obj.network_id, subnet_id) obj = subnet.NetworkSubnetLock.get_object(self.context, network_id=obj.network_id) self.assertEqual(subnet_id, obj.subnet_id) def test_lock_subnet_create(self): network_id = self._create_test_network_id() subnet_id = self._create_test_subnet_id(network_id=network_id) obj = subnet.NetworkSubnetLock.get_object(self.context, network_id=network_id) self.assertIsNone(obj) subnet.NetworkSubnetLock.lock_subnet(self.context, network_id, subnet_id) obj = subnet.NetworkSubnetLock.get_object(self.context, network_id=network_id) self.assertEqual(network_id, obj.network_id) self.assertEqual(subnet_id, obj.subnet_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/test_subnetpool.py0000644000175000017500000001676200000000000026226 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
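# SubnetPoolTestMixin below centralizes creation of a minimally valid
# SubnetPool (IPv4, prefix length bounds 0..32, not shared) so that several
# test cases can reuse it for foreign keys. A rough usage sketch, with an
# illustrative prefix value that does not appear in the tests themselves:
#
#     pool = self._create_test_subnetpool()
#     pool.prefixes = ['10.0.0.0/8']    # hypothetical prefix
#     pool.update()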
import mock from neutron_lib import constants from neutron_lib.db import model_query from oslo_utils import uuidutils from neutron.extensions import rbac as ext_rbac from neutron.objects.db import api as obj_db_api from neutron.objects import subnetpool from neutron.tests.unit.objects import test_base as obj_test_base from neutron.tests.unit.objects import test_rbac from neutron.tests.unit import testlib_api class SubnetPoolTestMixin(object): def _create_test_subnetpool(self, snp_id=None): if not snp_id: snp_id = uuidutils.generate_uuid() obj = subnetpool.SubnetPool( self.context, id=snp_id, ip_version=constants.IP_VERSION_4, default_prefixlen=24, min_prefixlen=0, max_prefixlen=32, shared=False) obj.create() return obj class SubnetPoolIfaceObjectTestCase(obj_test_base.BaseObjectIfaceTestCase): _test_class = subnetpool.SubnetPool class SubnetPoolDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase, SubnetPoolTestMixin): _test_class = subnetpool.SubnetPool def test_subnetpool_prefixes(self): pool = self._create_test_subnetpool() prefixes = obj_test_base.get_list_of_random_networks() pool.prefixes = prefixes pool.update() new_pool = self._test_class.get_object(self.context, id=pool.id) self.assertItemsEqual(prefixes, new_pool.prefixes) prefixes.pop() pool.prefixes = prefixes pool.update() new_pool = self._test_class.get_object(self.context, id=pool.id) self.assertItemsEqual(prefixes, new_pool.prefixes) def test_get_objects_queries_constant(self): # TODO(korzen) SubnetPool is using SubnetPoolPrefix object to reload # prefixes, which costs extra SQL query each time reload_prefixes # are called in get_object(s). SubnetPool has defined relationship # for SubnetPoolPrefixes, so it should be possible to reuse side loaded # values fo this. To be reworked in follow-up patch. 
pass @mock.patch.object(model_query, 'query_with_hooks') @mock.patch.object(obj_db_api, 'get_object') def test_rbac_policy_create_no_address_scope(self, mock_get_object, mock_query_with_hooks): context = mock.Mock(is_admin=False, tenant_id='db_obj_owner_id') payload = mock.Mock( context=context, request_body=dict(object_id="fake_id") ) mock_get_object.return_value = dict(address_scope_id=None) subnetpool.SubnetPool.validate_rbac_policy_create( None, None, None, payload=payload ) mock_query_with_hooks.assert_not_called() def _validate_rbac_filter_mock(self, filter_mock, project_id, address_scope_id): filter_mock.assert_called_once() self.assertEqual( "addressscoperbacs.target_tenant IN ('*', '%(project_id)s') " "AND addressscoperbacs.object_id = '%(address_scope_id)s'" % { "project_id": project_id, "address_scope_id": address_scope_id, }, filter_mock.call_args[0][0].compile( compile_kwargs={"literal_binds": True} ).string ) @mock.patch.object(model_query, 'query_with_hooks') @mock.patch.object(obj_db_api, 'get_object') def test_rbac_policy_create_no_matching_policies(self, mock_get_object, mock_query_with_hooks): context = mock.Mock(is_admin=False, tenant_id='db_obj_owner_id') fake_project_id = "fake_target_tenant_id" payload = mock.Mock( context=context, request_body=dict( object_id="fake_id", target_tenant=fake_project_id ) ) fake_address_scope_id = "fake_as_id" mock_get_object.return_value = dict( address_scope_id=fake_address_scope_id ) filter_mock = mock.Mock( return_value=mock.Mock(count=mock.Mock(return_value=0)) ) mock_query_with_hooks.return_value = mock.Mock(filter=filter_mock) self.assertRaises( ext_rbac.RbacPolicyInitError, subnetpool.SubnetPool.validate_rbac_policy_create, resource=None, event=None, trigger=None, payload=payload ) self._validate_rbac_filter_mock( filter_mock, fake_project_id, fake_address_scope_id ) @mock.patch.object(model_query, 'query_with_hooks') @mock.patch.object(obj_db_api, 'get_object') def test_rbac_policy_create_valid(self, mock_get_object, mock_query_with_hooks): context = mock.Mock(is_admin=False, tenant_id='db_obj_owner_id') fake_project_id = "fake_target_tenant_id" payload = mock.Mock( context=context, request_body=dict( object_id="fake_id", target_tenant=fake_project_id ) ) fake_address_scope_id = "fake_as_id" mock_get_object.return_value = dict( address_scope_id=fake_address_scope_id ) filter_mock = mock.Mock(count=1) mock_query_with_hooks.return_value = mock.Mock(filter=filter_mock) subnetpool.SubnetPool.validate_rbac_policy_create( None, None, None, payload=payload ) self._validate_rbac_filter_mock( filter_mock, fake_project_id, fake_address_scope_id ) class SubnetPoolPrefixIfaceObjectTestCase( obj_test_base.BaseObjectIfaceTestCase): _test_class = subnetpool.SubnetPoolPrefix class SubnetPoolPrefixDbObjectTestCase( obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase, SubnetPoolTestMixin): _test_class = subnetpool.SubnetPoolPrefix def setUp(self): super(SubnetPoolPrefixDbObjectTestCase, self).setUp() self.update_obj_fields( {'subnetpool_id': lambda: self._create_test_subnetpool().id}) class SubnetPoolRBACDbObjectTestCase(test_rbac.TestRBACObjectMixin, obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase, SubnetPoolTestMixin): _test_class = subnetpool.SubnetPoolRBAC def setUp(self): super(SubnetPoolRBACDbObjectTestCase, self).setUp() for obj in self.db_objs: self._create_test_subnetpool(obj['object_id']) def _create_test_subnetpool_rbac(self): self.objs[0].create() return self.objs[0] class 
SubnetPoolRBACIfaceObjectTestCase(test_rbac.TestRBACObjectMixin, obj_test_base.BaseObjectIfaceTestCase): _test_class = subnetpool.SubnetPoolRBAC ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/test_tag.py0000644000175000017500000000225100000000000024573 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.objects import tag from neutron.tests.unit.objects import test_base as obj_test_base from neutron.tests.unit import testlib_api class TagIfaceObjectTestCase(obj_test_base.BaseObjectIfaceTestCase): _test_class = tag.Tag class TagDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = tag.Tag def setUp(self): super(TagDbObjectTestCase, self).setUp() self.update_obj_fields( { 'standard_attr_id': lambda: self._create_test_standard_attribute_id() }) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/objects/test_trunk.py0000644000175000017500000001621300000000000025166 0ustar00coreycorey00000000000000# Copyright (c) 2016 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
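# The trunk tests below need more elaborate fixtures than most object tests:
# a SubPort row references both a port and a trunk, and a Trunk references a
# port, so the DB-backed setUp() methods pre-create a network plus the
# required ports (and, for subports, a parent trunk). Integrity failures are
# surfaced as object-level exceptions, e.g. a DBDuplicateEntry on subport
# creation is translated to DuplicateSubPort.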
import itertools import mock from neutron_lib import exceptions as n_exc from neutron_lib.services.trunk import constants from oslo_db import exception as obj_exc from oslo_utils import uuidutils from neutron.objects.db import api as obj_db_api from neutron.objects import trunk as t_obj from neutron.services.trunk import exceptions as t_exc from neutron.tests.unit.objects import test_base from neutron.tests.unit import testlib_api class SubPortObjectTestCase(test_base.BaseObjectIfaceTestCase): _test_class = t_obj.SubPort def test_create_duplicates(self): with mock.patch.object(obj_db_api, 'create_object', side_effect=obj_exc.DBDuplicateEntry): obj = self._test_class(self.context, **self.obj_fields[0]) self.assertRaises(t_exc.DuplicateSubPort, obj.create) class SubPortDbObjectTestCase(test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = t_obj.SubPort def setUp(self): super(SubPortDbObjectTestCase, self).setUp() self._network_id = self._create_test_network_id() for obj in self.obj_fields: self._create_test_port( id=obj['port_id'], network_id=self._network_id) self._create_trunk(trunk_id=obj['trunk_id']) def _create_trunk(self, trunk_id): port_id = self._create_test_port_id(network_id=self._network_id) trunk = t_obj.Trunk(self.context, id=trunk_id, port_id=port_id) trunk.create() def test_create_port_not_found(self): obj = self.obj_fields[0] obj['port_id'] = uuidutils.generate_uuid() sub_port = self._make_object(obj) self.assertRaises(n_exc.PortNotFound, sub_port.create) def test_create_trunk_not_found(self): obj = self.obj_fields[0] obj['trunk_id'] = uuidutils.generate_uuid() sub_port = self._make_object(obj) self.assertRaises(t_exc.TrunkNotFound, sub_port.create) class TrunkObjectTestCase(test_base.BaseObjectIfaceTestCase): _test_class = t_obj.Trunk class TrunkDbObjectTestCase(test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): _test_class = t_obj.Trunk def setUp(self): super(TrunkDbObjectTestCase, self).setUp() self._network_id = self._create_test_network_id() sub_ports = [] for obj in self.db_objs: sub_ports.extend(obj['sub_ports']) for obj in itertools.chain(self.obj_fields, sub_ports): self._create_test_port( id=obj['port_id'], network_id=self._network_id) def test_create_port_not_found(self): obj = self.obj_fields[0] obj['port_id'] = uuidutils.generate_uuid() trunk = self._make_object(obj) self.assertRaises(n_exc.PortNotFound, trunk.create) def _test_create_trunk_with_subports(self, port_id, vids): project_id = uuidutils.generate_uuid() sub_ports = [] for vid in vids: vid_port_id = self._create_test_port_id( network_id=self._network_id) sub_ports.append(t_obj.SubPort( self.context, port_id=vid_port_id, segmentation_type='vlan', segmentation_id=vid)) trunk = t_obj.Trunk( self.context, port_id=port_id, sub_ports=sub_ports, project_id=project_id) trunk.create() self.assertEqual(sub_ports, trunk.sub_ports) return trunk def test_create_with_sub_ports(self): trunk = self._test_create_trunk_with_subports( self.db_objs[0]['port_id'], [1, 2]) def _as_tuple(sub_port): return (sub_port['port_id'], sub_port['segmentation_type'], sub_port['segmentation_id']) expected = {_as_tuple(port) for port in trunk.sub_ports} sub_ports = t_obj.SubPort.get_objects(self.context, trunk_id=trunk.id) self.assertEqual(expected, {_as_tuple(port) for port in sub_ports}) def test_get_object_includes_correct_subports(self): trunk1_vids = [1, 2, 3] trunk2_vids = [4, 5, 6] port_id1 = self.db_objs[0]['port_id'] trunk1 = self._test_create_trunk_with_subports(port_id1, trunk1_vids) port_id2 = 
uuidutils.generate_uuid() self._create_test_port( id=port_id2, network_id=self._network_id) self._test_create_trunk_with_subports(port_id2, trunk2_vids) listed_trunk1 = t_obj.Trunk.get_object( self.context, id=trunk1.id, port_id=port_id1 ) self.assertEqual( set(trunk1_vids), {sp.segmentation_id for sp in listed_trunk1.sub_ports} ) def test_update_multiple_fields(self): trunk = t_obj.Trunk(context=self.context, admin_state_up=False, port_id=self.db_objs[0]['port_id'], status=constants.TRUNK_DOWN_STATUS) trunk.create() fields = {'admin_state_up': True, 'status': constants.TRUNK_ACTIVE_STATUS} trunk.update(**fields) trunk = t_obj.Trunk.get_object(self.context, id=trunk.id) self._assert_trunk_attrs(trunk, **fields) def _assert_trunk_attrs(self, trunk, **kwargs): """Check the values passed in kwargs match the values of the trunk""" for k in trunk.fields: if k in kwargs: self.assertEqual(kwargs[k], trunk[k]) def test_v1_1_to_v1_0_drops_project_id(self): trunk_new = self._test_create_trunk_with_subports( self.db_objs[0]['port_id'], [1, 2]) trunk_v1_0 = trunk_new.obj_to_primitive(target_version='1.0') self.assertNotIn('project_id', trunk_v1_0['versioned_object.data']) self.assertIn('tenant_id', trunk_v1_0['versioned_object.data']) def test_get_objects_tenant_id(self): trunk = t_obj.Trunk(context=self.context, project_id='faketenant', port_id=self.db_objs[0]['port_id']) trunk.create() self.assertIsNotNone( t_obj.Trunk.get_objects(self.context, tenant_id='faketenant')) def test_get_objects_both_tenant_and_project_ids(self): trunk = t_obj.Trunk(context=self.context, project_id='faketenant', port_id=self.db_objs[0]['port_id']) trunk.create() self.assertIsNotNone( t_obj.Trunk.get_objects( self.context, tenant_id='faketenant', project_id='faketenant')) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.455046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/pecan_wsgi/0000755000175000017500000000000000000000000023075 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/pecan_wsgi/test_app.py0000644000175000017500000000157400000000000025275 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
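# A single smoke test: versions_factory is patched out, and the test only
# verifies that the patched entry point is invoked once with the
# configuration dict it was given; it does not exercise the real factory or
# the behavior of the resulting WSGI application.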
# ---- neutron/tests/unit/pecan_wsgi/test_app.py ----
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock

from neutron import pecan_wsgi
from neutron.tests import base


@mock.patch('neutron.pecan_wsgi.app.versions_factory')
class TestVersionsFactory(base.BaseTestCase):

    def test_versions_factory(self, pecan_mock):
        pecan_wsgi.app.versions_factory({})
        pecan_mock.assert_called_once_with({})
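For reference, a self-contained illustration of the class-level @mock.patch used above (stdlib unittest.mock here; the factory function and names are invented): decorating a TestCase class patches the target for every test method and passes the mock in as an extra argument.

import unittest
from unittest import mock


def factory(conf):
    raise RuntimeError('should be patched out in tests')


@mock.patch(__name__ + '.factory')
class FactoryTest(unittest.TestCase):
    def test_called_once(self, factory_mock):
        # the module-level name resolves to the mock while patched
        factory({})
        factory_mock.assert_called_once_with({})


if __name__ == '__main__':
    unittest.main()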
# ---- neutron/tests/unit/plugins/__init__.py (empty) ----
# ---- neutron/tests/unit/plugins/ml2/__init__.py (empty) ----
# ---- neutron/tests/unit/plugins/ml2/_test_mech_agent.py ----
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy

from oslo_config import cfg
from oslo_config import fixture as config_fixture

from neutron_lib.api.definitions import portbindings
from neutron_lib.plugins.ml2 import api

from neutron.tests import base

NETWORK_ID = "fake_network"
PORT_ID = "fake_port"


class FakeNetworkContext(api.NetworkContext):
    def __init__(self, segments):
        self._network_segments = segments

    @property
    def current(self):
        return {'id': NETWORK_ID}

    @property
    def original(self):
        return None

    @property
    def network_segments(self):
        return self._network_segments


class FakePortContext(api.PortContext):
    def __init__(self, agent_type, agents, segments,
                 vnic_type=portbindings.VNIC_NORMAL,
                 original=None, profile=None):
        self._agent_type = agent_type
        self._agents = agents
        self._network_context = FakeNetworkContext(segments)
        self._bound_vnic_type = vnic_type
        self._bound_profile = profile
        self._bound_segment_id = None
        self._bound_vif_type = None
        self._bound_vif_details = None
        self._original = original
        self._binding_levels = []

    @property
    def current(self):
        current_data = {'id': PORT_ID,
                        portbindings.VNIC_TYPE: self._bound_vnic_type,
                        portbindings.PROFILE: self._bound_profile}
        ret_value = current_data
        if self._original:
            ret_value = copy.deepcopy(self.original)
            ret_value.update(current_data)
        return ret_value

    @property
    def original(self):
        return self._original

    @property
    def status(self):
        return 'DOWN'

    @property
    def original_status(self):
        return None

    @property
    def network(self):
        return self._network_context

    def _prepare_to_bind(self, segments_to_bind):
        self._segments_to_bind = segments_to_bind
        self._new_bound_segment = None
        self._next_segments_to_bind = None

    def _push_binding_level(self, binding_level):
        self._binding_levels.append(binding_level)

    def _pop_binding_level(self):
        return self._binding_levels.pop()

    @property
    def binding_levels(self):
        if self._binding_levels:
            return [{
                api.BOUND_DRIVER: level.driver,
                api.BOUND_SEGMENT: self._expand_segment(level.segment_id)
            } for level in self._binding_levels]

    @property
    def original_binding_levels(self):
        return None

    @property
    def top_bound_segment(self):
        if self._binding_levels:
            return self._expand_segment(self._binding_levels[0].segment_id)

    @property
    def original_top_bound_segment(self):
        return None

    @property
    def bottom_bound_segment(self):
        if self._binding_levels:
            return self._expand_segment(self._binding_levels[-1].segment_id)

    @property
    def original_bottom_bound_segment(self):
        return None

    def _expand_segment(self, segment_id):
        for segment in self._network_context.network_segments:
            if segment[api.ID] == self._bound_segment_id:
                return segment

    @property
    def host(self):
        return ''

    @property
    def original_host(self):
        return None

    @property
    def vif_type(self):
        return portbindings.UNBOUND

    @property
    def original_vif_type(self):
        return portbindings.UNBOUND

    @property
    def vif_details(self):
        return None

    @property
    def original_vif_details(self):
        return None

    @property
    def segments_to_bind(self):
        return self._network_context.network_segments

    def host_agents(self, agent_type):
        if agent_type == self._agent_type:
            return self._agents
        else:
            return []

    def set_binding(self, segment_id, vif_type, vif_details):
        self._bound_segment_id = segment_id
        self._bound_vif_type = vif_type
        self._bound_vif_details = vif_details

    def continue_binding(self, segment_id, next_segments_to_bind):
        pass

    def allocate_dynamic_segment(self, segment):
        pass

    def release_dynamic_segment(self, segment_id):
        pass
"""ConfigFixture for vnic_type_blacklist :param conf: The driver configuration object :param blacklist_cfg: A dictionary in the form {'group': {'opt': 'value'}}, i.e.: {'OVS_DRIVER': {'vnic_type_blacklist': ['foo']}} :param registration_func: The method which do the config group's registration. """ super(MechDriverConfFixture, self).__init__(conf) self.blacklist_cfg = blacklist_cfg self.registration_func = registration_func def setUp(self): super(MechDriverConfFixture, self).setUp() self.registration_func(self.conf) for group, option in self.blacklist_cfg.items(): self.config(group=group, **option) class AgentMechanismBaseTestCase(base.BaseTestCase): # The following must be overridden for the specific mechanism # driver being tested: VIF_TYPE = None VIF_DETAILS = None AGENT_TYPE = None AGENTS = None AGENTS_DEAD = None AGENTS_BAD = None VNIC_TYPE = portbindings.VNIC_NORMAL def _check_unbound(self, context): self.assertIsNone(context._bound_segment_id) self.assertIsNone(context._bound_vif_type) self.assertIsNone(context._bound_vif_details) def _check_bound(self, context, segment): self.assertEqual(context._bound_segment_id, segment[api.ID]) self.assertEqual(context._bound_vif_type, self.VIF_TYPE) vif_details = context._bound_vif_details self.assertIsNotNone(vif_details) # NOTE(r-mibu): The following five lines are just for backward # compatibility. In this class, HAS_PORT_FILTER has been replaced # by VIF_DETAILS which can be set expected vif_details to check, # but all replacement of HAS_PORT_FILTER in successor has not been # completed. if self.VIF_DETAILS is None: expected = getattr(self, 'CAP_PORT_FILTER', None) port_filter = vif_details[portbindings.CAP_PORT_FILTER] self.assertEqual(expected, port_filter) return self.assertEqual(self.VIF_DETAILS, vif_details) class AgentMechanismGenericTestCase(AgentMechanismBaseTestCase): UNKNOWN_TYPE_SEGMENTS = [{api.ID: 'unknown_segment_id', api.NETWORK_TYPE: 'no_such_type', api.NETWORK_ID: 'fake_network_id'}] def test_unknown_type(self): context = FakePortContext(self.AGENT_TYPE, self.AGENTS, self.UNKNOWN_TYPE_SEGMENTS, vnic_type=self.VNIC_TYPE) self.driver.bind_port(context) self._check_unbound(context) def test_driver_not_responsible_for_ports_allocation(self): agents = [ {'configurations': {'rp_bandwidths': {'eth0': {}}}, 'host': 'host'}, ] profile = {} segments = [] port_ctx = FakePortContext( self.AGENT_TYPE, agents, segments, vnic_type=portbindings.VNIC_DIRECT, profile=profile) self.assertFalse( self.driver.responsible_for_ports_allocation(port_ctx)) class AgentMechanismLocalTestCase(AgentMechanismBaseTestCase): LOCAL_SEGMENTS = [{api.ID: 'unknown_segment_id', api.NETWORK_TYPE: 'no_such_type', api.NETWORK_ID: 'fake_network_id'}, {api.ID: 'local_segment_id', api.NETWORK_TYPE: 'local', api.NETWORK_ID: 'fake_network_id'}] def test_type_local(self): context = FakePortContext(self.AGENT_TYPE, self.AGENTS, self.LOCAL_SEGMENTS, vnic_type=self.VNIC_TYPE) self.driver.bind_port(context) self._check_bound(context, self.LOCAL_SEGMENTS[1]) def test_type_local_dead(self): context = FakePortContext(self.AGENT_TYPE, self.AGENTS_DEAD, self.LOCAL_SEGMENTS, vnic_type=self.VNIC_TYPE) self.driver.bind_port(context) self._check_unbound(context) class AgentMechanismFlatTestCase(AgentMechanismBaseTestCase): FLAT_SEGMENTS = [{api.ID: 'unknown_segment_id', api.NETWORK_TYPE: 'no_such_type', api.NETWORK_ID: 'fake_network_id'}, {api.ID: 'flat_segment_id', api.NETWORK_TYPE: 'flat', api.PHYSICAL_NETWORK: 'fake_physical_network', api.NETWORK_ID: 'fake_network_id'}] def 
class AgentMechanismBaseTestCase(base.BaseTestCase):
    # The following must be overridden for the specific mechanism
    # driver being tested:
    VIF_TYPE = None
    VIF_DETAILS = None
    AGENT_TYPE = None
    AGENTS = None
    AGENTS_DEAD = None
    AGENTS_BAD = None
    VNIC_TYPE = portbindings.VNIC_NORMAL

    def _check_unbound(self, context):
        self.assertIsNone(context._bound_segment_id)
        self.assertIsNone(context._bound_vif_type)
        self.assertIsNone(context._bound_vif_details)

    def _check_bound(self, context, segment):
        self.assertEqual(context._bound_segment_id, segment[api.ID])
        self.assertEqual(context._bound_vif_type, self.VIF_TYPE)
        vif_details = context._bound_vif_details
        self.assertIsNotNone(vif_details)
        # NOTE(r-mibu): The following five lines are just for backward
        # compatibility. In this class, HAS_PORT_FILTER has been replaced
        # by VIF_DETAILS, which can be set to the expected vif_details to
        # check, but the replacement of HAS_PORT_FILTER in successors has
        # not been completed everywhere.
        if self.VIF_DETAILS is None:
            expected = getattr(self, 'CAP_PORT_FILTER', None)
            port_filter = vif_details[portbindings.CAP_PORT_FILTER]
            self.assertEqual(expected, port_filter)
            return
        self.assertEqual(self.VIF_DETAILS, vif_details)


class AgentMechanismGenericTestCase(AgentMechanismBaseTestCase):
    UNKNOWN_TYPE_SEGMENTS = [{api.ID: 'unknown_segment_id',
                              api.NETWORK_TYPE: 'no_such_type',
                              api.NETWORK_ID: 'fake_network_id'}]

    def test_unknown_type(self):
        context = FakePortContext(self.AGENT_TYPE,
                                  self.AGENTS,
                                  self.UNKNOWN_TYPE_SEGMENTS,
                                  vnic_type=self.VNIC_TYPE)
        self.driver.bind_port(context)
        self._check_unbound(context)

    def test_driver_not_responsible_for_ports_allocation(self):
        agents = [
            {'configurations': {'rp_bandwidths': {'eth0': {}}},
             'host': 'host'},
        ]
        profile = {}
        segments = []
        port_ctx = FakePortContext(
            self.AGENT_TYPE, agents, segments,
            vnic_type=portbindings.VNIC_DIRECT,
            profile=profile)
        self.assertFalse(
            self.driver.responsible_for_ports_allocation(port_ctx))


class AgentMechanismLocalTestCase(AgentMechanismBaseTestCase):
    LOCAL_SEGMENTS = [{api.ID: 'unknown_segment_id',
                       api.NETWORK_TYPE: 'no_such_type',
                       api.NETWORK_ID: 'fake_network_id'},
                      {api.ID: 'local_segment_id',
                       api.NETWORK_TYPE: 'local',
                       api.NETWORK_ID: 'fake_network_id'}]

    def test_type_local(self):
        context = FakePortContext(self.AGENT_TYPE,
                                  self.AGENTS,
                                  self.LOCAL_SEGMENTS,
                                  vnic_type=self.VNIC_TYPE)
        self.driver.bind_port(context)
        self._check_bound(context, self.LOCAL_SEGMENTS[1])

    def test_type_local_dead(self):
        context = FakePortContext(self.AGENT_TYPE,
                                  self.AGENTS_DEAD,
                                  self.LOCAL_SEGMENTS,
                                  vnic_type=self.VNIC_TYPE)
        self.driver.bind_port(context)
        self._check_unbound(context)


class AgentMechanismFlatTestCase(AgentMechanismBaseTestCase):
    FLAT_SEGMENTS = [{api.ID: 'unknown_segment_id',
                      api.NETWORK_TYPE: 'no_such_type',
                      api.NETWORK_ID: 'fake_network_id'},
                     {api.ID: 'flat_segment_id',
                      api.NETWORK_TYPE: 'flat',
                      api.PHYSICAL_NETWORK: 'fake_physical_network',
                      api.NETWORK_ID: 'fake_network_id'}]

    def test_type_flat(self):
        context = FakePortContext(self.AGENT_TYPE,
                                  self.AGENTS,
                                  self.FLAT_SEGMENTS,
                                  vnic_type=self.VNIC_TYPE)
        self.driver.bind_port(context)
        self._check_bound(context, self.FLAT_SEGMENTS[1])

    def test_type_flat_bad(self):
        context = FakePortContext(self.AGENT_TYPE,
                                  self.AGENTS_BAD,
                                  self.FLAT_SEGMENTS,
                                  vnic_type=self.VNIC_TYPE)
        self.driver.bind_port(context)
        self._check_unbound(context)


class AgentMechanismVlanTestCase(AgentMechanismBaseTestCase):
    VLAN_SEGMENTS = [{api.ID: 'unknown_segment_id',
                      api.NETWORK_TYPE: 'no_such_type',
                      api.NETWORK_ID: 'fake_network_id'},
                     {api.ID: 'vlan_segment_id',
                      api.NETWORK_TYPE: 'vlan',
                      api.PHYSICAL_NETWORK: 'fake_physical_network',
                      api.SEGMENTATION_ID: 1234,
                      api.NETWORK_ID: 'fake_network_id'}]

    def test_type_vlan(self):
        context = FakePortContext(self.AGENT_TYPE,
                                  self.AGENTS,
                                  self.VLAN_SEGMENTS,
                                  vnic_type=self.VNIC_TYPE)
        self.driver.bind_port(context)
        self._check_bound(context, self.VLAN_SEGMENTS[1])

    def test_type_vlan_bad(self):
        context = FakePortContext(self.AGENT_TYPE,
                                  self.AGENTS_BAD,
                                  self.VLAN_SEGMENTS,
                                  vnic_type=self.VNIC_TYPE)
        self.driver.bind_port(context)
        self._check_unbound(context)


class AgentMechanismGreTestCase(AgentMechanismBaseTestCase):
    GRE_SEGMENTS = [{api.ID: 'unknown_segment_id',
                     api.NETWORK_TYPE: 'no_such_type',
                     api.NETWORK_ID: 'fake_network_id'},
                    {api.ID: 'gre_segment_id',
                     api.NETWORK_TYPE: 'gre',
                     api.SEGMENTATION_ID: 1234,
                     api.NETWORK_ID: 'fake_network_id'}]

    def test_type_gre(self):
        context = FakePortContext(self.AGENT_TYPE,
                                  self.AGENTS,
                                  self.GRE_SEGMENTS)
        self.driver.bind_port(context)
        self._check_bound(context, self.GRE_SEGMENTS[1])

    def test_type_gre_bad(self):
        context = FakePortContext(self.AGENT_TYPE,
                                  self.AGENTS_BAD,
                                  self.GRE_SEGMENTS)
        self.driver.bind_port(context)
        self._check_unbound(context)
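The Local/Flat/Vlan/Gre cases above all pin down the same driver contract: skip segments whose network_type the agent cannot handle and bind the first one it can. A minimal pure-Python sketch of that loop (names are illustrative; real drivers also check agent liveness and bridge/interface mappings):

def bind_first_supported(context, supported_types, vif_type):
    # try segments in order and bind the first supported one
    for segment in context.segments_to_bind:
        if segment['network_type'] in supported_types:
            context.set_binding(segment['id'], vif_type, {'ok': True})
            return True
    return False


class _Ctx(object):
    # trivial stand-in for a port context
    def __init__(self, segments):
        self.segments_to_bind = segments
        self.bound = None

    def set_binding(self, segment_id, vif_type, details):
        self.bound = (segment_id, vif_type, details)


ctx = _Ctx([{'id': 'a', 'network_type': 'no_such_type'},
            {'id': 'b', 'network_type': 'flat'}])
assert bind_first_supported(ctx, {'flat', 'vlan'}, 'ovs')
assert ctx.bound[0] == 'b'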
# ---- neutron/tests/unit/plugins/ml2/base.py ----
# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib.plugins import constants
from neutron_lib.plugins import directory

from neutron.tests.unit.plugins.ml2 import test_plugin


class ML2TestFramework(test_plugin.Ml2PluginV2TestCase):
    l3_plugin = ('neutron.services.l3_router.l3_router_plugin.'
                 'L3RouterPlugin')
    _mechanism_drivers = ['openvswitch']

    def get_additional_service_plugins(self):
        p = super(ML2TestFramework, self).get_additional_service_plugins()
        p.update({'flavors_plugin_name': 'neutron.services.flavors.'
                                         'flavors_plugin.FlavorsPlugin'})
        return p

    def setUp(self):
        super(ML2TestFramework, self).setUp()
        self.core_plugin = directory.get_plugin()
        self.l3_plugin = directory.get_plugin(constants.L3)

    def _create_router(self, distributed=False, ha=False,
                       admin_state_up=True):
        return self.l3_plugin.create_router(
            self.context,
            {'router': {'name': 'router',
                        'admin_state_up': admin_state_up,
                        'tenant_id': self._tenant_id,
                        'ha': ha,
                        'distributed': distributed}})
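A hypothetical test built on ML2TestFramework (the test name and the 'ha' assertion are illustrative): the framework wires up the ML2 core plugin plus the L3 service plugin, so subclasses can create routers directly through _create_router.

from neutron.tests.unit.plugins.ml2 import base as ml2_base


class FakeRouterTestCase(ml2_base.ML2TestFramework):
    def test_create_ha_router(self):
        # _create_router goes straight to the L3 plugin set up above
        router = self._create_router(ha=True)
        self.assertTrue(router['ha'])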
# ---- neutron/tests/unit/plugins/ml2/drivers/__init__.py (empty) ----
# ---- neutron/tests/unit/plugins/ml2/drivers/agent/__init__.py (empty) ----
# ---- neutron/tests/unit/plugins/ml2/drivers/agent/test__agent_manager_base.py ----
# Copyright (c) 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.plugins.ml2.drivers.agent import _agent_manager_base as amb
from neutron.tests import base


class RPCCallBackImpl(amb.CommonAgentManagerRpcCallBackBase):
    def security_groups_rule_updated(self, context, **kwargs):
        pass

    def security_groups_member_updated(self, context, **kwargs):
        pass


class Test_CommonAgentManagerRpcCallBackBase(base.BaseTestCase):
    def setUp(self):
        super(Test_CommonAgentManagerRpcCallBackBase, self).setUp()
        self.rpc_callbacks = RPCCallBackImpl(None, None, None)

    def test_get_and_clear_updated_devices(self):
        updated_devices = ['tap1', 'tap2']
        self.rpc_callbacks.updated_devices = updated_devices
        self.assertEqual(updated_devices,
                         self.rpc_callbacks.get_and_clear_updated_devices())
        self.assertEqual(set(), self.rpc_callbacks.updated_devices)

    def test_add_network(self):
        segment = amb.NetworkSegment('vlan', 'physnet1', 100)
        network_id = "foo"
        self.rpc_callbacks.add_network(network_id, segment)
        self.assertEqual(segment,
                         self.rpc_callbacks.network_map[network_id])
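A standalone sketch of the "get and clear" bookkeeping these tests verify (a simplification; the real base class also wires up RPC consumers and a network map):

class UpdatedDeviceTracker(object):
    def __init__(self):
        self.updated_devices = set()

    def port_update(self, device):
        # an RPC notification marks the device as needing reprocessing
        self.updated_devices.add(device)

    def get_and_clear_updated_devices(self):
        # hand the accumulated set to the caller and start a fresh one,
        # so no notification is lost between polling iterations
        updated, self.updated_devices = self.updated_devices, set()
        return updated


tracker = UpdatedDeviceTracker()
tracker.port_update('tap1')
assert tracker.get_and_clear_updated_devices() == {'tap1'}
assert tracker.updated_devices == set()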
# ---- neutron/tests/unit/plugins/ml2/drivers/agent/test__common_agent.py ----
# Copyright (c) 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from neutron_lib.agent import constants as agent_consts
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants
from oslo_config import cfg
import testtools

from neutron.agent.linux import bridge_lib
from neutron.plugins.ml2.drivers.agent import _agent_manager_base as amb
from neutron.plugins.ml2.drivers.agent import _common_agent as ca
from neutron.tests import base

LOCAL_IP = '192.168.0.33'
LOCAL_IPV6 = '2001:db8:1::33'
VXLAN_GROUPV6 = 'ff05::/120'
PORT_1 = 'abcdef01-12ddssdfds-fdsfsd'
DEVICE_1 = 'tapabcdef01-12'
NETWORK_ID = '57653b20-ed5b-4ed0-a31d-06f84e3fd909'
BRIDGE_MAPPING_VALUE = 'br-eth2'
BRIDGE_MAPPINGS = {'physnet0': BRIDGE_MAPPING_VALUE}
INTERFACE_MAPPINGS = {'physnet1': 'eth1'}
FAKE_DEFAULT_DEV = mock.Mock()
FAKE_DEFAULT_DEV.name = 'eth1'
PORT_DATA = {
    "port_id": PORT_1,
    "device": DEVICE_1
}


class TestCommonAgentLoop(base.BaseTestCase):
    def setUp(self):
        super(TestCommonAgentLoop, self).setUp()
        # disable setting up periodic state reporting
        cfg.CONF.set_override('report_interval', 0, 'AGENT')
        cfg.CONF.set_default('firewall_driver',
                             'neutron.agent.firewall.NoopFirewallDriver',
                             group='SECURITYGROUP')
        cfg.CONF.set_override('local_ip', LOCAL_IP, 'VXLAN')
        self.get_bridge_names_p = mock.patch.object(bridge_lib,
                                                    'get_bridge_names')
        self.get_bridge_names = self.get_bridge_names_p.start()
        self.get_bridge_names.return_value = ["br-int", "brq1"]

        manager = mock.Mock()
        manager.get_all_devices.return_value = []
        manager.get_agent_configurations.return_value = {}
        manager.get_rpc_consumers.return_value = []
        with mock.patch.object(ca.CommonAgentLoop,
                               '_validate_manager_class'),\
                mock.patch.object(ca.CommonAgentLoop,
                                  '_validate_rpc_endpoints'):
            self.agent = ca.CommonAgentLoop(manager, 0, 10, 'fake_agent',
                                            'foo-binary')
            with mock.patch.object(self.agent, "daemon_loop"):
                self.agent.start()

    def test_treat_devices_removed_notify(self):
        handler = mock.Mock()
        registry.subscribe(handler, resources.PORT_DEVICE,
                           events.AFTER_DELETE)
        devices = [DEVICE_1]
        self.agent.treat_devices_removed(devices)
        handler.assert_called_once_with(mock.ANY, mock.ANY, self.agent,
                                        payload=mock.ANY)

    def test_treat_devices_added_updated_notify(self):
        handler = mock.Mock()
        registry.subscribe(handler, resources.PORT_DEVICE,
                           events.AFTER_UPDATE)
        agent = self.agent
        mock_details = {'device': 'dev123',
                        'port_id': 'port123',
                        'network_id': 'net123',
                        'admin_state_up': True,
                        'network_type': 'vlan',
                        'segmentation_id': 100,
                        'physical_network': 'physnet1',
                        'device_owner': 'horse'}
        agent.plugin_rpc = mock.Mock()
        agent.plugin_rpc.get_devices_details_list.return_value = [
            mock_details]
        agent.mgr = mock.Mock()
        agent.mgr.plug_interface.return_value = True
        agent.treat_devices_added_updated(set(['dev123']))
        handler.assert_called_once_with(mock.ANY, mock.ANY, self.agent,
                                        payload=mock.ANY)
        payload = handler.mock_calls[0][2]['payload']
        self.assertDictEqual(mock_details, payload.latest_state)
        self.assertEqual(mock_details['device'], payload.resource_id)
    def test_treat_devices_removed_with_existed_device(self):
        agent = self.agent
        agent.mgr.ensure_port_admin_state = mock.Mock()
        devices = [DEVICE_1]
        agent.network_ports[NETWORK_ID].append(PORT_DATA)
        with mock.patch.object(agent.plugin_rpc,
                               "update_device_down") as fn_udd,\
                mock.patch.object(agent.sg_agent,
                                  "remove_devices_filter") as fn_rdf,\
                mock.patch.object(agent.ext_manager,
                                  "delete_port") as ext_mgr_delete_port:
            fn_udd.return_value = {'device': DEVICE_1,
                                   'exists': True}
            with mock.patch.object(ca.LOG, 'info') as log:
                resync = agent.treat_devices_removed(devices)
                self.assertEqual(2, log.call_count)
                self.assertFalse(resync)
                self.assertTrue(fn_udd.called)
                self.assertTrue(fn_rdf.called)
                self.assertTrue(ext_mgr_delete_port.called)
                self.assertNotIn(PORT_DATA,
                                 agent.network_ports[NETWORK_ID])

    def test_treat_devices_removed_with_not_existed_device(self):
        agent = self.agent
        devices = [DEVICE_1]
        agent.network_ports[NETWORK_ID].append(PORT_DATA)
        with mock.patch.object(agent.plugin_rpc,
                               "update_device_down") as fn_udd,\
                mock.patch.object(agent.sg_agent,
                                  "remove_devices_filter") as fn_rdf,\
                mock.patch.object(agent.ext_manager,
                                  "delete_port") as ext_mgr_delete_port:
            fn_udd.return_value = {'device': DEVICE_1,
                                   'exists': False}
            with mock.patch.object(ca.LOG, 'debug') as log:
                resync = agent.treat_devices_removed(devices)
                self.assertEqual(1, log.call_count)
                self.assertFalse(resync)
                self.assertTrue(fn_udd.called)
                self.assertTrue(fn_rdf.called)
                self.assertTrue(ext_mgr_delete_port.called)
                self.assertNotIn(PORT_DATA,
                                 agent.network_ports[NETWORK_ID])

    def test_treat_devices_removed_failed(self):
        agent = self.agent
        devices = [DEVICE_1]
        agent.network_ports[NETWORK_ID].append(PORT_DATA)
        with mock.patch.object(agent.plugin_rpc,
                               "update_device_down") as fn_udd,\
                mock.patch.object(agent.sg_agent,
                                  "remove_devices_filter") as fn_rdf,\
                mock.patch.object(agent.ext_manager,
                                  "delete_port") as ext_mgr_delete_port:
            fn_udd.side_effect = Exception()
            resync = agent.treat_devices_removed(devices)
            self.assertTrue(resync)
            self.assertTrue(fn_udd.called)
            self.assertTrue(fn_rdf.called)
            self.assertTrue(ext_mgr_delete_port.called)
            self.assertNotIn(PORT_DATA, agent.network_ports[NETWORK_ID])

    def test_treat_devices_removed_failed_extension(self):
        agent = self.agent
        devices = [DEVICE_1]
        agent.network_ports[NETWORK_ID].append(PORT_DATA)
        with mock.patch.object(agent.plugin_rpc,
                               "update_device_down") as fn_udd,\
                mock.patch.object(agent.sg_agent,
                                  "remove_devices_filter") as fn_rdf,\
                mock.patch.object(agent.ext_manager,
                                  "delete_port") as ext_mgr_delete_port:
            ext_mgr_delete_port.side_effect = Exception()
            resync = agent.treat_devices_removed(devices)
            self.assertTrue(resync)
            self.assertTrue(fn_udd.called)
            self.assertTrue(fn_rdf.called)
            self.assertTrue(ext_mgr_delete_port.called)
            self.assertNotIn(PORT_DATA, agent.network_ports[NETWORK_ID])

    def test_treat_devices_removed_delete_arp_spoofing(self):
        agent = self.agent
        agent._ensure_port_admin_state = mock.Mock()
        devices = [DEVICE_1]
        with mock.patch.object(agent.plugin_rpc,
                               "update_device_down") as fn_udd,\
                mock.patch.object(agent.sg_agent, "remove_devices_filter"):
            fn_udd.return_value = {'device': DEVICE_1, 'exists': True}
            with mock.patch.object(
                    agent.mgr, 'delete_arp_spoofing_protection') as de_arp:
                agent.treat_devices_removed(devices)
                de_arp.assert_called_with(devices)

    def test__get_devices_locally_modified(self):
        new_ts = {1: 1000, 2: 2000, 3: 3000}
        old_ts = {1: 10, 2: 2000, 4: 900}
        # 3 and 4 are not returned because 3 is a new device and 4 is a
        # removed device
        self.assertEqual(
            set([1]),
            self.agent._get_devices_locally_modified(new_ts, old_ts))
    def _test_scan_devices(self, previous, updated, fake_current,
                           expected, sync, fake_ts_current=None):
        self.agent.mgr = mock.Mock()
        self.agent.mgr.get_all_devices.return_value = fake_current
        self.agent.mgr.get_devices_modified_timestamps.return_value = (
            fake_ts_current or {})
        self.agent.rpc_callbacks.get_and_clear_updated_devices.return_value =\
            updated
        results = self.agent.scan_devices(previous, sync)
        self.assertEqual(expected, results)

    def test_scan_devices_no_changes(self):
        previous = {'current': set([1, 2]),
                    'updated': set(),
                    'added': set(),
                    'removed': set(),
                    'timestamps': {}}
        fake_current = set([1, 2])
        updated = set()
        expected = {'current': set([1, 2]),
                    'updated': set(),
                    'added': set(),
                    'removed': set(),
                    'timestamps': {}}

        self._test_scan_devices(previous, updated, fake_current, expected,
                                sync=False)

    def test_scan_devices_timestamp_triggers_updated_None_to_something(self):
        previous = {'current': set([1, 2]),
                    'updated': set(),
                    'added': set(),
                    'removed': set(),
                    'timestamps': {2: None}}
        fake_current = set([1, 2])
        updated = set()
        expected = {'current': set([1, 2]),
                    'updated': set([2]),
                    'added': set(),
                    'removed': set(),
                    'timestamps': {2: 1000}}

        self._test_scan_devices(previous, updated, fake_current, expected,
                                sync=False, fake_ts_current={2: 1000})

    def test_scan_devices_timestamp_triggers_updated(self):
        previous = {'current': set([1, 2]),
                    'updated': set(),
                    'added': set(),
                    'removed': set(),
                    'timestamps': {2: 600}}
        fake_current = set([1, 2])
        updated = set()
        expected = {'current': set([1, 2]),
                    'updated': set([2]),
                    'added': set(),
                    'removed': set(),
                    'timestamps': {2: 1000}}

        self._test_scan_devices(previous, updated, fake_current, expected,
                                sync=False, fake_ts_current={2: 1000})

    def test_scan_devices_added_removed(self):
        previous = {'current': set([1, 2]),
                    'updated': set(),
                    'added': set(),
                    'removed': set(),
                    'timestamps': {}}
        fake_current = set([2, 3])
        updated = set()
        expected = {'current': set([2, 3]),
                    'updated': set(),
                    'added': set([3]),
                    'removed': set([1]),
                    'timestamps': {}}

        self._test_scan_devices(previous, updated, fake_current, expected,
                                sync=False)

    def test_scan_devices_removed_retried_on_sync(self):
        previous = {'current': set([2, 3]),
                    'updated': set(),
                    'added': set(),
                    'removed': set([1]),
                    'timestamps': {}}
        fake_current = set([2, 3])
        updated = set()
        expected = {'current': set([2, 3]),
                    'updated': set(),
                    'added': set([2, 3]),
                    'removed': set([1]),
                    'timestamps': {}}

        self._test_scan_devices(previous, updated, fake_current, expected,
                                sync=True)

    def test_scan_devices_vanished_removed_on_sync(self):
        previous = {'current': set([2, 3]),
                    'updated': set(),
                    'added': set(),
                    'removed': set([1]),
                    'timestamps': {}}
        # Device 2 disappeared.
        fake_current = set([3])
        updated = set()
        # Device 1 should be retried.
        expected = {'current': set([3]),
                    'updated': set(),
                    'added': set([3]),
                    'removed': set([1, 2]),
                    'timestamps': {}}

        self._test_scan_devices(previous, updated, fake_current, expected,
                                sync=True)

    def test_scan_devices_updated(self):
        previous = {'current': set([1, 2]),
                    'updated': set(),
                    'added': set(),
                    'removed': set(),
                    'timestamps': {}}
        fake_current = set([1, 2])
        updated = set([1])
        expected = {'current': set([1, 2]),
                    'updated': set([1]),
                    'added': set(),
                    'removed': set(),
                    'timestamps': {}}

        self._test_scan_devices(previous, updated, fake_current, expected,
                                sync=False)

    def test_scan_devices_updated_non_existing(self):
        previous = {'current': set([1, 2]),
                    'updated': set(),
                    'added': set(),
                    'removed': set(),
                    'timestamps': {}}
        fake_current = set([1, 2])
        updated = set([3])
        expected = {'current': set([1, 2]),
                    'updated': set(),
                    'added': set(),
                    'removed': set(),
                    'timestamps': {}}

        self._test_scan_devices(previous, updated, fake_current, expected,
                                sync=False)
    def test_scan_devices_updated_deleted_concurrently(self):
        previous = {'current': set([1, 2]),
                    'updated': set(),
                    'added': set(),
                    'removed': set(),
                    'timestamps': {}}
        # Device 2 disappeared.
        fake_current = set([1])
        # Device 2 got a concurrent update via network_update.
        updated = set([2])
        expected = {'current': set([1]),
                    'updated': set(),
                    'added': set(),
                    'removed': set([2]),
                    'timestamps': {}}

        self._test_scan_devices(previous, updated, fake_current, expected,
                                sync=False)

    def test_scan_devices_updated_on_sync(self):
        previous = {'current': set([1, 2]),
                    'updated': set([1]),
                    'added': set(),
                    'removed': set(),
                    'timestamps': {}}
        fake_current = set([1, 2])
        updated = set([2])
        expected = {'current': set([1, 2]),
                    'updated': set([1, 2]),
                    'added': set([1, 2]),
                    'removed': set(),
                    'timestamps': {}}

        self._test_scan_devices(previous, updated, fake_current, expected,
                                sync=True)

    def test_scan_devices_with_delete_arp_protection(self):
        previous = None
        fake_current = set([1, 2])
        updated = set()
        expected = {'current': set([1, 2]),
                    'updated': set(),
                    'added': set([1, 2]),
                    'removed': set(),
                    'timestamps': {}}

        self._test_scan_devices(previous, updated, fake_current, expected,
                                sync=False)
        self.agent.mgr.delete_unreferenced_arp_protection.assert_called_with(
            fake_current)

    def test_process_network_devices(self):
        agent = self.agent
        device_info = {'current': set(),
                       'added': set(['tap3', 'tap4']),
                       'updated': set(['tap2', 'tap3']),
                       'removed': set(['tap1'])}
        agent.sg_agent.setup_port_filters = mock.Mock()
        agent.treat_devices_added_updated = mock.Mock(return_value=False)
        agent.treat_devices_removed = mock.Mock(return_value=False)

        agent.process_network_devices(device_info)

        agent.sg_agent.setup_port_filters.assert_called_with(
            device_info['added'], device_info['updated'])
        agent.treat_devices_added_updated.assert_called_with(
            set(['tap2', 'tap3', 'tap4']))
        agent.treat_devices_removed.assert_called_with(set(['tap1']))

    def test_treat_devices_added_updated_no_local_interface(self):
        agent = self.agent
        mock_details = {'device': 'dev123',
                        'port_id': 'port123',
                        'network_id': 'net123',
                        'admin_state_up': True,
                        'network_type': 'vlan',
                        'segmentation_id': 100,
                        'physical_network': 'physnet1',
                        'device_owner': constants.DEVICE_OWNER_NETWORK_PREFIX}
        agent.ext_manager = mock.Mock()
        agent.plugin_rpc = mock.Mock()
        agent.plugin_rpc.get_devices_details_list.return_value = [
            mock_details]
        agent.mgr = mock.Mock()
        agent.mgr.plug_interface.return_value = False
        agent.mgr.ensure_port_admin_state = mock.Mock()
        agent.treat_devices_added_updated(set(['tap1']))
        self.assertFalse(agent.mgr.ensure_port_admin_state.called)
    def test_treat_devices_added_updated_admin_state_up_true(self):
        agent = self.agent
        mock_details = {'device': 'dev123',
                        'port_id': 'port123',
                        'network_id': 'net123',
                        'admin_state_up': True,
                        'network_type': 'vlan',
                        'segmentation_id': 100,
                        'physical_network': 'physnet1',
                        'device_owner': constants.DEVICE_OWNER_NETWORK_PREFIX}
        mock_port_data = {
            'port_id': mock_details['port_id'],
            'device': mock_details['device']
        }
        agent.ext_manager = mock.Mock()
        agent.plugin_rpc = mock.Mock()
        agent.plugin_rpc.get_devices_details_list.return_value = [
            mock_details]
        agent.mgr = mock.Mock()
        agent.mgr.plug_interface.return_value = True
        agent.mgr.ensure_port_admin_state = mock.Mock()
        mock_segment = amb.NetworkSegment(mock_details['network_type'],
                                          mock_details['physical_network'],
                                          mock_details['segmentation_id'])

        with mock.patch('neutron.plugins.ml2.drivers.agent.'
                        '_agent_manager_base.NetworkSegment',
                        return_value=mock_segment):
            resync_needed = agent.treat_devices_added_updated(set(['tap1']))

            self.assertFalse(resync_needed)
            agent.rpc_callbacks.add_network.assert_called_with('net123',
                                                               mock_segment)
            agent.mgr.plug_interface.assert_called_with(
                'net123', mock_segment, 'dev123',
                constants.DEVICE_OWNER_NETWORK_PREFIX)
            self.assertTrue(agent.plugin_rpc.update_device_up.called)
            self.assertTrue(agent.ext_manager.handle_port.called)
            self.assertIn(mock_port_data,
                          agent.network_ports[mock_details['network_id']])

    def test_treat_devices_added_updated_setup_arp_protection(self):
        agent = self.agent
        mock_details = {'device': 'dev123',
                        'port_id': 'port123',
                        'network_id': 'net123',
                        'admin_state_up': True,
                        'network_type': 'vlan',
                        'segmentation_id': 100,
                        'physical_network': 'physnet1',
                        'device_owner': constants.DEVICE_OWNER_NETWORK_PREFIX}
        agent.plugin_rpc = mock.Mock()
        agent.plugin_rpc.get_devices_details_list.return_value = [
            mock_details]
        agent.mgr = mock.Mock()
        agent.mgr.plug_interface.return_value = True
        with mock.patch.object(agent.mgr,
                               'setup_arp_spoofing_protection') as set_arp:
            agent.treat_devices_added_updated(set(['tap1']))
            set_arp.assert_called_with(mock_details['device'], mock_details)

    def test__process_device_if_exists_missing_intf(self):
        mock_details = {'device': 'dev123',
                        'port_id': 'port123',
                        'network_id': 'net123',
                        'admin_state_up': True,
                        'network_type': 'vlan',
                        'segmentation_id': 100,
                        'physical_network': 'physnet1',
                        'device_owner': constants.DEVICE_OWNER_NETWORK_PREFIX}
        self.agent.mgr = mock.Mock()
        self.agent.mgr.get_all_devices.return_value = []
        self.agent.mgr.plug_interface.side_effect = RuntimeError()
        self.agent._process_device_if_exists(mock_details)

    def test__process_device_if_exists_error(self):
        mock_details = {'device': 'dev123',
                        'port_id': 'port123',
                        'network_id': 'net123',
                        'admin_state_up': True,
                        'network_type': 'vlan',
                        'segmentation_id': 100,
                        'physical_network': 'physnet1',
                        'device_owner': constants.DEVICE_OWNER_NETWORK_PREFIX}
        self.agent.mgr = mock.Mock()
        self.agent.mgr.get_all_devices.return_value = ['dev123']
        self.agent.mgr.plug_interface.side_effect = RuntimeError()
        with testtools.ExpectedException(RuntimeError):
            # device exists so it should raise
            self.agent._process_device_if_exists(mock_details)

    def test__process_device_if_exists_no_active_binding_in_host(self):
        mock_details = {'device': 'dev123',
                        constants.NO_ACTIVE_BINDING: True}
        self.agent.mgr = mock.Mock()
        self.agent._process_device_if_exists(mock_details)
        self.agent.mgr.setup_arp_spoofing_protection.assert_not_called()

    def test_set_rpc_timeout(self):
        self.agent.stop()
        for rpc_client in (self.agent.plugin_rpc.client,
                           self.agent.sg_plugin_rpc.client,
                           self.agent.state_rpc.client):
            self.assertEqual(cfg.CONF.AGENT.quitting_rpc_timeout,
                             rpc_client.timeout)

    def test_set_rpc_timeout_no_value(self):
        self.agent.quitting_rpc_timeout = None
        with mock.patch.object(self.agent, 'set_rpc_timeout') as mock_set_rpc:
            self.agent.stop()
        self.assertFalse(mock_set_rpc.called)

    def test_report_state_revived(self):
        with mock.patch.object(self.agent.state_rpc,
                               "report_state") as report_st:
            report_st.return_value = agent_consts.AGENT_REVIVED
            self.agent._report_state()
            self.assertTrue(self.agent.fullsync)

    def test_update_network_ports(self):
        port_1_data = PORT_DATA
        NETWORK_2_ID = 'fake_second_network'
        port_2_data = {
            'port_id': 'fake_port_2',
            'device': 'fake_port_2_device_name'
        }
        self.agent.network_ports[NETWORK_ID].append(port_1_data)
        self.agent.network_ports[NETWORK_ID].append(port_2_data)
        # check update port:
        self.agent._update_network_ports(
            NETWORK_2_ID, port_2_data['port_id'], port_2_data['device'])
        self.assertNotIn(port_2_data, self.agent.network_ports[NETWORK_ID])
        self.assertIn(port_2_data, self.agent.network_ports[NETWORK_2_ID])

    def test_clean_network_ports(self):
        port_1_data = PORT_DATA
        port_2_data = {
            'port_id': 'fake_port_2',
            'device': 'fake_port_2_device_name'
        }
        self.agent.network_ports[NETWORK_ID].append(port_1_data)
        self.agent.network_ports[NETWORK_ID].append(port_2_data)
        # check removing port from network when other ports are still there:
        cleaned_port_id = self.agent._clean_network_ports(DEVICE_1)
        self.assertIn(NETWORK_ID, self.agent.network_ports.keys())
        self.assertNotIn(port_1_data, self.agent.network_ports[NETWORK_ID])
        self.assertIn(port_2_data, self.agent.network_ports[NETWORK_ID])
        self.assertEqual(PORT_1, cleaned_port_id)
        # and now remove last port from network:
        cleaned_port_id = self.agent._clean_network_ports(
            port_2_data['device'])
        self.assertNotIn(NETWORK_ID, self.agent.network_ports.keys())
        self.assertEqual(port_2_data['port_id'], cleaned_port_id)
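The scan_devices and timestamp tests above pin down set-diff bookkeeping that can be modelled in a few lines. A condensed, pure-Python sketch of that logic (a simplification of what the real agent loop does; sync handling is omitted):

def scan_devices(previous, current, updated, timestamps):
    previous = previous or {'current': set(), 'updated': set(),
                            'added': set(), 'removed': set(),
                            'timestamps': {}}
    added = current - previous['current']
    removed = previous['current'] - current
    # a device counts as locally modified when it survived and its
    # timestamp changed (cf. test__get_devices_locally_modified)
    locally_modified = {d for d, ts in timestamps.items()
                        if d in previous['timestamps'] and
                        previous['timestamps'][d] != ts}
    # an update for a vanished or brand-new device is meaningless here
    updated = ((updated | locally_modified) & current) - added
    return {'current': current, 'added': added, 'removed': removed,
            'updated': updated, 'timestamps': timestamps}


info = scan_devices({'current': {1, 2}, 'updated': set(), 'added': set(),
                     'removed': set(), 'timestamps': {2: 600}},
                    current={1, 2}, updated=set(), timestamps={2: 1000})
assert info['updated'] == {2}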
# ---- neutron/tests/unit/plugins/ml2/drivers/agent/test_capabilities.py ----
# Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from neutron_lib.callbacks import events
from neutron_lib import fixture

from neutron.plugins.ml2.drivers.agent import capabilities
from neutron.tests import base
from neutron.tests import tools


class CapabilitiesTest(base.BaseTestCase):

    def setUp(self):
        super(CapabilitiesTest, self).setUp()
        self._mgr = mock.Mock()
        self.useFixture(fixture.CallbackRegistryFixture(
            callback_manager=self._mgr))

    def test_notify_init_event(self):
        mock_agent_type = mock.Mock()
        mock_agent = mock.Mock()
        capabilities.notify_init_event(mock_agent_type, mock_agent)
        self._mgr.publish.assert_called_with(mock_agent_type,
                                             events.AFTER_INIT,
                                             mock_agent,
                                             payload=None)

    def test_register(self):
        mock_callback = mock.Mock()
        mock_agent_type = mock.Mock()
        capabilities.register(mock_callback, mock_agent_type)
        args = tools.get_subscribe_args(
            mock_callback, mock_agent_type, events.AFTER_INIT)
        self._mgr.subscribe.assert_called_with(*args)
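A toy registry showing the publish/subscribe contract the capabilities module is tested against (the real manager lives in neutron_lib.callbacks; ToyRegistry below is invented for illustration):

class ToyRegistry(object):
    def __init__(self):
        self._subs = {}

    def subscribe(self, callback, resource, event):
        self._subs.setdefault((resource, event), []).append(callback)

    def publish(self, resource, event, trigger, payload=None):
        # fan a single event out to every subscribed callback
        for cb in self._subs.get((resource, event), []):
            cb(resource, event, trigger, payload=payload)


reg = ToyRegistry()
seen = []
reg.subscribe(lambda *a, **kw: seen.append(a), 'AGENT', 'after_init')
reg.publish('AGENT', 'after_init', trigger=object())
assert len(seen) == 1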
# ---- neutron/tests/unit/plugins/ml2/drivers/base_type_tunnel.py ----
# Copyright (c) 2014 OpenStack Foundation, all rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock
from neutron_lib import constants as p_const
from neutron_lib import context
from neutron_lib import exceptions as exc
from neutron_lib.plugins.ml2 import api
from oslo_config import cfg
from six import moves
import testtools
from testtools import matchers

from neutron.objects import network_segment_range as \
    obj_network_segment_range
from neutron.plugins.ml2.drivers import type_tunnel

TUNNEL_IP_ONE = "10.10.10.10"
TUNNEL_IP_TWO = "10.10.10.20"
TUNNEL_IPV6_ONE = "2001:db8:1::10"
HOST_ONE = 'fake_host_one'
HOST_TWO = 'fake_host_two'
TUN_MIN = 100
TUN_MAX = 109
RAW_TUNNEL_RANGES = [str(TUN_MIN) + ':' + str(TUN_MAX)]
TUNNEL_RANGES = [(TUN_MIN, TUN_MAX)]
UPDATED_TUNNEL_RANGES = [(TUN_MIN + 5, TUN_MAX + 5)]
SERVICE_PLUGIN_KLASS = ('neutron.services.network_segment_range.plugin.'
                        'NetworkSegmentRangePlugin')


class TunnelTypeTestMixin(object):
    DRIVER_CLASS = None
    TYPE = None

    def setUp(self):
        super(TunnelTypeTestMixin, self).setUp()
        self.driver = self.DRIVER_CLASS()
        self.driver.tunnel_ranges = TUNNEL_RANGES
        self.driver.sync_allocations()
        self.context = context.Context()

    def test_tunnel_type(self):
        self.assertEqual(self.TYPE, self.driver.get_type())

    def test_validate_provider_segment(self):
        segment = {api.NETWORK_TYPE: self.TYPE,
                   api.PHYSICAL_NETWORK: 'phys_net',
                   api.SEGMENTATION_ID: None}
        with testtools.ExpectedException(exc.InvalidInput):
            self.driver.validate_provider_segment(segment)

        segment[api.PHYSICAL_NETWORK] = None
        self.driver.validate_provider_segment(segment)

        segment[api.SEGMENTATION_ID] = 1
        self.driver.validate_provider_segment(segment)

    def test_sync_tunnel_allocations(self):
        self.assertIsNone(
            self.driver.get_allocation(self.context, (TUN_MIN - 1)))
        self.assertFalse(
            self.driver.get_allocation(self.context, (TUN_MIN)).allocated)
        self.assertFalse(
            self.driver.get_allocation(self.context,
                                       (TUN_MIN + 1)).allocated)
        self.assertFalse(
            self.driver.get_allocation(self.context,
                                       (TUN_MAX - 1)).allocated)
        self.assertFalse(
            self.driver.get_allocation(self.context, (TUN_MAX)).allocated)
        self.assertIsNone(
            self.driver.get_allocation(self.context, (TUN_MAX + 1)))

        self.driver.tunnel_ranges = UPDATED_TUNNEL_RANGES
        self.driver.sync_allocations()

        self.assertIsNone(
            self.driver.get_allocation(self.context, (TUN_MIN + 5 - 1)))
        self.assertFalse(
            self.driver.get_allocation(self.context,
                                       (TUN_MIN + 5)).allocated)
        self.assertFalse(
            self.driver.get_allocation(self.context,
                                       (TUN_MIN + 5 + 1)).allocated)
        self.assertFalse(
            self.driver.get_allocation(self.context,
                                       (TUN_MAX + 5 - 1)).allocated)
        self.assertFalse(
            self.driver.get_allocation(self.context,
                                       (TUN_MAX + 5)).allocated)
        self.assertIsNone(
            self.driver.get_allocation(self.context, (TUN_MAX + 5 + 1)))

    def _test_sync_allocations_and_allocated(self, tunnel_id):
        segment = {api.NETWORK_TYPE: self.TYPE,
                   api.PHYSICAL_NETWORK: None,
                   api.SEGMENTATION_ID: tunnel_id}
        self.driver.reserve_provider_segment(self.context, segment)

        self.driver.tunnel_ranges = UPDATED_TUNNEL_RANGES
        self.driver.sync_allocations()

        self.assertTrue(
            self.driver.get_allocation(self.context, tunnel_id).allocated)
    def test_sync_allocations_and_allocated_in_initial_range(self):
        self._test_sync_allocations_and_allocated(TUN_MIN + 2)

    def test_sync_allocations_and_allocated_in_final_range(self):
        self._test_sync_allocations_and_allocated(TUN_MAX + 2)

    def test_sync_allocations_no_op(self):

        def verify_no_chunk(iterable, chunk_size):
            # no segment removed/added
            self.assertEqual(0, len(list(iterable)))
            return []

        with mock.patch.object(
                type_tunnel, 'chunks', side_effect=verify_no_chunk) as chunks:
            self.driver.sync_allocations()
            self.assertEqual(2, len(chunks.mock_calls))

    def test_partial_segment_is_partial_segment(self):
        segment = {api.NETWORK_TYPE: self.TYPE,
                   api.PHYSICAL_NETWORK: None,
                   api.SEGMENTATION_ID: None}
        self.assertTrue(self.driver.is_partial_segment(segment))

    def test_specific_segment_is_not_partial_segment(self):
        segment = {api.NETWORK_TYPE: self.TYPE,
                   api.PHYSICAL_NETWORK: None,
                   api.SEGMENTATION_ID: 101}
        self.assertFalse(self.driver.is_partial_segment(segment))

    def test_reserve_provider_segment_full_specs(self):
        segment = {api.NETWORK_TYPE: self.TYPE,
                   api.PHYSICAL_NETWORK: None,
                   api.SEGMENTATION_ID: 101}
        observed = self.driver.reserve_provider_segment(self.context,
                                                        segment)
        alloc = self.driver.get_allocation(self.context,
                                           observed[api.SEGMENTATION_ID])
        self.assertTrue(alloc.allocated)

        with testtools.ExpectedException(exc.TunnelIdInUse):
            self.driver.reserve_provider_segment(self.context, segment)

        self.driver.release_segment(self.context, segment)
        alloc = self.driver.get_allocation(self.context,
                                           observed[api.SEGMENTATION_ID])
        self.assertFalse(alloc.allocated)

        segment[api.SEGMENTATION_ID] = 1000
        observed = self.driver.reserve_provider_segment(self.context,
                                                        segment)
        alloc = self.driver.get_allocation(self.context,
                                           observed[api.SEGMENTATION_ID])
        self.assertTrue(alloc.allocated)

        self.driver.release_segment(self.context, segment)
        alloc = self.driver.get_allocation(self.context,
                                           observed[api.SEGMENTATION_ID])
        self.assertIsNone(alloc)

    def test_reserve_provider_segment(self):
        tunnel_ids = set()
        specs = {api.NETWORK_TYPE: self.TYPE,
                 api.PHYSICAL_NETWORK: 'None',
                 api.SEGMENTATION_ID: None}

        for x in moves.range(TUN_MIN, TUN_MAX + 1):
            segment = self.driver.reserve_provider_segment(self.context,
                                                           specs)
            self.assertEqual(self.TYPE, segment[api.NETWORK_TYPE])
            self.assertThat(segment[api.SEGMENTATION_ID],
                            matchers.GreaterThan(TUN_MIN - 1))
            self.assertThat(segment[api.SEGMENTATION_ID],
                            matchers.LessThan(TUN_MAX + 1))
            tunnel_ids.add(segment[api.SEGMENTATION_ID])

        with testtools.ExpectedException(exc.NoNetworkAvailable):
            segment = self.driver.reserve_provider_segment(self.context,
                                                           specs)

        segment = {api.NETWORK_TYPE: self.TYPE,
                   api.PHYSICAL_NETWORK: 'None',
                   api.SEGMENTATION_ID: tunnel_ids.pop()}
        self.driver.release_segment(self.context, segment)
        segment = self.driver.reserve_provider_segment(self.context, specs)
        self.assertThat(segment[api.SEGMENTATION_ID],
                        matchers.GreaterThan(TUN_MIN - 1))
        self.assertThat(segment[api.SEGMENTATION_ID],
                        matchers.LessThan(TUN_MAX + 1))
        tunnel_ids.add(segment[api.SEGMENTATION_ID])

        for tunnel_id in tunnel_ids:
            segment[api.SEGMENTATION_ID] = tunnel_id
            self.driver.release_segment(self.context, segment)
    def test_allocate_tenant_segment(self):
        tunnel_ids = set()
        for x in moves.range(TUN_MIN, TUN_MAX + 1):
            segment = self.driver.allocate_tenant_segment(self.context)
            self.assertThat(segment[api.SEGMENTATION_ID],
                            matchers.GreaterThan(TUN_MIN - 1))
            self.assertThat(segment[api.SEGMENTATION_ID],
                            matchers.LessThan(TUN_MAX + 1))
            tunnel_ids.add(segment[api.SEGMENTATION_ID])

        segment = self.driver.allocate_tenant_segment(self.context)
        self.assertIsNone(segment)

        segment = {api.NETWORK_TYPE: self.TYPE,
                   api.PHYSICAL_NETWORK: 'None',
                   api.SEGMENTATION_ID: tunnel_ids.pop()}
        self.driver.release_segment(self.context, segment)
        segment = self.driver.allocate_tenant_segment(self.context)
        self.assertThat(segment[api.SEGMENTATION_ID],
                        matchers.GreaterThan(TUN_MIN - 1))
        self.assertThat(segment[api.SEGMENTATION_ID],
                        matchers.LessThan(TUN_MAX + 1))
        tunnel_ids.add(segment[api.SEGMENTATION_ID])

        for tunnel_id in tunnel_ids:
            segment[api.SEGMENTATION_ID] = tunnel_id
            self.driver.release_segment(self.context, segment)

    def add_endpoint(self, ip=TUNNEL_IP_ONE, host=HOST_ONE):
        return self.driver.add_endpoint(ip, host)

    def test_add_endpoint(self):
        endpoint = self.add_endpoint()
        self.assertEqual(TUNNEL_IP_ONE, endpoint.ip_address)
        self.assertEqual(HOST_ONE, endpoint.host)
        return endpoint

    def test_add_endpoint_for_existing_tunnel_ip(self):
        self.add_endpoint()

        with mock.patch.object(type_tunnel.LOG, 'warning') as log_warn:
            self.add_endpoint()
            log_warn.assert_called_once_with(mock.ANY, TUNNEL_IP_ONE)

    def test_get_endpoint_by_host(self):
        self.add_endpoint()

        host_endpoint = self.driver.get_endpoint_by_host(HOST_ONE)
        self.assertEqual(TUNNEL_IP_ONE, host_endpoint.ip_address)
        return host_endpoint

    def test_get_endpoint_by_host_for_not_existing_host(self):
        ip_endpoint = self.driver.get_endpoint_by_host(HOST_TWO)
        self.assertIsNone(ip_endpoint)

    def test_get_endpoint_by_ip(self):
        self.add_endpoint()

        ip_endpoint = self.driver.get_endpoint_by_ip(TUNNEL_IP_ONE)
        self.assertEqual(HOST_ONE, ip_endpoint.host)
        return ip_endpoint

    def test_get_endpoint_by_ip_for_not_existing_tunnel_ip(self):
        ip_endpoint = self.driver.get_endpoint_by_ip(TUNNEL_IP_TWO)
        self.assertIsNone(ip_endpoint)

    def test_delete_endpoint(self):
        self.add_endpoint()

        self.assertIsNone(self.driver.delete_endpoint(TUNNEL_IP_ONE))
        # Get all the endpoints and verify it's empty
        endpoints = self.driver.get_endpoints()
        self.assertNotIn(TUNNEL_IP_ONE, endpoints)


class TunnelTypeMultiRangeTestMixin(object):
    DRIVER_CLASS = None

    TUN_MIN0 = 100
    TUN_MAX0 = 101
    TUN_MIN1 = 200
    TUN_MAX1 = 201
    TUNNEL_MULTI_RANGES = [(TUN_MIN0, TUN_MAX0), (TUN_MIN1, TUN_MAX1)]

    def setUp(self):
        super(TunnelTypeMultiRangeTestMixin, self).setUp()
        self.driver = self.DRIVER_CLASS()
        self.driver.tunnel_ranges = self.TUNNEL_MULTI_RANGES
        self.driver.sync_allocations()
        self.context = context.Context()
    def test_release_segment(self):
        segments = [self.driver.allocate_tenant_segment(self.context)
                    for i in range(4)]

        # Release them in random order. No special meaning.
        for i in (0, 2, 1, 3):
            self.driver.release_segment(self.context, segments[i])

        for key in (self.TUN_MIN0, self.TUN_MAX0,
                    self.TUN_MIN1, self.TUN_MAX1):
            alloc = self.driver.get_allocation(self.context, key)
            self.assertFalse(alloc.allocated)


class TunnelRpcCallbackTestMixin(object):
    DRIVER_CLASS = None
    TYPE = None

    def setUp(self):
        super(TunnelRpcCallbackTestMixin, self).setUp()
        self.driver = self.DRIVER_CLASS()

    def _test_tunnel_sync(self, kwargs, delete_tunnel=False):
        with mock.patch.object(self.notifier,
                               'tunnel_update') as tunnel_update,\
                mock.patch.object(self.notifier,
                                  'tunnel_delete') as tunnel_delete:
            details = self.callbacks.tunnel_sync('fake_context', **kwargs)
            tunnels = details['tunnels']
            for tunnel in tunnels:
                self.assertEqual(kwargs['tunnel_ip'], tunnel['ip_address'])
                self.assertEqual(kwargs['host'], tunnel['host'])
            self.assertTrue(tunnel_update.called)
            if delete_tunnel:
                self.assertTrue(tunnel_delete.called)
            else:
                self.assertFalse(tunnel_delete.called)

    def _test_tunnel_sync_raises(self, kwargs):
        with mock.patch.object(self.notifier,
                               'tunnel_update') as tunnel_update,\
                mock.patch.object(self.notifier,
                                  'tunnel_delete') as tunnel_delete:
            self.assertRaises(exc.InvalidInput,
                              self.callbacks.tunnel_sync,
                              'fake_context', **kwargs)
            self.assertFalse(tunnel_update.called)
            self.assertFalse(tunnel_delete.called)

    def test_tunnel_sync_called_without_host_passed(self):
        kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE,
                  'host': None}
        self._test_tunnel_sync(kwargs)

    def test_tunnel_sync_called_with_host_passed_for_existing_tunnel_ip(self):
        self.driver.add_endpoint(TUNNEL_IP_ONE, None)
        kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE,
                  'host': HOST_ONE}
        self._test_tunnel_sync(kwargs)

    def test_tunnel_sync_called_with_host_passed(self):
        kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE,
                  'host': HOST_ONE}
        self._test_tunnel_sync(kwargs)

    def test_tunnel_sync_called_with_host_passed_ipv6(self):
        cfg.CONF.set_override('overlay_ip_version', p_const.IP_VERSION_6,
                              group='ml2')
        kwargs = {'tunnel_ip': TUNNEL_IPV6_ONE, 'tunnel_type': self.TYPE,
                  'host': HOST_ONE}
        self._test_tunnel_sync(kwargs)

    def test_tunnel_sync_called_for_existing_endpoint(self):
        self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE)
        kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE,
                  'host': HOST_ONE}
        self._test_tunnel_sync(kwargs)

    def test_tunnel_sync_called_for_existing_host_with_tunnel_ip_changed(
            self):
        self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE)
        kwargs = {'tunnel_ip': TUNNEL_IP_TWO, 'tunnel_type': self.TYPE,
                  'host': HOST_ONE}
        self._test_tunnel_sync(kwargs, True)

    def test_tunnel_sync_called_with_used_tunnel_ip_host_roaming(self):
        self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE)
        kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE,
                  'host': HOST_TWO}
        self._test_tunnel_sync(kwargs, False)

    def test_tunnel_sync_called_with_used_tunnel_ip_roaming_case_two(self):
        self.driver.add_endpoint(TUNNEL_IP_ONE, None)
        self.driver.add_endpoint(TUNNEL_IP_TWO, HOST_TWO)
        kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE,
                  'host': HOST_TWO}
        self._test_tunnel_sync(kwargs, False)

    def test_tunnel_sync_called_without_tunnel_ip(self):
        kwargs = {'tunnel_type': self.TYPE, 'host': None}
        self._test_tunnel_sync_raises(kwargs)

    def test_tunnel_sync_called_without_tunnel_type(self):
        kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'host': None}
        self._test_tunnel_sync_raises(kwargs)
    def test_tunnel_sync_called_with_tunnel_overlay_mismatch(self):
        cfg.CONF.set_override('overlay_ip_version', p_const.IP_VERSION_6,
                              group='ml2')
        kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE,
                  'host': HOST_ONE}
        self._test_tunnel_sync_raises(kwargs)

    def test_tunnel_sync_called_with_tunnel_overlay_mismatch_ipv6(self):
        cfg.CONF.set_override('overlay_ip_version', p_const.IP_VERSION_4,
                              group='ml2')
        kwargs = {'tunnel_ip': TUNNEL_IPV6_ONE, 'tunnel_type': self.TYPE,
                  'host': HOST_ONE}
        self._test_tunnel_sync_raises(kwargs)


class TunnelTypeMTUTestMixin(object):
    DRIVER_CLASS = None
    TYPE = None
    ENCAP_OVERHEAD = 0

    def setUp(self):
        super(TunnelTypeMTUTestMixin, self).setUp()
        self.driver = self.DRIVER_CLASS()

    def _test_get_mtu(self, ip_version):
        cfg.CONF.set_override('overlay_ip_version', ip_version,
                              group='ml2')
        ip_header_length = p_const.IP_HEADER_LENGTH[ip_version]

        cfg.CONF.set_override('global_physnet_mtu', 1500)
        cfg.CONF.set_override('path_mtu', 1475, group='ml2')
        self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400}
        self.assertEqual(1475 - self.ENCAP_OVERHEAD - ip_header_length,
                         self.driver.get_mtu('physnet1'))

        cfg.CONF.set_override('global_physnet_mtu', 1450)
        cfg.CONF.set_override('path_mtu', 1475, group='ml2')
        self.driver.physnet_mtus = {'physnet1': 1400, 'physnet2': 1425}
        self.assertEqual(1450 - self.ENCAP_OVERHEAD - ip_header_length,
                         self.driver.get_mtu('physnet1'))

        cfg.CONF.set_override('global_physnet_mtu', 0)
        cfg.CONF.set_override('path_mtu', 1450, group='ml2')
        self.driver.physnet_mtus = {'physnet1': 1425, 'physnet2': 1400}
        self.assertEqual(1450 - self.ENCAP_OVERHEAD - ip_header_length,
                         self.driver.get_mtu('physnet1'))

        cfg.CONF.set_override('global_physnet_mtu', 0)
        cfg.CONF.set_override('path_mtu', 0, group='ml2')
        self.driver.physnet_mtus = {}
        self.assertEqual(0, self.driver.get_mtu('physnet1'))

    def test_get_mtu_ipv4(self):
        self._test_get_mtu(4)

    def test_get_mtu_ipv6(self):
        self._test_get_mtu(6)


class TunnelTypeNetworkSegmentRangeTestMixin(object):

    DRIVER_CLASS = None

    def setUp(self):
        super(TunnelTypeNetworkSegmentRangeTestMixin, self).setUp()
        cfg.CONF.set_override('service_plugins', [SERVICE_PLUGIN_KLASS])
        self.context = context.Context()
        self.driver = self.DRIVER_CLASS()

    def test__populate_new_default_network_segment_ranges(self):
        # _populate_new_default_network_segment_ranges will be called when
        # the type driver initializes with `network_segment_range` loaded as
        # one of the `service_plugins`
        self.driver._initialize(RAW_TUNNEL_RANGES)
        self.driver.initialize_network_segment_range_support()
        self.driver.sync_allocations()
        ret = obj_network_segment_range.NetworkSegmentRange.get_objects(
            self.context)
        self.assertEqual(1, len(ret))
        network_segment_range = ret[0]
        self.assertTrue(network_segment_range.default)
        self.assertTrue(network_segment_range.shared)
        self.assertIsNone(network_segment_range.project_id)
        self.assertEqual(self.driver.get_type(),
                         network_segment_range.network_type)
        self.assertIsNone(network_segment_range.physical_network)
        self.assertEqual(TUN_MIN, network_segment_range.minimum)
        self.assertEqual(TUN_MAX, network_segment_range.maximum)

    def test__delete_expired_default_network_segment_ranges(self):
        self.driver.tunnel_ranges = TUNNEL_RANGES
        self.driver.sync_allocations()
        self.driver._delete_expired_default_network_segment_ranges()
        ret = obj_network_segment_range.NetworkSegmentRange.get_objects(
            self.context)
        self.assertEqual(0, len(ret))
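The MTU expectations in TunnelTypeMTUTestMixin reduce to: take the smallest positive underlay MTU among the configured values, then subtract the tunnel's encapsulation overhead and the IP header length. A standalone sketch of that arithmetic (the 30-byte overhead below is illustrative of a VXLAN-like encapsulation):

IP_HEADER_LENGTH = {4: 20, 6: 40}


def tunnel_mtu(global_physnet_mtu, path_mtu, encap_overhead, ip_version):
    # zero means "not configured", so only positive values constrain us
    candidates = [m for m in (global_physnet_mtu, path_mtu) if m > 0]
    if not candidates:
        return 0
    return min(candidates) - encap_overhead - IP_HEADER_LENGTH[ip_version]


# e.g. a 30-byte encapsulation over IPv4: min(1500, 1475) - 30 - 20
assert tunnel_mtu(1500, 1475, 30, 4) == 1425
assert tunnel_mtu(0, 0, 30, 4) == 0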
# ---- neutron/tests/unit/plugins/ml2/drivers/ext_test.py ----
# Copyright 2015 Intel Corporation.
# Copyright 2015 Isaku Yamahata
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib.api import validators
from neutron_lib import constants
from neutron_lib.db import model_base
from neutron_lib.plugins.ml2 import api
import oslo_db.sqlalchemy.session
import sqlalchemy as sa
from sqlalchemy import orm

from neutron.api import extensions
from neutron.db import models_v2
from neutron.objects import subnet as subnet_obj
from neutron.tests.unit.plugins.ml2 import extensions as test_extensions


class TestExtensionDriverBase(api.ExtensionDriver):
    _supported_extension_aliases = 'fake_extension'

    def initialize(self):
        extensions.append_api_extensions_path(test_extensions.__path__)

    @property
    def extension_alias(self):
        return self._supported_extension_aliases


class TestExtensionDriver(TestExtensionDriverBase):
    def initialize(self):
        super(TestExtensionDriver, self).initialize()
        # keep track of values
        self.val_by_id = {}

    def _check_create(self, session, data, result):
        assert(isinstance(session, oslo_db.sqlalchemy.session.Session))
        assert(isinstance(data, dict))
        assert('id' not in data)
        assert(isinstance(result, dict))
        assert(result['id'] is not None)

    def _check_update(self, session, data, result):
        assert(isinstance(session, oslo_db.sqlalchemy.session.Session))
        assert(isinstance(data, dict))
        assert(isinstance(result, dict))
        assert(result['id'] is not None)

    def _check_extend(self, session, result, entry,
                      expected_db_entry_class,
                      expected_obj_entry_class=None):
        # TODO(slaweq): After converting all code to use Subnet OVO,
        # expected_db_entry_class can be removed as only OVO object
        # should be expected here
        assert(isinstance(session, oslo_db.sqlalchemy.session.Session))
        assert(isinstance(result, dict))
        assert(result['id'] is not None)
        if expected_obj_entry_class:
            assert(isinstance(entry, expected_db_entry_class) or
                   isinstance(entry, expected_obj_entry_class))
        else:
            assert(isinstance(entry, expected_db_entry_class))
        assert(entry.id == result['id'])

    def _store_change(self, result, data, field):
        if field in data and data[field] != constants.ATTR_NOT_SPECIFIED:
            self.val_by_id[result['id']] = data[field]
        elif result['id'] not in self.val_by_id:
            self.val_by_id[result['id']] = 'default_%s' % field

    def process_create_network(self, plugin_context, data, result):
        session = plugin_context.session
        self._check_create(session, data, result)
        self._store_change(result, data, 'network_extension')
        result['network_extension'] = self.val_by_id[result['id']]

    def process_update_network(self, plugin_context, data, result):
        session = plugin_context.session
        self._check_update(session, data, result)
        self._store_change(result, data, 'network_extension')
        result['network_extension'] = self.val_by_id[result['id']]

    def extend_network_dict(self, session, net_db, result):
        self._check_extend(session, result, net_db, models_v2.Network)
        result['network_extension'] = self.val_by_id.get(result['id'])
result): session = plugin_context.session self._check_create(session, data, result) self._store_change(result, data, 'subnet_extension') result['subnet_extension'] = self.val_by_id[result['id']] def process_update_subnet(self, plugin_context, data, result): session = plugin_context.session self._check_update(session, data, result) self._store_change(result, data, 'subnet_extension') result['subnet_extension'] = self.val_by_id[result['id']] def extend_subnet_dict(self, session, subnet_db, result): self._check_extend( session, result, subnet_db, expected_db_entry_class=models_v2.Subnet, expected_obj_entry_class=subnet_obj.Subnet) result['subnet_extension'] = self.val_by_id.get(result['id']) def process_create_port(self, plugin_context, data, result): session = plugin_context.session self._check_create(session, data, result) self._store_change(result, data, 'port_extension') result['port_extension'] = self.val_by_id[result['id']] def process_update_port(self, plugin_context, data, result): session = plugin_context.session self._check_update(session, data, result) self._store_change(result, data, 'port_extension') result['port_extension'] = self.val_by_id[result['id']] def extend_port_dict(self, session, port_db, result): self._check_extend(session, result, port_db, models_v2.Port) result['port_extension'] = self.val_by_id.get(result['id']) class TestNetworkExtension(model_base.BASEV2): network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete="CASCADE"), primary_key=True) value = sa.Column(sa.String(64)) network = orm.relationship( models_v2.Network, backref=orm.backref('extension', cascade='delete', uselist=False, lazy='joined')) class TestSubnetExtension(model_base.BASEV2): subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id', ondelete="CASCADE"), primary_key=True) value = sa.Column(sa.String(64)) subnet = orm.relationship( models_v2.Subnet, backref=orm.backref('extension', cascade='delete', uselist=False, lazy='joined')) class TestPortExtension(model_base.BASEV2): port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True) value = sa.Column(sa.String(64)) port = orm.relationship( models_v2.Port, backref=orm.backref('extension', cascade='delete', uselist=False, lazy='joined')) class TestDBExtensionDriver(TestExtensionDriverBase): def _get_value(self, data, key): value = data[key] if not validators.is_attr_set(value): value = '' return value def process_create_network(self, plugin_context, data, result): session = plugin_context.session value = self._get_value(data, 'network_extension') record = TestNetworkExtension(network_id=result['id'], value=value) session.add(record) result['network_extension'] = value def process_update_network(self, plugin_context, data, result): session = plugin_context.session record = (session.query(TestNetworkExtension). 
filter_by(network_id=result['id']).one()) value = data.get('network_extension') if value and value != record.value: record.value = value result['network_extension'] = record.value def extend_network_dict(self, session, net_db, result): result['network_extension'] = net_db.extension.value def process_create_subnet(self, plugin_context, data, result): session = plugin_context.session value = self._get_value(data, 'subnet_extension') record = TestSubnetExtension(subnet_id=result['id'], value=value) session.add(record) result['subnet_extension'] = value def process_update_subnet(self, plugin_context, data, result): session = plugin_context.session record = (session.query(TestSubnetExtension). filter_by(subnet_id=result['id']).one()) value = data.get('subnet_extension') if value and value != record.value: record.value = value result['subnet_extension'] = record.value def extend_subnet_dict(self, session, subnet_db, result): value = subnet_db.extension.value if subnet_db.extension else '' result['subnet_extension'] = value def process_create_port(self, plugin_context, data, result): session = plugin_context.session value = self._get_value(data, 'port_extension') record = TestPortExtension(port_id=result['id'], value=value) session.add(record) result['port_extension'] = value def process_update_port(self, plugin_context, data, result): session = plugin_context.session record = (session.query(TestPortExtension). filter_by(port_id=result['id']).one()) value = data.get('port_extension') if value and value != record.value: record.value = value result['port_extension'] = record.value def extend_port_dict(self, session, port_db, result): value = port_db.extension.value if port_db.extension else '' result['port_extension'] = value ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.467046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/l2pop/0000755000175000017500000000000000000000000025643 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/l2pop/__init__.py0000644000175000017500000000000000000000000027742 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.467046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/0000755000175000017500000000000000000000000030121 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/__init__.py0000644000175000017500000000000000000000000032220 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc_base.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc_0000644000175000017500000001340000000000000033475 0ustar00coreycorey00000000000000# Copyright (C) 2014 VA Linux Systems Japan K.K. # Copyright (C) 2014 Fumihiko Kakuma # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import mock from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc from neutron.plugins.ml2.drivers.l2pop.rpc_manager import l2population_rpc from neutron.tests import base from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent import \ test_vlanmanager class FakeNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin): def fdb_add(self, context, fdb_entries): pass def fdb_remove(self, context, fdb_entries): pass def add_fdb_flow(self, br, port_info, remote_ip, lvm, ofport): pass def del_fdb_flow(self, br, port_info, remote_ip, lvm, ofport): pass def setup_tunnel_port(self, br, remote_ip, network_type): pass def cleanup_tunnel_port(self, br, tun_ofport, tunnel_type): pass def setup_entry_for_arp_reply(self, br, action, local_vid, mac_address, ip_address): pass class TestL2populationRpcCallBackTunnelMixinBase(base.BaseTestCase): def setUp(self): super(TestL2populationRpcCallBackTunnelMixinBase, self).setUp() self.vlan_manager = self.useFixture( test_vlanmanager.LocalVlanManagerFixture()).manager self.fakeagent = FakeNeutronAgent() self.fakebr = mock.Mock() Port = collections.namedtuple('Port', 'ip, ofport') LVM = collections.namedtuple( 'LVM', 'net, vlan, phys, segid, mac, ip, vif, port') self.local_ip = '127.0.0.1' self.type_gre = 'gre' self.ports = [Port(ip='10.1.0.1', ofport='ofport1'), Port(ip='10.1.0.2', ofport='ofport2'), Port(ip='10.1.0.3', ofport='ofport3')] self.ofports = { self.type_gre: { self.ports[0].ip: self.ports[0].ofport, self.ports[1].ip: self.ports[1].ofport, self.ports[2].ip: self.ports[2].ofport, } } self.lvms = [LVM(net='net1', vlan=1, phys='phys1', segid='tun1', mac='mac1', ip='1.1.1.1', vif='vifid1', port='port1'), LVM(net='net2', vlan=2, phys='phys2', segid='tun2', mac='mac2', ip='2.2.2.2', vif='vifid2', port='port2'), LVM(net='net3', vlan=3, phys='phys3', segid='tun3', mac='mac3', ip='3.3.3.3', vif='vifid3', port='port3')] self.agent_ports = { self.ports[0].ip: [(self.lvms[0].mac, self.lvms[0].ip)], self.ports[1].ip: [(self.lvms[1].mac, self.lvms[1].ip)], self.ports[2].ip: [(self.lvms[2].mac, self.lvms[2].ip)], } self.fdb_entries1 = { self.lvms[0].net: { 'network_type': self.type_gre, 'segment_id': self.lvms[0].segid, 'ports': { self.local_ip: [], self.ports[0].ip: [(self.lvms[0].mac, self.lvms[0].ip)]}, }, self.lvms[1].net: { 'network_type': self.type_gre, 'segment_id': self.lvms[1].segid, 'ports': { self.local_ip: [], self.ports[1].ip: [(self.lvms[1].mac, self.lvms[1].ip)]}, }, self.lvms[2].net: { 'network_type': self.type_gre, 'segment_id': self.lvms[2].segid, 'ports': { self.local_ip: [], self.ports[2].ip: [(self.lvms[2].mac, self.lvms[2].ip)]}, }, } for i in range(3): self.vlan_manager.add( self.lvms[i].net, self.lvms[i].vlan, self.type_gre, self.lvms[i].phys, self.lvms[i].segid, {self.lvms[i].vif: self.lvms[i].port}) setattr(self, 'lvm%d' % i, self.vlan_manager.get(self.lvms[i].net)) self.upd_fdb_entry1_val = { self.lvms[0].net: { self.ports[0].ip: { 'before': [l2pop_rpc.PortInfo(self.lvms[0].mac, self.lvms[0].ip)], 'after': [l2pop_rpc.PortInfo(self.lvms[1].mac, self.lvms[1].ip)], }, self.ports[1].ip: { 'before': 
[l2pop_rpc.PortInfo(self.lvms[0].mac, self.lvms[0].ip)], 'after': [l2pop_rpc.PortInfo(self.lvms[1].mac, self.lvms[1].ip)], }, }, self.lvms[1].net: { self.ports[2].ip: { 'before': [l2pop_rpc.PortInfo(self.lvms[0].mac, self.lvms[0].ip)], 'after': [l2pop_rpc.PortInfo(self.lvms[2].mac, self.lvms[2].ip)], }, }, } self.upd_fdb_entry1 = {'chg_ip': self.upd_fdb_entry1_val} def _tunnel_port_lookup(self, network_type, remote_ip): return self.ofports[network_type].get(remote_ip) ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/test_l2population_rpc.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/test_l2population0000644000175000017500000003012000000000000033527 0ustar00coreycorey00000000000000# Copyright (C) 2014 VA Linux Systems Japan K.K. # Copyright (C) 2014 Fumihiko Kakuma # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib import constants as n_const from neutron.tests.unit.plugins.ml2.drivers.l2pop.rpc_manager \ import l2population_rpc_base class TestL2populationRpcCallBackTunnelMixin( l2population_rpc_base.TestL2populationRpcCallBackTunnelMixinBase): def test_get_agent_ports_no_data(self): # Make sure vlan manager has no mappings that were added in setUp() self.vlan_manager.mapping = {} self.assertFalse( list(self.fakeagent.get_agent_ports(self.fdb_entries1))) def test_get_agent_ports_non_existence_key_in_lvm(self): results = {} self.vlan_manager.pop(self.lvms[1].net) for lvm, agent_ports in self.fakeagent.get_agent_ports( self.fdb_entries1): results[lvm] = agent_ports expected = { self.lvm0: { self.ports[0].ip: [(self.lvms[0].mac, self.lvms[0].ip)], self.local_ip: []}, self.lvm2: { self.ports[2].ip: [(self.lvms[2].mac, self.lvms[2].ip)], self.local_ip: []}, } self.assertEqual(expected, results) def test_get_agent_ports_no_agent_ports(self): results = {} self.fdb_entries1[self.lvms[1].net]['ports'] = {} for lvm, agent_ports in self.fakeagent.get_agent_ports( self.fdb_entries1): results[lvm] = agent_ports expected = { self.lvm0: { self.ports[0].ip: [(self.lvms[0].mac, self.lvms[0].ip)], self.local_ip: []}, self.lvm1: {}, self.lvm2: { self.ports[2].ip: [(self.lvms[2].mac, self.lvms[2].ip)], self.local_ip: []}, } self.assertEqual(expected, results) def test_fdb_add_tun(self): with mock.patch.object(self.fakeagent, 'setup_tunnel_port'),\ mock.patch.object(self.fakeagent, 'add_fdb_flow' ) as mock_add_fdb_flow: self.fakeagent.fdb_add_tun('context', self.fakebr, self.lvm0, self.agent_ports, self._tunnel_port_lookup) expected = [ mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip), self.ports[0].ip, self.lvm0, self.ports[0].ofport), mock.call(self.fakebr, (self.lvms[1].mac, self.lvms[1].ip), self.ports[1].ip, self.lvm0, self.ports[1].ofport), mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip), self.ports[2].ip, self.lvm0, self.ports[2].ofport), ] 
self.assertEqual(sorted(expected), sorted(mock_add_fdb_flow.call_args_list)) def test_fdb_add_tun_non_existence_key_in_ofports(self): ofport = self.lvm0.network_type + '0a0a0a0a' del self.ofports[self.type_gre][self.ports[1].ip] with mock.patch.object(self.fakeagent, 'setup_tunnel_port', return_value=ofport ) as mock_setup_tunnel_port,\ mock.patch.object(self.fakeagent, 'add_fdb_flow' ) as mock_add_fdb_flow: self.fakeagent.fdb_add_tun('context', self.fakebr, self.lvm0, self.agent_ports, self._tunnel_port_lookup) mock_setup_tunnel_port.assert_called_once_with( self.fakebr, self.ports[1].ip, self.lvm0.network_type) expected = [ mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip), self.ports[0].ip, self.lvm0, self.ports[0].ofport), mock.call(self.fakebr, (self.lvms[1].mac, self.lvms[1].ip), self.ports[1].ip, self.lvm0, ofport), mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip), self.ports[2].ip, self.lvm0, self.ports[2].ofport), ] self.assertEqual(sorted(expected), sorted(mock_add_fdb_flow.call_args_list)) def test_fdb_add_tun_unavailable_ofport(self): del self.ofports[self.type_gre][self.ports[1].ip] with mock.patch.object(self.fakeagent, 'setup_tunnel_port', return_value=0 ) as mock_setup_tunnel_port,\ mock.patch.object(self.fakeagent, 'add_fdb_flow' ) as mock_add_fdb_flow: self.fakeagent.fdb_add_tun('context', self.fakebr, self.lvm0, self.agent_ports, self._tunnel_port_lookup) mock_setup_tunnel_port.assert_called_once_with( self.fakebr, self.ports[1].ip, self.lvm0.network_type) expected = [ mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip), self.ports[0].ip, self.lvm0, self.ports[0].ofport), mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip), self.ports[2].ip, self.lvm0, self.ports[2].ofport), ] self.assertEqual(sorted(expected), sorted(mock_add_fdb_flow.call_args_list)) def test_fdb_remove_tun(self): with mock.patch.object(self.fakeagent, 'del_fdb_flow') as mock_del_fdb_flow: self.fakeagent.fdb_remove_tun('context', self.fakebr, self.lvm0, self.agent_ports, self._tunnel_port_lookup) expected = [ mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip), self.ports[0].ip, self.lvm0, self.ports[0].ofport), mock.call(self.fakebr, (self.lvms[1].mac, self.lvms[1].ip), self.ports[1].ip, self.lvm0, self.ports[1].ofport), mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip), self.ports[2].ip, self.lvm0, self.ports[2].ofport), ] self.assertEqual(sorted(expected), sorted(mock_del_fdb_flow.call_args_list)) def test_fdb_remove_tun_flooding_entry(self): self.agent_ports[self.ports[1].ip] = [n_const.FLOODING_ENTRY] with mock.patch.object(self.fakeagent, 'del_fdb_flow' ) as mock_del_fdb_flow,\ mock.patch.object(self.fakeagent, 'cleanup_tunnel_port' ) as mock_cleanup_tunnel_port: self.fakeagent.fdb_remove_tun('context', self.fakebr, self.lvm0, self.agent_ports, self._tunnel_port_lookup) expected = [ mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip), self.ports[0].ip, self.lvm0, self.ports[0].ofport), mock.call(self.fakebr, (n_const.FLOODING_ENTRY[0], n_const.FLOODING_ENTRY[1]), self.ports[1].ip, self.lvm0, self.ports[1].ofport), mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip), self.ports[2].ip, self.lvm0, self.ports[2].ofport), ] self.assertEqual(sorted(expected), sorted(mock_del_fdb_flow.call_args_list)) mock_cleanup_tunnel_port.assert_called_once_with( self.fakebr, self.ports[1].ofport, self.lvm0.network_type) def test_fdb_remove_tun_non_existence_key_in_ofports(self): del self.ofports[self.type_gre][self.ports[1].ip] with 
mock.patch.object(self.fakeagent, 'del_fdb_flow') as mock_del_fdb_flow: self.fakeagent.fdb_remove_tun('context', self.fakebr, self.lvm0, self.agent_ports, self._tunnel_port_lookup) expected = [ mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip), self.ports[0].ip, self.lvm0, self.ports[0].ofport), mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip), self.ports[2].ip, self.lvm0, self.ports[2].ofport), ] self.assertEqual(sorted(expected), sorted(mock_del_fdb_flow.call_args_list)) def test_fdb_update(self): fake__fdb_chg_ip = mock.Mock() self.fakeagent._fdb_chg_ip = fake__fdb_chg_ip self.fakeagent.fdb_update('context', self.upd_fdb_entry1) fake__fdb_chg_ip.assert_called_once_with( 'context', self.upd_fdb_entry1_val) def test_fdb_update_non_existence_method(self): self.assertRaises(NotImplementedError, self.fakeagent.fdb_update, 'context', self.upd_fdb_entry1) def test__fdb_chg_ip(self): with mock.patch.object( self.fakeagent, 'setup_entry_for_arp_reply') as m_setup_entry_for_arp_reply: self.fakeagent.fdb_chg_ip_tun('context', self.fakebr, self.upd_fdb_entry1_val, self.local_ip) expected = [ mock.call(self.fakebr, 'remove', self.lvm0.vlan, self.lvms[0].mac, self.lvms[0].ip), mock.call(self.fakebr, 'add', self.lvm0.vlan, self.lvms[1].mac, self.lvms[1].ip), mock.call(self.fakebr, 'remove', self.lvm0.vlan, self.lvms[0].mac, self.lvms[0].ip), mock.call(self.fakebr, 'add', self.lvm0.vlan, self.lvms[1].mac, self.lvms[1].ip), mock.call(self.fakebr, 'remove', self.lvm1.vlan, self.lvms[0].mac, self.lvms[0].ip), mock.call(self.fakebr, 'add', self.lvm1.vlan, self.lvms[2].mac, self.lvms[2].ip), ] m_setup_entry_for_arp_reply.assert_has_calls(expected, any_order=True) def test__fdb_chg_ip_no_lvm(self): m_setup_entry_for_arp_reply = mock.Mock() self.fakeagent.setup_entry_for_arp_reply = m_setup_entry_for_arp_reply self.fakeagent.fdb_chg_ip_tun( 'context', self.fakebr, self.upd_fdb_entry1, self.local_ip) self.assertFalse(m_setup_entry_for_arp_reply.call_count) def test__fdb_chg_ip_ip_is_local_ip(self): upd_fdb_entry_val = { self.lvms[0].net: { self.local_ip: { 'before': [(self.lvms[0].mac, self.lvms[0].ip)], 'after': [(self.lvms[1].mac, self.lvms[1].ip)], }, }, } m_setup_entry_for_arp_reply = mock.Mock() self.fakeagent.setup_entry_for_arp_reply = m_setup_entry_for_arp_reply self.fakeagent.fdb_chg_ip_tun('context', self.fakebr, upd_fdb_entry_val, self.local_ip) self.assertFalse(m_setup_entry_for_arp_reply.call_count) def test_fdb_chg_ip_tun_empty_before_after(self): upd_fdb_entry_val = { self.lvms[0].net: { self.local_ip: {}, }, } m_setup_entry_for_arp_reply = mock.Mock() self.fakeagent.setup_entry_for_arp_reply = m_setup_entry_for_arp_reply # passing non-local ip self.fakeagent.fdb_chg_ip_tun('context', self.fakebr, upd_fdb_entry_val, "8.8.8.8") self.assertFalse(m_setup_entry_for_arp_reply.call_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_db.py0000644000175000017500000003055500000000000027651 0ustar00coreycorey00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import netaddr
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants
from neutron_lib import context
from neutron_lib.tests import tools
from neutron_lib.utils import net
from oslo_utils import uuidutils

from neutron.db.models import l3 as l3_models
from neutron.objects import l3_hamode
from neutron.objects import network as network_obj
from neutron.objects import ports as port_obj
from neutron.objects import router as l3_objs
from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db
from neutron.plugins.ml2 import models
from neutron.tests.common import helpers
from neutron.tests.unit import testlib_api

HOST = helpers.HOST
HOST_2 = 'HOST_2'
HOST_3 = 'HOST_3'
HOST_2_TUNNELING_IP = '20.0.0.2'
HOST_3_TUNNELING_IP = '20.0.0.3'
TEST_ROUTER_ID = uuidutils.generate_uuid()
TEST_NETWORK_ID = uuidutils.generate_uuid()
TEST_HA_NETWORK_ID = uuidutils.generate_uuid()
PLUGIN_NAME = 'ml2'


class TestL2PopulationDBTestCase(testlib_api.SqlTestCase):

    def setUp(self):
        super(TestL2PopulationDBTestCase, self).setUp()
        self.setup_coreplugin(PLUGIN_NAME)
        self.ctx = context.get_admin_context()
        self._create_network()

    def _create_network(self, network_id=TEST_NETWORK_ID):
        network_obj.Network(self.ctx, id=network_id).create()

    def _create_router(self, distributed=True, ha=False):
        with self.ctx.session.begin(subtransactions=True):
            self.ctx.session.add(l3_models.Router(id=TEST_ROUTER_ID))
            l3_objs.RouterExtraAttributes(
                self.ctx, router_id=TEST_ROUTER_ID, distributed=distributed,
                ha=ha).create()

    def _create_ha_router(self, distributed=False):
        helpers.register_l3_agent(HOST_2)
        helpers.register_ovs_agent(HOST_2, tunneling_ip=HOST_2_TUNNELING_IP)
        # Register an l3 agent on host3, which doesn't host any HA router;
        # tests can use it to verify that host3 is never treated as an HA
        # agent host.
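        # The bindings created below put the HA port for HOST into the
        # "active" state and the one for HOST_2 into "standby"; HOST_3 gets
        # agents but no HA binding, so it must never appear in the HA
        # query results asserted later in this class.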
helpers.register_l3_agent(HOST_3) helpers.register_ovs_agent(HOST_3, tunneling_ip=HOST_3_TUNNELING_IP) with self.ctx.session.begin(subtransactions=True): network_obj.Network(self.ctx, id=TEST_HA_NETWORK_ID).create() self._create_router(distributed=distributed, ha=True) for state, host in [(constants.HA_ROUTER_STATE_ACTIVE, HOST), (constants.HA_ROUTER_STATE_STANDBY, HOST_2)]: self._setup_port_binding( network_id=TEST_HA_NETWORK_ID, device_owner=constants.DEVICE_OWNER_ROUTER_HA_INTF, device_id=TEST_ROUTER_ID, host_state=state, host=host) def get_l3_agent_by_host(self, agent_host): plugin = helpers.FakePlugin() return plugin._get_agent_by_type_and_host( self.ctx, constants.AGENT_TYPE_L3, agent_host) def test_get_agent_by_host(self): helpers.register_l3_agent() helpers.register_dhcp_agent() helpers.register_ovs_agent() agent = l2pop_db.get_agent_by_host( self.ctx, helpers.HOST) self.assertEqual(constants.AGENT_TYPE_OVS, agent.agent_type) def test_get_agent_by_host_no_candidate(self): helpers.register_l3_agent() helpers.register_dhcp_agent() agent = l2pop_db.get_agent_by_host( self.ctx, helpers.HOST) self.assertIsNone(agent) def _setup_port_binding(self, **kwargs): with self.ctx.session.begin(subtransactions=True): mac = netaddr.EUI( net.get_random_mac('fa:16:3e:00:00:00'.split(':')), dialect=netaddr.mac_unix_expanded) port_id = uuidutils.generate_uuid() network_id = kwargs.get('network_id', TEST_NETWORK_ID) device_owner = kwargs.get('device_owner', '') device_id = kwargs.get('device_id', uuidutils.generate_uuid()) host = kwargs.get('host', helpers.HOST) port_obj.Port(self.ctx, id=port_id, network_id=network_id, mac_address=mac, admin_state_up=True, status=constants.PORT_STATUS_ACTIVE, device_id=device_id, device_owner=device_owner).create() port_binding_cls = models.PortBinding binding_kwarg = {'port_id': port_id, 'host': host, 'vif_type': portbindings.VIF_TYPE_UNBOUND, 'vnic_type': portbindings.VNIC_NORMAL} if device_owner == constants.DEVICE_OWNER_DVR_INTERFACE: port_binding_cls = models.DistributedPortBinding binding_kwarg['router_id'] = TEST_ROUTER_ID binding_kwarg['status'] = constants.PORT_STATUS_DOWN self.ctx.session.add(port_binding_cls(**binding_kwarg)) if network_id == TEST_HA_NETWORK_ID: agent = self.get_l3_agent_by_host(host) l3_hamode.L3HARouterAgentPortBinding( self.ctx, port_id=port_id, router_id=device_id, l3_agent_id=agent['id'], state=kwargs.get( 'host_state', constants.HA_ROUTER_STATE_ACTIVE)).create() def test_get_distributed_active_network_ports(self): self._setup_port_binding( device_owner=constants.DEVICE_OWNER_DVR_INTERFACE) # Register a L2 agent + A bunch of other agents on the same host helpers.register_l3_agent() helpers.register_dhcp_agent() helpers.register_ovs_agent() tunnel_network_ports = l2pop_db.get_distributed_active_network_ports( self.ctx, TEST_NETWORK_ID) self.assertEqual(1, len(tunnel_network_ports)) _, agent = tunnel_network_ports[0] self.assertEqual(constants.AGENT_TYPE_OVS, agent.agent_type) def test_get_distributed_active_network_ports_no_candidate(self): self._setup_port_binding( device_owner=constants.DEVICE_OWNER_DVR_INTERFACE) # Register a bunch of non-L2 agents on the same host helpers.register_l3_agent() helpers.register_dhcp_agent() tunnel_network_ports = l2pop_db.get_distributed_active_network_ports( self.ctx, TEST_NETWORK_ID) self.assertEqual(0, len(tunnel_network_ports)) def test_get_nondistributed_active_network_ports(self): self._setup_port_binding(dvr=False) # Register a L2 agent + A bunch of other agents on the same host 
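        # Only the OVS (L2) agent makes the host a candidate for
        # get_distributed_active_network_ports(); the *_no_candidate test
        # below registers only L3/DHCP agents and expects an empty result.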
helpers.register_l3_agent() helpers.register_dhcp_agent() helpers.register_ovs_agent() fdb_network_ports = l2pop_db.get_nondistributed_active_network_ports( self.ctx, TEST_NETWORK_ID) self.assertEqual(1, len(fdb_network_ports)) _, agent = fdb_network_ports[0] self.assertEqual(constants.AGENT_TYPE_OVS, agent.agent_type) def test_get_nondistributed_active_network_ports_no_candidate(self): self._setup_port_binding(dvr=False) # Register a bunch of non-L2 agents on the same host helpers.register_l3_agent() helpers.register_dhcp_agent() fdb_network_ports = l2pop_db.get_nondistributed_active_network_ports( self.ctx, TEST_NETWORK_ID) self.assertEqual(0, len(fdb_network_ports)) def test__get_ha_router_interface_ids_with_ha_dvr_snat_port(self): helpers.register_dhcp_agent() helpers.register_l3_agent() helpers.register_ovs_agent() self._create_ha_router() self._setup_port_binding( device_owner=constants.DEVICE_OWNER_ROUTER_SNAT, device_id=TEST_ROUTER_ID) ha_iface_ids = l2pop_db._get_ha_router_interface_ids( self.ctx, TEST_NETWORK_ID) self.assertEqual(1, len(list(ha_iface_ids))) def test__get_ha_router_interface_ids_with_ha_replicated_port(self): helpers.register_dhcp_agent() helpers.register_l3_agent() helpers.register_ovs_agent() self._create_ha_router() self._setup_port_binding( device_owner=constants.DEVICE_OWNER_HA_REPLICATED_INT, device_id=TEST_ROUTER_ID) ha_iface_ids = l2pop_db._get_ha_router_interface_ids( self.ctx, TEST_NETWORK_ID) self.assertEqual(1, len(list(ha_iface_ids))) def test__get_ha_router_interface_ids_with_no_ha_port(self): self._create_router() self._setup_port_binding( device_owner=constants.DEVICE_OWNER_ROUTER_SNAT, device_id=TEST_ROUTER_ID) ha_iface_ids = l2pop_db._get_ha_router_interface_ids( self.ctx, TEST_NETWORK_ID) self.assertEqual(0, len(list(ha_iface_ids))) def test_active_network_ports_with_dvr_snat_port(self): # Test to get agent hosting dvr snat port helpers.register_l3_agent() helpers.register_dhcp_agent() helpers.register_ovs_agent() # create DVR router self._create_router() # setup DVR snat port self._setup_port_binding( device_owner=constants.DEVICE_OWNER_ROUTER_SNAT, device_id=TEST_ROUTER_ID) helpers.register_dhcp_agent() fdb_network_ports = l2pop_db.get_nondistributed_active_network_ports( self.ctx, TEST_NETWORK_ID) self.assertEqual(1, len(fdb_network_ports)) def test_active_network_ports_with_ha_dvr_snat_port(self): # test to get HA agents hosting HA+DVR snat port helpers.register_dhcp_agent() helpers.register_l3_agent() helpers.register_ovs_agent() # create HA+DVR router self._create_ha_router() # setup HA snat port self._setup_port_binding( device_owner=constants.DEVICE_OWNER_ROUTER_SNAT, device_id=TEST_ROUTER_ID) fdb_network_ports = l2pop_db.get_nondistributed_active_network_ports( self.ctx, TEST_NETWORK_ID) self.assertEqual(0, len(fdb_network_ports)) ha_ports = l2pop_db.get_ha_active_network_ports( self.ctx, TEST_NETWORK_ID) self.assertEqual(2, len(ha_ports)) def test_active_port_count_with_dvr_snat_port(self): helpers.register_l3_agent() helpers.register_dhcp_agent() helpers.register_ovs_agent() self._create_router() self._setup_port_binding( device_owner=constants.DEVICE_OWNER_ROUTER_SNAT, device_id=TEST_ROUTER_ID) helpers.register_dhcp_agent() port_count = l2pop_db.get_agent_network_active_port_count( self.ctx, HOST, TEST_NETWORK_ID) self.assertEqual(1, port_count) port_count = l2pop_db.get_agent_network_active_port_count( self.ctx, HOST_2, TEST_NETWORK_ID) self.assertEqual(0, port_count) def test_active_port_count_with_ha_dvr_snat_port(self): 
        helpers.register_dhcp_agent()
        helpers.register_l3_agent()
        helpers.register_ovs_agent()
        self._create_ha_router()
        self._setup_port_binding(
            device_owner=constants.DEVICE_OWNER_ROUTER_SNAT,
            device_id=TEST_ROUTER_ID)
        port_count = l2pop_db.get_agent_network_active_port_count(
            self.ctx, HOST, TEST_NETWORK_ID)
        self.assertEqual(1, port_count)
        port_count = l2pop_db.get_agent_network_active_port_count(
            self.ctx, HOST_2, TEST_NETWORK_ID)
        self.assertEqual(1, port_count)

    def test_get_ha_agents_by_router_id(self):
        helpers.register_dhcp_agent()
        helpers.register_l3_agent()
        helpers.register_ovs_agent()
        self._create_ha_router()
        self._setup_port_binding(
            device_owner=constants.DEVICE_OWNER_ROUTER_SNAT,
            device_id=TEST_ROUTER_ID)
        agents = l2pop_db.get_ha_agents_by_router_id(
            self.ctx, TEST_ROUTER_ID)
        ha_agents = [agent.host for agent in agents]
        self.assertEqual(tools.UnorderedList([HOST, HOST_2]), ha_agents)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0
neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py0000644000175000017500000021151000000000000031543 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
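# The test cases in this module repeatedly build and assert on l2pop RPC
# payloads of one shape:
#   {network_id: {'ports': {tunnel_ip: [PortInfo, ...]},
#                 'network_type': ..., 'segment_id': ...}}
# where PortInfo is a (mac_address, ip_address) namedtuple and the all-zero
# entry marks the flood/broadcast destination. A minimal, self-contained
# sketch of that structure with made-up values; PortInfo and FLOODING_ENTRY
# below are local stand-ins for l2pop_rpc.PortInfo and the neutron_lib
# constant used by the tests.
import collections
import json

PortInfo = collections.namedtuple('PortInfo', 'mac_address ip_address')
FLOODING_ENTRY = PortInfo('00:00:00:00:00:00', '0.0.0.0')

fdb_entries = {
    'net-uuid': {
        'network_type': 'vxlan',
        'segment_id': 1,
        'ports': {
            '20.0.0.1': [FLOODING_ENTRY,
                         PortInfo('fa:16:3e:ff:8c:0f', '10.0.0.6')],
        },
    },
}

# As a namedtuple, PortInfo compares equal to the plain tuple it was built
# from, and JSON serialization flattens it to a list -- the two properties
# checked by test_port_info_compare and test_portinfo_marshalled_as_list
# below.
assert fdb_entries['net-uuid']['ports']['20.0.0.1'][1] == (
    'fa:16:3e:ff:8c:0f', '10.0.0.6')
round_tripped = json.loads(json.dumps(fdb_entries))
assert round_tripped['net-uuid']['ports']['20.0.0.1'][0] == [
    '00:00:00:00:00:00', '0.0.0.0']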
import mock from neutron_lib.agent import topics from neutron_lib.api.definitions import port as port_def from neutron_lib.api.definitions import portbindings from neutron_lib.api.definitions import provider_net as pnet from neutron_lib import constants from neutron_lib import context from neutron_lib import exceptions from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from oslo_serialization import jsonutils import testtools from neutron.db import agents_db from neutron.db import l3_agentschedulers_db from neutron.db import l3_hamode_db from neutron.plugins.ml2 import driver_context from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db from neutron.plugins.ml2.drivers.l2pop import mech_driver as l2pop_mech_driver from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc from neutron.plugins.ml2.drivers.l2pop.rpc_manager import l2population_rpc from neutron.plugins.ml2 import managers from neutron.plugins.ml2 import models from neutron.plugins.ml2 import rpc from neutron.scheduler import l3_agent_scheduler from neutron.tests import base from neutron.tests.common import helpers from neutron.tests.unit.plugins.ml2 import test_plugin HOST = 'my_l2_host' HOST_2 = HOST + '_2' HOST_3 = HOST + '_3' HOST_4 = HOST + '_4' HOST_5 = HOST + '_5' TEST_ROUTER_ID = 'router_id' NOTIFIER = 'neutron.plugins.ml2.rpc.AgentNotifierApi' DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' DEVICE_OWNER_ROUTER_HA_INTF = constants.DEVICE_OWNER_ROUTER_HA_INTF + 'fake' class FakeL3PluginWithAgents(l3_hamode_db.L3_HA_NAT_db_mixin, l3_agentschedulers_db.L3AgentSchedulerDbMixin, agents_db.AgentDbMixin): pass class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase): _mechanism_drivers = ['openvswitch', 'fake_agent', 'l2population'] tenant = 'tenant' def setUp(self): super(TestL2PopulationRpcTestCase, self).setUp() self.adminContext = context.get_admin_context() self.type_manager = managers.TypeManager() self.notifier = rpc.AgentNotifierApi(topics.AGENT) self.callbacks = rpc.RpcCallbacks(self.notifier, self.type_manager) net_arg = {pnet.NETWORK_TYPE: 'vxlan', pnet.SEGMENTATION_ID: '1'} self._network = self._make_network(self.fmt, 'net1', True, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID,), **net_arg) net_arg = {pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: '2'} self._network2 = self._make_network(self.fmt, 'net2', True, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID,), **net_arg) net_arg = {pnet.NETWORK_TYPE: 'flat', pnet.PHYSICAL_NETWORK: 'noagent'} self._network3 = self._make_network(self.fmt, 'net3', True, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,), **net_arg) notifier_patch = mock.patch(NOTIFIER) notifier_patch.start() self.fanout_topic = topics.get_topic_name(topics.AGENT, topics.L2POPULATION, topics.UPDATE) fanout = ('neutron.plugins.ml2.drivers.l2pop.rpc.' 'L2populationAgentNotifyAPI._notification_fanout') fanout_patch = mock.patch(fanout) self.mock_fanout = fanout_patch.start() cast = ('neutron.plugins.ml2.drivers.l2pop.rpc.' 
'L2populationAgentNotifyAPI._notification_host') cast_patch = mock.patch(cast) self.mock_cast = cast_patch.start() uptime = ('neutron.plugins.ml2.drivers.l2pop.db.get_agent_uptime') uptime_patch = mock.patch(uptime, return_value=190) uptime_patch.start() def _setup_l3(self): notif_p = mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin, '_notify_router_updated') self.notif_m = notif_p.start() self.plugin = FakeL3PluginWithAgents() self._register_ml2_agents() self._register_l3_agents() def _register_l3_agents(self): self.agent1 = helpers.register_l3_agent(host=HOST) self.agent2 = helpers.register_l3_agent(host=HOST_2) def _register_ml2_agents(self): helpers.register_ovs_agent(host=HOST, tunneling_ip='20.0.0.1') helpers.register_ovs_agent(host=HOST_2, tunneling_ip='20.0.0.2') helpers.register_ovs_agent(host=HOST_3, tunneling_ip='20.0.0.3', tunnel_types=[]) helpers.register_ovs_agent(host=HOST_4, tunneling_ip='20.0.0.4') helpers.register_ovs_agent(host=HOST_5, tunneling_ip='20.0.0.5', binary='neutron-fake-agent', tunnel_types=[], interface_mappings={'physnet1': 'eth9'}, agent_type=constants.AGENT_TYPE_OFA, l2pop_network_types=['vlan']) def test_port_info_compare(self): # An assumption the code makes is that PortInfo compares equal to # equivalent regular tuples. self.assertEqual(("mac", "ip"), l2pop_rpc.PortInfo("mac", "ip")) flooding_entry = l2pop_rpc.PortInfo(*constants.FLOODING_ENTRY) self.assertEqual(constants.FLOODING_ENTRY, flooding_entry) def test__unmarshall_fdb_entries(self): entries = {'foouuid': { 'segment_id': 1001, 'ports': {'192.168.0.10': [['00:00:00:00:00:00', '0.0.0.0'], ['fa:16:3e:ff:8c:0f', '10.0.0.6']]}, 'network_type': 'vxlan'}} entries['chg_ip'] = { 'foouuid': { '192.168.0.1': {'before': [['fa:16:3e:ff:8c:0f', '10.0.0.6']], 'after': [['fa:16:3e:ff:8c:0f', '10.0.0.7']]}, '192.168.0.2': {'before': [['fa:16:3e:ff:8c:0e', '10.0.0.8']]} }, 'foouuid2': { '192.168.0.1': {'before': [['ff:16:3e:ff:8c:0e', '1.0.0.8']]} } } mixin = l2population_rpc.L2populationRpcCallBackMixin entries = mixin._unmarshall_fdb_entries(entries) port_info_list = entries['foouuid']['ports']['192.168.0.10'] # Check that the lists have been properly converted to PortInfo self.assertIsInstance(port_info_list[0], l2pop_rpc.PortInfo) self.assertIsInstance(port_info_list[1], l2pop_rpc.PortInfo) self.assertEqual(('00:00:00:00:00:00', '0.0.0.0'), port_info_list[0]) self.assertEqual(('fa:16:3e:ff:8c:0f', '10.0.0.6'), port_info_list[1]) agt1 = entries['chg_ip']['foouuid']['192.168.0.1'] self.assertIsInstance(agt1['before'][0], l2pop_rpc.PortInfo) self.assertIsInstance(agt1['after'][0], l2pop_rpc.PortInfo) self.assertEqual(('fa:16:3e:ff:8c:0f', '10.0.0.6'), agt1['before'][0]) self.assertEqual(('fa:16:3e:ff:8c:0f', '10.0.0.7'), agt1['after'][0]) agt1_net2 = entries['chg_ip']['foouuid2']['192.168.0.1'] self.assertEqual(('ff:16:3e:ff:8c:0e', '1.0.0.8'), agt1_net2['before'][0]) self.assertIsInstance(agt1_net2['before'][0], l2pop_rpc.PortInfo) agt2 = entries['chg_ip']['foouuid']['192.168.0.2'] self.assertIsInstance(agt2['before'][0], l2pop_rpc.PortInfo) self.assertEqual(('fa:16:3e:ff:8c:0e', '10.0.0.8'), agt2['before'][0]) def test_portinfo_marshalled_as_list(self): entry = ['fa:16:3e:ff:8c:0f', '10.0.0.6'] payload = {'netuuid': {'ports': {'1': [l2pop_rpc.PortInfo(*entry)]}}} result = jsonutils.loads(jsonutils.dumps(payload)) self.assertEqual(entry, result['netuuid']['ports']['1'][0]) def _create_router(self, ha=True, tenant_id='tenant1', distributed=None, ctx=None): if ctx is None: ctx = self.adminContext ctx.tenant_id 
= tenant_id router = {'name': TEST_ROUTER_ID, 'admin_state_up': True, 'tenant_id': ctx.tenant_id} if ha is not None: router['ha'] = ha if distributed is not None: router['distributed'] = distributed return self.plugin.create_router(ctx, {'router': router}) def _bind_router(self, router_id, tenant_id): scheduler = l3_agent_scheduler.ChanceScheduler() filters = {'agent_type': [constants.AGENT_TYPE_L3]} agents_object = self.plugin.get_agent_objects( self.adminContext, filters=filters) for agent_obj in agents_object: scheduler.create_ha_port_and_bind( self.plugin, self.adminContext, router_id, tenant_id, agent_obj) self._bind_ha_network_ports(router_id) def _bind_ha_network_ports(self, router_id): port_bindings = self.plugin.get_ha_router_port_bindings( self.adminContext, [router_id]) plugin = directory.get_plugin() for port_binding in port_bindings: filters = {'id': [port_binding.port_id]} port = plugin.get_ports(self.adminContext, filters=filters)[0] if port_binding.l3_agent_id == self.agent1['id']: port[portbindings.HOST_ID] = self.agent1['host'] else: port[portbindings.HOST_ID] = self.agent2['host'] plugin.update_port(self.adminContext, port['id'], {port_def.RESOURCE_NAME: port}) def _get_first_interface(self, net_id, router): plugin = directory.get_plugin() if router['distributed']: device_filter = {'device_id': [router['id']], 'device_owner': [constants.DEVICE_OWNER_DVR_INTERFACE]} else: device_filter = {'device_id': [router['id']], 'device_owner': [constants.DEVICE_OWNER_HA_REPLICATED_INT]} ports = plugin.get_ports(self.adminContext, filters=device_filter) if ports: return ports[0] def _add_router_interface(self, subnet, router, host): interface_info = {'subnet_id': subnet['id']} self.plugin.add_router_interface(self.adminContext, router['id'], interface_info) self.plugin.update_routers_states( self.adminContext, {router['id']: constants.HA_ROUTER_STATE_ACTIVE}, host) port = self._get_first_interface(subnet['network_id'], router) self.mock_cast.reset_mock() self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=host, device=port['id'], host=host) return port def _create_ha_router(self): self._setup_l3() router = self._create_router() self._bind_router(router['id'], router['tenant_id']) return router def _create_dvr_router(self): self._setup_l3() router = self._create_router(distributed=True) self._bind_router(router['id'], router['tenant_id']) return router def _verify_remove_fdb(self, expected, agent_id, device, host=None): self.mock_fanout.reset_mock() self.callbacks.update_device_down(self.adminContext, agent_id=host, device=device, host=host) self.mock_fanout.assert_called_with( mock.ANY, 'remove_fdb_entries', expected) def test_other_agents_get_flood_entries_for_ha_agents(self): # First HA router port is added on HOST and HOST2, then network port # is added on HOST4. 
# HOST4 should get flood entries for HOST1 and HOST2 router = self._create_ha_router() directory.add_plugin(plugin_constants.L3, self.plugin) with self.subnet(network=self._network, enable_dhcp=False) as snet: subnet = snet['subnet'] port = self._add_router_interface(subnet, router, HOST) host_arg = {portbindings.HOST_ID: HOST_4, 'admin_state_up': True} with self.port(subnet=snet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: p1 = port1['port'] device1 = 'tap' + p1['id'] self.mock_cast.reset_mock() self.mock_fanout.reset_mock() self.callbacks.update_device_up( self.adminContext, agent_id=HOST_4, device=device1) cast_expected = { port['network_id']: { 'ports': {'20.0.0.1': [constants.FLOODING_ENTRY], '20.0.0.2': [constants.FLOODING_ENTRY]}, 'network_type': 'vxlan', 'segment_id': 1}} self.assertEqual(1, self.mock_cast.call_count) self.mock_cast.assert_called_with( mock.ANY, 'add_fdb_entries', cast_expected, HOST_4) def test_delete_ha_port(self): # First network port is added on HOST, and then HA router port # is added on HOST and HOST2. # Remove_fdb should carry flood entry of only HOST2 and not HOST router = self._create_ha_router() directory.add_plugin(plugin_constants.L3, self.plugin) with self.subnet(network=self._network, enable_dhcp=False) as snet: host_arg = {portbindings.HOST_ID: HOST, 'admin_state_up': True} with self.port(subnet=snet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: p1 = port1['port'] device1 = 'tap' + p1['id'] self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device1) subnet = snet['subnet'] port = self._add_router_interface(subnet, router, HOST) expected = {port['network_id']: {'ports': {'20.0.0.2': [constants.FLOODING_ENTRY]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_fanout.reset_mock() interface_info = {'subnet_id': subnet['id']} self.plugin.remove_router_interface(self.adminContext, router['id'], interface_info) self.mock_fanout.assert_called_with( mock.ANY, 'remove_fdb_entries', expected) def test_ovs_agent_restarted_with_dvr_port(self): plugin = directory.get_plugin() router = self._create_dvr_router() with self.subnet(network=self._network, enable_dhcp=False) as snet: with self.port( subnet=snet, project_id=self.tenant, device_owner=constants.DEVICE_OWNER_DVR_INTERFACE)\ as port: port_id = port['port']['id'] plugin.update_distributed_port_binding(self.adminContext, port_id, {'port': {portbindings.HOST_ID: HOST_4, 'device_id': router['id']}}) port = self._show('ports', port_id, neutron_context=self.adminContext) self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED, port['port'][portbindings.VIF_TYPE]) self.callbacks.update_device_up(self.adminContext, agent_id=HOST_4, device=port_id, host=HOST_4, refresh_tunnels=True) fanout_expected = {port['port']['network_id']: { 'network_type': u'vxlan', 'ports': {u'20.0.0.4': [('00:00:00:00:00:00', '0.0.0.0')]}, 'segment_id': 1}} self.mock_fanout.assert_called_with(mock.ANY, 'add_fdb_entries', fanout_expected) def test_ha_agents_with_dvr_rtr_does_not_get_other_fdb(self): router = self._create_dvr_router() directory.add_plugin(plugin_constants.L3, self.plugin) with self.subnet(network=self._network, enable_dhcp=False) as snet: host_arg = {portbindings.HOST_ID: HOST_4, 'admin_state_up': True} with self.port(subnet=snet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: p1 = port1['port'] device1 = 'tap' + p1['id'] self.callbacks.update_device_up( 
self.adminContext, agent_id=HOST_4, device=device1) subnet = snet['subnet'] port = self._add_router_interface(subnet, router, HOST) self.mock_cast.assert_not_called() self.mock_fanout.assert_not_called() self.mock_cast.reset_mock() self.mock_fanout.reset_mock() self.callbacks.update_device_up( self.adminContext, agent_id=HOST_2, device=port['id'], host=HOST_2) self.mock_cast.assert_not_called() self.mock_fanout.assert_not_called() def test_ha_agents_get_other_fdb(self): # First network port is added on HOST4, then HA router port is # added on HOST and HOST2. # Both HA agents should create tunnels to HOST4 and among themselves. # Both HA agents should be notified to other agents. router = self._create_ha_router() directory.add_plugin(plugin_constants.L3, self.plugin) with self.subnet(network=self._network, enable_dhcp=False) as snet: host_arg = {portbindings.HOST_ID: HOST_4, 'admin_state_up': True} with self.port(subnet=snet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: p1 = port1['port'] device1 = 'tap' + p1['id'] self.callbacks.update_device_up( self.adminContext, agent_id=HOST_4, device=device1) p1_ips = [p['ip_address'] for p in p1['fixed_ips']] subnet = snet['subnet'] port = self._add_router_interface(subnet, router, HOST) fanout_expected = {port['network_id']: { 'ports': {'20.0.0.1': [constants.FLOODING_ENTRY]}, 'network_type': 'vxlan', 'segment_id': 1}} cast_expected_host = {port['network_id']: { 'ports': { '20.0.0.4': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo(p1['mac_address'], p1_ips[0])], '20.0.0.2': [constants.FLOODING_ENTRY]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_cast.assert_called_with( mock.ANY, 'add_fdb_entries', cast_expected_host, HOST) self.mock_fanout.assert_called_with( mock.ANY, 'add_fdb_entries', fanout_expected) self.mock_cast.reset_mock() self.mock_fanout.reset_mock() self.callbacks.update_device_up( self.adminContext, agent_id=HOST_2, device=port['id'], host=HOST_2) cast_expected_host2 = {port['network_id']: { 'ports': { '20.0.0.4': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo(p1['mac_address'], p1_ips[0])], '20.0.0.1': [constants.FLOODING_ENTRY]}, 'network_type': 'vxlan', 'segment_id': 1}} fanout_expected = {port['network_id']: { 'ports': {'20.0.0.2': [constants.FLOODING_ENTRY]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_cast.assert_called_with( mock.ANY, 'add_fdb_entries', cast_expected_host2, HOST_2) self.mock_fanout.assert_called_with( mock.ANY, 'add_fdb_entries', fanout_expected) def test_fdb_add_called(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: with self.port(subnet=subnet, arg_list=(portbindings.HOST_ID,), **host_arg): p1 = port1['port'] device = 'tap' + p1['id'] self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device) p1_ips = [p['ip_address'] for p in p1['fixed_ips']] expected = {p1['network_id']: {'ports': {'20.0.0.1': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo( p1['mac_address'], p1_ips[0])]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_fanout.assert_called_with( mock.ANY, 'add_fdb_entries', expected) def test_fdb_add_not_called_type_local(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST + '_3'} with self.port(subnet=subnet, 
arg_list=(portbindings.HOST_ID,), **host_arg) as port1: with self.port(subnet=subnet, arg_list=(portbindings.HOST_ID,), **host_arg): p1 = port1['port'] device = 'tap' + p1['id'] self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device) self.assertFalse(self.mock_fanout.called) def test_fdb_add_called_for_l2pop_network_types(self): self._register_ml2_agents() host = HOST + '_5' with self.subnet(network=self._network2) as subnet: host_arg = {portbindings.HOST_ID: host} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: with self.port(subnet=subnet, arg_list=(portbindings.HOST_ID,), **host_arg): p1 = port1['port'] device = 'tap' + p1['id'] self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=host, device=device) p1_ips = [p['ip_address'] for p in p1['fixed_ips']] expected = {p1['network_id']: {'ports': {'20.0.0.5': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo( p1['mac_address'], p1_ips[0])]}, 'network_type': 'vlan', 'segment_id': 2}} self.mock_fanout.assert_called_with( mock.ANY, 'add_fdb_entries', expected) def test_fdb_called_for_active_ports(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: host_arg = {portbindings.HOST_ID: HOST + '_2'} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg): p1 = port1['port'] device1 = 'tap' + p1['id'] self.mock_cast.reset_mock() self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device1) p1_ips = [p['ip_address'] for p in p1['fixed_ips']] self.assertFalse(self.mock_cast.called) expected2 = {p1['network_id']: {'ports': {'20.0.0.1': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo( p1['mac_address'], p1_ips[0])]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_fanout.assert_called_with( mock.ANY, 'add_fdb_entries', expected2) def test_fdb_add_two_agents(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST, 'admin_state_up': True} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID, 'admin_state_up',), **host_arg) as port1: host_arg = {portbindings.HOST_ID: HOST + '_2', 'admin_state_up': True} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID, 'admin_state_up',), **host_arg) as port2: p1 = port1['port'] p2 = port2['port'] device1 = 'tap' + p1['id'] device2 = 'tap' + p2['id'] self.mock_cast.reset_mock() self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=HOST + '_2', device=device2) self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device1) p1_ips = [p['ip_address'] for p in p1['fixed_ips']] p2_ips = [p['ip_address'] for p in p2['fixed_ips']] expected1 = {p1['network_id']: {'ports': {'20.0.0.2': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo( p2['mac_address'], p2_ips[0])]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_cast.assert_called_with(mock.ANY, 'add_fdb_entries', expected1, HOST) expected2 = {p1['network_id']: {'ports': {'20.0.0.1': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo( p1['mac_address'], p1_ips[0])]}, 'network_type': 'vxlan', 'segment_id': 1}} 
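                    # The targeted cast above gave HOST the entries already
                    # present on HOST_2; the fanout advertises only the
                    # newly wired HOST entries to the remaining agents.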
self.mock_fanout.assert_called_with( mock.ANY, 'add_fdb_entries', expected2) def test_fdb_add_called_two_networks(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST + '_2'} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: with self.subnet(cidr='10.1.0.0/24') as subnet2: with self.port(subnet=subnet2, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg): host_arg = {portbindings.HOST_ID: HOST} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port3: p1 = port1['port'] p3 = port3['port'] device1 = 'tap' + p1['id'] device3 = 'tap' + p3['id'] self.mock_cast.reset_mock() self.mock_fanout.reset_mock() self.callbacks.update_device_up( self.adminContext, agent_id=HOST + '_2', device=device1) self.callbacks.update_device_up( self.adminContext, agent_id=HOST, device=device3) p1_ips = [p['ip_address'] for p in p1['fixed_ips']] expected1 = {p1['network_id']: {'ports': {'20.0.0.2': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo( p1['mac_address'], p1_ips[0])]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_cast.assert_called_with( mock.ANY, 'add_fdb_entries', expected1, HOST) p3_ips = [p['ip_address'] for p in p3['fixed_ips']] expected2 = {p1['network_id']: {'ports': {'20.0.0.1': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo( p3['mac_address'], p3_ips[0])]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_fanout.assert_called_with( mock.ANY, 'add_fdb_entries', expected2) def test_fdb_add_called_dualstack(self): self._register_ml2_agents() host_arg = {portbindings.HOST_ID: HOST, 'admin_state_up': True} with self.subnet(self._network) as subnet,\ self.subnet( self._network, cidr='2001:db8::/64', ip_version=constants.IP_VERSION_6, gateway_ip='fe80::1', ipv6_address_mode=constants.IPV6_SLAAC) as subnet2: with self.port( subnet, fixed_ips=[{'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet2['subnet']['id']}], device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg ) as port: p1 = port['port'] device = 'tap' + p1['id'] self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device) p1_ips = [p['ip_address'] for p in p1['fixed_ips']] expected = {p1['network_id']: {'ports': {'20.0.0.1': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo( p1['mac_address'], p1_ips[0]), l2pop_rpc.PortInfo( p1['mac_address'], p1_ips[1])]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_fanout.assert_called_with( mock.ANY, 'add_fdb_entries', expected) def test_update_port_up_two_active_ports(self): '''The test will check that even with 2 active ports on the host, agent will be provided with the whole list of fdb entries. 
Bug 1789846 ''' self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST} # 2 ports on host 1 with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port2: # 1 port on another host to have fdb entree to update # agent on host 1 host_arg = {portbindings.HOST_ID: HOST + '_2'} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port3: p1 = port1['port'] p2 = port2['port'] p3 = port3['port'] # only ACTIVE ports count plugin = directory.get_plugin() p2['status'] = 'ACTIVE' plugin.update_port(self.adminContext, p2['id'], port2) p3['status'] = 'ACTIVE' plugin.update_port(self.adminContext, p3['id'], port3) self.mock_cast.reset_mock() p1['status'] = 'ACTIVE' plugin.update_port(self.adminContext, p1['id'], port1) # agent on host 1 should be updated with entry from # another host expected = {p3['network_id']: {'ports': {'20.0.0.2': [ constants.FLOODING_ENTRY, l2pop_rpc.PortInfo( p3['mac_address'], p3['fixed_ips'][0]['ip_address'])]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_cast.assert_called_once_with( mock.ANY, 'add_fdb_entries', expected, HOST) def test_update_port_down(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port2: p2 = port2['port'] device2 = 'tap' + p2['id'] self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device2) p1 = port1['port'] device1 = 'tap' + p1['id'] self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device1) self.mock_fanout.reset_mock() self.callbacks.update_device_down(self.adminContext, agent_id=HOST, device=device2) p2_ips = [p['ip_address'] for p in p2['fixed_ips']] expected = {p2['network_id']: {'ports': {'20.0.0.1': [l2pop_rpc.PortInfo( p2['mac_address'], p2_ips[0])]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_fanout.assert_called_with( mock.ANY, 'remove_fdb_entries', expected) def test_update_port_down_last_port_up(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg): with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port2: p2 = port2['port'] device2 = 'tap' + p2['id'] self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device2) self.callbacks.update_device_down(self.adminContext, agent_id=HOST, device=device2) p2_ips = [p['ip_address'] for p in p2['fixed_ips']] expected = {p2['network_id']: {'ports': {'20.0.0.1': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo( p2['mac_address'], p2_ips[0])]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_fanout.assert_called_with( mock.ANY, 'remove_fdb_entries', expected) def test_update_port_down_ha_router_port(self): router = self._create_ha_router() directory.add_plugin(plugin_constants.L3, self.plugin) with 
self.subnet(network=self._network, enable_dhcp=False) as snet: subnet = snet['subnet'] router_port = self._add_router_interface(subnet, router, HOST) router_port_device = 'tap' + router_port['id'] host_arg = {portbindings.HOST_ID: HOST_4, 'admin_state_up': True} with self.port(subnet=snet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: p1 = port1['port'] device1 = 'tap' + p1['id'] self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device1) self.mock_fanout.reset_mock() self.callbacks.update_device_down(self.adminContext, agent_id=HOST, device=router_port_device, host=HOST) router_port_ips = [ p['ip_address'] for p in router_port['fixed_ips']] expected = { router_port['network_id']: { 'ports': { '20.0.0.1': [ l2pop_rpc.PortInfo(router_port['mac_address'], router_port_ips[0])]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_fanout.assert_called_with( mock.ANY, 'remove_fdb_entries', expected) def test_delete_port(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port: p1 = port['port'] device = 'tap' + p1['id'] self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device) with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port2: p2 = port2['port'] device1 = 'tap' + p2['id'] self.mock_fanout.reset_mock() self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device1) self._delete('ports', port2['port']['id']) p2_ips = [p['ip_address'] for p in p2['fixed_ips']] expected = {p2['network_id']: {'ports': {'20.0.0.1': [l2pop_rpc.PortInfo( p2['mac_address'], p2_ips[0])]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_fanout.assert_any_call( mock.ANY, 'remove_fdb_entries', expected) def test_delete_port_last_port_up(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg): with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port: p1 = port['port'] device = 'tap' + p1['id'] self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device) self._delete('ports', port['port']['id']) p1_ips = [p['ip_address'] for p in p1['fixed_ips']] expected = {p1['network_id']: {'ports': {'20.0.0.1': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo( p1['mac_address'], p1_ips[0])]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_fanout.assert_any_call( mock.ANY, 'remove_fdb_entries', expected) def test_mac_addr_changed(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST + '_5'} with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: p1 = port1['port'] p1_ip = p1['fixed_ips'][0]['ip_address'] self.mock_fanout.reset_mock() device = 'tap' + p1['id'] old_mac = p1['mac_address'] mac = old_mac.split(':') mac[5] = '01' if mac[5] != '01' else '00' new_mac = ':'.join(mac) data = {'port': {'mac_address': new_mac, portbindings.HOST_ID: HOST}} req = self.new_update_request('ports', data, p1['id']) res = self.deserialize(self.fmt, 
req.get_response(self.api)) self.assertIn('port', res) self.assertEqual(new_mac, res['port']['mac_address']) # port was not bound before, so no fdb call expected yet self.assertFalse(self.mock_fanout.called) self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device) self.assertEqual(1, self.mock_fanout.call_count) add_expected = { p1['network_id']: { 'segment_id': 1, 'network_type': 'vxlan', 'ports': { '20.0.0.1': [ l2pop_rpc.PortInfo('00:00:00:00:00:00', '0.0.0.0'), l2pop_rpc.PortInfo(new_mac, p1_ip) ] } } } self.mock_fanout.assert_called_with( mock.ANY, 'add_fdb_entries', add_expected) def test_fixed_ips_changed_vlan(self): self._register_ml2_agents() with self.subnet(network=self._network2) as subnet: host_arg = {portbindings.HOST_ID: HOST} fixed_ips = [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.2'}] with self.port(subnet=subnet, cidr='10.0.0.0/24', device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), fixed_ips=fixed_ips, **host_arg) as port: p = port['port'] device = 'tap' + p['id'] self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device) data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.2'}, {'ip_address': '10.0.0.10'}]}} self.new_update_request('ports', data, p['id']) l2pop_mech = l2pop_mech_driver.L2populationMechanismDriver() l2pop_mech.L2PopulationAgentNotify = mock.Mock() l2notify = l2pop_mech.L2PopulationAgentNotify l2notify.update_fdb_entries = mock.Mock() self.assertFalse(l2notify.update_fdb_entries.called) def test_fixed_ips_changed(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST} fixed_ips = [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.2'}] with self.port(subnet=subnet, cidr='10.0.0.0/24', device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), fixed_ips=fixed_ips, **host_arg) as port1: p1 = port1['port'] device = 'tap' + p1['id'] self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device) self.mock_fanout.reset_mock() data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.2'}, {'ip_address': '10.0.0.10'}]}} req = self.new_update_request('ports', data, p1['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) ips = res['port']['fixed_ips'] self.assertEqual(2, len(ips)) add_expected = {'chg_ip': {p1['network_id']: {'20.0.0.1': {'after': [(p1['mac_address'], '10.0.0.10')]}}}} self.mock_fanout.assert_any_call( mock.ANY, 'update_fdb_entries', add_expected) self.mock_fanout.reset_mock() data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.2'}, {'ip_address': '10.0.0.16'}]}} req = self.new_update_request('ports', data, p1['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) ips = res['port']['fixed_ips'] self.assertEqual(2, len(ips)) upd_expected = {'chg_ip': {p1['network_id']: {'20.0.0.1': {'before': [(p1['mac_address'], '10.0.0.10')], 'after': [(p1['mac_address'], '10.0.0.16')]}}}} self.mock_fanout.assert_any_call( mock.ANY, 'update_fdb_entries', upd_expected) self.mock_fanout.reset_mock() data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.16'}]}} req = self.new_update_request('ports', data, p1['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) ips = res['port']['fixed_ips'] self.assertEqual(1, len(ips)) del_expected = {'chg_ip': {p1['network_id']: {'20.0.0.1': {'before': [(p1['mac_address'], '10.0.0.2')]}}}} self.mock_fanout.assert_any_call( mock.ANY, 'update_fdb_entries', del_expected) def 
test_no_fdb_updates_without_port_updates(self): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST} with self.port(subnet=subnet, cidr='10.0.0.0/24', device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: p1 = port1['port'] device = 'tap' + p1['id'] self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device) p1['status'] = 'ACTIVE' self.mock_fanout.reset_mock() plugin = directory.get_plugin() plugin.update_port(self.adminContext, p1['id'], port1) self.assertFalse(self.mock_fanout.called) def test_get_device_details_port_id(self): self._register_ml2_agents() host_arg = {portbindings.HOST_ID: HOST} with self.port(arg_list=(portbindings.HOST_ID,), **host_arg) as port: port_id = port['port']['id'] # ensure various formats all result in correct port_id formats = ['tap' + port_id[0:8], port_id, port['port']['mac_address']] for device in formats: details = self.callbacks.get_device_details( self.adminContext, device=device, agent_id=HOST_2) self.assertEqual(port_id, details['port_id']) def _update_and_check_portbinding(self, port_id, host_id): data = {'port': {portbindings.HOST_ID: host_id}} req = self.new_update_request('ports', data, port_id) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(host_id, res['port'][portbindings.HOST_ID]) def _test_host_changed(self, twice): self._register_ml2_agents() with self.subnet(network=self._network) as subnet: host_arg = {portbindings.HOST_ID: HOST} with self.port(subnet=subnet, cidr='10.0.0.0/24', device_owner=DEVICE_OWNER_COMPUTE, arg_list=(portbindings.HOST_ID,), **host_arg) as port1: tunnel_ip = '20.0.0.1' p1 = port1['port'] device1 = 'tap' + p1['id'] self.callbacks.update_device_up( self.adminContext, agent_id=HOST, device=device1) if twice: tunnel_ip = '20.0.0.4' self._update_and_check_portbinding(p1['id'], HOST_4) self.callbacks.update_device_up(self.adminContext, agent_id=HOST_4, device=device1) self.mock_fanout.reset_mock() self._update_and_check_portbinding(p1['id'], HOST_2) p1_ips = [p['ip_address'] for p in p1['fixed_ips']] expected = {p1['network_id']: {'ports': {tunnel_ip: [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo( p1['mac_address'], p1_ips[0])]}, 'network_type': 'vxlan', 'segment_id': 1}} self.mock_fanout.assert_called_with( mock.ANY, 'remove_fdb_entries', expected) def test_host_changed(self): self._test_host_changed(twice=False) def test_host_changed_twice(self): self._test_host_changed(twice=True) def test_delete_port_no_fdb_entries_with_ha_port(self): l2pop_mech = l2pop_mech_driver.L2populationMechanismDriver() l2pop_mech.L2PopulationAgentNotify = mock.Mock() l2pop_mech.rpc_ctx = mock.Mock() port = {'device_owner': l2pop_db.HA_ROUTER_PORTS[0]} context = mock.Mock() context.current = port with mock.patch.object(l2pop_mech, '_get_agent_fdb', return_value=None) as upd_port_down,\ mock.patch.object(l2pop_mech.L2PopulationAgentNotify, 'remove_fdb_entries'): l2pop_mech.delete_port_postcommit(context) self.assertTrue(upd_port_down.called) def test_delete_port_invokes_update_device_down(self): l2pop_mech = l2pop_mech_driver.L2populationMechanismDriver() l2pop_mech.L2PopulationAgentNotify = mock.Mock() l2pop_mech.rpc_ctx = mock.Mock() port = {'device_owner': ''} context = mock.Mock() context.current = port with mock.patch.object(l2pop_mech, '_get_agent_fdb', return_value=None) as upd_port_down,\ mock.patch.object(l2pop_mech.L2PopulationAgentNotify, 'remove_fdb_entries'): 
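# _get_agent_fdb is stubbed to return None here (no fdb entries to flush
# for this agent); the assertion below only verifies that
# delete_port_postcommit consulted it on the way out.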
l2pop_mech.delete_port_postcommit(context) self.assertTrue(upd_port_down.called) def test_delete_unbound_port(self): self._test_delete_port_handles_agentless_host_id(None) def test_delete_port_bound_to_agentless_host(self): self._test_delete_port_handles_agentless_host_id('test') def _test_delete_port_handles_agentless_host_id(self, host): l2pop_mech = l2pop_mech_driver.L2populationMechanismDriver() l2pop_mech.initialize() with self.port() as port: port['port'][portbindings.HOST_ID] = host bindings = [models.PortBindingLevel()] port_context = driver_context.PortContext( self.driver, self.context, port['port'], self.driver.get_network( self.context, port['port']['network_id']), models.PortBinding(), bindings) # The point is to provide coverage and to assert that no exceptions # are raised. l2pop_mech.delete_port_postcommit(port_context) def test_delete_dvr_snat_port_fdb_entries(self): l2pop_mech = l2pop_mech_driver.L2populationMechanismDriver() l2pop_mech.initialize() self._setup_l3() with self.subnet(network=self._network, enable_dhcp=False) as snet: host_arg = {portbindings.HOST_ID: HOST, 'admin_state_up': True} with self.port(subnet=snet, device_owner=constants.DEVICE_OWNER_ROUTER_SNAT, arg_list=(portbindings.HOST_ID,), **host_arg) as p: device = 'tap' + p['port']['id'] self.callbacks.update_device_up(self.adminContext, agent_id=HOST, device=device) self.mock_fanout.reset_mock() p['port'][portbindings.HOST_ID] = HOST bindings = [models.PortBindingLevel()] port_context = driver_context.PortContext( self.driver, self.context, p['port'], self.driver.get_network( self.context, p['port']['network_id']), models.PortBinding(), bindings) fdbs = { p['port']['network_id']: { 'segment_id': 'fakeid', 'ports': {}, } } mock.patch.object( l2pop_mech, '_get_agent_fdb', return_value=fdbs).start() # The point is to provide coverage and to assert that # no exceptions are raised. l2pop_mech.delete_port_postcommit(port_context) def test_fixed_ips_change_unbound_port_no_rpc(self): l2pop_mech = l2pop_mech_driver.L2populationMechanismDriver() l2pop_mech.initialize() l2pop_mech.L2populationAgentNotify = mock.Mock() with self.port() as port: port_context = driver_context.PortContext( self.driver, self.context, port['port'], self.driver.get_network( self.context, port['port']['network_id']), models.PortBinding(), None) l2pop_mech._fixed_ips_changed( port_context, None, port['port'], (set(['10.0.0.1']), set())) # There's no need to send an RPC update if the IP address for an # unbound port changed. 
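# (The PortContext above was built from a fresh models.PortBinding() with
# no binding levels, so there is no bound host whose agent could be
# notified in the first place.)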
self.assertFalse( l2pop_mech.L2populationAgentNotify.update_fdb_entries.called) class TestL2PopulationMechDriver(base.BaseTestCase): def _test_get_tunnels(self, agent_ip, exclude_host=True): mech_driver = l2pop_mech_driver.L2populationMechanismDriver() agent = mock.Mock() agent.host = HOST network_ports = ((None, agent),) with mock.patch.object(l2pop_db, 'get_agent_ip', return_value=agent_ip): excluded_host = HOST + '-EXCLUDE' if exclude_host else HOST return mech_driver._get_tunnels(network_ports, excluded_host) def test_get_tunnels(self): tunnels = self._test_get_tunnels('20.0.0.1') self.assertIn('20.0.0.1', tunnels) def test_get_tunnels_no_ip(self): tunnels = self._test_get_tunnels(None) self.assertEqual(0, len(tunnels)) def test_get_tunnels_dont_exclude_host(self): tunnels = self._test_get_tunnels(None, exclude_host=False) self.assertEqual(0, len(tunnels)) def _test_create_agent_fdb(self, fdb_network_ports, agent_ips): mech_driver = l2pop_mech_driver.L2populationMechanismDriver() tunnel_network_ports, tunnel_agent = ( self._mock_network_ports(HOST + '1', [None])) agent_ips[tunnel_agent] = '10.0.0.1' def agent_ip_side_effect(agent): return agent_ips[agent] with mock.patch.object(l2pop_db, 'get_agent_ip', side_effect=agent_ip_side_effect),\ mock.patch.object(l2pop_db, 'get_nondistributed_active_network_ports', return_value=fdb_network_ports),\ mock.patch.object(l2pop_db, 'get_distributed_active_network_ports', return_value=tunnel_network_ports): agent = mock.Mock() agent.host = HOST segment = {'segmentation_id': 1, 'network_type': 'vxlan'} return mech_driver._create_agent_fdb(context, agent, segment, 'network_id') def _mock_network_ports(self, host_name, bindings): agent = mock.Mock() agent.host = host_name return [(binding, agent) for binding in bindings], agent def test_create_agent_fdb(self): binding = mock.Mock() binding.port = {'mac_address': '00:00:DE:AD:BE:EF', 'fixed_ips': [{'ip_address': '1.1.1.1'}]} fdb_network_ports, fdb_agent = ( self._mock_network_ports(HOST + '2', [binding])) agent_ips = {fdb_agent: '20.0.0.1'} agent_fdb = self._test_create_agent_fdb(fdb_network_ports, agent_ips) result = agent_fdb['network_id'] expected_result = {'segment_id': 1, 'network_type': 'vxlan', 'ports': {'10.0.0.1': [constants.FLOODING_ENTRY], '20.0.0.1': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo( mac_address='00:00:DE:AD:BE:EF', ip_address='1.1.1.1')]}} self.assertEqual(expected_result, result) def test_create_agent_fdb_only_tunnels(self): agent_fdb = self._test_create_agent_fdb([], {}) result = agent_fdb['network_id'] expected_result = {'segment_id': 1, 'network_type': 'vxlan', 'ports': {'10.0.0.1': [constants.FLOODING_ENTRY]}} self.assertEqual(expected_result, result) def test_create_agent_fdb_concurrent_port_deletion(self): binding = mock.Mock() binding.port = {'mac_address': '00:00:DE:AD:BE:EF', 'fixed_ips': [{'ip_address': '1.1.1.1'}]} binding2 = mock.Mock() # the port was deleted binding2.port = None fdb_network_ports, fdb_agent = ( self._mock_network_ports(HOST + '2', [binding, binding2])) agent_ips = {fdb_agent: '20.0.0.1'} agent_fdb = self._test_create_agent_fdb(fdb_network_ports, agent_ips) result = agent_fdb['network_id'] expected_result = {'segment_id': 1, 'network_type': 'vxlan', 'ports': {'10.0.0.1': [constants.FLOODING_ENTRY], '20.0.0.1': [constants.FLOODING_ENTRY, l2pop_rpc.PortInfo( mac_address='00:00:DE:AD:BE:EF', ip_address='1.1.1.1')]}} self.assertEqual(expected_result, result) def test_update_port_precommit_mac_address_changed_raises(self): port = {'status': u'ACTIVE', 
'device_owner': DEVICE_OWNER_COMPUTE, 'mac_address': u'12:34:56:78:4b:0e', 'id': u'1'} original_port = port.copy() original_port['mac_address'] = u'12:34:56:78:4b:0f' with mock.patch.object(driver_context.segments_db, 'get_network_segments'): ctx = driver_context.PortContext(mock.Mock(), mock.Mock(), port, mock.MagicMock(), models.PortBinding(), [models.PortBindingLevel()], original_port=original_port) mech_driver = l2pop_mech_driver.L2populationMechanismDriver() with testtools.ExpectedException(exceptions.InvalidInput): mech_driver.update_port_precommit(ctx) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.467046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/0000755000175000017500000000000000000000000027123 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/__init__.py0000644000175000017500000000000000000000000031222 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.467046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/0000755000175000017500000000000000000000000030221 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/__init__.py0000644000175000017500000000000000000000000032320 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000111 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/ 27 mtime=1586982291.467046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/extension_drivers0000755000175000017500000000000000000000000033714 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/__init__.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/extension_drivers0000644000175000017500000000000000000000000033704 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022700000000000011456 xustar0000000000000000129 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/test_qos_driver.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/extension_drivers0000644000175000017500000003023000000000000033714 0ustar00coreycorey00000000000000# Copyright 2016 OVH SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
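# A minimal standalone sketch of the hex formatting exercised by the DSCP
# tests in this module: for DSCP mark 32 the driver is expected to emit an
# iptables mangle rule of the form "-j DSCP --set-dscp 0x20", and the tests
# build that string with format(value, '#04x'). _dscp_hex_example is a
# hypothetical helper added for illustration only, not part of the driver
# or test API.
def _dscp_hex_example(dscp_mark_value=32):
    # format(32, '#04x') == '0x20': '#' adds the '0x' prefix and 04
    # zero-pads the result to four characters including that prefix.
    return "-j DSCP --set-dscp %s" % format(dscp_mark_value, '#04x')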
import mock from neutron_lib import constants from oslo_config import cfg from oslo_utils import uuidutils from neutron.agent.linux import tc_lib from neutron.objects.qos import rule from neutron.plugins.ml2.drivers.linuxbridge.agent.common import config # noqa from neutron.plugins.ml2.drivers.linuxbridge.agent.extension_drivers import ( qos_driver) from neutron.tests import base TEST_LATENCY_VALUE = 100 DSCP_VALUE = 32 class QosLinuxbridgeAgentDriverTestCase(base.BaseTestCase): def setUp(self): super(QosLinuxbridgeAgentDriverTestCase, self).setUp() cfg.CONF.set_override("tbf_latency", TEST_LATENCY_VALUE, "QOS") self.qos_driver = qos_driver.QosLinuxbridgeAgentDriver() self.qos_driver.initialize() self.rule_egress_bw_limit = self._create_bw_limit_rule_obj( constants.EGRESS_DIRECTION) self.rule_ingress_bw_limit = self._create_bw_limit_rule_obj( constants.INGRESS_DIRECTION) self.rule_dscp_marking = self._create_dscp_marking_rule_obj() self.port = self._create_fake_port(uuidutils.generate_uuid()) def _create_bw_limit_rule_obj(self, direction): rule_obj = rule.QosBandwidthLimitRule() rule_obj.id = uuidutils.generate_uuid() rule_obj.max_kbps = 2 rule_obj.max_burst_kbps = 200 rule_obj.direction = direction rule_obj.obj_reset_changes() return rule_obj def _create_dscp_marking_rule_obj(self): rule_obj = rule.QosDscpMarkingRule() rule_obj.id = uuidutils.generate_uuid() rule_obj.dscp_mark = DSCP_VALUE rule_obj.obj_reset_changes() return rule_obj def _create_fake_port(self, policy_id): return {'qos_policy_id': policy_id, 'network_qos_policy_id': None, 'device': 'fake_tap'} def _dscp_mark_chain_name(self, device): return "qos-o%s" % device[3:] def _dscp_postrouting_rule(self, device): return ("-m physdev --physdev-in %s --physdev-is-bridged " "-j $qos-o%s") % (device, device[3:]) def _dscp_rule(self, dscp_mark_value): return "-j DSCP --set-dscp %s" % format(dscp_mark_value, '#04x') def _dscp_rule_tag(self, device): return "dscp-%s" % device def test_initialize_iptables_manager_passed_through_api(self): iptables_manager = mock.Mock() qos_drv = qos_driver.QosLinuxbridgeAgentDriver() with mock.patch.object( qos_drv, "agent_api" ) as agent_api, mock.patch( "neutron.agent.linux.iptables_manager.IptablesManager" ) as IptablesManager: agent_api.get_iptables_manager.return_value = ( iptables_manager) qos_drv.initialize() self.assertEqual(iptables_manager, qos_drv.iptables_manager) self.assertNotEqual(IptablesManager(), qos_drv.iptables_manager) iptables_manager.initialize_mangle_table.assert_called_once_with() def test_initialize_iptables_manager_not_passed_through_api(self): qos_drv = qos_driver.QosLinuxbridgeAgentDriver() with mock.patch.object( qos_drv, "agent_api" ) as agent_api, mock.patch( "neutron.agent.linux.iptables_manager.IptablesManager" ) as IptablesManager: agent_api.get_iptables_manager.return_value = None qos_drv.initialize() self.assertEqual(IptablesManager(), qos_drv.iptables_manager) IptablesManager().initialize_mangle_table.assert_called_once_with() def test_initialize_iptables_manager_no_agent_api(self): qos_drv = qos_driver.QosLinuxbridgeAgentDriver() with mock.patch( "neutron.agent.linux.iptables_manager.IptablesManager" ) as IptablesManager: qos_driver.agent_api = None qos_drv.initialize() self.assertEqual(IptablesManager(), qos_drv.iptables_manager) IptablesManager().initialize_mangle_table.assert_called_once_with() def test_create_egress_bandwidth_limit(self): with mock.patch.object( tc_lib.TcCommand, "set_filters_bw_limit" ) as set_filters_bw_limit, mock.patch.object( 
tc_lib.TcCommand, "set_tbf_bw_limit" ) as set_tbf_limit: self.qos_driver.create_bandwidth_limit(self.port, self.rule_egress_bw_limit) set_filters_bw_limit.assert_called_once_with( self.rule_egress_bw_limit.max_kbps, self.rule_egress_bw_limit.max_burst_kbps, ) set_tbf_limit.assert_not_called() def test_create_ingress_bandwidth_limit(self): with mock.patch.object( tc_lib.TcCommand, "set_filters_bw_limit" ) as set_filters_bw_limit, mock.patch.object( tc_lib.TcCommand, "set_tbf_bw_limit" ) as set_tbf_limit: self.qos_driver.create_bandwidth_limit(self.port, self.rule_ingress_bw_limit) set_filters_bw_limit.assert_not_called() set_tbf_limit.assert_called_once_with( self.rule_ingress_bw_limit.max_kbps, self.rule_ingress_bw_limit.max_burst_kbps, TEST_LATENCY_VALUE ) def test_update_egress_bandwidth_limit(self): with mock.patch.object( tc_lib.TcCommand, "update_filters_bw_limit" ) as update_filters_bw_limit, mock.patch.object( tc_lib.TcCommand, "set_tbf_bw_limit" ) as set_tbf_bw_limit: self.qos_driver.update_bandwidth_limit(self.port, self.rule_egress_bw_limit) update_filters_bw_limit.assert_called_once_with( self.rule_egress_bw_limit.max_kbps, self.rule_egress_bw_limit.max_burst_kbps, ) set_tbf_bw_limit.assert_not_called() def test_update_ingress_bandwidth_limit(self): with mock.patch.object( tc_lib.TcCommand, "update_filters_bw_limit" ) as update_filters_bw_limit, mock.patch.object( tc_lib.TcCommand, "set_tbf_bw_limit" ) as set_tbf_bw_limit: self.qos_driver.update_bandwidth_limit(self.port, self.rule_ingress_bw_limit) update_filters_bw_limit.assert_not_called() set_tbf_bw_limit.assert_called_once_with( self.rule_egress_bw_limit.max_kbps, self.rule_egress_bw_limit.max_burst_kbps, TEST_LATENCY_VALUE ) def test_delete_bandwidth_limit(self): with mock.patch.object( tc_lib.TcCommand, "delete_filters_bw_limit" ) as delete_filters_bw_limit: self.qos_driver.delete_bandwidth_limit(self.port) delete_filters_bw_limit.assert_called_once_with() def test_delete_ingress_bandwidth_limit(self): with mock.patch.object( tc_lib.TcCommand, "delete_tbf_bw_limit" ) as delete_tbf_bw_limit: self.qos_driver.delete_bandwidth_limit_ingress(self.port) delete_tbf_bw_limit.assert_called_once_with() def test_create_dscp_marking(self): expected_calls = [ mock.call.add_chain( self._dscp_mark_chain_name(self.port['device'])), mock.call.add_rule( "POSTROUTING", self._dscp_postrouting_rule(self.port['device'])), mock.call.add_rule( self._dscp_mark_chain_name(self.port['device']), self._dscp_rule(DSCP_VALUE), tag=self._dscp_rule_tag(self.port['device']) ) ] with mock.patch.object(self.qos_driver, "iptables_manager") as iptables_manager: iptables_manager.ip4['mangle'] = mock.Mock() iptables_manager.ip6['mangle'] = mock.Mock() self.qos_driver.create_dscp_marking( self.port, self.rule_dscp_marking) iptables_manager.ipv4['mangle'].assert_has_calls(expected_calls) iptables_manager.ipv6['mangle'].assert_has_calls(expected_calls) def test_update_dscp_marking(self): expected_calls = [ mock.call.clear_rules_by_tag( self._dscp_rule_tag(self.port['device'])), mock.call.add_chain( self._dscp_mark_chain_name(self.port['device'])), mock.call.add_rule( "POSTROUTING", self._dscp_postrouting_rule(self.port['device'])), mock.call.add_rule( self._dscp_mark_chain_name(self.port['device']), self._dscp_rule(DSCP_VALUE), tag=self._dscp_rule_tag(self.port['device']) ) ] with mock.patch.object(self.qos_driver, "iptables_manager") as iptables_manager: iptables_manager.ip4['mangle'] = mock.Mock() iptables_manager.ip6['mangle'] = mock.Mock() 
self.qos_driver.update_dscp_marking( self.port, self.rule_dscp_marking) iptables_manager.ipv4['mangle'].assert_has_calls(expected_calls) iptables_manager.ipv6['mangle'].assert_has_calls(expected_calls) def test_delete_dscp_marking_chain_empty(self): dscp_chain_name = self._dscp_mark_chain_name(self.port['device']) expected_calls = [ mock.call.clear_rules_by_tag( self._dscp_rule_tag(self.port['device'])), mock.call.remove_chain( dscp_chain_name), ] with mock.patch.object(self.qos_driver, "iptables_manager") as iptables_manager: iptables_manager.ip4['mangle'] = mock.Mock() iptables_manager.ip6['mangle'] = mock.Mock() iptables_manager.get_chain = mock.Mock(return_value=[]) self.qos_driver.delete_dscp_marking(self.port) iptables_manager.ipv4['mangle'].assert_has_calls(expected_calls) iptables_manager.ipv6['mangle'].assert_has_calls(expected_calls) iptables_manager.get_chain.assert_has_calls([ mock.call("mangle", dscp_chain_name, ip_version=constants.IP_VERSION_4), mock.call("mangle", dscp_chain_name, ip_version=constants.IP_VERSION_6) ]) def test_delete_dscp_marking_chain_not_empty(self): dscp_chain_name = self._dscp_mark_chain_name(self.port['device']) expected_calls = [ mock.call.clear_rules_by_tag( self._dscp_rule_tag(self.port['device'])), ] with mock.patch.object(self.qos_driver, "iptables_manager") as iptables_manager: iptables_manager.ip4['mangle'] = mock.Mock() iptables_manager.ip6['mangle'] = mock.Mock() iptables_manager.get_chain = mock.Mock( return_value=["some other rule"]) self.qos_driver.delete_dscp_marking(self.port) iptables_manager.ipv4['mangle'].assert_has_calls(expected_calls) iptables_manager.ipv6['mangle'].assert_has_calls(expected_calls) iptables_manager.get_chain.assert_has_calls([ mock.call("mangle", dscp_chain_name, ip_version=constants.IP_VERSION_4), mock.call("mangle", dscp_chain_name, ip_version=constants.IP_VERSION_6) ]) iptables_manager.ipv4['mangle'].remove_chain.assert_not_called() iptables_manager.ipv4['mangle'].remove_rule.assert_not_called() ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/test_arp_protect.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/test_arp_protect.0000644000175000017500000001735700000000000033620 0ustar00coreycorey00000000000000# Copyright (c) 2018 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
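# A minimal sketch of the ebtables invocation shape asserted throughout this
# module: one ACCEPT rule per known-good source IP in the per-VIF ARP
# protection chain. _expected_arp_accept_call is a hypothetical helper added
# for illustration; the chain prefix "neutronARP-" is an assumption made
# here, while the tests below derive the real name from
# arp_protect.SPOOF_CHAIN_PREFIX instead.
def _expected_arp_accept_call(vif, ip_address, chain_prefix="neutronARP-"):
    # Mirrors the positional argv that the tests expect to be handed to
    # utils.execute() when arp_protect whitelists a source address.
    return ['ebtables', '-t', 'nat', '--concurrent', '-A',
            chain_prefix + vif, '-p', 'ARP',
            '--arp-ip-src', ip_address, '-j', 'ACCEPT']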
import mock from neutron_lib import constants from neutron.agent.common import utils from neutron.plugins.ml2.drivers.linuxbridge.agent import arp_protect from neutron.tests import base VIF = 'vif_tap0' PORT_NO_SEC = {'port_security_enabled': False} PORT_TRUSTED = {'device_owner': constants.DEVICE_OWNER_ROUTER_GW} PORT = {'fixed_ips': [{'ip_address': '10.1.1.1'}], 'device_owner': 'nobody', 'mac_address': '00:11:22:33:44:55'} PORT_ADDR_PAIR = {'fixed_ips': [{'ip_address': '10.1.1.1'}], 'device_owner': 'nobody', 'mac_address': '00:11:22:33:44:55', 'allowed_address_pairs': [ {'mac_address': '00:11:22:33:44:66', 'ip_address': '10.1.1.2'}]} class TestLinuxBridgeARPSpoofing(base.BaseTestCase): def setUp(self): super(TestLinuxBridgeARPSpoofing, self).setUp() self.execute = mock.patch.object(utils, "execute").start() @mock.patch.object(arp_protect, "delete_arp_spoofing_protection") def test_port_no_security(self, dasp): arp_protect.setup_arp_spoofing_protection(VIF, PORT_NO_SEC) dasp.assert_called_with([VIF]) @mock.patch.object(arp_protect, "delete_arp_spoofing_protection") def test_port_trusted(self, dasp): arp_protect.setup_arp_spoofing_protection(VIF, PORT_TRUSTED) dasp.assert_called_with([VIF]) def _test_port_add_arp_spoofing(self, vif, port): mac_addresses = {port['mac_address']} ip_addresses = {p['ip_address'] for p in port['fixed_ips']} if port.get('allowed_address_pairs'): mac_addresses |= {p['mac_address'] for p in port['allowed_address_pairs']} ip_addresses |= {p['ip_address'] for p in port['allowed_address_pairs']} spoof_chain = arp_protect.SPOOF_CHAIN_PREFIX + vif mac_chain = arp_protect.MAC_CHAIN_PREFIX + vif expected = [ mock.call(['ebtables', '-t', 'nat', '--concurrent', '-L'], check_exit_code=True, extra_ok_codes=None, log_fail_as_error=True, run_as_root=True), mock.ANY, mock.ANY, mock.call(['ebtables', '-t', 'nat', '--concurrent', '-N', 'neutronMAC-%s' % vif, '-P', 'DROP'], check_exit_code=True, extra_ok_codes=None, log_fail_as_error=True, run_as_root=True), mock.ANY, mock.call(['ebtables', '-t', 'nat', '--concurrent', '-A', 'PREROUTING', '-i', vif, '-j', mac_chain], check_exit_code=True, extra_ok_codes=None, log_fail_as_error=True, run_as_root=True), mock.call(['ebtables', '-t', 'nat', '--concurrent', '-A', mac_chain, '-i', vif, '--among-src', '%s' % ','.join(sorted(mac_addresses)), '-j', 'RETURN'], check_exit_code=True, extra_ok_codes=None, log_fail_as_error=True, run_as_root=True), mock.ANY, mock.ANY, mock.call(['ebtables', '-t', 'nat', '--concurrent', '-N', spoof_chain, '-P', 'DROP'], check_exit_code=True, extra_ok_codes=None, log_fail_as_error=True, run_as_root=True), mock.call(['ebtables', '-t', 'nat', '--concurrent', '-F', spoof_chain], check_exit_code=True, extra_ok_codes=None, log_fail_as_error=True, run_as_root=True), ] for addr in sorted(ip_addresses): expected.extend([ mock.call(['ebtables', '-t', 'nat', '--concurrent', '-A', spoof_chain, '-p', 'ARP', '--arp-ip-src', addr, '-j', 'ACCEPT'], check_exit_code=True, extra_ok_codes=None, log_fail_as_error=True, run_as_root=True), ]) expected.extend([ mock.ANY, mock.call(['ebtables', '-t', 'nat', '--concurrent', '-A', 'PREROUTING', '-i', vif, '-j', spoof_chain, '-p', 'ARP'], check_exit_code=True, extra_ok_codes=None, log_fail_as_error=True, run_as_root=True), ]) arp_protect.setup_arp_spoofing_protection(vif, port) self.execute.assert_has_calls(expected) def test_port_add_arp_spoofing(self): self._test_port_add_arp_spoofing(VIF, PORT) def test_port_add_arp_spoofing_addr_pair(self): self._test_port_add_arp_spoofing(VIF, 
PORT_ADDR_PAIR) @mock.patch.object(arp_protect, "chain_exists", return_value=True) @mock.patch.object(arp_protect, "vif_jump_present", return_value=True) def test_port_delete_arp_spoofing(self, ce, vjp): spoof_chain = arp_protect.SPOOF_CHAIN_PREFIX + VIF mac_chain = arp_protect.MAC_CHAIN_PREFIX + VIF expected = [ mock.call(['ebtables', '-t', 'nat', '--concurrent', '-L'], check_exit_code=True, extra_ok_codes=None, log_fail_as_error=True, run_as_root=True), mock.ANY, mock.call(['ebtables', '-t', 'nat', '--concurrent', '-D', 'PREROUTING', '-i', VIF, '-j', spoof_chain, '-p', 'ARP'], check_exit_code=True, extra_ok_codes=None, log_fail_as_error=True, run_as_root=True), mock.call(['ebtables', '-t', 'nat', '--concurrent', '-X', spoof_chain], check_exit_code=True, extra_ok_codes=None, log_fail_as_error=True, run_as_root=True), mock.ANY, mock.call(['ebtables', '-t', 'nat', '--concurrent', '-X', mac_chain], check_exit_code=True, extra_ok_codes=None, log_fail_as_error=True, run_as_root=True), mock.call(['ebtables', '-t', 'filter', '--concurrent', '-L'], check_exit_code=True, extra_ok_codes=None, log_fail_as_error=True, run_as_root=True), mock.ANY, mock.call(['ebtables', '-t', 'filter', '--concurrent', '-D', 'FORWARD', '-i', VIF, '-j', spoof_chain, '-p', 'ARP'], check_exit_code=True, extra_ok_codes=None, log_fail_as_error=True, run_as_root=True), mock.call(['ebtables', '-t', 'filter', '--concurrent', '-X', spoof_chain], check_exit_code=True, extra_ok_codes=None, log_fail_as_error=True, run_as_root=True), mock.ANY, mock.call(['ebtables', '-t', 'filter', '--concurrent', '-X', mac_chain], check_exit_code=True, extra_ok_codes=None, log_fail_as_error=True, run_as_root=True), ] arp_protect.delete_arp_spoofing_protection([VIF]) self.execute.assert_has_calls(expected) ././@PaxHeader0000000000000000000000000000023200000000000011452 xustar0000000000000000132 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/test_linuxbridge_agent_extension_api.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/test_linuxbridge_0000644000175000017500000000230700000000000033660 0ustar00coreycorey00000000000000# Copyright 2017 OVH SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
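# The API under test is essentially a thin holder around the agent's shared
# IptablesManager. A minimal sketch of that shape (a hypothetical stand-in
# for illustration only; the test imports the real implementation below):
class _AgentExtensionAPISketch(object):
    def __init__(self, iptables_manager):
        # Keep a reference to the manager owned by the L2 agent.
        self._iptables_manager = iptables_manager

    def get_iptables_manager(self):
        # Agent extensions (e.g. the QoS DSCP driver) call this to install
        # their rules through the agent's iptables state.
        return self._iptables_manager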
import mock from neutron.plugins.ml2.drivers.linuxbridge.agent import \ linuxbridge_agent_extension_api as ext_api from neutron.tests import base class TestLinuxbridgeAgentExtensionAPI(base.BaseTestCase): def setUp(self): super(TestLinuxbridgeAgentExtensionAPI, self).setUp() self.iptables_manager = mock.Mock() self.extension_api = ext_api.LinuxbridgeAgentExtensionAPI( self.iptables_manager) def test_get_iptables_manager(self): self.assertEqual(self.iptables_manager, self.extension_api.get_iptables_manager()) ././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/test_linuxbridge_neutron_agent.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/test_linuxbridge_0000644000175000017500000015507400000000000033672 0ustar00coreycorey00000000000000# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import sys import mock from neutron_lib import constants from neutron_lib import exceptions from oslo_config import cfg from neutron.agent.linux import bridge_lib from neutron.agent.linux import ip_lib from neutron.agent.linux import utils from neutron.plugins.ml2.drivers.agent import _agent_manager_base as amb from neutron.plugins.ml2.drivers.linuxbridge.agent.common \ import constants as lconst from neutron.plugins.ml2.drivers.linuxbridge.agent \ import linuxbridge_neutron_agent from neutron.tests import base LOCAL_IP = '192.168.0.33' LOCAL_IPV6 = '2001:db8:1::33' VXLAN_GROUPV6 = 'ff05::/120' PORT_1 = 'abcdef01-12ddssdfds-fdsfsd' DEVICE_1 = 'tapabcdef01-12' NETWORK_ID = '57653b20-ed5b-4ed0-a31d-06f84e3fd909' BRIDGE_MAPPING_VALUE = 'br-eth2' BRIDGE_MAPPINGS = {'physnet0': BRIDGE_MAPPING_VALUE} INTERFACE_MAPPINGS = {'physnet1': 'eth1'} FAKE_DEFAULT_DEV = mock.Mock() FAKE_DEFAULT_DEV.name = 'eth1' PORT_DATA = { "port_id": PORT_1, "device": DEVICE_1 } class FakeIpLinkCommand(object): def set_up(self): pass def set_mtu(self, mtu): pass class FakeIpDevice(object): def __init__(self): self.link = FakeIpLinkCommand() def disable_ipv6(self): pass def get_linuxbridge_manager(bridge_mappings, interface_mappings): with mock.patch.object(ip_lib.IPWrapper, 'get_device_by_ip', return_value=FAKE_DEFAULT_DEV),\ mock.patch.object(ip_lib, 'device_exists', return_value=True),\ mock.patch.object(linuxbridge_neutron_agent.LinuxBridgeManager, 'check_vxlan_support'): cfg.CONF.set_override('local_ip', LOCAL_IP, 'VXLAN') return linuxbridge_neutron_agent.LinuxBridgeManager( bridge_mappings, interface_mappings) class TestLinuxBridge(base.BaseTestCase): def setUp(self): super(TestLinuxBridge, self).setUp() self.linux_bridge = get_linuxbridge_manager( BRIDGE_MAPPINGS, INTERFACE_MAPPINGS) def test_ensure_physical_in_bridge_invalid(self): result = self.linux_bridge.ensure_physical_in_bridge( 'network_id', constants.TYPE_VLAN, 'physnetx', 7, 1450) self.assertFalse(result) def 
test_ensure_physical_in_bridge_flat(self): with mock.patch.object(self.linux_bridge, 'ensure_flat_bridge') as flat_bridge_func: self.linux_bridge.ensure_physical_in_bridge( 'network_id', constants.TYPE_FLAT, 'physnet1', None, 1450) self.assertTrue(flat_bridge_func.called) def test_ensure_physical_in_bridge_vlan(self): with mock.patch.object(self.linux_bridge, 'ensure_vlan_bridge') as vlan_bridge_func: self.linux_bridge.ensure_physical_in_bridge( 'network_id', constants.TYPE_VLAN, 'physnet1', 7, 1450) self.assertTrue(vlan_bridge_func.called) def test_ensure_physical_in_bridge_vxlan(self): self.linux_bridge.vxlan_mode = lconst.VXLAN_UCAST with mock.patch.object(self.linux_bridge, 'ensure_vxlan_bridge') as vxlan_bridge_func: self.linux_bridge.ensure_physical_in_bridge( 'network_id', 'vxlan', 'physnet1', 7, 1450) self.assertTrue(vxlan_bridge_func.called) class TestLinuxBridgeManager(base.BaseTestCase): def setUp(self): super(TestLinuxBridgeManager, self).setUp() self.lbm = get_linuxbridge_manager( BRIDGE_MAPPINGS, INTERFACE_MAPPINGS) def test_local_ip_validation_with_valid_ip(self): with mock.patch.object(ip_lib.IPWrapper, 'get_device_by_ip', return_value=FAKE_DEFAULT_DEV): self.lbm.local_ip = LOCAL_IP result = self.lbm.get_local_ip_device() self.assertEqual(FAKE_DEFAULT_DEV, result) def test_local_ip_validation_with_invalid_ip(self): with mock.patch.object(ip_lib.IPWrapper, 'get_device_by_ip', return_value=None),\ mock.patch.object(sys, 'exit') as exit,\ mock.patch.object(linuxbridge_neutron_agent.LOG, 'error') as log: self.lbm.local_ip = LOCAL_IP self.lbm.get_local_ip_device() self.assertEqual(1, log.call_count) exit.assert_called_once_with(1) def _test_vxlan_group_validation(self, bad_local_ip, bad_vxlan_group): with mock.patch.object(ip_lib.IPWrapper, 'get_device_by_ip', return_value=FAKE_DEFAULT_DEV),\ mock.patch.object(sys, 'exit') as exit,\ mock.patch.object(linuxbridge_neutron_agent.LOG, 'error') as log: self.lbm.local_ip = bad_local_ip cfg.CONF.set_override('vxlan_group', bad_vxlan_group, 'VXLAN') self.lbm.validate_vxlan_group_with_local_ip() self.assertEqual(1, log.call_count) exit.assert_called_once_with(1) def test_vxlan_group_validation_with_mismatched_local_ip(self): self._test_vxlan_group_validation(LOCAL_IP, VXLAN_GROUPV6) def test_vxlan_group_validation_with_unicast_group(self): self._test_vxlan_group_validation(LOCAL_IP, '240.0.0.0') def test_vxlan_group_validation_with_invalid_cidr(self): self._test_vxlan_group_validation(LOCAL_IP, '224.0.0.1/') def test_vxlan_group_validation_with_v6_unicast_group(self): self._test_vxlan_group_validation(LOCAL_IPV6, '2001:db8::') def test_get_existing_bridge_name(self): phy_net = 'physnet0' self.assertEqual('br-eth2', self.lbm.bridge_mappings.get(phy_net)) phy_net = '' self.assertIsNone(self.lbm.bridge_mappings.get(phy_net)) def test_get_bridge_name(self): nw_id = "123456789101112" self.assertEqual("brq" + nw_id[0:11], self.lbm.get_bridge_name(nw_id)) nw_id = "" self.assertEqual("brq", self.lbm.get_bridge_name(nw_id)) def test_get_subinterface_name_backwards_compatibility(self): self.assertEqual("abcdefghijklm.1", self.lbm.get_subinterface_name("abcdefghijklm", "1")) self.assertEqual("abcdefghijkl.11", self.lbm.get_subinterface_name("abcdefghijkl", "11")) self.assertEqual("abcdefghij.1111", self.lbm.get_subinterface_name("abcdefghij", "1111")) def test_get_subinterface_name_advanced(self): """Ensure the same hash is used for long interface names. If the generated vlan device name would be too long, make sure that everything before the '.' 
is equal. This might be helpful when debugging problems. """ max_device_name = "abcdefghijklmno" vlan_dev_name1 = self.lbm.get_subinterface_name(max_device_name, "1") vlan_dev_name2 = self.lbm.get_subinterface_name(max_device_name, "1111") self.assertEqual(vlan_dev_name1.partition(".")[0], vlan_dev_name2.partition(".")[0]) def test_get_tap_device_name(self): if_id = "123456789101112" self.assertEqual(constants.TAP_DEVICE_PREFIX + if_id[0:11], self.lbm.get_tap_device_name(if_id)) if_id = "" self.assertEqual(constants.TAP_DEVICE_PREFIX, self.lbm.get_tap_device_name(if_id)) def test_get_vxlan_device_name(self): vn_id = constants.MAX_VXLAN_VNI self.assertEqual("vxlan-" + str(vn_id), self.lbm.get_vxlan_device_name(vn_id)) self.assertIsNone(self.lbm.get_vxlan_device_name(vn_id + 1)) def test_get_vxlan_group(self): cfg.CONF.set_override('vxlan_group', '239.1.2.3/24', 'VXLAN') vn_id = constants.MAX_VXLAN_VNI self.assertEqual('239.1.2.255', self.lbm.get_vxlan_group(vn_id)) vn_id = 256 self.assertEqual('239.1.2.0', self.lbm.get_vxlan_group(vn_id)) vn_id = 257 self.assertEqual('239.1.2.1', self.lbm.get_vxlan_group(vn_id)) def test_get_vxlan_group_with_multicast_address(self): cfg.CONF.set_override('vxlan_group', '239.1.2.3/32', 'VXLAN') cfg.CONF.set_override('multicast_ranges', ('224.0.0.10:300:315', '225.0.0.15:400:600'), 'VXLAN') vn_id = 300 self.assertEqual('224.0.0.10', self.lbm.get_vxlan_group(vn_id)) vn_id = 500 self.assertEqual('225.0.0.15', self.lbm.get_vxlan_group(vn_id)) vn_id = 315 self.assertEqual('224.0.0.10', self.lbm.get_vxlan_group(vn_id)) vn_id = 4000 # outside of range should fallback to group self.assertEqual('239.1.2.3', self.lbm.get_vxlan_group(vn_id)) def test__is_valid_multicast_range(self): bad_ranges = ['224.0.0.10:330:315', 'x:100:200', '10.0.0.1:100:200', '224.0.0.10:100', '224.0.0.10:100:200:300'] for r in bad_ranges: self.assertFalse(self.lbm._is_valid_multicast_range(r), 'range %s should have been invalid' % r) good_ranges = ['224.0.0.10:315:330', '224.0.0.0:315:315'] for r in good_ranges: self.assertTrue(self.lbm._is_valid_multicast_range(r), 'range %s should have been valid' % r) # v4 ranges are bad when a v6 local_ip is present self.lbm.local_ip = '2000::1' for r in good_ranges: self.assertFalse(self.lbm._is_valid_multicast_range(r), 'range %s should have been invalid' % r) def test__match_multicast_range(self): cfg.CONF.set_override('multicast_ranges', ('224.0.0.10:300:315', '225.0.0.15:400:600'), 'VXLAN') self.assertEqual('224.0.0.10', self.lbm._match_multicast_range(307)) self.assertEqual('225.0.0.15', self.lbm._match_multicast_range(407)) self.assertIsNone(self.lbm._match_multicast_range(399)) def test_get_vxlan_group_with_ipv6(self): cfg.CONF.set_override('local_ip', LOCAL_IPV6, 'VXLAN') self.lbm.local_ip = LOCAL_IPV6 cfg.CONF.set_override('vxlan_group', VXLAN_GROUPV6, 'VXLAN') vn_id = constants.MAX_VXLAN_VNI self.assertEqual('ff05::ff', self.lbm.get_vxlan_group(vn_id)) vn_id = 256 self.assertEqual('ff05::', self.lbm.get_vxlan_group(vn_id)) vn_id = 257 self.assertEqual('ff05::1', self.lbm.get_vxlan_group(vn_id)) def test_get_deletable_bridges(self): br_list = ["br-int", "brq1", "brq2", "brq-user"] expected = set(br_list[1:3]) lbm = get_linuxbridge_manager( bridge_mappings={"physnet0": "brq-user"}, interface_mappings={}) with mock.patch.object( bridge_lib, 'get_bridge_names', return_value=br_list): self.assertEqual(expected, lbm.get_deletable_bridges()) def test_get_tap_devices_count(self): with mock.patch.object( bridge_lib.BridgeDevice, 'get_interfaces') as 
get_ifs_fn: get_ifs_fn.return_value = ['tap2101', 'eth0.100', 'vxlan-1000'] self.assertEqual(1, self.lbm.get_tap_devices_count('br0')) def test_get_interface_details(self): with mock.patch.object(ip_lib.IpAddrCommand, 'list') as list_fn,\ mock.patch.object(ip_lib.IpRouteCommand, 'get_gateway') as getgw_fn: gwdict = dict(gateway='1.1.1.1') getgw_fn.return_value = gwdict ipdict = dict(cidr='1.1.1.1/24', broadcast='1.1.1.255', scope='global', ip_version=constants.IP_VERSION_4, dynamic=False) list_fn.return_value = ipdict ret = self.lbm.get_interface_details("eth0", 4) self.assertTrue(list_fn.called) self.assertTrue(getgw_fn.called) self.assertEqual(ret, (ipdict, gwdict)) def test_ensure_flat_bridge(self): with mock.patch.object(self.lbm, 'ensure_bridge') as ens: self.assertEqual( "eth0", self.lbm.ensure_flat_bridge("123", None, "eth0")) ens.assert_called_once_with("brq123", "eth0") def test_ensure_flat_bridge_with_existed_brq(self): with mock.patch.object(self.lbm, 'ensure_bridge') as ens: ens.return_value = "br-eth2" self.assertEqual("br-eth2", self.lbm.ensure_flat_bridge("123", "br-eth2", None)) ens.assert_called_with("br-eth2") def test_ensure_vlan_bridge(self): with mock.patch.object(self.lbm, 'ensure_vlan') as ens_vl_fn,\ mock.patch.object(self.lbm, 'ensure_bridge') as ens: ens_vl_fn.return_value = "eth0.1" self.assertEqual("eth0.1", self.lbm.ensure_vlan_bridge("123", None, "eth0", "1")) ens.assert_called_with("brq123", "eth0.1") self.assertEqual("eth0.1", self.lbm.ensure_vlan_bridge("123", None, "eth0", "1")) ens.assert_called_with("brq123", "eth0.1") def test_ensure_vlan_bridge_with_existed_brq(self): with mock.patch.object(self.lbm, 'ensure_vlan') as ens_vl_fn,\ mock.patch.object(self.lbm, 'ensure_bridge') as ens: ens_vl_fn.return_value = None ens.return_value = "br-eth2" self.assertEqual("br-eth2", self.lbm.ensure_vlan_bridge("123", "br-eth2", None, None)) ens.assert_called_with("br-eth2") def test_ensure_local_bridge(self): with mock.patch.object(self.lbm, 'ensure_bridge') as ens_fn: self.lbm.ensure_local_bridge("54321", None) ens_fn.assert_called_once_with("brq54321") def test_ensure_local_bridge_with_existed_brq(self): with mock.patch.object(self.lbm, 'ensure_bridge') as ens_fn: ens_fn.return_value = "br-eth2" self.lbm.ensure_local_bridge("54321", 'br-eth2') ens_fn.assert_called_once_with("br-eth2") def test_ensure_vlan(self): with mock.patch.object(ip_lib, 'device_exists') as de_fn: de_fn.return_value = True self.assertEqual("eth0.1", self.lbm.ensure_vlan("eth0", "1")) de_fn.return_value = False vlan_dev = FakeIpDevice() with mock.patch.object(vlan_dev, 'disable_ipv6') as dv6_fn,\ mock.patch.object(self.lbm.ip, 'add_vlan', return_value=vlan_dev) as add_vlan_fn: retval = self.lbm.ensure_vlan("eth0", "1") self.assertEqual("eth0.1", retval) add_vlan_fn.assert_called_with('eth0.1', 'eth0', '1') dv6_fn.assert_called_once_with() def test_ensure_vxlan(self, expected_proxy=False): physical_mtu = 1500 seg_id = "12345678" self.lbm.local_int = 'eth0' self.lbm.vxlan_mode = lconst.VXLAN_MCAST with mock.patch.object(ip_lib, 'device_exists') as de_fn: de_fn.return_value = True self.assertEqual("vxlan-" + seg_id, self.lbm.ensure_vxlan(seg_id)) de_fn.return_value = False vxlan_dev = FakeIpDevice() with mock.patch.object(vxlan_dev, 'disable_ipv6') as dv6_fn,\ mock.patch.object(vxlan_dev.link, 'set_mtu') as set_mtu_fn,\ mock.patch.object(ip_lib, 'get_device_mtu', return_value=physical_mtu),\ mock.patch.object(self.lbm.ip, 'add_vxlan', return_value=vxlan_dev) as add_vxlan_fn: retval = 
self.lbm.ensure_vxlan(seg_id, mtu=1450) self.assertEqual("vxlan-" + seg_id, retval) add_vxlan_fn.assert_called_with("vxlan-" + seg_id, seg_id, group="224.0.0.1", srcport=(0, 0), dstport=None, ttl=None, dev=self.lbm.local_int) dv6_fn.assert_called_once_with() set_mtu_fn.assert_called_once_with(1450) cfg.CONF.set_override('l2_population', 'True', 'VXLAN') self.assertEqual("vxlan-" + seg_id, self.lbm.ensure_vxlan(seg_id)) add_vxlan_fn.assert_called_with("vxlan-" + seg_id, seg_id, group="224.0.0.1", srcport=(0, 0), dstport=None, ttl=None, dev=self.lbm.local_int, proxy=expected_proxy) def test_ensure_vxlan_arp_responder_enabled(self): cfg.CONF.set_override('arp_responder', True, 'VXLAN') self.test_ensure_vxlan(expected_proxy=True) def test_ensure_vxlan_dscp_inherit_set(self): cfg.CONF.set_override('dscp_inherit', 'True', 'AGENT') seg_id = "12345678" self.lbm.local_int = 'eth0' self.lbm.vxlan_mode = lconst.VXLAN_MCAST with mock.patch.object(ip_lib, 'device_exists', return_value=False): vxlan_dev = FakeIpDevice() with mock.patch.object(vxlan_dev, 'disable_ipv6') as dv6_fn,\ mock.patch.object(self.lbm.ip, 'add_vxlan', return_value=vxlan_dev) as add_vxlan_fn: self.assertEqual("vxlan-" + seg_id, self.lbm.ensure_vxlan(seg_id)) add_vxlan_fn.assert_called_with("vxlan-" + seg_id, seg_id, group="224.0.0.1", srcport=(0, 0), dstport=None, ttl=None, tos='inherit', dev=self.lbm.local_int) dv6_fn.assert_called_once_with() def test_ensure_vxlan_mtu_too_big(self): seg_id = "12345678" physical_mtu = 1500 # Any mtu value which will be higher than # physical_mtu - VXLAN_ENCAP_OVERHEAD should raise NetlinkError mtu = 1490 self.lbm.local_int = 'eth0' self.lbm.vxlan_mode = lconst.VXLAN_MCAST with mock.patch.object(ip_lib, 'device_exists', return_value=False): vxlan_dev = mock.Mock() with mock.patch.object(vxlan_dev, 'disable_ipv6') as dv6_fn,\ mock.patch.object(self.lbm.ip, 'add_vxlan', return_value=vxlan_dev) as add_vxlan_fn,\ mock.patch.object(vxlan_dev.link, 'set_mtu', side_effect=ip_lib.InvalidArgument( parameter="MTU", value=mtu)),\ mock.patch.object(ip_lib, 'get_device_mtu', return_value=physical_mtu),\ mock.patch.object(vxlan_dev.link, 'delete') as delete_dev: self.assertFalse( self.lbm.ensure_vxlan(seg_id, mtu=mtu)) add_vxlan_fn.assert_called_with("vxlan-" + seg_id, seg_id, group="224.0.0.1", srcport=(0, 0), dstport=None, ttl=None, dev=self.lbm.local_int) delete_dev.assert_called_once_with() dv6_fn.assert_not_called() def test__update_interface_ip_details(self): gwdict = dict(cidr='1.1.1.1/24', via='1.1.1.1', metric=50) ipdict = dict(cidr='1.1.1.1/24', broadcast='1.1.1.255', scope='global', ip_version=constants.IP_VERSION_4, dynamic=False) with mock.patch.object(ip_lib.IpAddrCommand, 'add') as add_fn,\ mock.patch.object(ip_lib.IpAddrCommand, 'delete') as del_fn,\ mock.patch.object(ip_lib.IpAddrCommand, 'list') as list_fn: # 'list' actually returns a dict, but we're only simulating # whether the device exists or not list_fn.side_effect = [True, False] self.lbm._update_interface_ip_details("br0", "eth0", [ipdict], None) self.assertFalse(add_fn.called) self.assertTrue(del_fn.called) add_fn.reset_mock() del_fn.reset_mock() self.lbm._update_interface_ip_details("br0", "eth0", [ipdict], None) self.assertTrue(add_fn.called) self.assertTrue(del_fn.called) with mock.patch.object(ip_lib.IpRouteCommand, 'add_gateway') as addgw_fn,\ mock.patch.object(ip_lib.IpRouteCommand, 'delete_gateway') as delgw_fn: self.lbm._update_interface_ip_details("br0", "eth0", None, gwdict) self.assertTrue(addgw_fn.called) 
self.assertTrue(delgw_fn.called) def test_ensure_bridge(self): bridge_device = mock.Mock() bridge_device_old = mock.Mock() with mock.patch.object(ip_lib, 'ensure_device_is_ready') as de_fn,\ mock.patch.object(bridge_lib, "BridgeDevice", return_value=bridge_device) as br_fn,\ mock.patch.object(self.lbm, 'update_interface_ip_details') as upd_fn,\ mock.patch.object(bridge_lib, 'is_bridged_interface'),\ mock.patch.object(bridge_lib.BridgeDevice, 'get_interface_bridge') as get_if_br_fn: de_fn.return_value = False br_fn.addbr.return_value = bridge_device bridge_device.setfd.return_value = False bridge_device.disable_stp.return_value = False bridge_device.disable_ipv6.return_value = False bridge_device.link.set_up.return_value = False self.assertEqual("br0", self.lbm.ensure_bridge("br0", None)) bridge_device.owns_interface.return_value = False self.lbm.ensure_bridge("br0", "eth0") upd_fn.assert_called_with("br0", "eth0") bridge_device.owns_interface.assert_called_with("eth0") de_fn.return_value = True bridge_device.delif.side_effect = Exception() self.lbm.ensure_bridge("br0", "eth0") bridge_device.owns_interface.assert_called_with("eth0") de_fn.return_value = True bridge_device.owns_interface.return_value = False get_if_br_fn.return_value = bridge_device_old bridge_device.addif.reset_mock() self.lbm.ensure_bridge("br0", "eth0") bridge_device_old.delif.assert_called_once_with('eth0') bridge_device.addif.assert_called_once_with('eth0') def test_ensure_physical_in_bridge(self): self.assertFalse( self.lbm.ensure_physical_in_bridge("123", constants.TYPE_VLAN, "phys", "1", 1450) ) with mock.patch.object(self.lbm, "ensure_flat_bridge") as flbr_fn: self.assertTrue( self.lbm.ensure_physical_in_bridge("123", constants.TYPE_FLAT, "physnet1", None, 1450) ) self.assertTrue(flbr_fn.called) with mock.patch.object(self.lbm, "ensure_vlan_bridge") as vlbr_fn: self.assertTrue( self.lbm.ensure_physical_in_bridge("123", constants.TYPE_VLAN, "physnet1", "1", 1450) ) self.assertTrue(vlbr_fn.called) with mock.patch.object(self.lbm, "ensure_vxlan_bridge") as vlbr_fn: self.lbm.vxlan_mode = lconst.VXLAN_MCAST self.assertTrue( self.lbm.ensure_physical_in_bridge("123", constants.TYPE_VXLAN, "physnet1", "1", 1450) ) self.assertTrue(vlbr_fn.called) def test_ensure_physical_in_bridge_with_existed_brq(self): with mock.patch.object(linuxbridge_neutron_agent.LOG, 'error') as log: self.lbm.ensure_physical_in_bridge("123", constants.TYPE_FLAT, "physnet9", "1", 1450) self.assertEqual(1, log.call_count) @mock.patch.object(ip_lib, "device_exists", return_value=False) def test_add_tap_interface_with_interface_disappearing(self, exists): with mock.patch.object(self.lbm, "_add_tap_interface", side_effect=RuntimeError("No such dev")): self.assertFalse(self.lbm.add_tap_interface("123", constants.TYPE_VLAN, "physnet1", None, "tap1", "foo", None)) @mock.patch.object(ip_lib, "device_exists", return_value=True) def test_add_tap_interface_with_other_error(self, exists): with mock.patch.object(self.lbm, "_add_tap_interface", side_effect=RuntimeError("No more fuel")): self.assertRaises(RuntimeError, self.lbm.add_tap_interface, "123", constants.TYPE_VLAN, "physnet1", None, "tap1", "foo", None) def test_add_tap_interface_owner_compute(self): with mock.patch.object(ip_lib, "device_exists"): with mock.patch.object(self.lbm, "ensure_local_bridge"): self.assertTrue(self.lbm.add_tap_interface( "123", constants.TYPE_LOCAL, "physnet1", None, "tap1", "compute:1", None)) def _test_add_tap_interface(self, dev_owner_prefix): with mock.patch.object(ip_lib, 
"device_exists") as de_fn: de_fn.return_value = False self.assertFalse( self.lbm.add_tap_interface("123", constants.TYPE_VLAN, "physnet1", "1", "tap1", dev_owner_prefix, None)) de_fn.return_value = True bridge_device = mock.Mock() with mock.patch.object(self.lbm, "ensure_local_bridge") as en_fn,\ mock.patch.object(bridge_lib, "BridgeDevice", return_value=bridge_device), \ mock.patch.object(self.lbm, '_set_tap_mtu') as set_tap, \ mock.patch.object(bridge_lib.BridgeDevice, "get_interface_bridge") as get_br: bridge_device.addif.retun_value = False get_br.return_value = True self.assertTrue(self.lbm.add_tap_interface( "123", constants.TYPE_LOCAL, "physnet1", None, "tap1", dev_owner_prefix, None)) en_fn.assert_called_with("123", "brq123") self.lbm.bridge_mappings = {"physnet1": "brq999"} self.assertTrue(self.lbm.add_tap_interface( "123", constants.TYPE_LOCAL, "physnet1", None, "tap1", dev_owner_prefix, 8765)) set_tap.assert_called_with('tap1', 8765) en_fn.assert_called_with("123", "brq999") get_br.return_value = False bridge_device.addif.retun_value = True self.assertFalse(self.lbm.add_tap_interface( "123", constants.TYPE_LOCAL, "physnet1", None, "tap1", dev_owner_prefix, None)) with mock.patch.object(self.lbm, "ensure_physical_in_bridge") as ens_fn: ens_fn.return_value = False self.assertFalse(self.lbm.add_tap_interface( "123", constants.TYPE_VLAN, "physnet1", "1", "tap1", dev_owner_prefix, None)) def test_add_tap_interface_owner_network(self): self._test_add_tap_interface(constants.DEVICE_OWNER_NETWORK_PREFIX) def test_add_tap_interface_owner_neutron(self): self._test_add_tap_interface(constants.DEVICE_OWNER_NEUTRON_PREFIX) def test_plug_interface(self): segment = amb.NetworkSegment( constants.TYPE_VLAN, "physnet-1", "1", 1777) with mock.patch.object(self.lbm, "add_tap_interface") as add_tap: self.lbm.plug_interface("123", segment, "tap234", constants.DEVICE_OWNER_NETWORK_PREFIX) add_tap.assert_called_with("123", constants.TYPE_VLAN, "physnet-1", "1", "tap234", constants.DEVICE_OWNER_NETWORK_PREFIX, 1777) def test_delete_bridge(self): with mock.patch.object(ip_lib.IPDevice, "exists") as de_fn,\ mock.patch.object(ip_lib, "IpLinkCommand") as link_cmd,\ mock.patch.object(bridge_lib.BridgeDevice, "get_interfaces") as getif_fn,\ mock.patch.object(self.lbm, "remove_interface"),\ mock.patch.object(self.lbm, "update_interface_ip_details") as updif_fn,\ mock.patch.object(self.lbm, "delete_interface") as delif_fn: de_fn.return_value = False self.lbm.delete_bridge("br0") self.assertFalse(getif_fn.called) de_fn.return_value = True getif_fn.return_value = ["eth0", "eth1", "vxlan-1002"] link_cmd.set_down.return_value = False self.lbm.delete_bridge("br0") updif_fn.assert_called_with("eth1", "br0") delif_fn.assert_called_with("vxlan-1002") def test_delete_bridge_not_exist(self): self.lbm.interface_mappings.update({}) bridge_device = mock.Mock() with mock.patch.object(bridge_lib, "BridgeDevice", return_value=bridge_device): bridge_device.exists.side_effect = [True, False] bridge_device.get_interfaces.return_value = [] bridge_device.link.set_down.side_effect = RuntimeError self.lbm.delete_bridge("br0") self.assertEqual(2, bridge_device.exists.call_count) bridge_device.exists.side_effect = [True, True] self.assertRaises(RuntimeError, self.lbm.delete_bridge, "br0") def test_delete_bridge_with_ip(self): bridge_device = mock.Mock() with mock.patch.object(ip_lib, "device_exists") as de_fn,\ mock.patch.object(self.lbm, "remove_interface"),\ mock.patch.object(self.lbm, "update_interface_ip_details") as updif_fn,\ 
mock.patch.object(self.lbm, "delete_interface") as del_interface,\ mock.patch.object(bridge_lib, "BridgeDevice", return_value=bridge_device): de_fn.return_value = True updif_fn.return_value = True bridge_device.get_interfaces.return_value = ["eth0", "eth1.1"] bridge_device.link.set_down.return_value = False self.lbm.delete_bridge("br0") updif_fn.assert_called_with("eth1.1", "br0") self.assertFalse(del_interface.called) def test_delete_bridge_no_ip(self): bridge_device = mock.Mock() with mock.patch.object(ip_lib, "device_exists") as de_fn,\ mock.patch.object(self.lbm, "remove_interface"),\ mock.patch.object(self.lbm, "get_interface_details") as if_det_fn,\ mock.patch.object(self.lbm, "_update_interface_ip_details") as updif_fn,\ mock.patch.object(self.lbm, "delete_interface") as del_interface,\ mock.patch.object(bridge_lib, "BridgeDevice", return_value=bridge_device): de_fn.return_value = True bridge_device.get_interfaces.return_value = ["eth0", "eth1.1"] bridge_device.link.set_down.return_value = False if_det_fn.return_value = ([], None) self.lbm.delete_bridge("br0") del_interface.assert_called_with("eth1.1") self.assertFalse(updif_fn.called) def test_delete_bridge_no_int_mappings(self): lbm = get_linuxbridge_manager( bridge_mappings={}, interface_mappings={}) with mock.patch.object(ip_lib.IPDevice, "exists") as de_fn,\ mock.patch.object(ip_lib, "IpLinkCommand") as link_cmd,\ mock.patch.object(bridge_lib.BridgeDevice, "get_interfaces") as getif_fn,\ mock.patch.object(lbm, "remove_interface"),\ mock.patch.object(lbm, "delete_interface") as del_interface: de_fn.return_value = False lbm.delete_bridge("br0") self.assertFalse(getif_fn.called) de_fn.return_value = True getif_fn.return_value = ["vxlan-1002"] link_cmd.set_down.return_value = False lbm.delete_bridge("br0") del_interface.assert_called_with("vxlan-1002") def test_delete_bridge_with_physical_vlan(self): self.lbm.interface_mappings.update({"physnet2": "eth1.4000"}) bridge_device = mock.Mock() with mock.patch.object(ip_lib, "device_exists") as de_fn,\ mock.patch.object(self.lbm, "remove_interface"),\ mock.patch.object(self.lbm, "update_interface_ip_details") as updif_fn,\ mock.patch.object(self.lbm, "delete_interface") as del_int,\ mock.patch.object(bridge_lib, "BridgeDevice", return_value=bridge_device): de_fn.return_value = True bridge_device.get_interfaces.return_value = ["eth1.1", "eth1.4000"] updif_fn.return_value = False bridge_device.link.set_down.return_value = False self.lbm.delete_bridge("br0") del_int.assert_called_once_with("eth1.1") def test_remove_interface(self): with mock.patch.object(ip_lib.IPDevice, "exists") as de_fn,\ mock.patch.object(bridge_lib.BridgeDevice, 'owns_interface') as owns_fn,\ mock.patch.object(bridge_lib.BridgeDevice, "delif") as delif_fn: de_fn.return_value = False self.assertFalse(self.lbm.remove_interface("br0", "eth0")) self.assertFalse(owns_fn.called) de_fn.return_value = True owns_fn.return_value = False self.assertTrue(self.lbm.remove_interface("br0", "eth0")) delif_fn.return_value = False self.assertTrue(self.lbm.remove_interface("br0", "eth0")) def test_remove_interface_not_on_bridge(self): bridge_device = mock.Mock() with mock.patch.object(bridge_lib, "BridgeDevice", return_value=bridge_device): bridge_device.exists.return_value = True bridge_device.delif.side_effect = RuntimeError bridge_device.owns_interface.side_effect = [True, False] self.lbm.remove_interface("br0", 'tap0') self.assertEqual(2, bridge_device.owns_interface.call_count) bridge_device.owns_interface.side_effect = [True, True] 
self.assertRaises(RuntimeError, self.lbm.remove_interface, "br0", 'tap0') def test_delete_interface(self): with mock.patch.object(ip_lib.IPDevice, "exists") as de_fn,\ mock.patch.object(ip_lib.IpLinkCommand, "set_down") as down_fn,\ mock.patch.object(ip_lib.IpLinkCommand, "delete") as delete_fn: de_fn.return_value = False self.lbm.delete_interface("eth1.1") self.assertFalse(down_fn.called) self.assertFalse(delete_fn.called) de_fn.return_value = True self.lbm.delete_interface("eth1.1") self.assertTrue(down_fn.called) self.assertTrue(delete_fn.called) def _check_vxlan_support(self, expected, vxlan_ucast_supported, vxlan_mcast_supported): with mock.patch.object(self.lbm, 'vxlan_ucast_supported', return_value=vxlan_ucast_supported),\ mock.patch.object(self.lbm, 'vxlan_mcast_supported', return_value=vxlan_mcast_supported): if expected == lconst.VXLAN_NONE: self.assertRaises(exceptions.VxlanNetworkUnsupported, self.lbm.check_vxlan_support) self.assertEqual(expected, self.lbm.vxlan_mode) else: self.lbm.check_vxlan_support() self.assertEqual(expected, self.lbm.vxlan_mode) def test_check_vxlan_support(self): self._check_vxlan_support(expected=lconst.VXLAN_UCAST, vxlan_ucast_supported=True, vxlan_mcast_supported=True) self._check_vxlan_support(expected=lconst.VXLAN_MCAST, vxlan_ucast_supported=False, vxlan_mcast_supported=True) self._check_vxlan_support(expected=lconst.VXLAN_NONE, vxlan_ucast_supported=False, vxlan_mcast_supported=False) self._check_vxlan_support(expected=lconst.VXLAN_NONE, vxlan_ucast_supported=False, vxlan_mcast_supported=False) def _check_vxlan_ucast_supported( self, expected, l2_population, iproute_arg_supported, fdb_append): cfg.CONF.set_override('l2_population', l2_population, 'VXLAN') with mock.patch.object(ip_lib, 'device_exists', return_value=False),\ mock.patch.object(ip_lib, 'vxlan_in_use', return_value=False),\ mock.patch.object(self.lbm, 'delete_interface', return_value=None),\ mock.patch.object(self.lbm, 'ensure_vxlan', return_value=None),\ mock.patch.object( ip_lib.IpNetnsCommand, 'execute', side_effect=None if fdb_append else RuntimeError()),\ mock.patch.object(ip_lib, 'iproute_arg_supported', return_value=iproute_arg_supported): self.assertEqual(expected, self.lbm.vxlan_ucast_supported()) def test_vxlan_ucast_supported(self): self._check_vxlan_ucast_supported( expected=False, l2_population=False, iproute_arg_supported=True, fdb_append=True) self._check_vxlan_ucast_supported( expected=False, l2_population=True, iproute_arg_supported=False, fdb_append=True) self._check_vxlan_ucast_supported( expected=False, l2_population=True, iproute_arg_supported=True, fdb_append=False) self._check_vxlan_ucast_supported( expected=True, l2_population=True, iproute_arg_supported=True, fdb_append=True) def _check_vxlan_mcast_supported( self, expected, vxlan_group, iproute_arg_supported): cfg.CONF.set_override('vxlan_group', vxlan_group, 'VXLAN') with mock.patch.object( ip_lib, 'iproute_arg_supported', return_value=iproute_arg_supported): self.assertEqual(expected, self.lbm.vxlan_mcast_supported()) def test_vxlan_mcast_supported(self): self._check_vxlan_mcast_supported( expected=False, vxlan_group='', iproute_arg_supported=True) self._check_vxlan_mcast_supported( expected=False, vxlan_group='224.0.0.1', iproute_arg_supported=False) self._check_vxlan_mcast_supported( expected=True, vxlan_group='224.0.0.1', iproute_arg_supported=True) def _test_ensure_port_admin_state(self, admin_state): port_id = 'fake_id' with mock.patch.object(ip_lib, 'IPDevice') as dev_mock: 
self.lbm.ensure_port_admin_state(port_id, admin_state) tap_name = self.lbm.get_tap_device_name(port_id) self.assertEqual(admin_state, dev_mock(tap_name).link.set_up.called) self.assertNotEqual(admin_state, dev_mock(tap_name).link.set_down.called) def test_ensure_port_admin_state_up(self): self._test_ensure_port_admin_state(True) def test_ensure_port_admin_state_down(self): self._test_ensure_port_admin_state(False) def test_get_agent_id_bridge_mappings(self): lbm = get_linuxbridge_manager(BRIDGE_MAPPINGS, INTERFACE_MAPPINGS) with mock.patch.object(ip_lib, "get_device_mac", return_value='16:63:69:10:a0:59') as mock_gim: agent_id = lbm.get_agent_id() self.assertEqual("lb16636910a059", agent_id) mock_gim.assert_called_with(BRIDGE_MAPPING_VALUE) def test_get_agent_id_no_bridge_mappings(self): devices_mock = [ mock.MagicMock(), mock.MagicMock() ] devices_mock[0].name = "eth1" devices_mock[1].name = "eth2" bridge_mappings = {} lbm = get_linuxbridge_manager(bridge_mappings, INTERFACE_MAPPINGS) with mock.patch.object(ip_lib.IPWrapper, 'get_devices', return_value=devices_mock), \ mock.patch.object( ip_lib, "get_device_mac", side_effect=[None, '16:63:69:10:a0:59']) as mock_gim: agent_id = lbm.get_agent_id() self.assertEqual("lb16636910a059", agent_id) mock_gim.assert_has_calls([mock.call("eth1"), mock.call("eth2")]) class TestLinuxBridgeRpcCallbacks(base.BaseTestCase): def setUp(self): super(TestLinuxBridgeRpcCallbacks, self).setUp() class FakeLBAgent(object): def __init__(self): self.agent_id = 1 self.mgr = get_linuxbridge_manager( BRIDGE_MAPPINGS, INTERFACE_MAPPINGS) self.mgr.vxlan_mode = lconst.VXLAN_UCAST self.network_ports = collections.defaultdict(list) self.lb_rpc = linuxbridge_neutron_agent.LinuxBridgeRpcCallbacks( object(), FakeLBAgent(), object() ) segment = mock.Mock() segment.network_type = 'vxlan' segment.segmentation_id = 1 self.lb_rpc.network_map['net_id'] = segment cfg.CONF.set_default('host', 'host') def test_network_delete_mapped_net(self): mock_net = mock.Mock() mock_net.physical_network = None self._test_network_delete({NETWORK_ID: mock_net}) def test_network_delete_unmapped_net(self): self._test_network_delete({}) def _test_network_delete(self, net_map): self.lb_rpc.network_map = net_map with mock.patch.object(self.lb_rpc.agent.mgr, "get_bridge_name") as get_br_fn,\ mock.patch.object(self.lb_rpc.agent.mgr, "delete_bridge") as del_fn: get_br_fn.return_value = "br0" self.lb_rpc.network_delete("anycontext", network_id=NETWORK_ID) get_br_fn.assert_called_with(NETWORK_ID) del_fn.assert_called_with("br0") def test_port_update(self): port = {'id': PORT_1} self.lb_rpc.port_update(context=None, port=port) self.assertEqual(set([DEVICE_1]), self.lb_rpc.updated_devices) def test_network_update(self): updated_network = {'id': NETWORK_ID} self.lb_rpc.agent.network_ports = { NETWORK_ID: [PORT_DATA] } self.lb_rpc.network_update(context=None, network=updated_network) self.assertEqual(set([DEVICE_1]), self.lb_rpc.updated_devices) def test_network_delete_with_existed_brq(self): mock_net = mock.Mock() mock_net.physical_network = 'physnet0' self.lb_rpc.network_map = {'123': mock_net} with mock.patch.object(linuxbridge_neutron_agent.LOG, 'info') as log,\ mock.patch.object(self.lb_rpc.agent.mgr, "delete_bridge") as del_fn: self.lb_rpc.network_delete("anycontext", network_id="123") self.assertEqual(0, del_fn.call_count) self.assertEqual(1, log.call_count) def test_binding_deactivate(self): with mock.patch.object(self.lb_rpc.agent.mgr, "get_bridge_name") as get_br_fn,\ 
mock.patch.object(self.lb_rpc.agent.mgr, "get_tap_device_name") as get_tap_fn,\ mock.patch.object(self.lb_rpc.agent.mgr, "remove_interface") as rem_intf: get_br_fn.return_value = "br0" get_tap_fn.return_value = "tap456" self.lb_rpc.binding_deactivate(mock.ANY, host="host", network_id="123", port_id="456") get_br_fn.assert_called_once_with("123") get_tap_fn.assert_called_once_with("456") rem_intf.assert_called_once_with("br0", "tap456") def test_binding_deactivate_not_for_host(self): with mock.patch.object(self.lb_rpc.agent.mgr, "get_bridge_name") as get_br_fn,\ mock.patch.object(self.lb_rpc.agent.mgr, "get_tap_device_name") as get_tap_fn,\ mock.patch.object(self.lb_rpc.agent.mgr, "remove_interface") as rem_intf: self.lb_rpc.binding_deactivate(mock.ANY, host="other_host", network_id="123", port_id="456") get_br_fn.assert_not_called() get_tap_fn.assert_not_called() rem_intf.assert_not_called() def test_binding_activate(self): with mock.patch.object(self.lb_rpc.agent.mgr, "get_tap_device_name") as get_tap_fun: get_tap_fun.return_value = "tap456" self.lb_rpc.binding_activate(mock.ANY, host="host", port_id="456") self.assertIn("tap456", self.lb_rpc.updated_devices) def test_binding_activate_not_for_host(self): self.lb_rpc.binding_activate(mock.ANY, host="other-host", port_id="456") self.assertFalse(self.lb_rpc.updated_devices) def _test_fdb_add(self, proxy_enabled=False): fdb_entries = {'net_id': {'ports': {'agent_ip': [constants.FLOODING_ENTRY, ['port_mac', 'port_ip']]}, 'network_type': 'vxlan', 'segment_id': 1}} with mock.patch.object(ip_lib.IpNetnsCommand, 'execute', return_value='') as execute_fn, \ mock.patch.object(ip_lib, 'add_neigh_entry', return_value='') as add_fn: self.lb_rpc.fdb_add(None, fdb_entries) expected = [ mock.call(['bridge', 'fdb', 'show', 'dev', 'vxlan-1'], run_as_root=True), mock.call(['bridge', 'fdb', 'add', constants.FLOODING_ENTRY[0], 'dev', 'vxlan-1', 'dst', 'agent_ip'], run_as_root=True, check_exit_code=False), mock.call(['bridge', 'fdb', 'replace', 'port_mac', 'dev', 'vxlan-1', 'dst', 'agent_ip'], run_as_root=True, check_exit_code=False), ] execute_fn.assert_has_calls(expected) if proxy_enabled: add_fn.assert_called_with('port_ip', 'port_mac', 'vxlan-1') else: add_fn.assert_not_called() def test_fdb_add(self): self._test_fdb_add(proxy_enabled=False) def test_fdb_add_with_arp_responder(self): cfg.CONF.set_override('arp_responder', True, 'VXLAN') self._test_fdb_add(proxy_enabled=True) def test_fdb_ignore(self): fdb_entries = {'net_id': {'ports': {LOCAL_IP: [constants.FLOODING_ENTRY, ['port_mac', 'port_ip']]}, 'network_type': 'vxlan', 'segment_id': 1}} with mock.patch.object(utils, 'execute', return_value='') as execute_fn: self.lb_rpc.fdb_add(None, fdb_entries) self.lb_rpc.fdb_remove(None, fdb_entries) self.assertFalse(execute_fn.called) fdb_entries = {'other_net_id': {'ports': {'192.168.0.67': [constants.FLOODING_ENTRY, ['port_mac', 'port_ip']]}, 'network_type': 'vxlan', 'segment_id': 1}} with mock.patch.object(utils, 'execute', return_value='') as execute_fn: self.lb_rpc.fdb_add(None, fdb_entries) self.lb_rpc.fdb_remove(None, fdb_entries) self.assertFalse(execute_fn.called) def _test_fdb_remove(self, proxy_enabled=False): fdb_entries = {'net_id': {'ports': {'agent_ip': [constants.FLOODING_ENTRY, ['port_mac', 'port_ip']]}, 'network_type': 'vxlan', 'segment_id': 1}} with mock.patch.object(ip_lib.IpNetnsCommand, 'execute', return_value='') as execute_fn, \ mock.patch.object(ip_lib, 'delete_neigh_entry', return_value='') as del_fn: self.lb_rpc.fdb_remove(None, 
fdb_entries) expected = [ mock.call(['bridge', 'fdb', 'delete', constants.FLOODING_ENTRY[0], 'dev', 'vxlan-1', 'dst', 'agent_ip'], run_as_root=True, check_exit_code=False), mock.call(['bridge', 'fdb', 'delete', 'port_mac', 'dev', 'vxlan-1', 'dst', 'agent_ip'], run_as_root=True, check_exit_code=False), ] execute_fn.assert_has_calls(expected) if proxy_enabled: del_fn.assert_called_with('port_ip', 'port_mac', 'vxlan-1') else: del_fn.assert_not_called() def test_fdb_remove(self): self._test_fdb_remove(proxy_enabled=False) def test_fdb_remove_with_arp_responder(self): cfg.CONF.set_override('arp_responder', True, 'VXLAN') self._test_fdb_remove(proxy_enabled=True) def _test_fdb_update_chg_ip(self, proxy_enabled=False): fdb_entries = {'chg_ip': {'net_id': {'agent_ip': {'before': [['port_mac', 'port_ip_1']], 'after': [['port_mac', 'port_ip_2']]}}}} with mock.patch.object(ip_lib, 'add_neigh_entry', return_value='') as add_fn, \ mock.patch.object(ip_lib, 'delete_neigh_entry', return_value='') as del_fn: self.lb_rpc.fdb_update(None, fdb_entries) if proxy_enabled: del_fn.assert_called_with('port_ip_1', 'port_mac', 'vxlan-1') add_fn.assert_called_with('port_ip_2', 'port_mac', 'vxlan-1') else: del_fn.assert_not_called() add_fn.assert_not_called() def test_fdb_update_chg_ip(self): self._test_fdb_update_chg_ip(proxy_enabled=False) def test_fdb_update_chg_ip_with_arp_responder(self): cfg.CONF.set_override('arp_responder', True, 'VXLAN') self._test_fdb_update_chg_ip(proxy_enabled=True) def test_fdb_update_chg_ip_empty_lists(self): fdb_entries = {'chg_ip': {'net_id': {'agent_ip': {}}}} self.lb_rpc.fdb_update(None, fdb_entries) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.467046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/mech_driver/0000755000175000017500000000000000000000000031412 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/mech_driver/__init__.py0000644000175000017500000000000000000000000033511 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/mech_driver/test_mech_linuxbridge.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/mech_driver/test_mech_l0000644000175000017500000000565300000000000033634 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
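# NOTE: a minimal, illustrative sketch of the binding gate that the test
# cases below exercise. It is an assumption written from the GOOD_*/BAD_*
# fixtures, not code copied from the driver: an agent-based mechanism
# driver binds only on a live agent whose reported configuration can reach
# the segment, via an interface mapping for the segment's physical network
# or a supported tunnel type. The helper name and dict keys are
# hypothetical.
def _sketch_agent_can_bind(agent, network_type, physical_network=None):
    if not agent.get('alive'):
        return False
    configurations = agent.get('configurations', {})
    if network_type in ('flat', 'vlan'):
        # GOOD_MAPPINGS below maps physical networks to host interfaces.
        return physical_network in configurations.get(
            'interface_mappings', {})
    # Tunnel segments (e.g. 'gre', 'vxlan') need the agent to list them.
    return network_type in configurations.get('tunnel_types', [])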
from neutron_lib.api.definitions import portbindings from neutron_lib import constants from neutron.plugins.ml2.drivers.linuxbridge.mech_driver \ import mech_linuxbridge from neutron.tests.unit.plugins.ml2 import _test_mech_agent as base class LinuxbridgeMechanismBaseTestCase(base.AgentMechanismBaseTestCase): VIF_TYPE = portbindings.VIF_TYPE_BRIDGE CAP_PORT_FILTER = True AGENT_TYPE = constants.AGENT_TYPE_LINUXBRIDGE GOOD_MAPPINGS = {'fake_physical_network': 'fake_interface'} GOOD_TUNNEL_TYPES = ['gre', 'vxlan'] GOOD_CONFIGS = {'interface_mappings': GOOD_MAPPINGS, 'tunnel_types': GOOD_TUNNEL_TYPES} BAD_MAPPINGS = {'wrong_physical_network': 'wrong_interface'} BAD_TUNNEL_TYPES = ['bad_tunnel_type'] BAD_CONFIGS = {'interface_mappings': BAD_MAPPINGS, 'tunnel_types': BAD_TUNNEL_TYPES} AGENTS = [{'alive': True, 'configurations': GOOD_CONFIGS, 'host': 'host'}] AGENTS_DEAD = [{'alive': False, 'configurations': GOOD_CONFIGS, 'host': 'dead_host'}] AGENTS_BAD = [{'alive': False, 'configurations': GOOD_CONFIGS, 'host': 'bad_host_1'}, {'alive': True, 'configurations': BAD_CONFIGS, 'host': 'bad_host_2'}] def setUp(self): super(LinuxbridgeMechanismBaseTestCase, self).setUp() self.driver = mech_linuxbridge.LinuxbridgeMechanismDriver() self.driver.initialize() class LinuxbridgeMechanismGenericTestCase(LinuxbridgeMechanismBaseTestCase, base.AgentMechanismGenericTestCase): pass class LinuxbridgeMechanismLocalTestCase(LinuxbridgeMechanismBaseTestCase, base.AgentMechanismLocalTestCase): pass class LinuxbridgeMechanismFlatTestCase(LinuxbridgeMechanismBaseTestCase, base.AgentMechanismFlatTestCase): pass class LinuxbridgeMechanismVlanTestCase(LinuxbridgeMechanismBaseTestCase, base.AgentMechanismVlanTestCase): pass class LinuxbridgeMechanismGreTestCase(LinuxbridgeMechanismBaseTestCase, base.AgentMechanismGreTestCase): pass ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.467046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/macvtap/0000755000175000017500000000000000000000000026242 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/macvtap/__init__.py0000644000175000017500000000000000000000000030341 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.467046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/macvtap/agent/0000755000175000017500000000000000000000000027340 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/macvtap/agent/__init__.py0000644000175000017500000000000000000000000031437 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/macvtap/agent/test_macvtap_neutron_agent.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/macvtap/agent/test_macvtap_neutron_0000644000175000017500000002672600000000000033703 0ustar00coreycorey00000000000000# Copyright (c) 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import sys import mock from neutron_lib.agent import topics from neutron_lib.utils import helpers from oslo_config import cfg from oslo_service import service from neutron.agent.linux import ip_lib from neutron.common import config as common_config from neutron.plugins.ml2.drivers.agent import _agent_manager_base as amb from neutron.plugins.ml2.drivers.macvtap.agent import macvtap_neutron_agent from neutron.plugins.ml2.drivers.macvtap import macvtap_common from neutron.tests import base INTERFACE_MAPPINGS = {'physnet1': 'eth1'} NETWORK_ID = 'net-id123' NETWORK_SEGMENT_VLAN = amb.NetworkSegment('vlan', 'physnet1', 1) NETWORK_SEGMENT_FLAT = amb.NetworkSegment('flat', 'physnet1', None) class TestMacvtapRPCCallbacks(base.BaseTestCase): def setUp(self): super(TestMacvtapRPCCallbacks, self).setUp() agent = mock.Mock() agent.mgr = mock.Mock() agent.mgr.interface_mappings = INTERFACE_MAPPINGS self.rpc = macvtap_neutron_agent.MacvtapRPCCallBack(mock.Mock(), agent, mock.Mock()) def test_network_delete_vlan(self): self.rpc.network_map = {NETWORK_ID: NETWORK_SEGMENT_VLAN} with mock.patch.object(ip_lib.IpLinkCommand, 'delete') as mock_del,\ mock.patch.object(macvtap_common, 'get_vlan_device_name', return_value='vlan1'),\ mock.patch.object(ip_lib.IPDevice, 'exists', return_value=True): self.rpc.network_delete("anycontext", network_id=NETWORK_ID) self.assertTrue(mock_del.called) def test_network_delete_flat(self): self.rpc.network_map = {NETWORK_ID: NETWORK_SEGMENT_FLAT} with mock.patch.object(ip_lib.IpLinkCommand, 'delete') as mock_del: self.rpc.network_delete( "anycontext", network_id=NETWORK_SEGMENT_FLAT.segmentation_id) self.assertFalse(mock_del.called) def test_port_update(self): port = {'id': 'port-id123', 'mac_address': 'mac1'} self.rpc.port_update(context=None, port=port) self.assertEqual(set(['mac1']), self.rpc.updated_devices) class TestMacvtapManager(base.BaseTestCase): def setUp(self): super(TestMacvtapManager, self).setUp() with mock.patch.object(ip_lib, 'device_exists', return_value=True): self.mgr = macvtap_neutron_agent.MacvtapManager(INTERFACE_MAPPINGS) def test_validate_interface_mappings_dev_exists(self): good_mapping = {'physnet1': 'eth1', 'physnet2': 'eth2'} self.mgr.interface_mappings = good_mapping with mock.patch.object(ip_lib, 'device_exists', return_value=True)\ as mock_de: self.mgr.validate_interface_mappings() mock_de.assert_any_call('eth1') mock_de.assert_any_call('eth2') self.assertEqual(2, mock_de.call_count) def test_validate_interface_mappings_dev_not_exists(self): bad_mapping = {'physnet1': 'foo'} self.mgr.interface_mappings = bad_mapping with mock.patch.object(ip_lib, 'device_exists', return_value=False)\ as mock_de, mock.patch.object(sys, 'exit') as mock_exit: self.mgr.validate_interface_mappings() mock_de.assert_called_with('foo') mock_exit.assert_called_once_with(1) def _test_ensure_port_admin_state(self, admin_state): dev = 'macvtap1' mac = 'mac1' self.mgr.mac_device_name_mappings = {mac: dev} with mock.patch.object(ip_lib, 'IPDevice') as mock_ip_dev: self.mgr.ensure_port_admin_state(mac, admin_state) self.assertEqual(admin_state, 
mock_ip_dev(dev).link.set_up.called) self.assertNotEqual(admin_state, mock_ip_dev(dev).link.set_down.called) def test_ensure_port_admin_state_up(self): self._test_ensure_port_admin_state(True) def test_ensure_port_admin_state_down(self): self._test_ensure_port_admin_state(False) def test_get_all_devices(self): listing = ['foo', 'macvtap0', 'macvtap1', 'bar'] # set some mac mappings to make sure they are cleaned up self.mgr.mac_device_name_mappings = {'foo': 'bar'} with mock.patch.object(os, 'listdir', return_value=listing) as mock_ld,\ mock.patch.object(ip_lib, 'get_device_mac') as mock_gdn: mock_gdn.side_effect = ['mac0', 'mac1'] result = self.mgr.get_all_devices() mock_ld.assert_called_once_with(macvtap_neutron_agent.MACVTAP_FS) self.assertEqual(set(['mac0', 'mac1']), result) self.assertEqual({'mac0': 'macvtap0', 'mac1': 'macvtap1'}, self.mgr.mac_device_name_mappings) def test_get_agent_configurations(self): expected = {'interface_mappings': INTERFACE_MAPPINGS} self.assertEqual(expected, self.mgr.get_agent_configurations()) def test_get_agent_id_ok(self): mock_devices = [ip_lib.IPDevice('macvtap1')] with mock.patch.object(ip_lib.IPWrapper, 'get_devices', return_value=mock_devices),\ mock.patch.object(ip_lib, 'get_device_mac', return_value='foo:bar'): self.assertEqual('macvtapfoobar', self.mgr.get_agent_id()) def test_get_agent_id_fail(self): mock_devices = [] with mock.patch.object(ip_lib.IPWrapper, 'get_devices', return_value=mock_devices),\ mock.patch.object(sys, 'exit') as mock_exit: self.mgr.get_agent_id() mock_exit.assert_called_once_with(1) def test_get_agent_id_no_mac(self): mock_devices = [ip_lib.IPDevice('macvtap0'), ip_lib.IPDevice('macvtap1')] with mock.patch.object(ip_lib.IPWrapper, 'get_devices', return_value=mock_devices),\ mock.patch.object(ip_lib, 'get_device_mac', side_effect=[None, 'foo:bar:1']) as mock_gdm: self.assertEqual('macvtapfoobar1', self.mgr.get_agent_id()) mock_gdm.assert_has_calls([mock.call('macvtap0'), mock.call('macvtap1')]) def test_get_extension_driver_type(self): self.assertEqual('macvtap', self.mgr.get_extension_driver_type()) def test_get_rpc_callbacks(self): context = mock.Mock() agent = mock.Mock() sg_agent = mock.Mock() obj = self.mgr.get_rpc_callbacks(context, agent, sg_agent) self.assertIsInstance(obj, macvtap_neutron_agent.MacvtapRPCCallBack) def test_get_rpc_consumers(self): consumers = [[topics.PORT, topics.UPDATE], [topics.NETWORK, topics.DELETE], [topics.SECURITY_GROUP, topics.UPDATE]] self.assertEqual(consumers, self.mgr.get_rpc_consumers()) def test_plug_interface(self): self.mgr.mac_device_name_mappings['mac1'] = 'macvtap0' with mock.patch.object(ip_lib.IpLinkCommand, 'set_allmulticast_on')\ as mock_sao: self.mgr.plug_interface('network_id', 'network_segment', 'mac1', 'device_owner') self.assertTrue(mock_sao.called) class TestMacvtapMain(base.BaseTestCase): def test_parse_interface_mappings_good(self): cfg.CONF.set_override('physical_interface_mappings', 'good_mapping', 'macvtap') with mock.patch.object(helpers, 'parse_mappings', return_value=INTERFACE_MAPPINGS): mappings = macvtap_neutron_agent.parse_interface_mappings() self.assertEqual(INTERFACE_MAPPINGS, mappings) def test_parse_interface_mappings_bad(self): cfg.CONF.set_override('physical_interface_mappings', 'bad_mapping', 'macvtap') with mock.patch.object(helpers, 'parse_mappings', side_effect=ValueError('bad mapping')),\ mock.patch.object(sys, 'exit') as mock_exit: macvtap_neutron_agent.parse_interface_mappings() mock_exit.assert_called_with(1) def 
test_parse_interface_mappings_no_mapping(self): with mock.patch.object(sys, 'exit') as mock_exit: macvtap_neutron_agent.parse_interface_mappings() mock_exit.assert_called_with(1) def test_validate_firewall_driver_noop_long(self): cfg.CONF.set_override('firewall_driver', 'neutron.agent.firewall.NoopFirewallDriver', 'SECURITYGROUP') macvtap_neutron_agent.validate_firewall_driver() def test_validate_firewall_driver_noop(self): cfg.CONF.set_override('firewall_driver', 'noop', 'SECURITYGROUP') macvtap_neutron_agent.validate_firewall_driver() def test_validate_firewall_driver_other(self): cfg.CONF.set_override('firewall_driver', 'foo', 'SECURITYGROUP') with mock.patch.object(sys, 'exit')as mock_exit: macvtap_neutron_agent.validate_firewall_driver() mock_exit.assert_called_with(1) def test_main(self): cfg.CONF.set_override('quitting_rpc_timeout', 1, 'AGENT') cfg.CONF.set_override('polling_interval', 2, 'AGENT') mock_manager_return = mock.Mock(spec=amb.CommonAgentManagerBase) mock_launch_return = mock.Mock() with mock.patch.object(common_config, 'init'),\ mock.patch.object(common_config, 'setup_logging'),\ mock.patch.object(service, 'launch', return_value=mock_launch_return) as mock_launch,\ mock.patch.object(macvtap_neutron_agent, 'parse_interface_mappings', return_value=INTERFACE_MAPPINGS) as mock_pim,\ mock.patch.object(macvtap_neutron_agent, 'validate_firewall_driver') as mock_vfd,\ mock.patch('neutron.plugins.ml2.drivers.agent._common_agent.' 'CommonAgentLoop') as mock_loop,\ mock.patch('neutron.plugins.ml2.drivers.macvtap.agent.' 'macvtap_neutron_agent.MacvtapManager', return_value=mock_manager_return) as mock_manager: macvtap_neutron_agent.main() self.assertTrue(mock_vfd.called) self.assertTrue(mock_pim.called) mock_manager.assert_called_with(INTERFACE_MAPPINGS) mock_loop.assert_called_with(mock_manager_return, 2, 1, 'Macvtap agent', 'neutron-macvtap-agent') self.assertTrue(mock_launch.called) self.assertTrue(mock_launch_return.wait.called) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.467046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/macvtap/mech_driver/0000755000175000017500000000000000000000000030531 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/macvtap/mech_driver/__init__.py0000644000175000017500000000000000000000000032630 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/macvtap/mech_driver/test_mech_macvtap.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/macvtap/mech_driver/test_mech_macvt0000644000175000017500000001571200000000000033627 0ustar00coreycorey00000000000000# Copyright (c) 2015 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
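# NOTE: a small sketch of the macvtap source naming convention that the
# test cases below assert (an approximation mirroring the expected
# vif_details values, not code copied from mech_macvtap): a flat segment
# uses the mapped physical interface as-is, a vlan segment appends
# '.<segmentation_id>'. The helper name is hypothetical.
def _sketch_macvtap_source(interface, network_type, segmentation_id=None):
    if network_type == 'vlan' and segmentation_id:
        # e.g. _sketch_macvtap_source('fake_if', 'vlan', 1234) -> 'fake_if.1234'
        return '%s.%s' % (interface, segmentation_id)
    return interface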
from neutron_lib.api.definitions import portbindings from neutron_lib import constants from neutron_lib.plugins.ml2 import api from neutron.plugins.ml2.drivers.macvtap.mech_driver import mech_macvtap from neutron.tests.unit.plugins.ml2 import _test_mech_agent as base class MacvtapMechanismBaseTestCase(base.AgentMechanismBaseTestCase): VIF_TYPE = portbindings.VIF_TYPE_MACVTAP CAP_PORT_FILTER = False AGENT_TYPE = constants.AGENT_TYPE_MACVTAP GOOD_MAPPINGS = {'fake_physical_network': 'fake_if'} GOOD_CONFIGS = {'interface_mappings': GOOD_MAPPINGS} BAD_MAPPINGS = {'wrong_physical_network': 'wrong_if'} BAD_CONFIGS = {'interface_mappings': BAD_MAPPINGS} AGENT = {'alive': True, 'configurations': GOOD_CONFIGS, 'host': 'host'} AGENTS = [AGENT] AGENTS_DEAD = [{'alive': False, 'configurations': GOOD_CONFIGS, 'host': 'dead_host'}] AGENTS_BAD = [{'alive': False, 'configurations': GOOD_CONFIGS, 'host': 'bad_host_1'}, {'alive': True, 'configurations': BAD_CONFIGS, 'host': 'bad_host_2'}] def setUp(self): super(MacvtapMechanismBaseTestCase, self).setUp() self.driver = mech_macvtap.MacvtapMechanismDriver() self.driver.initialize() class MacvtapMechanismGenericTestCase(MacvtapMechanismBaseTestCase, base.AgentMechanismGenericTestCase): pass class MacvtapMechanismMigrationTestCase(object): # MIGRATION_SEGMENT must be overridden for the specific type being tested MIGRATION_SEGMENT = None MIGRATION_SEGMENTS = [MIGRATION_SEGMENT] def test__is_live_migration_true(self): original = {"binding:profile": {"migrating_to": "host"}} self._test__is_live_migration(True, original) def test__is_live_migration_false(self): self._test__is_live_migration(False, {}) def test__is_live_migration_false_None_original(self): self._test__is_live_migration(False, None) def _test__is_live_migration(self, expected, original): context = base.FakePortContext(self.AGENT_TYPE, self.AGENTS, self.MIGRATION_SEGMENTS, vnic_type=self.VNIC_TYPE, original=original) self.assertEqual(expected, self.driver._is_live_migration(context)) def _test_try_to_bind_segment_for_agent_migration(self, expected, original): context = base.FakePortContext(self.AGENT_TYPE, self.AGENTS, self.MIGRATION_SEGMENTS, vnic_type=self.VNIC_TYPE, original=original) result = self.driver.try_to_bind_segment_for_agent( context, self.MIGRATION_SEGMENT, self.AGENT) self.assertEqual(expected, result) def test_try_to_bind_segment_for_agent_migration_abort(self): original = {"binding:profile": {"migrating_to": "host"}, "binding:vif_details": {"macvtap_source": "bad_source"}, "binding:host_id": "source_host"} self._test_try_to_bind_segment_for_agent_migration(False, original) def test_try_to_bind_segment_for_agent_migration_ok(self): macvtap_src = "fake_if" seg_id = self.MIGRATION_SEGMENT.get(api.SEGMENTATION_ID) if seg_id: # In the vlan case, macvtap source name ends with .vlan_id macvtap_src += "." 
+ str(seg_id) original = {"binding:profile": {"migrating_to": "host"}, "binding:vif_details": {"macvtap_source": macvtap_src}, "binding:host_id": "source_host"} self._test_try_to_bind_segment_for_agent_migration(True, original) class MacvtapMechanismFlatTestCase(MacvtapMechanismBaseTestCase, base.AgentMechanismFlatTestCase, MacvtapMechanismMigrationTestCase): MIGRATION_SEGMENT = {api.ID: 'flat_segment_id', api.NETWORK_TYPE: 'flat', api.PHYSICAL_NETWORK: 'fake_physical_network'} def test_type_flat_vif_details(self): context = base.FakePortContext(self.AGENT_TYPE, self.AGENTS, self.FLAT_SEGMENTS, vnic_type=self.VNIC_TYPE) self.driver.bind_port(context) vif_details = context._bound_vif_details self.assertIsNone(vif_details.get(portbindings.VIF_DETAILS_VLAN)) self.assertEqual("bridge", vif_details.get( portbindings.VIF_DETAILS_MACVTAP_MODE)) self.assertEqual("fake_if", vif_details.get( portbindings.VIF_DETAILS_PHYSICAL_INTERFACE)) self.assertEqual("fake_if", vif_details.get( portbindings.VIF_DETAILS_MACVTAP_SOURCE)) class MacvtapMechanismVlanTestCase(MacvtapMechanismBaseTestCase, base.AgentMechanismVlanTestCase, MacvtapMechanismMigrationTestCase): MIGRATION_SEGMENT = {api.ID: 'vlan_segment_id', api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'fake_physical_network', api.SEGMENTATION_ID: 1234} def test_type_vlan_vif_details(self): context = base.FakePortContext(self.AGENT_TYPE, self.AGENTS, self.VLAN_SEGMENTS, vnic_type=self.VNIC_TYPE) self.driver.bind_port(context) vif_details = context._bound_vif_details self.assertEqual(1234, vif_details.get(portbindings.VIF_DETAILS_VLAN)) self.assertEqual("bridge", vif_details.get( portbindings.VIF_DETAILS_MACVTAP_MODE)) self.assertEqual("fake_if", vif_details.get( portbindings.VIF_DETAILS_PHYSICAL_INTERFACE)) self.assertEqual("fake_if.1234", vif_details.get( portbindings.VIF_DETAILS_MACVTAP_SOURCE)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/macvtap/test_macvtap_common.py0000644000175000017500000000463700000000000032670 0ustar00coreycorey00000000000000# Copyright (c) 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
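# NOTE: an illustrative re-implementation of the naming scheme the tests
# below encode (the constants and helper name are assumptions; the real
# helper is macvtap_common.get_vlan_device_name). Linux limits interface
# names to 15 characters and up to 5 of them are reserved for '.<vlan>'
# (a dot plus as many as four vlan digits), so device names longer than
# 10 characters are shortened to their first 4 characters plus the first
# 6 hex characters of their SHA-1 digest. Hashing only the device name
# keeps the prefix identical across vlan ids, which
# test_get_vlan_subinterface_name_advanced below relies on.
import hashlib


def _sketch_get_vlan_device_name(src_dev, vlan):
    max_len = 15          # IFNAMSIZ - 1
    vlan_postfix_len = 5  # '.' plus up to 4 vlan digits
    if len(src_dev) <= max_len - vlan_postfix_len:
        return '%s.%s' % (src_dev, vlan)
    hashed = hashlib.sha1(src_dev.encode('utf-8')).hexdigest()[0:6]
    return '%s%s.%s' % (src_dev[0:4], hashed, vlan)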
import hashlib import mock from neutron.plugins.ml2.drivers.macvtap import macvtap_common as m_common from neutron.tests import base MOCKED_HASH = "MOCKEDHASH" class MockSHA(object): def hexdigest(self): return MOCKED_HASH class MacvtapCommonTestCase(base.BaseTestCase): @mock.patch.object(hashlib, 'sha1', return_value=MockSHA()) def test_get_vlan_device_name(self, mocked_hash): # only the first six chars of the hash are being used in the algorithm hash_used = MOCKED_HASH[0:6] self.assertEqual('10charrrrr.1', m_common.get_vlan_device_name('10charrrrr', "1")) self.assertEqual('11ch' + hash_used + '.1', m_common.get_vlan_device_name('11charrrrrr', "1")) self.assertEqual('14ch' + hash_used + '.1', m_common.get_vlan_device_name('14charrrrrrrrr', "1")) self.assertEqual('14ch' + hash_used + '.1111', m_common.get_vlan_device_name('14charrrrrrrrr', "1111")) def test_get_vlan_subinterface_name_advanced(self): """Ensure the same hash is used for long interface names. If the generated vlan device name would be too long, make sure that everything before the '.' is equal. This might be helpful when debugging problems. """ max_device_name = "15charrrrrrrrrr" vlan_dev_name1 = m_common.get_vlan_device_name(max_device_name, "1") vlan_dev_name2 = m_common.get_vlan_device_name(max_device_name, "1111") self.assertEqual(vlan_dev_name1.partition(".")[0], vlan_dev_name2.partition(".")[0]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/mech_fake_agent.py0000644000175000017500000000605000000000000030242 0ustar00coreycorey00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014 Fumihiko Kakuma # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Based on openvswitch mechanism driver. # # Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import portbindings from neutron_lib import constants from neutron.agent import securitygroups_rpc from neutron.plugins.ml2.drivers import mech_agent class FakeAgentMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): """ML2 mechanism driver for testing. This is a ML2 mechanism driver used by UTs in test_l2population. This driver implements minimum requirements for L2pop mech driver. As there are some agent-based mechanism drivers and OVS agent mech driver is not the only one to support L2pop, it is useful to test L2pop with multiple drivers like this to check the minimum requirements. NOTE(yamamoto): This is a modified copy of ofagent mechanism driver as of writing this. There's no need to keep this synced with the "real" ofagent mechansim driver or its agent. 
""" def __init__(self): sg_enabled = securitygroups_rpc.is_firewall_enabled() vif_details = {portbindings.CAP_PORT_FILTER: sg_enabled, portbindings.OVS_HYBRID_PLUG: sg_enabled, portbindings.VIF_DETAILS_CONNECTIVITY: portbindings.CONNECTIVITY_L2, } super(FakeAgentMechanismDriver, self).__init__( # NOTE(yamamoto): l2pop driver has a hardcoded list of # supported agent types. constants.AGENT_TYPE_OFA, portbindings.VIF_TYPE_OVS, vif_details) def get_allowed_network_types(self, agent): return (agent['configurations'].get('tunnel_types', []) + [constants.TYPE_LOCAL, constants.TYPE_FLAT, constants.TYPE_VLAN]) def get_mappings(self, agent): return dict(agent['configurations'].get('interface_mappings', {})) class AnotherFakeAgentMechanismDriver(FakeAgentMechanismDriver): pass class FakeAgentMechanismDriverL3(FakeAgentMechanismDriver): """ML2 mechanism driver for testing, with L3 connectivity only""" def __init__(self): super(FakeAgentMechanismDriverL3, self).__init__() self.vif_details[portbindings.VIF_DETAILS_CONNECTIVITY] = ( portbindings.CONNECTIVITY_L3) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/mech_faulty_agent.py0000644000175000017500000000243200000000000030640 0ustar00coreycorey00000000000000# All Rights Reserved. # # Based on openvswitch mechanism driver. # # Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.plugins.ml2.drivers import mech_agent class FaultyAgentMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): """ML2 mechanism driver for testing of handlers for faulty drivers The purpose of this class is to test the ml2 plugin manager handlers for on_load_failure_callback parameter provided by the stevedore.named.NamedExtensionManager class. 
""" def __init__(self): raise Exception("Using a faulty driver for testing purposes.") def get_allowed_network_types(self, agent): pass def get_mappings(self, agent): pass ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.467046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/0000755000175000017500000000000000000000000026745 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/__init__.py0000644000175000017500000000000000000000000031044 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.467046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/0000755000175000017500000000000000000000000030043 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/__init__.py0000644000175000017500000000000000000000000032142 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.467046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/common/0000755000175000017500000000000000000000000031333 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/common/__init__.py0000644000175000017500000000000000000000000033432 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/common/test_config.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/common/test_config0000644000175000017500000001316300000000000033566 0ustar00coreycorey00000000000000# Copyright 2014 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from neutron_lib.utils import helpers from oslo_config import cfg from neutron.conf.plugins.ml2.drivers.mech_sriov import agent_common \ as agent_common_config from neutron.plugins.ml2.drivers.mech_sriov.agent.common import config from neutron.plugins.ml2.drivers.mech_sriov.agent \ import sriov_nic_agent as agent from neutron.tests import base class TestSriovAgentConfig(base.BaseTestCase): EXCLUDE_DEVICES_LIST = ['p7p1:0000:07:00.1;0000:07:00.2', 'p3p1:0000:04:00.3'] EXCLUDE_DEVICES_LIST_INVALID = ['p7p2:0000:07:00.1;0000:07:00.2'] EXCLUDE_DEVICES_WITH_SPACES_LIST = ['p7p1: 0000:07:00.1 ; 0000:07:00.2', 'p3p1:0000:04:00.3 '] EXCLUDE_DEVICES_WITH_SPACES_ERROR = ['p7p1', 'p3p1:0000:04:00.3 '] EXCLUDE_DEVICES = {'p7p1': set(['0000:07:00.1', '0000:07:00.2']), 'p3p1': set(['0000:04:00.3'])} DEVICE_MAPPING_LIST = ['physnet7:p7p1', 'physnet3:p3p1'] DEVICE_MAPPING_WITH_ERROR_LIST = ['physnet7', 'physnet3:p3p1'] DEVICE_MAPPING_WITH_SPACES_LIST = ['physnet7 : p7p1', 'physnet3 : p3p1 '] DEVICE_MAPPING = {'physnet7': ['p7p1'], 'physnet3': ['p3p1']} def test_defaults(self): self.assertEqual(agent_common_config.DEFAULT_DEVICE_MAPPINGS, cfg.CONF.SRIOV_NIC.physical_device_mappings) self.assertEqual(agent_common_config.DEFAULT_EXCLUDE_DEVICES, cfg.CONF.SRIOV_NIC.exclude_devices) self.assertEqual(2, cfg.CONF.AGENT.polling_interval) def test_device_mappings(self): cfg.CONF.set_override('physical_device_mappings', self.DEVICE_MAPPING_LIST, 'SRIOV_NIC') device_mappings = helpers.parse_mappings( cfg.CONF.SRIOV_NIC.physical_device_mappings, unique_keys=False) self.assertEqual(self.DEVICE_MAPPING, device_mappings) def test_device_mappings_with_error(self): cfg.CONF.set_override('physical_device_mappings', self.DEVICE_MAPPING_WITH_ERROR_LIST, 'SRIOV_NIC') self.assertRaises(ValueError, helpers.parse_mappings, cfg.CONF.SRIOV_NIC.physical_device_mappings, unique_keys=False) def test_device_mappings_with_spaces(self): cfg.CONF.set_override('physical_device_mappings', self.DEVICE_MAPPING_WITH_SPACES_LIST, 'SRIOV_NIC') device_mappings = helpers.parse_mappings( cfg.CONF.SRIOV_NIC.physical_device_mappings, unique_keys=False) self.assertEqual(self.DEVICE_MAPPING, device_mappings) def test_exclude_devices(self): cfg.CONF.set_override('exclude_devices', self.EXCLUDE_DEVICES_LIST, 'SRIOV_NIC') exclude_devices = config.parse_exclude_devices( cfg.CONF.SRIOV_NIC.exclude_devices) self.assertEqual(self.EXCLUDE_DEVICES, exclude_devices) def test_exclude_devices_with_spaces(self): cfg.CONF.set_override('exclude_devices', self.EXCLUDE_DEVICES_WITH_SPACES_LIST, 'SRIOV_NIC') exclude_devices = config.parse_exclude_devices( cfg.CONF.SRIOV_NIC.exclude_devices) self.assertEqual(self.EXCLUDE_DEVICES, exclude_devices) def test_exclude_devices_with_error(self): cfg.CONF.set_override('exclude_devices', self.EXCLUDE_DEVICES_WITH_SPACES_ERROR, 'SRIOV_NIC') self.assertRaises(ValueError, config.parse_exclude_devices, cfg.CONF.SRIOV_NIC.exclude_devices) def test_validate_config_ok(self): cfg.CONF.set_override('physical_device_mappings', self.DEVICE_MAPPING_LIST, 'SRIOV_NIC') cfg.CONF.set_override('exclude_devices', self.EXCLUDE_DEVICES_LIST, 'SRIOV_NIC') config_parser = agent.SriovNicAgentConfigParser() config_parser.parse() device_mappings = config_parser.device_mappings exclude_devices = config_parser.exclude_devices self.assertEqual(self.EXCLUDE_DEVICES, exclude_devices) self.assertEqual(self.DEVICE_MAPPING, device_mappings) def test_validate_config_fail(self): cfg.CONF.set_override('physical_device_mappings', self.DEVICE_MAPPING_LIST, 
'SRIOV_NIC') cfg.CONF.set_override('exclude_devices', self.EXCLUDE_DEVICES_LIST_INVALID, 'SRIOV_NIC') config_parser = agent.SriovNicAgentConfigParser() self.assertRaises(ValueError, config_parser.parse) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.471046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/0000755000175000017500000000000000000000000033615 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/__init__.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/0000644000175000017500000000000000000000000033605 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022600000000000011455 xustar0000000000000000128 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/test_qos_driver.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/0000644000175000017500000001427500000000000033630 0ustar00coreycorey00000000000000# Copyright 2015 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
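# NOTE: an illustrative sketch of the behaviour the tests below assert
# (assumptions drawn from the mock expectations, not from qos_driver
# itself): a bandwidth-limit rule is pushed as a per-VF max rate keyed by
# MAC and PCI slot; deleting the policy from a still-assigned VF resets
# the rate to 0, while a released VF (no 'device_owner' in the port dict)
# is cleared by PCI slot alone. The helper name is hypothetical;
# eswitch_mgr stands in for the mocked eswitch manager.
def _sketch_delete_max_rate(eswitch_mgr, port):
    mac = port['device']
    pci_slot = port['profile']['pci_slot']
    if port.get('device_owner'):
        eswitch_mgr.set_device_max_rate(mac, pci_slot, 0)
    else:
        eswitch_mgr.clear_max_rate(pci_slot)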
import mock from neutron_lib import constants from neutron_lib import context from neutron_lib.services.qos import constants as qos_consts from oslo_utils import uuidutils from neutron.objects.qos import policy from neutron.objects.qos import rule from neutron.plugins.ml2.drivers.mech_sriov.agent.common import exceptions from neutron.plugins.ml2.drivers.mech_sriov.agent.extension_drivers import ( qos_driver) from neutron.tests import base class QosSRIOVAgentDriverTestCase(base.BaseTestCase): ASSIGNED_MAC = '00:00:00:00:00:66' PCI_SLOT = '0000:06:00.1' def setUp(self): super(QosSRIOVAgentDriverTestCase, self).setUp() self.context = context.get_admin_context() self.qos_driver = qos_driver.QosSRIOVAgentDriver() self.qos_driver.initialize() self.qos_driver.eswitch_mgr = mock.Mock() self.qos_driver.eswitch_mgr.set_device_max_rate = mock.Mock() self.qos_driver.eswitch_mgr.set_device_min_tx_rate = mock.Mock() self.qos_driver.eswitch_mgr.clear_max_rate = mock.Mock() self.qos_driver.eswitch_mgr.clear_min_tx_rate = mock.Mock() self.max_rate_mock = self.qos_driver.eswitch_mgr.set_device_max_rate self.min_tx_rate_mock = \ self.qos_driver.eswitch_mgr.set_device_min_tx_rate self.clear_max_rate_mock = self.qos_driver.eswitch_mgr.clear_max_rate self.clear_min_tx_rate_mock = \ self.qos_driver.eswitch_mgr.clear_min_tx_rate self.rule = self._create_bw_limit_rule_obj() self.rule_min_tx_rate = self._create_minimum_bandwidth_rule_obj() self.qos_policy = self._create_qos_policy_obj([self.rule]) self.qos_policy_min_tx_rate = self._create_qos_policy_obj( [self.rule_min_tx_rate]) self.port = self._create_fake_port(self.qos_policy.id) self.port_min = self._create_fake_port(self.qos_policy_min_tx_rate.id) def _create_bw_limit_rule_obj(self): rule_obj = rule.QosBandwidthLimitRule() rule_obj.id = uuidutils.generate_uuid() rule_obj.max_kbps = 2 rule_obj.max_burst_kbps = 200 rule_obj.obj_reset_changes() return rule_obj def _create_minimum_bandwidth_rule_obj(self): rule_obj = rule.QosMinimumBandwidthRule() rule_obj.id = uuidutils.generate_uuid() rule_obj.min_kbps = 200 rule_obj.direction = constants.EGRESS_DIRECTION rule_obj.obj_reset_changes() return rule_obj def _create_qos_policy_obj(self, rules): policy_dict = {'id': uuidutils.generate_uuid(), 'project_id': uuidutils.generate_uuid(), 'name': 'test', 'description': 'test', 'shared': False, 'rules': rules} policy_obj = policy.QosPolicy(self.context, **policy_dict) policy_obj.obj_reset_changes() for policy_rule in policy_obj.rules: policy_rule.qos_policy_id = policy_obj.id policy_rule.obj_reset_changes() return policy_obj def _create_fake_port(self, qos_policy_id): return {'port_id': uuidutils.generate_uuid(), 'profile': {'pci_slot': self.PCI_SLOT}, 'device': self.ASSIGNED_MAC, qos_consts.QOS_POLICY_ID: qos_policy_id, 'device_owner': uuidutils.generate_uuid()} def test_create_rule(self): self.qos_driver.create(self.port, self.qos_policy) self.max_rate_mock.assert_called_once_with( self.ASSIGNED_MAC, self.PCI_SLOT, self.rule.max_kbps) def test_update_rule(self): self.qos_driver.update(self.port, self.qos_policy) self.max_rate_mock.assert_called_once_with( self.ASSIGNED_MAC, self.PCI_SLOT, self.rule.max_kbps) def test_delete_rules_on_assigned_vf(self): self.qos_driver.delete(self.port, self.qos_policy) self.max_rate_mock.assert_called_once_with( self.ASSIGNED_MAC, self.PCI_SLOT, 0) def test_delete_rules_on_released_vf(self): del self.port['device_owner'] self.qos_driver.delete(self.port, self.qos_policy) self.clear_max_rate_mock.assert_called_once_with(self.PCI_SLOT) def 
test__set_vf_max_rate_captures_sriov_failure(self): self.max_rate_mock.side_effect = exceptions.SriovNicError() self.qos_driver._set_vf_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT) def test__set_vf_max_rate_unknown_device(self): with mock.patch.object(self.qos_driver.eswitch_mgr, 'device_exists', return_value=False): self.qos_driver._set_vf_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT) self.assertFalse(self.max_rate_mock.called) def test_create_minimum_bandwidth(self): self.qos_driver.create(self.port_min, self.qos_policy_min_tx_rate) self.min_tx_rate_mock.assert_called_once_with( self.ASSIGNED_MAC, self.PCI_SLOT, self.rule_min_tx_rate.min_kbps) def test_update_minimum_bandwidth(self): self.qos_driver.update(self.port_min, self.qos_policy_min_tx_rate) self.min_tx_rate_mock.assert_called_once_with( self.ASSIGNED_MAC, self.PCI_SLOT, self.rule_min_tx_rate.min_kbps) def test_delete_minimum_bandwidth_on_assigned_vf(self): self.qos_driver.delete(self.port_min, self.qos_policy_min_tx_rate) self.min_tx_rate_mock.assert_called_once_with( self.ASSIGNED_MAC, self.PCI_SLOT, 0) def test_delete_minimum_bandwidth_on_released_vf(self): del self.port_min['device_owner'] self.qos_driver.delete(self.port_min, self.qos_policy_min_tx_rate) self.clear_min_tx_rate_mock.assert_called_once_with(self.PCI_SLOT) ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_eswitch_manager.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_eswitch_manag0000644000175000017500000011237400000000000033646 0ustar00coreycorey00000000000000# Copyright 2014 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import mock from neutron.agent.linux import ip_link_support from neutron.plugins.ml2.drivers.mech_sriov.agent.common \ import exceptions as exc from neutron.plugins.ml2.drivers.mech_sriov.agent import eswitch_manager as esm from neutron.tests import base class TestCreateESwitchManager(base.BaseTestCase): SCANNED_DEVICES = [('0000:06:00.1', 0), ('0000:06:00.2', 1), ('0000:06:00.3', 2)] @staticmethod def cleanup(): if hasattr(esm.ESwitchManager, '_instance'): del esm.ESwitchManager._instance def test_create_eswitch_mgr_fail(self): device_mappings = {'physnet1': ['p6p1']} with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.scan_vf_devices", side_effect=exc.InvalidDeviceError( dev_name="p6p1", reason="device" " not found")),\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.pf_device_exists", return_value=True): eswitch_mgr = esm.ESwitchManager() self.addCleanup(self.cleanup) self.assertRaises(exc.InvalidDeviceError, eswitch_mgr.discover_devices, device_mappings, None) def test_create_eswitch_mgr_ok(self): device_mappings = {'physnet1': ['p6p1']} with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." 
"eswitch_manager.PciOsWrapper.scan_vf_devices", return_value=self.SCANNED_DEVICES),\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.pf_device_exists", return_value=True): eswitch_mgr = esm.ESwitchManager() self.addCleanup(self.cleanup) eswitch_mgr.discover_devices(device_mappings, None) class TestESwitchManagerApi(base.BaseTestCase): SCANNED_DEVICES = [('0000:06:00.1', 0), ('0000:06:00.2', 1), ('0000:06:00.3', 2)] ASSIGNED_MAC = '00:00:00:00:00:66' PCI_SLOT = '0000:06:00.1' WRONG_MAC = '00:00:00:00:00:67' WRONG_PCI = "0000:06:00.6" MAX_RATE = ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE MIN_RATE = ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_MIN_TX_RATE def setUp(self): super(TestESwitchManagerApi, self).setUp() device_mappings = {'physnet1': ['p6p1']} self.eswitch_mgr = esm.ESwitchManager() self.addCleanup(self.cleanup) self._set_eswitch_manager(self.eswitch_mgr, device_mappings) @staticmethod def cleanup(): if hasattr(esm.ESwitchManager, '_instance'): del esm.ESwitchManager._instance def _set_eswitch_manager(self, eswitch_mgr, device_mappings): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.scan_vf_devices", return_value=self.SCANNED_DEVICES), \ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.pf_device_exists", return_value=True): eswitch_mgr.discover_devices(device_mappings, None) def test_discover_devices_with_device(self): device_mappings = {'physnet1': ['p6p1', 'p6p2']} with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.pf_device_exists", return_value=True), \ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.ESwitchManager._create_emb_switch", ) as emb_switch: self.eswitch_mgr.discover_devices(device_mappings, None) self.assertTrue(emb_switch.called) def test_discover_devices_without_device(self): device_mappings = {'physnet1': ['p6p1', 'p6p2']} with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.pf_device_exists", return_value=False), \ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.ESwitchManager._create_emb_switch", ) as emb_switch: self.eswitch_mgr.discover_devices(device_mappings, None) self.assertFalse(emb_switch.called) def test_get_assigned_devices_info(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.get_assigned_devices_info", return_value=[(self.ASSIGNED_MAC, self.PCI_SLOT)]): result = self.eswitch_mgr.get_assigned_devices_info() self.assertIn(self.ASSIGNED_MAC, list(result)[0]) self.assertIn(self.PCI_SLOT, list(result)[0]) def test_get_assigned_devices_info_multiple_nics_for_physnet(self): device_mappings = {'physnet1': ['p6p1', 'p6p2']} devices_info = { 'p6p1': [(self.ASSIGNED_MAC, self.PCI_SLOT)], 'p6p2': [(self.WRONG_MAC, self.WRONG_PCI)], } def get_assigned_devices_info(self): return devices_info[self.dev_name] self._set_eswitch_manager(self.eswitch_mgr, device_mappings) with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.get_assigned_devices_info", side_effect=get_assigned_devices_info, autospec=True): result = self.eswitch_mgr.get_assigned_devices_info() self.assertIn(devices_info['p6p1'][0], list(result)) self.assertIn(devices_info['p6p2'][0], list(result)) def test_get_device_status_enable(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." 
"eswitch_manager.EmbSwitch.get_pci_device", return_value=self.ASSIGNED_MAC),\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.get_device_state", return_value='enable'): result = self.eswitch_mgr.get_device_state(self.ASSIGNED_MAC, self.PCI_SLOT) self.assertEqual('enable', result) def test_get_device_status_disable(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.get_pci_device", return_value=self.ASSIGNED_MAC),\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.get_device_state", return_value='disable'): result = self.eswitch_mgr.get_device_state(self.ASSIGNED_MAC, self.PCI_SLOT) self.assertEqual('disable', result) def test_get_device_status_auto(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.get_pci_device", return_value=self.ASSIGNED_MAC),\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.get_device_state", return_value='auto'): result = self.eswitch_mgr.get_device_state(self.ASSIGNED_MAC, self.PCI_SLOT) self.assertEqual('auto', result) def test_get_device_status_mismatch(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.get_pci_device", return_value=self.ASSIGNED_MAC),\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.get_device_state", return_value='enable'): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.LOG.warning") as log_mock: result = self.eswitch_mgr.get_device_state(self.WRONG_MAC, self.PCI_SLOT) log_mock.assert_called_with('device pci mismatch: ' '%(device_mac)s - %(pci_slot)s', {'pci_slot': self.PCI_SLOT, 'device_mac': self.WRONG_MAC}) self.assertEqual('disable', result) def test_set_device_status(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.get_pci_device", return_value=self.ASSIGNED_MAC),\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.set_device_state"): self.eswitch_mgr.set_device_state(self.ASSIGNED_MAC, self.PCI_SLOT, True, False) def test_set_device_max_rate(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.get_pci_device", return_value=self.ASSIGNED_MAC) as get_pci_mock,\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.set_device_rate")\ as set_device_rate_mock: self.eswitch_mgr.set_device_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT, 1000) get_pci_mock.assert_called_once_with(self.PCI_SLOT) set_device_rate_mock.assert_called_once_with( self.PCI_SLOT, self.MAX_RATE, 1000) def test_set_device_min_tx_rate(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.get_pci_device", return_value=self.ASSIGNED_MAC) as get_pci_mock,\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.set_device_rate")\ as set_device_rate_mock: self.eswitch_mgr.set_device_min_tx_rate(self.ASSIGNED_MAC, self.PCI_SLOT, 1000) get_pci_mock.assert_called_once_with(self.PCI_SLOT) set_device_rate_mock.assert_called_once_with( self.PCI_SLOT, self.MIN_RATE, 1000) def test_set_device_status_mismatch(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.get_pci_device", return_value=self.ASSIGNED_MAC),\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." 
"eswitch_manager.EmbSwitch.set_device_state"): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.LOG.warning") as log_mock: self.eswitch_mgr.set_device_state(self.WRONG_MAC, self.PCI_SLOT, True, False) log_mock.assert_called_with('device pci mismatch: ' '%(device_mac)s - %(pci_slot)s', {'pci_slot': self.PCI_SLOT, 'device_mac': self.WRONG_MAC}) def _mock_device_exists(self, pci_slot, mac_address, expected_result): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.get_pci_device", return_value=self.ASSIGNED_MAC): result = self.eswitch_mgr.device_exists(mac_address, pci_slot) self.assertEqual(expected_result, result) def test_device_exists_true(self): self._mock_device_exists(self.PCI_SLOT, self.ASSIGNED_MAC, True) def test_device_exists_false(self): self._mock_device_exists(self.WRONG_PCI, self.WRONG_MAC, False) def test_device_exists_mismatch(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.EmbSwitch.get_pci_device", return_value=self.ASSIGNED_MAC): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.LOG.warning") as log_mock: result = self.eswitch_mgr.device_exists(self.WRONG_MAC, self.PCI_SLOT) log_mock.assert_called_with('device pci mismatch: ' '%(device_mac)s - %(pci_slot)s', {'pci_slot': self.PCI_SLOT, 'device_mac': self.WRONG_MAC}) self.assertFalse(result) def test_clear_max_rate(self): with mock.patch('neutron.plugins.ml2.drivers.mech_sriov.agent.' 'eswitch_manager.ESwitchManager._clear_rate') \ as clear_rate_mock: self.eswitch_mgr.clear_max_rate(self.PCI_SLOT) clear_rate_mock.assert_called_once_with(self.PCI_SLOT, self.MAX_RATE) def test_clear_min_tx_rate(self): with mock.patch('neutron.plugins.ml2.drivers.mech_sriov.agent.' 'eswitch_manager.ESwitchManager._clear_rate') \ as clear_rate_mock: self.eswitch_mgr.clear_min_tx_rate(self.PCI_SLOT) clear_rate_mock.assert_called_once_with(self.PCI_SLOT, self.MIN_RATE) def test_process_emb_switch_without_device(self): device_mappings = {'physnet1': ['p6p1', 'p6p2']} phys_net = 'physnet1' dev_name = 'p6p1' self._set_eswitch_manager(self.eswitch_mgr, device_mappings) with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.pf_device_exists", return_value=False), \ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.ESwitchManager._create_emb_switch", ) as emb_switch: self.eswitch_mgr._process_emb_switch_map(phys_net, dev_name, {}) self.assertFalse(emb_switch.called) def test_process_emb_switch_with_device(self): device_mappings = {'physnet1': ['p6p1', 'p6p2']} phys_net = 'physnet1' dev_name = 'p6p3' self._set_eswitch_manager(self.eswitch_mgr, device_mappings) with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.pf_device_exists", return_value=True), \ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.ESwitchManager._create_emb_switch", ) as emb_switch: self.eswitch_mgr._process_emb_switch_map(phys_net, dev_name, {}) self.assertTrue(emb_switch.called) def _test_clear_rate(self, rate_type, pci_slot, passed, mac_address): with mock.patch('neutron.plugins.ml2.drivers.mech_sriov.agent.' 'eswitch_manager.EmbSwitch.set_device_rate') \ as set_rate_mock, \ mock.patch('neutron.plugins.ml2.drivers.mech_sriov.agent.' 'pci_lib.PciDeviceIPWrapper.get_assigned_macs', return_value=mac_address), \ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." 
"eswitch_manager.PciOsWrapper.pf_device_exists", return_value=True): self.eswitch_mgr._clear_rate(pci_slot, rate_type) if passed: set_rate_mock.assert_called_once_with(pci_slot, rate_type, 0) else: self.assertFalse(set_rate_mock.called) def test_clear_rate_max_rate_existing_pci_slot(self): self._test_clear_rate(self.MAX_RATE, self.PCI_SLOT, passed=True, mac_address={}) def test_clear_rate_max_rate_exist_and_assigned_pci(self): self._test_clear_rate(self.MAX_RATE, self.PCI_SLOT, passed=False, mac_address={0: self.ASSIGNED_MAC}) def test_clear_rate_max_rate_nonexisting_pci_slot(self): self._test_clear_rate(self.MAX_RATE, self.WRONG_PCI, passed=False, mac_address={}) def test_clear_rate_min_tx_rate_existing_pci_slot(self): self._test_clear_rate(self.MIN_RATE, self.PCI_SLOT, passed=True, mac_address={}) def test_clear_rate_min_tx_rate_exist_and_assigned_pci(self): self._test_clear_rate(self.MIN_RATE, self.PCI_SLOT, passed=False, mac_address={0: self.ASSIGNED_MAC}) def test_clear_rate_min_tx_rate_nonexisting_pci_slot(self): self._test_clear_rate(self.MIN_RATE, self.WRONG_PCI, passed=False, mac_address={}) def test_create_emb_switch(self): DEVICES = [('0000:04:00.1', 0), ('0000:04:00.2', 1)] with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.scan_vf_devices", side_effect=[[], DEVICES]), \ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.get_numvfs", return_value=2): physnet = 'test_create_emb_switch' self.assertNotIn(physnet, self.eswitch_mgr.emb_switches_map) # first time device will not be added as no VFs returned self.eswitch_mgr._create_emb_switch(physnet, 'dev1', []) self.assertNotIn(physnet, self.eswitch_mgr.emb_switches_map) self.assertEqual({'dev1'}, self.eswitch_mgr.skipped_devices) # second time device should be added with 2 VFs self.eswitch_mgr._create_emb_switch(physnet, 'dev1', []) self.assertIn(physnet, self.eswitch_mgr.emb_switches_map) self.assertEqual(set(), self.eswitch_mgr.skipped_devices) self.assertIn('0000:04:00.1', self.eswitch_mgr.pci_slot_map) self.assertIn('0000:04:00.2', self.eswitch_mgr.pci_slot_map) def test_create_emb_switch_zero_vfs(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.scan_vf_devices", return_value=[]), \ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." 
"eswitch_manager.PciOsWrapper.get_numvfs", return_value=0): physnet = 'test_create_emb_switch' self.assertNotIn(physnet, self.eswitch_mgr.emb_switches_map) # first time device will not be added self.eswitch_mgr._create_emb_switch(physnet, 'dev1', []) self.assertNotIn(physnet, self.eswitch_mgr.emb_switches_map) self.assertEqual({'dev1'}, self.eswitch_mgr.skipped_devices) # second time device should be added with 0 VFs self.eswitch_mgr._create_emb_switch(physnet, 'dev1', []) self.assertIn(physnet, self.eswitch_mgr.emb_switches_map) self.assertEqual(set(), self.eswitch_mgr.skipped_devices) class TestEmbSwitch(base.BaseTestCase): DEV_NAME = "eth2" PHYS_NET = "default" ASSIGNED_MAC = '00:00:00:00:00:66' PCI_SLOT = "0000:06:00.1" WRONG_PCI_SLOT = "0000:06:00.4" SCANNED_DEVICES = [('0000:06:00.1', 0), ('0000:06:00.2', 1), ('0000:06:00.3', 2)] VF_TO_MAC_MAPPING = {0: '00:00:00:00:00:11', 1: '00:00:00:00:00:22', 2: '00:00:00:00:00:33'} EXPECTED_MAC_TO_PCI = { '00:00:00:00:00:11': '0000:06:00.1', '00:00:00:00:00:22': '0000:06:00.2', '00:00:00:00:00:33': '0000:06:00.3'} def setUp(self): super(TestEmbSwitch, self).setUp() exclude_devices = set() with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.scan_vf_devices", return_value=self.SCANNED_DEVICES): self.emb_switch = esm.EmbSwitch(self.DEV_NAME, exclude_devices) @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.scan_vf_devices", return_value=[(PCI_SLOT, 0)]) def test_get_assigned_devices_info(self, *args): emb_switch = esm.EmbSwitch(self.DEV_NAME, ()) with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.get_assigned_macs", return_value={0: self.ASSIGNED_MAC}),\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.pf_device_exists", return_value=True), \ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper." "is_assigned_vf_direct", return_value=True): result = emb_switch.get_assigned_devices_info() self.assertIn(self.ASSIGNED_MAC, list(result)[0]) self.assertIn(self.PCI_SLOT, list(result)[0]) @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.scan_vf_devices", return_value=SCANNED_DEVICES) def test_get_assigned_devices_info_multiple_slots(self, *args): emb_switch = esm.EmbSwitch(self.DEV_NAME, ()) with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.get_assigned_macs", return_value=self.VF_TO_MAC_MAPPING),\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper." "is_assigned_vf_direct", return_value=True): devices_info = emb_switch.get_assigned_devices_info() for device_info in devices_info: mac = device_info[0] pci_slot = device_info[1] self.assertEqual( self.EXPECTED_MAC_TO_PCI[mac], pci_slot) def test_get_assigned_devices_empty(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.is_assigned_vf_direct", return_value=False), \ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper." "is_assigned_vf_macvtap", return_value=False): result = self.emb_switch.get_assigned_devices_info() self.assertFalse(result) def test_get_device_state_ok(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." 
"PciDeviceIPWrapper.get_vf_state", return_value=False): result = self.emb_switch.get_device_state(self.PCI_SLOT) self.assertFalse(result) def test_get_device_state_fail(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.get_vf_state", return_value=False): self.assertRaises(exc.InvalidPciSlotError, self.emb_switch.get_device_state, self.WRONG_PCI_SLOT) def test_set_device_state_ok(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.set_vf_state"): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "pci_lib.LOG.warning") as log_mock: self.emb_switch.set_device_state(self.PCI_SLOT, True, False) self.assertEqual(0, log_mock.call_count) def test_set_device_state_fail(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.set_vf_state"): self.assertRaises(exc.InvalidPciSlotError, self.emb_switch.set_device_state, self.WRONG_PCI_SLOT, True, False) def test_set_device_spoofcheck_ok(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.set_vf_spoofcheck") as \ set_vf_spoofcheck_mock: self.emb_switch.set_device_spoofcheck(self.PCI_SLOT, True) self.assertTrue(set_vf_spoofcheck_mock.called) def test_set_device_spoofcheck_fail(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.set_vf_spoofcheck"): self.assertRaises(exc.InvalidPciSlotError, self.emb_switch.set_device_spoofcheck, self.WRONG_PCI_SLOT, True) def test_set_device_rate_ok(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.set_vf_rate") as pci_lib_mock: self.emb_switch.set_device_rate( self.PCI_SLOT, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE, 2000) pci_lib_mock.assert_called_with( 0, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE, 2) def test_set_device_max_rate_ok2(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.set_vf_rate") as pci_lib_mock: self.emb_switch.set_device_rate( self.PCI_SLOT, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE, 99) pci_lib_mock.assert_called_with( 0, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE, 1) def test_set_device_max_rate_rounded_ok(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.set_vf_rate") as pci_lib_mock: self.emb_switch.set_device_rate( self.PCI_SLOT, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE, 2001) pci_lib_mock.assert_called_with( 0, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE, 2) def test_set_device_max_rate_rounded_ok2(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.set_vf_rate") as pci_lib_mock: self.emb_switch.set_device_rate( self.PCI_SLOT, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE, 2499) pci_lib_mock.assert_called_with( 0, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE, 2) def test_set_device_max_rate_rounded_ok3(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.set_vf_rate") as pci_lib_mock: self.emb_switch.set_device_rate( self.PCI_SLOT, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE, 2500) pci_lib_mock.assert_called_with( 0, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE, 3) def test_set_device_max_rate_disable(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." 
"PciDeviceIPWrapper.set_vf_rate") as pci_lib_mock: self.emb_switch.set_device_rate( self.PCI_SLOT, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE, 0) pci_lib_mock.assert_called_with( 0, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE, 0) def test_set_device_max_rate_fail(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.set_vf_rate"): self.assertRaises( exc.InvalidPciSlotError, self.emb_switch.set_device_rate, self.WRONG_PCI_SLOT, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE, 1000) def test_get_pci_device(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.get_assigned_macs", return_value={0: self.ASSIGNED_MAC}),\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper." "is_assigned_vf_direct", return_value=True), \ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.pf_device_exists", return_value=True): result = self.emb_switch.get_pci_device(self.PCI_SLOT) self.assertEqual(self.ASSIGNED_MAC, result) def test_get_pci_device_fail(self): with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper.get_assigned_macs", return_value=[self.ASSIGNED_MAC]),\ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper.pf_device_exists", return_value=True), \ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper." "is_assigned_vf_direct", return_value=True): result = self.emb_switch.get_pci_device(self.WRONG_PCI_SLOT) self.assertIsNone(result) def test_get_pci_list(self): result = self.emb_switch.get_pci_slot_list() self.assertEqual([tup[0] for tup in self.SCANNED_DEVICES], sorted(result)) def _test__get_macvtap_mac(self, upper_devs): ip_wrapper_mock_inst = mock.MagicMock() with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib." "PciDeviceIPWrapper", return_value=ip_wrapper_mock_inst), \ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." "eswitch_manager.PciOsWrapper." "get_vf_macvtap_upper_devs", return_value=upper_devs), \ mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent." 
"eswitch_manager.LOG.warning") as log_mock: self.emb_switch._get_macvtap_mac(0) ip_wrapper_mock_inst.device.assert_called_with(upper_devs[0]) if len(upper_devs) > 1: self.assertTrue(log_mock.called) else: self.assertFalse(log_mock.called) def test__get_macvtap_mac_single_upper_dev(self): upper_devs = ["macvtap0"] self._test__get_macvtap_mac(upper_devs) def test__get_macvtap_mac_multiple_upper_devs(self): upper_devs = ["macvtap0", "macvtap1"] self._test__get_macvtap_mac(upper_devs) class TestPciOsWrapper(base.BaseTestCase): DEV_NAME = "p7p1" VF_INDEX = 1 DIR_CONTENTS = [ "mlx4_port1", "virtfn0", "virtfn1", "virtfn2" ] DIR_CONTENTS_NO_MATCH = [ "mlx4_port1", "mlx4_port1" ] LINKS = { "virtfn0": "../0000:04:00.1", "virtfn1": "../0000:04:00.2", "virtfn2": "../0000:04:00.3" } PCI_SLOTS = [ ('0000:04:00.1', 0), ('0000:04:00.2', 1), ('0000:04:00.3', 2) ] def test_scan_vf_devices(self): def _get_link(file_path): file_name = os.path.basename(file_path) return self.LINKS[file_name] with mock.patch("os.path.isdir", return_value=True),\ mock.patch("os.listdir", return_value=self.DIR_CONTENTS),\ mock.patch("os.path.islink", return_value=True),\ mock.patch("os.readlink", side_effect=_get_link): result = esm.PciOsWrapper.scan_vf_devices(self.DEV_NAME) self.assertEqual(self.PCI_SLOTS, result) def test_scan_vf_devices_no_dir(self): with mock.patch("os.path.isdir", return_value=False): self.assertRaises(exc.InvalidDeviceError, esm.PciOsWrapper.scan_vf_devices, self.DEV_NAME) def test_scan_vf_devices_no_content(self): with mock.patch("os.path.isdir", return_value=True),\ mock.patch("os.listdir", return_value=[]): self.assertEqual([], esm.PciOsWrapper.scan_vf_devices(self.DEV_NAME)) def test_scan_vf_devices_no_match(self): with mock.patch("os.path.isdir", return_value=True),\ mock.patch("os.listdir", return_value=self.DIR_CONTENTS_NO_MATCH): self.assertEqual([], esm.PciOsWrapper.scan_vf_devices(self.DEV_NAME)) def _mock_assign_vf_direct(self, dir_exists): with mock.patch("os.path.isdir", return_value=dir_exists): result = esm.PciOsWrapper.is_assigned_vf_direct(self.DEV_NAME, self.VF_INDEX) self.assertEqual(not dir_exists, result) def test_is_assigned_vf_direct_true(self): self._mock_assign_vf_direct(True) def test_is_assigned_vf_direct_false(self): self._mock_assign_vf_direct(False) def _mock_assign_vf_macvtap(self, macvtap_exists): def _glob(file_path): return ["upper_macvtap0"] if macvtap_exists else [] with mock.patch("glob.glob", side_effect=_glob): result = esm.PciOsWrapper.is_assigned_vf_macvtap(self.DEV_NAME, self.VF_INDEX) self.assertEqual(macvtap_exists, result) def test_is_assigned_vf_macvtap_true(self): self._mock_assign_vf_macvtap(True) def test_is_assigned_vf_macvtap_false(self): self._mock_assign_vf_macvtap(False) def _test_get_vf_macvtap_upper_devs(self, upper_devs): with mock.patch("glob.glob", return_value=upper_devs): result = esm.PciOsWrapper.get_vf_macvtap_upper_devs(self.DEV_NAME, self.VF_INDEX) self.assertEqual([dev.split("_")[1] for dev in upper_devs], result) def test_get_vf_macvtap_upper_devs(self): upper_devs = ["upper_macvtap0", "upper_macvtap1"] self._test_get_vf_macvtap_upper_devs(upper_devs) def test_get_vf_macvtap_upper_devs_no_devs(self): upper_devs = [] self._test_get_vf_macvtap_upper_devs(upper_devs) def test_pf_device_exists_with_no_dir(self): with mock.patch("os.path.isdir", return_value=False): self.assertFalse(esm.PciOsWrapper.pf_device_exists('p6p1')) def test_pf_device_exists_with_dir(self): with mock.patch("os.path.isdir", return_value=True): 
            self.assertTrue(esm.PciOsWrapper.pf_device_exists('p6p1'))

    def test_get_numvfs(self):
        with mock.patch("six.moves.builtins.open",
                        mock.mock_open(read_data="63")) as mock_open:
            self.assertEqual(63, esm.PciOsWrapper.get_numvfs('dev1'))
            mock_open.assert_called_once_with(
                esm.PciOsWrapper.NUMVFS_PATH % 'dev1')

    def test_get_numvfs_no_file(self):
        with mock.patch("six.moves.builtins.open",
                        side_effect=IOError()) as mock_open:
            self.assertEqual(-1, esm.PciOsWrapper.get_numvfs('dev1'))
            mock_open.assert_called_once_with(
                esm.PciOsWrapper.NUMVFS_PATH % 'dev1')

# neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_pci_lib.py
# Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock

from neutron.agent.linux import ip_link_support
from neutron.plugins.ml2.drivers.mech_sriov.agent.common \
    import exceptions as exc
from neutron.plugins.ml2.drivers.mech_sriov.agent import pci_lib
from neutron.tests import base


class TestPciLib(base.BaseTestCase):
    DEV_NAME = "p7p1"
    VF_INDEX = 1
    VF_INDEX_DISABLE = 0
    PF_LINK_SHOW = ('122: p7p1: <BROADCAST,MULTICAST> mtu 1500 qdisc noop'
                    ' state DOWN mode DEFAULT group default qlen 1000')
    PF_MAC = '    link/ether f4:52:14:2a:3e:c0 brd ff:ff:ff:ff:ff:ff'
    VF_0_LINK_SHOW = ('    vf 0 MAC fa:16:3e:b4:81:ac, vlan 4095, spoof'
                      ' checking off, link-state disable')
    VF_1_LINK_SHOW = ('    vf 1 MAC 00:00:00:00:00:11, vlan 4095, spoof'
                      ' checking off, link-state enable')
    VF_2_LINK_SHOW = ('    vf 2 MAC fa:16:3e:68:4e:79, vlan 4095, spoof'
                      ' checking off, link-state enable')
    VF_LINK_SHOW = '\n'.join((PF_LINK_SHOW, PF_MAC, VF_0_LINK_SHOW,
                              VF_1_LINK_SHOW, VF_2_LINK_SHOW))
    MAC_MAPPING = {
        0: "fa:16:3e:b4:81:ac",
        1: "00:00:00:00:00:11",
        2: "fa:16:3e:68:4e:79",
    }

    def setUp(self):
        super(TestPciLib, self).setUp()
        self.pci_wrapper = pci_lib.PciDeviceIPWrapper(self.DEV_NAME)

    def test_get_assigned_macs(self):
        with mock.patch.object(self.pci_wrapper, "_as_root") as mock_as_root:
            mock_as_root.return_value = self.VF_LINK_SHOW
            result = self.pci_wrapper.get_assigned_macs([self.VF_INDEX])
            self.assertEqual(
                {self.VF_INDEX: self.MAC_MAPPING[self.VF_INDEX]}, result)

    def test_get_assigned_macs_fail(self):
        with mock.patch.object(self.pci_wrapper, "_as_root") as mock_as_root:
            mock_as_root.side_effect = Exception()
            self.assertRaises(exc.IpCommandError,
                              self.pci_wrapper.get_assigned_macs,
                              [self.VF_INDEX])

    def test_get_vf_state_enable(self):
        with mock.patch.object(self.pci_wrapper, "_as_root") as mock_as_root:
            mock_as_root.return_value = self.VF_LINK_SHOW
            result = self.pci_wrapper.get_vf_state(self.VF_INDEX)
            self.assertEqual('enable', result)

    def test_get_vf_state_disable(self):
        with mock.patch.object(self.pci_wrapper, "_as_root") as mock_as_root:
            mock_as_root.return_value = self.VF_LINK_SHOW
            result = self.pci_wrapper.get_vf_state(self.VF_INDEX_DISABLE)
            self.assertEqual('disable', result)

    def
test_get_vf_state_fail(self): with mock.patch.object(self.pci_wrapper, "_as_root") as mock_as_root: mock_as_root.side_effect = Exception() self.assertRaises(exc.IpCommandError, self.pci_wrapper.get_vf_state, self.VF_INDEX) def test_set_vf_state(self): with mock.patch.object(self.pci_wrapper, "_as_root"): result = self.pci_wrapper.set_vf_state(self.VF_INDEX, True) self.assertIsNone(result) def test_set_vf_state_fail(self): with mock.patch.object(self.pci_wrapper, "_as_root") as mock_as_root: mock_as_root.side_effect = Exception() self.assertRaises(exc.IpCommandError, self.pci_wrapper.set_vf_state, self.VF_INDEX, True) def test_set_vf_spoofcheck(self): with mock.patch.object(self.pci_wrapper, "_as_root"): result = self.pci_wrapper.set_vf_spoofcheck(self.VF_INDEX, True) self.assertIsNone(result) def test_set_vf_spoofcheck_fail(self): with mock.patch.object(self.pci_wrapper, "_as_root") as mock_as_root: mock_as_root.side_effect = Exception() self.assertRaises(exc.IpCommandError, self.pci_wrapper.set_vf_spoofcheck, self.VF_INDEX, True) def _set_vf_rate(self, rate, passed=True): if passed: with mock.patch.object(self.pci_wrapper, "_as_root") \ as mock_as_root: result = self.pci_wrapper.set_vf_rate( self.VF_INDEX, ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE, 1000) self.assertIsNone(result) mock_as_root.assert_called_once_with( [], "link", ("set", self.DEV_NAME, "vf", str(self.VF_INDEX), "rate", '1000')) else: with mock.patch.object(self.pci_wrapper, "_as_root", side_effect=Exception()): self.assertRaises(exc.IpCommandError, self.pci_wrapper.set_vf_rate, self.VF_INDEX, rate, 1000) def test_set_vf_rate_max_rate(self): self._set_vf_rate( ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE) def test_set_vf_rate_max_rate_fail(self): self._set_vf_rate('rate', passed=False) def test_set_vf_rate_min_tx_rate(self): self._set_vf_rate( ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_MIN_TX_RATE) def test_set_vf_rate_min_tx_rate_fail(self): self._set_vf_rate( ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_MIN_TX_RATE, passed=False) def test_set_vf_state_not_supported(self): with mock.patch.object(self.pci_wrapper, "_as_root") as mock_as_root: mock_as_root.side_effect = Exception( pci_lib.PciDeviceIPWrapper.IP_LINK_OP_NOT_SUPPORTED) self.assertRaises(exc.IpCommandOperationNotSupportedError, self.pci_wrapper.set_vf_state, self.VF_INDEX, state=True) ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_sriov_nic_agent.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_sriov_nic_age0000644000175000017500000007735300000000000033653 0ustar00coreycorey00000000000000# Copyright 2014 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
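# Several scan_devices() tests below reduce to one piece of set
# arithmetic over device sets. A standalone sketch of that logic; the
# helper name _diff_device_info is hypothetical, not the agent's API:

def _diff_device_info(registered, updated, current):
    """Derive the device_info dict the way the tests expect it."""
    return {
        'current': current,
        # Devices seen now but not previously registered were added.
        'added': current - registered,
        # Registered devices that are no longer seen were removed.
        'removed': registered - current,
        # Updates only count for devices that still exist.
        'updated': updated & current,
    }

# Usage mirroring test_scan_devices_new_and_removed:
#   _diff_device_info({'1', '2'}, set(), {'2', '3'})
#   -> {'current': {'2', '3'}, 'added': {'3'},
#      'removed': {'1'}, 'updated': set()}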
import mock from neutron_lib.api.definitions import portbindings from neutron_lib import constants from oslo_config import cfg from oslo_utils import uuidutils from neutron.agent.l2 import l2_agent_extensions_manager as l2_ext_manager from neutron.agent import rpc as agent_rpc from neutron.plugins.ml2.drivers.mech_sriov.agent.common import config # noqa from neutron.plugins.ml2.drivers.mech_sriov.agent.common import exceptions from neutron.plugins.ml2.drivers.mech_sriov.agent import sriov_nic_agent from neutron.tests import base DEVICE_MAC = '11:22:33:44:55:66' PCI_SLOT = "0000:06:00.1" class TestSriovAgent(base.BaseTestCase): def setUp(self): super(TestSriovAgent, self).setUp() # disable setting up periodic state reporting cfg.CONF.set_override('report_interval', 0, 'AGENT') cfg.CONF.set_default('firewall_driver', 'neutron.agent.firewall.NoopFirewallDriver', group='SECURITYGROUP') cfg.CONF.set_default('enable_security_group', False, group='SECURITYGROUP') class MockFixedIntervalLoopingCall(object): def __init__(self, f): self.f = f def start(self, interval=0): self.f() mock.patch('oslo_service.loopingcall.' 'FixedIntervalLoopingCall', new=MockFixedIntervalLoopingCall) self.agent = sriov_nic_agent.SriovNicSwitchAgent({}, {}, 0, {}, {}, {}) @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.eswitch_manager" ".ESwitchManager.get_assigned_devices_info", return_value=set()) @mock.patch.object(agent_rpc.PluginReportStateAPI, 'report_state') def test_cached_device_count_report_state(self, report_state, get_dev): self.agent._report_state() agent_conf = self.agent.agent_state['configurations'] # ensure devices aren't calculated until first scan_devices call self.assertNotIn('devices', agent_conf) self.agent.scan_devices(set(), set()) self.assertEqual(0, agent_conf['devices']) # ensure report_state doesn't call get_dev get_dev.reset_mock() get_dev.return_value = set(['dev1', 'dev2']) self.agent._report_state() self.assertEqual(0, agent_conf['devices']) # after a device scan, conf should bump to 2 self.agent.scan_devices(set(), set()) self.assertEqual(2, agent_conf['devices']) def test_treat_devices_removed_with_existed_device(self, *args): agent = sriov_nic_agent.SriovNicSwitchAgent({}, {}, 0, {}, {}, {}) devices = [(DEVICE_MAC, PCI_SLOT)] with mock.patch.object(agent.plugin_rpc, "update_device_down") as fn_udd: fn_udd.return_value = {'device': DEVICE_MAC, 'exists': True} resync = agent.treat_devices_removed(devices) self.assertFalse(resync) self.assertTrue(fn_udd.called) def test_treat_devices_removed_with_not_existed_device(self, *args): agent = sriov_nic_agent.SriovNicSwitchAgent({}, {}, 0, {}, {}, {}) devices = [(DEVICE_MAC, PCI_SLOT)] with mock.patch.object(agent.plugin_rpc, "update_device_down") as fn_udd: fn_udd.return_value = {'device': DEVICE_MAC, 'exists': False} with mock.patch.object(sriov_nic_agent.LOG, 'debug') as log: resync = agent.treat_devices_removed(devices) self.assertEqual(1, log.call_count) self.assertFalse(resync) self.assertTrue(fn_udd.called) def test_treat_devices_removed_failed(self, *args): agent = sriov_nic_agent.SriovNicSwitchAgent({}, {}, 0, {}, {}, {}) devices = [(DEVICE_MAC, PCI_SLOT)] with mock.patch.object(agent.plugin_rpc, "update_device_down") as fn_udd: fn_udd.side_effect = Exception() with mock.patch.object(sriov_nic_agent.LOG, 'debug') as log: resync = agent.treat_devices_removed(devices) self.assertEqual(1, log.call_count) self.assertTrue(resync) self.assertTrue(fn_udd.called) def mock_scan_devices(self, expected, mock_current, registered_devices, 
updated_devices): self.agent.eswitch_mgr = mock.Mock() self.agent.eswitch_mgr.get_assigned_devices_info.return_value = ( mock_current) results = self.agent.scan_devices(registered_devices, updated_devices) self.assertEqual(expected, results) def test_scan_devices_returns_empty_sets(self): registered = set() updated = set() mock_current = set() expected = {'current': set(), 'updated': set(), 'added': set(), 'removed': set()} self.mock_scan_devices(expected, mock_current, registered, updated) def test_scan_devices_no_changes(self): registered = set(['1', '2']) updated = set() mock_current = set(['1', '2']) expected = {'current': set(['1', '2']), 'updated': set(), 'added': set(), 'removed': set()} self.mock_scan_devices(expected, mock_current, registered, updated) def test_scan_devices_new_and_removed(self): registered = set(['1', '2']) updated = set() mock_current = set(['2', '3']) expected = {'current': set(['2', '3']), 'updated': set(), 'added': set(['3']), 'removed': set(['1'])} self.mock_scan_devices(expected, mock_current, registered, updated) def test_scan_devices_updated_and_removed(self): registered = set(['1', '2']) # '1' is in removed and updated tuple updated = set(['1']) mock_current = set(['2', '3']) expected = {'current': set(['2', '3']), 'updated': set(), 'added': set(['3']), 'removed': set(['1'])} self.mock_scan_devices(expected, mock_current, registered, updated) def test_scan_devices_new_updates(self): registered = set(['1']) updated = set(['2']) mock_current = set(['1', '2']) expected = {'current': set(['1', '2']), 'updated': set(['2']), 'added': set(['2']), 'removed': set()} self.mock_scan_devices(expected, mock_current, registered, updated) def test_scan_devices_updated_missing(self): registered = set(['1']) updated = set(['2']) mock_current = set(['1']) expected = {'current': set(['1']), 'updated': set(), 'added': set(), 'removed': set()} self.mock_scan_devices(expected, mock_current, registered, updated) def test_process_network_devices(self): agent = self.agent device_info = {'current': set(), 'added': set(['mac3', 'mac4']), 'updated': set(['mac2', 'mac3']), 'removed': set(['mac1'])} agent.sg_agent.prepare_devices_filter = mock.Mock() agent.sg_agent.refresh_firewall = mock.Mock() agent.treat_devices_added_updated = mock.Mock(return_value=False) agent.treat_devices_removed = mock.Mock(return_value=False) agent.process_network_devices(device_info) agent.sg_agent.prepare_devices_filter.assert_called_with( set(['mac3', 'mac4'])) self.assertTrue(agent.sg_agent.refresh_firewall.called) agent.treat_devices_added_updated.assert_called_with(set(['mac2', 'mac3', 'mac4'])) agent.treat_devices_removed.assert_called_with(set(['mac1'])) def test_treat_devices_added_updated_sends_host(self): agent = self.agent host = 'host1' cfg.CONF.set_override('host', host) agent.plugin_rpc = mock.Mock() MAC = 'aa:bb:cc:dd:ee:ff' device_details = {'device': MAC, 'port_id': 'port123', 'network_id': 'net123', 'admin_state_up': True, 'propagate_uplink_status': True, 'network_type': 'vlan', 'segmentation_id': 100, 'profile': {'pci_slot': '1:2:3.0'}, 'physical_network': 'physnet1', 'port_security_enabled': False} agent.plugin_rpc.get_devices_details_list.return_value = ( [device_details]) agent.treat_devices_added_updated([[MAC]]) agent.plugin_rpc.get_devices_details_list.assert_called_once_with( mock.ANY, set([MAC]), mock.ANY, host) def test_treat_devices_added_updated_and_removed(self): agent = self.agent MAC1 = 'aa:bb:cc:dd:ee:ff' SLOT1 = '1:2:3.0' MAC2 = 'aa:bb:cc:dd:ee:fe' SLOT2 = '1:3:3.0' 
mac_pci_slot_device1 = (MAC1, SLOT1) mac_pci_slot_device2 = (MAC2, SLOT2) mock_device1_details = {'device': MAC1, 'port_id': 'port123', 'network_id': 'net123', 'admin_state_up': True, 'propagate_uplink_status': False, 'network_type': 'vlan', 'segmentation_id': 100, 'profile': {'pci_slot': SLOT1}, 'physical_network': 'physnet1', 'port_security_enabled': False} mock_device2_details = {'device': MAC2, 'port_id': 'port124', 'network_id': 'net123', 'admin_state_up': True, 'propagate_uplink_status': False, 'network_type': 'vlan', 'segmentation_id': 100, 'profile': {'pci_slot': SLOT2}, 'physical_network': 'physnet1', 'port_security_enabled': False} agent.plugin_rpc = mock.Mock() agent.plugin_rpc.get_devices_details_list.return_value = ( [mock_device1_details]) agent.treat_devices_added_updated(set([MAC1])) self.assertEqual({'net123': [{'port_id': 'port123', 'device': mac_pci_slot_device1}]}, agent.network_ports) agent.plugin_rpc.get_devices_details_list.return_value = ( [mock_device2_details]) # add the second device and check the network_ports dict agent.treat_devices_added_updated(set([MAC2])) self.assertEqual( {'net123': [{'port_id': 'port123', 'device': mac_pci_slot_device1}, {'port_id': 'port124', 'device': mac_pci_slot_device2}]}, agent.network_ports) with mock.patch.object(agent.plugin_rpc, "update_device_down"): agent.treat_devices_removed([mac_pci_slot_device2]) # remove the second device and check the network_ports dict self.assertEqual({'net123': [{'port_id': 'port123', 'device': mac_pci_slot_device1}]}, agent.network_ports) def test_treat_devices_added_updated_admin_state_up_true(self): agent = self.agent mock_details = {'device': 'aa:bb:cc:dd:ee:ff', 'port_id': 'port123', 'network_id': 'net123', 'admin_state_up': True, 'propagate_uplink_status': False, 'network_type': 'vlan', 'segmentation_id': 100, 'profile': {'pci_slot': '1:2:3.0'}, 'physical_network': 'physnet1', 'port_security_enabled': False} agent.plugin_rpc = mock.Mock() agent.plugin_rpc.get_devices_details_list.return_value = [mock_details] agent.eswitch_mgr = mock.Mock() agent.eswitch_mgr.device_exists.return_value = True agent.set_device_state = mock.Mock() agent.set_device_spoofcheck = mock.Mock() resync_needed = agent.treat_devices_added_updated( set(['aa:bb:cc:dd:ee:ff'])) self.assertFalse(resync_needed) agent.eswitch_mgr.device_exists.assert_called_with('aa:bb:cc:dd:ee:ff', '1:2:3.0') agent.eswitch_mgr.set_device_state.assert_called_with( 'aa:bb:cc:dd:ee:ff', '1:2:3.0', True, False) agent.eswitch_mgr.set_device_spoofcheck.assert_called_with( 'aa:bb:cc:dd:ee:ff', '1:2:3.0', False) agent.plugin_rpc.update_device_list.assert_called_once_with( mock.ANY, set(['aa:bb:cc:dd:ee:ff']), set(), mock.ANY, mock.ANY) def test_treat_devices_added_updated_multiple_admin_state_up_true(self): agent = self.agent mock_details = [{'device': 'aa:bb:cc:dd:ee:ff', 'port_id': 'port123', 'network_id': 'net123', 'admin_state_up': True, 'propagate_uplink_status': False, 'network_type': 'vlan', 'segmentation_id': 100, 'profile': {'pci_slot': '1:2:3.0'}, 'physical_network': 'physnet1', 'port_security_enabled': False}, {'device': '11:22:33:44:55:66', 'port_id': 'port321', 'network_id': 'net123', 'admin_state_up': True, 'propagate_uplink_status': False, 'network_type': 'vlan', 'segmentation_id': 100, 'profile': {'pci_slot': '1:2:3.0'}, 'physical_network': 'physnet1', 'port_security_enabled': False}] agent.plugin_rpc = mock.Mock() agent.plugin_rpc.get_devices_details_list.return_value = mock_details agent.eswitch_mgr = mock.Mock() 
agent.eswitch_mgr.device_exists.return_value = True agent.set_device_state = mock.Mock() agent.set_device_spoofcheck = mock.Mock() resync_needed = agent.treat_devices_added_updated( set(['aa:bb:cc:dd:ee:ff', '11:22:33:44:55:66'])) self.assertFalse(resync_needed) calls = [mock.call('aa:bb:cc:dd:ee:ff', '1:2:3.0'), mock.call('11:22:33:44:55:66', '1:2:3.0')] agent.eswitch_mgr.device_exists.assert_has_calls(calls, any_order=True) calls = [mock.call('aa:bb:cc:dd:ee:ff', '1:2:3.0', True, False), mock.call('11:22:33:44:55:66', '1:2:3.0', True, False)] agent.eswitch_mgr.set_device_state.assert_has_calls(calls, any_order=True) calls = [mock.call('aa:bb:cc:dd:ee:ff', '1:2:3.0', False), mock.call('11:22:33:44:55:66', '1:2:3.0', False)] agent.eswitch_mgr.set_device_spoofcheck.assert_has_calls(calls, any_order=True) agent.plugin_rpc.update_device_list.assert_called_once_with( mock.ANY, set(['aa:bb:cc:dd:ee:ff', '11:22:33:44:55:66']), set(), mock.ANY, mock.ANY) def test_treat_devices_added_updated_multiple_admin_states(self): agent = self.agent mock_details = [{'device': 'aa:bb:cc:dd:ee:ff', 'port_id': 'port123', 'network_id': 'net123', 'admin_state_up': True, 'propagate_uplink_status': False, 'network_type': 'vlan', 'segmentation_id': 100, 'profile': {'pci_slot': '1:2:3.0'}, 'physical_network': 'physnet1', 'port_security_enabled': False}, {'device': '11:22:33:44:55:66', 'port_id': 'port321', 'network_id': 'net123', 'admin_state_up': False, 'propagate_uplink_status': False, 'network_type': 'vlan', 'segmentation_id': 100, 'profile': {'pci_slot': '1:2:3.0'}, 'physical_network': 'physnet1', 'port_security_enabled': False}] agent.plugin_rpc = mock.Mock() agent.plugin_rpc.get_devices_details_list.return_value = mock_details agent.eswitch_mgr = mock.Mock() agent.eswitch_mgr.device_exists.return_value = True agent.set_device_state = mock.Mock() agent.set_device_spoofcheck = mock.Mock() resync_needed = agent.treat_devices_added_updated( set(['aa:bb:cc:dd:ee:ff', '11:22:33:44:55:66'])) self.assertFalse(resync_needed) calls = [mock.call('aa:bb:cc:dd:ee:ff', '1:2:3.0'), mock.call('11:22:33:44:55:66', '1:2:3.0')] agent.eswitch_mgr.device_exists.assert_has_calls(calls, any_order=True) calls = [mock.call('aa:bb:cc:dd:ee:ff', '1:2:3.0', True, False), mock.call('11:22:33:44:55:66', '1:2:3.0', False, False)] agent.eswitch_mgr.set_device_state.assert_has_calls(calls, any_order=True) calls = [mock.call('aa:bb:cc:dd:ee:ff', '1:2:3.0', False), mock.call('11:22:33:44:55:66', '1:2:3.0', False)] agent.eswitch_mgr.set_device_spoofcheck.assert_has_calls(calls, any_order=True) agent.plugin_rpc.update_device_list.assert_called_once_with( mock.ANY, set(['aa:bb:cc:dd:ee:ff']), set(['11:22:33:44:55:66']), mock.ANY, mock.ANY) def test_treat_device_ip_link_state_not_supported(self): agent = self.agent agent.plugin_rpc = mock.Mock() agent.eswitch_mgr = mock.Mock() agent.eswitch_mgr.device_exists.return_value = True agent.eswitch_mgr.set_device_state.side_effect = ( exceptions.IpCommandOperationNotSupportedError( dev_name='aa:bb:cc:dd:ee:ff')) self.assertTrue(agent.treat_device('aa:bb:cc:dd:ee:ff', '1:2:3:0', admin_state_up=True)) def test_treat_device_set_device_state_exception(self): agent = self.agent agent.plugin_rpc = mock.Mock() agent.eswitch_mgr = mock.Mock() agent.eswitch_mgr.device_exists.return_value = True agent.eswitch_mgr.set_device_state.side_effect = ( exceptions.SriovNicError()) self.assertFalse(agent.treat_device('aa:bb:cc:dd:ee:ff', '1:2:3:0', admin_state_up=True)) def test_treat_device_no_device_found(self): agent = 
self.agent agent.plugin_rpc = mock.Mock() agent.eswitch_mgr = mock.Mock() agent.eswitch_mgr.device_exists.return_value = False self.assertFalse(agent.treat_device('aa:bb:cc:dd:ee:ff', '1:2:3:0', admin_state_up=True)) def test_treat_devices_added_updated_admin_state_up_false(self): agent = self.agent mock_details = {'device': 'aa:bb:cc:dd:ee:ff', 'port_id': 'port123', 'network_id': 'net123', 'admin_state_up': False, 'propagate_uplink_status': False, 'network_type': 'vlan', 'segmentation_id': 100, 'profile': {'pci_slot': '1:2:3.0'}, 'physical_network': 'physnet1'} agent.plugin_rpc = mock.Mock() agent.plugin_rpc.get_devices_details_list.return_value = [mock_details] agent.remove_port_binding = mock.Mock() agent.eswitch_mgr = mock.Mock() agent.eswitch_mgr.device_exists.return_value = True resync_needed = agent.treat_devices_added_updated( set(['aa:bb:cc:dd:ee:ff'])) self.assertFalse(resync_needed) agent.plugin_rpc.update_device_list.assert_called_once_with( mock.ANY, set(), set(['aa:bb:cc:dd:ee:ff']), mock.ANY, mock.ANY) def test_treat_devices_added_updated_no_device_found(self): agent = self.agent mock_details = {'device': 'aa:bb:cc:dd:ee:ff', 'port_id': 'port123', 'network_id': 'net123', 'admin_state_up': True, 'network_type': 'vlan', 'segmentation_id': 100, 'profile': {'pci_slot': '1:2:3.0'}, 'propagate_uplink_status': False, 'physical_network': 'physnet1'} agent.plugin_rpc = mock.Mock() agent.plugin_rpc.get_devices_details_list.return_value = [mock_details] agent.remove_port_binding = mock.Mock() agent.eswitch_mgr = mock.Mock() agent.eswitch_mgr.device_exists.return_value = False resync_needed = agent.treat_devices_added_updated( set(['aa:bb:cc:dd:ee:ff'])) self.assertTrue(resync_needed) self.assertFalse(agent.plugin_rpc.update_device_up.called) def test_update_and_clean_network_ports(self): network_id1 = 'network_id1' network_id2 = 'network_id2' port_id1 = 'port_id1' port_id2 = 'port_id2' mac_slot_1 = ('mac1', 'slot1') mac_slot_2 = ('mac2', 'slot2') self.agent.network_ports[network_id1] = [{'port_id': port_id1, 'device': mac_slot_1}, {'port_id': port_id2, 'device': mac_slot_2}] self.agent._update_network_ports(network_id2, port_id1, mac_slot_1) self.assertEqual({network_id1: [{'port_id': port_id2, 'device': mac_slot_2}], network_id2: [ {'port_id': port_id1, 'device': mac_slot_1}]}, self.agent.network_ports) cleaned_port_id = self.agent._clean_network_ports(mac_slot_1) self.assertEqual(cleaned_port_id, port_id1) self.assertEqual({network_id1: [{'port_id': port_id2, 'device': mac_slot_2}]}, self.agent.network_ports) cleaned_port_id = self.agent._clean_network_ports(mac_slot_2) self.assertEqual({}, self.agent.network_ports) def test_configurations_has_rp_bandwidth(self): rp_bandwidth = {'ens7': {'egress': 10000, 'ingress': 10000}} agent = sriov_nic_agent.SriovNicSwitchAgent( {}, {}, 0, rp_bandwidth, {}, {}) self.assertIn(constants.RP_BANDWIDTHS, agent.agent_state['configurations']) rp_bandwidths = agent.agent_state['configurations'][ constants.RP_BANDWIDTHS] self.assertEqual(rp_bandwidth['ens7'], rp_bandwidths['ens7']) def test_configurations_has_rp_default_inventory(self): rp_inventory_values = { 'allocation_ratio': 1.0, 'min_unit': 1, 'step_size': 1, 'reserved': 0 } agent = sriov_nic_agent.SriovNicSwitchAgent( {}, {}, 0, {}, rp_inventory_values, {}) self.assertIn(constants.RP_INVENTORY_DEFAULTS, agent.agent_state['configurations']) rp_inv_defaults = agent.agent_state['configurations'][ constants.RP_INVENTORY_DEFAULTS] self.assertListEqual( sorted(list(rp_inventory_values)), 
sorted(list(rp_inv_defaults.keys()))) for inv_key, inv_value in rp_inventory_values.items(): self.assertEqual(inv_value, rp_inv_defaults[inv_key]) def test_process_activated_bindings(self): # Create several devices which are pairs of (, ) dev_a = ('fa:16:3e:f8:ae:af', "0000:01:00.0") dev_b = ('fa:16:3e:f8:ae:b0', "0000:02:00.0") dev_c = ('fa:16:3e:f8:ae:b1', "0000:03:00.0") # Create device_info fake_device_info = { 'current': set([dev_a, dev_b]), 'added': set([dev_c]), 'removed': set(), 'updated': set()} fake_activated_bindings = set([dev_a]) self.agent.process_activated_bindings(fake_device_info, fake_activated_bindings) self.assertLessEqual(fake_activated_bindings, fake_device_info['added']) class FakeAgent(object): def __init__(self): self.updated_devices = set() self.activated_bindings = set() self.conf = mock.Mock() self.conf.host = 'host1' class TestSriovNicSwitchRpcCallbacks(base.BaseTestCase): def setUp(self): super(TestSriovNicSwitchRpcCallbacks, self).setUp() self.context = object() self.agent = FakeAgent() sg_agent = object() self.sriov_rpc_callback = sriov_nic_agent.SriovNicSwitchRpcCallbacks( self.context, self.agent, sg_agent) def _create_fake_port(self): return {'id': uuidutils.generate_uuid(), portbindings.PROFILE: {'pci_slot': PCI_SLOT}, 'mac_address': DEVICE_MAC} def _create_fake_bindings(self, fake_port, fake_host): return {'port_id': fake_port['id'], 'host': fake_host} def test_port_update_with_pci_slot(self): port = self._create_fake_port() kwargs = {'context': self.context, 'port': port} self.sriov_rpc_callback.port_update(**kwargs) self.assertEqual(set([(DEVICE_MAC, PCI_SLOT)]), self.agent.updated_devices) def test_port_update_with_vnic_physical_direct(self): port = self._create_fake_port() port[portbindings.VNIC_TYPE] = portbindings.VNIC_DIRECT_PHYSICAL kwargs = {'context': self.context, 'port': port} self.sriov_rpc_callback.port_update(**kwargs) self.assertEqual(set(), self.agent.updated_devices) def test_port_update_without_pci_slot(self): port = self._create_fake_port() port[portbindings.PROFILE] = None kwargs = {'context': self.context, 'port': port} self.sriov_rpc_callback.port_update(**kwargs) self.assertEqual(set(), self.agent.updated_devices) def test_network_update(self): TEST_NETWORK_ID1 = "n1" TEST_NETWORK_ID2 = "n2" TEST_PORT_ID1 = 'p1' TEST_PORT_ID2 = 'p2' network1 = {'id': TEST_NETWORK_ID1} port1 = {'id': TEST_PORT_ID1, 'network_id': TEST_NETWORK_ID1} port2 = {'id': TEST_PORT_ID2, 'network_id': TEST_NETWORK_ID2} self.agent.network_ports = { TEST_NETWORK_ID1: [{'port_id': port1['id'], 'device': ('mac1', 'slot1')}], TEST_NETWORK_ID2: [{'port_id': port2['id'], 'device': ('mac2', 'slot2')}]} kwargs = {'context': self.context, 'network': network1} self.sriov_rpc_callback.network_update(**kwargs) self.assertEqual(set([('mac1', 'slot1')]), self.agent.updated_devices) def test_binding_activate(self): fake_port = self._create_fake_port() self.agent.get_device_details_from_port_id = mock.Mock() self.agent.get_device_details_from_port_id.return_value = { 'mac_address': fake_port['mac_address'], 'profile': fake_port[portbindings.PROFILE] } kwargs = self._create_fake_bindings(fake_port, self.agent.conf.host) kwargs['context'] = self.context self.sriov_rpc_callback.binding_activate(**kwargs) # Assert agent.activated_binding set contains the new binding self.assertIn((fake_port['mac_address'], fake_port[portbindings.PROFILE]['pci_slot']), self.agent.activated_bindings) def test_binding_activate_no_host(self): fake_port = self._create_fake_port() kwargs = 
self._create_fake_bindings(fake_port, 'other-host') kwargs['context'] = self.context self.sriov_rpc_callback.binding_activate(**kwargs) # Assert no bindings were added self.assertEqual(set(), self.agent.activated_bindings) def test_binding_deactivate(self): # binding_deactivate() basically does nothing # call it with both the agent's host and other host to cover # all code paths fake_port = self._create_fake_port() kwargs = self._create_fake_bindings(fake_port, self.agent.conf.host) kwargs['context'] = self.context self.sriov_rpc_callback.binding_deactivate(**kwargs) kwargs['host'] = 'other-host' self.sriov_rpc_callback.binding_deactivate(**kwargs) class TestSRIOVAgentExtensionConfig(base.BaseTestCase): def setUp(self): super(TestSRIOVAgentExtensionConfig, self).setUp() l2_ext_manager.register_opts(cfg.CONF) # disable setting up periodic state reporting cfg.CONF.set_override('report_interval', 0, group='AGENT') cfg.CONF.set_override('extensions', ['qos'], group='agent') @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.eswitch_manager" ".ESwitchManager.get_assigned_devices_info", return_value=[]) def test_report_loaded_extension(self, *args): with mock.patch.object(agent_rpc.PluginReportStateAPI, 'report_state') as mock_report_state: agent = sriov_nic_agent.SriovNicSwitchAgent({}, {}, 0, {}, {}, {}) agent._report_state() mock_report_state.assert_called_with( agent.context, agent.agent_state) self.assertEqual( ['qos'], agent.agent_state['configurations']['extensions']) class TestSriovNicAgentConfigParser(base.BaseTestCase): def test__validate_rp_in_dev_mappings(self): with mock.patch.object( cfg.CONF.SRIOV_NIC, 'physical_device_mappings', new=[]), \ mock.patch.object( cfg.CONF.SRIOV_NIC, 'resource_provider_bandwidths', new=['no_such_dev_in_dev_mappings:1:1']): parser = sriov_nic_agent.SriovNicAgentConfigParser() self.assertRaises(ValueError, parser.parse) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.471046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/mech_driver/0000755000175000017500000000000000000000000031234 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/mech_driver/__init__.py0000644000175000017500000000000000000000000033333 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022500000000000011454 xustar0000000000000000127 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/mech_driver/test_mech_sriov_nic_switch.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/mech_driver/test_mech_sr0000644000175000017500000003143600000000000033645 0ustar00coreycorey00000000000000# Copyright 2014 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
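# Illustrative sketch (added here, not part of the original module;
# RP_UUID5_NAMESPACE is a name used only in this sketch): the profile
# 'allocation' UUID in test_driver_responsible_for_ports_allocation below
# is the RFC 4122 version-5 UUID of "host:eth0" under the resource
# provider namespace, i.e. the Python equivalent of the shell command
# quoted in that test, `uuid -v5 87f1895c-73bb-11e8-9008-c4d987b2a692
# host:eth0`:
import uuid

RP_UUID5_NAMESPACE = uuid.UUID('87f1895c-73bb-11e8-9008-c4d987b2a692')
assert str(uuid.uuid5(RP_UUID5_NAMESPACE, 'host:eth0')) == \
    '5762cf50-781b-5f01-8ebc-0cce8c9e74cd'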
import mock from neutron_lib.api.definitions import portbindings from neutron_lib import constants from neutron_lib.plugins.ml2 import api from oslo_config import cfg import testtools from neutron.conf.plugins.ml2.drivers.mech_sriov import mech_sriov_conf from neutron.plugins.ml2.drivers.mech_sriov.mech_driver \ import exceptions as exc from neutron.plugins.ml2.drivers.mech_sriov.mech_driver import mech_driver from neutron.tests.unit.plugins.ml2 import _test_mech_agent as base class TestFakePortContext(base.FakePortContext): def __init__(self, agent_type, agents, segments, vnic_type=portbindings.VNIC_NORMAL, profile=None): super(TestFakePortContext, self).__init__(agent_type, agents, segments, vnic_type=vnic_type, profile=profile) def set_binding(self, segment_id, vif_type, vif_details, state): self._bound_segment_id = segment_id self._bound_vif_type = vif_type self._bound_vif_details = vif_details self._bound_state = state class SriovNicSwitchMechanismBaseTestCase(base.AgentMechanismBaseTestCase): VIF_TYPE = portbindings.VIF_TYPE_HW_VEB CAP_PORT_FILTER = False AGENT_TYPE = constants.AGENT_TYPE_NIC_SWITCH VLAN_SEGMENTS = base.AgentMechanismVlanTestCase.VLAN_SEGMENTS GOOD_MAPPINGS = {'fake_physical_network': ['fake_device']} GOOD_CONFIGS = {'device_mappings': GOOD_MAPPINGS} BAD_MAPPINGS = {'wrong_physical_network': ['wrong_device']} BAD_CONFIGS = {'device_mappings': BAD_MAPPINGS} AGENTS = [{'alive': True, 'configurations': GOOD_CONFIGS}] AGENTS_DEAD = [{'alive': False, 'configurations': GOOD_CONFIGS}] AGENTS_BAD = [{'alive': False, 'configurations': GOOD_CONFIGS}, {'alive': True, 'configurations': BAD_CONFIGS}] def setUp(self): super(SriovNicSwitchMechanismBaseTestCase, self).setUp() self.driver = mech_driver.SriovNicSwitchMechanismDriver() self.driver.initialize() class SriovSwitchMechGenericTestCase(SriovNicSwitchMechanismBaseTestCase, base.AgentMechanismGenericTestCase): def test_check_segment(self): """Validate the check_segment call.""" segment = {'api.NETWORK_TYPE': ""} segment[api.NETWORK_TYPE] = constants.TYPE_VLAN self.assertTrue(self.driver.check_segment_for_agent(segment)) # Validate a network type not currently supported segment[api.NETWORK_TYPE] = constants.TYPE_GRE self.assertFalse(self.driver.check_segment_for_agent(segment)) def test_check_segment_allows_supported_network_types(self): for network_type in self.driver.get_allowed_network_types(agent=None): segment = {api.NETWORK_TYPE: network_type} self.assertTrue(self.driver.check_segment_for_agent(segment)) def test_driver_responsible_for_ports_allocation(self): agents = [ {'agent_type': 'NIC Switch agent', 'configurations': {'resource_provider_bandwidths': {'eth0': {}}}, 'host': 'host', 'id': '1'} ] segments = [] # uuid -v5 87f1895c-73bb-11e8-9008-c4d987b2a692 host:eth0 profile = {'allocation': '5762cf50-781b-5f01-8ebc-0cce8c9e74cd'} port_ctx = base.FakePortContext( self.AGENT_TYPE, agents, segments, vnic_type=portbindings.VNIC_DIRECT, profile=profile) with mock.patch.object(self.driver, '_possible_agents_for_port', return_value=agents): self.assertTrue( self.driver.responsible_for_ports_allocation(port_ctx)) class SriovMechVlanTestCase(SriovNicSwitchMechanismBaseTestCase, base.AgentMechanismBaseTestCase): VLAN_SEGMENTS = [{api.ID: 'unknown_segment_id', api.NETWORK_TYPE: 'no_such_type'}, {api.ID: 'vlan_segment_id', api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'fake_physical_network', api.SEGMENTATION_ID: 1234}] def test_type_vlan(self): context = TestFakePortContext(self.AGENT_TYPE, self.AGENTS, self.VLAN_SEGMENTS, 
portbindings.VNIC_DIRECT) self.driver.bind_port(context) self._check_bound(context, self.VLAN_SEGMENTS[1]) def test_type_vlan_bad(self): context = TestFakePortContext(self.AGENT_TYPE, self.AGENTS_BAD, self.VLAN_SEGMENTS, portbindings.VNIC_DIRECT) self.driver.bind_port(context) self._check_unbound(context) class SriovSwitchMechVnicTypeTestCase(SriovNicSwitchMechanismBaseTestCase): def _check_vif_type_for_vnic_type(self, vnic_type, expected_vif_type): context = TestFakePortContext(self.AGENT_TYPE, self.AGENTS, self.VLAN_SEGMENTS, vnic_type) self.driver.bind_port(context) self.assertEqual(expected_vif_type, context._bound_vif_type) vlan = int(context._bound_vif_details[portbindings.VIF_DETAILS_VLAN]) self.assertEqual(1234, vlan) def test_vnic_type_direct(self): self._check_vif_type_for_vnic_type(portbindings.VNIC_DIRECT, portbindings.VIF_TYPE_HW_VEB) def test_vnic_type_macvtap(self): self._check_vif_type_for_vnic_type(portbindings.VNIC_MACVTAP, portbindings.VIF_TYPE_HW_VEB) def test_vnic_type_direct_physical(self): self._check_vif_type_for_vnic_type(portbindings.VNIC_DIRECT_PHYSICAL, portbindings.VIF_TYPE_HOSTDEV_PHY) @mock.patch.object(mech_driver.SriovNicSwitchMechanismDriver, 'try_to_bind_segment_for_agent') def test_vnic_type_direct_with_switchdev_cap(self, mocked_bind_segment): profile = {'capabilities': ['switchdev']} context = TestFakePortContext(self.AGENT_TYPE, self.AGENTS, self.VLAN_SEGMENTS, portbindings.VNIC_DIRECT, profile) self.driver.bind_port(context) mocked_bind_segment.assert_not_called() class SriovSwitchMechVifDetailsTestCase(SriovNicSwitchMechanismBaseTestCase): VLAN_SEGMENTS = [{api.ID: 'vlan_segment_id', api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'fake_physical_network', api.SEGMENTATION_ID: 1234}] def test_vif_details_contains_vlan_id(self): context = TestFakePortContext(self.AGENT_TYPE, self.AGENTS, self.VLAN_SEGMENTS, portbindings.VNIC_DIRECT) self.driver.bind_port(context) vif_details = context._bound_vif_details self.assertIsNotNone(vif_details) vlan_id = int(vif_details.get(portbindings.VIF_DETAILS_VLAN)) self.assertEqual(1234, vlan_id) def test_get_vif_details_for_flat_network(self): segment = {api.NETWORK_TYPE: constants.TYPE_FLAT} vif_details = self.driver._get_vif_details(segment) vlan_id = vif_details[portbindings.VIF_DETAILS_VLAN] self.assertEqual('0', vlan_id) def test_get_vif_details_unsupported_net(self): segment = {api.NETWORK_TYPE: 'foo'} with testtools.ExpectedException(exc.SriovUnsupportedNetworkType): self.driver._get_vif_details(segment) def test_get_vif_details_with_agent(self): context = TestFakePortContext(self.AGENT_TYPE, self.AGENTS, self.VLAN_SEGMENTS, portbindings.VNIC_DIRECT) self.driver.bind_port(context) self.assertEqual(constants.PORT_STATUS_DOWN, context._bound_state) def test_get_vif_details_with_agent_direct_physical(self): context = TestFakePortContext(self.AGENT_TYPE, self.AGENTS, self.VLAN_SEGMENTS, portbindings.VNIC_DIRECT_PHYSICAL) self.driver.bind_port(context) self.assertEqual(constants.PORT_STATUS_ACTIVE, context._bound_state) class SriovSwitchMechVnicTypesTestCase(SriovNicSwitchMechanismBaseTestCase): def setUp(self): self.override_vnic_types = [portbindings.VNIC_DIRECT, portbindings.VNIC_MACVTAP] self.driver_with_vnic_types = \ mech_driver.SriovNicSwitchMechanismDriver( supported_vnic_types=self.override_vnic_types) self.default_supported_vnics = [ portbindings.VNIC_DIRECT, portbindings.VNIC_MACVTAP, portbindings.VNIC_DIRECT_PHYSICAL] self.blacklist_cfg = { 'SRIOV_DRIVER': { 'vnic_type_blacklist': [] } } 
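        # Note: vnic_type_blacklist is consumed when the mechanism driver is
        # constructed, which is why the blacklist tests below first override
        # the config through MechDriverConfFixture and only then instantiate
        # a fresh SriovNicSwitchMechanismDriver.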
super(SriovSwitchMechVnicTypesTestCase, self).setUp() def test_default_vnic_types(self): self.assertEqual(self.default_supported_vnics, self.driver.supported_vnic_types) def test_override_default_vnic_types(self): self.assertEqual( self.override_vnic_types, self.driver_with_vnic_types.supported_vnic_types) def test_vnic_type_blacklist_valid_item(self): self.blacklist_cfg['SRIOV_DRIVER']['vnic_type_blacklist'] = \ [portbindings.VNIC_MACVTAP] fake_conf = cfg.CONF fake_conf_fixture = base.MechDriverConfFixture( fake_conf, self.blacklist_cfg, mech_sriov_conf.register_sriov_mech_driver_opts) self.useFixture(fake_conf_fixture) test_driver = mech_driver.SriovNicSwitchMechanismDriver( supported_vnic_types=self.default_supported_vnics) supported_vnic_types = test_driver.supported_vnic_types self.assertNotIn(portbindings.VNIC_MACVTAP, supported_vnic_types) self.assertEqual(len(self.default_supported_vnics) - 1, len(supported_vnic_types)) def test_vnic_type_blacklist_not_valid_item(self): self.blacklist_cfg['SRIOV_DRIVER']['vnic_type_blacklist'] = ['foo'] fake_conf = cfg.CONF fake_conf_fixture = base.MechDriverConfFixture( fake_conf, self.blacklist_cfg, mech_sriov_conf.register_sriov_mech_driver_opts) self.useFixture(fake_conf_fixture) self.assertRaises(ValueError, mech_driver.SriovNicSwitchMechanismDriver) def test_vnic_type_blacklist_all_items(self): self.blacklist_cfg['SRIOV_DRIVER']['vnic_type_blacklist'] = \ [portbindings.VNIC_DIRECT, portbindings.VNIC_MACVTAP, portbindings.VNIC_DIRECT_PHYSICAL] fake_conf = cfg.CONF fake_conf_fixture = base.MechDriverConfFixture( fake_conf, self.blacklist_cfg, mech_sriov_conf.register_sriov_mech_driver_opts) self.useFixture(fake_conf_fixture) self.assertRaises(ValueError, mech_driver.SriovNicSwitchMechanismDriver) class SriovSwitchDeviceMappingsTestCase(SriovNicSwitchMechanismBaseTestCase): def test_standard_device_mappings(self): mappings = self.driver.get_standard_device_mappings(self.AGENTS[0]) self.assertDictEqual(self.GOOD_CONFIGS['device_mappings'], mappings) def test_standard_device_mappings_negative(self): fake_agent = {'agent_type': constants.AGENT_TYPE_NIC_SWITCH, 'configurations': {}} self.assertRaises(ValueError, self.driver.get_standard_device_mappings, fake_agent) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/mechanism_logger.py0000644000175000017500000001321600000000000030467 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.plugins.ml2 import api from oslo_log import log LOG = log.getLogger(__name__) class LoggerMechanismDriver(api.MechanismDriver): """Mechanism driver that logs all calls and parameters made. Generally used for testing and debugging. 
""" def initialize(self): pass def _log_network_call(self, method_name, context): LOG.info("%(method)s called with network settings %(current)s " "(original settings %(original)s) and " "network segments %(segments)s", {'method': method_name, 'current': context.current, 'original': context.original, 'segments': context.network_segments}) def create_network_precommit(self, context): self._log_network_call("create_network_precommit", context) def create_network_postcommit(self, context): self._log_network_call("create_network_postcommit", context) def update_network_precommit(self, context): self._log_network_call("update_network_precommit", context) def update_network_postcommit(self, context): self._log_network_call("update_network_postcommit", context) def delete_network_precommit(self, context): self._log_network_call("delete_network_precommit", context) def delete_network_postcommit(self, context): self._log_network_call("delete_network_postcommit", context) def check_vlan_transparency(self, context): self._log_network_call("check_vlan_transparency", context) return True def _log_subnet_call(self, method_name, context): LOG.info("%(method)s called with subnet settings %(current)s " "(original settings %(original)s)", {'method': method_name, 'current': context.current, 'original': context.original}) def create_subnet_precommit(self, context): self._log_subnet_call("create_subnet_precommit", context) def create_subnet_postcommit(self, context): self._log_subnet_call("create_subnet_postcommit", context) def update_subnet_precommit(self, context): self._log_subnet_call("update_subnet_precommit", context) def update_subnet_postcommit(self, context): self._log_subnet_call("update_subnet_postcommit", context) def delete_subnet_precommit(self, context): self._log_subnet_call("delete_subnet_precommit", context) def delete_subnet_postcommit(self, context): self._log_subnet_call("delete_subnet_postcommit", context) def _log_port_call(self, method_name, context): network_context = context.network LOG.info("%(method)s called with port settings %(current)s " "(original settings %(original)s) " "host %(host)s " "(original host %(original_host)s) " "vif type %(vif_type)s " "(original vif type %(original_vif_type)s) " "vif details %(vif_details)s " "(original vif details %(original_vif_details)s) " "binding levels %(levels)s " "(original binding levels %(original_levels)s) " "on network %(network)s " "with segments to bind %(segments_to_bind)s", {'method': method_name, 'current': context.current, 'original': context.original, 'host': context.host, 'original_host': context.original_host, 'vif_type': context.vif_type, 'original_vif_type': context.original_vif_type, 'vif_details': context.vif_details, 'original_vif_details': context.original_vif_details, 'levels': context.binding_levels, 'original_levels': context.original_binding_levels, 'network': network_context.current, 'segments_to_bind': context.segments_to_bind}) def create_port_precommit(self, context): self._log_port_call("create_port_precommit", context) def create_port_postcommit(self, context): self._log_port_call("create_port_postcommit", context) def update_port_precommit(self, context): self._log_port_call("update_port_precommit", context) def update_port_postcommit(self, context): self._log_port_call("update_port_postcommit", context) def delete_port_precommit(self, context): self._log_port_call("delete_port_precommit", context) def delete_port_postcommit(self, context): self._log_port_call("delete_port_postcommit", context) def 
bind_port(self, context): self._log_port_call("bind_port", context) def filter_hosts_with_segment_access( self, context, segments, candidate_hosts, agent_getter): LOG.info("filter_hosts_with_segment_access called with segments " "%(segments)s, candidate hosts %(hosts)s ", {'segments': segments, 'hosts': candidate_hosts}) return set() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/mechanism_test.py0000644000175000017500000003037300000000000030172 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from neutron_lib.api.definitions import portbindings from neutron_lib.api.definitions import provider_net as pnet from neutron_lib import constants as const from neutron_lib.plugins.ml2 import api from neutron.plugins.ml2.drivers import mech_agent VIF_TYPE_TEST = 'vif_type_test' class TestMechanismDriver(api.MechanismDriver): """Test mechanism driver for testing mechanism driver api.""" def __init__(self, *args, **kwargs): super(TestMechanismDriver, self).__init__(*args, **kwargs) self._supported_vnic_types = ('test_mechanism_driver_vnic_type', ) def initialize(self): self.bound_ports = set() def _check_network_context(self, context, original_expected): assert(isinstance(context, api.NetworkContext)) assert(isinstance(context.current, dict)) assert(context.current['id'] is not None) if original_expected: assert(isinstance(context.original, dict)) assert(context.current['id'] == context.original['id']) else: assert(not context.original) def create_network_precommit(self, context): self._check_network_context(context, False) def create_network_postcommit(self, context): self._check_network_context(context, False) def update_network_precommit(self, context): self._check_network_context(context, True) def update_network_postcommit(self, context): self._check_network_context(context, True) def delete_network_precommit(self, context): self._check_network_context(context, False) def delete_network_postcommit(self, context): self._check_network_context(context, False) def _check_subnet_context(self, context, original_expected): assert(isinstance(context, api.SubnetContext)) assert(isinstance(context.current, dict)) assert(context.current['id'] is not None) if original_expected: assert(isinstance(context.original, dict)) assert(context.current['id'] == context.original['id']) else: assert(not context.original) network_context = context.network assert(isinstance(network_context, api.NetworkContext)) self._check_network_context(network_context, False) def create_subnet_precommit(self, context): self._check_subnet_context(context, False) def create_subnet_postcommit(self, context): self._check_subnet_context(context, False) def update_subnet_precommit(self, context): self._check_subnet_context(context, True) def update_subnet_postcommit(self, context): self._check_subnet_context(context, True) def 
delete_subnet_precommit(self, context): self._check_subnet_context(context, False) def delete_subnet_postcommit(self, context): self._check_subnet_context(context, False) def _check_port_context(self, context, original_expected): assert(isinstance(context, api.PortContext)) self._check_port_info(context.current, context.host, context.vif_type, context.vif_details) if context.vif_type in (portbindings.VIF_TYPE_UNBOUND, portbindings.VIF_TYPE_BINDING_FAILED): if (context.segments_to_bind and context.segments_to_bind[0][api.NETWORK_TYPE] == 'vlan'): # Partially bound. self._check_bound(context.binding_levels, context.top_bound_segment, context.bottom_bound_segment) else: self._check_unbound(context.binding_levels, context.top_bound_segment, context.bottom_bound_segment) assert((context.current['id'], context.host) not in self.bound_ports) else: self._check_bound(context.binding_levels, context.top_bound_segment, context.bottom_bound_segment) assert((context.current['id'], context.host) in self.bound_ports) if original_expected: self._check_port_info(context.original, context.original_host, context.original_vif_type, context.original_vif_details) assert(context.current['id'] == context.original['id']) if (context.original_vif_type in (portbindings.VIF_TYPE_UNBOUND, portbindings.VIF_TYPE_BINDING_FAILED)): self._check_unbound(context.original_binding_levels, context.original_top_bound_segment, context.original_bottom_bound_segment) else: self._check_bound(context.original_binding_levels, context.original_top_bound_segment, context.original_bottom_bound_segment) else: assert(context.original is None) assert(context.original_host is None) assert(context.original_vif_type is None) assert(context.original_vif_details is None) assert(context.original_status is None) self._check_unbound(context.original_binding_levels, context.original_top_bound_segment, context.original_bottom_bound_segment) network_context = context.network assert(isinstance(network_context, api.NetworkContext)) self._check_network_context(network_context, False) def _check_port_info(self, port, host, vif_type, vif_details): assert(isinstance(port, dict)) assert(port['id'] is not None) assert(vif_type in (portbindings.VIF_TYPE_UNBOUND, portbindings.VIF_TYPE_BINDING_FAILED, portbindings.VIF_TYPE_DISTRIBUTED, portbindings.VIF_TYPE_OVS, portbindings.VIF_TYPE_BRIDGE)) if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: assert(port[portbindings.HOST_ID] == '') assert(port[portbindings.VIF_TYPE] == portbindings.VIF_TYPE_DISTRIBUTED) assert(port[portbindings.VIF_DETAILS] == {}) else: assert(port[portbindings.HOST_ID] == host) assert(port[portbindings.VIF_TYPE] != portbindings.VIF_TYPE_DISTRIBUTED) assert(port[portbindings.VIF_TYPE] == vif_type) assert(isinstance(vif_details, dict)) assert(port[portbindings.VIF_DETAILS] == vif_details) def _check_unbound(self, levels, top_segment, bottom_segment): assert(levels is None) assert(top_segment is None) assert(bottom_segment is None) def _check_bound(self, levels, top_segment, bottom_segment): assert(isinstance(levels, list)) top_level = levels[0] assert(isinstance(top_level, dict)) assert(isinstance(top_segment, dict)) assert(top_segment == top_level[api.BOUND_SEGMENT]) assert('test' == top_level[api.BOUND_DRIVER]) bottom_level = levels[-1] assert(isinstance(bottom_level, dict)) assert(isinstance(bottom_segment, dict)) assert(bottom_segment == bottom_level[api.BOUND_SEGMENT]) assert('test' == bottom_level[api.BOUND_DRIVER]) def create_port_precommit(self, context): 
self._check_port_context(context, False) def create_port_postcommit(self, context): self._check_port_context(context, False) def update_port_precommit(self, context): if ((context.original_top_bound_segment and not context.top_bound_segment) or (context.host == "host-fail")): self.bound_ports.remove((context.original['id'], context.original_host)) self._check_port_context(context, True) def update_port_postcommit(self, context): self._check_port_context(context, True) def delete_port_precommit(self, context): self._check_port_context(context, False) def delete_port_postcommit(self, context): self._check_port_context(context, False) def bind_port(self, context): self._check_port_context(context, False) host = context.host segment = context.segments_to_bind[0] segment_id = segment[api.ID] if host == "host-ovs-no_filter": context.set_binding(segment_id, portbindings.VIF_TYPE_OVS, {portbindings.CAP_PORT_FILTER: False}) self.bound_ports.add((context.current['id'], host)) elif host == "host-bridge-filter": context.set_binding(segment_id, portbindings.VIF_TYPE_BRIDGE, {portbindings.CAP_PORT_FILTER: True}) self.bound_ports.add((context.current['id'], host)) elif host == "host-ovs-filter-active": context.set_binding(segment_id, portbindings.VIF_TYPE_OVS, {portbindings.CAP_PORT_FILTER: True}, status=const.PORT_STATUS_ACTIVE) self.bound_ports.add((context.current['id'], host)) elif host == "host-hierarchical": segment_type = segment[api.NETWORK_TYPE] if segment_type == 'local': next_segment = context.allocate_dynamic_segment( {api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'physnet1'} ) context.continue_binding(segment_id, [next_segment]) elif segment_type == 'vlan': context.set_binding(segment_id, portbindings.VIF_TYPE_OVS, {portbindings.CAP_PORT_FILTER: False}) self.bound_ports.add((context.current['id'], host)) elif host == "host-fail": context.set_binding(None, portbindings.VIF_TYPE_BINDING_FAILED, {portbindings.CAP_PORT_FILTER: False}) self.bound_ports.add((context.current['id'], host)) def filter_hosts_with_segment_access( self, context, segments, candidate_hosts, agent_getter): return set() @property def resource_provider_uuid5_namespace(self): return uuid.UUID('7f0ce65c-1f13-11e9-8921-3c6aa7b21d17') @property def supported_vnic_types(self): return self._supported_vnic_types def get_standard_device_mappings(self, agent): return {} class TestMechanismDriverWithAgent(mech_agent.AgentMechanismDriverBase, TestMechanismDriver): """Test mechanism driver with agent for testing mechanism driver api.""" def __init__(self): super(TestMechanismDriverWithAgent, self).__init__('test_agent_type') self.bound_ports = set() self._agent_type = 'test_mechanism_driver_agent' def get_supported_vif_type(self, agent): return VIF_TYPE_TEST def get_vif_type(self, context, agent, segment): return VIF_TYPE_TEST @staticmethod def provider_network_attribute_updates_supported(): return [pnet.SEGMENTATION_ID] def try_to_bind_segment_for_agent(self, context, segment, agent): pass @property def agent_type(self): return self._agent_type @agent_type.setter def agent_type(self, agent_type): self._agent_type = agent_type @TestMechanismDriver.supported_vnic_types.setter def supported_vnic_types(self, vnic_types): self._supported_vnic_types = vnic_types ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.471046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/0000755000175000017500000000000000000000000027160 
5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/__init__.py0000644000175000017500000000000000000000000031257 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.471046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/0000755000175000017500000000000000000000000030256 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/__init__.py0000644000175000017500000000000000000000000032355 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000111 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/ 27 mtime=1586982291.471046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers0000755000175000017500000000000000000000000033751 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/__init__.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers0000644000175000017500000000000000000000000033741 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022700000000000011456 xustar0000000000000000129 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/test_qos_driver.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers0000644000175000017500000002710200000000000033755 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
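# Illustrative sketch (simplified stand-in added for this document, not the
# real driver code; _apply_bw_limit is a hypothetical name): the test cases
# below assert that bandwidth-limit rules are dispatched by direction --
# egress rules through br_int.create_egress_bw_limit_for_port() and ingress
# rules through br_int.update_ingress_bw_limit_for_port().  Roughly:
def _apply_bw_limit(br_int, port_name, bw_rule):
    # Dispatch mirrors what the tests assert: egress limits are created,
    # ingress limits are updated.
    if bw_rule.direction == 'egress':
        br_int.create_egress_bw_limit_for_port(
            port_name, bw_rule.max_kbps, bw_rule.max_burst_kbps)
    else:  # ingress
        br_int.update_ingress_bw_limit_for_port(
            port_name, bw_rule.max_kbps, bw_rule.max_burst_kbps)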
import copy import mock from neutron_lib import constants from neutron_lib import context from neutron_lib.services.qos import constants as qos_consts from oslo_utils import uuidutils from neutron.objects.qos import policy from neutron.objects.qos import rule from neutron.plugins.ml2.drivers.openvswitch.agent import ( ovs_agent_extension_api as ovs_ext_api) from neutron.plugins.ml2.drivers.openvswitch.agent.extension_drivers import ( qos_driver) from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native import ( ovs_bridge) from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent import ( ovs_test_base) class QosOVSAgentDriverTestCase(ovs_test_base.OVSAgentConfigTestBase): def setUp(self): super(QosOVSAgentDriverTestCase, self).setUp() conn_patcher = mock.patch( 'neutron.agent.ovsdb.impl_idl._connection') conn_patcher.start() self.addCleanup(conn_patcher.stop) self.context = context.get_admin_context() self.qos_driver = qos_driver.QosOVSAgentDriver() self.mock_clear_minimum_bandwidth_qos = mock.patch.object( self.qos_driver, '_minimum_bandwidth_initialize').start() os_ken_app = mock.Mock() self.agent_api = ovs_ext_api.OVSAgentExtensionAPI( ovs_bridge.OVSAgentBridge( 'br-int', os_ken_app=os_ken_app), ovs_bridge.OVSAgentBridge( 'br-tun', os_ken_app=os_ken_app), {'phys1': ovs_bridge.OVSAgentBridge( 'br-phys1', os_ken_app=os_ken_app)}) self.qos_driver.consume_api(self.agent_api) self.qos_driver.initialize() self.qos_driver.br_int = mock.Mock() self.qos_driver.br_int.get_dp = mock.Mock(return_value=(mock.Mock(), mock.Mock(), mock.Mock())) self.qos_driver.br_int.get_egress_bw_limit_for_port = mock.Mock( return_value=(1000, 10)) self.get_egress = self.qos_driver.br_int.get_egress_bw_limit_for_port self.get_ingress = self.qos_driver.br_int.get_ingress_bw_limit_for_port self.qos_driver.br_int.del_egress_bw_limit_for_port = mock.Mock() self.delete_egress = ( self.qos_driver.br_int.delete_egress_bw_limit_for_port) self.delete_ingress = ( self.qos_driver.br_int.delete_ingress_bw_limit_for_port) self.create_egress = ( self.qos_driver.br_int.create_egress_bw_limit_for_port) self.update_ingress = ( self.qos_driver.br_int.update_ingress_bw_limit_for_port) self.rules = [ self._create_bw_limit_rule_obj(constants.EGRESS_DIRECTION), self._create_bw_limit_rule_obj(constants.INGRESS_DIRECTION), self._create_dscp_marking_rule_obj()] self.qos_policy = self._create_qos_policy_obj(self.rules) self.port = self._create_fake_port(self.qos_policy.id) def _create_bw_limit_rule_obj(self, direction): rule_obj = rule.QosBandwidthLimitRule() rule_obj.id = uuidutils.generate_uuid() rule_obj.max_kbps = 2 rule_obj.max_burst_kbps = 200 rule_obj.direction = direction rule_obj.obj_reset_changes() return rule_obj def _create_dscp_marking_rule_obj(self): rule_obj = rule.QosDscpMarkingRule() rule_obj.id = uuidutils.generate_uuid() rule_obj.dscp_mark = 32 rule_obj.obj_reset_changes() return rule_obj def _create_qos_policy_obj(self, rules): policy_dict = {'id': uuidutils.generate_uuid(), 'project_id': uuidutils.generate_uuid(), 'name': 'test', 'description': 'test', 'shared': False, 'rules': rules} policy_obj = policy.QosPolicy(self.context, **policy_dict) policy_obj.obj_reset_changes() for policy_rule in policy_obj.rules: policy_rule.qos_policy_id = policy_obj.id policy_rule.obj_reset_changes() return policy_obj def _create_fake_port(self, policy_id): self.port_name = 'fakeport' class FakeVifPort(object): port_name = self.port_name ofport = 111 return {'vif_port': FakeVifPort(), 'qos_policy_id': policy_id, 
'network_qos_policy_id': None, 'port_id': uuidutils.generate_uuid(), 'device_owner': uuidutils.generate_uuid()} def test_create_new_rules(self): self.qos_driver.br_int.get_egress_bw_limit_for_port = mock.Mock( return_value=(None, None)) self.qos_driver.br_int.get_ingress_bw_limit_for_port = mock.Mock( return_value=(None, None)) self.qos_driver.create(self.port, self.qos_policy) self.assertEqual(0, self.delete_egress.call_count) self.assertEqual(0, self.delete_ingress.call_count) self.create_egress.assert_called_once_with( self.port_name, self.rules[0].max_kbps, self.rules[0].max_burst_kbps) self.update_ingress.assert_called_once_with( self.port_name, self.rules[1].max_kbps, self.rules[1].max_burst_kbps) self._assert_dscp_rule_create_updated() def test_create_existing_rules(self): self.qos_driver.create(self.port, self.qos_policy) self._assert_rules_create_updated() self._assert_dscp_rule_create_updated() def test_update_rules(self): self.qos_driver.update(self.port, self.qos_policy) self._assert_rules_create_updated() self._assert_dscp_rule_create_updated() def test_update_rules_no_vif_port(self): port = copy.copy(self.port) port.pop("vif_port") self.qos_driver.update(port, self.qos_policy) self.create_egress.assert_not_called() self.update_ingress.assert_not_called() def _test_delete_rules(self, qos_policy): self.qos_driver.br_int.get_ingress_bw_limit_for_port = mock.Mock( return_value=(self.rules[1].max_kbps, self.rules[1].max_burst_kbps)) self.qos_driver.create(self.port, qos_policy) self.qos_driver.delete(self.port, qos_policy) self.delete_egress.assert_called_once_with(self.port_name) self.delete_ingress.assert_called_once_with(self.port_name) def _test_delete_rules_no_policy(self): self.qos_driver.br_int.get_ingress_bw_limit_for_port = mock.Mock( return_value=(self.rules[1].max_kbps, self.rules[1].max_burst_kbps)) self.qos_driver.delete(self.port) self.delete_egress.assert_called_once_with(self.port_name) self.delete_ingress.assert_called_once_with(self.port_name) def test_delete_rules(self): self._test_delete_rules(self.qos_policy) def test_delete_rules_no_policy(self): self._test_delete_rules_no_policy() def test_delete_rules_no_vif_port(self): port = copy.copy(self.port) port.pop("vif_port") self.qos_driver.delete(port, self.qos_policy) self.delete_egress.assert_not_called() self.delete_ingress.assert_not_called() def _assert_rules_create_updated(self): self.create_egress.assert_called_once_with( self.port_name, self.rules[0].max_kbps, self.rules[0].max_burst_kbps) self.update_ingress.assert_called_once_with( self.port_name, self.rules[1].max_kbps, self.rules[1].max_burst_kbps) def _assert_dscp_rule_create_updated(self): # Assert install_instructions is the last call self.assertEqual( 'install_dscp_marking_rule', self.qos_driver.br_int.method_calls[-1][0]) self.qos_driver.br_int.install_dscp_marking_rule.\ assert_called_once_with(dscp_mark=mock.ANY, port=mock.ANY) def test_create_minimum_bandwidth(self): with mock.patch.object(self.qos_driver, 'update_minimum_bandwidth') \ as mock_update_minimum_bandwidth: self.qos_driver.create_minimum_bandwidth('port_name', 'rule') mock_update_minimum_bandwidth.assert_called_once_with('port_name', 'rule') def test_delete_minimum_bandwidth(self): with mock.patch.object(self.qos_driver.br_int, 'delete_minimum_bandwidth_queue') \ as mock_delete_minimum_bandwidth_queue: self.qos_driver.ports['p_id'] = {} self.qos_driver.delete_minimum_bandwidth({'port_id': 'p_id'}) mock_delete_minimum_bandwidth_queue.assert_not_called() 
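            # The empty entry above means no minimum-bandwidth rule is
            # tracked for 'p_id', so no queue was deleted; next, register an
            # egress minimum-bandwidth rule and expect the queue removal.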
mock_delete_minimum_bandwidth_queue.reset_mock() self.qos_driver.ports['p_id'] = { (qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH, constants.EGRESS_DIRECTION): 'rule_port'} self.qos_driver.delete_minimum_bandwidth({'port_id': 'p_id'}) mock_delete_minimum_bandwidth_queue.assert_called_once_with('p_id') def test_update_minimum_bandwidth_no_vif_port(self): with mock.patch.object(self.qos_driver.br_int, 'update_minimum_bandwidth_queue') \ as mock_delete_minimum_bandwidth_queue: self.qos_driver.update_minimum_bandwidth({}, mock.ANY) mock_delete_minimum_bandwidth_queue.assert_not_called() def test_update_minimum_bandwidth_no_phy_brs(self): vif_port = mock.Mock() vif_port.ofport = 'ofport' rule = mock.Mock() rule.min_kbps = 1500 port = {'port_id': 'port_id', 'vif_port': vif_port} with mock.patch.object(self.qos_driver.br_int, 'update_minimum_bandwidth_queue') \ as mock_delete_minimum_bandwidth_queue, \ mock.patch.object(self.qos_driver.agent_api, 'request_phy_brs'): self.qos_driver.update_minimum_bandwidth(port, rule) mock_delete_minimum_bandwidth_queue.assert_called_once_with( 'port_id', [], 'ofport', 1500) def test_update_minimum_bandwidth(self): vif_port = mock.Mock() vif_port.ofport = 'ofport' rule = mock.Mock() rule.min_kbps = 1500 port = {'port_id': 'port_id', 'vif_port': vif_port} with mock.patch.object(self.qos_driver.br_int, 'update_minimum_bandwidth_queue') \ as mock_delete_minimum_bandwidth_queue, \ mock.patch.object(self.qos_driver.agent_api, 'request_phy_brs') as mock_request_phy_brs: phy_br = mock.Mock() phy_br.get_bridge_ports.return_value = ['port1', 'port2'] mock_request_phy_brs.return_value = [phy_br] self.qos_driver.update_minimum_bandwidth(port, rule) mock_delete_minimum_bandwidth_queue.assert_called_once_with( 'port_id', ['port1', 'port2'], 'ofport', 1500) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/fake_oflib.py0000644000175000017500000001235200000000000032714 0ustar00coreycorey00000000000000# Copyright (C) 2014 VA Linux Systems Japan K.K. # Copyright (C) 2014 Fumihiko Kakuma # Copyright (C) 2014 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
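# Illustrative sketch (hypothetical module name 'some_missing_lib', added
# for this document): patch_fake_oflib_of() below works by pre-seeding
# sys.modules through mock.patch.dict, so that later `import os_ken...`
# statements resolve to the fakes instead of the real library.  The same
# technique in miniature:
import mock

_fake_mod = mock.Mock()
with mock.patch.dict('sys.modules', {'some_missing_lib': _fake_mod}):
    import some_missing_lib          # resolved from the patched sys.modules
    some_missing_lib.do_thing()      # the call is recorded on the mock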
import mock class _Eq(object): def __eq__(self, other): return repr(self) == repr(other) def __ne__(self, other): return not self.__eq__(other) class _Value(_Eq): def __or__(self, b): return _Op('|', self, b) def __ror__(self, a): return _Op('|', a, self) class _SimpleValue(_Value): def __init__(self, name): self.name = name def __repr__(self): return self.name class _Op(_Value): def __init__(self, op, a, b): self.op = op self.a = a self.b = b def __repr__(self): return '%s%s%s' % (self.a, self.op, self.b) def _mkcls(name): class Cls(_Eq): _name = name def __init__(self, *args, **kwargs): self._args = args self._kwargs = kwargs self._hist = [] def __getattr__(self, name): return self._kwargs[name] def __repr__(self): args = list(map(repr, self._args)) kwargs = sorted(['%s=%s' % (x, y) for x, y in self._kwargs.items()]) return '%s(%s)' % (self._name, ', '.join(args + kwargs)) return Cls class _Mod(object): _cls_cache = {} def __init__(self, name): self._name = name def __getattr__(self, name): fullname = '%s.%s' % (self._name, name) if '_' in name: # constants are named like OFPxxx_yyy_zzz return _SimpleValue(fullname) try: return self._cls_cache[fullname] except KeyError: pass cls = _mkcls(fullname) self._cls_cache[fullname] = cls return cls def __repr__(self): return 'Mod(%s)' % (self._name,) def patch_fake_oflib_of(): os_ken_mod = mock.Mock() os_ken_base_mod = os_ken_mod.base os_ken_exc_mod = os_ken_mod.exception os_ken_ctrl_mod = os_ken_mod.controller handler = _Mod('os_ken.controller.handler') handler.set_ev_cls = mock.Mock() ofp_event = _Mod('os_ken.controller.ofp_event') os_ken_ctrl_mod.handler = handler os_ken_ctrl_mod.ofp_event = ofp_event os_ken_lib_mod = os_ken_mod.lib os_ken_lib_hub = os_ken_lib_mod.hub os_ken_packet_mod = os_ken_lib_mod.packet packet = _Mod('os_ken.lib.packet.packet') arp = _Mod('os_ken.lib.packet.arp') ethernet = _Mod('os_ken.lib.packet.ethernet') ether_types = _Mod('os_ken.lib.packet.ether_types') in_proto = _Mod('os_ken.lib.packet.in_proto') icmpv6 = _Mod('os_ken.lib.packet.icmpv6') vlan = _Mod('os_ken.lib.packet.vlan') os_ken_packet_mod.packet = packet packet.Packet = mock.Mock() os_ken_packet_mod.arp = arp os_ken_packet_mod.ethernet = ethernet os_ken_packet_mod.ether_types = ether_types os_ken_packet_mod.icmpv6 = icmpv6 os_ken_packet_mod.in_proto = in_proto os_ken_packet_mod.vlan = vlan os_ken_ofproto_mod = os_ken_mod.ofproto ofp = _Mod('os_ken.ofproto.ofproto_v1_3') ofpp = _Mod('os_ken.ofproto.ofproto_v1_3_parser') os_ken_ofproto_mod.ofproto_v1_3 = ofp os_ken_ofproto_mod.ofproto_v1_3_parser = ofpp os_ken_app_mod = os_ken_mod.app os_ken_app_ofctl_mod = os_ken_app_mod.ofctl os_ken_ofctl_api = os_ken_app_ofctl_mod.api modules = {'os_ken': os_ken_mod, 'os_ken.base': os_ken_base_mod, 'os_ken.controller': os_ken_ctrl_mod, 'os_ken.controller.handler': handler, 'os_ken.controller.handler.set_ev_cls': handler.set_ev_cls, 'os_ken.controller.ofp_event': ofp_event, 'os_ken.exception': os_ken_exc_mod, 'os_ken.lib': os_ken_lib_mod, 'os_ken.lib.hub': os_ken_lib_hub, 'os_ken.lib.packet': os_ken_packet_mod, 'os_ken.lib.packet.packet': packet, 'os_ken.lib.packet.packet.Packet': packet.Packet, 'os_ken.lib.packet.arp': arp, 'os_ken.lib.packet.ethernet': ethernet, 'os_ken.lib.packet.ether_types': ether_types, 'os_ken.lib.packet.icmpv6': icmpv6, 'os_ken.lib.packet.in_proto': in_proto, 'os_ken.lib.packet.vlan': vlan, 'os_ken.ofproto': os_ken_ofproto_mod, 'os_ken.ofproto.ofproto_v1_3': ofp, 'os_ken.ofproto.ofproto_v1_3_parser': ofpp, 'os_ken.app': os_ken_app_mod, 
'os_ken.app.ofctl': os_ken_app_ofctl_mod, 'os_ken.app.ofctl.api': os_ken_ofctl_api} return mock.patch.dict('sys.modules', modules) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.471046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/0000755000175000017500000000000000000000000032107 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/__init__.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/__init__0000644000175000017500000000000000000000000033557 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.471046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/0000755000175000017500000000000000000000000033375 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/__init__.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/_0000644000175000017500000000000000000000000033524 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000023200000000000011452 xustar0000000000000000132 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge_test_base.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/o0000644000175000017500000002636700000000000033574 0ustar00coreycorey00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
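# Illustrative sketch (stand-in class added for this document, not the real
# fakes): the expected OFPFlowMod/OFPMatch comparisons in the tests below
# work because the fake os_ken classes from fake_oflib define __eq__ in
# terms of repr(), so two independently constructed fakes with identical
# arguments compare equal:
class _ReprEq(object):
    def __init__(self, *args, **kwargs):
        self._args = args
        self._kwargs = kwargs

    def __repr__(self):
        kwargs = ['%s=%r' % kv for kv in sorted(self._kwargs.items())]
        return '_ReprEq(%s)' % ', '.join(
            [repr(a) for a in self._args] + kwargs)

    def __eq__(self, other):
        return repr(self) == repr(other)

assert _ReprEq(1, a=2) == _ReprEq(1, a=2)
assert not _ReprEq(1, a=2) == _ReprEq(1, a=3)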
import mock from oslo_utils import importutils from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \ import ovs_test_base call = mock.call # short hand class OVSBridgeTestBase(ovs_test_base.OVSOSKenTestBase): _ARP_MODULE = 'os_ken.lib.packet.arp' _ETHER_TYPES_MODULE = 'os_ken.lib.packet.ether_types' _ICMPV6_MODULE = 'os_ken.lib.packet.icmpv6' _IN_PROTO_MODULE = 'os_ken.lib.packet.in_proto' _OFP_MODULE = 'os_ken.ofproto.ofproto_v1_3' _OFPP_MODULE = 'os_ken.ofproto.ofproto_v1_3_parser' def setup_bridge_mock(self, name, cls): self.br = cls(name) self.stamp = self.br.default_cookie self.dp = mock.Mock() self.ofp = importutils.import_module(self._OFP_MODULE) self.ofpp = importutils.import_module(self._OFPP_MODULE) self.arp = importutils.import_module(self._ARP_MODULE) self.ether_types = importutils.import_module(self._ETHER_TYPES_MODULE) self.icmpv6 = importutils.import_module(self._ICMPV6_MODULE) self.in_proto = importutils.import_module(self._IN_PROTO_MODULE) mock.patch.object(self.br, '_get_dp', autospec=True, return_value=self._get_dp()).start() mock__send_msg = mock.patch.object(self.br, '_send_msg').start() mock_delete_flows = mock.patch.object(self.br, 'uninstall_flows').start() self.mock = mock.Mock() self.mock.attach_mock(mock__send_msg, '_send_msg') self.mock.attach_mock(mock_delete_flows, 'uninstall_flows') def _get_dp(self): return self.dp, self.ofp, self.ofpp def test_drop_port(self): in_port = 2345 self.br.drop_port(in_port=in_port) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg( ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(in_port=in_port), priority=2, table_id=0), active_bundle=None), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_goto(self): dest_table_id = 123 priority = 99 in_port = 666 self.br.install_goto(dest_table_id=dest_table_id, priority=priority, in_port=in_port) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg( ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionGotoTable(table_id=dest_table_id), ], match=ofpp.OFPMatch(in_port=in_port), priority=priority, table_id=0), active_bundle=None), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_drop(self): priority = 99 in_port = 666 self.br.install_drop(priority=priority, in_port=in_port) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg( ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(in_port=in_port), priority=priority, table_id=0), active_bundle=None), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_normal(self): priority = 99 in_port = 666 self.br.install_normal(priority=priority, in_port=in_port) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg( ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0) ]), ], match=ofpp.OFPMatch(in_port=in_port), priority=priority, table_id=0), active_bundle=None), ] self.assertEqual(expected, self.mock.mock_calls) def test__cidr_to_os_ken(self): f = self.br._cidr_to_os_ken self.assertEqual('192.168.0.1', f('192.168.0.1')) self.assertEqual('192.168.0.1', f('192.168.0.1/32')) self.assertEqual(('192.168.0.0', '255.255.255.0'), f('192.168.0.0/24')) def test__setup_controllers__out_of_band(self): cfg = mock.MagicMock() cfg.OVS.of_listen_address = "" cfg.OVS.of_listen_port = "" m_add_protocols = mock.patch.object(self.br, 
'add_protocols') m_set_controller = mock.patch.object(self.br, 'set_controller') m_set_probe = mock.patch.object(self.br, 'set_controllers_inactivity_probe') m_set_ccm = mock.patch.object(self.br, 'set_controllers_connection_mode') with m_set_ccm as set_ccm: with m_set_controller, m_add_protocols, m_set_probe: self.br.setup_controllers(cfg) set_ccm.assert_called_once_with("out-of-band") class OVSDVRProcessTestMixin(object): def test_install_dvr_process_ipv4(self): vlan_tag = 999 gateway_ip = '192.0.2.1' self.br.install_dvr_process_ipv4(vlan_tag=vlan_tag, gateway_ip=gateway_ip) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch( eth_type=self.ether_types.ETH_TYPE_ARP, arp_tpa=gateway_ip, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT), priority=3, table_id=constants.FLOOD_TO_TUN), active_bundle=None), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_dvr_process_ipv4(self): vlan_tag = 999 gateway_ip = '192.0.2.1' self.br.delete_dvr_process_ipv4(vlan_tag=vlan_tag, gateway_ip=gateway_ip) (dp, ofp, ofpp) = self._get_dp() expected = [ call.uninstall_flows(table_id=constants.FLOOD_TO_TUN, match=ofpp.OFPMatch( eth_type=self.ether_types.ETH_TYPE_ARP, arp_tpa=gateway_ip, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_dvr_process_ipv6(self): vlan_tag = 999 gateway_mac = '08:60:6e:7f:74:e7' self.br.install_dvr_process_ipv6(vlan_tag=vlan_tag, gateway_mac=gateway_mac) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch( eth_src=gateway_mac, eth_type=self.ether_types.ETH_TYPE_IPV6, icmpv6_type=self.icmpv6.ND_ROUTER_ADVERT, ip_proto=self.in_proto.IPPROTO_ICMPV6, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT), priority=3, table_id=constants.FLOOD_TO_TUN), active_bundle=None), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_dvr_process_ipv6(self): vlan_tag = 999 gateway_mac = '08:60:6e:7f:74:e7' self.br.delete_dvr_process_ipv6(vlan_tag=vlan_tag, gateway_mac=gateway_mac) (dp, ofp, ofpp) = self._get_dp() expected = [ call.uninstall_flows(table_id=constants.FLOOD_TO_TUN, match=ofpp.OFPMatch( eth_src=gateway_mac, eth_type=self.ether_types.ETH_TYPE_IPV6, icmpv6_type=self.icmpv6.ND_ROUTER_ADVERT, ip_proto=self.in_proto.IPPROTO_ICMPV6, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_dvr_process(self): vlan_tag = 999 vif_mac = '00:0e:0c:5e:95:d0' dvr_mac_address = 'f2:0b:a4:5b:b2:ab' self.br.install_dvr_process(vlan_tag=vlan_tag, vif_mac=vif_mac, dvr_mac_address=dvr_mac_address) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch( eth_dst=vif_mac, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT), priority=2, table_id=self.dvr_process_table_id), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionSetField(eth_src=dvr_mac_address), ]), ofpp.OFPInstructionGotoTable( table_id=self.dvr_process_next_table_id), ], match=ofpp.OFPMatch( eth_src=vif_mac, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT), priority=1, table_id=self.dvr_process_table_id), active_bundle=None), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_dvr_process(self): vlan_tag = 999 vif_mac = '00:0e:0c:5e:95:d0' 
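        # delete_dvr_process() is expected to remove both flows installed by
        # install_dvr_process() above: the higher-priority eth_dst match and
        # the lower-priority eth_src rewrite rule.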
self.br.delete_dvr_process(vlan_tag=vlan_tag, vif_mac=vif_mac) (dp, ofp, ofpp) = self._get_dp() expected = [ call.uninstall_flows(table_id=self.dvr_process_table_id, match=ofpp.OFPMatch( eth_dst=vif_mac, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)), call.uninstall_flows(table_id=self.dvr_process_table_id, match=ofpp.OFPMatch( eth_src=vif_mac, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)), ] self.assertEqual(expected, self.mock.mock_calls) ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_int.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/t0000644000175000017500000005223400000000000033571 0ustar00coreycorey00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib import constants as p_const from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import ovs_bridge_test_base call = mock.call # short hand class OVSIntegrationBridgeTest(ovs_bridge_test_base.OVSBridgeTestBase): def setUp(self): super(OVSIntegrationBridgeTest, self).setUp() self.setup_bridge_mock('br-int', self.br_int_cls) self.stamp = self.br.default_cookie def test_setup_default_table(self): self.br.setup_default_table() (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(), priority=0, table_id=23), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionGotoTable(table_id=60), ], match=ofpp.OFPMatch(), priority=0, table_id=0), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions( ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0) ]), ], match=ofpp.OFPMatch(), priority=3, table_id=60), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(), priority=0, table_id=24), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(vlan_vid=4095), priority=65535, table_id=0), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions( ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0) ]), ], match=ofpp.OFPMatch(), priority=3, table_id=61), active_bundle=None), ] self.assertEqual(expected, self.mock.mock_calls) def test_provision_local_vlan(self): port = 999 lvid = 888 segmentation_id = 777 self.br.provision_local_vlan(port=port, lvid=lvid, segmentation_id=segmentation_id) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ 
ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionSetField( vlan_vid=lvid | ofp.OFPVID_PRESENT), ]), ofpp.OFPInstructionGotoTable(table_id=60), ], match=ofpp.OFPMatch( in_port=port, vlan_vid=segmentation_id | ofp.OFPVID_PRESENT), priority=3, table_id=0), active_bundle=None), ] self.assertEqual(expected, self.mock.mock_calls) def test_provision_local_vlan_novlan(self): port = 999 lvid = 888 segmentation_id = None self.br.provision_local_vlan(port=port, lvid=lvid, segmentation_id=segmentation_id) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionPushVlan(), ofpp.OFPActionSetField( vlan_vid=lvid | ofp.OFPVID_PRESENT), ]), ofpp.OFPInstructionGotoTable(table_id=60), ], match=ofpp.OFPMatch( in_port=port, vlan_vid=ofp.OFPVID_NONE), priority=3, table_id=0), active_bundle=None), ] self.assertEqual(expected, self.mock.mock_calls) def test_reclaim_local_vlan(self): port = 999 segmentation_id = 777 self.br.reclaim_local_vlan(port=port, segmentation_id=segmentation_id) (dp, ofp, ofpp) = self._get_dp() expected = [ call.uninstall_flows( match=ofpp.OFPMatch( in_port=port, vlan_vid=segmentation_id | ofp.OFPVID_PRESENT)), ] self.assertEqual(expected, self.mock.mock_calls) def test_reclaim_local_vlan_novlan(self): port = 999 segmentation_id = None self.br.reclaim_local_vlan(port=port, segmentation_id=segmentation_id) (dp, ofp, ofpp) = self._get_dp() expected = [ call.uninstall_flows( match=ofpp.OFPMatch( in_port=port, vlan_vid=ofp.OFPVID_NONE)), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_dvr_to_src_mac(self): network_type = 'vxlan' vlan_tag = 1111 gateway_mac = '08:60:6e:7f:74:e7' dst_mac = '00:02:b3:13:fe:3d' dst_port = 6666 self.br.install_dvr_to_src_mac(network_type=network_type, vlan_tag=vlan_tag, gateway_mac=gateway_mac, dst_mac=dst_mac, dst_port=dst_port) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionSetField(eth_src=gateway_mac), ]), ofpp.OFPInstructionGotoTable(table_id=60), ], match=ofpp.OFPMatch( eth_dst=dst_mac, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT), priority=20, table_id=1), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionPopVlan(), ofpp.OFPActionOutput(6666, 0), ]), ], match=ofpp.OFPMatch( eth_dst=dst_mac, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT), priority=20, table_id=60), active_bundle=None), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_dvr_to_src_mac(self): network_type = 'vxlan' vlan_tag = 1111 dst_mac = '00:02:b3:13:fe:3d' self.br.delete_dvr_to_src_mac(network_type=network_type, vlan_tag=vlan_tag, dst_mac=dst_mac) (dp, ofp, ofpp) = self._get_dp() expected = [ call.uninstall_flows( strict=True, priority=20, table_id=1, match=ofpp.OFPMatch( eth_dst=dst_mac, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)), call.uninstall_flows( strict=True, priority=20, table_id=60, match=ofpp.OFPMatch( eth_dst=dst_mac, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_dvr_to_src_mac_vlan(self): network_type = 'vlan' vlan_tag = 1111 gateway_mac = '08:60:6e:7f:74:e7' dst_mac = '00:02:b3:13:fe:3d' dst_port = 6666 self.br.install_dvr_to_src_mac(network_type=network_type, vlan_tag=vlan_tag, gateway_mac=gateway_mac, 
dst_mac=dst_mac, dst_port=dst_port) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionSetField(eth_src=gateway_mac), ]), ofpp.OFPInstructionGotoTable(table_id=60), ], match=ofpp.OFPMatch( eth_dst=dst_mac, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT), priority=20, table_id=2), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionPopVlan(), ofpp.OFPActionOutput(dst_port, 0), ]), ], match=ofpp.OFPMatch( eth_dst=dst_mac, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT), priority=20, table_id=60), active_bundle=None), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_dvr_to_src_mac_vlan(self): network_type = 'vlan' vlan_tag = 1111 dst_mac = '00:02:b3:13:fe:3d' self.br.delete_dvr_to_src_mac(network_type=network_type, vlan_tag=vlan_tag, dst_mac=dst_mac) (dp, ofp, ofpp) = self._get_dp() expected = [ call.uninstall_flows( strict=True, priority=20, table_id=2, match=ofpp.OFPMatch( eth_dst=dst_mac, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)), call.uninstall_flows( strict=True, priority=20, table_id=60, match=ofpp.OFPMatch( eth_dst=dst_mac, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)), ] self.assertEqual(expected, self.mock.mock_calls) def test_add_dvr_mac_vlan(self): mac = '00:02:b3:13:fe:3d' port = 8888 self.br.add_dvr_mac_vlan(mac=mac, port=port) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionGotoTable(table_id=2), ], match=ofpp.OFPMatch( eth_src=mac, in_port=port), priority=4, table_id=0), active_bundle=None) ] self.assertEqual(expected, self.mock.mock_calls) def test_remove_dvr_mac_vlan(self): mac = '00:02:b3:13:fe:3d' self.br.remove_dvr_mac_vlan(mac=mac) (dp, ofp, ofpp) = self._get_dp() expected = [ call.uninstall_flows(eth_src=mac, table_id=0), ] self.assertEqual(expected, self.mock.mock_calls) def test_add_dvr_mac_tun(self): mac = '00:02:b3:13:fe:3d' port = 8888 self.br.add_dvr_mac_tun(mac=mac, port=port) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionGotoTable(table_id=1), ], match=ofpp.OFPMatch( eth_src=mac, in_port=port), priority=2, table_id=0), active_bundle=None) ] self.assertEqual(expected, self.mock.mock_calls) def test_remove_dvr_mac_tun(self): mac = '00:02:b3:13:fe:3d' port = 8888 self.br.remove_dvr_mac_tun(mac=mac, port=port) expected = [ call.uninstall_flows(eth_src=mac, in_port=port, table_id=0), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_icmpv6_na_spoofing_protection(self): port = 8888 ip_addresses = ['2001:db8::1', 'fdf8:f53b:82e4::1/128'] self.br.install_icmpv6_na_spoofing_protection(port, ip_addresses) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionGotoTable(table_id=60), ], match=ofpp.OFPMatch( eth_type=self.ether_types.ETH_TYPE_IPV6, icmpv6_type=self.icmpv6.ND_NEIGHBOR_ADVERT, ip_proto=self.in_proto.IPPROTO_ICMPV6, ipv6_nd_target='2001:db8::1', in_port=8888, ), priority=2, table_id=24), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionGotoTable(table_id=60), ], match=ofpp.OFPMatch( eth_type=self.ether_types.ETH_TYPE_IPV6, icmpv6_type=self.icmpv6.ND_NEIGHBOR_ADVERT, ip_proto=self.in_proto.IPPROTO_ICMPV6, 
ipv6_nd_target='fdf8:f53b:82e4::1', in_port=8888, ), priority=2, table_id=24), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionGotoTable(table_id=24), ], match=ofpp.OFPMatch( eth_type=self.ether_types.ETH_TYPE_IPV6, icmpv6_type=self.icmpv6.ND_NEIGHBOR_ADVERT, ip_proto=self.in_proto.IPPROTO_ICMPV6, in_port=8888, ), priority=10, table_id=0), active_bundle=None), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_arp_spoofing_protection(self): port = 8888 ip_addresses = ['192.0.2.1', '192.0.2.2/32'] self.br.install_arp_spoofing_protection(port, ip_addresses) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionGotoTable(table_id=25), ], match=ofpp.OFPMatch( eth_type=self.ether_types.ETH_TYPE_ARP, arp_spa='192.0.2.1', in_port=8888, ), priority=2, table_id=24), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionGotoTable(table_id=25), ], match=ofpp.OFPMatch( eth_type=self.ether_types.ETH_TYPE_ARP, arp_spa='192.0.2.2', in_port=8888 ), priority=2, table_id=24), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionGotoTable(table_id=24), ], match=ofpp.OFPMatch( eth_type=self.ether_types.ETH_TYPE_ARP, in_port=8888, ), priority=10, table_id=0), active_bundle=None), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_arp_spoofing_protection(self): port = 8888 self.br.delete_arp_spoofing_protection(port) (dp, ofp, ofpp) = self._get_dp() expected = [ call.uninstall_flows(table_id=0, match=ofpp.OFPMatch( eth_type=self.ether_types.ETH_TYPE_ARP, in_port=8888)), call.uninstall_flows(table_id=0, match=ofpp.OFPMatch( eth_type=self.ether_types.ETH_TYPE_IPV6, icmpv6_type=self.icmpv6.ND_NEIGHBOR_ADVERT, in_port=8888, ip_proto=self.in_proto.IPPROTO_ICMPV6)), call.uninstall_flows(table_id=24, in_port=port), ] self.assertEqual(expected, self.mock.mock_calls) def _test_delete_dvr_dst_mac_for_arp(self, network_type): if network_type == p_const.TYPE_VLAN: table_id = constants.DVR_TO_SRC_MAC_VLAN else: table_id = constants.DVR_TO_SRC_MAC vlan_tag = 1111 gateway_mac = '00:02:b3:13:fe:3e' dvr_mac = '00:02:b3:13:fe:3f' rtr_port = 8888 self.br.delete_dvr_dst_mac_for_arp(network_type=network_type, vlan_tag=vlan_tag, gateway_mac=gateway_mac, dvr_mac=dvr_mac, rtr_port=rtr_port) (dp, ofp, ofpp) = self._get_dp() expected = [ call.uninstall_flows( strict=True, priority=5, table_id=table_id, match=ofpp.OFPMatch( eth_dst=dvr_mac, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT)), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_dvr_dst_mac_for_arp_vlan(self): self._test_delete_dvr_dst_mac_for_arp(network_type='vlan') def test_delete_dvr_dst_mac_for_arp_tunnel(self): self._test_delete_dvr_dst_mac_for_arp(network_type='vxlan') def test_install_dscp_marking_rule(self): test_port = 8888 test_mark = 38 self.br.install_dscp_marking_rule(port=test_port, dscp_mark=test_mark) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.br.default_cookie, instructions=[ofpp.OFPInstructionActions( ofp.OFPIT_APPLY_ACTIONS, [ofpp.OFPActionSetField(reg2=1), ofpp.OFPActionSetField(ip_dscp=38), ofpp.NXActionResubmit(in_port=8888)])], match=ofpp.OFPMatch(eth_type=0x0800, in_port=8888, reg2=0), priority=65535, table_id=0), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.br.default_cookie, 
instructions=[ofpp.OFPInstructionActions( ofp.OFPIT_APPLY_ACTIONS, [ofpp.OFPActionSetField(reg2=1), ofpp.OFPActionSetField(ip_dscp=38), ofpp.NXActionResubmit(in_port=8888)])], match=ofpp.OFPMatch(eth_type=0x86DD, in_port=8888, reg2=0), priority=65535, table_id=0), active_bundle=None) ] self.assertEqual(expected, self.mock.mock_calls) ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_phys.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/t0000644000175000017500000001330100000000000033561 0ustar00coreycorey00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import neutron.plugins.ml2.drivers.openvswitch.agent.common.constants \ as ovs_const from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import ovs_bridge_test_base call = mock.call # short hand class OVSPhysicalBridgeTest(ovs_bridge_test_base.OVSBridgeTestBase, ovs_bridge_test_base.OVSDVRProcessTestMixin): dvr_process_table_id = ovs_const.DVR_PROCESS_VLAN dvr_process_next_table_id = ovs_const.LOCAL_VLAN_TRANSLATION def setUp(self): conn_patcher = mock.patch( 'neutron.agent.ovsdb.impl_idl._connection') conn_patcher.start() super(OVSPhysicalBridgeTest, self).setUp() self.addCleanup(conn_patcher.stop) self.setup_bridge_mock('br-phys', self.br_phys_cls) self.stamp = self.br.default_cookie def test_setup_default_table(self): self.br.setup_default_table() (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0), ]), ], match=ofpp.OFPMatch(), priority=0, table_id=0), active_bundle=None), ] self.assertEqual(expected, self.mock.mock_calls) def test_provision_local_vlan(self): port = 999 lvid = 888 segmentation_id = 777 distributed = False self.br.provision_local_vlan(port=port, lvid=lvid, segmentation_id=segmentation_id, distributed=distributed) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionSetField( vlan_vid=segmentation_id | ofp.OFPVID_PRESENT), ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0), ]), ], match=ofpp.OFPMatch( in_port=port, vlan_vid=lvid | ofp.OFPVID_PRESENT), priority=4, table_id=0), active_bundle=None), ] self.assertEqual(expected, self.mock.mock_calls) def test_provision_local_vlan_novlan(self): port = 999 lvid = 888 segmentation_id = None distributed = False self.br.provision_local_vlan(port=port, lvid=lvid, segmentation_id=segmentation_id, distributed=distributed) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ 
ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionPopVlan(), ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0), ]), ], match=ofpp.OFPMatch( in_port=port, vlan_vid=lvid | ofp.OFPVID_PRESENT), priority=4, table_id=0), active_bundle=None), ] self.assertEqual(expected, self.mock.mock_calls) def test_reclaim_local_vlan(self): port = 999 lvid = 888 self.br.reclaim_local_vlan(port=port, lvid=lvid) (dp, ofp, ofpp) = self._get_dp() expected = [ call.uninstall_flows( match=ofpp.OFPMatch( in_port=port, vlan_vid=lvid | ofp.OFPVID_PRESENT)), ] self.assertEqual(expected, self.mock.mock_calls) def test_add_dvr_mac_vlan(self): mac = '00:02:b3:13:fe:3d' port = 8888 self.br.add_dvr_mac_vlan(mac=mac, port=port) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionOutput(port, 0), ]), ], match=ofpp.OFPMatch(eth_src=mac), priority=2, table_id=3), active_bundle=None), ] self.assertEqual(expected, self.mock.mock_calls) def test_remove_dvr_mac_vlan(self): mac = '00:02:b3:13:fe:3d' self.br.remove_dvr_mac_vlan(mac=mac) (dp, ofp, ofpp) = self._get_dp() expected = [ call.uninstall_flows(eth_src=mac, table_id=3), ] self.assertEqual(expected, self.mock.mock_calls) ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_tun.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/t0000644000175000017500000005252400000000000033573 0ustar00coreycorey00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import neutron.plugins.ml2.drivers.openvswitch.agent.common.constants \ as ovs_const from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import ovs_bridge_test_base call = mock.call # short hand class OVSTunnelBridgeTest(ovs_bridge_test_base.OVSBridgeTestBase, ovs_bridge_test_base.OVSDVRProcessTestMixin): dvr_process_table_id = ovs_const.DVR_PROCESS dvr_process_next_table_id = ovs_const.PATCH_LV_TO_TUN def setUp(self): conn_patcher = mock.patch( 'neutron.agent.ovsdb.impl_idl._connection') conn_patcher.start() super(OVSTunnelBridgeTest, self).setUp() # NOTE(ivasilevskaya) The behaviour of oslotest.base.addCleanup() # according to https://review.opendev.org/#/c/119201/4 guarantees # that all started mocks will be stopped even without direct call to # patcher.stop(). # If any individual mocks should be stopped by other than default # mechanism, their cleanup has to be added after # oslotest.BaseTestCase.setUp() not to be included in the stopall set # that will be cleaned up by mock.patch.stopall. This way the mock # won't be attempted to be stopped twice. 
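        # NOTE: concretely, for conn_patcher here: it is started before
        # super().setUp(), so it lands in the set that
        # mock.patch.stopall() will clean; its stop is registered with
        # addCleanup() only after setUp().  Cleanups run LIFO, so
        # conn_patcher.stop() executes first and removes the patcher
        # from mock's active set, and the later stopall() therefore
        # does not attempt to stop it again.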
self.addCleanup(conn_patcher.stop) self.setup_bridge_mock('br-tun', self.br_tun_cls) self.stamp = self.br.default_cookie def test_setup_default_table(self): patch_int_ofport = 5555 arp_responder_enabled = False self.br.setup_default_table(patch_int_ofport=patch_int_ofport, arp_responder_enabled=arp_responder_enabled) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ofpp.OFPInstructionGotoTable(table_id=2)], match=ofpp.OFPMatch(in_port=patch_int_ofport), priority=1, table_id=0), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(), priority=0, table_id=0), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ofpp.OFPInstructionGotoTable(table_id=20)], match=ofpp.OFPMatch( eth_dst=('00:00:00:00:00:00', '01:00:00:00:00:00')), priority=0, table_id=2), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ofpp.OFPInstructionGotoTable(table_id=22)], match=ofpp.OFPMatch( eth_dst=('01:00:00:00:00:00', '01:00:00:00:00:00')), priority=0, table_id=2), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(), priority=0, table_id=3), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(), priority=0, table_id=4), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(), priority=0, table_id=6), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.NXActionLearn( cookie=self.stamp, hard_timeout=300, priority=1, specs=[ ofpp.NXFlowSpecMatch( dst=('vlan_tci', 0), n_bits=12, src=('vlan_tci', 0)), ofpp.NXFlowSpecMatch( dst=('eth_dst', 0), n_bits=48, src=('eth_src', 0)), ofpp.NXFlowSpecLoad( dst=('vlan_tci', 0), n_bits=16, src=0), ofpp.NXFlowSpecLoad( dst=('tunnel_id', 0), n_bits=64, src=('tunnel_id', 0)), ofpp.NXFlowSpecOutput( dst='', n_bits=32, src=('in_port', 0)), ], table_id=20), ofpp.OFPActionOutput(patch_int_ofport, 0), ]), ], match=ofpp.OFPMatch(), priority=1, table_id=10), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ofpp.OFPInstructionGotoTable(table_id=22)], match=ofpp.OFPMatch(), priority=0, table_id=20), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(), priority=0, table_id=22), active_bundle=None) ] self.assertEqual(expected, self.mock.mock_calls) def test_setup_default_table_arp_responder_enabled(self): patch_int_ofport = 5555 arp_responder_enabled = True self.br.setup_default_table(patch_int_ofport=patch_int_ofport, arp_responder_enabled=arp_responder_enabled) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ofpp.OFPInstructionGotoTable(table_id=2)], match=ofpp.OFPMatch(in_port=patch_int_ofport), priority=1, table_id=0), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(), priority=0, table_id=0), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ofpp.OFPInstructionGotoTable(table_id=21)], match=ofpp.OFPMatch( eth_dst='ff:ff:ff:ff:ff:ff', eth_type=self.ether_types.ETH_TYPE_ARP), priority=1, table_id=2), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, 
cookie=self.stamp, instructions=[ofpp.OFPInstructionGotoTable(table_id=20)], match=ofpp.OFPMatch( eth_dst=('00:00:00:00:00:00', '01:00:00:00:00:00')), priority=0, table_id=2), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ofpp.OFPInstructionGotoTable(table_id=22)], match=ofpp.OFPMatch( eth_dst=('01:00:00:00:00:00', '01:00:00:00:00:00')), priority=0, table_id=2), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(), priority=0, table_id=3), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(), priority=0, table_id=4), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(), priority=0, table_id=6), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.NXActionLearn( cookie=self.stamp, hard_timeout=300, priority=1, specs=[ ofpp.NXFlowSpecMatch( dst=('vlan_tci', 0), n_bits=12, src=('vlan_tci', 0)), ofpp.NXFlowSpecMatch( dst=('eth_dst', 0), n_bits=48, src=('eth_src', 0)), ofpp.NXFlowSpecLoad( dst=('vlan_tci', 0), n_bits=16, src=0), ofpp.NXFlowSpecLoad( dst=('tunnel_id', 0), n_bits=64, src=('tunnel_id', 0)), ofpp.NXFlowSpecOutput( dst='', n_bits=32, src=('in_port', 0)), ], table_id=20), ofpp.OFPActionOutput(patch_int_ofport, 0), ]), ], match=ofpp.OFPMatch(), priority=1, table_id=10), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ofpp.OFPInstructionGotoTable(table_id=22)], match=ofpp.OFPMatch(), priority=0, table_id=20), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ofpp.OFPInstructionGotoTable(table_id=22)], match=ofpp.OFPMatch(), priority=0, table_id=21), active_bundle=None), call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[], match=ofpp.OFPMatch(), priority=0, table_id=22), active_bundle=None) ] self.assertEqual(expected, self.mock.mock_calls) def test_provision_local_vlan(self): network_type = 'vxlan' lvid = 888 segmentation_id = 777 distributed = False self.br.provision_local_vlan(network_type=network_type, lvid=lvid, segmentation_id=segmentation_id, distributed=distributed) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionPushVlan(), ofpp.OFPActionSetField( vlan_vid=lvid | ofp.OFPVID_PRESENT) ]), ofpp.OFPInstructionGotoTable(table_id=10), ], match=ofpp.OFPMatch(tunnel_id=segmentation_id), priority=1, table_id=4), active_bundle=None), ] self.assertEqual(expected, self.mock.mock_calls) def test_reclaim_local_vlan(self): network_type = 'vxlan' segmentation_id = 777 self.br.reclaim_local_vlan(network_type=network_type, segmentation_id=segmentation_id) (dp, ofp, ofpp) = self._get_dp() expected = [ call.uninstall_flows( table_id=4, match=ofpp.OFPMatch(tunnel_id=segmentation_id)), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_flood_to_tun(self): vlan = 3333 tun_id = 2222 ports = [11, 44, 22, 33] self.br.install_flood_to_tun(vlan=vlan, tun_id=tun_id, ports=ports) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionPopVlan(), ofpp.OFPActionSetField(tunnel_id=tun_id), ] + [ofpp.OFPActionOutput(p, 0) for 
p in ports]), ], match=ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT), priority=1, table_id=22), active_bundle=None), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_flood_to_tun(self): vlan = 3333 self.br.delete_flood_to_tun(vlan=vlan) (dp, ofp, ofpp) = self._get_dp() expected = [ call.uninstall_flows(table_id=22, match=ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT)), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_unicast_to_tun(self): vlan = 3333 port = 55 mac = '08:60:6e:7f:74:e7' tun_id = 2222 self.br.install_unicast_to_tun(vlan=vlan, tun_id=tun_id, port=port, mac=mac) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionPopVlan(), ofpp.OFPActionSetField(tunnel_id=tun_id), ofpp.OFPActionOutput(port, 0), ]), ], match=ofpp.OFPMatch( eth_dst=mac, vlan_vid=vlan | ofp.OFPVID_PRESENT), priority=2, table_id=20), active_bundle=None), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_unicast_to_tun(self): vlan = 3333 mac = '08:60:6e:7f:74:e7' self.br.delete_unicast_to_tun(vlan=vlan, mac=mac) (dp, ofp, ofpp) = self._get_dp() expected = [ call.uninstall_flows(table_id=20, match=ofpp.OFPMatch( eth_dst=mac, vlan_vid=vlan | ofp.OFPVID_PRESENT)), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_unicast_to_tun_without_mac(self): vlan = 3333 mac = None self.br.delete_unicast_to_tun(vlan=vlan, mac=mac) (dp, ofp, ofpp) = self._get_dp() expected = [ call.uninstall_flows(table_id=20, match=ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT)), ] self.assertEqual(expected, self.mock.mock_calls) def test_install_arp_responder(self): vlan = 3333 ip = '192.0.2.1' mac = '08:60:6e:7f:74:e7' self.br.install_arp_responder(vlan=vlan, ip=ip, mac=mac) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionSetField(arp_op=self.arp.ARP_REPLY), ofpp.NXActionRegMove( dst_field='arp_tha', n_bits=48, src_field='arp_sha'), ofpp.NXActionRegMove( dst_field='arp_tpa', n_bits=32, src_field='arp_spa'), ofpp.OFPActionSetField(arp_sha=mac), ofpp.OFPActionSetField(arp_spa=ip), ofpp.NXActionRegMove(src_field='eth_src', dst_field='eth_dst', n_bits=48), ofpp.OFPActionSetField(eth_src=mac), ofpp.OFPActionOutput(ofp.OFPP_IN_PORT, 0), ]), ], match=ofpp.OFPMatch( eth_type=self.ether_types.ETH_TYPE_ARP, arp_tpa=ip, vlan_vid=vlan | ofp.OFPVID_PRESENT), priority=1, table_id=21), active_bundle=None), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_arp_responder(self): vlan = 3333 ip = '192.0.2.1' self.br.delete_arp_responder(vlan=vlan, ip=ip) (dp, ofp, ofpp) = self._get_dp() expected = [ call.uninstall_flows( match=ofpp.OFPMatch( eth_type=self.ether_types.ETH_TYPE_ARP, arp_tpa=ip, vlan_vid=vlan | ofp.OFPVID_PRESENT), table_id=21), ] self.assertEqual(expected, self.mock.mock_calls) def test_delete_arp_responder_without_ip(self): vlan = 3333 ip = None self.br.delete_arp_responder(vlan=vlan, ip=ip) (dp, ofp, ofpp) = self._get_dp() expected = [ call.uninstall_flows( match=ofpp.OFPMatch( eth_type=self.ether_types.ETH_TYPE_ARP, vlan_vid=vlan | ofp.OFPVID_PRESENT), table_id=21), ] self.assertEqual(expected, self.mock.mock_calls) def test_setup_tunnel_port(self): network_type = 'vxlan' port = 11111 self.br.setup_tunnel_port(network_type=network_type, port=port) (dp, ofp, ofpp) = self._get_dp() expected 
= [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionGotoTable(table_id=4), ], match=ofpp.OFPMatch(in_port=port), priority=1, table_id=0), active_bundle=None), ] self.assertEqual(expected, self.mock.mock_calls) def test_cleanup_tunnel_port(self): port = 11111 self.br.cleanup_tunnel_port(port=port) (dp, ofp, ofpp) = self._get_dp() expected = [ call.uninstall_flows(in_port=port), ] self.assertEqual(expected, self.mock.mock_calls) def test_add_dvr_mac_tun(self): mac = '00:02:b3:13:fe:3d' port = 8888 self.br.add_dvr_mac_tun(mac=mac, port=port) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, cookie=self.stamp, instructions=[ ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, [ ofpp.OFPActionOutput(port, 0), ]), ], match=ofpp.OFPMatch(eth_src=mac), priority=1, table_id=9), active_bundle=None), ] self.assertEqual(expected, self.mock.mock_calls) def test_remove_dvr_mac_tun(self): mac = '00:02:b3:13:fe:3d' self.br.remove_dvr_mac_tun(mac=mac) (dp, ofp, ofpp) = self._get_dp() expected = [ call.uninstall_flows(eth_src=mac, table_id=9), ] self.assertEqual(expected, self.mock.mock_calls) ././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_ofswitch.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/t0000644000175000017500000001030600000000000033563 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from os_ken.ofproto import ofproto_v1_3 from os_ken.ofproto import ofproto_v1_3_parser from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import ofswitch from neutron.tests import base class FakeReply(object): def __init__(self, type): self.type = type class TestBundledOpenFlowBridge(base.BaseTestCase): def setUp(self): super(TestBundledOpenFlowBridge, self).setUp() br = mock.Mock(spec=['install_instructions', 'foo']) br._get_dp = lambda: (mock.Mock(), ofproto_v1_3, ofproto_v1_3_parser) br.active_bundles = set() self.br = ofswitch.BundledOpenFlowBridge(br, False, False) def test_method_calls(self): self.br.install_instructions(dummy_arg=1) self.br.br.install_instructions.assert_called_once_with(dummy_arg=1) def test_illegal_method_calls(self): # With python3, this can be written as "with assertRaises..." 
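        # e.g.:
        #
        #     with self.assertRaises(AttributeError):
        #         self.br.uninstall_foo()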
try: self.br.uninstall_foo() self.fail("Expected an exception") except Exception as e: self.assertIsInstance(e, AttributeError) try: self.br.foo() self.fail("Expected an exception") except Exception as e: self.assertIsInstance(e, AttributeError) def test_normal_bundle_context(self): self.assertIsNone(self.br.active_bundle) self.br.br._send_msg = mock.Mock(side_effect=[ FakeReply(ofproto_v1_3.ONF_BCT_OPEN_REPLY), FakeReply(ofproto_v1_3.ONF_BCT_COMMIT_REPLY)]) with self.br: self.assertIsNotNone(self.br.active_bundle) # Do nothing # Assert that the active bundle is gone self.assertIsNone(self.br.active_bundle) def test_aborted_bundle_context(self): self.assertIsNone(self.br.active_bundle) self.br.br._send_msg = mock.Mock(side_effect=[ FakeReply(ofproto_v1_3.ONF_BCT_OPEN_REPLY), FakeReply(ofproto_v1_3.ONF_BCT_DISCARD_REPLY)]) try: with self.br: self.assertIsNotNone(self.br.active_bundle) raise Exception() except Exception: pass # Assert that the active bundle is gone self.assertIsNone(self.br.active_bundle) self.assertEqual(2, len(self.br.br._send_msg.mock_calls)) args, kwargs = self.br.br._send_msg.call_args_list[0] self.assertEqual(ofproto_v1_3.ONF_BCT_OPEN_REQUEST, args[0].type) args, kwargs = self.br.br._send_msg.call_args_list[1] self.assertEqual(ofproto_v1_3.ONF_BCT_DISCARD_REQUEST, args[0].type) def test_bundle_context_with_error(self): self.assertIsNone(self.br.active_bundle) self.br.br._send_msg = mock.Mock(side_effect=[ FakeReply(ofproto_v1_3.ONF_BCT_OPEN_REPLY), RuntimeError]) try: with self.br: saved_bundle_id = self.br.active_bundle self.assertIsNotNone(self.br.active_bundle) self.fail("Expected an exception") except RuntimeError: pass # Assert that the active bundle is gone self.assertIsNone(self.br.active_bundle) self.assertIn(saved_bundle_id, self.br.br.active_bundles) self.assertEqual(2, len(self.br.br._send_msg.mock_calls)) args, kwargs = self.br.br._send_msg.call_args_list[0] self.assertEqual(ofproto_v1_3.ONF_BCT_OPEN_REQUEST, args[0].type) args, kwargs = self.br.br._send_msg.call_args_list[1] self.assertEqual(ofproto_v1_3.ONF_BCT_COMMIT_REQUEST, args[0].type) ././@PaxHeader0000000000000000000000000000022500000000000011454 xustar0000000000000000127 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_ovs_bridge.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/t0000644000175000017500000000426200000000000033567 0ustar00coreycorey00000000000000# Copyright (c) 2016 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
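# NOTE: a small illustration of the datapath-id handling exercised in
# this module (plain Python, no extra helpers assumed): OVSDB reports
# datapath_id as a hex string, while the OpenFlow layer addresses the
# datapath by its integer value, hence the int(DPID, 16) below:
#
#     int("0003e9", 16) == 1001    # i.e. 0x3e9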
import mock from neutron.agent.common import ovs_lib from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import ofswitch from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \ import ovs_test_base DPID = "0003e9" class OVSAgentBridgeTestCase(ovs_test_base.OVSOSKenTestBase): def test__get_dp(self): mock.patch.object( ovs_lib.OVSBridge, 'get_datapath_id', return_value=DPID).start() mock.patch.object( ofswitch.OpenFlowSwitchMixin, "_get_dp_by_dpid", side_effect=RuntimeError).start() br = self.br_int_cls('br-int') br._cached_dpid = int(DPID, 16) # make sure it correctly raises RuntimeError, not UnboundLocalError as # in LP https://bugs.launchpad.net/neutron/+bug/1588042 self.assertRaises(RuntimeError, br._get_dp) def test_get_datapath_no_data_returned(self): def _mock_db_get_val(tb, rec, col): if tb == 'Bridge': return [] mock.patch.object(ovs_lib.OVSBridge, 'db_get_val', side_effect=_mock_db_get_val).start() br = self.br_int_cls('br-int') # make sure that in case of any misconfiguration when no datapath is # found a proper exception, not a TypeError is raised self.assertRaises(RuntimeError, br._get_dp) def test__get_dp_when_get_datapath_id_returns_None(self): br = self.br_int_cls('br-int') with mock.patch.object(br, 'get_datapath_id', return_value=None): self.assertRaises(RuntimeError, br._get_dp) ././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/test_br_cookie.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/test_br_0000644000175000017500000000552600000000000033643 0ustar00coreycorey00000000000000# Copyright 2016 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
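# NOTE: the cases below pin down the cookie bookkeeping contract of the
# bridge: the default cookie is always a member of reserved_cookies,
# request_cookie() grows the reserved set without changing the default,
# unset_cookie() shrinks it, and set_agent_uuid_stamp() swaps in a new
# default, dropping the old one from the reserved set unless it was
# independently requested.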
import mock from neutron.agent.common import ovs_lib from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \ import ovs_bridge from neutron.tests import base class TestBRCookieOpenflow(base.BaseTestCase): def setUp(self): super(TestBRCookieOpenflow, self).setUp() conn_patcher = mock.patch( 'neutron.agent.ovsdb.impl_idl._connection') conn_patcher.start() self.addCleanup(conn_patcher.stop) self.br = ovs_bridge.OVSAgentBridge('br-int', os_ken_app=mock.Mock()) def test_reserved_cookies(self): def_cookie = self.br.default_cookie self.assertIn(def_cookie, self.br.reserved_cookies) def test_request_cookie(self): default_cookie = self.br.default_cookie requested_cookie = self.br.request_cookie() self.assertEqual(default_cookie, self.br.default_cookie) self.assertIn(default_cookie, self.br.reserved_cookies) self.assertIn(requested_cookie, self.br.reserved_cookies) def test_unset_cookie(self): requested_cookie = self.br.request_cookie() self.assertIn(requested_cookie, self.br.reserved_cookies) self.br.unset_cookie(requested_cookie) self.assertNotIn(requested_cookie, self.br.reserved_cookies) def test_set_agent_uuid_stamp(self): self.br = ovs_bridge.OVSAgentBridge('br-int', os_ken_app=mock.Mock()) def_cookie = self.br.default_cookie new_cookie = ovs_lib.generate_random_cookie() self.br.set_agent_uuid_stamp(new_cookie) self.assertEqual(new_cookie, self.br.default_cookie) self.assertIn(new_cookie, self.br.reserved_cookies) self.assertNotIn(def_cookie, self.br.reserved_cookies) def test_set_agent_uuid_stamp_with_reserved_cookie(self): self.br = ovs_bridge.OVSAgentBridge('br-int', os_ken_app=mock.Mock()) def_cookie = self.br.default_cookie new_cookie = self.br.request_cookie() self.br.set_agent_uuid_stamp(new_cookie) self.assertEqual(new_cookie, self.br.default_cookie) self.assertIn(new_cookie, self.br.reserved_cookies) self.assertNotIn(def_cookie, self.br.reserved_cookies) self.assertEqual(set([new_cookie]), self.br.reserved_cookies) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/ovs_test_base.py0000644000175000017500000000562000000000000033473 0ustar00coreycorey00000000000000# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. # Copyright (C) 2014 Fumihiko Kakuma # Copyright (C) 2014,2015 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import mock from oslo_utils import importutils from neutron.tests import base from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \ import fake_oflib _AGENT_PACKAGE = 'neutron.plugins.ml2.drivers.openvswitch.agent' _AGENT_NAME = _AGENT_PACKAGE + '.ovs_neutron_agent' _DVR_AGENT_NAME = ('neutron.plugins.ml2.drivers.openvswitch.agent.' 
'ovs_dvr_neutron_agent') class OVSAgentConfigTestBase(base.BaseTestCase): def setUp(self): super(OVSAgentConfigTestBase, self).setUp() self.mod_agent = importutils.import_module(_AGENT_NAME) self.mod_dvr_agent = importutils.import_module(_DVR_AGENT_NAME) class OVSOSKenTestBase(OVSAgentConfigTestBase): _DRIVER_PACKAGE = _AGENT_PACKAGE + '.openflow.native' _BR_INT_CLASS = _DRIVER_PACKAGE + '.br_int.OVSIntegrationBridge' _BR_TUN_CLASS = _DRIVER_PACKAGE + '.br_tun.OVSTunnelBridge' _BR_PHYS_CLASS = _DRIVER_PACKAGE + '.br_phys.OVSPhysicalBridge' def setUp(self): self.fake_oflib_of = fake_oflib.patch_fake_oflib_of() self.fake_oflib_of.start() self.addCleanup(self.fake_oflib_of.stop) super(OVSOSKenTestBase, self).setUp() conn_patcher = mock.patch( 'neutron.agent.ovsdb.impl_idl._connection') conn_patcher.start() self.addCleanup(conn_patcher.stop) self.br_int_cls = importutils.import_class(self._BR_INT_CLASS) self.br_phys_cls = importutils.import_class(self._BR_PHYS_CLASS) self.br_tun_cls = importutils.import_class(self._BR_TUN_CLASS) os_ken_app = mock.Mock() self.br_int_cls = functools.partial(self.br_int_cls, os_ken_app=os_ken_app) self.br_phys_cls = functools.partial(self.br_phys_cls, os_ken_app=os_ken_app) self.br_tun_cls = functools.partial(self.br_tun_cls, os_ken_app=os_ken_app) def _bridge_classes(self): return { 'br_int': self.br_int_cls, 'br_phys': self.br_phys_cls, 'br_tun': self.br_tun_cls, } ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_agent_extension_api.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_agent_ex0000644000175000017500000001017300000000000033723 0ustar00coreycorey00000000000000# Copyright 2012 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
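# NOTE: the request_*_br tests below all check the same contract via
# _test_bridge(): a bridge handed back to an extension keeps the
# original br_name but carries its own default cookie, and that cookie
# is reserved on the wrapped bridge so the extension's flows can be
# told apart from the agent's.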
# import mock from neutron.plugins.ml2.drivers.openvswitch.agent \ import ovs_agent_extension_api as ovs_ext_agt from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \ .openflow.native import ovs_bridge_test_base as native_ovs_bridge_test_base from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \ import ovs_test_base class TestOVSAgentExtensionAPI(ovs_test_base.OVSOSKenTestBase): def setUp(self): super(TestOVSAgentExtensionAPI, self).setUp() self.br_int = self.br_int_cls("br-int") self.br_tun = self.br_tun_cls("br-tun") self.br_phys = {'br-phys1': self.br_phys_cls('br-phys1'), 'br-phys2': self.br_phys_cls('br-phys2')} def _test_bridge(self, orig_bridge, new_bridge): self.assertIsNotNone(new_bridge) self.assertEqual(orig_bridge.br_name, new_bridge.br_name) self.assertIn(new_bridge._default_cookie, orig_bridge.reserved_cookies) self.assertNotEqual(orig_bridge._default_cookie, new_bridge._default_cookie) def test_request_int_br(self): agent_extension_api = ovs_ext_agt.OVSAgentExtensionAPI( self.br_int, self.br_tun, {'phys': self.br_phys['br-phys1']}) new_int_br = agent_extension_api.request_int_br() self._test_bridge(self.br_int, new_int_br) def test_request_tun_br(self): agent_extension_api = ovs_ext_agt.OVSAgentExtensionAPI( self.br_int, self.br_tun, {'phys': self.br_phys['br-phys1']}) new_tun_br = agent_extension_api.request_tun_br() self._test_bridge(self.br_tun, new_tun_br) def test_request_tun_br_tunneling_disabled(self): agent_extension_api = ovs_ext_agt.OVSAgentExtensionAPI( self.br_int, None, {'phys': self.br_phys['br-phys1']}) self.assertIsNone(agent_extension_api.request_tun_br()) def test_request_phys_brs(self): agent_extension_api = ovs_ext_agt.OVSAgentExtensionAPI( self.br_int, self.br_tun, {'phys1': self.br_phys['br-phys1'], 'phys2': self.br_phys['br-phys2']}) for phys_br in agent_extension_api.request_phy_brs(): self._test_bridge(self.br_phys[phys_br.br_name], phys_br) class TestOVSCookieBridgeOSKen(native_ovs_bridge_test_base.OVSBridgeTestBase): def setUp(self): super(TestOVSCookieBridgeOSKen, self).setUp() self.setup_bridge_mock('br-int', self.br_int_cls) self.tested_bridge = ovs_ext_agt.OVSCookieBridge(self.br) def test_cookie(self): self.assertNotEqual(self.br._default_cookie, self.tested_bridge._default_cookie) def test_reserved(self): self.assertIn(self.tested_bridge._default_cookie, self.br.reserved_cookies) def test_install_drop(self): priority = 99 in_port = 666 self.tested_bridge.install_drop(priority=priority, in_port=in_port) (dp, ofp, ofpp) = self._get_dp() expected = [ mock.call._send_msg( ofpp.OFPFlowMod( dp, # this is the interesting part of the check: cookie=self.tested_bridge._default_cookie, instructions=[], match=ofpp.OFPMatch(in_port=in_port), priority=priority, table_id=0), active_bundle=None), ] self.assertEqual(expected, self.mock.mock_calls) ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_capabilities.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_capabili0000644000175000017500000000260100000000000033672 0ustar00coreycorey00000000000000# Copyright 2016 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib.callbacks import events from neutron_lib import fixture from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_capabilities from neutron.services.trunk.drivers.openvswitch.agent import driver from neutron.tests import base from neutron.tests import tools from neutron_lib import constants class CapabilitiesTest(base.BaseTestCase): def setUp(self): super(CapabilitiesTest, self).setUp() self._mgr = mock.Mock() self.useFixture(fixture.CallbackRegistryFixture( callback_manager=self._mgr)) def test_register(self): ovs_capabilities.register() args = tools.get_subscribe_args( driver.init_handler, constants.AGENT_TYPE_OVS, events.AFTER_INIT) self._mgr.subscribe.assert_called_with(*args) ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_0000644000175000017500000057557700000000000034012 0ustar00coreycorey00000000000000# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
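# NOTE: many scan_ports() cases in TestOvsNeutronAgent below reduce to
# plain set arithmetic over port ids.  The helper below is a minimal
# sketch of that split (illustrative only and unused by the tests; the
# real agent additionally handles sync and re_added):


def _port_diff_sketch(current, registered, updated):
    """Model of the added/removed/updated split checked below."""
    return {'current': current,
            'added': current - registered,      # newly seen on the bridge
            'removed': registered - current,    # gone from the bridge
            'updated': updated & current}       # only if still present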
import contextlib import sys import time import mock import netaddr from neutron_lib.agent import constants as agent_consts from neutron_lib.api.definitions import portbindings from neutron_lib.api.definitions import provider_net from neutron_lib import constants as n_const from neutron_lib import rpc as n_rpc import os_vif from os_vif.objects import instance_info as vif_instance_object from oslo_config import cfg from oslo_log import log import oslo_messaging import testtools from neutron._i18n import _ from neutron.agent.common import async_process from neutron.agent.common import ovs_lib from neutron.agent.common import polling from neutron.agent.common import utils from neutron.agent.linux import ip_lib from neutron.api.rpc.callbacks import resources from neutron.objects.ports import Port from neutron.objects.ports import PortBinding from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_neutron_agent \ as ovs_agent from neutron.tests import base from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \ import ovs_test_base from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \ import test_vlanmanager NOTIFIER = 'neutron.plugins.ml2.rpc.AgentNotifierApi' PULLAPI = 'neutron.api.rpc.handlers.resources_rpc.ResourcesPullRpcApi' OVS_LINUX_KERN_VERS_WITHOUT_VXLAN = "3.12.0" FAKE_MAC = '00:11:22:33:44:55' FAKE_IP1 = '10.0.0.1' FAKE_IP2 = '10.0.0.2' FAKE_IP6 = '2001:db8:42:42::10' TEST_PORT_ID1 = 'port-id-1' TEST_PORT_ID2 = 'port-id-2' TEST_PORT_ID3 = 'port-id-3' TEST_NETWORK_ID1 = 'net-id-1' TEST_NETWORK_ID2 = 'net-id-2' DEVICE_OWNER_COMPUTE = n_const.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' class FakeVif(object): ofport = 99 port_name = 'name' vif_mac = 'aa:bb:cc:11:22:33' class MockFixedIntervalLoopingCall(object): def __init__(self, f): self.f = f def start(self, interval=0): self.f() class ValidateTunnelTypes(ovs_test_base.OVSAgentConfigTestBase): def setUp(self): super(ValidateTunnelTypes, self).setUp() self.mock_validate_local_ip = mock.patch.object( self.mod_agent, 'validate_local_ip').start() def test_validate_tunnel_types_succeeds(self): cfg.CONF.set_override('local_ip', '10.10.10.10', group='OVS') cfg.CONF.set_override('tunnel_types', [n_const.TYPE_GRE], group='AGENT') self.mod_agent.validate_tunnel_config(cfg.CONF.AGENT.tunnel_types, cfg.CONF.OVS.local_ip) self.mock_validate_local_ip.assert_called_once_with('10.10.10.10') def test_validate_tunnel_types_fails_for_invalid_tunnel_type(self): cfg.CONF.set_override('local_ip', '10.10.10.10', group='OVS') cfg.CONF.set_override('tunnel_types', ['foobar'], group='AGENT') with testtools.ExpectedException(SystemExit): self.mod_agent.validate_tunnel_config(cfg.CONF.AGENT.tunnel_types, cfg.CONF.OVS.local_ip) class TestOvsNeutronAgent(object): def setUp(self): super(TestOvsNeutronAgent, self).setUp() self.useFixture(test_vlanmanager.LocalVlanManagerFixture()) mock.patch(PULLAPI).start() notifier_p = mock.patch(NOTIFIER) notifier_cls = notifier_p.start() self.notifier = mock.Mock() notifier_cls.return_value = self.notifier systemd_patch = mock.patch('oslo_service.systemd.notify_once') self.systemd_notify = systemd_patch.start() cfg.CONF.set_default('firewall_driver', 'neutron.agent.firewall.NoopFirewallDriver', group='SECURITYGROUP') cfg.CONF.set_default('quitting_rpc_timeout', 10, 'AGENT') cfg.CONF.set_default('local_ip', '127.0.0.1', 'OVS') cfg.CONF.set_default('host', 'host') mock.patch( 
'neutron.agent.ovsdb.native.helpers.enable_connection_uri').start() mock.patch( 'neutron.agent.common.ovs_lib.OVSBridge.get_ports_attributes', return_value=[]).start() mock.patch('neutron.agent.common.ovs_lib.BaseOVS.config', new_callable=mock.PropertyMock, return_value={}).start() mock.patch('neutron.agent.ovsdb.impl_idl._connection').start() self.agent = self._make_agent() self.agent.sg_agent = mock.Mock() def _make_agent(self): with mock.patch.object(self.mod_agent.OVSNeutronAgent, 'setup_integration_br'),\ mock.patch.object(self.mod_agent.OVSNeutronAgent, 'setup_ancillary_bridges', return_value=[]),\ mock.patch('neutron.agent.linux.ip_lib.get_device_mac', return_value='00:00:00:00:00:01'),\ mock.patch( 'neutron.agent.common.ovs_lib.BaseOVS.get_bridges'),\ mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=MockFixedIntervalLoopingCall),\ mock.patch( 'neutron.agent.common.ovs_lib.OVSBridge.' 'get_vif_ports', return_value=[]),\ mock.patch('neutron.agent.rpc.PluginReportStateAPI.' 'has_alive_neutron_server'): ext_manager = mock.Mock() agent = self.mod_agent.OVSNeutronAgent(self._bridge_classes(), ext_manager, cfg.CONF) agent.tun_br = self.br_tun_cls(br_name='br-tun') return agent def _mock_port_bound(self, ofport=None, new_local_vlan=None, old_local_vlan=None, db_get_val=None): port = mock.Mock() port.ofport = ofport net_uuid = 'my-net-uuid' fixed_ips = [{'subnet_id': 'my-subnet-uuid', 'ip_address': '1.1.1.1'}] if old_local_vlan is not None: self.agent.vlan_manager.add( net_uuid, old_local_vlan, None, None, None) with mock.patch.object(self.agent, 'int_br', autospec=True) as int_br: int_br.db_get_val.return_value = db_get_val int_br.set_db_attribute.return_value = True needs_binding = self.agent.port_bound( port, net_uuid, 'local', None, None, fixed_ips, DEVICE_OWNER_COMPUTE, False) if db_get_val is None: int_br.assert_not_called() self.assertFalse(needs_binding) else: vlan_mapping = {'net_uuid': net_uuid, 'network_type': 'local', 'physical_network': 'None'} int_br.set_db_attribute.assert_called_once_with( "Port", mock.ANY, "other_config", vlan_mapping) self.assertTrue(needs_binding) def test_setup_physical_bridges_during_agent_initialization(self): with mock.patch.object( self.mod_agent.OVSNeutronAgent, 'setup_physical_bridges') as setup_physical_bridges,\ mock.patch.object( self.mod_agent.OVSNeutronAgent, 'setup_rpc') as setup_rpc: setup_rpc.side_effect = oslo_messaging.MessagingException( "Test communication failure") try: self._make_agent() except oslo_messaging.MessagingException: pass setup_physical_bridges.assert_called_once_with(mock.ANY) def test_datapath_type_system(self): # verify kernel datapath is default expected = constants.OVS_DATAPATH_SYSTEM self.assertEqual(expected, self.agent.int_br.datapath_type) def test_datapath_type_netdev(self): with mock.patch.object(self.mod_agent.OVSNeutronAgent, 'setup_integration_br'), \ mock.patch.object(self.mod_agent.OVSNeutronAgent, 'setup_ancillary_bridges', return_value=[]), \ mock.patch('neutron.agent.linux.ip_lib.get_device_mac', return_value='00:00:00:00:00:01'), \ mock.patch( 'neutron.agent.common.ovs_lib.BaseOVS.get_bridges'), \ mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=MockFixedIntervalLoopingCall), \ mock.patch( 'neutron.agent.common.ovs_lib.OVSBridge.' 'get_vif_ports', return_value=[]), \ mock.patch('neutron.agent.common.ovs_lib.BaseOVS.config', new_callable=mock.PropertyMock, return_value={'datapath_types': ['netdev']}),\ mock.patch('neutron.agent.rpc.PluginReportStateAPI.' 
'has_alive_neutron_server'): # validate setting non default datapath expected = constants.OVS_DATAPATH_NETDEV cfg.CONF.set_override('datapath_type', expected, group='OVS') ext_manager = mock.Mock() self.agent = self.mod_agent.OVSNeutronAgent(self._bridge_classes(), ext_manager, cfg.CONF) self.assertEqual(expected, self.agent.int_br.datapath_type) def test_agent_type_ovs(self): # verify agent_type is default expected = n_const.AGENT_TYPE_OVS self.assertEqual(expected, self.agent.agent_state['agent_type']) def test_agent_available_local_vlans(self): expected = [n_const.MIN_VLAN_TAG, n_const.MIN_VLAN_TAG + 1, n_const.MAX_VLAN_TAG - 1, n_const.MAX_VLAN_TAG] exception = [n_const.MIN_VLAN_TAG - 1, n_const.MAX_VLAN_TAG + 1, n_const.MAX_VLAN_TAG + 2] available_vlan = self.agent.available_local_vlans for tag in expected: self.assertIn(tag, available_vlan) for tag in exception: self.assertNotIn(tag, available_vlan) def _test_restore_local_vlan_maps(self, tag, segmentation_id='1'): port = mock.Mock() port.port_name = 'fake_port' net_uuid = 'fake_network_id' local_vlan_map = {'net_uuid': net_uuid, 'network_type': 'vlan', 'physical_network': 'fake_network'} if segmentation_id is not None: local_vlan_map['segmentation_id'] = segmentation_id # this is for the call inside get_vif_ports() get_interfaces = [{'name': port.port_name, 'ofport': '1', 'external_ids': { 'iface-id': '1', 'attached-mac': 'mac1'}}, {'name': 'invalid', 'ofport': ovs_lib.INVALID_OFPORT, 'external_ids': { 'iface-id': '2', 'attached-mac': 'mac2'}}, {'name': 'unassigned', 'ofport': ovs_lib.UNASSIGNED_OFPORT, 'external_ids': { 'iface-id': '3', 'attached-mac': 'mac3'}}] # this is for the call inside _restore_local_vlan_map() get_ports = [{'name': port.port_name, 'other_config': local_vlan_map, 'tag': tag}] with mock.patch.object(self.agent.int_br, 'get_ports_attributes', side_effect=[get_interfaces, get_ports]) as gpa: self.agent._restore_local_vlan_map() expected_hints = {} if tag: expected_hints[net_uuid] = tag self.assertEqual(expected_hints, self.agent._local_vlan_hints) # make sure invalid and unassigned ports were skipped gpa.assert_has_calls([ mock.call('Interface', columns=mock.ANY, if_exists=True), mock.call('Port', columns=mock.ANY, ports=['fake_port']) ]) def test_restore_local_vlan_map_with_device_has_tag(self): self._test_restore_local_vlan_maps(2) def test_restore_local_vlan_map_with_device_no_tag(self): self._test_restore_local_vlan_maps([]) def test_restore_local_vlan_map_no_segmentation_id(self): self._test_restore_local_vlan_maps(2, segmentation_id=None) def test_restore_local_vlan_map_segmentation_id_compat(self): self._test_restore_local_vlan_maps(2, segmentation_id='None') def test_check_agent_configurations_for_dvr_raises(self): self.agent.enable_distributed_routing = True self.agent.enable_tunneling = True self.agent.l2_pop = False self.assertRaises(ValueError, self.agent._check_agent_configurations) def test_check_agent_configurations_for_dvr(self): self.agent.enable_distributed_routing = True self.agent.enable_tunneling = True self.agent.l2_pop = True self.assertIsNone(self.agent._check_agent_configurations()) def test_check_agent_configurations_for_dvr_with_vlan(self): self.agent.enable_distributed_routing = True self.agent.enable_tunneling = False self.agent.l2_pop = False self.assertIsNone(self.agent._check_agent_configurations()) def test_port_bound_deletes_flows_for_valid_ofport(self): self._mock_port_bound(ofport=1, new_local_vlan=1, db_get_val={}) def test_port_bound_ignores_flows_for_invalid_ofport(self): 
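        # ofport -1 (ovs_lib.INVALID_OFPORT) marks an interface whose
        # creation failed; per this case's name, the VLAN bookkeeping
        # must still happen while no flow manipulation is keyed on the
        # bogus ofport.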
self._mock_port_bound(ofport=-1, new_local_vlan=1, db_get_val={}) def test_port_bound_does_not_rewire_if_already_bound(self): self._mock_port_bound( ofport=-1, new_local_vlan=1, old_local_vlan=1, db_get_val={}) def test_port_bound_not_found(self): self._mock_port_bound(ofport=1, new_local_vlan=1, db_get_val=None) def _test_port_dead(self, cur_tag=None): port = mock.Mock() port.ofport = 1 with mock.patch.object(self.agent, 'int_br') as int_br: int_br.db_get_val.return_value = cur_tag self.agent.port_dead(port) if cur_tag is None or cur_tag == constants.DEAD_VLAN_TAG: self.assertFalse(int_br.set_db_attribute.called) self.assertFalse(int_br.drop_port.called) else: int_br.assert_has_calls([ mock.call.set_db_attribute("Port", mock.ANY, "tag", constants.DEAD_VLAN_TAG, log_errors=True), mock.call.drop_port(in_port=port.ofport), ]) def test_port_dead(self): self._test_port_dead() def test_port_dead_with_port_already_dead(self): self._test_port_dead(constants.DEAD_VLAN_TAG) def test_port_dead_with_valid_tag(self): self._test_port_dead(cur_tag=1) def mock_scan_ports(self, vif_port_set=None, registered_ports=None, updated_ports=None, port_tags_dict=None, sync=False): if port_tags_dict is None: # Because empty dicts evaluate as False. port_tags_dict = {} with mock.patch.object(self.agent.int_br, 'get_vif_port_set', return_value=vif_port_set),\ mock.patch.object(self.agent.int_br, 'get_port_tag_dict', return_value=port_tags_dict): return self.agent.scan_ports(registered_ports, sync, updated_ports) def test_scan_ports_returns_current_only_for_unchanged_ports(self): vif_port_set = set([1, 3]) registered_ports = set([1, 3]) expected = ovs_agent.PortInfo(current=vif_port_set) actual = self.mock_scan_ports(vif_port_set, registered_ports) self.assertEqual(expected, actual) def test_scan_ports_returns_port_changes(self): vif_port_set = set([1, 3]) registered_ports = set([1, 2]) expected = ovs_agent.PortInfo(current=vif_port_set, added=set([3]), removed=set([2])) actual = self.mock_scan_ports(vif_port_set, registered_ports) self.assertEqual(expected, actual) def test_scan_ports_returns_port_changes_with_sync(self): vif_port_set = set([1, 3]) registered_ports = set([1, 2]) expected = ovs_agent.PortInfo(current=vif_port_set, added=vif_port_set, removed=set([2])) actual = self.mock_scan_ports(vif_port_set, registered_ports, sync=True) self.assertEqual(expected, actual) def _test_scan_ports_with_updated_ports(self, updated_ports): vif_port_set = set([1, 3, 4]) registered_ports = set([1, 2, 4]) expected = ovs_agent.PortInfo(current=vif_port_set, added={3}, removed={2}, updated={4}) actual = self.mock_scan_ports(vif_port_set, registered_ports, updated_ports) self.assertEqual(expected, actual) def test_scan_ports_finds_known_updated_ports(self): self._test_scan_ports_with_updated_ports(set([4])) def test_scan_ports_ignores_unknown_updated_ports(self): # the port '5' was not seen on current ports. 
        # Hence it has either
        # never been wired or already removed and should be ignored
        self._test_scan_ports_with_updated_ports(set([4, 5]))

    def test_scan_ports_ignores_updated_port_if_removed(self):
        vif_port_set = set([1, 3])
        registered_ports = set([1, 2])
        updated_ports = set([1, 2])
        expected = ovs_agent.PortInfo(current=vif_port_set,
                                      added=set([3]),
                                      removed=set([2]),
                                      updated=set([1]))
        actual = self.mock_scan_ports(vif_port_set, registered_ports,
                                      updated_ports)
        self.assertEqual(expected, actual)

    def test_scan_ports_no_vif_changes_returns_updated_port_only(self):
        vif_port_set = set([1, 2, 3])
        registered_ports = set([1, 2, 3])
        updated_ports = set([2])
        expected = ovs_agent.PortInfo(current=vif_port_set,
                                      updated=set([2]))
        actual = self.mock_scan_ports(vif_port_set, registered_ports,
                                      updated_ports)
        self.assertEqual(expected, actual)

    def _test_process_ports_events(self, events, registered_ports,
                                   ancillary_ports, expected_ports,
                                   expected_ancillary, updated_ports=None):
        with mock.patch.object(self.agent, 'check_changed_vlans',
                               return_value=set()):
            devices_not_ready_yet = set()
            failed_devices = {'added': set(), 'removed': set()}
            failed_ancillary_devices = {'added': set(), 'removed': set()}
            actual = self.agent.process_ports_events(
                events, registered_ports, ancillary_ports,
                devices_not_ready_yet, failed_devices,
                failed_ancillary_devices, updated_ports)
            self.assertEqual(
                (expected_ports, expected_ancillary, devices_not_ready_yet),
                actual)

    def test_process_ports_events_port_removed_and_added(self):
        port_id_one = 'f6f104bd-37c7-4f7b-9d70-53a6bb42728f'
        port_id_two = 'fbaf42ef-ab63-4cda-81d2-37ee55daac3a'
        events = {
            'removed': [{'ofport': 1,
                         'external_ids': {
                             'iface-id': port_id_one,
                             'attached-mac': 'fa:16:3e:f6:1b:fb'},
                         'name': 'qvof6f104bd-37'},
                        {'ofport': 2,
                         'external_ids': {
                             'iface-id': port_id_two,
                             'attached-mac': 'fa:16:3e:a4:42:6e'},
                         'name': 'qvofbaf42ef-ab'}],
            'added': [{'ofport': 3,
                       'external_ids': {
                           'iface-id': port_id_one,
                           'attached-mac': 'fa:16:3e:f6:1b:fb'},
                       'name': 'qvof6f104bd-37'},
                      {'ofport': 4,
                       'external_ids': {
                           'iface-id': port_id_two,
                           'attached-mac': 'fa:16:3e:a4:42:6e'},
                       'name': 'qvofbaf42ef-ab'}],
            'modified': []
        }
        registered_ports = {port_id_one, port_id_two}
        expected_ancillary = ovs_agent.PortInfo()
        # port was removed and then added
        expected_ports = ovs_agent.PortInfo(
            added={port_id_one, port_id_two},
            current={port_id_one, port_id_two},
            re_added={port_id_one, port_id_two})
        with mock.patch.object(ovs_lib.BaseOVS, "port_exists",
                               return_value=True):
            self._test_process_ports_events(events.copy(), registered_ports,
                                            set(), expected_ports,
                                            expected_ancillary)
        # port was added and then removed
        expected_ports = ovs_agent.PortInfo(removed={port_id_one,
                                                     port_id_two})
        with mock.patch.object(ovs_lib.BaseOVS, "port_exists",
                               return_value=False):
            self._test_process_ports_events(events.copy(), registered_ports,
                                            set(), expected_ports,
                                            expected_ancillary)

    def test_process_ports_events_returns_current_for_unchanged_ports(self):
        events = {'added': [], 'removed': [], 'modified': []}
        registered_ports = {1, 3}
        ancillary_ports = {2, 5}
        expected_ports = ovs_agent.PortInfo(current=registered_ports)
        expected_ancillary = ovs_agent.PortInfo(current=ancillary_ports)
        self._test_process_ports_events(events, registered_ports,
                                        ancillary_ports, expected_ports,
                                        expected_ancillary)

    def test_process_port_events_no_vif_changes_return_updated_port_only(
            self):
        events = {'added': [], 'removed': [], 'modified': []}
        registered_ports = {1, 2, 3}
        updated_ports = {2}
        expected_ports = ovs_agent.PortInfo(current=registered_ports,
updated={2}) expected_ancillary = ovs_agent.PortInfo() self._test_process_ports_events(events, registered_ports, set(), expected_ports, expected_ancillary, updated_ports) def test_process_port_events_ignores_removed_port_if_never_added(self): events = {'added': [], 'modified': [], 'removed': [{'name': 'port2', 'ofport': 2, 'external_ids': {'attached-mac': 'test-mac'}}]} registered_ports = {1} expected_ports = ovs_agent.PortInfo(current=registered_ports) expected_ancillary = ovs_agent.PortInfo() devices_not_ready_yet = set() with mock.patch.object(self.agent.int_br, 'portid_from_external_ids', side_effect=[2]), \ mock.patch.object(self.agent, 'check_changed_vlans', return_value=set()): failed_devices = {'added': set(), 'removed': set()} failed_ancillary_devices = { 'added': set(), 'removed': set()} ports_not_ready_yet = set() actual = self.agent.process_ports_events( events, registered_ports, set(), ports_not_ready_yet, failed_devices, failed_ancillary_devices) self.assertEqual( (expected_ports, expected_ancillary, devices_not_ready_yet), actual) def test_process_port_events_port_not_ready_yet(self): events = {'added': [{'name': 'port5', 'ofport': [], 'external_ids': {'attached-mac': 'test-mac'}}], 'removed': [], 'modified': []} old_devices_not_ready = {'port4'} registered_ports = {1, 2, 3} expected_ports = ovs_agent.PortInfo(current={1, 2, 3, 4}, added={4}, removed=set()) self.agent.ancillary_brs = [] expected_ancillary = ovs_agent.PortInfo() with mock.patch.object(self.agent.int_br, 'portid_from_external_ids', side_effect=[5, 4]), \ mock.patch.object(self.agent, 'check_changed_vlans', return_value=set()), \ mock.patch.object(self.agent.int_br, 'get_ports_attributes', return_value=[{'name': 'port4', 'ofport': 4, 'external_ids': { 'attached-mac': 'mac4'}}]): expected_devices_not_ready = {'port5'} failed_devices = {'added': set(), 'removed': set()} failed_ancillary_devices = { 'added': set(), 'removed': set()} actual = self.agent.process_ports_events( events, registered_ports, set(), old_devices_not_ready, failed_devices, failed_ancillary_devices) self.assertEqual( (expected_ports, expected_ancillary, expected_devices_not_ready), actual) def _test_process_port_events_with_updated_ports(self, updated_ports): events = {'added': [{'name': 'port3', 'ofport': 3, 'external_ids': {'attached-mac': 'test-mac'}}, {'name': 'qg-port2', 'ofport': 6, 'external_ids': {'attached-mac': 'test-mac'}}], 'removed': [{'name': 'port2', 'ofport': 2, 'external_ids': {'attached-mac': 'test-mac'}}, {'name': 'qg-port1', 'ofport': 5, 'external_ids': {'attached-mac': 'test-mac'}}], 'modified': []} registered_ports = {1, 2, 4} ancillary_ports = {5, 8} expected_ports = ovs_agent.PortInfo(current={1, 3, 4}, added={3}, removed={2}, updated=updated_ports) expected_ancillary = ovs_agent.PortInfo(current={6, 8}, added={6}, removed={5}) ancillary_bridge = mock.Mock() ancillary_bridge.get_vif_port_set.return_value = {5, 6, 8} self.agent.ancillary_brs = [ancillary_bridge] with mock.patch.object(self.agent.int_br, 'portid_from_external_ids', side_effect=[3, 6, 2, 5]), \ mock.patch.object(self.agent, 'check_changed_vlans', return_value=set()): devices_not_ready_yet = set() failed_devices = {'added': set(), 'removed': set()} failed_ancillary_devices = { 'added': set(), 'removed': set()} actual = self.agent.process_ports_events( events, registered_ports, ancillary_ports, devices_not_ready_yet, failed_devices, failed_ancillary_devices, updated_ports) self.assertEqual( (expected_ports, expected_ancillary, devices_not_ready_yet), actual) 
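    # Companion sketch for the PortInfo bookkeeping exercised above. This
    # test is an illustrative addition, not part of the original suite; it
    # assumes only what test_scan_ports_returns_port_changes_with_sync
    # already demonstrates: the deltas follow plain set arithmetic
    # (added = current - registered, removed = registered - current) and
    # sync=True re-adds every current port even when nothing changed.
    def test_scan_ports_sync_readds_unchanged_ports(self):
        vif_port_set = set([1, 3])
        registered_ports = set([1, 3])
        # with sync requested, scan_ports() reports all current ports as
        # added although the bridge contents did not change
        expected = ovs_agent.PortInfo(current=vif_port_set,
                                      added=vif_port_set)
        actual = self.mock_scan_ports(vif_port_set, registered_ports,
                                      sync=True)
        self.assertEqual(expected, actual)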
    def test_process_port_events_returns_port_changes(self):
        self._test_process_port_events_with_updated_ports(set())

    def test_process_port_events_finds_known_updated_ports(self):
        self._test_process_port_events_with_updated_ports({4})

    def test_process_port_events_ignores_unknown_updated_ports(self):
        # the port '10' was not seen on current ports. Hence it has either
        # never been wired or already removed and should be ignored
        self._test_process_port_events_with_updated_ports({4, 10})

    def test_process_port_events_ignores_updated_port_if_removed(self):
        self._test_process_port_events_with_updated_ports({4, 5})

    def test_update_ports_returns_changed_vlan(self):
        br = self.br_int_cls('br-int')
        mac = "ca:fe:de:ad:be:ef"
        port = ovs_lib.VifPort(1, 1, 1, mac, br)
        self.agent.vlan_manager.add(
            '1', 1, '1', None, 1, {port.vif_id: port})
        vif_port_set = set([1, 3])
        registered_ports = set([1, 2])
        port_tags_dict = {1: []}
        expected = ovs_agent.PortInfo(added={3}, current=vif_port_set,
                                      removed={2}, updated={1})
        with mock.patch.object(self.agent, 'tun_br', autospec=True), \
                mock.patch.object(self.agent.plugin_rpc,
                                  'update_device_list') as upd_l:
            actual = self.mock_scan_ports(
                vif_port_set, registered_ports,
                port_tags_dict=port_tags_dict)
            self.assertEqual(expected, actual)
            upd_l.assert_called_once_with(
                mock.ANY, [], set([1]), self.agent.agent_id,
                self.agent.conf.host)

    def test_update_retries_map_and_remove_devs_not_to_retry(self):
        failed_devices_retries_map = {
            'device_not_to_retry': constants.MAX_DEVICE_RETRIES,
            'device_to_retry': 2,
            'ancillary_not_to_retry': constants.MAX_DEVICE_RETRIES,
            'ancillary_to_retry': 1}
        failed_devices = {
            'added': set(['device_not_to_retry']),
            'removed': set(['device_to_retry', 'new_device'])}
        failed_ancillary_devices = {
            'added': set(['ancillary_to_retry']),
            'removed': set(['ancillary_not_to_retry'])}
        expected_failed_devices_retries_map = {
            'device_to_retry': 3, 'new_device': 1, 'ancillary_to_retry': 2}
        (new_failed_devices_retries_map, devices_not_to_retry,
         ancillary_devices_not_to_retry) = \
            self.agent._get_devices_not_to_retry(
                failed_devices, failed_ancillary_devices,
                failed_devices_retries_map)
        self.agent._remove_devices_not_to_retry(
            failed_devices, failed_ancillary_devices, devices_not_to_retry,
            ancillary_devices_not_to_retry)
        self.assertIn('device_to_retry', failed_devices['removed'])
        self.assertNotIn('device_not_to_retry', failed_devices['added'])
        self.assertEqual(
            expected_failed_devices_retries_map,
            new_failed_devices_retries_map)

    def test_add_port_tag_info(self):
        lvm = mock.Mock()
        lvm.vlan = 1
        self.agent.vlan_manager.mapping["net1"] = lvm
        ovs_db_list = [{'name': 'tap1',
                        'tag': [],
                        'other_config': {'segmentation_id': '1'}},
                       {'name': 'tap2',
                        'tag': [],
                        'other_config': {}},
                       {'name': 'tap3',
                        'tag': [],
                        'other_config': None}]
        vif_port1 = mock.Mock()
        vif_port1.port_name = 'tap1'
        vif_port2 = mock.Mock()
        vif_port2.port_name = 'tap2'
        vif_port3 = mock.Mock()
        vif_port3.port_name = 'tap3'
        port_details = [
            {'network_id': 'net1', 'vif_port': vif_port1},
            {'network_id': 'net1', 'vif_port': vif_port2},
            {'network_id': 'net1', 'vif_port': vif_port3}]
        with mock.patch.object(self.agent, 'int_br') as int_br:
            int_br.get_ports_attributes.return_value = ovs_db_list
            self.agent._add_port_tag_info(port_details)
            set_db_attribute_calls = \
                [mock.call.set_db_attribute(
                    "Port", "tap1", "other_config",
                    {"segmentation_id": "1", "tag": "1"}),
                 mock.call.set_db_attribute(
                     "Port", "tap2", "other_config", {"tag": "1"}),
                 mock.call.set_db_attribute(
                     "Port", "tap3", "other_config", {"tag": "1"})]
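            # set_db_attribute may interleave with other calls recorded on
            # the mocked bridge, hence any_order=True in the assertion that
            # follows.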
int_br.assert_has_calls(set_db_attribute_calls, any_order=True) def test_add_port_tag_info_with_tagged_ports(self): lvm = mock.Mock() lvm.vlan = 1 self.agent.vlan_manager.mapping["net1"] = lvm ovs_db_list1 = [{'name': 'tap1', 'tag': 1, 'other_config': {'segmentation_id': '1', 'tag': '1'}}] ovs_db_list2 = [{'name': 'tap2', 'tag': 2, 'other_config': {'segmentation_id': '1', 'tag': '1'}}, {'name': 'tap3', 'tag': 1, 'other_config': {'segmentation_id': '2', 'tag': '2'}}] vif_port1 = mock.Mock() vif_port1.port_name = 'tap1' vif_port2 = mock.Mock() vif_port2.port_name = 'tap2' vif_port2.ofport = 7 vif_port3 = mock.Mock() vif_port3.port_name = 'tap3' vif_port3.ofport = 8 port_details1 = [{'network_id': 'net1', 'vif_port': vif_port1}] port_details2 = [{'network_id': 'net1', 'vif_port': vif_port2}, {'network_id': 'net1', 'vif_port': vif_port3}] with mock.patch.object(self.agent, 'int_br') as int_br: int_br.get_ports_attributes.return_value = ovs_db_list1 self.agent._add_port_tag_info(port_details1) int_br.set_db_attribute.assert_not_called() # Reset mock to check port with changed tag int_br.reset_mock() int_br.get_ports_attributes.return_value = ovs_db_list2 self.agent._add_port_tag_info(port_details2) expected_calls = \ [mock.call.set_db_attribute("Port", "tap2", "other_config", {'segmentation_id': '1', 'tag': '1'}), mock.call.uninstall_flows(in_port=7), mock.call.set_db_attribute("Port", "tap3", "other_config", {'segmentation_id': '2', 'tag': '1'}), mock.call.uninstall_flows(in_port=8)] int_br.assert_has_calls(expected_calls) def test_bind_devices(self): devices_up = ['tap1'] devices_down = ['tap2'] self.agent.vlan_manager.mapping["net1"] = mock.Mock() ovs_db_list = [{'name': 'tap1', 'tag': []}, {'name': 'tap2', 'tag': []}] vif_port1 = mock.Mock() vif_port1.port_name = 'tap1' vif_port2 = mock.Mock() vif_port2.port_name = 'tap2' port_details = [ {'network_id': 'net1', 'vif_port': vif_port1, 'device': devices_up[0], 'device_owner': 'network:dhcp', 'admin_state_up': True}, {'network_id': 'net1', 'vif_port': vif_port2, 'device': devices_down[0], 'device_owner': 'network:dhcp', 'admin_state_up': False}] with mock.patch.object( self.agent.plugin_rpc, 'update_device_list', return_value={'devices_up': devices_up, 'devices_down': devices_down, 'failed_devices_up': [], 'failed_devices_down': []}) as update_devices, \ mock.patch.object(self.agent, 'int_br') as int_br: int_br.get_ports_attributes.return_value = ovs_db_list self.agent._bind_devices(port_details) update_devices.assert_called_once_with(mock.ANY, devices_up, devices_down, mock.ANY, mock.ANY, refresh_tunnels=True) def _test_bind_devices_sets_refresh_tunnels(self, tun_ofports, expected): self.agent.iter_num = 3 self.agent.prevent_arp_spoofing = False self.agent.vlan_manager.add('fake_network', 1, n_const.TYPE_VXLAN, None, 1) ovs_db_list = [{'name': 'fake_device', 'tag': []}] self.agent.vlan_manager.get('fake_network').tun_ofports = tun_ofports vif_port = mock.Mock() vif_port.port_name = 'fake_device' vif_port.ofport = 1 need_binding_ports = [{'network_id': 'fake_network', 'vif_port': vif_port, 'device': 'fake_device', 'admin_state_up': True}] with mock.patch.object( self.agent.plugin_rpc, 'update_device_list', return_value={'devices_up': [], 'devices_down': [], 'failed_devices_up': [], 'failed_devices_down': []}) as update_devices, \ mock.patch.object(self.agent, 'int_br') as int_br: int_br.get_ports_attributes.return_value = ovs_db_list self.agent._bind_devices(need_binding_ports) update_devices.assert_called_once_with(mock.ANY, ['fake_device'], 
[], mock.ANY, mock.ANY, refresh_tunnels=expected) def test_bind_devices_sets_refresh_tunnels_if_tunnels_missing(self): self._test_bind_devices_sets_refresh_tunnels([], True) def test_bind_devices_does_not_set_refresh_tunnels_if_tunnels_exist(self): self._test_bind_devices_sets_refresh_tunnels([1, 2, 3], False) def _test_arp_spoofing(self, enable_prevent_arp_spoofing): self.agent.prevent_arp_spoofing = enable_prevent_arp_spoofing ovs_db_list = [{'name': 'fake_device', 'tag': []}] self.agent.vlan_manager.add('fake_network', 1, None, None, 1) vif_port = mock.Mock() vif_port.port_name = 'fake_device' vif_port.ofport = 1 need_binding_ports = [{'network_id': 'fake_network', 'vif_port': vif_port, 'device': 'fake_device', 'admin_state_up': True}] with mock.patch.object( self.agent.plugin_rpc, 'update_device_list', return_value={'devices_up': [], 'devices_down': [], 'failed_devices_up': [], 'failed_devices_down': []}), \ mock.patch.object(self.agent, 'int_br') as int_br, \ mock.patch.object( self.agent, 'setup_arp_spoofing_protection') as setup_arp: int_br.get_ports_attributes.return_value = ovs_db_list self.agent._bind_devices(need_binding_ports) self.assertEqual(enable_prevent_arp_spoofing, setup_arp.called) def test_setup_arp_spoofing_protection_enable(self): self._test_arp_spoofing(True) def test_setup_arp_spoofing_protection_disabled(self): self._test_arp_spoofing(False) def _mock_treat_devices_added_updated(self, details, port, func_name): """Mock treat devices added or updated. :param details: the details to return for the device :param port: the port that get_vif_port_by_id should return :param func_name: the function that should be called :returns: whether the named function was called """ with mock.patch.object(self.agent.plugin_rpc, 'get_devices_details_list_and_failed_devices', return_value={'devices': [details], 'failed_devices': []}),\ mock.patch.object(self.agent.int_br, 'get_vifs_by_ids', return_value={details['device']: port}),\ mock.patch.object(self.agent.plugin_rpc, 'update_device_list', return_value={'devices_up': [], 'devices_down': details, 'failed_devices_up': [], 'failed_devices_down': []}),\ mock.patch.object(self.agent.int_br, 'get_port_tag_dict', return_value={}),\ mock.patch.object(self.agent, func_name) as func: skip_devs, _, need_bound_devices, _ = ( self.agent.treat_devices_added_or_updated([], False, set())) # The function should not raise self.assertFalse(skip_devs) return func.called def test_treat_devices_added_updated_no_active_binding(self): details = {'device': 'id', n_const.NO_ACTIVE_BINDING: True} port = mock.Mock() with mock.patch.object(self.agent.plugin_rpc, 'get_devices_details_list_and_failed_devices', return_value={'devices': [details], 'failed_devices': []}),\ mock.patch.object(self.agent.int_br, 'get_vifs_by_ids', return_value={details['device']: port}),\ mock.patch.object(self.agent, 'port_dead') as func: skip_devs, binding_no_activated_devices, _, _ = ( self.agent.treat_devices_added_or_updated([], False, set())) self.assertFalse(skip_devs) self.assertTrue(func.called) self.assertIn('id', binding_no_activated_devices) def test_treat_devices_added_updated_ignores_invalid_ofport(self): port = mock.Mock() port.ofport = -1 self.assertFalse(self._mock_treat_devices_added_updated( mock.MagicMock(), port, 'port_dead')) def test_treat_devices_added_updated_marks_unknown_port_as_dead(self): port = mock.Mock() port.ofport = 1 self.assertTrue(self._mock_treat_devices_added_updated( mock.MagicMock(), port, 'port_dead')) def 
test_treat_devices_added_does_not_process_missing_port(self): with mock.patch.object( self.agent.plugin_rpc, 'get_devices_details_list_and_failed_devices') as get_dev_fn,\ mock.patch.object(self.agent.int_br, 'get_vif_port_by_id', return_value=None): get_dev_fn.assert_not_called() def test_treat_devices_added_updated_updates_known_port(self): details = mock.MagicMock() details.__contains__.side_effect = lambda x: True self.assertTrue(self._mock_treat_devices_added_updated( details, mock.Mock(), 'treat_vif_port')) def test_treat_devices_added_updated_sends_vif_port_into_extension_manager( self, *args): details = mock.MagicMock() details.__contains__.side_effect = lambda x: True port = mock.MagicMock() def fake_handle_port(context, port): self.assertIn('vif_port', port) with mock.patch.object(self.agent.plugin_rpc, 'get_devices_details_list_and_failed_devices', return_value={'devices': [details], 'failed_devices': []}),\ mock.patch.object(self.agent.ext_manager, 'handle_port', new=fake_handle_port),\ mock.patch.object(self.agent.int_br, 'get_vifs_by_ids', return_value={details['device']: port}),\ mock.patch.object(self.agent, 'treat_vif_port', return_value=False): self.agent.treat_devices_added_or_updated([], False, set()) def test_treat_devices_added_updated_skips_if_port_not_found(self): dev_mock = mock.MagicMock() dev_mock.__getitem__.return_value = 'the_skipped_one' with mock.patch.object(self.agent.plugin_rpc, 'get_devices_details_list_and_failed_devices', return_value={'devices': [dev_mock], 'failed_devices': []}),\ mock.patch.object(self.agent.int_br, 'get_port_tag_dict', return_value={}),\ mock.patch.object(self.agent.int_br, 'get_vifs_by_ids', return_value={}),\ mock.patch.object(self.agent.ext_manager, "delete_port") as ext_mgr_delete_port,\ mock.patch.object(self.agent, 'treat_vif_port') as treat_vif_port: skip_devs = self.agent.treat_devices_added_or_updated( [], False, set()) # The function should return False for resync and no device # processed self.assertEqual((['the_skipped_one'], set(), [], set()), skip_devs) ext_mgr_delete_port.assert_called_once_with( self.agent.context, {'port_id': 'the_skipped_one'}) treat_vif_port.assert_not_called() def test_treat_devices_added_failed_devices(self): dev_mock = 'the_failed_one' with mock.patch.object(self.agent.plugin_rpc, 'get_devices_details_list_and_failed_devices', return_value={'devices': [], 'failed_devices': [dev_mock]}),\ mock.patch.object(self.agent.int_br, 'get_vifs_by_ids', return_value={}),\ mock.patch.object(self.agent, 'treat_vif_port') as treat_vif_port: failed_devices = {'added': set(), 'removed': set()} (_, _, _, failed_devices['added']) = ( self.agent.treat_devices_added_or_updated([], False, set())) # The function should return False for resync and no device # processed self.assertEqual(set([dev_mock]), failed_devices.get('added')) treat_vif_port.assert_not_called() def test_treat_devices_added_updated_put_port_down(self): fake_details_dict = {'admin_state_up': False, 'port_id': 'xxx', 'device': 'xxx', 'network_id': 'yyy', 'physical_network': 'foo', 'segmentation_id': 'bar', 'network_type': 'baz', 'fixed_ips': [{'subnet_id': 'my-subnet-uuid', 'ip_address': '1.1.1.1'}], 'device_owner': DEVICE_OWNER_COMPUTE } with mock.patch.object(self.agent.plugin_rpc, 'get_devices_details_list_and_failed_devices', return_value={'devices': [fake_details_dict], 'failed_devices': []}),\ mock.patch.object(self.agent.int_br, 'get_vifs_by_ids', return_value={'xxx': mock.MagicMock()}),\ mock.patch.object(self.agent.int_br, 
'get_port_tag_dict', return_value={}),\ mock.patch.object(self.agent, 'treat_vif_port') as treat_vif_port: skip_devs, _, need_bound_devices, _ = ( self.agent.treat_devices_added_or_updated([], False, set())) # The function should return False for resync self.assertFalse(skip_devs) self.assertTrue(treat_vif_port.called) def _mock_treat_devices_removed(self, port_exists): details = dict(exists=port_exists) with mock.patch.object(self.agent.plugin_rpc, 'update_device_list', return_value={'devices_up': [], 'devices_down': details, 'failed_devices_up': [], 'failed_devices_down': []}): with mock.patch.object(self.agent, 'port_unbound') as port_unbound: with mock.patch.object(self.agent.int_br, 'get_vif_port_by_id', return_value=None): self.assertFalse(self.agent.treat_devices_removed([{}])) self.assertTrue(port_unbound.called) def test_treat_devices_removed_unbinds_port(self): self._mock_treat_devices_removed(True) def test_treat_devices_removed_ignores_missing_port(self): self._mock_treat_devices_removed(False) def test_treat_devices_removed_failed_devices(self): dev_mock = 'the_failed_one' with mock.patch.object(self.agent.plugin_rpc, 'update_device_list', return_value={'devices_up': [], 'devices_down': [], 'failed_devices_up': [], 'failed_devices_down': [ dev_mock]}): with mock.patch.object(self.agent.int_br, 'get_vif_port_by_id', return_value=None): failed_devices = {'added': set(), 'removed': set()} failed_devices['removed'] = \ self.agent.treat_devices_removed([{}]) self.assertEqual(set([dev_mock]), failed_devices.get('removed')) def test_treat_devices_removed_ext_delete_port(self): port_id = 'fake-id' m_delete = mock.patch.object(self.agent.ext_manager, 'delete_port') m_rpc = mock.patch.object(self.agent.plugin_rpc, 'update_device_list', return_value={'devices_up': [], 'devices_down': [], 'failed_devices_up': [], 'failed_devices_down': []}) m_unbound = mock.patch.object(self.agent, 'port_unbound') with m_delete as delete, m_rpc, m_unbound: with mock.patch.object(self.agent.int_br, 'get_vif_port_by_id', return_value=None): self.agent.treat_devices_removed([port_id]) delete.assert_called_with(mock.ANY, {'port_id': port_id}) def test_treat_vif_port_shut_down_port(self): details = mock.MagicMock() vif_port = type('vif_port', (object,), { "vif_id": "12", "iface-id": "407a79e0-e0be-4b7d-92a6-513b2161011b", "vif_mac": "fa:16:3e:68:46:7b", "port_name": "qr-407a79e0-e0", "ofport": -1, "bridge_name": "br-int"}) with mock.patch.object( self.agent.plugin_rpc, 'update_device_down' ) as update_device_down, mock.patch.object( self.agent, "port_dead" ) as port_dead: port_needs_binding = self.agent.treat_vif_port( vif_port, details['port_id'], details['network_id'], details['network_type'], details['physical_network'], details['segmentation_id'], False, details['fixed_ips'], details['device_owner'], False) self.assertFalse(port_needs_binding) port_dead.assert_called_once_with(vif_port) update_device_down.assert_called_once_with( self.agent.context, details['port_id'], self.agent.agent_id, self.agent.conf.host) def test_bind_port_with_missing_network(self): vif_port = mock.Mock() vif_port.name.return_value = 'port' self.agent._bind_devices([{'network_id': 'non-existent', 'vif_port': vif_port}]) def _test_process_network_ports(self, port_info, skipped_devices=None, binding_no_activated_devices=None): failed_devices = {'added': set(), 'removed': set()} skipped_devices = skipped_devices or [] binding_no_activated_devices = binding_no_activated_devices or set() added_devices = port_info.get('added', set()) 
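        # Stub every collaborator of process_network_ports() below so the
        # assertions only exercise its added/updated/removed/skipped
        # bookkeeping.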
with mock.patch.object(self.agent.sg_agent, "setup_port_filters") as setup_port_filters,\ mock.patch.object( self.agent, "treat_devices_added_or_updated", return_value=( skipped_devices, binding_no_activated_devices, [], failed_devices['added'])) as device_added_updated,\ mock.patch.object(self.agent.int_br, "get_ports_attributes", return_value=[]),\ mock.patch.object(self.agent, "treat_devices_removed", return_value=( failed_devices[ 'removed'])) as device_removed,\ mock.patch.object(self.agent, "treat_devices_skipped", return_value=( skipped_devices)) as device_skipped: self.assertEqual( failed_devices, self.agent.process_network_ports(port_info, False)) setup_port_filters.assert_called_once_with( (added_devices - set(skipped_devices) - binding_no_activated_devices), port_info.get('updated', set())) devices_added_updated = (added_devices | port_info.get('updated', set())) if devices_added_updated: device_added_updated.assert_called_once_with( devices_added_updated, False, set()) if port_info.get('removed', set()): device_removed.assert_called_once_with(port_info['removed']) if skipped_devices: device_skipped.assert_called_once_with(set(skipped_devices)) def test_process_network_ports(self): self._test_process_network_ports( {'current': set(['tap0']), 'removed': set(['eth0']), 'added': set(['eth1'])}) def test_process_network_port_with_updated_ports(self): self._test_process_network_ports( {'current': set(['tap0', 'tap1']), 'updated': set(['tap1', 'eth1']), 'removed': set(['eth0']), 'added': set(['eth1'])}) def test_process_network_port_with_skipped_ports(self): port_info = {'current': set(['tap0', 'tap1']), 'removed': set(['eth0']), 'added': set(['eth1', 'eth2'])} self._test_process_network_ports(port_info, skipped_devices=['eth1']) def test_process_network_port_with_binding_no_activated_devices(self): port_info = {'current': set(['tap0', 'tap1']), 'removed': set(['eth0']), 'added': set(['eth1', 'eth2', 'eth3'])} self._test_process_network_ports( port_info, skipped_devices=['eth1'], binding_no_activated_devices=set(['eth3'])) def test_process_network_port_with_empty_port(self): self._test_process_network_ports({}) def test_hybrid_plug_flag_based_on_firewall(self): cfg.CONF.set_default( 'firewall_driver', 'neutron.agent.firewall.NoopFirewallDriver', group='SECURITYGROUP') agt = self._make_agent() self.assertFalse(agt.agent_state['configurations']['ovs_hybrid_plug']) cfg.CONF.set_default( 'firewall_driver', 'neutron.agent.linux.openvswitch_firewall.OVSFirewallDriver', group='SECURITYGROUP') with mock.patch('neutron.agent.linux.openvswitch_firewall.' 'OVSFirewallDriver.initialize_bridge'): agt = self._make_agent() self.assertFalse(agt.agent_state['configurations']['ovs_hybrid_plug']) cfg.CONF.set_default( 'firewall_driver', 'neutron.agent.linux.iptables_firewall.' 'OVSHybridIptablesFirewallDriver', group='SECURITYGROUP') with mock.patch('neutron.agent.linux.ip_conntrack.' 
'IpConntrackManager._populate_initial_zone_map'): agt = self._make_agent() self.assertTrue(agt.agent_state['configurations']['ovs_hybrid_plug']) def test_report_state(self): with mock.patch.object(self.agent.state_rpc, "report_state") as report_st: self.agent.int_br_device_count = 5 self.systemd_notify.assert_not_called() self.agent._report_state() report_st.assert_called_with(self.agent.context, self.agent.agent_state, True) self.systemd_notify.assert_called_once_with() self.systemd_notify.reset_mock() # agent keeps sending "start_flag" while iter 0 not completed self.assertIn("start_flag", self.agent.agent_state) self.assertEqual( self.agent.agent_state["configurations"]["devices"], self.agent.int_br_device_count ) self.agent._report_state() report_st.assert_called_with(self.agent.context, self.agent.agent_state, True) self.systemd_notify.assert_not_called() def test_report_state_fail(self): with mock.patch.object(self.agent.state_rpc, "report_state") as report_st: report_st.side_effect = Exception() self.agent._report_state() report_st.assert_called_with(self.agent.context, self.agent.agent_state, True) self.agent._report_state() report_st.assert_called_with(self.agent.context, self.agent.agent_state, True) self.systemd_notify.assert_not_called() def test_report_state_revived(self): with mock.patch.object(self.agent.state_rpc, "report_state") as report_st: report_st.return_value = agent_consts.AGENT_REVIVED self.agent._report_state() self.assertTrue(self.agent.fullsync) def test_port_update(self): port_arg = {"id": TEST_PORT_ID1} with mock.patch.object(self.agent.plugin_rpc.remote_resource_cache, "get_resource_by_id") as mocked_resource: port = Port() port['mac_address'] = netaddr.EUI(FAKE_MAC) port['device_id'] = '0' port_bind = PortBinding() port_bind['host'] = 'host' port_bind['vnic_type'] = 'normal' port.bindings = [port_bind] mocked_resource.return_value = port self.agent.port_update("unused_context", port=port_arg, network_type="vlan", segmentation_id="1", physical_network="physnet") self.assertEqual(set([TEST_PORT_ID1]), self.agent.updated_ports) self.assertEqual([], self.agent.updated_smartnic_ports) def test_port_update_smartnic(self): cfg.CONF.set_default('baremetal_smartnic', True, group='AGENT') port_arg = {"id": TEST_PORT_ID1} with mock.patch.object(self.agent.plugin_rpc.remote_resource_cache, "get_resource_by_id") as mocked_resource: port = Port() port['id'] = 'd850ed99-5f46-47bc-8c06-86d9d519c46a' port['mac_address'] = netaddr.EUI(FAKE_MAC) port['device_id'] = '0' bindings_data = PortBinding() bindings_data['host'] = 'host' bindings_data['vnic_type'] = portbindings.VNIC_SMARTNIC bindings_data['vif_type'] = portbindings.VIF_TYPE_OVS bindings_data['profile'] = { 'local_link_information': [{'port_id': 'rep_port'}]} port.bindings = [bindings_data] mocked_resource.return_value = port self.agent.port_update("unused_context", port=port_arg) expected_smartnic_port_data = { 'mac': port['mac_address'], 'vm_uuid': port['device_id'], 'vif_name': 'rep_port', 'iface_id': port['id'], 'vif_type': bindings_data['vif_type'] } self.assertEqual({TEST_PORT_ID1}, self.agent.updated_ports) self.assertEqual([expected_smartnic_port_data], self.agent.updated_smartnic_ports) def test_port_update_unbound_smartnic_port(self): cfg.CONF.set_default('baremetal_smartnic', True, group='AGENT') port_arg = {"id": 'd850ed99-5f46-47bc-8c06-86d9d519c46a'} vif_name = "rep0-0" vif_id = port_arg["id"] vif_mac = FAKE_MAC self.agent.current_smartnic_ports_map = { vif_id: { 'vif_mac': vif_mac, 'vif_name': vif_name}} 
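        # The resource cache below returns an unbound port (empty host,
        # VIF_TYPE_UNBOUND) that is still present in
        # current_smartnic_ports_map, so the agent must queue it in
        # updated_smartnic_ports.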
with mock.patch.object(self.agent.plugin_rpc.remote_resource_cache, "get_resource_by_id") as mocked_resource: port = Port() port['id'] = port_arg["id"] port['mac_address'] = netaddr.EUI(FAKE_MAC) port['device_id'] = '0' bindings_data = PortBinding() bindings_data['host'] = '' bindings_data['vnic_type'] = portbindings.VNIC_SMARTNIC bindings_data['vif_type'] = portbindings.VIF_TYPE_UNBOUND bindings_data['profile'] = {} port.bindings = [bindings_data] mocked_resource.return_value = port self.agent.port_update("unused_context", port=port_arg) expected_smartnic_port_data = [{ 'mac': port['mac_address'], 'vm_uuid': '', 'vif_name': 'rep0-0', 'iface_id': port['id'], 'vif_type': portbindings.VIF_TYPE_UNBOUND }] mocked_resource.assert_called_with(resources.PORT, port['id']) self.assertEqual({port['id']}, self.agent.updated_ports) self.assertEqual(expected_smartnic_port_data, self.agent.updated_smartnic_ports) def test_port_update_unbound_smartnic_port_not_belong_to_the_agent(self): cfg.CONF.set_default('baremetal_smartnic', True, group='AGENT') port_arg = {"id": 'd850ed99-5f46-47bc-8c06-86d9d519c46a'} vif_name = "rep0-0" vif_id = 'd850ed99-5f46-47bc-8c06-86d9d519c46b' vif_mac = FAKE_MAC self.agent.current_smartnic_ports_map = { vif_id: { 'vif_mac': vif_mac, 'vif_name': vif_name}} with mock.patch.object(self.agent.plugin_rpc.remote_resource_cache, "get_resource_by_id") as mocked_resource: port = Port() port['id'] = port_arg["id"] port['mac_address'] = netaddr.EUI(FAKE_MAC) port['device_id'] = '0' bindings_data = PortBinding() bindings_data['host'] = '' bindings_data['vnic_type'] = portbindings.VNIC_SMARTNIC bindings_data['vif_type'] = portbindings.VIF_TYPE_UNBOUND bindings_data['profile'] = {} port.bindings = [bindings_data] mocked_resource.return_value = port self.agent.port_update("unused_context", port=port_arg) mocked_resource.assert_called_with(resources.PORT, port['id']) self.assertEqual({port['id']}, self.agent.updated_ports) self.assertEqual([], self.agent.updated_smartnic_ports) def test_port_delete_after_update(self): """Make sure a port is not marked for delete and update.""" port = {'id': TEST_PORT_ID1} self.agent.port_update(context=None, port=port) self.agent.port_delete(context=None, port_id=port['id']) self.assertEqual(set(), self.agent.updated_ports) self.assertEqual(set([port['id']]), self.agent.deleted_ports) def test_process_deleted_ports_cleans_network_ports(self): self.agent._update_port_network(TEST_PORT_ID1, TEST_NETWORK_ID1) self.agent.port_delete(context=None, port_id=TEST_PORT_ID1) self.agent.sg_agent = mock.Mock() self.agent.int_br = mock.Mock() @contextlib.contextmanager def bridge_deferred(*args, **kwargs): yield self.agent.int_br.deferred = mock.Mock(side_effect=bridge_deferred) self.agent.process_deleted_ports(port_info={}) self.assertEqual(set(), self.agent.network_ports[TEST_NETWORK_ID1]) def test_network_update(self): """Network update marks port for update. """ network = {'id': TEST_NETWORK_ID1} port = {'id': TEST_PORT_ID1, 'network_id': network['id']} self.agent._update_port_network(port['id'], port['network_id']) with mock.patch.object(self.agent.plugin_rpc, 'get_network_details'), \ mock.patch.object(self.agent, '_update_network_segmentation_id'): self.agent.network_update(context=None, network=network) self.assertEqual(set([port['id']]), self.agent.updated_ports) def test_network_update_outoforder(self): """Network update arrives later than port_delete. But the main agent loop still didn't process the ports, so we ensure the port is not marked for update. 
""" network = {'id': TEST_NETWORK_ID1} port = {'id': TEST_PORT_ID1, 'network_id': network['id']} self.agent._update_port_network(port['id'], port['network_id']) self.agent.port_delete(context=None, port_id=port['id']) with mock.patch.object(self.agent.plugin_rpc, 'get_network_details'), \ mock.patch.object(self.agent, '_update_network_segmentation_id'): self.agent.network_update(context=None, network=network) self.assertEqual(set(), self.agent.updated_ports) def test_update_port_network(self): """Ensure ports are associated and moved across networks correctly.""" self.agent._update_port_network(TEST_PORT_ID1, TEST_NETWORK_ID1) self.agent._update_port_network(TEST_PORT_ID2, TEST_NETWORK_ID1) self.agent._update_port_network(TEST_PORT_ID3, TEST_NETWORK_ID2) self.agent._update_port_network(TEST_PORT_ID1, TEST_NETWORK_ID2) self.assertEqual(set([TEST_PORT_ID2]), self.agent.network_ports[TEST_NETWORK_ID1]) self.assertEqual(set([TEST_PORT_ID1, TEST_PORT_ID3]), self.agent.network_ports[TEST_NETWORK_ID2]) def test_port_delete(self): vif = FakeVif() with mock.patch.object(self.agent, 'int_br') as int_br: int_br.get_vif_by_port_id.return_value = vif.port_name int_br.get_vif_port_by_id.return_value = vif self.agent.port_delete("unused_context", port_id='id') self.agent.process_deleted_ports(port_info={}) # the main things we care about are that it gets put in the # dead vlan and gets blocked int_br.set_db_attribute.assert_any_call( 'Port', vif.port_name, 'tag', constants.DEAD_VLAN_TAG, log_errors=False) int_br.drop_port.assert_called_once_with(in_port=vif.ofport) def test_port_delete_removed_port(self): with mock.patch.object(self.agent, 'int_br') as int_br: self.agent.port_delete("unused_context", port_id='id') # if it was removed from the bridge, we shouldn't be processing it self.agent.process_deleted_ports(port_info={'removed': {'id', }}) int_br.set_db_attribute.assert_not_called() int_br.drop_port.assert_not_called() def test_binding_deactivate_not_for_host(self): self.agent.binding_deactivate('unused_context', port_id='id', host='other_host') self.assertEqual(set(), self.agent.deactivated_bindings) def test_binding_deactivate(self): vif = FakeVif() with mock.patch.object(self.agent, 'int_br') as int_br: int_br.get_vif_port_by_id.return_value = vif self.agent.binding_deactivate('unused_context', port_id='id', host='host') self.assertEqual(set(['id']), self.agent.deactivated_bindings) self.agent.process_deactivated_bindings(port_info={}) int_br.get_vif_port_by_id.assert_called_once_with('id') int_br.delete_port.assert_called_once_with(vif.port_name) self.assertEqual(set(), self.agent.deactivated_bindings) def test_binding_deactivate_removed_port(self): with mock.patch.object(self.agent, 'int_br') as int_br: self.agent.binding_deactivate('unused_context', port_id='id', host='host') self.assertEqual(set(['id']), self.agent.deactivated_bindings) self.agent.process_deactivated_bindings( port_info={'removed': {'id', }}) int_br.get_vif_port_by_id.assert_not_called() int_br.delete_port.assert_not_called() self.assertEqual(set(), self.agent.deactivated_bindings) def test_binding_activate(self): self.agent.binding_activate('context', port_id='id', host='host') self.assertIn('id', self.agent.activated_bindings) def test_binding_activate_not_for_host(self): self.agent.binding_activate('context', port_id='id', host='other-host') self.assertEqual(set(), self.agent.activated_bindings) def test_process_activated_bindings(self): port_info = {} port_info['added'] = set(['added_port_id']) port_info['current'] = 
set(['activated_port_id']) self.agent.process_activated_bindings(port_info, set(['activated_port_id'])) self.assertIn('added_port_id', port_info['added']) self.assertIn('activated_port_id', port_info['added']) def test_process_activated_bindings_activated_port_not_present(self): port_info = {} port_info['added'] = set(['added_port_id']) port_info['current'] = set() self.agent.process_activated_bindings(port_info, set(['activated_port_id'])) self.assertIn('added_port_id', port_info['added']) self.assertNotIn('activated_port_id', port_info['added']) def _test_setup_physical_bridges(self, port_exists=False): with mock.patch.object(ip_lib.IPDevice, "exists") as devex_fn,\ mock.patch.object(sys, "exit"),\ mock.patch.object(self.agent, 'br_phys_cls') as phys_br_cls,\ mock.patch.object(self.agent, 'int_br') as int_br,\ mock.patch.object(self.agent, '_check_bridge_datapath_id'),\ mock.patch.object(ovs_lib.BaseOVS, 'get_bridges'): devex_fn.return_value = True parent = mock.MagicMock() phys_br = phys_br_cls() parent.attach_mock(phys_br_cls, 'phys_br_cls') parent.attach_mock(phys_br, 'phys_br') parent.attach_mock(int_br, 'int_br') if port_exists: phys_br.get_port_ofport.return_value = "phy_ofport" int_br.get_port_ofport.return_value = "int_ofport" else: phys_br.add_patch_port.return_value = "phy_ofport" int_br.add_patch_port.return_value = "int_ofport" phys_br.port_exists.return_value = port_exists int_br.port_exists.return_value = port_exists self.agent.setup_physical_bridges({"physnet1": "br-eth"}) expected_calls = [ mock.call.phys_br_cls('br-eth'), mock.call.phys_br.create(), mock.call.phys_br.set_secure_mode(), mock.call.phys_br.setup_controllers(mock.ANY), mock.call.phys_br.setup_default_table(), mock.call.int_br.db_get_val('Interface', 'int-br-eth', 'type', log_errors=False), # Have to use __getattr__ here to avoid mock._Call.__eq__ # method being called mock.call.int_br.db_get_val().__getattr__('__eq__')('veth'), mock.call.int_br.port_exists('int-br-eth'), ] if port_exists: expected_calls += [ mock.call.int_br.get_port_ofport('int-br-eth'), ] else: expected_calls += [ mock.call.int_br.add_patch_port( 'int-br-eth', constants.NONEXISTENT_PEER), ] expected_calls += [ mock.call.phys_br.port_exists('phy-br-eth'), ] if port_exists: expected_calls += [ mock.call.phys_br.get_port_ofport('phy-br-eth'), ] else: expected_calls += [ mock.call.phys_br.add_patch_port( 'phy-br-eth', constants.NONEXISTENT_PEER), ] expected_calls += [ mock.call.int_br.drop_port(in_port='int_ofport'), mock.call.phys_br.drop_port(in_port='phy_ofport'), mock.call.int_br.set_db_attribute('Interface', 'int-br-eth', 'options', {'peer': 'phy-br-eth'}), mock.call.phys_br.set_db_attribute('Interface', 'phy-br-eth', 'options', {'peer': 'int-br-eth'}), ] parent.assert_has_calls(expected_calls) self.assertEqual("int_ofport", self.agent.int_ofports["physnet1"]) self.assertEqual("phy_ofport", self.agent.phys_ofports["physnet1"]) def test_setup_physical_bridges(self): self._test_setup_physical_bridges() def test_setup_physical_bridges_port_exists(self): self._test_setup_physical_bridges(port_exists=True) def test_setup_physical_bridges_using_veth_interconnection(self): self.agent.use_veth_interconnection = True with mock.patch.object(ip_lib.IPDevice, "exists") as devex_fn,\ mock.patch.object(sys, "exit"),\ mock.patch.object(utils, "execute") as utilsexec_fn,\ mock.patch.object(self.agent, 'br_phys_cls') as phys_br_cls,\ mock.patch.object(self.agent, 'int_br') as int_br,\ mock.patch.object(self.agent, '_check_bridge_datapath_id'),\ 
mock.patch.object(ip_lib.IPWrapper, "add_veth") as addveth_fn,\ mock.patch.object(ip_lib.IpLinkCommand, "delete") as linkdel_fn,\ mock.patch.object(ip_lib.IpLinkCommand, "set_up"),\ mock.patch.object(ip_lib.IpLinkCommand, "set_mtu"),\ mock.patch.object(ovs_lib.BaseOVS, "get_bridges") as get_br_fn: devex_fn.return_value = True parent = mock.MagicMock() parent.attach_mock(utilsexec_fn, 'utils_execute') parent.attach_mock(linkdel_fn, 'link_delete') parent.attach_mock(addveth_fn, 'add_veth') addveth_fn.return_value = (ip_lib.IPDevice("int-br-eth1"), ip_lib.IPDevice("phy-br-eth1")) phys_br = phys_br_cls() phys_br.add_port.return_value = "phys_veth_ofport" int_br.add_port.return_value = "int_veth_ofport" get_br_fn.return_value = ["br-eth"] self.agent.setup_physical_bridges({"physnet1": "br-eth"}) expected_calls = [mock.call.link_delete(), mock.call.utils_execute(['udevadm', 'settle', '--timeout=10']), mock.call.add_veth('int-br-eth', 'phy-br-eth')] parent.assert_has_calls(expected_calls, any_order=False) self.assertEqual("int_veth_ofport", self.agent.int_ofports["physnet1"]) self.assertEqual("phys_veth_ofport", self.agent.phys_ofports["physnet1"]) int_br.add_port.assert_called_with("int-br-eth") phys_br.add_port.assert_called_with("phy-br-eth") def _test_setup_physical_bridges_change_from_veth_to_patch_conf( self, port_exists=False): with mock.patch.object(sys, "exit"),\ mock.patch.object(self.agent, 'br_phys_cls') as phys_br_cls,\ mock.patch.object(self.agent, 'int_br') as int_br,\ mock.patch.object(self.agent.int_br, 'db_get_val', return_value='veth'), \ mock.patch.object(self.agent, '_check_bridge_datapath_id'), \ mock.patch.object(ovs_lib.BaseOVS, 'get_bridges'): phys_br = phys_br_cls() parent = mock.MagicMock() parent.attach_mock(phys_br_cls, 'phys_br_cls') parent.attach_mock(phys_br, 'phys_br') parent.attach_mock(int_br, 'int_br') if port_exists: phys_br.get_port_ofport.return_value = "phy_ofport" int_br.get_port_ofport.return_value = "int_ofport" else: phys_br.add_patch_port.return_value = "phy_ofport" int_br.add_patch_port.return_value = "int_ofport" phys_br.port_exists.return_value = port_exists int_br.port_exists.return_value = port_exists self.agent.setup_physical_bridges({"physnet1": "br-eth"}) expected_calls = [ mock.call.phys_br_cls('br-eth'), mock.call.phys_br.create(), mock.call.phys_br.set_secure_mode(), mock.call.phys_br.setup_controllers(mock.ANY), mock.call.phys_br.setup_default_table(), mock.call.int_br.delete_port('int-br-eth'), mock.call.phys_br.delete_port('phy-br-eth'), mock.call.int_br.port_exists('int-br-eth'), ] if port_exists: expected_calls += [ mock.call.int_br.get_port_ofport('int-br-eth'), ] else: expected_calls += [ mock.call.int_br.add_patch_port( 'int-br-eth', constants.NONEXISTENT_PEER), ] expected_calls += [ mock.call.phys_br.port_exists('phy-br-eth'), ] if port_exists: expected_calls += [ mock.call.phys_br.get_port_ofport('phy-br-eth'), ] else: expected_calls += [ mock.call.phys_br.add_patch_port( 'phy-br-eth', constants.NONEXISTENT_PEER), ] expected_calls += [ mock.call.int_br.drop_port(in_port='int_ofport'), mock.call.phys_br.drop_port(in_port='phy_ofport'), mock.call.int_br.set_db_attribute('Interface', 'int-br-eth', 'options', {'peer': 'phy-br-eth'}), mock.call.phys_br.set_db_attribute('Interface', 'phy-br-eth', 'options', {'peer': 'int-br-eth'}), ] parent.assert_has_calls(expected_calls) self.assertEqual("int_ofport", self.agent.int_ofports["physnet1"]) self.assertEqual("phy_ofport", self.agent.phys_ofports["physnet1"]) def 
test_setup_physical_bridges_change_from_veth_to_patch_conf(self): self._test_setup_physical_bridges_change_from_veth_to_patch_conf() def test_setup_physical_bridges_change_from_veth_to_patch_conf_port_exists( self): self._test_setup_physical_bridges_change_from_veth_to_patch_conf( port_exists=True) def test_setup_tunnel_br(self): self.tun_br = mock.Mock() with mock.patch.object(self.agent.int_br, "add_patch_port", return_value=1) as int_patch_port,\ mock.patch.object(self.agent.tun_br, "add_patch_port", return_value=1) as tun_patch_port,\ mock.patch.object(self.agent.tun_br, 'bridge_exists', return_value=False),\ mock.patch.object(self.agent.tun_br, 'create') as create_tun,\ mock.patch.object(self.agent.tun_br, 'setup_controllers') as setup_controllers,\ mock.patch.object(self.agent.tun_br, 'port_exists', return_value=False),\ mock.patch.object(self.agent.int_br, 'port_exists', return_value=False),\ mock.patch.object(sys, "exit"): self.agent.setup_tunnel_br(None) self.agent.setup_tunnel_br() self.assertTrue(create_tun.called) self.assertTrue(setup_controllers.called) self.assertTrue(int_patch_port.called) self.assertTrue(tun_patch_port.called) def test_setup_tunnel_br_ports_exits_drop_flows(self): cfg.CONF.set_override('drop_flows_on_start', True, 'AGENT') with mock.patch.object(self.agent.tun_br, 'port_exists', return_value=True),\ mock.patch.object(self.agent, 'tun_br'),\ mock.patch.object(self.agent.int_br, 'port_exists', return_value=True),\ mock.patch.object(self.agent.tun_br, 'setup_controllers'),\ mock.patch.object(self.agent, 'patch_tun_ofport', new=2),\ mock.patch.object(self.agent, 'patch_int_ofport', new=2),\ mock.patch.object(self.agent.tun_br, 'uninstall_flows') as delete,\ mock.patch.object(self.agent.int_br, "add_patch_port") as int_patch_port,\ mock.patch.object(self.agent.tun_br, "add_patch_port") as tun_patch_port,\ mock.patch.object(sys, "exit"): self.agent.setup_tunnel_br(None) self.agent.setup_tunnel_br() int_patch_port.assert_not_called() tun_patch_port.assert_not_called() self.assertTrue(delete.called) def test_setup_tunnel_port(self): self.agent.tun_br = mock.Mock() self.agent.l2_pop = False self.agent.udp_vxlan_port = 8472 self.agent.tun_br_ofports['vxlan'] = {} self.agent.local_ip = '2.3.4.5' with mock.patch.object(self.agent.tun_br, "add_tunnel_port", return_value='6') as add_tun_port_fn,\ mock.patch.object(self.agent.tun_br, "add_flow"): self.agent._setup_tunnel_port(self.agent.tun_br, 'portname', '1.2.3.4', 'vxlan') self.assertTrue(add_tun_port_fn.called) def test_port_unbound(self): with mock.patch.object(self.agent, "reclaim_local_vlan") as reclvl_fn: self.agent.enable_tunneling = True lvm = mock.Mock() lvm.network_type = "gre" lvm.vif_ports = {"vif1": mock.Mock()} self.agent.vlan_manager.mapping["netuid12345"] = lvm self.agent.port_unbound("vif1", "netuid12345") self.assertTrue(reclvl_fn.called) lvm.vif_ports = {} self.agent.port_unbound("vif1", "netuid12345") self.assertEqual(2, reclvl_fn.call_count) lvm.vif_ports = {"vif1": mock.Mock()} self.agent.port_unbound("vif3", "netuid12345") self.assertEqual(2, reclvl_fn.call_count) def _prepare_l2_pop_ofports(self): lvm1 = mock.Mock() lvm1.network_type = 'gre' lvm1.vlan = 'vlan1' lvm1.segmentation_id = 'seg1' lvm1.tun_ofports = set(['1']) lvm2 = mock.Mock() lvm2.network_type = 'gre' lvm2.vlan = 'vlan2' lvm2.segmentation_id = 'seg2' lvm2.tun_ofports = set(['1', '2']) self.agent.vlan_manager.mapping = {'net1': lvm1, 'net2': lvm2} self.agent.tun_br_ofports = {'gre': {'1.1.1.1': '1', '2.2.2.2': '2'}} 
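        # net1 floods through tunnel ofport '1' only, while net2 uses both
        # '1' and '2'; the fdb_add/fdb_remove tests below rely on exactly
        # this asymmetry.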
self.agent.arp_responder_enabled = True def test_fdb_ignore_network(self): self._prepare_l2_pop_ofports() fdb_entry = {'net3': {}} with mock.patch.object(self.agent.tun_br, 'add_flow') as add_flow_fn,\ mock.patch.object(self.agent.tun_br, 'uninstall_flows') as del_flow_fn,\ mock.patch.object(self.agent, '_setup_tunnel_port') as add_tun_fn,\ mock.patch.object(self.agent, 'cleanup_tunnel_port') as clean_tun_fn: self.agent.fdb_add(None, fdb_entry) add_flow_fn.assert_not_called() add_tun_fn.assert_not_called() self.agent.fdb_remove(None, fdb_entry) del_flow_fn.assert_not_called() clean_tun_fn.assert_not_called() def test_fdb_ignore_self(self): self._prepare_l2_pop_ofports() self.agent.local_ip = 'agent_ip' fdb_entry = {'net2': {'network_type': 'gre', 'segment_id': 'tun2', 'ports': {'agent_ip': [l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1), n_const.FLOODING_ENTRY]}}} with mock.patch.object(self.agent.tun_br, "deferred") as defer_fn: self.agent.fdb_add(None, fdb_entry) defer_fn.assert_not_called() self.agent.fdb_remove(None, fdb_entry) defer_fn.assert_not_called() def test_fdb_add_flows(self): self._prepare_l2_pop_ofports() fdb_entry = {'net1': {'network_type': 'gre', 'segment_id': 'tun1', 'ports': {'2.2.2.2': [l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1), n_const.FLOODING_ENTRY]}}} with mock.patch.object(self.agent, 'tun_br', autospec=True) as tun_br,\ mock.patch.object(self.agent, '_setup_tunnel_port', autospec=True) as add_tun_fn: self.agent.fdb_add(None, fdb_entry) add_tun_fn.assert_not_called() deferred_br_call = mock.call.deferred().__enter__() expected_calls = [ deferred_br_call.install_arp_responder('vlan1', FAKE_IP1, FAKE_MAC), deferred_br_call.install_unicast_to_tun('vlan1', 'seg1', '2', FAKE_MAC), deferred_br_call.install_flood_to_tun('vlan1', 'seg1', set(['1', '2'])), ] tun_br.assert_has_calls(expected_calls) def test_fdb_del_flows(self): self._prepare_l2_pop_ofports() fdb_entry = {'net2': {'network_type': 'gre', 'segment_id': 'tun2', 'ports': {'2.2.2.2': [l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1), n_const.FLOODING_ENTRY]}}} with mock.patch.object(self.agent, 'tun_br', autospec=True) as br_tun: self.agent.fdb_remove(None, fdb_entry) deferred_br_call = mock.call.deferred().__enter__() expected_calls = [ mock.call.deferred(), mock.call.deferred().__enter__(), deferred_br_call.delete_arp_responder('vlan2', FAKE_IP1), deferred_br_call.delete_unicast_to_tun('vlan2', FAKE_MAC), deferred_br_call.install_flood_to_tun('vlan2', 'seg2', set(['1'])), deferred_br_call.delete_port('gre-02020202'), deferred_br_call.cleanup_tunnel_port('2'), mock.call.deferred().__exit__(None, None, None), ] br_tun.assert_has_calls(expected_calls) def test_fdb_add_port(self): self._prepare_l2_pop_ofports() fdb_entry = {'net1': {'network_type': 'gre', 'segment_id': 'tun1', 'ports': {'1.1.1.1': [l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1)]}}} with mock.patch.object(self.agent, 'tun_br', autospec=True) as tun_br,\ mock.patch.object(self.agent, '_setup_tunnel_port') as add_tun_fn: self.agent.fdb_add(None, fdb_entry) add_tun_fn.assert_not_called() fdb_entry['net1']['ports']['10.10.10.10'] = [ l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1)] self.agent.fdb_add(None, fdb_entry) deferred_br = tun_br.deferred().__enter__() add_tun_fn.assert_called_with( deferred_br, 'gre-0a0a0a0a', '10.10.10.10', 'gre') def test_fdb_del_port(self): self._prepare_l2_pop_ofports() fdb_entry = {'net2': {'network_type': 'gre', 'segment_id': 'tun2', 'ports': {'2.2.2.2': [n_const.FLOODING_ENTRY]}}} with mock.patch.object(self.agent.tun_br, 'deferred') as defer_fn,\ 
                mock.patch.object(self.agent.tun_br,
                                  'delete_port') as delete_port_fn:
            self.agent.fdb_remove(None, fdb_entry)
            deferred_br = defer_fn().__enter__()
            deferred_br.delete_port.assert_called_once_with('gre-02020202')
            delete_port_fn.assert_not_called()

    def test_fdb_update_chg_ip(self):
        self._prepare_l2_pop_ofports()
        fdb_entries = {'chg_ip':
                       {'net1':
                        {'agent_ip':
                         {'before': [l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1)],
                          'after': [l2pop_rpc.PortInfo(FAKE_MAC,
                                                       FAKE_IP2)]}}}}
        with mock.patch.object(self.agent.tun_br, 'deferred') as deferred_fn:
            self.agent.fdb_update(None, fdb_entries)
            deferred_br = deferred_fn().__enter__()
            deferred_br.assert_has_calls([
                mock.call.install_arp_responder('vlan1', FAKE_IP2, FAKE_MAC),
                mock.call.delete_arp_responder('vlan1', FAKE_IP1)
            ])

    def test_del_fdb_flow_idempotency(self):
        lvm = mock.Mock()
        lvm.network_type = 'gre'
        lvm.vlan = 'vlan1'
        lvm.segmentation_id = 'seg1'
        lvm.tun_ofports = set(['1', '2'])
        with mock.patch.object(self.agent.tun_br,
                               'mod_flow') as mod_flow_fn,\
                mock.patch.object(self.agent.tun_br,
                                  'uninstall_flows') as uninstall_flows_fn:
            self.agent.del_fdb_flow(self.agent.tun_br,
                                    n_const.FLOODING_ENTRY,
                                    '1.1.1.1', lvm, '3')
            mod_flow_fn.assert_not_called()
            uninstall_flows_fn.assert_not_called()

    def test_recl_lv_port_to_preserve(self):
        self._prepare_l2_pop_ofports()
        self.agent.l2_pop = True
        self.agent.enable_tunneling = True
        with mock.patch.object(self.agent, 'tun_br',
                               autospec=True) as tun_br:
            self.agent.reclaim_local_vlan('net1')
            tun_br.cleanup_tunnel_port.assert_not_called()

    def test_recl_lv_port_to_remove(self):
        self._prepare_l2_pop_ofports()
        self.agent.l2_pop = True
        self.agent.enable_tunneling = True
        with mock.patch.object(self.agent, 'tun_br',
                               autospec=True) as tun_br:
            self.agent.reclaim_local_vlan('net2')
            tun_br.delete_port.assert_called_once_with('gre-02020202')

    def _test_ext_br_recreated(self, setup_bridges_side_effect):
        bridge_mappings = {'physnet0': 'br-ex0',
                           'physnet1': 'br-ex1'}
        ex_br_mocks = [mock.Mock(br_name='br-ex0'),
                       mock.Mock(br_name='br-ex1')]
        phys_bridges = {'physnet0': ex_br_mocks[0],
                        'physnet1': ex_br_mocks[1]}
        bridges_added = ['br-ex0']
        expected_added_bridges = (
            bridges_added if setup_bridges_side_effect else [])
        with mock.patch.object(self.agent, 'check_ovs_status',
                               return_value=constants.OVS_NORMAL), \
                mock.patch.object(self.agent, '_agent_has_updates',
                                  side_effect=TypeError('loop exit')), \
                mock.patch.dict(self.agent.bridge_mappings, bridge_mappings,
                                clear=True), \
                mock.patch.dict(self.agent.phys_brs, phys_bridges,
                                clear=True), \
                mock.patch.object(self.agent, 'setup_physical_bridges') as \
                setup_physical_bridges, \
                mock.patch.object(self.agent.ovs.ovsdb, 'idl_monitor') as \
                mock_idl_monitor:
            mock_idl_monitor.bridges_added = bridges_added
            setup_physical_bridges.side_effect = setup_bridges_side_effect
            try:
                self.agent.rpc_loop(polling_manager=mock.Mock())
            except TypeError:
                pass
        # setup_physical_bridges should be called exactly once, even when it
        # raises RuntimeError, because _agent_has_updates raises TypeError to
        # stop the agent after the first loop iteration
        setup_physical_bridges.assert_called_once_with({'physnet0': 'br-ex0'})
        self.assertEqual(expected_added_bridges, self.agent.added_bridges)

    def test_ext_br_recreated(self):
        self._test_ext_br_recreated(setup_bridges_side_effect=None)

    def test_ext_br_recreated_fail_setup_physical_bridge(self):
        self._test_ext_br_recreated(setup_bridges_side_effect=RuntimeError)

    def test_daemon_loop_uses_polling_manager(self):
        ex_br_mock = mock.Mock(br_name="br-ex0")
        with mock.patch.object(polling,
    def test_daemon_loop_uses_polling_manager(self):
        ex_br_mock = mock.Mock(br_name="br-ex0")
        with mock.patch.object(polling, 'get_polling_manager') as \
                mock_get_pm, \
                mock.patch.object(self.agent, 'rpc_loop') as mock_loop, \
                mock.patch.dict(self.agent.phys_brs,
                                {'physnet0': ex_br_mock}, clear=True), \
                mock.patch.object(self.agent.ovs.ovsdb, 'idl_monitor') as \
                mock_idl_monitor:
            self.agent.daemon_loop()
        mock_get_pm.assert_called_with(True,
                                       constants.DEFAULT_OVSDBMON_RESPAWN)
        mock_loop.assert_called_once_with(polling_manager=mock.ANY)
        mock_idl_monitor.start_bridge_monitor.assert_called()

    def test_setup_tunnel_port_invalid_ofport(self):
        remote_ip = '1.2.3.4'
        with mock.patch.object(
            self.agent.tun_br,
            'add_tunnel_port',
            return_value=ovs_lib.INVALID_OFPORT) as add_tunnel_port_fn,\
                mock.patch.object(self.mod_agent.LOG,
                                  'error') as log_error_fn:
            self.agent.local_ip = '1.2.3.4'
            ofport = self.agent._setup_tunnel_port(
                self.agent.tun_br, 'gre-1', remote_ip, n_const.TYPE_GRE)
            add_tunnel_port_fn.assert_called_once_with(
                'gre-1', remote_ip, self.agent.local_ip, n_const.TYPE_GRE,
                self.agent.vxlan_udp_port, self.agent.dont_fragment,
                self.agent.tunnel_csum, self.agent.tos)
            log_error_fn.assert_called_once_with(
                _("Failed to set-up %(type)s tunnel port to %(ip)s"),
                {'type': n_const.TYPE_GRE, 'ip': remote_ip})
            self.assertEqual(0, ofport)

    def test_setup_tunnel_port_invalid_address_mismatch(self):
        remote_ip = '2001:db8::2'
        with mock.patch.object(self.mod_agent.LOG, 'error') as log_error_fn:
            self.agent.local_ip = '1.2.3.4'
            ofport = self.agent._setup_tunnel_port(
                self.agent.tun_br, 'gre-1', remote_ip, n_const.TYPE_GRE)
            log_error_fn.assert_called_once_with(
                _("IP version mismatch, cannot create tunnel: "
                  "local_ip=%(lip)s remote_ip=%(rip)s"),
                {'lip': self.agent.local_ip, 'rip': remote_ip})
            self.assertEqual(0, ofport)

    def test_setup_tunnel_port_invalid_netaddr_exception(self):
        remote_ip = '2001:db8::2'
        with mock.patch.object(self.mod_agent.LOG, 'error') as log_error_fn:
            self.agent.local_ip = '1.2.3.4.5'
            ofport = self.agent._setup_tunnel_port(
                self.agent.tun_br, 'gre-1', remote_ip, n_const.TYPE_GRE)
            log_error_fn.assert_called_once_with(
                _("Invalid local or remote IP, cannot create tunnel: "
                  "local_ip=%(lip)s remote_ip=%(rip)s"),
                {'lip': self.agent.local_ip, 'rip': remote_ip})
            self.assertEqual(0, ofport)

    def test_setup_tunnel_port_error_negative_df_disabled(self):
        remote_ip = '1.2.3.4'
        with mock.patch.object(
            self.agent.tun_br,
            'add_tunnel_port',
            return_value=ovs_lib.INVALID_OFPORT) as add_tunnel_port_fn,\
                mock.patch.object(self.mod_agent.LOG,
                                  'error') as log_error_fn:
            self.agent.dont_fragment = False
            self.agent.tunnel_csum = False
            self.agent.local_ip = '2.3.4.5'
            ofport = self.agent._setup_tunnel_port(
                self.agent.tun_br, 'gre-1', remote_ip, n_const.TYPE_GRE)
            add_tunnel_port_fn.assert_called_once_with(
                'gre-1', remote_ip, self.agent.local_ip, n_const.TYPE_GRE,
                self.agent.vxlan_udp_port, self.agent.dont_fragment,
                self.agent.tunnel_csum, self.agent.tos)
            log_error_fn.assert_called_once_with(
                _("Failed to set-up %(type)s tunnel port to %(ip)s"),
                {'type': n_const.TYPE_GRE, 'ip': remote_ip})
            self.assertEqual(0, ofport)

    def test_setup_tunnel_port_error_negative_tunnel_csum(self):
        remote_ip = '1.2.3.4'
        with mock.patch.object(
            self.agent.tun_br,
            'add_tunnel_port',
            return_value=ovs_lib.INVALID_OFPORT) as add_tunnel_port_fn,\
                mock.patch.object(self.mod_agent.LOG,
                                  'error') as log_error_fn:
            self.agent.dont_fragment = True
            self.agent.tunnel_csum = True
            self.agent.local_ip = '2.3.4.5'
            ofport = self.agent._setup_tunnel_port(
                self.agent.tun_br, 'gre-1', remote_ip, n_const.TYPE_GRE)
            add_tunnel_port_fn.assert_called_once_with(
                'gre-1', remote_ip, self.agent.local_ip, n_const.TYPE_GRE,
                self.agent.vxlan_udp_port, self.agent.dont_fragment,
                self.agent.tunnel_csum, self.agent.tos)
            log_error_fn.assert_called_once_with(
                _("Failed to set-up %(type)s tunnel port to %(ip)s"),
                {'type': n_const.TYPE_GRE, 'ip': remote_ip})
            self.assertEqual(0, ofport)

    def test_setup_tunnel_port_error_negative_tos_inherit(self):
        remote_ip = '1.2.3.4'
        with mock.patch.object(
            self.agent.tun_br,
            'add_tunnel_port',
            return_value=ovs_lib.INVALID_OFPORT) as add_tunnel_port_fn,\
                mock.patch.object(self.mod_agent.LOG,
                                  'error') as log_error_fn:
            self.agent.tos = 'inherit'
            self.agent.local_ip = '2.3.4.5'
            ofport = self.agent._setup_tunnel_port(
                self.agent.tun_br, 'gre-1', remote_ip, n_const.TYPE_GRE)
            add_tunnel_port_fn.assert_called_once_with(
                'gre-1', remote_ip, self.agent.local_ip, n_const.TYPE_GRE,
                self.agent.vxlan_udp_port, self.agent.dont_fragment,
                self.agent.tunnel_csum, self.agent.tos)
            log_error_fn.assert_called_once_with(
                _("Failed to set-up %(type)s tunnel port to %(ip)s"),
                {'type': n_const.TYPE_GRE, 'ip': remote_ip})
            self.assertEqual(0, ofport)

    def test_tunnel_sync_with_ml2_plugin(self):
        fake_tunnel_details = {'tunnels': [{'ip_address': '100.101.31.15'}]}
        with mock.patch.object(self.agent.plugin_rpc, 'tunnel_sync',
                               return_value=fake_tunnel_details),\
                mock.patch.object(
                    self.agent,
                    '_setup_tunnel_port') as _setup_tunnel_port_fn,\
                mock.patch.object(self.agent,
                                  'cleanup_stale_flows') as cleanup:
            self.agent.tunnel_types = ['vxlan']
            self.agent.tunnel_sync()
            expected_calls = [mock.call(self.agent.tun_br, 'vxlan-64651f0f',
                                        '100.101.31.15', 'vxlan')]
            _setup_tunnel_port_fn.assert_has_calls(expected_calls)
            cleanup.assert_not_called()

    def test_tunnel_sync_invalid_ip_address(self):
        fake_tunnel_details = {'tunnels': [{'ip_address': '300.300.300.300'},
                                           {'ip_address': '100.100.100.100'}]}
        with mock.patch.object(self.agent.plugin_rpc, 'tunnel_sync',
                               return_value=fake_tunnel_details),\
                mock.patch.object(
                    self.agent,
                    '_setup_tunnel_port') as _setup_tunnel_port_fn,\
                mock.patch.object(self.agent,
                                  'cleanup_stale_flows') as cleanup:
            self.agent.tunnel_types = ['vxlan']
            self.agent.tunnel_sync()
            _setup_tunnel_port_fn.assert_called_once_with(self.agent.tun_br,
                                                          'vxlan-64646464',
                                                          '100.100.100.100',
                                                          'vxlan')
            cleanup.assert_not_called()

    def test_tunnel_sync_setup_tunnel_flood_flow_once(self):
        fake_tunnel_details = {'tunnels': [{'ip_address': '200.200.200.200'},
                                           {'ip_address': '100.100.100.100'}]}
        with mock.patch.object(self.agent.plugin_rpc, 'tunnel_sync',
                               return_value=fake_tunnel_details),\
                mock.patch.object(
                    self.agent,
                    '_setup_tunnel_port') as _setup_tunnel_port_fn,\
                mock.patch.object(
                    self.agent,
                    '_setup_tunnel_flood_flow') as _setup_tunnel_flood_flow:
            self.agent.tunnel_types = ['vxlan']
            self.agent.tunnel_sync()
            expected_calls = [mock.call(self.agent.tun_br, 'vxlan-c8c8c8c8',
                                        '200.200.200.200', 'vxlan'),
                              mock.call(self.agent.tun_br, 'vxlan-64646464',
                                        '100.100.100.100', 'vxlan')]
            _setup_tunnel_port_fn.assert_has_calls(expected_calls)
            _setup_tunnel_flood_flow.assert_called_once_with(
                self.agent.tun_br, 'vxlan')

    def test_tunnel_update(self):
        kwargs = {'tunnel_ip': '10.10.10.10',
                  'tunnel_type': 'gre'}
        self.agent._setup_tunnel_port = mock.Mock()
        self.agent.enable_tunneling = True
        self.agent.tunnel_types = ['gre']
        self.agent.l2_pop = False
        self.agent.tunnel_update(context=None, **kwargs)
        expected_calls = [
            mock.call(self.agent.tun_br, 'gre-0a0a0a0a',
                      '10.10.10.10', 'gre')]
        self.agent._setup_tunnel_port.assert_has_calls(expected_calls)
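
    # The port names asserted in the tunnel tests ('gre-0a0a0a0a',
    # 'vxlan-64651f0f', ...) are derived from the remote IP: each octet is
    # rendered as two hex digits and appended to the tunnel type.  A minimal
    # sketch of that derivation (hypothetical helper, not the agent's real
    # code):
    #
    #     def tunnel_port_name(tunnel_type, remote_ip):
    #         hex_ip = ''.join('%02x' % int(o) for o in remote_ip.split('.'))
    #         return '%s-%s' % (tunnel_type, hex_ip)
    #
    # e.g. tunnel_port_name('vxlan', '100.101.31.15') == 'vxlan-64651f0f'.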
    def test_tunnel_delete(self):
        kwargs = {'tunnel_ip': '10.10.10.10',
                  'tunnel_type': 'gre'}
        self.agent.enable_tunneling = True
        self.agent.tunnel_types = ['gre']
        self.agent.tun_br_ofports = {'gre': {'10.10.10.10': '1'}}
        with mock.patch.object(
            self.agent, 'cleanup_tunnel_port'
        ) as clean_tun_fn:
            self.agent.tunnel_delete(context=None, **kwargs)
            self.assertTrue(clean_tun_fn.called)

    def test_reset_tunnel_ofports(self):
        tunnel_handles = self.agent.tun_br_ofports
        self.agent.tun_br_ofports = {'gre': {'10.10.10.10': '1'}}
        self.agent._reset_tunnel_ofports()
        self.assertEqual(self.agent.tun_br_ofports, tunnel_handles)
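
    # The daemon-loop tests below have no clean way to stop the agent's
    # infinite loop, so they make one of the mocked calls raise a fake
    # exception (from _agent_has_updates, process_network_ports or the
    # exception logger) and swallow it around daemon_loop()/rpc_loop() to
    # exit after a known number of iterations.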
    def _test_ovs_status(self, *args):
        reply2 = {'current': set(['tap0']),
                  'added': set(['tap2']),
                  'removed': set([])}

        reply3 = {'current': set(['tap2']),
                  'added': set([]),
                  'removed': set(['tap0'])}

        reply_ancillary = {'current': set([]),
                           'added': set([]),
                           'removed': set([])}

        self.agent.enable_tunneling = True
        with mock.patch.object(async_process.AsyncProcess, "_spawn"),\
                mock.patch.object(async_process.AsyncProcess, "start"),\
                mock.patch.object(async_process.AsyncProcess, "is_active",
                                  return_value=True),\
                mock.patch.object(async_process.AsyncProcess, "stop"),\
                mock.patch.object(log.KeywordArgumentAdapter,
                                  'exception') as log_exception,\
                mock.patch.object(self.mod_agent.OVSNeutronAgent,
                                  'process_ports_events') as process_p_events,\
                mock.patch.object(
                    self.mod_agent.OVSNeutronAgent,
                    'process_network_ports') as process_network_ports,\
                mock.patch.object(self.mod_agent.OVSNeutronAgent,
                                  'check_ovs_status') as check_ovs_status,\
                mock.patch.object(self.mod_agent.OVSNeutronAgent,
                                  'setup_integration_br') as setup_int_br,\
                mock.patch.object(self.mod_agent.OVSNeutronAgent,
                                  'setup_physical_bridges') as setup_phys_br,\
                mock.patch.object(time, 'sleep'),\
                mock.patch.object(
                    self.mod_agent.OVSNeutronAgent,
                    'update_stale_ofport_rules') as update_stale, \
                mock.patch.object(self.mod_agent.OVSNeutronAgent,
                                  'cleanup_stale_flows') as cleanup, \
                mock.patch.object(self.mod_agent.OVSNeutronAgent,
                                  'setup_tunnel_br') as setup_tunnel_br,\
                mock.patch.object(
                    self.mod_agent.OVSNeutronAgent,
                    'setup_tunnel_br_flows') as setup_tunnel_br_flows,\
                mock.patch.object(
                    self.mod_agent.OVSNeutronAgent,
                    '_reset_tunnel_ofports') as reset_tunnel_ofports, \
                mock.patch.object(self.agent.ovs.ovsdb, 'idl_monitor'),\
                mock.patch.object(self.agent.state_rpc,
                                  'report_state') as report_st:
            log_exception.side_effect = Exception(
                'Fake exception to get out of the loop')
            devices_not_ready = set()
            process_p_events.side_effect = [
                (reply2, reply_ancillary, devices_not_ready),
                (reply3, reply_ancillary, devices_not_ready)]
            failed_devices = {'added': set(), 'removed': set()}
            failed_ancillary_devices = {'added': set(), 'removed': set()}
            process_network_ports.side_effect = [
                failed_devices,
                Exception('Fake exception to get out of the loop')]
            check_ovs_status.side_effect = args
            if self.agent.enable_tunneling:
                self.agent.agent_state.pop("start_flag")
            try:
                self.agent.daemon_loop()
            except Exception:
                pass

            process_p_events.assert_has_calls([
                mock.call({'removed': [], 'added': [], 'modified': []},
                          set(), set(), set(), failed_devices,
                          failed_ancillary_devices, set()),
                mock.call({'removed': [], 'added': [], 'modified': []},
                          set(['tap0']), set(), set(), failed_devices,
                          failed_ancillary_devices, set())
            ])

            process_network_ports.assert_has_calls([
                mock.call(reply2, False),
                mock.call(reply3, True)
            ])
            cleanup.assert_called_once_with()
            self.assertTrue(update_stale.called)
            # Verify that the OVS restart we triggered in the loop re-set up
            # the bridges.
            setup_int_br.assert_has_calls([mock.call()])
            setup_phys_br.assert_has_calls([mock.call({})])
            # Ensure that the tunnel ofport handles are reset and the tunnel
            # bridge and its flows are reconfigured.
            self.assertTrue(reset_tunnel_ofports.called)
            self.assertTrue(setup_tunnel_br_flows.called)
            self.assertTrue(setup_tunnel_br.called)
            if self.agent.enable_tunneling:
                self.agent.agent_state['start_flag'] = True
                report_st.assert_called_once_with(
                    self.agent.context, self.agent.agent_state, True)

    def test_ovs_status(self):
        self._test_ovs_status(constants.OVS_NORMAL,
                              constants.OVS_DEAD,
                              constants.OVS_RESTARTED)
        # The OVS status will not be DEAD for some exceptions, e.g.
        # DBConnectionError.
        self._test_ovs_status(constants.OVS_NORMAL,
                              constants.OVS_RESTARTED)

    def test_rpc_loop_fail_to_process_network_ports_keep_flows(self):
        with mock.patch.object(async_process.AsyncProcess, "_spawn"),\
                mock.patch.object(async_process.AsyncProcess, "start"),\
                mock.patch.object(async_process.AsyncProcess, "is_active",
                                  return_value=True),\
                mock.patch.object(async_process.AsyncProcess, "stop"),\
                mock.patch.object(
                    self.mod_agent.OVSNeutronAgent,
                    'process_network_ports') as process_network_ports,\
                mock.patch.object(self.mod_agent.OVSNeutronAgent,
                                  'check_ovs_status') as check_ovs_status,\
                mock.patch.object(time, 'sleep'),\
                mock.patch.object(
                    self.mod_agent.OVSNeutronAgent,
                    'update_stale_ofport_rules') as update_stale, \
                mock.patch.object(self.mod_agent.OVSNeutronAgent,
                                  'cleanup_stale_flows') as cleanup,\
                mock.patch.object(
                    self.mod_agent.OVSNeutronAgent,
                    '_check_and_handle_signal') as check_and_handle_signal, \
                mock.patch.object(self.agent.ovs.ovsdb, 'idl_monitor'):
            process_network_ports.side_effect = Exception("Trigger resync")
            check_ovs_status.return_value = constants.OVS_NORMAL
            check_and_handle_signal.side_effect = [True, False]
            self.agent.daemon_loop()
            self.assertTrue(update_stale.called)
            cleanup.assert_not_called()

    def test_set_rpc_timeout(self):
        with mock.patch.object(n_rpc.BackingOffClient,
                               'set_max_timeout') as smt:
            self.agent._handle_sigterm(None, None)
        for rpc_client in (self.agent.plugin_rpc.client,
                           self.agent.sg_plugin_rpc.client,
                           self.agent.dvr_plugin_rpc.client,
                           self.agent.state_rpc.client):
            smt.assert_called_with(10)

    def test_set_rpc_timeout_no_value(self):
        self.agent.quitting_rpc_timeout = None
        with mock.patch.object(self.agent, 'set_rpc_timeout') as mock_set_rpc:
            self.agent._handle_sigterm(None, None)
            mock_set_rpc.assert_not_called()

    def test_arp_spoofing_network_port(self):
        int_br = mock.create_autospec(self.agent.int_br)
        self.agent.setup_arp_spoofing_protection(
            int_br, FakeVif(),
            {'device_owner': n_const.DEVICE_OWNER_ROUTER_INTF})
        self.assertTrue(int_br.delete_arp_spoofing_protection.called)
        int_br.install_arp_spoofing_protection.assert_not_called()

    def test_arp_spoofing_port_security_disabled(self):
        int_br = mock.create_autospec(self.agent.int_br)
        self.agent.setup_arp_spoofing_protection(
            int_br, FakeVif(), {'port_security_enabled': False})
        self.assertTrue(int_br.delete_arp_spoofing_protection.called)
        int_br.install_arp_spoofing_protection.assert_not_called()

    def test_arp_spoofing_basic_rule_setup(self):
        vif = FakeVif()
        fake_details = {'fixed_ips': [], 'device_owner': 'nobody'}
        self.agent.prevent_arp_spoofing = True
        int_br = mock.create_autospec(self.agent.int_br)
        self.agent.setup_arp_spoofing_protection(int_br, vif, fake_details)
        int_br.delete_arp_spoofing_allow_rules.assert_has_calls(
            [mock.call(port=vif.ofport)])
        int_br.install_arp_spoofing_protection.assert_has_calls(
            [mock.call(ip_addresses=set(), port=vif.ofport)])

    def test_arp_spoofing_basic_rule_setup_fixed_ipv6(self):
        vif = FakeVif()
        fake_details = {'fixed_ips': [{'ip_address': 'fdf8:f53b:82e4::1'}],
                        'device_owner': 'nobody'}
        self.agent.prevent_arp_spoofing = True
        br = mock.create_autospec(self.agent.int_br)
        self.agent.setup_arp_spoofing_protection(br, vif, fake_details)
        br.delete_arp_spoofing_allow_rules.assert_has_calls(
            [mock.call(port=vif.ofport)])
        self.assertTrue(br.install_icmpv6_na_spoofing_protection.called)

    def test_arp_spoofing_fixed_and_allowed_addresses(self):
        vif = FakeVif()
        fake_details = {
            'device_owner': 'nobody',
            'fixed_ips': [{'ip_address': '192.168.44.100'},
                          {'ip_address': '192.168.44.101'}],
            'allowed_address_pairs': [{'ip_address': '192.168.44.102/32'},
                                      {'ip_address': '192.168.44.103/32'}]
        }
        self.agent.prevent_arp_spoofing = True
        int_br = mock.create_autospec(self.agent.int_br)
        self.agent.setup_arp_spoofing_protection(int_br, vif, fake_details)
        # make sure all addresses are allowed
        addresses = {'192.168.44.100', '192.168.44.101', '192.168.44.102/32',
                     '192.168.44.103/32'}
        int_br.install_arp_spoofing_protection.assert_has_calls(
            [mock.call(port=vif.ofport, ip_addresses=addresses)])

    def test_arp_spoofing_fixed_and_allowed_addresses_ipv6(self):
        vif = FakeVif()
        fake_details = {
            'device_owner': 'nobody',
            'fixed_ips': [{'ip_address': '2001:db8::1'},
                          {'ip_address': '2001:db8::2'}],
            'allowed_address_pairs': [{'ip_address': '2001:db8::200',
                                       'mac_address': 'aa:22:33:44:55:66'}]
        }
        self.agent.prevent_arp_spoofing = True
        int_br = mock.create_autospec(self.agent.int_br)
        self.agent.setup_arp_spoofing_protection(int_br, vif, fake_details)
        # make sure all addresses are allowed including ipv6 LLAs
        addresses = {'2001:db8::1', '2001:db8::2', '2001:db8::200',
                     'fe80::a822:33ff:fe44:5566', 'fe80::a8bb:ccff:fe11:2233'}
        int_br.install_icmpv6_na_spoofing_protection.assert_has_calls(
            [mock.call(port=vif.ofport, ip_addresses=addresses)])
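
    # The link-local addresses expected above are EUI-64 style IPv6 LLAs
    # derived from the MACs: flip the universal/local bit of the first octet
    # and insert ff:fe in the middle, so aa:22:33:44:55:66 becomes
    # fe80::a822:33ff:fe44:5566 (and the FakeVif MAC aa:bb:cc:11:22:33
    # becomes fe80::a8bb:ccff:fe11:2233).  A sketch of the same conversion,
    # assuming a netaddr-based implementation:
    #
    #     import netaddr
    #     netaddr.EUI('aa:22:33:44:55:66').ipv6_link_local()
    #     # -> IPAddress('fe80::a822:33ff:fe44:5566')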
    def test__get_ofport_moves(self):
        previous = {'port1': 1, 'port2': 2}
        current = {'port1': 5, 'port2': 2}
        # we expect it to tell us port1 moved
        expected = ['port1']
        self.assertEqual(expected,
                         self.agent._get_ofport_moves(current, previous))

    def test_update_stale_ofport_rules_clears_old(self):
        self.agent.prevent_arp_spoofing = True
        self.agent.vifname_to_ofport_map = {'port1': 1, 'port2': 2}
        self.agent.int_br = mock.Mock()
        # simulate port1 was removed
        newmap = {'port2': 2}
        self.agent.int_br.get_vif_port_to_ofport_map.return_value = newmap
        self.agent.update_stale_ofport_rules()
        # rules matching port 1 should have been deleted
        self.agent.int_br.delete_arp_spoofing_protection.assert_has_calls(
            [mock.call(port=1)])
        # make sure the state was updated with the new map
        self.assertEqual(newmap, self.agent.vifname_to_ofport_map)

    def test_update_stale_ofport_rules_treats_moved(self):
        self.agent.prevent_arp_spoofing = True
        self.agent.vifname_to_ofport_map = {'port1': 1, 'port2': 2}
        self.agent.treat_devices_added_or_updated = mock.Mock()
        self.agent.int_br = mock.Mock()
        # simulate port1 was moved
        newmap = {'port2': 2, 'port1': 90}
        self.agent.int_br.get_vif_port_to_ofport_map.return_value = newmap
        ofport_changed_ports = self.agent.update_stale_ofport_rules()
        self.assertEqual(['port1'], ofport_changed_ports)

    def test_update_stale_ofport_rules_removes_drop_flow(self):
        self.agent.prevent_arp_spoofing = False
        self.agent.vifname_to_ofport_map = {'port1': 1, 'port2': 2}
        self.agent.int_br = mock.Mock()
        # simulate port1 was removed
        newmap = {'port2': 2}
        self.agent.int_br.get_vif_port_to_ofport_map.return_value = newmap
        self.agent.update_stale_ofport_rules()
        # drop flow rule matching port 1 should have been deleted
        ofport_changed_ports = self.agent.update_stale_ofport_rules()
        expected = [
            mock.call(in_port=1)
        ]
        self.agent.int_br.uninstall_flows.assert_has_calls(expected)
        self.assertEqual(newmap, self.agent.vifname_to_ofport_map)
        self.agent.int_br.delete_arp_spoofing_protection.assert_not_called()
        self.assertEqual([], ofport_changed_ports)

    def test__setup_tunnel_port_while_new_mapping_is_added(self):
        """Test _setup_tunnel_port while a new mapping is added.

        Test that _setup_tunnel_port doesn't fail if a new vlan mapping is
        added in a different coroutine while iterating over the existing
        mappings.  See bug 1449944 for more info.
        """

        def add_new_vlan_mapping(*args, **kwargs):
            self.agent.vlan_manager.add('bar', 1, 2, 3, 4)
        bridge = mock.Mock()
        tunnel_type = 'vxlan'
        self.agent.tun_br_ofports = {tunnel_type: dict()}
        self.agent.l2_pop = False
        self.agent.vlan_manager.add('foo', 4, tunnel_type, 2, 1)
        self.agent.local_ip = '2.3.4.5'
        bridge.install_flood_to_tun.side_effect = add_new_vlan_mapping
        self.agent._setup_tunnel_port(bridge, 1, '1.2.3.4',
                                      tunnel_type=tunnel_type)
        self.agent._setup_tunnel_flood_flow(bridge, tunnel_type)
        self.assertIn('bar', self.agent.vlan_manager)

    def test_setup_entry_for_arp_reply_ignores_ipv6_addresses(self):
        self.agent.arp_responder_enabled = True
        ip = '2001:db8::1'
        br = mock.Mock()
        self.agent.setup_entry_for_arp_reply(
            br, 'add', mock.Mock(), mock.Mock(), ip)
        br.install_arp_responder.assert_not_called()

    def test_configurations_has_rp_bandwidth(self):
        self.assertIn(n_const.RP_BANDWIDTHS,
                      self.agent.agent_state['configurations'])

    def test_configurations_has_rp_default_inventory(self):
        self.assertIn(n_const.RP_INVENTORY_DEFAULTS,
                      self.agent.agent_state['configurations'])
        rp_inv_defaults = \
            self.agent.agent_state['configurations'][
                n_const.RP_INVENTORY_DEFAULTS]
        self.assertListEqual(
            sorted(['reserved', 'min_unit', 'allocation_ratio', 'step_size']),
            sorted(list(rp_inv_defaults)))
        self.assertEqual(1.0, rp_inv_defaults['allocation_ratio'])
        self.assertEqual(1, rp_inv_defaults['min_unit'])
        self.assertEqual(1, rp_inv_defaults['step_size'])
        self.assertEqual(0, rp_inv_defaults['reserved'])

    def test__validate_rp_bandwidth_bridges(self):
        cfg.CONF.set_override('bridge_mappings', [], 'OVS')
        cfg.CONF.set_override(n_const.RP_BANDWIDTHS,
                              ['no_such_br_in_bridge_mappings:1:1'],
                              'OVS')
        self.assertRaises(ValueError, self._make_agent)

    def test__check_bridge_datapath_id(self):
        datapath_id = u'0000622486fa3f42'
        datapath_ids_set = set()
        for i in range(5):
            dpid = format((i << 48) + int(datapath_id, 16), '0x').zfill(16)
            bridge = mock.Mock()
            bridge.br_name = 'bridge_%s' % i
            bridge.get_datapath_id = mock.Mock(return_value=datapath_id)
            self.agent._check_bridge_datapath_id(bridge, datapath_ids_set)
            self.assertEqual(i + 1, len(datapath_ids_set))
            self.assertIn(dpid, datapath_ids_set)
            if i == 0:
                bridge.set_datapath_id.assert_not_called()
            else:
                bridge.set_datapath_id.assert_called_once_with(dpid)
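
    # In test__check_bridge_datapath_id above, a duplicated datapath-id is
    # made unique by adding a per-bridge counter into the high-order bits:
    # dpid = format((i << 48) + int(datapath_id, 16), '0x').zfill(16), so
    # bridge_0 keeps '0000622486fa3f42' while bridge_1 must be re-assigned
    # '0001622486fa3f42', and so on.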
    def test__update_network_segmentation_id(self):
        network = {'id': 'my-net-uuid',
                   provider_net.SEGMENTATION_ID: 1005,
                   provider_net.PHYSICAL_NETWORK: 'provider_net',
                   provider_net.NETWORK_TYPE: n_const.TYPE_VLAN}
        self.agent.vlan_manager.add('my-net-uuid', 5, n_const.TYPE_VLAN,
                                    'provider_net', 1004, None)
        mock_phys_br = mock.Mock()
        self.agent.phys_brs['provider_net'] = mock_phys_br
        self.agent.phys_ofports['provider_net'] = 'phy_ofport'
        self.agent.int_ofports['provider_net'] = 'int_ofport'
        with mock.patch.object(self.agent.int_br, 'reclaim_local_vlan') \
                as mock_reclaim_local_vlan, \
                mock.patch.object(self.agent.int_br, 'provision_local_vlan') \
                as mock_provision_local_vlan:
            self.agent._update_network_segmentation_id(network)
            mock_reclaim_local_vlan.assert_called_once_with(
                port='int_ofport', segmentation_id=1004)
            mock_provision_local_vlan.assert_called_once_with(
                port='int_ofport', lvid=5, segmentation_id=1005)
            mock_phys_br.reclaim_local_vlan.assert_called_once_with(
                port='phy_ofport', lvid=5)

    def test__update_network_segmentation_id_not_vlan(self):
        network = {provider_net.NETWORK_TYPE: 'not_vlan'}
        with mock.patch.object(self.agent.vlan_manager, 'get') as mock_get:
            self.agent._update_network_segmentation_id(network)
            mock_get.assert_not_called()

    def test__update_network_segmentation_id_vlan_not_found(self):
        network = {'id': 'my-net-uuid',
                   provider_net.SEGMENTATION_ID: 1005,
                   provider_net.NETWORK_TYPE: n_const.TYPE_VLAN,
                   provider_net.PHYSICAL_NETWORK: 'default_network'}
        with mock.patch.object(self.agent.vlan_manager,
                               'update_segmentation_id') as mock_update_segid:
            self.agent._update_network_segmentation_id(network)
            mock_update_segid.assert_not_called()

    def test__update_network_segmentation_id_segmentation_id_not_updated(
            self):
        network = {'id': 'my-net-uuid',
                   provider_net.SEGMENTATION_ID: 1005,
                   provider_net.NETWORK_TYPE: n_const.TYPE_VLAN,
                   provider_net.PHYSICAL_NETWORK: 'default_network'}
        self.agent.vlan_manager.add('my-net-uuid', 5, n_const.TYPE_VLAN,
                                    'provider_net', 1005, None)
        with mock.patch.object(self.agent.vlan_manager,
                               'update_segmentation_id') as mock_update_segid:
            self.agent._update_network_segmentation_id(network)
            mock_update_segid.assert_not_called()

    def test__update_network_segmentation_id_multisegments(self):
        network = {'id': 'my-net-uuid',
                   'segments': [mock.ANY]}
        with mock.patch.object(self.agent.vlan_manager, 'get') as mock_get:
            self.agent._update_network_segmentation_id(network)
            mock_get.assert_not_called()

    def _test_treat_smartnic_port(self, vif_type):
        vm_uuid = "407a79e0-e0be-4b7d-92a6-513b2161011b"
        iface_id = "407a79e0-e0be-4b7d-92a6-513b2161011c"
        rep_port = 'rep0-0'
        mac = FAKE_MAC
        smartnic_data = {
            'mac': mac,
            'vm_uuid': vm_uuid,
            'vif_name': rep_port,
            'iface_id': iface_id,
            'vif_type': vif_type}

        cfg.CONF.set_default('baremetal_smartnic', True, group='AGENT')
        agent = self._make_agent()

        instance_info = vif_instance_object.InstanceInfo(uuid=vm_uuid)
        vif = agent._get_vif_object(iface_id, rep_port, mac)
        with mock.patch.object(os_vif, 'plug') as plug_mock, \
                mock.patch.object(os_vif, 'unplug') as unplug_mock, \
                mock.patch('os_vif.objects.instance_info.InstanceInfo',
                           return_value=instance_info), \
                mock.patch.object(agent, '_get_vif_object',
                                  return_value=vif):

            agent.treat_smartnic_port(smartnic_data)

            if vif_type == portbindings.VIF_TYPE_OVS:
                plug_mock.assert_called_once_with(vif, instance_info)
            else:
                unplug_mock.assert_called_once_with(vif, instance_info)

    def test_treat_smartnic_port_add(self):
        self._test_treat_smartnic_port('ovs')

    def test_treat_smartnic_port_remove(self):
        self._test_treat_smartnic_port('unbound')

    def test_process_smartnic_ports_remove(self):
        port_id = "407a79e0-e0be-4b7d-92a6-513b2161011a"
        rep_port = 'rep0-0'
        mac = FAKE_MAC
        ovs_port = mock.Mock()
        ovs_port.vif_mac = mac
        ovs_port.port_name = rep_port
        ovs_port.vif_id = port_id
        ports_int_br = [ovs_port]
        expected_smartnic_ports_processed_list = [
            {'iface_id': port_id,
             'vif_name': rep_port,
             'mac': mac,
             'vif_type': portbindings.VIF_TYPE_UNBOUND,
             'vm_uuid': ''}]
        expected_current_smartnic_ports_map = {
            port_id: {
                'vif_mac': mac,
                'vif_name': rep_port}}
        with mock.patch.object(self.agent.plugin_rpc,
                               "get_ports_by_vnic_type_and_host",
                               return_value=[]),\
                mock.patch.object(self.agent.int_br,
                                  "get_vif_ports",
                                  return_value=ports_int_br):
            self.agent.process_smartnic_ports()
            self.assertEqual(expected_smartnic_ports_processed_list,
                             self.agent.updated_smartnic_ports)
            self.assertEqual(expected_current_smartnic_ports_map,
                             self.agent.current_smartnic_ports_map)

    def test_process_smartnic_ports(self):
        port_id = "407a79e0-e0be-4b7d-92a6-513b2161011a"
        rep_port = 'rep0-0'
        mac = FAKE_MAC
        ovs_port = mock.Mock()
        ovs_port.vif_mac = mac
        ovs_port.port_name = rep_port
        ovs_port.vif_id = port_id
        ports_int_br = [ovs_port]

        PORT_TO_PROCESS = {
            'binding:profile': {'local_link_information': [
                {'hostname': 'host1', 'port_id': rep_port}]},
            'mac_address': FAKE_MAC,
            'device_id': "407a79e0-e0be-4b7d-92a6-513b2161011e",
            'id': "407a79e0-e0be-4b7d-92a6-513b2161011c",
            'binding:vif_type': portbindings.VIF_TYPE_OVS
        }
        expected_smartnic_ports_processed_list = [
            {'iface_id': port_id,
             'vif_name': rep_port,
             'mac': mac,
             'vif_type': portbindings.VIF_TYPE_UNBOUND,
             'vm_uuid': ''},
            {'iface_id': "407a79e0-e0be-4b7d-92a6-513b2161011c",
             'vif_name': rep_port,
             'mac': mac,
             'vif_type': portbindings.VIF_TYPE_OVS,
             'vm_uuid': "407a79e0-e0be-4b7d-92a6-513b2161011e"}]
        expected_current_smartnic_ports_map = {
            port_id: {
                'vif_mac': mac,
                'vif_name': rep_port}}
        with mock.patch.object(self.agent.plugin_rpc,
                               "get_ports_by_vnic_type_and_host",
                               return_value=[PORT_TO_PROCESS]),\
                mock.patch.object(self.agent.int_br,
                                  "get_vif_ports",
                                  return_value=ports_int_br):
            self.agent.process_smartnic_ports()
            self.assertEqual(expected_smartnic_ports_processed_list,
                             self.agent.updated_smartnic_ports)
            self.assertEqual(expected_current_smartnic_ports_map,
                             self.agent.current_smartnic_ports_map)

    def test_add_bound_port_to_updated_smartnic_ports(self):
        mac = FAKE_MAC
        vm_uuid = "407a79e0-e0be-4b7d-92a6-513b2161011b"
        rep_port = 'rep0-0'
        iface_id = "407a79e0-e0be-4b7d-92a6-513b2161011c"
        self.agent._add_port_to_updated_smartnic_ports(
            mac, rep_port, iface_id, portbindings.VIF_TYPE_OVS, vm_uuid,)
        smartnic_data = {
            'mac': mac,
            'vm_uuid': vm_uuid,
            'vif_name': rep_port,
            'iface_id': iface_id,
            'vif_type': portbindings.VIF_TYPE_OVS}
        self.assertEqual([smartnic_data], self.agent.updated_smartnic_ports)

    def test_add_unbound_port_to_updated_smartnic_ports(self):
        vif_mac = FAKE_MAC
        vif_name = 'rep0-0'
        vif_id = "407a79e0-e0be-4b7d-92a6-513b2161011a"
        self.agent._add_port_to_updated_smartnic_ports(
            vif_mac, vif_name, vif_id, portbindings.VIF_TYPE_UNBOUND)
        smartnic_data = {
            'mac': vif_mac,
            'vm_uuid': '',
            'vif_name': vif_name,
            'iface_id': vif_id,
            'vif_type': portbindings.VIF_TYPE_UNBOUND}
        self.assertEqual([smartnic_data], self.agent.updated_smartnic_ports)

    def test_create_smartnic_port_map_entry_data(self):
        mac = FAKE_MAC
        rep_port = 'rep0-0'
        expected_return_value = {"vif_mac": mac,
                                 "vif_name": rep_port}
        int_br_smartnic_port_map = \
            self.agent.create_smartnic_port_map_entry_data(mac, rep_port)
        self.assertEqual(int_br_smartnic_port_map, expected_return_value)
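
# In the OSKen variant below, cleanup_stale_flows() treats every dumped flow
# whose cookie differs from the agent's uuid stamp (1234 here) as stale and
# removes it with cookie_mask=(1 << 64) - 1, i.e. an exact match on all 64
# cookie bits; the uninstall is repeated once per integration-bridge table.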
class TestOvsNeutronAgentOSKen(TestOvsNeutronAgent,
                               ovs_test_base.OVSOSKenTestBase):
    def test_cleanup_stale_flows(self):
        uint64_max = (1 << 64) - 1
        with mock.patch.object(self.agent.int_br,
                               'dump_flows') as dump_flows,\
                mock.patch.object(self.agent.int_br,
                                  'uninstall_flows') as uninstall_flows:
            self.agent.int_br.set_agent_uuid_stamp(1234)
            fake_flows = [
                # mock os_ken.ofproto.ofproto_v1_3_parser.OFPFlowStats
                mock.Mock(cookie=1234, table_id=0),
                mock.Mock(cookie=17185, table_id=2),
                mock.Mock(cookie=9029, table_id=2),
                mock.Mock(cookie=1234, table_id=3),
            ]
            dump_flows.return_value = fake_flows
            self.agent.iter_num = 3
            self.agent.cleanup_stale_flows()

            dump_flows_expected = [
                mock.call(tid) for tid in constants.INT_BR_ALL_TABLES]
            dump_flows.assert_has_calls(dump_flows_expected)

            expected = [mock.call(cookie=17185, cookie_mask=uint64_max),
                        mock.call(cookie=9029, cookie_mask=uint64_max)]
            uninstall_flows.assert_has_calls(expected, any_order=True)
            self.assertEqual(
                len(constants.INT_BR_ALL_TABLES) * len(expected),
                len(uninstall_flows.mock_calls))


class AncillaryBridgesTest(object):

    def setUp(self):
        super(AncillaryBridgesTest, self).setUp()
        conn_patcher = mock.patch(
            'neutron.agent.ovsdb.impl_idl._connection')
        conn_patcher.start()
        self.addCleanup(conn_patcher.stop)
        mock.patch(PULLAPI).start()
        notifier_p = mock.patch(NOTIFIER)
        notifier_cls = notifier_p.start()
        self.notifier = mock.Mock()
        notifier_cls.return_value = self.notifier
        cfg.CONF.set_default('firewall_driver',
                             'neutron.agent.firewall.NoopFirewallDriver',
                             group='SECURITYGROUP')
        cfg.CONF.set_override('report_interval', 0, 'AGENT')
        mock.patch('neutron.agent.common.ovs_lib.BaseOVS.config',
                   new_callable=mock.PropertyMock,
                   return_value={}).start()

    def _test_ancillary_bridges(self, bridges, ancillary):
        device_ids = ancillary[:]

        def pullup_side_effect(*args, **kwargs):
            # Return the device_id if it is a known ancillary bridge;
            # otherwise return None.
            try:
                device_ids.remove(args[0])
                return args[0]
            except Exception:
                return None

        with mock.patch.object(self.mod_agent.OVSNeutronAgent,
                               'setup_integration_br'),\
                mock.patch('neutron.agent.linux.ip_lib.get_device_mac',
                           return_value='00:00:00:00:00:01'),\
                mock.patch('neutron.agent.common.ovs_lib.BaseOVS.get_bridges',
                           return_value=bridges),\
                mock.patch('neutron.agent.common.ovs_lib.BaseOVS.'
                           'get_bridge_external_bridge_id',
                           side_effect=pullup_side_effect),\
                mock.patch(
                    'neutron.agent.common.ovs_lib.OVSBridge.'
                    'get_ports_attributes',
                    return_value=[]),\
                mock.patch(
                    'neutron.agent.common.ovs_lib.OVSBridge.get_vif_ports',
                    return_value=[]),\
                mock.patch('neutron.agent.rpc.PluginReportStateAPI.'
                           'has_alive_neutron_server'):
            ext_manager = mock.Mock()
            self.agent = self.mod_agent.OVSNeutronAgent(
                self._bridge_classes(), ext_manager, cfg.CONF)
            self.assertEqual(len(ancillary), len(self.agent.ancillary_brs))
            if ancillary:
                bridges = [br.br_name for br in self.agent.ancillary_brs]
                for br in ancillary:
                    self.assertIn(br, bridges)

    def test_ancillary_bridges_single(self):
        bridges = ['br-int', 'br-ex']
        self._test_ancillary_bridges(bridges, ['br-ex'])

    def test_ancillary_bridges_none(self):
        bridges = ['br-int']
        self._test_ancillary_bridges(bridges, [])

    def test_ancillary_bridges_multiple(self):
        bridges = ['br-int', 'br-ex1', 'br-ex2']
        self._test_ancillary_bridges(bridges,
                                     ['br-ex1', 'br-ex2'])

    def mock_scan_ancillary_ports(self, vif_port_set=None,
                                  registered_ports=None, sync=False):
        bridges = ['br-int', 'br-ex']
        ancillary = ['br-ex']
        with mock.patch.object(self.mod_agent.OVSNeutronAgent,
                               'setup_integration_br'), \
                mock.patch.object(self.mod_agent.OVSNeutronAgent,
                                  '_restore_local_vlan_map'), \
                mock.patch('neutron.agent.common.ovs_lib.BaseOVS.get_bridges',
                           return_value=bridges), \
                mock.patch('neutron.agent.common.ovs_lib.BaseOVS.'
                           'get_bridge_external_bridge_id',
                           side_effect=ancillary), \
                mock.patch('neutron.agent.common.ovs_lib.OVSBridge.'
                           'get_vif_port_set',
                           return_value=vif_port_set),\
                mock.patch('neutron.agent.rpc.PluginReportStateAPI.'
                           'has_alive_neutron_server'):
            ext_manager = mock.Mock()
            self.agent = self.mod_agent.OVSNeutronAgent(
                self._bridge_classes(), ext_manager, cfg.CONF)
            return self.agent.scan_ancillary_ports(registered_ports, sync)

    def test_scan_ancillary_ports_returns_cur_only_for_unchanged_ports(self):
        vif_port_set = set([1, 2])
        registered_ports = set([1, 2])
        expected = ovs_agent.PortInfo(current=vif_port_set)
        actual = self.mock_scan_ancillary_ports(vif_port_set,
                                                registered_ports)
        self.assertEqual(expected, actual)

    def test_scan_ancillary_ports_returns_port_changes(self):
        vif_port_set = set([1, 3])
        registered_ports = set([1, 2])
        expected = ovs_agent.PortInfo(current=vif_port_set, added={3},
                                      removed={2})
        actual = self.mock_scan_ancillary_ports(vif_port_set,
                                                registered_ports)
        self.assertEqual(expected, actual)

    def test_scan_ancillary_ports_returns_port_changes_with_sync(self):
        vif_port_set = set([1, 3])
        registered_ports = set([1, 2])
        expected = ovs_agent.PortInfo(current=vif_port_set,
                                      added=vif_port_set,
                                      removed={2})
        actual = self.mock_scan_ancillary_ports(vif_port_set,
                                                registered_ports,
                                                sync=True)
        self.assertEqual(expected, actual)


class AncillaryBridgesTestOSKen(AncillaryBridgesTest,
                                ovs_test_base.OVSOSKenTestBase):
    pass

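
# scan_ancillary_ports() reports changes as set differences: 'added' is
# current - registered and 'removed' is registered - current, while
# sync=True makes every current port be reported as added so the caller
# re-processes all of them, as the three tests above demonstrate.
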
class TestOvsDvrNeutronAgent(object):

    def setUp(self):
        super(TestOvsDvrNeutronAgent, self).setUp()
        mock.patch(PULLAPI).start()
        notifier_p = mock.patch(NOTIFIER)
        notifier_cls = notifier_p.start()
        self.notifier = mock.Mock()
        notifier_cls.return_value = self.notifier
        cfg.CONF.set_default('firewall_driver',
                             'neutron.agent.firewall.NoopFirewallDriver',
                             group='SECURITYGROUP')
        mock.patch('neutron.agent.common.ovs_lib.BaseOVS.config',
                   new_callable=mock.PropertyMock,
                   return_value={}).start()
        mock.patch('neutron.agent.ovsdb.impl_idl._connection').start()
        with mock.patch.object(self.mod_agent.OVSNeutronAgent,
                               'setup_integration_br'),\
                mock.patch.object(self.mod_agent.OVSNeutronAgent,
                                  'setup_ancillary_bridges',
                                  return_value=[]),\
                mock.patch('neutron.agent.linux.ip_lib.get_device_mac',
                           return_value='00:00:00:00:00:01'),\
                mock.patch(
                    'neutron.agent.common.ovs_lib.BaseOVS.get_bridges'),\
                mock.patch('oslo_service.loopingcall.'
                           'FixedIntervalLoopingCall',
                           new=MockFixedIntervalLoopingCall),\
                mock.patch(
                    'neutron.agent.common.ovs_lib.OVSBridge.'
                    'get_ports_attributes',
                    return_value=[]),\
                mock.patch(
                    'neutron.agent.common.ovs_lib.OVSBridge.get_vif_ports',
                    return_value=[]),\
                mock.patch('neutron.agent.rpc.PluginReportStateAPI.'
                           'has_alive_neutron_server'):
            ext_manager = mock.Mock()
            self.agent = self.mod_agent.OVSNeutronAgent(
                self._bridge_classes(), ext_manager, cfg.CONF)
            self.agent.tun_br = self.br_tun_cls(br_name='br-tun')
        self.agent.sg_agent = mock.Mock()

    def _setup_for_dvr_test(self):
        self._port = mock.Mock()
        self._port.ofport = 10
        self._port.vif_id = "1234-5678-90"
        self._physical_network = 'physeth1'
        self._old_local_vlan = None
        self._segmentation_id = 2001
        self.agent.enable_distributed_routing = True
        self.agent.enable_tunneling = True
        self.agent.patch_tun_ofport = 1
        self.agent.patch_int_ofport = 2
        self.agent.dvr_agent.local_ports = {}
        self.agent.vlan_manager = self.useFixture(
            test_vlanmanager.LocalVlanManagerFixture()).manager
        self.agent.dvr_agent.enable_distributed_routing = True
        self.agent.dvr_agent.enable_tunneling = True
        self.agent.dvr_agent.patch_tun_ofport = 1
        self.agent.dvr_agent.patch_int_ofport = 2
        self.agent.dvr_agent.tun_br = mock.Mock()
        self.agent.dvr_agent.phys_brs[self._physical_network] = mock.Mock()
        self.agent.dvr_agent.bridge_mappings = {self._physical_network:
                                                'br-eth1'}
        self.agent.dvr_agent.int_ofports[self._physical_network] = 30
        self.agent.dvr_agent.phys_ofports[self._physical_network] = 40
        self.agent.dvr_agent.local_dvr_map = {}
        self.agent.dvr_agent.registered_dvr_macs = set()
        self.agent.dvr_agent.dvr_mac_address = 'aa:22:33:44:55:66'
        self._net_uuid = 'my-net-uuid'
        self._fixed_ips = [{'subnet_id': 'my-subnet-uuid',
                            'ip_address': '1.1.1.1'}]
        self._compute_port = mock.Mock()
        self._compute_port.ofport = 20
        self._compute_port.vif_id = "1234-5678-91"
        self._compute_fixed_ips = [{'subnet_id': 'my-subnet-uuid',
                                    'ip_address': '1.1.1.3'}]

    @staticmethod
    def _expected_port_bound(port, lvid, is_dvr=True,
                             network_type=n_const.TYPE_VXLAN):
        resp = [
            mock.call.db_get_val('Port', port.port_name, 'other_config'),
            mock.call.set_db_attribute('Port', port.port_name, 'other_config',
                                       mock.ANY),
        ]
        if is_dvr:
            resp = [
                mock.call.get_vifs_by_ids([]),
                mock.call.install_dvr_dst_mac_for_arp(
                    network_type, dvr_mac=port.dvr_mac,
                    gateway_mac=port.vif_mac, rtr_port=port.ofport,
                    vlan_tag=lvid)
            ] + resp
        return resp

    @staticmethod
    def _expected_port_unbound(port, lvid, is_dvr=True,
                               network_type=n_const.TYPE_VXLAN):
        resp = []
        if is_dvr:
            resp = [
                mock.call.delete_dvr_dst_mac_for_arp(
                    network_type=network_type, dvr_mac=port.dvr_mac,
                    gateway_mac=port.vif_mac, rtr_port=port.ofport,
                    vlan_tag=lvid)
            ]
        return resp

    def _expected_install_dvr_process(self, lvid, port, ip_version,
                                      gateway_ip):
        if ip_version == n_const.IP_VERSION_4:
            ipvx_calls = [
                mock.call.install_dvr_process_ipv4(
                    vlan_tag=lvid,
                    gateway_ip=gateway_ip),
            ]
        else:
            ipvx_calls = [
                mock.call.install_dvr_process_ipv6(
                    vlan_tag=lvid,
                    gateway_mac=port.vif_mac),
            ]
        return ipvx_calls + [
            mock.call.install_dvr_process(
                vlan_tag=lvid,
                dvr_mac_address=self.agent.dvr_agent.dvr_mac_address,
                vif_mac=port.vif_mac,
            ),
        ]
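
    # The _expected_* helpers above centralize the mock.call sequences that
    # port_bound()/treat_devices_removed() should produce for DVR ports, so
    # the vlan/vxlan test variants below only differ in which bridge
    # (physical vs. tunnel) receives provision_local_vlan and the DVR
    # process flows.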
    def _test_port_bound_for_dvr_on_vlan_network(
            self, device_owner, ip_version=n_const.IP_VERSION_4):
        self._setup_for_dvr_test()
        if ip_version == n_const.IP_VERSION_4:
            gateway_ip = '1.1.1.10'
            cidr = '1.1.1.0/24'
        else:
            gateway_ip = '2001:100::1'
            cidr = '2001:100::0/64'
        self._port.vif_mac = 'aa:bb:cc:11:22:33'
        self._port.dvr_mac = self.agent.dvr_agent.dvr_mac_address
        gateway_mac = 'aa:bb:cc:66:66:66'
        self._compute_port.vif_mac = '77:88:99:00:11:22'
        physical_network = self._physical_network
        segmentation_id = self._segmentation_id
        network_type = n_const.TYPE_VLAN
        int_br = mock.create_autospec(self.agent.int_br)
        tun_br = mock.create_autospec(self.agent.tun_br)
        phys_br = mock.create_autospec(self.br_phys_cls('br-phys'))
        int_br.set_db_attribute.return_value = True
        int_br.db_get_val.return_value = {}
        with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
                               'get_subnet_for_dvr',
                               return_value={'gateway_ip': gateway_ip,
                                             'cidr': cidr,
                                             'ip_version': ip_version,
                                             'gateway_mac': gateway_mac}),\
                mock.patch.object(self.agent.dvr_agent.plugin_rpc,
                                  'get_ports_on_host_by_subnet',
                                  return_value=[]),\
                mock.patch.object(self.agent.dvr_agent.int_br,
                                  'get_vif_port_by_id',
                                  return_value=self._port),\
                mock.patch.object(self.agent, 'int_br', new=int_br),\
                mock.patch.object(self.agent, 'tun_br', new=tun_br),\
                mock.patch.dict(self.agent.phys_brs,
                                {physical_network: phys_br}),\
                mock.patch.object(self.agent.dvr_agent, 'int_br',
                                  new=int_br),\
                mock.patch.object(self.agent.dvr_agent, 'tun_br',
                                  new=tun_br),\
                mock.patch.dict(self.agent.dvr_agent.phys_brs,
                                {physical_network: phys_br}):
            self.agent.port_bound(
                self._port, self._net_uuid, network_type,
                physical_network, segmentation_id, self._fixed_ips,
                n_const.DEVICE_OWNER_DVR_INTERFACE, False)
            phy_ofp = self.agent.dvr_agent.phys_ofports[physical_network]
            int_ofp = self.agent.dvr_agent.int_ofports[physical_network]
            lvid = self.agent.vlan_manager.get(self._net_uuid).vlan
            expected_on_phys_br = [
                mock.call.provision_local_vlan(
                    port=phy_ofp,
                    lvid=lvid,
                    segmentation_id=segmentation_id,
                    distributed=True,
                ),
            ] + self._expected_install_dvr_process(
                port=self._port,
                lvid=lvid,
                ip_version=ip_version,
                gateway_ip=self._fixed_ips[0]['ip_address'])
            expected_on_int_br = [
                mock.call.provision_local_vlan(
                    port=int_ofp,
                    lvid=lvid,
                    segmentation_id=segmentation_id,
                ),
            ] + self._expected_port_bound(self._port, lvid,
                                          network_type=network_type)
            int_br.assert_has_calls(expected_on_int_br)
            tun_br.assert_not_called()
            phys_br.assert_has_calls(expected_on_phys_br)
            int_br.reset_mock()
            tun_br.reset_mock()
            phys_br.reset_mock()
            self.agent.port_bound(self._compute_port, self._net_uuid,
                                  network_type, physical_network,
                                  segmentation_id,
                                  self._compute_fixed_ips,
                                  device_owner, False)
            expected_on_int_br = [
                mock.call.install_dvr_to_src_mac(
                    network_type=network_type,
                    gateway_mac=gateway_mac,
                    dst_mac=self._compute_port.vif_mac,
                    dst_port=self._compute_port.ofport,
                    vlan_tag=segmentation_id,
                ),
            ] + self._expected_port_bound(self._compute_port, lvid,
                                          is_dvr=False,
                                          network_type=network_type)
            int_br.assert_has_calls(expected_on_int_br)
            tun_br.assert_not_called()
            phys_br.assert_not_called()

    def _test_port_bound_for_dvr_on_vxlan_network(
            self, device_owner, ip_version=n_const.IP_VERSION_4):
        self._setup_for_dvr_test()
        if ip_version == n_const.IP_VERSION_4:
            gateway_ip = '1.1.1.1'
            cidr = '1.1.1.0/24'
        else:
            gateway_ip = '2001:100::1'
            cidr = '2001:100::0/64'
        network_type = n_const.TYPE_VXLAN
        self._port.vif_mac = gateway_mac = 'aa:bb:cc:11:22:33'
        self._port.dvr_mac = self.agent.dvr_agent.dvr_mac_address
        self._compute_port.vif_mac = '77:88:99:00:11:22'
        physical_network = self._physical_network
        segmentation_id = self._segmentation_id
        int_br = mock.create_autospec(self.agent.int_br)
        tun_br = mock.create_autospec(self.agent.tun_br)
        phys_br = mock.create_autospec(self.br_phys_cls('br-phys'))
        int_br.set_db_attribute.return_value = True
        int_br.db_get_val.return_value = {}
        with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
                               'get_subnet_for_dvr',
                               return_value={'gateway_ip': gateway_ip,
                                             'cidr': cidr,
                                             'ip_version': ip_version,
                                             'gateway_mac': gateway_mac}),\
                mock.patch.object(self.agent.dvr_agent.plugin_rpc,
                                  'get_ports_on_host_by_subnet',
                                  return_value=[]),\
                mock.patch.object(self.agent.dvr_agent.int_br,
                                  'get_vif_port_by_id',
                                  return_value=self._port),\
                mock.patch.object(self.agent, 'int_br', new=int_br),\
                mock.patch.object(self.agent, 'tun_br', new=tun_br),\
                mock.patch.dict(self.agent.phys_brs,
                                {physical_network: phys_br}),\
                mock.patch.object(self.agent.dvr_agent, 'int_br',
                                  new=int_br),\
                mock.patch.object(self.agent.dvr_agent, 'tun_br',
                                  new=tun_br),\
                mock.patch.dict(self.agent.dvr_agent.phys_brs,
                                {physical_network: phys_br}):
            self.agent.port_bound(
                self._port, self._net_uuid, network_type,
                physical_network, segmentation_id, self._fixed_ips,
                n_const.DEVICE_OWNER_DVR_INTERFACE, False)
            lvid = self.agent.vlan_manager.get(self._net_uuid).vlan
            expected_on_int_br = self._expected_port_bound(
                self._port, lvid)
            expected_on_tun_br = [
                mock.call.provision_local_vlan(
                    network_type=network_type,
                    segmentation_id=segmentation_id,
                    lvid=lvid,
                    distributed=True),
            ] + self._expected_install_dvr_process(
                port=self._port,
                lvid=lvid,
                ip_version=ip_version,
                gateway_ip=gateway_ip)
            int_br.assert_has_calls(expected_on_int_br)
            tun_br.assert_has_calls(expected_on_tun_br)
            phys_br.assert_not_called()
            int_br.reset_mock()
            tun_br.reset_mock()
            phys_br.reset_mock()
            self.agent.port_bound(self._compute_port, self._net_uuid,
                                  network_type, physical_network,
                                  segmentation_id,
                                  self._compute_fixed_ips,
                                  device_owner, False)
            expected_on_int_br = [
                mock.call.install_dvr_to_src_mac(
                    network_type=network_type,
                    gateway_mac=gateway_mac,
                    dst_mac=self._compute_port.vif_mac,
                    dst_port=self._compute_port.ofport,
                    vlan_tag=lvid,
                ),
            ] + self._expected_port_bound(self._compute_port, lvid, False,
                                          network_type)
            int_br.assert_has_calls(expected_on_int_br)
            tun_br.assert_not_called()
            phys_br.assert_not_called()

    def test_port_bound_for_dvr_with_compute_ports(self):
        self._test_port_bound_for_dvr_on_vlan_network(
            device_owner=DEVICE_OWNER_COMPUTE)
        self._test_port_bound_for_dvr_on_vlan_network(
            device_owner=DEVICE_OWNER_COMPUTE,
            ip_version=n_const.IP_VERSION_6)
        self._test_port_bound_for_dvr_on_vxlan_network(
            device_owner=DEVICE_OWNER_COMPUTE)
        self._test_port_bound_for_dvr_on_vxlan_network(
            device_owner=DEVICE_OWNER_COMPUTE,
            ip_version=n_const.IP_VERSION_6)

    def test_port_bound_for_dvr_with_dhcp_ports(self):
        self._test_port_bound_for_dvr_on_vlan_network(
            device_owner=n_const.DEVICE_OWNER_DHCP)
        self._test_port_bound_for_dvr_on_vlan_network(
            device_owner=n_const.DEVICE_OWNER_DHCP,
            ip_version=n_const.IP_VERSION_6)
        self._test_port_bound_for_dvr_on_vxlan_network(
            device_owner=n_const.DEVICE_OWNER_DHCP)
        self._test_port_bound_for_dvr_on_vxlan_network(
            device_owner=n_const.DEVICE_OWNER_DHCP,
            ip_version=n_const.IP_VERSION_6)

    def test_port_bound_for_dvr_with_csnat_ports(self):
        self._setup_for_dvr_test()
        int_br, tun_br = self._port_bound_for_dvr_with_csnat_ports()
        lvid = self.agent.vlan_manager.get(self._net_uuid).vlan
        expected_on_int_br = [
            mock.call.install_dvr_to_src_mac(
                network_type='vxlan',
                gateway_mac='aa:bb:cc:11:22:33',
                dst_mac=self._port.vif_mac,
                dst_port=self._port.ofport,
                vlan_tag=lvid,
            ),
        ] + self._expected_port_bound(self._port, lvid, is_dvr=False)
        int_br.assert_has_calls(expected_on_int_br)
        expected_on_tun_br = [
            mock.call.provision_local_vlan(
                network_type='vxlan',
                lvid=lvid,
                segmentation_id=None,
                distributed=True,
            ),
        ]
        tun_br.assert_has_calls(expected_on_tun_br)

    def test_port_bound_for_dvr_with_csnat_port_without_passing_fixed_ip(
            self):
        self._setup_for_dvr_test()
        int_br = mock.create_autospec(self.agent.int_br)
        tun_br = mock.create_autospec(self.agent.tun_br)
        int_br.set_db_attribute.return_value = True
        int_br.db_get_val.return_value = {}
        with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
                               'get_subnet_for_dvr') as mock_getsub,\
                mock.patch.object(self.agent.dvr_agent.plugin_rpc,
                                  'get_ports_on_host_by_subnet',
                                  return_value=[]),\
                mock.patch.object(self.agent.dvr_agent.int_br,
                                  'get_vif_port_by_id',
                                  return_value=self._port),\
                mock.patch.object(self.agent, 'int_br', new=int_br),\
                mock.patch.object(self.agent, 'tun_br', new=tun_br),\
                mock.patch.object(self.agent.dvr_agent, 'int_br',
                                  new=int_br),\
                mock.patch.object(self.agent.dvr_agent, 'tun_br',
                                  new=tun_br):
            self.agent.port_bound(
                self._port, self._net_uuid, 'vxlan',
                None, None, self._fixed_ips,
                n_const.DEVICE_OWNER_ROUTER_SNAT,
                False)
            mock_getsub.assert_called_with(
                self.agent.context, mock.ANY, fixed_ips=None)

    def test_port_bound_for_dvr_with_csnat_ports_ofport_change(self):
        self._setup_for_dvr_test()
        self._port_bound_for_dvr_with_csnat_ports()
        # simulate a replug
        self._port.ofport = 12
        int_br, tun_br = self._port_bound_for_dvr_with_csnat_ports()
        lvid = self.agent.vlan_manager.get(self._net_uuid).vlan
        expected_on_int_br = [
            mock.call.delete_dvr_to_src_mac(
                network_type='vxlan',
                dst_mac=self._port.vif_mac,
                vlan_tag=lvid,
            ),
            mock.call.install_dvr_to_src_mac(
                network_type='vxlan',
                gateway_mac='aa:bb:cc:11:22:33',
                dst_mac=self._port.vif_mac,
                dst_port=self._port.ofport,
                vlan_tag=lvid,
            ),
        ] + self._expected_port_bound(self._port, lvid, is_dvr=False)
        int_br.assert_has_calls(expected_on_int_br)
        # a local vlan was already provisioned so there should be no new
        # calls to tunbr
        tun_br.assert_not_called()
        # make sure ofport was updated
        self.assertEqual(12,
                         self.agent.dvr_agent.local_ports[
                             self._port.vif_id].ofport)

    def _port_bound_for_dvr_with_csnat_ports(self):
        int_br = mock.create_autospec(self.agent.int_br)
        tun_br = mock.create_autospec(self.agent.tun_br)
        int_br.set_db_attribute.return_value = True
        int_br.db_get_val.return_value = {}
        with mock.patch.object(
                self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr',
                return_value={'gateway_ip': '1.1.1.1',
                              'cidr': '1.1.1.0/24',
                              'ip_version': n_const.IP_VERSION_4,
                              'gateway_mac': 'aa:bb:cc:11:22:33'}),\
                mock.patch.object(self.agent.dvr_agent.plugin_rpc,
                                  'get_ports_on_host_by_subnet',
                                  return_value=[]),\
                mock.patch.object(self.agent.dvr_agent.int_br,
                                  'get_vif_port_by_id',
                                  return_value=self._port),\
                mock.patch.object(self.agent, 'int_br', new=int_br),\
                mock.patch.object(self.agent, 'tun_br', new=tun_br),\
                mock.patch.object(self.agent.dvr_agent, 'int_br',
                                  new=int_br),\
                mock.patch.object(self.agent.dvr_agent, 'tun_br',
                                  new=tun_br):
            self.agent.port_bound(
                self._port, self._net_uuid, 'vxlan',
                None, None, self._fixed_ips,
                n_const.DEVICE_OWNER_ROUTER_SNAT,
                False)
        return int_br, tun_br

    def test_port_bound_for_dvr_with_csnat_ports_without_subnet(self):
        self._setup_for_dvr_test()
        int_br = mock.create_autospec(self.agent.int_br)
        tun_br = mock.create_autospec(self.agent.tun_br)
        # get_subnet_for_dvr RPC returns {} on error
        with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
                               'get_subnet_for_dvr',
                               return_value={}),\
                mock.patch.object(self.agent, 'int_br', new=int_br),\
                mock.patch.object(self.agent, 'tun_br', new=tun_br),\
                mock.patch.object(self.agent.dvr_agent, 'int_br',
                                  new=int_br),\
                mock.patch.object(self.agent.dvr_agent, 'tun_br',
                                  new=tun_br):
            self.agent.port_bound(
                self._port, self._net_uuid, 'vxlan',
                None, None, self._fixed_ips,
                n_const.DEVICE_OWNER_ROUTER_SNAT,
                False)
            int_br.install_dvr_to_src_mac.assert_not_called()

    def test_treat_devices_removed_for_dvr_interface(self):
        self._test_treat_devices_removed_for_dvr_interface()
        self._test_treat_devices_removed_for_dvr_interface(
            ip_version=n_const.IP_VERSION_6)
        self._test_treat_devices_removed_for_dvr_interface(
            network_type='vlan')
        self._test_treat_devices_removed_for_dvr_interface(
            ip_version=n_const.IP_VERSION_6, network_type='vlan')

    def _test_treat_devices_removed_for_dvr_interface(
            self, ip_version=n_const.IP_VERSION_4, network_type='vxlan'):
        self._setup_for_dvr_test()
        if ip_version == n_const.IP_VERSION_4:
            gateway_ip = '1.1.1.1'
            cidr = '1.1.1.0/24'
        else:
            gateway_ip = '2001:100::1'
            cidr = '2001:100::0/64'
        self._port.dvr_mac = self.agent.dvr_agent.dvr_mac_address
        gateway_mac = 'aa:bb:cc:11:22:33'
        int_br = mock.create_autospec(self.agent.int_br)
        tun_br = mock.create_autospec(self.agent.tun_br)
        int_br.set_db_attribute.return_value = True
        int_br.db_get_val.return_value = {}
        with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
                               'get_subnet_for_dvr',
                               return_value={'gateway_ip': gateway_ip,
                                             'cidr': cidr,
                                             'ip_version': ip_version,
                                             'gateway_mac': gateway_mac}),\
                mock.patch.object(self.agent.dvr_agent.plugin_rpc,
                                  'get_ports_on_host_by_subnet',
                                  return_value=[]),\
                mock.patch.object(self.agent, 'int_br', new=int_br),\
                mock.patch.object(self.agent, 'tun_br', new=tun_br),\
                mock.patch.object(self.agent.dvr_agent, 'int_br',
                                  new=int_br),\
                mock.patch.object(self.agent.dvr_agent, 'tun_br',
                                  new=tun_br),\
                mock.patch.object(self.agent.dvr_agent.int_br,
                                  'get_vif_port_by_id',
                                  return_value=self._port):
            if network_type == 'vlan':
                self.agent.port_bound(self._port, self._net_uuid,
                                      network_type, self._physical_network,
                                      self._segmentation_id,
                                      self._compute_fixed_ips,
                                      n_const.DEVICE_OWNER_DVR_INTERFACE,
                                      False)
            else:
                self.agent.port_bound(
                    self._port, self._net_uuid, 'vxlan',
                    None, None, self._fixed_ips,
                    n_const.DEVICE_OWNER_DVR_INTERFACE,
                    False)
                lvid = self.agent.vlan_manager.get(self._net_uuid).vlan
                int_br.assert_has_calls(
                    self._expected_port_bound(self._port, lvid))
                expected_on_tun_br = [
                    mock.call.provision_local_vlan(network_type='vxlan',
                                                   lvid=lvid,
                                                   segmentation_id=None,
                                                   distributed=True),
                ] + self._expected_install_dvr_process(
                    port=self._port,
                    lvid=lvid,
                    ip_version=ip_version,
                    gateway_ip=gateway_ip)
                tun_br.assert_has_calls(expected_on_tun_br)

        int_br.reset_mock()
        tun_br.reset_mock()
        phys_br = mock.create_autospec(self.br_phys_cls('br-phys'))
        with mock.patch.object(self.agent, 'reclaim_local_vlan'),\
                mock.patch.object(self.agent.plugin_rpc, 'update_device_list',
                                  return_value={
                                      'devices_up': [],
                                      'devices_down': [self._port.vif_id],
                                      'failed_devices_up': [],
                                      'failed_devices_down': []}),\
                mock.patch.object(self.agent, 'int_br', new=int_br),\
                mock.patch.object(self.agent, 'tun_br', new=tun_br),\
                mock.patch.dict(self.agent.phys_brs,
                                {self._physical_network: phys_br}),\
                mock.patch.object(self.agent.dvr_agent, 'int_br',
                                  new=int_br),\
                mock.patch.object(self.agent.dvr_agent, 'tun_br',
                                  new=tun_br),\
                mock.patch.dict(self.agent.dvr_agent.phys_brs,
                                {self._physical_network: phys_br}),\
                mock.patch.object(self.agent.int_br,
                                  'get_vif_port_by_id',
                                  return_value=None):
            failed_devices = {'added': set(), 'removed': set()}
            failed_devices['removed'] = self.agent.treat_devices_removed(
                [self._port.vif_id])
            lvid = self.agent.vlan_manager.get(self._net_uuid).vlan
            if ip_version == n_const.IP_VERSION_4:
                expected = [
                    mock.call.delete_dvr_process_ipv4(
                        vlan_tag=lvid,
                        gateway_ip=gateway_ip),
                ]
            else:
                expected = [
                    mock.call.delete_dvr_process_ipv6(
                        vlan_tag=lvid,
                        gateway_mac=gateway_mac),
                ]
            expected.extend([
                mock.call.delete_dvr_process(
                    vlan_tag=lvid,
                    vif_mac=self._port.vif_mac),
            ])
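            # For VLAN networks the DVR process flows are removed from the
            # physical bridge and keyed by the segmentation id; for tunnel
            # networks they are removed from br-tun and keyed by the local
            # vlan.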
            if network_type == 'vlan':
                expected_unbound_dvr = self._expected_port_unbound(
                    self._port, self._segmentation_id,
                    network_type=network_type)
                int_br.assert_has_calls(expected_unbound_dvr)
                tun_br.assert_not_called()
                phys_br.assert_has_calls(expected)
                self.assertEqual({}, self.agent.dvr_agent.local_ports)
            else:
                expected_unbound_dvr = self._expected_port_unbound(
                    self._port, lvid, network_type=network_type)
                int_br.assert_has_calls(expected_unbound_dvr)
                tun_br.assert_has_calls(expected)
                phys_br.assert_not_called()

    def _test_treat_devices_removed_for_dvr(self, device_owner,
                                            ip_version=n_const.IP_VERSION_4):
        self._setup_for_dvr_test()
        if ip_version == n_const.IP_VERSION_4:
            gateway_ip = '1.1.1.1'
            cidr = '1.1.1.0/24'
        else:
            gateway_ip = '2001:100::1'
            cidr = '2001:100::0/64'
        self._port.dvr_mac = self.agent.dvr_agent.dvr_mac_address
        gateway_mac = 'aa:bb:cc:11:22:33'
        int_br = mock.create_autospec(self.agent.int_br)
        tun_br = mock.create_autospec(self.agent.tun_br)
        int_br.set_db_attribute.return_value = True
        int_br.db_get_val.return_value = {}
        with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
                               'get_subnet_for_dvr',
                               return_value={'gateway_ip': gateway_ip,
                                             'cidr': cidr,
                                             'ip_version': ip_version,
                                             'gateway_mac': gateway_mac}),\
                mock.patch.object(self.agent.dvr_agent.plugin_rpc,
                                  'get_ports_on_host_by_subnet',
                                  return_value=[]),\
                mock.patch.object(self.agent.dvr_agent.int_br,
                                  'get_vif_port_by_id',
                                  return_value=self._port),\
                mock.patch.object(self.agent, 'int_br', new=int_br),\
                mock.patch.object(self.agent, 'tun_br', new=tun_br),\
                mock.patch.object(self.agent.dvr_agent, 'int_br',
                                  new=int_br),\
                mock.patch.object(self.agent.dvr_agent, 'tun_br',
                                  new=tun_br):
            self.agent.port_bound(
                self._port, self._net_uuid, 'vxlan',
                None, None, self._fixed_ips,
                n_const.DEVICE_OWNER_DVR_INTERFACE,
                False)
            lvid = self.agent.vlan_manager.get(self._net_uuid).vlan
            int_br.assert_has_calls(
                self._expected_port_bound(self._port, lvid))
            expected_on_tun_br = [
                mock.call.provision_local_vlan(
                    network_type='vxlan',
                    segmentation_id=None,
                    lvid=lvid,
                    distributed=True),
            ] + self._expected_install_dvr_process(
                port=self._port,
                lvid=lvid,
                ip_version=ip_version,
                gateway_ip=gateway_ip)
            tun_br.assert_has_calls(expected_on_tun_br)
            int_br.reset_mock()
            tun_br.reset_mock()
            self.agent.port_bound(self._compute_port,
                                  self._net_uuid, 'vxlan',
                                  None, None,
                                  self._compute_fixed_ips,
                                  device_owner, False)
            int_br.assert_has_calls(
                [
                    mock.call.install_dvr_to_src_mac(
                        network_type='vxlan',
                        gateway_mac='aa:bb:cc:11:22:33',
                        dst_mac=self._compute_port.vif_mac,
                        dst_port=self._compute_port.ofport,
                        vlan_tag=lvid,
                    ),
                ] + self._expected_port_bound(self._compute_port, lvid,
                                              False))
            tun_br.assert_not_called()
        int_br.reset_mock()
        tun_br.reset_mock()
        with mock.patch.object(self.agent, 'reclaim_local_vlan'),\
                mock.patch.object(self.agent.plugin_rpc, 'update_device_list',
                                  return_value={
                                      'devices_up': [],
                                      'devices_down': [
                                          self._compute_port.vif_id],
                                      'failed_devices_up': [],
                                      'failed_devices_down': []}),\
                mock.patch.object(self.agent, 'int_br', new=int_br),\
                mock.patch.object(self.agent, 'tun_br', new=tun_br),\
                mock.patch.object(self.agent.dvr_agent, 'int_br',
                                  new=int_br),\
                mock.patch.object(self.agent.dvr_agent, 'tun_br',
                                  new=tun_br):
            failed_devices = {'added': set(), 'removed': set()}
            failed_devices['removed'] = self.agent.treat_devices_removed(
                [self._compute_port.vif_id])
            int_br.assert_has_calls([
                mock.call.delete_dvr_to_src_mac(
                    network_type='vxlan',
                    vlan_tag=lvid,
                    dst_mac=self._compute_port.vif_mac,
                ),
            ])
            tun_br.assert_not_called()

    def test_treat_devices_removed_for_dvr_with_compute_ports(self):
        self._test_treat_devices_removed_for_dvr(
            device_owner=DEVICE_OWNER_COMPUTE)
        self._test_treat_devices_removed_for_dvr(
            device_owner=DEVICE_OWNER_COMPUTE,
            ip_version=n_const.IP_VERSION_6)

    def test_treat_devices_removed_for_dvr_with_dhcp_ports(self):
        self._test_treat_devices_removed_for_dvr(
            device_owner=n_const.DEVICE_OWNER_DHCP)
        self._test_treat_devices_removed_for_dvr(
            device_owner=n_const.DEVICE_OWNER_DHCP,
            ip_version=n_const.IP_VERSION_6)

    def test_treat_devices_removed_for_dvr_csnat_port(self):
        self._setup_for_dvr_test()
        gateway_mac = 'aa:bb:cc:11:22:33'
        int_br = mock.create_autospec(self.agent.int_br)
        tun_br = mock.create_autospec(self.agent.tun_br)
        int_br.set_db_attribute.return_value = True
        int_br.db_get_val.return_value = {}
        with mock.patch.object(self.agent.dvr_agent.plugin_rpc,
                               'get_subnet_for_dvr',
                               return_value={'gateway_ip': '1.1.1.1',
                                             'cidr': '1.1.1.0/24',
                                             'ip_version':
                                                 n_const.IP_VERSION_4,
                                             'gateway_mac': gateway_mac}),\
                mock.patch.object(self.agent.dvr_agent.plugin_rpc,
                                  'get_ports_on_host_by_subnet',
                                  return_value=[]),\
                mock.patch.object(self.agent.dvr_agent.int_br,
                                  'get_vif_port_by_id',
                                  return_value=self._port),\
                mock.patch.object(self.agent, 'int_br', new=int_br),\
                mock.patch.object(self.agent, 'tun_br', new=tun_br),\
                mock.patch.object(self.agent.dvr_agent, 'int_br',
                                  new=int_br),\
                mock.patch.object(self.agent.dvr_agent, 'tun_br',
                                  new=tun_br):
            self.agent.port_bound(
                self._port, self._net_uuid, 'vxlan',
                None, None, self._fixed_ips,
                n_const.DEVICE_OWNER_ROUTER_SNAT,
                False)
            lvid = self.agent.vlan_manager.get(self._net_uuid).vlan
            expected_on_int_br = [
                mock.call.install_dvr_to_src_mac(
                    network_type='vxlan',
                    gateway_mac=gateway_mac,
                    dst_mac=self._port.vif_mac,
                    dst_port=self._port.ofport,
                    vlan_tag=lvid,
                ),
            ] + self._expected_port_bound(self._port, lvid, is_dvr=False)
            int_br.assert_has_calls(expected_on_int_br)
            expected_on_tun_br = [
                mock.call.provision_local_vlan(
                    network_type='vxlan',
                    lvid=lvid,
                    segmentation_id=None,
                    distributed=True,
                ),
            ]
            tun_br.assert_has_calls(expected_on_tun_br)
        int_br.reset_mock()
        tun_br.reset_mock()
        with mock.patch.object(self.agent, 'reclaim_local_vlan'),\
                mock.patch.object(self.agent.plugin_rpc, 'update_device_list',
                                  return_value={
                                      'devices_up': [],
                                      'devices_down': [self._port.vif_id],
                                      'failed_devices_up': [],
                                      'failed_devices_down': []}),\
                mock.patch.object(self.agent, 'int_br', new=int_br),\
                mock.patch.object(self.agent, 'tun_br', new=tun_br),\
                mock.patch.object(self.agent.dvr_agent, 'int_br',
                                  new=int_br),\
                mock.patch.object(self.agent.dvr_agent, 'tun_br',
                                  new=tun_br),\
                mock.patch.object(self.agent.int_br,
                                  'get_vif_port_by_id',
                                  return_value=None):
            failed_devices = {'added': set(), 'removed': set()}
            failed_devices['removed'] = self.agent.treat_devices_removed(
                [self._port.vif_id])
            expected_on_int_br = [
                mock.call.delete_dvr_to_src_mac(
                    network_type='vxlan',
                    dst_mac=self._port.vif_mac,
                    vlan_tag=lvid,
                ),
            ]
            int_br.assert_has_calls(expected_on_int_br)
            tun_br.assert_not_called()
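
    # The RPC payloads below deliver '-'-separated MACs (e.g.
    # 'aa-22-33-44-55-66') while the agent stores and asserts the
    # colon-separated form ('aa:22:33:44:55:66'), so these tests also
    # exercise the normalization applied to DVR MACs received from the
    # server.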
'11-22-33-44-55-66'}]): self.agent.dvr_agent.setup_dvr_flows_on_integ_br() self.assertTrue(self.agent.dvr_agent.in_distributed_mode()) physical_networks = list( self.agent.dvr_agent.bridge_mappings.keys()) ioport = self.agent.dvr_agent.int_ofports[physical_networks[0]] expected_on_int_br = [ # setup_dvr_flows_on_integ_br mock.call.setup_canary_table(), mock.call.install_drop(table_id=constants.DVR_TO_SRC_MAC, priority=1), mock.call.install_drop(table_id=constants.DVR_TO_SRC_MAC_VLAN, priority=1), mock.call.install_drop(table_id=constants.LOCAL_SWITCHING, priority=2, in_port=ioport), ] int_br.assert_has_calls(expected_on_int_br) tun_br.assert_not_called() def test_get_dvr_mac_address(self): self._setup_for_dvr_test() self.agent.dvr_agent.dvr_mac_address = None with mock.patch.object(self.agent.dvr_agent.plugin_rpc, 'get_dvr_mac_address_by_host', return_value={'host': 'cn1', 'mac_address': 'aa-22-33-44-55-66'}): self.agent.dvr_agent.get_dvr_mac_address() self.assertEqual('aa:22:33:44:55:66', self.agent.dvr_agent.dvr_mac_address) self.assertTrue(self.agent.dvr_agent.in_distributed_mode()) def test_get_dvr_mac_address_exception(self): self._setup_for_dvr_test() self.agent.dvr_agent.dvr_mac_address = None int_br = mock.create_autospec(self.agent.int_br) with mock.patch.object(self.agent.dvr_agent.plugin_rpc, 'get_dvr_mac_address_by_host', side_effect=oslo_messaging.RemoteError),\ mock.patch.object(self.agent, 'int_br', new=int_br),\ mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br): with testtools.ExpectedException(SystemExit): self.agent.dvr_agent.get_dvr_mac_address() self.assertIsNone(self.agent.dvr_agent.dvr_mac_address) self.assertFalse(self.agent.dvr_agent.in_distributed_mode()) def test_get_dvr_mac_address_retried(self): valid_entry = {'host': 'cn1', 'mac_address': 'aa-22-33-44-55-66'} raise_timeout = oslo_messaging.MessagingTimeout() # Raise a timeout the first 2 times it calls get_dvr_mac_address() self._setup_for_dvr_test() self.agent.dvr_agent.dvr_mac_address = None with mock.patch.object(self.agent.dvr_agent.plugin_rpc, 'get_dvr_mac_address_by_host', side_effect=(raise_timeout, raise_timeout, valid_entry)): self.agent.dvr_agent.get_dvr_mac_address() self.assertEqual('aa:22:33:44:55:66', self.agent.dvr_agent.dvr_mac_address) self.assertTrue(self.agent.dvr_agent.in_distributed_mode()) self.assertEqual(self.agent.dvr_agent.plugin_rpc. get_dvr_mac_address_by_host.call_count, 3) def test_get_dvr_mac_address_retried_max(self): raise_timeout = oslo_messaging.MessagingTimeout() # Raise a timeout every time until we give up, currently 5 tries self._setup_for_dvr_test() self.agent.dvr_agent.dvr_mac_address = None int_br = mock.create_autospec(self.agent.int_br) with mock.patch.object(self.agent.dvr_agent.plugin_rpc, 'get_dvr_mac_address_by_host', side_effect=raise_timeout),\ mock.patch.object(utils, "execute"),\ mock.patch.object(self.agent, 'int_br', new=int_br),\ mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br): with testtools.ExpectedException(SystemExit): self.agent.dvr_agent.get_dvr_mac_address() self.assertIsNone(self.agent.dvr_agent.dvr_mac_address) self.assertFalse(self.agent.dvr_agent.in_distributed_mode()) self.assertEqual(self.agent.dvr_agent.plugin_rpc. 
get_dvr_mac_address_by_host.call_count, 5) def test_dvr_mac_address_update(self): self._setup_for_dvr_test() newhost = 'cn2' newmac = 'aa:bb:cc:dd:ee:ff' int_br = mock.create_autospec(self.agent.int_br) tun_br = mock.create_autospec(self.agent.tun_br) phys_br = mock.create_autospec(self.br_phys_cls('br-phys')) physical_network = 'physeth1' with mock.patch.object(self.agent, 'int_br', new=int_br),\ mock.patch.object(self.agent, 'tun_br', new=tun_br),\ mock.patch.dict(self.agent.phys_brs, {physical_network: phys_br}),\ mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\ mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br),\ mock.patch.dict(self.agent.dvr_agent.phys_brs, {physical_network: phys_br}): self.agent.dvr_agent.\ dvr_mac_address_update( dvr_macs=[{'host': newhost, 'mac_address': newmac}]) expected_on_int_br = [ mock.call.add_dvr_mac_vlan( mac=newmac, port=self.agent.int_ofports[physical_network]), mock.call.add_dvr_mac_tun( mac=newmac, port=self.agent.patch_tun_ofport), ] expected_on_tun_br = [ mock.call.add_dvr_mac_tun( mac=newmac, port=self.agent.patch_int_ofport), ] expected_on_phys_br = [ mock.call.add_dvr_mac_vlan( mac=newmac, port=self.agent.phys_ofports[physical_network]), ] int_br.assert_has_calls(expected_on_int_br) tun_br.assert_has_calls(expected_on_tun_br) phys_br.assert_has_calls(expected_on_phys_br) int_br.reset_mock() tun_br.reset_mock() phys_br.reset_mock() with mock.patch.object(self.agent, 'int_br', new=int_br),\ mock.patch.object(self.agent, 'tun_br', new=tun_br),\ mock.patch.dict(self.agent.phys_brs, {physical_network: phys_br}),\ mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\ mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br),\ mock.patch.dict(self.agent.dvr_agent.phys_brs, {physical_network: phys_br}): self.agent.dvr_agent.dvr_mac_address_update(dvr_macs=[]) expected_on_int_br = [ mock.call.remove_dvr_mac_vlan( mac=newmac), mock.call.remove_dvr_mac_tun( mac=newmac, port=self.agent.patch_tun_ofport), ] expected_on_tun_br = [ mock.call.remove_dvr_mac_tun( mac=newmac), ] expected_on_phys_br = [ mock.call.remove_dvr_mac_vlan( mac=newmac), ] int_br.assert_has_calls(expected_on_int_br) tun_br.assert_has_calls(expected_on_tun_br) phys_br.assert_has_calls(expected_on_phys_br) def test_ovs_restart(self): self._setup_for_dvr_test() reset_methods = ( 'reset_ovs_parameters', 'reset_dvr_parameters', 'setup_dvr_flows_on_integ_br', 'setup_dvr_flows_on_tun_br', 'setup_dvr_flows_on_phys_br', 'setup_dvr_mac_flows_on_all_brs') reset_mocks = [mock.patch.object(self.agent.dvr_agent, method).start() for method in reset_methods] tun_br = mock.create_autospec(self.agent.tun_br) with mock.patch.object(self.agent, 'check_ovs_status', return_value=constants.OVS_RESTARTED),\ mock.patch.object(self.agent, '_agent_has_updates', side_effect=TypeError('loop exit')),\ mock.patch.object(self.agent, 'tun_br', new=tun_br),\ mock.patch.object(self.agent, 'setup_physical_bridges'),\ mock.patch.object(self.agent, 'setup_integration_br'),\ mock.patch.object(self.agent, 'setup_tunnel_br'),\ mock.patch.object(self.agent, 'state_rpc'), \ mock.patch.object(self.agent.ovs.ovsdb, 'idl_monitor'): try: self.agent.rpc_loop(polling_manager=mock.Mock()) except TypeError: pass self.assertTrue(all([x.called for x in reset_mocks])) def test_rpc_loop_survives_error_in_check_canary_table(self): with mock.patch.object(self.agent.int_br, 'check_canary_table', side_effect=TypeError('borked')),\ mock.patch.object(self.agent, '_check_and_handle_signal', side_effect=[True, 
False]): self.agent.rpc_loop(polling_manager=mock.Mock()) def _test_scan_ports_failure(self, scan_method_name): with mock.patch.object(self.agent, 'check_ovs_status', return_value=constants.OVS_RESTARTED),\ mock.patch.object(self.agent, scan_method_name, side_effect=TypeError('broken')),\ mock.patch.object(self.agent, '_agent_has_updates', return_value=True),\ mock.patch.object(self.agent, '_check_and_handle_signal', side_effect=[True, False]),\ mock.patch.object(self.agent, 'setup_physical_bridges'),\ mock.patch.object(self.agent, 'setup_integration_br'),\ mock.patch.object(self.agent, 'state_rpc'), \ mock.patch.object(self.agent.ovs.ovsdb, 'idl_monitor'): # block RPC calls and bridge calls self.agent.rpc_loop(polling_manager=mock.Mock()) def test_scan_ports_failure(self): self._test_scan_ports_failure('scan_ports') def test_scan_ancillary_ports_failure(self): with mock.patch.object(self.agent, 'scan_ports'): with mock.patch.object(self.agent, 'update_stale_ofport_rules'): self.agent.ancillary_brs = mock.Mock() self._test_scan_ports_failure('scan_ancillary_ports') class TestOvsDvrNeutronAgentOSKen(TestOvsDvrNeutronAgent, ovs_test_base.OVSOSKenTestBase): pass class TestValidateTunnelLocalIP(base.BaseTestCase): def test_validate_local_ip_with_valid_ip(self): mock_get_device_by_ip = mock.patch.object( ip_lib.IPWrapper, 'get_device_by_ip').start() ovs_agent.validate_local_ip(FAKE_IP1) mock_get_device_by_ip.assert_called_once_with(FAKE_IP1) def test_validate_local_ip_with_valid_ipv6(self): mock_get_device_by_ip = mock.patch.object( ip_lib.IPWrapper, 'get_device_by_ip').start() ovs_agent.validate_local_ip(FAKE_IP6) mock_get_device_by_ip.assert_called_once_with(FAKE_IP6) def test_validate_local_ip_with_none_ip(self): with testtools.ExpectedException(SystemExit): ovs_agent.validate_local_ip(None) def test_validate_local_ip_with_invalid_ip(self): mock_get_device_by_ip = mock.patch.object( ip_lib.IPWrapper, 'get_device_by_ip').start() mock_get_device_by_ip.return_value = None with testtools.ExpectedException(SystemExit): ovs_agent.validate_local_ip(FAKE_IP1) mock_get_device_by_ip.assert_called_once_with(FAKE_IP1) def test_validate_local_ip_with_invalid_ipv6(self): mock_get_device_by_ip = mock.patch.object( ip_lib.IPWrapper, 'get_device_by_ip').start() mock_get_device_by_ip.return_value = None with testtools.ExpectedException(SystemExit): ovs_agent.validate_local_ip(FAKE_IP6) mock_get_device_by_ip.assert_called_once_with(FAKE_IP6) class TestOvsAgentTunnelName(base.BaseTestCase): def test_get_tunnel_hash_invalid_address(self): hashlen = n_const.DEVICE_NAME_MAX_LEN self.assertIsNone( ovs_agent.OVSNeutronAgent.get_tunnel_hash('a.b.c.d', hashlen)) def test_get_tunnel_name_vxlan(self): self.assertEqual( 'vxlan-7f000002', ovs_agent.OVSNeutronAgent.get_tunnel_name( 'vxlan', '127.0.0.1', '127.0.0.2')) def test_get_tunnel_name_gre(self): self.assertEqual( 'gre-7f000002', ovs_agent.OVSNeutronAgent.get_tunnel_name( 'gre', '127.0.0.1', '127.0.0.2')) def test_get_tunnel_name_vxlan_ipv6(self): self.assertEqual( 'vxlan-pehtjzksi', ovs_agent.OVSNeutronAgent.get_tunnel_name( 'vxlan', '2001:db8::1', '2001:db8::2')) def test_get_tunnel_name_gre_ipv6(self): self.assertEqual( 'gre-pehtjzksiqr', ovs_agent.OVSNeutronAgent.get_tunnel_name( 'gre', '2001:db8::1', '2001:db8::2')) ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py 22 mtime=1586982282.0 
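# NOTE: Illustrative sketch, not part of the original test suite. It
# reproduces only the IPv4 tunnel-name encoding that the
# TestOvsAgentTunnelName cases above assert: the remote IP is rendered
# as eight lowercase hex digits and appended to the tunnel type, so
# '127.0.0.2' becomes 'vxlan-7f000002'. The helper name is hypothetical,
# and the IPv6 hashing path exercised by the '-pehtjzksi' expectations
# is intentionally left out.
import socket
import struct


def sketch_ipv4_tunnel_name(network_type, remote_ip):
    # inet_aton packs the dotted quad into 4 network-order bytes;
    # unpacking as '!I' and formatting with '%08x' yields the suffix.
    packed = socket.inet_aton(remote_ip)
    return '%s-%08x' % (network_type, struct.unpack('!I', packed)[0])


assert sketch_ipv4_tunnel_name('vxlan', '127.0.0.2') == 'vxlan-7f000002'
assert sketch_ipv4_tunnel_name('gre', '127.0.0.2') == 'gre-7f000002'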
neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.p0000644000175000017500000007526000000000000033704 0ustar00coreycorey00000000000000# Copyright 2012 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import time import mock from neutron_lib import constants as n_const from oslo_config import cfg from oslo_log import log from neutron.agent.common import ip_lib from neutron.agent.common import ovs_lib from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \ import ovs_test_base from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \ import test_vlanmanager # Useful global dummy variables. NET_UUID = '3faeebfe-5d37-11e1-a64b-000c29d5f0a7' LS_ID = 420 LV_ID = 42 LV_IDS = [42, 43] VIF_ID = '404deaec-5d37-11e1-a64b-000c29d5f0a8' VIF_MAC = '3c:09:24:1e:78:23' OFPORT_NUM = 1 VIF_PORT = ovs_lib.VifPort('port', OFPORT_NUM, VIF_ID, VIF_MAC, 'switch') VIF_PORTS = {VIF_ID: VIF_PORT} FIXED_IPS = [{'subnet_id': 'my-subnet-uuid', 'ip_address': '1.1.1.1'}] VM_DEVICE_OWNER = n_const.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' TUN_OFPORTS = {n_const.TYPE_GRE: {'ip1': '11', 'ip2': '12'}} BCAST_MAC = "01:00:00:00:00:00/01:00:00:00:00:00" UCAST_MAC = "00:00:00:00:00:00/01:00:00:00:00:00" class DummyPort(object): def __init__(self, interface_id): self.interface_id = interface_id class DummyVlanBinding(object): def __init__(self, network_id, vlan_id): self.network_id = network_id self.vlan_id = vlan_id class TunnelTest(object): USE_VETH_INTERCONNECTION = False VETH_MTU = None def setUp(self): super(TunnelTest, self).setUp() self.useFixture(test_vlanmanager.LocalVlanManagerFixture()) conn_patcher = mock.patch( 'neutron.agent.ovsdb.impl_idl._connection') conn_patcher.start() mock.patch( 'neutron.api.rpc.handlers.resources_rpc.ResourcesPullRpcApi' ).start() self.addCleanup(conn_patcher.stop) cfg.CONF.set_default('firewall_driver', 'neutron.agent.firewall.NoopFirewallDriver', group='SECURITYGROUP') cfg.CONF.set_override('report_interval', 0, 'AGENT') cfg.CONF.set_override('explicitly_egress_direct', True, 'AGENT') self.INT_BRIDGE = 'integration_bridge' self.TUN_BRIDGE = 'tunnel_bridge' self.MAP_TUN_BRIDGE = 'tun_br_map' self.AUX_BRIDGE = 'ancillary_bridge' self.NET_MAPPING = ['net1:%s' % self.MAP_TUN_BRIDGE] self.INT_OFPORT = 11111 self.TUN_OFPORT = 22222 self.MAP_TUN_INT_OFPORT = 33333 self.MAP_TUN_PHY_OFPORT = 44444 self.LVM_DATA = ( LV_ID, 'gre', None, LS_ID, VIF_PORTS) self.LVM_FLAT_DATA = ( LV_ID, 'flat', 'net1', LS_ID, VIF_PORTS) self.LVM_VLAN_DATA = ( LV_ID, 'vlan', 'net1', LS_ID, VIF_PORTS) self.inta = mock.Mock() self.intb = mock.Mock() mock.patch.object(ovs_lib.BaseOVS, 'config', new_callable=mock.PropertyMock, return_value={}).start() mock.patch('neutron.agent.ovsdb.impl_idl._connection').start() self.ovs_bridges = { self.INT_BRIDGE: mock.create_autospec( self.br_int_cls('br-int')), self.TUN_BRIDGE: mock.create_autospec( self.br_tun_cls('br-tun')), 
self.MAP_TUN_BRIDGE: mock.create_autospec( self.br_phys_cls('br-phys')), self.AUX_BRIDGE: mock.create_autospec( ovs_lib.OVSBridge('br-aux')), } self.ovs_int_ofports = { 'patch-tun': self.TUN_OFPORT, 'int-%s' % self.MAP_TUN_BRIDGE: self.MAP_TUN_INT_OFPORT } mock.patch('neutron.agent.rpc.PluginReportStateAPI.' 'has_alive_neutron_server').start() def lookup_br(br_name, *args, **kwargs): return self.ovs_bridges[br_name] self.mock_int_bridge_cls = mock.patch(self._BR_INT_CLASS, autospec=True).start() self.mock_int_bridge_cls.side_effect = lookup_br self.mock_phys_bridge_cls = mock.patch(self._BR_PHYS_CLASS, autospec=True).start() self.mock_phys_bridge_cls.side_effect = lookup_br self.mock_tun_bridge_cls = mock.patch(self._BR_TUN_CLASS, autospec=True).start() self.mock_tun_bridge_cls.side_effect = lookup_br self.mock_aux_bridge_cls = mock.patch( 'neutron.agent.common.ovs_lib.OVSBridge', autospec=True).start() self.mock_aux_bridge_cls.side_effect = lookup_br self.mock_int_bridge = self.ovs_bridges[self.INT_BRIDGE] self.mock_int_bridge.add_port.return_value = self.MAP_TUN_INT_OFPORT self.mock_int_bridge.add_patch_port.side_effect = ( lambda tap, peer: self.ovs_int_ofports[tap]) self.mock_int_bridge.port_exists.return_value = False self.mock_int_bridge.get_vif_ports.return_value = [] self.mock_int_bridge.get_ports_attributes.return_value = [] self.mock_int_bridge.db_get_val.return_value = {} self.mock_map_tun_bridge = self.ovs_bridges[self.MAP_TUN_BRIDGE] self.mock_map_tun_bridge.br_name = self.MAP_TUN_BRIDGE self.mock_map_tun_bridge.add_port.return_value = ( self.MAP_TUN_PHY_OFPORT) self.mock_map_tun_bridge.add_patch_port.return_value = ( self.MAP_TUN_PHY_OFPORT) self.mock_map_tun_bridge.port_exists.return_value = False self.mock_tun_bridge = self.ovs_bridges[self.TUN_BRIDGE] self.mock_tun_bridge.add_port.return_value = self.INT_OFPORT self.mock_tun_bridge.add_patch_port.return_value = self.INT_OFPORT self.ipdevice = mock.patch.object(ip_lib, 'IPDevice').start() self.ipwrapper = mock.patch.object(ip_lib, 'IPWrapper').start() add_veth = self.ipwrapper.return_value.add_veth add_veth.return_value = [self.inta, self.intb] self.get_bridges = mock.patch.object(ovs_lib.BaseOVS, 'get_bridges').start() self.get_bridges.return_value = [self.INT_BRIDGE, self.TUN_BRIDGE, self.MAP_TUN_BRIDGE, self.AUX_BRIDGE] self.get_bridge_external_bridge_id = mock.patch.object( ovs_lib.BaseOVS, 'get_bridge_external_bridge_id').start() self.get_bridge_external_bridge_id.side_effect = ( lambda bridge, log_errors: bridge if bridge in self.ovs_bridges else None) self.execute = mock.patch('neutron.agent.common.utils.execute').start() self.mock_check_bridge_datapath_id = mock.patch.object( self.mod_agent.OVSNeutronAgent, '_check_bridge_datapath_id').start() self._define_expected_calls() def _define_expected_calls(self, arp_responder=False, igmp_snooping=False): self.mock_int_bridge_cls_expected = [ mock.call(self.INT_BRIDGE, datapath_type=mock.ANY), ] self.mock_phys_bridge_cls_expected = [ mock.call(self.MAP_TUN_BRIDGE, datapath_type=mock.ANY), ] self.mock_tun_bridge_cls_expected = [ mock.call(self.TUN_BRIDGE, datapath_type=mock.ANY), ] self.mock_int_bridge = self.ovs_bridges[self.INT_BRIDGE] self.mock_int_bridge_expected = [ mock.call.create(), mock.call.set_secure_mode(), mock.call.setup_controllers(mock.ANY), mock.call.set_igmp_snooping_state(igmp_snooping), mock.call.setup_default_table(), ] self.mock_map_tun_bridge_expected = [ mock.call.create(), mock.call.set_secure_mode(), mock.call.setup_controllers(mock.ANY), 
mock.call.setup_default_table(), mock.call.port_exists('phy-%s' % self.MAP_TUN_BRIDGE), mock.call.add_patch_port('phy-%s' % self.MAP_TUN_BRIDGE, constants.NONEXISTENT_PEER), ] self.mock_int_bridge_expected += [ mock.call.db_get_val('Interface', 'int-%s' % self.MAP_TUN_BRIDGE, 'type', log_errors=False), mock.call.port_exists('int-%s' % self.MAP_TUN_BRIDGE), mock.call.add_patch_port('int-%s' % self.MAP_TUN_BRIDGE, constants.NONEXISTENT_PEER), ] self.mock_int_bridge_expected += [ mock.call.drop_port(in_port=self.MAP_TUN_INT_OFPORT), mock.call.set_db_attribute( 'Interface', 'int-%s' % self.MAP_TUN_BRIDGE, 'options', {'peer': 'phy-%s' % self.MAP_TUN_BRIDGE}), ] self.mock_map_tun_bridge_expected += [ mock.call.drop_port(in_port=self.MAP_TUN_PHY_OFPORT), mock.call.set_db_attribute( 'Interface', 'phy-%s' % self.MAP_TUN_BRIDGE, 'options', {'peer': 'int-%s' % self.MAP_TUN_BRIDGE}), ] self.mock_aux_bridge = self.ovs_bridges[self.AUX_BRIDGE] self.mock_aux_bridge_expected = [ ] self.mock_tun_bridge_expected = [ mock.call.create(secure_mode=True), mock.call.setup_controllers(mock.ANY), mock.call.port_exists('patch-int'), mock.ANY, mock.call.add_patch_port('patch-int', 'patch-tun'), ] self.mock_int_bridge_expected += [ mock.call.port_exists('patch-tun'), mock.call.add_patch_port('patch-tun', 'patch-int'), ] self.mock_int_bridge_expected += [ mock.call.get_vif_ports((ovs_lib.INVALID_OFPORT, ovs_lib.UNASSIGNED_OFPORT)), mock.call.get_ports_attributes( 'Port', columns=['name', 'other_config', 'tag'], ports=[]) ] self.mock_tun_bridge_expected += [ mock.call.setup_default_table(self.INT_OFPORT, arp_responder), ] self.ipdevice_expected = [] self.ipwrapper_expected = [mock.call()] self.get_bridges_expected = [mock.call(), mock.call()] self.inta_expected = [] self.intb_expected = [] self.execute_expected = [] def _build_agent(self, **config_opts_agent): """Configure and initialize OVS agent. :param config_opts_agent: a dict with options to override the default values for the AGENT group. 
""" bridge_classes = { 'br_int': self.mock_int_bridge_cls, 'br_phys': self.mock_phys_bridge_cls, 'br_tun': self.mock_tun_bridge_cls, } cfg.CONF.set_override('integration_bridge', self.INT_BRIDGE, 'OVS') cfg.CONF.set_override('tunnel_bridge', self.TUN_BRIDGE, 'OVS') cfg.CONF.set_override('local_ip', '10.0.0.1', 'OVS') cfg.CONF.set_override('bridge_mappings', self.NET_MAPPING, 'OVS') cfg.CONF.set_override('polling_interval', 2, 'AGENT') cfg.CONF.set_override('tunnel_types', ['gre'], 'AGENT') cfg.CONF.set_override('veth_mtu', self.VETH_MTU, 'AGENT') cfg.CONF.set_override('minimize_polling', False, 'AGENT') cfg.CONF.set_override('use_veth_interconnection', self.USE_VETH_INTERCONNECTION, 'OVS') for k, v in config_opts_agent.items(): cfg.CONF.set_override(k, v, 'AGENT') ext_mgr = mock.Mock() agent = self.mod_agent.OVSNeutronAgent( bridge_classes, ext_mgr, cfg.CONF) mock.patch.object(agent.ovs.ovsdb, 'idl_monitor').start() return agent def _verify_mock_call(self, mock_obj, expected): mock_obj.assert_has_calls(expected) self.assertEqual(expected, mock_obj.mock_calls) def _verify_mock_calls(self): self._verify_mock_call(self.mock_int_bridge_cls, self.mock_int_bridge_cls_expected) self._verify_mock_call(self.mock_tun_bridge_cls, self.mock_tun_bridge_cls_expected) self._verify_mock_call(self.mock_phys_bridge_cls, self.mock_phys_bridge_cls_expected) self._verify_mock_call(self.mock_int_bridge, self.mock_int_bridge_expected) self._verify_mock_call(self.mock_map_tun_bridge, self.mock_map_tun_bridge_expected) self._verify_mock_call(self.mock_tun_bridge, self.mock_tun_bridge_expected) self._verify_mock_call(self.mock_aux_bridge, self.mock_aux_bridge_expected) self._verify_mock_call(self.ipdevice, self.ipdevice_expected) self._verify_mock_call(self.ipwrapper, self.ipwrapper_expected) self._verify_mock_call(self.get_bridges, self.get_bridges_expected) self._verify_mock_call(self.inta, self.inta_expected) self._verify_mock_call(self.intb, self.intb_expected) self._verify_mock_call(self.execute, self.execute_expected) def test_construct(self): agent = self._build_agent() self.assertEqual(agent.agent_id, 'ovs-agent-%s' % cfg.CONF.host) self._verify_mock_calls() # TODO(ethuleau): Initially, local ARP responder is be dependent to the # ML2 l2 population mechanism driver. 
# The next two tests use l2_pop flag to test ARP responder def test_construct_with_arp_responder(self): self._build_agent(l2_population=True, arp_responder=True) self._define_expected_calls(arp_responder=True) self._verify_mock_calls() def test_construct_with_igmp_snooping(self): cfg.CONF.set_override('igmp_snooping_enable', True, 'OVS') self._build_agent() self._define_expected_calls(igmp_snooping=True) self._verify_mock_calls() def test_construct_without_arp_responder(self): self._build_agent(l2_population=False, arp_responder=True) self._verify_mock_calls() def test_construct_vxlan(self): self._build_agent(tunnel_types=['vxlan']) self._verify_mock_calls() def test_provision_local_vlan(self): ofports = list(TUN_OFPORTS[n_const.TYPE_GRE].values()) self.mock_tun_bridge_expected += [ mock.call.install_flood_to_tun(LV_ID, LS_ID, ofports), mock.call.provision_local_vlan( network_type=n_const.TYPE_GRE, lvid=LV_ID, segmentation_id=LS_ID), ] a = self._build_agent() a.available_local_vlans = set([LV_ID]) a.tun_br_ofports = TUN_OFPORTS a.provision_local_vlan(NET_UUID, n_const.TYPE_GRE, None, LS_ID) self._verify_mock_calls() def test_provision_local_vlan_flat(self): self.mock_map_tun_bridge_expected.append( mock.call.provision_local_vlan( port=self.MAP_TUN_PHY_OFPORT, lvid=LV_ID, segmentation_id=None, distributed=False)) self.mock_int_bridge_expected.append( mock.call.provision_local_vlan( port=self.INT_OFPORT, lvid=LV_ID, segmentation_id=None)) a = self._build_agent() a.available_local_vlans = set([LV_ID]) a.phys_brs['net1'] = self.mock_map_tun_bridge a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT a.int_ofports['net1'] = self.INT_OFPORT a.provision_local_vlan(NET_UUID, n_const.TYPE_FLAT, 'net1', LS_ID) self._verify_mock_calls() def test_provision_local_vlan_flat_fail(self): a = self._build_agent() a.provision_local_vlan(NET_UUID, n_const.TYPE_FLAT, 'net2', LS_ID) self._verify_mock_calls() def test_provision_local_vlan_vlan(self): self.mock_map_tun_bridge_expected.append( mock.call.provision_local_vlan( port=self.MAP_TUN_PHY_OFPORT, lvid=LV_ID, segmentation_id=LS_ID, distributed=False)) self.mock_int_bridge_expected.append( mock.call.provision_local_vlan( port=self.INT_OFPORT, lvid=LV_ID, segmentation_id=LS_ID)) a = self._build_agent() a.available_local_vlans = set([LV_ID]) a.phys_brs['net1'] = self.mock_map_tun_bridge a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT a.int_ofports['net1'] = self.INT_OFPORT a.provision_local_vlan(NET_UUID, n_const.TYPE_VLAN, 'net1', LS_ID) self._verify_mock_calls() def test_provision_local_vlan_vlan_fail(self): a = self._build_agent() a.provision_local_vlan(NET_UUID, n_const.TYPE_VLAN, 'net2', LS_ID) self._verify_mock_calls() def test_reclaim_local_vlan(self): self.mock_tun_bridge_expected += [ mock.call.reclaim_local_vlan(network_type='gre', segmentation_id=LS_ID), mock.call.delete_flood_to_tun(LV_ID), mock.call.delete_unicast_to_tun(LV_ID, None), mock.call.delete_arp_responder(LV_ID, None), ] a = self._build_agent() a.available_local_vlans = set() a.vlan_manager.add(NET_UUID, *self.LVM_DATA) a.reclaim_local_vlan(NET_UUID) self.assertIn(self.LVM_DATA[0], a.available_local_vlans) self._verify_mock_calls() def test_reclaim_local_vlan_flat(self): self.mock_map_tun_bridge_expected.append( mock.call.reclaim_local_vlan( port=self.MAP_TUN_PHY_OFPORT, lvid=self.LVM_FLAT_DATA[0])) self.mock_int_bridge_expected.append( mock.call.reclaim_local_vlan( port=self.INT_OFPORT, segmentation_id=None)) a = self._build_agent() a.phys_brs['net1'] = self.mock_map_tun_bridge 
a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT a.int_ofports['net1'] = self.INT_OFPORT a.available_local_vlans = set() a.vlan_manager.add(NET_UUID, *self.LVM_FLAT_DATA) a.reclaim_local_vlan(NET_UUID) self.assertIn(self.LVM_FLAT_DATA[0], a.available_local_vlans) self._verify_mock_calls() def test_reclaim_local_vlan_vlan(self): self.mock_map_tun_bridge_expected.append( mock.call.reclaim_local_vlan( port=self.MAP_TUN_PHY_OFPORT, lvid=self.LVM_VLAN_DATA[0])) self.mock_int_bridge_expected.append( mock.call.reclaim_local_vlan( port=self.INT_OFPORT, segmentation_id=LS_ID)) a = self._build_agent() a.phys_brs['net1'] = self.mock_map_tun_bridge a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT a.int_ofports['net1'] = self.INT_OFPORT a.available_local_vlans = set() a.vlan_manager.add(NET_UUID, *self.LVM_VLAN_DATA) a.reclaim_local_vlan(NET_UUID) self.assertIn(self.LVM_VLAN_DATA[0], a.available_local_vlans) self._verify_mock_calls() def test_port_bound(self): vlan_mapping = {'segmentation_id': str(LS_ID), 'physical_network': 'None', 'net_uuid': NET_UUID, 'network_type': 'gre'} self.mock_int_bridge_expected += [ mock.call.db_get_val('Port', 'port', 'other_config'), mock.call.set_db_attribute('Port', VIF_PORT.port_name, 'other_config', vlan_mapping)] a = self._build_agent() a.vlan_manager.add(NET_UUID, *self.LVM_DATA) a.local_dvr_map = {} self.ovs_bridges[self.INT_BRIDGE].db_get_val.return_value = {} a.port_bound(VIF_PORT, NET_UUID, 'gre', None, LS_ID, FIXED_IPS, VM_DEVICE_OWNER, False) self._verify_mock_calls() def test_port_unbound(self): with mock.patch.object(self.mod_agent.OVSNeutronAgent, 'reclaim_local_vlan') as reclaim_local_vlan: a = self._build_agent() a.vlan_manager.add(NET_UUID, *self.LVM_DATA) a.port_unbound(VIF_ID, NET_UUID) reclaim_local_vlan.assert_called_once_with(NET_UUID) self._verify_mock_calls() def test_port_dead(self): self.mock_int_bridge_expected += [ mock.call.db_get_val('Port', VIF_PORT.port_name, 'tag', log_errors=True), mock.call.set_db_attribute( 'Port', VIF_PORT.port_name, 'tag', constants.DEAD_VLAN_TAG, log_errors=True), mock.call.drop_port(in_port=VIF_PORT.ofport), ] a = self._build_agent() a.available_local_vlans = set([LV_ID]) a.vlan_manager.add(NET_UUID, *self.LVM_DATA) self.ovs_bridges[self.INT_BRIDGE].db_get_val.return_value = mock.Mock() a.port_dead(VIF_PORT) self._verify_mock_calls() def test_tunnel_update(self): tunnel_port = '9999' self.mock_tun_bridge.add_tunnel_port.return_value = tunnel_port self.mock_tun_bridge_expected += [ mock.call.add_tunnel_port('gre-0a000a01', '10.0.10.1', '10.0.0.1', 'gre', 4789, True, False, None), mock.call.setup_tunnel_port('gre', tunnel_port), ] a = self._build_agent() a.tunnel_update( mock.sentinel.ctx, tunnel_ip='10.0.10.1', tunnel_type=n_const.TYPE_GRE) self._verify_mock_calls() def test_tunnel_update_self(self): a = self._build_agent() a.tunnel_update( mock.sentinel.ctx, tunnel_ip='10.0.0.1') self._verify_mock_calls() def test_daemon_loop(self): reply_ge_1 = {'added': [{'name': 'tap0', 'ofport': 3, 'external_ids': { 'attached-mac': 'test_mac'}}], 'removed': []} reply_ge_2 = {'added': [], 'removed': [{'name': 'tap0', 'ofport': 3, 'external_ids': { 'attached-mac': 'test_mac'}}]} reply_pe_1 = {'current': set(['tap0']), 'added': set(['tap0']), 'removed': set([])} reply_pe_2 = {'current': set([]), 'added': set([]), 'removed': set(['tap0'])} reply_ancillary = {'current': set([]), 'added': set([]), 'removed': set([])} self.mock_int_bridge_expected += [ mock.call.check_canary_table(), mock.call.deferred(full_ordered=True, 
use_bundle=True), mock.call.deferred().__enter__(), mock.call.deferred().__exit__(None, None, None), mock.call.cleanup_flows(), mock.call.check_canary_table(), mock.call.deferred(full_ordered=True, use_bundle=True), mock.call.deferred().__enter__(), mock.call.deferred().__exit__(None, None, None), ] self.mock_map_tun_bridge_expected += [ mock.call.cleanup_flows(), ] self.mock_tun_bridge_expected += [ mock.call.cleanup_flows() ] # No cleanup is expected on ancillary bridge self.ovs_bridges[self.INT_BRIDGE].check_canary_table.return_value = \ constants.OVS_NORMAL with mock.patch.object(log.KeywordArgumentAdapter, 'exception') as log_exception,\ mock.patch.object(self.mod_agent.OVSNeutronAgent, 'process_ports_events') as process_p_events,\ mock.patch.object( self.mod_agent.OVSNeutronAgent, 'process_network_ports') as process_network_ports,\ mock.patch.object(self.mod_agent.OVSNeutronAgent, 'tunnel_sync'),\ mock.patch.object(time, 'sleep'),\ mock.patch.object( self.mod_agent.OVSNeutronAgent, 'update_stale_ofport_rules') as update_stale: log_exception.side_effect = Exception( 'Fake exception to get out of the loop') update_stale.return_value = [] devices_not_ready = set() process_p_events.side_effect = [ (reply_pe_1, reply_ancillary, devices_not_ready), (reply_pe_2, reply_ancillary, devices_not_ready)] interface_polling = mock.Mock() interface_polling.get_events.side_effect = [reply_ge_1, reply_ge_2] failed_devices = {'removed': set([]), 'added': set([])} failed_ancillary_devices = {'removed': set([]), 'added': set([])} process_network_ports.side_effect = [ failed_devices, Exception('Fake exception to get out of the loop')] n_agent = self._build_agent() # Hack to test loop # We start method and expect it will raise after 2nd loop # If something goes wrong, assert_has_calls below will catch it try: n_agent.rpc_loop(interface_polling) except Exception: pass # FIXME(salv-orlando): There should not be assertions on log # messages log_exception.assert_called_once_with( "Error while processing VIF ports") process_p_events.assert_has_calls([ mock.call(reply_ge_1, set(), set(), devices_not_ready, failed_devices, failed_ancillary_devices, set()), mock.call(reply_ge_2, set(['tap0']), set(), devices_not_ready, failed_devices, failed_ancillary_devices, set()) ]) process_network_ports.assert_has_calls([ mock.call({'current': set(['tap0']), 'removed': set([]), 'added': set(['tap0'])}, False), ]) self.assertTrue(update_stale.called) self._verify_mock_calls() class TunnelTestOSKen(TunnelTest, ovs_test_base.OVSOSKenTestBase): pass class TunnelTestUseVethInterco(TunnelTest): USE_VETH_INTERCONNECTION = True def _define_expected_calls(self, arp_responder=False, igmp_snooping=False): self.mock_int_bridge_cls_expected = [ mock.call(self.INT_BRIDGE, datapath_type=mock.ANY), ] self.mock_phys_bridge_cls_expected = [ mock.call(self.MAP_TUN_BRIDGE, datapath_type=mock.ANY), ] self.mock_tun_bridge_cls_expected = [ mock.call(self.TUN_BRIDGE, datapath_type=mock.ANY), ] self.mock_int_bridge_expected = [ mock.call.create(), mock.call.set_secure_mode(), mock.call.setup_controllers(mock.ANY), mock.call.set_igmp_snooping_state(igmp_snooping), mock.call.setup_default_table(), ] self.mock_map_tun_bridge_expected = [ mock.call.create(), mock.call.set_secure_mode(), mock.call.setup_controllers(mock.ANY), mock.call.setup_default_table(), mock.call.add_port('phy-%s' % self.MAP_TUN_BRIDGE), ] self.mock_int_bridge_expected += [ mock.call.db_get_val('Interface', 'int-%s' % self.MAP_TUN_BRIDGE, 'type', log_errors=False), 
mock.call.add_port('int-%s' % self.MAP_TUN_BRIDGE) ] self.mock_int_bridge_expected += [ mock.call.drop_port(in_port=self.MAP_TUN_INT_OFPORT), ] self.mock_map_tun_bridge_expected += [ mock.call.drop_port(in_port=self.MAP_TUN_PHY_OFPORT), ] self.mock_aux_bridge = self.ovs_bridges[self.AUX_BRIDGE] self.mock_aux_bridge_expected = [ ] self.mock_tun_bridge_expected = [ mock.call.create(secure_mode=True), mock.call.setup_controllers(mock.ANY), mock.call.port_exists('patch-int'), mock.ANY, mock.call.add_patch_port('patch-int', 'patch-tun'), ] self.mock_int_bridge_expected += [ mock.call.port_exists('patch-tun'), mock.call.add_patch_port('patch-tun', 'patch-int') ] self.mock_int_bridge_expected += [ mock.call.get_vif_ports((ovs_lib.INVALID_OFPORT, ovs_lib.UNASSIGNED_OFPORT)), mock.call.get_ports_attributes( 'Port', columns=['name', 'other_config', 'tag'], ports=[]) ] self.mock_tun_bridge_expected += [ mock.call.setup_default_table(self.INT_OFPORT, arp_responder), ] self.ipdevice_expected = [ mock.call('int-%s' % self.MAP_TUN_BRIDGE), mock.call().exists(), mock.ANY, mock.call().link.delete() ] self.ipwrapper_expected = [ mock.call(), mock.call().add_veth('int-%s' % self.MAP_TUN_BRIDGE, 'phy-%s' % self.MAP_TUN_BRIDGE) ] self.get_bridges_expected = [mock.call(), mock.call()] self.inta_expected = [mock.call.link.set_up()] self.intb_expected = [mock.call.link.set_up()] self.execute_expected = [mock.call(['udevadm', 'settle', '--timeout=10'])] class TunnelTestUseVethIntercoOSKen(TunnelTestUseVethInterco, ovs_test_base.OVSOSKenTestBase): pass class TunnelTestWithMTU(TunnelTestUseVethInterco): VETH_MTU = 1500 def _define_expected_calls(self, arp_responder=False, igmp_snooping=False): super(TunnelTestWithMTU, self)._define_expected_calls( arp_responder, igmp_snooping) self.inta_expected.append(mock.call.link.set_mtu(self.VETH_MTU)) self.intb_expected.append(mock.call.link.set_mtu(self.VETH_MTU)) class TunnelTestWithMTUOSKen(TunnelTestWithMTU, ovs_test_base.OVSOSKenTestBase): pass ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_vlanmanager.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_vlanmanager.0000644000175000017500000001200600000000000033610 0ustar00coreycorey00000000000000# Copyright 2016 Red Hat, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
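# NOTE: Illustrative sketch, not part of the original module. The
# LocalVlanManagerFixture below deletes the `_instance` attribute to
# defeat a class-attribute singleton; this minimal stand-in (hypothetical
# class name) shows the pattern being reset, under the assumption that
# LocalVlanManager caches its instance the same way.
class _SingletonSketch(object):
    def __new__(cls):
        # Reuse the cached instance; deleting `cls._instance` (as the
        # fixture does) forces the next call to build a fresh object.
        if not hasattr(cls, '_instance'):
            cls._instance = super(_SingletonSketch, cls).__new__(cls)
        return cls._instance


assert _SingletonSketch() is _SingletonSketch()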
import fixtures import testtools from neutron.plugins.ml2.drivers.openvswitch.agent import vlanmanager from neutron.tests import base class LocalVlanManagerFixture(fixtures.Fixture): def _setUp(self): super(LocalVlanManagerFixture, self)._setUp() self.vlan_manager = vlanmanager.LocalVlanManager() self.addCleanup(self.restore_manager) # Remove _instance attribute from VlanManager in order to not obtain a # singleton del vlanmanager.LocalVlanManager._instance self.manager = vlanmanager.LocalVlanManager() def restore_manager(self): vlanmanager.LocalVlanManager._instance = self.vlan_manager class TestLocalVLANMapping(base.BaseTestCase): def test___eq___equal(self): mapping1 = vlanmanager.LocalVLANMapping(1, 2, 3, 4, 5) mapping2 = vlanmanager.LocalVLANMapping(1, 2, 3, 4, 5) self.assertEqual(mapping1, mapping2) def test___eq___different(self): mapping1 = vlanmanager.LocalVLANMapping(1, 2, 3, 4, 5) mapping2 = vlanmanager.LocalVLANMapping(1, 2, 4, 4, 5) self.assertNotEqual(mapping1, mapping2) def test___eq___different_type(self): mapping = vlanmanager.LocalVLANMapping(1, 2, 3, 4, 5) self.assertNotEqual(mapping, "foo") class TestLocalVlanManager(base.BaseTestCase): def setUp(self): super(TestLocalVlanManager, self).setUp() self.vlan_manager = self.useFixture(LocalVlanManagerFixture()).manager def test_is_singleton(self): self.vlan_manager.add(1, None, None, None, None) new_vlan_manager = vlanmanager.LocalVlanManager() self.assertIs(new_vlan_manager, self.vlan_manager) self.assertItemsEqual(new_vlan_manager.mapping, self.vlan_manager.mapping) def test_in_operator_on_key(self): self.vlan_manager.add(1, None, None, None, None) self.assertIn(1, self.vlan_manager) self.assertNotIn(2, self.vlan_manager) def test_iterator_returns_vlan_mappings(self): created_vlans = [] for val in range(3): self.vlan_manager.add(val, val, val, val, val) created_vlans.append(self.vlan_manager.get(val)) self.assertItemsEqual(created_vlans, list(self.vlan_manager)) def test_get_net_uuid_existing(self): port_id = 'port-id' vlan_data = (2, 3, 4, 5, {port_id: 'port'}) net_id = 1 self.vlan_manager.add(net_id, *vlan_data) obtained_net_id = self.vlan_manager.get_net_uuid(port_id) self.assertEqual(net_id, obtained_net_id) def test_get_net_uuid_non_existing_raises_exception(self): vlan_data = (1, 2, 3, 4, 5, {'port_id': 'port'}) self.vlan_manager.add(*vlan_data) with testtools.ExpectedException(vlanmanager.VifIdNotFound): self.vlan_manager.get_net_uuid('non-existing-port') def test_add_and_get(self): vlan_data = (2, 3, 4, 5, 6) expected_vlan_mapping = vlanmanager.LocalVLANMapping(*vlan_data) self.vlan_manager.add(1, *vlan_data) vlan_mapping = self.vlan_manager.get(1) self.assertEqual(expected_vlan_mapping, vlan_mapping) def test_add_existing_raises_exception(self): vlan_data = (2, 3, 4, 5, 6) self.vlan_manager.add(1, *vlan_data) with testtools.ExpectedException(vlanmanager.MappingAlreadyExists): self.vlan_manager.add(1, *vlan_data) def test_get_non_existing_raises_keyerror(self): with testtools.ExpectedException(vlanmanager.MappingNotFound): self.vlan_manager.get(1) def test_pop(self): vlan_data = (2, 3, 4, 5, 6) expected_vlan_mapping = vlanmanager.LocalVLANMapping(*vlan_data) self.vlan_manager.add(1, *vlan_data) vlan_mapping = self.vlan_manager.pop(1) self.assertEqual(expected_vlan_mapping, vlan_mapping) self.assertFalse(self.vlan_manager.mapping) def test_pop_non_existing_raises_exception(self): with testtools.ExpectedException(vlanmanager.MappingNotFound): self.vlan_manager.pop(1) def test_update_segmentation_id(self): 
self.vlan_manager.add('net_id', 'vlan_id', 'vlan', 'phys_net', 1001, None) self.assertEqual(1001, self.vlan_manager.get('net_id').segmentation_id) self.vlan_manager.update_segmentation_id('net_id', 1002) self.assertEqual(1002, self.vlan_manager.get('net_id').segmentation_id) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.471046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/mech_driver/0000755000175000017500000000000000000000000031447 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/mech_driver/__init__.py0000644000175000017500000000000000000000000033546 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/mech_driver/test_mech_openvswitch.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/openvswitch/mech_driver/test_mech_o0000644000175000017500000004067300000000000033675 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
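# NOTE: Illustrative sketch, not part of the original module. The
# test_driver_responsible_for_ports_allocation case below builds its
# 'allocation' profile value with `uuid -v5 <agent-uuid> host:eth0`, as
# its inline comment records. Assuming uuid.uuid5 implements the same
# RFC 4122 version-5 (SHA-1) scheme as that CLI, the value can be
# reproduced like so:
import uuid

_AGENT_NS = uuid.UUID('87ee7d5c-73bb-11e8-9008-c4d987b2a692')
# Expected string taken from the test below, not computed by hand here.
assert str(uuid.uuid5(_AGENT_NS, 'host:eth0')) == \
    '13cc0ed9-e802-5eaa-b4c7-3441855e31f2'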
import mock from neutron_lib.api.definitions import portbindings from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib import constants from neutron_lib.plugins.ml2 import api from oslo_config import cfg from neutron.conf.plugins.ml2.drivers.openvswitch import mech_ovs_conf from neutron.plugins.ml2.drivers.openvswitch.agent.common import ( constants as a_const) from neutron.plugins.ml2.drivers.openvswitch.mech_driver import ( mech_openvswitch) from neutron.tests.unit.plugins.ml2 import _test_mech_agent as base class OpenvswitchMechanismBaseTestCase(base.AgentMechanismBaseTestCase): VIF_TYPE = portbindings.VIF_TYPE_OVS VIF_DETAILS = {'bridge_name': 'br-int', portbindings.OVS_DATAPATH_TYPE: 'system', portbindings.CAP_PORT_FILTER: True, portbindings.OVS_HYBRID_PLUG: True, portbindings.VIF_DETAILS_CONNECTIVITY: portbindings.CONNECTIVITY_L2} AGENT_TYPE = constants.AGENT_TYPE_OVS GOOD_MAPPINGS = {'fake_physical_network': 'fake_bridge'} GOOD_TUNNEL_TYPES = ['gre', 'vxlan'] GOOD_CONFIGS = {'bridge_mappings': GOOD_MAPPINGS, 'integration_bridge': 'br-int', 'tunnel_types': GOOD_TUNNEL_TYPES} BAD_MAPPINGS = {'wrong_physical_network': 'wrong_bridge'} BAD_TUNNEL_TYPES = ['bad_tunnel_type'] BAD_CONFIGS = {'bridge_mappings': BAD_MAPPINGS, 'integration_bridge': 'br-int', 'tunnel_types': BAD_TUNNEL_TYPES} AGENTS = [{'alive': True, 'configurations': GOOD_CONFIGS, 'host': 'host'}] AGENTS_DEAD = [{'alive': False, 'configurations': GOOD_CONFIGS, 'host': 'dead_host'}] AGENTS_BAD = [{'alive': False, 'configurations': GOOD_CONFIGS, 'host': 'bad_host_1'}, {'alive': True, 'configurations': BAD_CONFIGS, 'host': 'bad_host_2'}] def setUp(self): super(OpenvswitchMechanismBaseTestCase, self).setUp() cfg.CONF.set_override('firewall_driver', 'iptables_hybrid', 'SECURITYGROUP') self.driver = mech_openvswitch.OpenvswitchMechanismDriver() self.driver.initialize() def test__set_bridge_name_notify(self): def fake_callback(resource, event, trigger, payload=None): trigger('fake-br-name') def noop_callback(resource, event, trigger, payload=None): pass # hardcode callback to override bridge name registry.subscribe(fake_callback, a_const.OVS_BRIDGE_NAME, events.BEFORE_READ) fake_vif_details = {} fake_agent = {'configurations': {'integration_bridge': 'fake-br'}} old_fake_agent = {'configurations': {}} self.driver._set_bridge_name('foo', fake_vif_details, fake_agent) # assert that callback value is used self.assertEqual( 'fake-br-name', fake_vif_details.get(portbindings.VIF_DETAILS_BRIDGE_NAME, '')) # replace callback with noop registry.unsubscribe(fake_callback, a_const.OVS_BRIDGE_NAME, events.BEFORE_READ) registry.subscribe(noop_callback, a_const.OVS_BRIDGE_NAME, events.BEFORE_READ) fake_vif_details = {} self.driver._set_bridge_name('foo', fake_vif_details, fake_agent) # assert that agent config value is used self.assertEqual( 'fake-br', fake_vif_details.get(portbindings.VIF_DETAILS_BRIDGE_NAME, '')) fake_vif_details = {} self.driver._set_bridge_name('foo', fake_vif_details, old_fake_agent) # assert that if agent does not supply integration_bridge bridge_name # is not set in vif:binding-details self.assertIsNone( fake_vif_details.get(portbindings.VIF_DETAILS_BRIDGE_NAME)) class OpenvswitchMechanismSGDisabledBaseTestCase( OpenvswitchMechanismBaseTestCase): VIF_DETAILS = {'bridge_name': 'br-int', portbindings.OVS_DATAPATH_TYPE: 'system', portbindings.CAP_PORT_FILTER: False, portbindings.OVS_HYBRID_PLUG: False, portbindings.VIF_DETAILS_CONNECTIVITY: portbindings.CONNECTIVITY_L2} def 
setUp(self): cfg.CONF.set_override('enable_security_group', False, group='SECURITYGROUP') super(OpenvswitchMechanismSGDisabledBaseTestCase, self).setUp() class OpenvswitchMechanismHybridPlugTestCase(OpenvswitchMechanismBaseTestCase): def _make_port_ctx(self, agents): segments = [{api.ID: 'local_segment_id', api.NETWORK_TYPE: 'local'}] return base.FakePortContext(self.AGENT_TYPE, agents, segments, vnic_type=self.VNIC_TYPE) def test_backward_compat_with_unreporting_agent(self): hybrid = portbindings.OVS_HYBRID_PLUG # agent didn't report so it should be hybrid based on server config context = self._make_port_ctx(self.AGENTS) self.driver.bind_port(context) self.assertTrue(context._bound_vif_details[hybrid]) self.driver.vif_details[hybrid] = False context = self._make_port_ctx(self.AGENTS) self.driver.bind_port(context) self.assertFalse(context._bound_vif_details[hybrid]) def test_hybrid_plug_true_if_agent_requests(self): hybrid = portbindings.OVS_HYBRID_PLUG # set server side default to false and ensure that hybrid becomes # true if requested by the agent self.driver.vif_details[hybrid] = False agents = [{'alive': True, 'configurations': {hybrid: True}, 'host': 'host'}] context = self._make_port_ctx(agents) self.driver.bind_port(context) self.assertTrue(context._bound_vif_details[hybrid]) def test_hybrid_plug_false_if_agent_requests(self): hybrid = portbindings.OVS_HYBRID_PLUG # set server side default to true and ensure that hybrid becomes # false if requested by the agent self.driver.vif_details[hybrid] = True agents = [{'alive': True, 'configurations': {hybrid: False}, 'host': 'host'}] context = self._make_port_ctx(agents) self.driver.bind_port(context) self.assertFalse(context._bound_vif_details[hybrid]) class OpenvswitchMechanismGenericTestCase(OpenvswitchMechanismBaseTestCase, base.AgentMechanismGenericTestCase): def test_driver_responsible_for_ports_allocation(self): agents = [ {'agent_type': 'Open vSwitch agent', 'configurations': {'resource_provider_bandwidths': {'eth0': {}}}, 'id': '1', 'host': 'host'} ] segments = [] # uuid -v5 87ee7d5c-73bb-11e8-9008-c4d987b2a692 host:eth0 profile = {'allocation': '13cc0ed9-e802-5eaa-b4c7-3441855e31f2'} port_ctx = base.FakePortContext( self.AGENT_TYPE, agents, segments, vnic_type=portbindings.VNIC_NORMAL, profile=profile) with mock.patch.object(self.driver, '_possible_agents_for_port', return_value=agents): self.assertTrue( self.driver.responsible_for_ports_allocation(port_ctx)) class OpenvswitchMechanismLocalTestCase(OpenvswitchMechanismBaseTestCase, base.AgentMechanismLocalTestCase): pass class OpenvswitchMechanismFlatTestCase(OpenvswitchMechanismBaseTestCase, base.AgentMechanismFlatTestCase): pass class OpenvswitchMechanismVlanTestCase(OpenvswitchMechanismBaseTestCase, base.AgentMechanismVlanTestCase): pass class OpenvswitchMechanismGreTestCase(OpenvswitchMechanismBaseTestCase, base.AgentMechanismGreTestCase): pass class OpenvswitchMechanismSGDisabledLocalTestCase( OpenvswitchMechanismSGDisabledBaseTestCase, base.AgentMechanismLocalTestCase): pass class OpenvswitchMechanismFirewallUndefinedTestCase( OpenvswitchMechanismBaseTestCase, base.AgentMechanismLocalTestCase): def setUp(self): # this simple test case just ensures backward compatibility where # the server has no firewall driver configured, which should result # in hybrid plugging. 
super(OpenvswitchMechanismFirewallUndefinedTestCase, self).setUp() cfg.CONF.set_override('firewall_driver', '', 'SECURITYGROUP') self.driver = mech_openvswitch.OpenvswitchMechanismDriver() self.driver.initialize() class OpenvswitchMechanismDPDKTestCase(OpenvswitchMechanismBaseTestCase): GOOD_MAPPINGS = {'fake_physical_network': 'fake_bridge'} GOOD_TUNNEL_TYPES = ['gre', 'vxlan'] VHOST_CONFIGS = {'bridge_mappings': GOOD_MAPPINGS, 'integration_bridge': 'br-int', 'tunnel_types': GOOD_TUNNEL_TYPES, 'datapath_type': a_const.OVS_DATAPATH_NETDEV, 'ovs_capabilities': { 'iface_types': [a_const.OVS_DPDK_VHOST_USER]}} VHOST_SERVER_CONFIGS = {'bridge_mappings': GOOD_MAPPINGS, 'integration_bridge': 'br-int', 'tunnel_types': GOOD_TUNNEL_TYPES, 'datapath_type': a_const.OVS_DATAPATH_NETDEV, 'ovs_capabilities': { 'iface_types': [a_const.OVS_DPDK_VHOST_USER_CLIENT]}} SYSTEM_CONFIGS = {'bridge_mappings': GOOD_MAPPINGS, 'integration_bridge': 'br-int', 'tunnel_types': GOOD_TUNNEL_TYPES, 'datapath_type': a_const.OVS_DATAPATH_SYSTEM, 'ovs_capabilities': {'iface_types': []}} AGENT = {'alive': True, 'configurations': VHOST_CONFIGS, 'host': 'host'} AGENT_SERVER = {'alive': True, 'configurations': VHOST_SERVER_CONFIGS, 'host': 'host'} AGENT_SYSTEM = {'alive': True, 'configurations': SYSTEM_CONFIGS, 'host': 'host'} def test_get_vhost_mode(self): ifaces = [] result = self.driver.get_vhost_mode(ifaces) self.assertEqual(portbindings.VHOST_USER_MODE_CLIENT, result) ifaces = [a_const.OVS_DPDK_VHOST_USER] result = self.driver.get_vhost_mode(ifaces) self.assertEqual(portbindings.VHOST_USER_MODE_CLIENT, result) ifaces = [a_const.OVS_DPDK_VHOST_USER_CLIENT] result = self.driver.get_vhost_mode(ifaces) self.assertEqual(portbindings.VHOST_USER_MODE_SERVER, result) def test_get_vif_type(self): normal_port_cxt = base.FakePortContext(None, None, None) result = self.driver.get_vif_type(normal_port_cxt, self.AGENT, None) self.assertEqual(portbindings.VIF_TYPE_VHOST_USER, result) result = self.driver.get_vif_type(normal_port_cxt, self.AGENT_SERVER, None) self.assertEqual(portbindings.VIF_TYPE_VHOST_USER, result) result = self.driver.get_vif_type(normal_port_cxt, self.AGENT_SYSTEM, None) self.assertEqual(portbindings.VIF_TYPE_OVS, result) direct_port_cxt = base.FakePortContext( None, None, None, vnic_type=portbindings.VNIC_DIRECT) result = self.driver.get_vif_type(direct_port_cxt, self.AGENT, None) self.assertEqual(portbindings.VIF_TYPE_OVS, result) class OpenvswitchMechanismSRIOVTestCase(OpenvswitchMechanismBaseTestCase): def _make_port_ctx(self, agents, profile=None): segments = [{api.ID: 'local_segment_id', api.NETWORK_TYPE: 'local'}] return base.FakePortContext(self.AGENT_TYPE, agents, segments, vnic_type=portbindings.VNIC_DIRECT, profile=profile) @mock.patch('neutron.plugins.ml2.drivers.mech_agent.' 'SimpleAgentMechanismDriverBase.bind_port') def test_bind_port_sriov_legacy(self, mocked_bind_port): context = self._make_port_ctx(self.AGENTS) self.driver.bind_port(context) mocked_bind_port.assert_not_called() @mock.patch('neutron.plugins.ml2.drivers.mech_agent.' 
'SimpleAgentMechanismDriverBase.bind_port') def test_bind_port_sriov_switchdev(self, mocked_bind_port): profile = {'capabilities': ['switchdev']} context = self._make_port_ctx(self.AGENTS, profile=profile) self.driver.bind_port(context) mocked_bind_port.assert_called() class OpenvswitchMechVnicTypesTestCase(OpenvswitchMechanismBaseTestCase): supported_vnics = [portbindings.VNIC_NORMAL, portbindings.VNIC_DIRECT, portbindings.VNIC_SMARTNIC] def setUp(self): self.blacklist_cfg = { 'OVS_DRIVER': { 'vnic_type_blacklist': [] } } self.default_supported_vnics = self.supported_vnics super(OpenvswitchMechVnicTypesTestCase, self).setUp() def test_default_vnic_types(self): self.assertEqual(self.default_supported_vnics, self.driver.supported_vnic_types) def test_vnic_type_blacklist_valid_item(self): self.blacklist_cfg['OVS_DRIVER']['vnic_type_blacklist'] = \ [portbindings.VNIC_DIRECT] fake_conf = cfg.CONF fake_conf_fixture = base.MechDriverConfFixture( fake_conf, self.blacklist_cfg, mech_ovs_conf.register_ovs_mech_driver_opts) self.useFixture(fake_conf_fixture) test_driver = mech_openvswitch.OpenvswitchMechanismDriver() supported_vnic_types = test_driver.supported_vnic_types self.assertNotIn(portbindings.VNIC_DIRECT, supported_vnic_types) self.assertEqual(len(self.default_supported_vnics) - 1, len(supported_vnic_types)) def test_vnic_type_blacklist_not_valid_item(self): self.blacklist_cfg['OVS_DRIVER']['vnic_type_blacklist'] = ['foo'] fake_conf = cfg.CONF fake_conf_fixture = base.MechDriverConfFixture( fake_conf, self.blacklist_cfg, mech_ovs_conf.register_ovs_mech_driver_opts) self.useFixture(fake_conf_fixture) self.assertRaises(ValueError, mech_openvswitch.OpenvswitchMechanismDriver) def test_vnic_type_blacklist_all_items(self): self.blacklist_cfg['OVS_DRIVER']['vnic_type_blacklist'] = \ self.supported_vnics fake_conf = cfg.CONF fake_conf_fixture = base.MechDriverConfFixture( fake_conf, self.blacklist_cfg, mech_ovs_conf.register_ovs_mech_driver_opts) self.useFixture(fake_conf_fixture) self.assertRaises(ValueError, mech_openvswitch.OpenvswitchMechanismDriver) class OpenvswitchMechDeviceMappingsTestCase(OpenvswitchMechanismBaseTestCase): def test_standard_device_mappings(self): mappings = self.driver.get_standard_device_mappings(self.AGENTS[0]) self.assertEqual( len(self.GOOD_CONFIGS['bridge_mappings']), len(mappings)) for ph_orig, br_orig in self.GOOD_CONFIGS['bridge_mappings'].items(): self.assertIn(ph_orig, mappings) self.assertEqual([br_orig], mappings[ph_orig]) def test_standard_device_mappings_negative(self): fake_agent = {'agent_type': constants.AGENT_TYPE_OVS, 'configurations': {}} self.assertRaises(ValueError, self.driver.get_standard_device_mappings, fake_agent) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.471046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/ovn/0000755000175000017500000000000000000000000025411 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/ovn/__init__.py0000644000175000017500000000000000000000000027510 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.471046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/0000755000175000017500000000000000000000000027700 
5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/__init__.py0000644000175000017500000000000000000000000031777 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4750462 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/0000755000175000017500000000000000000000000031015 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/__init__.py0000644000175000017500000000000000000000000033114 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4750462 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/schemas/0000755000175000017500000000000000000000000032440 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/schemas/ovn-nb.ovsschema 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/schemas/ovn-n0000644000175000017500000005512700000000000033432 0ustar00coreycorey00000000000000{ "name": "OVN_Northbound", "version": "5.16.0", "cksum": "923459061 23095", "tables": { "NB_Global": { "columns": { "nb_cfg": {"type": {"key": "integer"}}, "sb_cfg": {"type": {"key": "integer"}}, "hv_cfg": {"type": {"key": "integer"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "connections": { "type": {"key": {"type": "uuid", "refTable": "Connection"}, "min": 0, "max": "unlimited"}}, "ssl": { "type": {"key": {"type": "uuid", "refTable": "SSL"}, "min": 0, "max": 1}}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "ipsec": {"type": "boolean"}}, "maxRows": 1, "isRoot": true}, "Logical_Switch": { "columns": { "name": {"type": "string"}, "ports": {"type": {"key": {"type": "uuid", "refTable": "Logical_Switch_Port", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "acls": {"type": {"key": {"type": "uuid", "refTable": "ACL", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "qos_rules": {"type": {"key": {"type": "uuid", "refTable": "QoS", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "load_balancer": {"type": {"key": {"type": "uuid", "refTable": "Load_Balancer", "refType": "weak"}, "min": 0, "max": "unlimited"}}, "dns_records": {"type": {"key": {"type": "uuid", "refTable": "DNS", "refType": "weak"}, "min": 0, "max": "unlimited"}}, "other_config": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true}, "Logical_Switch_Port": { "columns": { "name": {"type": "string"}, "type": {"type": "string"}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "parent_name": {"type": {"key": "string", "min": 0, "max": 1}}, "tag_request": { "type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 4095}, "min": 0, "max": 1}}, "tag": { "type": {"key": {"type": 
"integer", "minInteger": 1, "maxInteger": 4095}, "min": 0, "max": 1}}, "addresses": {"type": {"key": "string", "min": 0, "max": "unlimited"}}, "dynamic_addresses": {"type": {"key": "string", "min": 0, "max": 1}}, "port_security": {"type": {"key": "string", "min": 0, "max": "unlimited"}}, "up": {"type": {"key": "boolean", "min": 0, "max": 1}}, "enabled": {"type": {"key": "boolean", "min": 0, "max": 1}}, "dhcpv4_options": {"type": {"key": {"type": "uuid", "refTable": "DHCP_Options", "refType": "weak"}, "min": 0, "max": 1}}, "dhcpv6_options": {"type": {"key": {"type": "uuid", "refTable": "DHCP_Options", "refType": "weak"}, "min": 0, "max": 1}}, "ha_chassis_group": { "type": {"key": {"type": "uuid", "refTable": "HA_Chassis_Group", "refType": "strong"}, "min": 0, "max": 1}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": false}, "Address_Set": { "columns": { "name": {"type": "string"}, "addresses": {"type": {"key": "string", "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": true}, "Port_Group": { "columns": { "name": {"type": "string"}, "ports": {"type": {"key": {"type": "uuid", "refTable": "Logical_Switch_Port", "refType": "weak"}, "min": 0, "max": "unlimited"}}, "acls": {"type": {"key": {"type": "uuid", "refTable": "ACL", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": true}, "Load_Balancer": { "columns": { "name": {"type": "string"}, "vips": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "protocol": { "type": {"key": {"type": "string", "enum": ["set", ["tcp", "udp"]]}, "min": 0, "max": 1}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true}, "ACL": { "columns": { "name": {"type": {"key": {"type": "string", "maxLength": 63}, "min": 0, "max": 1}}, "priority": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 32767}}}, "direction": {"type": {"key": {"type": "string", "enum": ["set", ["from-lport", "to-lport"]]}}}, "match": {"type": "string"}, "action": {"type": {"key": {"type": "string", "enum": ["set", ["allow", "allow-related", "drop", "reject"]]}}}, "log": {"type": "boolean"}, "severity": {"type": {"key": {"type": "string", "enum": ["set", ["alert", "warning", "notice", "info", "debug"]]}, "min": 0, "max": 1}}, "meter": {"type": {"key": "string", "min": 0, "max": 1}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": false}, "QoS": { "columns": { "priority": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 32767}}}, "direction": {"type": {"key": {"type": "string", "enum": ["set", ["from-lport", "to-lport"]]}}}, "match": {"type": "string"}, "action": {"type": {"key": {"type": "string", "enum": ["set", ["dscp"]]}, "value": {"type": "integer", "minInteger": 0, "maxInteger": 63}, "min": 0, "max": "unlimited"}}, "bandwidth": {"type": {"key": {"type": "string", "enum": ["set", ["rate", "burst"]]}, "value": {"type": "integer", "minInteger": 1, "maxInteger": 4294967295}, "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": false}, "Meter": { "columns": { "name": {"type": "string"}, "unit": {"type": {"key": 
{"type": "string", "enum": ["set", ["kbps", "pktps"]]}}}, "bands": {"type": {"key": {"type": "uuid", "refTable": "Meter_Band", "refType": "strong"}, "min": 1, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": true}, "Meter_Band": { "columns": { "action": {"type": {"key": {"type": "string", "enum": ["set", ["drop"]]}}}, "rate": {"type": {"key": {"type": "integer", "minInteger": 1, "maxInteger": 4294967295}}}, "burst_size": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 4294967295}}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": false}, "Logical_Router": { "columns": { "name": {"type": "string"}, "ports": {"type": {"key": {"type": "uuid", "refTable": "Logical_Router_Port", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "static_routes": {"type": {"key": {"type": "uuid", "refTable": "Logical_Router_Static_Route", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "policies": { "type": {"key": {"type": "uuid", "refTable": "Logical_Router_Policy", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "enabled": {"type": {"key": "boolean", "min": 0, "max": 1}}, "nat": {"type": {"key": {"type": "uuid", "refTable": "NAT", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "load_balancer": {"type": {"key": {"type": "uuid", "refTable": "Load_Balancer", "refType": "weak"}, "min": 0, "max": "unlimited"}}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true}, "Logical_Router_Port": { "columns": { "name": {"type": "string"}, "gateway_chassis": { "type": {"key": {"type": "uuid", "refTable": "Gateway_Chassis", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "ha_chassis_group": { "type": {"key": {"type": "uuid", "refTable": "HA_Chassis_Group", "refType": "strong"}, "min": 0, "max": 1}}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "networks": {"type": {"key": "string", "min": 1, "max": "unlimited"}}, "mac": {"type": "string"}, "peer": {"type": {"key": "string", "min": 0, "max": 1}}, "enabled": {"type": {"key": "boolean", "min": 0, "max": 1}}, "ipv6_ra_configs": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": false}, "Logical_Router_Static_Route": { "columns": { "ip_prefix": {"type": "string"}, "policy": {"type": {"key": {"type": "string", "enum": ["set", ["src-ip", "dst-ip"]]}, "min": 0, "max": 1}}, "nexthop": {"type": "string"}, "output_port": {"type": {"key": "string", "min": 0, "max": 1}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": false}, "Logical_Router_Policy": { "columns": { "priority": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 32767}}}, "match": {"type": "string"}, "action": {"type": { "key": {"type": "string", "enum": ["set", ["allow", "drop", "reroute"]]}}}, "nexthop": {"type": {"key": "string", "min": 0, "max": 1}}}, "isRoot": false}, "NAT": { "columns": { "external_ip": {"type": "string"}, "external_mac": {"type": {"key": "string", "min": 0, "max": 1}}, "logical_ip": {"type": "string"}, "logical_port": {"type": {"key": "string", "min": 0, "max": 1}}, "type": 
{"type": {"key": {"type": "string", "enum": ["set", ["dnat", "snat", "dnat_and_snat" ]]}}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": false}, "DHCP_Options": { "columns": { "cidr": {"type": "string"}, "options": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true}, "Connection": { "columns": { "target": {"type": "string"}, "max_backoff": {"type": {"key": {"type": "integer", "minInteger": 1000}, "min": 0, "max": 1}}, "inactivity_probe": {"type": {"key": "integer", "min": 0, "max": 1}}, "other_config": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "external_ids": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "is_connected": {"type": "boolean", "ephemeral": true}, "status": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}, "ephemeral": true}}, "indexes": [["target"]]}, "DNS": { "columns": { "records": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "external_ids": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true}, "SSL": { "columns": { "private_key": {"type": "string"}, "certificate": {"type": "string"}, "ca_cert": {"type": "string"}, "bootstrap_ca_cert": {"type": "boolean"}, "ssl_protocols": {"type": "string"}, "ssl_ciphers": {"type": "string"}, "external_ids": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "maxRows": 1}, "Gateway_Chassis": { "columns": { "name": {"type": "string"}, "chassis_name": {"type": "string"}, "priority": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 32767}}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": false}, "HA_Chassis": { "columns": { "chassis_name": {"type": "string"}, "priority": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 32767}}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": false}, "HA_Chassis_Group": { "columns": { "name": {"type": "string"}, "ha_chassis": { "type": {"key": {"type": "uuid", "refTable": "HA_Chassis", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": true}} } ././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/schemas/ovn-sb.ovsschema 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/schemas/ovn-s0000644000175000017500000004750500000000000033440 0ustar00coreycorey00000000000000{ "name": "OVN_Southbound", "version": "2.4.0", "cksum": "3059284885 20260", "tables": { "SB_Global": { "columns": { "nb_cfg": {"type": {"key": "integer"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "connections": { "type": {"key": {"type": "uuid", "refTable": "Connection"}, "min": 0, "max": "unlimited"}}, "ssl": { "type": {"key": {"type": "uuid", "refTable": "SSL"}, "min": 0, "max": 1}}, "options": { "type": {"key": "string", 
"value": "string", "min": 0, "max": "unlimited"}}, "ipsec": {"type": "boolean"}}, "maxRows": 1, "isRoot": true}, "Chassis": { "columns": { "name": {"type": "string"}, "hostname": {"type": "string"}, "encaps": {"type": {"key": {"type": "uuid", "refTable": "Encap"}, "min": 1, "max": "unlimited"}}, "vtep_logical_switches" : {"type": {"key": "string", "min": 0, "max": "unlimited"}}, "nb_cfg": {"type": {"key": "integer"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "transport_zones" : {"type": {"key": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true, "indexes": [["name"]]}, "Encap": { "columns": { "type": {"type": {"key": { "type": "string", "enum": ["set", ["geneve", "stt", "vxlan"]]}}}, "options": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "ip": {"type": "string"}, "chassis_name": {"type": "string"}}, "indexes": [["type", "ip"]]}, "Address_Set": { "columns": { "name": {"type": "string"}, "addresses": {"type": {"key": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": true}, "Port_Group": { "columns": { "name": {"type": "string"}, "ports": {"type": {"key": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": true}, "Logical_Flow": { "columns": { "logical_datapath": {"type": {"key": {"type": "uuid", "refTable": "Datapath_Binding"}}}, "pipeline": {"type": {"key": {"type": "string", "enum": ["set", ["ingress", "egress"]]}}}, "table_id": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 23}}}, "priority": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 65535}}}, "match": {"type": "string"}, "actions": {"type": "string"}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true}, "Multicast_Group": { "columns": { "datapath": {"type": {"key": {"type": "uuid", "refTable": "Datapath_Binding"}}}, "name": {"type": "string"}, "tunnel_key": { "type": {"key": {"type": "integer", "minInteger": 32768, "maxInteger": 65535}}}, "ports": {"type": {"key": {"type": "uuid", "refTable": "Port_Binding", "refType": "weak"}, "min": 1, "max": "unlimited"}}}, "indexes": [["datapath", "tunnel_key"], ["datapath", "name"]], "isRoot": true}, "Meter": { "columns": { "name": {"type": "string"}, "unit": {"type": {"key": {"type": "string", "enum": ["set", ["kbps", "pktps"]]}}}, "bands": {"type": {"key": {"type": "uuid", "refTable": "Meter_Band", "refType": "strong"}, "min": 1, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": true}, "Meter_Band": { "columns": { "action": {"type": {"key": {"type": "string", "enum": ["set", ["drop"]]}}}, "rate": {"type": {"key": {"type": "integer", "minInteger": 1, "maxInteger": 4294967295}}}, "burst_size": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 4294967295}}}}, "isRoot": false}, "Datapath_Binding": { "columns": { "tunnel_key": { "type": {"key": {"type": "integer", "minInteger": 1, "maxInteger": 16777215}}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["tunnel_key"]], "isRoot": true}, "Port_Binding": { "columns": { "logical_port": {"type": "string"}, "type": {"type": "string"}, "gateway_chassis": { "type": {"key": {"type": "uuid", "refTable": "Gateway_Chassis", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "ha_chassis_group": { "type": {"key": {"type": "uuid", "refTable": "HA_Chassis_Group", "refType": "strong"}, "min": 0, "max": 1}}, "options": { "type": 
{"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "datapath": {"type": {"key": {"type": "uuid", "refTable": "Datapath_Binding"}}}, "tunnel_key": { "type": {"key": {"type": "integer", "minInteger": 1, "maxInteger": 32767}}}, "parent_port": {"type": {"key": "string", "min": 0, "max": 1}}, "tag": { "type": {"key": {"type": "integer", "minInteger": 1, "maxInteger": 4095}, "min": 0, "max": 1}}, "chassis": {"type": {"key": {"type": "uuid", "refTable": "Chassis", "refType": "weak"}, "min": 0, "max": 1}}, "encap": {"type": {"key": {"type": "uuid", "refTable": "Encap", "refType": "weak"}, "min": 0, "max": 1}}, "mac": {"type": {"key": "string", "min": 0, "max": "unlimited"}}, "nat_addresses": {"type": {"key": "string", "min": 0, "max": "unlimited"}}, "external_ids": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["datapath", "tunnel_key"], ["logical_port"]], "isRoot": true}, "MAC_Binding": { "columns": { "logical_port": {"type": "string"}, "ip": {"type": "string"}, "mac": {"type": "string"}, "datapath": {"type": {"key": {"type": "uuid", "refTable": "Datapath_Binding"}}}}, "indexes": [["logical_port", "ip"]], "isRoot": true}, "DHCP_Options": { "columns": { "name": {"type": "string"}, "code": { "type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 254}}}, "type": { "type": {"key": { "type": "string", "enum": ["set", ["bool", "uint8", "uint16", "uint32", "ipv4", "static_routes", "str"]]}}}}, "isRoot": true}, "DHCPv6_Options": { "columns": { "name": {"type": "string"}, "code": { "type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 254}}}, "type": { "type": {"key": { "type": "string", "enum": ["set", ["ipv6", "str", "mac"]]}}}}, "isRoot": true}, "Connection": { "columns": { "target": {"type": "string"}, "max_backoff": {"type": {"key": {"type": "integer", "minInteger": 1000}, "min": 0, "max": 1}}, "inactivity_probe": {"type": {"key": "integer", "min": 0, "max": 1}}, "read_only": {"type": "boolean"}, "role": {"type": "string"}, "other_config": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "external_ids": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "is_connected": {"type": "boolean", "ephemeral": true}, "status": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}, "ephemeral": true}}, "indexes": [["target"]]}, "SSL": { "columns": { "private_key": {"type": "string"}, "certificate": {"type": "string"}, "ca_cert": {"type": "string"}, "bootstrap_ca_cert": {"type": "boolean"}, "ssl_protocols": {"type": "string"}, "ssl_ciphers": {"type": "string"}, "external_ids": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "maxRows": 1}, "DNS": { "columns": { "records": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "datapaths": {"type": {"key": {"type": "uuid", "refTable": "Datapath_Binding"}, "min": 1, "max": "unlimited"}}, "external_ids": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": true}, "RBAC_Role": { "columns": { "name": {"type": "string"}, "permissions": { "type": {"key": {"type": "string"}, "value": {"type": "uuid", "refTable": "RBAC_Permission", "refType": "weak"}, "min": 0, "max": "unlimited"}}}, "isRoot": true}, "RBAC_Permission": { "columns": { "table": {"type": "string"}, "authorization": {"type": {"key": "string", "min": 0, "max": "unlimited"}}, "insert_delete": {"type": "boolean"}, "update" : {"type": {"key": "string", "min": 
0, "max": "unlimited"}}}, "isRoot": true}, "Gateway_Chassis": { "columns": { "name": {"type": "string"}, "chassis": {"type": {"key": {"type": "uuid", "refTable": "Chassis", "refType": "weak"}, "min": 0, "max": 1}}, "priority": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 32767}}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "options": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": false}, "HA_Chassis": { "columns": { "chassis": {"type": {"key": {"type": "uuid", "refTable": "Chassis", "refType": "weak"}, "min": 0, "max": 1}}, "priority": {"type": {"key": {"type": "integer", "minInteger": 0, "maxInteger": 32767}}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": false}, "HA_Chassis_Group": { "columns": { "name": {"type": "string"}, "ha_chassis": { "type": {"key": {"type": "uuid", "refTable": "HA_Chassis", "refType": "strong"}, "min": 0, "max": "unlimited"}}, "ref_chassis": {"type": {"key": {"type": "uuid", "refTable": "Chassis", "refType": "weak"}, "min": 0, "max": "unlimited"}}, "external_ids": { "type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "indexes": [["name"]], "isRoot": true}, "Controller_Event": { "columns": { "event_type": {"type": {"key": {"type": "string", "enum": ["set", ["empty_lb_backends"]]}}}, "event_info": {"type": {"key": "string", "value": "string", "min": 0, "max": "unlimited"}}, "chassis": {"type": {"key": {"type": "uuid", "refTable": "Chassis", "refType": "weak"}, "min": 0, "max": 1}}, "seq_num": {"type": {"key": "integer"}} }, "isRoot": true}, "IP_Multicast": { "columns": { "datapath": {"type": {"key": {"type": "uuid", "refTable": "Datapath_Binding", "refType": "weak"}}}, "enabled": {"type": {"key": "boolean", "min": 0, "max": 1}}, "querier": {"type": {"key": "boolean", "min": 0, "max": 1}}, "eth_src": {"type": "string"}, "ip4_src": {"type": "string"}, "table_size": {"type": {"key": "integer", "min": 0, "max": 1}}, "idle_timeout": {"type": {"key": "integer", "min": 0, "max": 1}}, "query_interval": {"type": {"key": "integer", "min": 0, "max": 1}}, "query_max_resp": {"type": {"key": "integer", "min": 0, "max": 1}}, "seq_no": {"type": "integer"}}, "indexes": [["datapath"]], "isRoot": true}, "IGMP_Group": { "columns": { "address": {"type": "string"}, "datapath": {"type": {"key": {"type": "uuid", "refTable": "Datapath_Binding", "refType": "weak"}, "min": 0, "max": 1}}, "chassis": {"type": {"key": {"type": "uuid", "refTable": "Chassis", "refType": "weak"}, "min": 0, "max": 1}}, "ports": {"type": {"key": {"type": "uuid", "refTable": "Port_Binding", "refType": "weak"}, "min": 0, "max": "unlimited"}}}, "indexes": [["address", "datapath", "chassis"]], "isRoot": true}}} ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_commands.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_commands0000644000175000017500000021620000000000000033601 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import mock from ovsdbapp.backend.ovs_idl import idlutils from neutron.common.ovn import acl as ovn_acl from neutron.common.ovn import constants as ovn_const from neutron.common.ovn import exceptions as ovn_exc from neutron.common.ovn import utils as ovn_utils from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import commands from neutron.tests import base from neutron.tests.unit import fake_resources as fakes class TestBaseCommandHelpers(base.BaseTestCase): def setUp(self): super(TestBaseCommandHelpers, self).setUp() self.column = 'ovn' self.new_value = '1' self.old_value = '2' def _get_fake_row_mutate(self): return fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={self.column: []}) def test__addvalue_to_list(self): fake_row_mutate = self._get_fake_row_mutate() commands._addvalue_to_list( fake_row_mutate, self.column, self.new_value) fake_row_mutate.addvalue.assert_called_once_with( self.column, self.new_value) fake_row_mutate.verify.assert_not_called() def test__delvalue_from_list(self): fake_row_mutate = self._get_fake_row_mutate() commands._delvalue_from_list( fake_row_mutate, self.column, self.old_value) fake_row_mutate.delvalue.assert_called_once_with( self.column, self.old_value) fake_row_mutate.verify.assert_not_called() def test__updatevalues_in_list_none(self): fake_row_mutate = self._get_fake_row_mutate() commands._updatevalues_in_list(fake_row_mutate, self.column) fake_row_mutate.addvalue.assert_not_called() fake_row_mutate.delvalue.assert_not_called() fake_row_mutate.verify.assert_not_called() def test__updatevalues_in_list_empty(self): fake_row_mutate = self._get_fake_row_mutate() commands._updatevalues_in_list(fake_row_mutate, self.column, [], []) fake_row_mutate.addvalue.assert_not_called() fake_row_mutate.delvalue.assert_not_called() fake_row_mutate.verify.assert_not_called() def test__updatevalues_in_list(self): fake_row_mutate = self._get_fake_row_mutate() commands._updatevalues_in_list( fake_row_mutate, self.column, new_values=[self.new_value], old_values=[self.old_value]) fake_row_mutate.addvalue.assert_called_once_with( self.column, self.new_value) fake_row_mutate.delvalue.assert_called_once_with( self.column, self.old_value) fake_row_mutate.verify.assert_not_called() class TestBaseCommand(base.BaseTestCase): def setUp(self): super(TestBaseCommand, self).setUp() self.ovn_api = fakes.FakeOvsdbNbOvnIdl() self.transaction = fakes.FakeOvsdbTransaction() self.ovn_api.transaction = self.transaction class TestCheckLivenessCommand(TestBaseCommand): def test_check_liveness(self): old_ng_cfg = self.ovn_api.nb_global.ng_cfg cmd = commands.CheckLivenessCommand(self.ovn_api) cmd.run_idl(self.transaction) self.assertNotEqual(cmd.result, old_ng_cfg) class TestAddLSwitchPortCommand(TestBaseCommand): def test_lswitch_not_found(self): with mock.patch.object(idlutils, 'row_by_value', side_effect=idlutils.RowNotFound): cmd = commands.AddLSwitchPortCommand( self.ovn_api, 'fake-lsp', 'fake-lswitch', may_exist=True) self.assertRaises(RuntimeError, cmd.run_idl, self.transaction) self.transaction.insert.assert_not_called() def test_lswitch_port_exists(self): with 
mock.patch.object(idlutils, 'row_by_value', return_value=mock.ANY): cmd = commands.AddLSwitchPortCommand( self.ovn_api, 'fake-lsp', 'fake-lswitch', may_exist=True) cmd.run_idl(self.transaction) self.transaction.insert.assert_not_called() def test_lswitch_port_add_exists(self): fake_lswitch = fakes.FakeOvsdbRow.create_one_ovsdb_row() with mock.patch.object(idlutils, 'row_by_value', return_value=fake_lswitch): fake_lsp = fakes.FakeOvsdbRow.create_one_ovsdb_row() self.ovn_api._tables['Logical_Switch_Port'].rows[fake_lsp.uuid] = \ fake_lsp self.transaction.insert.return_value = fake_lsp cmd = commands.AddLSwitchPortCommand( self.ovn_api, fake_lsp.name, fake_lswitch.name, may_exist=False) cmd.run_idl(self.transaction) # NOTE(rtheis): Mocking the transaction allows this insert # to succeed when it normally would fail due the duplicate name. self.transaction.insert.assert_called_once_with( self.ovn_api._tables['Logical_Switch_Port']) def _test_lswitch_port_add(self, may_exist=True): lsp_name = 'fake-lsp' fake_lswitch = fakes.FakeOvsdbRow.create_one_ovsdb_row() with mock.patch.object(idlutils, 'row_by_value', side_effect=[fake_lswitch, None]): fake_lsp = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'foo': None}) self.transaction.insert.return_value = fake_lsp cmd = commands.AddLSwitchPortCommand( self.ovn_api, lsp_name, fake_lswitch.name, may_exist=may_exist, foo='bar') cmd.run_idl(self.transaction) self.transaction.insert.assert_called_once_with( self.ovn_api._tables['Logical_Switch_Port']) fake_lswitch.addvalue.assert_called_once_with( 'ports', fake_lsp.uuid) self.assertEqual(lsp_name, fake_lsp.name) self.assertEqual('bar', fake_lsp.foo) def test_lswitch_port_add_may_exist(self): self._test_lswitch_port_add(may_exist=True) def test_lswitch_port_add_ignore_exists(self): self._test_lswitch_port_add(may_exist=False) def _test_lswitch_port_add_with_dhcp(self, dhcpv4_opts, dhcpv6_opts): lsp_name = 'fake-lsp' fake_lswitch = fakes.FakeOvsdbRow.create_one_ovsdb_row() fake_lsp = fakes.FakeOvsdbRow.create_one_ovsdb_row() self.transaction.insert.return_value = fake_lsp with mock.patch.object(idlutils, 'row_by_value', side_effect=[fake_lswitch, None]): cmd = commands.AddLSwitchPortCommand( self.ovn_api, lsp_name, fake_lswitch.name, may_exist=True, dhcpv4_options=dhcpv4_opts, dhcpv6_options=dhcpv6_opts) if not isinstance(dhcpv4_opts, list): dhcpv4_opts.result = 'fake-uuid-1' if not isinstance(dhcpv6_opts, list): dhcpv6_opts.result = 'fake-uuid-2' self.transaction.insert.reset_mock() cmd.run_idl(self.transaction) self.transaction.insert.assert_called_once_with( self.ovn_api.lsp_table) fake_lswitch.addvalue.assert_called_once_with( 'ports', fake_lsp.uuid) self.assertEqual(lsp_name, fake_lsp.name) if isinstance(dhcpv4_opts, list): self.assertEqual(dhcpv4_opts, fake_lsp.dhcpv4_options) else: self.assertEqual(['fake-uuid-1'], fake_lsp.dhcpv4_options) if isinstance(dhcpv6_opts, list): self.assertEqual(dhcpv6_opts, fake_lsp.dhcpv6_options) else: self.assertEqual(['fake-uuid-2'], fake_lsp.dhcpv6_options) def test_lswitch_port_add_with_dhcp(self): dhcpv4_opts_cmd = commands.AddDHCPOptionsCommand( self.ovn_api, mock.ANY, port_id=mock.ANY) dhcpv6_opts_cmd = commands.AddDHCPOptionsCommand( self.ovn_api, mock.ANY, port_id=mock.ANY) for dhcpv4_opts in ([], ['fake-uuid-1'], dhcpv4_opts_cmd): for dhcpv6_opts in ([], ['fake-uuid-2'], dhcpv6_opts_cmd): self._test_lswitch_port_add_with_dhcp(dhcpv4_opts, dhcpv6_opts) class TestSetLSwitchPortCommand(TestBaseCommand): def _test_lswitch_port_update_no_exist(self, 
if_exists=True): with mock.patch.object(idlutils, 'row_by_value', side_effect=idlutils.RowNotFound): cmd = commands.SetLSwitchPortCommand( self.ovn_api, 'fake-lsp', if_exists=if_exists) if if_exists: cmd.run_idl(self.transaction) else: self.assertRaises(RuntimeError, cmd.run_idl, self.transaction) def test_lswitch_port_no_exist_ignore(self): self._test_lswitch_port_update_no_exist(if_exists=True) def test_lswitch_port_no_exist_fail(self): self._test_lswitch_port_update_no_exist(if_exists=False) def test_lswitch_port_update(self): ext_ids = {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: 'test'} new_ext_ids = {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: 'test-new'} fake_lsp = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ids': ext_ids}) with mock.patch.object(idlutils, 'row_by_value', return_value=fake_lsp): cmd = commands.SetLSwitchPortCommand( self.ovn_api, fake_lsp.name, if_exists=True, external_ids=new_ext_ids) cmd.run_idl(self.transaction) self.assertEqual(new_ext_ids, fake_lsp.external_ids) def _test_lswitch_port_update_del_dhcp(self, clear_v4_opts, clear_v6_opts, set_v4_opts=False, set_v6_opts=False): ext_ids = {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: 'test'} dhcp_options_tbl = self.ovn_api._tables['DHCP_Options'] fake_dhcpv4_opts = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ids': {'port_id': 'fake-lsp'}}) dhcp_options_tbl.rows[fake_dhcpv4_opts.uuid] = fake_dhcpv4_opts fake_dhcpv6_opts = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ids': {'port_id': 'fake-lsp'}}) dhcp_options_tbl.rows[fake_dhcpv6_opts.uuid] = fake_dhcpv6_opts fake_lsp = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'name': 'fake-lsp', 'external_ids': ext_ids, 'dhcpv4_options': [fake_dhcpv4_opts], 'dhcpv6_options': [fake_dhcpv6_opts]}) columns = {} if clear_v4_opts: columns['dhcpv4_options'] = [] elif set_v4_opts: columns['dhcpv4_options'] = [fake_dhcpv4_opts.uuid] if clear_v6_opts: columns['dhcpv6_options'] = [] elif set_v6_opts: columns['dhcpv6_options'] = [fake_dhcpv6_opts.uuid] with mock.patch.object(idlutils, 'row_by_value', return_value=fake_lsp): cmd = commands.SetLSwitchPortCommand( self.ovn_api, fake_lsp.name, if_exists=True, **columns) cmd.run_idl(self.transaction) if clear_v4_opts and clear_v6_opts: fake_dhcpv4_opts.delete.assert_called_once_with() fake_dhcpv6_opts.delete.assert_called_once_with() elif clear_v4_opts: # not clear_v6_opts and set_v6_opts is any fake_dhcpv4_opts.delete.assert_called_once_with() fake_dhcpv6_opts.delete.assert_not_called() elif clear_v6_opts: # not clear_v4_opts and set_v6_opts is any fake_dhcpv4_opts.delete.assert_not_called() fake_dhcpv6_opts.delete.assert_called_once_with() else: # not clear_v4_opts and not clear_v6_opts and # set_v4_opts is any and set_v6_opts is any fake_dhcpv4_opts.delete.assert_not_called() fake_dhcpv6_opts.delete.assert_not_called() def test_lswitch_port_update_del_port_dhcpv4_options(self): self._test_lswitch_port_update_del_dhcp(True, False) def test_lswitch_port_update_del_port_dhcpv6_options(self): self._test_lswitch_port_update_del_dhcp(False, True) def test_lswitch_port_update_del_all_port_dhcp_options(self): self._test_lswitch_port_update_del_dhcp(True, True) def test_lswitch_port_update_del_no_port_dhcp_options(self): self._test_lswitch_port_update_del_dhcp(False, False) def test_lswitch_port_update_set_port_dhcpv4_options(self): self._test_lswitch_port_update_del_dhcp(False, True, set_v4_opts=True) def test_lswitch_port_update_set_port_dhcpv6_options(self): self._test_lswitch_port_update_del_dhcp(True, False, set_v6_opts=True) 
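    # NOTE: the clear/set permutations above and below pin down the
    # cleanup contract these tests assume for SetLSwitchPortCommand: a
    # per-port DHCP_Options row is garbage-collected only when the
    # corresponding dhcpv4_options/dhcpv6_options column is cleared to
    # [], never when it is re-pointed at another row.  A minimal sketch
    # of the check the cases imply (illustrative only, not the driver
    # code itself):
    #
    #     def _port_dhcp_opts_is_orphaned(row, cleared):
    #         # Per-port rows carry 'port_id' in external_ids; rows
    #         # shared per subnet do not and must always survive.
    #         return cleared and 'port_id' in row.external_ids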
def test_lswitch_port_update_set_all_port_dhcp_options(self): self._test_lswitch_port_update_del_dhcp(False, False, set_v4_opts=True, set_v6_opts=True) def _test_lswitch_port_update_with_dhcp(self, dhcpv4_opts, dhcpv6_opts): ext_ids = {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: 'test'} fake_lsp = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'name': 'fake-lsp', 'external_ids': ext_ids, 'dhcpv4_options': ['fake-v4-subnet-dhcp-opt'], 'dhcpv6_options': ['fake-v6-subnet-dhcp-opt']}) with mock.patch.object(idlutils, 'row_by_value', return_value=fake_lsp): cmd = commands.SetLSwitchPortCommand( self.ovn_api, fake_lsp.name, if_exists=True, external_ids=ext_ids, dhcpv4_options=dhcpv4_opts, dhcpv6_options=dhcpv6_opts) if not isinstance(dhcpv4_opts, list): dhcpv4_opts.result = 'fake-uuid-1' if not isinstance(dhcpv6_opts, list): dhcpv6_opts.result = 'fake-uuid-2' cmd.run_idl(self.transaction) if isinstance(dhcpv4_opts, list): self.assertEqual(dhcpv4_opts, fake_lsp.dhcpv4_options) else: self.assertEqual(['fake-uuid-1'], fake_lsp.dhcpv4_options) if isinstance(dhcpv6_opts, list): self.assertEqual(dhcpv6_opts, fake_lsp.dhcpv6_options) else: self.assertEqual(['fake-uuid-2'], fake_lsp.dhcpv6_options) def test_lswitch_port_update_with_dhcp(self): v4_dhcp_cmd = commands.AddDHCPOptionsCommand(self.ovn_api, mock.ANY, port_id=mock.ANY) v6_dhcp_cmd = commands.AddDHCPOptionsCommand(self.ovn_api, mock.ANY, port_id=mock.ANY) for dhcpv4_opts in ([], ['fake-v4-subnet-dhcp-opt'], v4_dhcp_cmd): for dhcpv6_opts in ([], ['fake-v6-subnet-dhcp-opt'], v6_dhcp_cmd): self._test_lswitch_port_update_with_dhcp( dhcpv4_opts, dhcpv6_opts) class TestDelLSwitchPortCommand(TestBaseCommand): def _test_lswitch_no_exist(self, if_exists=True): with mock.patch.object(idlutils, 'row_by_value', side_effect=['fake-lsp', idlutils.RowNotFound]): cmd = commands.DelLSwitchPortCommand( self.ovn_api, 'fake-lsp', 'fake-lswitch', if_exists=if_exists) if if_exists: cmd.run_idl(self.transaction) else: self.assertRaises(RuntimeError, cmd.run_idl, self.transaction) def test_lswitch_no_exist_ignore(self): self._test_lswitch_no_exist(if_exists=True) def test_lswitch_no_exist_fail(self): self._test_lswitch_no_exist(if_exists=False) def _test_lswitch_port_del_no_exist(self, if_exists=True): with mock.patch.object(idlutils, 'row_by_value', side_effect=idlutils.RowNotFound): cmd = commands.DelLSwitchPortCommand( self.ovn_api, 'fake-lsp', 'fake-lswitch', if_exists=if_exists) if if_exists: cmd.run_idl(self.transaction) else: self.assertRaises(RuntimeError, cmd.run_idl, self.transaction) def test_lswitch_port_no_exist_ignore(self): self._test_lswitch_port_del_no_exist(if_exists=True) def test_lswitch_port_no_exist_fail(self): self._test_lswitch_port_del_no_exist(if_exists=False) def test_lswitch_port_del(self): fake_lsp = mock.MagicMock() fake_lswitch = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'ports': [fake_lsp]}) self.ovn_api._tables['Logical_Switch_Port'].rows[fake_lsp.uuid] = \ fake_lsp with mock.patch.object(idlutils, 'row_by_value', side_effect=[fake_lsp, fake_lswitch]): cmd = commands.DelLSwitchPortCommand( self.ovn_api, fake_lsp.name, fake_lswitch.name, if_exists=True) cmd.run_idl(self.transaction) fake_lswitch.delvalue.assert_called_once_with('ports', fake_lsp) fake_lsp.delete.assert_called_once_with() def _test_lswitch_port_del_delete_dhcp_opt(self, dhcpv4_opt_ext_ids, dhcpv6_opt_ext_ids): ext_ids = {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: 'test'} fake_dhcpv4_options = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ids': dhcpv4_opt_ext_ids}) 
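        # The two fake DHCP_Options rows are registered in the mocked
        # DHCP_Options table below so that DelLSwitchPortCommand can
        # reach them through the port's dhcpv4_options/dhcpv6_options
        # references; whether each row's external_ids also carry a
        # 'port_id' key decides, in the assertions further down, whether
        # the command is expected to delete the row along with the port.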
        self.ovn_api._tables['DHCP_Options'].rows[fake_dhcpv4_options.uuid] = \
            fake_dhcpv4_options
        fake_dhcpv6_options = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={'external_ids': dhcpv6_opt_ext_ids})
        self.ovn_api._tables['DHCP_Options'].rows[fake_dhcpv6_options.uuid] = \
            fake_dhcpv6_options
        fake_lsp = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={'name': 'lsp',
                   'external_ids': ext_ids,
                   'dhcpv4_options': [fake_dhcpv4_options],
                   'dhcpv6_options': [fake_dhcpv6_options]})
        self.ovn_api._tables['Logical_Switch_Port'].rows[fake_lsp.uuid] = \
            fake_lsp
        fake_lswitch = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={'ports': [fake_lsp]})
        with mock.patch.object(idlutils, 'row_by_value',
                               side_effect=[fake_lsp, fake_lswitch]):
            cmd = commands.DelLSwitchPortCommand(
                self.ovn_api, fake_lsp.name, fake_lswitch.name,
                if_exists=True)
            cmd.run_idl(self.transaction)
            fake_lswitch.delvalue.assert_called_once_with('ports', fake_lsp)
            fake_lsp.delete.assert_called_once_with()
            if 'port_id' in dhcpv4_opt_ext_ids:
                fake_dhcpv4_options.delete.assert_called_once_with()
            else:
                fake_dhcpv4_options.delete.assert_not_called()
            if 'port_id' in dhcpv6_opt_ext_ids:
                fake_dhcpv6_options.delete.assert_called_once_with()
            else:
                fake_dhcpv6_options.delete.assert_not_called()

    def test_lswitch_port_del_delete_dhcp_opt(self):
        for v4_ext_ids in ({'subnet_id': 'fake-ls0'},
                           {'subnet_id': 'fake-ls0', 'port_id': 'lsp'}):
            for v6_ext_ids in ({'subnet_id': 'fake-ls1'},
                               {'subnet_id': 'fake-ls1', 'port_id': 'lsp'}):
                self._test_lswitch_port_del_delete_dhcp_opt(
                    v4_ext_ids, v6_ext_ids)


class TestAddLRouterCommand(TestBaseCommand):
    def test_lrouter_exists(self):
        with mock.patch.object(idlutils, 'row_by_value',
                               return_value=mock.ANY):
            cmd = commands.AddLRouterCommand(
                self.ovn_api, 'fake-lrouter', may_exist=True,
                a='1', b='2')
            cmd.run_idl(self.transaction)
            self.transaction.insert.assert_not_called()

    def test_lrouter_add_exists(self):
        fake_lrouter = fakes.FakeOvsdbRow.create_one_ovsdb_row()
        self.ovn_api._tables['Logical_Router'].rows[fake_lrouter.uuid] = \
            fake_lrouter
        self.transaction.insert.return_value = fake_lrouter
        cmd = commands.AddLRouterCommand(
            self.ovn_api, fake_lrouter.name, may_exist=False)
        cmd.run_idl(self.transaction)
        # NOTE(rtheis): Mocking the transaction allows this insert
        # to succeed when it normally would fail due to the duplicate name.
self.transaction.insert.assert_called_once_with( self.ovn_api._tables['Logical_Router']) def _test_lrouter_add(self, may_exist=True): with mock.patch.object(idlutils, 'row_by_value', return_value=None): fake_lrouter = fakes.FakeOvsdbRow.create_one_ovsdb_row() self.transaction.insert.return_value = fake_lrouter cmd = commands.AddLRouterCommand( self.ovn_api, 'fake-lrouter', may_exist=may_exist, a='1', b='2') cmd.run_idl(self.transaction) self.transaction.insert.assert_called_once_with( self.ovn_api._tables['Logical_Router']) self.assertEqual('fake-lrouter', fake_lrouter.name) self.assertEqual('1', fake_lrouter.a) self.assertEqual('2', fake_lrouter.b) def test_lrouter_add_may_exist(self): self._test_lrouter_add(may_exist=True) def test_lrouter_add_ignore_exists(self): self._test_lrouter_add(may_exist=False) class TestUpdateLRouterCommand(TestBaseCommand): def _test_lrouter_update_no_exist(self, if_exists=True): with mock.patch.object(idlutils, 'row_by_value', side_effect=idlutils.RowNotFound): cmd = commands.UpdateLRouterCommand( self.ovn_api, 'fake-lrouter', if_exists=if_exists) if if_exists: cmd.run_idl(self.transaction) else: self.assertRaises(RuntimeError, cmd.run_idl, self.transaction) def test_lrouter_no_exist_ignore(self): self._test_lrouter_update_no_exist(if_exists=True) def test_lrouter_no_exist_fail(self): self._test_lrouter_update_no_exist(if_exists=False) def test_lrouter_update(self): ext_ids = {ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'richard'} new_ext_ids = {ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'richard-new'} fake_lrouter = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ids': ext_ids}) with mock.patch.object(idlutils, 'row_by_value', return_value=fake_lrouter): cmd = commands.UpdateLRouterCommand( self.ovn_api, fake_lrouter.name, if_exists=True, external_ids=new_ext_ids) cmd.run_idl(self.transaction) self.assertEqual(new_ext_ids, fake_lrouter.external_ids) class TestDelLRouterCommand(TestBaseCommand): def _test_lrouter_del_no_exist(self, if_exists=True): with mock.patch.object(idlutils, 'row_by_value', side_effect=idlutils.RowNotFound): cmd = commands.DelLRouterCommand( self.ovn_api, 'fake-lrouter', if_exists=if_exists) if if_exists: cmd.run_idl(self.transaction) else: self.assertRaises(RuntimeError, cmd.run_idl, self.transaction) def test_lrouter_no_exist_ignore(self): self._test_lrouter_del_no_exist(if_exists=True) def test_lrouter_no_exist_fail(self): self._test_lrouter_del_no_exist(if_exists=False) def test_lrouter_del(self): fake_lrouter = fakes.FakeOvsdbRow.create_one_ovsdb_row() self.ovn_api._tables['Logical_Router'].rows[fake_lrouter.uuid] = \ fake_lrouter with mock.patch.object(idlutils, 'row_by_value', return_value=fake_lrouter): cmd = commands.DelLRouterCommand( self.ovn_api, fake_lrouter.name, if_exists=True) cmd.run_idl(self.transaction) fake_lrouter.delete.assert_called_once_with() class TestAddLRouterPortCommand(TestBaseCommand): def test_lrouter_not_found(self): with mock.patch.object(idlutils, 'row_by_value', side_effect=idlutils.RowNotFound): cmd = commands.AddLRouterPortCommand( self.ovn_api, 'fake-lrp', 'fake-lrouter', may_exist=False) self.assertRaises(RuntimeError, cmd.run_idl, self.transaction) self.transaction.insert.assert_not_called() def test_lrouter_port_exists(self): with mock.patch.object(idlutils, 'row_by_value', return_value=mock.ANY): cmd = commands.AddLRouterPortCommand( self.ovn_api, 'fake-lrp', 'fake-lrouter', may_exist=False) self.assertRaises(RuntimeError, cmd.run_idl, self.transaction) 
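            # With may_exist=False, hitting an existing router port is a
            # hard error: the command must raise before touching the
            # transaction, which the next assertion confirms by checking
            # that nothing was inserted.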
self.transaction.insert.assert_not_called() def test_lrouter_port_may_exist(self): with mock.patch.object(idlutils, 'row_by_value', return_value=mock.ANY): cmd = commands.AddLRouterPortCommand( self.ovn_api, 'fake-lrp', 'fake-lrouter', may_exist=True) cmd.run_idl(self.transaction) self.transaction.insert.assert_not_called() def test_lrouter_port_add(self): fake_lrouter = fakes.FakeOvsdbRow.create_one_ovsdb_row() with mock.patch.object(idlutils, 'row_by_value', side_effect=[fake_lrouter, idlutils.RowNotFound]): fake_lrp = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'foo': None}) self.transaction.insert.return_value = fake_lrp cmd = commands.AddLRouterPortCommand( self.ovn_api, 'fake-lrp', fake_lrouter.name, may_exist=False, foo='bar') cmd.run_idl(self.transaction) self.transaction.insert.assert_called_once_with( self.ovn_api._tables['Logical_Router_Port']) self.assertEqual('fake-lrp', fake_lrp.name) fake_lrouter.addvalue.assert_called_once_with('ports', fake_lrp) self.assertEqual('bar', fake_lrp.foo) class TestUpdateLRouterPortCommand(TestBaseCommand): def _test_lrouter_port_update_no_exist(self, if_exists=True): with mock.patch.object(idlutils, 'row_by_value', side_effect=idlutils.RowNotFound): cmd = commands.UpdateLRouterPortCommand( self.ovn_api, 'fake-lrp', if_exists=if_exists) if if_exists: cmd.run_idl(self.transaction) else: self.assertRaises(RuntimeError, cmd.run_idl, self.transaction) def test_lrouter_port_no_exist_ignore(self): self._test_lrouter_port_update_no_exist(if_exists=True) def test_lrouter_port_no_exist_fail(self): self._test_lrouter_port_update_no_exist(if_exists=False) def test_lrouter_port_update(self): old_networks = [] new_networks = ['10.1.0.0/24'] fake_lrp = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'networks': old_networks}) with mock.patch.object(idlutils, 'row_by_value', return_value=fake_lrp): cmd = commands.UpdateLRouterPortCommand( self.ovn_api, fake_lrp.name, if_exists=True, networks=new_networks) cmd.run_idl(self.transaction) self.assertEqual(new_networks, fake_lrp.networks) class TestDelLRouterPortCommand(TestBaseCommand): def _test_lrouter_port_del_no_exist(self, if_exists=True): with mock.patch.object(idlutils, 'row_by_value', side_effect=idlutils.RowNotFound): cmd = commands.DelLRouterPortCommand( self.ovn_api, 'fake-lrp', 'fake-lrouter', if_exists=if_exists) if if_exists: cmd.run_idl(self.transaction) else: self.assertRaises(RuntimeError, cmd.run_idl, self.transaction) def test_lrouter_port_no_exist_ignore(self): self._test_lrouter_port_del_no_exist(if_exists=True) def test_lrouter_port_no_exist_fail(self): self._test_lrouter_port_del_no_exist(if_exists=False) def test_lrouter_no_exist(self): with mock.patch.object(idlutils, 'row_by_value', side_effect=[mock.ANY, idlutils.RowNotFound]): cmd = commands.DelLRouterPortCommand( self.ovn_api, 'fake-lrp', 'fake-lrouter', if_exists=True) self.assertRaises(RuntimeError, cmd.run_idl, self.transaction) def test_lrouter_port_del(self): fake_lrp = mock.MagicMock() fake_lrouter = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'ports': [fake_lrp]}) self.ovn_api._tables['Logical_Router_Port'].rows[fake_lrp.uuid] = \ fake_lrp with mock.patch.object(idlutils, 'row_by_value', side_effect=[fake_lrp, fake_lrouter]): cmd = commands.DelLRouterPortCommand( self.ovn_api, fake_lrp.name, fake_lrouter.name, if_exists=True) cmd.run_idl(self.transaction) fake_lrouter.delvalue.assert_called_once_with('ports', fake_lrp) class TestSetLRouterPortInLSwitchPortCommand(TestBaseCommand): def 
test_lswitch_port_no_exist_fail(self): with mock.patch.object(idlutils, 'row_by_value', side_effect=idlutils.RowNotFound): cmd = commands.SetLRouterPortInLSwitchPortCommand( self.ovn_api, 'fake-lsp', 'fake-lrp', False, False, 'router') self.assertRaises(RuntimeError, cmd.run_idl, self.transaction) def test_lswitch_port_no_exist_do_not_fail(self): with mock.patch.object(idlutils, 'row_by_value', side_effect=idlutils.RowNotFound): cmd = commands.SetLRouterPortInLSwitchPortCommand( self.ovn_api, 'fake-lsp', 'fake-lrp', False, True, 'router') cmd.run_idl(self.transaction) def test_lswitch_port_router_update(self): lrp_name = 'fake-lrp' fake_lsp = fakes.FakeOvsdbRow.create_one_ovsdb_row() with mock.patch.object(idlutils, 'row_by_value', return_value=fake_lsp): cmd = commands.SetLRouterPortInLSwitchPortCommand( self.ovn_api, fake_lsp.name, lrp_name, True, True, 'router') cmd.run_idl(self.transaction) self.assertEqual({'router-port': lrp_name, 'nat-addresses': 'router'}, fake_lsp.options) self.assertEqual('router', fake_lsp.type) self.assertEqual('router', fake_lsp.addresses) class TestAddACLCommand(TestBaseCommand): def test_lswitch_no_exist(self, if_exists=True): with mock.patch.object(idlutils, 'row_by_value', side_effect=idlutils.RowNotFound): cmd = commands.AddACLCommand( self.ovn_api, 'fake-lswitch', 'fake-lsp') self.assertRaises(RuntimeError, cmd.run_idl, self.transaction) def test_acl_add(self): fake_lswitch = fakes.FakeOvsdbRow.create_one_ovsdb_row() with mock.patch.object(idlutils, 'row_by_value', return_value=fake_lswitch): fake_acl = fakes.FakeOvsdbRow.create_one_ovsdb_row() self.transaction.insert.return_value = fake_acl cmd = commands.AddACLCommand( self.ovn_api, fake_lswitch.name, 'fake-lsp', match='*') cmd.run_idl(self.transaction) self.transaction.insert.assert_called_once_with( self.ovn_api._tables['ACL']) fake_lswitch.addvalue.assert_called_once_with( 'acls', fake_acl.uuid) self.assertEqual('*', fake_acl.match) class TestDelACLCommand(TestBaseCommand): def _test_lswitch_no_exist(self, if_exists=True): with mock.patch.object(idlutils, 'row_by_value', side_effect=idlutils.RowNotFound): cmd = commands.DelACLCommand( self.ovn_api, 'fake-lswitch', 'fake-lsp', if_exists=if_exists) if if_exists: cmd.run_idl(self.transaction) else: self.assertRaises(RuntimeError, cmd.run_idl, self.transaction) def test_lswitch_no_exist_ignore(self): self._test_lswitch_no_exist(if_exists=True) def test_lswitch_no_exist_fail(self): self._test_lswitch_no_exist(if_exists=False) def test_acl_del(self): fake_lsp_name = 'fake-lsp' fake_acl_del = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ids': {'neutron:lport': fake_lsp_name}}) fake_acl_save = mock.ANY fake_lswitch = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'acls': [fake_acl_del, fake_acl_save]}) with mock.patch.object(idlutils, 'row_by_value', return_value=fake_lswitch): cmd = commands.DelACLCommand( self.ovn_api, fake_lswitch.name, fake_lsp_name, if_exists=True) cmd.run_idl(self.transaction) fake_lswitch.delvalue.assert_called_once_with('acls', mock.ANY) class TestUpdateACLsCommand(TestBaseCommand): def test_lswitch_no_exist(self): fake_lswitch = fakes.FakeOvsdbRow.create_one_ovsdb_row() self.ovn_api.get_acls_for_lswitches.return_value = ({}, {}, {}) cmd = commands.UpdateACLsCommand( self.ovn_api, [fake_lswitch.name], port_list=[], acl_new_values_dict={}, need_compare=True) cmd.run_idl(self.transaction) self.transaction.insert.assert_not_called() fake_lswitch.addvalue.assert_not_called() fake_lswitch.delvalue.assert_not_called() def 
_test_acl_update_no_acls(self, need_compare): fake_lswitch = fakes.FakeOvsdbRow.create_one_ovsdb_row() self.ovn_api.get_acls_for_lswitches.return_value = ( {}, {}, {fake_lswitch.name: fake_lswitch}) with mock.patch.object(idlutils, 'row_by_value', return_value=fake_lswitch): cmd = commands.UpdateACLsCommand( self.ovn_api, [fake_lswitch.name], port_list=[], acl_new_values_dict={}, need_compare=need_compare) cmd.run_idl(self.transaction) self.transaction.insert.assert_not_called() fake_lswitch.addvalue.assert_not_called() fake_lswitch.delvalue.assert_not_called() def test_acl_update_compare_no_acls(self): self._test_acl_update_no_acls(need_compare=True) def test_acl_update_no_compare_no_acls(self): self._test_acl_update_no_acls(need_compare=False) def test_acl_update_compare_acls(self): fake_sg_rule = \ fakes.FakeSecurityGroupRule.create_one_security_group_rule().info() fake_port = fakes.FakePort.create_one_port().info() fake_add_acl = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'match': 'add_acl'}) fake_del_acl = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'match': 'del_acl'}) fake_lswitch = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'name': ovn_utils.ovn_name(fake_port['network_id']), 'acls': []}) add_acl = ovn_acl.add_sg_rule_acl_for_port( fake_port, fake_sg_rule, 'add_acl') self.ovn_api.get_acls_for_lswitches.return_value = ( {fake_port['id']: [fake_del_acl.match]}, {fake_del_acl.match: fake_del_acl}, {fake_lswitch.name.replace('neutron-', ''): fake_lswitch}) cmd = commands.UpdateACLsCommand( self.ovn_api, [fake_port['network_id']], [fake_port], {fake_port['id']: [add_acl]}, need_compare=True) self.transaction.insert.return_value = fake_add_acl cmd.run_idl(self.transaction) self.transaction.insert.assert_called_once_with( self.ovn_api._tables['ACL']) fake_lswitch.addvalue.assert_called_with('acls', fake_add_acl.uuid) def test_acl_update_no_compare_add_acls(self): fake_sg_rule = \ fakes.FakeSecurityGroupRule.create_one_security_group_rule().info() fake_port = fakes.FakePort.create_one_port().info() fake_acl = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'match': '*'}) fake_lswitch = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'name': ovn_utils.ovn_name(fake_port['network_id'])}) add_acl = ovn_acl.add_sg_rule_acl_for_port( fake_port, fake_sg_rule, '*') with mock.patch.object(idlutils, 'row_by_value', return_value=fake_lswitch): self.transaction.insert.return_value = fake_acl cmd = commands.UpdateACLsCommand( self.ovn_api, [fake_port['network_id']], [fake_port], {fake_port['id']: add_acl}, need_compare=False, is_add_acl=True) cmd.run_idl(self.transaction) self.transaction.insert.assert_called_once_with( self.ovn_api._tables['ACL']) fake_lswitch.addvalue.assert_called_once_with( 'acls', fake_acl.uuid) def test_acl_update_no_compare_del_acls(self): fake_sg_rule = \ fakes.FakeSecurityGroupRule.create_one_security_group_rule().info() fake_port = fakes.FakePort.create_one_port().info() fake_acl = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'match': '*', 'external_ids': {'neutron:lport': fake_port['id'], 'neutron:security_group_rule_id': fake_sg_rule['id']}}) fake_lswitch = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'name': ovn_utils.ovn_name(fake_port['network_id']), 'acls': [fake_acl]}) del_acl = ovn_acl.add_sg_rule_acl_for_port( fake_port, fake_sg_rule, '*') with mock.patch.object(idlutils, 'row_by_value', return_value=fake_lswitch): cmd = commands.UpdateACLsCommand( self.ovn_api, [fake_port['network_id']], [fake_port], {fake_port['id']: del_acl}, 
need_compare=False, is_add_acl=False) cmd.run_idl(self.transaction) self.transaction.insert.assert_not_called() fake_lswitch.delvalue.assert_called_with('acls', mock.ANY) class TestAddStaticRouteCommand(TestBaseCommand): def test_lrouter_not_found(self): with mock.patch.object(idlutils, 'row_by_value', side_effect=idlutils.RowNotFound): cmd = commands.AddStaticRouteCommand(self.ovn_api, 'fake-lrouter') self.assertRaises(RuntimeError, cmd.run_idl, self.transaction) self.transaction.insert.assert_not_called() def test_static_route_add(self): fake_lrouter = fakes.FakeOvsdbRow.create_one_ovsdb_row() with mock.patch.object(idlutils, 'row_by_value', return_value=fake_lrouter): fake_static_route = fakes.FakeOvsdbRow.create_one_ovsdb_row() self.transaction.insert.return_value = fake_static_route cmd = commands.AddStaticRouteCommand( self.ovn_api, fake_lrouter.name, nexthop='40.0.0.100', ip_prefix='30.0.0.0/24') cmd.run_idl(self.transaction) self.transaction.insert.assert_called_once_with( self.ovn_api._tables['Logical_Router_Static_Route']) self.assertEqual('40.0.0.100', fake_static_route.nexthop) self.assertEqual('30.0.0.0/24', fake_static_route.ip_prefix) fake_lrouter.addvalue.assert_called_once_with( 'static_routes', fake_static_route.uuid) class TestDelStaticRouteCommand(TestBaseCommand): def _test_lrouter_no_exist(self, if_exists=True): with mock.patch.object(idlutils, 'row_by_value', side_effect=idlutils.RowNotFound): cmd = commands.DelStaticRouteCommand( self.ovn_api, 'fake-lrouter', '30.0.0.0/24', '40.0.0.100', if_exists=if_exists) if if_exists: cmd.run_idl(self.transaction) else: self.assertRaises(RuntimeError, cmd.run_idl, self.transaction) def test_lrouter_no_exist_ignore(self): self._test_lrouter_no_exist(if_exists=True) def test_lrouter_no_exist_fail(self): self._test_lrouter_no_exist(if_exists=False) def test_static_route_del(self): fake_static_route = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'ip_prefix': '50.0.0.0/24', 'nexthop': '40.0.0.101'}) fake_lrouter = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'static_routes': [fake_static_route]}) with mock.patch.object(idlutils, 'row_by_value', return_value=fake_lrouter): cmd = commands.DelStaticRouteCommand( self.ovn_api, fake_lrouter.name, fake_static_route.ip_prefix, fake_static_route.nexthop, if_exists=True) cmd.run_idl(self.transaction) fake_lrouter.delvalue.assert_called_once_with( 'static_routes', mock.ANY) def test_static_route_del_not_found(self): fake_static_route1 = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'ip_prefix': '50.0.0.0/24', 'nexthop': '40.0.0.101'}) fake_static_route2 = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'ip_prefix': '60.0.0.0/24', 'nexthop': '70.0.0.101'}) fake_lrouter = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'static_routes': [fake_static_route2]}) with mock.patch.object(idlutils, 'row_by_value', return_value=fake_lrouter): cmd = commands.DelStaticRouteCommand( self.ovn_api, fake_lrouter.name, fake_static_route1.ip_prefix, fake_static_route1.nexthop, if_exists=True) cmd.run_idl(self.transaction) fake_lrouter.delvalue.assert_not_called() self.assertEqual([mock.ANY], fake_lrouter.static_routes) class TestAddAddrSetCommand(TestBaseCommand): def test_addrset_exists(self): with mock.patch.object(idlutils, 'row_by_value', return_value=mock.ANY): cmd = commands.AddAddrSetCommand( self.ovn_api, 'fake-addrset', may_exist=True) cmd.run_idl(self.transaction) self.transaction.insert.assert_not_called() def test_addrset_add_exists(self): fake_addrset = 
fakes.FakeOvsdbRow.create_one_ovsdb_row() self.ovn_api._tables['Address_Set'].rows[fake_addrset.uuid] = \ fake_addrset self.transaction.insert.return_value = fake_addrset cmd = commands.AddAddrSetCommand( self.ovn_api, fake_addrset.name, may_exist=False) cmd.run_idl(self.transaction) # NOTE(rtheis): Mocking the transaction allows this insert # to succeed when it normally would fail due the duplicate name. self.transaction.insert.assert_called_once_with( self.ovn_api._tables['Address_Set']) def _test_addrset_add(self, may_exist=True): with mock.patch.object(idlutils, 'row_by_value', return_value=None): fake_addrset = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'foo': ''}) self.transaction.insert.return_value = fake_addrset cmd = commands.AddAddrSetCommand( self.ovn_api, 'fake-addrset', may_exist=may_exist, foo='bar') cmd.run_idl(self.transaction) self.transaction.insert.assert_called_once_with( self.ovn_api._tables['Address_Set']) self.assertEqual('fake-addrset', fake_addrset.name) self.assertEqual('bar', fake_addrset.foo) def test_addrset_add_may_exist(self): self._test_addrset_add(may_exist=True) def test_addrset_add_ignore_exists(self): self._test_addrset_add(may_exist=False) class TestDelAddrSetCommand(TestBaseCommand): def _test_addrset_del_no_exist(self, if_exists=True): with mock.patch.object(idlutils, 'row_by_value', side_effect=idlutils.RowNotFound): cmd = commands.DelAddrSetCommand( self.ovn_api, 'fake-addrset', if_exists=if_exists) if if_exists: cmd.run_idl(self.transaction) else: self.assertRaises(RuntimeError, cmd.run_idl, self.transaction) def test_addrset_no_exist_ignore(self): self._test_addrset_del_no_exist(if_exists=True) def test_addrset_no_exist_fail(self): self._test_addrset_del_no_exist(if_exists=False) def test_addrset_del(self): fake_addrset = fakes.FakeOvsdbRow.create_one_ovsdb_row() self.ovn_api._tables['Address_Set'].rows[fake_addrset.uuid] = \ fake_addrset with mock.patch.object(idlutils, 'row_by_value', return_value=fake_addrset): cmd = commands.DelAddrSetCommand( self.ovn_api, fake_addrset.name, if_exists=True) cmd.run_idl(self.transaction) fake_addrset.delete.assert_called_once_with() class TestUpdateAddrSetCommand(TestBaseCommand): def _test_addrset_update_no_exist(self, if_exists=True): with mock.patch.object(idlutils, 'row_by_value', side_effect=idlutils.RowNotFound): cmd = commands.UpdateAddrSetCommand( self.ovn_api, 'fake-addrset', addrs_add=[], addrs_remove=[], if_exists=if_exists) if if_exists: cmd.run_idl(self.transaction) else: self.assertRaises(RuntimeError, cmd.run_idl, self.transaction) def test_addrset_no_exist_ignore(self): self._test_addrset_update_no_exist(if_exists=True) def test_addrset_no_exist_fail(self): self._test_addrset_update_no_exist(if_exists=False) def _test_addrset_update(self, addrs_add=None, addrs_del=None): save_address = '10.0.0.1' initial_addresses = [save_address] final_addresses = [save_address] expected_addvalue_calls = [] expected_delvalue_calls = [] if addrs_add: for addr_add in addrs_add: final_addresses.append(addr_add) expected_addvalue_calls.append( mock.call('addresses', addr_add)) if addrs_del: for addr_del in addrs_del: initial_addresses.append(addr_del) expected_delvalue_calls.append( mock.call('addresses', addr_del)) fake_addrset = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'addresses': initial_addresses}) with mock.patch.object(idlutils, 'row_by_value', return_value=fake_addrset): cmd = commands.UpdateAddrSetCommand( self.ovn_api, fake_addrset.name, addrs_add=addrs_add, addrs_remove=addrs_del, 
if_exists=True) cmd.run_idl(self.transaction) fake_addrset.addvalue.assert_has_calls(expected_addvalue_calls) fake_addrset.delvalue.assert_has_calls(expected_delvalue_calls) def test_addrset_update_add(self): self._test_addrset_update(addrs_add=['10.0.0.4']) def test_addrset_update_del(self): self._test_addrset_update(addrs_del=['10.0.0.2']) class TestUpdateAddrSetExtIdsCommand(TestBaseCommand): def setUp(self): super(TestUpdateAddrSetExtIdsCommand, self).setUp() self.ext_ids = {ovn_const.OVN_SG_EXT_ID_KEY: 'default'} def _test_addrset_extids_update_no_exist(self, if_exists=True): with mock.patch.object(idlutils, 'row_by_value', side_effect=idlutils.RowNotFound): cmd = commands.UpdateAddrSetExtIdsCommand( self.ovn_api, 'fake-addrset', self.ext_ids, if_exists=if_exists) if if_exists: cmd.run_idl(self.transaction) else: self.assertRaises(RuntimeError, cmd.run_idl, self.transaction) def test_addrset_no_exist_ignore(self): self._test_addrset_extids_update_no_exist(if_exists=True) def test_addrset_no_exist_fail(self): self._test_addrset_extids_update_no_exist(if_exists=False) def test_addrset_extids_update(self): new_ext_ids = {ovn_const.OVN_SG_EXT_ID_KEY: 'default-new'} fake_addrset = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ids': self.ext_ids}) with mock.patch.object(idlutils, 'row_by_value', return_value=fake_addrset): cmd = commands.UpdateAddrSetExtIdsCommand( self.ovn_api, fake_addrset.name, new_ext_ids, if_exists=True) cmd.run_idl(self.transaction) self.assertEqual(new_ext_ids, fake_addrset.external_ids) class TestUpdateChassisExtIdsCommand(TestBaseCommand): def setUp(self): super(TestUpdateChassisExtIdsCommand, self).setUp() self.ext_ids = {ovn_const.OVN_SG_EXT_ID_KEY: 'default'} def _test_chassis_extids_update_no_exist(self, if_exists=True): with mock.patch.object(idlutils, 'row_by_value', side_effect=idlutils.RowNotFound): cmd = commands.UpdateChassisExtIdsCommand( self.ovn_api, 'fake-chassis', self.ext_ids, if_exists=if_exists) if if_exists: cmd.run_idl(self.transaction) else: self.assertRaises(RuntimeError, cmd.run_idl, self.transaction) def test_chassis_no_exist_ignore(self): self._test_chassis_extids_update_no_exist(if_exists=True) def test_chassis_no_exist_fail(self): self._test_chassis_extids_update_no_exist(if_exists=False) def test_chassis_extids_update(self): new_ext_ids = {ovn_const.OVN_SG_EXT_ID_KEY: 'default-new'} fake_chassis = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ids': self.ext_ids}) with mock.patch.object(idlutils, 'row_by_value', return_value=fake_chassis): cmd = commands.UpdateChassisExtIdsCommand( self.ovn_api, fake_chassis.name, new_ext_ids, if_exists=True) cmd.run_idl(self.transaction) self.assertEqual(new_ext_ids, fake_chassis.external_ids) class TestUpdatePortBindingExtIdsCommand(TestBaseCommand): def setUp(self): super(TestUpdatePortBindingExtIdsCommand, self).setUp() self.ext_ids = {ovn_const.OVN_SG_EXT_ID_KEY: 'default'} def _test_portbinding_extids_update_no_exist(self, if_exists=True): with mock.patch.object(idlutils, 'row_by_value', side_effect=idlutils.RowNotFound): cmd = commands.UpdatePortBindingExtIdsCommand( self.ovn_api, 'fake-portbinding', self.ext_ids, if_exists=if_exists) if if_exists: cmd.run_idl(self.transaction) else: self.assertRaises(RuntimeError, cmd.run_idl, self.transaction) def test_portbinding_no_exist_ignore(self): self._test_portbinding_extids_update_no_exist(if_exists=True) def test_portbinding_no_exist_fail(self): self._test_portbinding_extids_update_no_exist(if_exists=False) def 
test_portbinding_extids_update(self): new_ext_ids = {ovn_const.OVN_SG_EXT_ID_KEY: 'default-new'} fake_portbinding = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ids': self.ext_ids}) with mock.patch.object(idlutils, 'row_by_value', return_value=fake_portbinding): cmd = commands.UpdatePortBindingExtIdsCommand( self.ovn_api, fake_portbinding.name, new_ext_ids, if_exists=True) cmd.run_idl(self.transaction) self.assertEqual(new_ext_ids, fake_portbinding.external_ids) class TestAddDHCPOptionsCommand(TestBaseCommand): def test_dhcp_options_exists(self): fake_ext_ids = {'subnet_id': 'fake-subnet-id', 'port_id': 'fake-port-id'} fake_dhcp_options = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ids': fake_ext_ids}) self.ovn_api._tables['DHCP_Options'].rows[fake_dhcp_options.uuid] = \ fake_dhcp_options cmd = commands.AddDHCPOptionsCommand( self.ovn_api, fake_ext_ids['subnet_id'], fake_ext_ids['port_id'], may_exist=True, external_ids=fake_ext_ids) cmd.run_idl(self.transaction) self.transaction.insert.assert_not_called() self.assertEqual(fake_ext_ids, fake_dhcp_options.external_ids) def _test_dhcp_options_add(self, may_exist=True): fake_subnet_id = 'fake-subnet-id-' + str(may_exist) fake_port_id = 'fake-port-id-' + str(may_exist) fake_ext_ids1 = {'subnet_id': fake_subnet_id, 'port_id': fake_port_id} fake_dhcp_options1 = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ids': fake_ext_ids1}) self.ovn_api._tables['DHCP_Options'].rows[fake_dhcp_options1.uuid] = \ fake_dhcp_options1 fake_ext_ids2 = {'subnet_id': fake_subnet_id} fake_dhcp_options2 = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ids': fake_ext_ids2}) fake_dhcp_options3 = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ids': {'subnet_id': 'nomatch'}}) self.ovn_api._tables['DHCP_Options'].rows[fake_dhcp_options3.uuid] = \ fake_dhcp_options3 self.transaction.insert.return_value = fake_dhcp_options2 cmd = commands.AddDHCPOptionsCommand( self.ovn_api, fake_ext_ids2['subnet_id'], may_exist=may_exist, external_ids=fake_ext_ids2) cmd.run_idl(self.transaction) self.transaction.insert.assert_called_once_with( self.ovn_api._tables['DHCP_Options']) self.assertEqual(fake_ext_ids2, fake_dhcp_options2.external_ids) def test_dhcp_options_add_may_exist(self): self._test_dhcp_options_add(may_exist=True) def test_dhcp_options_add_ignore_exists(self): self._test_dhcp_options_add(may_exist=False) def _test_dhcp_options_update_result(self, new_insert=False): fake_ext_ids = {'subnet_id': 'fake_subnet', 'port_id': 'fake_port'} fake_dhcp_opts = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ids': fake_ext_ids}) if new_insert: self.transaction.insert.return_value = fake_dhcp_opts self.transaction.get_insert_uuid = mock.Mock( return_value='fake-uuid') else: self.ovn_api._tables['DHCP_Options'].rows[fake_dhcp_opts.uuid] = \ fake_dhcp_opts self.transaction.get_insert_uuid = mock.Mock( return_value=None) cmd = commands.AddDHCPOptionsCommand( self.ovn_api, fake_ext_ids['subnet_id'], port_id=fake_ext_ids['port_id'], may_exist=True, external_ids=fake_ext_ids) cmd.run_idl(self.transaction) cmd.post_commit(self.transaction) if new_insert: self.assertEqual('fake-uuid', cmd.result) else: self.assertEqual(fake_dhcp_opts.uuid, cmd.result) def test_dhcp_options_update_result_with_exist_row(self): self._test_dhcp_options_update_result(new_insert=False) def test_dhcp_options_update_result_with_new_row(self): self._test_dhcp_options_update_result(new_insert=True) class TestDelDHCPOptionsCommand(TestBaseCommand): def 
_test_dhcp_options_del_no_exist(self, if_exists=True): cmd = commands.DelDHCPOptionsCommand( self.ovn_api, 'fake-dhcp-options', if_exists=if_exists) if if_exists: cmd.run_idl(self.transaction) else: self.assertRaises(RuntimeError, cmd.run_idl, self.transaction) def test_dhcp_options_no_exist_ignore(self): self._test_dhcp_options_del_no_exist(if_exists=True) def test_dhcp_options_no_exist_fail(self): self._test_dhcp_options_del_no_exist(if_exists=False) def test_dhcp_options_del(self): fake_dhcp_options = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ids': {'subnet_id': 'fake-subnet-id'}}) self.ovn_api._tables['DHCP_Options'].rows[fake_dhcp_options.uuid] = \ fake_dhcp_options cmd = commands.DelDHCPOptionsCommand( self.ovn_api, fake_dhcp_options.uuid, if_exists=True) cmd.run_idl(self.transaction) fake_dhcp_options.delete.assert_called_once_with() class TestAddNATRuleInLRouterCommand(TestBaseCommand): def test_add_nat_rule(self): fake_lrouter = fakes.FakeOvsdbRow.create_one_ovsdb_row() fake_nat_rule_1 = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ip': '192.168.1.10', 'logical_ip': '10.0.0.4', 'type': 'dnat_and_snat'}) fake_nat_rule_2 = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ip': '192.168.1.8', 'logical_ip': '10.0.0.5', 'type': 'dnat_and_snat'}) fake_lrouter.nat = [fake_nat_rule_1, fake_nat_rule_2] self.ovn_api._tables['NAT'].rows[fake_nat_rule_1.uuid] = \ fake_nat_rule_1 self.ovn_api._tables['NAT'].rows[fake_nat_rule_2.uuid] = \ fake_nat_rule_2 with mock.patch.object(idlutils, 'row_by_value', return_value=fake_lrouter): cmd = commands.AddNATRuleInLRouterCommand( self.ovn_api, fake_lrouter.name) cmd.run_idl(self.transaction) self.transaction.insert.assert_called_once_with( self.ovn_api._tables['NAT']) # a UUID will have been appended self.assertEqual(3, len(fake_lrouter.nat)) self.assertIn(fake_nat_rule_1, fake_lrouter.nat) self.assertIn(fake_nat_rule_2, fake_lrouter.nat) def test_add_nat_rule_no_lrouter_exist(self): with mock.patch.object(idlutils, 'row_by_value', side_effect=idlutils.RowNotFound): cmd = commands.AddNATRuleInLRouterCommand( self.ovn_api, "fake-lrouter") self.assertRaises(RuntimeError, cmd.run_idl, self.transaction) class TestDeleteNATRuleInLRouterCommand(TestBaseCommand): def test_delete_nat_rule(self): fake_lrouter = fakes.FakeOvsdbRow.create_one_ovsdb_row() fake_nat_rule_1 = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ip': '192.168.1.10', 'logical_ip': '10.0.0.4', 'type': 'dnat_and_snat'}) fake_nat_rule_2 = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ip': '192.168.1.8', 'logical_ip': '10.0.0.5', 'type': 'dnat_and_snat'}) fake_lrouter.nat = [fake_nat_rule_1, fake_nat_rule_2] self.ovn_api._tables['NAT'].rows[fake_nat_rule_1.uuid] = \ fake_nat_rule_1 self.ovn_api._tables['NAT'].rows[fake_nat_rule_2.uuid] = \ fake_nat_rule_2 with mock.patch.object(idlutils, 'row_by_value', return_value=fake_lrouter): cmd = commands.DeleteNATRuleInLRouterCommand( self.ovn_api, fake_lrouter.name, fake_nat_rule_1.type, fake_nat_rule_1.logical_ip, fake_nat_rule_1.external_ip, False) cmd.run_idl(self.transaction) fake_nat_rule_1.delete.assert_called_once_with() self.assertEqual(1, len(fake_lrouter.nat)) self.assertNotIn(fake_nat_rule_1, fake_lrouter.nat) self.assertIn(fake_nat_rule_2, fake_lrouter.nat) # run again with same arguments, should not remove anything fake_nat_rule_1.delete.reset_mock() cmd.run_idl(self.transaction) fake_nat_rule_1.delete.assert_not_called() self.assertEqual(1, len(fake_lrouter.nat)) 
self.assertNotIn(fake_nat_rule_1, fake_lrouter.nat) self.assertIn(fake_nat_rule_2, fake_lrouter.nat) def _test_delete_nat_rule_no_lrouter_exist(self, if_exists=True): with mock.patch.object(idlutils, 'row_by_value', side_effect=idlutils.RowNotFound): cmd = commands.DeleteNATRuleInLRouterCommand( self.ovn_api, "fake-lrouter", "fake-type", "fake-logical-ip", "fake-external-ip", if_exists=if_exists) if if_exists: cmd.run_idl(self.transaction) else: self.assertRaises(RuntimeError, cmd.run_idl, self.transaction) def test_delete_nat_rule_no_lrouter_exist_ignore(self): self._test_delete_nat_rule_no_lrouter_exist(if_exists=True) def test_delete_nat_rule_no_lrouter_exist_fail(self): self._test_delete_nat_rule_no_lrouter_exist(if_exists=False) class TestSetNATRuleInLRouterCommand(TestBaseCommand): def test_set_nat_rule(self): fake_lrouter = fakes.FakeOvsdbRow.create_one_ovsdb_row() fake_nat_rule_1 = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ip': '192.168.1.10', 'logical_ip': '10.0.0.4', 'type': 'dnat_and_snat'}) fake_nat_rule_2 = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ip': '192.168.1.8', 'logical_ip': '10.0.0.5', 'type': 'dnat_and_snat'}) fake_lrouter.nat = [fake_nat_rule_1, fake_nat_rule_2] self.ovn_api._tables['NAT'].rows[fake_nat_rule_1.uuid] = \ fake_nat_rule_1 self.ovn_api._tables['NAT'].rows[fake_nat_rule_2.uuid] = \ fake_nat_rule_2 with mock.patch.object(idlutils, 'row_by_value', return_value=fake_lrouter): cmd = commands.SetNATRuleInLRouterCommand( self.ovn_api, fake_lrouter.name, fake_nat_rule_1.uuid, logical_ip='10.0.0.10') cmd.run_idl(self.transaction) self.assertEqual('10.0.0.10', fake_nat_rule_1.logical_ip) self.assertEqual('10.0.0.5', fake_nat_rule_2.logical_ip) def test_set_nat_rule_no_lrouter_exist(self): with mock.patch.object(idlutils, 'row_by_value', side_effect=idlutils.RowNotFound): cmd = commands.SetNATRuleInLRouterCommand( self.ovn_api, "fake-lrouter", "fake-uuid", logical_ip='fake-ip') self.assertRaises(RuntimeError, cmd.run_idl, self.transaction) class TestCheckRevisionNumberCommand(TestBaseCommand): def setUp(self): super(TestCheckRevisionNumberCommand, self).setUp() self.fip = {'name': 'floating-ip', 'revision_number': 3} self.fip_old_rev = {'name': 'floating-ip', 'revision_number': 1} self.nat_rule = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ip': '192.168.1.10', 'name': 'floating-ip', 'logical_ip': '10.0.0.4', 'type': 'dnat_and_snat', 'external_ids': {ovn_const.OVN_FIP_EXT_ID_KEY: 'floating-ip', ovn_const.OVN_REV_NUM_EXT_ID_KEY: 3}}) bad_nat_rule = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ip': '192.168.1.11', 'logical_ip': '10.0.0.5', 'type': 'bad_type'}) self.ovn_api._tables['NAT'].rows[self.nat_rule.uuid] = self.nat_rule self.ovn_api._tables['NAT'].rows[bad_nat_rule.uuid] = bad_nat_rule self.subnet = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ids': {'subnet_id': 'mysubnet'}}) bad_subnet = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ids': {'port_id': 'fake-lsp'}}) self.ovn_api._tables['DHCP_Options'].rows[self.subnet.uuid] = \ self.subnet self.ovn_api._tables['DHCP_Options'].rows[bad_subnet.uuid] = \ bad_subnet def _test_check_revision_number( self, name='fake-name', resource='fake-resource', resource_type=ovn_const.TYPE_NETWORKS, if_exists=True, revision_conflict=False): with mock.patch.object(self.ovn_api, "is_col_present", return_value=True): with mock.patch.object(self.ovn_api, 'lookup', side_effect=idlutils.RowNotFound): cmd = commands.CheckRevisionNumberCommand( 
self.ovn_api, name, resource, resource_type, if_exists=if_exists) if if_exists: cmd.run_idl(self.transaction) elif revision_conflict: self.assertRaises(ovn_exc.RevisionConflict, cmd.run_idl, self.transaction) else: self.assertRaises(RuntimeError, cmd.run_idl, self.transaction) def test_check_revision_number_no_exist_ignore(self): self._test_check_revision_number(if_exists=True) def test_check_revision_number_no_exist_fail(self): self._test_check_revision_number(if_exists=False) def test_check_revision_number_floating_ip(self): self._test_check_revision_number( name=self.fip['name'], resource=self.fip, resource_type=ovn_const.TYPE_FLOATINGIPS, if_exists=True) def test_check_revision_number_floating_ip_not_found(self): self._test_check_revision_number( name='fip-not-found', resource=self.fip, resource_type=ovn_const.TYPE_FLOATINGIPS, if_exists=False) def test_check_revision_number_floating_ip_revision_conflict(self): self._test_check_revision_number( name=self.fip['name'], resource=self.fip_old_rev, resource_type=ovn_const.TYPE_FLOATINGIPS, if_exists=False, revision_conflict=True) def test_check_revision_number_subnet(self): self._test_check_revision_number( name=self.subnet['name'], resource=self.subnet, resource_type=ovn_const.TYPE_SUBNETS, if_exists=True) def test_check_revision_number_subnet_not_found(self): self._test_check_revision_number( name='subnet-not-found', resource=self.subnet, resource_type=ovn_const.TYPE_SUBNETS, if_exists=False) class TestDeleteLRouterExtGwCommand(TestBaseCommand): def test_delete_lrouter_extgw_routes(self): fake_route_1 = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'ip_prefix': '0.0.0.0/0', 'nexthop': '10.0.0.1', 'external_ids': {ovn_const.OVN_ROUTER_IS_EXT_GW: True}}) fake_route_2 = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'ip_prefix': '50.0.0.0/24', 'nexthop': '40.0.0.101'}) fake_lrouter = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'static_routes': [fake_route_1, fake_route_2]}) with mock.patch.object(self.ovn_api, "is_col_present", return_value=True): with mock.patch.object(idlutils, 'row_by_value', return_value=fake_lrouter): cmd = commands.DeleteLRouterExtGwCommand( self.ovn_api, fake_lrouter.name, False) cmd.run_idl(self.transaction) fake_lrouter.delvalue.assert_called_once_with( 'static_routes', fake_route_1) def test_delete_lrouter_extgw_nat(self): fake_nat_1 = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ip': '192.168.1.10', 'logical_ip': '10.0.0.4', 'type': 'snat'}) fake_nat_2 = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ip': '192.168.1.8', 'logical_ip': '10.0.0.5', 'type': 'badtype'}) fake_lrouter = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'nat': [fake_nat_1, fake_nat_2]}) with mock.patch.object(self.ovn_api, "is_col_present", return_value=True): with mock.patch.object(idlutils, 'row_by_value', return_value=fake_lrouter): cmd = commands.DeleteLRouterExtGwCommand( self.ovn_api, fake_lrouter.name, False) cmd.run_idl(self.transaction) fake_lrouter.delvalue.assert_called_once_with( 'nat', fake_nat_1) def test_delete_lrouter_extgw_ports(self): port_id = 'fake-port-id' fake_lrouter = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ids': {ovn_const.OVN_GW_PORT_EXT_ID_KEY: port_id}}) with mock.patch.object(self.ovn_api, "is_col_present", return_value=True): with mock.patch.object(idlutils, 'row_by_value', side_effect=[fake_lrouter, port_id]): cmd = commands.DeleteLRouterExtGwCommand( self.ovn_api, fake_lrouter.name, False) cmd.run_idl(self.transaction) 
fake_lrouter.delvalue.assert_called_once_with( 'ports', port_id) def test_delete_lrouter_extgw_ports_not_found(self): port_id = 'fake-port-id' fake_lrouter = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ids': {ovn_const.OVN_GW_PORT_EXT_ID_KEY: port_id}}) with mock.patch.object(self.ovn_api, "is_col_present", return_value=True): with mock.patch.object(idlutils, 'row_by_value', side_effect=[fake_lrouter, idlutils.RowNotFound]): cmd = commands.DeleteLRouterExtGwCommand( self.ovn_api, fake_lrouter.name, False) cmd.run_idl(self.transaction) fake_lrouter.delvalue.assert_not_called() def _test_delete_lrouter_no_lrouter_exist(self, if_exists=True): with mock.patch.object(self.ovn_api, "is_col_present", return_value=True): with mock.patch.object(idlutils, 'row_by_value', side_effect=idlutils.RowNotFound): cmd = commands.DeleteLRouterExtGwCommand( self.ovn_api, "fake-lrouter", if_exists=if_exists) if if_exists: cmd.run_idl(self.transaction) else: self.assertRaises(RuntimeError, cmd.run_idl, self.transaction) def test_delete_lrouter_no_lrouter_exist_ignore(self): self._test_delete_lrouter_no_lrouter_exist(if_exists=True) def test_delete_no_lrouter_exist_fail(self): self._test_delete_lrouter_no_lrouter_exist(if_exists=False)
[neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_impl_idl_ovn.py]
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
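# NOTE(editor): Illustrative sketch, not part of the original file. The
# tests in this module never open a real OVSDB connection: get_connection
# is patched out, and idl.tables is replaced with a dict of
# fakes.FakeOvsdbTable objects populated with fakes.FakeOvsdbRow instances.
# A minimal, hypothetical example of that wiring (the helper name
# make_fake_nb_idl is invented for illustration), kept commented out so it
# cannot affect the module at import time:
#
#     def make_fake_nb_idl(switch_attrs):
#         # Build one fake table and fill it with fake rows.
#         table = fakes.FakeOvsdbTable.create_one_ovsdb_table()
#         for attrs in switch_attrs:
#             row = fakes.FakeOvsdbRow.create_one_ovsdb_row(attrs=attrs)
#             setattr(row, '_data', copy.deepcopy(attrs))
#             table.rows[row.uuid] = row
#         # Construct the NB IDL wrapper with the connection mocked away.
#         with mock.patch.object(impl_idl_ovn, 'get_connection',
#                                return_value=mock.Mock()):
#             impl_idl_ovn.OvsdbNbOvnIdl.ovsdb_connection = None
#             nb_idl = impl_idl_ovn.OvsdbNbOvnIdl(mock.Mock())
#         # Point the IDL at the in-memory fake table.
#         nb_idl.idl.tables = {'Logical_Switch': table}
#         return nb_idl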
# import collections import copy import uuid import mock from neutron.common.ovn import constants as ovn_const from neutron.common.ovn import utils from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import impl_idl_ovn from neutron.tests import base from neutron.tests.unit import fake_resources as fakes class TestDBImplIdlOvn(base.BaseTestCase): def _load_ovsdb_fake_rows(self, table, fake_attrs): for fake_attr in fake_attrs: fake_row = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs=fake_attr) # Pre-populate ovs idl "._data" fake_data = copy.deepcopy(fake_attr) try: del fake_data["unit_test_id"] except KeyError: pass setattr(fake_row, "_data", fake_data) table.rows[fake_row.uuid] = fake_row def _find_ovsdb_fake_row(self, table, key, value): for fake_row in table.rows.values(): if getattr(fake_row, key) == value: return fake_row return None def _construct_ovsdb_references(self, fake_associations, parent_table, child_table, parent_key, child_key, reference_column_name): for p_name, c_names in fake_associations.items(): p_row = self._find_ovsdb_fake_row(parent_table, parent_key, p_name) c_uuids = [] for c_name in c_names: c_row = self._find_ovsdb_fake_row(child_table, child_key, c_name) if not c_row: continue # Fake IDL processing (uuid -> row) c_uuids.append(c_row) setattr(p_row, reference_column_name, c_uuids) class TestNBImplIdlOvn(TestDBImplIdlOvn): fake_set = { 'lswitches': [ {'name': utils.ovn_name('ls-id-1'), 'external_ids': {ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY: 'ls-name-1'}}, {'name': utils.ovn_name('ls-id-2'), 'external_ids': {ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY: 'ls-name-2'}}, {'name': utils.ovn_name('ls-id-3'), 'external_ids': {ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY: 'ls-name-3'}}, {'name': 'ls-id-4', 'external_ids': {'not-neutron:network_name': 'ls-name-4'}}, {'name': utils.ovn_name('ls-id-5'), 'external_ids': {ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY: 'ls-name-5'}}], 'lswitch_ports': [ {'name': 'lsp-id-11', 'addresses': ['10.0.1.1'], 'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: 'lsp-name-11'}}, {'name': 'lsp-id-12', 'addresses': ['10.0.1.2'], 'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: 'lsp-name-12'}}, {'name': 'lsp-rp-id-1', 'addresses': ['10.0.1.254'], 'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: 'lsp-rp-name-1'}, 'options': {'router-port': utils.ovn_lrouter_port_name('orp-id-a1')}}, {'name': 'provnet-ls-id-1', 'addresses': ['unknown'], 'external_ids': {}, 'options': {'network_name': 'physnet1'}}, {'name': 'lsp-id-21', 'addresses': ['10.0.2.1'], 'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: 'lsp-name-21'}}, {'name': 'lsp-id-22', 'addresses': ['10.0.2.2'], 'external_ids': {}}, {'name': 'lsp-id-23', 'addresses': ['10.0.2.3'], 'external_ids': {'not-neutron:port_name': 'lsp-name-23'}}, {'name': 'lsp-rp-id-2', 'addresses': ['10.0.2.254'], 'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: 'lsp-rp-name-2'}, 'options': {'router-port': utils.ovn_lrouter_port_name('orp-id-a2')}}, {'name': 'provnet-ls-id-2', 'addresses': ['unknown'], 'external_ids': {}, 'options': {'network_name': 'physnet2'}}, {'name': 'lsp-id-31', 'addresses': ['10.0.3.1'], 'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: 'lsp-name-31'}}, {'name': 'lsp-id-32', 'addresses': ['10.0.3.2'], 'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: 'lsp-name-32'}}, {'name': 'lsp-rp-id-3', 'addresses': ['10.0.3.254'], 'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: 'lsp-rp-name-3'}, 'options': {'router-port': 
utils.ovn_lrouter_port_name('orp-id-a3')}}, {'name': 'lsp-vpn-id-3', 'addresses': ['10.0.3.253'], 'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: 'lsp-vpn-name-3'}}, {'name': 'lsp-id-41', 'addresses': ['20.0.1.1'], 'external_ids': {'not-neutron:port_name': 'lsp-name-41'}}, {'name': 'lsp-rp-id-4', 'addresses': ['20.0.1.254'], 'external_ids': {}, 'options': {'router-port': 'xrp-id-b1'}}, {'name': 'lsp-id-51', 'addresses': ['20.0.2.1'], 'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: 'lsp-name-51'}}, {'name': 'lsp-id-52', 'addresses': ['20.0.2.2'], 'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: 'lsp-name-52'}}, {'name': 'lsp-rp-id-5', 'addresses': ['20.0.2.254'], 'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: 'lsp-rp-name-5'}, 'options': {'router-port': utils.ovn_lrouter_port_name('orp-id-b2')}}, {'name': 'lsp-vpn-id-5', 'addresses': ['20.0.2.253'], 'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: 'lsp-vpn-name-5'}}], 'lrouters': [ {'name': utils.ovn_name('lr-id-a'), 'external_ids': {ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'lr-name-a'}}, {'name': utils.ovn_name('lr-id-b'), 'external_ids': {ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'lr-name-b'}}, {'name': utils.ovn_name('lr-id-c'), 'external_ids': {ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'lr-name-c'}}, {'name': utils.ovn_name('lr-id-d'), 'external_ids': {ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'lr-name-d'}}, {'name': utils.ovn_name('lr-id-e'), 'external_ids': {ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'lr-name-e'}}], 'lrouter_ports': [ {'name': utils.ovn_lrouter_port_name('orp-id-a1'), 'external_ids': {}, 'networks': ['10.0.1.0/24'], 'options': {ovn_const.OVN_GATEWAY_CHASSIS_KEY: 'host-1'}}, {'name': utils.ovn_lrouter_port_name('orp-id-a2'), 'external_ids': {}, 'networks': ['10.0.2.0/24'], 'options': {ovn_const.OVN_GATEWAY_CHASSIS_KEY: 'host-1'}}, {'name': utils.ovn_lrouter_port_name('orp-id-a3'), 'external_ids': {}, 'networks': ['10.0.3.0/24'], 'options': {ovn_const.OVN_GATEWAY_CHASSIS_KEY: ovn_const.OVN_GATEWAY_INVALID_CHASSIS}}, {'name': 'xrp-id-b1', 'external_ids': {}, 'networks': ['20.0.1.0/24']}, {'name': utils.ovn_lrouter_port_name('orp-id-b2'), 'external_ids': {}, 'networks': ['20.0.2.0/24'], 'options': {ovn_const.OVN_GATEWAY_CHASSIS_KEY: 'host-2'}}, {'name': utils.ovn_lrouter_port_name('orp-id-b3'), 'external_ids': {}, 'networks': ['20.0.3.0/24'], 'options': {}}], 'static_routes': [{'ip_prefix': '20.0.0.0/16', 'nexthop': '10.0.3.253'}, {'ip_prefix': '10.0.0.0/16', 'nexthop': '20.0.2.253'}], 'nats': [{'external_ip': '10.0.3.1', 'logical_ip': '20.0.0.0/16', 'type': 'snat'}, {'external_ip': '20.0.2.1', 'logical_ip': '10.0.0.0/24', 'type': 'snat'}, {'external_ip': '20.0.2.4', 'logical_ip': '10.0.0.4', 'type': 'dnat_and_snat', 'external_mac': [], 'logical_port': []}, {'external_ip': '20.0.2.5', 'logical_ip': '10.0.0.5', 'type': 'dnat_and_snat', 'external_mac': ['00:01:02:03:04:05'], 'logical_port': ['lsp-id-001']}], 'acls': [ {'unit_test_id': 1, 'action': 'allow-related', 'direction': 'from-lport', 'external_ids': {'neutron:lport': 'lsp-id-11'}, 'match': 'inport == "lsp-id-11" && ip4'}, {'unit_test_id': 2, 'action': 'allow-related', 'direction': 'to-lport', 'external_ids': {'neutron:lport': 'lsp-id-11'}, 'match': 'outport == "lsp-id-11" && ip4.src == $as_ip4_id_1'}, {'unit_test_id': 3, 'action': 'allow-related', 'direction': 'from-lport', 'external_ids': {'neutron:lport': 'lsp-id-12'}, 'match': 'inport == "lsp-id-12" && ip4'}, {'unit_test_id': 4, 'action': 'allow-related', 'direction': 'to-lport', 'external_ids': 
{'neutron:lport': 'lsp-id-12'}, 'match': 'outport == "lsp-id-12" && ip4.src == $as_ip4_id_1'}, {'unit_test_id': 5, 'action': 'allow-related', 'direction': 'from-lport', 'external_ids': {'neutron:lport': 'lsp-id-21'}, 'match': 'inport == "lsp-id-21" && ip4'}, {'unit_test_id': 6, 'action': 'allow-related', 'direction': 'to-lport', 'external_ids': {'neutron:lport': 'lsp-id-21'}, 'match': 'outport == "lsp-id-21" && ip4.src == $as_ip4_id_2'}, {'unit_test_id': 7, 'action': 'allow-related', 'direction': 'from-lport', 'external_ids': {'neutron:lport': 'lsp-id-41'}, 'match': 'inport == "lsp-id-41" && ip4'}, {'unit_test_id': 8, 'action': 'allow-related', 'direction': 'to-lport', 'external_ids': {'neutron:lport': 'lsp-id-41'}, 'match': 'outport == "lsp-id-41" && ip4.src == $as_ip4_id_4'}, {'unit_test_id': 9, 'action': 'allow-related', 'direction': 'from-lport', 'external_ids': {'neutron:lport': 'lsp-id-52'}, 'match': 'inport == "lsp-id-52" && ip4'}, {'unit_test_id': 10, 'action': 'allow-related', 'direction': 'to-lport', 'external_ids': {'neutron:lport': 'lsp-id-52'}, 'match': 'outport == "lsp-id-52" && ip4.src == $as_ip4_id_5'}], 'dhcp_options': [ {'cidr': '10.0.1.0/24', 'external_ids': {'subnet_id': 'subnet-id-10-0-1-0'}, 'options': {'mtu': '1442', 'router': '10.0.1.254'}}, {'cidr': '10.0.2.0/24', 'external_ids': {'subnet_id': 'subnet-id-10-0-2-0'}, 'options': {'mtu': '1442', 'router': '10.0.2.254'}}, {'cidr': '10.0.1.0/26', 'external_ids': {'subnet_id': 'subnet-id-10-0-1-0', 'port_id': 'lsp-vpn-id-3'}, 'options': {'mtu': '1442', 'router': '10.0.1.1'}}, {'cidr': '20.0.1.0/24', 'external_ids': {'subnet_id': 'subnet-id-20-0-1-0'}, 'options': {'mtu': '1442', 'router': '20.0.1.254'}}, {'cidr': '20.0.2.0/24', 'external_ids': {'subnet_id': 'subnet-id-20-0-2-0', 'port_id': 'lsp-vpn-id-5'}, 'options': {'mtu': '1442', 'router': '20.0.2.254'}}, {'cidr': '2001:dba::/64', 'external_ids': {'subnet_id': 'subnet-id-2001-dba', 'port_id': 'lsp-vpn-id-5'}, 'options': {'server_id': '12:34:56:78:9a:bc'}}, {'cidr': '30.0.1.0/24', 'external_ids': {'port_id': 'port-id-30-0-1-0'}, 'options': {'mtu': '1442', 'router': '30.0.2.254'}}, {'cidr': '30.0.2.0/24', 'external_ids': {}, 'options': {}}], 'address_sets': [ {'name': '$as_ip4_id_1', 'addresses': ['10.0.1.1', '10.0.1.2'], 'external_ids': {ovn_const.OVN_SG_EXT_ID_KEY: 'id_1'}}, {'name': '$as_ip4_id_2', 'addresses': ['10.0.2.1'], 'external_ids': {ovn_const.OVN_SG_EXT_ID_KEY: 'id_2'}}, {'name': '$as_ip4_id_3', 'addresses': ['10.0.3.1', '10.0.3.2'], 'external_ids': {ovn_const.OVN_SG_EXT_ID_KEY: 'id_3'}}, {'name': '$as_ip4_id_4', 'addresses': ['20.0.1.1', '20.0.1.2'], 'external_ids': {}}, {'name': '$as_ip4_id_5', 'addresses': ['20.0.2.1', '20.0.2.2'], 'external_ids': {ovn_const.OVN_SG_EXT_ID_KEY: 'id_5'}}], } fake_associations = { 'lstolsp': { utils.ovn_name('ls-id-1'): [ 'lsp-id-11', 'lsp-id-12', 'lsp-rp-id-1', 'provnet-ls-id-1'], utils.ovn_name('ls-id-2'): [ 'lsp-id-21', 'lsp-id-22', 'lsp-id-23', 'lsp-rp-id-2', 'provnet-ls-id-2'], utils.ovn_name('ls-id-3'): [ 'lsp-id-31', 'lsp-id-32', 'lsp-rp-id-3', 'lsp-vpn-id-3'], 'ls-id-4': [ 'lsp-id-41', 'lsp-rp-id-4'], utils.ovn_name('ls-id-5'): [ 'lsp-id-51', 'lsp-id-52', 'lsp-rp-id-5', 'lsp-vpn-id-5']}, 'lrtolrp': { utils.ovn_name('lr-id-a'): [ utils.ovn_lrouter_port_name('orp-id-a1'), utils.ovn_lrouter_port_name('orp-id-a2'), utils.ovn_lrouter_port_name('orp-id-a3')], utils.ovn_name('lr-id-b'): [ 'xrp-id-b1', utils.ovn_lrouter_port_name('orp-id-b2')]}, 'lrtosroute': { utils.ovn_name('lr-id-a'): ['20.0.0.0/16'], 
utils.ovn_name('lr-id-b'): ['10.0.0.0/16'] }, 'lrtonat': { utils.ovn_name('lr-id-a'): ['10.0.3.1'], utils.ovn_name('lr-id-b'): ['20.0.2.1', '20.0.2.4', '20.0.2.5'], }, 'lstoacl': { utils.ovn_name('ls-id-1'): [1, 2, 3, 4], utils.ovn_name('ls-id-2'): [5, 6], 'ls-id-4': [7, 8], utils.ovn_name('ls-id-5'): [9, 10]} } def setUp(self): super(TestNBImplIdlOvn, self).setUp() self.lswitch_table = fakes.FakeOvsdbTable.create_one_ovsdb_table() self.lsp_table = fakes.FakeOvsdbTable.create_one_ovsdb_table() self.lrouter_table = fakes.FakeOvsdbTable.create_one_ovsdb_table() self.lrp_table = fakes.FakeOvsdbTable.create_one_ovsdb_table() self.sroute_table = fakes.FakeOvsdbTable.create_one_ovsdb_table() self.nat_table = fakes.FakeOvsdbTable.create_one_ovsdb_table() self.acl_table = fakes.FakeOvsdbTable.create_one_ovsdb_table() self.dhcp_table = fakes.FakeOvsdbTable.create_one_ovsdb_table() self.address_set_table = fakes.FakeOvsdbTable.create_one_ovsdb_table() self._tables = {} self._tables['Logical_Switch'] = self.lswitch_table self._tables['Logical_Switch_Port'] = self.lsp_table self._tables['Logical_Router'] = self.lrouter_table self._tables['Logical_Router_Port'] = self.lrp_table self._tables['Logical_Router_Static_Route'] = self.sroute_table self._tables['ACL'] = self.acl_table self._tables['DHCP_Options'] = self.dhcp_table self._tables['Address_Set'] = self.address_set_table with mock.patch.object(impl_idl_ovn, 'get_connection', return_value=mock.Mock()): impl_idl_ovn.OvsdbNbOvnIdl.ovsdb_connection = None self.nb_ovn_idl = impl_idl_ovn.OvsdbNbOvnIdl(mock.Mock()) self.nb_ovn_idl.idl.tables = self._tables def _load_nb_db(self): # Load Switches and Switch Ports fake_lswitches = TestNBImplIdlOvn.fake_set['lswitches'] self._load_ovsdb_fake_rows(self.lswitch_table, fake_lswitches) fake_lsps = TestNBImplIdlOvn.fake_set['lswitch_ports'] self._load_ovsdb_fake_rows(self.lsp_table, fake_lsps) # Associate switches and ports self._construct_ovsdb_references( TestNBImplIdlOvn.fake_associations['lstolsp'], self.lswitch_table, self.lsp_table, 'name', 'name', 'ports') # Load Routers and Router Ports fake_lrouters = TestNBImplIdlOvn.fake_set['lrouters'] self._load_ovsdb_fake_rows(self.lrouter_table, fake_lrouters) fake_lrps = TestNBImplIdlOvn.fake_set['lrouter_ports'] self._load_ovsdb_fake_rows(self.lrp_table, fake_lrps) # Associate routers and router ports self._construct_ovsdb_references( TestNBImplIdlOvn.fake_associations['lrtolrp'], self.lrouter_table, self.lrp_table, 'name', 'name', 'ports') # Load static routes fake_sroutes = TestNBImplIdlOvn.fake_set['static_routes'] self._load_ovsdb_fake_rows(self.sroute_table, fake_sroutes) # Associate routers and static routes self._construct_ovsdb_references( TestNBImplIdlOvn.fake_associations['lrtosroute'], self.lrouter_table, self.sroute_table, 'name', 'ip_prefix', 'static_routes') # Load nats fake_nats = TestNBImplIdlOvn.fake_set['nats'] self._load_ovsdb_fake_rows(self.nat_table, fake_nats) # Associate routers and nats self._construct_ovsdb_references( TestNBImplIdlOvn.fake_associations['lrtonat'], self.lrouter_table, self.nat_table, 'name', 'external_ip', 'nat') # Load acls fake_acls = TestNBImplIdlOvn.fake_set['acls'] self._load_ovsdb_fake_rows(self.acl_table, fake_acls) # Associate switches and acls self._construct_ovsdb_references( TestNBImplIdlOvn.fake_associations['lstoacl'], self.lswitch_table, self.acl_table, 'name', 'unit_test_id', 'acls') # Load dhcp options fake_dhcp_options = TestNBImplIdlOvn.fake_set['dhcp_options'] self._load_ovsdb_fake_rows(self.dhcp_table, 
fake_dhcp_options) # Load address sets fake_address_sets = TestNBImplIdlOvn.fake_set['address_sets'] self._load_ovsdb_fake_rows(self.address_set_table, fake_address_sets) @mock.patch.object(impl_idl_ovn.OvsdbNbOvnIdl, 'ovsdb_connection', None) @mock.patch.object(impl_idl_ovn, 'get_connection', mock.Mock()) def test_setting_ovsdb_probe_timeout_default_value(self): inst = impl_idl_ovn.OvsdbNbOvnIdl(mock.Mock()) inst.idl._session.reconnect.set_probe_interval.assert_called_with( 60000) @mock.patch.object(impl_idl_ovn.OvsdbNbOvnIdl, 'ovsdb_connection', None) @mock.patch.object(impl_idl_ovn, 'get_connection', mock.Mock()) @mock.patch.object(ovn_conf, 'get_ovn_ovsdb_probe_interval') def test_setting_ovsdb_probe_timeout(self, mock_get_probe_interval): mock_get_probe_interval.return_value = 5000 inst = impl_idl_ovn.OvsdbNbOvnIdl(mock.Mock()) inst.idl._session.reconnect.set_probe_interval.assert_called_with(5000) def test_get_all_logical_switches_with_ports(self): # Test empty mapping = self.nb_ovn_idl.get_all_logical_switches_with_ports() self.assertItemsEqual(mapping, {}) # Test loaded values self._load_nb_db() mapping = self.nb_ovn_idl.get_all_logical_switches_with_ports() expected = [{'name': utils.ovn_name('ls-id-1'), 'ports': ['lsp-id-11', 'lsp-id-12', 'lsp-rp-id-1'], 'provnet_port': 'provnet-ls-id-1'}, {'name': utils.ovn_name('ls-id-2'), 'ports': ['lsp-id-21', 'lsp-rp-id-2'], 'provnet_port': 'provnet-ls-id-2'}, {'name': utils.ovn_name('ls-id-3'), 'ports': ['lsp-id-31', 'lsp-id-32', 'lsp-rp-id-3', 'lsp-vpn-id-3'], 'provnet_port': None}, {'name': utils.ovn_name('ls-id-5'), 'ports': ['lsp-id-51', 'lsp-id-52', 'lsp-rp-id-5', 'lsp-vpn-id-5'], 'provnet_port': None}] self.assertItemsEqual(mapping, expected) def test_get_all_logical_routers_with_rports(self): # Test empty mapping = self.nb_ovn_idl.get_all_logical_routers_with_rports() self.assertItemsEqual(mapping, {}) # Test loaded values self._load_nb_db() mapping = self.nb_ovn_idl.get_all_logical_routers_with_rports() expected = [{'name': 'lr-id-a', 'ports': {'orp-id-a1': ['10.0.1.0/24'], 'orp-id-a2': ['10.0.2.0/24'], 'orp-id-a3': ['10.0.3.0/24']}, 'static_routes': [{'destination': '20.0.0.0/16', 'nexthop': '10.0.3.253'}], 'snats': [{'external_ip': '10.0.3.1', 'logical_ip': '20.0.0.0/16', 'type': 'snat'}], 'dnat_and_snats': []}, {'name': 'lr-id-b', 'ports': {'xrp-id-b1': ['20.0.1.0/24'], 'orp-id-b2': ['20.0.2.0/24']}, 'static_routes': [{'destination': '10.0.0.0/16', 'nexthop': '20.0.2.253'}], 'snats': [{'external_ip': '20.0.2.1', 'logical_ip': '10.0.0.0/24', 'type': 'snat'}], 'dnat_and_snats': [{'external_ip': '20.0.2.4', 'logical_ip': '10.0.0.4', 'type': 'dnat_and_snat'}, {'external_ip': '20.0.2.5', 'logical_ip': '10.0.0.5', 'type': 'dnat_and_snat', 'external_mac': '00:01:02:03:04:05', 'logical_port': 'lsp-id-001'}]}, {'name': 'lr-id-c', 'ports': {}, 'static_routes': [], 'snats': [], 'dnat_and_snats': []}, {'name': 'lr-id-d', 'ports': {}, 'static_routes': [], 'snats': [], 'dnat_and_snats': []}, {'name': 'lr-id-e', 'ports': {}, 'static_routes': [], 'snats': [], 'dnat_and_snats': []}] self.assertItemsEqual(mapping, expected) def test_get_acls_for_lswitches(self): self._load_nb_db() # Test neutron switches lswitches = ['ls-id-1', 'ls-id-2', 'ls-id-3', 'ls-id-5'] acl_values, acl_objs, lswitch_ovsdb_dict = \ self.nb_ovn_idl.get_acls_for_lswitches(lswitches) expected_acl_values = { 'lsp-id-11': [ {'action': 'allow-related', 'lport': 'lsp-id-11', 'lswitch': 'neutron-ls-id-1', 'external_ids': {'neutron:lport': 'lsp-id-11'}, 'direction': 'from-lport', 'match': 'inport == "lsp-id-11" && ip4'}, {'action': 'allow-related', 'lport': 'lsp-id-11', 'lswitch': 'neutron-ls-id-1', 'external_ids': {'neutron:lport': 'lsp-id-11'}, 'direction': 'to-lport', 'match': 'outport == "lsp-id-11" && ip4.src == $as_ip4_id_1'} ], 'lsp-id-12': [ {'action': 'allow-related', 'lport': 'lsp-id-12', 'lswitch': 'neutron-ls-id-1', 'external_ids': {'neutron:lport': 'lsp-id-12'}, 'direction': 'from-lport', 'match': 'inport == "lsp-id-12" && ip4'}, {'action': 'allow-related', 'lport': 'lsp-id-12', 'lswitch': 'neutron-ls-id-1', 'external_ids': {'neutron:lport': 'lsp-id-12'}, 'direction': 'to-lport', 'match': 'outport == "lsp-id-12" && ip4.src == $as_ip4_id_1'} ], 'lsp-id-21': [ {'action': 'allow-related', 'lport': 'lsp-id-21', 'lswitch': 'neutron-ls-id-2', 'external_ids': {'neutron:lport': 'lsp-id-21'}, 'direction': 'from-lport', 'match': 'inport == "lsp-id-21" && ip4'}, {'action': 'allow-related', 'lport': 'lsp-id-21', 'lswitch': 'neutron-ls-id-2', 'external_ids': {'neutron:lport': 'lsp-id-21'}, 'direction': 'to-lport', 'match': 'outport == "lsp-id-21" && ip4.src == $as_ip4_id_2'} ], 'lsp-id-52': [ {'action': 'allow-related', 'lport': 'lsp-id-52', 'lswitch': 'neutron-ls-id-5', 'external_ids': {'neutron:lport': 'lsp-id-52'}, 'direction': 'from-lport', 'match': 'inport == "lsp-id-52" && ip4'}, {'action': 'allow-related', 'lport': 'lsp-id-52', 'lswitch': 'neutron-ls-id-5', 'external_ids': {'neutron:lport': 'lsp-id-52'}, 'direction': 'to-lport', 'match': 'outport == "lsp-id-52" && ip4.src == $as_ip4_id_5'} ]} self.assertItemsEqual(acl_values, expected_acl_values) self.assertEqual(len(acl_objs), 8) self.assertEqual(len(lswitch_ovsdb_dict), len(lswitches)) # Test non-neutron switches lswitches = ['ls-id-4'] acl_values, acl_objs, lswitch_ovsdb_dict = \ self.nb_ovn_idl.get_acls_for_lswitches(lswitches) self.assertItemsEqual(acl_values, {}) self.assertEqual(len(acl_objs), 0) self.assertEqual(len(lswitch_ovsdb_dict), 0) def test_get_all_chassis_gateway_bindings(self): self._load_nb_db() bindings = self.nb_ovn_idl.get_all_chassis_gateway_bindings() expected = {'host-1': [utils.ovn_lrouter_port_name('orp-id-a1'), utils.ovn_lrouter_port_name('orp-id-a2')], 'host-2': [utils.ovn_lrouter_port_name('orp-id-b2')], ovn_const.OVN_GATEWAY_INVALID_CHASSIS: [ utils.ovn_name('orp-id-a3')]} self.assertItemsEqual(bindings, expected) bindings = self.nb_ovn_idl.get_all_chassis_gateway_bindings([]) self.assertItemsEqual(bindings, expected) bindings = self.nb_ovn_idl.get_all_chassis_gateway_bindings(['host-1']) expected = {'host-1': [utils.ovn_lrouter_port_name('orp-id-a1'), utils.ovn_lrouter_port_name('orp-id-a2')]} self.assertItemsEqual(bindings, expected) def test_get_gateway_chassis_binding(self): self._load_nb_db() chassis = self.nb_ovn_idl.get_gateway_chassis_binding( utils.ovn_lrouter_port_name('orp-id-a1')) self.assertEqual(chassis, ['host-1']) chassis = self.nb_ovn_idl.get_gateway_chassis_binding( utils.ovn_lrouter_port_name('orp-id-b2')) self.assertEqual(chassis, ['host-2']) chassis = self.nb_ovn_idl.get_gateway_chassis_binding( utils.ovn_lrouter_port_name('orp-id-a3')) self.assertEqual(chassis, ['neutron-ovn-invalid-chassis']) chassis = self.nb_ovn_idl.get_gateway_chassis_binding( utils.ovn_lrouter_port_name('orp-id-b3')) self.assertEqual([], chassis) chassis = self.nb_ovn_idl.get_gateway_chassis_binding('bad') self.assertEqual([], chassis) def test_get_unhosted_gateways(self): self._load_nb_db() # Port physnet-dict port_physnet_dict = { 'orp-id-a1': 'physnet1', # scheduled 'orp-id-a2':
'physnet1', # scheduled 'orp-id-a3': 'physnet1', # not scheduled 'orp-id-b6': 'physnet2'} # not scheduled # Test that only orp-id-a3 is to be scheduled. # The rest of the ports either lack a chassis with the required # physnet (physnet2) or are already scheduled. unhosted_gateways = self.nb_ovn_idl.get_unhosted_gateways( port_physnet_dict, {'host-1': 'physnet1', 'host-2': 'physnet3'}, ['host-1', 'host-2']) expected = ['lrp-orp-id-a3'] self.assertItemsEqual(unhosted_gateways, expected) # Test both host-1, host-2 in valid list unhosted_gateways = self.nb_ovn_idl.get_unhosted_gateways( port_physnet_dict, {'host-1': 'physnet1', 'host-2': 'physnet2'}, ['host-1', 'host-2']) expected = ['lrp-orp-id-a3', 'lrp-orp-id-b6'] self.assertItemsEqual(unhosted_gateways, expected) def test_get_unhosted_gateways_deleted_physnet(self): self._load_nb_db() # The LRP is on host-2 now router_row = self._find_ovsdb_fake_row(self.lrp_table, 'name', 'lrp-orp-id-a1') setattr(router_row, 'options', { ovn_const.OVN_GATEWAY_CHASSIS_KEY: 'host-2'}) port_physnet_dict = {'orp-id-a1': 'physnet1'} # Let's spoof that physnet1 is deleted from host-2. unhosted_gateways = self.nb_ovn_idl.get_unhosted_gateways( port_physnet_dict, {'host-1': 'physnet1', 'host-2': 'physnet3'}, ['host-1', 'host-2']) # Make sure that the lrp is rescheduled, because host-1 has physnet1 expected = ['lrp-orp-id-a1'] self.assertItemsEqual(unhosted_gateways, expected) # Spoof that there is no valid host with the required physnet. unhosted_gateways = self.nb_ovn_idl.get_unhosted_gateways( port_physnet_dict, {'host-1': 'physnet4', 'host-2': 'physnet3'}, ['host-1', 'host-2']) self.assertItemsEqual(unhosted_gateways, []) def _test_get_unhosted_gateway_max_chassis(self, r): gw_chassis_table = fakes.FakeOvsdbTable.create_one_ovsdb_table() self._tables['Gateway_Chassis'] = gw_chassis_table gw_chassis = collections.namedtuple('gw_chassis', 'chassis_name priority') TestNBImplIdlOvn.fake_set['lrouter_ports'][0]['gateway_chassis'] = [ gw_chassis(chassis_name='host-%s' % x, priority=x) for x in r] self._load_nb_db() self.port_physnet_dict = {'orp-id-a1': 'physnet1'} def test_get_unhosted_gateway_max_chassis_lack_of_chassis(self): self._test_get_unhosted_gateway_max_chassis(r=(1, 3, 5)) unhosted_gateways = self.nb_ovn_idl.get_unhosted_gateways( self.port_physnet_dict, {'host-1': 'physnet1', 'host-2': 'physnet2', 'host-3': 'physnet1', 'host-4': 'physnet2', 'host-5': 'physnet1', 'host-6': 'physnet2'}, ['host-%s' % x for x in range(1, 7)]) # We don't have the required number of chassis expected = [] self.assertItemsEqual(unhosted_gateways, expected) def test_get_unhosted_gateway_max_chassis(self): # We have the required number of chassis, and the lrp # is hosted everywhere. self._test_get_unhosted_gateway_max_chassis(r=range(1, 6)) unhosted_gateways = self.nb_ovn_idl.get_unhosted_gateways( self.port_physnet_dict, {'host-1': 'physnet1', 'host-2': 'physnet1', 'host-3': 'physnet1', 'host-4': 'physnet1', 'host-5': 'physnet1', 'host-6': 'physnet1'}, ['host-%s' % x for x in range(1, 7)]) expected = [] self.assertItemsEqual(unhosted_gateways, expected) def test_get_unhosted_gateway_schedule_to_max(self): # The LRP is not yet scheduled on all chassis, # but we can schedule it on new chassis now.
self._test_get_unhosted_gateway_max_chassis(r=range(1, 4)) unhosted_gateways = self.nb_ovn_idl.get_unhosted_gateways( self.port_physnet_dict, {'host-1': 'physnet1', 'host-2': 'physnet1', 'host-3': 'physnet1', 'host-4': 'physnet1', 'host-5': 'physnet1', 'host-6': 'physnet1'}, ['host-%s' % x for x in range(1, 7)]) expected = ['lrp-orp-id-a1'] self.assertItemsEqual(unhosted_gateways, expected) def test_get_subnet_dhcp_options(self): self._load_nb_db() subnet_options = self.nb_ovn_idl.get_subnet_dhcp_options( 'subnet-id-10-0-2-0') expected_row = self._find_ovsdb_fake_row(self.dhcp_table, 'cidr', '10.0.2.0/24') self.assertEqual({ 'subnet': {'cidr': expected_row.cidr, 'external_ids': expected_row.external_ids, 'options': expected_row.options, 'uuid': expected_row.uuid}, 'ports': []}, subnet_options) subnet_options = self.nb_ovn_idl.get_subnet_dhcp_options( 'subnet-id-11-0-2-0')['subnet'] self.assertIsNone(subnet_options) subnet_options = self.nb_ovn_idl.get_subnet_dhcp_options( 'port-id-30-0-1-0')['subnet'] self.assertIsNone(subnet_options) def test_get_subnet_dhcp_options_with_ports(self): # Test empty subnet_options = self.nb_ovn_idl.get_subnet_dhcp_options( 'subnet-id-10-0-1-0', with_ports=True) self.assertItemsEqual({'subnet': None, 'ports': []}, subnet_options) # Test loaded values self._load_nb_db() # Test getting both subnet and port dhcp options subnet_options = self.nb_ovn_idl.get_subnet_dhcp_options( 'subnet-id-10-0-1-0', with_ports=True) dhcp_rows = [ self._find_ovsdb_fake_row(self.dhcp_table, 'cidr', '10.0.1.0/24'), self._find_ovsdb_fake_row(self.dhcp_table, 'cidr', '10.0.1.0/26')] expected_rows = [{'cidr': dhcp_row.cidr, 'external_ids': dhcp_row.external_ids, 'options': dhcp_row.options, 'uuid': dhcp_row.uuid} for dhcp_row in dhcp_rows] self.assertItemsEqual(expected_rows, [ subnet_options['subnet']] + subnet_options['ports']) # Test getting only subnet dhcp options subnet_options = self.nb_ovn_idl.get_subnet_dhcp_options( 'subnet-id-10-0-2-0', with_ports=True) dhcp_rows = [ self._find_ovsdb_fake_row(self.dhcp_table, 'cidr', '10.0.2.0/24')] expected_rows = [{'cidr': dhcp_row.cidr, 'external_ids': dhcp_row.external_ids, 'options': dhcp_row.options, 'uuid': dhcp_row.uuid} for dhcp_row in dhcp_rows] self.assertItemsEqual(expected_rows, [ subnet_options['subnet']] + subnet_options['ports']) # Test getting no dhcp options subnet_options = self.nb_ovn_idl.get_subnet_dhcp_options( 'subnet-id-11-0-2-0', with_ports=True) self.assertItemsEqual({'subnet': None, 'ports': []}, subnet_options) def test_get_subnets_dhcp_options(self): self._load_nb_db() def get_row_dict(row): return {'cidr': row.cidr, 'external_ids': row.external_ids, 'options': row.options, 'uuid': row.uuid} subnets_options = self.nb_ovn_idl.get_subnets_dhcp_options( ['subnet-id-10-0-1-0', 'subnet-id-10-0-2-0']) expected_rows = [ get_row_dict( self._find_ovsdb_fake_row(self.dhcp_table, 'cidr', cidr)) for cidr in ('10.0.1.0/24', '10.0.2.0/24')] self.assertItemsEqual(expected_rows, subnets_options) subnets_options = self.nb_ovn_idl.get_subnets_dhcp_options( ['subnet-id-11-0-2-0', 'subnet-id-20-0-1-0']) expected_row = get_row_dict( self._find_ovsdb_fake_row(self.dhcp_table, 'cidr', '20.0.1.0/24')) self.assertItemsEqual([expected_row], subnets_options) subnets_options = self.nb_ovn_idl.get_subnets_dhcp_options( ['port-id-30-0-1-0', 'fake-not-exist']) self.assertEqual([], subnets_options) def test_get_all_dhcp_options(self): self._load_nb_db() dhcp_options = self.nb_ovn_idl.get_all_dhcp_options() 
self.assertEqual(len(dhcp_options['subnets']), 3) self.assertEqual(len(dhcp_options['ports_v4']), 2) def test_get_address_sets(self): self._load_nb_db() address_sets = self.nb_ovn_idl.get_address_sets() self.assertEqual(len(address_sets), 4) def test_get_port_group_not_supported(self): self._load_nb_db() # Make sure that the Port_Group table doesn't exist in the fake db. self._tables.pop('Port_Group', None) port_group = self.nb_ovn_idl.get_port_group(str(uuid.uuid4())) self.assertIsNone(port_group) def test_get_port_groups_not_supported(self): self._load_nb_db() # Make sure that the Port_Group table doesn't exist in the fake db. self._tables.pop('Port_Group', None) port_groups = self.nb_ovn_idl.get_port_groups() self.assertEqual({}, port_groups) class TestSBImplIdlOvn(TestDBImplIdlOvn): fake_set = { 'chassis': [ {'name': 'host-1', 'hostname': 'host-1.localdomain.com', 'external_ids': {'ovn-bridge-mappings': 'public:br-ex,private:br-0'}}, {'name': 'host-2', 'hostname': 'host-2.localdomain.com', 'external_ids': {'ovn-bridge-mappings': 'public:br-ex,public2:br-ex'}}, {'name': 'host-3', 'hostname': 'host-3.localdomain.com', 'external_ids': {'ovn-bridge-mappings': 'public:br-ex'}}], } def setUp(self): super(TestSBImplIdlOvn, self).setUp() self.chassis_table = fakes.FakeOvsdbTable.create_one_ovsdb_table() self._tables = {} self._tables['Chassis'] = self.chassis_table with mock.patch.object(impl_idl_ovn, 'get_connection', return_value=mock.Mock()): impl_idl_ovn.OvsdbSbOvnIdl.ovsdb_connection = None self.sb_ovn_idl = impl_idl_ovn.OvsdbSbOvnIdl(mock.Mock()) self.sb_ovn_idl.idl.tables = self._tables def _load_sb_db(self): # Load Chassis fake_chassis = TestSBImplIdlOvn.fake_set['chassis'] self._load_ovsdb_fake_rows(self.chassis_table, fake_chassis) @mock.patch.object(impl_idl_ovn.OvsdbSbOvnIdl, 'ovsdb_connection', None) @mock.patch.object(impl_idl_ovn, 'get_connection', mock.Mock()) def test_setting_ovsdb_probe_timeout_default_value(self): inst = impl_idl_ovn.OvsdbSbOvnIdl(mock.Mock()) inst.idl._session.reconnect.set_probe_interval.assert_called_with( 60000) @mock.patch.object(impl_idl_ovn.OvsdbSbOvnIdl, 'ovsdb_connection', None) @mock.patch.object(impl_idl_ovn, 'get_connection', mock.Mock()) @mock.patch.object(ovn_conf, 'get_ovn_ovsdb_probe_interval') def test_setting_ovsdb_probe_timeout(self, mock_get_probe_interval): mock_get_probe_interval.return_value = 5000 inst = impl_idl_ovn.OvsdbSbOvnIdl(mock.Mock()) inst.idl._session.reconnect.set_probe_interval.assert_called_with(5000)
[neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py]
# Copyright 2019 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
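# NOTE(editor): Illustrative sketch, not part of the original file. Most
# assertions below follow the futurist periodics contract: a maintenance
# task is a method decorated with @periodics.periodic(spacing=...), and a
# task that must never run again raises periodics.NeverAgain, which the
# tests check with assertRaises. A minimal, hypothetical task showing that
# shape (has_lock and do_migration are invented placeholders), kept
# commented out so it cannot affect the module at import time:
#
#     @periodics.periodic(spacing=600)
#     def migrate_once(self):
#         if not self.has_lock:
#             return  # no lock yet; retried on the next interval
#         do_migration()
#         raise periodics.NeverAgain()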
import mock from futurist import periodics from neutron_lib import context from oslo_config import cfg from neutron.common.ovn import constants from neutron.common.ovn import utils from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf from neutron.db import ovn_revision_numbers_db from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import maintenance from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_db_sync from neutron.tests.unit import fake_resources as fakes from neutron.tests.unit.plugins.ml2 import test_security_group as test_sg from neutron.tests.unit import testlib_api class TestSchemaAwarePeriodicsBase(testlib_api.SqlTestCaseLight): def test__set_schema_aware_periodics(self): class TestClass(maintenance.SchemaAwarePeriodicsBase): @periodics.periodic(spacing=1) @maintenance.rerun_on_schema_updates def test_method_0(self): pass @periodics.periodic(spacing=1) def test_method_1(self): pass @periodics.periodic(spacing=1) @maintenance.rerun_on_schema_updates def test_method_2(self): pass obj = TestClass(mock.Mock()) # Assert that test_method_0 and test_method_2 are schema # aware periodics self.assertEqual([obj.test_method_0, obj.test_method_2], obj._schema_aware_periodics) @mock.patch.object(maintenance.SchemaAwarePeriodicsBase, 'get_ovn_nbdb_version') def test_nbdb_schema_updated_hook(self, mock_get_ver): initial_ver = '1.0.0' obj = mock.Mock() obj.get_ovn_nbdb_version.side_effect = (initial_ver, '1.1.0') obj_evt = maintenance.OVNNBDBReconnectionEvent(obj, initial_ver) # First run() will be called with the initial version (see # side_effect), so the hook should not be invoked since the # versions didn't change obj_evt.run('update', mock.Mock(), mock.Mock()) self.assertFalse(obj.nbdb_schema_updated_hook.called) # Second run() will be called with a different version, the # hook should now be invoked obj_evt.run('update', mock.Mock(), mock.Mock()) self.assertTrue(obj.nbdb_schema_updated_hook.called) @mock.patch.object(maintenance.DBInconsistenciesPeriodics, 'has_lock', mock.PropertyMock(return_value=True)) class TestDBInconsistenciesPeriodics(testlib_api.SqlTestCaseLight, test_sg.Ml2SecurityGroupsTestCase): def setUp(self): super(TestDBInconsistenciesPeriodics, self).setUp() self.net = self._make_network( self.fmt, name='net1', admin_state_up=True)['network'] self.port = self._make_port( self.fmt, self.net['id'], name='port1')['port'] self.fake_ovn_client = mock.MagicMock() self.periodic = maintenance.DBInconsistenciesPeriodics( self.fake_ovn_client) self.ctx = context.get_admin_context() @mock.patch.object(maintenance.DBInconsistenciesPeriodics, '_fix_create_update') @mock.patch.object(ovn_revision_numbers_db, 'get_inconsistent_resources') def test_check_for_inconsistencies(self, mock_get_incon_res, mock_fix_net): fake_row = mock.Mock(resource_type=constants.TYPE_NETWORKS) mock_get_incon_res.return_value = [fake_row, ] self.periodic.check_for_inconsistencies() mock_fix_net.assert_called_once_with(mock.ANY, fake_row) def _test_migrate_to_port_groups_helper(self, pg_supported, a_sets, migration_expected, never_again): self.fake_ovn_client._nb_idl.is_port_groups_supported.return_value = ( pg_supported) self.fake_ovn_client._nb_idl.get_address_sets.return_value = a_sets with mock.patch.object(ovn_db_sync.OvnNbSynchronizer, 'migrate_to_port_groups') as mtpg: if never_again: self.assertRaises(periodics.NeverAgain, self.periodic.migrate_to_port_groups) else: self.periodic.migrate_to_port_groups() if migration_expected: mtpg.assert_called_once_with(mock.ANY) else: 
mtpg.assert_not_called() def test_migrate_to_port_groups_port_groups_not_supported(self): self._test_migrate_to_port_groups_helper(pg_supported=False, a_sets=None, migration_expected=False, never_again=True) def test_migrate_to_port_groups_not_needed(self): self._test_migrate_to_port_groups_helper(pg_supported=True, a_sets=None, migration_expected=False, never_again=True) def test_migrate_to_port_groups(self): # Check normal migration path: if port groups are supported by the # schema and the migration has to be done, it will take place and # won't be attempted in the future. self._test_migrate_to_port_groups_helper(pg_supported=True, a_sets=['as1', 'as2'], migration_expected=True, never_again=True) def test_migrate_to_port_groups_no_lock(self): with mock.patch.object(maintenance.DBInconsistenciesPeriodics, 'has_lock', mock.PropertyMock( return_value=False)): # Check that if this worker doesn't have the lock, it won't # perform the migration and it will try again later. self._test_migrate_to_port_groups_helper(pg_supported=True, a_sets=['as1', 'as2'], migration_expected=False, never_again=False) def _test_fix_create_update_network(self, ovn_rev, neutron_rev): self.net['revision_number'] = neutron_rev # Create an entry in the revision_numbers table and assert that the # initial revision_number for our test object is the expected one ovn_revision_numbers_db.create_initial_revision( self.ctx, self.net['id'], constants.TYPE_NETWORKS, revision_number=ovn_rev) row = ovn_revision_numbers_db.get_revision_row(self.ctx, self.net['id']) self.assertEqual(ovn_rev, row.revision_number) if ovn_rev < 0: self.fake_ovn_client._nb_idl.get_lswitch.return_value = None else: fake_ls = mock.Mock(external_ids={ constants.OVN_REV_NUM_EXT_ID_KEY: ovn_rev}) self.fake_ovn_client._nb_idl.get_lswitch.return_value = fake_ls self.fake_ovn_client._plugin.get_network.return_value = self.net self.periodic._fix_create_update(self.ctx, row) # Since the revision number was < 0, make sure create_network() # is invoked with the latest version of the object in the neutron # database if ovn_rev < 0: self.fake_ovn_client.create_network.assert_called_once_with( self.ctx, self.net) # If the revision number is > 0 it means that the object already # exists and we just need to update it to match the latest in the # neutron database, so update_network() should be called.
        else:
            self.fake_ovn_client.update_network.assert_called_once_with(
                self.ctx, self.net)

    def test_fix_network_create(self):
        self._test_fix_create_update_network(ovn_rev=-1, neutron_rev=2)

    def test_fix_network_update(self):
        self._test_fix_create_update_network(ovn_rev=5, neutron_rev=7)

    def _test_fix_create_update_port(self, ovn_rev, neutron_rev):
        self.port['revision_number'] = neutron_rev

        # Create an entry in the revision_numbers table and assert that the
        # initial revision_number for our test object is the expected one
        ovn_revision_numbers_db.create_initial_revision(
            self.ctx, self.port['id'], constants.TYPE_PORTS,
            revision_number=ovn_rev)
        row = ovn_revision_numbers_db.get_revision_row(self.ctx,
                                                       self.port['id'])
        self.assertEqual(ovn_rev, row.revision_number)

        if ovn_rev < 0:
            self.fake_ovn_client._nb_idl.get_lswitch_port.return_value = None
        else:
            fake_lsp = mock.Mock(external_ids={
                constants.OVN_REV_NUM_EXT_ID_KEY: ovn_rev})
            self.fake_ovn_client._nb_idl.get_lswitch_port.return_value = (
                fake_lsp)

        self.fake_ovn_client._plugin.get_port.return_value = self.port
        self.periodic._fix_create_update(self.ctx, row)

        # Since the revision number was < 0, make sure create_port()
        # is invoked with the latest version of the object in the neutron
        # database
        if ovn_rev < 0:
            self.fake_ovn_client.create_port.assert_called_once_with(
                self.ctx, self.port)
        # If the revision number is > 0 it means that the object already
        # exists and we just need to update it to match the latest in the
        # neutron database, so update_port() should be called.
        else:
            self.fake_ovn_client.update_port.assert_called_once_with(
                self.ctx, self.port)

    def test_fix_port_create(self):
        self._test_fix_create_update_port(ovn_rev=-1, neutron_rev=2)

    def test_fix_port_update(self):
        self._test_fix_create_update_port(ovn_rev=5, neutron_rev=7)

    @mock.patch.object(ovn_revision_numbers_db, 'bump_revision')
    def _test_fix_security_group_create(self, mock_bump, revision_number):
        sg_name = utils.ovn_addrset_name('fake_id', 'ip4')
        sg = self._make_security_group(self.fmt, sg_name,
                                       '')['security_group']

        ovn_revision_numbers_db.create_initial_revision(
            self.ctx, sg['id'], constants.TYPE_SECURITY_GROUPS,
            revision_number=revision_number)
        row = ovn_revision_numbers_db.get_revision_row(self.ctx, sg['id'])
        self.assertEqual(revision_number, row.revision_number)

        if revision_number < 0:
            self.fake_ovn_client._nb_idl.get_address_set.return_value = None
            self.fake_ovn_client._nb_idl.get_port_group.return_value = None
        else:
            self.fake_ovn_client._nb_idl.get_address_set.return_value = (
                mock.sentinel.AddressSet)

        self.fake_ovn_client._plugin.get_security_group.return_value = sg
        self.periodic._fix_create_update(self.ctx, row)

        if revision_number < 0:
            self.fake_ovn_client.create_security_group.assert_called_once_with(
                self.ctx, sg)
        else:
            # If the object already exists, let's make sure we just bump
            # the revision number in the ovn_revision_numbers table
            self.assertFalse(
                self.fake_ovn_client.create_security_group.called)
            mock_bump.assert_called_once_with(
                self.ctx, sg, constants.TYPE_SECURITY_GROUPS)

    def test_fix_security_group_create_doesnt_exist(self):
        self._test_fix_security_group_create(revision_number=-1)

    def test_fix_security_group_create_version_mismatch(self):
        self._test_fix_security_group_create(revision_number=2)

    def test__create_lrouter_port(self):
        port = {'id': 'port-id', 'device_id': 'router-id'}
        self.periodic._create_lrouter_port(self.ctx, port)
        l3_mock = self.periodic._ovn_client._l3_plugin
        l3_mock.add_router_interface.assert_called_once_with(
            self.ctx, port['device_id'], {'port_id':
 port['id']}, may_exist=True)

    @mock.patch.object(maintenance.LOG, 'debug')
    def test__log_maintenance_inconsistencies(self, mock_log):
        ovn_conf.cfg.CONF.set_override('debug', True)

        # Create fake inconsistencies: 2 networks, 4 subnets and 8 ports
        incst = []
        incst += [mock.Mock(resource_type=constants.TYPE_NETWORKS)] * 2
        incst += [mock.Mock(resource_type=constants.TYPE_SUBNETS)] * 4
        incst += [mock.Mock(resource_type=constants.TYPE_PORTS)] * 8

        # Create fake inconsistencies for delete: 3 routers and 6 router ports
        incst_del = []
        incst_del += [mock.Mock(resource_type=constants.TYPE_ROUTERS)] * 3
        incst_del += [mock.Mock(
            resource_type=constants.TYPE_ROUTER_PORTS)] * 6

        self.periodic._log_maintenance_inconsistencies(incst, incst_del)

        # Assert LOG.debug was called twice
        self.assertEqual(2, len(mock_log.call_args_list))

        # Assert the log matches the number of inconsistencies
        fail_str_create_update = mock_log.call_args_list[0][0][1]['fail_str']
        self.assertIn('networks=2', fail_str_create_update)
        self.assertIn('subnets=4', fail_str_create_update)
        self.assertIn('ports=8', fail_str_create_update)

        fail_str_delete = mock_log.call_args_list[1][0][1]['fail_str']
        self.assertIn('routers=3', fail_str_delete)
        self.assertIn('router_ports=6', fail_str_delete)

    @mock.patch.object(maintenance.LOG, 'debug')
    def test__log_maintenance_inconsistencies_debug_disabled(self, mock_log):
        ovn_conf.cfg.CONF.set_override('debug', False)

        incst = [mock.Mock(resource_type=constants.TYPE_NETWORKS)] * 2
        self.periodic._log_maintenance_inconsistencies(incst, [])
        self.assertFalse(mock_log.called)

    def test_check_for_igmp_snoop_support(self):
        cfg.CONF.set_override('igmp_snooping_enable', True, group='OVS')
        nb_idl = self.fake_ovn_client._nb_idl
        ls0 = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={'name': 'ls0',
                   'other_config': {
                       constants.MCAST_SNOOP: 'false',
                       constants.MCAST_FLOOD_UNREGISTERED: 'false'}})
        ls1 = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={'name': 'ls1', 'other_config': {}})
        ls2 = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={'name': 'ls2',
                   'other_config': {
                       constants.MCAST_SNOOP: 'true',
                       constants.MCAST_FLOOD_UNREGISTERED: 'true'}})

        nb_idl.ls_list.return_value.execute.return_value = [ls0, ls1, ls2]

        self.assertRaises(periodics.NeverAgain,
                          self.periodic.check_for_igmp_snoop_support)

        # "ls2" is not part of the transaction because it already
        # has the right values set
        expected_calls = [
            mock.call('Logical_Switch', 'ls0',
                      ('other_config', {
                          constants.MCAST_SNOOP: 'true',
                          constants.MCAST_FLOOD_UNREGISTERED: 'true'})),
            mock.call('Logical_Switch', 'ls1',
                      ('other_config', {
                          constants.MCAST_SNOOP: 'true',
                          constants.MCAST_FLOOD_UNREGISTERED: 'true'})),
        ]
        nb_idl.db_set.assert_has_calls(expected_calls)

    def test_check_for_ha_chassis_group_address_not_supported(self):
        self.fake_ovn_client.is_external_ports_supported.return_value = False
        self.assertRaises(
            periodics.NeverAgain,
            self.periodic.check_for_ha_chassis_group_address)
        self.assertFalse(
            self.fake_ovn_client._nb_idl.ha_chassis_group_add.called)

    def test_check_for_ha_chassis_group_address(self):
        self.fake_ovn_client.is_external_ports_supported.return_value = True
        nb_idl = self.fake_ovn_client._nb_idl
        sb_idl = self.fake_ovn_client._sb_idl

        gw_chassis_0 = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={'priority': 1, 'name': 'gw_chassis_0',
                   'chassis_name': 'gw_chassis_0'})
        gw_chassis_1 = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={'priority': 2, 'name': 'gw_chassis_1',
                   'chassis_name': 'gw_chassis_1'})
        non_gw_chassis_0 = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={'name':
 'non_gw_chassis_0'})
        default_ha_group = fakes.FakeOvsdbRow.create_one_ovsdb_row(
            attrs={'ha_chassis': [gw_chassis_0, gw_chassis_1]})

        nb_idl.ha_chassis_group_add.return_value.execute.return_value = (
            default_ha_group)
        sb_idl.get_all_chassis.return_value = [
            non_gw_chassis_0.name, gw_chassis_0.name, gw_chassis_1.name]
        sb_idl.get_gateway_chassis_from_cms_options.return_value = [
            gw_chassis_0.name, gw_chassis_1.name]

        # Invoke the periodic method; it is meant to run only once at
        # startup, so NeverAgain will be raised at the end
        self.assertRaises(
            periodics.NeverAgain,
            self.periodic.check_for_ha_chassis_group_address)

        # Make sure the non GW chassis has been removed from the
        # default HA_CHASSIS_GROUP
        nb_idl.ha_chassis_group_del_chassis.assert_called_once_with(
            constants.HA_CHASSIS_GROUP_DEFAULT_NAME,
            non_gw_chassis_0.name, if_exists=True)

        # Assert the GW chassis are being added to the
        # default HA_CHASSIS_GROUP
        expected_calls = [
            mock.call(constants.HA_CHASSIS_GROUP_DEFAULT_NAME,
                      gw_chassis_1.chassis_name,
                      priority=constants.HA_CHASSIS_GROUP_HIGHEST_PRIORITY),
            # Note that the second chassis is getting priority -1
            mock.call(constants.HA_CHASSIS_GROUP_DEFAULT_NAME,
                      gw_chassis_0.chassis_name,
                      priority=constants.HA_CHASSIS_GROUP_HIGHEST_PRIORITY - 1)
        ]
        nb_idl.ha_chassis_group_add_chassis.assert_has_calls(expected_calls)


# File: neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py
# Copyright 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
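# Illustrative sketch (not from this module; reconcile(), create() and
# delete() are hypothetical names): the synchronizer tests below exercise a
# reconcile loop, where the sync tool diffs Neutron DB objects against OVN NB
# rows, creates what is missing and deletes what is stale. A minimal,
# hedged sketch of that pattern:
def _sketch_reconcile(neutron_ids, ovn_ids, create, delete):
    # Anything known to Neutron but absent from OVN must be created.
    for missing in set(neutron_ids) - set(ovn_ids):
        create(missing)
    # Anything present in OVN but unknown to Neutron must be removed.
    for stale in set(ovn_ids) - set(neutron_ids):
        delete(stale)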
import collections import mock from neutron_lib import constants as const from neutron.common.ovn import acl from neutron.common.ovn import constants as ovn_const from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import impl_idl_ovn from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_client from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_db_sync from neutron.services.ovn_l3 import plugin as ovn_plugin from neutron.tests.unit.plugins.ml2.drivers.ovn.mech_driver import \ test_mech_driver OvnPortInfo = collections.namedtuple('OvnPortInfo', ['name']) @mock.patch.object(ovn_plugin.OVNL3RouterPlugin, '_sb_ovn', mock.Mock()) class TestOvnNbSyncML2(test_mech_driver.OVNMechanismDriverTestCase): l3_plugin = 'ovn-router' def setUp(self): super(TestOvnNbSyncML2, self).setUp() self.subnet = {'cidr': '10.0.0.0/24', 'id': 'subnet1', 'subnetpool_id': None, 'name': 'private-subnet', 'enable_dhcp': True, 'network_id': 'n1', 'tenant_id': 'tenant1', 'gateway_ip': '10.0.0.1', 'ip_version': 4, 'shared': False} self.matches = ["", "", "", ""] self.networks = [{'id': 'n1', 'mtu': 1450, 'provider:physical_network': 'physnet1', 'provider:segmentation_id': 1000}, {'id': 'n2', 'mtu': 1450}, {'id': 'n4', 'mtu': 1450, 'provider:physical_network': 'physnet2'}] self.subnets = [{'id': 'n1-s1', 'network_id': 'n1', 'enable_dhcp': True, 'cidr': '10.0.0.0/24', 'tenant_id': 'tenant1', 'gateway_ip': '10.0.0.1', 'dns_nameservers': [], 'host_routes': [], 'ip_version': 4}, {'id': 'n1-s2', 'network_id': 'n1', 'enable_dhcp': True, 'cidr': 'fd79:e1c:a55::/64', 'tenant_id': 'tenant1', 'gateway_ip': 'fd79:e1c:a55::1', 'dns_nameservers': [], 'host_routes': [], 'ip_version': 6}, {'id': 'n2', 'network_id': 'n2', 'enable_dhcp': True, 'cidr': '20.0.0.0/24', 'tenant_id': 'tenant1', 'gateway_ip': '20.0.0.1', 'dns_nameservers': [], 'host_routes': [], 'ip_version': 4}] self.security_groups = [ {'id': 'sg1', 'tenant_id': 'tenant1', 'security_group_rules': [{'remote_group_id': None, 'direction': 'ingress', 'remote_ip_prefix': '0.0.0.0/0', 'protocol': 'tcp', 'ethertype': 'IPv4', 'tenant_id': 'tenant1', 'port_range_max': 65535, 'port_range_min': 1, 'id': 'ruleid1', 'security_group_id': 'sg1'}], 'name': 'all-tcp'}, {'id': 'sg2', 'tenant_id': 'tenant1', 'security_group_rules': [{'remote_group_id': 'sg2', 'direction': 'egress', 'remote_ip_prefix': '0.0.0.0/0', 'protocol': 'tcp', 'ethertype': 'IPv4', 'tenant_id': 'tenant1', 'port_range_max': 65535, 'port_range_min': 1, 'id': 'ruleid1', 'security_group_id': 'sg2'}], 'name': 'all-tcpe'}] self.port_groups_ovn = [mock.Mock(), mock.Mock(), mock.Mock()] self.port_groups_ovn[0].configure_mock( name='pg_sg1', external_ids={ovn_const.OVN_SG_EXT_ID_KEY: 'sg1'}, ports=[], acls=[]) self.port_groups_ovn[1].configure_mock( name='pg_unknown_del', external_ids={ovn_const.OVN_SG_EXT_ID_KEY: 'sg2'}, ports=[], acls=[]) self.port_groups_ovn[2].configure_mock( name='neutron_pg_drop', external_ids=[], ports=[], acls=[]) self.ports = [ {'id': 'p1n1', 'device_owner': 'compute:None', 'fixed_ips': [{'subnet_id': 'b142f5e3-d434-4740-8e88-75e8e5322a40', 'ip_address': '10.0.0.4'}, {'subnet_id': 'subnet1', 'ip_address': 'fd79:e1c:a55::816:eff:eff:ff2'}], 'security_groups': ['sg1'], 'network_id': 'n1'}, {'id': 'p2n1', 'device_owner': 'compute:None', 'fixed_ips': [{'subnet_id': 'b142f5e3-d434-4740-8e88-75e8e5322a40', 'ip_address': '10.0.0.4'}, {'subnet_id': 'subnet1', 'ip_address': 'fd79:e1c:a55::816:eff:eff:ff2'}], 'security_groups': ['sg2'], 'network_id': 'n1', 'extra_dhcp_opts': [{'ip_version': 
6, 'opt_name': 'domain-search', 'opt_value': 'foo-domain'}]}, {'id': 'p1n2', 'device_owner': 'compute:None', 'fixed_ips': [{'subnet_id': 'b142f5e3-d434-4740-8e88-75e8e5322a40', 'ip_address': '10.0.0.4'}, {'subnet_id': 'subnet1', 'ip_address': 'fd79:e1c:a55::816:eff:eff:ff2'}], 'security_groups': ['sg1'], 'network_id': 'n2', 'extra_dhcp_opts': [{'ip_version': 4, 'opt_name': 'tftp-server', 'opt_value': '20.0.0.20'}, {'ip_version': 4, 'opt_name': 'dns-server', 'opt_value': '8.8.8.8'}, {'ip_version': 6, 'opt_name': 'domain-search', 'opt_value': 'foo-domain'}]}, {'id': 'p2n2', 'device_owner': 'compute:None', 'fixed_ips': [{'subnet_id': 'b142f5e3-d434-4740-8e88-75e8e5322a40', 'ip_address': '10.0.0.4'}, {'subnet_id': 'subnet1', 'ip_address': 'fd79:e1c:a55::816:eff:eff:ff2'}], 'security_groups': ['sg2'], 'network_id': 'n2'}, {'id': 'fp1', 'device_owner': 'network:floatingip', 'fixed_ips': [{'subnet_id': 'ext-subnet', 'ip_address': '90.0.0.10'}], 'network_id': 'ext-net'}] self.ports_ovn = [OvnPortInfo('p1n1'), OvnPortInfo('p1n2'), OvnPortInfo('p2n1'), OvnPortInfo('p2n2'), OvnPortInfo('p3n1'), OvnPortInfo('p3n3')] self.acls_ovn = { 'lport1': # ACLs need to be removed by the sync tool [{'id': 'acl1', 'priority': 00, 'policy': 'allow', 'lswitch': 'lswitch1', 'lport': 'lport1'}], 'lport2': [{'id': 'acl2', 'priority': 00, 'policy': 'drop', 'lswitch': 'lswitch2', 'lport': 'lport2'}], # ACLs need to be kept as-is by the sync tool 'p2n2': [{'lport': 'p2n2', 'direction': 'to-lport', 'log': False, 'lswitch': 'neutron-n2', 'priority': 1001, 'action': 'drop', 'external_ids': {'neutron:lport': 'p2n2'}, 'match': 'outport == "p2n2" && ip'}, {'lport': 'p2n2', 'direction': 'to-lport', 'log': False, 'lswitch': 'neutron-n2', 'priority': 1002, 'action': 'allow', 'external_ids': {'neutron:lport': 'p2n2'}, 'match': 'outport == "p2n2" && ip4 && ' 'ip4.src == 10.0.0.0/24 && udp && ' 'udp.src == 67 && udp.dst == 68'}]} self.address_sets_ovn = { 'as_ip4_sg1': {'external_ids': {ovn_const.OVN_SG_EXT_ID_KEY: 'all-tcp'}, 'name': 'as_ip4_sg1', 'addresses': ['10.0.0.4']}, 'as_ip4_sg2': {'external_ids': {ovn_const.OVN_SG_EXT_ID_KEY: 'all-tcpe'}, 'name': 'as_ip4_sg2', 'addresses': []}, 'as_ip6_sg2': {'external_ids': {ovn_const.OVN_SG_EXT_ID_KEY: 'all-tcpe'}, 'name': 'as_ip6_sg2', 'addresses': ['fd79:e1c:a55::816:eff:eff:ff2', 'fd79:e1c:a55::816:eff:eff:ff3']}, 'as_ip4_del': {'external_ids': {ovn_const.OVN_SG_EXT_ID_KEY: 'all-delete'}, 'name': 'as_ip4_delete', 'addresses': ['10.0.0.4']}, } self.routers = [{'id': 'r1', 'routes': [{'nexthop': '20.0.0.100', 'destination': '11.0.0.0/24'}, { 'nexthop': '20.0.0.101', 'destination': '12.0.0.0/24'}], 'gw_port_id': 'gpr1', 'external_gateway_info': { 'network_id': "ext-net", 'enable_snat': True, 'external_fixed_ips': [ {'subnet_id': 'ext-subnet', 'ip_address': '90.0.0.2'}]}}, {'id': 'r2', 'routes': [{'nexthop': '40.0.0.100', 'destination': '30.0.0.0/24'}], 'gw_port_id': 'gpr2', 'external_gateway_info': { 'network_id': "ext-net", 'enable_snat': True, 'external_fixed_ips': [ {'subnet_id': 'ext-subnet', 'ip_address': '100.0.0.2'}]}}, {'id': 'r4', 'routes': []}] self.get_sync_router_ports = [ {'fixed_ips': [{'subnet_id': 'subnet1', 'ip_address': '192.168.1.1'}], 'id': 'p1r1', 'device_id': 'r1', 'mac_address': 'fa:16:3e:d7:fd:5f'}, {'fixed_ips': [{'subnet_id': 'subnet2', 'ip_address': '192.168.2.1'}], 'id': 'p1r2', 'device_id': 'r2', 'mac_address': 'fa:16:3e:d6:8b:ce'}, {'fixed_ips': [{'subnet_id': 'subnet4', 'ip_address': '192.168.4.1'}], 'id': 'p1r4', 'device_id': 'r4', 'mac_address': 
'fa:16:3e:12:34:56'}] self.floating_ips = [{'id': 'fip1', 'router_id': 'r1', 'floating_ip_address': '90.0.0.10', 'fixed_ip_address': '172.16.0.10'}, {'id': 'fip2', 'router_id': 'r1', 'floating_ip_address': '90.0.0.12', 'fixed_ip_address': '172.16.2.12'}, {'id': 'fip3', 'router_id': 'r2', 'floating_ip_address': '100.0.0.10', 'fixed_ip_address': '192.168.2.10'}, {'id': 'fip4', 'router_id': 'r2', 'floating_ip_address': '100.0.0.11', 'fixed_ip_address': '192.168.2.11'}] self.lrouters_with_rports = [{'name': 'r3', 'ports': {'p1r3': ['fake']}, 'static_routes': [], 'snats': [], 'dnat_and_snats': []}, {'name': 'r4', 'ports': {'p1r4': ['fdad:123:456::1/64', 'fdad:789:abc::1/64']}, 'static_routes': [], 'snats': [], 'dnat_and_snats': []}, {'name': 'r1', 'ports': {'p3r1': ['fake']}, 'static_routes': [{'nexthop': '20.0.0.100', 'destination': '11.0.0.0/24'}, {'nexthop': '20.0.0.100', 'destination': '10.0.0.0/24'}], 'snats': [{'logical_ip': '172.16.0.0/24', 'external_ip': '90.0.0.2', 'type': 'snat'}, {'logical_ip': '172.16.1.0/24', 'external_ip': '90.0.0.2', 'type': 'snat'}], 'dnat_and_snats': [{'logical_ip': '172.16.0.10', 'external_ip': '90.0.0.10', 'type': 'dnat_and_snat'}, {'logical_ip': '172.16.1.11', 'external_ip': '90.0.0.11', 'type': 'dnat_and_snat'}, {'logical_ip': '192.168.2.11', 'external_ip': '100.0.0.11', 'type': 'dnat_and_snat', 'external_mac': '01:02:03:04:05:06', 'logical_port': 'vm1'}]}] self.lswitches_with_ports = [{'name': 'neutron-n1', 'ports': ['p1n1', 'p3n1'], 'provnet_port': None}, {'name': 'neutron-n3', 'ports': ['p1n3', 'p2n3'], 'provnet_port': None}, {'name': 'neutron-n4', 'ports': [], 'provnet_port': 'provnet-n4'}] self.lrport_networks = ['fdad:123:456::1/64', 'fdad:cafe:a1b2::1/64'] def _fake_get_ovn_dhcp_options(self, subnet, network, server_mac=None): if subnet['id'] == 'n1-s1': return {'cidr': '10.0.0.0/24', 'options': {'server_id': '10.0.0.1', 'server_mac': '01:02:03:04:05:06', 'lease_time': str(12 * 60 * 60), 'mtu': '1450', 'router': '10.0.0.1'}, 'external_ids': {'subnet_id': 'n1-s1'}} return {'cidr': '', 'options': '', 'external_ids': {}} def _fake_get_gw_info(self, ctx, router): return { 'r1': [ovn_client.GW_INFO(router_ip='90.0.0.2', gateway_ip='90.0.0.1', network_id='', subnet_id='', ip_version=4, ip_prefix=const.IPv4_ANY)], 'r2': [ovn_client.GW_INFO(router_ip='100.0.0.2', gateway_ip='100.0.0.1', network_id='', subnet_id='', ip_version=4, ip_prefix=const.IPv4_ANY)] }.get(router['id'], []) def _fake_get_v4_network_of_all_router_ports(self, ctx, router_id): return {'r1': ['172.16.0.0/24', '172.16.2.0/24'], 'r2': ['192.168.2.0/24']}.get(router_id, []) def _test_mocks_helper(self, ovn_nb_synchronizer): core_plugin = ovn_nb_synchronizer.core_plugin ovn_api = ovn_nb_synchronizer.ovn_api ovn_driver = ovn_nb_synchronizer.ovn_driver l3_plugin = ovn_nb_synchronizer.l3_plugin core_plugin.get_networks = mock.Mock() core_plugin.get_networks.return_value = self.networks core_plugin.get_subnets = mock.Mock() core_plugin.get_subnets.return_value = self.subnets # following block is used for acl syncing unit-test # With the given set of values in the unit testing, # 19 neutron acls should have been there, # 4 acls are returned as current ovn acls, # two of which will match with neutron. 
        # So, in this example 17 will be added, 2 removed
        core_plugin.get_ports = mock.Mock()
        core_plugin.get_ports.return_value = self.ports
        mock.patch.object(acl, '_get_subnet_from_cache',
                          return_value=self.subnet).start()
        mock.patch.object(acl, 'acl_remote_group_id',
                          side_effect=self.matches).start()
        core_plugin.get_security_group = mock.MagicMock(
            side_effect=self.security_groups)
        ovn_nb_synchronizer.get_acls = mock.Mock()
        ovn_nb_synchronizer.get_acls.return_value = self.acls_ovn
        core_plugin.get_security_groups = mock.MagicMock(
            return_value=self.security_groups)
        ovn_nb_synchronizer.get_address_sets = mock.Mock()
        ovn_nb_synchronizer.get_address_sets.return_value =\
            self.address_sets_ovn
        get_port_groups = mock.MagicMock()
        get_port_groups.execute.return_value = self.port_groups_ovn
        ovn_api.db_list_rows.return_value = get_port_groups
        ovn_api.lsp_list.execute.return_value = self.ports_ovn
        # end of acl-sync block

        # The following block is used for router and router port syncing tests
        # With the given set of values in the unit test,
        # the Neutron db has Routers r1 and r2 present.
        # The OVN db has Routers r1 and r3 present.
        # During the sync r2 will need to be created and r3 will need
        # to be deleted from the OVN db. When Router r3 is deleted, all
        # LRouter ports associated with r3 are deleted too.
        #
        # Neutron db has Router ports p1r1 in Router r1 and p1r2 in Router r2
        # OVN db has p1r3 in Router 3 and p3r1 in Router 1.
        # During the sync p1r1 and p1r2 will be added and p1r3 and p3r1
        # will be deleted from the OVN db
        l3_plugin.get_routers = mock.Mock()
        l3_plugin.get_routers.return_value = self.routers
        l3_plugin._get_sync_interfaces = mock.Mock()
        l3_plugin._get_sync_interfaces.return_value = (
            self.get_sync_router_ports)
        ovn_nb_synchronizer._ovn_client = mock.Mock()
        ovn_nb_synchronizer._ovn_client.\
            _get_nets_and_ipv6_ra_confs_for_router_port.return_value = (
                self.lrport_networks, {})
        ovn_nb_synchronizer._ovn_client._get_v4_network_of_all_router_ports.
\ side_effect = self._fake_get_v4_network_of_all_router_ports ovn_nb_synchronizer._ovn_client._get_gw_info = mock.Mock() ovn_nb_synchronizer._ovn_client._get_gw_info.side_effect = ( self._fake_get_gw_info) # end of router-sync block l3_plugin.get_floatingips = mock.Mock() l3_plugin.get_floatingips.return_value = self.floating_ips ovn_api.get_all_logical_switches_with_ports = mock.Mock() ovn_api.get_all_logical_switches_with_ports.return_value = ( self.lswitches_with_ports) ovn_api.get_all_logical_routers_with_rports = mock.Mock() ovn_api.get_all_logical_routers_with_rports.return_value = ( self.lrouters_with_rports) ovn_api.transaction = mock.MagicMock() ovn_nb_synchronizer._ovn_client.create_network = mock.Mock() ovn_nb_synchronizer._ovn_client.create_port = mock.Mock() ovn_driver.validate_and_get_data_from_binding_profile = mock.Mock() ovn_nb_synchronizer._ovn_client.create_port = mock.Mock() ovn_nb_synchronizer._ovn_client.create_port.return_value = mock.ANY ovn_nb_synchronizer._ovn_client._create_provnet_port = mock.Mock() ovn_api.ls_del = mock.Mock() ovn_api.delete_lswitch_port = mock.Mock() ovn_api.delete_lrouter = mock.Mock() ovn_api.delete_lrouter_port = mock.Mock() ovn_api.add_static_route = mock.Mock() ovn_api.delete_static_route = mock.Mock() ovn_api.get_all_dhcp_options.return_value = { 'subnets': {'n1-s1': {'cidr': '10.0.0.0/24', 'options': {'server_id': '10.0.0.1', 'server_mac': '01:02:03:04:05:06', 'lease_time': str(12 * 60 * 60), 'mtu': '1450', 'router': '10.0.0.1'}, 'external_ids': {'subnet_id': 'n1-s1'}, 'uuid': 'UUID1'}, 'n1-s3': {'cidr': '30.0.0.0/24', 'options': {'server_id': '30.0.0.1', 'server_mac': '01:02:03:04:05:06', 'lease_time': str(12 * 60 * 60), 'mtu': '1450', 'router': '30.0.0.1'}, 'external_ids': {'subnet_id': 'n1-s3'}, 'uuid': 'UUID2'}}, 'ports_v4': {'p1n2': {'cidr': '10.0.0.0/24', 'options': {'server_id': '10.0.0.1', 'server_mac': '01:02:03:04:05:06', 'lease_time': '1000', 'mtu': '1400', 'router': '10.0.0.1'}, 'external_ids': {'subnet_id': 'n1-s1', 'port_id': 'p1n2'}, 'uuid': 'UUID3'}, 'p5n2': {'cidr': '10.0.0.0/24', 'options': {'server_id': '10.0.0.1', 'server_mac': '01:02:03:04:05:06', 'lease_time': '1000', 'mtu': '1400', 'router': '10.0.0.1'}, 'external_ids': {'subnet_id': 'n1-s1', 'port_id': 'p5n2'}, 'uuid': 'UUID4'}}, 'ports_v6': {'p1n1': {'cidr': 'fd79:e1c:a55::/64', 'options': {'server_id': '01:02:03:04:05:06', 'mtu': '1450'}, 'external_ids': {'subnet_id': 'fake', 'port_id': 'p1n1'}, 'uuid': 'UUID5'}, 'p1n2': {'cidr': 'fd79:e1c:a55::/64', 'options': {'server_id': '01:02:03:04:05:06', 'mtu': '1450'}, 'external_ids': {'subnet_id': 'fake', 'port_id': 'p1n2'}, 'uuid': 'UUID6'}}} ovn_api.create_address_set = mock.Mock() ovn_api.delete_address_set = mock.Mock() ovn_api.update_address_set = mock.Mock() ovn_nb_synchronizer._ovn_client._add_subnet_dhcp_options = mock.Mock() ovn_nb_synchronizer._ovn_client._get_ovn_dhcp_options = mock.Mock() ovn_nb_synchronizer._ovn_client._get_ovn_dhcp_options.side_effect = ( self._fake_get_ovn_dhcp_options) ovn_api.delete_dhcp_options = mock.Mock() ovn_nb_synchronizer._ovn_client.get_port_dns_records = mock.Mock() ovn_nb_synchronizer._ovn_client.get_port_dns_records.return_value = {} def _test_ovn_nb_sync_helper(self, ovn_nb_synchronizer, networks, ports, routers, router_ports, create_router_list, create_router_port_list, update_router_port_list, del_router_list, del_router_port_list, create_network_list, create_port_list, create_provnet_port_list, del_network_list, del_port_list, add_static_route_list, 
del_static_route_list, add_snat_list, del_snat_list, add_floating_ip_list, del_floating_ip_list, add_address_set_list, del_address_set_list, update_address_set_list, add_subnet_dhcp_options_list, delete_dhcp_options_list, add_port_groups_list, del_port_groups_list, port_groups_supported=False): self._test_mocks_helper(ovn_nb_synchronizer) core_plugin = ovn_nb_synchronizer.core_plugin ovn_api = ovn_nb_synchronizer.ovn_api ovn_api.is_port_groups_supported.return_value = port_groups_supported mock.patch.object(impl_idl_ovn, 'get_connection').start() ovn_nb_synchronizer.do_sync() if not ovn_api.is_port_groups_supported(): get_security_group_calls = [mock.call(mock.ANY, sg['id']) for sg in self.security_groups] self.assertEqual(len(self.security_groups), core_plugin.get_security_group.call_count) core_plugin.get_security_group.assert_has_calls( get_security_group_calls, any_order=True) create_address_set_calls = [mock.call(**a) for a in add_address_set_list] self.assertEqual( len(add_address_set_list), ovn_api.create_address_set.call_count) ovn_api.create_address_set.assert_has_calls( create_address_set_calls, any_order=True) del_address_set_calls = [mock.call(**d) for d in del_address_set_list] self.assertEqual( len(del_address_set_list), ovn_api.delete_address_set.call_count) ovn_api.delete_address_set.assert_has_calls( del_address_set_calls, any_order=True) update_address_set_calls = [mock.call(**u) for u in update_address_set_list] self.assertEqual( len(update_address_set_list), ovn_api.update_address_set.call_count) ovn_api.update_address_set.assert_has_calls( update_address_set_calls, any_order=True) create_port_groups_calls = [mock.call(**a) for a in add_port_groups_list] self.assertEqual( len(add_port_groups_list), ovn_api.pg_add.call_count) ovn_api.pg_add.assert_has_calls( create_port_groups_calls, any_order=True) del_port_groups_calls = [mock.call(d) for d in del_port_groups_list] self.assertEqual( len(del_port_groups_list), ovn_api.pg_del.call_count) ovn_api.pg_del.assert_has_calls( del_port_groups_calls, any_order=True) self.assertEqual( len(create_network_list), ovn_nb_synchronizer._ovn_client.create_network.call_count) create_network_calls = [mock.call(mock.ANY, net['net']) for net in create_network_list] ovn_nb_synchronizer._ovn_client.create_network.assert_has_calls( create_network_calls, any_order=True) self.assertEqual( len(create_port_list), ovn_nb_synchronizer._ovn_client.create_port.call_count) create_port_calls = [mock.call(mock.ANY, port) for port in create_port_list] ovn_nb_synchronizer._ovn_client.create_port.assert_has_calls( create_port_calls, any_order=True) create_provnet_port_calls = [ mock.call(mock.ANY, mock.ANY, network['provider:physical_network'], network['provider:segmentation_id']) for network in create_provnet_port_list] self.assertEqual( len(create_provnet_port_list), ovn_nb_synchronizer._ovn_client._create_provnet_port.call_count) ovn_nb_synchronizer._ovn_client._create_provnet_port.assert_has_calls( create_provnet_port_calls, any_order=True) self.assertEqual(len(del_network_list), ovn_api.ls_del.call_count) ls_del_calls = [mock.call(net_name) for net_name in del_network_list] ovn_api.ls_del.assert_has_calls( ls_del_calls, any_order=True) self.assertEqual(len(del_port_list), ovn_api.delete_lswitch_port.call_count) delete_lswitch_port_calls = [mock.call(lport_name=port['id'], lswitch_name=port['lswitch']) for port in del_port_list] ovn_api.delete_lswitch_port.assert_has_calls( delete_lswitch_port_calls, any_order=True) add_route_calls = 
[mock.call(mock.ANY, ip_prefix=route['destination'], nexthop=route['nexthop']) for route in add_static_route_list] ovn_api.add_static_route.assert_has_calls(add_route_calls, any_order=True) self.assertEqual(len(add_static_route_list), ovn_api.add_static_route.call_count) del_route_calls = [mock.call(mock.ANY, ip_prefix=route['destination'], nexthop=route['nexthop']) for route in del_static_route_list] ovn_api.delete_static_route.assert_has_calls(del_route_calls, any_order=True) self.assertEqual(len(del_static_route_list), ovn_api.delete_static_route.call_count) add_nat_calls = [mock.call(mock.ANY, **nat) for nat in add_snat_list] ovn_api.add_nat_rule_in_lrouter.assert_has_calls(add_nat_calls, any_order=True) self.assertEqual(len(add_snat_list), ovn_api.add_nat_rule_in_lrouter.call_count) add_fip_calls = [mock.call(nat, txn=mock.ANY) for nat in add_floating_ip_list] (ovn_nb_synchronizer._ovn_client._create_or_update_floatingip. assert_has_calls(add_fip_calls)) self.assertEqual( len(add_floating_ip_list), ovn_nb_synchronizer._ovn_client._create_or_update_floatingip. call_count) del_nat_calls = [mock.call(mock.ANY, **nat) for nat in del_snat_list] ovn_api.delete_nat_rule_in_lrouter.assert_has_calls(del_nat_calls, any_order=True) self.assertEqual(len(del_snat_list), ovn_api.delete_nat_rule_in_lrouter.call_count) del_fip_calls = [mock.call(nat, mock.ANY, txn=mock.ANY) for nat in del_floating_ip_list] ovn_nb_synchronizer._ovn_client._delete_floatingip.assert_has_calls( del_fip_calls, any_order=True) self.assertEqual( len(del_floating_ip_list), ovn_nb_synchronizer._ovn_client._delete_floatingip.call_count) create_router_calls = [mock.call(mock.ANY, r, add_external_gateway=False) for r in create_router_list] self.assertEqual( len(create_router_list), ovn_nb_synchronizer._ovn_client.create_router.call_count) ovn_nb_synchronizer._ovn_client.create_router.assert_has_calls( create_router_calls, any_order=True) create_router_port_calls = [mock.call(mock.ANY, p['device_id'], mock.ANY) for p in create_router_port_list] self.assertEqual( len(create_router_port_list), ovn_nb_synchronizer._ovn_client._create_lrouter_port.call_count) ovn_nb_synchronizer._ovn_client._create_lrouter_port.assert_has_calls( create_router_port_calls, any_order=True) self.assertEqual(len(del_router_list), ovn_api.delete_lrouter.call_count) update_router_port_calls = [mock.call(mock.ANY, p) for p in update_router_port_list] self.assertEqual( len(update_router_port_list), ovn_nb_synchronizer._ovn_client.update_router_port.call_count) ovn_nb_synchronizer._ovn_client.update_router_port.assert_has_calls( update_router_port_calls, any_order=True) delete_lrouter_calls = [mock.call(r['router']) for r in del_router_list] ovn_api.delete_lrouter.assert_has_calls( delete_lrouter_calls, any_order=True) self.assertEqual( len(del_router_port_list), ovn_api.delete_lrouter_port.call_count) delete_lrouter_port_calls = [mock.call(port['id'], port['router'], if_exists=False) for port in del_router_port_list] ovn_api.delete_lrouter_port.assert_has_calls( delete_lrouter_port_calls, any_order=True) self.assertEqual( len(add_subnet_dhcp_options_list), ovn_nb_synchronizer._ovn_client._add_subnet_dhcp_options. call_count) add_subnet_dhcp_options_calls = [ mock.call(subnet, net, mock.ANY) for (subnet, net) in add_subnet_dhcp_options_list] ovn_nb_synchronizer._ovn_client._add_subnet_dhcp_options. 
\ assert_has_calls(add_subnet_dhcp_options_calls, any_order=True) self.assertEqual(ovn_api.delete_dhcp_options.call_count, len(delete_dhcp_options_list)) delete_dhcp_options_calls = [ mock.call(dhcp_opt_uuid) for dhcp_opt_uuid in delete_dhcp_options_list] ovn_api.delete_dhcp_options.assert_has_calls( delete_dhcp_options_calls, any_order=True) def _test_ovn_nb_sync_mode_repair_helper(self, port_groups_supported=True): create_network_list = [{'net': {'id': 'n2', 'mtu': 1450}, 'ext_ids': {}}] del_network_list = ['neutron-n3'] del_port_list = [{'id': 'p3n1', 'lswitch': 'neutron-n1'}, {'id': 'p1n1', 'lswitch': 'neutron-n1'}] create_port_list = self.ports for port in create_port_list: if port['id'] in ['p1n1', 'fp1']: # this will be skipped by the logic, # because p1n1 is already in lswitch-port list # and fp1 is a floating IP port create_port_list.remove(port) create_provnet_port_list = [{'id': 'n1', 'mtu': 1450, 'provider:physical_network': 'physnet1', 'provider:segmentation_id': 1000}] create_router_list = [{ 'id': 'r2', 'routes': [ {'nexthop': '40.0.0.100', 'destination': '30.0.0.0/24'}], 'gw_port_id': 'gpr2', 'external_gateway_info': { 'network_id': "ext-net", 'enable_snat': True, 'external_fixed_ips': [{ 'subnet_id': 'ext-subnet', 'ip_address': '100.0.0.2'}]}}] # Test adding and deleting routes snats fips behaviors for router r1 # existing in both neutron DB and OVN DB. # Test adding behaviors for router r2 only existing in neutron DB. # Static routes with destination 0.0.0.0/0 are default gateway routes add_static_route_list = [{'nexthop': '20.0.0.101', 'destination': '12.0.0.0/24'}, {'nexthop': '90.0.0.1', 'destination': '0.0.0.0/0'}, {'nexthop': '40.0.0.100', 'destination': '30.0.0.0/24'}, {'nexthop': '100.0.0.1', 'destination': '0.0.0.0/0'}] del_static_route_list = [{'nexthop': '20.0.0.100', 'destination': '10.0.0.0/24'}] add_snat_list = [{'logical_ip': '172.16.2.0/24', 'external_ip': '90.0.0.2', 'type': 'snat'}, {'logical_ip': '192.168.2.0/24', 'external_ip': '100.0.0.2', 'type': 'snat'}] del_snat_list = [{'logical_ip': '172.16.1.0/24', 'external_ip': '90.0.0.2', 'type': 'snat'}] # fip 100.0.0.11 exists in OVN with distributed type and in Neutron # with centralized type. 
This fip is used to test # enable_distributed_floating_ip switch and migration add_floating_ip_list = [{'id': 'fip2', 'router_id': 'r1', 'floating_ip_address': '90.0.0.12', 'fixed_ip_address': '172.16.2.12'}, {'id': 'fip3', 'router_id': 'r2', 'floating_ip_address': '100.0.0.10', 'fixed_ip_address': '192.168.2.10'}, {'id': 'fip4', 'router_id': 'r2', 'floating_ip_address': '100.0.0.11', 'fixed_ip_address': '192.168.2.11'}] del_floating_ip_list = [{'logical_ip': '172.16.1.11', 'external_ip': '90.0.0.11', 'type': 'dnat_and_snat'}, {'logical_ip': '192.168.2.11', 'external_ip': '100.0.0.11', 'type': 'dnat_and_snat', 'external_mac': '01:02:03:04:05:06', 'logical_port': 'vm1'}] del_router_list = [{'router': 'neutron-r3'}] del_router_port_list = [{'id': 'lrp-p3r1', 'router': 'neutron-r1'}] create_router_port_list = self.get_sync_router_ports[:2] update_router_port_list = [self.get_sync_router_ports[2]] update_router_port_list[0].update( {'networks': self.lrport_networks}) if not port_groups_supported: add_address_set_list = [ {'external_ids': {ovn_const.OVN_SG_EXT_ID_KEY: 'sg1'}, 'name': 'as_ip6_sg1', 'addresses': ['fd79:e1c:a55::816:eff:eff:ff2']}] del_address_set_list = [{'name': 'as_ip4_del'}] update_address_set_list = [ {'addrs_remove': [], 'addrs_add': ['10.0.0.4'], 'name': 'as_ip4_sg2'}, {'addrs_remove': ['fd79:e1c:a55::816:eff:eff:ff3'], 'addrs_add': [], 'name': 'as_ip6_sg2'}] # If Port Groups are not supported, we don't expect any of those # to be created/deleted. add_port_groups_list = [] del_port_groups_list = [] else: add_port_groups_list = [ {'external_ids': {ovn_const.OVN_SG_EXT_ID_KEY: 'sg2'}, 'name': 'pg_sg2', 'acls': []}] del_port_groups_list = ['pg_unknown_del'] # If using Port Groups, no Address Set shall be created/updated # and all the existing ones have to be removed. 
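        # (Hedged note, added for clarity: when Port Groups are supported,
        # security groups map to OVN Port_Group rows and OVN can derive the
        # matching address sets from the group membership itself, which is
        # why the driver-maintained per-SG Address Sets become redundant.)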
add_address_set_list = [] update_address_set_list = [] del_address_set_list = [{'name': 'as_ip4_sg1'}, {'name': 'as_ip4_sg2'}, {'name': 'as_ip6_sg2'}, {'name': 'as_ip4_del'}] add_subnet_dhcp_options_list = [(self.subnets[2], self.networks[1]), (self.subnets[1], self.networks[0])] delete_dhcp_options_list = ['UUID2', 'UUID4', 'UUID5'] ovn_nb_synchronizer = ovn_db_sync.OvnNbSynchronizer( self.plugin, self.mech_driver._nb_ovn, self.mech_driver._sb_ovn, 'repair', self.mech_driver) self._test_ovn_nb_sync_helper(ovn_nb_synchronizer, self.networks, self.ports, self.routers, self.get_sync_router_ports, create_router_list, create_router_port_list, update_router_port_list, del_router_list, del_router_port_list, create_network_list, create_port_list, create_provnet_port_list, del_network_list, del_port_list, add_static_route_list, del_static_route_list, add_snat_list, del_snat_list, add_floating_ip_list, del_floating_ip_list, add_address_set_list, del_address_set_list, update_address_set_list, add_subnet_dhcp_options_list, delete_dhcp_options_list, add_port_groups_list, del_port_groups_list, port_groups_supported) def test_ovn_nb_sync_mode_repair_no_pgs(self): self._test_ovn_nb_sync_mode_repair_helper(port_groups_supported=False) def test_ovn_nb_sync_mode_repair_pgs(self): self._test_ovn_nb_sync_mode_repair_helper(port_groups_supported=True) def _test_ovn_nb_sync_mode_log_helper(self, port_groups_supported=True): create_network_list = [] create_port_list = [] create_provnet_port_list = [] del_network_list = [] del_port_list = [] create_router_list = [] create_router_port_list = [] update_router_port_list = [] del_router_list = [] del_router_port_list = [] add_static_route_list = [] del_static_route_list = [] add_snat_list = [] del_snat_list = [] add_floating_ip_list = [] del_floating_ip_list = [] add_address_set_list = [] del_address_set_list = [] update_address_set_list = [] add_subnet_dhcp_options_list = [] delete_dhcp_options_list = [] add_port_groups_list = [] del_port_groups_list = [] ovn_nb_synchronizer = ovn_db_sync.OvnNbSynchronizer( self.plugin, self.mech_driver._nb_ovn, self.mech_driver._sb_ovn, 'log', self.mech_driver) self._test_ovn_nb_sync_helper(ovn_nb_synchronizer, self.networks, self.ports, self.routers, self.get_sync_router_ports, create_router_list, create_router_port_list, update_router_port_list, del_router_list, del_router_port_list, create_network_list, create_port_list, create_provnet_port_list, del_network_list, del_port_list, add_static_route_list, del_static_route_list, add_snat_list, del_snat_list, add_floating_ip_list, del_floating_ip_list, add_address_set_list, del_address_set_list, update_address_set_list, add_subnet_dhcp_options_list, delete_dhcp_options_list, add_port_groups_list, del_port_groups_list, port_groups_supported) def test_ovn_nb_sync_mode_log_pgs(self): self._test_ovn_nb_sync_mode_log_helper(port_groups_supported=True) def test_ovn_nb_sync_mode_log_no_pgs(self): self._test_ovn_nb_sync_mode_log_helper(port_groups_supported=False) class TestOvnSbSyncML2(test_mech_driver.OVNMechanismDriverTestCase): def test_ovn_sb_sync(self): ovn_sb_synchronizer = ovn_db_sync.OvnSbSynchronizer( self.plugin, self.mech_driver._sb_ovn, self.mech_driver) ovn_api = ovn_sb_synchronizer.ovn_api hostname_with_physnets = {'hostname1': ['physnet1', 'physnet2'], 'hostname2': ['physnet1']} ovn_api.get_chassis_hostname_and_physnets.return_value = ( hostname_with_physnets) ovn_driver = ovn_sb_synchronizer.ovn_driver ovn_driver.update_segment_host_mapping = mock.Mock() hosts_in_neutron = 
{'hostname2', 'hostname3'} with mock.patch.object(ovn_db_sync.segments_db, 'get_hosts_mapped_with_segments', return_value=hosts_in_neutron): ovn_sb_synchronizer.sync_hostname_and_physical_networks(mock.ANY) all_hosts = set(hostname_with_physnets.keys()) | hosts_in_neutron self.assertEqual( len(all_hosts), ovn_driver.update_segment_host_mapping.call_count) update_segment_host_mapping_calls = [mock.call( host, hostname_with_physnets[host]) for host in hostname_with_physnets] update_segment_host_mapping_calls += [ mock.call(host, []) for host in hosts_in_neutron - set(hostname_with_physnets.keys())] ovn_driver.update_segment_host_mapping.assert_has_calls( update_segment_host_mapping_calls, any_order=True) ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovsdb_monitor.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovsdb_mo0000644000175000017500000006411600000000000033617 0ustar00coreycorey00000000000000# Copyright 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime import os import mock from neutron_lib.plugins import constants as n_const from neutron_lib.plugins import directory from oslo_utils import timeutils from oslo_utils import uuidutils from ovs.db import idl as ovs_idl from ovs import poller from ovs.stream import Stream from ovsdbapp.backend.ovs_idl import connection from ovsdbapp.backend.ovs_idl import idlutils from neutron.common.ovn import constants as ovn_const from neutron.common.ovn import hash_ring_manager from neutron.common.ovn import utils from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf from neutron.db import ovn_hash_ring_db from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovsdb_monitor from neutron.services.ovn_l3 import plugin # noqa from neutron.tests import base from neutron.tests.unit import fake_resources as fakes from neutron.tests.unit.plugins.ml2.drivers.ovn.mech_driver import \ test_mech_driver basedir = os.path.dirname(os.path.abspath(__file__)) schema_files = { 'OVN_Northbound': os.path.join(basedir, 'schemas', 'ovn-nb.ovsschema'), 'OVN_Southbound': os.path.join(basedir, 'schemas', 'ovn-sb.ovsschema'), } OVN_NB_SCHEMA = { "name": "OVN_Northbound", "version": "3.0.0", "tables": { "Logical_Switch_Port": { "columns": { "name": {"type": "string"}, "type": {"type": "string"}, "addresses": {"type": {"key": "string", "min": 0, "max": "unlimited"}}, "port_security": {"type": {"key": "string", "min": 0, "max": "unlimited"}}, "up": {"type": {"key": "boolean", "min": 0, "max": 1}}}, "indexes": [["name"]], "isRoot": False, }, "Logical_Switch": { "columns": {"name": {"type": "string"}}, "indexes": [["name"]], "isRoot": True, } } } OVN_SB_SCHEMA = { "name": "OVN_Southbound", "version": "1.3.0", "tables": { "Chassis": { "columns": { "name": {"type": "string"}, "hostname": {"type": "string"}, "external_ids": { "type": 
{"key": "string", "value": "string", "min": 0, "max": "unlimited"}}}, "isRoot": True, "indexes": [["name"]] } } } ROW_CREATE = ovsdb_monitor.BaseEvent.ROW_CREATE ROW_UPDATE = ovsdb_monitor.BaseEvent.ROW_UPDATE class TestOvnDbNotifyHandler(base.BaseTestCase): def setUp(self): super(TestOvnDbNotifyHandler, self).setUp() self.handler = ovsdb_monitor.OvnDbNotifyHandler(mock.ANY) self.watched_events = self.handler._RowEventHandler__watched_events def test_watch_and_unwatch_events(self): expected_events = set() networking_event = mock.Mock() ovn_event = mock.Mock() unknown_event = mock.Mock() self.assertItemsEqual(set(), self.watched_events) expected_events.add(networking_event) self.handler.watch_event(networking_event) self.assertItemsEqual(expected_events, self.watched_events) expected_events.add(ovn_event) self.handler.watch_events([ovn_event]) self.assertItemsEqual(expected_events, self.watched_events) self.handler.unwatch_events([networking_event, ovn_event]) self.handler.unwatch_event(unknown_event) self.handler.unwatch_events([unknown_event]) self.assertItemsEqual(set(), self.watched_events) def test_shutdown(self): self.handler.shutdown() # class TestOvnBaseConnection(base.TestCase): # # Each test is being deleted, but for reviewers sake I wanted to exaplain why: # # @mock.patch.object(idlutils, 'get_schema_helper') # def testget_schema_helper_success(self, mock_gsh): # # 1. OvnBaseConnection and OvnConnection no longer exist # 2. get_schema_helper is no longer a part of the Connection class # # @mock.patch.object(idlutils, 'get_schema_helper') # def testget_schema_helper_initial_exception(self, mock_gsh): # # @mock.patch.object(idlutils, 'get_schema_helper') # def testget_schema_helper_all_exception(self, mock_gsh): # # 3. The only reason get_schema_helper had a retry loop was for Neutron's # use case of trying to set the Manager to listen on ptcp:127.0.0.1:6640 # if it wasn't already set up. Since that code being removed was the whole # reason to re-implement get_schema_helper here,the exception retry is not # needed and therefor is not a part of ovsdbapp's implementation of # idlutils.get_schema_helper which we now use directly in from_server() # 4. These tests now would be testing the various from_server() calls, but # there is almost nothing to test in those except maybe SSL being set up # but that was done below. class TestOvnConnection(base.BaseTestCase): def setUp(self): super(TestOvnConnection, self).setUp() @mock.patch.object(idlutils, 'get_schema_helper') @mock.patch.object(idlutils, 'wait_for_change') def _test_connection_start(self, mock_wfc, mock_gsh, idl_class, schema): mock_gsh.return_value = ovs_idl.SchemaHelper( location=schema_files[schema]) _idl = idl_class.from_server('punix:/tmp/fake', schema, mock.Mock()) self.ovn_connection = connection.Connection(_idl, mock.Mock()) with mock.patch.object(poller, 'Poller'), \ mock.patch('threading.Thread'): self.ovn_connection.start() # A second start attempt shouldn't re-register. 
self.ovn_connection.start() self.ovn_connection.thread.start.assert_called_once_with() def test_connection_nb_start(self): ovn_conf.cfg.CONF.set_override('ovn_nb_private_key', 'foo-key', 'ovn') Stream.ssl_set_private_key_file = mock.Mock() Stream.ssl_set_certificate_file = mock.Mock() Stream.ssl_set_ca_cert_file = mock.Mock() self._test_connection_start(idl_class=ovsdb_monitor.OvnNbIdl, schema='OVN_Northbound') Stream.ssl_set_private_key_file.assert_called_once_with('foo-key') Stream.ssl_set_certificate_file.assert_not_called() Stream.ssl_set_ca_cert_file.assert_not_called() def test_connection_sb_start(self): self._test_connection_start(idl_class=ovsdb_monitor.OvnSbIdl, schema='OVN_Southbound') class TestOvnIdlDistributedLock(base.BaseTestCase): def setUp(self): super(TestOvnIdlDistributedLock, self).setUp() self.node_uuid = uuidutils.generate_uuid() self.fake_driver = mock.Mock() self.fake_driver.node_uuid = self.node_uuid self.fake_event = 'fake-event' self.fake_row = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'_table': mock.Mock(name='FakeTable')}) helper = ovs_idl.SchemaHelper(schema_json=OVN_NB_SCHEMA) helper.register_all() with mock.patch.object(ovsdb_monitor, 'OvnDbNotifyHandler'): self.idl = ovsdb_monitor.OvnIdlDistributedLock( self.fake_driver, 'punix:/tmp/fake', helper) self.mock_get_node = mock.patch.object( hash_ring_manager.HashRingManager, 'get_node', return_value=self.node_uuid).start() @mock.patch.object(ovn_hash_ring_db, 'touch_node') def test_notify(self, mock_touch_node): self.idl.notify(self.fake_event, self.fake_row) mock_touch_node.assert_called_once_with(mock.ANY, self.node_uuid) self.idl.notify_handler.notify.assert_called_once_with( self.fake_event, self.fake_row, None) @mock.patch.object(ovn_hash_ring_db, 'touch_node') def test_notify_skip_touch_node(self, mock_touch_node): # Set a time for last touch self.idl._last_touch = timeutils.utcnow() self.idl.notify(self.fake_event, self.fake_row) # Assert that touch_node() wasn't called self.assertFalse(mock_touch_node.called) self.idl.notify_handler.notify.assert_called_once_with( self.fake_event, self.fake_row, None) @mock.patch.object(ovn_hash_ring_db, 'touch_node') def test_notify_last_touch_expired(self, mock_touch_node): # Set a time for last touch self.idl._last_touch = timeutils.utcnow() # Let's expire the touch node interval for the next utcnow() with mock.patch.object(timeutils, 'utcnow') as mock_utcnow: mock_utcnow.return_value = ( self.idl._last_touch + datetime.timedelta( seconds=ovn_const.HASH_RING_TOUCH_INTERVAL + 1)) self.idl.notify(self.fake_event, self.fake_row) # Assert that touch_node() was invoked mock_touch_node.assert_called_once_with(mock.ANY, self.node_uuid) self.idl.notify_handler.notify.assert_called_once_with( self.fake_event, self.fake_row, None) @mock.patch.object(ovsdb_monitor.LOG, 'exception') @mock.patch.object(ovn_hash_ring_db, 'touch_node') def test_notify_touch_node_exception(self, mock_touch_node, mock_log): mock_touch_node.side_effect = Exception('BoOooOmmMmmMm') self.idl.notify(self.fake_event, self.fake_row) # Assert that in an eventual failure on touch_node() the event # will continue to be processed by notify_handler.notify() mock_touch_node.assert_called_once_with(mock.ANY, self.node_uuid) # Assert we are logging the exception self.assertTrue(mock_log.called) self.idl.notify_handler.notify.assert_called_once_with( self.fake_event, self.fake_row, None) def test_notify_different_node(self): self.mock_get_node.return_value = 'different-node-uuid' self.idl.notify('fake-event', 
self.fake_row) # Assert that notify() wasn't called for a different node uuid self.assertFalse(self.idl.notify_handler.notify.called) class TestPortBindingChassisUpdateEvent(base.BaseTestCase): def setUp(self): super(TestPortBindingChassisUpdateEvent, self).setUp() self.driver = mock.Mock() self.event = ovsdb_monitor.PortBindingChassisUpdateEvent(self.driver) def _test_event(self, event, row, old): if self.event.matches(event, row, old): self.event.run(event, row, old) self.driver.set_port_status_up.assert_called() else: self.driver.set_port_status_up.assert_not_called() def test_event_matches(self): # NOTE(twilson) This primarily tests implementation details. If a # scenario test is written that handles shutting down a compute # node uncleanly and performing a 'host-evacuate', this can be removed pbtable = fakes.FakeOvsdbTable.create_one_ovsdb_table( attrs={'name': 'Port_Binding'}) ovsdb_row = fakes.FakeOvsdbRow.create_one_ovsdb_row self.driver._nb_ovn.lookup.return_value = ovsdb_row(attrs={'up': True}) self._test_event( self.event.ROW_UPDATE, ovsdb_row(attrs={'_table': pbtable, 'chassis': 'one', 'type': '_fake_', 'logical_port': 'foo'}), ovsdb_row(attrs={'_table': pbtable, 'chassis': 'two', 'type': '_fake_'})) class TestOvnNbIdlNotifyHandler(test_mech_driver.OVNMechanismDriverTestCase): def setUp(self): super(TestOvnNbIdlNotifyHandler, self).setUp() helper = ovs_idl.SchemaHelper(schema_json=OVN_NB_SCHEMA) helper.register_all() self.idl = ovsdb_monitor.OvnNbIdl(self.driver, "remote", helper) self.lp_table = self.idl.tables.get('Logical_Switch_Port') self.driver.set_port_status_up = mock.Mock() self.driver.set_port_status_down = mock.Mock() def _test_lsp_helper(self, event, new_row_json, old_row_json=None, table=None): row_uuid = uuidutils.generate_uuid() if not table: table = self.lp_table lp_row = ovs_idl.Row.from_json(self.idl, table, row_uuid, new_row_json) if old_row_json: old_row = ovs_idl.Row.from_json(self.idl, table, row_uuid, old_row_json) else: old_row = None self.idl.notify(event, lp_row, updates=old_row) # Add a STOP EVENT to the queue self.idl.notify_handler.shutdown() # Execute the notifications queued self.idl.notify_handler.notify_loop() def test_lsp_up_create_event(self): row_data = {"up": True, "name": "foo-name"} self._test_lsp_helper('create', row_data) self.driver.set_port_status_up.assert_called_once_with("foo-name") self.assertFalse(self.driver.set_port_status_down.called) def test_lsp_down_create_event(self): row_data = {"up": False, "name": "foo-name"} self._test_lsp_helper('create', row_data) self.driver.set_port_status_down.assert_called_once_with("foo-name") self.assertFalse(self.driver.set_port_status_up.called) def test_lsp_up_not_set_event(self): row_data = {"up": ['set', []], "name": "foo-name"} self._test_lsp_helper('create', row_data) self.assertFalse(self.driver.set_port_status_up.called) self.assertFalse(self.driver.set_port_status_down.called) def test_unwatch_logical_switch_port_create_events(self): self.idl.unwatch_logical_switch_port_create_events() row_data = {"up": True, "name": "foo-name"} self._test_lsp_helper('create', row_data) self.assertFalse(self.driver.set_port_status_up.called) self.assertFalse(self.driver.set_port_status_down.called) row_data["up"] = False self._test_lsp_helper('create', row_data) self.assertFalse(self.driver.set_port_status_up.called) self.assertFalse(self.driver.set_port_status_down.called) def test_post_connect(self): self.idl.post_connect() self.assertIsNone(self.idl._lsp_create_up_event) 
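 # (Hedged reading of the OvnNbIdl code under test: post_connect()
        # unregisters the one-shot LSP 'create' events used only for the
        # initial sync, so both event references are expected to be cleared;
        # the second reference is checked below.)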
self.assertIsNone(self.idl._lsp_create_down_event) def test_lsp_up_update_event(self): new_row_json = {"up": True, "name": "foo-name"} old_row_json = {"up": False} self._test_lsp_helper('update', new_row_json, old_row_json=old_row_json) self.driver.set_port_status_up.assert_called_once_with("foo-name") self.assertFalse(self.driver.set_port_status_down.called) def test_lsp_down_update_event(self): new_row_json = {"up": False, "name": "foo-name"} old_row_json = {"up": True} self._test_lsp_helper('update', new_row_json, old_row_json=old_row_json) self.driver.set_port_status_down.assert_called_once_with("foo-name") self.assertFalse(self.driver.set_port_status_up.called) def test_lsp_up_update_event_no_old_data(self): new_row_json = {"up": True, "name": "foo-name"} self._test_lsp_helper('update', new_row_json, old_row_json=None) self.assertFalse(self.driver.set_port_status_up.called) self.assertFalse(self.driver.set_port_status_down.called) def test_lsp_down_update_event_no_old_data(self): new_row_json = {"up": False, "name": "foo-name"} self._test_lsp_helper('update', new_row_json, old_row_json=None) self.assertFalse(self.driver.set_port_status_up.called) self.assertFalse(self.driver.set_port_status_down.called) def test_lsp_other_column_update_event(self): new_row_json = {"up": False, "name": "foo-name", "addresses": ["10.0.0.2"]} old_row_json = {"addresses": ["10.0.0.3"]} self._test_lsp_helper('update', new_row_json, old_row_json=old_row_json) self.assertFalse(self.driver.set_port_status_up.called) self.assertFalse(self.driver.set_port_status_down.called) def test_notify_other_table(self): new_row_json = {"name": "foo-name"} self._test_lsp_helper('create', new_row_json, table=self.idl.tables.get("Logical_Switch")) self.assertFalse(self.driver.set_port_status_up.called) self.assertFalse(self.driver.set_port_status_down.called) @mock.patch.object(hash_ring_manager.HashRingManager, 'get_node') def test_notify_different_target_node(self, mock_get_node): mock_get_node.return_value = 'this-is-a-different-node' row = fakes.FakeOvsdbRow.create_one_ovsdb_row() self.idl.notify_handler.notify = mock.Mock() self.idl.notify("create", row) # Assert that if the target_node returned by the ring is different # than this driver's node_uuid, notify() won't be called self.assertFalse(self.idl.notify_handler.notify.called) class TestOvnSbIdlNotifyHandler(test_mech_driver.OVNMechanismDriverTestCase): l3_plugin = 'ovn-router' def setUp(self): super(TestOvnSbIdlNotifyHandler, self).setUp() sb_helper = ovs_idl.SchemaHelper(schema_json=OVN_SB_SCHEMA) sb_helper.register_table('Chassis') self.sb_idl = ovsdb_monitor.OvnSbIdl(self.driver, "remote", sb_helper) self.sb_idl.post_connect() self.chassis_table = self.sb_idl.tables.get('Chassis') self.driver.update_segment_host_mapping = mock.Mock() self.l3_plugin = directory.get_plugin(n_const.L3) self.l3_plugin.schedule_unhosted_gateways = mock.Mock() self.row_json = { "name": "fake-name", "hostname": "fake-hostname", "external_ids": ['map', [["ovn-bridge-mappings", "fake-phynet1:fake-br1"]]] } def _test_chassis_helper(self, event, new_row_json, old_row_json=None): row_uuid = uuidutils.generate_uuid() table = self.chassis_table row = ovs_idl.Row.from_json(self.sb_idl, table, row_uuid, new_row_json) if old_row_json: old_row = ovs_idl.Row.from_json(self.sb_idl, table, row_uuid, old_row_json) else: old_row = None self.sb_idl.notify(event, row, updates=old_row) # Add a STOP EVENT to the queue self.sb_idl.notify_handler.shutdown() # Execute the notifications queued 
self.sb_idl.notify_handler.notify_loop() def test_chassis_create_event(self): self._test_chassis_helper('create', self.row_json) self.driver.update_segment_host_mapping.assert_called_once_with( 'fake-hostname', ['fake-phynet1']) self.l3_plugin.schedule_unhosted_gateways.assert_called_once_with( event_from_chassis=None) def test_chassis_delete_event(self): self._test_chassis_helper('delete', self.row_json) self.driver.update_segment_host_mapping.assert_called_once_with( 'fake-hostname', []) self.l3_plugin.schedule_unhosted_gateways.assert_called_once_with( event_from_chassis='fake-name') def test_chassis_update_event(self): old_row_json = copy.deepcopy(self.row_json) old_row_json['external_ids'][1][0][1] = ( "fake-phynet2:fake-br2") self._test_chassis_helper('update', self.row_json, old_row_json) self.driver.update_segment_host_mapping.assert_called_once_with( 'fake-hostname', ['fake-phynet1']) self.l3_plugin.schedule_unhosted_gateways.assert_called_once_with( event_from_chassis=None) def test_chassis_update_event_reschedule_not_needed(self): self.row_json['external_ids'][1].append(['foo_field', 'foo_value_new']) old_row_json = copy.deepcopy(self.row_json) old_row_json['external_ids'][1][1][1] = ( "foo_value") self._test_chassis_helper('update', self.row_json, old_row_json) self.driver.update_segment_host_mapping.assert_not_called() self.l3_plugin.schedule_unhosted_gateways.assert_not_called() def test_chassis_update_event_reschedule_lost_physnet(self): old_row_json = copy.deepcopy(self.row_json) self.row_json['external_ids'][1][0][1] = '' self._test_chassis_helper('update', self.row_json, old_row_json) self.l3_plugin.schedule_unhosted_gateways.assert_called_once_with( event_from_chassis='fake-name') def test_chassis_update_event_reschedule_add_physnet(self): old_row_json = copy.deepcopy(self.row_json) self.row_json['external_ids'][1][0][1] += ',foo_physnet:foo_br' self._test_chassis_helper('update', self.row_json, old_row_json) self.driver.update_segment_host_mapping.assert_called_once_with( 'fake-hostname', ['fake-phynet1', 'foo_physnet']) self.l3_plugin.schedule_unhosted_gateways.assert_called_once_with( event_from_chassis=None) def test_chassis_update_event_reschedule_add_and_remove_physnet(self): old_row_json = copy.deepcopy(self.row_json) self.row_json['external_ids'][1][0][1] = 'foo_physnet:foo_br' self._test_chassis_helper('update', self.row_json, old_row_json) self.driver.update_segment_host_mapping.assert_called_once_with( 'fake-hostname', ['foo_physnet']) self.l3_plugin.schedule_unhosted_gateways.assert_called_once_with( event_from_chassis=None) def test_chassis_update_empty_no_external_ids(self): old_row_json = copy.deepcopy(self.row_json) old_row_json.pop('external_ids') with mock.patch( 'neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.' 'ovsdb_monitor.ChassisEvent.' 
'handle_ha_chassis_group_changes') as mock_ha: self._test_chassis_helper('update', self.row_json, old_row_json) self.driver.update_segment_host_mapping.assert_not_called() self.l3_plugin.schedule_unhosted_gateways.assert_not_called() mock_ha.assert_not_called() class TestChassisEvent(base.BaseTestCase): def setUp(self): super(TestChassisEvent, self).setUp() self.driver = mock.Mock() self.nb_ovn = self.driver._nb_ovn self.driver._ovn_client.is_external_ports_supported.return_value = True self.event = ovsdb_monitor.ChassisEvent(self.driver) self.is_gw_ch_mock = mock.patch.object( utils, 'is_gateway_chassis').start() self.is_gw_ch_mock.return_value = True def test_handle_ha_chassis_group_changes_create_not_gw(self): self.is_gw_ch_mock.return_value = False # Assert chassis is ignored because it's not a gateway chassis self.assertIsNone(self.event.handle_ha_chassis_group_changes( self.event.ROW_CREATE, mock.Mock(), mock.Mock())) self.assertFalse(self.nb_ovn.ha_chassis_group_add_chassis.called) self.assertFalse(self.nb_ovn.ha_chassis_group_del_chassis.called) def _test_handle_ha_chassis_group_changes_create(self, event): row = fakes.FakeOvsdbTable.create_one_ovsdb_table( attrs={'name': 'SpongeBob'}) ch0 = fakes.FakeOvsdbTable.create_one_ovsdb_table( attrs={'priority': 10}) ch1 = fakes.FakeOvsdbTable.create_one_ovsdb_table( attrs={'priority': 9}) default_grp = fakes.FakeOvsdbTable.create_one_ovsdb_table( attrs={'ha_chassis': [ch0, ch1]}) self.nb_ovn.ha_chassis_group_get.return_value.execute.return_value = ( default_grp) self.event.handle_ha_chassis_group_changes(event, row, mock.Mock()) # Assert the new chassis has been added to the default # group with the lowest priority self.nb_ovn.ha_chassis_group_add_chassis.assert_called_once_with( ovn_const.HA_CHASSIS_GROUP_DEFAULT_NAME, 'SpongeBob', priority=8) def test_handle_ha_chassis_group_changes_create(self): self._test_handle_ha_chassis_group_changes_create( self.event.ROW_CREATE) def _test_handle_ha_chassis_group_changes_delete(self, event): row = fakes.FakeOvsdbTable.create_one_ovsdb_table( attrs={'name': 'SpongeBob'}) self.event.handle_ha_chassis_group_changes(event, row, mock.Mock()) # Assert chassis was removed from the default group self.nb_ovn.ha_chassis_group_del_chassis.assert_called_once_with( ovn_const.HA_CHASSIS_GROUP_DEFAULT_NAME, 'SpongeBob', if_exists=True) def test_handle_ha_chassis_group_changes_delete(self): self._test_handle_ha_chassis_group_changes_delete( self.event.ROW_DELETE) def test_handle_ha_chassis_group_changes_update_still_gw(self): # Assert nothing was done because the update didn't # change the gateway chassis status self.assertIsNone(self.event.handle_ha_chassis_group_changes( self.event.ROW_UPDATE, mock.Mock(), mock.Mock())) self.assertFalse(self.nb_ovn.ha_chassis_group_add_chassis.called) self.assertFalse(self.nb_ovn.ha_chassis_group_del_chassis.called) def test_handle_ha_chassis_group_changes_update_no_longer_gw(self): self.is_gw_ch_mock.side_effect = (False, True) # Assert that the chassis was removed from the default group # after it is no longer a gateway chassis self._test_handle_ha_chassis_group_changes_delete( self.event.ROW_UPDATE) def test_handle_ha_chassis_group_changes_update_new_gw(self): self.is_gw_ch_mock.side_effect = (True, False) # Assert that the chassis was added to the default group # after it became a gateway chassis self._test_handle_ha_chassis_group_changes_create( self.event.ROW_UPDATE)
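# The notify-handler tests above all exercise the same drain pattern:
# notify() enqueues matched events, shutdown() enqueues a stop sentinel,
# and notify_loop() pops events until it reaches that sentinel.  Below is
# a minimal, self-contained sketch of that pattern; FakeNotifyHandler and
# _STOP are illustrative names only, not the actual ovsdb_monitor classes.
import queue

_STOP = object()  # sentinel enqueued by shutdown(); makes notify_loop() end


class FakeNotifyHandler(object):
    def __init__(self):
        self._queue = queue.Queue()
        self.seen = []

    def notify(self, event, row, updates=None):
        # In the real driver a hash ring first decides whether this node
        # should process the event at all; see
        # test_notify_different_target_node above.
        self._queue.put((event, row, updates))

    def shutdown(self):
        self._queue.put(_STOP)

    def notify_loop(self):
        while True:
            item = self._queue.get()
            if item is _STOP:
                break
            self.seen.append(item)


# Usage mirrors _test_lsp_helper()/_test_chassis_helper() above:
handler = FakeNotifyHandler()
handler.notify('create', {'name': 'foo-name', 'up': True})
handler.shutdown()     # add a STOP EVENT to the queue
handler.notify_loop()  # execute the notifications queued
assert handler.seen == [('create', {'name': 'foo-name', 'up': True}, None)]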
neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import copy import datetime import uuid import mock from neutron_lib.api.definitions import external_net from neutron_lib.api.definitions import portbindings from neutron_lib.api.definitions import provider_net as pnet from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as const from neutron_lib import context from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from neutron_lib.tests import tools from neutron_lib.utils import net as n_net from oslo_config import cfg from oslo_db import exception as os_db_exc from oslo_serialization import jsonutils from oslo_utils import timeutils from oslo_utils import uuidutils from webob import exc from neutron.common.ovn import acl as ovn_acl from neutron.common.ovn import constants as ovn_const from neutron.common.ovn import hash_ring_manager from neutron.common.ovn import utils as ovn_utils from neutron.common import utils as n_utils from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf from neutron.db import db_base_plugin_v2 from neutron.db import ovn_revision_numbers_db from neutron.db import provisioning_blocks from neutron.db import securitygroups_db from neutron.plugins.ml2.drivers.ovn.mech_driver import mech_driver from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_client from neutron.plugins.ml2.drivers import type_geneve # noqa from neutron.services.revisions import revision_plugin from neutron.tests.unit.extensions import test_segment from neutron.tests.unit import fake_resources as fakes from neutron.tests.unit.plugins.ml2 import test_ext_portsecurity from neutron.tests.unit.plugins.ml2 import test_plugin from neutron.tests.unit.plugins.ml2 import test_security_group OVN_PROFILE = ovn_const.OVN_PORT_BINDING_PROFILE class TestOVNMechanismDriver(test_plugin.Ml2PluginV2TestCase): _mechanism_drivers = ['logger', 'ovn'] _extension_drivers = ['port_security', 'dns'] def setUp(self): cfg.CONF.set_override('extension_drivers', self._extension_drivers, group='ml2') cfg.CONF.set_override('tenant_network_types', ['geneve'], group='ml2') cfg.CONF.set_override('vni_ranges', ['1:65536'], group='ml2_type_geneve') ovn_conf.cfg.CONF.set_override('ovn_metadata_enabled', False, group='ovn') ovn_conf.cfg.CONF.set_override('dns_servers', ['8.8.8.8'], group='ovn') super(TestOVNMechanismDriver, self).setUp() mm = directory.get_plugin().mechanism_manager self.mech_driver = mm.mech_drivers['ovn'].obj self.mech_driver._nb_ovn = fakes.FakeOvsdbNbOvnIdl() self.mech_driver._sb_ovn = fakes.FakeOvsdbSbOvnIdl() self.nb_ovn = self.mech_driver._nb_ovn self.sb_ovn = self.mech_driver._sb_ovn self.fake_subnet = fakes.FakeSubnet.create_one_subnet().info() self.fake_sg_rule = \
fakes.FakeSecurityGroupRule.create_one_security_group_rule().info() self.fake_sg = fakes.FakeSecurityGroup.create_one_security_group( attrs={'security_group_rules': [self.fake_sg_rule]} ).info() self.sg_cache = {self.fake_sg['id']: self.fake_sg} self.subnet_cache = {self.fake_subnet['id']: self.fake_subnet} mock.patch.object(ovn_acl, '_acl_columns_name_severity_supported', return_value=True).start() revision_plugin.RevisionPlugin() p = mock.patch.object(ovn_utils, 'get_revision_number', return_value=1) p.start() self.addCleanup(p.stop) p = mock.patch.object(ovn_revision_numbers_db, 'bump_revision') p.start() self.addCleanup(p.stop) @mock.patch.object(ovn_revision_numbers_db, 'bump_revision') def test__create_security_group(self, mock_bump): self.mech_driver._create_security_group( resources.SECURITY_GROUP, events.AFTER_CREATE, {}, security_group=self.fake_sg, context=self.context) external_ids = {ovn_const.OVN_SG_EXT_ID_KEY: self.fake_sg['id']} ip4_name = ovn_utils.ovn_addrset_name(self.fake_sg['id'], 'ip4') ip6_name = ovn_utils.ovn_addrset_name(self.fake_sg['id'], 'ip6') create_address_set_calls = [mock.call(name=name, external_ids=external_ids) for name in [ip4_name, ip6_name]] self.nb_ovn.create_address_set.assert_has_calls( create_address_set_calls, any_order=True) mock_bump.assert_called_once_with( mock.ANY, self.fake_sg, ovn_const.TYPE_SECURITY_GROUPS) def test__delete_security_group(self): self.mech_driver._delete_security_group( resources.SECURITY_GROUP, events.AFTER_CREATE, {}, security_group_id=self.fake_sg['id'], context=self.context) ip4_name = ovn_utils.ovn_addrset_name(self.fake_sg['id'], 'ip4') ip6_name = ovn_utils.ovn_addrset_name(self.fake_sg['id'], 'ip6') delete_address_set_calls = [mock.call(name=name) for name in [ip4_name, ip6_name]] self.nb_ovn.delete_address_set.assert_has_calls( delete_address_set_calls, any_order=True) @mock.patch.object(ovn_revision_numbers_db, 'bump_revision') def test__process_sg_rule_notifications_sgr_create(self, mock_bump): with mock.patch.object(ovn_acl, 'update_acls_for_security_group') \ as ovn_acl_up: rule = {'security_group_id': 'sg_id'} self.mech_driver._process_sg_rule_notification( resources.SECURITY_GROUP_RULE, events.AFTER_CREATE, {}, security_group_rule=rule, context=self.context) ovn_acl_up.assert_called_once_with( mock.ANY, mock.ANY, mock.ANY, 'sg_id', rule, is_add_acl=True) mock_bump.assert_called_once_with( mock.ANY, rule, ovn_const.TYPE_SECURITY_GROUP_RULES) @mock.patch.object(ovn_revision_numbers_db, 'delete_revision') def test_process_sg_rule_notifications_sgr_delete(self, mock_delrev): rule = {'id': 'sgr_id', 'security_group_id': 'sg_id'} with mock.patch.object(ovn_acl, 'update_acls_for_security_group') \ as ovn_acl_up, \ mock.patch.object(securitygroups_db.SecurityGroupDbMixin, 'get_security_group_rule', return_value=rule): self.mech_driver._process_sg_rule_notification( resources.SECURITY_GROUP_RULE, events.BEFORE_DELETE, {}, security_group_rule=rule, context=self.context) ovn_acl_up.assert_called_once_with( mock.ANY, mock.ANY, mock.ANY, 'sg_id', rule, is_add_acl=False) mock_delrev.assert_called_once_with( mock.ANY, rule['id'], ovn_const.TYPE_SECURITY_GROUP_RULES) def test_add_acls_no_sec_group(self): fake_port_no_sg = fakes.FakePort.create_one_port().info() expected_acls = ovn_acl.drop_all_ip_traffic_for_port(fake_port_no_sg) acls = ovn_acl.add_acls(self.mech_driver._plugin, mock.Mock(), fake_port_no_sg, {}, {}, self.mech_driver._nb_ovn) self.assertEqual(expected_acls, acls) def 
test_add_acls_no_sec_group_no_port_security(self): fake_port_no_sg_no_ps = fakes.FakePort.create_one_port( attrs={'port_security_enabled': False}).info() acls = ovn_acl.add_acls(self.mech_driver._plugin, mock.Mock(), fake_port_no_sg_no_ps, {}, {}, self.mech_driver._nb_ovn) self.assertEqual([], acls) def _test_add_acls_with_sec_group_helper(self, native_dhcp=True): fake_port_sg = fakes.FakePort.create_one_port( attrs={'security_groups': [self.fake_sg['id']], 'fixed_ips': [{'subnet_id': self.fake_subnet['id'], 'ip_address': '10.10.10.20'}]} ).info() expected_acls = [] expected_acls += ovn_acl.drop_all_ip_traffic_for_port( fake_port_sg) expected_acls += ovn_acl.add_acl_dhcp( fake_port_sg, self.fake_subnet, native_dhcp) sg_rule_acl = ovn_acl.add_sg_rule_acl_for_port( fake_port_sg, self.fake_sg_rule, 'outport == "' + fake_port_sg['id'] + '" ' + '&& ip4 && ip4.src == 0.0.0.0/0 ' + '&& tcp && tcp.dst == 22') expected_acls.append(sg_rule_acl) # Test with caches acls = ovn_acl.add_acls(self.mech_driver._plugin, mock.Mock(), fake_port_sg, self.sg_cache, self.subnet_cache, self.mech_driver._nb_ovn) self.assertEqual(expected_acls, acls) # Test without caches with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'get_subnet', return_value=self.fake_subnet), \ mock.patch.object(securitygroups_db.SecurityGroupDbMixin, 'get_security_group', return_value=self.fake_sg): acls = ovn_acl.add_acls(self.mech_driver._plugin, mock.Mock(), fake_port_sg, {}, {}, self.mech_driver._nb_ovn) self.assertEqual(expected_acls, acls) # Test with security groups disabled with mock.patch.object(ovn_acl, 'is_sg_enabled', return_value=False): acls = ovn_acl.add_acls(self.mech_driver._plugin, mock.Mock(), fake_port_sg, self.sg_cache, self.subnet_cache, self.mech_driver._nb_ovn) self.assertEqual([], acls) # Test with multiple fixed IPs on the same subnet. 
fake_port_sg['fixed_ips'].append({'subnet_id': self.fake_subnet['id'], 'ip_address': '10.10.10.21'}) acls = ovn_acl.add_acls(self.mech_driver._plugin, mock.Mock(), fake_port_sg, self.sg_cache, self.subnet_cache, self.mech_driver._nb_ovn) self.assertEqual(expected_acls, acls) def test_add_acls_with_sec_group_native_dhcp_enabled(self): self._test_add_acls_with_sec_group_helper() def test_port_invalid_binding_profile(self): invalid_binding_profiles = [ {'tag': 0, 'parent_name': 'fakename'}, {'tag': 1024}, {'tag': 1024, 'parent_name': 1024}, {'parent_name': 'test'}, {'tag': 'test'}, {'vtep-physical-switch': 'psw1'}, {'vtep-logical-switch': 'lsw1'}, {'vtep-physical-switch': 'psw1', 'vtep-logical-switch': 1234}, {'vtep-physical-switch': 1234, 'vtep-logical-switch': 'lsw1'}, {'vtep-physical-switch': 'psw1', 'vtep-logical-switch': 'lsw1', 'tag': 1024}, {'vtep-physical-switch': 'psw1', 'vtep-logical-switch': 'lsw1', 'parent_name': 'fakename'}, {'vtep-physical-switch': 'psw1', 'vtep-logical-switch': 'lsw1', 'tag': 1024, 'parent_name': 'fakename'}, ] with self.network(set_context=True, tenant_id='test') as net1: with self.subnet(network=net1) as subnet1: # succeed without binding:profile with self.port(subnet=subnet1, set_context=True, tenant_id='test'): pass # fail with invalid binding profiles for invalid_profile in invalid_binding_profiles: try: kwargs = {ovn_const.OVN_PORT_BINDING_PROFILE: invalid_profile} with self.port( subnet=subnet1, expected_res_status=403, arg_list=( ovn_const.OVN_PORT_BINDING_PROFILE,), set_context=True, tenant_id='test', **kwargs): pass except exc.HTTPClientError: pass def test__validate_ignored_port_update_from_fip_port(self): p = {'id': 'id', 'device_owner': 'test'} ori_p = {'id': 'id', 'device_owner': const.DEVICE_OWNER_FLOATINGIP} self.assertRaises(mech_driver.OVNPortUpdateError, self.mech_driver._validate_ignored_port, p, ori_p) def test__validate_ignored_port_update_to_fip_port(self): p = {'id': 'id', 'device_owner': const.DEVICE_OWNER_FLOATINGIP} ori_p = {'id': 'port-id', 'device_owner': 'test'} self.assertRaises(mech_driver.OVNPortUpdateError, self.mech_driver._validate_ignored_port, p, ori_p) def test_create_and_update_ignored_fip_port(self): with self.network(set_context=True, tenant_id='test') as net1: with self.subnet(network=net1) as subnet1: with self.port(subnet=subnet1, device_owner=const.DEVICE_OWNER_FLOATINGIP, set_context=True, tenant_id='test') as port: self.nb_ovn.create_lswitch_port.assert_not_called() data = {'port': {'name': 'new'}} req = self.new_update_request('ports', data, port['port']['id']) res = req.get_response(self.api) self.assertEqual(exc.HTTPOk.code, res.status_int) self.nb_ovn.set_lswitch_port.assert_not_called() def test_update_ignored_port_from_fip_device_owner(self): with self.network(set_context=True, tenant_id='test') as net1: with self.subnet(network=net1) as subnet1: with self.port(subnet=subnet1, device_owner=const.DEVICE_OWNER_FLOATINGIP, set_context=True, tenant_id='test') as port: self.nb_ovn.create_lswitch_port.assert_not_called() data = {'port': {'device_owner': 'test'}} req = self.new_update_request('ports', data, port['port']['id']) res = req.get_response(self.api) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) msg = jsonutils.loads(res.body)['NeutronError']['message'] expect_msg = ('Bad port request: Updating device_owner for' ' port %s owned by network:floatingip is' ' not supported.' 
% port['port']['id']) self.assertEqual(msg, expect_msg) self.nb_ovn.set_lswitch_port.assert_not_called() def test_update_ignored_port_to_fip_device_owner(self): with self.network(set_context=True, tenant_id='test') as net1: with self.subnet(network=net1) as subnet1: with self.port(subnet=subnet1, device_owner='test', set_context=True, tenant_id='test') as port: self.assertEqual( 1, self.nb_ovn.create_lswitch_port.call_count) data = {'port': {'device_owner': const.DEVICE_OWNER_FLOATINGIP}} req = self.new_update_request('ports', data, port['port']['id']) res = req.get_response(self.api) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) msg = jsonutils.loads(res.body)['NeutronError']['message'] expect_msg = ('Bad port request: Updating device_owner to' ' network:floatingip for port %s is' ' not supported.' % port['port']['id']) self.assertEqual(msg, expect_msg) self.nb_ovn.set_lswitch_port.assert_not_called() def test_create_port_security(self): kwargs = {'mac_address': '00:00:00:00:00:01', 'fixed_ips': [{'ip_address': '10.0.0.2'}, {'ip_address': '10.0.0.4'}]} with self.network(set_context=True, tenant_id='test') as net1: with self.subnet(network=net1) as subnet1: with self.port(subnet=subnet1, arg_list=('mac_address', 'fixed_ips'), set_context=True, tenant_id='test', **kwargs) as port: self.assertTrue(self.nb_ovn.create_lswitch_port.called) called_args_dict = ( (self.nb_ovn.create_lswitch_port ).call_args_list[0][1]) self.assertEqual(['00:00:00:00:00:01 10.0.0.2 10.0.0.4'], called_args_dict.get('port_security')) data = {'port': {'mac_address': '00:00:00:00:00:02'}} req = self.new_update_request( 'ports', data, port['port']['id']) req.get_response(self.api) self.assertTrue(self.nb_ovn.set_lswitch_port.called) called_args_dict = ( (self.nb_ovn.set_lswitch_port ).call_args_list[0][1]) self.assertEqual(['00:00:00:00:00:02 10.0.0.2 10.0.0.4'], called_args_dict.get('port_security')) def test_create_port_with_disabled_security(self): # NOTE(mjozefcz): Let's pretend this is a nova port so it is not # treated as a VIP.
kwargs = {'port_security_enabled': False, 'device_owner': 'compute:nova'} with self.network(set_context=True, tenant_id='test') as net1: with self.subnet(network=net1) as subnet1: with self.port(subnet=subnet1, arg_list=('port_security_enabled',), set_context=True, tenant_id='test', **kwargs) as port: self.assertTrue(self.nb_ovn.create_lswitch_port.called) called_args_dict = ( (self.nb_ovn.create_lswitch_port ).call_args_list[0][1]) self.assertEqual([], called_args_dict.get('port_security')) self.assertEqual(ovn_const.UNKNOWN_ADDR, called_args_dict.get('addresses')[1]) data = {'port': {'mac_address': '00:00:00:00:00:01'}} req = self.new_update_request( 'ports', data, port['port']['id']) req.get_response(self.api) self.assertTrue(self.nb_ovn.set_lswitch_port.called) called_args_dict = ( (self.nb_ovn.set_lswitch_port ).call_args_list[0][1]) self.assertEqual([], called_args_dict.get('port_security')) self.assertEqual(2, len(called_args_dict.get('addresses'))) self.assertEqual(ovn_const.UNKNOWN_ADDR, called_args_dict.get('addresses')[1]) # Enable port security data = {'port': {'port_security_enabled': 'True'}} req = self.new_update_request( 'ports', data, port['port']['id']) req.get_response(self.api) called_args_dict = ( (self.nb_ovn.set_lswitch_port ).call_args_list[1][1]) self.assertEqual(2, self.nb_ovn.set_lswitch_port.call_count) self.assertEqual(1, len(called_args_dict.get('addresses'))) self.assertNotIn(ovn_const.UNKNOWN_ADDR, called_args_dict.get('addresses')) def test_create_port_security_allowed_address_pairs(self): # NOTE(mjozefcz): Let's pretend this is a nova port so it is not # treated as a VIP. kwargs = {'allowed_address_pairs': [{"ip_address": "1.1.1.1"}, {"ip_address": "2.2.2.2", "mac_address": "22:22:22:22:22:22"}], 'device_owner': 'compute:nova'} with self.network(set_context=True, tenant_id='test') as net1: with self.subnet(network=net1) as subnet1: with self.port(subnet=subnet1, arg_list=('allowed_address_pairs',), set_context=True, tenant_id='test', **kwargs) as port: port_ip = port['port'].get('fixed_ips')[0]['ip_address'] self.assertTrue(self.nb_ovn.create_lswitch_port.called) called_args_dict = ( (self.nb_ovn.create_lswitch_port ).call_args_list[0][1]) self.assertEqual( tools.UnorderedList( ["22:22:22:22:22:22 2.2.2.2", port['port']['mac_address'] + ' ' + port_ip + ' ' + '1.1.1.1']), called_args_dict.get('port_security')) self.assertEqual( tools.UnorderedList( ["22:22:22:22:22:22", port['port']['mac_address'] + ' ' + port_ip]), called_args_dict.get('addresses')) old_mac = port['port']['mac_address'] # we are updating only the port mac address.
So the # mac address of the allowed address pair ip 1.1.1.1 # will keep the old mac address data = {'port': {'mac_address': '00:00:00:00:00:01'}} req = self.new_update_request( 'ports', data, port['port']['id']) req.get_response(self.api) self.assertTrue(self.nb_ovn.set_lswitch_port.called) called_args_dict = ( (self.nb_ovn.set_lswitch_port ).call_args_list[0][1]) self.assertEqual(tools.UnorderedList( ["22:22:22:22:22:22 2.2.2.2", "00:00:00:00:00:01 " + port_ip, old_mac + " 1.1.1.1"]), called_args_dict.get('port_security')) self.assertEqual( tools.UnorderedList( ["22:22:22:22:22:22", "00:00:00:00:00:01 " + port_ip, old_mac]), called_args_dict.get('addresses')) def test_create_port_ovn_octavia_vip(self): with (self.network(set_context=True, tenant_id='test')) as net1, ( self.subnet(network=net1)) as subnet1, ( self.port(name=ovn_const.LB_VIP_PORT_PREFIX + 'foo', subnet=subnet1, set_context=True, tenant_id='test')): self.assertTrue(self.nb_ovn.create_lswitch_port.called) called_args_dict = ( self.nb_ovn.create_lswitch_port.call_args_list[0][1]) self.assertEqual([], called_args_dict.get('addresses')) def _create_fake_network_context(self, network_type, physical_network=None, segmentation_id=None): network_attrs = {'provider:network_type': network_type, 'provider:physical_network': physical_network, 'provider:segmentation_id': segmentation_id} segment_attrs = {'network_type': network_type, 'physical_network': physical_network, 'segmentation_id': segmentation_id} fake_network = \ fakes.FakeNetwork.create_one_network(attrs=network_attrs).info() fake_segments = \ [fakes.FakeSegment.create_one_segment(attrs=segment_attrs).info()] return fakes.FakeNetworkContext(fake_network, fake_segments) def _create_fake_mp_network_context(self): network_type = 'flat' network_attrs = {'segments': []} fake_segments = [] for physical_network in ['physnet1', 'physnet2']: network_attrs['segments'].append( {'provider:network_type': network_type, 'provider:physical_network': physical_network}) segment_attrs = {'network_type': network_type, 'physical_network': physical_network} fake_segments.append( fakes.FakeSegment.create_one_segment( attrs=segment_attrs).info()) fake_network = \ fakes.FakeNetwork.create_one_network(attrs=network_attrs).info() fake_network.pop('provider:network_type') fake_network.pop('provider:physical_network') fake_network.pop('provider:segmentation_id') return fakes.FakeNetworkContext(fake_network, fake_segments) def test_network_precommit(self): # Test supported network types. fake_network_context = self._create_fake_network_context('local') self.mech_driver.create_network_precommit(fake_network_context) fake_network_context = self._create_fake_network_context( 'flat', physical_network='physnet') self.mech_driver.update_network_precommit(fake_network_context) fake_network_context = self._create_fake_network_context( 'geneve', segmentation_id=10) self.mech_driver.create_network_precommit(fake_network_context) fake_network_context = self._create_fake_network_context( 'vlan', physical_network='physnet', segmentation_id=11) self.mech_driver.update_network_precommit(fake_network_context) fake_mp_network_context = self._create_fake_mp_network_context() self.mech_driver.create_network_precommit(fake_mp_network_context) # Test unsupported network types.
fake_network_context = self._create_fake_network_context( 'vxlan', segmentation_id=12) self.assertRaises(n_exc.InvalidInput, self.mech_driver.create_network_precommit, fake_network_context) fake_network_context = self._create_fake_network_context( 'gre', segmentation_id=13) self.assertRaises(n_exc.InvalidInput, self.mech_driver.update_network_precommit, fake_network_context) def _create_network_igmp_snoop(self, enabled): cfg.CONF.set_override('igmp_snooping_enable', enabled, group='OVS') nb_idl = self.mech_driver._ovn_client._nb_idl net = self._make_network(self.fmt, name='net1', admin_state_up=True)['network'] value = 'true' if enabled else 'false' nb_idl.ls_add.assert_called_once_with( ovn_utils.ovn_name(net['id']), external_ids=mock.ANY, may_exist=True, other_config={ovn_const.MCAST_SNOOP: value, ovn_const.MCAST_FLOOD_UNREGISTERED: value}) def test_create_network_igmp_snoop_enabled(self): self._create_network_igmp_snoop(enabled=True) def test_create_network_igmp_snoop_disabled(self): self._create_network_igmp_snoop(enabled=False) def test_create_port_without_security_groups(self): kwargs = {'security_groups': []} with self.network(set_context=True, tenant_id='test') as net1: with self.subnet(network=net1) as subnet1: with self.port(subnet=subnet1, arg_list=('security_groups',), set_context=True, tenant_id='test', **kwargs): self.assertEqual( 1, self.nb_ovn.create_lswitch_port.call_count) self.assertEqual(2, self.nb_ovn.add_acl.call_count) self.nb_ovn.update_address_set.assert_not_called() def test_create_port_without_security_groups_no_ps(self): kwargs = {'security_groups': [], 'port_security_enabled': False} with self.network(set_context=True, tenant_id='test') as net1: with self.subnet(network=net1) as subnet1: with self.port(subnet=subnet1, arg_list=('security_groups', 'port_security_enabled'), set_context=True, tenant_id='test', **kwargs): self.assertEqual( 1, self.nb_ovn.create_lswitch_port.call_count) self.nb_ovn.add_acl.assert_not_called() self.nb_ovn.update_address_set.assert_not_called() def _test_create_port_with_security_groups_helper(self, add_acl_call_count): with self.network(set_context=True, tenant_id='test') as net1: with self.subnet(network=net1) as subnet1: with self.port(subnet=subnet1, set_context=True, tenant_id='test'): self.assertEqual( 1, self.nb_ovn.create_lswitch_port.call_count) self.assertEqual( add_acl_call_count, self.nb_ovn.add_acl.call_count) self.assertEqual( 1, self.nb_ovn.update_address_set.call_count) def test_create_port_with_security_groups_native_dhcp_enabled(self): self._test_create_port_with_security_groups_helper(7) def test_update_port_changed_security_groups(self): with self.network(set_context=True, tenant_id='test') as net1: with self.subnet(network=net1) as subnet1: with self.port(subnet=subnet1, set_context=True, tenant_id='test') as port1: sg_id = port1['port']['security_groups'][0] fake_lsp = ( fakes.FakeOVNPort.from_neutron_port( port1['port'])) self.nb_ovn.lookup.return_value = fake_lsp # Remove the default security group. self.nb_ovn.set_lswitch_port.reset_mock() self.nb_ovn.update_acls.reset_mock() self.nb_ovn.update_address_set.reset_mock() data = {'port': {'security_groups': []}} self._update('ports', port1['port']['id'], data) self.assertEqual( 1, self.nb_ovn.set_lswitch_port.call_count) self.assertEqual( 1, self.nb_ovn.update_acls.call_count) self.assertEqual( 1, self.nb_ovn.update_address_set.call_count) # Add the default security group. 
self.nb_ovn.set_lswitch_port.reset_mock() self.nb_ovn.update_acls.reset_mock() self.nb_ovn.update_address_set.reset_mock() fake_lsp.external_ids.pop(ovn_const.OVN_SG_IDS_EXT_ID_KEY) data = {'port': {'security_groups': [sg_id]}} self._update('ports', port1['port']['id'], data) self.assertEqual( 1, self.nb_ovn.set_lswitch_port.call_count) self.assertEqual( 1, self.nb_ovn.update_acls.call_count) self.assertEqual( 1, self.nb_ovn.update_address_set.call_count) def test_update_port_unchanged_security_groups(self): with self.network(set_context=True, tenant_id='test') as net1: with self.subnet(network=net1) as subnet1: with self.port(subnet=subnet1, set_context=True, tenant_id='test') as port1: fake_lsp = ( fakes.FakeOVNPort.from_neutron_port( port1['port'])) self.nb_ovn.lookup.return_value = fake_lsp # Update the port name. self.nb_ovn.set_lswitch_port.reset_mock() self.nb_ovn.update_acls.reset_mock() self.nb_ovn.update_address_set.reset_mock() data = {'port': {'name': 'rtheis'}} self._update('ports', port1['port']['id'], data) self.assertEqual( 1, self.nb_ovn.set_lswitch_port.call_count) self.nb_ovn.update_acls.assert_not_called() self.nb_ovn.update_address_set.assert_not_called() # Update the port fixed IPs self.nb_ovn.set_lswitch_port.reset_mock() self.nb_ovn.update_acls.reset_mock() self.nb_ovn.update_address_set.reset_mock() data = {'port': {'fixed_ips': []}} self._update('ports', port1['port']['id'], data) self.assertEqual( 1, self.nb_ovn.set_lswitch_port.call_count) self.assertEqual( 1, self.nb_ovn.update_acls.call_count) self.assertEqual( 1, self.nb_ovn.update_address_set.call_count) def _test_update_port_vip(self, is_vip=True): kwargs = {} with ( self.network(set_context=True, tenant_id='test')) as net1, ( self.subnet(network=net1)) as subnet1, ( self.port(subnet=subnet1, set_context=True, tenant_id='test', **kwargs)) as port1: fake_lsp = ( fakes.FakeOVNPort.from_neutron_port( port1['port'])) self.nb_ovn.lookup.return_value = fake_lsp self.nb_ovn.set_lswitch_port.reset_mock() if is_vip: data = {'port': {'name': ovn_const.LB_VIP_PORT_PREFIX + 'foo'}} else: data = {'port': {}} self._update('ports', port1['port']['id'], data) self.assertEqual( 1, self.nb_ovn.set_lswitch_port.call_count) called_args_dict = ( self.nb_ovn.set_lswitch_port.call_args_list[0][1]) if is_vip: self.assertEqual([], called_args_dict.get('addresses')) else: self.assertNotEqual([], called_args_dict.get('addresses')) def test_update_port_not_vip_port(self): self._test_update_port_vip(is_vip=False) def test_update_port_vip_port(self): self._test_update_port_vip() def test_delete_port_without_security_groups(self): kwargs = {'security_groups': []} with self.network(set_context=True, tenant_id='test') as net1: with self.subnet(network=net1) as subnet1: with self.port(subnet=subnet1, arg_list=('security_groups',), set_context=True, tenant_id='test', **kwargs) as port1: fake_lsp = ( fakes.FakeOVNPort.from_neutron_port( port1['port'])) self.nb_ovn.lookup.return_value = fake_lsp self.nb_ovn.delete_lswitch_port.reset_mock() self.nb_ovn.delete_acl.reset_mock() self.nb_ovn.update_address_set.reset_mock() self._delete('ports', port1['port']['id']) self.assertEqual( 1, self.nb_ovn.delete_lswitch_port.call_count) self.assertEqual( 1, self.nb_ovn.delete_acl.call_count) self.nb_ovn.update_address_set.assert_not_called() def test_delete_port_with_security_groups(self): with self.network(set_context=True, tenant_id='test') as net1: with self.subnet(network=net1) as subnet1: with self.port(subnet=subnet1, set_context=True, 
tenant_id='test') as port1: fake_lsp = ( fakes.FakeOVNPort.from_neutron_port( port1['port'])) self.nb_ovn.lookup.return_value = fake_lsp self.nb_ovn.delete_lswitch_port.reset_mock() self.nb_ovn.delete_acl.reset_mock() self.nb_ovn.update_address_set.reset_mock() self._delete('ports', port1['port']['id']) self.assertEqual( 1, self.nb_ovn.delete_lswitch_port.call_count) self.assertEqual( 1, self.nb_ovn.delete_acl.call_count) self.assertEqual( 1, self.nb_ovn.update_address_set.call_count) def _test_set_port_status_up(self, is_compute_port=False): port_device_owner = 'compute:nova' if is_compute_port else '' self.mech_driver._plugin.nova_notifier = mock.Mock() with self.network(set_context=True, tenant_id='test') as net1, \ self.subnet(network=net1) as subnet1, \ self.port(subnet=subnet1, set_context=True, tenant_id='test', device_owner=port_device_owner) as port1, \ mock.patch.object(provisioning_blocks, 'provisioning_complete') as pc, \ mock.patch.object(self.mech_driver, '_update_dnat_entry_if_needed') as ude, \ mock.patch.object( self.mech_driver, '_wait_for_metadata_provisioned_if_needed') as wmp, \ mock.patch.object(self.mech_driver, '_should_notify_nova', return_value=is_compute_port): self.mech_driver.set_port_status_up(port1['port']['id']) pc.assert_called_once_with( mock.ANY, port1['port']['id'], resources.PORT, provisioning_blocks.L2_AGENT_ENTITY ) ude.assert_called_once_with(port1['port']['id']) wmp.assert_called_once_with(port1['port']['id']) # If the port does NOT belong to compute, do not notify Nova # about its status changes if not is_compute_port: self.mech_driver._plugin.nova_notifier.\ notify_port_active_direct.assert_not_called() else: self.mech_driver._plugin.nova_notifier.\ notify_port_active_direct.assert_called_once_with( mock.ANY) def test_set_port_status_up(self): self._test_set_port_status_up(is_compute_port=False) def test_set_compute_port_status_up(self): self._test_set_port_status_up(is_compute_port=True) def _test_set_port_status_down(self, is_compute_port=False): port_device_owner = 'compute:nova' if is_compute_port else '' self.mech_driver._plugin.nova_notifier = mock.Mock() with self.network(set_context=True, tenant_id='test') as net1, \ self.subnet(network=net1) as subnet1, \ self.port(subnet=subnet1, set_context=True, tenant_id='test', device_owner=port_device_owner) as port1, \ mock.patch.object(provisioning_blocks, 'add_provisioning_component') as apc, \ mock.patch.object(self.mech_driver, '_update_dnat_entry_if_needed') as ude, \ mock.patch.object(self.mech_driver, '_should_notify_nova', return_value=is_compute_port): self.mech_driver.set_port_status_down(port1['port']['id']) apc.assert_called_once_with( mock.ANY, port1['port']['id'], resources.PORT, provisioning_blocks.L2_AGENT_ENTITY ) ude.assert_called_once_with(port1['port']['id'], False) # If the port does NOT belong to compute, do not notify Nova # about its status changes if not is_compute_port: self.mech_driver._plugin.nova_notifier.\ record_port_status_changed.assert_not_called() self.mech_driver._plugin.nova_notifier.\ send_port_status.assert_not_called() else: self.mech_driver._plugin.nova_notifier.\ record_port_status_changed.assert_called_once_with( mock.ANY, const.PORT_STATUS_ACTIVE, const.PORT_STATUS_DOWN, None) self.mech_driver._plugin.nova_notifier.\ send_port_status.assert_called_once_with( None, None, mock.ANY) def test_set_port_status_down(self): self._test_set_port_status_down(is_compute_port=False) def test_set_compute_port_status_down(self):
self._test_set_port_status_down(is_compute_port=True) def test_set_port_status_down_not_found(self): with mock.patch.object(provisioning_blocks, 'add_provisioning_component') as apc, \ mock.patch.object(self.mech_driver, '_update_dnat_entry_if_needed'): self.mech_driver.set_port_status_down('foo') apc.assert_not_called() def test_set_port_status_concurrent_delete(self): exc = os_db_exc.DBReferenceError('', '', '', '') with self.network(set_context=True, tenant_id='test') as net1, \ self.subnet(network=net1) as subnet1, \ self.port(subnet=subnet1, set_context=True, tenant_id='test') as port1, \ mock.patch.object(provisioning_blocks, 'add_provisioning_component', side_effect=exc) as apc, \ mock.patch.object(self.mech_driver, '_update_dnat_entry_if_needed') as ude: self.mech_driver.set_port_status_down(port1['port']['id']) apc.assert_called_once_with( mock.ANY, port1['port']['id'], resources.PORT, provisioning_blocks.L2_AGENT_ENTITY ) ude.assert_called_once_with(port1['port']['id'], False) def _test__wait_for_metadata_provisioned_if_needed(self, enable_dhcp, wait_expected): with self.network(tenant_id='test') as net1, \ self.subnet(network=net1, enable_dhcp=enable_dhcp) as subnet1, \ self.port(subnet=subnet1, set_context=True, tenant_id='test') as port1, \ mock.patch.object(n_utils, 'wait_until_true') as wut, \ mock.patch.object(ovn_conf, 'is_ovn_metadata_enabled', return_value=True): self.mech_driver._wait_for_metadata_provisioned_if_needed( port1['port']['id']) if wait_expected: self.assertEqual(1, wut.call_count) else: wut.assert_not_called() def test__wait_for_metadata_provisioned_if_needed(self): self._test__wait_for_metadata_provisioned_if_needed( enable_dhcp=True, wait_expected=True) def test__wait_for_metadata_provisioned_if_needed_not_needed(self): self._test__wait_for_metadata_provisioned_if_needed( enable_dhcp=False, wait_expected=False) def test_bind_port_unsupported_vnic_type(self): fake_port = fakes.FakePort.create_one_port( attrs={'binding:vnic_type': 'unknown'}).info() fake_port_context = fakes.FakePortContext(fake_port, 'host', []) self.mech_driver.bind_port(fake_port_context) self.sb_ovn.get_chassis_data_for_ml2_bind_port.assert_not_called() fake_port_context.set_binding.assert_not_called() def _test_bind_port_failed(self, fake_segments): fake_port = fakes.FakePort.create_one_port().info() fake_host = 'host' fake_port_context = fakes.FakePortContext( fake_port, fake_host, fake_segments) self.mech_driver.bind_port(fake_port_context) self.sb_ovn.get_chassis_data_for_ml2_bind_port.assert_called_once_with( fake_host) fake_port_context.set_binding.assert_not_called() def test_bind_port_host_not_found(self): self.sb_ovn.get_chassis_data_for_ml2_bind_port.side_effect = \ RuntimeError self._test_bind_port_failed([]) def test_bind_port_no_segments_to_bind(self): self._test_bind_port_failed([]) def test_bind_port_physnet_not_found(self): segment_attrs = {'network_type': 'vlan', 'physical_network': 'unknown-physnet', 'segmentation_id': 23} fake_segments = \ [fakes.FakeSegment.create_one_segment(attrs=segment_attrs).info()] self._test_bind_port_failed(fake_segments) def _test_bind_port(self, fake_segments): fake_port = fakes.FakePort.create_one_port().info() fake_host = 'host' fake_port_context = fakes.FakePortContext( fake_port, fake_host, fake_segments) self.mech_driver.bind_port(fake_port_context) self.sb_ovn.get_chassis_data_for_ml2_bind_port.assert_called_once_with( fake_host) fake_port_context.set_binding.assert_called_once_with( fake_segments[0]['id'], portbindings.VIF_TYPE_OVS, 
self.mech_driver.vif_details[portbindings.VIF_TYPE_OVS]) def _test_bind_port_sriov(self, fake_segments): fake_port = fakes.FakePort.create_one_port( attrs={'binding:vnic_type': 'direct', 'binding:profile': {'capabilities': ['switchdev']}}).info() fake_host = 'host' fake_port_context = fakes.FakePortContext( fake_port, fake_host, fake_segments) self.mech_driver.bind_port(fake_port_context) self.sb_ovn.get_chassis_data_for_ml2_bind_port.assert_called_once_with( fake_host) fake_port_context.set_binding.assert_called_once_with( fake_segments[0]['id'], portbindings.VIF_TYPE_OVS, self.mech_driver.vif_details[portbindings.VIF_TYPE_OVS]) def test_bind_port_geneve(self): segment_attrs = {'network_type': 'geneve', 'physical_network': None, 'segmentation_id': 1023} fake_segments = \ [fakes.FakeSegment.create_one_segment(attrs=segment_attrs).info()] self._test_bind_port(fake_segments) def test_bind_sriov_port_geneve(self): """Test binding a SR-IOV port to a geneve segment.""" segment_attrs = {'network_type': 'geneve', 'physical_network': None, 'segmentation_id': 1023} fake_segments = \ [fakes.FakeSegment.create_one_segment(attrs=segment_attrs).info()] self._test_bind_port_sriov(fake_segments) def test_bind_port_vlan(self): segment_attrs = {'network_type': 'vlan', 'physical_network': 'fake-physnet', 'segmentation_id': 23} fake_segments = \ [fakes.FakeSegment.create_one_segment(attrs=segment_attrs).info()] self._test_bind_port(fake_segments) def test_bind_port_flat(self): segment_attrs = {'network_type': 'flat', 'physical_network': 'fake-physnet', 'segmentation_id': None} fake_segments = \ [fakes.FakeSegment.create_one_segment(attrs=segment_attrs).info()] self._test_bind_port(fake_segments) def test_bind_port_vxlan(self): segment_attrs = {'network_type': 'vxlan', 'physical_network': None, 'segmentation_id': 1024} fake_segments = \ [fakes.FakeSegment.create_one_segment(attrs=segment_attrs).info()] self._test_bind_port(fake_segments) def test__is_port_provisioning_required(self): fake_port = fakes.FakePort.create_one_port( attrs={'binding:vnic_type': 'normal', 'status': const.PORT_STATUS_DOWN}).info() fake_host = 'fake-physnet' # Test host not changed self.assertFalse(self.mech_driver._is_port_provisioning_required( fake_port, fake_host, fake_host)) # Test invalid vnic type. fake_port['binding:vnic_type'] = 'unknown' self.assertFalse(self.mech_driver._is_port_provisioning_required( fake_port, fake_host, None)) fake_port['binding:vnic_type'] = 'normal' # Test invalid status. fake_port['status'] = const.PORT_STATUS_ACTIVE self.assertFalse(self.mech_driver._is_port_provisioning_required( fake_port, fake_host, None)) fake_port['status'] = const.PORT_STATUS_DOWN # Test no host. self.assertFalse(self.mech_driver._is_port_provisioning_required( fake_port, None, None)) # Test invalid host. self.sb_ovn.chassis_exists.return_value = False self.assertFalse(self.mech_driver._is_port_provisioning_required( fake_port, fake_host, None)) self.sb_ovn.chassis_exists.return_value = True # Test port provisioning required. 
self.assertTrue(self.mech_driver._is_port_provisioning_required( fake_port, fake_host, None)) def _test_add_subnet_dhcp_options_in_ovn(self, subnet, ovn_dhcp_opts=None, call_get_dhcp_opts=True, call_add_dhcp_opts=True): subnet['id'] = 'fake_id' with mock.patch.object(self.mech_driver._ovn_client, '_get_ovn_dhcp_options') as get_opts: self.mech_driver._ovn_client._add_subnet_dhcp_options( subnet, mock.ANY, ovn_dhcp_opts) self.assertEqual(call_get_dhcp_opts, get_opts.called) self.assertEqual( call_add_dhcp_opts, self.mech_driver._nb_ovn.add_dhcp_options.called) def test_add_subnet_dhcp_options_in_ovn(self): subnet = {'ip_version': const.IP_VERSION_4} self._test_add_subnet_dhcp_options_in_ovn(subnet) def test_add_subnet_dhcp_options_in_ovn_with_given_ovn_dhcp_opts(self): subnet = {'ip_version': const.IP_VERSION_4} self._test_add_subnet_dhcp_options_in_ovn( subnet, ovn_dhcp_opts={'foo': 'bar', 'external_ids': {}}, call_get_dhcp_opts=False) def test_add_subnet_dhcp_options_in_ovn_with_slaac_v6_subnet(self): subnet = {'ip_version': const.IP_VERSION_6, 'ipv6_address_mode': const.IPV6_SLAAC} self._test_add_subnet_dhcp_options_in_ovn( subnet, call_get_dhcp_opts=False, call_add_dhcp_opts=False) @mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'get_ports') @mock.patch.object(n_net, 'get_random_mac') def test_enable_subnet_dhcp_options_in_ovn_ipv4(self, grm, gps): grm.return_value = '01:02:03:04:05:06' gps.return_value = [ {'id': 'port-id-1', 'device_owner': 'nova:compute'}, {'id': 'port-id-2', 'device_owner': 'nova:compute', 'extra_dhcp_opts': [ {'opt_value': '10.0.0.33', 'ip_version': 4, 'opt_name': 'router'}]}, {'id': 'port-id-3', 'device_owner': 'nova:compute', 'extra_dhcp_opts': [ {'opt_value': '1200', 'ip_version': 4, 'opt_name': 'mtu'}]}, {'id': 'port-id-10', 'device_owner': 'network:foo'}] subnet = {'id': 'subnet-id', 'ip_version': 4, 'cidr': '10.0.0.0/24', 'network_id': 'network-id', 'gateway_ip': '10.0.0.1', 'enable_dhcp': True, 'dns_nameservers': [], 'host_routes': []} network = {'id': 'network-id', 'mtu': 1000} txn = self.mech_driver._nb_ovn.transaction().__enter__.return_value dhcp_option_command = mock.Mock() txn.add.return_value = dhcp_option_command self.mech_driver._ovn_client._enable_subnet_dhcp_options( subnet, network, txn) # Check adding DHCP_Options rows subnet_dhcp_options = { 'external_ids': {'subnet_id': subnet['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'}, 'cidr': subnet['cidr'], 'options': { 'router': subnet['gateway_ip'], 'server_id': subnet['gateway_ip'], 'server_mac': '01:02:03:04:05:06', 'dns_server': '{8.8.8.8}', 'lease_time': str(12 * 60 * 60), 'mtu': str(1000)}} ports_dhcp_options = [{ 'external_ids': {'subnet_id': subnet['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1', 'port_id': 'port-id-2'}, 'cidr': subnet['cidr'], 'options': { 'router': '10.0.0.33', 'server_id': subnet['gateway_ip'], 'dns_server': '{8.8.8.8}', 'server_mac': '01:02:03:04:05:06', 'lease_time': str(12 * 60 * 60), 'mtu': str(1000)}}, { 'external_ids': {'subnet_id': subnet['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1', 'port_id': 'port-id-3'}, 'cidr': subnet['cidr'], 'options': { 'router': subnet['gateway_ip'], 'server_id': subnet['gateway_ip'], 'dns_server': '{8.8.8.8}', 'server_mac': '01:02:03:04:05:06', 'lease_time': str(12 * 60 * 60), 'mtu': str(1200)}}] add_dhcp_calls = [mock.call('subnet-id', **subnet_dhcp_options)] add_dhcp_calls.extend([mock.call( 'subnet-id', port_id=port_dhcp_options['external_ids']['port_id'], **port_dhcp_options) for port_dhcp_options in ports_dhcp_options]) 
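# The expected per-port rows built above encode the merge rule under
# test: each port's extra_dhcp_opts override only the matching keys in
# the subnet-level options, and everything else is inherited.  A small
# self-contained sketch of that merge (merge_dhcp_options is a
# hypothetical helper for illustration, not the real OVNClient code):
def merge_dhcp_options(subnet_options, port_overrides):
    # Start from the subnet defaults, then let the port win per key.
    merged = dict(subnet_options)
    merged.update(port_overrides)
    return merged

_subnet_opts = {'router': '10.0.0.1', 'mtu': '1000'}
# port-id-3 overrides only 'mtu' and inherits the router option:
assert merge_dhcp_options(_subnet_opts, {'mtu': '1200'}) == {
    'router': '10.0.0.1', 'mtu': '1200'}
# port-id-2 overrides only 'router' and inherits the mtu option:
assert merge_dhcp_options(_subnet_opts, {'router': '10.0.0.33'}) == {
    'router': '10.0.0.33', 'mtu': '1000'}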
self.assertEqual(len(add_dhcp_calls), self.mech_driver._nb_ovn.add_dhcp_options.call_count) self.mech_driver._nb_ovn.add_dhcp_options.assert_has_calls( add_dhcp_calls, any_order=True) # Check setting lport rows set_lsp_calls = [mock.call(lport_name='port-id-1', dhcpv4_options=dhcp_option_command), mock.call(lport_name='port-id-2', dhcpv4_options=dhcp_option_command), mock.call(lport_name='port-id-3', dhcpv4_options=dhcp_option_command)] self.assertEqual(len(set_lsp_calls), self.mech_driver._nb_ovn.set_lswitch_port.call_count) self.mech_driver._nb_ovn.set_lswitch_port.assert_has_calls( set_lsp_calls, any_order=True) @mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'get_ports') @mock.patch.object(n_net, 'get_random_mac') def test_enable_subnet_dhcp_options_in_ovn_ipv6(self, grm, gps): grm.return_value = '01:02:03:04:05:06' gps.return_value = [ {'id': 'port-id-1', 'device_owner': 'nova:compute'}, {'id': 'port-id-2', 'device_owner': 'nova:compute', 'extra_dhcp_opts': [ {'opt_value': '11:22:33:44:55:66', 'ip_version': 6, 'opt_name': 'server-id'}]}, {'id': 'port-id-3', 'device_owner': 'nova:compute', 'extra_dhcp_opts': [ {'opt_value': '10::34', 'ip_version': 6, 'opt_name': 'dns-server'}]}, {'id': 'port-id-10', 'device_owner': 'network:foo'}] subnet = {'id': 'subnet-id', 'ip_version': 6, 'cidr': '10::0/64', 'gateway_ip': '10::1', 'enable_dhcp': True, 'ipv6_address_mode': 'dhcpv6-stateless', 'dns_nameservers': [], 'host_routes': []} network = {'id': 'network-id', 'mtu': 1000} txn = self.mech_driver._nb_ovn.transaction().__enter__.return_value dhcp_option_command = mock.Mock() txn.add.return_value = dhcp_option_command self.mech_driver._ovn_client._enable_subnet_dhcp_options( subnet, network, txn) # Check adding DHCP_Options rows subnet_dhcp_options = { 'external_ids': {'subnet_id': subnet['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'}, 'cidr': subnet['cidr'], 'options': { 'dhcpv6_stateless': 'true', 'server_id': '01:02:03:04:05:06'}} ports_dhcp_options = [{ 'external_ids': {'subnet_id': subnet['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1', 'port_id': 'port-id-2'}, 'cidr': subnet['cidr'], 'options': { 'dhcpv6_stateless': 'true', 'server_id': '11:22:33:44:55:66'}}, { 'external_ids': {'subnet_id': subnet['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1', 'port_id': 'port-id-3'}, 'cidr': subnet['cidr'], 'options': { 'dhcpv6_stateless': 'true', 'server_id': '01:02:03:04:05:06', 'dns_server': '10::34'}}] add_dhcp_calls = [mock.call('subnet-id', **subnet_dhcp_options)] add_dhcp_calls.extend([mock.call( 'subnet-id', port_id=port_dhcp_options['external_ids']['port_id'], **port_dhcp_options) for port_dhcp_options in ports_dhcp_options]) self.assertEqual(len(add_dhcp_calls), self.mech_driver._nb_ovn.add_dhcp_options.call_count) self.mech_driver._nb_ovn.add_dhcp_options.assert_has_calls( add_dhcp_calls, any_order=True) # Check setting lport rows set_lsp_calls = [mock.call(lport_name='port-id-1', dhcpv6_options=dhcp_option_command), mock.call(lport_name='port-id-2', dhcpv6_options=dhcp_option_command), mock.call(lport_name='port-id-3', dhcpv6_options=dhcp_option_command)] self.assertEqual(len(set_lsp_calls), self.mech_driver._nb_ovn.set_lswitch_port.call_count) self.mech_driver._nb_ovn.set_lswitch_port.assert_has_calls( set_lsp_calls, any_order=True) def test_enable_subnet_dhcp_options_in_ovn_ipv6_slaac(self): subnet = {'id': 'subnet-id', 'ip_version': 6, 'enable_dhcp': True, 'ipv6_address_mode': 'slaac'} network = {'id': 'network-id'} self.mech_driver._ovn_client._enable_subnet_dhcp_options( subnet, network, 
mock.Mock()) self.mech_driver._nb_ovn.add_dhcp_options.assert_not_called() self.mech_driver._nb_ovn.set_lswitch_port.assert_not_called() def _test_remove_subnet_dhcp_options_in_ovn(self, ip_version): opts = {'subnet': {'uuid': 'subnet-uuid'}, 'ports': [{'uuid': 'port1-uuid'}]} self.mech_driver._nb_ovn.get_subnet_dhcp_options.return_value = opts self.mech_driver._ovn_client._remove_subnet_dhcp_options( 'subnet-id', mock.Mock()) # Check deleting DHCP_Options rows delete_dhcp_calls = [mock.call('subnet-uuid'), mock.call('port1-uuid')] self.assertEqual( len(delete_dhcp_calls), self.mech_driver._nb_ovn.delete_dhcp_options.call_count) self.mech_driver._nb_ovn.delete_dhcp_options.assert_has_calls( delete_dhcp_calls, any_order=True) def test_remove_subnet_dhcp_options_in_ovn_ipv4(self): self._test_remove_subnet_dhcp_options_in_ovn(4) def test_remove_subnet_dhcp_options_in_ovn_ipv6(self): self._test_remove_subnet_dhcp_options_in_ovn(6) def test_update_subnet_dhcp_options_in_ovn_ipv4(self): subnet = {'id': 'subnet-id', 'ip_version': 4, 'cidr': '10.0.0.0/24', 'network_id': 'network-id', 'gateway_ip': '10.0.0.1', 'enable_dhcp': True, 'dns_nameservers': [], 'host_routes': []} network = {'id': 'network-id', 'mtu': 1000} original_options = {'subnet': { 'external_ids': {'subnet_id': subnet['id']}, 'cidr': subnet['cidr'], 'options': { 'router': '10.0.0.2', 'server_id': '10.0.0.2', 'server_mac': '01:02:03:04:05:06', 'dns_server': '{8.8.8.8}', 'lease_time': str(12 * 60 * 60), 'mtu': str(1000)}}, 'ports': []} self.mech_driver._nb_ovn.get_subnet_dhcp_options.return_value =\ original_options self.mech_driver._ovn_client._update_subnet_dhcp_options( subnet, network, mock.Mock()) new_options = { 'external_ids': {'subnet_id': subnet['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'}, 'cidr': subnet['cidr'], 'options': { 'router': subnet['gateway_ip'], 'server_id': subnet['gateway_ip'], 'dns_server': '{8.8.8.8}', 'server_mac': '01:02:03:04:05:06', 'lease_time': str(12 * 60 * 60), 'mtu': str(1000)}} self.mech_driver._nb_ovn.add_dhcp_options.assert_called_once_with( subnet['id'], **new_options) def test_update_subnet_dhcp_options_in_ovn_ipv4_not_change(self): subnet = {'id': 'subnet-id', 'ip_version': 4, 'cidr': '10.0.0.0/24', 'network_id': 'network-id', 'gateway_ip': '10.0.0.1', 'enable_dhcp': True, 'dns_nameservers': [], 'host_routes': []} network = {'id': 'network-id', 'mtu': 1000} original_options = {'subnet': { 'external_ids': {'subnet_id': subnet['id']}, 'cidr': subnet['cidr'], 'options': { 'router': subnet['gateway_ip'], 'server_id': subnet['gateway_ip'], 'server_mac': '01:02:03:04:05:06', 'dns_server': '{8.8.8.8}', 'lease_time': str(12 * 60 * 60), 'mtu': str(1000)}}, 'ports': []} self.mech_driver._nb_ovn.get_subnet_dhcp_options.return_value =\ original_options self.mech_driver._ovn_client._update_subnet_dhcp_options( subnet, network, mock.Mock()) self.mech_driver._nb_ovn.add_dhcp_options.assert_not_called() def test_update_subnet_dhcp_options_in_ovn_ipv6(self): subnet = {'id': 'subnet-id', 'ip_version': 6, 'cidr': '10::0/64', 'network_id': 'network-id', 'gateway_ip': '10::1', 'enable_dhcp': True, 'ipv6_address_mode': 'dhcpv6-stateless', 'dns_nameservers': ['10::3'], 'host_routes': []} network = {'id': 'network-id', 'mtu': 1000} original_options = {'subnet': { 'external_ids': {'subnet_id': subnet['id']}, 'cidr': subnet['cidr'], 'options': { 'dhcpv6_stateless': 'true', 'server_id': '01:02:03:04:05:06'}}, 'ports': []} self.mech_driver._nb_ovn.get_subnet_dhcp_options.return_value =\ original_options
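# The "not_change" cases on either side of this point pin down an
# idempotency rule: the client recomputes the desired DHCP options and
# only calls add_dhcp_options() when they differ from the stored row.
# A rough, self-contained sketch of such a check (needs_dhcp_update is a
# hypothetical helper; the real comparison lives inside
# _update_subnet_dhcp_options and also accounts for external_ids):
def needs_dhcp_update(stored_options, desired_options):
    # Compare only the payload columns for this illustration.
    relevant = ('cidr', 'options')
    return any(stored_options.get(k) != desired_options.get(k)
               for k in relevant)

# Unchanged options (the *_not_change tests): no write-back happens.
assert not needs_dhcp_update(
    {'cidr': '10::0/64', 'options': {'server_id': '01:02:03:04:05:06'}},
    {'cidr': '10::0/64', 'options': {'server_id': '01:02:03:04:05:06'}})
# A new dns_server entry (as in the IPv6 test here) forces an update.
assert needs_dhcp_update(
    {'cidr': '10::0/64', 'options': {'server_id': '01:02:03:04:05:06'}},
    {'cidr': '10::0/64', 'options': {'server_id': '01:02:03:04:05:06',
                                     'dns_server': '{10::3}'}})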
self.mech_driver._ovn_client._update_subnet_dhcp_options( subnet, network, mock.Mock()) new_options = { 'external_ids': {'subnet_id': subnet['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'}, 'cidr': subnet['cidr'], 'options': { 'dhcpv6_stateless': 'true', 'dns_server': '{10::3}', 'server_id': '01:02:03:04:05:06'}} self.mech_driver._nb_ovn.add_dhcp_options.assert_called_once_with( subnet['id'], **new_options) def test_update_subnet_dhcp_options_in_ovn_ipv6_not_change(self): subnet = {'id': 'subnet-id', 'ip_version': 6, 'cidr': '10::0/64', 'gateway_ip': '10::1', 'enable_dhcp': True, 'ipv6_address_mode': 'dhcpv6-stateless', 'dns_nameservers': [], 'host_routes': []} network = {'id': 'network-id', 'mtu': 1000} original_options = {'subnet': { 'external_ids': {'subnet_id': subnet['id']}, 'cidr': subnet['cidr'], 'options': { 'dhcpv6_stateless': 'true', 'server_id': '01:02:03:04:05:06'}}, 'ports': []} self.mech_driver._nb_ovn.get_subnet_dhcp_options.return_value =\ original_options self.mech_driver._ovn_client._update_subnet_dhcp_options( subnet, network, mock.Mock()) self.mech_driver._nb_ovn.add_dhcp_options.assert_not_called() def test_update_subnet_dhcp_options_in_ovn_ipv6_slaac(self): subnet = {'id': 'subnet-id', 'ip_version': 6, 'enable_dhcp': True, 'ipv6_address_mode': 'slaac'} network = {'id': 'network-id'} self.mech_driver._ovn_client._update_subnet_dhcp_options( subnet, network, mock.Mock()) self.mech_driver._nb_ovn.get_subnet_dhcp_options.assert_not_called() self.mech_driver._nb_ovn.add_dhcp_options.assert_not_called() def test_update_subnet_postcommit_ovn_do_nothing(self): context = fakes.FakeSubnetContext( subnet={'enable_dhcp': False, 'ip_version': 4, 'network_id': 'id', 'id': 'subnet_id'}, network={'id': 'id'}) with mock.patch.object( self.mech_driver._ovn_client, '_enable_subnet_dhcp_options') as esd,\ mock.patch.object( self.mech_driver._ovn_client, '_remove_subnet_dhcp_options') as dsd,\ mock.patch.object( self.mech_driver._ovn_client, '_update_subnet_dhcp_options') as usd,\ mock.patch.object( self.mech_driver._ovn_client, '_find_metadata_port') as fmd,\ mock.patch.object( self.mech_driver._ovn_client, 'update_metadata_port') as umd: self.mech_driver.update_subnet_postcommit(context) esd.assert_not_called() dsd.assert_not_called() usd.assert_not_called() fmd.assert_not_called() umd.assert_not_called() def test_update_subnet_postcommit_enable_dhcp(self): context = fakes.FakeSubnetContext( subnet={'enable_dhcp': True, 'ip_version': 4, 'network_id': 'id', 'id': 'subnet_id'}, network={'id': 'id'}) with mock.patch.object( self.mech_driver._ovn_client, '_enable_subnet_dhcp_options') as esd,\ mock.patch.object( self.mech_driver._ovn_client, 'update_metadata_port') as umd: self.mech_driver.update_subnet_postcommit(context) esd.assert_called_once_with( context.current, context.network.current, mock.ANY) umd.assert_called_once_with(mock.ANY, 'id') def test_update_subnet_postcommit_disable_dhcp(self): self.mech_driver._nb_ovn.get_subnet_dhcp_options.return_value = { 'subnet': mock.sentinel.subnet, 'ports': []} context = fakes.FakeSubnetContext( subnet={'enable_dhcp': False, 'id': 'fake_id', 'ip_version': 4, 'network_id': 'id'}, network={'id': 'id'}) with mock.patch.object( self.mech_driver._ovn_client, '_remove_subnet_dhcp_options') as dsd,\ mock.patch.object( self.mech_driver._ovn_client, 'update_metadata_port') as umd: self.mech_driver.update_subnet_postcommit(context) dsd.assert_called_once_with(context.current['id'], mock.ANY) umd.assert_called_once_with(mock.ANY, 'id') def
test_update_subnet_postcommit_update_dhcp(self): self.mech_driver._nb_ovn.get_subnet_dhcp_options.return_value = { 'subnet': mock.sentinel.subnet, 'ports': []} context = fakes.FakeSubnetContext( subnet={'enable_dhcp': True, 'ip_version': 4, 'network_id': 'id', 'id': 'subnet_id'}, network={'id': 'id'}) with mock.patch.object( self.mech_driver._ovn_client, '_update_subnet_dhcp_options') as usd,\ mock.patch.object( self.mech_driver._ovn_client, 'update_metadata_port') as umd: self.mech_driver.update_subnet_postcommit(context) usd.assert_called_once_with( context.current, context.network.current, mock.ANY) umd.assert_called_once_with(mock.ANY, 'id') @mock.patch.object(provisioning_blocks, 'is_object_blocked') @mock.patch.object(provisioning_blocks, 'provisioning_complete') def test_notify_dhcp_updated(self, mock_prov_complete, mock_is_obj_block): port_id = 'fake-port-id' mock_is_obj_block.return_value = True self.mech_driver._notify_dhcp_updated(port_id) mock_prov_complete.assert_called_once_with( mock.ANY, port_id, resources.PORT, provisioning_blocks.DHCP_ENTITY) mock_is_obj_block.return_value = False mock_prov_complete.reset_mock() self.mech_driver._notify_dhcp_updated(port_id) mock_prov_complete.assert_not_called() @mock.patch.object(mech_driver.OVNMechanismDriver, '_is_port_provisioning_required', lambda *_: True) @mock.patch.object(mech_driver.OVNMechanismDriver, '_notify_dhcp_updated') @mock.patch.object(ovn_client.OVNClient, 'create_port') def test_create_port_postcommit(self, mock_create_port, mock_notify_dhcp): fake_port = fakes.FakePort.create_one_port( attrs={'status': const.PORT_STATUS_DOWN}).info() fake_ctx = mock.Mock(current=fake_port) self.mech_driver.create_port_postcommit(fake_ctx) passed_fake_port = copy.deepcopy(fake_port) passed_fake_port['network'] = fake_ctx.network.current mock_create_port.assert_called_once_with(mock.ANY, passed_fake_port) mock_notify_dhcp.assert_called_once_with(fake_port['id']) @mock.patch.object(mech_driver.OVNMechanismDriver, '_is_port_provisioning_required', lambda *_: True) @mock.patch.object(mech_driver.OVNMechanismDriver, '_notify_dhcp_updated') @mock.patch.object(ovn_client.OVNClient, 'update_port') def test_update_port_postcommit(self, mock_update_port, mock_notify_dhcp): fake_port = fakes.FakePort.create_one_port( attrs={'status': const.PORT_STATUS_ACTIVE}).info() fake_ctx = mock.Mock(current=fake_port, original=fake_port) self.mech_driver.update_port_postcommit(fake_ctx) passed_fake_port = copy.deepcopy(fake_port) passed_fake_port['network'] = fake_ctx.network.current passed_fake_port_orig = copy.deepcopy(fake_ctx.original) passed_fake_port_orig['network'] = fake_ctx.network.current mock_update_port.assert_called_once_with( mock.ANY, passed_fake_port, port_object=passed_fake_port_orig) mock_notify_dhcp.assert_called_once_with(fake_port['id']) @mock.patch.object(mech_driver.OVNMechanismDriver, '_is_port_provisioning_required', lambda *_: True) @mock.patch.object(mech_driver.OVNMechanismDriver, '_notify_dhcp_updated') @mock.patch.object(ovn_client.OVNClient, 'update_port') def test_update_port_postcommit_live_migration( self, mock_update_port, mock_notify_dhcp): self.plugin.update_port_status = mock.Mock() fake_context = 'fake_context' fake_port = fakes.FakePort.create_one_port( attrs={ 'status': const.PORT_STATUS_DOWN, portbindings.PROFILE: {ovn_const.MIGRATING_ATTR: 'foo'}, portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS}).info() fake_ctx = mock.Mock(current=fake_port, original=fake_port, _plugin_context=fake_context) 
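# NOTE: ovn_const.MIGRATING_ATTR in the binding profile marks an # in-progress live migration; in that case the driver is expected to # skip the OVN port update and DHCP notification and only flip the # port status to ACTIVE, as asserted below.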
self.mech_driver.update_port_postcommit(fake_ctx) mock_update_port.assert_not_called() mock_notify_dhcp.assert_not_called() self.plugin.update_port_status.assert_called_once_with( fake_context, fake_port['id'], const.PORT_STATUS_ACTIVE) def _add_chassis_agent(self, nb_cfg, agent_type, updated_at=None): updated_at = updated_at or datetime.datetime.utcnow() chassis = mock.Mock() chassis.nb_cfg = nb_cfg chassis.uuid = uuid.uuid4() chassis.external_ids = {ovn_const.OVN_LIVENESS_CHECK_EXT_ID_KEY: datetime.datetime.isoformat(updated_at)} if agent_type == ovn_const.OVN_METADATA_AGENT: chassis.external_ids.update({ ovn_const.OVN_AGENT_METADATA_SB_CFG_KEY: nb_cfg, ovn_const.METADATA_LIVENESS_CHECK_EXT_ID_KEY: datetime.datetime.isoformat(updated_at)}) return chassis def test_agent_alive_true(self): for agent_type in (ovn_const.OVN_CONTROLLER_AGENT, ovn_const.OVN_METADATA_AGENT): self.mech_driver._nb_ovn.nb_global.nb_cfg = 5 chassis = self._add_chassis_agent(5, agent_type) self.assertTrue(self.mech_driver.agent_alive(chassis, agent_type)) def test_agent_alive_true_one_diff(self): # Agent should be reported as alive when the nb_cfg delta is 1 # even if the last update time was old enough. for agent_type in (ovn_const.OVN_CONTROLLER_AGENT, ovn_const.OVN_METADATA_AGENT): self.mech_driver._nb_ovn.nb_global.nb_cfg = 5 now = timeutils.utcnow() updated_at = now - datetime.timedelta(cfg.CONF.agent_down_time + 1) chassis = self._add_chassis_agent(4, agent_type, updated_at) self.assertTrue(self.mech_driver.agent_alive(chassis, agent_type)) def test_agent_alive_not_timed_out(self): for agent_type in (ovn_const.OVN_CONTROLLER_AGENT, ovn_const.OVN_METADATA_AGENT): self.mech_driver._nb_ovn.nb_global.nb_cfg = 5 chassis = self._add_chassis_agent(3, agent_type) self.assertTrue(self.mech_driver.agent_alive(chassis, agent_type), "Agent type %s is not alive" % agent_type) def test_agent_alive_timed_out(self): for agent_type in (ovn_const.OVN_CONTROLLER_AGENT, ovn_const.OVN_METADATA_AGENT): self.mech_driver._nb_ovn.nb_global.nb_cfg = 5 now = timeutils.utcnow() updated_at = now - datetime.timedelta(cfg.CONF.agent_down_time + 1) chassis = self._add_chassis_agent(3, agent_type, updated_at) self.assertFalse(self.mech_driver.agent_alive(chassis, agent_type)) def _test__update_dnat_entry_if_needed(self, up=True): ovn_conf.cfg.CONF.set_override( 'enable_distributed_floating_ip', True, group='ovn') port_id = 'fake-port-id' fake_ext_mac_key = 'fake-ext-mac-key' fake_nat_uuid = uuidutils.generate_uuid() nat_row = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'_uuid': fake_nat_uuid, 'external_ids': { ovn_const.OVN_FIP_EXT_MAC_KEY: fake_ext_mac_key}}) fake_db_find = mock.Mock() fake_db_find.execute.return_value = [nat_row] self.nb_ovn.db_find.return_value = fake_db_find self.mech_driver._update_dnat_entry_if_needed(port_id, up=up) if up: # Assert that we are setting the external_mac in the NAT table self.nb_ovn.db_set.assert_called_once_with( 'NAT', fake_nat_uuid, ('external_mac', fake_ext_mac_key)) else: # Assert that we are cleaning the external_mac from the NAT table self.nb_ovn.db_clear.assert_called_once_with( 'NAT', fake_nat_uuid, 'external_mac') def test__update_dnat_entry_if_needed_up(self): self._test__update_dnat_entry_if_needed() def test__update_dnat_entry_if_needed_down(self): self._test__update_dnat_entry_if_needed(up=False) def _test_update_network_fragmentation(self, new_mtu, expected_opts): network_attrs = {external_net.EXTERNAL: True} network = self._make_network( self.fmt, 'net1', True, 
arg_list=(external_net.EXTERNAL,), **network_attrs) with self.subnet(network=network) as subnet: with self.port(subnet=subnet, device_owner=const.DEVICE_OWNER_ROUTER_GW) as port: # Let's update the MTU to something different network['network']['mtu'] = new_mtu fake_ctx = mock.MagicMock(current=network['network']) fake_ctx._plugin_context.session.is_active = False self.mech_driver.update_network_postcommit(fake_ctx) lrp_name = ovn_utils.ovn_lrouter_port_name(port['port']['id']) self.nb_ovn.update_lrouter_port.assert_called_once_with( if_exists=True, name=lrp_name, options=expected_opts) def test_update_network_need_to_frag_enabled(self): ovn_conf.cfg.CONF.set_override('ovn_emit_need_to_frag', True, group='ovn') new_mtu = 1234 expected_opts = {ovn_const.OVN_ROUTER_PORT_GW_MTU_OPTION: str(new_mtu)} self._test_update_network_fragmentation(new_mtu, expected_opts) def test_update_network_need_to_frag_disabled(self): ovn_conf.cfg.CONF.set_override('ovn_emit_need_to_frag', False, group='ovn') new_mtu = 1234 # Assert that the options column is empty (cleaning up an # existing value if set before) expected_opts = {} self._test_update_network_fragmentation(new_mtu, expected_opts) def test_ping_chassis(self): self.nb_ovn.nb_global.external_ids = {} self.mech_driver.ping_chassis() self.nb_ovn.check_liveness.assert_called_once_with() def test_ping_chassis_interval_expired(self): timeout = 10 ovn_conf.cfg.CONF.set_override('agent_down_time', timeout) # Pretend the interval is already expired time = (timeutils.utcnow(with_timezone=True) - datetime.timedelta(seconds=timeout)) self.nb_ovn.nb_global.external_ids = { ovn_const.OVN_LIVENESS_CHECK_EXT_ID_KEY: str(time)} # Since the interval has expired, assert that the "check_liveness" # command has been invoked self.mech_driver.ping_chassis() self.nb_ovn.check_liveness.assert_called_once_with() def test_ping_chassis_interval_not_expired(self): ovn_conf.cfg.CONF.set_override('agent_down_time', 10) # Pretend the interval has NOT yet expired time = timeutils.utcnow(with_timezone=True) self.nb_ovn.nb_global.external_ids = { ovn_const.OVN_LIVENESS_CHECK_EXT_ID_KEY: str(time)} # Assert that "check_liveness" wasn't invoked self.mech_driver.ping_chassis() self.assertFalse(self.nb_ovn.check_liveness.called) class OVNMechanismDriverTestCase(test_plugin.Ml2PluginV2TestCase): _mechanism_drivers = ['logger', 'ovn'] def setUp(self): cfg.CONF.set_override('global_physnet_mtu', 1550) cfg.CONF.set_override('tenant_network_types', ['geneve'], group='ml2') cfg.CONF.set_override('vni_ranges', ['1:65536'], group='ml2_type_geneve') ovn_conf.cfg.CONF.set_override('dns_servers', ['8.8.8.8'], group='ovn') super(OVNMechanismDriverTestCase, self).setUp() # Make sure the node and target_node for the hash ring in the # mechanism driver match node_uuid = uuidutils.generate_uuid() p = mock.patch.object(hash_ring_manager.HashRingManager, 'get_node', return_value=node_uuid) p.start() self.addCleanup(p.stop) self.driver.node_uuid = node_uuid self.driver.hash_ring_group = 'fake_hash_ring_group' mm = directory.get_plugin().mechanism_manager self.mech_driver = mm.mech_drivers['ovn'].obj nb_ovn = fakes.FakeOvsdbNbOvnIdl() sb_ovn = fakes.FakeOvsdbSbOvnIdl() self.mech_driver._nb_ovn = nb_ovn self.mech_driver._sb_ovn = sb_ovn self.mech_driver._insert_port_provisioning_block = mock.Mock() p = mock.patch.object(ovn_utils, 'get_revision_number', return_value=1) p.start() self.addCleanup(p.stop) class TestOVNMechanismDriverBasicGet(test_plugin.TestMl2BasicGet, OVNMechanismDriverTestCase): pass class
TestOVNMechanismDriverV2HTTPResponse(test_plugin.TestMl2V2HTTPResponse, OVNMechanismDriverTestCase): pass class TestOVNMechanismDriverNetworksV2(test_plugin.TestMl2NetworksV2, OVNMechanismDriverTestCase): def test__update_segmentation_id_ports_wrong_vif_type(self): """Skip the Update Segmentation ID tests Currently Segmentation ID cannot be updated till https://review.openstack.org/#/c/632984/ is merged to allow OVS Agents and thus OVN Mechanism Driver to allow updating of Segmentation IDs. Till then the test needs to be skipped """ pass def test__update_segmentation_id_ports(self): """Skip the Update Segmentation ID tests Currently Segmentation ID cannot be updated till https://review.openstack.org/#/c/632984/ is merged to allow OVS Agents and thus OVN Mechanism Driver to allow updating of Segmentation IDs. Till then the test needs to be skipped """ pass class TestOVNMechanismDriverSubnetsV2(test_plugin.TestMl2SubnetsV2, OVNMechanismDriverTestCase): def setUp(self): # Disable metadata so that we don't interfere with existing tests # in Neutron tree. Doing this because some of the tests assume that # first IP address in a subnet will be available and this is not true # with metadata since it will book an IP address on each subnet. ovn_conf.cfg.CONF.set_override('ovn_metadata_enabled', False, group='ovn') super(TestOVNMechanismDriverSubnetsV2, self).setUp() # NOTE(rtheis): Mock the OVN port update since it is getting subnet # information for ACL processing. This interferes with the update_port # mock already done by the test. def test_subnet_update_ipv4_and_ipv6_pd_v6stateless_subnets(self): with mock.patch.object(self.mech_driver._ovn_client, 'update_port'),\ mock.patch.object(self.mech_driver._ovn_client, '_get_subnet_dhcp_options_for_port', return_value={}): super(TestOVNMechanismDriverSubnetsV2, self).\ test_subnet_update_ipv4_and_ipv6_pd_v6stateless_subnets() # NOTE(rtheis): Mock the OVN port update since it is getting subnet # information for ACL processing. This interferes with the update_port # mock already done by the test. def test_subnet_update_ipv4_and_ipv6_pd_slaac_subnets(self): with mock.patch.object(self.mech_driver._ovn_client, 'update_port'),\ mock.patch.object(self.mech_driver._ovn_client, '_get_subnet_dhcp_options_for_port', return_value={}): super(TestOVNMechanismDriverSubnetsV2, self).\ test_subnet_update_ipv4_and_ipv6_pd_slaac_subnets() # NOTE(numans) Overriding the base test case here because the base test # case creates a network with vxlan type and OVN mech driver doesn't # support it. def test_create_subnet_check_mtu_in_mech_context(self): plugin = directory.get_plugin() plugin.mechanism_manager.create_subnet_precommit = mock.Mock() net_arg = {pnet.NETWORK_TYPE: 'geneve', pnet.SEGMENTATION_ID: '1'} network = self._make_network(self.fmt, 'net1', True, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID,), **net_arg) with self.subnet(network=network): mock_subnet_pre = plugin.mechanism_manager.create_subnet_precommit observed_mech_context = mock_subnet_pre.call_args_list[0][0][0] self.assertEqual(network['network']['mtu'], observed_mech_context.network.current['mtu']) class TestOVNMechanismDriverPortsV2(test_plugin.TestMl2PortsV2, OVNMechanismDriverTestCase): def setUp(self): # Disable metadata so that we don't interfere with existing tests # in Neutron tree. Doing this because some of the tests assume that # first IP address in a subnet will be available and this is not true # with metadata since it will book an IP address on each subnet.
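# NOTE: the override below is the in-test equivalent of this (assumed) # ml2 config snippet: [ovn] ovn_metadata_enabled = False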
ovn_conf.cfg.CONF.set_override('ovn_metadata_enabled', False, group='ovn') super(TestOVNMechanismDriverPortsV2, self).setUp() # NOTE(rtheis): Override this test to verify that updating # a port MAC fails when the port is bound. def test_update_port_mac(self): self.check_update_port_mac( host_arg={portbindings.HOST_ID: 'fake-host'}, arg_list=(portbindings.HOST_ID,), expected_status=exc.HTTPConflict.code, expected_error='PortBound') class TestOVNMechanismDriverAllowedAddressPairs( test_plugin.TestMl2AllowedAddressPairs, OVNMechanismDriverTestCase): pass class TestOVNMechanismDriverPortSecurity( test_ext_portsecurity.PSExtDriverTestCase, OVNMechanismDriverTestCase): pass class TestOVNMechanismDriverSegment(test_segment.HostSegmentMappingTestCase): _mechanism_drivers = ['logger', 'ovn'] def setUp(self): super(TestOVNMechanismDriverSegment, self).setUp() mm = directory.get_plugin().mechanism_manager self.mech_driver = mm.mech_drivers['ovn'].obj nb_ovn = fakes.FakeOvsdbNbOvnIdl() sb_ovn = fakes.FakeOvsdbSbOvnIdl() self.mech_driver._nb_ovn = nb_ovn self.mech_driver._sb_ovn = sb_ovn p = mock.patch.object(ovn_utils, 'get_revision_number', return_value=1) p.start() self.addCleanup(p.stop) def _test_segment_host_mapping(self): # Disable the callback to update SegmentHostMapping by default, so # that update_segment_host_mapping is the only path to add the mapping registry.unsubscribe( self.mech_driver._add_segment_host_mapping_for_segment, resources.SEGMENT, events.AFTER_CREATE) host = 'hostname' with self.network() as network: network = network['network'] segment1 = self._test_create_segment( network_id=network['id'], physical_network='phys_net1', segmentation_id=200, network_type='vlan')['segment'] # As geneve networks mtu shouldn't be more than 1450, update it data = {'network': {'mtu': 1450}} req = self.new_update_request('networks', data, network['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(1450, res['network']['mtu']) self._test_create_segment( network_id=network['id'], segmentation_id=200, network_type='geneve') self.mech_driver.update_segment_host_mapping(host, ['phys_net1']) segments_host_db = self._get_segments_for_host(host) self.assertEqual({segment1['id']}, set(segments_host_db)) return network['id'], host def test_update_segment_host_mapping(self): network_id, host = self._test_segment_host_mapping() # Update the mapping segment2 = self._test_create_segment( network_id=network_id, physical_network='phys_net2', segmentation_id=201, network_type='vlan')['segment'] self.mech_driver.update_segment_host_mapping(host, ['phys_net2']) segments_host_db = self._get_segments_for_host(host) self.assertEqual({segment2['id']}, set(segments_host_db)) def test_clear_segment_host_mapping(self): _, host = self._test_segment_host_mapping() # Clear the mapping self.mech_driver.update_segment_host_mapping(host, []) segments_host_db = self._get_segments_for_host(host) self.assertEqual({}, segments_host_db) def test_update_segment_host_mapping_with_new_segment(self): hostname_with_physnets = {'hostname1': ['phys_net1', 'phys_net2'], 'hostname2': ['phys_net1']} ovn_sb_api = self.mech_driver._sb_ovn ovn_sb_api.get_chassis_hostname_and_physnets.return_value = ( hostname_with_physnets) self.mech_driver.subscribe() with self.network() as network: network_id = network['network']['id'] segment = self._test_create_segment( network_id=network_id, physical_network='phys_net2', segmentation_id=201, network_type='vlan')['segment'] segments_host_db1 = 
self._get_segments_for_host('hostname1') # A new SegmentHostMapping should be created for hostname1 self.assertEqual({segment['id']}, set(segments_host_db1)) segments_host_db2 = self._get_segments_for_host('hostname2') self.assertFalse(set(segments_host_db2)) @mock.patch.object(n_net, 'get_random_mac', lambda *_: '01:02:03:04:05:06') class TestOVNMechanismDriverDHCPOptions(OVNMechanismDriverTestCase): def _test_get_ovn_dhcp_options_helper(self, subnet, network, expected_dhcp_options, service_mac=None): dhcp_options = self.mech_driver._ovn_client._get_ovn_dhcp_options( subnet, network, service_mac) self.assertEqual(expected_dhcp_options, dhcp_options) def test_get_ovn_dhcp_options(self): subnet = {'id': 'foo-subnet', 'network_id': 'network-id', 'cidr': '10.0.0.0/24', 'ip_version': 4, 'enable_dhcp': True, 'gateway_ip': '10.0.0.1', 'dns_nameservers': ['7.7.7.7', '8.8.8.8'], 'host_routes': [{'destination': '20.0.0.4', 'nexthop': '10.0.0.100'}]} network = {'id': 'network-id', 'mtu': 1400} expected_dhcp_options = {'cidr': '10.0.0.0/24', 'external_ids': { 'subnet_id': 'foo-subnet', ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'}} expected_dhcp_options['options'] = { 'server_id': subnet['gateway_ip'], 'server_mac': '01:02:03:04:05:06', 'lease_time': str(12 * 60 * 60), 'mtu': '1400', 'router': subnet['gateway_ip'], 'dns_server': '{7.7.7.7, 8.8.8.8}', 'classless_static_route': '{20.0.0.4,10.0.0.100, 0.0.0.0/0,10.0.0.1}' } self._test_get_ovn_dhcp_options_helper(subnet, network, expected_dhcp_options) expected_dhcp_options['options']['server_mac'] = '11:22:33:44:55:66' self._test_get_ovn_dhcp_options_helper(subnet, network, expected_dhcp_options, service_mac='11:22:33:44:55:66') def test_get_ovn_dhcp_options_dhcp_disabled(self): subnet = {'id': 'foo-subnet', 'network_id': 'network-id', 'cidr': '10.0.0.0/24', 'ip_version': 4, 'enable_dhcp': False, 'gateway_ip': '10.0.0.1', 'dns_nameservers': ['7.7.7.7', '8.8.8.8'], 'host_routes': [{'destination': '20.0.0.4', 'nexthop': '10.0.0.100'}]} network = {'id': 'network-id', 'mtu': 1400} expected_dhcp_options = {'cidr': '10.0.0.0/24', 'external_ids': { 'subnet_id': 'foo-subnet', ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'}, 'options': {}} self._test_get_ovn_dhcp_options_helper(subnet, network, expected_dhcp_options) def test_get_ovn_dhcp_options_no_gw_ip(self): subnet = {'id': 'foo-subnet', 'network_id': 'network-id', 'cidr': '10.0.0.0/24', 'ip_version': 4, 'enable_dhcp': True, 'gateway_ip': None, 'dns_nameservers': ['7.7.7.7', '8.8.8.8'], 'host_routes': [{'destination': '20.0.0.4', 'nexthop': '10.0.0.100'}]} network = {'id': 'network-id', 'mtu': 1400} expected_dhcp_options = {'cidr': '10.0.0.0/24', 'external_ids': { 'subnet_id': 'foo-subnet', ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'}, 'options': {}} self._test_get_ovn_dhcp_options_helper(subnet, network, expected_dhcp_options) def test_get_ovn_dhcp_options_no_gw_ip_but_metadata_ip(self): subnet = {'id': 'foo-subnet', 'network_id': 'network-id', 'cidr': '10.0.0.0/24', 'ip_version': 4, 'enable_dhcp': True, 'dns_nameservers': [], 'host_routes': [], 'gateway_ip': None} network = {'id': 'network-id', 'mtu': 1400} expected_dhcp_options = { 'cidr': '10.0.0.0/24', 'external_ids': {'subnet_id': 'foo-subnet', ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'}, 'options': {'server_id': '10.0.0.2', 'server_mac': '01:02:03:04:05:06', 'dns_server': '{8.8.8.8}', 'lease_time': str(12 * 60 * 60), 'mtu': '1400', 'classless_static_route': '{169.254.169.254/32,10.0.0.2}'}} with mock.patch.object(self.mech_driver._ovn_client, '_find_metadata_port_ip', 
return_value='10.0.0.2'): self._test_get_ovn_dhcp_options_helper(subnet, network, expected_dhcp_options) def test_get_ovn_dhcp_options_with_global_options(self): ovn_conf.cfg.CONF.set_override('ovn_dhcp4_global_options', 'ntp_server:8.8.8.8,' 'mtu:9000,' 'wpad:', group='ovn') subnet = {'id': 'foo-subnet', 'network_id': 'network-id', 'cidr': '10.0.0.0/24', 'ip_version': 4, 'enable_dhcp': True, 'gateway_ip': '10.0.0.1', 'dns_nameservers': ['7.7.7.7', '8.8.8.8'], 'host_routes': [{'destination': '20.0.0.4', 'nexthop': '10.0.0.100'}]} network = {'id': 'network-id', 'mtu': 1400} expected_dhcp_options = {'cidr': '10.0.0.0/24', 'external_ids': { 'subnet_id': 'foo-subnet', ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'}} expected_dhcp_options['options'] = { 'server_id': subnet['gateway_ip'], 'server_mac': '01:02:03:04:05:06', 'lease_time': str(12 * 60 * 60), 'mtu': '1400', 'router': subnet['gateway_ip'], 'ntp_server': '8.8.8.8', 'dns_server': '{7.7.7.7, 8.8.8.8}', 'classless_static_route': '{20.0.0.4,10.0.0.100, 0.0.0.0/0,10.0.0.1}' } self._test_get_ovn_dhcp_options_helper(subnet, network, expected_dhcp_options) expected_dhcp_options['options']['server_mac'] = '11:22:33:44:55:66' self._test_get_ovn_dhcp_options_helper(subnet, network, expected_dhcp_options, service_mac='11:22:33:44:55:66') def test_get_ovn_dhcp_options_with_global_options_ipv6(self): ovn_conf.cfg.CONF.set_override('ovn_dhcp6_global_options', 'ntp_server:8.8.8.8,' 'server_id:01:02:03:04:05:04,' 'wpad:', group='ovn') subnet = {'id': 'foo-subnet', 'network_id': 'network-id', 'cidr': 'ae70::/24', 'ip_version': 6, 'enable_dhcp': True, 'dns_nameservers': ['7.7.7.7', '8.8.8.8']} network = {'id': 'network-id', 'mtu': 1400} ext_ids = {'subnet_id': 'foo-subnet', ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'} expected_dhcp_options = { 'cidr': 'ae70::/24', 'external_ids': ext_ids, 'options': {'server_id': '01:02:03:04:05:06', 'ntp_server': '8.8.8.8', 'dns_server': '{7.7.7.7, 8.8.8.8}'}} self._test_get_ovn_dhcp_options_helper(subnet, network, expected_dhcp_options) expected_dhcp_options['options']['server_id'] = '11:22:33:44:55:66' self._test_get_ovn_dhcp_options_helper(subnet, network, expected_dhcp_options, service_mac='11:22:33:44:55:66') def test_get_ovn_dhcp_options_ipv6_subnet(self): subnet = {'id': 'foo-subnet', 'network_id': 'network-id', 'cidr': 'ae70::/24', 'ip_version': 6, 'enable_dhcp': True, 'dns_nameservers': ['7.7.7.7', '8.8.8.8']} network = {'id': 'network-id', 'mtu': 1400} ext_ids = {'subnet_id': 'foo-subnet', ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'} expected_dhcp_options = { 'cidr': 'ae70::/24', 'external_ids': ext_ids, 'options': {'server_id': '01:02:03:04:05:06', 'dns_server': '{7.7.7.7, 8.8.8.8}'}} self._test_get_ovn_dhcp_options_helper(subnet, network, expected_dhcp_options) expected_dhcp_options['options']['server_id'] = '11:22:33:44:55:66' self._test_get_ovn_dhcp_options_helper(subnet, network, expected_dhcp_options, service_mac='11:22:33:44:55:66') def test_get_ovn_dhcp_options_dhcpv6_stateless_subnet(self): subnet = {'id': 'foo-subnet', 'network_id': 'network-id', 'cidr': 'ae70::/24', 'ip_version': 6, 'enable_dhcp': True, 'dns_nameservers': ['7.7.7.7', '8.8.8.8'], 'ipv6_address_mode': const.DHCPV6_STATELESS} network = {'id': 'network-id', 'mtu': 1400} ext_ids = {'subnet_id': 'foo-subnet', ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'} expected_dhcp_options = { 'cidr': 'ae70::/24', 'external_ids': ext_ids, 'options': {'server_id': '01:02:03:04:05:06', 'dns_server': '{7.7.7.7, 8.8.8.8}', 'dhcpv6_stateless': 'true'}} 
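# NOTE: for a dhcpv6-stateless subnet the driver is expected to add only # the dhcpv6_stateless flag on top of server_id/dns_server; address # assignment itself is left to router advertisements.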
self._test_get_ovn_dhcp_options_helper(subnet, network, expected_dhcp_options) expected_dhcp_options['options']['server_id'] = '11:22:33:44:55:66' self._test_get_ovn_dhcp_options_helper(subnet, network, expected_dhcp_options, service_mac='11:22:33:44:55:66') def test_get_ovn_dhcp_options_metadata_route(self): subnet = {'id': 'foo-subnet', 'network_id': 'network-id', 'cidr': '10.0.0.0/24', 'ip_version': 4, 'enable_dhcp': True, 'gateway_ip': '10.0.0.1', 'dns_nameservers': ['7.7.7.7', '8.8.8.8'], 'host_routes': []} network = {'id': 'network-id', 'mtu': 1400} expected_dhcp_options = {'cidr': '10.0.0.0/24', 'external_ids': { 'subnet_id': 'foo-subnet', ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'}} expected_dhcp_options['options'] = { 'server_id': subnet['gateway_ip'], 'server_mac': '01:02:03:04:05:06', 'lease_time': str(12 * 60 * 60), 'mtu': '1400', 'router': subnet['gateway_ip'], 'dns_server': '{7.7.7.7, 8.8.8.8}', 'classless_static_route': '{169.254.169.254/32,10.0.0.2, 0.0.0.0/0,10.0.0.1}' } with mock.patch.object(self.mech_driver._ovn_client, '_find_metadata_port_ip', return_value='10.0.0.2'): self._test_get_ovn_dhcp_options_helper(subnet, network, expected_dhcp_options) def test_get_ovn_dhcp_options_domain_name(self): cfg.CONF.set_override('dns_domain', 'foo.com') subnet = {'id': 'foo-subnet', 'network_id': 'network-id', 'cidr': '10.0.0.0/24', 'ip_version': 4, 'enable_dhcp': True, 'gateway_ip': '10.0.0.1', 'dns_nameservers': ['7.7.7.7', '8.8.8.8'], 'host_routes': [{'destination': '20.0.0.4', 'nexthop': '10.0.0.100'}]} network = {'id': 'network-id', 'mtu': 1400} expected_dhcp_options = {'cidr': '10.0.0.0/24', 'external_ids': { 'subnet_id': 'foo-subnet', ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'}} expected_dhcp_options['options'] = { 'server_id': subnet['gateway_ip'], 'server_mac': '01:02:03:04:05:06', 'lease_time': str(12 * 60 * 60), 'mtu': '1400', 'router': subnet['gateway_ip'], 'domain_name': '"foo.com"', 'dns_server': '{7.7.7.7, 8.8.8.8}', 'classless_static_route': '{20.0.0.4,10.0.0.100, 0.0.0.0/0,10.0.0.1}' } self._test_get_ovn_dhcp_options_helper(subnet, network, expected_dhcp_options) expected_dhcp_options['options']['server_mac'] = '11:22:33:44:55:66' self._test_get_ovn_dhcp_options_helper(subnet, network, expected_dhcp_options, service_mac='11:22:33:44:55:66') def _test__get_port_dhcp_options_port_dhcp_opts_set(self, ip_version=4): if ip_version == 4: ip_address = '10.0.0.11' else: ip_address = 'aef0::4' port = { 'id': 'foo-port', 'device_owner': 'compute:None', 'fixed_ips': [{'subnet_id': 'foo-subnet', 'ip_address': ip_address}]} if ip_version == 4: port['extra_dhcp_opts'] = [ {'ip_version': 4, 'opt_name': 'mtu', 'opt_value': '1200'}, {'ip_version': 4, 'opt_name': 'ntp-server', 'opt_value': '8.8.8.8'}] else: port['extra_dhcp_opts'] = [ {'ip_version': 6, 'opt_name': 'domain-search', 'opt_value': 'foo-domain'}, {'ip_version': 4, 'opt_name': 'dns-server', 'opt_value': '7.7.7.7'}] self.mech_driver._ovn_client._get_subnet_dhcp_options_for_port = ( mock.Mock( return_value=({ 'cidr': '10.0.0.0/24' if ip_version == 4 else 'aef0::/64', 'external_ids': {'subnet_id': 'foo-subnet'}, 'options': (ip_version == 4) and { 'router': '10.0.0.1', 'mtu': '1400'} or { 'server_id': '01:02:03:04:05:06'}, 'uuid': 'foo-uuid'}))) if ip_version == 4: expected_dhcp_options = { 'cidr': '10.0.0.0/24', 'external_ids': {'subnet_id': 'foo-subnet', 'port_id': 'foo-port'}, 'options': {'router': '10.0.0.1', 'mtu': '1200', 'ntp_server': '8.8.8.8'}} else: expected_dhcp_options = { 'cidr': 'aef0::/64', 'external_ids': {'subnet_id': 
'foo-subnet', 'port_id': 'foo-port'}, 'options': {'server_id': '01:02:03:04:05:06', 'domain_search': 'foo-domain'}} self.mech_driver._nb_ovn.add_dhcp_options.return_value = 'foo-val' dhcp_options = self.mech_driver._ovn_client._get_port_dhcp_options( port, ip_version) self.assertEqual({'cmd': 'foo-val'}, dhcp_options) self.mech_driver._nb_ovn.add_dhcp_options.assert_called_once_with( 'foo-subnet', port_id='foo-port', **expected_dhcp_options) def test__get_port_dhcp_options_port_dhcp_opts_set_v4(self): self._test__get_port_dhcp_options_port_dhcp_opts_set(ip_version=4) def test__get_port_dhcp_options_port_dhcp_opts_set_v6(self): self._test__get_port_dhcp_options_port_dhcp_opts_set(ip_version=6) def _test__get_port_dhcp_options_port_dhcp_opts_not_set(self, ip_version=4): if ip_version == 4: port = {'id': 'foo-port', 'device_owner': 'compute:None', 'fixed_ips': [{'subnet_id': 'foo-subnet', 'ip_address': '10.0.0.11'}]} else: port = {'id': 'foo-port', 'device_owner': 'compute:None', 'fixed_ips': [{'subnet_id': 'foo-subnet', 'ip_address': 'aef0::4'}]} if ip_version == 4: expected_dhcp_opts = { 'cidr': '10.0.0.0/24', 'external_ids': {'subnet_id': 'foo-subnet'}, 'options': {'router': '10.0.0.1', 'mtu': '1400'}} else: expected_dhcp_opts = { 'cidr': 'aef0::/64', 'external_ids': {'subnet_id': 'foo-subnet'}, 'options': {'server_id': '01:02:03:04:05:06'}} self.mech_driver._ovn_client._get_subnet_dhcp_options_for_port = ( mock.Mock(return_value=expected_dhcp_opts)) self.assertEqual( expected_dhcp_opts, self.mech_driver._ovn_client._get_port_dhcp_options( port, ip_version=ip_version)) # Since the port has no extra DHCPv4/v6 options defined, no new # DHCP_Options row should be created and logical switch port DHCPv4/v6 # options should point to the subnet DHCPv4/v6 options. self.mech_driver._nb_ovn.add_dhcp_options.assert_not_called() def test__get_port_dhcp_options_port_dhcp_opts_not_set_v4(self): self._test__get_port_dhcp_options_port_dhcp_opts_not_set(ip_version=4) def test__get_port_dhcp_options_port_dhcp_opts_not_set_v6(self): self._test__get_port_dhcp_options_port_dhcp_opts_not_set(ip_version=6) def _test__get_port_dhcp_options_port_dhcp_disabled(self, ip_version=4): port = { 'id': 'foo-port', 'device_owner': 'compute:None', 'fixed_ips': [{'subnet_id': 'foo-subnet', 'ip_address': '10.0.0.11'}, {'subnet_id': 'foo-subnet-v6', 'ip_address': 'aef0::11'}], 'extra_dhcp_opts': [{'ip_version': 4, 'opt_name': 'dhcp_disabled', 'opt_value': 'False'}, {'ip_version': 6, 'opt_name': 'dhcp_disabled', 'opt_value': 'False'}] } subnet_dhcp_opts = mock.Mock() self.mech_driver._ovn_client._get_subnet_dhcp_options_for_port = ( mock.Mock(return_value=subnet_dhcp_opts)) # With no dhcp_disabled option set to true, the subnet dhcp options # will be retrieved for this port. Since it has no extra dhcp options # other than dhcp_disabled, no port dhcp options will be created. self.assertEqual( subnet_dhcp_opts, self.mech_driver._ovn_client._get_port_dhcp_options( port, ip_version)) self.assertEqual( 1, self.mech_driver._ovn_client._get_subnet_dhcp_options_for_port. call_count) self.mech_driver._nb_ovn.add_dhcp_options.assert_not_called() # Set dhcp_disabled to true for the ip_version specified by this test # case; no dhcp options will be retrieved since dhcp is now disabled # for the ip_version under test.
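# NOTE: extra_dhcp_opts was built above with the IPv4 dhcp_disabled # entry at index 0 and the IPv6 entry at index 1, so opt_index selects # the entry matching the ip_version under test.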
opt_index = 0 if ip_version == 4 else 1 port['extra_dhcp_opts'][opt_index]['opt_value'] = 'True' self.mech_driver._ovn_client._get_subnet_dhcp_options_for_port.\ reset_mock() self.assertIsNone( self.mech_driver._ovn_client._get_port_dhcp_options( port, ip_version)) self.assertEqual( 0, self.mech_driver._ovn_client._get_subnet_dhcp_options_for_port. call_count) self.mech_driver._nb_ovn.add_dhcp_options.assert_not_called() # Set dhcp_disabled to false for the ip_version specified by this test # case, and to true for the other ip_version. The subnet dhcp options # will still be retrieved, since dhcp_disabled for the ip_version not # under test should have no effect. opt_index_1 = 1 if ip_version == 4 else 0 port['extra_dhcp_opts'][opt_index]['opt_value'] = 'False' port['extra_dhcp_opts'][opt_index_1]['opt_value'] = 'True' self.assertEqual( subnet_dhcp_opts, self.mech_driver._ovn_client._get_port_dhcp_options( port, ip_version)) self.assertEqual( 1, self.mech_driver._ovn_client._get_subnet_dhcp_options_for_port. call_count) self.mech_driver._nb_ovn.add_dhcp_options.assert_not_called() def test__get_port_dhcp_options_port_dhcp_disabled_v4(self): self._test__get_port_dhcp_options_port_dhcp_disabled(ip_version=4) def test__get_port_dhcp_options_port_dhcp_disabled_v6(self): self._test__get_port_dhcp_options_port_dhcp_disabled(ip_version=6) def test__get_port_dhcp_options_port_with_invalid_device_owner(self): port = {'id': 'foo-port', 'device_owner': 'neutron:router_interface', 'fixed_ips': ['fake']} self.assertIsNone( self.mech_driver._ovn_client._get_port_dhcp_options( port, mock.ANY)) def _test__get_subnet_dhcp_options_for_port(self, ip_version=4, enable_dhcp=True): port = {'fixed_ips': [ {'ip_address': '10.0.0.4', 'subnet_id': 'v4_snet_id_1' if enable_dhcp else 'v4_snet_id_2'}, {'ip_address': '2001:dba::4', 'subnet_id': 'v6_snet_id_1' if enable_dhcp else 'v6_snet_id_2'}, {'ip_address': '2001:dbb::4', 'subnet_id': 'v6_snet_id_3'}]} def fake(subnets): fake_rows = { 'v4_snet_id_1': 'foo', 'v6_snet_id_1': {'options': {}}, 'v6_snet_id_3': {'options': { ovn_const.DHCPV6_STATELESS_OPT: 'true'}}} return [fake_rows[row] for row in fake_rows if row in subnets] self.mech_driver._nb_ovn.get_subnets_dhcp_options.side_effect = fake if ip_version == 4: expected_opts = 'foo' if enable_dhcp else None else: expected_opts = { 'options': {} if enable_dhcp else { ovn_const.DHCPV6_STATELESS_OPT: 'true'}} self.assertEqual( expected_opts, self.mech_driver._ovn_client._get_subnet_dhcp_options_for_port( port, ip_version)) def test__get_subnet_dhcp_options_for_port_v4(self): self._test__get_subnet_dhcp_options_for_port() def test__get_subnet_dhcp_options_for_port_v4_dhcp_disabled(self): self._test__get_subnet_dhcp_options_for_port(enable_dhcp=False) def test__get_subnet_dhcp_options_for_port_v6(self): self._test__get_subnet_dhcp_options_for_port(ip_version=6) def test__get_subnet_dhcp_options_for_port_v6_dhcp_disabled(self): self._test__get_subnet_dhcp_options_for_port(ip_version=6, enable_dhcp=False) class TestOVNMechanismDriverSecurityGroup( test_security_group.Ml2SecurityGroupsTestCase): # This set of test cases supplements test_acl.py; its purpose is to # verify that the acl methods are invoked. Correctness of the args # passed to the acl methods is mainly guaranteed by test_acl.py.
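# NOTE: the add_acl() call counts asserted below follow a fixed # baseline: each port gets one DHCP allow rule plus two default drop # rules (ingress and egress), and every security group rule adds one # more ACL, e.g. a port with a single empty group ends up with # 1 + 2 = 3 ACLs.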
def setUp(self): cfg.CONF.set_override('mechanism_drivers', ['logger', 'ovn'], 'ml2') cfg.CONF.set_override('dns_servers', ['8.8.8.8'], group='ovn') super(TestOVNMechanismDriverSecurityGroup, self).setUp() mm = directory.get_plugin().mechanism_manager self.mech_driver = mm.mech_drivers['ovn'].obj nb_ovn = fakes.FakeOvsdbNbOvnIdl() sb_ovn = fakes.FakeOvsdbSbOvnIdl() self.mech_driver._nb_ovn = nb_ovn self.mech_driver._sb_ovn = sb_ovn self.ctx = context.get_admin_context() revision_plugin.RevisionPlugin() def _delete_default_sg_rules(self, security_group_id): res = self._list( 'security-group-rules', query_params='security_group_id=%s' % security_group_id) for r in res['security_group_rules']: self._delete('security-group-rules', r['id']) def _create_sg(self, sg_name): sg = self._make_security_group(self.fmt, sg_name, '') return sg['security_group'] def _create_empty_sg(self, sg_name): sg = self._create_sg(sg_name) self._delete_default_sg_rules(sg['id']) return sg def _create_sg_rule(self, sg_id, direction, proto, port_range_min=None, port_range_max=None, remote_ip_prefix=None, remote_group_id=None, ethertype=const.IPv4): r = self._build_security_group_rule(sg_id, direction, proto, port_range_min=port_range_min, port_range_max=port_range_max, remote_ip_prefix=remote_ip_prefix, remote_group_id=remote_group_id, ethertype=ethertype) res = self._create_security_group_rule(self.fmt, r) rule = self.deserialize(self.fmt, res) return rule['security_group_rule'] def _delete_sg_rule(self, rule_id): self._delete('security-group-rules', rule_id) def test_create_security_group_with_port_group(self): self.mech_driver._nb_ovn.is_port_groups_supported.return_value = True sg = self._create_sg('sg') expected_pg_name = ovn_utils.ovn_port_group_name(sg['id']) expected_pg_add_calls = [ mock.call(acls=[], external_ids={'neutron:security_group_id': sg['id']}, name=expected_pg_name), ] self.mech_driver._nb_ovn.pg_add.assert_has_calls( expected_pg_add_calls) def test_delete_security_group_with_port_group(self): self.mech_driver._nb_ovn.is_port_groups_supported.return_value = True sg = self._create_sg('sg') self._delete('security-groups', sg['id']) expected_pg_name = ovn_utils.ovn_port_group_name(sg['id']) expected_pg_del_calls = [ mock.call(name=expected_pg_name), ] self.mech_driver._nb_ovn.pg_del.assert_has_calls( expected_pg_del_calls) def test_create_port_with_port_group(self): self.mech_driver._nb_ovn.is_port_groups_supported.return_value = True with self.network() as n, self.subnet(n): sg = self._create_empty_sg('sg') self._make_port(self.fmt, n['network']['id'], security_groups=[sg['id']]) # Assert the port has been added to the right security groups expected_pg_name = ovn_utils.ovn_port_group_name(sg['id']) expected_pg_add_ports_calls = [ mock.call('neutron_pg_drop', mock.ANY), mock.call(expected_pg_name, mock.ANY) ] self.mech_driver._nb_ovn.pg_add_ports.assert_has_calls( expected_pg_add_ports_calls) # Assert add_acl() is not used anymore self.assertFalse(self.mech_driver._nb_ovn.add_acl.called) def test_create_port_with_sg_default_rules(self): with self.network() as n, self.subnet(n): sg = self._create_sg('sg') self._make_port(self.fmt, n['network']['id'], security_groups=[sg['id']]) # One DHCP rule, one IPv6 rule, one IPv4 rule and # two default dropping rules. 
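# That is, 1 (DHCP) + 1 (IPv6) + 1 (IPv4) + 2 (drops) = 5 ACLs.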
self.assertEqual( 5, self.mech_driver._nb_ovn.add_acl.call_count) def test_create_port_with_empty_sg(self): with self.network() as n, self.subnet(n): sg = self._create_empty_sg('sg') self._make_port(self.fmt, n['network']['id'], security_groups=[sg['id']]) # One DHCP rule and two default dropping rules. self.assertEqual( 3, self.mech_driver._nb_ovn.add_acl.call_count) def test_create_port_with_multi_sgs(self): with self.network() as n, self.subnet(n): sg1 = self._create_empty_sg('sg1') sg2 = self._create_empty_sg('sg2') self._create_sg_rule(sg1['id'], 'ingress', const.PROTO_NAME_TCP, port_range_min=22, port_range_max=23) self._create_sg_rule(sg2['id'], 'egress', const.PROTO_NAME_UDP, remote_ip_prefix='0.0.0.0/0') self._make_port(self.fmt, n['network']['id'], security_groups=[sg1['id'], sg2['id']]) # One DHCP rule, one TCP rule, one UDP rule and # two default dropping rules. self.assertEqual( 5, self.mech_driver._nb_ovn.add_acl.call_count) def test_create_port_with_multi_sgs_duplicate_rules(self): with self.network() as n, self.subnet(n): sg1 = self._create_empty_sg('sg1') sg2 = self._create_empty_sg('sg2') self._create_sg_rule(sg1['id'], 'ingress', const.PROTO_NAME_TCP, port_range_min=22, port_range_max=23, remote_ip_prefix='20.0.0.0/24') self._create_sg_rule(sg2['id'], 'ingress', const.PROTO_NAME_TCP, port_range_min=22, port_range_max=23, remote_ip_prefix='20.0.0.0/24') self._make_port(self.fmt, n['network']['id'], security_groups=[sg1['id'], sg2['id']]) # One DHCP rule, two TCP rules and two default dropping rules. self.assertEqual( 5, self.mech_driver._nb_ovn.add_acl.call_count) @mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.' 'ovn_client.OVNClient.is_external_ports_supported', lambda *_: True) def test_create_port_with_vnic_direct(self): fake_grp = 'fake-default-ha-group-uuid' row = fakes.FakeOvsdbRow.create_one_ovsdb_row(attrs={'uuid': fake_grp}) self.mech_driver._nb_ovn.ha_chassis_group_get.return_value.\ execute.return_value = row with self.network() as n, self.subnet(n): self._create_port( self.fmt, n['network']['id'], arg_list=(portbindings.VNIC_TYPE,), **{portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT}) # Assert create_lswitch_port was called with the relevant # parameters _, kwargs = self.mech_driver._nb_ovn.create_lswitch_port.call_args self.assertEqual( 1, self.mech_driver._nb_ovn.create_lswitch_port.call_count) self.assertEqual(ovn_const.LSP_TYPE_EXTERNAL, kwargs['type']) self.assertEqual(fake_grp, kwargs['ha_chassis_group']) def test_update_port_with_sgs(self): with self.network() as n, self.subnet(n): sg1 = self._create_empty_sg('sg1') self._create_sg_rule(sg1['id'], 'ingress', const.PROTO_NAME_TCP, ethertype=const.IPv6) p = self._make_port(self.fmt, n['network']['id'], security_groups=[sg1['id']])['port'] # One DHCP rule, one TCP rule and two default dropping rules. self.assertEqual( 4, self.mech_driver._nb_ovn.add_acl.call_count) sg2 = self._create_empty_sg('sg2') self._create_sg_rule(sg2['id'], 'egress', const.PROTO_NAME_UDP, remote_ip_prefix='30.0.0.0/24') data = {'port': {'security_groups': [sg1['id'], sg2['id']]}} req = self.new_update_request('ports', data, p['id']) req.get_response(self.api) self.assertEqual( 1, self.mech_driver._nb_ovn.update_acls.call_count) def test_update_sg_change_rule(self): with self.network() as n, self.subnet(n): sg = self._create_empty_sg('sg') self._make_port(self.fmt, n['network']['id'], security_groups=[sg['id']]) # One DHCP rule and two default dropping rules.
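# NOTE: the first assertion checks the port-creation baseline via # add_acl(); subsequent rule additions and deletions on the attached # group are expected to flow through update_acls(), tracked below.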
self.assertEqual( 3, self.mech_driver._nb_ovn.add_acl.call_count) sg_r = self._create_sg_rule(sg['id'], 'ingress', const.PROTO_NAME_UDP, ethertype=const.IPv6) self.assertEqual( 1, self.mech_driver._nb_ovn.update_acls.call_count) self._delete_sg_rule(sg_r['id']) self.assertEqual( 2, self.mech_driver._nb_ovn.update_acls.call_count) def test_update_sg_change_rule_unrelated_port(self): with self.network() as n, self.subnet(n): sg1 = self._create_empty_sg('sg1') sg2 = self._create_empty_sg('sg2') self._create_sg_rule(sg1['id'], 'ingress', const.PROTO_NAME_TCP, remote_group_id=sg2['id']) self._make_port(self.fmt, n['network']['id'], security_groups=[sg1['id']]) # One DHCP rule, one TCP rule and two default dropping rules. self.assertEqual( 4, self.mech_driver._nb_ovn.add_acl.call_count) sg2_r = self._create_sg_rule(sg2['id'], 'egress', const.PROTO_NAME_UDP) self.mech_driver._nb_ovn.update_acls.assert_not_called() self._delete_sg_rule(sg2_r['id']) self.mech_driver._nb_ovn.update_acls.assert_not_called() def test_update_sg_duplicate_rule(self): with self.network() as n, self.subnet(n): sg1 = self._create_empty_sg('sg1') sg2 = self._create_empty_sg('sg2') self._create_sg_rule(sg1['id'], 'ingress', const.PROTO_NAME_UDP, port_range_min=22, port_range_max=23) self._make_port(self.fmt, n['network']['id'], security_groups=[sg1['id'], sg2['id']]) # One DHCP rule, one UDP rule and two default dropping rules. self.assertEqual( 4, self.mech_driver._nb_ovn.add_acl.call_count) # Add a new duplicate rule to sg2. It's expected to be added. sg2_r = self._create_sg_rule(sg2['id'], 'ingress', const.PROTO_NAME_UDP, port_range_min=22, port_range_max=23) self.assertEqual( 1, self.mech_driver._nb_ovn.update_acls.call_count) # Delete the duplicate rule. It's expected to be deleted. self._delete_sg_rule(sg2_r['id']) self.assertEqual( 2, self.mech_driver._nb_ovn.update_acls.call_count) def test_update_sg_duplicate_rule_multi_ports(self): with self.network() as n, self.subnet(n): sg1 = self._create_empty_sg('sg1') sg2 = self._create_empty_sg('sg2') sg3 = self._create_empty_sg('sg3') self._create_sg_rule(sg1['id'], 'ingress', const.PROTO_NAME_UDP, remote_group_id=sg3['id']) self._create_sg_rule(sg2['id'], 'egress', const.PROTO_NAME_TCP, port_range_min=60, port_range_max=70) self._make_port(self.fmt, n['network']['id'], security_groups=[sg1['id'], sg2['id']]) self._make_port(self.fmt, n['network']['id'], security_groups=[sg1['id'], sg2['id']]) self._make_port(self.fmt, n['network']['id'], security_groups=[sg2['id'], sg3['id']]) # Rules include 5 + 5 + 4 self.assertEqual( 14, self.mech_driver._nb_ovn.add_acl.call_count) # Add a rule to sg1 duplicate with sg2. It's expected to be added. sg1_r = self._create_sg_rule(sg1['id'], 'egress', const.PROTO_NAME_TCP, port_range_min=60, port_range_max=70) self.assertEqual( 1, self.mech_driver._nb_ovn.update_acls.call_count) # Add a rule to sg2 duplicate with sg1 but not duplicate with sg3. # It's expected to be added as well. sg2_r = self._create_sg_rule(sg2['id'], 'ingress', const.PROTO_NAME_UDP, remote_group_id=sg3['id']) self.assertEqual( 2, self.mech_driver._nb_ovn.update_acls.call_count) # Delete the duplicate rule in sg1. It's expected to be deleted. self._delete_sg_rule(sg1_r['id']) self.assertEqual( 3, self.mech_driver._nb_ovn.update_acls.call_count) # Delete the duplicate rule in sg2. It's expected to be deleted. 
self._delete_sg_rule(sg2_r['id']) self.assertEqual( 4, self.mech_driver._nb_ovn.update_acls.call_count) class TestOVNMechanismDriverMetadataPort(test_plugin.Ml2PluginV2TestCase): _mechanism_drivers = ['logger', 'ovn'] def setUp(self): super(TestOVNMechanismDriverMetadataPort, self).setUp() mm = directory.get_plugin().mechanism_manager self.mech_driver = mm.mech_drivers['ovn'].obj self.mech_driver._nb_ovn = fakes.FakeOvsdbNbOvnIdl() self.mech_driver._sb_ovn = fakes.FakeOvsdbSbOvnIdl() self.nb_ovn = self.mech_driver._nb_ovn self.sb_ovn = self.mech_driver._sb_ovn self.ctx = context.get_admin_context() ovn_conf.cfg.CONF.set_override('ovn_metadata_enabled', True, group='ovn') p = mock.patch.object(ovn_utils, 'get_revision_number', return_value=1) p.start() self.addCleanup(p.stop) def _create_fake_dhcp_port(self, device_id): return {'network_id': 'fake', 'device_owner': const.DEVICE_OWNER_DHCP, 'device_id': device_id} @mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'get_ports') def test__find_metadata_port(self, mock_get_ports): ports = [ self._create_fake_dhcp_port('dhcp-0'), self._create_fake_dhcp_port('dhcp-1'), self._create_fake_dhcp_port(const.DEVICE_ID_RESERVED_DHCP_PORT), self._create_fake_dhcp_port('ovnmeta-0')] mock_get_ports.return_value = ports md_port = self.mech_driver._ovn_client._find_metadata_port( self.ctx, 'fake-net-id') self.assertEqual('ovnmeta-0', md_port['device_id']) def test_metadata_port_on_network_create(self): """Check metadata port create. Check that a localport is created when a neutron network is created. """ with self.network(): self.assertEqual(1, self.nb_ovn.create_lswitch_port.call_count) args, kwargs = self.nb_ovn.create_lswitch_port.call_args self.assertEqual('localport', kwargs['type']) def test_metadata_port_not_created_if_exists(self): """Check that metadata port is not created if it already exists. In the event of a sync, it might happen that a metadata port exists already. When we are creating the logical switch in OVN we don't want this port to be created again. """ with mock.patch.object( self.mech_driver._ovn_client, '_find_metadata_port', return_value={'port': {'id': 'metadata_port1'}}): with self.network(): self.assertEqual(0, self.nb_ovn.create_lswitch_port.call_count) def test_metadata_ip_on_subnet_create(self): """Check metadata port update. Check that the metadata port is updated with a new IP address when a subnet is created. """ with self.network(set_context=True, tenant_id='test') as net1: with self.subnet(network=net1, cidr='10.0.0.0/24') as subnet1: # Create a network:dhcp owner port just as how Neutron DHCP # agent would do. with self.port(subnet=subnet1, device_owner=const.DEVICE_OWNER_DHCP, device_id='dhcpxxxx', set_context=True, tenant_id='test'): with self.subnet(network=net1, cidr='20.0.0.0/24'): self.assertEqual( 2, self.nb_ovn.set_lswitch_port.call_count) args, kwargs = self.nb_ovn.set_lswitch_port.call_args self.assertEqual('localport', kwargs['type']) self.assertEqual('10.0.0.2/24 20.0.0.2/24', kwargs['external_ids'].get( ovn_const.OVN_CIDRS_EXT_ID_KEY, '')) def test_metadata_port_on_network_delete(self): """Check metadata port delete. Check that the metadata port is deleted when a network is deleted. 
""" net = self._make_network(self.fmt, name="net1", admin_state_up=True) network_id = net['network']['id'] req = self.new_delete_request('networks', network_id) res = req.get_response(self.api) self.assertEqual(exc.HTTPNoContent.code, res.status_int) self.assertEqual(1, self.nb_ovn.delete_lswitch_port.call_count) class TestOVNParentTagPortBinding(OVNMechanismDriverTestCase): def test_create_port_with_invalid_parent(self): binding = {OVN_PROFILE: {"parent_name": 'invalid', 'tag': 1}} with self.network() as n: with self.subnet(n): self._create_port( self.fmt, n['network']['id'], expected_res_status=404, arg_list=(OVN_PROFILE,), **binding) @mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'get_port') def test_create_port_with_parent_and_tag(self, mock_get_port): binding = {OVN_PROFILE: {"parent_name": '', 'tag': 1}} with self.network() as n: with self.subnet(n) as s: with self.port(s) as p: binding[OVN_PROFILE]['parent_name'] = p['port']['id'] res = self._create_port(self.fmt, n['network']['id'], arg_list=(OVN_PROFILE,), **binding) port = self.deserialize(self.fmt, res) self.assertEqual(port['port'][OVN_PROFILE], binding[OVN_PROFILE]) mock_get_port.assert_called_with(mock.ANY, p['port']['id']) def test_create_port_with_invalid_tag(self): binding = {OVN_PROFILE: {"parent_name": '', 'tag': 'a'}} with self.network() as n: with self.subnet(n) as s: with self.port(s) as p: binding[OVN_PROFILE]['parent_name'] = p['port']['id'] self._create_port(self.fmt, n['network']['id'], arg_list=(OVN_PROFILE,), expected_res_status=400, **binding) class TestOVNVtepPortBinding(OVNMechanismDriverTestCase): def test_create_port_with_vtep_options(self): binding = {OVN_PROFILE: {"vtep-physical-switch": 'psw1', "vtep-logical-switch": 'lsw1'}} with self.network() as n: with self.subnet(n): res = self._create_port(self.fmt, n['network']['id'], arg_list=(OVN_PROFILE,), **binding) port = self.deserialize(self.fmt, res) self.assertEqual(binding[OVN_PROFILE], port['port'][OVN_PROFILE]) def test_create_port_with_only_vtep_physical_switch(self): binding = {OVN_PROFILE: {"vtep-physical-switch": 'psw'}} with self.network() as n: with self.subnet(n): self._create_port(self.fmt, n['network']['id'], arg_list=(OVN_PROFILE,), expected_res_status=400, **binding) def test_create_port_with_only_vtep_logical_switch(self): binding = {OVN_PROFILE: {"vtep-logical-switch": 'lsw1'}} with self.network() as n: with self.subnet(n): self._create_port(self.fmt, n['network']['id'], arg_list=(OVN_PROFILE,), expected_res_status=400, **binding) def test_create_port_with_invalid_vtep_logical_switch(self): binding = {OVN_PROFILE: {"vtep-logical-switch": 1234, "vtep-physical-switch": "psw1"}} with self.network() as n: with self.subnet(n): self._create_port(self.fmt, n['network']['id'], arg_list=(OVN_PROFILE,), expected_res_status=400, **binding) def test_create_port_with_vtep_options_and_parent_name_tag(self): binding = {OVN_PROFILE: {"vtep-logical-switch": "lsw1", "vtep-physical-switch": "psw1", "parent_name": "pname", "tag": 22}} with self.network() as n: with self.subnet(n): self._create_port(self.fmt, n['network']['id'], arg_list=(OVN_PROFILE,), expected_res_status=400, **binding) def test_create_port_with_vtep_options_and_check_vtep_keys(self): port = { 'id': 'foo-port', 'device_owner': 'compute:None', 'fixed_ips': [{'subnet_id': 'foo-subnet', 'ip_address': '10.0.0.11'}], OVN_PROFILE: {"vtep-logical-switch": "lsw1", "vtep-physical-switch": "psw1"} } ovn_port_info = ( self.mech_driver._ovn_client._get_port_options(port)) 
self.assertEqual(port[OVN_PROFILE]["vtep-physical-switch"], ovn_port_info.options["vtep-physical-switch"]) self.assertEqual(port[OVN_PROFILE]["vtep-logical-switch"], ovn_port_info.options["vtep-logical-switch"]) @mock.patch.object(ovn_client.OVNClient, '_is_virtual_port_supported', lambda *args: True) class TestOVNVirtualPort(OVNMechanismDriverTestCase): def setUp(self): super(TestOVNVirtualPort, self).setUp() self.context = context.get_admin_context() self.nb_idl = self.mech_driver._ovn_client._nb_idl self.net = self._make_network( self.fmt, name='net1', admin_state_up=True)['network'] self.subnet = self._make_subnet( self.fmt, {'network': self.net}, '10.0.0.1', '10.0.0.0/24')['subnet'] @mock.patch.object(ovn_client.OVNClient, 'get_virtual_port_parents') def test_create_port_with_virtual_type_and_options(self, mock_get_parents): fake_parents = ['parent-0', 'parent-1'] mock_get_parents.return_value = fake_parents port = {'id': 'virt-port', 'mac_address': '00:00:00:00:00:00', 'device_owner': '', 'network_id': self.net['id'], 'fixed_ips': [{'subnet_id': self.subnet['id'], 'ip_address': '10.0.0.55'}]} port_info = self.mech_driver._ovn_client._get_port_options( port) self.assertEqual(ovn_const.LSP_TYPE_VIRTUAL, port_info.type) self.assertEqual( '10.0.0.55', port_info.options[ovn_const.LSP_OPTIONS_VIRTUAL_IP_KEY]) self.assertIn( 'parent-0', port_info.options[ ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY]) self.assertIn( 'parent-1', port_info.options[ ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY]) @mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'get_ports') def _test_set_unset_virtual_port_type(self, mock_get_ports, unset=False): cmd = self.nb_idl.set_lswitch_port_to_virtual_type if unset: cmd = self.nb_idl.unset_lswitch_port_to_virtual_type fake_txn = mock.Mock() parent_port = {'id': 'parent-port', 'network_id': 'fake-network'} port = {'id': 'virt-port'} mock_get_ports.return_value = [port] self.mech_driver._ovn_client._set_unset_virtual_port_type( self.context, fake_txn, parent_port, ['10.0.0.55'], unset=unset) args = {'lport_name': 'virt-port', 'virtual_parent': 'parent-port', 'if_exists': True} if not unset: args['vip'] = '10.0.0.55' cmd.assert_called_once_with(**args) def test__set_unset_virtual_port_type_set(self): self._test_set_unset_virtual_port_type(unset=False) def test__set_unset_virtual_port_type_unset(self): self._test_set_unset_virtual_port_type(unset=True) def test_delete_virtual_port_parent(self): self.nb_idl.ls_get.return_value.execute.return_value = ( fakes.FakeOvsdbRow.create_one_ovsdb_row(attrs={'ports': []})) virt_port = self._make_port(self.fmt, self.net['id'])['port'] virt_ip = virt_port['fixed_ips'][0]['ip_address'] parent = self._make_port( self.fmt, self.net['id'], allowed_address_pairs=[{'ip_address': virt_ip}])['port'] fake_row = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'name': virt_port['id'], 'type': ovn_const.LSP_TYPE_VIRTUAL, 'options': {ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY: parent['id']}}) self.nb_idl.ls_get.return_value.execute.return_value = ( mock.Mock(ports=[fake_row])) self.mech_driver._ovn_client.delete_port(self.context, parent['id']) self.nb_idl.unset_lswitch_port_to_virtual_type.assert_called_once_with( virt_port['id'], parent['id'], if_exists=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/test_helpers.py0000644000175000017500000001552400000000000027671 0ustar00coreycorey00000000000000# Copyright (c) 2014
Thales Services SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib import context from neutron_lib.plugins import utils as plugin_utils from oslo_config import cfg from oslo_db import exception as exc from sqlalchemy.orm import query from neutron.plugins.ml2.drivers import type_vlan from neutron.tests.unit import testlib_api TENANT_NET = 'phys_net2' VLAN_MIN = 200 VLAN_MAX = 209 VLAN_OUTSIDE = 100 NETWORK_VLAN_RANGES = { TENANT_NET: [(VLAN_MIN, VLAN_MAX)], } NETWORK_VLAN_RANGES_CFG_ENTRIES = [TENANT_NET, "%s:%s:%s" % (TENANT_NET, VLAN_MIN, VLAN_MAX)] SERVICE_PLUGIN_KLASS = ('neutron.services.network_segment_range.plugin.' 'NetworkSegmentRangePlugin') class HelpersTest(testlib_api.SqlTestCase): def setUp(self): super(HelpersTest, self).setUp() self.driver = type_vlan.VlanTypeDriver() self.driver.network_vlan_ranges = NETWORK_VLAN_RANGES self.driver._sync_vlan_allocations() self.context = context.get_admin_context() def check_raw_segment(self, expected, observed): for key, value in expected.items(): self.assertEqual(value, observed[key]) def test_primary_keys(self): self.assertEqual(set(['physical_network', 'vlan_id']), self.driver.primary_keys) def test_allocate_specific_unallocated_segment_in_pools(self): expected = dict(physical_network=TENANT_NET, vlan_id=VLAN_MIN) observed = self.driver.allocate_fully_specified_segment(self.context, **expected) self.check_raw_segment(expected, observed) def test_allocate_specific_allocated_segment_in_pools(self): raw_segment = dict(physical_network=TENANT_NET, vlan_id=VLAN_MIN) self.driver.allocate_fully_specified_segment(self.context, **raw_segment) observed = self.driver.allocate_fully_specified_segment(self.context, **raw_segment) self.assertIsNone(observed) def test_allocate_specific_finally_allocated_segment_in_pools(self): # Test case: allocate a specific unallocated segment in pools but # the segment is allocated concurrently between select and update raw_segment = dict(physical_network=TENANT_NET, vlan_id=VLAN_MIN) with mock.patch.object(query.Query, 'update', return_value=0): observed = self.driver.allocate_fully_specified_segment( self.context, **raw_segment) self.assertIsNone(observed) def test_allocate_specific_unallocated_segment_outside_pools(self): expected = dict(physical_network=TENANT_NET, vlan_id=VLAN_OUTSIDE) observed = self.driver.allocate_fully_specified_segment(self.context, **expected) self.check_raw_segment(expected, observed) def test_allocate_specific_allocated_segment_outside_pools(self): raw_segment = dict(physical_network=TENANT_NET, vlan_id=VLAN_OUTSIDE) self.driver.allocate_fully_specified_segment(self.context, **raw_segment) observed = self.driver.allocate_fully_specified_segment(self.context, **raw_segment) self.assertIsNone(observed) def test_allocate_specific_finally_unallocated_segment_outside_pools(self): # Test case: allocate a specific allocated segment in pools but # the segment is concurrently unallocated after select or update expected = dict(physical_network=TENANT_NET, 
vlan_id=VLAN_MIN) with mock.patch.object(self.driver.model, 'save'): observed = self.driver.allocate_fully_specified_segment( self.context, **expected) self.check_raw_segment(expected, observed) def test_allocate_partial_segment_without_filters(self): expected = dict(physical_network=TENANT_NET) observed = self.driver.allocate_partially_specified_segment( self.context) self.check_raw_segment(expected, observed) def test_allocate_partial_segment_with_filter(self): expected = dict(physical_network=TENANT_NET) observed = self.driver.allocate_partially_specified_segment( self.context, **expected) self.check_raw_segment(expected, observed) def test_allocate_partial_segment_no_resource_available(self): for i in range(VLAN_MIN, VLAN_MAX + 1): self.driver.allocate_partially_specified_segment(self.context) observed = self.driver.allocate_partially_specified_segment( self.context) self.assertIsNone(observed) def test_allocate_partial_segment_outside_pools(self): raw_segment = dict(physical_network='other_phys_net') observed = self.driver.allocate_partially_specified_segment( self.context, **raw_segment) self.assertIsNone(observed) def test_allocate_partial_segment_first_attempt_fails(self): expected = dict(physical_network=TENANT_NET) with mock.patch.object(query.Query, 'update', side_effect=[0, 1]): self.assertRaises( exc.RetryRequest, self.driver.allocate_partially_specified_segment, self.context, **expected) observed = self.driver.allocate_partially_specified_segment( self.context, **expected) self.check_raw_segment(expected, observed) class HelpersTestWithNetworkSegmentRange(HelpersTest): def setUp(self): super(HelpersTestWithNetworkSegmentRange, self).setUp() cfg.CONF.set_override('network_vlan_ranges', NETWORK_VLAN_RANGES_CFG_ENTRIES, group='ml2_type_vlan') cfg.CONF.set_override('service_plugins', [SERVICE_PLUGIN_KLASS]) self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges( NETWORK_VLAN_RANGES_CFG_ENTRIES) self.context = context.get_admin_context() self.driver = type_vlan.VlanTypeDriver() self.driver.initialize_network_segment_range_support() self.driver._sync_vlan_allocations() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/test_type_flat.py0000644000175000017500000001633600000000000030220 0ustar00coreycorey00000000000000# Copyright (c) 2014 Thales Services SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
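# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the neutron source tree).
# The HelpersTest cases in test_helpers.py above pin down the contract of the
# ML2 segment allocation helpers: allocate_fully_specified_segment() returns
# the raw segment when it can be allocated and None when another request
# already holds it, while allocate_partially_specified_segment() hands out
# any free segment from the configured pools, or None when they are
# exhausted. A minimal in-memory approximation of that contract, assuming a
# plain dict keyed by (physical_network, vlan_id) and pools shaped like
# {physnet: (vlan_min, vlan_max)}, might look like this:

def _sketch_allocate_fully_specified(allocations, physical_network, vlan_id):
    """Return a segment dict on success, None if already allocated."""
    key = (physical_network, vlan_id)
    if allocations.get(key):
        return None   # mirrors the driver returning None on a conflict
    allocations[key] = True
    return {'physical_network': physical_network, 'vlan_id': vlan_id}


def _sketch_allocate_partially_specified(allocations, pools):
    """Return the first free segment from the pools, or None if exhausted."""
    for physical_network, (vlan_min, vlan_max) in pools.items():
        for vlan_id in range(vlan_min, vlan_max + 1):
            segment = _sketch_allocate_fully_specified(
                allocations, physical_network, vlan_id)
            if segment:
                return segment
    return None
# ---------------------------------------------------------------------------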
from neutron_lib import constants as p_const from neutron_lib import context from neutron_lib import exceptions as exc from neutron_lib.plugins.ml2 import api from oslo_config import cfg from neutron.objects.plugins.ml2 import flatallocation as flat_obj from neutron.plugins.ml2.drivers import type_flat from neutron.tests import base from neutron.tests.unit import testlib_api FLAT_NETWORKS = ['flat_net1', 'flat_net2'] CORE_PLUGIN = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' class FlatTypeTest(testlib_api.SqlTestCase): def setUp(self): super(FlatTypeTest, self).setUp() self.setup_coreplugin(CORE_PLUGIN) cfg.CONF.set_override('flat_networks', FLAT_NETWORKS, group='ml2_type_flat') self.driver = type_flat.FlatTypeDriver() self.context = context.Context() self.driver.physnet_mtus = [] def _get_allocation(self, context, segment): return flat_obj.FlatAllocation.get_object( context, physical_network=segment[api.PHYSICAL_NETWORK]) def test_is_partial_segment(self): segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, api.PHYSICAL_NETWORK: 'flat_net1'} self.assertFalse(self.driver.is_partial_segment(segment)) def test_validate_provider_segment(self): segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, api.PHYSICAL_NETWORK: 'flat_net1'} self.driver.validate_provider_segment(segment) def test_validate_provider_phynet_name(self): self.driver._parse_networks([]) segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, api.PHYSICAL_NETWORK: 'flat_net1'} self.assertRaises(exc.InvalidInput, self.driver.validate_provider_segment, segment=segment) def test_validate_provider_phynet_name_multiple(self): self.driver._parse_networks(['flat_net1', 'flat_net2']) segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, api.PHYSICAL_NETWORK: 'flat_net1'} self.driver.validate_provider_segment(segment) segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, api.PHYSICAL_NETWORK: 'flat_net2'} self.driver.validate_provider_segment(segment) def test_validate_provider_segment_without_physnet_restriction(self): self.driver._parse_networks('*') segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, api.PHYSICAL_NETWORK: 'other_flat_net'} self.driver.validate_provider_segment(segment) def test_validate_provider_segment_with_missing_physical_network(self): segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT} self.assertRaises(exc.InvalidInput, self.driver.validate_provider_segment, segment) def test_validate_provider_segment_with_unsupported_physical_network(self): segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, api.PHYSICAL_NETWORK: 'other_flat_net'} self.assertRaises(exc.InvalidInput, self.driver.validate_provider_segment, segment) def test_validate_provider_segment_with_unallowed_segmentation_id(self): segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, api.PHYSICAL_NETWORK: 'flat_net1', api.SEGMENTATION_ID: 1234} self.assertRaises(exc.InvalidInput, self.driver.validate_provider_segment, segment) def test_reserve_provider_segment(self): segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, api.PHYSICAL_NETWORK: 'flat_net1'} observed = self.driver.reserve_provider_segment(self.context, segment) alloc = self._get_allocation(self.context, observed) self.assertEqual(segment[api.PHYSICAL_NETWORK], alloc.physical_network) def test_release_segment(self): segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, api.PHYSICAL_NETWORK: 'flat_net1'} self.driver.reserve_provider_segment(self.context, segment) self.driver.release_segment(self.context, segment) alloc = self._get_allocation(self.context, segment) self.assertIsNone(alloc) def test_reserve_provider_segment_already_reserved(self): 
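        # A flat physical network admits at most one segment, so reserving
        # the same physnet a second time must raise FlatNetworkInUse.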
segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, api.PHYSICAL_NETWORK: 'flat_net1'} self.driver.reserve_provider_segment(self.context, segment) self.assertRaises(exc.FlatNetworkInUse, self.driver.reserve_provider_segment, self.context, segment) def test_allocate_tenant_segment(self): observed = self.driver.allocate_tenant_segment(self.context) self.assertIsNone(observed) def test_get_mtu(self): cfg.CONF.set_override('global_physnet_mtu', 1475) cfg.CONF.set_override('path_mtu', 1400, group='ml2') self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400} self.assertEqual(1450, self.driver.get_mtu('physnet1')) cfg.CONF.set_override('global_physnet_mtu', 1375) cfg.CONF.set_override('path_mtu', 1400, group='ml2') self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400} self.assertEqual(1375, self.driver.get_mtu('physnet1')) cfg.CONF.set_override('global_physnet_mtu', 0) cfg.CONF.set_override('path_mtu', 1425, group='ml2') self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400} self.assertEqual(1400, self.driver.get_mtu('physnet2')) cfg.CONF.set_override('global_physnet_mtu', 0) cfg.CONF.set_override('path_mtu', 0, group='ml2') self.driver.physnet_mtus = {} self.assertEqual(0, self.driver.get_mtu('physnet1')) def test_parse_physical_network_mtus(self): cfg.CONF.set_override( 'physical_network_mtus', ['physnet1:1500', 'physnet2:1500', 'physnet3:9000'], group='ml2') driver = type_flat.FlatTypeDriver() self.assertEqual('1500', driver.physnet_mtus['physnet1']) self.assertEqual('1500', driver.physnet_mtus['physnet2']) self.assertEqual('9000', driver.physnet_mtus['physnet3']) class FlatTypeDefaultTest(base.BaseTestCase): def setUp(self): super(FlatTypeDefaultTest, self).setUp() self.driver = type_flat.FlatTypeDriver() self.driver.physnet_mtus = [] def test_validate_provider_segment_default(self): segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT, api.PHYSICAL_NETWORK: 'other_flat_net'} self.driver.validate_provider_segment(segment) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/test_type_geneve.py0000644000175000017500000000466300000000000030543 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
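# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the neutron source tree).
# FlatTypeTest.test_get_mtu above fixes the MTU selection rule for
# non-tunnelled types: the effective MTU is the smallest of the configured
# candidates, where global_physnet_mtu and a per-physnet override both count,
# a value of 0 means "unset", and (judging by the assertions) the ml2
# path_mtu option does not apply to flat networks. A self-contained
# approximation:

def _sketch_flat_mtu(global_physnet_mtu, physnet_mtu=None):
    """Smallest configured MTU candidate; 0 when nothing is configured."""
    candidates = [mtu for mtu in (global_physnet_mtu, physnet_mtu) if mtu]
    return min(candidates) if candidates else 0

# E.g. _sketch_flat_mtu(1475, 1450) == 1450 and _sketch_flat_mtu(0, 1400)
# == 1400, matching the expectations asserted in the test above.
# ---------------------------------------------------------------------------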
from neutron_lib import constants as p_const from neutron.plugins.ml2.drivers import type_geneve from neutron.tests.unit.plugins.ml2.drivers import base_type_tunnel from neutron.tests.unit.plugins.ml2 import test_rpc from neutron.tests.unit import testlib_api TUNNEL_IP_ONE = "10.10.10.77" TUNNEL_IP_TWO = "10.10.10.78" HOST_ONE = 'fake_host_one1' HOST_TWO = 'fake_host_two2' class GeneveTypeTest(base_type_tunnel.TunnelTypeTestMixin, base_type_tunnel.TunnelTypeNetworkSegmentRangeTestMixin, testlib_api.SqlTestCase): DRIVER_CLASS = type_geneve.GeneveTypeDriver TYPE = p_const.TYPE_GENEVE def test_get_endpoints(self): self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE) self.driver.add_endpoint(TUNNEL_IP_TWO, HOST_TWO) endpoints = self.driver.get_endpoints() for endpoint in endpoints: if endpoint['ip_address'] == TUNNEL_IP_ONE: self.assertEqual(HOST_ONE, endpoint['host']) elif endpoint['ip_address'] == TUNNEL_IP_TWO: self.assertEqual(HOST_TWO, endpoint['host']) class GeneveTypeMultiRangeTest(base_type_tunnel.TunnelTypeMultiRangeTestMixin, testlib_api.SqlTestCase): DRIVER_CLASS = type_geneve.GeneveTypeDriver class GeneveTypeRpcCallbackTest(base_type_tunnel.TunnelRpcCallbackTestMixin, test_rpc.RpcCallbacksTestCase, testlib_api.SqlTestCase): DRIVER_CLASS = type_geneve.GeneveTypeDriver TYPE = p_const.TYPE_GENEVE class GeneveTypeTunnelMTUTest(base_type_tunnel.TunnelTypeMTUTestMixin, testlib_api.SqlTestCase): DRIVER_CLASS = type_geneve.GeneveTypeDriver TYPE = p_const.TYPE_GENEVE ENCAP_OVERHEAD = p_const.GENEVE_ENCAP_MIN_OVERHEAD ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/test_type_gre.py0000644000175000017500000000472300000000000030044 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
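# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the neutron source tree).
# GeneveTypeTunnelMTUTest above parameterises TunnelTypeMTUTestMixin with
# ENCAP_OVERHEAD = p_const.GENEVE_ENCAP_MIN_OVERHEAD; the idea under test is
# that an overlay segment's usable MTU is the underlay MTU reduced by the
# tunnel encapsulation overhead plus the outer IP header. Roughly:

def _sketch_overlay_mtu(underlay_mtu, encap_overhead, ip_header_len):
    """Usable overlay MTU once tunnel encapsulation is accounted for."""
    return max(underlay_mtu - encap_overhead - ip_header_len, 0)

# With a 1500-byte underlay, a 30-byte minimum Geneve overhead and a 20-byte
# IPv4 outer header this yields 1450 -- the kind of figure the MTU mixin
# asserts for each tunnel type via its ENCAP_OVERHEAD class attribute.
# ---------------------------------------------------------------------------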
from neutron_lib import constants as p_const from neutron.plugins.ml2.drivers import type_gre from neutron.tests.unit.plugins.ml2.drivers import base_type_tunnel from neutron.tests.unit.plugins.ml2 import test_rpc from neutron.tests.unit import testlib_api TUNNEL_IP_ONE = "10.10.10.10" TUNNEL_IP_TWO = "10.10.10.20" HOST_ONE = 'fake_host_one' HOST_TWO = 'fake_host_two' class GreTypeTest(base_type_tunnel.TunnelTypeTestMixin, base_type_tunnel.TunnelTypeNetworkSegmentRangeTestMixin, testlib_api.SqlTestCase): DRIVER_MODULE = type_gre DRIVER_CLASS = type_gre.GreTypeDriver TYPE = p_const.TYPE_GRE def test_get_endpoints(self): self.add_endpoint() self.add_endpoint( base_type_tunnel.TUNNEL_IP_TWO, base_type_tunnel.HOST_TWO) endpoints = self.driver.get_endpoints() for endpoint in endpoints: if endpoint['ip_address'] == base_type_tunnel.TUNNEL_IP_ONE: self.assertEqual(base_type_tunnel.HOST_ONE, endpoint['host']) elif endpoint['ip_address'] == base_type_tunnel.TUNNEL_IP_TWO: self.assertEqual(base_type_tunnel.HOST_TWO, endpoint['host']) class GreTypeMultiRangeTest(base_type_tunnel.TunnelTypeMultiRangeTestMixin, testlib_api.SqlTestCase): DRIVER_CLASS = type_gre.GreTypeDriver class GreTypeRpcCallbackTest(base_type_tunnel.TunnelRpcCallbackTestMixin, test_rpc.RpcCallbacksTestCase, testlib_api.SqlTestCase): DRIVER_CLASS = type_gre.GreTypeDriver TYPE = p_const.TYPE_GRE class GreTypeTunnelMTUTest(base_type_tunnel.TunnelTypeMTUTestMixin, testlib_api.SqlTestCase): DRIVER_CLASS = type_gre.GreTypeDriver TYPE = p_const.TYPE_GRE ENCAP_OVERHEAD = p_const.GRE_ENCAP_OVERHEAD ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/test_type_local.py0000644000175000017500000000521100000000000030352 0ustar00coreycorey00000000000000# Copyright (c) 2014 Thales Services SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
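# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the neutron source tree).
# The Geneve and GRE suites above (and the VXLAN one further below) reuse
# identical test bodies through mixins that only vary class attributes such
# as DRIVER_CLASS and TYPE; concrete suites mix them into a TestCase base
# like testlib_api.SqlTestCase. The pattern, stripped to its essentials:

class _SketchTunnelTypeTestMixin(object):
    DRIVER_CLASS = None   # concrete suites point this at a real type driver

    def setUp(self):
        super(_SketchTunnelTypeTestMixin, self).setUp()
        self.driver = self.DRIVER_CLASS()

    def test_endpoints_round_trip(self):
        # assertIn comes from the TestCase class this mixin is combined with
        self.driver.add_endpoint('192.0.2.1', 'sketch-host')
        ips = [e['ip_address'] for e in self.driver.get_endpoints()]
        self.assertIn('192.0.2.1', ips)
# ---------------------------------------------------------------------------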
from neutron_lib import constants as p_const from neutron_lib import exceptions as exc from neutron_lib.plugins.ml2 import api from neutron.plugins.ml2.drivers import type_local from neutron.tests import base class LocalTypeTest(base.BaseTestCase): def setUp(self): super(LocalTypeTest, self).setUp() self.driver = type_local.LocalTypeDriver() self.context = None def test_is_partial_segment(self): segment = {api.NETWORK_TYPE: p_const.TYPE_LOCAL} self.assertFalse(self.driver.is_partial_segment(segment)) def test_validate_provider_segment(self): segment = {api.NETWORK_TYPE: p_const.TYPE_LOCAL} self.driver.validate_provider_segment(segment) def test_validate_provider_segment_with_unallowed_physical_network(self): segment = {api.NETWORK_TYPE: p_const.TYPE_LOCAL, api.PHYSICAL_NETWORK: 'phys_net'} self.assertRaises(exc.InvalidInput, self.driver.validate_provider_segment, segment) def test_validate_provider_segment_with_unallowed_segmentation_id(self): segment = {api.NETWORK_TYPE: p_const.TYPE_LOCAL, api.SEGMENTATION_ID: 2} self.assertRaises(exc.InvalidInput, self.driver.validate_provider_segment, segment) def test_reserve_provider_segment(self): segment = {api.NETWORK_TYPE: p_const.TYPE_LOCAL} observed = self.driver.reserve_provider_segment(self.context, segment) self.assertEqual(segment, observed) def test_release_provider_segment(self): segment = {api.NETWORK_TYPE: p_const.TYPE_LOCAL} observed = self.driver.reserve_provider_segment(self.context, segment) self.driver.release_segment(self.context, observed) def test_allocate_tenant_segment(self): expected = {api.NETWORK_TYPE: p_const.TYPE_LOCAL} observed = self.driver.allocate_tenant_segment(self.context) self.assertEqual(expected, observed) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/test_type_vlan.py0000644000175000017500000004227400000000000030232 0ustar00coreycorey00000000000000# Copyright (c) 2014 Thales Services SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
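# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the neutron source tree).
# LocalTypeTest above documents that 'local' segments carry no allocation
# state: reserving echoes the segment back, releasing is a no-op, and tenant
# allocation always yields the bare {'network_type': 'local'} descriptor.
# A stand-in driver with the same observable behaviour:

class _SketchLocalTypeDriver(object):
    def reserve_provider_segment(self, context, segment):
        return segment        # nothing to book: local is node-scoped

    def release_segment(self, context, segment):
        pass                  # no allocation table to update

    def allocate_tenant_segment(self, context):
        return {'network_type': 'local'}
# ---------------------------------------------------------------------------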
import mock from neutron_lib import constants as p_const from neutron_lib import context from neutron_lib.db import api as db_api from neutron_lib import exceptions as exc from neutron_lib.plugins.ml2 import api from neutron_lib.plugins import utils as plugin_utils from oslo_config import cfg from testtools import matchers from neutron.objects import network_segment_range as obj_network_segment_range from neutron.objects.plugins.ml2 import vlanallocation as vlan_alloc_obj from neutron.plugins.ml2.drivers import type_vlan from neutron.tests.unit import testlib_api PROVIDER_NET = 'phys_net1' TENANT_NET = 'phys_net2' UNCONFIGURED_NET = 'no_net' VLAN_MIN = 200 VLAN_MAX = 209 NETWORK_VLAN_RANGES = [PROVIDER_NET, "%s:%s:%s" % (TENANT_NET, VLAN_MIN, VLAN_MAX)] UPDATED_VLAN_RANGES = { PROVIDER_NET: [], TENANT_NET: [(VLAN_MIN + 5, VLAN_MAX + 5)], } EMPTY_VLAN_RANGES = { PROVIDER_NET: [] } NETWORK_VLAN_RANGES_WITH_UNCONFIG = { PROVIDER_NET: [], TENANT_NET: [(VLAN_MIN + 5, VLAN_MAX + 5)], UNCONFIGURED_NET: [(VLAN_MIN, VLAN_MAX)] } CORE_PLUGIN = 'ml2' SERVICE_PLUGIN_KLASS = ('neutron.services.network_segment_range.plugin.' 'NetworkSegmentRangePlugin') class VlanTypeTest(testlib_api.SqlTestCase): def setUp(self): super(VlanTypeTest, self).setUp() cfg.CONF.set_override('network_vlan_ranges', NETWORK_VLAN_RANGES, group='ml2_type_vlan') self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges( NETWORK_VLAN_RANGES) self.driver = type_vlan.VlanTypeDriver() self.driver._sync_vlan_allocations() self.context = context.Context() self.driver.physnet_mtus = [] self.setup_coreplugin(CORE_PLUGIN) def test_parse_network_exception_handling(self): with mock.patch.object(plugin_utils, 'parse_network_vlan_ranges') as parse_ranges: parse_ranges.side_effect = Exception('any exception') self.assertRaises(SystemExit, self.driver._parse_network_vlan_ranges) @db_api.CONTEXT_READER def _get_allocation(self, context, segment): return vlan_alloc_obj.VlanAllocation.get_object( context, physical_network=segment[api.PHYSICAL_NETWORK], vlan_id=segment[api.SEGMENTATION_ID]) def test_partial_segment_is_partial_segment(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN} self.assertTrue(self.driver.is_partial_segment(segment)) def test_specific_segment_is_not_partial_segment(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: PROVIDER_NET, api.SEGMENTATION_ID: 1} self.assertFalse(self.driver.is_partial_segment(segment)) def test_validate_provider_segment(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: PROVIDER_NET, api.SEGMENTATION_ID: 1} self.assertIsNone(self.driver.validate_provider_segment(segment)) def test_validate_provider_segment_without_segmentation_id(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: TENANT_NET} self.driver.validate_provider_segment(segment) def test_validate_provider_segment_without_physical_network(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN} self.driver.validate_provider_segment(segment) def test_validate_provider_segment_no_phys_network_seg_id_0(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.SEGMENTATION_ID: 0} self.assertRaises(exc.InvalidInput, self.driver.validate_provider_segment, segment) def test_validate_provider_segment_with_missing_physical_network(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.SEGMENTATION_ID: 1} self.assertRaises(exc.InvalidInput, self.driver.validate_provider_segment, segment) def test_validate_provider_segment_with_invalid_physical_network(self): 
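        # A physnet absent from network_vlan_ranges is invalid provider
        # input and must be rejected with InvalidInput.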
segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: 'other_phys_net', api.SEGMENTATION_ID: 1} self.assertRaises(exc.InvalidInput, self.driver.validate_provider_segment, segment) def test_validate_provider_segment_with_invalid_segmentation_id(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: PROVIDER_NET} segmentation_ids = [ p_const.MIN_VLAN_TAG - 1, p_const.MAX_VLAN_TAG + 1] for segmentation_id in segmentation_ids: segment[api.SEGMENTATION_ID] = segmentation_id self.assertRaises(exc.InvalidInput, self.driver.validate_provider_segment, segment) def test_validate_provider_segment_with_invalid_input(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: PROVIDER_NET, api.SEGMENTATION_ID: 1, 'invalid': 1} self.assertRaises(exc.InvalidInput, self.driver.validate_provider_segment, segment) def test_validate_provider_segment_with_physical_network_only(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: PROVIDER_NET} self.assertRaises(exc.InvalidInput, self.driver.validate_provider_segment, segment) def test_sync_vlan_allocations(self): def check_in_ranges(network_vlan_ranges): vlan_min, vlan_max = network_vlan_ranges[TENANT_NET][0] segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: TENANT_NET} segment[api.SEGMENTATION_ID] = vlan_min - 1 self.assertIsNone( self._get_allocation(self.context, segment)) segment[api.SEGMENTATION_ID] = vlan_max + 1 self.assertIsNone( self._get_allocation(self.context, segment)) segment[api.SEGMENTATION_ID] = vlan_min self.assertFalse( self._get_allocation(self.context, segment).allocated) segment[api.SEGMENTATION_ID] = vlan_max self.assertFalse( self._get_allocation(self.context, segment).allocated) check_in_ranges(self.network_vlan_ranges) self.driver.network_vlan_ranges = UPDATED_VLAN_RANGES self.driver._sync_vlan_allocations() check_in_ranges(UPDATED_VLAN_RANGES) self.driver.network_vlan_ranges = NETWORK_VLAN_RANGES_WITH_UNCONFIG self.driver._sync_vlan_allocations() self.driver.network_vlan_ranges = UPDATED_VLAN_RANGES with mock.patch.object(type_vlan.LOG, 'debug') as mock_debug: self.driver._sync_vlan_allocations() mock_debug.assert_called_once_with( 'Removing any VLAN register on physical networks %s', {UNCONFIGURED_NET}) check_in_ranges(UPDATED_VLAN_RANGES) self.driver.network_vlan_ranges = EMPTY_VLAN_RANGES self.driver._sync_vlan_allocations() vlan_min, vlan_max = UPDATED_VLAN_RANGES[TENANT_NET][0] segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: TENANT_NET} segment[api.SEGMENTATION_ID] = vlan_min self.assertIsNone( self._get_allocation(self.context, segment)) segment[api.SEGMENTATION_ID] = vlan_max self.assertIsNone( self._get_allocation(self.context, segment)) def test_reserve_provider_segment(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: PROVIDER_NET, api.SEGMENTATION_ID: 101} alloc = self._get_allocation(self.context, segment) self.assertIsNone(alloc) observed = self.driver.reserve_provider_segment(self.context, segment) alloc = self._get_allocation(self.context, observed) self.assertTrue(alloc.allocated) def test_reserve_provider_segment_already_allocated(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: PROVIDER_NET, api.SEGMENTATION_ID: 101} observed = self.driver.reserve_provider_segment(self.context, segment) self.assertRaises(exc.VlanIdInUse, self.driver.reserve_provider_segment, self.context, observed) def test_reserve_provider_segment_in_tenant_pools(self): 
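        # Reserving a specific VLAN inside the tenant pool must flip the
        # pre-synced allocation row from free to allocated.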
segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: TENANT_NET, api.SEGMENTATION_ID: VLAN_MIN} alloc = self._get_allocation(self.context, segment) self.assertFalse(alloc.allocated) observed = self.driver.reserve_provider_segment(self.context, segment) alloc = self._get_allocation(self.context, observed) self.assertTrue(alloc.allocated) def test_reserve_provider_segment_without_segmentation_id(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: TENANT_NET} observed = self.driver.reserve_provider_segment(self.context, segment) alloc = self._get_allocation(self.context, observed) self.assertTrue(alloc.allocated) vlan_id = observed[api.SEGMENTATION_ID] self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1)) self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1)) def test_reserve_provider_segment_without_physical_network(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN} observed = self.driver.reserve_provider_segment(self.context, segment) alloc = self._get_allocation(self.context, observed) self.assertTrue(alloc.allocated) vlan_id = observed[api.SEGMENTATION_ID] self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1)) self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1)) self.assertEqual(TENANT_NET, observed[api.PHYSICAL_NETWORK]) def test_reserve_provider_segment_all_allocateds(self): for __ in range(VLAN_MIN, VLAN_MAX + 1): self.driver.allocate_tenant_segment(self.context) segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN} self.assertRaises(exc.NoNetworkAvailable, self.driver.reserve_provider_segment, self.context, segment) def test_get_mtu(self): cfg.CONF.set_override('global_physnet_mtu', 1475) cfg.CONF.set_override('path_mtu', 1400, group='ml2') self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400} self.assertEqual(1450, self.driver.get_mtu('physnet1')) cfg.CONF.set_override('global_physnet_mtu', 1375) cfg.CONF.set_override('path_mtu', 1400, group='ml2') self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400} self.assertEqual(1375, self.driver.get_mtu('physnet1')) cfg.CONF.set_override('global_physnet_mtu', 0) cfg.CONF.set_override('path_mtu', 1400, group='ml2') self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400} self.assertEqual(1450, self.driver.get_mtu('physnet1')) cfg.CONF.set_override('global_physnet_mtu', 0) cfg.CONF.set_override('path_mtu', 0, group='ml2') self.driver.physnet_mtus = {} self.assertEqual(0, self.driver.get_mtu('physnet1')) def test_allocate_tenant_segment(self): for __ in range(VLAN_MIN, VLAN_MAX + 1): segment = self.driver.allocate_tenant_segment(self.context) alloc = self._get_allocation(self.context, segment) self.assertTrue(alloc.allocated) vlan_id = segment[api.SEGMENTATION_ID] self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1)) self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1)) self.assertEqual(TENANT_NET, segment[api.PHYSICAL_NETWORK]) def test_allocate_tenant_segment_no_available(self): for __ in range(VLAN_MIN, VLAN_MAX + 1): self.driver.allocate_tenant_segment(self.context) segment = self.driver.allocate_tenant_segment(self.context) self.assertIsNone(segment) def test_release_segment(self): segment = self.driver.allocate_tenant_segment(self.context) self.driver.release_segment(self.context, segment) alloc = self._get_allocation(self.context, segment) self.assertFalse(alloc.allocated) def test_release_segment_unallocated(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.PHYSICAL_NETWORK: PROVIDER_NET, api.SEGMENTATION_ID: 101} with 
mock.patch.object(type_vlan.LOG, 'warning') as log_warn: self.driver.release_segment(self.context, segment) log_warn.assert_called_once_with( "No vlan_id %(vlan_id)s found on physical network " "%(physical_network)s", {'vlan_id': 101, 'physical_network': PROVIDER_NET}) class VlanTypeAllocationTest(testlib_api.SqlTestCase): def test_allocate_tenant_segment_in_order_of_config(self): ranges = NETWORK_VLAN_RANGES + ['phys_net3:20:30'] cfg.CONF.set_override('network_vlan_ranges', ranges, group='ml2_type_vlan') driver = type_vlan.VlanTypeDriver() driver.physnet_mtus = [] driver._sync_vlan_allocations() # swap config order from DB order after sync has happened to # ensure config order is followed and not DB order cfg.CONF.set_override('network_vlan_ranges', list(reversed(ranges)), group='ml2_type_vlan') driver._parse_network_vlan_ranges() ctx = context.Context() for vlan in range(11): # all of physnet3 should be exhausted first self.assertEqual( {'network_type': 'vlan', 'physical_network': 'phys_net3', 'segmentation_id': mock.ANY, 'mtu': 1500}, driver.allocate_tenant_segment(ctx)) for vlan in range(10): # then physnet2 self.assertEqual( {'network_type': 'vlan', 'physical_network': 'phys_net2', 'segmentation_id': mock.ANY, 'mtu': 1500}, driver.allocate_tenant_segment(ctx)) # then nothing self.assertFalse(driver.allocate_tenant_segment(ctx)) class VlanTypeTestWithNetworkSegmentRange(testlib_api.SqlTestCase): def setUp(self): super(VlanTypeTestWithNetworkSegmentRange, self).setUp() cfg.CONF.set_override('network_vlan_ranges', NETWORK_VLAN_RANGES, group='ml2_type_vlan') cfg.CONF.set_override('service_plugins', [SERVICE_PLUGIN_KLASS]) self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges( NETWORK_VLAN_RANGES) self.driver = type_vlan.VlanTypeDriver() self.driver._sync_vlan_allocations() self.context = context.Context() self.setup_coreplugin(CORE_PLUGIN) def test__populate_new_default_network_segment_ranges(self): # _populate_new_default_network_segment_ranges will be called when # the type driver initializes with `network_segment_range` loaded as # one of the `service_plugins` ret = obj_network_segment_range.NetworkSegmentRange.get_objects( self.context) self.assertEqual(1, len(ret)) network_segment_range = ret[0] self.assertTrue(network_segment_range.default) self.assertTrue(network_segment_range.shared) self.assertIsNone(network_segment_range.project_id) self.assertEqual(p_const.TYPE_VLAN, network_segment_range.network_type) self.assertEqual(TENANT_NET, network_segment_range.physical_network) self.assertEqual(VLAN_MIN, network_segment_range.minimum) self.assertEqual(VLAN_MAX, network_segment_range.maximum) def test__delete_expired_default_network_segment_ranges(self): self.driver._delete_expired_default_network_segment_ranges() ret = obj_network_segment_range.NetworkSegmentRange.get_objects( self.context) self.assertEqual(0, len(ret)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/drivers/test_type_vxlan.py0000644000175000017500000000665000000000000030420 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as p_const from neutron.plugins.ml2.drivers import type_vxlan from neutron.tests.unit.plugins.ml2.drivers import base_type_tunnel from neutron.tests.unit.plugins.ml2 import test_rpc from neutron.tests.unit import testlib_api VXLAN_UDP_PORT_ONE = 9999 VXLAN_UDP_PORT_TWO = 8888 class VxlanTypeTest(base_type_tunnel.TunnelTypeTestMixin, base_type_tunnel.TunnelTypeNetworkSegmentRangeTestMixin, testlib_api.SqlTestCase): DRIVER_MODULE = type_vxlan DRIVER_CLASS = type_vxlan.VxlanTypeDriver TYPE = p_const.TYPE_VXLAN def add_endpoint(self, ip=base_type_tunnel.TUNNEL_IP_ONE, host=base_type_tunnel.HOST_ONE): if ip == base_type_tunnel.TUNNEL_IP_ONE: port = VXLAN_UDP_PORT_ONE else: port = VXLAN_UDP_PORT_TWO return self.driver.add_endpoint(ip, host, port) def test_add_endpoint(self): endpoint = super(VxlanTypeTest, self).test_add_endpoint() self.assertEqual(VXLAN_UDP_PORT_ONE, endpoint.udp_port) def test_get_endpoint_by_host(self): endpoint = super(VxlanTypeTest, self).test_get_endpoint_by_host() self.assertEqual(VXLAN_UDP_PORT_ONE, endpoint.udp_port) def test_get_endpoint_by_ip(self): endpoint = super(VxlanTypeTest, self).test_get_endpoint_by_ip() self.assertEqual(VXLAN_UDP_PORT_ONE, endpoint.udp_port) def test_get_endpoints(self): self.add_endpoint() self.add_endpoint(base_type_tunnel.TUNNEL_IP_TWO, base_type_tunnel.HOST_TWO) endpoints = self.driver.get_endpoints() for endpoint in endpoints: if endpoint['ip_address'] == base_type_tunnel.TUNNEL_IP_ONE: self.assertEqual(VXLAN_UDP_PORT_ONE, endpoint['udp_port']) self.assertEqual(base_type_tunnel.HOST_ONE, endpoint['host']) elif endpoint['ip_address'] == base_type_tunnel.TUNNEL_IP_TWO: self.assertEqual(VXLAN_UDP_PORT_TWO, endpoint['udp_port']) self.assertEqual(base_type_tunnel.HOST_TWO, endpoint['host']) class VxlanTypeMultiRangeTest(base_type_tunnel.TunnelTypeMultiRangeTestMixin, testlib_api.SqlTestCase): DRIVER_CLASS = type_vxlan.VxlanTypeDriver class VxlanTypeRpcCallbackTest(base_type_tunnel.TunnelRpcCallbackTestMixin, test_rpc.RpcCallbacksTestCase, testlib_api.SqlTestCase): DRIVER_CLASS = type_vxlan.VxlanTypeDriver TYPE = p_const.TYPE_VXLAN class VxlanTypeTunnelMTUTest(base_type_tunnel.TunnelTypeMTUTestMixin, testlib_api.SqlTestCase): DRIVER_CLASS = type_vxlan.VxlanTypeDriver TYPE = p_const.TYPE_VXLAN ENCAP_OVERHEAD = p_const.VXLAN_ENCAP_OVERHEAD ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4750462 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/extensions/0000755000175000017500000000000000000000000025330 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/extensions/__init__.py0000644000175000017500000000000000000000000027427 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/extensions/fake_extension.py0000644000175000017500000000413500000000000030707 
0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api import extensions from neutron_lib import constants from neutron._i18n import _ EXTENDED_ATTRIBUTES_2_0 = { 'networks': { 'network_extension': {'allow_post': True, 'allow_put': True, 'default': constants.ATTR_NOT_SPECIFIED, 'is_visible': True, 'enforce_policy': True}, }, 'subnets': { 'subnet_extension': {'allow_post': True, 'allow_put': True, 'default': constants.ATTR_NOT_SPECIFIED, 'is_visible': True, 'enforce_policy': True}, }, 'ports': { 'port_extension': {'allow_post': True, 'allow_put': True, 'default': constants.ATTR_NOT_SPECIFIED, 'is_visible': True, 'enforce_policy': True}, }, } class Fake_extension(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "ML2 fake extension" @classmethod def get_alias(cls): return "fake_extension" @classmethod def get_description(cls): return _("Adds test attributes to core resources.") @classmethod def get_updated(cls): return "2014-07-16T10:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/extensions/test_data_plane_status.py0000644000175000017500000000535700000000000032446 0ustar00coreycorey00000000000000# Copyright (c) 2017 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
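# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the neutron source tree).
# fake_extension.py above shows the ExtensionDescriptor contract the ML2
# extension machinery relies on: each descriptor advertises, per API version,
# the attribute maps to merge into the core resources. A rough consumer,
# using a _sketch-named helper that exists only for this illustration:

def _sketch_extend_resources(descriptors, resource_attrs, version="2.0"):
    """Merge every descriptor's extended attributes into resource_attrs."""
    for descriptor in descriptors:
        extended = descriptor.get_extended_resources(version)
        for resource, attrs in extended.items():
            resource_attrs.setdefault(resource, {}).update(attrs)
    return resource_attrs

# e.g. _sketch_extend_resources([Fake_extension()], {'ports': {}}) would add
# the 'port_extension' attribute declared above to the 'ports' resource map.
# ---------------------------------------------------------------------------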
import mock from neutron_lib.api.definitions import data_plane_status as dps_lib from neutron_lib.api.definitions import port as port_def from neutron_lib import constants from neutron_lib import context from neutron_lib.plugins import directory from oslo_config import cfg from neutron.plugins.ml2.extensions import data_plane_status from neutron.tests.unit.plugins.ml2 import test_plugin class DataPlaneStatusSML2ExtDriverTestCase(test_plugin.Ml2PluginV2TestCase): _extension_drivers = ['data_plane_status'] def setUp(self): cfg.CONF.set_override('extension_drivers', self._extension_drivers, group='ml2') super(DataPlaneStatusSML2ExtDriverTestCase, self).setUp() self.plugin = directory.get_plugin() def test_extend_port_dict_no_data_plane_status(self): for db_data in ({'data_plane_status': None}, {}): response_data = {} session = mock.Mock() driver = data_plane_status.DataPlaneStatusExtensionDriver() driver.extend_port_dict(session, db_data, response_data) self.assertIsNone(response_data['data_plane_status']) def test_show_port_has_data_plane_status(self): with self.port() as port: req = self.new_show_request(port_def.COLLECTION_NAME, port['port']['id'], self.fmt) p = self.deserialize(self.fmt, req.get_response(self.api)) self.assertIsNone(p['port'][dps_lib.DATA_PLANE_STATUS]) def test_port_update_data_plane_status(self): with self.port() as port: admin_ctx = context.get_admin_context() p = {'port': {dps_lib.DATA_PLANE_STATUS: constants.ACTIVE}} self.plugin.update_port(admin_ctx, port['port']['id'], p) req = self.new_show_request( port_def.COLLECTION_NAME, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(res['port'][dps_lib.DATA_PLANE_STATUS], constants.ACTIVE) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/extensions/test_dns_integration.py0000644000175000017500000013034000000000000032131 0ustar00coreycorey00000000000000# Copyright (c) 2016 IBM # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneauth1 import loading from keystoneauth1 import session import mock import netaddr from neutron_lib.api.definitions import dns as dns_apidef from neutron_lib.api.definitions import provider_net as pnet from neutron_lib import constants from neutron_lib import context from neutron_lib.plugins import directory from oslo_config import cfg from oslo_utils import uuidutils import testtools from neutron.objects import ports as port_obj from neutron.plugins.ml2.extensions import dns_integration from neutron.services.externaldns.drivers.designate import driver from neutron.tests.unit.plugins.ml2 import test_plugin mock_client = mock.Mock() mock_admin_client = mock.Mock() mock_config = {'return_value': (mock_client, mock_admin_client)} DNSNAME = 'port-dns-name' DNSDOMAIN = 'domain.com.' PORTDNSDOMAIN = 'port-dns-domain.com.' NEWDNSNAME = 'new-port-dns-name' NEWPORTDNSDOMAIN = 'new-port-dns-domain.com.' 
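# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the neutron source tree).
# test_data_plane_status.py above exercises an extension driver whose job is
# to copy a single DB field into the API response, yielding None both when
# the column is NULL and when it is absent entirely. A minimal stand-in,
# assuming dict-like access to the DB row as the test itself does:

def _sketch_extend_port_dict(db_data, response_data):
    """Expose data_plane_status on the port dict, defaulting to None."""
    response_data['data_plane_status'] = db_data.get('data_plane_status')
    return response_data
# ---------------------------------------------------------------------------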
V4UUID = 'v4_uuid' V6UUID = 'v6_uuid' @mock.patch( 'neutron.services.externaldns.drivers.designate.driver.get_clients', **mock_config) class DNSIntegrationTestCase(test_plugin.Ml2PluginV2TestCase): _extension_drivers = ['dns'] _domain = DNSDOMAIN def setUp(self): cfg.CONF.set_override('extension_drivers', self._extension_drivers, group='ml2') cfg.CONF.set_override('external_dns_driver', 'designate') mock_client.reset_mock() mock_admin_client.reset_mock() super(DNSIntegrationTestCase, self).setUp() dns_integration.DNS_DRIVER = None dns_integration.subscribe() self.plugin = directory.get_plugin() cfg.CONF.set_override('dns_domain', self._domain) def _create_port_for_test(self, provider_net=True, dns_domain=True, dns_name=True, ipv4=True, ipv6=True, dns_domain_port=False): net_kwargs = {} if provider_net: net_kwargs = { 'arg_list': (pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID,), pnet.NETWORK_TYPE: 'vxlan', pnet.SEGMENTATION_ID: '2016', } if dns_domain: net_kwargs[dns_apidef.DNSDOMAIN] = DNSDOMAIN net_kwargs['arg_list'] = \ net_kwargs.get('arg_list', ()) + (dns_apidef.DNSDOMAIN,) res = self._create_network(self.fmt, 'test_network', True, **net_kwargs) network = self.deserialize(self.fmt, res) if ipv4: cidr = '10.0.0.0/24' self._create_subnet_for_test(network['network']['id'], cidr) if ipv6: cidr = 'fd3d:bdd4:da60::/64' self._create_subnet_for_test(network['network']['id'], cidr) port_kwargs = {} if dns_name: port_kwargs = { 'arg_list': (dns_apidef.DNSNAME,), dns_apidef.DNSNAME: DNSNAME } if dns_domain_port: port_kwargs[dns_apidef.DNSDOMAIN] = PORTDNSDOMAIN port_kwargs['arg_list'] = (port_kwargs.get('arg_list', ()) + (dns_apidef.DNSDOMAIN,)) res = self._create_port('json', network['network']['id'], **port_kwargs) self.assertEqual(201, res.status_int) port = self.deserialize(self.fmt, res)['port'] ctx = context.get_admin_context() dns_data_db = port_obj.PortDNS.get_object(ctx, port_id=port['id']) return port, dns_data_db def _create_subnet_for_test(self, network_id, cidr): ip_net = netaddr.IPNetwork(cidr) # initialize the allocation_pool to the lower half of the subnet subnet_size = ip_net.last - ip_net.first subnet_mid_point = ip_net.first + int(subnet_size / 2) start, end = (netaddr.IPAddress(ip_net.first + 2), netaddr.IPAddress(subnet_mid_point)) allocation_pools = [{'start': str(start), 'end': str(end)}] return self._create_subnet(self.fmt, network_id, str(ip_net), ip_version=ip_net.ip.version, allocation_pools=allocation_pools) def _update_port_for_test(self, port, new_dns_name=NEWDNSNAME, new_dns_domain=None, **kwargs): mock_client.reset_mock() ip_addresses = [netaddr.IPAddress(ip['ip_address']) for ip in port['fixed_ips']] records_v4 = [ip for ip in ip_addresses if ip.version == 4] records_v6 = [ip for ip in ip_addresses if ip.version == 6] recordsets = [] if records_v4: recordsets.append({'id': V4UUID, 'records': records_v4}) if records_v6: recordsets.append({'id': V6UUID, 'records': records_v6}) mock_client.recordsets.list.return_value = recordsets mock_admin_client.reset_mock() body = {} if new_dns_name is not None: body['dns_name'] = new_dns_name if new_dns_domain is not None: body[dns_apidef.DNSDOMAIN] = new_dns_domain body.update(kwargs) data = {'port': body} req = self.new_update_request('ports', data, port['id']) res = req.get_response(self.api) self.assertEqual(200, res.status_int) port = self.deserialize(self.fmt, res)['port'] ctx = context.get_admin_context() dns_data_db = port_obj.PortDNS.get_object(ctx, port_id=port['id']) return port, dns_data_db def _verify_port_dns(self, port, 
dns_data_db, dns_name=True, dns_domain=True, ptr_zones=True, delete_records=False, provider_net=True, dns_driver=True, original_ips=None, current_dns_name=DNSNAME, previous_dns_name='', dns_domain_port=False, current_dns_domain=DNSDOMAIN, previous_dns_domain=DNSDOMAIN): if dns_name: self.assertEqual(current_dns_name, port[dns_apidef.DNSNAME]) if dns_domain_port: self.assertTrue(port[dns_apidef.DNSDOMAIN]) is_there_dns_domain = dns_domain or dns_domain_port if dns_name and is_there_dns_domain and provider_net and dns_driver: self.assertEqual(current_dns_name, dns_data_db['current_dns_name']) self.assertEqual(previous_dns_name, dns_data_db['previous_dns_name']) if current_dns_name: self.assertEqual(current_dns_domain, dns_data_db['current_dns_domain']) else: self.assertFalse(dns_data_db['current_dns_domain']) records_v4 = [ip['ip_address'] for ip in port['fixed_ips'] if netaddr.IPAddress(ip['ip_address']).version == 4] records_v6 = [ip['ip_address'] for ip in port['fixed_ips'] if netaddr.IPAddress(ip['ip_address']).version == 6] expected = [] expected_delete = [] if records_v4: if current_dns_name: expected.append( mock.call(current_dns_domain, current_dns_name, 'A', records_v4)) if delete_records: expected_delete.append(mock.call(previous_dns_domain, V4UUID)) if records_v6: if current_dns_name: expected.append( mock.call(current_dns_domain, current_dns_name, 'AAAA', records_v6)) if delete_records: expected_delete.append(mock.call(previous_dns_domain, V6UUID)) mock_client.recordsets.create.assert_has_calls(expected, any_order=True) self.assertEqual( len(mock_client.recordsets.create.call_args_list), len(expected)) mock_client.recordsets.delete.assert_has_calls(expected_delete, any_order=True) self.assertEqual( len(mock_client.recordsets.delete.call_args_list), len(expected_delete)) expected = [] expected_delete = [] if ptr_zones: records = records_v4 + records_v6 recordset_name = '%s.%s' % (current_dns_name, current_dns_domain) for record in records: in_addr_name = netaddr.IPAddress(record).reverse_dns in_addr_zone_name = self._get_in_addr_zone_name( in_addr_name) if current_dns_name: expected.append(mock.call(in_addr_zone_name, in_addr_name, 'PTR', [recordset_name])) if delete_records and not original_ips: expected_delete.append(mock.call(in_addr_zone_name, in_addr_name)) if delete_records and original_ips: for record in original_ips: in_addr_name = netaddr.IPAddress(record).reverse_dns in_addr_zone_name = self._get_in_addr_zone_name( in_addr_name) expected_delete.append(mock.call(in_addr_zone_name, in_addr_name)) mock_admin_client.recordsets.create.assert_has_calls( expected, any_order=True) self.assertEqual( len(mock_admin_client.recordsets.create.call_args_list), len(expected)) mock_admin_client.recordsets.delete.assert_has_calls( expected_delete, any_order=True) self.assertEqual( len(mock_admin_client.recordsets.delete.call_args_list), len(expected_delete)) else: if not dns_name: self.assertEqual('', port[dns_apidef.DNSNAME]) if not (dns_name or dns_domain_port): self.assertIsNone(dns_data_db) self.assertFalse(mock_client.recordsets.create.call_args_list) self.assertFalse( mock_admin_client.recordsets.create.call_args_list) self.assertFalse(mock_client.recordsets.delete.call_args_list) self.assertFalse( mock_admin_client.recordsets.delete.call_args_list) def _get_in_addr_zone_name(self, in_addr_name): units = self._get_bytes_or_nybles_to_skip(in_addr_name) return '.'.join(in_addr_name.split('.')[int(units):]) def _get_bytes_or_nybles_to_skip(self, in_addr_name): if 'in-addr.arpa' in 
in_addr_name: return (( constants.IPv4_BITS - cfg.CONF.designate.ipv4_ptr_zone_prefix_size) / 8) return (constants.IPv6_BITS - cfg.CONF.designate.ipv6_ptr_zone_prefix_size) / 4 def test_create_port(self, *mocks): port, dns_data_db = self._create_port_for_test() self._verify_port_dns(port, dns_data_db) def test_create_port_tenant_network(self, *mocks): port, dns_data_db = self._create_port_for_test(provider_net=False) self._verify_port_dns(port, dns_data_db, provider_net=False) def test_create_port_no_dns_name(self, *mocks): port, dns_data_db = self._create_port_for_test(dns_name=False) self._verify_port_dns(port, dns_data_db, dns_name=False) def test_create_port_no_dns_domain(self, *mocks): port, dns_data_db = self._create_port_for_test(dns_domain=False) self._verify_port_dns(port, dns_data_db, dns_domain=False) def test_create_port_no_dns_driver(self, *mocks): cfg.CONF.set_override('external_dns_driver', '') port, dns_data_db = self._create_port_for_test() self._verify_port_dns(port, dns_data_db, dns_driver=False) def test_create_port_no_ipv6(self, *mocks): port, dns_data_db = self._create_port_for_test(ipv6=False) self._verify_port_dns(port, dns_data_db) def test_create_port_no_ipv4(self, *mocks): port, dns_data_db = self._create_port_for_test(ipv4=False) self._verify_port_dns(port, dns_data_db) def test_create_port_no_ptr_zones(self, *mocks): cfg.CONF.set_override( 'allow_reverse_dns_lookup', False, group='designate') port, dns_data_db = self._create_port_for_test() self._verify_port_dns(port, dns_data_db, ptr_zones=False) cfg.CONF.set_override('allow_reverse_dns_lookup', True, group='designate') def test_update_port(self, *mocks): port, dns_data_db = self._create_port_for_test() port, dns_data_db = self._update_port_for_test(port) self._verify_port_dns(port, dns_data_db, delete_records=True, current_dns_name=NEWDNSNAME, previous_dns_name=DNSNAME) def test_update_port_with_current_dns_name(self, *mocks): port, dns_data_db = self._create_port_for_test() port, dns_data_db = self._update_port_for_test(port, new_dns_name=DNSNAME) self.assertEqual(DNSNAME, dns_data_db['current_dns_name']) self.assertEqual(DNSDOMAIN, dns_data_db['current_dns_domain']) self.assertEqual('', dns_data_db['previous_dns_name']) self.assertEqual('', dns_data_db['previous_dns_domain']) self.assertFalse(mock_client.recordsets.create.call_args_list) self.assertFalse( mock_admin_client.recordsets.create.call_args_list) self.assertFalse(mock_client.recordsets.delete.call_args_list) self.assertFalse( mock_admin_client.recordsets.delete.call_args_list) def test_update_port_tenant_network(self, *mocks): port, dns_data_db = self._create_port_for_test(provider_net=False) port, dns_data_db = self._update_port_for_test(port) self._verify_port_dns(port, dns_data_db, delete_records=True, current_dns_name=NEWDNSNAME, previous_dns_name=DNSNAME, provider_net=False) def test_update_port_no_dns_domain(self, *mocks): port, dns_data_db = self._create_port_for_test(dns_domain=False) port, dns_data_db = self._update_port_for_test(port) self._verify_port_dns(port, dns_data_db, delete_records=True, current_dns_name=NEWDNSNAME, previous_dns_name=DNSNAME, dns_domain=False) def test_update_port_add_dns_name(self, *mocks): port, dns_data_db = self._create_port_for_test(dns_name=False) port, dns_data_db = self._update_port_for_test(port) self._verify_port_dns(port, dns_data_db, delete_records=False, current_dns_name=NEWDNSNAME, previous_dns_name='') def test_update_port_clear_dns_name(self, *mocks): port, dns_data_db = 
self._create_port_for_test() port, dns_data_db = self._update_port_for_test(port, new_dns_name='') self._verify_port_dns(port, dns_data_db, delete_records=True, current_dns_name='', previous_dns_name=DNSNAME) def test_update_port_non_dns_name_attribute(self, *mocks): port, dns_data_db = self._create_port_for_test() port_name = 'port_name' kwargs = {'name': port_name} port, dns_data_db = self._update_port_for_test(port, new_dns_name=None, **kwargs) self.assertEqual(DNSNAME, dns_data_db['current_dns_name']) self.assertEqual(DNSDOMAIN, dns_data_db['current_dns_domain']) self.assertEqual('', dns_data_db['previous_dns_name']) self.assertEqual('', dns_data_db['previous_dns_domain']) self.assertFalse(mock_client.recordsets.create.call_args_list) self.assertFalse( mock_admin_client.recordsets.create.call_args_list) self.assertFalse(mock_client.recordsets.delete.call_args_list) self.assertFalse( mock_admin_client.recordsets.delete.call_args_list) self.assertEqual(port_name, port['name']) def _compute_new_fixed_ips(self, port): new_fixed_ips = [ {'subnet_id': ip['subnet_id'], 'ip_address': str(netaddr.IPAddress(ip['ip_address']) + 1)} for ip in port['fixed_ips'] ] return {'fixed_ips': new_fixed_ips} def test_update_port_fixed_ips(self, *mocks): port, dns_data_db = self._create_port_for_test() original_ips = [ip['ip_address'] for ip in port['fixed_ips']] kwargs = self._compute_new_fixed_ips(port) port, dns_data_db = self._update_port_for_test(port, new_dns_name=None, **kwargs) self._verify_port_dns(port, dns_data_db, delete_records=True, current_dns_name=DNSNAME, previous_dns_name=DNSNAME, original_ips=original_ips) def test_update_port_fixed_ips_with_new_dns_name(self, *mocks): port, dns_data_db = self._create_port_for_test() original_ips = [ip['ip_address'] for ip in port['fixed_ips']] kwargs = self._compute_new_fixed_ips(port) port, dns_data_db = self._update_port_for_test(port, new_dns_name=NEWDNSNAME, **kwargs) self._verify_port_dns(port, dns_data_db, delete_records=True, current_dns_name=NEWDNSNAME, previous_dns_name=DNSNAME, original_ips=original_ips) def test_update_port_fixed_ips_with_current_dns_name(self, *mocks): port, dns_data_db = self._create_port_for_test() original_ips = [ip['ip_address'] for ip in port['fixed_ips']] kwargs = self._compute_new_fixed_ips(port) port, dns_data_db = self._update_port_for_test(port, new_dns_name=DNSNAME, **kwargs) self._verify_port_dns(port, dns_data_db, delete_records=True, current_dns_name=DNSNAME, previous_dns_name=DNSNAME, original_ips=original_ips) def test_update_port_fixed_ips_clearing_dns_name(self, *mocks): port, dns_data_db = self._create_port_for_test() original_ips = [ip['ip_address'] for ip in port['fixed_ips']] kwargs = self._compute_new_fixed_ips(port) port, dns_data_db = self._update_port_for_test(port, new_dns_name='', **kwargs) self._verify_port_dns(port, dns_data_db, delete_records=True, current_dns_name='', previous_dns_name=DNSNAME, original_ips=original_ips) def _assert_update_fixed_ips_no_effect_after_clearing_dns_attribute( self, dns_data_db, dns_data_db_1, dns_data_db_2): self.assertEqual('', dns_data_db_2['current_dns_name']) self.assertEqual('', dns_data_db_2['current_dns_domain']) self.assertEqual(dns_data_db_1['current_dns_name'], dns_data_db_2['current_dns_name']) self.assertEqual(dns_data_db_1['current_dns_domain'], dns_data_db_2['current_dns_domain']) self.assertEqual(dns_data_db['current_dns_name'], dns_data_db_1['previous_dns_name']) self.assertEqual(dns_data_db['current_dns_domain'], dns_data_db_1['previous_dns_domain']) 
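        # Beyond the bookkeeping checks above, a second no-op update must
        # leave no previous_* state behind and trigger no external DNS
        # recordset create/delete calls: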
self.assertFalse(dns_data_db_2['previous_dns_name']) self.assertFalse(dns_data_db_2['previous_dns_domain']) self.assertFalse(mock_client.recordsets.create.call_args_list) self.assertFalse( mock_admin_client.recordsets.create.call_args_list) self.assertFalse(mock_client.recordsets.delete.call_args_list) self.assertFalse( mock_admin_client.recordsets.delete.call_args_list) def test_update_fixed_ips_no_effect_after_clearing_dns_name(self, *mocks): port, dns_data_db = self._create_port_for_test() port, dns_data_db_1 = self._update_port_for_test(port, new_dns_name='') kwargs = self._compute_new_fixed_ips(port) mock_client.reset_mock() mock_admin_client.reset_mock() port, dns_data_db_2 = self._update_port_for_test(port, new_dns_name='', **kwargs) self._assert_update_fixed_ips_no_effect_after_clearing_dns_attribute( dns_data_db, dns_data_db_1, dns_data_db_2) def test_create_port_dns_name_field_missing(self, *mocks): res = self._create_network(self.fmt, 'test_network', True) net = self.deserialize(self.fmt, res)['network'] cidr = '10.0.0.0/24' self._create_subnet_for_test(net['id'], cidr) port_request = { 'port': { 'network_id': net['id'], 'tenant_id': net['tenant_id'], 'name': 'mugsie', 'admin_state_up': True, 'device_id': '', 'device_owner': '', 'fixed_ips': '' } } self.plugin.create_port(self.context, port_request) def test_dns_driver_loaded_after_server_restart(self, *mocks): dns_integration.DNS_DRIVER = None port, dns_data_db = self._create_port_for_test() self._verify_port_dns(port, dns_data_db) class DNSIntegrationTestCaseDefaultDomain(DNSIntegrationTestCase): _domain = 'openstacklocal.' def _generate_dns_assignment(self, port): fqdn = [] for ip in port['fixed_ips']: hostname = 'host-%s' % ip['ip_address'].replace( '.', '-').replace(':', '-') fqdn.append('%s.%s' % (hostname, self._domain)) return set(fqdn) def _verify_port_dns(self, port, dns_data_db, dns_name=True, dns_domain=True, ptr_zones=True, delete_records=False, provider_net=True, dns_driver=True, original_ips=None, current_dns_name=DNSNAME, previous_dns_name=''): self.assertEqual('', port[dns_apidef.DNSNAME]) fqdn_set = self._generate_dns_assignment(port) port_fqdn_set = set([each['fqdn'] for each in port['dns_assignment']]) self.assertEqual(fqdn_set, port_fqdn_set) self.assertIsNone(dns_data_db, "dns data should be none") self.assertFalse(mock_client.recordsets.create.call_args_list) self.assertFalse( mock_admin_client.recordsets.create.call_args_list) self.assertFalse(mock_client.recordsets.delete.call_args_list) self.assertFalse( mock_admin_client.recordsets.delete.call_args_list) def test_update_fixed_ips_no_effect_after_clearing_dns_name(self, *mocks): port, dns_data_db = self._create_port_for_test() port, dns_data_db_1 = self._update_port_for_test(port, new_dns_name='') kwargs = {'fixed_ips': []} for ip in port['fixed_ips']: kwargs['fixed_ips'].append( {'subnet_id': ip['subnet_id'], 'ip_address': str(netaddr.IPAddress(ip['ip_address']) + 1)}) mock_client.reset_mock() mock_admin_client.reset_mock() port, dns_data_db_2 = self._update_port_for_test(port, new_dns_name='', **kwargs) self._verify_port_dns(port, dns_data_db_2) def test_update_port_non_dns_name_attribute(self, *mocks): port, dns_data_db = self._create_port_for_test() port_name = 'port_name' kwargs = {'name': port_name} port, dns_data_db = self._update_port_for_test(port, new_dns_name=None, **kwargs) self._verify_port_dns(port, dns_data_db) def test_update_port_with_current_dns_name(self, *mocks): port, dns_data_db = self._create_port_for_test() port, dns_data_db = 
self._update_port_for_test(port, new_dns_name=DNSNAME) self._verify_port_dns(port, dns_data_db) @mock.patch( 'neutron.services.externaldns.drivers.designate.driver.get_clients', **mock_config) class DNSDomainPortsTestCase(DNSIntegrationTestCase): _extension_drivers = ['dns_domain_ports'] def test_create_port_net_dns_domain_port_dns_domain(self, *mocks): port, dns_data_db = self._create_port_for_test( dns_domain_port=True) self._verify_port_dns(port, dns_data_db, dns_domain_port=True, current_dns_domain=PORTDNSDOMAIN) def test_create_port_no_net_dns_domain_port_dns_domain(self, *mocks): port, dns_data_db = self._create_port_for_test( dns_domain=False, dns_domain_port=True) self._verify_port_dns(port, dns_data_db, dns_domain=False, dns_domain_port=True, current_dns_domain=PORTDNSDOMAIN) def test_create_port_no_net_dns_domain_no_port_dns_domain(self, *mocks): port, dns_data_db = self._create_port_for_test(dns_domain=False) self._verify_port_dns(port, dns_data_db, dns_domain=False) def test_create_port_port_dns_domain_no_dns_name(self, *mocks): port, dns_data_db = self._create_port_for_test(dns_domain=False, dns_domain_port=True, dns_name=False) self._verify_port_dns(port, dns_data_db, dns_name=False, dns_domain=False, dns_domain_port=True) self.assertEqual(PORTDNSDOMAIN, dns_data_db[dns_apidef.DNSDOMAIN]) self.assertEqual(PORTDNSDOMAIN, port[dns_apidef.DNSDOMAIN]) def test_update_port_replace_port_dns_domain(self, *mocks): port, dns_data_db = self._create_port_for_test( dns_domain_port=True) port, dns_data_db = self._update_port_for_test( port, new_dns_name=None, new_dns_domain=NEWPORTDNSDOMAIN) self._verify_port_dns(port, dns_data_db, delete_records=True, current_dns_name=DNSNAME, previous_dns_name=DNSNAME, current_dns_domain=NEWPORTDNSDOMAIN, previous_dns_domain=PORTDNSDOMAIN) def test_update_port_replace_network_dns_domain(self, *mocks): port, dns_data_db = self._create_port_for_test() port, dns_data_db = self._update_port_for_test( port, new_dns_name=None, new_dns_domain=PORTDNSDOMAIN) self._verify_port_dns(port, dns_data_db, delete_records=True, current_dns_name=DNSNAME, previous_dns_name=DNSNAME, current_dns_domain=PORTDNSDOMAIN) def test_update_port_add_dns_domain_no_net_dns_domain(self, *mocks): port, dns_data_db = self._create_port_for_test(dns_domain=False) port, dns_data_db = self._update_port_for_test( port, new_dns_name=None, new_dns_domain=PORTDNSDOMAIN) self._verify_port_dns(port, dns_data_db, current_dns_name=DNSNAME, current_dns_domain=PORTDNSDOMAIN, previous_dns_domain='') def test_update_port_add_dns_name_port_dns_domain(self, *mocks): port, dns_data_db = self._create_port_for_test(dns_domain=False, dns_domain_port=True, dns_name=False) port, dns_data_db = self._update_port_for_test(port) self._verify_port_dns(port, dns_data_db, current_dns_name=NEWDNSNAME, current_dns_domain=PORTDNSDOMAIN, previous_dns_domain='') def test_update_port_add_port_dns_domain_port_dns_name(self, *mocks): port, dns_data_db = self._create_port_for_test(dns_domain=False) port, dns_data_db = self._update_port_for_test( port, new_dns_name=None, new_dns_domain=PORTDNSDOMAIN) self._verify_port_dns(port, dns_data_db, current_dns_name=DNSNAME, current_dns_domain=PORTDNSDOMAIN, previous_dns_domain='') def test_update_port_add_port_dns_domain_add_port_dns_name(self, *mocks): port, dns_data_db = self._create_port_for_test(dns_name=False, dns_domain=False) port, dns_data_db = self._update_port_for_test( port, new_dns_domain=NEWPORTDNSDOMAIN) self._verify_port_dns(port, dns_data_db, 
current_dns_name=NEWDNSNAME, current_dns_domain=NEWPORTDNSDOMAIN, previous_dns_domain='') def test_update_port_clear_port_dns_domain_no_network_dns_domain(self, *mocks): port, dns_data_db = self._create_port_for_test(dns_domain_port=True, dns_domain=False) port, dns_data_db = self._update_port_for_test(port, new_dns_domain='', new_dns_name=None) self.assertFalse(dns_data_db['current_dns_name']) self.assertFalse(dns_data_db['current_dns_domain']) self.assertEqual(DNSNAME, dns_data_db['previous_dns_name']) self.assertEqual(PORTDNSDOMAIN, dns_data_db['previous_dns_domain']) self.assertEqual(DNSNAME, dns_data_db[dns_apidef.DNSNAME]) self.assertFalse(dns_data_db[dns_apidef.DNSDOMAIN]) self.assertEqual(DNSNAME, port[dns_apidef.DNSNAME]) self.assertFalse(port[dns_apidef.DNSDOMAIN]) self.assertFalse(mock_client.recordsets.create.call_args_list) self.assertFalse(mock_admin_client.recordsets.create.call_args_list) self.assertEqual(2, mock_client.recordsets.delete.call_count) self.assertEqual( 2, len(mock_admin_client.recordsets.delete.call_args_list)) def test_update_port_clear_port_dns_domain_network_dns_domain(self, *mocks): port, dns_data_db = self._create_port_for_test(dns_domain_port=True) port, dns_data_db = self._update_port_for_test(port, new_dns_domain='', new_dns_name=None) self._verify_port_dns(port, dns_data_db, delete_records=True, current_dns_name=DNSNAME, previous_dns_name=DNSNAME, previous_dns_domain=PORTDNSDOMAIN) def _assert_no_external_dns_service_calls(self, port, dns_data_db, dns_name=DNSNAME, dns_domain=PORTDNSDOMAIN): if dns_data_db: self.assertFalse(dns_data_db['current_dns_name']) self.assertFalse(dns_data_db['current_dns_domain']) self.assertFalse(dns_data_db['previous_dns_name']) self.assertFalse(dns_data_db['previous_dns_domain']) self.assertEqual(dns_name, dns_data_db[dns_apidef.DNSNAME]) self.assertEqual(dns_domain, dns_data_db[dns_apidef.DNSDOMAIN]) self.assertEqual(dns_name, port[dns_apidef.DNSNAME]) self.assertEqual(dns_domain, port[dns_apidef.DNSDOMAIN]) self.assertFalse(mock_client.recordsets.create.call_args_list) self.assertFalse( mock_admin_client.recordsets.create.call_args_list) self.assertFalse(mock_client.recordsets.delete.call_args_list) self.assertFalse( mock_admin_client.recordsets.delete.call_args_list) def test_create_port_dns_name_dns_domain_no_provider_net(self, *mocks): port, dns_data_db = self._create_port_for_test(provider_net=False, dns_domain_port=True) self.assertIsNotNone(dns_data_db) self._assert_no_external_dns_service_calls(port, dns_data_db) def test_create_port_no_dns_name_dns_domain_no_provider_net(self, *mocks): port, dns_data_db = self._create_port_for_test(provider_net=False, dns_name=False, dns_domain_port=True) self.assertIsNotNone(dns_data_db) self._assert_no_external_dns_service_calls(port, dns_data_db, dns_name='') def test_create_port_dns_name_no_dns_domain_no_provider_net(self, *mocks): port, dns_data_db = self._create_port_for_test(provider_net=False) self.assertIsNotNone(dns_data_db) self._assert_no_external_dns_service_calls(port, dns_data_db, dns_domain='') def test_create_port_no_dns_name_no_dns_domain_no_provider_net(self, *mocks): port, dns_data_db = self._create_port_for_test(provider_net=False, dns_name=False) self.assertIsNone(dns_data_db) self._assert_no_external_dns_service_calls(port, dns_data_db, dns_name='', dns_domain='') def test_update_port_add_dns_name_add_dns_domain_no_provider_net(self, *mocks): port, dns_data_db = self._create_port_for_test(provider_net=False, dns_name=False) self.assertIsNone(dns_data_db) 
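        # Adding both a dns_name and a dns_domain on update should create
        # the DB record, but still make no external DNS service calls,
        # since the network is not a provider network: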
        port, dns_data_db = self._update_port_for_test(
            port, new_dns_domain=PORTDNSDOMAIN, new_dns_name=DNSNAME)
        self.assertIsNotNone(dns_data_db)
        self._assert_no_external_dns_service_calls(port, dns_data_db)

    def test_update_port_add_dns_domain_no_provider_net(self, *mocks):
        port, dns_data_db = self._create_port_for_test(provider_net=False)
        self.assertIsNotNone(dns_data_db)
        port, dns_data_db = self._update_port_for_test(
            port, new_dns_domain=PORTDNSDOMAIN, new_dns_name=None)
        self.assertIsNotNone(dns_data_db)
        self._assert_no_external_dns_service_calls(port, dns_data_db)

    def test_update_port_fixed_ips_with_dns_domain(self, *mocks):
        port, dns_data_db = self._create_port_for_test(
            dns_domain_port=True)
        original_ips = [ip['ip_address'] for ip in port['fixed_ips']]
        kwargs = self._compute_new_fixed_ips(port)
        port, dns_data_db = self._update_port_for_test(port,
                                                       new_dns_name=None,
                                                       **kwargs)
        self._verify_port_dns(port, dns_data_db, delete_records=True,
                              current_dns_name=DNSNAME,
                              previous_dns_name=DNSNAME,
                              current_dns_domain=PORTDNSDOMAIN,
                              previous_dns_domain=PORTDNSDOMAIN,
                              original_ips=original_ips)

    def test_update_fixed_ips_no_effect_after_clearing_dns_domain(
            self, *mocks):
        port, dns_data_db = self._create_port_for_test(dns_domain_port=True,
                                                       dns_domain=False)
        port, dns_data_db_1 = self._update_port_for_test(port,
                                                         new_dns_domain='',
                                                         new_dns_name=None)
        kwargs = self._compute_new_fixed_ips(port)
        mock_client.reset_mock()
        mock_admin_client.reset_mock()
        port, dns_data_db_2 = self._update_port_for_test(port,
                                                         new_dns_name=None,
                                                         **kwargs)
        self._assert_update_fixed_ips_no_effect_after_clearing_dns_attribute(
            dns_data_db, dns_data_db_1, dns_data_db_2)


class TestDesignateClientKeystoneV2(testtools.TestCase):
    """Test case for Designate clients with Keystone v2 admin credentials."""

    TEST_URL = 'http://127.0.0.1:9001/v2'
    TEST_ADMIN_USERNAME = uuidutils.generate_uuid(dashed=False)
    TEST_ADMIN_PASSWORD = uuidutils.generate_uuid(dashed=False)
    TEST_ADMIN_TENANT_NAME = uuidutils.generate_uuid(dashed=False)
    TEST_ADMIN_TENANT_ID = uuidutils.generate_uuid(dashed=False)
    TEST_ADMIN_AUTH_URL = 'http://127.0.0.1:35357/v2.0'
    TEST_CA_CERT = uuidutils.generate_uuid(dashed=False)

    TEST_CONTEXT = mock.Mock()
    TEST_CONTEXT.auth_token = uuidutils.generate_uuid(dashed=False)

    def setUp(self):
        super(TestDesignateClientKeystoneV2, self).setUp()
        cfg.CONF.set_override('url', self.TEST_URL, group='designate')
        cfg.CONF.set_override('admin_username', self.TEST_ADMIN_USERNAME,
                              group='designate')
        cfg.CONF.set_override('admin_password', self.TEST_ADMIN_PASSWORD,
                              group='designate')
        cfg.CONF.set_override('admin_auth_url', self.TEST_ADMIN_AUTH_URL,
                              group='designate')
        cfg.CONF.set_override('admin_tenant_id', self.TEST_ADMIN_TENANT_ID,
                              group='designate')
        cfg.CONF.set_override('admin_tenant_name',
                              self.TEST_ADMIN_TENANT_NAME,
                              group='designate')

        # enforce session recalculation
        mock.patch.object(driver, '_SESSION', new=None).start()
        self.driver_session = (
            mock.patch.object(session, 'Session').start())
        self.load_auth = (
            mock.patch.object(driver.loading,
                              'load_auth_from_conf_options').start())
        self.password = (
            mock.patch.object(driver.password, 'Password').start())

    def test_insecure_client(self):
        cfg.CONF.set_override('insecure', True, group='designate')
        driver.get_clients(self.TEST_CONTEXT)
        args, kwargs = self.driver_session.call_args
        self.assertIn('verify', kwargs)
        self.assertFalse(kwargs['verify'])

    def test_secure_client(self):
        cfg.CONF.set_override('insecure', False, group='designate')
        cfg.CONF.set_override('cafile', self.TEST_CA_CERT, group='designate')
        driver.get_clients(self.TEST_CONTEXT)
        args, kwargs = self.driver_session.call_args
        self.assertIn('verify', kwargs)
        self.assertEqual(self.TEST_CA_CERT, kwargs['verify'])

    def test_auth_type_not_defined(self):
        driver.get_clients(self.TEST_CONTEXT)
        self.load_auth.assert_not_called()
        self.password.assert_called_with(
            auth_url=self.TEST_ADMIN_AUTH_URL,
            password=self.TEST_ADMIN_PASSWORD,
            tenant_id=self.TEST_ADMIN_TENANT_ID,
            tenant_name=self.TEST_ADMIN_TENANT_NAME,
            username=self.TEST_ADMIN_USERNAME)


class TestDesignateClientKeystoneV3(testtools.TestCase):
    """Test case for Designate clients with Keystone v3 admin credentials."""

    TEST_URL = 'http://127.0.0.1:9001/v2'
    TEST_ADMIN_USERNAME = uuidutils.generate_uuid(dashed=False)
    TEST_ADMIN_PASSWORD = uuidutils.generate_uuid(dashed=False)
    TEST_ADMIN_USER_DOMAIN_ID = 'Default'
    TEST_ADMIN_PROJECT_ID = uuidutils.generate_uuid(dashed=False)
    TEST_ADMIN_PROJECT_DOMAIN_ID = 'Default'
    TEST_ADMIN_AUTH_URL = 'http://127.0.0.1:35357/v3'
    TEST_CA_CERT = uuidutils.generate_uuid(dashed=False)

    TEST_CONTEXT = mock.Mock()
    TEST_CONTEXT.auth_token = uuidutils.generate_uuid(dashed=False)

    def setUp(self):
        super(TestDesignateClientKeystoneV3, self).setUp()
        # Register the Password auth plugin options,
        # so we can use CONF.set_override
        password_option = loading.get_auth_plugin_conf_options('password')
        cfg.CONF.register_opts(password_option, group='designate')
        self.addCleanup(
            cfg.CONF.unregister_opts, password_option, group='designate')

        cfg.CONF.set_override('url', self.TEST_URL, group='designate')
        cfg.CONF.set_override('auth_type', 'password', group='designate')
        cfg.CONF.set_override('username', self.TEST_ADMIN_USERNAME,
                              group='designate')
        cfg.CONF.set_override('password', self.TEST_ADMIN_PASSWORD,
                              group='designate')
        cfg.CONF.set_override('user_domain_id',
                              self.TEST_ADMIN_USER_DOMAIN_ID,
                              group='designate')
        cfg.CONF.set_override('project_domain_id',
                              self.TEST_ADMIN_PROJECT_DOMAIN_ID,
                              group='designate')
        cfg.CONF.set_override('auth_url', self.TEST_ADMIN_AUTH_URL,
                              group='designate')

        # enforce session recalculation
        mock.patch.object(driver, '_SESSION', new=None).start()
        self.driver_session = (
            mock.patch.object(session, 'Session').start())
        self.load_auth = (
            mock.patch.object(driver.loading,
                              'load_auth_from_conf_options').start())
        self.password = (
            mock.patch.object(driver.password, 'Password').start())

    def test_insecure_client(self):
        cfg.CONF.set_override('insecure', True, group='designate')
        driver.get_clients(self.TEST_CONTEXT)
        args, kwargs = self.driver_session.call_args
        self.assertIn('verify', kwargs)
        self.assertFalse(kwargs['verify'])

    def test_secure_client(self):
        cfg.CONF.set_override('insecure', False, group='designate')
        cfg.CONF.set_override('cafile', self.TEST_CA_CERT, group='designate')
        driver.get_clients(self.TEST_CONTEXT)
        args, kwargs = self.driver_session.call_args
        self.assertIn('verify', kwargs)
        self.assertEqual(self.TEST_CA_CERT, kwargs['verify'])

    def test_auth_type_password(self):
        driver.get_clients(self.TEST_CONTEXT)
        self.load_auth.assert_called_with(cfg.CONF, 'designate')
        self.password.assert_not_called()


# File: neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/extensions/test_port_security.py
# Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from neutron_lib.api.definitions import port_security as psec

from neutron.plugins.ml2.extensions import port_security
from neutron.tests.unit.plugins.ml2 import test_plugin


class TestML2ExtensionPortSecurity(test_plugin.Ml2PluginV2TestCase):

    def _test_extend_dict_no_port_security(self, func):
        """Test extend_*_dict won't crash if port_security item is None."""
        for db_data in ({'port_security': None, 'name': 'net1'}, {}):
            response_data = {}
            session = mock.Mock()

            driver = port_security.PortSecurityExtensionDriver()
            getattr(driver, func)(session, db_data, response_data)

            self.assertTrue(response_data[psec.PORTSECURITY])

    def test_extend_port_dict_no_port_security(self):
        self._test_extend_dict_no_port_security('extend_port_dict')

    def test_extend_network_dict_no_port_security(self):
        self._test_extend_dict_no_port_security('extend_network_dict')


# File: neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/extensions/test_tag_ports_during_bulk_creation.py
# Copyright (c) 2019 Verizon Media
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy import mock from neutron_lib.plugins import directory from oslo_config import cfg from neutron.plugins.ml2.extensions import tag_ports_during_bulk_creation from neutron.tests.unit.plugins.ml2 import test_plugin TAGS = [ ['tag-1', 'tag-2', 'tag-3'], ['tag-1', 'tag-2'], ['tag-1', 'tag-3'], [] ] class TagPortsDuringBulkCreationTestCase(test_plugin.Ml2PluginV2TestCase): _extension_drivers = ['tag_ports_during_bulk_creation'] fmt = 'json' def get_additional_service_plugins(self): p = super(TagPortsDuringBulkCreationTestCase, self).get_additional_service_plugins() p.update({'tag_name': 'tag'}) return p def setUp(self): cfg.CONF.set_override('extension_drivers', self._extension_drivers, group='ml2') super(TagPortsDuringBulkCreationTestCase, self).setUp() self.plugin = directory.get_plugin() def test_create_ports_bulk_with_tags(self): num_ports = 3 tenant_id = 'some_tenant' with self.network(tenant_id=tenant_id) as network_to_use: net_id = network_to_use['network']['id'] port = {'port': {'network_id': net_id, 'admin_state_up': True, 'tenant_id': tenant_id}} ports = [copy.deepcopy(port) for x in range(num_ports)] ports_tags_map = {} for port, tags in zip(ports, TAGS): port['port']['tags'] = tags port['port']['name'] = '-'.join(tags) ports_tags_map[port['port']['name']] = tags req_body = {'ports': ports} ports_req = self.new_create_request('ports', req_body) res = ports_req.get_response(self.api) self.assertEqual(201, res.status_int) created_ports = self.deserialize(self.fmt, res) for port in created_ports['ports']: self.assertEqual(ports_tags_map[port['name']], port['tags']) def test_create_ports_bulk_no_tags(self): num_ports = 2 tenant_id = 'some_tenant' with self.network(tenant_id=tenant_id) as network_to_use: net_id = network_to_use['network']['id'] port = {'port': {'name': 'port', 'network_id': net_id, 'admin_state_up': True, 'tenant_id': tenant_id}} ports = [copy.deepcopy(port) for x in range(num_ports)] req_body = {'ports': ports} ports_req = self.new_create_request('ports', req_body) res = ports_req.get_response(self.api) self.assertEqual(201, res.status_int) created_ports = self.deserialize(self.fmt, res) for port in created_ports['ports']: self.assertFalse(port['tags']) def test_create_port_with_tags(self): tenant_id = 'some_tenant' with self.network(tenant_id=tenant_id) as network_to_use: net_id = network_to_use['network']['id'] req_body = {'port': {'name': 'port', 'network_id': net_id, 'admin_state_up': True, 'tenant_id': tenant_id, 'tags': TAGS[0]}} port_req = self.new_create_request('ports', req_body) res = port_req.get_response(self.api) self.assertEqual(201, res.status_int) created_port = self.deserialize(self.fmt, res) self.assertEqual(TAGS[0], created_port['port']['tags']) def test_type_args_passed_to_extension(self): num_ports = 2 tenant_id = 'some_tenant' extension = tag_ports_during_bulk_creation with mock.patch.object( extension.TagPortsDuringBulkCreationExtensionDriver, 'process_create_port') as patched_method: with self.network(tenant_id=tenant_id) as network_to_use: net_id = network_to_use['network']['id'] port = {'port': {'network_id': net_id, 'admin_state_up': True, 'tenant_id': tenant_id}} ports = [copy.deepcopy(port) for x in range(num_ports)] ports[0]['port']['tags'] = TAGS[0] ports[1]['port']['tags'] = TAGS[1] req_body = {'ports': ports} ports_req = self.new_create_request('ports', req_body) res = ports_req.get_response(self.api) self.assertEqual(201, res.status_int) self.assertIsInstance(patched_method.call_args[0][1], dict) 
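            # The extension driver must receive plain dicts for both the
            # request body and the resulting port: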
self.assertIsInstance(patched_method.call_args[0][2], dict) ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/extensions/test_uplink_status_propagation.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/extensions/test_uplink_status_propagation.p0000644000175000017500000000555000000000000034065 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib.api.definitions import port as port_def from neutron_lib.plugins import directory from oslo_config import cfg from neutron.plugins.ml2.extensions import uplink_status_propagation as usp from neutron.tests.unit.plugins.ml2 import test_plugin class UplinkStatusPropagationML2ExtDriverTestCase( test_plugin.Ml2PluginV2TestCase): _extension_drivers = ['uplink_status_propagation'] def setUp(self): cfg.CONF.set_override('extension_drivers', self._extension_drivers, group='ml2') super(UplinkStatusPropagationML2ExtDriverTestCase, self).setUp() self.plugin = directory.get_plugin() def test_extend_port_dict_no_project_default(self): for db_data in ({'propagate_uplink_status': None}, {}): response_data = {} session = mock.Mock() driver = usp.UplinkStatusPropagationExtensionDriver() driver.extend_port_dict(session, db_data, response_data) self.assertFalse(response_data['propagate_uplink_status']) def test_show_port_has_propagate_uplink_status(self): with self.port(propagate_uplink_status=True) as port: req = self.new_show_request(port_def.COLLECTION_NAME, port['port']['id'], self.fmt) n = self.deserialize(self.fmt, req.get_response(self.api)) self.assertTrue(n['port']['propagate_uplink_status']) def test_port_create_propagate_uplink_status(self): with self.network() as n: args = {'port': {'name': 'test', 'network_id': n['network']['id'], 'tenant_id': n['network']['id'], 'device_id': '', 'device_owner': '', 'fixed_ips': '', 'propagate_uplink_status': True, 'admin_state_up': True, 'status': 'ACTIVE'}} port = None try: port = self.plugin.create_port(self.context, args) finally: if port: self.plugin.delete_port(self.context, port['id']) self.assertTrue(port['propagate_uplink_status']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/test_agent_scheduler.py0000644000175000017500000000252700000000000027704 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License.

from neutron.tests.unit.db import test_agentschedulers_db
from neutron.tests.unit.plugins.ml2 import test_plugin


class Ml2AgentSchedulerTestCase(
        test_agentschedulers_db.OvsAgentSchedulerTestCase):
    plugin_str = test_plugin.PLUGIN_NAME
    l3_plugin = ('neutron.services.l3_router.'
                 'l3_router_plugin.L3RouterPlugin')


class Ml2L3AgentNotifierTestCase(
        test_agentschedulers_db.OvsL3AgentNotifierTestCase):
    plugin_str = test_plugin.PLUGIN_NAME
    l3_plugin = ('neutron.services.l3_router.'
                 'l3_router_plugin.L3RouterPlugin')


class Ml2DhcpAgentNotifierTestCase(
        test_agentschedulers_db.OvsDhcpAgentNotifierTestCase):
    plugin_str = test_plugin.PLUGIN_NAME


# File: neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/test_db.py
# Copyright (c) 2014 OpenStack Foundation, all rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re
import warnings

import mock
import netaddr
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants
from neutron_lib import context
from neutron_lib.db import api as db_api
from neutron_lib.plugins.ml2 import api
from oslo_utils import uuidutils
from sqlalchemy.orm import exc
from sqlalchemy.orm import query

from neutron.db import db_base_plugin_v2
from neutron.db.models import l3 as l3_models
from neutron.db import segments_db
from neutron.objects import network as network_obj
from neutron.objects import ports as port_obj
from neutron.plugins.ml2 import db as ml2_db
from neutron.plugins.ml2 import models
from neutron.tests.unit import testlib_api

PLUGIN_NAME = 'ml2'


class Ml2DBTestCase(testlib_api.SqlTestCase):

    def setUp(self):
        super(Ml2DBTestCase, self).setUp()
        self.ctx = context.get_admin_context()
        self.setup_coreplugin(PLUGIN_NAME)

    def _setup_neutron_network(self, network_id):
        network_obj.Network(self.ctx, id=network_id).create()

    def _setup_neutron_port(self, network_id, port_id):
        mac_address = db_base_plugin_v2.NeutronDbPluginV2._generate_macs()[0]
        port = port_obj.Port(self.ctx,
                             id=port_id,
                             network_id=network_id,
                             mac_address=netaddr.EUI(mac_address),
                             admin_state_up=True,
                             status='DOWN',
                             device_id='',
                             device_owner='')
        port.create()
        return port

    def _setup_neutron_portbinding(self, port_id, vif_type, host,
                                   status=constants.ACTIVE):
        with db_api.CONTEXT_WRITER.using(self.ctx):
            self.ctx.session.add(models.PortBinding(port_id=port_id,
                                                    vif_type=vif_type,
                                                    host=host,
                                                    status=status))

    @staticmethod
    def _sort_segments(segments):
        return sorted(segments, key=lambda d: d['segmentation_id'])

    def _create_segments(self, segments, is_seg_dynamic=False,
                         network_id=None):
        # NOTE: generate the network id at call time; a default of
        # uuidutils.generate_uuid() in the signature would be evaluated
        # only once, when the method is defined, so every caller relying
        # on the default would silently share the same network id.
        if network_id is None:
            network_id = uuidutils.generate_uuid()
        self._setup_neutron_network(network_id)
        for segment in segments:
            segments_db.add_network_segment(
                self.ctx, network_id, segment,
                is_dynamic=is_seg_dynamic)
            segment['network_id'] = network_id
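        # Read the segments back from the DB and verify they round-trip
        # unchanged before handing them to the caller: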
net_segments = segments_db.get_network_segments( self.ctx, network_id, filter_dynamic=is_seg_dynamic) net_segments = self._sort_segments(net_segments) for segment_index, segment in enumerate(segments): self.assertEqual(segment, net_segments[segment_index]) return net_segments def test_network_segments_for_provider_network(self): segment = {api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'physnet1', api.SEGMENTATION_ID: 1} self._create_segments([segment]) def test_network_segments_is_dynamic_true(self): segment = {api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'physnet1', api.SEGMENTATION_ID: 1} self._create_segments([segment], is_seg_dynamic=True) def test_network_segments_for_multiprovider_network(self): segments = [{api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'physnet1', api.SEGMENTATION_ID: 1}, {api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'physnet1', api.SEGMENTATION_ID: 2}] self._create_segments(segments) def test_get_networks_segments(self): net_id1 = uuidutils.generate_uuid() net_id2 = uuidutils.generate_uuid() segments1 = [{api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'physnet1', api.SEGMENTATION_ID: 1}, {api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'physnet1', api.SEGMENTATION_ID: 2}] segments2 = [{api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'physnet1', api.SEGMENTATION_ID: 3}, {api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'physnet1', api.SEGMENTATION_ID: 4}] net1segs = self._create_segments(segments1, network_id=net_id1) net2segs = self._create_segments(segments2, network_id=net_id2) segs = segments_db.get_networks_segments( self.ctx, [net_id1, net_id2]) self.assertEqual(net1segs, self._sort_segments(segs[net_id1])) self.assertEqual(net2segs, self._sort_segments(segs[net_id2])) def test_get_networks_segments_no_segments(self): net_id1 = uuidutils.generate_uuid() net_id2 = uuidutils.generate_uuid() self._create_segments([], network_id=net_id1) self._create_segments([], network_id=net_id2) segs = segments_db.get_networks_segments( self.ctx, [net_id1, net_id2]) self.assertEqual([], segs[net_id1]) self.assertEqual([], segs[net_id2]) def test_get_segment_by_id(self): segment = {api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'physnet1', api.SEGMENTATION_ID: 1} net_segment = self._create_segments([segment])[0] segment_uuid = net_segment[api.ID] net_segment = segments_db.get_segment_by_id(self.ctx, segment_uuid) self.assertEqual(segment, net_segment) def test_get_segment_by_id_result_not_found(self): segment_uuid = uuidutils.generate_uuid() net_segment = segments_db.get_segment_by_id(self.ctx, segment_uuid) self.assertIsNone(net_segment) def test_delete_network_segment(self): segment = {api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'physnet1', api.SEGMENTATION_ID: 1} net_segment = self._create_segments([segment])[0] segment_uuid = net_segment[api.ID] segments_db.delete_network_segment(self.ctx, segment_uuid) # Get segment and verify its empty net_segment = segments_db.get_segment_by_id(self.ctx, segment_uuid) self.assertIsNone(net_segment) def test_get_dynamic_segment(self): net_id = uuidutils.generate_uuid() segment1 = {api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'physnet1', api.SEGMENTATION_ID: 1} self._create_segments( [segment1], is_seg_dynamic=True, network_id=net_id) segs1 = segments_db.get_dynamic_segment( self.ctx, net_id) self.assertEqual('vlan', segs1[api.NETWORK_TYPE]) self.assertEqual('physnet1', segs1[api.PHYSICAL_NETWORK]) self.assertEqual(1, segs1[api.SEGMENTATION_ID]) segs2 = segments_db.get_dynamic_segment( self.ctx, net_id, 
physical_network='physnet1') self.assertEqual('vlan', segs2[api.NETWORK_TYPE]) self.assertEqual('physnet1', segs2[api.PHYSICAL_NETWORK]) self.assertEqual(1, segs2[api.SEGMENTATION_ID]) segs3 = segments_db.get_dynamic_segment( self.ctx, net_id, segmentation_id=1) self.assertEqual('vlan', segs3[api.NETWORK_TYPE]) self.assertEqual('physnet1', segs3[api.PHYSICAL_NETWORK]) self.assertEqual(1, segs3[api.SEGMENTATION_ID]) def test_add_port_binding(self): network_id = uuidutils.generate_uuid() port_id = uuidutils.generate_uuid() self._setup_neutron_network(network_id) self._setup_neutron_port(network_id, port_id) port = ml2_db.add_port_binding(self.ctx, port_id) self.assertEqual(port_id, port.port_id) self.assertEqual(portbindings.VIF_TYPE_UNBOUND, port.vif_type) def test_get_port_binding_host(self): network_id = uuidutils.generate_uuid() port_id = uuidutils.generate_uuid() host = 'fake_host' other_host = 'other_fake_host' vif_type = portbindings.VIF_TYPE_UNBOUND self._setup_neutron_network(network_id) self._setup_neutron_port(network_id, port_id) self._setup_neutron_portbinding(port_id, vif_type, host) self._setup_neutron_portbinding(port_id, vif_type, other_host, status=constants.INACTIVE) port_host = ml2_db.get_port_binding_host(self.ctx, port_id) self.assertEqual(host, port_host) def test_get_port_binding_host_multiple_results_found(self): network_id = uuidutils.generate_uuid() port_id = uuidutils.generate_uuid() port_id_one = uuidutils.generate_uuid() port_id_two = uuidutils.generate_uuid() # NOTE(manjeets) to check startswith testcase we # need port ids with same prefix port_id_one = port_id[:8] + port_id_one[8:] port_id_two = port_id[:8] + port_id_two[8:] host = 'fake_host' vif_type = portbindings.VIF_TYPE_UNBOUND self._setup_neutron_network(network_id) self._setup_neutron_port(network_id, port_id_one) self._setup_neutron_portbinding(port_id_one, vif_type, host) self._setup_neutron_port(network_id, port_id_two) self._setup_neutron_portbinding(port_id_two, vif_type, host) port_host = ml2_db.get_port_binding_host(self.ctx, port_id[:8]) self.assertIsNone(port_host) def test_get_port_binding_host_result_not_found(self): port_id = uuidutils.generate_uuid() port_host = ml2_db.get_port_binding_host(self.ctx, port_id) self.assertIsNone(port_host) def test_get_port(self): network_id = uuidutils.generate_uuid() port_id = uuidutils.generate_uuid() self._setup_neutron_network(network_id) self._setup_neutron_port(network_id, port_id) port = ml2_db.get_port(self.ctx, port_id) self.assertEqual(port_id, port.id) def test_get_port_multiple_results_found(self): with mock.patch( 'sqlalchemy.orm.query.Query.one', side_effect=exc.MultipleResultsFound): port = ml2_db.get_port(self.ctx, 'unused') self.assertIsNone(port) def test_get_port_result_not_found(self): port_id = uuidutils.generate_uuid() port = ml2_db.get_port(self.ctx, port_id) self.assertIsNone(port) def test_get_port_from_device_mac(self): network_id = uuidutils.generate_uuid() port_id = uuidutils.generate_uuid() self._setup_neutron_network(network_id) port = self._setup_neutron_port(network_id, port_id) observed_port = ml2_db.get_port_from_device_mac(self.ctx, port['mac_address']) self.assertEqual(port_id, observed_port.id) def test_generating_multiple_mac_addresses(self): mac_regex = "^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$" macs = db_base_plugin_v2.NeutronDbPluginV2._generate_macs() self.assertEqual(1, len(macs)) self.assertIsNotNone(re.search(mac_regex, macs[0])) macs = db_base_plugin_v2.NeutronDbPluginV2._generate_macs(5) self.assertEqual(5, 
len(macs)) for mac in macs: self.assertIsNotNone(re.search(mac_regex, mac)) class Ml2DvrDBTestCase(testlib_api.SqlTestCase): def setUp(self): super(Ml2DvrDBTestCase, self).setUp() self.ctx = context.get_admin_context() self.setup_coreplugin(PLUGIN_NAME) def _setup_neutron_network(self, network_id, port_ids): with db_api.CONTEXT_WRITER.using(self.ctx): network_obj.Network(self.ctx, id=network_id).create() ports = [] for port_id in port_ids: mac_address = (db_base_plugin_v2.NeutronDbPluginV2. _generate_macs()[0]) port = port_obj.Port(self.ctx, id=port_id, network_id=network_id, mac_address=netaddr.EUI(mac_address), admin_state_up=True, status='ACTIVE', device_id='', device_owner='') port.create() ports.append(port) return ports def _setup_neutron_router(self): with self.ctx.session.begin(subtransactions=True): router = l3_models.Router() self.ctx.session.add(router) return router def _setup_distributed_binding(self, network_id, port_id, router_id, host_id): with db_api.CONTEXT_WRITER.using(self.ctx): record = models.DistributedPortBinding( port_id=port_id, host=host_id, router_id=router_id, vif_type=portbindings.VIF_TYPE_UNBOUND, vnic_type=portbindings.VNIC_NORMAL, status='DOWN') self.ctx.session.add(record) return record def test_ensure_distributed_port_binding_deals_with_db_duplicate(self): network_id = uuidutils.generate_uuid() port_id = uuidutils.generate_uuid() router_id = 'foo_router_id' host_id = 'foo_host_id' self._setup_neutron_network(network_id, [port_id]) self._setup_distributed_binding(network_id, port_id, router_id, host_id) with mock.patch.object(query.Query, 'first') as query_first: query_first.return_value = [] with mock.patch.object(ml2_db.LOG, 'debug') as log_trace: binding = ml2_db.ensure_distributed_port_binding( self.ctx, port_id, host_id, router_id) self.assertTrue(query_first.called) self.assertTrue(log_trace.called) self.assertEqual(port_id, binding.port_id) def test_ensure_distributed_port_binding(self): network_id = uuidutils.generate_uuid() port_id = uuidutils.generate_uuid() self._setup_neutron_network(network_id, [port_id]) router = self._setup_neutron_router() ml2_db.ensure_distributed_port_binding( self.ctx, port_id, 'foo_host', router.id) expected = (self.ctx.session.query(models.DistributedPortBinding). filter_by(port_id=port_id).one()) self.assertEqual(port_id, expected.port_id) def test_ensure_distributed_port_binding_multiple_bindings(self): network_id = uuidutils.generate_uuid() port_id = uuidutils.generate_uuid() self._setup_neutron_network(network_id, [port_id]) router = self._setup_neutron_router() ml2_db.ensure_distributed_port_binding( self.ctx, port_id, 'foo_host_1', router.id) ml2_db.ensure_distributed_port_binding( self.ctx, port_id, 'foo_host_2', router.id) bindings = (self.ctx.session.query(models.DistributedPortBinding). filter_by(port_id=port_id).all()) self.assertEqual(2, len(bindings)) def test_delete_distributed_port_binding_if_stale(self): network_id = uuidutils.generate_uuid() port_id = uuidutils.generate_uuid() self._setup_neutron_network(network_id, [port_id]) binding = self._setup_distributed_binding( network_id, port_id, None, 'foo_host_id') ml2_db.delete_distributed_port_binding_if_stale(self.ctx, binding) count = (self.ctx.session.query(models.DistributedPortBinding). 
filter_by(port_id=binding.port_id).count()) self.assertFalse(count) def test_get_distributed_port_binding_by_host_not_found(self): port = ml2_db.get_distributed_port_binding_by_host( self.ctx, 'foo_port_id', 'foo_host_id') self.assertIsNone(port) def test_get_distributed_port_bindings_not_found(self): port = ml2_db.get_distributed_port_bindings(self.ctx, 'foo_port_id') self.assertFalse(len(port)) def test_get_distributed_port_bindings(self): network_id = uuidutils.generate_uuid() port_id_1 = uuidutils.generate_uuid() port_id_2 = uuidutils.generate_uuid() self._setup_neutron_network(network_id, [port_id_1, port_id_2]) router = self._setup_neutron_router() self._setup_distributed_binding( network_id, port_id_1, router.id, 'foo_host_id_1') self._setup_distributed_binding( network_id, port_id_1, router.id, 'foo_host_id_2') ports = ml2_db.get_distributed_port_bindings(self.ctx, port_id_1) self.assertEqual(2, len(ports)) def test_distributed_port_binding_deleted_by_port_deletion(self): network_id = uuidutils.generate_uuid() network_obj.Network(self.ctx, id=network_id).create() device_owner = constants.DEVICE_OWNER_DVR_INTERFACE port = port_obj.Port( self.ctx, id=uuidutils.generate_uuid(), network_id=network_id, mac_address=netaddr.EUI('00-11-22-33-44-55'), admin_state_up=True, status=constants.PORT_STATUS_ACTIVE, device_id='device_id', device_owner=device_owner) port.create() port_obj.DistributedPortBinding( self.ctx, port_id=port.id, host='host', vif_type=portbindings.VIF_TYPE_UNBOUND, vnic_type=portbindings.VNIC_NORMAL, router_id='router_id', status=constants.PORT_STATUS_DOWN).create() port_obj.DistributedPortBinding( self.ctx, port_id=port.id, host='another-host', vif_type=portbindings.VIF_TYPE_UNBOUND, vnic_type=portbindings.VNIC_NORMAL, router_id='router_id', status=constants.PORT_STATUS_DOWN).create() with warnings.catch_warnings(record=True) as warning_list: port.delete() self.assertEqual( [], warning_list, 'Warnings: %s' % ';'.join([str(w) for w in warning_list])) ports = ml2_db.get_distributed_port_bindings(self.ctx, port.id) self.assertEqual(0, len(ports)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/test_driver_context.py0000644000175000017500000001026400000000000027604 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib.api.definitions import portbindings from neutron_lib import constants from neutron.plugins.ml2 import driver_context from neutron.plugins.ml2 import models from neutron.tests import base class TestPortContext(base.BaseTestCase): # REVISIT(rkukura): These was originally for DvrPortContext tests, # but DvrPortContext functionality has been folded into the # regular PortContext class. Tests for non-DVR-specific # functionality are needed here as well. 
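    # Each test below builds a PortContext from mocks and checks whether
    # the value is taken from the binding (DVR ports) or falls back to the
    # port dict itself: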
def test_host(self): plugin = mock.Mock() plugin_context = mock.Mock() network = mock.MagicMock() binding = models.PortBinding() port = {'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE} binding.host = 'foohost' with mock.patch.object(driver_context.segments_db, 'get_network_segments'): ctx = driver_context.PortContext(plugin, plugin_context, port, network, binding, None) self.assertEqual('foohost', ctx.host) def test_host_super(self): plugin = mock.Mock() plugin_context = mock.Mock() network = mock.MagicMock() binding = models.PortBinding() port = {'device_owner': constants.DEVICE_OWNER_COMPUTE_PREFIX, portbindings.HOST_ID: 'host'} binding.host = 'foohost' with mock.patch.object(driver_context.segments_db, 'get_network_segments'): ctx = driver_context.PortContext(plugin, plugin_context, port, network, binding, None) self.assertEqual('host', ctx.host) def test_status(self): plugin = mock.Mock() plugin_context = mock.Mock() network = mock.MagicMock() binding = models.PortBinding() port = {'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE} binding.status = 'foostatus' with mock.patch.object(driver_context.segments_db, 'get_network_segments'): ctx = driver_context.PortContext(plugin, plugin_context, port, network, binding, None) self.assertEqual('foostatus', ctx.status) def test_status_super(self): plugin = mock.Mock() plugin_context = mock.Mock() network = mock.MagicMock() binding = models.PortBinding() port = {'device_owner': constants.DEVICE_OWNER_COMPUTE_PREFIX, 'status': 'status'} binding.status = 'foostatus' with mock.patch.object(driver_context.segments_db, 'get_network_segments'): ctx = driver_context.PortContext(plugin, plugin_context, port, network, binding, None) self.assertEqual('status', ctx.status) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/test_ext_portsecurity.py0000644000175000017500000000543000000000000030200 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
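# The tests below wire the 'port_security' extension driver into the full
# ML2 plugin and reuse the generic port-security test suite against it.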
from neutron_lib.api.definitions import port_security as psec from neutron_lib import context from neutron_lib.plugins import directory from oslo_config import cfg from neutron.tests.unit.extensions import test_portsecurity as test_psec from neutron.tests.unit.plugins.ml2 import test_plugin class PSExtDriverTestCase(test_plugin.Ml2PluginV2TestCase, test_psec.TestPortSecurity): _extension_drivers = ['port_security'] def setUp(self): cfg.CONF.set_override('extension_drivers', self._extension_drivers, group='ml2') super(PSExtDriverTestCase, self).setUp() def test_create_net_port_security_default(self): _core_plugin = directory.get_plugin() admin_ctx = context.get_admin_context() args = {'network': {'name': 'test', 'tenant_id': '', 'shared': False, 'admin_state_up': True, 'status': 'ACTIVE'}} network = None try: network = _core_plugin.create_network(admin_ctx, args) _value = network[psec.PORTSECURITY] finally: if network: _core_plugin.delete_network(admin_ctx, network['id']) self.assertEqual(psec.DEFAULT_PORT_SECURITY, _value) def test_create_port_with_secgroup_none_and_port_security_false(self): if self._skip_security_group: self.skipTest("Plugin does not support security groups") with self.network() as net: with self.subnet(network=net): res = self._create_port('json', net['network']['id'], arg_list=('security_groups', 'port_security_enabled'), security_groups=[], port_security_enabled=False) self.assertEqual(201, res.status_int) port = self.deserialize('json', res) self.assertFalse(port['port'][psec.PORTSECURITY]) self.assertEqual([], port['port']['security_groups']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/test_extension_driver_api.py0000644000175000017500000003121300000000000030762 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
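# The tests below exercise the ML2 extension driver API end to end, using
# the fake 'test' and 'testdb' drivers defined in
# neutron.tests.unit.plugins.ml2.drivers.ext_test.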
import mock from neutron_lib import constants from neutron_lib import context from neutron_lib.plugins import directory from oslo_config import cfg from oslo_utils import uuidutils from neutron.tests.unit.plugins.ml2.drivers import ext_test from neutron.tests.unit.plugins.ml2 import test_plugin class ExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase): _extension_drivers = ['test'] def setUp(self): cfg.CONF.set_override('extension_drivers', self._extension_drivers, group='ml2') super(ExtensionDriverTestCase, self).setUp() self._plugin = directory.get_plugin() self._ctxt = context.get_admin_context() def _verify_network_create(self, code, exc_reason): tenant_id = uuidutils.generate_uuid() data = {'network': {'name': 'net1', 'tenant_id': tenant_id}} req = self.new_create_request('networks', data) res = req.get_response(self.api) self.assertEqual(code, res.status_int) network = self.deserialize(self.fmt, res) if exc_reason: self.assertEqual(exc_reason, network['NeutronError']['type']) return (network, tenant_id) def _verify_network_update(self, network, code, exc_reason): net_id = network['network']['id'] new_name = 'a_brand_new_name' data = {'network': {'name': new_name}} req = self.new_update_request('networks', data, net_id) res = req.get_response(self.api) self.assertEqual(code, res.status_int) error = self.deserialize(self.fmt, res) self.assertEqual(exc_reason, error['NeutronError']['type']) def test_faulty_process_create(self): with mock.patch.object(ext_test.TestExtensionDriver, 'process_create_network', side_effect=TypeError): net, tenant_id = self._verify_network_create(500, 'HTTPInternalServerError') # Verify the operation is rolled back query_params = "tenant_id=%s" % tenant_id nets = self._list('networks', query_params=query_params) self.assertFalse(nets['networks']) def test_faulty_process_update(self): with mock.patch.object(ext_test.TestExtensionDriver, 'process_update_network', side_effect=TypeError): network, tid = self._verify_network_create(201, None) self._verify_network_update(network, 500, 'HTTPInternalServerError') def test_faulty_extend_dict(self): with mock.patch.object(ext_test.TestExtensionDriver, 'extend_network_dict', side_effect=[None, None, TypeError]): network, tid = self._verify_network_create(201, None) self._verify_network_update(network, 400, 'ExtensionDriverError') def test_network_attr(self): with self.network() as network: # Test create network ent = network['network'].get('network_extension') self.assertIsNotNone(ent) # Test list networks res = self._list('networks') val = res['networks'][0].get('network_extension') self.assertEqual('default_network_extension', val) # Test network update data = {'network': {'network_extension': 'Test_Network_Extension_Update'}} res = self._update('networks', network['network']['id'], data) val = res['network'].get('network_extension') self.assertEqual('Test_Network_Extension_Update', val) def test_subnet_attr(self): with self.subnet() as subnet: # Test create subnet ent = subnet['subnet'].get('subnet_extension') self.assertIsNotNone(ent) # Test list subnets res = self._list('subnets') val = res['subnets'][0].get('subnet_extension') self.assertEqual('default_subnet_extension', val) # Test subnet update data = {'subnet': {'subnet_extension': 'Test_Subnet_Extension_Update'}} res = self._update('subnets', subnet['subnet']['id'], data) val = res['subnet'].get('subnet_extension') self.assertEqual('Test_Subnet_Extension_Update', val) def test_port_attr(self): with self.port() as port: # Test create port ent = 
port['port'].get('port_extension') self.assertIsNotNone(ent) # Test list ports res = self._list('ports') val = res['ports'][0].get('port_extension') self.assertEqual('default_port_extension', val) # Test port update data = {'port': {'port_extension': 'Test_Port_Extension_Update'}} res = self._update('ports', port['port']['id'], data) val = res['port'].get('port_extension') self.assertEqual('Test_Port_Extension_Update', val) def test_extend_network_dict(self): with mock.patch.object(ext_test.TestExtensionDriver, 'process_update_network') as ext_update_net,\ mock.patch.object(ext_test.TestExtensionDriver, 'extend_network_dict') as ext_net_dict,\ self.network() as network: net_id = network['network']['id'] net_data = {'network': {'id': net_id}} self._plugin.update_network(self._ctxt, net_id, net_data) self.assertTrue(ext_update_net.called) self.assertTrue(ext_net_dict.called) def test_extend_subnet_dict(self): with mock.patch.object(ext_test.TestExtensionDriver, 'process_update_subnet') as ext_update_subnet,\ mock.patch.object(ext_test.TestExtensionDriver, 'extend_subnet_dict') as ext_subnet_dict,\ self.subnet() as subnet: subnet_id = subnet['subnet']['id'] subnet_data = {'subnet': {'id': subnet_id}} self._plugin.update_subnet(self._ctxt, subnet_id, subnet_data) self.assertTrue(ext_update_subnet.called) self.assertTrue(ext_subnet_dict.called) def test_extend_port_dict(self): with mock.patch.object(ext_test.TestExtensionDriver, 'process_update_port') as ext_update_port,\ mock.patch.object(ext_test.TestExtensionDriver, 'extend_port_dict') as ext_port_dict,\ self.port() as port: port_id = port['port']['id'] port_data = {'port': {'id': port_id}} self._plugin.update_port(self._ctxt, port_id, port_data) self.assertTrue(ext_update_port.called) self.assertTrue(ext_port_dict.called) class DBExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase): _extension_drivers = ['testdb'] def setUp(self): cfg.CONF.set_override('extension_drivers', self._extension_drivers, group='ml2') super(DBExtensionDriverTestCase, self).setUp() self._plugin = directory.get_plugin() self._ctxt = context.get_admin_context() def test_network_attr(self): with self.network() as network: # Test create with default value. net_id = network['network']['id'] val = network['network']['network_extension'] self.assertEqual("", val) res = self._show('networks', net_id) val = res['network']['network_extension'] self.assertEqual("", val) # Test list. res = self._list('networks') val = res['networks'][0]['network_extension'] self.assertEqual("", val) # Test create with explicit value. res = self._create_network(self.fmt, 'test-network', True, arg_list=('network_extension', ), network_extension="abc") network = self.deserialize(self.fmt, res) net_id = network['network']['id'] val = network['network']['network_extension'] self.assertEqual("abc", val) res = self._show('networks', net_id) val = res['network']['network_extension'] self.assertEqual("abc", val) # Test update. data = {'network': {'network_extension': "def"}} res = self._update('networks', net_id, data) val = res['network']['network_extension'] self.assertEqual("def", val) res = self._show('networks', net_id) val = res['network']['network_extension'] self.assertEqual("def", val) def test_subnet_attr(self): with self.subnet() as subnet: # Test create with default value. net_id = subnet['subnet']['id'] val = subnet['subnet']['subnet_extension'] self.assertEqual("", val) res = self._show('subnets', net_id) val = res['subnet']['subnet_extension'] self.assertEqual("", val) # Test list. 
res = self._list('subnets') val = res['subnets'][0]['subnet_extension'] self.assertEqual("", val) with self.network() as network: # Test create with explicit value. data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.1.0.0/24', 'ip_version': constants.IP_VERSION_4, 'tenant_id': self._tenant_id, 'subnet_extension': 'abc'}} req = self.new_create_request('subnets', data, self.fmt) res = req.get_response(self.api) subnet = self.deserialize(self.fmt, res) subnet_id = subnet['subnet']['id'] val = subnet['subnet']['subnet_extension'] self.assertEqual("abc", val) res = self._show('subnets', subnet_id) val = res['subnet']['subnet_extension'] self.assertEqual("abc", val) # Test update. data = {'subnet': {'subnet_extension': "def"}} res = self._update('subnets', subnet_id, data) val = res['subnet']['subnet_extension'] self.assertEqual("def", val) res = self._show('subnets', subnet_id) val = res['subnet']['subnet_extension'] self.assertEqual("def", val) def test_port_attr(self): with self.port() as port: # Test create with default value. net_id = port['port']['id'] val = port['port']['port_extension'] self.assertEqual("", val) res = self._show('ports', net_id) val = res['port']['port_extension'] self.assertEqual("", val) # Test list. res = self._list('ports') val = res['ports'][0]['port_extension'] self.assertEqual("", val) with self.network() as network: # Test create with explicit value. res = self._create_port(self.fmt, network['network']['id'], arg_list=('port_extension', ), port_extension="abc") port = self.deserialize(self.fmt, res) port_id = port['port']['id'] val = port['port']['port_extension'] self.assertEqual("abc", val) res = self._show('ports', port_id) val = res['port']['port_extension'] self.assertEqual("abc", val) # Test update. data = {'port': {'port_extension': "def"}} res = self._update('ports', port_id, data) val = res['port']['port_extension'] self.assertEqual("def", val) res = self._show('ports', port_id) val = res['port']['port_extension'] self.assertEqual("def", val) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/test_managers.py0000644000175000017500000002702200000000000026342 0ustar00coreycorey00000000000000# Copyright (c) 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
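# The tests below cover the ML2 mechanism and type managers. The recurring
# pattern, sketched here for orientation (values are illustrative and taken
# from the tests themselves):
#
#     cfg.CONF.set_override('mechanism_drivers', ['fake_agent'], group='ml2')
#     manager = managers.MechanismManager()
#     manager._bind_port_level(context, 0, segments_to_bind)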
import mock from neutron_lib.api.definitions import provider_net as provider from neutron_lib import exceptions as exc from neutron_lib.exceptions import placement as place_exc from neutron_lib.plugins.ml2 import api from oslo_config import cfg from oslo_db import exception as db_exc from oslo_utils import uuidutils from neutron.db import segments_db from neutron.plugins.ml2.common import exceptions as ml2_exc from neutron.plugins.ml2 import managers from neutron.tests import base from neutron.tests.unit.plugins.ml2._test_mech_agent import FakePortContext from neutron.tests.unit.plugins.ml2.drivers import mech_fake_agent from neutron.tests.unit.plugins.ml2.drivers import mechanism_test class TestManagers(base.BaseTestCase): def setUp(self): super(TestManagers, self).setUp() self.segment_id = "11111111-2222-3333-4444-555555555555" self.segments_to_bind = [{api.ID: self.segment_id, 'network_type': 'vlan', 'physical_network': 'public', api.SEGMENTATION_ID: 49}] original_port = {'fixed_ips': [{'subnet_id': mock.ANY, 'ip_address': mock.ANY}]} self.context = FakePortContext(None, None, self.segments_to_bind, original=original_port) self.context._binding = mock.Mock() self.context._binding_levels = [] self.context._new_bound_segment = self.segment_id self.context._next_segments_to_bind = None def test__check_driver_to_bind(self): cfg.CONF.set_override('mechanism_drivers', ['fake_agent'], group='ml2') manager = managers.MechanismManager() with mock.patch.object(mech_fake_agent.FakeAgentMechanismDriver, 'bind_port') as bind_port: manager._bind_port_level(self.context, 0, self.segments_to_bind) self.assertEqual(1, bind_port.call_count) def test__check_driver_to_bind2(self): cfg.CONF.set_override('mechanism_drivers', ['fake_agent'], group='ml2') manager = managers.MechanismManager() self.context._binding_levels = [mock.Mock(port_id="port_id", level=0, driver='fake_agent', segment_id=self.segment_id)] with mock.patch.object(mech_fake_agent.FakeAgentMechanismDriver, 'bind_port') as bind_port: manager._bind_port_level(self.context, 0, self.segments_to_bind) self.assertEqual(0, bind_port.call_count) def _check_drivers_connectivity(self, agents): cfg.CONF.set_override('mechanism_drivers', agents, group='ml2') manager = managers.MechanismManager() return (manager.ordered_mech_drivers, manager._check_drivers_connectivity( manager.ordered_mech_drivers, self.context)) def test__check_drivers_connectivity(self): self.assertEqual(*self._check_drivers_connectivity(['fake_agent'])) def test__check_drivers_connectivity_ip_less_port(self): self.context._original['fixed_ips'] = [] self.assertEqual(*self._check_drivers_connectivity(['fake_agent'])) def test__check_drivers_connectivity_ip_less_port_l3_only_driver(self): self.context._original['fixed_ips'] = [] self.assertEqual( [], self._check_drivers_connectivity(['fake_agent_l3'])[1]) def test__infer_driver_from_allocation_positive(self): cfg.CONF.set_override( 'mechanism_drivers', ['fake_agent'], group='ml2') manager = managers.MechanismManager() with mock.patch.object(mech_fake_agent.FakeAgentMechanismDriver, 'responsible_for_ports_allocation', return_value=True): responsible_driver = manager._infer_driver_from_allocation( FakePortContext( None, None, self.segments_to_bind, profile={'allocation': 'fake_resource_provider'})) self.assertEqual(responsible_driver.name, 'fake_agent') def test__infer_driver_from_allocation_negative(self): cfg.CONF.set_override( 'mechanism_drivers', ['fake_agent'], group='ml2') manager = managers.MechanismManager() with 

class TestMechManager(base.BaseTestCase):
    def setUp(self):
        cfg.CONF.set_override('mechanism_drivers', ['test'], group='ml2')
        super(TestMechManager, self).setUp()
        self._manager = managers.MechanismManager()

    def _check_precommit(self, resource, operation):
        meth_name = "%s_%s_precommit" % (operation, resource)
        method = getattr(self._manager, meth_name)
        fake_ctxt = mock.Mock()
        fake_ctxt.current = {}

        with mock.patch.object(mechanism_test.TestMechanismDriver, meth_name,
                               side_effect=db_exc.DBDeadlock()):
            self.assertRaises(db_exc.DBDeadlock, method, fake_ctxt)

        with mock.patch.object(mechanism_test.TestMechanismDriver, meth_name,
                               side_effect=RuntimeError()):
            self.assertRaises(ml2_exc.MechanismDriverError, method,
                              fake_ctxt)

    def _check_resource(self, resource):
        self._check_precommit(resource, 'create')
        self._check_precommit(resource, 'update')
        self._check_precommit(resource, 'delete')

    def test_network_precommit(self):
        self._check_resource('network')

    def test_subnet_precommit(self):
        self._check_resource('subnet')

    def test_port_precommit(self):
        self._check_resource('port')
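
# Editor's note: TestMechManager's precommit checks pin down an error
# translation contract: DBDeadlock must propagate unchanged (so the DB retry
# machinery can replay the transaction) while any other driver failure is
# wrapped. A hedged sketch of that contract with stand-in exception classes;
# this is an illustration, not neutron's actual implementation.
class FakeDBDeadlock(Exception):
    """Stand-in for oslo_db.exception.DBDeadlock."""


class FakeMechanismDriverError(Exception):
    """Stand-in for ml2_exc.MechanismDriverError."""


def call_precommit(driver_method, *args, **kwargs):
    try:
        return driver_method(*args, **kwargs)
    except FakeDBDeadlock:
        # deadlocks are retryable: let them bubble up untouched
        raise
    except Exception:
        # everything else aborts the operation with a driver error
        raise FakeMechanismDriverError("precommit failed")
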

class TypeManagerTestCase(base.BaseTestCase):
    def setUp(self):
        super(TypeManagerTestCase, self).setUp()
        self.type_manager = managers.TypeManager()
        self.ctx = mock.Mock()
        self.network = {'id': uuidutils.generate_uuid(),
                        'project_id': uuidutils.generate_uuid()}

    def test_update_network_segment_no_vlan_no_segmentation_id(self):
        net_data = {}
        segment = {api.NETWORK_TYPE: 'vlan'}
        self.assertRaises(
            exc.InvalidInput, self.type_manager.update_network_segment,
            self.ctx, self.network, net_data, segment)

        net_data = {provider.SEGMENTATION_ID: 1000}
        segment = {api.NETWORK_TYPE: 'no_vlan'}
        self.assertRaises(
            exc.InvalidInput, self.type_manager.update_network_segment,
            self.ctx, self.network, net_data, segment)

    def test_update_network_segment(self):
        segmentation_id = 1000
        net_data = {provider.SEGMENTATION_ID: segmentation_id}
        segment = {'id': uuidutils.generate_uuid(),
                   api.NETWORK_TYPE: 'vlan',
                   api.PHYSICAL_NETWORK: 'default_network'}
        new_segment = {api.NETWORK_TYPE: 'vlan',
                       api.PHYSICAL_NETWORK: 'default_network',
                       api.SEGMENTATION_ID: segmentation_id}

        with mock.patch.object(
                self.type_manager,
                'validate_provider_segment') as mock_validate, \
                mock.patch.object(
                    self.type_manager,
                    'reserve_provider_segment') as mock_reserve, \
                mock.patch.object(
                    self.type_manager,
                    'release_network_segment') as mock_release, \
                mock.patch.object(segments_db, 'update_network_segment') as \
                mock_update_network_segment:
            self.type_manager.update_network_segment(self.ctx, self.network,
                                                     net_data, segment)
            mock_validate.assert_called_once_with(new_segment)
            mock_reserve.assert_called_once_with(
                self.ctx, new_segment,
                filters={'project_id': self.network['project_id']})
            mock_update_network_segment.assert_called_once_with(
                self.ctx, segment['id'], segmentation_id)
            mock_release.assert_called_once_with(self.ctx, segment)
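
# Editor's note: test_update_network_segment above fixes the order of
# operations when a segmentation ID changes: validate the new segment,
# reserve it, update the DB row, then release the old segment. A stdlib
# sketch of asserting such an ordering with a single parent mock; the
# manager method names mirror the test but the scenario is hypothetical.
import unittest
from unittest import mock as std_mock


class SegmentUpdateOrderSketch(unittest.TestCase):
    def test_calls_happen_in_order(self):
        manager = std_mock.Mock()
        # the sequence under test, expressed directly
        manager.validate_provider_segment('new-segment')
        manager.reserve_provider_segment('ctx', 'new-segment')
        manager.update_network_segment('ctx', 'segment-id', 1000)
        manager.release_network_segment('ctx', 'old-segment')
        # a parent Mock records child calls in order, so one assertion
        # covers the whole sequence
        manager.assert_has_calls([
            std_mock.call.validate_provider_segment('new-segment'),
            std_mock.call.reserve_provider_segment('ctx', 'new-segment'),
            std_mock.call.update_network_segment('ctx', 'segment-id', 1000),
            std_mock.call.release_network_segment('ctx', 'old-segment'),
        ])


if __name__ == '__main__':
    unittest.main()
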
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/test_ovo_rpc.py0000644000175000017500000001135200000000000026213 0ustar00coreycorey00000000000000
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from neutron_lib import context
from neutron_lib.plugins import directory

from neutron.objects import network
from neutron.objects import securitygroup
from neutron.objects import subnet
from neutron.plugins.ml2 import ovo_rpc
from neutron.tests.unit.plugins.ml2 import test_plugin


class OVOServerRpcInterfaceTestCase(test_plugin.Ml2PluginV2TestCase):
    def setUp(self):
        super(OVOServerRpcInterfaceTestCase, self).setUp()
        self.plugin = directory.get_plugin()
        self.ctx = context.get_admin_context()
        self.received = []
        receive = lambda s, ctx, obs, evt: self.received.append(
            (obs[0], evt))
        mock.patch('neutron.api.rpc.handlers.resources_rpc.'
                   'ResourcesPushRpcApi.push', new=receive).start()
        # base case blocks the handler
        self.ovo_push_interface_p.stop()
        self.plugin.ovo_notifier = ovo_rpc.OVOServerRpcInterface()

    def _assert_object_received(self, ovotype, oid=None, event=None):
        self.plugin.ovo_notifier.wait()
        for obj, evt in self.received:
            if isinstance(obj, ovotype):
                if (obj.id == oid or not oid) and (not event or event == evt):
                    return obj
        self.fail("Could not find OVO %s with ID %s in %s" %
                  (ovotype, oid, self.received))

    def test_network_lifecycle(self):
        with self.network() as n:
            self._assert_object_received(network.Network,
                                         n['network']['id'],
                                         'updated')
            self.plugin.delete_network(self.ctx, n['network']['id'])
            self._assert_object_received(network.Network,
                                         n['network']['id'],
                                         'deleted')

    def test_subnet_lifecycle(self):
        with self.subnet() as s:
            self._assert_object_received(subnet.Subnet,
                                         s['subnet']['id'],
                                         'updated')
            self.plugin.delete_subnet(self.ctx, s['subnet']['id'])
            self._assert_object_received(subnet.Subnet,
                                         s['subnet']['id'],
                                         'deleted')

    def test_securitygroup_and_rule_lifecycle(self):
        # making a network makes a default security group
        with self.network() as n:
            sg = self._assert_object_received(securitygroup.SecurityGroup,
                                              event='updated')
            self.assertEqual(sg.tenant_id, n['network']['tenant_id'])
            sgr = self.plugin.create_security_group_rule(
                self.ctx,
                {'security_group_rule': {'security_group_id': sg.id,
                                         'tenant_id': sg.tenant_id,
                                         'port_range_min': None,
                                         'port_range_max': None,
                                         'remote_ip_prefix': None,
                                         'remote_group_id': None,
                                         'protocol': None,
                                         'direction': None,
                                         'ethertype': 'IPv4'}})
            self._assert_object_received(
                securitygroup.SecurityGroupRule, sgr['id'], 'updated')
            self.plugin.delete_security_group_rule(self.ctx, sgr['id'])
            self._assert_object_received(
                securitygroup.SecurityGroupRule, sgr['id'], 'deleted')
            self.plugin.delete_security_group(self.ctx, sg.id)
            self._assert_object_received(securitygroup.SecurityGroup,
                                         sg.id, 'deleted')

    def test_transaction_state_error_doesnt_notify(self):
        # running in a transaction should cause it to skip notification since
        # fresh reads aren't possible.
        with self.ctx.session.begin():
            self.plugin.create_security_group(
                self.ctx, {'security_group': {'tenant_id': 'test',
                                              'description': 'desc',
                                              'name': 'test'}})
            self.assertEqual([], self.received)
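
# Editor's note: test_transaction_state_error_doesnt_notify above encodes
# the rule that OVO push notifications are skipped while a DB transaction is
# still open, because the objects cannot be freshly re-read. A minimal
# stdlib sketch of such a guard; FakeSession mimics only the `is_active`
# flag the tests inspect and is not neutron's implementation.
class FakeSession(object):
    def __init__(self, in_transaction):
        self.is_active = in_transaction


def maybe_push(session, push, objects):
    """Push `objects` unless a transaction is open; return what was sent."""
    if session.is_active:
        return []  # mid-transaction: skip, a fresh read is impossible
    push(objects)
    return objects


# usage: nothing is pushed while the "transaction" is active
sent = []
assert maybe_push(FakeSession(True), sent.extend, ['sg-1']) == []
assert maybe_push(FakeSession(False), sent.extend, ['sg-1']) == ['sg-1']
assert sent == ['sg-1']
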
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/test_plugin.py0000644000175000017500000050256500000000000026055 0ustar00coreycorey00000000000000
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
import functools
import weakref

import fixtures
import mock
import netaddr
from neutron_lib.agent import constants as agent_consts
from neutron_lib.api.definitions import availability_zone as az_def
from neutron_lib.api.definitions import external_net as extnet_apidef
from neutron_lib.api.definitions import multiprovidernet as mpnet_apidef
from neutron_lib.api.definitions import portbindings
from neutron_lib.api.definitions import provider_net as pnet
from neutron_lib.api import validators
from neutron_lib.callbacks import events
from neutron_lib.callbacks import exceptions as c_exc
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants
from neutron_lib import context
from neutron_lib.db import api as db_api
from neutron_lib import exceptions as exc
from neutron_lib import fixture
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from neutron_lib.plugins.ml2 import api as driver_api
from neutron_lib.plugins import utils as p_utils
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_utils import uuidutils
import testtools
import webob

from neutron._i18n import _
from neutron.common import utils
from neutron.db import agents_db
from neutron.db import provisioning_blocks
from neutron.db import securitygroups_db as sg_db
from neutron.db import segments_db
from neutron.objects import base as base_obj
from neutron.objects import ports as port_obj
from neutron.objects import router as l3_obj
from neutron.plugins.ml2.common import exceptions as ml2_exc
from neutron.plugins.ml2 import db as ml2_db
from neutron.plugins.ml2 import driver_context
from neutron.plugins.ml2.drivers import type_vlan
from neutron.plugins.ml2 import managers
from neutron.plugins.ml2 import models
from neutron.plugins.ml2 import plugin as ml2_plugin
from neutron import quota
from neutron.services.revisions import revision_plugin
from neutron.services.segments import db as segments_plugin_db
from neutron.services.segments import plugin as segments_plugin
from neutron.tests.common import helpers
from neutron.tests.unit import _test_extension_portbindings as test_bindings
from neutron.tests.unit.agent import test_securitygroups_rpc as test_sg_rpc
from neutron.tests.unit.db import test_allowedaddresspairs_db as test_pair
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin
from neutron.tests.unit.db import test_ipam_pluggable_backend as test_ipam
from neutron.tests.unit.extensions import test_extra_dhcp_opt as test_dhcpopts
from neutron.tests.unit.plugins.ml2.drivers import mechanism_logger as \
    mech_logger
from neutron.tests.unit.plugins.ml2.drivers import mechanism_test as mech_test


cfg.CONF.import_opt('network_vlan_ranges',
                    'neutron.plugins.ml2.drivers.type_vlan',
                    group='ml2_type_vlan')


PLUGIN_NAME = 'ml2'

DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
HOST = 'fake_host'
TEST_ROUTER_ID = 'router_id'


# TODO(marun) - Move to somewhere common for reuse
class PluginConfFixture(fixtures.Fixture):
    """Plugin configuration shared across the unit and functional tests."""

    def __init__(self, plugin_name, parent_setup=None):
        super(PluginConfFixture, self).__init__()
        self.plugin_name = plugin_name
        self.parent_setup = parent_setup

    def _setUp(self):
        if self.parent_setup:
            self.parent_setup()


class Ml2ConfFixture(PluginConfFixture):
    def __init__(self, parent_setup=None):
        super(Ml2ConfFixture, self).__init__(PLUGIN_NAME, parent_setup)


class
Ml2PluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase): _mechanism_drivers = ['logger', 'test'] l3_plugin = ('neutron.tests.unit.extensions.test_l3.' 'TestL3NatServicePlugin') def get_additional_service_plugins(self): """Subclasses can return a dictionary of service plugins to load.""" return {} def setup_parent(self): """Perform parent setup with the common plugin configuration class.""" service_plugins = {'l3_plugin_name': self.l3_plugin} service_plugins.update(self.get_additional_service_plugins()) # Ensure that the parent setup can be called without arguments # by the common configuration setUp. parent_setup = functools.partial( super(Ml2PluginV2TestCase, self).setUp, plugin=PLUGIN_NAME, service_plugins=service_plugins, ) self.useFixture(Ml2ConfFixture(parent_setup)) self.port_create_status = 'DOWN' def setUp(self): self.ovo_push_interface_p = mock.patch( 'neutron.plugins.ml2.ovo_rpc.OVOServerRpcInterface') self.ovo_push_interface_p.start() # Enable the test mechanism driver to ensure that # we can successfully call through to all mechanism # driver apis. cfg.CONF.set_override('mechanism_drivers', self._mechanism_drivers, group='ml2') self.physnet = 'physnet1' self.vlan_range = '1:100' self.vlan_range2 = '200:300' self.physnet2 = 'physnet2' self.phys_vrange = ':'.join([self.physnet, self.vlan_range]) self.phys2_vrange = ':'.join([self.physnet2, self.vlan_range2]) cfg.CONF.set_override('network_vlan_ranges', [self.phys_vrange, self.phys2_vrange], group='ml2_type_vlan') self.setup_parent() self.driver = directory.get_plugin() self.context = context.get_admin_context() class TestMl2BulkToggleWithoutBulkless(Ml2PluginV2TestCase): _mechanism_drivers = ['logger', 'test'] def test_bulk_enabled_with_bulk_drivers(self): self.assertFalse(self._skip_native_bulk) class TestMl2BasicGet(test_plugin.TestBasicGet, Ml2PluginV2TestCase): pass class TestMl2V2HTTPResponse(test_plugin.TestV2HTTPResponse, Ml2PluginV2TestCase): pass class TestMl2NetworksV2(test_plugin.TestNetworksV2, Ml2PluginV2TestCase): def setUp(self, plugin=None): super(TestMl2NetworksV2, self).setUp() # provider networks self.pnets = [{'name': 'net1', pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1, 'tenant_id': 'tenant_one'}, {'name': 'net2', pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet2', pnet.SEGMENTATION_ID: 210, 'tenant_id': 'tenant_one'}, {'name': 'net3', pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet2', pnet.SEGMENTATION_ID: 220, 'tenant_id': 'tenant_one'} ] # multiprovider networks self.mp_nets = [{'name': 'net4', mpnet_apidef.SEGMENTS: [{pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet2', pnet.SEGMENTATION_ID: 1}, {pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet2', pnet.SEGMENTATION_ID: 202}], 'tenant_id': 'tenant_one'} ] self.nets = self.mp_nets + self.pnets def test_network_after_create_callback(self): after_create = mock.Mock() registry.subscribe(after_create, resources.NETWORK, events.AFTER_CREATE) with self.network() as n: after_create.assert_called_once_with( resources.NETWORK, events.AFTER_CREATE, mock.ANY, context=mock.ANY, network=mock.ANY) kwargs = after_create.mock_calls[0][2] self.assertEqual(n['network']['id'], kwargs['network']['id']) def test_network_precommit_create_callback(self): precommit_create = mock.Mock() registry.subscribe(precommit_create, resources.NETWORK, events.PRECOMMIT_CREATE) with self.network(): precommit_create.assert_called_once_with( resources.NETWORK, events.PRECOMMIT_CREATE, mock.ANY, context=mock.ANY, 
                network=mock.ANY, request=mock.ANY)

    def test_network_precommit_create_callback_aborts(self):
        precommit_create = mock.Mock()
        registry.subscribe(precommit_create, resources.NETWORK,
                           events.PRECOMMIT_CREATE)
        precommit_create.side_effect = exc.InvalidInput(error_message='x')
        data = {'network': {'tenant_id': 'sometenant', 'name': 'dummy',
                            'admin_state_up': True, 'shared': False}}
        req = self.new_create_request('networks', data)
        res = req.get_response(self.api)
        self.assertEqual(400, res.status_int)

    def test_network_precommit_update_includes_req(self):
        precommit_update = mock.Mock()
        registry.subscribe(precommit_update, resources.NETWORK,
                           events.PRECOMMIT_UPDATE)
        with self.network() as n:
            data = {'network': {'name': 'updated'}}
            req = self.new_update_request('networks', data,
                                          n['network']['id'])
            self.deserialize(self.fmt, req.get_response(self.api))
            precommit_update.assert_called_once_with(
                resources.NETWORK, events.PRECOMMIT_UPDATE, mock.ANY,
                payload=mock.ANY)
            self.assertEqual(
                'updated',
                precommit_update.call_args[1]['payload'].desired_state[
                    'name'])

    def test_network_after_update_callback(self):
        after_update = mock.Mock()
        registry.subscribe(after_update, resources.NETWORK,
                           events.AFTER_UPDATE)
        with self.network() as n:
            data = {'network': {'name': 'updated'}}
            req = self.new_update_request('networks', data,
                                          n['network']['id'])
            self.deserialize(self.fmt, req.get_response(self.api))
            after_update.assert_called_once_with(
                resources.NETWORK, events.AFTER_UPDATE, mock.ANY,
                context=mock.ANY, network=mock.ANY,
                original_network=mock.ANY)
            kwargs = after_update.mock_calls[0][2]
            self.assertEqual(n['network']['name'],
                             kwargs['original_network']['name'])
            self.assertEqual('updated', kwargs['network']['name'])

    def test_network_after_delete_callback(self):
        after_delete = mock.Mock()
        registry.subscribe(after_delete, resources.NETWORK,
                           events.AFTER_DELETE)
        with self.network() as n:
            req = self.new_delete_request('networks', n['network']['id'])
            req.get_response(self.api)
            after_delete.assert_called_once_with(
                resources.NETWORK, events.AFTER_DELETE, mock.ANY,
                context=mock.ANY, network=mock.ANY)
            kwargs = after_delete.mock_calls[0][2]
            self.assertEqual(n['network']['id'], kwargs['network']['id'])

    def test_create_port_obj_bulk(self):
        cfg.CONF.set_override('base_mac', "12:34:56:00")
        test_mac = "00-12-34-56-78-90"
        num_ports = 4
        plugin = directory.get_plugin()
        # Most of the plugin methods are undefined in a weakproxy. This is
        # not the case most of the time - Ml2Plugin is typically the plugin
        # here - but the IPAM classes that inherit this test have a
        # weakproxy here and thus fail. This avoids that error.
if isinstance(plugin, weakref.ProxyTypes): self.skipTest("Bulk port method tests do not apply to IPAM plugin") tenant_id = 'some_tenant' device_owner = "me" ctx = context.Context('', tenant_id) with self.network(tenant_id=tenant_id) as network_to_use: net_id = network_to_use['network']['id'] port = {'port': {'name': 'port', 'network_id': net_id, 'mac_address': constants.ATTR_NOT_SPECIFIED, 'fixed_ips': constants.ATTR_NOT_SPECIFIED, 'admin_state_up': True, 'device_id': 'device_id', 'device_owner': device_owner, 'tenant_id': tenant_id}} ports = [copy.deepcopy(port) for x in range(num_ports)] ports[1]['port']['mac_address'] = test_mac port_data = plugin.create_port_obj_bulk(ctx, ports) self.assertEqual(num_ports, len(port_data)) result_macs = [] for port in port_data: port_mac = str(port.get('mac_address')) self.assertIsNone(validators.validate_mac_address(port_mac)) result_macs.append(port_mac) for ip_addr in port.get('fixed_ips'): self.assertIsNone(validators.validate_ip_address(ip_addr)) self.assertTrue(test_mac in result_macs) def test_bulk_network_before_and_after_events_outside_of_txn(self): # capture session states during each before and after event before = [] after = [] b_func = lambda *a, **k: before.append(k['context'].session.is_active) a_func = lambda *a, **k: after.append(k['context'].session.is_active) registry.subscribe(b_func, resources.NETWORK, events.BEFORE_CREATE) registry.subscribe(a_func, resources.NETWORK, events.AFTER_CREATE) data = [{'tenant_id': self._tenant_id}] * 4 self._create_bulk_from_list( self.fmt, 'network', data, context=context.get_admin_context()) # ensure events captured self.assertTrue(before) self.assertTrue(after) # ensure session was closed for all self.assertFalse(any(before)) self.assertFalse(any(after)) def _create_and_verify_networks(self, networks): for net_idx, net in enumerate(networks): # create req = self.new_create_request('networks', {'network': net}) # verify network = self.deserialize(self.fmt, req.get_response(self.api))['network'] if mpnet_apidef.SEGMENTS not in net: for k, v in net.items(): self.assertEqual(net[k], network[k]) self.assertNotIn(mpnet_apidef.SEGMENTS, network) else: segments = network[mpnet_apidef.SEGMENTS] expected_segments = net[mpnet_apidef.SEGMENTS] self.assertEqual(len(expected_segments), len(segments)) for expected, actual in zip(expected_segments, segments): self.assertEqual(expected, actual) def _lookup_network_by_segmentation_id(self, seg_id, num_expected_nets): params_str = "%s=%s" % (pnet.SEGMENTATION_ID, seg_id) net_req = self.new_list_request('networks', None, params=params_str) networks = self.deserialize(self.fmt, net_req.get_response(self.api)) if num_expected_nets: self.assertIsNotNone(networks) self.assertEqual(num_expected_nets, len(networks['networks'])) else: self.assertIsNone(networks) return networks def test_list_networks_with_segmentation_id(self): self._create_and_verify_networks(self.pnets) # verify we can find the network that we expect lookup_vlan_id = 1 expected_net = [n for n in self.pnets if n[pnet.SEGMENTATION_ID] == lookup_vlan_id].pop() networks = self._lookup_network_by_segmentation_id(lookup_vlan_id, 1) # verify all provider attributes network = networks['networks'][0] for attr in pnet.ATTRIBUTES: self.assertEqual(expected_net[attr], network[attr]) def test_list_mpnetworks_with_segmentation_id(self): self._create_and_verify_networks(self.nets) # get all networks with seg_id=1 (including multisegment networks) lookup_vlan_id = 1 networks = 
self._lookup_network_by_segmentation_id(lookup_vlan_id, 2) # get the mpnet networks = [n for n in networks['networks'] if mpnet_apidef.SEGMENTS in n] network = networks.pop() # verify attributes of the looked up item segments = network[mpnet_apidef.SEGMENTS] expected_segments = self.mp_nets[0][mpnet_apidef.SEGMENTS] self.assertEqual(len(expected_segments), len(segments)) for expected, actual in zip(expected_segments, segments): self.assertEqual(expected, actual) def test_create_network_segment_allocation_fails(self): plugin = directory.get_plugin() retry_fixture = fixture.DBRetryErrorsFixture(max_retries=2) retry_fixture.setUp() with mock.patch.object( plugin.type_manager, 'create_network_segments', side_effect=db_exc.RetryRequest(ValueError()) ) as f: data = {'network': {'tenant_id': 'sometenant', 'name': 'dummy', 'admin_state_up': True, 'shared': False}} req = self.new_create_request('networks', data) res = req.get_response(self.api) self.assertEqual(500, res.status_int) # 1 + retry count self.assertEqual(3, f.call_count) retry_fixture.cleanUp() def test__update_provider_network_attributes_update_attrs(self): plugin = directory.get_plugin() kwargs = {'arg_list': (pnet.NETWORK_TYPE, ), pnet.NETWORK_TYPE: 'vlan'} with self.network(**kwargs) as net: for attribute in set(pnet.ATTRIBUTES) - {pnet.SEGMENTATION_ID}: net_data = {attribute: net['network'][attribute]} self.assertIsNone( plugin._update_provider_network_attributes( self.context, net['network'], net_data)) net_data = {attribute: 'other_value'} self.assertRaises( exc.InvalidInput, plugin._update_provider_network_attributes, self.context, net['network'], net_data) def test__update_provider_network_attributes_segmentation_id(self): plugin = directory.get_plugin() with self.network() as net: with mock.patch.object(plugin, '_update_segmentation_id') as \ mock_update_segmentation_id: net_data = {pnet.SEGMENTATION_ID: 1000} plugin._update_provider_network_attributes( self.context, net['network'], net_data) mock_update_segmentation_id.assert_called_once_with( self.context, net['network'], net_data) def test__update_segmentation_id_multisegment_network(self): plugin = directory.get_plugin() segments = [{pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1}, {pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 2}] with self.network(**{'arg_list': (mpnet_apidef.SEGMENTS, ), mpnet_apidef.SEGMENTS: segments}) as net: self.assertRaises( exc.InvalidInput, plugin._update_segmentation_id, self.context, net['network'], {}) def test__update_segmentation_id_ports_wrong_vif_type(self): plugin = directory.get_plugin() with self.network() as net: with mock.patch.object( port_obj.Port, 'check_network_ports_by_binding_types', return_value=True): self.assertRaises( exc.InvalidInput, plugin._update_segmentation_id, self.context, net['network'], {}) def test__update_segmentation_id_agentless_mech_drivers(self): plugin = directory.get_plugin() segments = [{pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1}] mech_drivers = plugin.mechanism_manager.ordered_mech_drivers for mech_driver in (md.obj for md in mech_drivers if hasattr(md.obj, 'agent_type')): mock.patch.object(type(mech_driver), 'agent_type', new_callable=mock.PropertyMock(return_value=None)).start() with self.network(**{'arg_list': (mpnet_apidef.SEGMENTS, ), mpnet_apidef.SEGMENTS: segments}) as net, \ mock.patch.object( port_obj.Port, 'check_network_ports_by_binding_types', return_value=False) as 
check_network_ports_mock, \ mock.patch.object(plugin.type_manager, 'update_network_segment'), \ mock.patch.object(plugin, 'get_agents') as mock_get_agents: net_data = {pnet.SEGMENTATION_ID: 1000} plugin._update_segmentation_id(self.context, net['network'], net_data) mock_get_agents.assert_not_called() check_network_ports_mock.assert_called_once_with( self.context, net['network']['id'], [portbindings.VIF_TYPE_UNBOUND, portbindings.VIF_TYPE_BINDING_FAILED], negative_search=True) def test_update_network_with_empty_body(self): with self.network() as network: network_id = network["network"]["id"] network_req = self.new_update_request("networks", None, network_id) res = network_req.get_response(self.api) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) self.assertIn("network", res.json['NeutronError']['message']) def test_update_network_with_incorrect_resource_body(self): with self.network() as network: network_id = network["network"]["id"] incorrect_body = {"incorrect": {}} network_req = self.new_update_request("networks", incorrect_body, network_id) res = network_req.get_response(self.api) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) self.assertIn("network", res.json['NeutronError']['message']) class TestMl2NetworksV2AgentMechDrivers(Ml2PluginV2TestCase): _mechanism_drivers = ['logger', 'test', 'test_with_agent'] def test__update_segmentation_id_ports(self): plugin = directory.get_plugin() segments = [{pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1}] with self.network(**{'arg_list': (mpnet_apidef.SEGMENTS, ), mpnet_apidef.SEGMENTS: segments}) as net, \ mock.patch.object( port_obj.Port, 'check_network_ports_by_binding_types', return_value=False) as check_network_ports_mock, \ mock.patch.object(plugin.type_manager, 'update_network_segment'), \ mock.patch.object(plugin, 'get_agents', return_value=[mock.ANY]): net_data = {pnet.SEGMENTATION_ID: 1000} plugin._update_segmentation_id(self.context, net['network'], net_data) check_network_ports_mock.assert_called_once_with( self.context, net['network']['id'], [portbindings.VIF_TYPE_UNBOUND, portbindings.VIF_TYPE_BINDING_FAILED, mech_test.VIF_TYPE_TEST], negative_search=True) class TestExternalNetwork(Ml2PluginV2TestCase): def _create_external_network(self): data = {'network': {'name': 'net1', 'router:external': 'True', 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) network = self.deserialize(self.fmt, network_req.get_response(self.api)) return network def test_external_network_type_none(self): cfg.CONF.set_default('external_network_type', None, group='ml2') network = self._create_external_network() # For external network, expected network type to be # tenant_network_types which is by default 'local'. self.assertEqual(constants.TYPE_LOCAL, network['network'][pnet.NETWORK_TYPE]) # No physical network specified, expected 'None'. self.assertIsNone(network['network'][pnet.PHYSICAL_NETWORK]) # External network will not have a segmentation id. self.assertIsNone(network['network'][pnet.SEGMENTATION_ID]) # External network will not have multiple segments. self.assertNotIn(mpnet_apidef.SEGMENTS, network['network']) def test_external_network_type_vlan(self): cfg.CONF.set_default('external_network_type', constants.TYPE_VLAN, group='ml2') network = self._create_external_network() # For external network, expected network type to be 'vlan'. self.assertEqual(constants.TYPE_VLAN, network['network'][pnet.NETWORK_TYPE]) # Physical network is expected. 
        self.assertIsNotNone(network['network'][pnet.PHYSICAL_NETWORK])
        # External network will have a segmentation id.
        self.assertIsNotNone(network['network'][pnet.SEGMENTATION_ID])
        # External network will not have multiple segments.
        self.assertNotIn(mpnet_apidef.SEGMENTS, network['network'])


class TestMl2NetworksWithVlanTransparencyBase(TestMl2NetworksV2):
    data = {'network': {'name': 'net1',
                        mpnet_apidef.SEGMENTS:
                        [{pnet.NETWORK_TYPE: 'vlan',
                          pnet.PHYSICAL_NETWORK: 'physnet1'}],
                        'tenant_id': 'tenant_one',
                        'vlan_transparent': 'True'}}

    def setUp(self, plugin=None):
        cfg.CONF.set_override('vlan_transparent', True)
        super(TestMl2NetworksWithVlanTransparencyBase, self).setUp(plugin)


class TestMl2NetworksWithVlanTransparency(
        TestMl2NetworksWithVlanTransparencyBase):
    _mechanism_drivers = ['test']

    def test_create_network_vlan_transparent_fail(self):
        with mock.patch.object(mech_test.TestMechanismDriver,
                               'check_vlan_transparency',
                               return_value=False):
            network_req = self.new_create_request('networks', self.data)
            res = network_req.get_response(self.api)
            self.assertEqual(500, res.status_int)
            error_result = self.deserialize(self.fmt, res)['NeutronError']
            self.assertEqual("VlanTransparencyDriverError",
                             error_result['type'])

    def test_create_network_vlan_transparent(self):
        with mock.patch.object(mech_test.TestMechanismDriver,
                               'check_vlan_transparency',
                               return_value=True):
            network_req = self.new_create_request('networks', self.data)
            res = network_req.get_response(self.api)
            self.assertEqual(201, res.status_int)
            network = self.deserialize(self.fmt, res)['network']
            self.assertIn('vlan_transparent', network)


class TestMl2NetworksWithVlanTransparencyAndMTU(
        TestMl2NetworksWithVlanTransparencyBase):
    _mechanism_drivers = ['test']

    def test_create_network_vlan_transparent_and_mtu(self):
        with mock.patch.object(mech_test.TestMechanismDriver,
                               'check_vlan_transparency',
                               return_value=True):
            cfg.CONF.set_override('path_mtu', 1000, group='ml2')
            cfg.CONF.set_override('global_physnet_mtu', 1000)
            network_req = self.new_create_request('networks', self.data)
            res = network_req.get_response(self.api)
            self.assertEqual(201, res.status_int)
            network = self.deserialize(self.fmt, res)['network']
            self.assertEqual(1000, network['mtu'])
            self.assertIn('vlan_transparent', network)
            self.assertTrue(network['vlan_transparent'])


class TestMl2NetworksWithAvailabilityZone(TestMl2NetworksV2):
    def test_create_network_availability_zone(self):
        az_hints = ['az1', 'az2']
        data = {'network': {'name': 'net1',
                            az_def.AZ_HINTS: az_hints,
                            'tenant_id': 'tenant_one'}}
        with mock.patch.object(agents_db.AgentAvailabilityZoneMixin,
                               'validate_availability_zones'):
            network_req = self.new_create_request('networks', data)
            res = network_req.get_response(self.api)
            self.assertEqual(201, res.status_int)
            network = self.deserialize(self.fmt, res)['network']
            self.assertEqual(az_hints, network[az_def.AZ_HINTS])


class TestMl2SubnetsV2(test_plugin.TestSubnetsV2,
                       Ml2PluginV2TestCase):
    def test_subnet_before_create_callback(self):
        before_create = mock.Mock()
        registry.subscribe(before_create, resources.SUBNET,
                           events.BEFORE_CREATE)
        with self.subnet() as s:
            before_create.assert_called_once_with(
                resources.SUBNET, events.BEFORE_CREATE, mock.ANY,
                context=mock.ANY, subnet=mock.ANY)
            kwargs = before_create.mock_calls[0][2]
            self.assertEqual(s['subnet']['cidr'], kwargs['subnet']['cidr'])
            self.assertEqual(s['subnet']['network_id'],
                             kwargs['subnet']['network_id'])

    def test_subnet_after_create_callback(self):
        after_create = mock.Mock()
registry.subscribe(after_create, resources.SUBNET, events.AFTER_CREATE) with self.subnet() as s: after_create.assert_called_once_with( resources.SUBNET, events.AFTER_CREATE, mock.ANY, context=mock.ANY, subnet=mock.ANY) kwargs = after_create.mock_calls[0][2] self.assertEqual(s['subnet']['id'], kwargs['subnet']['id']) def test_port_update_subnetnotfound(self): with self.network() as n: with self.subnet(network=n, cidr='1.1.1.0/24') as s1,\ self.subnet(network=n, cidr='1.1.2.0/24') as s2,\ self.subnet(network=n, cidr='1.1.3.0/24') as s3: fixed_ips = [{'subnet_id': s1['subnet']['id']}, {'subnet_id': s2['subnet']['id']}, {'subnet_id': s3['subnet']['id']}] with self.port(subnet=s1, fixed_ips=fixed_ips, device_owner=constants.DEVICE_OWNER_DHCP) as p: plugin = directory.get_plugin() orig_update = plugin.update_port def delete_before_update(ctx, *args, **kwargs): # swap back out with original so only called once plugin.update_port = orig_update # delete s2 in the middle of s1 port_update plugin.delete_subnet(ctx, s2['subnet']['id']) return plugin.update_port(ctx, *args, **kwargs) plugin.update_port = delete_before_update req = self.new_delete_request('subnets', s1['subnet']['id']) res = req.get_response(self.api) self.assertEqual(204, res.status_int) # ensure port only has 1 IP on s3 port = self._show('ports', p['port']['id'])['port'] self.assertEqual(1, len(port['fixed_ips'])) self.assertEqual(s3['subnet']['id'], port['fixed_ips'][0]['subnet_id']) def test_update_subnet_with_empty_body(self): with self.subnet() as subnet: subnet_id = subnet["subnet"]["id"] subnet_req = self.new_update_request("subnets", None, subnet_id) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) self.assertIn("subnet", res.json['NeutronError']['message']) def test_update_subnet_with_incorrect_resource_body(self): with self.subnet() as subnet: subnet_id = subnet["subnet"]["id"] incorrect_body = {"incorrect": {}} subnet_req = self.new_update_request("subnets", incorrect_body, subnet_id) res = subnet_req.get_response(self.api) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) self.assertIn("subnet", res.json['NeutronError']['message']) def test_subnet_after_update_callback(self): after_update = mock.Mock() registry.subscribe(after_update, resources.SUBNET, events.AFTER_UPDATE) with self.subnet() as s: data = {'subnet': {'name': 'updated'}} req = self.new_update_request('subnets', data, s['subnet']['id']) self.deserialize(self.fmt, req.get_response(self.api)) after_update.assert_called_once_with( resources.SUBNET, events.AFTER_UPDATE, mock.ANY, context=mock.ANY, subnet=mock.ANY, original_subnet=mock.ANY) kwargs = after_update.mock_calls[0][2] self.assertEqual(s['subnet']['name'], kwargs['original_subnet']['name']) self.assertEqual('updated', kwargs['subnet']['name']) def test_subnet_after_delete_callback(self): after_delete = mock.Mock() registry.subscribe(after_delete, resources.SUBNET, events.AFTER_DELETE) with self.subnet() as s: req = self.new_delete_request('subnets', s['subnet']['id']) req.get_response(self.api) after_delete.assert_called_once_with( resources.SUBNET, events.AFTER_DELETE, mock.ANY, context=mock.ANY, subnet=mock.ANY) kwargs = after_delete.mock_calls[0][2] self.assertEqual(s['subnet']['id'], kwargs['subnet']['id']) def test_delete_subnet_race_with_dhcp_port_creation(self): with self.network() as network: with self.subnet(network=network) as subnet: subnet_id = subnet['subnet']['id'] attempt = [0] def create_dhcp_port(*args, **kwargs): """A 
                    method to emulate a race condition.

                    Adds a DHCP port in the middle of the subnet delete.
                    """
                    if attempt[0] > 0:
                        return False
                    attempt[0] += 1
                    data = {'port': {
                        'network_id': network['network']['id'],
                        'tenant_id': network['network']['tenant_id'],
                        'name': 'port1',
                        'admin_state_up': 1,
                        'device_id': '',
                        'device_owner': constants.DEVICE_OWNER_DHCP,
                        'fixed_ips': [{'subnet_id': subnet_id}]}}
                    plugin = directory.get_plugin()
                    plugin.create_port(context.get_admin_context(), data)

                # we mock _subnet_check_ip_allocations with a method that
                # creates a DHCP port 'in the middle' of subnet_delete,
                # causing a retry; this way the subnet is deleted on the
                # second attempt
                registry.subscribe(create_dhcp_port, resources.SUBNET,
                                   events.PRECOMMIT_DELETE)
                req = self.new_delete_request('subnets', subnet_id)
                res = req.get_response(self.api)
                self.assertEqual(204, res.status_int)
                self.assertEqual(1, attempt[0])

    def test_create_subnet_check_mtu_in_mech_context(self):
        plugin = directory.get_plugin()
        plugin.mechanism_manager.create_subnet_precommit = mock.Mock()
        net_arg = {pnet.NETWORK_TYPE: 'vxlan',
                   pnet.SEGMENTATION_ID: '1'}
        network = self._make_network(self.fmt, 'net1', True,
                                     arg_list=(pnet.NETWORK_TYPE,
                                               pnet.SEGMENTATION_ID,),
                                     **net_arg)
        with self.subnet(network=network):
            mock_subnet_pre = plugin.mechanism_manager.create_subnet_precommit
            observed_mech_context = mock_subnet_pre.call_args_list[0][0][0]
            self.assertEqual(network['network']['mtu'],
                             observed_mech_context.network.current['mtu'])


class TestMl2DbOperationBounds(test_plugin.DbOperationBoundMixin,
                               Ml2PluginV2TestCase):
    """Test cases to assert constant query count for list operations.

    These test cases assert that an increase in the number of objects
    does not result in an increase of the number of db operations. All
    database lookups during a list operation should be performed in bulk,
    so the number of queries required for 2 objects instead of 1 should
    stay the same.
""" def setUp(self): super(TestMl2DbOperationBounds, self).setUp() self.kwargs = self.get_api_kwargs() def make_network(self): return self._make_network(self.fmt, 'name', True, **self.kwargs) def make_subnet(self): net = self.make_network() setattr(self, '_subnet_count', getattr(self, '_subnet_count', 0) + 1) cidr = '1.%s.0.0/24' % self._subnet_count return self._make_subnet(self.fmt, net, None, cidr, **self.kwargs) def make_port(self): net = self.make_network() return self._make_port(self.fmt, net['network']['id'], **self.kwargs) def test_network_list_queries_constant(self): self._assert_object_list_queries_constant(self.make_network, 'networks') def test_subnet_list_queries_constant(self): self._assert_object_list_queries_constant(self.make_subnet, 'subnets') def test_port_list_queries_constant(self): self._assert_object_list_queries_constant(self.make_port, 'ports') self._assert_object_list_queries_constant(self.make_port, 'ports', filters=['device_id']) self._assert_object_list_queries_constant(self.make_port, 'ports', filters=['device_id', 'device_owner']) self._assert_object_list_queries_constant(self.make_port, 'ports', filters=['tenant_id', 'name', 'device_id']) class TestMl2DbOperationBoundsTenant(TestMl2DbOperationBounds): admin = False class TestMl2DbOperationBoundsTenantRbac(TestMl2DbOperationBoundsTenant): def make_port_in_shared_network(self): context_ = self._get_context() # create shared network owned by the tenant; we use direct driver call # because default policy does not allow users to create shared networks net = self.driver.create_network( context.get_admin_context(), {'network': {'name': 'net1', 'tenant_id': context_.project_id, 'admin_state_up': True, 'shared': True}}) # create port that belongs to another tenant return self._make_port( self.fmt, net['id'], set_context=True, tenant_id='fake_tenant') def test_port_list_in_shared_network_queries_constant(self): self._assert_object_list_queries_constant( self.make_port_in_shared_network, 'ports') class TestMl2RevivedAgentsBindPorts(Ml2PluginV2TestCase): _mechanism_drivers = ['openvswitch', 'logger'] def _test__retry_binding_revived_agents(self, event, agent_status, admin_state_up, agent_type, ports, should_bind_ports): plugin = directory.get_plugin() context = mock.Mock() agent = { 'agent_status': agent_status, 'admin_state_up': admin_state_up, 'agent_type': agent_type} host = "test_host" binding = mock.MagicMock( vif_type=portbindings.VIF_TYPE_BINDING_FAILED, host=host) for port in ports: port.bindings = [binding] with mock.patch( 'neutron.objects.ports.Port.get_ports_by_binding_type_and_host', return_value=ports ) as get_ports_by_binding_type_and_host, mock.patch.object( plugin, 'get_network', return_value=mock.Mock() ) as get_network, mock.patch( 'neutron.plugins.ml2.db.get_binding_level_objs', return_value=None ) as get_binding_level_objs, mock.patch( 'neutron.plugins.ml2.driver_context.PortContext' ) as port_context, mock.patch.object( plugin, '_bind_port_if_needed' ) as bind_port_if_needed: plugin._retry_binding_revived_agents( resources.AGENT, event, plugin, events.DBEventPayload( context=context, metadata={'host': host}, states=(agent,), desired_state=agent ) ) if (agent_status == agent_consts.AGENT_ALIVE or not admin_state_up or agent_type not in plugin._rebind_on_revive_agent_types): get_ports_by_binding_type_and_host.assert_not_called() else: get_ports_by_binding_type_and_host.assert_called_once_with( context, portbindings.VIF_TYPE_BINDING_FAILED, host) if should_bind_ports: get_network_expected_calls = [ 
mock.call(context, port.network_id) for port in ports] get_network.assert_has_calls(get_network_expected_calls) get_binding_level_expected_calls = [ mock.call(context, port.id, host) for port in ports] get_binding_level_objs.assert_has_calls( get_binding_level_expected_calls) bind_port_if_needed.assert_called_once_with(port_context()) else: get_network.assert_not_called() get_binding_level_objs.assert_not_called() bind_port_if_needed.assert_not_called() def test__retry_binding_revived_agents(self): port = mock.MagicMock( id=uuidutils.generate_uuid()) self._test__retry_binding_revived_agents( events.AFTER_UPDATE, agent_consts.AGENT_REVIVED, True, constants.AGENT_TYPE_OVS, [port], should_bind_ports=True) def test__retry_binding_revived_agents_no_binding_failed_ports(self): self._test__retry_binding_revived_agents( events.AFTER_UPDATE, agent_consts.AGENT_REVIVED, True, constants.AGENT_TYPE_OVS, [], should_bind_ports=False) def test__retry_binding_revived_agents_alive_agent(self): port = mock.MagicMock( id=uuidutils.generate_uuid()) self._test__retry_binding_revived_agents( events.AFTER_UPDATE, agent_consts.AGENT_ALIVE, True, constants.AGENT_TYPE_OVS, [port], should_bind_ports=False) def test__retry_binding_revived_agents_not_binding_agent(self): port = mock.MagicMock( id=uuidutils.generate_uuid()) self._test__retry_binding_revived_agents( events.AFTER_UPDATE, agent_consts.AGENT_REVIVED, True, "Other agent which don't support binding", [port], should_bind_ports=False) def test__retry_binding_revived_agents_agent_admin_state_down(self): port = mock.MagicMock( id=uuidutils.generate_uuid()) self._test__retry_binding_revived_agents( events.AFTER_UPDATE, agent_consts.AGENT_REVIVED, False, constants.AGENT_TYPE_OVS, [port], should_bind_ports=False) class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase): def test__port_provisioned_with_blocks(self): plugin = directory.get_plugin() ups = mock.patch.object(plugin, 'update_port_status').start() with self.port() as port: mock.patch('neutron.plugins.ml2.plugin.db.get_port').start() provisioning_blocks.add_provisioning_component( self.context, port['port']['id'], 'port', 'DHCP') plugin._port_provisioned( 'port', 'evt', 'trigger', payload=events.DBEventPayload( context, resource_id=port['port']['id'])) self.assertFalse(ups.called) def test__port_provisioned_no_binding(self): device_id = uuidutils.generate_uuid() plugin = directory.get_plugin() with self.network() as net: net_id = net['network']['id'] port_id = uuidutils.generate_uuid() port_obj.Port(self.context, id=port_id, project_id='tenant', network_id=net_id, mac_address=netaddr.EUI('08-00-01-02-03-04'), admin_state_up=True, status='ACTIVE', device_id=device_id, device_owner=DEVICE_OWNER_COMPUTE).create() self.assertIsNone(plugin._port_provisioned( 'port', 'evt', 'trigger', payload=events.DBEventPayload( self.context, resource_id=port_id))) def test__port_provisioned_port_admin_state_down(self): plugin = directory.get_plugin() ups = mock.patch.object(plugin, 'update_port_status').start() port_id = 'fake_port_id' def getitem(key): return constants.ACTIVE binding = mock.MagicMock(vif_type=portbindings.VIF_TYPE_OVS) binding.__getitem__.side_effect = getitem port = mock.MagicMock( id=port_id, admin_state_up=False, port_binding=[binding]) with mock.patch('neutron.plugins.ml2.plugin.db.get_port', return_value=port): plugin._port_provisioned('port', 'evt', 'trigger', payload=events.DBEventPayload( self.context, resource_id=port_id)) self.assertFalse(ups.called) def 
test_port_after_create_outside_transaction(self): self.tx_open = True receive = lambda *a, **k: setattr(self, 'tx_open', k['context'].session.is_active) registry.subscribe(receive, resources.PORT, events.AFTER_CREATE) with self.port(): self.assertFalse(self.tx_open) def test_port_after_update_outside_transaction(self): self.tx_open = True receive = lambda *a, **k: setattr(self, 'tx_open', k['context'].session.is_active) with self.port() as p: registry.subscribe(receive, resources.PORT, events.AFTER_UPDATE) self._update('ports', p['port']['id'], {'port': {'name': 'update'}}) self.assertFalse(self.tx_open) def test_port_after_delete_outside_transaction(self): self.tx_open = True receive = lambda *a, **k: setattr(self, 'tx_open', k['context'].session.is_active) with self.port() as p: registry.subscribe(receive, resources.PORT, events.AFTER_DELETE) self._delete('ports', p['port']['id']) self.assertFalse(self.tx_open) def test_create_router_port_and_fail_create_postcommit(self): with mock.patch.object(managers.MechanismManager, 'create_port_postcommit', side_effect=ml2_exc.MechanismDriverError( method='create_port_postcommit')): l3_plugin = directory.get_plugin(plugin_constants.L3) data = {'router': {'name': 'router', 'admin_state_up': True, 'tenant_id': 'fake_tenant'}} r = l3_plugin.create_router(self.context, data) with self.subnet() as s: data = {'subnet_id': s['subnet']['id']} self.assertRaises(ml2_exc.MechanismDriverError, l3_plugin.add_router_interface, self.context, r['id'], data) res_ports = self._list('ports')['ports'] self.assertEqual([], res_ports) def test_create_router_port_and_fail_bind_port_if_needed(self): with mock.patch.object(ml2_plugin.Ml2Plugin, '_bind_port_if_needed', side_effect=ml2_exc.MechanismDriverError( method='_bind_port_if_needed')): l3_plugin = directory.get_plugin(plugin_constants.L3) data = {'router': {'name': 'router', 'admin_state_up': True, 'tenant_id': 'fake_tenant'}} r = l3_plugin.create_router(self.context, data) with self.subnet() as s: data = {'subnet_id': s['subnet']['id']} self.assertRaises(ml2_exc.MechanismDriverError, l3_plugin.add_router_interface, self.context, r['id'], data) res_ports = self._list('ports')['ports'] self.assertEqual([], res_ports) def test_update_port_with_empty_body(self): with self.port() as port: port_id = port["port"]["id"] port_req = self.new_update_request("ports", None, port_id) res = port_req.get_response(self.api) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) self.assertIn("port", res.json['NeutronError']['message']) def test_update_port_with_incorrect_resource_body(self): with self.port() as port: port_id = port["port"]["id"] incorrect_body = {"incorrect": {}} port_req = self.new_update_request("ports", incorrect_body, port_id) res = port_req.get_response(self.api) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) self.assertIn("port", res.json['NeutronError']['message']) def test_update_port_status_build(self): with self.port() as port: self.assertEqual('DOWN', port['port']['status']) self.assertEqual('DOWN', self.port_create_status) def test_notify_port_updated_for_status_change(self): ctx = context.get_admin_context() plugin = directory.get_plugin() with self.port() as port: with mock.patch.object(self.plugin, '_notify_port_updated') as notify_mock: port['port']['status'] = constants.PORT_STATUS_ACTIVE plugin.update_port(ctx, port['port']['id'], port) self.assertTrue(notify_mock.called) def test_update_port_status_short_id(self): ctx = context.get_admin_context() plugin = 
directory.get_plugin() with self.port() as port: with mock.patch.object(ml2_db, 'get_binding_level_objs', return_value=[]) as mock_gbl: port_id = port['port']['id'] short_id = port_id[:11] plugin.update_port_status(ctx, short_id, 'UP') mock_gbl.assert_called_once_with(mock.ANY, port_id, mock.ANY) def test_update_port_with_empty_data(self): ctx = context.get_admin_context() plugin = directory.get_plugin() with self.port() as port: port_id = port['port']['id'] new_port = plugin.update_port(ctx, port_id, {"port": {}}) self.assertEqual(port["port"], new_port) def _add_fake_dhcp_agent(self): agent = mock.Mock() plugin = directory.get_plugin() self.get_dhcp_mock = mock.patch.object( plugin, 'get_dhcp_agents_hosting_networks', return_value=[agent]).start() def test_dhcp_provisioning_blocks_inserted_on_create_with_agents(self): self._add_fake_dhcp_agent() with mock.patch.object(provisioning_blocks, 'add_provisioning_component') as ap: with self.port(): self.assertTrue(ap.called) def test_dhcp_provisioning_blocks_skipped_on_create_with_no_dhcp(self): self._add_fake_dhcp_agent() with self.subnet(enable_dhcp=False) as subnet: with mock.patch.object(provisioning_blocks, 'add_provisioning_component') as ap: with self.port(subnet=subnet): self.assertFalse(ap.called) def _test_dhcp_provisioning_blocks_inserted_on_update(self, update_dict, expected_block): ctx = context.get_admin_context() plugin = directory.get_plugin() self._add_fake_dhcp_agent() with self.port() as port: with mock.patch.object(provisioning_blocks, 'add_provisioning_component') as ap: port['port'].update(update_dict) plugin.update_port(ctx, port['port']['id'], port) self.assertEqual(expected_block, ap.called) def test_dhcp_provisioning_blocks_not_inserted_on_no_addr_change(self): update = {'binding:host_id': 'newhost'} self._test_dhcp_provisioning_blocks_inserted_on_update(update, False) def test_dhcp_provisioning_blocks_inserted_on_addr_change(self): update = {'binding:host_id': 'newhost', 'mac_address': '11:22:33:44:55:66'} self._test_dhcp_provisioning_blocks_inserted_on_update(update, True) def test_dhcp_provisioning_blocks_removed_without_dhcp_agents(self): with mock.patch.object(provisioning_blocks, 'remove_provisioning_component') as cp: with self.port(): self.assertTrue(cp.called) def test_create_update_get_port_same_fixed_ips_order(self): ctx = context.get_admin_context() plugin = directory.get_plugin() initial_fixed_ips = [{'ip_address': '10.0.0.5'}, {'ip_address': '10.0.0.7'}, {'ip_address': '10.0.0.6'}] with self.port(fixed_ips=initial_fixed_ips) as port: show = plugin.get_port(ctx, port['port']['id']) self.assertEqual(port['port']['fixed_ips'], show['fixed_ips']) new_fixed_ips = list(reversed(initial_fixed_ips)) port['port']['fixed_ips'] = new_fixed_ips updated = plugin.update_port(ctx, port['port']['id'], port) self.assertEqual(show['fixed_ips'], updated['fixed_ips']) updated = plugin.get_port(ctx, port['port']['id']) self.assertEqual(show['fixed_ips'], updated['fixed_ips']) def test_update_port_fixed_ip_changed(self): ctx = context.get_admin_context() plugin = directory.get_plugin() fixed_ip_data = [{'ip_address': '10.0.0.4'}] with self.port(fixed_ips=fixed_ip_data) as port,\ mock.patch.object( plugin.notifier, 'security_groups_member_updated') as sg_member_update: port['port']['fixed_ips'][0]['ip_address'] = '10.0.0.3' plugin.update_port(ctx, port['port']['id'], port) self.assertTrue(sg_member_update.called) def test_update_port_name_do_not_notify_sg(self): ctx = context.get_admin_context() plugin = directory.get_plugin() 
port_name = "port_name" with self.port(name=port_name) as port,\ mock.patch.object( plugin.notifier, 'security_groups_member_updated') as sg_member_update: port['port']['name'] = 'new_port_name' plugin.update_port(ctx, port['port']['id'], port) self.assertFalse(sg_member_update.called) def test_update_port_status_with_network(self): registry.clear() # don't care about callback behavior ctx = context.get_admin_context() plugin = directory.get_plugin() with self.port() as port: net = plugin.get_network(ctx, port['port']['network_id']) with mock.patch.object(plugin, 'get_networks') as get_nets: plugin.update_port_status(ctx, port['port']['id'], 'UP', network=net) self.assertFalse(get_nets.called) def test_update_port_mac(self): self.check_update_port_mac( host_arg={portbindings.HOST_ID: HOST}, arg_list=(portbindings.HOST_ID,)) def test_update_port_regenerate_mac(self): ctx = context.get_admin_context() plugin = directory.get_plugin() data = {'port': {'mac_address': None}} with self.port() as port: current_mac = port['port']['mac_address'] req = self.new_update_request('ports', data, port['port']['id']) self.assertEqual(200, req.get_response(self.api).status_int) new_mac = plugin.get_port(ctx, port['port']['id'])['mac_address'] self.assertNotEqual(current_mac, new_mac) self.assertTrue(netaddr.valid_mac(new_mac)) def test_update_port_mac_does_not_change(self): ctx = context.get_admin_context() plugin = directory.get_plugin() data = {'port': {'description': 'Port Description'}} with self.port() as port: current_mac = port['port']['mac_address'] req = self.new_update_request('ports', data, port['port']['id']) self.assertEqual(200, req.get_response(self.api).status_int) new_mac = plugin.get_port(ctx, port['port']['id'])['mac_address'] self.assertEqual(current_mac, new_mac) def test_update_non_existent_port(self): ctx = context.get_admin_context() plugin = directory.get_plugin() data = {'port': {'admin_state_up': False}} self.assertRaises(exc.PortNotFound, plugin.update_port, ctx, 'invalid-uuid', data) def test_delete_non_existent_port(self): ctx = context.get_admin_context() plugin = directory.get_plugin() with mock.patch.object(ml2_plugin.LOG, 'debug') as log_debug: plugin.delete_port(ctx, 'invalid-uuid', l3_port_check=False) log_debug.assert_has_calls([ mock.call(_("Deleting port %s"), 'invalid-uuid'), mock.call(_("The port '%s' was deleted"), 'invalid-uuid') ]) def test_l3_cleanup_on_net_delete(self): l3plugin = directory.get_plugin(plugin_constants.L3) kwargs = {'arg_list': (extnet_apidef.EXTERNAL,), extnet_apidef.EXTERNAL: True} with self.network(**kwargs) as n: with self.subnet(network=n, cidr='200.0.0.0/22'): l3plugin.create_floatingip( context.get_admin_context(), {'floatingip': {'floating_network_id': n['network']['id'], 'tenant_id': n['network']['tenant_id'], 'dns_name': '', 'dns_domain': ''}} ) self._delete('networks', n['network']['id']) flips = l3plugin.get_floatingips(context.get_admin_context()) self.assertFalse(flips) def test_create_ports_bulk_port_binding_failure(self): ctx = context.get_admin_context() with self.network() as net: plugin = directory.get_plugin() with mock.patch.object(plugin, '_process_port_binding', side_effect=ml2_exc.MechanismDriverError( method='create_port_bulk')) as _process_port_binding: res = self._create_port_bulk(self.fmt, 2, net['network']['id'], 'test', True, context=ctx) self.assertTrue(_process_port_binding.called) # We expect a 500 as we injected a fault in the plugin self._validate_behavior_on_bulk_failure( res, 'ports', 
webob.exc.HTTPServerError.code) def test_create_ports_bulk_with_sec_grp(self): ctx = context.get_admin_context() plugin = directory.get_plugin() with self.network() as net,\ mock.patch.object(plugin.notifier, 'security_groups_member_updated') as m_upd: res = self._create_port_bulk(self.fmt, 3, net['network']['id'], 'test', True, context=ctx) ports = self.deserialize(self.fmt, res) if 'ports' in ports: used_sg = ports['ports'][0]['security_groups'] m_upd.assert_has_calls( [mock.call(ctx, [sg]) for sg in used_sg], any_order=True) else: self.assertTrue('ports' in ports) def test_create_ports_bulk_with_sec_grp_member_provider_update(self): ctx = context.get_admin_context() plugin = directory.get_plugin() bulk_mock_name = "security_groups_member_updated" with self.network() as net,\ mock.patch.object(plugin.notifier, bulk_mock_name) as m_upd: net_id = net['network']['id'] data = [{ 'network_id': net_id, 'tenant_id': self._tenant_id }, { 'network_id': net_id, 'tenant_id': self._tenant_id, 'device_owner': constants.DEVICE_OWNER_DHCP } ] res = self._create_bulk_from_list(self.fmt, 'port', data, context=ctx) ports = self.deserialize(self.fmt, res) used_sg = ports['ports'][0]['security_groups'] m_upd.assert_called_with(ctx, used_sg) m_upd.reset_mock() data[0]['device_owner'] = constants.DEVICE_OWNER_DHCP self._create_bulk_from_list(self.fmt, 'port', data, context=ctx) self.assertFalse(m_upd.called) def test_create_ports_bulk_with_sec_grp_provider_update_ipv6(self): ctx = context.get_admin_context() plugin = directory.get_plugin() fake_prefix = '2001:db8::/64' fake_gateway = 'fe80::1' with self.network() as net: with self.subnet(net, gateway_ip=fake_gateway, cidr=fake_prefix, ip_version=constants.IP_VERSION_6) as snet_v6,\ mock.patch.object( plugin.notifier, 'security_groups_member_updated') as m_upd: net_id = net['network']['id'] data = [{ 'network_id': net_id, 'tenant_id': self._tenant_id, 'fixed_ips': [{'subnet_id': snet_v6['subnet']['id']}], 'device_owner': constants.DEVICE_OWNER_ROUTER_INTF } ] self._create_bulk_from_list(self.fmt, 'port', data, context=ctx) self.assertFalse(m_upd.called) def test_delete_port_no_notify_in_disassociate_floatingips(self): ctx = context.get_admin_context() plugin = directory.get_plugin() l3plugin = directory.get_plugin(plugin_constants.L3) with self.port() as port,\ mock.patch.object( l3plugin, 'disassociate_floatingips') as disassociate_floatingips,\ mock.patch.object(registry, 'notify') as notify: port_id = port['port']['id'] plugin.delete_port(ctx, port_id) # check that no notification was requested while under # transaction disassociate_floatingips.assert_has_calls([ mock.call(ctx, port_id, do_notify=False) ]) # check that notifier was still triggered self.assertTrue(notify.call_counts) def test_registry_notify_before_after_port_binding(self): plugin = directory.get_plugin() ctx = context.get_admin_context() b_update_events = [] a_update_events = [] b_receiver = lambda *a, **k: b_update_events.append(k) a_receiver = lambda *a, **k: a_update_events.append(k['port']) registry.subscribe(b_receiver, resources.PORT, events.BEFORE_UPDATE) registry.subscribe(a_receiver, resources.PORT, events.AFTER_UPDATE) with self.port() as p: port = {'port': {'binding:host_id': 'newhost'}} plugin.update_port(ctx, p['port']['id'], port) # updating in the host should result in two AFTER_UPDATE events. 
# one to change the host_id, the second to commit a binding self.assertEqual(2, len(b_update_events)) self.assertEqual({'context': ctx, 'port': {'binding:host_id': 'newhost'}, 'original_port': mock.ANY}, b_update_events[0]) self.assertIn('orig_binding', b_update_events[1]) self.assertIn('new_binding', b_update_events[1]) self.assertDictContainsSubset({'context': ctx}, b_update_events[1]) self.assertDictContainsSubset({ 'admin_state_up': True, 'binding:host_id': 'newhost', 'binding:vif_type': 'unbound', 'binding:vnic_type': u'normal', 'status': 'DOWN'}, b_update_events[1]['port']) self.assertEqual('newhost', a_update_events[0]['binding:host_id']) self.assertEqual('unbound', a_update_events[0]['binding:vif_type']) self.assertEqual('newhost', a_update_events[1]['binding:host_id']) self.assertNotEqual('unbound', a_update_events[1]['binding:vif_type']) def test_check_if_compute_port_serviced_by_dvr(self): self.assertTrue(utils.is_dvr_serviced(DEVICE_OWNER_COMPUTE)) def test_check_if_dhcp_port_serviced_by_dvr(self): self.assertTrue(utils.is_dvr_serviced(constants.DEVICE_OWNER_DHCP)) def test_check_if_port_not_serviced_by_dvr(self): self.assertFalse(utils.is_dvr_serviced( constants.DEVICE_OWNER_ROUTER_INTF)) def test_disassociate_floatingips_do_notify_returns_nothing(self): ctx = context.get_admin_context() l3plugin = directory.get_plugin(plugin_constants.L3) with self.port() as port: port_id = port['port']['id'] # check that nothing is returned when notifications are handled # by the called method self.assertIsNone(l3plugin.disassociate_floatingips(ctx, port_id)) def test_create_port_tolerates_db_deadlock(self): plugin = directory.get_plugin() with self.network() as net: with self.subnet(network=net) as subnet: _orig = plugin._get_port self._failed = False def fail_once(*args, **kwargs): if not self._failed: self._failed = True raise db_exc.DBDeadlock() return _orig(*args, **kwargs) with mock.patch.object(plugin, '_get_port', side_effect=fail_once) as get_port_mock: port_kwargs = {portbindings.HOST_ID: 'host1', 'subnet': subnet, 'device_id': 'deadlocktest'} with self.port(arg_list=(portbindings.HOST_ID,), **port_kwargs) as port: self.assertTrue(port['port']['id']) self.assertTrue(get_port_mock.called) # make sure that we didn't create more than one port on # the retry query_params = "network_id=%s" % net['network']['id'] query_params += "&device_id=%s" % 'deadlocktest' ports = self._list('ports', query_params=query_params) self.assertEqual(1, len(ports['ports'])) def test_delete_port_tolerates_db_deadlock(self): ctx = context.get_admin_context() plugin = directory.get_plugin() with self.port() as port: port_db = plugin._get_port(ctx, port['port']['id']) with mock.patch.object(plugin, '_get_port') as gp: gp.side_effect = [db_exc.DBDeadlock] + [port_db] * 3 req = self.new_delete_request('ports', port['port']['id']) res = req.get_response(self.api) self.assertEqual(204, res.status_int) self.assertGreater(gp.call_count, 1) self.assertRaises( exc.PortNotFound, plugin.get_port, ctx, port['port']['id']) def test_port_create_resilient_to_duplicate_records(self): def make_port(): with self.port(): pass self._test_operation_resilient_to_ipallocation_failure(make_port) def test_port_update_resilient_to_duplicate_records(self): cidr = '10.0.0.0/24' allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.8'}] with self.subnet(cidr=cidr, allocation_pools=allocation_pools) as subnet: with self.port(subnet=subnet) as p: data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.9'}]}} req =
self.new_update_request('ports', data, p['port']['id']) def do_request(): self.assertEqual(200, req.get_response(self.api).status_int) self._test_operation_resilient_to_ipallocation_failure( do_request) def _test_operation_resilient_to_ipallocation_failure(self, func): class IPAllocationsGrenade(object): insert_ip_called = False except_raised = False def execute(self, con, curs, stmt, *args, **kwargs): if 'INSERT INTO ipallocations' in stmt: self.insert_ip_called = True def commit(self, con): # we blow up on commit to simulate another thread/server # stealing our IP before our transaction was done if self.insert_ip_called and not self.except_raised: self.except_raised = True raise db_exc.DBDuplicateEntry() listener = IPAllocationsGrenade() engine = db_api.CONTEXT_WRITER.get_engine() db_api.sqla_listen(engine, 'before_cursor_execute', listener.execute) db_api.sqla_listen(engine, 'commit', listener.commit) func() # make sure that the grenade went off during the commit self.assertTrue(listener.except_raised) def test_list_ports_filtered_by_fixed_ip_substring(self): # for this test we need to enable overlapping ips
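# NOTE: in the query strings below, '%%' is Python %-formatting for a literal '%', so '%%3D' renders as '%3D', the URL-encoded '=' between each fixed_ips filter key and its value.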
cfg.CONF.set_default('allow_overlapping_ips', True) ctx = context.get_admin_context() with self.port() as port1, self.port() as port2: query_params = "security_groups=%s" % ( port1['port']['security_groups'][0]) ports_data = self._list('ports', query_params=query_params) self.assertEqual(set([port1['port']['id'], port2['port']['id']]), set([port['id'] for port in ports_data['ports']])) self.assertEqual(2, len(ports_data['ports'])) query_params = "security_groups=%s&limit=1" % ( port1['port']['security_groups'][0]) ports_data = self._list('ports', query_params=query_params) self.assertIn(ports_data['ports'][0]['id'], [port1['port']['id'], port2['port']['id']]) self.assertEqual(1, len(ports_data['ports'])) query_params = "security_groups=%s&id=%s" % ( port1['port']['security_groups'][0], port1['port']['id']) ports_data = self._list('ports', query_params=query_params) self.assertEqual(port1['port']['id'], ports_data['ports'][0]['id']) self.assertEqual(1, len(ports_data['ports'])) temp_sg = {'security_group': {'tenant_id': 'some_tenant', 'name': '', 'description': 's'}} sg_dbMixin = sg_db.SecurityGroupDbMixin() sg = sg_dbMixin.create_security_group(ctx, temp_sg) sg_dbMixin._delete_port_security_group_bindings( ctx, port2['port']['id']) sg_dbMixin._create_port_security_group_binding( ctx, port2['port']['id'], sg['id']) port2['port']['security_groups'][0] = sg['id'] query_params = "security_groups=%s&id=%s" % ( port1['port']['security_groups'][0], port1['port']['id']) ports_data = self._list('ports', query_params=query_params) self.assertEqual(port1['port']['id'], ports_data['ports'][0]['id']) self.assertEqual(1, len(ports_data['ports'])) query_params = "security_groups=%s&id=%s" % ( (port2['port']['security_groups'][0], port2['port']['id'])) ports_data = self._list('ports', query_params=query_params) self.assertEqual(port2['port']['id'], ports_data['ports'][0]['id']) self.assertEqual(1, len(ports_data['ports'])) class TestMl2PortsV2WithRevisionPlugin(Ml2PluginV2TestCase): def setUp(self): super(TestMl2PortsV2WithRevisionPlugin, self).setUp() self.revision_plugin = revision_plugin.RevisionPlugin() def test_update_port_status_bumps_revision(self): ctx = context.get_admin_context() plugin = directory.get_plugin() host_arg = {portbindings.HOST_ID: HOST} with self.port(arg_list=(portbindings.HOST_ID,), **host_arg) as port: port = plugin.get_port(ctx, port['port']['id']) updated_ports = [] receiver = lambda *a, **k: updated_ports.append(k['port']) registry.subscribe(receiver, resources.PORT, events.AFTER_UPDATE) plugin.update_port_status( ctx, port['id'], constants.PORT_STATUS_ACTIVE, host=HOST) self.assertGreater(updated_ports[0]['revision_number'], port['revision_number']) def test_bind_port_bumps_revision(self): updated_ports = [] created_ports = [] ureceiver = lambda *a, **k: updated_ports.append(k['port']) creceiver = lambda *a, **k: created_ports.append(k['port']) registry.subscribe(ureceiver, resources.PORT, events.AFTER_UPDATE) registry.subscribe(creceiver, resources.PORT, events.AFTER_CREATE) host_arg = {portbindings.HOST_ID: HOST} with self.port(arg_list=(portbindings.HOST_ID,), **host_arg): self.assertGreater(updated_ports[0]['revision_number'], created_ports[0]['revision_number']) def test_update_port_status_dvr_port_no_update_on_same_status(self): ctx = context.get_admin_context() plugin = directory.get_plugin() # enable subscription for events p_update_receiver = mock.Mock() registry.subscribe(p_update_receiver, resources.PORT, events.AFTER_UPDATE) host_arg = {portbindings.HOST_ID: 
HOST} with self.port(device_owner=constants.DEVICE_OWNER_DVR_INTERFACE, device_id=TEST_ROUTER_ID, arg_list=(portbindings.HOST_ID,), **host_arg) as port: ml2_db.ensure_distributed_port_binding(ctx, port['port']['id'], HOST) p_update_receiver.reset_mock() plugin.update_port_status( ctx, port['port']['id'], constants.PORT_STATUS_ACTIVE, host=HOST) self.assertTrue(p_update_receiver.called) after_1 = plugin.get_port(ctx, port['port']['id']) p_update_receiver.reset_mock() plugin.update_port_status( ctx, port['port']['id'], constants.PORT_STATUS_ACTIVE, host=HOST) self.assertFalse(p_update_receiver.called) after_2 = plugin.get_port(ctx, port['port']['id']) self.assertEqual(after_1['revision_number'], after_2['revision_number']) class TestMl2PortsV2WithL3(test_plugin.TestPortsV2, Ml2PluginV2TestCase): """For testing methods that require the L3 service plugin.""" l3_plugin = 'neutron.services.l3_router.l3_router_plugin.L3RouterPlugin' def get_additional_service_plugins(self): return {'flavors': 'flavors'} def test_update_port_status_notify_port_event_after_update(self): ctx = context.get_admin_context() plugin = directory.get_plugin() l3plugin = directory.get_plugin(plugin_constants.L3) host_arg = {portbindings.HOST_ID: HOST} with mock.patch.object(l3plugin.l3_rpc_notifier, 'routers_updated_on_host') as mock_updated: with self.port(device_owner=constants.DEVICE_OWNER_ROUTER_HA_INTF, device_id=TEST_ROUTER_ID, arg_list=(portbindings.HOST_ID,), **host_arg) as port: plugin.update_port_status( ctx, port['port']['id'], constants.PORT_STATUS_ACTIVE, host=HOST) mock_updated.assert_called_once_with( mock.ANY, [TEST_ROUTER_ID], HOST) class TestMl2PluginOnly(Ml2PluginV2TestCase): """For testing methods that don't call drivers""" def test__verify_service_plugins_requirements(self): plugin = directory.get_plugin() with mock.patch.dict(ml2_plugin.SERVICE_PLUGINS_REQUIRED_DRIVERS, {self.l3_plugin: self._mechanism_drivers}),\ mock.patch.object(plugin.extension_manager, 'names', return_value=self._mechanism_drivers): plugin._verify_service_plugins_requirements() def test__verify_service_plugins_requirements_missing_driver(self): plugin = directory.get_plugin() with mock.patch.dict(ml2_plugin.SERVICE_PLUGINS_REQUIRED_DRIVERS, {self.l3_plugin: ['test_required_driver']}),\ mock.patch.object(plugin.extension_manager, 'names', return_value=self._mechanism_drivers): self.assertRaises( ml2_exc.ExtensionDriverNotFound, plugin._verify_service_plugins_requirements ) def _test_check_mac_update_allowed(self, vif_type, expect_change=True): plugin = directory.get_plugin() port = {'mac_address': "fake_mac", 'id': "fake_id"} if expect_change: new_attrs = {"mac_address": "dummy_mac"} else: new_attrs = {"mac_address": port['mac_address']} binding = mock.Mock() binding.vif_type = vif_type mac_changed = plugin._check_mac_update_allowed(port, new_attrs, binding) self.assertEqual(expect_change, mac_changed) def test_check_mac_update_allowed_if_no_mac_change(self): self._test_check_mac_update_allowed(portbindings.VIF_TYPE_UNBOUND, expect_change=False) def test_check_mac_update_allowed_unless_bound(self): with testtools.ExpectedException(exc.PortBound): self._test_check_mac_update_allowed(portbindings.VIF_TYPE_OVS) def _test_reset_mac_for_direct_physical(self, direct_physical=True, unbinding=True): plugin = directory.get_plugin() port = {'device_id': '123', 'device_owner': 'compute:nova'} new_attrs = ({'device_id': '', 'device_owner': ''} if unbinding else {'name': 'new'}) binding = mock.Mock() binding.vnic_type = ( 
portbindings.VNIC_DIRECT_PHYSICAL if direct_physical else portbindings.VNIC_NORMAL) new_mac = plugin._reset_mac_for_direct_physical( port, new_attrs, binding) if direct_physical and unbinding: self.assertTrue(new_mac) self.assertIsNotNone(new_attrs.get('mac_address')) else: self.assertFalse(new_mac) self.assertIsNone(new_attrs.get('mac_address')) def test_reset_mac_for_direct_physical(self): self._test_reset_mac_for_direct_physical() def test_reset_mac_for_direct_physical_not_physical(self): self._test_reset_mac_for_direct_physical(False, True) def test_reset_mac_for_direct_physical_no_unbinding(self): self._test_reset_mac_for_direct_physical(True, False) def test_reset_mac_for_direct_physical_no_unbinding_not_physical(self): self._test_reset_mac_for_direct_physical(False, False) def test__device_to_port_id_prefix_names(self): input_output = [('sg-abcdefg', 'abcdefg'), ('tap123456', '123456'), ('qvo567890', '567890')] for device, expected in input_output: self.assertEqual(expected, ml2_plugin.Ml2Plugin._device_to_port_id( self.context, device)) def test__device_to_port_id_mac_address(self): with self.port() as p: mac = p['port']['mac_address'] port_id = p['port']['id'] self.assertEqual(port_id, ml2_plugin.Ml2Plugin._device_to_port_id( self.context, mac)) def test__device_to_port_id_not_uuid_not_mac(self): dev = '1234567' self.assertEqual(dev, ml2_plugin.Ml2Plugin._device_to_port_id( self.context, dev)) def test__device_to_port_id_UUID(self): port_id = uuidutils.generate_uuid() self.assertEqual(port_id, ml2_plugin.Ml2Plugin._device_to_port_id( self.context, port_id)) @mock.patch.object(ml2_db, 'clear_binding_levels') @mock.patch.object(port_obj.PortBinding, 'delete_objects') def test_delete_port_binding_delete_binding_and_levels( self, delete_port_binding_mock, clear_bl_mock): # patch decorators are applied bottom-up, so the delete_objects # mock is the first argument port_id = uuidutils.generate_uuid() host = 'fake-host' plugin = directory.get_plugin() plugin.delete_port_binding(self.context, host, port_id) clear_bl_mock.assert_called_once_with(self.context, port_id=port_id, host=host) delete_port_binding_mock.assert_called_once_with(self.context, host=host, port_id=port_id) class Test_GetNetworkMtu(Ml2PluginV2TestCase): def test_get_mtu_with_physical_net(self): plugin = directory.get_plugin() mock_type_driver = mock.MagicMock() plugin.type_manager.drivers['driver1'] = mock.Mock() plugin.type_manager.drivers['driver1'].obj = mock_type_driver net = { 'name': 'net1', 'network_type': 'driver1', 'physical_network': 'physnet1', } plugin._get_network_mtu(net) mock_type_driver.get_mtu.assert_called_once_with('physnet1') def _register_type_driver_with_mtu(self, driver, mtu): plugin = directory.get_plugin() class FakeDriver(object): def get_mtu(self, physical_network=None): return mtu def validate_provider_segment(self, segment): pass def is_partial_segment(self, segment): return False driver_mock = mock.Mock() driver_mock.obj = FakeDriver() plugin.type_manager.drivers[driver] = driver_mock def test_single_segment(self): plugin = directory.get_plugin() self._register_type_driver_with_mtu('driver1', 1400) net = { 'name': 'net1', mpnet_apidef.SEGMENTS: [ { 'network_type': 'driver1', 'physical_network': 'physnet1' }, ] } self.assertEqual(1400, plugin._get_network_mtu(net)) def test_multiple_segments_returns_minimal_mtu(self): plugin = directory.get_plugin() self._register_type_driver_with_mtu('driver1', 1400) self._register_type_driver_with_mtu('driver2', 1300) net = { 'name': 'net1', mpnet_apidef.SEGMENTS: [ { 'network_type': 'driver1', 'physical_network': 'physnet1' }, {
'network_type': 'driver2', 'physical_network': 'physnet2' }, ] } self.assertEqual(1300, plugin._get_network_mtu(net)) def test_no_segments(self): plugin = directory.get_plugin() self._register_type_driver_with_mtu('driver1', 1400) net = { 'name': 'net1', 'network_type': 'driver1', 'physical_network': 'physnet1', } self.assertEqual(1400, plugin._get_network_mtu(net)) def test_get_mtu_None_returns_default_mtu(self): plugin = directory.get_plugin() self._register_type_driver_with_mtu('driver1', None) net = { 'name': 'net1', 'network_type': 'driver1', 'physical_network': 'physnet1', } self.assertEqual(1500, plugin._get_network_mtu(net)) def test_unknown_segment_type_ignored(self): plugin = directory.get_plugin() self._register_type_driver_with_mtu('driver1', None) self._register_type_driver_with_mtu('driver2', 1300) net = { 'name': 'net1', mpnet_apidef.SEGMENTS: [ { 'network_type': 'driver1', 'physical_network': 'physnet1' }, { 'network_type': 'driver2', 'physical_network': 'physnet2' }, ] } self.assertEqual(1300, plugin._get_network_mtu(net)) class TestMl2DvrPortsV2(TestMl2PortsV2): def setUp(self): super(TestMl2DvrPortsV2, self).setUp() extensions = ['router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS, constants.L3_DISTRIBUTED_EXT_ALIAS] self.plugin = directory.get_plugin() self.l3plugin = mock.Mock() type(self.l3plugin).supported_extension_aliases = ( mock.PropertyMock(return_value=extensions)) def test_delete_port_notifies_l3_plugin(self, floating_ip=False): directory.add_plugin(plugin_constants.L3, self.l3plugin) ns_to_delete = {'host': 'myhost', 'agent_id': 'vm_l3_agent', 'router_id': 'my_router'} router_ids = set() if floating_ip: router_ids.add(ns_to_delete['router_id']) with self.port() as port,\ mock.patch.object(registry, 'notify') as notify, \ mock.patch.object(registry, 'publish') as publish, \ mock.patch.object(self.l3plugin, 'disassociate_floatingips', return_value=router_ids): port_id = port['port']['id'] self.plugin.delete_port(self.context, port_id) self.assertEqual(2, notify.call_count) self.assertEqual(1, publish.call_count) # needed for a full match in the assertion below port['port']['extra_dhcp_opts'] = [] expected = [mock.call(resources.PORT, events.PRECOMMIT_DELETE, mock.ANY, network=mock.ANY, bind=mock.ANY, port=port['port'], port_db=mock.ANY, context=self.context, levels=mock.ANY, id=mock.ANY, bindings=mock.ANY), mock.call(resources.PORT, events.AFTER_DELETE, mock.ANY, context=self.context, port=port['port'], router_ids=router_ids)] notify.assert_has_calls(expected) expected = [mock.call(resources.PORT, events.BEFORE_DELETE, mock.ANY, payload=mock.ANY)] publish.assert_has_calls(expected) payload = publish.call_args[1]['payload'] self.assertEqual(port_id, payload.resource_id) self.assertTrue(payload.metadata['port_check']) def test_delete_port_with_floatingip_notifies_l3_plugin(self): self.test_delete_port_notifies_l3_plugin(floating_ip=True) def test_delete_port_with_floatingip_create_precommit_event(self): fake_method = mock.Mock() with self.port(device_owner='network:floatingip') as port: try: registry.subscribe(fake_method, resources.FLOATING_IP, events.PRECOMMIT_DELETE) port_id = port['port']['id'] self.plugin.delete_port(self.context, port_id) fake_method.assert_called_once_with( resources.FLOATING_IP, events.PRECOMMIT_DELETE, mock.ANY, bind=mock.ANY, bindings=mock.ANY, context=mock.ANY, id=mock.ANY, levels=mock.ANY, network=mock.ANY, port=mock.ANY, port_db=mock.ANY) finally: registry.unsubscribe(fake_method, resources.FLOATING_IP, events.PRECOMMIT_DELETE) def 
test_concurrent_csnat_port_delete(self): plugin = directory.get_plugin(plugin_constants.L3) r = plugin.create_router( self.context, {'router': {'name': 'router', 'admin_state_up': True, 'tenant_id': 'fake_tenant'}}) with self.subnet() as s: p = plugin.add_router_interface(self.context, r['id'], {'subnet_id': s['subnet']['id']}) # lie to turn the port into an SNAT interface with db_api.CONTEXT_WRITER.using(self.context): pager = base_obj.Pager(limit=1) rp = l3_obj.RouterPort.get_objects( self.context, _pager=pager, port_id=p['port_id']) rp[0].port_type = constants.DEVICE_OWNER_ROUTER_SNAT rp[0].update() # take the port away before csnat gets a chance to delete it # to simulate a concurrent delete orig_get_ports = plugin._core_plugin.get_ports def get_ports_with_delete_first(*args, **kwargs): plugin._core_plugin.delete_port(self.context, p['port_id'], l3_port_check=False) return orig_get_ports(*args, **kwargs) plugin._core_plugin.get_ports = get_ports_with_delete_first # This should be able to handle a concurrent delete without raising # an exception router = plugin._get_router(self.context, r['id']) plugin.delete_csnat_router_interface_ports(self.context, router) class TestMl2PortBinding(Ml2PluginV2TestCase, test_bindings.PortBindingsTestCase): # Test case does not set binding:host_id, so ml2 does not attempt # to bind port VIF_TYPE = portbindings.VIF_TYPE_UNBOUND HAS_PORT_FILTER = False ENABLE_SG = True FIREWALL_DRIVER = test_sg_rpc.FIREWALL_HYBRID_DRIVER def setUp(self, firewall_driver=None): test_sg_rpc.set_firewall_driver(self.FIREWALL_DRIVER) cfg.CONF.set_override( 'enable_security_group', self.ENABLE_SG, group='SECURITYGROUP') make_res = mock.patch.object(quota.QuotaEngine, 'make_reservation') self.mock_quota_make_res = make_res.start() commit_res = mock.patch.object(quota.QuotaEngine, 'commit_reservation') self.mock_quota_commit_res = commit_res.start() super(TestMl2PortBinding, self).setUp() def _check_port_binding_profile(self, port, profile=None): self.assertIn('id', port) self.assertIn(portbindings.PROFILE, port) value = port[portbindings.PROFILE] self.assertEqual(profile or {}, value) def test_create_port_binding_profile(self): self._test_create_port_binding_profile({'a': 1, 'b': 2}) def test_update_port_binding_profile(self): self._test_update_port_binding_profile({'c': 3}) def test_create_port_binding_profile_too_big(self): s = 'x' * 5000 profile_arg = {portbindings.PROFILE: {'d': s}} try: with self.port(expected_res_status=400, arg_list=(portbindings.PROFILE,), **profile_arg): pass except webob.exc.HTTPClientError: pass def test_remove_port_binding_profile(self): profile = {'e': 5} profile_arg = {portbindings.PROFILE: profile} with self.port(arg_list=(portbindings.PROFILE,), **profile_arg) as port: self._check_port_binding_profile(port['port'], profile) port_id = port['port']['id'] profile_arg = {portbindings.PROFILE: None} port = self._update('ports', port_id, {'port': profile_arg})['port'] self._check_port_binding_profile(port) port = self._show('ports', port_id)['port'] self._check_port_binding_profile(port) def test_return_on_concurrent_delete_and_binding(self): # create a port and delete it so we have an expired mechanism context with self.port() as port: plugin = directory.get_plugin() binding = p_utils.get_port_binding_by_status_and_host( plugin._get_port(self.context, port['port']['id']).port_bindings, constants.ACTIVE) binding['host'] = 'test' mech_context = driver_context.PortContext( plugin, self.context, port['port'], plugin.get_network(self.context, 
port['port']['network_id']), binding, None) side = exc.PortNotFound(port_id=port['port']['id']) with mock.patch.object(plugin, '_get_port', side_effect=side) as gp_mock,\ mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.' '_make_port_dict') as mpd_mock: plugin._bind_port_if_needed(mech_context) # called during deletion to get port self.assertTrue(gp_mock.mock_calls) # should have returned before calling _make_port_dict self.assertFalse(mpd_mock.mock_calls) def _create_port_and_bound_context(self, port_vif_type, bound_vif_type): with self.port() as port: plugin = directory.get_plugin() binding = p_utils.get_port_binding_by_status_and_host( plugin._get_port(self.context, port['port']['id']).port_bindings, constants.ACTIVE) binding['host'] = 'fake_host' binding['vif_type'] = port_vif_type # Generates port context to be used before the bind. port_context = driver_context.PortContext( plugin, self.context, port['port'], plugin.get_network(self.context, port['port']['network_id']), binding, None) bound_context = mock.MagicMock() # Bound context is how port_context is expected to look # after _bind_port. bound_context.vif_type = bound_vif_type return plugin, port_context, bound_context def test__attempt_binding(self): # Simulate a successful binding for vif_type unbound # and keep the same binding state for other vif types. vif_types = [(portbindings.VIF_TYPE_BINDING_FAILED, portbindings.VIF_TYPE_BINDING_FAILED), (portbindings.VIF_TYPE_UNBOUND, portbindings.VIF_TYPE_OVS), (portbindings.VIF_TYPE_OVS, portbindings.VIF_TYPE_OVS)] for port_vif_type, bound_vif_type in vif_types: plugin, port_context, bound_context = ( self._create_port_and_bound_context(port_vif_type, bound_vif_type)) with mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin._bind_port', return_value=bound_context) as bd_mock: context, need_notify, try_again = (plugin._attempt_binding( port_context, False)) expected_need_notify = port_vif_type not in ( portbindings.VIF_TYPE_BINDING_FAILED, portbindings.VIF_TYPE_OVS) if bound_vif_type == portbindings.VIF_TYPE_BINDING_FAILED: expected_vif_type = port_vif_type expected_try_again = True expected_bd_mock_called = True else: expected_vif_type = portbindings.VIF_TYPE_OVS expected_try_again = False expected_bd_mock_called = (port_vif_type == portbindings.VIF_TYPE_UNBOUND) self.assertEqual(expected_need_notify, need_notify) self.assertEqual(expected_vif_type, context.vif_type) self.assertEqual(expected_try_again, try_again) self.assertEqual(expected_bd_mock_called, bd_mock.called) def test__bind_port_if_needed_early_exit_on_no_segments(self): with self.network() as n: ctx = context.get_admin_context() seg_plugin = segments_plugin.Plugin.get_instance() seg = seg_plugin.get_segments(ctx)[0] seg_plugin.delete_segment(ctx, seg['id']) plugin = directory.get_plugin() mech_context = driver_context.PortContext( plugin, ctx, None, plugin.get_network(self.context, n['network']['id']), models.PortBinding(), None) with mock.patch.object(plugin, '_attempt_binding') as ab: plugin._bind_port_if_needed(mech_context) self.assertFalse(ab.called) def test__attempt_binding_retries(self): # Simulate cases of both successful and failed binding states for # vif_type unbound vif_types = [(portbindings.VIF_TYPE_UNBOUND, portbindings.VIF_TYPE_BINDING_FAILED), (portbindings.VIF_TYPE_UNBOUND, portbindings.VIF_TYPE_OVS)] for port_vif_type, bound_vif_type in vif_types: plugin, port_context, bound_context = ( self._create_port_and_bound_context(port_vif_type, bound_vif_type)) with mock.patch( 
'neutron.plugins.ml2.plugin.Ml2Plugin._bind_port', return_value=bound_context),\ mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin._commit_' 'port_binding', return_value=(bound_context, True, False)),\ mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.' '_attempt_binding', side_effect=plugin._attempt_binding) as at_mock: plugin._bind_port_if_needed(port_context) if bound_vif_type == portbindings.VIF_TYPE_BINDING_FAILED: # An unsuccessful binding attempt should be retried # MAX_BIND_TRIES times. self.assertEqual(ml2_plugin.MAX_BIND_TRIES, at_mock.call_count) else: # Successful binding should only be attempted once. self.assertEqual(1, at_mock.call_count) def test__bind_port_if_needed_concurrent_calls(self): port_vif_type = portbindings.VIF_TYPE_UNBOUND bound_vif_type = portbindings.VIF_TYPE_OVS plugin, port_context, bound_context = ( self._create_port_and_bound_context(port_vif_type, bound_vif_type)) bound_context._binding_levels = [mock.Mock( port_id="port_id", level=0, driver='fake_agent', segment_id="11111111-2222-3333-4444-555555555555")] # let _commit_port_binding replace the PortContext with a new instance # that has no binding levels set, to simulate a concurrent # port binding operation failing with mock.patch( 'neutron.plugins.ml2.plugin.Ml2Plugin._bind_port', return_value=bound_context),\ mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.' '_notify_port_updated') as npu_mock,\ mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.' '_attempt_binding', side_effect=plugin._attempt_binding) as ab_mock,\ mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.' '_commit_port_binding', return_value=( mock.MagicMock(), True, True)) as cpb_mock: ret_context = plugin._bind_port_if_needed(port_context, allow_notify=True) # _attempt_binding will return without doing anything during # the second iteration since _should_bind_port returns False self.assertEqual(2, ab_mock.call_count) self.assertEqual(1, cpb_mock.call_count) # _notify_port_updated will still be called though it does # nothing due to the missing binding levels npu_mock.assert_called_once_with(ret_context) def test__commit_port_binding_populating_with_binding_levels(self): port_vif_type = portbindings.VIF_TYPE_OVS bound_vif_type = portbindings.VIF_TYPE_OVS plugin, port_context, bound_context = ( self._create_port_and_bound_context(port_vif_type, bound_vif_type)) db_portbinding = port_obj.PortBindingLevel( self.context, port_id=uuidutils.generate_uuid(), level=0, driver='fake_agent', segment_id="11111111-2222-3333-4444-555555555555") bound_context.network.current = {'id': 'net_id'} with mock.patch.object(ml2_db, 'get_binding_level_objs', return_value=[db_portbinding]),\ mock.patch.object(driver_context.PortContext, '_push_binding_level') as pbl_mock: plugin._commit_port_binding( port_context, bound_context, True) pbl_mock.assert_called_once_with(db_portbinding) def test_port_binding_profile_not_changed(self): profile = {'e': 5} profile_arg = {portbindings.PROFILE: profile} with self.port(arg_list=(portbindings.PROFILE,), **profile_arg) as port: self._check_port_binding_profile(port['port'], profile) port_id = port['port']['id'] state_arg = {'admin_state_up': True} port = self._update('ports', port_id, {'port': state_arg})['port'] self._check_port_binding_profile(port, profile) port = self._show('ports', port_id)['port'] self._check_port_binding_profile(port, profile) def test_update_port_binding_host_id_none(self): with self.port() as port: plugin = directory.get_plugin() binding = 
p_utils.get_port_binding_by_status_and_host( plugin._get_port(self.context, port['port']['id']).port_bindings, constants.ACTIVE) with self.context.session.begin(subtransactions=True): binding.host = 'test' mech_context = driver_context.PortContext( plugin, self.context, port['port'], plugin.get_network(self.context, port['port']['network_id']), binding, None) with mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.' '_update_port_dict_binding') as update_mock: attrs = {portbindings.HOST_ID: None} self.assertEqual('test', binding.host) with self.context.session.begin(subtransactions=True): plugin._process_port_binding(mech_context, attrs) self.assertTrue(update_mock.mock_calls) self.assertEqual('', binding.host) def test_update_port_binding_host_id_not_changed(self): with self.port() as port: plugin = directory.get_plugin() binding = p_utils.get_port_binding_by_status_and_host( plugin._get_port(self.context, port['port']['id']).port_bindings, constants.ACTIVE) binding['host'] = 'test' mech_context = driver_context.PortContext( plugin, self.context, port['port'], plugin.get_network(self.context, port['port']['network_id']), binding, None) with mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.' '_update_port_dict_binding') as update_mock: attrs = {portbindings.PROFILE: {'e': 5}} plugin._process_port_binding(mech_context, attrs) self.assertTrue(update_mock.mock_calls) self.assertEqual('test', binding.host) def test_process_distributed_port_binding_update_router_id(self): host_id = 'host' binding = models.DistributedPortBinding( port_id='port_id', host=host_id, router_id='old_router_id', vif_type=portbindings.VIF_TYPE_OVS, vnic_type=portbindings.VNIC_NORMAL, status=constants.PORT_STATUS_DOWN) plugin = directory.get_plugin() mock_network = {'id': 'net_id'} mock_port = {'id': 'port_id'} ctxt = context.get_admin_context() new_router_id = 'new_router' attrs = {'device_id': new_router_id, portbindings.HOST_ID: host_id} with mock.patch.object(plugin, '_update_port_dict_binding'): with mock.patch.object(segments_db, 'get_network_segments', return_value=[]): mech_context = driver_context.PortContext( self, ctxt, mock_port, mock_network, binding, None) plugin._process_distributed_port_binding(mech_context, ctxt, attrs) self.assertEqual(new_router_id, mech_context._binding.router_id) self.assertEqual(host_id, mech_context._binding.host) def test_update_distributed_port_binding_on_concurrent_port_delete(self): plugin = directory.get_plugin() with self.port() as port: port = { 'id': port['port']['id'], portbindings.HOST_ID: 'foo_host', } exc = db_exc.DBReferenceError('', '', '', '') with mock.patch.object(ml2_db, 'ensure_distributed_port_binding', side_effect=exc): res = plugin.update_distributed_port_binding( self.context, port['id'], {'port': port}) self.assertIsNone(res) def test_update_distributed_port_binding_on_non_existent_port(self): plugin = directory.get_plugin() port = { 'id': 'foo_port_id', portbindings.HOST_ID: 'foo_host', } with mock.patch.object(ml2_db, 'ensure_distributed_port_binding') as mock_dist: plugin.update_distributed_port_binding( self.context, 'foo_port_id', {'port': port}) self.assertFalse(mock_dist.called) def test__bind_port_original_port_set(self): plugin = directory.get_plugin() plugin.mechanism_manager = mock.Mock() mock_port = {'id': 'port_id'} context = mock.Mock() context.network.current = {'id': 'net_id'} context.original = mock_port with mock.patch.object(plugin, '_update_port_dict_binding'), \ mock.patch.object(segments_db, 'get_network_segments', return_value=[]): 
new_context = plugin._bind_port(context) self.assertEqual(mock_port, new_context.original) self.assertNotEqual(new_context, context) class TestMl2PortBindingNoSG(TestMl2PortBinding): HAS_PORT_FILTER = False ENABLE_SG = False FIREWALL_DRIVER = test_sg_rpc.FIREWALL_NOOP_DRIVER class TestMl2PortBindingHost(Ml2PluginV2TestCase, test_bindings.PortBindingsHostTestCaseMixin): pass class TestMl2PortBindingVnicType(Ml2PluginV2TestCase, test_bindings.PortBindingsVnicTestCaseMixin): pass class TestMultiSegmentNetworks(Ml2PluginV2TestCase): def setUp(self, plugin=None): super(TestMultiSegmentNetworks, self).setUp() def test_allocate_dynamic_segment(self): data = {'network': {'name': 'net1', 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) network = self.deserialize(self.fmt, network_req.get_response(self.api)) segment = {driver_api.NETWORK_TYPE: 'vlan', driver_api.PHYSICAL_NETWORK: 'physnet1'} network_id = network['network']['id'] self.driver.type_manager.allocate_dynamic_segment( self.context, network_id, segment) dynamic_segment = segments_db.get_dynamic_segment( self.context, network_id, 'physnet1') self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE]) self.assertEqual('physnet1', dynamic_segment[driver_api.PHYSICAL_NETWORK]) self.assertGreater(dynamic_segment[driver_api.SEGMENTATION_ID], 0) segment2 = {driver_api.NETWORK_TYPE: 'vlan', driver_api.SEGMENTATION_ID: 1234, driver_api.PHYSICAL_NETWORK: 'physnet3'} self.driver.type_manager.allocate_dynamic_segment( self.context, network_id, segment2) dynamic_segment = segments_db.get_dynamic_segment( self.context, network_id, segmentation_id='1234') self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE]) self.assertEqual('physnet3', dynamic_segment[driver_api.PHYSICAL_NETWORK]) self.assertEqual(dynamic_segment[driver_api.SEGMENTATION_ID], 1234) def test_allocate_dynamic_segment_multiple_physnets(self): data = {'network': {'name': 'net1', 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) network = self.deserialize(self.fmt, network_req.get_response(self.api)) segment = {driver_api.NETWORK_TYPE: 'vlan', driver_api.PHYSICAL_NETWORK: 'physnet1'} network_id = network['network']['id'] self.driver.type_manager.allocate_dynamic_segment( self.context, network_id, segment) dynamic_segment = segments_db.get_dynamic_segment( self.context, network_id, 'physnet1') self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE]) self.assertEqual('physnet1', dynamic_segment[driver_api.PHYSICAL_NETWORK]) dynamic_segmentation_id = dynamic_segment[driver_api.SEGMENTATION_ID] self.assertGreater(dynamic_segmentation_id, 0) dynamic_segment1 = segments_db.get_dynamic_segment( self.context, network_id, 'physnet1') dynamic_segment1_id = dynamic_segment1[driver_api.SEGMENTATION_ID] self.assertEqual(dynamic_segmentation_id, dynamic_segment1_id) segment2 = {driver_api.NETWORK_TYPE: 'vlan', driver_api.PHYSICAL_NETWORK: 'physnet2'} self.driver.type_manager.allocate_dynamic_segment( self.context, network_id, segment2) dynamic_segment2 = segments_db.get_dynamic_segment( self.context, network_id, 'physnet2') dynamic_segmentation2_id = dynamic_segment2[driver_api.SEGMENTATION_ID] self.assertNotEqual(dynamic_segmentation_id, dynamic_segmentation2_id) def test_allocate_release_dynamic_segment(self): data = {'network': {'name': 'net1', 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) network = self.deserialize(self.fmt, network_req.get_response(self.api)) segment 
= {driver_api.NETWORK_TYPE: 'vlan', driver_api.PHYSICAL_NETWORK: 'physnet1'} network_id = network['network']['id'] self.driver.type_manager.allocate_dynamic_segment( self.context, network_id, segment) dynamic_segment = segments_db.get_dynamic_segment( self.context, network_id, 'physnet1') self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE]) self.assertEqual('physnet1', dynamic_segment[driver_api.PHYSICAL_NETWORK]) dynamic_segmentation_id = dynamic_segment[driver_api.SEGMENTATION_ID] self.assertGreater(dynamic_segmentation_id, 0) self.driver.type_manager.release_dynamic_segment( self.context, dynamic_segment[driver_api.ID]) self.assertIsNone(segments_db.get_dynamic_segment( self.context, network_id, 'physnet1')) def test_create_network_provider(self): data = {'network': {'name': 'net1', pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1, 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) network = self.deserialize(self.fmt, network_req.get_response(self.api)) self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE]) self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK]) self.assertEqual(1, network['network'][pnet.SEGMENTATION_ID]) self.assertNotIn(mpnet_apidef.SEGMENTS, network['network']) def test_fail_update_network_provider_attr(self): data = {'network': {'name': 'net1', pnet.NETWORK_TYPE: 'flat', pnet.PHYSICAL_NETWORK: 'physnet1', 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) network = self.deserialize(self.fmt, network_req.get_response(self.api)) self.assertEqual('flat', network['network'][pnet.NETWORK_TYPE]) self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK]) self.assertNotIn(mpnet_apidef.SEGMENTS, network['network']) data = {'network': {'name': 'updated-net1', pnet.NETWORK_TYPE: 'flat', pnet.PHYSICAL_NETWORK: 'update_physnet1'}} network_req = self.new_update_request('networks', data, network['network']['id']) network = self.deserialize(self.fmt, network_req.get_response(self.api)) self.assertIn('NeutronError', network) self.assertIn(('Invalid input for operation: Plugin does not ' 'support updating the following provider network ' 'attributes: '), network['NeutronError']['message']) def test_update_network_provider_attr_no_change(self): data = {'network': {'name': 'net1', pnet.NETWORK_TYPE: 'flat', pnet.PHYSICAL_NETWORK: 'physnet1', 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) network = self.deserialize(self.fmt, network_req.get_response(self.api)) self.assertEqual('flat', network['network'][pnet.NETWORK_TYPE]) self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK]) self.assertNotIn(mpnet_apidef.SEGMENTS, network['network']) data = {'network': {'name': 'updated-net1', pnet.NETWORK_TYPE: 'flat', pnet.PHYSICAL_NETWORK: 'physnet1'}} network_req = self.new_update_request('networks', data, network['network']['id']) network = self.deserialize(self.fmt, network_req.get_response(self.api)) self.assertEqual('updated-net1', network['network']['name']) def test_create_network_single_multiprovider(self): data = {'network': {'name': 'net1', mpnet_apidef.SEGMENTS: [{pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1}], 'tenant_id': 'tenant_one'}} net_req = self.new_create_request('networks', data) network = self.deserialize(self.fmt, net_req.get_response(self.api)) self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE]) 
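# A network created with a single 'segments' entry is expected to be reported back through the flat provider:* attributes, with no 'segments' list (checked below).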
self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK]) self.assertEqual(1, network['network'][pnet.SEGMENTATION_ID]) self.assertNotIn(mpnet_apidef.SEGMENTS, network['network']) # Tests get_network() net_req = self.new_show_request('networks', network['network']['id']) network = self.deserialize(self.fmt, net_req.get_response(self.api)) self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE]) self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK]) self.assertEqual(1, network['network'][pnet.SEGMENTATION_ID]) self.assertNotIn(mpnet_apidef.SEGMENTS, network['network']) def test_create_network_multiprovider(self): data = {'network': {'name': 'net1', mpnet_apidef.SEGMENTS: [{pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1}, {pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 2}], 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) network = self.deserialize(self.fmt, network_req.get_response(self.api)) segments = network['network'][mpnet_apidef.SEGMENTS] for segment_index, segment in enumerate(data['network'] [mpnet_apidef.SEGMENTS]): for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID]: self.assertEqual(segment.get(field), segments[segment_index][field]) # Tests get_network() net_req = self.new_show_request('networks', network['network']['id']) network = self.deserialize(self.fmt, net_req.get_response(self.api)) segments = network['network'][mpnet_apidef.SEGMENTS] for segment_index, segment in enumerate(data['network'] [mpnet_apidef.SEGMENTS]): for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID]: self.assertEqual(segment.get(field), segments[segment_index][field]) def test_create_network_with_provider_and_multiprovider_fail(self): data = {'network': {'name': 'net1', mpnet_apidef.SEGMENTS: [{pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1}], pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1, 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) res = network_req.get_response(self.api) self.assertEqual(400, res.status_int) def test_create_network_duplicate_full_segments(self): data = {'network': {'name': 'net1', mpnet_apidef.SEGMENTS: [{pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1}, {pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1}], 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) res = network_req.get_response(self.api) self.assertEqual(400, res.status_int) def test_create_network_duplicate_partial_segments(self): data = {'network': {'name': 'net1', mpnet_apidef.SEGMENTS: [{pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1'}, {pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1'}], 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) res = network_req.get_response(self.api) self.assertEqual(201, res.status_int) def test_release_network_segments(self): data = {'network': {'name': 'net1', 'admin_state_up': True, 'shared': False, pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1, 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) res = network_req.get_response(self.api) network = self.deserialize(self.fmt, res) network_id = network['network']['id'] segment = 
{driver_api.NETWORK_TYPE: 'vlan', driver_api.PHYSICAL_NETWORK: 'physnet2'} self.driver.type_manager.allocate_dynamic_segment( self.context, network_id, segment) dynamic_segment = segments_db.get_dynamic_segment( self.context, network_id, 'physnet2') self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE]) self.assertEqual('physnet2', dynamic_segment[driver_api.PHYSICAL_NETWORK]) self.assertGreater(dynamic_segment[driver_api.SEGMENTATION_ID], 0) with mock.patch.object(type_vlan.VlanTypeDriver, 'release_segment') as rs: segments_plugin_db.subscribe() req = self.new_delete_request('networks', network_id) res = req.get_response(self.api) self.assertEqual(2, rs.call_count) self.assertEqual([], segments_db.get_network_segments( self.context, network_id)) self.assertIsNone(segments_db.get_dynamic_segment( self.context, network_id, 'physnet2')) def test_release_segment_no_type_driver(self): data = {'network': {'name': 'net1', 'admin_state_up': True, 'shared': False, pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1, 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) res = network_req.get_response(self.api) network = self.deserialize(self.fmt, res) network_id = network['network']['id'] segment = {driver_api.NETWORK_TYPE: 'faketype', driver_api.PHYSICAL_NETWORK: 'physnet1', driver_api.ID: 1} with mock.patch('neutron.plugins.ml2.managers.LOG') as log: with mock.patch('neutron.plugins.ml2.managers.segments_db') as db: db.get_network_segments.return_value = (segment,) self.driver.type_manager.release_network_segments( self.context, network_id) log.error.assert_called_once_with( "Failed to release segment '%s' because " "network type is not supported.", segment) def test_create_provider_fail(self): segment = {pnet.NETWORK_TYPE: None, pnet.PHYSICAL_NETWORK: 'phys_net', pnet.SEGMENTATION_ID: None} with testtools.ExpectedException(exc.InvalidInput): self.driver.type_manager._process_provider_create(segment) def test_create_network_plugin(self): data = {'network': {'name': 'net1', 'admin_state_up': True, 'shared': False, pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1, 'tenant_id': 'tenant_one'}} def raise_mechanism_exc(*args, **kwargs): raise ml2_exc.MechanismDriverError( method='create_network_precommit') with mock.patch('neutron.plugins.ml2.managers.MechanismManager.'
'create_network_precommit', new=raise_mechanism_exc): with testtools.ExpectedException(ml2_exc.MechanismDriverError): self.driver.create_network(self.context, data) def test_extend_dictionary_no_segments(self): network = dict(name='net_no_segment', id='5', tenant_id='tenant_one') self.driver.type_manager.extend_network_dict_provider(self.context, network) self.assertIsNone(network[pnet.NETWORK_TYPE]) self.assertIsNone(network[pnet.PHYSICAL_NETWORK]) self.assertIsNone(network[pnet.SEGMENTATION_ID]) class TestMl2AllowedAddressPairs(Ml2PluginV2TestCase, test_pair.TestAllowedAddressPairs): _extension_drivers = ['port_security'] def setUp(self, plugin=None): cfg.CONF.set_override('extension_drivers', self._extension_drivers, group='ml2') super(test_pair.TestAllowedAddressPairs, self).setUp( plugin=PLUGIN_NAME) class TestMl2PortSecurity(Ml2PluginV2TestCase): def setUp(self): cfg.CONF.set_override('extension_drivers', ['port_security'], group='ml2') cfg.CONF.set_override('enable_security_group', False, group='SECURITYGROUP') make_res = mock.patch.object(quota.QuotaEngine, 'make_reservation') self.mock_quota_make_res = make_res.start() commit_res = mock.patch.object(quota.QuotaEngine, 'commit_reservation') self.mock_quota_commit_res = commit_res.start() super(TestMl2PortSecurity, self).setUp() def test_port_update_without_security_groups(self): with self.port() as port: plugin = directory.get_plugin() ctx = context.get_admin_context() self.assertTrue(port['port']['port_security_enabled']) updated_port = plugin.update_port( ctx, port['port']['id'], {'port': {'port_security_enabled': False}}) self.assertFalse(updated_port['port_security_enabled']) class TestMl2HostsNetworkAccess(Ml2PluginV2TestCase): _mechanism_drivers = ['openvswitch', 'logger'] def setUp(self): super(TestMl2HostsNetworkAccess, self).setUp() helpers.register_ovs_agent( host='host1', bridge_mappings={'physnet1': 'br-eth-1'}) helpers.register_ovs_agent( host='host2', bridge_mappings={'physnet2': 'br-eth-2'}) helpers.register_ovs_agent( host='host3', bridge_mappings={'physnet3': 'br-eth-3'}) self.dhcp_agent1 = helpers.register_dhcp_agent( host='host1') self.dhcp_agent2 = helpers.register_dhcp_agent( host='host2') self.dhcp_agent3 = helpers.register_dhcp_agent( host='host3') self.dhcp_hosts = {'host1', 'host2', 'host3'} def test_filter_hosts_with_network_access(self): net = self.driver.create_network( self.context, {'network': {'name': 'net1', pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1, 'tenant_id': 'tenant_one', 'admin_state_up': True, 'shared': True}}) observeds = self.driver.filter_hosts_with_network_access( self.context, net['id'], self.dhcp_hosts) self.assertEqual({self.dhcp_agent1.host}, observeds) def test_filter_hosts_with_network_access_multi_segments(self): net = self.driver.create_network( self.context, {'network': {'name': 'net1', mpnet_apidef.SEGMENTS: [ {pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1}, {pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet2', pnet.SEGMENTATION_ID: 2}], 'tenant_id': 'tenant_one', 'admin_state_up': True, 'shared': True}}) expecteds = {self.dhcp_agent1.host, self.dhcp_agent2.host} observeds = self.driver.filter_hosts_with_network_access( self.context, net['id'], self.dhcp_hosts) self.assertEqual(expecteds, observeds) def test_filter_hosts_with_network_access_not_supported(self): self.driver.mechanism_manager.host_filtering_supported = False observeds = self.driver.filter_hosts_with_network_access( 
self.context, 'fake_id', self.dhcp_hosts) self.assertEqual(self.dhcp_hosts, observeds) class DHCPOptsTestCase(test_dhcpopts.TestExtraDhcpOpt): def setUp(self, plugin=None): super(DHCPOptsTestCase, self).setUp(plugin=PLUGIN_NAME) class Ml2PluginV2FaultyDriverTestCase(test_plugin.NeutronDbPluginV2TestCase): def setUp(self): # Enable the test mechanism driver to ensure that # we can successfully call through to all mechanism # driver apis. cfg.CONF.set_override('mechanism_drivers', ['test', 'logger'], group='ml2') super(Ml2PluginV2FaultyDriverTestCase, self).setUp(PLUGIN_NAME) self.port_create_status = 'DOWN' class TestFaultyMechanismDriver(Ml2PluginV2FaultyDriverTestCase): def test_create_network_faulty(self): err_msg = "Some errors" with mock.patch.object(mech_test.TestMechanismDriver, 'create_network_postcommit', side_effect=(exc.InvalidInput( error_message=err_msg))): tenant_id = uuidutils.generate_uuid() data = {'network': {'name': 'net1', 'tenant_id': tenant_id}} req = self.new_create_request('networks', data) res = req.get_response(self.api) self.assertEqual(400, res.status_int) error = self.deserialize(self.fmt, res) self.assertEqual('InvalidInput', error['NeutronError']['type']) # Check the client can see the root cause of error. self.assertIn(err_msg, error['NeutronError']['message']) query_params = "tenant_id=%s" % tenant_id nets = self._list('networks', query_params=query_params) self.assertFalse(nets['networks']) def test_delete_network_faulty(self): with mock.patch.object(mech_test.TestMechanismDriver, 'delete_network_postcommit', side_effect=ml2_exc.MechanismDriverError): with mock.patch.object(mech_logger.LoggerMechanismDriver, 'delete_network_postcommit') as dnp: data = {'network': {'name': 'net1', 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) network_res = network_req.get_response(self.api) self.assertEqual(201, network_res.status_int) network = self.deserialize(self.fmt, network_res) net_id = network['network']['id'] req = self.new_delete_request('networks', net_id) res = req.get_response(self.api) self.assertEqual(204, res.status_int) # Test if other mechanism driver was called self.assertTrue(dnp.called) self._show('networks', net_id, expected_code=webob.exc.HTTPNotFound.code) def test_update_network_faulty(self): err_msg = "Some errors" with mock.patch.object(mech_test.TestMechanismDriver, 'update_network_postcommit', side_effect=(exc.InvalidInput( error_message=err_msg))): with mock.patch.object(mech_logger.LoggerMechanismDriver, 'update_network_postcommit') as unp: data = {'network': {'name': 'net1', 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) network_res = network_req.get_response(self.api) self.assertEqual(201, network_res.status_int) network = self.deserialize(self.fmt, network_res) net_id = network['network']['id'] new_name = 'a_brand_new_name' data = {'network': {'name': new_name}} req = self.new_update_request('networks', data, net_id) res = req.get_response(self.api) self.assertEqual(400, res.status_int) error = self.deserialize(self.fmt, res) self.assertEqual('InvalidInput', error['NeutronError']['type']) # Check the client can see the root cause of error.
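# (the error_message passed to exc.InvalidInput by the driver is expected to appear verbatim in the 400 response body)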
self.assertIn(err_msg, error['NeutronError']['message']) # Test if other mechanism driver was called self.assertTrue(unp.called) net = self._show('networks', net_id) self.assertEqual(new_name, net['network']['name']) self._delete('networks', net_id) def test_create_subnet_faulty(self): err_msg = "Some errors" with mock.patch.object(mech_test.TestMechanismDriver, 'create_subnet_postcommit', side_effect=(exc.InvalidInput( error_message=err_msg))): with self.network() as network: net_id = network['network']['id'] data = {'subnet': {'network_id': net_id, 'cidr': '10.0.20.0/24', 'ip_version': constants.IP_VERSION_4, 'name': 'subnet1', 'tenant_id': network['network']['tenant_id'], 'gateway_ip': '10.0.20.1'}} req = self.new_create_request('subnets', data) res = req.get_response(self.api) self.assertEqual(400, res.status_int) error = self.deserialize(self.fmt, res) self.assertEqual('InvalidInput', error['NeutronError']['type']) # Check the client can see the root cause of error. self.assertIn(err_msg, error['NeutronError']['message']) query_params = "network_id=%s" % net_id subnets = self._list('subnets', query_params=query_params) self.assertFalse(subnets['subnets']) def test_delete_subnet_faulty(self): with mock.patch.object(mech_test.TestMechanismDriver, 'delete_subnet_postcommit', side_effect=ml2_exc.MechanismDriverError): with mock.patch.object(mech_logger.LoggerMechanismDriver, 'delete_subnet_postcommit') as dsp: with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.20.0/24', 'ip_version': constants.IP_VERSION_4, 'name': 'subnet1', 'tenant_id': network['network']['tenant_id'], 'gateway_ip': '10.0.20.1'}} subnet_req = self.new_create_request('subnets', data) subnet_res = subnet_req.get_response(self.api) self.assertEqual(201, subnet_res.status_int) subnet = self.deserialize(self.fmt, subnet_res) subnet_id = subnet['subnet']['id'] req = self.new_delete_request('subnets', subnet_id) res = req.get_response(self.api) self.assertEqual(204, res.status_int) # Test if other mechanism driver was called self.assertTrue(dsp.called) self._show('subnets', subnet_id, expected_code=webob.exc.HTTPNotFound.code) def test_update_subnet_faulty(self): err_msg = "Some errors" with mock.patch.object(mech_test.TestMechanismDriver, 'update_subnet_postcommit', side_effect=(exc.InvalidInput( error_message=err_msg))): with mock.patch.object(mech_logger.LoggerMechanismDriver, 'update_subnet_postcommit') as usp: with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.20.0/24', 'ip_version': constants.IP_VERSION_4, 'name': 'subnet1', 'tenant_id': network['network']['tenant_id'], 'gateway_ip': '10.0.20.1'}} subnet_req = self.new_create_request('subnets', data) subnet_res = subnet_req.get_response(self.api) self.assertEqual(201, subnet_res.status_int) subnet = self.deserialize(self.fmt, subnet_res) subnet_id = subnet['subnet']['id'] new_name = 'a_brand_new_name' data = {'subnet': {'name': new_name}} req = self.new_update_request('subnets', data, subnet_id) res = req.get_response(self.api) self.assertEqual(400, res.status_int) error = self.deserialize(self.fmt, res) self.assertEqual('InvalidInput', error['NeutronError']['type']) # Check the client can see the root cause of error. 
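# The update was already committed before postcommit ran, so the error is reported but the change is not rolled back, as verified below.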
self.assertIn(err_msg, error['NeutronError']['message']) # Test if other mechanism driver was called self.assertTrue(usp.called) subnet = self._show('subnets', subnet_id) self.assertEqual(new_name, subnet['subnet']['name']) self._delete('subnets', subnet['subnet']['id']) def test_create_port_faulty(self): err_msg = "Some errors" with mock.patch.object(mech_test.TestMechanismDriver, 'create_port_postcommit', side_effect=(exc.InvalidInput( error_message=err_msg))): with self.network() as network: net_id = network['network']['id'] data = {'port': {'network_id': net_id, 'tenant_id': network['network']['tenant_id'], 'name': 'port1', 'admin_state_up': 1, 'fixed_ips': []}} req = self.new_create_request('ports', data) res = req.get_response(self.api) self.assertEqual(400, res.status_int) error = self.deserialize(self.fmt, res) self.assertEqual('InvalidInput', error['NeutronError']['type']) # Check the client can see the root cause of error. self.assertIn(err_msg, error['NeutronError']['message']) query_params = "network_id=%s" % net_id ports = self._list('ports', query_params=query_params) self.assertFalse(ports['ports']) def test_update_port_faulty(self): with mock.patch.object(mech_test.TestMechanismDriver, 'update_port_postcommit', side_effect=ml2_exc.MechanismDriverError): with mock.patch.object(mech_logger.LoggerMechanismDriver, 'update_port_postcommit') as upp: with self.network() as network: data = {'port': {'network_id': network['network']['id'], 'tenant_id': network['network']['tenant_id'], 'name': 'port1', 'admin_state_up': 1, 'fixed_ips': []}} port_req = self.new_create_request('ports', data) port_res = port_req.get_response(self.api) self.assertEqual(201, port_res.status_int) port = self.deserialize(self.fmt, port_res) port_id = port['port']['id'] new_name = 'a_brand_new_name' data = {'port': {'name': new_name}} req = self.new_update_request('ports', data, port_id) res = req.get_response(self.api) self.assertEqual(200, res.status_int) # Test if other mechanism driver was called self.assertTrue(upp.called) port = self._show('ports', port_id) self.assertEqual(new_name, port['port']['name']) self._delete('ports', port['port']['id']) def test_update_distributed_router_interface_port(self): """Test that a distributed router interface port update succeeds.""" host_id = 'host' binding = models.DistributedPortBinding( port_id='port_id', host=host_id, router_id='old_router_id', vif_type=portbindings.VIF_TYPE_OVS, vnic_type=portbindings.VNIC_NORMAL, status=constants.PORT_STATUS_DOWN) with mock.patch.object( mech_test.TestMechanismDriver, 'update_port_postcommit', side_effect=ml2_exc.MechanismDriverError) as port_post,\ mock.patch.object( mech_test.TestMechanismDriver, 'update_port_precommit') as port_pre,\ mock.patch.object( ml2_db, 'get_distributed_port_bindings') as dist_bindings: dist_bindings.return_value = [binding] port_pre.return_value = True with self.network() as network: with self.subnet(network=network) as subnet: subnet_id = subnet['subnet']['id'] data = {'port': { 'network_id': network['network']['id'], 'tenant_id': network['network']['tenant_id'], 'name': 'port1', 'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE, 'admin_state_up': 1, 'fixed_ips': [{'subnet_id': subnet_id}]}} port_req = self.new_create_request('ports', data) port_res = port_req.get_response(self.api) self.assertEqual(201, port_res.status_int) port = self.deserialize(self.fmt, port_res) port_id = port['port']['id'] new_name = 'a_brand_new_name' data = {'port': {'name': new_name}} req =
self.new_update_request('ports', data, port_id) res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.assertTrue(dist_bindings.called) self.assertTrue(port_pre.called) self.assertTrue(port_post.called) port = self._show('ports', port_id) self.assertEqual(new_name, port['port']['name']) class TestML2PluggableIPAM(test_ipam.UseIpamMixin, TestMl2SubnetsV2): def test_create_subnet_delete_subnet_call_ipam_driver(self): driver = 'neutron.ipam.drivers.neutrondb_ipam.driver.NeutronDbPool' gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' with mock.patch(driver) as driver_mock: request = mock.Mock() request.subnet_id = uuidutils.generate_uuid() request.subnet_cidr = netaddr.IPNetwork(cidr) request.allocation_pools = [] request.gateway_ip = netaddr.IPAddress(gateway_ip) request.tenant_id = uuidutils.generate_uuid() ipam_subnet = mock.Mock() ipam_subnet.get_details.return_value = request driver_mock().allocate_subnet.return_value = ipam_subnet self._test_create_subnet(gateway_ip=gateway_ip, cidr=cidr) driver_mock().allocate_subnet.assert_called_with(mock.ANY) driver_mock().remove_subnet.assert_called_with(request.subnet_id) def test_delete_subnet_deallocates_slaac_correctly(self): driver = 'neutron.ipam.drivers.neutrondb_ipam.driver.NeutronDbPool' with self.network() as network: with self.subnet(network=network, cidr='2001:100::0/64', ip_version=constants.IP_VERSION_6, ipv6_ra_mode=constants.IPV6_SLAAC) as subnet: with self.port(subnet=subnet) as port: with mock.patch(driver) as driver_mock: # Validate that deletion of SLAAC allocation happens # via IPAM interface, i.e. ipam_subnet.deallocate is # called prior to subnet deletion from db. self._delete('subnets', subnet['subnet']['id']) dealloc = driver_mock().get_subnet().deallocate dealloc.assert_called_with( port['port']['fixed_ips'][0]['ip_address']) driver_mock().remove_subnet.assert_called_with( subnet['subnet']['id']) class TestTransactionGuard(Ml2PluginV2TestCase): def test_delete_network_guard(self): plugin = directory.get_plugin() ctx = context.get_admin_context() with db_api.CONTEXT_WRITER.using(ctx): with testtools.ExpectedException(RuntimeError): plugin.delete_network(ctx, 'id') def test_delete_subnet_guard(self): plugin = directory.get_plugin() ctx = context.get_admin_context() with db_api.CONTEXT_WRITER.using(ctx): with testtools.ExpectedException(RuntimeError): plugin.delete_subnet(ctx, 'id') class TestML2Segments(Ml2PluginV2TestCase): def _reserve_segment(self, network, seg_id=None): segment = {'id': 'fake_id', 'network_id': network['network']['id'], 'tenant_id': network['network']['tenant_id'], driver_api.NETWORK_TYPE: 'vlan', driver_api.PHYSICAL_NETWORK: self.physnet} if seg_id: segment[driver_api.SEGMENTATION_ID] = seg_id self.driver._handle_segment_change( mock.ANY, events.PRECOMMIT_CREATE, segments_plugin.Plugin(), self.context, segment) if seg_id: # Assert it is not changed self.assertEqual(seg_id, segment[driver_api.SEGMENTATION_ID]) else: self.assertGreater(segment[driver_api.SEGMENTATION_ID], 0) return segment def test_reserve_segment_success_with_partial_segment(self): with self.network() as network: self._reserve_segment(network) def test_reserve_segment_fail_with_duplicate_param(self): with self.network() as network: self._reserve_segment(network, 10) self.assertRaises( exc.VlanIdInUse, self._reserve_segment, network, 10) def test_create_network_mtu_on_precommit(self): with mock.patch.object(mech_test.TestMechanismDriver, 'create_network_precommit') as bmp: with mock.patch.object(self.driver,
'_get_network_mtu') as mtu: mtu.return_value = 1100 with self.network() as network: self.assertIn('mtu', network['network']) all_args = bmp.call_args_list mech_context = all_args[0][0][0] self.assertEqual(1100, mech_context.__dict__['_network']['mtu']) def test_provider_info_update_network(self): with self.network() as network: network_id = network['network']['id'] plugin = directory.get_plugin() updated_network = plugin.update_network( self.context, network_id, {'network': {'name': 'test-net'}}) self.assertIn('provider:network_type', updated_network) self.assertIn('provider:physical_network', updated_network) self.assertIn('provider:segmentation_id', updated_network) def test_reserve_segment_update_network_mtu(self): with self.network() as network: network_id = network['network']['id'] with mock.patch.object(self.driver, '_get_network_mtu') as mtu: mtu.return_value = 100 self._reserve_segment(network) updated_network = self.driver.get_network(self.context, network_id) self.assertEqual(100, updated_network[driver_api.MTU]) mtu.return_value = 200 self._reserve_segment(network) updated_network = self.driver.get_network(self.context, network_id) self.assertEqual(200, updated_network[driver_api.MTU]) def _test_notify_mechanism_manager(self, event): seg1 = {driver_api.NETWORK_TYPE: 'vlan', driver_api.PHYSICAL_NETWORK: self.physnet, driver_api.SEGMENTATION_ID: 1000} seg2 = {driver_api.NETWORK_TYPE: 'vlan', driver_api.PHYSICAL_NETWORK: self.physnet, driver_api.SEGMENTATION_ID: 1001} seg3 = {driver_api.NETWORK_TYPE: 'vlan', driver_api.PHYSICAL_NETWORK: self.physnet, driver_api.SEGMENTATION_ID: 1002} with self.network() as network: network = network['network'] for stale_seg in segments_db.get_network_segments(self.context, network['id']): segments_db.delete_network_segment(self.context, stale_seg['id']) for seg in [seg1, seg2, seg3]: seg['network_id'] = network['id'] segments_db.add_network_segment(self.context, network['id'], seg) self.net_context = None def record_network_context(net_context): self.net_context = net_context with mock.patch.object(managers.MechanismManager, 'update_network_precommit', side_effect=record_network_context): self.driver._handle_segment_change( mock.ANY, event, segments_plugin.Plugin(), self.context, seg1) # Make sure the mechanism manager can see the right number of # segments for the network self.assertEqual( 3, len(self.net_context.current[mpnet_apidef.SEGMENTS])) def test_reserve_segment_notify_mechanism_manager(self): self._test_notify_mechanism_manager(events.PRECOMMIT_CREATE) def test_release_segment(self): with self.network() as network: segment = self._reserve_segment(network, 10) segment['network_id'] = network['network']['id'] self.driver._handle_segment_change( mock.ANY, events.PRECOMMIT_DELETE, mock.ANY, self.context, segment) # Check that the segment_id is not reserved segment = self._reserve_segment( network, segment[driver_api.SEGMENTATION_ID]) def test_release_segment_notify_mechanism_manager(self): self._test_notify_mechanism_manager(events.PRECOMMIT_DELETE) def test_prevent_delete_segment_with_tenant_port(self): fake_owner_compute = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' ml2_db.subscribe() plugin = directory.get_plugin() with self.port(device_owner=fake_owner_compute) as port: # add writer here to make sure that the following operations are # performed in the same session with db_api.CONTEXT_WRITER.using(self.context): binding = p_utils.get_port_binding_by_status_and_host( plugin._get_port(self.context, port['port']['id']).port_bindings,
constants.ACTIVE) binding['host'] = 'host-ovs-no_filter' mech_context = driver_context.PortContext( plugin, self.context, port['port'], plugin.get_network(self.context, port['port']['network_id']), binding, None) plugin._bind_port_if_needed(mech_context) segment = segments_db.get_network_segments( self.context, port['port']['network_id'])[0] segment['network_id'] = port['port']['network_id'] self.assertRaises(c_exc.CallbackFailure, registry.publish, resources.SEGMENT, events.BEFORE_DELETE, mock.ANY, payload=events.DBEventPayload( self.context, states=(segment,), resource_id=segment['id'])) exist_port = self._show('ports', port['port']['id']) self.assertEqual(port['port']['id'], exist_port['port']['id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/test_port_binding.py0000644000175000017500000007504300000000000027231 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib.api.definitions import portbindings from neutron_lib.api.definitions import portbindings_extended as pbe_ext from neutron_lib import constants as const from neutron_lib import context from neutron_lib import exceptions from neutron_lib.plugins import directory from neutron_lib.plugins import utils from oslo_config import cfg from oslo_serialization import jsonutils import webob.exc from neutron.conf.plugins.ml2 import config from neutron.conf.plugins.ml2.drivers import driver_type from neutron.plugins.ml2 import driver_context from neutron.plugins.ml2 import models as ml2_models from neutron.plugins.ml2 import plugin as ml2_plugin from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin from neutron.tests.unit.plugins.ml2.drivers import mechanism_test class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase): def setUp(self): # Enable the test mechanism driver to ensure that # we can successfully call through to all mechanism # driver apis. cfg.CONF.set_override('mechanism_drivers', ['logger', 'test'], 'ml2') # NOTE(dasm): ml2_type_vlan options must be registered before they are # used. This piece was refactored and removed from .config, so it # causes a problem when tests are executed with pdb. # There is no problem when tests run without the debugger.
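# NOTE: a minimal sketch (illustration only, not part of the original # test) of the failure mode described above: oslo.config refuses to # override an option that has not been registered yet, e.g. # # from oslo_config import cfg # conf = cfg.ConfigOpts() # # Raises cfg.NoSuchOptError: 'network_vlan_ranges' is unregistered. # conf.set_override('network_vlan_ranges', ['physnet1:1000:1099'], # group='ml2_type_vlan') # # which is why register_ml2_drivers_vlan_opts() has to run first.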
driver_type.register_ml2_drivers_vlan_opts() cfg.CONF.set_override('network_vlan_ranges', ['physnet1:1000:1099'], group='ml2_type_vlan') super(PortBindingTestCase, self).setUp('ml2') self.port_create_status = 'DOWN' self.plugin = directory.get_plugin() self.plugin.start_rpc_listeners() def _check_response(self, port, vif_type, has_port_filter, bound, status): self.assertEqual(vif_type, port[portbindings.VIF_TYPE]) vif_details = port[portbindings.VIF_DETAILS] port_status = port['status'] if bound: # TODO(rkukura): Replace with new VIF security details self.assertEqual(has_port_filter, vif_details[portbindings.CAP_PORT_FILTER]) self.assertEqual(status or 'DOWN', port_status) else: self.assertEqual('DOWN', port_status) def _test_port_binding(self, host, vif_type, has_port_filter, bound, status=None, network_type='local'): mac_address = 'aa:aa:aa:aa:aa:aa' host_arg = {portbindings.HOST_ID: host, 'mac_address': mac_address} with self.port(name='name', arg_list=(portbindings.HOST_ID,), **host_arg) as port: self._check_response(port['port'], vif_type, has_port_filter, bound, status) port_id = port['port']['id'] neutron_context = context.get_admin_context() details = self.plugin.endpoints[0].get_device_details( neutron_context, agent_id="theAgentId", device=port_id) if bound: self.assertEqual(network_type, details['network_type']) self.assertEqual(mac_address, details['mac_address']) else: self.assertNotIn('network_type', details) self.assertNotIn('mac_address', details) def test_unbound(self): self._test_port_binding("", portbindings.VIF_TYPE_UNBOUND, False, False) def test_binding_failed(self): self._test_port_binding("host-fail", portbindings.VIF_TYPE_BINDING_FAILED, False, False) def test_binding_no_filter(self): self._test_port_binding("host-ovs-no_filter", portbindings.VIF_TYPE_OVS, False, True) def test_binding_filter(self): self._test_port_binding("host-bridge-filter", portbindings.VIF_TYPE_BRIDGE, True, True) def test_binding_status_active(self): self._test_port_binding("host-ovs-filter-active", portbindings.VIF_TYPE_OVS, True, True, 'ACTIVE') def test_update_port_binding_no_binding(self): ctx = context.get_admin_context() with self.port(name='name') as port: # emulating concurrent binding deletion with ctx.session.begin(): for item in (ctx.session.query(ml2_models.PortBinding). 
filter_by(port_id=port['port']['id'])): ctx.session.delete(item) self.assertIsNone( self.plugin.get_bound_port_context(ctx, port['port']['id'])) def test_hierarchical_binding(self): self._test_port_binding("host-hierarchical", portbindings.VIF_TYPE_OVS, False, True, network_type='vlan') def test_get_bound_port_context_cache_hit(self): ctx = context.get_admin_context() with self.port(name='name') as port: cached_network_id = port['port']['network_id'] some_network = {'id': cached_network_id} cached_networks = {cached_network_id: some_network} self.plugin.get_network = mock.Mock(return_value=some_network) self.plugin.get_bound_port_context(ctx, port['port']['id'], cached_networks=cached_networks) self.assertFalse(self.plugin.get_network.called) def _test_update_port_binding(self, host, new_host=None): with mock.patch.object(self.plugin, '_notify_port_updated') as notify_mock: host_arg = {portbindings.HOST_ID: host} update_body = {'name': 'test_update'} if new_host is not None: update_body[portbindings.HOST_ID] = new_host with self.port(name='name', arg_list=(portbindings.HOST_ID,), **host_arg) as port: neutron_context = context.get_admin_context() updated_port = self._update('ports', port['port']['id'], {'port': update_body}, neutron_context=neutron_context) port_data = updated_port['port'] if new_host is not None: self.assertEqual(new_host, port_data[portbindings.HOST_ID]) else: self.assertEqual(host, port_data[portbindings.HOST_ID]) if new_host is not None and new_host != host: notify_mock.assert_called_once_with(mock.ANY) else: self.assertFalse(notify_mock.called) def test_update_with_new_host_binding_notifies_agent(self): self._test_update_port_binding('host-ovs-no_filter', 'host-bridge-filter') def test_update_with_same_host_binding_does_not_notify(self): self._test_update_port_binding('host-ovs-no_filter', 'host-ovs-no_filter') def test_update_without_binding_does_not_notify(self): self._test_update_port_binding('host-ovs-no_filter') def test_update_from_empty_to_host_binding_notifies_agent(self): self._test_update_port_binding('', 'host-ovs-no_filter') def test_update_from_host_to_empty_binding_notifies_agent(self): self._test_update_port_binding('host-ovs-no_filter', '') def test_process_binding_port_host_id_changed(self): ctx = context.get_admin_context() plugin = directory.get_plugin() host_id = {portbindings.HOST_ID: 'host1'} with self.port(**host_id) as port: # Since the port is DOWN at first, # it's necessary to make its status ACTIVE for this test plugin.update_port_status(ctx, port['port']['id'], const.PORT_STATUS_ACTIVE) attrs = port['port'] attrs['status'] = const.PORT_STATUS_ACTIVE original_port = attrs.copy() attrs['binding:host_id'] = 'host2' updated_port = attrs.copy() network = {'id': attrs['network_id']} binding = ml2_models.PortBinding( port_id=original_port['id'], host=original_port['binding:host_id'], vnic_type=original_port['binding:vnic_type'], profile=jsonutils.dumps(original_port['binding:profile']), vif_type=original_port['binding:vif_type'], vif_details=original_port['binding:vif_details']) levels = [] mech_context = driver_context.PortContext( plugin, ctx, updated_port, network, binding, levels, original_port=original_port) plugin._process_port_binding(mech_context, port['port']) self.assertEqual(const.PORT_STATUS_DOWN, updated_port['status']) port_dict = plugin.get_port(ctx, port['port']['id']) self.assertEqual(const.PORT_STATUS_DOWN, port_dict['status']) def test_distributed_binding(self): ctx = context.get_admin_context() with
self.port(device_owner=const.DEVICE_OWNER_DVR_INTERFACE) as port: port_id = port['port']['id'] # Verify port's VIF type and status. self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED, port['port'][portbindings.VIF_TYPE]) self.assertEqual('DOWN', port['port']['status']) # Update port to bind for a host. self.plugin.update_distributed_port_binding(ctx, port_id, {'port': {portbindings.HOST_ID: 'host-ovs-no_filter', 'device_id': 'router1'}}) # Get port and verify VIF type and status unchanged. port = self._show('ports', port_id) self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED, port['port'][portbindings.VIF_TYPE]) self.assertEqual('DOWN', port['port']['status']) # Get and verify binding details for host details = self.plugin.endpoints[0].get_device_details( ctx, agent_id="theAgentId", device=port_id, host='host-ovs-no_filter') self.assertEqual('local', details['network_type']) # Get port and verify VIF type and changed status. port = self._show('ports', port_id) self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED, port['port'][portbindings.VIF_TYPE]) self.assertEqual('BUILD', port['port']['status']) # Mark device up. self.plugin.endpoints[0].update_device_up( ctx, agent_id="theAgentId", device=port_id, host='host-ovs-no_filter') # Get port and verify VIF type and changed status. port = self._show('ports', port_id) self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED, port['port'][portbindings.VIF_TYPE]) self.assertEqual('ACTIVE', port['port']['status']) # Mark device down. self.plugin.endpoints[0].update_device_down( ctx, agent_id="theAgentId", device=port_id, host='host-ovs-no_filter') # Get port and verify VIF type and changed status. port = self._show('ports', port_id) self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED, port['port'][portbindings.VIF_TYPE]) self.assertEqual('DOWN', port['port']['status']) def test_distributed_binding_multi_host_status(self): ctx = context.get_admin_context() with self.port(device_owner=const.DEVICE_OWNER_DVR_INTERFACE) as port: port_id = port['port']['id'] # Update port to bind for 1st host. self.plugin.update_distributed_port_binding(ctx, port_id, {'port': {portbindings.HOST_ID: 'host-ovs-no_filter', 'device_id': 'router1'}}) # Mark 1st device up. self.plugin.endpoints[0].update_device_up( ctx, agent_id="theAgentId", device=port_id, host='host-ovs-no_filter') # Get port and verify status is ACTIVE. port = self._show('ports', port_id) self.assertEqual('ACTIVE', port['port']['status']) # Update port to bind for a 2nd host. self.plugin.update_distributed_port_binding(ctx, port_id, {'port': {portbindings.HOST_ID: 'host-bridge-filter', 'device_id': 'router1'}}) # Mark 2nd device up. self.plugin.endpoints[0].update_device_up( ctx, agent_id="the2ndAgentId", device=port_id, host='host-bridge-filter') # Get port and verify status unchanged. port = self._show('ports', port_id) self.assertEqual('ACTIVE', port['port']['status']) # Mark 1st device down. self.plugin.endpoints[0].update_device_down( ctx, agent_id="theAgentId", device=port_id, host='host-ovs-no_filter') # Get port and verify status unchanged. port = self._show('ports', port_id) self.assertEqual('ACTIVE', port['port']['status']) # Mark 2nd device down. self.plugin.endpoints[0].update_device_down( ctx, agent_id="the2ndAgentId", device=port_id, host='host-bridge-filter') # Get port and verify status is DOWN. 
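# (Distributed ports aggregate status across all bound hosts: the port # only transitions to DOWN once the last host has reported its device # down, which is what the final check below verifies.)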
port = self._show('ports', port_id) self.assertEqual('DOWN', port['port']['status']) def test_distributed_binding_update_unbound_host(self): ctx = context.get_admin_context() with self.port(device_owner=const.DEVICE_OWNER_DVR_INTERFACE) as port: port_id = port['port']['id'] # Mark device up without first binding on host. self.plugin.endpoints[0].update_device_up( ctx, agent_id="theAgentId", device=port_id, host='host-ovs-no_filter') # Get port and verify status is still DOWN. port = self._show('ports', port_id) self.assertEqual('DOWN', port['port']['status']) class ExtendedPortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase): host = 'host-ovs-no_filter' def setUp(self): # Enable the test mechanism driver to ensure that # we can successfully call through to all mechanism # driver apis. config.register_ml2_plugin_opts() cfg.CONF.set_override('mechanism_drivers', ['logger', 'test'], 'ml2') driver_type.register_ml2_drivers_vlan_opts() cfg.CONF.set_override('network_vlan_ranges', ['physnet1:1000:1099'], group='ml2_type_vlan') super(ExtendedPortBindingTestCase, self).setUp('ml2') self.port_create_status = 'DOWN' self.plugin = directory.get_plugin() self.plugin.start_rpc_listeners() def _create_port_binding(self, fmt, port_id, host, tenant_id=None, **kwargs): tenant_id = tenant_id or self._tenant_id data = {'binding': {'host': host, 'tenant_id': tenant_id}} if kwargs: data['binding'].update(kwargs) binding_resource = 'ports/%s/bindings' % port_id binding_req = self.new_create_request(binding_resource, data, fmt) return binding_req.get_response(self.api) def _make_port_binding(self, fmt, port_id, host, **kwargs): res = self._create_port_binding(fmt, port_id, host, **kwargs) if res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError(code=res.status_int) return self.deserialize(fmt, res) def _update_port_binding(self, fmt, port_id, host, **kwargs): data = {'binding': kwargs} binding_req = self.new_update_request('ports', data, port_id, fmt, subresource='bindings', sub_id=host) return binding_req.get_response(self.api) def _do_update_port_binding(self, fmt, port_id, host, **kwargs): res = self._update_port_binding(fmt, port_id, host, **kwargs) if res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError(code=res.status_int) return self.deserialize(fmt, res) def _activate_port_binding(self, port_id, host, raw_response=True): response = self._req('PUT', 'ports', id=port_id, data={'port_id': port_id}, subresource='bindings', sub_id=host, action='activate').get_response(self.api) return self._check_code_and_serialize(response, raw_response) def _check_code_and_serialize(self, response, raw_response): if raw_response: return response if response.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError(code=response.status_int) return self.deserialize(self.fmt, response) def _list_port_bindings(self, port_id, params=None, raw_response=True): response = self._req( 'GET', 'ports', fmt=self.fmt, id=port_id, subresource='bindings', params=params).get_response(self.api) return self._check_code_and_serialize(response, raw_response) def _show_port_binding(self, port_id, host, params=None, raw_response=True): response = self._req( 'GET', 'ports', fmt=self.fmt, id=port_id, subresource='bindings', sub_id=host, params=params).get_response(self.api) return self._check_code_and_serialize(response, raw_response) def _delete_port_binding(self, port_id, host): response = self._req( 'DELETE', 'ports', fmt=self.fmt, id=port_id, 
subresource='bindings', sub_id=host).get_response(self.api) return response def _create_port_and_binding(self, **kwargs): device_owner = '%s%s' % (const.DEVICE_OWNER_COMPUTE_PREFIX, 'nova') with self.port(device_owner=device_owner) as port: port_id = port['port']['id'] binding = self._make_port_binding(self.fmt, port_id, self.host, **kwargs)['binding'] self._assert_bound_port_binding(binding) return port['port'], binding def _assert_bound_port_binding(self, binding): self.assertEqual(self.host, binding[pbe_ext.HOST]) self.assertEqual(portbindings.VIF_TYPE_OVS, binding[pbe_ext.VIF_TYPE]) self.assertEqual({'port_filter': False}, binding[pbe_ext.VIF_DETAILS]) def _assert_unbound_port_binding(self, binding): self.assertFalse(binding[pbe_ext.HOST]) self.assertEqual(portbindings.VIF_TYPE_UNBOUND, binding[pbe_ext.VIF_TYPE]) self.assertEqual({}, binding[pbe_ext.VIF_DETAILS]) self.assertEqual({}, binding[pbe_ext.PROFILE]) def test_create_port_binding(self): profile = {'key1': 'value1'} kwargs = {pbe_ext.PROFILE: profile} port, binding = self._create_port_and_binding(**kwargs) self._assert_bound_port_binding(binding) self.assertEqual({"key1": "value1"}, binding[pbe_ext.PROFILE]) def test_create_duplicate_port_binding(self): device_owner = '%s%s' % (const.DEVICE_OWNER_COMPUTE_PREFIX, 'nova') host_arg = {portbindings.HOST_ID: self.host} with self.port(device_owner=device_owner, arg_list=(portbindings.HOST_ID,), **host_arg) as port: response = self._create_port_binding(self.fmt, port['port']['id'], self.host) self.assertEqual(webob.exc.HTTPConflict.code, response.status_int) def test_create_port_binding_failure(self): device_owner = '%s%s' % (const.DEVICE_OWNER_COMPUTE_PREFIX, 'nova') with self.port(device_owner=device_owner) as port: port_id = port['port']['id'] response = self._create_port_binding(self.fmt, port_id, 'host-fail') self.assertEqual(webob.exc.HTTPInternalServerError.code, response.status_int) self.assertTrue(exceptions.PortBindingError.__name__ in response.text) def test_create_port_binding_for_non_compute_owner(self): with self.port() as port: port_id = port['port']['id'] response = self._create_port_binding(self.fmt, port_id, 'host-ovs-no_filter') self.assertEqual(webob.exc.HTTPBadRequest.code, response.status_int) def test_update_port_binding(self): port, binding = self._create_port_and_binding() profile = {'key1': 'value1'} kwargs = {pbe_ext.PROFILE: profile} binding = self._do_update_port_binding(self.fmt, port['id'], self.host, **kwargs)['binding'] self._assert_bound_port_binding(binding) self.assertEqual({"key1": "value1"}, binding[pbe_ext.PROFILE]) def test_update_non_existing_binding(self): device_owner = '%s%s' % (const.DEVICE_OWNER_COMPUTE_PREFIX, 'nova') with self.port(device_owner=device_owner) as port: port_id = port['port']['id'] profile = {'key1': 'value1'} kwargs = {pbe_ext.PROFILE: profile} response = self._update_port_binding(self.fmt, port_id, 'a_host', **kwargs) self.assertEqual(webob.exc.HTTPNotFound.code, response.status_int) def test_update_port_binding_for_non_compute_owner(self): with self.port() as port: port_id = port['port']['id'] profile = {'key1': 'value1'} kwargs = {pbe_ext.PROFILE: profile} response = self._update_port_binding(self.fmt, port_id, 'a_host', **kwargs) self.assertEqual(webob.exc.HTTPBadRequest.code, response.status_int) def test_update_port_binding_failure(self): class FakeBinding(object): vif_type = portbindings.VIF_TYPE_BINDING_FAILED class FakePortContext(object): _binding = FakeBinding() port, binding = self._create_port_and_binding() 
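# The fakes above stand in for the result of a failed binding attempt: # _bind_port_if_needed is patched below to return a context whose binding # ended in VIF_TYPE_BINDING_FAILED, so the update is expected to surface # a PortBindingError as a 500.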
profile = {'key1': 'value1'} kwargs = {pbe_ext.PROFILE: profile} with mock.patch.object( self.plugin, '_bind_port_if_needed', return_value=FakePortContext()): response = self._update_port_binding(self.fmt, port['id'], self.host, **kwargs) self.assertEqual(webob.exc.HTTPInternalServerError.code, response.status_int) self.assertTrue(exceptions.PortBindingError.__name__ in response.text) def test_activate_port_binding(self): port, new_binding = self._create_port_and_binding() with mock.patch.object(mechanism_test.TestMechanismDriver, '_check_port_context'): active_binding = self._activate_port_binding( port['id'], self.host, raw_response=False) self._assert_bound_port_binding(active_binding) updated_port = self._show('ports', port['id'])['port'] self.assertEqual(new_binding[pbe_ext.HOST], updated_port[portbindings.HOST_ID]) self.assertEqual(new_binding[pbe_ext.PROFILE], updated_port[portbindings.PROFILE]) self.assertEqual(new_binding[pbe_ext.VNIC_TYPE], updated_port[portbindings.VNIC_TYPE]) self.assertEqual(new_binding[pbe_ext.VIF_TYPE], updated_port[portbindings.VIF_TYPE]) self.assertEqual(new_binding[pbe_ext.VIF_DETAILS], updated_port[portbindings.VIF_DETAILS]) retrieved_bindings = self._list_port_bindings( port['id'], raw_response=False)['bindings'] retrieved_active_binding = utils.get_port_binding_by_status_and_host( retrieved_bindings, const.ACTIVE) self._assert_bound_port_binding(retrieved_active_binding) retrieved_inactive_binding = utils.get_port_binding_by_status_and_host( retrieved_bindings, const.INACTIVE) self._assert_unbound_port_binding(retrieved_inactive_binding) def test_activate_port_binding_for_non_compute_owner(self): port, new_binding = self._create_port_and_binding() data = {'port': {'device_owner': ''}} self.new_update_request('ports', data, port['id'], self.fmt).get_response(self.api) response = self._activate_port_binding(port['id'], self.host) self.assertEqual(webob.exc.HTTPBadRequest.code, response.status_int) def test_activate_port_binding_already_active(self): port, new_binding = self._create_port_and_binding() with mock.patch.object(mechanism_test.TestMechanismDriver, '_check_port_context'): self._activate_port_binding(port['id'], self.host) response = self._activate_port_binding(port['id'], self.host) self.assertEqual(webob.exc.HTTPConflict.code, response.status_int) def test_activate_port_binding_failure(self): port, new_binding = self._create_port_and_binding() with mock.patch.object(self.plugin, '_commit_port_binding', return_value=(None, None, True,)) as p_mock: response = self._activate_port_binding(port['id'], self.host) self.assertEqual(webob.exc.HTTPInternalServerError.code, response.status_int) self.assertTrue(exceptions.PortBindingError.__name__ in response.text) self.assertEqual(ml2_plugin.MAX_BIND_TRIES, p_mock.call_count) def test_activate_port_binding_non_existing_binding(self): port, new_binding = self._create_port_and_binding() response = self._activate_port_binding(port['id'], 'other-host') self.assertEqual(webob.exc.HTTPNotFound.code, response.status_int) def test_list_port_bindings(self): port, new_binding = self._create_port_and_binding() retrieved_bindings = self._list_port_bindings( port['id'], raw_response=False)['bindings'] self.assertEqual(2, len(retrieved_bindings)) status = const.ACTIVE self._assert_unbound_port_binding( utils.get_port_binding_by_status_and_host(retrieved_bindings, status)) status = const.INACTIVE self._assert_bound_port_binding( utils.get_port_binding_by_status_and_host(retrieved_bindings, status, host=self.host)) def 
test_list_port_bindings_with_query_parameters(self): port, new_binding = self._create_port_and_binding() params = '%s=%s' % (pbe_ext.STATUS, const.INACTIVE) retrieved_bindings = self._list_port_bindings( port['id'], params=params, raw_response=False)['bindings'] self.assertEqual(1, len(retrieved_bindings)) self._assert_bound_port_binding(retrieved_bindings[0]) def test_show_port_binding(self): port, new_binding = self._create_port_and_binding() retrieved_binding = self._show_port_binding( port['id'], self.host, raw_response=False)['binding'] self._assert_bound_port_binding(retrieved_binding) def test_show_port_binding_with_fields(self): port, new_binding = self._create_port_and_binding() fields = 'fields=%s' % pbe_ext.HOST retrieved_binding = self._show_port_binding( port['id'], self.host, raw_response=False, params=fields)['binding'] self.assertEqual(self.host, retrieved_binding[pbe_ext.HOST]) for key in (pbe_ext.STATUS, pbe_ext.PROFILE, pbe_ext.VNIC_TYPE, pbe_ext.VIF_TYPE, pbe_ext.VIF_DETAILS,): self.assertNotIn(key, retrieved_binding) def test_delete_port_binding(self): port, new_binding = self._create_port_and_binding() response = self._delete_port_binding(port['id'], self.host) self.assertEqual(webob.exc.HTTPNoContent.code, response.status_int) response = self._show_port_binding(port['id'], self.host) self.assertEqual(webob.exc.HTTPNotFound.code, response.status_int) def test_delete_non_existing_port_binding(self): port, new_binding = self._create_port_and_binding() response = self._delete_port_binding(port['id'], 'other-host') self.assertEqual(webob.exc.HTTPNotFound.code, response.status_int) def test_binding_fail_for_unknown_allocation(self): # The UUID is a random one - which of course is unknown to neutron # as a resource provider UUID. profile = {'allocation': 'ccccbb4c-2adf-11e9-91bc-db7063775d06'} kwargs = {pbe_ext.PROFILE: profile} device_owner = '%s%s' % (const.DEVICE_OWNER_COMPUTE_PREFIX, 'nova') with self.port(device_owner=device_owner) as port: port_id = port['port']['id'] response = self._create_port_binding( self.fmt, port_id, self.host, **kwargs) self.assertEqual(webob.exc.HTTPInternalServerError.code, response.status_int) self.assertTrue(exceptions.PortBindingError.__name__ in response.text) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/test_rpc.py0000644000175000017500000005554700000000000025346 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Unit Tests for ml2 rpc """ import collections import mock from neutron_lib.agent import topics from neutron_lib.api.definitions import portbindings from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from neutron_lib.services.qos import constants as qos_consts from oslo_config import cfg from oslo_context import context as oslo_context from sqlalchemy.orm import exc from neutron.agent import rpc as agent_rpc from neutron.db import provisioning_blocks from neutron.plugins.ml2 import db as ml2_db from neutron.plugins.ml2.drivers import type_tunnel from neutron.plugins.ml2 import managers from neutron.plugins.ml2 import rpc as plugin_rpc from neutron.tests import base cfg.CONF.import_group('ml2', 'neutron.conf.plugins.ml2.config') class RpcCallbacksTestCase(base.BaseTestCase): def setUp(self): super(RpcCallbacksTestCase, self).setUp() self.type_manager = managers.TypeManager() self.notifier = plugin_rpc.AgentNotifierApi(topics.AGENT) self.callbacks = plugin_rpc.RpcCallbacks(self.notifier, self.type_manager) self.plugin = mock.MagicMock() directory.add_plugin(plugin_constants.CORE, self.plugin) def _test_update_device_up(self, host=None): kwargs = { 'agent_id': 'foo_agent', 'device': 'foo_device', 'host': host } with mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin' '._device_to_port_id'),\ mock.patch.object(self.callbacks, 'notify_l2pop_port_wiring'): with mock.patch('neutron.db.provisioning_blocks.' 'provisioning_complete') as pc: self.callbacks.update_device_up(mock.Mock(), **kwargs) return pc def test_update_device_up_notify(self): notify = self._test_update_device_up() notify.assert_called_once_with(mock.ANY, mock.ANY, resources.PORT, provisioning_blocks.L2_AGENT_ENTITY) def test_update_device_up_notify_not_sent_with_port_not_found(self): self.plugin.port_bound_to_host.return_value = False notify = self._test_update_device_up('host') self.assertFalse(notify.call_count) def test_get_device_details_without_port_context(self): self.plugin.get_bound_port_context.return_value = None self.assertEqual( {'device': 'fake_device'}, self.callbacks.get_device_details(mock.Mock(), device='fake_device')) def test_get_device_details_port_context_without_bounded_segment(self): self.plugin.get_bound_port_context().bottom_bound_segment = None self.assertEqual( {'device': 'fake_device'}, self.callbacks.get_device_details(mock.Mock(), device='fake_device')) def test_get_device_details_port_status_equal_new_status(self): port = collections.defaultdict(lambda: 'fake') self.plugin.get_bound_port_context().current = port self.plugin.port_bound_to_host = port for admin_state_up in (True, False): new_status = (constants.PORT_STATUS_BUILD if admin_state_up else constants.PORT_STATUS_DOWN) for status in (constants.PORT_STATUS_ACTIVE, constants.PORT_STATUS_BUILD, constants.PORT_STATUS_DOWN, constants.PORT_STATUS_ERROR): port['admin_state_up'] = admin_state_up port['status'] = status self.plugin.update_port_status.reset_mock() self.callbacks.get_device_details(mock.Mock()) self.assertEqual(status == new_status, not self.plugin.update_port_status.called) def test_get_device_details_caching(self): port = collections.defaultdict(lambda: 'fake_port') cached_networks = {} self.plugin.get_bound_port_context().current = port self.plugin.get_bound_port_context().network.current = ( {"id": "fake_network"}) self.callbacks.get_device_details(mock.Mock(), host='fake_host', 
cached_networks=cached_networks) self.assertIn('fake_port', cached_networks) def test_get_device_details_wrong_host(self): port = collections.defaultdict(lambda: 'fake') port_context = self.plugin.get_bound_port_context() port_context.current = port port_context.host = 'fake' self.plugin.update_port_status.reset_mock() self.callbacks.get_device_details(mock.Mock(), host='fake_host') self.assertFalse(self.plugin.update_port_status.called) def test_get_device_details_port_no_host(self): port = collections.defaultdict(lambda: 'fake') port_context = self.plugin.get_bound_port_context() port_context.current = port self.plugin.update_port_status.reset_mock() self.callbacks.get_device_details(mock.Mock()) self.assertTrue(self.plugin.update_port_status.called) def test_get_device_details_qos_policy_id_none(self): port = collections.defaultdict(lambda: 'fake_port') self.plugin.get_bound_port_context().current = port self.plugin.get_bound_port_context().network._network = ( {"id": "fake_network"}) res = self.callbacks.get_device_details(mock.Mock(), host='fake') self.assertIsNone(res['qos_policy_id']) def test_get_device_details_network_qos_policy_id(self): port = collections.defaultdict(lambda: 'fake_port') self.plugin.get_bound_port_context().current = port self.plugin.get_bound_port_context().network._network = ( {"id": "fake_network", qos_consts.QOS_POLICY_ID: 'test-policy-id'}) res = self.callbacks.get_device_details(mock.Mock(), host='fake') self.assertEqual('test-policy-id', res['network_qos_policy_id']) def test_get_device_details_port_no_active_in_host(self): port = collections.defaultdict(lambda: 'fake_port') self.plugin.get_bound_port_context().current = port port['device_owner'] = constants.DEVICE_OWNER_COMPUTE_PREFIX port[portbindings.HOST_ID] = 'other-host' res = self.callbacks.get_device_details(mock.Mock(), host='host') self.assertIn(constants.NO_ACTIVE_BINDING, res) def test_get_device_details_qos_policy_id_from_port(self): port = collections.defaultdict( lambda: 'fake_port', {qos_consts.QOS_POLICY_ID: 'test-port-policy-id'}) self.plugin.get_bound_port_context().current = port self.plugin.get_bound_port_context().network._network = ( {"id": "fake_network", qos_consts.QOS_POLICY_ID: 'test-net-policy-id'}) res = self.callbacks.get_device_details(mock.Mock(), host='fake') self.assertEqual('test-port-policy-id', res['qos_policy_id']) def _test_get_devices_list(self, callback, side_effect, expected): devices = [1, 2, 3, 4, 5] kwargs = {'host': 'fake_host', 'agent_id': 'fake_agent_id'} with mock.patch.object(self.callbacks, '_get_device_details', side_effect=side_effect) as f: res = callback('fake_context', devices=devices, **kwargs) self.assertEqual(expected, res) self.assertEqual(len(devices), f.call_count) calls = [mock.call('fake_context', device=i, port_context=mock.ANY, **kwargs) for i in devices] f.assert_has_calls(calls) def test_get_devices_details_list(self): results = [{'device': [v]} for v in [1, 2, 3, 4, 5]] expected = results callback = self.callbacks.get_devices_details_list self._test_get_devices_list(callback, results, expected) def test_get_devices_details_list_with_empty_devices(self): with mock.patch.object(self.callbacks, 'get_device_details') as f: res = self.callbacks.get_devices_details_list('fake_context') self.assertFalse(f.called) self.assertEqual([], res) def test_get_devices_details_list_and_failed_devices(self): devices = [1, 2, 3, 4, 5] expected = {'devices': devices, 'failed_devices': []} callback = ( 
self.callbacks.get_devices_details_list_and_failed_devices) self._test_get_devices_list(callback, devices, expected) def test_get_devices_details_list_and_failed_devices_failures(self): devices = [1, Exception('testdevice'), 3, Exception('testdevice'), 5] expected = {'devices': [1, 3, 5], 'failed_devices': [2, 4]} callback = ( self.callbacks.get_devices_details_list_and_failed_devices) self._test_get_devices_list(callback, devices, expected) def test_get_network_details(self): kwargs = {'agent_id': 'agent_id', 'host': 'host_id', 'network': 'network'} with mock.patch.object(self.plugin, 'get_network') as mock_get_network: mock_get_network.return_value = 'net_details' self.assertEqual( 'net_details', self.callbacks.get_network_details('fake_context', **kwargs)) mock_get_network.assert_called_once_with('fake_context', 'network') def test_get_devices_details_list_and_failed_devices_empty_dev(self): with mock.patch.object(self.callbacks, 'get_device_details') as f: res = self.callbacks.get_devices_details_list_and_failed_devices( 'fake_context') self.assertFalse(f.called) self.assertEqual({'devices': [], 'failed_devices': []}, res) def _test_update_device_not_bound_to_host(self, func): self.plugin.port_bound_to_host.return_value = False self.callbacks.notify_l2pop_port_wiring = mock.Mock() self.plugin._device_to_port_id.return_value = 'fake_port_id' res = func(mock.Mock(), device='fake_device', host='fake_host') self.plugin.port_bound_to_host.assert_called_once_with(mock.ANY, 'fake_port_id', 'fake_host') return res def test_update_device_up_with_device_not_bound_to_host(self): with mock.patch.object(ml2_db, 'get_port') as ml2_db_get_port: self.assertIsNone(self._test_update_device_not_bound_to_host( self.callbacks.update_device_up)) port = ml2_db_get_port.return_value (self.plugin.nova_notifier.notify_port_active_direct. 
assert_called_once_with(port)) def test_update_device_up_with_device_not_bound_to_host_no_notify(self): cfg.CONF.set_override('notify_nova_on_port_status_changes', False) self.assertIsNone(self._test_update_device_not_bound_to_host( self.callbacks.update_device_up)) self.plugin.nova_notifier.notify_port_active_direct.assert_not_called() def test_update_device_down_with_device_not_bound_to_host(self): self.assertEqual( {'device': 'fake_device', 'exists': True}, self._test_update_device_not_bound_to_host( self.callbacks.update_device_down)) def test_update_device_down_call_update_port_status(self): self.plugin.update_port_status.return_value = False self.callbacks.notify_l2pop_port_wiring = mock.Mock() self.plugin._device_to_port_id.return_value = 'fake_port_id' self.assertEqual( {'device': 'fake_device', 'exists': False}, self.callbacks.update_device_down(mock.Mock(), device='fake_device', host='fake_host')) self.plugin.update_port_status.assert_called_once_with( mock.ANY, 'fake_port_id', constants.PORT_STATUS_DOWN, 'fake_host') def test_update_device_down_call_update_port_status_failed(self): self.plugin.update_port_status.side_effect = exc.StaleDataError self.assertEqual({'device': 'fake_device', 'exists': False}, self.callbacks.update_device_down( mock.Mock(), device='fake_device')) def _test_update_device_list(self, devices_up_side_effect, devices_down_side_effect, expected): devices_up = [1, 2, 3] devices_down = [4, 5] kwargs = {'host': 'fake_host', 'agent_id': 'fake_agent_id'} with mock.patch.object(self.callbacks, 'update_device_up', side_effect=devices_up_side_effect) as f_up, \ mock.patch.object(self.callbacks, 'update_device_down', side_effect=devices_down_side_effect) as f_down: res = self.callbacks.update_device_list( 'fake_context', devices_up=devices_up, devices_down=devices_down, **kwargs) self.assertEqual(expected, res) self.assertEqual(len(devices_up), f_up.call_count) self.assertEqual(len(devices_down), f_down.call_count) def test_update_device_list_no_failure(self): devices_up_side_effect = [1, 2, 3] devices_down_side_effect = [ {'device': 4, 'exists': True}, {'device': 5, 'exists': True}] expected = {'devices_up': devices_up_side_effect, 'failed_devices_up': [], 'devices_down': [{'device': 4, 'exists': True}, {'device': 5, 'exists': True}], 'failed_devices_down': []} self._test_update_device_list(devices_up_side_effect, devices_down_side_effect, expected) def test_update_device_list_failed_devices(self): devices_up_side_effect = [1, Exception('testdevice'), 3] devices_down_side_effect = [{'device': 4, 'exists': True}, Exception('testdevice')] expected = {'devices_up': [1, 3], 'failed_devices_up': [2], 'devices_down': [{'device': 4, 'exists': True}], 'failed_devices_down': [5]} self._test_update_device_list(devices_up_side_effect, devices_down_side_effect, expected) def test_update_device_list_empty_devices(self): expected = {'devices_up': [], 'failed_devices_up': [], 'devices_down': [], 'failed_devices_down': []} kwargs = {'host': 'fake_host', 'agent_id': 'fake_agent_id'} res = self.callbacks.update_device_list( 'fake_context', devices_up=[], devices_down=[], **kwargs) self.assertEqual(expected, res) class RpcApiTestCase(base.BaseTestCase): def _test_rpc_api(self, rpcapi, topic, method, rpc_method, **kwargs): if method == "update_device_list": expected = {'devices_up': [], 'failed_devices_up': [], 'devices_down': [], 'failed_devices_down': []} else: expected = 'foo' ctxt = oslo_context.RequestContext(user_id='fake_user', project_id='fake_project') expected_retval = 
expected if rpc_method == 'call' else None expected_version = kwargs.pop('version', None) fanout = kwargs.pop('fanout', False) with mock.patch.object(rpcapi.client, rpc_method) as rpc_mock,\ mock.patch.object(rpcapi.client, 'prepare') as prepare_mock: prepare_mock.return_value = rpcapi.client rpc_mock.return_value = expected_retval retval = getattr(rpcapi, method)(ctxt, **kwargs) prepare_args = {} if expected_version: prepare_args['version'] = expected_version if fanout: prepare_args['fanout'] = fanout if topic: prepare_args['topic'] = topic prepare_mock.assert_called_once_with(**prepare_args) self.assertEqual(retval, expected_retval) rpc_mock.assert_called_once_with(ctxt, method, **kwargs) def test_delete_network(self): rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT) self._test_rpc_api( rpcapi, topics.get_topic_name(topics.AGENT, topics.NETWORK, topics.DELETE), 'network_delete', rpc_method='cast', fanout=True, network_id='fake_request_spec') def test_port_update(self): rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT) self._test_rpc_api( rpcapi, topics.get_topic_name(topics.AGENT, topics.PORT, topics.UPDATE), 'port_update', rpc_method='cast', fanout=True, port='fake_port', network_type='fake_network_type', segmentation_id='fake_segmentation_id', physical_network='fake_physical_network') def test_port_delete(self): rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT) self._test_rpc_api( rpcapi, topics.get_topic_name(topics.AGENT, topics.PORT, topics.DELETE), 'port_delete', rpc_method='cast', fanout=True, port_id='fake_port') def test_tunnel_update(self): rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT) self._test_rpc_api( rpcapi, topics.get_topic_name(topics.AGENT, type_tunnel.TUNNEL, topics.UPDATE), 'tunnel_update', rpc_method='cast', fanout=True, tunnel_ip='fake_ip', tunnel_type='gre') def test_tunnel_delete(self): rpcapi = plugin_rpc.AgentNotifierApi(topics.AGENT) self._test_rpc_api( rpcapi, topics.get_topic_name(topics.AGENT, type_tunnel.TUNNEL, topics.DELETE), 'tunnel_delete', rpc_method='cast', fanout=True, tunnel_ip='fake_ip', tunnel_type='gre') def test_device_details(self): rpcapi = agent_rpc.PluginApi(topics.PLUGIN) self._test_rpc_api(rpcapi, None, 'get_device_details', rpc_method='call', device='fake_device', agent_id='fake_agent_id', host='fake_host') def test_devices_details_list(self): rpcapi = agent_rpc.PluginApi(topics.PLUGIN) self._test_rpc_api(rpcapi, None, 'get_devices_details_list', rpc_method='call', devices=['fake_device1', 'fake_device2'], agent_id='fake_agent_id', host='fake_host', version='1.3') def test_update_device_down(self): rpcapi = agent_rpc.PluginApi(topics.PLUGIN) self._test_rpc_api(rpcapi, None, 'update_device_down', rpc_method='call', device='fake_device', agent_id='fake_agent_id', host='fake_host') def test_tunnel_sync(self): rpcapi = agent_rpc.PluginApi(topics.PLUGIN) self._test_rpc_api(rpcapi, None, 'tunnel_sync', rpc_method='call', tunnel_ip='fake_tunnel_ip', tunnel_type=None, host='fake_host', version='1.4') def test_update_device_up(self): rpcapi = agent_rpc.PluginApi(topics.PLUGIN) self._test_rpc_api(rpcapi, None, 'update_device_up', rpc_method='call', device='fake_device', agent_id='fake_agent_id', host='fake_host') def test_update_device_list(self): rpcapi = agent_rpc.PluginApi(topics.PLUGIN) self._test_rpc_api(rpcapi, None, 'update_device_list', rpc_method='call', devices_up=['fake_device1', 'fake_device2'], devices_down=['fake_device3', 'fake_device4'], agent_id='fake_agent_id', host='fake_host', refresh_tunnels=False, version='1.8') def 
test_get_devices_details_list_and_failed_devices(self): rpcapi = agent_rpc.PluginApi(topics.PLUGIN) self._test_rpc_api(rpcapi, None, 'get_devices_details_list_and_failed_devices', rpc_method='call', devices=['fake_device1', 'fake_device2'], agent_id='fake_agent_id', host='fake_host', version='1.5') def test_devices_details_list_and_failed_devices(self): rpcapi = agent_rpc.PluginApi(topics.PLUGIN) self._test_rpc_api(rpcapi, None, 'get_devices_details_list_and_failed_devices', rpc_method='call', devices=['fake_device1', 'fake_device2'], agent_id='fake_agent_id', host='fake_host', version='1.5') def test_get_ports_by_vnic_type_and_host(self): rpcapi = agent_rpc.PluginApi(topics.PLUGIN) self._test_rpc_api(rpcapi, None, 'get_ports_by_vnic_type_and_host', rpc_method='call', vnic_type='fake_device1', host='fake_host', version='1.7') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/test_security_group.py0000644000175000017500000001653000000000000027632 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # Copyright 2013, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import math import mock from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as const from neutron_lib import context from neutron_lib import fixture from neutron_lib.plugins import directory from neutron.extensions import securitygroup as ext_sg from neutron.tests.unit.agent import test_securitygroups_rpc as test_sg_rpc from neutron.tests.unit.api.v2 import test_base from neutron.tests.unit.extensions import test_securitygroup as test_sg NOTIFIER = 'neutron.plugins.ml2.rpc.AgentNotifierApi' class Ml2SecurityGroupsTestCase(test_sg.SecurityGroupDBTestCase): def setUp(self, plugin=None): test_sg_rpc.set_firewall_driver(test_sg_rpc.FIREWALL_HYBRID_DRIVER) notifier_p = mock.patch(NOTIFIER) notifier_cls = notifier_p.start() self.notifier = mock.Mock() notifier_cls.return_value = self.notifier self.useFixture(fixture.APIDefinitionFixture()) super(Ml2SecurityGroupsTestCase, self).setUp('ml2') class TestMl2SecurityGroups(Ml2SecurityGroupsTestCase, test_sg.TestSecurityGroups, test_sg_rpc.SGNotificationTestMixin): def setUp(self): super(TestMl2SecurityGroups, self).setUp() self.ctx = context.get_admin_context() plugin = directory.get_plugin() plugin.start_rpc_listeners() def _make_port_with_new_sec_group(self, net_id): sg = self._make_security_group(self.fmt, 'name', 'desc') port = self._make_port( self.fmt, net_id, security_groups=[sg['security_group']['id']]) return port['port'] def _make_port_without_sec_group(self, net_id): port = self._make_port( self.fmt, net_id, security_groups=[]) return port['port'] def test_security_group_get_ports_from_devices(self): with self.network() as n: with self.subnet(n): orig_ports = [ 
self._make_port_with_new_sec_group(n['network']['id']), self._make_port_with_new_sec_group(n['network']['id']), self._make_port_without_sec_group(n['network']['id']) ] plugin = directory.get_plugin() # should match full ID and starting chars ports = plugin.get_ports_from_devices(self.ctx, [orig_ports[0]['id'], orig_ports[1]['id'][0:8], orig_ports[2]['id']]) self.assertEqual(len(orig_ports), len(ports)) for port_dict in ports: p = next(p for p in orig_ports if p['id'] == port_dict['id']) self.assertEqual(p['id'], port_dict['id']) self.assertEqual(p['security_groups'], port_dict[ext_sg.SECURITYGROUPS]) self.assertEqual([], port_dict['security_group_rules']) self.assertEqual([p['fixed_ips'][0]['ip_address']], port_dict['fixed_ips']) self._delete('ports', p['id']) def test_security_group_get_ports_from_devices_with_bad_id(self): plugin = directory.get_plugin() ports = plugin.get_ports_from_devices(self.ctx, ['bad_device_id']) self.assertFalse(ports) def test_security_group_no_db_calls_with_no_ports(self): plugin = directory.get_plugin() with mock.patch( 'neutron.plugins.ml2.db.get_sg_ids_grouped_by_port' ) as get_mock: self.assertFalse(plugin.get_ports_from_devices(self.ctx, [])) self.assertFalse(get_mock.called) def test_large_port_count_broken_into_parts(self): plugin = directory.get_plugin() max_ports_per_query = 5 ports_to_query = 73 for max_ports_per_query in (1, 2, 5, 7, 9, 31): with mock.patch('neutron.plugins.ml2.db.MAX_PORTS_PER_QUERY', new=max_ports_per_query),\ mock.patch( 'neutron.plugins.ml2.db.get_sg_ids_grouped_by_port', return_value={}) as get_mock: plugin.get_ports_from_devices(self.ctx, ['%s%s' % (const.TAP_DEVICE_PREFIX, i) for i in range(ports_to_query)]) all_call_args = [x[1][1] for x in get_mock.mock_calls] last_call_args = all_call_args.pop() # all but last should be getting MAX_PORTS_PER_QUERY ports self.assertTrue( all(map(lambda x: len(x) == max_ports_per_query, all_call_args)) ) remaining = ports_to_query % max_ports_per_query if remaining: self.assertEqual(remaining, len(last_call_args)) # should be broken into ceil(total/MAX_PORTS_PER_QUERY) calls self.assertEqual( math.ceil(ports_to_query / float(max_ports_per_query)), get_mock.call_count ) def test_full_uuids_skip_port_id_lookup(self): plugin = directory.get_plugin() # when full UUIDs are provided, the _or statement should only # have one matching 'IN' criteria for all of the IDs with mock.patch('neutron.plugins.ml2.db.or_') as or_mock,\ mock.patch('sqlalchemy.orm.Session.query') as qmock: fmock = qmock.return_value.outerjoin.return_value.filter # return no ports to exit the method early since we are mocking # the query fmock.return_value = [] plugin.get_ports_from_devices(self.ctx, [test_base._uuid(), test_base._uuid()]) # the or_ function should only have one argument or_mock.assert_called_once_with(mock.ANY) def test_security_groups_created_outside_transaction(self): def record_after_state(r, e, t, context, *args, **kwargs): self.was_active = context.session.is_active registry.subscribe(record_after_state, resources.SECURITY_GROUP, events.AFTER_CREATE) with self.subnet() as s: self.assertFalse(self.was_active) self._delete( 'security-groups', self._list('security-groups')['security_groups'][0]['id']) with self.port(subnet=s): self.assertFalse(self.was_active) class TestMl2SGServerRpcCallBack(Ml2SecurityGroupsTestCase, test_sg_rpc.SGServerRpcCallBackTestCase): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 
neutron-16.0.0.0b2.dev214/neutron/tests/unit/plugins/ml2/test_tracked_resources.py0000644000175000017500000003655200000000000030264 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib import context from neutron_lib import fixture from oslo_utils import uuidutils from neutron.db.quota import api as quota_db_api from neutron.tests.unit.api import test_extensions from neutron.tests.unit.extensions import test_l3 from neutron.tests.unit.extensions import test_securitygroup from neutron.tests.unit.plugins.ml2 import base as ml2_base from neutron.tests.unit.plugins.ml2 import test_plugin class SgTestCaseWrapper(test_securitygroup.SecurityGroupDBTestCase): # This wrapper class enables Ml2PluginV2TestCase to correctly call the # setup method in SecurityGroupDBTestCase which does not accept the # service_plugins keyword parameter. def setUp(self, plugin, **kwargs): super(SgTestCaseWrapper, self).setUp(plugin) class BaseTestTrackedResources(test_plugin.Ml2PluginV2TestCase, SgTestCaseWrapper): def setUp(self): self.ctx = context.get_admin_context() super(BaseTestTrackedResources, self).setUp() self._tenant_id = uuidutils.generate_uuid() def _test_init(self, resource_name): quota_db_api.set_quota_usage( self.ctx, resource_name, self._tenant_id) class BaseTestEventHandler(object): def setUp(self): # Prevent noise from default security group operations def_sec_group_patch = mock.patch( 'neutron.db.securitygroups_db.SecurityGroupDbMixin.' '_ensure_default_security_group') def_sec_group_patch.start() get_sec_group_port_patch = mock.patch( 'neutron.db.securitygroups_db.SecurityGroupDbMixin.' '_get_security_groups_on_port') get_sec_group_port_patch.start() process_port_create_security_group_patch = mock.patch( 'neutron.db.securitygroups_db.SecurityGroupDbMixin.' 
'_process_port_create_security_group') process_port_create_security_group_patch.start() handler_patch = mock.patch( 'neutron.quota.resource.TrackedResource._db_event_handler') self.handler_mock = handler_patch.start() super(BaseTestEventHandler, self).setUp() def _verify_event_handler_calls(self, data, expected_call_count=1): if not hasattr(data, '__iter__') or isinstance(data, dict): data = [data] self.assertEqual(expected_call_count, self.handler_mock.call_count) call_idx = -1 for item in data: if item: model = self.handler_mock.call_args_list[call_idx][0][-1] self.assertEqual(model['id'], item['id']) self.assertEqual(model['tenant_id'], item['tenant_id']) call_idx = call_idx - 1 class TestTrackedResourcesEventHandler(BaseTestEventHandler, BaseTestTrackedResources): def test_create_delete_network_triggers_event(self): self._test_init('network') net = self._make_network('json', 'meh', True)['network'] self._verify_event_handler_calls(net) self._delete('networks', net['id']) self._verify_event_handler_calls(net, expected_call_count=2) def test_create_delete_port_triggers_event(self): self._test_init('port') net = self._make_network('json', 'meh', True)['network'] port = self._make_port('json', net['id'])['port'] # Expecting 2 calls - 1 for the network, 1 for the port self._verify_event_handler_calls(port, expected_call_count=2) self._delete('ports', port['id']) self._verify_event_handler_calls(port, expected_call_count=3) def test_create_delete_subnet_triggers_event(self): self._test_init('subnet') net = self._make_network('json', 'meh', True) subnet = self._make_subnet('json', net, '10.0.0.1', '10.0.0.0/24')['subnet'] # Expecting 2 calls - 1 for the network, 1 for the subnet self._verify_event_handler_calls([subnet, net['network']], expected_call_count=2) self._delete('subnets', subnet['id']) self._verify_event_handler_calls(subnet, expected_call_count=3) def test_create_delete_network_with_subnet_triggers_event(self): self._test_init('network') self._test_init('subnet') net = self._make_network('json', 'meh', True) subnet = self._make_subnet('json', net, '10.0.0.1', '10.0.0.0/24')['subnet'] # Expecting 2 calls - 1 for the network, 1 for the subnet self._verify_event_handler_calls([subnet, net['network']], expected_call_count=2) self._delete('networks', net['network']['id']) # Expecting 2 more calls - 1 for the network, 1 for the subnet self._verify_event_handler_calls([net['network'], subnet], expected_call_count=4) def test_create_delete_subnetpool_triggers_event(self): self._test_init('subnetpool') pool = self._make_subnetpool('json', ['10.0.0.0/8'], name='meh', tenant_id=self._tenant_id)['subnetpool'] self._verify_event_handler_calls(pool) self._delete('subnetpools', pool['id']) self._verify_event_handler_calls(pool, expected_call_count=2) def test_create_delete_securitygroup_triggers_event(self): self._test_init('security_group') sec_group = self._make_security_group( 'json', 'meh', 'meh', tenant_id=self._tenant_id)['security_group'] # When a security group is created it also creates 2 rules, therefore # there will be three calls and we need to verify the first self._verify_event_handler_calls([None, None, sec_group], expected_call_count=3) self._delete('security-groups', sec_group['id']) # When a security group is deleted it also removes the 2 rules # generated upon creation self._verify_event_handler_calls(sec_group, expected_call_count=6) def test_create_delete_securitygrouprule_triggers_event(self): self._test_init('security_group_rule') sec_group = self._make_security_group( 
'json', 'meh', 'meh', tenant_id=self._tenant_id)['security_group'] rule_req = self._build_security_group_rule( sec_group['id'], 'ingress', 'TCP', tenant_id=self._tenant_id) sec_group_rule = self._make_security_group_rule( 'json', rule_req)['security_group_rule'] # When a security group is created it also creates 2 rules, therefore # there will be four calls in total to the event handler self._verify_event_handler_calls(sec_group_rule, expected_call_count=4) self._delete('security-group-rules', sec_group_rule['id']) self._verify_event_handler_calls(sec_group_rule, expected_call_count=5) class TestL3ResourcesEventHandler(BaseTestEventHandler, ml2_base.ML2TestFramework, test_l3.L3NatTestCaseMixin): def setUp(self): super(TestL3ResourcesEventHandler, self).setUp() self.useFixture(fixture.APIDefinitionFixture()) ext_mgr = test_l3.L3TestExtensionManager() self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr) def test_create_delete_floating_ip_triggers_event(self): net = self._make_network('json', 'meh', True) subnet = self._make_subnet('json', net, '14.0.0.1', '14.0.0.0/24')['subnet'] self._set_net_external(subnet['network_id']) floatingip = self._make_floatingip('json', subnet['network_id']) internal_port = self._show( 'ports', floatingip['floatingip']['port_id'])['ports'][0] # When a floatingip is created it also creates a port, therefore # there will be four calls in total to the event handler self._verify_event_handler_calls(floatingip['floatingip'], expected_call_count=4) self._delete('floatingips', floatingip['floatingip']['id']) # Expecting 2 more calls - 1 for the port, 1 for the floatingip self._verify_event_handler_calls( [internal_port, floatingip['floatingip']], expected_call_count=6) class TestTrackedResources(BaseTestTrackedResources): def _verify_dirty_bit(self, resource_name, expected_value=True): usage = quota_db_api.get_quota_usage_by_resource_and_tenant( self.ctx, resource_name, self._tenant_id) self.assertEqual(expected_value, usage.dirty) def test_create_delete_network_marks_dirty(self): self._test_init('network') net = self._make_network('json', 'meh', True)['network'] self._verify_dirty_bit('network') # Clear the dirty bit quota_db_api.set_quota_usage_dirty( self.ctx, 'network', self._tenant_id, dirty=False) self._delete('networks', net['id']) self._verify_dirty_bit('network') def test_list_networks_clears_dirty(self): self._test_init('network') net = self._make_network('json', 'meh', True)['network'] self.ctx.tenant_id = net['tenant_id'] self._list('networks', neutron_context=self.ctx) self._verify_dirty_bit('network', expected_value=False) def test_create_delete_port_marks_dirty(self): self._test_init('port') net = self._make_network('json', 'meh', True)['network'] port = self._make_port('json', net['id'])['port'] self._verify_dirty_bit('port') # Clear the dirty bit quota_db_api.set_quota_usage_dirty( self.ctx, 'port', self._tenant_id, dirty=False) self._delete('ports', port['id']) self._verify_dirty_bit('port') def test_list_ports_clears_dirty(self): self._test_init('port') net = self._make_network('json', 'meh', True)['network'] port = self._make_port('json', net['id'])['port'] self.ctx.tenant_id = port['tenant_id'] self._list('ports', neutron_context=self.ctx) self._verify_dirty_bit('port', expected_value=False) def test_create_delete_subnet_marks_dirty(self): self._test_init('subnet') net = self._make_network('json', 'meh', True) subnet = self._make_subnet('json', net, '10.0.0.1', '10.0.0.0/24')['subnet'] self._verify_dirty_bit('subnet') # Clear the dirty
bit quota_db_api.set_quota_usage_dirty( self.ctx, 'subnet', self._tenant_id, dirty=False) self._delete('subnets', subnet['id']) self._verify_dirty_bit('subnet') def test_create_delete_network_with_subnet_marks_dirty(self): self._test_init('network') self._test_init('subnet') net = self._make_network('json', 'meh', True) self._make_subnet('json', net, '10.0.0.1', '10.0.0.0/24')['subnet'] self._verify_dirty_bit('subnet') # Clear the dirty bit quota_db_api.set_quota_usage_dirty( self.ctx, 'subnet', self._tenant_id, dirty=False) self._delete('networks', net['network']['id']) self._verify_dirty_bit('network') self._verify_dirty_bit('subnet') def test_list_subnets_clears_dirty(self): self._test_init('subnet') net = self._make_network('json', 'meh', True) subnet = self._make_subnet('json', net, '10.0.0.1', '10.0.0.0/24')['subnet'] self.ctx.tenant_id = subnet['tenant_id'] self._list('subnets', neutron_context=self.ctx) self._verify_dirty_bit('subnet', expected_value=False) def test_create_delete_subnetpool_marks_dirty(self): self._test_init('subnetpool') pool = self._make_subnetpool('json', ['10.0.0.0/8'], name='meh', tenant_id=self._tenant_id)['subnetpool'] self._verify_dirty_bit('subnetpool') # Clear the dirty bit quota_db_api.set_quota_usage_dirty( self.ctx, 'subnetpool', self._tenant_id, dirty=False) self._delete('subnetpools', pool['id']) self._verify_dirty_bit('subnetpool') def test_list_subnetpools_clears_dirty(self): self._test_init('subnetpool') pool = self._make_subnetpool('json', ['10.0.0.0/8'], name='meh', tenant_id=self._tenant_id)['subnetpool'] self.ctx.tenant_id = pool['tenant_id'] self._list('subnetpools', neutron_context=self.ctx) self._verify_dirty_bit('subnetpool', expected_value=False) def test_create_delete_securitygroup_marks_dirty(self): self._test_init('security_group') sec_group = self._make_security_group( 'json', 'meh', 'meh', tenant_id=self._tenant_id)['security_group'] self._verify_dirty_bit('security_group') # Clear the dirty bit quota_db_api.set_quota_usage_dirty( self.ctx, 'security_group', self._tenant_id, dirty=False) self._delete('security-groups', sec_group['id']) self._verify_dirty_bit('security_group') def test_list_securitygroups_clears_dirty(self): self._test_init('security_group') self._make_security_group( 'json', 'meh', 'meh', tenant_id=self._tenant_id)['security_group'] self.ctx.tenant_id = self._tenant_id self._list('security-groups', neutron_context=self.ctx) self._verify_dirty_bit('security_group', expected_value=False) def test_create_delete_securitygrouprule_marks_dirty(self): self._test_init('security_group_rule') sec_group = self._make_security_group( 'json', 'meh', 'meh', tenant_id=self._tenant_id)['security_group'] rule_req = self._build_security_group_rule( sec_group['id'], 'ingress', 'TCP', tenant_id=self._tenant_id) sec_group_rule = self._make_security_group_rule( 'json', rule_req)['security_group_rule'] self._verify_dirty_bit('security_group_rule') # Clear the dirty bit quota_db_api.set_quota_usage_dirty( self.ctx, 'security_group_rule', self._tenant_id, dirty=False) self._delete('security-group-rules', sec_group_rule['id']) self._verify_dirty_bit('security_group_rule') def test_list_securitygrouprules_clears_dirty(self): self._test_init('security_group_rule') self._make_security_group( 'json', 'meh', 'meh', tenant_id=self._tenant_id)['security_group'] # As the security group create operation also creates 2 security group # rules there is no need to explicitly create any rule self.ctx.tenant_id = self._tenant_id 
self._list('security-group-rules', neutron_context=self.ctx) self._verify_dirty_bit('security_group_rule', expected_value=False) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4750462 neutron-16.0.0.0b2.dev214/neutron/tests/unit/privileged/0000755000175000017500000000000000000000000023110 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/privileged/__init__.py0000644000175000017500000000000000000000000025207 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4750462 neutron-16.0.0.0b2.dev214/neutron/tests/unit/privileged/agent/0000755000175000017500000000000000000000000024206 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/privileged/agent/__init__.py0000644000175000017500000000000000000000000026305 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4750462 neutron-16.0.0.0b2.dev214/neutron/tests/unit/privileged/agent/linux/0000755000175000017500000000000000000000000025345 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/privileged/agent/linux/__init__.py0000644000175000017500000000000000000000000027444 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/privileged/agent/linux/test_ip_lib.py0000644000175000017500000002552100000000000030221 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
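# NOTE(editor): the privileged ip_lib tests below patch pyroute2 at the
# module level. As a rough, hedged sketch (an assumed simplification, not
# neutron's actual helper), the pattern under test resolves a device name to
# an ifindex and treats an empty lookup result as a missing interface:
def _example_run_iproute_link(device, command, **kwargs):
    """Resolve a device name to an ifindex and run a link command on it."""
    import pyroute2  # local import; the real module imports follow below
    with pyroute2.IPRoute() as ip:
        indexes = ip.link_lookup(ifname=device)
        if not indexes:
            # neutron raises priv_lib.NetworkInterfaceNotFound here
            raise RuntimeError('interface %s not found' % device)
        return ip.link(command, index=indexes[0], **kwargs)
# The mocks in IpLibTestCase stand in for IPRoute (or NetNS when a namespace
# is given), so these calls can be asserted without touching the kernel.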
import errno import mock import pyroute2 from pyroute2 import netlink from pyroute2.netlink.rtnl import ifinfmsg from neutron.privileged.agent.linux import ip_lib as priv_lib from neutron.tests import base class IpLibTestCase(base.BaseTestCase): def _test_run_iproute_link(self, namespace=None): ip_obj = "NetNS" if namespace else "IPRoute" with mock.patch.object(pyroute2, ip_obj) as ip_mock_cls: ip_mock = ip_mock_cls() ip_mock.__enter__().link_lookup.return_value = [2] priv_lib._run_iproute_link("test_cmd", "eth0", namespace, test_param="test_value") ip_mock.assert_has_calls([ mock.call.__enter__().link_lookup(ifname="eth0"), mock.call.__exit__(None, None, None), mock.call.__enter__().link("test_cmd", index=2, test_param="test_value")]) def test_run_iproute_link_no_namespace(self): self._test_run_iproute_link() def test_run_iproute_link_in_namespace(self): self._test_run_iproute_link(namespace="testns") def test_run_iproute_link_interface_not_exists(self): with mock.patch.object(pyroute2, "IPRoute") as iproute_mock: ip_mock = iproute_mock() ret_values = [ [], # No interface found. None, # Unexpected output but also handled. ] for ret_val in ret_values: ip_mock.__enter__().link_lookup.return_value = ret_val self.assertRaises( priv_lib.NetworkInterfaceNotFound, priv_lib._run_iproute_link, "test_cmd", "eth0", None, test_param="test_value") @mock.patch.object(priv_lib, 'get_iproute') def test_get_link_id(self, mock_iproute): mock_ip = mock.Mock() mock_ip.link_lookup.return_value = ['interface_id'] mock_iproute.return_value.__enter__.return_value = mock_ip self.assertEqual('interface_id', priv_lib.get_link_id('device', 'namespace')) def test_run_iproute_link_interface_removed_during_call(self): with mock.patch.object(pyroute2, "IPRoute") as iproute_mock: ip_mock = iproute_mock() ip_mock.__enter__().link_lookup.return_value = [2] ip_mock.__enter__().link.side_effect = pyroute2.NetlinkError( code=errno.ENODEV) self.assertRaises( priv_lib.NetworkInterfaceNotFound, priv_lib._run_iproute_link, "test_cmd", "eth0", None, test_param="test_value") def test_run_iproute_link_op_not_supported(self): with mock.patch.object(pyroute2, "IPRoute") as iproute_mock: ip_mock = iproute_mock() ip_mock.__enter__().link_lookup.return_value = [2] ip_mock.__enter__().link.side_effect = pyroute2.NetlinkError( code=errno.EOPNOTSUPP) self.assertRaises( priv_lib.InterfaceOperationNotSupported, priv_lib._run_iproute_link, "test_cmd", "eth0", None, test_param="test_value") def test_run_iproute_link_namespace_not_exists(self): with mock.patch.object(pyroute2, "IPRoute") as iproute_mock: iproute_mock.side_effect = OSError( errno.ENOENT, "Test no netns exception") self.assertRaises( priv_lib.NetworkNamespaceNotFound, priv_lib._run_iproute_link, "test_cmd", "eth0", None, test_param="test_value") def test_run_iproute_link_error(self): with mock.patch.object(pyroute2, "IPRoute") as iproute_mock: iproute_mock.side_effect = OSError( errno.EINVAL, "Test invalid argument exception") try: priv_lib._run_iproute_link( "test_cmd", "eth0", None, test_param="test_value") self.fail("OSError exception not raised") except OSError as e: self.assertEqual(errno.EINVAL, e.errno) def _test_run_iproute_neigh(self, namespace=None): ip_obj = "NetNS" if namespace else "IPRoute" with mock.patch.object(pyroute2, ip_obj) as ip_mock_cls: ip_mock = ip_mock_cls() ip_mock.__enter__().link_lookup.return_value = [2] priv_lib._run_iproute_neigh("test_cmd", "eth0", namespace, test_param="test_value") ip_mock.assert_has_calls([ 
mock.call.__enter__().link_lookup(ifname="eth0"), mock.call.__exit__(None, None, None), mock.call.__enter__().neigh("test_cmd", ifindex=2, test_param="test_value")]) def test_run_iproute_neigh_no_namespace(self): self._test_run_iproute_neigh() def test_run_iproute_neigh_in_namespace(self): self._test_run_iproute_neigh(namespace="testns") def test_run_iproute_neigh_interface_not_exists(self): with mock.patch.object(pyroute2, "IPRoute") as iproute_mock: ip_mock = iproute_mock() ip_mock.__enter__().link_lookup.return_value = [] self.assertRaises( priv_lib.NetworkInterfaceNotFound, priv_lib._run_iproute_neigh, "test_cmd", "eth0", None, test_param="test_value") def test_run_iproute_neigh_interface_removed_during_call(self): with mock.patch.object(pyroute2, "IPRoute") as iproute_mock: ip_mock = iproute_mock() ip_mock.__enter__().link_lookup.return_value = [2] ip_mock.__enter__().neigh.side_effect = pyroute2.NetlinkError( code=errno.ENODEV) self.assertRaises( priv_lib.NetworkInterfaceNotFound, priv_lib._run_iproute_neigh, "test_cmd", "eth0", None, test_param="test_value") def test_run_iproute_neigh_namespace_not_exists(self): with mock.patch.object(pyroute2, "IPRoute") as iproute_mock: iproute_mock.side_effect = OSError( errno.ENOENT, "Test no netns exception") self.assertRaises( priv_lib.NetworkNamespaceNotFound, priv_lib._run_iproute_neigh, "test_cmd", "eth0", None, test_param="test_value") def test_run_iproute_neigh_error(self): with mock.patch.object(pyroute2, "IPRoute") as iproute_mock: iproute_mock.side_effect = OSError( errno.EINVAL, "Test invalid argument exception") try: priv_lib._run_iproute_neigh( "test_cmd", "eth0", None, test_param="test_value") self.fail("OSError exception not raised") except OSError as e: self.assertEqual(errno.EINVAL, e.errno) def _test_run_iproute_addr(self, namespace=None): ip_obj = "NetNS" if namespace else "IPRoute" with mock.patch.object(pyroute2, ip_obj) as ip_mock_cls: ip_mock = ip_mock_cls() ip_mock.__enter__().link_lookup.return_value = [2] priv_lib._run_iproute_addr("test_cmd", "eth0", namespace, test_param="test_value") ip_mock.assert_has_calls([ mock.call.__enter__().link_lookup(ifname="eth0"), mock.call.__exit__(None, None, None), mock.call.__enter__().addr("test_cmd", index=2, test_param="test_value")]) def test_run_iproute_addr_no_namespace(self): self._test_run_iproute_addr() def test_run_iproute_addr_in_namespace(self): self._test_run_iproute_addr(namespace="testns") def test_run_iproute_addr_interface_not_exists(self): with mock.patch.object(pyroute2, "IPRoute") as iproute_mock: ip_mock = iproute_mock() ip_mock.__enter__().link_lookup.return_value = [] self.assertRaises( priv_lib.NetworkInterfaceNotFound, priv_lib._run_iproute_addr, "test_cmd", "eth0", None, test_param="test_value") def test_run_iproute_addr_interface_removed_during_call(self): with mock.patch.object(pyroute2, "IPRoute") as iproute_mock: ip_mock = iproute_mock() ip_mock.__enter__().link_lookup.return_value = [2] ip_mock.__enter__().addr.side_effect = pyroute2.NetlinkError( code=errno.ENODEV) self.assertRaises( priv_lib.NetworkInterfaceNotFound, priv_lib._run_iproute_addr, "test_cmd", "eth0", None, test_param="test_value") def test_run_iproute_addr_namespace_not_exists(self): with mock.patch.object(pyroute2, "IPRoute") as iproute_mock: iproute_mock.side_effect = OSError( errno.ENOENT, "Test no netns exception") self.assertRaises( priv_lib.NetworkNamespaceNotFound, priv_lib._run_iproute_addr, "test_cmd", "eth0", None, test_param="test_value") def test_run_iproute_addr_error(self): 
with mock.patch.object(pyroute2, "IPRoute") as iproute_mock: iproute_mock.side_effect = OSError( errno.EINVAL, "Test invalid argument exception") try: priv_lib._run_iproute_addr( "test_cmd", "eth0", None, test_param="test_value") self.fail("OSError exception not raised") except OSError as e: self.assertEqual(errno.EINVAL, e.errno) class MakeSerializableTestCase(base.BaseTestCase): NLA_DATA1 = ifinfmsg.ifinfbase.state(data=b'54321') NLA_DATA2 = ifinfmsg.ifinfbase.state(data=b'abcdef') INPUT_1 = {'key1': 'value1', b'key2': b'value2', 'key3': ('a', 2), 'key4': [1, 2, 'c'], b'key5': netlink.nla_slot('nla_name1', NLA_DATA1), 'key6': netlink.nla_slot(b'nla_name2', NLA_DATA2)} OUTPUT_1 = {'key1': 'value1', 'key2': 'value2', 'key3': ('a', 2), 'key4': [1, 2, 'c'], 'key5': ['nla_name1', '54321'], 'key6': ['nla_name2', 'abcdef']} def test_make_serializable(self): self.assertEqual(self.OUTPUT_1, priv_lib.make_serializable(self.INPUT_1)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/privileged/agent/linux/test_netlink_lib.py0000644000175000017500000004001200000000000031245 0ustar00coreycorey00000000000000# Copyright (c) 2017 Fujitsu Limited # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
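# NOTE(editor): netlink_lib drives libnetfilter_conntrack through
# ctypes-loaded handles, so the tests below swap the module-level handles for
# mocks in setUp. A minimal, illustrative sketch of that pattern (hypothetical
# helper name, not part of the original module):
def _example_mock_ctypes_handles(nl_lib_module):
    """Replace ctypes library handles with mocks so C calls can be asserted."""
    import mock
    nl_lib_module.nfct = mock.Mock()
    nl_lib_module.libc = mock.Mock()
    # Every nfct_* call now records its arguments instead of touching the
    # kernel, enabling assertions such as
    # nl_lib_module.nfct.nfct_open.assert_called_once_with(...)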
import mock from neutron_lib import constants from neutron_lib import exceptions import testtools from neutron.privileged.agent.linux import netlink_constants as nl_constants from neutron.privileged.agent.linux import netlink_lib as nl_lib from neutron.tests import base FAKE_ICMP_ENTRY = {'ipversion': 4, 'protocol': 'icmp', 'type': '8', 'code': '0', 'id': 1234, 'src': '1.1.1.1', 'dst': '2.2.2.2', 'zone': 1} FAKE_TCP_ENTRY = {'ipversion': 4, 'protocol': 'tcp', 'sport': 1, 'dport': 2, 'src': '1.1.1.1', 'dst': '2.2.2.2', 'zone': 1} FAKE_UDP_ENTRY = {'ipversion': 4, 'protocol': 'udp', 'sport': 1, 'dport': 2, 'src': '1.1.1.1', 'dst': '2.2.2.2', 'zone': 1} class NetlinkLibTestCase(base.BaseTestCase): def setUp(self): super(NetlinkLibTestCase, self).setUp() nl_lib.nfct = mock.Mock() nl_lib.libc = mock.Mock() def test_open_new_conntrack_handler_failed(self): nl_lib.nfct.nfct_open.return_value = None with testtools.ExpectedException(exceptions.CTZoneExhaustedError): with nl_lib.ConntrackManager(): nl_lib.nfct.nfct_open.assert_called_once_with() nl_lib.nfct.nfct_close.assert_not_called() def test_open_new_conntrack_handler_pass(self): with nl_lib.ConntrackManager(): nl_lib.nfct.nfct_open.assert_called_once_with( nl_constants.NFNL_SUBSYS_CTNETLINK, nl_constants.CONNTRACK) nl_lib.nfct.nfct_close.assert_called_once_with(nl_lib.nfct.nfct_open( nl_constants.NFNL_SUBSYS_CTNETLINK, nl_constants.CONNTRACK)) def test_conntrack_list_entries(self): with nl_lib.ConntrackManager() as conntrack: nl_lib.nfct.nfct_open.assert_called_once_with( nl_constants.NFNL_SUBSYS_CTNETLINK, nl_constants.CONNTRACK) conntrack.list_entries() nl_lib.nfct.nfct_callback_register.assert_has_calls( [mock.call(nl_lib.nfct.nfct_open(), nl_constants.NFCT_T_ALL, mock.ANY, None)]) nl_lib.nfct.nfct_query.assert_called_once_with( nl_lib.nfct.nfct_open( nl_constants.NFNL_SUBSYS_CTNETLINK, nl_constants.CONNTRACK), nl_constants.NFCT_Q_DUMP, mock.ANY) nl_lib.nfct.nfct_close.assert_called_once_with(nl_lib.nfct.nfct_open( nl_constants.NFNL_SUBSYS_CTNETLINK, nl_constants.CONNTRACK)) def test_conntrack_new_failed(self): nl_lib.nfct.nfct_new.return_value = None with nl_lib.ConntrackManager() as conntrack: nl_lib.nfct.nfct_open.assert_called_once_with( nl_constants.NFNL_SUBSYS_CTNETLINK, nl_constants.CONNTRACK) conntrack.delete_entries([FAKE_ICMP_ENTRY]) nl_lib.nfct.nfct_new.assert_called_once_with() nl_lib.nfct.nfct_destroy.assert_called_once_with(None) nl_lib.nfct.nfct_close.assert_called_once_with(nl_lib.nfct.nfct_open( nl_constants.NFNL_SUBSYS_CTNETLINK, nl_constants.CONNTRACK)) def test_conntrack_delete_icmp_entry(self): conntrack_filter = mock.Mock() nl_lib.nfct.nfct_new.return_value = conntrack_filter with nl_lib.ConntrackManager() as conntrack: nl_lib.nfct.nfct_open.assert_called_once_with( nl_constants.NFNL_SUBSYS_CTNETLINK, nl_constants.CONNTRACK) conntrack.delete_entries([FAKE_ICMP_ENTRY]) calls = [ mock.call(conntrack_filter, nl_constants.ATTR_L3PROTO, nl_constants.IPVERSION_SOCKET[4]), mock.call(conntrack_filter, nl_constants.ATTR_L4PROTO, constants.IP_PROTOCOL_MAP['icmp']), mock.call(conntrack_filter, nl_constants.ATTR_ICMP_CODE, int(FAKE_ICMP_ENTRY['code'])), mock.call(conntrack_filter, nl_constants.ATTR_ICMP_TYPE, int(FAKE_ICMP_ENTRY['type'])) ] nl_lib.nfct.nfct_set_attr_u8.assert_has_calls(calls, any_order=True) calls = [ mock.call(conntrack_filter, nl_constants.ATTR_ICMP_ID, nl_lib.libc.htons(FAKE_ICMP_ENTRY['id'])), mock.call(conntrack_filter, nl_constants.ATTR_ZONE, int(FAKE_ICMP_ENTRY['zone'])) ] 
nl_lib.nfct.nfct_set_attr_u16.assert_has_calls(calls, any_order=True) calls = [ mock.call(conntrack_filter, nl_constants.ATTR_IPV4_SRC, conntrack._convert_text_to_binary( FAKE_ICMP_ENTRY['src'], 4) ), mock.call(conntrack_filter, nl_constants.ATTR_IPV4_DST, conntrack._convert_text_to_binary( FAKE_ICMP_ENTRY['dst'], 4) ), ] nl_lib.nfct.nfct_set_attr.assert_has_calls(calls, any_order=True) nl_lib.nfct.nfct_destroy.assert_called_once_with(conntrack_filter) nl_lib.nfct.nfct_close.assert_called_once_with(nl_lib.nfct.nfct_open( nl_constants.NFNL_SUBSYS_CTNETLINK, nl_constants.CONNTRACK)) def test_conntrack_delete_udp_entry(self): conntrack_filter = mock.Mock() nl_lib.nfct.nfct_new.return_value = conntrack_filter with nl_lib.ConntrackManager() as conntrack: nl_lib.nfct.nfct_open.assert_called_once_with( nl_constants.NFNL_SUBSYS_CTNETLINK, nl_constants.CONNTRACK) conntrack.delete_entries([FAKE_UDP_ENTRY]) calls = [ mock.call(conntrack_filter, nl_constants.ATTR_L3PROTO, nl_constants.IPVERSION_SOCKET[4]), mock.call(conntrack_filter, nl_constants.ATTR_L4PROTO, constants.IP_PROTOCOL_MAP['udp']) ] nl_lib.nfct.nfct_set_attr_u8.assert_has_calls(calls, any_order=True) calls = [ mock.call(conntrack_filter, nl_constants.ATTR_PORT_SRC, nl_lib.libc.htons(FAKE_UDP_ENTRY['sport'])), mock.call(conntrack_filter, nl_constants.ATTR_PORT_DST, nl_lib.libc.htons(FAKE_UDP_ENTRY['dport'])), mock.call(conntrack_filter, nl_constants.ATTR_ZONE, int(FAKE_ICMP_ENTRY['zone'])) ] nl_lib.nfct.nfct_set_attr_u16.assert_has_calls(calls, any_order=True) calls = [ mock.call(conntrack_filter, nl_constants.ATTR_IPV4_SRC, conntrack._convert_text_to_binary( FAKE_UDP_ENTRY['src'], 4) ), mock.call(conntrack_filter, nl_constants.ATTR_IPV4_DST, conntrack._convert_text_to_binary( FAKE_UDP_ENTRY['dst'], 4) ), ] nl_lib.nfct.nfct_set_attr.assert_has_calls(calls, any_order=True) nl_lib.nfct.nfct_destroy.assert_called_once_with(conntrack_filter) nl_lib.nfct.nfct_close.assert_called_once_with(nl_lib.nfct.nfct_open( nl_constants.NFNL_SUBSYS_CTNETLINK, nl_constants.CONNTRACK)) def test_conntrack_delete_tcp_entry(self): conntrack_filter = mock.Mock() nl_lib.nfct.nfct_new.return_value = conntrack_filter with nl_lib.ConntrackManager() as conntrack: nl_lib.nfct.nfct_open.assert_called_once_with( nl_constants.NFNL_SUBSYS_CTNETLINK, nl_constants.CONNTRACK) conntrack.delete_entries([FAKE_TCP_ENTRY]) calls = [ mock.call(conntrack_filter, nl_constants.ATTR_L3PROTO, nl_constants.IPVERSION_SOCKET[4]), mock.call(conntrack_filter, nl_constants.ATTR_L4PROTO, constants.IP_PROTOCOL_MAP['tcp']) ] nl_lib.nfct.nfct_set_attr_u8.assert_has_calls(calls, any_order=True) calls = [ mock.call(conntrack_filter, nl_constants.ATTR_PORT_SRC, nl_lib.libc.htons(FAKE_TCP_ENTRY['sport'])), mock.call(conntrack_filter, nl_constants.ATTR_PORT_DST, nl_lib.libc.htons(FAKE_TCP_ENTRY['dport'])), mock.call(conntrack_filter, nl_constants.ATTR_ZONE, int(FAKE_ICMP_ENTRY['zone'])) ] nl_lib.nfct.nfct_set_attr_u16.assert_has_calls(calls, any_order=True) calls = [ mock.call(conntrack_filter, nl_constants.ATTR_IPV4_SRC, conntrack._convert_text_to_binary( FAKE_TCP_ENTRY['src'], 4) ), mock.call(conntrack_filter, nl_constants.ATTR_IPV4_DST, conntrack._convert_text_to_binary( FAKE_TCP_ENTRY['dst'], 4) ), ] nl_lib.nfct.nfct_set_attr.assert_has_calls(calls, any_order=True) nl_lib.nfct.nfct_destroy.assert_called_once_with(conntrack_filter) nl_lib.nfct.nfct_close.assert_called_once_with(nl_lib.nfct.nfct_open( nl_constants.NFNL_SUBSYS_CTNETLINK, nl_constants.CONNTRACK)) def 
test_conntrack_delete_entries(self): conntrack_filter = mock.Mock() nl_lib.nfct.nfct_new.return_value = conntrack_filter with nl_lib.ConntrackManager() as conntrack: nl_lib.nfct.nfct_open.assert_called_once_with( nl_constants.NFNL_SUBSYS_CTNETLINK, nl_constants.CONNTRACK) conntrack.delete_entries([FAKE_ICMP_ENTRY, FAKE_TCP_ENTRY, FAKE_UDP_ENTRY]) calls = [ mock.call(conntrack_filter, nl_constants.ATTR_L3PROTO, nl_constants.IPVERSION_SOCKET[4]), mock.call(conntrack_filter, nl_constants.ATTR_L4PROTO, constants.IP_PROTOCOL_MAP['tcp']), mock.call(conntrack_filter, nl_constants.ATTR_L3PROTO, nl_constants.IPVERSION_SOCKET[4]), mock.call(conntrack_filter, nl_constants.ATTR_L4PROTO, constants.IP_PROTOCOL_MAP['udp']), mock.call(conntrack_filter, nl_constants.ATTR_L3PROTO, nl_constants.IPVERSION_SOCKET[4]), mock.call(conntrack_filter, nl_constants.ATTR_L4PROTO, constants.IP_PROTOCOL_MAP['icmp']), mock.call(conntrack_filter, nl_constants.ATTR_ICMP_CODE, int(FAKE_ICMP_ENTRY['code'])), mock.call(conntrack_filter, nl_constants.ATTR_ICMP_TYPE, int(FAKE_ICMP_ENTRY['type'])) ] nl_lib.nfct.nfct_set_attr_u8.assert_has_calls(calls, any_order=True) calls = [ mock.call(conntrack_filter, nl_constants.ATTR_PORT_SRC, nl_lib.libc.htons(FAKE_TCP_ENTRY['sport'])), mock.call(conntrack_filter, nl_constants.ATTR_PORT_DST, nl_lib.libc.htons(FAKE_TCP_ENTRY['dport'])), mock.call(conntrack_filter, nl_constants.ATTR_ZONE, int(FAKE_TCP_ENTRY['zone'])), mock.call(conntrack_filter, nl_constants.ATTR_PORT_SRC, nl_lib.libc.htons(FAKE_UDP_ENTRY['sport'])), mock.call(conntrack_filter, nl_constants.ATTR_PORT_DST, nl_lib.libc.htons(FAKE_UDP_ENTRY['dport'])), mock.call(conntrack_filter, nl_constants.ATTR_ZONE, int(FAKE_UDP_ENTRY['zone'])), mock.call(conntrack_filter, nl_constants.ATTR_ICMP_ID, nl_lib.libc.htons(FAKE_ICMP_ENTRY['id'])), mock.call(conntrack_filter, nl_constants.ATTR_ZONE, int(FAKE_ICMP_ENTRY['zone'])) ] nl_lib.nfct.nfct_set_attr_u16.assert_has_calls(calls, any_order=True) calls = [ mock.call(conntrack_filter, nl_constants.ATTR_IPV4_SRC, conntrack._convert_text_to_binary( FAKE_TCP_ENTRY['src'], 4) ), mock.call(conntrack_filter, nl_constants.ATTR_IPV4_DST, conntrack._convert_text_to_binary( FAKE_TCP_ENTRY['dst'], 4)), mock.call(conntrack_filter, nl_constants.ATTR_IPV4_SRC, conntrack._convert_text_to_binary( FAKE_UDP_ENTRY['src'], 4) ), mock.call(conntrack_filter, nl_constants.ATTR_IPV4_DST, conntrack._convert_text_to_binary( FAKE_UDP_ENTRY['dst'], 4) ), mock.call(conntrack_filter, nl_constants.ATTR_IPV4_SRC, conntrack._convert_text_to_binary( FAKE_ICMP_ENTRY['src'], 4) ), mock.call(conntrack_filter, nl_constants.ATTR_IPV4_DST, conntrack._convert_text_to_binary( FAKE_ICMP_ENTRY['dst'], 4) ), ] nl_lib.nfct.nfct_set_attr.assert_has_calls(calls, any_order=True) nl_lib.nfct.nfct_destroy.assert_called_once_with(conntrack_filter) nl_lib.nfct.nfct_close.assert_called_once_with(nl_lib.nfct.nfct_open( nl_constants.NFNL_SUBSYS_CTNETLINK, nl_constants.CONNTRACK)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4750462 neutron-16.0.0.0b2.dev214/neutron/tests/unit/quota/0000755000175000017500000000000000000000000022107 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/quota/__init__.py0000644000175000017500000000171400000000000024223 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import model_base import sqlalchemy as sa # Model classes for test resources class MehModel(model_base.BASEV2, model_base.HasProject): meh = sa.Column(sa.String(8), primary_key=True) class OtherMehModel(model_base.BASEV2, model_base.HasProject): othermeh = sa.Column(sa.String(8), primary_key=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/quota/test_resource.py0000644000175000017500000003667300000000000025366 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib import context from neutron_lib.db import api as db_api from neutron_lib.plugins import constants from neutron_lib.plugins import directory from oslo_config import cfg from oslo_utils import uuidutils import testtools from neutron.db.quota import api as quota_api from neutron.quota import resource from neutron.tests import base from neutron.tests.unit import quota as test_quota from neutron.tests.unit import testlib_api DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' meh_quota_flag = 'quota_meh' meh_quota_opts = [cfg.IntOpt(meh_quota_flag, default=99)] class TestResource(base.DietTestCase): """Unit tests for neutron.quota.resource.BaseResource""" def test_create_resource_without_plural_name(self): res = resource.BaseResource('foo', None) self.assertEqual('foos', res.plural_name) res = resource.BaseResource('foy', None) self.assertEqual('foies', res.plural_name) def test_create_resource_with_plural_name(self): res = resource.BaseResource('foo', None, plural_name='foopsies') self.assertEqual('foopsies', res.plural_name) def test_resource_default_value(self): res = resource.BaseResource('foo', 'foo_quota') with mock.patch('oslo_config.cfg.CONF') as mock_cfg: mock_cfg.QUOTAS.foo_quota = 99 self.assertEqual(99, res.default) def test_resource_negative_default_value(self): res = resource.BaseResource('foo', 'foo_quota') with mock.patch('oslo_config.cfg.CONF') as mock_cfg: mock_cfg.QUOTAS.foo_quota = -99 self.assertEqual(-1, res.default) class TestTrackedResource(testlib_api.SqlTestCase): def _add_data(self, tenant_id=None): session = db_api.get_writer_session() with session.begin(): tenant_id = tenant_id or self.tenant_id session.add(test_quota.MehModel( meh='meh_%s' % uuidutils.generate_uuid(), tenant_id=tenant_id)) session.add(test_quota.MehModel( 
meh='meh_%s' % uuidutils.generate_uuid(), tenant_id=tenant_id)) def _delete_data(self): session = db_api.get_writer_session() with session.begin(): query = session.query(test_quota.MehModel).filter_by( tenant_id=self.tenant_id) for item in query: session.delete(item) def _update_data(self): session = db_api.get_writer_session() with session.begin(): query = session.query(test_quota.MehModel).filter_by( tenant_id=self.tenant_id) for item in query: item['meh'] = 'meh-%s' % item['meh'] session.add(item) def setUp(self): super(TestTrackedResource, self).setUp() self.setup_coreplugin(DB_PLUGIN_KLASS) self.resource = 'meh' self.other_resource = 'othermeh' self.tenant_id = 'meh' self.context = context.Context( user_id='', tenant_id=self.tenant_id, is_admin=False) def _create_resource(self): res = resource.TrackedResource( self.resource, test_quota.MehModel, meh_quota_flag) res.register_events() return res def _create_other_resource(self): res = resource.TrackedResource( self.other_resource, test_quota.OtherMehModel, meh_quota_flag) res.register_events() return res def test_bulk_delete_protection(self): self._create_resource() with testtools.ExpectedException(RuntimeError): ctx = context.get_admin_context() ctx.session.query(test_quota.MehModel).delete() def test_count_first_call_with_dirty_false(self): quota_api.set_quota_usage( self.context, self.resource, self.tenant_id, in_use=1) res = self._create_resource() self._add_data() # explicitly set dirty flag to False quota_api.set_all_quota_usage_dirty( self.context, self.resource, dirty=False) # Expect correct count to be returned anyway since the first call to # count() always resyncs with the db self.assertEqual(2, res.count(self.context, None, self.tenant_id)) def test_count_reserved(self): res = self._create_resource() quota_api.create_reservation(self.context, self.tenant_id, {res.name: 1}) self.assertEqual(1, res.count_reserved(self.context, self.tenant_id)) def test_count_used_first_call_with_dirty_false(self): quota_api.set_quota_usage( self.context, self.resource, self.tenant_id, in_use=1) res = self._create_resource() self._add_data() # explicitly set dirty flag to False quota_api.set_all_quota_usage_dirty( self.context, self.resource, dirty=False) # Expect correct count_used to be returned # anyway since the first call to # count_used() always resyncs with the db self.assertEqual(2, res.count_used(self.context, self.tenant_id)) def _test_count(self): res = self._create_resource() quota_api.set_quota_usage( self.context, res.name, self.tenant_id, in_use=0) self._add_data() return res def test_count_with_dirty_false(self): res = self._test_count() res.count(self.context, None, self.tenant_id) # At this stage count has been invoked, and the dirty flag should be # false. Another invocation of count should not query the model class set_quota = 'neutron.db.quota.api.set_quota_usage' with mock.patch(set_quota) as mock_set_quota: self.assertEqual(0, mock_set_quota.call_count) self.assertEqual(2, res.count(self.context, None, self.tenant_id)) def test_count_used_with_dirty_false(self): res = self._test_count() res.count_used(self.context, self.tenant_id) # At this stage count_used has been invoked, # and the dirty flag should be false. 
Another invocation # of count_used should not query the model class set_quota = 'neutron.db.quota.api.set_quota_usage' with mock.patch(set_quota) as mock_set_quota: self.assertEqual(0, mock_set_quota.call_count) self.assertEqual(2, res.count_used(self.context, self.tenant_id)) def test_count_with_dirty_true_resync(self): res = self._test_count() # Expect correct count to be returned, which also implies # set_quota_usage has been invoked with the correct parameters self.assertEqual(2, res.count(self.context, None, self.tenant_id, resync_usage=True)) def test_count_used_with_dirty_true_resync(self): res = self._test_count() # Expect correct count_used to be returned, which also implies # set_quota_usage has been invoked with the correct parameters self.assertEqual(2, res.count_used(self.context, self.tenant_id, resync_usage=True)) def test_count_with_dirty_true_resync_calls_set_quota_usage(self): res = self._test_count() set_quota_usage = 'neutron.db.quota.api.set_quota_usage' with mock.patch(set_quota_usage) as mock_set_quota_usage: quota_api.set_quota_usage_dirty(self.context, self.resource, self.tenant_id) res.count(self.context, None, self.tenant_id, resync_usage=True) mock_set_quota_usage.assert_called_once_with( self.context, self.resource, self.tenant_id, in_use=2) def test_count_used_with_dirty_true_resync_calls_set_quota_usage(self): res = self._test_count() set_quota_usage = 'neutron.db.quota.api.set_quota_usage' with mock.patch(set_quota_usage) as mock_set_quota_usage: quota_api.set_quota_usage_dirty(self.context, self.resource, self.tenant_id) res.count_used(self.context, self.tenant_id, resync_usage=True) mock_set_quota_usage.assert_called_once_with( self.context, self.resource, self.tenant_id, in_use=2) def test_count_with_dirty_true_no_usage_info(self): res = self._create_resource() self._add_data() # Invoke count without having usage info in DB - Expect correct # count to be returned self.assertEqual(2, res.count(self.context, None, self.tenant_id)) def test_count_used_with_dirty_true_no_usage_info(self): res = self._create_resource() self._add_data() # Invoke count_used without having usage info in DB - Expect correct # count_used to be returned self.assertEqual(2, res.count_used(self.context, self.tenant_id)) def test_count_with_dirty_true_no_usage_info_calls_set_quota_usage(self): res = self._create_resource() self._add_data() set_quota_usage = 'neutron.db.quota.api.set_quota_usage' with mock.patch(set_quota_usage) as mock_set_quota_usage: quota_api.set_quota_usage_dirty(self.context, self.resource, self.tenant_id) res.count(self.context, None, self.tenant_id, resync_usage=True) mock_set_quota_usage.assert_called_once_with( self.context, self.resource, self.tenant_id, in_use=2) def test_count_used_with_dirty_true_no_usage_info_calls_set_quota_usage( self): res = self._create_resource() self._add_data() set_quota_usage = 'neutron.db.quota.api.set_quota_usage' with mock.patch(set_quota_usage) as mock_set_quota_usage: quota_api.set_quota_usage_dirty(self.context, self.resource, self.tenant_id) res.count_used(self.context, self.tenant_id, resync_usage=True) mock_set_quota_usage.assert_called_once_with( self.context, self.resource, self.tenant_id, in_use=2) def test_add_delete_data_triggers_event(self): res = self._create_resource() other_res = self._create_other_resource() # Validate dirty tenants since mock does not work well with SQLAlchemy # event handlers. 
self._add_data() self._add_data('someone_else') self.assertEqual(2, len(res._dirty_tenants)) # Also, the dirty flag should not be set for other resources self.assertEqual(0, len(other_res._dirty_tenants)) self.assertIn(self.tenant_id, res._dirty_tenants) self.assertIn('someone_else', res._dirty_tenants) def test_delete_data_triggers_event(self): res = self._create_resource() self._add_data() self._add_data('someone_else') # Artificially clear _dirty_tenants res._dirty_tenants.clear() self._delete_data() # We did not delete "someone_else", so expect only a single dirty # tenant self.assertEqual(1, len(res._dirty_tenants)) self.assertIn(self.tenant_id, res._dirty_tenants) def test_update_does_not_trigger_event(self): res = self._create_resource() self._add_data() self._add_data('someone_else') # Artificially clear _dirty_tenants res._dirty_tenants.clear() self._update_data() self.assertEqual(0, len(res._dirty_tenants)) def test_mark_dirty(self): res = self._create_resource() self._add_data() self._add_data('someone_else') set_quota_usage = 'neutron.db.quota.api.set_quota_usage_dirty' with mock.patch(set_quota_usage) as mock_set_quota_usage: res.mark_dirty(self.context) self.assertEqual(2, mock_set_quota_usage.call_count) mock_set_quota_usage.assert_any_call( self.context, self.resource, self.tenant_id) mock_set_quota_usage.assert_any_call( self.context, self.resource, 'someone_else') def test_mark_dirty_no_dirty_tenant(self): res = self._create_resource() set_quota_usage = 'neutron.db.quota.api.set_quota_usage_dirty' with mock.patch(set_quota_usage) as mock_set_quota_usage: res.mark_dirty(self.context) self.assertFalse(mock_set_quota_usage.call_count) def test_resync(self): res = self._create_resource() self._add_data() res.mark_dirty(self.context) # self.tenant_id now is out of sync set_quota_usage = 'neutron.db.quota.api.set_quota_usage' with mock.patch(set_quota_usage) as mock_set_quota_usage: res.resync(self.context, self.tenant_id) # and now it should be in sync self.assertNotIn(self.tenant_id, res._out_of_sync_tenants) mock_set_quota_usage.assert_called_once_with( self.context, self.resource, self.tenant_id, in_use=2) class Test_CountResource(base.BaseTestCase): def test_all_plugins_checked(self): plugin1 = mock.Mock() plugin2 = mock.Mock() plugins = {'plugin1': plugin1, 'plugin2': plugin2} for name, plugin in plugins.items(): plugin.get_floatingips_count.side_effect = NotImplementedError plugin.get_floatingips.side_effect = NotImplementedError directory.add_plugin(name, plugin) context = mock.Mock() collection_name = 'floatingips' tenant_id = 'fakeid' self.assertRaises( NotImplementedError, resource._count_resource, context, collection_name, tenant_id) for plugin in plugins.values(): for func in (plugin.get_floatingips_count, plugin.get_floatingips): func.assert_called_with( context, filters={'tenant_id': [tenant_id]}) def test_core_plugin_checked_first(self): plugin1 = mock.Mock() plugin2 = mock.Mock() plugin1.get_floatingips_count.side_effect = NotImplementedError plugin1.get_floatingips.side_effect = NotImplementedError directory.add_plugin('plugin1', plugin1) plugin2.get_floatingips_count.return_value = 10 directory.add_plugin(constants.CORE, plugin2) context = mock.Mock() collection_name = 'floatingips' tenant_id = 'fakeid' self.assertEqual( 10, resource._count_resource(context, collection_name, tenant_id)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 
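# NOTE(editor): a hedged sketch (an assumed simplification, not the exact
# neutron code) of the fallback that Test_CountResource verifies above: the
# core plugin is consulted first, then the other registered plugins, trying
# get_<collection>_count before get_<collection>; if every plugin raises
# NotImplementedError, the error propagates to the caller.
def _example_count_resource(ordered_plugins, context, collection_name,
                            tenant_id):
    """Return a usage count for collection_name, trying plugins in order."""
    for plugin in ordered_plugins:  # the core plugin is ordered first
        for name in ('get_%s_count' % collection_name,
                     'get_%s' % collection_name):
            getter = getattr(plugin, name, None)
            if getter is None:
                continue
            try:
                result = getter(context,
                                filters={'tenant_id': [tenant_id]})
            except NotImplementedError:
                continue
            return result if isinstance(result, int) else len(result)
    raise NotImplementedError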
neutron-16.0.0.0b2.dev214/neutron/tests/unit/quota/test_resource_registry.py0000644000175000017500000001762100000000000027306 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib import context from oslo_config import cfg import testtools from neutron.quota import resource from neutron.quota import resource_registry from neutron.tests import base from neutron.tests.unit import quota as test_quota class TestResourceRegistry(base.DietTestCase): def setUp(self): super(TestResourceRegistry, self).setUp() self.registry = resource_registry.ResourceRegistry.get_instance() # clean up the registry at every test self.registry.unregister_resources() def test_set_tracked_resource_new_resource(self): self.registry.set_tracked_resource('meh', test_quota.MehModel) self.assertEqual(test_quota.MehModel, self.registry._tracked_resource_mappings['meh']) def test_set_tracked_resource_existing_with_override(self): self.test_set_tracked_resource_new_resource() self.registry.set_tracked_resource('meh', test_quota.OtherMehModel, override=True) # Override is set to True, the model class should change self.assertEqual(test_quota.OtherMehModel, self.registry._tracked_resource_mappings['meh']) def test_set_tracked_resource_existing_no_override(self): self.test_set_tracked_resource_new_resource() self.registry.set_tracked_resource('meh', test_quota.OtherMehModel) # Override is set to False, the model class should not change self.assertEqual(test_quota.MehModel, self.registry._tracked_resource_mappings['meh']) def _test_register_resource_by_name(self, resource_name, expected_type): self.assertNotIn(resource_name, self.registry._resources) self.registry.register_resource_by_name(resource_name) self.assertIn(resource_name, self.registry._resources) self.assertIsInstance(self.registry.get_resource(resource_name), expected_type) def test_register_resource_by_name_tracked(self): self.test_set_tracked_resource_new_resource() self._test_register_resource_by_name('meh', resource.TrackedResource) def test_register_resource_by_name_not_tracked(self): self._test_register_resource_by_name('meh', resource.CountableResource) def test_tracked_resource_error_if_already_registered_as_untracked(self): self.registry.register_resource_by_name('meh') with testtools.ExpectedException(RuntimeError): self.registry.set_tracked_resource('meh', test_quota.MehModel) # ensure unregister works self.registry.unregister_resources() def test_register_resource_by_name_with_tracking_disabled_by_config(self): cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS') # DietTestCase does not automatically clean up configuration overrides self.addCleanup(cfg.CONF.reset) self.registry.set_tracked_resource('meh', test_quota.MehModel) self.assertNotIn( 'meh', self.registry._tracked_resource_mappings) self._test_register_resource_by_name('meh', resource.CountableResource) class TestAuxiliaryFunctions(base.DietTestCase): def setUp(self):
super(TestAuxiliaryFunctions, self).setUp() self.registry = resource_registry.ResourceRegistry.get_instance() # clean up the registry at every test self.registry.unregister_resources() def test_resync_tracking_disabled(self): cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS') # DietTestCase does not automatically clean up configuration overrides self.addCleanup(cfg.CONF.reset) with mock.patch('neutron.quota.resource.' 'TrackedResource.resync') as mock_resync: self.registry.set_tracked_resource('meh', test_quota.MehModel) self.registry.register_resource_by_name('meh') resource_registry.resync_resource(mock.ANY, 'meh', 'tenant_id') self.assertEqual(0, mock_resync.call_count) def test_resync_tracked_resource(self): with mock.patch('neutron.quota.resource.' 'TrackedResource.resync') as mock_resync: self.registry.set_tracked_resource('meh', test_quota.MehModel) self.registry.register_resource_by_name('meh') resource_registry.resync_resource(mock.ANY, 'meh', 'tenant_id') mock_resync.assert_called_once_with(mock.ANY, 'tenant_id') def test_resync_non_tracked_resource(self): with mock.patch('neutron.quota.resource.' 'TrackedResource.resync') as mock_resync: self.registry.register_resource_by_name('meh') resource_registry.resync_resource(mock.ANY, 'meh', 'tenant_id') self.assertEqual(0, mock_resync.call_count) def test_set_resources_dirty_invoked_with_tracking_disabled(self): cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS') # DietTestCase does not automatically clean up configuration overrides self.addCleanup(cfg.CONF.reset) with mock.patch('neutron.quota.resource.' 'TrackedResource.mark_dirty') as mock_mark_dirty: self.registry.set_tracked_resource('meh', test_quota.MehModel) self.registry.register_resource_by_name('meh') resource_registry.set_resources_dirty(mock.ANY) self.assertEqual(0, mock_mark_dirty.call_count) def test_set_resources_dirty_no_dirty_resource(self): ctx = context.Context('user_id', 'tenant_id', is_admin=False, is_advsvc=False) with mock.patch('neutron.quota.resource.' 'TrackedResource.mark_dirty') as mock_mark_dirty: self.registry.set_tracked_resource('meh', test_quota.MehModel) self.registry.register_resource_by_name('meh') res = self.registry.get_resource('meh') # This ensures dirty is false res._dirty_tenants.clear() resource_registry.set_resources_dirty(ctx) self.assertEqual(0, mock_mark_dirty.call_count) def test_set_resources_dirty_no_tracked_resource(self): ctx = context.Context('user_id', 'tenant_id', is_admin=False, is_advsvc=False) with mock.patch('neutron.quota.resource.' 'TrackedResource.mark_dirty') as mock_mark_dirty: self.registry.register_resource_by_name('meh') resource_registry.set_resources_dirty(ctx) self.assertEqual(0, mock_mark_dirty.call_count) def test_set_resources_dirty(self): ctx = context.Context('user_id', 'tenant_id', is_admin=False, is_advsvc=False) with mock.patch('neutron.quota.resource.'
'TrackedResource.mark_dirty') as mock_mark_dirty: self.registry.set_tracked_resource('meh', test_quota.MehModel) self.registry.register_resource_by_name('meh') res = self.registry.get_resource('meh') # This ensures dirty is true res._dirty_tenants.add('tenant_id') resource_registry.set_resources_dirty(ctx) mock_mark_dirty.assert_called_once_with(ctx) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.479046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/scheduler/0000755000175000017500000000000000000000000022734 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/scheduler/__init__.py0000644000175000017500000000000000000000000025033 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py0000644000175000017500000012440200000000000030502 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime from operator import attrgetter import random import mock from neutron_lib import constants from neutron_lib import context from neutron_lib.exceptions import dhcpagentscheduler as das_exc from oslo_config import cfg from oslo_utils import importutils from oslo_utils import uuidutils import testscenarios from neutron.db import agentschedulers_db as sched_db from neutron.objects import agent from neutron.objects import network as network_obj from neutron.scheduler import dhcp_agent_scheduler from neutron.services.segments import db as segments_service_db from neutron.tests.common import helpers from neutron.tests.unit.plugins.ml2 import test_plugin from neutron.tests.unit import testlib_api # Required to generate tests from scenarios. Not compatible with nose. 
load_tests = testscenarios.load_tests_apply_scenarios HOST_C = 'host-c' HOST_D = 'host-d' class TestDhcpSchedulerBaseTestCase(testlib_api.SqlTestCase): CORE_PLUGIN = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' def setUp(self): super(TestDhcpSchedulerBaseTestCase, self).setUp() self.setup_coreplugin(self.CORE_PLUGIN) self.ctx = context.get_admin_context() self.network = {'id': uuidutils.generate_uuid()} self.network_id = self.network['id'] self._save_networks([self.network_id]) def _create_and_set_agents_down(self, hosts, down_agent_count=0, admin_state_up=True, az=helpers.DEFAULT_AZ): agents = [] for i, host in enumerate(hosts): is_alive = i >= down_agent_count agents.append(helpers.register_dhcp_agent( host, admin_state_up=admin_state_up, alive=is_alive, az=az)) return agents def _save_networks(self, networks): for network_id in networks: network_obj.Network(self.ctx, id=network_id).create() def _test_schedule_bind_network(self, agents, network_id): cfg.CONF.set_override('dhcp_agents_per_network', len(agents)) scheduler = dhcp_agent_scheduler.ChanceScheduler() scheduler.resource_filter.bind(self.ctx, agents, network_id) binding_objs = network_obj.NetworkDhcpAgentBinding.get_objects( self.ctx, network_id=network_id) self.assertEqual(len(agents), len(binding_objs)) for result in binding_objs: self.assertEqual(network_id, result.network_id) class TestDhcpScheduler(TestDhcpSchedulerBaseTestCase): def test_schedule_bind_network_single_agent(self): agents = self._create_and_set_agents_down(['host-a']) self._test_schedule_bind_network(agents, self.network_id) def test_schedule_bind_network_multi_agents(self): agents = self._create_and_set_agents_down(['host-a', 'host-b']) self._test_schedule_bind_network(agents, self.network_id) def test_schedule_bind_network_multi_agent_fail_one(self): agents = self._create_and_set_agents_down(['host-a']) self._test_schedule_bind_network(agents, self.network_id) with mock.patch.object(dhcp_agent_scheduler.LOG, 'debug') as fake_log: self._test_schedule_bind_network(agents, self.network_id) self.assertEqual(1, fake_log.call_count) def _test_get_agents_and_scheduler_for_dead_agent(self): agents = self._create_and_set_agents_down(['dead_host', 'alive_host'], 1) dead_agent = [agents[0]] alive_agent = [agents[1]] self._test_schedule_bind_network(dead_agent, self.network_id) scheduler = dhcp_agent_scheduler.ChanceScheduler() return dead_agent, alive_agent, scheduler def _test_reschedule_vs_network_on_dead_agent(self, active_hosts_only): dead_agent, alive_agent, scheduler = ( self._test_get_agents_and_scheduler_for_dead_agent()) network = {'id': self.network_id} plugin = mock.Mock() plugin.get_subnets.return_value = [{"network_id": self.network_id, "enable_dhcp": True}] plugin.get_agent_objects.return_value = dead_agent + alive_agent plugin.filter_hosts_with_network_access.side_effect = ( lambda context, network_id, hosts: hosts) if active_hosts_only: plugin.get_dhcp_agents_hosting_networks.return_value = [] self.assertTrue( scheduler.schedule( plugin, self.ctx, network)) else: plugin.get_dhcp_agents_hosting_networks.return_value = dead_agent self.assertFalse( scheduler.schedule( plugin, self.ctx, network)) def test_network_rescheduled_when_db_returns_active_hosts(self): self._test_reschedule_vs_network_on_dead_agent(True) def test_network_not_rescheduled_when_db_returns_all_hosts(self): self._test_reschedule_vs_network_on_dead_agent(False) def _get_agent_binding_from_db(self, agent): return network_obj.NetworkDhcpAgentBinding.get_objects( self.ctx, 
            dhcp_agent_id=agent[0].id)

    def test_auto_reschedule_vs_network_on_dead_agent(self):
        dead_agent, alive_agent, scheduler = (
            self._test_get_agents_and_scheduler_for_dead_agent())
        plugin = mock.Mock()
        plugin.get_subnets.return_value = [{"network_id": self.network_id,
                                            "enable_dhcp": True,
                                            "segment_id": None}]
        plugin.get_network.return_value = self.network
        plugin.get_dhcp_agents_hosting_networks.return_value = dead_agent
        network_assigned_to_dead_agent = (
            self._get_agent_binding_from_db(dead_agent))
        self.assertEqual(1, len(network_assigned_to_dead_agent))
        self.assertTrue(
            scheduler.auto_schedule_networks(
                plugin, self.ctx, "alive_host"))
        network_assigned_to_dead_agent = (
            self._get_agent_binding_from_db(dead_agent))
        network_assigned_to_alive_agent = (
            self._get_agent_binding_from_db(alive_agent))
        self.assertEqual(1, len(network_assigned_to_dead_agent))
        # the network won't be scheduled to the new agent unless it is
        # removed from the dead agent first
        self.assertEqual(0, len(network_assigned_to_alive_agent))


class TestAutoScheduleNetworks(TestDhcpSchedulerBaseTestCase):
    """Unit test scenarios for ChanceScheduler.auto_schedule_networks.

    network_present
        Network is present or not

    enable_dhcp
        Dhcp is enabled or disabled in the subnet of the network

    scheduled_already
        Network is already scheduled to the agent or not

    agent_down
        Dhcp agent is down or alive

    valid_host
        If true, then a valid host is passed to schedule the network,
        else an invalid host is passed.

    az_hints
        'availability_zone_hints' of the network. Note that the default
        'availability_zone' of an agent is 'nova'.
    """
    scenarios = [
        ('Network present',
         dict(network_present=True, enable_dhcp=True, scheduled_already=False,
              agent_down=False, valid_host=True, az_hints=[])),
        ('No network',
         dict(network_present=False, enable_dhcp=False,
              scheduled_already=False, agent_down=False, valid_host=True,
              az_hints=[])),
        ('Network already scheduled',
         dict(network_present=True, enable_dhcp=True, scheduled_already=True,
              agent_down=False, valid_host=True, az_hints=[])),
        ('Agent down',
         dict(network_present=True, enable_dhcp=True, scheduled_already=False,
              agent_down=False, valid_host=True, az_hints=[])),
        ('dhcp disabled',
         dict(network_present=True, enable_dhcp=False,
              scheduled_already=False, agent_down=False, valid_host=False,
              az_hints=[])),
        ('Invalid host',
         dict(network_present=True, enable_dhcp=True, scheduled_already=False,
              agent_down=False, valid_host=False, az_hints=[])),
        ('Match AZ',
         dict(network_present=True, enable_dhcp=True, scheduled_already=False,
              agent_down=False, valid_host=True, az_hints=['nova'])),
        ('Not match AZ',
         dict(network_present=True, enable_dhcp=True, scheduled_already=False,
              agent_down=False, valid_host=True, az_hints=['not-match'])),
    ]

    def test_auto_schedule_network(self):
        plugin = mock.MagicMock()
        plugin.get_subnets.return_value = (
            [{"network_id": self.network_id,
              "enable_dhcp": self.enable_dhcp,
              "segment_id": None}]
            if self.network_present else [])
        plugin.get_network.return_value = {'availability_zone_hints':
                                           self.az_hints}
        scheduler = dhcp_agent_scheduler.ChanceScheduler()
        if self.network_present:
            down_agent_count = 1 if self.agent_down else 0
            agents = self._create_and_set_agents_down(
                ['host-a'], down_agent_count=down_agent_count)
            if self.scheduled_already:
                self._test_schedule_bind_network(agents, self.network_id)

        expected_result = (self.network_present and self.enable_dhcp)
        expected_hosted_agents = (1 if expected_result and
                                  self.valid_host else 0)
        if (self.az_hints and
                agents[0]['availability_zone'] not in self.az_hints):
            expected_hosted_agents = 0
        host = "host-a"
if self.valid_host else "host-b" observed_ret_value = scheduler.auto_schedule_networks( plugin, self.ctx, host) self.assertEqual(expected_result, observed_ret_value) count_hosted_agents = network_obj.NetworkDhcpAgentBinding.count( self.ctx) self.assertEqual(expected_hosted_agents, count_hosted_agents) class TestAutoScheduleSegments(test_plugin.Ml2PluginV2TestCase, TestDhcpSchedulerBaseTestCase): """Unit test scenarios for ChanceScheduler""" CORE_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin' def setUp(self): super(TestAutoScheduleSegments, self).setUp() self.plugin = self.driver self.segments_plugin = importutils.import_object( 'neutron.services.segments.plugin.Plugin') self.ctx = context.get_admin_context() # Remove MissingAuthPlugin exception from logs mock.patch( 'neutron.notifiers.batch_notifier.BatchNotifier._notify').start() def _create_network(self): net = self.plugin.create_network( self.ctx, {'network': {'name': 'name', 'tenant_id': 'tenant_one', 'admin_state_up': True, 'shared': True}}) return net['id'] def _create_segment(self, network_id): seg = self.segments_plugin.create_segment( self.ctx, {'segment': {'network_id': network_id, 'name': None, 'description': None, 'physical_network': 'physnet1', 'network_type': 'vlan', 'segmentation_id': constants.ATTR_NOT_SPECIFIED}}) return seg['id'] def _create_subnet(self, segment_id, network_id, cidr='192.168.10.0/24'): subnet = self.plugin.create_subnet( self.ctx, {'subnet': {'name': 'name', 'ip_version': constants.IP_VERSION_4, 'network_id': network_id, 'cidr': cidr, 'gateway_ip': constants.ATTR_NOT_SPECIFIED, 'allocation_pools': constants.ATTR_NOT_SPECIFIED, 'dns_nameservers': constants.ATTR_NOT_SPECIFIED, 'host_routes': constants.ATTR_NOT_SPECIFIED, 'tenant_id': 'tenant_one', 'enable_dhcp': True, 'segment_id': segment_id}}) return subnet['id'] def test_auto_schedule_one_network_one_segment_one_subnet(self): net_id = self._create_network() seg_id = self._create_segment(net_id) self._create_subnet(seg_id, net_id) helpers.register_dhcp_agent(HOST_C) segments_service_db.update_segment_host_mapping( self.ctx, HOST_C, {seg_id}) scheduler = dhcp_agent_scheduler.ChanceScheduler() observed_return_val = scheduler.auto_schedule_networks( self.plugin, self.ctx, HOST_C) self.assertTrue(observed_return_val) agent1 = self.plugin.get_dhcp_agents_hosting_networks( self.ctx, [net_id]) self.assertEqual(1, len(agent1)) self.assertEqual('host-c', agent1[0]['host']) def test_auto_schedule_one_network_one_segment_two_subnet(self): net_id = self._create_network() seg_id = self._create_segment(net_id) self._create_subnet(seg_id, net_id) self._create_subnet(seg_id, net_id, '192.168.11.0/24') helpers.register_dhcp_agent(HOST_C) segments_service_db.update_segment_host_mapping( self.ctx, HOST_C, {seg_id}) scheduler = dhcp_agent_scheduler.ChanceScheduler() observed_return_val = scheduler.auto_schedule_networks( self.plugin, self.ctx, HOST_C) self.assertTrue(observed_return_val) agent1 = self.plugin.get_dhcp_agents_hosting_networks( self.ctx, [net_id]) self.assertEqual(1, len(agent1)) self.assertEqual('host-c', agent1[0]['host']) def test_auto_schedule_one_network_two_segments_with_one_subnet_each(self): net_id = self._create_network() seg1_id = self._create_segment(net_id) self._create_subnet(seg1_id, net_id) helpers.register_dhcp_agent(HOST_D) segments_service_db.update_segment_host_mapping( self.ctx, HOST_D, {seg1_id}) scheduler = dhcp_agent_scheduler.ChanceScheduler() observed_val_first_segment = scheduler.auto_schedule_networks( self.plugin, self.ctx, HOST_D) 
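        # HOST_D was mapped to seg1 via update_segment_host_mapping above,
        # so segment-aware auto-scheduling should treat its agent as a
        # candidate for this network even before the second segment exists.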
self.assertTrue(observed_val_first_segment) agents = self.plugin.get_dhcp_agents_hosting_networks( self.ctx, [net_id]) self.assertEqual(1, len(agents)) seg2_id = self._create_segment(net_id) self._create_subnet(seg2_id, net_id, '192.168.11.0/24') helpers.register_dhcp_agent(HOST_C) segments_service_db.update_segment_host_mapping( self.ctx, HOST_C, {seg2_id}) observed_val_second_segment = scheduler.auto_schedule_networks( self.plugin, self.ctx, HOST_C) self.assertTrue(observed_val_second_segment) agents = self.plugin.get_dhcp_agents_hosting_networks( self.ctx, [net_id]) self.assertEqual(2, len(agents)) class TestNetworksFailover(TestDhcpSchedulerBaseTestCase, sched_db.DhcpAgentSchedulerDbMixin): def test_auto_schedule_network_excess_agents(self): plugin = mock.MagicMock() plugin.get_subnets.return_value = ( [{"network_id": self.network_id, "enable_dhcp": True}]) plugin.get_network.return_value = {'availability_zone_hints': ['nova']} scheduler = dhcp_agent_scheduler.ChanceScheduler() dhcpfilter = 'neutron.scheduler.dhcp_agent_scheduler.DhcpFilter' self._create_and_set_agents_down(['host-a', 'host-b']) expected_hosted_agents = 1 binding_index = 1 scheduler.auto_schedule_networks(plugin, self.ctx, 'host-a') with mock.patch( dhcpfilter + '.get_vacant_network_dhcp_agent_binding_index', context=self.ctx, network_id=self.network_id) as ndab: ndab.return_value = binding_index scheduler.auto_schedule_networks(plugin, self.ctx, 'host-b') self.assertTrue(ndab.called) num_hosted_agents = network_obj.NetworkDhcpAgentBinding.count( self.ctx, network_id=self.network_id) self.assertEqual(expected_hosted_agents, num_hosted_agents) def test_reschedule_network_from_down_agent(self): net_id = uuidutils.generate_uuid() agents = self._create_and_set_agents_down(['host-a', 'host-b'], 1) self._test_schedule_bind_network([agents[0]], self.network_id) self._save_networks([net_id]) self._test_schedule_bind_network([agents[1]], net_id) with mock.patch.object(self, 'remove_network_from_dhcp_agent') as rn,\ mock.patch.object(self, 'schedule_network', return_value=[agents[1]]) as sch,\ mock.patch.object(self, 'get_network', create=True, return_value={'id': self.network_id}): notifier = mock.MagicMock() self.agent_notifiers[constants.AGENT_TYPE_DHCP] = notifier self.remove_networks_from_down_agents() rn.assert_called_with(mock.ANY, agents[0].id, self.network_id, notify=False) sch.assert_called_with(mock.ANY, {'id': self.network_id}) notifier.network_added_to_agent.assert_called_with( mock.ANY, self.network_id, agents[1].host) def _test_failed_rescheduling(self, rn_side_effect=None): agents = self._create_and_set_agents_down(['host-a', 'host-b'], 1) self._test_schedule_bind_network([agents[0]], self.network_id) with mock.patch.object(self, 'remove_network_from_dhcp_agent', side_effect=rn_side_effect) as rn,\ mock.patch.object(self, 'schedule_network', return_value=None) as sch,\ mock.patch.object(self, 'get_network', create=True, return_value={'id': self.network_id}): notifier = mock.MagicMock() self.agent_notifiers[constants.AGENT_TYPE_DHCP] = notifier self.remove_networks_from_down_agents() rn.assert_called_with(mock.ANY, agents[0].id, self.network_id, notify=False) sch.assert_called_with(mock.ANY, {'id': self.network_id}) self.assertFalse(notifier.network_added_to_agent.called) def test_reschedule_network_from_down_agent_failed(self): self._test_failed_rescheduling() def test_reschedule_network_from_down_agent_concurrent_removal(self): self._test_failed_rescheduling( rn_side_effect=das_exc.NetworkNotHostedByDhcpAgent( 
network_id='foo', agent_id='bar')) def _create_test_networks(self, num_net=0): networks = [network_obj.Network( self.ctx, id=uuidutils.generate_uuid(), name='network-%s' % (i)) for i in range(num_net)] for net in networks: net.create() return [net.id for net in networks] def _create_dhcp_agents(self): timestamp = datetime.datetime.now() dhcp_agent_ids = [uuidutils.generate_uuid() for x in range(2)] dhcp_agent_1 = agent.Agent(self.ctx, id=dhcp_agent_ids[0], agent_type='DHCP Agent', topic='fake_topic', host='fake_host', binary='fake_binary', created_at=timestamp, started_at=timestamp, heartbeat_timestamp=timestamp, configurations={}, load=0) dhcp_agent_1.create() dhcp_agent_2 = agent.Agent(self.ctx, id=dhcp_agent_ids[1], agent_type='DHCP Agent', topic='fake_topic', host='fake_host_1', binary='fake_binary', created_at=timestamp, started_at=timestamp, heartbeat_timestamp=timestamp, configurations={}, load=0) dhcp_agent_2.create() return [dhcp_agent_1.id, dhcp_agent_2.id] def test_filter_bindings(self): self.ctx = context.get_admin_context() dhcp_agt_ids = self._create_dhcp_agents() network_ids = sorted(self._create_test_networks(num_net=4)) ndab_obj1 = network_obj.NetworkDhcpAgentBinding(self.ctx, network_id=network_ids[0], dhcp_agent_id=dhcp_agt_ids[0]) ndab_obj1.create() ndab_obj2 = network_obj.NetworkDhcpAgentBinding(self.ctx, network_id=network_ids[1], dhcp_agent_id=dhcp_agt_ids[0]) ndab_obj2.create() ndab_obj3 = network_obj.NetworkDhcpAgentBinding(self.ctx, network_id=network_ids[2], dhcp_agent_id=dhcp_agt_ids[1]) ndab_obj3.create() ndab_obj4 = network_obj.NetworkDhcpAgentBinding(self.ctx, network_id=network_ids[3], dhcp_agent_id=dhcp_agt_ids[1]) ndab_obj4.create() bindings_objs = sorted(network_obj.NetworkDhcpAgentBinding.get_objects( self.ctx), key=attrgetter('network_id')) with mock.patch.object(self, 'agent_starting_up', side_effect=[True, False]): res = [b for b in self._filter_bindings(None, bindings_objs)] # once per each agent id1 and id2 self.assertEqual(2, len(res)) res_ids = [b.network_id for b in res] self.assertIn(network_ids[2], res_ids) self.assertIn(network_ids[3], res_ids) def test_reschedule_network_from_down_agent_failed_on_unexpected(self): agents = self._create_and_set_agents_down(['host-a'], 1) self._test_schedule_bind_network([agents[0]], self.network_id) with mock.patch.object(self, '_filter_bindings', side_effect=Exception()): # just make sure that no exception is raised self.remove_networks_from_down_agents() def test_reschedule_network_catches_exceptions_on_fetching_bindings(self): with mock.patch('neutron_lib.context.get_admin_context') as get_ctx: mock_ctx = mock.Mock() get_ctx.return_value = mock_ctx mock_ctx.session.query.side_effect = Exception() # just make sure that no exception is raised self.remove_networks_from_down_agents() def test_reschedule_doesnt_occur_if_no_agents(self): agents = self._create_and_set_agents_down(['host-a', 'host-b'], 2) self._test_schedule_bind_network([agents[0]], self.network_id) with mock.patch.object(self, 'remove_network_from_dhcp_agent') as rn: self.remove_networks_from_down_agents() self.assertFalse(rn.called) class DHCPAgentWeightSchedulerTestCase(test_plugin.Ml2PluginV2TestCase): """Unit test scenarios for WeightScheduler.schedule.""" def setUp(self): super(DHCPAgentWeightSchedulerTestCase, self).setUp() weight_scheduler = ( 'neutron.scheduler.dhcp_agent_scheduler.WeightScheduler') cfg.CONF.set_override('network_scheduler_driver', weight_scheduler) self.plugin = self.driver mock.patch.object( self.plugin, 
'filter_hosts_with_network_access', side_effect=lambda context, network_id, hosts: hosts).start() self.plugin.network_scheduler = importutils.import_object( weight_scheduler) cfg.CONF.set_override("dhcp_load_type", "networks") self.segments_plugin = importutils.import_object( 'neutron.services.segments.plugin.Plugin') self.ctx = context.get_admin_context() def _create_network(self): net = self.plugin.create_network( self.ctx, {'network': {'name': 'name', 'tenant_id': 'tenant_one', 'admin_state_up': True, 'shared': True}}) return net['id'] def _create_segment(self, network_id): seg = self.segments_plugin.create_segment( self.ctx, {'segment': {'network_id': network_id, 'name': None, 'description': None, 'physical_network': 'physnet1', 'network_type': 'vlan', 'segmentation_id': constants.ATTR_NOT_SPECIFIED}}) return seg['id'] def test_scheduler_one_agents_per_network(self): net_id = self._create_network() helpers.register_dhcp_agent(HOST_C) self.plugin.network_scheduler.schedule(self.plugin, self.ctx, {'id': net_id}) agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx, [net_id]) self.assertEqual(1, len(agents)) def test_scheduler_two_agents_per_network(self): cfg.CONF.set_override('dhcp_agents_per_network', 2) net_id = self._create_network() helpers.register_dhcp_agent(HOST_C) helpers.register_dhcp_agent(HOST_D) self.plugin.network_scheduler.schedule(self.plugin, self.ctx, {'id': net_id}) agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx, [net_id]) self.assertEqual(2, len(agents)) def test_scheduler_no_active_agents(self): net_id = self._create_network() self.plugin.network_scheduler.schedule(self.plugin, self.ctx, {'id': net_id}) agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx, [net_id]) self.assertEqual(0, len(agents)) def test_scheduler_equal_distribution(self): net_id_1 = self._create_network() net_id_2 = self._create_network() net_id_3 = self._create_network() helpers.register_dhcp_agent(HOST_C) helpers.register_dhcp_agent(HOST_D, networks=1) self.plugin.network_scheduler.schedule( self.plugin, context.get_admin_context(), {'id': net_id_1}) helpers.register_dhcp_agent(HOST_D, networks=2) self.plugin.network_scheduler.schedule( self.plugin, context.get_admin_context(), {'id': net_id_2}) helpers.register_dhcp_agent(HOST_C, networks=4) self.plugin.network_scheduler.schedule( self.plugin, context.get_admin_context(), {'id': net_id_3}) agent1 = self.plugin.get_dhcp_agents_hosting_networks( self.ctx, [net_id_1]) agent2 = self.plugin.get_dhcp_agents_hosting_networks( self.ctx, [net_id_2]) agent3 = self.plugin.get_dhcp_agents_hosting_networks( self.ctx, [net_id_3]) self.assertEqual('host-c', agent1[0]['host']) self.assertEqual('host-c', agent2[0]['host']) self.assertEqual('host-d', agent3[0]['host']) def _get_network_with_candidate_hosts(self, net_id, seg_id): # expire the session so that the segment is fully reloaded on fetch, # including its new host mapping self.ctx.session.expire_all() net = self.plugin.get_network(self.ctx, net_id) seg = self.segments_plugin.get_segment(self.ctx, seg_id) net['candidate_hosts'] = seg['hosts'] return net def test_schedule_segment_one_hostable_agent(self): net_id = self._create_network() seg_id = self._create_segment(net_id) helpers.register_dhcp_agent(HOST_C) helpers.register_dhcp_agent(HOST_D) segments_service_db.update_segment_host_mapping( self.ctx, HOST_C, {seg_id}) net = self._get_network_with_candidate_hosts(net_id, seg_id) agents = self.plugin.network_scheduler.schedule( self.plugin, self.ctx, net) 
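        # Both agents are alive, but only HOST_C was mapped to the segment,
        # so the weight scheduler should offer exactly one hostable
        # candidate (asserted next).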
self.assertEqual(1, len(agents)) self.assertEqual(HOST_C, agents[0].host) def test_schedule_segment_many_hostable_agents(self): net_id = self._create_network() seg_id = self._create_segment(net_id) helpers.register_dhcp_agent(HOST_C) helpers.register_dhcp_agent(HOST_D) segments_service_db.update_segment_host_mapping( self.ctx, HOST_C, {seg_id}) segments_service_db.update_segment_host_mapping( self.ctx, HOST_D, {seg_id}) net = self._get_network_with_candidate_hosts(net_id, seg_id) agents = self.plugin.network_scheduler.schedule( self.plugin, self.ctx, net) self.assertEqual(1, len(agents)) self.assertIn(agents[0].host, [HOST_C, HOST_D]) def test_schedule_segment_no_host_mapping(self): net_id = self._create_network() seg_id = self._create_segment(net_id) helpers.register_dhcp_agent(HOST_C) helpers.register_dhcp_agent(HOST_D) net = self.plugin.get_network(self.ctx, net_id) seg = self.segments_plugin.get_segment(self.ctx, seg_id) net['candidate_hosts'] = seg['hosts'] agents = self.plugin.network_scheduler.schedule( self.plugin, self.ctx, net) self.assertEqual(0, len(agents)) def test_schedule_segment_two_agents_per_segment(self): cfg.CONF.set_override('dhcp_agents_per_network', 2) net_id = self._create_network() seg_id = self._create_segment(net_id) helpers.register_dhcp_agent(HOST_C) helpers.register_dhcp_agent(HOST_D) segments_service_db.update_segment_host_mapping( self.ctx, HOST_C, {seg_id}) segments_service_db.update_segment_host_mapping( self.ctx, HOST_D, {seg_id}) net = self._get_network_with_candidate_hosts(net_id, seg_id) agents = self.plugin.network_scheduler.schedule( self.plugin, self.ctx, net) self.assertEqual(2, len(agents)) self.assertIn(agents[0].host, [HOST_C, HOST_D]) self.assertIn(agents[1].host, [HOST_C, HOST_D]) def test_schedule_segment_two_agents_per_segment_one_hostable_agent(self): cfg.CONF.set_override('dhcp_agents_per_network', 2) net_id = self._create_network() seg_id = self._create_segment(net_id) helpers.register_dhcp_agent(HOST_C) helpers.register_dhcp_agent(HOST_D) segments_service_db.update_segment_host_mapping( self.ctx, HOST_C, {seg_id}) net = self._get_network_with_candidate_hosts(net_id, seg_id) agents = self.plugin.network_scheduler.schedule( self.plugin, self.ctx, net) self.assertEqual(1, len(agents)) self.assertEqual(HOST_C, agents[0].host) class TestDhcpSchedulerFilter(TestDhcpSchedulerBaseTestCase, sched_db.DhcpAgentSchedulerDbMixin): def _test_get_dhcp_agents_hosting_networks(self, expected, **kwargs): cfg.CONF.set_override('dhcp_agents_per_network', 4) agents = self._create_and_set_agents_down(['host-a', 'host-b'], 1) agents += self._create_and_set_agents_down(['host-c', 'host-d'], 1, admin_state_up=False) networks = kwargs.pop('networks', [self.network_id]) for network in networks: self._test_schedule_bind_network(agents, network) agents = self.get_dhcp_agents_hosting_networks(self.ctx, networks, **kwargs) host_ids = set(a['host'] for a in agents) self.assertEqual(expected, host_ids) def test_get_dhcp_agents_hosting_networks_default(self): self._test_get_dhcp_agents_hosting_networks({'host-a', 'host-b', 'host-c', 'host-d'}) def test_get_dhcp_agents_hosting_networks_active(self): self._test_get_dhcp_agents_hosting_networks({'host-b', 'host-d'}, active=True) def test_get_dhcp_agents_hosting_networks_admin_up(self): self._test_get_dhcp_agents_hosting_networks({'host-a', 'host-b'}, admin_state_up=True) def test_get_dhcp_agents_hosting_networks_active_admin_up(self): self._test_get_dhcp_agents_hosting_networks({'host-b'}, active=True, admin_state_up=True) 
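    # Fixture summary for the filter tests in this class (see
    # _test_get_dhcp_agents_hosting_networks above):
    #   host-a: admin up,   dead      host-b: admin up,   alive
    #   host-c: admin down, dead      host-d: admin down, alive
    # The active/admin_state_up filter combinations below each select the
    # corresponding subset of these four agents.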
def test_get_dhcp_agents_hosting_networks_admin_down(self): self._test_get_dhcp_agents_hosting_networks({'host-c', 'host-d'}, admin_state_up=False) def test_get_dhcp_agents_hosting_networks_active_admin_down(self): self._test_get_dhcp_agents_hosting_networks({'host-d'}, active=True, admin_state_up=False) def test_get_dhcp_agents_hosting_many_networks(self): net_id = uuidutils.generate_uuid() self._save_networks([net_id]) networks = [net_id, self.network_id] self._test_get_dhcp_agents_hosting_networks({'host-a', 'host-b', 'host-c', 'host-d'}, networks=networks) def test_get_dhcp_agents_host_network_filter_by_hosts(self): self._test_get_dhcp_agents_hosting_networks({'host-a'}, hosts=['host-a']) class DHCPAgentAZAwareWeightSchedulerTestCase(TestDhcpSchedulerBaseTestCase): def setUp(self): super(DHCPAgentAZAwareWeightSchedulerTestCase, self).setUp() self.setup_coreplugin('ml2') cfg.CONF.set_override("network_scheduler_driver", 'neutron.scheduler.dhcp_agent_scheduler.AZAwareWeightScheduler') self.plugin = importutils.import_object('neutron.plugins.ml2.plugin.' 'Ml2Plugin') mock.patch.object( self.plugin, 'filter_hosts_with_network_access', side_effect=lambda context, network_id, hosts: hosts).start() cfg.CONF.set_override('dhcp_agents_per_network', 1) cfg.CONF.set_override("dhcp_load_type", "networks") def test_az_scheduler_one_az_hints(self): net_id = uuidutils.generate_uuid() self._save_networks([net_id]) helpers.register_dhcp_agent('az1-host1', networks=1, az='az1') helpers.register_dhcp_agent('az1-host2', networks=2, az='az1') helpers.register_dhcp_agent('az2-host1', networks=3, az='az2') helpers.register_dhcp_agent('az2-host2', networks=4, az='az2') self.plugin.network_scheduler.schedule(self.plugin, self.ctx, {'id': net_id, 'availability_zone_hints': ['az2']}) agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx, [net_id]) self.assertEqual(1, len(agents)) self.assertEqual('az2-host1', agents[0]['host']) def test_az_scheduler_default_az_hints(self): net_id = uuidutils.generate_uuid() cfg.CONF.set_override('default_availability_zones', ['az1']) self._save_networks([net_id]) helpers.register_dhcp_agent('az1-host1', networks=1, az='az1') helpers.register_dhcp_agent('az1-host2', networks=2, az='az1') helpers.register_dhcp_agent('az2-host1', networks=3, az='az2') helpers.register_dhcp_agent('az2-host2', networks=4, az='az2') self.plugin.network_scheduler.schedule(self.plugin, self.ctx, {'id': net_id, 'availability_zone_hints': []}) agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx, [net_id]) self.assertEqual(1, len(agents)) self.assertEqual('az1-host1', agents[0]['host']) def test_az_scheduler_two_az_hints(self): net_id = uuidutils.generate_uuid() cfg.CONF.set_override('dhcp_agents_per_network', 2) self._save_networks([net_id]) helpers.register_dhcp_agent('az1-host1', networks=1, az='az1') helpers.register_dhcp_agent('az1-host2', networks=2, az='az1') helpers.register_dhcp_agent('az2-host1', networks=3, az='az2') helpers.register_dhcp_agent('az2-host2', networks=4, az='az2') helpers.register_dhcp_agent('az3-host1', networks=5, az='az3') helpers.register_dhcp_agent('az3-host2', networks=6, az='az3') self.plugin.network_scheduler.schedule(self.plugin, self.ctx, {'id': net_id, 'availability_zone_hints': ['az1', 'az3']}) agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx, [net_id]) self.assertEqual(2, len(agents)) expected_hosts = set(['az1-host1', 'az3-host1']) hosts = set([a['host'] for a in agents]) self.assertEqual(expected_hosts, hosts) def 
test_az_scheduler_two_az_hints_one_available_az(self): net_id = uuidutils.generate_uuid() cfg.CONF.set_override('dhcp_agents_per_network', 2) self._save_networks([net_id]) helpers.register_dhcp_agent('az1-host1', networks=1, az='az1') helpers.register_dhcp_agent('az1-host2', networks=2, az='az1') helpers.register_dhcp_agent('az2-host1', networks=3, alive=False, az='az2') helpers.register_dhcp_agent('az2-host2', networks=4, admin_state_up=False, az='az2') self.plugin.network_scheduler.schedule(self.plugin, self.ctx, {'id': net_id, 'availability_zone_hints': ['az1', 'az2']}) agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx, [net_id]) self.assertEqual(2, len(agents)) expected_hosts = set(['az1-host1', 'az1-host2']) hosts = set([a['host'] for a in agents]) self.assertEqual(expected_hosts, hosts) def _test_az_scheduler_no_az_hints(self, multiple_agent=False): net_id = uuidutils.generate_uuid() num_agent = 2 if multiple_agent else 1 cfg.CONF.set_override('dhcp_agents_per_network', num_agent) self._save_networks([net_id]) helpers.register_dhcp_agent('az1-host1', networks=2, az='az1') helpers.register_dhcp_agent('az1-host2', networks=3, az='az1') helpers.register_dhcp_agent('az2-host1', networks=2, az='az2') helpers.register_dhcp_agent('az2-host2', networks=1, az='az2') self.plugin.network_scheduler.schedule(self.plugin, self.ctx, {'id': net_id, 'availability_zone_hints': []}) agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx, [net_id]) self.assertEqual(num_agent, len(agents)) if multiple_agent: expected_hosts = set(['az1-host1', 'az2-host2']) else: expected_hosts = set(['az2-host2']) hosts = {a['host'] for a in agents} self.assertEqual(expected_hosts, hosts) def test_az_scheduler_no_az_hints_multiple_agent(self): self._test_az_scheduler_no_az_hints(multiple_agent=True) def test_az_scheduler_no_az_hints_one_agent(self): self._test_az_scheduler_no_az_hints() def test_az_scheduler_select_az_with_least_weight(self): self._save_networks([uuidutils.generate_uuid()]) dhcp_agents = [] # Register 6 dhcp agents in 3 AZs, every AZ will have 2 agents. dhcp_agents.append( helpers.register_dhcp_agent('az1-host1', networks=6, az='az1')) dhcp_agents.append( helpers.register_dhcp_agent('az1-host2', networks=5, az='az1')) dhcp_agents.append( helpers.register_dhcp_agent('az2-host1', networks=4, az='az2')) dhcp_agents.append( helpers.register_dhcp_agent('az2-host2', networks=3, az='az2')) dhcp_agents.append( helpers.register_dhcp_agent('az3-host1', networks=2, az='az3')) dhcp_agents.append( helpers.register_dhcp_agent('az3-host2', networks=1, az='az3')) # Try multiple times to verify that the select of AZ scheduler will # output stably. for i in range(3): # Shuffle the agents random.shuffle(dhcp_agents) # Select agents with empty resource_hosted_agents. This means each # AZ will have same amount of agents scheduled (0 in this case) agents_select = self.plugin.network_scheduler.select( self.plugin, self.ctx, dhcp_agents, [], 2) self.assertEqual(2, len(agents_select)) # The agent and az with least weight should always be selected # first self.assertEqual('az3-host2', agents_select[0]['host']) self.assertEqual('az3', agents_select[0]['availability_zone']) # The second selected agent should be the agent with least weight, # which is also not in the same az as the first selected agent. 
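            # Worked weights for this fixture: az3-host2 carries 1 network,
            # the minimum, so it is picked first; once az3 holds a
            # selection, the AZ-aware weighting favors the lightest agent
            # in another AZ, az2-host2 (3 networks), over az3-host1
            # (2 networks).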
            self.assertEqual('az2-host2', agents_select[1]['host'])
            self.assertEqual('az2', agents_select[1]['availability_zone'])

neutron-16.0.0.0b2.dev214/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py

# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections
import contextlib
import datetime

import mock
from neutron_lib.api.definitions import l3_ext_ha_mode
from neutron_lib.api.definitions import portbindings
from neutron_lib.api.definitions import router_availability_zone
from neutron_lib import constants
from neutron_lib import context as n_context
from neutron_lib.exceptions import l3 as l3_exc
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from neutron_lib import rpc as n_rpc
from oslo_config import cfg
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
from sqlalchemy import orm
import testscenarios
import testtools

from neutron.db import db_base_plugin_v2 as db_v2
from neutron.db import l3_db
from neutron.db import l3_dvr_db
from neutron.db import l3_dvr_ha_scheduler_db
from neutron.db import l3_dvrscheduler_db
from neutron.db import l3_hamode_db
from neutron.db import l3_hascheduler_db
from neutron.extensions import l3agentscheduler as l3agent
from neutron import manager
from neutron.objects import agent as agent_obj
from neutron.objects import l3_hamode
from neutron.objects import l3agent as rb_obj
from neutron import quota
from neutron.scheduler import l3_agent_scheduler
from neutron.tests import base
from neutron.tests.common import helpers
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit.extensions import test_l3
from neutron.tests.unit import testlib_api

# the below code is required for the following reason
# (as documented in testscenarios)
"""Multiply tests depending on their 'scenarios' attribute.

This can be assigned to 'load_tests' in any test module to make this
automatically work across tests in the module.
""" load_tests = testscenarios.load_tests_apply_scenarios HOST_DVR = 'my_l3_host_dvr' HOST_DVR_SNAT = 'my_l3_host_dvr_snat' DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' DEVICE_OWNER_COMPUTE_NOVA = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'nova' class FakeL3Scheduler(l3_agent_scheduler.L3Scheduler): def schedule(self): pass def _choose_router_agent(self): pass def _choose_router_agents_for_ha(self): pass class FakePortDB(object): def __init__(self, port_list): self._port_list = port_list def _get_query_answer(self, port_list, filters): answers = [] for port in port_list: matched = True for key, search_values in filters.items(): port_value = port.get(key, None) if not port_value: matched = False break if isinstance(port_value, list): sub_answers = self._get_query_answer(port_value, search_values) matched = len(sub_answers) > 0 else: matched = port_value in search_values if not matched: break if matched: answers.append(port) return answers def get_port(self, context, port_id): for port in self._port_list: if port['id'] == port_id: if port['tenant_id'] == context.tenant_id or context.is_admin: return port break return None def get_ports(self, context, filters=None): query_filters = dict() if filters: query_filters.update(filters) if not context.is_admin: query_filters['tenant_id'] = [context.tenant_id] result = self._get_query_answer(self._port_list, query_filters) return result class L3SchedulerBaseTestCase(base.BaseTestCase): def setUp(self): super(L3SchedulerBaseTestCase, self).setUp() self.scheduler = FakeL3Scheduler() self.plugin = mock.Mock() def _test__get_routers_can_schedule(self, routers, agent, target_routers): self.plugin.get_l3_agent_candidates.return_value = agent result = self.scheduler._get_routers_can_schedule( self.plugin, mock.ANY, routers, mock.ANY) self.assertEqual(target_routers, result) def test__get_routers_can_schedule_with_compat_agent(self): routers = [{'id': 'foo_router'}] self._test__get_routers_can_schedule(routers, mock.ANY, routers) def test__get_routers_can_schedule_with_no_compat_agent(self): routers = [{'id': 'foo_router'}] self._test__get_routers_can_schedule(routers, None, []) def test__bind_routers_centralized(self): routers = [{'id': 'foo_router'}] agent = agent_obj.Agent(mock.ANY, id=uuidutils.generate_uuid()) with mock.patch.object(self.scheduler, 'bind_router') as mock_bind: self.scheduler._bind_routers(mock.ANY, mock.ANY, routers, agent) mock_bind.assert_called_once_with(mock.ANY, mock.ANY, 'foo_router', agent.id) def _test__bind_routers_ha(self, has_binding): routers = [{'id': 'foo_router', 'ha': True, 'tenant_id': '42'}] agent = agent_obj.Agent(mock.ANY, id=uuidutils.generate_uuid()) with mock.patch.object(self.scheduler, '_router_has_binding', return_value=has_binding) as mock_has_binding,\ mock.patch.object(self.scheduler, 'create_ha_port_and_bind') as mock_bind: self.scheduler._bind_routers(mock.ANY, mock.ANY, routers, agent) mock_has_binding.assert_called_once_with(mock.ANY, 'foo_router', agent.id) self.assertEqual(not has_binding, mock_bind.called) def test__bind_routers_ha_has_binding(self): self._test__bind_routers_ha(has_binding=True) def test__bind_routers_ha_no_binding(self): self._test__bind_routers_ha(has_binding=False) def test__get_candidates_iterable_on_early_returns(self): plugin = mock.MagicMock() # non-distributed router already hosted plugin.get_l3_agents_hosting_routers.return_value = [{'id': 'a1'}] router = {'distributed': False, 'id': 'falafel'} iter(self.scheduler._get_candidates(plugin, 
mock.MagicMock(), router)) # distributed router but no agents router['distributed'] = True plugin.get_l3_agents.return_value = [] iter(self.scheduler._get_candidates(plugin, mock.MagicMock(), router)) self.assertFalse(plugin.get_l3_agent_candidates.called) def test__get_candidates_skips_get_l3_agent_candidates_if_dvr_scheduled( self): plugin = mock.MagicMock() # distributed router already hosted plugin.get_l3_agents_hosting_routers.return_value = [{'id': 'a1'}] router = {'distributed': True, 'id': uuidutils.generate_uuid()} plugin.get_l3_agents.return_value = ['a1'] self.scheduler._get_candidates(plugin, mock.MagicMock(), router) self.assertFalse(plugin.get_l3_agent_candidates.called) class L3SchedulerBaseMixin(object): def _register_l3_agents(self, plugin=None): self.agent1 = helpers.register_l3_agent( 'host_1', constants.L3_AGENT_MODE_LEGACY) self.agent_id1 = self.agent1.id self.agent2 = helpers.register_l3_agent( 'host_2', constants.L3_AGENT_MODE_LEGACY) self.agent_id2 = self.agent2.id def _register_l3_dvr_agents(self): self.l3_dvr_agent = helpers.register_l3_agent( HOST_DVR, constants.L3_AGENT_MODE_DVR) self.l3_dvr_agent_id = self.l3_dvr_agent.id self.l3_dvr_snat_agent = helpers.register_l3_agent( HOST_DVR_SNAT, constants.L3_AGENT_MODE_DVR_SNAT) self.l3_dvr_snat_id = self.l3_dvr_snat_agent.id def _set_l3_agent_admin_state(self, context, agent_id, state=True): update = {'agent': {'admin_state_up': state}} self.plugin.update_agent(context, agent_id, update) def _set_l3_agent_dead(self, agent_id): update = { 'agent': { 'heartbeat_timestamp': timeutils.utcnow() - datetime.timedelta(hours=1)}} self.plugin.update_agent(self.adminContext, agent_id, update) @contextlib.contextmanager def router_with_ext_gw(self, name='router1', admin_state_up=True, fmt=None, tenant_id=uuidutils.generate_uuid(), external_gateway_info=None, subnet=None, set_context=False, **kwargs): router = self._make_router(fmt or self.fmt, tenant_id, name, admin_state_up, external_gateway_info, set_context, **kwargs) self._add_external_gateway_to_router( router['router']['id'], subnet['subnet']['network_id']) yield router self._remove_external_gateway_from_router( router['router']['id'], subnet['subnet']['network_id']) self._delete('routers', router['router']['id']) class L3SchedulerTestBaseMixin(object): def _test_add_router_to_l3_agent(self, distributed=False, already_scheduled=False, external_gw=None): agent_id = self.agent_id1 agent = self.agent1 if distributed: self._register_l3_dvr_agents() agent_id = self.l3_dvr_snat_id agent = self.l3_dvr_snat_agent router = self._make_router(self.fmt, tenant_id=uuidutils.generate_uuid(), name='r1') router['router']['distributed'] = distributed router['router']['external_gateway_info'] = external_gw if already_scheduled: self._test_schedule_bind_router(agent, router) with mock.patch.object(self.plugin, "validate_agent_router_combination"),\ mock.patch.object(self.plugin, "create_router_to_agent_binding") as auto_s,\ mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router', return_value=router['router']): self.plugin.add_router_to_l3_agent(self.adminContext, agent_id, router['router']['id']) self.assertNotEqual(already_scheduled, auto_s.called) def test__unbind_router_removes_binding(self): agent_id = self.agent_id1 agent = self.agent1 router = self._make_router(self.fmt, tenant_id=uuidutils.generate_uuid(), name='r1') self._test_schedule_bind_router(agent, router) self.plugin._unbind_router(self.adminContext, router['router']['id'], agent_id) bindings = 
rb_obj.RouterL3AgentBinding.get_l3_agents_by_router_ids( self.adminContext, [router['router']['id']]) self.assertEqual(0, len(bindings)) def _create_router_for_l3_agent_dvr_test(self, distributed=False, external_gw=None): router = self._make_router(self.fmt, tenant_id=uuidutils.generate_uuid(), name='r1') router['router']['distributed'] = distributed router['router']['external_gateway_info'] = external_gw return router def _prepare_l3_agent_dvr_move_exceptions(self, distributed=False, external_gw=None, agent_id=None, expected_exception=None): router = self._create_router_for_l3_agent_dvr_test( distributed=distributed, external_gw=external_gw) with mock.patch.object(self.plugin, "create_router_to_agent_binding"),\ mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router', return_value=router['router']): self.assertRaises(expected_exception, self.plugin.add_router_to_l3_agent, self.adminContext, agent_id, router['router']['id']) def test__schedule_router_skips_unschedulable_routers(self): mock.patch.object(self.plugin, 'router_supports_scheduling', return_value=False).start() scheduler = l3_agent_scheduler.ChanceScheduler() self.assertIsNone(scheduler._schedule_router(self.plugin, self.adminContext, 'router_id')) def test_add_router_to_l3_agent_mismatch_error_dvr_to_legacy(self): self._register_l3_agents() self._prepare_l3_agent_dvr_move_exceptions( distributed=True, agent_id=self.agent_id1, expected_exception=l3agent.RouterL3AgentMismatch) def test_add_router_to_l3_agent_mismatch_error_legacy_to_dvr(self): self._register_l3_dvr_agents() self._prepare_l3_agent_dvr_move_exceptions( agent_id=self.l3_dvr_agent_id, expected_exception=l3agent.DVRL3CannotAssignToDvrAgent) def test_add_router_to_l3_agent_mismatch_error_dvr_to_dvr(self): self._register_l3_dvr_agents() self._prepare_l3_agent_dvr_move_exceptions( distributed=True, agent_id=self.l3_dvr_agent_id, expected_exception=l3agent.DVRL3CannotAssignToDvrAgent) def test_add_router_to_l3_agent_dvr_to_snat(self): external_gw_info = { "network_id": uuidutils.generate_uuid(), "enable_snat": True } self._register_l3_dvr_agents() agent_id = self.l3_dvr_snat_id router = self._create_router_for_l3_agent_dvr_test( distributed=True, external_gw=external_gw_info) with mock.patch.object(self.plugin, "validate_agent_router_combination"),\ mock.patch.object( self.plugin, "create_router_to_agent_binding") as rtr_agent_binding,\ mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router', return_value=router['router']): self.plugin.add_router_to_l3_agent(self.adminContext, agent_id, router['router']['id']) rtr_agent_binding.assert_called_once_with( self.adminContext, mock.ANY, router['router']) def test_add_router_to_l3_agent(self): self._test_add_router_to_l3_agent() def test_add_distributed_router_to_l3_agent(self): external_gw_info = { "network_id": uuidutils.generate_uuid(), "enable_snat": True } self._test_add_router_to_l3_agent(distributed=True, external_gw=external_gw_info) def test_add_router_to_l3_agent_already_scheduled(self): self._test_add_router_to_l3_agent(already_scheduled=True) def test_add_distributed_router_to_l3_agent_already_scheduled(self): external_gw_info = { "network_id": uuidutils.generate_uuid(), "enable_snat": True } self._test_add_router_to_l3_agent(distributed=True, already_scheduled=True, external_gw=external_gw_info) def test_remove_router_from_l3_agent_in_dvr_mode(self): self._register_l3_dvr_agents() self.assertRaises(l3agent.DVRL3CannotRemoveFromDvrAgent, self.plugin.remove_router_from_l3_agent, self.adminContext, 
self.l3_dvr_agent_id, mock.ANY) def test_remove_router_from_l3_agent_in_dvr_snat_mode(self): self._register_l3_dvr_agents() router = self._create_router_for_l3_agent_dvr_test( distributed=True) agent_id = self.l3_dvr_snat_id l3_notifier = mock.Mock() self.plugin.agent_notifiers = {constants.AGENT_TYPE_L3: l3_notifier} self.plugin.remove_router_from_l3_agent(self.adminContext, agent_id, router['router']['id']) l3_notifier.router_removed_from_agent.assert_called_once_with( self.adminContext, router['router']['id'], self.l3_dvr_snat_agent.host) def _prepare_schedule_dvr_tests(self): scheduler = l3_agent_scheduler.ChanceScheduler() agent = agent_obj.Agent(mock.ANY, id=uuidutils.generate_uuid()) agent.admin_state_up = True agent.heartbeat_timestamp = timeutils.utcnow() plugin = mock.Mock() plugin.get_l3_agents_hosting_routers.return_value = [] plugin.get_l3_agents.return_value = [agent] plugin.get_l3_agent_candidates.return_value = [agent] return scheduler, agent, plugin def test_schedule_dvr_router_without_snatbinding_and_no_gw(self): scheduler, agent, plugin = self._prepare_schedule_dvr_tests() sync_router = { 'id': 'foo_router_id', 'distributed': True } plugin.get_router.return_value = sync_router with mock.patch.object(scheduler, 'bind_router'),\ mock.patch.object(plugin, 'get_snat_bindings', return_value=False): scheduler._schedule_router( plugin, self.adminContext, 'foo_router_id', None) expected_calls = [ mock.call.get_router(mock.ANY, 'foo_router_id'), mock.call.get_l3_agents_hosting_routers( mock.ANY, ['foo_router_id'], admin_state_up=True), mock.call.get_l3_agents(mock.ANY, active=True), mock.call.get_l3_agent_candidates(mock.ANY, sync_router, [agent]), ] plugin.assert_has_calls(expected_calls) def test_schedule_router_distributed(self): scheduler, agent, plugin = self._prepare_schedule_dvr_tests() sync_router = { 'id': 'foo_router_id', 'distributed': True, 'external_gateway_info': { 'network_id': uuidutils.generate_uuid(), 'enable_snat': True } } plugin.get_router.return_value = sync_router with mock.patch.object(scheduler, 'bind_router'): scheduler._schedule_router( plugin, self.adminContext, 'foo_router_id', None) expected_calls = [ mock.call.get_router(mock.ANY, 'foo_router_id'), mock.call.get_l3_agents_hosting_routers( mock.ANY, ['foo_router_id'], admin_state_up=True), mock.call.get_l3_agents(mock.ANY, active=True), mock.call.get_l3_agent_candidates(mock.ANY, sync_router, [agent]), ] plugin.assert_has_calls(expected_calls) def _test_schedule_bind_router(self, agent, router): ctx = self.adminContext scheduler = l3_agent_scheduler.ChanceScheduler() rid = router['router']['id'] scheduler.bind_router(self.plugin, ctx, rid, agent.id) results = rb_obj.RouterL3AgentBinding.get_objects(ctx, router_id=rid) self.assertGreater(len(results), 0) self.assertIn(agent.id, [bind.l3_agent_id for bind in results]) def test_bind_new_router(self): router = self._make_router(self.fmt, tenant_id=uuidutils.generate_uuid(), name='r1') with mock.patch.object(l3_agent_scheduler.LOG, 'debug') as flog: self._test_schedule_bind_router(self.agent1, router) self.assertEqual(1, flog.call_count) args, kwargs = flog.call_args self.assertIn('is scheduled', args[0]) def test_bind_absent_router(self): scheduler = l3_agent_scheduler.ChanceScheduler() # checking that bind_router() is not throwing # when supplied with router_id of non-existing router scheduler.bind_router(self.plugin, self.adminContext, uuidutils.generate_uuid(), self.agent_id1) def test_bind_existing_router(self): router = self._make_router(self.fmt, 
tenant_id=uuidutils.generate_uuid(), name='r2') self._test_schedule_bind_router(self.agent1, router) with mock.patch.object(l3_agent_scheduler.LOG, 'debug') as flog: self._test_schedule_bind_router(self.agent1, router) self.assertEqual(1, flog.call_count) args, kwargs = flog.call_args self.assertIn('has already been scheduled', args[0]) def _check_get_l3_agent_candidates( self, router, agent_list, exp_host, count=1): candidates = self.plugin.get_l3_agent_candidates(self.adminContext, router, agent_list) self.assertEqual(count, len(candidates)) if count: self.assertEqual(exp_host, candidates[0]['host']) def test_get_l3_agent_candidates_legacy(self): self._register_l3_dvr_agents() router = self._make_router(self.fmt, tenant_id=uuidutils.generate_uuid(), name='r2') router['external_gateway_info'] = None router['id'] = uuidutils.generate_uuid() agent_list = [self.agent1, self.l3_dvr_agent] # test legacy agent_mode case: only legacy agent should be candidate router['distributed'] = False exp_host = 'host_1' self._check_get_l3_agent_candidates(router, agent_list, exp_host) def test_get_l3_agent_candidates_dvr(self): self._register_l3_dvr_agents() router = self._make_router(self.fmt, tenant_id=uuidutils.generate_uuid(), name='r2') router['external_gateway_info'] = None router['id'] = uuidutils.generate_uuid() agent_list = [self.agent1, self.l3_dvr_agent] # test dvr agent_mode case no candidates router['distributed'] = True self.get_subnet_ids_on_router = mock.Mock() self._check_dvr_serviceable_ports_on_host = mock.Mock( return_value=True) self._check_get_l3_agent_candidates(router, agent_list, None, count=0) def test_get_l3_agent_candidates_dvr_no_vms(self): self._register_l3_dvr_agents() router = self._make_router(self.fmt, tenant_id=uuidutils.generate_uuid(), name='r2') router['external_gateway_info'] = None router['id'] = uuidutils.generate_uuid() agent_list = [self.agent1, self.l3_dvr_agent] router['distributed'] = True # Test no VMs present case self.get_subnet_ids_on_router = mock.Mock() self._check_dvr_serviceable_ports_on_host = mock.Mock( return_value=False) self._check_get_l3_agent_candidates( router, agent_list, HOST_DVR, count=0) def test_get_l3_agent_candidates_dvr_snat(self): self._register_l3_dvr_agents() router = self._make_router(self.fmt, tenant_id=uuidutils.generate_uuid(), name='r2') router['external_gateway_info'] = None router['id'] = uuidutils.generate_uuid() router['distributed'] = True agent_list = [self.l3_dvr_snat_agent] self.get_subnet_ids_on_router = mock.Mock() self._check_dvr_serviceable_ports_on_host = mock.Mock( return_value=True) self._check_get_l3_agent_candidates(router, agent_list, HOST_DVR_SNAT) def test_get_l3_agent_candidates_dvr_snat_no_vms(self): self._register_l3_dvr_agents() router = self._make_router(self.fmt, tenant_id=uuidutils.generate_uuid(), name='r2') router['external_gateway_info'] = None router['id'] = uuidutils.generate_uuid() router['distributed'] = True agent_list = [self.l3_dvr_snat_agent] self._check_dvr_serviceable_ports_on_host = mock.Mock( return_value=False) # Test no VMs present case self.get_subnet_ids_on_router = mock.Mock() self._check_dvr_serviceable_ports_on_host.return_value = False self._check_get_l3_agent_candidates( router, agent_list, HOST_DVR_SNAT, count=1) def test_get_l3_agent_candidates_dvr_ha_snat_no_vms(self): self._register_l3_dvr_agents() router = self._make_router(self.fmt, tenant_id=uuidutils.generate_uuid(), name='r2') router['external_gateway_info'] = None router['id'] = uuidutils.generate_uuid() 
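        # The flags set next make this a DVR+HA router; the dvr_snat agent
        # should remain a candidate even when no serviceable ports exist on
        # its host.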
router['distributed'] = True router['ha'] = True agent_list = [self.l3_dvr_snat_agent] self.check_ports_exist_on_l3agent = mock.Mock(return_value=False) # Test no VMs present case self.check_ports_exist_on_l3agent.return_value = False self.get_subnet_ids_on_router = mock.Mock(return_value=set()) self._check_get_l3_agent_candidates( router, agent_list, HOST_DVR_SNAT, count=1) def test_get_l3_agent_candidates_centralized(self): self._register_l3_dvr_agents() router = self._make_router(self.fmt, tenant_id=uuidutils.generate_uuid(), name='r2') router['external_gateway_info'] = None router['id'] = uuidutils.generate_uuid() # check centralized test case router['distributed'] = False agent_list = [self.l3_dvr_snat_agent] self._check_get_l3_agent_candidates(router, agent_list, HOST_DVR_SNAT) def test_get_l3_agents_hosting_routers(self): agent = helpers.register_l3_agent('host_6') router = self._make_router(self.fmt, tenant_id=uuidutils.generate_uuid(), name='r1') ctx = self.adminContext router_id = router['router']['id'] self.plugin.router_scheduler.bind_router(self.plugin, ctx, router_id, agent.id) agents = self.plugin.get_l3_agents_hosting_routers(ctx, [router_id]) self.assertEqual([agent.id], [agt.id for agt in agents]) agents = self.plugin.get_l3_agents_hosting_routers(ctx, [router_id], admin_state_up=True) self.assertEqual([agent.id], [agt.id for agt in agents]) self._set_l3_agent_admin_state(ctx, agent.id, False) agents = self.plugin.get_l3_agents_hosting_routers(ctx, [router_id]) self.assertEqual([agent.id], [agt.id for agt in agents]) agents = self.plugin.get_l3_agents_hosting_routers(ctx, [router_id], admin_state_up=True) self.assertEqual([], agents) class L3SchedulerTestCaseMixin(test_l3.L3NatTestCaseMixin, L3SchedulerBaseMixin, L3SchedulerTestBaseMixin): def setUp(self): self.mock_rescheduling = False ext_mgr = test_l3.L3TestExtensionManager() plugin_str = ('neutron.tests.unit.extensions.test_l3.' 'TestL3NatIntAgentSchedulingPlugin') super(L3SchedulerTestCaseMixin, self).setUp(plugin=plugin_str, ext_mgr=ext_mgr) self.adminContext = n_context.get_admin_context() self.plugin = directory.get_plugin() self.plugin.router_scheduler = importutils.import_object( 'neutron.scheduler.l3_agent_scheduler.ChanceScheduler' ) self._register_l3_agents() class L3AgentChanceSchedulerTestCase(L3SchedulerTestCaseMixin, test_db_base_plugin_v2. 
NeutronDbPluginV2TestCase): def setUp(self): super(L3AgentChanceSchedulerTestCase, self).setUp() # Removes MissingAuthPlugin exception from logs self.patch_notifier = mock.patch( 'neutron.notifiers.batch_notifier.BatchNotifier._notify') self.patch_notifier.start() def test_random_scheduling(self): random_patch = mock.patch('random.choice') random_mock = random_patch.start() def side_effect(seq): return seq[0] random_mock.side_effect = side_effect with self.subnet() as subnet: self._set_net_external(subnet['subnet']['network_id']) with self.router_with_ext_gw(name='r1', subnet=subnet) as r1: agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r1['router']['id']], admin_state_up=True) self.assertEqual(1, len(agents)) self.assertEqual(1, random_mock.call_count) with self.router_with_ext_gw(name='r2', subnet=subnet) as r2: agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r2['router']['id']], admin_state_up=True) self.assertEqual(len(agents), 1) self.assertEqual(2, random_mock.call_count) random_patch.stop() def test_scheduler_auto_schedule_when_agent_added(self): self._set_l3_agent_admin_state(self.adminContext, self.agent_id1, False) self._set_l3_agent_admin_state(self.adminContext, self.agent_id2, False) with self.subnet() as subnet: self._set_net_external(subnet['subnet']['network_id']) with self.router_with_ext_gw(name='r1', subnet=subnet) as r1: agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r1['router']['id']], admin_state_up=True) self.assertEqual(0, len(agents)) self._set_l3_agent_admin_state(self.adminContext, self.agent_id1, True) self.plugin.auto_schedule_routers(self.adminContext, 'host_1') agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r1['router']['id']], admin_state_up=True) self.assertEqual('host_1', agents[0]['host']) class L3AgentLeastRoutersSchedulerTestCase(L3SchedulerTestCaseMixin, test_db_base_plugin_v2. NeutronDbPluginV2TestCase): def setUp(self): super(L3AgentLeastRoutersSchedulerTestCase, self).setUp() self.plugin.router_scheduler = importutils.import_object( 'neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler' ) def test_scheduler(self): # disable one agent to force the scheduling to the only one. self._set_l3_agent_admin_state(self.adminContext, self.agent_id2, False) with self.subnet() as subnet: self._set_net_external(subnet['subnet']['network_id']) with self.router_with_ext_gw(name='r1', subnet=subnet) as r1: agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r1['router']['id']], admin_state_up=True) self.assertEqual(1, len(agents)) agent_id1 = agents[0]['id'] with self.router_with_ext_gw(name='r2', subnet=subnet) as r2: agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r2['router']['id']], admin_state_up=True) self.assertEqual(1, len(agents)) agent_id2 = agents[0]['id'] self.assertEqual(agent_id1, agent_id2) # re-enable the second agent to see whether the next router # spawned will be on this one. self._set_l3_agent_admin_state(self.adminContext, self.agent_id2, True) with self.router_with_ext_gw(name='r3', subnet=subnet) as r3: agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r3['router']['id']], admin_state_up=True) self.assertEqual(1, len(agents)) agent_id3 = agents[0]['id'] self.assertNotEqual(agent_id1, agent_id3) class L3DvrScheduler(l3_db.L3_NAT_db_mixin, l3_dvrscheduler_db.L3_DVRsch_db_mixin): pass class L3DvrSchedulerTestCase(L3SchedulerBaseMixin, test_db_base_plugin_v2. 
NeutronDbPluginV2TestCase): l3_plugin = ('neutron.tests.unit.extensions.test_l3.' 'TestL3NatAgentSchedulingServicePlugin') def setUp(self): if self.l3_plugin: service_plugins = { 'l3_plugin_name': self.l3_plugin, 'flavors_plugin_name': 'neutron.services.flavors.' 'flavors_plugin.FlavorsPlugin' } else: service_plugins = None super(L3DvrSchedulerTestCase, self).setUp('ml2', service_plugins=service_plugins) self.setup_coreplugin('ml2') self.adminContext = n_context.get_admin_context() self.dut = L3DvrScheduler() self.l3plugin = directory.get_plugin(plugin_constants.L3) def test__notify_l3_agent_update_port_with_allowed_address_pairs_revert( self): port_id = uuidutils.generate_uuid() kwargs = { 'context': self.adminContext, 'port': { 'id': port_id, 'admin_state_up': False, portbindings.HOST_ID: 'vm-host', 'device_id': 'vm-id', 'allowed_address_pairs': [ {'ip_address': '10.1.0.201', 'mac_address': 'aa:bb:cc:dd:ee:ff'}], 'device_owner': DEVICE_OWNER_COMPUTE, }, 'original_port': { 'id': port_id, 'admin_state_up': True, portbindings.HOST_ID: 'vm-host', 'device_id': 'vm-id', 'allowed_address_pairs': [ {'ip_address': '10.1.0.201', 'mac_address': 'aa:bb:cc:dd:ee:ff'}], 'device_owner': DEVICE_OWNER_COMPUTE, }, } l3plugin = mock.Mock() directory.add_plugin(plugin_constants.L3, l3plugin) l3_dvrscheduler_db._notify_l3_agent_port_update( 'port', 'after_update', mock.ANY, **kwargs) l3plugin._get_allowed_address_pair_fixed_ips.return_value = ( ['10.1.0.21']) def test__notify_l3_agent_update_port_with_allowed_address_pairs(self): port_id = uuidutils.generate_uuid() kwargs = { 'context': self.adminContext, 'port': { 'id': port_id, portbindings.HOST_ID: 'vm-host', 'allowed_address_pairs': [ {'ip_address': '10.1.0.201', 'mac_address': 'aa:bb:cc:dd:ee:ff'}], 'device_id': 'vm-id', 'device_owner': DEVICE_OWNER_COMPUTE, 'admin_state_up': True, }, 'original_port': { 'id': port_id, portbindings.HOST_ID: 'vm-host', 'device_id': 'vm-id', 'device_owner': DEVICE_OWNER_COMPUTE, 'admin_state_up': True, }, } l3plugin = mock.Mock() directory.add_plugin(plugin_constants.L3, l3plugin) l3_dvrscheduler_db._notify_l3_agent_port_update( 'port', 'after_update', mock.ANY, **kwargs) def test__notify_l3_agent_when_unbound_port_migrates_to_bound_host(self): port_id = 'fake-port' kwargs = { 'context': self.adminContext, 'original_port': { 'id': port_id, portbindings.HOST_ID: '', 'device_owner': '', 'admin_state_up': True, }, 'port': { 'id': port_id, portbindings.HOST_ID: 'vm-host', 'device_owner': DEVICE_OWNER_COMPUTE, 'mac_address': '02:04:05:17:18:19' }, } port = kwargs.get('port') plugin = directory.get_plugin() l3plugin = mock.MagicMock() l3plugin.supported_extension_aliases = [ 'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS, constants.L3_DISTRIBUTED_EXT_ALIAS ] directory.add_plugin(plugin_constants.L3, l3plugin) l3_dvrscheduler_db._notify_l3_agent_port_update( 'port', 'after_update', plugin, **kwargs) l3plugin.dvr_handle_new_service_port.assert_called_once_with( self.adminContext, port, unbound_migrate=True) def test__notify_l3_agent_update_port_no_removing_routers(self): port_id = 'fake-port' kwargs = { 'context': self.adminContext, 'port': None, 'original_port': { 'id': port_id, portbindings.HOST_ID: 'vm-host', 'device_id': 'vm-id', 'device_owner': DEVICE_OWNER_COMPUTE, 'mac_address': '02:04:05:17:18:19' }, 'mac_address_updated': True } plugin = directory.get_plugin() l3plugin = mock.Mock() l3plugin.supported_extension_aliases = [ 'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS, constants.L3_DISTRIBUTED_EXT_ALIAS ] 
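# NOTE: the new 'port' in the kwargs above is None, so this update
# carries nothing DVR-serviceable; the callback below is expected to be
# a no-op that neither schedules a new service port nor unschedules
# routers from the original host. A minimal sketch of the guard this
# test relies on (shape assumed for illustration, not the actual
# implementation):
#
#     def _notify_l3_agent_port_update(resource, event, trigger, **kwargs):
#         new_port = kwargs.get('port')
#         original_port = kwargs.get('original_port')
#         if not (new_port and original_port):
#             return  # nothing to (re)schedule for this update
#         ...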
directory.add_plugin(plugin_constants.L3, l3plugin) l3_dvrscheduler_db._notify_l3_agent_port_update( 'port', 'after_update', plugin, **kwargs) self.assertFalse( l3plugin.dvr_handle_new_service_port.called) self.assertFalse(l3plugin.remove_router_from_l3_agent.called) self.assertFalse(l3plugin.get_dvr_routers_to_remove.called) def test__notify_l3_agent_new_port_action(self): kwargs = { 'context': self.adminContext, 'original_port': None, 'port': { 'device_owner': DEVICE_OWNER_COMPUTE, }, } l3plugin = mock.Mock() directory.add_plugin(plugin_constants.L3, l3plugin) l3_dvrscheduler_db._notify_l3_agent_new_port( 'port', 'after_create', mock.ANY, **kwargs) l3plugin.dvr_handle_new_service_port.assert_called_once_with( self.adminContext, kwargs.get('port')) def test__notify_l3_agent_new_port_no_action(self): kwargs = { 'context': self.adminContext, 'original_port': None, 'port': { 'device_owner': constants.DEVICE_OWNER_NETWORK_PREFIX + 'None', } } l3plugin = mock.Mock() directory.add_plugin(plugin_constants.L3, l3plugin) l3_dvrscheduler_db._notify_l3_agent_new_port( 'port', 'after_create', mock.ANY, **kwargs) self.assertFalse( l3plugin.dvr_handle_new_service_port.called) def test__notify_l3_agent_update_port_with_migration_port_profile(self): kwargs = { 'context': self.adminContext, 'original_port': { portbindings.HOST_ID: 'vm-host', 'device_owner': DEVICE_OWNER_COMPUTE, 'id': uuidutils.generate_uuid() }, 'port': { portbindings.HOST_ID: 'vm-host', 'device_owner': DEVICE_OWNER_COMPUTE, portbindings.PROFILE: {'migrating_to': 'vm-host2'}, }, } l3plugin = mock.MagicMock() directory.add_plugin(plugin_constants.L3, l3plugin) with mock.patch.object(l3plugin, '_get_floatingips_by_port_id', return_value=[]): l3_dvrscheduler_db._notify_l3_agent_port_update( 'port', 'after_update', mock.ANY, **kwargs) l3plugin.dvr_handle_new_service_port.assert_called_once_with( self.adminContext, kwargs.get('port'), dest_host='vm-host2', router_id=None) def test__notify_l3_agent_update_port_no_action(self): kwargs = { 'context': self.adminContext, 'original_port': { portbindings.HOST_ID: 'vm-host', 'device_owner': DEVICE_OWNER_COMPUTE, }, 'port': { portbindings.HOST_ID: 'vm-host', 'device_owner': DEVICE_OWNER_COMPUTE, }, } l3plugin = mock.Mock() directory.add_plugin(plugin_constants.L3, l3plugin) l3_dvrscheduler_db._notify_l3_agent_port_update( 'port', 'after_update', mock.ANY, **kwargs) self.assertFalse( l3plugin.dvr_handle_new_service_port.called) self.assertFalse(l3plugin.remove_router_from_l3_agent.called) self.assertFalse(l3plugin.get_dvr_routers_to_remove.called) def test__notify_l3_agent_update_port_with_mac_address_update(self): kwargs = { 'context': self.adminContext, 'original_port': { portbindings.HOST_ID: 'vm-host', 'device_owner': DEVICE_OWNER_COMPUTE, 'mac_address': '02:04:05:17:18:19' }, 'port': { portbindings.HOST_ID: 'vm-host', 'device_owner': DEVICE_OWNER_COMPUTE, 'mac_address': '02:04:05:17:18:29' }, 'mac_address_updated': True } l3plugin = mock.Mock() directory.add_plugin(plugin_constants.L3, l3plugin) l3_dvrscheduler_db._notify_l3_agent_port_update( 'port', 'after_update', mock.ANY, **kwargs) self.assertFalse(l3plugin.dvr_handle_new_service_port.called) def test__notify_l3_agent_update_port_with_ip_update(self): kwargs = { 'context': self.adminContext, 'original_port': { portbindings.HOST_ID: 'vm-host', 'device_owner': constants.DEVICE_OWNER_ROUTER_GW, 'fixed_ips': [{'ip_address': '1.1.1.1'}], 'mac_address': '02:04:05:17:18:19' }, 'port': { portbindings.HOST_ID: 'vm-host', 'device_owner': 
constants.DEVICE_OWNER_ROUTER_GW, 'fixed_ips': [{'ip_address': '2.2.2.2'}], 'mac_address': '02:04:05:17:18:19' }, 'mac_address_updated': False } l3plugin = mock.Mock() directory.add_plugin(plugin_constants.L3, l3plugin) l3_dvrscheduler_db._notify_l3_agent_port_update( 'port', 'after_update', mock.ANY, **kwargs) self.assertFalse(l3plugin.dvr_handle_new_service_port.called) def test__notify_l3_agent_update_port_without_ip_change(self): kwargs = { 'context': self.adminContext, 'original_port': { portbindings.HOST_ID: 'vm-host', 'device_owner': constants.DEVICE_OWNER_ROUTER_GW, 'fixed_ips': [{'ip_address': '1.1.1.1'}], }, 'port': { portbindings.HOST_ID: 'vm-host', 'device_owner': constants.DEVICE_OWNER_ROUTER_GW, 'fixed_ips': [{'ip_address': '1.1.1.1'}], }, } l3plugin = mock.Mock() directory.add_plugin(plugin_constants.L3, l3plugin) l3_dvrscheduler_db._notify_l3_agent_port_update( 'port', 'after_update', mock.ANY, **kwargs) self.assertFalse(l3plugin.dvr_handle_new_service_port.called) def test__notify_l3_agent_port_binding_change(self): self._test__notify_l3_agent_port_binding_change() def test__notify_l3_agent_port_binding_change_removed_routers(self): router_to_remove = [{'agent_id': 'foo_agent', 'router_id': 'foo_id', 'host': 'vm-host1'}] self._test__notify_l3_agent_port_binding_change(router_to_remove) def test__notify_l3_agent_port_binding_change_removed_routers_fip(self): fip = {'router_id': 'router_id'} router_to_remove = [{'agent_id': 'foo_agent', 'router_id': 'foo_id', 'host': 'vm-host1'}] self._test__notify_l3_agent_port_binding_change( router_to_remove, fip, router_id=fip['router_id']) def test__notify_l3_agent_port_binding_change_with_fip(self): fip = {'router_id': 'router_id'} self._test__notify_l3_agent_port_binding_change( None, fip, router_id=fip['router_id']) def test__notify_l3_agent_port_binding_change_fip_dvr(self): fip = {'router_id': 'router_id'} is_distributed = True self._test__notify_l3_agent_port_binding_change( None, fip, is_distributed, router_id=fip['router_id']) def test__notify_l3_agent_port_binding_change_fip_dvr_rmrt(self): fip = {'router_id': 'router_id'} router_to_remove = [{'agent_id': 'foo_agent', 'router_id': 'foo_id', 'host': 'vm-host1'}] is_distributed = True self._test__notify_l3_agent_port_binding_change( router_to_remove, fip, is_distributed, router_id=fip['router_id']) def test__notify_l3_agent_port_binding_change_fip_dvr_on_rmrt(self): fip = {'router_id': 'foo_id'} router_to_remove = [{'agent_id': 'foo_agent', 'router_id': 'foo_id', 'host': 'vm-host1'}] is_distributed = True self._test__notify_l3_agent_port_binding_change( router_to_remove, fip, is_distributed, router_id=fip['router_id']) def _test__notify_l3_agent_port_binding_change(self, routers_to_remove=None, fip=None, is_distributed=False, router_id=None): source_host = 'vm-host1' kwargs = { 'context': self.adminContext, 'original_port': { 'id': uuidutils.generate_uuid(), portbindings.HOST_ID: source_host, 'device_owner': DEVICE_OWNER_COMPUTE, }, 'port': { portbindings.HOST_ID: 'vm-host2', 'device_owner': DEVICE_OWNER_COMPUTE, }, } l3plugin = mock.Mock() directory.add_plugin(plugin_constants.L3, l3plugin) with mock.patch.object(l3plugin, 'get_dvr_routers_to_remove', return_value=routers_to_remove if routers_to_remove else []),\ mock.patch.object(l3plugin, '_get_floatingips_by_port_id', return_value=[fip] if fip else []),\ mock.patch.object(l3_dvr_db, 'is_distributed_router', return_value=is_distributed): l3_dvrscheduler_db._notify_l3_agent_port_update( 'port', 'after_update', mock.ANY, 
**kwargs) if routers_to_remove: (l3plugin.l3_rpc_notifier.router_removed_from_agent. assert_called_once_with(mock.ANY, 'foo_id', source_host)) if fip and is_distributed and not (routers_to_remove and fip['router_id'] == routers_to_remove[0]['router_id']): (l3plugin.l3_rpc_notifier.routers_updated_on_host. assert_called_once_with(mock.ANY, ['router_id'], source_host)) l3plugin.dvr_handle_new_service_port.assert_called_once_with( self.adminContext, kwargs.get('port'), dest_host=None, router_id=router_id) def test__notify_l3_agent_update_port_removing_routers(self): port_id = 'fake-port' source_host = 'vm-host' kwargs = { 'context': self.adminContext, 'port': { 'id': port_id, portbindings.HOST_ID: None, 'device_id': '', 'device_owner': '' }, 'mac_address_updated': False, 'original_port': { 'id': port_id, portbindings.HOST_ID: source_host, 'device_id': 'vm-id', 'device_owner': DEVICE_OWNER_COMPUTE } } plugin = directory.get_plugin() l3plugin = mock.Mock() l3plugin.supported_extension_aliases = [ 'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS, constants.L3_DISTRIBUTED_EXT_ALIAS ] directory.add_plugin(plugin_constants.L3, l3plugin) with mock.patch.object(l3plugin, 'get_dvr_routers_to_remove', return_value=[{'agent_id': 'foo_agent', 'router_id': 'foo_id', 'host': source_host}]),\ mock.patch.object(l3plugin, '_get_floatingips_by_port_id', return_value=[]): l3_dvrscheduler_db._notify_l3_agent_port_update( 'port', 'after_update', plugin, **kwargs) self.assertFalse( l3plugin.dvr_handle_new_service_port.called) (l3plugin.l3_rpc_notifier.router_removed_from_agent. assert_called_once_with(mock.ANY, 'foo_id', source_host)) def test__notify_port_delete(self): plugin = directory.get_plugin() l3plugin = mock.Mock() l3plugin.supported_extension_aliases = [ 'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS, constants.L3_DISTRIBUTED_EXT_ALIAS ] directory.add_plugin(plugin_constants.L3, l3plugin) port = { 'id': uuidutils.generate_uuid(), 'device_id': 'abcd', 'device_owner': DEVICE_OWNER_COMPUTE_NOVA, portbindings.HOST_ID: 'host1', } kwargs = { 'context': self.adminContext, 'port': port, 'removed_routers': [ {'agent_id': 'foo_agent', 'router_id': 'foo_id'}, ], } removed_routers = [{'agent_id': 'foo_agent', 'router_id': 'foo_id', 'host': 'foo_host'}] l3plugin.get_dvr_routers_to_remove.return_value = removed_routers l3_dvrscheduler_db._notify_port_delete( 'port', 'after_delete', plugin, **kwargs) (l3plugin.l3_rpc_notifier.router_removed_from_agent.
assert_called_once_with(mock.ANY, 'foo_id', 'foo_host')) def test_dvr_handle_new_service_port(self): port = { 'id': 'port1', 'device_id': 'abcd', 'device_owner': DEVICE_OWNER_COMPUTE_NOVA, portbindings.HOST_ID: 'host1', 'fixed_ips': [ { 'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0', 'ip_address': '10.10.10.3' } ] } dvr_ports = [ { 'id': 'dvr_port1', 'device_id': 'r1', 'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE, 'fixed_ips': [ { 'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0', 'ip_address': '10.10.10.1' } ] }, { 'id': 'dvr_port2', 'device_id': 'r2', 'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE, 'fixed_ips': [ { 'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0', 'ip_address': '10.10.10.123' } ] } ] agent_on_host = {'id': 'agent1'} with mock.patch( 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_ports', return_value=dvr_ports),\ mock.patch('neutron.api.rpc.agentnotifiers.l3_rpc_agent_api' '.L3AgentNotifyAPI'),\ mock.patch.object( self.dut, 'get_l3_agents', return_value=[agent_on_host]) as get_l3_agents: self.dut.dvr_handle_new_service_port( self.adminContext, port) get_l3_agents.assert_called_once_with( self.adminContext, filters={'host': [port[portbindings.HOST_ID]]}) self.dut.l3_rpc_notifier.routers_updated_on_host.\ assert_called_once_with(self.adminContext, {'r1', 'r2'}, 'host1') self.assertFalse(self.dut.l3_rpc_notifier.routers_updated.called) def test_get_dvr_routers_by_subnet_ids(self): subnet_id = '80947d4a-fbc8-484b-9f92-623a6bfcf3e0' dvr_port = { 'id': 'dvr_port1', 'device_id': 'r1', 'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE, 'fixed_ips': [ { 'subnet_id': subnet_id, 'ip_address': '10.10.10.1' } ] } r1 = { 'id': 'r1', 'distributed': True, } with mock.patch( 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_port', return_value=dvr_port),\ mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_ports', return_value=[dvr_port]): router_id = self.dut.get_dvr_routers_by_subnet_ids( self.adminContext, [subnet_id]) self.assertEqual(r1['id'], router_id.pop()) def test_get_subnet_ids_on_router(self): dvr_port = { 'id': 'dvr_port1', 'device_id': 'r1', 'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE, 'fixed_ips': [ { 'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0', 'ip_address': '10.10.10.1' } ] } r1 = { 'id': 'r1', 'distributed': True, } with mock.patch( 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_ports', return_value=[dvr_port]): sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext, r1['id']) self.assertEqual(sub_ids.pop(), dvr_port.get('fixed_ips').pop(0).get('subnet_id')) def test_get_subnet_ids_on_router_no_subnet(self): dvr_port = { 'id': 'dvr_port1', 'device_id': 'r1', 'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE, 'fixed_ips': [] } r1 = { 'id': 'r1', 'distributed': True, } with mock.patch.object(db_v2.NeutronDbPluginV2, 'get_ports', return_value=[dvr_port]): sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext, r1['id']) self.assertEqual(0, len(sub_ids)) def test__check_dvr_serviceable_ports_on_host(self): # HOST_DVR = 'my_l3_host_dvr' # HOST_DVR_SNAT = 'my_l3_host_dvr_snat' # HOST_DVR is a sub-string of HOST_DVR_SNAT self._register_l3_dvr_agents() host_args = {'admin_state_up': True, portbindings.PROFILE: {'migrating to': HOST_DVR_SNAT}} with self.network() as network: with self.subnet(network=network) as subnet: subnet_ids = [] subnet_ids.append(subnet['subnet']['id']) with self.port(subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE, arg_list=('admin_state_up', 
portbindings.PROFILE,), **host_args): # Check DVR serviceable ports on HOST_DVR_SNAT. # Should find existence since it is an exact match to the # target host name of the port binding profile. result0 = self.l3plugin. \ _check_dvr_serviceable_ports_on_host(self.adminContext, self.l3_dvr_snat_agent['host'], subnet_ids) # Check DVR serviceable ports on HOST_DVR. # Should not find existence since the sub-string won't get # matched with the target host. result1 = self.l3plugin. \ _check_dvr_serviceable_ports_on_host(self.adminContext, self.l3_dvr_agent['host'], subnet_ids) self.assertTrue(result0) self.assertFalse(result1) def _prepare_schedule_snat_tests(self): agent = agent_obj.Agent(mock.ANY, id=uuidutils.generate_uuid()) agent.admin_state_up = True agent.heartbeat_timestamp = timeutils.utcnow() router = { 'id': 'foo_router_id', 'distributed': True, 'external_gateway_info': { 'network_id': uuidutils.generate_uuid(), 'enable_snat': True } } return agent, router class L3HAPlugin(db_v2.NeutronDbPluginV2, l3_hamode_db.L3_HA_NAT_db_mixin, l3_hascheduler_db.L3_HA_scheduler_db_mixin): supported_extension_aliases = [l3_ext_ha_mode.ALIAS, router_availability_zone.ALIAS] @classmethod def get_plugin_type(cls): return plugin_constants.L3 def get_plugin_description(self): return "L3 Routing Service Plugin for testing" class L3HATestCaseMixin(testlib_api.SqlTestCase, L3SchedulerBaseMixin): def setUp(self): super(L3HATestCaseMixin, self).setUp() self.adminContext = n_context.get_admin_context() mock.patch.object(n_rpc, 'get_client').start() self.setup_coreplugin('ml2', load_plugins=False) cfg.CONF.set_override('service_plugins', ['neutron.tests.unit.scheduler.' 'test_l3_agent_scheduler.L3HAPlugin']) cfg.CONF.set_override('max_l3_agents_per_router', 0) manager.init() self.plugin = directory.get_plugin(plugin_constants.L3) self.plugin.router_scheduler = importutils.import_object( 'neutron.scheduler.l3_agent_scheduler.ChanceScheduler' ) self._register_l3_agents() make_res = mock.patch.object(quota.QuotaEngine, 'make_reservation') self.mock_make_res = make_res.start() commit_res = mock.patch.object(quota.QuotaEngine, 'commit_reservation') self.mock_quota_commit_res = commit_res.start() @staticmethod def get_router_l3_agent_binding(context, router_id, l3_agent_id=None, binding_index=None): args = {'router_id': router_id} if l3_agent_id: args['l3_agent_id'] = l3_agent_id if binding_index: args['binding_index'] = binding_index return rb_obj.RouterL3AgentBinding.get_objects(context, **args) def _create_ha_router(self, ha=True, tenant_id='tenant1', az_hints=None): self.adminContext.tenant_id = tenant_id router = {'name': 'router1', 'admin_state_up': True, 'tenant_id': tenant_id} if ha is not None: router['ha'] = ha if az_hints is None: az_hints = [] router['availability_zone_hints'] = az_hints return self.plugin.create_router(self.adminContext, {'router': router}) def test_create_ha_port_and_bind_catch_integrity_error(self): router = self._create_ha_router(tenant_id='foo_tenant') self.plugin.schedule_router(self.adminContext, router['id']) agent = {'id': 'foo_agent'} orig_fn = orm.Session.add def db_ref_err_for_add_haportbinding(s, instance): if instance.__class__.__name__ == 'L3HARouterAgentPortBinding': instance.router_id = 'nonexistent_router' return orig_fn(s, instance) with mock.patch.object(self.plugin.router_scheduler, 'bind_router'): with mock.patch.object( orm.Session, 'add', side_effect=db_ref_err_for_add_haportbinding, autospec=True): self.plugin.router_scheduler.create_ha_port_and_bind( self.plugin, 
self.adminContext, router['id'], router['tenant_id'], agent) def test_create_ha_port_and_bind_wont_create_redundant_ports(self): # When migrating from HA to DVR+HA router, create_ha_port_and_bind # should create only one network:router_ha_interface port on a router # when binding to same agent. So we need only one agent for testing # (preferably with dvr_snat mode). agent_obj.Agent.update_objects( self.adminContext, {'admin_state_up': False}) l3_dvr_snat_agent = helpers.register_l3_agent( 'fake_l3_host_dvr_snat', constants.L3_AGENT_MODE_DVR_SNAT) router = self._create_ha_router(tenant_id='foo_tenant') self.plugin.schedule_router(self.adminContext, router['id']) router['admin_state_up'] = False updated_router1 = self.plugin.update_router( self.adminContext, router['id'], {'router': router}) updated_router1['distributed'] = True self.plugin.update_router( self.adminContext, router['id'], {'router': updated_router1}) self.plugin.router_scheduler.create_ha_port_and_bind( self.plugin, self.adminContext, router['id'], router['tenant_id'], l3_dvr_snat_agent) filters = {'device_owner': ['network:router_ha_interface'], 'device_id': [router['id']]} self.core_plugin = directory.get_plugin() ports = self.core_plugin.get_ports( self.adminContext, filters=filters) self.assertEqual(1, len(ports)) def test_create_ha_port_and_bind_catch_router_not_found(self): router = self._create_ha_router(tenant_id='foo_tenant') self.plugin.schedule_router(self.adminContext, router['id']) agent = {'id': 'foo_agent'} with mock.patch.object(self.plugin.router_scheduler, 'bind_router'): with mock.patch.object( self.plugin, 'add_ha_port', side_effect=l3_exc.RouterNotFound( router_id='foo_router')),\ mock.patch.object( self.plugin, 'safe_delete_ha_network') as sd_ha_net: self.plugin.router_scheduler.create_ha_port_and_bind( self.plugin, self.adminContext, router['id'], router['tenant_id'], agent) self.assertTrue(sd_ha_net.called) def test_create_ha_port_and_bind_bind_router_returns_None(self): router = self._create_ha_router(tenant_id='foo_tenant') agent = {'id': 'foo_agent'} with mock.patch.object(self.plugin.router_scheduler, 'bind_router', return_value=None): with mock.patch.object(self.plugin, 'add_ha_port') as add_ha_port: self.plugin.router_scheduler.create_ha_port_and_bind( self.plugin, self.adminContext, router['id'], router['tenant_id'], agent) self.assertFalse(add_ha_port.called) class VacantBindingIndexTestCase(L3HATestCaseMixin): """Test various scenarios for get_vacant_binding_index(). binding_index The binding_index we want to delete/unschedule. is_manual_scheduling Whether or not this is a scheduling requested by the user (`neutron l3-agent-router-add`) or by some worker (scheduler or RPC from agent). If this is a manual scheduling we should always comply. 
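For example, with max_l3_agents_per_router set to 3 and a fully scheduled router holding binding indices [1, 2, 3], deleting the binding with index 2 should make get_vacant_binding_index() hand out 2 again; with no binding deleted, a manual add is given index 4 (one past the limit) while an automatic request gets -1.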
""" binding_scenarios = [ ('Delete first binding_index', dict(binding_index=1)), ('Delete middle binding_index', dict(binding_index=2)), ('Delete last binding_index', dict(binding_index=3)), ('Do not remove any bindings', dict(binding_index=None)), ] manual_scheduling_scenarios = [ ('with manual scheduling', dict(is_manual_scheduling=True)), ('without manual scheduling', dict(is_manual_scheduling=False)), ] scenarios = testscenarios.multiply_scenarios( binding_scenarios, manual_scheduling_scenarios) def test_get_vacant_binding_index(self): helpers.register_l3_agent('host_3') cfg.CONF.set_override('max_l3_agents_per_router', 3) router = self._create_ha_router() if self.binding_index: bindings = self.get_router_l3_agent_binding( self.adminContext, router['id'], binding_index=self.binding_index) self.assertEqual(1, len(bindings)) bindings[0].delete() vacant_binding_index = self.plugin.get_vacant_binding_index( self.adminContext, router['id'], self.is_manual_scheduling) if self.binding_index: self.assertEqual(self.binding_index, vacant_binding_index) else: if self.is_manual_scheduling: # If this is a manual scheduling, the user requested the # binding so we should always provide a new one. self.assertEqual(cfg.CONF.max_l3_agents_per_router + 1, vacant_binding_index) else: # Else, we already have 3 so -1 is the 'error' value. self.assertEqual(-1, vacant_binding_index) class L3_HA_scheduler_db_mixinTestCase(L3HATestCaseMixin): def _register_l3_agents(self, plugin=None): super(L3_HA_scheduler_db_mixinTestCase, self)._register_l3_agents(plugin=plugin) self.agent3 = helpers.register_l3_agent(host='host_3') self.agent_id3 = self.agent3.id self.agent4 = helpers.register_l3_agent(host='host_4') self.agent_id4 = self.agent4.id def test_get_routers_l3_agents_count(self): router1 = self._create_ha_router() cfg.CONF.set_override('max_l3_agents_per_router', 2) router2 = self._create_ha_router() router3 = self._create_ha_router(ha=False) result = self.plugin.get_routers_l3_agents_count(self.adminContext) self.assertEqual(3, len(result)) check_result = [(router['id'], agents) for router, agents in result] self.assertIn((router1['id'], 4), check_result) self.assertIn((router2['id'], 2), check_result) self.assertIn((router3['id'], 0), check_result) def test_get_ordered_l3_agents_by_num_routers(self): # Mock scheduling so that the test can control it explicitly mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin, '_notify_router_updated').start() with mock.patch.object(self.plugin, 'schedule_router'): router1 = self._create_ha_router() router2 = self._create_ha_router() router3 = self._create_ha_router(ha=False) router4 = self._create_ha_router(ha=False) # Agent 1 will host 0 routers, agent 2 will host 1, agent 3 will # host 2, and agent 4 will host 3. 
self.plugin.schedule_router(self.adminContext, router1['id'], candidates=[self.agent2, self.agent4]) self.plugin.schedule_router(self.adminContext, router2['id'], candidates=[self.agent3, self.agent4]) self.plugin.schedule_router(self.adminContext, router3['id'], candidates=[self.agent3]) self.plugin.schedule_router(self.adminContext, router4['id'], candidates=[self.agent4]) agent_ids = [self.agent_id1, self.agent_id2, self.agent_id3, self.agent_id4] result = self.plugin.get_l3_agents_ordered_by_num_routers( self.adminContext, agent_ids) self.assertEqual(agent_ids, [record['id'] for record in result]) class L3AgentSchedulerDbMixinTestCase(L3HATestCaseMixin): def _setup_ha_router(self): router = self._create_ha_router() agents = self._get_agents_scheduled_for_router(router) return router, agents def test_reschedule_ha_routers_from_down_agents(self): agents = self._setup_ha_router()[1] self.assertEqual(2, len(agents)) self._set_l3_agent_dead(self.agent_id1) with mock.patch.object(self.plugin, 'reschedule_router') as reschedule: self.plugin.reschedule_routers_from_down_agents() self.assertFalse(reschedule.called) def test_list_l3_agents_hosting_ha_router(self): router = self._create_ha_router() agents = self.plugin.list_l3_agents_hosting_router( self.adminContext, router['id'])['agents'] for agent in agents: self.assertEqual('standby', agent['ha_state']) self.plugin.update_routers_states( self.adminContext, {router['id']: 'active'}, self.agent1.host) agents = self.plugin.list_l3_agents_hosting_router( self.adminContext, router['id'])['agents'] for agent in agents: expected_state = ('active' if agent['host'] == self.agent1.host else 'standby') self.assertEqual(expected_state, agent['ha_state']) def test_list_l3_agents_hosting_legacy_router(self): router = self._create_ha_router(ha=False) self.plugin.schedule_router(self.adminContext, router['id']) agent = self.plugin.list_l3_agents_hosting_router( self.adminContext, router['id'])['agents'][0] self.assertIsNone(agent['ha_state']) def test_get_agents_dict_for_router_unscheduled_returns_empty_list(self): self.assertEqual({'agents': []}, self.plugin._get_agents_dict_for_router([])) def test_router_doesnt_support_scheduling(self): with mock.patch.object(self.plugin, 'router_supports_scheduling', return_value=False): agent = helpers.register_l3_agent(host='myhost_3') with testtools.ExpectedException( l3agent.RouterDoesntSupportScheduling): self.plugin.add_router_to_l3_agent( self.adminContext, agent.id, 'router_id') def test_manual_add_ha_router_to_agent(self): cfg.CONF.set_override('max_l3_agents_per_router', 2) router, agents = self._setup_ha_router() self.assertEqual(2, len(agents)) agent = helpers.register_l3_agent(host='myhost_3') # We allow to exceed max l3 agents per router via manual scheduling self.plugin.add_router_to_l3_agent( self.adminContext, agent.id, router['id']) agents = self._get_agents_scheduled_for_router(router) self.assertIn(agent.id, [_agent.id for _agent in agents]) self.assertEqual(3, len(agents)) def test_manual_remove_ha_router_from_agent(self): router, agents = self._setup_ha_router() self.assertEqual(2, len(agents)) agent = agents.pop() # Remove router from agent and make sure it is removed self.plugin.remove_router_from_l3_agent( self.adminContext, agent.id, router['id']) agents = self._get_agents_scheduled_for_router(router) self.assertEqual(1, len(agents)) self.assertNotIn(agent.id, [_agent.id for _agent in agents]) def test_manual_remove_ha_router_from_all_agents(self): router, agents = self._setup_ha_router() 
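# max_l3_agents_per_router is overridden to 0 (unlimited) in
# L3HATestCaseMixin.setUp(), so the HA router is expected to be bound
# to both of the two live agents registered for these tests.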
self.assertEqual(2, len(agents)) agent = agents.pop() self.plugin.remove_router_from_l3_agent( self.adminContext, agent.id, router['id']) agent = agents.pop() self.plugin.remove_router_from_l3_agent( self.adminContext, agent.id, router['id']) agents = self._get_agents_scheduled_for_router(router) self.assertEqual(0, len(agents)) def _get_agents_scheduled_for_router(self, router): return self.plugin.get_l3_agents_hosting_routers( self.adminContext, [router['id']], admin_state_up=True) def test_delete_ha_interfaces_from_agent(self): router, agents = self._setup_ha_router() agent = agents.pop() self.plugin.remove_router_from_l3_agent( self.adminContext, agent.id, router['id']) objs = l3_hamode.L3HARouterAgentPortBinding.get_objects( self.adminContext, router_id=router['id']) results = [binding.l3_agent_id for binding in objs] self.assertNotIn(agent.id, results) def test_add_ha_interface_to_l3_agent(self): agent = self.plugin.get_agents_db(self.adminContext)[0] router = self._create_ha_router() self.plugin.add_router_to_l3_agent(self.adminContext, agent.id, router['id']) # Verify agent has HA interface ha_ports = self.plugin.get_ha_router_port_bindings(self.adminContext, [router['id']]) self.assertIn(agent.id, [ha_port.l3_agent_id for ha_port in ha_ports]) def test_schedule_routers_unique_binding_indices(self): cfg.CONF.set_override('max_l3_agents_per_router', 2) router = self._create_ha_router() bindings = self.get_router_l3_agent_binding(self.adminContext, router['id']) binding_indices = [binding.binding_index for binding in bindings] self.assertEqual(list(range(1, cfg.CONF.max_l3_agents_per_router + 1)), binding_indices) def test_bind_router_twice_for_non_ha(self): router = self._create_ha_router(ha=False) self.plugin.router_scheduler.bind_router(self.plugin, self.adminContext, router['id'], self.agent_id1) self.plugin.router_scheduler.bind_router(self.plugin, self.adminContext, router['id'], self.agent_id2) # Make sure the second bind_router call didn't schedule the router to # more agents than allowed. agents = self.plugin.get_l3_agents_hosting_routers(self.adminContext, [router['id']]) self.assertEqual(1, len(agents)) # Moreover, make sure that the agent that did get bound only got bound # once.
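# i.e. exactly one RouterL3AgentBinding row may exist for the
# (router, agent) pair that won the scheduling race.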
bindings = self.get_router_l3_agent_binding( self.adminContext, router['id'], l3_agent_id=agents[0]['id']) self.assertEqual(1, len(bindings)) class L3HAChanceSchedulerTestCase(L3HATestCaseMixin): def test_scheduler_with_ha_enabled(self): router = self._create_ha_router() agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [router['id']], admin_state_up=True) self.assertEqual(2, len(agents)) for agent in agents: sync_data = self.plugin.get_ha_sync_data_for_host( self.adminContext, router_ids=[router['id']], host=agent.host, agent=agent) self.assertEqual(1, len(sync_data)) interface = sync_data[0][constants.HA_INTERFACE_KEY] self.assertIsNotNone(interface) def test_auto_schedule(self): # Mock scheduling so that the test can control it explicitly mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin, '_notify_router_updated').start() router = self._create_ha_router() self.plugin.auto_schedule_routers(self.adminContext, self.agent1.host) self.plugin.auto_schedule_routers(self.adminContext, self.agent2.host) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [router['id']]) self.assertEqual(2, len(agents)) def test_auto_schedule_specific_router_when_agent_added(self): self._auto_schedule_when_agent_added(True) def test_auto_schedule_all_routers_when_agent_added(self): self._auto_schedule_when_agent_added(False) def test_auto_schedule_ha_router_when_incompatible_agent_exist(self): handle_internal_only_routers_agent = helpers.register_l3_agent( 'host_3', constants.L3_AGENT_MODE_LEGACY, internal_only=False) router = self._create_ha_router() self.plugin.auto_schedule_routers( self.adminContext, handle_internal_only_routers_agent.host) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [router['id']], admin_state_up=True) agent_ids = [agent['id'] for agent in agents] self.assertEqual(2, len(agents)) self.assertNotIn(handle_internal_only_routers_agent.id, agent_ids) def test_auto_schedule_ha_router_when_dvr_agent_exist(self): dvr_agent = helpers.register_l3_agent( HOST_DVR, constants.L3_AGENT_MODE_DVR) router = self._create_ha_router() self.plugin.auto_schedule_routers(self.adminContext, dvr_agent.host) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [router['id']], admin_state_up=True) agent_ids = [agent['id'] for agent in agents] self.assertEqual(2, len(agents)) self.assertNotIn(dvr_agent.id, agent_ids) def _auto_schedule_when_agent_added(self, specific_router): router = self._create_ha_router() agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [router['id']], admin_state_up=True) self.assertEqual(2, len(agents)) agent_ids = [agent['id'] for agent in agents] self.assertIn(self.agent_id1, agent_ids) self.assertIn(self.agent_id2, agent_ids) agent = helpers.register_l3_agent(host='host_3') self.agent_id3 = agent.id self.plugin.auto_schedule_routers(self.adminContext, 'host_3') agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [router['id']], admin_state_up=True) self.assertEqual(3, len(agents)) # Simulate agent restart to make sure we don't try to re-bind self.plugin.auto_schedule_routers(self.adminContext, 'host_3') class L3HALeastRoutersSchedulerTestCase(L3HATestCaseMixin): def _register_l3_agents(self, plugin=None): super(L3HALeastRoutersSchedulerTestCase, self)._register_l3_agents(plugin=plugin) agent = helpers.register_l3_agent(host='host_3') self.agent_id3 = agent.id agent = helpers.register_l3_agent(host='host_4') self.agent_id4 = agent.id def setUp(self): 
super(L3HALeastRoutersSchedulerTestCase, self).setUp() self.plugin.router_scheduler = importutils.import_object( 'neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler' ) def test_scheduler(self): cfg.CONF.set_override('max_l3_agents_per_router', 2) # disable the third and fourth agents to be sure that the router will # be scheduled to one of the first two self._set_l3_agent_admin_state(self.adminContext, self.agent_id3, False) self._set_l3_agent_admin_state(self.adminContext, self.agent_id4, False) r1 = self._create_ha_router() agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r1['id']], admin_state_up=True) self.assertEqual(2, len(agents)) agent_ids = [agent['id'] for agent in agents] self.assertIn(self.agent_id1, agent_ids) self.assertIn(self.agent_id2, agent_ids) self._set_l3_agent_admin_state(self.adminContext, self.agent_id3, True) self._set_l3_agent_admin_state(self.adminContext, self.agent_id4, True) r2 = self._create_ha_router() agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r2['id']], admin_state_up=True) self.assertEqual(2, len(agents)) agent_ids = [agent['id'] for agent in agents] self.assertIn(self.agent_id3, agent_ids) self.assertIn(self.agent_id4, agent_ids) class TestGetL3AgentsWithFilter(testlib_api.SqlTestCase, L3SchedulerBaseMixin): """Test cases to test get_l3_agents. 6 l3 agents are registered in the order - legacy, dvr_snat, dvr, dvr_no_external, fake_mode and legacy """ scenarios = [ ('no filter', dict(agent_modes=[], host=['host_1'], expected_agent_modes=['legacy', 'dvr_snat', 'dvr', 'dvr_no_external', 'fake_mode', 'legacy'], expected_host=['host_1'])), ('legacy', dict(agent_modes=['legacy'], host=['host_1'], expected_agent_modes=['legacy', 'legacy'], expected_host=['host_1'])), ('dvr_snat', dict(agent_modes=['dvr_snat'], host=['host_2'], expected_agent_modes=['dvr_snat'], expected_host=['host_2'])), ('dvr', dict(agent_modes=['dvr'], host=['host_3'], expected_agent_modes=['dvr'], expected_host=['host_3'])), ('dvr_no_external', dict(agent_modes=['dvr_no_external'], host=['host_4'], expected_agent_modes=['dvr_no_external'], expected_host=['host_4'])), ('dvr_snat and dvr', dict(agent_modes=['dvr_snat', 'dvr'], host=['host_5'], expected_agent_modes=['dvr_snat', 'dvr'], expected_host=['host_5'])), ('dvr_snat and dvr_no_external', dict(agent_modes=['dvr_snat', 'dvr_no_external'], host=['host_5'], expected_agent_modes=['dvr_snat', 'dvr_no_external'], expected_host=['host_5'])), ('dvr_snat, dvr and dvr_no_external', dict(agent_modes=['dvr_snat', 'dvr', 'dvr_no_external'], host=['host_6'], expected_agent_modes=['dvr_snat', 'dvr', 'dvr_no_external'], expected_host=['host_6'])), ('invalid', dict(agent_modes=['invalid'], host=['host_invalid'], expected_agent_modes=[], expected_host=[])), ] def setUp(self): super(TestGetL3AgentsWithFilter, self).setUp() self.plugin = L3HAPlugin() self.setup_coreplugin('ml2') self.adminContext = n_context.get_admin_context() hosts = ['host_1', 'host_2', 'host_3', 'host_4', 'host_5', 'host_6'] agent_modes = ['legacy', 'dvr_snat', 'dvr', 'dvr_no_external', 'fake_mode', 'legacy'] for host, agent_mode in zip(hosts, agent_modes): helpers.register_l3_agent(host, agent_mode) class TestGetL3AgentsWithAgentModeFilter(TestGetL3AgentsWithFilter): """Test cases to test get_l3_agents 'agent_mode'. This class tests the L3AgentSchedulerDbMixin.get_l3_agents() for the 'agent_mode' filter with various values.
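For instance, filters={'agent_modes': ['dvr_snat', 'dvr']} should return exactly the dvr_snat and dvr agents registered in setUp(), while an unknown mode yields an empty list.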
""" def _get_agent_mode(self, agent): agent_conf = self.plugin.get_configuration_dict(agent) return agent_conf.get('agent_mode', 'None') def test_get_l3_agents(self): l3_agents = self.plugin.get_l3_agents( self.adminContext, filters={'agent_modes': self.agent_modes}) self.assertEqual(len(self.expected_agent_modes), len(l3_agents)) returned_agent_modes = [self._get_agent_mode(agent) for agent in l3_agents] self.assertItemsEqual(self.expected_agent_modes, returned_agent_modes) class TestGetL3AgentsWithHostFilter(TestGetL3AgentsWithFilter): """Test cases to test get_l3_agents 'hosts'. This class tests the L3AgentSchedulerDbMixin.get_l3_agents() for the 'host' filter with various values. """ def _get_host(self, agent): return agent.get('host', 'None') def test_get_l3_agents(self): l3_agents = self.plugin.get_l3_agents( self.adminContext, filters={'host': self.host}) self.assertEqual(len(self.expected_host), len(l3_agents)) returned_host = [self._get_host(agent) for agent in l3_agents] self.assertEqual(self.expected_host, returned_host) class L3AgentAZLeastRoutersSchedulerTestCase(L3HATestCaseMixin): def setUp(self): super(L3AgentAZLeastRoutersSchedulerTestCase, self).setUp() self.plugin.router_scheduler = importutils.import_object( 'neutron.scheduler.l3_agent_scheduler.AZLeastRoutersScheduler') # Mock scheduling so that the test can control it explicitly mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin, '_notify_router_updated').start() # Removes MissingAuthPlugin exception from logs self.patch_notifier = mock.patch( 'neutron.notifiers.batch_notifier.BatchNotifier._notify') self.patch_notifier.start() def _register_l3_agents(self): self.agent1 = helpers.register_l3_agent(host='az1-host1', az='az1') self.agent2 = helpers.register_l3_agent(host='az1-host2', az='az1') self.agent3 = helpers.register_l3_agent(host='az2-host1', az='az2') self.agent4 = helpers.register_l3_agent(host='az2-host2', az='az2') self.agent5 = helpers.register_l3_agent(host='az3-host1', az='az3') self.agent6 = helpers.register_l3_agent(host='az3-host2', az='az3') def test_az_scheduler_auto_schedule(self): r1 = self._create_ha_router(ha=False, az_hints=['az1']) self.plugin.auto_schedule_routers(self.adminContext, 'az1-host2') agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r1['id']]) self.assertEqual(1, len(agents)) self.assertEqual('az1-host2', agents[0]['host']) def test_az_scheduler_auto_schedule_no_match(self): r1 = self._create_ha_router(ha=False, az_hints=['az1']) self.plugin.auto_schedule_routers(self.adminContext, 'az2-host1') agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r1['id']]) self.assertEqual(0, len(agents)) def test_az_scheduler_default_az(self): cfg.CONF.set_override('default_availability_zones', ['az2']) r1 = self._create_ha_router(ha=False) r2 = self._create_ha_router(ha=False) r3 = self._create_ha_router(ha=False) self.plugin.schedule_router(self.adminContext, r1['id']) self.plugin.schedule_router(self.adminContext, r2['id']) self.plugin.schedule_router(self.adminContext, r3['id']) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r1['id'], r2['id'], r3['id']]) self.assertEqual(3, len(agents)) expected_hosts = set(['az2-host1', 'az2-host2']) hosts = set([a['host'] for a in agents]) self.assertEqual(expected_hosts, hosts) def test_az_scheduler_az_hints(self): r1 = self._create_ha_router(ha=False, az_hints=['az3']) r2 = self._create_ha_router(ha=False, az_hints=['az3']) r3 = self._create_ha_router(ha=False, az_hints=['az3']) 
self.plugin.schedule_router(self.adminContext, r1['id']) self.plugin.schedule_router(self.adminContext, r2['id']) self.plugin.schedule_router(self.adminContext, r3['id']) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r1['id'], r2['id'], r3['id']]) self.assertEqual(3, len(agents)) expected_hosts = set(['az3-host1', 'az3-host2']) hosts = set([a['host'] for a in agents]) self.assertEqual(expected_hosts, hosts) def test_az_scheduler_least_routers(self): r1 = self._create_ha_router(ha=False, az_hints=['az1']) r2 = self._create_ha_router(ha=False, az_hints=['az1']) r3 = self._create_ha_router(ha=False, az_hints=['az1']) r4 = self._create_ha_router(ha=False, az_hints=['az1']) self.plugin.schedule_router(self.adminContext, r1['id']) self.plugin.schedule_router(self.adminContext, r2['id']) self.plugin.schedule_router(self.adminContext, r3['id']) self.plugin.schedule_router(self.adminContext, r4['id']) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r1['id'], r2['id'], r3['id'], r4['id']]) host_num = collections.defaultdict(int) for agent in agents: host_num[agent['host']] += 1 self.assertEqual(2, host_num['az1-host1']) self.assertEqual(2, host_num['az1-host2']) def test_az_scheduler_ha_az_hints(self): cfg.CONF.set_override('max_l3_agents_per_router', 2) r1 = self._create_ha_router(az_hints=['az1', 'az3']) self.plugin.schedule_router(self.adminContext, r1['id']) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r1['id']]) self.assertEqual(2, len(agents)) expected_azs = set(['az1', 'az3']) azs = set([a['availability_zone'] for a in agents]) self.assertEqual(expected_azs, azs) def test_az_scheduler_ha_auto_schedule(self): cfg.CONF.set_override('max_l3_agents_per_router', 3) self._set_l3_agent_admin_state(self.adminContext, self.agent2['id'], state=False) self._set_l3_agent_admin_state(self.adminContext, self.agent6['id'], state=False) r1 = self._create_ha_router(az_hints=['az1', 'az3']) self.plugin.schedule_router(self.adminContext, r1['id']) agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r1['id']]) self.assertEqual(2, len(agents)) hosts = set([a['host'] for a in agents]) self.assertEqual(set(['az1-host1', 'az3-host1']), hosts) self._set_l3_agent_admin_state(self.adminContext, self.agent6['id'], state=True) self.plugin.auto_schedule_routers(self.adminContext, 'az3-host2') agents = self.plugin.get_l3_agents_hosting_routers( self.adminContext, [r1['id']]) self.assertEqual(3, len(agents)) expected_hosts = set(['az1-host1', 'az3-host1', 'az3-host2']) hosts = set([a['host'] for a in agents]) self.assertEqual(expected_hosts, hosts) def test__get_routers_can_schedule_with_no_target_routers(self): result = self.plugin.router_scheduler._get_routers_can_schedule( self.plugin, mock.ANY, [], mock.ANY) self.assertEqual([], result) class L3DVRHAPlugin(db_v2.NeutronDbPluginV2, l3_hamode_db.L3_HA_NAT_db_mixin, l3_dvr_ha_scheduler_db.L3_DVR_HA_scheduler_db_mixin): pass class L3DVRHATestCaseMixin(testlib_api.SqlTestCase, L3SchedulerBaseMixin): def setUp(self): super(L3DVRHATestCaseMixin, self).setUp() self.adminContext = n_context.get_admin_context() self.plugin = L3DVRHAPlugin() neutron-16.0.0.0b2.dev214/neutron/tests/unit/scheduler/test_l3_ovn_scheduler.py # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not
use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import random import mock from neutron.tests import base from neutron.common.ovn import constants as ovn_const from neutron.scheduler import l3_ovn_scheduler class FakeOVNGatewaySchedulerNbOvnIdl(object): def __init__(self, chassis_gateway_mapping, gateway): self.get_all_chassis_gateway_bindings = mock.Mock( return_value=chassis_gateway_mapping['Chassis_Bindings']) self.get_gateway_chassis_binding = mock.Mock( return_value=chassis_gateway_mapping['Gateways'].get(gateway, None)) class FakeOVNGatewaySchedulerSbOvnIdl(object): def __init__(self, chassis_gateway_mapping): self.get_all_chassis = mock.Mock( return_value=chassis_gateway_mapping['Chassis']) class TestOVNGatewayScheduler(base.BaseTestCase): def setUp(self): super(TestOVNGatewayScheduler, self).setUp() # Overwritten by derived classes self.l3_scheduler = None # Used for unit tests self.new_gateway_name = 'lrp_new' self.fake_chassis_gateway_mappings = { 'None': {'Chassis': [], 'Gateways': { 'g1': [ovn_const.OVN_GATEWAY_INVALID_CHASSIS]}}, 'Multiple1': {'Chassis': ['hv1', 'hv2', 'hv3', 'hv4', 'hv5'], 'Gateways': {'g1': ['hv1', 'hv2', 'hv3', 'hv4'], 'g2': ['hv1', 'hv2', 'hv3'], 'g3': ['hv1', 'hv2'], 'g4': ['hv1']}}, 'Multiple2': {'Chassis': ['hv1', 'hv2', 'hv3'], 'Gateways': {'g1': ['hv1'], 'g2': ['hv1'], 'g3': ['hv1']}}, 'Multiple3': {'Chassis': ['hv1', 'hv2', 'hv3'], 'Gateways': {'g1': ['hv3'], 'g2': ['hv2'], 'g3': ['hv2']}}, 'Multiple4': {'Chassis': ['hv1', 'hv2'], 'Gateways': {'g1': ['hv1'], 'g2': ['hv1'], 'g3': ['hv1'], 'g4': ['hv1'], 'g5': ['hv1'], 'g6': ['hv1']}}} # Determine the chassis to gateway list bindings for details in self.fake_chassis_gateway_mappings.values(): self.assertNotIn(self.new_gateway_name, details['Gateways']) details.setdefault('Chassis_Bindings', {}) for chassis in details['Chassis']: details['Chassis_Bindings'].setdefault(chassis, []) for gw, chassis_list in details['Gateways'].items(): for chassis in chassis_list: if chassis in details['Chassis_Bindings']: details['Chassis_Bindings'][chassis].append((gw, 0)) def select(self, chassis_gateway_mapping, gateway_name): nb_idl = FakeOVNGatewaySchedulerNbOvnIdl(chassis_gateway_mapping, gateway_name) sb_idl = FakeOVNGatewaySchedulerSbOvnIdl(chassis_gateway_mapping) return self.l3_scheduler.select(nb_idl, sb_idl, gateway_name) def filter_existing_chassis(self, *args, **kwargs): return self.l3_scheduler.filter_existing_chassis( nb_idl=kwargs.pop('nb_idl'), gw_chassis=kwargs.pop('gw_chassis'), physnet=kwargs.pop('physnet'), chassis_physnets=kwargs.pop('chassis_physnets'), existing_chassis=kwargs.pop('existing_chassis')) class OVNGatewayChanceScheduler(TestOVNGatewayScheduler): def setUp(self): super(OVNGatewayChanceScheduler, self).setUp() self.l3_scheduler = l3_ovn_scheduler.OVNGatewayChanceScheduler() def test_no_chassis_available_for_existing_gateway(self): mapping = self.fake_chassis_gateway_mappings['None'] gateway_name = random.choice(list(mapping['Gateways'].keys())) chassis = self.select(mapping, gateway_name) self.assertEqual([ovn_const.OVN_GATEWAY_INVALID_CHASSIS], chassis) def 
test_no_chassis_available_for_new_gateway(self): mapping = self.fake_chassis_gateway_mappings['None'] gateway_name = self.new_gateway_name chassis = self.select(mapping, gateway_name) self.assertEqual([ovn_const.OVN_GATEWAY_INVALID_CHASSIS], chassis) def test_random_chassis_available_for_new_gateway(self): mapping = self.fake_chassis_gateway_mappings['Multiple1'] gateway_name = self.new_gateway_name chassis = self.select(mapping, gateway_name) self.assertItemsEqual(chassis, mapping.get('Chassis')) def test_filter_existing_chassis(self): # filter_existing_chassis is scheduler independent, but calling # it from the base class didn't seem right. Also, there is no need to # have another test in LeastLoadedScheduler. chassis_physnets = {'temp': ['phys-network-0', 'phys-network-1']} nb_idl = FakeOVNGatewaySchedulerNbOvnIdl( self.fake_chassis_gateway_mappings['None'], 'g1') # Check if the invalid chassis is removed self.assertEqual( ['temp'], self.filter_existing_chassis( nb_idl=nb_idl, gw_chassis=["temp"], physnet='phys-network-1', chassis_physnets=chassis_physnets, existing_chassis=['temp', ovn_const.OVN_GATEWAY_INVALID_CHASSIS])) # Check if the invalid chassis is removed - case II self.assertFalse( self.filter_existing_chassis( nb_idl=nb_idl, gw_chassis=["temp"], physnet='phys-network-1', chassis_physnets=chassis_physnets, existing_chassis=[ovn_const.OVN_GATEWAY_INVALID_CHASSIS])) # Check if the chassis is removed when the physnet doesn't exist self.assertFalse( self.filter_existing_chassis( nb_idl=nb_idl, gw_chassis=["temp"], physnet='phys-network-2', chassis_physnets=chassis_physnets, existing_chassis=['temp'])) # Check if the chassis is removed when it doesn't exist in gw_chassis # or in chassis_physnets self.assertFalse( self.filter_existing_chassis( nb_idl=nb_idl, gw_chassis=["temp1"], physnet='phys-network-2', chassis_physnets=chassis_physnets, existing_chassis=['temp'])) class OVNGatewayLeastLoadedScheduler(TestOVNGatewayScheduler): def setUp(self): super(OVNGatewayLeastLoadedScheduler, self).setUp() self.l3_scheduler = l3_ovn_scheduler.OVNGatewayLeastLoadedScheduler() def test_no_chassis_available_for_existing_gateway(self): mapping = self.fake_chassis_gateway_mappings['None'] gateway_name = random.choice(list(mapping['Gateways'].keys())) chassis = self.select(mapping, gateway_name) self.assertEqual([ovn_const.OVN_GATEWAY_INVALID_CHASSIS], chassis) def test_no_chassis_available_for_new_gateway(self): mapping = self.fake_chassis_gateway_mappings['None'] gateway_name = self.new_gateway_name chassis = self.select(mapping, gateway_name) self.assertEqual([ovn_const.OVN_GATEWAY_INVALID_CHASSIS], chassis) def test_least_loaded_chassis_available_for_new_gateway1(self): mapping = self.fake_chassis_gateway_mappings['Multiple1'] gateway_name = self.new_gateway_name chassis = self.select(mapping, gateway_name) self.assertItemsEqual(chassis, mapping.get('Chassis')) # least loaded will be the first one in the list, # networking-ovn will assign highest priority to this first element self.assertEqual(['hv5', 'hv4', 'hv3', 'hv2', 'hv1'], chassis) def test_least_loaded_chassis_available_for_new_gateway2(self): mapping = self.fake_chassis_gateway_mappings['Multiple2'] gateway_name = self.new_gateway_name chassis = self.select(mapping, gateway_name) # hv1 will have the lowest priority self.assertEqual('hv1', chassis[2]) def test_least_loaded_chassis_available_for_new_gateway3(self): mapping = self.fake_chassis_gateway_mappings['Multiple3'] gateway_name = self.new_gateway_name chassis = self.select(mapping, gateway_name) # least loaded chassis will be in
the front of the list self.assertEqual(['hv1', 'hv3', 'hv2'], chassis) def test_least_loaded_chassis_with_rebalance(self): mapping = self.fake_chassis_gateway_mappings['Multiple4'] gateway_name = self.new_gateway_name chassis = self.select(mapping, gateway_name) # least loaded chassis will be in the front of the list self.assertEqual(['hv2', 'hv1'], chassis) def test_existing_chassis_available_for_existing_gateway(self): mapping = self.fake_chassis_gateway_mappings['Multiple1'] gateway_name = random.choice(list(mapping['Gateways'].keys())) chassis = self.select(mapping, gateway_name) self.assertEqual(ovn_const.MAX_GW_CHASSIS, len(chassis)) def test__get_chassis_load_by_prios_several_ports(self): # Adding 5 ports of prio 1 and 5 ports of prio 2 chassis_info = [] for i in range(1, 6): chassis_info.append(('lrp', 1)) chassis_info.append(('lrp', 2)) actual = self.l3_scheduler._get_chassis_load_by_prios(chassis_info) expected = {1: 5, 2: 5} self.assertItemsEqual(expected.items(), actual) def test__get_chassis_load_by_prios_no_ports(self): self.assertFalse(self.l3_scheduler._get_chassis_load_by_prios([])) neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/ neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/__init__.py neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/auto_allocate/ neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/auto_allocate/__init__.py neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/auto_allocate/test_db.py # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License.
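# The tests below exercise AutoAllocatedTopologyMixin directly, with the
# core and L3 plugins replaced by mocks. A minimal sketch of the wiring
# they rely on (mirroring the setUp() further down; 'ctx' is an admin
# context and 'tenant_id' is any project id):
#
#     mixin = db.AutoAllocatedTopologyMixin()
#     mixin._l3_plugin = mock.Mock()    # router-side calls are stubbed
#     mixin._core_plugin = mock.Mock()  # network/subnet calls are stubbed
#     mixin.get_auto_allocated_topology(ctx, tenant_id, fields=['dry-run'])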
import mock import testtools from neutron_lib.api.definitions import constants as api_const from neutron_lib import constants from neutron_lib import context from neutron_lib import exceptions as n_exc from oslo_db import exception as db_exc from oslo_utils import uuidutils from neutron.services.auto_allocate import db from neutron.services.auto_allocate import exceptions from neutron.tests.unit import testlib_api DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' class AutoAllocateTestCase(testlib_api.SqlTestCase): def setUp(self): super(AutoAllocateTestCase, self).setUp() self.setup_coreplugin(core_plugin=DB_PLUGIN_KLASS) self.ctx = context.get_admin_context() self.mixin = db.AutoAllocatedTopologyMixin() self.mixin._l3_plugin = mock.Mock() self.mixin._core_plugin = mock.Mock() def test_ensure_external_network_default_value(self): network_id = uuidutils.generate_uuid() kwargs = { "context": self.ctx, "request": { "id": network_id, api_const.IS_DEFAULT: True }, "network": { "id": network_id, api_const.IS_DEFAULT: False }, "original_network": { "id": network_id, api_const.IS_DEFAULT: False } } network_mock = mock.MagicMock(network_id=network_id, is_default=False) with mock.patch( 'neutron.objects.network.ExternalNetwork.get_objects', return_value=[network_mock] ) as get_external_nets, mock.patch( 'neutron.objects.network.ExternalNetwork.get_object', return_value=network_mock ) as get_external_net: db._ensure_external_network_default_value_callback( "NETWORK", "precommit_update", "test_plugin", **kwargs) get_external_nets.assert_called_once_with( self.ctx, _pager=mock.ANY, is_default=True) get_external_net.assert_called_once_with( self.ctx, network_id=network_id) network_mock.update.assert_called_once_with() def test_ensure_external_network_default_value_no_default(self): network_id = uuidutils.generate_uuid() kwargs = { "context": self.ctx, "request": { "id": network_id, api_const.IS_DEFAULT: None }, "network": { "id": network_id, api_const.IS_DEFAULT: False }, } network_mock = mock.MagicMock(network_id=network_id, is_default=False) with mock.patch( 'neutron.objects.network.ExternalNetwork.get_objects', return_value=[network_mock] ) as get_external_nets, mock.patch( 'neutron.objects.network.ExternalNetwork.get_object', return_value=network_mock ) as get_external_net: db._ensure_external_network_default_value_callback( "NETWORK", "precommit_update", "test_plugin", **kwargs) get_external_nets.assert_not_called() get_external_net.assert_not_called() network_mock.update.assert_not_called() def test_ensure_external_network_default_value_default_not_changed(self): network_id = uuidutils.generate_uuid() kwargs = { "context": self.ctx, "request": { "id": network_id, api_const.IS_DEFAULT: True }, "network": { "id": network_id, api_const.IS_DEFAULT: True }, "original_network": { "id": network_id, api_const.IS_DEFAULT: True } } network_mock = mock.MagicMock(network_id=network_id, is_default=True) with mock.patch( 'neutron.objects.network.ExternalNetwork.get_objects', return_value=[network_mock] ) as get_external_nets, mock.patch( 'neutron.objects.network.ExternalNetwork.get_object', return_value=network_mock ) as get_external_net: db._ensure_external_network_default_value_callback( "NETWORK", "precommit_update", "test_plugin", **kwargs) get_external_nets.assert_called_once_with( self.ctx, _pager=mock.ANY, is_default=True) get_external_net.assert_not_called() network_mock.update.assert_not_called() def test_ensure_external_network_default_value_default_existed(self): network_id = 
uuidutils.generate_uuid() kwargs = { "context": self.ctx, "request": { "id": network_id, api_const.IS_DEFAULT: True }, "network": { "id": network_id, api_const.IS_DEFAULT: False }, "original_network": { "id": network_id, api_const.IS_DEFAULT: False } } network_mock = mock.MagicMock(network_id='fake_id', is_default=False) with mock.patch( 'neutron.objects.network.ExternalNetwork.get_objects', return_value=[network_mock] ) as get_external_nets, mock.patch( 'neutron.objects.network.ExternalNetwork.get_object', return_value=network_mock ) as get_external_net: self.assertRaises(exceptions.DefaultExternalNetworkExists, db._ensure_external_network_default_value_callback, "NETWORK", "precommit_update", "test_plugin", **kwargs) get_external_nets.assert_called_once_with( self.ctx, _pager=mock.ANY, is_default=True) get_external_net.assert_not_called() network_mock.update.assert_not_called() def test__provision_external_connectivity_expected_cleanup(self): """Test that the right resources are cleaned up.""" subnets = [ {'id': 'subnet_foo_1', 'network_id': 'network_foo'}, {'id': 'subnet_foo_2', 'network_id': 'network_foo'}, ] with mock.patch.object(self.mixin, '_cleanup') as mock_cleanup: self.mixin.l3_plugin.create_router.return_value = ( {'id': 'router_foo'}) self.mixin.l3_plugin.add_router_interface.side_effect = ( n_exc.BadRequest(resource='router', msg='doh!')) self.assertRaises(exceptions.AutoAllocationFailure, self.mixin._provision_external_connectivity, self.ctx, 'ext_net_foo', subnets, 'tenant_foo') # expect no subnets to be unplugged mock_cleanup.assert_called_once_with( self.ctx, network_id='network_foo', router_id='router_foo', subnets=[]) def test__provision_external_connectivity_fail_expected_cleanup(self): """Test that the right resources are cleaned up.""" subnets = [ {'id': 'subnet_foo_1', 'network_id': 'network_foo'}, ] with mock.patch.object(self.mixin, '_cleanup') as mock_cleanup: self.mixin.l3_plugin.create_router.side_effect = ( n_exc.BadRequest(resource='router', msg='doh!')) self.assertRaises(exceptions.AutoAllocationFailure, self.mixin._provision_external_connectivity, self.ctx, 'ext_net_foo', subnets, 'tenant_foo') # expected router_id to be None mock_cleanup.assert_called_once_with( self.ctx, network_id='network_foo', router_id=None, subnets=[]) def test_get_auto_allocated_topology_dry_run_happy_path_for_kevin(self): with mock.patch.object(self.mixin, '_check_requirements') as f: self.mixin.get_auto_allocated_topology( self.ctx, mock.ANY, fields=['dry-run']) self.assertEqual(1, f.call_count) def test_get_auto_allocated_topology_dry_run_bad_input(self): self.assertRaises(n_exc.BadRequest, self.mixin.get_auto_allocated_topology, self.ctx, mock.ANY, fields=['foo']) def test__provision_tenant_private_network_handles_subnet_errors(self): network_id = uuidutils.generate_uuid() self.mixin._core_plugin.create_network.return_value = ( {'id': network_id}) self.mixin._core_plugin.create_subnet.side_effect = ( n_exc.SubnetAllocationError(reason='disaster')) with mock.patch.object(self.mixin, "_get_supported_subnetpools") as f,\ mock.patch.object(self.mixin, "_cleanup") as g: f.return_value = ( [{'ip_version': constants.IP_VERSION_4, 'id': uuidutils.generate_uuid()}]) self.assertRaises(exceptions.AutoAllocationFailure, self.mixin._provision_tenant_private_network, self.ctx, 'foo_tenant') g.assert_called_once_with(self.ctx, network_id) def _test__build_topology(self, method, provisioning_exception): with mock.patch.object(self.mixin, method, side_effect=provisioning_exception), \ 
mock.patch.object(self.mixin, '_cleanup') as f: self.assertRaises(provisioning_exception.error, self.mixin._build_topology, self.ctx, mock.ANY, 'foo_net') f.assert_called_once_with( self.ctx, network_id=provisioning_exception.network_id, router_id=provisioning_exception.router_id, subnets=provisioning_exception.subnets ) def test__build_topology_provisioning_error_no_toplogy(self): provisioning_exception = exceptions.UnknownProvisioningError( db_exc.DBError) self._test__build_topology( '_provision_tenant_private_network', provisioning_exception) def test__build_topology_provisioning_error_network_only(self): provisioning_exception = exceptions.UnknownProvisioningError( Exception, network_id='foo') self._test__build_topology( '_provision_tenant_private_network', provisioning_exception) def test__build_topology_error_only_network_again(self): provisioning_exception = exceptions.UnknownProvisioningError( AttributeError, network_id='foo') with mock.patch.object(self.mixin, '_provision_tenant_private_network') as f: f.return_value = [{'network_id': 'foo'}] self._test__build_topology( '_provision_external_connectivity', provisioning_exception) def test__build_topology_error_network_with_router(self): provisioning_exception = exceptions.UnknownProvisioningError( KeyError, network_id='foo_n', router_id='foo_r') with mock.patch.object(self.mixin, '_provision_tenant_private_network') as f: f.return_value = [{'network_id': 'foo_n'}] self._test__build_topology( '_provision_external_connectivity', provisioning_exception) def test__build_topology_error_network_with_router_and_interfaces(self): provisioning_exception = exceptions.UnknownProvisioningError( db_exc.DBConnectionError, network_id='foo_n', router_id='foo_r', subnets=[{'id': 'foo_s'}]) with mock.patch.object(self.mixin, '_provision_tenant_private_network') as f,\ mock.patch.object(self.mixin, '_provision_external_connectivity') as g: f.return_value = [{'network_id': 'foo_n'}] g.return_value = {'id': 'foo_r'} self._test__build_topology( '_save', provisioning_exception) def test__save_with_provisioning_error(self): self.mixin._core_plugin.update_network.side_effect = Exception with testtools.ExpectedException( exceptions.UnknownProvisioningError) as e: self.mixin._save(self.ctx, 'foo_t', 'foo_n', 'foo_r', [{'id': 'foo_s'}]) self.assertEqual('foo_n', e.network_id) self.assertEqual('foo_r', e.router_id) self.assertEqual([{'id': 'foo_s'}], e.subnets) def test__provision_external_connectivity_with_provisioning_error(self): self.mixin._l3_plugin.create_router.side_effect = Exception with testtools.ExpectedException( exceptions.UnknownProvisioningError) as e: self.mixin._provision_external_connectivity( self.ctx, 'foo_default', [{'id': 'foo_s', 'network_id': 'foo_n'}], 'foo_tenant') self.assertEqual('foo_n', e.network_id) self.assertIsNone(e.router_id) self.assertIsNone(e.subnets) def test__provision_tenant_private_network_with_provisioning_error(self): self.mixin._core_plugin.create_network.side_effect = Exception with testtools.ExpectedException( exceptions.UnknownProvisioningError) as e: self.mixin._provision_tenant_private_network( self.ctx, 'foo_tenant') self.assertIsNone(e.network_id) def test__check_requirements_fail_on_missing_ext_net(self): self.assertRaises(exceptions.AutoAllocationFailure, self.mixin._check_requirements, self.ctx, 'foo_tenant') def test__check_requirements_fail_on_missing_pools(self): with mock.patch.object( self.mixin, '_get_default_external_network'),\ mock.patch.object( self.mixin, '_get_supported_subnetpools') as g: 
g.side_effect = n_exc.NotFound()
            self.assertRaises(exceptions.AutoAllocationFailure,
                              self.mixin._check_requirements,
                              self.ctx, 'foo_tenant')

    def test__check_requirements_happy_path_for_kevin(self):
        with mock.patch.object(
                self.mixin, '_get_default_external_network'),\
                mock.patch.object(
                    self.mixin, '_get_supported_subnetpools'):
            result = self.mixin._check_requirements(self.ctx, 'foo_tenant')
            expected = {'id': 'dry-run=pass', 'tenant_id': 'foo_tenant'}
            self.assertEqual(expected, result)

    def test__cleanup_handles_failures(self):
        notfound = n_exc.NotFound
        self.mixin._l3_plugin.remove_router_interface.side_effect = notfound
        self.mixin._l3_plugin.delete_router.side_effect = notfound
        self.mixin._core_plugin.delete_network.side_effect = notfound
        self.mixin._cleanup(self.ctx, network_id=44, router_id=45,
                            subnets=[{'id': 46}])


# ===== neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/conntrack_helper/__init__.py (empty) =====
# ===== neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/conntrack_helper/test_plugin.py =====

# Copyright (c) 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
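# --- Editor's note: illustrative sketch, not part of the original module.
# The negative tests below rely on two validations: the helper must be a
# known one, and it must be paired with a protocol that helper supports
# (e.g. 'tftp' only over 'udp'). A stdlib-only restatement of that check,
# assuming a simple allowed-pairs table; the table contents here are a
# guess for illustration, not neutron's actual configuration:
_SKETCH_HELPER_PROTOCOLS = {
    'tftp': {'udp'},
    'ftp': {'tcp'},
}


def _sketch_check_helper(helper, protocol):
    """Return (allowed, reason) mirroring the plugin checks under test."""
    if helper not in _SKETCH_HELPER_PROTOCOLS:
        return False, 'helper not allowed'         # ConntrackHelperNotAllowed
    if protocol not in _SKETCH_HELPER_PROTOCOLS[helper]:
        return False, 'wrong protocol for helper'  # InvalidProtocolForHelper
    return True, None
# --- end editor's note.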
import mock from neutron_lib import context from neutron_lib import exceptions as lib_exc from neutron_lib.objects import exceptions as obj_exc from neutron_lib.plugins import directory from oslo_config import cfg from neutron.api.rpc.callbacks.consumer import registry as cons_registry from neutron.api.rpc.callbacks import events as rpc_events from neutron.api.rpc.callbacks.producer import registry as prod_registry from neutron.api.rpc.callbacks import resource_manager from neutron.api.rpc.handlers import resources_rpc from neutron import manager from neutron.objects import conntrack_helper from neutron.services.conntrack_helper.common import exceptions as cth_exc from neutron.services.conntrack_helper import plugin as cth_plugin from neutron.tests.unit import testlib_api DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' class TestConntrackHelperPlugin(testlib_api.SqlTestCase): def setUp(self): super(TestConntrackHelperPlugin, self).setUp() with mock.patch.object( resource_manager.ResourceCallbacksManager, '_singleton', new_callable=mock.PropertyMock(return_value=False)): self.cons_mgr = resource_manager.ConsumerResourceCallbacksManager() self.prod_mgr = resource_manager.ProducerResourceCallbacksManager() for mgr in (self.cons_mgr, self.prod_mgr): mgr.clear() mock.patch.object( cons_registry, '_get_manager', return_value=self.cons_mgr).start() mock.patch.object( prod_registry, '_get_manager', return_value=self.prod_mgr).start() self.setup_coreplugin(load_plugins=False) mock.patch('neutron.objects.db.api.create_object').start() mock.patch('neutron.objects.db.api.update_object').start() mock.patch('neutron.objects.db.api.delete_object').start() mock.patch('neutron.objects.db.api.get_object').start() # We don't use real models as per mocks above. We also need to mock-out # methods that work with real data types mock.patch( 'neutron.objects.base.NeutronDbObject.modify_fields_from_db' ).start() cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) cfg.CONF.set_override("service_plugins", ["router", "conntrack_helper"]) manager.init() # TODO(hjensas): Add CONNTRACKHELPER to neutron-lib Well-known # service type constants. 
self.cth_plugin = directory.get_plugin("CONNTRACKHELPER") self.ctxt = context.Context('admin', 'fake_tenant') mock.patch.object(self.ctxt.session, 'refresh').start() mock.patch.object(self.ctxt.session, 'expunge').start() @mock.patch.object(resources_rpc.ResourcesPushRpcApi, 'push') @mock.patch.object(cth_plugin.Plugin, 'get_router') @mock.patch('neutron.objects.conntrack_helper.ConntrackHelper') def test_create_conntrack_helper(self, mock_conntrack_helper, mock_get_router, mock_push_api): cth_input = { 'conntrack_helper': { 'conntrack_helper': { 'protocol': 'udp', 'port': 69, 'helper': 'tftp'} } } cth_obj = mock.Mock() cth_obj.helper = 'tftp' cth_obj.protocol = 'udp' cth_obj.port = 69 router_obj = mock.Mock() router_obj.id = 'faker-router-id' mock_get_router.return_value = router_obj mock_conntrack_helper.return_value = cth_obj self.cth_plugin.create_router_conntrack_helper( self.ctxt, router_obj.id, **cth_input) mock_conntrack_helper.assert_called_once_with( self.ctxt, **cth_input['conntrack_helper']['conntrack_helper']) self.assertTrue(cth_obj.create.called) mock_push_api.assert_called_once_with( self.ctxt, mock.ANY, rpc_events.CREATED) @mock.patch.object(cth_plugin.Plugin, '_find_existing_conntrack_helper') @mock.patch.object(cth_plugin.Plugin, 'get_router') @mock.patch('neutron.objects.conntrack_helper.ConntrackHelper') def test_negative_create_conntrack_helper(self, mock_conntrack_helper, mock_get_router, mock_find_existing): cth_input = { 'conntrack_helper': { 'protocol': 'udp', 'port': '69', 'helper': 'tftp'} } cth_obj = mock.Mock() router_obj = mock.Mock() router_obj.id = 'faker-router-id' mock_get_router.return_value = router_obj mock_conntrack_helper.return_value = cth_obj cth_obj.create.side_effect = obj_exc.NeutronDbObjectDuplicateEntry( mock.Mock(), mock.Mock()) mock_find_existing.return_value = ('cth_obj', 'conflict_param') self.assertRaises( lib_exc.BadRequest, self.cth_plugin.create_router_conntrack_helper, self.ctxt, router_obj.id, cth_input) @mock.patch.object(cth_plugin.Plugin, '_find_existing_conntrack_helper') @mock.patch.object(cth_plugin.Plugin, 'get_router') @mock.patch('neutron.objects.conntrack_helper.ConntrackHelper') def test_negative_create_helper_not_allowed( self, mock_conntrack_helper, mock_get_router, mock_find_existing): cth_input = { 'conntrack_helper': { 'protocol': 'udp', 'port': 70, 'helper': 'foo'} } cth_obj = mock.Mock() cth_obj.helper = cth_input['conntrack_helper']['helper'] cth_obj.protocol = cth_input['conntrack_helper']['protocol'] cth_obj.port = cth_input['conntrack_helper']['port'] router_obj = mock.Mock() router_obj.id = 'faker-router-id' mock_get_router.return_value = router_obj mock_conntrack_helper.return_value = cth_obj self.assertRaises( cth_exc.ConntrackHelperNotAllowed, self.cth_plugin.create_router_conntrack_helper, self.ctxt, router_obj.id, cth_input) @mock.patch.object(cth_plugin.Plugin, '_find_existing_conntrack_helper') @mock.patch.object(cth_plugin.Plugin, 'get_router') @mock.patch('neutron.objects.conntrack_helper.ConntrackHelper') def test_negative_create_helper_invalid_proto_for_helper( self, mock_conntrack_helper, mock_get_router, mock_find_existing): cth_input = { 'conntrack_helper': { 'protocol': 'tcp', 'port': 69, 'helper': 'tftp'} } cth_obj = mock.Mock() cth_obj.helper = cth_input['conntrack_helper']['helper'] cth_obj.protocol = cth_input['conntrack_helper']['protocol'] cth_obj.port = cth_input['conntrack_helper']['port'] router_obj = mock.Mock() router_obj.id = 'faker-router-id' mock_get_router.return_value = router_obj 
mock_conntrack_helper.return_value = cth_obj self.assertRaises( cth_exc.InvalidProtocolForHelper, self.cth_plugin.create_router_conntrack_helper, self.ctxt, router_obj.id, cth_input) @mock.patch.object(resources_rpc.ResourcesPushRpcApi, 'push') @mock.patch.object(conntrack_helper.ConntrackHelper, 'get_object') def test_update_conntrack_helper(self, mock_cth_get_object, mock_rpc_push): cth_input = { 'conntrack_helper': { 'conntrack_helper': { 'protocol': 'udp', 'port': 69, 'helper': 'tftp'} } } cth_obj = mock.Mock() cth_obj.helper = 'tftp' cth_obj.protocol = 'udp' mock_cth_get_object.return_value = cth_obj self.cth_plugin.update_router_conntrack_helper( self.ctxt, 'cth_id', mock.ANY, **cth_input) mock_cth_get_object.assert_called_once_with(self.ctxt, id='cth_id') self.assertTrue(cth_obj.update_fields) self.assertTrue(cth_obj.update) mock_rpc_push.assert_called_once_with( self.ctxt, mock.ANY, rpc_events.UPDATED) @mock.patch.object(conntrack_helper.ConntrackHelper, 'get_object') def test_negative_update_conntrack_helper(self, mock_cth_get_object): cth_input = { 'conntrack_helper': { 'conntrack_helper': { 'protocol': 'udp', 'port': 69, 'helper': 'tftp'} } } mock_cth_get_object.return_value = None self.assertRaises( cth_exc.ConntrackHelperNotFound, self.cth_plugin.update_router_conntrack_helper, self.ctxt, 'cth_id', mock.ANY, **cth_input) @mock.patch.object(conntrack_helper.ConntrackHelper, 'get_object') def test_get_conntrack_helper(self, get_object_mock): self.cth_plugin.get_router_conntrack_helper( self.ctxt, 'cth_id', mock.ANY, fields=None) get_object_mock.assert_called_once_with(self.ctxt, id='cth_id') @mock.patch.object(conntrack_helper.ConntrackHelper, 'get_object') def test_negative_get_conntrack_helper(self, get_object_mock): get_object_mock.return_value = None self.assertRaises( cth_exc.ConntrackHelperNotFound, self.cth_plugin.get_router_conntrack_helper, self.ctxt, 'cth_id', mock.ANY, fields=None) @mock.patch.object(conntrack_helper.ConntrackHelper, 'get_objects') def test_get_conntrack_helpers(self, get_objects_mock): self.cth_plugin.get_router_conntrack_helpers(self.ctxt) get_objects_mock.assert_called_once_with(self.ctxt, _pager=mock.ANY, router_id=None) @mock.patch.object(resources_rpc.ResourcesPushRpcApi, 'push') @mock.patch.object(conntrack_helper.ConntrackHelper, 'get_object') def test_delete_conntrack_helper(self, get_object_mock, mock_rpc_push): cth_obj = mock.Mock(id='cth_id', router_id='fake-router', protocol='udp', port=69, helper='tftp') get_object_mock.return_value = cth_obj self.cth_plugin.delete_router_conntrack_helper(self.ctxt, 'cth_id', mock.ANY) cth_obj.delete.assert_called() mock_rpc_push.assert_called_once_with( self.ctxt, mock.ANY, rpc_events.DELETED) @mock.patch.object(conntrack_helper.ConntrackHelper, 'get_object') def test_negative_delete_conntrack_helper(self, get_object_mock): get_object_mock.return_value = None self.assertRaises(cth_exc.ConntrackHelperNotFound, self.cth_plugin.delete_router_conntrack_helper, self.ctxt, 'cth_id', mock.ANY) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.479046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/l3_router/0000755000175000017500000000000000000000000024517 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/l3_router/__init__.py0000644000175000017500000000000000000000000026616 
0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.479046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/l3_router/service_providers/0000755000175000017500000000000000000000000030254 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/l3_router/service_providers/__init__.py0000644000175000017500000000000000000000000032353 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/l3_router/service_providers/test_driver_controller.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/l3_router/service_providers/test_driver_contro0000644000175000017500000002700000000000000034114 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib import context from neutron_lib import exceptions as lib_exc from neutron_lib.plugins import constants as p_cons from neutron_lib.plugins import directory from oslo_utils import uuidutils import testtools from neutron.services.l3_router.service_providers import driver_controller from neutron.services import provider_configuration from neutron.tests import base from neutron.tests.unit import testlib_api DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' class TestDriverController(testlib_api.SqlTestCase): def setUp(self): super(TestDriverController, self).setUp() self.setup_coreplugin(DB_PLUGIN_KLASS) self.fake_l3 = mock.Mock() self.dc = driver_controller.DriverController(self.fake_l3) self.fake_l3.l3_driver_controller = self.dc self.ctx = context.get_admin_context() def _return_provider_for_flavor(self, provider): self.dc._flavor_plugin_ref = mock.Mock() self.dc._flavor_plugin_ref.get_flavor.return_value = {'id': 'abc'} provider = {'provider': provider} self.dc._flavor_plugin_ref.get_flavor_next_provider.return_value = [ provider] def test_uses_scheduler(self): self._return_provider_for_flavor('dvrha') router_db = mock.Mock() flavor_id = uuidutils.generate_uuid() router_id = uuidutils.generate_uuid() router = dict(id=router_id, flavor_id=flavor_id) self.dc._set_router_provider('router', 'PRECOMMIT_CREATE', self, self.ctx, router, router_db) self.assertTrue(self.dc.uses_scheduler(self.ctx, router_id)) self.dc.drivers['dvrha'].use_integrated_agent_scheduler = False self.assertFalse(self.dc.uses_scheduler(self.ctx, router_id)) def test_driver_owns_router(self): self._return_provider_for_flavor('dvrha') router_db = mock.Mock() flavor_id = uuidutils.generate_uuid() r1 = uuidutils.generate_uuid() r2 = uuidutils.generate_uuid() router = dict(id=r1, 
flavor_id=flavor_id) self.dc._set_router_provider('router', 'PRECOMMIT_CREATE', self, self.ctx, router, router_db) self.assertTrue(self.dc.drivers['dvrha'].owns_router(self.ctx, r1)) self.assertFalse(self.dc.drivers['dvr'].owns_router(self.ctx, r1)) self.assertFalse(self.dc.drivers['dvr'].owns_router(self.ctx, r2)) self.assertFalse(self.dc.drivers['dvr'].owns_router(self.ctx, None)) @mock.patch('neutron_lib.callbacks.registry.publish') def test__set_router_provider_flavor_specified(self, mock_cb): self._return_provider_for_flavor('dvrha') router_db = mock.Mock() flavor_id = uuidutils.generate_uuid() router_id = uuidutils.generate_uuid() router = dict(id=router_id, flavor_id=flavor_id) self.dc._set_router_provider('router', 'PRECOMMIT_CREATE', self, self.ctx, router, router_db) mock_cb.assert_called_with(resources.ROUTER_CONTROLLER, events.PRECOMMIT_ADD_ASSOCIATION, mock.ANY, payload=mock.ANY) payload = mock_cb.mock_calls[0][2]['payload'] self.assertEqual(router, payload.request_body) self.assertEqual(router_db, payload.latest_state) self.assertEqual(flavor_id, router_db.flavor_id) self.assertEqual(self.dc.drivers['dvrha'], self.dc.get_provider_for_router(self.ctx, router_id)) def test__update_router_provider_invalid(self): test_dc = driver_controller.DriverController(self.fake_l3) with mock.patch.object(registry, "publish") as mock_cb: with mock.patch.object(test_dc, "get_provider_for_router"): with mock.patch.object( driver_controller, "_ensure_driver_supports_request") as _ensure: _ensure.side_effect = lib_exc.InvalidInput( error_message='message') self.assertRaises( lib_exc.InvalidInput, test_dc._update_router_provider, None, None, None, payload=events.DBEventPayload( None, request_body={'name': 'testname'}, states=({'flavor_id': 'old_fid'},))) mock_cb.assert_not_called() def test__update_router_provider_with_flags(self): test_dc = driver_controller.DriverController(self.fake_l3) with mock.patch.object(registry, "publish"): with mock.patch.object(test_dc, "get_provider_for_router"): with mock.patch.object( driver_controller, "_ensure_driver_supports_request") as _ensure: _ensure.side_effect = lib_exc.InvalidInput( error_message='message') with mock.patch( "neutron.services.l3_router.service_providers." "driver_controller.LOG.debug") as mock_log: self.assertRaises( lib_exc.InvalidInput, test_dc._update_router_provider, None, None, None, payload=events.DBEventPayload( None, request_body={'name': 'testname', 'distributed': False}, states=({'flavor_id': None, 'distributed': True, 'ha': False},))) # To validate that the 'ha' attribute of the router # stays unchanged from the previous state while # updating 'distributed' from True to False. 
mock_log.assert_any_call( "Get a provider driver handle based on the ha " "flag: %(ha_flag)s and distributed flag: " "%(distributed_flag)s", {'ha_flag': False, 'distributed_flag': False}) @mock.patch('neutron_lib.callbacks.registry.publish') def test__set_router_provider_attr_lookups(self, mock_cb): # ensure correct drivers are looked up based on attrs router_id1 = uuidutils.generate_uuid() router_id2 = uuidutils.generate_uuid() router_id3 = uuidutils.generate_uuid() router_id4 = uuidutils.generate_uuid() router_id5 = uuidutils.generate_uuid() router_id6 = uuidutils.generate_uuid() router_id7 = uuidutils.generate_uuid() router_id8 = uuidutils.generate_uuid() router_id9 = uuidutils.generate_uuid() cases = [ ('dvrha', dict(id=router_id1, distributed=True, ha=True)), ('dvr', dict(id=router_id2, distributed=True, ha=False)), ('ha', dict(id=router_id3, distributed=False, ha=True)), ('single_node', dict(id=router_id4, distributed=False, ha=False)), ('ha', dict(id=router_id5, ha=True, distributed=constants.ATTR_NOT_SPECIFIED)), ('dvr', dict(id=router_id6, distributed=True, ha=constants.ATTR_NOT_SPECIFIED)), ('single_node', dict(id=router_id7, ha=False, distributed=constants.ATTR_NOT_SPECIFIED)), ('single_node', dict(id=router_id8, distributed=False, ha=constants.ATTR_NOT_SPECIFIED)), ('single_node', dict(id=router_id9, distributed=constants.ATTR_NOT_SPECIFIED, ha=constants.ATTR_NOT_SPECIFIED)), ] for driver, body in cases: self.dc._set_router_provider('router', 'PRECOMMIT_CREATE', self, self.ctx, body, mock.Mock()) mock_cb.assert_called_with( resources.ROUTER_CONTROLLER, events.PRECOMMIT_ADD_ASSOCIATION, mock.ANY, payload=mock.ANY) self.assertEqual(self.dc.drivers[driver], self.dc.get_provider_for_router(self.ctx, body['id']), 'Expecting %s for body %s' % (driver, body)) @mock.patch('neutron_lib.callbacks.registry.publish') def test__clear_router_provider(self, mock_cb): # ensure correct drivers are looked up based on attrs router_id1 = uuidutils.generate_uuid() body = dict(id=router_id1, distributed=True, ha=True) self.dc._set_router_provider('router', 'PRECOMMIT_CREATE', self, self.ctx, body, mock.Mock()) mock_cb.assert_called_with(resources.ROUTER_CONTROLLER, events.PRECOMMIT_ADD_ASSOCIATION, mock.ANY, payload=mock.ANY) payload = mock_cb.mock_calls[0][2]['payload'] self.assertEqual(self.ctx, payload.context) self.assertIn('old_driver', payload.metadata) self.assertIn('new_driver', payload.metadata) self.assertIsNotNone(payload.latest_state) self.assertEqual(self.dc.drivers['dvrha'], self.dc.get_provider_for_router(self.ctx, body['id'])) self.dc._clear_router_provider('router', 'PRECOMMIT_DELETE', self, self.ctx, body['id']) mock_cb.assert_called_with(resources.ROUTER_CONTROLLER, events.PRECOMMIT_DELETE_ASSOCIATIONS, mock.ANY, payload=mock.ANY) with testtools.ExpectedException(ValueError): # if association was cleared, get_router will be called self.fake_l3.get_router.side_effect = ValueError self.dc.get_provider_for_router(self.ctx, body['id']) mock_cb.assert_called_with(resources.ROUTER_CONTROLLER, events.PRECOMMIT_ADD_ASSOCIATION, mock.ANY, payload=mock.ANY) def test__flavor_plugin(self): directory.add_plugin(p_cons.FLAVORS, mock.Mock()) _dc = driver_controller.DriverController(self.fake_l3) self.assertEqual( directory.get_plugin(p_cons.FLAVORS), _dc._flavor_plugin) class Test_LegacyPlusProviderConfiguration(base.BaseTestCase): @mock.patch.object(provider_configuration.ProviderConfiguration, "add_provider") def test__update_router_provider_invalid(self, mock_method): mock_method.side_effect 
= lib_exc.Invalid(message='message')
        driver_controller._LegacyPlusProviderConfiguration()


# ===== neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/l3_router/test_l3_router_plugin.py =====

# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg

from neutron.services.l3_router import l3_router_plugin as lrp
from neutron.tests import base


class TestL3PluginDvrConditional(base.BaseTestCase):

    def _test_dvr_alias_exposed(self, enabled):
        cfg.CONF.set_override('enable_dvr', enabled)
        plugin = lrp.L3RouterPlugin()
        exposed = 'dvr' in plugin.supported_extension_aliases
        self.assertEqual(enabled, exposed)

    def test_dvr_alias_exposed_enabled(self):
        self._test_dvr_alias_exposed(enabled=True)

    def test_dvr_alias_exposed_disabled(self):
        self._test_dvr_alias_exposed(enabled=False)


# ===== neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/logapi/__init__.py (empty) =====
# ===== neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/logapi/agent/__init__.py (empty) =====
# ===== neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/logapi/agent/l3/__init__.py (empty) =====
# ===== neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/logapi/agent/l3/test_base.py =====

# Copyright 2018 Fujitsu Limited
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib.agent import l3_extension from neutron_lib import context from oslo_utils import uuidutils from neutron.agent.l3 import agent as l3_agent from neutron.agent.l3 import l3_agent_extension_api as l3_ext_api from neutron.agent.l3 import router_info as l3router from neutron.api.rpc.callbacks import events from neutron.services.logapi.agent.l3 import base as l3_base from neutron.services.logapi.agent import log_extension as log_ext from neutron.tests.unit.agent.l3 import test_agent _uuid = uuidutils.generate_uuid class FakeLogDriver(log_ext.LoggingDriver): SUPPORTED_LOGGING_TYPES = ['fake_resource'] def initialize(self, resource_rpc, **kwargs): pass def start_logging(self, context, **kwargs): pass def stop_logging(self, context, **kwargs): pass class FakeL3LoggingExtension(l3_base.L3LoggingExtensionBase, l3_extension.L3AgentExtension): def initialize(self, connection, driver_type): pass class L3LoggingExtBaseTestCase(test_agent.BasicRouterOperationsFramework): def setUp(self): super(L3LoggingExtBaseTestCase, self).setUp() self.agent = l3_agent.L3NATAgent('test_host', self.conf) self.context = context.get_admin_context() self.connection = mock.Mock() self.ex_gw_port = {'id': _uuid()} self.router = {'id': _uuid(), 'gw_port': self.ex_gw_port, 'ha': False, 'distributed': False} self.router_info = l3router.RouterInfo(self.agent, _uuid(), self.router, **self.ri_kwargs) self.router_info.ex_gw_port = self.ex_gw_port self.agent.router_info[self.router['id']] = self.router_info def _mock_get_router_info(router_id): return self.router_info self.get_router_info = mock.patch( 'neutron.agent.l3.l3_agent_extension_api.' 
'L3AgentExtensionAPI.get_router_info').start() self.get_router_info.side_effect = _mock_get_router_info self.agent_api = l3_ext_api.L3AgentExtensionAPI(None, None) mock.patch( 'neutron.manager.NeutronManager.load_class_for_provider').start() class TestL3LoggingExtBase(L3LoggingExtBaseTestCase): def setUp(self): super(TestL3LoggingExtBase, self).setUp() self.agent_ext = FakeL3LoggingExtension() self.agent_ext.consume_api(self.agent_api) self.log_driver = mock.Mock() log_driver_object = FakeLogDriver() self.log_driver.defer_apply.side_effect = log_driver_object.defer_apply self.agent_ext.log_driver = self.log_driver def test__handle_notification_passes_update_events_enabled_log(self): log_obj = mock.Mock() log_obj.enabled = True self.agent_ext._handle_notification( self.context, 'log', [log_obj], events.UPDATED) self.assertTrue(self.log_driver.start_logging.called) def test__handle_notification_passes_update_events_disabled_log(self): log_obj = mock.Mock() log_obj.enabled = False self.agent_ext._handle_notification( self.context, 'log', [log_obj], events.UPDATED) self.assertTrue(self.log_driver.stop_logging.called) def test__handle_notification_passes_create_events(self): log_obj = mock.Mock() self.agent_ext._handle_notification( self.context, 'log', [log_obj], events.CREATED) self.assertTrue(self.log_driver.start_logging.called) def test__handle_notification_passes_delete_events(self): log_obj = mock.Mock() self.agent_ext._handle_notification( self.context, 'log', [log_obj], events.DELETED) self.assertTrue(self.log_driver.stop_logging.called) def test_add_router(self): self.agent_ext.add_router(self.context, self.router) self.log_driver.start_logging.assert_called_once_with( self.context, router_info=self.router_info) def test_update_router(self): self.agent_ext.update_router(self.context, self.router) self.log_driver.start_logging.assert_called_once_with( self.context, router_info=self.router_info) def test_delete_router(self): router_delete = {'id': _uuid()} self.agent_ext.delete_router(self.context, router_delete) self.log_driver.stop_logging.assert_called_once_with( self.context, router_info=router_delete) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/logapi/agent/test_log_extension.py0000644000175000017500000001242300000000000031442 0ustar00coreycorey00000000000000# Copyright (C) 2017 Fujitsu Limited # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
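# --- Editor's note: illustrative sketch, not part of the original module.
# This file and the l3 variant above both exercise one dispatch rule in the
# logging agent extension: CREATED events and enabled UPDATED events start
# logging, while DELETED events and disabled UPDATED events stop it. A
# minimal stdlib-only restatement of that rule (function name hypothetical):
def _sketch_log_dispatch(event, enabled):
    """Return 'start_logging' or 'stop_logging' for a pushed log object."""
    if event == 'DELETED' or (event == 'UPDATED' and not enabled):
        return 'stop_logging'
    return 'start_logging'
# --- end editor's note.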
import mock from neutron_lib import context from oslo_utils import uuidutils from neutron.api.rpc.callbacks.consumer import registry from neutron.api.rpc.callbacks import events from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import resources_rpc from neutron.plugins.ml2.drivers.openvswitch.agent import ( ovs_agent_extension_api as ovs_ext_api) from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native import ( ovs_bridge) from neutron.services.logapi.agent import log_extension as log_ext from neutron.tests import base class FakeLogDriver(log_ext.LoggingDriver): SUPPORTED_LOGGING_TYPES = ['security_group'] def initialize(self, resource_rpc, **kwargs): pass def start_logging(self, context, **kwargs): pass def stop_logging(self, context, **kwargs): pass class LoggingExtensionBaseTestCase(base.BaseTestCase): def setUp(self): super(LoggingExtensionBaseTestCase, self).setUp() conn_patcher = mock.patch( 'neutron.agent.ovsdb.impl_idl._connection') conn_patcher.start() self.agent_ext = log_ext.LoggingExtension() self.context = context.get_admin_context() self.connection = mock.Mock() os_ken_app = mock.Mock() agent_api = ovs_ext_api.OVSAgentExtensionAPI( ovs_bridge.OVSAgentBridge('br-int', os_ken_app=os_ken_app), ovs_bridge.OVSAgentBridge('br-tun', os_ken_app=os_ken_app), {'physnet1': ovs_bridge.OVSAgentBridge( 'br-physnet1', os_ken_app=os_ken_app)}) self.agent_ext.consume_api(agent_api) mock.patch( 'neutron.manager.NeutronManager.load_class_for_provider').start() class LoggingExtensionTestCase(LoggingExtensionBaseTestCase): def setUp(self): super(LoggingExtensionTestCase, self).setUp() self.agent_ext.initialize( self.connection, constants.EXTENSION_DRIVER_TYPE) self.log_driver = mock.Mock() log_driver_object = FakeLogDriver() self.log_driver.defer_apply.side_effect = log_driver_object.defer_apply self.agent_ext.log_driver = self.log_driver def _create_test_port_dict(self, device_owner): return {'port_id': uuidutils.generate_uuid(), 'device_owner': device_owner} def test__handle_notification_passes_update_events_enabled_log(self): log_obj = mock.Mock() log_obj.enabled = True self.agent_ext._handle_notification( self.context, 'log', [log_obj], events.UPDATED) self.assertTrue(self.log_driver.start_logging.called) def test__handle_notification_passes_update_events_disabled_log(self): log_obj = mock.Mock() log_obj.enabled = False self.agent_ext._handle_notification( self.context, 'log', [log_obj], events.UPDATED) self.assertTrue(self.log_driver.stop_logging.called) def test__handle_notification_passes_create_events(self): log_obj = mock.Mock() self.agent_ext._handle_notification( self.context, 'log', [log_obj], events.CREATED) self.assertTrue(self.log_driver.start_logging.called) def test__handle_notification_passes_delete_events(self): log_obj = mock.Mock() self.agent_ext._handle_notification( self.context, 'log', [log_obj], events.DELETED) self.assertTrue(self.log_driver.stop_logging.called) def test_handle_port_vm(self): port = self._create_test_port_dict(device_owner='compute:nova') self.agent_ext.handle_port(self.context, port) self.assertTrue(self.log_driver.start_logging.called) def test_handle_not_port_vm(self): port = self._create_test_port_dict( device_owner='network:router_interface') self.agent_ext.handle_port(self.context, port) self.assertFalse(self.log_driver.start_logging.called) class LoggingExtensionInitializeTestCase(LoggingExtensionBaseTestCase): 
    @mock.patch.object(registry, 'register')
    @mock.patch.object(resources_rpc, 'ResourcesPushRpcCallback')
    def test_initialize_subscribed_to_rpc(self, rpc_mock, subscribe_mock):
        self.agent_ext.initialize(
            self.connection, constants.EXTENSION_DRIVER_TYPE)
        self.connection.create_consumer.assert_has_calls(
            [mock.call(
                resources_rpc.resource_type_versioned_topic(resource_type),
                [rpc_mock()],
                fanout=True)
             for resource_type in self.agent_ext.SUPPORTED_RESOURCE_TYPES]
        )
        subscribe_mock.assert_called_with(mock.ANY,
                                          resources.LOGGING_RESOURCE)


# ===== neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/logapi/base.py =====

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock

from neutron.api.rpc.callbacks.consumer import registry as cons_registry
from neutron.api.rpc.callbacks.producer import registry as prod_registry
from neutron.api.rpc.callbacks import resource_manager
from neutron.tests.unit import testlib_api


class BaseLogTestCase(testlib_api.SqlTestCase):

    def setUp(self):
        super(BaseLogTestCase, self).setUp()

        with mock.patch.object(
                resource_manager.ResourceCallbacksManager, '_singleton',
                new_callable=mock.PropertyMock(return_value=False)):
            self.cons_mgr = resource_manager.ConsumerResourceCallbacksManager()
            self.prod_mgr = resource_manager.ProducerResourceCallbacksManager()
            for mgr in (self.cons_mgr, self.prod_mgr):
                mgr.clear()

        mock.patch.object(
            cons_registry, '_get_manager', return_value=self.cons_mgr).start()

        mock.patch.object(
            prod_registry, '_get_manager', return_value=self.prod_mgr).start()


# ===== neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/logapi/common/__init__.py (empty) =====
# ===== neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/logapi/common/test_db_api.py =====

# Copyright (c) 2017 Fujitsu Limited
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib import constants as const from neutron_lib import context from neutron_lib.services.logapi import constants as log_const from neutron_lib.utils import net as net_utils from oslo_utils import uuidutils from neutron.objects.logapi import logging_resource as log_object from neutron.services.logapi.common import db_api from neutron.services.logapi.common import validators from neutron.services.logapi.rpc import server as server_rpc from neutron.tests.unit.extensions import test_securitygroup as test_sg def _create_log(tenant_id, resource_id=None, target_id=None, event='ALL', enabled=True,): log_data = { 'id': uuidutils.generate_uuid(), 'name': 'test', 'resource_type': 'security_group', 'project_id': tenant_id, 'event': event, 'enabled': enabled} if resource_id: log_data['resource_id'] = resource_id if target_id: log_data['target_id'] = target_id return log_object.Log(**log_data) class LoggingDBApiTestCase(test_sg.SecurityGroupDBTestCase): def setUp(self): super(LoggingDBApiTestCase, self).setUp() self.context = context.get_admin_context() self.sg_id, self.port_id, self.tenant_id = self._create_sg_and_port() self.context.tenant_id = self.tenant_id def _create_sg_and_port(self): with self.network() as network, \ self.subnet(network), \ self.security_group() as sg: sg_id = sg['security_group']['id'] tenant_id = sg['security_group']['tenant_id'] res = self._create_port( self.fmt, network['network']['id'], security_groups=[sg_id]) ports_rest = self.deserialize(self.fmt, res) port_id = ports_rest['port']['id'] return sg_id, port_id, tenant_id def test_get_logs_bound_port(self): log = _create_log(target_id=self.port_id, tenant_id=self.tenant_id) with mock.patch.object(log_object.Log, 'get_objects', return_value=[log]): self.assertEqual( [log], db_api.get_logs_bound_port(self.context, self.port_id)) # Test get log objects with required resource type calls = [mock.call(self.context, project_id=self.tenant_id, resource_type=log_const.SECURITY_GROUP, enabled=True)] log_object.Log.get_objects.assert_has_calls(calls) def test_get_logs_not_bound_port(self): fake_sg_id = uuidutils.generate_uuid() log = _create_log(resource_id=fake_sg_id, tenant_id=self.tenant_id) with mock.patch.object(log_object.Log, 'get_objects', return_value=[log]): self.assertEqual( [], db_api.get_logs_bound_port(self.context, self.port_id)) # Test get log objects with required resource type calls = [mock.call(self.context, project_id=self.tenant_id, resource_type=log_const.SECURITY_GROUP, enabled=True)] log_object.Log.get_objects.assert_has_calls(calls) def test_get_logs_bound_sg(self): log = _create_log(resource_id=self.sg_id, tenant_id=self.tenant_id) with mock.patch.object(log_object.Log, 'get_objects', return_value=[log]): self.assertEqual( [log], db_api.get_logs_bound_sg(self.context, self.sg_id)) # Test get log objects with required resource type calls = [mock.call(self.context, project_id=self.tenant_id, resource_type=log_const.SECURITY_GROUP, enabled=True)] log_object.Log.get_objects.assert_has_calls(calls) def test_get_logs_not_bound_sg(self): with self.network() as network, \ self.subnet(network), \ self.security_group() as sg: sg2_id = sg['security_group']['id'] res = self._create_port( self.fmt, network['network']['id'], security_groups=[sg2_id]) port2_id = self.deserialize(self.fmt, res)['port']['id'] log = _create_log(target_id=port2_id, tenant_id=self.tenant_id) with 
mock.patch.object(log_object.Log, 'get_objects', return_value=[log]): self.assertEqual( [], db_api.get_logs_bound_sg(self.context, self.sg_id)) # Test get log objects with required resource type calls = [mock.call(self.context, project_id=self.tenant_id, resource_type=log_const.SECURITY_GROUP, enabled=True)] log_object.Log.get_objects.assert_has_calls(calls) def test__get_ports_being_logged(self): log1 = _create_log(target_id=self.port_id, tenant_id=self.tenant_id) log2 = _create_log(resource_id=self.sg_id, tenant_id=self.tenant_id) log3 = _create_log(target_id=self.port_id, resource_id=self.tenant_id, tenant_id=self.tenant_id) log4 = _create_log(tenant_id=self.tenant_id) with mock.patch.object( validators, 'validate_log_type_for_port', return_value=True): ports_log1 = db_api._get_ports_being_logged(self.context, log1) ports_log2 = db_api._get_ports_being_logged(self.context, log2) ports_log3 = db_api._get_ports_being_logged(self.context, log3) ports_log4 = db_api._get_ports_being_logged(self.context, log4) self.assertEqual([self.port_id], ports_log1) self.assertEqual([self.port_id], ports_log2) self.assertEqual([self.port_id], ports_log3) self.assertEqual([self.port_id], ports_log4) def test__get_ports_being_logged_not_supported_log_type(self): log = _create_log(tenant_id=self.tenant_id) with mock.patch.object( validators, 'validate_log_type_for_port', return_value=False): ports_log = db_api._get_ports_being_logged(self.context, log) self.assertEqual([], ports_log) class LoggingRpcCallbackTestCase(test_sg.SecurityGroupDBTestCase): def setUp(self): super(LoggingRpcCallbackTestCase, self).setUp() self.context = context.get_admin_context() self.rpc_callback = server_rpc.LoggingApiSkeleton() def test_get_sg_log_info_for_create_or_update_log(self): with self.network() as network, \ self.subnet(network), \ self.security_group() as sg: sg_id = sg['security_group']['id'] tenant_id = sg['security_group']['tenant_id'] rule1 = self._build_security_group_rule( sg_id, 'ingress', const.PROTO_NAME_TCP, '22', '22', ) rule2 = self._build_security_group_rule( sg_id, 'egress', const.PROTO_NAME_TCP, remote_ip_prefix='10.0.0.1', ) rules = { 'security_group_rules': [rule1['security_group_rule'], rule2['security_group_rule']]} self._create_security_group_rule(self.fmt, rules) res = self._create_port( self.fmt, network['network']['id'], security_groups=[sg_id]) ports_rest = self.deserialize(self.fmt, res) port_id = ports_rest['port']['id'] log = _create_log(resource_id=sg_id, tenant_id=tenant_id) with mock.patch.object( server_rpc, 'get_rpc_method', return_value=server_rpc.get_sg_log_info_for_log_resources ): with mock.patch.object(validators, 'validate_log_type_for_port', return_value=True): ports_log = ( self.rpc_callback.get_sg_log_info_for_log_resources( self.context, resource_type=log_const.SECURITY_GROUP, log_resources=[log]) ) expected = [{ 'event': log.event, 'id': log.id, 'ports_log': [{ 'port_id': port_id, 'security_group_rules': [ {'direction': 'egress', 'ethertype': u'IPv4', 'security_group_id': sg_id}, {'direction': 'egress', 'ethertype': u'IPv6', 'security_group_id': sg_id}, {'direction': 'ingress', 'ethertype': u'IPv4', 'port_range_max': 22, 'port_range_min': 22, 'protocol': u'tcp', 'security_group_id': sg_id}, {'direction': 'egress', 'ethertype': u'IPv4', 'protocol': u'tcp', 'dest_ip_prefix': net_utils.AuthenticIPNetwork( '10.0.0.1/32'), 'security_group_id': sg_id}] }], 'project_id': tenant_id }] self.assertEqual(expected, ports_log) self._delete('ports', port_id) def 
test_get_sg_log_info_for_port_added_event(self): with self.network() as network, \ self.subnet(network), \ self.security_group() as sg: sg_id = sg['security_group']['id'] tenant_id = sg['security_group']['tenant_id'] rule1 = self._build_security_group_rule( sg_id, 'ingress', const.PROTO_NAME_TCP, '11', '13', remote_ip_prefix='10.0.0.1', ) rule2 = self._build_security_group_rule( sg_id, 'egress', const.PROTO_NAME_ICMP, ) rules = { 'security_group_rules': [rule1['security_group_rule'], rule2['security_group_rule']]} self._create_security_group_rule(self.fmt, rules) res = self._create_port( self.fmt, network['network']['id'], security_groups=[sg_id], tenant_id=tenant_id ) ports_rest = self.deserialize(self.fmt, res) port_id = ports_rest['port']['id'] log = _create_log(tenant_id=tenant_id) with mock.patch.object( log_object.Log, 'get_objects', return_value=[log]): with mock.patch.object( server_rpc, 'get_rpc_method', return_value=server_rpc.get_sg_log_info_for_port ): with mock.patch.object( validators, 'validate_log_type_for_port', return_value=True): ports_log = ( self.rpc_callback.get_sg_log_info_for_port( self.context, resource_type=log_const.SECURITY_GROUP, port_id=port_id) ) expected = [{ 'event': log.event, 'id': log.id, 'ports_log': [{ 'port_id': port_id, 'security_group_rules': [ {'direction': 'egress', 'ethertype': u'IPv4', 'security_group_id': sg_id}, {'direction': 'egress', 'ethertype': u'IPv6', 'security_group_id': sg_id}, {'direction': 'ingress', 'ethertype': u'IPv4', 'port_range_max': 13, 'port_range_min': 11, 'protocol': u'tcp', 'source_ip_prefix': net_utils.AuthenticIPNetwork( '10.0.0.1/32'), 'security_group_id': sg_id}, {'direction': 'egress', 'ethertype': u'IPv4', 'protocol': u'icmp', 'security_group_id': sg_id}] }], 'project_id': tenant_id }] self.assertEqual(expected, ports_log) self._delete('ports', port_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/logapi/common/test_sg_callback.py0000644000175000017500000000441300000000000031204 0ustar00coreycorey00000000000000# Copyright (c) 2018 Fujitsu Limited # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
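# --- Editor's note: illustrative sketch, not part of the original module.
# The test below checks that a handler registered for SECURITY_GROUP_RULE
# events fires for that resource and stays silent for others. A tiny
# stdlib-only registry with the same observable behaviour (all names are
# hypothetical, not neutron_lib's callback API):
class _SketchRegistry(object):
    """Map resource name -> list of handler callables."""

    def __init__(self):
        self._handlers = {}

    def subscribe(self, resource, handler):
        self._handlers.setdefault(resource, []).append(handler)

    def notify(self, resource, event, payload):
        # Only handlers subscribed to this exact resource are invoked.
        for handler in self._handlers.get(resource, []):
            handler(resource, event, payload)
# --- end editor's note.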
import mock
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources

from neutron.services.logapi.common import sg_callback
from neutron.services.logapi.drivers import base as log_driver_base
from neutron.services.logapi.drivers import manager as driver_mgr
from neutron.tests import base

FAKE_DRIVER = None


class FakeDriver(log_driver_base.DriverBase):

    @staticmethod
    def create():
        return FakeDriver(
            name='fake_driver',
            vif_types=[],
            vnic_types=[],
            supported_logging_types=['security_group'],
            requires_rpc=True
        )


def fake_register():
    global FAKE_DRIVER
    if not FAKE_DRIVER:
        FAKE_DRIVER = FakeDriver.create()
    driver_mgr.register(resources.SECURITY_GROUP_RULE,
                        sg_callback.SecurityGroupRuleCallBack)


class TestSecurityGroupRuleCallback(base.BaseTestCase):

    def setUp(self):
        super(TestSecurityGroupRuleCallback, self).setUp()
        self.driver_manager = driver_mgr.LoggingServiceDriverManager()

    @mock.patch.object(sg_callback.SecurityGroupRuleCallBack, 'handle_event')
    def test_handle_event(self, mock_sg_cb):
        fake_register()
        self.driver_manager.register_driver(FAKE_DRIVER)

        registry.notify(
            resources.SECURITY_GROUP_RULE, events.AFTER_CREATE, mock.ANY)
        mock_sg_cb.assert_called_once_with(
            resources.SECURITY_GROUP_RULE, events.AFTER_CREATE, mock.ANY)

        mock_sg_cb.reset_mock()
        registry.notify('fake_resource', events.AFTER_DELETE, mock.ANY)
        mock_sg_cb.assert_not_called()


# ===== neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/logapi/common/test_sg_validate.py =====

# Copyright (c) 2018 Fujitsu Limited
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
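# --- Editor's note: illustrative sketch, not part of the original module.
# The validation tests below route a log-create request to a validator
# registered per resource_type. A stdlib-only sketch of that dispatch,
# assuming validators are plain callables keyed by resource_type (all
# names here are hypothetical):
def _sketch_validate_request(validators_map, log_data):
    """Invoke the validator registered for log_data['resource_type']."""
    resource_type = log_data['resource_type']
    try:
        validate = validators_map[resource_type]
    except KeyError:
        # The real plugin raises a LoggingTypeNotSupported-style error.
        raise ValueError('unsupported logging type: %s' % resource_type)
    return validate(log_data)
# --- end editor's note.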
import mock from neutron_lib.plugins import directory from oslo_utils import importutils from sqlalchemy.orm import exc as orm_exc from neutron.objects import ports from neutron.objects import securitygroup as sg_object from neutron.services.logapi.common import exceptions as log_exc from neutron.services.logapi.common import validators from neutron.tests import base class FakePlugin(object): def __init__(self): self.validator_mgr = validators.ResourceValidateRequest.get_instance() self.supported_logging_types = ['security_group'] class TestSGLogRequestValidations(base.BaseTestCase): """Test validation for a request""" def setUp(self): self.log_plugin = FakePlugin() importutils.import_module('neutron.services.logapi.common.sg_validate') super(TestSGLogRequestValidations, self).setUp() def test_validate_request_resource_id_not_exists(self): log_data = {'resource_type': 'security_group', 'resource_id': 'fake_sg_id'} with mock.patch.object(directory, 'get_plugin', return_value=self.log_plugin): with mock.patch.object(sg_object.SecurityGroup, 'count', return_value=0): self.assertRaises( log_exc.ResourceNotFound, self.log_plugin.validator_mgr.validate_request, mock.ANY, log_data) def test_validate_request_target_id_not_exists(self): log_data = {'resource_type': 'security_group', 'target_id': 'fake_port_id'} with mock.patch.object(directory, 'get_plugin', return_value=self.log_plugin): with mock.patch.object(ports.Port, 'get_object', return_value=None): self.assertRaises( log_exc.TargetResourceNotFound, self.log_plugin.validator_mgr.validate_request, mock.ANY, log_data) def test_validate_request_unsupported_logging_type(self): log_data = {'resource_type': 'security_group', 'target_id': 'fake_port_id'} with mock.patch.object(directory, 'get_plugin', return_value=self.log_plugin): with mock.patch.object(ports.Port, 'get_object', return_value=mock.ANY): with mock.patch.object(validators, 'validate_log_type_for_port', return_value=False): self.assertRaises( log_exc.LoggingTypeNotSupported, self.log_plugin.validator_mgr.validate_request, mock.ANY, log_data) def test_validate_request_invalid_resource_constraint(self): log_data = {'resource_type': 'security_group', 'resource_id': 'fake_sg_id', 'target_id': 'fake_port_id'} class FakeFiltered(object): def one(self): raise orm_exc.NoResultFound class FakeSGPortBinding(object): def filter_by(self, security_group_id, port_id): return FakeFiltered() with mock.patch.object(directory, 'get_plugin', return_value=self.log_plugin): with mock.patch.object( sg_object.SecurityGroup, 'count', return_value=1): with mock.patch.object( ports.Port, 'get_object', return_value=mock.ANY): with mock.patch.object(validators, 'validate_log_type_for_port', return_value=True): with mock.patch('neutron.db._utils.model_query', return_value=FakeSGPortBinding()): self.assertRaises( log_exc.InvalidResourceConstraint, self.log_plugin.validator_mgr.validate_request, mock.ANY, log_data) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/logapi/common/test_snat_validate.py0000644000175000017500000001165000000000000031576 0ustar00coreycorey00000000000000# Copyright (c) 2018 Fujitsu Limited # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib.plugins import directory from oslo_utils import importutils from neutron.objects import router as router_obj from neutron.services.logapi.common import exceptions as log_exc from neutron.services.logapi.common import validators from neutron.tests import base class FakePlugin(object): def __init__(self): self.validator_mgr = validators.ResourceValidateRequest.get_instance() self.supported_logging_types = ['snat'] class TestSnatLogRequestValidations(base.BaseTestCase): """Test validation for SNAT log request""" def setUp(self): self.log_plugin = FakePlugin() importutils.import_module('neutron.services.logapi.common.' 'snat_validate') super(TestSnatLogRequestValidations, self).setUp() def test_validate_request_resource_id_not_specific(self): log_data = {'resource_type': 'snat'} with mock.patch.object(directory, 'get_plugin', return_value=self.log_plugin): with mock.patch.object(router_obj.Router, 'get_object', return_value=mock.ANY): self.assertRaises( log_exc.ResourceIdNotSpecified, self.log_plugin.validator_mgr.validate_request, mock.ANY, log_data) def test_validate_request_resource_id_not_exists(self): log_data = {'resource_type': 'snat', 'resource_id': 'fake_router_id'} with mock.patch.object(directory, 'get_plugin', return_value=self.log_plugin): with mock.patch.object(router_obj.Router, 'get_object', return_value=None): self.assertRaises( log_exc.ResourceNotFound, self.log_plugin.validator_mgr.validate_request, mock.ANY, log_data) def test_validate_request_with_disable_events(self): log_data_1 = {'resource_type': 'snat', 'resource_id': 'fake_router_id_1', 'event': 'ACCEPT'} log_data_2 = {'resource_type': 'snat', 'resource_id': 'fake_router_id_2', 'event': 'DROP'} with mock.patch.object(directory, 'get_plugin', return_value=self.log_plugin): with mock.patch.object(router_obj.Router, 'get_object', return_value=mock.ANY): self.assertRaises( log_exc.EventsDisabled, self.log_plugin.validator_mgr.validate_request, mock.ANY, log_data_1) self.assertRaises( log_exc.EventsDisabled, self.log_plugin.validator_mgr.validate_request, mock.ANY, log_data_2) def test_validate_request_with_snat_disable(self): log_data = {'resource_type': 'snat', 'resource_id': 'fake_router_id'} f_router = mock.Mock() f_router.enable_snat = False with mock.patch.object(directory, 'get_plugin', return_value=self.log_plugin): with mock.patch.object(router_obj.Router, 'get_object', return_value=f_router): self.assertRaises( log_exc.RouterNotEnabledSnat, self.log_plugin.validator_mgr.validate_request, mock.ANY, log_data) def test_validate_request_with_not_set_gw_port(self): log_data = {'resource_type': 'snat', 'resource_id': 'fake_router_id'} f_router = mock.Mock() f_router.enable_snat = True f_router.gw_port_id = None with mock.patch.object(directory, 'get_plugin', return_value=self.log_plugin): with mock.patch.object(router_obj.Router, 'get_object', return_value=f_router): self.assertRaises( log_exc.RouterGatewayNotSet, self.log_plugin.validator_mgr.validate_request, mock.ANY, log_data) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 
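# NOTE: test_snat_validate.py above asserts a chain of preconditions, each
# mapped to its own exception. A condensed sketch of that validation flow
# (the check ordering and all names here are simplified stand-ins for the
# real logapi validator, not its actual implementation):

class ResourceIdNotSpecified(Exception):
    pass


class ResourceNotFound(Exception):
    pass


class EventsDisabled(Exception):
    pass


class RouterNotEnabledSnat(Exception):
    pass


class RouterGatewayNotSet(Exception):
    pass


def validate_snat_request(log_data, get_router):
    """Validate an SNAT log request the way the tests above expect."""
    router_id = log_data.get('resource_id')
    if router_id is None:
        raise ResourceIdNotSpecified()
    # Only the ALL event is accepted for SNAT logging; ACCEPT and DROP
    # requests are rejected up front, per test_validate_request_with_
    # disable_events.
    if log_data.get('event', 'ALL') != 'ALL':
        raise EventsDisabled()
    router = get_router(router_id)
    if router is None:
        raise ResourceNotFound()
    if not router.enable_snat:
        raise RouterNotEnabledSnat()
    if router.gw_port_id is None:
        raise RouterGatewayNotSet()


class _Router(object):
    enable_snat = True
    gw_port_id = None


try:
    validate_snat_request({'resource_type': 'snat',
                           'resource_id': 'fake_router_id'},
                          lambda rid: _Router())
except RouterGatewayNotSet:
    pass  # expected: SNAT enabled but no gateway port set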
neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/logapi/common/test_validators.py0000644000175000017500000001131100000000000031122 0ustar00coreycorey00000000000000# Copyright (c) 2017 Fujitsu Limited # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib.api.definitions import portbindings from neutron_lib import context from neutron_lib.plugins import constants as plugin_const from neutron_lib.plugins import directory from oslo_utils import uuidutils from neutron.objects import ports from neutron.services.logapi.common import validators from neutron.tests import base from neutron.tests.unit.services.logapi.drivers import ( test_manager as drv_mgr) class TestRegisterValidateMethods(base.BaseTestCase): def setUp(self): self.validator_mgr = validators.ResourceValidateRequest.get_instance() super(TestRegisterValidateMethods, self).setUp() def test_register_validate_method(self): self.validator_mgr.validate_methods.clear() resource_type = 'fake_resource' @validators.ResourceValidateRequest.register(resource_type) def fake_method(): pass self.assertEqual({'fake_resource': fake_method}, self.validator_mgr.validate_methods_map) def test_get_validated_method(self): @validators.ResourceValidateRequest.register('fake_resource') def fake_method(): pass actual = self.validator_mgr.get_validated_method('fake_resource') self.assertEqual(fake_method, actual) class TestLogDriversLoggingTypeValidations(drv_mgr.TestLogDriversManagerBase): """Test validation of logging type for a port""" def setUp(self): super(TestLogDriversLoggingTypeValidations, self).setUp() self.ctxt = context.Context('fake_user', 'fake_tenant') def _get_port(self, vif_type, vnic_type): port_id = uuidutils.generate_uuid() port_binding = ports.PortBinding( self.ctxt, port_id=port_id, vif_type=vif_type, vnic_type=vnic_type) return ports.Port( self.ctxt, id=uuidutils.generate_uuid(), bindings=[port_binding]) def _test_validate_log_type_for_port(self, port, expected_result): driver_manager = self._create_manager_with_drivers({ 'driver-A': { 'is_loaded': True, 'supported_logging_types': ['security_group'], 'vif_types': [portbindings.VIF_TYPE_OVS], 'vnic_types': [portbindings.VNIC_NORMAL] } }) is_log_type_supported_mock = mock.Mock() if expected_result: is_log_type_supported_mock.return_value = expected_result log_driver = list(driver_manager.drivers)[0] log_driver.is_logging_type_supported = ( is_log_type_supported_mock ) class FakeLoggingPlugin(object): def __init__(self): self.driver_manager = driver_manager directory.add_plugin(plugin_const.LOG_API, FakeLoggingPlugin()) self.assertEqual( expected_result, validators.validate_log_type_for_port('security_group', port)) if expected_result: is_log_type_supported_mock.assert_called_once_with( 'security_group') else: is_log_type_supported_mock.assert_not_called() def test_validate_log_type_for_port_vif_type_supported(self): port = self._get_port( portbindings.VIF_TYPE_OVS, portbindings.VNIC_NORMAL) self._test_validate_log_type_for_port( port, 
expected_result=True) def test_validate_log_type_for_port_vif_type_not_supported(self): port = self._get_port( portbindings.VIF_TYPE_OTHER, portbindings.VNIC_NORMAL) self._test_validate_log_type_for_port( port, expected_result=False) def test_validate_log_type_for_port_unbound_vnic_type_supported(self): port = self._get_port( portbindings.VIF_TYPE_UNBOUND, portbindings.VNIC_NORMAL) self._test_validate_log_type_for_port( port, expected_result=True) def test_validate_log_type_for_port_unbound_vnic_type_not_supported(self): port = self._get_port( portbindings.VIF_TYPE_UNBOUND, portbindings.VNIC_BAREMETAL) self._test_validate_log_type_for_port( port, expected_result=False) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.483046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/logapi/drivers/0000755000175000017500000000000000000000000025532 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/logapi/drivers/__init__.py0000644000175000017500000000000000000000000027631 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.483046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/logapi/drivers/openvswitch/0000755000175000017500000000000000000000000030103 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/logapi/drivers/openvswitch/__init__.py0000644000175000017500000000000000000000000032202 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/logapi/drivers/openvswitch/test_ovs_firewall_log.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/logapi/drivers/openvswitch/test_ovs_firewall_l0000644000175000017500000003060600000000000034101 0ustar00coreycorey00000000000000# Copyright (c) 2017 Fujitsu Limited # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
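# NOTE: The OVS firewall logging tests that follow revolve around per-port
# flow cookies that hold references to the log objects using them. A small
# sketch of that reference-counting idea, mirroring what TestCookie asserts
# (illustrative only; the real Cookie class lives in
# neutron.services.logapi.drivers.openvswitch.ovs_firewall_log):

class SketchCookie(object):
    def __init__(self, cookie_id, port, action, project):
        self.id = cookie_id
        self.port = port
        self.action = action
        self.project = project
        self.log_object_refs = set()

    def add_log_obj_ref(self, log_id):
        self.log_object_refs.add(log_id)

    def remove_log_obj_ref(self, log_id):
        self.log_object_refs.discard(log_id)

    @property
    def is_empty(self):
        # A cookie with no remaining log references can be reclaimed,
        # which is what TestCookie.test_is_empty checks.
        return not self.log_object_refs


_c = SketchCookie('cid', 'port-1', 'ACCEPT', 'proj-1')
_c.add_log_obj_ref('log-1')
_c.remove_log_obj_ref('log-1')
assert _c.is_empty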
import mock from neutron_lib import constants from oslo_config import cfg from oslo_utils import uuidutils from neutron.objects.logapi import logging_resource as log_object from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \ as ovs_consts from neutron.services.logapi.common import exceptions as log_exc from neutron.services.logapi.drivers.openvswitch \ import ovs_firewall_log as ovsfw_log from neutron.services.logapi.rpc import agent as agent_rpc from neutron.tests import base from neutron.tests import tools COOKIE_ID = uuidutils.generate_uuid() PORT_ID = uuidutils.generate_uuid() PROJECT_ID = uuidutils.generate_uuid() ACTION = tools.get_random_security_event() LOG_ID = uuidutils.generate_uuid() SG_ID = uuidutils.generate_uuid() REMOTE_SG_ID = uuidutils.generate_uuid() FakeSGLogInfo = [ { 'id': LOG_ID, 'ports_log': [{'port_id': PORT_ID, 'security_group_rules': [ {'ethertype': constants.IPv4, 'protocol': constants.PROTO_NAME_TCP, 'direction': constants.INGRESS_DIRECTION, 'port_range_min': 123, 'port_range_max': 123, 'security_group_id': SG_ID}, {'ethertype': constants.IPv4, 'protocol': constants.PROTO_NAME_UDP, 'direction': constants.EGRESS_DIRECTION, 'security_group_id': SG_ID}, {'ethertype': constants.IPv6, 'protocol': constants.PROTO_NAME_TCP, 'remote_group_id': REMOTE_SG_ID, 'direction': constants.EGRESS_DIRECTION, 'security_group_id': SG_ID} ]}], 'event': 'ALL', 'project_id': PROJECT_ID, } ] def set_log_driver_config(ctrl_rate_limit, ctrl_burst_limit): cfg.CONF.set_override('rate_limit', ctrl_rate_limit, group='network_log') cfg.CONF.set_override('burst_limit', ctrl_burst_limit, group='network_log') class TestCookie(base.BaseTestCase): def setUp(self): super(TestCookie, self).setUp() self.cookie = ovsfw_log.Cookie(COOKIE_ID, PORT_ID, ACTION, PROJECT_ID) self.cookie.log_object_refs = set([LOG_ID]) def test_add_log_object_refs(self): new_log_id = uuidutils.generate_uuid() expected = set([LOG_ID, new_log_id]) self.cookie.add_log_obj_ref(new_log_id) self.assertEqual(expected, self.cookie.log_object_refs) def test_removed_log_object_ref(self): expected = set() self.cookie.remove_log_obj_ref(LOG_ID) self.assertEqual(expected, self.cookie.log_object_refs) def test_is_empty(self): self.cookie.remove_log_obj_ref(LOG_ID) result = self.cookie.is_empty self.assertTrue(result) class FakeOVSPort(object): def __init__(self, name, port, mac): self.port_name = name self.ofport = port self.vif_mac = mac class TestOVSFirewallLoggingDriver(base.BaseTestCase): def setUp(self): super(TestOVSFirewallLoggingDriver, self).setUp() self.log_driver = ovsfw_log.OVSFirewallLoggingDriver(mock.Mock()) resource_rpc_mock = mock.patch.object( agent_rpc, 'LoggingApiStub', autospec=True).start() self.log_driver.start_logapp = mock.Mock() self.log_driver.initialize(resource_rpc_mock) self.log_driver.SUPPORTED_LOGGING_TYPES = ['security_group'] self.mock_bridge = self.log_driver.int_br self.mock_bridge.reset_mock() self.fake_ovs_port = FakeOVSPort('port', 1, '00:00:00:00:00:00') self.mock_bridge.br.get_vif_port_by_id.return_value = \ self.fake_ovs_port log_data = { 'context': None, 'name': 'test1', 'id': LOG_ID, 'project_id': PROJECT_ID, 'event': 'ALL', 'resource_type': 'security_group' } self.log_resource = log_object.Log(**log_data) @property def port_ofport(self): return self.mock_bridge.br.get_vif_port_by_id.return_value.ofport @property def port_mac(self): return self.mock_bridge.br.get_vif_port_by_id.return_value.vif_mac def test_initialize_bridge(self): br = 
self.log_driver.initialize_bridge(self.mock_bridge) self.assertEqual(self.mock_bridge.deferred.return_value, br) def test_set_controller_rate_limit(self): set_log_driver_config(100, 25) self.log_driver.initialize_bridge(self.mock_bridge) expected_calls = [mock.call.set_controller_rate_limit(100), mock.call.set_controller_burst_limit(25)] self.mock_bridge.assert_has_calls(expected_calls) def test_generate_cookie(self): cookie_id = self.log_driver.generate_cookie( PORT_ID, ACTION, LOG_ID, PROJECT_ID) cookie = self.log_driver._get_cookie_by_id(cookie_id) self.assertIn(cookie, self.log_driver.cookies_table) def test__get_cookie_by_id_not_found(self): cookie_id = uuidutils.generate_uuid() cookie = ovsfw_log.Cookie(cookie_id=uuidutils.generate_uuid(), port=PORT_ID, action=ACTION, project=PROJECT_ID) self.log_driver.cookies_table = set([cookie]) self.assertRaises(log_exc.CookieNotFound, self.log_driver._get_cookie_by_id, cookie_id) def test_start_log_with_update_or_create_log_event(self): context = mock.Mock() log_data = {'log_resources': [self.log_resource]} self.log_driver.resource_rpc.get_sg_log_info_for_log_resources.\ return_value = FakeSGLogInfo self.log_driver.start_logging(context, **log_data) accept_cookie = self.log_driver._get_cookie(PORT_ID, 'ACCEPT') drop_cookie = self.log_driver._get_cookie(PORT_ID, 'DROP') conj_id = self.log_driver.conj_id_map.get_conj_id( SG_ID, REMOTE_SG_ID, constants.EGRESS_DIRECTION, constants.IPv6) add_rules = [ # log ingress tcp port=123 mock.call( actions='controller', cookie=accept_cookie.id, reg5=self.port_ofport, dl_type="0x{:04x}".format(constants.ETHERTYPE_IP), nw_proto=constants.PROTO_NUM_TCP, priority=77, table=ovs_consts.ACCEPTED_INGRESS_TRAFFIC_TABLE, tcp_dst='0x007b'), # log egress tcp6 mock.call( actions='resubmit(,%d),controller' % ( ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE), cookie=accept_cookie.id, reg5=self.port_ofport, dl_type="0x{:04x}".format(constants.ETHERTYPE_IPV6), priority=70, reg7=conj_id + 1, table=ovs_consts.ACCEPTED_EGRESS_TRAFFIC_TABLE), # log egress udp mock.call( actions='resubmit(,%d),controller' % ( ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE), cookie=accept_cookie.id, reg5=self.port_ofport, dl_type="0x{:04x}".format(constants.ETHERTYPE_IP), nw_proto=constants.PROTO_NUM_UDP, priority=77, table=ovs_consts.ACCEPTED_EGRESS_TRAFFIC_TABLE, ), # log drop mock.call( actions='controller', cookie=drop_cookie.id, priority=53, reg5=self.port_ofport, table=ovs_consts.DROPPED_TRAFFIC_TABLE, ) ] self.mock_bridge.br.add_flow.assert_has_calls( add_rules, any_order=True) def test_stop_log_with_delete_log_event(self): context = mock.Mock() log_data = {'log_resources': [self.log_resource]} self.log_driver.resource_rpc.get_sg_log_info_for_log_resources.\ return_value = FakeSGLogInfo self.log_driver.start_logging(context, **log_data) accept_cookie = self.log_driver._get_cookie(PORT_ID, 'ACCEPT') drop_cookie = self.log_driver._get_cookie(PORT_ID, 'DROP') self.mock_bridge.reset_mock() self.log_driver.stop_logging(context, **log_data) delete_rules = [ # delete drop flow mock.call( table=ovs_consts.DROPPED_TRAFFIC_TABLE, cookie=drop_cookie.id ), # delete accept flows mock.call( table=ovs_consts.ACCEPTED_EGRESS_TRAFFIC_TABLE, cookie=accept_cookie.id ), mock.call( table=ovs_consts.ACCEPTED_INGRESS_TRAFFIC_TABLE, cookie=accept_cookie.id ) ] self.mock_bridge.br.delete_flows.assert_has_calls( delete_rules, any_order=True) def test_start_log_with_add_port_event(self): context = mock.Mock() log_data = {'port_id': PORT_ID} 
self.log_driver.resource_rpc.get_sg_log_info_for_port.return_value = \ [ { 'id': uuidutils.generate_uuid(), 'ports_log': [{'port_id': PORT_ID, 'security_group_rules': [ {'ethertype': constants.IPv4, 'protocol': constants.PROTO_NAME_TCP, 'direction': constants.INGRESS_DIRECTION, 'port_range_min': 123, 'port_range_max': 123, 'security_group_id': 456}]}], 'event': 'ACCEPT', 'project_id': PROJECT_ID, } ] self.log_driver.start_logging(context, **log_data) accept_cookie = self.log_driver._get_cookie(PORT_ID, 'ACCEPT') add_rules = [ # log ingress tcp port=123 mock.call( actions='controller', cookie=accept_cookie.id, reg5=self.port_ofport, dl_type="0x{:04x}".format(constants.ETHERTYPE_IP), nw_proto=constants.PROTO_NUM_TCP, priority=77, table=ovs_consts.ACCEPTED_INGRESS_TRAFFIC_TABLE, tcp_dst='0x007b') ] self.mock_bridge.br.add_flow.assert_has_calls( add_rules, any_order=True) def test_stop_log_with_delete_port_event(self): context = mock.Mock() log_data = {'port_id': PORT_ID} # add port self.log_driver.resource_rpc.get_sg_log_info_for_port.return_value = \ FakeSGLogInfo self.log_driver.start_logging(context, **log_data) accept_cookie = self.log_driver._get_cookie(PORT_ID, 'ACCEPT') drop_cookie = self.log_driver._get_cookie(PORT_ID, 'DROP') self.mock_bridge.reset_mock() # delete port self.log_driver.stop_logging( context, port_id=PORT_ID) delete_rules = [ # delete accept flows mock.call( table=ovs_consts.ACCEPTED_INGRESS_TRAFFIC_TABLE, cookie=accept_cookie.id ), mock.call( table=ovs_consts.ACCEPTED_EGRESS_TRAFFIC_TABLE, cookie=accept_cookie.id ), # delete drop flow mock.call( table=ovs_consts.DROPPED_TRAFFIC_TABLE, cookie=drop_cookie.id ), ] self.mock_bridge.br.delete_flows.assert_has_calls( delete_rules, any_order=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/logapi/drivers/test_base.py0000644000175000017500000000375700000000000030071 0ustar00coreycorey00000000000000# Copyright (C) 2017 Fujitsu Limited # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
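# NOTE: test_base.py below exercises three compatibility predicates on the
# log driver base class. A self-contained sketch of the same contract
# (simplified; the real DriverBase also registers itself with the driver
# manager, which this sketch omits):

class SketchDriverBase(object):
    def __init__(self, name, vif_types, vnic_types,
                 supported_logging_types, requires_rpc=False):
        self.name = name
        self.vif_types = vif_types
        self.vnic_types = vnic_types
        self.supported_logging_types = supported_logging_types
        self.requires_rpc = requires_rpc

    def is_vif_type_compatible(self, vif_type):
        return vif_type in self.vif_types

    def is_vnic_compatible(self, vnic_type):
        return vnic_type in self.vnic_types

    def is_logging_type_supported(self, log_type):
        return log_type in self.supported_logging_types


_driver = SketchDriverBase('fake', ['ovs'], ['normal'], ['security_group'])
assert _driver.is_logging_type_supported('security_group')
assert not _driver.is_logging_type_supported('firewall')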
from neutron_lib.api.definitions import portbindings from neutron.services.logapi.drivers import base as log_base_driver from neutron.tests import base SUPPORTED_LOGGING_TYPES = ['security_group'] class FakeDriver(log_base_driver.DriverBase): @staticmethod def create(): return FakeDriver( name='fake_driver', vif_types=[portbindings.VIF_TYPE_OVS], vnic_types=[portbindings.VNIC_NORMAL], supported_logging_types=SUPPORTED_LOGGING_TYPES, requires_rpc=False ) class TestDriverBase(base.BaseTestCase): def setUp(self): super(TestDriverBase, self).setUp() self.driver = FakeDriver.create() def test_is_vif_type_compatible(self): self.assertFalse( self.driver.is_vif_type_compatible(portbindings.VIF_TYPE_OTHER)) self.assertTrue( self.driver.is_vif_type_compatible(portbindings.VIF_TYPE_OVS)) def test_is_vnic_compatible(self): self.assertFalse( self.driver.is_vnic_compatible(portbindings.VNIC_BAREMETAL)) self.assertTrue( self.driver.is_vnic_compatible(portbindings.VNIC_NORMAL)) def test_is_logging_type_supported(self): self.assertTrue( self.driver.is_logging_type_supported('security_group')) self.assertFalse(self.driver.is_logging_type_supported('firewall')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/logapi/drivers/test_manager.py0000644000175000017500000001713600000000000030565 0ustar00coreycorey00000000000000# Copyright (c) 2017 Fujitsu Limited # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
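# NOTE: test_manager.py below checks that the driver manager keeps only
# loaded drivers and unions their supported logging types. A toy sketch of
# that aggregation (a hypothetical stand-in for
# LoggingServiceDriverManager, not its real implementation):

class SketchDriverManager(object):
    def __init__(self, drivers):
        # Only drivers reporting is_loaded are kept, mirroring
        # TestLogDriversManagerMulti's expectations.
        self.drivers = [d for d in drivers if d.is_loaded]

    @property
    def supported_logging_types(self):
        types = set()
        for driver in self.drivers:
            types |= set(driver.supported_logging_types)
        return types

    def call(self, method_name, **kwargs):
        # Dispatch to every driver; an unknown method is an error (the real
        # manager raises DriverCallError, AttributeError stands in here).
        for driver in self.drivers:
            if not hasattr(driver, method_name):
                raise AttributeError(method_name)
            getattr(driver, method_name)(**kwargs)


class _Drv(object):
    def __init__(self, loaded, types):
        self.is_loaded = loaded
        self.supported_logging_types = types


_mgr = SketchDriverManager([_Drv(True, ['security_group']),
                            _Drv(False, ['firewall'])])
assert _mgr.supported_logging_types == {'security_group'}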
import mock from neutron_lib.callbacks import events from neutron_lib import exceptions from neutron_lib import fixture from neutron_lib.services.logapi import constants as log_const from neutron.services.logapi.common import exceptions as log_exc from neutron.services.logapi.drivers import base as log_driver_base from neutron.services.logapi.drivers import manager as driver_mgr from neutron.tests import tools from neutron.tests.unit.services.logapi import base class TestGetParameter(base.BaseLogTestCase): def test__get_param_missing_parameter(self): kwargs = {'context': mock.sentinel.context} self.assertRaises(log_exc.LogapiDriverException, driver_mgr._get_param, args=[], kwargs=kwargs, name='log_obj', index=1) self.assertRaises(log_exc.LogapiDriverException, driver_mgr._get_param, args=[mock.sentinel.context], kwargs={}, name='log_obj', index=1) self.assertRaises(log_exc.LogapiDriverException, driver_mgr._get_param, args=[], kwargs={'log_obj': mock.sentinel.log_obj}, name='context', index=0) class TestLogDriversManagerBase(base.BaseLogTestCase): def setUp(self): super(TestLogDriversManagerBase, self).setUp() self.config_parse() self.setup_coreplugin(load_plugins=False) @staticmethod def _create_manager_with_drivers(drivers_details): for name, driver_details in drivers_details.items(): class LogDriver(log_driver_base.DriverBase): @property def is_loaded(self): return driver_details['is_loaded'] LogDriver(name, driver_details.get('vif_types', []), driver_details.get('vnic_types', []), driver_details.get('supported_logging_types', [])) return driver_mgr.LoggingServiceDriverManager() class TestLogDriversManagerMulti(TestLogDriversManagerBase): """Test calls happen to all drivers""" def test_driver_manager_empty_with_no_drivers(self): driver_manager = self._create_manager_with_drivers({}) self.assertEqual(0, len(driver_manager.drivers)) def test_driver_manager_empty_with_no_loaded_drivers(self): driver_manager = self._create_manager_with_drivers( {'driver-A': {'is_loaded': False}}) self.assertEqual(0, len(driver_manager.drivers)) def test_driver_manager_with_one_loaded_driver(self): driver_manager = self._create_manager_with_drivers( {'driver-A': {'is_loaded': True}}) self.assertEqual(1, len(driver_manager.drivers)) def test_driver_manager_with_two_loaded_drivers(self): driver_manager = self._create_manager_with_drivers( {'driver-A': {'is_loaded': True}, 'driver-B': {'is_loaded': True}}) self.assertEqual(2, len(driver_manager.drivers)) class TestLogDriversManagerLoggingTypes(TestLogDriversManagerBase): """Test supported logging types""" def test_available_logging_types(self): driver_manager = self._create_manager_with_drivers( {'driver-A': {'is_loaded': True, 'supported_logging_types': ['security_group']}, 'driver-B': {'is_loaded': True, 'supported_logging_types': ['security_group', 'firewall']} }) self.assertEqual(set(['security_group', 'firewall']), driver_manager.supported_logging_types) class TestLogDriversCalls(TestLogDriversManagerBase): """Test log driver calls""" def setUp(self): super(TestLogDriversCalls, self).setUp() self.driver_manager = self._create_manager_with_drivers( {'driver-A': {'is_loaded': True}}) def test_implemented_call_methods(self): for method in log_const.LOG_CALL_METHODS: with mock.patch.object(log_driver_base.DriverBase, method) as \ method_fnc: context = mock.sentinel.context log_obj = mock.sentinel.log_obj self.driver_manager.call( method, context=context, log_objs=[log_obj]) method_fnc.assert_called_once_with( context=context, log_objs=[log_obj]) def 
test_not_implemented_call_methods(self): context = mock.sentinel.context log_obj = mock.sentinel.log_obj self.assertRaises(exceptions.DriverCallError, self.driver_manager.call, 'wrong_method', context=context, log_objs=[log_obj]) class TestHandleResourceCallback(TestLogDriversManagerBase): """Test handle resource callback""" def setUp(self): super(TestHandleResourceCallback, self).setUp() self._cb_mgr = mock.Mock() self.useFixture(fixture.CallbackRegistryFixture( callback_manager=self._cb_mgr)) self.driver_manager = driver_mgr.LoggingServiceDriverManager() def test_subscribe_resources_cb(self): class FakeResourceCB1(driver_mgr.ResourceCallBackBase): def handle_event(self, resource, event, trigger, **kwargs): pass class FakeResourceCB2(driver_mgr.ResourceCallBackBase): def handle_event(self, resource, event, trigger, **kwargs): pass driver_mgr.RESOURCE_CB_CLASS_MAP = {'fake_resource1': FakeResourceCB1, 'fake_resource2': FakeResourceCB2} self.driver_manager._setup_resources_cb_handle() fake_resource_cb1 = FakeResourceCB1( 'fake_resource1', self.driver_manager.call) fake_resource_cb2 = FakeResourceCB2( 'fake_resource2', self.driver_manager.call) assert_calls = [ mock.call( *tools.get_subscribe_args( fake_resource_cb1.handle_event, 'fake_resource1', events.AFTER_CREATE)), mock.call( *tools.get_subscribe_args( fake_resource_cb1.handle_event, 'fake_resource1', events.AFTER_UPDATE)), mock.call( *tools.get_subscribe_args( fake_resource_cb1.handle_event, 'fake_resource1', events.AFTER_DELETE)), mock.call( *tools.get_subscribe_args( fake_resource_cb2.handle_event, 'fake_resource2', events.AFTER_CREATE)), mock.call( *tools.get_subscribe_args( fake_resource_cb2.handle_event, 'fake_resource2', events.AFTER_UPDATE)), mock.call( *tools.get_subscribe_args( fake_resource_cb2.handle_event, 'fake_resource2', events.AFTER_DELETE)), ] self._cb_mgr.subscribe.assert_has_calls(assert_calls) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.483046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/logapi/rpc/0000755000175000017500000000000000000000000024640 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/logapi/rpc/__init__.py0000644000175000017500000000000000000000000026737 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/logapi/rpc/test_server.py0000644000175000017500000001213300000000000027557 0ustar00coreycorey00000000000000# Copyright (c) 2017 Fujitsu Limited # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
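# NOTE: test_server.py below drives a small (resource_type -> methods)
# lookup table used by the logging RPC server. A sketch of the same mapping
# logic, using the exact data shapes the tests register (function names
# mirror the test; the implementation is illustrative):

RPC_RESOURCES_METHOD_MAP = {}


def register_rpc_methods(resource_type, methods):
    RPC_RESOURCES_METHOD_MAP[resource_type] = methods


def get_rpc_method(resource_type, rpc_key):
    # Each entry is a list of one-key dicts; return the first match.
    for method_map in RPC_RESOURCES_METHOD_MAP[resource_type]:
        if rpc_key in method_map:
            return method_map[rpc_key]


register_rpc_methods('security_group', [{'fake_key1': 'fake_method1'},
                                        {'fake_key2': 'fake_method2'}])
assert get_rpc_method('security_group', 'fake_key1') == 'fake_method1'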
import mock from neutron_lib import rpc from neutron_lib.services.logapi import constants as log_const from oslo_config import cfg import oslo_messaging from neutron.api.rpc.callbacks import events from neutron.api.rpc.handlers import resources_rpc from neutron.services.logapi.rpc import server as server_rpc from neutron.tests import base class LoggingApiNotificationTestCase(base.BaseTestCase): def setUp(self): super(LoggingApiNotificationTestCase, self).setUp() self.test_obj = server_rpc.LoggingApiNotification() def test___init__(self): self.assertIsInstance(self.test_obj.notification_api, resources_rpc.ResourcesPushRpcApi) @mock.patch("neutron.api.rpc.handlers.resources_rpc.ResourcesPushRpcApi." "push") def test_create_log(self, mocked_push): m_context = mock.Mock() m_log_resource = mock.Mock() self.test_obj.create_log(m_context, m_log_resource) mocked_push.assert_called_with(m_context, [m_log_resource], events.CREATED) @mock.patch("neutron.api.rpc.handlers.resources_rpc.ResourcesPushRpcApi." "push") def test_update_log(self, mocked_push): m_context = mock.Mock() m_log_resource = mock.Mock() self.test_obj.update_log(m_context, m_log_resource) mocked_push.assert_called_with(m_context, [m_log_resource], events.UPDATED) @mock.patch("neutron.api.rpc.handlers.resources_rpc.ResourcesPushRpcApi." "push") def test_delete_log(self, mocked_push): m_context = mock.Mock() m_log_resource = mock.Mock() self.test_obj.delete_log(m_context, m_log_resource) mocked_push.assert_called_with(m_context, [m_log_resource], events.DELETED) class TestRegisterValidateRPCMethods(base.BaseTestCase): def test_register_rpc_methods_method(self): resource_type = 'security_group' method = [{'fake_key1': 'fake_method1'}, {'fake_key2': 'fake_method2'}] expected = {resource_type: method} server_rpc.RPC_RESOURCES_METHOD_MAP.clear() server_rpc.register_rpc_methods(resource_type, method) self.assertEqual(expected, server_rpc.RPC_RESOURCES_METHOD_MAP) def test_get_rpc_method(self): resource_type = 'security_group' method = [{'fake_key1': 'fake_method1'}, {'fake_key2': 'fake_method2'}] server_rpc.RPC_RESOURCES_METHOD_MAP = {resource_type: method} actual = server_rpc.get_rpc_method('security_group', 'fake_key1') self.assertEqual('fake_method1', actual) class LoggingApiSkeletonTestCase(base.BaseTestCase): @mock.patch.object(rpc, "get_server") def test___init__(self, mocked_get_server): test_obj = server_rpc.LoggingApiSkeleton() _target = oslo_messaging.Target( topic=log_const.LOGGING_PLUGIN, server=cfg.CONF.host, fanout=False) mocked_get_server.assert_called_with(_target, [test_obj]) @mock.patch("neutron.services.logapi.common.db_api." "get_sg_log_info_for_port") def test_get_sg_log_info_for_port(self, mock_callback): with mock.patch.object( server_rpc, 'get_rpc_method', return_value=server_rpc.get_sg_log_info_for_port ): test_obj = server_rpc.LoggingApiSkeleton() m_context = mock.Mock() port_id = '123' test_obj.get_sg_log_info_for_port( m_context, resource_type=log_const.SECURITY_GROUP, port_id=port_id) mock_callback.assert_called_with(m_context, port_id) @mock.patch("neutron.services.logapi.common.db_api." 
"get_sg_log_info_for_log_resources") def test_get_sg_log_info_for_log_resources(self, mock_callback): with mock.patch.object( server_rpc, 'get_rpc_method', return_value=server_rpc.get_sg_log_info_for_log_resources ): test_obj = server_rpc.LoggingApiSkeleton() m_context = mock.Mock() log_resources = [mock.Mock()] test_obj.get_sg_log_info_for_log_resources( m_context, resource_type=log_const.SECURITY_GROUP, log_resources=log_resources) mock_callback.assert_called_with(m_context, log_resources) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/logapi/test_logging_plugin.py0000644000175000017500000003572100000000000030501 0ustar00coreycorey00000000000000# Copyright (C) 2017 Fujitsu Limited # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib import context from neutron_lib.plugins import constants as plugin_const from neutron_lib.plugins import directory from oslo_config import cfg from oslo_utils import uuidutils from neutron import manager from neutron.objects.logapi import logging_resource as log_object from neutron.objects import ports from neutron.objects import securitygroup as sg_object from neutron.services.logapi.common import exceptions as log_exc from neutron.services.logapi.common import sg_validate from neutron.tests.unit.services.logapi import base DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' SUPPORTED_LOGGING_TYPES = ['security_group'] class TestLoggingPlugin(base.BaseLogTestCase): def setUp(self): super(TestLoggingPlugin, self).setUp() self.setup_coreplugin(load_plugins=False) mock.patch('neutron.objects.db.api.create_object').start() mock.patch('neutron.objects.db.api.update_object').start() mock.patch('neutron.objects.db.api.delete_object').start() mock.patch('neutron.objects.db.api.get_object').start() # We don't use real models as per mocks above. We also need to mock-out # methods that work with real data types mock.patch( 'neutron.objects.base.NeutronDbObject.modify_fields_from_db' ).start() cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) cfg.CONF.set_override("service_plugins", ["neutron.services.logapi.logging_plugin.LoggingPlugin"]) manager.init() mock.patch( 'neutron.services.logapi.common.validators.' 'ResourceValidateRequest.get_validated_method', return_value=sg_validate.validate_security_group_request ).start() self.log_plugin = directory.get_plugin(plugin_const.LOG_API) self.log_plugin.driver_manager = mock.Mock() log_types = mock.PropertyMock(return_value=SUPPORTED_LOGGING_TYPES) self.log_plugin.driver_manager.supported_logging_types = \ mock.patch('neutron.services.logapi.drivers.manager.' 
'LoggingServiceDriverManager.supported_logging_types', new_callable=log_types).start() self.ctxt = context.Context('admin', 'fake_tenant') def test_get_logs(self): with mock.patch.object(log_object.Log, 'get_objects')\ as get_objects_mock: filters = {'filter': 'filter_id'} self.log_plugin.get_logs(self.ctxt, filters=filters) get_objects_mock.assert_called_once_with(self.ctxt, _pager=mock.ANY, filter='filter_id') def test_get_log_without_return_value(self): with mock.patch.object(log_object.Log, 'get_object', return_value=None): self.assertRaises( log_exc.LogResourceNotFound, self.log_plugin.get_log, self.ctxt, mock.ANY, ) def test_get_log_with_return_value(self): log_id = uuidutils.generate_uuid() with mock.patch.object(log_object.Log, 'get_object')\ as get_object_mock: self.log_plugin.get_log(self.ctxt, log_id) get_object_mock.assert_called_once_with(self.ctxt, id=log_id) @mock.patch('neutron.db._utils.model_query') def test_create_log_full_options(self, query_mock): log = {'log': {'resource_type': 'security_group', 'enabled': True, 'resource_id': uuidutils.generate_uuid(), 'target_id': uuidutils.generate_uuid()}} port = mock.Mock() new_log = mock.Mock() with mock.patch.object(sg_object.SecurityGroup, 'count', return_value=1): with mock.patch.object(ports.Port, 'get_object', return_value=port): with mock.patch('neutron.services.logapi.common.' 'validators.validate_log_type_for_port', return_value=True): with mock.patch('neutron.objects.logapi.' 'logging_resource.Log', return_value=new_log) as init_log_mock: self.log_plugin.create_log(self.ctxt, log) init_log_mock.assert_called_once_with( context=self.ctxt, **log['log']) self.assertTrue(new_log.create.called) calls = [ mock.call.call('create_log_precommit', self.ctxt, new_log), mock.call.call('create_log', self.ctxt, new_log) ] self.log_plugin.driver_manager.assert_has_calls(calls) def test_create_log_without_sg_resource(self): log = {'log': {'resource_type': 'security_group', 'enabled': True, 'target_id': uuidutils.generate_uuid()}} new_log = mock.Mock() new_log.enabled = True port = mock.Mock() with mock.patch.object(ports.Port, 'get_object', return_value=port): with mock.patch('neutron.services.logapi.common.' 
'validators.validate_log_type_for_port', return_value=True): with mock.patch('neutron.objects.logapi.logging_resource.Log', return_value=new_log) as init_log_mock: self.log_plugin.create_log(self.ctxt, log) init_log_mock.assert_called_once_with( context=self.ctxt, **log['log']) self.assertTrue(new_log.create.called) calls = [ mock.call.call('create_log_precommit', self.ctxt, new_log), mock.call.call('create_log', self.ctxt, new_log) ] self.log_plugin.driver_manager.assert_has_calls(calls) def test_create_log_without_parent_resource(self): log = {'log': {'resource_type': 'security_group', 'enabled': True, 'resource_id': uuidutils.generate_uuid()}} new_log = mock.Mock() new_log.enabled = True with mock.patch.object(sg_object.SecurityGroup, 'count', return_value=1): with mock.patch('neutron.objects.logapi.logging_resource.Log', return_value=new_log) as init_log_mock: self.log_plugin.create_log(self.ctxt, log) init_log_mock.assert_called_once_with(context=self.ctxt, **log['log']) self.assertTrue(new_log.create.called) calls = [ mock.call.call('create_log_precommit', self.ctxt, new_log), mock.call.call('create_log', self.ctxt, new_log) ] self.log_plugin.driver_manager.assert_has_calls(calls) def test_create_log_without_target(self): log = {'log': {'resource_type': 'security_group', 'enabled': True, }} new_log = mock.Mock() new_log.enabled = True with mock.patch('neutron.objects.logapi.' 'logging_resource.Log', return_value=new_log) as init_log_mock: self.log_plugin.create_log(self.ctxt, log) init_log_mock.assert_called_once_with(context=self.ctxt, **log['log']) self.assertTrue(new_log.create.called) calls = [ mock.call.call('create_log_precommit', self.ctxt, new_log), mock.call.call('create_log', self.ctxt, new_log) ] self.log_plugin.driver_manager.assert_has_calls(calls) def test_create_log_nonexistent_sg_resource(self): log = {'log': {'resource_type': 'security_group', 'enabled': True, 'resource_id': uuidutils.generate_uuid()}} with mock.patch.object(sg_object.SecurityGroup, 'count', return_value=0): self.assertRaises( log_exc.ResourceNotFound, self.log_plugin.create_log, self.ctxt, log) def test_create_log_nonexistent_target(self): log = {'log': {'resource_type': 'security_group', 'enabled': True, 'target_id': uuidutils.generate_uuid()}} with mock.patch.object(ports.Port, 'get_object', return_value=None): self.assertRaises( log_exc.TargetResourceNotFound, self.log_plugin.create_log, self.ctxt, log) def test_create_log_not_bound_port(self): log = {'log': {'resource_type': 'security_group', 'enabled': True, 'resource_id': uuidutils.generate_uuid(), 'target_id': uuidutils.generate_uuid()}} port = mock.Mock() with mock.patch.object(sg_object.SecurityGroup, 'count', return_value=1): with mock.patch.object(ports.Port, 'get_object', return_value=port): with mock.patch('neutron.services.logapi.common.' 'validators.validate_log_type_for_port', return_value=True): self.assertRaises( log_exc.InvalidResourceConstraint, self.log_plugin.create_log, self.ctxt, log) def test_create_log_disabled(self): log_data = {'log': {'resource_type': 'security_group', 'enabled': False}} new_log = mock.Mock() new_log.enabled = False with mock.patch('neutron.objects.logapi.' 
'logging_resource.Log', return_value=new_log) as init_log_mock: self.log_plugin.create_log(self.ctxt, log_data) init_log_mock.assert_called_once_with( context=self.ctxt, **log_data['log']) self.assertTrue(new_log.create.called) self.log_plugin.driver_manager.call.assert_not_called() def test_create_log_with_unsupported_logging_type(self): log = {'log': {'resource_type': 'fake_type', 'enabled': True}} self.assertRaises( log_exc.InvalidLogResourceType, self.log_plugin.create_log, self.ctxt, log) def test_create_log_with_unsupported_logging_type_on_port(self): log = {'log': {'resource_type': 'security_group', 'enabled': True, 'target_id': uuidutils.generate_uuid()}} port = mock.Mock() port.id = log['log']['target_id'] with mock.patch.object(ports.Port, 'get_object', return_value=port): with mock.patch('neutron.services.logapi.common.' 'validators.validate_log_type_for_port', return_value=False): self.assertRaises( log_exc.LoggingTypeNotSupported, self.log_plugin.create_log, self.ctxt, log) def test_update_log(self): log_data = {'log': {'enabled': True}} new_log = mock.Mock() new_log.id = uuidutils.generate_uuid() with mock.patch('neutron.objects.logapi.' 'logging_resource.Log', return_value=new_log) as update_log_mock: self.log_plugin.update_log(self.ctxt, new_log.id, log_data) update_log_mock.assert_called_once_with(self.ctxt, id=new_log.id) new_log.update_fields.assert_called_once_with(log_data['log'], reset_changes=True) self.assertTrue(new_log.update.called) calls = [ mock.call.call('update_log_precommit', self.ctxt, new_log), mock.call.call('update_log', self.ctxt, new_log) ] self.log_plugin.driver_manager.assert_has_calls(calls) def test_update_log_none_enabled(self): log_data = {'log': {}} new_log = mock.Mock() new_log.id = uuidutils.generate_uuid() with mock.patch('neutron.objects.logapi.' 
'logging_resource.Log', return_value=new_log) as update_log_mock: self.log_plugin.update_log(self.ctxt, new_log.id, log_data) update_log_mock.assert_called_once_with(self.ctxt, id=new_log.id) new_log.update_fields.assert_called_once_with(log_data['log'], reset_changes=True) self.assertTrue(new_log.update.called) self.log_plugin.driver_manager.call.assert_not_called() def test_delete_log(self): delete_log = mock.Mock() delete_log.id = uuidutils.generate_uuid() with mock.patch.object(log_object.Log, 'get_object', return_value=delete_log) as delete_log_mock: self.log_plugin.delete_log(self.ctxt, delete_log.id) delete_log_mock.assert_called_once_with(self.ctxt, id=delete_log.id) self.assertTrue(delete_log.delete.called) calls = [ mock.call.call('delete_log_precommit', self.ctxt, delete_log), mock.call.call('delete_log', self.ctxt, delete_log) ] self.log_plugin.driver_manager.assert_has_calls(calls) def test_delete_nonexistent_log(self): with mock.patch.object(log_object.Log, 'get_object', return_value=None): self.assertRaises( log_exc.LogResourceNotFound, self.log_plugin.delete_log, self.ctxt, mock.ANY) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.483046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/metering/0000755000175000017500000000000000000000000024413 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/metering/__init__.py0000644000175000017500000000000000000000000026512 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.483046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/metering/agents/0000755000175000017500000000000000000000000025674 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/metering/agents/__init__.py0000644000175000017500000000000000000000000027773 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/metering/agents/test_metering_agent.py0000644000175000017500000002532700000000000032306 0ustar00coreycorey00000000000000# Copyright (C) 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
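# NOTE: The metering agent tests below assert how per-label traffic
# counters are accumulated between reports (see the expected_info dicts in
# the _test_add_metering_info cases). A condensed sketch of that
# bookkeeping, modeled on what the tests expect of _add_metering_info
# (hypothetical; the real agent also purges stale entries and emits
# 'l3.meter' notifications, which this omits):

import time


class SketchMeteringAccumulator(object):
    def __init__(self):
        self.metering_infos = {}

    def add_metering_info(self, label_id, pkts, bytes_):
        now = time.time()
        info = self.metering_infos.get(
            label_id, {'bytes': 0, 'pkts': 0, 'time': 0,
                       'first_update': now, 'last_update': now})
        # Counters grow monotonically; 'time' tracks seconds covered
        # since the previous sample, so a fresh entry reports time 0.
        info['bytes'] += bytes_
        info['pkts'] += pkts
        info['time'] += now - info['last_update']
        info['last_update'] = now
        self.metering_infos[label_id] = info
        return info


_acc = SketchMeteringAccumulator()
_acc.add_metering_info('label-1', pkts=44, bytes_=222)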
import mock from neutron_lib.tests.unit import fake_notifier from oslo_config import cfg from oslo_utils import fixture as utils_fixture from oslo_utils import timeutils from oslo_utils import uuidutils from neutron.conf.services import metering_agent as metering_agent_config from neutron.services.metering.agents import metering_agent from neutron.tests import base _uuid = uuidutils.generate_uuid TENANT_ID = _uuid() LABEL_ID = _uuid() ROUTERS = [{'status': 'ACTIVE', 'name': 'router1', 'gw_port_id': None, 'admin_state_up': True, 'tenant_id': TENANT_ID, '_metering_labels': [{'rules': [], 'id': LABEL_ID}], 'id': _uuid()}] ROUTERS_WITH_RULE = [{'status': 'ACTIVE', 'name': 'router1', 'gw_port_id': None, 'admin_state_up': True, 'tenant_id': TENANT_ID, '_metering_labels': [{'rule': {}, 'id': LABEL_ID}], 'id': _uuid()}] class TestMeteringOperations(base.BaseTestCase): def setUp(self): super(TestMeteringOperations, self).setUp() metering_agent_config.register_metering_agent_opts() self.noop_driver = ('neutron.services.metering.drivers.noop.' 'noop_driver.NoopMeteringDriver') cfg.CONF.set_override('driver', 'noop') cfg.CONF.set_override('measure_interval', 0) cfg.CONF.set_override('report_interval', 0) self.setup_notification_driver() metering_rpc = ('neutron.services.metering.agents.metering_agent.' 'MeteringPluginRpc._get_sync_data_metering') self.metering_rpc_patch = mock.patch(metering_rpc, return_value=[]) self.metering_rpc_patch.start() self.driver_patch = mock.patch(self.noop_driver, spec=True) self.driver_patch.start() loopingcall_patch = mock.patch( 'oslo_service.loopingcall.FixedIntervalLoopingCall') loopingcall_patch.start() self.agent = metering_agent.MeteringAgent('my agent', cfg.CONF) self.driver = self.agent.metering_driver def test_add_metering_label(self): self.agent.add_metering_label(None, ROUTERS) self.assertEqual(1, self.driver.add_metering_label.call_count) def test_remove_metering_label(self): self.agent.remove_metering_label(None, ROUTERS) self.assertEqual(1, self.driver.remove_metering_label.call_count) def test_update_metering_label_rule(self): self.agent.update_metering_label_rules(None, ROUTERS) self.assertEqual(1, self.driver.update_metering_label_rules.call_count) def test_add_metering_label_rule(self): self.agent.add_metering_label_rule(None, ROUTERS_WITH_RULE) self.assertEqual(1, self.driver.add_metering_label_rule.call_count) def test_remove_metering_label_rule(self): self.agent.remove_metering_label_rule(None, ROUTERS_WITH_RULE) self.assertEqual(1, self.driver.remove_metering_label_rule.call_count) def test_routers_updated(self): self.agent.routers_updated(None, ROUTERS) self.assertEqual(1, self.driver.update_routers.call_count) def test_get_traffic_counters(self): self.agent._get_traffic_counters(None, ROUTERS) self.assertEqual(1, self.driver.get_traffic_counters.call_count) def test_sync_router_namespaces(self): self.agent._sync_router_namespaces(None, ROUTERS) self.assertEqual(1, self.driver.sync_router_namespaces.call_count) def test_notification_report(self): self.agent.routers_updated(None, ROUTERS) self.driver.get_traffic_counters.return_value = {LABEL_ID: {'pkts': 88, 'bytes': 444}} self.agent._metering_loop() self.assertNotEqual(len(fake_notifier.NOTIFICATIONS), 0) for n in fake_notifier.NOTIFICATIONS: if n['event_type'] == 'l3.meter': break self.assertEqual('l3.meter', n['event_type']) payload = n['payload'] self.assertEqual(TENANT_ID, payload['tenant_id']) self.assertEqual(LABEL_ID, payload['label_id']) self.assertEqual(88, payload['pkts']) 
self.assertEqual(444, payload['bytes']) def test_notification_report_interval(self): measure_interval = 30 report_interval = 600 now = timeutils.utcnow() time_fixture = self.useFixture(utils_fixture.TimeFixture(now)) self.agent.routers_updated(None, ROUTERS) self.driver.get_traffic_counters.return_value = {LABEL_ID: {'pkts': 889, 'bytes': 4440}} cfg.CONF.set_override('measure_interval', measure_interval) cfg.CONF.set_override('report_interval', report_interval) for i in range(report_interval): self.agent._metering_loop() count = 0 if len(fake_notifier.NOTIFICATIONS) > 1: for n in fake_notifier.NOTIFICATIONS: if n['event_type'] == 'l3.meter': # skip the first notification because the time is 0 count += 1 if count > 1: break time_fixture.advance_time_seconds(measure_interval) self.assertEqual('l3.meter', n['event_type']) payload = n['payload'] self.assertEqual(TENANT_ID, payload['tenant_id']) self.assertEqual(LABEL_ID, payload['label_id']) self.assertLess((payload['time'] - report_interval), measure_interval, payload) interval = (payload['last_update'] - payload['first_update']) \ - report_interval self.assertLess(interval, measure_interval, payload) def test_router_deleted(self): label_id = _uuid() self.driver.get_traffic_counters = mock.MagicMock() self.driver.get_traffic_counters.return_value = {label_id: {'pkts': 44, 'bytes': 222}} self.agent._add_metering_info = mock.MagicMock() self.agent.routers_updated(None, ROUTERS) self.agent.router_deleted(None, ROUTERS[0]['id']) self.assertEqual(1, self.agent._add_metering_info.call_count) self.assertEqual(1, self.driver.remove_router.call_count) self.agent._add_metering_info.assert_called_with(label_id, 44, 222) @mock.patch('time.time') def _test_purge_metering_info(self, current_timestamp, is_empty, mock_time): mock_time.return_value = current_timestamp self.agent.metering_infos = {'fake': {'last_update': 1}} self.config(report_interval=1) self.agent._purge_metering_info() self.assertEqual(0 if is_empty else 1, len(self.agent.metering_infos)) self.assertEqual(1, mock_time.call_count) def test_purge_metering_info(self): # 1 < 2 - 1 -> False self._test_purge_metering_info(2, False) def test_purge_metering_info_delete(self): # 1 < 3 - 1 -> True self._test_purge_metering_info(3, True) @mock.patch('time.time') def _test_add_metering_info(self, expected_info, current_timestamp, mock_time): mock_time.return_value = current_timestamp actual_info = self.agent._add_metering_info('fake_label_id', 1, 1) self.assertEqual(1, len(self.agent.metering_infos)) self.assertEqual(expected_info, actual_info) self.assertEqual(expected_info, self.agent.metering_infos['fake_label_id']) self.assertEqual(1, mock_time.call_count) def test_add_metering_info_create(self): expected_info = {'bytes': 1, 'pkts': 1, 'time': 0, 'first_update': 1, 'last_update': 1} self._test_add_metering_info(expected_info, 1) def test_add_metering_info_update(self): expected_info = {'bytes': 1, 'pkts': 1, 'time': 0, 'first_update': 1, 'last_update': 1} self.agent.metering_infos = {'fake_label_id': expected_info} expected_info.update({'bytes': 2, 'pkts': 2, 'time': 1, 'last_update': 2}) self._test_add_metering_info(expected_info, 2) def test_metering_agent_host_value(self): expected_host = 'my agent' self.assertEqual(expected_host, self.agent.host) class TestMeteringDriver(base.BaseTestCase): def setUp(self): super(TestMeteringDriver, self).setUp() metering_agent_config.register_metering_agent_opts() cfg.CONF.set_override('driver', 'noop') self.agent = metering_agent.MeteringAgent('my agent',
cfg.CONF) self.driver = mock.Mock() self.agent.metering_driver = self.driver def test_add_metering_label_with_bad_driver_impl(self): del self.driver.add_metering_label with mock.patch.object(metering_agent, 'LOG') as log: self.agent.add_metering_label(None, ROUTERS) log.exception.assert_called_with(mock.ANY, {'driver': 'noop', 'func': 'add_metering_label'}) def test_add_metering_label_runtime_error(self): self.driver.add_metering_label.side_effect = RuntimeError with mock.patch.object(metering_agent, 'LOG') as log: self.agent.add_metering_label(None, ROUTERS) log.exception.assert_called_with(mock.ANY, {'driver': 'noop', 'func': 'add_metering_label'}) def test_init_chain(self): with mock.patch('oslo_service.' 'periodic_task.PeriodicTasks.__init__') as init: metering_agent.MeteringAgent('my agent', cfg.CONF) init.assert_called_once_with(cfg.CONF) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.483046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/metering/drivers/0000755000175000017500000000000000000000000026071 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/metering/drivers/__init__.py0000644000175000017500000000000000000000000030170 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/metering/drivers/test_iptables.py0000644000175000017500000010310300000000000031303 0ustar00coreycorey00000000000000# Copyright (C) 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
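# NOTE: The iptables driver tests below encode two conventions that are
# visible in the expected add_chain/add_rule calls: chain names are clamped
# to the iptables name-length limit, and the external device is 'qg-' plus
# a truncated gateway port id. A small sketch of both helpers, with the
# length constants inferred from the test data ('neutron-meter-l-c5df2fe5-
# c60' is 28 chars, 'qg-6d411f48-ec' is 14); the constants and helper names
# are assumptions, not the driver's real API:

MAX_CHAIN_LEN = 28   # iptables chain-name limit assumed by this sketch
DEV_NAME_LEN = 14    # 'qg-' + 11 chars of the gateway port UUID


def label_chain(prefix, label_id):
    # e.g. 'neutron-meter-l-' + label id -> 'neutron-meter-l-c5df2fe5-c60'
    return (prefix + label_id)[:MAX_CHAIN_LEN]


def external_device(gw_port_id):
    return ('qg-' + gw_port_id)[:DEV_NAME_LEN]


def metering_rule(rule, device, chain):
    # Ingress traffic matches on destination CIDR and inbound device;
    # egress matches on source CIDR and outbound device, as in the
    # expected calls of test_process_metering_label_rules.
    if rule['direction'] == 'ingress':
        match = '-d %s -i %s' % (rule['remote_ip_prefix'], device)
    else:
        match = '-s %s -o %s' % (rule['remote_ip_prefix'], device)
    return '%s -j %s' % (match, chain)


assert label_chain('neutron-meter-l-',
                   'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83') == \
    'neutron-meter-l-c5df2fe5-c60'
assert external_device('6d411f48-ecc7-45e0-9ece-3b5bdb54fcee') == \
    'qg-6d411f48-ec'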
import copy import mock from oslo_config import cfg from neutron.services.metering.drivers.iptables import iptables_driver from neutron.tests import base TEST_ROUTERS = [ {'_metering_labels': [ {'id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'rules': [{ 'direction': 'ingress', 'excluded': False, 'id': '7f1a261f-2489-4ed1-870c-a62754501379', 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '10.0.0.0/24'}]}], 'admin_state_up': True, 'gw_port_id': '6d411f48-ecc7-45e0-9ece-3b5bdb54fcee', 'id': '473ec392-1711-44e3-b008-3251ccfc5099', 'name': 'router1', 'distributed': False, 'status': 'ACTIVE', 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}, {'_metering_labels': [ {'id': 'eeef45da-c600-4a2a-b2f4-c0fb6df73c83', 'rules': [{ 'direction': 'egress', 'excluded': False, 'id': 'fa2441e8-2489-4ed1-870c-a62754501379', 'metering_label_id': 'eeef45da-c600-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '20.0.0.0/24'}]}], 'admin_state_up': True, 'gw_port_id': '7d411f48-ecc7-45e0-9ece-3b5bdb54fcee', 'id': '373ec392-1711-44e3-b008-3251ccfc5099', 'name': 'router2', 'status': 'ACTIVE', 'distributed': False, 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}, ] TEST_DVR_ROUTER = [ {'_metering_labels': [ {'id': 'c5df2fe5-c610-4a2a-b2f4-c0fb6df73c83', 'rules': [{ 'direction': 'ingress', 'excluded': False, 'id': '7f1a261f-2600-4ed1-870c-a62754501379', 'metering_label_id': 'c5df2fe5-c700-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '10.0.0.0/24'}]}], 'admin_state_up': True, 'gw_port_id': '6d411f48-ecc7-45e0-9ece-3b5bdb54fcee', 'id': '473ec392-2711-44e3-b008-3251ccfc5099', 'name': 'router-test', 'distributed': True, 'status': 'ACTIVE', 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}] TEST_ROUTERS_WITH_ONE_RULE = [ {'_metering_labels': [ {'id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'rule': { 'direction': 'ingress', 'excluded': False, 'id': '7f1a261f-2489-4ed1-870c-a62754501379', 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '30.0.0.0/24'}}], 'admin_state_up': True, 'gw_port_id': '6d411f48-ecc7-45e0-9ece-3b5bdb54fcee', 'id': '473ec392-1711-44e3-b008-3251ccfc5099', 'name': 'router1', 'status': 'ACTIVE', 'distributed': False, 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}, {'_metering_labels': [ {'id': 'eeef45da-c600-4a2a-b2f4-c0fb6df73c83', 'rule': { 'direction': 'egress', 'excluded': False, 'id': 'fa2441e8-2489-4ed1-870c-a62754501379', 'metering_label_id': 'eeef45da-c600-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '40.0.0.0/24'}}], 'admin_state_up': True, 'gw_port_id': '7d411f48-ecc7-45e0-9ece-3b5bdb54fcee', 'id': '373ec392-1711-44e3-b008-3251ccfc5099', 'name': 'router2', 'distributed': False, 'status': 'ACTIVE', 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}, ] TEST_ROUTERS_WITH_NEW_LABEL = [ {'_metering_labels': [ {'id': 'e27fe2df-376e-4ac7-ae13-92f050a21f84', 'rule': { 'direction': 'ingress', 'excluded': False, 'id': '7f1a261f-2489-4ed1-870c-a62754501379', 'metering_label_id': 'e27fe2df-376e-4ac7-ae13-92f050a21f84', 'remote_ip_prefix': '50.0.0.0/24'}}], 'admin_state_up': True, 'gw_port_id': '6d411f48-ecc7-45e0-9ece-3b5bdb54fcee', 'id': '473ec392-1711-44e3-b008-3251ccfc5099', 'name': 'router1', 'status': 'ACTIVE', 'tenant_id': '6c5f5d2a1fa2441e88e35422926f48e8'}] class IptablesDriverTestCase(base.BaseTestCase): def setUp(self): super(IptablesDriverTestCase, self).setUp() self.utils_exec_p = mock.patch( 'neutron.agent.linux.utils.execute') self.utils_exec = self.utils_exec_p.start() self.iptables_cls_p = mock.patch( 
'neutron.agent.linux.iptables_manager.IptablesManager') self.iptables_cls = self.iptables_cls_p.start() self.iptables_inst = mock.Mock() self.v4filter_inst = mock.Mock() self.v6filter_inst = mock.Mock() self.namespace_exists_p = mock.patch( 'neutron.agent.linux.ip_lib.network_namespace_exists') self.namespace_exists = self.namespace_exists_p.start() self.snat_ns_name_p = mock.patch( 'neutron.agent.l3.dvr_snat_ns.SnatNamespace.get_snat_ns_name') self.snat_ns_name = self.snat_ns_name_p.start() self.v4filter_inst.chains = [] self.v6filter_inst.chains = [] self.iptables_inst.ipv4 = {'filter': self.v4filter_inst} self.iptables_inst.ipv6 = {'filter': self.v6filter_inst} self.iptables_cls.return_value = self.iptables_inst cfg.CONF.set_override('interface_driver', 'neutron.agent.linux.interface.NullDriver') self.metering = iptables_driver.IptablesMeteringDriver('metering', cfg.CONF) def test_create_stateless_iptables_manager(self): routers = TEST_ROUTERS[:1] self.namespace_exists.return_value = True self.metering.add_metering_label(None, routers) self.assertEqual(1, self.iptables_cls.call_count) self.iptables_cls.assert_called_with( binary_name=mock.ANY, namespace=mock.ANY, state_less=True, use_ipv6=mock.ANY) rm = iptables_driver.RouterWithMetering(self.metering.conf, routers[0]) self.assertTrue(rm.iptables_manager) self.assertIsNone(rm.snat_iptables_manager) def test_iptables_manager_never_create_with_no_valid_namespace(self): routers = TEST_ROUTERS[:1] self.namespace_exists.return_value = False self.metering.add_metering_label(None, routers) self.assertFalse(self.iptables_cls.called) rm = iptables_driver.RouterWithMetering(self.metering.conf, routers[0]) self.assertIsNone(rm.iptables_manager) self.assertIsNone(rm.snat_iptables_manager) def test_create_iptables_manager_for_distributed_routers(self): routers = TEST_DVR_ROUTER[:1] self.namespace_exists.return_value = True snat_ns_name = 'snat-' + routers[0]['id'] self.snat_ns_name.return_value = snat_ns_name self.metering.add_metering_label(None, routers) self.assertEqual(2, self.iptables_cls.call_count) rm = iptables_driver.RouterWithMetering(self.metering.conf, routers[0]) self.assertTrue(rm.iptables_manager) self.assertTrue(rm.snat_iptables_manager) def test_add_metering_label(self): routers = TEST_ROUTERS[:1] self.namespace_exists.return_value = True self.metering.add_metering_label(None, routers) calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), mock.call.add_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-l-c5df2fe5-c60', '', wrap=False)] self.v4filter_inst.assert_has_calls(calls) def test_add_metering_label_dvr_routers(self): routers = TEST_DVR_ROUTER[:1] self.namespace_exists.return_value = True snat_ns_name = 'snat-' + routers[0]['id'] self.snat_ns_name.return_value = snat_ns_name self.metering._process_ns_specific_metering_label = mock.Mock() self.metering.add_metering_label(None, routers) rm = iptables_driver.RouterWithMetering(self.metering.conf, routers[0]) ext_dev, ext_snat_dev = self.metering.get_external_device_names(rm) self.assertEqual( 2, self.metering._process_ns_specific_metering_label.call_count) # check and validate the right device being passed based on the # namespace. 
self.assertEqual( self.metering._process_ns_specific_metering_label.mock_calls, [mock.call( routers[0], ext_dev, rm.iptables_manager), mock.call( routers[0], ext_snat_dev, rm.snat_iptables_manager)]) def test_add_metering_label_legacy_routers(self): routers = TEST_ROUTERS[:1] self.namespace_exists.return_value = True self.metering._process_ns_specific_metering_label = mock.Mock() self.metering.add_metering_label(None, routers) rm = iptables_driver.RouterWithMetering(self.metering.conf, routers[0]) ext_dev, _ = self.metering.get_external_device_names(rm) self.assertEqual( self.metering._process_ns_specific_metering_label.mock_calls, [mock.call(routers[0], ext_dev, rm.iptables_manager)]) def test_add_metering_label_when_no_namespace(self): routers = TEST_ROUTERS[:1] self.namespace_exists.return_value = False self.metering._process_metering_label = mock.Mock() self.metering.add_metering_label(None, routers) rm = iptables_driver.RouterWithMetering(self.metering.conf, routers[0]) self.assertIsNone(rm.iptables_manager) self.assertIsNone(rm.snat_iptables_manager) self.assertFalse(self.metering._process_metering_label.called) def test_process_metering_label_rules(self): self.namespace_exists.return_value = True self.metering.add_metering_label(None, TEST_ROUTERS) calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), mock.call.add_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-l-c5df2fe5-c60', '', wrap=False), mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', '-d 10.0.0.0/24 -i qg-6d411f48-ec' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False), mock.call.add_chain('neutron-meter-l-eeef45da-c60', wrap=False), mock.call.add_chain('neutron-meter-r-eeef45da-c60', wrap=False), mock.call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-eeef45da-c60', wrap=False), mock.call.add_rule('neutron-meter-l-eeef45da-c60', '', wrap=False), mock.call.add_rule('neutron-meter-r-eeef45da-c60', '-s 20.0.0.0/24 -o qg-7d411f48-ec' ' -j neutron-meter-l-eeef45da-c60', wrap=False, top=False)] self.v4filter_inst.assert_has_calls(calls) def test_process_metering_label_rules_with_no_gateway_router(self): routers = copy.deepcopy(TEST_ROUTERS) for router in routers: router['gw_port_id'] = None self.namespace_exists.return_value = True self.metering.add_metering_label(None, routers) calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), mock.call.add_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-l-c5df2fe5-c60', '', wrap=False), mock.call.add_chain('neutron-meter-l-eeef45da-c60', wrap=False), mock.call.add_chain('neutron-meter-r-eeef45da-c60', wrap=False), mock.call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-eeef45da-c60', wrap=False), mock.call.add_rule('neutron-meter-l-eeef45da-c60', '', wrap=False)] self.v4filter_inst.assert_has_calls(calls, any_order=False) def test_add_metering_label_with_rules(self): routers = copy.deepcopy(TEST_ROUTERS) routers[1]['_metering_labels'][0]['rules'][0].update({ 'direction': 'ingress', 'excluded': True, }) self.namespace_exists.return_value = True self.metering.add_metering_label(None, routers) calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), mock.call.add_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), 
mock.call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-l-c5df2fe5-c60', '', wrap=False), mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', '-d 10.0.0.0/24 -i qg-6d411f48-ec' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False), mock.call.add_chain('neutron-meter-l-eeef45da-c60', wrap=False), mock.call.add_chain('neutron-meter-r-eeef45da-c60', wrap=False), mock.call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-eeef45da-c60', wrap=False), mock.call.add_rule('neutron-meter-l-eeef45da-c60', '', wrap=False), mock.call.add_rule('neutron-meter-r-eeef45da-c60', '-d 20.0.0.0/24 -i qg-7d411f48-ec' ' -j RETURN', wrap=False, top=True)] self.v4filter_inst.assert_has_calls(calls) def test_update_metering_label_rules(self): routers = TEST_ROUTERS[:1] self.namespace_exists.return_value = True self.metering.add_metering_label(None, routers) updates = copy.deepcopy(routers) updates[0]['_metering_labels'][0]['rules'] = [{ 'direction': 'egress', 'excluded': True, 'id': '7f1a261f-2489-4ed1-870c-a62754501379', 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '10.0.0.0/24'}, {'direction': 'ingress', 'excluded': False, 'id': '6f1a261f-2489-4ed1-870c-a62754501379', 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '20.0.0.0/24'}] self.metering.update_metering_label_rules(None, updates) calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), mock.call.add_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-l-c5df2fe5-c60', '', wrap=False), mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', '-d 10.0.0.0/24 -i qg-6d411f48-ec' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False), mock.call.empty_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', '-s 10.0.0.0/24 -o qg-6d411f48-ec' ' -j RETURN', wrap=False, top=True), mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', '-d 20.0.0.0/24 -i qg-6d411f48-ec -j ' 'neutron-meter-l-c5df2fe5-c60', wrap=False, top=False)] self.v4filter_inst.assert_has_calls(calls) def test_remove_metering_label_rule_in_update(self): routers = copy.deepcopy(TEST_ROUTERS[:1]) routers[0]['_metering_labels'][0]['rules'].append({ 'direction': 'ingress', 'excluded': False, 'id': 'aaaa261f-2489-4ed1-870c-a62754501379', 'metering_label_id': 'c5df2fe5-c600-4a2a-b2f4-c0fb6df73c83', 'remote_ip_prefix': '20.0.0.0/24', }) self.namespace_exists.return_value = True self.metering.add_metering_label(None, routers) del routers[0]['_metering_labels'][0]['rules'][1] self.metering.update_metering_label_rules(None, routers) calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), mock.call.add_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-l-c5df2fe5-c60', '', wrap=False), mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', '-d 10.0.0.0/24 -i qg-6d411f48-ec' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False), mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', '-d 20.0.0.0/24 -i qg-6d411f48-ec' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False), mock.call.empty_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', '-d 10.0.0.0/24 -i qg-6d411f48-ec' ' -j 
neutron-meter-l-c5df2fe5-c60', wrap=False, top=False)] self.v4filter_inst.assert_has_calls(calls) def test_add_metering_label_rule(self): new_routers_rules = TEST_ROUTERS_WITH_ONE_RULE self.metering.update_routers(None, TEST_ROUTERS) self.namespace_exists.return_value = True self.metering.add_metering_label_rule(None, new_routers_rules) calls = [ mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', '-d 30.0.0.0/24 -i qg-6d411f48-ec' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False), mock.call.add_rule('neutron-meter-r-eeef45da-c60', '-s 40.0.0.0/24 -o qg-7d411f48-ec' ' -j neutron-meter-l-eeef45da-c60', wrap=False, top=False), ] self.v4filter_inst.assert_has_calls(calls) def test_add_metering_label_rule_without_label(self): new_routers_rules = TEST_ROUTERS_WITH_NEW_LABEL # clear all the metering labels for r in TEST_ROUTERS: rm = iptables_driver.RouterWithMetering(self.metering.conf, r) rm.metering_labels = {} self.metering.update_routers(None, TEST_ROUTERS) self.metering.add_metering_label_rule(None, new_routers_rules) calls = [ mock.call.add_chain('neutron-meter-l-e27fe2df-376', wrap=False), mock.call.add_chain('neutron-meter-r-e27fe2df-376', wrap=False), mock.call.add_rule('neutron-meter-FORWARD', '-j neutron-meter-r-e27fe2df-376', wrap=False), mock.call.add_rule('neutron-meter-l-e27fe2df-376', '', wrap=False), mock.call.add_rule('neutron-meter-r-e27fe2df-376', '-d 50.0.0.0/24 ' '-i qg-6d411f48-ec ' '-j neutron-meter-l-e27fe2df-376', top=False, wrap=False) ] self.v4filter_inst.assert_has_calls(calls) def test_add_metering_label_rule_dvr_router(self): routers = TEST_DVR_ROUTER self.metering.update_routers(None, TEST_DVR_ROUTER) self.namespace_exists.return_value = True self.metering._process_metering_rule_action_based_on_ns = mock.Mock() self.metering.add_metering_label_rule(None, routers) rm = iptables_driver.RouterWithMetering(self.metering.conf, routers[0]) ext_dev, ext_snat_dev = self.metering.get_external_device_names(rm) self.assertEqual( 2, self.metering._process_metering_rule_action_based_on_ns.call_count) # check and validate the right device being passed based on the # namespace. self.assertEqual( self.metering._process_metering_rule_action_based_on_ns.mock_calls, [mock.call( routers[0], 'create', ext_dev, rm.iptables_manager), mock.call( routers[0], 'create', ext_snat_dev, rm.snat_iptables_manager)]) def test_remove_metering_label_rule_dvr_router(self): routers = TEST_DVR_ROUTER self.metering.update_routers(None, TEST_DVR_ROUTER) self.namespace_exists.return_value = True self.metering.add_metering_label_rule(None, routers) self.metering._process_metering_rule_action_based_on_ns = mock.Mock() self.metering.remove_metering_label_rule(None, routers) rm = iptables_driver.RouterWithMetering(self.metering.conf, routers[0]) ext_dev, ext_snat_dev = self.metering.get_external_device_names(rm) self.assertEqual( 2, self.metering._process_metering_rule_action_based_on_ns.call_count) # check and validate the right device being passed based on the # namespace. 
self.assertEqual( self.metering._process_metering_rule_action_based_on_ns.mock_calls, [mock.call( routers[0], 'delete', ext_dev, rm.iptables_manager), mock.call( routers[0], 'delete', ext_snat_dev, rm.snat_iptables_manager)]) def test_remove_metering_label_rule(self): new_routers_rules = TEST_ROUTERS_WITH_ONE_RULE self.metering.update_routers(None, TEST_ROUTERS) self.namespace_exists.return_value = True self.metering.add_metering_label_rule(None, new_routers_rules) self.metering.remove_metering_label_rule(None, new_routers_rules) calls = [ mock.call.remove_rule('neutron-meter-r-c5df2fe5-c60', '-d 30.0.0.0/24 -i qg-6d411f48-ec' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False), mock.call.remove_rule('neutron-meter-r-eeef45da-c60', '-s 40.0.0.0/24 -o qg-7d411f48-ec' ' -j neutron-meter-l-eeef45da-c60', wrap=False, top=False) ] self.v4filter_inst.assert_has_calls(calls) def test_remove_metering_label(self): routers = TEST_ROUTERS[:1] self.namespace_exists.return_value = True self.metering.add_metering_label(None, routers) self.metering.remove_metering_label(None, routers) calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), mock.call.add_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-l-c5df2fe5-c60', '', wrap=False), mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', '-d 10.0.0.0/24 -i qg-6d411f48-ec' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False), mock.call.remove_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), mock.call.remove_chain('neutron-meter-r-c5df2fe5-c60', wrap=False)] self.v4filter_inst.assert_has_calls(calls) def test_remove_metering_label_with_dvr_routers(self): routers = TEST_DVR_ROUTER[:1] self.namespace_exists.return_value = True self.metering.add_metering_label(None, routers) self.metering._process_ns_specific_disassociate_metering_label = ( mock.Mock()) self.metering.remove_metering_label(None, routers) self.assertEqual( 2, (self.metering. 
_process_ns_specific_disassociate_metering_label.call_count)) def test_update_routers(self): routers = copy.deepcopy(TEST_ROUTERS) routers[1]['_metering_labels'][0]['rules'][0].update({ 'direction': 'ingress', 'excluded': True, }) self.namespace_exists.return_value = True self.metering.add_metering_label(None, routers) updates = copy.deepcopy(routers) updates[0]['gw_port_id'] = '587b63c1-22a3-40b3-9834-486d1fb215a5' self.metering.update_routers(None, updates) calls = [mock.call.add_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), mock.call.add_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-l-c5df2fe5-c60', '', wrap=False), mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', '-d 10.0.0.0/24 -i qg-6d411f48-ec' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False), mock.call.add_chain('neutron-meter-l-eeef45da-c60', wrap=False), mock.call.add_chain('neutron-meter-r-eeef45da-c60', wrap=False), mock.call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-eeef45da-c60', wrap=False), mock.call.add_rule('neutron-meter-l-eeef45da-c60', '', wrap=False), mock.call.add_rule('neutron-meter-r-eeef45da-c60', '-d 20.0.0.0/24 -i qg-7d411f48-ec' ' -j RETURN', wrap=False, top=True), mock.call.remove_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), mock.call.remove_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_chain('neutron-meter-l-c5df2fe5-c60', wrap=False), mock.call.add_chain('neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-FORWARD', '-j ' 'neutron-meter-r-c5df2fe5-c60', wrap=False), mock.call.add_rule('neutron-meter-l-c5df2fe5-c60', '', wrap=False), mock.call.add_rule('neutron-meter-r-c5df2fe5-c60', '-d 10.0.0.0/24 -i qg-587b63c1-22' ' -j neutron-meter-l-c5df2fe5-c60', wrap=False, top=False)] self.v4filter_inst.assert_has_calls(calls) def test_update_routers_removal(self): routers = TEST_ROUTERS self.namespace_exists.return_value = True self.metering.add_metering_label(None, routers) # Remove router id '373ec392-1711-44e3-b008-3251ccfc5099' updates = TEST_ROUTERS[:1] self.metering.update_routers(None, updates) calls = [mock.call.remove_chain('neutron-meter-l-eeef45da-c60', wrap=False), mock.call.remove_chain('neutron-meter-r-eeef45da-c60', wrap=False)] self.v4filter_inst.assert_has_calls(calls) def test_get_traffic_counters_with_missing_chain(self): for r in TEST_ROUTERS: rm = iptables_driver.RouterWithMetering(self.metering.conf, r) rm.metering_labels = {r['_metering_labels'][0]['id']: 'fake'} self.metering.routers[r['id']] = rm mocked_method = self.iptables_cls.return_value.get_traffic_counters mocked_method.side_effect = [{'pkts': 1, 'bytes': 8}, RuntimeError('Failed to find the chain')] counters = self.metering.get_traffic_counters(None, TEST_ROUTERS) expected_label_id = TEST_ROUTERS[0]['_metering_labels'][0]['id'] self.assertIn(expected_label_id, counters) self.assertEqual(1, counters[expected_label_id]['pkts']) self.assertEqual(8, counters[expected_label_id]['bytes']) def test_sync_router_namespaces(self): routers = TEST_DVR_ROUTER[:1] self.metering._process_ns_specific_metering_label = mock.Mock() self.namespace_exists.return_value = False self.metering.add_metering_label(None, routers) rm = self.metering.routers[routers[0]['id']] self.assertEqual( 0, self.metering._process_ns_specific_metering_label.call_count) self.assertIsNone(rm.snat_iptables_manager) self.assertIsNone(rm.iptables_manager) 
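        # Illustrative note (not in the original test): the side_effect
        # sequences below assume sync_router_namespaces() probes the DVR
        # snat namespace before the qrouter namespace, so for example:
        #
        #     self.namespace_exists.side_effect = [True, False]
        #
        # creates only rm.snat_iptables_manager on the first sync, while a
        # later [True] creates rm.iptables_manager as well.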
self.namespace_exists.side_effect = [True, False] self.metering.sync_router_namespaces(None, routers) self.assertIsNotNone(rm.snat_iptables_manager) self.assertIsNone(rm.iptables_manager) self.assertEqual( 1, self.metering._process_ns_specific_metering_label.call_count) self.namespace_exists.side_effect = [True] self.metering.sync_router_namespaces(None, routers) self.assertIsNotNone(rm.snat_iptables_manager) self.assertIsNotNone(rm.iptables_manager) self.assertEqual( 3, self.metering._process_ns_specific_metering_label.call_count) # syncing again should have no effect self.namespace_exists.side_effect = [RuntimeError('Unexpected call')] self.metering.sync_router_namespaces(None, routers) self.assertIsNotNone(rm.snat_iptables_manager) self.assertIsNotNone(rm.iptables_manager) self.assertEqual( 3, self.metering._process_ns_specific_metering_label.call_count)

# ==== file: neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/metering/test_metering_plugin.py ====

# Copyright (C) 2013 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib.agent import topics from neutron_lib.api.definitions import metering as metering_apidef from neutron_lib import context from neutron_lib.db import api as db_api from neutron_lib.plugins import constants from neutron_lib.plugins import directory from neutron_lib.tests import tools from neutron_lib.utils import net as net_utils from oslo_utils import uuidutils from neutron.api.rpc.agentnotifiers import metering_rpc_agent_api from neutron.db.metering import metering_rpc from neutron.extensions import l3 as ext_l3 from neutron.extensions import metering as ext_metering from neutron.objects import agent as agent_obj from neutron.tests.common import helpers from neutron.tests.unit.db.metering import test_metering_db from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit.extensions import test_l3 _uuid = uuidutils.generate_uuid METERING_SERVICE_PLUGIN_KLASS = ( "neutron.services.metering." "metering_plugin.MeteringPlugin" ) class MeteringTestExtensionManager(object): def get_resources(self): l3_res = ext_l3.L3.get_resources() metering_res = ext_metering.Metering.get_resources() return l3_res + metering_res def get_actions(self): return [] def get_request_extensions(self): return [] # TODO(akamyshnikova): we need this temporary FakeContext class while # Context checks for the existence of the session attribute.
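# A brief usage sketch (illustrative, not from the original file) of the
# FakeContext defined below: it lazily creates a writer session on first
# access, per the assumption noted in the TODO above.
#
#     ctx = FakeContext('', 'some-tenant-id', is_admin=True)
#     ctx.session   # first access calls db_api.get_writer_session()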
class FakeContext(context.ContextBaseWithSession): def __init__(self, *args, **kwargs): super(FakeContext, self).__init__(*args, **kwargs) self._session = None @property def session(self): if self._session is None: self._session = db_api.get_writer_session() return self._session class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase, test_l3.L3NatTestCaseMixin, test_metering_db.MeteringPluginDbTestCaseMixin): resource_prefix_map = dict( (k.replace('_', '-'), "/metering") for k in metering_apidef.RESOURCE_ATTRIBUTE_MAP.keys() ) def setUp(self): plugin = 'neutron.tests.unit.extensions.test_l3.TestL3NatIntPlugin' service_plugins = {'metering_plugin_name': METERING_SERVICE_PLUGIN_KLASS} ext_mgr = MeteringTestExtensionManager() super(TestMeteringPlugin, self).setUp(plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) self.uuid = '654f6b9d-0f36-4ae5-bd1b-01616794ca60' uuid = 'oslo_utils.uuidutils.generate_uuid' self.uuid_patch = mock.patch(uuid, return_value=self.uuid) self.mock_uuid = self.uuid_patch.start() self.tenant_id = 'a7e61382-47b8-4d40-bae3-f95981b5637b' self.ctx = FakeContext('', self.tenant_id, is_admin=True) self.context_patch = mock.patch('neutron_lib.context.Context', return_value=self.ctx) self.mock_context = self.context_patch.start() self.topic = topics.METERING_AGENT add = ('neutron.api.rpc.agentnotifiers.' + 'metering_rpc_agent_api.MeteringAgentNotifyAPI' + '.add_metering_label') self.add_patch = mock.patch(add) self.mock_add = self.add_patch.start() remove = ('neutron.api.rpc.agentnotifiers.' + 'metering_rpc_agent_api.MeteringAgentNotifyAPI' + '.remove_metering_label') self.remove_patch = mock.patch(remove) self.mock_remove = self.remove_patch.start() update = ('neutron.api.rpc.agentnotifiers.' + 'metering_rpc_agent_api.MeteringAgentNotifyAPI' + '.update_metering_label_rules') self.update_patch = mock.patch(update) self.mock_update = self.update_patch.start() add_rule = ('neutron.api.rpc.agentnotifiers.' + 'metering_rpc_agent_api.MeteringAgentNotifyAPI' + '.add_metering_label_rule') self.add_rule_patch = mock.patch(add_rule) self.mock_add_rule = self.add_rule_patch.start() remove_rule = ('neutron.api.rpc.agentnotifiers.' + 'metering_rpc_agent_api.MeteringAgentNotifyAPI' + '.remove_metering_label_rule') self.remove_rule_patch = mock.patch(remove_rule) self.mock_remove_rule = self.remove_rule_patch.start() def test_routers_updated_on_host_rpc_call(self): router_test = { 'id': 'xyz', 'name': 'testrouter'} notify_host = ('neutron.api.rpc.agentnotifiers.' 
+ 'metering_rpc_agent_api.MeteringAgentNotifyAPI' + '._notification_host') self.notify_patch = mock.patch(notify_host) self.mock_notify_host = self.notify_patch.start() metering_rpc_handle = metering_rpc_agent_api.MeteringAgentNotifyAPI() metering_rpc_handle.routers_updated_on_host( self.ctx, [router_test['id']], 'test_host') self.mock_notify_host.assert_called_with(self.ctx, 'routers_updated', 'test_host', routers=['xyz']) def test_add_metering_label_rpc_call(self): second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84' expected = [{'status': 'ACTIVE', 'name': 'router1', 'gw_port_id': None, 'admin_state_up': True, 'distributed': False, 'tenant_id': self.tenant_id, '_metering_labels': [ {'rules': [], 'id': self.uuid}], 'id': self.uuid}] tenant_id_2 = '8a268a58-1610-4890-87e0-07abb8231206' self.mock_uuid.return_value = second_uuid with self.router(name='router2', tenant_id=tenant_id_2, set_context=True): self.mock_uuid.return_value = self.uuid with self.router(name='router1', tenant_id=self.tenant_id, set_context=True): with self.metering_label(tenant_id=self.tenant_id, set_context=True): self.mock_add.assert_called_with(self.ctx, expected) def test_add_metering_label_shared_rpc_call(self): second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84' expected = [{'status': 'ACTIVE', 'name': 'router1', 'gw_port_id': None, 'admin_state_up': True, 'distributed': False, 'tenant_id': self.tenant_id, '_metering_labels': [ {'rules': [], 'id': self.uuid}, {'rules': [], 'id': second_uuid}], 'id': self.uuid}] tenant_id_2 = '8a268a58-1610-4890-87e0-07abb8231206' with self.router(name='router1', tenant_id=self.tenant_id, set_context=True): with self.metering_label(tenant_id=self.tenant_id, set_context=True): self.mock_uuid.return_value = second_uuid with self.metering_label(tenant_id=tenant_id_2, shared=True, set_context=True): self.mock_add.assert_called_with(self.ctx, expected) def test_remove_metering_label_rpc_call(self): expected = [{'status': 'ACTIVE', 'name': 'router1', 'gw_port_id': None, 'admin_state_up': True, 'distributed': False, 'tenant_id': self.tenant_id, '_metering_labels': [ {'rules': [], 'id': self.uuid}], 'id': self.uuid}] with self.router(tenant_id=self.tenant_id, set_context=True): with self.metering_label(tenant_id=self.tenant_id, set_context=True) as label: self.mock_add.assert_called_with(self.ctx, expected) self._delete('metering-labels', label['metering_label']['id']) self.mock_remove.assert_called_with(self.ctx, expected) def test_remove_one_metering_label_rpc_call(self): second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84' expected_add = [{'status': 'ACTIVE', 'name': 'router1', 'gw_port_id': None, 'admin_state_up': True, 'distributed': False, 'tenant_id': self.tenant_id, '_metering_labels': [ {'rules': [], 'id': self.uuid}, {'rules': [], 'id': second_uuid}], 'id': self.uuid}] expected_remove = [{'status': 'ACTIVE', 'name': 'router1', 'gw_port_id': None, 'admin_state_up': True, 'distributed': False, 'tenant_id': self.tenant_id, '_metering_labels': [ {'rules': [], 'id': second_uuid}], 'id': self.uuid}] with self.router(tenant_id=self.tenant_id, set_context=True): with self.metering_label(tenant_id=self.tenant_id, set_context=True): self.mock_uuid.return_value = second_uuid with self.metering_label(tenant_id=self.tenant_id, set_context=True) as label: self.mock_add.assert_called_with(self.ctx, expected_add) self._delete('metering-labels', label['metering_label']['id']) self.mock_remove.assert_called_with(self.ctx, expected_remove) def 
test_add_and_remove_metering_label_rule_rpc_call(self): second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84' expected_add = [{'status': 'ACTIVE', 'name': 'router1', 'gw_port_id': None, 'admin_state_up': True, 'distributed': False, 'tenant_id': self.tenant_id, '_metering_labels': [ {'rule': { 'remote_ip_prefix': net_utils.AuthenticIPNetwork( '10.0.0.0/24'), 'direction': 'ingress', 'metering_label_id': self.uuid, 'excluded': False, 'id': second_uuid}, 'id': self.uuid}], 'id': self.uuid}] expected_del = [{'status': 'ACTIVE', 'name': 'router1', 'gw_port_id': None, 'admin_state_up': True, 'distributed': False, 'tenant_id': self.tenant_id, '_metering_labels': [ {'rule': { 'remote_ip_prefix': net_utils.AuthenticIPNetwork( '10.0.0.0/24'), 'direction': 'ingress', 'metering_label_id': self.uuid, 'excluded': False, 'id': second_uuid}, 'id': self.uuid}], 'id': self.uuid}] with self.router(tenant_id=self.tenant_id, set_context=True): with self.metering_label(tenant_id=self.tenant_id, set_context=True) as label: la = label['metering_label'] self.mock_uuid.return_value = second_uuid with self.metering_label_rule(la['id']): self.mock_add_rule.assert_called_with(self.ctx, expected_add) self._delete('metering-label-rules', second_uuid) self.mock_remove_rule.assert_called_with(self.ctx, expected_del) def test_delete_metering_label_does_not_clear_router_tenant_id(self): tenant_id = '654f6b9d-0f36-4ae5-bd1b-01616794ca60' with self.metering_label(tenant_id=tenant_id) as metering_label: with self.router(tenant_id=tenant_id, set_context=True) as r: router = self._show('routers', r['router']['id']) self.assertEqual(tenant_id, router['router']['tenant_id']) metering_label_id = metering_label['metering_label']['id'] self._delete('metering-labels', metering_label_id, 204) router = self._show('routers', r['router']['id']) self.assertEqual(tenant_id, router['router']['tenant_id']) class TestMeteringPluginL3AgentScheduler( test_db_base_plugin_v2.NeutronDbPluginV2TestCase, test_l3.L3NatTestCaseMixin, test_metering_db.MeteringPluginDbTestCaseMixin): resource_prefix_map = dict( (k.replace('_', '-'), "/metering") for k in metering_apidef.RESOURCE_ATTRIBUTE_MAP.keys() ) def setUp(self, plugin_str=None, service_plugins=None, scheduler=None): if not plugin_str: plugin_str = ('neutron.tests.unit.extensions.test_l3.' 'TestL3NatIntAgentSchedulingPlugin') if not service_plugins: service_plugins = {'metering_plugin_name': METERING_SERVICE_PLUGIN_KLASS} if not scheduler: scheduler = plugin_str ext_mgr = MeteringTestExtensionManager() super(TestMeteringPluginL3AgentScheduler, self).setUp(plugin=plugin_str, ext_mgr=ext_mgr, service_plugins=service_plugins) self.uuid = '654f6b9d-0f36-4ae5-bd1b-01616794ca60' uuid = 'oslo_utils.uuidutils.generate_uuid' self.uuid_patch = mock.patch(uuid, return_value=self.uuid) self.mock_uuid = self.uuid_patch.start() self.tenant_id = 'a7e61382-47b8-4d40-bae3-f95981b5637b' self.ctx = FakeContext('', self.tenant_id, is_admin=True) self.context_patch = mock.patch('neutron_lib.context.Context', return_value=self.ctx) self.mock_context = self.context_patch.start() self.l3routers_patch = mock.patch(scheduler + '.get_l3_agents_hosting_routers') self.l3routers_mock = self.l3routers_patch.start() self.topic = topics.METERING_AGENT add = ('neutron.api.rpc.agentnotifiers.' + 'metering_rpc_agent_api.MeteringAgentNotifyAPI' + '.add_metering_label') self.add_patch = mock.patch(add) self.mock_add = self.add_patch.start() remove = ('neutron.api.rpc.agentnotifiers.' 
+ 'metering_rpc_agent_api.MeteringAgentNotifyAPI' + '.remove_metering_label') self.remove_patch = mock.patch(remove) self.mock_remove = self.remove_patch.start() def test_add_metering_label_rpc_call(self): second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84' expected = [{'status': 'ACTIVE', 'name': 'router1', 'gw_port_id': None, 'admin_state_up': True, 'distributed': False, 'tenant_id': self.tenant_id, '_metering_labels': [ {'rules': [], 'id': second_uuid}], 'id': self.uuid}, {'status': 'ACTIVE', 'name': 'router2', 'gw_port_id': None, 'admin_state_up': True, 'distributed': False, 'tenant_id': self.tenant_id, '_metering_labels': [ {'rules': [], 'id': second_uuid}], 'id': second_uuid}] # bind each router to a specific agent agent1 = agent_obj.Agent(mock.ANY, host='agent1') agent2 = agent_obj.Agent(mock.ANY, host='agent2') agents = {self.uuid: agent1, second_uuid: agent2} def side_effect(context, routers, admin_state_up, active): return [agents[routers[0]]] self.l3routers_mock.side_effect = side_effect with self.router(name='router1', tenant_id=self.tenant_id, set_context=True): self.mock_uuid.return_value = second_uuid with self.router(name='router2', tenant_id=self.tenant_id, set_context=True): with self.metering_label(tenant_id=self.tenant_id, set_context=True): self.mock_add.assert_called_with( self.ctx, tools.UnorderedList(expected)) class TestMeteringPluginL3AgentSchedulerServicePlugin( TestMeteringPluginL3AgentScheduler): """Unit tests for the case where separate service plugin implements L3 routing. """ def setUp(self): l3_plugin = ('neutron.tests.unit.extensions.test_l3.' 'TestL3NatAgentSchedulingServicePlugin') service_plugins = {'metering_plugin_name': METERING_SERVICE_PLUGIN_KLASS, 'l3_plugin_name': l3_plugin} plugin_str = ('neutron.tests.unit.extensions.test_l3.' 'TestNoL3NatPlugin') super(TestMeteringPluginL3AgentSchedulerServicePlugin, self).setUp( plugin_str=plugin_str, service_plugins=service_plugins, scheduler=l3_plugin) class TestMeteringPluginRpcFromL3Agent( test_db_base_plugin_v2.NeutronDbPluginV2TestCase, test_l3.L3NatTestCaseMixin, test_metering_db.MeteringPluginDbTestCaseMixin): resource_prefix_map = dict( (k.replace('_', '-'), "/metering") for k in metering_apidef.RESOURCE_ATTRIBUTE_MAP ) def setUp(self): service_plugins = {'metering_plugin_name': METERING_SERVICE_PLUGIN_KLASS} plugin = ('neutron.tests.unit.extensions.test_l3.' 
'TestL3NatIntAgentSchedulingPlugin') ext_mgr = MeteringTestExtensionManager() super(TestMeteringPluginRpcFromL3Agent, self).setUp(plugin=plugin, service_plugins=service_plugins, ext_mgr=ext_mgr) self.meter_plugin = directory.get_plugin(constants.METERING) self.tenant_id = 'admin_tenant_id' self.tenant_id_1 = 'tenant_id_1' self.tenant_id_2 = 'tenant_id_2' self.adminContext = context.get_admin_context() helpers.register_l3_agent(host='agent1') def test_get_sync_data_metering(self): with self.subnet() as subnet: s = subnet['subnet'] self._set_net_external(s['network_id']) with self.router(name='router1', subnet=subnet) as router: r = router['router'] self._add_external_gateway_to_router(r['id'], s['network_id']) with self.metering_label(tenant_id=r['tenant_id']): callbacks = metering_rpc.MeteringRpcCallbacks( self.meter_plugin) data = callbacks.get_sync_data_metering(self.adminContext, host='agent1') self.assertEqual('router1', data[0]['name']) helpers.register_l3_agent(host='agent2') data = callbacks.get_sync_data_metering(self.adminContext, host='agent2') self.assertFalse(data) self._remove_external_gateway_from_router( r['id'], s['network_id']) def test_get_sync_data_metering_shared(self): with self.router(name='router1', tenant_id=self.tenant_id_1): with self.router(name='router2', tenant_id=self.tenant_id_2): with self.metering_label(tenant_id=self.tenant_id, shared=True): callbacks = metering_rpc.MeteringRpcCallbacks( self.meter_plugin) data = callbacks.get_sync_data_metering(self.adminContext) routers = [router['name'] for router in data] self.assertIn('router1', routers) self.assertIn('router2', routers) def test_get_sync_data_metering_not_shared(self): with self.router(name='router1', tenant_id=self.tenant_id_1): with self.router(name='router2', tenant_id=self.tenant_id_2): with self.metering_label(tenant_id=self.tenant_id): callbacks = metering_rpc.MeteringRpcCallbacks( self.meter_plugin) data = callbacks.get_sync_data_metering(self.adminContext) routers = [router['name'] for router in data] self.assertEqual([], routers) def test_get_sync_data_metering_with_unscheduled_router(self): with self.subnet() as subnet: s = subnet['subnet'] self._set_net_external(s['network_id']) with self.router( name='router1', tenant_id=self.tenant_id ) as router1: self._add_external_gateway_to_router( router1['router']['id'], s['network_id']) with self.router(name='router2', tenant_id=self.tenant_id): with self.metering_label(tenant_id=self.tenant_id): callbacks = metering_rpc.MeteringRpcCallbacks( self.meter_plugin) data = callbacks.get_sync_data_metering( self.adminContext, host='agent1') self.assertEqual( set(['router1']), set([r['name'] for r in data])) self._remove_external_gateway_from_router( router1['router']['id'], s['network_id']) def test_get_sync_data_metering_with_inactive_router(self): with self.subnet() as subnet: s = subnet['subnet'] self._set_net_external(s['network_id']) with self.router( name='router1', tenant_id=self.tenant_id ) as router1: self._add_external_gateway_to_router( router1['router']['id'], s['network_id']) with self.router( name='router2', tenant_id=self.tenant_id, admin_state_up=False ) as router2: self._add_external_gateway_to_router( router2['router']['id'], s['network_id']) with self.metering_label(tenant_id=self.tenant_id): callbacks = metering_rpc.MeteringRpcCallbacks( self.meter_plugin) data = callbacks.get_sync_data_metering( self.adminContext, host='agent1') self.assertEqual( set(['router1']), set([r['name'] for r in data])) 
self._remove_external_gateway_from_router( router2['router']['id'], s['network_id']) self._remove_external_gateway_from_router( router1['router']['id'], s['network_id'])

# ==== file: neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/network_segment_range/test_plugin.py ====

# Copyright (c) 2019 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib import constants from neutron_lib import context from neutron_lib import exceptions as exc from neutron_lib.utils import helpers from oslo_config import cfg from neutron.db import segments_db from neutron.services.network_segment_range import plugin as range_plugin from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin from neutron.tests.unit import testlib_api SERVICE_PLUGIN_KLASS = ('neutron.services.network_segment_range.plugin.'
'NetworkSegmentRangePlugin') class TestNetworkSegmentRange(testlib_api.SqlTestCase): _foo_range = {'name': 'foo-range', 'default': False, 'shared': False, 'project_id': test_plugin.TEST_TENANT_ID, 'network_type': 'foo_network_type', 'physical_network': 'foo_phys_net', 'minimum': 200, 'maximum': 300} _flat_range = {'name': 'foo-flat-range', 'default': False, 'shared': False, 'project_id': test_plugin.TEST_TENANT_ID, 'network_type': constants.TYPE_FLAT, 'physical_network': None, 'minimum': 0, 'maximum': 0} _vlan_range = {'name': 'foo-vlan-range', 'default': False, 'shared': False, 'project_id': test_plugin.TEST_TENANT_ID, 'network_type': constants.TYPE_VLAN, 'physical_network': 'phys_net', 'minimum': 200, 'maximum': 300} _vxlan_range = {'name': 'foo-vxlan-range', 'default': False, 'shared': False, 'project_id': test_plugin.TEST_TENANT_ID, 'network_type': constants.TYPE_VXLAN, 'physical_network': None, 'minimum': 400, 'maximum': 500} _gre_range = {'name': 'foo-vlan-range', 'default': False, 'shared': False, 'project_id': test_plugin.TEST_TENANT_ID, 'network_type': constants.TYPE_GRE, 'physical_network': None, 'minimum': 600, 'maximum': 700} _geneve_range = {'name': 'foo-geneve-range', 'default': False, 'shared': False, 'project_id': test_plugin.TEST_TENANT_ID, 'network_type': constants.TYPE_GENEVE, 'physical_network': None, 'minimum': 800, 'maximum': 900} def setUp(self): super(TestNetworkSegmentRange, self).setUp() with mock.patch("neutron_lib.plugins.directory.get_plugin"): self.plugin = range_plugin.NetworkSegmentRangePlugin() self.context = context.get_admin_context() cfg.CONF.set_override('service_plugins', [SERVICE_PLUGIN_KLASS]) def _validate_resource(self, resource, keys, res_name): for k in keys: self.assertIn(k, resource[res_name]) if isinstance(keys[k], list): self.assertEqual( sorted(keys[k], key=helpers.safe_sort_key), sorted(resource[res_name][k], key=helpers.safe_sort_key)) else: self.assertEqual(keys[k], resource[res_name][k]) def test__is_network_segment_range_referenced(self): with mock.patch.object(segments_db, 'network_segments_exist_in_range', return_value=True): self.assertTrue(self.plugin._is_network_segment_range_referenced( self.context, self._vlan_range)) def test__is_network_segment_range_unreferenced(self): with mock.patch.object(segments_db, 'network_segments_exist_in_range', return_value=False): self.assertFalse(self.plugin._is_network_segment_range_referenced( self.context, self._vlan_range)) def test__is_network_segment_range_type_supported(self): for foo_range in [self._vlan_range, self._vxlan_range, self._gre_range, self._geneve_range]: self.assertTrue( self.plugin. 
_is_network_segment_range_type_supported( foo_range['network_type'])) def test__is_network_segment_range_type_unsupported(self): self.assertRaises( exc.NeutronException, self.plugin._is_network_segment_range_type_supported, self._foo_range['network_type']) self.assertRaises( exc.NeutronException, self.plugin._is_network_segment_range_type_supported, self._flat_range['network_type']) def test__are_allocated_segments_in_range_impacted(self): existing_range = self._foo_range updated_range = self._vlan_range impacted_existing_ranges = [(150, 250), (250, 320), (200, 300), (180, 330)] for ret in impacted_existing_ranges: with mock.patch.object(segments_db, 'min_max_actual_segments_in_range', return_value=ret): self.assertTrue( self.plugin._are_allocated_segments_in_range_impacted( self.context, existing_range, updated_range)) def test__are_allocated_segments_in_range_unimpacted(self): existing_range = self._foo_range updated_range = self._vlan_range with mock.patch.object(segments_db, 'min_max_actual_segments_in_range', return_value=(220, 270)): self.assertFalse( self.plugin._are_allocated_segments_in_range_impacted( self.context, existing_range, updated_range)) def test_create_network_segment_range(self): test_range = self._vlan_range network_segment_range = {'network_segment_range': test_range} ret = self.plugin.create_network_segment_range(self.context, network_segment_range) res = {'network_segment_range': ret} self._validate_resource(res, test_range, 'network_segment_range') def test_create_network_segment_range_failed_with_unsupported_network_type( self): test_range = self._flat_range network_segment_range = {'network_segment_range': test_range} self.assertRaises( exc.NeutronException, self.plugin.create_network_segment_range, self.context, network_segment_range) def test_create_network_segment_range_missing_physical_network_for_vlan( self): test_range = self._vlan_range.copy() test_range.pop("physical_network") network_segment_range = {'network_segment_range': test_range} self.assertRaises( exc.NeutronException, self.plugin.create_network_segment_range, self.context, network_segment_range) def test_update_network_segment_range(self): test_range = self._vlan_range network_segment_range = {'network_segment_range': test_range} ret = self.plugin.create_network_segment_range(self.context, network_segment_range) updated_network_segment_range = { 'network_segment_range': {'minimum': 700, 'maximum': 800}} with mock.patch.object(self.plugin, '_are_allocated_segments_in_range_impacted', return_value=False): updated_ret = self.plugin.update_network_segment_range( self.context, ret['id'], updated_network_segment_range) res = {'network_segment_range': updated_ret} test_range['minimum'] = 700 test_range['maximum'] = 800 self._validate_resource(res, test_range, 'network_segment_range') def test_update_network_segment_range_failed_with_impacted_existing_range( self): test_range = self._vlan_range network_segment_range = {'network_segment_range': test_range} ret = self.plugin.create_network_segment_range(self.context, network_segment_range) updated_network_segment_range = { 'network_segment_range': {'minimum': 150, 'maximum': 250}} with mock.patch.object(self.plugin, '_are_allocated_segments_in_range_impacted', return_value=True): self.assertRaises( exc.NeutronException, self.plugin.update_network_segment_range, self.context, ret['id'], updated_network_segment_range) def test_delete_network_segment_range(self): test_range = self._vlan_range network_segment_range = {'network_segment_range': test_range} ret 
= self.plugin.create_network_segment_range(self.context, network_segment_range) with mock.patch.object(self.plugin, '_is_network_segment_range_referenced', return_value=False): try: self.plugin.delete_network_segment_range( self.context, ret['id']) except exc.NeutronException: self.fail("delete_network_segment_range raised " "NeutronException unexpectedly!") def test_delete_network_segment_range_failed_with_segment_referenced(self): test_range = self._vlan_range network_segment_range = {'network_segment_range': test_range} ret = self.plugin.create_network_segment_range(self.context, network_segment_range) with mock.patch.object(self.plugin, '_is_network_segment_range_referenced', return_value=True): self.assertRaises( exc.NeutronException, self.plugin.delete_network_segment_range, self.context, ret['id'])

# ==== file: neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/ovn_l3/test_plugin.py ====

# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import copy import mock from neutron_lib.api.definitions import external_net from neutron_lib.api.definitions import portbindings from neutron_lib.api.definitions import provider_net as pnet from neutron_lib.callbacks import events from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib import exceptions as n_exc from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from oslo_config import cfg from oslo_utils import uuidutils from neutron.common.ovn import constants as ovn_const from neutron.common.ovn import utils from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf as config from neutron.services.revisions import revision_plugin from neutron.tests.unit.api import test_extensions from neutron.tests.unit.extensions import test_extraroute from neutron.tests.unit.extensions import test_l3 from neutron.tests.unit.extensions import test_l3_ext_gw_mode as test_l3_gw from neutron.tests.unit import fake_resources from neutron.tests.unit.plugins.ml2 import test_plugin as test_mech_driver # TODO(mjozefcz): Find out a way to not inherit from # Ml2PluginV2TestCase.
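# Illustrative note (not part of the original file): the test class below
# wraps mock patching in a small _start_mock() helper; the equivalent inline
# pattern, for reference, would be:
#
#     patcher = mock.patch(path, return_value=return_value)
#     patch = patcher.start()
#     self.addCleanup(patcher.stop)   # undo the patch after each test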
class TestOVNL3RouterPlugin(test_mech_driver.Ml2PluginV2TestCase): l3_plugin = 'neutron.services.ovn_l3.plugin.OVNL3RouterPlugin' def _start_mock(self, path, return_value, new_callable=None): patcher = mock.patch(path, return_value=return_value, new_callable=new_callable) patch = patcher.start() self.addCleanup(patcher.stop) return patch def setUp(self): super(TestOVNL3RouterPlugin, self).setUp() revision_plugin.RevisionPlugin() network_attrs = {external_net.EXTERNAL: True, 'mtu': 1500} self.fake_network = \ fake_resources.FakeNetwork.create_one_network( attrs=network_attrs).info() self.fake_router_port = {'device_id': '', 'network_id': self.fake_network['id'], 'device_owner': 'network:router_interface', 'mac_address': 'aa:aa:aa:aa:aa:aa', 'status': constants.PORT_STATUS_ACTIVE, 'fixed_ips': [{'ip_address': '10.0.0.100', 'subnet_id': 'subnet-id'}], 'id': 'router-port-id'} self.fake_router_port_assert = { 'lrouter': 'neutron-router-id', 'mac': 'aa:aa:aa:aa:aa:aa', 'name': 'lrp-router-port-id', 'may_exist': True, 'networks': ['10.0.0.100/24'], 'options': {}, 'external_ids': { ovn_const.OVN_SUBNET_EXT_IDS_KEY: 'subnet-id', ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1', ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY: utils.ovn_name(self.fake_network['id'])}} self.fake_router_ports = [self.fake_router_port] self.fake_subnet = {'id': 'subnet-id', 'ip_version': 4, 'cidr': '10.0.0.0/24'} self.fake_router = {'id': 'router-id', 'name': 'router', 'admin_state_up': False, 'routes': [{'destination': '1.1.1.0/24', 'nexthop': '10.0.0.2'}]} self.fake_router_interface_info = { 'port_id': 'router-port-id', 'device_id': '', 'mac_address': 'aa:aa:aa:aa:aa:aa', 'subnet_id': 'subnet-id', 'subnet_ids': ['subnet-id'], 'fixed_ips': [{'ip_address': '10.0.0.100', 'subnet_id': 'subnet-id'}], 'id': 'router-port-id'} self.fake_external_fixed_ips = { 'network_id': 'ext-network-id', 'external_fixed_ips': [{'ip_address': '192.168.1.1', 'subnet_id': 'ext-subnet-id'}]} self.fake_router_with_ext_gw = { 'id': 'router-id', 'name': 'router', 'admin_state_up': True, 'external_gateway_info': self.fake_external_fixed_ips, 'gw_port_id': 'gw-port-id' } self.fake_router_without_ext_gw = { 'id': 'router-id', 'name': 'router', 'admin_state_up': True, } self.fake_ext_subnet = {'id': 'ext-subnet-id', 'ip_version': 4, 'cidr': '192.168.1.0/24', 'gateway_ip': '192.168.1.254'} self.fake_ext_gw_port = {'device_id': '', 'device_owner': 'network:router_gateway', 'fixed_ips': [{'ip_address': '192.168.1.1', 'subnet_id': 'ext-subnet-id'}], 'mac_address': '00:00:00:02:04:06', 'network_id': self.fake_network['id'], 'id': 'gw-port-id'} self.fake_ext_gw_port_assert = { 'lrouter': 'neutron-router-id', 'mac': '00:00:00:02:04:06', 'name': 'lrp-gw-port-id', 'networks': ['192.168.1.1/24'], 'may_exist': True, 'external_ids': { ovn_const.OVN_SUBNET_EXT_IDS_KEY: 'ext-subnet-id', ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1', ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY: utils.ovn_name(self.fake_network['id'])}, 'gateway_chassis': ['hv1'], 'options': {}} self.fake_floating_ip_attrs = {'floating_ip_address': '192.168.0.10', 'fixed_ip_address': '10.0.0.10'} self.fake_floating_ip = fake_resources.FakeFloatingIp.create_one_fip( attrs=self.fake_floating_ip_attrs) self.fake_floating_ip_new_attrs = { 'router_id': 'new-router-id', 'floating_ip_address': '192.168.0.10', 'fixed_ip_address': '10.10.10.10', 'port_id': 'new-port_id'} self.fake_floating_ip_new = ( fake_resources.FakeFloatingIp.create_one_fip( attrs=self.fake_floating_ip_new_attrs)) self.fake_ovn_nat_rule = ( 
fake_resources.FakeOvsdbRow.create_one_ovsdb_row({ 'logical_ip': self.fake_floating_ip['fixed_ip_address'], 'external_ip': self.fake_floating_ip['floating_ip_address'], 'type': 'dnat_and_snat', 'external_ids': { ovn_const.OVN_FIP_EXT_ID_KEY: self.fake_floating_ip['id'], ovn_const.OVN_FIP_PORT_EXT_ID_KEY: self.fake_floating_ip['port_id'], ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: utils.ovn_name( self.fake_floating_ip['router_id'])}})) self.l3_inst = directory.get_plugin(plugin_constants.L3) self.lb_id = uuidutils.generate_uuid() self.member_subnet = {'id': 'subnet-id', 'ip_version': 4, 'cidr': '10.0.0.0/24', 'network_id': self.fake_network['id']} self.member_id = uuidutils.generate_uuid() self.member_port_id = uuidutils.generate_uuid() self.member_address = '10.0.0.10' self.member_l4_port = '80' self.member_port = { 'network_id': self.fake_network['id'], 'mac_address': 'aa:aa:aa:aa:aa:aa', 'fixed_ips': [{'ip_address': self.member_address, 'subnet_id': self.member_subnet['id']}], 'id': 'fake-port-id'} self.member_lsp = fake_resources.FakeOvsdbRow.create_one_ovsdb_row( attrs={ 'addresses': ['10.0.0.10 ff:ff:ff:ff:ff:ff'], 'uuid': self.member_port['id']}) self.listener_id = uuidutils.generate_uuid() self.pool_id = uuidutils.generate_uuid() self.ovn_lb = mock.MagicMock() self.ovn_lb.protocol = ['tcp'] self.ovn_lb.uuid = uuidutils.generate_uuid() self.member_line = ( 'member_%s_%s:%s_%s' % (self.member_id, self.member_address, self.member_l4_port, self.member_subnet['id'])) self.ovn_lb.external_ids = { ovn_const.LB_EXT_IDS_VIP_KEY: '10.22.33.4', ovn_const.LB_EXT_IDS_VIP_FIP_KEY: '123.123.123.123', ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: 'foo_port', 'enabled': True, 'pool_%s' % self.pool_id: self.member_line, 'listener_%s' % self.listener_id: '80:pool_%s' % self.pool_id} self.lb_vip_lsp = fake_resources.FakeOvsdbRow.create_one_ovsdb_row( attrs={'external_ids': {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: '%s%s' % (ovn_const.LB_VIP_PORT_PREFIX, self.ovn_lb.uuid)}, 'name': uuidutils.generate_uuid(), 'addresses': ['10.0.0.100 ff:ff:ff:ff:ff:ee'], 'uuid': uuidutils.generate_uuid()}) self.lb_network = fake_resources.FakeOvsdbRow.create_one_ovsdb_row( attrs={'load_balancer': [self.ovn_lb], 'name': 'neutron-%s' % self.fake_network['id'], 'ports': [self.lb_vip_lsp, self.member_lsp], 'uuid': self.fake_network['id']}) self.nb_idl = self._start_mock( 'neutron.services.ovn_l3.plugin.OVNL3RouterPlugin._ovn', new_callable=mock.PropertyMock, return_value=fake_resources.FakeOvsdbNbOvnIdl()) self.sb_idl = self._start_mock( 'neutron.services.ovn_l3.plugin.OVNL3RouterPlugin._sb_ovn', new_callable=mock.PropertyMock, return_value=fake_resources.FakeOvsdbSbOvnIdl()) self._start_mock( 'neutron.plugins.ml2.plugin.Ml2Plugin.get_network', return_value=self.fake_network) self.get_port = self._start_mock( 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2.get_port', return_value=self.fake_router_port) self.get_subnet = self._start_mock( 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2.get_subnet', return_value=self.fake_subnet) self.get_router = self._start_mock( 'neutron.db.l3_db.L3_NAT_dbonly_mixin.get_router', return_value=self.fake_router) self._start_mock( 'neutron.db.extraroute_db.ExtraRoute_dbonly_mixin.update_router', return_value=self.fake_router) self._start_mock( 'neutron.db.l3_db.L3_NAT_dbonly_mixin.remove_router_interface', return_value=self.fake_router_interface_info) self._start_mock( 'neutron.db.l3_db.L3_NAT_dbonly_mixin.create_router', return_value=self.fake_router_with_ext_gw) self._start_mock( 
'neutron.db.l3_db.L3_NAT_dbonly_mixin.delete_router', return_value={}) self.mock_candidates = self._start_mock( 'neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.ovn_client.' 'OVNClient.get_candidates_for_scheduling', return_value=[]) self.mock_schedule = self._start_mock( 'neutron.scheduler.l3_ovn_scheduler.' 'OVNGatewayLeastLoadedScheduler._schedule_gateway', return_value=['hv1']) # FIXME(lucasagomes): We shouldn't be mocking the creation of # floating IPs here, that makes the FIP to not be registered in # the standardattributes table and therefore we also need to mock # bump_revision. self._start_mock( 'neutron.db.l3_db.L3_NAT_dbonly_mixin.create_floatingip', return_value=self.fake_floating_ip) self._get_floatingip = self._start_mock( 'neutron.db.l3_db.L3_NAT_dbonly_mixin._get_floatingip', return_value=self.fake_floating_ip) self._start_mock( 'neutron.db.l3_db.L3_NAT_dbonly_mixin.update_floatingip_status', return_value=None) self._start_mock( 'neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.ovn_client.' 'OVNClient.update_floatingip_status', return_value=None) self.bump_rev_p = self._start_mock( 'neutron.db.ovn_revision_numbers_db.bump_revision', return_value=None) self.del_rev_p = self._start_mock( 'neutron.db.ovn_revision_numbers_db.delete_revision', return_value=None) self.get_rev_p = self._start_mock( 'neutron.common.ovn.utils.get_revision_number', return_value=1) self.admin_context = mock.Mock() self._start_mock( 'neutron_lib.context.get_admin_context', return_value=self.admin_context) self.mock_is_lb_member_fip = mock.patch( 'neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.ovn_client' '.OVNClient._is_lb_member_fip', return_value=False) self.mock_is_lb_member_fip.start() @mock.patch('neutron.db.l3_db.L3_NAT_dbonly_mixin.add_router_interface') def test_add_router_interface(self, func): router_id = 'router-id' interface_info = {'port_id': 'router-port-id'} func.return_value = self.fake_router_interface_info self.l3_inst.add_router_interface(self.context, router_id, interface_info) self.l3_inst._ovn.add_lrouter_port.assert_called_once_with( **self.fake_router_port_assert) self.l3_inst._ovn.set_lrouter_port_in_lswitch_port.\ assert_called_once_with( 'router-port-id', 'lrp-router-port-id', is_gw_port=False, lsp_address=ovn_const.DEFAULT_ADDR_FOR_LSP_WITH_PEER) self.bump_rev_p.assert_called_once_with( mock.ANY, self.fake_router_port, ovn_const.TYPE_ROUTER_PORTS) @mock.patch('neutron.db.l3_db.L3_NAT_dbonly_mixin.add_router_interface') def test_add_router_interface_update_lrouter_port(self, func): router_id = 'router-id' interface_info = {'port_id': 'router-port-id'} func.return_value = {'id': router_id, 'port_id': 'router-port-id', 'subnet_id': 'subnet-id1', 'subnet_ids': ['subnet-id1'], 'fixed_ips': [ {'ip_address': '2001:db8::1', 'subnet_id': 'subnet-id1'}, {'ip_address': '2001:dba::1', 'subnet_id': 'subnet-id2'}], 'mac_address': 'aa:aa:aa:aa:aa:aa' } self.get_port.return_value = { 'id': 'router-port-id', 'fixed_ips': [ {'ip_address': '2001:db8::1', 'subnet_id': 'subnet-id1'}, {'ip_address': '2001:dba::1', 'subnet_id': 'subnet-id2'}], 'mac_address': 'aa:aa:aa:aa:aa:aa', 'network_id': 'network-id1'} fake_rtr_intf_networks = ['2001:db8::1/24', '2001:dba::1/24'] self.l3_inst.add_router_interface(self.context, router_id, interface_info) called_args_dict = ( self.l3_inst._ovn.update_lrouter_port.call_args_list[0][1]) self.assertEqual(1, self.l3_inst._ovn.update_lrouter_port.call_count) self.assertItemsEqual(fake_rtr_intf_networks, called_args_dict.get('networks', [])) 
self.l3_inst._ovn.set_lrouter_port_in_lswitch_port.\ assert_called_once_with( 'router-port-id', 'lrp-router-port-id', is_gw_port=False, lsp_address=ovn_const.DEFAULT_ADDR_FOR_LSP_WITH_PEER) def test_remove_router_interface(self): router_id = 'router-id' interface_info = {'port_id': 'router-port-id'} self.get_port.side_effect = n_exc.PortNotFound( port_id='router-port-id') self.l3_inst.remove_router_interface( self.context, router_id, interface_info) self.l3_inst._ovn.lrp_del.assert_called_once_with( 'lrp-router-port-id', 'neutron-router-id', if_exists=True) self.del_rev_p.assert_called_once_with( self.context, 'router-port-id', ovn_const.TYPE_ROUTER_PORTS) def test_remove_router_interface_update_lrouter_port(self): router_id = 'router-id' interface_info = {'port_id': 'router-port-id'} self.l3_inst.remove_router_interface( self.context, router_id, interface_info) self.l3_inst._ovn.update_lrouter_port.assert_called_once_with( if_exists=False, name='lrp-router-port-id', ipv6_ra_configs={}, networks=['10.0.0.100/24'], options={}, external_ids={ ovn_const.OVN_SUBNET_EXT_IDS_KEY: 'subnet-id', ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1', ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY: utils.ovn_name(self.fake_network['id'])}) @mock.patch('neutron.db.extraroute_db.ExtraRoute_dbonly_mixin.' 'update_router') @mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb' '.ovn_client.OVNClient._get_v4_network_of_all_router_ports') def test_update_router_admin_state_change(self, get_rps, func): router_id = 'router-id' new_router = self.fake_router.copy() updated_data = {'admin_state_up': True} new_router.update(updated_data) func.return_value = new_router self.l3_inst.update_router(self.context, router_id, {'router': updated_data}) self.l3_inst._ovn.update_lrouter.assert_called_once_with( 'neutron-router-id', enabled=True, external_ids={ ovn_const.OVN_GW_PORT_EXT_ID_KEY: '', ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1', ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router'}) @mock.patch('neutron.db.extraroute_db.ExtraRoute_dbonly_mixin.' 'update_router') @mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.' 
'ovn_client.OVNClient._get_v4_network_of_all_router_ports') def test_update_router_name_change(self, get_rps, func): router_id = 'router-id' new_router = self.fake_router.copy() updated_data = {'name': 'test'} new_router.update(updated_data) func.return_value = new_router self.l3_inst.update_router(self.context, router_id, {'router': updated_data}) self.l3_inst._ovn.update_lrouter.assert_called_once_with( 'neutron-router-id', enabled=False, external_ids={ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'test', ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1', ovn_const.OVN_GW_PORT_EXT_ID_KEY: ''}) @mock.patch.object(utils, 'get_lrouter_non_gw_routes') @mock.patch('neutron.db.l3_db.L3_NAT_dbonly_mixin.update_router') @mock.patch('neutron.db.l3_db.L3_NAT_dbonly_mixin._get_router') @mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb' '.ovn_client.OVNClient._get_v4_network_of_all_router_ports') def test_update_router_static_route_no_change(self, get_rps, get_r, func, mock_routes): router_id = 'router-id' get_rps.return_value = [{'device_id': '', 'device_owner': 'network:router_interface', 'mac_address': 'aa:aa:aa:aa:aa:aa', 'fixed_ips': [{'ip_address': '10.0.0.100', 'subnet_id': 'subnet-id'}], 'id': 'router-port-id'}] mock_routes.return_value = self.fake_router['routes'] update_data = {'router': {'routes': [{'destination': '1.1.1.0/24', 'nexthop': '10.0.0.2'}]}} self.l3_inst.update_router(self.context, router_id, update_data) self.assertFalse(self.l3_inst._ovn.add_static_route.called) self.assertFalse(self.l3_inst._ovn.delete_static_route.called) @mock.patch.object(utils, 'get_lrouter_non_gw_routes') @mock.patch('neutron.db.extraroute_db.ExtraRoute_dbonly_mixin.' 'update_router') @mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.' 'ovn_client.OVNClient._get_v4_network_of_all_router_ports') def test_update_router_static_route_change(self, get_rps, func, mock_routes): router_id = 'router-id' get_rps.return_value = [{'device_id': '', 'device_owner': 'network:router_interface', 'mac_address': 'aa:aa:aa:aa:aa:aa', 'fixed_ips': [{'ip_address': '10.0.0.100', 'subnet_id': 'subnet-id'}], 'id': 'router-port-id'}] mock_routes.return_value = self.fake_router['routes'] new_router = self.fake_router.copy() updated_data = {'routes': [{'destination': '2.2.2.0/24', 'nexthop': '10.0.0.3'}]} new_router.update(updated_data) func.return_value = new_router self.l3_inst.update_router(self.context, router_id, {'router': updated_data}) self.l3_inst._ovn.add_static_route.assert_called_once_with( 'neutron-router-id', ip_prefix='2.2.2.0/24', nexthop='10.0.0.3') self.l3_inst._ovn.delete_static_route.assert_called_once_with( 'neutron-router-id', ip_prefix='1.1.1.0/24', nexthop='10.0.0.2') @mock.patch.object(utils, 'get_lrouter_non_gw_routes') @mock.patch('neutron.db.extraroute_db.ExtraRoute_dbonly_mixin.' 'update_router') @mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.' 
'ovn_client.OVNClient._get_v4_network_of_all_router_ports') def test_update_router_static_route_clear(self, get_rps, func, mock_routes): router_id = 'router-id' get_rps.return_value = [{'device_id': '', 'device_owner': 'network:router_interface', 'mac_address': 'aa:aa:aa:aa:aa:aa', 'fixed_ips': [{'ip_address': '10.0.0.100', 'subnet_id': 'subnet-id'}], 'id': 'router-port-id'}] mock_routes.return_value = self.fake_router['routes'] new_router = self.fake_router.copy() updated_data = {'routes': []} new_router.update(updated_data) func.return_value = new_router self.l3_inst.update_router(self.context, router_id, {'router': updated_data}) self.l3_inst._ovn.add_static_route.assert_not_called() self.l3_inst._ovn.delete_static_route.assert_called_once_with( 'neutron-router-id', ip_prefix='1.1.1.0/24', nexthop='10.0.0.2') @mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.' 'ovn_client.OVNClient._get_v4_network_of_all_router_ports') def test_create_router_with_ext_gw(self, get_rps): self.l3_inst._ovn.is_col_present.return_value = True router = {'router': {'name': 'router'}} self.get_subnet.return_value = self.fake_ext_subnet self.get_port.return_value = self.fake_ext_gw_port get_rps.return_value = self.fake_ext_subnet['cidr'] self.l3_inst.create_router(self.context, router) external_ids = {ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router', ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1', ovn_const.OVN_GW_PORT_EXT_ID_KEY: 'gw-port-id'} self.l3_inst._ovn.create_lrouter.assert_called_once_with( 'neutron-router-id', external_ids=external_ids, enabled=True, options={}) self.l3_inst._ovn.add_lrouter_port.assert_called_once_with( **self.fake_ext_gw_port_assert) expected_calls = [ mock.call('neutron-router-id', ip_prefix='0.0.0.0/0', nexthop='192.168.1.254', external_ids={ ovn_const.OVN_ROUTER_IS_EXT_GW: 'true', ovn_const.OVN_SUBNET_EXT_ID_KEY: 'ext-subnet-id'})] self.l3_inst._ovn.set_lrouter_port_in_lswitch_port.\ assert_called_once_with( 'gw-port-id', 'lrp-gw-port-id', is_gw_port=True, lsp_address=ovn_const.DEFAULT_ADDR_FOR_LSP_WITH_PEER) self.l3_inst._ovn.add_static_route.assert_has_calls(expected_calls) bump_rev_calls = [mock.call(mock.ANY, self.fake_ext_gw_port, ovn_const.TYPE_ROUTER_PORTS), mock.call(mock.ANY, self.fake_router_with_ext_gw, ovn_const.TYPE_ROUTERS), ] self.assertEqual(len(bump_rev_calls), self.bump_rev_p.call_count) self.bump_rev_p.assert_has_calls(bump_rev_calls, any_order=False) @mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.ovn_client' '.OVNClient._get_router_ports') def test_delete_router_with_ext_gw(self, gprs): self.get_router.return_value = self.fake_router_with_ext_gw self.get_subnet.return_value = self.fake_ext_subnet self.l3_inst.delete_router(self.context, 'router-id') self.l3_inst._ovn.delete_lrouter.assert_called_once_with( 'neutron-router-id') @mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.ovn_client' '.OVNClient._get_router_ports') @mock.patch('neutron.db.l3_db.L3_NAT_dbonly_mixin.add_router_interface') def test_add_router_interface_with_gateway_set(self, ari, grps): router_id = 'router-id' interface_info = {'port_id': 'router-port-id'} ari.return_value = self.fake_router_interface_info self.get_router.return_value = self.fake_router_with_ext_gw self.l3_inst.add_router_interface(self.context, router_id, interface_info) self.l3_inst._ovn.add_lrouter_port.assert_called_once_with( **self.fake_router_port_assert) self.l3_inst._ovn.set_lrouter_port_in_lswitch_port.\ assert_called_once_with( 'router-port-id', 'lrp-router-port-id', 
is_gw_port=False, lsp_address=ovn_const.DEFAULT_ADDR_FOR_LSP_WITH_PEER) self.l3_inst._ovn.add_nat_rule_in_lrouter.assert_called_once_with( 'neutron-router-id', logical_ip='10.0.0.0/24', external_ip='192.168.1.1', type='snat') self.bump_rev_p.assert_called_with( mock.ANY, self.fake_router_port, ovn_const.TYPE_ROUTER_PORTS) @mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.ovn_client' '.OVNClient._get_router_ports') @mock.patch('neutron.db.l3_db.L3_NAT_dbonly_mixin.add_router_interface') def test_add_router_interface_with_gateway_set_and_snat_disabled( self, ari, grps): router_id = 'router-id' interface_info = {'port_id': 'router-port-id'} ari.return_value = self.fake_router_interface_info get_router = self.fake_router_with_ext_gw get_router['external_gateway_info']['enable_snat'] = False self.get_router.return_value = get_router self.l3_inst.add_router_interface(self.context, router_id, interface_info) self.l3_inst._ovn.add_lrouter_port.assert_called_once_with( **self.fake_router_port_assert) self.l3_inst._ovn.set_lrouter_port_in_lswitch_port.\ assert_called_once_with( 'router-port-id', 'lrp-router-port-id', is_gw_port=False, lsp_address=ovn_const.DEFAULT_ADDR_FOR_LSP_WITH_PEER) self.l3_inst._ovn.add_nat_rule_in_lrouter.assert_not_called() @mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.get_network') @mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.ovn_client' '.OVNClient._get_router_ports') @mock.patch('neutron.db.l3_db.L3_NAT_dbonly_mixin.add_router_interface') def test_add_router_interface_vlan_network(self, ari, grps, gn): router_id = 'router-id' interface_info = {'port_id': 'router-port-id'} ari.return_value = self.fake_router_interface_info self.get_router.return_value = self.fake_router_with_ext_gw # Set the type to be VLAN fake_network_vlan = self.fake_network fake_network_vlan[pnet.NETWORK_TYPE] = constants.TYPE_VLAN gn.return_value = fake_network_vlan self.l3_inst.add_router_interface(self.context, router_id, interface_info) # Make sure that the "reside-on-redirect-chassis" option was # set to the new router port fake_router_port_assert = self.fake_router_port_assert fake_router_port_assert['options'] = { 'reside-on-redirect-chassis': 'true'} self.l3_inst._ovn.add_lrouter_port.assert_called_once_with( **fake_router_port_assert) self.l3_inst._ovn.set_lrouter_port_in_lswitch_port.\ assert_called_once_with( 'router-port-id', 'lrp-router-port-id', is_gw_port=False, lsp_address=ovn_const.DEFAULT_ADDR_FOR_LSP_WITH_PEER) self.l3_inst._ovn.add_nat_rule_in_lrouter.assert_called_once_with( 'neutron-router-id', logical_ip='10.0.0.0/24', external_ip='192.168.1.1', type='snat') self.bump_rev_p.assert_called_with( mock.ANY, self.fake_router_port, ovn_const.TYPE_ROUTER_PORTS) def test_remove_router_interface_with_gateway_set(self): router_id = 'router-id' interface_info = {'port_id': 'router-port-id', 'subnet_id': 'subnet-id'} self.get_router.return_value = self.fake_router_with_ext_gw self.get_port.side_effect = n_exc.PortNotFound( port_id='router-port-id') self.l3_inst.remove_router_interface( self.context, router_id, interface_info) self.l3_inst._ovn.lrp_del.assert_called_once_with( 'lrp-router-port-id', 'neutron-router-id', if_exists=True) self.l3_inst._ovn.delete_nat_rule_in_lrouter.assert_called_once_with( 'neutron-router-id', logical_ip='10.0.0.0/24', external_ip='192.168.1.1', type='snat') self.del_rev_p.assert_called_with( self.context, 'router-port-id', ovn_const.TYPE_ROUTER_PORTS) 
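# A minimal sketch (illustrative only, not part of the plugin) of the SNAT
# mapping the gateway tests above keep asserting: with an external gateway
# attached and SNAT enabled, each internal subnet CIDR maps to one 'snat'
# NAT rule whose external_ip is the gateway port's fixed IP:
#
#     def _expected_snat_rules(gw_ip, subnet_cidrs):
#         return [{'type': 'snat', 'logical_ip': cidr,
#                  'external_ip': gw_ip}
#                 for cidr in subnet_cidrs]
#
# _expected_snat_rules('192.168.1.1', ['10.0.0.0/24']) yields exactly the
# arguments checked against add_nat_rule_in_lrouter in these tests.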
@mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.ovn_client' '.OVNClient._get_router_ports') @mock.patch('neutron.db.extraroute_db.ExtraRoute_dbonly_mixin.' 'update_router') def test_update_router_with_ext_gw(self, ur, grps): self.l3_inst._ovn.is_col_present.return_value = True router = {'router': {'name': 'router'}} self.get_router.return_value = self.fake_router_without_ext_gw ur.return_value = self.fake_router_with_ext_gw self.get_subnet.side_effect = lambda ctx, sid: { 'ext-subnet-id': self.fake_ext_subnet}.get(sid, self.fake_subnet) self.get_port.return_value = self.fake_ext_gw_port grps.return_value = self.fake_router_ports self.l3_inst.update_router(self.context, 'router-id', router) self.l3_inst._ovn.add_lrouter_port.assert_called_once_with( **self.fake_ext_gw_port_assert) self.l3_inst._ovn.set_lrouter_port_in_lswitch_port.\ assert_called_once_with( 'gw-port-id', 'lrp-gw-port-id', is_gw_port=True, lsp_address=ovn_const.DEFAULT_ADDR_FOR_LSP_WITH_PEER) self.l3_inst._ovn.add_static_route.assert_called_once_with( 'neutron-router-id', ip_prefix='0.0.0.0/0', external_ids={ovn_const.OVN_ROUTER_IS_EXT_GW: 'true', ovn_const.OVN_SUBNET_EXT_ID_KEY: 'ext-subnet-id'}, nexthop='192.168.1.254') self.l3_inst._ovn.add_nat_rule_in_lrouter.assert_called_once_with( 'neutron-router-id', type='snat', logical_ip='10.0.0.0/24', external_ip='192.168.1.1') self.bump_rev_p.assert_called_with( mock.ANY, self.fake_ext_gw_port, ovn_const.TYPE_ROUTER_PORTS) @mock.patch.object(utils, 'get_lrouter_ext_gw_static_route') @mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.ovn_client' '.OVNClient._get_router_ports') @mock.patch('neutron.db.extraroute_db.ExtraRoute_dbonly_mixin.' 'update_router') def test_update_router_ext_gw_change_subnet(self, ur, grps, mock_get_gw): self.l3_inst._ovn.is_col_present.return_value = True mock_get_gw.return_value = [mock.sentinel.GwRoute] router = {'router': {'name': 'router'}} fake_old_ext_subnet = {'id': 'old-ext-subnet-id', 'ip_version': 4, 'cidr': '192.168.2.0/24', 'gateway_ip': '192.168.2.254'} # Old gateway info with same network and different subnet self.get_router.return_value = copy.copy(self.fake_router_with_ext_gw) self.get_router.return_value['external_gateway_info'] = { 'network_id': 'ext-network-id', 'external_fixed_ips': [{'ip_address': '192.168.2.1', 'subnet_id': 'old-ext-subnet-id'}]} self.get_router.return_value['gw_port_id'] = 'old-gw-port-id' ur.return_value = self.fake_router_with_ext_gw self.get_subnet.side_effect = lambda ctx, sid: { 'ext-subnet-id': self.fake_ext_subnet, 'old-ext-subnet-id': fake_old_ext_subnet}.get(sid, self.fake_subnet) self.get_port.return_value = self.fake_ext_gw_port grps.return_value = self.fake_router_ports self.l3_inst.update_router(self.context, 'router-id', router) # Check deleting old router gateway self.l3_inst._ovn.delete_lrouter_ext_gw.assert_called_once_with( 'neutron-router-id') # Check adding new router gateway self.l3_inst._ovn.add_lrouter_port.assert_called_once_with( **self.fake_ext_gw_port_assert) self.l3_inst._ovn.set_lrouter_port_in_lswitch_port.\ assert_called_once_with( 'gw-port-id', 'lrp-gw-port-id', is_gw_port=True, lsp_address=ovn_const.DEFAULT_ADDR_FOR_LSP_WITH_PEER) self.l3_inst._ovn.add_static_route.assert_called_once_with( 'neutron-router-id', ip_prefix='0.0.0.0/0', nexthop='192.168.1.254', external_ids={ovn_const.OVN_ROUTER_IS_EXT_GW: 'true', ovn_const.OVN_SUBNET_EXT_ID_KEY: 'ext-subnet-id'}) self.l3_inst._ovn.add_nat_rule_in_lrouter.assert_called_once_with( 'neutron-router-id', 
type='snat', logical_ip='10.0.0.0/24', external_ip='192.168.1.1') self.bump_rev_p.assert_called_with( mock.ANY, self.fake_ext_gw_port, ovn_const.TYPE_ROUTER_PORTS) self.del_rev_p.assert_called_once_with( mock.ANY, 'old-gw-port-id', ovn_const.TYPE_ROUTER_PORTS) @mock.patch.object(utils, 'get_lrouter_ext_gw_static_route') @mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.ovn_client.' 'OVNClient._get_router_ports') @mock.patch('neutron.db.extraroute_db.ExtraRoute_dbonly_mixin.' 'update_router') def test_update_router_ext_gw_change_ip_address(self, ur, grps, mock_get_gw): self.l3_inst._ovn.is_col_present.return_value = True mock_get_gw.return_value = [mock.sentinel.GwRoute] router = {'router': {'name': 'router'}} # Old gateway info with same subnet and different ip address gr_value = copy.deepcopy(self.fake_router_with_ext_gw) gr_value['external_gateway_info'][ 'external_fixed_ips'][0]['ip_address'] = '192.168.1.2' gr_value['gw_port_id'] = 'old-gw-port-id' self.get_router.return_value = gr_value ur.return_value = self.fake_router_with_ext_gw self.get_subnet.side_effect = lambda ctx, sid: { 'ext-subnet-id': self.fake_ext_subnet}.get(sid, self.fake_subnet) self.get_port.return_value = self.fake_ext_gw_port grps.return_value = self.fake_router_ports self.l3_inst.update_router(self.context, 'router-id', router) # Check deleting old router gateway self.l3_inst._ovn.delete_lrouter_ext_gw.assert_called_once_with( 'neutron-router-id') # Check adding new router gateway self.l3_inst._ovn.add_lrouter_port.assert_called_once_with( **self.fake_ext_gw_port_assert) self.l3_inst._ovn.set_lrouter_port_in_lswitch_port.\ assert_called_once_with( 'gw-port-id', 'lrp-gw-port-id', is_gw_port=True, lsp_address=ovn_const.DEFAULT_ADDR_FOR_LSP_WITH_PEER) self.l3_inst._ovn.add_static_route.assert_called_once_with( 'neutron-router-id', ip_prefix='0.0.0.0/0', nexthop='192.168.1.254', external_ids={ovn_const.OVN_ROUTER_IS_EXT_GW: 'true', ovn_const.OVN_SUBNET_EXT_ID_KEY: 'ext-subnet-id'}) self.l3_inst._ovn.add_nat_rule_in_lrouter.assert_called_once_with( 'neutron-router-id', type='snat', logical_ip='10.0.0.0/24', external_ip='192.168.1.1') @mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.' 'ovn_client.OVNClient._get_v4_network_of_all_router_ports') @mock.patch('neutron.db.extraroute_db.ExtraRoute_dbonly_mixin.' 'update_router') def test_update_router_ext_gw_no_change(self, ur, get_rps): router = {'router': {'name': 'router'}} self.get_router.return_value = self.fake_router_with_ext_gw ur.return_value = self.fake_router_with_ext_gw self.l3_inst._ovn.get_lrouter.return_value = ( fake_resources.FakeOVNRouter.from_neutron_router( self.fake_router_with_ext_gw)) self.l3_inst.update_router(self.context, 'router-id', router) self.l3_inst._ovn.lrp_del.assert_not_called() self.l3_inst._ovn.delete_static_route.assert_not_called() self.l3_inst._ovn.delete_nat_rule_in_lrouter.assert_not_called() self.l3_inst._ovn.add_lrouter_port.assert_not_called() self.l3_inst._ovn.set_lrouter_port_in_lswitch_port.assert_not_called() self.l3_inst._ovn.add_static_route.assert_not_called() self.l3_inst._ovn.add_nat_rule_in_lrouter.assert_not_called() @mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.ovn_client' '.OVNClient._get_v4_network_of_all_router_ports') @mock.patch('neutron.db.extraroute_db.ExtraRoute_dbonly_mixin.' 
'update_router') def test_update_router_with_ext_gw_and_disabled_snat(self, ur, grps): self.l3_inst._ovn.is_col_present.return_value = True router = {'router': {'name': 'router'}} self.get_router.return_value = self.fake_router_without_ext_gw ur.return_value = self.fake_router_with_ext_gw ur.return_value['external_gateway_info']['enable_snat'] = False self.get_subnet.side_effect = lambda ctx, sid: { 'ext-subnet-id': self.fake_ext_subnet}.get(sid, self.fake_subnet) self.get_port.return_value = self.fake_ext_gw_port grps.return_value = self.fake_router_ports self.l3_inst.update_router(self.context, 'router-id', router) # Need not check lsp and lrp here, it has been tested in other cases self.l3_inst._ovn.add_static_route.assert_called_once_with( 'neutron-router-id', ip_prefix='0.0.0.0/0', external_ids={ovn_const.OVN_ROUTER_IS_EXT_GW: 'true', ovn_const.OVN_SUBNET_EXT_ID_KEY: 'ext-subnet-id'}, nexthop='192.168.1.254') self.l3_inst._ovn.add_nat_rule_in_lrouter.assert_not_called() @mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.ovn_client' '.OVNClient._get_router_ports') @mock.patch('neutron.db.extraroute_db.ExtraRoute_dbonly_mixin.' 'update_router') def test_enable_snat(self, ur, grps): router = {'router': {'name': 'router'}} gr_value = copy.deepcopy(self.fake_router_with_ext_gw) gr_value['external_gateway_info']['enable_snat'] = False self.get_router.return_value = gr_value ur.return_value = self.fake_router_with_ext_gw self.l3_inst._ovn.get_lrouter.return_value = ( fake_resources.FakeOVNRouter.from_neutron_router( self.fake_router_with_ext_gw)) self.get_subnet.side_effect = lambda ctx, sid: { 'ext-subnet-id': self.fake_ext_subnet}.get(sid, self.fake_subnet) self.get_port.return_value = self.fake_ext_gw_port grps.return_value = self.fake_router_ports self.l3_inst.update_router(self.context, 'router-id', router) self.l3_inst._ovn.delete_static_route.assert_not_called() self.l3_inst._ovn.delete_nat_rule_in_lrouter.assert_not_called() self.l3_inst._ovn.add_static_route.assert_not_called() self.l3_inst._ovn.add_nat_rule_in_lrouter.assert_called_once_with( 'neutron-router-id', type='snat', logical_ip='10.0.0.0/24', external_ip='192.168.1.1') @mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.' 'ovn_client.OVNClient._check_external_ips_changed') @mock.patch.object(utils, 'get_lrouter_snats') @mock.patch.object(utils, 'get_lrouter_ext_gw_static_route') @mock.patch('neutron.common.ovn.utils.is_snat_enabled', mock.Mock(return_value=True)) @mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.' 'ovn_client.OVNClient._get_router_ports') @mock.patch('neutron.db.extraroute_db.ExtraRoute_dbonly_mixin.' 
'update_router') def test_disable_snat(self, ur, grps, mock_get_gw, mock_snats, mock_ext_ips): mock_get_gw.return_value = [mock.sentinel.GwRoute] mock_snats.return_value = [mock.sentinel.NAT] mock_ext_ips.return_value = False router = {'router': {'name': 'router'}} self.get_router.return_value = self.fake_router_with_ext_gw ur.return_value = copy.deepcopy(self.fake_router_with_ext_gw) ur.return_value['external_gateway_info']['enable_snat'] = False self.get_subnet.side_effect = lambda ctx, sid: { 'ext-subnet-id': self.fake_ext_subnet}.get(sid, self.fake_subnet) self.get_port.return_value = self.fake_ext_gw_port grps.return_value = self.fake_router_ports self.l3_inst.update_router(self.context, 'router-id', router) self.l3_inst._ovn.delete_static_route.assert_not_called() self.l3_inst._ovn.delete_nat_rule_in_lrouter.assert_called_once_with( 'neutron-router-id', type='snat', logical_ip='10.0.0.0/24', external_ip='192.168.1.1') self.l3_inst._ovn.add_static_route.assert_not_called() self.l3_inst._ovn.add_nat_rule_in_lrouter.assert_not_called() def test_create_floatingip(self): self.l3_inst._ovn.is_col_present.return_value = True self._get_floatingip.return_value = {'floating_port_id': 'fip-port-id'} self.l3_inst.create_floatingip(self.context, 'floatingip') expected_ext_ids = { ovn_const.OVN_FIP_EXT_ID_KEY: self.fake_floating_ip['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1', ovn_const.OVN_FIP_PORT_EXT_ID_KEY: self.fake_floating_ip['port_id'], ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: utils.ovn_name( self.fake_floating_ip['router_id'])} self.l3_inst._ovn.add_nat_rule_in_lrouter.assert_called_once_with( 'neutron-router-id', type='dnat_and_snat', logical_ip='10.0.0.10', external_ip='192.168.0.10', external_ids=expected_ext_ids) self.l3_inst._ovn.delete_lswitch_port.assert_called_once_with( 'fip-port-id', 'neutron-fip-net-id') def test_create_floatingip_distributed(self): self.l3_inst._ovn.is_col_present.return_value = True self.get_port.return_value = {'mac_address': '00:01:02:03:04:05', 'network_id': 'port-network-id'} self._get_floatingip.return_value = {'floating_port_id': 'fip-port-id'} config.cfg.CONF.set_override( 'enable_distributed_floating_ip', True, group='ovn') self.l3_inst.create_floatingip(self.context, 'floatingip') expected_ext_ids = { ovn_const.OVN_FIP_EXT_ID_KEY: self.fake_floating_ip['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1', ovn_const.OVN_FIP_PORT_EXT_ID_KEY: self.fake_floating_ip['port_id'], ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: utils.ovn_name( self.fake_floating_ip['router_id']), ovn_const.OVN_FIP_EXT_MAC_KEY: '00:01:02:03:04:05'} self.l3_inst._ovn.add_nat_rule_in_lrouter.assert_called_once_with( 'neutron-router-id', type='dnat_and_snat', logical_ip='10.0.0.10', external_ip='192.168.0.10', external_mac='00:01:02:03:04:05', logical_port='port_id', external_ids=expected_ext_ids) def test_create_floatingip_distributed_logical_port_down(self): # Check that when the port is down, the external_mac field is not # populated. This falls back to centralized routing for ports that # are not bound to a chassis. 
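# (Only external_mac is omitted in this case; the NAT rule asserted below
# still carries logical_port, but without a MAC OVN cannot process the FIP
# in a distributed fashion and keeps the translation on the gateway
# chassis.)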
self.l3_inst._ovn.is_col_present.return_value = True self.l3_inst._ovn.lsp_get_up.return_value.execute.return_value = ( False) self.get_port.return_value = {'mac_address': '00:01:02:03:04:05'} self._get_floatingip.return_value = {'floating_port_id': 'fip-port-id'} config.cfg.CONF.set_override( 'enable_distributed_floating_ip', True, group='ovn') self.l3_inst.create_floatingip(self.context, 'floatingip') expected_ext_ids = { ovn_const.OVN_FIP_EXT_ID_KEY: self.fake_floating_ip['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1', ovn_const.OVN_FIP_PORT_EXT_ID_KEY: self.fake_floating_ip['port_id'], ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: utils.ovn_name( self.fake_floating_ip['router_id']), ovn_const.OVN_FIP_EXT_MAC_KEY: '00:01:02:03:04:05'} self.l3_inst._ovn.add_nat_rule_in_lrouter.assert_called_once_with( 'neutron-router-id', type='dnat_and_snat', logical_ip='10.0.0.10', external_ip='192.168.0.10', logical_port='port_id', external_ids=expected_ext_ids) def test_create_floatingip_external_ip_present_in_nat_rule(self): self.l3_inst._ovn.is_col_present.return_value = True self._get_floatingip.return_value = {'floating_port_id': 'fip-port-id'} self.l3_inst._ovn.get_lrouter_nat_rules.return_value = [ {'external_ip': '192.168.0.10', 'logical_ip': '10.0.0.6', 'type': 'dnat_and_snat', 'uuid': 'uuid1'}] self.l3_inst.create_floatingip(self.context, 'floatingip') expected_ext_ids = { ovn_const.OVN_FIP_EXT_ID_KEY: self.fake_floating_ip['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1', ovn_const.OVN_FIP_PORT_EXT_ID_KEY: self.fake_floating_ip['port_id'], ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: utils.ovn_name( self.fake_floating_ip['router_id'])} self.l3_inst._ovn.add_nat_rule_in_lrouter.assert_called_once_with( 'neutron-router-id', type='dnat_and_snat', logical_ip='10.0.0.10', external_ip='192.168.0.10', external_ids=expected_ext_ids) self.l3_inst._ovn.delete_lswitch_port.assert_called_once_with( 'fip-port-id', 'neutron-fip-net-id') def test_create_floatingip_external_ip_present_type_snat(self): self.l3_inst._ovn.is_col_present.return_value = True self._get_floatingip.return_value = {'floating_port_id': 'fip-port-id'} self.l3_inst._ovn.get_lrouter_nat_rules.return_value = [ {'external_ip': '192.168.0.10', 'logical_ip': '10.0.0.0/24', 'type': 'snat', 'uuid': 'uuid1'}] self.l3_inst.create_floatingip(self.context, 'floatingip') self.l3_inst._ovn.set_nat_rule_in_lrouter.assert_not_called() expected_ext_ids = { ovn_const.OVN_FIP_EXT_ID_KEY: self.fake_floating_ip['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1', ovn_const.OVN_FIP_PORT_EXT_ID_KEY: self.fake_floating_ip['port_id'], ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: utils.ovn_name( self.fake_floating_ip['router_id'])} self.l3_inst._ovn.add_nat_rule_in_lrouter.assert_called_once_with( 'neutron-router-id', type='dnat_and_snat', logical_ip='10.0.0.10', external_ip='192.168.0.10', external_ids=expected_ext_ids) self.l3_inst._ovn.delete_lswitch_port.assert_called_once_with( 'fip-port-id', 'neutron-fip-net-id') def test_create_floatingip_lsp_external_id(self): foo_lport = fake_resources.FakeOvsdbRow.create_one_ovsdb_row() foo_lport.uuid = 'foo-port' self.l3_inst._ovn.get_lswitch_port.return_value = foo_lport self.l3_inst.create_floatingip(self.context, 'floatingip') calls = [mock.call( 'Logical_Switch_Port', 'foo-port', ('external_ids', {ovn_const.OVN_PORT_FIP_EXT_ID_KEY: '192.168.0.10'}))] self.l3_inst._ovn.db_set.assert_has_calls(calls) def test_create_floatingip_lb_member_fip(self): config.cfg.CONF.set_override( 'enable_distributed_floating_ip', True, group='ovn') # Stop this mock. 
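# setUp() patches _is_lb_member_fip to always return False; this test
# exercises the real member-FIP detection path, so that patch must not
# stay active while create_floatingip runs.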
self.mock_is_lb_member_fip.stop() self.get_port.return_value = self.member_port self.l3_inst._ovn.lookup.return_value = self.lb_network self.l3_inst._ovn.get_lswitch_port.return_value = self.member_lsp self.l3_inst.create_floatingip(self.context, 'floatingip') # Validate that there is no external_mac and logical_port while # setting the NAT entry. self.l3_inst._ovn.add_nat_rule_in_lrouter.assert_called_once_with( 'neutron-router-id', external_ip='192.168.0.10', logical_ip='10.0.0.10', type='dnat_and_snat') def test_create_floatingip_lb_vip_fip(self): config.cfg.CONF.set_override( 'enable_distributed_floating_ip', True, group='ovn') self.get_subnet.return_value = self.member_subnet self.l3_inst._ovn.get_lswitch_port.return_value = self.lb_vip_lsp self.l3_inst._ovn.db_find_rows.return_value.execute.side_effect = [ [self.ovn_lb], [self.lb_network], [self.fake_ovn_nat_rule], ] self.l3_inst._ovn.lookup.return_value = self.lb_network self.l3_inst.create_floatingip(self.context, 'floatingip') self.l3_inst._ovn.add_nat_rule_in_lrouter.assert_called_once_with( 'neutron-router-id', external_ip='192.168.0.10', external_mac='aa:aa:aa:aa:aa:aa', logical_ip='10.0.0.10', logical_port='port_id', type='dnat_and_snat') self.l3_inst._ovn.db_find_rows.assert_called_with( 'NAT', ('external_ids', '=', {ovn_const.OVN_FIP_PORT_EXT_ID_KEY: self.member_lsp.name})) # Validate that it clears external_mac/logical_port for member NAT. self.l3_inst._ovn.db_clear.assert_has_calls([ mock.call('NAT', self.fake_ovn_nat_rule.uuid, 'external_mac'), mock.call('NAT', self.fake_ovn_nat_rule.uuid, 'logical_port')]) @mock.patch('neutron.db.l3_db.L3_NAT_dbonly_mixin.delete_floatingip') def test_delete_floatingip(self, df): self.l3_inst._ovn.get_floatingip.return_value = ( self.fake_ovn_nat_rule) self.l3_inst.delete_floatingip(self.context, 'floatingip-id') self.l3_inst._ovn.delete_nat_rule_in_lrouter.assert_called_once_with( 'neutron-router-id', type='dnat_and_snat', logical_ip='10.0.0.10', external_ip='192.168.0.10') @mock.patch('neutron.db.l3_db.L3_NAT_dbonly_mixin.delete_floatingip') def test_delete_floatingip_lb_vip_fip(self, df): config.cfg.CONF.set_override( 'enable_distributed_floating_ip', True, group='ovn') self.get_subnet.return_value = self.member_subnet self.l3_inst._ovn.get_floatingip.return_value = ( self.fake_ovn_nat_rule) self.l3_inst._ovn.get_lswitch_port.return_value = self.lb_vip_lsp self.l3_inst._ovn.db_find_rows.return_value.execute.side_effect = [ [self.ovn_lb], [self.lb_network], [self.fake_ovn_nat_rule], ] self.l3_inst._ovn.lookup.return_value = self.lb_network self.l3_inst.delete_floatingip(self.context, 'floatingip-id') self.l3_inst._ovn.delete_nat_rule_in_lrouter.assert_called_once_with( 'neutron-router-id', type='dnat_and_snat', logical_ip='10.0.0.10', external_ip='192.168.0.10') self.l3_inst._ovn.db_find_rows.assert_called_with( 'NAT', ('external_ids', '=', {ovn_const.OVN_FIP_PORT_EXT_ID_KEY: self.member_lsp.name})) self.l3_inst._plugin.get_port.assert_called_once_with( mock.ANY, self.member_lsp.name) # Validate that it adds external_mac/logical_port back. 
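# This is the inverse of test_create_floatingip_lb_vip_fip above, which
# verified that creating the VIP FIP cleared the same two NAT columns
# with db_clear.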
self.l3_inst._ovn.db_set.assert_has_calls([ mock.call('NAT', self.fake_ovn_nat_rule.uuid, ('logical_port', self.member_lsp.name)), mock.call('NAT', self.fake_ovn_nat_rule.uuid, ('external_mac', 'aa:aa:aa:aa:aa:aa'))]) @mock.patch('neutron.db.l3_db.L3_NAT_dbonly_mixin.delete_floatingip') def test_delete_floatingip_lsp_external_id(self, df): self.l3_inst._ovn.get_floatingip.return_value = ( self.fake_ovn_nat_rule) foo_lport = fake_resources.FakeOvsdbRow.create_one_ovsdb_row() foo_lport.uuid = 'foo-port' foo_lport.external_ids = { ovn_const.OVN_PORT_FIP_EXT_ID_KEY: 'foo-port'} self.l3_inst._ovn.get_lswitch_port.return_value = foo_lport self.l3_inst.delete_floatingip(self.context, 'floatingip-id') calls = [mock.call( 'Logical_Switch_Port', 'foo-port', 'external_ids', ovn_const.OVN_PORT_FIP_EXT_ID_KEY)] self.l3_inst._ovn.db_remove.assert_has_calls(calls) @mock.patch('neutron.db.l3_db.L3_NAT_dbonly_mixin.delete_floatingip') def test_delete_floatingip_no_lsp_external_id(self, df): self.l3_inst._ovn.get_floatingip.return_value = ( self.fake_ovn_nat_rule) self.l3_inst._ovn.get_lswitch_port.return_value = None self.l3_inst.delete_floatingip(self.context, 'floatingip-id') self.l3_inst._ovn.db_remove.assert_not_called() @mock.patch('neutron.db.extraroute_db.ExtraRoute_dbonly_mixin.' 'update_floatingip') def test_update_floatingip(self, uf): self.l3_inst._ovn.is_col_present.return_value = True uf.return_value = self.fake_floating_ip_new self.l3_inst._ovn.get_floatingip.return_value = ( self.fake_ovn_nat_rule) self.l3_inst.update_floatingip(self.context, 'id', 'floatingip') self.l3_inst._ovn.delete_nat_rule_in_lrouter.assert_called_once_with( 'neutron-router-id', type='dnat_and_snat', logical_ip='10.0.0.10', external_ip='192.168.0.10') expected_ext_ids = { ovn_const.OVN_FIP_EXT_ID_KEY: self.fake_floating_ip_new['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1', ovn_const.OVN_FIP_PORT_EXT_ID_KEY: self.fake_floating_ip_new['port_id'], ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: utils.ovn_name( self.fake_floating_ip_new['router_id'])} self.l3_inst._ovn.add_nat_rule_in_lrouter.assert_called_once_with( 'neutron-new-router-id', type='dnat_and_snat', logical_ip='10.10.10.10', external_ip='192.168.0.10', external_ids=expected_ext_ids) @mock.patch('neutron.db.extraroute_db.ExtraRoute_dbonly_mixin.' 'update_floatingip') def test_update_floatingip_associate(self, uf): self.l3_inst._ovn.is_col_present.return_value = True self.fake_floating_ip.update({'fixed_port_id': None}) uf.return_value = self.fake_floating_ip_new self.l3_inst.update_floatingip(self.context, 'id', 'floatingip') self.l3_inst._ovn.delete_nat_rule_in_lrouter.assert_not_called() expected_ext_ids = { ovn_const.OVN_FIP_EXT_ID_KEY: self.fake_floating_ip_new['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1', ovn_const.OVN_FIP_PORT_EXT_ID_KEY: self.fake_floating_ip_new['port_id'], ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: utils.ovn_name( self.fake_floating_ip_new['router_id'])} self.l3_inst._ovn.add_nat_rule_in_lrouter.assert_called_once_with( 'neutron-new-router-id', type='dnat_and_snat', logical_ip='10.10.10.10', external_ip='192.168.0.10', external_ids=expected_ext_ids) @mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.get_network') @mock.patch('neutron.db.extraroute_db.ExtraRoute_dbonly_mixin.' 
'update_floatingip') def test_update_floatingip_associate_distributed(self, uf, gn): self.l3_inst._ovn.is_col_present.return_value = True self.fake_floating_ip.update({'fixed_port_id': None}) self.get_port.return_value = {'mac_address': '00:01:02:03:04:05', 'network_id': 'port-network-id'} uf.return_value = self.fake_floating_ip_new fake_network_vlan = self.fake_network fake_network_vlan[pnet.NETWORK_TYPE] = constants.TYPE_FLAT gn.return_value = fake_network_vlan config.cfg.CONF.set_override( 'enable_distributed_floating_ip', True, group='ovn') self.l3_inst.update_floatingip(self.context, 'id', 'floatingip') self.l3_inst._ovn.delete_nat_rule_in_lrouter.assert_not_called() expected_ext_ids = { ovn_const.OVN_FIP_EXT_ID_KEY: self.fake_floating_ip_new['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1', ovn_const.OVN_FIP_PORT_EXT_ID_KEY: self.fake_floating_ip_new['port_id'], ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: utils.ovn_name( self.fake_floating_ip_new['router_id']), ovn_const.OVN_FIP_EXT_MAC_KEY: '00:01:02:03:04:05'} self.l3_inst._ovn.add_nat_rule_in_lrouter.assert_called_once_with( 'neutron-new-router-id', type='dnat_and_snat', logical_ip='10.10.10.10', external_ip='192.168.0.10', external_mac='00:01:02:03:04:05', logical_port='new-port_id', external_ids=expected_ext_ids) @mock.patch('neutron.db.extraroute_db.ExtraRoute_dbonly_mixin.' 'update_floatingip') def test_update_floatingip_association_empty_update(self, uf): self.l3_inst._ovn.is_col_present.return_value = True self.l3_inst._ovn.get_floatingip.return_value = ( self.fake_ovn_nat_rule) self.fake_floating_ip.update({'fixed_port_id': 'foo'}) self.fake_floating_ip_new.update({'port_id': 'foo'}) uf.return_value = self.fake_floating_ip_new self.l3_inst.update_floatingip(self.context, 'id', 'floatingip') self.l3_inst._ovn.delete_nat_rule_in_lrouter.assert_called_once_with( 'neutron-router-id', type='dnat_and_snat', logical_ip='10.0.0.10', external_ip='192.168.0.10') expected_ext_ids = { ovn_const.OVN_FIP_EXT_ID_KEY: self.fake_floating_ip_new['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1', ovn_const.OVN_FIP_PORT_EXT_ID_KEY: self.fake_floating_ip_new['port_id'], ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: utils.ovn_name( self.fake_floating_ip_new['router_id'])} self.l3_inst._ovn.add_nat_rule_in_lrouter.assert_called_once_with( 'neutron-new-router-id', type='dnat_and_snat', logical_ip='10.10.10.10', external_ip='192.168.0.10', external_ids=expected_ext_ids) @mock.patch('neutron.db.extraroute_db.ExtraRoute_dbonly_mixin.' 
'update_floatingip') def test_update_floatingip_reassociate_to_same_port_diff_fixed_ip( self, uf): self.l3_inst._ovn.is_col_present.return_value = True self.l3_inst._ovn.get_floatingip.return_value = ( self.fake_ovn_nat_rule) self.fake_floating_ip_new.update({'port_id': 'port_id', 'fixed_port_id': 'port_id'}) uf.return_value = self.fake_floating_ip_new self.l3_inst.update_floatingip(self.context, 'id', 'floatingip') self.l3_inst._ovn.delete_nat_rule_in_lrouter.assert_called_once_with( 'neutron-router-id', type='dnat_and_snat', logical_ip='10.0.0.10', external_ip='192.168.0.10') expected_ext_ids = { ovn_const.OVN_FIP_EXT_ID_KEY: self.fake_floating_ip_new['id'], ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1', ovn_const.OVN_FIP_PORT_EXT_ID_KEY: self.fake_floating_ip_new['port_id'], ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: utils.ovn_name( self.fake_floating_ip_new['router_id'])} self.l3_inst._ovn.add_nat_rule_in_lrouter.assert_called_once_with( 'neutron-new-router-id', type='dnat_and_snat', logical_ip='10.10.10.10', external_ip='192.168.0.10', external_ids=expected_ext_ids) @mock.patch('neutron.db.l3_db.L3_NAT_dbonly_mixin.get_floatingips') def test_disassociate_floatingips(self, gfs): gfs.return_value = [{'id': 'fip-id1', 'floating_ip_address': '192.168.0.10', 'router_id': 'router-id', 'port_id': 'port_id', 'floating_port_id': 'fip-port-id1', 'fixed_ip_address': '10.0.0.10'}, {'id': 'fip-id2', 'floating_ip_address': '192.167.0.10', 'router_id': 'router-id', 'port_id': 'port_id', 'floating_port_id': 'fip-port-id2', 'fixed_ip_address': '10.0.0.11'}] self.l3_inst.disassociate_floatingips(self.context, 'port_id', do_notify=False) delete_nat_calls = [mock.call('neutron-router-id', type='dnat_and_snat', logical_ip=fip['fixed_ip_address'], external_ip=fip['floating_ip_address']) for fip in gfs.return_value] self.assertEqual( len(delete_nat_calls), self.l3_inst._ovn.delete_nat_rule_in_lrouter.call_count) self.l3_inst._ovn.delete_nat_rule_in_lrouter.assert_has_calls( delete_nat_calls, any_order=True) @mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.' 
'ovn_client.OVNClient.update_router_port') def test_port_update_postcommit(self, update_rp_mock): kwargs = {'port': {'device_owner': 'foo'}, 'context': 'fake_context'} self.l3_inst._port_update(resources.PORT, events.AFTER_UPDATE, None, **kwargs) update_rp_mock.assert_not_called() kwargs = {'port': {'device_owner': constants.DEVICE_OWNER_ROUTER_INTF}, 'context': 'fake_context'} self.l3_inst._port_update(resources.PORT, events.AFTER_UPDATE, None, **kwargs) update_rp_mock.assert_called_once_with(kwargs['context'], kwargs['port'], if_exists=True) @mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.update_port_status') @mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.update_port') @mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.get_ports') def test_update_router_gateway_port_bindings_active( self, mock_get_port, mock_updt_port, mock_updt_status): fake_host = 'fake-host' fake_router = 'fake-router' fake_port_id = 'fake-port-id' mock_get_port.return_value = [{ 'id': fake_port_id, 'status': constants.PORT_STATUS_DOWN}] self.l3_inst.update_router_gateway_port_bindings( fake_router, fake_host) # Assert that the port is being bound expected_update = {'port': {portbindings.HOST_ID: fake_host}} mock_updt_port.assert_called_once_with( mock.ANY, fake_port_id, expected_update) # Assert that the port status is being set to ACTIVE mock_updt_status.assert_called_once_with( mock.ANY, fake_port_id, constants.PORT_STATUS_ACTIVE) @mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.update_port_status') @mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.get_ports') def test_update_router_gateway_port_bindings_down( self, mock_get_port, mock_updt_status): fake_port_id = 'fake-port-id' mock_get_port.return_value = [{ 'id': fake_port_id, 'status': constants.PORT_STATUS_ACTIVE}] self.l3_inst.update_router_gateway_port_bindings(None, None) # Assert that the port status is being set to DOWN mock_updt_status.assert_called_once_with( mock.ANY, fake_port_id, constants.PORT_STATUS_DOWN) @mock.patch('neutron.services.ovn_l3.plugin.OVNL3RouterPlugin.' '_get_gateway_port_physnet_mapping') def test_schedule_unhosted_gateways_no_gateways(self, get_gppm): get_gppm.return_value = {} self.nb_idl().get_unhosted_gateways.return_value = [] self.l3_inst.schedule_unhosted_gateways() self.nb_idl().update_lrouter_port.assert_not_called() @mock.patch('neutron.services.ovn_l3.plugin.OVNL3RouterPlugin.' '_get_gateway_port_physnet_mapping') def test_schedule_unhosted_gateways(self, get_gppm): unhosted_gws = ['lrp-foo-1', 'lrp-foo-2', 'lrp-foo-3'] get_gppm.return_value = {k[len(ovn_const.LRP_PREFIX):]: 'physnet1' for k in unhosted_gws} chassis_mappings = { 'chassis1': ['physnet1'], 'chassis2': ['physnet1'], 'chassis3': ['physnet1']} chassis = ['chassis1', 'chassis2', 'chassis3'] self.sb_idl().get_chassis_and_physnets.return_value = ( chassis_mappings) self.sb_idl().get_gateway_chassis_from_cms_options.return_value = ( chassis) self.nb_idl().get_unhosted_gateways.return_value = unhosted_gws # 1. port has 2 gateway chassis # 2. port has only chassis2 # 3. port is not bound existing_port_bindings = [ ['chassis1', 'chassis2'], ['chassis2'], []] self.nb_idl().get_gateway_chassis_binding.side_effect = ( existing_port_bindings) # For port 1, the schedule is untouched; only the 3rd chassis is added # For port 2, the master chassis is scheduled somewhere else # For port 3, all chassis are scheduled
self.mock_schedule.side_effect = [ ['chassis1', 'chassis2', 'chassis3'], ['chassis1', 'chassis2', 'chassis3'], ['chassis3', 'chassis2', 'chassis1']] self.l3_inst.schedule_unhosted_gateways() self.mock_candidates.assert_has_calls([ mock.call(mock.ANY, chassis_physnets=chassis_mappings, cms=chassis)] * 3) self.mock_schedule.assert_has_calls([ mock.call(self.nb_idl(), self.sb_idl(), 'lrp-foo-1', [], ['chassis1', 'chassis2']), mock.call(self.nb_idl(), self.sb_idl(), 'lrp-foo-2', [], ['chassis2']), mock.call(self.nb_idl(), self.sb_idl(), 'lrp-foo-3', [], [])]) # Make sure that for the second port the master chassis stays untouched self.nb_idl().update_lrouter_port.assert_has_calls([ mock.call('lrp-foo-1', gateway_chassis=['chassis1', 'chassis2', 'chassis3']), mock.call('lrp-foo-2', gateway_chassis=['chassis2', 'chassis1', 'chassis3']), mock.call('lrp-foo-3', gateway_chassis=['chassis3', 'chassis2', 'chassis1'])]) @mock.patch('neutron.services.ovn_l3.plugin.OVNL3RouterPlugin.' '_get_gateway_port_physnet_mapping') def test_schedule_unhosted_gateways_on_event_no_gw_chassis(self, get_gppm): unhosted_gws = ['lrp-foo-1', 'lrp-foo-2', 'lrp-foo-3'] get_gppm.return_value = {k[len(ovn_const.LRP_PREFIX):]: 'physnet1' for k in unhosted_gws} self.nb_idl().get_chassis_gateways.return_value = [] self.l3_inst.schedule_unhosted_gateways(event_from_chassis='chassis4') self.nb_idl().get_unhosted_gateways.assert_not_called() @mock.patch('neutron.services.ovn_l3.plugin.OVNL3RouterPlugin.' '_get_gateway_port_physnet_mapping') def test_schedule_unhosted_gateways_on_event(self, get_gppm): unhosted_gws = ['lrp-foo-1', 'lrp-foo-2', 'lrp-foo-3'] get_gppm.return_value = {k[len(ovn_const.LRP_PREFIX):]: 'physnet1' for k in unhosted_gws} foo_gw = fake_resources.FakeOvsdbRow.create_one_ovsdb_row( attrs={'name': 'lrp-foo-1_chassis1', 'chassis_name': 'chassis1'}) self.nb_idl().get_chassis_gateways.return_value = [ foo_gw] self.nb_idl().get_unhosted_gateways.return_value = [] # Fake that rescheduling is executed on a chassis event self.l3_inst.schedule_unhosted_gateways(event_from_chassis='chassis1') # Validate that only the foo-1 port is being rescheduled. self.nb_idl().get_unhosted_gateways.assert_called_once_with( {'foo-1': 'physnet1'}, mock.ANY, mock.ANY) @mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.get_network') @mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.'
'ovn_client.OVNClient._get_router_ports') @mock.patch('neutron.db.l3_db.L3_NAT_dbonly_mixin.add_router_interface') def test_add_router_interface_need_to_frag_enabled(self, ari, grps, gn): config.cfg.CONF.set_override( 'ovn_emit_need_to_frag', True, group='ovn') router_id = 'router-id' interface_info = {'port_id': 'router-port-id'} ari.return_value = self.fake_router_interface_info self.get_router.return_value = self.fake_router_with_ext_gw gn.return_value = self.fake_network self.fake_router_port['device_owner'] = ( constants.DEVICE_OWNER_ROUTER_GW) self.l3_inst.add_router_interface(self.context, router_id, interface_info) # Make sure that the "gateway_mtu" option was set to the router port fake_router_port_assert = self.fake_router_port_assert fake_router_port_assert['gateway_chassis'] = mock.ANY fake_router_port_assert['options'] = { ovn_const.OVN_ROUTER_PORT_GW_MTU_OPTION: str(self.fake_network['mtu'])} self.l3_inst._ovn.add_lrouter_port.assert_called_once_with( **fake_router_port_assert) self.l3_inst._ovn.set_lrouter_port_in_lswitch_port.\ assert_called_once_with( 'router-port-id', 'lrp-router-port-id', is_gw_port=True, lsp_address=ovn_const.DEFAULT_ADDR_FOR_LSP_WITH_PEER) self.l3_inst._ovn.add_nat_rule_in_lrouter.assert_called_once_with( 'neutron-router-id', logical_ip='10.0.0.0/24', external_ip='192.168.1.1', type='snat') self.bump_rev_p.assert_called_with( mock.ANY, self.fake_router_port, ovn_const.TYPE_ROUTER_PORTS) class OVNL3ExtrarouteTests(test_l3_gw.ExtGwModeIntTestCase, test_l3.L3NatDBIntTestCase, test_extraroute.ExtraRouteDBTestCaseBase): # TODO(lucasagomes): Ideally, this method should be moved to a base # class which all tests classes in networking-ovn inherits from but, # this base class doesn't seem to exist for now so we need to duplicate # it here def _start_mock(self, path, return_value, new_callable=None): patcher = mock.patch(path, return_value=return_value, new_callable=new_callable) patch = patcher.start() self.addCleanup(patcher.stop) return patch def setUp(self): plugin = 'neutron.tests.unit.extensions.test_l3.TestNoL3NatPlugin' l3_plugin = ('neutron.services.ovn_l3.plugin.OVNL3RouterPlugin') service_plugins = {'l3_plugin_name': l3_plugin} # For these tests we need to enable overlapping ips cfg.CONF.set_default('allow_overlapping_ips', True) cfg.CONF.set_default('max_routes', 3) ext_mgr = test_extraroute.ExtraRouteTestExtensionManager() super(test_l3.L3BaseForIntTests, self).setUp( plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) revision_plugin.RevisionPlugin() l3_gw_mgr = test_l3_gw.TestExtensionManager() test_extensions.setup_extensions_middleware(l3_gw_mgr) self.l3_inst = directory.get_plugin(plugin_constants.L3) self._start_mock( 'neutron.services.ovn_l3.plugin.OVNL3RouterPlugin._ovn', new_callable=mock.PropertyMock, return_value=fake_resources.FakeOvsdbNbOvnIdl()) self._start_mock( 'neutron.services.ovn_l3.plugin.OVNL3RouterPlugin._sb_ovn', new_callable=mock.PropertyMock, return_value=fake_resources.FakeOvsdbSbOvnIdl()) self._start_mock( 'neutron.scheduler.l3_ovn_scheduler.' 'OVNGatewayScheduler._schedule_gateway', return_value='hv1') self._start_mock( 'neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.ovn_client.' 'OVNClient.get_candidates_for_scheduling', return_value=[]) self._start_mock( 'neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.ovn_client.' 'OVNClient._get_v4_network_of_all_router_ports', return_value=[]) self._start_mock( 'neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.ovn_client.' 
'OVNClient.update_floatingip_status', return_value=None) self._start_mock( 'neutron.common.ovn.utils.get_revision_number', return_value=1) self.setup_notification_driver() # Note(dongj): According to bug #1657693, status of an unassociated # floating IP is set to DOWN. Revise expected_status to DOWN for related # test cases. def test_floatingip_update( self, expected_status=constants.FLOATINGIP_STATUS_DOWN): super(OVNL3ExtrarouteTests, self).test_floatingip_update( expected_status) def test_floatingip_update_to_same_port_id_twice( self, expected_status=constants.FLOATINGIP_STATUS_DOWN): super(OVNL3ExtrarouteTests, self).\ test_floatingip_update_to_same_port_id_twice(expected_status) def test_floatingip_update_subnet_gateway_disabled( self, expected_status=constants.FLOATINGIP_STATUS_DOWN): super(OVNL3ExtrarouteTests, self).\ test_floatingip_update_subnet_gateway_disabled(expected_status) # Test function _subnet_update of L3 OVN plugin. def test_update_subnet_gateway_for_external_net(self): super(OVNL3ExtrarouteTests, self). \ test_update_subnet_gateway_for_external_net() self.l3_inst._ovn.add_static_route.assert_called_once_with( 'neutron-fake_device', ip_prefix='0.0.0.0/0', nexthop='120.0.0.2') self.l3_inst._ovn.delete_static_route.assert_called_once_with( 'neutron-fake_device', ip_prefix='0.0.0.0/0', nexthop='120.0.0.1') def test_router_update_gateway_upon_subnet_create_max_ips_ipv6(self): super(OVNL3ExtrarouteTests, self). \ test_router_update_gateway_upon_subnet_create_max_ips_ipv6() add_static_route_calls = [ mock.call(mock.ANY, ip_prefix='0.0.0.0/0', nexthop='10.0.0.1'), mock.call(mock.ANY, ip_prefix='::/0', nexthop='2001:db8::')] self.l3_inst._ovn.add_static_route.assert_has_calls( add_static_route_calls, any_order=True) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586982291.483046 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/placement_report/0000755000175000017500000000000000000000000026144 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/placement_report/__init__.py0000644000175000017500000000000000000000000030243 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/placement_report/test_plugin.py0000644000175000017500000002445500000000000031065 0ustar00coreycorey00000000000000# Copyright 2019 Ericsson # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
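# These tests cover PlacementReportPlugin: how agent heartbeats that carry
# resource_provider_* configuration are (or are not) turned into placement
# sync events, and how sync failures are recorded on the agent DB row.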
import mock from keystoneauth1 import exceptions as ks_exc from neutron_lib.agent import constants as agent_const from oslo_log import log as logging from neutron.services.placement_report import plugin from neutron.tests.unit.plugins.ml2.drivers import mechanism_test from neutron.tests.unit.plugins.ml2 import test_plugin LOG = logging.getLogger(__name__) class PlacementReportPluginTestCases(test_plugin.Ml2PluginV2TestCase): _mechanism_drivers = ['logger', 'test_with_agent'] def setUp(self): super(PlacementReportPluginTestCases, self).setUp() self.service_plugin = plugin.PlacementReportPlugin() def test__get_rp_by_name_found(self): with mock.patch.object( self.service_plugin._placement_client, 'list_resource_providers', return_value={'resource_providers': ['fake_rp']}): rp = self.service_plugin._get_rp_by_name('whatever') self.assertEqual('fake_rp', rp) def test__get_rp_by_name_not_found(self): with mock.patch.object( self.service_plugin._placement_client, 'list_resource_providers', return_value={'resource_providers': []}): self.assertRaises( IndexError, self.service_plugin._get_rp_by_name, 'no_such_rp') def test_no_sync_for_rp_name_not_found(self): # looking all good agent = { 'agent_type': 'test_mechanism_driver_agent', 'configurations': { 'resource_provider_bandwidths': {'some iface': ''}, }, 'host': 'fake host', } agent_db = mock.Mock() with mock.patch.object( self.service_plugin._placement_client, 'list_resource_providers', return_value={'resource_providers': []}), \ mock.patch.object( self.service_plugin._batch_notifier, 'queue_event') as mock_queue_event: self.service_plugin._sync_placement_state(agent, agent_db) self.assertFalse(agent_db.resources_synced) agent_db.update.assert_called_with() mock_queue_event.assert_not_called() def test_no_sync_for_placement_gone(self): # looking all good agent = { 'agent_type': 'test_mechanism_driver_agent', 'configurations': { 'resource_provider_bandwidths': {'some iface': ''}}, 'host': 'fake host', } agent_db = mock.Mock() with mock.patch.object( self.service_plugin._placement_client, 'list_resource_providers', side_effect=ks_exc.HttpError), \ mock.patch.object( self.service_plugin._batch_notifier, 'queue_event') as mock_queue_event: self.service_plugin._sync_placement_state(agent, agent_db) self.assertFalse(agent_db.resources_synced) agent_db.update.assert_called_with() mock_queue_event.assert_not_called() def test_no_sync_for_unsupported_agent_type(self): payload = mock.Mock( # looking all good, but agent type not supported desired_state={ 'agent_type': 'unsupported agent type', 'configurations': {'resource_provider_bandwidths': {}}, 'host': 'fake host', }) with mock.patch.object(self.service_plugin._core_plugin, '_get_agent_by_type_and_host') as mock_get_agent, \ mock.patch.object(self.service_plugin, '_sync_placement_state') as mock_sync: self.service_plugin.handle_placement_config( mock.ANY, mock.ANY, mock.ANY, payload) mock_get_agent.assert_not_called() mock_sync.assert_not_called() def test_no_sync_without_resource_info(self): payload = mock.Mock( # looking all good, but 'configurations' has no # 'resource_provider_bandwidths' desired_state={ 'agent_type': 'test_mechanism_driver_agent', 'configurations': {}, 'host': 'fake host', }) with mock.patch.object(self.service_plugin._core_plugin, '_get_agent_by_type_and_host') as mock_get_agent, \ mock.patch.object(self.service_plugin, '_sync_placement_state') as mock_sync: self.service_plugin.handle_placement_config( mock.ANY, mock.ANY, mock.ANY, payload) mock_get_agent.assert_not_called() 
mock_sync.assert_not_called() def test_sync_if_agent_is_new(self): payload = mock.Mock( desired_state={ 'agent_type': 'test_mechanism_driver_agent', 'configurations': {'resource_provider_bandwidths': {}}, 'host': 'fake host', }, metadata={ 'status': agent_const.AGENT_NEW, }, ) with mock.patch.object(self.service_plugin._core_plugin, '_get_agent_by_type_and_host') as mock_get_agent, \ mock.patch.object(self.service_plugin, '_sync_placement_state') as mock_sync: self.service_plugin.handle_placement_config( mock.ANY, mock.ANY, mock.ANY, payload) self.assertEqual(1, mock_get_agent.call_count) self.assertEqual(1, mock_sync.call_count) def test_sync_if_agent_is_restarted(self): payload = mock.Mock( desired_state={ 'agent_type': 'test_mechanism_driver_agent', 'configurations': {'resource_provider_bandwidths': {}}, 'host': 'fake host', 'start_flag': True, }, ) with mock.patch.object(self.service_plugin._core_plugin, '_get_agent_by_type_and_host') as mock_get_agent, \ mock.patch.object(self.service_plugin, '_sync_placement_state') as mock_sync: self.service_plugin.handle_placement_config( mock.ANY, mock.ANY, mock.ANY, payload) self.assertEqual(1, mock_get_agent.call_count) self.assertEqual(1, mock_sync.call_count) def test_sync_after_transient_error(self): payload = mock.Mock( desired_state={ 'agent_type': 'test_mechanism_driver_agent', 'configurations': {'resource_provider_bandwidths': {}}, 'host': 'fake host', }, ) with mock.patch.object(self.service_plugin._core_plugin, '_get_agent_by_type_and_host', return_value={'resources_synced': False}) as mock_get_agent, \ mock.patch.object(self.service_plugin, '_sync_placement_state') as mock_sync: self.service_plugin.handle_placement_config( mock.ANY, mock.ANY, mock.ANY, payload) self.assertEqual(1, mock_get_agent.call_count) self.assertEqual(1, mock_sync.call_count) def test__sync_placement_state_legacy(self): agent = { 'agent_type': 'test_mechanism_driver_agent', 'configurations': { 'resource_provider_bandwidths': {}, 'resource_provider_inventory_defaults': {}, }, 'host': 'fake host', } agent_db = mock.Mock() with mock.patch.object(self.service_plugin._batch_notifier, 'queue_event') as mock_queue_event, \ mock.patch.object(self.service_plugin._placement_client, 'list_resource_providers', return_value={'resource_providers': [{'uuid': 'fake uuid'}]}): self.service_plugin._sync_placement_state(agent, agent_db) self.assertEqual(1, mock_queue_event.call_count) def test__sync_placement_state_rp_hypervisors(self): agent = { 'agent_type': 'test_mechanism_driver_agent', 'configurations': { 'resource_provider_bandwidths': {}, 'resource_provider_inventory_defaults': {}, 'resource_provider_hypervisors': {'eth0': 'hypervisor0'}, }, 'host': 'fake host', } agent_db = mock.Mock() with mock.patch.object(self.service_plugin._batch_notifier, 'queue_event') as mock_queue_event, \ mock.patch.object(self.service_plugin._placement_client, 'list_resource_providers', return_value={'resource_providers': [ {'uuid': 'fake uuid'}]}) as mock_list_rps: self.service_plugin._sync_placement_state(agent, agent_db) self.assertEqual(1, mock_queue_event.call_count) mock_list_rps.assert_called_once_with(name='hypervisor0') class PlacementReporterAgentsTestCases(test_plugin.Ml2PluginV2TestCase): _mechanism_drivers = ['logger', 'test_with_agent'] def test_supported_agent_types(self): self.agents = plugin.PlacementReporterAgents(ml2_plugin=self.plugin) self.assertEqual( ['test_mechanism_driver_agent'], self.agents.supported_agent_types) def test_mechanism_driver_by_agent_type_found(self): 
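# The 'test_with_agent' mechanism driver enabled above reports
# 'test_mechanism_driver_agent' as its agent type, so the lookup is
# expected to resolve to a TestMechanismDriver instance.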
self.agents = plugin.PlacementReporterAgents(ml2_plugin=self.plugin) mech_driver = self.agents.mechanism_driver_by_agent_type( 'test_mechanism_driver_agent') self.assertIsInstance(mech_driver, mechanism_test.TestMechanismDriver) def test_mechanism_driver_by_agent_type_not_found(self): self.agents = plugin.PlacementReporterAgents(ml2_plugin=self.plugin) self.assertRaises( Exception, # noqa self.agents.mechanism_driver_by_agent_type, 'agent_not_belonging_to_any_mechanism_driver')
neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/portforwarding/__init__.py
neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/portforwarding/test_pf_plugin.py
# Copyright (C) 2018 OpenStack Foundation # All Rights Reserved.
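# NOTE(editor): the setUp() below defeats the ResourceCallbacksManager
# singleton so every test gets fresh consumer/producer managers. A
# minimal, self-contained sketch of that patching trick (the Manager
# class here is hypothetical, not neutron code):
import mock


class Manager(object):
    _singleton = True
    _instance = None

    def __new__(cls):
        if cls._singleton and cls._instance is not None:
            return cls._instance
        cls._instance = super(Manager, cls).__new__(cls)
        return cls._instance


with mock.patch.object(Manager, '_singleton',
                       new_callable=mock.PropertyMock(return_value=False)):
    first, second = Manager(), Manager()

assert first is not second  # fresh instances while the patch is active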
import mock from neutron_lib import context from neutron_lib import exceptions as lib_exc from neutron_lib.exceptions import l3 as lib_l3_exc from neutron_lib.objects import exceptions as obj_exc from neutron_lib.plugins import constants as lib_plugin_conts from neutron_lib.plugins import directory from oslo_config import cfg from neutron.api.rpc.callbacks.consumer import registry as cons_registry from neutron.api.rpc.callbacks import events as rpc_events from neutron.api.rpc.callbacks.producer import registry as prod_registry from neutron.api.rpc.callbacks import resource_manager from neutron.api.rpc.handlers import resources_rpc from neutron.db import db_base_plugin_v2 from neutron.db import l3_db from neutron import manager from neutron.objects import base as obj_base from neutron.objects import port_forwarding from neutron.objects import router from neutron.services.portforwarding.common import exceptions as pf_exc from neutron.services.portforwarding import pf_plugin from neutron.tests.unit import testlib_api DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' class TestPortForwardingPlugin(testlib_api.SqlTestCase): def setUp(self): super(TestPortForwardingPlugin, self).setUp() with mock.patch.object( resource_manager.ResourceCallbacksManager, '_singleton', new_callable=mock.PropertyMock(return_value=False)): self.cons_mgr = resource_manager.ConsumerResourceCallbacksManager() self.prod_mgr = resource_manager.ProducerResourceCallbacksManager() for mgr in (self.cons_mgr, self.prod_mgr): mgr.clear() mock.patch.object( cons_registry, '_get_manager', return_value=self.cons_mgr).start() mock.patch.object( prod_registry, '_get_manager', return_value=self.prod_mgr).start() self.setup_coreplugin(load_plugins=False) mock.patch('neutron.objects.db.api.create_object').start() mock.patch('neutron.objects.db.api.update_object').start() mock.patch('neutron.objects.db.api.delete_object').start() mock.patch('neutron.objects.db.api.get_object').start() # We don't use real models as per mocks above. 
# We also need to mock out methods that work with real data types mock.patch( 'neutron.objects.base.NeutronDbObject.modify_fields_from_db' ).start() cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) cfg.CONF.set_override("service_plugins", ["router", "port_forwarding"]) manager.init() self.pf_plugin = directory.get_plugin(lib_plugin_conts.PORTFORWARDING) self.ctxt = context.Context('admin', 'fake_tenant') mock.patch.object(self.ctxt.session, 'refresh').start() mock.patch.object(self.ctxt.session, 'expunge').start() @mock.patch.object(port_forwarding.PortForwarding, 'get_object') def test_get_floatingip_port_forwarding(self, get_object_mock): self.pf_plugin.get_floatingip_port_forwarding( self.ctxt, 'pf_id', 'test-fip-id', fields=None) get_object_mock.assert_called_once_with(self.ctxt, id='pf_id') @mock.patch.object(port_forwarding.PortForwarding, 'get_object', return_value=None) def test_negative_get_floatingip_port_forwarding(self, get_object_mock): self.assertRaises( pf_exc.PortForwardingNotFound, self.pf_plugin.get_floatingip_port_forwarding, self.ctxt, 'pf_id', 'test-fip-id', fields=None) @mock.patch.object(port_forwarding.PortForwarding, 'get_objects') def test_get_floatingip_port_forwardings(self, get_objects_mock): self.pf_plugin.get_floatingip_port_forwardings(self.ctxt) get_objects_mock.assert_called_once_with( self.ctxt, _pager=mock.ANY, floatingip_id=None) @mock.patch.object(resources_rpc.ResourcesPushRpcApi, 'push') @mock.patch.object(port_forwarding.PortForwarding, 'get_object') @mock.patch.object(port_forwarding.PortForwarding, 'get_objects') @mock.patch.object(router.FloatingIP, 'get_object') def test_delete_floatingip_port_forwarding( self, fip_get_object_mock, pf_get_objects_mock, pf_get_object_mock, push_api_mock): # After delete, not empty resource list pf_get_objects_mock.return_value = [mock.Mock(id='pf_id'), mock.Mock(id='pf_id2')] pf_obj = mock.Mock(id='pf_id', floatingip_id='fip_id') pf_get_object_mock.return_value = pf_obj self.pf_plugin.delete_floatingip_port_forwarding( self.ctxt, 'pf_id', 'fip_id') pf_get_objects_mock.assert_called_once_with( self.ctxt, floatingip_id='fip_id') pf_obj.delete.assert_called() push_api_mock.assert_called_once_with( self.ctxt, mock.ANY, rpc_events.DELETED) # After delete, empty resource list pf_get_objects_mock.reset_mock() pf_get_object_mock.reset_mock() push_api_mock.reset_mock() pf_obj = mock.Mock(id='need_to_delete_pf_id', floatingip_id='fip_id') fip_obj = mock.Mock(id='fip_id') fip_get_object_mock.return_value = fip_obj pf_get_object_mock.return_value = pf_obj pf_get_objects_mock.return_value = [ mock.Mock(id='need_to_delete_pf_id')] self.pf_plugin.delete_floatingip_port_forwarding( self.ctxt, 'need_to_delete_pf_id', 'fip_id') pf_get_objects_mock.assert_called_once_with( self.ctxt, floatingip_id='fip_id') pf_get_object_mock.assert_called_once_with( self.ctxt, id='need_to_delete_pf_id') fip_obj.update_fields.assert_called_once_with({'router_id': None}) fip_obj.update.assert_called() push_api_mock.assert_called_once_with( self.ctxt, mock.ANY, rpc_events.DELETED) @mock.patch.object(port_forwarding.PortForwarding, 'get_object') def test_negative_delete_floatingip_port_forwarding( self, pf_get_object_mock): pf_get_object_mock.return_value = None self.assertRaises( pf_exc.PortForwardingNotFound, self.pf_plugin.delete_floatingip_port_forwarding, self.ctxt, 'pf_id', floatingip_id='fip_id') @mock.patch.object(resources_rpc.ResourcesPushRpcApi, 'push') @mock.patch.object(port_forwarding.PortForwarding, 'get_object') def 
test_update_floatingip_port_forwarding( self, mock_pf_get_object, mock_rpc_push): pf_input = { 'port_forwarding': {'port_forwarding': { 'internal_ip_address': '1.1.1.1', 'floatingip_id': 'fip_id'}}, 'floatingip_id': 'fip_id'} pf_obj = mock.Mock() mock_pf_get_object.return_value = pf_obj self.pf_plugin.update_floatingip_port_forwarding( self.ctxt, 'pf_id', **pf_input) mock_pf_get_object.assert_called_once_with(self.ctxt, id='pf_id') self.assertTrue(pf_obj.update_fields) self.assertTrue(pf_obj.update) mock_rpc_push.assert_called_once_with( self.ctxt, mock.ANY, rpc_events.UPDATED) @mock.patch.object(port_forwarding.PortForwarding, 'get_object') def test_negative_update_floatingip_port_forwarding( self, mock_pf_get_object): pf_input = { 'port_forwarding': {'port_forwarding': { 'internal_ip_address': '1.1.1.1', 'floatingip_id': 'fip_id'}}, 'floatingip_id': 'fip_id'} mock_pf_get_object.return_value = None self.assertRaises( pf_exc.PortForwardingNotFound, self.pf_plugin.update_floatingip_port_forwarding, self.ctxt, 'pf_id', **pf_input) @mock.patch.object(pf_plugin.PortForwardingPlugin, '_check_port_has_binding_floating_ip') @mock.patch.object(obj_base.NeutronDbObject, 'update_objects') @mock.patch.object(resources_rpc.ResourcesPushRpcApi, 'push') @mock.patch.object(pf_plugin.PortForwardingPlugin, '_check_router_match') @mock.patch.object(pf_plugin.PortForwardingPlugin, '_find_a_router_for_fip_port_forwarding', return_value='target_router_id') @mock.patch.object(router.FloatingIP, 'get_object') @mock.patch('neutron.objects.port_forwarding.PortForwarding') def test_create_floatingip_port_forwarding( self, mock_port_forwarding, mock_fip_get_object, mock_find_router, mock_check_router_match, mock_push_api, mock_update_objects, mock_check_bind_fip): # Update fip pf_input = { 'port_forwarding': {'port_forwarding': { 'internal_ip_address': '1.1.1.1', 'floatingip_id': 'fip_id'}}, 'floatingip_id': 'fip_id'} pf_obj = mock.Mock() fip_obj = mock.Mock() mock_port_forwarding.return_value = pf_obj mock_fip_get_object.return_value = fip_obj fip_obj.router_id = '' fip_obj.fixed_port_id = '' self.pf_plugin.create_floatingip_port_forwarding( self.ctxt, **pf_input) mock_port_forwarding.assert_called_once_with( self.ctxt, **pf_input['port_forwarding']['port_forwarding']) self.assertTrue(mock_update_objects.called) self.assertTrue(pf_obj.create.called) mock_push_api.assert_called_once_with( self.ctxt, mock.ANY, rpc_events.CREATED) # Not update fip pf_obj.reset_mock() fip_obj.reset_mock() mock_port_forwarding.reset_mock() mock_update_objects.reset_mock() mock_push_api.reset_mock() mock_port_forwarding.return_value = pf_obj fip_obj.router_id = 'router_id' fip_obj.fixed_port_id = '' self.pf_plugin.create_floatingip_port_forwarding( self.ctxt, **pf_input) mock_port_forwarding.assert_called_once_with( self.ctxt, **pf_input['port_forwarding']['port_forwarding']) self.assertTrue(pf_obj.create.called) self.assertFalse(mock_update_objects.called) mock_push_api.assert_called_once_with( self.ctxt, mock.ANY, rpc_events.CREATED) @mock.patch.object(pf_plugin.PortForwardingPlugin, '_check_port_has_binding_floating_ip') @mock.patch.object(pf_plugin.PortForwardingPlugin, '_find_existing_port_forwarding') @mock.patch.object(pf_plugin.PortForwardingPlugin, '_check_router_match') @mock.patch.object(pf_plugin.PortForwardingPlugin, '_find_a_router_for_fip_port_forwarding', return_value='target_router_id') @mock.patch.object(router.FloatingIP, 'get_object') @mock.patch('neutron.objects.port_forwarding.PortForwarding') def 
test_negative_create_floatingip_port_forwarding( self, mock_port_forwarding, mock_fip_get_object, mock_find_router, mock_check_router_match, mock_try_find_exist, mock_check_bind_fip): pf_input = { 'port_forwarding': { 'internal_ip_address': '1.1.1.1', 'floatingip_id': 'fip_id'}} pf_obj = mock.Mock() fip_obj = mock.Mock() mock_port_forwarding.return_value = pf_obj mock_fip_get_object.return_value = fip_obj fip_obj.fixed_port_id = '' pf_obj.create.side_effect = obj_exc.NeutronDbObjectDuplicateEntry( mock.Mock(), mock.Mock()) mock_try_find_exist.return_value = ('pf_obj', 'conflict_param') self.assertRaises( lib_exc.BadRequest, self.pf_plugin.create_floatingip_port_forwarding, self.ctxt, 'fip_id', pf_input) @mock.patch.object(pf_plugin.PortForwardingPlugin, '_get_internal_ip_subnet') @mock.patch.object(l3_db.L3_NAT_dbonly_mixin, 'get_router_for_floatingip') @mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'get_port') @mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'get_subnet') def test_negative_find_a_router_for_fip_port_forwarding( self, mock_get_subnet, mock_get_port, mock_get_router, mock_get_ip_subnet): fip_obj = mock.Mock() pf_dict = {'internal_port_id': 'internal_neutron_port', 'internal_ip_address': '10.0.0.1'} port_dict = {'id': 'ID', 'fixed_ips': [{"subnet_id": "test-subnet-id", "ip_address": "10.0.0.1"}]} mock_get_port.return_value = port_dict mock_get_ip_subnet.return_value = None self.assertRaises( lib_exc.BadRequest, self.pf_plugin._find_a_router_for_fip_port_forwarding, self.ctxt, pf_dict, fip_obj) self.assertTrue(not mock_get_subnet.called) mock_get_ip_subnet.return_value = 'internal_subnet_id' mock_get_router.side_effect = ( lib_l3_exc.ExternalGatewayForFloatingIPNotFound( external_network_id=mock.Mock(), subnet_id=mock.Mock(), port_id=mock.Mock())) self.assertRaises( lib_exc.BadRequest, self.pf_plugin._find_a_router_for_fip_port_forwarding, self.ctxt, pf_dict, fip_obj) self.assertTrue(mock_get_subnet.called) ipv6_port_dict = {'id': 'ID', 'fixed_ips': [{"subnet_id": "test-subnet-id", "ip_address": "1::1"}]} mock_get_port.return_value = ipv6_port_dict self.assertRaises( lib_exc.BadRequest, self.pf_plugin._find_a_router_for_fip_port_forwarding, self.ctxt, pf_dict, fip_obj) @mock.patch.object(port_forwarding.PortForwarding, 'get_objects') def test_negative_check_router_match(self, mock_pf_get_objects): pf_dict = { 'internal_port_id': 'internal_neutron_port', 'internal_ip_address': 'internal_fixed_ip', 'internal_port': 'internal protocol port num'} fip_obj = mock.Mock() mock_pf_get_objects.return_value = ['Exist port forwardings'] router_id = 'selected router id' self.assertRaises(lib_exc.BadRequest, self.pf_plugin._check_router_match, self.ctxt, fip_obj, router_id, pf_dict) mock_pf_get_objects.return_value = [] self.assertRaises(lib_exc.BadRequest, self.pf_plugin._check_router_match, self.ctxt, fip_obj, router_id, pf_dict) @mock.patch.object(router.FloatingIP, 'get_objects') def test_create_floatingip_port_forwarding_port_in_use( self, mock_fip_get_objects): pf_input = { 'port_forwarding': {'port_forwarding': { 'internal_ip_address': '1.1.1.1', 'internal_port_id': 'internal_neutron_port', 'floatingip_id': 'fip_id_1'}}, 'floatingip_id': 'fip_id_1'} fip_obj = mock.Mock(floating_ip_address="10.10.10.10") mock_fip_get_objects.return_value = [fip_obj] self.assertRaises(pf_exc.PortHasBindingFloatingIP, self.pf_plugin.create_floatingip_port_forwarding, self.ctxt, **pf_input) @mock.patch.object(router.FloatingIP, 'get_objects') def 
test_update_floatingip_port_forwarding_port_in_use( self, mock_fip_get_objects): pf_input = { 'port_forwarding': {'port_forwarding': { 'internal_ip_address': '1.1.1.1', 'internal_port_id': 'internal_neutron_port', 'floatingip_id': 'fip_id_2'}}} fip_obj = mock.Mock(floating_ip_address="10.10.10.11") mock_fip_get_objects.return_value = [fip_obj] self.assertRaises(pf_exc.PortHasBindingFloatingIP, self.pf_plugin.update_floatingip_port_forwarding, self.ctxt, 'fake-pf-id', 'fip_id_2', **pf_input)
neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/qos/__init__.py
neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/qos/base.py
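# NOTE(editor): the port forwarding tests above all follow the same
# arrange/act/assert-on-mocks shape: stage return values on mocks, run
# the method under test, then verify the recorded calls. A toy,
# self-contained sketch of that shape (delete_if_last is hypothetical,
# not a neutron API):
import mock


def delete_if_last(store, notifier, obj_id):
    store.get(obj_id).delete()
    if not store.get_all():
        notifier.queue_event('empty')


store, notifier = mock.Mock(), mock.Mock()
store.get_all.return_value = []              # arrange: nothing is left
delete_if_last(store, notifier, 'pf_id')     # act
store.get.assert_called_once_with('pf_id')   # assert on the call record
notifier.queue_event.assert_called_once_with('empty')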
import mock from neutron.api.rpc.callbacks.consumer import registry as cons_registry from neutron.api.rpc.callbacks.producer import registry as prod_registry from neutron.api.rpc.callbacks import resource_manager from neutron.tests.unit import testlib_api class BaseQosTestCase(testlib_api.SqlTestCase): def setUp(self): super(BaseQosTestCase, self).setUp() with mock.patch.object( resource_manager.ResourceCallbacksManager, '_singleton', new_callable=mock.PropertyMock(return_value=False)): self.cons_mgr = resource_manager.ConsumerResourceCallbacksManager() self.prod_mgr = resource_manager.ProducerResourceCallbacksManager() for mgr in (self.cons_mgr, self.prod_mgr): mgr.clear() mock.patch.object( cons_registry, '_get_manager', return_value=self.cons_mgr).start() mock.patch.object( prod_registry, '_get_manager', return_value=self.prod_mgr).start()
neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/qos/drivers/__init__.py
neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/qos/drivers/openvswitch/__init__.py
neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/qos/drivers/openvswitch/test_driver.py
# Copyright 2020 Ericsson Software Technology
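# NOTE(editor): test_validate_min_bw_rule_vs_physnet_non_physnet below
# is table driven: each scenario pairs segment kwargs with the assertion
# method (assertTrue/assertFalse) expected to hold for it. A minimal
# standalone illustration of the pattern (is_physnet_backed is a toy
# function, not the neutron driver):
import unittest


def is_physnet_backed(segment):
    return bool(segment.get('physical_network'))


class ScenarioStyleTest(unittest.TestCase):
    def test_scenarios(self):
        scenarios = [
            ({'physical_network': 'fake physnet'}, self.assertTrue),
            ({}, self.assertFalse),
        ]
        for segment, check in scenarios:
            check(is_physnet_backed(segment))


if __name__ == '__main__':
    unittest.main()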
import mock from neutron_lib.services.qos import constants as qos_consts from neutron.objects import network as network_object from neutron.services.qos.drivers.openvswitch import driver from neutron.tests.unit.services.qos import base class TestOVSDriver(base.BaseQosTestCase): def setUp(self): super(TestOVSDriver, self).setUp() self.driver = driver.OVSDriver.create() def test_validate_min_bw_rule_vs_physnet_non_physnet(self): scenarios = [ ({'physical_network': 'fake physnet'}, self.assertTrue), ({}, self.assertFalse), ] for segment_kwargs, test_method in scenarios: segment = network_object.NetworkSegment(**segment_kwargs) net = network_object.Network(mock.Mock(), segments=[segment]) rule = mock.Mock() rule.rule_type = qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH port = mock.Mock() with mock.patch( 'neutron.objects.network.Network.get_object', return_value=net): test_method(self.driver.validate_rule_for_port( mock.Mock(), rule, port))
neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/qos/drivers/ovn/__init__.py
neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/qos/drivers/ovn/test_driver.py
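# NOTE(editor): the OVN notification-driver tests below assert
# delegation through a chain of mock attributes
# (driver._driver._ovn_client._qos_driver). A hedged, self-contained
# sketch of that technique (NotificationDriver is hypothetical):
import mock


class NotificationDriver(object):
    def __init__(self, client):
        self._client = client

    def update_policy(self, context, policy):
        # only updates are forwarded; create/delete stay no-ops here
        self._client._qos_driver.update_policy(context, policy)


client = mock.Mock()
NotificationDriver(client).update_policy('ctx', 'policy')
# nested attributes on a Mock are auto-created, so the whole chain
# records calls without any explicit stubbing:
client._qos_driver.update_policy.assert_called_once_with('ctx', 'policy')
client._qos_driver.create_policy.assert_not_called()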
import mock from neutron.objects.qos import policy as qos_policy from neutron.objects.qos import rule as qos_rule from neutron.tests import base from neutron_lib import constants from oslo_utils import uuidutils from neutron.common.ovn import utils from neutron.services.qos.drivers.ovn import driver context = 'context' class TestOVNQosNotificationDriver(base.BaseTestCase): def setUp(self): super(TestOVNQosNotificationDriver, self).setUp() self.mech_driver = mock.Mock() self.mech_driver._ovn_client = mock.Mock() self.mech_driver._ovn_client._qos_driver = mock.Mock() self.driver = driver.OVNQosNotificationDriver.create( self.mech_driver) self.policy = "policy" def test_create_policy(self): self.driver.create_policy(context, self.policy) self.driver._driver._ovn_client._qos_driver.create_policy.\ assert_not_called() def test_update_policy(self): self.driver.update_policy(context, self.policy) self.driver._driver._ovn_client._qos_driver.update_policy.\ assert_called_once_with(context, self.policy) def test_delete_policy(self): self.driver.delete_policy(context, self.policy) self.driver._driver._ovn_client._qos_driver.delete_policy.\ assert_not_called() class TestOVNQosDriver(base.BaseTestCase): def setUp(self): super(TestOVNQosDriver, self).setUp() self.plugin = mock.Mock() self.ovn_client = mock.Mock() self.driver = driver.OVNQosDriver(self.ovn_client) self.driver._plugin_property = self.plugin self.port_id = uuidutils.generate_uuid() self.policy_id = uuidutils.generate_uuid() self.network_id = uuidutils.generate_uuid() self.network_policy_id = uuidutils.generate_uuid() self.policy = self._create_fake_policy() self.port = self._create_fake_port() self.bw_rule = self._create_bw_limit_rule() self.bw_expected = {'qos_max_rate': 1000, 'qos_burst': 100000, 'direction': constants.EGRESS_DIRECTION} self.dscp_rule = self._create_dscp_rule() self.dscp_expected = {'dscp_mark': 16, 'direction': constants.EGRESS_DIRECTION} def _create_bw_limit_rule(self): rule_obj = qos_rule.QosBandwidthLimitRule() rule_obj.id = uuidutils.generate_uuid() rule_obj.max_kbps = 1000 rule_obj.max_burst_kbps = 100000 rule_obj.obj_reset_changes() return rule_obj def _create_dscp_rule(self): rule_obj = qos_rule.QosDscpMarkingRule() rule_obj.id = uuidutils.generate_uuid() rule_obj.dscp_mark = 16 rule_obj.obj_reset_changes() return rule_obj def _create_fake_policy(self): policy_dict = {'id': self.network_policy_id} policy_obj = qos_policy.QosPolicy(context, **policy_dict) policy_obj.obj_reset_changes() return policy_obj def _create_fake_port(self): return {'id': self.port_id, 'qos_policy_id': self.policy_id, 'network_id': self.network_id, 'device_owner': 'compute:fake'} def _create_fake_network(self): return {'id': self.network_id, 'qos_policy_id': self.network_policy_id} def test__is_network_device_port(self): self.assertFalse(utils.is_network_device_port(self.port)) port = self._create_fake_port() port['device_owner'] = constants.DEVICE_OWNER_DHCP self.assertTrue(utils.is_network_device_port(port)) port['device_owner'] = 'neutron:LOADBALANCERV2' self.assertTrue(utils.is_network_device_port(port)) def _generate_port_options(self, policy_id, return_val, expected_result): with mock.patch.object(qos_rule, 'get_rules', return_value=return_val) as get_rules: options = self.driver._generate_port_options(context, policy_id) if policy_id: get_rules.assert_called_once_with(qos_policy.QosPolicy, context, policy_id) else: get_rules.assert_not_called() self.assertEqual(expected_result, options) def 
test__generate_port_options_no_policy_id(self): self._generate_port_options(None, [], {}) def test__generate_port_options_no_rules(self): self._generate_port_options(self.policy_id, [], {}) def test__generate_port_options_with_bw_rule(self): self._generate_port_options(self.policy_id, [self.bw_rule], self.bw_expected) def test__generate_port_options_with_dscp_rule(self): self._generate_port_options(self.policy_id, [self.dscp_rule], self.dscp_expected) def _get_qos_options(self, port, port_policy, network_policy): with mock.patch.object(qos_policy.QosPolicy, 'get_network_policy', return_value=self.policy) as get_network_policy: with mock.patch.object(self.driver, '_generate_port_options', return_value={}) as generate_port_options: options = self.driver.get_qos_options(port) if network_policy: get_network_policy.\ assert_called_once_with(context, self.network_id) generate_port_options. \ assert_called_once_with(context, self.network_policy_id) elif port_policy: get_network_policy.assert_not_called() generate_port_options.\ assert_called_once_with(context, self.policy_id) else: get_network_policy.assert_not_called() generate_port_options.assert_not_called() self.assertEqual({}, options) def test_get_qos_options_no_qos(self): port = self._create_fake_port() port.pop('qos_policy_id') self._get_qos_options(port, False, False) def test_get_qos_options_network_port(self): port = self._create_fake_port() port['device_owner'] = constants.DEVICE_OWNER_DHCP self._get_qos_options(port, False, False) @mock.patch('neutron_lib.context.get_admin_context', return_value=context) def test_get_qos_options_port_policy(self, *mocks): self._get_qos_options(self.port, True, False) @mock.patch('neutron_lib.context.get_admin_context', return_value=context) def test_get_qos_options_network_policy(self, *mocks): port = self._create_fake_port() port['qos_policy_id'] = None self._get_qos_options(port, False, True) def _update_network_ports(self, port, called): with mock.patch.object(self.plugin, 'get_ports', return_value=[port]) as get_ports: with mock.patch.object(self.ovn_client, 'update_port') as update_port: self.driver._update_network_ports( context, self.network_id, {}) get_ports.assert_called_once_with( context, filters={'network_id': [self.network_id]}) if called: update_port.assert_called() else: update_port.assert_not_called() def test__update_network_ports_port_policy(self): self._update_network_ports(self.port, False) def test__update_network_ports_network_device(self): port = self._create_fake_port() port['device_owner'] = constants.DEVICE_OWNER_DHCP self._update_network_ports(port, False) def test__update_network_ports(self): port = self._create_fake_port() port['qos_policy_id'] = None self._update_network_ports(port, True) def _update_network(self, network, called): with mock.patch.object(self.driver, '_generate_port_options', return_value={}) as generate_port_options: with mock.patch.object(self.driver, '_update_network_ports' ) as update_network_ports: self.driver.update_network(network) if called: generate_port_options.assert_called_once_with( context, self.network_policy_id) update_network_ports.assert_called_once_with( context, self.network_id, {}) else: generate_port_options.assert_not_called() update_network_ports.assert_not_called() @mock.patch('neutron_lib.context.get_admin_context', return_value=context) def test_update_network_no_qos(self, *mocks): network = self._create_fake_network() network.pop('qos_policy_id') self._update_network(network, False) 
@mock.patch('neutron_lib.context.get_admin_context', return_value=context) def test_update_network_policy_change(self, *mocks): network = self._create_fake_network() self._update_network(network, True) def test_update_policy(self): with mock.patch.object(self.driver, '_generate_port_options', return_value={}) as generate_port_options, \ mock.patch.object(self.policy, 'get_bound_networks', return_value=[self.network_id] ) as get_bound_networks, \ mock.patch.object(self.driver, '_update_network_ports' ) as update_network_ports, \ mock.patch.object(self.policy, 'get_bound_ports', return_value=[self.port_id] ) as get_bound_ports, \ mock.patch.object(self.plugin, 'get_port', return_value=self.port) as get_port, \ mock.patch.object(self.ovn_client, 'update_port', ) as update_port: self.driver.update_policy(context, self.policy) generate_port_options.assert_called_once_with( context, self.network_policy_id) get_bound_networks.assert_called_once_with() update_network_ports.assert_called_once_with( context, self.network_id, {}) get_bound_ports.assert_called_once_with() get_port.assert_called_once_with(context, self.port_id) update_port.assert_called_once_with(self.port, qos_options={})
neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/qos/drivers/test_manager.py
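# NOTE(editor): TestQosDriversManagerRules below expects
# supported_rule_types to be the intersection of the rule types every
# loaded driver advertises. A tiny sketch of that set arithmetic with
# invented driver data:
drivers = {
    'driver-A': {'bandwidth_limit', 'minimum_bandwidth'},
    'driver-B': {'minimum_bandwidth', 'dscp_marking'},
}
supported = set.intersection(*drivers.values()) if drivers else set()
assert supported == {'minimum_bandwidth'}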
import mock from neutron_lib.api.definitions import portbindings from neutron_lib import constants as lib_consts from neutron_lib import context from neutron_lib import exceptions from neutron_lib.services.qos import base as qos_driver_base from neutron_lib.services.qos import constants as qos_consts from oslo_utils import uuidutils from neutron.objects import ports as ports_object from neutron.objects.qos import rule as rule_object from neutron.services.qos.drivers import manager as driver_mgr from neutron.tests.unit.services.qos import base class TestQosDriversManagerBase(base.BaseQosTestCase): def setUp(self): super(TestQosDriversManagerBase, self).setUp() self.config_parse() self.setup_coreplugin(load_plugins=False) @staticmethod def _create_manager_with_drivers(drivers_details): for name, driver_details in drivers_details.items(): class QoSDriver(qos_driver_base.DriverBase): @property def is_loaded(self): return driver_details['is_loaded'] # the new ad-hoc driver will register on the QOS_PLUGIN registry QoSDriver(name, driver_details.get('vif_types', []), driver_details.get('vnic_types', []), driver_details.get('rules', [])) return driver_mgr.QosServiceDriverManager() class TestQosDriversManagerMulti(TestQosDriversManagerBase): """Test calls happen to all drivers""" def test_driver_manager_empty_with_no_drivers(self): driver_manager = self._create_manager_with_drivers({}) self.assertEqual(len(driver_manager._drivers), 0) def test_driver_manager_empty_with_no_loaded_drivers(self): driver_manager = self._create_manager_with_drivers( {'driver-A': {'is_loaded': False}}) self.assertEqual(len(driver_manager._drivers), 0) def test_driver_manager_with_one_loaded_driver(self): driver_manager = self._create_manager_with_drivers( {'driver-A': {'is_loaded': True}}) self.assertEqual(len(driver_manager._drivers), 1) def test_driver_manager_with_two_loaded_drivers(self): driver_manager = self._create_manager_with_drivers( {'driver-A': {'is_loaded': True}, 'driver-B': {'is_loaded': True}}) self.assertEqual(len(driver_manager._drivers), 2) class TestQoSDriversRulesValidations(TestQosDriversManagerBase): """Test validation of rules for port""" def setUp(self): super(TestQoSDriversRulesValidations, self).setUp() self.ctxt = context.Context('fake_user', 'fake_tenant') def _get_port(self, vif_type, vnic_type): port_id = uuidutils.generate_uuid() port_binding = ports_object.PortBinding( self.ctxt, port_id=port_id, vif_type=vif_type, vnic_type=vnic_type) return ports_object.Port( self.ctxt, id=uuidutils.generate_uuid(), bindings=[port_binding]) def _test_validate_rule_for_port(self, port, expected_result): driver_manager = self._create_manager_with_drivers({ 'driver-A': { 'is_loaded': True, 'rules': { qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH: { "min_kbps": {'type:values': None}, 'direction': { 'type:values': lib_consts.VALID_DIRECTIONS} } }, 'vif_types': [portbindings.VIF_TYPE_OVS], 'vnic_types': [portbindings.VNIC_NORMAL] } }) rule = rule_object.QosMinimumBandwidthRule( self.ctxt, id=uuidutils.generate_uuid()) is_rule_supported_mock = mock.Mock() if expected_result: is_rule_supported_mock.return_value = expected_result driver_manager._drivers[0].is_rule_supported = is_rule_supported_mock self.assertEqual(expected_result, driver_manager.validate_rule_for_port( mock.Mock(), rule, port)) if expected_result: is_rule_supported_mock.assert_called_once_with(rule) else: is_rule_supported_mock.assert_not_called() def test_validate_rule_for_port_rule_vif_type_supported(self): port = self._get_port( 
portbindings.VIF_TYPE_OVS, portbindings.VNIC_NORMAL) self._test_validate_rule_for_port( port, expected_result=True) def test_validate_rule_for_port_vif_type_not_supported(self): port = self._get_port( portbindings.VIF_TYPE_OTHER, portbindings.VNIC_NORMAL) self._test_validate_rule_for_port( port, expected_result=False) def test_validate_rule_for_port_unbound_vnic_type_supported(self): port = self._get_port( portbindings.VIF_TYPE_UNBOUND, portbindings.VNIC_NORMAL) self._test_validate_rule_for_port( port, expected_result=True) def test_validate_rule_for_port_unbound_vnic_type_not_supported(self): port = self._get_port( portbindings.VIF_TYPE_UNBOUND, portbindings.VNIC_BAREMETAL) self._test_validate_rule_for_port( port, expected_result=False) class TestQosDriversManagerRules(TestQosDriversManagerBase): """Test supported rules""" def test_available_rules_one_in_common(self): driver_manager = self._create_manager_with_drivers({ 'driver-A': { 'is_loaded': True, 'rules': { qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: { "max_kbps": {'type:values': None}, "max_burst_kbps": {'type:values': None} }, qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH: { "min_kbps": {'type:values': None}, 'direction': { 'type:values': lib_consts.VALID_DIRECTIONS} } } }, 'driver-B': { 'is_loaded': True, 'rules': { qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH: { "min_kbps": {'type:values': None}, 'direction': { 'type:values': lib_consts.VALID_DIRECTIONS} }, qos_consts.RULE_TYPE_DSCP_MARKING: { "dscp_mark": { 'type:values': lib_consts.VALID_DSCP_MARKS} } } } }) self.assertEqual(driver_manager.supported_rule_types, set([qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH])) def test_available_rules_no_rule_in_common(self): driver_manager = self._create_manager_with_drivers({ 'driver-A': { 'is_loaded': True, 'rules': { qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: { "max_kbps": {'type:values': None}, "max_burst_kbps": {'type:values': None} } } }, 'driver-B': { 'is_loaded': True, 'rules': { qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH: { "min_kbps": {'type:values': None}, 'direction': { 'type:values': lib_consts.VALID_DIRECTIONS} }, qos_consts.RULE_TYPE_DSCP_MARKING: { "dscp_mark": { 'type:values': lib_consts.VALID_DSCP_MARKS} } } } }) self.assertEqual(driver_manager.supported_rule_types, set([])) def test__parse_parameter_values(self): range_parameter = {'type:range': [0, 10]} values_parameter = {'type:values': [1, 10, 100, 1000]} expected_parsed_range_parameter = {'start': 0, 'end': 10} expected_parsed_values_parameter = [1, 10, 100, 1000] parameter_values, parameter_type = ( driver_mgr.QosServiceDriverManager._parse_parameter_values( range_parameter)) self.assertEqual( expected_parsed_range_parameter, parameter_values) self.assertEqual( lib_consts.VALUES_TYPE_RANGE, parameter_type) parameter_values, parameter_type = ( driver_mgr.QosServiceDriverManager._parse_parameter_values( values_parameter)) self.assertEqual( expected_parsed_values_parameter, parameter_values) self.assertEqual( lib_consts.VALUES_TYPE_CHOICES, parameter_type) def test_supported_rule_type_details(self): driver_manager = self._create_manager_with_drivers({ 'driver-A': { 'is_loaded': True, 'rules': { qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: { "max_kbps": {'type:range': [0, 1000]}, "max_burst_kbps": {'type:range': [0, 1000]} } } }, 'driver-B': { 'is_loaded': True, 'rules': { qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH: { "min_kbps": {'type:range': [0, 1000]}, 'direction': { 'type:values': lib_consts.VALID_DIRECTIONS} }, qos_consts.RULE_TYPE_DSCP_MARKING: { "dscp_mark": { 'type:values': lib_consts.VALID_DSCP_MARKS} } 
} } }) expected_rule_type_details = [{ 'name': 'driver-A', 'supported_parameters': [{ 'parameter_name': 'max_kbps', 'parameter_type': lib_consts.VALUES_TYPE_RANGE, 'parameter_values': {'start': 0, 'end': 1000} }, { 'parameter_name': 'max_burst_kbps', 'parameter_type': lib_consts.VALUES_TYPE_RANGE, 'parameter_values': {'start': 0, 'end': 1000} }] }] bandwidth_limit_details = driver_manager.supported_rule_type_details( qos_consts.RULE_TYPE_BANDWIDTH_LIMIT) self.assertEqual( len(expected_rule_type_details), len(bandwidth_limit_details)) self.assertEqual( expected_rule_type_details[0]['name'], bandwidth_limit_details[0]['name']) self.assertEqual( len(expected_rule_type_details[0]['supported_parameters']), len(bandwidth_limit_details[0]['supported_parameters']) ) for parameter in expected_rule_type_details[0]['supported_parameters']: self.assertIn( parameter, bandwidth_limit_details[0]['supported_parameters']) def test_supported_rule_type_details_no_drivers_loaded(self): driver_manager = self._create_manager_with_drivers({}) self.assertEqual( [], driver_manager.supported_rule_type_details( qos_consts.RULE_TYPE_BANDWIDTH_LIMIT)) class TestQosDriversCalls(TestQosDriversManagerBase): """Test QoS driver calls""" def setUp(self): super(TestQosDriversCalls, self).setUp() self.driver_manager = self._create_manager_with_drivers( {'driver-A': {'is_loaded': True}}) def test_implemented_call_methods(self): for method in qos_consts.QOS_CALL_METHODS: with mock.patch.object(qos_driver_base.DriverBase, method) as \ method_fnc: context = mock.Mock() policy = mock.Mock() self.driver_manager.call(method, context, policy) method_fnc.assert_called_once_with(context, policy) def test_not_implemented_call_methods(self): self.assertRaises(exceptions.DriverCallError, self.driver_manager.call, 'wrong_method', mock.Mock(), mock.Mock())
neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/qos/test_qos_plugin.py
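# NOTE(editor): several TestQosPlugin cases below prove ordering (DB
# write, then precommit driver call, then postcommit call) by attaching
# mocks to one manager and comparing indices in its interleaved
# mock_calls list. A self-contained sketch (create_policy here is a
# hypothetical stand-in for the plugin method):
import mock


def create_policy(db, driver):
    db.create()
    driver.call('create_policy_precommit')
    driver.call('create_policy')


manager = mock.Mock()
db, driver = mock.Mock(), mock.Mock()
manager.attach_mock(db, 'db')
manager.attach_mock(driver, 'driver')
create_policy(db, driver)
calls = manager.mock_calls
assert (calls.index(mock.call.db.create()) <
        calls.index(mock.call.driver.call('create_policy_precommit')) <
        calls.index(mock.call.driver.call('create_policy')))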
import copy import mock import netaddr from neutron_lib.api.definitions import qos from neutron_lib.callbacks import events from neutron_lib import constants as lib_constants from neutron_lib import context from neutron_lib import exceptions as lib_exc from neutron_lib.exceptions import qos as qos_exc from neutron_lib.objects import utils as obj_utils from neutron_lib.placement import constants as pl_constants from neutron_lib.plugins import constants as plugins_constants from neutron_lib.plugins import directory from neutron_lib.services.qos import constants as qos_consts from neutron_lib.utils import net as net_utils from oslo_config import cfg from oslo_utils import uuidutils import webob.exc from neutron.extensions import qos_rules_alias from neutron import manager from neutron.objects import network as network_object from neutron.objects import ports as ports_object from neutron.objects.qos import policy as policy_object from neutron.objects.qos import rule as rule_object from neutron.services.qos import qos_plugin from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit.services.qos import base DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' SERVICE_PLUGIN_KLASS = 'neutron.services.qos.qos_plugin.QoSPlugin' class TestQosPlugin(base.BaseQosTestCase): def setUp(self): super(TestQosPlugin, self).setUp() self.setup_coreplugin(load_plugins=False) mock.patch('neutron.objects.db.api.create_object').start() mock.patch('neutron.objects.db.api.update_object').start() mock.patch('neutron.objects.db.api.delete_object').start() mock.patch('neutron.objects.db.api.get_object').start() _mock_qos_load_attr = mock.patch( 'neutron.objects.qos.policy.QosPolicy.obj_load_attr') self.mock_qos_load_attr = _mock_qos_load_attr.start() # We don't use real models as per mocks above. 
# We also need to mock out methods that work with real data types mock.patch( 'neutron.objects.base.NeutronDbObject.modify_fields_from_db' ).start() mock.patch.object(policy_object.QosPolicy, 'unset_default').start() mock.patch.object(policy_object.QosPolicy, 'set_default').start() cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) cfg.CONF.set_override("service_plugins", ["qos"]) manager.init() self.qos_plugin = directory.get_plugin(plugins_constants.QOS) self.qos_plugin.driver_manager = mock.Mock() self.rpc_push = mock.patch('neutron.api.rpc.handlers.resources_rpc' '.ResourcesPushRpcApi.push').start() self.ctxt = context.Context('fake_user', 'fake_tenant') self.admin_ctxt = context.get_admin_context() self.policy_data = { 'policy': {'id': uuidutils.generate_uuid(), 'project_id': uuidutils.generate_uuid(), 'name': 'test-policy', 'description': 'Test policy description', 'shared': True, 'is_default': False}} self.rule_data = { 'bandwidth_limit_rule': {'id': uuidutils.generate_uuid(), 'max_kbps': 100, 'max_burst_kbps': 150}, 'dscp_marking_rule': {'id': uuidutils.generate_uuid(), 'dscp_mark': 16}, 'minimum_bandwidth_rule': { 'id': uuidutils.generate_uuid(), 'min_kbps': 10}} self.policy = policy_object.QosPolicy( self.ctxt, **self.policy_data['policy']) self.rule = rule_object.QosBandwidthLimitRule( self.ctxt, **self.rule_data['bandwidth_limit_rule']) self.dscp_rule = rule_object.QosDscpMarkingRule( self.ctxt, **self.rule_data['dscp_marking_rule']) self.min_rule = rule_object.QosMinimumBandwidthRule( self.ctxt, **self.rule_data['minimum_bandwidth_rule']) def _validate_driver_params(self, method_name, ctxt): call_args = self.qos_plugin.driver_manager.call.call_args[0] self.assertTrue(self.qos_plugin.driver_manager.call.called) self.assertEqual(call_args[0], method_name) self.assertEqual(call_args[1], ctxt) self.assertIsInstance(call_args[2], policy_object.QosPolicy) def _create_and_extend_port(self, bw_rules, physical_network='public', has_qos_policy=True, has_net_qos_policy=False): network_id = uuidutils.generate_uuid() self.port_data = { 'port': {'id': uuidutils.generate_uuid(), 'network_id': network_id} } if has_qos_policy: self.port_data['port']['qos_policy_id'] = self.policy.id self.policy.rules = bw_rules elif has_net_qos_policy: self.port_data['port']['qos_network_policy_id'] = self.policy.id self.policy.rules = bw_rules self.port = ports_object.Port( self.ctxt, **self.port_data['port']) port_res = {"binding:vnic_type": "normal"} segment_mock = mock.MagicMock(network_id=network_id, physical_network=physical_network) with mock.patch('neutron.objects.network.NetworkSegment.get_objects', return_value=[segment_mock]), \ mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=self.policy): return qos_plugin.QoSPlugin._extend_port_resource_request( port_res, self.port) def test__extend_port_resource_request_min_bw_rule(self): self.min_rule.direction = lib_constants.EGRESS_DIRECTION port = self._create_and_extend_port([self.min_rule]) self.assertEqual( ['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'], port['resource_request']['required'] ) self.assertEqual( {pl_constants.CLASS_NET_BW_EGRESS_KBPS: 10}, port['resource_request']['resources'], ) def test__extend_port_resource_request_mixed_rules(self): self.min_rule.direction = lib_constants.EGRESS_DIRECTION min_rule_ingress_data = { 'id': uuidutils.generate_uuid(), 'min_kbps': 20, 'direction': lib_constants.INGRESS_DIRECTION} min_rule_ingress = rule_object.QosMinimumBandwidthRule( self.ctxt, **min_rule_ingress_data) port 
= self._create_and_extend_port([self.min_rule, min_rule_ingress]) self.assertEqual( ['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'], port['resource_request']['required'] ) self.assertEqual( { pl_constants.CLASS_NET_BW_EGRESS_KBPS: 10, pl_constants.CLASS_NET_BW_INGRESS_KBPS: 20 }, port['resource_request']['resources'], ) def test__extend_port_resource_request_non_min_bw_rule(self): port = self._create_and_extend_port([self.rule]) self.assertIsNone(port.get('resource_request')) def test__extend_port_resource_request_non_provider_net(self): self.min_rule.direction = lib_constants.EGRESS_DIRECTION port = self._create_and_extend_port([self.min_rule], physical_network=None) self.assertIsNone(port.get('resource_request')) def test__extend_port_resource_request_no_qos_policy(self): port = self._create_and_extend_port([], physical_network='public', has_qos_policy=False) self.assertIsNone(port.get('resource_request')) def test__extend_port_resource_request_inherited_policy(self): self.min_rule.direction = lib_constants.EGRESS_DIRECTION self.policy.rules = [self.min_rule] self.min_rule.qos_policy_id = self.policy.id port = self._create_and_extend_port([self.min_rule], has_net_qos_policy=True) self.assertEqual( ['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'], port['resource_request']['required'] ) self.assertEqual( {pl_constants.CLASS_NET_BW_EGRESS_KBPS: 10}, port['resource_request']['resources'], ) def test_get_ports_with_policy(self): network_ports = [ mock.MagicMock(qos_policy_id=None), mock.MagicMock(qos_policy_id=uuidutils.generate_uuid()), mock.MagicMock(qos_policy_id=None) ] ports = [ mock.MagicMock(qos_policy_id=self.policy.id), ] expected_network_ports = [ port for port in network_ports if port.qos_policy_id is None] expected_ports = ports + expected_network_ports with mock.patch( 'neutron.objects.ports.Port.get_objects', side_effect=[network_ports, ports] ), mock.patch.object( self.policy, "get_bound_networks" ), mock.patch.object( self.policy, "get_bound_ports" ): policy_ports = self.qos_plugin._get_ports_with_policy( self.ctxt, self.policy) self.assertEqual( len(expected_ports), len(policy_ports)) for port in expected_ports: self.assertIn(port, policy_ports) def _test_validate_update_port_callback(self, policy_id=None, original_policy_id=None): port_id = uuidutils.generate_uuid() kwargs = { "port": { "id": port_id, qos_consts.QOS_POLICY_ID: policy_id }, "original_port": { "id": port_id, qos_consts.QOS_POLICY_ID: original_policy_id } } port_mock = mock.MagicMock(id=port_id, qos_policy_id=policy_id) policy_mock = mock.MagicMock(id=policy_id) admin_ctxt = mock.Mock() with mock.patch( 'neutron.objects.ports.Port.get_object', return_value=port_mock ) as get_port, mock.patch( 'neutron.objects.qos.policy.QosPolicy.get_object', return_value=policy_mock ) as get_policy, mock.patch.object( self.qos_plugin, "validate_policy_for_port" ) as validate_policy_for_port, mock.patch.object( self.ctxt, "elevated", return_value=admin_ctxt ): self.qos_plugin._validate_update_port_callback( "PORT", "precommit_update", "test_plugin", payload=events.DBEventPayload( self.ctxt, desired_state=kwargs['port'], states=(kwargs['original_port'],))) if policy_id is None or policy_id == original_policy_id: get_port.assert_not_called() get_policy.assert_not_called() validate_policy_for_port.assert_not_called() else: get_port.assert_called_once_with(self.ctxt, id=port_id) get_policy.assert_called_once_with(admin_ctxt, id=policy_id) validate_policy_for_port.assert_called_once_with( self.ctxt, policy_mock, port_mock) 
def test_validate_update_port_callback_policy_changed(self): self._test_validate_update_port_callback( policy_id=uuidutils.generate_uuid()) def test_validate_update_port_callback_policy_not_changed(self): policy_id = uuidutils.generate_uuid() self._test_validate_update_port_callback( policy_id=policy_id, original_policy_id=policy_id) def test_validate_update_port_callback_policy_removed(self): self._test_validate_update_port_callback( policy_id=None, original_policy_id=uuidutils.generate_uuid()) def _test_validate_update_network_callback(self, policy_id=None, original_policy_id=None): network_id = uuidutils.generate_uuid() kwargs = { "context": self.ctxt, "network": { "id": network_id, qos_consts.QOS_POLICY_ID: policy_id }, "original_network": { "id": network_id, qos_consts.QOS_POLICY_ID: original_policy_id } } port_mock_with_own_policy = mock.MagicMock( id=uuidutils.generate_uuid(), qos_policy_id=uuidutils.generate_uuid()) port_mock_without_own_policy = mock.MagicMock( id=uuidutils.generate_uuid(), qos_policy_id=None) ports = [port_mock_with_own_policy, port_mock_without_own_policy] policy_mock = mock.MagicMock(id=policy_id) admin_ctxt = mock.Mock() with mock.patch( 'neutron.objects.ports.Port.get_objects', return_value=ports ) as get_ports, mock.patch( 'neutron.objects.qos.policy.QosPolicy.get_object', return_value=policy_mock ) as get_policy, mock.patch.object( self.qos_plugin, "validate_policy_for_ports" ) as validate_policy_for_ports, mock.patch.object( self.ctxt, "elevated", return_value=admin_ctxt ): self.qos_plugin._validate_update_network_callback( "NETWORK", "precommit_update", "test_plugin", payload=events.DBEventPayload( self.ctxt, desired_state=kwargs['network'], states=(kwargs['original_network'],))) if policy_id is None or policy_id == original_policy_id: get_policy.assert_not_called() get_ports.assert_not_called() validate_policy_for_ports.assert_not_called() else: get_policy.assert_called_once_with(admin_ctxt, id=policy_id) get_ports.assert_called_once_with(self.ctxt, network_id=network_id) validate_policy_for_ports.assert_called_once_with( self.ctxt, policy_mock, [port_mock_without_own_policy]) def test_validate_update_network_callback_policy_changed(self): self._test_validate_update_network_callback( policy_id=uuidutils.generate_uuid()) def test_validate_update_network_callback_policy_not_changed(self): policy_id = uuidutils.generate_uuid() self._test_validate_update_network_callback( policy_id=policy_id, original_policy_id=policy_id) def test_validate_update_network_callback_policy_removed(self): self._test_validate_update_network_callback( policy_id=None, original_policy_id=uuidutils.generate_uuid()) def test_validate_policy_for_port_rule_not_valid(self): port = {'id': uuidutils.generate_uuid()} with mock.patch.object( self.qos_plugin.driver_manager, "validate_rule_for_port", return_value=False ): self.policy.rules = [self.rule] self.assertRaises( qos_exc.QosRuleNotSupported, self.qos_plugin.validate_policy_for_port, self.ctxt, self.policy, port) def test_validate_policy_for_port_all_rules_valid(self): port = {'id': uuidutils.generate_uuid()} with mock.patch.object( self.qos_plugin.driver_manager, "validate_rule_for_port", return_value=True ): self.policy.rules = [self.rule] try: self.qos_plugin.validate_policy_for_port( self.ctxt, self.policy, port) except qos_exc.QosRuleNotSupported: self.fail("QosRuleNotSupported exception unexpectedly raised") def test_create_min_bw_rule_on_bound_port(self): policy = self._get_policy() policy.rules = [self.min_rule] segment = 
network_object.NetworkSegment( physical_network='fake physnet') net = network_object.Network( self.ctxt, segments=[segment]) port = ports_object.Port( self.ctxt, id=uuidutils.generate_uuid(), network_id=uuidutils.generate_uuid(), device_owner='compute:fake-zone') with mock.patch( 'neutron.objects.qos.policy.QosPolicy.get_object', return_value=policy), \ mock.patch( 'neutron.objects.network.Network.get_object', return_value=net), \ mock.patch.object( self.qos_plugin, '_get_ports_with_policy', return_value=[port]): self.assertRaises( NotImplementedError, self.qos_plugin.create_policy_minimum_bandwidth_rule, self.ctxt, policy.id, self.rule_data) def test_create_min_bw_rule_on_unbound_port(self): policy = self._get_policy() policy.rules = [self.min_rule] segment = network_object.NetworkSegment( physical_network='fake physnet') net = network_object.Network( self.ctxt, segments=[segment]) port = ports_object.Port( self.ctxt, id=uuidutils.generate_uuid(), network_id=uuidutils.generate_uuid(), device_owner='') with mock.patch( 'neutron.objects.qos.policy.QosPolicy.get_object', return_value=policy), \ mock.patch( 'neutron.objects.network.Network.get_object', return_value=net), \ mock.patch.object( self.qos_plugin, '_get_ports_with_policy', return_value=[port]): try: self.qos_plugin.create_policy_minimum_bandwidth_rule( self.ctxt, policy.id, self.rule_data) except NotImplementedError: self.fail() @mock.patch( 'neutron.objects.rbac_db.RbacNeutronDbObjectMixin' '.create_rbac_policy') @mock.patch('neutron.objects.qos.policy.QosPolicy') def test_add_policy(self, mock_qos_policy, mock_create_rbac_policy): mock_manager = mock.Mock() mock_manager.attach_mock(mock_qos_policy, 'QosPolicy') mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver') mock_manager.reset_mock() self.qos_plugin.create_policy(self.ctxt, self.policy_data) policy_mock_call = mock.call.QosPolicy().create() create_precommit_mock_call = mock.call.driver.call( 'create_policy_precommit', self.ctxt, mock.ANY) create_mock_call = mock.call.driver.call( 'create_policy', self.ctxt, mock.ANY) self.assertTrue( mock_manager.mock_calls.index(policy_mock_call) < mock_manager.mock_calls.index(create_precommit_mock_call) < mock_manager.mock_calls.index(create_mock_call)) def test_add_policy_with_extra_tenant_keyword(self, *mocks): policy_id = uuidutils.generate_uuid() project_id = uuidutils.generate_uuid() tenant_policy = { 'policy': {'id': policy_id, 'project_id': project_id, 'tenant_id': project_id, 'name': 'test-policy', 'description': 'Test policy description', 'shared': True, 'is_default': False}} policy_details = {'id': policy_id, 'project_id': project_id, 'name': 'test-policy', 'description': 'Test policy description', 'shared': True, 'is_default': False} with mock.patch('neutron.objects.qos.policy.QosPolicy') as QosMocked: self.qos_plugin.create_policy(self.ctxt, tenant_policy) QosMocked.assert_called_once_with(self.ctxt, **policy_details) @mock.patch.object(policy_object.QosPolicy, "get_object") @mock.patch( 'neutron.objects.rbac_db.RbacNeutronDbObjectMixin' '.create_rbac_policy') @mock.patch.object(policy_object.QosPolicy, 'update') def test_update_policy(self, mock_qos_policy_update, mock_create_rbac_policy, mock_qos_policy_get): mock_qos_policy_get.return_value = self.policy mock_manager = mock.Mock() mock_manager.attach_mock(mock_qos_policy_update, 'update') mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver') mock_manager.reset_mock() fields = obj_utils.get_updatable_fields( policy_object.QosPolicy, 
self.policy_data['policy']) self.qos_plugin.update_policy( self.ctxt, self.policy.id, {'policy': fields}) self._validate_driver_params('update_policy', self.ctxt) policy_update_mock_call = mock.call.update() update_precommit_mock_call = mock.call.driver.call( 'update_policy_precommit', self.ctxt, mock.ANY) update_mock_call = mock.call.driver.call( 'update_policy', self.ctxt, mock.ANY) self.assertTrue( mock_manager.mock_calls.index(policy_update_mock_call) < mock_manager.mock_calls.index(update_precommit_mock_call) < mock_manager.mock_calls.index(update_mock_call)) @mock.patch('neutron.objects.db.api.get_object', return_value=None) @mock.patch.object(policy_object.QosPolicy, 'delete') def test_delete_policy(self, mock_qos_policy_delete, mock_api_get_policy): mock_manager = mock.Mock() mock_manager.attach_mock(mock_qos_policy_delete, 'delete') mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver') mock_manager.reset_mock() self.qos_plugin.delete_policy(self.ctxt, self.policy.id) self._validate_driver_params('delete_policy', self.ctxt) policy_delete_mock_call = mock.call.delete() delete_precommit_mock_call = mock.call.driver.call( 'delete_policy_precommit', self.ctxt, mock.ANY) delete_mock_call = mock.call.driver.call( 'delete_policy', self.ctxt, mock.ANY) self.assertTrue( mock_manager.mock_calls.index(policy_delete_mock_call) < mock_manager.mock_calls.index(delete_precommit_mock_call) < mock_manager.mock_calls.index(delete_mock_call)) @mock.patch.object(policy_object.QosPolicy, "get_object") @mock.patch.object(rule_object.QosBandwidthLimitRule, 'create') def test_create_policy_rule(self, mock_qos_rule_create, mock_qos_policy_get): _policy = copy.copy(self.policy) setattr(_policy, "rules", []) mock_qos_policy_get.return_value = _policy mock_manager = mock.Mock() mock_manager.attach_mock(mock_qos_rule_create, 'create') mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver') mock_manager.reset_mock() with mock.patch( 'neutron.objects.qos.qos_policy_validator' '.check_bandwidth_rule_conflict', return_value=None): self.qos_plugin.create_policy_bandwidth_limit_rule( self.ctxt, self.policy.id, self.rule_data) self._validate_driver_params('update_policy', self.ctxt) rule_create_mock_call = mock.call.create() update_precommit_mock_call = mock.call.driver.call( 'update_policy_precommit', self.ctxt, mock.ANY) update_mock_call = mock.call.driver.call( 'update_policy', self.ctxt, mock.ANY) self.assertTrue( mock_manager.mock_calls.index(rule_create_mock_call) < mock_manager.mock_calls.index(update_precommit_mock_call) < mock_manager.mock_calls.index(update_mock_call)) def test_create_policy_rule_check_rule_min_less_than_max(self): _policy = self._get_policy() setattr(_policy, "rules", [self.rule]) with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy) as mock_qos_get_obj: self.qos_plugin.create_policy_minimum_bandwidth_rule( self.ctxt, _policy.id, self.rule_data) self._validate_driver_params('update_policy', self.ctxt) self.mock_qos_load_attr.assert_called_once_with('rules') mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id) def test_create_policy_rule_check_rule_max_more_than_min(self): _policy = self._get_policy() setattr(_policy, "rules", [self.min_rule]) with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy) as mock_qos_get_obj: self.qos_plugin.create_policy_bandwidth_limit_rule( self.ctxt, _policy.id, self.rule_data) self._validate_driver_params('update_policy', self.ctxt) 
self.mock_qos_load_attr.assert_called_once_with('rules') mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id) def test_create_policy_rule_check_rule_bwlimit_less_than_minbw(self): _policy = self._get_policy() self.rule_data['bandwidth_limit_rule']['max_kbps'] = 1 setattr(_policy, "rules", [self.min_rule]) with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy) as mock_qos_get_obj: self.assertRaises(qos_exc.QoSRuleParameterConflict, self.qos_plugin.create_policy_bandwidth_limit_rule, self.ctxt, self.policy.id, self.rule_data) mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id) def test_create_policy_rule_check_rule_minbw_gr_than_bwlimit(self): _policy = self._get_policy() self.rule_data['minimum_bandwidth_rule']['min_kbps'] = 1000000 setattr(_policy, "rules", [self.rule]) with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy) as mock_qos_get_obj: self.assertRaises(qos_exc.QoSRuleParameterConflict, self.qos_plugin.create_policy_minimum_bandwidth_rule, self.ctxt, self.policy.id, self.rule_data) mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id) def test_create_policy_rule_duplicates(self): _policy = self._get_policy() setattr(_policy, "rules", [self.rule]) new_rule_data = { 'bandwidth_limit_rule': { 'max_kbps': 5000, 'direction': self.rule.direction } } with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy) as mock_qos_get_obj: self.assertRaises( qos_exc.QoSRulesConflict, self.qos_plugin.create_policy_bandwidth_limit_rule, self.ctxt, _policy.id, new_rule_data) mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id) @mock.patch.object(rule_object.QosBandwidthLimitRule, 'update') def test_update_policy_rule(self, mock_qos_rule_update): mock_manager = mock.Mock() mock_manager.attach_mock(mock_qos_rule_update, 'update') mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver') mock_manager.reset_mock() _policy = policy_object.QosPolicy( self.ctxt, **self.policy_data['policy']) setattr(_policy, "rules", [self.rule]) with mock.patch('neutron.objects.qos.rule.get_rules', return_value=[self.rule]), mock.patch( 'neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy): self.rule_data['bandwidth_limit_rule']['max_kbps'] = 1 self.qos_plugin.update_policy_bandwidth_limit_rule( self.ctxt, self.rule.id, self.policy.id, self.rule_data) self._validate_driver_params('update_policy', self.ctxt) rule_update_mock_call = mock.call.update() update_precommit_mock_call = mock.call.driver.call( 'update_policy_precommit', self.ctxt, mock.ANY) update_mock_call = mock.call.driver.call( 'update_policy', self.ctxt, mock.ANY) self.assertTrue( mock_manager.mock_calls.index(rule_update_mock_call) < mock_manager.mock_calls.index(update_precommit_mock_call) < mock_manager.mock_calls.index(update_mock_call)) def test_update_policy_rule_check_rule_min_less_than_max(self): _policy = self._get_policy() setattr(_policy, "rules", [self.rule]) with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy): self.qos_plugin.update_policy_bandwidth_limit_rule( self.ctxt, self.rule.id, self.policy.id, self.rule_data) self.mock_qos_load_attr.assert_called_once_with('rules') self._validate_driver_params('update_policy', self.ctxt) rules = [self.rule, self.min_rule] setattr(_policy, "rules", rules) self.mock_qos_load_attr.reset_mock() with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy): 
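# with both a bandwidth-limit and a minimum-bandwidth rule now attached to
# the policy, the minimum-bandwidth update below must still satisfy the
# min_kbps <= max_kbps validation and therefore succeed.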
self.qos_plugin.update_policy_minimum_bandwidth_rule( self.ctxt, self.min_rule.id, self.policy.id, self.rule_data) self.mock_qos_load_attr.assert_called_once_with('rules') self._validate_driver_params('update_policy', self.ctxt) def test_update_policy_rule_check_rule_bwlimit_less_than_minbw(self): _policy = self._get_policy() setattr(_policy, "rules", [self.rule]) with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy): self.qos_plugin.update_policy_bandwidth_limit_rule( self.ctxt, self.rule.id, self.policy.id, self.rule_data) self.mock_qos_load_attr.assert_called_once_with('rules') self._validate_driver_params('update_policy', self.ctxt) self.rule_data['minimum_bandwidth_rule']['min_kbps'] = 1000 with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy): self.assertRaises( qos_exc.QoSRuleParameterConflict, self.qos_plugin.update_policy_minimum_bandwidth_rule, self.ctxt, self.min_rule.id, self.policy.id, self.rule_data) def test_update_policy_rule_check_rule_minbw_gr_than_bwlimit(self): _policy = self._get_policy() setattr(_policy, "rules", [self.min_rule]) with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy): self.qos_plugin.update_policy_minimum_bandwidth_rule( self.ctxt, self.min_rule.id, self.policy.id, self.rule_data) self.mock_qos_load_attr.assert_called_once_with('rules') self._validate_driver_params('update_policy', self.ctxt) self.rule_data['bandwidth_limit_rule']['max_kbps'] = 1 with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy): self.assertRaises( qos_exc.QoSRuleParameterConflict, self.qos_plugin.update_policy_bandwidth_limit_rule, self.ctxt, self.rule.id, self.policy.id, self.rule_data) def _get_policy(self): return policy_object.QosPolicy( self.ctxt, **self.policy_data['policy']) def test_update_policy_rule_bad_policy(self): _policy = policy_object.QosPolicy( self.ctxt, **self.policy_data['policy']) with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy): setattr(_policy, "rules", []) self.assertRaises( qos_exc.QosRuleNotFound, self.qos_plugin.update_policy_bandwidth_limit_rule, self.ctxt, self.rule.id, self.policy.id, self.rule_data) @mock.patch.object(rule_object.QosBandwidthLimitRule, 'delete') def test_delete_policy_rule(self, mock_qos_rule_delete): mock_manager = mock.Mock() mock_manager.attach_mock(mock_qos_rule_delete, 'delete') mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver') mock_manager.reset_mock() _policy = policy_object.QosPolicy( self.ctxt, **self.policy_data['policy']) with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy): setattr(_policy, "rules", [self.rule]) self.qos_plugin.delete_policy_bandwidth_limit_rule( self.ctxt, self.rule.id, _policy.id) self._validate_driver_params('update_policy', self.ctxt) rule_delete_mock_call = mock.call.delete() update_precommit_mock_call = mock.call.driver.call( 'update_policy_precommit', self.ctxt, mock.ANY) update_mock_call = mock.call.driver.call( 'update_policy', self.ctxt, mock.ANY) self.assertTrue( mock_manager.mock_calls.index(rule_delete_mock_call) < mock_manager.mock_calls.index(update_precommit_mock_call) < mock_manager.mock_calls.index(update_mock_call)) def test_delete_policy_rule_bad_policy(self): _policy = policy_object.QosPolicy( self.ctxt, **self.policy_data['policy']) with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy): setattr(_policy, "rules", []) 
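# the policy's rule list was emptied above, so deleting the bandwidth-limit
# rule must fail with QosRuleNotFound.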
self.assertRaises( qos_exc.QosRuleNotFound, self.qos_plugin.delete_policy_bandwidth_limit_rule, self.ctxt, self.rule.id, _policy.id) def test_get_policy_bandwidth_limit_rule(self): with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=self.policy): with mock.patch('neutron.objects.qos.rule.' 'QosBandwidthLimitRule.' 'get_object') as get_object_mock: self.qos_plugin.get_policy_bandwidth_limit_rule( self.ctxt, self.rule.id, self.policy.id) get_object_mock.assert_called_once_with(self.ctxt, id=self.rule.id) def test_get_policy_bandwidth_limit_rules_for_policy(self): with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=self.policy): with mock.patch('neutron.objects.qos.rule.' 'QosBandwidthLimitRule.' 'get_objects') as get_objects_mock: self.qos_plugin.get_policy_bandwidth_limit_rules( self.ctxt, self.policy.id) get_objects_mock.assert_called_once_with( self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id) def test_get_policy_bandwidth_limit_rules_for_policy_with_filters(self): with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=self.policy): with mock.patch('neutron.objects.qos.rule.' 'QosBandwidthLimitRule.' 'get_objects') as get_objects_mock: filters = {'filter': 'filter_id'} self.qos_plugin.get_policy_bandwidth_limit_rules( self.ctxt, self.policy.id, filters=filters) get_objects_mock.assert_called_once_with( self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id, filter='filter_id') def test_get_policy_for_nonexistent_policy(self): with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=None): self.assertRaises( qos_exc.QosPolicyNotFound, self.qos_plugin.get_policy, self.ctxt, self.policy.id) def test_get_policy_bandwidth_limit_rule_for_nonexistent_policy(self): with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=None): self.assertRaises( qos_exc.QosPolicyNotFound, self.qos_plugin.get_policy_bandwidth_limit_rule, self.ctxt, self.rule.id, self.policy.id) def test_get_policy_bandwidth_limit_rules_for_nonexistent_policy(self): with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=None): self.assertRaises( qos_exc.QosPolicyNotFound, self.qos_plugin.get_policy_bandwidth_limit_rules, self.ctxt, self.policy.id) def test_create_policy_dscp_marking_rule(self): _policy = policy_object.QosPolicy( self.ctxt, **self.policy_data['policy']) with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy): setattr(_policy, "rules", [self.dscp_rule]) self.qos_plugin.create_policy_dscp_marking_rule( self.ctxt, self.policy.id, self.rule_data) self._validate_driver_params('update_policy', self.ctxt) def test_update_policy_dscp_marking_rule(self): _policy = policy_object.QosPolicy( self.ctxt, **self.policy_data['policy']) with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy): setattr(_policy, "rules", [self.dscp_rule]) self.qos_plugin.update_policy_dscp_marking_rule( self.ctxt, self.dscp_rule.id, self.policy.id, self.rule_data) self._validate_driver_params('update_policy', self.ctxt) def test_delete_policy_dscp_marking_rule(self): _policy = policy_object.QosPolicy( self.ctxt, **self.policy_data['policy']) with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=_policy): setattr(_policy, "rules", [self.dscp_rule]) self.qos_plugin.delete_policy_dscp_marking_rule( self.ctxt, self.dscp_rule.id, self.policy.id) self._validate_driver_params('update_policy', self.ctxt) def 
test_get_policy_dscp_marking_rules(self): with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=self.policy): with mock.patch('neutron.objects.qos.rule.' 'QosDscpMarkingRule.' 'get_objects') as get_objects_mock: self.qos_plugin.get_policy_dscp_marking_rules( self.ctxt, self.policy.id) get_objects_mock.assert_called_once_with( self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id) def test_get_policy_dscp_marking_rules_for_policy_with_filters(self): with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=self.policy): with mock.patch('neutron.objects.qos.rule.' 'QosDscpMarkingRule.' 'get_objects') as get_objects_mock: filters = {'filter': 'filter_id'} self.qos_plugin.get_policy_dscp_marking_rules( self.ctxt, self.policy.id, filters=filters) get_objects_mock.assert_called_once_with( self.ctxt, qos_policy_id=self.policy.id, _pager=mock.ANY, filter='filter_id') def test_get_policy_dscp_marking_rule_for_nonexistent_policy(self): with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=None): self.assertRaises( qos_exc.QosPolicyNotFound, self.qos_plugin.get_policy_dscp_marking_rule, self.ctxt, self.dscp_rule.id, self.policy.id) def test_get_policy_dscp_marking_rules_for_nonexistent_policy(self): with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=None): self.assertRaises( qos_exc.QosPolicyNotFound, self.qos_plugin.get_policy_dscp_marking_rules, self.ctxt, self.policy.id) def test_get_policy_minimum_bandwidth_rule(self): with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=self.policy): with mock.patch('neutron.objects.qos.rule.' 'QosMinimumBandwidthRule.' 'get_object') as get_object_mock: self.qos_plugin.get_policy_minimum_bandwidth_rule( self.ctxt, self.rule.id, self.policy.id) get_object_mock.assert_called_once_with(self.ctxt, id=self.rule.id) def test_get_policy_minimum_bandwidth_rules_for_policy(self): with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=self.policy): with mock.patch('neutron.objects.qos.rule.' 'QosMinimumBandwidthRule.' 'get_objects') as get_objects_mock: self.qos_plugin.get_policy_minimum_bandwidth_rules( self.ctxt, self.policy.id) get_objects_mock.assert_called_once_with( self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id) def test_get_policy_minimum_bandwidth_rules_for_policy_with_filters(self): with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=self.policy): with mock.patch('neutron.objects.qos.rule.' 'QosMinimumBandwidthRule.' 
'get_objects') as get_objects_mock: filters = {'filter': 'filter_id'} self.qos_plugin.get_policy_minimum_bandwidth_rules( self.ctxt, self.policy.id, filters=filters) get_objects_mock.assert_called_once_with( self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id, filter='filter_id') def test_get_policy_minimum_bandwidth_rule_for_nonexistent_policy(self): with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=None): self.assertRaises( qos_exc.QosPolicyNotFound, self.qos_plugin.get_policy_minimum_bandwidth_rule, self.ctxt, self.rule.id, self.policy.id) def test_get_policy_minimum_bandwidth_rules_for_nonexistent_policy(self): with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=None): self.assertRaises( qos_exc.QosPolicyNotFound, self.qos_plugin.get_policy_minimum_bandwidth_rules, self.ctxt, self.policy.id) def test_create_policy_rule_for_nonexistent_policy(self): with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=None): self.assertRaises( qos_exc.QosPolicyNotFound, self.qos_plugin.create_policy_bandwidth_limit_rule, self.ctxt, self.policy.id, self.rule_data) def test_update_policy_rule_for_nonexistent_policy(self): with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=None): self.assertRaises( qos_exc.QosPolicyNotFound, self.qos_plugin.update_policy_bandwidth_limit_rule, self.ctxt, self.rule.id, self.policy.id, self.rule_data) def test_delete_policy_rule_for_nonexistent_policy(self): with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', return_value=None): self.assertRaises( qos_exc.QosPolicyNotFound, self.qos_plugin.delete_policy_bandwidth_limit_rule, self.ctxt, self.rule.id, self.policy.id) def test_verify_bad_method_call(self): self.assertRaises(AttributeError, getattr, self.qos_plugin, 'create_policy_bandwidth_limit_rules') def test_get_rule_type(self): admin_ctxt = context.get_admin_context() drivers_details = [{ 'name': 'fake-driver', 'supported_parameters': [{ 'parameter_name': 'max_kbps', 'parameter_type': lib_constants.VALUES_TYPE_RANGE, 'parameter_range': {'start': 0, 'end': 100} }] }] with mock.patch.object( qos_plugin.QoSPlugin, "supported_rule_type_details", return_value=drivers_details ): rule_type_details = self.qos_plugin.get_rule_type( admin_ctxt, qos_consts.RULE_TYPE_BANDWIDTH_LIMIT) self.assertEqual( qos_consts.RULE_TYPE_BANDWIDTH_LIMIT, rule_type_details['type']) self.assertEqual( drivers_details, rule_type_details['drivers']) def test_get_rule_type_as_user(self): self.assertRaises( lib_exc.NotAuthorized, self.qos_plugin.get_rule_type, self.ctxt, qos_consts.RULE_TYPE_BANDWIDTH_LIMIT) def test_get_rule_types(self): rule_types_mock = mock.PropertyMock( return_value=qos_consts.VALID_RULE_TYPES) filters = {'type': 'type_id'} with mock.patch.object(qos_plugin.QoSPlugin, 'supported_rule_types', new_callable=rule_types_mock): types = self.qos_plugin.get_rule_types(self.ctxt, filters=filters) self.assertEqual(sorted(qos_consts.VALID_RULE_TYPES), sorted(type_['type'] for type_ in types)) @mock.patch('neutron.objects.ports.Port') @mock.patch('neutron.objects.qos.policy.QosPolicy') def test_rule_notification_and_driver_ordering(self, qos_policy_mock, port_mock): rule_cls_mock = mock.Mock() rule_cls_mock.rule_type = 'fake' rule_actions = {'create': [self.ctxt, rule_cls_mock, self.policy.id, {'fake_rule': {}}], 'update': [self.ctxt, rule_cls_mock, self.rule.id, self.policy.id, {'fake_rule': {}}], 'delete': [self.ctxt, rule_cls_mock, self.rule.id, self.policy.id]} 
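# each action (create/update/delete) is dispatched via getattr() on the
# plugin, and the shared mock_manager below records both the rule operation
# and the driver notification so their relative order can be asserted.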
mock_manager = mock.Mock() mock_manager.attach_mock(qos_policy_mock, 'QosPolicy') mock_manager.attach_mock(port_mock, 'Port') mock_manager.attach_mock(rule_cls_mock, 'RuleCls') mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver') for action, arguments in rule_actions.items(): mock_manager.reset_mock() method = getattr(self.qos_plugin, "%s_policy_rule" % action) method(*arguments) # some actions get rule from policy get_rule_mock_call = getattr( mock.call.QosPolicy.get_policy_obj().get_rule_by_id(), action)() # some actions construct rule from class reference rule_mock_call = getattr(mock.call.RuleCls(), action)() driver_mock_call = mock.call.driver.call('update_policy', self.ctxt, mock.ANY) if rule_mock_call in mock_manager.mock_calls: action_index = mock_manager.mock_calls.index(rule_mock_call) else: action_index = mock_manager.mock_calls.index( get_rule_mock_call) self.assertLess( action_index, mock_manager.mock_calls.index(driver_mock_call)) class QoSRuleAliasTestExtensionManager(object): def get_resources(self): return qos_rules_alias.Qos_rules_alias.get_resources() def get_actions(self): return [] def get_request_extensions(self): return [] class TestQoSRuleAlias(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def setUp(self): # Remove MissingAuthPlugin exception from logs self.patch_notifier = mock.patch( 'neutron.notifiers.batch_notifier.BatchNotifier._notify') self.patch_notifier.start() plugin = 'ml2' service_plugins = {'qos_plugin_name': SERVICE_PLUGIN_KLASS} ext_mgr = QoSRuleAliasTestExtensionManager() super(TestQoSRuleAlias, self).setUp(plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) self.qos_plugin = directory.get_plugin(plugins_constants.QOS) self.ctxt = context.Context('fake_user', 'fake_tenant') self.rule_objects = { 'bandwidth_limit': rule_object.QosBandwidthLimitRule, 'dscp_marking': rule_object.QosDscpMarkingRule, 'minimum_bandwidth': rule_object.QosMinimumBandwidthRule } self.qos_policy_id = uuidutils.generate_uuid() self.rule_data = { 'bandwidth_limit_rule': {'max_kbps': 100, 'max_burst_kbps': 150}, 'dscp_marking_rule': {'dscp_mark': 16}, 'minimum_bandwidth_rule': {'min_kbps': 10} } def _update_rule(self, rule_type, rule_id, **kwargs): data = {'alias_%s_rule' % rule_type: kwargs} resource = '%s/alias-%s-rules' % (qos.ALIAS, rule_type.replace('_', '-')) request = self.new_update_request(resource, data, rule_id, self.fmt) res = request.get_response(self.ext_api) if res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError(code=res.status_int) return self.deserialize(self.fmt, res) def _show_rule(self, rule_type, rule_id): resource = '%s/alias-%s-rules' % (qos.ALIAS, rule_type.replace('_', '-')) request = self.new_show_request(resource, rule_id, self.fmt) res = request.get_response(self.ext_api) if res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError(code=res.status_int) return self.deserialize(self.fmt, res) def _delete_rule(self, rule_type, rule_id): resource = '%s/alias-%s-rules' % (qos.ALIAS, rule_type.replace('_', '-')) request = self.new_delete_request(resource, rule_id, self.fmt) res = request.get_response(self.ext_api) if res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError(code=res.status_int) @mock.patch.object(qos_plugin.QoSPlugin, "update_policy_rule") def test_update_rule(self, update_policy_rule_mock): calls = [] for rule_type, rule_object_class in self.rule_objects.items(): rule_id = uuidutils.generate_uuid() rule_data_name = '%s_rule' % 
rule_type data = self.rule_data[rule_data_name] rule = rule_object_class(self.ctxt, id=rule_id, qos_policy_id=self.qos_policy_id, **data) with mock.patch( 'neutron.objects.qos.rule.QosRule.get_object', return_value=rule ), mock.patch.object(self.qos_plugin, 'get_policy_rule', return_value=rule.to_dict()): self._update_rule(rule_type, rule_id, **data) calls.append(mock.call(mock.ANY, rule_object_class, rule_id, self.qos_policy_id, {rule_data_name: data})) update_policy_rule_mock.assert_has_calls(calls, any_order=True) @mock.patch.object(qos_plugin.QoSPlugin, "get_policy_rule") def test_show_rule(self, get_policy_rule_mock): calls = [] for rule_type, rule_object_class in self.rule_objects.items(): rule_id = uuidutils.generate_uuid() rule_data_name = '%s_rule' % rule_type data = self.rule_data[rule_data_name] rule = rule_object_class(self.ctxt, id=rule_id, qos_policy_id=self.qos_policy_id, **data) with mock.patch('neutron.objects.qos.rule.QosRule.get_object', return_value=rule): self._show_rule(rule_type, rule_id) calls.append(mock.call(mock.ANY, rule_object_class, rule_id, self.qos_policy_id)) get_policy_rule_mock.assert_has_calls(calls, any_order=True) @mock.patch.object(qos_plugin.QoSPlugin, "delete_policy_rule") def test_delete_rule(self, delete_policy_rule_mock): calls = [] for rule_type, rule_object_class in self.rule_objects.items(): rule_id = uuidutils.generate_uuid() rule_data_name = '%s_rule' % rule_type data = self.rule_data[rule_data_name] rule = rule_object_class(self.ctxt, id=rule_id, qos_policy_id=self.qos_policy_id, **data) with mock.patch( 'neutron.objects.qos.rule.QosRule.get_object', return_value=rule ), mock.patch.object(self.qos_plugin, 'get_policy_rule', return_value=rule.to_dict()): self._delete_rule(rule_type, rule_id) calls.append(mock.call(mock.ANY, rule_object_class, rule_id, self.qos_policy_id)) delete_policy_rule_mock.assert_has_calls(calls, any_order=True) def test_show_non_existing_rule(self): for rule_type, rule_object_class in self.rule_objects.items(): rule_id = uuidutils.generate_uuid() with mock.patch('neutron.objects.qos.rule.QosRule.get_object', return_value=None): resource = '%s/alias-%s-rules' % (qos.ALIAS, rule_type.replace('_', '-')) request = self.new_show_request(resource, rule_id, self.fmt) res = request.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int) class TestQosPluginDB(base.BaseQosTestCase): def setUp(self): super(TestQosPluginDB, self).setUp() self.setup_coreplugin(load_plugins=False) cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) cfg.CONF.set_override("service_plugins", ["qos"]) manager.init() self.qos_plugin = directory.get_plugin(plugins_constants.QOS) self.qos_plugin.driver_manager = mock.Mock() self.rpc_push = mock.patch('neutron.api.rpc.handlers.resources_rpc' '.ResourcesPushRpcApi.push').start() self.context = context.get_admin_context() self.project_id = uuidutils.generate_uuid() def _make_qos_policy(self): qos_policy = policy_object.QosPolicy( self.context, project_id=self.project_id, shared=False, is_default=False) qos_policy.create() return qos_policy def _make_port(self, network_id, qos_policy_id=None): base_mac = ['aa', 'bb', 'cc', 'dd', 'ee', 'ff'] mac = netaddr.EUI(next(net_utils.random_mac_generator(base_mac))) port = ports_object.Port( self.context, network_id=network_id, device_owner='3', project_id=self.project_id, admin_state_up=True, status='DOWN', device_id='2', qos_policy_id=qos_policy_id, mac_address=mac) port.create() return port def _make_network(self, 
qos_policy_id=None): network = network_object.Network(self.context, qos_policy_id=qos_policy_id) network.create() return network def _test_validate_create_port_callback(self, port_qos=False, network_qos=False): net_qos_obj = self._make_qos_policy() port_qos_obj = self._make_qos_policy() net_qos_id = net_qos_obj.id if network_qos else None port_qos_id = port_qos_obj.id if port_qos else None network = self._make_network(qos_policy_id=net_qos_id) port = self._make_port(network.id, qos_policy_id=port_qos_id) kwargs = {"context": self.context, "port": {"id": port.id}} with mock.patch.object(self.qos_plugin, 'validate_policy_for_port') \ as mock_validate_policy: self.qos_plugin._validate_create_port_callback( 'PORT', 'precommit_create', 'test_plugin', **kwargs) qos_policy = None if port_qos: qos_policy = port_qos_obj elif network_qos: qos_policy = net_qos_obj if qos_policy: mock_validate_policy.assert_called_once_with( self.context, qos_policy, port) else: mock_validate_policy.assert_not_called() def test_validate_create_port_callback_policy_on_port(self): self._test_validate_create_port_callback(port_qos=True) def test_validate_create_port_callback_policy_on_port_and_network(self): self._test_validate_create_port_callback(port_qos=True, network_qos=True) def test_validate_create_port_callback_policy_on_network(self): self._test_validate_create_port_callback(network_qos=True) def test_validate_create_port_callback_no_policy(self): self._test_validate_create_port_callback() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4870462 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/revisions/0000755000175000017500000000000000000000000024622 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/revisions/__init__.py0000644000175000017500000000000000000000000026721 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/revisions/test_revision_plugin.py0000644000175000017500000003324400000000000031455 0ustar00coreycorey00000000000000# All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron_lib import context as nctx from neutron_lib.db import api as db_api from neutron_lib.plugins import constants from neutron_lib.plugins import directory from oslo_config import cfg from oslo_db import exception as db_exc from oslo_utils import uuidutils from sqlalchemy.orm import session as se from webob import exc from neutron.db import models_v2 from neutron.objects import ports as port_obj from neutron.tests.unit.plugins.ml2 import test_plugin class TestRevisionPlugin(test_plugin.Ml2PluginV2TestCase): l3_plugin = ('neutron.tests.unit.extensions.test_extraroute.' 
'TestExtraRouteL3NatServicePlugin') _extension_drivers = ['qos'] def get_additional_service_plugins(self): p = super(TestRevisionPlugin, self).get_additional_service_plugins() p.update({'revision_plugin_name': 'revisions', 'qos_plugin_name': 'qos', 'tag_name': 'tag'}) return p def setUp(self): cfg.CONF.set_override('extension_drivers', self._extension_drivers, group='ml2') super(TestRevisionPlugin, self).setUp() self.cp = directory.get_plugin() self.l3p = directory.get_plugin(constants.L3) self._ctx = nctx.get_admin_context() self._tenant_id = uuidutils.generate_uuid() @property def ctx(self): # TODO(kevinbenton): return ctx without expire_all after switch to # enginefacade complete. We expire_all here because the switch to # the new engine facade is resulting in changes being spread over # other sessions so we can end up getting stale reads in the parent # session if objects remain in the identity map. if not self._ctx.session.is_active: self._ctx.session.expire_all() return self._ctx def test_handle_expired_object(self): rp = directory.get_plugin('revision_plugin') with self.port(): with self.ctx.session.begin(): ipal_objs = port_obj.IPAllocation.get_objects(self.ctx) if not ipal_objs: raise Exception("No IP allocations available.") ipal_obj = ipal_objs[0] # load port into our session port = self.ctx.session.query(models_v2.Port).one() # simulate concurrent delete in another session other_ctx = nctx.get_admin_context() other_ctx.session.delete( other_ctx.session.query(models_v2.Port).first() ) other_ctx.session.flush() # ensure no attribute lookups are attempted on an # object deleted from the session when doing related # bumps self.ctx.session.expire(port) collected = rp._collect_related_tobump( self.ctx.session, [ipal_obj], set()) rp._bump_obj_revisions( self.ctx.session, collected, version_check=False) def test_shared_network_create(self): # this test intends to run db_base_plugin_v2 -> create_network_db, # which in turn creates a Network and then a NetworkRBAC object. # An issue was observed with the revision_plugin which would interfere # with the flush process that occurs with these two connected objects, # creating two copies of the Network object in the Session and putting # it into an invalid state. 
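# simply creating a shared network exercises that flush path; the test
# passes as long as no session error is raised.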
with self.network(shared=True): pass def test_port_name_update_revises(self): with self.port() as port: rev = port['port']['revision_number'] new = {'port': {'name': 'seaweed'}} response = self._update('ports', port['port']['id'], new) new_rev = response['port']['revision_number'] self.assertGreater(new_rev, rev) def test_constrained_port_update(self): with self.port() as port: rev = port['port']['revision_number'] new = {'port': {'name': 'nigiri'}} for val in (rev - 1, rev + 1): # make sure off-by ones are rejected self._update('ports', port['port']['id'], new, headers={'If-Match': 'revision_number=%s' % val}, expected_code=exc.HTTPPreconditionFailed.code) after_attempt = self._show('ports', port['port']['id']) self.assertEqual(rev, after_attempt['port']['revision_number']) self.assertEqual(port['port']['name'], after_attempt['port']['name']) # correct revision should work self._update('ports', port['port']['id'], new, headers={'If-Match': 'revision_number=%s' % rev}) def test_constrained_port_delete(self): with self.port() as port: rev = port['port']['revision_number'] for val in (rev - 1, rev + 1): # make sure off-by ones are rejected self._delete('ports', port['port']['id'], headers={'If-Match': 'revision_number=%s' % val}, expected_code=exc.HTTPPreconditionFailed.code) # correct revision should work self._delete('ports', port['port']['id'], headers={'If-Match': 'revision_number=%s' % rev}) def test_constrained_port_update_handles_db_retries(self): # here we ensure all of the constraint handling logic persists # on retriable failures to commit caused by races with another # update with self.port() as port: rev = port['port']['revision_number'] new = {'port': {'name': 'nigiri'}} def concurrent_increment(s): db_api.sqla_remove(se.Session, 'before_commit', concurrent_increment) # slip in a concurrent update that will bump the revision plugin = directory.get_plugin() plugin.update_port(nctx.get_admin_context(), port['port']['id'], new) raise db_exc.DBDeadlock() db_api.sqla_listen(se.Session, 'before_commit', concurrent_increment) self._update('ports', port['port']['id'], new, headers={'If-Match': 'revision_number=%s' % rev}, expected_code=exc.HTTPPreconditionFailed.code) def test_port_ip_update_revises(self): with self.subnet() as subnet, self.port(subnet=subnet) as port: rev = port['port']['revision_number'] new = {'port': {'fixed_ips': port['port']['fixed_ips']}} # ensure adding an IP allocation updates the port free_ip = self._find_ip_address(subnet['subnet']) new['port']['fixed_ips'].append({'ip_address': free_ip}) response = self._update('ports', port['port']['id'], new) self.assertEqual(2, len(response['port']['fixed_ips'])) new_rev = response['port']['revision_number'] self.assertGreater(new_rev, rev) # ensure deleting an IP allocation updates the port rev = new_rev new['port']['fixed_ips'].pop() response = self._update('ports', port['port']['id'], new) self.assertEqual(1, len(response['port']['fixed_ips'])) new_rev = response['port']['revision_number'] self.assertGreater(new_rev, rev) def test_security_group_rule_ops_bump_security_group(self): s = {'security_group': {'tenant_id': 'some_tenant', 'name': '', 'description': 's'}} sg = self.cp.create_security_group(self.ctx, s) s['security_group']['name'] = 'hello' updated = self.cp.update_security_group(self.ctx, sg['id'], s) self.assertGreater(updated['revision_number'], sg['revision_number']) # ensure rule changes bump parent SG r = {'security_group_rule': {'tenant_id': 'some_tenant', 'port_range_min': 80, 'protocol': 6, 
'port_range_max': 90, 'remote_ip_prefix': '0.0.0.0/0', 'ethertype': 'IPv4', 'remote_group_id': None, 'direction': 'ingress', 'security_group_id': sg['id']}} rule = self.cp.create_security_group_rule(self.ctx, r) sg = updated updated = self.cp.get_security_group(self.ctx, sg['id']) self.assertGreater(updated['revision_number'], sg['revision_number']) self.cp.delete_security_group_rule(self.ctx, rule['id']) sg = updated updated = self.cp.get_security_group(self.ctx, sg['id']) self.assertGreater(updated['revision_number'], sg['revision_number']) def test_router_interface_ops_bump_router(self): r = {'router': {'name': 'myrouter', 'tenant_id': 'some_tenant', 'admin_state_up': True}} router = self.l3p.create_router(self.ctx, r) r['router']['name'] = 'yourrouter' updated = self.l3p.update_router(self.ctx, router['id'], r) self.assertGreater(updated['revision_number'], router['revision_number']) # add an intf and make sure it bumps rev with self.subnet(tenant_id='some_tenant', cidr='10.0.1.0/24') as s: interface_info = {'subnet_id': s['subnet']['id']} self.l3p.add_router_interface(self.ctx, router['id'], interface_info) router = updated updated = self.l3p.get_router(self.ctx, router['id']) self.assertGreater(updated['revision_number'], router['revision_number']) # Add a route and make sure it bumps revision number router = updated body = {'router': {'routes': [{'destination': '192.168.2.0/24', 'nexthop': '10.0.1.3'}]}} self.l3p.update_router(self.ctx, router['id'], body) updated = self.l3p.get_router(self.ctx, router['id']) self.assertGreater(updated['revision_number'], router['revision_number']) router = updated body['router']['routes'] = [] self.l3p.update_router(self.ctx, router['id'], body) updated = self.l3p.get_router(self.ctx, router['id']) self.assertGreater(updated['revision_number'], router['revision_number']) self.l3p.remove_router_interface(self.ctx, router['id'], interface_info) router = updated updated = self.l3p.get_router(self.ctx, router['id']) self.assertGreater(updated['revision_number'], router['revision_number']) def test_qos_policy_bump_port_revision(self): with self.port() as port: rev = port['port']['revision_number'] qos_plugin = directory.get_plugin('QOS') qos_policy = {'policy': {'id': uuidutils.generate_uuid(), 'name': "policy1", 'project_id': uuidutils.generate_uuid()}} qos_obj = qos_plugin.create_policy(self.ctx, qos_policy) data = {'port': {'qos_policy_id': qos_obj['id']}} response = self._update('ports', port['port']['id'], data) new_rev = response['port']['revision_number'] self.assertGreater(new_rev, rev) def test_qos_policy_bump_network_revision(self): with self.network() as network: rev = network['network']['revision_number'] qos_plugin = directory.get_plugin('QOS') qos_policy = {'policy': {'id': uuidutils.generate_uuid(), 'name': "policy1", 'project_id': uuidutils.generate_uuid()}} qos_obj = qos_plugin.create_policy(self.ctx, qos_policy) data = {'network': {'qos_policy_id': qos_obj['id']}} response = self._update('networks', network['network']['id'], data) new_rev = response['network']['revision_number'] self.assertGreater(new_rev, rev) def test_net_tag_bumps_net_revision(self): with self.network() as network: rev = network['network']['revision_number'] tag_plugin = directory.get_plugin('TAG') tag_plugin.update_tag(self.ctx, 'networks', network['network']['id'], 'mytag') updated = directory.get_plugin().get_network( self.ctx, network['network']['id']) self.assertGreater(updated['revision_number'], rev) tag_plugin.delete_tag(self.ctx, 'networks', 
network['network']['id'], 'mytag') rev = updated['revision_number'] updated = directory.get_plugin().get_network( self.ctx, network['network']['id']) self.assertGreater(updated['revision_number'], rev) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/test_provider_configuration.py0000644000175000017500000002604000000000000030775 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import shutil import mock from neutron_lib import exceptions as n_exc from neutron_lib.plugins import constants from oslo_config import cfg from neutron import manager from neutron.services import provider_configuration as provconf from neutron.tests import base class ParseServiceProviderConfigurationTestCase(base.BaseTestCase): def setUp(self): super(ParseServiceProviderConfigurationTestCase, self).setUp() self.service_providers = mock.patch.object( provconf.NeutronModule, 'service_providers').start() def _set_override(self, service_providers): self.service_providers.return_value = service_providers def test_default_service_provider_configuration(self): providers = cfg.CONF.service_providers.service_provider self.assertEqual([], providers) def test_parse_single_service_provider_opt(self): self._set_override([constants.FIREWALL + ':fwaas:driver_path']) expected = {'service_type': constants.FIREWALL, 'name': 'fwaas', 'driver': 'driver_path', 'default': False} res = provconf.parse_service_provider_opt() self.assertEqual(1, len(res)) self.assertEqual([expected], res) def test_parse_single_default_service_provider_opt(self): self._set_override([constants.FIREWALL + ':fwaas:driver_path:default']) expected = {'service_type': constants.FIREWALL, 'name': 'fwaas', 'driver': 'driver_path', 'default': True} res = provconf.parse_service_provider_opt() self.assertEqual(1, len(res)) self.assertEqual([expected], res) def test_parse_multi_service_provider_opt(self): self._set_override([constants.FIREWALL + ':fwaas:driver_path', constants.FIREWALL + ':name1:path1', constants.FIREWALL + ':name2:path2:default']) res = provconf.parse_service_provider_opt() # This parsing crosses repos if additional projects are installed, # so check that at least what we expect is there; there may be more. 
self.assertGreaterEqual(len(res), 3) def test_parse_service_provider_invalid_format(self): self._set_override([constants.FIREWALL + ':fwaas:driver_path', 'svc_type:name1:path1:def']) self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt) self._set_override([constants.FIREWALL + ':', 'svc_type:name1:path1:def']) self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt) def test_parse_service_provider_name_too_long(self): name = 'a' * 256 self._set_override([constants.FIREWALL + ':' + name + ':driver_path', 'svc_type:name1:path1:def']) self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt) class ProviderConfigurationTestCase(base.BaseTestCase): def setUp(self): super(ProviderConfigurationTestCase, self).setUp() self.service_providers = mock.patch.object( provconf.NeutronModule, 'service_providers').start() def _set_override(self, service_providers): self.service_providers.return_value = service_providers def test_ensure_driver_unique(self): pconf = provconf.ProviderConfiguration() pconf.providers[('svctype', 'name')] = {'driver': 'driver', 'default': True} self.assertRaises(n_exc.Invalid, pconf._ensure_driver_unique, 'driver') self.assertIsNone(pconf._ensure_driver_unique('another_driver1')) def test_ensure_default_unique(self): pconf = provconf.ProviderConfiguration() pconf.providers[('svctype', 'name')] = {'driver': 'driver', 'default': True} self.assertRaises(n_exc.Invalid, pconf._ensure_default_unique, 'svctype', True) self.assertIsNone(pconf._ensure_default_unique('svctype', False)) self.assertIsNone(pconf._ensure_default_unique('svctype1', True)) self.assertIsNone(pconf._ensure_default_unique('svctype1', False)) def test_add_provider(self): pconf = provconf.ProviderConfiguration() prov = {'service_type': constants.FIREWALL, 'name': 'name', 'driver': 'path', 'default': False} pconf.add_provider(prov) self.assertEqual(1, len(pconf.providers)) self.assertEqual([(constants.FIREWALL, 'name')], list(pconf.providers.keys())) self.assertEqual([{'driver': 'path', 'default': False}], list(pconf.providers.values())) def test_add_duplicate_provider(self): pconf = provconf.ProviderConfiguration() prov = {'service_type': constants.FIREWALL, 'name': 'name', 'driver': 'path', 'default': False} pconf.add_provider(prov) self.assertRaises(n_exc.Invalid, pconf.add_provider, prov) self.assertEqual(1, len(pconf.providers)) def test_get_service_providers(self): self._set_override([constants.FIREWALL + ':name:path', constants.FIREWALL + ':name2:path2', 'st2:name:driver:default', 'st3:name2:driver2:default']) provs = [{'service_type': constants.FIREWALL, 'name': 'name', 'driver': 'path', 'default': False}, {'service_type': constants.FIREWALL, 'name': 'name2', 'driver': 'path2', 'default': False}, {'service_type': 'st2', 'name': 'name', 'driver': 'driver', 'default': True }, {'service_type': 'st3', 'name': 'name2', 'driver': 'driver2', 'default': True}] pconf = provconf.ProviderConfiguration() for prov in provs: p = pconf.get_service_providers( filters={'name': [prov['name']], 'service_type': prov['service_type']} ) self.assertEqual([prov], p) def test_get_service_providers_with_fields(self): self._set_override([constants.FIREWALL + ":name:path", constants.FIREWALL + ":name2:path2"]) provs = [{'service_type': constants.FIREWALL, 'name': 'name', 'driver': 'path', 'default': False}, {'service_type': constants.FIREWALL, 'name': 'name2', 'driver': 'path2', 'default': False}] pconf = provconf.ProviderConfiguration() for prov in provs: p = pconf.get_service_providers( 
filters={'name': [prov['name']], 'service_type': prov['service_type']}, fields=['name'] ) self.assertEqual([{'name': prov['name']}], p) class GetProviderDriverClassTestCase(base.BaseTestCase): def test_get_provider_driver_class_hit(self): driver = 'ml2' expected = 'neutron.plugins.ml2.plugin.Ml2Plugin' actual = provconf.get_provider_driver_class( driver, namespace=manager.CORE_PLUGINS_NAMESPACE) self.assertEqual(expected, actual) def test_get_provider_driver_class_miss(self): retval = provconf.get_provider_driver_class('foo') self.assertEqual('foo', retval) class NeutronModuleTestCase(base.BaseTestCase): def test_can_parse_multi_opt_service_provider_from_conf_file(self): mod = provconf.NeutronModule('neutron_test') mod.ini(base.ETCDIR) self.assertEqual(['foo', 'bar'], mod.service_providers(), 'Expected two providers, only one read') class NeutronModuleConfigDirTestCase(base.BaseTestCase): def setup_config(self): self.config_parse(args=['--config-dir', base.ETCDIR]) def test_can_parse_multi_opt_service_provider_from_conf_dir(self): mod = provconf.NeutronModule('neutron_test') mod.ini() self.assertEqual(['foo', 'bar'], mod.service_providers()) class NeutronModuleMultiConfigDirTestCase(base.BaseTestCase): def setUp(self): self.tmpdir = self.get_default_temp_dir().path shutil.copyfile( os.path.join(base.ETCDIR, 'neutron_test2.conf.example'), os.path.join(self.tmpdir, 'neutron_test.conf')) super(NeutronModuleMultiConfigDirTestCase, self).setUp() def setup_config(self): self.config_parse(args=[ # NOTE(ihrachys): we expect the second directory to be checked '--config-dir', self.tmpdir, '--config-dir', base.ETCDIR ]) def test_read_configuration_from_all_matching_files(self): mod = provconf.NeutronModule('neutron_test') mod.ini() self.assertEqual(['zzz', 'foo', 'bar'], mod.service_providers()) class NeutronModuleMultiConfigFileTestCase(base.BaseTestCase): def setUp(self): self.tmpdir = self.get_default_temp_dir().path self.filepath1 = os.path.join(self.tmpdir, 'neutron_test.conf') self.filepath2 = os.path.join(base.ETCDIR, 'neutron_test.conf') shutil.copyfile( os.path.join(base.ETCDIR, 'neutron_test2.conf.example'), self.filepath1) super(NeutronModuleMultiConfigFileTestCase, self).setUp() def setup_config(self): self.config_parse(args=[ # NOTE(ihrachys): we expect both directories to be checked '--config-file', self.filepath1, '--config-file', self.filepath2 ]) def test_read_configuration_from_all_matching_files(self): mod = provconf.NeutronModule('neutron_test') mod.ini() self.assertEqual(['zzz', 'foo', 'bar'], mod.service_providers()) class NeutronModuleConfigNotParsedTestCase(base.DietTestCase): def setup_config(self): pass def test_ini_no_crash_if_config_files_not_parsed(self): mod = provconf.NeutronModule('neutron_test') mod.ini() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4870462 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/0000755000175000017500000000000000000000000023744 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/__init__.py0000644000175000017500000000000000000000000026043 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4870462 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/drivers/0000755000175000017500000000000000000000000025422 
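# Illustrative sketch (not Neutron's provider_configuration implementation):
# the tests above exercise option strings of the form
# '<service_type>:<name>:<driver>[:default]'. A minimal parser for that
# layout, with the helper name parse_provider assumed for illustration only,
# could look like this:
def parse_provider(line):
    """Parse 'service_type:name:driver[:default]' into a dict (sketch)."""
    parts = line.split(':')
    if len(parts) < 3 or not all(parts[:3]):
        raise ValueError('invalid service provider: %s' % line)
    if len(parts) == 3:
        default = False
    elif len(parts) == 4 and parts[3] == 'default':
        default = True
    else:
        # anything after the driver other than the literal 'default'
        # (e.g. ':def') is rejected, mirroring the invalid-format tests
        raise ValueError('invalid service provider: %s' % line)
    return {'service_type': parts[0], 'name': parts[1],
            'driver': parts[2], 'default': default}
# e.g. parse_provider('FIREWALL:fwaas:driver_path:default') returns
# {'service_type': 'FIREWALL', 'name': 'fwaas', 'driver': 'driver_path',
#  'default': True}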
neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/drivers/linuxbridge/agent/test_driver.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock from neutron_lib.callbacks import events as cb_events from neutron_lib.services.trunk import constants as t_const import oslo_messaging from oslo_utils import uuidutils import testtools from neutron.api.rpc.callbacks import events from neutron.api.rpc.handlers import resources_rpc from neutron.objects import trunk from neutron.services.trunk.drivers.linuxbridge.agent import driver from neutron.services.trunk.drivers.linuxbridge.agent import trunk_plumber from neutron.tests import base class LinuxBridgeTrunkDriverTestCase(base.BaseTestCase): def setUp(self): super(LinuxBridgeTrunkDriverTestCase, self).setUp() self.plumber = mock.create_autospec(trunk_plumber.Plumber()) self.stub = mock.create_autospec(driver.trunk_rpc.TrunkStub()) self.tapi = mock.create_autospec(driver._TrunkAPI(self.stub)) self.lbd = driver.LinuxBridgeTrunkDriver(self.plumber, self.tapi) self.trunk = trunk.Trunk(id=uuidutils.generate_uuid(), port_id=uuidutils.generate_uuid(), project_id=uuidutils.generate_uuid()) self.subports = [trunk.SubPort(id=uuidutils.generate_uuid(), port_id=uuidutils.generate_uuid(), segmentation_type='vlan', trunk_id=self.trunk.id, segmentation_id=i) for i in range(20)] self.trunk.sub_ports = self.subports def test_handle_trunks_created(self): self._test_handle_trunks_wire_event(events.CREATED) def test_handle_trunks_updated(self): self._test_handle_trunks_wire_event(events.UPDATED) def _test_handle_trunks_wire_event(self, event): self.plumber.trunk_on_host.return_value = True self.lbd.handle_trunks(mock.Mock(), 'TRUNKS', [self.trunk], event) self.tapi.put_trunk.assert_called_once_with( self.trunk.port_id, self.trunk) self.tapi.bind_subports_to_host.assert_called_once_with( mock.ANY, self.trunk) self.assertFalse(self.plumber.delete_trunk_subports.called) def test_handle_trunks_deleted(self): self.lbd.handle_trunks(mock.Mock(), 'TRUNKS', [self.trunk], events.DELETED) self.tapi.put_trunk.assert_called_once_with( self.trunk.port_id, None) self.plumber.delete_trunk_subports.assert_called_once_with(self.trunk) def test_handle_subports_deleted(self): self.tapi.get_trunk_by_id.return_value = self.trunk self.lbd.handle_subports(mock.Mock(), 'TRUNKS', self.trunk.sub_ports, events.DELETED) self.assertEqual(20, len(self.tapi.delete_trunk_subport.mock_calls)) # should have tried to wire trunk at the end with state self.plumber.trunk_on_host.assert_called_once_with(self.trunk) def test_handle_subports_created(self): self.tapi.get_trunk_by_id.return_value = self.trunk self.lbd.handle_subports(mock.Mock(), 'TRUNKS', self.trunk.sub_ports, events.CREATED) self.assertEqual(20, len(self.tapi.put_trunk_subport.mock_calls)) # should have tried to wire trunk at the end with state self.plumber.trunk_on_host.assert_called_once_with(self.trunk) def test_agent_port_change_is_trunk(self): self.tapi.get_trunk.return_value = self.trunk self.lbd.agent_port_change( 'resource', 'event', 'trigger', payload=cb_events.DBEventPayload( 'context', states=({'port_id': self.trunk.port_id},), resource_id=self.trunk.port_id)) # should have tried to wire trunk self.plumber.trunk_on_host.assert_called_once_with(self.trunk) def test_agent_port_change_not_trunk(self): self.tapi.get_trunk.return_value = None self.tapi.get_trunk_for_subport.return_value = None other_port_id = uuidutils.generate_uuid() self.lbd.agent_port_change( 'resource', 'event', 'trigger', payload=cb_events.DBEventPayload( 'context', states=({'port_id': other_port_id},), resource_id=other_port_id)) self.plumber.delete_subports_by_port_id.assert_called_once_with( 
other_port_id) def test_agent_port_change_is_subport(self): self.tapi.get_trunk.return_value = None self.tapi.get_trunk_for_subport.return_value = self.trunk port_dev = {'port_id': self.trunk.sub_ports[0].port_id, 'mac_address': 'mac_addr'} self.lbd.agent_port_change( 'resource', 'event', 'trigger', payload=cb_events.DBEventPayload( 'context', states=(port_dev,), resource_id=port_dev['port_id'])) self.plumber.delete_subports_by_port_id.assert_called_once_with( self.trunk.sub_ports[0].port_id) def test_wire_trunk_happy_path(self): self.lbd.wire_trunk('ctx', self.trunk) self.tapi.bind_subports_to_host.assert_called_once_with( 'ctx', self.trunk) self.plumber.ensure_trunk_subports.assert_called_once_with(self.trunk) self.tapi.set_trunk_status.assert_called_once_with( 'ctx', self.trunk, t_const.TRUNK_ACTIVE_STATUS) def test_wire_trunk_not_on_host(self): # trunk device not on host self.plumber.trunk_on_host.return_value = False self.lbd.wire_trunk('ctx', self.trunk) # don't bind and don't set status self.assertFalse(self.tapi.bind_subports_to_host.called) self.assertFalse(self.tapi.set_trunk_status.called) def test_wire_trunk_concurrent_removal(self): self.plumber.trunk_on_host.side_effect = [True, False] self.plumber.ensure_trunk_subports.side_effect = ValueError() self.lbd.wire_trunk('ctx', self.trunk) # we don't change status if port was just removed self.assertFalse(self.tapi.set_trunk_status.called) def test_wire_trunk_other_exception(self): self.plumber.ensure_trunk_subports.side_effect = ValueError() self.lbd.wire_trunk('ctx', self.trunk) # degraded due to dataplane failure self.tapi.set_trunk_status.assert_called_once_with( 'ctx', self.trunk, t_const.TRUNK_DEGRADED_STATUS) class TrunkAPITestCase(base.BaseTestCase): def setUp(self): super(TrunkAPITestCase, self).setUp() self.stub = mock.create_autospec(driver.trunk_rpc.TrunkStub()) self.tapi = driver._TrunkAPI(self.stub) self.trunk = trunk.Trunk(id=uuidutils.generate_uuid(), port_id=uuidutils.generate_uuid(), project_id=uuidutils.generate_uuid()) self.subports = [trunk.SubPort(id=uuidutils.generate_uuid(), port_id=uuidutils.generate_uuid(), segmentation_type='vlan', trunk_id=self.trunk.id, segmentation_id=i) for i in range(20)] self.trunk.sub_ports = self.subports self.stub.get_trunk_details.return_value = self.trunk def test_fetch_trunk(self): self.assertEqual(self.trunk, self.tapi._fetch_trunk('ctx', 'port')) self.stub.get_trunk_details.assert_called_once_with('ctx', 'port') def test_fetch_trunk_missing(self): self.stub.get_trunk_details.side_effect = ( resources_rpc.ResourceNotFound(resource_id='1', resource_type='1')) self.assertIsNone(self.tapi._fetch_trunk('ctx', 'port')) def test_fetch_trunk_plugin_disabled(self): self.stub.get_trunk_details.side_effect = ( oslo_messaging.RemoteError('CallbackNotFound')) self.assertIsNone(self.tapi._fetch_trunk('ctx', 'port')) def test_fetch_trunk_plugin_other_error(self): self.stub.get_trunk_details.side_effect = ( oslo_messaging.RemoteError('vacuum full')) with testtools.ExpectedException(oslo_messaging.RemoteError): self.tapi._fetch_trunk('ctx', 'port') def test_set_trunk_status(self): self.tapi.set_trunk_status('ctx', self.trunk, 'STATUS') self.stub.update_trunk_status.assert_called_once_with( 'ctx', self.trunk.id, 'STATUS') def test_bind_subports_to_host(self): self.tapi.bind_subports_to_host('ctx', self.trunk) self.stub.update_subport_bindings.assert_called_once_with( 'ctx', self.trunk.sub_ports) def test_put_trunk_subport_non_existent_trunk(self): # trunks not registered are ignored 
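# put_trunk_subport() for a trunk id that was never registered via
# put_trunk() must be a silent no-op; the cache hit/miss behavior itself is
# covered by the get_trunk tests below.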
self.tapi.put_trunk_subport( 'non_trunk_id', self.trunk.sub_ports[0]) def test_get_trunk_by_id(self): self.tapi.put_trunk(self.trunk.port_id, self.trunk) self.assertEqual(self.trunk, self.tapi.get_trunk_by_id('ctx', self.trunk.id)) self.assertIsNone(self.tapi.get_trunk_by_id('ctx', 'other_id')) def test_put_trunk_subport(self): self.tapi.put_trunk(self.trunk.port_id, self.trunk) new = trunk.SubPort(id=uuidutils.generate_uuid(), port_id=uuidutils.generate_uuid(), segmentation_type='vlan', trunk_id=self.trunk.id, segmentation_id=1010) self.tapi.put_trunk_subport(self.trunk.id, new) subs = self.tapi.get_trunk('ctx', self.trunk.port_id).sub_ports self.assertEqual(21, len(subs)) self.assertEqual(new, subs[-1]) def test_delete_trunk_subport(self): self.tapi.put_trunk(self.trunk.port_id, self.trunk) sub = self.trunk.sub_ports[10] self.tapi.delete_trunk_subport(self.trunk.id, sub) subs = self.tapi.get_trunk('ctx', self.trunk.port_id).sub_ports self.assertNotIn(sub, subs) self.assertEqual(19, len(subs)) def test_get_trunk(self): self.tapi.put_trunk(self.trunk.port_id, self.trunk) self.assertEqual(self.trunk, self.tapi.get_trunk('ctx', self.trunk.port_id)) self.tapi.get_trunk('ctx', self.trunk.port_id) self.assertFalse(self.stub.get_trunk_details.called) def test_get_trunk_cache_miss(self): self.assertEqual(self.trunk, self.tapi.get_trunk('ctx', self.trunk.port_id)) self.tapi.get_trunk('ctx', self.trunk.port_id) self.assertEqual(1, len(self.stub.get_trunk_details.mock_calls)) def test_get_trunk_not_found(self): self.stub.get_trunk_details.side_effect = ( resources_rpc.ResourceNotFound(resource_id='1', resource_type='1')) self.assertIsNone(self.tapi.get_trunk('ctx', self.trunk.port_id)) self.tapi.get_trunk('ctx', self.trunk.port_id) self.assertEqual(1, len(self.stub.get_trunk_details.mock_calls)) def test_get_trunk_for_subport(self): self.tapi.put_trunk(self.trunk.port_id, self.trunk) t = self.tapi.get_trunk_for_subport( 'ctx', self.trunk.sub_ports[0].port_id) self.assertEqual(self.trunk, t) ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/drivers/linuxbridge/agent/test_trunk_plumber.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/drivers/linuxbridge/agent/test_trunk_plu0000644000175000017500000001220500000000000034041 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
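# Illustrative sketch (assumed helper, not the Plumber API): given device
# dicts shaped like the IP_LINK_OUTPUT fixture below, the (name, vlan_id)
# children of a trunk device can be derived by matching the 'kind' and
# 'parent_name' keys, which is the relationship the _get_vlan_children
# tests assert on:
def vlan_children(devices, parent_name):
    """Return the set of (name, vlan_id) vlan sub-interfaces of a device."""
    return {(d['name'], d['vlan_id']) for d in devices
            if d.get('kind') == 'vlan' and d.get('parent_name') == parent_name}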
import mock from oslo_utils import uuidutils from neutron.agent.linux import ip_lib from neutron.objects import trunk from neutron.services.trunk.drivers.linuxbridge.agent import trunk_plumber from neutron.tests import base IP_LINK_OUTPUT = [ {'index': 1, 'name': 'lo'}, {'index': 2, 'name': 'eth0'}, {'index': 3, 'name': 'bond0'}, {'index': 4, 'name': 'ovs-system'}, {'index': 5, 'name': 'br-ex'}, {'index': 6, 'name': 'testb9cfb5d7'}, {'index': 7, 'name': 'br-int'}, {'index': 8, 'name': 'br-tun'}, {'index': 10, 'name': 'tapa962cfc7-9d'}, {'index': 11, 'name': 'tap39df7d39-c5', 'kind': 'vlan', 'parent_name': 'tapa962cfc7-9d', 'vlan_id': 99}, {'index': 12, 'name': 'tap39df7d44-b2', 'kind': 'vlan', 'parent_name': 'tapa962cfc7-9d', 'vlan_id': 904}, {'index': 13, 'name': 'tap11113d44-3f', 'kind': 'vlan', 'parent_name': 'tapa962cfc7-9d', 'vlan_id': 777}, {'index': 14, 'name': 'tap34786ac-28'}, {'index': 15, 'name': 'tap47198374-5a', 'kind': 'vlan', 'parent_name': 'tap34786ac-28', 'vlan_id': 777}, {'index': 16, 'name': 'tap47198374-5b', 'kind': 'vlan', 'parent_name': 'tap34786ac-28', 'vlan_id': 2}, {'index': 17, 'name': 'tap47198374-5c', 'kind': 'vlan', 'parent_name': 'tap34786ac-28', 'vlan_id': 3} ] class PlumberTestCase(base.BaseTestCase): def setUp(self): self.plumber = trunk_plumber.Plumber() self.get_tap_device_name = mock.patch.object( self.plumber, '_get_tap_device_name', return_value='devname').start() self.trunk = trunk.Trunk() self.trunk.port_id = uuidutils.generate_uuid() self.trunk.sub_ports = [] self.device_exists = mock.patch.object(ip_lib, 'device_exists').start() self.device_exists.return_value = True self.mock_get_devices = mock.patch.object(ip_lib, 'get_devices_info').start() # ipwrap.return_value.netns.execute.return_value = IP_LINK_OUTPUT super(PlumberTestCase, self).setUp() def test_trunk_on_host(self): self.assertTrue(self.plumber.trunk_on_host(self.trunk)) self.device_exists.return_value = False self.assertFalse(self.plumber.trunk_on_host(self.trunk)) def test_ensure_trunk_subports(self): trunk_vals = set([('dev2', 23), ('dev3', 44), ('dev4', 45)]) existing_vals = set([('dev1', 21), ('dev2', 23), ('dev3', 45)]) mock.patch.object(self.plumber, '_get_subport_devs_and_vlans', return_value=trunk_vals).start() mock.patch.object(self.plumber, '_get_vlan_children', return_value=existing_vals).start() delete = mock.patch.object(self.plumber, '_safe_delete_device').start() create = mock.patch.object(self.plumber, '_create_vlan_subint').start() self.plumber.ensure_trunk_subports(self.trunk) # dev1 is gone and dev3 changed vlans delete.assert_has_calls([mock.call('dev3'), mock.call('dev1')], any_order=True) create.assert_has_calls([mock.call('devname', 'dev4', 45), mock.call('devname', 'dev3', 44)], any_order=True) def test_delete_trunk_subports(self): existing_vals = set([('dev1', 21), ('dev2', 23), ('dev3', 45)]) mock.patch.object(self.plumber, '_get_vlan_children', return_value=existing_vals).start() delete = mock.patch.object(self.plumber, '_safe_delete_device').start() self.plumber.delete_trunk_subports(self.trunk) delete.assert_has_calls([mock.call('dev3'), mock.call('dev2'), mock.call('dev1')], any_order=True) def test__get_vlan_children(self): self.mock_get_devices.return_value = IP_LINK_OUTPUT expected = [('tap47198374-5a', 777), ('tap47198374-5b', 2), ('tap47198374-5c', 3)] self.assertEqual(set(expected), self.plumber._get_vlan_children('tap34786ac-28')) expected = [('tap39df7d39-c5', 99), ('tap39df7d44-b2', 904), ('tap11113d44-3f', 777)] self.assertEqual(set(expected), 
self.plumber._get_vlan_children('tapa962cfc7-9d')) # vlan sub-interface and non-trunk shouldn't have children self.assertEqual(set(), self.plumber._get_vlan_children('tap47198374-5c')) self.assertEqual(set(), self.plumber._get_vlan_children('br-int')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/drivers/linuxbridge/test_driver.py0000644000175000017500000000324000000000000032641 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants from oslo_config import cfg from neutron.services.trunk.drivers.linuxbridge import driver from neutron.tests import base class LinuxBridgeDriverTestCase(base.BaseTestCase): def test_driver_is_loaded(self): inst = driver.LinuxBridgeDriver.create() cfg.CONF.set_override('mechanism_drivers', ['a', 'b', 'linuxbridge'], group='ml2') self.assertTrue(inst.is_loaded) cfg.CONF.set_override('mechanism_drivers', ['a', 'b'], group='ml2') self.assertFalse(inst.is_loaded) cfg.CONF.set_override('core_plugin', 'my_foo_plugin') self.assertFalse(inst.is_loaded) def test_driver_properties(self): inst = driver.LinuxBridgeDriver.create() self.assertEqual(driver.NAME, inst.name) self.assertEqual(driver.SUPPORTED_INTERFACES, inst.interfaces) self.assertEqual(driver.SUPPORTED_SEGMENTATION_TYPES, inst.segmentation_types) self.assertEqual(constants.AGENT_TYPE_LINUXBRIDGE, inst.agent_type) self.assertTrue(inst.can_trunk_bound_port) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4870462 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/drivers/openvswitch/0000755000175000017500000000000000000000000027773 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/drivers/openvswitch/__init__.py0000644000175000017500000000000000000000000032072 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4870462 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/drivers/openvswitch/agent/0000755000175000017500000000000000000000000031071 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/drivers/openvswitch/agent/__init__.py0000644000175000017500000000000000000000000033170 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/drivers/openvswitch/agent/test_driver.py0000644000175000017500000001445000000000000034001 0ustar00coreycorey00000000000000# Copyright 2016 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 
(the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import oslo_messaging from oslo_utils import uuidutils from neutron.api.rpc.callbacks import events from neutron.api.rpc.callbacks import resources from neutron.objects import trunk as trunk_obj from neutron.services.trunk.drivers.openvswitch.agent import driver from neutron.services.trunk.drivers.openvswitch.agent import ovsdb_handler from neutron.tests import base TRUNK_MANAGER = ('neutron.services.trunk.drivers.openvswitch.agent.' 'trunk_manager.TrunkManager') class OvsTrunkSkeletonTest(base.BaseTestCase): def setUp(self): super(OvsTrunkSkeletonTest, self).setUp() trunk_manager_cls_mock = mock.patch(TRUNK_MANAGER).start() self.trunk_manager = trunk_manager_cls_mock.return_value handler = ovsdb_handler.OVSDBHandler(self.trunk_manager) mock.patch.object(handler, 'trunk_rpc').start() mock.patch.object(handler, '_set_trunk_metadata').start() mock.patch.object( handler, 'manages_this_trunk', return_value=True).start() self.skeleton = driver.OVSTrunkSkeleton(handler) self.trunk_id = uuidutils.generate_uuid() self.subports = [ trunk_obj.SubPort( port_id=uuidutils.generate_uuid(), trunk_id=self.trunk_id, segmentation_type='foo', segmentation_id=i) for i in range(2)] @mock.patch("neutron.api.rpc.callbacks.resource_manager." "ConsumerResourceCallbacksManager.unregister") def test___init__(self, mocked_unregister): test_obj = driver.OVSTrunkSkeleton(mock.ANY) mocked_unregister.assert_called_with(test_obj.handle_trunks, resources.TRUNK) @mock.patch('neutron.agent.common.ovs_lib.OVSBridge') def test_handle_subports_created(self, br): """Test handler calls into trunk manager for adding subports.""" def fake_update_subport_bindings(context, subports): return { self.trunk_id: [ {'id': subport.port_id, 'mac_address': "mac%d" % subport.segmentation_id} for subport in subports]} trunk_rpc = self.skeleton.ovsdb_handler.trunk_rpc trunk_rpc.update_subport_bindings.side_effect = ( fake_update_subport_bindings) self.skeleton.handle_subports(mock.Mock(), 'SUBPORTS', self.subports, events.CREATED) expected_calls = [ mock.call(subport.trunk_id, subport.port_id, mock.ANY, subport.segmentation_id) for subport in self.subports] self.trunk_manager.add_sub_port.assert_has_calls(expected_calls) @mock.patch('neutron.agent.common.ovs_lib.OVSBridge') def test_handle_subports_deleted(self, br): """Test handler calls into trunk manager for deleting subports.""" self.skeleton.handle_subports(mock.Mock(), 'SUBPORTS', self.subports, events.DELETED) expected_calls = [ mock.call(subport.trunk_id, subport.port_id) for subport in self.subports] self.trunk_manager.remove_sub_port.assert_has_calls(expected_calls) def test_handle_subports_not_for_this_agent(self): with mock.patch.object(self.skeleton, 'ovsdb_handler') as handler_m: handler_m.manages_this_trunk.return_value = False self.skeleton.handle_subports(mock.Mock(), 'SUBPORTS', self.subports, mock.ANY) self.assertFalse(self.trunk_manager.wire_subports_for_trunk.called) self.assertFalse(self.trunk_manager.unwire_subports_for_trunk.called) def test_handle_subports_unknown_event(self): 
trunk_rpc = self.skeleton.ovsdb_handler.trunk_rpc # unknown events should be ignored and thus lead to no updates # and no trunk interactions. with mock.patch.object( self.skeleton.ovsdb_handler, 'wire_subports_for_trunk') as f,\ mock.patch.object( self.skeleton.ovsdb_handler, 'unwire_subports_for_trunk') as g: self.skeleton.handle_subports(mock.Mock(), 'SUBPORTS', self.subports, events.UPDATED) self.assertFalse(f.called) self.assertFalse(g.called) self.assertFalse(trunk_rpc.update_trunk_status.called) def test_handle_subports_trunk_rpc_error(self): trunk_rpc = self.skeleton.ovsdb_handler.trunk_rpc trunk_rpc.update_subport_bindings.side_effect = ( oslo_messaging.MessagingException) self.skeleton.handle_subports(mock.Mock(), 'SUBPORTS', self.subports, events.CREATED) self.assertTrue(trunk_rpc.update_subport_bindings.called) def _test_handle_subports_trunk_on_trunk_update(self, event): trunk_rpc = self.skeleton.ovsdb_handler.trunk_rpc self.skeleton.handle_subports(mock.Mock(), 'SUBPORTS', self.subports, event) # Make sure trunk state is reported to the server self.assertTrue(trunk_rpc.update_trunk_status.called) def test_handle_subports_created_trunk_on_trunk_update(self): with mock.patch.object( self.skeleton.ovsdb_handler, 'wire_subports_for_trunk'): self._test_handle_subports_trunk_on_trunk_update( events.CREATED) def test_handle_subports_deleted_trunk_on_trunk_update(self): with mock.patch.object( self.skeleton.ovsdb_handler, 'unwire_subports_for_trunk'): self._test_handle_subports_trunk_on_trunk_update( events.DELETED) ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/drivers/openvswitch/agent/test_ovsdb_handler.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/drivers/openvswitch/agent/test_ovsdb_han0000644000175000017500000003635200000000000034027 0ustar00coreycorey00000000000000# Copyright (c) 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
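# OVSDB events for the same trunk bridge must be processed serially, so
# the handler locks on the bridge name; TestLockOnBridgeName below pins
# that the locked argument may be passed positionally or as a keyword and
# that decorating a function without that argument fails fast. A sketch
# of such a decorator, assuming oslo_concurrency locks; lock_on_param is
# an illustrative name, not the implementation in ovsdb_handler:

import functools
import inspect

from oslo_concurrency import lockutils


def lock_on_param(required_parameter):
    def decorator(f):
        params = list(inspect.signature(f).parameters)
        if required_parameter not in params:
            # Fail at decoration time, mirroring test_missing_argument.
            raise RuntimeError('%s is not an argument of %s' %
                               (required_parameter, f.__name__))
        position = params.index(required_parameter)

        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            name = kwargs.get(required_parameter) or args[position]
            with lockutils.lock(name):
                return f(*args, **kwargs)
        return wrapper
    return decorator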
import mock from neutron_lib.services.trunk import constants import oslo_messaging from oslo_serialization import jsonutils from oslo_utils import uuidutils import testtools from neutron.api.rpc.handlers import resources_rpc from neutron.common import utils from neutron.objects import trunk as trunk_obj from neutron.services.trunk.drivers.openvswitch.agent import exceptions from neutron.services.trunk.drivers.openvswitch.agent import ovsdb_handler from neutron.services.trunk.drivers.openvswitch.agent import trunk_manager from neutron.tests import base class TestLockOnBridgeName(base.BaseTestCase): def setUp(self): super(TestLockOnBridgeName, self).setUp() self.lock_mock = mock.patch('oslo_concurrency.lockutils.lock').start() self.method_called = False def test_positional_argument(self): @ovsdb_handler.lock_on_bridge_name(required_parameter='bridge_name') def testing_method(param, bridge_name, another): self.method_called = True testing_method('foo', 'bridge', 'bar') self.lock_mock.assert_called_once_with('bridge') self.assertTrue(self.method_called) def test_keyword_argument(self): @ovsdb_handler.lock_on_bridge_name(required_parameter='bridge_name') def testing_method(param, bridge_name, another): self.method_called = True testing_method('foo', another='baz', bridge_name='bridge') self.lock_mock.assert_called_once_with('bridge') self.assertTrue(self.method_called) def test_missing_argument(self): with testtools.ExpectedException(RuntimeError): @ovsdb_handler.lock_on_bridge_name( required_parameter='bridge_name') def testing_method(one, two): pass class TestIsTrunkServicePort(base.BaseTestCase): def test_with_bridge_name(self): observed = ovsdb_handler.is_trunk_service_port('tbr-foo') self.assertTrue(observed) def test_with_subport_patch_port_int_side(self): observed = ovsdb_handler.is_trunk_service_port('spi-foo') self.assertTrue(observed) def test_with_subport_patch_port_trunk_side(self): observed = ovsdb_handler.is_trunk_service_port('spt-foo') self.assertTrue(observed) def test_with_trunk_patch_port_int_side(self): observed = ovsdb_handler.is_trunk_service_port('tpi-foo') self.assertTrue(observed) def test_with_trunk_patch_port_trunk_side(self): observed = ovsdb_handler.is_trunk_service_port('tpt-foo') self.assertTrue(observed) def test_with_random_string(self): observed = ovsdb_handler.is_trunk_service_port('foo') self.assertFalse(observed) class TestBridgeHasInstancePort(base.BaseTestCase): def setUp(self): super(TestBridgeHasInstancePort, self).setUp() self.bridge = mock.Mock() self.present_interfaces = [] self.bridge.get_iface_name_list.return_value = self.present_interfaces def test_only_service_ports_on_bridge(self): """Test when only patch ports and the bridge's own interface are on the trunk bridge.
""" self.present_interfaces.extend( ['tbr-foo', 'spt-foo', 'tpt-foo']) self.assertFalse(ovsdb_handler.bridge_has_instance_port(self.bridge)) def test_device_on_bridge(self): """Condition is True decause of foo device is present on bridge.""" self.present_interfaces.extend( ['tbr-foo', 'spt-foo', 'tpt-foo', 'foo']) self.assertTrue(ovsdb_handler.bridge_has_instance_port(self.bridge)) def test_ovsdb_error(self): self.bridge.get_iface_name_list.side_effect = RuntimeError self.assertFalse(ovsdb_handler.bridge_has_instance_port(self.bridge)) class TestOVSDBHandler(base.BaseTestCase): """Test that RPC or OVSDB failures do not cause crash.""" def setUp(self): super(TestOVSDBHandler, self).setUp() self.ovsdb_handler = ovsdb_handler.OVSDBHandler(mock.sentinel.manager) mock.patch.object(self.ovsdb_handler, 'trunk_rpc').start() mock.patch.object(self.ovsdb_handler, 'trunk_manager').start() self.trunk_manager = self.ovsdb_handler.trunk_manager self.trunk_id = uuidutils.generate_uuid() self.fake_subports = [ trunk_obj.SubPort( id=uuidutils.generate_uuid(), port_id=uuidutils.generate_uuid(), segmentation_id=1)] self.fake_port = { 'name': 'foo', 'external_ids': { 'trunk_id': self.trunk_id, 'subport_ids': jsonutils.dumps( [s.id for s in self.fake_subports]), } } self.subport_bindings = { self.trunk_id: [ {'id': subport.port_id, 'mac_address': 'mac'} for subport in self.fake_subports]} @mock.patch('neutron.agent.common.ovs_lib.OVSBridge') @mock.patch.object(utils, 'wait_until_true', side_effect=utils.WaitTimeout) def test_handle_trunk_add_interface_wont_appear(self, wut, br): self.ovsdb_handler.handle_trunk_add('foo') self.assertTrue(self.trunk_manager.dispose_trunk.called) @mock.patch('neutron.agent.common.ovs_lib.OVSBridge') def test_handle_trunk_add_rpc_failure(self, br): with mock.patch.object(self.ovsdb_handler, '_wire_trunk', side_effect=oslo_messaging.MessagingException): with mock.patch.object(ovsdb_handler, 'bridge_has_instance_port', return_value=True): self.ovsdb_handler.handle_trunk_add('foo') @mock.patch('neutron.agent.common.ovs_lib.OVSBridge') def test_handle_trunk_add_ovsdb_failure(self, br): with mock.patch.object(self.ovsdb_handler, '_wire_trunk', side_effect=RuntimeError): with mock.patch.object(ovsdb_handler, 'bridge_has_instance_port', return_value=True): self.ovsdb_handler.handle_trunk_add('foo') @mock.patch('neutron.agent.common.ovs_lib.OVSBridge') def test_handle_trunk_add_parent_port_not_found(self, br): with mock.patch.object(self.ovsdb_handler, '_get_parent_port', side_effect=exceptions.ParentPortNotFound): # do not wait the default timeout self.ovsdb_handler.timeout = 1 self.ovsdb_handler.handle_trunk_add('foo') @mock.patch('neutron.agent.common.ovs_lib.OVSBridge') def test_handle_trunk_add_missing_bridge(self, br): br.return_value.bridge_exists.return_value = False with mock.patch.object( ovsdb_handler, 'bridge_has_instance_port') as has_port_mock: self.ovsdb_handler.handle_trunk_add('foo') self.assertFalse(has_port_mock.called) def test_handle_trunk_remove_trunk_manager_failure(self): with mock.patch.object(self.ovsdb_handler, '_get_trunk_metadata', side_effect=trunk_manager.TrunkManagerError(error='error')): with mock.patch.object(ovsdb_handler, 'bridge_has_instance_port', return_value=True): self.ovsdb_handler.handle_trunk_remove('foo', self.fake_port) @mock.patch('neutron.agent.common.ovs_lib.OVSBridge') def test_handle_trunk_remove_rpc_failure(self, br): self.ovsdb_handler.trunk_rpc.update_trunk_status = ( oslo_messaging.MessagingException) 
self.ovsdb_handler.handle_trunk_remove('foo', self.fake_port) @mock.patch('neutron.agent.common.ovs_lib.OVSBridge') def test_wire_subports_for_trunk_trunk_manager_failure(self, br): with mock.patch.object( self.ovsdb_handler, '_update_trunk_metadata') as f: trunk_rpc = self.ovsdb_handler.trunk_rpc trunk_rpc.update_subport_bindings.return_value = ( self.subport_bindings) self.trunk_manager.add_sub_port.side_effect = ( trunk_manager.TrunkManagerError(error='error')) status = self.ovsdb_handler.wire_subports_for_trunk( None, self.trunk_id, self.fake_subports) self.assertTrue(f.call_count) self.assertEqual(constants.TRUNK_DEGRADED_STATUS, status) @mock.patch('neutron.agent.common.ovs_lib.OVSBridge') def test_wire_subports_for_trunk_ovsdb_failure(self, br): self.ovsdb_handler.trunk_rpc.update_subport_bindings.return_value = ( self.subport_bindings) with mock.patch.object(self.ovsdb_handler, '_set_trunk_metadata', side_effect=RuntimeError): status = self.ovsdb_handler.wire_subports_for_trunk( None, self.trunk_id, self.fake_subports) self.assertEqual(constants.TRUNK_DEGRADED_STATUS, status) @mock.patch('neutron.agent.common.ovs_lib.OVSBridge') def test_unwire_subports_for_trunk_port_not_found(self, br): self.ovsdb_handler.trunk_rpc.update_subport_bindings.return_value = ( self.subport_bindings) with mock.patch.object(self.ovsdb_handler, '_update_trunk_metadata', side_effect=exceptions.ParentPortNotFound(bridge='foo_br')): status = self.ovsdb_handler.unwire_subports_for_trunk( self.trunk_id, ['subport_id']) self.assertEqual(constants.TRUNK_ACTIVE_STATUS, status) @mock.patch('neutron.agent.common.ovs_lib.OVSBridge') def test_unwire_subports_for_trunk_trunk_manager_failure(self, br): with mock.patch.object( self.ovsdb_handler, '_update_trunk_metadata') as f: self.trunk_manager.remove_sub_port.side_effect = ( trunk_manager.TrunkManagerError(error='error')) status = self.ovsdb_handler.unwire_subports_for_trunk( 'foo_trunk_id', ['subport_id']) self.assertTrue(f.call_count) self.assertEqual(constants.TRUNK_DEGRADED_STATUS, status) def test__wire_trunk_get_trunk_details_failure(self): self.trunk_manager.get_port_uuid_from_external_ids.side_effect = ( trunk_manager.TrunkManagerError(error='error')) self.ovsdb_handler._wire_trunk(mock.Mock(), self.fake_port) def test__wire_trunk_trunk_not_associated(self): self.ovsdb_handler.trunk_rpc.get_trunk_details.side_effect = ( resources_rpc.ResourceNotFound( resource_id='id', resource_type='type')) self.ovsdb_handler._wire_trunk(mock.Mock(), self.fake_port) trunk_rpc = self.ovsdb_handler.trunk_rpc self.assertFalse(trunk_rpc.update_trunk_status.called) def test__wire_trunk_create_trunk_failure(self): self.trunk_manager.create_trunk.side_effect = ( trunk_manager.TrunkManagerError(error='error')) self.ovsdb_handler._wire_trunk(mock.Mock(), self.fake_port) trunk_rpc = self.ovsdb_handler.trunk_rpc trunk_rpc.update_trunk_status.assert_called_once_with( mock.ANY, mock.ANY, constants.TRUNK_ERROR_STATUS) def test__wire_trunk_rewire_trunk_failure(self): with mock.patch.object(self.ovsdb_handler, 'unwire_subports_for_trunk') as f,\ mock.patch.object(self.ovsdb_handler, 'get_connected_subports_for_trunk') as g: g.return_value = ['stale_port'] f.return_value = constants.TRUNK_DEGRADED_STATUS self.ovsdb_handler._wire_trunk( mock.Mock(), self.fake_port, rewire=True) trunk_rpc = self.ovsdb_handler.trunk_rpc trunk_rpc.update_trunk_status.assert_called_once_with( mock.ANY, mock.ANY, constants.TRUNK_DEGRADED_STATUS) def test__wire_trunk_report_trunk_called_on_wiring(self): with 
mock.patch.object(self.trunk_manager, 'create_trunk'),\ mock.patch.object(self.ovsdb_handler, 'wire_subports_for_trunk'): self.ovsdb_handler._wire_trunk(mock.Mock(), self.fake_port) trunk_rpc = self.ovsdb_handler.trunk_rpc self.assertTrue(trunk_rpc.update_trunk_status.called) @mock.patch('neutron.agent.common.ovs_lib.OVSBridge') def test__set_trunk_metadata_with_None_params(self, br): mock_br = br.return_value with mock.patch.object( self.ovsdb_handler, "_get_parent_port", return_value={'name': 'foo', 'external_ids': {}}): self.ovsdb_handler._set_trunk_metadata(None, None, 'foo', []) mock_br.set_db_attribute.assert_called_once_with( 'Interface', 'foo', 'external_ids', {'bridge_name': mock.ANY, 'trunk_id': 'foo', 'subport_ids': '[]'}) def test__get_current_status_active(self): self.assertEqual(constants.TRUNK_ACTIVE_STATUS, self.ovsdb_handler._get_current_status([], [])) def test__get_current_status_degraded(self): self.assertEqual(constants.TRUNK_DEGRADED_STATUS, self.ovsdb_handler._get_current_status( [mock.ANY], [])) def _test__update_trunk_metadata_wire_flag(self, mock_br, wire, external_ids, subport_ids, expected_subport_ids): with mock.patch.object( self.ovsdb_handler, "_get_parent_port", return_value={'name': 'foo', 'external_ids': external_ids}): self.ovsdb_handler._update_trunk_metadata( None, None, 'foo', subport_ids, wire=wire) external_ids = mock_br.set_db_attribute.call_args[0][3] self.assertEqual(1, mock_br.set_db_attribute.call_count) self.assertEqual( sorted(expected_subport_ids), sorted(external_ids['subport_ids'])) @mock.patch('neutron.agent.common.ovs_lib.OVSBridge') def test__update_trunk_metadata_wire(self, br): mock_br = br.return_value external_ids = { 'subport_ids': '["foo_subport_1"]' } subport_ids = ['foo_subport_2'] expected_subport_ids = '["foo_subport_1", "foo_subport_2"]' self._test__update_trunk_metadata_wire_flag( mock_br, True, external_ids, subport_ids, expected_subport_ids) @mock.patch('neutron.agent.common.ovs_lib.OVSBridge') def test__update_trunk_metadata_unwire(self, br): mock_br = br.return_value external_ids = { 'subport_ids': '["foo_subport_1", "foo_subport_2"]' } subport_ids = ['foo_subport_2'] expected_subport_ids = '["foo_subport_1"]' self._test__update_trunk_metadata_wire_flag( mock_br, False, external_ids, subport_ids, expected_subport_ids) ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/drivers/openvswitch/agent/test_trunk_manager.py 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/drivers/openvswitch/agent/test_trunk_man0000644000175000017500000000414500000000000034055 0ustar00coreycorey00000000000000# Copyright (c) 2016 Red Hat # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
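# The cases below pin the trunk manager's error-translation contract: any
# low-level failure while plugging or unplugging a port surfaces as
# TrunkManagerError, so the agent can log it and degrade the trunk rather
# than crash. A generic sketch of the pattern, with DataplaneError as a
# hypothetical stand-in for trunk_manager.TrunkManagerError:


class DataplaneError(Exception):
    pass


def run_dataplane_op(operation, *args, **kwargs):
    try:
        return operation(*args, **kwargs)
    except RuntimeError as exc:
        # Translate the low-level failure into the domain-specific error.
        raise DataplaneError('trunk dataplane operation failed: %s' % exc)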
import contextlib import mock import testtools from neutron.services.trunk.drivers.openvswitch.agent import trunk_manager from neutron.tests import base class TrunkManagerTestCase(base.BaseTestCase): """Tests are aimed to cover negative cases to make sure there is no typo in the logging. """ def setUp(self): super(TrunkManagerTestCase, self).setUp() self.trunk_manager = trunk_manager.TrunkManager(mock.sentinel.br_int) mock.patch.object(trunk_manager, 'TrunkBridge').start() @contextlib.contextmanager def _resource_fails(self, resource, method_name): with mock.patch.object(resource, method_name, side_effect=RuntimeError): with testtools.ExpectedException(trunk_manager.TrunkManagerError): yield def test_create_trunk_plug_fails(self): with self._resource_fails(trunk_manager.TrunkParentPort, 'plug'): self.trunk_manager.create_trunk(None, None, None) def test_remove_trunk_unplug_fails(self): with self._resource_fails(trunk_manager.TrunkParentPort, 'unplug'): self.trunk_manager.remove_trunk(None, None) def test_add_sub_port_plug_fails(self): with self._resource_fails(trunk_manager.SubPort, 'plug'): self.trunk_manager.add_sub_port(None, None, None, None) def test_remove_sub_port_unplug_fails(self): with self._resource_fails(trunk_manager.SubPort, 'unplug'): self.trunk_manager.remove_sub_port(None, None) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/drivers/openvswitch/test_driver.py0000644000175000017500000000603600000000000032704 0ustar00coreycorey00000000000000# Copyright 2016 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
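# The driver answers BEFORE_READ queries for a bound trunk port's bridge
# name by invoking the supplied trigger with the generated name, as the
# registration test below asserts. A sketch of wiring such a callback
# through the neutron_lib callbacks registry; bridge_name_handler and the
# 'tbr-' naming are illustrative assumptions, not the driver's actual
# code:

from neutron_lib.callbacks import events as cb_events
from neutron_lib.callbacks import registry as cb_registry


def bridge_name_handler(resource, event, trigger, payload=None):
    port = payload.metadata['port']
    trunk_details = port.get('trunk_details') or {}
    if 'trunk_id' in trunk_details:
        # Hand the computed bridge name back through the trigger callable.
        trigger('tbr-' + trunk_details['trunk_id'])


# Registration would then look like:
# cb_registry.subscribe(bridge_name_handler, agent_consts.OVS_BRIDGE_NAME,
#                       cb_events.BEFORE_READ)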
import mock from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib import constants from oslo_config import cfg from neutron.plugins.ml2.drivers.openvswitch.agent.common import ( constants as agent_consts) from neutron.services.trunk.drivers.openvswitch import driver from neutron.tests import base GEN_TRUNK_BR_NAME_PATCH = ( 'neutron.services.trunk.drivers.openvswitch.utils.gen_trunk_br_name') class OVSDriverTestCase(base.BaseTestCase): def test_driver_creation(self): ovs_driver = driver.OVSDriver.create() self.assertFalse(ovs_driver.is_loaded) self.assertEqual(driver.NAME, ovs_driver.name) self.assertEqual(driver.SUPPORTED_INTERFACES, ovs_driver.interfaces) self.assertEqual(driver.SUPPORTED_SEGMENTATION_TYPES, ovs_driver.segmentation_types) self.assertEqual(constants.AGENT_TYPE_OVS, ovs_driver.agent_type) self.assertFalse(ovs_driver.can_trunk_bound_port) self.assertTrue( ovs_driver.is_agent_compatible(constants.AGENT_TYPE_OVS)) self.assertTrue( ovs_driver.is_interface_compatible(driver.SUPPORTED_INTERFACES[0])) def test_driver_is_loaded(self): cfg.CONF.set_override('mechanism_drivers', 'openvswitch', group='ml2') ovs_driver = driver.OVSDriver.create() self.assertTrue(ovs_driver.is_loaded) def test_driver_is_not_loaded(self): cfg.CONF.set_override('core_plugin', 'my_foo_plugin') ovs_driver = driver.OVSDriver.create() self.assertFalse(ovs_driver.is_loaded) @mock.patch(GEN_TRUNK_BR_NAME_PATCH) def test_vif_details_bridge_name_handler_registration(self, mock_gen_br_name): driver.register() mock_gen_br_name.return_value = 'fake-trunk-br-name' test_trigger = mock.Mock() registry.publish(agent_consts.OVS_BRIDGE_NAME, events.BEFORE_READ, test_trigger, payload=events.EventPayload( None, metadata={ 'port': { 'trunk_details': { 'trunk_id': 'foo' } } })) test_trigger.assert_called_once_with('fake-trunk-br-name') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4870462 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/drivers/ovn/0000755000175000017500000000000000000000000026224 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/drivers/ovn/__init__.py0000644000175000017500000000000000000000000030323 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/drivers/ovn/test_trunk_driver.py0000644000175000017500000004152500000000000032362 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
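# The OVN driver wires a subport purely through metadata: the port
# binding profile receives the parent port and VLAN tag together with an
# OVS vif_type, and the same values are mirrored into the OVN logical
# switch port (compare the assertions in test_create_trunk below). A
# sketch of the two payloads implied by that contract; the build_*
# helpers are illustrative only:


def build_binding_update(trunk_port_id, segmentation_id):
    # 'ovs' stands in for portbindings.VIF_TYPE_OVS used by the tests.
    return {'profile': {'parent_name': trunk_port_id,
                        'tag': segmentation_id},
            'vif_type': 'ovs'}


def build_lswitch_port_args(subport_port_id, trunk_port_id,
                            segmentation_id):
    return dict(lport_name=subport_port_id, parent_name=trunk_port_id,
                tag=segmentation_id)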
# import mock from neutron_lib.api.definitions import portbindings from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import exceptions as n_exc from neutron_lib.services.trunk import constants as trunk_consts from oslo_config import cfg from neutron.common.ovn.constants import OVN_ML2_MECH_DRIVER_NAME from neutron.objects.ports import Port from neutron.objects.ports import PortBinding from neutron.services.trunk.drivers.ovn import trunk_driver from neutron.tests import base from neutron.tests.unit import fake_resources class TestTrunkHandler(base.BaseTestCase): def setUp(self): super(TestTrunkHandler, self).setUp() self.context = mock.Mock() self.plugin_driver = mock.Mock() self.plugin_driver._plugin = mock.Mock() self.plugin_driver._plugin.update_port = mock.Mock() self.plugin_driver._nb_ovn = fake_resources.FakeOvsdbNbOvnIdl() self.handler = trunk_driver.OVNTrunkHandler(self.plugin_driver) self.trunk_1 = mock.Mock() self.trunk_1.port_id = "trunk-1" self.trunk_2 = mock.Mock() self.trunk_2.port_id = "trunk-2" self.sub_port_1 = mock.Mock() self.sub_port_1.segmentation_id = 40 self.sub_port_1.trunk_id = "trunk-1" self.sub_port_1.port_id = "sub_port_1" self.sub_port_1_obj = self._get_fake_port_obj( port_id='sub_port_1') self.sub_port_2 = mock.Mock() self.sub_port_2.segmentation_id = 41 self.sub_port_2.trunk_id = "trunk-1" self.sub_port_2.port_id = "sub_port_2" self.sub_port_2_obj = self._get_fake_port_obj( port_id='sub_port_2') self.sub_port_3 = mock.Mock() self.sub_port_3.segmentation_id = 42 self.sub_port_3.trunk_id = "trunk-2" self.sub_port_3.port_id = "sub_port_3" self.sub_port_4 = mock.Mock() self.sub_port_4.segmentation_id = 43 self.sub_port_4.trunk_id = "trunk-2" self.sub_port_4.port_id = "sub_port_4" self.get_trunk_object = mock.patch( "neutron.objects.trunk.Trunk.get_object").start() self.get_trunk_object.side_effect = lambda ctxt, id: \ self.trunk_1 if id == 'trunk-1' else self.trunk_2 self.mock_get_port = mock.patch( "neutron.objects.ports.Port.get_object").start() self.mock_get_port.side_effect = lambda ctxt, id: ( self.sub_port_1_obj if id == 'sub_port_1' else ( self.sub_port_2_obj if id == 'sub_port_2' else None)) self.mock_port_update = mock.patch( "neutron.objects.ports.Port.update").start() self.mock_update_pb = mock.patch( "neutron.objects.ports.PortBinding.update_object").start() self.mock_clear_levels = mock.patch( "neutron.objects.ports.PortBindingLevel.delete_objects").start() def _get_fake_port_obj(self, port_id): with mock.patch('uuid.UUID') as mock_uuid: mock_uuid.return_value = port_id port = Port() port.id = port_id port.bindings = [PortBinding(profile={}, host='foo.com')] return port def _assert_calls(self, mock, expected_calls): self.assertEqual( len(expected_calls), mock.call_count) mock.assert_has_calls( expected_calls, any_order=True) def test_create_trunk(self): self.trunk_1.sub_ports = [] self.handler.trunk_created(self.trunk_1) self.plugin_driver._nb_ovn.set_lswitch_port.assert_not_called() self.mock_update_pb.assert_not_called() self.trunk_1.sub_ports = [self.sub_port_1, self.sub_port_2] self.handler.trunk_created(self.trunk_1) calls = [mock.call(), mock.call()] self._assert_calls(self.mock_port_update, calls) calls = [ mock.call(mock.ANY, {'profile': {'parent_name': trunk.port_id, 'tag': s_port.segmentation_id}, 'vif_type': portbindings.VIF_TYPE_OVS}, host=mock.ANY, port_id=s_port.port_id) for trunk, s_port in [(self.trunk_1, self.sub_port_1), (self.trunk_1,
self.sub_port_2)]] self._assert_calls(self.mock_update_pb, calls) calls = [mock.call(lport_name=s_port.port_id, parent_name=trunk.port_id, tag=s_port.segmentation_id) for trunk, s_port in [(self.trunk_1, self.sub_port_1), (self.trunk_1, self.sub_port_2)]] self._assert_calls(self.plugin_driver._nb_ovn.set_lswitch_port, calls) self.mock_clear_levels.assert_not_called() def test_create_trunk_port_not_found(self): self.trunk_1.sub_ports = [self.sub_port_4] self.handler.trunk_created(self.trunk_1) self.plugin_driver._nb_ovn.set_lswitch_port.assert_not_called() self.mock_update_pb.assert_not_called() def test_create_trunk_port_db_exception(self): self.trunk_1.sub_ports = [self.sub_port_1] self.mock_update_pb.side_effect = [n_exc.ObjectNotFound(id=1)] self.handler.trunk_created(self.trunk_1) self.mock_update_pb.assert_called_once_with( mock.ANY, {'profile': {'parent_name': self.sub_port_1.trunk_id, 'tag': self.sub_port_1.segmentation_id}, 'vif_type': portbindings.VIF_TYPE_OVS}, host='foo.com', port_id=self.sub_port_1.port_id) self.mock_port_update.assert_not_called() self.plugin_driver._nb_ovn.set_lswitch_port.assert_not_called() def test_delete_trunk(self): self.trunk_1.sub_ports = [] self.handler.trunk_deleted(self.trunk_1) self.plugin_driver._nb_ovn.set_lswitch_port.assert_not_called() self.mock_update_pb.assert_not_called() self.mock_clear_levels.assert_not_called() self.trunk_1.sub_ports = [self.sub_port_1, self.sub_port_2] self.sub_port_1_obj.bindings[0].profile.update({ 'tag': self.sub_port_1.segmentation_id, 'parent_name': self.sub_port_1.trunk_id, 'foo_field': self.sub_port_1.trunk_id}) self.sub_port_2_obj.bindings[0].profile.update({ 'tag': self.sub_port_2.segmentation_id, 'parent_name': self.sub_port_2.trunk_id, 'foo_field': self.sub_port_2.trunk_id}) self.handler.trunk_deleted(self.trunk_1) calls = [mock.call(), mock.call()] self._assert_calls(self.mock_port_update, calls) calls = [ mock.call( mock.ANY, {'profile': {'foo_field': s_port.trunk_id}, 'vif_type': portbindings.VIF_TYPE_UNBOUND}, host='foo.com', port_id=s_port.port_id) for trunk, s_port in [(self.trunk_1, self.sub_port_1), (self.trunk_1, self.sub_port_2)]] self._assert_calls(self.mock_update_pb, calls) calls = [ mock.call(mock.ANY, host='foo.com', port_id=s_port.port_id) for trunk, s_port in [(self.trunk_1, self.sub_port_1), (self.trunk_1, self.sub_port_2)]] self._assert_calls(self.mock_clear_levels, calls) calls = [mock.call(lport_name=s_port.port_id, parent_name=[], tag=[], up=False) for trunk, s_port in [(self.trunk_1, self.sub_port_1), (self.trunk_1, self.sub_port_2)]] self._assert_calls(self.plugin_driver._nb_ovn.set_lswitch_port, calls) def test_delete_trunk_key_not_found(self): self.sub_port_1_obj.bindings[0].profile.update({ 'foo_field': self.sub_port_1.trunk_id}) self.trunk_1.sub_ports = [self.sub_port_1] self.handler.trunk_deleted(self.trunk_1) calls = [ mock.call(mock.ANY, {'profile': {'foo_field': s_port.trunk_id}, 'vif_type': portbindings.VIF_TYPE_UNBOUND}, host='foo.com', port_id=s_port.port_id) for trunk, s_port in [(self.trunk_1, self.sub_port_1)]] self._assert_calls(self.mock_update_pb, calls) calls = [ mock.call(mock.ANY, host='foo.com', port_id=s_port.port_id) for trunk, s_port in [(self.trunk_1, self.sub_port_1)]] self._assert_calls(self.mock_clear_levels, calls) calls = [mock.call(lport_name=s_port.port_id, parent_name=[], tag=[], up=False) for trunk, s_port in [(self.trunk_1, self.sub_port_1)]] self._assert_calls(self.plugin_driver._nb_ovn.set_lswitch_port, calls) def 
test_delete_trunk_port_not_found(self): self.trunk_1.sub_ports = [self.sub_port_4] self.handler.trunk_deleted(self.trunk_1) self.plugin_driver._nb_ovn.set_lswitch_port.assert_not_called() self.mock_update_pb.assert_not_called() self.mock_clear_levels.assert_not_called() def test_delete_trunk_port_db_exception(self): self.trunk_1.sub_ports = [self.sub_port_1] self.mock_update_pb.side_effect = [n_exc.ObjectNotFound(id=1)] self.handler.trunk_deleted(self.trunk_1) self.mock_update_pb.assert_called_once_with( mock.ANY, {'profile': {}, 'vif_type': portbindings.VIF_TYPE_UNBOUND}, host='foo.com', port_id=self.sub_port_1.port_id) self.mock_port_update.assert_not_called() self.plugin_driver._nb_ovn.set_lswitch_port.assert_not_called() self.mock_clear_levels.assert_not_called() def test_subports_added(self): with mock.patch.object(self.handler, '_set_sub_ports') as set_s: self.handler.subports_added(self.trunk_1, [self.sub_port_1, self.sub_port_2]) set_s.assert_called_once_with( self.trunk_1.port_id, [self.sub_port_1, self.sub_port_2]) self.trunk_1.update.assert_called_once_with( status=trunk_consts.TRUNK_ACTIVE_STATUS) def test_subports_deleted(self): with mock.patch.object(self.handler, '_unset_sub_ports') as unset_s: self.handler.subports_deleted(self.trunk_1, [self.sub_port_1, self.sub_port_2]) unset_s.assert_called_once_with( [self.sub_port_1, self.sub_port_2]) self.trunk_1.update.assert_called_once_with( status=trunk_consts.TRUNK_ACTIVE_STATUS) def _fake_trunk_event_payload(self): payload = mock.Mock() payload.current_trunk = mock.Mock() payload.current_trunk.port_id = 'current_trunk_port_id' payload.original_trunk = mock.Mock() payload.original_trunk.port_id = 'original_trunk_port_id' current_subport = mock.Mock() current_subport.segmentation_id = 40 current_subport.trunk_id = 'current_trunk_port_id' current_subport.port_id = 'current_subport_port_id' original_subport = mock.Mock() original_subport.segmentation_id = 41 original_subport.trunk_id = 'original_trunk_port_id' original_subport.port_id = 'original_subport_port_id' payload.current_trunk.sub_ports = [current_subport] payload.original_trunk.sub_ports = [original_subport] return payload @mock.patch.object(trunk_driver.OVNTrunkHandler, '_set_sub_ports') def test_trunk_event_create(self, set_subports): fake_payload = self._fake_trunk_event_payload() self.handler.trunk_event( mock.ANY, events.AFTER_CREATE, mock.ANY, fake_payload) set_subports.assert_called_once_with( fake_payload.current_trunk.port_id, fake_payload.current_trunk.sub_ports) fake_payload.current_trunk.update.assert_called_once_with( status=trunk_consts.TRUNK_ACTIVE_STATUS) @mock.patch.object(trunk_driver.OVNTrunkHandler, '_unset_sub_ports') def test_trunk_event_delete(self, unset_subports): fake_payload = self._fake_trunk_event_payload() self.handler.trunk_event( mock.ANY, events.AFTER_DELETE, mock.ANY, fake_payload) unset_subports.assert_called_once_with( fake_payload.original_trunk.sub_ports) @mock.patch.object(trunk_driver.OVNTrunkHandler, '_set_sub_ports') @mock.patch.object(trunk_driver.OVNTrunkHandler, '_unset_sub_ports') def test_trunk_event_invalid(self, unset_subports, set_subports): fake_payload = self._fake_trunk_event_payload() self.handler.trunk_event( mock.ANY, events.BEFORE_DELETE, mock.ANY, fake_payload) set_subports.assert_not_called() unset_subports.assert_not_called() def _fake_subport_event_payload(self): payload = mock.Mock() payload.original_trunk = mock.Mock() payload.original_trunk.port_id = 'original_trunk_port_id' original_subport = mock.Mock() 
original_subport.segmentation_id = 41 original_subport.trunk_id = 'original_trunk_port_id' original_subport.port_id = 'original_subport_port_id' payload.subports = [original_subport] return payload @mock.patch.object(trunk_driver.OVNTrunkHandler, 'subports_added') def test_subport_event_create(self, s_added): fake_payload = self._fake_subport_event_payload() self.handler.subport_event( mock.ANY, events.AFTER_CREATE, mock.ANY, fake_payload) s_added.assert_called_once_with( fake_payload.original_trunk, fake_payload.subports) @mock.patch.object(trunk_driver.OVNTrunkHandler, 'subports_deleted') def test_subport_event_delete(self, s_deleted): fake_payload = self._fake_subport_event_payload() self.handler.subport_event( mock.ANY, events.AFTER_DELETE, mock.ANY, fake_payload) s_deleted.assert_called_once_with( fake_payload.original_trunk, fake_payload.subports) @mock.patch.object(trunk_driver.OVNTrunkHandler, 'subports_added') @mock.patch.object(trunk_driver.OVNTrunkHandler, 'subports_deleted') def test_subport_event_invalid(self, s_deleted, s_added): fake_payload = self._fake_trunk_event_payload() self.handler.subport_event( mock.ANY, events.BEFORE_DELETE, mock.ANY, fake_payload) s_added.assert_not_called() s_deleted.assert_not_called() class TestTrunkDriver(base.BaseTestCase): def setUp(self): super(TestTrunkDriver, self).setUp() def test_is_loaded(self): driver = trunk_driver.OVNTrunkDriver.create(mock.Mock()) cfg.CONF.set_override('mechanism_drivers', ["logger", OVN_ML2_MECH_DRIVER_NAME], group='ml2') self.assertTrue(driver.is_loaded) cfg.CONF.set_override('mechanism_drivers', ['ovs', 'logger'], group='ml2') self.assertFalse(driver.is_loaded) cfg.CONF.set_override('core_plugin', 'some_plugin') self.assertFalse(driver.is_loaded) def test_register(self): driver = trunk_driver.OVNTrunkDriver.create(mock.Mock()) with mock.patch.object(registry, 'subscribe') as mock_subscribe: driver.register(mock.ANY, mock.ANY, mock.Mock()) calls = [mock.call.mock_subscribe(mock.ANY, resources.TRUNK, events.AFTER_CREATE), mock.call.mock_subscribe(mock.ANY, resources.SUBPORTS, events.AFTER_CREATE), mock.call.mock_subscribe(mock.ANY, resources.TRUNK, events.AFTER_DELETE), mock.call.mock_subscribe(mock.ANY, resources.SUBPORTS, events.AFTER_DELETE)] mock_subscribe.assert_has_calls(calls, any_order=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/fakes.py0000644000175000017500000000272700000000000025417 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
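# These fakes provide canned DriverBase implementations so that driver
# compatibility logic can be exercised without loading a real backend.
# Typical usage in a test might look like the following sketch (the
# classes are defined below):
#
#     fake = FakeDriverWithAgent.create()
#     assert fake.is_loaded
#     assert fake.agent_type == 'foo_type'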
from neutron.services.trunk.drivers import base class FakeDriver(base.DriverBase): @property def is_loaded(self): return True @classmethod def create(cls): return cls('foo_name', ('foo_intfs',), ('foo_seg_types',)) class FakeDriver2(base.DriverBase): @property def is_loaded(self): return True @classmethod def create(cls): return cls('foo_name2', ('foo_intf2',), ('foo_seg_types2',)) class FakeDriverCanTrunkBoundPort(base.DriverBase): @property def is_loaded(self): return True @classmethod def create(cls): return cls('foo_name3', ('foo_intfs',), ('foo_seg_types',), can_trunk_bound_port=True) class FakeDriverWithAgent(base.DriverBase): @property def is_loaded(self): return True @classmethod def create(cls): return cls('foo_name4', ('foo_intfs',), ('foo_seg_types',), "foo_type") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4870462 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/rpc/0000755000175000017500000000000000000000000024530 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/rpc/__init__.py0000644000175000017500000000000000000000000026627 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/rpc/test_agent.py0000644000175000017500000000500400000000000027236 0ustar00coreycorey00000000000000# Copyright 2016 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib import rpc from oslo_config import cfg import oslo_messaging from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import resources_rpc from neutron.services.trunk.rpc import agent from neutron.tests import base class TrunkSkeletonTest(base.BaseTestCase): # TODO(fitoduarte): add more tests to improve coverage of the module @mock.patch("neutron.api.rpc.callbacks.resource_manager."
"ConsumerResourceCallbacksManager.register") def test___init__(self, mocked_register): mock_conn = mock.MagicMock() with mock.patch.object(rpc.Connection, 'create_consumer', new_callable=mock_conn): test_obj = agent.TrunkSkeleton() self.assertEqual(2, mocked_register.call_count) calls = [mock.call(test_obj.handle_trunks, resources.TRUNK), mock.call(test_obj.handle_subports, resources.SUBPORT)] mocked_register.assert_has_calls(calls, any_order=True) # Test to see if the call to rpc.get_server has the correct # target and the correct endpoints topic = resources_rpc.resource_type_versioned_topic( resources.SUBPORT) subport_target = oslo_messaging.Target( topic=topic, server=cfg.CONF.host, fanout=True) topic = resources_rpc.resource_type_versioned_topic( resources.TRUNK) trunk_target = oslo_messaging.Target( topic=topic, server=cfg.CONF.host, fanout=True) calls = [mock.call(subport_target.topic, mock.ANY, fanout=True), mock.call(trunk_target.topic, mock.ANY, fanout=True)] self.assertIn(calls[0], mock_conn().mock_calls) self.assertIn(calls[1], mock_conn().mock_calls) self.assertIn("ResourcesPushRpcCallback", str(mock_conn().call_args_list)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/rpc/test_backend.py0000644000175000017500000000640600000000000027536 0ustar00coreycorey00000000000000# Copyright 2016 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from neutron_lib.callbacks import events from neutron_lib.callbacks import resources from neutron_lib import fixture from neutron.api.rpc.callbacks import resource_manager from neutron.services.trunk import callbacks from neutron.services.trunk.rpc import backend from neutron.tests import base from neutron.tests import tools class ServerSideRpcBackendTest(base.BaseTestCase): # TODO(fitoduarte): add more tests to improve coverage of the module def setUp(self): super(ServerSideRpcBackendTest, self).setUp() self._mgr = mock.Mock() self.useFixture(fixture.CallbackRegistryFixture( callback_manager=self._mgr)) self.register_mock = mock.patch.object( resource_manager.ResourceCallbacksManager, "register").start() def test___init__(self): test_obj = backend.ServerSideRpcBackend() calls = [mock.call( *tools.get_subscribe_args( test_obj.process_event, resources.TRUNK, events.AFTER_CREATE)), mock.call( *tools.get_subscribe_args( test_obj.process_event, resources.TRUNK, events.AFTER_DELETE)), mock.call( *tools.get_subscribe_args( test_obj.process_event, resources.SUBPORTS, events.AFTER_CREATE)), mock.call( *tools.get_subscribe_args( test_obj.process_event, resources.SUBPORTS, events.AFTER_DELETE)) ] self._mgr.subscribe.assert_has_calls(calls, any_order=True) def test_process_event(self): test_obj = backend.ServerSideRpcBackend() test_obj._stub = mock_stub = mock.Mock() trunk_plugin = mock.Mock() test_obj.process_event( resources.TRUNK, events.AFTER_CREATE, trunk_plugin, callbacks.TrunkPayload("context", "id", current_trunk="current_trunk")) test_obj.process_event( resources.TRUNK, events.AFTER_DELETE, trunk_plugin, callbacks.TrunkPayload("context", "id", original_trunk="original_trunk")) calls = [mock.call.trunk_created("context", "current_trunk"), mock.call.trunk_deleted("context", "original_trunk")] mock_stub.assert_has_calls(calls, any_order=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/rpc/test_server.py0000644000175000017500000003224700000000000027457 0ustar00coreycorey00000000000000# Copyright 2016 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
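# update_subport_bindings can race with other writers when it flips the
# trunk status; the tests below pin the retry contract: StaleDataError is
# retried up to db_api.MAX_RETRIES times, while any other exception
# propagates after a single attempt. A generic sketch of that contract
# (illustrative; neutron applies a DB retry decorator rather than an
# explicit loop):

from sqlalchemy.orm import exc as orm_exc

_MAX_RETRIES = 10  # stand-in for neutron_lib.db.api.MAX_RETRIES


def retry_stale_updates(update):
    for attempt in range(_MAX_RETRIES):
        try:
            return update()
        except orm_exc.StaleDataError:
            # Re-raise once the retry budget is exhausted.
            if attempt == _MAX_RETRIES - 1:
                raise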
import mock from neutron_lib.api.definitions import portbindings from neutron_lib.db import api as db_api from neutron_lib.plugins import directory from neutron_lib import rpc as n_rpc from neutron_lib.services.trunk import constants from sqlalchemy.orm import exc from neutron.api.rpc.callbacks import events from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import resources_rpc from neutron.objects import trunk as trunk_obj from neutron.plugins.ml2 import plugin as ml2_plugin from neutron.services.trunk import drivers from neutron.services.trunk import exceptions as trunk_exc from neutron.services.trunk import plugin as trunk_plugin from neutron.services.trunk.rpc import server from neutron.tests import base from neutron.tests.unit.plugins.ml2 import test_plugin class TrunkSkeletonTest(test_plugin.Ml2PluginV2TestCase): def setUp(self): super(TrunkSkeletonTest, self).setUp() self.mock_registry_provide = mock.patch( 'neutron.api.rpc.callbacks.producer.registry.provide').start() self.drivers_patch = mock.patch.object(drivers, 'register').start() self.mock_update_port = mock.patch.object(ml2_plugin.Ml2Plugin, 'update_port').start() self.compat_patch = mock.patch.object( trunk_plugin.TrunkPlugin, 'check_compatibility').start() self.trunk_plugin = trunk_plugin.TrunkPlugin() self.trunk_plugin.add_segmentation_type('vlan', lambda x: True) self.core_plugin = directory.get_plugin() def _create_test_trunk(self, port, subports=None): subports = subports if subports else [] trunk = {'port_id': port['port']['id'], 'tenant_id': 'test_tenant', 'sub_ports': subports } response = ( self.trunk_plugin.create_trunk(self.context, {'trunk': trunk})) return response @mock.patch("neutron.api.rpc.callbacks.resource_manager." "ResourceCallbacksManager.register") def test___init__(self, mocked_get_server): mock_conn = mock.MagicMock() with mock.patch.object(n_rpc.Connection, 'create_consumer', new_callable=mock_conn): test_obj = server.TrunkSkeleton() self.mock_registry_provide.assert_called_with( server.trunk_by_port_provider, resources.TRUNK) self.assertItemsEqual(('trunk', [test_obj],), mock_conn.mock_calls[1][1]) def test_update_subport_bindings(self): with self.port() as _parent_port: parent_port = _parent_port trunk = self._create_test_trunk(parent_port) port_data = {portbindings.HOST_ID: 'trunk_host_id'} self.core_plugin.update_port( self.context, parent_port['port']['id'], {'port': port_data}) subports = [] mock_return_vals = [] for vid in range(0, 3): with self.port() as new_port: new_port[portbindings.HOST_ID] = 'trunk_host_id' mock_return_vals.append(new_port) obj = trunk_obj.SubPort( context=self.context, trunk_id=trunk['id'], port_id=new_port['port']['id'], segmentation_type='vlan', segmentation_id=vid) subports.append(obj) self.mock_update_port.side_effect = mock_return_vals test_obj = server.TrunkSkeleton() test_obj._trunk_plugin = self.trunk_plugin test_obj._core_plugin = self.core_plugin updated_subports = test_obj.update_subport_bindings(self.context, subports=subports) trunk = trunk_obj.Trunk.get_object(self.context, id=trunk['id']) self.assertEqual(trunk.status, constants.TRUNK_BUILD_STATUS) self.assertIn(trunk.id, updated_subports) for port in updated_subports[trunk['id']]: self.assertEqual('trunk_host_id', port[portbindings.HOST_ID]) def test__handle_port_binding_binding_error(self): with self.port() as _trunk_port: trunk = self._create_test_trunk(_trunk_port) trunk_host = 'test-host' test_obj = server.TrunkSkeleton() self.mock_update_port.return_value = 
{portbindings.VIF_TYPE: portbindings.VIF_TYPE_BINDING_FAILED} self.assertRaises(trunk_exc.SubPortBindingError, test_obj._handle_port_binding, self.context, _trunk_port['port']['id'], trunk_obj.Trunk.get_object(self.context, id=trunk['id']), trunk_host) def test_update_subport_bindings_error(self): with self.port() as _parent_port: parent_port = _parent_port trunk = self._create_test_trunk(parent_port) port_data = {portbindings.HOST_ID: 'trunk_host_id'} self.core_plugin.update_port( self.context, parent_port['port']['id'], {'port': port_data}) subports = [] for vid in range(0, 3): with self.port() as new_port: new_port[portbindings.HOST_ID] = 'trunk_host_id' obj = trunk_obj.SubPort( context=self.context, trunk_id=trunk['id'], port_id=new_port['port']['id'], segmentation_type='vlan', segmentation_id=vid) subports.append(obj) test_obj = server.TrunkSkeleton() test_obj._trunk_plugin = self.trunk_plugin test_obj._core_plugin = self.core_plugin self.mock_update_port.return_value = {portbindings.VIF_TYPE: portbindings.VIF_TYPE_BINDING_FAILED} updated_subports = test_obj.update_subport_bindings(self.context, subports=subports) trunk = trunk_obj.Trunk.get_object(self.context, id=trunk['id']) self.assertEqual(trunk.status, constants.TRUNK_ERROR_STATUS) self.assertEqual([], updated_subports[trunk.id]) def test_update_subport_bindings_staledataerror(self): with self.port() as _parent_port: parent_port = _parent_port trunk = self._create_test_trunk(parent_port) port_data = {portbindings.HOST_ID: 'trunk_host_id'} self.core_plugin.update_port( self.context, parent_port['port']['id'], {'port': port_data}) subports = [] for vid in range(0, 3): with self.port() as new_port: new_port[portbindings.HOST_ID] = 'trunk_host_id' obj = trunk_obj.SubPort( context=self.context, trunk_id=trunk['id'], port_id=new_port['port']['id'], segmentation_type='vlan', segmentation_id=vid) subports.append(obj) test_obj = server.TrunkSkeleton() test_obj._trunk_plugin = self.trunk_plugin test_obj._core_plugin = self.core_plugin self.mock_update_port.return_value = {portbindings.VIF_TYPE: portbindings.VIF_TYPE_BINDING_FAILED} mock_trunk_obj = mock.Mock(port_id=parent_port['port']['id']) mock_trunk_obj.update.side_effect = exc.StaleDataError with mock.patch.object( trunk_obj.Trunk, 'get_object', return_value=mock_trunk_obj): self.assertRaises( exc.StaleDataError, test_obj.update_subport_bindings, self.context, subports=subports) self.assertEqual( db_api.MAX_RETRIES, mock_trunk_obj.update.call_count) def test_update_subport_bindings_noretryerror(self): with self.port() as _parent_port: parent_port = _parent_port trunk = self._create_test_trunk(parent_port) port_data = {portbindings.HOST_ID: 'trunk_host_id'} self.core_plugin.update_port( self.context, parent_port['port']['id'], {'port': port_data}) subports = [] for vid in range(0, 3): with self.port() as new_port: new_port[portbindings.HOST_ID] = 'trunk_host_id' obj = trunk_obj.SubPort( context=self.context, trunk_id=trunk['id'], port_id=new_port['port']['id'], segmentation_type='vlan', segmentation_id=vid) subports.append(obj) test_obj = server.TrunkSkeleton() test_obj._trunk_plugin = self.trunk_plugin test_obj._core_plugin = self.core_plugin self.mock_update_port.return_value = {portbindings.VIF_TYPE: portbindings.VIF_TYPE_BINDING_FAILED} mock_trunk_obj = mock.Mock(port_id=parent_port['port']['id']) mock_trunk_obj.update.side_effect = KeyError with mock.patch.object( trunk_obj.Trunk, 'get_object', return_value=mock_trunk_obj): self.assertRaises( KeyError, 
test_obj.update_subport_bindings, self.context, subports=subports) self.assertEqual(1, mock_trunk_obj.update.call_count) def test_update_subport_bindings_exception(self): with self.port() as _parent_port: parent_port = _parent_port trunk = self._create_test_trunk(parent_port) port_data = {portbindings.HOST_ID: 'trunk_host_id'} self.core_plugin.update_port( self.context, parent_port['port']['id'], {'port': port_data}) subports = [] mock_return_vals = [] for vid in range(0, 3): with self.port() as new_port: new_port[portbindings.HOST_ID] = 'trunk_host_id' mock_return_vals.append(new_port) obj = trunk_obj.SubPort( context=self.context, trunk_id=trunk['id'], port_id=new_port['port']['id'], segmentation_type='vlan', segmentation_id=vid) subports.append(obj) self.mock_update_port.side_effect = Exception() test_obj = server.TrunkSkeleton() test_obj._trunk_plugin = self.trunk_plugin test_obj._core_plugin = self.core_plugin updated_subports = test_obj.update_subport_bindings(self.context, subports=subports) trunk = trunk_obj.Trunk.get_object(self.context, id=trunk['id']) self.assertEqual([], updated_subports.get(trunk.id)) self.assertEqual(constants.TRUNK_DEGRADED_STATUS, trunk.status) class TrunkStubTest(base.BaseTestCase): def setUp(self): super(TrunkStubTest, self).setUp() self.test_obj = server.TrunkStub() def test___init__(self): self.assertIsInstance(self.test_obj._resource_rpc, resources_rpc.ResourcesPushRpcApi) @mock.patch("neutron.api.rpc.handlers.resources_rpc.ResourcesPushRpcApi." "push") def test_trunk_created(self, mocked_push): m_context = mock.Mock() m_trunk = mock.Mock() self.test_obj.trunk_created(m_context, m_trunk) mocked_push.assert_called_with(m_context, [m_trunk], events.CREATED) @mock.patch("neutron.api.rpc.handlers.resources_rpc.ResourcesPushRpcApi." "push") def test_trunk_deleted(self, mocked_push): m_context = mock.Mock() m_trunk = mock.Mock() self.test_obj.trunk_deleted(m_context, m_trunk) mocked_push.assert_called_with(m_context, [m_trunk], events.DELETED) @mock.patch("neutron.api.rpc.handlers.resources_rpc.ResourcesPushRpcApi." "push") def test_subports_added(self, mocked_push): m_context = mock.Mock() m_subports = mock.Mock() self.test_obj.subports_added(m_context, m_subports) mocked_push.assert_called_with(m_context, m_subports, events.CREATED) @mock.patch("neutron.api.rpc.handlers.resources_rpc.ResourcesPushRpcApi." 
"push") def test_subports_deleted(self, mocked_push): m_context = mock.Mock() m_subports = mock.Mock() self.test_obj.subports_deleted(m_context, m_subports) mocked_push.assert_called_with(m_context, m_subports, events.DELETED) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4910462 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/seg_types/0000755000175000017500000000000000000000000025746 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/seg_types/__init__.py0000644000175000017500000000000000000000000030045 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/seg_types/test_validators.py0000644000175000017500000000264100000000000031532 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from neutron_lib.services.trunk import constants from neutron.services.trunk.seg_types import validators from neutron.tests import base class ValidatorsTestCase(base.BaseTestCase): def test_add_validator_raises_keyerror_on_redefinition(self): self.assertRaises(KeyError, validators.add_validator, constants.SEGMENTATION_TYPE_VLAN, mock.ANY) def test_add_validator_add_new_type(self): validators.add_validator('foo', lambda: None) self.assertIn('foo', validators._supported) def test_get_validator(self): self.assertIsNotNone(validators.get_validator( constants.SEGMENTATION_TYPE_VLAN)) def test_get_validator_raises_keyerror_on_missing_validator(self): self.assertRaises(KeyError, validators.get_validator, 'my_random_seg_type') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/test_plugin.py0000644000175000017500000004367000000000000026665 0ustar00coreycorey00000000000000# Copyright 2016 Hewlett Packard Enterprise Development Company, LP # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from neutron_lib.api.definitions import portbindings from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib.plugins import directory from neutron_lib.services.trunk import constants import testtools from neutron.objects import trunk as trunk_objects from neutron.services.trunk import callbacks from neutron.services.trunk import drivers from neutron.services.trunk import exceptions as trunk_exc from neutron.services.trunk import plugin as trunk_plugin from neutron.services.trunk import rules from neutron.services.trunk.seg_types import validators from neutron.tests.unit.plugins.ml2 import test_plugin from neutron.tests.unit.services.trunk import fakes def create_subport_dict(port_id): return {'segmentation_type': 'vlan', 'segmentation_id': 123, 'port_id': port_id} def register_mock_callback(resource, event): callback = mock.Mock() registry.subscribe(callback, resource, event) return callback class TrunkPluginTestCase(test_plugin.Ml2PluginV2TestCase): def setUp(self): super(TrunkPluginTestCase, self).setUp() self.drivers_patch = mock.patch.object(drivers, 'register').start() self.compat_patch = mock.patch.object( trunk_plugin.TrunkPlugin, 'check_compatibility').start() self.trunk_plugin = trunk_plugin.TrunkPlugin() self.trunk_plugin.add_segmentation_type('vlan', lambda x: True) def _create_test_trunk(self, port, subports=None): subports = subports if subports else [] trunk = {'port_id': port['port']['id'], 'tenant_id': 'test_tenant', 'sub_ports': subports} response = ( self.trunk_plugin.create_trunk(self.context, {'trunk': trunk})) return response def _get_trunk_obj(self, trunk_id): return trunk_objects.Trunk.get_object(self.context, id=trunk_id) def _get_subport_obj(self, port_id): subports = trunk_objects.SubPort.get_objects( self.context, port_id=port_id) return subports[0] def _test_delete_port_raise_in_use(self, parent_port, child_port, port_id, exception): subport = create_subport_dict(child_port['port']['id']) self._create_test_trunk(parent_port, [subport]) core_plugin = directory.get_plugin() self.assertRaises(exception, core_plugin.delete_port, self.context, port_id) def test_delete_port_raise_in_use_by_trunk(self): with self.port() as parent_port, self.port() as child_port: self._test_delete_port_raise_in_use( parent_port, child_port, parent_port['port']['id'], trunk_exc.PortInUseAsTrunkParent) def test_delete_port_raise_in_use_by_subport(self): with self.port() as parent_port, self.port() as child_port: self._test_delete_port_raise_in_use( parent_port, child_port, child_port['port']['id'], trunk_exc.PortInUseAsSubPort) def test_delete_trunk_raise_in_use(self): with self.port() as port: fakes.FakeDriverCanTrunkBoundPort.create() self.trunk_plugin = trunk_plugin.TrunkPlugin() directory.add_plugin('trunk', self.trunk_plugin) trunk = self._create_test_trunk(port) core_plugin = directory.get_plugin() port['port']['binding:host_id'] = 'host' core_plugin.update_port(self.context, port['port']['id'], port) trunk_port_validator = rules.TrunkPortValidator(trunk['port_id']) if not trunk_port_validator.can_be_trunked_or_untrunked( self.context): self.assertRaises(trunk_exc.TrunkInUse, self.trunk_plugin.delete_trunk, self.context, trunk['id']) def _test_trunk_create_notify(self, event): with self.port() as parent_port: callback = register_mock_callback(resources.TRUNK, event) trunk = self._create_test_trunk(parent_port) trunk_obj = self._get_trunk_obj(trunk['id']) payload = 
callbacks.TrunkPayload(self.context, trunk['id'], current_trunk=trunk_obj) callback.assert_called_once_with( resources.TRUNK, event, self.trunk_plugin, payload=payload) def test_create_trunk_notify_after_create(self): self._test_trunk_create_notify(events.AFTER_CREATE) def test_create_trunk_notify_precommit_create(self): self._test_trunk_create_notify(events.PRECOMMIT_CREATE) def _test_trunk_update_notify(self, event): with self.port() as parent_port: callback = register_mock_callback(resources.TRUNK, event) trunk = self._create_test_trunk(parent_port) orig_trunk_obj = self._get_trunk_obj(trunk['id']) trunk_req = {'trunk': {'name': 'foo'}} self.trunk_plugin.update_trunk(self.context, trunk['id'], trunk_req) trunk_obj = self._get_trunk_obj(trunk['id']) payload = callbacks.TrunkPayload(self.context, trunk['id'], original_trunk=orig_trunk_obj, current_trunk=trunk_obj) callback.assert_called_once_with( resources.TRUNK, event, self.trunk_plugin, payload=payload) def test_trunk_update_notify_after_update(self): self._test_trunk_update_notify(events.AFTER_UPDATE) def test_trunk_update_notify_precommit_update(self): # TODO(boden): refactor back into _test_trunk_update_notify # once all code uses neutron-lib payloads with self.port() as parent_port: callback = register_mock_callback( resources.TRUNK, events.PRECOMMIT_UPDATE) trunk = self._create_test_trunk(parent_port) orig_trunk_obj = self._get_trunk_obj(trunk['id']) trunk_req = {'trunk': {'name': 'foo'}} self.trunk_plugin.update_trunk(self.context, trunk['id'], trunk_req) trunk_obj = self._get_trunk_obj(trunk['id']) callback.assert_called_once_with( resources.TRUNK, events.PRECOMMIT_UPDATE, self.trunk_plugin, payload=mock.ANY) call_payload = callback.call_args[1]['payload'] self.assertEqual(orig_trunk_obj, call_payload.states[0]) self.assertEqual(trunk_obj, call_payload.desired_state) def _test_trunk_delete_notify(self, event): with self.port() as parent_port: callback = register_mock_callback(resources.TRUNK, event) trunk = self._create_test_trunk(parent_port) trunk_obj = self._get_trunk_obj(trunk['id']) self.trunk_plugin.delete_trunk(self.context, trunk['id']) payload = callbacks.TrunkPayload(self.context, trunk['id'], original_trunk=trunk_obj) callback.assert_called_once_with( resources.TRUNK, event, self.trunk_plugin, payload=payload) def test_delete_trunk_notify_after_delete(self): self._test_trunk_delete_notify(events.AFTER_DELETE) def test_delete_trunk_notify_precommit_delete(self): self._test_trunk_delete_notify(events.PRECOMMIT_DELETE) def _test_subport_action_empty_list_no_notify(self, event, subport_method): with self.port() as parent_port: trunk = self._create_test_trunk(parent_port) callback = register_mock_callback(resources.SUBPORTS, event) subport_method(self.context, trunk['id'], {'sub_ports': []}) callback.assert_not_called() def _test_add_subports_no_notification(self, event): self._test_subport_action_empty_list_no_notify( event, self.trunk_plugin.add_subports) def test_add_subports_notify_after_create_empty_list(self): self._test_add_subports_no_notification(events.AFTER_CREATE) def test_add_subports_notify_precommit_create_empty_list(self): self._test_add_subports_no_notification(events.PRECOMMIT_CREATE) def _test_remove_subports_no_notification(self, event): self._test_subport_action_empty_list_no_notify( event, self.trunk_plugin.remove_subports) def test_remove_subports_notify_after_delete_empty_list(self): self._test_remove_subports_no_notification(events.AFTER_DELETE) def 
test_remove_subports_notify_precommit_delete_empty_list(self): self._test_remove_subports_no_notification(events.PRECOMMIT_DELETE) def _test_add_subports_notify(self, event): with self.port() as parent_port, self.port() as child_port: trunk = self._create_test_trunk(parent_port) orig_trunk_obj = self._get_trunk_obj(trunk['id']) subport = create_subport_dict(child_port['port']['id']) callback = register_mock_callback(resources.SUBPORTS, event) self.trunk_plugin.add_subports( self.context, trunk['id'], {'sub_ports': [subport]}) trunk_obj = self._get_trunk_obj(trunk['id']) subport_obj = self._get_subport_obj(subport['port_id']) payload = callbacks.TrunkPayload(self.context, trunk['id'], current_trunk=trunk_obj, original_trunk=orig_trunk_obj, subports=[subport_obj]) callback.assert_called_once_with( resources.SUBPORTS, event, self.trunk_plugin, payload=payload) def test_add_subports_notify_after_create(self): self._test_add_subports_notify(events.AFTER_CREATE) def test_add_subports_notify_precommit_create(self): self._test_add_subports_notify(events.PRECOMMIT_CREATE) def _test_remove_subports_notify(self, event): with self.port() as parent_port, self.port() as child_port: subport = create_subport_dict(child_port['port']['id']) trunk = self._create_test_trunk(parent_port, [subport]) orig_trunk_obj = self._get_trunk_obj(trunk['id']) callback = register_mock_callback(resources.SUBPORTS, event) subport_obj = self._get_subport_obj(subport['port_id']) self.trunk_plugin.remove_subports( self.context, trunk['id'], {'sub_ports': [subport]}) trunk_obj = self._get_trunk_obj(trunk['id']) payload = callbacks.TrunkPayload(self.context, trunk['id'], current_trunk=trunk_obj, original_trunk=orig_trunk_obj, subports=[subport_obj]) callback.assert_called_once_with( resources.SUBPORTS, event, self.trunk_plugin, payload=payload) def test_remove_subports_notify_after_delete(self): self._test_remove_subports_notify(events.AFTER_DELETE) def test_remove_subports_notify_precommit_delete(self): self._test_remove_subports_notify(events.PRECOMMIT_DELETE) def test_create_trunk_in_down_state(self): with self.port() as port: trunk = self._create_test_trunk(port) self.assertEqual( constants.TRUNK_DOWN_STATUS, trunk['status']) def test_add_subports_trunk_in_error_state_raises(self): with self.port() as port, self.port() as subport: trunk = self._create_test_trunk(port) trunk_obj = self._get_trunk_obj(trunk['id']) trunk_obj.status = constants.TRUNK_ERROR_STATUS trunk_obj.update() s = create_subport_dict(subport['port']['id']) self.assertRaises(trunk_exc.TrunkInErrorState, self.trunk_plugin.add_subports, self.context, trunk['id'], {'sub_ports': [s]}) def test_add_subports_trunk_goes_to_down(self): with self.port() as port, self.port() as subport: trunk = self._create_test_trunk(port) trunk_obj = self._get_trunk_obj(trunk['id']) trunk_obj.status = constants.TRUNK_ACTIVE_STATUS trunk_obj.update() s = create_subport_dict(subport['port']['id']) trunk = self.trunk_plugin.add_subports( self.context, trunk['id'], {'sub_ports': [s]}) self.assertEqual(constants.TRUNK_DOWN_STATUS, trunk['status']) def test_remove_subports_trunk_goes_to_down(self): with self.port() as port, self.port() as subport: s = create_subport_dict(subport['port']['id']) trunk = self._create_test_trunk(port, [s]) trunk_obj = self._get_trunk_obj(trunk['id']) trunk_obj.status = constants.TRUNK_ACTIVE_STATUS trunk_obj.update() trunk = self.trunk_plugin.remove_subports( self.context, trunk['id'], {'sub_ports': [{'port_id': subport['port']['id']}]}) 
self.assertEqual(constants.TRUNK_DOWN_STATUS, trunk['status']) def test__trigger_trunk_status_change_vif_type_changed_unbound(self): callback = register_mock_callback(resources.TRUNK, events.AFTER_UPDATE) with self.port() as parent: parent[portbindings.VIF_TYPE] = portbindings.VIF_TYPE_UNBOUND original_port = {portbindings.VIF_TYPE: 'fakeviftype'} original_trunk, current_trunk = ( self._test__trigger_trunk_status_change( parent, original_port, constants.TRUNK_ACTIVE_STATUS, constants.TRUNK_DOWN_STATUS)) payload = callbacks.TrunkPayload(self.context, original_trunk['id'], original_trunk=original_trunk, current_trunk=current_trunk) callback.assert_called_once_with( resources.TRUNK, events.AFTER_UPDATE, self.trunk_plugin, payload=payload) def test__trigger_trunk_status_change_vif_type_unchanged(self): with self.port() as parent: parent[portbindings.VIF_TYPE] = 'fakeviftype' original_port = {portbindings.VIF_TYPE: 'fakeviftype'} self._test__trigger_trunk_status_change( parent, original_port, constants.TRUNK_ACTIVE_STATUS, constants.TRUNK_ACTIVE_STATUS) def test__trigger_trunk_status_change_vif_type_changed(self): with self.port() as parent: parent[portbindings.VIF_TYPE] = 'realviftype' original_port = {portbindings.VIF_TYPE: 'fakeviftype'} self._test__trigger_trunk_status_change( parent, original_port, constants.TRUNK_ACTIVE_STATUS, constants.TRUNK_ACTIVE_STATUS) def _test__trigger_trunk_status_change(self, new_parent, original_parent, initial_trunk_status, final_trunk_status): trunk = self._create_test_trunk(new_parent) trunk = self._get_trunk_obj(trunk['id']) trunk.update(status=initial_trunk_status) trunk_details = {'trunk_id': trunk.id} new_parent['trunk_details'] = trunk_details original_parent['trunk_details'] = trunk_details kwargs = {'context': self.context, 'port': new_parent, 'original_port': original_parent} self.trunk_plugin._trigger_trunk_status_change(resources.PORT, events.AFTER_UPDATE, None, **kwargs) current_trunk = self._get_trunk_obj(trunk.id) self.assertEqual(final_trunk_status, current_trunk.status) return trunk, current_trunk class TrunkPluginCompatDriversTestCase(test_plugin.Ml2PluginV2TestCase): def setUp(self): super(TrunkPluginCompatDriversTestCase, self).setUp() mock.patch.object(drivers, 'register').start() def test_plugin_fails_to_start_no_loaded_drivers(self): with testtools.ExpectedException( trunk_exc.IncompatibleTrunkPluginConfiguration): trunk_plugin.TrunkPlugin() def test_plugins_fails_to_start_seg_type_validator_not_found(self): fakes.FakeDriver.create() with mock.patch.object( validators, 'get_validator', side_effect=KeyError), \ testtools.ExpectedException( trunk_exc.SegmentationTypeValidatorNotFound): trunk_plugin.TrunkPlugin() def test_plugins_fails_to_start_conflicting_seg_types(self): fakes.FakeDriver.create() fakes.FakeDriver2.create() with testtools.ExpectedException( trunk_exc.IncompatibleDriverSegmentationTypes): trunk_plugin.TrunkPlugin() def test_plugin_with_fake_driver(self): with mock.patch.object(validators, 'get_validator', return_value={'foo_seg_types': mock.ANY}): fake_driver = fakes.FakeDriver.create() plugin = trunk_plugin.TrunkPlugin() self.assertTrue(fake_driver.is_loaded) self.assertEqual(set([]), plugin.supported_agent_types) self.assertEqual(set(['foo_intfs']), plugin.supported_interfaces) self.assertEqual([fake_driver], plugin.registered_drivers)
[file: neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/test_rules.py]
# Copyright 2016 Hewlett Packard Enterprise Development Company, LP # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock import testtools from neutron_lib.api.definitions import trunk as trunk_api from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from neutron_lib.plugins.ml2 import api from neutron_lib.plugins import utils as plugin_utils from neutron_lib.services.trunk import constants from oslo_utils import uuidutils from neutron.services.trunk import drivers from neutron.services.trunk import exceptions as trunk_exc from neutron.services.trunk import plugin as trunk_plugin from neutron.services.trunk import rules from neutron.services.trunk import utils as trunk_utils from neutron.tests import base from neutron.tests.unit.plugins.ml2 import test_plugin from neutron.tests.unit.services.trunk import fakes class SubPortsValidatorTestCase(base.BaseTestCase): def setUp(self): super(SubPortsValidatorTestCase, self).setUp() self.segmentation_types = { constants.SEGMENTATION_TYPE_VLAN: plugin_utils.is_valid_vlan_tag} self.context = mock.ANY mock.patch.object(rules.SubPortsValidator, '_get_port_mtu', return_value=None).start() mock.patch.object(rules.SubPortsValidator, '_prepare_subports', return_value=None).start() def test_validate_subport_subport_and_trunk_shared_port_id(self): shared_id = uuidutils.generate_uuid() validator = rules.SubPortsValidator( self.segmentation_types, [{'port_id': shared_id, 'segmentation_type': 'vlan', 'segmentation_id': 2}], shared_id) self.assertRaises(trunk_exc.ParentPortInUse, validator.validate, self.context) def test_validate_subport_invalid_vlan_id(self): validator = rules.SubPortsValidator( self.segmentation_types, [{'port_id': uuidutils.generate_uuid(), 'segmentation_type': 'vlan', 'segmentation_id': 5000}]) self.assertRaises(n_exc.InvalidInput, validator.validate, self.context) def test_validate_subport_vlan_id_not_an_int(self): validator = rules.SubPortsValidator( self.segmentation_types, [{'port_id': uuidutils.generate_uuid(), 'segmentation_type': 'vlan', 'segmentation_id': 'IamNotAnumber'}]) self.assertRaises(n_exc.InvalidInput, validator.validate, self.context) def test_validate_subport_valid_vlan_id_as_string(self): validator = rules.SubPortsValidator( self.segmentation_types, [{'port_id': uuidutils.generate_uuid(), 'segmentation_type': 'vlan', 'segmentation_id': '2'}]) with mock.patch.object(rules.TrunkPortValidator, 'validate') as f: validator.validate(self.context) f.assert_called_once_with(self.context, parent_port=False) def test_validate_subport_subport_invalid_segmentation_type(self): validator = rules.SubPortsValidator( self.segmentation_types, [{'port_id': uuidutils.generate_uuid(), 'segmentation_type': 'fake', 'segmentation_id': 100}]) self.assertRaises(n_exc.InvalidInput, validator.validate, self.context) def
test_validate_subport_missing_segmentation_type(self): validator = rules.SubPortsValidator( self.segmentation_types, [{'port_id': uuidutils.generate_uuid(), 'segmentation_id': 100}]) self.assertRaises(n_exc.InvalidInput, validator.validate, self.context) def test_validate_subport_missing_segmentation_id(self): validator = rules.SubPortsValidator( self.segmentation_types, [{'port_id': uuidutils.generate_uuid(), 'segmentation_type': 'fake'}]) self.assertRaises(n_exc.InvalidInput, validator.validate, self.context) def test_validate_subport_missing_port_id(self): validator = rules.SubPortsValidator( self.segmentation_types, [{'segmentation_type': 'fake', 'segmentation_id': 100}]) self.assertRaises(n_exc.InvalidInput, validator.validate, self.context, basic_validation=True) class SubPortsValidatorPrepareTestCase(base.BaseTestCase): def setUp(self): super(SubPortsValidatorPrepareTestCase, self).setUp() self.segmentation_types = { constants.SEGMENTATION_TYPE_VLAN: plugin_utils.is_valid_vlan_tag} self.context = mock.ANY mock.patch.object(rules.SubPortsValidator, '_get_port_mtu', return_value=None).start() def test__prepare_subports_raise_no_provider_ext(self): validator = rules.SubPortsValidator( self.segmentation_types, [{'port_id': uuidutils.generate_uuid(), 'segmentation_type': 'inherit'}]) self.assertRaises(n_exc.InvalidInput, validator._prepare_subports, self.context) class SubPortsValidatorMtuSanityTestCase(test_plugin.Ml2PluginV2TestCase): def setUp(self): super(SubPortsValidatorMtuSanityTestCase, self).setUp() self.segmentation_types = { constants.SEGMENTATION_TYPE_VLAN: plugin_utils.is_valid_vlan_tag} def test_validate_subport_mtu_same_as_trunk(self): self._test_validate_subport_trunk_mtu(1500, 1500) def test_validate_subport_mtu_smaller_than_trunks(self): self._test_validate_subport_trunk_mtu(500, 1500) def test_validate_subport_mtu_greater_than_trunks(self): self._test_validate_subport_trunk_mtu(1500, 500) def test_validate_subport_mtu_unset_trunks_set(self): self._test_validate_subport_trunk_mtu(None, 500) def test_validate_subport_mtu_set_trunks_unset(self): self._test_validate_subport_trunk_mtu(500, None) def test_validate_subport_mtu_set_trunks_net_exception(self): self._test_validate_subport_trunk_mtu(1500, 'exc') def _test_validate_subport_trunk_mtu( self, subport_net_mtu, trunk_net_mtu): plugin = directory.get_plugin() orig_get_network = plugin.get_network orig_get_networks = plugin.get_networks def get_networks_adjust_mtu(*args, **kwargs): res = orig_get_networks(*args, **kwargs) res[0][api.MTU] = subport_net_mtu return res def get_network_adjust_mtu(*args, **kwargs): res = orig_get_network(*args, **kwargs) if res['name'] == 'net_trunk': if trunk_net_mtu == 'exc': raise n_exc.NetworkNotFound(net_id='net-id') res[api.MTU] = trunk_net_mtu elif res['name'] == 'net_subport': res[api.MTU] = subport_net_mtu return res with self.network('net_trunk') as trunk_net,\ self.subnet(network=trunk_net) as trunk_subnet,\ self.port(subnet=trunk_subnet) as trunk_port,\ self.network('net_subport') as subport_net,\ self.subnet(network=subport_net) as subport_subnet,\ self.port(subnet=subport_subnet) as subport,\ mock.patch.object(plugin, "get_network", side_effect=get_network_adjust_mtu),\ mock.patch.object(plugin, "get_networks", side_effect=get_networks_adjust_mtu): trunk = {'port_id': trunk_port['port']['id'], 'tenant_id': 'test_tenant', 'sub_ports': [{'port_id': subport['port']['id'], 'segmentation_type': 'vlan', 'segmentation_id': 2}]} validator = rules.SubPortsValidator( 
self.segmentation_types, trunk['sub_ports'], trunk['port_id']) if subport_net_mtu is None or trunk_net_mtu is None: validator.validate(self.context) elif subport_net_mtu == 'exc' or trunk_net_mtu == 'exc': validator.validate(self.context) elif subport_net_mtu <= trunk_net_mtu: validator.validate(self.context) else: self.assertRaises(trunk_exc.SubPortMtuGreaterThanTrunkPortMtu, validator.validate, self.context) class TrunkPortValidatorTestCase(test_plugin.Ml2PluginV2TestCase): def setUp(self): super(TrunkPortValidatorTestCase, self).setUp() self.drivers_patch = mock.patch.object(drivers, 'register').start() self.compat_patch = mock.patch.object( trunk_plugin.TrunkPlugin, 'check_compatibility').start() self.trunk_plugin = trunk_plugin.TrunkPlugin() self.trunk_plugin.add_segmentation_type( constants.SEGMENTATION_TYPE_VLAN, plugin_utils.is_valid_vlan_tag) def test_validate_port_parent_in_use_by_trunk(self): with self.port() as trunk_parent: trunk = {'port_id': trunk_parent['port']['id'], 'tenant_id': 'test_tenant', 'sub_ports': []} self.trunk_plugin.create_trunk( self.context, {trunk_api.ALIAS: trunk}) validator = rules.TrunkPortValidator(trunk_parent['port']['id']) self.assertRaises(trunk_exc.ParentPortInUse, validator.validate, self.context) def test_validate_port_id_in_use_by_unrelated_trunk(self): with self.port() as trunk_parent,\ self.port() as subport: trunk = {'port_id': trunk_parent['port']['id'], 'tenant_id': 'test_tenant', 'sub_ports': [{'port_id': subport['port']['id'], 'segmentation_type': 'vlan', 'segmentation_id': 2}]} self.trunk_plugin.create_trunk( self.context, {trunk_api.ALIAS: trunk}) validator = rules.TrunkPortValidator(subport['port']['id']) self.assertRaises(trunk_exc.TrunkPortInUse, validator.validate, self.context) def test_validate_port_has_binding_host(self): with self.port() as port: core_plugin = directory.get_plugin() port['port']['binding:host_id'] = 'host' core_plugin.update_port(self.context, port['port']['id'], port) validator = rules.TrunkPortValidator(port['port']['id']) self.assertTrue(validator.is_bound(self.context)) def test_validate_for_subport_calls_check(self): with self.port() as port: validator = rules.TrunkPortValidator(port['port']['id']) with mock.patch.object(validator, "check_not_in_use") as f: validator.validate(self.context, parent_port=False) f.assert_called_once_with(self.context) def test_validate_port_cannot_be_trunked_raises(self): with self.port() as port, \ mock.patch.object(rules.TrunkPortValidator, "can_be_trunked_or_untrunked", return_value=False), \ testtools.ExpectedException(trunk_exc.ParentPortInUse): validator = rules.TrunkPortValidator(port['port']['id']) validator.validate(self.context) def test_can_be_trunked_or_untrunked_returns_false(self): # need to trigger a driver registration fakes.FakeDriverCanTrunkBoundPort.create() self.trunk_plugin = trunk_plugin.TrunkPlugin() directory.add_plugin('trunk', self.trunk_plugin) with self.port() as port: core_plugin = directory.get_plugin() port['port']['binding:host_id'] = 'host' core_plugin.update_port(self.context, port['port']['id'], port) validator = rules.TrunkPortValidator(port['port']['id']) # port cannot be trunked because of binding mismatch self.assertFalse( validator.can_be_trunked_or_untrunked(self.context)) def test_can_be_trunked_or_untrunked_returns_true(self): # need to trigger a driver registration fakes.FakeDriverCanTrunkBoundPort.create() self.trunk_plugin = trunk_plugin.TrunkPlugin() directory.add_plugin('trunk', self.trunk_plugin) with self.port() as port, \ 
mock.patch.object(trunk_utils, "is_driver_compatible", return_value=True) as g: core_plugin = directory.get_plugin() port['port']['binding:host_id'] = 'host' core_plugin.update_port(self.context, port['port']['id'], port) validator = rules.TrunkPortValidator(port['port']['id']) self.assertTrue( validator.can_be_trunked_or_untrunked(self.context)) self.assertTrue(g.call_count) def test_can_be_trunked_or_untrunked_unbound_port(self): with self.port() as port: validator = rules.TrunkPortValidator(port['port']['id']) self.assertTrue( validator.can_be_trunked_or_untrunked(self.context)) def test_can_be_trunked_or_untrunked_raises_conflict(self): d1 = fakes.FakeDriver.create() d2 = fakes.FakeDriverWithAgent.create() self.trunk_plugin = trunk_plugin.TrunkPlugin() directory.add_plugin('trunk', self.trunk_plugin) self.trunk_plugin._drivers = [d1, d2] with self.port() as port, \ mock.patch.object(trunk_utils, "is_driver_compatible", return_value=True): core_plugin = directory.get_plugin() port['port']['binding:host_id'] = 'host' core_plugin.update_port(self.context, port['port']['id'], port) validator = rules.TrunkPortValidator(port['port']['id']) self.assertRaises( trunk_exc.TrunkPluginDriverConflict, validator.can_be_trunked_or_untrunked, self.context) def test_check_not_in_use_pass(self): with self.port() as port: validator = rules.TrunkPortValidator(port['port']['id']) self.assertIsNone(validator.check_not_in_use( self.context)) def test_check_not_in_use_raises(self): with self.port() as port: core_plugin = directory.get_plugin() port['port']['device_id'] = 'foo_device_id' core_plugin.update_port(self.context, port['port']['id'], port) validator = rules.TrunkPortValidator(port['port']['id']) self.assertRaises(n_exc.PortInUse, validator.check_not_in_use, self.context)
[file: neutron-16.0.0.0b2.dev214/neutron/tests/unit/services/trunk/test_utils.py]
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License.
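# The tests below cover neutron.services.trunk.utils: looking up agent types
# by host and evaluating driver/interface compatibility, using the fake trunk
# drivers defined in neutron.tests.unit.services.trunk.fakes.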
import mock from neutron.services.trunk import utils from neutron.tests.unit.plugins.ml2 import test_plugin from neutron.tests.unit.services.trunk import fakes class UtilsTestCase(test_plugin.Ml2PluginV2TestCase): def test_get_agent_types_by_host_returns_empty(self): self.assertFalse( utils.get_agent_types_by_host( self.context, 'foo_host')) def test_get_agent_types_by_host_returns_agents(self): with mock.patch("neutron.db.agents_db.AgentDbMixin.get_agents") as f: f.return_value = [{'agent_type': 'foo_type'}] self.assertEqual( ['foo_type'], utils.get_agent_types_by_host( self.context, 'foo_host')) def _test_is_driver_compatible(self, driver, interface, agent_types): return utils.is_driver_compatible(self.context, driver, interface, agent_types) def test_is_driver_compatible(self): driver = fakes.FakeDriverWithAgent.create() self.assertTrue(self._test_is_driver_compatible( driver, 'foo_intfs', ['foo_type'])) def test_is_driver_compatible_agent_based_agent_mismatch(self): driver = fakes.FakeDriverWithAgent.create() self.assertFalse(self._test_is_driver_compatible( driver, 'foo_intfs', ['foo_type_unknown'])) def test_is_driver_incompatible_because_of_interface_mismatch(self): driver = fakes.FakeDriverWithAgent.create() self.assertFalse(self._test_is_driver_compatible( driver, 'not_my_interface', ['foo_type'])) def test_is_driver_compatible_agentless(self): driver = fakes.FakeDriver.create() self.assertTrue(self._test_is_driver_compatible( driver, 'foo_intfs', ['foo_type'])) def test_is_driver_compatible_multiple_drivers(self): driver1 = fakes.FakeDriverWithAgent.create() driver2 = fakes.FakeDriver2.create() self.assertTrue(self._test_is_driver_compatible( driver1, 'foo_intfs', ['foo_type'])) self.assertFalse(self._test_is_driver_compatible( driver2, 'foo_intfs', ['foo_type']))
[file: neutron-16.0.0.0b2.dev214/neutron/tests/unit/test_auth.py]
# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
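# The tests below target the NeutronKeystoneContext WSGI middleware: each case
# builds a webob request carrying keystone-style X_* headers, runs it through
# the middleware, and asserts the attributes of the resulting neutron context
# (user, tenant, roles, auth token, request id).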
from oslo_middleware import request_id import webob from neutron import auth from neutron.tests import base class NeutronKeystoneContextTestCase(base.BaseTestCase): def setUp(self): super(NeutronKeystoneContextTestCase, self).setUp() @webob.dec.wsgify def fake_app(req): self.context = req.environ['neutron.context'] return webob.Response() self.context = None self.middleware = auth.NeutronKeystoneContext(fake_app) self.request = webob.Request.blank('/') self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken' def test_no_user_id(self): self.request.headers['X_PROJECT_ID'] = 'testtenantid' response = self.request.get_response(self.middleware) self.assertEqual('401 Unauthorized', response.status) def test_with_user_id(self): self.request.headers['X_PROJECT_ID'] = 'testtenantid' self.request.headers['X_USER_ID'] = 'testuserid' response = self.request.get_response(self.middleware) self.assertEqual('200 OK', response.status) self.assertEqual('testuserid', self.context.user_id) self.assertEqual('testuserid', self.context.user) def test_with_tenant_id(self): self.request.headers['X_PROJECT_ID'] = 'testtenantid' self.request.headers['X_USER_ID'] = 'test_user_id' response = self.request.get_response(self.middleware) self.assertEqual('200 OK', response.status) self.assertEqual('testtenantid', self.context.tenant_id) self.assertEqual('testtenantid', self.context.project_id) def test_roles_no_admin(self): self.request.headers['X_PROJECT_ID'] = 'testtenantid' self.request.headers['X_USER_ID'] = 'testuserid' self.request.headers['X_ROLES'] = 'role1, role2 , role3,role4,role5' response = self.request.get_response(self.middleware) self.assertEqual('200 OK', response.status) self.assertEqual(['role1', 'role2', 'role3', 'role4', 'role5'], self.context.roles) self.assertFalse(self.context.is_admin) def test_roles_with_admin(self): self.request.headers['X_PROJECT_ID'] = 'testtenantid' self.request.headers['X_USER_ID'] = 'testuserid' self.request.headers['X_ROLES'] = ('role1, role2 , role3,role4,role5,' 'AdMiN') response = self.request.get_response(self.middleware) self.assertEqual('200 OK', response.status) self.assertEqual(['role1', 'role2', 'role3', 'role4', 'role5', 'AdMiN'], self.context.roles) self.assertTrue(self.context.is_admin) def test_with_user_tenant_name(self): self.request.headers['X_PROJECT_ID'] = 'testtenantid' self.request.headers['X_USER_ID'] = 'testuserid' self.request.headers['X_PROJECT_NAME'] = 'testtenantname' self.request.headers['X_USER_NAME'] = 'testusername' response = self.request.get_response(self.middleware) self.assertEqual('200 OK', response.status) self.assertEqual('testuserid', self.context.user_id) self.assertEqual('testusername', self.context.user_name) self.assertEqual('testtenantid', self.context.tenant_id) self.assertEqual('testtenantname', self.context.project_name) def test_request_id_extracted_from_env(self): req_id = 'dummy-request-id' self.request.headers['X_PROJECT_ID'] = 'testtenantid' self.request.headers['X_USER_ID'] = 'testuserid' self.request.environ[request_id.ENV_REQUEST_ID] = req_id self.request.get_response(self.middleware) self.assertEqual(req_id, self.context.request_id) def test_with_auth_token(self): self.request.headers['X_PROJECT_ID'] = 'testtenantid' self.request.headers['X_USER_ID'] = 'testuserid' response = self.request.get_response(self.middleware) self.assertEqual('200 OK', response.status) self.assertEqual('testauthtoken', self.context.auth_token) def test_without_auth_token(self): self.request.headers['X_PROJECT_ID'] = 'testtenantid' 
self.request.headers['X_USER_ID'] = 'testuserid' del self.request.headers['X_AUTH_TOKEN'] self.request.get_response(self.middleware) self.assertIsNone(self.context.auth_token)
[file: neutron-16.0.0.0b2.dev214/neutron/tests/unit/test_manager.py]
# Copyright (c) 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import weakref import fixtures import mock from neutron_lib.plugins import constants as lib_const from neutron_lib.plugins import directory from oslo_config import cfg from neutron import manager from neutron.plugins.common import constants from neutron.tests import base from neutron.tests.unit import dummy_plugin from neutron.tests.unit import testlib_api DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' class MultiServiceCorePlugin(object): supported_extension_aliases = ['fwaas', dummy_plugin.Dummy.get_alias()] class CorePluginWithAgentNotifiers(object): supported_extension_aliases = [] agent_notifiers = {'l3': 'l3_agent_notifier', 'dhcp': 'dhcp_agent_notifier'} class NeutronManagerTestCase(base.BaseTestCase): def setUp(self): ext_mapping = constants.EXT_TO_SERVICE_MAPPING if dummy_plugin.Dummy.get_alias() not in ext_mapping: ext_mapping[dummy_plugin.Dummy.get_alias()] = ( dummy_plugin.DUMMY_SERVICE_TYPE) super(NeutronManagerTestCase, self).setUp() self.config_parse() self.setup_coreplugin(load_plugins=False) self.useFixture( fixtures.MonkeyPatch('neutron.manager.NeutronManager._instance')) def tearDown(self): ext_mapping = constants.EXT_TO_SERVICE_MAPPING if dummy_plugin.Dummy.get_alias() in ext_mapping: del ext_mapping[dummy_plugin.Dummy.get_alias()] super(NeutronManagerTestCase, self).tearDown() def test_service_plugin_is_loaded(self): cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) cfg.CONF.set_override("service_plugins", ["neutron.tests.unit.dummy_plugin." "DummyServicePlugin"]) manager.init() plugin = directory.get_plugin(dummy_plugin.DUMMY_SERVICE_TYPE) self.assertIsInstance( plugin, dummy_plugin.DummyServicePlugin, "loaded plugin should be of type neutronDummyPlugin") def test_service_plugin_by_name_is_loaded(self): cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) cfg.CONF.set_override("service_plugins", [dummy_plugin.Dummy.get_alias()]) manager.init() plugin = directory.get_plugin(dummy_plugin.DUMMY_SERVICE_TYPE) self.assertIsInstance( plugin, dummy_plugin.DummyServicePlugin, "loaded plugin should be of type neutronDummyPlugin") def test_multiple_plugins_specified_for_service_type(self): cfg.CONF.set_override("service_plugins", ["neutron.tests.unit.dummy_plugin." "DummyServicePlugin", "neutron.tests.unit.dummy_plugin."
"DummyServicePlugin"]) cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) manager.NeutronManager.get_instance() plugins = directory.get_plugins() # CORE, DUMMY self.assertEqual(2, len(plugins)) def test_multiple_plugins_by_name_specified_for_service_type(self): cfg.CONF.set_override("service_plugins", [dummy_plugin.Dummy.get_alias(), dummy_plugin.Dummy.get_alias()]) cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) manager.NeutronManager.get_instance() plugins = directory.get_plugins() # CORE, DUMMY self.assertEqual(2, len(plugins)) def test_multiple_plugins_mixed_specified_for_service_type(self): cfg.CONF.set_override("service_plugins", ["neutron.tests.unit.dummy_plugin." "DummyServicePlugin", dummy_plugin.Dummy.get_alias()]) cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) manager.NeutronManager.get_instance() plugins = directory.get_plugins() # CORE, DUMMY self.assertEqual(2, len(plugins)) def test_service_plugin_conflicts_with_core_plugin(self): cfg.CONF.set_override("service_plugins", ["neutron.tests.unit.dummy_plugin." "DummyServicePlugin"]) cfg.CONF.set_override("core_plugin", "neutron.tests.unit.test_manager." "MultiServiceCorePlugin") manager.NeutronManager.get_instance() plugins = directory.get_plugins() # CORE, FIREWALL, DUMMY self.assertEqual(3, len(plugins)) def test_load_plugins_with_requirements(self): cfg.CONF.set_override("service_plugins", ["neutron.tests.unit.dummy_plugin." "DummyWithRequireServicePlugin"]) cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) with mock.patch( "neutron.tests.unit.dummy_plugin.DummyServicePlugin.__init__", return_value=None ) as dummy_init_mock, mock.patch( "neutron.tests.unit.dummy_plugin." "DummyWithRequireServicePlugin.__init__", return_value=None ) as dummy_with_require_init_mock: manager.NeutronManager.get_instance() plugins = directory.get_plugins() # DUMMY will also be initialized since DUMMY_REQIURE needs it. # CORE, DUMMY, DUMMY_REQIURE self.assertEqual(3, len(plugins)) # ensure that DUMMY and DUMMY_REQIURE was instantiate only once: self.assertEqual(1, dummy_init_mock.call_count) self.assertEqual(1, dummy_with_require_init_mock.call_count) def test_load_plugins_with_requirements_with_parent(self): cfg.CONF.set_override("service_plugins", ["neutron.tests.unit.dummy_plugin." "DummyServicePlugin", "neutron.tests.unit.dummy_plugin." "DummyWithRequireServicePlugin"]) cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) with mock.patch( "neutron.tests.unit.dummy_plugin.DummyServicePlugin.__init__", return_value=None ) as dummy_init_mock, mock.patch( "neutron.tests.unit.dummy_plugin." "DummyWithRequireServicePlugin.__init__", return_value=None ) as dummy_with_require_init_mock: manager.NeutronManager.get_instance() plugins = directory.get_plugins() # CORE, DUMMY, DUMMY_REQIURE self.assertEqual(3, len(plugins)) # ensure that DUMMY and DUMMY_REQIURE was instantiate only once: self.assertEqual(1, dummy_init_mock.call_count) self.assertEqual(1, dummy_with_require_init_mock.call_count) def test_load_plugins_with_requirements_child_first(self): cfg.CONF.set_override("service_plugins", ["neutron.tests.unit.dummy_plugin." "DummyWithRequireServicePlugin", "neutron.tests.unit.dummy_plugin." "DummyServicePlugin"]) cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) with mock.patch( "neutron.tests.unit.dummy_plugin.DummyServicePlugin.__init__", return_value=None ) as dummy_init_mock, mock.patch( "neutron.tests.unit.dummy_plugin." 
"DummyWithRequireServicePlugin.__init__", return_value=None ) as dummy_with_require_init_mock: manager.NeutronManager.get_instance() plugins = directory.get_plugins() # CORE, DUMMY, DUMMY_REQIURE self.assertEqual(3, len(plugins)) # ensure that DUMMY and DUMMY_REQIURE was instantiate only once: self.assertEqual(1, dummy_init_mock.call_count) self.assertEqual(1, dummy_with_require_init_mock.call_count) def test_core_plugin_supports_services(self): cfg.CONF.set_override("core_plugin", "neutron.tests.unit.test_manager." "MultiServiceCorePlugin") manager.init() svc_plugins = directory.get_plugins() self.assertEqual(3, len(svc_plugins)) self.assertIn(lib_const.CORE, svc_plugins.keys()) self.assertIn(dummy_plugin.DUMMY_SERVICE_TYPE, svc_plugins.keys()) def test_load_default_service_plugins(self): self.patched_default_svc_plugins.return_value = { 'neutron.tests.unit.dummy_plugin.DummyServicePlugin': dummy_plugin.DUMMY_SERVICE_TYPE } cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) manager.init() svc_plugins = directory.get_plugins() self.assertIn(dummy_plugin.DUMMY_SERVICE_TYPE, svc_plugins) def test_dhcp_agents_per_network_min(self): # Ensures dhcp_agents_per_network must be at least 1. self.assertRaises(ValueError, cfg.CONF.set_override, 'dhcp_agents_per_network', 0) def test_pre_plugin_validation(self): self.assertIsNotNone(manager.validate_pre_plugin_load()) cfg.CONF.set_override('core_plugin', 'dummy.plugin') self.assertIsNone(manager.validate_pre_plugin_load()) def test_manager_gathers_agent_notifiers_from_service_plugins(self): cfg.CONF.set_override("service_plugins", ["neutron.tests.unit.dummy_plugin." "DummyServicePlugin"]) cfg.CONF.set_override("core_plugin", "neutron.tests.unit.test_manager." "CorePluginWithAgentNotifiers") expected = {'l3': 'l3_agent_notifier', 'dhcp': 'dhcp_agent_notifier', dummy_plugin.Dummy.get_alias(): 'dummy_agent_notifier'} manager.init() core_plugin = directory.get_plugin() self.assertEqual(expected, core_plugin.agent_notifiers) def test_load_class_for_provider(self): manager.NeutronManager.load_class_for_provider( 'neutron.core_plugins', 'ml2') def test_load_class_for_provider_wrong_plugin(self): with testlib_api.ExpectedException(ImportError): manager.NeutronManager.load_class_for_provider( 'neutron.core_plugins', 'ml2XXXXXX') def test_get_service_plugin_by_path_prefix_3(self): cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS) nm = manager.NeutronManager.get_instance() class pclass(object): def __init__(self, path_prefix): self.path_prefix = path_prefix x_plugin, y_plugin = pclass('xpa'), pclass('ypa') directory.add_plugin('x', x_plugin) directory.add_plugin('y', y_plugin) self.assertEqual(weakref.proxy(x_plugin), nm.get_service_plugin_by_path_prefix('xpa')) self.assertEqual(weakref.proxy(y_plugin), nm.get_service_plugin_by_path_prefix('ypa')) self.assertIsNone(nm.get_service_plugin_by_path_prefix('abc')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/test_neutron_plugin_base_v2.py0000644000175000017500000000173600000000000027047 0ustar00coreycorey00000000000000# Copyright (c) 2017 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron import manager from neutron.tests import base class NeutronPluginBaseV2TestCase(base.BaseTestCase): def test_can_load_core_plugin_without_datastore(self): cfg.CONF.set_override("core_plugin", 'neutron.tests.unit.dummy_plugin.' 'DummyCorePluginWithoutDatastore') manager.init()
[file: neutron-16.0.0.0b2.dev214/neutron/tests/unit/test_opts.py]
# Copyright (c) 2016 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from neutron import opts from neutron.tests import base class OptsTestCase(base.BaseTestCase): def test_list_sriov_agent_opts(self): self.assertEqual('sriov_nic', opts.list_sriov_agent_opts()[0][0])
[file: neutron-16.0.0.0b2.dev214/neutron/tests/unit/test_policy.py]
# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License.
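# Note that the policy tests below avoid depending on the policy files
# shipped with the tree: they either write a temporary policy file or
# preload rules in memory via policy.set_rules().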
"""Test of Policy Engine For Neutron""" import mock from neutron_lib.api import attributes from neutron_lib import constants from neutron_lib import context from neutron_lib import exceptions from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from oslo_config import cfg from oslo_db import exception as db_exc from oslo_policy import fixture as op_fixture from oslo_policy import policy as oslo_policy from oslo_serialization import jsonutils from oslo_utils import importutils from oslo_utils import uuidutils import neutron from neutron import policy from neutron.tests import base _uuid = uuidutils.generate_uuid class PolicyFileTestCase(base.BaseTestCase): def setUp(self): super(PolicyFileTestCase, self).setUp() self.context = context.Context('fake', 'fake', is_admin=False) self.target = {'tenant_id': 'fake'} def test_modified_policy_reloads(self): tmpfilename = self.get_temp_file_path('policy') action = "example:test" with open(tmpfilename, "w") as policyfile: policyfile.write("""{"example:test": ""}""") policy.refresh(policy_file=tmpfilename) policy.enforce(self.context, action, self.target) with open(tmpfilename, "w") as policyfile: policyfile.write("""{"example:test": "!"}""") policy.refresh(policy_file=tmpfilename) self.target = {'tenant_id': 'fake_tenant'} self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) class PolicyTestCase(base.BaseTestCase): def setUp(self): super(PolicyTestCase, self).setUp() # NOTE(vish): preload rules to circumvent reloading from file rules = { "true": '@', "example:allowed": '@', "example:denied": '!', "example:get_http": "http:http://www.example.com", "example:my_file": "role:compute_admin or tenant_id:%(tenant_id)s", "example:early_and_fail": "! 
and @", "example:early_or_success": "@ or !", "example:lowercase_admin": "role:admin or role:sysadmin", "example:uppercase_admin": "role:ADMIN or role:sysadmin", } policy.refresh() # NOTE(vish): then overload underlying rules policy.set_rules(oslo_policy.Rules.from_dict(rules)) self.context = context.Context('fake', 'fake', roles=['member']) self.target = {} def test_enforce_nonexistent_action_throws(self): action = "example:noexist" self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) def test_enforce_bad_action_throws(self): action = "example:denied" self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) def test_check_bad_action_noraise(self): action = "example:denied" result = policy.check(self.context, action, self.target) self.assertFalse(result) def test_check_non_existent_action(self): action = "example:idonotexist" result_1 = policy.check(self.context, action, self.target) self.assertFalse(result_1) result_2 = policy.check(self.context, action, self.target, might_not_exist=True) self.assertTrue(result_2) def test_enforce_good_action(self): action = "example:allowed" result = policy.enforce(self.context, action, self.target) self.assertTrue(result) def test_enforce_http_true(self): self.useFixture(op_fixture.HttpCheckFixture()) action = "example:get_http" target = {} result = policy.enforce(self.context, action, target) self.assertTrue(result) def test_enforce_http_false(self): self.useFixture(op_fixture.HttpCheckFixture(False)) action = "example:get_http" target = {} self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, target) def test_templatized_enforcement(self): target_mine = {'tenant_id': 'fake'} target_not_mine = {'tenant_id': 'another'} action = "example:my_file" policy.enforce(self.context, action, target_mine) self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, target_not_mine) def test_early_AND_enforcement(self): action = "example:early_and_fail" self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) def test_early_OR_enforcement(self): action = "example:early_or_success" policy.enforce(self.context, action, self.target) def test_ignore_case_role_check(self): lowercase_action = "example:lowercase_admin" uppercase_action = "example:uppercase_admin" # NOTE(dprince) we mix case in the Admin role here to ensure # case is ignored admin_context = context.Context('admin', 'fake', roles=['AdMiN']) policy.enforce(admin_context, lowercase_action, self.target) policy.enforce(admin_context, uppercase_action, self.target) class DefaultPolicyTestCase(base.BaseTestCase): def setUp(self): super(DefaultPolicyTestCase, self).setUp() tmpfilename = self.get_temp_file_path('policy.json') self.rules = { "default": '', "example:exist": '!', } with open(tmpfilename, "w") as policyfile: jsonutils.dump(self.rules, policyfile) policy.refresh(policy_file=tmpfilename) self.context = context.Context('fake', 'fake') def test_policy_called(self): self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, "example:exist", {}) def test_not_found_policy_calls_default(self): policy.enforce(self.context, "example:noexist", {}) FAKE_RESOURCE_NAME = 'fake_resource' FAKE_SPECIAL_RESOURCE_NAME = 'fake_policy' FAKE_RESOURCES = {"%ss" % FAKE_RESOURCE_NAME: {'attr': {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': None, 'enforce_policy': True, 'validate': 
{'type:dict': {'sub_attr_1': {'type:string': None}, 'sub_attr_2': {'type:string': None}}} }, 'list_attr': {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': None, 'enforce_policy': True }}, # special plural name "%s" % FAKE_SPECIAL_RESOURCE_NAME.replace('y', 'ies'): {'attr': {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': None, 'enforce_policy': True, 'validate': {'type:dict': {'sub_attr_1': {'type:string': None}, 'sub_attr_2': {'type:string': None}}} }}} class NeutronPolicyTestCase(base.BaseTestCase): def fakepolicyinit(self, **kwargs): policy._ENFORCER = oslo_policy.Enforcer(cfg.CONF) policy._ENFORCER.set_rules(oslo_policy.Rules(self.rules)) def setUp(self): super(NeutronPolicyTestCase, self).setUp() # Add Fake resources to RESOURCE_ATTRIBUTE_MAP attributes.RESOURCES.update(FAKE_RESOURCES) self._set_rules() self.patcher = mock.patch.object(neutron.policy, 'init', new=self.fakepolicyinit) self.patcher.start() policy.refresh() self.addCleanup(policy.refresh) self.context = context.Context('fake', 'fake', roles=['user']) plugin_klass = importutils.import_class( "neutron.db.db_base_plugin_v2.NeutronDbPluginV2") directory.add_plugin(plugin_constants.CORE, plugin_klass()) def _set_rules(self, **kwargs): rules_dict = { "context_is_admin": "role:admin", "context_is_advsvc": "role:advsvc", "admin_or_network_owner": "rule:context_is_admin or " "tenant_id:%(network:tenant_id)s", "admin_or_owner": ("rule:context_is_admin or " "tenant_id:%(tenant_id)s"), "admin_only": "rule:context_is_admin", "regular_user": "role:user", "shared": "field:networks:shared=True", "external": "field:networks:router:external=True", "network_device": "field:port:device_owner=~^network:", "default": '@', "create_network": "rule:admin_or_owner", "create_network:shared": "rule:admin_only", "update_network": '@', "update_network:shared": "rule:admin_only", "get_network": "rule:admin_or_owner or rule:shared or " "rule:external or rule:context_is_advsvc", "create_subnet": "rule:admin_or_network_owner", "create_port:mac": "rule:admin_or_network_owner or " "rule:context_is_advsvc", "create_port:device_owner": "not rule:network_device", "create_port:fixed_ips": ( "rule:context_is_advsvc or rule:admin_or_network_owner or " "rule:shared"), "create_port:fixed_ips:ip_address": ( "rule:context_is_advsvc or rule:admin_or_network_owner"), "create_port:fixed_ips:subnet_id": ( "rule:context_is_advsvc or rule:admin_or_network_owner or " "rule:shared"), "update_port": "rule:admin_or_owner or rule:context_is_advsvc", "get_port": "rule:admin_or_owner or rule:context_is_advsvc", "delete_port": "rule:admin_or_owner or rule:context_is_advsvc", "create_fake_resource": "rule:admin_or_owner", "create_fake_resource:attr": "rule:admin_or_owner", "create_fake_resource:attr:sub_attr_1": "rule:admin_or_owner", "create_fake_resource:attr:sub_attr_2": "rule:admin_only", "create_fake_resource:list_attr": "rule:admin_only_or_owner", "create_fake_resource:list_attr:admin_element": "rule:admin_only", "create_fake_resource:list_attr:user_element": ( "rule:admin_or_owner"), "create_fake_policy:": "rule:admin_or_owner", } rules_dict.update(**kwargs) self.rules = oslo_policy.Rules.from_dict(rules_dict) def _test_action_on_attr(self, context, action, obj, attr, value, exception=None, **kwargs): action = "%s_%s" % (action, obj) target = {'tenant_id': 'the_owner', attr: value} if kwargs: target.update(kwargs) if exception: self.assertRaises(exception, policy.enforce, context, action, target) else: result = 
policy.enforce(context, action, target) self.assertTrue(result) def _test_nonadmin_action_on_attr(self, action, obj, attr, value, exception=None, **kwargs): user_context = context.Context('', "user", roles=['user']) self._test_action_on_attr(user_context, action, obj, attr, value, exception, **kwargs) def _test_advsvc_action_on_attr(self, action, obj, attr, value, exception=None, **kwargs): user_context = context.Context('', "user", roles=['user', 'advsvc']) self._test_action_on_attr(user_context, action, obj, attr, value, exception, **kwargs) def test_nonadmin_write_on_private_fails(self): self._test_nonadmin_action_on_attr( 'create', 'network', 'shared', False, oslo_policy.PolicyNotAuthorized) def test_nonadmin_read_on_private_fails(self): self._test_nonadmin_action_on_attr('get', 'network', 'shared', False, oslo_policy.PolicyNotAuthorized) def test_nonadmin_write_on_shared_fails(self): self._test_nonadmin_action_on_attr('create', 'network', 'shared', True, oslo_policy.PolicyNotAuthorized) def test_create_port_device_owner_regex(self): blocked_values = (constants.DEVICE_OWNER_NETWORK_PREFIX, 'network:abdef', constants.DEVICE_OWNER_DHCP, constants.DEVICE_OWNER_ROUTER_INTF) for val in blocked_values: self._test_advsvc_action_on_attr( 'create', 'port', 'device_owner', val, oslo_policy.PolicyNotAuthorized ) ok_values = ('network', 'networks', 'my_network:test', 'my_network:') for val in ok_values: self._test_advsvc_action_on_attr( 'create', 'port', 'device_owner', val ) def test_create_port_fixed_ips_on_shared_network(self): def fakegetnetwork(*args, **kwargs): return {'tenant_id': 'fake', 'shared': True} kwargs = {'network_id': _uuid()} with mock.patch.object(directory.get_plugin(), 'get_network', new=fakegetnetwork): self._test_nonadmin_action_on_attr( 'create', 'port', 'fixed_ips', [{'subnet_id': 'test-subnet-id'}], **kwargs) self._test_nonadmin_action_on_attr( 'create', 'port', 'fixed_ips', [{'ip_address': '1.2.3.4'}], exception=oslo_policy.PolicyNotAuthorized, **kwargs) def test_create_port_fixed_ips_on_nonshared_network(self): def fakegetnetwork(*args, **kwargs): return {'tenant_id': 'fake', 'shared': False} kwargs = {'network_id': _uuid()} with mock.patch.object(directory.get_plugin(), 'get_network', new=fakegetnetwork): self._test_nonadmin_action_on_attr( 'create', 'port', 'fixed_ips', [{'subnet_id': 'test-subnet-id'}], exception=oslo_policy.PolicyNotAuthorized, **kwargs) self._test_nonadmin_action_on_attr( 'create', 'port', 'fixed_ips', [{'ip_address': '1.2.3.4'}], exception=oslo_policy.PolicyNotAuthorized, **kwargs) def test_advsvc_get_network_works(self): self._test_advsvc_action_on_attr('get', 'network', 'shared', False) def test_advsvc_create_network_fails(self): self._test_advsvc_action_on_attr('create', 'network', 'shared', False, oslo_policy.PolicyNotAuthorized) def test_advsvc_create_port_works(self): self._test_advsvc_action_on_attr('create', 'port:mac', 'shared', False) def test_advsvc_get_port_works(self): self._test_advsvc_action_on_attr('get', 'port', 'shared', False) def test_advsvc_update_port_works(self): kwargs = {constants.ATTRIBUTES_TO_UPDATE: ['shared']} self._test_advsvc_action_on_attr('update', 'port', 'shared', True, **kwargs) def test_advsvc_delete_port_works(self): self._test_advsvc_action_on_attr('delete', 'port', 'shared', False) def test_advsvc_create_subnet_fails(self): self._test_advsvc_action_on_attr('create', 'subnet', 'shared', False, oslo_policy.PolicyNotAuthorized) def test_nonadmin_read_on_shared_succeeds(self): 
self._test_nonadmin_action_on_attr('get', 'network', 'shared', True) def _test_enforce_adminonly_attribute(self, action, **kwargs): admin_context = context.get_admin_context() target = {'shared': True} if kwargs: target.update(kwargs) result = policy.enforce(admin_context, action, target) self.assertTrue(result) def test_enforce_adminonly_attribute_create(self): self._test_enforce_adminonly_attribute('create_network') def test_enforce_adminonly_attribute_update(self): kwargs = {constants.ATTRIBUTES_TO_UPDATE: ['shared']} self._test_enforce_adminonly_attribute('update_network', **kwargs) def test_reset_adminonly_attr_to_default_fails(self): kwargs = {constants.ATTRIBUTES_TO_UPDATE: ['shared']} self._test_nonadmin_action_on_attr( 'update', 'network', 'shared', False, oslo_policy.PolicyNotAuthorized, **kwargs) def test_enforce_adminonly_attribute_nonadminctx_returns_403(self): action = "create_network" target = {'shared': True, 'tenant_id': 'somebody_else'} self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, target) def _test_build_subattribute_match_rule(self, validate_value): bk = FAKE_RESOURCES['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] FAKE_RESOURCES['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] = ( validate_value) action = "create_" + FAKE_RESOURCE_NAME target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x'}} self.assertFalse(policy._build_subattr_match_rule( 'attr', FAKE_RESOURCES['%ss' % FAKE_RESOURCE_NAME]['attr'], action, target)) FAKE_RESOURCES['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] = bk def test_build_subattribute_match_rule_empty_dict_validator(self): self._test_build_subattribute_match_rule({}) def test_build_subattribute_match_rule_wrong_validation_info(self): self._test_build_subattribute_match_rule( {'type:dict': 'wrong_stuff'}) def test_build_match_rule_special_pluralized(self): action = "create_" + FAKE_SPECIAL_RESOURCE_NAME pluralized = "create_fake_policies" target = {} result = policy._build_match_rule(action, target, pluralized) self.assertEqual("rule:" + action, str(result)) def test_build_match_rule_normal_pluralized_when_create(self): action = "create_" + FAKE_RESOURCE_NAME target = {} result = policy._build_match_rule(action, target, None) self.assertEqual("rule:" + action, str(result)) def test_build_match_rule_normal_pluralized_when_update(self): action = "update_" + FAKE_RESOURCE_NAME target = {} result = policy._build_match_rule(action, target, None) self.assertEqual("rule:" + action, str(result)) def test_enforce_subattribute(self): action = "create_" + FAKE_RESOURCE_NAME target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x'}} result = policy.enforce(self.context, action, target, None) self.assertTrue(result) def test_enforce_admin_only_subattribute(self): action = "create_" + FAKE_RESOURCE_NAME target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x', 'sub_attr_2': 'y'}} result = policy.enforce(context.get_admin_context(), action, target, None) self.assertTrue(result) def test_enforce_admin_only_subattribute_nonadminctx_returns_403(self): action = "create_" + FAKE_RESOURCE_NAME target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x', 'sub_attr_2': 'y'}} self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, target, None) def test_enforce_regularuser_on_read(self): action = "get_network" target = {'shared': True, 'tenant_id': 'somebody_else'} result = policy.enforce(self.context, action, target) self.assertTrue(result) def test_enforce_tenant_id_check(self): # 
Trigger a policy with rule admin_or_owner
        action = "create_network"
        target = {'tenant_id': 'fake'}
        result = policy.enforce(self.context, action, target)
        self.assertTrue(result)

    def test_enforce_tenant_id_check_parent_resource(self):
        def fakegetnetwork(*args, **kwargs):
            return {'tenant_id': 'fake'}

        action = "create_port:mac"
        with mock.patch.object(directory.get_plugin(), 'get_network',
                               new=fakegetnetwork):
            target = {'network_id': 'whatever'}
            result = policy.enforce(self.context, action, target)
            self.assertTrue(result)

    def test_enforce_plugin_failure(self):
        def fakegetnetwork(*args, **kwargs):
            raise NotImplementedError('Blast!')

        # The policy check and plugin method used in this test are
        # irrelevant, so long as we verify that, if *f* blows up, the
        # policy engine still propagates the exception.
        action = "create_port:mac"
        with mock.patch.object(directory.get_plugin(), 'get_network',
                               new=fakegetnetwork):
            target = {'network_id': 'whatever'}
            self.assertRaises(NotImplementedError,
                              policy.enforce,
                              self.context, action, target)

    def test_enforce_subattribute_as_list(self):
        action = "create_" + FAKE_RESOURCE_NAME
        target = {
            'tenant_id': 'fake',
            'list_attr': [{'user_element': 'x'}]}
        result = policy.enforce(self.context, action, target, None)
        self.assertTrue(result)

    def test_enforce_subattribute_as_list_forbidden(self):
        action = "create_" + FAKE_RESOURCE_NAME
        target = {
            'tenant_id': 'fake',
            'list_attr': [{'admin_element': 'x'}]}
        self.assertRaises(oslo_policy.PolicyNotAuthorized,
                          policy.enforce,
                          self.context, action, target, None)

    def test_retryrequest_on_notfound(self):
        failure = exceptions.NetworkNotFound(net_id='whatever')
        action = "create_port:mac"
        with mock.patch.object(directory.get_plugin(), 'get_network',
                               side_effect=failure):
            target = {'network_id': 'whatever'}
            try:
                policy.enforce(self.context, action, target)
                self.fail("Did not raise RetryRequest")
            except db_exc.RetryRequest as e:
                self.assertEqual(failure, e.inner_exc)

    def test_enforce_tenant_id_check_parent_resource_bw_compatibility(self):
        def fakegetnetwork(*args, **kwargs):
            return {'tenant_id': 'fake'}

        self._set_rules(
            admin_or_network_owner="role:admin or "
                                   "tenant_id:%(network_tenant_id)s")
        action = "create_port:mac"
        with mock.patch.object(directory.get_plugin(), 'get_network',
                               new=fakegetnetwork):
            target = {'network_id': 'whatever'}
            result = policy.enforce(self.context, action, target)
            self.assertTrue(result)

    def test_tenant_id_check_no_target_field_raises(self):
        # Try and add a bad rule
        self.assertRaises(
            exceptions.PolicyInitError,
            oslo_policy.Rules.from_dict,
            {'test_policy': 'tenant_id:(wrong_stuff)'})

    def test_tenant_id_check_caches_extracted_fields(self):
        plugin = directory.get_plugin()
        with mock.patch.object(plugin, 'get_network',
                               return_value={'tenant_id': 'fake'}) as getter:
            action = "create_port:mac"
            for i in range(2):
                target = {'network_id': 'whatever'}
                policy.enforce(self.context, action, target)
        self.assertEqual(1, getter.call_count)

    def _test_enforce_tenant_id_raises(self, bad_rule):
        self._set_rules(admin_or_owner=bad_rule)
        # Trigger a policy with rule admin_or_owner
        action = "create_network"
        target = {'tenant_id': 'fake'}
        self.fakepolicyinit()
        self.assertRaises(exceptions.PolicyCheckError,
                          policy.enforce,
                          self.context, action, target)

    def test_enforce_tenant_id_check_malformed_target_field_raises(self):
        self._test_enforce_tenant_id_raises('tenant_id:%(malformed_field)s')

    def test_enforce_tenant_id_check_invalid_parent_resource_raises(self):
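        # The tenant_id checks exercised here are plain oslo.policy generic
        # checks. A minimal, self-contained sketch of their behavior,
        # independent of neutron's engine (the rule name 'get_thing' is
        # hypothetical, used only for illustration):
        #
        #     from oslo_config import cfg
        #     from oslo_policy import policy as oslo_policy
        #
        #     enforcer = oslo_policy.Enforcer(cfg.CONF)
        #     enforcer.set_rules(oslo_policy.Rules.from_dict(
        #         {'get_thing': 'role:admin or tenant_id:%(tenant_id)s'}))
        #     creds = {'roles': ['member'], 'tenant_id': 'fake'}
        #     # owner of the target passes, a different tenant does not
        #     assert enforcer.enforce('get_thing', {'tenant_id': 'fake'},
        #                             creds)
        #     assert not enforcer.enforce('get_thing', {'tenant_id': 'other'},
        #                                 creds)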
        self._test_enforce_tenant_id_raises('tenant_id:%(foobaz_tenant_id)s')

    def test_process_rules(self):
        action = "create_" + FAKE_RESOURCE_NAME
        # Construct RuleChecks for an action, attribute and subattribute
        match_rule = oslo_policy.RuleCheck('rule', action)
        attr_rule = oslo_policy.RuleCheck(
            'rule', '%s:%ss' % (action, FAKE_RESOURCE_NAME))
        sub_attr_rules = [oslo_policy.RuleCheck(
            'rule', '%s:%s:%s' % (action, 'attr', 'sub_attr_1'))]
        # Build an AndCheck from the given RuleChecks
        # Make the checks nested to better check the recursion
        sub_attr_rules = oslo_policy.AndCheck(sub_attr_rules)
        attr_rule = oslo_policy.AndCheck(
            [attr_rule, sub_attr_rules])
        match_rule = oslo_policy.AndCheck([match_rule, attr_rule])
        # Assert that the rules are correctly extracted from the match_rule
        rules = policy._process_rules_list([], match_rule)
        self.assertEqual(['create_fake_resource',
                          'create_fake_resource:fake_resources',
                          'create_fake_resource:attr:sub_attr_1'], rules)

    @mock.patch.object(policy.LOG, 'isEnabledFor', return_value=True)
    @mock.patch.object(policy.LOG, 'debug')
    def test_log_rule_list(self, mock_debug, mock_is_e):
        policy.log_rule_list(oslo_policy.RuleCheck('rule', 'create_'))
        self.assertTrue(mock_is_e.called)
        self.assertTrue(mock_debug.called)

    def test__is_attribute_explicitly_set(self):
        action = 'create'
        attr = 'attr'
        target = {attr: 'valueA', 'tgt-tenant': 'tenantA'}
        resource = {attr: {'allow_post': True,
                           'allow_put': True,
                           'is_visible': True,
                           'enforce_policy': True,
                           'validate': {'type:string': 10}}}

        result = policy._is_attribute_explicitly_set(
            attr, resource, target, action)
        self.assertTrue(result)

        target = {'tgt-tenant': 'tenantA'}
        result = policy._is_attribute_explicitly_set(
            attr, resource, target, action)
        self.assertFalse(result)

        resource = {attr: {'allow_post': True,
                           'allow_put': True,
                           'is_visible': True,
                           'default': 'DfltValue',
                           'enforce_policy': True,
                           'validate': {'type:string': 10}}}
        result = policy._is_attribute_explicitly_set(
            attr, resource, target, action)
        self.assertFalse(result)

        target = {attr: 'DfltValue', 'tgt-tenant': 'tenantA'}
        result = policy._is_attribute_explicitly_set(
            attr, resource, target, action)
        self.assertFalse(result)

        target = {attr: constants.ATTR_NOT_SPECIFIED,
                  'tgt-tenant': 'tenantA'}
        result = policy._is_attribute_explicitly_set(
            attr, resource, target, action)
        self.assertFalse(result)

    @mock.patch("neutron_lib.services.constants.EXT_PARENT_RESOURCE_MAPPING",
                {'parentresource': 'registered_plugin_name'})
    @mock.patch("neutron_lib.plugins.directory.get_plugin")
    def test_enforce_tenant_id_check_parent_resource_owner(
            self, mock_get_plugin):
        def fakegetparent(*args, **kwargs):
            return {'tenant_id': 'fake'}

        mock_plugin = mock.Mock()
        mock_plugin.get_parentresource = fakegetparent
        mock_get_plugin.return_value = mock_plugin
        self._set_rules(
            admin_or_ext_parent_owner="rule:context_is_admin or "
                                      "tenant_id:%(ext_parent:tenant_id)s",
            create_parentresource_subresource=(
                "rule:admin_or_ext_parent_owner"))
        self.fakepolicyinit()
        action = 'create_parentresource_subresource'
        target = {'ext_parent_parentresource_id': 'whatever', 'foo': 'bar'}
        result = policy.enforce(self.context, action, target)
        mock_get_plugin.assert_called_with('registered_plugin_name')
        self.assertTrue(result)


neutron-16.0.0.0b2.dev214/neutron/tests/unit/test_service.py

# Copyright 2015 Mirantis Inc.
# All Rights Reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from oslo_concurrency import processutils from oslo_config import cfg from neutron import service from neutron.tests import base from neutron.tests.unit import test_wsgi class TestServiceHelpers(base.BaseTestCase): def test_get_workers(self): num_workers = service._get_worker_count() self.assertGreaterEqual(num_workers, 1) self.assertLessEqual(num_workers, processutils.get_worker_count()) class TestRpcWorker(test_wsgi.TestServiceBase): def test_reset(self): _plugin = mock.Mock() rpc_worker = service.RpcWorker(_plugin) self._test_reset(rpc_worker) class TestRunRpcWorkers(base.BaseTestCase): def setUp(self): super(TestRunRpcWorkers, self).setUp() self.worker_count = service._get_worker_count() def _test_rpc_workers(self, config_value, expected_passed_value): if config_value is not None: cfg.CONF.set_override('rpc_workers', config_value) with mock.patch('neutron.service.RpcWorker') as mock_rpc_worker: with mock.patch('neutron.service.RpcReportsWorker'): service._get_rpc_workers(plugin=mock.Mock()) init_call = mock_rpc_worker.call_args expected_call = mock.call( mock.ANY, worker_process_count=expected_passed_value) self.assertEqual(expected_call, init_call) def test_rpc_workers_zero(self): self._test_rpc_workers(0, 1) def test_rpc_workers_default_api_workers_default(self): workers = max(int(self.worker_count / 2), 1) self._test_rpc_workers(None, workers) def test_rpc_workers_default_api_workers_set(self): cfg.CONF.set_override('api_workers', 18) self._test_rpc_workers(None, 9) def test_rpc_workers_defined(self): self._test_rpc_workers(42, 42) class TestRunWsgiApp(base.BaseTestCase): def setUp(self): super(TestRunWsgiApp, self).setUp() self.worker_count = service._get_worker_count() def _test_api_workers(self, config_value, expected_passed_value): if config_value is not None: cfg.CONF.set_override('api_workers', config_value) with mock.patch('neutron.wsgi.Server') as mock_server: service.run_wsgi_app(mock.sentinel.app) start_call = mock_server.return_value.start.call_args expected_call = mock.call( mock.ANY, mock.ANY, mock.ANY, desc='api worker', workers=expected_passed_value) self.assertEqual(expected_call, start_call) def test_api_workers_zero(self): self._test_api_workers(0, 0) def test_api_workers_default(self): self._test_api_workers(None, self.worker_count) def test_api_workers_defined(self): self._test_api_workers(42, 42) def test_start_all_workers(self): cfg.CONF.set_override('api_workers', 0) mock.patch.object(service, '_get_rpc_workers').start() mock.patch.object(service, '_get_plugins_workers').start() mock.patch.object(service, '_start_workers').start() callback = mock.Mock() registry.subscribe(callback, resources.PROCESS, events.AFTER_SPAWN) service.start_all_workers() callback.assert_called_once_with( resources.PROCESS, events.AFTER_SPAWN, mock.ANY, payload=None) 
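# Illustrative sketch, not part of neutron: TestRunRpcWorkers above pins down
# the rpc_workers defaults -- half of api_workers with a floor of one, and an
# explicit zero bumped to a single worker. The same rule in isolation:
def sketch_rpc_worker_count(api_workers, rpc_workers=None):
    """Mirror the worker-count defaults asserted in TestRunRpcWorkers."""
    if rpc_workers is None:
        # default: half the API worker count, but never less than one
        return max(int(api_workers / 2), 1)
    # an explicit value of 0 still yields a single worker
    return max(rpc_workers, 1)


assert sketch_rpc_worker_count(18) == 9
assert sketch_rpc_worker_count(1) == 1
assert sketch_rpc_worker_count(8, rpc_workers=0) == 1
assert sketch_rpc_worker_count(8, rpc_workers=42) == 42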
neutron-16.0.0.0b2.dev214/neutron/tests/unit/test_worker.py

#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock

from neutron.common import utils
from neutron.tests import base
from neutron import worker as neutron_worker


class PeriodicWorkerTestCase(base.BaseTestCase):

    def test_periodic_worker_lifecycle(self):
        check_function = mock.Mock()
        worker = neutron_worker.PeriodicWorker(
            check_function, interval=1, initial_delay=1)
        self.addCleanup(worker.stop)

        worker.wait()
        self.assertFalse(check_function.called)

        worker.start()
        utils.wait_until_true(
            lambda: check_function.called, timeout=5,
            exception=RuntimeError("check_function not called"))

        worker.stop()
        check_function.reset_mock()
        worker.wait()
        self.assertFalse(check_function.called)

        worker.reset()
        utils.wait_until_true(
            lambda: check_function.called, timeout=5,
            exception=RuntimeError("check_function not called"))


neutron-16.0.0.0b2.dev214/neutron/tests/unit/test_wsgi.py

# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
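# Illustrative sketch, not part of test_wsgi.py: several tests below start
# wsgi.Server on port 0 and assert that a real port was assigned. The
# mechanism is ordinary ephemeral-port binding; a stdlib-only equivalent:
def _sketch_bind_ephemeral_port():
    import socket

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # port 0 asks the kernel to pick any free port
        sock.bind(("127.0.0.1", 0))
        assigned_port = sock.getsockname()[1]
        assert assigned_port != 0
        return assigned_port
    finally:
        sock.close()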
import os import socket import ssl import mock from neutron_lib.db import api as db_api from neutron_lib import exceptions as exception from oslo_config import cfg from oslo_utils import netutils from six.moves import urllib import testtools import webob import webob.exc from neutron.tests import base from neutron import wsgi CONF = cfg.CONF TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'var')) def open_no_proxy(*args, **kwargs): # NOTE(jamespage): # Deal with more secure certification chain verification # introduced in python 2.7.9 under PEP-0476 # https://github.com/python/peps/blob/master/pep-0476.txt if hasattr(ssl, "_create_unverified_context"): opener = urllib.request.build_opener( urllib.request.ProxyHandler({}), urllib.request.HTTPSHandler( context=ssl._create_unverified_context()) ) else: opener = urllib.request.build_opener(urllib.request.ProxyHandler({})) return opener.open(*args, **kwargs) class TestServiceBase(base.BaseTestCase): """Service tests base.""" @mock.patch("neutron.policy.refresh") @mock.patch("neutron.common.config.setup_logging") def _test_reset(self, worker_service, setup_logging_mock, refresh_mock): worker_service.reset() setup_logging_mock.assert_called_once_with() refresh_mock.assert_called_once_with() class TestWorkerService(TestServiceBase): """WorkerService tests.""" @mock.patch.object(db_api, 'get_context_manager') def test_start_withoutdb_call(self, apimock): _service = mock.Mock() _service.pool.spawn.return_value = None _app = mock.Mock() workerservice = wsgi.WorkerService(_service, _app, "on") workerservice.start() self.assertFalse(apimock.called) def test_reset(self): _service = mock.Mock() _app = mock.Mock() worker_service = wsgi.WorkerService(_service, _app, "on") self._test_reset(worker_service) class TestWSGIServer(base.BaseTestCase): """WSGI server tests.""" def test_start_random_port(self): server = wsgi.Server("test_random_port") server.start(None, 0, host="127.0.0.1") self.assertNotEqual(0, server.port) server.stop() server.wait() @mock.patch('oslo_service.service.ProcessLauncher') def test_start_multiple_workers(self, ProcessLauncher): launcher = ProcessLauncher.return_value server = wsgi.Server("test_multiple_processes") server.start(None, 0, host="127.0.0.1", workers=2) launcher.launch_service.assert_called_once_with(mock.ANY, workers=2) server.stop() launcher.stop.assert_called_once_with() server.wait() launcher.wait.assert_called_once_with() @testtools.skipIf( not netutils.is_ipv6_enabled(), 'IPv6 support disabled on host') def test_start_random_port_with_ipv6(self): server = wsgi.Server("test_random_port") server.start(None, 0, host="::1") self.assertEqual("::1", server.host) self.assertNotEqual(0, server.port) server.stop() server.wait() def test_ipv6_listen_called_with_scope(self): server = wsgi.Server("test_app") with mock.patch.object(wsgi.eventlet, 'listen') as mock_listen: with mock.patch.object(socket, 'getaddrinfo') as mock_get_addr: mock_get_addr.return_value = [ (socket.AF_INET6, socket.SOCK_STREAM, socket.IPPROTO_TCP, '', ('fe80::204:acff:fe96:da87%eth0', 1234, 0, 2)) ] with mock.patch.object(server, 'pool') as mock_pool: server.start(None, 1234, host="fe80::204:acff:fe96:da87%eth0") mock_get_addr.assert_called_once_with( "fe80::204:acff:fe96:da87%eth0", 1234, socket.AF_UNSPEC, socket.SOCK_STREAM ) mock_listen.assert_called_once_with( ('fe80::204:acff:fe96:da87%eth0', 1234, 0, 2), family=socket.AF_INET6, backlog=cfg.CONF.backlog ) mock_pool.spawn.assert_has_calls([ mock.call( server._run, None, 
mock_listen.return_value.dup.return_value) ]) def test_app(self): greetings = b'Hello, World!!!' def hello_world(env, start_response): if env['PATH_INFO'] != '/': start_response('404 Not Found', [('Content-Type', 'text/plain')]) return ['Not Found\r\n'] start_response('200 OK', [('Content-Type', 'text/plain')]) return [greetings] server = wsgi.Server("test_app") server.start(hello_world, 0, host="127.0.0.1") response = open_no_proxy('http://127.0.0.1:%d/' % server.port) self.assertEqual(greetings, response.read()) server.stop() def test_disable_ssl(self): CONF.set_default('use_ssl', True) greetings = 'Hello, World!!!' def hello_world(env, start_response): if env['PATH_INFO'] != '/': start_response('404 Not Found', [('Content-Type', 'text/plain')]) return ['Not Found\r\n'] start_response('200 OK', [('Content-Type', 'text/plain')]) return [greetings] server = wsgi.Server("test_app", disable_ssl=True) server.start(hello_world, 0, host="127.0.0.1") response = open_no_proxy('http://127.0.0.1:%d/' % server.port) self.assertEqual(greetings.encode('utf-8'), response.read()) server.stop() @mock.patch.object(wsgi, 'eventlet') def test__run(self, eventlet_mock): server = wsgi.Server('test') server._run("app", "socket") eventlet_mock.wsgi.server.assert_called_once_with( 'socket', 'app', max_size=server.num_threads, log=mock.ANY, keepalive=CONF.wsgi_keep_alive, log_format=CONF.wsgi_log_format, socket_timeout=server.client_socket_timeout ) class SerializerTest(base.BaseTestCase): def test_serialize_unknown_content_type(self): """Verify that exception InvalidContentType is raised.""" input_dict = {'servers': {'test': 'pass'}} content_type = 'application/unknown' serializer = wsgi.Serializer() self.assertRaises( exception.InvalidContentType, serializer.serialize, input_dict, content_type) def test_get_deserialize_handler_unknown_content_type(self): """Verify that exception InvalidContentType is raised.""" content_type = 'application/unknown' serializer = wsgi.Serializer() self.assertRaises( exception.InvalidContentType, serializer.get_deserialize_handler, content_type) def test_serialize_content_type_json(self): """Test serialize with content type json.""" input_data = {'servers': ['test=pass']} content_type = 'application/json' serializer = wsgi.Serializer() result = serializer.serialize(input_data, content_type) self.assertEqual(b'{"servers": ["test=pass"]}', result) def test_deserialize_raise_bad_request(self): """Test serialize verifies that exception is raises.""" content_type = 'application/unknown' data_string = 'test' serializer = wsgi.Serializer() self.assertRaises( webob.exc.HTTPBadRequest, serializer.deserialize, data_string, content_type) def test_deserialize_json_content_type(self): """Test Serializer.deserialize with content type json.""" content_type = 'application/json' data_string = '{"servers": ["test=pass"]}' serializer = wsgi.Serializer() result = serializer.deserialize(data_string, content_type) self.assertEqual({'body': {u'servers': [u'test=pass']}}, result) class RequestDeserializerTest(testtools.TestCase): def setUp(self): super(RequestDeserializerTest, self).setUp() class JSONDeserializer(object): def deserialize(self, data, action='default'): return 'pew_json' self.body_deserializers = {'application/json': JSONDeserializer()} self.deserializer = wsgi.RequestDeserializer(self.body_deserializers) def test_get_deserializer(self): """Test RequestDeserializer.get_body_deserializer.""" expected_json_serializer = self.deserializer.get_body_deserializer( 'application/json') 
self.assertEqual( expected_json_serializer, self.body_deserializers['application/json']) def test_get_expected_content_type(self): """Test RequestDeserializer.get_expected_content_type.""" request = wsgi.Request.blank('/') request.headers['Accept'] = 'application/json' self.assertEqual('application/json', self.deserializer.get_expected_content_type(request)) def test_get_action_args(self): """Test RequestDeserializer.get_action_args.""" env = { 'wsgiorg.routing_args': [None, { 'controller': None, 'format': None, 'action': 'update', 'id': 12}]} expected = {'action': 'update', 'id': 12} self.assertEqual(expected, self.deserializer.get_action_args(env)) def test_deserialize(self): """Test RequestDeserializer.deserialize.""" with mock.patch.object( self.deserializer, 'get_action_args') as mock_method: mock_method.return_value = {'action': 'create'} request = wsgi.Request.blank('/') request.headers['Accept'] = 'application/json' deserialized = self.deserializer.deserialize(request) expected = ('create', {}, 'application/json') self.assertEqual(expected, deserialized) def test_get_body_deserializer_unknown_content_type(self): """Verify that exception InvalidContentType is raised.""" content_type = 'application/unknown' deserializer = wsgi.RequestDeserializer() self.assertRaises( exception.InvalidContentType, deserializer.get_body_deserializer, content_type) class ResponseSerializerTest(testtools.TestCase): def setUp(self): super(ResponseSerializerTest, self).setUp() class JSONSerializer(object): def serialize(self, data, action='default'): return b'pew_json' class HeadersSerializer(object): def serialize(self, response, data, action): response.status_int = 404 self.body_serializers = {'application/json': JSONSerializer()} self.serializer = wsgi.ResponseSerializer( self.body_serializers, HeadersSerializer()) def test_serialize_unknown_content_type(self): """Verify that exception InvalidContentType is raised.""" self.assertRaises( exception.InvalidContentType, self.serializer.serialize, {}, 'application/unknown') def test_get_body_serializer(self): """Verify that exception InvalidContentType is raised.""" self.assertRaises( exception.InvalidContentType, self.serializer.get_body_serializer, 'application/unknown') def test_get_serializer(self): """Test ResponseSerializer.get_body_serializer.""" content_type = 'application/json' self.assertEqual(self.body_serializers[content_type], self.serializer.get_body_serializer(content_type)) def test_serialize_json_response(self): response = self.serializer.serialize({}, 'application/json') self.assertEqual('application/json', response.headers['Content-Type']) self.assertEqual(b'pew_json', response.body) self.assertEqual(404, response.status_int) def test_serialize_response_None(self): response = self.serializer.serialize( None, 'application/json') self.assertEqual('application/json', response.headers['Content-Type']) self.assertEqual(b'', response.body) self.assertEqual(404, response.status_int) class RequestTest(base.BaseTestCase): def test_content_type_missing(self): request = wsgi.Request.blank('/tests/123', method='POST') request.body = b"" self.assertIsNone(request.get_content_type()) def test_content_type_unsupported(self): request = wsgi.Request.blank('/tests/123', method='POST') request.headers["Content-Type"] = "text/html" request.body = b"fake
" self.assertIsNone(request.get_content_type()) def test_content_type_with_charset(self): request = wsgi.Request.blank('/tests/123') request.headers["Content-Type"] = "application/json; charset=UTF-8" result = request.get_content_type() self.assertEqual("application/json", result) def test_content_type_with_given_content_types(self): request = wsgi.Request.blank('/tests/123') request.headers["Content-Type"] = "application/new-type;" self.assertIsNone(request.get_content_type()) def test_content_type_from_accept(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/json" result = request.best_match_content_type() self.assertEqual("application/json", result) request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = ("application/json; q=0.3") result = request.best_match_content_type() self.assertEqual("application/json", result) def test_content_type_from_query_extension(self): request = wsgi.Request.blank('/tests/123.json') result = request.best_match_content_type() self.assertEqual("application/json", result) request = wsgi.Request.blank('/tests/123.invalid') result = request.best_match_content_type() self.assertEqual("application/json", result) def test_content_type_accept_and_query_extension(self): request = wsgi.Request.blank('/tests/123.json') request.headers["Accept"] = "application/json" result = request.best_match_content_type() self.assertEqual("application/json", result) def test_content_type_accept_default(self): request = wsgi.Request.blank('/tests/123.unsupported') request.headers["Accept"] = "application/unsupported1" result = request.best_match_content_type() self.assertEqual("application/json", result) def test_content_type_accept_with_given_content_types(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/new_type" result = request.best_match_content_type() self.assertEqual("application/json", result) class ActionDispatcherTest(base.BaseTestCase): def test_dispatch(self): """Test ActionDispatcher.dispatch.""" serializer = wsgi.ActionDispatcher() serializer.create = lambda x: x self.assertEqual('pants', serializer.dispatch('pants', action='create')) def test_dispatch_action_None(self): """Test ActionDispatcher.dispatch with none action.""" serializer = wsgi.ActionDispatcher() serializer.create = lambda x: x + ' pants' serializer.default = lambda x: x + ' trousers' self.assertEqual('Two trousers', serializer.dispatch('Two', action=None)) def test_dispatch_default(self): serializer = wsgi.ActionDispatcher() serializer.create = lambda x: x + ' pants' serializer.default = lambda x: x + ' trousers' self.assertEqual('Two trousers', serializer.dispatch('Two', action='update')) class ResponseHeadersSerializerTest(base.BaseTestCase): def test_default(self): serializer = wsgi.ResponseHeaderSerializer() response = webob.Response() serializer.serialize(response, {'v': '123'}, 'fake') self.assertEqual(200, response.status_int) def test_custom(self): class Serializer(wsgi.ResponseHeaderSerializer): def update(self, response, data): response.status_int = 404 response.headers['X-Custom-Header'] = data['v'] serializer = Serializer() response = webob.Response() serializer.serialize(response, {'v': '123'}, 'update') self.assertEqual(404, response.status_int) self.assertEqual('123', response.headers['X-Custom-Header']) class DictSerializerTest(base.BaseTestCase): def test_dispatch_default(self): serializer = wsgi.DictSerializer() self.assertEqual('', serializer.serialize({}, 'NonExistentAction')) class 
JSONDictSerializerTest(base.BaseTestCase): def test_json(self): input_dict = dict(servers=dict(a=(2, 3))) expected_json = b'{"servers":{"a":[2,3]}}' serializer = wsgi.JSONDictSerializer() result = serializer.serialize(input_dict) result = result.replace(b'\n', b'').replace(b' ', b'') self.assertEqual(expected_json, result) def test_json_with_unicode(self): input_dict = dict(servers=dict(a=(2, u'\u7f51\u7edc'))) expected_json = b'{"servers":{"a":[2,"\\u7f51\\u7edc"]}}' serializer = wsgi.JSONDictSerializer() result = serializer.serialize(input_dict) result = result.replace(b'\n', b'').replace(b' ', b'') self.assertEqual(expected_json, result) class TextDeserializerTest(base.BaseTestCase): def test_dispatch_default(self): deserializer = wsgi.TextDeserializer() self.assertEqual({}, deserializer.deserialize({}, 'update')) class JSONDeserializerTest(base.BaseTestCase): def test_json(self): data = """{"a": { "a1": "1", "a2": "2", "bs": ["1", "2", "3", {"c": {"c1": "1"}}], "d": {"e": "1"}, "f": "1"}}""" as_dict = { 'body': { 'a': { 'a1': '1', 'a2': '2', 'bs': ['1', '2', '3', {'c': {'c1': '1'}}], 'd': {'e': '1'}, 'f': '1'}}} deserializer = wsgi.JSONDeserializer() self.assertEqual(as_dict, deserializer.deserialize(data)) def test_default_raise_Malformed_Exception(self): """Test JsonDeserializer.default. Test verifies JsonDeserializer.default raises exception MalformedRequestBody correctly. """ data_string = "" deserializer = wsgi.JSONDeserializer() self.assertRaises( exception.MalformedRequestBody, deserializer.default, data_string) def test_json_with_utf8(self): data = b'{"a": "\xe7\xbd\x91\xe7\xbb\x9c"}' as_dict = {'body': {'a': u'\u7f51\u7edc'}} deserializer = wsgi.JSONDeserializer() self.assertEqual(as_dict, deserializer.deserialize(data)) def test_json_with_unicode(self): data = b'{"a": "\u7f51\u7edc"}' as_dict = {'body': {'a': u'\u7f51\u7edc'}} deserializer = wsgi.JSONDeserializer() self.assertEqual(as_dict, deserializer.deserialize(data)) class RequestHeadersDeserializerTest(base.BaseTestCase): def test_default(self): deserializer = wsgi.RequestHeadersDeserializer() req = wsgi.Request.blank('/') self.assertEqual({}, deserializer.deserialize(req, 'nonExistent')) def test_custom(self): class Deserializer(wsgi.RequestHeadersDeserializer): def update(self, request): return {'a': request.headers['X-Custom-Header']} deserializer = Deserializer() req = wsgi.Request.blank('/') req.headers['X-Custom-Header'] = 'b' self.assertEqual({'a': 'b'}, deserializer.deserialize(req, 'update')) class ResourceTest(base.BaseTestCase): @staticmethod def my_fault_body_function(): return 'off' class Controller(object): def index(self, request, index=None): return index def test_dispatch(self): resource = wsgi.Resource(self.Controller(), self.my_fault_body_function) actual = resource.dispatch( resource.controller, 'index', action_args={'index': 'off'}) expected = 'off' self.assertEqual(expected, actual) def test_dispatch_unknown_controller_action(self): resource = wsgi.Resource(self.Controller(), self.my_fault_body_function) self.assertRaises( AttributeError, resource.dispatch, resource.controller, 'create', {}) def test_malformed_request_body_throws_bad_request(self): resource = wsgi.Resource(None, self.my_fault_body_function) request = wsgi.Request.blank( "/", body=b"{mal:formed", method='POST', headers={'Content-Type': "application/json"}) response = resource(request) self.assertEqual(400, response.status_int) def test_wrong_content_type_throws_unsupported_media_type_error(self): resource = wsgi.Resource(None, 
self.my_fault_body_function)
        request = wsgi.Request.blank(
            "/", body=b"{some:json}", method='POST',
            headers={'Content-Type': "xxx"})
        response = resource(request)
        self.assertEqual(400, response.status_int)

    def test_wrong_content_type_server_error(self):
        resource = wsgi.Resource(None, self.my_fault_body_function)
        request = wsgi.Request.blank(
            "/", method='POST', headers={'Content-Type': "unknow"})
        response = resource(request)
        self.assertEqual(500, response.status_int)

    def test_call_resource_class_bad_request(self):
        class FakeRequest(object):
            def __init__(self):
                self.url = 'http://where.no'
                self.environ = 'environ'
                self.body = 'body'

            def method(self):
                pass

            def best_match_content_type(self):
                return 'best_match_content_type'

        resource = wsgi.Resource(self.Controller(),
                                 self.my_fault_body_function)
        request = FakeRequest()
        result = resource(request)
        self.assertEqual(400, result.status_int)

    def test_type_error(self):
        resource = wsgi.Resource(self.Controller(),
                                 self.my_fault_body_function)
        request = wsgi.Request.blank(
            "/", method='POST', headers={'Content-Type': "json"})
        response = resource.dispatch(
            request, action='index', action_args='test')
        self.assertEqual(400, response.status_int)

    def test_call_resource_class_internal_error(self):
        class FakeRequest(object):
            def __init__(self):
                self.url = 'http://where.no'
                self.environ = 'environ'
                self.body = '{"Content-Type": "json"}'

            def method(self):
                pass

            def best_match_content_type(self):
                return 'application/json'

        resource = wsgi.Resource(self.Controller(),
                                 self.my_fault_body_function)
        request = FakeRequest()
        result = resource(request)
        self.assertEqual(500, result.status_int)


class FaultTest(base.BaseTestCase):

    def test_call_fault(self):
        class MyException(object):
            status_int = 415
            explanation = 'test'

        my_exceptions = MyException()
        my_fault = wsgi.Fault(exception=my_exceptions)
        request = wsgi.Request.blank(
            "/", method='POST', headers={'Content-Type': "unknow"})
        response = my_fault(request)
        self.assertEqual(415, response.status_int)


neutron-16.0.0.0b2.dev214/neutron/tests/unit/testlib_api.py

# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
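# Usage sketch for the create_request() helper defined below; the path and
# body values here are hypothetical, not taken from any neutron test:
#
#     req = create_request('/v2.0/networks',
#                          '{"network": {"name": "n1"}}',
#                          'application/json', method='POST',
#                          query_string='fields=id')
#     # -> a wsgi.Request for POST /v2.0/networks?fields=id whose text body
#     #    is encoded to bytes and whose Accept header is application/json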
import six import testresources import testscenarios import testtools from neutron_lib.db import api as db_api from neutron_lib import fixture as lib_fixtures from oslo_config import cfg from oslo_db import exception as oslodb_exception from oslo_db.sqlalchemy import provision from neutron.db.migration import cli as migration # Import all data models from neutron.db.migration.models import head # noqa from neutron.tests import base from neutron import wsgi class ExpectedException(testtools.ExpectedException): def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): if super(ExpectedException, self).__exit__(exc_type, exc_value, traceback): self.exception = exc_value return True return False def create_request(path, body, content_type, method='GET', query_string=None, context=None, headers=None): headers = headers or {} if query_string: url = "%s?%s" % (path, query_string) else: url = path req = wsgi.Request.blank(url) req.method = method req.headers = {} req.headers['Accept'] = content_type req.headers.update(headers) if isinstance(body, six.text_type): req.body = body.encode() else: req.body = body if context: req.environ['neutron.context'] = context return req class StaticSqlFixtureNoSchema(lib_fixtures.SqlFixture): """Fixture which keeps a single sqlite memory database at the global scope """ _GLOBAL_RESOURCES = False @classmethod def _init_resources(cls): if cls._GLOBAL_RESOURCES: return else: cls._GLOBAL_RESOURCES = True cls.database_resource = provision.DatabaseResource( "sqlite", db_api.get_context_manager()) dependency_resources = {} for name, resource in cls.database_resource.resources: dependency_resources[name] = resource.getResource() cls.engine = dependency_resources['backend'].engine def _delete_from_schema(self, engine): pass class OpportunisticSqlFixture(lib_fixtures.SqlFixture): """Fixture which uses testresources with oslo_db provisioning to check for available backends and optimize test runs. Requires that the test itself implement the resources attribute. """ DRIVER = 'sqlite' def __init__(self, test): super(OpportunisticSqlFixture, self).__init__() self.test = test @classmethod def _generate_schema_w_migrations(cls, engine): alembic_configs = migration.get_alembic_configs() with engine.connect() as conn: for alembic_config in alembic_configs: alembic_config.attributes['connection'] = conn alembic_config.neutron_config = cfg.CONF alembic_config.neutron_config.set_override( 'connection', str(engine.url), group='database') migration.do_alembic_command( alembic_config, 'upgrade', 'heads') def _delete_from_schema(self, engine): if self.test.BUILD_SCHEMA: super(OpportunisticSqlFixture, self)._delete_from_schema(engine) def _init_resources(self): testresources.setUpResources( self.test, self.test.resources, testresources._get_result()) self.addCleanup( testresources.tearDownResources, self.test, self.test.resources, testresources._get_result() ) # unfortunately, fixtures won't let us call a skip() from # here. So the test has to check this also. # see https://github.com/testing-cabal/fixtures/issues/31 if hasattr(self.test, 'db'): self.engine = self.test.engine = self.test.db.engine @classmethod def resources_collection(cls, test): # reimplement current oslo.db code. # FIXME(zzzeek) The patterns here are up in the air enough # that I think keeping this totally separate will give us the # most leverage in being able to fix oslo.db in an upcoming # release, then port neutron back to the working version. 
driver = test.DRIVER if driver not in test._database_resources: try: test._database_resources[driver] = \ provision.DatabaseResource(driver) except oslodb_exception.BackendNotAvailable: test._database_resources[driver] = None database_resource = test._database_resources[driver] if database_resource is None: return [] key = (driver, None) if test.BUILD_SCHEMA: if key not in test._schema_resources: test._schema_resources[key] = provision.SchemaResource( database_resource, cls._generate_schema_w_migrations if test.BUILD_WITH_MIGRATIONS else cls._generate_schema, teardown=False) schema_resource = test._schema_resources[key] return [ ('schema', schema_resource), ('db', database_resource) ] else: return [ ('db', database_resource) ] class BaseSqlTestCase(object): BUILD_SCHEMA = True def setUp(self): super(BaseSqlTestCase, self).setUp() self._setup_database_fixtures() def _setup_database_fixtures(self): if self.BUILD_SCHEMA: fixture = lib_fixtures.StaticSqlFixture() else: fixture = StaticSqlFixtureNoSchema() self.useFixture(fixture) self.engine = fixture.engine class SqlTestCaseLight(BaseSqlTestCase, base.DietTestCase): """All SQL taste, zero plugin/rpc sugar""" class SqlTestCase(BaseSqlTestCase, base.BaseTestCase): """regular sql test""" class OpportunisticDBTestMixin(object): """Mixin that converts a BaseSqlTestCase to use the OpportunisticSqlFixture. """ SKIP_ON_UNAVAILABLE_DB = not base.bool_from_env('OS_FAIL_ON_MISSING_DEPS') FIXTURE = OpportunisticSqlFixture BUILD_WITH_MIGRATIONS = False def _setup_database_fixtures(self): self.useFixture(self.FIXTURE(self)) if not hasattr(self, 'db'): msg = "backend '%s' unavailable" % self.DRIVER if self.SKIP_ON_UNAVAILABLE_DB: self.skipTest(msg) else: self.fail(msg) _schema_resources = {} _database_resources = {} @property def resources(self): """this attribute is used by testresources for optimized sorting of tests. This is the big requirement that allows testresources to sort tests such that database "resources" can be kept open for many tests at once. IMO(zzzeek) "sorting" should not be needed; only that necessary resources stay open as long as they are needed (or long enough to reduce overhead). testresources would be improved to not depend on custom, incompatible-with-pytest "suite classes", fixture information leaking out of the Fixture classes themselves, and exotic sorting schemes for something that can nearly always be handled "good enough" with unittest-standard setupclass/setupmodule schemes. """ return self.FIXTURE.resources_collection(self) class MySQLTestCaseMixin(OpportunisticDBTestMixin): """Mixin that turns any BaseSqlTestCase into a MySQL test suite. If the MySQL db is unavailable then this test is skipped, unless OS_FAIL_ON_MISSING_DEPS is enabled. """ DRIVER = "mysql" class PostgreSQLTestCaseMixin(OpportunisticDBTestMixin): """Mixin that turns any BaseSqlTestCase into a PostgresSQL test suite. If the PostgreSQL db is unavailable then this test is skipped, unless OS_FAIL_ON_MISSING_DEPS is enabled. """ DRIVER = "postgresql" def module_load_tests(loader, found_tests, pattern): """Apply OptimisingTestSuite on a per-module basis. FIXME(zzzeek): oslo.db provides this but the contract that "pattern" should be None no longer seems to behave as it used to at the module level, so this function needs to be added in this form. 
""" result = testresources.OptimisingTestSuite() found_tests = testscenarios.load_tests_apply_scenarios( loader, found_tests, pattern) result.addTest(found_tests) return result class WebTestCase(SqlTestCase): fmt = 'json' def setUp(self): super(WebTestCase, self).setUp() json_deserializer = wsgi.JSONDeserializer() self._deserializers = { 'application/json': json_deserializer, } def deserialize(self, response): ctype = 'application/%s' % self.fmt data = self._deserializers[ctype].deserialize(response.body)['body'] return data def serialize(self, data): ctype = 'application/%s' % self.fmt result = wsgi.Serializer().serialize(data, ctype) return result class SubDictMatch(object): def __init__(self, sub_dict): self.sub_dict = sub_dict def __eq__(self, super_dict): return all(item in super_dict.items() for item in self.sub_dict.items()) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4910462 neutron-16.0.0.0b2.dev214/neutron/tests/unit/tests/0000755000175000017500000000000000000000000022120 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/tests/__init__.py0000644000175000017500000000000000000000000024217 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4910462 neutron-16.0.0.0b2.dev214/neutron/tests/unit/tests/common/0000755000175000017500000000000000000000000023410 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/tests/common/__init__.py0000644000175000017500000000000000000000000025507 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/tests/common/test_net_helpers.py0000644000175000017500000000660000000000000027333 0ustar00coreycorey00000000000000# Copyright 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock
from neutron_lib import constants as n_const

from neutron.tests import base
from neutron.tests.common import net_helpers

ss_output = """
State  Recv-Q Send-Q Local Address:Port     Peer Address:Port
LISTEN 0      10     127.0.0.1:6640         *:*
LISTEN 0      128    *:46675                *:*
LISTEN 0      128    *:22                   *:*
LISTEN 0      128    *:5432                 *:*
LISTEN 0      128    *:3260                 *:*
LISTEN 0      50     *:3306                 *:*
ESTAB  0      36     10.0.0.202:22          10.0.0.44:45258
ESTAB  0      0      127.0.0.1:32965        127.0.0.1:4369
ESTAB  0      0      10.0.0.202:22          10.0.0.44:36104
LISTEN 0      128    :::80                  :::*
LISTEN 0      128    :::4369                :::*
LISTEN 0      128    :::22                  :::*
LISTEN 0      128    :::5432                :::*
LISTEN 0      128    :::3260                :::*
LISTEN 0      128    :::5672                :::*
ESTAB  0      0      ::ffff:127.0.0.1:4369  ::ffff:127.0.0.1:32965
"""

ss_output_template = """
LISTEN 0      10     127.0.0.1:%d           *:*
"""


class PortAllocationTestCase(base.DietTestCase):

    def test__get_source_ports_from_ss_output(self):
        result = net_helpers._get_source_ports_from_ss_output(ss_output)
        expected = {6640, 46675, 5432, 3260, 3306, 22, 32965, 4369, 5672, 80}
        self.assertEqual(expected, result)

    def test_get_free_namespace_port(self):
        ss_output2 = ss_output
        for p in range(1024, 32767):
            ss_output2 += ss_output_template % p

        with mock.patch(
                'neutron.agent.linux.ip_lib.IPWrapper') as ipwrapper, \
                mock.patch('neutron.agent.linux.utils.execute') as ex:
            m = mock.MagicMock()
            m.netns.execute.return_value = ss_output2
            ipwrapper.return_value = m
            local_port_range_start = 32768
            ex.return_value = "%s\t61000" % local_port_range_start
            result = net_helpers.get_free_namespace_port(
                n_const.PROTO_NAME_TCP)
        self.assertEqual((local_port_range_start - 1), result)

    def test_get_unused_port(self):
        with mock.patch('neutron.agent.linux.utils.execute') as ex:
            ex.return_value = "2048\t61000"
            result = net_helpers.get_unused_port(set(range(1025, 2048)))
        self.assertEqual(1024, result)


neutron-16.0.0.0b2.dev214/neutron/tests/unit/tests/example/README
    This directory is used by:
    neutron.tests.unit.tests.test_tools.ImportModulesRecursivelyTestCase

neutron-16.0.0.0b2.dev214/neutron/tests/unit/tests/example/__init__.py (empty)
neutron-16.0.0.0b2.dev214/neutron/tests/unit/tests/example/dir/__init__.py (empty)
neutron-16.0.0.0b2.dev214/neutron/tests/unit/tests/example/dir/example_module.py (empty)
neutron-16.0.0.0b2.dev214/neutron/tests/unit/tests/example/dir/subdir/__init__.py (empty)
neutron-16.0.0.0b2.dev214/neutron/tests/unit/tests/example/dir/subdir/example_module.py (empty)
neutron-16.0.0.0b2.dev214/neutron/tests/unit/tests/functional/__init__.py (empty)

neutron-16.0.0.0b2.dev214/neutron/tests/unit/tests/functional/test_base.py

# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from oslo_config import cfg

from neutron.tests import base
from neutron.tests.functional import base as functional_base

NEW_CONFIG_GROUP = cfg.OptGroup('testgroup',
                                title='Test wrapping cfg register')
SOME_OPTIONS = [
    cfg.StrOpt('str_opt', default='default_value'),
    cfg.StrOpt('int_opt', default=1),
    cfg.BoolOpt('bool_opt', default=True)
]


def register_some_options(cfg=cfg.CONF):
    cfg.register_opts(SOME_OPTIONS, 'testgroup')


class ConfigDecoratorTestCase(base.BaseTestCase):

    def setUp(self):
        super(ConfigDecoratorTestCase, self).setUp()
        cfg.CONF.register_group(NEW_CONFIG_GROUP)

    def test_no_config_decorator(self):
        register_some_options()
        self.assertEqual('default_value', cfg.CONF.testgroup.str_opt)
        self.assertEqual('1', cfg.CONF.testgroup.int_opt)
        self.assertTrue(cfg.CONF.testgroup.bool_opt)

    def test_override_variables(self):
        opts = [('str_opt', 'another_value', 'testgroup'),
                ('int_opt', 123, 'testgroup'),
                ('bool_opt', False, 'testgroup')]
        cfg_decorator = functional_base.config_decorator(
            register_some_options, opts)
        mock.patch('neutron.tests.unit.tests.functional.test_base.'
'register_some_options', new=cfg_decorator).start() register_some_options() self.assertEqual('another_value', cfg.CONF.testgroup.str_opt) self.assertEqual('123', cfg.CONF.testgroup.int_opt) self.assertFalse(cfg.CONF.testgroup.bool_opt) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/tests/test_base.py0000644000175000017500000000575000000000000024452 0ustar00coreycorey00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests to test the test framework""" import sys import unittest import eventlet.timeout from neutron.tests import base class BrokenExceptionHandlerTestCase(base.DietTestCase): # Embedded to hide from the regular test discovery class MyTestCase(base.DietTestCase): def setUp(self): super(BrokenExceptionHandlerTestCase.MyTestCase, self).setUp() self.addOnException(self._diag_collect) def _diag_collect(self, exc_info): raise ValueError('whoopsie daisy') def runTest(self): raise IndexError("Thou shalt not pass by reference") def test_broken_exception_handler(self): result = self.MyTestCase().run() # ensure both exceptions are logged self.assertIn('Thou shalt', result.errors[0][1]) self.assertIn('whoopsie', result.errors[0][1]) self.assertFalse(result.wasSuccessful()) class SystemExitTestCase(base.DietTestCase): # Embedded to hide from the regular test discovery class MyTestCase(base.DietTestCase): def __init__(self, exitcode): super(SystemExitTestCase.MyTestCase, self).__init__() self.exitcode = exitcode def runTest(self): if self.exitcode is not None: sys.exit(self.exitcode) def test_no_sysexit(self): result = self.MyTestCase(exitcode=None).run() self.assertTrue(result.wasSuccessful()) def test_sysexit(self): expectedFails = [self.MyTestCase(exitcode) for exitcode in (0, 1)] suite = unittest.TestSuite(tests=expectedFails) result = self.defaultTestResult() try: suite.run(result) except SystemExit: self.fail('SystemExit escaped!') self.assertEqual([], result.errors) self.assertItemsEqual(set(id(t) for t in expectedFails), set(id(t) for (t, traceback) in result.failures)) class CatchTimeoutTestCase(base.DietTestCase): # Embedded to hide from the regular test discovery class MyTestCase(base.DietTestCase): def test_case(self): raise eventlet.Timeout() def runTest(self): return self.test_case() def test_catch_timeout(self): try: result = self.MyTestCase().run() self.assertFalse(result.wasSuccessful()) except eventlet.Timeout: self.fail('Timeout escaped!') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/unit/tests/test_post_mortem_debug.py0000644000175000017500000000764500000000000027263 0ustar00coreycorey00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
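# The test above exercises functional_base.config_decorator, which wraps an
# option-registering function so that fixed overrides are applied right
# after registration. Its implementation is not reproduced in this section;
# a minimal sketch of the idea, consistent with the assertions above, is:
#
#     def config_decorator(method_to_decorate, config_tuples):
#         def wrapper(*args, **kwargs):
#             method_to_decorate(*args, **kwargs)
#             for opt_name, value, group in config_tuples:
#                 cfg.CONF.set_override(opt_name, value, group)
#         return wrapper
#
# This explains why the mocked register_some_options() call yields the
# overridden values: set_override coerces through each option's type, so
# the integer 123 comes back as '123' for the StrOpt named 'int_opt'.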
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys import mock from six import moves from neutron.tests import base from neutron.tests import post_mortem_debug class TestTesttoolsExceptionHandler(base.BaseTestCase): def test_exception_handler(self): try: self.fail() except Exception: exc_info = sys.exc_info() with mock.patch('traceback.print_exception') as mock_print_exception: with mock.patch('pdb.post_mortem') as mock_post_mortem: with mock.patch.object(post_mortem_debug, 'get_ignored_traceback', return_value=mock.Mock()): post_mortem_debug.get_exception_handler('pdb')(exc_info) # traceback will become post_mortem_debug.FilteredTraceback filtered_exc_info = (exc_info[0], exc_info[1], mock.ANY) mock_print_exception.assert_called_once_with(*filtered_exc_info) mock_post_mortem.assert_called_once_with(mock.ANY) def test__get_debugger(self): def import_mock(name, *args): mod_mock = mock.Mock() mod_mock.__name__ = name mod_mock.post_mortem = mock.Mock() return mod_mock with mock.patch('six.moves.builtins.__import__', side_effect=import_mock): pdb_debugger = post_mortem_debug._get_debugger('pdb') pudb_debugger = post_mortem_debug._get_debugger('pudb') self.assertEqual('pdb', pdb_debugger.__name__) self.assertEqual('pudb', pudb_debugger.__name__) class TestFilteredTraceback(base.BaseTestCase): def test_filter_traceback(self): tb1 = mock.Mock() tb2 = mock.Mock() tb1.tb_next = tb2 tb2.tb_next = None ftb1 = post_mortem_debug.FilteredTraceback(tb1, tb2) for attr in ['lasti', 'lineno', 'frame']: attr_name = 'tb_%s' % attr self.assertEqual(getattr(tb1, attr_name, None), getattr(ftb1, attr_name, None)) self.assertIsNone(ftb1.tb_next) class TestGetIgnoredTraceback(base.BaseTestCase): def _test_get_ignored_traceback(self, ignored_bit_array, expected): root_tb = mock.Mock() tb = root_tb tracebacks = [tb] for x in moves.range(len(ignored_bit_array) - 1): tb.tb_next = mock.Mock() tb = tb.tb_next tracebacks.append(tb) tb.tb_next = None tb = root_tb for ignored in ignored_bit_array: if ignored: tb.tb_frame.f_globals = ['__unittest'] else: tb.tb_frame.f_globals = [] tb = tb.tb_next actual = post_mortem_debug.get_ignored_traceback(root_tb) if expected is not None: expected = tracebacks[expected] self.assertEqual(expected, actual) def test_no_ignored_tracebacks(self): self._test_get_ignored_traceback([0, 0, 0], None) def test_single_member_trailing_chain(self): self._test_get_ignored_traceback([0, 0, 1], 2) def test_two_member_trailing_chain(self): self._test_get_ignored_traceback([0, 1, 1], 1) def test_first_traceback_ignored(self): self._test_get_ignored_traceback([1, 0, 0], None) def test_middle_traceback_ignored(self): self._test_get_ignored_traceback([0, 1, 0], None) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4910462 neutron-16.0.0.0b2.dev214/neutron/tests/var/0000755000175000017500000000000000000000000020567 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/var/ca.crt0000644000175000017500000000415700000000000021673 
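# test_post_mortem_debug.py above drives get_ignored_traceback with a bit
# array marking which mock frames carry '__unittest' in f_globals. The
# function is expected to return the first traceback of the *trailing*
# ignored chain (unittest plumbing at the end of a traceback), or None when
# the last frame is not ignored. A minimal sketch consistent with those
# expectations (the real implementation lives in
# neutron/tests/post_mortem_debug.py and is not shown in this section):
#
#     def get_ignored_traceback(tb):
#         tb_list = []
#         while tb:
#             tb_list.append(tb)
#             tb = tb.tb_next
#         start = None
#         for tb in reversed(tb_list):
#             if '__unittest' not in tb.tb_frame.f_globals:
#                 break
#             start = tb
#         return start
#
# Walking the reversed chain makes the bit-array cases line up: [0, 0, 1]
# returns index 2, [0, 1, 1] returns index 1, and any chain whose final
# frame is not ignored ([1, 0, 0], [0, 1, 0]) returns None.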
0ustar00coreycorey00000000000000-----BEGIN CERTIFICATE----- MIIGDDCCA/SgAwIBAgIJAPSvwQYk4qI4MA0GCSqGSIb3DQEBBQUAMGExCzAJBgNV BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMRUwEwYDVQQKEwxPcGVuc3RhY2sg Q0ExEjAQBgNVBAsTCUdsYW5jZSBDQTESMBAGA1UEAxMJR2xhbmNlIENBMB4XDTEy MDIwOTE3MTAwMloXDTIyMDIwNjE3MTAwMlowYTELMAkGA1UEBhMCQVUxEzARBgNV BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwggIiMA0GCSqGSIb3DQEBAQUA A4ICDwAwggIKAoICAQDmf+fapWfzy1Uylus0KGalw4X/5xZ+ltPVOr+IdCPbstvi RTC5g+O+TvXeOP32V/cnSY4ho/+f2q730za+ZA/cgWO252rcm3Q7KTJn3PoqzJvX /l3EXe3/TCrbzgZ7lW3QLTCTEE2eEzwYG3wfDTOyoBq+F6ct6ADh+86gmpbIRfYI N+ixB0hVyz9427PTof97fL7qxxkjAayB28OfwHrkEBl7iblNhUC0RoH+/H9r5GEl GnWiebxfNrONEHug6PHgiaGq7/Dj+u9bwr7J3/NoS84I08ajMnhlPZxZ8bS/O8If ceWGZv7clPozyhABT/otDfgVcNH1UdZ4zLlQwc1MuPYN7CwxrElxc8Quf94ttGjb tfGTl4RTXkDofYdG1qBWW962PsGl2tWmbYDXV0q5JhV/IwbrE1X9f+OksJQne1/+ dZDxMhdf2Q1V0P9hZZICu4+YhmTMs5Mc9myKVnzp4NYdX5fXoB/uNYph+G7xG5IK WLSODKhr1wFGTTcuaa8LhOH5UREVenGDJuc6DdgX9a9PzyJGIi2ngQ03TJIkCiU/ 4J/r/vsm81ezDiYZSp2j5JbME+ixW0GBLTUWpOIxUSHgUFwH5f7lQwbXWBOgwXQk BwpZTmdQx09MfalhBtWeu4/6BnOCOj7e/4+4J0eVxXST0AmVyv8YjJ2nz1F9oQID AQABo4HGMIHDMB0GA1UdDgQWBBTk7Krj4bEsTjHXaWEtI2GZ5ACQyTCBkwYDVR0j BIGLMIGIgBTk7Krj4bEsTjHXaWEtI2GZ5ACQyaFlpGMwYTELMAkGA1UEBhMCQVUx EzARBgNVBAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAG A1UECxMJR2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0GCCQD0r8EGJOKiODAM BgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4ICAQA8Zrss/MiwFHGmDlercE0h UvzA54n/EvKP9nP3jHM2qW/VPfKdnFw99nEPFLhb+lN553vdjOpCYFm+sW0Z5Mi4 qsFkk4AmXIIEFOPt6zKxMioLYDQ9Sw/BUv6EZGeANWr/bhmaE+dMcKJt5le/0jJm 2ahsVB9fbFu9jBFeYb7Ba/x2aLkEGMxaDLla+6EQhj148fTnS1wjmX9G2cNzJvj/ +C2EfKJIuDJDqw2oS2FGVpP37FA2Bz2vga0QatNneLkGKCFI3ZTenBznoN+fmurX TL3eJE4IFNrANCcdfMpdyLAtXz4KpjcehqpZMu70er3d30zbi1l0Ajz4dU+WKz/a NQES+vMkT2wqjXHVTjrNwodxw3oLK/EuTgwoxIHJuplx5E5Wrdx9g7Gl1PBIJL8V xiOYS5N7CakyALvdhP7cPubA2+TPAjNInxiAcmhdASS/Vrmpvrkat6XhGn8h9liv ysDOpMQmYQkmgZBpW8yBKK7JABGGsJADJ3E6J5MMWBX2RR4kFoqVGAzdOU3oyaTy I0kz5sfuahaWpdYJVlkO+esc0CRXw8fLDYivabK2tOgUEWeZsZGZ9uK6aV1VxTAY 9Guu3BJ4Rv/KP/hk7mP8rIeCwotV66/2H8nq72ImQhzSVyWcxbFf2rJiFQJ3BFwA WoRMgEwjGJWqzhJZUYpUAQ== -----END CERTIFICATE----- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/var/certandkey.pem0000644000175000017500000001175500000000000023434 0ustar00coreycorey00000000000000-----BEGIN CERTIFICATE----- MIIFLjCCAxYCAQEwDQYJKoZIhvcNAQEFBQAwYTELMAkGA1UEBhMCQVUxEzARBgNV BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwHhcNMTIwMjA5MTcxMDUzWhcN MjIwMjA2MTcxMDUzWjBZMQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0 ZTESMBAGA1UEChMJT3BlbnN0YWNrMQ8wDQYDVQQLEwZHbGFuY2UxEDAOBgNVBAMT BzAuMC4wLjAwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXpUkQN6pu avo+gz3o1K4krVdPl1m7NjNJDyD/+ZH0EGNcEN7iag1qPE7JsjqGPNZsQK1dMoXb Sz+OSi9qvNeJnBcfwUx5qTAtwyAb9AxGkwuMafIU+lWbsclo+dPGsja01ywbXTCZ bF32iqnpOMYhfxWUdoQYiBkhxxhW9eMPKLS/KkP8/bx+Vaa2XJiAebqkd9nrksAA BeGc9mlafYBEmiChPdJEPw+1ePA4QVq9aPepDsqAKtGN8JLpmoC3BdxQQTbbwL3Q 8fTXK4tCNUaVk4AbDy/McFq6y0ocQoBPJjihOY35mWG/OLtcI99yPOpWGnps/5aG /64DDJ2D67Fnaj6gKHV+6TXFO8KZxlnxtgtiZDJBZkneTBt9ArSOv+l6NBsumRz0 iEJ4o4H1S2TSMnprAvX7WnGtc6Xi9gXahYcDHEelwwYzqAiTBv6hxSp4MZ2dNXa+ KzOitC7ZbV2qsg0au0wjfE/oSQ3NvsvUr8nOmfutJTvHRAwbC1v4G/tuAsO7O0w2 0u2B3u+pG06m5+rnEqp+rB9hmukRYTfgEFRRsVIvpFl/cwvPXKRcX03UIMx+lLr9 Ft+ep7YooBhY3wY2kwCxD4lRYNmbwsCIVywZt40f/4ad98TkufR9NhsfycxGeqbr 
mTMFlZ8TTlmP82iohekKCOvoyEuTIWL2+wIDAQABMA0GCSqGSIb3DQEBBQUAA4IC AQBMUBgV0R+Qltf4Du7u/8IFmGAoKR/mktB7R1gRRAqsvecUt7kIwBexGdavGg1y 0pU0+lgUZjJ20N1SlPD8gkNHfXE1fL6fmMjWz4dtYJjzRVhpufHPeBW4tl8DgHPN rBGAYQ+drDSXaEjiPQifuzKx8WS+DGA3ki4co5mPjVnVH1xvLIdFsk89z3b3YD1k yCJ/a9K36x6Z/c67JK7s6MWtrdRF9+MVnRKJ2PK4xznd1kBz16V+RA466wBDdARY vFbtkafbEqOb96QTonIZB7+fAldKDPZYnwPqasreLmaGOaM8sxtlPYAJ5bjDONbc AaXG8BMRQyO4FyH237otDKlxPyHOFV66BaffF5S8OlwIMiZoIvq+IcTZOdtDUSW2 KHNLfe5QEDZdKjWCBrfqAfvNuG13m03WqfmcMHl3o/KiPJlx8l9Z4QEzZ9xcyQGL cncgeHM9wJtzi2cD/rTDNFsx/gxvoyutRmno7I3NRbKmpsXF4StZioU3USRspB07 hYXOVnG3pS+PjVby7ThT3gvFHSocguOsxClx1epdUJAmJUbmM7NmOp5WVBVtMtC2 Su4NG/xJciXitKzw+btb7C7RjO6OEqv/1X/oBDzKBWQAwxUC+lqmnM7W6oqWJFEM YfTLnrjs7Hj6ThMGcEnfvc46dWK3dz0RjsQzUxugPuEkLA== -----END CERTIFICATE----- -----BEGIN RSA PRIVATE KEY----- MIIJKAIBAAKCAgEA16VJEDeqbmr6PoM96NSuJK1XT5dZuzYzSQ8g//mR9BBjXBDe 4moNajxOybI6hjzWbECtXTKF20s/jkovarzXiZwXH8FMeakwLcMgG/QMRpMLjGny FPpVm7HJaPnTxrI2tNcsG10wmWxd9oqp6TjGIX8VlHaEGIgZIccYVvXjDyi0vypD /P28flWmtlyYgHm6pHfZ65LAAAXhnPZpWn2ARJogoT3SRD8PtXjwOEFavWj3qQ7K gCrRjfCS6ZqAtwXcUEE228C90PH01yuLQjVGlZOAGw8vzHBaustKHEKATyY4oTmN +Zlhvzi7XCPfcjzqVhp6bP+Whv+uAwydg+uxZ2o+oCh1fuk1xTvCmcZZ8bYLYmQy QWZJ3kwbfQK0jr/pejQbLpkc9IhCeKOB9Utk0jJ6awL1+1pxrXOl4vYF2oWHAxxH pcMGM6gIkwb+ocUqeDGdnTV2viszorQu2W1dqrINGrtMI3xP6EkNzb7L1K/Jzpn7 rSU7x0QMGwtb+Bv7bgLDuztMNtLtgd7vqRtOpufq5xKqfqwfYZrpEWE34BBUUbFS L6RZf3MLz1ykXF9N1CDMfpS6/Rbfnqe2KKAYWN8GNpMAsQ+JUWDZm8LAiFcsGbeN H/+GnffE5Ln0fTYbH8nMRnqm65kzBZWfE05Zj/NoqIXpCgjr6MhLkyFi9vsCAwEA AQKCAgAA96baQcWr9SLmQOR4NOwLEhQAMWefpWCZhU3amB4FgEVR1mmJjnw868RW t0v36jH0Dl44us9K6o2Ab+jCi9JTtbWM2Osk6JNkwSlVtsSPVH2KxbbmTTExH50N sYE3tPj12rlB7isXpRrOzlRwzWZmJBHOtrFlAsdKFYCQc03vdXlKGkBv1BuSXYP/ 8W5ltSYXMspxehkOZvhaIejbFREMPbzDvGlDER1a7Q320qQ7kUr7ISvbY1XJUzj1 f1HwgEA6w/AhED5Jv6wfgvx+8Yo9hYnflTPbsO1XRS4x7kJxGHTMlFuEsSF1ICYH Bcos0wUiGcBO2N6uAFuhe98BBn+nOwAPZYWwGkmVuK2psm2mXAHx94GT/XqgK/1r VWGSoOV7Fhjauc2Nv8/vJU18DXT3OY5hc4iXVeEBkuZwRb/NVUtnFoHxVO/Mp5Fh /W5KZaLWVrLghzvSQ/KUIM0k4lfKDZpY9ZpOdNgWDyZY8tNrXumUZZimzWdXZ9vR dBssmd8qEKs1AHGFnMDt56IjLGou6j0qnWsLdR1e/WEFsYzGXLVHCv6vXRNkbjqh WFw5nA+2Dw1YAsy+YkTfgx2pOe+exM/wxsVPa7tG9oZ374dywUi1k6VoHw5dkmJw 1hbXqSLZtx2N51G+SpGmNAV4vLUF0y3dy2wnrzFkFT4uxh1w8QKCAQEA+h6LwHTK hgcJx6CQQ6zYRqXo4wdvMooY1FcqJOq7LvJUA2CX5OOLs8qN1TyFrOCuAUTurOrM ABlQ0FpsIaP8TOGz72dHe2eLB+dD6Bqjn10sEFMn54zWd/w9ympQrO9jb5X3ViTh sCcdYyXVS9Hz8nzbbIF+DaKlxF2Hh71uRDxXpMPxRcGbOIuKZXUj6RkTIulzqT6o uawlegWxch05QSgzq/1ASxtjTzo4iuDCAii3N45xqxnB+fV9NXEt4R2oOGquBRPJ LxKcOnaQKBD0YNX4muTq+zPlv/kOb8/ys2WGWDUrNkpyJXqhTve4KONjqM7+iL/U 4WdJuiCjonzk/QKCAQEA3Lc+kNq35FNLxMcnCVcUgkmiCWZ4dyGZZPdqjOPww1+n bbudGPzY1nxOvE60dZM4or/tm6qlXYfb2UU3+OOJrK9s297EQybZ8DTZu2GHyitc NSFV3Gl4cgvKdbieGKkk9X2dV9xSNesNvX9lJEnQxuwHDTeo8ubLHtV88Ml1xokn 7W+IFiyEuUIL4e5/fadbrI3EwMrbCF4+9VcfABx4PTNMzdc8LsncCMXE+jFX8AWp TsT2JezTe5o2WpvBoKMAYhJQNQiaWATn00pDVY/70H1vK3ljomAa1IUdOr/AhAF7 3jL0MYMgXSHzXZOKAtc7yf+QfFWF1Ls8+sen1clJVwKCAQEAp59rB0r+Iz56RmgL 5t7ifs5XujbURemY5E2aN+18DuVmenD0uvfoO1DnJt4NtCNLWhxpXEdq+jH9H/VJ fG4a+ydT4IC1vjVRTrWlo9qeh4H4suQX3S1c2kKY4pvHf25blH/Lp9bFzbkZD8Ze IRcOxxb4MsrBwL+dGnGYD9dbG63ZCtoqSxaKQSX7VS1hKKmeUopj8ivFBdIht5oz JogBQ/J+Vqg9u1gagRFCrYgdXTcOOtRix0lW336vL+6u0ax/fXe5MjvlW3+8Zc3p pIBgVrlvh9ccx8crFTIDg9m4DJRgqaLQV+0ifI2np3WK3RQvSQWYPetZ7sm69ltD bvUGvQKCAQAz5CEhjUqOs8asjOXwnDiGKSmfbCgGWi/mPQUf+rcwN9z1P5a/uTKB utgIDbj/q401Nkp2vrgCNV7KxitSqKxFnTjKuKUL5KZ4gvRtyZBTR751/1BgcauP pJYE91K0GZBG5zGG5pWtd4XTd5Af5/rdycAeq2ddNEWtCiRFuBeohbaNbBtimzTZ GV4R0DDJKf+zoeEQMqEsZnwG0mTHceoS+WylOGU92teQeG7HI7K5C5uymTwFzpgq 
ByegRd5QFgKRDB0vWsZuyzh1xI/wHdnmOpdYcUGre0zTijhFB7ALWQ32P6SJv3ps av78kSNxZ4j3BM7DbJf6W8sKasZazOghAoIBAHekpBcLq9gRv2+NfLYxWN2sTZVB 1ldwioG7rWvk5YQR2akukecI3NRjtC5gG2vverawG852Y4+oLfgRMHxgp0qNStwX juTykzPkCwZn8AyR+avC3mkrtJyM3IigcYOu4/UoaRDFa0xvCC1EfumpnKXIpHag miSQZf2sVbgqb3/LWvHIg/ceOP9oGJve87/HVfQtBoLaIe5RXCWkqB7mcI/exvTS 8ShaW6v2Fe5Bzdvawj7sbsVYRWe93Aq2tmIgSX320D2RVepb6mjD4nr0IUaM3Yed TFT7e2ikWXyDLLgVkDTU4Qe8fr3ZKGfanCIDzvgNw6H1gRi+2WQgOmjilMQ= -----END RSA PRIVATE KEY----- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/var/certificate.crt0000644000175000017500000000350200000000000023563 0ustar00coreycorey00000000000000-----BEGIN CERTIFICATE----- MIIFLjCCAxYCAQEwDQYJKoZIhvcNAQEFBQAwYTELMAkGA1UEBhMCQVUxEzARBgNV BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwHhcNMTIwMjA5MTcxMDUzWhcN MjIwMjA2MTcxMDUzWjBZMQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0 ZTESMBAGA1UEChMJT3BlbnN0YWNrMQ8wDQYDVQQLEwZHbGFuY2UxEDAOBgNVBAMT BzAuMC4wLjAwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXpUkQN6pu avo+gz3o1K4krVdPl1m7NjNJDyD/+ZH0EGNcEN7iag1qPE7JsjqGPNZsQK1dMoXb Sz+OSi9qvNeJnBcfwUx5qTAtwyAb9AxGkwuMafIU+lWbsclo+dPGsja01ywbXTCZ bF32iqnpOMYhfxWUdoQYiBkhxxhW9eMPKLS/KkP8/bx+Vaa2XJiAebqkd9nrksAA BeGc9mlafYBEmiChPdJEPw+1ePA4QVq9aPepDsqAKtGN8JLpmoC3BdxQQTbbwL3Q 8fTXK4tCNUaVk4AbDy/McFq6y0ocQoBPJjihOY35mWG/OLtcI99yPOpWGnps/5aG /64DDJ2D67Fnaj6gKHV+6TXFO8KZxlnxtgtiZDJBZkneTBt9ArSOv+l6NBsumRz0 iEJ4o4H1S2TSMnprAvX7WnGtc6Xi9gXahYcDHEelwwYzqAiTBv6hxSp4MZ2dNXa+ KzOitC7ZbV2qsg0au0wjfE/oSQ3NvsvUr8nOmfutJTvHRAwbC1v4G/tuAsO7O0w2 0u2B3u+pG06m5+rnEqp+rB9hmukRYTfgEFRRsVIvpFl/cwvPXKRcX03UIMx+lLr9 Ft+ep7YooBhY3wY2kwCxD4lRYNmbwsCIVywZt40f/4ad98TkufR9NhsfycxGeqbr mTMFlZ8TTlmP82iohekKCOvoyEuTIWL2+wIDAQABMA0GCSqGSIb3DQEBBQUAA4IC AQBMUBgV0R+Qltf4Du7u/8IFmGAoKR/mktB7R1gRRAqsvecUt7kIwBexGdavGg1y 0pU0+lgUZjJ20N1SlPD8gkNHfXE1fL6fmMjWz4dtYJjzRVhpufHPeBW4tl8DgHPN rBGAYQ+drDSXaEjiPQifuzKx8WS+DGA3ki4co5mPjVnVH1xvLIdFsk89z3b3YD1k yCJ/a9K36x6Z/c67JK7s6MWtrdRF9+MVnRKJ2PK4xznd1kBz16V+RA466wBDdARY vFbtkafbEqOb96QTonIZB7+fAldKDPZYnwPqasreLmaGOaM8sxtlPYAJ5bjDONbc AaXG8BMRQyO4FyH237otDKlxPyHOFV66BaffF5S8OlwIMiZoIvq+IcTZOdtDUSW2 KHNLfe5QEDZdKjWCBrfqAfvNuG13m03WqfmcMHl3o/KiPJlx8l9Z4QEzZ9xcyQGL cncgeHM9wJtzi2cD/rTDNFsx/gxvoyutRmno7I3NRbKmpsXF4StZioU3USRspB07 hYXOVnG3pS+PjVby7ThT3gvFHSocguOsxClx1epdUJAmJUbmM7NmOp5WVBVtMtC2 Su4NG/xJciXitKzw+btb7C7RjO6OEqv/1X/oBDzKBWQAwxUC+lqmnM7W6oqWJFEM YfTLnrjs7Hj6ThMGcEnfvc46dWK3dz0RjsQzUxugPuEkLA== -----END CERTIFICATE----- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/tests/var/privatekey.key0000644000175000017500000000625300000000000023472 0ustar00coreycorey00000000000000-----BEGIN RSA PRIVATE KEY----- MIIJKAIBAAKCAgEA16VJEDeqbmr6PoM96NSuJK1XT5dZuzYzSQ8g//mR9BBjXBDe 4moNajxOybI6hjzWbECtXTKF20s/jkovarzXiZwXH8FMeakwLcMgG/QMRpMLjGny FPpVm7HJaPnTxrI2tNcsG10wmWxd9oqp6TjGIX8VlHaEGIgZIccYVvXjDyi0vypD /P28flWmtlyYgHm6pHfZ65LAAAXhnPZpWn2ARJogoT3SRD8PtXjwOEFavWj3qQ7K gCrRjfCS6ZqAtwXcUEE228C90PH01yuLQjVGlZOAGw8vzHBaustKHEKATyY4oTmN +Zlhvzi7XCPfcjzqVhp6bP+Whv+uAwydg+uxZ2o+oCh1fuk1xTvCmcZZ8bYLYmQy QWZJ3kwbfQK0jr/pejQbLpkc9IhCeKOB9Utk0jJ6awL1+1pxrXOl4vYF2oWHAxxH pcMGM6gIkwb+ocUqeDGdnTV2viszorQu2W1dqrINGrtMI3xP6EkNzb7L1K/Jzpn7 rSU7x0QMGwtb+Bv7bgLDuztMNtLtgd7vqRtOpufq5xKqfqwfYZrpEWE34BBUUbFS L6RZf3MLz1ykXF9N1CDMfpS6/Rbfnqe2KKAYWN8GNpMAsQ+JUWDZm8LAiFcsGbeN 
H/+GnffE5Ln0fTYbH8nMRnqm65kzBZWfE05Zj/NoqIXpCgjr6MhLkyFi9vsCAwEA AQKCAgAA96baQcWr9SLmQOR4NOwLEhQAMWefpWCZhU3amB4FgEVR1mmJjnw868RW t0v36jH0Dl44us9K6o2Ab+jCi9JTtbWM2Osk6JNkwSlVtsSPVH2KxbbmTTExH50N sYE3tPj12rlB7isXpRrOzlRwzWZmJBHOtrFlAsdKFYCQc03vdXlKGkBv1BuSXYP/ 8W5ltSYXMspxehkOZvhaIejbFREMPbzDvGlDER1a7Q320qQ7kUr7ISvbY1XJUzj1 f1HwgEA6w/AhED5Jv6wfgvx+8Yo9hYnflTPbsO1XRS4x7kJxGHTMlFuEsSF1ICYH Bcos0wUiGcBO2N6uAFuhe98BBn+nOwAPZYWwGkmVuK2psm2mXAHx94GT/XqgK/1r VWGSoOV7Fhjauc2Nv8/vJU18DXT3OY5hc4iXVeEBkuZwRb/NVUtnFoHxVO/Mp5Fh /W5KZaLWVrLghzvSQ/KUIM0k4lfKDZpY9ZpOdNgWDyZY8tNrXumUZZimzWdXZ9vR dBssmd8qEKs1AHGFnMDt56IjLGou6j0qnWsLdR1e/WEFsYzGXLVHCv6vXRNkbjqh WFw5nA+2Dw1YAsy+YkTfgx2pOe+exM/wxsVPa7tG9oZ374dywUi1k6VoHw5dkmJw 1hbXqSLZtx2N51G+SpGmNAV4vLUF0y3dy2wnrzFkFT4uxh1w8QKCAQEA+h6LwHTK hgcJx6CQQ6zYRqXo4wdvMooY1FcqJOq7LvJUA2CX5OOLs8qN1TyFrOCuAUTurOrM ABlQ0FpsIaP8TOGz72dHe2eLB+dD6Bqjn10sEFMn54zWd/w9ympQrO9jb5X3ViTh sCcdYyXVS9Hz8nzbbIF+DaKlxF2Hh71uRDxXpMPxRcGbOIuKZXUj6RkTIulzqT6o uawlegWxch05QSgzq/1ASxtjTzo4iuDCAii3N45xqxnB+fV9NXEt4R2oOGquBRPJ LxKcOnaQKBD0YNX4muTq+zPlv/kOb8/ys2WGWDUrNkpyJXqhTve4KONjqM7+iL/U 4WdJuiCjonzk/QKCAQEA3Lc+kNq35FNLxMcnCVcUgkmiCWZ4dyGZZPdqjOPww1+n bbudGPzY1nxOvE60dZM4or/tm6qlXYfb2UU3+OOJrK9s297EQybZ8DTZu2GHyitc NSFV3Gl4cgvKdbieGKkk9X2dV9xSNesNvX9lJEnQxuwHDTeo8ubLHtV88Ml1xokn 7W+IFiyEuUIL4e5/fadbrI3EwMrbCF4+9VcfABx4PTNMzdc8LsncCMXE+jFX8AWp TsT2JezTe5o2WpvBoKMAYhJQNQiaWATn00pDVY/70H1vK3ljomAa1IUdOr/AhAF7 3jL0MYMgXSHzXZOKAtc7yf+QfFWF1Ls8+sen1clJVwKCAQEAp59rB0r+Iz56RmgL 5t7ifs5XujbURemY5E2aN+18DuVmenD0uvfoO1DnJt4NtCNLWhxpXEdq+jH9H/VJ fG4a+ydT4IC1vjVRTrWlo9qeh4H4suQX3S1c2kKY4pvHf25blH/Lp9bFzbkZD8Ze IRcOxxb4MsrBwL+dGnGYD9dbG63ZCtoqSxaKQSX7VS1hKKmeUopj8ivFBdIht5oz JogBQ/J+Vqg9u1gagRFCrYgdXTcOOtRix0lW336vL+6u0ax/fXe5MjvlW3+8Zc3p pIBgVrlvh9ccx8crFTIDg9m4DJRgqaLQV+0ifI2np3WK3RQvSQWYPetZ7sm69ltD bvUGvQKCAQAz5CEhjUqOs8asjOXwnDiGKSmfbCgGWi/mPQUf+rcwN9z1P5a/uTKB utgIDbj/q401Nkp2vrgCNV7KxitSqKxFnTjKuKUL5KZ4gvRtyZBTR751/1BgcauP pJYE91K0GZBG5zGG5pWtd4XTd5Af5/rdycAeq2ddNEWtCiRFuBeohbaNbBtimzTZ GV4R0DDJKf+zoeEQMqEsZnwG0mTHceoS+WylOGU92teQeG7HI7K5C5uymTwFzpgq ByegRd5QFgKRDB0vWsZuyzh1xI/wHdnmOpdYcUGre0zTijhFB7ALWQ32P6SJv3ps av78kSNxZ4j3BM7DbJf6W8sKasZazOghAoIBAHekpBcLq9gRv2+NfLYxWN2sTZVB 1ldwioG7rWvk5YQR2akukecI3NRjtC5gG2vverawG852Y4+oLfgRMHxgp0qNStwX juTykzPkCwZn8AyR+avC3mkrtJyM3IigcYOu4/UoaRDFa0xvCC1EfumpnKXIpHag miSQZf2sVbgqb3/LWvHIg/ceOP9oGJve87/HVfQtBoLaIe5RXCWkqB7mcI/exvTS 8ShaW6v2Fe5Bzdvawj7sbsVYRWe93Aq2tmIgSX320D2RVepb6mjD4nr0IUaM3Yed TFT7e2ikWXyDLLgVkDTU4Qe8fr3ZKGfanCIDzvgNw6H1gRi+2WQgOmjilMQ= -----END RSA PRIVATE KEY----- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/version.py0000644000175000017500000000125600000000000020700 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import pbr.version version_info = pbr.version.VersionInfo('neutron') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/worker.py0000644000175000017500000000402200000000000020516 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import worker from oslo_config import cfg from oslo_service import loopingcall class NeutronBaseWorker(worker.BaseWorker): def __init__(self, worker_process_count=1, set_proctitle=None): set_proctitle = set_proctitle or cfg.CONF.setproctitle super(NeutronBaseWorker, self).__init__( worker_process_count=worker_process_count, set_proctitle=set_proctitle ) def start(self, name="neutron-server", desc=None): super(NeutronBaseWorker, self).start(name=name, desc=desc) class PeriodicWorker(NeutronBaseWorker): """A worker that runs a function at a fixed interval.""" def __init__(self, check_func, interval, initial_delay): super(PeriodicWorker, self).__init__(worker_process_count=0) self._check_func = check_func self._loop = None self._interval = interval self._initial_delay = initial_delay def start(self): super(PeriodicWorker, self).start(desc="periodic worker") if self._loop is None: self._loop = loopingcall.FixedIntervalLoopingCall(self._check_func) self._loop.start(interval=self._interval, initial_delay=self._initial_delay) def wait(self): if self._loop is not None: self._loop.wait() def stop(self): if self._loop is not None: self._loop.stop() def reset(self): self.stop() self.wait() self.start() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/neutron/wsgi.py0000644000175000017500000007150000000000000020163 0ustar00coreycorey00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
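# PeriodicWorker above (neutron/worker.py) runs with worker_process_count=0,
# i.e. in the parent process, and drives its check function through an
# oslo.service FixedIntervalLoopingCall. A hypothetical usage sketch:
#
#     def _check_state():
#         pass  # any callable works here, e.g. a liveness or sync check
#
#     worker = PeriodicWorker(_check_state, interval=60, initial_delay=10)
#     worker.start()   # fires _check_state every 60s after a 10s delay
#     ...
#     worker.stop()
#     worker.wait()    # reset() is simply stop/wait/start
#
# worker_process_count=0 matters: the service framework will not fork a
# child for this worker, so the loop shares the parent's lifecycle.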
""" Utility methods for working with WSGI servers """ import errno import socket import sys import time import eventlet.wsgi from neutron_lib import context from neutron_lib.db import api as db_api from neutron_lib import exceptions as exception from oslo_config import cfg import oslo_i18n from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_service import service as common_service from oslo_service import sslutils from oslo_service import systemd from oslo_service import wsgi from oslo_utils import encodeutils from oslo_utils import excutils import six import webob.dec import webob.exc from neutron._i18n import _ from neutron.common import config from neutron.conf import wsgi as wsgi_config from neutron import worker as neutron_worker CONF = cfg.CONF wsgi_config.register_socket_opts() LOG = logging.getLogger(__name__) def encode_body(body): """Encode unicode body. WebOb requires to encode unicode body used to update response body. """ return encodeutils.to_utf8(body) class WorkerService(neutron_worker.NeutronBaseWorker): """Wraps a worker to be handled by ProcessLauncher""" def __init__(self, service, application, set_proctitle, disable_ssl=False, worker_process_count=0): super(WorkerService, self).__init__(worker_process_count, set_proctitle) self._service = service self._application = application self._disable_ssl = disable_ssl self._server = None def start(self, desc=None): super(WorkerService, self).start(desc=desc) # When api worker is stopped it kills the eventlet wsgi server which # internally closes the wsgi server socket object. This server socket # object becomes not usable which leads to "Bad file descriptor" # errors on service restart. # Duplicate a socket object to keep a file descriptor usable. dup_sock = self._service._socket.dup() if CONF.use_ssl and not self._disable_ssl: dup_sock = sslutils.wrap(CONF, dup_sock) self._server = self._service.pool.spawn(self._service._run, self._application, dup_sock) def wait(self): if isinstance(self._server, eventlet.greenthread.GreenThread): self._server.wait() def stop(self): if isinstance(self._server, eventlet.greenthread.GreenThread): self._server.kill() self._server = None @staticmethod def reset(): config.reset_service() class Server(object): """Server class to manage multiple WSGI sockets and applications.""" def __init__(self, name, num_threads=None, disable_ssl=False): # Raise the default from 8192 to accommodate large tokens eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line self.num_threads = num_threads or CONF.wsgi_default_pool_size self.disable_ssl = disable_ssl # Pool for a greenthread in which wsgi server will be running self.pool = eventlet.GreenPool(1) self.name = name self._server = None # A value of 0 is converted to None because None is what causes the # wsgi server to wait forever. self.client_socket_timeout = CONF.client_socket_timeout or None if CONF.use_ssl and not self.disable_ssl: sslutils.is_enabled(CONF) def _get_socket(self, host, port, backlog): bind_addr = (host, port) # TODO(dims): eventlet's green dns/socket module does not actually # support IPv6 in getaddrinfo(). 
We need to get around this in the # future or monitor upstream for a fix try: info = socket.getaddrinfo(bind_addr[0], bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)[0] family = info[0] bind_addr = info[-1] except Exception: LOG.exception("Unable to listen on %(host)s:%(port)s", {'host': host, 'port': port}) sys.exit(1) sock = None retry_until = time.time() + CONF.retry_until_window while not sock and time.time() < retry_until: try: sock = eventlet.listen(bind_addr, backlog=backlog, family=family) except socket.error as err: with excutils.save_and_reraise_exception() as ctxt: if err.errno == errno.EADDRINUSE: ctxt.reraise = False eventlet.sleep(0.1) if not sock: raise RuntimeError(_("Could not bind to %(host)s:%(port)s " "after trying for %(time)d seconds") % {'host': host, 'port': port, 'time': CONF.retry_until_window}) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # sockets can hang around forever without keepalive sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) # This option isn't available in the OS X version of eventlet if hasattr(socket, 'TCP_KEEPIDLE'): sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, CONF.tcp_keepidle) return sock def start(self, application, port, host='0.0.0.0', workers=0, desc=None): """Run a WSGI server with the given application.""" self._host = host self._port = port backlog = CONF.backlog self._socket = self._get_socket(self._host, self._port, backlog=backlog) self._launch(application, workers) def _launch(self, application, workers=0, desc=None): set_proctitle = "off" if desc is None else CONF.setproctitle service = WorkerService(self, application, set_proctitle, self.disable_ssl, workers) if workers < 1: # The API service should run in the current process. self._server = service # Dump the initial option values cfg.CONF.log_opt_values(LOG, logging.DEBUG) service.start(desc=desc) systemd.notify_once() else: # dispose the whole pool before os.fork, otherwise there will # be shared DB connections in child processes which may cause # DB errors. db_api.get_context_manager().dispose_pool() # The API service runs in a number of child processes. # Minimize the cost of checking for child exit by extending the # wait interval past the default of 0.01s. self._server = common_service.ProcessLauncher( cfg.CONF, wait_interval=1.0, restart_method='mutate') self._server.launch_service(service, workers=service.worker_process_count) @property def host(self): return self._socket.getsockname()[0] if self._socket else self._host @property def port(self): return self._socket.getsockname()[1] if self._socket else self._port def stop(self): self._server.stop() def wait(self): """Wait until all servers have completed running.""" try: self._server.wait() except KeyboardInterrupt: pass def _run(self, application, socket): """Start a WSGI server in a new green thread.""" eventlet.wsgi.server(socket, application, max_size=self.num_threads, log=LOG, keepalive=CONF.wsgi_keep_alive, log_format=CONF.wsgi_log_format, socket_timeout=self.client_socket_timeout) @property def process_launcher(self): if isinstance(self._server, common_service.ProcessLauncher): return self._server return None class Request(wsgi.Request): def best_match_content_type(self): """Determine the most acceptable content-type. 
Based on: 1) URI extension (.json) 2) Content-type header 3) Accept* headers """ # First lookup http request path parts = self.path.rsplit('.', 1) if len(parts) > 1: _format = parts[1] if _format in ['json']: return 'application/{0}'.format(_format) # Then look up content header type_from_header = self.get_content_type() if type_from_header: return type_from_header ctypes = ['application/json'] # Finally search in Accept-* headers acceptable = self.accept.acceptable_offers(ctypes) if acceptable: return acceptable[0][0] return 'application/json' def get_content_type(self): allowed_types = ("application/json",) if "Content-Type" not in self.headers: LOG.debug("Missing Content-Type") return None _type = self.content_type if _type in allowed_types: return _type return None def best_match_language(self): """Determines best available locale from the Accept-Language header. :returns: the best language match or None if the 'Accept-Language' header was not available in the request. """ if not self.accept_language: return None all_languages = oslo_i18n.get_available_languages('neutron') best_match = self.accept_language.lookup(all_languages, default='fake_LANG') if best_match == 'fake_LANG': best_match = None return best_match @property def context(self): if 'neutron.context' not in self.environ: self.environ['neutron.context'] = context.get_admin_context() return self.environ['neutron.context'] class ActionDispatcher(object): """Maps method name to local methods through action name.""" def dispatch(self, *args, **kwargs): """Find and call local method.""" action = kwargs.pop('action', 'default') action_method = getattr(self, str(action), self.default) return action_method(*args, **kwargs) def default(self, data): raise NotImplementedError() class DictSerializer(ActionDispatcher): """Default request body serialization.""" def serialize(self, data, action='default'): return self.dispatch(data, action=action) def default(self, data): return "" class JSONDictSerializer(DictSerializer): """Default JSON request body serialization.""" def default(self, data): def sanitizer(obj): return six.text_type(obj) return encode_body(jsonutils.dumps(data, default=sanitizer)) class ResponseHeaderSerializer(ActionDispatcher): """Default response headers serialization.""" def serialize(self, response, data, action): self.dispatch(response, data, action=action) def default(self, response, data): response.status_int = 200 class ResponseSerializer(object): """Encode the necessary pieces into a response object.""" def __init__(self, body_serializers=None, headers_serializer=None): self.body_serializers = { 'application/json': JSONDictSerializer(), } self.body_serializers.update(body_serializers or {}) self.headers_serializer = (headers_serializer or ResponseHeaderSerializer()) def serialize(self, response_data, content_type, action='default'): """Serialize a dict into a string and wrap in a wsgi.Request object. 
:param response_data: dict produced by the Controller :param content_type: expected mimetype of serialized response body """ response = webob.Response() self.serialize_headers(response, response_data, action) self.serialize_body(response, response_data, content_type, action) return response def serialize_headers(self, response, data, action): self.headers_serializer.serialize(response, data, action) def serialize_body(self, response, data, content_type, action): response.headers['Content-Type'] = content_type if data is not None: serializer = self.get_body_serializer(content_type) response.body = serializer.serialize(data, action) def get_body_serializer(self, content_type): try: return self.body_serializers[content_type] except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) class TextDeserializer(ActionDispatcher): """Default request body deserialization.""" def deserialize(self, datastring, action='default'): return self.dispatch(datastring, action=action) def default(self, datastring): return {} class JSONDeserializer(TextDeserializer): def _from_json(self, datastring): try: return jsonutils.loads(datastring) except ValueError: msg = _("Cannot understand JSON") raise exception.MalformedRequestBody(reason=msg) def default(self, datastring): return {'body': self._from_json(datastring)} class RequestHeadersDeserializer(ActionDispatcher): """Default request headers deserializer.""" def deserialize(self, request, action): return self.dispatch(request, action=action) def default(self, request): return {} class RequestDeserializer(object): """Break up a Request object into more useful pieces.""" def __init__(self, body_deserializers=None, headers_deserializer=None): self.body_deserializers = { 'application/json': JSONDeserializer(), } self.body_deserializers.update(body_deserializers or {}) self.headers_deserializer = (headers_deserializer or RequestHeadersDeserializer()) def deserialize(self, request): """Extract necessary pieces of the request. 
:param request: Request object :returns: tuple of expected controller action name, dictionary of keyword arguments to pass to the controller, the expected content type of the response """ action_args = self.get_action_args(request.environ) action = action_args.pop('action', None) action_args.update(self.deserialize_headers(request, action)) action_args.update(self.deserialize_body(request, action)) accept = self.get_expected_content_type(request) return (action, action_args, accept) def deserialize_headers(self, request, action): return self.headers_deserializer.deserialize(request, action) def deserialize_body(self, request, action): try: content_type = request.best_match_content_type() except exception.InvalidContentType: LOG.debug("Unrecognized Content-Type provided in request") return {} if content_type is None: LOG.debug("No Content-Type provided in request") return {} if not len(request.body) > 0: LOG.debug("Empty body provided in request") return {} try: deserializer = self.get_body_deserializer(content_type) except exception.InvalidContentType: with excutils.save_and_reraise_exception(): LOG.debug("Unable to deserialize body as provided " "Content-Type") return deserializer.deserialize(request.body, action) def get_body_deserializer(self, content_type): try: return self.body_deserializers[content_type] except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) def get_expected_content_type(self, request): return request.best_match_content_type() def get_action_args(self, request_environment): """Parse dictionary created by routes library.""" try: args = request_environment['wsgiorg.routing_args'][1].copy() except Exception: return {} try: del args['controller'] except KeyError: pass try: del args['format'] except KeyError: pass return args class Application(object): """Base WSGI application wrapper. Subclasses need to implement __call__.""" @classmethod def factory(cls, global_config, **local_config): """Used for paste app factories in paste.deploy config files. Any local configuration (that is, values under the [app:APPNAME] section of the paste config) will be passed into the `__init__` method as kwargs. A hypothetical configuration would look like: [app:wadl] latest_version = 1.3 paste.app_factory = nova.api.fancy_api:Wadl.factory which would result in a call to the `Wadl` class as import neutron.api.fancy_api fancy_api.Wadl(latest_version='1.3') You could of course re-implement the `factory` method in subclasses, but using the kwarg passing it shouldn't be necessary. """ return cls(**local_config) def __call__(self, environ, start_response): r"""Subclasses will probably want to implement __call__ like this: @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): # Any of the following objects work as responses: # Option 1: simple string res = 'message\n' # Option 2: a nicely formatted HTTP exception page res = exc.HTTPForbidden(explanation='Nice try') # Option 3: a webob Response object (in case you need to play with # headers, or you want to be treated like an iterable, or or or) res = Response(); res.app_iter = open('somefile') # Option 4: any wsgi app to be run next res = self.application # Option 5: you can get a Response object for a wsgi app, too, to # play with headers etc res = req.get_response(self.application) # You can then just return your response... return res # ... or set req.response and return None. req.response = res See the end of http://pythonpaste.org/webob/modules/dec.html for more info. 
""" raise NotImplementedError(_('You must implement __call__')) class Resource(Application): """WSGI app that handles (de)serialization and controller dispatch. WSGI app that reads routing information supplied by RoutesMiddleware and calls the requested action method upon its controller. All controller action methods must accept a 'req' argument, which is the incoming wsgi.Request. If the operation is a PUT or POST, the controller method must also accept a 'body' argument (the deserialized request body). They may raise a webob.exc exception or return a dict, which will be serialized by requested content type. """ def __init__(self, controller, fault_body_function, deserializer=None, serializer=None): """Object initialization. :param controller: object that implement methods created by routes lib :param deserializer: object that can serialize the output of a controller into a webob response :param serializer: object that can deserialize a webob request into necessary pieces :param fault_body_function: a function that will build the response body for HTTP errors raised by operations on this resource object """ self.controller = controller self.deserializer = deserializer or RequestDeserializer() self.serializer = serializer or ResponseSerializer() self._fault_body_function = fault_body_function @webob.dec.wsgify(RequestClass=Request) def __call__(self, request): """WSGI method that controls (de)serialization and method dispatch.""" LOG.info("%(method)s %(url)s", {"method": request.method, "url": request.url}) try: action, args, accept = self.deserializer.deserialize(request) except exception.InvalidContentType: msg = _("Unsupported Content-Type") LOG.exception("InvalidContentType: %s", msg) return Fault(webob.exc.HTTPBadRequest(explanation=msg)) except exception.MalformedRequestBody: msg = _("Malformed request body") LOG.exception("MalformedRequestBody: %s", msg) return Fault(webob.exc.HTTPBadRequest(explanation=msg)) try: action_result = self.dispatch(request, action, args) except webob.exc.HTTPException as ex: LOG.info("HTTP exception thrown: %s", ex) action_result = Fault(ex, self._fault_body_function) except Exception: LOG.exception("Internal error") # Do not include the traceback to avoid returning it to clients. 
action_result = Fault(webob.exc.HTTPServerError(), self._fault_body_function) if isinstance(action_result, dict) or action_result is None: response = self.serializer.serialize(action_result, accept, action=action) else: response = action_result try: LOG.info("%(url)s returned with HTTP %(status)d", dict(url=request.url, status=response.status_int)) except AttributeError as e: LOG.info("%(url)s returned a fault: %(exception)s", dict(url=request.url, exception=e)) return response def dispatch(self, request, action, action_args): """Find action-specific method on controller and call it.""" controller_method = getattr(self.controller, action) try: # NOTE(salvatore-orlando): the controller method must have # an argument whose name is 'request' return controller_method(request=request, **action_args) except TypeError: LOG.exception('Invalid request') return Fault(webob.exc.HTTPBadRequest()) def _default_body_function(wrapped_exc): code = wrapped_exc.status_int fault_data = { 'Error': { 'code': code, 'message': wrapped_exc.explanation}} # 'code' is an attribute on the fault tag itself metadata = {'attributes': {'Error': 'code'}} return fault_data, metadata class Fault(webob.exc.HTTPException): """Generates an HTTP response from a webob HTTP exception.""" def __init__(self, exception, body_function=None): """Creates a Fault for the given webob.exc.exception.""" self.wrapped_exc = exception self.status_int = self.wrapped_exc.status_int self._body_function = body_function or _default_body_function @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): """Generate a WSGI response based on the exception passed to ctor.""" # Replace the body with fault details. fault_data, metadata = self._body_function(self.wrapped_exc) content_type = req.best_match_content_type() serializer = { 'application/json': JSONDictSerializer(), }[content_type] self.wrapped_exc.body = serializer.serialize(fault_data) self.wrapped_exc.content_type = content_type return self.wrapped_exc # NOTE(salvatore-orlando): this class will go once the # extension API framework is updated class Controller(object): """WSGI app that dispatched to methods. WSGI app that reads routing information supplied by RoutesMiddleware and calls the requested action method upon itself. All action methods must, in addition to their normal parameters, accept a 'req' argument which is the incoming wsgi.Request. They raise a webob.exc exception, or return a dict which will be serialized by requested content type. """ @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): """Call the method specified in req.environ by RoutesMiddleware.""" arg_dict = req.environ['wsgiorg.routing_args'][1] action = arg_dict['action'] method = getattr(self, action) del arg_dict['controller'] del arg_dict['action'] if 'format' in arg_dict: del arg_dict['format'] arg_dict['request'] = req result = method(**arg_dict) if isinstance(result, dict) or result is None: if result is None: status = 204 content_type = '' body = None else: status = 200 content_type = req.best_match_content_type() body = self._serialize(result, content_type) response = webob.Response(status=status, content_type=content_type, body=body) LOG.debug("%(url)s returned with HTTP %(status)d", dict(url=req.url, status=response.status_int)) return response else: return result def _serialize(self, data, content_type): """Serialize the given dict to the provided content_type. 
Uses self._serialization_metadata if it exists, which is a dict mapping MIME types to information needed to serialize to that type. """ _metadata = getattr(type(self), '_serialization_metadata', {}) serializer = Serializer(_metadata) try: return serializer.serialize(data, content_type) except exception.InvalidContentType: msg = _('The requested content type %s is invalid.') % content_type raise webob.exc.HTTPNotAcceptable(msg) def _deserialize(self, data, content_type): """Deserialize the request body to the specified content type. Uses self._serialization_metadata if it exists, which is a dict mapping MIME types to information needed to serialize to that type. """ _metadata = getattr(type(self), '_serialization_metadata', {}) serializer = Serializer(_metadata) return serializer.deserialize(data, content_type)['body'] # NOTE(salvatore-orlando): this class will go once the # extension API framework is updated class Serializer(object): """Serializes and deserializes dictionaries to certain MIME types.""" def __init__(self, metadata=None): """Create a serializer based on the given WSGI environment. 'metadata' is an optional dict mapping MIME types to information needed to serialize a dictionary to that type. """ self.metadata = metadata or {} def _get_serialize_handler(self, content_type): handlers = { 'application/json': JSONDictSerializer(), } try: return handlers[content_type] except Exception: raise exception.InvalidContentType(content_type=content_type) def serialize(self, data, content_type): """Serialize a dictionary into the specified content type.""" return self._get_serialize_handler(content_type).serialize(data) def deserialize(self, datastring, content_type): """Deserialize a string to a dictionary. The string must be in the format of a supported MIME type. """ try: return self.get_deserialize_handler(content_type).deserialize( datastring) except Exception: raise webob.exc.HTTPBadRequest(_("Could not deserialize data")) def get_deserialize_handler(self, content_type): handlers = { 'application/json': JSONDeserializer(), } try: return handlers[content_type] except Exception: raise exception.InvalidContentType(content_type=content_type) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.1830435 neutron-16.0.0.0b2.dev214/neutron.egg-info/0000755000175000017500000000000000000000000020327 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982290.0 neutron-16.0.0.0b2.dev214/neutron.egg-info/PKG-INFO0000644000175000017500000000347600000000000021436 0ustar00coreycorey00000000000000Metadata-Version: 1.2 Name: neutron Version: 16.0.0.0b2.dev214 Summary: OpenStack Networking Home-page: https://docs.openstack.org/neutron/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: ================= OpenStack Neutron ================= .. image:: https://governance.openstack.org/tc/badges/neutron.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on Neutron is an OpenStack project to provide "network connectivity as a service" between interface devices (e.g., vNICs) managed by other OpenStack services (e.g., Nova). 
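# The wsgi.py module that ends above funnels every request through one
# pipeline: Request.best_match_content_type() picks a MIME type (URI
# extension first, then the Content-Type header, then Accept),
# RequestDeserializer turns the body into controller kwargs, and
# ResponseSerializer wraps the controller's dict in a webob.Response. A
# hedged end-to-end sketch of that flow, using names from the module (the
# request and controller objects here are hypothetical):
#
#     deserializer = RequestDeserializer()
#     action, args, accept = deserializer.deserialize(request)
#     result = getattr(controller, action)(request=request, **args)
#     response = ResponseSerializer().serialize(result, accept, action)
#
# Errors raised along the way are wrapped in Fault, which re-serializes the
# webob exception body as JSON before returning it to the client.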
To learn more about neutron: * Documentation: https://docs.openstack.org/neutron/latest/ * Features: https://specs.openstack.org/openstack/neutron-specs * Defects: https://launchpad.net/neutron * Release notes: https://docs.openstack.org/releasenotes/neutron/index.html * Source: https://opendev.org/openstack/neutron Get in touch via `email `_. Use [Neutron] in your subject. To learn how to contribute, please read the CONTRIBUTING.rst file. Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Requires-Python: >=3.6 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982290.0 neutron-16.0.0.0b2.dev214/neutron.egg-info/SOURCES.txt0000644000175000017500000042016200000000000022220 0ustar00coreycorey00000000000000.coveragerc .mailmap .pylintrc .stestr.conf AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst TESTING.rst babel.cfg bindep.txt lower-constraints.txt plugin.spec requirements.txt setup.cfg setup.py test-requirements.txt tox.ini api-ref/README.rst devstack/ovn-compute-local.conf.sample devstack/ovn-db-local.conf.sample devstack/ovn-local.conf.sample devstack/ovn-vtep-local.conf.sample devstack/plugin.sh devstack/settings devstack/lib/dns devstack/lib/fip_port_forwarding devstack/lib/flavors devstack/lib/l2_agent devstack/lib/l2_agent_sriovnicswitch devstack/lib/l3_agent devstack/lib/l3_conntrack_helper devstack/lib/log devstack/lib/macvtap_agent devstack/lib/ml2 devstack/lib/network_segment_range devstack/lib/ovn_agent devstack/lib/ovs devstack/lib/placement devstack/lib/qos devstack/lib/segments devstack/lib/tag_ports_during_bulk_creation devstack/lib/trunk devstack/lib/uplink_status_propagation doc/Makefile doc/requirements.txt doc/source/_intro.rst doc/source/conf.py doc/source/index.rst doc/source/pdf-index.rst doc/source/_static/support_matrix.css doc/source/admin/config-address-scopes.rst doc/source/admin/config-auto-allocation.rst doc/source/admin/config-az.rst doc/source/admin/config-bgp-dynamic-routing.rst doc/source/admin/config-dhcp-ha.rst doc/source/admin/config-dns-int-ext-serv.rst doc/source/admin/config-dns-int.rst doc/source/admin/config-dns-res.rst doc/source/admin/config-dvr-ha-snat.rst doc/source/admin/config-fip-port-forwardings.rst doc/source/admin/config-ipam.rst doc/source/admin/config-ipv6.rst doc/source/admin/config-logging.rst doc/source/admin/config-macvtap.rst doc/source/admin/config-ml2.rst doc/source/admin/config-mtu.rst doc/source/admin/config-network-segment-ranges.rst doc/source/admin/config-ovs-dpdk.rst doc/source/admin/config-ovs-offload.rst doc/source/admin/config-ovsfwdriver.rst doc/source/admin/config-qos-min-bw.rst doc/source/admin/config-qos.rst doc/source/admin/config-rbac.rst doc/source/admin/config-routed-networks.rst doc/source/admin/config-service-subnets.rst doc/source/admin/config-services-agent.rst doc/source/admin/config-sfc.rst doc/source/admin/config-sriov.rst doc/source/admin/config-subnet-onboard.rst doc/source/admin/config-subnet-pools.rst doc/source/admin/config-trunking.rst doc/source/admin/config-wsgi.rst doc/source/admin/config.rst 
doc/source/admin/deploy-lb-ha-vrrp.rst doc/source/admin/deploy-lb-provider.rst doc/source/admin/deploy-lb-selfservice.rst doc/source/admin/deploy-lb.rst doc/source/admin/deploy-ovs-ha-dvr.rst doc/source/admin/deploy-ovs-ha-vrrp.rst doc/source/admin/deploy-ovs-provider.rst doc/source/admin/deploy-ovs-selfservice.rst doc/source/admin/deploy-ovs.rst doc/source/admin/deploy.rst doc/source/admin/fwaas-v2-scenario.rst doc/source/admin/fwaas.rst doc/source/admin/index.rst doc/source/admin/intro-basic-networking.rst doc/source/admin/intro-nat.rst doc/source/admin/intro-network-components.rst doc/source/admin/intro-network-namespaces.rst doc/source/admin/intro-os-networking.rst doc/source/admin/intro-overlay-protocols.rst doc/source/admin/intro.rst doc/source/admin/migration-classic-to-l3ha.rst doc/source/admin/migration-database.rst doc/source/admin/migration-nova-network-to-neutron.rst doc/source/admin/migration.rst doc/source/admin/misc-libvirt.rst doc/source/admin/misc.rst doc/source/admin/neutron_linuxbridge.rst doc/source/admin/ops-ip-availability.rst doc/source/admin/ops-quotas.rst doc/source/admin/ops-resource-purge.rst doc/source/admin/ops-resource-tags.rst doc/source/admin/ops.rst doc/source/admin/vpnaas-scenario.rst doc/source/admin/archives/adv-config.rst doc/source/admin/archives/adv-features.rst doc/source/admin/archives/adv-operational-features.rst doc/source/admin/archives/arch.rst doc/source/admin/archives/auth.rst doc/source/admin/archives/config-agents.rst doc/source/admin/archives/config-identity.rst doc/source/admin/archives/config-plugins.rst doc/source/admin/archives/index.rst doc/source/admin/archives/introduction.rst doc/source/admin/archives/multi-dhcp-agents.rst doc/source/admin/archives/use.rst doc/source/admin/archives/figures/vmware_nsx_ex1.graffle doc/source/admin/archives/figures/vmware_nsx_ex1.png doc/source/admin/archives/figures/vmware_nsx_ex1.svg doc/source/admin/archives/figures/vmware_nsx_ex2.graffle doc/source/admin/archives/figures/vmware_nsx_ex2.png doc/source/admin/archives/figures/vmware_nsx_ex2.svg doc/source/admin/figures/NetworkTypes.png doc/source/admin/figures/NetworkTypes.svg doc/source/admin/figures/bgp-dynamic-routing-example1.graffle doc/source/admin/figures/bgp-dynamic-routing-example1.png doc/source/admin/figures/bgp-dynamic-routing-example1.svg doc/source/admin/figures/bgp-dynamic-routing-example2.graffle doc/source/admin/figures/bgp-dynamic-routing-example2.png doc/source/admin/figures/bgp-dynamic-routing-example2.svg doc/source/admin/figures/bgp-dynamic-routing-overview.graffle doc/source/admin/figures/bgp-dynamic-routing-overview.png doc/source/admin/figures/bgp-dynamic-routing-overview.svg doc/source/admin/figures/config-macvtap-compute1.png doc/source/admin/figures/config-macvtap-compute2.png doc/source/admin/figures/demo_multiple_dhcp_agents.png doc/source/admin/figures/deploy-lb-ha-vrrp-compconn1.graffle doc/source/admin/figures/deploy-lb-ha-vrrp-compconn1.png doc/source/admin/figures/deploy-lb-ha-vrrp-compconn1.svg doc/source/admin/figures/deploy-lb-ha-vrrp-overview.graffle doc/source/admin/figures/deploy-lb-ha-vrrp-overview.png doc/source/admin/figures/deploy-lb-ha-vrrp-overview.svg doc/source/admin/figures/deploy-lb-provider-compconn1.graffle doc/source/admin/figures/deploy-lb-provider-compconn1.png doc/source/admin/figures/deploy-lb-provider-compconn1.svg doc/source/admin/figures/deploy-lb-provider-compconn2.graffle doc/source/admin/figures/deploy-lb-provider-compconn2.png doc/source/admin/figures/deploy-lb-provider-compconn2.svg 
doc/source/admin/figures/deploy-lb-provider-flowew1.graffle doc/source/admin/figures/deploy-lb-provider-flowew1.png doc/source/admin/figures/deploy-lb-provider-flowew1.svg doc/source/admin/figures/deploy-lb-provider-flowew2.graffle doc/source/admin/figures/deploy-lb-provider-flowew2.png doc/source/admin/figures/deploy-lb-provider-flowew2.svg doc/source/admin/figures/deploy-lb-provider-flowns1.graffle doc/source/admin/figures/deploy-lb-provider-flowns1.png doc/source/admin/figures/deploy-lb-provider-flowns1.svg doc/source/admin/figures/deploy-lb-provider-overview.graffle doc/source/admin/figures/deploy-lb-provider-overview.png doc/source/admin/figures/deploy-lb-provider-overview.svg doc/source/admin/figures/deploy-lb-selfservice-compconn1.graffle doc/source/admin/figures/deploy-lb-selfservice-compconn1.png doc/source/admin/figures/deploy-lb-selfservice-compconn1.svg doc/source/admin/figures/deploy-lb-selfservice-flowew1.graffle doc/source/admin/figures/deploy-lb-selfservice-flowew1.png doc/source/admin/figures/deploy-lb-selfservice-flowew1.svg doc/source/admin/figures/deploy-lb-selfservice-flowew2.graffle doc/source/admin/figures/deploy-lb-selfservice-flowew2.png doc/source/admin/figures/deploy-lb-selfservice-flowew2.svg doc/source/admin/figures/deploy-lb-selfservice-flowns1.graffle doc/source/admin/figures/deploy-lb-selfservice-flowns1.png doc/source/admin/figures/deploy-lb-selfservice-flowns1.svg doc/source/admin/figures/deploy-lb-selfservice-flowns2.graffle doc/source/admin/figures/deploy-lb-selfservice-flowns2.png doc/source/admin/figures/deploy-lb-selfservice-flowns2.svg doc/source/admin/figures/deploy-lb-selfservice-overview.graffle doc/source/admin/figures/deploy-lb-selfservice-overview.png doc/source/admin/figures/deploy-lb-selfservice-overview.svg doc/source/admin/figures/deploy-ovs-ha-dvr-compconn1.graffle doc/source/admin/figures/deploy-ovs-ha-dvr-compconn1.png doc/source/admin/figures/deploy-ovs-ha-dvr-compconn1.svg doc/source/admin/figures/deploy-ovs-ha-dvr-flowew1.graffle doc/source/admin/figures/deploy-ovs-ha-dvr-flowew1.png doc/source/admin/figures/deploy-ovs-ha-dvr-flowew1.svg doc/source/admin/figures/deploy-ovs-ha-dvr-flowns1.graffle doc/source/admin/figures/deploy-ovs-ha-dvr-flowns1.png doc/source/admin/figures/deploy-ovs-ha-dvr-flowns1.svg doc/source/admin/figures/deploy-ovs-ha-dvr-flowns2.graffle doc/source/admin/figures/deploy-ovs-ha-dvr-flowns2.png doc/source/admin/figures/deploy-ovs-ha-dvr-flowns2.svg doc/source/admin/figures/deploy-ovs-ha-dvr-overview.graffle doc/source/admin/figures/deploy-ovs-ha-dvr-overview.png doc/source/admin/figures/deploy-ovs-ha-dvr-overview.svg doc/source/admin/figures/deploy-ovs-ha-vrrp-compconn1.graffle doc/source/admin/figures/deploy-ovs-ha-vrrp-compconn1.png doc/source/admin/figures/deploy-ovs-ha-vrrp-compconn1.svg doc/source/admin/figures/deploy-ovs-ha-vrrp-overview.graffle doc/source/admin/figures/deploy-ovs-ha-vrrp-overview.png doc/source/admin/figures/deploy-ovs-ha-vrrp-overview.svg doc/source/admin/figures/deploy-ovs-provider-compconn1.graffle doc/source/admin/figures/deploy-ovs-provider-compconn1.png doc/source/admin/figures/deploy-ovs-provider-compconn1.svg doc/source/admin/figures/deploy-ovs-provider-compconn2.graffle doc/source/admin/figures/deploy-ovs-provider-compconn2.png doc/source/admin/figures/deploy-ovs-provider-compconn2.svg doc/source/admin/figures/deploy-ovs-provider-flowew1.graffle doc/source/admin/figures/deploy-ovs-provider-flowew1.png doc/source/admin/figures/deploy-ovs-provider-flowew1.svg 
doc/source/admin/figures/deploy-ovs-provider-flowew2.graffle doc/source/admin/figures/deploy-ovs-provider-flowew2.png doc/source/admin/figures/deploy-ovs-provider-flowew2.svg doc/source/admin/figures/deploy-ovs-provider-flowns1.graffle doc/source/admin/figures/deploy-ovs-provider-flowns1.png doc/source/admin/figures/deploy-ovs-provider-flowns1.svg doc/source/admin/figures/deploy-ovs-provider-overview.graffle doc/source/admin/figures/deploy-ovs-provider-overview.png doc/source/admin/figures/deploy-ovs-provider-overview.svg doc/source/admin/figures/deploy-ovs-selfservice-compconn1.graffle doc/source/admin/figures/deploy-ovs-selfservice-compconn1.png doc/source/admin/figures/deploy-ovs-selfservice-compconn1.svg doc/source/admin/figures/deploy-ovs-selfservice-flowew1.graffle doc/source/admin/figures/deploy-ovs-selfservice-flowew1.png doc/source/admin/figures/deploy-ovs-selfservice-flowew1.svg doc/source/admin/figures/deploy-ovs-selfservice-flowew2.graffle doc/source/admin/figures/deploy-ovs-selfservice-flowew2.png doc/source/admin/figures/deploy-ovs-selfservice-flowew2.svg doc/source/admin/figures/deploy-ovs-selfservice-flowns1.graffle doc/source/admin/figures/deploy-ovs-selfservice-flowns1.png doc/source/admin/figures/deploy-ovs-selfservice-flowns1.svg doc/source/admin/figures/deploy-ovs-selfservice-flowns2.graffle doc/source/admin/figures/deploy-ovs-selfservice-flowns2.png doc/source/admin/figures/deploy-ovs-selfservice-flowns2.svg doc/source/admin/figures/deploy-ovs-selfservice-overview.graffle doc/source/admin/figures/deploy-ovs-selfservice-overview.png doc/source/admin/figures/deploy-ovs-selfservice-overview.svg doc/source/admin/figures/lbaasv2-diagram.png doc/source/admin/figures/logging-framework.png doc/source/admin/figures/port-chain-architecture-diagram.png doc/source/admin/figures/port-chain-diagram.png doc/source/admin/figures/scenario-classic-mt-compute1.svg doc/source/admin/figures/scenario-classic-mt-compute2.svg doc/source/admin/figures/scenario-classic-mt-flowew1.png doc/source/admin/figures/scenario-classic-mt-flowew1.svg doc/source/admin/figures/scenario-classic-mt-flowew2.png doc/source/admin/figures/scenario-classic-mt-flowew2.svg doc/source/admin/figures/scenario-classic-mt-flowns1.png doc/source/admin/figures/scenario-classic-mt-flowns1.svg doc/source/admin/figures/scenario-classic-mt-networks.png doc/source/admin/figures/scenario-classic-mt-networks.svg doc/source/admin/figures/scenario-classic-mt-services.png doc/source/admin/figures/scenario-classic-mt-services.svg doc/source/admin/figures/scenario-classic-mt.png doc/source/admin/figures/scenario-classic-mt.svg doc/source/admin/ovn/dpdk.rst doc/source/admin/ovn/features.rst doc/source/admin/ovn/igmp.rst doc/source/admin/ovn/index.rst doc/source/admin/ovn/ovn.rst doc/source/admin/ovn/routing.rst doc/source/admin/ovn/troubleshooting.rst doc/source/admin/ovn/tutorial.rst doc/source/admin/ovn/figures/ovn-east-west-2.png doc/source/admin/ovn/figures/ovn-east-west-2.svg doc/source/admin/ovn/figures/ovn-east-west-3.png doc/source/admin/ovn/figures/ovn-east-west-3.svg doc/source/admin/ovn/figures/ovn-east-west.png doc/source/admin/ovn/figures/ovn-east-west.svg doc/source/admin/ovn/figures/ovn-l3ha-bfd-3gw.png doc/source/admin/ovn/figures/ovn-l3ha-bfd-3gw.svg doc/source/admin/ovn/figures/ovn-l3ha-bfd-failover.png doc/source/admin/ovn/figures/ovn-l3ha-bfd-failover.svg doc/source/admin/ovn/figures/ovn-l3ha-bfd.png doc/source/admin/ovn/figures/ovn-l3ha-bfd.svg doc/source/admin/ovn/figures/ovn-north-south-distributed-fip.png 
doc/source/admin/ovn/figures/ovn-north-south-distributed-fip.svg doc/source/admin/ovn/figures/ovn-north-south.png doc/source/admin/ovn/figures/ovn-north-south.svg doc/source/admin/ovn/refarch/launch-instance-provider-network.rst doc/source/admin/ovn/refarch/launch-instance-selfservice-network.rst doc/source/admin/ovn/refarch/provider-networks.rst doc/source/admin/ovn/refarch/refarch.rst doc/source/admin/ovn/refarch/routers.rst doc/source/admin/ovn/refarch/selfservice-networks.rst doc/source/admin/ovn/refarch/figures/ovn-architecture1.png doc/source/admin/ovn/refarch/figures/ovn-architecture1.svg doc/source/admin/ovn/refarch/figures/ovn-compute1.png doc/source/admin/ovn/refarch/figures/ovn-compute1.svg doc/source/admin/ovn/refarch/figures/ovn-hw.png doc/source/admin/ovn/refarch/figures/ovn-hw.svg doc/source/admin/ovn/refarch/figures/ovn-services.png doc/source/admin/ovn/refarch/figures/ovn-services.svg doc/source/admin/shared/deploy-config-neutron-common.txt doc/source/admin/shared/deploy-ha-vrrp-initialnetworks.txt doc/source/admin/shared/deploy-ha-vrrp-verifyfailoveroperation.txt doc/source/admin/shared/deploy-ha-vrrp-verifynetworkoperation.txt doc/source/admin/shared/deploy-ha-vrrp.txt doc/source/admin/shared/deploy-provider-initialnetworks.txt doc/source/admin/shared/deploy-provider-networktrafficflow.txt doc/source/admin/shared/deploy-provider-verifynetworkoperation.txt doc/source/admin/shared/deploy-secgrouprules.txt doc/source/admin/shared/deploy-selfservice-initialnetworks.txt doc/source/admin/shared/deploy-selfservice-networktrafficflow.txt doc/source/admin/shared/deploy-selfservice-verifynetworkoperation.txt doc/source/admin/shared/keepalived-vrrp-healthcheck.txt doc/source/cli/index.rst doc/source/cli/neutron-debug.rst doc/source/cli/neutron-sanity-check.rst doc/source/cli/neutron-status.rst doc/source/configuration/config-samples.rst doc/source/configuration/config.rst doc/source/configuration/dhcp-agent.rst doc/source/configuration/index.rst doc/source/configuration/l3-agent.rst doc/source/configuration/linuxbridge-agent.rst doc/source/configuration/macvtap-agent.rst doc/source/configuration/metadata-agent.rst doc/source/configuration/metering-agent.rst doc/source/configuration/ml2-conf.rst doc/source/configuration/neutron.rst doc/source/configuration/openvswitch-agent.rst doc/source/configuration/ovn.rst doc/source/configuration/policy-sample.rst doc/source/configuration/policy.rst doc/source/configuration/sriov-agent.rst doc/source/configuration/samples/dhcp-agent.rst doc/source/configuration/samples/l3-agent.rst doc/source/configuration/samples/linuxbridge-agent.rst doc/source/configuration/samples/macvtap-agent.rst doc/source/configuration/samples/metadata-agent.rst doc/source/configuration/samples/metering-agent.rst doc/source/configuration/samples/ml2-conf.rst doc/source/configuration/samples/neutron.rst doc/source/configuration/samples/openvswitch-agent.rst doc/source/configuration/samples/ovn.rst doc/source/configuration/samples/sriov-agent.rst doc/source/contributor/alembic_migrations.rst doc/source/contributor/client_command_extensions.rst doc/source/contributor/contribute.rst doc/source/contributor/contributing.rst doc/source/contributor/development_environment.rst doc/source/contributor/effective_neutron.rst doc/source/contributor/index.rst doc/source/contributor/modules.rst doc/source/contributor/neutron_api.rst doc/source/contributor/upgrade_checks.rst doc/source/contributor/dashboards/index.rst doc/source/contributor/internals/address_scopes.rst 
doc/source/contributor/internals/agent_extensions.rst doc/source/contributor/internals/api_extensions.rst doc/source/contributor/internals/api_layer.rst doc/source/contributor/internals/calling_ml2_plugin.rst doc/source/contributor/internals/code_profiling.rst doc/source/contributor/internals/db_layer.rst doc/source/contributor/internals/db_models.rst doc/source/contributor/internals/dns_order.rst doc/source/contributor/internals/external_dns_integration.rst doc/source/contributor/internals/i18n.rst doc/source/contributor/internals/index.rst doc/source/contributor/internals/l2_agent_extensions.rst doc/source/contributor/internals/l2_agents.rst doc/source/contributor/internals/l3_agent_extensions.rst doc/source/contributor/internals/layer3.rst doc/source/contributor/internals/linuxbridge_agent.rst doc/source/contributor/internals/live_migration.rst doc/source/contributor/internals/ml2_ext_manager.rst doc/source/contributor/internals/network_ip_availability.rst doc/source/contributor/internals/objects_usage.rst doc/source/contributor/internals/openvswitch_agent.rst doc/source/contributor/internals/openvswitch_firewall.rst doc/source/contributor/internals/ovs_vhostuser.rst doc/source/contributor/internals/plugin-api.rst doc/source/contributor/internals/policy.rst doc/source/contributor/internals/provisioning_blocks.rst doc/source/contributor/internals/quality_of_service.rst doc/source/contributor/internals/quota.rst doc/source/contributor/internals/retries.rst doc/source/contributor/internals/rpc_api.rst doc/source/contributor/internals/rpc_callbacks.rst doc/source/contributor/internals/security_group_api.rst doc/source/contributor/internals/segments.rst doc/source/contributor/internals/service_extensions.rst doc/source/contributor/internals/services_and_agents.rst doc/source/contributor/internals/sriov_nic_agent.rst doc/source/contributor/internals/tag.rst doc/source/contributor/internals/upgrade.rst doc/source/contributor/internals/images/live-mig-ovs-hybrid.png doc/source/contributor/internals/images/live-mig-ovs-hybrid.txt doc/source/contributor/internals/images/live-mig.png doc/source/contributor/internals/images/live-mig.txt doc/source/contributor/internals/images/under-the-hood-scenario-1-ovs-compute.png doc/source/contributor/internals/images/under-the-hood-scenario-1-ovs-netns.png doc/source/contributor/internals/images/under-the-hood-scenario-1-ovs-network.png doc/source/contributor/internals/ovn/acl_optimizations.rst doc/source/contributor/internals/ovn/data_model.rst doc/source/contributor/internals/ovn/database_consistency.rst doc/source/contributor/internals/ovn/distributed_ovsdb_events.rst doc/source/contributor/internals/ovn/index.rst doc/source/contributor/internals/ovn/l3_ha_rescheduling.rst doc/source/contributor/internals/ovn/loadbalancer.rst doc/source/contributor/internals/ovn/metadata_api.rst doc/source/contributor/internals/ovn/native_dhcp.rst doc/source/contributor/internals/ovn/ovn_worker.rst doc/source/contributor/ovn/index.rst doc/source/contributor/ovn/tools.rst doc/source/contributor/ovn_vagrant/index.rst doc/source/contributor/ovn_vagrant/prerequisites.rst doc/source/contributor/ovn_vagrant/sparse-architecture.rst doc/source/contributor/policies/blueprints.rst doc/source/contributor/policies/bugs.rst doc/source/contributor/policies/code-reviews.rst doc/source/contributor/policies/contributor-onboarding.rst doc/source/contributor/policies/gate-failure-triage.rst doc/source/contributor/policies/gerrit-recheck.rst doc/source/contributor/policies/index.rst 
doc/source/contributor/policies/neutron-teams.rst doc/source/contributor/policies/release-checklist.rst doc/source/contributor/policies/thirdparty-ci.rst doc/source/contributor/stadium/governance.rst doc/source/contributor/stadium/guidelines.rst doc/source/contributor/stadium/index.rst doc/source/contributor/testing/ci_scenario_jobs.rst doc/source/contributor/testing/coverage.rst doc/source/contributor/testing/db_transient_failure_injection.rst doc/source/contributor/testing/fullstack.rst doc/source/contributor/testing/index.rst doc/source/contributor/testing/ovn_devstack.rst doc/source/contributor/testing/template_model_sync_test.rst doc/source/contributor/testing/testing.rst doc/source/contributor/testing/images/fullstack_multinode_simulation.png doc/source/feature_classification/feature_classification_introduction.rst doc/source/feature_classification/general_feature_support_matrix.ini doc/source/feature_classification/general_feature_support_matrix.rst doc/source/feature_classification/index.rst doc/source/feature_classification/provider_network_support_matrix.ini doc/source/feature_classification/provider_network_support_matrix.rst doc/source/install/compute-install-obs.rst doc/source/install/compute-install-option1-obs.rst doc/source/install/compute-install-option1-rdo.rst doc/source/install/compute-install-option1-ubuntu.rst doc/source/install/compute-install-option2-obs.rst doc/source/install/compute-install-option2-rdo.rst doc/source/install/compute-install-option2-ubuntu.rst doc/source/install/compute-install-rdo.rst doc/source/install/compute-install-ubuntu.rst doc/source/install/concepts.rst doc/source/install/controller-install-obs.rst doc/source/install/controller-install-option1-obs.rst doc/source/install/controller-install-option1-rdo.rst doc/source/install/controller-install-option1-ubuntu.rst doc/source/install/controller-install-option2-obs.rst doc/source/install/controller-install-option2-rdo.rst doc/source/install/controller-install-option2-ubuntu.rst doc/source/install/controller-install-rdo.rst doc/source/install/controller-install-ubuntu.rst doc/source/install/environment-networking-compute-obs.rst doc/source/install/environment-networking-compute-rdo.rst doc/source/install/environment-networking-compute-ubuntu.rst doc/source/install/environment-networking-controller-obs.rst doc/source/install/environment-networking-controller-rdo.rst doc/source/install/environment-networking-controller-ubuntu.rst doc/source/install/environment-networking-obs.rst doc/source/install/environment-networking-rdo.rst doc/source/install/environment-networking-storage-cinder.rst doc/source/install/environment-networking-ubuntu.rst doc/source/install/environment-networking-verify-obs.rst doc/source/install/environment-networking-verify-rdo.rst doc/source/install/environment-networking-verify-ubuntu.rst doc/source/install/index.rst doc/source/install/install-obs.rst doc/source/install/install-rdo.rst doc/source/install/install-ubuntu.rst doc/source/install/overview.rst doc/source/install/verify-option1.rst doc/source/install/verify-option2.rst doc/source/install/verify.rst doc/source/install/common/get-started-networking.rst doc/source/install/figures/hwreqs.graffle doc/source/install/figures/hwreqs.png doc/source/install/figures/hwreqs.svg doc/source/install/figures/network1-services.graffle doc/source/install/figures/network1-services.png doc/source/install/figures/network1-services.svg doc/source/install/figures/network2-services.graffle doc/source/install/figures/network2-services.png 
doc/source/install/figures/network2-services.svg doc/source/install/figures/networklayout.graffle doc/source/install/figures/networklayout.png doc/source/install/figures/networklayout.svg doc/source/install/ovn/index.rst doc/source/install/ovn/manual_install.rst doc/source/install/ovn/tripleo_install.rst doc/source/install/ovn/figures/ovn-initial-resources.png doc/source/install/ovn/figures/ovn-initial-resources.svg doc/source/install/ovn/figures/tripleo-ovn-arch.png doc/source/install/ovn/figures/tripleo-ovn-arch.svg doc/source/install/shared/edit_hosts_file.txt doc/source/install/shared/note_configuration_vary_by_distribution.rst doc/source/ovn/gaps.rst doc/source/ovn/index.rst doc/source/ovn/migration.rst doc/source/ovn/faq/index.rst doc/source/reference/rest-api.rst etc/README.policy.yaml.txt etc/README.txt etc/api-paste.ini etc/rootwrap.conf etc/neutron/plugins/ml2/.placeholder etc/neutron/rootwrap.d/debug.filters etc/neutron/rootwrap.d/dhcp.filters etc/neutron/rootwrap.d/dibbler.filters etc/neutron/rootwrap.d/ebtables.filters etc/neutron/rootwrap.d/ipset-firewall.filters etc/neutron/rootwrap.d/iptables-firewall.filters etc/neutron/rootwrap.d/l3.filters etc/neutron/rootwrap.d/linuxbridge-plugin.filters etc/neutron/rootwrap.d/netns-cleanup.filters etc/neutron/rootwrap.d/openvswitch-plugin.filters etc/neutron/rootwrap.d/privsep.filters etc/oslo-config-generator/dhcp_agent.ini etc/oslo-config-generator/l3_agent.ini etc/oslo-config-generator/linuxbridge_agent.ini etc/oslo-config-generator/macvtap_agent.ini etc/oslo-config-generator/metadata_agent.ini etc/oslo-config-generator/metering_agent.ini etc/oslo-config-generator/ml2_conf.ini etc/oslo-config-generator/neutron.conf etc/oslo-config-generator/neutron_ovn_metadata_agent.ini etc/oslo-config-generator/openvswitch_agent.ini etc/oslo-config-generator/ovn.ini etc/oslo-config-generator/sriov_agent.ini etc/oslo-policy-generator/policy.conf neutron/__init__.py neutron/_i18n.py neutron/auth.py neutron/manager.py neutron/neutron_plugin_base_v2.py neutron/opts.py neutron/policy.py neutron/service.py neutron/version.py neutron/worker.py neutron/wsgi.py neutron.egg-info/PKG-INFO neutron.egg-info/SOURCES.txt neutron.egg-info/dependency_links.txt neutron.egg-info/entry_points.txt neutron.egg-info/not-zip-safe neutron.egg-info/pbr.json neutron.egg-info/requires.txt neutron.egg-info/top_level.txt neutron/agent/__init__.py neutron/agent/agent_extensions_manager.py neutron/agent/dhcp_agent.py neutron/agent/firewall.py neutron/agent/l3_agent.py neutron/agent/metadata_agent.py neutron/agent/resource_cache.py neutron/agent/rpc.py neutron/agent/securitygroups_rpc.py neutron/agent/common/__init__.py neutron/agent/common/async_process.py neutron/agent/common/base_polling.py neutron/agent/common/ip_lib.py neutron/agent/common/ovs_lib.py neutron/agent/common/ovsdb_monitor.py neutron/agent/common/placement_report.py neutron/agent/common/polling.py neutron/agent/common/resource_processing_queue.py neutron/agent/common/utils.py neutron/agent/dhcp/__init__.py neutron/agent/dhcp/agent.py neutron/agent/l2/__init__.py neutron/agent/l2/l2_agent_extensions_manager.py neutron/agent/l2/extensions/__init__.py neutron/agent/l2/extensions/fdb_population.py neutron/agent/l2/extensions/qos.py neutron/agent/l2/extensions/qos_linux.py neutron/agent/l3/__init__.py neutron/agent/l3/agent.py neutron/agent/l3/dvr.py neutron/agent/l3/dvr_edge_ha_router.py neutron/agent/l3/dvr_edge_router.py neutron/agent/l3/dvr_fip_ns.py neutron/agent/l3/dvr_local_router.py 
neutron/agent/l3/dvr_router_base.py neutron/agent/l3/dvr_snat_ns.py neutron/agent/l3/fip_rule_priority_allocator.py neutron/agent/l3/ha.py neutron/agent/l3/ha_router.py neutron/agent/l3/item_allocator.py neutron/agent/l3/keepalived_state_change.py neutron/agent/l3/l3_agent_extension_api.py neutron/agent/l3/l3_agent_extensions_manager.py neutron/agent/l3/legacy_router.py neutron/agent/l3/link_local_allocator.py neutron/agent/l3/namespace_manager.py neutron/agent/l3/namespaces.py neutron/agent/l3/router_info.py neutron/agent/l3/extensions/__init__.py neutron/agent/l3/extensions/conntrack_helper.py neutron/agent/l3/extensions/port_forwarding.py neutron/agent/l3/extensions/snat_log.py neutron/agent/l3/extensions/qos/__init__.py neutron/agent/l3/extensions/qos/base.py neutron/agent/l3/extensions/qos/fip.py neutron/agent/l3/extensions/qos/gateway_ip.py neutron/agent/linux/__init__.py neutron/agent/linux/bridge_lib.py neutron/agent/linux/daemon.py neutron/agent/linux/dhcp.py neutron/agent/linux/dibbler.py neutron/agent/linux/external_process.py neutron/agent/linux/interface.py neutron/agent/linux/ip_conntrack.py neutron/agent/linux/ip_lib.py neutron/agent/linux/ip_link_support.py neutron/agent/linux/ipset_manager.py neutron/agent/linux/iptables_comments.py neutron/agent/linux/iptables_firewall.py neutron/agent/linux/iptables_manager.py neutron/agent/linux/keepalived.py neutron/agent/linux/l3_tc_lib.py neutron/agent/linux/of_monitor.py neutron/agent/linux/pd.py neutron/agent/linux/pd_driver.py neutron/agent/linux/ra.py neutron/agent/linux/tc_lib.py neutron/agent/linux/utils.py neutron/agent/linux/xenapi_root_helper.py neutron/agent/linux/openvswitch_firewall/__init__.py neutron/agent/linux/openvswitch_firewall/constants.py neutron/agent/linux/openvswitch_firewall/exceptions.py neutron/agent/linux/openvswitch_firewall/firewall.py neutron/agent/linux/openvswitch_firewall/iptables.py neutron/agent/linux/openvswitch_firewall/rules.py neutron/agent/metadata/__init__.py neutron/agent/metadata/agent.py neutron/agent/metadata/driver.py neutron/agent/ovn/__init__.py neutron/agent/ovn/metadata_agent.py neutron/agent/ovn/metadata/__init__.py neutron/agent/ovn/metadata/agent.py neutron/agent/ovn/metadata/driver.py neutron/agent/ovn/metadata/ovsdb.py neutron/agent/ovn/metadata/server.py neutron/agent/ovsdb/__init__.py neutron/agent/ovsdb/api.py neutron/agent/ovsdb/impl_idl.py neutron/agent/ovsdb/native/__init__.py neutron/agent/ovsdb/native/commands.py neutron/agent/ovsdb/native/connection.py neutron/agent/ovsdb/native/exceptions.py neutron/agent/ovsdb/native/helpers.py neutron/agent/ovsdb/native/vlog.py neutron/agent/windows/__init__.py neutron/agent/windows/ip_lib.py neutron/agent/windows/utils.py neutron/api/__init__.py neutron/api/api_common.py neutron/api/extensions.py neutron/api/rpc/__init__.py neutron/api/rpc/agentnotifiers/__init__.py neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py neutron/api/rpc/agentnotifiers/utils.py neutron/api/rpc/callbacks/__init__.py neutron/api/rpc/callbacks/events.py neutron/api/rpc/callbacks/exceptions.py neutron/api/rpc/callbacks/resource_manager.py neutron/api/rpc/callbacks/resources.py neutron/api/rpc/callbacks/version_manager.py neutron/api/rpc/callbacks/consumer/__init__.py neutron/api/rpc/callbacks/consumer/registry.py neutron/api/rpc/callbacks/producer/__init__.py neutron/api/rpc/callbacks/producer/registry.py neutron/api/rpc/handlers/__init__.py 
neutron/api/rpc/handlers/dhcp_rpc.py neutron/api/rpc/handlers/dvr_rpc.py neutron/api/rpc/handlers/l3_rpc.py neutron/api/rpc/handlers/metadata_rpc.py neutron/api/rpc/handlers/resources_rpc.py neutron/api/rpc/handlers/securitygroups_rpc.py neutron/api/v2/__init__.py neutron/api/v2/base.py neutron/api/v2/resource.py neutron/api/v2/resource_helper.py neutron/api/v2/router.py neutron/api/views/__init__.py neutron/api/views/versions.py neutron/cmd/__init__.py neutron/cmd/ipset_cleanup.py neutron/cmd/keepalived_state_change.py neutron/cmd/linuxbridge_cleanup.py neutron/cmd/netns_cleanup.py neutron/cmd/ovs_cleanup.py neutron/cmd/pd_notify.py neutron/cmd/runtime_checks.py neutron/cmd/sanity_check.py neutron/cmd/status.py neutron/cmd/eventlet/__init__.py neutron/cmd/eventlet/usage_audit.py neutron/cmd/eventlet/agents/__init__.py neutron/cmd/eventlet/agents/dhcp.py neutron/cmd/eventlet/agents/l3.py neutron/cmd/eventlet/agents/metadata.py neutron/cmd/eventlet/agents/ovn_metadata.py neutron/cmd/eventlet/plugins/__init__.py neutron/cmd/eventlet/plugins/linuxbridge_neutron_agent.py neutron/cmd/eventlet/plugins/macvtap_neutron_agent.py neutron/cmd/eventlet/plugins/ovs_neutron_agent.py neutron/cmd/eventlet/plugins/sriov_nic_neutron_agent.py neutron/cmd/eventlet/server/__init__.py neutron/cmd/eventlet/services/__init__.py neutron/cmd/eventlet/services/metering_agent.py neutron/cmd/ovn/__init__.py neutron/cmd/ovn/migration_mtu.py neutron/cmd/ovn/neutron_ovn_db_sync_util.py neutron/cmd/sanity/__init__.py neutron/cmd/sanity/checks.py neutron/cmd/upgrade_checks/__init__.py neutron/cmd/upgrade_checks/base.py neutron/cmd/upgrade_checks/checks.py neutron/common/__init__.py neutron/common/_constants.py neutron/common/_deprecate.py neutron/common/cache_utils.py neutron/common/config.py neutron/common/coordination.py neutron/common/eventlet_utils.py neutron/common/ipv6_utils.py neutron/common/profiler.py neutron/common/test_lib.py neutron/common/utils.py neutron/common/ovn/__init__.py neutron/common/ovn/acl.py neutron/common/ovn/constants.py neutron/common/ovn/exceptions.py neutron/common/ovn/extensions.py neutron/common/ovn/hash_ring_manager.py neutron/common/ovn/utils.py neutron/conf/__init__.py neutron/conf/common.py neutron/conf/profiling.py neutron/conf/quota.py neutron/conf/service.py neutron/conf/wsgi.py neutron/conf/agent/__init__.py neutron/conf/agent/agent_extensions_manager.py neutron/conf/agent/cmd.py neutron/conf/agent/common.py neutron/conf/agent/dhcp.py neutron/conf/agent/l2_ext_fdb_population.py neutron/conf/agent/linux.py neutron/conf/agent/ovs_conf.py neutron/conf/agent/ovsdb_api.py neutron/conf/agent/securitygroups_rpc.py neutron/conf/agent/windows.py neutron/conf/agent/xenapi_conf.py neutron/conf/agent/database/__init__.py neutron/conf/agent/database/agents_db.py neutron/conf/agent/database/agentschedulers_db.py neutron/conf/agent/l3/__init__.py neutron/conf/agent/l3/config.py neutron/conf/agent/l3/ha.py neutron/conf/agent/l3/keepalived.py neutron/conf/agent/metadata/__init__.py neutron/conf/agent/metadata/config.py neutron/conf/agent/ovn/__init__.py neutron/conf/agent/ovn/metadata/__init__.py neutron/conf/agent/ovn/metadata/config.py neutron/conf/db/__init__.py neutron/conf/db/dvr_mac_db.py neutron/conf/db/extraroute_db.py neutron/conf/db/l3_agentschedulers_db.py neutron/conf/db/l3_dvr_db.py neutron/conf/db/l3_gwmode_db.py neutron/conf/db/l3_hamode_db.py neutron/conf/db/migration_cli.py neutron/conf/extensions/__init__.py neutron/conf/extensions/allowedaddresspairs.py 
neutron/conf/extensions/conntrack_helper.py neutron/conf/plugins/__init__.py neutron/conf/plugins/ml2/__init__.py neutron/conf/plugins/ml2/config.py neutron/conf/plugins/ml2/drivers/__init__.py neutron/conf/plugins/ml2/drivers/agent.py neutron/conf/plugins/ml2/drivers/driver_type.py neutron/conf/plugins/ml2/drivers/linuxbridge.py neutron/conf/plugins/ml2/drivers/macvtap.py neutron/conf/plugins/ml2/drivers/ovs_conf.py neutron/conf/plugins/ml2/drivers/mech_sriov/__init__.py neutron/conf/plugins/ml2/drivers/mech_sriov/agent_common.py neutron/conf/plugins/ml2/drivers/mech_sriov/mech_sriov_conf.py neutron/conf/plugins/ml2/drivers/openvswitch/__init__.py neutron/conf/plugins/ml2/drivers/openvswitch/mech_ovs_conf.py neutron/conf/plugins/ml2/drivers/ovn/__init__.py neutron/conf/plugins/ml2/drivers/ovn/ovn_conf.py neutron/conf/policies/__init__.py neutron/conf/policies/address_scope.py neutron/conf/policies/agent.py neutron/conf/policies/auto_allocated_topology.py neutron/conf/policies/availability_zone.py neutron/conf/policies/base.py neutron/conf/policies/flavor.py neutron/conf/policies/floatingip.py neutron/conf/policies/floatingip_pools.py neutron/conf/policies/floatingip_port_forwarding.py neutron/conf/policies/l3_conntrack_helper.py neutron/conf/policies/logging.py neutron/conf/policies/metering.py neutron/conf/policies/network.py neutron/conf/policies/network_ip_availability.py neutron/conf/policies/network_segment_range.py neutron/conf/policies/port.py neutron/conf/policies/qos.py neutron/conf/policies/rbac.py neutron/conf/policies/router.py neutron/conf/policies/security_group.py neutron/conf/policies/segment.py neutron/conf/policies/service_type.py neutron/conf/policies/subnet.py neutron/conf/policies/subnetpool.py neutron/conf/policies/trunk.py neutron/conf/services/__init__.py neutron/conf/services/extdns_designate_driver.py neutron/conf/services/logging.py neutron/conf/services/metering_agent.py neutron/conf/services/provider_configuration.py neutron/core_extensions/__init__.py neutron/core_extensions/base.py neutron/core_extensions/qos.py neutron/db/__init__.py neutron/db/_utils.py neutron/db/address_scope_db.py neutron/db/agents_db.py neutron/db/agentschedulers_db.py neutron/db/allowedaddresspairs_db.py neutron/db/data_plane_status_db.py neutron/db/db_base_plugin_common.py neutron/db/db_base_plugin_v2.py neutron/db/dns_db.py neutron/db/dvr_mac_db.py neutron/db/external_net_db.py neutron/db/extradhcpopt_db.py neutron/db/extraroute_db.py neutron/db/flavors_db.py neutron/db/ipam_backend_mixin.py neutron/db/ipam_pluggable_backend.py neutron/db/l3_agentschedulers_db.py neutron/db/l3_attrs_db.py neutron/db/l3_db.py neutron/db/l3_dvr_db.py neutron/db/l3_dvr_ha_scheduler_db.py neutron/db/l3_dvrscheduler_db.py neutron/db/l3_fip_pools_db.py neutron/db/l3_fip_port_details.py neutron/db/l3_fip_qos.py neutron/db/l3_gateway_ip_qos.py neutron/db/l3_gwmode_db.py neutron/db/l3_hamode_db.py neutron/db/l3_hascheduler_db.py neutron/db/models_v2.py neutron/db/network_ip_availability_db.py neutron/db/ovn_hash_ring_db.py neutron/db/ovn_revision_numbers_db.py neutron/db/portbindings_base.py neutron/db/portbindings_db.py neutron/db/portsecurity_db.py neutron/db/portsecurity_db_common.py neutron/db/provisioning_blocks.py neutron/db/rbac_db_mixin.py neutron/db/rbac_db_models.py neutron/db/securitygroups_db.py neutron/db/securitygroups_rpc_base.py neutron/db/segments_db.py neutron/db/servicetype_db.py neutron/db/standard_attr.py neutron/db/standardattrdescription_db.py neutron/db/subnet_service_type_mixin.py 
neutron/db/uplink_status_propagation_db.py neutron/db/vlantransparent_db.py neutron/db/allowed_address_pairs/__init__.py neutron/db/availability_zone/__init__.py neutron/db/availability_zone/network.py neutron/db/availability_zone/router.py neutron/db/extra_dhcp_opt/__init__.py neutron/db/extra_dhcp_opt/models.py neutron/db/metering/__init__.py neutron/db/metering/metering_db.py neutron/db/metering/metering_rpc.py neutron/db/migration/README neutron/db/migration/__init__.py neutron/db/migration/alembic.ini neutron/db/migration/autogen.py neutron/db/migration/cli.py neutron/db/migration/connection.py neutron/db/migration/alembic_migrations/__init__.py neutron/db/migration/alembic_migrations/agent_init_ops.py neutron/db/migration/alembic_migrations/brocade_init_ops.py neutron/db/migration/alembic_migrations/cisco_init_ops.py neutron/db/migration/alembic_migrations/core_init_ops.py neutron/db/migration/alembic_migrations/dvr_init_opts.py neutron/db/migration/alembic_migrations/env.py neutron/db/migration/alembic_migrations/external.py neutron/db/migration/alembic_migrations/firewall_init_ops.py neutron/db/migration/alembic_migrations/l3_init_ops.py neutron/db/migration/alembic_migrations/lb_init_ops.py neutron/db/migration/alembic_migrations/loadbalancer_init_ops.py neutron/db/migration/alembic_migrations/metering_init_ops.py neutron/db/migration/alembic_migrations/ml2_init_ops.py neutron/db/migration/alembic_migrations/nec_init_ops.py neutron/db/migration/alembic_migrations/nsxv_initial_opts.py neutron/db/migration/alembic_migrations/nuage_init_opts.py neutron/db/migration/alembic_migrations/other_extensions_init_ops.py neutron/db/migration/alembic_migrations/other_plugins_init_ops.py neutron/db/migration/alembic_migrations/ovs_init_ops.py neutron/db/migration/alembic_migrations/portsec_init_ops.py neutron/db/migration/alembic_migrations/script.py.mako neutron/db/migration/alembic_migrations/secgroup_init_ops.py neutron/db/migration/alembic_migrations/vmware_init_ops.py neutron/db/migration/alembic_migrations/vpn_init_ops.py neutron/db/migration/alembic_migrations/versions/CONTRACT_HEAD neutron/db/migration/alembic_migrations/versions/EXPAND_HEAD neutron/db/migration/alembic_migrations/versions/README neutron/db/migration/alembic_migrations/versions/kilo_initial.py neutron/db/migration/alembic_migrations/versions/liberty/contract/11926bcfe72d_add_geneve_ml2_type_driver.py neutron/db/migration/alembic_migrations/versions/liberty/contract/2a16083502f3_metaplugin_removal.py neutron/db/migration/alembic_migrations/versions/liberty/contract/2e5352a0ad4d_add_missing_foreign_keys.py neutron/db/migration/alembic_migrations/versions/liberty/contract/30018084ec99_initial.py neutron/db/migration/alembic_migrations/versions/liberty/contract/4af11ca47297_drop_cisco_monolithic_tables.py neutron/db/migration/alembic_migrations/versions/liberty/contract/4ffceebfada_rbac_network.py neutron/db/migration/alembic_migrations/versions/liberty/contract/5498d17be016_drop_legacy_ovs_and_lb.py neutron/db/migration/alembic_migrations/versions/liberty/expand/1b4c6e320f79_address_scope_support_in_subnetpool.py neutron/db/migration/alembic_migrations/versions/liberty/expand/1c844d1677f7_dns_nameservers_order.py neutron/db/migration/alembic_migrations/versions/liberty/expand/26c371498592_subnetpool_hash.py neutron/db/migration/alembic_migrations/versions/liberty/expand/31337ec0ffee_flavors.py neutron/db/migration/alembic_migrations/versions/liberty/expand/34af2b5c5a59_add_dns_name_to_port.py 
neutron/db/migration/alembic_migrations/versions/liberty/expand/354db87e3225_nsxv_vdr_metadata.py neutron/db/migration/alembic_migrations/versions/liberty/expand/45f955889773_quota_usage.py neutron/db/migration/alembic_migrations/versions/liberty/expand/48153cb5f051_qos_db_changes.py neutron/db/migration/alembic_migrations/versions/liberty/expand/52c5312f6baf_address_scopes.py neutron/db/migration/alembic_migrations/versions/liberty/expand/599c6a226151_neutrodb_ipam.py neutron/db/migration/alembic_migrations/versions/liberty/expand/8675309a5c4f_rbac_network.py neutron/db/migration/alembic_migrations/versions/liberty/expand/9859ac9c136_quota_reservations.py neutron/db/migration/alembic_migrations/versions/mitaka/contract/1b294093239c_remove_embrane_plugin.py neutron/db/migration/alembic_migrations/versions/mitaka/contract/2b4c2465d44b_dvr_sheduling_refactoring.py neutron/db/migration/alembic_migrations/versions/mitaka/contract/4ffceebfcdc_standard_desc.py neutron/db/migration/alembic_migrations/versions/mitaka/contract/5ffceebfada_rbac_network_external.py neutron/db/migration/alembic_migrations/versions/mitaka/contract/8a6d8bdae39_migrate_neutron_resources_table.py neutron/db/migration/alembic_migrations/versions/mitaka/contract/c6c112992c9_rbac_qos_policy.py neutron/db/migration/alembic_migrations/versions/mitaka/contract/e3278ee65050_drop_nec_plugin_tables.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/0e66c5227a8a_add_desc_to_standard_attr.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/13cfb89f881a_add_is_default_to_subnetpool.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/15be73214821_add_bgp_model_data.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/15e43b934f81_rbac_qos_policy.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/19f26505c74f_auto_allocated_topology.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/1df244e556f5_add_unique_ha_router_agent_port_bindings.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/2f9e956e7532_tag_support.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/31ed664953e6_add_resource_versions_row_to_agent_table.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/32e5974ada25_add_neutron_resources_table.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/3894bccad37f_add_timestamp_to_base_resources.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/59cb5b6cf4d_availability_zone.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/659bf3d90664_add_attributes_to_support_external_dns_integration.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/b4caf27aae4_add_bgp_dragent_model_data.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/c3a73f615e4_add_ip_version_to_address_scope.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/dce3ec7a25c9_router_az.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/ec7fcfbf72ee_network_az.py neutron/db/migration/alembic_migrations/versions/newton/contract/2e0d7a8a1586_add_binding_index_to_routerl3agentbinding.py neutron/db/migration/alembic_migrations/versions/newton/contract/3b935b28e7a0_migrate_to_pluggable_ipam.py neutron/db/migration/alembic_migrations/versions/newton/contract/4bcd4df1f426_rename_ml2_dvr_port_bindings.py neutron/db/migration/alembic_migrations/versions/newton/contract/5c85685d616d_remove_availability_ranges.py 
neutron/db/migration/alembic_migrations/versions/newton/contract/7bbb25278f53_device_owner_ha_replicate_int.py neutron/db/migration/alembic_migrations/versions/newton/contract/7d9d8eeec6ad_rename_tenant_to_project.py neutron/db/migration/alembic_migrations/versions/newton/contract/89ab9a816d70_rename_ml2_network_segments.py neutron/db/migration/alembic_migrations/versions/newton/contract/8fd3918ef6f4_add_segment_host_mapping.py neutron/db/migration/alembic_migrations/versions/newton/contract/97c25b0d2353_add_name_desc.py neutron/db/migration/alembic_migrations/versions/newton/contract/a84ccf28f06a_migrate_dns_name_from_port.py neutron/db/migration/alembic_migrations/versions/newton/contract/a8b517cff8ab_add_routerport_bindings_for_ha.py neutron/db/migration/alembic_migrations/versions/newton/contract/b12a3ef66e62_add_standardattr_to_qos_policies.py neutron/db/migration/alembic_migrations/versions/newton/contract/b67e765a3524_remove_mtu_column_from_networks.py neutron/db/migration/alembic_migrations/versions/newton/contract/c879c5e1ee90_add_segment_id_to_subnet.py neutron/db/migration/alembic_migrations/versions/newton/expand/030a959ceafa_uniq_routerports0port_id.py neutron/db/migration/alembic_migrations/versions/newton/expand/0f5bef0f87d4_add_qos_minimum_bandwidth_rules.py neutron/db/migration/alembic_migrations/versions/newton/expand/30107ab6a3ee_provisioning_blocks.py neutron/db/migration/alembic_migrations/versions/newton/expand/3d0e74aa7d37_add_flavor_id_to_routers.py neutron/db/migration/alembic_migrations/versions/newton/expand/45f8dd33480b_qos_dscp_db_addition.py neutron/db/migration/alembic_migrations/versions/newton/expand/5abc0278ca73_add_support_for_vlan_trunking.py neutron/db/migration/alembic_migrations/versions/newton/expand/5cd92597d11d_add_ip_allocation_to_port.py neutron/db/migration/alembic_migrations/versions/newton/expand/67daae611b6e_add_standard_attr_to_qos_policies.py neutron/db/migration/alembic_migrations/versions/newton/expand/6b461a21bcfc_uniq_floatingips0floating_network_.py neutron/db/migration/alembic_migrations/versions/newton/expand/a5648cfeeadf_add_subnet_service_types.py neutron/db/migration/alembic_migrations/versions/newton/expand/a963b38d82f4_add_dns_name_to_portdnses.py neutron/db/migration/alembic_migrations/versions/newton/expand/c415aab1c048_add_revisions_column.py neutron/db/migration/alembic_migrations/versions/newton/expand/d3435b514502_add_device_id_index_to_port.py neutron/db/migration/alembic_migrations/versions/ocata/expand/929c968efe70_add_pk_version_table.py neutron/db/migration/alembic_migrations/versions/ocata/expand/a9c43481023c_extend_ml2_port_bindings.py neutron/db/migration/alembic_migrations/versions/pike/expand/2b42d90729da_qos_add_direction_to_bw_limit_rule_table.py neutron/db/migration/alembic_migrations/versions/pike/expand/349b6fd605a6_add_dns_domain_to_portdnses.py neutron/db/migration/alembic_migrations/versions/pike/expand/62c781cb6192_add_qos_policies_default_table.py neutron/db/migration/alembic_migrations/versions/pike/expand/7d32f979895f_add_mtu_for_networks.py neutron/db/migration/alembic_migrations/versions/pike/expand/804a3c76314c_add_data_plane_status_to_port.py neutron/db/migration/alembic_migrations/versions/pike/expand/c8c222d42aa9_logging_api.py neutron/db/migration/alembic_migrations/versions/queens/expand/594422d373ee_fip_qos.py neutron/db/migration/alembic_migrations/versions/rocky/expand/61663558142c_add_ha_router_state.py 
neutron/db/migration/alembic_migrations/versions/rocky/expand/867d39095bf4_port_forwarding.py neutron/db/migration/alembic_migrations/versions/stein/expand/0ff9e3881597_network_segment_range.py neutron/db/migration/alembic_migrations/versions/stein/expand/195176fb410d_router_gateway_ip_qos.py neutron/db/migration/alembic_migrations/versions/stein/expand/9bfad3f1e780_support_shared_security_groups.py neutron/db/migration/alembic_migrations/versions/stein/expand/cada2437bf41_add_propagate_uplink_status_to_port.py neutron/db/migration/alembic_migrations/versions/stein/expand/d72db3e25539_modify_uniq_port_forwarding.py neutron/db/migration/alembic_migrations/versions/stein/expand/fb0167bd9639_agent_resources_synced.py neutron/db/migration/alembic_migrations/versions/train/expand/63fd95af7dcd_conntrack_helper.py neutron/db/migration/alembic_migrations/versions/train/expand/c613d0b82681_subnet_force_network_id.py neutron/db/migration/alembic_migrations/versions/ussuri/expand/18a7e90ae768_add_security_group_stateful.py neutron/db/migration/alembic_migrations/versions/ussuri/expand/2217c4222de6_add_dvr_fip_gateway_port_network_table.py neutron/db/migration/alembic_migrations/versions/ussuri/expand/263d454a9655_add_dns_publish_fixed_ip_to_subnets.py neutron/db/migration/alembic_migrations/versions/ussuri/expand/86274d77933e_change_mtu_to_not_null.py neutron/db/migration/alembic_migrations/versions/ussuri/expand/Ibac91d24da2_port_forwarding_description.py neutron/db/migration/alembic_migrations/versions/ussuri/expand/a010322604bc_network_subnet_update_lock.py neutron/db/migration/alembic_migrations/versions/ussuri/expand/c3e9d13c4367_add_binding_index_to_.py neutron/db/migration/alembic_migrations/versions/ussuri/expand/e4e236b0e1ff_add_rbac_support_for_address_scope.py neutron/db/migration/alembic_migrations/versions/ussuri/expand/e88badaa9591_add_rbac_support_for_subnetpool.py neutron/db/migration/alembic_migrations/versions/ussuri/expand/f4b9654dd40c_ovn_backend.py neutron/db/migration/models/__init__.py neutron/db/migration/models/head.py neutron/db/models/README neutron/db/models/__init__.py neutron/db/models/address_scope.py neutron/db/models/agent.py neutron/db/models/allowed_address_pair.py neutron/db/models/conntrack_helper.py neutron/db/models/data_plane_status.py neutron/db/models/dns.py neutron/db/models/dvr.py neutron/db/models/external_net.py neutron/db/models/flavor.py neutron/db/models/l3.py neutron/db/models/l3_attrs.py neutron/db/models/l3agent.py neutron/db/models/l3ha.py neutron/db/models/loggingapi.py neutron/db/models/metering.py neutron/db/models/network_segment_range.py neutron/db/models/ovn.py neutron/db/models/port_forwarding.py neutron/db/models/portbinding.py neutron/db/models/provisioning_block.py neutron/db/models/securitygroup.py neutron/db/models/segment.py neutron/db/models/servicetype.py neutron/db/models/subnet_service_type.py neutron/db/models/tag.py neutron/db/models/uplink_status_propagation.py neutron/db/models/plugins/__init__.py neutron/db/models/plugins/ml2/__init__.py neutron/db/models/plugins/ml2/flatallocation.py neutron/db/models/plugins/ml2/geneveallocation.py neutron/db/models/plugins/ml2/gre_allocation_endpoints.py neutron/db/models/plugins/ml2/vlanallocation.py neutron/db/models/plugins/ml2/vxlanallocation.py neutron/db/network_dhcp_agent_binding/__init__.py neutron/db/network_dhcp_agent_binding/models.py neutron/db/port_security/__init__.py neutron/db/port_security/models.py neutron/db/qos/__init__.py neutron/db/qos/models.py 
neutron/db/quota/__init__.py neutron/db/quota/api.py neutron/db/quota/driver.py neutron/db/quota/models.py neutron/debug/README neutron/debug/__init__.py neutron/debug/commands.py neutron/debug/debug_agent.py neutron/debug/shell.py neutron/extensions/__init__.py neutron/extensions/_admin_state_down_before_update_lib.py neutron/extensions/_availability_zone_filter_lib.py neutron/extensions/_filter_validation_lib.py neutron/extensions/_standard_attr_segment_lib.py neutron/extensions/address_scope.py neutron/extensions/admin_state_down_before_update.py neutron/extensions/agent.py neutron/extensions/agent_resources_synced.py neutron/extensions/allowedaddresspairs.py neutron/extensions/auto_allocated_topology.py neutron/extensions/availability_zone.py neutron/extensions/availability_zone_filter.py neutron/extensions/data_plane_status.py neutron/extensions/default_subnetpools.py neutron/extensions/dhcpagentscheduler.py neutron/extensions/dns.py neutron/extensions/dns_domain_ports.py neutron/extensions/dvr.py neutron/extensions/empty_string_filtering.py neutron/extensions/expose_l3_conntrack_helper.py neutron/extensions/expose_port_forwarding_in_fip.py neutron/extensions/external_net.py neutron/extensions/extra_dhcp_opt.py neutron/extensions/extraroute.py neutron/extensions/extraroute_atomic.py neutron/extensions/filter_validation.py neutron/extensions/fip_pf_description.py neutron/extensions/fip_port_details.py neutron/extensions/flavors.py neutron/extensions/floating_ip_port_forwarding.py neutron/extensions/floatingip_pools.py neutron/extensions/ip_allocation.py neutron/extensions/ip_substring_port_filtering.py neutron/extensions/l2_adjacency.py neutron/extensions/l3.py neutron/extensions/l3_conntrack_helper.py neutron/extensions/l3_ext_gw_mode.py neutron/extensions/l3_ext_ha_mode.py neutron/extensions/l3_flavors.py neutron/extensions/l3_port_ip_change_not_allowed.py neutron/extensions/l3agentscheduler.py neutron/extensions/logging.py neutron/extensions/metering.py neutron/extensions/multiprovidernet.py neutron/extensions/netmtu.py neutron/extensions/netmtu_writable.py neutron/extensions/network_availability_zone.py neutron/extensions/network_ip_availability.py neutron/extensions/network_segment_range.py neutron/extensions/pagination.py neutron/extensions/port_mac_address_regenerate.py neutron/extensions/port_resource_request.py neutron/extensions/portbindings.py neutron/extensions/portbindings_extended.py neutron/extensions/portsecurity.py neutron/extensions/project_id.py neutron/extensions/providernet.py neutron/extensions/qos.py neutron/extensions/qos_bw_limit_direction.py neutron/extensions/qos_bw_minimum_ingress.py neutron/extensions/qos_default.py neutron/extensions/qos_fip.py neutron/extensions/qos_gateway_ip.py neutron/extensions/qos_port_network_policy.py neutron/extensions/qos_rule_type_details.py neutron/extensions/qos_rules_alias.py neutron/extensions/quotasv2.py neutron/extensions/quotasv2_detail.py neutron/extensions/rbac.py neutron/extensions/rbac_address_scope.py neutron/extensions/rbac_security_groups.py neutron/extensions/rbac_subnetpool.py neutron/extensions/revisionifmatch.py neutron/extensions/revisions.py neutron/extensions/router_availability_zone.py neutron/extensions/routerservicetype.py neutron/extensions/security_groups_port_filtering.py neutron/extensions/securitygroup.py neutron/extensions/segment.py neutron/extensions/segments_peer_subnet_host_routes.py neutron/extensions/servicetype.py neutron/extensions/sorting.py neutron/extensions/standard_attr_segment.py 
neutron/extensions/standardattrdescription.py neutron/extensions/stateful_security_group.py neutron/extensions/stdattrs_common.py neutron/extensions/subnet_dns_publish_fixed_ip.py neutron/extensions/subnet_onboard.py neutron/extensions/subnet_segmentid_writable.py neutron/extensions/subnet_service_types.py neutron/extensions/subnetallocation.py neutron/extensions/subnetpool_prefix_ops.py neutron/extensions/tag_ports_during_bulk_creation.py neutron/extensions/tagging.py neutron/extensions/timestamp.py neutron/extensions/trunk.py neutron/extensions/trunk_details.py neutron/extensions/uplink_status_propagation.py neutron/extensions/vlantransparent.py neutron/hacking/__init__.py neutron/hacking/checks.py neutron/ipam/__init__.py neutron/ipam/driver.py neutron/ipam/exceptions.py neutron/ipam/requests.py neutron/ipam/subnet_alloc.py neutron/ipam/utils.py neutron/ipam/drivers/__init__.py neutron/ipam/drivers/neutrondb_ipam/__init__.py neutron/ipam/drivers/neutrondb_ipam/db_api.py neutron/ipam/drivers/neutrondb_ipam/db_models.py neutron/ipam/drivers/neutrondb_ipam/driver.py neutron/locale/de/LC_MESSAGES/neutron.po neutron/locale/es/LC_MESSAGES/neutron.po neutron/locale/fr/LC_MESSAGES/neutron.po neutron/locale/it/LC_MESSAGES/neutron.po neutron/locale/ja/LC_MESSAGES/neutron.po neutron/locale/ko_KR/LC_MESSAGES/neutron.po neutron/locale/pt_BR/LC_MESSAGES/neutron.po neutron/locale/ru/LC_MESSAGES/neutron.po neutron/locale/zh_CN/LC_MESSAGES/neutron.po neutron/locale/zh_TW/LC_MESSAGES/neutron.po neutron/notifiers/__init__.py neutron/notifiers/batch_notifier.py neutron/notifiers/ironic.py neutron/notifiers/nova.py neutron/objects/README.rst neutron/objects/__init__.py neutron/objects/address_scope.py neutron/objects/agent.py neutron/objects/auto_allocate.py neutron/objects/base.py neutron/objects/conntrack_helper.py neutron/objects/flavor.py neutron/objects/floatingip.py neutron/objects/ipam.py neutron/objects/l3_hamode.py neutron/objects/l3agent.py neutron/objects/metering.py neutron/objects/network.py neutron/objects/network_segment_range.py neutron/objects/port_forwarding.py neutron/objects/ports.py neutron/objects/provisioning_blocks.py neutron/objects/quota.py neutron/objects/rbac.py neutron/objects/rbac_db.py neutron/objects/router.py neutron/objects/securitygroup.py neutron/objects/servicetype.py neutron/objects/stdattrs.py neutron/objects/subnet.py neutron/objects/subnetpool.py neutron/objects/tag.py neutron/objects/trunk.py neutron/objects/db/__init__.py neutron/objects/db/api.py neutron/objects/extensions/__init__.py neutron/objects/extensions/port_security.py neutron/objects/logapi/__init__.py neutron/objects/logapi/logging_resource.py neutron/objects/plugins/__init__.py neutron/objects/plugins/ml2/__init__.py neutron/objects/plugins/ml2/base.py neutron/objects/plugins/ml2/flatallocation.py neutron/objects/plugins/ml2/geneveallocation.py neutron/objects/plugins/ml2/greallocation.py neutron/objects/plugins/ml2/vlanallocation.py neutron/objects/plugins/ml2/vxlanallocation.py neutron/objects/port/__init__.py neutron/objects/port/extensions/__init__.py neutron/objects/port/extensions/allowedaddresspairs.py neutron/objects/port/extensions/data_plane_status.py neutron/objects/port/extensions/extra_dhcp_opt.py neutron/objects/port/extensions/port_security.py neutron/objects/port/extensions/uplink_status_propagation.py neutron/objects/qos/__init__.py neutron/objects/qos/binding.py neutron/objects/qos/policy.py neutron/objects/qos/qos_policy_validator.py neutron/objects/qos/rule.py 
neutron/objects/qos/rule_type.py neutron/pecan_wsgi/__init__.py neutron/pecan_wsgi/app.py neutron/pecan_wsgi/constants.py neutron/pecan_wsgi/startup.py neutron/pecan_wsgi/controllers/__init__.py neutron/pecan_wsgi/controllers/extensions.py neutron/pecan_wsgi/controllers/quota.py neutron/pecan_wsgi/controllers/resource.py neutron/pecan_wsgi/controllers/root.py neutron/pecan_wsgi/controllers/utils.py neutron/pecan_wsgi/hooks/__init__.py neutron/pecan_wsgi/hooks/body_validation.py neutron/pecan_wsgi/hooks/context.py neutron/pecan_wsgi/hooks/notifier.py neutron/pecan_wsgi/hooks/ownership_validation.py neutron/pecan_wsgi/hooks/policy_enforcement.py neutron/pecan_wsgi/hooks/query_parameters.py neutron/pecan_wsgi/hooks/quota_enforcement.py neutron/pecan_wsgi/hooks/translation.py neutron/pecan_wsgi/hooks/userfilters.py neutron/pecan_wsgi/hooks/utils.py neutron/plugins/__init__.py neutron/plugins/common/__init__.py neutron/plugins/common/constants.py neutron/plugins/ml2/README neutron/plugins/ml2/__init__.py neutron/plugins/ml2/db.py neutron/plugins/ml2/driver_context.py neutron/plugins/ml2/managers.py neutron/plugins/ml2/models.py neutron/plugins/ml2/ovo_rpc.py neutron/plugins/ml2/plugin.py neutron/plugins/ml2/rpc.py neutron/plugins/ml2/common/__init__.py neutron/plugins/ml2/common/exceptions.py neutron/plugins/ml2/drivers/__init__.py neutron/plugins/ml2/drivers/helpers.py neutron/plugins/ml2/drivers/mech_agent.py neutron/plugins/ml2/drivers/type_flat.py neutron/plugins/ml2/drivers/type_geneve.py neutron/plugins/ml2/drivers/type_gre.py neutron/plugins/ml2/drivers/type_local.py neutron/plugins/ml2/drivers/type_tunnel.py neutron/plugins/ml2/drivers/type_vlan.py neutron/plugins/ml2/drivers/type_vxlan.py neutron/plugins/ml2/drivers/agent/__init__.py neutron/plugins/ml2/drivers/agent/_agent_manager_base.py neutron/plugins/ml2/drivers/agent/_common_agent.py neutron/plugins/ml2/drivers/agent/capabilities.py neutron/plugins/ml2/drivers/agent/config.py neutron/plugins/ml2/drivers/l2pop/README neutron/plugins/ml2/drivers/l2pop/__init__.py neutron/plugins/ml2/drivers/l2pop/db.py neutron/plugins/ml2/drivers/l2pop/mech_driver.py neutron/plugins/ml2/drivers/l2pop/rpc.py neutron/plugins/ml2/drivers/l2pop/rpc_manager/__init__.py neutron/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc.py neutron/plugins/ml2/drivers/linuxbridge/__init__.py neutron/plugins/ml2/drivers/linuxbridge/agent/__init__.py neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_agent_extension_api.py neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_capabilities.py neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py neutron/plugins/ml2/drivers/linuxbridge/agent/common/__init__.py neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py neutron/plugins/ml2/drivers/linuxbridge/agent/common/constants.py neutron/plugins/ml2/drivers/linuxbridge/agent/common/utils.py neutron/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/__init__.py neutron/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/qos_driver.py neutron/plugins/ml2/drivers/linuxbridge/mech_driver/__init__.py neutron/plugins/ml2/drivers/linuxbridge/mech_driver/mech_linuxbridge.py neutron/plugins/ml2/drivers/macvtap/__init__.py neutron/plugins/ml2/drivers/macvtap/macvtap_common.py neutron/plugins/ml2/drivers/macvtap/agent/__init__.py neutron/plugins/ml2/drivers/macvtap/agent/macvtap_neutron_agent.py neutron/plugins/ml2/drivers/macvtap/mech_driver/__init__.py 
neutron/plugins/ml2/drivers/macvtap/mech_driver/mech_macvtap.py neutron/plugins/ml2/drivers/mech_sriov/__init__.py neutron/plugins/ml2/drivers/mech_sriov/agent/__init__.py neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py neutron/plugins/ml2/drivers/mech_sriov/agent/common/__init__.py neutron/plugins/ml2/drivers/mech_sriov/agent/common/config.py neutron/plugins/ml2/drivers/mech_sriov/agent/common/exceptions.py neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/__init__.py neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/qos_driver.py neutron/plugins/ml2/drivers/mech_sriov/mech_driver/__init__.py neutron/plugins/ml2/drivers/mech_sriov/mech_driver/exceptions.py neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py neutron/plugins/ml2/drivers/openvswitch/__init__.py neutron/plugins/ml2/drivers/openvswitch/agent/__init__.py neutron/plugins/ml2/drivers/openvswitch/agent/main.py neutron/plugins/ml2/drivers/openvswitch/agent/ovs_agent_extension_api.py neutron/plugins/ml2/drivers/openvswitch/agent/ovs_capabilities.py neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py neutron/plugins/ml2/drivers/openvswitch/agent/vlanmanager.py neutron/plugins/ml2/drivers/openvswitch/agent/common/__init__.py neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py neutron/plugins/ml2/drivers/openvswitch/agent/common/constants.py neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/__init__.py neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/qos_driver.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/__init__.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/br_cookie.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/__init__.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_dvr_process.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_int.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_phys.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_tun.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/main.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge.py neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_oskenapp.py neutron/plugins/ml2/drivers/openvswitch/mech_driver/__init__.py neutron/plugins/ml2/drivers/openvswitch/mech_driver/mech_openvswitch.py neutron/plugins/ml2/drivers/ovn/__init__.py neutron/plugins/ml2/drivers/ovn/mech_driver/__init__.py neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/__init__.py neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/api.py neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/commands.py neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/impl_idl_ovn.py neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovsdb_monitor.py neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/worker.py neutron/plugins/ml2/extensions/__init__.py neutron/plugins/ml2/extensions/data_plane_status.py 
neutron/plugins/ml2/extensions/dns_integration.py neutron/plugins/ml2/extensions/port_security.py neutron/plugins/ml2/extensions/qos.py neutron/plugins/ml2/extensions/subnet_dns_publish_fixed_ip.py neutron/plugins/ml2/extensions/tag_ports_during_bulk_creation.py neutron/plugins/ml2/extensions/uplink_status_propagation.py neutron/privileged/__init__.py neutron/privileged/agent/__init__.py neutron/privileged/agent/linux/__init__.py neutron/privileged/agent/linux/ip_lib.py neutron/privileged/agent/linux/netlink_constants.py neutron/privileged/agent/linux/netlink_lib.py neutron/privileged/agent/linux/tc_lib.py neutron/profiling/__init__.py neutron/profiling/profiled_decorator.py neutron/quota/__init__.py neutron/quota/resource.py neutron/quota/resource_registry.py neutron/scheduler/__init__.py neutron/scheduler/base_resource_filter.py neutron/scheduler/base_scheduler.py neutron/scheduler/dhcp_agent_scheduler.py neutron/scheduler/l3_agent_scheduler.py neutron/scheduler/l3_ovn_scheduler.py neutron/server/__init__.py neutron/server/rpc_eventlet.py neutron/server/wsgi_eventlet.py neutron/services/__init__.py neutron/services/provider_configuration.py neutron/services/service_base.py neutron/services/auto_allocate/__init__.py neutron/services/auto_allocate/db.py neutron/services/auto_allocate/exceptions.py neutron/services/auto_allocate/models.py neutron/services/auto_allocate/plugin.py neutron/services/conntrack_helper/__init__.py neutron/services/conntrack_helper/plugin.py neutron/services/conntrack_helper/common/__init__.py neutron/services/conntrack_helper/common/exceptions.py neutron/services/externaldns/__init__.py neutron/services/externaldns/driver.py neutron/services/externaldns/drivers/__init__.py neutron/services/externaldns/drivers/designate/__init__.py neutron/services/externaldns/drivers/designate/driver.py neutron/services/flavors/__init__.py neutron/services/flavors/flavors_plugin.py neutron/services/l3_router/README neutron/services/l3_router/__init__.py neutron/services/l3_router/l3_router_plugin.py neutron/services/l3_router/service_providers/__init__.py neutron/services/l3_router/service_providers/base.py neutron/services/l3_router/service_providers/driver_controller.py neutron/services/l3_router/service_providers/dvr.py neutron/services/l3_router/service_providers/dvrha.py neutron/services/l3_router/service_providers/ha.py neutron/services/l3_router/service_providers/single_node.py neutron/services/logapi/__init__.py neutron/services/logapi/logging_plugin.py neutron/services/logapi/agent/__init__.py neutron/services/logapi/agent/log_extension.py neutron/services/logapi/agent/l3/__init__.py neutron/services/logapi/agent/l3/base.py neutron/services/logapi/common/__init__.py neutron/services/logapi/common/db_api.py neutron/services/logapi/common/exceptions.py neutron/services/logapi/common/sg_callback.py neutron/services/logapi/common/sg_validate.py neutron/services/logapi/common/snat_validate.py neutron/services/logapi/common/validators.py neutron/services/logapi/drivers/__init__.py neutron/services/logapi/drivers/base.py neutron/services/logapi/drivers/manager.py neutron/services/logapi/drivers/openvswitch/__init__.py neutron/services/logapi/drivers/openvswitch/driver.py neutron/services/logapi/drivers/openvswitch/log_oskenapp.py neutron/services/logapi/drivers/openvswitch/ovs_firewall_log.py neutron/services/logapi/rpc/__init__.py neutron/services/logapi/rpc/agent.py neutron/services/logapi/rpc/server.py neutron/services/loki/__init__.py neutron/services/loki/loki_plugin.py 
neutron/services/metering/__init__.py neutron/services/metering/metering_plugin.py neutron/services/metering/agents/__init__.py neutron/services/metering/agents/metering_agent.py neutron/services/metering/drivers/__init__.py neutron/services/metering/drivers/abstract_driver.py neutron/services/metering/drivers/utils.py neutron/services/metering/drivers/iptables/__init__.py neutron/services/metering/drivers/iptables/iptables_driver.py neutron/services/metering/drivers/noop/__init__.py neutron/services/metering/drivers/noop/noop_driver.py neutron/services/network_ip_availability/__init__.py neutron/services/network_ip_availability/plugin.py neutron/services/network_segment_range/__init__.py neutron/services/network_segment_range/plugin.py neutron/services/ovn_l3/__init__.py neutron/services/ovn_l3/plugin.py neutron/services/placement_report/__init__.py neutron/services/placement_report/plugin.py neutron/services/portforwarding/__init__.py neutron/services/portforwarding/pf_plugin.py neutron/services/portforwarding/common/__init__.py neutron/services/portforwarding/common/exceptions.py neutron/services/qos/__init__.py neutron/services/qos/qos_plugin.py neutron/services/qos/drivers/__init__.py neutron/services/qos/drivers/manager.py neutron/services/qos/drivers/linuxbridge/__init__.py neutron/services/qos/drivers/linuxbridge/driver.py neutron/services/qos/drivers/openvswitch/__init__.py neutron/services/qos/drivers/openvswitch/driver.py neutron/services/qos/drivers/ovn/__init__.py neutron/services/qos/drivers/ovn/driver.py neutron/services/qos/drivers/sriov/__init__.py neutron/services/qos/drivers/sriov/driver.py neutron/services/rbac/__init__.py neutron/services/revisions/__init__.py neutron/services/revisions/revision_plugin.py neutron/services/segments/__init__.py neutron/services/segments/db.py neutron/services/segments/exceptions.py neutron/services/segments/plugin.py neutron/services/tag/__init__.py neutron/services/tag/tag_plugin.py neutron/services/timestamp/__init__.py neutron/services/timestamp/timestamp_db.py neutron/services/timestamp/timestamp_plugin.py neutron/services/trunk/__init__.py neutron/services/trunk/callbacks.py neutron/services/trunk/exceptions.py neutron/services/trunk/models.py neutron/services/trunk/plugin.py neutron/services/trunk/rules.py neutron/services/trunk/utils.py neutron/services/trunk/drivers/__init__.py neutron/services/trunk/drivers/base.py neutron/services/trunk/drivers/linuxbridge/__init__.py neutron/services/trunk/drivers/linuxbridge/driver.py neutron/services/trunk/drivers/linuxbridge/agent/__init__.py neutron/services/trunk/drivers/linuxbridge/agent/driver.py neutron/services/trunk/drivers/linuxbridge/agent/trunk_plumber.py neutron/services/trunk/drivers/openvswitch/__init__.py neutron/services/trunk/drivers/openvswitch/constants.py neutron/services/trunk/drivers/openvswitch/driver.py neutron/services/trunk/drivers/openvswitch/utils.py neutron/services/trunk/drivers/openvswitch/agent/__init__.py neutron/services/trunk/drivers/openvswitch/agent/driver.py neutron/services/trunk/drivers/openvswitch/agent/exceptions.py neutron/services/trunk/drivers/openvswitch/agent/ovsdb_handler.py neutron/services/trunk/drivers/openvswitch/agent/trunk_manager.py neutron/services/trunk/drivers/ovn/__init__.py neutron/services/trunk/drivers/ovn/trunk_driver.py neutron/services/trunk/rpc/__init__.py neutron/services/trunk/rpc/agent.py neutron/services/trunk/rpc/backend.py neutron/services/trunk/rpc/constants.py neutron/services/trunk/rpc/server.py 
neutron/services/trunk/seg_types/__init__.py neutron/services/trunk/seg_types/validators.py neutron/tests/__init__.py neutron/tests/base.py neutron/tests/post_mortem_debug.py neutron/tests/tools.py neutron/tests/common/__init__.py neutron/tests/common/base.py neutron/tests/common/config_fixtures.py neutron/tests/common/conn_testers.py neutron/tests/common/helpers.py neutron/tests/common/l3_test_common.py neutron/tests/common/machine_fixtures.py neutron/tests/common/net_helpers.py neutron/tests/common/agents/__init__.py neutron/tests/common/agents/l2_extensions.py neutron/tests/common/agents/l3_agent.py neutron/tests/common/agents/ovs_agent.py neutron/tests/common/exclusive_resources/__init__.py neutron/tests/common/exclusive_resources/ip_address.py neutron/tests/common/exclusive_resources/ip_network.py neutron/tests/common/exclusive_resources/port.py neutron/tests/common/exclusive_resources/resource_allocator.py neutron/tests/contrib/README neutron/tests/contrib/gate_hook.sh neutron/tests/contrib/testing.filters neutron/tests/contrib/hooks/api_all_extensions neutron/tests/contrib/hooks/availability_zone neutron/tests/contrib/hooks/disable_dvr neutron/tests/contrib/hooks/disable_dvr_tests neutron/tests/contrib/hooks/dns neutron/tests/contrib/hooks/dvr neutron/tests/contrib/hooks/log neutron/tests/contrib/hooks/network_segment_range neutron/tests/contrib/hooks/openvswitch_type_drivers neutron/tests/contrib/hooks/osprofiler neutron/tests/contrib/hooks/qos neutron/tests/contrib/hooks/quotas neutron/tests/contrib/hooks/segments neutron/tests/contrib/hooks/trunk neutron/tests/contrib/hooks/tunnel_types neutron/tests/contrib/hooks/ubuntu_image neutron/tests/contrib/hooks/uplink_status_propagation neutron/tests/contrib/hooks/vlan_provider neutron/tests/etc/api-paste.ini neutron/tests/etc/api-paste.ini.test neutron/tests/etc/neutron.conf neutron/tests/etc/neutron_test.conf neutron/tests/etc/neutron_test2.conf.example neutron/tests/fullstack/README neutron/tests/fullstack/__init__.py neutron/tests/fullstack/base.py neutron/tests/fullstack/test_agent_bandwidth_report.py neutron/tests/fullstack/test_connectivity.py neutron/tests/fullstack/test_dhcp_agent.py neutron/tests/fullstack/test_firewall.py neutron/tests/fullstack/test_l3_agent.py neutron/tests/fullstack/test_logging.py neutron/tests/fullstack/test_mtu.py neutron/tests/fullstack/test_port_shut_down.py neutron/tests/fullstack/test_ports_binding.py neutron/tests/fullstack/test_ports_rebind.py neutron/tests/fullstack/test_qos.py neutron/tests/fullstack/test_securitygroup.py neutron/tests/fullstack/test_segmentation_id.py neutron/tests/fullstack/test_subnet.py neutron/tests/fullstack/test_trunk.py neutron/tests/fullstack/agents/__init__.py neutron/tests/fullstack/agents/dhcp_agent.py neutron/tests/fullstack/agents/l3_agent.py neutron/tests/fullstack/agents/ovs_agent.py neutron/tests/fullstack/resources/__init__.py neutron/tests/fullstack/resources/client.py neutron/tests/fullstack/resources/config.py neutron/tests/fullstack/resources/environment.py neutron/tests/fullstack/resources/machine.py neutron/tests/fullstack/resources/process.py neutron/tests/fullstack/schedulers/__init__.py neutron/tests/fullstack/schedulers/dhcp.py neutron/tests/fullstack/servers/__init__.py neutron/tests/fullstack/servers/placement.py neutron/tests/functional/__init__.py neutron/tests/functional/base.py neutron/tests/functional/constants.py neutron/tests/functional/requirements.txt neutron/tests/functional/test_server.py neutron/tests/functional/test_service.py 
neutron/tests/functional/agent/__init__.py neutron/tests/functional/agent/test_dhcp_agent.py neutron/tests/functional/agent/test_firewall.py neutron/tests/functional/agent/test_l2_lb_agent.py neutron/tests/functional/agent/test_l2_ovs_agent.py neutron/tests/functional/agent/test_ovs_flows.py neutron/tests/functional/agent/test_ovs_lib.py neutron/tests/functional/agent/common/__init__.py neutron/tests/functional/agent/common/test_ovs_lib.py neutron/tests/functional/agent/l2/__init__.py neutron/tests/functional/agent/l2/base.py neutron/tests/functional/agent/l2/extensions/__init__.py neutron/tests/functional/agent/l2/extensions/test_ovs_agent_qos_extension.py neutron/tests/functional/agent/l3/__init__.py neutron/tests/functional/agent/l3/framework.py neutron/tests/functional/agent/l3/test_dvr_router.py neutron/tests/functional/agent/l3/test_ha_router.py neutron/tests/functional/agent/l3/test_keepalived_state_change.py neutron/tests/functional/agent/l3/test_legacy_router.py neutron/tests/functional/agent/l3/test_metadata_proxy.py neutron/tests/functional/agent/l3/test_namespace_manager.py neutron/tests/functional/agent/l3/bin/__init__.py neutron/tests/functional/agent/l3/bin/cmd_keepalived_state_change.py neutron/tests/functional/agent/l3/extensions/__init__.py neutron/tests/functional/agent/l3/extensions/test_conntrack_helper_extension.py neutron/tests/functional/agent/l3/extensions/test_gateway_ip_qos_extension.py neutron/tests/functional/agent/l3/extensions/test_port_forwarding_extension.py neutron/tests/functional/agent/l3/extensions/qos/__init__.py neutron/tests/functional/agent/l3/extensions/qos/test_fip_qos_extension.py neutron/tests/functional/agent/linux/__init__.py neutron/tests/functional/agent/linux/base.py neutron/tests/functional/agent/linux/helpers.py neutron/tests/functional/agent/linux/simple_daemon.py neutron/tests/functional/agent/linux/test_async_process.py neutron/tests/functional/agent/linux/test_bridge_lib.py neutron/tests/functional/agent/linux/test_dhcp.py neutron/tests/functional/agent/linux/test_interface.py neutron/tests/functional/agent/linux/test_ip_lib.py neutron/tests/functional/agent/linux/test_ipset.py neutron/tests/functional/agent/linux/test_iptables.py neutron/tests/functional/agent/linux/test_keepalived.py neutron/tests/functional/agent/linux/test_l3_tc_lib.py neutron/tests/functional/agent/linux/test_linuxbridge_arp_protect.py neutron/tests/functional/agent/linux/test_netlink_lib.py neutron/tests/functional/agent/linux/test_of_monitor.py neutron/tests/functional/agent/linux/test_ovsdb_monitor.py neutron/tests/functional/agent/linux/test_process_monitor.py neutron/tests/functional/agent/linux/test_tc_lib.py neutron/tests/functional/agent/linux/test_utils.py neutron/tests/functional/agent/linux/bin/__init__.py neutron/tests/functional/agent/linux/bin/ip_monitor.py neutron/tests/functional/agent/linux/bin/ipt_binname.py neutron/tests/functional/agent/linux/openvswitch_firewall/__init__.py neutron/tests/functional/agent/linux/openvswitch_firewall/test_firewall.py neutron/tests/functional/agent/linux/openvswitch_firewall/test_iptables.py neutron/tests/functional/agent/ovn/__init__.py neutron/tests/functional/agent/ovn/metadata/__init__.py neutron/tests/functional/agent/ovn/metadata/test_metadata_agent.py neutron/tests/functional/agent/ovsdb/__init__.py neutron/tests/functional/agent/ovsdb/native/__init__.py neutron/tests/functional/agent/ovsdb/native/test_connection.py neutron/tests/functional/agent/windows/__init__.py 
neutron/tests/functional/agent/windows/test_ip_lib.py neutron/tests/functional/api/__init__.py neutron/tests/functional/api/test_policies.py neutron/tests/functional/cmd/__init__.py neutron/tests/functional/cmd/process_spawn.py neutron/tests/functional/cmd/test_ipset_cleanup.py neutron/tests/functional/cmd/test_linuxbridge_cleanup.py neutron/tests/functional/cmd/test_netns_cleanup.py neutron/tests/functional/cmd/test_ovs_cleanup.py neutron/tests/functional/cmd/test_status.py neutron/tests/functional/common/__init__.py neutron/tests/functional/common/test_utils.py neutron/tests/functional/db/__init__.py neutron/tests/functional/db/test_ipam.py neutron/tests/functional/db/test_migrations.py neutron/tests/functional/db/test_models.py neutron/tests/functional/db/test_network.py neutron/tests/functional/db/test_ovn_revision_numbers_db.py neutron/tests/functional/db/migrations/__init__.py neutron/tests/functional/db/migrations/test_2e0d7a8a1586_add_binding_index_to_routerl3agentbinding.py neutron/tests/functional/db/migrations/test_3b935b28e7a0_migrate_to_pluggable_ipam.py neutron/tests/functional/db/migrations/test_97c25b0d2353_add_name_desc.py neutron/tests/functional/db/migrations/test_a8b517cff8ab_add_routerport_bindings_for_ha.py neutron/tests/functional/db/migrations/test_b12a3ef66e62_add_standardattr_to_qos_policies.py neutron/tests/functional/db/migrations/test_c3e9d13c4367_add_binding_index_to_.py neutron/tests/functional/pecan_wsgi/__init__.py neutron/tests/functional/pecan_wsgi/config.py neutron/tests/functional/pecan_wsgi/test_controllers.py neutron/tests/functional/pecan_wsgi/test_functional.py neutron/tests/functional/pecan_wsgi/test_hooks.py neutron/tests/functional/pecan_wsgi/utils.py neutron/tests/functional/plugins/__init__.py neutron/tests/functional/plugins/ml2/__init__.py neutron/tests/functional/plugins/ml2/test_plugin.py neutron/tests/functional/plugins/ml2/drivers/__init__.py neutron/tests/functional/plugins/ml2/drivers/macvtap/__init__.py neutron/tests/functional/plugins/ml2/drivers/macvtap/agent/__init__.py neutron/tests/functional/plugins/ml2/drivers/macvtap/agent/test_macvtap_neutron_agent.py neutron/tests/functional/plugins/ml2/drivers/ovn/__init__.py neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/__init__.py neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/__init__.py neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_impl_idl.py neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_resources.py neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovsdb_monitor.py neutron/tests/functional/privileged/__init__.py neutron/tests/functional/privileged/agent/__init__.py neutron/tests/functional/privileged/agent/linux/__init__.py neutron/tests/functional/privileged/agent/linux/test_ip_lib.py neutron/tests/functional/privileged/agent/linux/test_tc_lib.py neutron/tests/functional/resources/__init__.py neutron/tests/functional/resources/process.py neutron/tests/functional/resources/ovsdb/__init__.py neutron/tests/functional/resources/ovsdb/events.py neutron/tests/functional/resources/ovsdb/fixtures.py neutron/tests/functional/sanity/__init__.py neutron/tests/functional/sanity/test_sanity.py 
neutron/tests/functional/scheduler/__init__.py neutron/tests/functional/scheduler/test_dhcp_agent_scheduler.py neutron/tests/functional/scheduler/test_l3_agent_scheduler.py neutron/tests/functional/services/__init__.py neutron/tests/functional/services/conntrack_helper/__init__.py neutron/tests/functional/services/conntrack_helper/test_conntrack_helper.py neutron/tests/functional/services/l3_router/__init__.py neutron/tests/functional/services/l3_router/test_l3_dvr_ha_router_plugin.py neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py neutron/tests/functional/services/logapi/__init__.py neutron/tests/functional/services/logapi/test_logging.py neutron/tests/functional/services/ovn_l3/__init__.py neutron/tests/functional/services/ovn_l3/test_plugin.py neutron/tests/functional/services/portforwarding/__init__.py neutron/tests/functional/services/portforwarding/test_port_forwarding.py neutron/tests/functional/services/trunk/__init__.py neutron/tests/functional/services/trunk/test_plugin.py neutron/tests/functional/services/trunk/drivers/__init__.py neutron/tests/functional/services/trunk/drivers/openvswitch/__init__.py neutron/tests/functional/services/trunk/drivers/openvswitch/agent/__init__.py neutron/tests/functional/services/trunk/drivers/openvswitch/agent/test_ovsdb_handler.py neutron/tests/functional/services/trunk/drivers/openvswitch/agent/test_trunk_manager.py neutron/tests/functional/services/trunk/drivers/ovn/__init__.py neutron/tests/functional/services/trunk/drivers/ovn/test_trunk_driver.py neutron/tests/functional/services/trunk/rpc/__init__.py neutron/tests/functional/services/trunk/rpc/test_server.py neutron/tests/functional/tests/__init__.py neutron/tests/functional/tests/common/__init__.py neutron/tests/functional/tests/common/test_net_helpers.py neutron/tests/functional/tests/common/exclusive_resources/__init__.py neutron/tests/functional/tests/common/exclusive_resources/test_ip_address.py neutron/tests/functional/tests/common/exclusive_resources/test_ip_network.py neutron/tests/functional/tests/common/exclusive_resources/test_port.py neutron/tests/functional/tests/common/exclusive_resources/test_resource_allocator.py neutron/tests/unit/__init__.py neutron/tests/unit/_test_extension_portbindings.py neutron/tests/unit/dummy_plugin.py neutron/tests/unit/extension_stubs.py neutron/tests/unit/fake_resources.py neutron/tests/unit/test_auth.py neutron/tests/unit/test_manager.py neutron/tests/unit/test_neutron_plugin_base_v2.py neutron/tests/unit/test_opts.py neutron/tests/unit/test_policy.py neutron/tests/unit/test_service.py neutron/tests/unit/test_worker.py neutron/tests/unit/test_wsgi.py neutron/tests/unit/testlib_api.py neutron/tests/unit/agent/__init__.py neutron/tests/unit/agent/test_agent_extensions_manager.py neutron/tests/unit/agent/test_resource_cache.py neutron/tests/unit/agent/test_rpc.py neutron/tests/unit/agent/test_securitygroups_rpc.py neutron/tests/unit/agent/common/__init__.py neutron/tests/unit/agent/common/test_async_process.py neutron/tests/unit/agent/common/test_ovs_lib.py neutron/tests/unit/agent/common/test_ovsdb_monitor.py neutron/tests/unit/agent/common/test_placement_report.py neutron/tests/unit/agent/common/test_polling.py neutron/tests/unit/agent/common/test_resource_processing_queue.py neutron/tests/unit/agent/common/test_utils.py neutron/tests/unit/agent/dhcp/__init__.py neutron/tests/unit/agent/dhcp/test_agent.py neutron/tests/unit/agent/l2/__init__.py neutron/tests/unit/agent/l2/test_l2_agent_extensions_manager.py 
neutron/tests/unit/agent/l2/extensions/__init__.py neutron/tests/unit/agent/l2/extensions/test_fdb_population.py neutron/tests/unit/agent/l2/extensions/test_qos.py neutron/tests/unit/agent/l3/__init__.py neutron/tests/unit/agent/l3/test_agent.py neutron/tests/unit/agent/l3/test_dvr_fip_ns.py neutron/tests/unit/agent/l3/test_dvr_local_router.py neutron/tests/unit/agent/l3/test_dvr_snat_ns.py neutron/tests/unit/agent/l3/test_fip_rule_priority_allocator.py neutron/tests/unit/agent/l3/test_ha_router.py neutron/tests/unit/agent/l3/test_item_allocator.py neutron/tests/unit/agent/l3/test_l3_agent_extension_api.py neutron/tests/unit/agent/l3/test_legacy_router.py neutron/tests/unit/agent/l3/test_link_local_allocator.py neutron/tests/unit/agent/l3/test_namespace_manager.py neutron/tests/unit/agent/l3/test_router_info.py neutron/tests/unit/agent/l3/extensions/__init__.py neutron/tests/unit/agent/l3/extensions/test_conntrack_helper.py neutron/tests/unit/agent/l3/extensions/test_port_forwarding.py neutron/tests/unit/agent/l3/extensions/test_snat_log.py neutron/tests/unit/agent/l3/extensions/qos/__init__.py neutron/tests/unit/agent/l3/extensions/qos/test_base.py neutron/tests/unit/agent/l3/extensions/qos/test_fip.py neutron/tests/unit/agent/l3/extensions/qos/test_gateway_ip.py neutron/tests/unit/agent/linux/__init__.py neutron/tests/unit/agent/linux/failing_process.py neutron/tests/unit/agent/linux/test_bridge_lib.py neutron/tests/unit/agent/linux/test_daemon.py neutron/tests/unit/agent/linux/test_dhcp.py neutron/tests/unit/agent/linux/test_external_process.py neutron/tests/unit/agent/linux/test_interface.py neutron/tests/unit/agent/linux/test_ip_conntrack.py neutron/tests/unit/agent/linux/test_ip_lib.py neutron/tests/unit/agent/linux/test_ip_link_support.py neutron/tests/unit/agent/linux/test_ipset_manager.py neutron/tests/unit/agent/linux/test_iptables_firewall.py neutron/tests/unit/agent/linux/test_iptables_manager.py neutron/tests/unit/agent/linux/test_keepalived.py neutron/tests/unit/agent/linux/test_l3_tc_lib.py neutron/tests/unit/agent/linux/test_pd.py neutron/tests/unit/agent/linux/test_tc_lib.py neutron/tests/unit/agent/linux/test_utils.py neutron/tests/unit/agent/linux/test_xenapi_root_helper.py neutron/tests/unit/agent/linux/openvswitch_firewall/__init__.py neutron/tests/unit/agent/linux/openvswitch_firewall/test_firewall.py neutron/tests/unit/agent/linux/openvswitch_firewall/test_iptables.py neutron/tests/unit/agent/linux/openvswitch_firewall/test_rules.py neutron/tests/unit/agent/metadata/__init__.py neutron/tests/unit/agent/metadata/test_agent.py neutron/tests/unit/agent/metadata/test_driver.py neutron/tests/unit/agent/ovn/__init__.py neutron/tests/unit/agent/ovn/metadata/__init__.py neutron/tests/unit/agent/ovn/metadata/test_agent.py neutron/tests/unit/agent/ovn/metadata/test_driver.py neutron/tests/unit/agent/ovn/metadata/test_server.py neutron/tests/unit/agent/ovsdb/__init__.py neutron/tests/unit/agent/ovsdb/test_impl_idl.py neutron/tests/unit/agent/ovsdb/native/__init__.py neutron/tests/unit/agent/ovsdb/native/test_connection.py neutron/tests/unit/agent/windows/__init__.py neutron/tests/unit/agent/windows/test_ip_lib.py neutron/tests/unit/agent/windows/test_utils.py neutron/tests/unit/api/__init__.py neutron/tests/unit/api/test_api_common.py neutron/tests/unit/api/test_extensions.py neutron/tests/unit/api/rpc/__init__.py neutron/tests/unit/api/rpc/agentnotifiers/__init__.py neutron/tests/unit/api/rpc/agentnotifiers/test_dhcp_rpc_agent_api.py 
neutron/tests/unit/api/rpc/agentnotifiers/test_l3_rpc_agent_api.py neutron/tests/unit/api/rpc/callbacks/__init__.py neutron/tests/unit/api/rpc/callbacks/test_resource_manager.py neutron/tests/unit/api/rpc/callbacks/test_resources.py neutron/tests/unit/api/rpc/callbacks/test_version_manager.py neutron/tests/unit/api/rpc/callbacks/consumer/__init__.py neutron/tests/unit/api/rpc/callbacks/consumer/test_registry.py neutron/tests/unit/api/rpc/callbacks/producer/__init__.py neutron/tests/unit/api/rpc/callbacks/producer/test_registry.py neutron/tests/unit/api/rpc/handlers/__init__.py neutron/tests/unit/api/rpc/handlers/test_dhcp_rpc.py neutron/tests/unit/api/rpc/handlers/test_dvr_rpc.py neutron/tests/unit/api/rpc/handlers/test_l3_rpc.py neutron/tests/unit/api/rpc/handlers/test_resources_rpc.py neutron/tests/unit/api/rpc/handlers/test_securitygroups_rpc.py neutron/tests/unit/api/v2/__init__.py neutron/tests/unit/api/v2/test_base.py neutron/tests/unit/api/v2/test_resource.py neutron/tests/unit/api/v2/test_router.py neutron/tests/unit/cmd/__init__.py neutron/tests/unit/cmd/test_netns_cleanup.py neutron/tests/unit/cmd/test_ovs_cleanup.py neutron/tests/unit/cmd/test_sanity_check.py neutron/tests/unit/cmd/test_status.py neutron/tests/unit/cmd/upgrade_checks/__init__.py neutron/tests/unit/cmd/upgrade_checks/test_checks.py neutron/tests/unit/common/__init__.py neutron/tests/unit/common/moved_globals_code1.py neutron/tests/unit/common/moved_globals_code2.py neutron/tests/unit/common/moved_globals_target.py neutron/tests/unit/common/test__deprecate.py neutron/tests/unit/common/test_cache_utils.py neutron/tests/unit/common/test_coordination.py neutron/tests/unit/common/test_ipv6_utils.py neutron/tests/unit/common/test_utils.py neutron/tests/unit/common/ovn/__init__.py neutron/tests/unit/common/ovn/test_acl.py neutron/tests/unit/common/ovn/test_hash_ring_manager.py neutron/tests/unit/common/ovn/test_utils.py neutron/tests/unit/conf/agent/__init__.py neutron/tests/unit/conf/agent/test_common.py neutron/tests/unit/core_extensions/__init__.py neutron/tests/unit/core_extensions/test_qos.py neutron/tests/unit/db/__init__.py neutron/tests/unit/db/test__utils.py neutron/tests/unit/db/test_agents_db.py neutron/tests/unit/db/test_agentschedulers_db.py neutron/tests/unit/db/test_allowedaddresspairs_db.py neutron/tests/unit/db/test_db_base_plugin_common.py neutron/tests/unit/db/test_db_base_plugin_v2.py neutron/tests/unit/db/test_dvr_mac_db.py neutron/tests/unit/db/test_extraroute_db.py neutron/tests/unit/db/test_ipam_backend_mixin.py neutron/tests/unit/db/test_ipam_pluggable_backend.py neutron/tests/unit/db/test_l3_db.py neutron/tests/unit/db/test_l3_dvr_db.py neutron/tests/unit/db/test_l3_hamode_db.py neutron/tests/unit/db/test_migration.py neutron/tests/unit/db/test_ovn_hash_ring_db.py neutron/tests/unit/db/test_ovn_revision_numbers_db.py neutron/tests/unit/db/test_portsecurity_db.py neutron/tests/unit/db/test_portsecurity_db_common.py neutron/tests/unit/db/test_provisioning_blocks.py neutron/tests/unit/db/test_rbac_db_mixin.py neutron/tests/unit/db/test_securitygroups_db.py neutron/tests/unit/db/test_segments_db.py neutron/tests/unit/db/metering/__init__.py neutron/tests/unit/db/metering/test_metering_db.py neutron/tests/unit/db/quota/__init__.py neutron/tests/unit/db/quota/test_api.py neutron/tests/unit/db/quota/test_driver.py neutron/tests/unit/debug/__init__.py neutron/tests/unit/debug/test_commands.py neutron/tests/unit/extensions/__init__.py neutron/tests/unit/extensions/base.py 
neutron/tests/unit/extensions/extendedattribute.py neutron/tests/unit/extensions/extensionattribute.py neutron/tests/unit/extensions/foxinsocks.py neutron/tests/unit/extensions/test_address_scope.py neutron/tests/unit/extensions/test_agent.py neutron/tests/unit/extensions/test_availability_zone.py neutron/tests/unit/extensions/test_data_plane_status.py neutron/tests/unit/extensions/test_default_subnetpools.py neutron/tests/unit/extensions/test_dns.py neutron/tests/unit/extensions/test_expose_port_forwarding_in_fip.py neutron/tests/unit/extensions/test_external_net.py neutron/tests/unit/extensions/test_extra_dhcp_opt.py neutron/tests/unit/extensions/test_extraroute.py neutron/tests/unit/extensions/test_fip_port_details.py neutron/tests/unit/extensions/test_flavors.py neutron/tests/unit/extensions/test_floating_ip_port_forwarding.py neutron/tests/unit/extensions/test_floatingip_pools.py neutron/tests/unit/extensions/test_l3.py neutron/tests/unit/extensions/test_l3_ext_gw_mode.py neutron/tests/unit/extensions/test_network_ip_availability.py neutron/tests/unit/extensions/test_network_segment_range.py neutron/tests/unit/extensions/test_portsecurity.py neutron/tests/unit/extensions/test_providernet.py neutron/tests/unit/extensions/test_qos_fip.py neutron/tests/unit/extensions/test_qos_gateway_ip.py neutron/tests/unit/extensions/test_quotasv2.py neutron/tests/unit/extensions/test_quotasv2_detail.py neutron/tests/unit/extensions/test_router_availability_zone.py neutron/tests/unit/extensions/test_securitygroup.py neutron/tests/unit/extensions/test_segment.py neutron/tests/unit/extensions/test_servicetype.py neutron/tests/unit/extensions/test_subnet_dns_publish_fixed_ip.py neutron/tests/unit/extensions/test_subnet_onboard.py neutron/tests/unit/extensions/test_subnet_service_types.py neutron/tests/unit/extensions/test_subnetpool_prefix_ops.py neutron/tests/unit/extensions/test_timestamp.py neutron/tests/unit/extensions/test_uplink_status_propagation.py neutron/tests/unit/extensions/test_vlantransparent.py neutron/tests/unit/extensions/v2attributes.py neutron/tests/unit/hacking/__init__.py neutron/tests/unit/hacking/test_checks.py neutron/tests/unit/ipam/__init__.py neutron/tests/unit/ipam/fake_driver.py neutron/tests/unit/ipam/test_requests.py neutron/tests/unit/ipam/test_subnet_alloc.py neutron/tests/unit/ipam/test_utils.py neutron/tests/unit/ipam/drivers/__init__.py neutron/tests/unit/ipam/drivers/neutrondb_ipam/__init__.py neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_db_api.py neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_driver.py neutron/tests/unit/notifiers/__init__.py neutron/tests/unit/notifiers/test_batch_notifier.py neutron/tests/unit/notifiers/test_ironic.py neutron/tests/unit/notifiers/test_nova.py neutron/tests/unit/objects/__init__.py neutron/tests/unit/objects/test_address_scope.py neutron/tests/unit/objects/test_agent.py neutron/tests/unit/objects/test_auto_allocate.py neutron/tests/unit/objects/test_base.py neutron/tests/unit/objects/test_conntrack_helper.py neutron/tests/unit/objects/test_flavor.py neutron/tests/unit/objects/test_floatingip.py neutron/tests/unit/objects/test_ipam.py neutron/tests/unit/objects/test_l3_hamode.py neutron/tests/unit/objects/test_l3agent.py neutron/tests/unit/objects/test_metering.py neutron/tests/unit/objects/test_network.py neutron/tests/unit/objects/test_network_segment_range.py neutron/tests/unit/objects/test_objects.py neutron/tests/unit/objects/test_port_forwarding.py neutron/tests/unit/objects/test_ports.py 
neutron/tests/unit/objects/test_provisioning_blocks.py neutron/tests/unit/objects/test_quota.py neutron/tests/unit/objects/test_rbac.py neutron/tests/unit/objects/test_rbac_db.py neutron/tests/unit/objects/test_router.py neutron/tests/unit/objects/test_securitygroup.py neutron/tests/unit/objects/test_servicetype.py neutron/tests/unit/objects/test_subnet.py neutron/tests/unit/objects/test_subnetpool.py neutron/tests/unit/objects/test_tag.py neutron/tests/unit/objects/test_trunk.py neutron/tests/unit/objects/db/__init__.py neutron/tests/unit/objects/db/test_api.py neutron/tests/unit/objects/logapi/__init__.py neutron/tests/unit/objects/logapi/test_logging_resource.py neutron/tests/unit/objects/plugins/__init__.py neutron/tests/unit/objects/plugins/ml2/__init__.py neutron/tests/unit/objects/plugins/ml2/test_base.py neutron/tests/unit/objects/plugins/ml2/test_flatallocation.py neutron/tests/unit/objects/plugins/ml2/test_geneveallocation.py neutron/tests/unit/objects/plugins/ml2/test_greallocation.py neutron/tests/unit/objects/plugins/ml2/test_vlanallocation.py neutron/tests/unit/objects/plugins/ml2/test_vxlanallocation.py neutron/tests/unit/objects/port/__init__.py neutron/tests/unit/objects/port/extensions/__init__.py neutron/tests/unit/objects/port/extensions/test_allowedaddresspairs.py neutron/tests/unit/objects/port/extensions/test_data_plane_status.py neutron/tests/unit/objects/port/extensions/test_extra_dhcp_opt.py neutron/tests/unit/objects/port/extensions/test_port_security.py neutron/tests/unit/objects/port/extensions/test_uplink_status_propagation.py neutron/tests/unit/objects/qos/__init__.py neutron/tests/unit/objects/qos/test_binding.py neutron/tests/unit/objects/qos/test_policy.py neutron/tests/unit/objects/qos/test_rule.py neutron/tests/unit/objects/qos/test_rule_type.py neutron/tests/unit/pecan_wsgi/test_app.py neutron/tests/unit/plugins/__init__.py neutron/tests/unit/plugins/ml2/__init__.py neutron/tests/unit/plugins/ml2/_test_mech_agent.py neutron/tests/unit/plugins/ml2/base.py neutron/tests/unit/plugins/ml2/test_agent_scheduler.py neutron/tests/unit/plugins/ml2/test_db.py neutron/tests/unit/plugins/ml2/test_driver_context.py neutron/tests/unit/plugins/ml2/test_ext_portsecurity.py neutron/tests/unit/plugins/ml2/test_extension_driver_api.py neutron/tests/unit/plugins/ml2/test_managers.py neutron/tests/unit/plugins/ml2/test_ovo_rpc.py neutron/tests/unit/plugins/ml2/test_plugin.py neutron/tests/unit/plugins/ml2/test_port_binding.py neutron/tests/unit/plugins/ml2/test_rpc.py neutron/tests/unit/plugins/ml2/test_security_group.py neutron/tests/unit/plugins/ml2/test_tracked_resources.py neutron/tests/unit/plugins/ml2/drivers/__init__.py neutron/tests/unit/plugins/ml2/drivers/base_type_tunnel.py neutron/tests/unit/plugins/ml2/drivers/ext_test.py neutron/tests/unit/plugins/ml2/drivers/mech_fake_agent.py neutron/tests/unit/plugins/ml2/drivers/mech_faulty_agent.py neutron/tests/unit/plugins/ml2/drivers/mechanism_logger.py neutron/tests/unit/plugins/ml2/drivers/mechanism_test.py neutron/tests/unit/plugins/ml2/drivers/test_helpers.py neutron/tests/unit/plugins/ml2/drivers/test_type_flat.py neutron/tests/unit/plugins/ml2/drivers/test_type_geneve.py neutron/tests/unit/plugins/ml2/drivers/test_type_gre.py neutron/tests/unit/plugins/ml2/drivers/test_type_local.py neutron/tests/unit/plugins/ml2/drivers/test_type_vlan.py neutron/tests/unit/plugins/ml2/drivers/test_type_vxlan.py neutron/tests/unit/plugins/ml2/drivers/agent/__init__.py 
neutron/tests/unit/plugins/ml2/drivers/agent/test__agent_manager_base.py neutron/tests/unit/plugins/ml2/drivers/agent/test__common_agent.py neutron/tests/unit/plugins/ml2/drivers/agent/test_capabilities.py neutron/tests/unit/plugins/ml2/drivers/l2pop/__init__.py neutron/tests/unit/plugins/ml2/drivers/l2pop/test_db.py neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/__init__.py neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc_base.py neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/test_l2population_rpc.py neutron/tests/unit/plugins/ml2/drivers/linuxbridge/__init__.py neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/__init__.py neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/test_arp_protect.py neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/test_linuxbridge_agent_extension_api.py neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/test_linuxbridge_neutron_agent.py neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/__init__.py neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/test_qos_driver.py neutron/tests/unit/plugins/ml2/drivers/linuxbridge/mech_driver/__init__.py neutron/tests/unit/plugins/ml2/drivers/linuxbridge/mech_driver/test_mech_linuxbridge.py neutron/tests/unit/plugins/ml2/drivers/macvtap/__init__.py neutron/tests/unit/plugins/ml2/drivers/macvtap/test_macvtap_common.py neutron/tests/unit/plugins/ml2/drivers/macvtap/agent/__init__.py neutron/tests/unit/plugins/ml2/drivers/macvtap/agent/test_macvtap_neutron_agent.py neutron/tests/unit/plugins/ml2/drivers/macvtap/mech_driver/__init__.py neutron/tests/unit/plugins/ml2/drivers/macvtap/mech_driver/test_mech_macvtap.py neutron/tests/unit/plugins/ml2/drivers/mech_sriov/__init__.py neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/__init__.py neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_eswitch_manager.py neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_pci_lib.py neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_sriov_nic_agent.py neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/common/__init__.py neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/common/test_config.py neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/__init__.py neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/test_qos_driver.py neutron/tests/unit/plugins/ml2/drivers/mech_sriov/mech_driver/__init__.py neutron/tests/unit/plugins/ml2/drivers/mech_sriov/mech_driver/test_mech_sriov_nic_switch.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/__init__.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/__init__.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/fake_oflib.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/ovs_test_base.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_agent_extension_api.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_capabilities.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_vlanmanager.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/__init__.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/test_qos_driver.py 
neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/__init__.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/test_br_cookie.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/__init__.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge_test_base.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_int.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_phys.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_tun.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_ofswitch.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_ovs_bridge.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/mech_driver/__init__.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/mech_driver/test_mech_openvswitch.py neutron/tests/unit/plugins/ml2/drivers/ovn/__init__.py neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/__init__.py neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/__init__.py neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_commands.py neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_impl_idl_ovn.py neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovsdb_monitor.py neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/schemas/ovn-nb.ovsschema neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/schemas/ovn-sb.ovsschema neutron/tests/unit/plugins/ml2/extensions/__init__.py neutron/tests/unit/plugins/ml2/extensions/fake_extension.py neutron/tests/unit/plugins/ml2/extensions/test_data_plane_status.py neutron/tests/unit/plugins/ml2/extensions/test_dns_integration.py neutron/tests/unit/plugins/ml2/extensions/test_port_security.py neutron/tests/unit/plugins/ml2/extensions/test_tag_ports_during_bulk_creation.py neutron/tests/unit/plugins/ml2/extensions/test_uplink_status_propagation.py neutron/tests/unit/privileged/__init__.py neutron/tests/unit/privileged/agent/__init__.py neutron/tests/unit/privileged/agent/linux/__init__.py neutron/tests/unit/privileged/agent/linux/test_ip_lib.py neutron/tests/unit/privileged/agent/linux/test_netlink_lib.py neutron/tests/unit/quota/__init__.py neutron/tests/unit/quota/test_resource.py neutron/tests/unit/quota/test_resource_registry.py neutron/tests/unit/scheduler/__init__.py neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py neutron/tests/unit/scheduler/test_l3_agent_scheduler.py neutron/tests/unit/scheduler/test_l3_ovn_scheduler.py neutron/tests/unit/services/__init__.py neutron/tests/unit/services/test_provider_configuration.py neutron/tests/unit/services/auto_allocate/__init__.py neutron/tests/unit/services/auto_allocate/test_db.py neutron/tests/unit/services/conntrack_helper/__init__.py neutron/tests/unit/services/conntrack_helper/test_plugin.py neutron/tests/unit/services/l3_router/__init__.py neutron/tests/unit/services/l3_router/test_l3_router_plugin.py neutron/tests/unit/services/l3_router/service_providers/__init__.py neutron/tests/unit/services/l3_router/service_providers/test_driver_controller.py neutron/tests/unit/services/logapi/__init__.py neutron/tests/unit/services/logapi/base.py 
neutron/tests/unit/services/logapi/test_logging_plugin.py neutron/tests/unit/services/logapi/agent/__init__.py neutron/tests/unit/services/logapi/agent/test_log_extension.py neutron/tests/unit/services/logapi/agent/l3/__init__.py neutron/tests/unit/services/logapi/agent/l3/test_base.py neutron/tests/unit/services/logapi/common/__init__.py neutron/tests/unit/services/logapi/common/test_db_api.py neutron/tests/unit/services/logapi/common/test_sg_callback.py neutron/tests/unit/services/logapi/common/test_sg_validate.py neutron/tests/unit/services/logapi/common/test_snat_validate.py neutron/tests/unit/services/logapi/common/test_validators.py neutron/tests/unit/services/logapi/drivers/__init__.py neutron/tests/unit/services/logapi/drivers/test_base.py neutron/tests/unit/services/logapi/drivers/test_manager.py neutron/tests/unit/services/logapi/drivers/openvswitch/__init__.py neutron/tests/unit/services/logapi/drivers/openvswitch/test_ovs_firewall_log.py neutron/tests/unit/services/logapi/rpc/__init__.py neutron/tests/unit/services/logapi/rpc/test_server.py neutron/tests/unit/services/metering/__init__.py neutron/tests/unit/services/metering/test_metering_plugin.py neutron/tests/unit/services/metering/agents/__init__.py neutron/tests/unit/services/metering/agents/test_metering_agent.py neutron/tests/unit/services/metering/drivers/__init__.py neutron/tests/unit/services/metering/drivers/test_iptables.py neutron/tests/unit/services/network_segment_range/__init__.py neutron/tests/unit/services/network_segment_range/test_plugin.py neutron/tests/unit/services/ovn_l3/__init__.py neutron/tests/unit/services/ovn_l3/test_plugin.py neutron/tests/unit/services/placement_report/__init__.py neutron/tests/unit/services/placement_report/test_plugin.py neutron/tests/unit/services/portforwarding/__init__.py neutron/tests/unit/services/portforwarding/test_pf_plugin.py neutron/tests/unit/services/qos/__init__.py neutron/tests/unit/services/qos/base.py neutron/tests/unit/services/qos/test_qos_plugin.py neutron/tests/unit/services/qos/drivers/__init__.py neutron/tests/unit/services/qos/drivers/test_manager.py neutron/tests/unit/services/qos/drivers/openvswitch/__init__.py neutron/tests/unit/services/qos/drivers/openvswitch/test_driver.py neutron/tests/unit/services/qos/drivers/ovn/__init__.py neutron/tests/unit/services/qos/drivers/ovn/test_driver.py neutron/tests/unit/services/revisions/__init__.py neutron/tests/unit/services/revisions/test_revision_plugin.py neutron/tests/unit/services/trunk/__init__.py neutron/tests/unit/services/trunk/fakes.py neutron/tests/unit/services/trunk/test_plugin.py neutron/tests/unit/services/trunk/test_rules.py neutron/tests/unit/services/trunk/test_utils.py neutron/tests/unit/services/trunk/drivers/__init__.py neutron/tests/unit/services/trunk/drivers/linuxbridge/__init__.py neutron/tests/unit/services/trunk/drivers/linuxbridge/test_driver.py neutron/tests/unit/services/trunk/drivers/linuxbridge/agent/__init__.py neutron/tests/unit/services/trunk/drivers/linuxbridge/agent/test_driver.py neutron/tests/unit/services/trunk/drivers/linuxbridge/agent/test_trunk_plumber.py neutron/tests/unit/services/trunk/drivers/openvswitch/__init__.py neutron/tests/unit/services/trunk/drivers/openvswitch/test_driver.py neutron/tests/unit/services/trunk/drivers/openvswitch/agent/__init__.py neutron/tests/unit/services/trunk/drivers/openvswitch/agent/test_driver.py neutron/tests/unit/services/trunk/drivers/openvswitch/agent/test_ovsdb_handler.py 
neutron/tests/unit/services/trunk/drivers/openvswitch/agent/test_trunk_manager.py neutron/tests/unit/services/trunk/drivers/ovn/__init__.py neutron/tests/unit/services/trunk/drivers/ovn/test_trunk_driver.py neutron/tests/unit/services/trunk/rpc/__init__.py neutron/tests/unit/services/trunk/rpc/test_agent.py neutron/tests/unit/services/trunk/rpc/test_backend.py neutron/tests/unit/services/trunk/rpc/test_server.py neutron/tests/unit/services/trunk/seg_types/__init__.py neutron/tests/unit/services/trunk/seg_types/test_validators.py neutron/tests/unit/tests/__init__.py neutron/tests/unit/tests/test_base.py neutron/tests/unit/tests/test_post_mortem_debug.py neutron/tests/unit/tests/common/__init__.py neutron/tests/unit/tests/common/test_net_helpers.py neutron/tests/unit/tests/example/README neutron/tests/unit/tests/example/__init__.py neutron/tests/unit/tests/example/dir/__init__.py neutron/tests/unit/tests/example/dir/example_module.py neutron/tests/unit/tests/example/dir/subdir/__init__.py neutron/tests/unit/tests/example/dir/subdir/example_module.py neutron/tests/unit/tests/functional/__init__.py neutron/tests/unit/tests/functional/test_base.py neutron/tests/var/ca.crt neutron/tests/var/certandkey.pem neutron/tests/var/certificate.crt neutron/tests/var/privatekey.key playbooks/add_mariadb_repo.yaml playbooks/configure_functional_job.yaml playbooks/dvr-multinode-scenario-pre-run.yaml playbooks/multinode-setup.yaml playbooks/post_functional_job.yaml playbooks/run_functional_job.yaml playbooks/legacy/neutron-grenade-dvr-multinode/post.yaml playbooks/legacy/neutron-grenade-dvr-multinode/run.yaml playbooks/legacy/neutron-grenade-multinode/post.yaml playbooks/legacy/neutron-grenade-multinode/run.yaml playbooks/legacy/neutron-ovn-grenade/post.yaml playbooks/legacy/neutron-ovn-grenade/run.yaml rally-jobs/README.rst rally-jobs/task-neutron.yaml rally-jobs/extra/README.rst rally-jobs/extra/trunk_scenario.setup rally-jobs/plugins/README.rst rally-jobs/plugins/__init__.py releasenotes/notes/.placeholder releasenotes/notes/1500-default-mtu-b0d6e4ab193b62a4.yaml releasenotes/notes/1500-default-segment-mtu-54e2cf6aea9602d5.yaml releasenotes/notes/404-for-quota-tenant-2c09c16759269b21.yaml releasenotes/notes/Add-support-for-direct-ports-with-QoS-in-OVS-48c78c156606e724.yaml releasenotes/notes/Adds-http_proxy_to_wsgi-middleware-24e8271cbd94ffdf.yaml releasenotes/notes/Dscp-marking-for-linuxbridge-agent-e765d0d934fa4017.yaml releasenotes/notes/Ingress-bandwidth-limit-in-openvswitch-agent-51cda9bb6b511885.yaml releasenotes/notes/Minimizing-lock-granularity-8bc2f893d9389cf8.yaml releasenotes/notes/Neutron-status-upgrade-check-framework-fc34d03c8829672c.yaml releasenotes/notes/QoS-for-linuxbridge-agent-bdb13515aac4e555.yaml releasenotes/notes/QoS-ingress-bandwidth-limit-54cea12dbea71172.yaml releasenotes/notes/Remove-neutron-lbaas-5cbedd7e8033610f.yaml releasenotes/notes/accepted_egress_direct-cc23873e213c6919.yaml releasenotes/notes/access_as_external_rbac-455dc74b9fa22761.yaml releasenotes/notes/add-address-scope-rbac-a903ff28f6457606.yaml releasenotes/notes/add-availability-zone-4440cf00be7c54ba.yaml releasenotes/notes/add-conntrack-workers-89d303e9ec3b4963.yaml releasenotes/notes/add-custom-kill-scripts-af405ba49142d59c.yaml releasenotes/notes/add-description-field-in-port-forwarding-5db3b3f407c7eef4.yaml releasenotes/notes/add-designate-driver-ssl-options-169c299c96f2aff0.yaml releasenotes/notes/add-dhcp_release6-ff1b8d62fd7fe76d.yaml releasenotes/notes/add-dns-domain-to-ports-f71359d75909a2d5.yaml 
releasenotes/notes/add-dscp-for-tunneling-03e28fe7c2f34e86.yaml releasenotes/notes/add-enable-dvr-knob-636268f775bb4569.yaml releasenotes/notes/add-floatingip-pool-api-6927362ef87fdbe5.yaml releasenotes/notes/add-get-me-a-network-56321aeef5389001.yaml releasenotes/notes/add-igmp_snooping_enable-config-option-6a0e15e4ed0a2cf7.yaml releasenotes/notes/add-integration-with-external-dns-f56ec8a4993b1fc4.yaml releasenotes/notes/add-ip-protocols-in-sg-60467a073e771aee.yaml releasenotes/notes/add-keepalived-vrrp-healt-check-f23ed7c853151484.yaml releasenotes/notes/add-log-for-keepalived-state-change-e6d0c4f663776233.yaml releasenotes/notes/add-minimum-bandwidth-support-sriov-63664b89f4dd1c1b.yaml releasenotes/notes/add-multiple-port-bindings-f16eb47ebdddff2d.yaml releasenotes/notes/add-net-mtu-writable-api-extension-f7038f85f3494a74.yaml releasenotes/notes/add-network-segment-range-extension-0893a67cbf3f10fe.yaml releasenotes/notes/add-new-harouter-state-5612fc5b5c2043a5.yaml releasenotes/notes/add-osprofiler-support-7fc2de3001187075.yaml releasenotes/notes/add-placement-api-configuration-options-f1611d0909bf6166.yaml releasenotes/notes/add-port-data-plane-status-12726c964210b374.yaml releasenotes/notes/add-port-forwarding-fip-qos-f808d6b4826fb849.yaml releasenotes/notes/add-port-ip-allocation-attr-294a580641998240.yaml releasenotes/notes/add-port-rebinding-chance-33178b9abacf5804.yaml releasenotes/notes/add-port_details-to-floatingip-fefceab2c740e482.yaml releasenotes/notes/add-propagate_uplink_status-to-port-f4e53395e86eb3cb.yaml releasenotes/notes/add-rbac-qos-8b1154ee756c66df.yaml releasenotes/notes/add-rpc_response_max_timeout-option-cfaad5ef6af31632.yaml releasenotes/notes/add-security-groups-rbac-6f133ec4d40e7641.yaml releasenotes/notes/add-smartnic-support-53d25564bab0cbc5.yaml releasenotes/notes/add-sort-keys-check-for-get-sorts-b9e3e86ddcb3bc3a.yaml releasenotes/notes/add-standard-attr-descriptions-1ba0d7a454c3fd8f.yaml releasenotes/notes/add-standard-attributes-to-segment-d39c4b89988aa701.yaml releasenotes/notes/add-subnet-service-types-bc81f6df9834f96e.yaml releasenotes/notes/add-subnetpool-rbac-2eb2008bd1b27b11.yaml releasenotes/notes/add-tag-all-standardattr-resources-6f757cb39cc1dcfe.yaml releasenotes/notes/add-tags-to-core-resources-b05330a129900609.yaml releasenotes/notes/add-timestamp-fields-f9ab949fc88f05f6.yaml releasenotes/notes/add-wsgi-script-support-e611fa5b5c2043a5.yaml releasenotes/notes/add_dhcp_dnsmasq_t1t2_options-3cef427d8109c165.yaml releasenotes/notes/add_is_default_to_qos_policies-f7c6bbac08d474d5.yaml releasenotes/notes/advanced_image-8abff2ca91de7f6c.yaml releasenotes/notes/advertise_mtu_by_default-d8b0b056a74517b8.yaml releasenotes/notes/advertisement-intervals-for-radvd-configurable-6d85b5fdd97a2742.yaml releasenotes/notes/agent-notification-signature-status-6a9b9dbce9cb9740.yaml releasenotes/notes/allow-non-admins-to-define-external-extra-routes-0d541fc356a5c546.yaml releasenotes/notes/allow-update-subnet-segment-id-association-1fb02ace27e85bb8.yaml releasenotes/notes/allow_port_create_update_shared_owners-2a57b1c72d91ace2.yaml releasenotes/notes/bandwidth-config-ovs-5bede7fb43b0a574.yaml releasenotes/notes/bandwidth-config-sriov-bd8ff8b4d84c8792.yaml releasenotes/notes/bgp-support-ef361825ca63f28b.yaml releasenotes/notes/bug-1311040-dhcp-no-dns-09291c23e2ce800a.yaml releasenotes/notes/bug-1811166-314d4b89de1cc0f1.yaml releasenotes/notes/bug-1843428-mac-addres-case-insensitivity-750299c11b49a9a8.yaml releasenotes/notes/bump-default-quotas-810570badb378c50.yaml 
releasenotes/notes/change-of-default-timeout-b09d11683526e27d.yaml releasenotes/notes/change-oslo-db-defaults-f94df09c30767f95.yaml releasenotes/notes/change-port-forwarding-uniq-constraint-78ba3db20bce5fd2.yaml releasenotes/notes/change-segmentation-id-ovs-a201e0ac1c4d4fb6.yaml releasenotes/notes/change_external_network_bridge_default-5de3a0c19182eb70.yaml releasenotes/notes/clear-allowed-address-pairs-with-none-4757bcca78076c9e.yaml releasenotes/notes/common-agent-extension-api-3fd06ff67329200a.yaml releasenotes/notes/conditional_updates-10b9aa66fd144217.yaml releasenotes/notes/config-file-generation-2eafc6602d57178e.yaml releasenotes/notes/config-host_dvr_for_dhcp-f949aca5bd666e24.yaml releasenotes/notes/config-wsgi-pool-size-a4c06753b79fee6d.yaml releasenotes/notes/coordination-df3c0bf55a0c4863.yaml releasenotes/notes/correlate-address-scope-with-network-ea16e16b0154ac21.yaml releasenotes/notes/custom_ethertypes-eae3fcab3293e3a1.yaml releasenotes/notes/default-local-dns-a1c3fa1451f228fa.yaml releasenotes/notes/default-subnetpool-semantics-1cdc5cdde2be88c2.yaml releasenotes/notes/deprecate-advertise-mtu-51e3f78475a14efc.yaml releasenotes/notes/deprecate-allow-sorting-allow-pagination-4549c92a74cfe15d.yaml releasenotes/notes/deprecate-force_gateway_on_subnet-376855c4e66f4e11.yaml releasenotes/notes/deprecate-gateway_external_network_id-f5c4071cd06714b0.yaml releasenotes/notes/deprecate-get_binding_levels-function-84012e104ac572a1.yaml releasenotes/notes/deprecate-implicit-service-providers-loading-703f984b90351bf0.yaml releasenotes/notes/deprecate-ivs-interface-driver-b68e06a470c65ccb.yaml releasenotes/notes/deprecate-min-l3-agents-per-router-15ddaa4c178b23df.yaml releasenotes/notes/deprecate-network-device-mtu-59b78264c9974808.yaml releasenotes/notes/deprecate-neutron-rootwrap-xen-dom0-124ee3647beecc17.yaml releasenotes/notes/deprecate-of_interface-driver-option-1968f8bf6fcd1a38.yaml releasenotes/notes/deprecate-ovs_integration_bridge-d4d1521c35f999bd.yaml releasenotes/notes/deprecate-ovsdb-interface-b7e7cc5b036e9ef9.yaml releasenotes/notes/deprecate-router_id-34aca9ea5ee9e789.yaml releasenotes/notes/deprecate-send_arp_for_ha-0281853632f58e8d.yaml releasenotes/notes/deprecate-supported_pci_vendor_devs-12279b70a1f1fe8e.yaml releasenotes/notes/deprecate_max_fixed_ips_per_port-5e80518cbf25cfd6.yaml releasenotes/notes/deprecate_neutron_debug-a578e0adfc9cff4c.yaml releasenotes/notes/deprecate_prevent_arp_spoofing_option-a09e673fc8f9fee4.yaml releasenotes/notes/deprecated-driver-e368e0befc9bee4c.yaml releasenotes/notes/designate-driver-keystonev3-8e70d152e84388e0.yaml releasenotes/notes/dhcp-bulk-updates-0150b764bb1b165f.yaml releasenotes/notes/dhcp-dnsmasq-dhcp-host-addr6-list-support-45d104b3f7ce220e.yaml releasenotes/notes/dhcp-domain-removed-cc5bc6e2129fdf7f.yaml releasenotes/notes/dhcp-ipv6-address-update-ff18d1eb0c196bce.yaml releasenotes/notes/dhcp-lease-time-5c504c3730a4f9ea.yaml releasenotes/notes/dhcp-resync-throttle-config-option-9f2375e3baf683ad.yaml releasenotes/notes/direct-physical-vnic-878d15bdb758b70e.yaml releasenotes/notes/dns_domain-1799b939e7248247.yaml releasenotes/notes/dnsmasq-local-service-c8eaa91894a7d6d4.yaml releasenotes/notes/dnsmasq_dns_servers-d729c04887ce67b4.yaml releasenotes/notes/drop-python-2-7-9707a901c7d8eab6.yaml releasenotes/notes/dscp-qos-77ea9b27d3762e48.yaml releasenotes/notes/dvr-configure-centralized-floatingip-with-new-agent-type-05361f1f78853cf7.yaml releasenotes/notes/dvr-fip-namespace-on-all-nodes-c4da7ccd60ee62f5.yaml 
releasenotes/notes/dvr-ha-support-cc67e84d9380cd0b.yaml releasenotes/notes/dvr-ovs-agent-6052a8d60fddde22.yaml releasenotes/notes/dvr-support-live-migration-b818b12bd9cbb518.yaml releasenotes/notes/dvr_handle_unbound_floatingip_port-f12ae806b8be2065.yaml releasenotes/notes/dynamically-resize-agent-greenthreads-c163ab37d36fcafe.yaml releasenotes/notes/enable-bridge-command-openvswitch-agent-d07c0b59ea9f864f.yaml releasenotes/notes/enable-sorting-pagination-754390289d3311fa.yaml releasenotes/notes/end-to-end-mtu-00345fc4282cb8fb.yaml releasenotes/notes/enhance-tags-1f8915fe3e074069.yaml releasenotes/notes/extend-policy-for-extension-resource-owner-check-4a19b84889660506.yaml releasenotes/notes/extend-quota-api-2df3b84309664234.yaml releasenotes/notes/external-ports-03050eda7ffe13d5.yaml releasenotes/notes/external_network_bridge-option-removed-bbf50fb803f04f82.yaml releasenotes/notes/extraroute-atomic-5ae09e3f37c5fbda.yaml releasenotes/notes/fail-on-missing-extensions-bc332124b780875b.yaml releasenotes/notes/fdb_population-70d751c8c2e4395f.yaml releasenotes/notes/fetch-specific-column-in-ovo-69c0b087c8c7ee36.yaml releasenotes/notes/fip-binding-limitation-1d2509950847b085.yaml releasenotes/notes/fip-janitor-53f0d42a7471c5ed.yaml releasenotes/notes/fip-qos-52926bce81c3f8bb.yaml releasenotes/notes/firewall_driver_not_needed_on_server-4159669ad834dea6.yaml releasenotes/notes/fix-co-existence-bug-between-sg-logging-and-fwg-logging-ef16077880d76449.yaml releasenotes/notes/fix-deferred-alloction-when-new-mac-in-same-request-as-binding-data-2a01c1ed1a8eff66.yaml releasenotes/notes/fix-ipv6-auto-allocation-with-segments-b90e99a30d096c9d.yaml releasenotes/notes/fix-mtu-for-existing-networks-5a476cde9bc46a53.yaml releasenotes/notes/fix-net-delete-race-f2fa5bac3ab35a5b.yaml releasenotes/notes/fix-ovsdb-ssl-connection-4058caf4fdcb33ab.yaml releasenotes/notes/fix-remote-security-group-no-port-on-host-9177e66d4b16e90c.yaml releasenotes/notes/fix-security-group-protocol-by-numbers-48afb97ede961716.yaml releasenotes/notes/fix-update-port-fixed-ips-on-routed-provider-networks-c54a54844d9a3926.yaml releasenotes/notes/floatingips-port-forwarding-65efd8c17a16dffc.yaml releasenotes/notes/force-arp-responder-true-for-dvr-5aabbfa51945dd5a.yaml releasenotes/notes/gateway-rate-limit-905bee1ed60c6b8e.yaml releasenotes/notes/get_standard_device_mappings_for_mechdriver-bc039d478ea0b162.yaml releasenotes/notes/hyperv-neutron-agent-decomposition-ae6a052aeb48c6ac.yaml releasenotes/notes/hyperv-security-group-driver-fdbe0c0c292a1505.yaml releasenotes/notes/ib-dhcp-allocation-fix-a4ebe8b55bb2c065.yaml releasenotes/notes/ingress-bandwidth-limit-in-linuxbridge-agent-50a2dad610401474.yaml releasenotes/notes/ip-substring-port-filtering-f5c3d89c4a91e867.yaml releasenotes/notes/iptables-fail-on-missing-sysctl-bridge-firewalling-912f157b5671363f.yaml releasenotes/notes/ipv6_first_ip_address_valid-cd94b47bdcc642cf.yaml releasenotes/notes/ivs-interfacedriver-removal-a9cce87310028b99.yaml releasenotes/notes/keepalived-state-change-server-threads-9ed775e7533dd1a0.yaml releasenotes/notes/l2_adjacency-e6e54e5ff9aad9b7.yaml releasenotes/notes/l3-agent-api-get-router-info-93c316a792a9d87f.yaml releasenotes/notes/l3-agent-extensions-b348ff26aec0fe88.yaml releasenotes/notes/l3-agent-extensions-ha-state-change-f50ae363a53b0f18.yaml releasenotes/notes/l3-agent-extensions-register-router-factory-46a86f845895f4f6.yaml releasenotes/notes/l3-conntrack-helper-5d3148b575c4ad2f.yaml releasenotes/notes/l3_agent_graceful_shutdown-87bf3304e6fab8a5.yaml 
releasenotes/notes/linuxbridge-agent-extensions-66bdf9feee25ef99.yaml releasenotes/notes/linuxbridge-vxlan-udp-ports-73b260efefa15a46.yaml releasenotes/notes/linuxbridge_vxlan_arp_responder-e9ea91552e1b62a7.yaml releasenotes/notes/locate-rp-tree-parent-by-hypervisor-name-3244ed87dc57f950.yaml releasenotes/notes/macvtap-l2-agent-2b551d8ec341196d.yaml releasenotes/notes/macvtap_assigned_vf_check-f4d07660ffd82a24.yaml releasenotes/notes/make-mtu-not-nullable-2b2765bc85379545.yaml releasenotes/notes/make-supported-vnic-types-configurable-for-ovs-fc73422daffd42b0.yaml releasenotes/notes/make-supported-vnic-types-configurable-for-sriov-094f7663e8975e9b.yaml releasenotes/notes/metadata-proxy-header-vulnerability-60c44eb7c76d560c.yaml releasenotes/notes/metering-driver-stevedore-alias-2c4fdb0556205a3a.yaml releasenotes/notes/metering-iptables-driver-load-interface-driver-ca397f1db40ec643.yaml releasenotes/notes/modify-dhcp-behavior-based-on-network-auto-schedule-1ea5e74fd5bb560c.yaml releasenotes/notes/modify_api_rpc_worker_defaults-1acd62728b2b55fa.yaml releasenotes/notes/mtu-selection-and-advertisement-ab29f9ec43140224.yaml releasenotes/notes/netns_cleanup_kill_procs-af88d8c47c07dd9c.yaml releasenotes/notes/network_ip_availability-d64bd7032b3c15ee.yaml releasenotes/notes/network_link_prefix-e3fe37e37ea275b7.yaml releasenotes/notes/new-vif-type-for-pf-passthrough-33ec560b9b5d246f.yaml releasenotes/notes/noneutronversions-fbbdb98f350767d8.yaml releasenotes/notes/notifier-ironic-66391e083d78fee2.yaml releasenotes/notes/of_interface-native-by-default-0c07bdbd7365230a.yaml releasenotes/notes/oslo-cache-cache-url-deprecated-16cd3d335c5962eb.yaml releasenotes/notes/oslo-messaging-notifier-queue-d94677076a1db261.yaml releasenotes/notes/oslo-reports-166a169037bf64f2.yaml releasenotes/notes/oslo.messaging.notify.drivers-abb0d17b9e1bd470.yaml releasenotes/notes/overlay_ip_version-ml2-e6438b570844ef5c.yaml releasenotes/notes/ovn-igmp-snooping-support-1a6ec8e703311fce.yaml releasenotes/notes/ovs-ct-firewall-driver-52a70a6a16d06f59.yaml releasenotes/notes/ovs-dpdk-rep-port-40fe628974040786.yaml releasenotes/notes/ovs-ipv6-tunnel-endpoints-f41b4954a04c43f6.yaml releasenotes/notes/ovs-mac-table-size-config-option-d255d5208650f34b.yaml releasenotes/notes/ovs-make-inactivity-probe-configurable-39d669014d961c5c.yaml releasenotes/notes/ovs_hardware_offload_support-798d3896ab2c4b1d.yaml releasenotes/notes/ovsdb-native-by-default-38835d6963592396.yaml releasenotes/notes/ovsdb_timeout_override_for_ovs_cleanup_tool-e6ed6db258d0819e.yaml releasenotes/notes/path-mtu-back-to-zero-e4f9e8bdd8317ad4.yaml releasenotes/notes/physical_network-aware-dhcp-scheduling-94e9fadc7c7c5fec.yaml releasenotes/notes/pluggable-ipam-is-default-15c2ee15dc5b4a7b.yaml releasenotes/notes/port-mac-address-regenerate-312978c834abaa52.yaml releasenotes/notes/precise-agent-state-transfer-67c771cb1ee04dd0.yaml releasenotes/notes/project_id-d5ea7a42be428230.yaml releasenotes/notes/qos-drivers-refactor-16ece9984958f8a4.yaml releasenotes/notes/qos-for-router-gateway-02340f7aa8be3b0d.yaml releasenotes/notes/qos-min-egress-bw-rule-b1c80f5675a4c1c3.yaml releasenotes/notes/qos-minimum-bw-reject-non-physnet-2f4ccddf484369fd.yaml releasenotes/notes/qos-rule-type-details-api-call-27d792980235aec4.yaml releasenotes/notes/qos-rules-alias-extension-ebf23b87460ee36e.yaml releasenotes/notes/radvd_user-config-option-24730a6d686fee18.yaml releasenotes/notes/reject-min-bw-updates-30bd0e3201dafce1.yaml 
releasenotes/notes/relax-subnetpool-network-affinity-837c1fc28f835de5.yaml releasenotes/notes/remove-advertise_mtu-28933264714453c4.yaml releasenotes/notes/remove-agent_type-config-option-31eea687b4ec2e3a.yaml releasenotes/notes/remove-allow-pagination-allow-sorting-ff23ca5ccb3007b9.yaml releasenotes/notes/remove-driver-60eb7e26d95f7322.yaml releasenotes/notes/remove-force_gateway_on_subnet-77cb79f0b35d0c6d.yaml releasenotes/notes/remove-gateway_external_network_id-config-option-c7aabf2f63004b41.yaml releasenotes/notes/remove-get_binding_levels-c4e8b350a196706a.yaml releasenotes/notes/remove-l2pop-agent_boot_time-0cec3d5908d8c054.yaml releasenotes/notes/remove-min-l3-agents-per-router-27aef7d91dec0348.yaml releasenotes/notes/remove-network_device_mtu-option-a1a96e99dc7f0a02.yaml releasenotes/notes/remove-of_interface-option-531ac8a1c767603a.yaml releasenotes/notes/remove-quota_items-d50b4672dd31ea3e.yaml releasenotes/notes/remove-router_id-b3732089f8f1faa1.yaml releasenotes/notes/remove-send-arp-for-ha-c1b4a926b8e52b8e.yaml releasenotes/notes/remove-subnetpool-config-b15dbe59237aee7e.yaml releasenotes/notes/remove_max_fixed_ips_per_port-64f1fb36748d5756.yaml releasenotes/notes/removed-ovsdb_interface-ovs-vsctl-timeout-a618ec8e27552202.yaml releasenotes/notes/removed_prevent_arp_spoofing-b49e91a92a93e3e1.yaml releasenotes/notes/rename-ovs-vsctl-timeout-9df1967c47f394c0.yaml releasenotes/notes/rename-tenant-to-project-b19a4068f8625969.yaml releasenotes/notes/rename-to-nova-metadata-ip-685fd81618c16d9d.yaml releasenotes/notes/rm-notify-entry-points-aa442134a780469a.yaml releasenotes/notes/routed-networks-hostroutes-a13a9885f0db4f69.yaml releasenotes/notes/security-group-ipv6-icmp-221c59dcaf2caa3c.yaml releasenotes/notes/security-group-port-range-check-73114bdcde459e53.yaml releasenotes/notes/security-group-rule-all-ports-update-2857d80e5742ebc5.yaml releasenotes/notes/security-groups-port-filtering-69d36ac7db90c9e0.yaml releasenotes/notes/segment_mtu_to_global_physnet_mtu-9cee5ff09557edeb.yaml releasenotes/notes/sending-garp-for-l3-ha-c118871833ad8743.yaml releasenotes/notes/service-plugin-dependency-c8bf620b2526b869.yaml releasenotes/notes/set-of-default-qos-burst-value-0790773703fa08fc.yaml releasenotes/notes/setproctitle_workers-bc27a8baa5ef2279.yaml releasenotes/notes/show-all-security-group-rules-for-security-group-owner-6635dd3e4c6ab5ee.yaml releasenotes/notes/sorting-pagination-extensions-e66e99e2a8f5e563.yaml releasenotes/notes/sriov-agent-kernel-3.13-removed-support-8bb00902dd607746.yaml releasenotes/notes/sriov-agent-num-vf-0-0c06424247e7efe0.yaml releasenotes/notes/sriov_allow_use_many_nics_for_one_physnet-3570aa67a60ce6c4.yaml releasenotes/notes/sriov_show_l2_agent_extensions-ca852e155a529e99.yaml releasenotes/notes/stateful-security-group-04b2902ed9c44e4f.yaml releasenotes/notes/stricter-security-group-port-check-in-api-d1fd84d9663e04ab.yaml releasenotes/notes/subnet-dns-publish-fixed-ip-extension-6a5bb42a048a6671.yaml releasenotes/notes/subnet-onboard-e4d09fa403a1053e.yaml releasenotes/notes/support-empty-string-filtering-4a39096b62b9abf2.yaml releasenotes/notes/support-filter-validation-fee2cdeedbe8ad76.yaml releasenotes/notes/switching-to-haproxy-for-metadata-proxy-9d8f7549fadf9182.yaml releasenotes/notes/tag-ports-during-bulk-creation-23161dd39d779e99.yaml releasenotes/notes/terminate-macvtap-agt-when-interface_mapping-not-present-3109faf3b44d366a.yaml releasenotes/notes/timestamp_format_change-73eda78566b4690b.yaml releasenotes/notes/trunk_inherit-455dc74b9fa22dad.yaml 
releasenotes/notes/use-callback-payload-for-AGENT-AFTER_CREATE-and-AFTER_UPDATE-events-839d8dcb0ac5ff26.yaml releasenotes/notes/use-keystoneauth-24f309566001a16b.yaml releasenotes/notes/use-pyroute2-in-ip-lib-558bfea8f14d1fea.yaml releasenotes/notes/vhost-user-reconnect-7650134520022e7d.yaml releasenotes/notes/vlan-aware-vms-aka-trunk-3341cc75ba1bf5b4.yaml releasenotes/notes/vlan-type-conntrack-direct-d3d544f8471ed4ff.yaml releasenotes/notes/vxlan-multicast-groups-distribution-linuxbridge-9337019c961c01a7.yaml releasenotes/notes/web_framework_deprecation-f984b83a1366c5b1.yaml releasenotes/notes/web_framework_removed-6e4c5c7ca506523a.yaml releasenotes/source/README.rst releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/mitaka.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/queens.rst releasenotes/source/rocky.rst releasenotes/source/stein.rst releasenotes/source/train.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po roles/add_mariadb_repo/tasks/main.yaml roles/configure_functional_tests/README.rst roles/configure_functional_tests/defaults/main.yaml roles/configure_functional_tests/tasks/main.yaml roles/fetch_journal_log/README.rst roles/fetch_journal_log/defaults/main.yaml roles/fetch_journal_log/tasks/main.yaml roles/setup_logdir/README.rst roles/setup_logdir/defaults/main.yaml roles/setup_logdir/tasks/main.yaml tools/abandon_old_reviews.sh tools/check_unit_test_structure.sh tools/coding-checks.sh tools/configure_for_func_testing.sh tools/deploy_rootwrap.sh tools/download_gerrit_change.py tools/files_in_patch.py tools/generate_config_file_samples.sh tools/generate_dhclient_script_for_fullstack.sh tools/install_venv.py tools/install_venv_common.py tools/list_moved_globals.py tools/migrate_names.py tools/migrate_names.txt tools/misc-sanity-checks.sh tools/pip_install_src_modules.sh tools/requirements.txt tools/split.sh tools/unassign_bug.py tools/with_venv.sh tools/ovn_migration/README.rst tools/ovn_migration/hosts.sample tools/ovn_migration/migrate-to-ovn.yml tools/ovn_migration/infrared/tripleo-ovn-migration/README.rst tools/ovn_migration/infrared/tripleo-ovn-migration/main.yml tools/ovn_migration/infrared/tripleo-ovn-migration/roles/create-resources/defaults/main.yml tools/ovn_migration/infrared/tripleo-ovn-migration/roles/create-resources/tasks/main.yml tools/ovn_migration/infrared/tripleo-ovn-migration/roles/create-resources/templates/create-resources.sh.j2 tools/ovn_migration/infrared/tripleo-ovn-migration/roles/create-resources/templates/start-pinger.sh.j2 tools/ovn_migration/infrared/tripleo-ovn-migration/roles/prepare-migration/defaults/main.yml tools/ovn_migration/infrared/tripleo-ovn-migration/roles/prepare-migration/tasks/main.yml tools/ovn_migration/infrared/tripleo-ovn-migration/templates/start-ovn-migration.sh.j2 tools/ovn_migration/tripleo_environment/ovn_migration.sh tools/ovn_migration/tripleo_environment/playbooks/ovn-migration.yml tools/ovn_migration/tripleo_environment/playbooks/reduce-dhcp-renewal-time.yml tools/ovn_migration/tripleo_environment/playbooks/roles/backup/tasks/main.yml tools/ovn_migration/tripleo_environment/playbooks/roles/delete-neutron-resources/defaults/main.yml tools/ovn_migration/tripleo_environment/playbooks/roles/delete-neutron-resources/tasks/main.yml 
tools/ovn_migration/tripleo_environment/playbooks/roles/delete-neutron-resources/templates/delete-neutron-resources.sh.j2 tools/ovn_migration/tripleo_environment/playbooks/roles/migration/defaults/main.yml tools/ovn_migration/tripleo_environment/playbooks/roles/migration/tasks/activate-ovn.yml tools/ovn_migration/tripleo_environment/playbooks/roles/migration/tasks/cleanup-dataplane.yml tools/ovn_migration/tripleo_environment/playbooks/roles/migration/tasks/clone-dataplane.yml tools/ovn_migration/tripleo_environment/playbooks/roles/migration/tasks/main.yml tools/ovn_migration/tripleo_environment/playbooks/roles/migration/tasks/sync-dbs.yml tools/ovn_migration/tripleo_environment/playbooks/roles/migration/templates/activate-ovn.sh.j2 tools/ovn_migration/tripleo_environment/playbooks/roles/migration/templates/clone-br-int.sh.j2 tools/ovn_migration/tripleo_environment/playbooks/roles/post-migration/defaults/main.yml tools/ovn_migration/tripleo_environment/playbooks/roles/post-migration/tasks/main.yml tools/ovn_migration/tripleo_environment/playbooks/roles/pre-migration/tasks/main.yml tools/ovn_migration/tripleo_environment/playbooks/roles/resources/cleanup/defaults/main.yml tools/ovn_migration/tripleo_environment/playbooks/roles/resources/cleanup/tasks/main.yml tools/ovn_migration/tripleo_environment/playbooks/roles/resources/cleanup/templates/cleanup-resources.sh.j2 tools/ovn_migration/tripleo_environment/playbooks/roles/resources/create/defaults/main.yml tools/ovn_migration/tripleo_environment/playbooks/roles/resources/create/tasks/main.yml tools/ovn_migration/tripleo_environment/playbooks/roles/resources/create/templates/create-resources.sh.j2 tools/ovn_migration/tripleo_environment/playbooks/roles/resources/validate/defaults/main.yml tools/ovn_migration/tripleo_environment/playbooks/roles/resources/validate/tasks/main.yml tools/ovn_migration/tripleo_environment/playbooks/roles/resources/validate/templates/validate-resources.sh.j2 tools/ovn_migration/tripleo_environment/playbooks/roles/tripleo-update/defaults/main.yml tools/ovn_migration/tripleo_environment/playbooks/roles/tripleo-update/tasks/main.yml tools/ovn_migration/tripleo_environment/playbooks/roles/tripleo-update/templates/generate-ovn-extras.sh.j2 tools/tripleo/ovn.yml vagrant/ovn/README.rst vagrant/ovn/provisioning/boxes.yml vagrant/ovn/provisioning/id_rsa vagrant/ovn/provisioning/id_rsa.pub vagrant/ovn/provisioning/provider-setup.sh vagrant/ovn/provisioning/providers.rb vagrant/ovn/provisioning/setup-base.sh vagrant/ovn/provisioning/setup-compute.sh vagrant/ovn/provisioning/setup-controller.sh vagrant/ovn/provisioning/setup-db.sh vagrant/ovn/provisioning/setup-vtep.sh vagrant/ovn/sparse/README.rst vagrant/ovn/sparse/Vagrantfile vagrant/ovn/sparse/instances.yml zuul.d/base.yaml zuul.d/grenade.yaml zuul.d/project.yaml zuul.d/rally.yaml zuul.d/tempest-multinode.yaml zuul.d/tempest-singlenode.yaml zuul.d/tripleo.yaml././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982290.0 neutron-16.0.0.0b2.dev214/neutron.egg-info/dependency_links.txt0000644000175000017500000000000100000000000024375 0ustar00coreycorey00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982290.0 neutron-16.0.0.0b2.dev214/neutron.egg-info/entry_points.txt0000644000175000017500000003367400000000000023642 0ustar00coreycorey00000000000000[console_scripts] neutron-db-manage = neutron.db.migration.cli:main neutron-debug = neutron.debug.shell:main 
neutron-dhcp-agent = neutron.cmd.eventlet.agents.dhcp:main neutron-ipset-cleanup = neutron.cmd.ipset_cleanup:main neutron-keepalived-state-change = neutron.cmd.keepalived_state_change:main neutron-l3-agent = neutron.cmd.eventlet.agents.l3:main neutron-linuxbridge-agent = neutron.cmd.eventlet.plugins.linuxbridge_neutron_agent:main neutron-linuxbridge-cleanup = neutron.cmd.linuxbridge_cleanup:main neutron-macvtap-agent = neutron.cmd.eventlet.plugins.macvtap_neutron_agent:main neutron-metadata-agent = neutron.cmd.eventlet.agents.metadata:main neutron-metering-agent = neutron.cmd.eventlet.services.metering_agent:main neutron-netns-cleanup = neutron.cmd.netns_cleanup:main neutron-openvswitch-agent = neutron.cmd.eventlet.plugins.ovs_neutron_agent:main neutron-ovn-db-sync-util = neutron.cmd.ovn.neutron_ovn_db_sync_util:main neutron-ovn-metadata-agent = neutron.cmd.eventlet.agents.ovn_metadata:main neutron-ovn-migration-mtu = neutron.cmd.ovn.migration_mtu:main neutron-ovs-cleanup = neutron.cmd.ovs_cleanup:main neutron-pd-notify = neutron.cmd.pd_notify:main neutron-rootwrap = oslo_rootwrap.cmd:main neutron-rootwrap-daemon = oslo_rootwrap.cmd:daemon neutron-rpc-server = neutron.cmd.eventlet.server:main_rpc_eventlet neutron-sanity-check = neutron.cmd.sanity_check:main neutron-server = neutron.cmd.eventlet.server:main neutron-sriov-nic-agent = neutron.cmd.eventlet.plugins.sriov_nic_neutron_agent:main neutron-status = neutron.cmd.status:main neutron-usage-audit = neutron.cmd.eventlet.usage_audit:main [neutron.agent.firewall_drivers] iptables = neutron.agent.linux.iptables_firewall:IptablesFirewallDriver iptables_hybrid = neutron.agent.linux.iptables_firewall:OVSHybridIptablesFirewallDriver noop = neutron.agent.firewall:NoopFirewallDriver openvswitch = neutron.agent.linux.openvswitch_firewall:OVSFirewallDriver [neutron.agent.l2.extensions] fdb = neutron.agent.l2.extensions.fdb_population:FdbPopulationAgentExtension log = neutron.services.logapi.agent.log_extension:LoggingExtension qos = neutron.agent.l2.extensions.qos:QosAgentExtension [neutron.agent.l3.extensions] conntrack_helper = neutron.agent.l3.extensions.conntrack_helper:ConntrackHelperAgentExtension fip_qos = neutron.agent.l3.extensions.qos.fip:FipQosAgentExtension gateway_ip_qos = neutron.agent.l3.extensions.qos.gateway_ip:RouterGatewayIPQosAgentExtension port_forwarding = neutron.agent.l3.extensions.port_forwarding:PortForwardingAgentExtension snat_log = neutron.agent.l3.extensions.snat_log:SNATLoggingExtension [neutron.agent.linux.pd_drivers] dibbler = neutron.agent.linux.dibbler:PDDibbler [neutron.core_plugins] ml2 = neutron.plugins.ml2.plugin:Ml2Plugin [neutron.db.alembic_migrations] neutron = neutron.db.migration:alembic_migrations [neutron.interface_drivers] linuxbridge = neutron.agent.linux.interface:BridgeInterfaceDriver null = neutron.agent.linux.interface:NullDriver openvswitch = neutron.agent.linux.interface:OVSInterfaceDriver [neutron.ipam_drivers] fake = neutron.tests.unit.ipam.fake_driver:FakeDriver internal = neutron.ipam.drivers.neutrondb_ipam.driver:NeutronDbPool [neutron.ml2.extension_drivers] data_plane_status = neutron.plugins.ml2.extensions.data_plane_status:DataPlaneStatusExtensionDriver dns = neutron.plugins.ml2.extensions.dns_integration:DNSExtensionDriverML2 dns_domain_ports = neutron.plugins.ml2.extensions.dns_integration:DNSDomainPortsExtensionDriver port_security = neutron.plugins.ml2.extensions.port_security:PortSecurityExtensionDriver qos = neutron.plugins.ml2.extensions.qos:QosExtensionDriver 
subnet_dns_publish_fixed_ip = neutron.plugins.ml2.extensions.subnet_dns_publish_fixed_ip:SubnetDNSPublishFixedIPExtensionDriver tag_ports_during_bulk_creation = neutron.plugins.ml2.extensions.tag_ports_during_bulk_creation:TagPortsDuringBulkCreationExtensionDriver test = neutron.tests.unit.plugins.ml2.drivers.ext_test:TestExtensionDriver testdb = neutron.tests.unit.plugins.ml2.drivers.ext_test:TestDBExtensionDriver uplink_status_propagation = neutron.plugins.ml2.extensions.uplink_status_propagation:UplinkStatusPropagationExtensionDriver [neutron.ml2.mechanism_drivers] another_fake_agent = neutron.tests.unit.plugins.ml2.drivers.mech_fake_agent:AnotherFakeAgentMechanismDriver fake_agent = neutron.tests.unit.plugins.ml2.drivers.mech_fake_agent:FakeAgentMechanismDriver fake_agent_l3 = neutron.tests.unit.plugins.ml2.drivers.mech_fake_agent:FakeAgentMechanismDriverL3 faulty_agent = neutron.tests.unit.plugins.ml2.drivers.mech_faulty_agent:FaultyAgentMechanismDriver l2population = neutron.plugins.ml2.drivers.l2pop.mech_driver:L2populationMechanismDriver linuxbridge = neutron.plugins.ml2.drivers.linuxbridge.mech_driver.mech_linuxbridge:LinuxbridgeMechanismDriver logger = neutron.tests.unit.plugins.ml2.drivers.mechanism_logger:LoggerMechanismDriver macvtap = neutron.plugins.ml2.drivers.macvtap.mech_driver.mech_macvtap:MacvtapMechanismDriver openvswitch = neutron.plugins.ml2.drivers.openvswitch.mech_driver.mech_openvswitch:OpenvswitchMechanismDriver ovn = neutron.plugins.ml2.drivers.ovn.mech_driver.mech_driver:OVNMechanismDriver ovn-sync = neutron.cmd.ovn.neutron_ovn_db_sync_util:OVNMechanismDriver sriovnicswitch = neutron.plugins.ml2.drivers.mech_sriov.mech_driver.mech_driver:SriovNicSwitchMechanismDriver test = neutron.tests.unit.plugins.ml2.drivers.mechanism_test:TestMechanismDriver test_with_agent = neutron.tests.unit.plugins.ml2.drivers.mechanism_test:TestMechanismDriverWithAgent [neutron.ml2.type_drivers] flat = neutron.plugins.ml2.drivers.type_flat:FlatTypeDriver geneve = neutron.plugins.ml2.drivers.type_geneve:GeneveTypeDriver gre = neutron.plugins.ml2.drivers.type_gre:GreTypeDriver local = neutron.plugins.ml2.drivers.type_local:LocalTypeDriver vlan = neutron.plugins.ml2.drivers.type_vlan:VlanTypeDriver vxlan = neutron.plugins.ml2.drivers.type_vxlan:VxlanTypeDriver [neutron.objects] AddressScope = neutron.objects.address_scope:AddressScope Agent = neutron.objects.agent:Agent AllowedAddressPair = neutron.objects.port.extensions.allowedaddresspairs:AllowedAddressPair AutoAllocatedTopology = neutron.objects.auto_allocate:AutoAllocatedTopology ConntrackHelper = neutron.objects.conntrack_helper:ConntrackHelper DNSNameServer = neutron.objects.subnet:DNSNameServer DVRMacAddress = neutron.objects.router:DVRMacAddress DefaultSecurityGroup = neutron.objects.securitygroup:DefaultSecurityGroup DistributedPortBinding = neutron.objects.ports:DistributedPortBinding ExternalNetwork = neutron.objects.network:ExternalNetwork ExtraDhcpOpt = neutron.objects.port.extensions.extra_dhcp_opt:ExtraDhcpOpt Flavor = neutron.objects.flavor:Flavor FlavorServiceProfileBinding = neutron.objects.flavor:FlavorServiceProfileBinding FloatingIP = neutron.objects.router:FloatingIP FloatingIPDNS = neutron.objects.floatingip:FloatingIPDNS IPAllocation = neutron.objects.ports:IPAllocation IPAllocationPool = neutron.objects.subnet:IPAllocationPool IpamAllocation = neutron.objects.ipam:IpamAllocation IpamAllocationPool = neutron.objects.ipam:IpamAllocationPool IpamSubnet = neutron.objects.ipam:IpamSubnet L3HARouterAgentPortBinding = 
neutron.objects.l3_hamode:L3HARouterAgentPortBinding L3HARouterNetwork = neutron.objects.l3_hamode:L3HARouterNetwork L3HARouterVRIdAllocation = neutron.objects.l3_hamode:L3HARouterVRIdAllocation Log = neutron.objects.logapi.logging_resource:Log MeteringLabel = neutron.objects.metering:MeteringLabel MeteringLabelRule = neutron.objects.metering:MeteringLabelRule Network = neutron.objects.network:Network NetworkDNSDomain = neutron.objects.network:NetworkDNSDomain NetworkDhcpAgentBinding = neutron.objects.network:NetworkDhcpAgentBinding NetworkPortSecurity = neutron.objects.network:NetworkPortSecurity NetworkRBAC = neutron.objects.network:NetworkRBAC NetworkSegment = neutron.objects.network:NetworkSegment NetworkSegmentRange = neutron.objects.network_segment_range:NetworkSegmentRange Port = neutron.objects.ports:Port PortBinding = neutron.objects.ports:PortBinding PortBindingLevel = neutron.objects.ports:PortBindingLevel PortDNS = neutron.objects.ports:PortDNS PortDataPlaneStatus = neutron.objects.port.extensions.data_plane_status:PortDataPlaneStatus PortForwarding = neutron.objects.port_forwarding:PortForwarding PortSecurity = neutron.objects.port.extensions.port_security:PortSecurity ProviderResourceAssociation = neutron.objects.servicetype:ProviderResourceAssociation ProvisioningBlock = neutron.objects.provisioning_blocks:ProvisioningBlock QosBandwidthLimitRule = neutron.objects.qos.rule:QosBandwidthLimitRule QosDscpMarkingRule = neutron.objects.qos.rule:QosDscpMarkingRule QosMinimumBandwidthRule = neutron.objects.qos.rule:QosMinimumBandwidthRule QosPolicy = neutron.objects.qos.policy:QosPolicy QosPolicyDefault = neutron.objects.qos.policy:QosPolicyDefault QosPolicyFloatingIPBinding = neutron.objects.qos.binding:QosPolicyFloatingIPBinding QosPolicyNetworkBinding = neutron.objects.qos.binding:QosPolicyNetworkBinding QosPolicyPortBinding = neutron.objects.qos.binding:QosPolicyPortBinding QosPolicyRBAC = neutron.objects.qos.policy:QosPolicyRBAC QosPolicyRouterGatewayIPBinding = neutron.objects.qos.binding:QosPolicyRouterGatewayIPBinding QosRule = neutron.objects.qos.rule:QosRule QosRuleType = neutron.objects.qos.rule_type:QosRuleType QosRuleTypeDriver = neutron.objects.qos.rule_type:QosRuleTypeDriver Quota = neutron.objects.quota:Quota QuotaUsage = neutron.objects.quota:QuotaUsage Reservation = neutron.objects.quota:Reservation ResourceDelta = neutron.objects.quota:ResourceDelta Route = neutron.objects.subnet:Route Router = neutron.objects.router:Router RouterExtraAttributes = neutron.objects.router:RouterExtraAttributes RouterL3AgentBinding = neutron.objects.l3agent:RouterL3AgentBinding RouterPort = neutron.objects.router:RouterPort RouterRoute = neutron.objects.router:RouterRoute SecurityGroup = neutron.objects.securitygroup:SecurityGroup SecurityGroupPortBinding = neutron.objects.ports:SecurityGroupPortBinding SecurityGroupRule = neutron.objects.securitygroup:SecurityGroupRule SegmentHostMapping = neutron.objects.network:SegmentHostMapping ServiceProfile = neutron.objects.flavor:ServiceProfile StandardAttribute = neutron.objects.stdattrs:StandardAttribute SubPort = neutron.objects.trunk:SubPort Subnet = neutron.objects.subnet:Subnet SubnetPool = neutron.objects.subnetpool:SubnetPool SubnetPoolPrefix = neutron.objects.subnetpool:SubnetPoolPrefix SubnetServiceType = neutron.objects.subnet:SubnetServiceType Tag = neutron.objects.tag:Tag Trunk = neutron.objects.trunk:Trunk [neutron.policies] neutron = neutron.conf.policies:list_rules [neutron.qos.agent_drivers] linuxbridge = 
neutron.plugins.ml2.drivers.linuxbridge.agent.extension_drivers.qos_driver:QosLinuxbridgeAgentDriver ovs = neutron.plugins.ml2.drivers.openvswitch.agent.extension_drivers.qos_driver:QosOVSAgentDriver sriov = neutron.plugins.ml2.drivers.mech_sriov.agent.extension_drivers.qos_driver:QosSRIOVAgentDriver [neutron.service_plugins] auto_allocate = neutron.services.auto_allocate.plugin:Plugin conntrack_helper = neutron.services.conntrack_helper.plugin:Plugin dummy = neutron.tests.unit.dummy_plugin:DummyServicePlugin flavors = neutron.services.flavors.flavors_plugin:FlavorsPlugin log = neutron.services.logapi.logging_plugin:LoggingPlugin loki = neutron.services.loki.loki_plugin:LokiPlugin metering = neutron.services.metering.metering_plugin:MeteringPlugin network_ip_availability = neutron.services.network_ip_availability.plugin:NetworkIPAvailabilityPlugin network_segment_range = neutron.services.network_segment_range.plugin:NetworkSegmentRangePlugin ovn-router = neutron.services.ovn_l3.plugin:OVNL3RouterPlugin placement = neutron.services.placement_report.plugin:PlacementReportPlugin port_forwarding = neutron.services.portforwarding.pf_plugin:PortForwardingPlugin qos = neutron.services.qos.qos_plugin:QoSPlugin revisions = neutron.services.revisions.revision_plugin:RevisionPlugin router = neutron.services.l3_router.l3_router_plugin:L3RouterPlugin segments = neutron.services.segments.plugin:Plugin tag = neutron.services.tag.tag_plugin:TagPlugin timestamp = neutron.services.timestamp.timestamp_plugin:TimeStampPlugin trunk = neutron.services.trunk.plugin:TrunkPlugin [neutron.services.external_dns_drivers] designate = neutron.services.externaldns.drivers.designate.driver:Designate [neutron.services.logapi.drivers] ovs = neutron.services.logapi.drivers.openvswitch.ovs_firewall_log:OVSFirewallLoggingDriver [neutron.services.metering_drivers] iptables = neutron.services.metering.drivers.iptables.iptables_driver:IptablesMeteringDriver noop = neutron.services.metering.drivers.noop.noop_driver:NoopMeteringDriver [neutron.status.upgrade.checks] neutron = neutron.cmd.upgrade_checks.checks:CoreChecks [oslo.config.opts] ironic.auth = neutron.opts:list_ironic_auth_opts neutron = neutron.opts:list_opts neutron.agent = neutron.opts:list_agent_opts neutron.az.agent = neutron.opts:list_az_agent_opts neutron.base.agent = neutron.opts:list_base_agent_opts neutron.db = neutron.opts:list_db_opts neutron.dhcp.agent = neutron.opts:list_dhcp_agent_opts neutron.extensions = neutron.opts:list_extension_opts neutron.l3.agent = neutron.opts:list_l3_agent_opts neutron.metadata.agent = neutron.opts:list_metadata_agent_opts neutron.metering.agent = neutron.opts:list_metering_agent_opts neutron.ml2 = neutron.opts:list_ml2_conf_opts neutron.ml2.linuxbridge.agent = neutron.opts:list_linux_bridge_opts neutron.ml2.macvtap.agent = neutron.opts:list_macvtap_opts neutron.ml2.ovn = neutron.conf.plugins.ml2.drivers.ovn.ovn_conf:list_opts neutron.ml2.ovs.agent = neutron.opts:list_ovs_opts neutron.ml2.sriov.agent = neutron.opts:list_sriov_agent_opts neutron.ml2.xenapi = neutron.opts:list_xenapi_opts neutron.ovn.metadata.agent = neutron.conf.agent.ovn.metadata.config:list_metadata_agent_opts nova.auth = neutron.opts:list_auth_opts [oslo.config.opts.defaults] neutron = neutron.common.config:set_cors_middleware_defaults [oslo.policy.enforcer] neutron = neutron.policy:get_enforcer [oslo.policy.policies] neutron = neutron.conf.policies:list_rules [wsgi_scripts] neutron-api = neutron.server:get_application 
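Each name above maps a short alias to a Python import path; this is how Neutron's console scripts and pluggable drivers are discovered at runtime. As a minimal sketch (assuming the neutron package and stevedore, one of its listed dependencies, are installed), loading a driver from one of these groups looks like this::

    # Load a driver registered in the entry-point listing above via
    # stevedore, the library Neutron uses for plugin loading. The
    # namespace and name are taken verbatim from the
    # [neutron.ml2.type_drivers] section.
    from stevedore import driver

    mgr = driver.DriverManager(
        namespace='neutron.ml2.type_drivers',  # the ini section name
        name='vxlan',                          # the alias left of '='
        invoke_on_load=False,                  # import the class, don't call it
    )
    # mgr.driver is now the class referenced by
    # neutron.plugins.ml2.drivers.type_vxlan:VxlanTypeDriver
    print(mgr.driver)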
neutron-16.0.0.0b2.dev214/neutron.egg-info/not-zip-safe
neutron-16.0.0.0b2.dev214/neutron.egg-info/pbr.json {"git_version": "5f42488a9a", "is_release": false}
neutron-16.0.0.0b2.dev214/neutron.egg-info/requires.txt Jinja2>=2.10 Paste>=2.0.2 PasteDeploy>=1.5.0 Routes>=2.3.1 SQLAlchemy>=1.2.0 WebOb>=1.8.2 alembic>=0.8.10 debtcollector>=1.19.0 decorator>=3.4.0 eventlet!=0.18.3,!=0.20.1,>=0.18.2 futurist>=1.2.0 httplib2>=0.9.1 keystoneauth1>=3.14.0 keystonemiddleware>=4.17.0 netaddr>=0.7.18 netifaces>=0.10.4 neutron-lib>=2.2.0 openstacksdk>=0.31.2 os-ken>=0.3.0 os-vif>=1.15.1 os-xenapi>=0.3.1 oslo.cache>=1.26.0 oslo.concurrency>=3.26.0 oslo.config>=5.2.0 oslo.context>=2.19.2 oslo.db>=4.37.0 oslo.i18n>=3.15.3 oslo.log>=3.36.0 oslo.messaging>=5.29.0 oslo.middleware>=3.31.0 oslo.policy>=1.30.0 oslo.privsep>=1.32.0 oslo.reports>=1.18.0 oslo.rootwrap>=5.8.0 oslo.serialization!=2.19.1,>=2.18.0 oslo.service!=1.28.1,>=1.24.0 oslo.upgradecheck>=0.1.0 oslo.utils>=3.33.0 oslo.versionedobjects>=1.35.1 osprofiler>=2.3.0 ovs>=2.8.0 ovsdbapp>=1.0.0 pbr>=4.0.0 pecan>=1.3.2 psutil>=3.2.2 pyOpenSSL>=17.1.0 pyroute2>=0.5.7 python-designateclient>=2.7.0 python-neutronclient>=6.7.0 python-novaclient>=9.1.0 requests>=2.14.2 six>=1.10.0 stevedore>=1.20.0 tenacity>=4.4.0 tooz>=1.58.0
neutron-16.0.0.0b2.dev214/neutron.egg-info/top_level.txt neutron
neutron-16.0.0.0b2.dev214/playbooks/add_mariadb_repo.yaml - hosts: all roles: - add_mariadb_repo
neutron-16.0.0.0b2.dev214/playbooks/configure_functional_job.yaml - hosts: all roles: - setup_logdir - configure_functional_tests
neutron-16.0.0.0b2.dev214/playbooks/dvr-multinode-scenario-pre-run.yaml - hosts: all roles: - multi-node-setup
././@PaxHeader0000000000000000000000000000003400000000000011452
xustar000000000000000028 mtime=1586982291.4910462 neutron-16.0.0.0b2.dev214/playbooks/legacy/neutron-grenade-dvr-multinode/0000755000175000017500000000000000000000000026276 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/playbooks/legacy/neutron-grenade-dvr-multinode/post.yaml0000644000175000017500000000063300000000000030151 0ustar00coreycorey00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/playbooks/legacy/neutron-grenade-dvr-multinode/run.yaml0000644000175000017500000000442200000000000027770 0ustar00coreycorey00000000000000- hosts: primary name: Autoconverted job legacy-grenade-dsvm-neutron-dvr-multinode from old job gate-grenade-dsvm-neutron-dvr-multinode-ubuntu-xenial-nv tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ https://opendev.org \ openstack/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PYTHONUNBUFFERED=true export DEVSTACK_GATE_NEUTRON=1 export DEVSTACK_GATE_CONFIGDRIVE=0 export DEVSTACK_GATE_GRENADE=pullup export DEVSTACK_GATE_USE_PYTHON3=True # Test DVR upgrade on multinode export PROJECTS="openstack/grenade $PROJECTS" export DEVSTACK_GATE_NEUTRON_DVR=1 export BRANCH_OVERRIDE=default if [ "$BRANCH_OVERRIDE" != "default" ] ; then export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE fi export DEVSTACK_GATE_TOPOLOGY="multinode" # Disable some services to use less memory # Cinder-backup export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service c-bak" # Etcd export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service etcd3" # Swift export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service s-account" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service s-container" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service s-container-sync" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service s-object" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service s-proxy" cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4910462 neutron-16.0.0.0b2.dev214/playbooks/legacy/neutron-grenade-multinode/0000755000175000017500000000000000000000000025505 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/playbooks/legacy/neutron-grenade-multinode/post.yaml0000644000175000017500000000063300000000000027360 0ustar00coreycorey00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir 
}}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/playbooks/legacy/neutron-grenade-multinode/run.yaml0000644000175000017500000000436500000000000027205 0ustar00coreycorey00000000000000- hosts: primary name: Autoconverted job legacy-grenade-dsvm-neutron-multinode from old job gate-grenade-dsvm-neutron-multinode-ubuntu-xenial tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ https://opendev.org \ openstack/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PYTHONUNBUFFERED=true export DEVSTACK_GATE_NEUTRON=1 export DEVSTACK_GATE_CONFIGDRIVE=0 export DEVSTACK_GATE_GRENADE=pullup export DEVSTACK_GATE_USE_PYTHON3=True export PROJECTS="openstack/grenade $PROJECTS" # Default to non DVR export DEVSTACK_GATE_NEUTRON_DVR=0 export BRANCH_OVERRIDE=default if [ "$BRANCH_OVERRIDE" != "default" ] ; then export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE fi export DEVSTACK_GATE_TOPOLOGY="multinode" # Disable some services to use less memory # Cinder export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service c-bak" # Etcd export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service etcd3" # Swift export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service s-account" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service s-container" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service s-container-sync" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service s-object" export DEVSTACK_LOCAL_CONFIG+=$'\n'"disable_service s-proxy" cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.4950464 neutron-16.0.0.0b2.dev214/playbooks/legacy/neutron-ovn-grenade/0000755000175000017500000000000000000000000024307 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/playbooks/legacy/neutron-ovn-grenade/post.yaml0000644000175000017500000000063300000000000026162 0ustar00coreycorey00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/playbooks/legacy/neutron-ovn-grenade/run.yaml0000644000175000017500000000604300000000000026002 0ustar00coreycorey00000000000000- hosts: all name: Autoconverted job legacy-grenade-dsvm-networking-ovn from old job gate-grenade-dsvm-networking-ovn-ubuntu-xenial-nv tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir 
}}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ https://opendev.org \ openstack/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x cat << 'EOF' >>"/tmp/dg-local.conf" [[local|localrc]] enable_plugin neutron-tempest-plugin https://opendev.org/openstack/neutron-tempest-plugin EOF executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PYTHONUNBUFFERED=true # Because we are testing a non standard project, add # our project repository. This makes zuul do the right # reference magic for testing changes. export PROJECTS="openstack/networking-ovn openstack/neutron-tempest-plugin openstack/grenade $PROJECTS" # TODO(slaweq): this should be probably moved to the grenade repo or # to the neutron devstack plugin export GRENADE_PLUGINRC="enable_grenade_plugin networking-ovn https://opendev.org/openstack/networking-ovn" export DEVSTACK_GATE_GRENADE=pullup export DEVSTACK_GATE_NEUTRON=1 export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_TEMPEST_ALL_PLUGINS=1 export BRANCH_OVERRIDE=default if [ "$BRANCH_OVERRIDE" != "default" ] ; then export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE fi # Keep localrc to be able to set some vars in pre_test_hook export KEEP_LOCALRC=1 function pre_test_hook { if [ -f $BASE/new/networking-ovn/devstack/pre_test_hook.sh ] ; then . $BASE/new/networking-ovn/devstack/pre_test_hook.sh fi } export -f pre_test_hook function post_test_hook { if [ -f $BASE/new/networking-ovn/devstack/post_test_hook.sh ] ; then . 
$BASE/new/networking-ovn/devstack/post_test_hook.sh fi } export -f post_test_hook export DEVSTACK_GATE_SETTINGS=/opt/stack/new/networking-ovn/devstack/devstackgaterc cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/playbooks/multinode-setup.yaml0000644000175000017500000000010500000000000023164 0ustar00coreycorey00000000000000- hosts: all roles: - multi-node-bridge - multi-node-setup ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/playbooks/post_functional_job.yaml0000644000175000017500000000014000000000000024066 0ustar00coreycorey00000000000000- hosts: all roles: - fetch_journal_log - fetch-tox-output - fetch-subunit-output ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/playbooks/run_functional_job.yaml0000644000175000017500000000036700000000000023720 0ustar00coreycorey00000000000000- hosts: all roles: - run-devstack # Run bindep and test-setup after devstack so that they won't interfere - role: bindep bindep_profile: test bindep_dir: "{{ zuul_work_dir }}" - test-setup - ensure-tox - tox ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/plugin.spec0000644000175000017500000000545700000000000017330 0ustar00coreycorey00000000000000--- config: entry_point: ./tools/ovn_migration/infrared/tripleo-ovn-migration/main.yml plugin_type: install subparsers: tripleo-ovn-migration: description: Migrate an existing TripleO overcloud from Neutron ML2OVS plugin to OVN include_groups: ["Ansible options", "Inventory", "Common options", "Answers file"] groups: - title: Containers options: registry-namespace: type: Value help: The alternative docker registry namespace to use for deployment. registry-prefix: type: Value help: The images prefix registry-tag: type: Value help: The images tag registry-mirror: type: Value help: The alternative docker registry to use for deployment. 
- title: Deployment Description options: version: type: Value help: | The product version Numbers are for OSP releases Names are for RDO releases If not given, same version of the undercloud will be used choices: - "7" - "8" - "9" - "10" - "11" - "12" - "13" - "14" - "15" - "16" - kilo - liberty - mitaka - newton - ocata - pike - queens - rocky - stein - train install_from_package: type: Bool help: Install the python-neutron-ovn-migration-tool rpm default: True dvr: type: Bool help: Whether the deployment should use DVR default: False create_resources: type: Bool help: Create resources to measure downtime default: True external_network: type: Value help: External network name to use default: public image_name: type: Value help: Image name to use default: cirros-0.3.5-x86_64-disk.img

neutron-16.0.0.0b2.dev214/rally-jobs/README.rst

Rally job related files
=======================

This directory contains rally tasks and plugins that are run by OpenStack CI.

Structure
---------

* plugins - directory where you can add rally plugins. Almost everything in Rally is a plugin: benchmark contexts, benchmark scenarios, SLA checks, generic cleanup resources, and so on.
* extra - all files from this directory will be copied to the gate nodes, so you can use absolute paths in rally tasks. Files will be located in ~/.rally/extra/*
* neutron-neutron.yaml is a task that is run in the gates against OpenStack with the Neutron service deployed by DevStack

Useful links
------------

* More about Rally: https://rally.readthedocs.io/en/latest/
* Rally release notes: https://rally.readthedocs.io/en/latest/project_info/release_notes/archive.html
* How to add rally-gates: http://rally.readthedocs.io/en/latest/quick_start/gates.html
* About plugins: https://rally.readthedocs.io/en/latest/plugins/index.html
* Plugin samples: https://github.com/openstack/rally/tree/master/samples/plugins

neutron-16.0.0.0b2.dev214/rally-jobs/extra/README.rst

Extra files
===========

All files from this directory will be copied to the gate nodes, so you can use absolute paths in rally tasks.
Files will be in ~/.rally/extra/*

neutron-16.0.0.0b2.dev214/rally-jobs/extra/trunk_scenario.setup enable_service neutron-trunk

neutron-16.0.0.0b2.dev214/rally-jobs/plugins/README.rst

Rally plugins
=============

All *.py modules from this directory will be auto-loaded by Rally, and all plugins will be discoverable. No extra configuration is needed, and there is no difference between writing plugins here and in the Rally code base. Note that it is better to push all interesting and useful benchmarks to the Rally code base, as this simplifies administration for Operators.

neutron-16.0.0.0b2.dev214/rally-jobs/plugins/__init__.py

neutron-16.0.0.0b2.dev214/rally-jobs/task-neutron.yaml {% set floating_network = floating_network or "public" %} {% set image_name = "^(cirros.*-disk|TestVM)$" %} {% set flavor_name = "m1.tiny" %} --- version: 2 title: Rally Task for OpenStack Neutron CI description: > The task contains various scenarios to prevent concurrency issues subtasks: - title: Network related workloads. workloads: - description: Check performance of list_networks action and ensure > network quotas are not exceeded scenario: NeutronNetworks.create_and_list_networks: {} runner: constant: times: 100 concurrency: 20 contexts: users: tenants: 1 users_per_tenant: 1 quotas: neutron: # worst case is other 19 writers have created # resources, but quota reservation hasn't cleared # yet on any of them. This value could be 100 # without concurrency. see bug/1623390 network: 119 sla: max_avg_duration_per_atomic: neutron.list_networks: 15 # reduce as perf is fixed failure_rate: max: 0 - description: Check network update action scenario: NeutronNetworks.create_and_update_networks: network_create_args: {} network_update_args: admin_state_up: False name: "_updated" runner: constant: times: 40 concurrency: 20 contexts: users: tenants: 1 users_per_tenant: 1 quotas: neutron: network: -1 - scenario: NeutronNetworks.create_and_delete_networks: {} runner: constant: times: 40 concurrency: 20 contexts: users: tenants: 1 users_per_tenant: 1 quotas: neutron: network: -1 subnet: -1 - title: Subnet related workloads.
workloads: - scenario: NeutronNetworks.create_and_list_subnets: subnets_per_network: 2 runner: constant: times: 40 concurrency: 20 contexts: users: tenants: 1 users_per_tenant: 1 quotas: neutron: subnet: -1 network: -1 - scenario: NeutronNetworks.create_and_update_subnets: network_create_args: {} subnet_create_args: {} subnet_cidr_start: "1.4.0.0/16" subnets_per_network: 2 subnet_update_args: enable_dhcp: True name: "_subnet_updated" runner: constant: times: 100 concurrency: 20 contexts: users: tenants: 1 users_per_tenant: 5 quotas: neutron: network: -1 subnet: -1 port: -1 - scenario: NeutronNetworks.create_and_delete_subnets: network_create_args: {} subnet_create_args: {} subnet_cidr_start: "1.1.0.0/30" subnets_per_network: 2 runner: constant: times: 40 concurrency: 20 contexts: users: tenants: 1 users_per_tenant: 1 quotas: neutron: network: -1 subnet: -1 - title: Routers related workloads. workloads: - scenario: NeutronNetworks.create_and_list_routers: network_create_args: subnet_create_args: subnet_cidr_start: "1.1.0.0/30" subnets_per_network: 2 router_create_args: runner: constant: times: 40 concurrency: 20 contexts: users: tenants: 1 users_per_tenant: 1 quotas: neutron: network: -1 subnet: -1 router: -1 - scenario: NeutronNetworks.create_and_update_routers: network_create_args: {} subnet_create_args: {} subnet_cidr_start: "1.1.0.0/30" subnets_per_network: 2 router_create_args: {} router_update_args: admin_state_up: False name: "_router_updated" runner: constant: times: 40 concurrency: 20 contexts: users: tenants: 1 users_per_tenant: 1 quotas: neutron: network: -1 subnet: -1 router: -1 - scenario: NeutronNetworks.create_and_delete_routers: network_create_args: {} subnet_create_args: {} subnet_cidr_start: "1.1.0.0/30" subnets_per_network: 2 router_create_args: {} runner: constant: times: 40 concurrency: 20 contexts: users: tenants: 1 users_per_tenant: 1 quotas: neutron: network: -1 subnet: -1 router: -1 - title: Ports related workloads. 
workloads: - description: Check performance of list ports action and ensure > network quotas are not exceeded scenario: NeutronNetworks.create_and_list_ports: network_create_args: port_create_args: ports_per_network: 50 runner: constant: times: 8 concurrency: 4 contexts: users: tenants: 1 users_per_tenant: 1 quotas: neutron: network: -1 subnet: -1 router: -1 # ((ports per net + 1 dhcp) * times) + (concurrency-1) # see bug/1623390 for concurrency explanation port: 811 sla: max_avg_duration_per_atomic: neutron.list_ports: 15 # reduce as perf is fixed failure_rate: max: 0 - scenario: NeutronNetworks.create_and_update_ports: network_create_args: {} port_create_args: {} ports_per_network: 5 port_update_args: admin_state_up: False device_id: "dummy_id" device_owner: "dummy_owner" name: "_port_updated" runner: constant: times: 40 concurrency: 20 contexts: users: tenants: 1 users_per_tenant: 1 quotas: neutron: network: -1 port: -1 - scenario: NeutronNetworks.create_and_bind_ports: ports_per_network: 5 runner: constant: times: 3 concurrency: 2 contexts: users: tenants: 1 users_per_tenant: 1 roles: - admin quotas: neutron: network: -1 subnet: -1 port: -1 network: {} networking_agents: {} - scenario: NeutronNetworks.create_and_delete_ports: network_create_args: {} port_create_args: {} ports_per_network: 5 runner: constant: times: 40 concurrency: 20 contexts: users: tenants: 1 users_per_tenant: 1 quotas: neutron: network: -1 port: -1 - title: Quotas update check scenario: Quotas.neutron_update: max_quota: 1024 runner: constant: times: 40 concurrency: 20 contexts: users: tenants: 20 users_per_tenant: 1 - title: Trunks related workload scenario: NeutronTrunks.create_and_list_trunks: subport_count: 125 runner: constant: times: 4 concurrency: 4 contexts: users: tenants: 1 users_per_tenant: 1 quotas: neutron: network: -1 port: 1000 - title: Floating IP related workloads workloads: - scenario: NeutronNetworks.create_and_delete_floating_ips: floating_network: {{ floating_network }} floating_ip_args: {} runner: constant: times: 10 concurrency: 5 contexts: users: tenants: 2 users_per_tenant: 3 quotas: neutron: floatingip: -1 - scenario: NeutronNetworks.create_and_list_floating_ips: floating_network: {{ floating_network }} floating_ip_args: {} runner: constant: times: 10 concurrency: 5 contexts: users: tenants: 2 users_per_tenant: 3 quotas: neutron: floatingip: -1 - scenario: NeutronNetworks.associate_and_dissociate_floating_ips: floating_network: {{ floating_network }} runner: constant: times: 10 concurrency: 5 contexts: users: tenants: 2 users_per_tenant: 3 quotas: neutron: floatingip: -1 - title: Security Group Related Scenarios workloads: - scenario: NeutronSecurityGroup.create_and_delete_security_group_rule: security_group_args: {} security_group_rule_args: {} runner: constant: times: 50 concurrency: 10 contexts: users: tenants: 2 users_per_tenant: 3 quotas: neutron: security_group: -1 security_group_rule: -1 - scenario: NeutronSecurityGroup.create_and_delete_security_groups: security_group_create_args: {} runner: constant: times: 50 concurrency: 10 contexts: users: tenants: 2 users_per_tenant: 3 quotas: neutron: security_group: -1 - scenario: NeutronSecurityGroup.create_and_list_security_group_rules: security_group_args: {} security_group_rule_args: {} security_group_rules_count: 20 runner: constant: times: 50 concurrency: 10 contexts: users: tenants: 2 users_per_tenant: 3 quotas: neutron: security_group: -1 security_group_rule: -1 - scenario: NeutronSecurityGroup.create_and_list_security_groups: 
security_group_create_args: {} runner: constant: times: 50 concurrency: 10 contexts: users: tenants: 2 users_per_tenant: 3 quotas: neutron: security_group: -1 - scenario: NeutronSecurityGroup.create_and_show_security_group_rule: security_group_args: {} security_group_rule_args: {} runner: constant: times: 50 concurrency: 10 contexts: users: tenants: 2 users_per_tenant: 3 quotas: neutron: security_group: -1 security_group_rule: -1 - scenario: NeutronSecurityGroup.create_and_show_security_group: security_group_create_args: {} runner: constant: times: 50 concurrency: 10 contexts: users: tenants: 2 users_per_tenant: 3 quotas: neutron: security_group: -1 - scenario: NeutronSecurityGroup.create_and_update_security_groups: security_group_create_args: {} security_group_update_args: {} runner: constant: times: 50 concurrency: 10 contexts: users: tenants: 2 users_per_tenant: 3 quotas: neutron: security_group: -1 - title: VM booting workloads workloads: - scenario: NovaServers.boot_and_delete_server: flavor: name: {{flavor_name}} image: name: {{image_name}} auto_assign_nic: true runner: constant: times: 2 concurrency: 2 contexts: users: tenants: 2 users_per_tenant: 2 network: {} sla: # NovaServers.boot_and_delete_server is unstable and frequently # times out when waiting for the VM to become ACTIVE. We run this # scenario for the osprofiler report and we ignore the rally # scenario outcome. Ideally we should eliminate the cause of the # timeouts, but even until then we'll get usable osprofiler # results. failure_rate: max: 100 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.0230422 neutron-16.0.0.0b2.dev214/releasenotes/0000755000175000017500000000000000000000000017634 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5350466 neutron-16.0.0.0b2.dev214/releasenotes/notes/0000755000175000017500000000000000000000000020764 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/.placeholder0000644000175000017500000000000000000000000023235 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/1500-default-mtu-b0d6e4ab193b62a4.yaml0000644000175000017500000000314300000000000026744 0ustar00coreycorey00000000000000--- prelude: > The ML2 plug-in supports calculating the MTU for instances using overlay networks by subtracting the overlay protocol overhead from the value of 'path_mtu', ideally the physical (underlying) network MTU, and providing the smaller value to instances via DHCP. Prior to Mitaka, 'path_mtu' defaults to 0 which disables this feature. In Mitaka, 'path_mtu' defaults to 1500, a typical MTU for physical networks, to improve the "out of box" experience for typical deployments. features: - In Mitaka, the combination of 'path_mtu' defaulting to 1500 and 'advertise_mtu' defaulting to True provides a value of MTU accounting for any overlay protocol overhead on the network to instances using DHCP. For example, an instance attaching to a VXLAN network receives a 1450 MTU from DHCP accounting for 50 bytes of overhead from the VXLAN overlay protocol if using IPv4 endpoints. 
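The 1450-byte figure quoted above is simply the 1500-byte 'path_mtu' minus the VXLAN-over-IPv4 encapsulation headers. A small illustrative calculation of where those 50 bytes come from (an illustration only, not Neutron's actual code)::

    # Why a 1500-byte path_mtu yields a 1450-byte instance MTU on a
    # VXLAN network with IPv4 tunnel endpoints (illustration only).
    OUTER_ETHERNET = 14  # outer Ethernet header
    OUTER_IPV4 = 20      # outer IPv4 header (an IPv6 outer header is 40)
    OUTER_UDP = 8        # outer UDP header
    VXLAN_HEADER = 8     # VXLAN header

    overhead = OUTER_ETHERNET + OUTER_IPV4 + OUTER_UDP + VXLAN_HEADER  # 50
    path_mtu = 1500
    print(path_mtu - overhead)  # -> 1450, the value advertised via DHCP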
issues: - The combination of 'path_mtu' and 'advertise_mtu' only adjusts the MTU for instances rather than all virtual network components between instances and provider/public networks. In particular, setting 'path_mtu' to a value greater than 1500 can cause packet loss even if the physical network supports it. Also, the calculation does not consider additional overhead from IPv6 endpoints. upgrade: - Operators using the ML2 plug-in with 'path_mtu' defaulting to 0 may need to perform a database migration to update the MTU for existing networks and possibly disable existing workarounds for MTU problems such as increasing the physical network MTU to 1550. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/1500-default-segment-mtu-54e2cf6aea9602d5.yaml0000644000175000017500000000174100000000000030422 0ustar00coreycorey00000000000000--- prelude: > The ML2 plug-in supports calculating the MTU for networks that are realized as flat or VLAN networks, by consulting the 'segment_mtu' option. Prior to Mitaka, 'segment_mtu' defaults to 0 which disables this feature. This creates slightly confusing API results when querying Neutron networks, since the plugins that support the MTU API extension would return networks with the MTU equal to zero. Networks with an MTU of zero make little sense, since nothing could ever be transmitted. In Mitaka, 'segment_mtu' now defaults to 1500 which is the standard MTU for Ethernet networks in order to improve the "out of box" experience for typical deployments. features: - In Mitaka, queries to the Networking API for network objects will now return network objects that contain a sane MTU value. upgrade: - Operators using the ML2 plug-in with existing data may need to perform a database migration to update the MTU for existing networks ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/404-for-quota-tenant-2c09c16759269b21.yaml0000644000175000017500000000023200000000000027356 0ustar00coreycorey00000000000000--- features: - | Return code for `quota delete` for a tenant whose quota has not been previously defined has been changed from 204 to 404. ././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/Add-support-for-direct-ports-with-QoS-in-OVS-48c78c156606e724.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/Add-support-for-direct-ports-with-QoS-in-OVS-48c78c15660000644000175000017500000000036000000000000032273 0ustar00coreycorey00000000000000--- other: - | Added QoS support for direct ports in neutron. The support requires Open vSwitch 2.11.0 or newer and is based on Linux kernel 5.4.0 or newer. [`bug 1843165 `_]. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/Adds-http_proxy_to_wsgi-middleware-24e8271cbd94ffdf.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/Adds-http_proxy_to_wsgi-middleware-24e8271cbd94ffdf.yam0000644000175000017500000000107100000000000033020 0ustar00coreycorey00000000000000--- features: - Middleware was added to parse the ``X-Forwarded-Proto`` HTTP header or the Proxy protocol in order to help Neutron respond with the correct URL references when it's put behind a TLS proxy such as ``haproxy``. 
This adds ``http_proxy_to_wsgi`` middleware to the pipeline. This middleware is disabled by default, but can be enabled via a configuration option in the ``[oslo_middleware]`` group. upgrade: - The ``api-paste.ini`` configuration file for the paste pipeline was updated to add the ``http_proxy_to_wsgi`` middleware. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/Dscp-marking-for-linuxbridge-agent-e765d0d934fa4017.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/Dscp-marking-for-linuxbridge-agent-e765d0d934fa4017.yam0000644000175000017500000000011600000000000032342 0ustar00coreycorey00000000000000--- features: - The Linux Bridge agent now supports QoS DSCP marking rules. ././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/Ingress-bandwidth-limit-in-openvswitch-agent-51cda9bb6b511885.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/Ingress-bandwidth-limit-in-openvswitch-agent-51cda9bb6b0000644000175000017500000000013300000000000033163 0ustar00coreycorey00000000000000--- features: - The openvswitch L2 agent now supports bi-directional bandwidth limiting. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/Minimizing-lock-granularity-8bc2f893d9389cf8.yaml0000644000175000017500000000023300000000000031515 0ustar00coreycorey00000000000000--- fixes: - | The coordination lock is now applied only to the resource processing and notification thread functions, minimizing the lock granularity. ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/Neutron-status-upgrade-check-framework-fc34d03c8829672c.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/Neutron-status-upgrade-check-framework-fc34d03c8829672c0000644000175000017500000000120500000000000032517 0ustar00coreycorey00000000000000--- prelude: > Add new tool ``neutron-status upgrade check``. features: - | A new framework for the ``neutron-status upgrade check`` command is added. This framework allows adding various checks which can be run before a Neutron upgrade to ensure that the upgrade can be performed safely. Stadium and 3rd party projects can register their own checks to this new neutron-status CLI tool using entrypoints in the ``neutron.status.upgrade.checks`` namespace. upgrade: - | Operators can now use the new CLI tool ``neutron-status upgrade check`` to check if a Neutron deployment can be safely upgraded from the N-1 to the N release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/QoS-for-linuxbridge-agent-bdb13515aac4e555.yaml0000644000175000017500000000115700000000000031031 0ustar00coreycorey00000000000000--- prelude: > The LinuxBridge agent now supports QoS bandwidth limiting. features: - The LinuxBridge agent can now configure basic bandwidth limiting QoS rules set for ports and networks. It introduces two new config options for the LinuxBridge agent. The first, 'kernel_hz', is the value of the host kernel HZ setting; it is necessary for the proper calculation of the minimum burst value in the tbf qdisc setting. The second, 'tbf_latency', is the latency value to be configured in the tc-tbf setting.
Details about this option can be found in the `tc-tbf manual `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/QoS-ingress-bandwidth-limit-54cea12dbea71172.yaml0000644000175000017500000000035600000000000031364 0ustar00coreycorey00000000000000--- features: - The QoS service plugin now supports a new attribute in ``qos_bandwidth_limit_rule``. This new parameter is called ``direction`` and allows specifying the direction of traffic to which the limit should be applied. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/Remove-neutron-lbaas-5cbedd7e8033610f.yaml0000644000175000017500000000032700000000000030152 0ustar00coreycorey00000000000000--- deprecations: - | Neutron LBaaS has now been retired. References to neutron-lbaas have been removed from neutron. For more information see https://wiki.openstack.org/wiki/Neutron/LBaaS/Deprecation ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/accepted_egress_direct-cc23873e213c6919.yaml0000644000175000017500000000230400000000000030402 0ustar00coreycorey00000000000000--- fixes: - | Bug https://bugs.launchpad.net/neutron/+bug/1732067 described a flooding issue on the neutron-ovs-agent integration bridge, and bug https://bugs.launchpad.net/neutron/+bug/1841622 proposed a solution for it. The accepted egress packets are now handled in the final egress tables (table 61 when the openflow firewall is not enabled, table 94 otherwise) with direct output flows for unicast traffic, with minimal influence on the existing cloud networking. A new config option ``explicitly_egress_direct``, with default value False, was added to distinguish clouds which run the network node mixed with compute services; the upstream neutron CI is an example. In such situations, ``explicitly_egress_direct`` should be set to False, because there are numerous cases involving HA routers which cannot be covered, particularly when you have centralized floating IPs running on such mixed hosts. Otherwise, set ``explicitly_egress_direct`` to True to avoid the flooding. Note also that if your network nodes run networking services only, we recommend disabling all security groups there to get higher performance. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/access_as_external_rbac-455dc74b9fa22761.yaml0000644000175000017500000000156700000000000030637 0ustar00coreycorey00000000000000--- prelude: > External networks can now be controlled using the RBAC framework that was added in Liberty. This allows networks to be made available to specific tenants (as opposed to all tenants) to be used as an external gateway for routers and floating IPs. features: - External networks can now be controlled using the RBAC framework that was added in Liberty. This allows networks to be made available to specific tenants (as opposed to all tenants) to be used as an external gateway for routers and floating IPs. By default this feature will also allow regular tenants to make their networks available as external networks to other individual tenants (or even themselves), but they are prevented from using the wildcard to share to all tenants.
This behavior can be adjusted via policy.json by the operator if desired. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-address-scope-rbac-a903ff28f6457606.yaml0000644000175000017500000000021700000000000030127 0ustar00coreycorey00000000000000--- features: - | Address scope is now supported via the network RBAC mechanism. Please refer to the admin guide for further details.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-availability-zone-4440cf00be7c54ba.yaml0000644000175000017500000000121100000000000030273 0ustar00coreycorey00000000000000--- prelude: > DHCP and L3 Agent scheduling is availability zone aware. features: - A DHCP agent is assigned to an availability zone; the network will be hosted by the DHCP agent with the availability zone specified by the user. - An L3 agent is assigned to an availability zone; the router will be hosted by the L3 agent with the availability zone specified by the user. This supports the use of availability zones with HA routers. DVR isn't supported now because the L3HA and DVR integration isn't finished. other: - Please read the `OpenStack Networking Guide `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-conntrack-workers-89d303e9ec3b4963.yaml0000644000175000017500000000075000000000000030225 0ustar00coreycorey00000000000000--- prelude: > In order to reduce the time spent processing security group updates in the L2 agent, conntrack deletion is now performed in a set of worker threads instead of the main agent thread, so it can return to processing other events quickly. upgrade: - | On an upgrade, conntrack entries will now be cleaned up in a worker thread, instead of in the calling thread. fixes: - | Fixes bug `1745468 <https://bugs.launchpad.net/neutron/+bug/1745468>`_. ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/add-custom-kill-scripts-af405ba49142d59c.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-custom-kill-scripts-af405ba49142d59c.yaml0000644000175000017500000000042700000000000030542 0ustar00coreycorey00000000000000--- features: - | Added support for custom scripts used to kill external processes managed by neutron agents, such as ``dnsmasq`` or ``keepalived``. Such custom scripts, if defined, will be used instead of the default ``kill`` command to kill such external processes. ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/add-description-field-in-port-forwarding-5db3b3f407c7eef4.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-description-field-in-port-forwarding-5db3b3f407c7ee0000644000175000017500000000013300000000000032711 0ustar00coreycorey00000000000000--- features: - | Add a new field ``description`` to the ``PortForwarding`` resource.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-designate-driver-ssl-options-169c299c96f2aff0.yaml0000644000175000017500000000130400000000000032353 0ustar00coreycorey00000000000000--- prelude: > Add options to the designate external DNS driver of neutron for SSL-based connections. This makes it possible to use neutron with designate in scenarios where endpoints are SSL based.
Users can choose to skip cert validation or specify a path to a valid cert in the [designate] section of the neutron.conf file. features: - Two new options are added to the `[designate]` section to support SSL. - The first option, `insecure`, allows skipping SSL validation when creating a keystone session to initiate a designate client. The default value is False, which means the connection is always verified. - The second option, `ca_cert`, allows setting the path to a valid cert file. The default is None. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-dhcp_release6-ff1b8d62fd7fe76d.yaml0000644000175000017500000000165300000000000027564 0ustar00coreycorey00000000000000--- prelude: > - Call the dhcp_release6 command line utility when releasing unused IPv6 leases for DHCPv6 stateful subnets. dhcp_release6 first appeared in dnsmasq 2.76. upgrade: - A version of dnsmasq that includes dhcp_release6 should be installed on systems running the DHCP agent. Failure to do this could cause DHCPv6 stateful addressing to not function properly. - The rootwrap filters file dhcp.filters must be updated to include dhcp_release6, otherwise trying to run the utility will result in a NoFilterMatched exception. issues: - Absence of dhcp_release6 when DHCPv6 stateful addressing is in use may lead to bug `1521666 <https://bugs.launchpad.net/neutron/+bug/1521666>`_. Neutron supports dhcp_release6 now, but if the tool is not available this leads to increased log warnings. Read bug report `1622002 <https://bugs.launchpad.net/neutron/+bug/1622002>`_ for more details.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-dns-domain-to-ports-f71359d75909a2d5.yaml0000644000175000017500000000034300000000000030306 0ustar00coreycorey00000000000000--- features: - Ports now have a ``dns_domain`` attribute. A port's ``dns_domain`` attribute has precedence over the network's ``dns_domain`` from the point of view of publishing it to the external DNS service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-dscp-for-tunneling-03e28fe7c2f34e86.yaml0000644000175000017500000000137000000000000030343 0ustar00coreycorey00000000000000--- features: - The DSCP value for outer headers in openvswitch overlay tunnel ports can now be set through a configuration option ``dscp`` for both OVS and linuxbridge agents. - DSCP can also be inherited from the inner header through a new boolean configuration option ``dscp_inherit`` for both openvswitch and linuxbridge. If this option is set to true, then the value of ``dscp`` will be ignored. deprecations: - The ``tos`` configuration option in the vxlan group for linuxbridge is deprecated and replaced with the more precise option ``dscp``. The TOS value is made of the DSCP and ECN bits. It is not possible to set the ECN value through the TOS value, and ECN is always inherited from the inner header in case of tunneling.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-enable-dvr-knob-636268f775bb4569.yaml0000644000175000017500000000073200000000000027367 0ustar00coreycorey00000000000000--- features: - | Allow configuring the ``router`` service plugin without the ``dvr`` API extension loaded and exposed. To achieve that, set the new ``enable_dvr`` option to ``False`` in the ``neutron.conf`` file.
upgrade: - | Consider setting ``enable_dvr`` to ``False`` in the ``neutron.conf`` file if your setup doesn't support DVR. This will make Neutron stop advertising support for the ``dvr`` API extension via its ``/v2.0/extensions`` API endpoint. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-floatingip-pool-api-6927362ef87fdbe5.yaml0000644000175000017500000000055100000000000030511 0ustar00coreycorey00000000000000--- features: - | Add support for listing floating ip pools (subnets) in the L3 plugin. A new API resource ``floatingip-pools`` is introduced. This API endpoint can return a list of floating ip pools, which are essentially mappings between network UUIDs and subnet CIDRs. Users can use this API to find out which pool to create floating IPs from. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-get-me-a-network-56321aeef5389001.yaml0000644000175000017500000000054500000000000027536 0ustar00coreycorey00000000000000--- prelude: > The "get-me-a-network" feature simplifies the process for launching an instance with basic network connectivity (via an externally connected private tenant network). features: - Once Nova takes advantage of the "get-me-a-network" feature, a user can launch an instance without explicitly provisioning network resources. ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/add-igmp_snooping_enable-config-option-6a0e15e4ed0a2cf7.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-igmp_snooping_enable-config-option-6a0e15e4ed0a2cf70000644000175000017500000000042000000000000032724 0ustar00coreycorey00000000000000--- features: - | Add a new configuration option, ``igmp_snooping_enable``. The new option is in the ``OVS`` config section and is used by the openvswitch agent. This option enables support for the Internet Group Management Protocol (IGMP) in the integration bridge. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-integration-with-external-dns-f56ec8a4993b1fc4.yaml0000644000175000017500000000125400000000000032606 0ustar00coreycorey00000000000000--- prelude: > Support integration with an external DNS service. features: - Floating IPs can have dns_name and dns_domain attributes associated with them. - Ports can have a dns_name attribute associated with them. The network where a port is created can have a dns_domain associated with it. - Floating IPs and ports will be published in an external DNS service if they have dns_name and dns_domain attributes associated with them. - The reference driver integrates neutron with designate. - Drivers for other DNSaaS can be implemented. - The driver is configured in the default section of neutron.conf using the parameter 'external_dns_driver'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-ip-protocols-in-sg-60467a073e771aee.yaml0000644000175000017500000000040100000000000030176 0ustar00coreycorey00000000000000--- prelude: > Add popular IP protocols to the security group code. End-users can specify protocol names instead of protocol numbers in both the RESTful API and the python-neutronclient CLI.
upgrade: - Add popular IP protocols to the security group code. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-keepalived-vrrp-healt-check-f23ed7c853151484.yaml0000644000175000017500000000103500000000000031725 0ustar00coreycorey00000000000000--- features: - Added Keepalived VRRP health check functionality to enable verification of connectivity from the "master" router to all gateways. Activation of this feature enables gateway connectivity validation and rescheduling of the "master" router to another node when connectivity is lost. If all routers lose connectivity to the gateways, the election process will be repeated round-robin until one of the routers restores its gateway connection. In the meantime, all of the routers will be reported as "master". ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/add-log-for-keepalived-state-change-e6d0c4f663776233.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-log-for-keepalived-state-change-e6d0c4f663776233.ya0000644000175000017500000000012200000000000032141 0ustar00coreycorey00000000000000--- other: - | Add a log file for the ``neutron-keepalived-state-change`` daemon. ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/add-minimum-bandwidth-support-sriov-63664b89f4dd1c1b.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-minimum-bandwidth-support-sriov-63664b89f4dd1c1b.ya0000644000175000017500000000052600000000000032561 0ustar00coreycorey00000000000000--- features: - SR-IOV now supports egress minimum bandwidth configuration. other: - In order to use the QoS egress minimum bandwidth limit feature, 'ip-link' must support the extended VF management parameter ``min_tx_rate``. The minimum version of ``ip-link`` supporting this parameter is ``iproute2-ss140804``, git tag ``v3.16.0``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-multiple-port-bindings-f16eb47ebdddff2d.yaml0000644000175000017500000000170500000000000031612 0ustar00coreycorey00000000000000--- prelude: > Support multiple bindings for compute owned ports. features: - | In order to better support instance migration, multiple port bindings can be associated to compute owned ports. * Create, update, list, show and activate operations are supported for port bindings by the ReST API. * A compute owned port can have one active binding and many inactive bindings. * There can be only one binding (active or inactive) per compute host. * When the ``activate`` operation is executed, a previously inactive binding is made active. The previously active binding becomes inactive. * As a consequence of the multiple port bindings implementation, the ``port_binding`` relationship in the SQLAlchemy ``Port`` object has been renamed ``port_bindings``. Similarly, the ``binding`` attribute of the ``Port`` OVO has been renamed ``bindings``.
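To make the multiple port bindings workflow above concrete, the following is a rough sketch of the request sequence during an instance migration under the port bindings API described in the note; the port UUID and the ``dest-compute`` host name are placeholders::

    # Create an additional, inactive binding on the destination host (sketch)
    POST /v2.0/ports/{port_id}/bindings
    {"binding": {"host": "dest-compute"}}

    # Once the instance has moved, activate the new binding; the
    # previously active binding becomes inactive
    PUT /v2.0/ports/{port_id}/bindings/dest-compute/activate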
././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/add-net-mtu-writable-api-extension-f7038f85f3494a74.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-net-mtu-writable-api-extension-f7038f85f3494a74.yam0000644000175000017500000000143000000000000032277 0ustar00coreycorey00000000000000--- features: - | The new ``net-mtu-writable`` extension API definition has been added. The new extension indicates that the network ``mtu`` attribute is writeable. Plugins supporting the new extension are expected to also support ``net-mtu``. The first plugin that gets support for the new extension is ``ml2``. other: - | Changing MTU configuration options (``global_physnet_mtu``, ``physical_network_mtus``, and ``path_mtu``) and restarting ``neutron-server`` no longer affects existing networks' MTUs. Nevertheless, new networks will use the new option values for MTU calculation. To reflect configuration changes for existing networks, one may use the new ``net-mtu-writable`` API extension to update the ``mtu`` attribute for those networks. ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/add-network-segment-range-extension-0893a67cbf3f10fe.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-network-segment-range-extension-0893a67cbf3f10fe.ya0000644000175000017500000000243300000000000032603 0ustar00coreycorey00000000000000--- prelude: > Added support for network segment range management. This introduces the ability for administrators to control the segment ranges globally or on a per-tenant basis via the Neutron API. features: - | Before Stein, network segment ranges were configured as an entry in the ML2 config file ``/etc/neutron/plugins/ml2/ml2_conf.ini`` that was statically defined for tenant network allocation and therefore had to be managed as part of the host deployment and management. The new ``network-segment-range`` API extension has been introduced, which exposes the network segment ranges to be administered via API. This allows users with admin privileges to dynamically manage the shared and/or tenant specific network segment ranges. Standard attributes with tagging support are introduced to the new resource. The feature is controlled by the newly-added service plugin ``network_segment_range``. A set of ``default`` network segment ranges will be created out of the ranges that are defined in the host ML2 config file ``/etc/neutron/plugins/ml2/ml2_conf.ini``, such as ``network_vlan_ranges``, ``vni_ranges`` for ml2_type_vxlan, ``tunnel_id_ranges`` for ml2_type_gre and ``vni_ranges`` for ml2_type_geneve. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-new-harouter-state-5612fc5b5c2043a5.yaml0000644000175000017500000000054300000000000030260 0ustar00coreycorey00000000000000--- features: - Added a new ``unknown`` state for HA routers. Sometimes l3 agents may not be able to update their health status to the Neutron server due to communication issues. During that time the server may not know whether HA routers hosted by that agent are active or standby. fixes: - Fixes bug `1682145 <https://bugs.launchpad.net/neutron/+bug/1682145>`_.
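As an illustration of the ``net-mtu-writable`` extension described above, the ``mtu`` attribute of an existing network can be updated directly through the API; a minimal sketch using the ``openstack`` client, where the network name and MTU value are placeholders::

    openstack network set --mtu 1450 private-net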
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-osprofiler-support-7fc2de3001187075.yaml0000644000175000017500000000360000000000000030343 0ustar00coreycorey00000000000000--- fixes: - Missing OSprofiler support was added. This cross-project profiling library makes it possible to trace various OpenStack requests through all OpenStack services that support it. To initiate OpenStack request tracing, the `--profile <key>` option needs to be added to the CLI command. This key needs to be one of the secret keys defined in the neutron.conf configuration file with the `hmac_keys` option under the `[profiler]` configuration section. To enable or disable Neutron profiling, the `enabled` option under the same section needs to be set either to `True` or `False`. By default Neutron will trace all API and RPC requests, but there is an opportunity to trace DB requests as well. For this purpose the `trace_sqlalchemy` option needs to be set to `True`.
This attribute is to be managed by entities outside of the Networking service, while the ``status`` attribute is managed by the Networking service. Both status attributes are independent from one another. Third parties can report via Neutron API issues in the underlying data plane affecting connectivity from/to Neutron ports. Attribute can take values ``None`` (default), ``ACTIVE`` or ``DOWN``, and is readable by users and writable by admins and users granted the ``data-plane-integrator`` role. Append ``data_plane_status`` to ``[ml2] extension_drivers`` config option to load the extension driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-port-forwarding-fip-qos-f808d6b4826fb849.yaml0000644000175000017500000000035100000000000031255 0ustar00coreycorey00000000000000--- features: - | L3 agent supports QoS bandwidth limit functionality for port forwarding floating IPs now. If floating IP has binding QoS policy (with bandwidth limit rules), the traffic bandwidth will be limited. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-port-ip-allocation-attr-294a580641998240.yaml0000644000175000017500000000165500000000000030737 0ustar00coreycorey00000000000000--- prelude: > Add ip_allocation attribute to port resources features: - The port resource now has an ip_allocation attribute. The value of this attribute will be set to 'immediate', 'deferred', or 'none' at the time the port is created. It will not be changed when the port is updated. 'immediate' means that the port is expected to have an IP address and Neutron attempted IP allocation on port creation. 'deferred' means that the port is expected to have an IP address but Neutron deferred IP allocation until a port update provides the host to which the port will be bound. 'none' means that the port was created explicitly with no addresses by passing [] in fixed_ips when creating it. upgrade: - All existing ports are considered to have 'immediate' IP allocation. Any ports that do not have this attribute should also be considered to have immediate IP allocation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-port-rebinding-chance-33178b9abacf5804.yaml0000644000175000017500000000030200000000000030747 0ustar00coreycorey00000000000000--- prelude: > ML2: ports can now recover from binding failed state. features: - Ports that failed to bind when an L2 agent was offline can now recover after the agent is back online. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-port_details-to-floatingip-fefceab2c740e482.yaml0000644000175000017500000000022500000000000032274 0ustar00coreycorey00000000000000--- features: - | Add attribute ``port_details`` to floating IP. The value of this attribute contains information of the associated port. 
././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/add-propagate_uplink_status-to-port-f4e53395e86eb3cb.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-propagate_uplink_status-to-port-f4e53395e86eb3cb.ya0000644000175000017500000000106100000000000032723 0ustar00coreycorey00000000000000--- features: - | Introduce the attribute ``propagate_uplink_status`` to ports. Right now, the SRIOV mechanism driver leverages this attribute to decide if the VF link should follow the state of the PF. For example, if the PF is down, the VF link state is automatically set to down as well. Operators can turn on this feature via the configuration option:: [ml2] extension_drivers = uplink_status_propagation The API extension ``uplink_status_propagation`` is introduced to indicate if this feature is turned on. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-rbac-qos-8b1154ee756c66df.yaml0000644000175000017500000000021000000000000026325 0ustar00coreycorey00000000000000--- prelude: > RBAC support for QoS policies features: - Neutron now supports sharing of QoS policies between a subset of tenants. ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/add-rpc_response_max_timeout-option-cfaad5ef6af31632.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-rpc_response_max_timeout-option-cfaad5ef6af31632.ya0000644000175000017500000000020400000000000033114 0ustar00coreycorey00000000000000--- features: - Add config option ``rpc_response_max_timeout`` to configure the maximum time waiting for an RPC response. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-security-groups-rbac-6f133ec4d40e7641.yaml0000644000175000017500000000021700000000000030622 0ustar00coreycorey00000000000000features: - | Security groups are now supported via the network RBAC mechanism. Please refer to the admin guide for further details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-smartnic-support-53d25564bab0cbc5.yaml0000644000175000017500000000032700000000000030213 0ustar00coreycorey00000000000000--- features: - | Add Support for Smart NIC in ML2/OVS mechanism driver, by extending the Neutron OVS mechanism driver and Neutron OVS Agent to bind the Neutron port for the baremetal host with Smart NIC. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-sort-keys-check-for-get-sorts-b9e3e86ddcb3bc3a.yaml0000644000175000017500000000030000000000000032627 0ustar00coreycorey00000000000000--- fixes: - | Add sort-keys validation logic to method ``get_sorts`` in ``neutron.api.api_common``. 
See the link below for more: https://bugs.launchpad.net/neutron/+bug/1659175 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-standard-attr-descriptions-1ba0d7a454c3fd8f.yaml0000644000175000017500000000051000000000000032216 0ustar00coreycorey00000000000000--- prelude: > Add description field to security group rules, networks, ports, routers, floating IPs, and subnet pools. features: - Security group rules, networks, ports, routers, floating IPs, and subnet pools may now contain an optional description which allows users to easily store details about entities. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/add-standard-attributes-to-segment-d39c4b89988aa701.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-standard-attributes-to-segment-d39c4b89988aa701.yam0000644000175000017500000000027200000000000032441 0ustar00coreycorey00000000000000--- fixes: - | Fix an issue that standard attributes, such as ``created_at``, ``updated_at`` and ``revision_number``, are not rendered in the response of segment resource. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-subnet-service-types-bc81f6df9834f96e.yaml0000644000175000017500000000120000000000000031012 0ustar00coreycorey00000000000000--- features: - Subnets now have a new property 'service_types'. This is a list of port device owners, such that only ports with a matching device owner will be given an IP from this subnet. If no matching service subnet exists for the given device owner, or no service subnets have been defined on the network, the port will be assigned an IP from a subnet with no service-types. This preserves backwards compatibility with older deployments. upgrade: - A new table 'subnet_service_types' has been added to cater for this feature. It uses the ID field from the 'subnets' table as a foreign key. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-subnetpool-rbac-2eb2008bd1b27b11.yaml0000644000175000017500000000021500000000000027661 0ustar00coreycorey00000000000000--- features: - | Subnetpool is now supported via the network RBAC mechanism. Please refer to the admin guide for further details. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/add-tag-all-standardattr-resources-6f757cb39cc1dcfe.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-tag-all-standardattr-resources-6f757cb39cc1dcfe.yam0000644000175000017500000000071100000000000032711 0ustar00coreycorey00000000000000--- features: - The resource tag mechanism is refactored so that the tag support for new resources can be supported easily. The resources with tag support are network, subnet, port, subnetpool, trunk, floatingip, policy, security_group, and router. deprecations: - Users can use 'tagging' extension instead of the 'tag' extension and 'tag-ext' extension. 
Those extensions are now deprecated and will be removed in the Queens release.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-tags-to-core-resources-b05330a129900609.yaml0000644000175000017500000000035700000000000030616 0ustar00coreycorey00000000000000--- prelude: > Add a tag mechanism for network resources. features: - Users can set tags on their network resources. - Networks can be filtered by tags. The supported filters are 'tags', 'tags-any', 'not-tags' and 'not-tags-any'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-timestamp-fields-f9ab949fc88f05f6.yaml0000644000175000017500000000061300000000000030172 0ustar00coreycorey00000000000000--- prelude: > Timestamp fields have been added to neutron core resources. features: - Add timestamp fields ``created_at``, ``updated_at`` into neutron core resources, for example networks, subnets, ports and subnetpools. - These resources can now be queried by ``changed-since``, which returns the resources changed after a specific time string like ``YYYY-MM-DDTHH:MM:SS``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add-wsgi-script-support-e611fa5b5c2043a5.yaml0000644000175000017500000000021500000000000030560 0ustar00coreycorey00000000000000--- features: - The Neutron API can now be managed by a ``mod_wsgi`` compatible web server (e.g. ``apache2`` (``httpd``), ``nginx``, etc.) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add_dhcp_dnsmasq_t1t2_options-3cef427d8109c165.yaml0000644000175000017500000000105700000000000031714 0ustar00coreycorey00000000000000--- features: - | Allow configuration of the DHCP renewal (T1) and rebinding (T2) timers in ``neutron-dhcp-agent``. Allowing these timers to be set (options 58 and 59 as per RFC2132) in ``dnsmasq`` lets users change other parameters, like MTU, on instances without having to wait for the lease time to expire. The advantage of changing T1 over the lease time is that if the DHCP server becomes unreachable within the lease time, instances will not drop their IP addresses and it will not cause a dataplane disruption. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/add_is_default_to_qos_policies-f7c6bbac08d474d5.yaml0000644000175000017500000000026500000000000032434 0ustar00coreycorey00000000000000--- features: - Add 'default' behaviour to QoS policies. Neutron now supports having a default QoS policy in a project, assigned automatically to all new networks created. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/advanced_image-8abff2ca91de7f6c.yaml0000644000175000017500000000075100000000000027305 0ustar00coreycorey00000000000000--- features: - | Some scenario tests require advanced ``Glance`` images (for example, ``Ubuntu`` or ``CentOS``) in order to pass. They are now skipped by default.
If you need to execute those tests, please configure ``tempest.conf`` to use an advanced image, and set ``image_is_advanced`` in ``neutron_plugin_options`` section of ``tempest.conf`` file to ``True``. The first scenario test case that requires the new option set to execute is ``test_trunk``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/advertise_mtu_by_default-d8b0b056a74517b8.yaml0000644000175000017500000000140300000000000031052 0ustar00coreycorey00000000000000--- features: - By default, the DHCP agent provides a network MTU value to instances using the corresponding DHCP option if core plugin calculates the value. For ML2 plugin, calculation mechanism is enabled by setting [ml2] path_mtu option to a value greater than zero. upgrade: - To disable, use [DEFAULT] advertise_mtu = False. other: - For overlay networks managed by ML2 core plugin, the calculation algorithm subtracts the overlay protocol overhead from the value of [ml2] path_mtu. The DHCP agent provides the resulting (smaller) MTU to instances using overlay networks. - The [DEFAULT] advertise_mtu option must contain a consistent value on all hosts running the DHCP agent. - Typical networks can use [ml2] path_mtu = 1500. ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/advertisement-intervals-for-radvd-configurable-6d85b5fdd97a2742.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/advertisement-intervals-for-radvd-configurable-6d85b5fd0000644000175000017500000000103300000000000033330 0ustar00coreycorey00000000000000--- fixes: - Prior to Mitaka, the settings that control the frequency of router advertisements transmitted by the radvd daemon were not able to be adjusted. Larger deployments may wish to decrease the frequency in which radvd sends multicast traffic. The 'min_rtr_adv_interval' and 'max_rtr_adv_interval' settings in the L3 agent configuration file map directly to the 'MinRtrAdvInterval' and 'MaxRtrAdvInterval' in the generated radvd.conf file. Consult the manpage for radvd.conf for more detailed information. ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/agent-notification-signature-status-6a9b9dbce9cb9740.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/agent-notification-signature-status-6a9b9dbce9cb9740.ya0000644000175000017500000000114400000000000033011 0ustar00coreycorey00000000000000--- deprecations: - | The signature of notifications for resource ``agent`` for events ``after_create`` and ``after_update`` was extended. A new keyword argument was added: ``status``. This is to make the same status information available to notification consumers as it was available already where the notification is sent in class ``AgentDbMixin``. Valid status values are defined in ``neutron_lib.agent.constants``. Consuming notifications by the old signature is deprecated. Unless processing arguments as ``**kwargs``, out-of-tree notification consumers need to adapt. 
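To illustrate the radvd advertisement-interval note above, a hedged L3 agent configuration sketch; the values shown are placeholders for a deployment that wants less frequent multicast traffic, not recommendations::

    [DEFAULT]
    # Maps to MinRtrAdvInterval in the generated radvd.conf
    min_rtr_adv_interval = 30
    # Maps to MaxRtrAdvInterval in the generated radvd.conf
    max_rtr_adv_interval = 100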
././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/allow-non-admins-to-define-external-extra-routes-0d541fc356a5c546.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/allow-non-admins-to-define-external-extra-routes-0d541f0000644000175000017500000000011300000000000033112 0ustar00coreycorey00000000000000--- features: - Allow non-admin users to define "external" extra-routes. ././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/allow-update-subnet-segment-id-association-1fb02ace27e85bb8.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/allow-update-subnet-segment-id-association-1fb02ace27e80000644000175000017500000000061300000000000033036 0ustar00coreycorey00000000000000--- features: - | Add support for setting the ``segment_id`` for an existing subnet. This enables users to convert a non-routed network with no subnet/segment association to a routed one. It is only possible to do this migration if both of the following conditions are met: the current ``segment_id`` is ``None``, and the network contains a single segment and subnet. ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/allow_port_create_update_shared_owners-2a57b1c72d91ace2.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/allow_port_create_update_shared_owners-2a57b1c72d91ace20000644000175000017500000000036000000000000033164 0ustar00coreycorey00000000000000--- features: - | Tenants who can access shared networks can now create/update ports on a specified subnet instead of the default subnet. This is now the default behavior and can be changed by modifying the policy.json file. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/bandwidth-config-ovs-5bede7fb43b0a574.yaml0000644000175000017500000000105600000000000030241 0ustar00coreycorey00000000000000--- features: - | New configuration options for neutron-ovs-agent under section ``[ovs]``: ``resource_provider_bandwidths`` and ``resource_provider_inventory_defaults``. The former controls the ``total`` (available bandwidth) field of the physical network interface resource provider inventories. It defaults to not creating resource providers in Placement. The latter can be used to tune the other fields (``allocation_ratio``, ``min_unit``, ``max_unit``, ``reserved``, ``step_size``) of resource provider inventories. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/bandwidth-config-sriov-bd8ff8b4d84c8792.yaml0000644000175000017500000000106600000000000030544 0ustar00coreycorey00000000000000--- features: - | New configuration options for neutron-sriov-agent under section ``[sriov_nic]``: ``resource_provider_bandwidths`` and ``resource_provider_inventory_defaults``. The former controls the ``total`` (available bandwidth) field of the physical network interface resource provider inventories. It defaults to not creating resource providers in Placement. The latter can be used to tune the other fields (``allocation_ratio``, ``min_unit``, ``max_unit``, ``reserved``, ``step_size``) of resource provider inventories.
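A sketch of the ``[ovs]`` options described above for ``neutron-ovs-agent``; the interface name and the ``device:egress:ingress`` value syntax are assumptions made to illustrate the shape of the options, not values taken from the note::

    [ovs]
    # Example only: report bandwidth inventory for the NIC backing a physnet
    resource_provider_bandwidths = eth0:10000000:10000000
    # Example only: tune the remaining inventory fields
    resource_provider_inventory_defaults = allocation_ratio:1.0,min_unit:1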
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/bgp-support-ef361825ca63f28b.yaml0000644000175000017500000000231000000000000026350 0ustar00coreycorey00000000000000--- prelude: > Announcement of tenant prefixes and host routes for floating IPs via BGP is supported. features: - Announcement of tenant subnets via BGP using the centralized Neutron router gateway port as the next-hop - Announcement of floating IP host routes via BGP using the centralized Neutron router gateway port as the next-hop - Announcement of floating IP host routes via BGP using the floating IP agent gateway as the next-hop when the floating IP is associated through a distributed router issues: - When using DVR, if a floating IP is associated to a fixed IP, direct access to the fixed IP is not possible when traffic is sent from outside of a Neutron tenant network (north-south traffic). Traffic sent between tenant networks (east-west traffic) is not affected. When using a distributed router, the floating IP will mask the fixed IP making it inaccessible, even though the tenant subnet is being announced as accessible through the centralized SNAT router. In such a case, traffic sent to the instance should be directed to the floating IP. This is a limitation of the Neutron L3 agent when using DVR and will be addressed in a future release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/bug-1311040-dhcp-no-dns-09291c23e2ce800a.yaml0000644000175000017500000000132700000000000027471 0ustar00coreycorey00000000000000--- prelude: > DNS server assignment can now be disabled in replies sent from the DHCP agent. features: - | It is now possible to instruct the DHCP agent not to supply any DNS server address to its clients by setting the ``dns_nameservers`` attribute for the corresponding subnet to ``0.0.0.0`` or ``::``, for IPv4 or IPv6 subnets (respectively). upgrade: - | The functionality when a subnet has its DNS server set to ``0.0.0.0`` or ``::`` has been changed with this release. The old behaviour was that each DHCP agent would supply only its own IP address as the DNS server to its clients. The new behaviour is that the DHCP agent will not supply any DNS server IP address at all. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/bug-1811166-314d4b89de1cc0f1.yaml0000644000175000017500000000062600000000000025545 0ustar00coreycorey00000000000000--- fixes: - | [`bug 1811166 <https://bugs.launchpad.net/neutron/+bug/1811166>`_] Changes the API behavior to enforce that a router's administrative state must be down (``router.admin_state_up==False``) before modifying its distributed attribute. If the router ``admin_state_up==True`` when trying to change the ``distributed`` attribute, a BadRequest exception will be thrown.
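Given the bug 1811166 constraint above, converting a router implies the following order of operations; a sketch using the ``openstack`` client, where ``router1`` is a placeholder::

    openstack router set --disable router1
    openstack router set --distributed router1
    openstack router set --enable router1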
././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/bug-1843428-mac-addres-case-insensitivity-750299c11b49a9a8.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/bug-1843428-mac-addres-case-insensitivity-750299c11b49a0000644000175000017500000000057000000000000031711 0ustar00coreycorey00000000000000--- fixes: - | When listing ports using the ``openstack port list --mac-address A:B:C:D:E:F`` command, Neutron might not return any result when trying to list ports by MAC address if the letter cases differ. This fix makes the search based on MAC address case insensitive. For more information see `bug 1843428 <https://bugs.launchpad.net/neutron/+bug/1843428>`_.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/bump-default-quotas-810570badb378c50.yaml0000644000175000017500000000041300000000000027672 0ustar00coreycorey00000000000000--- upgrade: - | Default quotas were bumped for the following resources: networks (from 10 to 100), subnets (from 10 to 100), ports (from 50 to 500). If you want to stick to the old values, consider explicitly setting them in the ``neutron.conf`` file. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/change-of-default-timeout-b09d11683526e27d.yaml0000644000175000017500000000043600000000000030666 0ustar00coreycorey00000000000000--- other: - | In order to improve the OVS agent restart success rate under heavy load, the native driver ``of_connect_timeout`` and ``of_request_timeout`` are now set to 300s, instead of triggering a retry or fullsync. The value has no side effects for an OVS agent under regular load. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/change-oslo-db-defaults-f94df09c30767f95.yaml0000644000175000017500000000105000000000000030414 0ustar00coreycorey00000000000000--- upgrade: - | Previously, ``neutron-server`` was using configuration values for ``oslo.db`` that were different from library defaults. Specifically, it used the following values when they were not overridden in configuration files: ``max_pool_size`` = 10, ``max_overflow`` = 20, ``pool_timeout`` = 10. In this release, ``neutron-server`` instead relies on default values defined by the library itself. If you rely on old default values, you may need to adjust your configuration files to explicitly set the new values. ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/change-port-forwarding-uniq-constraint-78ba3db20bce5fd2.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/change-port-forwarding-uniq-constraint-78ba3db20bce5fd20000644000175000017500000000102000000000000033030 0ustar00coreycorey00000000000000--- upgrade: - | Adds the Floating IP port forwarding table column ``protocol`` to the uniq constraints. In one expand script, we drop the original uniq constraints first, then create the new uniq constraints with column ``protocol``. fixes: - | Floating IP port forwardings with different protocols could not have the same internal or external port number to the same VM port. After this fix we will allow creating port forwardings with the same internal or external port number for different protocols.
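If the pre-bump defaults from the quota note above are preferred, they can be pinned explicitly; the option names below come from the standard ``[quotas]`` section of ``neutron.conf`` and are an assumption, since the note itself does not list them::

    [quotas]
    quota_network = 10
    quota_subnet = 10
    quota_port = 50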
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/change-segmentation-id-ovs-a201e0ac1c4d4fb6.yaml0000644000175000017500000000057100000000000031322 0ustar00coreycorey00000000000000--- features: - | The segmentation ID of a provider network can now be modified, even with OVS ports bound. Note that, during this process, the traffic of the bound ports tagged with the former segmentation ID (external VLAN) will be mapped to the new one. This can cause traffic disruption while the external network VLAN is migrated to the new tag. ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/change_external_network_bridge_default-5de3a0c19182eb70.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/change_external_network_bridge_default-5de3a0c19182eb700000644000175000017500000000162600000000000033041 0ustar00coreycorey00000000000000--- prelude: > The default value for 'external_network_bridge' in the L3 agent is now ''. upgrade: - The default value for 'external_network_bridge' has been changed to '' since that is the preferred way to configure the L3 agent and will be the only way in future releases. If you have not explicitly set this value and you use the L3 agent, you will need to set this value to 'br-ex' to match the old default. If you are using 'br-ex', you should switch to '', ensure your external network has a flat segment and ensure your L2 agent has a bridge_mapping entry between the external network's flat segment physnet and 'br-ex' to get the same connectivity; a configuration sketch is shown after this group of notes. If the external network did not already have the flat segment, you will need to detach all routers from the external networks, delete the incorrect segment type, add the flat segment, and re-attach the routers. ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/clear-allowed-address-pairs-with-none-4757bcca78076c9e.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/clear-allowed-address-pairs-with-none-4757bcca78076c9e.0000644000175000017500000000045500000000000032403 0ustar00coreycorey00000000000000--- prelude: > Allowed address pairs can now be cleared by passing None in addition to an empty list. This is to make it possible to use the --action=clear option with the neutron client. neutron port-update --allowed-address-pairs action=clear fixes: - Fixes bug 1537734 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/common-agent-extension-api-3fd06ff67329200a.yaml0000644000175000017500000000075600000000000031157 0ustar00coreycorey00000000000000--- features: - | L2 agents based on the ``ML2`` ``_common_agent`` now have the L2 extension API available. This API can be used by L2 extension drivers to request resources from the L2 agent. It is used, for example, to pass an instance of the ``IptablesManager`` to the ``Linuxbridge`` L2 agent ``QoS extension driver``. fixes: - | Fixes bug 1736674: security group rules are now properly applied by the ``Linuxbridge L2 agent`` with the ``QoS extension driver`` enabled.
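The bridge mapping referred to in the 'external_network_bridge' note above could look like the following excerpt; this is a sketch only, assuming the Open vSwitch agent and an illustrative physical network name ``physnet_ex``:

    # /etc/neutron/plugins/ml2/openvswitch_agent.ini
    [ovs]
    bridge_mappings = physnet_ex:br-ex

The external network's flat segment must then use ``physnet_ex`` as its physical network so that the router gateway port is wired through 'br-ex' by the L2 agent.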
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/conditional_updates-10b9aa66fd144217.yaml0000644000175000017500000000071200000000000030024 0ustar00coreycorey00000000000000--- features: - | The Neutron API now supports conditional updates to resources with the 'revision_number' attribute by setting the desired revision number in an HTTP If-Match header. This allows clients to ensure that a resource hasn't been modified since it was retrieved by the client. Support for conditional updates on the server can be checked for by looking for the 'revision-if-match' extension in the supported extensions; an example request is sketched after this group of notes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/config-file-generation-2eafc6602d57178e.yaml0000644000175000017500000000042400000000000030404 0ustar00coreycorey00000000000000--- prelude: > Core configuration files are automatically generated. features: - Neutron no longer includes static example configuration files. Instead, use tools/generate_config_file_samples.sh to generate them. The files are generated with a .sample extension. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/config-host_dvr_for_dhcp-f949aca5bd666e24.yaml0000644000175000017500000000027400000000000031120 0ustar00coreycorey00000000000000--- other: - | A new config option, ``host_dvr_for_dhcp``, was added to neutron.conf for DVR to determine whether to host the DVR local router on the scheduled DHCP node(s). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/config-wsgi-pool-size-a4c06753b79fee6d.yaml0000644000175000017500000000104400000000000030310 0ustar00coreycorey00000000000000--- prelude: > Support configuration of greenthreads pool for WSGI. other: - Operators may want to tune the ``max_overflow`` and ``wsgi_default_pool_size`` configuration options according to the investigations outlined in this `mailing list post `_. The default value of ``wsgi_default_pool_size`` inherits from that of oslo.config, which is currently 100. This is a change in default from the previous Neutron-specific value of 1000. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/coordination-df3c0bf55a0c4863.yaml0000644000175000017500000000060500000000000026631 0ustar00coreycorey00000000000000--- other: - | Add a generic coordination lock mechanism for various scenarios. The decorator allows flexible lock names built from parameters and the names of the underlying functions. In order to achieve backward compatibility with Python 2.7, several functions were copied from an older version of the Python ``inspect`` module. Once Python 2.7 is retired, this duplication can be dropped.
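The conditional update described in the first note of this group can be exercised directly against the API; the sketch below is illustrative (endpoint, token, port ID and revision number are placeholders), and a stale revision number typically results in a 412 Precondition Failed response:

    curl -X PUT http://controller:9696/v2.0/ports/<port-id> \
        -H "X-Auth-Token: $TOKEN" \
        -H "Content-Type: application/json" \
        -H "If-Match: revision_number=42" \
        -d '{"port": {"name": "renamed-port"}}'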
././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/correlate-address-scope-with-network-ea16e16b0154ac21.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/correlate-address-scope-with-network-ea16e16b0154ac21.y0000644000175000017500000000016200000000000032520 0ustar00coreycorey00000000000000--- features: - Add derived attributes to the network to tell users which address scopes the network is in. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/custom_ethertypes-eae3fcab3293e3a1.yaml0000644000175000017500000000074200000000000030063 0ustar00coreycorey00000000000000--- security: - | The OVS Firewall blocks traffic that does not have either the IPv4 or IPv6 ethertypes at present. This is a behavior change compared to the iptables_hybrid firewall, which only operates on IP packets and thus does not address other ethertypes. There is now a configuration option in the Neutron Open vSwitch agent configuration file for permitted ethertypes; the agent ensures on initialization that the requested ethertypes are permitted. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/default-local-dns-a1c3fa1451f228fa.yaml0000644000175000017500000000230300000000000027424 0ustar00coreycorey00000000000000--- fixes: - Prior to Mitaka, name resolution in instances required specifying DNS resolvers via the 'dnsmasq_dns_servers' option in the DHCP agent configuration file or via neutron subnet options. In this case, the data plane must provide connectivity between instances and upstream DNS resolvers. Omitting both of these methods causes the dnsmasq service to offer the IP address on which it resides to instances for name resolution. However, the static dnsmasq '--no-resolv' process argument prevents name resolution via dnsmasq, leaving instances without name resolution. Mitaka introduces the 'dnsmasq_local_resolv' option (default value False to preserve backward compatibility), which enables the dnsmasq service to provide name resolution for instances via DNS resolvers on the host running the DHCP agent. In this case, the data plane must provide connectivity between the host and upstream DNS resolvers rather than between the instances and upstream DNS resolvers. Specifying DNS resolvers via the 'dnsmasq_dns_servers' option in the DHCP agent configuration overrides the 'dnsmasq_local_resolv' option for all subnets using the DHCP agent. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/default-subnetpool-semantics-1cdc5cdde2be88c2.yaml0000644000175000017500000000230100000000000032161 0ustar00coreycorey00000000000000--- features: - The subnet API now includes a new use_default_subnetpool attribute. This attribute can be specified on creating a subnet in lieu of a subnetpool_id. The two are mutually exclusive. If it is specified as True, the default subnet pool for the requested ip_version will be looked up and used. If no default exists, an error will be returned. deprecations: - The default_subnet_pools option is now deprecated and will be removed in the Newton release. The same functionality is now provided by setting the is_default attribute on subnetpools to True using the API or client.
fixes: - Before Mitaka, when a default subnetpool was defined in the configuration, a request to create a subnet would fall back to using it if no specific subnet pool was specified. This behavior broke the semantics of subnet create calls in this scenario and is now considered an API bug. This bug has been fixed so that there is no automatic fallback with the presence of a default subnet pool. Workflows which depended on this behavior will have to be modified to set the new use_default_subnetpool attribute when creating a subnet. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate-advertise-mtu-51e3f78475a14efc.yaml0000644000175000017500000000106700000000000030623 0ustar00coreycorey00000000000000--- deprecations: - The 'advertise_mtu' option is deprecated and will be removed in Ocata. There should be no use case to disable the feature, hence the option is considered redundant. DHCP and L3 agents will continue advertising MTU values to instances. Other plugins not using those agents are also encouraged to advertise MTU to instances. The actual implementation of MTU advertisement depends on the plugin in use, but it's assumed that at least the DHCP option for IPv4 clients and Router Advertisements for IPv6 clients are supported. ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate-allow-sorting-allow-pagination-4549c92a74cfe15d.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate-allow-sorting-allow-pagination-4549c92a74cfe10000644000175000017500000000022700000000000032702 0ustar00coreycorey00000000000000--- deprecations: - The ``allow_sorting`` and ``allow_pagination`` configuration options are deprecated and will be removed in a future release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate-force_gateway_on_subnet-376855c4e66f4e11.yaml0000644000175000017500000000017000000000000032563 0ustar00coreycorey00000000000000--- deprecations: - The 'force_gateway_on_subnet' option is deprecated and will be removed in the 'Newton' cycle. ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate-gateway_external_network_id-f5c4071cd06714b0.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate-gateway_external_network_id-f5c4071cd06714b0.0000644000175000017500000000027300000000000032633 0ustar00coreycorey00000000000000--- deprecations: - The ``gateway_external_network_id`` L3 agent option is deprecated and will be removed in a future release, together with the ``external_network_bridge`` option that it depends on. ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate-get_binding_levels-function-84012e104ac572a1.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate-get_binding_levels-function-84012e104ac572a1.0000644000175000017500000000043500000000000032421 0ustar00coreycorey00000000000000--- deprecations: - | Function ``get_binding_levels`` from ``neutron.plugins.ml2.db`` module is deprecated and will be removed in the future. New function ``get_binding_levels_objs`` should be used instead.
This new function returns ``PortBindingLevel`` OVO objects. ././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate-implicit-service-providers-loading-703f984b90351bf0.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate-implicit-service-providers-loading-703f984b900000644000175000017500000000053600000000000033000 0ustar00coreycorey00000000000000--- deprecations: - Neutron controller service currently allows loading ``service_providers`` options from some files that are not passed to it via the --config-dir or --config-file CLI options. This behaviour is now deprecated and will be disabled in Ocata. Current users are advised to switch to the aforementioned CLI options. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate-ivs-interface-driver-b68e06a470c65ccb.yaml0000644000175000017500000000016300000000000032124 0ustar00coreycorey00000000000000--- deprecations: - | The ``ivs`` interface driver is deprecated in Queens and will be removed in Rocky. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate-min-l3-agents-per-router-15ddaa4c178b23df.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate-min-l3-agents-per-router-15ddaa4c178b23df.yam0000644000175000017500000000027100000000000032453 0ustar00coreycorey00000000000000--- deprecations: - The option min_l3_agents_per_router is deprecated and will be removed in the Ocata release, where the scheduling of new HA routers will always be allowed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate-network-device-mtu-59b78264c9974808.yaml0000644000175000017500000000034300000000000031303 0ustar00coreycorey00000000000000--- deprecations: - The 'network_device_mtu' option is deprecated and will be removed in the 'Newton' cycle. Please use the system-wide segment_mtu setting, which the agents will take into account when wiring VIFs.././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate-neutron-rootwrap-xen-dom0-124ee3647beecc17.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate-neutron-rootwrap-xen-dom0-124ee3647beecc17.ya0000644000175000017500000000026600000000000032543 0ustar00coreycorey00000000000000--- deprecations: - Now that rootwrap daemon mode is supported for XenServer, the ``neutron-rootwrap-xen-dom0`` script is deprecated and will be removed in a future release. ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate-of_interface-driver-option-1968f8bf6fcd1a38.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate-of_interface-driver-option-1968f8bf6fcd1a38.y0000644000175000017500000000036200000000000032647 0ustar00coreycorey00000000000000--- deprecations: - The of_interface Open vSwitch agent configuration option is deprecated and will be removed in the future. After option removal, the current default driver (native) will be the only supported of_interface driver.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate-ovs_integration_bridge-d4d1521c35f999bd.yaml0000644000175000017500000000044000000000000032552 0ustar00coreycorey00000000000000--- deprecations: - Deprecate ``ovs_integration_bridge``. This configuration option is a duplicate of ``OVS:integration_bridge``. Currently both options must be the same to avoid configuration clashes. It was previously used in the DHCP agent and will be removed in a future release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate-ovsdb-interface-b7e7cc5b036e9ef9.yaml0000644000175000017500000000031200000000000031243 0ustar00coreycorey00000000000000--- deprecations: - | The ``ovsdb_interface`` configuration option is now deprecated. In future releases, the value of the option will be ignored. The ``native`` driver will then be used. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate-router_id-34aca9ea5ee9e789.yaml0000644000175000017500000000013700000000000030174 0ustar00coreycorey00000000000000--- upgrade: - The router_id option is deprecated and will be removed in the Newton release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate-send_arp_for_ha-0281853632f58e8d.yaml0000644000175000017500000000036700000000000030730 0ustar00coreycorey00000000000000--- deprecations: - The L3 agent ``send_arp_for_ha`` configuration option is deprecated and will be removed in Pike. The functionality will remain, and the agent will send three gratuitous ARPs whenever a new floating IP is configured. ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate-supported_pci_vendor_devs-12279b70a1f1fe8e.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate-supported_pci_vendor_devs-12279b70a1f1fe8e.ya0000644000175000017500000000047700000000000032753 0ustar00coreycorey00000000000000--- deprecations: - The 'supported_pci_vendor_devs' option is deprecated in Newton and will be removed in Ocata. The validation of supported pci vendors is done in nova-scheduler through the pci_passthrough_whitelist option when it selects a suitable hypervisor, hence the option is considered redundant. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate_max_fixed_ips_per_port-5e80518cbf25cfd6.yaml0000644000175000017500000000115500000000000032721 0ustar00coreycorey00000000000000--- prelude: > max_fixed_ips_per_port has been deprecated and will be removed in the Newton or Ocata cycle depending on when all identified usecases of the option are satisfied via another quota system. deprecations: - max_fixed_ips_per_port has been deprecated and will be removed in the Newton or Ocata cycle depending on when all identified usecases of the option are satisfied via another quota system. If you depend on this configuration option to stop tenants from consuming IP addresses, please leave a comment on the `bug report `_.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate_neutron_debug-a578e0adfc9cff4c.yaml0000644000175000017500000000061100000000000031246 0ustar00coreycorey00000000000000--- deprecations: - The tool neutron-debug is now deprecated, to be replaced with a new set of troubleshooting and diagnostic tools. There is no plan for removal in the immediate term, and not until comparable tools are adequate enough to supplant neutron-debug altogether. For more information, please see https://blueprints.launchpad.net/neutron/+spec/troubleshooting ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate_prevent_arp_spoofing_option-a09e673fc8f9fee4.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecate_prevent_arp_spoofing_option-a09e673fc8f9fee4.0000644000175000017500000000102700000000000033222 0ustar00coreycorey00000000000000--- deprecations: - The option ``[AGENT] prevent_arp_spoofing`` has been deprecated and will be removed in the Ocata release. ARP spoofing protection should always be enabled unless it is explicitly disabled through the port security extension via the API. The primary reason it was a config option is that it was merged at the end of the Kilo development cycle, so it was not considered stable. It has been enabled by default since Liberty, is considered stable, and there is no reason to keep it configurable. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/deprecated-driver-e368e0befc9bee4c.yaml0000644000175000017500000000057300000000000027772 0ustar00coreycorey00000000000000--- prelude: > OFAgent is decomposed and deprecated in the Mitaka cycle. other: - The OpenFlow Agent (OFAgent) mechanism driver has been completely decomposed from the Neutron tree in Mitaka. The OFAgent driver and its agent are also deprecated in favor of the OpenvSwitch mechanism driver with the "native" of_interface in Mitaka and will be removed in the next release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/designate-driver-keystonev3-8e70d152e84388e0.yaml0000644000175000017500000000050400000000000031301 0ustar00coreycorey00000000000000--- features: - Designate driver can now use Keystone v3 authentication options. The ``[designate]`` section now accepts the ``auth_type`` option, as well as other ``keystoneauth`` options (e.g. ``auth_url``, ``username``, ``user_domain_name``, ``password``, ``project_name``, ``project_domain_name``). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/dhcp-bulk-updates-0150b764bb1b165f.yaml0000644000175000017500000000116000000000000027301 0ustar00coreycorey00000000000000--- features: - By default the dnsmasq agent is restarted for every port created, deleted or updated. When there are many port changes on the same network, it can take a very long time for all of the port changes to be realised. This enhancement adds a new configuration option that enables bulk updates, so that dnsmasq is only restarted once in a given period and not N times.
The new option 'bulk_reload_interval' indicates how often the agent may be reloaded; the default value of 0 preserves the original reload-per-change behaviour. A configuration sketch is shown after this group of notes. ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/dhcp-dnsmasq-dhcp-host-addr6-list-support-45d104b3f7ce220e.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/dhcp-dnsmasq-dhcp-host-addr6-list-support-45d104b3f7ce20000644000175000017500000000243600000000000032541 0ustar00coreycorey00000000000000--- features: - | Adds support for configuring a list of IPv6 addresses for a dhcp-host entry in the dnsmasq DHCP agent driver. For a port with multiple IPv6 fixed-ips in the same subnet, a single dhcp-host entry including all the addresses is written to the dnsmasq dhcp-hostsfile. Reserving multiple addresses for a host eases problems related to network and chain-booting where each step in the boot process requests an address using different DUID/IAID combinations. With a single address, only one step gets the "static" address and the boot process will fail on the following steps. By reserving enough addresses for all the stages of the boot process this problem is resolved. (See bug: `#1861032 `_) .. NOTE:: This requires dnsmasq version 2.81 or later. Some distributions may backport this feature to earlier dnsmasq versions as part of the packaging; check the distribution's release notes. Since the new configuration format is invalid in previous versions of dnsmasq, this feature is *disabled* by default. To *enable* the feature, set the option ``dnsmasq_enable_addr6_list`` in the DHCP agent configuration to ``True``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/dhcp-domain-removed-cc5bc6e2129fdf7f.yaml0000644000175000017500000000026700000000000030142 0ustar00coreycorey00000000000000upgrade: - The ``dhcp_domain`` DHCP agent configuration option was deprecated in the Liberty cycle, and is now no longer used. The ``dns_domain`` option should be used instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/dhcp-ipv6-address-update-ff18d1eb0c196bce.yaml0000644000175000017500000000116700000000000031013 0ustar00coreycorey00000000000000--- fixes: - There is a race condition when adding ports in DHCP namespaces where an IPv6 address could be dynamically created via SLAAC from a Router Advertisement sent from the L3 agent, leading to a failure to start the DHCP agent. This bug has been fixed, but care must be taken on an upgrade dealing with any potentially stale dynamic addresses. For more information, see bug `1627902 `_. upgrade: - On upgrade, IPv6 addresses in DHCP namespaces that have been created dynamically via SLAAC will be removed, and static IPv6 addresses will be added instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/dhcp-lease-time-5c504c3730a4f9ea.yaml0000644000175000017500000000027200000000000027022 0ustar00coreycorey00000000000000--- upgrade: - The configuration option dhcp_lease_time was deprecated in the Havana cycle. This option is no longer supported. The option was replaced by dhcp_lease_duration.
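A minimal ``dhcp_agent.ini`` excerpt exercising the two DHCP agent options introduced above; the values are illustrative and both features are off by default:

    [DEFAULT]
    # Reload dnsmasq at most once per 30 seconds instead of once per port change.
    bulk_reload_interval = 30
    # Write all IPv6 fixed IPs of a port into a single dhcp-host entry
    # (requires dnsmasq 2.81 or later).
    dnsmasq_enable_addr6_list = True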
././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/dhcp-resync-throttle-config-option-9f2375e3baf683ad.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/dhcp-resync-throttle-config-option-9f2375e3baf683ad.yam0000644000175000017500000000151300000000000032633 0ustar00coreycorey00000000000000--- features: - A new config option ``resync_throttle`` has been added for the Neutron DHCP agent. This new option allows throttling the number of resync state events between the local DHCP state and Neutron to only one per ``resync_throttle`` seconds. The default value for this new option is set to 1 and it should be configured for the user's specific scenario, i.e. how responsive the user would like the system to be to those DHCP resync state events. The option is introduced together with the event-driven periodic task for DHCP agents. This gives the agent a faster reaction to resync requests while ensuring a minimum interval between them to avoid too frequent resyncing; a configuration sketch is shown after this group of notes. For more information see bug `1780370 `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/direct-physical-vnic-878d15bdb758b70e.yaml0000644000175000017500000000027300000000000030123 0ustar00coreycorey00000000000000--- prelude: > Add new VNIC type for SR-IOV physical functions. features: - Neutron now supports creation of ports for exposing physical functions as network devices to guests. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/dns_domain-1799b939e7248247.yaml0000644000175000017500000000124000000000000025725 0ustar00coreycorey00000000000000--- fixes: - | A previous bug fix changed the behaviour of the DHCP agent to use a network's ``dns_domain`` as the search path provided to instances, overriding the ``dns_domain`` configuration option used by both the DHCP agent and the main server process when generating port DNS assignments. This broke the original design intent of the ``dns_domain`` attribute of a network, which was for integration with external DNS systems such as Designate rather than for use in Neutron's internal DNS support. This incorrect change in behaviour has now been reverted - the DHCP agent will only ever use the ``dns_domain`` configuration option. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/dnsmasq-local-service-c8eaa91894a7d6d4.yaml0000644000175000017500000000057000000000000030356 0ustar00coreycorey00000000000000--- fixes: - | Fixes bug `1501206 `_. This ensures that DHCP agent instances running dnsmasq as a DNS server can no longer be exploited as DNS amplifiers when the tenant network is using publicly routed IP addresses by adding an option that will allow them to only serve DNS requests from local networks. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/dnsmasq_dns_servers-d729c04887ce67b4.yaml0000644000175000017500000000021200000000000030102 0ustar00coreycorey00000000000000--- upgrade: - The configuration option dnsmasq_dns_server was deprecated in the Kilo cycle. This value is no longer supported.
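The resync throttle described in the first note of this group is set in the DHCP agent's configuration file; a hedged sketch, with an illustrative value and assuming the option lives in the default ``[DEFAULT]`` group:

    # /etc/neutron/dhcp_agent.ini
    [DEFAULT]
    # Process at most one resync state event every 5 seconds.
    resync_throttle = 5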
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/drop-python-2-7-9707a901c7d8eab6.yaml0000644000175000017500000000020600000000000026657 0ustar00coreycorey00000000000000--- upgrade: - | Python 2.7 support has been dropped. The minimum version of Python now supported by Neutron is Python 3.6. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/dscp-qos-77ea9b27d3762e48.yaml0000644000175000017500000000072600000000000025562 0ustar00coreycorey00000000000000--- prelude: > A new rule has been added to the API that allows for tagging traffic with DSCP values. This is currently supported by the Open vSwitch QoS driver. features: - Neutron can apply a QoS rule to ports that mark outgoing traffic's type of service packet header field. - The Open vSwitch Neutron agent has been extended to mark the Type of Service IP header field of packets egressing from the VM when the QoS rule has been applied. ././@PaxHeader0000000000000000000000000000023300000000000011453 xustar0000000000000000133 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/dvr-configure-centralized-floatingip-with-new-agent-type-05361f1f78853cf7.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/dvr-configure-centralized-floatingip-with-new-agent-typ0000644000175000017500000000162300000000000033565 0ustar00coreycorey00000000000000--- prelude: > A new agent mode (``dvr_no_external``) for DVR routers has been added to allow the server to configure Floating IPs associated with DVR at the centralized node. features: - | A new DVR agent type ``dvr_no_external`` has been introduced with this release. This agent type allows the Floating IPs (DNAT/North-South routing) to be centralized while the East/West routing is still distributed. issues: - | There can be a mixture of ``dvr`` agents and ``dvr_no_external`` agents, but please avoid migrating a VM with a Floating IP between a ``dvr`` agent and a ``dvr_no_external`` agent. All VM ports with Floating IPs should be migrated to the same agent mode; this is a known restriction. upgrade: - | A new DVR agent mode of ``dvr_no_external`` was added. Changing between this mode and ``dvr`` is a disruptive operation to the dataplane. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/dvr-fip-namespace-on-all-nodes-c4da7ccd60ee62f5.yaml0000644000175000017500000000100000000000000032063 0ustar00coreycorey00000000000000--- features: - Proactively create DVR floating IP namespaces on all compute nodes when a gateway is configured. issues: - Creating DVR floating IP namespaces on all nodes proactively might consume public IP addresses, but by using subnet service-types as explained in `the networking guide `__, consumers can use private IPs for floating IP agent gateway ports and need not consume any public IP addresses. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/dvr-ha-support-cc67e84d9380cd0b.yaml0000644000175000017500000000112700000000000027046 0ustar00coreycorey00000000000000--- prelude: > High Availability (HA) of SNAT service is supported for Distributed Virtual Routers (DVRs). features: - High Availability support for SNAT services on Distributed Virtual Routers.
Routers can now be created with the flags distributed=True and ha=True; an example command is shown after this group of notes. The created routers will provide Distributed Virtual Routing as well as SNAT high availability on the l3 agents configured for dvr_snat mode. issues: - Only creation of dvr/ha routers is currently supported. Upgrade from other types of routers to dvr/ha routers is not supported in this release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/dvr-ovs-agent-6052a8d60fddde22.yaml0000644000175000017500000000055700000000000026643 0ustar00coreycorey00000000000000--- prelude: > An OVS agent configured to run in DVR mode will fail to start if it cannot get proper DVR configuration values from the server on start-up. The agent will no longer fall back to non-DVR mode, since that may lead to inconsistency in the DVR-enabled cluster, as the Neutron server does not distinguish between DVR and non-DVR OVS agents. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/dvr-support-live-migration-b818b12bd9cbb518.yaml0000644000175000017500000000101600000000000031370 0ustar00coreycorey00000000000000--- prelude: > Improve DVR's resiliency during Nova VM live migration events. fixes: - Create DVR router namespaces pro-actively on the destination node during live migration events. This helps minimize packet loss to floating IP traffic. issues: - More synchronization between Nova and Neutron is needed to properly handle live migration failures on either side. For instance, if live migration is reverted or canceled, some dangling Neutron resources may be left on the destination host. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/dvr_handle_unbound_floatingip_port-f12ae806b8be2065.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/dvr_handle_unbound_floatingip_port-f12ae806b8be2065.yam0000644000175000017500000000151200000000000033023 0ustar00coreycorey00000000000000--- features: - | Floating IPs associated with an unbound port with DVR routers will not be distributed, but will be centralized and implemented in the SNAT namespace of the Network node or ``dvr_snat`` node. Floating IPs associated with an allowed_address_pair port IP that are bound to multiple active VMs with DVR routers will be implemented in the SNAT namespace of the Network node or ``dvr_snat`` node. This addresses VRRP use cases. More information about this is captured in `bug 1583694 `__. issues: - | While the bound port Floating IPs are distributed, the unbound port Floating IPs are centralized. fixes: - | Allows the unbound port Floating IPs to be configured properly with DVR routers irrespective of their device_owner.
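Creating the dvr/ha routers mentioned above maps to a single client call; the router name is a placeholder:

    openstack router create --distributed --ha router-dvr-ha

The same effect can be achieved through the API by posting to /v2.0/routers with "distributed": true and "ha": true in the request body.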
././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/dynamically-resize-agent-greenthreads-c163ab37d36fcafe.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/dynamically-resize-agent-greenthreads-c163ab37d36fcafe.0000644000175000017500000000077600000000000033002 0ustar00coreycorey00000000000000--- features: - | The Neutron L3 and DHCP agents now dynamically tune the number of processing greenthreads they run based on the number of objects they are managing, with the current values for this range being between eight and thirty-two threads, which is an increase over the previous static value of eight threads. This should help address some of the scaling problems in the agents. For more information see bug `1813787 `_. ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/enable-bridge-command-openvswitch-agent-d07c0b59ea9f864f.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/enable-bridge-command-openvswitch-agent-d07c0b59ea9f8640000644000175000017500000000047200000000000032613 0ustar00coreycorey00000000000000--- fixes: - | The Openvswitch agent has an extension called ``fdb`` that uses the Linux ``bridge`` command. The ``bridge`` command has been added to the rootwrap openvswitch-plugin.filters file. For more information, see bug: `1730407 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/enable-sorting-pagination-754390289d3311fa.yaml0000644000175000017500000000012100000000000030701 0ustar00coreycorey00000000000000--- upgrade: - API sorting and pagination features are now enabled by default. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/end-to-end-mtu-00345fc4282cb8fb.yaml0000644000175000017500000000171700000000000026622 0ustar00coreycorey00000000000000--- features: - Use the value of the network 'mtu' attribute for the MTU of virtual network interfaces such as veth pairs, patch ports, and tap devices involving a particular network. - Enable end-to-end support for arbitrary MTUs including jumbo frames between instances and provider networks by moving MTU disparities between flat or VLAN networks and overlay networks from layer-2 devices to layer-3 devices that support path MTU discovery (PMTUD). upgrade: - Does not change MTU for existing virtual network interfaces. - Actions that create virtual network interfaces on an existing network with the 'mtu' attribute containing a value greater than zero could cause issues for network traffic traversing existing and new virtual network interfaces. fixes: - Explicitly configure MTU of virtual network interfaces rather than using default values or incorrect values that do not account for overlay protocol overhead. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/enhance-tags-1f8915fe3e074069.yaml0000644000175000017500000000015000000000000026271 0ustar00coreycorey00000000000000--- features: - Resource tag mechanism now supports subnet, port, subnetpool and router resources.././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/extend-policy-for-extension-resource-owner-check-4a19b84889660506.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/extend-policy-for-extension-resource-owner-check-4a19b80000644000175000017500000000066600000000000033237 0ustar00coreycorey00000000000000--- features: - | Introduces an extension parent resource owner check in ``neutron.policy.OwnerCheck``. It can be used by registering an extension parent resource, and the service plugin which introduced the corresponding parent resource, into ``EXT_PARENT_RESOURCE_MAPPING`` located in ``neutron.common.constants``. It also introduces a new policy role ``admin_or_ext_parent_owner`` into ``policy.json`` for this function. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/extend-quota-api-2df3b84309664234.yaml0000644000175000017500000000035200000000000027034 0ustar00coreycorey00000000000000--- features: - | Implements a new extension, ``quota_details``, which extends the existing quota API to show detailed information for a specified tenant. The new API shows details such as ``limits``, ``used``, ``reserved``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/external-ports-03050eda7ffe13d5.yaml0000644000175000017500000000103700000000000027124 0ustar00coreycorey00000000000000--- features: - | The OVN driver now makes use of the "external" ports concept that was introduced by Core OVN. For example, with this work a VM with a SR-IOV port attached (VNIC type "direct" and no "switchdev" capability) will now be translated into an "external" port which is able to reply to packets (e.g. DHCP) from another host that were bypassed in the hypervisor before. Note that, for this first iteration, all external ports will belong to the same HA group and will be scheduled onto the same node. ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/external_network_bridge-option-removed-bbf50fb803f04f82.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/external_network_bridge-option-removed-bbf50fb803f04f820000644000175000017500000000036200000000000033055 0ustar00coreycorey00000000000000--- upgrade: - The ``external_network_bridge`` config option has been removed. Existing users of this option will now have their router's gateway interface created in the integration bridge and it will be wired by the L2 agent. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/extraroute-atomic-5ae09e3f37c5fbda.yaml0000644000175000017500000000110000000000000027752 0ustar00coreycorey00000000000000--- features: - | The new API extension ``extraroute-atomic`` introduces two new member actions on routers to add/remove routes atomically on the server side.
The use of these new member actions (``PUT /v2.0/routers/ROUTER-ID/add_extraroutes`` and ``PUT /v2.0/routers/ROUTER-ID/remove_extraroutes``) is always preferred to the old way (``PUT /v2.0/routers/ROUTER-ID``) when multiple clients edit the extra routes of a router, since the old way is prone to race conditions between concurrent clients and therefore to possible lost updates; an example request is sketched after this group of notes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/fail-on-missing-extensions-bc332124b780875b.yaml0000644000175000017500000000024500000000000031112 0ustar00coreycorey00000000000000--- fixes: - The server will fail to start if any of the declared required extensions, as needed by core and service plugins, are not properly configured. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/fdb_population-70d751c8c2e4395f.yaml0000644000175000017500000000125200000000000027022 0ustar00coreycorey00000000000000--- fixes: - In order to fix the communication issues between SR-IOV instances and regular instances, the FDB population extension is added to the OVS or linuxbridge agent. The cause was that messages from an SR-IOV direct port instance to normal port instances located on the same hypervisor were sent directly to the wire because the FDB table was not yet updated. The FDB population extension tracks instance boot/delete operations using the handle_port and delete_port extension interface messages and updates the hypervisor's FDB table accordingly. Please note this L2 agent extension doesn't support the allowed address pairs extension. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/fetch-specific-column-in-ovo-69c0b087c8c7ee36.yaml0000644000175000017500000000031500000000000031453 0ustar00coreycorey00000000000000--- other: - | Support fetching specific db columns in OVO. A new method ``get_values`` is added to neutron object classes. This method can be leveraged to fetch specific fields of the object. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/fip-binding-limitation-1d2509950847b085.yaml0000644000175000017500000000044500000000000030132 0ustar00coreycorey00000000000000--- other: - | If an instance port is behind a DVR router and the port already has port forwarding(s) bound to it, Neutron will no longer allow binding a floating IP to that port, because DVR floating IP traffic rules would break the existing port forwarding functionality. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/fip-janitor-53f0d42a7471c5ed.yaml0000644000175000017500000000065300000000000026310 0ustar00coreycorey00000000000000--- other: - Due to changes in internal L3 logic, a server crash/backend failure during FIP creation may leave dangling ports attached to external networks. These ports can be identified by a ``PENDING`` ``device_id`` parameter. While those ports can also be removed by admins, the ``neutron-server`` service will now also trigger periodic (approximately once in 10 minutes) cleanup to address the issue.
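To illustrate the atomic extra-route member actions described in the first note of this group, a hedged request sketch (endpoint, token, router ID and route values are placeholders; ``remove_extraroutes`` accepts the same body for deletions):

    # Appends the given route without clobbering concurrent edits.
    curl -X PUT http://controller:9696/v2.0/routers/<router-id>/add_extraroutes \
        -H "X-Auth-Token: $TOKEN" -H "Content-Type: application/json" \
        -d '{"router": {"routes": [{"destination": "10.0.3.0/24", "nexthop": "10.0.0.13"}]}}'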
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/fip-qos-52926bce81c3f8bb.yaml0000644000175000017500000000020100000000000025517 0ustar00coreycorey00000000000000--- features: - Implementation of floating IP QoS. A new parameter ``qos_policy_id`` was added to floating IP related API. ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/firewall_driver_not_needed_on_server-4159669ad834dea6.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/firewall_driver_not_needed_on_server-4159669ad834dea6.y0000644000175000017500000000072100000000000032755 0ustar00coreycorey00000000000000--- prelude: > The Neutron server no longer needs to be configured with a firewall driver and it can support mixed environments of hybrid iptables firewalls and the pure OVS firewall. features: - The Neutron server now learns the appropriate firewall wiring behavior from each OVS agent so it no longer needs to be configured with the firewall_driver. This means it also supports multiple agents with different types of firewalls. ././@PaxHeader0000000000000000000000000000023200000000000011452 xustar0000000000000000132 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/fix-co-existence-bug-between-sg-logging-and-fwg-logging-ef16077880d76449.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/fix-co-existence-bug-between-sg-logging-and-fwg-logging0000644000175000017500000000035600000000000033267 0ustar00coreycorey00000000000000--- fixes: - | Add ``resource_type`` into log object query to distinguish between security group and firewall group log objects. For more information see bug `1787119 `_. ././@PaxHeader0000000000000000000000000000024600000000000011457 xustar0000000000000000144 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/fix-deferred-alloction-when-new-mac-in-same-request-as-binding-data-2a01c1ed1a8eff66.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/fix-deferred-alloction-when-new-mac-in-same-request-as-0000644000175000017500000000076700000000000033226 0ustar00coreycorey00000000000000--- fixes: - | Fixes an issue causing IP allocation on port update to fail when the initial IP allocation was deferred due to lack of binding info. If both the port mac_address and binding info (binding_host_id) were updated in the same request, the fixed_ips field was added to the request internally. The code to complete the deferred allocation failed to execute in that case. (For more information see bug `1811905 `_.) ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/fix-ipv6-auto-allocation-with-segments-b90e99a30d096c9d.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/fix-ipv6-auto-allocation-with-segments-b90e99a30d096c9d0000644000175000017500000000066100000000000032475 0ustar00coreycorey00000000000000--- fixes: - | Fixed an issue where IP allocation for IPv6 stateless subnets would allocate on invalid subnets when segments are used. Auto-addressing now filters on segment ids when allocating IP addresses. See bugs: `#1864225 `_, `#1864333 `_, `#1865138 `_. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/fix-mtu-for-existing-networks-5a476cde9bc46a53.yaml0000644000175000017500000000113100000000000032024 0ustar00coreycorey00000000000000--- features: - net-mtu extension now recalculates network MTU on each network access, not just on creation. It now allows operators to tweak MTU related configuration options and see them applied to all network resources right after controller restart, both old and new. upgrade: - Existing networks with MTU values that don't reflect configuration will receive new MTU values after controller upgrade. Note that to propagate new correct MTU values to your backend, you may need to resync all agents that set up ports, as well as re-attach VIFs to affected instances. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/fix-net-delete-race-f2fa5bac3ab35a5b.yaml0000644000175000017500000000066300000000000030074 0ustar00coreycorey00000000000000--- fixes: - | Fixes an issue where deletion of a provider network could result in ML2 mechanism drivers not being passed information about the network's provider fields. The consequences of this depend on the mechanism driver in use, but could result in the event being ignored, leading to an incorrectly configured network. See `bug 1841967 `__ for details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/fix-ovsdb-ssl-connection-4058caf4fdcb33ab.yaml0000644000175000017500000000053300000000000031132 0ustar00coreycorey00000000000000--- features: - | Neutron agents now support SSL connections to OVSDB server. To enable an SSL based connection, use an ``ssl`` prefixed URI for the ``ovsdb_connection`` setting. When using SSL it is also required to set new ``ovs`` group options which include ``ssl_key_file``, ``ssl_cert_file``, and ``ssl_ca_cert_file``. ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/fix-remote-security-group-no-port-on-host-9177e66d4b16e90c.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/fix-remote-security-group-no-port-on-host-9177e66d4b16e0000644000175000017500000000057100000000000032605 0ustar00coreycorey00000000000000--- fixes: - | Fixes an issue that the OVS firewall driver does not configure security group rules using remote group properly when a corresponding remote group has no port on a local hypervisor. For more information see bugs: `1862703 `_ and `1854131 `__. ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/fix-security-group-protocol-by-numbers-48afb97ede961716.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/fix-security-group-protocol-by-numbers-48afb97ede9617160000644000175000017500000000044700000000000032657 0ustar00coreycorey00000000000000--- fixes: - | Adding security group rules by protocol number is documented, but somehow was broken without being noticed in one of the last couple of releases. This is now fixed. For more information see bug `1716045 `_. 
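The protocol-by-number rules covered by the last fix above can be created from the CLI; a sketch with placeholder values, using 112 (VRRP) as an example protocol number:

    openstack security group rule create --ingress --protocol 112 <security-group>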
././@PaxHeader0000000000000000000000000000023000000000000011450 xustar0000000000000000130 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/fix-update-port-fixed-ips-on-routed-provider-networks-c54a54844d9a3926.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/fix-update-port-fixed-ips-on-routed-provider-networks-c0000644000175000017500000000035200000000000033461 0ustar00coreycorey00000000000000--- fixes: - | When updating the fixed-ips of a port residing on a routed provider network the port update would always fail if *host* was not set. See bug: `1844124 `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/floatingips-port-forwarding-65efd8c17a16dffc.yaml0000644000175000017500000000251500000000000031761 0ustar00coreycorey00000000000000--- prelude: > Added support for floating IPs port forwarding. features: - | Support for floating IPs port forwarding has been added. * Users can now forward the traffic from a TCP/UDP/other protocol port of a floating IP address to a TCP/UDP/other protocol port associated to one of the fixed IP addresses of a Neutron port. * This is accomplished by associating ``port_forwarding`` sub-resources to floating IPs. * To create a ``port_forwarding``, the user specifies: a floating IP ID, the floating IP's ``external_port`` number, the Neutron port ID ``internal_port_id``, an ``internal_ip_address`` (one of the Neutron port's fixed IPs), the ``internal_port`` number and the ``protocol`` to be used (TCP or UDP for example). * CRUD operations for ``port_forwardings`` are implemented by a Neutron API extension and a service plugin. Please refer to the Neutron API reference documentation for details. * A user cannot create ``port_forwardings`` for a floating IP that is already associated with a Neutron port. * A floating IP can have many ``port_forwardings``. * Port forwardings can only be created for floating IPs that are managed by centralized routers in the network node: legacy, HA, DVR+HA. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/force-arp-responder-true-for-dvr-5aabbfa51945dd5a.yaml0000644000175000017500000000054500000000000032510 0ustar00coreycorey00000000000000--- other: - | When the ``enable_distributed_routing`` (DVR) configuration option is set to ``True`` and tunneling is enabled, the ``arp_responder`` option will be forced to ``True`` since it is now required in order for ARP to work properly. For more information, see bug `1774459 `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/gateway-rate-limit-905bee1ed60c6b8e.yaml0000644000175000017500000000115300000000000027735 0ustar00coreycorey00000000000000--- features: - | A new attribute ``qos_policy_id`` is added to the L3 router gateway. * It enables users to associate QoS policies to L3 router gateways to control the rate of transmission of the associated SNAT traffic. * At the moment, only bandwidth limit rules are supported in the QoS polices. * To enable this feature, the ``qos`` service plugin has to be configured in the Neutron server and the ``gateway_ip_qos`` extension has to be configured in the L3 agents. Please refer to the ``QoS`` section of the ``OpenStack Networking Guide`` for more specific details. 
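The gateway QoS association described above can be expressed through the client; a hedged sketch assuming a pre-created bandwidth-limit policy and placeholder names (the ``--qos-policy`` option of ``openstack router set`` is assumed to apply the policy to the gateway):

    openstack router set --external-gateway public --qos-policy gw-bw-limit router1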
././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/get_standard_device_mappings_for_mechdriver-bc039d478ea0b162.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/get_standard_device_mappings_for_mechdriver-bc039d478ea0000644000175000017500000000045600000000000033401 0ustar00coreycorey00000000000000--- features: - | Add get_standard_device_mappings to SriovNicSwitchMechanismDriver and OpenvswitchMechanismDriver so they can return the interface or bridge mappings in a standard way. The common format is a dict like: {'physnet_name': ['device_or_bridge_1', 'device_or_bridge_2']}. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/hyperv-neutron-agent-decomposition-ae6a052aeb48c6ac.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/hyperv-neutron-agent-decomposition-ae6a052aeb48c6ac.yam0000644000175000017500000000071600000000000033075 0ustar00coreycorey00000000000000--- upgrade: - The Hyper-V Neutron Agent has been fully decomposed from Neutron. The `neutron.plugins.hyperv.agent.security_groups_driver.HyperVSecurityGroupsDriver` firewall driver has been deprecated and will be removed in the Ocata release. Update the `neutron_hyperv_agent.conf` files on the Hyper-V nodes to use `hyperv.neutron.security_groups_driver.HyperVSecurityGroupsDriver`, which is the networking_hyperv security groups driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/hyperv-security-group-driver-fdbe0c0c292a1505.yaml0000644000175000017500000000127300000000000031735 0ustar00coreycorey00000000000000--- prelude: > Hyper-V Neutron Agent has been fully decomposed from Neutron. Therefore, the `neutron.plugins.hyperv.agent.security_groups_driver.HyperVSecurityGroupsDriver` firewall driver has been deleted. Update the `neutron_hyperv_agent.conf` / `neutron_ovs_agent.conf` files on the Hyper-V nodes to use `hyperv.neutron.security_groups_driver.HyperVSecurityGroupsDriver`, which is the networking_hyperv security groups driver. upgrade: - Update the `neutron_hyperv_agent.conf` / `neutron_ovs_agent.conf` files on the Hyper-V nodes to use `hyperv.neutron.security_groups_driver.HyperVSecurityGroupsDriver`, which is the networking_hyperv security groups driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/ib-dhcp-allocation-fix-a4ebe8b55bb2c065.yaml0000644000175000017500000000054300000000000030435 0ustar00coreycorey00000000000000--- fixes: - | For Infiniband support, Ironic needs to send the 'client-id' DHCP option as a number in order for IP address assignment to work. This is now supported in Neutron, and can be specified as option number 61 as defined in RFC 4776. For more information see bug `1770932 `_././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/ingress-bandwidth-limit-in-linuxbridge-agent-50a2dad610401474.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/ingress-bandwidth-limit-in-linuxbridge-agent-50a2dad6100000644000175000017500000000022600000000000033016 0ustar00coreycorey00000000000000--- features: - Linuxbridge L2 agent supports ingress bandwidth limit. 
The Linuxbridge L2 agent now supports bi-directional bandwidth limiting. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/ip-substring-port-filtering-f5c3d89c4a91e867.yaml0000644000175000017500000000012700000000000031500 0ustar00coreycorey00000000000000--- features: - | Support substring matching when filtering ports by IP address. ././@PaxHeader0000000000000000000000000000022500000000000011454 xustar0000000000000000127 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/iptables-fail-on-missing-sysctl-bridge-firewalling-912f157b5671363f.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/iptables-fail-on-missing-sysctl-bridge-firewalling-912f0000644000175000017500000000136500000000000033242 0ustar00coreycorey00000000000000--- deprecations: - The ``iptables`` firewall driver will no longer enable bridge firewalling in future versions of Neutron. If your distribution overrides the default value for any of the relevant sysctl settings (``net.bridge.bridge-nf-call-arptables``, ``net.bridge.bridge-nf-call-ip6tables``, and ``net.bridge.bridge-nf-call-iptables``) then make sure you set them back to the upstream kernel default (``1``) using /etc/sysctl.conf or /etc/sysctl.d/* configuration files. upgrade: - On newer Linux kernels (3.18+) you will need to load the ``br_netfilter`` kernel module before starting an Open vSwitch or Linuxbridge agent using the ``iptables`` firewall driver. Otherwise the agent will fail to start. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/ipv6_first_ip_address_valid-cd94b47bdcc642cf.yaml0000644000175000017500000000036700000000000031764 0ustar00coreycorey00000000000000--- upgrade: - | The first address in an IPv6 network is now a valid, usable IP for routers. It had previously been reserved, but now can be assigned to a router so that an IPv6 address ending in "::" could be a valid default route. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/ivs-interfacedriver-removal-a9cce87310028b99.yaml0000644000175000017500000000045700000000000031437 0ustar00coreycorey00000000000000--- other: - The deprecated ``IVSInterfaceDriver`` class has been removed from the code base. This means neither ``ivs`` nor ``neutron.agent.linux.interface.IVSInterfaceDriver`` can be used any longer as a value for the ``interface_driver`` config option in ``neutron.conf``. ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/keepalived-state-change-server-threads-9ed775e7533dd1a0.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/keepalived-state-change-server-threads-9ed775e7533dd1a00000644000175000017500000000101100000000000032533 0ustar00coreycorey00000000000000--- upgrade: - A new option ``ha_keepalived_state_change_server_threads`` has been added to configure the number of concurrent threads spawned for keepalived server connection requests. Higher values increase the CPU load on the agent nodes. The default value is half of the number of CPUs present on the node. This allows operators to tune the number of threads to suit their environment. With more threads, simultaneous state change requests for multiple HA routers can be handled faster. 
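An illustrative ``l3_agent.ini`` excerpt for the option above, pinning the thread count instead of relying on the CPU-derived default; the section placement and value are assumptions for illustration::

    [DEFAULT]
    # cap keepalived state change server threads on a busy agent node (example value)
    ha_keepalived_state_change_server_threads = 8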
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/l2_adjacency-e6e54e5ff9aad9b7.yaml0000644000175000017500000000043000000000000026640 0ustar00coreycorey00000000000000--- features: - | The new l2_adjacency extension adds an l2_adjacency field to the network, to indicate whether or not there is guaranteed L2 adjacency between the ports on that network. Routed network implementations would typically set l2_adjacency to False. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/l3-agent-api-get-router-info-93c316a792a9d87f.yaml0000644000175000017500000000014200000000000031226 0ustar00coreycorey00000000000000--- features: - A new method ``get_router_info`` has been added to ``L3AgentExtensionAPI``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/l3-agent-extensions-b348ff26aec0fe88.yaml0000644000175000017500000000051700000000000030050 0ustar00coreycorey00000000000000--- features: - The neutron L3 agent now has the ability to load agent extensions, which allows other services to integrate without additional agent changes. An API for exposing the l3 agent's router info data to the extensions is also provided so that extensions can remain consistent with router state. ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/l3-agent-extensions-ha-state-change-f50ae363a53b0f18.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/l3-agent-extensions-ha-state-change-f50ae363a53b0f18.ya0000644000175000017500000000015500000000000032262 0ustar00coreycorey00000000000000--- features: - | A new method ``ha_state_change`` has been added to ``L3AgentExtensionsManager``. ././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/l3-agent-extensions-register-router-factory-46a86f845895f4f6.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/l3-agent-extensions-register-router-factory-46a86f845890000644000175000017500000000056000000000000032662 0ustar00coreycorey00000000000000--- features: - | A new parameter ``router_factory`` has been added to ``neutron.agent.l3.L3AgentExtensionAPI``. Developers can register a ``neutron.agent.l3.agent.RouterInfo`` class and delegate ``RouterInfo`` creation to it. Extensions can extend ``RouterInfo`` itself to correspond to each feature (ha, distributed, ha + distributed). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/l3-conntrack-helper-5d3148b575c4ad2f.yaml0000644000175000017500000000154400000000000027644 0ustar00coreycorey00000000000000--- features: - | Support for L3 conntrack helpers has been added. Users can now configure conntrack helper target rules to be set for a ``Router``. This is accomplished by associating a ``conntrack_helper`` sub-resource to a router. To create a ``conntrack_helper``, the user specifies: a router ID, the protocol (TCP or UDP, for example), the port number and the conntrack helper module alias (tftp or ftp, for example). CRUD operations for ``conntrack_helpers`` are implemented by a Neutron API extension and a service plugin. 
Please refer to the Neutron API reference documentation for details. A router can have multiple ``conntrack_helpers``. The new configuration option ``[l3-conntrack-helpers]/allowed_conntrack_helpers`` allows the operator to configure allowed helpers and the helper protocol constraints. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/l3_agent_graceful_shutdown-87bf3304e6fab8a5.yaml0000644000175000017500000000102400000000000031444 0ustar00coreycorey00000000000000--- features: - | A new configuration option, ``cleanup_on_shutdown``, was added to the L3 agent. If set to True, the L3 agent will explicitly delete all routers on shutdown. For L3 HA routers it includes a graceful shutdown of keepalived and the state change monitor, which will allow a faster failover in certain conditions. The default value of ``cleanup_on_shutdown`` is False to maintain backward compatibility. Setting it to True could affect the data plane when stopping or restarting the L3 agent. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/linuxbridge-agent-extensions-66bdf9feee25ef99.yaml0000644000175000017500000000033300000000000032143 0ustar00coreycorey00000000000000--- prelude: > The Linuxbridge agent now supports l2 agent extensions. features: - The Linuxbridge agent can now be extended by 3rd parties using a pluggable mechanism. fixes: - Partially closes bug 1468803. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/linuxbridge-vxlan-udp-ports-73b260efefa15a46.yaml0000644000175000017500000000051500000000000031537 0ustar00coreycorey00000000000000--- features: - UDP ports used by VXLAN in the LinuxBridge agent can now be configured with the VXLAN.udp_srcport_min, VXLAN.udp_srcport_max and VXLAN.udp_dstport config options. To use the IANA-assigned port number, set VXLAN.udp_dstport to 4789. The default is not changed from the Linux kernel default 8472. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/linuxbridge_vxlan_arp_responder-e9ea91552e1b62a7.yaml0000644000175000017500000000211300000000000032533 0ustar00coreycorey00000000000000--- upgrade: - When using ML2 and the Linux Bridge agent, the default value for the ARP Responder under L2Population has changed. The responder is now disabled to improve compatibility with the allowed-address-pair extension and to match the default behavior of the ML2 OVS agent. The logical network will now utilize traditional flood and learn through the overlay. When upgrading, existing vxlan devices will retain their old setup and be unimpacted by changes to this flag. To apply this to older devices created with the Liberty agent, the vxlan device must be removed and then the Mitaka agent restarted. The agent will recreate the vxlan devices with the current settings upon restart. To maintain pre-Mitaka behavior, enable the arp_responder in the Linux Bridge agent VXLAN config file prior to starting the updated agent. fixes: - The Linuxbridge agent now supports the ability to toggle the local ARP responder when L2Population is enabled. This ensures compatibility with the allowed-address-pairs extension. 
Closes bug 1445089. ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/locate-rp-tree-parent-by-hypervisor-name-3244ed87dc57f950.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/locate-rp-tree-parent-by-hypervisor-name-3244ed87dc57f90000644000175000017500000000233600000000000032567 0ustar00coreycorey00000000000000--- fixes: - | Neutron now locates the root resource provider of the resource provider tree it creates by using the hypervisor name instead of the hostname. These are different in rare cases only. The hypervisor name can be set per physical network device in config option ``resource_provider_hypervisors`` which is located in the ``[ovs]`` ini-section for ``ovs-agent`` and ``[sriov_nic]`` ini-section for ``sriov-agent``. Hypervisor names default to ``socket.gethostname()`` which works out of the box with ``libvirt`` even when the ``DEFAULT.host`` config option is set to a non-default value. We believe this change fixes `bug 1853840 `_. upgrade: - | For users affected by `bug 1853840 `_ the hypervisor name now can be set per physical network device in config option ``resource_provider_hypervisors`` which is located in the ``[ovs]`` ini-section for ``ovs-agent`` and ``[sriov_nic]`` ini-section for ``sriov-agent``. Hypervisor names default to ``socket.gethostname()`` which works out of the box with ``libvirt`` even when the ``DEFAULT.host`` config option is set to a non-default value. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/macvtap-l2-agent-2b551d8ec341196d.yaml0000644000175000017500000000177500000000000027050 0ustar00coreycorey00000000000000--- prelude: > Adding the MacVtap ML2 driver and L2 agent as a new vswitch choice features: - Libvirt qemu/kvm instances can now be attached via MacVtap in bridge mode to a network. VLAN and FLAT attachments are supported. Attachments other than compute are not supported. issues: - To ensure any kind of migration works between all compute nodes, make sure that the same physical_interface_mappings is configured on each MacVtap compute node. Having different mappings could cause live migration to fail (if the configured physical network interface does not exist on the target host), or even worse, result in an instance placed on the wrong physical network (if the physical network interface exists on the target host, but is used by another physical network or not used at all by OpenStack). Such an instance does not have access to its configured networks anymore. It then has layer 2 connectivity to either another OpenStack network, or one of the host's networks.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/macvtap_assigned_vf_check-f4d07660ffd82a24.yaml0000644000175000017500000000012300000000000031223 0ustar00coreycorey00000000000000--- fixes: - Fixes the SR-IOV agent macvtap assigned VF check when the Linux kernel is < 3.13 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/make-mtu-not-nullable-2b2765bc85379545.yaml0000644000175000017500000000026100000000000027771 0ustar00coreycorey00000000000000--- upgrade: - | The network ``mtu`` attribute is set to be non-nullable. 
If the ``mtu`` is empty (i.e. the network was created before the Pike release), it is set to the default value of 1500. ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/make-supported-vnic-types-configurable-for-ovs-fc73422daffd42b0.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/make-supported-vnic-types-configurable-for-ovs-fc73422d0000644000175000017500000000054600000000000033136 0ustar00coreycorey00000000000000--- other: - | Add a new configuration group ``ovs_driver`` and, under it, a new configuration option ``vnic_type_blacklist``, to make the previously hardcoded ``supported_vnic_types`` parameter of the OpenvswitchMechanismDriver configurable. The ``vnic_types`` listed in the blacklist will be removed from the ``supported_vnic_types`` list. ././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/make-supported-vnic-types-configurable-for-sriov-094f7663e8975e9b.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/make-supported-vnic-types-configurable-for-sriov-094f760000644000175000017500000000055300000000000033170 0ustar00coreycorey00000000000000--- other: - | Add a new configuration group ``sriov_driver`` and, under it, a new configuration option ``vnic_type_blacklist``, to make the previously hardcoded ``supported_vnic_types`` parameter of the SriovNicSwitchMechanismDriver configurable. The ``vnic_types`` listed in the blacklist will be removed from the ``supported_vnic_types`` list. ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/metadata-proxy-header-vulnerability-60c44eb7c76d560c.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/metadata-proxy-header-vulnerability-60c44eb7c76d560c.ya0000644000175000017500000000053200000000000032611 0ustar00coreycorey00000000000000--- security: - | A change was made to the metadata proxy to not allow a user to override header values; it will now always insert the correct information and remove unnecessary fields before sending requests to the metadata agent. For more information, see bug `1865036 `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/metering-driver-stevedore-alias-2c4fdb0556205a3a.yaml0000644000175000017500000000041600000000000032237 0ustar00coreycorey00000000000000--- features: - The metering agent driver can now be specified with a stevedore alias in the ``metering_agent.ini`` file. For example, ``driver = iptables`` instead of ``driver = neutron.services.metering.iptables.iptables_driver:IptablesMeteringDriver``. ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/metering-iptables-driver-load-interface-driver-ca397f1db40ec643.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/metering-iptables-driver-load-interface-driver-ca397f1d0000644000175000017500000000045100000000000033176 0ustar00coreycorey00000000000000--- other: - | The metering agent iptables driver can now load its interface driver by using a stevedore alias in the ``metering_agent.ini`` file. 
For example, ``interface_driver = openvswitch`` instead of ``interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver``. ././@PaxHeader0000000000000000000000000000022600000000000011455 xustar0000000000000000128 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/modify-dhcp-behavior-based-on-network-auto-schedule-1ea5e74fd5bb560c.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/modify-dhcp-behavior-based-on-network-auto-schedule-1ea0000644000175000017500000000130600000000000033270 0ustar00coreycorey00000000000000--- fixes: - | Neutron previously did not fully respect the network-auto-schedule configuration option. If the network-auto-schedule option was set to False, the network a) was still scheduled on a DHCP agent when it was created, and b) was scheduled on a new DHCP agent if the old DHCP mapping was removed by the user/admin. Respecting the option is especially necessary where the network backends provide DHCP directly. This has now been fixed: if network-auto-schedule is set to False in the config file, networks will not be automatically scheduled to DHCP agents. If mapping/scheduling is required, it can be done manually or by setting network-auto-schedule to True. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/modify_api_rpc_worker_defaults-1acd62728b2b55fa.yaml0000644000175000017500000000122400000000000032401 0ustar00coreycorey00000000000000--- upgrade: - The number of api and rpc workers may change on upgrade. It is strongly recommended that all deployers set these values in their neutron configurations, rather than using the defaults. fixes: - Neutron API workers default to the number of CPU cores. This can lead to high-cpu/low-memory boxes getting into trouble. The defaults have been tweaked to cap the default at either the number of cores or half of system memory, whichever is lower. In addition, the default number of RPC workers has been changed from ``1`` to half the number of API workers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/mtu-selection-and-advertisement-ab29f9ec43140224.yaml0000644000175000017500000000057400000000000032204 0ustar00coreycorey00000000000000--- prelude: > Support for MTU selection and advertisement. features: - When advertise_mtu is set in the config, Neutron supports advertising the LinkMTU using Router Advertisements. other: - For details please read `Blueprint mtu-selection-and-advertisement `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/netns_cleanup_kill_procs-af88d8c47c07dd9c.yaml0000644000175000017500000000063700000000000031331 0ustar00coreycorey00000000000000--- features: - A new mechanism has been added to the ``neutron-netns-cleanup`` tool that allows killing processes listening on any ``Unix`` or network socket within a namespace. The new mechanism will first try to kill those processes gracefully using the ``SIGTERM`` signal and, if they refuse to die, the ``SIGKILL`` signal will be sent to each remaining process to ensure a proper cleanup. 
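The API/RPC worker defaults note above strongly recommends setting the worker counts explicitly rather than relying on the defaults; an illustrative ``neutron.conf`` excerpt (the values are examples only, to be sized per host)::

    [DEFAULT]
    # explicit worker counts, sized for this deployment's CPU and memory
    api_workers = 4
    rpc_workers = 2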
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/network_ip_availability-d64bd7032b3c15ee.yaml0000644000175000017500000000064600000000000031056 0ustar00coreycorey00000000000000--- prelude: > Neutron now provides network IP availability information. features: - A new API endpoint /v2.0/network-ip-availabilities that allows an admin to quickly get counts of used_ips and total_ips for network(s) is available. The new endpoint allows filtering by network_id, network_name, tenant_id, and ip_version. The response returns network and nested subnet data that includes used and total IPs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/network_link_prefix-e3fe37e37ea275b7.yaml0000644000175000017500000000045300000000000030243 0ustar00coreycorey00000000000000--- features: - | A new ``network_link_prefix`` configuration option is introduced that allows altering the domain returned in the URLs included in API responses. It behaves the same way as the ``compute_link_prefix`` and ``glance_link_prefix`` options do for Nova and Glance. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/new-vif-type-for-pf-passthrough-33ec560b9b5d246f.yaml0000644000175000017500000000036700000000000032162 0ustar00coreycorey00000000000000--- features: - The SriovNicSwitchMechanismDriver now exposes a new VIF type 'hostdev_physical' for ports with vnic type 'direct-physical' (used for SR-IOV PF passthrough). This will enable Nova to provision PFs as Neutron ports. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/noneutronversions-fbbdb98f350767d8.yaml0000644000175000017500000000055300000000000030007 0ustar00coreycorey00000000000000--- deprecations: - | The api-paste entrypoint ``neutron.api.versions:Versions.factory`` has been deprecated and will be removed in the Rocky release. Please update your api-paste.ini file to use the one that ships with Queens or update any references to the Versions factory to point to ``neutron.pecan_wsgi.app:versions_factory`` instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/notifier-ironic-66391e083d78fee2.yaml0000644000175000017500000000067300000000000027131 0ustar00coreycorey00000000000000--- features: - | A notifier for the OpenStack Baremetal service (``ironic``) is introduced. When enabled, notifications are sent to the Baremetal service on relevant resource events/changes. By default, notifications to the Baremetal service are *disabled*. To *enable* them, set ``[ironic]/enable_notifications`` to ``True`` in the Networking service configuration (``neutron.conf``). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/of_interface-native-by-default-0c07bdbd7365230a.yaml0000644000175000017500000000130300000000000032005 0ustar00coreycorey00000000000000--- prelude: > Prior to Newton, the neutron-openvswitch-agent used the 'ovs-ofctl' of_interface driver by default. In Newton, 'of_interface' defaults to 'native'. 
This mostly eliminates spawning ovs-ofctl and improves performance a little. upgrade: - To retain the old default for neutron-openvswitch-agent, use 'of_interface = ovs-ofctl' in the '[ovs]' section of your openvswitch agent configuration file. - By default, the native interface will have the Ryu controller listen on 127.0.0.1:6633. The listen address can be configured with of_listen_address and of_listen_port options. Ensure that the controller has permission to listen at the configured address. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/oslo-cache-cache-url-deprecated-16cd3d335c5962eb.yaml0000644000175000017500000000071100000000000032040 0ustar00coreycorey00000000000000--- features: - Neutron switched to using oslo.cache library to cache port state in metadata agent. With it, more caching backends are now available, including Memcached and Mongo. More details in oslo.cache documentation. deprecations: - The cache_url configuration option is deprecated as of Newton, and will be removed in Ocata. Please configure metadata cache using [cache] group, setting enable = True and configuring your backend. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/oslo-messaging-notifier-queue-d94677076a1db261.yaml0000644000175000017500000000054200000000000031617 0ustar00coreycorey00000000000000--- features: - The RPC and notification queues have been separated into different queues. Specify the transport_url to be used for notifications within the [oslo_messaging_notifications] section of the configuration file. If no transport_url is specified in [oslo_messaging_notifications], the transport_url used for RPC will be used. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/oslo-reports-166a169037bf64f2.yaml0000644000175000017500000000032300000000000026375 0ustar00coreycorey00000000000000--- prelude: > Neutron is integrated with Guru Meditation Reports library. features: - Neutron services should respond to SIGUSR2 signal by dumping valuable debug information to standard error output. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/oslo.messaging.notify.drivers-abb0d17b9e1bd470.yaml0000644000175000017500000000106000000000000032121 0ustar00coreycorey00000000000000--- upgrade: - Obsolete ``oslo.messaging.notify.drivers`` entrypoints that were left in tree for backwards compatibility with pre-Icehouse releases have been removed. Those are ``neutron.openstack.common.notifier.log_notifier``, ``neutron.openstack.common.notifier.no_op_notifier``, ``neutron.openstack.common.notifier.test_notifier``, ``neutron.openstack.common.notifier.rpc_notifier2``, ``neutron.openstack.common.notifier.rpc_notifier``. Use values provided by ``oslo.messaging`` library to configure notification drivers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/overlay_ip_version-ml2-e6438b570844ef5c.yaml0000644000175000017500000000137600000000000030435 0ustar00coreycorey00000000000000--- prelude: > Properly calculate overlay (tunnel) protocol overhead for environments using IPv4 or IPv6 endpoints. 
The ML2 plug-in configuration file contains a new configuration option, 'overlay_ip_version', in the '[ml2]' section that indicates the IP version of all overlay network endpoints. Use '4' for IPv4 and '6' for IPv6. Defaults to '4'. Additionally, all layer-2 agents must use the same IP version for endpoints. upgrade: - Define the 'overlay_ip_version' option and value appropriate for the environment. Only required if not using the Default of '4'. other: - The value of the 'overlay_ip_version' option adds either 20 bytes for IPv4 or 40 bytes for IPv6 to determine the total tunnel overhead amount. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/ovn-igmp-snooping-support-1a6ec8e703311fce.yaml0000644000175000017500000000023700000000000031241 0ustar00coreycorey00000000000000--- features: - | Adds support for IGMP snooping (Multicast) in the OVN driver. Defaults to False. IGMP snooping requires OVN version 2.12 or above. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/ovs-ct-firewall-driver-52a70a6a16d06f59.yaml0000644000175000017500000000063100000000000030310 0ustar00coreycorey00000000000000--- features: - New security groups firewall driver is introduced. It's based on OpenFlow using connection tracking. issues: - OVS firewall driver doesn't work well with other features using openflow. other: - OVS firewall driver requires OVS 2.5 version or higher with linux kernel 4.3 or higher. More info at `OVS github page `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/ovs-dpdk-rep-port-40fe628974040786.yaml0000644000175000017500000000022300000000000027073 0ustar00coreycorey00000000000000--- features: - | Adds support for OVS DPDK port representors, a direct port on a netdev datapath is considered a DPDK representor port. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/ovs-ipv6-tunnel-endpoints-f41b4954a04c43f6.yaml0000644000175000017500000000055400000000000031004 0ustar00coreycorey00000000000000--- prelude: > Support for IPv6 addresses as tunnel endpoints in OVS. features: - The local_ip value in ml2_conf.ini can now be set to an IPv6 address configured on the system. other: - Requires OVS 2.5+ version or higher with linux kernel 4.3 or higher. More info at `OVS github page `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/ovs-mac-table-size-config-option-d255d5208650f34b.yaml0000644000175000017500000000116400000000000032077 0ustar00coreycorey00000000000000--- features: - | A new config option ``bridge_mac_table_size`` has been added for Neutron OVS agent. This value will be set on every Open vSwitch bridge managed by the openvswitch-neutron-agent in ``other_config:mac-table-size`` column in ovsdb. Default value for this new option is set to 50000 and it should be enough for most systems. More details about this option can be found in `Open vSwitch documentation `_ For more information see bug `1775797 `_. 
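A minimal sketch for the ``bridge_mac_table_size`` option described above; the section placement in the OVS agent configuration and the value are assumptions for illustration::

    [ovs]
    # raise the per-bridge MAC learning table limit for hosts with many ports
    # (example value; the documented default is 50000)
    bridge_mac_table_size = 200000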
././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/ovs-make-inactivity-probe-configurable-39d669014d961c5c.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/ovs-make-inactivity-probe-configurable-39d669014d961c5c0000644000175000017500000000046200000000000032445 0ustar00coreycorey00000000000000--- other: - | A new option ``[ovs] of_inactivity_probe`` has been added to allow changing the inactivity probe interval when using the OVS ML2 agent with the native OpenFlow driver. Operators can increase this if they are experiencing OpenFlow timeouts. The default value is 10 seconds. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/ovs_hardware_offload_support-798d3896ab2c4b1d.yaml0000644000175000017500000000045100000000000032050 0ustar00coreycorey00000000000000--- features: - The ``openvswitch`` mechanism driver now supports hardware offload via SR-IOV. It allows binding direct (SR-IOV) ports. Using ``openvswitch`` 2.8.0 and 'Linux Kernel' 4.8 allows to control the SR-IOV VF via OpenFlow control plane and gain accelerated 'Open vSwitch'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/ovsdb-native-by-default-38835d6963592396.yaml0000644000175000017500000000173500000000000030177 0ustar00coreycorey00000000000000--- prelude: > Prior to Newton, the default option for 'ovsdb_interface' was 'vsctl'. In Newton 'ovsdb_interface' defaults to 'native'. This change switches the way of communication with OVSDB from the ovs-vsctl tool to Open vSwitch python api to improve out-of-the-box performance for typical deployments. upgrade: - To keep the old default value use 'ovsdb_interface = vsctl' in '[ovs]' section of openvswitch_agent.ini (common path '/etc/neutron/plugins/ml2/openvswitch_agent.ini') if there is a separate openvswitch agent configuration file; otherwise apply changes mentioned above to ml2_conf.ini (common path '/etc/neutron/plugins/ml2/ml2_conf.ini'). - The native interface configures ovsdb-server to listen for connections on 127.0.0.1:6640 by default. The address can be configured with the ovsdb_connection config option. Ensure that ovsdb-server has permissions to listen on the configured address. ././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/ovsdb_timeout_override_for_ovs_cleanup_tool-e6ed6db258d0819e.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/ovsdb_timeout_override_for_ovs_cleanup_tool-e6ed6db258d0000644000175000017500000000131400000000000033611 0ustar00coreycorey00000000000000--- fixes: - | Fixes bug `1763604 `_. Override default value of ``ovsdb_timeout`` config option in ``neutron-ovs-cleanup`` script. The default value is 10 seconds, but that is not enough for the ``neutron-ovs-cleanup`` script when there are many ports to remove from a single bridge, for example, 5000. Because of that, we now override the default value for the config option to be 600 seconds (10 minutes). issues: - | In the case when the number of ports to clean up in a single bridge is larger than about 10000, it might require an increase in the ``ovsdb_timeout`` config option to some value higher than 600 seconds. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/path-mtu-back-to-zero-e4f9e8bdd8317ad4.yaml0000644000175000017500000000061500000000000030274 0ustar00coreycorey00000000000000--- upgrade: - If you rely on the default ML2 path_mtu value of 1500 to cap the MTU used for new network resources, please set it explicitly in your ml2_conf.ini file. fixes: - The default value for the ML2 path_mtu option is changed from 1500 to 0, effectively disabling its participation in network MTU calculation unless it's overridden in the ml2_conf.ini configuration file. ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/physical_network-aware-dhcp-scheduling-94e9fadc7c7c5fec.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/physical_network-aware-dhcp-scheduling-94e9fadc7c7c5fec0000644000175000017500000000053300000000000033166 0ustar00coreycorey00000000000000--- prelude: > Schedule networks on dhcp-agents with access to the network features: - DHCP schedulers use the "filter_host_with_network_access" plugin method to filter hosts with access to the dhcp network. Plugins can overload it to define their own filtering logic. In particular, the ML2 plugin delegates the filtering to mechanism drivers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/pluggable-ipam-is-default-15c2ee15dc5b4a7b.yaml0000644000175000017500000000111400000000000031132 0ustar00coreycorey00000000000000--- prelude: > The internal pluggable IPAM implementation -- added in the Liberty release -- is now the default for both old and new deployments. Old deployments are unconditionally switched to pluggable IPAM during upgrade. Old non-pluggable IPAM is deprecated and removed from the code base. upgrade: - During the upgrade, the 'internal' ipam driver becomes the default for the 'ipam_driver' config option, and data is migrated to new tables using an alembic migration. deprecations: - The non-pluggable ipam implementation is deprecated and will be removed in the Newton release cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/port-mac-address-regenerate-312978c834abaa52.yaml0000644000175000017500000000046700000000000031274 0ustar00coreycorey00000000000000--- features: - | Adds the API extension ``port-mac-address-regenerate``. When passing ``'null'`` (``None``) as the ``mac_address`` on a port update, a converter will generate a new MAC address that will be assigned to the port. `RFE: #1768690 `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/precise-agent-state-transfer-67c771cb1ee04dd0.yaml0000644000175000017500000000323600000000000031632 0ustar00coreycorey00000000000000--- deprecations: - | The L2 population ``agent_boot_time`` config option is deprecated in favor of the direct RPC agent restart state transfer. It will be removed in the ``Train`` release. critical: - | The neutron-openvswitch-agent can sometimes spend too much time handling a large number of ports, exceeding its timeout value, ``agent_boot_time``, for L2 population. 
Because of this, some flow update operations will not be triggered, resulting in lost flows during agent restart, especially for host-to-host vxlan tunnel flows, causing the original tunnel flows to be treated as stale due to the different cookie IDs. The agent's first RPC loop will also do a stale flow clean-up procedure and delete them, leading to a loss of connectivity. Please ensure that all neutron-server and neutron-openvswitch-agent binaries are upgraded for the changes to take effect, after which the L2 population ``agent_boot_time`` config option will no longer be used. fixes: - | The neutron-openvswitch-agent was changed to notify the neutron-server in its first RPC loop that it has restarted. This signals neutron-server to provide updated L2 population information to correctly program FDB entries, ensuring connectivity to instances is not interrupted. This fixes the following bugs: `1794991 `_, `1799178 `_, `1813703 `_, `1813714 `_, `1813715 `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/project_id-d5ea7a42be428230.yaml0000644000175000017500000000060600000000000026200 0ustar00coreycorey00000000000000--- features: - The Networking API now supports the 'project_id' field in requests and responses, for compatibility with the Identity (Keystone) API V3. A new API extension, 'project-id', has been added to allow API users to detect if the 'project_id' field is supported. Note that the 'tenant_id' field is still supported, and the two fields are functionally equivalent. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/qos-drivers-refactor-16ece9984958f8a4.yaml0000644000175000017500000000073100000000000030122 0ustar00coreycorey00000000000000--- features: - The QoS driver architecture has been refactored to overcome several previous limitations; the main one was the coupling of QoS details into the mechanism drivers, and another was the need for configuration knobs to enable each specific notification driver, which will be handled automatically from now on. deprecations: - | The ``notification_drivers`` option from the [qos] section has been deprecated. It will be removed in a future release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/qos-for-router-gateway-02340f7aa8be3b0d.yaml0000644000175000017500000000031200000000000030464 0ustar00coreycorey00000000000000--- features: - | Network QoS policies are now supported for network:router_gateway ports. Neutron QoS policies set on an external network now apply to external router ports (DVR or not). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/qos-min-egress-bw-rule-b1c80f5675a4c1c3.yaml0000644000175000017500000000062500000000000030306 0ustar00coreycorey00000000000000--- features: - Users can now apply a QoS rule to a port or network to set up the minimum egress bandwidth per queue and port. The minimum egress bandwidth rule is applied to each port individually. other: - At the time of writing, Neutron bandwidth booking is not integrated with the Compute scheduler, which means that minimum bandwidth is not guaranteed but provided as best effort. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/qos-minimum-bw-reject-non-physnet-2f4ccddf484369fd.yaml0000644000175000017500000000026000000000000032652 0ustar00coreycorey00000000000000--- fixes: - | Reject QoS minimum bandwidth rule operations on ports and networks without a physnet; see bug `1819029 `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/qos-rule-type-details-api-call-27d792980235aec4.yaml0000644000175000017500000000037200000000000031567 0ustar00coreycorey00000000000000--- features: - | A new API to get details of supported rule types. The QoS service plugin can now expose details about supported QoS rule types in a Neutron deployment. The new API call is allowed only for users with admin privileges. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/qos-rules-alias-extension-ebf23b87460ee36e.yaml0000644000175000017500000000051500000000000031206 0ustar00coreycorey00000000000000--- prelude: > Support alias endpoints for rules in the QoS API. features: - | The ``qos-rules-alias`` API extension was implemented to enable users to perform GET, PUT and DELETE operations on QoS rules as though they are first level resources. In other words, the user doesn't have to specify the QoS policy ID. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/radvd_user-config-option-24730a6d686fee18.yaml0000644000175000017500000000106000000000000030721 0ustar00coreycorey00000000000000--- other: - | A new config option, ``radvd_user``, was added to l3_agent.ini for the L3 agent. This option defines the username passed to radvd, used to drop "root" privileges and change the user ID to the username and the group ID to the primary group of the user. If no user is specified (the default), the user executing the L3 agent will be passed. If "root" is specified, no "username" parameter will be passed, because radvd is spawned as root. (For more information see bug `1844688 `_.) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/reject-min-bw-updates-30bd0e3201dafce1.yaml0000644000175000017500000000052100000000000030301 0ustar00coreycorey00000000000000--- other: - | Neutron server now rejects (as ``NotImplementedError``) updates of ``minimum_bandwidth`` QoS rules if the rule is already in effect on bound ports. Implementing updates will require updates to Placement allocations and possibly migrating servers where the new ``minimum_bandwidth`` can be satisfied. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/relax-subnetpool-network-affinity-837c1fc28f835de5.yaml0000644000175000017500000000117300000000000032705 0ustar00coreycorey00000000000000--- features: - | When different subnet pools participate in the same address scope, the constraints disallowing subnets to be allocated from different pools on the same network have been relaxed. As long as subnet pools participate in the same address scope, subnets can now be created from different subnet pools when multiple subnets are created on a network. 
When address scopes are not used, subnets with the same ``ip_version`` on the same network must still be allocated from the same subnet pool. For more information, see bug `1830240 `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/remove-advertise_mtu-28933264714453c4.yaml0000644000175000017500000000025500000000000027661 0ustar00coreycorey00000000000000--- upgrade: - The ``advertise_mtu`` option is removed. Now Neutron always uses all available means to advertise MTUs to instances (including DHCPv4 and IPv6 RA). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/remove-agent_type-config-option-31eea687b4ec2e3a.yaml0000644000175000017500000000025000000000000032422 0ustar00coreycorey00000000000000--- upgrade: - | Config option ``agent_type``, which has been deprecated since Mitaka, is now removed. Agents should now use hardcoded values for agent type. ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/remove-allow-pagination-allow-sorting-ff23ca5ccb3007b9.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/remove-allow-pagination-allow-sorting-ff23ca5ccb3007b9.0000644000175000017500000000030300000000000032664 0ustar00coreycorey00000000000000--- other: - The ``allow_pagination`` and ``allow_sorting`` configuration options are now removed. Now, sorting and pagination are always enabled for plugins that support the features. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/remove-driver-60eb7e26d95f7322.yaml0000644000175000017500000000037200000000000026604 0ustar00coreycorey00000000000000--- prelude: > OFAgent has been removed in the Newton cycle. other: - The Openflow Agent(OFAgent) mechanism driver and its agent have been removed in favor of OpenvSwitch mechanism driver with "native" of_interface in the Newton cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/remove-force_gateway_on_subnet-77cb79f0b35d0c6d.yaml0000644000175000017500000000063100000000000032337 0ustar00coreycorey00000000000000--- fixes: - Fixes Bug 1548193, removing 'force_gateway_on_subnet' configuration option. This will always allow adding gateway outside the subnet, and gateway cannot be forced onto the subnet range. other: - The configuration option 'force_gateway_on_subnet' is removed. This will always allow adding gateway outside the subnet, and gateway cannot be forced onto the subnet range. ././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/remove-gateway_external_network_id-config-option-c7aabf2f63004b41.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/remove-gateway_external_network_id-config-option-c7aabf0000644000175000017500000000031100000000000033657 0ustar00coreycorey00000000000000--- upgrade: - | The ``gateway_external_network_id`` config option has been removed. Systems where this option was set will now be able to support multiple external networks for routers. 
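Returning to the ``radvd_user`` option introduced a few notes earlier, a minimal ``l3_agent.ini`` sketch for running radvd as an unprivileged user; the username and the section placement are assumptions for illustration::

    [DEFAULT]
    # drop radvd's root privileges to this user (example username)
    radvd_user = neutron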
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/remove-get_binding_levels-c4e8b350a196706a.yaml0000644000175000017500000000026400000000000031122 0ustar00coreycorey00000000000000--- deprecations: - | The function neutron.plugins.ml2.db.get_binding_levels was deprecated in favor of neutron.plugins.ml2.db.get_binding_level_objs and has now been removed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/remove-l2pop-agent_boot_time-0cec3d5908d8c054.yaml0000644000175000017500000000022600000000000031551 0ustar00coreycorey00000000000000--- upgrade: - | The deprecated L2 population ``agent_boot_time`` config option was removed and is no longer needed as of the Stein release.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/remove-min-l3-agents-per-router-27aef7d91dec0348.yaml0000644000175000017500000000107000000000000032123 0ustar00coreycorey00000000000000--- upgrade: - The ``min_l3_agents_per_router`` configuration option was deprecated in the Newton cycle and removed in Ocata. HA routers no longer require a minimum number of L3 agents to be created, although they require at least two L3 agents to provide HA guarantees. The rationale for the removal of the option is the case where a router was created just when an agent was not operational. The creation of the router will now succeed, and when a second agent resumes operation the router will be scheduled to it, providing HA. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/remove-network_device_mtu-option-a1a96e99dc7f0a02.yaml0000644000175000017500000000052600000000000032646 0ustar00coreycorey00000000000000--- upgrade: - The network_device_mtu option is removed. Existing users of the option are advised to adopt new configuration options to accommodate their underlying physical infrastructure. The relevant options are global_physnet_mtu for all plugins, and also path_mtu and physical_network_mtus for ML2. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/remove-of_interface-option-531ac8a1c767603a.yaml0000644000175000017500000000056300000000000031226 0ustar00coreycorey00000000000000--- upgrade: - | The deprecated ``of_interface`` option is removed. Neutron will always use the ``native`` driver, which has been the default since Pike (11.0). If the old ``ovs-ofctl`` driver was used before the upgrade, the automatic switch to the ``native`` driver will cause a short data plane connectivity outage during the neutron-ovs-agent upgrade. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/remove-quota_items-d50b4672dd31ea3e.yaml0000644000175000017500000000037000000000000027762 0ustar00coreycorey00000000000000--- prelude: > Remove the 'quota_items' configuration option from the neutron.conf file. This option had been deprecated since the Liberty release and has no effect now. upgrade: - Remove the 'quota_items' configuration option from the neutron.conf file. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/remove-router_id-b3732089f8f1faa1.yaml0000644000175000017500000000057000000000000027356 0ustar00coreycorey00000000000000--- prelude: > Remove 'router_id' configuration option from the l3_agent.ini file. 'router_id' option has been defined in order to associate an l3-agent to a specific router when use_namespaces=False. It was deprecated after use_namespaces was removed in Mitaka release. upgrade: - Remove 'router_id' configuration option from the l3_agent.ini file. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/remove-send-arp-for-ha-c1b4a926b8e52b8e.yaml0000644000175000017500000000025200000000000030323 0ustar00coreycorey00000000000000--- upgrade: - | The ``send_arp_for_ha`` configuration option is removed. Neutron now always sends three gratuitous ARP requests on address assigned to a port. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/remove-subnetpool-config-b15dbe59237aee7e.yaml0000644000175000017500000000033200000000000031157 0ustar00coreycorey00000000000000--- upgrade: - The configuration options for ``default_ipv4_subnet_pool`` and ``default_ipv6_subnet_pool`` have been removed. Please use the ``is_default`` option of the create/update subnetpool API instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/remove_max_fixed_ips_per_port-64f1fb36748d5756.yaml0000644000175000017500000000020100000000000032044 0ustar00coreycorey00000000000000--- upgrade: - The ``max_fixed_ips_per_port`` configuration option was deprecated in the Newton cycle and removed in Pike. ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/removed-ovsdb_interface-ovs-vsctl-timeout-a618ec8e27552202.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/removed-ovsdb_interface-ovs-vsctl-timeout-a618ec8e275520000644000175000017500000000044300000000000032755 0ustar00coreycorey00000000000000--- upgrade: - | The deprecated ``ovsdb_interface`` configuration option has been removed, the default ``native`` driver is now always used. In addition, the deprecated ``ovs_vsctl_timeout`` option, which was renamed to ``ovsdb_timeout`` in Queens, has also been removed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/removed_prevent_arp_spoofing-b49e91a92a93e3e1.yaml0000644000175000017500000000032500000000000032044 0ustar00coreycorey00000000000000--- upgrade: - | The deprecated ``prevent_arp_spoofing`` option has been removed and the default behavior is to always prevent ARP spoofing unless port security is disabled on the port (or network). 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/rename-ovs-vsctl-timeout-9df1967c47f394c0.yaml0000644000175000017500000000034700000000000030723 0ustar00coreycorey00000000000000--- deprecations: - | The ``ovs_vsctl_timeout`` option is renamed to ``ovsdb_timeout`` to reflect that it is not specific to the ``vsctl`` implementation of ``ovsdb_interface``. It has also been moved under the ``[OVS]`` section. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/rename-tenant-to-project-b19a4068f8625969.yaml0000644000175000017500000000022400000000000030507 0ustar00coreycorey00000000000000--- upgrade: - | The tenant_id column has been renamed to project_id. This database migration must be applied as an offline migration. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/rename-to-nova-metadata-ip-685fd81618c16d9d.yaml0000644000175000017500000000041200000000000031036 0ustar00coreycorey00000000000000deprecations: - | The ``nova_metadata_ip`` option is deprecated and will be removed in Queens. It is deprecated in favor of the new ``nova_metadata_host`` option because it better reflects that the option accepts both an IP address and a DNS name. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/rm-notify-entry-points-aa442134a780469a.yaml0000644000175000017500000000054100000000000030312 0ustar00coreycorey00000000000000--- prelude: > oslo.messaging.notify.drivers entry points are deprecated other: - The oslo.messaging.notify.drivers entry points that were left in tree for backward compatibility with Icehouse are deprecated and will be removed after liberty-eol. Configure notifications using the oslo_messaging configuration options in neutron.conf. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/routed-networks-hostroutes-a13a9885f0db4f69.yaml0000644000175000017500000000053600000000000031473 0ustar00coreycorey00000000000000--- features: - | Adds host routes for subnets on the same network when using routed networks. Static routes will be configured for subnets associated with other segments on the same network. This ensures that traffic within an L3 routed network stays within the network even when the default route is on a different interface. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/security-group-ipv6-icmp-221c59dcaf2caa3c.yaml0000644000175000017500000000112100000000000031072 0ustar00coreycorey00000000000000--- upgrade: - | Existing IPv6 ICMP security group rules created by using the legacy protocol names ``icmpv6`` and ``icmp`` will now be returned as ``ipv6-icmp`` in an API GET call. fixes: - | Security group rule code has been changed to better detect duplicate rules by standardizing on ``ipv6-icmp`` as the protocol field value for IPv6 ICMP rules. The legacy names ``icmpv6`` and ``icmp`` can still be used in API POST calls, but API GET calls will return ``ipv6-icmp``. Partial fix for bug `1582500 <https://bugs.launchpad.net/neutron/+bug/1582500>`_.
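Given the ``ipv6-icmp`` normalization described above, new client code should use the canonical protocol name directly. A hedged openstacksdk sketch, with the cloud name and security group ID as placeholders:

.. code-block:: python

    import openstack

    conn = openstack.connect(cloud='devstack')  # assumed clouds.yaml entry

    # POST still accepts the legacy 'icmpv6'/'icmp' names, but GET returns
    # 'ipv6-icmp', so using the canonical name avoids apparent mismatches.
    rule = conn.network.create_security_group_rule(
        security_group_id='SECURITY-GROUP-UUID',  # placeholder value
        direction='ingress',
        ethertype='IPv6',
        protocol='ipv6-icmp',
    )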
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/security-group-port-range-check-73114bdcde459e53.yaml0000644000175000017500000000030600000000000032215 0ustar00coreycorey00000000000000--- fixes: - In the security group rules API, API-level validation of port_range values was previously performed only for TCP and UDP. Now it is performed for DCCP, SCTP and UDP-Lite, too. ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/security-group-rule-all-ports-update-2857d80e5742ebc5.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/security-group-rule-all-ports-update-2857d80e5742ebc5.y0000644000175000017500000000064200000000000032466 0ustar00coreycorey00000000000000--- upgrade: - | A security group rule added for the entire port range, for example, TCP ports 1-65535, is not optimal for backends that implement the rule. Rules like this will now automatically be converted to apply to the protocol itself, in other words, all TCP - the port ranges will be ignored. See bug `1848213 <https://bugs.launchpad.net/neutron/+bug/1848213>`_ for more details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/security-groups-port-filtering-69d36ac7db90c9e0.yaml0000644000175000017500000000023600000000000032304 0ustar00coreycorey00000000000000--- features: - | Support port filtering on security group IDs. The feature can be used if the 'port-security-group-filtering' extension is available. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/segment_mtu_to_global_physnet_mtu-9cee5ff09557edeb.yaml0000644000175000017500000000102600000000000033335 0ustar00coreycorey00000000000000--- deprecations: - The 'segment_mtu' option of the ML2 configuration has been deprecated and replaced with the 'global_physnet_mtu' option in the main Neutron configuration. This option is meant to be used by all plugins for an operator to reference their physical network's MTU, regardless of the backend plugin. Plugins should access this config option via the 'get_deployment_physnet_mtu' method added to neutron.plugins.common.utils to avoid being broken by any potential renames in the future. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/sending-garp-for-l3-ha-c118871833ad8743.yaml0000644000175000017500000000177700000000000027731 0ustar00coreycorey00000000000000--- issues: - In kernels < 3.19, the ``net.ipv4.ip_nonlocal_bind`` sysctl option was not isolated to network namespace scope. L3 HA sets this option to zero to avoid sending gratuitous ARPs for IP addresses that were removed while processing; on such kernels the setting may not take effect per namespace, so gratuitous ARPs may still be sent, which can populate the ARP cache tables of peer machines with wrong MAC addresses. fixes: - Versions of ``keepalived`` < 1.2.20 don't send gratuitous ARPs when the keepalived process receives a ``SIGHUP`` signal. These versions are packaged in some Linux distributions like Red Hat Enterprise Linux 7, CentOS 7, or Ubuntu Xenial. Not sending gratuitous ARPs may lead to peer ARP cache tables containing wrong entries about floating IP addresses until those entries are invalidated.
To fix that scenario, Neutron now sends gratuitous ARPs for all new IP addresses that appear on non-HA interfaces in router namespaces. This behavior simulates the behavior of newer versions of ``keepalived``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/service-plugin-dependency-c8bf620b2526b869.yaml0000644000175000017500000000176000000000000031066 0ustar00coreycorey00000000000000--- fixes: - | Adds the ``router`` service plugin to the ``port_forwarding`` service plugin's required list. For more info see https://bugs.launchpad.net/neutron/+bug/1809238 other: - | Neutron now supports having service plugins require other plugin(s) as dependencies. For example, the ``port_forwarding`` service plugin requires the ``router`` service plugin to achieve full functionality. A new list, ``required_service_plugins``, was added to each service plugin so the required dependencies of each service plugin can be initialized. If one service plugin requires another but the requirement is not set in the config file, neutron will now load and initialize the required plugin automatically. upgrade: - | During the dependency resolution procedure, the code that loads service plugins was refactored to not raise an exception if one plugin is configured multiple times, with the last one taking effect. This is a change from the previous behavior. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/set-of-default-qos-burst-value-0790773703fa08fc.yaml0000644000175000017500000000027400000000000031621 0ustar00coreycorey00000000000000--- prelude: > By default, the QoS driver for the Open vSwitch and Linuxbridge agents calculates the burst value as 80% of the available bandwidth. fixes: - Fixes bug 1572670././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/setproctitle_workers-bc27a8baa5ef2279.yaml0000644000175000017500000000220000000000000030513 0ustar00coreycorey00000000000000features: - Neutron child processes now set their process titles to match their roles ('api worker', 'rpc worker', 'periodic worker', 'services worker', or any others defined by workers from out-of-tree plugins). This behavior can be disabled by setting the ``setproctitle`` config option in the ``[default]`` section in neutron.conf to ``off``. The original process string is also appended to the end, to help with scripting that is looking for the old strings. There is also an option called ``brief``, which results in much shorter and easier-to-read process names. The default setting for this option is ``on``, for a combination of backwards compatibility and identifying different processes easily. The recommended setting is ``brief``, once the deployer has verified that none of their tooling depends on the older strings. upgrade: - The change to the process title happens by default with the new ``setproctitle`` config option. The old string is still part of the new process title, but any scripts looking for exact string matches of the old string may need to be modified.
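To see the retitled worker processes described above on a running host, a small sketch using ``psutil`` (already a Neutron requirement) can list them; matching on these title substrings is illustrative only and not a stable interface.

.. code-block:: python

    import psutil

    # Print neutron processes whose command line carries one of the new
    # worker titles, e.g. 'neutron-server: api worker'.
    for proc in psutil.process_iter():
        try:
            cmd = ' '.join(proc.cmdline())
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            continue
        if 'neutron-server' in cmd and 'worker' in cmd:
            print(proc.pid, cmd)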
././@PaxHeader0000000000000000000000000000023100000000000011451 xustar0000000000000000131 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/show-all-security-group-rules-for-security-group-owner-6635dd3e4c6ab5ee.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/show-all-security-group-rules-for-security-group-owner-0000644000175000017500000000036400000000000033557 0ustar00coreycorey00000000000000--- fixes: - | Owners of security groups now see all security group rules that belong to the security group, even if a rule was created by the admin user. Fixes bug `1824248 <https://bugs.launchpad.net/neutron/+bug/1824248>`_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/sorting-pagination-extensions-e66e99e2a8f5e563.yaml0000644000175000017500000000041600000000000032125 0ustar00coreycorey00000000000000--- features: - New API extensions, 'sorting' and 'pagination', have been added to allow API users to detect if the sorting and pagination features are enabled. These features are controlled by the ``allow_sorting`` and ``allow_pagination`` configuration options. ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/sriov-agent-kernel-3.13-removed-support-8bb00902dd607746.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/sriov-agent-kernel-3.13-removed-support-8bb00902dd607740000644000175000017500000000052200000000000032141 0ustar00coreycorey00000000000000--- upgrade: - | SR-IOV agent code no longer supports old kernels (<3.13) for MacVtap ports. This change is not expected to affect existing deployments since most OS distributions already have the relevant kernel patches. In addition, the latest major releases of all supported distributions already ship a newer kernel. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/sriov-agent-num-vf-0-0c06424247e7efe0.yaml0000644000175000017500000000006400000000000027602 0ustar00coreycorey00000000000000--- fixes: - Allow the SR-IOV agent to run with 0 VFs. ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/sriov_allow_use_many_nics_for_one_physnet-3570aa67a60ce6c4.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/sriov_allow_use_many_nics_for_one_physnet-3570aa67a60ce0000644000175000017500000000052400000000000033337 0ustar00coreycorey00000000000000--- prelude: > Several NICs per physical network can be used with SR-IOV. fixes: - The 'physical_device_mappings' option of the sriov_nic configuration can now accept more than one NIC per physical network. For example, if 'physnet2' is connected to enp1s0f0 and enp1s0f1, 'physnet2:enp1s0f0,physnet2:enp1s0f1' will be a valid option.
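The multi-NIC mapping format above is compact enough to mis-read, so the following sketch illustrates, purely as an example of the syntax, how such a list resolves to several devices per physical network. It is not the agent's actual parsing code.

.. code-block:: python

    def parse_device_mappings(mapping_list):
        """Toy parser for 'physnet:device' pairs that allows several
        devices per physical network, as the note above describes."""
        mappings = {}
        for entry in mapping_list:
            physnet, device = entry.split(':', 1)
            mappings.setdefault(physnet, []).append(device)
        return mappings

    example = ['physnet2:enp1s0f0', 'physnet2:enp1s0f1']
    assert parse_device_mappings(example) == {
        'physnet2': ['enp1s0f0', 'enp1s0f1']}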
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/sriov_show_l2_agent_extensions-ca852e155a529e99.yaml0000644000175000017500000000013000000000000032246 0ustar00coreycorey00000000000000--- fixes: - Loaded agent extensions of the SR-IOV agent are now shown in the agent state API.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/stateful-security-group-04b2902ed9c44e4f.yaml0000644000175000017500000000125100000000000030711 0ustar00coreycorey00000000000000--- prelude: > Added support for creating stateless security groups. features: - | Added support for a new stateful-security-group API extension that implements stateless security groups for the iptables drivers. upgrade: - | All existing security groups will be set to stateful during the alembic migration. security: - | The ``stateless security group`` feature does not work with the OVS or OVN driver, as those drivers are not aware of the ``stateful`` attribute in the security group. If the ``stateful`` attribute is provided with a ``False`` value, the attribute value is ignored and the security group behaves as stateful. ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/stricter-security-group-port-check-in-api-d1fd84d9663e04ab.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/stricter-security-group-port-check-in-api-d1fd84d9663e00000644000175000017500000000105000000000000032662 0ustar00coreycorey00000000000000--- upgrade: - | The Neutron API now enforces that ports are a valid option for security group rules based on the protocol given, instead of relying on the backend firewall driver to do this enforcement, which typically silently ignored the port option in the rule. The valid set of whitelisted protocols that support ports is TCP, UDP, UDPLITE, SCTP and DCCP. Ports used with other protocols will now generate an HTTP 400 error. For more information, see bug `1818385 <https://bugs.launchpad.net/neutron/+bug/1818385>`_. ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/subnet-dns-publish-fixed-ip-extension-6a5bb42a048a6671.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/subnet-dns-publish-fixed-ip-extension-6a5bb42a048a6671.0000644000175000017500000000076300000000000032363 0ustar00coreycorey00000000000000--- features: - | The ``subnet-dns-publish-fixed-ip`` extension adds a new attribute to the definition of the subnet resource. When set to ``true`` it will allow publishing DNS records for fixed IPs from that subnet independent of the restrictions described in the `DNS integration with an external service `_ documentation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/subnet-onboard-e4d09fa403a1053e.yaml0000644000175000017500000000130500000000000026771 0ustar00coreycorey00000000000000--- prelude: > Existing subnets that were created outside of a subnet pool can now be moved, or "onboarded", into an existing subnet pool. This provides a way for subnets to be brought under the management of a subnet pool and begin participating in an address scope. By enabling onboarding, existing subnets can be used with features that build on subnet pools and address scopes.
Subnet onboarding is subject to the same restrictions and guarantees currently enforced by subnet pools and address scopes. features: - Existing subnets can now be moved into a subnet pool, and by extension can be moved into address scopes they were not initially participating in. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/support-empty-string-filtering-4a39096b62b9abf2.yaml0000644000175000017500000000024100000000000032217 0ustar00coreycorey00000000000000--- features: - | Add support for filtering attributes whose value is an empty string. A shim extension is added to indicate if this feature is supported. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/support-filter-validation-fee2cdeedbe8ad76.yaml0000644000175000017500000000344300000000000031670 0ustar00coreycorey00000000000000--- prelude: > Perform validation of filter parameters when listing resources. features: - | Starting from this release, the neutron server will perform validation of filter parameters in list requests. Neutron will return a 400 response if the request contains invalid filter parameters. The list of valid parameters is documented in the neutron API reference. An API extension ``filter-validation`` is added to indicate this new API behavior. This extension can be disabled by operators via a config option. upgrade: - | Prior to the upgrade, if a request contained an unknown or unsupported parameter, the server would silently ignore the invalid input. After the upgrade, the server will return a 400 Bad Request response instead. API users might observe that requests that previously received a successful response now receive a failure response. If they encounter such failures, they should confirm whether the API extension ``filter-validation`` is present and validate the filter parameters in their requests. Operators can disable this feature if they want to maintain backward compatibility. If they choose to do that, the API extension ``filter-validation`` will not be present and the API behavior is unchanged. other: - | Each plugin can decide if it wants to support filter validation by setting ``__filter_validation_support`` to True or False. If this field is not set, the default value is False. Right now, the ML2 plugin and all the in-tree service plugins support filter validation. Out-of-tree plugins will have filter validation disabled by default but they can turn it on if they choose to. For filter validation to be supported, the core plugin and all the service plugins in a deployment must support it. ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/switching-to-haproxy-for-metadata-proxy-9d8f7549fadf9182.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/switching-to-haproxy-for-metadata-proxy-9d8f7549fadf9180000644000175000017500000000107200000000000032723 0ustar00coreycorey00000000000000--- features: - To reduce the metadata proxy memory footprint, ``haproxy`` is now used as a replacement for the ``neutron-ns-metadata-proxy`` Python implementation. upgrade: - Since ``haproxy`` was not used before by ``neutron-l3-agent`` and ``neutron-dhcp-agent``, rootwrap filters for both agents have to be copied over when upgrading.
- To upgrade to the ``haproxy`` based metadata proxy, ``neutron-l3-agent`` and ``neutron-dhcp-agent`` have to be restarted. On startup, old proxy processes will be detected and replaced with ``haproxy``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/tag-ports-during-bulk-creation-23161dd39d779e99.yaml0000644000175000017500000000035700000000000031720 0ustar00coreycorey00000000000000--- features: - The ``tag_ports_during_bulk_creation`` ML2 plugin extension has been implemented to support tagging ports during bulk creation. As a side effect, this extension also allows tagging ports during non-bulk creation. ././@PaxHeader0000000000000000000000000000023300000000000011453 xustar0000000000000000133 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/terminate-macvtap-agt-when-interface_mapping-not-present-3109faf3b44d366a.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/terminate-macvtap-agt-when-interface_mapping-not-presen0000644000175000017500000000030600000000000033600 0ustar00coreycorey00000000000000--- upgrade: - After the upgrade, a macvtap agent without physical_interface_mappings configured cannot be started. Specify a valid mapping to be able to start and use the macvtap agent. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/timestamp_format_change-73eda78566b4690b.yaml0000644000175000017500000000115300000000000030700 0ustar00coreycorey00000000000000--- features: - The ``created_at`` and ``updated_at`` resource fields now include a timezone indicator at the end. Because this is a change in field format, the old ``timestamp_core`` extension has been removed and replaced with a ``standard-attr-timestamp`` extension. upgrade: - The ``timestamp_core`` extension has been removed and replaced with the ``standard-attr-timestamp`` extension. Resources will still have timestamps in the ``created_at`` and ``updated_at`` fields, but timestamps will have time zone info appended to the end to be consistent with other OpenStack projects. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/trunk_inherit-455dc74b9fa22dad.yaml0000644000175000017500000000057700000000000027122 0ustar00coreycorey00000000000000--- features: - Subport segmentation details can now accept ``inherit`` as the segmentation type during a trunk creation/update request. The trunk plugin will determine the segmentation type and ID and replace them with those of the network to which the port is connected. Only single-segment VLAN networks are expected to produce correct results at this point. ././@PaxHeader0000000000000000000000000000024600000000000011457 xustar0000000000000000144 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/use-callback-payload-for-AGENT-AFTER_CREATE-and-AFTER_UPDATE-events-839d8dcb0ac5ff26.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/use-callback-payload-for-AGENT-AFTER_CREATE-and-AFTER_U0000644000175000017500000000025100000000000032047 0ustar00coreycorey00000000000000--- other: - | Use ``publish`` for ``AGENT's`` ``AFTER_CREATE`` and ``AFTER_UPDATE`` events with ``DBEventPayload`` instead of the deprecated notify callback.
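A hedged openstacksdk sketch of the ``inherit`` segmentation type described in the trunk note above; the cloud name and port UUIDs are placeholders.

.. code-block:: python

    import openstack

    conn = openstack.connect(cloud='devstack')  # assumed clouds.yaml entry

    # With 'inherit', the trunk plugin resolves the segmentation type and
    # ID from the subport's network, so no segmentation_id is passed here.
    trunk = conn.network.create_trunk(
        name='trunk0',
        port_id='PARENT-PORT-UUID',  # placeholder
        sub_ports=[{'port_id': 'CHILD-PORT-UUID',  # placeholder
                    'segmentation_type': 'inherit'}],
    )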
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/use-keystoneauth-24f309566001a16b.yaml0000644000175000017500000000020100000000000027141 0ustar00coreycorey00000000000000--- upgrade: - Neutron depends on keystoneauth instead of keystoneclient. features: - Neutron can interact with keystone v3. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/use-pyroute2-in-ip-lib-558bfea8f14d1fea.yaml0000644000175000017500000000021600000000000030451 0ustar00coreycorey00000000000000--- features: - Initial support for ``oslo.privsep`` has been added. Most external commands are still executed using ``oslo.rootwrap``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/vhost-user-reconnect-7650134520022e7d.yaml0000644000175000017500000000130100000000000027642 0ustar00coreycorey00000000000000--- features: - vhost-user reconnect is a mechanism which allows a vhost-user frontend to reconnect to a vhost-user backend in the event the backend terminates, either as a result of a graceful shutdown or a crash. This allows a VM utilising a vhost-user interface to reconnect automatically to the backend, e.g. Open vSwitch, without requiring the VM to reboot. In this release, support was added to the neutron Open vSwitch agent and ``ml2`` driver for vhost-user reconnect. other: - vhost-user reconnect requires DPDK 16.07, QEMU 2.7 and Open vSwitch 2.6 to function. If an older QEMU is used, reconnect will not be available but vhost-user will still function. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/vlan-aware-vms-aka-trunk-3341cc75ba1bf5b4.yaml0000644000175000017500000000302000000000000030661 0ustar00coreycorey00000000000000--- prelude: > The "vlan-aware-vms" feature allows Nova users to launch VMs on a single port (trunk parent port) that connects multiple Neutron logical networks together. features: - The feature "vlan-aware-vms" is available. To enable it, a service plugin named 'trunk' must be added to the option ``service_plugins`` in your neutron.conf. The plugin exposes two new extensions ``trunk`` and ``trunk_details``. The plugin can work with multiple backends and in particular Neutron has support for `ML2/openvswitch `_ and ML2/linuxbridge. Even though Neutron API compatibility should be preserved for ports associated with trunks, since this is the first release where the feature is available, it is reasonable to expect possible functionality gaps for one or both drivers. These will be filled in over time as they are reported. The CLI is available via openstackclient, and python-neutronclient 5.1.0 or above. For more details, please check the networking guide. security: - | When working with the ML2/openvswitch driver, the "vlan-aware-vms" feature has the following limitations: * security groups do not work in conjunction with the iptables-based firewall driver. * if security groups are desired, the use of the stateful OVS firewall is required; however, that prevents the use of the DPDK datapath for OVS versions 2.5 or lower.
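Because the trunk feature above is only active when the 'trunk' service plugin is loaded, clients can probe for its API extensions before using it. A short openstacksdk sketch (cloud name assumed):

.. code-block:: python

    import openstack

    conn = openstack.connect(cloud='devstack')  # assumed clouds.yaml entry

    # The trunk service plugin advertises these two API extensions.
    available = {ext.alias for ext in conn.network.extensions()}
    print('trunk feature enabled:', {'trunk', 'trunk_details'} <= available)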
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/vlan-type-conntrack-direct-d3d544f8471ed4ff.yaml0000644000175000017500000000051500000000000031330 0ustar00coreycorey00000000000000--- fixes: - | Add a new match rule, based on the physical VLAN tag, to the OpenFlow firewall traffic-identification mechanism in the TRANSIENT table. This fixes distributed router east-west traffic between VLAN type networks. For more information, see bug `1831534 <https://bugs.launchpad.net/neutron/+bug/1831534>`_. ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=neutron-16.0.0.0b2.dev214/releasenotes/notes/vxlan-multicast-groups-distribution-linuxbridge-9337019c961c01a7.yaml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/vxlan-multicast-groups-distribution-linuxbridge-93370190000644000175000017500000000121600000000000033243 0ustar00coreycorey00000000000000--- features: - Enable the creation of VXLANs with different multicast addresses in the linuxbridge agent, allocated via VNI-to-address mappings. A new config option ``multicast_ranges`` was introduced. other: - Example configuration of ``multicast_ranges`` in ml2_conf.ini under the ``[vxlan]`` config section: ``multicast_ranges = 224.0.0.10:10:90,225.0.0.15:100:900``. For VNIs between 10 and 90, the multicast address 224.0.0.10 will be used, and for VNIs 100 through 900, 225.0.0.15 will be used. Other VNI values will use the standard ``vxlan_group`` address. For more info see RFE https://bugs.launchpad.net/neutron/+bug/1579068 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/web_framework_deprecation-f984b83a1366c5b1.yaml0000644000175000017500000000041100000000000031214 0ustar00coreycorey00000000000000--- deprecations: - | The web_framework option has been deprecated and will be removed during Queens. This option was only added to make the transition to pecan easier, so there is no reason operators should be using the non-default option anyway. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/notes/web_framework_removed-6e4c5c7ca506523a.yaml0000644000175000017500000000030300000000000030427 0ustar00coreycorey00000000000000--- upgrade: - | The web_framework option has been removed. This should have no impact on operators/users since it was just an option used for development of the new web framework.
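Returning to the ``multicast_ranges`` option documented a few notes above, its VNI-to-group mapping can be expressed as a tiny lookup. The sketch below mirrors the documented 'address:vni_min:vni_max' format; it is an illustration, not the linuxbridge agent's actual implementation, and the fallback group value is a placeholder.

.. code-block:: python

    def multicast_group_for(vni, ranges, default_group):
        # Each entry follows the documented 'addr:vni_min:vni_max' form.
        for entry in ranges:
            addr, vni_min, vni_max = entry.split(':')
            if int(vni_min) <= vni <= int(vni_max):
                return addr
        return default_group  # the standard vxlan_group address

    ranges = ['224.0.0.10:10:90', '225.0.0.15:100:900']
    assert multicast_group_for(42, ranges, '239.1.1.1') == '224.0.0.10'
    assert multicast_group_for(500, ranges, '239.1.1.1') == '225.0.0.15'
    assert multicast_group_for(5, ranges, '239.1.1.1') == '239.1.1.1'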
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5390468 neutron-16.0.0.0b2.dev214/releasenotes/source/0000755000175000017500000000000000000000000021134 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/source/README.rst0000644000175000017500000000263100000000000022625 0ustar00coreycorey00000000000000=========================== Neutron Release Notes Howto =========================== Release notes are a new feature for documenting new features in OpenStack projects. Background on the process, tooling, and methodology is documented in a `mailing list post by Doug Hellmann `_. Writing release notes --------------------- For information on how to create release notes, please consult the `reno documentation `__. Please keep the following in mind when writing release notes. * **Avoid using the "prelude" section** for individual release notes. The "prelude" section is for general comments about the release. * **Use one entry per section** (like "feature" or "upgrade"). All entries that belong to the same release will be merged and rendered together, so there is little benefit in using multiple entries for a single topic. Maintaining release notes ------------------------- .. warning:: Avoid modifying an existing release note file even if it is related to your change. If you modify a release note file of a past release, its whole content will be shown in the latest release. The only allowed case is updating a release note within the same release. If you need to update a release note of a past release, edit the corresponding release note file in the stable branch directly. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5390468 neutron-16.0.0.0b2.dev214/releasenotes/source/_static/0000755000175000017500000000000000000000000022562 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/source/_static/.placeholder0000644000175000017500000000000000000000000025033 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5390468 neutron-16.0.0.0b2.dev214/releasenotes/source/_templates/0000755000175000017500000000000000000000000023271 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/source/_templates/.placeholder0000644000175000017500000000000000000000000025542 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/source/conf.py0000644000175000017500000002150200000000000022433 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Neutron Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings.
They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'openstackdocstheme', 'reno.sphinxext', ] # openstackdocstheme options repository_name = 'openstack/neutron' bug_project = 'neutron' bug_tag = 'doc' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Neutron Release Notes' copyright = u'2015, Neutron Developers' # Release notes are version independent # The short X.Y version. # The full version, including alpha/beta/rc tags. release = '' # The short X.Y version. version = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. 
# html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%Y-%m-%d %H:%M' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. html_use_index = False # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'NeutronReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'NeutronReleaseNotes.tex', u'Neutron Release Notes Documentation', u'Neutron Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'neutronreleasenotes', u'Neutron Release Notes Documentation', [u'Neutron Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'NeutronReleaseNotes', u'Neutron Release Notes Documentation', u'Neutron Developers', 'NeutronReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. 
# texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/source/index.rst0000644000175000017500000000147400000000000023003 0ustar00coreycorey00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======================= Neutron Release Notes ======================= .. toctree:: :maxdepth: 1 unreleased train stein rocky queens pike ocata newton mitaka liberty .. toctree:: :maxdepth: 1 README.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/source/liberty.rst0000644000175000017500000000021300000000000023334 0ustar00coreycorey00000000000000============================== Liberty Series Release Notes ============================== .. release-notes:: :branch: stable/liberty ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.0230422 neutron-16.0.0.0b2.dev214/releasenotes/source/locale/0000755000175000017500000000000000000000000022373 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.0230422 neutron-16.0.0.0b2.dev214/releasenotes/source/locale/ja/0000755000175000017500000000000000000000000022765 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5390468 neutron-16.0.0.0b2.dev214/releasenotes/source/locale/ja/LC_MESSAGES/0000755000175000017500000000000000000000000024552 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po0000644000175000017500000003307100000000000027607 0ustar00coreycorey00000000000000# Akihiro Motoki , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: Neutron Release Notes\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2018-02-10 09:07+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-10-10 09:28+0000\n" "Last-Translator: Akihiro Motoki \n" "Language-Team: Japanese\n" "Language: ja\n" "X-Generator: Zanata 3.9.6\n" "Plural-Forms: nplurals=1; plural=0\n" msgid "7.0.1" msgstr "7.0.1" msgid "7.0.3" msgstr "7.0.3" msgid "7.0.4" msgstr "7.0.4" msgid "7.1.0" msgstr "7.1.0" msgid "8.0.0" msgstr "8.0.0" msgid "8.1.0" msgstr "8.1.0" msgid "8.2.0" msgstr "8.2.0" msgid "9.0.0" msgstr "9.0.0" msgid "" "A DHCP agent is assigned to an availability zone; the network will be hosted " "by the DHCP agent with availability zone specified by the user." msgstr "" "DHCP エージェントをアベイラビリティーゾーンに割り当てられます。ネットワークは" "ユーザーが指定したアベイラビリティーゾーンの DHCP エージェントが担当します。" msgid "" "A new rule has been added to the API that allows for tagging traffic with " "DSCP values. This is currently supported by the Open vSwitch QoS driver." msgstr "" "新しいルールが追加され、トラフィックへの DSCP 値のタグ付けを API でできるよう" "になりました。この機能は現在のところ Open vSwitch QoS ドライバーでサポートさ" "れています。" msgid "Bug Fixes" msgstr "バグ修正" msgid "" "Creating Neutron-LBaaS load balancers in environments without hardware " "virtualization may be slow when using the Octavia driver. This is due to " "QEMU using the TCG accelerator instead of the KVM accelerator in " "environments without hardware virtualization available. We recommend " "enabling hardware virtualization on your compute nodes, or enabling nested " "virtualization when using the Octavia driver inside a virtual environment. " "See `this link explaining devstack with nested KVM `_ for details " "on setting up nested virtualization for DevStack running inside KVM." msgstr "" "Octavia ドライバー使用時には、ハードウェア仮想化のない環境での Neutron-LBaaS " "ロードバランサーの作成が遅くなる可能性があります。これは、ハードウェア仮想化" "なしの環境では QEMU が KVM アクセラレーターではなく TCG アクセラレーターを使" "用するためです。Octavia ドライバーを仮想環境内で使用する場合、コンピュート" "ノード上でハードウェア仮想化を有効にするか、nested virtualization (訳注:仮想" "化環境上の仮想化支援機構) を有効にすることをお勧めします。KVM 内で実行される " "DevStack で nested virtualization をセットアップする詳細な方法は、 `nested " "KVM での DevStack に関するリンク `_ を参照してください。" msgid "Current Series Release Notes" msgstr "開発中バージョンのリリースノート" msgid "Deprecation Notes" msgstr "廃止予定の機能" msgid "" "During Liberty, some plugins and drivers have been deprecated, including the " "metaplugin, the IBM SDN-VE plugin, the Cisco N1KV monolithic plugin, and the " "Embrane plugin." msgstr "" "Liberty では、いくつかのプラグインやドライバーが廃止予定となりました。 " "metaplugin、 IBM SDN-VE プラグイン、 Cisco N1KV 一体型プラグイン、 Embrane プ" "ラグインが該当します。" msgid "Fixes bug 1537734" msgstr "バグ 1537734 を修正しました。" msgid "Fixes bug 1572670" msgstr "バグ 1572670 を修正しました。" msgid "IPv6 prefix delegation support was added to Neutron." msgstr "" "IPv6 プレフィックスデリゲーションのサポートが Neutron に追加されました。" msgid "Known Issues" msgstr "既知の問題" msgid "" "LBaaS V2 reference driver is now based on Octavia, an operator grade " "scalable, reliable Load Balancer platform." msgstr "" "LBaaS V2 リファレンスドライバーが Octavia (事業者品質のスケーラブルで高信頼な" "ロードバランサプラットフォーム) ベースになりました。" msgid "Liberty Series Release Notes" msgstr "Liberty バージョンのリリースノート" msgid "Mitaka Series Release Notes" msgstr "Mitaka バージョンのリリースノート" msgid "" "Networks used for VRRP traffic for HA routers may now be configured to use a " "specific segmentation type or physical network tag." 
msgstr "" "HA ルータの VRRP トラフィック用に使用されるネットワークを、特定のセグメントタ" "イプまたは物理ネットワークタグを使用して設定できるようになりました。" msgid "" "Neutron now exposes a QoS API, initially offering bandwidth limitation on " "the port level. See the `QoS devref `_ for additional information." msgstr "" "Neutron で QoS API が利用できるようになりました。最初の機能としてポートレベル" "の帯域幅制限機能が提供されます。この機能の API、CLI、設定、追加情報は `QoS " "devref `_ にあります。" msgid "Neutron now has a pluggable IP address management framework." msgstr "プラグイン型の IP アドレス管理 (IPAM) フレームワークが実装されました。" msgid "Neutron now offers role base access control (RBAC) for networks." msgstr "" "Neutron で、ネットワークに対してロールベースアクセス制御 (RBAC) が利用できる" "ようになりました。" msgid "" "Neutron now provides a way for admins to manually schedule agents, allowing " "host resources to be tested before they are enabled for tenant use." msgstr "" "Neutron は、管理者が手動でエージェントをスケジューリングする方法を提供するよ" "うになりました。これにより、テナントが利用できるようにする前に、ホストのリ" "ソースをテストできるようになりました。" msgid "" "Neutron now supports IPv6 Prefix Delegation for the automatic assignment of " "CIDRs to IPv6 subnets. For more information on the usage and configuration " "of this feature, see the `OpenStack Networking Guide `_." msgstr "" "Neutron は IPv6 サブネットに対する CIDR の自動割当において IPv6 プレフィック" "スデリゲーションに対応しました。この機能の利用、設定に関する詳細な情報は、 " "`OpenStack Networking Guide `_ を参照してください。" msgid "New Features" msgstr "新機能" msgid "OFAgent has been removed in the Newton cycle." msgstr "Newton サイクルで OFAgent は削除されました。" msgid "OFAgent is decomposed and deprecated in the Mitaka cycle." msgstr "" "Mitaka サイクルで OFAgent は別リポジトリーに分離され、廃止予定となりました。" msgid "Other Notes" msgstr "その他の注意点" msgid "Pluggable IPAM enables the use of alternate or third-party IPAM." msgstr "" "プラグイン型の IPAM により、サードパーティー製 IPAM や代替機能を利用可能にな" "ります。" msgid "" "Router high availability (L3 HA / VRRP) now works when layer 2 population " "(l2pop) is enabled." msgstr "" "ルータ高可用性(L3 HA/VRRP)が、レイヤー2ポピュレーション(l2pop)が有効の場" "合にも機能するようになりました。" msgid "Start using reno to manage release notes." msgstr "リリースノートの管理に reno を使い始めました。" msgid "Support for IPv6 addresses as tunnel endpoints in OVS." msgstr "" "OVS のトンネルエンドポイントとして IPv6 アドレスが使用できるようになりまし" "た。" msgid "Support integration with external DNS service." msgstr "外部 DNS サービスとの連携に対応しました。" msgid "" "The 'external_network_bridge' option for the L3 agent has been deprecated in " "favor of a bridge_mapping with a physnet. For more information, see the " "`Network Node `_ section of this scenario in the networking guide." msgstr "" "L3 エージェントの 'external_network_bridge' オプションは廃止予定となりまし" "た。 bridge_mapping の physnet 指定を使用してください。詳細な情報は、 `ネット" "ワーキングガイド `_ の Network Node の節を参照してください。" msgid "" "The Cisco N1kV monolithic plugin is removed in the Liberty release (replaced " "by the ML2 mechanism driver)." msgstr "" "Cisco N1kV 一体型プラグインは Liberty リリースで削除されました (ML2 メカニズ" "ムドライバーで置き換えられました)。" msgid "" "The Embrane plugin is deprecated and will be removed in the Mitaka release." msgstr "Embrane プラグインは廃止予定になり、Mitaka リリースで廃止されます。" msgid "" "The FWaaS API is marked as experimental for Liberty. Further, the current " "API will be removed in Mitaka and replaced with a new FWaaS API, which the " "team is in the process of developing." msgstr "" "FWaaS API は Liberty では実験的機能 (experimental) の扱いです。今後、現在の " "API が Mitaka で削除され、 (Neutron チームが開発中の) 新しい FWaaS API で置換" "される予定です。" msgid "The IBM SDN-VE monolithic plugin is removed in the Liberty release." msgstr "IBM SDN-VE 一体型プラグインは Liberty リリースで削除されました。" msgid "" "The LBaaS V1 API is marked as deprecated and is planned to be removed in a " "future release. 
Going forward, the LBaaS V2 API should be used." msgstr "" "LBaaS V1 API は廃止予定となり、将来のリリースで削除される予定です。今後は " "LBaaS V2 API を使用すべきです。" msgid "The LBaaS V2 API is no longer experimental. It is now stable." msgstr "" "LBaaS V2 API は今回のリリースで実験的機能 (experimental) でなくなりました。安" "定版 (stable) となりました。" msgid "" "The OVS agent may now be restarted without affecting data plane connectivity." msgstr "" "データプレーンの接続性に影響を与えずに OVS エージェントを再起動できるようにな" "りました。" msgid "" "The Openflow Agent(OFAgent) mechanism driver and its agent have been removed " "in favor of OpenvSwitch mechanism driver with \"native\" of_interface in the " "Newton cycle." msgstr "" "Newton サイクルで OpenFlow Agent (OFAgent) メカニズムドライバーとエージェント" "が削除されました。 Open vSwitch メカニズムドライバーの \"native\" インター" "フェースを使ってください。" msgid "" "The Openflow Agent(OFAgent) mechanism driver is decomposed completely from " "neutron tree in the Mitaka. The OFAgent driver and its agent also are " "deprecated in favor of OpenvSwitch mechanism driver with \"native\" " "of_interface in the Mitaka and will be removed in the next release." msgstr "" "Mitaka リリースで、OpenFlow Agent (OFAgent) メカニズムドライバーは neutron " "コードツリーから完全に分離されました。また、 Open vSwitch メカニズムドライ" "バーの \"native\" インターフェースが利用できるようになったため、 OFAgent ド" "ライバーとエージェントは Mitaka サイクルで廃止予定扱いになりました。次のリ" "リースで削除される予定です。" msgid "The metaplugin is removed in the Liberty release." msgstr "metaplugin は Liberty リリースで削除されました。" msgid "The original, non-pluggable version of IPAM is enabled by default." msgstr "デフォルトでは、以前からの非プラグイン版の IPAM が有効になります。" msgid "" "The stock Ubuntu Trusty Tahr kernel (3.13) shows linear performance " "degradation when running \"ip netns exec\" as the number of namespaces " "increases. In cases where scale is important, a later version kernel (e.g. " "3.19) should be used. This regression should be fixed in Trusty Tahr since " "3.13.0-36.63 and later kernel versions. For more information, please see " "`Launchpad bug 1328088. `_" msgstr "" "Trusty Tahr に含まれるカーネル (3.13) は、ネームスペース数の増加に伴い「ip " "netns exec」の実行時の性能が線形に劣化します。スケーラビリティーが重要な場" "合、より後のバージョンのカーネル (例えば 3.19) を使うべきです。この問題は " "Trusty Thar の 3.13.0-36.63 以降のカーネルバージョンで修正済みです。詳細は " "`Launchpad バグ 1328088 `_ を参照してください。" msgid "Upgrade Notes" msgstr "アップグレード時の注意" msgid "VPNaaS reference drivers now work with HA routers." msgstr "" "VPNaaS リファレンスドライバーが HA ルータ上で動作するようになりました。" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/source/mitaka.rst0000644000175000017500000000022300000000000023131 0ustar00coreycorey00000000000000=================================== Mitaka Series Release Notes =================================== .. release-notes:: :branch: stable/mitaka ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/source/newton.rst0000644000175000017500000000025700000000000023204 0ustar00coreycorey00000000000000=================================== Newton Series Release Notes =================================== .. release-notes:: :branch: stable/newton :earliest-version: 9.0.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/source/ocata.rst0000644000175000017500000000034400000000000022756 0ustar00coreycorey00000000000000=================================== Ocata Series Release Notes =================================== .. 
release-notes:: :branch: stable/ocata :ignore-notes: deprecate-SRIOV-physical_device_mappings-67dd3317181eb513 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/source/pike.rst0000644000175000017500000000032500000000000022616 0ustar00coreycorey00000000000000=================================== Pike Series Release Notes =================================== .. release-notes:: :branch: stable/pike :ignore-notes: vlan-aware-vms-aka-trunk-3341cc75ba1bf5b4.yaml ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/source/queens.rst0000644000175000017500000000022300000000000023163 0ustar00coreycorey00000000000000=================================== Queens Series Release Notes =================================== .. release-notes:: :branch: stable/queens ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/source/rocky.rst0000644000175000017500000000022100000000000023010 0ustar00coreycorey00000000000000=================================== Rocky Series Release Notes =================================== .. release-notes:: :branch: stable/rocky ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/source/stein.rst0000644000175000017500000000022100000000000023003 0ustar00coreycorey00000000000000=================================== Stein Series Release Notes =================================== .. release-notes:: :branch: stable/stein ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/source/train.rst0000644000175000017500000000017600000000000023007 0ustar00coreycorey00000000000000========================== Train Series Release Notes ========================== .. release-notes:: :branch: stable/train ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/releasenotes/source/unreleased.rst0000644000175000017500000000015300000000000024014 0ustar00coreycorey00000000000000============================ Current Series Release Notes ============================ .. release-notes:: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/requirements.txt0000644000175000017500000000363600000000000020437 0ustar00coreycorey00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
pbr>=4.0.0 # Apache-2.0 Paste>=2.0.2 # MIT PasteDeploy>=1.5.0 # MIT Routes>=2.3.1 # MIT debtcollector>=1.19.0 # Apache-2.0 decorator>=3.4.0 # BSD eventlet!=0.18.3,!=0.20.1,>=0.18.2 # MIT pecan>=1.3.2 # BSD httplib2>=0.9.1 # MIT requests>=2.14.2 # Apache-2.0 Jinja2>=2.10 # BSD License (3 clause) keystonemiddleware>=4.17.0 # Apache-2.0 netaddr>=0.7.18 # BSD netifaces>=0.10.4 # MIT neutron-lib>=2.2.0 # Apache-2.0 python-neutronclient>=6.7.0 # Apache-2.0 tenacity>=4.4.0 # Apache-2.0 SQLAlchemy>=1.2.0 # MIT WebOb>=1.8.2 # MIT keystoneauth1>=3.14.0 # Apache-2.0 alembic>=0.8.10 # MIT six>=1.10.0 # MIT stevedore>=1.20.0 # Apache-2.0 oslo.cache>=1.26.0 # Apache-2.0 oslo.concurrency>=3.26.0 # Apache-2.0 oslo.config>=5.2.0 # Apache-2.0 oslo.context>=2.19.2 # Apache-2.0 oslo.db>=4.37.0 # Apache-2.0 oslo.i18n>=3.15.3 # Apache-2.0 oslo.log>=3.36.0 # Apache-2.0 oslo.messaging>=5.29.0 # Apache-2.0 oslo.middleware>=3.31.0 # Apache-2.0 oslo.policy>=1.30.0 # Apache-2.0 oslo.privsep>=1.32.0 # Apache-2.0 oslo.reports>=1.18.0 # Apache-2.0 oslo.rootwrap>=5.8.0 # Apache-2.0 oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0 oslo.service!=1.28.1,>=1.24.0 # Apache-2.0 oslo.upgradecheck>=0.1.0 # Apache-2.0 oslo.utils>=3.33.0 # Apache-2.0 oslo.versionedobjects>=1.35.1 # Apache-2.0 osprofiler>=2.3.0 # Apache-2.0 os-ken >= 0.3.0 # Apache-2.0 ovs>=2.8.0 # Apache-2.0 ovsdbapp>=1.0.0 # Apache-2.0 psutil>=3.2.2 # BSD pyroute2>=0.5.7;sys_platform!='win32' # Apache-2.0 (+ dual licensed GPL2) pyOpenSSL>=17.1.0 # Apache-2.0 python-novaclient>=9.1.0 # Apache-2.0 openstacksdk>=0.31.2 # Apache-2.0 python-designateclient>=2.7.0 # Apache-2.0 os-xenapi>=0.3.1 # Apache-2.0 os-vif>=1.15.1 # Apache-2.0 futurist>=1.2.0 # Apache-2.0 tooz>=1.58.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.0270422 neutron-16.0.0.0b2.dev214/roles/0000755000175000017500000000000000000000000016267 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.0270422 neutron-16.0.0.0b2.dev214/roles/add_mariadb_repo/0000755000175000017500000000000000000000000021523 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5390468 neutron-16.0.0.0b2.dev214/roles/add_mariadb_repo/tasks/0000755000175000017500000000000000000000000022650 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/roles/add_mariadb_repo/tasks/main.yaml0000644000175000017500000000124200000000000024457 0ustar00coreycorey00000000000000- name: Add apt key from hkp://keyserver.ubuntu.com:80 shell: cmd: apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74CD1D8 executable: /bin/bash become: yes when: - ansible_facts['distribution_release'] == "bionic" - ansible_facts['distribution'] == "Ubuntu" - name: Add MariaDB 10.3 repository (https://bugs.launchpad.net/neutron/+bug/1855912) apt_repository: repo: deb [arch=amd64,arm64,ppc64el] http://ftp.hosteurope.de/mirror/mariadb.org/repo/10.3/ubuntu bionic main state: present become: yes when: - ansible_facts['distribution_release'] == "bionic" - ansible_facts['distribution'] == "Ubuntu" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5390468 
neutron-16.0.0.0b2.dev214/roles/configure_functional_tests/0000755000175000017500000000000000000000000023714 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/roles/configure_functional_tests/README.rst0000644000175000017500000000076300000000000025411 0ustar00coreycorey00000000000000Configure a host to run the Neutron functional/fullstack tests on it **Role Variables** .. zuul:rolevar:: tests_venv :default: {{ tox_envlist }} .. zuul:rolevar:: project_name :default: neutron .. zuul:rolevar:: base_dir :default: {{ ansible_user_dir }}/src/opendev.org .. zuul:rolevar:: gate_dest_dir :default: {{ base_dir }}/openstack .. zuul:rolevar:: devstack_dir :default: {{ base_dir }}/openstack/devstack .. zuul:rolevar:: neutron_dir :default: {{ gate_dest_dir }}/neutron ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5390468 neutron-16.0.0.0b2.dev214/roles/configure_functional_tests/defaults/0000755000175000017500000000000000000000000025523 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/roles/configure_functional_tests/defaults/main.yaml0000644000175000017500000000036200000000000027334 0ustar00coreycorey00000000000000tests_venv: "{{ tox_envlist }}" project_name: "neutron" base_dir: "{{ ansible_user_dir }}/src/opendev.org" gate_dest_dir: "{{ base_dir }}/openstack" devstack_dir: "{{ base_dir }}/openstack/devstack" neutron_dir: "{{ gate_dest_dir }}/neutron" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5390468 neutron-16.0.0.0b2.dev214/roles/configure_functional_tests/tasks/0000755000175000017500000000000000000000000025041 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/roles/configure_functional_tests/tasks/main.yaml0000644000175000017500000000105200000000000026647 0ustar00coreycorey00000000000000- shell: cmd: | set -e set -x GATE_STACK_USER={{ ansible_user }} IS_GATE=True BASE_DIR={{ base_dir }} GATE_DEST={{ gate_dest_dir }} PROJECT_NAME={{ project_name }} NEUTRON_PATH={{ neutron_dir }} DEVSTACK_PATH={{ devstack_dir }} VENV={{ tests_venv }} source $DEVSTACK_PATH/functions source $NEUTRON_PATH/devstack/lib/ovs source $NEUTRON_PATH/tools/configure_for_func_testing.sh configure_host_for_func_testing executable: /bin/bash ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5390468 neutron-16.0.0.0b2.dev214/roles/fetch_journal_log/0000755000175000017500000000000000000000000021753 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/roles/fetch_journal_log/README.rst0000644000175000017500000000063500000000000023446 0ustar00coreycorey00000000000000Collect the journal log from a test run. By default, this role stores the journal log in the file named by "journal_log_file_name". **Role Variables** .. zuul:rolevar:: journal_log_path :default: {{ ansible_user_dir }}/logs Path where the journal log file will be stored on the job's node. .. zuul:rolevar:: journal_log_file_name :default: {{ journal_log_path }}/journal.log Name of the journal log file.
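**Example Usage** (a minimal sketch, not taken from an existing job definition; the host group is illustrative and the role is assumed to be resolvable from the playbook's role path)::

    - hosts: all
      roles:
        - role: fetch_journal_log
          vars:
            journal_log_path: "{{ ansible_user_dir }}/logs"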
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5390468 neutron-16.0.0.0b2.dev214/roles/fetch_journal_log/defaults/0000755000175000017500000000000000000000000023562 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/roles/fetch_journal_log/defaults/main.yaml0000644000175000017500000000015400000000000025372 0ustar00coreycorey00000000000000journal_log_path: "{{ ansible_user_dir }}/logs" journal_log_file_name: "{{ journal_log_path }}/journal.log" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5390468 neutron-16.0.0.0b2.dev214/roles/fetch_journal_log/tasks/0000755000175000017500000000000000000000000023100 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/roles/fetch_journal_log/tasks/main.yaml0000644000175000017500000000102500000000000024706 0ustar00coreycorey00000000000000- name: Ensure {{ journal_log_path }} exists become: yes file: path: "{{ journal_log_path }}" state: directory owner: "{{ ansible_user }}" group: "{{ ansible_user }}" mode: 0775 - name: Store journal logs in {{ journal_log_file_name }} become: yes shell: cmd: | /bin/journalctl -a > {{ journal_log_file_name }} - name: Set journal.log file permissions become: yes file: path: '{{ journal_log_file_name }}' owner: '{{ ansible_user }}' group: '{{ ansible_user }}' mode: 0644 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5390468 neutron-16.0.0.0b2.dev214/roles/setup_logdir/0000755000175000017500000000000000000000000020767 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/roles/setup_logdir/README.rst0000644000175000017500000000027000000000000022455 0ustar00coreycorey00000000000000Configure the logs directory to be accessible to the ``stack`` user. **Role Variables** .. zuul:rolevar:: logdir :default: /opt/stack/logs Name of the directory where logs will be stored.
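**Example Usage** (a minimal sketch, not taken from an existing job definition; the host group is illustrative and the variable simply restates the default)::

    - hosts: all
      roles:
        - role: setup_logdir
          vars:
            logdir: /opt/stack/logs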
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5390468 neutron-16.0.0.0b2.dev214/roles/setup_logdir/defaults/0000755000175000017500000000000000000000000022576 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/roles/setup_logdir/defaults/main.yaml0000644000175000017500000000003000000000000024377 0ustar00coreycorey00000000000000logdir: /opt/stack/logs ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5390468 neutron-16.0.0.0b2.dev214/roles/setup_logdir/tasks/0000755000175000017500000000000000000000000022114 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/roles/setup_logdir/tasks/main.yaml0000644000175000017500000000024100000000000023721 0ustar00coreycorey00000000000000- name: Ensure logdir exists become: yes file: path: "{{ logdir }}" state: directory owner: stack group: "{{ ansible_user }}" mode: 0775 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5550468 neutron-16.0.0.0b2.dev214/setup.cfg0000644000175000017500000003667400000000000017004 0ustar00coreycorey00000000000000[metadata] name = neutron summary = OpenStack Networking description-file = README.rst author = OpenStack author-email = openstack-discuss@lists.openstack.org home-page = https://docs.openstack.org/neutron/latest/ python-requires = >=3.6 classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 3 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 [files] packages = neutron data_files = etc/neutron = etc/api-paste.ini etc/rootwrap.conf etc/neutron/rootwrap.d = etc/neutron/rootwrap.d/* share/ansible/neutron-ovn-migration/playbooks = tools/ovn_migration/tripleo_environment/playbooks/* scripts = tools/ovn_migration/tripleo_environment/ovn_migration.sh [entry_points] wsgi_scripts = neutron-api = neutron.server:get_application console_scripts = neutron-db-manage = neutron.db.migration.cli:main neutron-debug = neutron.debug.shell:main neutron-dhcp-agent = neutron.cmd.eventlet.agents.dhcp:main neutron-keepalived-state-change = neutron.cmd.keepalived_state_change:main neutron-ipset-cleanup = neutron.cmd.ipset_cleanup:main neutron-l3-agent = neutron.cmd.eventlet.agents.l3:main neutron-linuxbridge-agent = neutron.cmd.eventlet.plugins.linuxbridge_neutron_agent:main neutron-linuxbridge-cleanup = neutron.cmd.linuxbridge_cleanup:main neutron-macvtap-agent = neutron.cmd.eventlet.plugins.macvtap_neutron_agent:main neutron-metadata-agent = neutron.cmd.eventlet.agents.metadata:main neutron-netns-cleanup = neutron.cmd.netns_cleanup:main neutron-openvswitch-agent = neutron.cmd.eventlet.plugins.ovs_neutron_agent:main neutron-ovs-cleanup = neutron.cmd.ovs_cleanup:main neutron-pd-notify = neutron.cmd.pd_notify:main neutron-server = neutron.cmd.eventlet.server:main neutron-rpc-server = neutron.cmd.eventlet.server:main_rpc_eventlet neutron-rootwrap = oslo_rootwrap.cmd:main neutron-rootwrap-daemon = oslo_rootwrap.cmd:daemon neutron-usage-audit = neutron.cmd.eventlet.usage_audit:main neutron-metering-agent = 
neutron.cmd.eventlet.services.metering_agent:main neutron-sriov-nic-agent = neutron.cmd.eventlet.plugins.sriov_nic_neutron_agent:main neutron-sanity-check = neutron.cmd.sanity_check:main neutron-status = neutron.cmd.status:main neutron-ovn-metadata-agent = neutron.cmd.eventlet.agents.ovn_metadata:main neutron-ovn-migration-mtu = neutron.cmd.ovn.migration_mtu:main neutron-ovn-db-sync-util = neutron.cmd.ovn.neutron_ovn_db_sync_util:main neutron.core_plugins = ml2 = neutron.plugins.ml2.plugin:Ml2Plugin neutron.service_plugins = dummy = neutron.tests.unit.dummy_plugin:DummyServicePlugin router = neutron.services.l3_router.l3_router_plugin:L3RouterPlugin metering = neutron.services.metering.metering_plugin:MeteringPlugin qos = neutron.services.qos.qos_plugin:QoSPlugin tag = neutron.services.tag.tag_plugin:TagPlugin flavors = neutron.services.flavors.flavors_plugin:FlavorsPlugin auto_allocate = neutron.services.auto_allocate.plugin:Plugin segments = neutron.services.segments.plugin:Plugin network_ip_availability = neutron.services.network_ip_availability.plugin:NetworkIPAvailabilityPlugin network_segment_range = neutron.services.network_segment_range.plugin:NetworkSegmentRangePlugin revisions = neutron.services.revisions.revision_plugin:RevisionPlugin timestamp = neutron.services.timestamp.timestamp_plugin:TimeStampPlugin trunk = neutron.services.trunk.plugin:TrunkPlugin loki = neutron.services.loki.loki_plugin:LokiPlugin log = neutron.services.logapi.logging_plugin:LoggingPlugin port_forwarding = neutron.services.portforwarding.pf_plugin:PortForwardingPlugin placement = neutron.services.placement_report.plugin:PlacementReportPlugin conntrack_helper = neutron.services.conntrack_helper.plugin:Plugin ovn-router = neutron.services.ovn_l3.plugin:OVNL3RouterPlugin neutron.ml2.type_drivers = flat = neutron.plugins.ml2.drivers.type_flat:FlatTypeDriver local = neutron.plugins.ml2.drivers.type_local:LocalTypeDriver vlan = neutron.plugins.ml2.drivers.type_vlan:VlanTypeDriver geneve = neutron.plugins.ml2.drivers.type_geneve:GeneveTypeDriver gre = neutron.plugins.ml2.drivers.type_gre:GreTypeDriver vxlan = neutron.plugins.ml2.drivers.type_vxlan:VxlanTypeDriver neutron.ml2.mechanism_drivers = logger = neutron.tests.unit.plugins.ml2.drivers.mechanism_logger:LoggerMechanismDriver test = neutron.tests.unit.plugins.ml2.drivers.mechanism_test:TestMechanismDriver test_with_agent = neutron.tests.unit.plugins.ml2.drivers.mechanism_test:TestMechanismDriverWithAgent linuxbridge = neutron.plugins.ml2.drivers.linuxbridge.mech_driver.mech_linuxbridge:LinuxbridgeMechanismDriver macvtap = neutron.plugins.ml2.drivers.macvtap.mech_driver.mech_macvtap:MacvtapMechanismDriver openvswitch = neutron.plugins.ml2.drivers.openvswitch.mech_driver.mech_openvswitch:OpenvswitchMechanismDriver l2population = neutron.plugins.ml2.drivers.l2pop.mech_driver:L2populationMechanismDriver sriovnicswitch = neutron.plugins.ml2.drivers.mech_sriov.mech_driver.mech_driver:SriovNicSwitchMechanismDriver ovn = neutron.plugins.ml2.drivers.ovn.mech_driver.mech_driver:OVNMechanismDriver ovn-sync = neutron.cmd.ovn.neutron_ovn_db_sync_util:OVNMechanismDriver fake_agent = neutron.tests.unit.plugins.ml2.drivers.mech_fake_agent:FakeAgentMechanismDriver fake_agent_l3 = neutron.tests.unit.plugins.ml2.drivers.mech_fake_agent:FakeAgentMechanismDriverL3 another_fake_agent = neutron.tests.unit.plugins.ml2.drivers.mech_fake_agent:AnotherFakeAgentMechanismDriver faulty_agent = neutron.tests.unit.plugins.ml2.drivers.mech_faulty_agent:FaultyAgentMechanismDriver 
neutron.ml2.extension_drivers = test = neutron.tests.unit.plugins.ml2.drivers.ext_test:TestExtensionDriver testdb = neutron.tests.unit.plugins.ml2.drivers.ext_test:TestDBExtensionDriver port_security = neutron.plugins.ml2.extensions.port_security:PortSecurityExtensionDriver qos = neutron.plugins.ml2.extensions.qos:QosExtensionDriver dns = neutron.plugins.ml2.extensions.dns_integration:DNSExtensionDriverML2 data_plane_status = neutron.plugins.ml2.extensions.data_plane_status:DataPlaneStatusExtensionDriver dns_domain_ports = neutron.plugins.ml2.extensions.dns_integration:DNSDomainPortsExtensionDriver uplink_status_propagation = neutron.plugins.ml2.extensions.uplink_status_propagation:UplinkStatusPropagationExtensionDriver tag_ports_during_bulk_creation = neutron.plugins.ml2.extensions.tag_ports_during_bulk_creation:TagPortsDuringBulkCreationExtensionDriver subnet_dns_publish_fixed_ip = neutron.plugins.ml2.extensions.subnet_dns_publish_fixed_ip:SubnetDNSPublishFixedIPExtensionDriver neutron.ipam_drivers = fake = neutron.tests.unit.ipam.fake_driver:FakeDriver internal = neutron.ipam.drivers.neutrondb_ipam.driver:NeutronDbPool neutron.agent.l2.extensions = qos = neutron.agent.l2.extensions.qos:QosAgentExtension fdb = neutron.agent.l2.extensions.fdb_population:FdbPopulationAgentExtension log = neutron.services.logapi.agent.log_extension:LoggingExtension neutron.agent.l3.extensions = fip_qos = neutron.agent.l3.extensions.qos.fip:FipQosAgentExtension gateway_ip_qos = neutron.agent.l3.extensions.qos.gateway_ip:RouterGatewayIPQosAgentExtension port_forwarding = neutron.agent.l3.extensions.port_forwarding:PortForwardingAgentExtension snat_log = neutron.agent.l3.extensions.snat_log:SNATLoggingExtension conntrack_helper = neutron.agent.l3.extensions.conntrack_helper:ConntrackHelperAgentExtension neutron.services.logapi.drivers = ovs = neutron.services.logapi.drivers.openvswitch.ovs_firewall_log:OVSFirewallLoggingDriver neutron.qos.agent_drivers = ovs = neutron.plugins.ml2.drivers.openvswitch.agent.extension_drivers.qos_driver:QosOVSAgentDriver sriov = neutron.plugins.ml2.drivers.mech_sriov.agent.extension_drivers.qos_driver:QosSRIOVAgentDriver linuxbridge = neutron.plugins.ml2.drivers.linuxbridge.agent.extension_drivers.qos_driver:QosLinuxbridgeAgentDriver neutron.agent.linux.pd_drivers = dibbler = neutron.agent.linux.dibbler:PDDibbler neutron.services.external_dns_drivers = designate = neutron.services.externaldns.drivers.designate.driver:Designate oslo.config.opts = ironic.auth = neutron.opts:list_ironic_auth_opts neutron = neutron.opts:list_opts neutron.agent = neutron.opts:list_agent_opts neutron.az.agent = neutron.opts:list_az_agent_opts neutron.base.agent = neutron.opts:list_base_agent_opts neutron.db = neutron.opts:list_db_opts neutron.dhcp.agent = neutron.opts:list_dhcp_agent_opts neutron.extensions = neutron.opts:list_extension_opts neutron.l3.agent = neutron.opts:list_l3_agent_opts neutron.metadata.agent = neutron.opts:list_metadata_agent_opts neutron.metering.agent = neutron.opts:list_metering_agent_opts neutron.ml2 = neutron.opts:list_ml2_conf_opts neutron.ml2.linuxbridge.agent = neutron.opts:list_linux_bridge_opts neutron.ml2.macvtap.agent = neutron.opts:list_macvtap_opts neutron.ml2.ovn = neutron.conf.plugins.ml2.drivers.ovn.ovn_conf:list_opts neutron.ml2.ovs.agent = neutron.opts:list_ovs_opts neutron.ml2.sriov.agent = neutron.opts:list_sriov_agent_opts neutron.ml2.xenapi = neutron.opts:list_xenapi_opts neutron.ovn.metadata.agent = 
neutron.conf.agent.ovn.metadata.config:list_metadata_agent_opts nova.auth = neutron.opts:list_auth_opts oslo.config.opts.defaults = neutron = neutron.common.config:set_cors_middleware_defaults oslo.policy.enforcer = neutron = neutron.policy:get_enforcer oslo.policy.policies = neutron = neutron.conf.policies:list_rules neutron.policies = neutron = neutron.conf.policies:list_rules neutron.db.alembic_migrations = neutron = neutron.db.migration:alembic_migrations neutron.interface_drivers = linuxbridge = neutron.agent.linux.interface:BridgeInterfaceDriver null = neutron.agent.linux.interface:NullDriver openvswitch = neutron.agent.linux.interface:OVSInterfaceDriver neutron.agent.firewall_drivers = noop = neutron.agent.firewall:NoopFirewallDriver iptables = neutron.agent.linux.iptables_firewall:IptablesFirewallDriver iptables_hybrid = neutron.agent.linux.iptables_firewall:OVSHybridIptablesFirewallDriver openvswitch = neutron.agent.linux.openvswitch_firewall:OVSFirewallDriver neutron.services.metering_drivers = noop = neutron.services.metering.drivers.noop.noop_driver:NoopMeteringDriver iptables = neutron.services.metering.drivers.iptables.iptables_driver:IptablesMeteringDriver neutron.objects = AddressScope = neutron.objects.address_scope:AddressScope AllowedAddressPair = neutron.objects.port.extensions.allowedaddresspairs:AllowedAddressPair Agent = neutron.objects.agent:Agent AutoAllocatedTopology = neutron.objects.auto_allocate:AutoAllocatedTopology ConntrackHelper = neutron.objects.conntrack_helper:ConntrackHelper PortDataPlaneStatus = neutron.objects.port.extensions.data_plane_status:PortDataPlaneStatus DefaultSecurityGroup = neutron.objects.securitygroup:DefaultSecurityGroup DistributedPortBinding = neutron.objects.ports:DistributedPortBinding DNSNameServer = neutron.objects.subnet:DNSNameServer DVRMacAddress = neutron.objects.router:DVRMacAddress ExternalNetwork = neutron.objects.network:ExternalNetwork ExtraDhcpOpt = neutron.objects.port.extensions.extra_dhcp_opt:ExtraDhcpOpt Flavor = neutron.objects.flavor:Flavor FlavorServiceProfileBinding = neutron.objects.flavor:FlavorServiceProfileBinding FloatingIP = neutron.objects.router:FloatingIP FloatingIPDNS = neutron.objects.floatingip:FloatingIPDNS IPAllocation = neutron.objects.ports:IPAllocation IPAllocationPool = neutron.objects.subnet:IPAllocationPool IpamAllocation = neutron.objects.ipam:IpamAllocation IpamAllocationPool = neutron.objects.ipam:IpamAllocationPool IpamSubnet = neutron.objects.ipam:IpamSubnet Log = neutron.objects.logapi.logging_resource:Log L3HARouterAgentPortBinding = neutron.objects.l3_hamode:L3HARouterAgentPortBinding L3HARouterNetwork = neutron.objects.l3_hamode:L3HARouterNetwork L3HARouterVRIdAllocation = neutron.objects.l3_hamode:L3HARouterVRIdAllocation MeteringLabel = neutron.objects.metering:MeteringLabel MeteringLabelRule = neutron.objects.metering:MeteringLabelRule Network = neutron.objects.network:Network NetworkDNSDomain = neutron.objects.network:NetworkDNSDomain NetworkDhcpAgentBinding = neutron.objects.network:NetworkDhcpAgentBinding NetworkPortSecurity = neutron.objects.network:NetworkPortSecurity NetworkRBAC = neutron.objects.network:NetworkRBAC NetworkSegment = neutron.objects.network:NetworkSegment NetworkSegmentRange = neutron.objects.network_segment_range:NetworkSegmentRange Port = neutron.objects.ports:Port PortBinding = neutron.objects.ports:PortBinding PortBindingLevel = neutron.objects.ports:PortBindingLevel PortDNS = neutron.objects.ports:PortDNS PortForwarding = 
neutron.objects.port_forwarding:PortForwarding PortSecurity = neutron.objects.port.extensions.port_security:PortSecurity ProviderResourceAssociation = neutron.objects.servicetype:ProviderResourceAssociation ProvisioningBlock = neutron.objects.provisioning_blocks:ProvisioningBlock QosBandwidthLimitRule = neutron.objects.qos.rule:QosBandwidthLimitRule QosDscpMarkingRule = neutron.objects.qos.rule:QosDscpMarkingRule QosMinimumBandwidthRule = neutron.objects.qos.rule:QosMinimumBandwidthRule QosPolicy = neutron.objects.qos.policy:QosPolicy QosPolicyDefault = neutron.objects.qos.policy:QosPolicyDefault QosPolicyFloatingIPBinding = neutron.objects.qos.binding:QosPolicyFloatingIPBinding QosPolicyNetworkBinding = neutron.objects.qos.binding:QosPolicyNetworkBinding QosPolicyPortBinding = neutron.objects.qos.binding:QosPolicyPortBinding QosPolicyRBAC = neutron.objects.qos.policy:QosPolicyRBAC QosPolicyRouterGatewayIPBinding = neutron.objects.qos.binding:QosPolicyRouterGatewayIPBinding QosRule = neutron.objects.qos.rule:QosRule QosRuleType = neutron.objects.qos.rule_type:QosRuleType QosRuleTypeDriver = neutron.objects.qos.rule_type:QosRuleTypeDriver Quota = neutron.objects.quota:Quota QuotaUsage = neutron.objects.quota:QuotaUsage Reservation = neutron.objects.quota:Reservation ResourceDelta = neutron.objects.quota:ResourceDelta Route = neutron.objects.subnet:Route Router = neutron.objects.router:Router RouterExtraAttributes = neutron.objects.router:RouterExtraAttributes RouterL3AgentBinding = neutron.objects.l3agent:RouterL3AgentBinding RouterPort = neutron.objects.router:RouterPort RouterRoute = neutron.objects.router:RouterRoute SecurityGroup = neutron.objects.securitygroup:SecurityGroup SecurityGroupPortBinding = neutron.objects.ports:SecurityGroupPortBinding SecurityGroupRule = neutron.objects.securitygroup:SecurityGroupRule SegmentHostMapping = neutron.objects.network:SegmentHostMapping ServiceProfile = neutron.objects.flavor:ServiceProfile StandardAttribute = neutron.objects.stdattrs:StandardAttribute Subnet = neutron.objects.subnet:Subnet SubnetPool = neutron.objects.subnetpool:SubnetPool SubnetPoolPrefix = neutron.objects.subnetpool:SubnetPoolPrefix SubPort = neutron.objects.trunk:SubPort SubnetServiceType = neutron.objects.subnet:SubnetServiceType Tag = neutron.objects.tag:Tag Trunk = neutron.objects.trunk:Trunk neutron.status.upgrade.checks = neutron = neutron.cmd.upgrade_checks.checks:CoreChecks [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = neutron/locale/neutron.pot [compile_catalog] directory = neutron/locale domain = neutron neutron-log-error neutron-log-info neutron-log-warning [update_catalog] domain = neutron output_dir = neutron/locale input_file = neutron/locale/neutron.pot [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/setup.py0000644000175000017500000000200600000000000016653 0ustar00coreycorey00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. # solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/test-requirements.txt0000644000175000017500000000170300000000000021405 0ustar00coreycorey00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. hacking>=1.1.0,<1.2.0 # Apache-2.0 bandit!=1.6.0,>=1.1.0 # Apache-2.0 coverage!=4.4,>=4.0 # Apache-2.0 fixtures>=3.0.0 # Apache-2.0/BSD flake8-import-order==0.12 # LGPLv3 pycodestyle>=2.0.0 # MIT mock>=3.0.0 # BSD python-subunit>=1.0.0 # Apache-2.0/BSD testtools>=2.2.0 # MIT testresources>=2.0.0 # Apache-2.0/BSD testscenarios>=0.4 # Apache-2.0/BSD WebTest>=2.0.27 # MIT oslotest>=3.2.0 # Apache-2.0 stestr>=1.0.0 # Apache-2.0 reno>=2.5.0 # Apache-2.0 ddt>=1.0.1 # MIT astroid==1.6.5;python_version<"3.0" # LGPLv2.1 astroid==2.1.0;python_version>="3.0" # LGPLv2.1 pylint==1.9.2;python_version<"3.0" # GPLv2 pylint==2.2.0;python_version>="3.0" # GPLv2 # Needed to run DB commands in virtualenvs PyMySQL>=0.7.6 # MIT License bashate>=0.5.1 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5430467 neutron-16.0.0.0b2.dev214/tools/0000755000175000017500000000000000000000000016303 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/abandon_old_reviews.sh0000755000175000017500000001251000000000000022645 0ustar00coreycorey00000000000000#!/usr/bin/env bash # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # WARNING! # Please do not run this script without talking to the Neutron PTL. Auto # abandoning people's changes is a good thing, but must be done with care. # # before you run this modify your .ssh/config to create a # review.opendev.org entry: # # Host review.opendev.org # User <your_gerrit_username> # Port 29418 # # Note: due to gerrit bug somewhere, this double posts messages. :( # first purge all the reviews that are more than 4w old and blocked by a core -2 DRY_RUN=0 CLEAN_PROJECT="" function print_help { echo "Script to abandon patches without activity for more than 4 weeks."
echo "Usage:" echo " ./abandon_old_reviews.sh [--dry-run] [--project ] [--help]" echo " --dry-run In dry-run mode it will only print what patches would be abandoned " echo " but will not take any real actions in gerrit" echo " --project Only check patches from if passed." echo " It must be one of the projects which are a part of the Neutron stadium." echo " If project is not provided, all projects from the Neutron stadium will be checked" echo " --help Print help message" } while [ $# -gt 0 ]; do key="${1}" case $key in --dry-run) echo "Enabling dry run mode" DRY_RUN=1 shift # past argument ;; --project) CLEAN_PROJECT="project:openstack/${2}" shift # past argument shift # past value ;; --help) print_help exit 2 esac done set -o errexit DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" function abandon_review { local gitid=$1 shift local msg=$@ # echo ssh review.opendev.org gerrit review $gitid --abandon --message \"$msg\" unassign_and_new_bug $gitid if [ $DRY_RUN -eq 1 ]; then echo "Would abandon $gitid" else echo "Abandoning $gitid" ssh review.opendev.org gerrit review $gitid --abandon --message \"$msg\" fi } function unassign_and_new_bug { # unassign current assignee and set bug to 'new' status local gitid=$1 cm=$(ssh review.opendev.org "gerrit query $gitid --current-patch-set --format json" | jq .commitMessage) for closes in $(echo -e $cm | grep -i "closes" | grep -i "bug" | grep -o -E '[0-9]+'); do if [ $DRY_RUN -eq 1 ]; then echo "Would unassign and tag 'timeout-abandon' $closes" else echo "Attempting to change status of bug $closes to New" python "$DIR/unassign_bug.py" $closes fi done } PROJECTS="($( python - < 4 weeks without comment and currently blocked by a core reviewer with a -2. We are abandoning this for now. Feel free to reactivate the review by pressing the restore button and contacting the reviewer with the -2 on this review to ensure you address their concerns. EOF ) # For testing, put in a git rev of something you own and uncomment # blocked_reviews="b6c4218ae4d75b86c33fa3d37c27bc23b46b6f0f" for review in $blocked_reviews; do # echo ssh review.opendev.org gerrit review $review --abandon --message \"$msg\" echo "Blocked review $review" abandon_review $review $blocked_msg done # then purge all the reviews that are > 4w with no changes and Zuul has -1ed failing_reviews=$(ssh review.opendev.org "gerrit query --current-patch-set --format json $PROJECTS status:open age:4w NOT label:Verified>=1,Zuul" | jq .currentPatchSet.revision | grep -v null | sed 's/"//g') failing_msg=$(cat < 4 weeks without comment, and failed Zuul jobs the last time it was checked. We are abandoning this for now. Feel free to reactivate the review by pressing the restore button and leaving a 'recheck' comment to get fresh test results. EOF ) for review in $failing_reviews; do echo "Failing review $review" abandon_review $review $failing_msg done ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/check_unit_test_structure.sh0000755000175000017500000000445200000000000024142 0ustar00coreycorey00000000000000#!/usr/bin/env bash # This script identifies the unit test modules that do not correspond # directly with a module in the code tree. See TESTING.rst for the # intended structure. neutron_path=$(cd "$(dirname "$0")/.." 
&& pwd) base_test_path=neutron/tests/unit test_path=$neutron_path/$base_test_path test_files=$(find ${test_path} -iname 'test_*.py') ignore_regexes=( # The following test is required for oslo.versionedobjects "^objects/test_objects.py$" # The following open source plugin tests are not actually unit # tests and are ignored pending their relocation to the functional # test tree. "^plugins/ml2/drivers/mech_sriov/mech_driver/test_mech_sriov_nic_switch.py$" "^plugins/ml2/test_security_group.py$" "^plugins/ml2/test_port_binding.py$" "^plugins/ml2/test_extension_driver_api.py$" "^plugins/ml2/test_ext_portsecurity.py$" "^plugins/ml2/test_agent_scheduler.py$" "^plugins/ml2/test_tracked_resources.py$" "^plugins/ml2/drivers/openvswitch/agent/test_agent_scheduler.py$" "^plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py$" ) error_count=0 ignore_count=0 total_count=0 for test_file in ${test_files[@]}; do relative_path=${test_file#$test_path/} expected_path=$(dirname $neutron_path/neutron/$relative_path) test_filename=$(basename "$test_file") expected_filename=${test_filename#test_} # Module filename (e.g. foo/bar.py -> foo/test_bar.py) filename=$expected_path/$expected_filename # Package dir (e.g. foo/ -> test_foo.py) package_dir=${filename%.py} if [ ! -f "$filename" ] && [ ! -d "$package_dir" ]; then for ignore_regex in ${ignore_regexes[@]}; do if [[ "$relative_path" =~ $ignore_regex ]]; then ignore_count=$((ignore_count + 1)) continue 2 fi done echo "Unexpected test file: $base_test_path/$relative_path" error_count=$((error_count + 1)) fi total_count=$((total_count + 1)) done if [ "$ignore_count" -ne 0 ]; then echo "$ignore_count unmatched test modules were ignored" fi if [ "$error_count" -eq 0 ]; then echo 'Success! All test modules match targets in the code tree.' exit 0 else echo "Failure! $error_count of $total_count test modules do not match targets in the code tree." exit 1 fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/coding-checks.sh0000755000175000017500000000252700000000000021351 0ustar00coreycorey00000000000000#!/bin/sh set -eu usage () { echo "Usage: $0 [OPTION]..." echo "Run Neutron's coding check(s)" echo "" echo " -Y, --pylint [] Run pylint check on the entire neutron module or just files changed in basecommit (e.g. HEAD~1)" echo " -h, --help Print this usage message" echo exit 0 } process_options () { i=1 while [ $i -le $# ]; do eval opt=\$$i case $opt in -h|--help) usage;; -Y|--pylint) pylint=1;; *) scriptargs="$scriptargs $opt" esac i=$((i+1)) done } run_pylint () { local target="${scriptargs:-all}" if [ "$target" = "all" ]; then files="neutron" else case "$target" in *HEAD~[0-9]*) files=$(git diff --diff-filter=AM --name-only $target -- "*.py");; *) echo "$target is an unrecognized basecommit"; exit 1;; esac fi echo "Running pylint..." echo "You can speed this up by running it on 'HEAD~[0-9]' (e.g. HEAD~1, this change only)..." if [ -n "${files}" ]; then pylint --rcfile=.pylintrc --output-format=colorized ${files} else echo "No python changes in this commit, pylint check not required." 
exit 0 fi } scriptargs= pylint=1 process_options $@ if [ $pylint -eq 1 ]; then run_pylint exit 0 fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/configure_for_func_testing.sh0000755000175000017500000002474400000000000024254 0ustar00coreycorey00000000000000#!/usr/bin/env bash # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. set -e # Control variable used to determine whether to execute this script # directly or allow the gate_hook to import. IS_GATE=${IS_GATE:-False} USE_CONSTRAINT_ENV=${USE_CONSTRAINT_ENV:-True} if [[ "$IS_GATE" != "True" ]] && [[ "$#" -lt 1 ]]; then >&2 echo "Usage: $0 /path/to/devstack [-i] Configure a host to run Neutron's functional test suite. -i Install Neutron's package dependencies. By default, it is assumed that devstack has already been used to deploy neutron to the target host and that package dependencies need not be installed. Warning: This script relies on devstack to perform extensive modification to the underlying host. It is recommended that it be invoked only on a throw-away VM. NOTE: Default values in this file, such as passwords, have been taken from the devstack samples/local.conf file, but can be over-ridden by setting them in your environment if necessary." exit 1 fi # Skip the first argument OPTIND=2 while getopts ":i" opt; do case $opt in i) INSTALL_BASE_DEPENDENCIES=True ;; esac done # Default to environment variables to permit the gate_hook to override # when sourcing. VENV=${VENV:-dsvm-functional} DEVSTACK_PATH=${DEVSTACK_PATH:-$1} PROJECT_NAME=${PROJECT_NAME:-neutron} REPO_BASE=${GATE_DEST:-$(cd $(dirname "$0")/../.. && pwd)} NEUTRON_PATH=${NEUTRON_PATH:=$REPO_BASE/$PROJECT_NAME} INSTALL_MYSQL_ONLY=${INSTALL_MYSQL_ONLY:-False} # The gate should automatically install dependencies. INSTALL_BASE_DEPENDENCIES=${INSTALL_BASE_DEPENDENCIES:-$IS_GATE} BUILD_OVS_FROM_SOURCE=${BUILD_OVS_FROM_SOURCE:-True} if [ ! -f "$DEVSTACK_PATH/stack.sh" ]; then >&2 echo "Unable to find devstack at '$DEVSTACK_PATH'. Please verify that the specified path points to a valid devstack repo." exit 1 fi set -x function _init { # Subsequently-called devstack functions depend on the following variables. HOST_IP=127.0.0.1 FILES=$DEVSTACK_PATH/files TOP_DIR=$DEVSTACK_PATH if [ -f $DEVSTACK_PATH/local.conf ]; then source $DEVSTACK_PATH/local.conf 2> /dev/null fi source $DEVSTACK_PATH/stackrc # Allow the gate to override values set by stackrc. DEST=${GATE_DEST:-$DEST} STACK_USER=${GATE_STACK_USER:-$STACK_USER} GetDistro source $DEVSTACK_PATH/tools/fixup_stuff.sh fixup_ubuntu } function _install_base_deps { echo_summary "Installing base dependencies" INSTALL_TESTONLY_PACKAGES=True if [[ "$BUILD_OVS_FROM_SOURCE" == "True" ]]; then PACKAGES=$(get_packages general,neutron,q-agt,q-l3) # Do not install 'python-' prefixed packages other than # python-dev*. Neutron's functional testing relies on deployment # to a tox env so there is no point in installing python # dependencies system-wide. 
PACKAGES=$(echo $PACKAGES | perl -pe 's|python-(?!dev)[^ ]*||g') install_package $PACKAGES source $NEUTRON_PATH/devstack/lib/ovs remove_ovs_packages OVS_BRANCH="v2.12.0" compile_ovs False /usr /var else PACKAGES=$(get_packages general,neutron,q-agt,q-l3,openvswitch) PACKAGES=$(echo $PACKAGES | perl -pe 's|python-(?!dev)[^ ]*||g') install_package $PACKAGES fi } function _install_rpc_backend { echo_summary "Installing rabbitmq" RABBIT_USERID=${RABBIT_USERID:-stackrabbit} RABBIT_HOST=${RABBIT_HOST:-$SERVICE_HOST} RABBIT_PASSWORD=${RABBIT_PASSWORD:-stackqueue} source $DEVSTACK_PATH/lib/rpc_backend enable_service rabbit install_rpc_backend restart_rpc_backend } # _install_databases [install_pg] function _install_databases { local install_pg=${1:-True} echo_summary "Installing databases" # Avoid attempting to configure the db if it appears to already # have run. The setup as currently defined is not idempotent. if mysql openstack_citest > /dev/null 2>&1 < /dev/null; then echo_summary "DB config appears to be complete, skipping." return 0 fi MYSQL_PASSWORD=${MYSQL_PASSWORD:-stackdb} DATABASE_PASSWORD=${DATABASE_PASSWORD:-stackdb} source $DEVSTACK_PATH/lib/database enable_service mysql initialize_database_backends install_database configure_database_mysql if [[ "$install_pg" == "True" ]]; then enable_service postgresql initialize_database_backends install_database configure_database_postgresql fi # Set up the 'openstack_citest' user and database in each backend tmp_dir=$(mktemp -d) trap "rm -rf $tmp_dir" EXIT cat << EOF > $tmp_dir/mysql.sql CREATE DATABASE openstack_citest; CREATE USER 'openstack_citest'@'localhost' IDENTIFIED BY 'openstack_citest'; CREATE USER 'openstack_citest' IDENTIFIED BY 'openstack_citest'; GRANT ALL PRIVILEGES ON *.* TO 'openstack_citest'@'localhost'; GRANT ALL PRIVILEGES ON *.* TO 'openstack_citest'; FLUSH PRIVILEGES; EOF /usr/bin/mysql -u root -p"$MYSQL_PASSWORD" < $tmp_dir/mysql.sql if [[ "$install_pg" == "True" ]]; then cat << EOF > $tmp_dir/postgresql.sql CREATE USER openstack_citest WITH CREATEDB LOGIN PASSWORD 'openstack_citest'; CREATE DATABASE openstack_citest WITH OWNER openstack_citest; EOF # User/group postgres needs to be given access to tmp_dir setfacl -m g:postgres:rwx $tmp_dir sudo -u postgres /usr/bin/psql --file=$tmp_dir/postgresql.sql fi } function _install_agent_deps { echo_summary "Installing agent dependencies" ENABLED_SERVICES=q-agt,q-dhcp,q-l3 source $DEVSTACK_PATH/lib/neutron install_neutron_agent_packages } # Set up the rootwrap sudoers for neutron to target the rootwrap # configuration deployed in the venv. function _install_rootwrap_sudoers { echo_summary "Installing rootwrap sudoers file" PROJECT_VENV=$REPO_BASE/$PROJECT_NAME/.tox/$VENV ROOTWRAP_SUDOER_CMD="$PROJECT_VENV/bin/neutron-rootwrap $PROJECT_VENV/etc/neutron/rootwrap.conf *" ROOTWRAP_DAEMON_SUDOER_CMD="$PROJECT_VENV/bin/neutron-rootwrap-daemon $PROJECT_VENV/etc/neutron/rootwrap.conf" TEMPFILE=$(mktemp) SECURE_PATH="$PROJECT_VENV/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" if [[ "$VENV" =~ "dsvm-fullstack" ]]; then SECURE_PATH="$REPO_BASE/$PROJECT_NAME/neutron/tests/fullstack/agents:$SECURE_PATH" fi cat << EOF > $TEMPFILE # A bug in oslo.rootwrap [1] prevents commands executed with 'ip netns # exec' from being automatically qualified with a prefix from # rootwrap's configured exec_dirs. To work around this problem, add # the venv bin path to a user-specific secure_path. 
# # While it might seem preferable to set a command-specific # secure_path, this would only ensure the correct path for 'ip netns # exec' and the command targeted for execution in the namespace would # not inherit the path. # # 1: https://bugs.launchpad.net/oslo.rootwrap/+bug/1417331 # Defaults:$STACK_USER secure_path="$SECURE_PATH" $STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD $STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD EOF chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE # Name the functional testing rootwrap to ensure that it will be # loaded after the devstack rootwrap (50_stack_sh if present) so # that the functional testing secure_path (a superset of what # devstack expects) will not be overwritten. sudo mv $TEMPFILE /etc/sudoers.d/60-neutron-func-test-rootwrap } function _install_post_devstack { echo_summary "Performing post-devstack installation" _install_databases _install_rootwrap_sudoers if is_ubuntu; then install_package isc-dhcp-client install_package nmap elif is_fedora; then install_package dhclient install_package nmap-ncat elif is_suse; then install_package dhcp-client # NOTE(armax): no harm in allowing 'other' to read and # execute the script. This is required in fullstack # testing and avoids quite a bit of rootwrap pain sudo chmod o+rx /sbin/dhclient-script install_package ncat else exit_distro_not_supported "installing dhclient and ncat packages" fi # Installing python-openvswitch from packages is a stop-gap while # python-openvswitch remains unavailable from pypi. This also # requires that sitepackages=True be set in tox.ini to allow the # venv to use the installed package. Once python-openvswitch # becomes available on pypi, this will no longer be required. # # NOTE: the package name 'python-openvswitch' is common across # supported distros. install_package python-openvswitch enable_kernel_bridge_firewall } function _configure_iptables_rules { # For linuxbridge agent fullstack tests we need to add special rules to # iptables for connection of agents to rabbitmq: CHAIN_NAME="openstack-INPUT" sudo iptables -n --list $CHAIN_NAME 1> /dev/null 2>&1 || CHAIN_NAME="INPUT" sudo iptables -I $CHAIN_NAME -s 240.0.0.0/8 -p tcp -m tcp -d 240.0.0.0/8 --dport 5672 -j ACCEPT } function _enable_ipv6 { sudo sysctl -w net.ipv6.conf.all.disable_ipv6=0 } function configure_host_for_func_testing { echo_summary "Configuring host for functional testing" if [[ "$INSTALL_BASE_DEPENDENCIES" == "True" ]]; then # Installing of the following can be achieved via devstack by # installing neutron, so their installation is conditional to # minimize the work to do on a devstack-configured host. _install_base_deps _install_agent_deps _install_rpc_backend fi _install_post_devstack } _init if [[ "$IS_GATE" != "True" ]]; then if [[ "$INSTALL_MYSQL_ONLY" == "True" ]]; then _install_databases nopg else configure_host_for_func_testing fi fi if [[ "$VENV" =~ "dsvm-fullstack" ]]; then _enable_ipv6 _configure_iptables_rules # This module only exists on older kernels, built-in otherwise modinfo ip_conntrack_proto_sctp 1> /dev/null 2>&1 && sudo modprobe ip_conntrack_proto_sctp fi echo "Phew, we're done!" 
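# Example invocation (a minimal sketch; the devstack checkout path and the
# neutron checkout path below are illustrative assumptions, and the tox env
# matches the default VENV above):
#
#   ./tools/configure_for_func_testing.sh ~/devstack -i
#   cd ~/neutron && tox -e dsvm-functional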
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/deploy_rootwrap.sh0000755000175000017500000000407500000000000022101 0ustar00coreycorey00000000000000#!/usr/bin/env bash # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. set -eu if [ "$#" -ne 3 ]; then >&2 echo "Usage: $0 /path/to/neutron /path/to/target/etc /path/to/target/bin Deploy Neutron's rootwrap configuration. Warning: Any existing rootwrap files at the specified etc path will be removed by this script. Optional: set OS_SUDO_TESTING=1 to deploy the filters required by Neutron's functional testing suite." exit 1 fi OS_SUDO_TESTING=${OS_SUDO_TESTING:-0} neutron_path=$1 target_etc_path=$2 target_bin_path=$3 fullstack_path=$neutron_path/neutron/tests/fullstack/agents src_conf_path=${neutron_path}/etc src_conf=${src_conf_path}/rootwrap.conf src_rootwrap_path=${src_conf_path}/neutron/rootwrap.d dst_conf_path=${target_etc_path}/neutron dst_conf=${dst_conf_path}/rootwrap.conf dst_rootwrap_path=${dst_conf_path}/rootwrap.d absolute_neutron_path=$(pwd) if [[ -d "$dst_rootwrap_path" ]]; then rm -rf ${dst_rootwrap_path} fi mkdir -p -m 755 ${dst_rootwrap_path} cp -p ${src_rootwrap_path}/* ${dst_rootwrap_path}/ cp -p ${src_conf} ${dst_conf} sed -i "s:^filters_path=.*$:filters_path=${absolute_neutron_path}/${dst_rootwrap_path}:" ${dst_conf} sed -i "s:^exec_dirs=\(.*\)$:exec_dirs=${target_bin_path},${fullstack_path},\1:" ${dst_conf} if [[ "$OS_SUDO_TESTING" = "1" ]]; then sed -i 's/use_syslog=False/use_syslog=True/g' ${dst_conf} sed -i 's/syslog_log_level=ERROR/syslog_log_level=DEBUG/g' ${dst_conf} sed -i 's/daemon_timeout=600/daemon_timeout=7800/g' ${dst_conf} cp -p ${neutron_path}/neutron/tests/contrib/testing.filters \ ${dst_rootwrap_path}/ fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/download_gerrit_change.py0000755000175000017500000000367700000000000023365 0ustar00coreycorey00000000000000#!/usr/bin/env python3 # Copyright 2020 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
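# Example usage (a minimal sketch; the change number and output file name are
# illustrative assumptions):
#
#   ./download_gerrit_change.py 717851 -o change.patch
#
# Without -o (or with '-o -'), the decoded patch is printed to stdout.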
import base64 import click import requests GERRIT_URL = 'https://review.opendev.org/' TIMEOUT = 10 def fetch(change, output_patch=None, url=GERRIT_URL, timeout=TIMEOUT): params = {'download': None} r = requests.get( url='{}/changes/{}/revisions/current/patch'.format(url, change), params=params, timeout=timeout) r.raise_for_status() message_bytes = base64.b64decode(r.text) if output_patch and output_patch != '-': with open(output_patch, 'wb') as output_fd: output_fd.write(message_bytes) return str(message_bytes, 'utf-8') @click.command() @click.argument('gerrit_change', nargs=1, type=click.INT) @click.option('-o', '--output_patch', help='Output patch file [default: stdout]') @click.option('-g', '--gerrit_url', default=GERRIT_URL, show_default=True, help='The url to Gerrit server') @click.option('-t', '--timeout', default=TIMEOUT, show_default=True, type=click.INT, help='Timeout, in seconds') def cli(gerrit_change, output_patch, gerrit_url, timeout): message = fetch(gerrit_change, output_patch, gerrit_url, timeout) if not output_patch or output_patch == '-': print(message) if __name__ == '__main__': cli() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/files_in_patch.py0000755000175000017500000000471400000000000021635 0ustar00coreycorey00000000000000#!/usr/bin/env python3 # Copyright 2020 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
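# Example usage (a minimal sketch; the patch file name is an illustrative
# assumption):
#
#   git show HEAD | ./files_in_patch.py
#   ./files_in_patch.py change.patch
#
# Either form prints the sorted set of file names touched by the given diff(s).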
import re import sys file_names = set() def parse_input(input_file): global file_names while True: line_buffer = input_file.readline() if not line_buffer: break line_match = re.search(r"^\s*---\s+([^\s@]+)[\s@]+", line_buffer) if not line_match: line_match = re.search(r"^\s*\+\+\+\s+([^\s@]+)[\s@]+", line_buffer) if line_match: curr_file_name = line_match.group(1) # trim off 'a/' and 'b/' that you will normally see in git output # if len(curr_file_name) > 2 and curr_file_name[1] == '/' and ( curr_file_name[0] == 'a' or curr_file_name[0] == 'b'): curr_file_name = curr_file_name[2:] file_names.add(curr_file_name) def prune_unwanted_names(): global file_names unwanted_names = set(['/dev/null']) for curr_file_name in file_names: # ignore files that end in '.orig' as long as non-.orig exists line_match = re.search(r"^(.+)\.[oO][Rr][iI][gG]$", curr_file_name) if line_match and line_match.group(1) in file_names: unwanted_names.add(curr_file_name) continue file_names -= unwanted_names def print_file_names(): for name in sorted(file_names): print(name) if __name__ == '__main__': if len(sys.argv) == 1: parse_input(sys.stdin) else: for curr_input_name in sys.argv[1:]: try: with open(curr_input_name, 'r') as curr_input_file: parse_input(curr_input_file) except IOError as e_str: sys.stderr.write( "Cannot open {}: {}\n".format(curr_input_name, e_str)) sys.exit(255) prune_unwanted_names() print_file_names() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/generate_config_file_samples.sh0000755000175000017500000000143000000000000024502 0ustar00coreycorey00000000000000#!/bin/sh # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. set -e GEN_CMD=oslo-config-generator if ! type "$GEN_CMD" > /dev/null; then echo "ERROR: $GEN_CMD not installed on the system." exit 1 fi for file in `ls etc/oslo-config-generator/*`; do $GEN_CMD --config-file=$file done ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/generate_dhclient_script_for_fullstack.sh0000755000175000017500000000160000000000000026605 0ustar00coreycorey00000000000000#!/bin/bash MAKE_RESOLV_CONF_FUNCTION=make_resolv_conf USAGE="$0 The script takes existing dhclient-script and makes $MAKE_RESOLV_CONF_FUNCTION function a noop function. " if [ $# -lt 1 ]; then echo "Path to virtual environment directory is a required parameter." echo $USAGE exit 2 fi VENV_DIR=$1 DHCLIENT_SCRIPT_NAME=dhclient-script DHCLIENT_PATH=$(which $DHCLIENT_SCRIPT_NAME) FULLSTACK_DHCLIENT_SCRIPT=$VENV_DIR/bin/fullstack-dhclient-script if [ -n "$DHCLIENT_PATH" ]; then # Return from make_resolv_conf function immediately. This will cause # that /etc/resolv.conf will not be updated by fake fullstack machines. sed "/^$MAKE_RESOLV_CONF_FUNCTION()/a\ return" $DHCLIENT_PATH > $FULLSTACK_DHCLIENT_SCRIPT chmod +x $FULLSTACK_DHCLIENT_SCRIPT else echo "$DHCLIENT_SCRIPT_NAME not found." 
exit 1 fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/install_venv.py0000644000175000017500000000464200000000000021367 0ustar00coreycorey00000000000000#!/usr/bin/env python # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2010 OpenStack Foundation. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Installation script for Neutron's development virtualenv """ from __future__ import print_function import os import sys import install_venv_common as install_venv def print_help(): help = """ Neutron development environment setup is complete. Neutron development uses virtualenv to track and manage Python dependencies while in development and testing. To activate the Neutron virtualenv for the extent of your current shell session you can run: $ . .venv/bin/activate Or, if you prefer, you can run commands in the virtualenv on a case by case basis by running: $ tools/with_venv.sh <your command> Also, make test will automatically use the virtualenv. """ print(help) def main(argv): if 'tools_path' in os.environ: root = os.environ['tools_path'] else: root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) if 'venv' in os.environ: venv = os.environ['venv'] else: venv = os.path.join(root, '.venv') pip_requires = os.path.join(root, 'requirements.txt') test_requires = os.path.join(root, 'test-requirements.txt') py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) project = 'Neutron' install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, py_version, project) options = install.parse_args(argv) install.check_python_version() install.check_dependencies() install.create_virtualenv(no_site_packages=options.no_site_packages) install.install_dependencies() print_help() if __name__ == '__main__': main(sys.argv) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/install_venv_common.py0000644000175000017500000001350700000000000022737 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Provides methods needed by installation script for OpenStack development virtual environments.
Since this script is used to bootstrap a virtualenv from the system's Python environment, it should be kept strictly compatible with Python 2.6. Synced in from openstack-common """ from __future__ import print_function import optparse import os import subprocess import sys class InstallVenv(object): def __init__(self, root, venv, requirements, test_requirements, py_version, project): self.root = root self.venv = venv self.requirements = requirements self.test_requirements = test_requirements self.py_version = py_version self.project = project def die(self, message, *args): print(message % args, file=sys.stderr) sys.exit(1) def check_python_version(self): if sys.version_info < (2, 6): self.die("Need Python Version >= 2.6") def run_command_with_code(self, cmd, redirect_output=True, check_exit_code=True): """Runs a command in an out-of-process shell. Returns the output of that command. Working directory is self.root. """ if redirect_output: stdout = subprocess.PIPE else: stdout = None proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout) output = proc.communicate()[0] if check_exit_code and proc.returncode != 0: self.die('Command "%s" failed.\n%s', ' '.join(cmd), output) return (output, proc.returncode) def run_command(self, cmd, redirect_output=True, check_exit_code=True): return self.run_command_with_code(cmd, redirect_output, check_exit_code)[0] def get_distro(self): if (os.path.exists('/etc/fedora-release') or os.path.exists('/etc/redhat-release')): return Fedora( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) else: return Distro( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) def check_dependencies(self): self.get_distro().install_virtualenv() def create_virtualenv(self, no_site_packages=True): """Creates the virtual environment and installs PIP. Creates the virtual environment and installs PIP only into the virtual environment. 
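An illustrative call sequence (mirroring main() in install_venv.py above): InstallVenv(root, venv, requirements, test_requirements, py_version, project) followed by create_virtualenv(no_site_packages=True).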
""" if not os.path.isdir(self.venv): print('Creating venv...', end=' ') if no_site_packages: self.run_command(['virtualenv', '-q', '--no-site-packages', self.venv]) else: self.run_command(['virtualenv', '-q', self.venv]) print('done.') else: print("venv already exists...") pass def pip_install(self, *args): self.run_command(['tools/with_venv.sh', 'pip', 'install', '--upgrade'] + list(args), redirect_output=False) def install_dependencies(self): print('Installing dependencies with pip (this can take a while)...') # First things first, make sure our venv has the latest pip and # setuptools and pbr self.pip_install('pip>=1.4') self.pip_install('setuptools') self.pip_install('pbr') self.pip_install('-r', self.requirements, '-r', self.test_requirements) def parse_args(self, argv): """Parses command-line arguments.""" parser = optparse.OptionParser() parser.add_option('-n', '--no-site-packages', action='store_true', help="Do not inherit packages from global Python " "install.") return parser.parse_args(argv[1:])[0] class Distro(InstallVenv): def check_cmd(self, cmd): return bool(self.run_command(['which', cmd], check_exit_code=False).strip()) def install_virtualenv(self): if self.check_cmd('virtualenv'): return if self.check_cmd('easy_install'): print('Installing virtualenv via easy_install...', end=' ') if self.run_command(['easy_install', 'virtualenv']): print('Succeeded') return else: print('Failed') self.die('ERROR: virtualenv not found.\n\n%s development' ' requires virtualenv, please install it using your' ' favorite package management tool' % self.project) class Fedora(Distro): """This covers all Fedora-based distributions. Includes: Fedora, RHEL, CentOS, Scientific Linux """ def check_pkg(self, pkg): return self.run_command_with_code(['rpm', '-q', pkg], check_exit_code=False)[1] == 0 def install_virtualenv(self): if self.check_cmd('virtualenv'): return if not self.check_pkg('python-virtualenv'): self.die("Please install 'python-virtualenv'.") super(Fedora, self).install_virtualenv() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/list_moved_globals.py0000755000175000017500000000235500000000000022535 0ustar00coreycorey00000000000000#!/usr/bin/env python # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Check for globals that are now available in neutron-lib """ from __future__ import print_function def check_globals(things, nmod, lmod): core = vars(nmod)['_mg__my_globals'] lib = vars(lmod) moved_things = [] for thing in core: if thing.startswith('__') or thing == '_': continue if thing in lib: moved_things.append(thing) if moved_things: print("\nThese %s have moved to neutron-lib:" % things) for moved_thing in sorted(moved_things): print(" %s" % moved_thing) def main(): """Currently no globals are deprecated.""" if __name__ == '__main__': main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/migrate_names.py0000755000175000017500000000751500000000000021503 0ustar00coreycorey00000000000000#!/usr/bin/env python3 # Copyright 2020 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections import namedtuple import contextlib import os import re import sys import click import download_gerrit_change root_dir = os.path.dirname(os.path.realpath(__file__)) Migration = namedtuple('Migration', 'from_repo to_repo') def read_mapfile(mapfile): dirmaps = [] with open(mapfile, 'r') as mapfile_fd: for line_buffer in mapfile_fd.readlines(): # ignore empty lines and anything after # line_match = re.search("^([^#]+)", line_buffer.strip()) if not line_match: continue line_buffer = line_match.group(1) # look for tuple of 2 elements line_match = re.search(r"^([^\s]+)\s+(.+)", line_buffer.strip()) if not line_match: continue ovn_match, neutron_match = line_match.group(1), line_match.group(2) dirmaps.append(Migration(neutron_match, ovn_match)) return dirmaps def parse_input(dirmaps, patch_content, output_fd): for line_buffer in patch_content.splitlines(): # locate markers in patch file for filenames and see if they need # to me renamed based on dirmaps filename_replaced = False line_match = re.search(r"^\s*---\s+([^\s@]+)[\s@]*", line_buffer) if not line_match: line_match = re.search(r"^\s*\+\+\+\s+([^\s@]+)[\s@]*", line_buffer) if line_match: for old, new in dirmaps: new_line_buffer = line_buffer.replace(old, new) if new_line_buffer != line_buffer: filename_replaced = True output_fd.write("{}\n".format(new_line_buffer)) break if not filename_replaced: output_fd.write("{}\n".format(line_buffer)) @contextlib.contextmanager def open_output(filename=None): if filename and filename != '-': fh = open(filename, 'w') else: fh = sys.stdout try: yield fh finally: if fh is not sys.stdout: fh.close() @click.command() @click.option('-i', '--input_patch', prompt='Input patch file or gerrit id', help='input_patch patch file or gerrit change') @click.option('-o', '--output_patch', default='-', help='Output patch file. 
Default: stdout') @click.option('-m', '--mapfile', default=os.path.join(root_dir, 'migrate_names.txt'), show_default=True, type=click.Path(), help='Data file that specifies mapping to be applied to input') @click.option('--reverse/--no-reverse', default=False, help='Map filenames from networking-ovn to Neutron repo') def cli(input_patch, output_patch, mapfile, reverse): dirmaps = read_mapfile(mapfile) if reverse: dirmaps = [Migration(two, one) for one, two in dirmaps] if os.path.isfile(input_patch): with open(input_patch, 'r') as input_fd: patch_content = ''.join(input_fd.readlines()) else: patch_content = download_gerrit_change.fetch(input_patch) with open_output(output_patch) as output_fd: parse_input(dirmaps, patch_content, output_fd) if __name__ == '__main__': cli() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/migrate_names.txt0000644000175000017500000000636400000000000021670 0ustar00coreycorey00000000000000# This file provides a list of tuples that represent how the files # in Networking-OVN repo are mapped to/from Neutron repo, as part of the blue # print documented in: # # https://review.opendev.org/#/c/658414/ (specs/ussuri/ml2ovs-ovn-convergence.rst) # # Also see: # https://ethercalc.openstack.org/networking-ovn-migration # https://review.opendev.org/#/q/topic:bp/neutron-ovn-merge+-is:abandoned # # Empty lines and anything after # are ignored. # The 2 columns in this tile are added as a tuple in a list of # files and directories to be mapped. More specific lines must be listed # above less specific lines as the mapping stops on the first match. # # Networking-OVN Neutron devstack/lib/ovn devstack/lib/ovn_agent networking_ovn/ovn_db_sync.py neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py networking_ovn/ovsdb/commands.py neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/commands.py networking_ovn/ovsdb/impl_idl_ovn.py neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/impl_idl_ovn.py networking_ovn/ovsdb/ovn_api.py neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/api.py networking_ovn/ovsdb/ovsdb_monitor.py neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovsdb_monitor.py networking_ovn/ovsdb/worker.py neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/worker.py networking_ovn/agent/metadata neutron/agent/ovn/metadata networking_ovn/cmd neutron/cmd networking_ovn/common/config.py neutron/conf/ovn.py networking_ovn/common/acl.py neutron/plugins/ml2/drivers/ovn/common/acl.py networking_ovn/common/constants.py neutron/common/ovn/constants.py networking_ovn/common/exceptions.py neutron/common/ovn/exceptions.py networking_ovn/common/hash_ring_manager.py neutron/common/ovn/hash_ring_manager.py networking_ovn/common/maintanance.py neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py networking_ovn/common/ovn_client.py neutron/plugins/ml2/drivers/ovn/common/client.py networking_ovn/common/utils.py neutron/common/ovn/utils.py networking_ovn/conf/agent/metadata neutron/conf/agent/ovn/metadata.py networking_ovn/db neutron/db networking_ovn/ml2/mech_driver.py neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py networking_ovn/ml2/qos_driver.py neutron/services/qos/drivers/ovn/driver.py networking_ovn/ml2/trunk_driver.py neutron/services/trunk/drivers/ovn/trunk_driver.py networking_ovn/l3/l3_ovn.py neutron/services/ovn_l3/l3_ovn.py networking_ovn/l3/l3_ovn_scheduler.py neutron/scheduler/ovn_l3_scheduler.py networking_ovn/tests/unit/ml2/test_mech_driver.py 
neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py networking_ovn/tests/unit/ml2/test_qos_driver.py neutron/tests/unit/services/qos/drivers/ovn/test_driver.py networking_ovn/tests/unit/ml2/test_trunk_driver.py neutron/tests/unit/services/trunk/drivers/ovn/test_trunk_driver.py networking_ovn/tests neutron/tests networking_ovn/common/extensions.py neutron/extensions/ovn.py networking_ovn/tests/unit/fakes.py neutron/tests/unit/fake_resources.py migration tools/migration_to_ovn ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/misc-sanity-checks.sh0000755000175000017500000000334300000000000022343 0ustar00coreycorey00000000000000#! /bin/sh # Copyright (C) 2014 VA Linux Systems Japan K.K. # Copyright (C) 2014 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. TMPDIR=`mktemp -d /tmp/${0##*/}.XXXXXX` || exit 1 export TMPDIR trap "rm -rf $TMPDIR" EXIT FAILURES=$TMPDIR/failures check_no_symlinks_allowed () { # Symlinks break the package build process, so ensure that they # do not slip in, except hidden symlinks. if [ $(find . -type l ! -path '*/.*' | wc -l) -ge 1 ]; then echo "Symlinks are not allowed!" >>$FAILURES fi } check_pot_files_errors () { # The job neutron-propose-translation-update does not update from # transifex since our po files contain duplicate entries where # obsolete entries duplicate normal entries. Prevent obsolete # entries to slip in find neutron -type f -regex '.*\.pot?' \ -print0|xargs -0 -n 1 msgfmt --check-format \ -o /dev/null if [ "$?" -ne 0 ]; then echo "PO files syntax is not correct!" >>$FAILURES fi } # Add your checks here... check_no_symlinks_allowed check_pot_files_errors # Fail, if there are emitted failures if [ -f $FAILURES ]; then cat $FAILURES exit 1 fi ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5430467 neutron-16.0.0.0b2.dev214/tools/ovn_migration/0000755000175000017500000000000000000000000021156 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/README.rst0000644000175000017500000000265000000000000022650 0ustar00coreycorey00000000000000Migration from ML2/OVS to ML2/OVN ================================= Proof-of-concept ansible script for migrating an OpenStack deployment that uses ML2/OVS to OVN. If you have a tripleo ML2/OVS deployment then please see the folder ``tripleo_environment`` Prerequisites: 1. Ansible 2.2 or greater. 2. ML2/OVS must be using the OVS firewall driver. To use: 1. Create an ansible inventory with the expected set of groups and variables as indicated by the hosts-sample file. 2. Run the playbook:: $ ansible-playbook migrate-to-ovn.yml -i hosts Testing Status: - Tested on an RDO cloud on CentOS 7.3 based on Ocata. - The cloud had 3 controller nodes and 6 compute nodes. 
- Observed network downtime was 10 seconds. - The "--forks 10" option was used with ansible-playbook to ensure that commands could be run across the entire environment in parallel. MTU: - If migrating an ML2/OVS deployment using VXLAN tenant networks to an OVN deployment using Geneve for tenant networks, we have an unresolved issue around MTU. The VXLAN overhead is 30 bytes. OVN with Geneve has an overhead of 38 bytes. We need the tenant networks MTU adjusted for OVN and then we need all VMs to receive the updated MTU value through DHCP before the migration can take place. For testing purposes, we've just hacked the Neutron code to indicate that the VXLAN overhead was 38 bytes instead of 30, bypassing the issue at migration time. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/hosts.sample0000644000175000017500000000232000000000000023516 0ustar00coreycorey00000000000000# All controller nodes running OpenStack control services, particularly # neutron-api. Also indicate which controller you'd like to have run # the OVN central control services. [controller] overcloud-controller-0 ovn_central=true overcloud-controller-1 overcloud-controller-2 # All compute nodes. We will replace the openvswitch agent # with ovn-controller on these nodes. # # The ovn_encap_ip variable should be filled in with the IP # address that other compute hosts should use as the tunnel # endpoint for tunnels to that host. [compute] overcloud-novacompute-0 ovn_encap_ip=192.0.2.10 overcloud-novacompute-1 ovn_encap_ip=192.0.2.11 overcloud-novacompute-2 ovn_encap_ip=192.0.2.12 overcloud-novacompute-3 ovn_encap_ip=192.0.2.13 overcloud-novacompute-4 ovn_encap_ip=192.0.2.14 overcloud-novacompute-5 ovn_encap_ip=192.0.2.15 # Configure bridge mappings to be used on compute hosts. [compute:vars] ovn_bridge_mappings=net1:br-em1 is_compute_node=true [overcloud:children] controller compute # Fill in "ovn_db_ip" with an IP address on a management network # that the controller and compute nodes should reach. This address # should not be reachable otherwise. [overcloud:vars] ovn_db_ip=192.0.2.50 remote_user=heat-admin ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.0270422 neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/0000755000175000017500000000000000000000000022750 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5430467 neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/0000755000175000017500000000000000000000000027215 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/README.rst0000644000175000017500000000265000000000000030707 0ustar00coreycorey00000000000000Infrared plugin to carry out migration from ML2/OVS to OVN ========================================================== This is an infrared plugin which can be used to carry out the migration from ML2/OVS to OVN if the tripleo was deployed using infrared. See http://infrared.readthedocs.io/en/stable/index.html for more information. Before using this plugin, first deploy an ML2/OVS overcloud and then: 1. 
On your undercloud, install python-neutron-ovn-migration-tool package (https://trunk.rdoproject.org/centos7-master/current/) You also need to install python-neutron and python3-openvswitch packages. 2. Run :: $infrared plugin add "https://github.com/openstack/neutron.git" 3. Start migration by running:: $infrared tripleo-ovn-migration --version 13|14 \ --registry-namespace \ --registry-tag \ --registry-prefix Using this as a standalone playbook for tripleo deployments =========================================================== It is also possible to use the playbook main.yml with tripleo deployments. In order to use this: 1. Create hosts inventory file like below [undercloud] undercloud_ip ansible_ssh_user=stack 2. Run the playbook as: ansible-playbook main.yml -i hosts -e install_from_package=True -e registry_prefix=centos-binary -e registry_namespace=docker.io/tripleomaster -e registry_localnamespace=192.168.24.1:8787/tripleomaster -e registry_tag=current-tripleo-rdo ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/main.yml0000644000175000017500000001667500000000000030703 0ustar00coreycorey00000000000000# Playbook which preps migration and then invokes the migration script. - name: Install migration tool hosts: undercloud become: true tasks: - name: Install python 3 virtualenv and neutron ovn migration tool yum: name: - python3-virtualenv - python3-neutron-ovn-migration-tool state: present - name: Set host_key_checking to False in ansible.cfg ini_file: path=/etc/ansible/ansible.cfg section=defaults option=host_key_checking value=False ignore_errors: yes - name: Prepare for migration hosts: undercloud tasks: - name: Set ovn migration working dir set_fact: ovn_migration_working_dir: /home/stack/ovn_migration - name: Delete temp file directory if present file: state: absent path: "{{ ovn_migration_working_dir }}" - name : Create temp file directory if not present file: state: directory path: "{{ ovn_migration_working_dir }}" - name: Set the docker registry information block: - name: Get the docker registry info (infrared deployment) block: - name: Set is_infrard deployment set_fact: is_infrared: True - name: Save the docker reg set_fact: container_image_prepare: namespace: "{{ install.get('registry', {}).namespace|default(False)|ternary(install.get('registry', {}).namespace, install.get('registry', {}).mirror + '/' + 'rhosp' + install.version) }}" prefix: "{{ install.registry.prefix|default('openstack') }}" tag: "{{ install.registry.tag|default('') }}" local_namespace: "{{ install.registry.local|default('') }}" is_dvr: "{{ install.dvr }}" when: - install is defined - name: Get the docker registry info (tripleo deployment) block: - name: Set is_infrard deployment set_fact: is_infrared: False - name: Save the docker reg set_fact: container_image_prepare: namespace: "{{ registry_namespace }}" local_namespace: "{{ registry_localnamespace }}" prefix: "{{ registry_prefix }}" tag: "{{ registry_tag }}" is_dvr: "{{ dvr }}" when: - install is not defined - name: Prepare for migration include_role: name: prepare-migration vars: infrared_deployment: "{{ is_infrared }}" registry_namespace: "{{ container_image_prepare['namespace'] }}" image_prefix: "{{ container_image_prepare['prefix'] }}" image_tag: "{{ container_image_prepare['tag'] }}" local_namespace: "{{ container_image_prepare['local_namespace'] }}" is_dvr: "{{ container_image_prepare['is_dvr'] }}" - name: Boot few 
VMs to measure downtime hosts: undercloud tasks: - name: Check if need to create resources block: - name: Set create_vms (infrared) set_fact: create_vms: "{{ install.create_resources }}" when: - install is defined - name: Set create_vms (tripleo deployment) set_fact: create_vms: "{{ create_resources }}" when: - install is not defined - name: Create few resources block: - name: Set the public network name (infrared deployment) set_fact: public_net: "{{ install.external_network }}" when: install is defined - name: Set the public network name (Tripleo deployment) set_fact: public_net: "{{ external_network }}" when: install is not defined - name: Set the image name (infrared deployment) set_fact: image_to_boot: "{{ install.image_name }}" when: install is defined - name: Set the image name(Tripleo deployment) set_fact: image_to_boot: "{{ image_name }}" when: install is not defined - name: Create resources include_role: name: create-resources vars: public_network_name: "{{ public_net }}" image_name: "{{ image_to_boot }}" ovn_migration_temp_dir: /home/stack/ovn_migration overcloudrc: /home/stack/overcloudrc when: - create_vms|bool - name: Kick start the migration hosts: undercloud tasks: #TODO: Get the working dir from the param - name: Starting migration block block: - name: Set ovn migration working dir set_fact: ovn_migration_working_dir: /home/stack/ovn_migration - name: Copy the playbook files into ovn_migration working dir command: cp -rf /usr/share/ansible/neutron-ovn-migration/playbooks {{ ovn_migration_working_dir }} - name: Set the public network name (infrared deployment) set_fact: public_network: "{{ install.external_network }}" when: install is defined - name: Set the public network name (Tripleo deployment) set_fact: public_network: "{{ external_network }}" when: install is not defined - name: Create ovn migration script template: src: templates/start-ovn-migration.sh.j2 dest: "{{ ovn_migration_working_dir }}/start-ovn-migration.sh" mode: 0755 - name: Generate inventory file for ovn migration shell: set -o pipefail && {{ ovn_migration_working_dir }}/start-ovn-migration.sh generate-inventory 2>&1 > {{ ovn_migration_working_dir}}/generate-inventory.log - name: Set MTU T1 shell: set -o pipefail && {{ ovn_migration_working_dir }}/start-ovn-migration.sh setup-mtu-t1 2>&1 > {{ ovn_migration_working_dir}}/setup-mtu-t1.log - name: Reduce mtu of the pre migration networks shell: set -o pipefail && {{ ovn_migration_working_dir }}/start-ovn-migration.sh reduce-mtu 2>&1 > {{ ovn_migration_working_dir}}/reduce-mtu.log - name: Start the migration process shell: set -o pipefail && {{ ovn_migration_working_dir }}/start-ovn-migration.sh start-migration 2>&1 > {{ ovn_migration_working_dir}}/start-ovn-migration.sh.log - name: Stop pinger if started shell: echo "exit" > {{ ovn_migration_working_dir}}/_pinger_cmd.txt always: - name: Fetch ovn_migration log directory synchronize: src: "{{ ovn_migration_working_dir }}" dest: "{{ inventory_dir }}" mode: pull when: install is defined ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.0270422 neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/roles/0000755000175000017500000000000000000000000030341 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.0270422 
neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/roles/create-resources/0000755000175000017500000000000000000000000033614 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000119 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/roles/create-resources/defaults/ 28 mtime=1586982291.5430467 neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/roles/create-resources/0000755000175000017500000000000000000000000033614 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022500000000000011454 xustar0000000000000000127 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/roles/create-resources/defaults/main.yml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/roles/create-resources/0000644000175000017500000000043700000000000033622 0ustar00coreycorey00000000000000--- public_network_name: "{{ public_network_name }}" create_resource_script: create-resources.sh.j2 ovn_migration_temp_dir: "{{ ovn_migration_temp_dir }}" image_name: "{{ image_name }}" server_user_name: "{{ server_user_name }}" overcloudrc: "{{ overcloudrc }}" resource_suffix: pinger ././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000116 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/roles/create-resources/tasks/ 28 mtime=1586982291.5430467 neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/roles/create-resources/0000755000175000017500000000000000000000000033614 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/roles/create-resources/tasks/main.yml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/roles/create-resources/0000644000175000017500000000162300000000000033620 0ustar00coreycorey00000000000000- name: Delete temp file directory if present file: state: absent path: "{{ ovn_migration_temp_dir }}" - name : Create temp file directory if not present file: state: directory path: "{{ ovn_migration_temp_dir }}" - name: Generate resource creation script template: src: create-resources.sh.j2 dest: "{{ ovn_migration_temp_dir }}/create-resources.sh" mode: 0744 - name: Creating pre pre migration resources shell: > set -o pipefail && {{ ovn_migration_temp_dir }}/create-resources.sh 2>&1 > {{ ovn_migration_temp_dir }}/create-resources.sh.log changed_when: true - name: Generate pinger script template: src: start-pinger.sh.j2 dest: "{{ ovn_migration_temp_dir }}/start-pinger.sh" mode: 0744 - name: Start pinger in background shell: > nohup {{ ovn_migration_temp_dir }}/start-pinger.sh
/dev/null 2>&1 & changed_when: False ././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000120 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/roles/create-resources/templates/ 28 mtime=1586982291.5430467 neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/roles/create-resources/0000755000175000017500000000000000000000000033614 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000024400000000000011455 xustar0000000000000000142 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/roles/create-resources/templates/create-resources.sh.j2 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/roles/create-resources/0000644000175000017500000001170300000000000033620 0ustar00coreycorey00000000000000#!/bin/bash set -x source {{ overcloudrc }} image_name={{ image_name }} openstack image show $image_name if [ "$?" != "0" ] then if [ ! -f cirros-0.4.0-x86_64-disk.img ] then curl -Lo cirros-0.4.0-x86_64-disk.img https://github.com/cirros-dev/cirros/releases/download/0.4.0/cirros-0.4.0-x86_64-disk.img fi openstack image create "cirros-ovn-migration-{{ resource_suffix }}" --file cirros-0.4.0-x86_64-disk.img \ --disk-format qcow2 --container-format bare --public image_name="cirros-ovn-migration-{{ resource_suffix }}" fi openstack flavor create ovn-migration-{{ resource_suffix }} --ram 1024 --disk 1 --vcpus 1 openstack keypair create ovn-migration-{{ resource_suffix }} --private-key {{ ovn_migration_temp_dir }}/ovn_migration_ssh_key openstack security group create ovn-migration-sg-{{ resource_suffix }} openstack security group rule create --ingress --protocol icmp ovn-migration-sg-{{ resource_suffix }} openstack security group rule create --ingress --protocol tcp --dst-port 22 ovn-migration-sg-{{ resource_suffix }} openstack network create ovn-migration-net-{{ resource_suffix }} neutron net-update ovn-migration-net-{{ resource_suffix }} --mtu 1442 openstack subnet create --network ovn-migration-net-{{ resource_suffix }} --subnet-range 172.168.168.0/24 ovn-migration-subnet-{{ resource_suffix }} num_hypervisors=`openstack hypervisor stats show | grep count | awk '{print $4}'` openstack server create --flavor ovn-migration-{{ resource_suffix }} --image $image_name \ --key-name ovn-migration-{{ resource_suffix }} \ --nic net-id=ovn-migration-net-{{ resource_suffix }} \ --security-group ovn-migration-sg-{{ resource_suffix }} \ --min $num_hypervisors --max $num_hypervisors \ ovn-migration-server-{{ resource_suffix }} openstack router create ovn-migration-router-{{ resource_suffix }} openstack router set --external-gateway {{ public_network_name }} ovn-migration-router-{{ resource_suffix }} openstack router add subnet ovn-migration-router-{{ resource_suffix }} ovn-migration-subnet-{{ resource_suffix }} for i in $(seq 1 $num_hypervisors) do num_attempts=0 while true do openstack server show ovn-migration-server-{{ resource_suffix }}-$i -c status | grep ACTIVE if [ "$?" == "0" ]; then break fi sleep 5 num_attempts=$((num_attempts+1)) if [ $num_attempts -gt 24 ] then echo "VM is not up even after 2 minutes. 
Something is wrong" exit 1 fi done vm_ip=`openstack server show ovn-migration-server-{{ resource_suffix }}-$i -c addresses | grep addresses | awk '{ split($4, ip, "="); print ip[2]}'` port_id=`openstack port list | grep $vm_ip | awk '{print $2}'` # Wait till the port is ACTIVE echo "Wait till the port is ACTIVE" port_status=`openstack port show $port_id -c status | grep status | awk '{print $4}'` num_attempts=0 while [ "$port_status" != "ACTIVE" ] do num_attempts=$((num_attempts+1)) sleep 5 port_status=`openstack port show $port_id -c status | grep status | awk '{print $4}'` echo "Port status = $port_status" if [ $num_attempts -gt 24 ] then echo "Port is not up even after 2 minutes. Something is wrong" exit 1 fi done echo "VM is up and the port is ACTIVE" server_ip=`openstack floating ip create --port $port_id \ {{ public_network_name }} -c floating_ip_address | grep floating_ip_address \ | awk '{print $4'}` echo $server_ip >> {{ ovn_migration_temp_dir }}/server_fips # Wait till the VM allows ssh connections vm_status="down" num_attempts=0 while [ "$vm_status" != "up" ] do num_attempts=$((num_attempts+1)) sleep 5 openstack console log show ovn-migration-server-{{ resource_suffix }}-$i | grep "login:" if [ "$?" == "0" ] then vm_status="up" else if [ $num_attempts -gt 60 ] then echo "VM is not up with login prompt even after 5 minutes. Something is wrong." # Even though something seems wrong, lets try and ping. break fi fi done done chmod 0600 {{ ovn_migration_temp_dir }}/ovn_migration_ssh_key for server_ip in `cat {{ ovn_migration_temp_dir }}/server_fips` do num_attempts=0 vm_reachable="false" while [ "$vm_reachable" != "true" ] do num_attempts=$((num_attempts+1)) sleep 1 ping -c 3 $server_ip if [ "$?" == "0" ] then vm_reachable="true" else if [ $num_attempts -gt 60 ] then echo "VM is not pingable. Something is wrong." 
exit 1 fi fi done ssh -i {{ ovn_migration_temp_dir }}/ovn_migration_ssh_key -o StrictHostKeyChecking=no \ -o UserKnownHostsFile=/dev/null cirros@$server_ip date done echo "Done with the resource creation : exiting" exit 0 ././@PaxHeader0000000000000000000000000000024000000000000011451 xustar0000000000000000138 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/roles/create-resources/templates/start-pinger.sh.j2 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/roles/create-resources/0000644000175000017500000000236300000000000033622 0ustar00coreycorey00000000000000#!/bin/bash set -x echo "creating virtualenv in {{ ovn_migration_temp_dir }}/pinger_venv" virtualenv {{ ovn_migration_temp_dir }}/pinger_venv source {{ ovn_migration_temp_dir }}/pinger_venv/bin/activate pip install --upgrade pip pip install sh cat > {{ ovn_migration_temp_dir }}/pinger.py <<-EOF import sh import sys import time def main(ips): run_cmds = [] for ip in ips: ip_out_file = "{{ ovn_migration_temp_dir }}/" + ip.replace('.', '_') + '_ping.out' run_cmds.append(sh.ping('-i', '1', ip, _out=ip_out_file, _bg=True)) if not run_cmds: return while True: try: cmd_file = open("{{ ovn_migration_temp_dir }}/_pinger_cmd.txt", "r") cmd = cmd_file.readline() if cmd.startswith("exit"): break cmd_file.close() except IOError: time.sleep(3) continue for p in run_cmds: p.signal(2) p.wait() if __name__ == '__main__': main(sys.argv[1:]) EOF pinger_ips="" for ip in `cat {{ ovn_migration_temp_dir }}/server_fips` do pinger_ips="$pinger_ips $ip" done echo "pinger ips = $pinger_ips" echo "calling pinger.py" python {{ ovn_migration_temp_dir }}/pinger.py $pinger_ips echo "Exiting..." ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000111 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/roles/prepare-migration/ 28 mtime=1586982291.0270422 neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/roles/prepare-migration0000755000175000017500000000000000000000000033707 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000120 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/roles/prepare-migration/defaults/ 28 mtime=1586982291.5430467 neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/roles/prepare-migration0000755000175000017500000000000000000000000033707 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022600000000000011455 xustar0000000000000000128 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/roles/prepare-migration/defaults/main.yml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/roles/prepare-migration0000644000175000017500000000027100000000000033711 0ustar00coreycorey00000000000000--- infrared_deployment: False registry_namespace: docker.io/tripleomaster local_namespace: 192.168.24.1:8787/tripleomaster image_tag: current-tripleo-rdo image_prefix: centos-binary- ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000117 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/roles/prepare-migration/tasks/ 28 mtime=1586982291.5430467 neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/roles/prepare-migration0000755000175000017500000000000000000000000033707 
5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/roles/prepare-migration/tasks/main.yml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/roles/prepare-migration0000644000175000017500000001426400000000000033720 0ustar00coreycorey00000000000000- name: Copy overcloud deploy script to overcloud-deploy-ovn.sh block: - name: Check if overcloud_deploy.sh is present or not stat: path: ~/overcloud_deploy.sh register: deploy_file - name: Set the ml2ovs overcloud deploy script file name set_fact: overcloud_deploy_script: '~/overcloud_deploy.sh' when: deploy_file.stat.exists|bool - name: Check if overcloud-deploy.sh is present stat: path: ~/overcloud-deploy.sh register: deploy_file_2 when: not deploy_file.stat.exists|bool - name: Set the ml2ovs overcloud deploy script file name set_fact: overcloud_deploy_script: '~/overcloud-deploy.sh' when: - not deploy_file.stat.exists|bool - deploy_file_2.stat.exists|bool - name: Copy overcloud deploy script to overcloud-deploy-ovn.sh command: cp -f {{ overcloud_deploy_script }} ~/overcloud-deploy-ovn.sh when: infrared_deployment|bool - name: set overcloud deploy ovn script set_fact: overcloud_deploy_ovn_script: '~/overcloud-deploy-ovn.sh' - name: Set docker images environment file set_fact: output_env_file: /home/stack/docker-images-ovn.yaml - name: Get the proper neutron-ovn-ha.yaml path stat: path: /usr/share/openstack-tripleo-heat-templates/environments/services/neutron-ovn-ha.yaml register: ovn_env_path - name: Set the neutron-ovn-dvr-ha.yaml file path if dvr set_fact: neutron_ovn_env_path: /usr/share/openstack-tripleo-heat-templates/environments/services/neutron-ovn-dvr-ha.yaml when: is_dvr|bool - name: Set the neutron-ovn-ha.yaml file path if not dvr set_fact: neutron_ovn_env_path: /usr/share/openstack-tripleo-heat-templates/environments/services/neutron-ovn-ha.yaml when: not is_dvr|bool - name: Construct overcloud-deploy-ovn.sh script for infrared deployments lineinfile: dest: "{{ overcloud_deploy_ovn_script }}" line: "{{ item }} \\" insertbefore: "^--log-file.*" with_items: - "-e {{ neutron_ovn_env_path }}" - "-e /home/stack/ovn-extras.yaml" - "-e {{ output_env_file }}" when: - infrared_deployment|bool - name: Construct overcloud-deploy-ovn.sh script for tripleo deployments template: src: templates/overcloud-deploy-ovn.sh.j2 dest: ~/overcloud-deploy-ovn.sh mode: 0744 when: - not infrared_deployment|bool - name: Set image tag (infrared deployment) block: - name: Get puddle version shell: cat containers-prepare-parameter.yaml | grep -v _tag | grep tag | awk '{print $2}' ignore_errors: True register: core_puddle_version - name: Set image tag from puddle version set_fact: docker_image_tag: "{{ core_puddle_version.stdout }}" - name: Get registry namespace shell: cat containers-prepare-parameter.yaml | grep -v _namespace | grep namespace | awk '{print $2}' ignore_errors: True register: reg_ns - name: Set registry namespace set_fact: reg_namespace: "{{ reg_ns.stdout }}" - debug: msg: "{{ core_puddle_version.stdout }}" - debug: msg: "{{ docker_image_tag }}" - debug: msg: "{{ reg_namespace }}" when: infrared_deployment|bool - name: Set image tag (tripleo deployment) set_fact: docker_image_tag: "{{ image_tag }}" when: - not infrared_deployment|bool - name: Generate ovn container images shell: | echo "container_images:" > ~/ovn_container_images.yaml args: creates: 
~/ovn_container_images.yaml - name: Add ovn container images to ovn_container_images.yaml lineinfile: dest: ~/ovn_container_images.yaml line: "- imagename: {{ reg_namespace }}/{{ image_prefix }}-{{ item }}:{{ docker_image_tag }}" with_items: - "ovn-northd" - "ovn-controller" - "neutron-server-ovn" - "neutron-metadata-agent-ovn" - name: Generate docker images environment file shell: | echo "parameter_defaults:" > ~/docker-images-ovn.yaml changed_when: False - name: Set the local namespace block: - name: Extract the local namespace shell: | set -exo pipefail source ~/stackrc openstack overcloud plan export overcloud mkdir -p /tmp/oc_plan mv overcloud.tar.gz /tmp/oc_plan/ cd /tmp/oc_plan tar xvf overcloud.tar.gz reg=`cat /tmp/oc_plan/environments/containers-default-parameters.yaml | grep ContainerNeutronApiImage | awk '{ split($2, image , "/"); print image[1] }'` namespace=`cat /tmp/oc_plan/environments/containers-default-parameters.yaml | grep ContainerNeutronApiImage | awk '{ split($2, image , "/"); print image[2] }'` echo $reg/$namespace > /tmp/_reg_namespace rm -rf /tmp/oc_plan - name: Get the local namespace command: cat /tmp/_reg_namespace register: local_ns - name: Set the local registry set_fact: local_registry: "{{ local_ns.stdout }}" when: - local_namespace == '' - name: Set the local namespace set_fact: local_registry: "{{ local_namespace }}" when: - local_namespace != '' - name: Add ovn container images to docker images environment file lineinfile: dest: ~/docker-images-ovn.yaml line: " {{ item.name }}: {{ local_registry }}/{{ image_prefix }}-{{ item.image_name }}:{{ docker_image_tag }}" with_items: - { name: ContainerNeutronApiImage, image_name: neutron-server-ovn} - { name: ContainerNeutronConfigImage, image_name: neutron-server-ovn} - { name: ContainerOvnMetadataImage, image_name: neutron-metadata-agent-ovn} - { name: ContainerOvnControllerImage, image_name: ovn-controller} - { name: ContainerOvnControllerConfigImage, image_name: ovn-controller} - { name: ContainerOvnDbsImage, image_name: ovn-northd} - { name: ContainerOvnDbsConfigImage, image_name: ovn-northd} - name: Upload the ovn container images to the local registry shell: | source /home/stack/stackrc openstack tripleo container image prepare --environment-file /home/stack/containers-prepare-parameter.yaml become: yes changed_when: False ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5430467 neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/templates/0000755000175000017500000000000000000000000031213 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/templates/start-ovn-migration.sh.j2 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/infrared/tripleo-ovn-migration/templates/start-ovn-mig0000644000175000017500000000024600000000000033647 0ustar00coreycorey00000000000000#!/bin/bash export PUBLIC_NETWORK_NAME={{ public_network }} # TODO: Get this from the var export OPT_WORKDIR=/home/stack/ovn_migration /usr/bin/ovn_migration.sh $1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/migrate-to-ovn.yml0000644000175000017500000002111600000000000024552 0ustar00coreycorey00000000000000# Migrate a Neutron deployment using ML2/OVS to OVN. 
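# A typical invocation, following the README in this directory, is:
#   ansible-playbook migrate-to-ovn.yml -i hosts --forks 10
# where "hosts" is an inventory modeled on hosts-sample and --forks simply
# raises parallelism so all nodes are reconfigured at roughly the same time.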
# # See hosts-sample for expected contents of the ansible inventory. --- - hosts: compute remote_user: "{{ remote_user }}" become: true tasks: - name: Ensure OVN packages are installed on compute nodes. yum: name: openvswitch-ovn-host state: present # TODO to make ansible-lint happy, all of these commands should be conditionally run # only if the config value needs to be changed. - name: Configure ovn-encap-type. command: "ovs-vsctl set open . external_ids:ovn-encap-type=geneve" changed_when: false - name: Configure ovn-encap-ip. command: "ovs-vsctl set open . external_ids:ovn-encap-ip={{ ovn_encap_ip }}" changed_when: false - name: Configure ovn-remote. command: "ovs-vsctl set open . external_ids:ovn-remote=tcp:{{ ovn_db_ip }}:6642" changed_when: false # TODO We could discover the appropriate value for ovn-bridge-mappings based on # the openvswitch agent configuration instead of requiring it to be configured # in the inventory. - name: Configure ovn-bridge-mappings. command: "ovs-vsctl set open . external_ids:ovn-bridge-mappings={{ ovn_bridge_mappings }}" changed_when: false - name: Get hostname command: hostname -f register: hostname check_mode: no changed_when: false - name: Set host name command: "ovs-vsctl set Open_vSwitch . external-ids:hostname={{ hostname.stdout }}" changed_when: false # TODO ansible has an "iptables" module, but it does not allow you specify a "rule number" # which we require here. - name: Open Geneve UDP port for tunneling. command: iptables -I INPUT 10 -m state --state NEW -p udp --dport 6081 -j ACCEPT changed_when: false - name: Persist our iptables changes after a reboot shell: iptables-save > /etc/sysconfig/iptables.save args: creates: /etc/sysconfig/iptables.save # TODO Remove this once the metadata API is supported. # https://bugs.launchpad.net/networking-ovn/+bug/1562132 - name: Force config drive until the metadata API is supported. ini_file: dest: /etc/nova/nova.conf section: DEFAULT option: force_config_drive value: true - name: Restart nova-compute service to reflect force_config_drive value. systemd: name: openstack-nova-compute state: restarted enabled: yes - hosts: controller remote_user: "{{ remote_user }}" become: true tasks: - name: Ensure OVN packages are installed on the central OVN host. when: ovn_central is defined yum: name: openvswitch-ovn-central state: present # TODO Set up SSL for OVN databases # TODO ansible has an "iptables" module, but it does not allow you specify a "rule number" # which we require here. - name: Open OVN database ports. command: "iptables -I INPUT 10 -m state --state NEW -p tcp --dport {{ item }} -j ACCEPT" with_items: [ 6641, 6642 ] changed_when: False - name: Persist our iptables changes after a reboot shell: iptables-save > /etc/sysconfig/iptables.save args: creates: /etc/sysconfig/iptables.save # TODO Integrate HA support for the OVN control services. - name: Start ovn-northd and the OVN databases. when: ovn_central is defined systemd: name: ovn-northd state: started enabled: yes - name: Enable remote access to the northbound database. command: "ovn-nbctl set-connection ptcp:6641:{{ ovn_db_ip }}" when: ovn_central is defined changed_when: False - name: Enable remote access to the southbound database. 
command: "ovn-sbctl set-connection ptcp:6642:{{ ovn_db_ip }}" when: ovn_central is defined changed_when: False - name: Update Neutron configuration files ini_file: dest={{ item.dest }} section={{ item.section }} option={{ item.option }} value={{ item.value }} with_items: - { dest: '/etc/neutron/neutron.conf', section: 'DEFAULT', option: 'service_plugins', value: 'qos,ovn-router' } - { dest: '/etc/neutron/neutron.conf', section: 'DEFAULT', option: 'notification_drivers', value: 'ovn-qos' } - { dest: '/etc/neutron/plugins/ml2/ml2_conf.ini', section: 'ml2', option: 'mechanism_drivers', value: 'ovn' } - { dest: '/etc/neutron/plugins/ml2/ml2_conf.ini', section: 'ml2', option: 'type_drivers', value: 'geneve,vxlan,vlan,flat' } - { dest: '/etc/neutron/plugins/ml2/ml2_conf.ini', section: 'ml2', option: 'tenant_network_types', value: 'geneve' } - { dest: '/etc/neutron/plugins/ml2/ml2_conf.ini', section: 'ml2_type_geneve', option: 'vni_ranges', value: '1:65536' } - { dest: '/etc/neutron/plugins/ml2/ml2_conf.ini', section: 'ml2_type_geneve', option: 'max_header_size', value: '38' } - { dest: '/etc/neutron/plugins/ml2/ml2_conf.ini', section: 'ovn', option: 'ovn_nb_connection', value: '"tcp:{{ ovn_db_ip }}:6641"' } - { dest: '/etc/neutron/plugins/ml2/ml2_conf.ini', section: 'ovn', option: 'ovn_sb_connection', value: '"tcp:{{ ovn_db_ip }}:6642"' } - { dest: '/etc/neutron/plugins/ml2/ml2_conf.ini', section: 'ovn', option: 'ovsdb_connection_timeout', value: '180' } - { dest: '/etc/neutron/plugins/ml2/ml2_conf.ini', section: 'ovn', option: 'neutron_sync_mode', value: 'repair' } - { dest: '/etc/neutron/plugins/ml2/ml2_conf.ini', section: 'ovn', option: 'ovn_l3_mode', value: 'true' } - { dest: '/etc/neutron/plugins/ml2/ml2_conf.ini', section: 'ovn', option: 'vif_type', value: 'ovs' } - name: Note that API downtime begins now. debug: msg: NEUTRON API DOWNTIME STARTING NOW FOR THIS HOST - name: Shut down neutron-server so that we can begin data sync to OVN. systemd: name: neutron-server state: stopped - hosts: controller remote_user: "{{ remote_user }}" become: true tasks: - name: Sync Neutron state to OVN. when: ovn_central is defined command: neutron-ovn-db-sync-util --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini - hosts: overcloud remote_user: "{{ remote_user }}" become: true tasks: - name: Note that data plane imact starts now. debug: msg: DATA PLANE IMPACT BEGINS NOW. - name: Stop metadata, DHCP, L3 and openvswitch agent if needed. systemd: name={{ item.name }} state={{ item.state }} enabled=no with_items: - { name: 'neutron-metadata-agent', state: 'stopped' } - { name: 'neutron-dhcp-agent', state: 'stopped' } - { name: 'neutron-l3-agent', state: 'stopped' } - { name: 'neutron-openvswitch-agent', state: 'stopped' } - hosts: compute remote_user: "{{ remote_user }}" become: true tasks: - name: Note that data plane is being restored. debug: msg: DATA PLANE IS NOW BEING RESTORED. - name: Delete br-tun as it is no longer used. command: "ovs-vsctl del-br br-tun" changed_when: false - name: Reset OpenFlow protocol version before ovn-controller takes over. with_items: [ br-int, br-ex ] command: "ovs-vsctl set Bridge {{ item }} protocols=[]" ignore_errors: True changed_when: false - name: Start ovn-controller. 
systemd: name: ovn-controller state: started enabled: yes - hosts: controller remote_user: "{{ remote_user }}" become: true tasks: # TODO The sync util scheduling gateway routers depends on this patch: # https://review.openstack.org/#/c/427020/ # If the patch is not merged, this command is harmless, but the gateway # routers won't get scheduled until later when neutron-server starts. - name: Schedule gateway routers by running the sync util. when: ovn_central is defined command: neutron-ovn-db-sync-util --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini changed_when: false - name: Configure node for hosting gateway routers for external connectivity. command: "ovs-vsctl set open . external_ids:ovn-cms-options=enable-chassis-as-gw" changed_when: false - hosts: overcloud remote_user: "{{ remote_user }}" become: true tasks: # TODO Make this smarter so that it only deletes net namespaces that were # # created by neutron. In the simple case, this is fine, but will break # # once containers are in use on the overcloud. - name: Delete network namespaces. command: ip -all netns delete changed_when: false - hosts: controller remote_user: "{{ remote_user }}" become: true tasks: - name: Note that the Neutron API is coming back online. debug: msg: THE NEUTRON API IS NOW BEING RESTORED. - name: Start neutron-server. systemd: name: neutron-server state: started # TODO In our grenade script we had to restart rabbitmq. Is that needed? ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5430467 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/0000755000175000017500000000000000000000000025260 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/ovn_migration.sh0000644000175000017500000002474700000000000030505 0ustar00coreycorey00000000000000#!/bin/bash # Copyright 2018 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # With LANG set to everything else than C completely undercipherable errors # like "file not found" and decoding errors will start to appear during scripts # or even ansible modules LANG=C # Complete stackrc file path. : ${STACKRC_FILE:=~/stackrc} # Complete overcloudrc file path. : ${OVERCLOUDRC_FILE:=~/overcloudrc} # overcloud deploy script for OVN migration. : ${OVERCLOUD_OVN_DEPLOY_SCRIPT:=~/overcloud-deploy-ovn.sh} : ${OPT_WORKDIR:=$PWD} : ${PUBLIC_NETWORK_NAME:=public} : ${IMAGE_NAME:=cirros} : ${SERVER_USER_NAME:=cirros} : ${VALIDATE_MIGRATION:=True} : ${DHCP_RENEWAL_TIME:=30} check_for_necessary_files() { if [ ! -e hosts_for_migration ]; then echo "hosts_for_migration ansible inventory file not present" echo "Please run ./ovn_migration.sh generate-inventory" exit 1 fi # Check if the user has generated overcloud-deploy-ovn.sh file # If it is not generated. Exit if [ ! 
-e $OVERCLOUD_OVN_DEPLOY_SCRIPT ]; then echo "overcloud deploy migration script :" \ "$OVERCLOUD_OVN_DEPLOY_SCRIPT is not present. Please" \ "make sure you generate that file before running this" exit 1 fi cat $OVERCLOUD_OVN_DEPLOY_SCRIPT | grep neutron-ovn >/dev/null if [ "$?" == "1" ]; then echo "OVN t-h-t environment file seems to be missing in \ $OVERCLOUD_OVN_DEPLOY_SCRIPT. Please check the $OVERCLOUD_OVN_DEPLOY_SCRIPT \ file again." exit 1 fi cat $OVERCLOUD_OVN_DEPLOY_SCRIPT | grep \$HOME/ovn-extras.yaml >/dev/null check1=$? cat $OVERCLOUD_OVN_DEPLOY_SCRIPT | grep $HOME/ovn-extras.yaml >/dev/null check2=$? if [[ "$check1" == "1" && "$check2" == "1" ]]; then echo "ovn-extras.yaml file is missing in "\ "$OVERCLOUD_OVN_DEPLOY_SCRIPT. Please add it "\ "as \" -e \$HOME/ovn-extras.yaml\"" exit 1 fi } get_host_ip() { inventory_file=$1 host_name=$2 ip=`jq -r --arg role _meta --arg hostname $host_name 'to_entries[] | select(.key == $role) | .value.hostvars[$hostname].management_ip' $inventory_file` if [[ "x$ip" == "x" ]] || [[ "x$ip" == "xnull" ]]; then # This file does not provide translation from the hostname to the IP, or # we already have an IP (Queens backwards compatibility) echo $host_name else echo $ip fi } get_role_hosts() { inventory_file=$1 role_name=$2 roles=`jq -r \.$role_name\.children\[\] $inventory_file` for role in $roles; do # During the rocky cycle the format changed to have .value.hosts hosts=`jq -r --arg role "$role" 'to_entries[] | select(.key == $role) | .value.hosts[]' $inventory_file` if [[ "x$hosts" == "x" ]]; then # But we keep backwards compatibility with nested childrens (Queens) hosts=`jq -r --arg role "$role" 'to_entries[] | select(.key == $role) | .value.children[]' $inventory_file` for host in $hosts; do HOSTS="$HOSTS `jq -r --arg host "$host" 'to_entries[] | select(.key == $host) | .value.hosts[0]' $inventory_file`" done else HOSTS="${hosts} ${HOSTS}" fi done echo $HOSTS } # Generate the ansible.cfg file generate_ansible_config_file() { cat > ansible.cfg <<-EOF [defaults] forks=50 become=True callback_whitelist = profile_tasks host_key_checking = False gathering = smart fact_caching = jsonfile fact_caching_connection = ./ansible_facts_cache fact_caching_timeout = 0 #roles_path = roles:... [ssh_connection] control_path = %(directory)s/%%h-%%r ssh_args = -o ControlMaster=auto -o ControlPersist=270s -o ServerAliveInterval=30 -o GSSAPIAuthentication=no retries = 3 EOF } # Generate the inventory file for ansible migration playbook. 
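# The generated hosts_for_migration ends up looking roughly like this
# (host names and addresses are illustrative; the real values come from
# tripleo-ansible-inventory):
#   [ovn-dbs]
#   controller-0 ansible_host=192.0.2.1 ovn_central=true ansible_ssh_user=heat-admin ansible_become=true
#   [ovn-controllers]
#   compute-0 ansible_host=192.0.2.10 ansible_ssh_user=heat-admin ansible_become=true ovn_controller=true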
generate_ansible_inventory_file() { echo "Generating the inventory file for ansible-playbook" source $STACKRC_FILE echo "[ovn-dbs]" > hosts_for_migration ovn_central=True /usr/bin/tripleo-ansible-inventory --list > /tmp/ansible-inventory.txt # We want to run ovn_dbs where neutron_api is running OVN_DBS=$(get_role_hosts /tmp/ansible-inventory.txt neutron_api) for node_name in $OVN_DBS; do node_ip=$(get_host_ip /tmp/ansible-inventory.txt $node_name) node="$node_name ansible_host=$node_ip" if [ "$ovn_central" == "True" ]; then ovn_central=False node="$node_name ansible_host=$node_ip ovn_central=true" fi echo $node ansible_ssh_user=heat-admin ansible_become=true >> hosts_for_migration done echo "" >> hosts_for_migration echo "[ovn-controllers]" >> hosts_for_migration # We want to run ovn-controller where OVS agent was running before the migration OVN_CONTROLLERS=$(get_role_hosts /tmp/ansible-inventory.txt neutron_ovs_agent) for node_name in $OVN_CONTROLLERS; do node_ip=$(get_host_ip /tmp/ansible-inventory.txt $node_name) echo $node_name ansible_host=$node_ip ansible_ssh_user=heat-admin ansible_become=true ovn_controller=true >> hosts_for_migration done rm -f /tmp/ansible-inventory.txt echo "" >> hosts_for_migration cat >> hosts_for_migration << EOF [overcloud-controllers:children] ovn-dbs [overcloud:children] ovn-controllers ovn-dbs EOF add_group_vars() { cat >> hosts_for_migration << EOF [$1:vars] remote_user=heat-admin public_network_name=$PUBLIC_NETWORK_NAME image_name=$IMAGE_NAME working_dir=$OPT_WORKDIR server_user_name=$SERVER_USER_NAME validate_migration=$VALIDATE_MIGRATION overcloud_ovn_deploy_script=$OVERCLOUD_OVN_DEPLOY_SCRIPT overcloudrc=$OVERCLOUDRC_FILE ovn_migration_backups=/var/lib/ovn-migration-backup EOF } add_group_vars overcloud add_group_vars overcloud-controllers echo "***************************************" cat hosts_for_migration echo "***************************************" echo "Generated the inventory file - hosts_for_migration" echo "Please review the file before running the next command - setup-mtu-t1" } # Check if the public network exists, and if it has floating ips available oc_check_public_network() { source $OVERCLOUDRC_FILE openstack network show $PUBLIC_NETWORK_NAME 1>/dev/null || { echo "ERROR: PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME} can't be accessed by the" echo " admin user, please fix that before continuing." exit 1 } ID=$(openstack floating ip create $PUBLIC_NETWORK_NAME -c id -f value) || { echo "ERROR: PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME} doesn't have available" echo " floating ips. Make sure that your public network has at least one" echo " floating ip available for the admin user." exit 1 } openstack floating ip delete $ID 2>/dev/null 1>/dev/null return $? } # Check if the neutron networks MTU has been updated to geneve MTU size or not. # We donot want to proceed if the MTUs are not updated. oc_check_network_mtu() { source $OVERCLOUDRC_FILE neutron-ovn-migration-mtu verify mtu return $? } setup_mtu_t1() { # Run the ansible playbook to reduce the DHCP T1 parameter in # dhcp_agent.ini in all the overcloud nodes where dhcp agent is running. ansible-playbook -vv $OPT_WORKDIR/playbooks/reduce-dhcp-renewal-time.yml \ -i hosts_for_migration -e working_dir=$OPT_WORKDIR \ -e renewal_time=$DHCP_RENEWAL_TIME rc=$? return $rc } reduce_network_mtu () { source $OVERCLOUDRC_FILE oc_check_network_mtu if [ "$?" != "0" ]; then # Reduce the network mtu neutron-ovn-migration-mtu update mtu rc=$? 
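# neutron-ovn-migration-mtu is shipped by the python-neutron-ovn-migration-tool
# package; "update mtu" lowers each tenant network MTU to the Geneve-sized
# value that "verify mtu" checked above (VXLAN overhead is 30 bytes while
# Geneve needs 38, per the README in this tree).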
if [ "$rc" != "0" ]; then echo "Reducing the network mtu's failed. Exiting." exit 1 fi fi return $rc } start_migration() { source $STACKRC_FILE echo "Starting the Migration" ansible-playbook -vv $OPT_WORKDIR/playbooks/ovn-migration.yml \ -i hosts_for_migration -e working_dir=$OPT_WORKDIR \ -e public_network_name=$PUBLIC_NETWORK_NAME \ -e image_name=$IMAGE_NAME \ -e overcloud_ovn_deploy_script=$OVERCLOUD_OVN_DEPLOY_SCRIPT \ -e server_user_name=$SERVER_USER_NAME \ -e overcloudrc=$OVERCLOUDRC_FILE \ -e validate_migration=$VALIDATE_MIGRATION $* rc=$? return $rc } print_usage() { cat << EOF Usage: Before running this script, please refer to the migration guide for complete details. This script needs to be run in 5 steps. Step 1 -> ovn_migration.sh generate-inventory Generates the inventory file Step 2 -> ovn_migration.sh setup-mtu-t1 Sets the DHCP renewal T1 to 30 seconds. After this step you will need to wait at least 24h for the change to be propagated to all VMs. This step is only necessary for VXLAN or GRE based tenant networking. Step 3 -> You need to wait at least 24h based on the default configuration of neutron for the DHCP T1 parameter to be propagated, please refer to documentation. WARNING: this is very important if you are using VXLAN or GRE tenant networks. Step 4 -> ovn_migration.sh reduce-mtu Reduces the MTU of the neutron tenant networks networks. This step is only necessary for VXLAN or GRE based tenant networking. Step 5 -> ovn_migration.sh start-migration Starts the migration to OVN. EOF } command=$1 ret_val=0 case $command in generate-inventory) oc_check_public_network generate_ansible_inventory_file generate_ansible_config_file ret_val=$? ;; setup-mtu-t1) check_for_necessary_files setup_mtu_t1 ret_val=$?;; reduce-mtu) check_for_necessary_files reduce_network_mtu ret_val=$?;; start-migration) oc_check_public_network check_for_necessary_files shift start_migration $* ret_val=$? ;; *) print_usage;; esac exit $ret_val ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5430467 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/0000755000175000017500000000000000000000000027263 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/ovn-migration.yml0000644000175000017500000000443100000000000032601 0ustar00coreycorey00000000000000# This is the playbook used by ovn-migration.sh. # # Pre migration and validation tasks will make sure that the initial cloud # is functional, and will create resources which will be checked after # migration. # - name: Pre migration and validation tasks hosts: localhost roles: - pre-migration tags: - pre-migration # # This step is executed before migration, and will backup some config # files related to containers before those get lost. # - name: Backup tripleo container config files on the nodes hosts: ovn-controllers roles: - backup tags: - setup # # TripleO / Director is executed to deploy ovn using "br-migration" for the # dataplane, while br-int is left intact to avoid dataplane disruption. 
# - name: Set up OVN and configure it using tripleo hosts: localhost roles: - tripleo-update vars: ovn_bridge: br-migration tags: - setup become: false # # Once everything is migrated prepare everything by syncing the neutron DB # into the OVN NB database, and then switching the dataplane to br-int # letting ovn-controller take control, afterwards any remaining neutron # resources, namespaces or processes which are not needed anymore are # cleaned up. # - name: Do the DB sync and dataplane switch hosts: ovn-controllers, ovn-dbs roles: - migration vars: ovn_bridge: br-int tags: - migration # # Verify that the initial resources are still reachable, remove them, # and afterwards create new resources and repeat the connectivity tests. # - name: Post migration hosts: localhost roles: - delete-neutron-resources - post-migration tags: - post-migration # # Final step to make sure tripleo knows about OVNIntegrationBridge == br-int. # - name: Rerun the stack update to reset the OVNIntegrationBridge to br-int hosts: localhost roles: - tripleo-update vars: ovn_bridge: br-int tags: - setup become: false # # Final validation after tripleo update to br-int # - name: Final validation hosts: localhost vars: validate_premigration_resources: false roles: - post-migration tags: - final-validation # # Announce that it's done and ready. # - hosts: localhost tasks: - name: Migration successful. debug: msg: Migration from ML2OVS to OVN is now complete. ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/reduce-dhcp-renewal-time.yml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/reduce-dhcp-renewal-time0000644000175000017500000000124700000000000033764 0ustar00coreycorey00000000000000--- - hosts: overcloud-controllers tasks: - name: Update dhcp_agent configuration file option 'dhcp_renewal_time' ini_file: path=/var/lib/config-data/puppet-generated/neutron/etc/neutron/dhcp_agent.ini section=DEFAULT backup=yes option=dhcp_renewal_time value={{ renewal_time }} create=no ignore_errors: yes - block: - name: Get the neutron dhcp agent docker id shell: docker ps | grep neutron_dhcp | awk '{print $1}' register: dhcp_agent_docker_id ignore_errors: yes - name: Restart neutron dhcp agent command: docker restart {{ dhcp_agent_docker_id.stdout }} ignore_errors: yes ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.0310423 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/0000755000175000017500000000000000000000000030407 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.0270422 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/backup/0000755000175000017500000000000000000000000031654 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5430467 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/backup/tasks/0000755000175000017500000000000000000000000033001 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/backup/tasks/main.yml 22 mtime=1586982282.0 
neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/backup/tasks/main.0000644000175000017500000000145600000000000033734 0ustar00coreycorey00000000000000# The following tasks ensure that we have backup data which is # necessary later for cleanup (like l3/dhcp/metadata agent definitions) - name: "Ensure the ovn backup directory" file: path="{{ ovn_migration_backups }}" state=directory - name: "Save the tripleo container definitions" shell: | # only copy them the first time, otherwise, on a later run when # it has been already migrated to OVN we would miss the data if [ ! -d {{ ovn_migration_backups }}/tripleo-config ]; then cp -rfp /var/lib/tripleo-config {{ ovn_migration_backups }} echo "Backed up" fi register: command_result changed_when: "'Backed up' in command_result.stdout" # TODO(majopela): Include steps for backing up the mysql database on the # controllers and the undercloud before continuing././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000117 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/delete-neutron-resources/ 28 mtime=1586982291.0270422 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/delete-neutron-res0000755000175000017500000000000000000000000034051 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000023200000000000011452 xustar0000000000000000126 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/delete-neutron-resources/defaults/ 28 mtime=1586982291.5430467 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/delete-neutron-res0000755000175000017500000000000000000000000034051 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000023400000000000011454 xustar0000000000000000134 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/delete-neutron-resources/defaults/main.yml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/delete-neutron-res0000644000175000017500000000011500000000000034050 0ustar00coreycorey00000000000000--- ovn_migration_temp_dir_del: "{{ working_dir }}/delete_neutron_resources"././@PaxHeader0000000000000000000000000000022700000000000011456 xustar0000000000000000123 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/delete-neutron-resources/tasks/ 28 mtime=1586982291.5430467 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/delete-neutron-res0000755000175000017500000000000000000000000034051 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000023100000000000011451 xustar0000000000000000131 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/delete-neutron-resources/tasks/main.yml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/delete-neutron-res0000644000175000017500000000121200000000000034047 0ustar00coreycorey00000000000000--- - name: Delete temp file directory if present file: state: absent path: "{{ ovn_migration_temp_dir_del }}" - name : Create temp file directory if not present file: state: directory path: "{{ ovn_migration_temp_dir_del }}" - name: Generate neutron resources cleanup script template: src: "delete-neutron-resources.sh.j2" dest: "{{ ovn_migration_temp_dir_del }}/delete-neutron-resources.sh" mode: 0744 - name: Deleting the neutron 
agents shell: > {{ ovn_migration_temp_dir_del }}/delete-neutron-resources.sh 2>&1 > {{ ovn_migration_temp_dir_del }}/delete-neutron-resources.sh.log changed_when: true ././@PaxHeader0000000000000000000000000000023300000000000011453 xustar0000000000000000127 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/delete-neutron-resources/templates/ 28 mtime=1586982291.5430467 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/delete-neutron-res0000755000175000017500000000000000000000000034051 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000026300000000000011456 xustar0000000000000000157 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/delete-neutron-resources/templates/delete-neutron-resources.sh.j2 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/delete-neutron-res0000644000175000017500000000107300000000000034054 0ustar00coreycorey00000000000000#!/bin/bash set -x source {{ overcloudrc }} # Delete non alive neutron agents for i in `openstack network agent list | grep neutron- | grep -v ':-)' | awk {'print $2'}` do openstack network agent delete $i done delete_network_ports() { net_id=$1 for p in `openstack port list --network $net_id | grep -v ID | awk '{print $2}'` do openstack port delete $p done } # Delete HA networks for i in `openstack network list | grep "HA network tenant" | awk '{print $2}'` do delete_network_ports $i openstack network delete $i done exit 0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.0270422 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/migration/0000755000175000017500000000000000000000000032400 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000111 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/migration/defaults/ 28 mtime=1586982291.5430467 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/migration/defaults0000755000175000017500000000000000000000000034130 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/migration/defaults/main.yml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/migration/defaults0000644000175000017500000000147000000000000034134 0ustar00coreycorey00000000000000--- agent_cleanups: neutron_l3_agent: config: --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/l3_agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/l3_agent.ini --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-l3-agent --log-file=/var/log/neutron/netns-cleanup-l3.log cleanup_type: l3 netns_regex: "fip-|snat-|qrouter-" neutron_dhcp: config: --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/dhcp_agent.ini --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-dhcp-agent --log-file=/var/log/neutron/netns-cleanup-dhcp.log cleanup_type: dhcp netns_regex: "qdhcp-" tunnel_bridge: "br-tun" ovn_bridge: "br-int" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 
mtime=1586982291.5470467 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/migration/tasks/0000755000175000017500000000000000000000000033525 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/migration/tasks/activate-ovn.yml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/migration/tasks/ac0000644000175000017500000000051100000000000034030 0ustar00coreycorey00000000000000--- - name: Generate OVN activation script template: src: "activate-ovn.sh.j2" dest: "/tmp/activate-ovn.sh" mode: 0744 - name: Run OVN activation script shell: > /tmp/activate-ovn.sh 2>&1 > /tmp/activate-ovn.sh.log - name: Delete OVN activate script file: state: absent path: /tmp/activate-ovn.sh ././@PaxHeader0000000000000000000000000000022700000000000011456 xustar0000000000000000129 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/migration/tasks/cleanup-dataplane.yml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/migration/tasks/cl0000644000175000017500000000526600000000000034057 0ustar00coreycorey00000000000000--- - name: Quickly disable neutron router and dhcp interfaces shell: | for p in `ovs-vsctl show | egrep 'qr-|ha-|qg-|rfp-' | grep Interface | awk '{print $2}'` do # p will be having quotes. Eg. "hr-xxxx". So strip the quotes p=`echo $p | sed -e 's/"//g'` ovs-vsctl clear Interface $p external-ids ovs-vsctl set Interface $p admin-state=down done # dhcp tap ports cannot be easily distinguished from ovsfw ports, so we # list them from within the qdhcp namespaces for netns in `ip netns | awk '{ print $1 }' | grep qdhcp-`; do for dhcp_port in `ip netns exec $netns ip -o link show | awk -F': ' '{print $2}' | grep tap`; do ovs-vsctl clear Interface $dhcp_port external-ids ovs-vsctl set Interface $dhcp_port admin-state=down done done - name: Clean neutron datapath security groups from iptables shell: | iptables-save > /tmp/iptables-before-cleanup cat /tmp/iptables-before-cleanup | grep -v neutron-openvswi | \ grep -v neutron-filter > /tmp/iptables-after-cleanup if ! 
cmp /tmp/iptables-before-cleanup /tmp/iptables-after-cleanup then cat /tmp/iptables-after-cleanup | iptables-restore echo "Security groups cleaned" fi register: out changed_when: "'Security groups cleaned' in out.stdout" - name: Cleanup neutron datapath resources shell: | # avoid cleaning up dhcp namespaces if the neutron dhcp agent is up (SR-IOV use case) if [[ "{{ item.value.cleanup_type }}" == "dhcp" ]]; then docker inspect neutron_dhcp && echo "Shouldn't clean DHCP namespaces if neutron_dhcp docker is up" && exit 0 fi if ip netns | egrep -e "{{ item.value.netns_regex }}" then echo "Cleaning up" cmd="$(paunch debug --file {{ ovn_migration_backups }}/tripleo-config/hashed-container-startup-config-step_4.json \ --action print-cmd --container {{ item.key }} \ --interactive | \ sed 's/--interactive /--volume=\/tmp\/cleanup-{{ item.key }}.sh:\/cleanup.sh:ro /g ' )" f="/tmp/cleanup-{{ item.key }}.sh" f_cmd="/tmp/container-cmd-{{ item.key }}.sh" echo "#!/bin/sh" > $f echo "set -x" >> $f echo "set -e" >> $f echo "sudo -E kolla_set_configs" >> $f echo "neutron-netns-cleanup {{ item.value.config }} --agent-type {{ item.value.cleanup_type }} --force" >> $f chmod a+x $f echo $cmd /cleanup.sh echo "#!/bin/sh" > $f_cmd echo "set -x" >> $f_cmd echo "set -e" >> $f_cmd echo $cmd /cleanup.sh >> $f_cmd chmod a+x $f_cmd $f_cmd fi with_dict: "{{ agent_cleanups }}" register: out changed_when: "'Cleaning up' in out.stdout" ././@PaxHeader0000000000000000000000000000022500000000000011454 xustar0000000000000000127 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/migration/tasks/clone-dataplane.yml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/migration/tasks/cl0000644000175000017500000000066300000000000034053 0ustar00coreycorey00000000000000# we use this instead of a big shell entry because some versions of # ansible-playbook choke on our script syntax + yaml parsing - name: Generate script to clone br-int and provider bridges template: src: "clone-br-int.sh.j2" dest: "/tmp/clone-br-int.sh" mode: 0744 - name: Run clone script for dataplane shell: /tmp/clone-br-int.sh - name: Delete clone script file: state: absent path: /tmp/clone-br-int.sh././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/migration/tasks/main.yml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/migration/tasks/ma0000644000175000017500000000036400000000000034050 0ustar00coreycorey00000000000000--- - include_tasks: clone-dataplane.yml - include_tasks: sync-dbs.yml when: ovn_central is defined - include_tasks: activate-ovn.yml - include_tasks: cleanup-dataplane.yml when: ovn_controller is defined tags: - cleanup-dataplane ././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/migration/tasks/sync-dbs.yml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/migration/tasks/sy0000644000175000017500000000150100000000000034100 0ustar00coreycorey00000000000000--- - name: Get the neutron docker ID shell: docker ps | grep neutron-server-ovn | awk '{print $1}' register: neutron_docker_id - name: Sync neutron db with OVN db (container) - Run 1 command: docker exec "{{ neutron_docker_id.stdout }}" 
neutron-ovn-db-sync-util --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --ovn-neutron_sync_mode repair

- name: Sync neutron db with OVN db (container) - Run 2
  command: docker exec "{{ neutron_docker_id.stdout }}"
    neutron-ovn-db-sync-util --config-file /etc/neutron/neutron.conf
    --config-file /etc/neutron/plugins/ml2/ml2_conf.ini
    --ovn-neutron_sync_mode repair

- name: Pause and let ovn-controllers settle before doing the final activation (5 minutes)
  pause: minutes=5
././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000112 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/migration/templates/ 28 mtime=1586982291.5470467
neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/migration/template0000755000175000017500000000000000000000000034134 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000023000000000000011450 xustar0000000000000000130 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/migration/templates/activate-ovn.sh.j2 22 mtime=1586982282.0
neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/migration/template0000644000175000017500000000221500000000000034136 0ustar00coreycorey00000000000000#!/bin/bash
set -x

docker stop ovn_controller

# Restore the bridge mappings
ovn_orig_bm=$(ovs-vsctl get open . external_ids:ovn-bridge-mappings-back)
ovs-vsctl set open . external_ids:ovn-bridge-mappings="$ovn_orig_bm"
ovs-vsctl remove open . external_ids ovn-bridge-mappings-back
ovn_bms=$(echo $ovn_orig_bm | sed 's/\"//g' | sed 's/,/ /g')

# Reset the OpenFlow protocol version before ovn-controller takes over
ovs-vsctl set Bridge {{ ovn_bridge }} protocols=[]
for bm in $ovn_bms; do
    parts=($(echo $bm | sed 's/:/ /g'))
    bridge=${parts[1]}
    ovs-vsctl set-fail-mode $bridge standalone
    ovs-vsctl set Bridge $bridge protocols=[]
    ovs-vsctl del-controller $bridge
done

# Delete the controller from the integration bridge
ovs-vsctl del-controller {{ ovn_bridge }}

# Activate ovn-controller by configuring the integration bridge
ovs-vsctl set open . external_ids:ovn-bridge={{ ovn_bridge }}
docker start ovn_controller

# Delete the ovs bridges - br-tun and br-migration
ovs-vsctl --if-exists del-br {{ tunnel_bridge }}
ovs-vsctl --if-exists del-br br-migration
for br in $(ovs-vsctl list-br | egrep 'br-mig-[0-9]+'); do
    ovs-vsctl --if-exists del-br $br
done

exit 0
././@PaxHeader0000000000000000000000000000023000000000000011450 xustar0000000000000000130 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/migration/templates/clone-br-int.sh.j2 22 mtime=1586982282.0
neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/migration/template0000644000175000017500000000531500000000000034142 0ustar00coreycorey00000000000000# The purpose of this script is to make a clone of the br-int content
# into br-migration, and to create fake provider bridges.
# This way, while we synchronize the neutron database into the OVN
# northbound DB (and that translates into southbound content), all
# the ovn-controllers around are able to create the SBDB content
# safely, without disrupting the existing neutron ml2/ovs dataplane.
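#
# As an example of what recreate_bridge_mappings() below does (the mapping
# value here is illustrative, not taken from this tree): an original
#
#   external_ids:ovn-bridge-mappings="datacentre:br-ex"
#
# is first backed up into external_ids:ovn-bridge-mappings-back and then
# rewritten to
#
#   external_ids:ovn-bridge-mappings="datacentre:br-mig-0"
#
# so that southbound flows are programmed on throw-away bridges; the
# activate-ovn.sh.j2 script above restores the backup once the dataplane is
# finally switched to br-int.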
OVN_MIG_PREFIX=br-mig OVN_BR_MIGRATION=${OVN_BR_MIGRATION:-br-migration} function recreate_bridge_mappings() { function new_bridge_mappings() { orig_bms=$1 if echo $orig_bms | grep $OVN_MIG_PREFIX; then echo $orig_bms return fi ovn_bms=$(echo $1 | sed 's/\"//g' | sed 's/,/ /g') final_bm="" br_n=0 for bm in $ovn_bms; do parts=($(echo $bm | sed 's/:/ /g')) physnet=${parts[0]} bridge="${OVN_MIG_PREFIX}-${br_n}" mapping="${physnet}:${bridge}" if [[ -z "$final_bm" ]]; then final_bm=$mapping else final_bm="${final_bm},${mapping}" fi # ensure bridge ovs-vsctl --may-exist add-br $bridge br_n=$(( br_n + 1 )) done echo $final_bm } ovn_orig_bm=$(ovs-vsctl get open . external_ids:ovn-bridge-mappings) # backup the original mapping if we didn't already do ovs-vsctl get open . external_ids:ovn-bridge-mappings-back || \ ovs-vsctl set open . external_ids:ovn-bridge-mappings-back="$ovn_orig_bm" new_mapping=$(new_bridge_mappings $ovn_orig_bm) ovs-vsctl set open . external_ids:ovn-bridge-mappings="$new_mapping" } function copy_interfaces_to_br_migration() { interfaces=$(ovs-vsctl list-ifaces br-int | egrep -v 'qr-|ha-|qg-|rfp-') for interface in $interfaces; do if [[ "$interface" == "br-int" ]]; then continue fi ifmac=$(ovs-vsctl get Interface $interface external-ids:attached-mac) if [ $? -ne 0 ]; then echo "Can't get port details for $interface" continue fi ifstatus=$(ovs-vsctl get Interface $interface external-ids:iface-status) ifid=$(ovs-vsctl get Interface $interface external-ids:iface-id) ifname=x$interface ovs-vsctl -- --may-exist add-port $OVN_BR_MIGRATION $ifname \ -- set Interface $ifname type=internal \ -- set Interface $ifname external-ids:iface-status=$ifstatus \ -- set Interface $ifname external-ids:attached-mac=$ifmac \ -- set Interface $ifname external-ids:iface-id=$ifid echo cloned port $interface from br-int as $ifname on $OVN_BR_MIGRATION done } recreate_bridge_mappings docker restart ovn_controller copy_interfaces_to_br_migration ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.0270422 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/post-migration/0000755000175000017500000000000000000000000033363 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000116 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/post-migration/defaults/ 28 mtime=1586982291.5470467 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/post-migration/def0000755000175000017500000000000000000000000034042 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/post-migration/defaults/main.yml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/post-migration/def0000644000175000017500000000015700000000000034047 0ustar00coreycorey00000000000000--- ovn_migration_temp_dir: "{{ working_dir }}/post_migration_resources" validate_premigration_resources: true././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000113 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/post-migration/tasks/ 28 mtime=1586982291.5470467 
neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/post-migration/tas0000755000175000017500000000000000000000000034073 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/post-migration/tasks/main.yml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/post-migration/tas0000644000175000017500000000245000000000000034076 0ustar00coreycorey00000000000000--- # # Validate pre-migration resources and then clean those up # - name: Validate pre migration resources after migration include_role: name: resources/validate vars: restart_server: true when: - validate_migration|bool - validate_premigration_resources - name: Delete the pre migration resources include_role: name: resources/cleanup tags: - post-migration when: - validate_migration|bool - validate_premigration_resources # # Create post-migration resources, validate, and then clean up # # Delete any existing resources to make sure we don't conflict on a second run - name: Delete any post migration resources (preventive) include_role: name: resources/cleanup vars: resource_suffix: "post" silent_cleanup: true when: validate_migration|bool - name: Create post-migration resources include_role: name: resources/create vars: resource_suffix: "post" when: validate_migration|bool - name: Validate post migration resources include_role: name: resources/validate vars: resource_suffix: "post" when: validate_migration|bool - name: Delete the post migration resources include_role: name: resources/cleanup tags: - post-migration vars: resource_suffix: "post" when: validate_migration|bool././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.0310423 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/pre-migration/0000755000175000017500000000000000000000000033164 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000112 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/pre-migration/tasks/ 28 mtime=1586982291.5470467 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/pre-migration/task0000755000175000017500000000000000000000000034047 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/pre-migration/tasks/main.yml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/pre-migration/task0000644000175000017500000000076500000000000034061 0ustar00coreycorey00000000000000# Delete any existing resources to make sure we don't conflict on a second run - name: Delete any existing pre migration resources (preventive) include_role: name: resources/cleanup vars: silent_cleanup: true when: validate_migration|bool - name: Create the pre migration resource stack include_role: name: resources/create when: validate_migration|bool - name: Validate the pre migration resources include_role: name: resources/validate when: validate_migration|bool././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.0310423 
neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/0000755000175000017500000000000000000000000032421 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.0310423 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/cleanup/0000755000175000017500000000000000000000000034050 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000119 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/cleanup/defaults/ 28 mtime=1586982291.5470467 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/cleanup/0000755000175000017500000000000000000000000034050 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022500000000000011454 xustar0000000000000000127 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/cleanup/defaults/main.yml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/cleanup/0000644000175000017500000000027100000000000034052 0ustar00coreycorey00000000000000--- cleanup_resource_script: cleanup-resources.sh.j2 resource_suffix: "pre" ovn_migration_temp_dir: "{{ working_dir }}/{{ resource_suffix }}_migration_resources" silent_cleanup: false ././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000116 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/cleanup/tasks/ 28 mtime=1586982291.5470467 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/cleanup/0000755000175000017500000000000000000000000034050 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/cleanup/tasks/main.yml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/cleanup/0000644000175000017500000000126000000000000034051 0ustar00coreycorey00000000000000--- - name : Create temp file directory if not present file: state: directory path: "{{ ovn_migration_temp_dir }}" - name: Generate cleanup script template: src: "{{ cleanup_resource_script }}" dest: "{{ ovn_migration_temp_dir }}/cleanup-resources.sh" mode: 0744 - name: Cleaning up the migration resources (verbose) shell: > set -o pipefail && {{ ovn_migration_temp_dir }}/cleanup-resources.sh 2>&1 | tee {{ ovn_migration_temp_dir }}/cleanup-resources.sh.log when: not silent_cleanup - name: Cleaning up the migration resources (silent) shell: > {{ ovn_migration_temp_dir }}/cleanup-resources.sh >/dev/null 2>&1 when: silent_cleanup ././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000120 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/cleanup/templates/ 28 mtime=1586982291.5470467 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/cleanup/0000755000175000017500000000000000000000000034050 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000024500000000000011456 xustar0000000000000000143 
path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/cleanup/templates/cleanup-resources.sh.j2 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/cleanup/0000644000175000017500000000163600000000000034060 0ustar00coreycorey00000000000000#!/bin/bash set -x source {{ overcloudrc }} openstack server delete ovn-migration-server-{{ resource_suffix }} openstack port delete ovn-migration-server-port-{{ resource_suffix }} server_ip=`cat {{ ovn_migration_temp_dir }}/server_public_ip` openstack floating ip delete $server_ip openstack router remove subnet ovn-migration-router-{{ resource_suffix }} ovn-migration-subnet-{{ resource_suffix }} openstack router unset --external-gateway ovn-migration-router-{{ resource_suffix }} openstack router delete ovn-migration-router-{{ resource_suffix }} openstack network delete ovn-migration-net-{{ resource_suffix }} openstack security group delete ovn-migration-sg-{{ resource_suffix }} openstack flavor delete ovn-migration-{{ resource_suffix }} openstack image delete cirros-ovn-migration-{{ resource_suffix }} openstack keypair delete ovn-migration-{{ resource_suffix }} echo "Resource cleanup done" exit 0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.0310423 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/create/0000755000175000017500000000000000000000000033664 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000118 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/create/defaults/ 28 mtime=1586982291.5470467 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/create/d0000755000175000017500000000000000000000000034030 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/create/defaults/main.yml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/create/d0000644000175000017500000000025300000000000034032 0ustar00coreycorey00000000000000--- create_migration_resource_script: create-resources.sh.j2 resource_suffix: "pre" ovn_migration_temp_dir: "{{ working_dir }}/{{ resource_suffix }}_migration_resources" ././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000115 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/create/tasks/ 28 mtime=1586982291.5470467 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/create/t0000755000175000017500000000000000000000000034050 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/create/tasks/main.yml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/create/t0000644000175000017500000000120300000000000034046 0ustar00coreycorey00000000000000--- - name: Delete temp file directory if present file: state: absent path: "{{ ovn_migration_temp_dir }}" - name : Create temp file directory if not present file: state: directory 
path: "{{ ovn_migration_temp_dir }}" - name: Generate resource creation script template: src: "{{ create_migration_resource_script }}" dest: "{{ ovn_migration_temp_dir }}/create-migration-resources.sh" mode: 0744 - name: Creating migration resources shell: > set -o pipefail && {{ ovn_migration_temp_dir }}/create-migration-resources.sh 2>&1 | tee {{ ovn_migration_temp_dir }}/create-migration-resources.sh.log ././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000119 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/create/templates/ 28 mtime=1586982291.5470467 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/create/t0000755000175000017500000000000000000000000034050 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000024300000000000011454 xustar0000000000000000141 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/create/templates/create-resources.sh.j2 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/create/t0000644000175000017500000001023000000000000034046 0ustar00coreycorey00000000000000#!/bin/bash set -x source {{ overcloudrc }} image_name={{ image_name }} openstack image show $image_name if [ "$?" != "0" ] then if [ ! -f cirros-0.4.0-x86_64-disk.img ] then curl -Lo cirros-0.4.0-x86_64-disk.img https://github.com/cirros-dev/cirros/releases/download/0.4.0/cirros-0.4.0-x86_64-disk.img fi openstack image create "cirros-ovn-migration-{{ resource_suffix }}" --file cirros-0.4.0-x86_64-disk.img \ --disk-format qcow2 --container-format bare --public image_name="cirros-ovn-migration-{{ resource_suffix }}" fi openstack flavor create ovn-migration-{{ resource_suffix }} --ram 1024 --disk 1 --vcpus 1 openstack keypair create ovn-migration-{{ resource_suffix }} --private-key {{ ovn_migration_temp_dir }}/ovn_migration_ssh_key openstack security group create ovn-migration-sg-{{ resource_suffix }} openstack security group rule create --ingress --protocol icmp ovn-migration-sg-{{ resource_suffix }} openstack security group rule create --ingress --protocol tcp --dst-port 22 ovn-migration-sg-{{ resource_suffix }} openstack network create ovn-migration-net-{{ resource_suffix }} neutron net-update ovn-migration-net-{{ resource_suffix }} --mtu 1442 openstack subnet create --network ovn-migration-net-{{ resource_suffix }} --subnet-range 172.168.199.0/24 ovn-migration-subnet-{{ resource_suffix }} openstack port create --network ovn-migration-net-{{ resource_suffix }} --security-group ovn-migration-sg-{{ resource_suffix }} ovn-migration-server-port-{{ resource_suffix }} openstack server create --flavor ovn-migration-{{ resource_suffix }} --image $image_name \ --key-name ovn-migration-{{ resource_suffix }} \ --nic port-id=ovn-migration-server-port-{{ resource_suffix }} ovn-migration-server-{{ resource_suffix }} openstack router create ovn-migration-router-{{ resource_suffix }} openstack router set --external-gateway {{ public_network_name }} ovn-migration-router-{{ resource_suffix }} openstack router add subnet ovn-migration-router-{{ resource_suffix }} ovn-migration-subnet-{{ resource_suffix }} server_ip=`openstack floating ip create --port ovn-migration-server-port-{{ resource_suffix }} \ {{ public_network_name }} -c floating_ip_address | grep floating_ip_address \ | awk '{print $4'}` echo $server_ip > {{ ovn_migration_temp_dir }}/server_public_ip chmod 0600 {{ 
ovn_migration_temp_dir }}/ovn_migration_ssh_key # Wait till the port is ACTIVE echo "Wait till the port is ACTIVE" port_status=`openstack port show ovn-migration-server-port-{{ resource_suffix }} -c status | grep status | awk '{print $4}'` num_attempts=0 while [ "$port_status" != "ACTIVE" ] do num_attempts=$((num_attempts+1)) sleep 5 port_status=`openstack port show ovn-migration-server-port-{{ resource_suffix }} -c status | grep status | awk '{print $4}'` echo "Port status = $port_status" if [ $num_attempts -gt 24 ] then echo "Port is not up even after 2 minutes. Something is wrong" exit 1 fi done echo "VM is up and the port is ACTIVE" # Wait till the VM allows ssh connections vm_status="down" num_attempts=0 while [ "$vm_status" != "up" ] do num_attempts=$((num_attempts+1)) sleep 5 openstack console log show ovn-migration-server-{{ resource_suffix }} | grep "login:" if [ "$?" == "0" ] then vm_status="up" else if [ $num_attempts -gt 60 ] then echo "Port is not up even after 5 minutes. Something is wrong." # Even though something seems wrong, lets try and ping. break fi fi done num_attempts=0 vm_reachable="false" while [ "$vm_reachable" != "true" ] do num_attempts=$((num_attempts+1)) sleep 1 ping -c 3 $server_ip if [ "$?" == "0" ] then vm_reachable="true" else if [ $num_attempts -gt 60 ] then echo "VM is not reachable. Something is wrong." # Even though something seems wrong, lets try and ping. exit 1 fi fi done ssh -i {{ ovn_migration_temp_dir }}/ovn_migration_ssh_key -o StrictHostKeyChecking=no \ -o UserKnownHostsFile=/dev/null cirros@$server_ip date rc=$? echo "Done with the resource creation : exiting with $rc" exit $rc ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000111 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/validate/ 28 mtime=1586982291.0310423 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/validate0000755000175000017500000000000000000000000034133 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000120 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/validate/defaults/ 28 mtime=1586982291.5470467 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/validate0000755000175000017500000000000000000000000034133 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022600000000000011455 xustar0000000000000000128 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/validate/defaults/main.yml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/validate0000644000175000017500000000032100000000000034131 0ustar00coreycorey00000000000000validate_resources_script: validate-resources.sh.j2 server_user_name: "cirros" restart_server: false resource_suffix: "pre" ovn_migration_temp_dir: "{{ working_dir }}/{{ resource_suffix }}_migration_resources"././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000117 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/validate/tasks/ 28 mtime=1586982291.5470467 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/validate0000755000175000017500000000000000000000000034133 
5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/validate/tasks/main.yml 22 mtime=1586982282.0
neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/validate0000644000175000017500000000056400000000000034142 0ustar00coreycorey00000000000000- name: Generate resource validation script
  template:
    src: "{{ validate_resources_script }}"
    dest: "{{ ovn_migration_temp_dir }}/validate-resources.sh"
    mode: 0744

- name: Run the validation script
  shell: >
    set -o pipefail &&
    {{ ovn_migration_temp_dir }}/validate-resources.sh 2>&1 |
    tee {{ ovn_migration_temp_dir }}/validate-resources.sh.log
././@PaxHeader0000000000000000000000000000022500000000000011454 xustar0000000000000000121 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/validate/templates/ 28 mtime=1586982291.5470467
neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/validate0000755000175000017500000000000000000000000034133 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000024700000000000011460 xustar0000000000000000145 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/validate/templates/validate-resources.sh.j2 22 mtime=1586982282.0
neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/resources/validate0000644000175000017500000000101600000000000034133 0ustar00coreycorey00000000000000#!/bin/bash
set -x
set -e
source {{ overcloudrc }}

# This script validates the resources created by the resources/create role.
# It pings the floating ip of the server and sshes into the server.
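#
# The validate role renders this template into ovn_migration_temp_dir and,
# per its tasks/main.yml above, runs it roughly as:
#
#   set -o pipefail && <ovn_migration_temp_dir>/validate-resources.sh 2>&1 \
#       | tee <ovn_migration_temp_dir>/validate-resources.sh.log
#
# Because of "set -e" above, the first failed ping or ssh aborts the play.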
server_ip=`cat {{ ovn_migration_temp_dir }}/server_public_ip` echo "Running ping test with -c 3 to the server ip - $server_ip" ping -c 3 $server_ip ssh -i {{ ovn_migration_temp_dir }}/ovn_migration_ssh_key -o StrictHostKeyChecking=no \ -o UserKnownHostsFile=/dev/null cirros@$server_ip date echo "Done with the validation" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.0310423 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/tripleo-update/0000755000175000017500000000000000000000000033345 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000116 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/tripleo-update/defaults/ 28 mtime=1586982291.5470467 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/tripleo-update/def0000755000175000017500000000000000000000000034024 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/tripleo-update/defaults/main.yml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/tripleo-update/def0000644000175000017500000000015300000000000034025 0ustar00coreycorey00000000000000--- generate_ovn_extras: generate-ovn-extras.sh.j2 ovn_migration_temp_dir: "{{ working_dir }}/temp_files" ././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000113 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/tripleo-update/tasks/ 28 mtime=1586982291.5510468 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/tripleo-update/tas0000755000175000017500000000000000000000000034055 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/tripleo-update/tasks/main.yml 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/tripleo-update/tas0000644000175000017500000000121300000000000034054 0ustar00coreycorey00000000000000--- - name : Create temp file directory if not present file: state: directory path: "{{ ovn_migration_temp_dir }}" - name: Create ovn-extras generation script template: src: "{{ generate_ovn_extras }}" dest: "{{ ovn_migration_temp_dir }}/generate-ovn-extras.sh" mode: 0755 - name: Generate ovn-extras environment file shell: > set -o pipefail && {{ ovn_migration_temp_dir }}/generate-ovn-extras.sh changed_when: False - name: Updating the overcloud stack with OVN services shell: > set -o pipefail && {{ overcloud_ovn_deploy_script }} 2>&1 > {{ overcloud_ovn_deploy_script }}.log changed_when: true ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000117 path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/tripleo-update/templates/ 28 mtime=1586982291.5510468 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/tripleo-update/tem0000755000175000017500000000000000000000000034053 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000024400000000000011455 xustar0000000000000000142 
path=neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/tripleo-update/templates/generate-ovn-extras.sh.j2 22 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/ovn_migration/tripleo_environment/playbooks/roles/tripleo-update/tem0000644000175000017500000000017200000000000034055 0ustar00coreycorey00000000000000#!/bin/bash set -x cat > $HOME/ovn-extras.yaml << EOF parameter_defaults: OVNIntegrationBridge: "{{ ovn_bridge }}" EOF ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/pip_install_src_modules.sh0000755000175000017500000000121300000000000023554 0ustar00coreycorey00000000000000#!/bin/bash # For neutron unit tests, you can define git repos containing modules # that you want to use to override the requirements-based packages. # # Why, you ask? Because you made changes to neutron-lib, and you want # run the unit tests together. E.g.: # # env TOX_ENV_SRC_MODULES="$HOME/src/neutron-lib" tox -e py37 toxinidir="$1" if [ -z "$TOX_ENV_SRC_MODULES" ]; then exit 0 fi for repo in $TOX_ENV_SRC_MODULES; do d="${toxinidir}/${repo}" if [ ! -d "$d" ]; then echo "tox_env_src: error: no directory found at $d" continue fi echo "tox_env_src: pip installing from $d" pip install -e "$d" done ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/requirements.txt0000644000175000017500000000040100000000000021562 0ustar00coreycorey00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. click>=7.0 # BSD requests>=2.14.2 # Apache-2.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/split.sh0000755000175000017500000000566600000000000020012 0ustar00coreycorey00000000000000#!/bin/sh # # This script has been shamelessly copied and tweaked from original copy: # # https://github.com/openstack/oslo-incubator/blob/master/tools/graduate.sh # # Use this script to export a Neutron module to a separate git repo. # # You can call this script Call script like so: # # ./split.sh # # The file should be a text file like the one below: # # /path/to/file/file1 # /path/to/file/file2 # ... 
# /path/to/file/fileN # # Such a list can be generated with a command like this: # # find $path -type f # path is the base dir you want to list files for set -e if [ $# -lt 2 ]; then echo "Usage $0 " exit 1 fi set -x file_list_path="$1" project_name="$2" files_to_keep=$(cat $file_list_path) # Build the grep pattern for ignoring files that we want to keep keep_pattern="\($(echo $files_to_keep | sed -e 's/^/\^/' -e 's/ /\\|\^/g')\)" # Prune all other files in every commit pruner="git ls-files | grep -v \"$keep_pattern\" | git update-index --force-remove --stdin; git ls-files > /dev/stderr" # Find all first commits with listed files and find a subset of them that # predates all others roots="" for file in $files_to_keep; do file_root=$(git rev-list --reverse HEAD -- $file | head -n1) fail=0 for root in $roots; do if git merge-base --is-ancestor $root $file_root; then fail=1 break elif !git merge-base --is-ancestor $file_root $root; then new_roots="$new_roots $root" fi done if [ $fail -ne 1 ]; then roots="$new_roots $file_root" fi done # Purge all parents for those commits set_roots=" if [ 1 -eq 0 $(for root in $roots; do echo " -o \"\$GIT_COMMIT\" = '$root' "; done) ]; then echo ''; else cat; fi" # Enhance git_commit_non_empty_tree to skip merges with: # a) either two equal parents (commit that was about to land got purged as well # as all commits on mainline); # b) or with second parent being an ancestor to the first one (just as with a) # but when there are some commits on mainline). # In both cases drop second parent and let git_commit_non_empty_tree to decide # if commit worth doing (most likely not). skip_empty=$(cat << \EOF if [ $# = 5 ] && git merge-base --is-ancestor $5 $3; then git_commit_non_empty_tree $1 -p $3 else git_commit_non_empty_tree "$@" fi EOF ) # Filter out commits for unrelated files echo "Pruning commits for unrelated files..." git filter-branch \ --index-filter "$pruner" \ --parent-filter "$set_roots" \ --commit-filter "$skip_empty" \ --tag-name-filter cat \ -- --all # Generate the new .gitreview file echo "Generating new .gitreview file..." cat > .gitreview <- {% if release != 'newton' %} -e {{ overcloud_templates_path }}/environments/disable-telemetry.yaml {% endif %} extra_args: >- --ntp-server pool.ntp.org -e {{ overcloud_templates_path }}/environments/docker.yaml -e {{ overcloud_templates_path }}/environments/docker-ha.yaml -e {{ overcloud_templates_path }}/environments/services/neutron-ovn-ha.yaml prepare_service_env_args: >- -e {{ overcloud_templates_path }}/environments/docker.yaml -e {{ overcloud_templates_path }}/environments/docker-ha.yaml -e {{ overcloud_templates_path }}/environments/services/neutron-ovn-ha.yaml # If `run_tempest` is `true`, run tempests tests, otherwise do not # run them. tempest_config: true test_ping: false run_tempest: false test_regex: '' tempest_whitelist: - 'tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/unassign_bug.py0000644000175000017500000000342400000000000021344 0ustar00coreycorey00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unassigns assignee from neutron/network bug, adds message and tag. If you get the following exception, you need X11 and python-dbus installed: RuntimeError: No recommended backend was available. Install the keyrings.alt package if you want to use the non-recommended backends. See README.rst for details. """ import sys from launchpadlib.launchpad import Launchpad MSG_BODY = "\ This bug has had a related patch abandoned and has been automatically \ un-assigned due to inactivity. Please re-assign yourself if you are \ continuing work or adjust the state as appropriate if it is no longer valid." def unassign(bug_num): launchpad = Launchpad.login_with('neutron', 'production') b = launchpad.bugs[bug_num] for task in b.bug_tasks: if ('neutron' not in task.bug_target_name and 'network' not in task.bug_target_name): # try not to interfere with non-neutron projects too much continue task.assignee = None if task.status == "In Progress": task.status = 'New' task.lp_save() b.tags = b.tags + ['timeout-abandon'] b.newMessage(content=MSG_BODY, subject='auto-abandon-script') b.lp_save() if __name__ == '__main__': unassign(int(sys.argv[1])) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tools/with_venv.sh0000755000175000017500000000153500000000000020657 0ustar00coreycorey00000000000000#!/usr/bin/env bash # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
tools_path=${tools_path:-$(dirname $0)} venv_path=${venv_path:-${tools_path}} venv_dir=${venv_name:-/../.venv} TOOLS=${tools_path} VENV=${venv:-${venv_path}/${venv_dir}} source $VENV/bin/activate && "$@" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/tox.ini0000644000175000017500000002027200000000000016461 0ustar00coreycorey00000000000000[tox] envlist = docs,py37,pep8 minversion = 3.2.0 skipsdist = True ignore_basepython_conflict = True [testenv] basepython = python3 setenv = VIRTUAL_ENV={envdir} OS_LOG_CAPTURE={env:OS_LOG_CAPTURE:true} OS_STDOUT_CAPTURE={env:OS_STDOUT_CAPTURE:true} OS_STDERR_CAPTURE={env:OS_STDERR_CAPTURE:true} PYTHONWARNINGS=default::DeprecationWarning,ignore::DeprecationWarning:distutils,ignore::DeprecationWarning:site passenv = TRACE_FAILONLY GENERATE_HASHES http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY TOX_ENV_SRC_MODULES usedevelop = True deps = -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt whitelist_externals = sh commands = {toxinidir}/tools/pip_install_src_modules.sh "{toxinidir}" stestr run {posargs} # there is also secret magic in ostestr which lets you run in a fail only # mode. To do this define the TRACE_FAILONLY environmental variable. [testenv:debug] envdir = {toxworkdir}/shared commands = oslo_debug_helper -t neutron/tests {posargs} [testenv:common] # Fake job to define environment variables shared between dsvm/non-dsvm jobs setenv = OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:180} commands = false [testenv:dsvm] # Fake job to define environment variables shared between dsvm jobs setenv = OS_SUDO_TESTING=1 OS_ROOTWRAP_CMD=sudo {envdir}/bin/neutron-rootwrap {envdir}/etc/neutron/rootwrap.conf OS_ROOTWRAP_DAEMON_CMD=sudo {envdir}/bin/neutron-rootwrap-daemon {envdir}/etc/neutron/rootwrap.conf OS_FAIL_ON_MISSING_DEPS=1 OS_LOG_PATH={env:OS_LOG_PATH:/opt/stack/logs} commands = false [testenv:functional] setenv = {[testenv]setenv} {[testenv:common]setenv} OS_TEST_PATH=./neutron/tests/functional OS_LOG_PATH={env:OS_LOG_PATH:/opt/stack/logs} # Because of issue with stestr and Python3, we need to avoid too much output # to be produced during tests, so we will ignore python warnings here PYTHONWARNINGS=ignore deps = {[testenv]deps} -r{toxinidir}/neutron/tests/functional/requirements.txt [testenv:dsvm-functional] setenv = {[testenv:functional]setenv} {[testenv:dsvm]setenv} deps = {[testenv:functional]deps} commands = {toxinidir}/tools/deploy_rootwrap.sh {toxinidir} {envdir}/etc {envdir}/bin stestr run {posargs} [testenv:dsvm-fullstack] setenv = {[testenv]setenv} {[testenv:common]setenv} {[testenv:dsvm]setenv} # workaround for DB teardown lock contention (bug/1541742) OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:600} OS_TEST_PATH=./neutron/tests/fullstack deps = {[testenv:functional]deps} commands = {toxinidir}/tools/generate_dhclient_script_for_fullstack.sh {envdir} {toxinidir}/tools/deploy_rootwrap.sh {toxinidir} {envdir}/etc {envdir}/bin stestr run --concurrency 4 {posargs} [testenv:dsvm-fullstack-gate] setenv = {[testenv:dsvm-fullstack]setenv} deps = {[testenv:dsvm-fullstack]deps} commands = {toxinidir}/tools/generate_dhclient_script_for_fullstack.sh {envdir} {toxinidir}/tools/deploy_rootwrap.sh {toxinidir} {envdir}/etc {envdir}/bin stestr run --concurrency 4 --black-regex neutron.tests.fullstack.test_securitygroup.TestSecurityGroupsSameNetwork.test_securitygroup 
{posargs} stestr run --combine --concurrency 1 neutron.tests.fullstack.test_securitygroup.TestSecurityGroupsSameNetwork.test_securitygroup {posargs} [testenv:releasenotes] envdir = {toxworkdir}/docs deps = -r{toxinidir}/doc/requirements.txt commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [testenv:pep8] envdir = {toxworkdir}/shared deps = {[testenv]deps} commands= # If it is easier to add a check via a shell script, consider adding it in this file sh ./tools/misc-sanity-checks.sh {toxinidir}/tools/check_unit_test_structure.sh # Checks for coding and style guidelines flake8 sh ./tools/coding-checks.sh --pylint '{posargs}' neutron-db-manage --config-file neutron/tests/etc/neutron.conf check_migration python ./tools/list_moved_globals.py {[testenv:genconfig]commands} {[testenv:bashate]commands} {[testenv:bandit]commands} {[testenv:genpolicy]commands} whitelist_externals = sh bash [testenv:cover] envdir = {toxworkdir}/shared setenv = {[testenv]setenv} PYTHON=coverage run --source neutron --parallel-mode commands = stestr run --no-subunit-trace {posargs} coverage combine coverage report --fail-under=82 --skip-covered coverage html -d cover coverage xml -o cover/coverage.xml [testenv:venv] commands = {posargs} [testenv:docs] envdir = {toxworkdir}/docs deps = -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/doc/requirements.txt -r{toxinidir}/requirements.txt commands = sphinx-build -W -b html doc/source doc/build/html [testenv:pdf-docs] envdir = {toxworkdir}/docs deps = {[testenv:docs]deps} whitelist_externals = make commands = sphinx-build -W -b latex doc/source doc/build/pdf make -C doc/build/pdf [testenv:linkcheck] envdir = {toxworkdir}/docs deps = -r{toxinidir}/doc/requirements.txt commands = sphinx-build -W -b linkcheck doc/source doc/build/linkcheck [flake8] # E126 continuation line over-indented for hanging indent # E128 continuation line under-indented for visual indent # H405 multi line docstring summary not separated with an empty line # N530 direct neutron imports not allowed # TODO(amotoki) check the following new rules should be fixed or ignored # E731 do not assign a lambda expression, use a def # W504 line break after binary operator ignore = E126,E128,E731,H405,N530,W504 # H106: Don't put vim configuration in source files # H203: Use assertIs(Not)None to check for None # H204: Use assert(Not)Equal to check for equality # H205: Use assert(Greater|Less)(Equal) for comparison # H904: Delay string interpolations at logging calls enable-extensions=H106,H203,H204,H205,H904 show-source = true exclude = ./.*,build,dist,doc import-order-style = pep8 [hacking] import_exceptions = neutron._i18n local-check-factory = neutron.hacking.checks.factory [testenv:bandit] envdir = {toxworkdir}/shared # B104: Possible binding to all interfaces # B303: blacklist calls: md5, sha1 # B311: Standard pseudo-random generators are not suitable for security/cryptographic purpose # B604: any_other_function_with_shell_equals_true deps = -r{toxinidir}/test-requirements.txt commands = bandit -r neutron -x tests -n5 -s B104,B303,B311,B604 [testenv:bashate] envdir = {toxworkdir}/shared commands = bash -c "find {toxinidir} \ -not \( -type d -name .tox\* -prune \) \ -not \( -type d -name .venv\* -prune \) \ -type f \ -name \*.sh \ # E005 file does not begin with #! 
or have a .sh prefix # E006 check for lines longer than 79 columns # E042 local declaration hides errors # E043 Arithmetic compound has inconsistent return semantics -print0 | xargs -0 bashate -v -iE006 -eE005,E042,E043" [testenv:genconfig] envdir = {toxworkdir}/shared commands = {toxinidir}/tools/generate_config_file_samples.sh [testenv:genpolicy] envdir = {toxworkdir}/shared commands = oslopolicy-sample-generator --config-file=etc/oslo-policy-generator/policy.conf # This environment can be used to quickly validate that all needed system # packages required to successfully execute test targets are installed [testenv:bindep] # Do not install any requirements. We want this to be fast and work even if # system dependencies are missing, since it's used to tell you what system # dependencies are missing! This also means that bindep must be installed # separately, outside of the requirements files. deps = bindep commands = bindep test [testenv:lower-constraints] setenv = OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:60} deps = -c{toxinidir}/lower-constraints.txt -r{toxinidir}/test-requirements.txt -r{toxinidir}/requirements.txt [testenv:requirements] deps = -egit+https://opendev.org/openstack/requirements#egg=openstack-requirements whitelist_externals = sh commands = sh -c '{envdir}/src/openstack-requirements/playbooks/files/project-requirements-change.py --req {envdir}/src/openstack-requirements --local {toxinidir} master' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.0310423 neutron-16.0.0.0b2.dev214/vagrant/0000755000175000017500000000000000000000000016605 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5510468 neutron-16.0.0.0b2.dev214/vagrant/ovn/0000755000175000017500000000000000000000000017407 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/vagrant/ovn/README.rst0000644000175000017500000000034300000000000021076 0ustar00coreycorey00000000000000========================================= Automatic deployment of OVN using Vagrant ========================================= Please reference the files in /doc/source/contributor/ovn_vagrant/ for more information about this. 
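The README above defers to the contributor guide; as a quick orientation, here is a minimal bring-up sketch for the "sparse" OVN environment whose Vagrantfile and instances.yml appear further down (an assumption-laden sketch: it presumes Vagrant plus one of the providers from providers.rb is installed and working):

#!/usr/bin/env bash
# Boot the sparse OVN topology defined in vagrant/ovn/sparse.
cd vagrant/ovn/sparse
vagrant up                  # starts ovn-db, ovn-controller, ovn-compute1 and ovn-compute2
vagrant up ovn-vtep         # the vtep node is declared autostart: false, so start it explicitly
vagrant ssh ovn-controller  # ovn-controller is primary: true, so a bare "vagrant ssh" lands here too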
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5510468 neutron-16.0.0.0b2.dev214/vagrant/ovn/provisioning/0000755000175000017500000000000000000000000022135 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/vagrant/ovn/provisioning/boxes.yml0000644000175000017500000000021200000000000023773 0ustar00coreycorey00000000000000--- ubuntu: virtualbox: bento/ubuntu-18.04 libvirt: elastic/ubuntu-18.04-x86_64 parallels: bento/ubuntu-18.04 openstack: bionic64 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/vagrant/ovn/provisioning/id_rsa0000644000175000017500000000321200000000000023317 0ustar00coreycorey00000000000000-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAv2EyHk382N9LGMPAGbAG9rea6qcO+I+qj7OscU1k8GxnYO0B hHYPMzfT1RgeBDelNyM22SNiySr4iTQDBQxgunrUCdTaNu5dmzYT68gieqnH+CRR jpLxXecH2hcvKyFx5qmhMt4zE3QWCDv2JJiB5CGoV7sGCy1aTYbFJBeKwMUMpBwP 67rpBdVcpNjeSkw8FKDvPVx1p1O0YgeI9JoIL0qka6FFgiii/wf5jgr0w/JW15VI 2pYwpHhdnBt3M3BV2HK5cA6nwFhUfAG0HLP5lUGW9/Hk5ng/Wl7cz8nLAdgXf4Uc IUffO5SH+9/H5VhTMDpaRPgxWuOw1/UGLgf57wIDAQABAoIBAD/+5X6Cv6lZycfn NWahmUKJFRGgeX4etH9HKsPciINpDIy51EcSH3UWFwzr+qWYYfP1H5MupQr2BpQC w3u9rt7M0fjTp4C05rJPPAwdKYJxIcBVjLwrYPDwn4yLMievEGJ8mL3k1ZmMuQ1Z 165XHSBHLP7hOF0mdkr0ZRnzkV9yMPjZAI6xnkt/q6EvO34wSZu3/qsmptipHgqB QQAjPIvJwr7DMoLVpBjLlfGihUB5NAVC0RU+7SIiTAUg0atUzucp+sQMnWlWKVvM 3+nHGC8gR4fUy30LDgxd4eqFyG8EYpTzpN/0bgM3kdwiQTkR/lGvhwmok/o6Nz0n 67ve12ECgYEA41lug+TitPrq9VaLacTBpDOafsmIY30sylBJClcbkQ94NEQyNASg TsXxRtvYvKuHy0i2xZwagqEyReaTfsScmyFOk/SRFqjgmb3eWYtgF04MtAAmLy9G 5UmPLEm6lLuQGCI7CqLAv3PFCR7W7dX5VYwkteDejZ0NlLeNqKJjIvECgYEA139W ocUBbWu4ea68JB/qOGrxCMQKn6K3l9kA7tuu+3a0le0G7LF6dr+X1wvboCF/w8CZ ZqKm35yZoAzyFmfn8oGtJdgbz4Sl3/vZReg86Ca//m4LMe1FkkimT3UW+BKprtEJ 5GiKKWYElknMthbDTpL5EouciPhG0bYKuIMBKt8CgYAL+LqcEWJqu0fCEYOX1zeH KPx6rqwS6RWBtcaS19FoyxK+VdT67j9uxneVDqCUFsg4ySRutXCj7k8SZTjhFQNW G+PiYJ9/PPdOwTPDLVarA34hwFxCYc/u5Pe4Ek3T5SiKTMslHTrfGf6HI2uX7IuL mKyaMzQk6t87NIsuFRb5UQKBgQCzciEEslUe9b127k9S0ZSriDnQb9bc2ZWCB7zk KeELGu0Dj43dmWh968sX0pL/RAXtTrsuoTDOMcwnX8BTchDOerdhNRTrd+zcmA50 TRAyzNnBl4cQ+yCc0IxUzA7lYj0UCpPvNDIgiQg20Zt64XefPXnUvJcL45qtVKaW wNg/BwKBgFyhjxftMwAJJF2Hcq5s8QvNhznBgLtne7jnQkHU4qcJx6tcR1hy0Jqe 8/zkr5+41EaFU2jjGn8cnUrlS/Vc/HZg3rmHYycX5wg9hrg1j4hokSHjsGL6Y7yn 8oXIWJSqpxuMjfRh1Tb81Fg05emrMjTy6aLuGS0siUlTPzflD0RI -----END RSA PRIVATE KEY-----././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/vagrant/ovn/provisioning/id_rsa.pub0000644000175000017500000000061000000000000024103 0ustar00coreycorey00000000000000ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/YTIeTfzY30sYw8AZsAb2t5rqpw74j6qPs6xxTWTwbGdg7QGEdg8zN9PVGB4EN6U3IzbZI2LJKviJNAMFDGC6etQJ1No27l2bNhPryCJ6qcf4JFGOkvFd5wfaFy8rIXHmqaEy3jMTdBYIO/YkmIHkIahXuwYLLVpNhsUkF4rAxQykHA/ruukF1Vyk2N5KTDwUoO89XHWnU7RiB4j0mggvSqRroUWCKKL/B/mOCvTD8lbXlUjaljCkeF2cG3czcFXYcrlwDqfAWFR8AbQcs/mVQZb38eTmeD9aXtzPycsB2Bd/hRwhR987lIf738flWFMwOlpE+DFa47DX9QYuB/nv vagrant@ovn././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/vagrant/ovn/provisioning/provider-setup.sh0000644000175000017500000000077700000000000025474 0ustar00coreycorey00000000000000#!/bin/bash function provider_setup { # Save the existing address from eth2 and add it to br-provider if 
ip a | grep enp0; then
        PROV_IF=enp0s9
    else
        PROV_IF=eth2
    fi
    PROVADDR=$(ip -4 addr show $PROV_IF | grep -oP "(?<=inet ).*(?= brd)")
    if [ -n "$PROVADDR" ]; then
        sudo ip addr flush dev $PROV_IF
        sudo ip addr add $PROVADDR dev br-provider
        sudo ip link set br-provider up
        sudo ovs-vsctl --may-exist add-port br-provider $PROV_IF
    fi
}
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/vagrant/ovn/provisioning/providers.rb0000644000175000017500000000235100000000000024500 0ustar00coreycorey00000000000000
def provider_box(provider)
  distro = ENV.fetch('DISTRO', 'ubuntu')
  boxes = YAML.load_file('../provisioning/boxes.yml')[distro]
  # we can always override the box via the VAGRANT_OVN_VM_BOX
  # environment variable
  return ENV.fetch('VAGRANT_OVN_VM_BOX', boxes[provider])
end

def configure_providers(vm, config)
  vm.provider 'virtualbox' do |vb, cfg|
    cfg.vm.box = provider_box('virtualbox')
    vb.memory = config['memory']
    vb.cpus = config['cpus']
    vb.customize [ 'modifyvm', :id, '--nicpromisc3', "allow-all" ]
    vb.customize [ "guestproperty", "set", :id, "/VirtualBox/GuestAdd/VBoxService/--timesync-set-threshold", 10000 ]
  end

  vm.provider 'parallels' do |vb, cfg|
    cfg.vm.box = provider_box('parallels')
    vb.memory = config['memory']
    vb.cpus = config['cpus']
    vb.customize ['set', :id, '--nested-virt', 'on']
  end

  vm.provider 'libvirt' do |vb, cfg|
    cfg.vm.box = provider_box('libvirt')
    vb.memory = config['memory']
    vb.cpus = config['cpus']
    vb.nested = true
    vb.graphics_type = 'spice'
    vb.video_type = 'qxl'
    vb.suspend_mode = 'managedsave'
  end
end
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/vagrant/ovn/provisioning/setup-base.sh0000644000175000017500000000710000000000000024539 0ustar00coreycorey00000000000000#!/bin/sh
# Script Arguments:
# $1 - MTU
# $2 - ovn-db IP address
# $3 - ovn-db short name
# $4 - ovn-controller IP address
# $5 - ovn-controller short name
# $6 - ovn-compute1 IP address
# $7 - ovn-compute1 short name
# $8 - ovn-compute2 IP address
# $9 - ovn-compute2 short name
# $10 - ovn-vtep IP address
# $11 - ovn-vtep short name
MTU=$1
OVN_DB_IP=$2
OVN_DB_NAME=$3
OVN_CONTROLLER_IP=$4
OVN_CONTROLLER_NAME=$5
OVN_COMPUTE1_IP=$6
OVN_COMPUTE1_NAME=$7
OVN_COMPUTE2_IP=$8
OVN_COMPUTE2_NAME=$9
# Positional parameters past $9 need braces; a bare $10 expands as ${1}0.
OVN_VTEP_IP=${10}
OVN_VTEP_NAME=${11}
BASE_PACKAGES="git bridge-utils ebtables python-pip python-dev build-essential ntp"
DEBIAN_FRONTEND=noninteractive sudo apt-get -qqy update
DEBIAN_FRONTEND=noninteractive sudo apt-get install -qqy $BASE_PACKAGES
echo export LC_ALL=en_US.UTF-8 >> ~/.bash_profile
echo export LANG=en_US.UTF-8 >> ~/.bash_profile
# FIXME(mestery): Remove once Vagrant boxes allow apt-get to work again
sudo rm -rf /var/lib/apt/lists/*
sudo apt-get install -y git
# FIXME(mestery): By default, Ubuntu ships with /bin/sh pointing to
# the dash shell.
# ..
# ..
# The dots above represent a pause as you pick yourself up off the
# floor. This means the latest version of "install_docker.sh" to load
# docker fails because dash can't interpret some of its bash-specific
# things. It's a bug in install_docker.sh that it relies on those and
# uses a shebang of /bin/sh, but that doesn't help us if we want to run
# docker and specifically Kuryr. So, this works around that.
sudo update-alternatives --install /bin/sh sh /bin/bash 100
if [ !
-d "devstack" ]; then git clone https://git.openstack.org/openstack-dev/devstack.git fi # If available, use repositories on host to facilitate testing local changes. # Vagrant requires that shared folders exist on the host, so additionally # check for the ".git" directory in case the parent exists but lacks # repository contents. if [ ! -d "neutron/.git" ]; then git clone https://git.openstack.org/openstack/neutron.git fi # Use neutron in vagrant home directory when stacking. sudo mkdir /opt/stack sudo chown vagrant:vagrant /opt/stack ln -s ~/neutron /opt/stack/neutron # We need swap space to do any sort of scale testing with the Vagrant config. # Without this, we quickly run out of RAM and the kernel starts whacking things. sudo rm -f /swapfile1 sudo dd if=/dev/zero of=/swapfile1 bs=1024 count=2097152 sudo chown root:root /swapfile1 sudo chmod 0600 /swapfile1 sudo mkswap /swapfile1 sudo swapon /swapfile1 # Configure MTU on VM interfaces. Also requires manually configuring the same MTU on # the equivalent 'vboxnet' interfaces on the host. if ip a | grep enp0; then sudo ip link set dev enp0s8 mtu $MTU sudo ip link set dev enp0s9 mtu $MTU else sudo ip link set dev eth1 mtu $MTU sudo ip link set dev eth2 mtu $MTU fi # Migration setup sudo sh -c "echo \"$OVN_DB_IP $OVN_DB_NAME\" >> /etc/hosts" sudo sh -c "echo \"$OVN_CONTROLLER_IP $OVN_CONTROLLER_NAME\" >> /etc/hosts" sudo sh -c "echo \"$OVN_COMPUTE1_IP $OVN_COMPUTE1_NAME\" >> /etc/hosts" sudo sh -c "echo \"$OVN_COMPUTE2_IP $OVN_COMPUTE2_NAME\" >> /etc/hosts" sudo sh -c "echo \"$OVN_VTEP_IP $OVN_VTEP_NAME\" >> /etc/hosts" # Non-interactive SSH setup cp neutron/vagrant/ovn/provisioning/id_rsa ~/.ssh/id_rsa cat neutron/vagrant/ovn/provisioning/id_rsa.pub >> ~/.ssh/authorized_keys chmod 600 ~/.ssh/id_rsa echo "Host *" >> ~/.ssh/config echo " StrictHostKeyChecking no" >> ~/.ssh/config chmod 600 ~/.ssh/config sudo mkdir /root/.ssh chmod 700 /root/.ssh sudo cp ~vagrant/.ssh/id_rsa /root/.ssh sudo cp ~vagrant/.ssh/authorized_keys /root/.ssh sudo cp ~vagrant/.ssh/config /root/.ssh/config ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/vagrant/ovn/provisioning/setup-compute.sh0000644000175000017500000000727400000000000025315 0ustar00coreycorey00000000000000#!/usr/bin/env bash # Script Arguments: # $1 - ovn-controller IP address # $2 - ovn-db IP address OVN_CONTROLLER_IP=$1 OVN_DB_IP=$2 cp neutron/devstack/ovn-compute-local.conf.sample devstack/local.conf sed -i -e 's//'$OVN_CONTROLLER_IP'/g' devstack/local.conf sudo umount /opt/stack/data/nova/instances # Get the IP address if ip a | grep enp0 ; then ipaddress=$(ip -4 addr show enp0s8 | grep -oP "(?<=inet ).*(?=/)") else ipaddress=$(ip -4 addr show eth1 | grep -oP "(?<=inet ).*(?=/)") fi # Fixup HOST_IP with the local IP address sed -i -e 's//'$ipaddress'/g' devstack/local.conf # Adjust some things in local.conf cat << DEVSTACKEOF >> devstack/local.conf # Set this to the address of the main DevStack host running the rest of the # OpenStack services. Q_HOST=$1 HOSTNAME=$(hostname) OVN_SB_REMOTE=tcp:$OVN_DB_IP:6642 OVN_NB_REMOTE=tcp:$OVN_DB_IP:6641 # Enable logging to files. LOGFILE=/opt/stack/log/stack.sh.log # Use provider network for public. Q_USE_PROVIDERNET_FOR_PUBLIC=True OVS_PHYSICAL_BRIDGE=br-provider PHYSICAL_NETWORK=provider # Until OVN supports NAT, the private network IP address range # must not conflict with IP address ranges on the host. Change # as necessary for your environment. 
NETWORK_GATEWAY=172.16.1.1 FIXED_RANGE=172.16.1.0/24 ENABLE_CHASSIS_AS_GW=False DEVSTACKEOF # Add unique post-config for DevStack here using a separate 'cat' with # single quotes around EOF to prevent interpretation of variables such # as $Q_DHCP_CONF_FILE. cat << 'DEVSTACKEOF' >> devstack/local.conf # Set the availablity zone name (default is nova) for the DHCP service. [[post-config|$Q_DHCP_CONF_FILE]] [AGENT] availability_zone = nova DEVSTACKEOF devstack/stack.sh # Build the provider network in OVN. You can enable instances to access # external networks such as the Internet by using the IP address of the host # vboxnet interface for the provider network (typically vboxnet1) as the # gateway for the subnet on the neutron provider network. Also requires # enabling IP forwarding and configuring SNAT on the host. See the README for # more information. source /vagrant/provisioning/provider-setup.sh provider_setup # Add host route for the private network, at least until the native L3 agent # supports NAT. # FIXME(mkassawara): Add support for IPv6. source devstack/openrc admin admin ROUTER_GATEWAY=`neutron port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F'ip_address' '{ print $2 }' | cut -f3 -d\"` sudo ip route add $FIXED_RANGE via $ROUTER_GATEWAY # NFS Setup sudo apt-get update sudo apt-get install -y nfs-common sudo mkdir -p /opt/stack/data/nova/instances sudo chmod o+x /opt/stack/data/nova/instances sudo chown vagrant:vagrant /opt/stack/data/nova/instances sudo sh -c "echo \"$OVN_CONTROLLER_IP:/opt/stack/data/nova/instances /opt/stack/data/nova/instances nfs defaults 0 0\" >> /etc/fstab" sudo mount /opt/stack/data/nova/instances sudo chown vagrant:vagrant /opt/stack/data/nova/instances sudo sh -c "echo \"listen_tls = 0\" >> /etc/libvirt/libvirtd.conf" sudo sh -c "echo \"listen_tcp = 1\" >> /etc/libvirt/libvirtd.conf" sudo sh -c "echo -n \"auth_tcp =\" >> /etc/libvirt/libvirtd.conf" sudo sh -c 'echo " \"none\"" >> /etc/libvirt/libvirtd.conf' sudo sh -c "sed -i 's/env libvirtd_opts\=\"\-d\"/env libvirtd_opts\=\"-d -l\"/g' /etc/init/libvirt-bin.conf" sudo sh -c "sed -i 's/libvirtd_opts\=\"\-d\"/libvirtd_opts\=\"\-d \-l\"/g' /etc/default/libvirt-bin" sudo /etc/init.d/libvirt-bin restart # Set the OVN_*_DB variables to enable OVN commands using a remote database. echo -e "\n# Enable OVN commands using a remote database. export OVN_NB_DB=$OVN_NB_REMOTE export OVN_SB_DB=$OVN_SB_REMOTE" >> ~/.bash_profile ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/vagrant/ovn/provisioning/setup-controller.sh0000644000175000017500000000770100000000000026017 0ustar00coreycorey00000000000000#!/usr/bin/env bash # Script Arguments: # $1 - ovn-db IP address # $2 - provider network starting IP address # $3 - provider network ending IP address # $4 - provider network gateway # $5 - provider network network # $6 - ovn vm subnet ovnip=$1 start_ip=$2 end_ip=$3 gateway=$4 network=$5 ovn_vm_subnet=$6 # Get the IP address if ip a | grep enp0 ; then ipaddress=$(ip -4 addr show enp0s8 | grep -oP "(?<=inet ).*(?=/)") else ipaddress=$(ip -4 addr show eth1 | grep -oP "(?<=inet ).*(?=/)") fi # Adjust some things in local.conf cat << DEVSTACKEOF >> devstack/local.conf.vagrant # Good to set these HOST_IP=$ipaddress HOSTNAME=$(hostname) SERVICE_HOST_NAME=${HOST_NAME} SERVICE_HOST=$ipaddress OVN_SB_REMOTE=tcp:$ovnip:6642 OVN_NB_REMOTE=tcp:$ovnip:6641 # Enable logging to files. 
LOGFILE=/opt/stack/log/stack.sh.log # Disable the ovn-northd service on the controller node because the # architecture includes a separate OVN database server. disable_service ovn-northd # Disable the ovn-controller service because the architecture lacks services # on the controller node that depend on it. disable_service ovn-controller # Disable the ovn metadata agent. disable_service neutron-ovn-metadata-agent # Disable the nova compute service on the controller node because the # architecture only deploys it on separate compute nodes. disable_service n-cpu # Disable cinder services and tempest to reduce deployment time. disable_service c-api c-sch c-vol tempest # Until OVN supports NAT, the private network IP address range # must not conflict with IP address ranges on the host. Change # as necessary for your environment. NETWORK_GATEWAY=172.16.1.1 FIXED_RANGE=172.16.1.0/24 # Use provider network for public. Q_USE_PROVIDERNET_FOR_PUBLIC=True OVS_PHYSICAL_BRIDGE=br-provider PHYSICAL_NETWORK=provider PUBLIC_NETWORK_NAME=provider PUBLIC_NETWORK_GATEWAY="$gateway" PUBLIC_PHYSICAL_NETWORK=provider PUBLIC_SUBNET_NAME=provider-v4 IPV6_PUBLIC_SUBNET_NAME=provider-v6 Q_FLOATING_ALLOCATION_POOL="start=$start_ip,end=$end_ip" FLOATING_RANGE="$network" # If the admin wants to enable this chassis to host gateway routers for # external connectivity, then set ENABLE_CHASSIS_AS_GW to True. # Then devstack will set ovn-cms-options with enable-chassis-as-gw # in Open_vSwitch table's external_ids column ENABLE_CHASSIS_AS_GW=True DEVSTACKEOF # Add unique post-config for DevStack here using a separate 'cat' with # single quotes around EOF to prevent interpretation of variables such # as $NEUTRON_CONF. cat << 'DEVSTACKEOF' >> devstack/local.conf.vagrant # Enable two DHCP agents per neutron subnet with support for availability # zones. Requires two or more compute nodes. [[post-config|/$NEUTRON_CONF]] [DEFAULT] network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.AZAwareWeightScheduler dhcp_load_type = networks dhcp_agents_per_network = 2 # Configure the Compute service (nova) metadata API to use the X-Forwarded-For # header sent by the Networking service metadata proxies on the compute nodes. [[post-config|$NOVA_CONF]] [DEFAULT] use_forwarded_for = True DEVSTACKEOF sed '/#EXTRA_CONFIG/ r devstack/local.conf.vagrant' \ neutron/devstack/ovn-local.conf.sample > devstack/local.conf devstack/stack.sh # Make the provider network shared and enable DHCP for its v4 subnet. source devstack/openrc admin admin neutron net-update --shared $PUBLIC_NETWORK_NAME neutron subnet-update --enable_dhcp=True $PUBLIC_SUBNET_NAME # NFS server setup sudo apt-get update sudo apt-get install -y nfs-kernel-server nfs-common sudo mkdir -p /opt/stack/data/nova/instances sudo touch /etc/exports sudo sh -c "echo \"/opt/stack/data/nova/instances $ovn_vm_subnet(rw,sync,fsid=0,no_root_squash)\" >> /etc/exports" sudo service nfs-kernel-server restart sudo service nfs-idmapd restart # Set the OVN_*_DB variables to enable OVN commands using a remote database. echo -e "\n# Enable OVN commands using a remote database. 
export OVN_NB_DB=$OVN_NB_REMOTE export OVN_SB_DB=$OVN_SB_REMOTE" >> ~/.bash_profile ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/vagrant/ovn/provisioning/setup-db.sh0000644000175000017500000000140700000000000024216 0ustar00coreycorey00000000000000#!/usr/bin/env bash CONTROLLER_IP=$1 cp neutron/devstack/ovn-db-local.conf.sample devstack/local.conf if [ "$CONTROLLER_IP" != "" ]; then sed -i -e 's//'$CONTROLLER_IP'/g' devstack/local.conf fi # Get the IP address if ip a | grep enp0 ; then ipaddress=$(ip -4 addr show enp0s8 | grep -oP "(?<=inet ).*(?=/)") else ipaddress=$(ip -4 addr show eth1 | grep -oP "(?<=inet ).*(?=/)") fi # Adjust some things in local.conf cat << DEVSTACKEOF >> devstack/local.conf # Set this to the address of the main DevStack host running the rest of the # OpenStack services. Q_HOST=$CONTROLLER_IP HOST_IP=$ipaddress HOSTNAME=$(hostname) # Enable logging to files. LOGFILE=/opt/stack/log/stack.sh.log DEVSTACKEOF devstack/stack.sh ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/vagrant/ovn/provisioning/setup-vtep.sh0000644000175000017500000000144600000000000024612 0ustar00coreycorey00000000000000#!/usr/bin/env bash OVN_DB_IP=$2 cp neutron/devstack/ovn-vtep-local.conf.sample devstack/local.conf if [ "$1" != "" ]; then sed -i -e 's//'$1'/g' devstack/local.conf fi # Get the IP address if ip a | grep enp0 ; then ipaddress=$(ip -4 addr show enp0s8 | grep -oP "(?<=inet ).*(?=/)") else ipaddress=$(ip -4 addr show eth1 | grep -oP "(?<=inet ).*(?=/)") fi # Adjust some things in local.conf cat << DEVSTACKEOF >> devstack/local.conf # Set this to the address of the main DevStack host running the rest of the # OpenStack services. Q_HOST=$1 HOST_IP=$ipaddress HOSTNAME=$(hostname) OVN_SB_REMOTE=tcp:$OVN_DB_IP:6642 OVN_NB_REMOTE=tcp:$OVN_DB_IP:6641 # Enable logging to files. LOGFILE=/opt/stack/log/stack.sh.log DEVSTACKEOF devstack/stack.sh ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5510468 neutron-16.0.0.0b2.dev214/vagrant/ovn/sparse/0000755000175000017500000000000000000000000020704 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/vagrant/ovn/sparse/README.rst0000644000175000017500000000030700000000000022373 0ustar00coreycorey00000000000000=================== Sparse architecture =================== Please reference the files in /doc/source/contributor/ovn_vagrant/sparse-architecture.rst for more information about this architecture. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/vagrant/ovn/sparse/Vagrantfile0000644000175000017500000001203300000000000023070 0ustar00coreycorey00000000000000# -*- mode: ruby -*- # vi: set ft=ruby : require 'yaml' require 'ipaddr' require '../provisioning/providers.rb' vagrant_config = YAML.load_file("instances.yml") Vagrant.configure(2) do |config| if Vagrant.has_plugin?("vagrant-cachier") # Configure cached packages to be shared between instances of the same base box. 
# More info on http://fgrehm.viewdocs.io/vagrant-cachier/usage config.cache.scope = :box end config.vm.synced_folder ".", "/vagrant", disabled: true config.vm.synced_folder File.expand_path(".."), "/vagrant" config.vm.synced_folder File.expand_path("../.."), "/home/vagrant/neutron" # Use the ipaddr library to calculate the netmask of a given network net = IPAddr.new vagrant_config['provider_network'] netmask = net.inspect().split("/")[1].split(">")[0] # Build the common args for the setup-base.sh scripts. setup_base_common_args = "#{vagrant_config['ovndb']['ip']} #{vagrant_config['ovndb']['short_name']} " + "#{vagrant_config['ovncontroller']['ip']} #{vagrant_config['ovncontroller']['short_name']} " + "#{vagrant_config['ovncompute1']['ip']} #{vagrant_config['ovncompute1']['short_name']} " + "#{vagrant_config['ovncompute2']['ip']} #{vagrant_config['ovncompute2']['short_name']} " + "#{vagrant_config['ovnvtep']['ip']} #{vagrant_config['ovnvtep']['short_name']} " # Bring up the Devstack ovsdb/ovn-northd node config.vm.define "ovn-db" do |ovndb| cfg = vagrant_config['ovndb'] ovndb.vm.host_name = cfg['host_name'] ovndb.vm.network "private_network", ip: cfg['ip'] ovndb.vm.network "private_network", ip: cfg['prov-ip'], netmask: netmask ovndb.vm.provision "shell", path: "../provisioning/setup-base.sh", privileged: false, :args => "#{vagrant_config['ovndb']['mtu']} #{setup_base_common_args}" ovndb.vm.provision "shell", path: "../provisioning/setup-db.sh", privileged: false, :args => "#{vagrant_config['ovncontroller']['ip']}" configure_providers(ovndb.vm, cfg) end # Bring up the Devstack controller node on Virtualbox config.vm.define "ovn-controller", primary: true do |ovncontroller| cfg = vagrant_config['ovncontroller'] ovncontroller.vm.host_name = cfg['host_name'] ovncontroller.vm.network "private_network", ip: cfg['ip'] ovncontroller.vm.network "private_network", ip: cfg['prov-ip'], netmask: netmask ovncontroller.vm.provision "shell", path: "../provisioning/setup-base.sh", privileged: false, :args => "#{cfg['mtu']} #{setup_base_common_args}" ovncontroller.vm.provision "shell", path: "../provisioning/setup-controller.sh", privileged: false, :args => "#{vagrant_config['ovndb']['ip']} #{vagrant_config['provider_start_ip']} #{vagrant_config['provider_end_ip']} " + "#{vagrant_config['provider_gateway']} #{vagrant_config['provider_network']} #{vagrant_config['ovn_vm_subnet']}" configure_providers(ovncontroller.vm, cfg) end config.vm.define "ovn-vtep", autostart: false do |ovnvtep| cfg = vagrant_config['ovnvtep'] ovnvtep.vm.host_name = cfg['host_name'] ovnvtep.vm.network "private_network", ip: cfg['ip'] ovnvtep.vm.network "private_network", ip: cfg['prov-ip'], netmask: netmask ovnvtep.vm.provision "shell", path: "../provisioning/setup-base.sh", privileged: false, :args => "#{cfg['mtu']} #{setup_base_common_args}" ovnvtep.vm.provision "shell", path: "../provisioning/setup-vtep.sh", privileged: false, :args => "#{vagrant_config['ovncontroller']['ip']} #{vagrant_config['ovndb']['ip']}" configure_providers(ovnvtep.vm, cfg) end # Bring up the first Devstack compute node on Virtualbox config.vm.define "ovn-compute1" do |ovncompute1| cfg = vagrant_config['ovncompute1'] ovncompute1.vm.host_name = cfg['host_name'] ovncompute1.vm.network "private_network", ip: cfg['ip'] ovncompute1.vm.network "private_network", ip: cfg['prov-ip'], netmask: netmask ovncompute1.vm.provision "shell", path: "../provisioning/setup-base.sh", privileged: false, :args => "#{cfg['mtu']} #{setup_base_common_args}" ovncompute1.vm.provision 
"shell", path: "../provisioning/setup-compute.sh", privileged: false, :args => "#{vagrant_config['ovncontroller']['ip']} #{vagrant_config['ovndb']['ip']}" configure_providers(ovncompute1.vm, cfg) end # Bring up the second Devstack compute node on Virtualbox config.vm.define "ovn-compute2" do |ovncompute2| cfg = vagrant_config['ovncompute2'] ovncompute2.vm.host_name = cfg['host_name'] ovncompute2.vm.network "private_network", ip: cfg['ip'] ovncompute2.vm.network "private_network", ip: cfg['prov-ip'], netmask: netmask ovncompute2.vm.provision "shell", path: "../provisioning/setup-base.sh", privileged: false, :args => "#{cfg['mtu']} #{setup_base_common_args}" ovncompute2.vm.provision "shell", path: "../provisioning/setup-compute.sh", privileged: false, :args => "#{vagrant_config['ovncontroller']['ip']} #{vagrant_config['ovndb']['ip']}" configure_providers(ovncompute2.vm, cfg) end end ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/vagrant/ovn/sparse/instances.yml0000644000175000017500000000171200000000000023417 0ustar00coreycorey00000000000000--- provider_network: "10.10.0.0/16" provider_gateway: "10.10.0.1" provider_start_ip: "10.10.0.101" provider_end_ip: "10.10.255.250" ovn_vm_subnet: "192.168.33.0/24" ovndb: short_name: "ovn-db" host_name: "ovn-db.devstack.dev" ip: "192.168.33.11" prov-ip: "10.10.0.11" memory: 512 cpus: 2 mtu: 1500 ovncontroller: short_name: "ovn-controller" host_name: "ovn-controller.devstack.dev" ip: "192.168.33.12" prov-ip: "10.10.0.12" memory: 3072 cpus: 2 mtu: 1500 ovnvtep: short_name: "ovn-vtep" host_name: "ovn-vtep.devstack.dev" ip: "192.168.33.13" prov-ip: "10.10.0.13" memory: 512 cpus: 1 mtu: 1500 ovncompute1: short_name: "ovn-compute1" host_name: "ovn-compute1.devstack.dev" ip: "192.168.33.31" prov-ip: "10.10.0.31" memory: 1536 cpus: 1 mtu: 1500 ovncompute2: short_name: "ovn-compute2" host_name: "ovn-compute2.devstack.dev" ip: "192.168.33.32" prov-ip: "10.10.0.32" memory: 1536 cpus: 1 mtu: 1500 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586982291.5550468 neutron-16.0.0.0b2.dev214/zuul.d/0000755000175000017500000000000000000000000016364 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/zuul.d/base.yaml0000644000175000017500000000356500000000000020173 0ustar00coreycorey00000000000000- job: name: neutron-functional parent: devstack-minimal description: Run neutron functional tests timeout: 7800 required-projects: - opendev.org/openstack/devstack - openstack/neutron - openstack/requirements roles: - zuul: openstack/devstack pre-run: playbooks/configure_functional_job.yaml run: playbooks/run_functional_job.yaml post-run: playbooks/post_functional_job.yaml irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - ^neutron/locale/.*$ - ^releasenotes/.*$ vars: devstack_services: # Ignore any default set by devstack. Emit a "disable_all_services". 
base: false devstack_localrc: INSTALL_TESTONLY_PACKAGES: true DATABASE_PASSWORD: stackdb tox_envlist: dsvm-functional tox_install_siblings: false tox_constraints_file: '{{ ansible_user_dir }}/src/opendev.org/openstack/requirements/upper-constraints.txt' zuul_copy_output: # We need to copy directory with logs to have it in job artifacts also, # /opt/stack/logs is default logs directory defined in neutron's # tox.ini file '{{ devstack_base_dir }}/logs/dsvm-functional-logs': logs - job: name: neutron-fullstack parent: neutron-functional vars: tox_envlist: dsvm-fullstack-gate zuul_copy_output: # We need to copy directory with logs to have it in job artifacts also, # /opt/stack/logs is default logs directory defined in neutron's # tox.ini file '{{ devstack_base_dir }}/logs/dsvm-fullstack-logs': logs - job: name: neutron-fullstack-with-uwsgi parent: neutron-fullstack vars: devstack_localrc: NEUTRON_DEPLOY_MOD_WSGI: true - job: name: neutron-functional-with-uwsgi parent: neutron-functional vars: devstack_localrc: NEUTRON_DEPLOY_MOD_WSGI: true ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/zuul.d/grenade.yaml0000644000175000017500000000264400000000000020663 0ustar00coreycorey00000000000000- job: name: neutron-grenade-multinode parent: legacy-dsvm-base-multinode run: playbooks/legacy/neutron-grenade-multinode/run.yaml post-run: playbooks/legacy/neutron-grenade-multinode/post.yaml timeout: 10800 required-projects: - openstack/grenade - openstack/devstack-gate - openstack/neutron irrelevant-files: &irrelevant-files - ^(test-|)requirements.txt$ - ^.*\.rst$ - ^doc/.*$ - ^neutron/locale/.*$ - ^neutron/tests/unit/.*$ - ^releasenotes/.*$ - ^setup.cfg$ - ^tools/.*$ - ^tox.ini$ - ^vagrant/.*$ - ^migration/.*$ - job: name: neutron-grenade-dvr-multinode parent: legacy-dsvm-base-multinode run: playbooks/legacy/neutron-grenade-dvr-multinode/run.yaml post-run: playbooks/legacy/neutron-grenade-dvr-multinode/post.yaml timeout: 7500 required-projects: - openstack/grenade - openstack/devstack-gate - openstack/neutron irrelevant-files: *irrelevant-files - job: name: neutron-ovn-grenade parent: legacy-dsvm-base run: playbooks/legacy/neutron-ovn-grenade/run.yaml post-run: playbooks/legacy/neutron-ovn-grenade/post.yaml timeout: 9000 irrelevant-files: *irrelevant-files required-projects: - openstack/grenade - openstack/devstack-gate - openstack/neutron-tempest-plugin - openstack/tempest - openstack/networking-ovn ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/zuul.d/project.yaml0000644000175000017500000001144400000000000020722 0ustar00coreycorey00000000000000# NOTE: In doc/source/contributor/testing/ci_scenario_jobs.rst file there is # document with summary of all tempest, neutron-tempest-plugin and grenade jobs # summary. # Please update this document always when any changes to jobs are made. 
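# Reading aid: the "check" queue below runs on every proposed patchset, "gate"
# re-runs jobs on approved changes before merge, and anything marked
# "voting: false" reports its result without being able to block the change.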
- project: templates: - neutron-tempest-plugin-jobs - openstack-cover-jobs - openstack-lower-constraints-jobs - openstack-python3-ussuri-jobs - publish-openstack-docs-pti - periodic-stable-jobs - check-requirements - release-notes-jobs-python3 check: jobs: - neutron-functional - neutron-fullstack - neutron-fullstack-with-uwsgi - neutron-rally-task - neutron-grenade-multinode - neutron-grenade-dvr-multinode - neutron-tempest-linuxbridge - neutron-tempest-with-uwsgi - tempest-integrated-networking: # We don't run the job on things like neutron docs-only changes irrelevant-files: &irrelevant-files - ^(test-|)requirements.txt$ - ^.*\.rst$ - ^doc/.*$ - ^neutron/locale/.*$ - ^neutron/tests/unit/.*$ - ^releasenotes/.*$ - ^setup.cfg$ - ^tools/.*$ - ^tox.ini$ - ^vagrant/.*$ - ^migration/.*$ - ^devstack/.*\.sample$ - tempest-multinode-full-py3: voting: false irrelevant-files: *irrelevant-files - neutron-tempest-dvr-ha-multinode-full - neutron-tempest-iptables_hybrid - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa: voting: false irrelevant-files: *irrelevant-files - tempest-slow-py3: irrelevant-files: *irrelevant-files - tempest-ipv6-only: irrelevant-files: *irrelevant-files - neutron-ovn-tempest-ovs-release - neutron-ovn-tempest-ovs-release-ipv6-only: voting: false # TODO(slaweq): add this job again to the check queue when it will be # working fine on python 3 #- networking-midonet-tempest-aio-ml2-centos-7: # voting: false # irrelevant-files: *irrelevant-files - openstacksdk-functional-devstack-networking: voting: false - neutron-functional-with-uwsgi: voting: false - neutron-centos-8-tripleo-standalone - neutron-ovn-rally-task: voting: false # TripleO jobs that deploy OVN. # Note we don't use a project-template here, so it's easier # to disable voting on one specific job if things go wrong. # In Stein and beyond, fs010 will run using # networking-ovn-tripleo-ci-centos-7-containers-multinode. # If you need any support to debug these jobs in case of # failures, please reach us on #tripleo IRC channel. 
- neutron-ovn-tripleo-ci-centos-8-containers-multinode: voting: false - neutron-ovn-tempest-slow: voting: false - neutron-ovn-tempest-full-multinode-ovs-master: voting: false - openstack-tox-py36: # from openstack-python3-ussuri-jobs template timeout: 3600 - openstack-tox-py37: # from openstack-python3-ussuri-jobs template timeout: 3600 - openstack-tox-py38: # from openstack-python3-ussuri-jobs template timeout: 3600 - openstack-tox-lower-constraints: # from openstack-tox-lower-constraints template timeout: 3600 - openstack-tox-cover: # from openstack-cover-jobs template timeout: 4800 gate: jobs: - neutron-functional - neutron-fullstack - tempest-integrated-networking - neutron-tempest-linuxbridge - neutron-tempest-iptables_hybrid - neutron-grenade-multinode - neutron-grenade-dvr-multinode - tempest-slow-py3: irrelevant-files: *irrelevant-files - tempest-ipv6-only: irrelevant-files: *irrelevant-files - neutron-ovn-tempest-ovs-release - openstack-tox-py36: # from openstack-python3-ussuri-jobs template timeout: 3600 - openstack-tox-py37: # from openstack-python3-ussuri-jobs template timeout: 3600 - openstack-tox-lower-constraints: # from openstack-tox-lower-constraints template timeout: 3600 #- neutron-ovn-rally-task #- neutron-ovn-tripleo-ci-centos-8-containers-multinode experimental: jobs: - neutron-ovn-tempest-ovs-master - neutron-ovn-grenade periodic: jobs: - neutron-functional - neutron-tempest-postgres-full - neutron-tempest-mariadb-full - neutron-tempest-with-os-ken-master - neutron-ovn-tempest-ovs-master-fedora ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/zuul.d/rally.yaml0000644000175000017500000000507200000000000020377 0ustar00coreycorey00000000000000- job: name: neutron-rally-task parent: rally-task-neutron timeout: 10800 vars: devstack_localrc: USE_PYTHON3: true OSPROFILER_COLLECTOR: redis OSPROFILER_HMAC_KEYS: "neutron-hmac-key-used-in-zuul-ci" rally_task: rally-jobs/task-neutron.yaml devstack_plugins: osprofiler: https://opendev.org/openstack/osprofiler rally-openstack: https://opendev.org/openstack/rally-openstack neutron: https://opendev.org/openstack/neutron devstack_services: neutron-trunk: true devstack_local_conf: post-config: $NEUTRON_CONF: DEFAULT: enable_code_profiling: True required-projects: - openstack/rally - openstack/rally-openstack - openstack/osprofiler irrelevant-files: &irrelevant-files - ^(test-|)requirements.txt$ - ^.*\.rst$ - ^doc/.*$ - ^neutron/locale/.*$ - ^neutron/tests/unit/.*$ - ^releasenotes/.*$ - ^setup.cfg$ - ^tools/.*$ - ^tox.ini$ - ^vagrant/.*$ - ^migration/.*$ - job: name: neutron-ovn-rally-task parent: rally-task-at-devstack required-projects: - name: openstack/devstack - name: openstack/devstack-gate - name: openstack/rally - name: openstack/rally-openstack irrelevant-files: *irrelevant-files vars: devstack_plugins: neutron: https://opendev.org/openstack/neutron rally-openstack: https://opendev.org/openstack/rally-openstack zuul_copy_output: '{{ devstack_base_dir }}/data/ovs': 'logs' extensions_to_txt: db: true devstack_services: ovn-northd: true ovn-controller: true ovs-vswitchd: true ovsdb-server: true q-ovn-metadata-agent: true br-ex-tcpdump: true br-int-flows: true q-dhcp: false q-l3: false q-agt: false q-meta: false q-metering: false q-dns: true devstack_localrc: Q_AGENT: ovn Q_ML2_PLUGIN_MECHANISM_DRIVERS: ovn,logger Q_ML2_TENANT_NETWORK_TYPE: geneve USE_PYTHON3: true PHYSICAL_NETWORK: public Q_USE_PROVIDERNET_FOR_PUBLIC: true ENABLE_CHASSIS_AS_GW: 
true OVN_L3_CREATE_PUBLIC_NETWORK: true OVN_BRANCH: master devstack_local_conf: post-config: "${RALLY_CONF_DIR}/${RALLY_CONF_FILE}": openstack: neutron_bind_l2_agent_types: "OVN Controller Gateway agent" rally_task: rally-jobs/task-neutron.yaml timeout: 7800 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/zuul.d/tempest-multinode.yaml0000644000175000017500000002022700000000000022732 0ustar00coreycorey00000000000000- job: name: neutron-tempest-dvr-ha-multinode-full parent: tempest-multinode-full-py3 nodeset: openstack-three-node-bionic timeout: 10800 roles: - zuul: openstack/neutron-tempest-plugin required-projects: - openstack/devstack-gate - openstack/neutron - openstack/tempest pre-run: playbooks/dvr-multinode-scenario-pre-run.yaml irrelevant-files: &irrelevant-files - ^(test-|)requirements.txt$ - ^.*\.rst$ - ^doc/.*$ - ^neutron/locale/.*$ - ^neutron/tests/unit/.*$ - ^releasenotes/.*$ - ^setup.cfg$ - ^tools/.*$ - ^tox.ini$ - ^vagrant/.*$ - ^migration/.*$ voting: false vars: tox_envlist: integrated-network devstack_local_conf: post-config: $NEUTRON_CONF: DEFAULT: router_distributed: True l3_ha: True # NOTE(slaweq): We can get rid of this hardcoded absolute path when # devstack-tempest job will be switched to use lib/neutron instead of # lib/neutron-legacy "/$NEUTRON_CORE_PLUGIN_CONF": ml2: mechanism_drivers: openvswitch,l2population agent: enable_distributed_routing: True l2_population: True tunnel_types: vxlan arp_responder: True ovs: tunnel_bridge: br-tun bridge_mappings: public:br-ex $NEUTRON_L3_CONF: DEFAULT: agent_mode: dvr agent: availability_zone: nova $NEUTRON_DHCP_CONF: agent: availability_zone: nova group-vars: subnode: devstack_services: q-agt: true q-l3: true q-meta: true devstack_local_conf: post-config: $NEUTRON_CONF: DEFAULT: router_distributed: True # NOTE(slaweq): We can get rid of this hardcoded absolute path when # devstack-tempest job will be switched to use lib/neutron instead of # lib/neutron-legacy "/$NEUTRON_CORE_PLUGIN_CONF": agent: enable_distributed_routing: True l2_population: True tunnel_types: vxlan arp_responder: True ovs: tunnel_bridge: br-tun bridge_mappings: public:br-ex $NEUTRON_L3_CONF: DEFAULT: agent_mode: dvr_snat agent: availability_zone: nova - job: name: neutron-ovn-multinode-base description: Base multinode job for devstack/tempest to test Neutron with ovn driver. 
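    # Zuul never runs an abstract job directly; it only serves as a base for
    # the concrete neutron-ovn-tempest-slow and
    # neutron-ovn-tempest-full-multinode-ovs-master jobs defined below.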
abstract: true parent: tempest-multinode-full-py3 timeout: 10800 required-projects: - openstack/devstack-gate - openstack/neutron - openstack/neutron-tempest-plugin - openstack/octavia - openstack/tempest irrelevant-files: *irrelevant-files roles: - zuul: zuul/zuul-jobs - zuul: openstack/neutron-tempest-plugin pre-run: playbooks/multinode-setup.yaml vars: devstack_localrc: Q_AGENT: ovn Q_ML2_PLUGIN_MECHANISM_DRIVERS: ovn,logger Q_ML2_TENANT_NETWORK_TYPE: geneve Q_USE_PROVIDERNET_FOR_PUBLIC: true PHYSICAL_NETWORK: public ENABLE_CHASSIS_AS_GW: true OVN_L3_CREATE_PUBLIC_NETWORK: true OVN_DBS_LOG_LEVEL: dbg DOWNLOAD_DEFAULT_IMAGES: false IMAGE_URLS: "http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img,https://cloud-images.ubuntu.com/releases/xenial/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img" DEFAULT_IMAGE_NAME: cirros-0.4.0-x86_64-disk DEFAULT_IMAGE_FILE_NAME: cirros-0.4.0-x86_64-disk.img ADVANCED_IMAGE_NAME: ubuntu-16.04-server-cloudimg-amd64-disk1 ADVANCED_INSTANCE_TYPE: ds512M ADVANCED_INSTANCE_USER: ubuntu BUILD_TIMEOUT: 784 ENABLE_TLS: True devstack_plugins: neutron: https://opendev.org/openstack/neutron neutron-tempest-plugin: https://opendev.org/openstack/neutron-tempest-plugin octavia: https://opendev.org/openstack/octavia zuul_copy_output: '{{ devstack_base_dir }}/data/ovs': 'logs' extensions_to_txt: db: true devstack_services: c-bak: false etcd: false br-ex-tcpdump: true br-int-flows: true q-ovn-metadata-agent: true o-api: true o-hk: true ovn-controller: true ovn-northd: true ovn-octavia: true ovs-vswitchd: true ovsdb-server: true placement-api: true peakmem_tracker: false q-svc: true q-agt: false q-dhcp: false q-l3: false q-meta: false q-metering: false q-dns: true # When running python3 Swift should be disabled for now s-account: false s-container: false s-object: false s-proxy: false tls-proxy: true group-vars: subnode: devstack_services: ovn-controller: true ovn-northd: false ovn-octavia: false ovs-vswitchd: true ovsdb-server: true # NOTE(slaweq): it's just to check if this will force devstack to # configure neutron and ML2 plugin on subnodes q-fake: true q-svc: false q-agt: false q-dhcp: false q-l3: false q-meta: false q-metering: false q-ovn-metadata-agent: true tls-proxy: true devstack_localrc: Q_AGENT: ovn Q_ML2_PLUGIN_MECHANISM_DRIVERS: ovn,logger Q_ML2_TENANT_NETWORK_TYPE: geneve Q_USE_PROVIDERNET_FOR_PUBLIC: true PHYSICAL_NETWORK: public ENABLE_CHASSIS_AS_GW: false OVN_DBS_LOG_LEVEL: dbg USE_PYTHON3: True ENABLE_TLS: True - job: name: neutron-ovn-tempest-slow parent: neutron-ovn-multinode-base vars: tox_envlist: slow-serial tempest_test_regex: "" # TODO(slaweq): remove tests from # tempest.scenario.test_network_v6.TestGettingAddress module from # blacklist when bug https://bugs.launchpad.net/neutron/+bug/1863577 will # be fixed tempest_black_regex: "\ (^tempest.scenario.test_network_v6.TestGettingAddress)" - job: # TODO(slaweq): propose job with ovs-release and move -master one to # experimental queue name: neutron-ovn-tempest-full-multinode-ovs-master parent: neutron-ovn-multinode-base vars: tox_envlist: all-plugin tempest_test_regex: "^(?!.*\ (?:.*\\[.*slow.*\\])|\ (?:tempest.api.network.admin.test_quotas.QuotasTest.test_lbaas_quotas.*)|\ (?:tempest.api.network.test_load_balancer.*)|\ (?:tempest.scenario.test_load_balancer.*)|\ (?:tempest.api.network.admin.test_load_balancer.*)|\ (?:tempest.api.network.admin.test_lbaas.*)|\ (?:tempest.api.network.test_fwaas_extensions.*)|\ (?:tempest.api.network.test_metering_extensions.*)|\ 
(?:tempest.thirdparty.boto.test_s3.*)|\ (?:tempest.api.identity*)|\ (?:tempest.api.image*)|\ (?:tempest.api.volume*)|\ (?:tempest.api.compute.images*)|\ (?:tempest.api.compute.keypairs*)|\ (?:tempest.api.compute.certificates*)|\ (?:tempest.api.compute.flavors*)|\ (?:tempest.api.compute.test_quotas*)|\ (?:tempest.api.compute.test_versions*)|\ (?:tempest.api.compute.volumes*)|\ (?:tempest.api.compute.admin.test_flavor*)|\ (?:tempest.api.compute.admin.test_volume*)|\ (?:tempest.api.compute.admin.test_hypervisor*)|\ (?:tempest.api.compute.admin.test_aggregate*)|\ (?:tempest.api.compute.admin.test_quota*)|\ (?:tempest.scenario.test_volume*))\ ((^neutron_tempest_plugin.api)|\ (^neutron_tempest_plugin.scenario)|\ (tempest.(api|scenario|thirdparty))).*$" devstack_localrc: OVN_BRANCH: master group-vars: subnode: devstack_localrc: OVN_BRANCH: master ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586982282.0 neutron-16.0.0.0b2.dev214/zuul.d/tempest-singlenode.yaml0000644000175000017500000002347000000000000023064 0ustar00coreycorey00000000000000- job: name: neutron-tempest-dvr parent: tempest-integrated-networking timeout: 10800 required-projects: - openstack/devstack-gate - openstack/neutron - openstack/tempest vars: tempest_concurrency: 4 devstack_localrc: Q_DVR_MODE: dvr_snat irrelevant-files: &irrelevant-files - ^(test-|)requirements.txt$ - ^.*\.rst$ - ^doc/.*$ - ^neutron/locale/.*$ - ^neutron/tests/unit/.*$ - ^releasenotes/.*$ - ^setup.cfg$ - ^tools/.*$ - ^tox.ini$ - ^vagrant/.*$ - ^migration/.*$ - job: name: neutron-tempest-linuxbridge parent: tempest-integrated-networking timeout: 10800 required-projects: - openstack/devstack-gate - openstack/neutron - openstack/tempest vars: tempest_concurrency: 4 devstack_localrc: Q_AGENT: linuxbridge irrelevant-files: *irrelevant-files - job: name: neutron-tempest-iptables_hybrid parent: tempest-integrated-networking timeout: 10800 required-projects: - openstack/devstack-gate - openstack/neutron - openstack/tempest vars: tempest_concurrency: 4 devstack_plugins: neutron: https://opendev.org/openstack/neutron.git devstack_services: tls-proxy: false tempest: true neutron-dns: true neutron-qos: true neutron-segments: true neutron-trunk: true neutron-uplink-status-propagation: true devstack_local_conf: post-config: $NEUTRON_CONF: QUOTAS: quota_router: 100 quota_floatingip: 500 quota_security_group: 100 quota_security_group_rule: 1000 # NOTE(slaweq): We can get rid of this hardcoded absolute path when # devstack-tempest job will be switched to use lib/neutron instead of # lib/neutron-legacy "/$NEUTRON_CORE_PLUGIN_CONF": ml2_type_vlan: network_vlan_ranges: foo:1:10 agent: enable_distributed_routing: True l2_population: True tunnel_types: vxlan,gre arp_responder: True securitygroup: firewall_driver: iptables_hybrid $NEUTRON_L3_CONF: agent: availability_zone: nova $NEUTRON_DHCP_CONF: agent: availability_zone: nova test-config: $TEMPEST_CONFIG: neutron_plugin_options: provider_vlans: foo, agent_availability_zone: nova image_is_advanced: true available_type_drivers: flat,geneve,vlan,gre,local,vxlan irrelevant-files: *irrelevant-files - job: name: neutron-tempest-postgres-full parent: tempest-integrated-networking timeout: 7800 required-projects: - openstack/devstack-gate - openstack/neutron - openstack/tempest vars: devstack_services: postgresql: true mysql: false irrelevant-files: *irrelevant-files - job: name: neutron-tempest-mariadb-full parent: tempest-integrated-networking timeout: 7800 required-projects: - 
openstack/devstack-gate - openstack/neutron - openstack/tempest vars: devstack_localrc: MYSQL_SERVICE_NAME: mariadb # NOTE(ralonsoh): once MariaDB default version in Ubuntu is bumped to # >10.1, this workaround can be removed (bug 1855912) pre-run: playbooks/add_mariadb_repo.yaml irrelevant-files: *irrelevant-files - job: name: neutron-tempest-with-os-ken-master parent: tempest-integrated-networking timeout: 7800 required-projects: - openstack/devstack-gate - openstack/neutron - openstack/tempest - openstack/os-ken - job: name: neutron-tempest-with-uwsgi parent: tempest-integrated-networking description: Run neutron Tempest tests with uwsgi timeout: 8400 vars: devstack_localrc: NEUTRON_DEPLOY_MOD_WSGI: true irrelevant-files: *irrelevant-files - job: name: neutron-ovn-base description: Base job for devstack/tempest to test Neutron with ovn driver. # TODO(slaweq): consider changing parent to be tempest-integrated-networking # job instead of devstack-tempest parent: devstack-tempest timeout: 10800 required-projects: &ovn-base-required-projects - openstack/devstack-gate - openstack/neutron - openstack/neutron-tempest-plugin - openstack/octavia - openstack/tempest irrelevant-files: *irrelevant-files vars: &ovn-base-vars tox_envlist: all-plugin tempest_test_regex: "^(?!.*\ (?:.*\\[.*slow.*\\])|\ (?:tempest.api.network.admin.test_quotas.QuotasTest.test_lbaas_quotas.*)|\ (?:tempest.api.network.test_load_balancer.*)|\ (?:tempest.scenario.test_load_balancer.*)|\ (?:tempest.api.network.admin.test_load_balancer.*)|\ (?:tempest.api.network.admin.test_lbaas.*)|\ (?:tempest.api.network.test_fwaas_extensions.*)|\ (?:tempest.api.network.test_metering_extensions.*)|\ (?:tempest.thirdparty.boto.test_s3.*)|\ (?:tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_port_security_macspoofing_port)|\ (?:tempest.api.identity*)|\ (?:tempest.api.image*)|\ (?:tempest.api.volume*)|\ (?:tempest.api.compute.images*)|\ (?:tempest.api.compute.keypairs*)|\ (?:tempest.api.compute.certificates*)|\ (?:tempest.api.compute.flavors*)|\ (?:tempest.api.compute.test_quotas*)|\ (?:tempest.api.compute.test_versions*)|\ (?:tempest.api.compute.volumes*)|\ (?:tempest.api.compute.admin.test_flavor*)|\ (?:tempest.api.compute.admin.test_volume*)|\ (?:tempest.api.compute.admin.test_hypervisor*)|\ (?:tempest.api.compute.admin.test_aggregate*)|\ (?:tempest.api.compute.admin.test_quota*)|\ (?:tempest.scenario.test_volume*))\ ((^neutron_tempest_plugin.api)|\ (^neutron_tempest_plugin.scenario)|\ (tempest.(api|scenario|thirdparty))).*$" tempest_concurrency: 4 devstack_localrc: Q_AGENT: ovn Q_ML2_PLUGIN_MECHANISM_DRIVERS: ovn,logger Q_ML2_PLUGIN_TYPE_DRIVERS: local,flat,vlan,geneve Q_ML2_TENANT_NETWORK_TYPE: geneve Q_USE_PROVIDERNET_FOR_PUBLIC: true PHYSICAL_NETWORK: public ENABLE_CHASSIS_AS_GW: true OVN_L3_CREATE_PUBLIC_NETWORK: true OVN_DBS_LOG_LEVEL: dbg USE_PYTHON3: True DOWNLOAD_DEFAULT_IMAGES: false IMAGE_URLS: "http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img,https://cloud-images.ubuntu.com/releases/xenial/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img" DEFAULT_IMAGE_NAME: cirros-0.4.0-x86_64-disk DEFAULT_IMAGE_FILE_NAME: cirros-0.4.0-x86_64-disk.img ADVANCED_IMAGE_NAME: ubuntu-16.04-server-cloudimg-amd64-disk1 ADVANCED_INSTANCE_TYPE: ds512M ADVANCED_INSTANCE_USER: ubuntu BUILD_TIMEOUT: 784 ENABLE_TLS: True devstack_plugins: neutron: https://opendev.org/openstack/neutron neutron-tempest-plugin: https://opendev.org/openstack/neutron-tempest-plugin octavia: https://opendev.org/openstack/octavia 
      tempest_concurrency: 4
      devstack_localrc:
        Q_AGENT: ovn
        Q_ML2_PLUGIN_MECHANISM_DRIVERS: ovn,logger
        Q_ML2_PLUGIN_TYPE_DRIVERS: local,flat,vlan,geneve
        Q_ML2_TENANT_NETWORK_TYPE: geneve
        Q_USE_PROVIDERNET_FOR_PUBLIC: true
        PHYSICAL_NETWORK: public
        ENABLE_CHASSIS_AS_GW: true
        OVN_L3_CREATE_PUBLIC_NETWORK: true
        OVN_DBS_LOG_LEVEL: dbg
        USE_PYTHON3: True
        DOWNLOAD_DEFAULT_IMAGES: false
        IMAGE_URLS: "http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img,https://cloud-images.ubuntu.com/releases/xenial/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img"
        DEFAULT_IMAGE_NAME: cirros-0.4.0-x86_64-disk
        DEFAULT_IMAGE_FILE_NAME: cirros-0.4.0-x86_64-disk.img
        ADVANCED_IMAGE_NAME: ubuntu-16.04-server-cloudimg-amd64-disk1
        ADVANCED_INSTANCE_TYPE: ds512M
        ADVANCED_INSTANCE_USER: ubuntu
        BUILD_TIMEOUT: 784
        ENABLE_TLS: True
      devstack_plugins:
        neutron: https://opendev.org/openstack/neutron
        neutron-tempest-plugin: https://opendev.org/openstack/neutron-tempest-plugin
        octavia: https://opendev.org/openstack/octavia
      zuul_copy_output:
        '{{ devstack_base_dir }}/data/ovs': 'logs'
      extensions_to_txt:
        db: true
      devstack_services:
        br-ex-tcpdump: true
        br-int-flows: true
        c-api: true
        c-sch: true
        c-vol: true
        dstat: true
        g-api: true
        g-reg: true
        keystone: true
        n-api-meta: true
        n-api: true
        n-cauth: true
        n-cond-cell1: true
        n-cpu: true
        n-novnc-cell1: true
        n-sch: true
        n-super-cond: true
        q-ovn-metadata-agent: true
        o-api: true
        o-hk: true
        ovn-controller: true
        ovn-northd: true
        ovn-octavia: true
        ovs-vswitchd: true
        ovsdb-server: true
        placement-api: true
        q-svc: true
        q-dns: true
        c-bak: false
        etcd: false
        peakmem_tracker: false
        q-agt: false
        q-dhcp: false
        q-l3: false
        q-meta: false
        q-metering: false
        s-account: false
        s-container-sync: false
        s-container: false
        s-object: false
        s-proxy: false
        tls-proxy: true

- job:
    name: neutron-ovn-tempest-ovs-master
    description: Job testing for devstack/tempest testing Neutron with ovn driver and OVN master branch
    parent: neutron-ovn-base
    vars:
      devstack_localrc:
        OVN_BRANCH: master

- job:
    name: neutron-ovn-tempest-ovs-ipv6-only-base
    description: Base job for devstack/tempest to test Neutron with ovn driver in an IPv6-only deployment
    parent: devstack-tempest-ipv6
    timeout: 10800
    irrelevant-files: *irrelevant-files
    required-projects: *ovn-base-required-projects
    vars: *ovn-base-vars

- job:
    name: neutron-ovn-tempest-ovs-release-ipv6-only
    description: Job testing for devstack/tempest testing Neutron with ovn driver and latest released OVN branch in an IPv6-only deployment
    parent: neutron-ovn-tempest-ovs-ipv6-only-base
    vars:
      devstack_localrc:
        OVN_BRANCH: v20.03.0
        OVS_BRANCH: v2.13.0

- job:
    name: neutron-ovn-tempest-ovs-release
    description: Job testing for devstack/tempest testing Neutron with ovn driver and latest released OVN branch
    parent: neutron-ovn-base
    vars:
      devstack_localrc:
        OVN_BRANCH: v20.03.0
        OVS_BRANCH: v2.13.0

- job:
    name: neutron-ovn-tempest-ovs-master-fedora
    description: Job testing for devstack/tempest testing Neutron with ovn driver and OVN master branch and Fedora
    parent: neutron-ovn-tempest-ovs-master
    nodeset: devstack-single-node-fedora-latest

---- neutron-16.0.0.0b2.dev214/zuul.d/tripleo.yaml ----

- job:
    name: neutron-centos-8-tripleo-standalone
    parent: tripleo-ci-base-standalone-centos-8
    voting: false
    vars:
      featureset: '052'
      featureset_override:
        standalone_environment_files:
          - 'environments/services/neutron-ovs.yaml'
        run_tempest: false
        tempest_format: container
        tempest_run_concurrency: 2
        tempest_private_net_provider_type: 'vxlan'
        tempest_tempest_conf_overrides:
          auth.use_dynamic_credentials: true
        tempest_test_whitelist:
          - 'tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops'
        use_os_tempest: true
    irrelevant-files: &irrelevant-files
      - ^(test-|)requirements.txt$
      - ^.*\.rst$
      - ^doc/.*$
      - ^neutron/locale/.*$
      - ^neutron/tests/unit/.*$
      - ^releasenotes/.*$
      - ^setup.cfg$
      - ^tools/.*$
      - ^tox.ini$
      - ^vagrant/.*$
      - ^migration/.*$

- job:
    name: neutron-ovn-tripleo-ci-centos-8-containers-multinode
    parent: tripleo-ci-centos-8-containers-multinode
    vars:
      nodes: 1ctlr
      featureset: '010'
    irrelevant-files: *irrelevant-files
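
# Editorial aside (hedged, not in the original): "featureset: '052'" and
# "featureset: '010'" appear to select predefined TripleO CI featureset
# configuration files, with "featureset_override" replacing individual
# settings from the chosen featureset for this job only; the accepted
# override keys are defined by the tripleo-ci parent jobs, not by this
# repository. A derived job would typically only override vars; a minimal
# sketch (hypothetical name and value):
#
#   - job:
#       name: neutron-centos-8-tripleo-standalone-quiet   # hypothetical
#       parent: neutron-centos-8-tripleo-standalone
#       vars:
#         featureset_override:
#           tempest_run_concurrency: 1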

---- [binary figure data elided] ----

The remainder of this stretch of the archive is binary image data
(OmniGraffle, PNG and SVG files under
neutron-16.0.0.0b2.dev214/doc/source/admin/figures/). Only the tar header
file names and the text layers of the vector figures are recoverable:

- Tail of a preceding figure, "Open vSwitch - Provider Networks:
  Components and Connectivity" (OmniGraffle 6.6.1): a compute node with
  Instance 1/Instance 2 (eth0/tap), Linux bridges qbr with iptables, veth
  pairs qvb/qvo into the OVS integration bridge br-int, patch ports
  int-br-provider/phy-br-provider to the OVS provider bridge br-provider,
  DHCP namespaces qdhcp with metadata processes, internal VLANs 101/102,
  provider networks 1 and 2 (VLAN 101/102), the provider network
  aggregate, the physical network infrastructure and the Internet.

- neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-provider-flowew1.graffle
  (binary OmniGraffle data; interleaved PNG data with no recoverable tar
  header follows it).

- neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-provider-flowew1.svg,
  "Open vSwitch - Provider Networks: Network Traffic Flow - East/West
  Scenario 1" (OmniGraffle 6.6.1): Instance 1 on Compute Node 1 and
  Instance 2 on Compute Node 2, both on provider network 1 (VLAN 101,
  203.0.113.0/24), with numbered hops (1)-(20) through qbr, br-int,
  br-provider and the physical switch.

- neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-provider-flowew2.graffle
  (binary OmniGraffle data).

- neutron-16.0.0.0b2.dev214/doc/source/admin/figures/deploy-ovs-provider-flowew2.png
  (binary PNG data, continuing past the end of this section).
G,{rل#72*JڢZSi55]}xq벀Ĉܲ ;{ ) w,ӗݒNJ׸k3(ǷЭ5k/FVNMAZtR/r.0.ƦmVԏ"' ✮qx~#2}M%zPOkdq.Q6Ak N &R׋&?8bms&'`cOE~=|EA=A`L 0&lv̀ 0&`L 0]B]3aL 0&`LnL 0&`L .̙0`Uk.vqQsl)q(&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L d`U͕R{0/_Un$Eܱ'H3`WuOaިz= %R)-JHsw~9y$N:?NqzN[eթ뽭q:Rf$b{;V߽M8$`&L4QN D!0 7Ԡ+0sz4AT'=EIV/Wi^4k zJSkY9kL^-& |އ[@,!BMjyJv.w{'5/l-\io*٪t{)tO->RJFbH/ sL0n 'ŦVnW[lԗ %M Gf;JQ^G癞q0 }'Gދgy&lGEOB1X8e3=FfmGwkUO*K3 >ڲ(#y]y5\>zZ^3ȃ`LnX/#ĩYAHdr| U]d'#WVx|RUvB+-45wy_D`{V>g;(gE 4 w1A蔨vg:SXzUMr9S]6%C1jGRҦi%g/>0}ոSI }:mg(Kzg_fToTߌ3~H )gZal^,7aKPg~:V~;-ۂ ivKA^sfa<z$e3n/i^\Lt)o8u~#=MXa9!&`c/h~ vr`oLEvD+%LE,[ζmxCq`R~D.0ܙ.Fgb8N/ƧN 4`#]"ޕ=iޅn\P ImۢC;1'LGwM5\s 3 ;&5Jn0-Ug=C=uەlX#E0a|)aJIڭ_.Ąi~^)Q<;ˋz{<{w){ &?Sf.-[H57WJS-df* o0ېgc >Yr&Q9`"4hσZ۬|2?"ur,R^pa1f& Vū<͗W{h7ßFA=ۣ s5=!*3UJc/w)ͦZTM\4|["RεtfA 6}ntQ(Zuݓ&юX_hDPeڼ͘X%Z#lV#G;;lEvZ!Xgjɚo3&<̀ BFEA/`?ִ NP6^VYg0-`|+O0ՐTW[ZS+6@\knUQY*c1>u$FӏGO7A|Λ’&J=:9Wm?;MSA׮6&v`sȜ3*af%$N{ƚqz";LufdDSw={\o>Z`wF/JʐS-p 81c>OXyB25M1wĺ^]K,ۏFk˩rkLPo̊Y>1L:†1P{a1f&>.Ki0(m]'ggӌ0jsv8:oa輮Ѯu`,E*6@1qL @V.@ȨMQhM\*6l; .vQM4̄[:#iXܯ"˾BJ;eQ1\ HdЦ͵H?Z騻SDޗH fY}>2] uOj0VS۰hR1͗ a⛁()J+pH~20+ #d5 3֜j;0>oyGS,S/[c(\!f(Z1Z*6QJM/ph_d7x˿#gX2WmԽP:o*Y3)T,1SzNW7֡raؾus5[;-Xw78;w̳> 2_3&KK0s&L:.dҴAGPXveWЦQNC((n䝰a ddE6 ̟UvTir=]S*Fwk>əCbhߑm; |Sc3D㣁 tǶ} 䄟 W?YѨb(&Wcy0K$UU6,? }kK)Z>"Oc Gܪ%- ^X4>Uv" Y ei&'o׾P@5ou#+B?ތvNyPU%y;>8qhP?p*d?ܼ*D=`3r-_:͗?y}-h~Mœe=֥`}."kgҊ٨'dG{cL`Oo8&&6`E5~=rc$V\hW`=nSYWMςq:[et%߄Tw@`i~5(h;Sl!j "v/kbûSj(PWaP1=0i0 ϴv;::QG"Jyаd&G'fXPtWY&Yj 'c}`Q{IeB\A3@mJuhs^m;s(dpW}짦TV]gkG9s|]Zq/ ^Bצ.1^*?0}.wJJYV;qȝ< bL E߀rL Thw#hWc\`hlKc1b_oYC(<'as:hG? ;>Lc]0>Ӻ 2=l/xVM ߌg|آe?(}yuSf4@~꿜c=o<05MdzuSqB'v2Z58nEY;8|? qGl"([NFc@bmnsحHI1evTu>%55 oӃ^ݮhYq a. fvxO2V=o- },Ӆa'̦ge)j38( .9;\Q, >6;B[beգ˧xG[ ]8n X߰ɵ~醾%Op~8P=9W*}%>*(e2o7K}c(?UP}.wA)TLmq32W\,|8_0&|DžL`E%ba] |9i.+` ԟ qRi5F B ͢4M}¾av-~Jt)J xĈ&֖ 7A|7 Gk>]:n(7BuZfy x;/:!-D9O è3ee>=/E<@4k˕\j& kc}\0x2]"&WϦx&Ѿ3x,Hl];\c?wpe.KOy(P%wB ݨ8t vjCi:r''^I3$U-`0e?@yiq~Gl_n]oc K({0;[b;6T]T 'Uv|_@+1&K `0`-/6i,e‘e]4|ٵGޫ5?gq&vUiaMG~>kC_Ц%ֶtv}FyjYi[bKhmuMx͊RVE/姯rtK,PA k+3X5wQ8`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&@dA10wqA3x.RꢤLş*`L 0&@} (!I%W!r]?7z~}ӊ5^)^iET%:I)LZ(X `L 0&H<(M|tN) )VB^^meQ pHv:}=kL=ȎdL 0&`LYb^a/$տЗa=1Z•\PeT?CR|Zc| 0&`L 0!; yY8=Un{qaް%LbL 0&`LlϾ9iYa:{Og6AVJw |9 fd`L 0&hlx,%W@ Q&XZ"@r3Lͭx$*`L 0&vK\Jy?$D]XQ*T`L 0&`P&96IAB;z'F8 &`L 0&$H'9|FA }C>pL 0&`L $r:@IDAT(ϓoq+n僾\NxY`L 0&@ 9}-Z Ѕ\*8>`L 0&H>y%o\&4VśgL 0&`L 9 @'y\ %W)82`L 0&`IK2?$V>ʝ#3&0>HMH[{푏?7O=`ID$ . `@KFnXz-˃XM۝we*fjl@nX8WHyatVCI "nw)JhŗfL 0X$by,p&K 3=)! 
Muڡx$ނt\ZlWe-ь9[d2&Iz !!,]m-]& 5J~rCxG/3WG aPj6M Nn9%ۗl:4ս~{Q+%.@VX%` 2(g_i3ˡxXѦ1K *a^e˜yl:W2Sm,)Vu(%@ijbό%feqrݓ&֖tBghGkm[)xPƞtz{<Nиnti]zS +V]36&_3&@|pK`L ԓT}.F;Ēf2b4^yOZ94]l MW{`)r[6Q.F@hYt0/bR8t^vʆ$78ާ aݝZ-Mi .UJ.yڊU?ǒpL 0& + `%r-ke5$ De3K YբEQ3p**RPܫDa;fe27X`|EFY]X s=fuaQFdQ_gDiJpȽezPvD?ǎ 0&X6'?pP{')?\U]v-|Em3| sLDA~J1y:Bu 9);Tm{ (y&:{_sYtXiBODvU20,?<ҟc0ô^Tʛ~< 0&76"7ggL ԅ@~X~ ̋vHRr"ۙ㎂5QO|5韈sl{L e0ڂg{9Åv2: W# 2GN38]:y~q(`vݲc3-eP@nGe3A!,ǽH='f9"k&Jg>֚fL !ӳ+2agQY]BlOXcW*IhOt2Jw<)KL3 S ^?_ )TDҊf Kb7+֝_p~QfbVl;TUPWG">Up.0i].2jezmM~=p>1 ;we%ʻIubL+zwP{Eֶ!Cos}T/EJ)wdoo7[n) &\F1`_Z U &^Jg>ҊfL 14[^1Bo0M!o-1I`@|u4_u 4ڥ)1ڑZ&՟A3Yy˵lW'liM=7iayzM:s)/RMeP%wҝQhix( @aF5,2\[ %pr^9@ܺ\ɥ fW ]^U_;8)Z߇X}J!v8:6s8s&JZ91^td؛ ՛o'[LٳJ2ߊ*>_!ҌF뼟8yΔڹfljYz?~|⭁}'=#Ϙ1C]Ekgt;8?)9u?pNfoB3ٚ{9"L 0d 42?.L220&@)Y`L 0x $Jgxk3&`L 0&V>bā`L 0&v/AHRJ78IbL 0+{a#3&w~sxR~J&h,쪱 0&`L 4r|4 3&`L 0BRS\N&`L 0& +`L 0& V>KMq9`L 0&@#'G#@.>`L 0&h,Xh,5dL 0&`+L 0&`Lh;<(:H!5*bTrP/]w?5oDX4Rv[?p}_度}XsL >!u>LOǔB/yO494RJ,=l<]{,NY `nZiJ)ij>ּ>: uv8yomM~˥c`|SJ'ۨIT,qMS==WD])%Jo4&ȃ:8@uPGZf«rӒ"/KVJ]Vf0טjڭ#<禥56r~@ pǿy7~O}?j'LeގF,\Q~}{򁇽;k){~pjֈ?,Z 71 ⏔h6K}Cu=S}cB."oFPoq' @F!#<cԤ r_ղ^KxCwYҝy v2#MGkogg%תESk7mk6n4Oj[rG FV\]J`G*Wm|gK fg);̳B.QWJQ /%JOuQsi3e;n]U8'E|<T̎GtS\\]\d"Rjn}=a侟L8?N$CU,,ճu侤U>23MnOKvͥamZ-[N=ȓZ 7[i،KRO@Ҿ}O@} KA2RMgWc/~EK$-?itN]wɚ~=:s0Rr{eRTnaH瞤Jس݈ITFDI"Y I%o)YsҨycA,rCiW#^\^XnjfF@RG\&Y^Y&T^`IM܅$q߯gs4&P,0,:}Nqur_2*ڍCF=tpr#@}jw-Y'"d_5sl&HBdrgH1dkNM9> H`?HqC32iTN*%ƹ3=K.id)%KABHHo Xy$r_5D*KYgš 4ԥF[nҚB۪rR9Anƭb͆-ukWo1Y_/KV>ߨ;K~{zuj1CߧB|(-V~XH|Qg l7@,9i82r5dqlXoJz+9_'U\z?:|<`;DFܲDXyp5NZ)-SZ;#IQ{L,g))z-}Ţ~<[~/fPd]yVBkқ nT:Z,[%' y?ӏ9Dhٯӓp߯c}~0h'^HriVIXl8CcTT +ٿC~Lb9&łOo-[6?L9XZ$,aZaw@A6gߋ66O cYzQ.QB? .T2|W>avWnݢFyxA6KWn/4Oڭ8b8.+Q#gX}*==pcG]߲@ۛTzft:qi=v_D6[v2!цybk5dBVO&zI|"-5ᑕ~XnؿݾS{? _.Xu,Bll^*鲟[6<*A̯`ͬk˅˥N8\=j-|1s/ţR0o 3Tl$ }!S{LV[2U '߉KT#H"I ҩP:CT_ZeGCm }~*x]\}ΉC:[}7?KҾhY}DbvoeJ{_}[4 ;ٴ!]xzO2~w) q`6 N~"h/{s+̄if+:rx徛.:5cbYѳaXOTKo븎eGOU}N %,!]wu}!uZ4q$UsHq%n[[eطy@0~wx_YfW}|؜[/>C\|1jw19's+hLdpPՃ2ԊY[R\jٔOlRl] 3c;EU<()Ba @lTO<@+*ïBߛBlj} !6pkV(.z/cw;fwH!X6g?vY&ѨcP T,`e1GӶoHk M4%\^3.q36t靹 bz I|Jo:hrKEgȇYWv?,5^嫭:UpI'%͕̇bBݥh_b %[\S0괧Fo A_5Nr[X/JՔ<(wX7qfU%+v ۯu 4 vGO88 voJEfO~=(E}A>+g^h̭ܾtP\~qANYz CGmo;"NXwբi:c/ۍ.^c<},n+IƩNIU%ٳ[Z˲ιd_ -m3{f4m\lvk6jkkĹeɅ,_}}0Kh /K,y␫%JH't%;68FJ3fFO#/-ǘ%x?,s++P?Ҥi4MK %]< ̐@y}`14/t ߣK%ƜaH7r6y򥙖i%g̶NM&#vjZݵCk}%Pgn6}n Is}_KBĹ,>Z#ŭWI&-q"Yz^7˳󏱘3XvIOBmVzt;R hC}_$9f) &")NF?c܆EgL}n8[v"0]N#vW=20 Kc%C[\ddCOk6nLl}& dw R2njcGԷ>]`qvjgWE5oXf ZA.ZKG Ds,03hc]Sc Ɩ552FKʬ$8҂VZZIAfX FL}-]GO^aƄliv; Teɥ{N(I%ʫqxGҍ'd,S2+d{(*->4ߍ;R"{E/PkTBԺZ+zEp*ADAr!`!O9:d%^%/`}.8Bb@/{lUbv^ac=79 yRZ"$@;._?ukn 0X0{"zr_400!mPI.ȜsNsO !KZ)YFgQd$y劌%9Q-8EO(0屴@`}<9{?J|Њ#a^ލ۹OB?NsEs7[Z8 ֥B`ogFǫ=SR )}uM-L(BoٓGgƱd.*wwJ`?[}Hbs{eJ9߹(n7K1Ҫ3`RR"(+YR,5LK,\kGi=;OV& %3@;sʾv4c࿊H[]碎G0҃ rt@. tX0?|}/+xg@ܿ?k =+e?VL} PR!sevEJC FRRQSKO[χhHVYo 4|D d%KM*IE" )c$ϐHG@#HB S8MHR ))PY@s>|D Y"Uut)i}3V?(W֯Xgќɼ06=,ՕtJ$]D"+on'/I_tD@" 腀|DH{WV;_snE?sHz=By϶ҸtHHdK+CTD HG$X\i;.z* W g+w:AyssyuD.D@"|D#etHJK%! 
=qZ>H"\;y^QǢL(,4DYeHhdԐdY" ##QdB07shÎrz4{T+8PE IIZ%>0D@" 2JM$ΝM/}^pMsensUmr!$gy8](Vs>\UG;6jjkNPva6RJ62vRHzNOHCB- :~*6jlm'H@W;o;NY2mϓ Kُ|i HKvwȾ"h4P~NVrإHf)sH~bYCNZNZq'9QAzcSy)<\ry2鍏PLfO'S+rQa&BmwҊ ރd{G'mw*RK#$&s4nV>iy 6deZ >A-$S젔}m04-at8Ne>eA{p3w1Q c>^]?A^0*G(k)% l{}k5{9:{Jw-mphhgxe`B$7ܮ6s-Ѻ-f^Ͳ gܩcZkY~eKOϷP3<TSC Y&rŰ5rꤒ;xk]wˇVmf/˸AG&j*˨'^}ie|O#Z[lњj*+y,C#:lnEҗ;ҳfйNU\ 2Q,{VҲ3kŅ [WF+h*enUd6^ܴ|NOԱ2KQB؋R8U6ɦVr<|_*4GPa^XpK;S$|3 wx5[Mq/ ڜ eiwRym+о_кmj# >@:6Fr>#:oGk/~');Tњkl!{eH9/[OJ#J6.e_;\MC4>8jA6k%ģθsNYG|SKΝ;L& f6޺Jz DKFZ;K=hdO6+Ga!,X~`X-;Rcq;}4HdB(CsreƱr ~໔XR'6 d.+vÓ G3 M#i‰f7+sܷpR<rbx-!tSu0,lu-28;Fȝ t`#䌙ir[8\`O$JD-w: {g'm}Y3x*qhW uO#ճ{W?ȩ:sG_e#A94'(7kmYk7{3WsXT  8*\#>G TQáZ2>L-(I,uNϝahU vj}0{:˾-hjnW?ڤZ㡕ZӸa%eTF7¥mΙ[F QX疯將62ae7U7ssWģ|!r? ^yy܎7*urc9ќ[M}:U_H?D=a/"e?4tT" -3Hg0:\C90ZElxxAXcuEXd|/ȳ;qdL-\;Fg%;w]` ^{j*L'R@V7p2#V}5 RV\ MUFFt;.SO.-P3:]'JSe'_C,ANoonQ;aVzGh\4;0Dw)a"e.m㑨P@={ <ڨJ/d9g|lh,˦ "ui0[Js:Nw~1T=P:ڼ鬒qI xDO<6^c\!U<&[K>ٺGҸ8& tZ=j}~}Rgf N6k9n_-N,\W8EO) :6t"@!#n$C>O5lށcȾl:sN6>B/N VC>Y@,gdDċE)m|7:Z^ m,@[{f2^p.V.6c -D_}%)xdAn+f+me2XY|?S Xǡ&[Xyt>Ҿl<@yՉ\ ȍͱq~5[r(lD=PXC2StdUnxUf/!L:Z- 4Y+Mcc,:+(XY8נoWZ%@Ɔ6n?N7굢 Z=x QWY[ɩ;9P|}Uw@x '++* .㣳yTsloQY"C@6^;%CllVB>Udo4f6)?Wr. v;)!646ӤA7LR^x VY߹e%&:> Ic_<}(cY/RP4^@V-vmp1;ûAΎ+=GM" ꀏ;k#~ѿ,] m"6IP@g}?rVwph"zBuC0g"Bo*|d$ t>R])k|)3ZI+aWѱL^X=!6WRV79G~]lK]9\+w) `F)@.^H˞LF8g|(ag|xR]75>|)qV^`;a3?#)=)6ZQO "hr¢&'8B|^]pr`HA'`M-ne$̗Hdào*|J}< ';ScsXuZc$x :h$!HJ oi;yQ6V*|Z[* [>`dRG=5nIVd!;ΞT)͝0:B|{'WnKT $Y-Ð$Y/rS@mђ^heAIjcpA!ȠG)EˋNPjj>J8E#\px)},H{'y8Ge_O$B ԶR@<^rsC'}zc$Ay:?协(?IE7sj+dzA%VV^&6YuQT]s39Rg>Ia T!>}0>g=5p*HVz2`ӏ4U)%V}?Rꣿ:_TS*6&n*72xFs䙱zqZ8ڬ%tE/ӷ-_|cQ?)AM2-kSY4*)B_oWwӽWo3 ל 9^.D!ٜJ1W+V)/;{N-tǸVe]T7!gA9Cj_<X'9Z̾8rq"ΐ+Gwx.BzEt1= +ʴ̧?#?[7Eϐ,2c{ -1>IM_2觧TЧW^Dky{r40_&h!3QYIJobهch?DԿ:ζF}bZ{Ņ9gМbҳOoL#CV}B}a'ut6m?{ј!g)&y xρN()~mTowѺb#~b|! 8&[լ yr"e[&{Í產|Z}]Qw$*d{fZve/*U $ypѓ+Ux٬tǟZAN=7M ɡ 59jf;<_ . u;h[C]CRmQn/_83z CH?ɟA@`)!!(ch/'rb*iѐRqXm>POrZ~bbi,h>6DiwV6, uYʟulqg;NOmxjv_>eڈ,Jw}iM]`6؋3jk6O, ^뺏m6-Mfk&X1=rY=.ZDmJvBVʚ:jz^/rQ7ݣU􃙓 $)˦VTDo;B/59 r/ :_9x>ċ4 .βflsa龜-z_i1RYT:^A"𿔏ū`$3bg4͇/ eT=~=.atᖉ9P%ޒMl$m;8rX[[}V*F3bug䌲bZe,?9E16j} T &Ɉ,+c`,}gt*,P_>ZY l(_}>?PKm4eqNU=}LBؓ\8eie0v+kr~&[|MP/~̾0ilu=A[껍Ìܮ9 y<'%S K(agzzpdhG(i|ƥ{s_ZڪrC#6>Jxs ( i=;? K`cx C%5ތ"(kO3.⯑㯮pcFxhOhX)ۗwc*| B[9T#&p9v= yn'R|EO1^̊90Sdf9Ta&9<<=Ƈ W`n9~ga!Cshϗp26w6<.ZxB^R>jm=rÃ?}N009=7OKz< TIv \}YB# îBhK; l/Ǐ!dq,x>Z;˒ӦI]L6Lp.W) @ffbʚV>آ+xrs>%+DۇUur3PI  훚ڨc{ţtllXYnlUwĢ1Gř!z9~nbP d磆Ū@cBvǶC1e %Y܉>_xtJvQļl?<=U24Hrޠ>TѶvzQeĮP+`$K ,#鎏>.|^i!YXՋWƎT 8Ý(%lK 4z7&8]F7: Lh#ٔEg+(1ʱaų \W”|9&#p5:dn Կ(iLOnd㺆gPj7~]nwrKn lv~]Os4=4vbQ凘#pnhܷ~3M?FnbS9(B9WATXj6{=~i;~6{)ID0猲"C} 9Ĥ{/+`rԣ0IiVU?\3Y5ysѣF~zp:`o@< pGuC+m;XCy968͊(nO'yx>M>@xEjˀQNQ9RE]rCJ'Ʋ@"dnW4llት0Pre&ޚi9gRYnb+3X n}ҬbC||ZSG|Z3 꿺V,$׍x/1+L\N(Nn0D fmIH#4.Gkp;6Qɷӊ>;hNN\qb|<}hd<%ً+`į`y}F+U!2?8+:o!aƂ=;W`T۰SgOLN9A/ʇ=>Í!lĪr{8t01@屝(X/c8i8ӽ}v!ruݱ?{-uND:>Kv3_ef/x>OρG#XtfGXG蛫׫IW7p凷$l:?v0'H9qux&nL cɣ@'w`ܬBvru8eі{CG9#)#hk>~^~ ߊcJaa 8E>J,7CD=Y0 s"5*>Jn=Ch_KlTщ %=mw]'տyr=ԪWlosPYOJ?#*הGx}ƮRF@ /GR̃teL|߈ol;2a%VUXS><×ќqL9.a /)t ^oUz!Dxz"a%+/, -:O"dY gDZ扩Ud*#ٓL|HMs##LD Zٯ/yserV>TC((ohRƋ/^o+[ %2D0k<xZFz" QtH?MM ؛3Fhn3ļLGjBݷ쏊l 8{"%a|(o_`5Rfɚ!dٽxA.C$]e? 
C__Wr~2:e4 V9Jk M@l {"CANgio@>!D\ +T K 仨rȾ˕M栁Pbp&J, ֺʡJ^l.Wݪ$}^|*|C3rߍ@`Huu} -WAQi k^+K?8ths{d @T&qЌyD a6ꨔÓZZNeJoAfR} JcY>Ǹvўp8,_̑]1sSS¤䜓'8q;ǟYhlWK$ Y {STZ"IJtl<ه ie!X0׽rQc:eژR2r1mZja$̳,nQ&]$R:#}@ }JGW>8nڜLzxMGaN&Sm8i`&@I^e=TJsiOllZD5Ρ䵴Ρ' (/-xX}wP~K^N]=$;v3zپS?8.d?;ՒmLF}T}I& aGcjA35Q67;7`}6Ev5^~45x6?~(Ư5>D~EEϮZK&].'}x@ۨ^/95NMIË4{@ QW(hx9UDV.aHD 5:q'_Fx0JټWNv6bQ`'i\I1Ms  sҒWC7!&60exq֜vmz?@^+2GBTm)jBU3a"V GU*t+6rUټ.exdeYG!']`Y}V Caǂ@*~2Z 1Ȑ˒3;6'5\JCʖp?m<^+S] hSJ" LUL;x \!ރb=dLE[9Nz|v[r? 6xe.fa=u[inGMMgQj<G&JN*eGnNPvV~O@2d|*-LkiӾ?SuoZE,/8*ʛ5/RmHi, y* 2t Fy"2hq;>qv,ʢ yB!=k[ya'@檱\Ov1F6:\J&2If9ERYscBܚ8yPy?)e?Y.X]3<$Uċ>qb;ynuuW) /.DG۝R(L5?et3RP^iN,˦<, GñɇZ%iOP?VMȌp<(*<k\H:t"sec ^HЧxdS^n.e+Uy=@`0!K&/_ʶug.;\BZd(V?1 @g.LP.p+Q* +dYC^ƕdѤ|Hʫ[h:srǟhSBa? -<1<񿸣JTbgH!,9.cVcƇr}Z?}4Ĝ,e}؈pCC!DxEDHAx7̜}f.@s fٟTBۆR[ fqrg&.eٷ6}: a4)2F g*?O0߃eZ)}|N$]tjZ_{4@vSIX\ݽnw,1s谰{iM!O!guΊϡ 1уt:#hw8DGk-z=vu L6LUF{Ц&ो6tͼࠛGax L*$n R[G.w>RLOM,qYFYhS P퐳Y!XˣYQʕ8wAƭ)iAF0 Et@s*Ⱦ^osf]w^=P,>ǽ7c~ }j8y` GeCt}Fgr^ȾQd_ J픗ɲ}C繸@ BZ=ʼ#Ⱦ;bь TIY[ 2s֪N^$5ѳ^Y\!d{!x>!}Ԗ} ?x0;>}k?ŻXWAkf,GBZ *sgslחV#ho; .&/T 60ۀBG;9)vpYWԴJ!)[!XJ0J_ _ȋM:ry@GȬW2C^aϲdicBfxaߺB2ݼ&}!3I(8eeٷ;e",;;b=1c%O}1c@ZQD=~ە ŀ>}`!:uA{E5U +VJQy U=qNa=[*FEo47 =g'#;u*s1\*xA)Nhdc&;LoA E Fn.gP2{BM^)]ٳku~ƌF .GHaC=u^1,俯Ӆ﫷dסޡNn,U p(u]J v;+-XxZT<' axn+ (ZBߢaP>D( QB|_$_w#dd\(־N4{3r &ʾKYMAeՐ}Qp(Zr(U@aUIgΥq^LsT!ǝ= xp`eCu) {`eG{~g.| -t!UX0F(>>Fx +`P_50ce YХއj w+V>-B0+_iku+%D7V(K5Nrd+AfhYBeJ:TfqOss> * ɪmsYkX߅"OG:>f1.w4OE`tP < y=CFf~ )aWy=^`04m{ݳeS0Ͼ`ns|zy?HOTH{*!T" J>EwC[U|G5J{=`J aVb0*ux^_U7(: ķ/j5D x~_X[aC˸.O?yɁkE,64~Vjg{k>mҷOR& ڝb<\ Y;]x ,8/#\߳~3Mldߗ7 c3{CKWPSdzR|ײާEĵꕗj]=|))hPQ0|}#3<2]`T(pD(C+'F|^_-t:nit@E[n`xWg `G e?6} 4W ХͷGއ%g>Mz> $]>n_=ۮZ"{3w /@`x( `,aSgH %  R&ٱbjvxm#}zS-\S{6ahet(|4^Э_pjWYt(0 ,v`}sf Qg&m t2㻟rA50&""<jV /DXm f4Îyǹ>zpFc?d@w)!q9YsϤD+}~z# dA'(=e7Ĺh}ݾA./YQ61u zE ;_P[Znh?zՏΟo{gE\*)eÎpe?V3%g) LxWt08'٩)Eܰn-/oÍL48w5KK3 ;dJ ~n?_wNozD1"`gp!d{}9f\yk~Fo v]UT⽊MLN>}k.5;/ ]Ϟabmc;#v嫕 HfUpF^B>ÑAȾc3;{싐/Ț}zs>p |;qP|X@xYˎYzH7 B)CO>wѵ7{jA\JU@NJ`1+ ?RQz#Q54E[P+xP|<ý?_]}qS?g{l(!KuԻ~txwR}8WRH?Z°^?|65(aX3 TBvю!H߾5"ET!&@?oo?4­d> z6Ⱦ?Y;%-^Jry+`Ϟ~oƼ޹Sꦕj2wJU#P5;b#A猴jniêm^R6XoMCLެ93N?k朳]7r?j.^h? KĒ&%W\$+Yǡ? $(K~3 XI/3lR[o0BbmFXi*2gZrFSU RοZϊ[7dRF (!PF1p,V;6a0ѣ&LUk43VID VY;Z޵}ז’_$ e|*P 5e.Ce0@*EaT i3nw?DC# Br(ֲP,#bǬXRh|u%/͟Q_ݐ3mNނ3=٫k%5PU5fJJ-`(B&2>R(`Paृ8-叚,Nd$*2uu-*͡Xzb3+0Y'k[?a߾R۔s+B-F\0ofӺ wl?ֆqKyk!)l|/X YhxCl1 0p TD 7R$ߓSx# d^Ȉr[۹BbqH797r>YDG>A U=Xy _-eFcI'{֌ _:}oF2!6ȅQҶj77Њ902 o.~nG`C{l*p:k!=bebLXRa痙ϯ"q)'pZqrk[J%kdFۀZ:?UQ0͢AI☨؇qۤ;U4BDlXUϱž[G r_V,tOosl1BΥoޜ7g}:V_Nz_߻ Y˾`} ojQ+kjE#R":%^4DρߓHLgG8}p(V T@"}@"Wpc"H$@V,|۠7X+"xK#SzOyebW {D.UMye>6]{D@"++d}%@";~{^W&b۹%", +Xfl2N F.<>O?"~{D@"&PMy/D@"F3yEt{=!HJ(֮_=fwHj0ߞc|y}˺uUN}'>r--iHRi|e-$@B/<˥: 7ݱ;j0f'dٱ؋u=!Ԓor/HE@"(H$X~mV\\΂<&z6DzNyrɇO."gg,D r9vJD@" B@bu?P p4'o΋x.YB- PW!kO/d#䖩/`ȽD@" i|` @C+o\_b$t^$y8Ew2!{ >i}F!ɒH44>4ID@" HvP;ʬX-7޶_7fZư;Q&^s˓!_] wA" 4>FH$uXlcl [5BNOz++|.$j]D@" bR7y@ƂxE{\T!5S{|h}_?.hhu:&}3ZI$x! =BZ>G" Hz Y͍uÊݜVc|ǷpL~0CCF[K٘\V+^0llk]c` 5ϋش-^?#G.{K:|ID_v鷶GD@" H+dVa̗s2qxq(V !i`J{B&雈I"Lq*zl_Ы32*<'eϟY M~$jy>t&FD@" H}wxcЛfp(5B^s\[G@Bv;*o󁩖q:< Rx{.tU*rH#.,Hbf=LT[۹C~Njq$ ୼Tt發EJAhYS!8.tA@iYOD@""tټI57bdE{.WݮMj7ޱxĄzތ*bx rqL%4>Rò~D L(,IadՒ-7~dijk ɺ {5Ӟ|d<Hy ( +-=0<^Ƌ3(b5d(V޸u\"7>_oO) x! 
9RWndzMQ~tY^^Lxa۽ӏQTNlThE(컼@^qH9]7Wb=D 2mm]?.ċIqEިOSռkװܩ.̢k׿,jc퓸٧ j FoTEQ!/.0,\xuԜ,=oV'/ǪL%M;voK]l9zyտc$U"iHj/PueڹnF^)OG@+[^j= :TZES7?O Jt[ZHJ$A麻{AN^_<^޹Sꦕ#%E@ n來{m{཮eی7%<}]S,PX kv&qms5\b3b)OOvi:²gl[\i}J\NC>ZeHF{Y9+ͷ~BgL /'QFMFtmmԊPUngoZj}WO|kw.o/y&M:Son9SR^V*ϓ7D@;GOPEN>G'>~m٧D½F-_fdMx`r=߿7Ӗ}cW/I{ 07N>^_[B-O7m'r$& =Rk?/r/ɪh5;dێѦ=ϒFÊgH@Dӫ',5t:({B>$>%!kj35 $N"7/= mW-ЙMQh 66c2[o_O!}"ɣ.{ݩo\ >!9uۗВWƆ9A 0!7{4o9NUd(SBJm%GhC#GN((:K7^z2Bl&y_c t_|b>&d9s~F^*gT[G Pm.qA"O.5;lN-N{">>%3e5KV``y\ceJ4x$+KG7u{T6mYwu+t3d*:[H,Otӥg>LY2?ÎԬ~9+D |w*Zqb/AO}J kTH#*{=‚[jnH'h;hC;HE2,o'OS\At@IDAT/_IBv, z%-ڌv S0jiO٧$|4>RVɅ@rE"MjHj[-M)^t~(rۿu*./H ZNzVFg(5Eֈ;$iާ$T!@IYdF@Y#O"벳,Ci|ZUƔ~8iK#?/Gtɫ%j 9=,&sư \@P hhChKzW1,]}\O4eJ(iKbPŹ 8nTD=g z>.`o#./JP3 &cI^M+'/\!:"E" H02DؕbdR{QQ^6]yl=y4U_,YF|>P]c fhم_*Ǘ}H}!WxG]ﯢgMD 0ڇį4PПnL;eLSԤ9/vXKU ftYʓ~BȊu[#/܆ LO/+R;d|D ]Iwzf￾Dw]}-sXM;TѴ h=,$zmFr{ܴKUu˿OQ5J7| Pe~uS&2`G_TowGU5WCOTI>%QpslJ2C)2 ƋRG%$H]{vc/$˵qiHQmui!1ab8fN7Ee4`^ ,VKﯧ(F=I⻡ hzg)뀏8swZ{;64Ӌ1-8@!@HBB{! K`mlq/ؖnIbkoiϧS;]t}~3-3;K}r+}z;jX=<nUSyOl yhkWV)VfhݎhK[dB)֞_Sǝ>L,SZr6{ђ視H\q֖Vse35!l Z*ه h.|@yۯx;:g"Ϯ9'fbб4o&œ1&LʁC=KOa%(6<:+u. A p:!A&|,ݰn,9p:jJhݚ /bie Ҝs&iG NjjĠFG)ڱk>}6`pFꟖĚ-_^ LOYm_LLUY9yavXϿҧn{6دkwZC !ۢ&l^gM͠pɴmعg'G'4s/[܀ޘN;NﯻpEB:G<ۧ 9yTfZQFϞ@<`6ڜ}DX_2cm}ޮ ~0B(,$b-J yFkx=%/5>ZQ{^T>`abnOGZ+ ~mr~hh rjմ6a0L ׮_͚aDj&ձ9'hS,Y0ڤOXoeOEmp_Y`vMOg୶>e W8?,Ap| D|O:zq oEx4Hbpr5f/gR!f-fG+.9Cim(?u6{{ LEgGkh4S.luCmq 5< - Xm{bY) -W̚j+bA68}% &ɜ3OffvD rcX3*8c6(6w9B%DzM6؄i%3lt;kUǺY@G[XU\o)pSY@;Aq+m9fML/@c84*܆ X#6"5)/|xIaӘ4~,,54لqf mvCi&kk> p9MF{52{=,Th}av%4}X3ݎqMh$A{Um:@K$=Oa{G"|r.zV x&ל=b[ L ,@8ЅgO>Ű}t՚3H$pg$Þ>aUiB Fr=0yA)Og! f8sylB4D}kϜҀ@Q8gz(e[}C'}myp3 @#  +2(fA=xx Z/79g1ګɬU~aYU|@MV4Fh&,@2+&/ }.`f_"M;;h\?5-sW{ (? Fie-5`[k%3onX3*tMs׵Q? 1,,dm}jdm#+r-t"|tԒ &N6{Y;HGS{xvQX"q5YuE:g O:O a#`ݞP,9[`-Jyuë M3ܳh7kRh+yS8h:Bخc_H=ju|Wqaߺ5aA}6y@Zn?ͯk)4d*}Z|@oW);^@7<ЖB HfIZ,GƟsT݄D9 ڗB 7vLDOI \-,*:\hCm~})^S{$Lf |j%;1~Memer\Ǯf0qT뼐&Ɂ&kIaUQ]{>}a>w>syRȝY:5(=pL|S6AkͶ><G#8Ur|x|.3K5,ϲ/{,K7k cZw0uf3oxm4=uA7g|aSIa{<G4q~̄ž{$ g-U+^>ofm Q zK jh k``+A_0uIZXۯCy$bǚȦ+Xkzt%p *hB(Z=<;ޯE7Nj؟_`~nH\WypX*. =܋Ngg4׸<054 vnޚF{jH;[/;x Xƒ7khvKs , Ͻvi{ ؅ޮc 49b{@FΚVbGW*AP'r_ւPh O| mRHPwv4Tٹy >mhgiڻϤIɉɦW)ݎ?f1]c MOƮdvOߑ\m`MꔳA^l=Q`0fa IL3ڼuzuAkS,w 㫶 zC GBuMs#IOXkמ`Z֑8u@v#}֕]SƦ@DSȥ, u#w^}oxHm {#HcH\͛-`N fFJKdm=%6T@jB"|8! tW!!88+Z)ւj@o]ڮ)OQ?\N!ZT @ " fWXjB @#0ftUw^g ZͤLx"`r8[ &4   )֝sA3<֗uK%"0+BKA &4 ~\oܟW-a7 ;OZ=@O L<%ޯ"N2Z${;=^]:Y^.ADDR?El\"x }V OMЦXiF;*l.f[iLS}yȂs.M@˫/V6F]QnEeUR@[нsk^y ~-{7ΙQ҇K7ҍnu#5O;ؼ슳'RZ 3 }VаfsEUپOs!vz.6n(}q~X*Br !bMynWX;@&k J8_DG%ڎ(sP:}Qn˸? +!B!k>{%*L =\bϼ4B؂UؽA~7xw[aGӾu0o Jd!fX4Zσ*{KxA%3j>pj -8{i_|%c!g*o9 y~ F>YNWACQ]Mڮ) 1oB5nn_  m ,pd}6 |de"4t]L` ^0fz~ 謉#vLrr -ȈpjdzdomU6v]%ҶGiQ;<;:sp֪>‚i4tyS4fM^Uņ`]/f!&'Y{sf~xS4-*F PM ]8&DgЁBJIHf 懎;K)A #KMh[qdxFe]FX}4'  bpGt9M8o_F+zN\@>(jv?JEp|J+k7ljnOziS ^m~hbG;HOmtu׵?ZUC56iV ~ߧv֒O! GW)~≷#JGLSY#uhS.5X!Rmms>O_NX'8W:Bj7[e/ܒM% Oi)y/"Dtؓ0=daeh`]4A1$bСy^Дw|77> %A7㣯7Pym Σɶ'.0`HrE#ڔM7]q N׶d#kyy_0JN!{qq"MVZy/ol8wI@c ;(wNkx hh78)S# )!t @w+YUFqTJ PCO1R} [.>E$Z" s9Y<~kp2#Kt ')gg}>^O׷GG5d9j]6BY(|`ژgyYzjr ֻBl<QEO\ZUg#}~h _Hl5oJђ:yRB#:2\İ~0S7:H'ˋv~ M ҥjҙܘ1|'dI2ΦQfA5^:ڟr 7PFx)_)!~ [1}f HD6ukӹMwkT'cQTBMd(D>)0 OQxk5()MU_3CF-bt@g3ϟ=2c,mYE3eӐtվyw2r+#QٔUuPDii6IvHUQK|w H$]w?coA@RcKXa] xصێAG.嘨" Y3-e1zeYb$6Z@Gl goޫgPO:eA}Au1%'â e>7} 8XSnljOԅCSXdF/ԛt}_mp=g[e`2MkgR? 
0Ʀ&Z*yp?3oUj*7KK5Hr W+qq36>c7ileoG t{hX_hgNo<:]^e>/177LC8jxsb6X>4O" Z0,w#]f4;{ఇT)˩7PT{pyaj:YH)^C^O{#܁tYlp:A1zvl$2N}`k]frU_E@1nʾZ\??҇Quvg647иP÷QCHleK+k4BC)bF[tzMGsz7?=i{ A&fa_ Zq7 k$@ 7j5T*^8NV-?h5qM9b#hIpFHSF7tw Ɉ9>@u^kSob;f_n ?]}@)]EN|OelQWF9bO#}d 1شj2UUqZr!cY8mAΠm&"?iGF†v[\A{7xKeQ)qE;~)TQJ'W,~?hd\wB[ U#=;e$q˪EC|0>QR7؟GM7z^) &@>U*jx I}SiZ~OMF هO+jw8JB{wyN)jy;{P6G7_ԕA)]ALvwe@In1[]S*]rJr.YyAp8|5l &s|1_{N/T $-+dۄ&2ǫ03H]Ig]p*]D8$65\d,#cI2D\-x⧞Cw=#4654wB[;K=nh?n@{:@8V\f?y 5Y@`2s`&z4zw0o:G@իF'2sB&﹓Ou ֧BM{Zz0U vdwN]1YZv~TN7Enx@)ݍ"|tP``%ۂ 4+[I&10|HLB:T۳'$iV#7a߼V v}S:j0" ]kwn a,x 46~x;C )t×F0hݰ4ޝ:j=ܦߝ<ƒxOiÒXZhNү=~')%ͼ;{ԓ,q/0ݐ/]II}?5 H6v=q1H\{z0ǂ 4XW4ofڰ }jsIc\"s=t7xڦwO=S=^B@P]ϔ6Wr&8%hP,fpnh*+$4XQjҎօa߸ tYr'- b3/9k=ڰw;x;DT0Z/yOXC%͞Bl'PzNy6wwI~>zo:rP 0Q?\K*^}M@x@-Z (%|xLp~n@rX9;(vdM^fP՞vht  *;o_&W*Y\v=tU))n#_u-n wz3zaW]3xbJcu#kѓSS{A˭6d!nq5ϕ 6"|vuFG ?{B'>{]!ב̵-}c>WD޸/ ƟdڥQʬ9|۩COLm< ]姫;+=ә3iB>X‡ߴٌ񙼛 6y2j3Rz.f/ћf9ݹnt&۹_ zڦFTN 瞞ŝoڣs( ቴ~U}]cv^BǏiϼ^F:)9Bʲ RкrPQ8jh oG'MGg*iΞV Nf%{lRJ=j:śdo qƅT0_ >:'iw|y/ ;D}[«nJ&+U-Th?0,w˱w\6Q6rJO3So2Ə:ԳշsWlkFof c؞DE7ڋJzUMXI6`/ߩtO oFz歅Qz`tw%^P\Aه́~)AQdPIE5l * wU߄єoj~c# Jұ,M;cd~8Ď0:@< @"tZr-t  @#;MJadb1RNbo*Cho3UQ}z5bi,&H858J ׳Zfp=Ig; Hq1,rw7@čkhͮ \%GΞNycټ HljBTsfz% Fclu&i/ `F+ @u]!q.ѥU)q28o5@Ї'TΚJel^Mޫi&Uq,+%CQ\/R΁Yn~K5]#[ atQIzp팣Ja1k}GZY MH9)$CHjؿ:628h2}nd%w&D[KY i;GLn1r*4w`{P0[Ӷ×F b* 3֓ξ2M_*w}+lNرSR䀡~;8t!4mԐHEs 0稾(o gАҵK[Оwg8:)+vNvZ %ݠ" JWϚLyY}D1 M{A<3XaݱW.Ԭi&{Q{ozJ6$@gOA O|Y.}?]@o/\K'wED4K+ωғQ^^8Ι ď*:xl-DZ'#'V"vsc,gUQtn>ڻ9ڂ6lǁrhE{ؙB@*nי{ħj!& g hHh)Pc16ԏCm"3r\>r_yR#2(;r%SDgFn-CM'OG^M i{#YFP> JŮPWSu4uZm۾tښs6B/pSճ&i;;9r`Yă/NuqAySFӼ囝? JڒKM((Ao.XB&+040 JJV0E{ӛA= [V4R:owvB(۾%s=kz$gA `-:t% 5K QdC]ӮNZijC?ׇ7R/Z.O=w;y]S*25UkDXrMǩfNłhՎ XiưKe?n߄Ҕу{쩐G! F'ڭἐ3'kh #:uL-oPSna)(mri4 M\'3:5)7l cPv&L&^L^W=4VZ러ICywӵCZO޵Nj8ϠC՜Ω}+G@4^B=D_ĠLͯ}cy?PxZVŎL'>y0"䷟MEdAR;5k<+P^Wi5b텰tJ<Z9\#/$hɚ-|f#, U- fڝ('xБ΂FNU,^z}7`XHpSb۪ga7TInv_[0SrEq%Y="uQq!A %  3:]D^dn'SEēoRo8B=B[:H3vg mŔgm mD.,BʭNMJǚUϚ ߻zH8skv5-F#l38i@ZXadPIq+gG]/qU.]ͭ:E4*]~✉4zpYv.as(/\TnPi}O~h/,?ɻw! U؇IfQ{zt^ߠek#Yk0mXbs:OaVK/#m aڽcjЇGPPoMli꒫4m?ۊ-&תP\t$Crms~ȁ4y0]}TCM_U[{`ZN8+,w:K+ujjZn2@IDAThf~[sriDA݌&sުSw? _ZýҦ78 S36!pA&~̮@@&X:q)})X; A>#* CtMj> MND?ʋ]t;|p Gho>DQR;%DŽ3:rȡTr&Τ~d'E MuGz[^2I1L)=kMp `g$w>-(UӝWK{&ܬqg$'PQYKA,9{ysP 'ʕRS[/\oN.Zo6XBnakͽ]Zzv&V)(GGXcc }ҧm3aw>w;E_6n\顤I9([3J5MiQN*t]5ă;[kM+>ԀN7Lbjbew{;N-$wzC)LoI#˿Z<+3 o:;(OuŎB}ѡާvxm,&Git~K:T8jбG&Ic_Ccm͘0\sw 8jw|^'<^d?(=G:mᬡw#X2c)2,ĥ۫SpH[ [uM^vkY33gҾ.{fjͿ]KOSZ07/G9hSs}$'a}aҧdHm# qMO؁};<%_pFG!GH|MAǛw,Хїs_@BpPZL($Wʿ*[RϿm4S.˛T{v(!Uc@Nw* zA!HE}\tkɔ&Lksiv4a:85+;"^D^r} ]SؔYby({Z@\ S-Ú0r=}SӧG H&O}CRO'v{&+5 ޴fVt]Yhܲ=zG m=7UQ6k3, C%(>?vP3t6#؃?1tZ7`Ryp FX`lnjޱw[la@G͇} {}h-oa65͐ I={:a͂G"yd~E' )i@u /G, z;}wR-`SH<;x)<aq9֟ O !ԥ",0k^{(h:9V*,'RLN>c%rwj<6{O;VAaTAİ«Ê;b #)qtnS@+! @w ᣑg^0DFѶ ~5F=UT.I[[`L0Us&XVY}ѧ|x)#fwpNV֔޷_I<;q7%>YɕZ{{Єfg˞~ᥕ[DZ6Y:kgYZYV7PD5|FudNkP^8ͳ:<:Rx@H׮ar\h=l*3t޹򙗪>X㺎}Vlt jTl]b:3So>k^evIE'IY cHj멼M&YXk'T$%'=ƾCR}JXhB0%}ꕤ}}XtNC h㬄Ɯ[w4!_ ?R@mM+5Bwf$*dDim66@bUYE59{>b% sYqK|@@݀‡@&61ɧ'G)!T?Hz_͊ҷ9hm4i yGKsa(  #`bov+Xhh$a &B(6ς R} &VX0{7ub5+yߜE'&,mޮV|uz$fO+hS@~_ D0wQp-ᣫBຩdj|@=N}#Ơƫ|t/m8sqۜ/{u8uS]]Mֵ>Пw]k*vԏ5 ~Q-FobOҼ.D  JZm&PX\1 hfFQg J*hƵo԰[-@T&QkW'k~غyݴ3i{RvwRBۓm (:_m5O/}jr"|xJC tqV3F @(x! ;"T}Sdz ~uwa-sN[X*,倁!ֹcdž5S`J9K?Xu-;[0 ibׯ6Y|XYyjWh>'6 t* !kݑ=en+cY0i>j÷KJ|ih?) wo/}"#J,.YZ7 @v@Pz"|uSV9w@EzRf2㘯dI 0& qD\w,'J˭7\r5 v &t:Zuw&\«[L5+?ڹqz=sPPPU^lWpg[fsxOۓB;@PmghS&_+¿_ɿ!׮dGzt' !)/?pya!(tvO`׎vlr[c)gRuzSgh`G<|$GQQQg\|CG9?pkB|."42jebjA ڻW̯;4ъ]yө3q$gzu}}-R>Z$XB***&K.xѳ 8o's=rW\=NW9|]3]Sŗ>E!!g͵lkO?L{\yg~cYfsﻒ{|IU%Rc-d yȋ(xOCxv"LǞgÊ==en&! 
v,7~3dl]m>:(Y/HYB+ qxs:L(Ee2u\F |`Nh@ ̮MiWV¯;HjAݏ='ǽO[u֬/35Y|'z)BJd0 bQ3v'{#J@݀P6 t@0wPQ`5?pPmD 2>EjC`|oZ/.F wQxӱ}lz\ M`  WAr 1PP |Ɓx(]vtE. wSVEWƒ4oHO%H #5D/r:==Md,Nu^$JX| 0fALp%S(X;Ro1H 2 P!3A@A@Aj> u.6ɇkE +MXsq\]ߋ:  o ,7ٙZ-[gm&  =h>zo!mvO-:zJac>-s3~q^z&gA@A@A@]r[m5Zuʂ+ ^{E39   t?"|t?撣8rҺ1+U_!IV8>kA@A@A@YrIfƋ3T^k%ꙜA@A@>gɥmJ؈u 79kAތO}Ȯ\    D9}Zu՚b-HYO]\8O=  wJ~k%Maz'Җvz.  ^D@/)Idb1H##S>v^vS`p"T   XX%z 7 ~J_XtjMNpdA@A@CBP#uv Y^!NuB1]oԅyZA@A}Dp; `} WZ- x;[:]JEDMjBA@A-Dp 67"=wpz}]slu=6uʽoxNnA@A@\D@h@֜gY+4GyANNE+˵  kN+M uۮJbg-Y x.7  " G`G nȢߓEcE+zQa +k76~ZyH#5JE._.d;A@A7 Go(E!ȟ?x kAu!c`Y?X M]Xp   &4*\"Ew?úm;Ɣ)w/꽪A@A h1 :aAqMF7Lҭ,p:ڐ\M>2|2iyN{5nyMZ5oh_?7C'XE$}W_=N6u{.2[CƦǒi=ܳը=jVN7tpg_35ZF΂  @A?*,Qa ݝ k#eZ?[蓓5'/;椏u{= |uíϼfمNs?Z4^K3L{[̖ 8>gmNopg7Orw)1/\ۛSكٿ ?Hd1WÇgxG"A@AR?C`{E5mQLC~tϳUuViX7Z,{fe %3vF>4,-˃ziR1LB:?ʂƏ` :#ϼrJ۪#mX. TUqLfzwi=Cuƻ?V'C !*D#gA@A@! ‡A)Ld!ԶcX˳H.kNkJʴw0ś Wbۛ#YHFsБal5ӱam2w_Zh̿A!Uϧ1x=J Cwc 1 +#(D q ֞L?keSuf2  bv%u@nhEE7D ?OYsdp8ηZϲ0-W}eNڝ@Ǹz"ܮ賘?f^1,h+€w"gY[jzk,3B2U΄h (,$X-Z6Y|a kZ\HA@|E1 !mՉ/..tThDجU^#ߊO5"oX--˚I=몴ӬsZ}Ǐ5od?\YF&B=u1c~M7b6=B${^Cc"g<࣪?~{3N .H"uA+ muUʚV+e] E, B iy7L$L^3{$E˪P\9%KURJ dOxUqVzA@@ ESQW zg|h-zLKy~O S ͯ)ӻi-%鎳V+}/FCrU8ΛP܍vkwpX+zi?&p*z@Enyy=^~Q#>}"CS\Tr6+2ҟd^%0x|*?-2_THd5-ViL뺺f]竭u}3^<ω9VZ2 ~-֍lpx}9g?Jnb2~e`xU0Oh v8|d^fENG5US<`@q@@ cT7?*d,%bx*t*{O}qHowVH;P}X8qglW # YiY1gWϾ FO^ӧxoܪiaVdTq~?xj%M9OTS /Um);ju$$TMr~=Yxٔa=|˹lQf?q'iNJMŲ  L_h@`< I*Ol1LEK./˖빆 = [=.Pw^Tv׳{X>rqRMvڽNӦvmu/l6}ݦvZ)߇i ̓4/esq|W\-ԝZ|R,"i 4P>N[ARIqx܅")]X9ľ$Sx+.l(2<"S<\6;q+^Cl|yb|&8uRwC=[x ZdglP윟L:rxO2ty,o>˝g> *JP5bG(J\-d  0`WA@ ",u[Uvj9p-P[yz+ I1&PIWE1+dof_aM 6T2a= ;0"/flXd|T~ώlYV"b:;[sdůj2/>ُ<+:y! 6ۈ O@, s۴XݭıD,_AfVoϧUMe_σ},t+_yzk~)>u=ǹc]6=:lzEͳ(&n#QiXf@reR9SYQi Lyc\? )ox9f^ @@ƒhH X5G˝vbRVYVW, IQVefM9H  ,*˭^uv[NmWv7ľ3 qA@""bԔ@iT;>}X~kh$;viR Crӿf /3xFym3nRq9/]/.[G(?~3 7@@@ ץ25TySbMyJmw:u:wm=>z2[8ᦲϻ}0Ͳz}ݧNGٸrcڔ4< ,l0 ]\=ɴʣr~0|޺b[S색YimgfO7sุ-Yhd*0&C_țF~6J="(YTsn-iCv$0nxkֽz9Ϛ@%o F }xnx_RGBUl^ʿ2?|& {л,ez~0K]5o1dML %ȪW3L?kOf>"Yi29@ZJsƊ"% &8GSQW$):)J QTS?ZnVP@`^L>?pR)JܢsVC@'/7u'*2lm:xw@L/UD$ yr(ݦI6EHns>k\2wOgLtC?s`D(ҐDV0aӬRT|5%.+x>*rFs"F&NqTIJugNfIZ~'ӯb rb舥Vp=/U KFV<\2(mA@BBXB-/˫VNJG;q/-7O}:Ja`[ F+7 K&FҊo_udٷl~tT^VY摧]ZZۛ,`lHaS&c߆ )gBAyG^V C+mt\=wEQ(ʯ[bSnS0Wղij|>xI|QgCxyǼL*VbnSqQ. )[ߟL/[L=@bMsw8*  R|'2$wyEuOdMP>j.!`nZoSu#TPBJOg:~l-,   PP>B qA@ (boZtC:@@*CNlW^7T@:8foT>!iٕ5v#AiZlu  uG֔)ܻ#xmt2=|dquWE GDV>ZoaaJ ~##%g^b k>k熔  DV^'v->RYo?hEgm>7b~!9iܘ8 xP_*ʪ3l(CN%rcq:(J~oQ:ƝRPTztѯKcp"q0Nb:Jj/8XC| k4_iP#(¿ ۺeRs@j|}<*(@@* /p>R7xHUu򡐁cåa 9+sl (xy]!- RM [!ӵM7/~   pL5V>s96<&kDr [ڗ)4m^lFA0&`lbx_$WPToj%{M0 X  P1[>*ξa=sԸ<}cG^%dA P.hI^_Jv8?\<@@ DqטgNg_4R Nv͡^zsXhek1ϖ'2 uy :i3)]^STNV p;-[jd   qF(roe-{qwg==טbgM RVogeq5~|K*4&KG=Pr9~IƔRR(gNr>%RL7G qsi-m#_ eUw.s[ݤ?^bl'=nKDpq\͘J1EH"}s{2FFeEV9 <4`[yu{Gk_q'^~FƔX28Jv^6f )^MS'rt`@IDATbQnTbȽo!+wW gcua(rǛ)nSգJj!{1XGws;wyvM7,ɇy/7gbsw^VVNg} ??F\5V{oPnfS<\Ŭx| FjOEYb%qs9Qf\ρers9Or]qCϺo(:?OO*cӍ?9Dsy6HFa t(s^EwYb&U_5rt5Ou(G#ƌIqX"hEU+I,߹Vb-ƶ:@@*"/mJ,ty\qlЍW)Vɹd͜ilYμ俻}$7*! *ī }lb7)by#eٴcfN$a2isVE9mo k=yȟ)x z;g1XhE8ѴJm5[$;IcTJ>bc xs/mK 1A@ь5O}ڶ_ͰO 7QKx:~~{רQ{Xi#armbx+cFpk n)3VcyPz-+,Skݯ̱*T_(gEG-L Ksc3AA@*O j,GRw ̛b0z.vQ{.jUh2sbRdb a'y ܛ ks#x5\nMl_KNʷW_x0}M=|r)=Mʓ[6@dߊC**hLv\o;^#x$]WXg{ܞM//{$L2υR{õ $vDI{;}ynVqAټjzmN1;X^RyW{B[+.OHW ?ߦ9>8_syw! 
j +  :xY@@ |T]ى 9  "X@@qneFgaq    PMP> @@ uS0,W>k]  !:Ԑ@"@= II5L*z  u@G@F  А,E/à~~8jQMpH J@וx+ͬB&Rz-(u@Xȼ,m0o(x4( v<ս1b?u k)fO.Yѝ}*_jNmThN&nO%aE}u%W8@ 4(##cJ.TUyģ{ZzIzFJuVb[=\hc܈1w{l֯[E/ hQ@ D}@.P5GS;FC>-YIV<}dbݢdd@@BJ 9\y mqي3*Ix 7=%ZihV@Ч*ߍ3@wJQnyt8"OKucU]Fܢ#@ 1|xFnwt3y\6n7Fݳ)ɜT;φ}!^x.ߩ-?p"K!9<ݮ[ dNʤEpézS O`N .>U Jur.2;Z}@Ч׍*|M7{wvw2ˇnU[1*q   r|Cٵzvjcǣfm.cBR]S—?dMBG ;w`OK/o0'Tz"}΍k@-GC뺑Fחe _u)|'Sֻ3cN=[F5q]?(]ӧgZ%pS>7E Q,",(1T,@~Miɵ,a}ZrUnlxb2%}8Z&n,EJJR6U,5yu}0rڭn\jY(|T2S cp8ݭO/yՊ<KeÜ";ֺ1ҁ'o&MS}Z3ʳ(9kSU_D>dhoN"ZTv2/=ɦSaaarb)5=!weI3 )p|jCH0z8wg:OڲkQvtר\K>KO Hi1_8S6|w:S sГ "'mݽҊ>Cg4:!q%tITo~9(C7ZMtI3WY8@]'zK-KX(QB,ul,_2Wo+ Wno~rCftMQQƽ3ݼѷ׿Z@ɉt}OPM|Si}ךo}fZVߝ.<|Iڤ&~^<:f>X>7閏:So%k?֫_Y>L׺E M~!=}o[(aߖoN;VloQԦ4$~Na(.Ag@gZyh̒*N8BmNߙ{޵q$no?uɀrӎj<,w3[B|TziT (9x;]n_WPL+{t0_uN: | ]4lY4>ŏaaN7c2Zy']vfoJkZis#=XuzI]ڲҰ}ZEc֬Uύ;L+(2JMҷSm)!.tmoѦ{P;5lЙԓ#M]ZS;Jmx?ϸMlZ! ~Q1QMGdp4,^p`.a៛(1>nxy-}=RwMS-[1$GZ3"zx4kqy'qwϥ[fFZvZ1|}yKh57+$'thEkE!ٲh=~WWBbƉFJrOGYPf,ôjvY:}|dHQTXUubr}ϔ({lda%(*|Yg᤮苹$Sd&M%H[^"!DEueo3WoGsgڶ#ţn+:% Syp}$ӭ~%[v죵[vSmbt՜VXrTT}K$1=r%n{"kGnP4k%@)>L@vi)b^btƂԯ{{݈)I͏eO!\|DTZ%AXaރԔ 2K 9W, 92eBsÚFrزycM3[Ӹ([v%R8tilu$WdkEeBb\,]nTX2p7YHq">u_p ;ﮠ1o}莫^RB  dI]k6{JåUڱ)b+4XhMAڜs8KXV7PWP>µe ̷_D{= )(a@*G`x)U~CP>°Q TDfVig?b]  JG  \j(?%wL7(>@_CCiM)q Wt~} '   G7Dҁ-{/ܷB8m>d@@po! \ޢ+XH3 i+[z7$rx&~؇Чvɼ.Um&|ov =M kP:Aw5[(p*($ãUNQrB<$'P}˦$+/"5_mO6C_H.HwnB 붯p\պ}dp( ׫K҂ek) A6T!^4( K&G~YAI :'; HF116"I~% ki^*:\କ$DfIKHqj/! 絿hÖ熤d  aH *Kp[,A6RZi.]_؛/pOKJ=;%MSM%V FN@ʍ;ٿY'I*>UMع~NJG3CQ.c!/ @*òvx<^~;yK$}IjiDIHXn~SN ٛ;l -Ws-ӌ:ORжgmƊlɊGY\NviY ]ۥ#_Q|XD~.} %įM UqyK: >גw'Ӱ1Yկ,\E_QI>U͆dx\HKV<}E*9^wzVfb@@@^ DR2%FbF/)͛}Z/ 2hrZa)ȋܰSA-X`}ئj3{ڳ>A   P_"^i1x/6P~4Y .oSϠÅ.Ͼ("-#_\Mع ]}Lݫ TEtY}^L+)u}M;3}΍u'J ʇ(};2o7W! 6$+n W5į}VZK4Kc S+Te/+swctáyY;K   "ZNxIb'mٵ^J_m*ef+*r~Me+Oyl $ЧlK֎J_@qgrހ{8"ʇ攫MNGţxWIz _*(t8|@ Ŀ4m_e+Kԧ~n  QM bcMmY=yE"W7vyy]iS7^Hj E >U]6>JJ*/UNJ۵[2rE *]@Q`m8aU55Jڨ!iq'5{VKjSd|(+F鬀4"mJVgŁlV[zGCS>ЧBށz^|nI-57rE燼0d  @ bi@]ZL`QE`A6i@*S!7DnX|m6;kYH Bf   A"zڕؐ;@vBxhPl :!KK|/SS~aϹxQ)@@"@D[>4aZ+t]E<: -̧/\/C _[W4[vo@@@ (|t\lzՔx zbvjӢhtEӣ.?u(!9uԬqꞄ>-i;prd}))1! K%~Ч(ĵ7}[)ccRk>x7+ c:A@@4kTn\@ȧ:烴cb>':RQFizzgy4m-Թͅ$̂}|'4G̍#˄p!`KUQU;1ߟq+S.fA*. 7OB;6r4;=8]'шһOhƐk;;}R56Qց?M)dži t5 y㮴/w]t]=ޝ%SuE)[w6~cFbj=A&{xV]qѡnqTl{tlj >ռ h5/.'w^*RrϧEӷAbsP9@e p8[b^`N{2?Zp7Hy5 IZ/ӌWԢI~hޤ]VsCE}T\YPգO];c~CKJOM)_\wB$ Gl"r5JJR*,>@NW^//R^a6[=>;xx 5it'I %#p_E@ 0sJ%1cRSL˶' @(Ot o>Z7CS9,Z[!~OMSPD%MӪE/--_a͇zA~͟'Y@DW|aA5jtcީ}G썵8_TTLy_YQB+ڹwQHWO(s hYmbt㯐X>@@@OӮa'֋r}wQ㯧O(E*hOUDϊmO\Jhze9ccEJGQ$fs\= W;]O?Ix$~NJڄp:TH0$w<^JVr+x*  ;?Md   PsvU6SRz=wo[DT>h֓ŽK=Eˋt?nΫC}:*N古2szQb!Z]K{K)yxvRMuI I~8B|EOlb!֧g|Чip7Awd6tRl5A@@"($Gnfp\2%RyߍئԱ霡UMH@=4wX:=||5 Om}VmE9)7cƉ񔒜H=:v-V׸M/ Oy<^r yBA^'ᕭCr4mv$QC__JbS{ҹVpW&޿Ó+BP[<+Nk@@@"ZPaǷC QÈ!ȮTeX}\$ tnjr|8g/y۩O:jbEAS*SŦ3Tg7ksO~kOßvсyl4({N@@@j@*5<%U-l^=Q4ν:41*+X:px-|zu~ISRiDE1w# >llX\now>^5Vs)=ȵ?8pӑU;@{ZֲT@@@"|XSd䘒`;TwvGMdLkvvBB mڧ5 $MU^ڵo)M{3;B CVˮڴӴxqxX‹#rRXEDsaݟA>,r <[i/gВuﳥ Q~n>9J,D&EhڂT|8\[L;cI\  QA bk(zf[Mp'*.nv ,2lSV]N(j _ЦY'i٩gkAss 1Echن+ݲs\~xTyN1yE?HͨӓEK   L b&T:4sMFt ~ke^daA[42dz ~ս}s+fۅSZUSڜO7]>}u:]\ժ+%X@$KVZӪuu   uM bGjp8c珍Z'(v7kwr(6VtrV]MuFj _P㚙m'mN:}ybKUc?ЁC ޺k?*pR'^N7WҎSSi7vU]+ iWKt\WF:9t5^ӎ WG]O[6P ,}_Л;MRx/[\u_iz@jiO(۫|8.r(l2}VVb0]p4/2ܮk_UTSݰYi]VQނ=56L#o~y'_Jr4MQ4R'ʇ` %Xoš\+Y(GԲ@eR[̳1VybɐT>o-b%Ӯ̏L"/B~uW,<7cܕl)x|QL˶b8xy-+d}*ٰ$' X>QA@@ DFk27y"|řr ~3ZvԲz |SA7kzSkFUʖYeGTI/+?t6Y<$ B6[*>_)Or;Ql<-}o?PsE?Qry&tp\3RLZ;X{GDǺC^+ @(MlҵZWxΨ%s7$5YU{Ѷu ˫箏ߠyޞoN'K/S6:v#Qd KkxT@@@ Da ̝y/~#j\v|vي7/VO4}#WDe%ew}ꉝG#P~U^>    }0$ $QKwP> D(Ψ%;(5l' @ʙRxMQSK%ny%On>ovUOdٸRq   0ἆxf33x^CHZp76Qv.#b٩ܖԘ ֯]S_5XK7ܨ0K(gA܍t֨+(2 "@j f5k߉J4 v0 R@'۾3nVY?|A{f|@w d? 
ijvդ%6׿pm՞Vd@@@( - M%樴<-ۓsJ_IE;6pPoL'k-<#&MH"M]!"_9RA@ 2n:S\JW4 .T}ϑچ)Rߐ;gai*WI!˙XgQ@Q8Zέ(LTR7t#+X P~u,*էw73$5o%-bY DXG韻@Q@@@B@_dв:m}lɢt.k pA DOrB<$'P}˦ڨ5VږVmE9 (7\.TA@=_+5{Sϗ8'uV(+Oi]glJ{@AZj;mLZ)h:jrLJw:/'eye?ʤmo,5'tf*2F%z'p)P=CQ|XJ6JOIyt&8hoZ#:h'O&hn +lߊ<{EwfvSIHhm g9 g/xl$g(It54X㶾߭~`=J$sۻHGvٵbژW5Ij4(𐫠`L 0hW5]j.*++s8wXޫؑ5Ã$5EinTƧ/r|$'C@_-^Xl=-OD%tҾ# 'z-waG,C97:60Yt> ۰.t*DŦub S#yHDvNEfLlHJgL 0&вڍADj  "cޢebz;~)8EP?;!ÜOe"HN6@A0=1&8vc|? j!Q.V@i $[-KVjH$/'@ibcBXcum3V[ ywqLFF@ǀ*|0s2zv (eG$x$OTKtIKN[Vd``L 0&v1*n`jox 3wĊ#YNظGoőaBz]AXCAzUY/(njX'!}nW;+^;=Z}b "];hu؛!Is@&34c!ZQOLD]tQ 2$3Ny``L <7>M@T-***Wj-Zǣ&Y֠iXJq|+`Q+-" +屦OD!ZypL Y)9I$c듙,4(}*^%Xw@J<'-KZr0AJ9%ye#IVz>بO1&hqo|x{=tף M*D1ǣI2 IgB-TYb[^tbQX(:g7gH0鴍T| ='UK퓔rZlIe )`x"v#5(w g Jd8 $eKF/b @"尫 1&`akCNWc8H%*G7X&t{f*[aӌiI"c~ dXO_+-~DnzED߅cIZ|P0"kxP>Ƈ*POJJD>GrF0S Q]%*rC:9:3m#!RȨѰ+U"Y1-n{$$WZZ!>&YIfZҚS`LqځAKz!WkQYw"Dt%Ç`$W>M9Pf˝q)t#ԫ]JJo,`;.%5b:E~AծLҸ e9(wmNKH{m#(-{< aHb AGFǎ1$$+z48eL 0&|qn|XB1>hXH*g+r*by]Q$.vBT{mUC0 H42aaI[r7*SAsA55>7dt >|H {ؕ׃']e`L ĭAڨEZ]i+=!T;qG2zr{[y?׫jTۀ> NQ*dOM궗JHC0YM5<Ң JECh[u`L 0&к rx1UָEp"8֐<ԫV9gSk|B$d''dW3$K[Ob͉!UdrAt 6`L D@TnhVTz%HoM3Vo{yj0dt4"j8:%{paY;#I MOtծлAFw+ν!`L D@\WQlsԵGW8hjŹ)iE* ޲9ߡ'}E(}YsבЁwn0%#0o8>ChaЩ_jT &`L DAoG[2My0hWH&7t|-du8`L 0& CpwRT?Cg!œ:th- ֩VI`L 06C n w'zj'ֿ+X=R1.,֩W&`K n@ȓp` bIRxD\#)WzJ"tɈi}"YY=`L 0&Cdh))3K==hl{foUP|>=3NpL 0&@=qm| -HOv T;?I6Cӓ!u휕ؑ%z-HDU,-)Y&`L Vĭ!+4C\U`IDFYH/d;SO^_%V=!ԫ;Ŭ>}bjygL 0&{ ۡFzpi@^*ALNMGtK啲S>xhWDODu*vt%aL 0&V+[OFGBB6'6jj] ;_'Yl$$3yam}^HJNJ')HNL 0&@]f|к7۩9mt$&bYTTEfCtME*qtVid靆aa5rz%AWØ8[ɨj׫>];RcBlZ^bƷP"X9&_+mgXFyBjrx x$HJN**D0j!J8~nk؟հZx|C=S,'3ٖ$yInjaİjzEO)" j@ `ўV'%ukώʔaty`L @,bz{KY ʻ+)JTUU ]2bGu8n^75AlOK{:8yZV {Hɢd$9H"+&_fy41x)"_t-O$Okݞ]wgw&`%UKTT)0R0[ITE*ICj* jQj]7A3+DrUXmCGS-kYuu(@ FGP c#ɖ--Hj$7Eh#ijl'!Nn͉-NQCUOUش T5}"9ZS(d <[ygL 0f5~oΝ\fG4+4D&|Q1*Qcx!4Q Q15Ы0j^şPLidb2:vL$/!WBF6^!KVh.:E+2UToZVATCh\kTLmZdֱ}䎳`L zb~Ou`|4kFeD YV 1,ˁWxt"ʹl'9(=Yiu:בC#YP;djW e|?Ǜĩ1:EիJ|[-Rz՚:vLYw@T|v$ 0&@;$KVyrsKտnvo֭FF ,,KZi> m*JmAE0=#&rͩ,RDrh*AAO z`ZQ=oٚMʯkV1Яco!Su*2UaKe=u&`! Ē![E/_}_^~q7 S!8(Rl1 _Q 㣒&״RdP*ow-bEîRR퍆[uc3M]߿hޜ7Ya̛TNQ[KZKbF\La}5{6ҥ2^SKat`L ĒA^ջwo?oVNcGEO }iTCRM.'aAo\GJv-ZBy,*,j6Yaj/=Q)? ]j j!mib//_HcSH jTiއmt`2NJ.9dw UT5?d0J h<: ˗8˰Q+-D|3w":E=^I=}wi(ej_nD 93&h]qO:L?> Ru[SN:m<~ C&Ո{ucBCۅm62Bd޵NM?+i|Hݡ="ԩ׈2L}}pxD<81(w K+.-l@D`LMhvVozO7ܼ 4$;i|_/ecG%#*]VZr1UIrp ,ʰWz<} > 9rŧ_7㊊2GdlFF$ķ= B6mת:Eףb~Lӟ@2}Ko=C1Su/3&'߈ ˊݹG-DFGұe`z CԱ)$$'ut8E$?M8Ы**UFiKӐ*A TYJd5ӞdWNv! 
LUL(+ϫVZ:h~3̎RH`L He}D^>Y g~!4TF 42B-["6g{\F."F| 2"hseguAd1" T{USv,,q ]cf> 0&@#aW^^OFcĔ,WiŮ 5ГMkDA=#d|P>bIG{0o;plFns6f@2 tŲƥF#]˭: E +D\!nX!agWp}G3e޹bAOQT#dDYDlF݈{p8Yk4v=^[ S)h2į.)n<["T嵞\C)U&v %+VQgx4, \|!o,76ʩ#/KTE}SQPM(Q5ҳ8 72Xy>=k=5_ba E(Ou `hPPX\YX/yU RWXR͟2\.*xSQբvOBKia Єr 4u ,pl ǿT7=ep=aQth?w;ѥHo* (X9ٖQdxr݅˰f ]m7DUz>miesQ~ 5&eG0aW /DQAa_q)h# ,,IhGmxt3\zyKkR,DQՠ<;l_oY8bw2*Kp H].H wtW#]ᰶ,;ϝi[swL 7tH>0@v65>^ƃGV«Laэ%"Dwc1Ԥ S:R}u^ ?&o` ) Qج/R -b hQνX^y_`1ULnOorM-ʟ"EnZ 8Jb4ƾ+vHןxG13,\k"٪t__Ruj{«v\b"US]Lz]?(iSR A-Cғ2>Us qXkY%ٻM>KkC,,R+&d7 V~N9tk&)Ϩ3 Ӝ*]k;RRX]uoɪQ~CLGSEs"#>P/ZuWݜ cg̥Qf7RCi{x&C[/\N)(s݃sU(OaRP;ggI*}T0h7# KG"-UL; vWkt{'dwԛU%$+x⹙PbBo C=kVo8 ̳_wc)/:|.zGAFCb/=}z8:9^VJ/茊CPy3mXs4[$*Y5GE| Ǝo  a d{ A*ա^8?!'X+e#GQ0>zS>p*{喢ޚL=9%3elQђUTi:}2oEz* z* %z&0`zOlS4&@<οʞ^k4 Hi\ave3㻀›"i7:JƯA9P#se YV.@,/_Z oTmӑ8Hsa_t VerQxr) t>|`7I06»$}evþ`z*5?C1m2we%{ڰI BcEyʺRUz{ ÛS9sW@h,bUꉑx/f!9maĞc14LѧJ܎Z_r!#MZ~#ZuO毪ĺy//\r Yb0P/UFا?cQJWGM܀ޜfw2k½ P;dBZ1EE/ZasQLt\H/*0Dgc/Zh-M1Jr=iݦP$__ /zT|| w\qé$ĝ#pFnqUv=Zov>H*hfe> J'p;cx}g8~gs<3ۆ :4{a(uaL.Ů{w0`&s4{[#ܠ񶑋p=#tGy՜ހ` }4P#ebvۍOH \ ?[%Ysqn1LiZkrF$ͩ܂EUŌ9gi5A zJ!r+`*Z0 ||)*vߏ8DŽi~Q 8R녯|zT_x(m/ny>FL#wTC:3TCLϻ N;މ i#?+k'm:ӂL Xwú_o C\.ϡBJ"q})b76=D=vG50lAknB81~/Wb߱bu7:d&.ƛrC ,~Nya*NXR:EDO/<ɴPq-<鮜ehRjVZ1V{qÃ۽oYXBٲ[r 8rlzQ潏T%ذ<#[񀾺=/ 0 [%ЉӋ* TD:C=x۰up ETrSe^úqcy®L@.{MY:׍z1Ԫ[N$>jɾX"Ʃ0dqN}?q{RQr_SheF"0v.`:l} G7_=D%Kèg}ݡj foP^*%FA_W:lEIDG2\M SMyfܻ@f}zss[hC1{ <<,ͪB[md⨇F W9~?^Նy yE.Ve7"ŠSUg(Q:Z|ʨ